author     Dmitry Torokhov <dmitry.torokhov@gmail.com>    2013-11-15 01:38:05 (GMT)
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>    2013-11-15 01:38:05 (GMT)
commit     42249094f79422fbf5ed4b54eeb48ff096809b8f (patch)
tree       91e6850c8c7e8cc284cf8bb6363f8662f84011f4 /drivers/staging
parent     936816161978ca716a56c5e553c68f25972b1e3a (diff)
parent     2c027b7c48a888ab173ba45babb4525e278375d9 (diff)
download   linux-42249094f79422fbf5ed4b54eeb48ff096809b8f.tar.xz
Merge branch 'next' into for-linus
Merge first round of changes for 3.13 merge window.
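The file listing below can be regenerated from a local clone of the kernel tree. This is a sketch, not part of the original page: it assumes the diffstat is computed against the first parent, 936816161978ca716a56c5e553c68f25972b1e3a, as is usual for a merge's diffstat.

    # stat-only diff of the merge against its first parent, restricted to drivers/staging
    git diff --stat 936816161978ca716a56c5e553c68f25972b1e3a 42249094f79422fbf5ed4b54eeb48ff096809b8f -- drivers/staging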
Diffstat (limited to 'drivers/staging'); each entry shows the file mode, the file path, and the number of lines changed
-rw-r--r--drivers/staging/Kconfig22
-rw-r--r--drivers/staging/Makefile12
-rw-r--r--drivers/staging/android/ashmem.c47
-rw-r--r--drivers/staging/android/binder.c39
-rw-r--r--drivers/staging/android/binder.h48
-rw-r--r--drivers/staging/android/logger.c8
-rw-r--r--drivers/staging/android/lowmemorykiller.c43
-rw-r--r--drivers/staging/android/sw_sync.c8
-rw-r--r--drivers/staging/android/sync.c24
-rw-r--r--drivers/staging/android/timed_output.c29
-rw-r--r--drivers/staging/asus_oled/Kconfig6
-rw-r--r--drivers/staging/asus_oled/Makefile1
-rw-r--r--drivers/staging/asus_oled/README156
-rw-r--r--drivers/staging/asus_oled/TODO10
-rw-r--r--drivers/staging/asus_oled/asus_oled.c843
-rw-r--r--drivers/staging/asus_oled/linux.txt33
-rw-r--r--drivers/staging/asus_oled/linux_f.txt18
-rw-r--r--drivers/staging/asus_oled/linux_fr.txt33
-rw-r--r--drivers/staging/asus_oled/tux.txt33
-rw-r--r--drivers/staging/asus_oled/tux_r.txt33
-rw-r--r--drivers/staging/asus_oled/tux_r2.txt33
-rw-r--r--drivers/staging/asus_oled/zig.txt33
-rw-r--r--drivers/staging/bcm/Bcmchar.c6
-rw-r--r--drivers/staging/bcm/DDRInit.c54
-rw-r--r--drivers/staging/bcm/InterfaceIdleMode.c199
-rw-r--r--drivers/staging/bcm/Ioctl.h2
-rw-r--r--drivers/staging/bcm/LeakyBucket.c262
-rw-r--r--drivers/staging/bcm/Misc.c2
-rw-r--r--drivers/staging/bcm/Qos.c538
-rw-r--r--drivers/staging/bcm/Version.h35
-rw-r--r--drivers/staging/bcm/headers.h3
-rw-r--r--drivers/staging/bcm/nvm.c2
-rw-r--r--drivers/staging/bcm/vendorspecificextn.c198
-rw-r--r--drivers/staging/btmtk_usb/Kconfig11
-rw-r--r--drivers/staging/btmtk_usb/Makefile1
-rw-r--r--drivers/staging/btmtk_usb/README14
-rw-r--r--drivers/staging/btmtk_usb/TODO10
-rw-r--r--drivers/staging/btmtk_usb/btmtk_usb.c1784
-rw-r--r--drivers/staging/btmtk_usb/btmtk_usb.h138
-rw-r--r--drivers/staging/ced1401/ced_ioc.c665
-rw-r--r--drivers/staging/ced1401/ced_ioctl.h2
-rw-r--r--drivers/staging/ced1401/machine.h32
-rw-r--r--drivers/staging/ced1401/usb1401.c613
-rw-r--r--drivers/staging/ced1401/usb1401.h285
-rw-r--r--drivers/staging/ced1401/use1401.h201
-rw-r--r--drivers/staging/ced1401/use14_ioc.h503
-rw-r--r--drivers/staging/ced1401/userspace/use1401.c152
-rw-r--r--drivers/staging/comedi/Kconfig138
-rw-r--r--drivers/staging/comedi/TODO2
-rw-r--r--drivers/staging/comedi/comedi.h5
-rw-r--r--drivers/staging/comedi/comedi_buf.c6
-rw-r--r--drivers/staging/comedi/comedi_compat32.c5
-rw-r--r--drivers/staging/comedi/comedi_compat32.h5
-rw-r--r--drivers/staging/comedi/comedi_fops.c101
-rw-r--r--drivers/staging/comedi/comedi_internal.h1
-rw-r--r--drivers/staging/comedi/comedi_pci.c4
-rw-r--r--drivers/staging/comedi/comedi_pcmcia.c4
-rw-r--r--drivers/staging/comedi/comedi_usb.c16
-rw-r--r--drivers/staging/comedi/comedidev.h53
-rw-r--r--drivers/staging/comedi/comedilib.h12
-rw-r--r--drivers/staging/comedi/drivers.c148
-rw-r--r--drivers/staging/comedi/drivers/8253.h5
-rw-r--r--drivers/staging/comedi/drivers/8255.c56
-rw-r--r--drivers/staging/comedi/drivers/8255.h5
-rw-r--r--drivers/staging/comedi/drivers/8255_pci.c11
-rw-r--r--drivers/staging/comedi/drivers/Makefile8
-rw-r--r--drivers/staging/comedi/drivers/acl7225b.c136
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_82x54.c1068
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Chrono.c2054
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Dig_io.c1041
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_INCCPT.c5465
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Inp_cpt.c870
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Pwm.c3586
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Ssi.c849
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Tor.c2069
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Ttl.c1048
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.c64
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.h213
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_eeprom.c11
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_APCI1710.c1318
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c4
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c4
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c4
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c12
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c4
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3xxx.c1376
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_035.c1
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1032.c11
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1500.c1
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1516.c12
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1564.c1
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_16xx.c50
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1710.c99
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2032.c10
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2200.c9
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3120.c9
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3200.c1
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3501.c11
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3xxx.c1399
-rw-r--r--drivers/staging/comedi/drivers/addi_watchdog.c9
-rw-r--r--drivers/staging/comedi/drivers/adl_pci6208.c8
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7x3x.c5
-rw-r--r--drivers/staging/comedi/drivers/adl_pci8164.c5
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9111.c8
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9118.c7
-rw-r--r--drivers/staging/comedi/drivers/adq12b.c11
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c4
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1723.c56
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1724.c18
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c12
-rw-r--r--drivers/staging/comedi/drivers/aio_aio12_8.c17
-rw-r--r--drivers/staging/comedi/drivers/aio_iiro_16.c6
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200.c11
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200.h5
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200_common.c77
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200_pci.c10
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc236.c13
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc263.c6
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci224.c12
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c9
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci263.c6
-rw-r--r--drivers/staging/comedi/drivers/c6xdigio.c7
-rw-r--r--drivers/staging/comedi/drivers/cb_das16_cs.c40
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas.c11
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c37
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidda.c17
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdas.c10
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdda.c17
-rw-r--r--drivers/staging/comedi/drivers/comedi_bond.c370
-rw-r--r--drivers/staging/comedi/drivers/comedi_fc.c8
-rw-r--r--drivers/staging/comedi/drivers/comedi_fc.h7
-rw-r--r--drivers/staging/comedi/drivers/comedi_parport.c10
-rw-r--r--drivers/staging/comedi/drivers/comedi_test.c11
-rw-r--r--drivers/staging/comedi/drivers/contec_pci_dio.c6
-rw-r--r--drivers/staging/comedi/drivers/daqboard2000.c34
-rw-r--r--drivers/staging/comedi/drivers/das08.c14
-rw-r--r--drivers/staging/comedi/drivers/das08.h6
-rw-r--r--drivers/staging/comedi/drivers/das08_cs.c21
-rw-r--r--drivers/staging/comedi/drivers/das08_isa.c16
-rw-r--r--drivers/staging/comedi/drivers/das08_pci.c16
-rw-r--r--drivers/staging/comedi/drivers/das16.c2004
-rw-r--r--drivers/staging/comedi/drivers/das16m1.c12
-rw-r--r--drivers/staging/comedi/drivers/das1800.c11
-rw-r--r--drivers/staging/comedi/drivers/das6402.c11
-rw-r--r--drivers/staging/comedi/drivers/das800.c11
-rw-r--r--drivers/staging/comedi/drivers/dmm32at.c46
-rw-r--r--drivers/staging/comedi/drivers/dt2801.c36
-rw-r--r--drivers/staging/comedi/drivers/dt2811.c10
-rw-r--r--drivers/staging/comedi/drivers/dt2814.c10
-rw-r--r--drivers/staging/comedi/drivers/dt2815.c10
-rw-r--r--drivers/staging/comedi/drivers/dt2817.c38
-rw-r--r--drivers/staging/comedi/drivers/dt282x.c41
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c50
-rw-r--r--drivers/staging/comedi/drivers/dt9812.c1044
-rw-r--r--drivers/staging/comedi/drivers/dyna_pci10xx.c9
-rw-r--r--drivers/staging/comedi/drivers/fl512.c5
-rw-r--r--drivers/staging/comedi/drivers/gsc_hpdi.c44
-rw-r--r--drivers/staging/comedi/drivers/icp_multi.c9
-rw-r--r--drivers/staging/comedi/drivers/ii_pci20kc.c1015
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.c55
-rw-r--r--drivers/staging/comedi/drivers/ke_counter.c6
-rw-r--r--drivers/staging/comedi/drivers/me4000.c132
-rw-r--r--drivers/staging/comedi/drivers/me_daq.c82
-rw-r--r--drivers/staging/comedi/drivers/mite.c6
-rw-r--r--drivers/staging/comedi/drivers/mite.h6
-rw-r--r--drivers/staging/comedi/drivers/mpc624.c10
-rw-r--r--drivers/staging/comedi/drivers/multiq3.c11
-rw-r--r--drivers/staging/comedi/drivers/ni_6527.c9
-rw-r--r--drivers/staging/comedi/drivers/ni_65xx.c66
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c8
-rw-r--r--drivers/staging/comedi/drivers/ni_670x.c35
-rw-r--r--drivers/staging/comedi/drivers/ni_at_a2150.c13
-rw-r--r--drivers/staging/comedi/drivers/ni_at_ao.c66
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio.c6
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio16d.c34
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_700.c37
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_dio24.c15
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.c349
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.h29
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_cs.c24
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_isadma.c226
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_isadma.h57
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_pci.c17
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_regs.h75
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c67
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_cs.c6
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c63
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c5
-rw-r--r--drivers/staging/comedi/drivers/ni_stc.h5
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.c7
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.h5
-rw-r--r--drivers/staging/comedi/drivers/ni_tio_internal.h5
-rw-r--r--drivers/staging/comedi/drivers/ni_tiocmd.c5
-rw-r--r--drivers/staging/comedi/drivers/pcl711.c10
-rw-r--r--drivers/staging/comedi/drivers/pcl724.c228
-rw-r--r--drivers/staging/comedi/drivers/pcl725.c91
-rw-r--r--drivers/staging/comedi/drivers/pcl726.c11
-rw-r--r--drivers/staging/comedi/drivers/pcl730.c335
-rw-r--r--drivers/staging/comedi/drivers/pcl812.c6
-rw-r--r--drivers/staging/comedi/drivers/pcl816.c5
-rw-r--r--drivers/staging/comedi/drivers/pcl818.c5
-rw-r--r--drivers/staging/comedi/drivers/pcm3724.c63
-rw-r--r--drivers/staging/comedi/drivers/pcm3730.c136
-rw-r--r--drivers/staging/comedi/drivers/pcmad.c198
-rw-r--r--drivers/staging/comedi/drivers/pcmda12.c251
-rw-r--r--drivers/staging/comedi/drivers/pcmmio.c90
-rw-r--r--drivers/staging/comedi/drivers/pcmuio.c988
-rw-r--r--drivers/staging/comedi/drivers/plx9052.h5
-rw-r--r--drivers/staging/comedi/drivers/poc.c64
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c4
-rw-r--r--drivers/staging/comedi/drivers/rtd520.c28
-rw-r--r--drivers/staging/comedi/drivers/rti800.c11
-rw-r--r--drivers/staging/comedi/drivers/rti802.c11
-rw-r--r--drivers/staging/comedi/drivers/s526.c55
-rw-r--r--drivers/staging/comedi/drivers/s626.c32
-rw-r--r--drivers/staging/comedi/drivers/s626.h7
-rw-r--r--drivers/staging/comedi/drivers/serial2002.c10
-rw-r--r--drivers/staging/comedi/drivers/skel.c54
-rw-r--r--drivers/staging/comedi/drivers/ssv_dnp.c84
-rw-r--r--drivers/staging/comedi/drivers/unioxx5.c29
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c2599
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c1281
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c2983
-rw-r--r--drivers/staging/comedi/drivers/vmk80xx.c91
-rw-r--r--drivers/staging/comedi/kcomedilib/kcomedilib_main.c65
-rw-r--r--drivers/staging/comedi/proc.c7
-rw-r--r--drivers/staging/comedi/range.c48
-rw-r--r--drivers/staging/cptm1217/clearpad_tm1217.c4
-rw-r--r--drivers/staging/crystalhd/bc_dts_glob_lnx.h19
-rw-r--r--drivers/staging/crystalhd/crystalhd_cmds.c31
-rw-r--r--drivers/staging/crystalhd/crystalhd_cmds.h19
-rw-r--r--drivers/staging/crystalhd/crystalhd_fw_if.h81
-rw-r--r--drivers/staging/crystalhd/crystalhd_hw.c236
-rw-r--r--drivers/staging/crystalhd/crystalhd_hw.h121
-rw-r--r--drivers/staging/crystalhd/crystalhd_lnx.c36
-rw-r--r--drivers/staging/crystalhd/crystalhd_lnx.h4
-rw-r--r--drivers/staging/crystalhd/crystalhd_misc.c39
-rw-r--r--drivers/staging/crystalhd/crystalhd_misc.h41
-rw-r--r--drivers/staging/csr/Kconfig9
-rw-r--r--drivers/staging/csr/LICENSE.txt39
-rw-r--r--drivers/staging/csr/Makefile73
-rw-r--r--drivers/staging/csr/bh.c397
-rw-r--r--drivers/staging/csr/csr_framework_ext.c40
-rw-r--r--drivers/staging/csr/csr_framework_ext.h35
-rw-r--r--drivers/staging/csr/csr_framework_ext_types.h30
-rw-r--r--drivers/staging/csr/csr_log.h223
-rw-r--r--drivers/staging/csr/csr_log_configure.h39
-rw-r--r--drivers/staging/csr/csr_log_text.h124
-rw-r--r--drivers/staging/csr/csr_macro.h39
-rw-r--r--drivers/staging/csr/csr_msg_transport.h17
-rw-r--r--drivers/staging/csr/csr_msgconv.c291
-rw-r--r--drivers/staging/csr/csr_msgconv.h78
-rw-r--r--drivers/staging/csr/csr_prim_defs.h55
-rw-r--r--drivers/staging/csr/csr_result.h17
-rw-r--r--drivers/staging/csr/csr_sched.h85
-rw-r--r--drivers/staging/csr/csr_sdio.h723
-rw-r--r--drivers/staging/csr/csr_serialize_primitive_types.c100
-rw-r--r--drivers/staging/csr/csr_time.c33
-rw-r--r--drivers/staging/csr/csr_time.h76
-rw-r--r--drivers/staging/csr/csr_util.c15
-rw-r--r--drivers/staging/csr/csr_wifi_common.h101
-rw-r--r--drivers/staging/csr/csr_wifi_fsm.h240
-rw-r--r--drivers/staging/csr/csr_wifi_fsm_event.h42
-rw-r--r--drivers/staging/csr/csr_wifi_fsm_types.h430
-rw-r--r--drivers/staging/csr/csr_wifi_hip_card.h114
-rw-r--r--drivers/staging/csr/csr_wifi_hip_card_sdio.c4001
-rw-r--r--drivers/staging/csr/csr_wifi_hip_card_sdio.h694
-rw-r--r--drivers/staging/csr/csr_wifi_hip_card_sdio_intr.c2595
-rw-r--r--drivers/staging/csr/csr_wifi_hip_card_sdio_mem.c1713
-rw-r--r--drivers/staging/csr/csr_wifi_hip_chiphelper.c793
-rw-r--r--drivers/staging/csr/csr_wifi_hip_chiphelper.h407
-rw-r--r--drivers/staging/csr/csr_wifi_hip_chiphelper_private.h200
-rw-r--r--drivers/staging/csr/csr_wifi_hip_conversions.h73
-rw-r--r--drivers/staging/csr/csr_wifi_hip_download.c819
-rw-r--r--drivers/staging/csr/csr_wifi_hip_dump.c837
-rw-r--r--drivers/staging/csr/csr_wifi_hip_packing.c4804
-rw-r--r--drivers/staging/csr/csr_wifi_hip_send.c415
-rw-r--r--drivers/staging/csr/csr_wifi_hip_signals.c1313
-rw-r--r--drivers/staging/csr/csr_wifi_hip_signals.h128
-rw-r--r--drivers/staging/csr/csr_wifi_hip_sigs.h1417
-rw-r--r--drivers/staging/csr/csr_wifi_hip_ta_sampling.c541
-rw-r--r--drivers/staging/csr/csr_wifi_hip_ta_sampling.h66
-rw-r--r--drivers/staging/csr/csr_wifi_hip_udi.c173
-rw-r--r--drivers/staging/csr/csr_wifi_hip_unifi.h871
-rw-r--r--drivers/staging/csr/csr_wifi_hip_unifi_signal_names.c41
-rw-r--r--drivers/staging/csr/csr_wifi_hip_unifi_udi.h52
-rw-r--r--drivers/staging/csr/csr_wifi_hip_unifihw.h59
-rw-r--r--drivers/staging/csr/csr_wifi_hip_unifiversion.h30
-rw-r--r--drivers/staging/csr/csr_wifi_hip_xbv.c1076
-rw-r--r--drivers/staging/csr/csr_wifi_hip_xbv.h119
-rw-r--r--drivers/staging/csr/csr_wifi_hostio_prim.h18
-rw-r--r--drivers/staging/csr/csr_wifi_lib.h103
-rw-r--r--drivers/staging/csr/csr_wifi_msgconv.h49
-rw-r--r--drivers/staging/csr/csr_wifi_nme_ap_converter_init.c90
-rw-r--r--drivers/staging/csr/csr_wifi_nme_ap_converter_init.h41
-rw-r--r--drivers/staging/csr/csr_wifi_nme_ap_free_downstream_contents.c84
-rw-r--r--drivers/staging/csr/csr_wifi_nme_ap_free_upstream_contents.c39
-rw-r--r--drivers/staging/csr/csr_wifi_nme_ap_lib.h495
-rw-r--r--drivers/staging/csr/csr_wifi_nme_ap_prim.h494
-rw-r--r--drivers/staging/csr/csr_wifi_nme_ap_sef.c30
-rw-r--r--drivers/staging/csr/csr_wifi_nme_ap_sef.h21
-rw-r--r--drivers/staging/csr/csr_wifi_nme_ap_serialize.c909
-rw-r--r--drivers/staging/csr/csr_wifi_nme_ap_serialize.h94
-rw-r--r--drivers/staging/csr/csr_wifi_nme_converter_init.h38
-rw-r--r--drivers/staging/csr/csr_wifi_nme_lib.h991
-rw-r--r--drivers/staging/csr/csr_wifi_nme_prim.h1657
-rw-r--r--drivers/staging/csr/csr_wifi_nme_serialize.h166
-rw-r--r--drivers/staging/csr/csr_wifi_nme_task.h27
-rw-r--r--drivers/staging/csr/csr_wifi_private_common.h81
-rw-r--r--drivers/staging/csr/csr_wifi_result.h27
-rw-r--r--drivers/staging/csr/csr_wifi_router_converter_init.c82
-rw-r--r--drivers/staging/csr/csr_wifi_router_converter_init.h34
-rw-r--r--drivers/staging/csr/csr_wifi_router_ctrl_converter_init.c134
-rw-r--r--drivers/staging/csr/csr_wifi_router_ctrl_converter_init.h34
-rw-r--r--drivers/staging/csr/csr_wifi_router_ctrl_free_downstream_contents.c108
-rw-r--r--drivers/staging/csr/csr_wifi_router_ctrl_free_upstream_contents.c87
-rw-r--r--drivers/staging/csr/csr_wifi_router_ctrl_lib.h2082
-rw-r--r--drivers/staging/csr/csr_wifi_router_ctrl_prim.h2113
-rw-r--r--drivers/staging/csr/csr_wifi_router_ctrl_sef.c46
-rw-r--r--drivers/staging/csr/csr_wifi_router_ctrl_sef.h51
-rw-r--r--drivers/staging/csr/csr_wifi_router_ctrl_serialize.c2591
-rw-r--r--drivers/staging/csr/csr_wifi_router_ctrl_serialize.h333
-rw-r--r--drivers/staging/csr/csr_wifi_router_free_downstream_contents.c53
-rw-r--r--drivers/staging/csr/csr_wifi_router_free_upstream_contents.c47
-rw-r--r--drivers/staging/csr/csr_wifi_router_lib.h417
-rw-r--r--drivers/staging/csr/csr_wifi_router_prim.h421
-rw-r--r--drivers/staging/csr/csr_wifi_router_sef.c19
-rw-r--r--drivers/staging/csr/csr_wifi_router_sef.h25
-rw-r--r--drivers/staging/csr/csr_wifi_router_serialize.c418
-rw-r--r--drivers/staging/csr/csr_wifi_router_serialize.h67
-rw-r--r--drivers/staging/csr/csr_wifi_router_task.h25
-rw-r--r--drivers/staging/csr/csr_wifi_router_transport.c199
-rw-r--r--drivers/staging/csr/csr_wifi_serialize_primitive_types.c256
-rw-r--r--drivers/staging/csr/csr_wifi_sme_ap_lib.h774
-rw-r--r--drivers/staging/csr/csr_wifi_sme_ap_prim.h1030
-rw-r--r--drivers/staging/csr/csr_wifi_sme_converter_init.c201
-rw-r--r--drivers/staging/csr/csr_wifi_sme_converter_init.h34
-rw-r--r--drivers/staging/csr/csr_wifi_sme_free_downstream_contents.c187
-rw-r--r--drivers/staging/csr/csr_wifi_sme_free_upstream_contents.c275
-rw-r--r--drivers/staging/csr/csr_wifi_sme_lib.h4303
-rw-r--r--drivers/staging/csr/csr_wifi_sme_prim.h6510
-rw-r--r--drivers/staging/csr/csr_wifi_sme_sef.c85
-rw-r--r--drivers/staging/csr/csr_wifi_sme_sef.h142
-rw-r--r--drivers/staging/csr/csr_wifi_sme_serialize.c5809
-rw-r--r--drivers/staging/csr/csr_wifi_sme_serialize.h666
-rw-r--r--drivers/staging/csr/csr_wifi_sme_task.h25
-rw-r--r--drivers/staging/csr/csr_wifi_vif_utils.h27
-rw-r--r--drivers/staging/csr/data_tx.c54
-rw-r--r--drivers/staging/csr/drv.c2193
-rw-r--r--drivers/staging/csr/firmware.c396
-rw-r--r--drivers/staging/csr/inet.c104
-rw-r--r--drivers/staging/csr/init_hw.c108
-rw-r--r--drivers/staging/csr/io.c1098
-rw-r--r--drivers/staging/csr/mlme.c433
-rw-r--r--drivers/staging/csr/monitor.c384
-rw-r--r--drivers/staging/csr/netdev.c3307
-rw-r--r--drivers/staging/csr/os.c477
-rw-r--r--drivers/staging/csr/putest.c685
-rw-r--r--drivers/staging/csr/sdio_events.c134
-rw-r--r--drivers/staging/csr/sdio_mmc.c1288
-rw-r--r--drivers/staging/csr/sdio_stubs.c82
-rw-r--r--drivers/staging/csr/sme_blocking.c1466
-rw-r--r--drivers/staging/csr/sme_mgt.c1012
-rw-r--r--drivers/staging/csr/sme_native.c566
-rw-r--r--drivers/staging/csr/sme_sys.c3260
-rw-r--r--drivers/staging/csr/sme_userspace.c315
-rw-r--r--drivers/staging/csr/sme_userspace.h38
-rw-r--r--drivers/staging/csr/sme_wext.c3327
-rw-r--r--drivers/staging/csr/ul_int.c528
-rw-r--r--drivers/staging/csr/unifi_clients.h129
-rw-r--r--drivers/staging/csr/unifi_config.h34
-rw-r--r--drivers/staging/csr/unifi_dbg.c110
-rw-r--r--drivers/staging/csr/unifi_event.c692
-rw-r--r--drivers/staging/csr/unifi_native.h257
-rw-r--r--drivers/staging/csr/unifi_os.h122
-rw-r--r--drivers/staging/csr/unifi_pdu_processing.c3729
-rw-r--r--drivers/staging/csr/unifi_priv.h1136
-rw-r--r--drivers/staging/csr/unifi_sme.c1225
-rw-r--r--drivers/staging/csr/unifi_sme.h245
-rw-r--r--drivers/staging/csr/unifi_wext.h108
-rw-r--r--drivers/staging/csr/unifiio.h398
-rw-r--r--drivers/staging/csr/wext_events.c283
-rw-r--r--drivers/staging/cxt1e1/Makefile1
-rw-r--r--drivers/staging/cxt1e1/comet.c836
-rw-r--r--drivers/staging/cxt1e1/functions.c19
-rw-r--r--drivers/staging/cxt1e1/hwprobe.c14
-rw-r--r--drivers/staging/cxt1e1/linux.c116
-rw-r--r--drivers/staging/cxt1e1/musycc.c57
-rw-r--r--drivers/staging/cxt1e1/pmc93x6_eeprom.c15
-rw-r--r--drivers/staging/cxt1e1/pmcc4.h10
-rw-r--r--drivers/staging/cxt1e1/pmcc4_drv.c63
-rw-r--r--drivers/staging/cxt1e1/sbecom_inline_linux.h6
-rw-r--r--drivers/staging/cxt1e1/sbeid.c13
-rw-r--r--drivers/staging/cxt1e1/sbeproc.h4
-rw-r--r--drivers/staging/dgap/Kconfig6
-rw-r--r--drivers/staging/dgap/Makefile9
-rw-r--r--drivers/staging/dgap/dgap_conf.h290
-rw-r--r--drivers/staging/dgap/dgap_downld.h69
-rw-r--r--drivers/staging/dgap/dgap_driver.c1051
-rw-r--r--drivers/staging/dgap/dgap_driver.h618
-rw-r--r--drivers/staging/dgap/dgap_fep5.c1953
-rw-r--r--drivers/staging/dgap/dgap_fep5.h253
-rw-r--r--drivers/staging/dgap/dgap_kcompat.h93
-rw-r--r--drivers/staging/dgap/dgap_parse.c1371
-rw-r--r--drivers/staging/dgap/dgap_parse.h35
-rw-r--r--drivers/staging/dgap/dgap_pci.h92
-rw-r--r--drivers/staging/dgap/dgap_sysfs.c793
-rw-r--r--drivers/staging/dgap/dgap_sysfs.h48
-rw-r--r--drivers/staging/dgap/dgap_trace.c185
-rw-r--r--drivers/staging/dgap/dgap_trace.h36
-rw-r--r--drivers/staging/dgap/dgap_tty.c3597
-rw-r--r--drivers/staging/dgap/dgap_tty.h39
-rw-r--r--drivers/staging/dgap/dgap_types.h36
-rw-r--r--drivers/staging/dgap/digi.h376
-rw-r--r--drivers/staging/dgap/downld.c798
-rw-r--r--drivers/staging/dgnc/Kconfig6
-rw-r--r--drivers/staging/dgnc/Makefile7
-rw-r--r--drivers/staging/dgnc/TODO17
-rw-r--r--drivers/staging/dgnc/dgnc_cls.c1409
-rw-r--r--drivers/staging/dgnc/dgnc_cls.h90
-rw-r--r--drivers/staging/dgnc/dgnc_driver.c958
-rw-r--r--drivers/staging/dgnc/dgnc_driver.h563
-rw-r--r--drivers/staging/dgnc/dgnc_kcompat.h93
-rw-r--r--drivers/staging/dgnc/dgnc_mgmt.c305
-rw-r--r--drivers/staging/dgnc/dgnc_mgmt.h31
-rw-r--r--drivers/staging/dgnc/dgnc_neo.c1974
-rw-r--r--drivers/staging/dgnc/dgnc_neo.h157
-rw-r--r--drivers/staging/dgnc/dgnc_pci.h75
-rw-r--r--drivers/staging/dgnc/dgnc_sysfs.c756
-rw-r--r--drivers/staging/dgnc/dgnc_sysfs.h49
-rw-r--r--drivers/staging/dgnc/dgnc_trace.c184
-rw-r--r--drivers/staging/dgnc/dgnc_trace.h44
-rw-r--r--drivers/staging/dgnc/dgnc_tty.c3544
-rw-r--r--drivers/staging/dgnc/dgnc_tty.h42
-rw-r--r--drivers/staging/dgnc/dgnc_types.h36
-rw-r--r--drivers/staging/dgnc/digi.h416
-rw-r--r--drivers/staging/dgnc/dpacompat.h115
-rw-r--r--drivers/staging/dgrp/dgrp_dpa_ops.c2
-rw-r--r--drivers/staging/dgrp/dgrp_driver.c14
-rw-r--r--drivers/staging/dgrp/dgrp_net_ops.c6
-rw-r--r--drivers/staging/dgrp/dgrp_sysfs.c2
-rw-r--r--drivers/staging/dgrp/dgrp_tty.c2
-rw-r--r--drivers/staging/dgrp/drp.h2
-rw-r--r--drivers/staging/dwc2/Kconfig1
-rw-r--r--drivers/staging/dwc2/core.c491
-rw-r--r--drivers/staging/dwc2/core.h221
-rw-r--r--drivers/staging/dwc2/core_intr.c19
-rw-r--r--drivers/staging/dwc2/hcd.c298
-rw-r--r--drivers/staging/dwc2/hcd.h57
-rw-r--r--drivers/staging/dwc2/hcd_ddma.c31
-rw-r--r--drivers/staging/dwc2/hcd_intr.c153
-rw-r--r--drivers/staging/dwc2/hcd_queue.c22
-rw-r--r--drivers/staging/dwc2/hw.h156
-rw-r--r--drivers/staging/dwc2/pci.c23
-rw-r--r--drivers/staging/echo/echo.c78
-rw-r--r--drivers/staging/echo/echo.h26
-rw-r--r--drivers/staging/et131x/README1
-rw-r--r--drivers/staging/frontier/alphatrack.c53
-rw-r--r--drivers/staging/frontier/alphatrack.h6
-rw-r--r--drivers/staging/frontier/tranzport.c30
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c43
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c2
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_debug.c439
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h156
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_proc.c2
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_usb.c10
-rw-r--r--drivers/staging/fwserial/fwserial.c149
-rw-r--r--drivers/staging/fwserial/fwserial.h2
-rw-r--r--drivers/staging/gdm724x/Kconfig15
-rw-r--r--drivers/staging/gdm724x/Makefile7
-rw-r--r--drivers/staging/gdm724x/TODO16
-rw-r--r--drivers/staging/gdm724x/gdm_endian.c67
-rw-r--r--drivers/staging/gdm724x/gdm_endian.h49
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c877
-rw-r--r--drivers/staging/gdm724x/gdm_lte.h81
-rw-r--r--drivers/staging/gdm724x/gdm_mux.c690
-rw-r--r--drivers/staging/gdm724x/gdm_mux.h95
-rw-r--r--drivers/staging/gdm724x/gdm_tty.c343
-rw-r--r--drivers/staging/gdm724x/gdm_tty.h71
-rw-r--r--drivers/staging/gdm724x/gdm_usb.c1049
-rw-r--r--drivers/staging/gdm724x/gdm_usb.h109
-rw-r--r--drivers/staging/gdm724x/hci.h55
-rw-r--r--drivers/staging/gdm724x/hci_packet.h93
-rw-r--r--drivers/staging/gdm724x/netlink_k.c149
-rw-r--r--drivers/staging/gdm724x/netlink_k.h25
-rw-r--r--drivers/staging/gdm72xx/Kconfig10
-rw-r--r--drivers/staging/gdm72xx/gdm_qos.c2
-rw-r--r--drivers/staging/gdm72xx/gdm_wimax.c3
-rw-r--r--drivers/staging/goldfish/goldfish_audio.c30
-rw-r--r--drivers/staging/goldfish/goldfish_nand.c11
-rw-r--r--drivers/staging/goldfish/goldfish_nand_reg.h35
-rw-r--r--drivers/staging/iio/Documentation/device.txt4
-rw-r--r--drivers/staging/iio/Kconfig2
-rw-r--r--drivers/staging/iio/accel/adis16201_core.c17
-rw-r--r--drivers/staging/iio/accel/adis16203_core.c16
-rw-r--r--drivers/staging/iio/accel/adis16204_core.c16
-rw-r--r--drivers/staging/iio/accel/adis16209_core.c16
-rw-r--r--drivers/staging/iio/accel/adis16220_core.c14
-rw-r--r--drivers/staging/iio/accel/adis16240_core.c16
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_core.c17
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c15
-rw-r--r--drivers/staging/iio/adc/ad7192.c2
-rw-r--r--drivers/staging/iio/adc/ad7280a.c2
-rw-r--r--drivers/staging/iio/adc/ad7291.c218
-rw-r--r--drivers/staging/iio/adc/ad7291.h12
-rw-r--r--drivers/staging/iio/adc/ad7606_core.c12
-rw-r--r--drivers/staging/iio/adc/ad7606_par.c2
-rw-r--r--drivers/staging/iio/adc/ad7816.c8
-rw-r--r--drivers/staging/iio/adc/ad799x_core.c4
-rw-r--r--drivers/staging/iio/adc/lpc32xx_adc.c1
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c71
-rw-r--r--drivers/staging/iio/adc/spear_adc.c33
-rw-r--r--drivers/staging/iio/addac/adt7316.c27
-rw-r--r--drivers/staging/iio/cdc/ad7150.c36
-rw-r--r--drivers/staging/iio/cdc/ad7152.c16
-rw-r--r--drivers/staging/iio/cdc/ad7746.c18
-rw-r--r--drivers/staging/iio/gyro/Kconfig19
-rw-r--r--drivers/staging/iio/gyro/Makefile6
-rw-r--r--drivers/staging/iio/gyro/adis16060_core.c17
-rw-r--r--drivers/staging/iio/gyro/adis16130_core.c178
-rw-r--r--drivers/staging/iio/gyro/adis16260.h98
-rw-r--r--drivers/staging/iio/gyro/adis16260_core.c427
-rw-r--r--drivers/staging/iio/gyro/adis16260_platform_data.h19
-rw-r--r--drivers/staging/iio/light/isl29018.c17
-rw-r--r--drivers/staging/iio/light/isl29028.c13
-rw-r--r--drivers/staging/iio/magnetometer/hmc5843.c71
-rw-r--r--drivers/staging/iio/meter/ade7753.c18
-rw-r--r--drivers/staging/iio/meter/ade7754.c19
-rw-r--r--drivers/staging/iio/meter/ade7758_core.c18
-rw-r--r--drivers/staging/iio/meter/ade7758_ring.c2
-rw-r--r--drivers/staging/iio/meter/ade7759.c18
-rw-r--r--drivers/staging/iio/meter/ade7854-spi.c2
-rw-r--r--drivers/staging/iio/meter/ade7854.c19
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c18
-rw-r--r--drivers/staging/iio/trigger/Kconfig17
-rw-r--r--drivers/staging/iio/trigger/Makefile2
-rw-r--r--drivers/staging/iio/trigger/iio-trig-gpio.c167
-rw-r--r--drivers/staging/iio/trigger/iio-trig-sysfs.c227
-rw-r--r--drivers/staging/imx-drm/Kconfig9
-rw-r--r--drivers/staging/imx-drm/Makefile1
-rw-r--r--drivers/staging/imx-drm/TODO2
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c42
-rw-r--r--drivers/staging/imx-drm/imx-ldb.c626
-rw-r--r--drivers/staging/imx-drm/imx-tve.c43
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-common.c142
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-dc.c5
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-di.c17
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c22
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-dp.c13
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-prv.h4
-rw-r--r--drivers/staging/imx-drm/ipuv3-crtc.c16
-rw-r--r--drivers/staging/imx-drm/parallel-display.c12
-rw-r--r--drivers/staging/keucr/init.c116
-rw-r--r--drivers/staging/keucr/scsiglue.c28
-rw-r--r--drivers/staging/keucr/smil.h28
-rw-r--r--drivers/staging/keucr/smilmain.c1937
-rw-r--r--drivers/staging/keucr/smilsub.c52
-rw-r--r--drivers/staging/keucr/smscsi.c38
-rw-r--r--drivers/staging/keucr/transport.c49
-rw-r--r--drivers/staging/keucr/transport.h3
-rw-r--r--drivers/staging/keucr/usb.c67
-rw-r--r--drivers/staging/keucr/usb.h119
-rw-r--r--drivers/staging/line6/driver.c9
-rw-r--r--drivers/staging/line6/driver.h3
-rw-r--r--drivers/staging/line6/pcm.c55
-rw-r--r--drivers/staging/line6/pod.c21
-rw-r--r--drivers/staging/line6/toneport.c10
-rw-r--r--drivers/staging/lustre/Kconfig3
-rw-r--r--drivers/staging/lustre/Makefile4
-rw-r--r--drivers/staging/lustre/TODO13
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/bitmap.h111
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/curproc.h108
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs.h188
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h214
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h201
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h282
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h170
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h851
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_heap.h200
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h222
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h117
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h97
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h572
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_string.h137
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_time.h132
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h110
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/kp30.h241
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h122
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-bitops.h38
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h166
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-crypto.h49
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-fs.h92
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-lock.h204
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h82
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-prim.h83
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-tcpip.h72
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h274
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-types.h36
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/portals_compat25.h99
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/lucache.h162
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/params_tree.h164
-rw-r--r--drivers/staging/lustre/include/linux/lnet/api-support.h44
-rw-r--r--drivers/staging/lustre/include/linux/lnet/api.h220
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-lnet.h874
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-types.h765
-rw-r--r--drivers/staging/lustre/include/linux/lnet/linux/api-support.h43
-rw-r--r--drivers/staging/lustre/include/linux/lnet/linux/lib-lnet.h72
-rw-r--r--drivers/staging/lustre/include/linux/lnet/linux/lib-types.h45
-rw-r--r--drivers/staging/lustre/include/linux/lnet/linux/lnet.h56
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lnet-sysctl.h51
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lnet.h51
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lnetctl.h80
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lnetst.h491
-rw-r--r--drivers/staging/lustre/include/linux/lnet/ptllnd.h94
-rw-r--r--drivers/staging/lustre/include/linux/lnet/ptllnd_wire.h124
-rw-r--r--drivers/staging/lustre/include/linux/lnet/socklnd.h103
-rw-r--r--drivers/staging/lustre/include/linux/lnet/types.h503
-rw-r--r--drivers/staging/lustre/lnet/Kconfig40
-rw-r--r--drivers/staging/lustre/lnet/Makefile1
-rw-r--r--drivers/staging/lustre/lnet/klnds/Makefile1
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/Makefile5
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c3261
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h1056
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c3529
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c493
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/Makefile7
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c2904
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h602
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c2654
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c1085
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h88
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c198
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c797
-rw-r--r--drivers/staging/lustre/lnet/lnet/Makefile8
-rw-r--r--drivers/staging/lustre/lnet/lnet/acceptor.c527
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c1944
-rw-r--r--drivers/staging/lustre/lnet/lnet/config.c1264
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-eq.c445
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-md.c451
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-me.c297
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c2441
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-msg.c647
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-ptl.c938
-rw-r--r--drivers/staging/lustre/lnet/lnet/lo.c120
-rw-r--r--drivers/staging/lustre/lnet/lnet/module.c155
-rw-r--r--drivers/staging/lustre/lnet/lnet/peer.c337
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c1694
-rw-r--r--drivers/staging/lustre/lnet/lnet/router_proc.c950
-rw-r--r--drivers/staging/lustre/lnet/selftest/Makefile6
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c499
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c931
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c1397
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.h146
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c2071
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.h232
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c1814
-rw-r--r--drivers/staging/lustre/lnet/selftest/module.c171
-rw-r--r--drivers/staging/lustre/lnet/selftest/ping_test.c229
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c1668
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.h302
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h611
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.c253
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.h53
-rw-r--r--drivers/staging/lustre/lustre/Kconfig60
-rw-r--r--drivers/staging/lustre/lustre/Makefile2
-rw-r--r--drivers/staging/lustre/lustre/fid/Makefile5
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_internal.h56
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_lib.c95
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_request.c570
-rw-r--r--drivers/staging/lustre/lustre/fid/lproc_fid.c212
-rw-r--r--drivers/staging/lustre/lustre/fld/Makefile5
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c547
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_internal.h194
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_request.c531
-rw-r--r--drivers/staging/lustre/lustre/fld/lproc_fld.c166
-rw-r--r--drivers/staging/lustre/lustre/include/cl_object.h3279
-rw-r--r--drivers/staging/lustre/lustre/include/dt_object.h1498
-rw-r--r--drivers/staging/lustre/lustre/include/interval_tree.h124
-rw-r--r--drivers/staging/lustre/lustre/include/ioctl.h106
-rw-r--r--drivers/staging/lustre/lustre/include/lclient.h437
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lprocfs_status.h57
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_acl.h66
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_common.h22
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_compat25.h246
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_debug.h47
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_dlm.h46
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_fsfilt.h171
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_handles.h52
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_intent.h62
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_lib.h85
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_lite.h98
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_log.h57
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_net.h49
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h83
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_quota.h46
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_user.h70
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lvfs.h134
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lvfs_linux.h66
-rw-r--r--drivers/staging/lustre/lustre/include/linux/obd.h125
-rw-r--r--drivers/staging/lustre/lustre/include/linux/obd_class.h58
-rw-r--r--drivers/staging/lustre/lustre/include/linux/obd_support.h63
-rw-r--r--drivers/staging/lustre/lustre/include/lprocfs_status.h1004
-rw-r--r--drivers/staging/lustre/lustre/include/lu_object.h1359
-rw-r--r--drivers/staging/lustre/lustre/include/lu_ref.h182
-rw-r--r--drivers/staging/lustre/lustre/include/lu_target.h91
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/libiam.h145
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/liblustreapi.h43
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h121
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_errno.h215
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_idl.h3723
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_lfsck_user.h95
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_user.h1157
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustreapi.h310
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_acl.h42
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_capa.h305
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_cfg.h291
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_debug.h76
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_disk.h545
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h1481
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm_flags.h460
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_eacl.h95
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_export.h389
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fid.h773
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fld.h161
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fsfilt.h48
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_ha.h67
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_handles.h93
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_idmap.h104
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_import.h369
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lib.h664
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_linkea.h57
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lite.h147
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_log.h568
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h174
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mds.h81
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h3488
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_param.h121
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_quota.h239
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_req_layout.h334
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_sec.h1145
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_update.h189
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_ver.h24
-rw-r--r--drivers/staging/lustre/lustre/include/lvfs.h57
-rw-r--r--drivers/staging/lustre/lustre/include/md_object.h903
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h1550
-rw-r--r--drivers/staging/lustre/lustre/include/obd_cache.h39
-rw-r--r--drivers/staging/lustre/lustre/include/obd_cksum.h176
-rw-r--r--drivers/staging/lustre/lustre/include/obd_class.h2191
-rw-r--r--drivers/staging/lustre/lustre/include/obd_lov.h116
-rw-r--r--drivers/staging/lustre/lustre/include/obd_ost.h96
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h852
-rw-r--r--drivers/staging/lustre/lustre/lclient/glimpse.c269
-rw-r--r--drivers/staging/lustre/lustre/lclient/lcommon_cl.c1312
-rw-r--r--drivers/staging/lustre/lustre/lclient/lcommon_misc.c192
-rw-r--r--drivers/staging/lustre/lustre/ldlm/interval_tree.c744
-rw-r--r--drivers/staging/lustre/lustre/ldlm/l_lock.c76
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_extent.c240
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c841
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c75
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_internal.h309
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c851
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c2363
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c1218
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_plain.c72
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c1425
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c2298
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c1434
-rw-r--r--drivers/staging/lustre/lustre/libcfs/Makefile21
-rw-r--r--drivers/staging/lustre/lustre/libcfs/debug.c469
-rw-r--r--drivers/staging/lustre/lustre/libcfs/fail.c137
-rw-r--r--drivers/staging/lustre/lustre/libcfs/hash.c2113
-rw-r--r--drivers/staging/lustre/lustre/libcfs/heap.c475
-rw-r--r--drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c343
-rw-r--r--drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c201
-rw-r--r--drivers/staging/lustre/lustre/libcfs/libcfs_lock.c189
-rw-r--r--drivers/staging/lustre/lustre/libcfs/libcfs_mem.c202
-rw-r--r--drivers/staging/lustre/lustre/libcfs/libcfs_string.c598
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c1045
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-crypto-adler.c144
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c291
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c322
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c203
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-module.c182
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c258
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c578
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c658
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c275
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.h48
-rw-r--r--drivers/staging/lustre/lustre/libcfs/lwt.c266
-rw-r--r--drivers/staging/lustre/lustre/libcfs/module.c496
-rw-r--r--drivers/staging/lustre/lustre/libcfs/nidstrings.c865
-rw-r--r--drivers/staging/lustre/lustre/libcfs/prng.c139
-rw-r--r--drivers/staging/lustre/lustre/libcfs/tracefile.c1192
-rw-r--r--drivers/staging/lustre/lustre/libcfs/tracefile.h340
-rw-r--r--drivers/staging/lustre/lustre/libcfs/upcall_cache.c452
-rw-r--r--drivers/staging/lustre/lustre/libcfs/workitem.c476
-rw-r--r--drivers/staging/lustre/lustre/llite/Makefile13
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c659
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c1955
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c3152
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_capa.c653
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_close.c397
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h1582
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c2374
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c498
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_nfs.c327
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_rmtacl.c301
-rw-r--r--drivers/staging/lustre/lustre/llite/lloop.c864
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c1370
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c1262
-rw-r--r--drivers/staging/lustre/lustre/llite/remote_perm.c330
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c1298
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c581
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c1692
-rw-r--r--drivers/staging/lustre/lustre/llite/super25.c225
-rw-r--r--drivers/staging/lustre/lustre/llite/symlink.c188
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_dev.c545
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_internal.h62
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c1175
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_lock.c84
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_object.c186
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_page.c552
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c582
-rw-r--r--drivers/staging/lustre/lustre/lmv/Makefile5
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_fld.c85
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_intent.c322
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_internal.h159
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c2867
-rw-r--r--drivers/staging/lustre/lustre/lmv/lproc_lmv.c234
-rw-r--r--drivers/staging/lustre/lustre/lov/Makefile9
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_cl_internal.h823
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_dev.c526
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_ea.c348
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_internal.h323
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c968
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_lock.c1218
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_log.c272
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_merge.c216
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c2876
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c972
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_offset.c266
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c678
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_page.c228
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pool.c663
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_request.c1518
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_dev.c205
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_io.c55
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_lock.c466
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_object.c164
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_page.c71
-rw-r--r--drivers/staging/lustre/lustre/lov/lproc_lov.c301
-rw-r--r--drivers/staging/lustre/lustre/lvfs/Makefile6
-rw-r--r--drivers/staging/lustre/lustre/lvfs/fsfilt.c137
-rw-r--r--drivers/staging/lustre/lustre/lvfs/fsfilt_ext3.c760
-rw-r--r--drivers/staging/lustre/lustre/lvfs/lvfs_lib.c173
-rw-r--r--drivers/staging/lustre/lustre/lvfs/lvfs_linux.c290
-rw-r--r--drivers/staging/lustre/lustre/mdc/Makefile5
-rw-r--r--drivers/staging/lustre/lustre/mdc/lproc_mdc.c217
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_internal.h180
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c565
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_locks.c1230
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_reint.c483
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c2687
-rw-r--r--drivers/staging/lustre/lustre/mgc/Makefile5
-rw-r--r--drivers/staging/lustre/lustre/mgc/libmgc.c161
-rw-r--r--drivers/staging/lustre/lustre/mgc/lproc_mgc.c83
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_internal.h73
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c1825
-rw-r--r--drivers/staging/lustre/lustre/obdclass/Makefile13
-rw-r--r--drivers/staging/lustre/lustre/obdclass/acl.c539
-rw-r--r--drivers/staging/lustre/lustre/obdclass/capa.c418
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_internal.h121
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_io.c1669
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_lock.c2232
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_object.c1137
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c1553
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c686
-rw-r--r--drivers/staging/lustre/lustre/obdclass/debug.c124
-rw-r--r--drivers/staging/lustre/lustre/obdclass/dt_object.c1049
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c1826
-rw-r--r--drivers/staging/lustre/lustre/obdclass/idmap.c477
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linkea.c194
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c406
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c222
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c443
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog.c934
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_cat.c815
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_internal.h98
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_ioctl.c418
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_lvfs.c847
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_obd.c314
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_osd.c1290
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_swab.c413
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_test.c1068
-rw-r--r--drivers/staging/lustre/lustre/obdclass/local_storage.c891
-rw-r--r--drivers/staging/lustre/lustre/obdclass/local_storage.h88
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c1986
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c2187
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_ref.c50
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_ucred.c107
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_handles.c257
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_peer.c217
-rw-r--r--drivers/staging/lustre/lustre/obdclass/md_attrs.c199
-rw-r--r--drivers/staging/lustre/lustre/obdclass/mea.c112
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c1882
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c1315
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obdo.c362
-rw-r--r--drivers/staging/lustre/lustre/obdclass/statfs_pack.c75
-rw-r--r--drivers/staging/lustre/lustre/obdclass/uuid.c82
-rw-r--r--drivers/staging/lustre/lustre/obdecho/Makefile5
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo.c670
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c3171
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_internal.h47
-rw-r--r--drivers/staging/lustre/lustre/obdecho/lproc_echo.c57
-rw-r--r--drivers/staging/lustre/lustre/osc/Makefile7
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c727
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c2875
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cl_internal.h677
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_dev.c260
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_internal.h208
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_io.c828
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c1615
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_object.c274
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c921
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_quota.c325
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c3662
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/Makefile24
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c3018
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/connection.c240
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/errno.c380
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c583
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/Makefile8
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_api.h179
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_asn1.h84
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_bulk.c506
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_cli_upcall.c446
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_err.h193
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_generic_token.c285
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_internal.h526
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c1409
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5.h163
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c1786
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_mech_switch.c359
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_pipefs.c1233
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_rawobj.c242
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_svc_upcall.c1093
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/lproc_gss.c221
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c2887
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c1594
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c2396
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_client.c345
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_net.c74
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_server.c450
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c1342
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/niobuf.c724
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs.c1759
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs_crr.c40
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c270
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pack_generic.c2567
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pers.c75
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pinger.c747
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h302
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c155
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c812
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/recover.c344
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec.c2446
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c888
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_config.c1226
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_gc.c250
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c199
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_null.c464
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_plain.c1001
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c3115
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/wirehdr.c47
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/wiretest.c4474
-rw-r--r--drivers/staging/media/Kconfig2
-rw-r--r--drivers/staging/media/Makefile1
-rw-r--r--drivers/staging/media/davinci_vpfe/Kconfig2
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe.c4
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif.c4
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif.c4
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_resizer.c14
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c6
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c14
-rw-r--r--drivers/staging/media/dt3155v4l/dt3155v4l.c1
-rw-r--r--drivers/staging/media/go7007/go7007-usb.c4
-rw-r--r--drivers/staging/media/go7007/go7007.txt1
-rw-r--r--drivers/staging/media/lirc/lirc_igorplugusb.c56
-rw-r--r--drivers/staging/media/lirc/lirc_imon.c7
-rw-r--r--drivers/staging/media/msi3101/Kconfig3
-rw-r--r--drivers/staging/media/msi3101/Makefile1
-rw-r--r--drivers/staging/media/msi3101/sdr-msi3101.c1931
-rw-r--r--drivers/staging/media/solo6x10/Kconfig4
-rw-r--r--drivers/staging/media/solo6x10/solo6x10-tw28.c112
-rw-r--r--drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c38
-rw-r--r--drivers/staging/netlogic/xlr_net.c11
-rw-r--r--drivers/staging/nvec/nvec.c55
-rw-r--r--drivers/staging/nvec/nvec.h25
-rw-r--r--drivers/staging/nvec/nvec_kbd.c10
-rw-r--r--drivers/staging/octeon-usb/Kconfig10
-rw-r--r--drivers/staging/octeon-usb/Makefile3
-rw-r--r--drivers/staging/octeon-usb/TODO11
-rw-r--r--drivers/staging/octeon-usb/cvmx-usb.c3158
-rw-r--r--drivers/staging/octeon-usb/cvmx-usb.h542
-rw-r--r--drivers/staging/octeon-usb/cvmx-usbcx-defs.h1528
-rw-r--r--drivers/staging/octeon-usb/cvmx-usbnx-defs.h885
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c835
-rw-r--r--drivers/staging/octeon/Kconfig2
-rw-r--r--drivers/staging/octeon/ethernet-mem.c7
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c4
-rw-r--r--drivers/staging/octeon/ethernet-rx.c5
-rw-r--r--drivers/staging/olpc_dcon/Kconfig11
-rw-r--r--drivers/staging/olpc_dcon/TODO11
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c24
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.h27
-rw-r--r--drivers/staging/ozwpan/Makefile (renamed from drivers/staging/ozwpan/Kbuild)7
-rw-r--r--drivers/staging/ozwpan/ozappif.h2
-rw-r--r--drivers/staging/ozwpan/ozcdev.c141
-rw-r--r--drivers/staging/ozwpan/ozconfig.h27
-rw-r--r--drivers/staging/ozwpan/ozdbg.h54
-rw-r--r--drivers/staging/ozwpan/ozeltbuf.c80
-rw-r--r--drivers/staging/ozwpan/ozevent.c195
-rw-r--r--drivers/staging/ozwpan/ozevent.h32
-rw-r--r--drivers/staging/ozwpan/ozeventdef.h40
-rw-r--r--drivers/staging/ozwpan/ozhcd.c769
-rw-r--r--drivers/staging/ozwpan/ozhcd.h4
-rw-r--r--drivers/staging/ozwpan/ozmain.c27
-rw-r--r--drivers/staging/ozwpan/ozpd.c299
-rw-r--r--drivers/staging/ozwpan/ozpd.h21
-rw-r--r--drivers/staging/ozwpan/ozproto.c543
-rw-r--r--drivers/staging/ozwpan/ozproto.h32
-rw-r--r--drivers/staging/ozwpan/oztrace.c36
-rw-r--r--drivers/staging/ozwpan/oztrace.h35
-rw-r--r--drivers/staging/ozwpan/ozurbparanoia.c23
-rw-r--r--drivers/staging/ozwpan/ozurbparanoia.h4
-rw-r--r--drivers/staging/ozwpan/ozusbsvc.c81
-rw-r--r--drivers/staging/ozwpan/ozusbsvc1.c69
-rw-r--r--drivers/staging/panel/panel.c25
-rw-r--r--drivers/staging/quickstart/quickstart.c21
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211.h2
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c6
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c2
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c2
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c2
-rw-r--r--drivers/staging/rtl8187se/r8180.h2
-rw-r--r--drivers/staging/rtl8187se/r8180_93cx6.h2
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c10
-rw-r--r--drivers/staging/rtl8187se/r8180_hw.h2
-rw-r--r--drivers/staging/rtl8187se/r8180_rtl8225.h2
-rw-r--r--drivers/staging/rtl8187se/r8180_rtl8225z2.c2
-rw-r--r--drivers/staging/rtl8187se/r8180_wx.c2
-rw-r--r--drivers/staging/rtl8187se/r8180_wx.h2
-rw-r--r--drivers/staging/rtl8188eu/Kconfig29
-rw-r--r--drivers/staging/rtl8188eu/Makefile70
-rw-r--r--drivers/staging/rtl8188eu/TODO15
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c1988
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_br_ext.c1199
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c2364
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_debug.c948
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c875
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c1640
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_io.c329
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ioctl_set.c1169
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_iol.c209
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_led.c1692
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c2442
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c8481
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mp.c997
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c1508
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_p2p.c2064
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c662
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c2299
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_rf.c89
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c1779
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sreset.c79
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sta_mgt.c655
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c1689
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c2447
-rw-r--r--drivers/staging/rtl8188eu/hal/Hal8188EFWImg_CE.c1761
-rw-r--r--drivers/staging/rtl8188eu/hal/Hal8188EPwrSeq.c86
-rw-r--r--drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c760
-rw-r--r--drivers/staging/rtl8188eu/hal/HalHWImg8188E_BB.c721
-rw-r--r--drivers/staging/rtl8188eu/hal/HalHWImg8188E_MAC.c231
-rw-r--r--drivers/staging/rtl8188eu/hal/HalHWImg8188E_RF.c269
-rw-r--r--drivers/staging/rtl8188eu/hal/HalPhyRf.c49
-rw-r--r--drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c1928
-rw-r--r--drivers/staging/rtl8188eu/hal/HalPwrSeqCmd.c132
-rw-r--r--drivers/staging/rtl8188eu/hal/hal_com.c381
-rw-r--r--drivers/staging/rtl8188eu/hal/hal_intf.c464
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c2171
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_HWConfig.c596
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_RTL8188E.c399
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c130
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_debug.c32
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_interface.c203
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c779
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c268
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c2378
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_mp.c860
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_phycfg.c1144
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_rf6052.c572
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c202
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_sreset.c80
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c91
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_led.c111
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c138
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c706
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c2346
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_ops_linux.c726
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EFWImg_CE.h28
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h276
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h1094
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPwrSeq.h176
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188ERateAdaptive.h75
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EReg.h46
-rw-r--r--drivers/staging/rtl8188eu/include/HalHWImg8188E_BB.h44
-rw-r--r--drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h34
-rw-r--r--drivers/staging/rtl8188eu/include/HalHWImg8188E_MAC.h30
-rw-r--r--drivers/staging/rtl8188eu/include/HalHWImg8188E_RF.h30
-rw-r--r--drivers/staging/rtl8188eu/include/HalPhyRf.h30
-rw-r--r--drivers/staging/rtl8188eu/include/HalPhyRf_8188e.h63
-rw-r--r--drivers/staging/rtl8188eu/include/HalPwrSeqCmd.h128
-rw-r--r--drivers/staging/rtl8188eu/include/HalVerDef.h167
-rw-r--r--drivers/staging/rtl8188eu/include/basic_types.h184
-rw-r--r--drivers/staging/rtl8188eu/include/cmd_osdep.h32
-rw-r--r--drivers/staging/rtl8188eu/include/drv_types.h334
-rw-r--r--drivers/staging/rtl8188eu/include/drv_types_linux.h24
-rw-r--r--drivers/staging/rtl8188eu/include/ethernet.h42
-rw-r--r--drivers/staging/rtl8188eu/include/h2clbk.h35
-rw-r--r--drivers/staging/rtl8188eu/include/hal_com.h173
-rw-r--r--drivers/staging/rtl8188eu/include/hal_intf.h426
-rw-r--r--drivers/staging/rtl8188eu/include/ieee80211.h1274
-rw-r--r--drivers/staging/rtl8188eu/include/ieee80211_ext.h290
-rw-r--r--drivers/staging/rtl8188eu/include/if_ether.h111
-rw-r--r--drivers/staging/rtl8188eu/include/ioctl_cfg80211.h107
-rw-r--r--drivers/staging/rtl8188eu/include/ip.h126
-rw-r--r--drivers/staging/rtl8188eu/include/mlme_osdep.h35
-rw-r--r--drivers/staging/rtl8188eu/include/mp_custom_oid.h352
-rw-r--r--drivers/staging/rtl8188eu/include/nic_spec.h44
-rw-r--r--drivers/staging/rtl8188eu/include/odm.h1198
-rw-r--r--drivers/staging/rtl8188eu/include/odm_HWConfig.h132
-rw-r--r--drivers/staging/rtl8188eu/include/odm_RTL8188E.h56
-rw-r--r--drivers/staging/rtl8188eu/include/odm_RegConfig8188E.h43
-rw-r--r--drivers/staging/rtl8188eu/include/odm_RegDefine11AC.h54
-rw-r--r--drivers/staging/rtl8188eu/include/odm_RegDefine11N.h171
-rw-r--r--drivers/staging/rtl8188eu/include/odm_debug.h145
-rw-r--r--drivers/staging/rtl8188eu/include/odm_interface.h164
-rw-r--r--drivers/staging/rtl8188eu/include/odm_precomp.h104
-rw-r--r--drivers/staging/rtl8188eu/include/odm_reg.h119
-rw-r--r--drivers/staging/rtl8188eu/include/odm_types.h62
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_intf.h83
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_service.h547
-rw-r--r--drivers/staging/rtl8188eu/include/recv_osdep.h56
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_cmd.h122
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_dm.h62
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_hal.h487
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_led.h35
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_recv.h69
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_rf.h36
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_spec.h1439
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_sreset.h31
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_xmit.h178
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_android.h64
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ap.h65
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_br_ext.h66
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_cmd.h991
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_debug.h290
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_eeprom.h130
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_efuse.h150
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_event.h115
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ht.h44
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_io.h387
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl.h124
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h79
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl_set.h50
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_iol.h84
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_led.h197
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme.h655
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h878
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mp.h495
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mp_ioctl.h340
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h1084
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_p2p.h135
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_pwrctrl.h283
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_qos.h30
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_recv.h485
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_rf.h146
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_security.h383
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_sreset.h50
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_version.h1
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_xmit.h384
-rw-r--r--drivers/staging/rtl8188eu/include/sta_info.h384
-rw-r--r--drivers/staging/rtl8188eu/include/usb_hal.h26
-rw-r--r--drivers/staging/rtl8188eu/include/usb_ops.h115
-rw-r--r--drivers/staging/rtl8188eu/include/usb_ops_linux.h55
-rw-r--r--drivers/staging/rtl8188eu/include/usb_osintf.h45
-rw-r--r--drivers/staging/rtl8188eu/include/usb_vendor_req.h52
-rw-r--r--drivers/staging/rtl8188eu/include/wifi.h1127
-rw-r--r--drivers/staging/rtl8188eu/include/wlan_bssdef.h347
-rw-r--r--drivers/staging/rtl8188eu/include/xmit_osdep.h67
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c8222
-rw-r--r--drivers/staging/rtl8188eu/os_dep/mlme_linux.c246
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c1251
-rw-r--r--drivers/staging/rtl8188eu/os_dep/osdep_service.c815
-rw-r--r--drivers/staging/rtl8188eu/os_dep/recv_linux.c261
-rw-r--r--drivers/staging/rtl8188eu/os_dep/rtw_android.c293
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c893
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c288
-rw-r--r--drivers/staging/rtl8188eu/os_dep/xmit_linux.c290
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c51
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c7
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c7
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h8
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pci.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pci.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.h2
-rw-r--r--drivers/staging/rtl8192e/rtllib.h2
-rw-r--r--drivers/staging/rtl8192e/rtllib_debug.h2
-rw-r--r--drivers/staging/rtl8192e/rtllib_module.c3
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c8
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c7
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac_wx.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c2
-rw-r--r--drivers/staging/rtl8192u/authors2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/dot11d.c14
-rw-r--r--drivers/staging/rtl8192u/ieee80211/dot11d.h10
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h96
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c14
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c8
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_module.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c39
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c80
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c28
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c106
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c82
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h10
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c30
-rw-r--r--drivers/staging/rtl8192u/r8180_93cx6.c46
-rw-r--r--drivers/staging/rtl8192u/r8180_93cx6.h2
-rw-r--r--drivers/staging/rtl8192u/r8180_pm.c2
-rw-r--r--drivers/staging/rtl8192u/r8180_pm.h2
-rw-r--r--drivers/staging/rtl8192u/r8190_rtl8256.c22
-rw-r--r--drivers/staging/rtl8192u/r8190_rtl8256.h12
-rw-r--r--drivers/staging/rtl8192u/r8192U.h808
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c3596
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c144
-rw-r--r--drivers/staging/rtl8192u/r8192U_hw.h11
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.c38
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.h2
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c623
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.h6
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware.c54
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.c2390
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.h88
-rw-r--r--drivers/staging/rtl8712/os_intfs.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c8
-rw-r--r--drivers/staging/rts5139/rts51x_transport.c8
-rw-r--r--drivers/staging/sb105x/sb_pci_mp.c34
-rw-r--r--drivers/staging/sb105x/sb_pci_mp.h1
-rw-r--r--drivers/staging/serqt_usb2/serqt_usb2.c322
-rw-r--r--drivers/staging/silicom/Kconfig7
-rw-r--r--drivers/staging/silicom/bp_mod.h2
-rw-r--r--drivers/staging/silicom/bpctl_mod.c1065
-rw-r--r--drivers/staging/silicom/bypasslib/bp_ioctl.h64
-rw-r--r--drivers/staging/silicom/bypasslib/bplibk.h12
-rw-r--r--drivers/staging/silicom/bypasslib/bypass.c89
-rw-r--r--drivers/staging/slicoss/slicoss.c18
-rw-r--r--drivers/staging/speakup/Kconfig32
-rw-r--r--drivers/staging/speakup/devsynth.c14
-rw-r--r--drivers/staging/speakup/i18n.c12
-rw-r--r--drivers/staging/speakup/kobjects.c48
-rw-r--r--drivers/staging/speakup/main.c79
-rw-r--r--drivers/staging/speakup/serialio.c4
-rw-r--r--drivers/staging/speakup/speakup_acntpc.c22
-rw-r--r--drivers/staging/speakup/speakup_apollo.c20
-rw-r--r--drivers/staging/speakup/speakup_decext.c20
-rw-r--r--drivers/staging/speakup/speakup_decpc.c22
-rw-r--r--drivers/staging/speakup/speakup_dectlk.c20
-rw-r--r--drivers/staging/speakup/speakup_dtlk.c22
-rw-r--r--drivers/staging/speakup/speakup_keypc.c22
-rw-r--r--drivers/staging/speakup/speakup_soft.c34
-rw-r--r--drivers/staging/speakup/spk_priv.h13
-rw-r--r--drivers/staging/speakup/synth.c38
-rw-r--r--drivers/staging/speakup/thread.c4
-rw-r--r--drivers/staging/speakup/varhandlers.c19
-rw-r--r--drivers/staging/ti-soc-thermal/Kconfig48
-rw-r--r--drivers/staging/ti-soc-thermal/Makefile5
-rw-r--r--drivers/staging/ti-soc-thermal/TODO12
-rw-r--r--drivers/staging/ti-soc-thermal/omap4-thermal-data.c267
-rw-r--r--drivers/staging/ti-soc-thermal/omap4xxx-bandgap.h175
-rw-r--r--drivers/staging/ti-soc-thermal/omap5-thermal-data.c359
-rw-r--r--drivers/staging/ti-soc-thermal/omap5xxx-bandgap.h200
-rw-r--r--drivers/staging/ti-soc-thermal/ti-bandgap.c1546
-rw-r--r--drivers/staging/ti-soc-thermal/ti-bandgap.h403
-rw-r--r--drivers/staging/ti-soc-thermal/ti-thermal-common.c367
-rw-r--r--drivers/staging/ti-soc-thermal/ti-thermal.h117
-rw-r--r--drivers/staging/ti-soc-thermal/ti_soc_thermal.txt60
-rw-r--r--drivers/staging/tidspbridge/Kconfig3
-rw-r--r--drivers/staging/tidspbridge/core/_tiomap.h6
-rw-r--r--drivers/staging/tidspbridge/core/_tiomap_pwr.h10
-rw-r--r--drivers/staging/tidspbridge/core/tiomap3430_pwr.c10
-rw-r--r--drivers/staging/tidspbridge/core/ue_deh.c2
-rw-r--r--drivers/staging/tidspbridge/core/wdt.c4
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/cmm.h2
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/host_os.h2
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/proc.h2
-rw-r--r--drivers/staging/tidspbridge/pmgr/dbll.c7
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv_interface.c4
-rw-r--r--drivers/staging/usbip/stub_dev.c6
-rw-r--r--drivers/staging/usbip/usbip_common.c11
-rw-r--r--drivers/staging/usbip/usbip_event.c2
-rw-r--r--drivers/staging/usbip/userspace/libsrc/vhci_driver.c2
-rw-r--r--drivers/staging/usbip/userspace/src/usbip.c15
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_attach.c6
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_list.c6
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_network.c30
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_network.h5
-rw-r--r--drivers/staging/usbip/userspace/src/usbipd.c141
-rw-r--r--drivers/staging/usbip/vhci_sysfs.c4
-rw-r--r--drivers/staging/vme/devices/vme_user.c6
-rw-r--r--drivers/staging/vme/devices/vme_user.h12
-rw-r--r--drivers/staging/vt6655/80211hdr.h2
-rw-r--r--drivers/staging/vt6655/80211mgr.c72
-rw-r--r--drivers/staging/vt6655/80211mgr.h107
-rw-r--r--drivers/staging/vt6655/aes_ccmp.c37
-rw-r--r--drivers/staging/vt6655/aes_ccmp.h2
-rw-r--r--drivers/staging/vt6655/device_main.c4
-rw-r--r--drivers/staging/vt6655/hostap.c8
-rw-r--r--drivers/staging/vt6655/ioctl.c11
-rw-r--r--drivers/staging/vt6655/wpactl.c2
-rw-r--r--drivers/staging/vt6656/baseband.c80
-rw-r--r--drivers/staging/vt6656/baseband.h10
-rw-r--r--drivers/staging/vt6656/card.c82
-rw-r--r--drivers/staging/vt6656/desc.h224
-rw-r--r--drivers/staging/vt6656/device.h51
-rw-r--r--drivers/staging/vt6656/device_cfg.h14
-rw-r--r--drivers/staging/vt6656/dpc.c16
-rw-r--r--drivers/staging/vt6656/dpc.h4
-rw-r--r--drivers/staging/vt6656/iwctl.c3
-rw-r--r--drivers/staging/vt6656/mac.c51
-rw-r--r--drivers/staging/vt6656/main_usb.c26
-rw-r--r--drivers/staging/vt6656/rf.c657
-rw-r--r--drivers/staging/vt6656/rxtx.c1538
-rw-r--r--drivers/staging/vt6656/rxtx.h761
-rw-r--r--drivers/staging/vt6656/tether.h10
-rw-r--r--drivers/staging/vt6656/tmacro.h6
-rw-r--r--drivers/staging/vt6656/usbpipe.c12
-rw-r--r--drivers/staging/vt6656/usbpipe.h5
-rw-r--r--drivers/staging/vt6656/wmgr.c2
-rw-r--r--drivers/staging/winbond/mds.c56
-rw-r--r--drivers/staging/winbond/mds_f.h13
-rw-r--r--drivers/staging/winbond/phy_calibration.c2
-rw-r--r--drivers/staging/winbond/phy_calibration.h1
-rw-r--r--drivers/staging/winbond/reg.c25
-rw-r--r--drivers/staging/winbond/wb35reg.c299
-rw-r--r--drivers/staging/winbond/wb35rx.c3
-rw-r--r--drivers/staging/wlags49_h2/Makefile2
-rw-r--r--drivers/staging/wlags49_h2/wl_cs.c20
-rw-r--r--drivers/staging/wlags49_h2/wl_cs.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_internal.h1
-rw-r--r--drivers/staging/wlags49_h2/wl_main.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_sysfs.c138
-rw-r--r--drivers/staging/wlags49_h2/wl_sysfs.h7
-rw-r--r--drivers/staging/wlags49_h25/Makefile3
-rw-r--r--drivers/staging/wlags49_h25/wl_sysfs.c2
-rw-r--r--drivers/staging/wlags49_h25/wl_sysfs.h2
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c3
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c12
-rw-r--r--drivers/staging/xgifb/vb_def.h5
-rw-r--r--drivers/staging/xgifb/vb_init.c128
-rw-r--r--drivers/staging/xgifb/vb_setmode.c764
-rw-r--r--drivers/staging/xgifb/vb_setmode.h12
-rw-r--r--drivers/staging/xillybus/Kconfig32
-rw-r--r--drivers/staging/xillybus/Makefile7
-rw-r--r--drivers/staging/xillybus/README403
-rw-r--r--drivers/staging/xillybus/TODO5
-rw-r--r--drivers/staging/xillybus/xillybus.h182
-rw-r--r--drivers/staging/xillybus/xillybus_core.c2345
-rw-r--r--drivers/staging/xillybus/xillybus_of.c212
-rw-r--r--drivers/staging/xillybus/xillybus_pcie.c262
-rw-r--r--drivers/staging/zcache/Kconfig59
-rw-r--r--drivers/staging/zcache/Makefile8
-rw-r--r--drivers/staging/zcache/TODO64
-rw-r--r--drivers/staging/zcache/debug.c107
-rw-r--r--drivers/staging/zcache/debug.h305
-rw-r--r--drivers/staging/zcache/ramster.h59
-rw-r--r--drivers/staging/zcache/ramster/debug.c68
-rw-r--r--drivers/staging/zcache/ramster/debug.h145
-rw-r--r--drivers/staging/zcache/ramster/heartbeat.c462
-rw-r--r--drivers/staging/zcache/ramster/heartbeat.h87
-rw-r--r--drivers/staging/zcache/ramster/masklog.c155
-rw-r--r--drivers/staging/zcache/ramster/masklog.h220
-rw-r--r--drivers/staging/zcache/ramster/nodemanager.c996
-rw-r--r--drivers/staging/zcache/ramster/nodemanager.h88
-rw-r--r--drivers/staging/zcache/ramster/r2net.c414
-rw-r--r--drivers/staging/zcache/ramster/ramster-howto.txt366
-rw-r--r--drivers/staging/zcache/ramster/ramster.c925
-rw-r--r--drivers/staging/zcache/ramster/ramster.h161
-rw-r--r--drivers/staging/zcache/ramster/ramster_nodemanager.h41
-rw-r--r--drivers/staging/zcache/ramster/tcp.c2248
-rw-r--r--drivers/staging/zcache/ramster/tcp.h159
-rw-r--r--drivers/staging/zcache/ramster/tcp_internal.h248
-rw-r--r--drivers/staging/zcache/tmem.c898
-rw-r--r--drivers/staging/zcache/tmem.h259
-rw-r--r--drivers/staging/zcache/zbud.c1066
-rw-r--r--drivers/staging/zcache/zbud.h33
-rw-r--r--drivers/staging/zcache/zcache-main.c1941
-rw-r--r--drivers/staging/zcache/zcache.h53
-rw-r--r--drivers/staging/zram/Makefile2
-rw-r--r--drivers/staging/zram/zram_drv.c637
-rw-r--r--drivers/staging/zram/zram_drv.h46
-rw-r--r--drivers/staging/zram/zram_sysfs.c227
-rw-r--r--drivers/staging/zsmalloc/zsmalloc-main.c11
-rw-r--r--drivers/staging/zsmalloc/zsmalloc.h2
1426 files changed, 405415 insertions, 160026 deletions
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index aefe820..3626dbc8 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -40,8 +40,6 @@ source "drivers/staging/comedi/Kconfig"
source "drivers/staging/olpc_dcon/Kconfig"
-source "drivers/staging/asus_oled/Kconfig"
-
source "drivers/staging/panel/Kconfig"
source "drivers/staging/rtl8187se/Kconfig"
@@ -52,6 +50,8 @@ source "drivers/staging/rtl8192e/Kconfig"
source "drivers/staging/rtl8712/Kconfig"
+source "drivers/staging/rtl8188eu/Kconfig"
+
source "drivers/staging/rts5139/Kconfig"
source "drivers/staging/frontier/Kconfig"
@@ -62,6 +62,8 @@ source "drivers/staging/line6/Kconfig"
source "drivers/staging/octeon/Kconfig"
+source "drivers/staging/octeon-usb/Kconfig"
+
source "drivers/staging/serqt_usb2/Kconfig"
source "drivers/staging/vt6655/Kconfig"
@@ -116,9 +118,7 @@ source "drivers/staging/ozwpan/Kconfig"
source "drivers/staging/gdm72xx/Kconfig"
-source "drivers/staging/csr/Kconfig"
-
-source "drivers/staging/ti-soc-thermal/Kconfig"
+source "drivers/staging/gdm724x/Kconfig"
source "drivers/staging/silicom/Kconfig"
@@ -132,12 +132,20 @@ source "drivers/staging/sb105x/Kconfig"
source "drivers/staging/fwserial/Kconfig"
-source "drivers/staging/zcache/Kconfig"
-
source "drivers/staging/goldfish/Kconfig"
source "drivers/staging/netlogic/Kconfig"
source "drivers/staging/dwc2/Kconfig"
+source "drivers/staging/lustre/Kconfig"
+
+source "drivers/staging/btmtk_usb/Kconfig"
+
+source "drivers/staging/xillybus/Kconfig"
+
+source "drivers/staging/dgnc/Kconfig"
+
+source "drivers/staging/dgap/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 415772e..d1b4b80 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -12,12 +12,12 @@ obj-$(CONFIG_PRISM2_USB) += wlan-ng/
obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_COMEDI) += comedi/
obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/
-obj-$(CONFIG_ASUS_OLED) += asus_oled/
obj-$(CONFIG_PANEL) += panel/
obj-$(CONFIG_R8187SE) += rtl8187se/
obj-$(CONFIG_RTL8192U) += rtl8192u/
obj-$(CONFIG_RTL8192E) += rtl8192e/
obj-$(CONFIG_R8712U) += rtl8712/
+obj-$(CONFIG_R8188EU) += rtl8188eu/
obj-$(CONFIG_RTS5139) += rts5139/
obj-$(CONFIG_TRANZPORT) += frontier/
obj-$(CONFIG_IDE_PHISON) += phison/
@@ -25,6 +25,7 @@ obj-$(CONFIG_LINE6_USB) += line6/
obj-$(CONFIG_NETLOGIC_XLR_NET) += netlogic/
obj-$(CONFIG_USB_SERIAL_QUATECH2) += serqt_usb2/
obj-$(CONFIG_OCTEON_ETHERNET) += octeon/
+obj-$(CONFIG_OCTEON_USB) += octeon-usb/
obj-$(CONFIG_VT6655) += vt6655/
obj-$(CONFIG_VT6656) += vt6656/
obj-$(CONFIG_VME_BUS) += vme/
@@ -51,14 +52,17 @@ obj-$(CONFIG_MFD_NVEC) += nvec/
obj-$(CONFIG_ANDROID) += android/
obj-$(CONFIG_USB_WPAN_HCD) += ozwpan/
obj-$(CONFIG_WIMAX_GDM72XX) += gdm72xx/
-obj-$(CONFIG_CSR_WIFI) += csr/
-obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/
+obj-$(CONFIG_LTE_GDM724X) += gdm724x/
obj-$(CONFIG_NET_VENDOR_SILICOM) += silicom/
obj-$(CONFIG_CED1401) += ced1401/
obj-$(CONFIG_DRM_IMX) += imx-drm/
obj-$(CONFIG_DGRP) += dgrp/
obj-$(CONFIG_SB105X) += sb105x/
obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
-obj-$(CONFIG_ZCACHE) += zcache/
obj-$(CONFIG_GOLDFISH) += goldfish/
obj-$(CONFIG_USB_DWC2) += dwc2/
+obj-$(CONFIG_LUSTRE_FS) += lustre/
+obj-$(CONFIG_USB_BTMTK) += btmtk_usb/
+obj-$(CONFIG_XILLYBUS) += xillybus/
+obj-$(CONFIG_DGNC) += dgnc/
+obj-$(CONFIG_DGAP) += dgap/
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index e681bdd..8e76ddc 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -341,27 +341,26 @@ out:
/*
* ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
*
- * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
- * many objects (pages) we have in total.
+ * 'nr_to_scan' is the number of objects to scan for freeing.
*
* 'gfp_mask' is the mask of the allocation that got us into this mess.
*
- * Return value is the number of objects (pages) remaining, or -1 if we cannot
+ * Return value is the number of objects freed or -1 if we cannot
* proceed without risk of deadlock (due to gfp_mask).
*
* We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
* chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
* pages freed.
*/
-static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
+static unsigned long
+ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
struct ashmem_range *range, *next;
+ unsigned long freed = 0;
/* We might recurse into filesystem code, so bail out if necessary */
- if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
- return -1;
- if (!sc->nr_to_scan)
- return lru_count;
+ if (!(sc->gfp_mask & __GFP_FS))
+ return SHRINK_STOP;
mutex_lock(&ashmem_mutex);
list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
@@ -374,17 +373,32 @@ static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
range->purged = ASHMEM_WAS_PURGED;
lru_del(range);
- sc->nr_to_scan -= range_size(range);
- if (sc->nr_to_scan <= 0)
+ freed += range_size(range);
+ if (--sc->nr_to_scan <= 0)
break;
}
mutex_unlock(&ashmem_mutex);
+ return freed;
+}
+static unsigned long
+ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ /*
+ * note that lru_count is count of pages on the lru, not a count of
+ * objects on the list. This means the scan function needs to return the
+ * number of pages freed, not the number of objects scanned.
+ */
return lru_count;
}
static struct shrinker ashmem_shrinker = {
- .shrink = ashmem_shrink,
+ .count_objects = ashmem_shrink_count,
+ .scan_objects = ashmem_shrink_scan,
+ /*
+ * XXX (dchinner): I wish people would comment on why they need on
+ * significant changes to the default value here
+ */
.seeks = DEFAULT_SEEKS * 4,
};
@@ -690,11 +704,11 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (capable(CAP_SYS_ADMIN)) {
struct shrink_control sc = {
.gfp_mask = GFP_KERNEL,
- .nr_to_scan = 0,
+ .nr_to_scan = LONG_MAX,
};
- ret = ashmem_shrink(&ashmem_shrinker, &sc);
- sc.nr_to_scan = ret;
- ashmem_shrink(&ashmem_shrinker, &sc);
+
+ nodes_setall(sc.nodes_to_scan);
+ ashmem_shrink_scan(&ashmem_shrinker, &sc);
}
break;
}
@@ -704,7 +718,8 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
-static long compat_ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
switch (cmd) {
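The ashmem hunks above (and the lowmemorykiller hunks further below) convert the old single ->shrink callback into the split count_objects/scan_objects shrinker interface: count_objects returns a cheap estimate of how many objects could be reclaimed, while scan_objects does the actual reclaim and returns the number freed, or SHRINK_STOP when it cannot safely make progress (for example when __GFP_FS is not set). A minimal sketch of that pattern, with made-up names and not taken from this commit:

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/shrinker.h>

static LIST_HEAD(demo_lru);
static unsigned long demo_lru_count;		/* objects currently on demo_lru */
static DEFINE_MUTEX(demo_lock);

static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
{
	/* cheap, best-effort estimate; a stale snapshot is acceptable here */
	return demo_lru_count;
}

static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
{
	unsigned long freed = 0;

	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;		/* cannot recurse into fs code here */

	mutex_lock(&demo_lock);
	while (freed < sc->nr_to_scan && !list_empty(&demo_lru)) {
		struct list_head *victim = demo_lru.prev;	/* oldest entry */

		list_del_init(victim);		/* real teardown of the object goes here */
		demo_lru_count--;
		freed++;
	}
	mutex_unlock(&demo_lock);

	return freed;				/* objects actually reclaimed */
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
	.seeks		= DEFAULT_SEEKS,
};

/* register_shrinker(&demo_shrinker) at init time, unregister_shrinker() on exit. */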
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index 1567ac2..98ac020 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -20,6 +20,7 @@
#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
+#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
@@ -790,7 +791,7 @@ static void binder_delete_free_buffer(struct binder_proc *proc,
list_del(&buffer->entry);
if (free_page_start || free_page_end) {
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %p do not share page%s%s with with %p or %p\n",
+ "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
proc->pid, buffer, free_page_start ? "" : " end",
free_page_end ? "" : " start", prev, next);
binder_update_page_range(proc, 0, free_page_start ?
@@ -1247,7 +1248,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
struct flat_binder_object *fp;
if (*offp > buffer->data_size - sizeof(*fp) ||
buffer->data_size < sizeof(*fp) ||
- !IS_ALIGNED(*offp, sizeof(void *))) {
+ !IS_ALIGNED(*offp, sizeof(u32))) {
pr_err("transaction release %d bad offset %zd, size %zd\n",
debug_id, *offp, buffer->data_size);
continue;
@@ -1271,7 +1272,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
case BINDER_TYPE_WEAK_HANDLE: {
struct binder_ref *ref = binder_get_ref(proc, fp->handle);
if (ref == NULL) {
- pr_err("transaction release %d bad handle %ld\n",
+ pr_err("transaction release %d bad handle %d\n",
debug_id, fp->handle);
break;
}
@@ -1283,13 +1284,13 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
case BINDER_TYPE_FD:
binder_debug(BINDER_DEBUG_TRANSACTION,
- " fd %ld\n", fp->handle);
+ " fd %d\n", fp->handle);
if (failed_at)
task_close_fd(proc, fp->handle);
break;
default:
- pr_err("transaction release %d bad object type %lx\n",
+ pr_err("transaction release %d bad object type %x\n",
debug_id, fp->type);
break;
}
@@ -1496,7 +1497,7 @@ static void binder_transaction(struct binder_proc *proc,
struct flat_binder_object *fp;
if (*offp > t->buffer->data_size - sizeof(*fp) ||
t->buffer->data_size < sizeof(*fp) ||
- !IS_ALIGNED(*offp, sizeof(void *))) {
+ !IS_ALIGNED(*offp, sizeof(u32))) {
binder_user_error("%d:%d got transaction with invalid offset, %zd\n",
proc->pid, thread->pid, *offp);
return_error = BR_FAILED_REPLY;
@@ -1547,7 +1548,7 @@ static void binder_transaction(struct binder_proc *proc,
case BINDER_TYPE_WEAK_HANDLE: {
struct binder_ref *ref = binder_get_ref(proc, fp->handle);
if (ref == NULL) {
- binder_user_error("%d:%d got transaction with invalid handle, %ld\n",
+ binder_user_error("%d:%d got transaction with invalid handle, %d\n",
proc->pid,
thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
@@ -1590,13 +1591,13 @@ static void binder_transaction(struct binder_proc *proc,
if (reply) {
if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
- binder_user_error("%d:%d got reply with fd, %ld, but target does not allow fds\n",
+ binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_fd_not_allowed;
}
} else if (!target_node->accept_fds) {
- binder_user_error("%d:%d got transaction with fd, %ld, but target does not allow fds\n",
+ binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_fd_not_allowed;
@@ -1604,7 +1605,7 @@ static void binder_transaction(struct binder_proc *proc,
file = fget(fp->handle);
if (file == NULL) {
- binder_user_error("%d:%d got transaction with invalid fd, %ld\n",
+ binder_user_error("%d:%d got transaction with invalid fd, %d\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_fget_failed;
@@ -1618,13 +1619,13 @@ static void binder_transaction(struct binder_proc *proc,
task_fd_install(target_proc, target_fd, file);
trace_binder_transaction_fd(t, fp->handle, target_fd);
binder_debug(BINDER_DEBUG_TRANSACTION,
- " fd %ld -> %d\n", fp->handle, target_fd);
+ " fd %d -> %d\n", fp->handle, target_fd);
/* TODO: fput? */
fp->handle = target_fd;
} break;
default:
- binder_user_error("%d:%d got transaction with invalid object type, %lx\n",
+ binder_user_error("%d:%d got transaction with invalid object type, %x\n",
proc->pid, thread->pid, fp->type);
return_error = BR_FAILED_REPLY;
goto err_bad_object_type;
@@ -1700,7 +1701,7 @@ err_no_context_mgr_node:
}
int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
- void __user *buffer, int size, signed long *consumed)
+ void __user *buffer, size_t size, size_t *consumed)
{
uint32_t cmd;
void __user *ptr = buffer + *consumed;
@@ -2080,8 +2081,8 @@ static int binder_has_thread_work(struct binder_thread *thread)
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
- void __user *buffer, int size,
- signed long *consumed, int non_block)
+ void __user *buffer, size_t size,
+ size_t *consumed, int non_block)
{
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
@@ -2140,13 +2141,13 @@ retry:
if (!binder_has_proc_work(proc, thread))
ret = -EAGAIN;
} else
- ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
+ ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
} else {
if (non_block) {
if (!binder_has_thread_work(thread))
ret = -EAGAIN;
} else
- ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
+ ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
}
binder_lock(__func__);
@@ -2578,7 +2579,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
goto err;
}
binder_debug(BINDER_DEBUG_READ_WRITE,
- "%d:%d write %ld at %08lx, read %ld at %08lx\n",
+ "%d:%d write %zd at %016lx, read %zd at %016lx\n",
proc->pid, thread->pid, bwr.write_size,
bwr.write_buffer, bwr.read_size, bwr.read_buffer);
@@ -2604,7 +2605,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
}
binder_debug(BINDER_DEBUG_READ_WRITE,
- "%d:%d wrote %ld of %ld, read return %ld of %ld\n",
+ "%d:%d wrote %zd of %zd, read return %zd of %zd\n",
proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
bwr.read_consumed, bwr.read_size);
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
diff --git a/drivers/staging/android/binder.h b/drivers/staging/android/binder.h
index dbe81ce..cbe3451 100644
--- a/drivers/staging/android/binder.h
+++ b/drivers/staging/android/binder.h
@@ -48,13 +48,13 @@ enum {
*/
struct flat_binder_object {
/* 8 bytes for large_flat_header. */
- unsigned long type;
- unsigned long flags;
+ __u32 type;
+ __u32 flags;
/* 8 bytes of data. */
union {
void __user *binder; /* local object */
- signed long handle; /* remote object */
+ __u32 handle; /* remote object */
};
/* extra data associated with local object */
@@ -67,18 +67,18 @@ struct flat_binder_object {
*/
struct binder_write_read {
- signed long write_size; /* bytes to write */
- signed long write_consumed; /* bytes consumed by driver */
+ size_t write_size; /* bytes to write */
+ size_t write_consumed; /* bytes consumed by driver */
unsigned long write_buffer;
- signed long read_size; /* bytes to read */
- signed long read_consumed; /* bytes consumed by driver */
+ size_t read_size; /* bytes to read */
+ size_t read_consumed; /* bytes consumed by driver */
unsigned long read_buffer;
};
/* Use with BINDER_VERSION, driver fills in fields. */
struct binder_version {
/* driver protocol version -- increment with incompatible change */
- signed long protocol_version;
+ __s32 protocol_version;
};
/* This is the current protocol version. */
@@ -86,7 +86,7 @@ struct binder_version {
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
-#define BINDER_SET_MAX_THREADS _IOW('b', 5, size_t)
+#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32)
#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
@@ -119,14 +119,14 @@ struct binder_transaction_data {
* identifying the target and contents of the transaction.
*/
union {
- size_t handle; /* target descriptor of command transaction */
+ __u32 handle; /* target descriptor of command transaction */
void *ptr; /* target descriptor of return transaction */
} target;
void *cookie; /* target object cookie */
- unsigned int code; /* transaction command */
+ __u32 code; /* transaction command */
/* General information about the transaction. */
- unsigned int flags;
+ __u32 flags;
pid_t sender_pid;
uid_t sender_euid;
size_t data_size; /* number of bytes of data */
@@ -143,7 +143,7 @@ struct binder_transaction_data {
/* offsets from buffer to flat_binder_object structs */
const void __user *offsets;
} ptr;
- uint8_t buf[8];
+ __u8 buf[8];
} data;
};
@@ -153,18 +153,18 @@ struct binder_ptr_cookie {
};
struct binder_pri_desc {
- int priority;
- int desc;
+ __s32 priority;
+ __u32 desc;
};
struct binder_pri_ptr_cookie {
- int priority;
+ __s32 priority;
void *ptr;
void *cookie;
};
enum binder_driver_return_protocol {
- BR_ERROR = _IOR('r', 0, int),
+ BR_ERROR = _IOR('r', 0, __s32),
/*
* int: error code
*/
@@ -178,7 +178,7 @@ enum binder_driver_return_protocol {
* binder_transaction_data: the received command.
*/
- BR_ACQUIRE_RESULT = _IOR('r', 4, int),
+ BR_ACQUIRE_RESULT = _IOR('r', 4, __s32),
/*
* not currently supported
* int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
@@ -258,22 +258,22 @@ enum binder_driver_command_protocol {
* binder_transaction_data: the sent command.
*/
- BC_ACQUIRE_RESULT = _IOW('c', 2, int),
+ BC_ACQUIRE_RESULT = _IOW('c', 2, __s32),
/*
* not currently supported
* int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful.
* Else you have acquired a primary reference on the object.
*/
- BC_FREE_BUFFER = _IOW('c', 3, int),
+ BC_FREE_BUFFER = _IOW('c', 3, void *),
/*
* void *: ptr to transaction data received on a read
*/
- BC_INCREFS = _IOW('c', 4, int),
- BC_ACQUIRE = _IOW('c', 5, int),
- BC_RELEASE = _IOW('c', 6, int),
- BC_DECREFS = _IOW('c', 7, int),
+ BC_INCREFS = _IOW('c', 4, __u32),
+ BC_ACQUIRE = _IOW('c', 5, __u32),
+ BC_RELEASE = _IOW('c', 6, __u32),
+ BC_DECREFS = _IOW('c', 7, __u32),
/*
* int: descriptor
*/
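The binder.h hunks above tighten the userspace ABI: signed long and plain int fields in the exported structures become explicitly sized __u32/__s32 (or size_t where a byte count is meant), and the ioctl encodings follow suit. For orientation, this is roughly how userspace drives the BINDER_WRITE_READ ioctl with the struct shown above (illustrative sketch, error handling trimmed, header assumed to be copied into the userspace build):

#include <string.h>
#include <sys/ioctl.h>
#include "binder.h"		/* the definitions from the hunks above */

/* Push a buffer of binder commands to the driver and report how much it took. */
static int binder_flush_commands(int binder_fd, void *cmds, size_t len)
{
	struct binder_write_read bwr;

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_buffer = (unsigned long)cmds;
	bwr.write_size = len;		/* size_t after this patch, not signed long */
	/* read side left zeroed: this call only drains the write buffer */

	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)
		return -1;

	return (int)bwr.write_consumed;	/* bytes the driver actually consumed */
}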
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
index 9bd8747..d42f5785 100644
--- a/drivers/staging/android/logger.c
+++ b/drivers/staging/android/logger.c
@@ -469,7 +469,7 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t ppos)
{
struct logger_log *log = file_get_log(iocb->ki_filp);
- size_t orig = log->w_off;
+ size_t orig;
struct logger_entry header;
struct timespec now;
ssize_t ret = 0;
@@ -481,7 +481,7 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
header.sec = now.tv_sec;
header.nsec = now.tv_nsec;
header.euid = current_euid();
- header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD);
+ header.len = min_t(size_t, iocb->ki_nbytes, LOGGER_ENTRY_MAX_PAYLOAD);
header.hdr_size = sizeof(struct logger_entry);
/* null writes succeed, return zero */
@@ -490,6 +490,8 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
mutex_lock(&log->mutex);
+ orig = log->w_off;
+
/*
* Fix up any readers, pulling them forward to the first readable
* entry after (what will be) the new write offset. We do this now
@@ -696,7 +698,7 @@ static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ret = -EBADF;
break;
}
- if (!(in_egroup_p(file->f_dentry->d_inode->i_gid) ||
+ if (!(in_egroup_p(file_inode(file)->i_gid) ||
capable(CAP_SYSLOG))) {
ret = -EPERM;
break;
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index fe74494..6f094b3 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -66,11 +66,20 @@ static unsigned long lowmem_deathpending_timeout;
pr_info(x); \
} while (0)
-static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
+static unsigned long lowmem_count(struct shrinker *s,
+ struct shrink_control *sc)
+{
+ return global_page_state(NR_ACTIVE_ANON) +
+ global_page_state(NR_ACTIVE_FILE) +
+ global_page_state(NR_INACTIVE_ANON) +
+ global_page_state(NR_INACTIVE_FILE);
+}
+
+static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
{
struct task_struct *tsk;
struct task_struct *selected = NULL;
- int rem = 0;
+ unsigned long rem = 0;
int tasksize;
int i;
short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
@@ -92,19 +101,17 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
break;
}
}
- if (sc->nr_to_scan > 0)
- lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %hd\n",
- sc->nr_to_scan, sc->gfp_mask, other_free,
- other_file, min_score_adj);
- rem = global_page_state(NR_ACTIVE_ANON) +
- global_page_state(NR_ACTIVE_FILE) +
- global_page_state(NR_INACTIVE_ANON) +
- global_page_state(NR_INACTIVE_FILE);
- if (sc->nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
- lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
- sc->nr_to_scan, sc->gfp_mask, rem);
- return rem;
+
+ lowmem_print(3, "lowmem_scan %lu, %x, ofree %d %d, ma %hd\n",
+ sc->nr_to_scan, sc->gfp_mask, other_free,
+ other_file, min_score_adj);
+
+ if (min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
+ lowmem_print(5, "lowmem_scan %lu, %x, return 0\n",
+ sc->nr_to_scan, sc->gfp_mask);
+ return 0;
}
+
selected_oom_score_adj = min_score_adj;
rcu_read_lock();
@@ -154,16 +161,18 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
lowmem_deathpending_timeout = jiffies + HZ;
send_sig(SIGKILL, selected, 0);
set_tsk_thread_flag(selected, TIF_MEMDIE);
- rem -= selected_tasksize;
+ rem += selected_tasksize;
}
- lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
+
+ lowmem_print(4, "lowmem_scan %lu, %x, return %lu\n",
sc->nr_to_scan, sc->gfp_mask, rem);
rcu_read_unlock();
return rem;
}
static struct shrinker lowmem_shrinker = {
- .shrink = lowmem_shrink,
+ .scan_objects = lowmem_scan,
+ .count_objects = lowmem_count,
.seeks = DEFAULT_SEEKS * 16
};
diff --git a/drivers/staging/android/sw_sync.c b/drivers/staging/android/sw_sync.c
index 4928f93..f24493a 100644
--- a/drivers/staging/android/sw_sync.c
+++ b/drivers/staging/android/sw_sync.c
@@ -160,9 +160,10 @@ static int sw_sync_release(struct inode *inode, struct file *file)
return 0;
}
-static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj, unsigned long arg)
+static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj,
+ unsigned long arg)
{
- int fd = get_unused_fd();
+ int fd = get_unused_fd_flags(O_CLOEXEC);
int err;
struct sync_pt *pt;
struct sync_fence *fence;
@@ -218,7 +219,8 @@ static long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg)
return 0;
}
-static long sw_sync_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static long sw_sync_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
struct sw_sync_timeline *obj = file->private_data;
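Both sw_sync here and sync.c below switch from get_unused_fd() to get_unused_fd_flags(O_CLOEXEC), so the fence file descriptors handed back to userspace are close-on-exec by default. The surrounding reserve/install/abort pattern is the usual one (generic sketch, not specific to this driver):

#include <linux/fcntl.h>
#include <linux/file.h>

/* Hand a freshly created struct file to the caller as a close-on-exec fd. */
static int install_cloexec_fd(struct file *filp)
{
	int fd = get_unused_fd_flags(O_CLOEXEC);	/* reserve a descriptor number */

	if (fd < 0)
		return fd;

	fd_install(fd, filp);		/* publish it; no failure path after this */
	return fd;
}

/* If an error occurs after the reservation but before fd_install(),
 * release the number again with put_unused_fd(fd). */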
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index 3893a35..38e5d3b 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -125,9 +125,9 @@ static void sync_timeline_remove_pt(struct sync_pt *pt)
spin_unlock_irqrestore(&obj->active_list_lock, flags);
spin_lock_irqsave(&obj->child_list_lock, flags);
- if (!list_empty(&pt->child_list)) {
+ if (!list_empty(&pt->child_list))
list_del_init(&pt->child_list);
- }
+
spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
@@ -697,7 +697,7 @@ static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
- int fd = get_unused_fd();
+ int fd = get_unused_fd_flags(O_CLOEXEC);
int err;
struct sync_fence *fence2, *fence3;
struct sync_merge_data data;
@@ -876,11 +876,11 @@ static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
seq_printf(s, " / %s", value);
}
} else if (pt->parent->ops->print_pt) {
- seq_printf(s, ": ");
+ seq_puts(s, ": ");
pt->parent->ops->print_pt(s, pt);
}
- seq_printf(s, "\n");
+ seq_puts(s, "\n");
}
static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
@@ -895,11 +895,11 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
obj->ops->timeline_value_str(obj, value, sizeof(value));
seq_printf(s, ": %s", value);
} else if (obj->ops->print_obj) {
- seq_printf(s, ": ");
+ seq_puts(s, ": ");
obj->ops->print_obj(s, obj);
}
- seq_printf(s, "\n");
+ seq_puts(s, "\n");
spin_lock_irqsave(&obj->child_list_lock, flags);
list_for_each(pos, &obj->child_list_head) {
@@ -940,7 +940,7 @@ static int sync_debugfs_show(struct seq_file *s, void *unused)
unsigned long flags;
struct list_head *pos;
- seq_printf(s, "objs:\n--------------\n");
+ seq_puts(s, "objs:\n--------------\n");
spin_lock_irqsave(&sync_timeline_list_lock, flags);
list_for_each(pos, &sync_timeline_list_head) {
@@ -949,11 +949,11 @@ static int sync_debugfs_show(struct seq_file *s, void *unused)
sync_timeline_list);
sync_print_obj(s, obj);
- seq_printf(s, "\n");
+ seq_puts(s, "\n");
}
spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
- seq_printf(s, "fences:\n--------------\n");
+ seq_puts(s, "fences:\n--------------\n");
spin_lock_irqsave(&sync_fence_list_lock, flags);
list_for_each(pos, &sync_fence_list_head) {
@@ -961,7 +961,7 @@ static int sync_debugfs_show(struct seq_file *s, void *unused)
container_of(pos, struct sync_fence, sync_fence_list);
sync_print_fence(s, fence);
- seq_printf(s, "\n");
+ seq_puts(s, "\n");
}
spin_unlock_irqrestore(&sync_fence_list_lock, flags);
return 0;
@@ -988,7 +988,7 @@ late_initcall(sync_debugfs_init);
#define DUMP_CHUNK 256
static char sync_dump_buf[64 * 1024];
-void sync_dump(void)
+static void sync_dump(void)
{
struct seq_file s = {
.buf = sync_dump_buf,
diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
index ec9e2ae..2c61783 100644
--- a/drivers/staging/android/timed_output.c
+++ b/drivers/staging/android/timed_output.c
@@ -28,7 +28,7 @@ static struct class *timed_output_class;
static atomic_t device_count;
static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
struct timed_output_dev *tdev = dev_get_drvdata(dev);
int remaining = tdev->get_time(tdev);
@@ -36,9 +36,8 @@ static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", remaining);
}
-static ssize_t enable_store(
- struct device *dev, struct device_attribute *attr,
- const char *buf, size_t size)
+static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
{
struct timed_output_dev *tdev = dev_get_drvdata(dev);
int value;
@@ -50,8 +49,13 @@ static ssize_t enable_store(
return size;
}
+static DEVICE_ATTR_RW(enable);
-static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, enable_show, enable_store);
+static struct attribute *timed_output_attrs[] = {
+ &dev_attr_enable.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(timed_output);
static int create_timed_output_class(void)
{
@@ -60,6 +64,7 @@ static int create_timed_output_class(void)
if (IS_ERR(timed_output_class))
return PTR_ERR(timed_output_class);
atomic_set(&device_count, 0);
+ timed_output_class->dev_groups = timed_output_groups;
}
return 0;
@@ -78,31 +83,19 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
tdev->index = atomic_inc_return(&device_count);
tdev->dev = device_create(timed_output_class, NULL,
- MKDEV(0, tdev->index), NULL, tdev->name);
+ MKDEV(0, tdev->index), NULL, "%s", tdev->name);
if (IS_ERR(tdev->dev))
return PTR_ERR(tdev->dev);
- ret = device_create_file(tdev->dev, &dev_attr_enable);
- if (ret < 0)
- goto err_create_file;
-
dev_set_drvdata(tdev->dev, tdev);
tdev->state = 0;
return 0;
-
-err_create_file:
- device_destroy(timed_output_class, MKDEV(0, tdev->index));
- pr_err("failed to register driver %s\n",
- tdev->name);
-
- return ret;
}
EXPORT_SYMBOL_GPL(timed_output_dev_register);
void timed_output_dev_unregister(struct timed_output_dev *tdev)
{
tdev->enable(tdev, 0);
- device_remove_file(tdev->dev, &dev_attr_enable);
device_destroy(timed_output_class, MKDEV(0, tdev->index));
dev_set_drvdata(tdev->dev, NULL);
}
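The timed_output conversion above replaces per-device device_create_file()/device_remove_file() calls with a class-wide attribute group: DEVICE_ATTR_RW(enable) binds enable_show()/enable_store() by name, ATTRIBUTE_GROUPS(timed_output) emits the timed_output_groups array, and pointing class->dev_groups at it makes the driver core create and remove the sysfs file automatically for every device of the class. The same pattern in a self-contained sketch (names are invented):

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t level_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", 0);		/* report the current level here */
}

static ssize_t level_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	/* parse 'buf' and apply the new level here */
	return count;
}
static DEVICE_ATTR_RW(level);		/* expects level_show()/level_store() */

static struct attribute *demo_attrs[] = {
	&dev_attr_level.attr,
	NULL,
};
ATTRIBUTE_GROUPS(demo);			/* generates demo_groups */

/* At class creation:
 *	demo_class = class_create(THIS_MODULE, "demo");
 *	demo_class->dev_groups = demo_groups;
 * Every device created in this class then gets a 'level' attribute for free. */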
diff --git a/drivers/staging/asus_oled/Kconfig b/drivers/staging/asus_oled/Kconfig
deleted file mode 100644
index e56dbb2..0000000
--- a/drivers/staging/asus_oled/Kconfig
+++ /dev/null
@@ -1,6 +0,0 @@
-config ASUS_OLED
- tristate "Asus OLED driver"
- depends on USB
- default N
- ---help---
- Enable support for the OLED display present in some Asus laptops.
diff --git a/drivers/staging/asus_oled/Makefile b/drivers/staging/asus_oled/Makefile
deleted file mode 100644
index e71f9aa..0000000
--- a/drivers/staging/asus_oled/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_ASUS_OLED) += asus_oled.o
diff --git a/drivers/staging/asus_oled/README b/drivers/staging/asus_oled/README
deleted file mode 100644
index 2d72123..0000000
--- a/drivers/staging/asus_oled/README
+++ /dev/null
@@ -1,156 +0,0 @@
-
- Driver for Asus OLED display present in some Asus laptops.
-
- The code of this driver is based on 'asusoled' program taken from
- <http://lapsus.berlios.de/asus_oled.html>. I just wanted to have a simple
- kernel driver for controlling this device, but I didn't know how
- to do that. Now I know ;) Also, that program can not be used
- with usbhid loaded, which means no USB mouse/keyboard while
- controlling OLED display :(
-
- It has been tested on Asus G1 and didn't cause any problems,
- but I don't guarantee that it won't do anything wrong :)
-
- It can (and probably does) have errors. It is usable
- in my case, and I hope others will find it useful too!
-
-*******
-
-Building the module
-
- To build the module you need kernel 2.6 include files and some C compiler.
-
- Just run:
- make
- make install (as a root)
-
- It will build (hopefully) the module and install it in
- /lib/modules/'uname -r'/extra/asus_oled.ko.
-
- To load it just use:
- modprobe asus_oled
-
- You can check if it has detected your OLED display by looking into dmesg output.
- There should be something like this:
- asus-oled 2-7:1.0: Attached Asus OLED device
-
- If it doesn't find your display, you can try removing usbhid module.
- If you add asus_oled into the list of modules loaded during system boot
- before usbhid, it will work even when usbhid is present.
-
- If it still doesn't detect your hardware, check lsusb output.
- There should be a similar line:
- Bus 002 Device 005: ID 0b05:1726 ASUSTek Computer, Inc.
-
- If you don't see any lines with '0b05:1726' it means that you have a different
- type of hardware that is not detected (it may or may not work, but the driver
- knows only the '0b05:1726' device).
-
-*******
-
-Configuration
-
- There is only one option: start_off.
- You can use it by: 'modprobe asus_oled start_off=1', or by adding this
- line to /etc/modprobe.d/asus_oled.conf:
- options asus_oled start_off=1
-
- With this option provided, the asus_oled driver will switch off the display
- when it is detected and attached. It is a nice feature to just switch off the 'ASUS'
- logo. If you don't use the display, it is probably a good idea to switch it off,
- to protect OLEDs from "wearing off".
-
-*******
-
-Usage
-
- This module can be controlled with two special files:
- /sys/class/asus_oled/oled_N/enabled
- /sys/class/asus_oled/oled_N/picture
-
- (N is the device number, the first, and probably the only, has number 1,
- so it is /sys/class/asus_oled/oled_1/enabled
- and /sys/class/asus_oled/oled_1/picture)
-
- The 'enabled' file is for reading and writing, 'picture' is writeable only.
-
- You can write 0 or 1 to 'enabled' file, which will switch
- on and off the display. Reading from this file will tell you the last
- status set, either 0 or 1. By default it is 1, so if the device was set to 'off',
- and the computer was rebooted without power-off, this file will contain wrong
- value - because the device is off, but hasn't been disabled this time and is
- assumed to be on...
-
- To the 'picture' file you write pictures to be displayed by the OLED device.
- The format of the file:
- <M:WxH>
- 00001110010111000
- 00010101010101010
- ....
-
- First line is a configuration parameter. Meaning of fields in <M:WxH>:
- M - picture mode. It can be either 's' for static pictures,
- 'r' for rolling pictures, and 'f' for flashing pictures.
- W - width of the picture. May be between 1 and 1792
- H - height of the picture. May be between 1 and 32
-
- For example <s:128x32> means static picture, 128 pixels long and 32 pixels high.
-
- The physical size of the display is 128x32 pixels. Static and flashing pictures
- can't be larger than that (actually they can, but only part of them will be displayed ;) )
-
- If the picture is smaller than 128x32 it will be centered. Rolling pictures wider than
- 128 pixels will be centered too, unless their width = n*128. Vertically they will be
- centered just like static pictures, if their height is smaller than 32.
-
- Flashing pictures will be centered horizontally if their width < 128, but they are
- centered vertically in a different way. If their height < 16, they will be centered
- in the upper half of the display (rows 0-15). This is because only the first half
- of flashing pictures is used for flashing. When a picture with height = 32 is
- displayed in flashing mode, its upper 16 rows will be flashing in the upper half
- of the display, and the lower half will be empty. After a few seconds the upper part will
- stop flashing (but that part of the picture will remain there), and the lower
- half of the display will start displaying the lower half of the picture
- in rolling mode, unless it is empty, or the picture was small enough to fit in
- the upper part. It is not my idea, this is just the way the Asus display works ;)
- So if you need just flashing, use at most a 128x16 picture. If you need flashing and
- rolling, use the whole size of the display.
-
- Lines following the first, configuration, line are picture data. Each '1' means
- that the pixel is lit, and '0' means that it is not. You can also use '#' as ON,
- and ' ' (space) as OFF. Empty lines and all other characters are ignored.
-
- It is possible to write everything in one line <M:WxH>01010101010101010...,
- and W*H characters will be used. If there are not enough characters, nothing will be
- displayed. However, the 'line mode' is easier to read (and write), and it also
- lets you omit parts of the data. Whenever an End-Of-Line character is found, but
- the line is not W characters long, it is assumed that all missing characters
- are equal to the last character in the line.
-
- The following line represents a '0', a '1' and lots of '0's, depending on the width of
- the picture provided in the configuration data:
- 010
-
- So if you need an empty line, it is sufficient to write a line with only one '0' in it.
- The same works with '1' (or ' ' and '#').
-
- If there is too much data in the file, the excess will be ignored. If you are not sure
- how many characters you are missing, you can add a few lines with a single zero in each of them.
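
 [Editorial note: the snippet below is illustrative only and is not part of the original
 README or the driver. It just restates the line-padding rule described above as a tiny
 Python sketch: every character missing from a short line is assumed equal to the last
 character present on that line (empty lines are ignored by the driver and not handled here).]

    # Illustrative only: the line-padding rule described above.
    def expand(line, width):
        # missing trailing characters are assumed equal to the last one present
        return line + line[-1] * (width - len(line))

    print(expand("010", 8))   # -> 01000000
    print(expand("1", 8))     # -> 11111111
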
-
- There are some example pictures in .txt format that can be used as follows:
- cat foo.txt > /sys/class/asus_oled/oled_1/picture
-
- If the display is switched off you also need to run:
- echo 1 > /sys/class/asus_oled/oled_1/enabled
- To switch it off, just use:
- echo 0 > /sys/class/asus_oled/oled_1/enabled
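
 [Editorial note: the following Python sketch is purely illustrative and is not shipped
 with the driver. It builds a small static picture in the <M:WxH> text format described
 above and writes it to the sysfs files documented in this README; the picture size and
 border pattern are made up for the demonstration, and the script assumes root access and
 an attached device.]

    #!/usr/bin/env python3
    # Draw a 64x16 border rectangle and display it on the first OLED device.

    W, H = 64, 16                    # smaller than 128x32, so it will be centered

    rows = []
    for y in range(H):
        if y in (0, H - 1):
            rows.append("1" * W)                      # top and bottom edge fully lit
        else:
            rows.append("1" + "0" * (W - 2) + "1")    # only the side edges lit

    picture = "<s:%dx%d>\n" % (W, H) + "\n".join(rows) + "\n"

    base = "/sys/class/asus_oled/oled_1"
    with open(base + "/picture", "w") as f:
        f.write(picture)
    with open(base + "/enabled", "w") as f:
        f.write("1")                 # make sure the display is switched on
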
-
-
-*******
-
- For any additional info please have a look at http://lapsus.berlios.de/asus_oled.html
-
-
-
- Jakub Schmidtke (sjakub@gmail.com)
-
diff --git a/drivers/staging/asus_oled/TODO b/drivers/staging/asus_oled/TODO
deleted file mode 100644
index 2514131..0000000
--- a/drivers/staging/asus_oled/TODO
+++ /dev/null
@@ -1,10 +0,0 @@
-TODO:
- - checkpatch.pl cleanups
- - sparse fixes
- - audit the userspace interface
- - sysfs vs. char?
- - Documentation/ABI/ needs to be added
- - put the sample .txt files and README file somewhere.
-
-Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
-Cc: Jakub Schmidtke <sjakub@gmail.com>
diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c
deleted file mode 100644
index d0a5a28..0000000
--- a/drivers/staging/asus_oled/asus_oled.c
+++ /dev/null
@@ -1,843 +0,0 @@
-/*
- * Asus OLED USB driver
- *
- * Copyright (C) 2007,2008 Jakub Schmidtke (sjakub@gmail.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- *
- * This module is based on usbled and asus-laptop modules.
- *
- *
- * Asus OLED support is based on asusoled program taken from
- * <http://lapsus.berlios.de/asus_oled.html>.
- *
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/usb.h>
-#include <linux/platform_device.h>
-#include <linux/ctype.h>
-
-#define ASUS_OLED_VERSION "0.04-dev"
-#define ASUS_OLED_NAME "asus-oled"
-#define ASUS_OLED_UNDERSCORE_NAME "asus_oled"
-
-#define ASUS_OLED_STATIC 's'
-#define ASUS_OLED_ROLL 'r'
-#define ASUS_OLED_FLASH 'f'
-
-#define ASUS_OLED_MAX_WIDTH 1792
-#define ASUS_OLED_DISP_HEIGHT 32
-#define ASUS_OLED_PACKET_BUF_SIZE 256
-
-#define USB_VENDOR_ID_ASUS 0x0b05
-#define USB_DEVICE_ID_ASUS_LCM 0x1726
-#define USB_DEVICE_ID_ASUS_LCM2 0x175b
-
-MODULE_AUTHOR("Jakub Schmidtke, sjakub@gmail.com");
-MODULE_DESCRIPTION("Asus OLED Driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(ASUS_OLED_VERSION);
-
-static struct class *oled_class;
-static int oled_num;
-
-static uint start_off;
-
-module_param(start_off, uint, 0644);
-
-MODULE_PARM_DESC(start_off,
- "Set to 1 to switch off OLED display after it is attached");
-
-enum oled_pack_mode {
- PACK_MODE_G1,
- PACK_MODE_G50,
- PACK_MODE_LAST
-};
-
-struct oled_dev_desc_str {
- uint16_t idVendor;
- uint16_t idProduct;
- /* width of display */
- uint16_t devWidth;
- /* formula to be used while packing the picture */
- enum oled_pack_mode packMode;
- const char *devDesc;
-};
-
-/* table of devices that work with this driver */
-static const struct usb_device_id id_table[] = {
- /* Asus G1/G2 (and variants)*/
- { USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM) },
- /* Asus G50V (and possibly others - G70? G71?)*/
- { USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM2) },
- { },
-};
-
-/* parameters of specific devices */
-static struct oled_dev_desc_str oled_dev_desc_table[] = {
- { USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM, 128, PACK_MODE_G1,
- "G1/G2" },
- { USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM2, 256, PACK_MODE_G50,
- "G50" },
- { },
-};
-
-MODULE_DEVICE_TABLE(usb, id_table);
-
-struct asus_oled_header {
- uint8_t magic1;
- uint8_t magic2;
- uint8_t flags;
- uint8_t value3;
- uint8_t buffer1;
- uint8_t buffer2;
- uint8_t value6;
- uint8_t value7;
- uint8_t value8;
- uint8_t padding2[7];
-} __attribute((packed));
-
-struct asus_oled_packet {
- struct asus_oled_header header;
- uint8_t bitmap[ASUS_OLED_PACKET_BUF_SIZE];
-} __attribute((packed));
-
-struct asus_oled_dev {
- struct usb_device *udev;
- uint8_t pic_mode;
- uint16_t dev_width;
- enum oled_pack_mode pack_mode;
- size_t height;
- size_t width;
- size_t x_shift;
- size_t y_shift;
- size_t buf_offs;
- uint8_t last_val;
- size_t buf_size;
- char *buf;
- uint8_t enabled;
- uint8_t enabled_post_resume;
- struct device *dev;
-};
-
-static void setup_packet_header(struct asus_oled_packet *packet, char flags,
- char value3, char buffer1, char buffer2, char value6,
- char value7, char value8)
-{
- memset(packet, 0, sizeof(struct asus_oled_header));
- packet->header.magic1 = 0x55;
- packet->header.magic2 = 0xaa;
- packet->header.flags = flags;
- packet->header.value3 = value3;
- packet->header.buffer1 = buffer1;
- packet->header.buffer2 = buffer2;
- packet->header.value6 = value6;
- packet->header.value7 = value7;
- packet->header.value8 = value8;
-}
-
-static void enable_oled(struct asus_oled_dev *odev, uint8_t enabl)
-{
- int retval;
- int act_len;
- struct asus_oled_packet *packet;
-
- packet = kzalloc(sizeof(struct asus_oled_packet), GFP_KERNEL);
- if (!packet)
- return;
-
- setup_packet_header(packet, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00);
-
- if (enabl)
- packet->bitmap[0] = 0xaf;
- else
- packet->bitmap[0] = 0xae;
-
- retval = usb_bulk_msg(odev->udev,
- usb_sndbulkpipe(odev->udev, 2),
- packet,
- sizeof(struct asus_oled_header) + 1,
- &act_len,
- -1);
-
- if (retval)
- dev_dbg(&odev->udev->dev, "retval = %d\n", retval);
-
- odev->enabled = enabl;
-
- kfree(packet);
-}
-
-static ssize_t set_enabled(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct usb_interface *intf = to_usb_interface(dev);
- struct asus_oled_dev *odev = usb_get_intfdata(intf);
- unsigned long value;
- if (kstrtoul(buf, 10, &value))
- return -EINVAL;
-
- enable_oled(odev, value);
-
- return count;
-}
-
-static ssize_t class_set_enabled(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct asus_oled_dev *odev =
- (struct asus_oled_dev *) dev_get_drvdata(device);
- unsigned long value;
-
- if (kstrtoul(buf, 10, &value))
- return -EINVAL;
-
- enable_oled(odev, value);
-
- return count;
-}
-
-static ssize_t get_enabled(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct usb_interface *intf = to_usb_interface(dev);
- struct asus_oled_dev *odev = usb_get_intfdata(intf);
-
- return sprintf(buf, "%d\n", odev->enabled);
-}
-
-static ssize_t class_get_enabled(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct asus_oled_dev *odev =
- (struct asus_oled_dev *) dev_get_drvdata(device);
-
- return sprintf(buf, "%d\n", odev->enabled);
-}
-
-static void send_packets(struct usb_device *udev,
- struct asus_oled_packet *packet,
- char *buf, uint8_t p_type, size_t p_num)
-{
- size_t i;
- int act_len;
-
- for (i = 0; i < p_num; i++) {
- int retval;
-
- switch (p_type) {
- case ASUS_OLED_ROLL:
- setup_packet_header(packet, 0x40, 0x80, p_num,
- i + 1, 0x00, 0x01, 0xff);
- break;
- case ASUS_OLED_STATIC:
- setup_packet_header(packet, 0x10 + i, 0x80, 0x01,
- 0x01, 0x00, 0x01, 0x00);
- break;
- case ASUS_OLED_FLASH:
- setup_packet_header(packet, 0x10 + i, 0x80, 0x01,
- 0x01, 0x00, 0x00, 0xff);
- break;
- }
-
- memcpy(packet->bitmap, buf + (ASUS_OLED_PACKET_BUF_SIZE*i),
- ASUS_OLED_PACKET_BUF_SIZE);
-
- retval = usb_bulk_msg(udev, usb_sndctrlpipe(udev, 2),
- packet, sizeof(struct asus_oled_packet),
- &act_len, -1);
-
- if (retval)
- dev_dbg(&udev->dev, "retval = %d\n", retval);
- }
-}
-
-static void send_packet(struct usb_device *udev,
- struct asus_oled_packet *packet,
- size_t offset, size_t len, char *buf, uint8_t b1,
- uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5,
- uint8_t b6) {
- int retval;
- int act_len;
-
- setup_packet_header(packet, b1, b2, b3, b4, b5, b6, 0x00);
- memcpy(packet->bitmap, buf + offset, len);
-
- retval = usb_bulk_msg(udev,
- usb_sndctrlpipe(udev, 2),
- packet,
- sizeof(struct asus_oled_packet),
- &act_len,
- -1);
-
- if (retval)
- dev_dbg(&udev->dev, "retval = %d\n", retval);
-}
-
-
-static void send_packets_g50(struct usb_device *udev,
- struct asus_oled_packet *packet, char *buf)
-{
- send_packet(udev, packet, 0, 0x100, buf,
- 0x10, 0x00, 0x02, 0x01, 0x00, 0x01);
- send_packet(udev, packet, 0x100, 0x080, buf,
- 0x10, 0x00, 0x02, 0x02, 0x80, 0x00);
-
- send_packet(udev, packet, 0x180, 0x100, buf,
- 0x11, 0x00, 0x03, 0x01, 0x00, 0x01);
- send_packet(udev, packet, 0x280, 0x100, buf,
- 0x11, 0x00, 0x03, 0x02, 0x00, 0x01);
- send_packet(udev, packet, 0x380, 0x080, buf,
- 0x11, 0x00, 0x03, 0x03, 0x80, 0x00);
-}
-
-
-static void send_data(struct asus_oled_dev *odev)
-{
- size_t packet_num = odev->buf_size / ASUS_OLED_PACKET_BUF_SIZE;
- struct asus_oled_packet *packet;
-
- packet = kzalloc(sizeof(struct asus_oled_packet), GFP_KERNEL);
- if (!packet)
- return;
-
- if (odev->pack_mode == PACK_MODE_G1) {
- /* When sending roll-mode data the display updated only
- first packet. I have no idea why, but when static picture
- is sent just before rolling picture everything works fine. */
- if (odev->pic_mode == ASUS_OLED_ROLL)
- send_packets(odev->udev, packet, odev->buf,
- ASUS_OLED_STATIC, 2);
-
- /* Only ROLL mode can use more than 2 packets.*/
- if (odev->pic_mode != ASUS_OLED_ROLL && packet_num > 2)
- packet_num = 2;
-
- send_packets(odev->udev, packet, odev->buf,
- odev->pic_mode, packet_num);
- } else if (odev->pack_mode == PACK_MODE_G50) {
- send_packets_g50(odev->udev, packet, odev->buf);
- }
-
- kfree(packet);
-}
-
-static int append_values(struct asus_oled_dev *odev, uint8_t val, size_t count)
-{
- odev->last_val = val;
-
- if (val == 0) {
- odev->buf_offs += count;
- return 0;
- }
-
- while (count-- > 0) {
- size_t x = odev->buf_offs % odev->width;
- size_t y = odev->buf_offs / odev->width;
- size_t i;
-
- x += odev->x_shift;
- y += odev->y_shift;
-
- switch (odev->pack_mode) {
- case PACK_MODE_G1:
- /* i = (x/128)*640 + 127 - x + (y/8)*128;
- This one for 128 is the same, but might be better
- for different widths? */
- i = (x/odev->dev_width)*640 +
- odev->dev_width - 1 - x +
- (y/8)*odev->dev_width;
- break;
-
- case PACK_MODE_G50:
- i = (odev->dev_width - 1 - x)/8 + y*odev->dev_width/8;
- break;
-
- default:
- i = 0;
- dev_err(odev->dev, "Unknown OLED Pack Mode: %d!\n",
- odev->pack_mode);
- break;
- }
-
- if (i >= odev->buf_size) {
- dev_err(odev->dev, "Buffer overflow! Report a bug:"
- "offs: %d >= %d i: %d (x: %d y: %d)\n",
- (int) odev->buf_offs, (int) odev->buf_size,
- (int) i, (int) x, (int) y);
- return -EIO;
- }
-
- switch (odev->pack_mode) {
- case PACK_MODE_G1:
- odev->buf[i] &= ~(1<<(y%8));
- break;
-
- case PACK_MODE_G50:
- odev->buf[i] &= ~(1<<(x%8));
- break;
-
- default:
- /* cannot get here; stops gcc complaining*/
- ;
- }
-
- odev->buf_offs++;
- }
-
- return 0;
-}
-
-static ssize_t odev_set_picture(struct asus_oled_dev *odev,
- const char *buf, size_t count)
-{
- size_t offs = 0, max_offs;
-
- if (count < 1)
- return 0;
-
- if (tolower(buf[0]) == 'b') {
- /* binary mode, set the entire memory*/
-
- size_t i;
-
- odev->buf_size = (odev->dev_width * ASUS_OLED_DISP_HEIGHT) / 8;
-
- kfree(odev->buf);
- odev->buf = kmalloc(odev->buf_size, GFP_KERNEL);
- if (odev->buf == NULL) {
- odev->buf_size = 0;
- dev_err(odev->dev, "Out of memory!\n");
- return -ENOMEM;
- }
-
- memset(odev->buf, 0xff, odev->buf_size);
-
- for (i = 1; i < count && i <= 32 * 32; i++) {
- odev->buf[i-1] = buf[i];
- odev->buf_offs = i-1;
- }
-
- odev->width = odev->dev_width / 8;
- odev->height = ASUS_OLED_DISP_HEIGHT;
- odev->x_shift = 0;
- odev->y_shift = 0;
- odev->last_val = 0;
-
- send_data(odev);
-
- return count;
- }
-
- if (buf[0] == '<') {
- size_t i;
- size_t w = 0, h = 0;
- size_t w_mem, h_mem;
-
- if (count < 10 || buf[2] != ':')
- goto error_header;
-
-
- switch (tolower(buf[1])) {
- case ASUS_OLED_STATIC:
- case ASUS_OLED_ROLL:
- case ASUS_OLED_FLASH:
- odev->pic_mode = buf[1];
- break;
- default:
- dev_err(odev->dev, "Wrong picture mode: '%c'.\n",
- buf[1]);
- return -EIO;
- break;
- }
-
- for (i = 3; i < count; ++i) {
- if (buf[i] >= '0' && buf[i] <= '9') {
- w = 10*w + (buf[i] - '0');
-
- if (w > ASUS_OLED_MAX_WIDTH)
- goto error_width;
- } else if (tolower(buf[i]) == 'x') {
- break;
- } else {
- goto error_width;
- }
- }
-
- for (++i; i < count; ++i) {
- if (buf[i] >= '0' && buf[i] <= '9') {
- h = 10*h + (buf[i] - '0');
-
- if (h > ASUS_OLED_DISP_HEIGHT)
- goto error_height;
- } else if (tolower(buf[i]) == '>') {
- break;
- } else {
- goto error_height;
- }
- }
-
- if (w < 1 || w > ASUS_OLED_MAX_WIDTH)
- goto error_width;
-
- if (h < 1 || h > ASUS_OLED_DISP_HEIGHT)
- goto error_height;
-
- if (i >= count || buf[i] != '>')
- goto error_header;
-
- offs = i+1;
-
- if (w % (odev->dev_width) != 0)
- w_mem = (w/(odev->dev_width) + 1)*(odev->dev_width);
- else
- w_mem = w;
-
- if (h < ASUS_OLED_DISP_HEIGHT)
- h_mem = ASUS_OLED_DISP_HEIGHT;
- else
- h_mem = h;
-
- odev->buf_size = w_mem * h_mem / 8;
-
- kfree(odev->buf);
- odev->buf = kmalloc(odev->buf_size, GFP_KERNEL);
-
- if (odev->buf == NULL) {
- odev->buf_size = 0;
- dev_err(odev->dev, "Out of memory!\n");
- return -ENOMEM;
- }
-
- memset(odev->buf, 0xff, odev->buf_size);
-
- odev->buf_offs = 0;
- odev->width = w;
- odev->height = h;
- odev->x_shift = 0;
- odev->y_shift = 0;
- odev->last_val = 0;
-
- if (odev->pic_mode == ASUS_OLED_FLASH) {
- if (h < ASUS_OLED_DISP_HEIGHT/2)
- odev->y_shift = (ASUS_OLED_DISP_HEIGHT/2 - h)/2;
- } else {
- if (h < ASUS_OLED_DISP_HEIGHT)
- odev->y_shift = (ASUS_OLED_DISP_HEIGHT - h)/2;
- }
-
- if (w < (odev->dev_width))
- odev->x_shift = ((odev->dev_width) - w)/2;
- }
-
- max_offs = odev->width * odev->height;
-
- while (offs < count && odev->buf_offs < max_offs) {
- int ret = 0;
-
- if (buf[offs] == '1' || buf[offs] == '#') {
- ret = append_values(odev, 1, 1);
- if (ret < 0)
- return ret;
- } else if (buf[offs] == '0' || buf[offs] == ' ') {
- ret = append_values(odev, 0, 1);
- if (ret < 0)
- return ret;
- } else if (buf[offs] == '\n') {
- /* New line detected. Lets assume, that all characters
- till the end of the line were equal to the last
- character in this line.*/
- if (odev->buf_offs % odev->width != 0)
- ret = append_values(odev, odev->last_val,
- odev->width -
- (odev->buf_offs %
- odev->width));
- if (ret < 0)
- return ret;
- }
-
- offs++;
- }
-
- if (odev->buf_offs >= max_offs)
- send_data(odev);
-
- return count;
-
-error_width:
- dev_err(odev->dev, "Wrong picture width specified.\n");
- return -EIO;
-
-error_height:
- dev_err(odev->dev, "Wrong picture height specified.\n");
- return -EIO;
-
-error_header:
- dev_err(odev->dev, "Wrong picture header.\n");
- return -EIO;
-}
-
-static ssize_t set_picture(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct usb_interface *intf = to_usb_interface(dev);
-
- return odev_set_picture(usb_get_intfdata(intf), buf, count);
-}
-
-static ssize_t class_set_picture(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return odev_set_picture((struct asus_oled_dev *)
- dev_get_drvdata(device), buf, count);
-}
-
-#define ASUS_OLED_DEVICE_ATTR(_file) dev_attr_asus_oled_##_file
-
-static DEVICE_ATTR(asus_oled_enabled, S_IWUSR | S_IRUGO,
- get_enabled, set_enabled);
-static DEVICE_ATTR(asus_oled_picture, S_IWUSR , NULL, set_picture);
-
-static DEVICE_ATTR(enabled, S_IWUSR | S_IRUGO,
- class_get_enabled, class_set_enabled);
-static DEVICE_ATTR(picture, S_IWUSR, NULL, class_set_picture);
-
-static int asus_oled_probe(struct usb_interface *interface,
- const struct usb_device_id *id)
-{
- struct usb_device *udev = interface_to_usbdev(interface);
- struct asus_oled_dev *odev = NULL;
- int retval = -ENOMEM;
- uint16_t dev_width = 0;
- enum oled_pack_mode pack_mode = PACK_MODE_LAST;
- const struct oled_dev_desc_str *dev_desc = oled_dev_desc_table;
- const char *desc = NULL;
-
- if (!id) {
- /* Even possible? Just to make sure...*/
- dev_err(&interface->dev, "No usb_device_id provided!\n");
- return -ENODEV;
- }
-
- for (; dev_desc->idVendor; dev_desc++) {
- if (dev_desc->idVendor == id->idVendor
- && dev_desc->idProduct == id->idProduct) {
- dev_width = dev_desc->devWidth;
- desc = dev_desc->devDesc;
- pack_mode = dev_desc->packMode;
- break;
- }
- }
-
- if (!desc || dev_width < 1 || pack_mode == PACK_MODE_LAST) {
- dev_err(&interface->dev,
- "Missing or incomplete device description!\n");
- return -ENODEV;
- }
-
- odev = kzalloc(sizeof(struct asus_oled_dev), GFP_KERNEL);
- if (odev == NULL)
- return -ENOMEM;
-
- odev->udev = usb_get_dev(udev);
- odev->pic_mode = ASUS_OLED_STATIC;
- odev->dev_width = dev_width;
- odev->pack_mode = pack_mode;
- odev->height = 0;
- odev->width = 0;
- odev->x_shift = 0;
- odev->y_shift = 0;
- odev->buf_offs = 0;
- odev->buf_size = 0;
- odev->last_val = 0;
- odev->buf = NULL;
- odev->enabled = 1;
- odev->dev = NULL;
-
- usb_set_intfdata(interface, odev);
-
- retval = device_create_file(&interface->dev,
- &ASUS_OLED_DEVICE_ATTR(enabled));
- if (retval)
- goto err_files;
-
- retval = device_create_file(&interface->dev,
- &ASUS_OLED_DEVICE_ATTR(picture));
- if (retval)
- goto err_files;
-
- odev->dev = device_create(oled_class, &interface->dev, MKDEV(0, 0),
- NULL, "oled_%d", ++oled_num);
-
- if (IS_ERR(odev->dev)) {
- retval = PTR_ERR(odev->dev);
- goto err_files;
- }
-
- dev_set_drvdata(odev->dev, odev);
-
- retval = device_create_file(odev->dev, &dev_attr_enabled);
- if (retval)
- goto err_class_enabled;
-
- retval = device_create_file(odev->dev, &dev_attr_picture);
- if (retval)
- goto err_class_picture;
-
- dev_info(&interface->dev,
- "Attached Asus OLED device: %s [width %u, pack_mode %d]\n",
- desc, odev->dev_width, odev->pack_mode);
-
- if (start_off)
- enable_oled(odev, 0);
-
- return 0;
-
-err_class_picture:
- device_remove_file(odev->dev, &dev_attr_picture);
-
-err_class_enabled:
- device_remove_file(odev->dev, &dev_attr_enabled);
- device_unregister(odev->dev);
-
-err_files:
- device_remove_file(&interface->dev, &ASUS_OLED_DEVICE_ATTR(enabled));
- device_remove_file(&interface->dev, &ASUS_OLED_DEVICE_ATTR(picture));
-
- usb_set_intfdata(interface, NULL);
- usb_put_dev(odev->udev);
- kfree(odev);
-
- return retval;
-}
-
-static void asus_oled_disconnect(struct usb_interface *interface)
-{
- struct asus_oled_dev *odev;
-
- odev = usb_get_intfdata(interface);
- usb_set_intfdata(interface, NULL);
-
- device_remove_file(odev->dev, &dev_attr_picture);
- device_remove_file(odev->dev, &dev_attr_enabled);
- device_unregister(odev->dev);
-
- device_remove_file(&interface->dev, &ASUS_OLED_DEVICE_ATTR(picture));
- device_remove_file(&interface->dev, &ASUS_OLED_DEVICE_ATTR(enabled));
-
- usb_put_dev(odev->udev);
-
- kfree(odev->buf);
-
- kfree(odev);
-
- dev_info(&interface->dev, "Disconnected Asus OLED device\n");
-}
-
-#ifdef CONFIG_PM
-static int asus_oled_suspend(struct usb_interface *intf, pm_message_t message)
-{
- struct asus_oled_dev *odev;
-
- odev = usb_get_intfdata(intf);
- if (!odev)
- return -ENODEV;
-
- odev->enabled_post_resume = odev->enabled;
- enable_oled(odev, 0);
-
- return 0;
-}
-
-static int asus_oled_resume(struct usb_interface *intf)
-{
- struct asus_oled_dev *odev;
-
- odev = usb_get_intfdata(intf);
- if (!odev)
- return -ENODEV;
-
- enable_oled(odev, odev->enabled_post_resume);
-
- return 0;
-}
-#else
-#define asus_oled_suspend NULL
-#define asus_oled_resume NULL
-#endif
-
-static struct usb_driver oled_driver = {
- .name = ASUS_OLED_NAME,
- .probe = asus_oled_probe,
- .disconnect = asus_oled_disconnect,
- .id_table = id_table,
- .suspend = asus_oled_suspend,
- .resume = asus_oled_resume,
-};
-
-static CLASS_ATTR_STRING(version, S_IRUGO,
- ASUS_OLED_UNDERSCORE_NAME " " ASUS_OLED_VERSION);
-
-static int __init asus_oled_init(void)
-{
- int retval = 0;
- oled_class = class_create(THIS_MODULE, ASUS_OLED_UNDERSCORE_NAME);
-
- if (IS_ERR(oled_class)) {
- pr_err("Error creating " ASUS_OLED_UNDERSCORE_NAME " class\n");
- return PTR_ERR(oled_class);
- }
-
- retval = class_create_file(oled_class, &class_attr_version.attr);
- if (retval) {
- pr_err("Error creating class version file\n");
- goto error;
- }
-
- retval = usb_register(&oled_driver);
-
- if (retval) {
- pr_err("usb_register failed. Error number %d\n", retval);
- goto error;
- }
-
- return retval;
-
-error:
- class_destroy(oled_class);
- return retval;
-}
-
-static void __exit asus_oled_exit(void)
-{
- usb_deregister(&oled_driver);
- class_remove_file(oled_class, &class_attr_version.attr);
- class_destroy(oled_class);
-}
-
-module_init(asus_oled_init);
-module_exit(asus_oled_exit);
-
diff --git a/drivers/staging/asus_oled/linux.txt b/drivers/staging/asus_oled/linux.txt
deleted file mode 100644
index dc758b0..0000000
--- a/drivers/staging/asus_oled/linux.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-<s:74x32>
-0
-0
-00000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000
-01111111111000000000000000000000000000000000000000000000000000000000000000
-00011111100000000000000111000000000000000000000000000000000000000000000000
-00001111000000000000000111000000000000000000000000000000000000000000000000
-00001111000000000000000111000000000000000000000000000000000000000000000000
-00001111000000000000000000000000000000000000000000000000000000000000000000
-00001111000000000000000000000000000000000000000000000000000000000000000000
-00001111000000000000011100001111111111100000111110011111100011111101111000
-00001111000000000000111110000011111000111000111110000111100001111000110000
-00001111000000000001101110000011111000111000001111000111100000111100100000
-00001111000000000001001110000011110000111100001111000111100000111101100000
-00001111000000000100001110000011110000111100001111000111100000011111000000
-00001111000000000100011110000011110000111100001111000111100000001111000000
-00001111000000000100011110000011110000111100001111000111100000001111000000
-00001111000000000100011100100011110000111100001111000111100000001111100000
-00001111000000001100111100100011110000111100001111000111100000001111110000
-00001111000000001100111101100011110000111100001111000111100000011011110000
-00001111000000011100111101000011110000111100001111000111100000010001111000
-00011111000001111100111011000011110000111100001111001111100000110000111100
-11111111111111111100011110001111111011111110000111110111111011111011111110
-00000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000
-0
-0
-0
-0
diff --git a/drivers/staging/asus_oled/linux_f.txt b/drivers/staging/asus_oled/linux_f.txt
deleted file mode 100644
index b4bb85c..0000000
--- a/drivers/staging/asus_oled/linux_f.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-<f:128x16>
-00000000000000000000000000000000001111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000001110000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000001110000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000011000111111111100001111001111100111110111000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000111100001111000110001111000111100011100010000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000001011100001111000111000111100111100001110110000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000011100001110000111000111100111100001111100000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000100011100001110000111000111100111100000111100000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000100011100001110000111000111100111100000111100000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000100111001001110000111000111100111100000111110000000000000000000000000000000000000
-00000000000000000000000000000000000011110000001100111011001110000111000111100111100000111110000000000000000000000000000000000000
-00000000000000000000000000000000000011110000001100111010001110000111000111100111100000100111000000000000000000000000000000000000
-00000000000000000000000000000000000011110000111100110110001110000111000111100111100001000011100000000000000000000000000000000000
-00000000000000000000000000000000001111111111111100111100111111011111100011110111110111101111110000000000000000000000000000000000
-
diff --git a/drivers/staging/asus_oled/linux_fr.txt b/drivers/staging/asus_oled/linux_fr.txt
deleted file mode 100644
index f88e2b3..0000000
--- a/drivers/staging/asus_oled/linux_fr.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-<f:128x32>
-00000000000000000000000000000000001111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000001110000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000001110000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000011000111111111100001111001111100111110111000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000111100001111000110001111000111100011100010000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000001011100001111000111000111100111100001110110000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000011100001110000111000111100111100001111100000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000100011100001110000111000111100111100000111100000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000100011100001110000111000111100111100000111100000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000100111001001110000111000111100111100000111110000000000000000000000000000000000000
-00000000000000000000000000000000000011110000001100111011001110000111000111100111100000111110000000000000000000000000000000000000
-00000000000000000000000000000000000011110000001100111010001110000111000111100111100000100111000000000000000000000000000000000000
-00000000000000000000000000000000000011110000111100110110001110000111000111100111100001000011100000000000000000000000000000000000
-00000000000000000000000000000000001111111111111100111100111111011111100011110111110111101111110000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000
-00000000000000000000000000001111000000000000000000000000000000000000000000000000000000000000001111000000000000000000000000000000
-00000000000000000000000000011111100000000000000000000000000000000000000000000000000000000000011111100000000000000000000000000000
-00000000000000000000000000011111100000000000000000000000000000000000000000000000000000000000011111100000000000000000000000000000
-00000000000000000000000000001111000000000000000000000000000000000000000000000000000000000000001111000000000000000000000000000000
-00000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
diff --git a/drivers/staging/asus_oled/tux.txt b/drivers/staging/asus_oled/tux.txt
deleted file mode 100644
index 9d20528..0000000
--- a/drivers/staging/asus_oled/tux.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-<s:32x32>
-00000000000001111111000000000000
-0000000000001 100000000000
-000000000001 10000000000
-000000000001 10000000000
-000000000001 10000000000
-000000000001 1 111 10000000000
-000000000001 1 1 1000000000
-000000000001 111 1000000000
-000000000001 111111 1000000000
-000000000001 111111 1000000000
-000000000001 1 1 100000000
-00000000001 11 100000000
-00000000001 11111111 10000000
-0000000001 11111111 1000000
-000000001 111111111 1000000
-000000001 1111111111 100000
-00000001 11111111111 100000
-00000001 111111111111 10000
-0000001 111111111111 10000
-0000001 111111111111 10000
-0000001 111111111111 10000
-0000001 111111111111 10000
-000000011 11111111111 10000
-000011 11 11111111111 100000
-0001 1111 111111111111111 1000
-001 1111111 11111111111111 1000
-001 1111111 1111111 111111 100
-001 11111111 111111 1111111 10
-001 11111111 11111 100
-001 1111111 111 11100
-000111 111 11111 11 100000
-000000111 111111111 1000000
diff --git a/drivers/staging/asus_oled/tux_r.txt b/drivers/staging/asus_oled/tux_r.txt
deleted file mode 100644
index fd81a3e..0000000
--- a/drivers/staging/asus_oled/tux_r.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-<r:32x32>
-00000000000001111111000000000000
-0000000000001 100000000000
-000000000001 10000000000
-000000000001 10000000000
-000000000001 10000000000
-000000000001 1 111 10000000000
-000000000001 1 1 1000000000
-000000000001 111 1000000000
-000000000001 111111 1000000000
-000000000001 111111 1000000000
-000000000001 1 1 100000000
-00000000001 11 100000000
-00000000001 11111111 10000000
-0000000001 11111111 1000000
-000000001 111111111 1000000
-000000001 1111111111 100000
-00000001 11111111111 100000
-00000001 111111111111 10000
-0000001 111111111111 10000
-0000001 111111111111 10000
-0000001 111111111111 10000
-0000001 111111111111 10000
-000000011 11111111111 10000
-000011 11 11111111111 100000
-0001 1111 111111111111111 1000
-001 1111111 11111111111111 1000
-001 1111111 1111111 111111 100
-001 11111111 111111 1111111 10
-001 11111111 11111 100
-001 1111111 111 11100
-000111 111 11111 11 100000
-000000111 111111111 1000000
diff --git a/drivers/staging/asus_oled/tux_r2.txt b/drivers/staging/asus_oled/tux_r2.txt
deleted file mode 100644
index e94d84e..0000000
--- a/drivers/staging/asus_oled/tux_r2.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-<r:256x32>
-000000000000000000000000000000000000000000000000000000000000011111110000000000000000000000000000000000000000000000000000000000000
-0000000000000000000000000000000000000000000000000000000000001 1000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000001 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000001 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000001 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000001 1 111 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000001 1 1 100000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000001 111 100000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111100000000000000111000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000001 111111 100000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000000000111000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000001 111111 100000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000000000111000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000001 1 1 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000001 11 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000001 11111111 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000000011100001111111111100000111110011111100011111101111000000000000000000000000000000
-0000000000000000000000000000000000000000000000000000000001 11111111 100000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000000111110000011111000111000111110000111100001111000110000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000001 111111111 100000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000001101110000011111000111000001111000111100000111100100000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000001 1111111111 10000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000001001110000011110000111100001111000111100000111101100000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000001 11111111111 10000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000100001110000011110000111100001111000111100000011111000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000001 111111111111 10000000000000000000000000000000000000000000000000000000000000000000000000000000000011110000000001000111100000111100001111000011110001111000000011110000000
-0000000000000000000000000000000000000000000000000000001 111111111111 1000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000100011110000011110000111100001111000111100000001111000000
-0000000000000000000000000000000000000000000000000000001 111111111111 1000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000100011100100011110000111100001111000111100000001111100000000000000000000000000000000
-0000000000000000000000000000000000000000000000000000001 111111111111 1000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000001100111100100011110000111100001111000111100000001111110000000000000000000000000000000
-0000000000000000000000000000000000000000000000000000001 111111111111 1000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000001100111101100011110000111100001111000111100000011011110000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000011 11111111111 1000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000011100111101000011110000111100001111000111100000010001111000000000000000000000000000000
-000000000000000000000000000000000000000000000000000011 11 11111111111 10000000000000000000000000000000000000000000000000000000000000000000000000000000000011111000001111100111011000011110000111100001111001111100000110000111100000000000000000000000000000
-0000000000000000000000000000000000000000000000000001 1111 111111111111111 100000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111100011110001111111011111110000111110111111011111011111110000000000000000000000000000
-000000000000000000000000000000000000000000000000001 1111111 11111111111111 100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000001 1111111 1111111 111111 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000001 11111111 111111 1111111 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000001 11111111 11111 1000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000001 1111111 111 111000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000111 111 11111 11 10
-000000000000000000000000000000000000000000000000000000111 111111111 10
diff --git a/drivers/staging/asus_oled/zig.txt b/drivers/staging/asus_oled/zig.txt
deleted file mode 100644
index 31573d8..0000000
--- a/drivers/staging/asus_oled/zig.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-<r:128x32>
-10000000000000000000000000000000000000000000000000000000000000011000000000000000000000000000000000000000000000000000000000000001
-01000000000000000000000000000000000000000000000000000000000000100100000000000000000000000000000000000000000000000000000000000010
-00100000000000000000000000000000000000000000000000000000000001000010000000000000000000000000000000000000000000000000000000000100
-00010000000000000000000000000000000000000000000000000000000010000001000000000000000000000000000000000000000000000000000000001000
-00001000000000000000000000000000000000000000000000000000000100000000100000000000000000000000000000000000000000000000000000010000
-00000100000000000000000000000000000000000000000000000000001000000000010000000000000000000000000000000000000000000000000000100000
-00000010000000000000000000000000000000000000000000000000010000000000001000000000000000000000000000000000000000000000000001000000
-00000001000000000000000000000000000000000000000000000000100000000000000100000000000000000000000000000000000000000000000010000000
-00000000100000000000000000000000000000000000000000000001000000000000000010000000000000000000000000000000000000000000000100000000
-00000000010000000000000000000000000000000000000000000010000000000000000001000000000000000000000000000000000000000000001000000000
-00000000001000000000000000000000000000000000000000000100000000000000000000100000000000000000000000000000000000000000010000000000
-00000000000100000000000000000000000000000000000000001000000000000000000000010000000000000000000000000000000000000000100000000000
-00000000000010000000000000000000000000000000000000010000000000000000000000001000000000000000000000000000000000000001000000000000
-00000000000001000000000000000000000000000000000000100000000000000000000000000100000000000000000000000000000000000010000000000000
-00000000000000100000000000000000000000000000000001000000000000000000000000000010000000000000000000000000000000000100000000000000
-00000000000000010000000000000000000000000000000010000000000000000000000000000001000000000000000000000000000000001000000000000000
-00000000000000001000000000000000000000000000000100000000000000000000000000000000100000000000000000000000000000010000000000000000
-00000000000000000100000000000000000000000000001000000000000000000000000000000000010000000000000000000000000000100000000000000000
-00000000000000000010000000000000000000000000010000000000000000000000000000000000001000000000000000000000000001000000000000000000
-00000000000000000001000000000000000000000000100000000000000000000000000000000000000100000000000000000000000010000000000000000000
-00000000000000000000100000000000000000000001000000000000000000000000000000000000000010000000000000000000000100000000000000000000
-00000000000000000000010000000000000000000010000000000000000000000000000000000000000001000000000000000000001000000000000000000000
-00000000000000000000001000000000000000000100000000000000000000000000000000000000000000100000000000000000010000000000000000000000
-00000000000000000000000100000000000000001000000000000000000000000000000000000000000000010000000000000000100000000000000000000000
-00000000000000000000000010000000000000010000000000000000000000000000000000000000000000001000000000000001000000000000000000000000
-00000000000000000000000001000000000000100000000000000000000000000000000000000000000000000100000000000010000000000000000000000000
-00000000000000000000000000100000000001000000000000000000000000000000000000000000000000000010000000000100000000000000000000000000
-00000000000000000000000000010000000010000000000000000000000000000000000000000000000000000001000000001000000000000000000000000000
-00000000000000000000000000001000000100000000000000000000000000000000000000000000000000000000100000010000000000000000000000000000
-00000000000000000000000000000100001000000000000000000000000000000000000000000000000000000000010000100000000000000000000000000000
-00000000000000000000000000000010010000000000000000000000000000000000000000000000000000000000001001000000000000000000000000000000
-00000000000000000000000000000001100000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index 35641e5..f91bc1f 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -13,7 +13,7 @@
* Returns - Zero(Success)
****************************************************************/
-static int bcm_char_open(struct inode *inode, struct file * filp)
+static int bcm_char_open(struct inode *inode, struct file *filp)
{
struct bcm_mini_adapter *Adapter = NULL;
struct bcm_tarang_data *pTarang = NULL;
@@ -1004,9 +1004,9 @@ cntrlEnd:
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
- len = min_t(ulong, IoBuffer.OutputLength, strlen(VER_FILEVERSION_STR) + 1);
+ len = min_t(ulong, IoBuffer.OutputLength, strlen(DRV_VERSION) + 1);
- if (copy_to_user(IoBuffer.OutputBuffer, VER_FILEVERSION_STR, len))
+ if (copy_to_user(IoBuffer.OutputBuffer, DRV_VERSION, len))
return -EFAULT;
Status = STATUS_SUCCESS;
break;
diff --git a/drivers/staging/bcm/DDRInit.c b/drivers/staging/bcm/DDRInit.c
index 8c696b6..f5eda96 100644
--- a/drivers/staging/bcm/DDRInit.c
+++ b/drivers/staging/bcm/DDRInit.c
@@ -828,13 +828,13 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
{
retval= rdmalt(Adapter,(UINT)0x0f000830, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue |= 0x44;
retval = wrmalt(Adapter,(UINT)0x0f000830, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, WRM, DBG_LVL_ALL, "%s:%d WRM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
}
@@ -972,7 +972,7 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
}
retval = wrmalt(Adapter, psDDRSetting->ulRegAddress, &value, sizeof(value));
if(STATUS_SUCCESS != retval) {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"%s:%d\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__);
break;
}
@@ -992,25 +992,25 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
uiResetValue = 0x01010001;
retval = wrmalt(Adapter, (UINT)0x0F007018, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x00040020;
retval = wrmalt(Adapter, (UINT)0x0F007094, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x01020101;
retval = wrmalt(Adapter, (UINT)0x0F00701c, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x01010000;
retval = wrmalt(Adapter, (UINT)0x0F007018, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
}
@@ -1026,34 +1026,34 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
{
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x1322a8;
retval = wrmalt(Adapter, (UINT)0x0f000d1c, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x132296;
retval = wrmalt(Adapter, (UINT)0x0f000d14, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
}
@@ -1062,34 +1062,34 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x6003229a;
retval = wrmalt(Adapter, (UINT)0x0f000d14, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x1322a8;
retval = wrmalt(Adapter, (UINT)0x0f000d1c, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
}
@@ -1235,28 +1235,28 @@ int download_ddr_settings(struct bcm_mini_adapter *Adapter)
retval = wrmalt(Adapter, ul_ddr_setting_load_addr, &value, sizeof(value));
if(retval)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"%s:%d\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__);
return retval;
}
- ul_ddr_setting_load_addr+=sizeof(ULONG);
+ ul_ddr_setting_load_addr += sizeof(ULONG);
/*signature */
value =(0x1d1e0dd0);
retval = wrmalt(Adapter, ul_ddr_setting_load_addr, &value, sizeof(value));
if(retval)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"%s:%d\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__);
return retval;
}
- ul_ddr_setting_load_addr+=sizeof(ULONG);
+ ul_ddr_setting_load_addr += sizeof(ULONG);
RegCount*=(sizeof(struct bcm_ddr_setting)/sizeof(ULONG));
while(RegCount && !retval)
{
value = psDDRSetting->ulRegAddress ;
retval = wrmalt( Adapter, ul_ddr_setting_load_addr, &value, sizeof(value));
- ul_ddr_setting_load_addr+=sizeof(ULONG);
+ ul_ddr_setting_load_addr += sizeof(ULONG);
if(!retval)
{
if(bOverrideSelfRefresh && (psDDRSetting->ulRegAddress == 0x0F007018))
@@ -1264,7 +1264,7 @@ int download_ddr_settings(struct bcm_mini_adapter *Adapter)
value = (psDDRSetting->ulRegValue |(1<<8));
if(STATUS_SUCCESS != wrmalt(Adapter, ul_ddr_setting_load_addr,
&value, sizeof(value))){
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"%s:%d\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__);
break;
}
}
@@ -1274,12 +1274,12 @@ int download_ddr_settings(struct bcm_mini_adapter *Adapter)
if(STATUS_SUCCESS != wrmalt(Adapter, ul_ddr_setting_load_addr ,
&value, sizeof(value))){
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"%s:%d\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__);
break;
}
}
}
- ul_ddr_setting_load_addr+=sizeof(ULONG);
+ ul_ddr_setting_load_addr += sizeof(ULONG);
RegCount--;
psDDRSetting++;
}
diff --git a/drivers/staging/bcm/InterfaceIdleMode.c b/drivers/staging/bcm/InterfaceIdleMode.c
index a1bf215..5347828 100644
--- a/drivers/staging/bcm/InterfaceIdleMode.c
+++ b/drivers/staging/bcm/InterfaceIdleMode.c
@@ -42,107 +42,95 @@ send to f/w with in 200 ms after the Idle/Shutdown req issued
*/
-int InterfaceIdleModeRespond(struct bcm_mini_adapter *Adapter, unsigned int* puiBuffer)
+int InterfaceIdleModeRespond(struct bcm_mini_adapter *Adapter, unsigned int *puiBuffer)
{
int status = STATUS_SUCCESS;
unsigned int uiRegRead = 0;
int bytes;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL,"SubType of Message :0x%X", ntohl(*puiBuffer));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "SubType of Message :0x%X", ntohl(*puiBuffer));
- if(ntohl(*puiBuffer) == GO_TO_IDLE_MODE_PAYLOAD)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL," Got GO_TO_IDLE_MODE_PAYLOAD(210) Msg Subtype");
- if(ntohl(*(puiBuffer+1)) == 0 )
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL,"Got IDLE MODE WAKE UP Response From F/W");
+ if (ntohl(*puiBuffer) == GO_TO_IDLE_MODE_PAYLOAD) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, " Got GO_TO_IDLE_MODE_PAYLOAD(210) Msg Subtype");
+ if (ntohl(*(puiBuffer+1)) == 0 ) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Got IDLE MODE WAKE UP Response From F/W");
- status = wrmalt (Adapter,SW_ABORT_IDLEMODE_LOC, &uiRegRead, sizeof(uiRegRead));
- if(status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "wrm failed while clearing Idle Mode Reg");
+ status = wrmalt (Adapter, SW_ABORT_IDLEMODE_LOC, &uiRegRead, sizeof(uiRegRead));
+ if (status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "wrm failed while clearing Idle Mode Reg");
return status;
}
- if(Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING)
- {
+ if (Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) {
uiRegRead = 0x00000000 ;
- status = wrmalt (Adapter,DEBUG_INTERRUPT_GENERATOR_REGISTOR, &uiRegRead, sizeof(uiRegRead));
- if(status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "wrm failed while clearing Idle Mode Reg");
+ status = wrmalt (Adapter, DEBUG_INTERRUPT_GENERATOR_REGISTOR, &uiRegRead, sizeof(uiRegRead));
+ if (status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "wrm failed while clearing Idle Mode Reg");
return status;
}
}
- //Below Register should not br read in case of Manual and Protocol Idle mode.
- else if(Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE)
- {
- //clear on read Register
+ /* Below Register should not be read in case of Manual and Protocol Idle mode */
+ else if (Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE) {
+ /* clear on read Register */
bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG0, &uiRegRead, sizeof(uiRegRead));
if (bytes < 0) {
status = bytes;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "rdm failed while clearing H/W Abort Reg0");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "rdm failed while clearing H/W Abort Reg0");
return status;
}
- //clear on read Register
+ /* clear on read Register */
bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG1, &uiRegRead, sizeof(uiRegRead));
if (bytes < 0) {
status = bytes;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "rdm failed while clearing H/W Abort Reg1");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "rdm failed while clearing H/W Abort Reg1");
return status;
}
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Device Up from Idle Mode");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Device Up from Idle Mode");
- // Set Idle Mode Flag to False and Clear IdleMode reg.
+ /* Set Idle Mode Flag to False and Clear IdleMode reg. */
Adapter->IdleMode = FALSE;
Adapter->bTriedToWakeUpFromlowPowerMode = FALSE;
wake_up(&Adapter->lowpower_mode_wait_queue);
- }
- else
- {
- if(TRUE == Adapter->IdleMode)
+ } else {
+ if (TRUE == Adapter->IdleMode)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL,"Device is already in Idle mode....");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Device is already in Idle mode....");
return status ;
}
uiRegRead = 0;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Got Req from F/W to go in IDLE mode \n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Got Req from F/W to go in IDLE mode \n");
- if (Adapter->chip_id== BCS220_2 ||
+ if (Adapter->chip_id == BCS220_2 ||
Adapter->chip_id == BCS220_2BC ||
- Adapter->chip_id== BCS250_BC ||
- Adapter->chip_id== BCS220_3)
- {
+ Adapter->chip_id == BCS250_BC ||
+ Adapter->chip_id == BCS220_3) {
bytes = rdmalt(Adapter, HPM_CONFIG_MSW, &uiRegRead, sizeof(uiRegRead));
if (bytes < 0) {
status = bytes;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "rdm failed while Reading HPM_CONFIG_LDO145 Reg 0\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "rdm failed while Reading HPM_CONFIG_LDO145 Reg 0\n");
return status;
}
uiRegRead |= (1<<17);
- status = wrmalt (Adapter,HPM_CONFIG_MSW, &uiRegRead, sizeof(uiRegRead));
- if(status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "wrm failed while clearing Idle Mode Reg\n");
+ status = wrmalt(Adapter, HPM_CONFIG_MSW, &uiRegRead, sizeof(uiRegRead));
+ if (status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "wrm failed while clearing Idle Mode Reg\n");
return status;
}
}
SendIdleModeResponse(Adapter);
}
- }
- else if(ntohl(*puiBuffer) == IDLE_MODE_SF_UPDATE_MSG)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "OverRiding Service Flow Params");
- OverrideServiceFlowParams(Adapter,puiBuffer);
+ } else if (ntohl(*puiBuffer) == IDLE_MODE_SF_UPDATE_MSG) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "OverRiding Service Flow Params");
+ OverrideServiceFlowParams(Adapter, puiBuffer);
}
return status;
}
@@ -152,46 +140,40 @@ static int InterfaceAbortIdlemode(struct bcm_mini_adapter *Adapter, unsigned int
int status = STATUS_SUCCESS;
unsigned int value;
unsigned int chip_id ;
- unsigned long timeout = 0 ,itr = 0;
+ unsigned long timeout = 0, itr = 0;
int lenwritten = 0;
- unsigned char aucAbortPattern[8]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
+ unsigned char aucAbortPattern[8] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
struct bcm_interface_adapter *psInterfaceAdapter = Adapter->pvInterfaceAdapter;
- //Abort Bus suspend if its already suspended
- if((TRUE == psInterfaceAdapter->bSuspended) && (TRUE == Adapter->bDoSuspend))
- {
+ /* Abort Bus suspend if it is already suspended */
+ if ((TRUE == psInterfaceAdapter->bSuspended) && (TRUE == Adapter->bDoSuspend)) {
status = usb_autopm_get_interface(psInterfaceAdapter->interface);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL,"Bus got wakeup..Aborting Idle mode... status:%d \n",status);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Bus got wakeup..Aborting Idle mode... status:%d \n", status);
}
- if((Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING)
+ if ((Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING)
||
- (Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE))
- {
- //write the SW abort pattern.
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Writing pattern<%d> to SW_ABORT_IDLEMODE_LOC\n", Pattern);
- status = wrmalt(Adapter,SW_ABORT_IDLEMODE_LOC, &Pattern, sizeof(Pattern));
- if(status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL,"WRM to Register SW_ABORT_IDLEMODE_LOC failed..");
+ (Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE)) {
+ /* write the SW abort pattern. */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Writing pattern<%d> to SW_ABORT_IDLEMODE_LOC\n", Pattern);
+ status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC, &Pattern, sizeof(Pattern));
+ if (status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "WRM to Register SW_ABORT_IDLEMODE_LOC failed..");
return status;
}
}
- if(Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING)
- {
+ if (Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) {
value = 0x80000000;
- status = wrmalt(Adapter,DEBUG_INTERRUPT_GENERATOR_REGISTOR, &value, sizeof(value));
- if(status)
+ status = wrmalt(Adapter, DEBUG_INTERRUPT_GENERATOR_REGISTOR, &value, sizeof(value));
+ if (status)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL,"WRM to DEBUG_INTERRUPT_GENERATOR_REGISTOR Register failed");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "WRM to DEBUG_INTERRUPT_GENERATOR_REGISTOR Register failed");
return status;
}
- }
- else if(Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE)
- {
+ } else if (Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE) {
/*
* Get a Interrupt Out URB and send 8 Bytes Down
* To be Done in Thread Context.
@@ -204,43 +186,32 @@ static int InterfaceAbortIdlemode(struct bcm_mini_adapter *Adapter, unsigned int
8,
&lenwritten,
5000);
- if(status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Sending Abort pattern down fails with status:%d..\n",status);
+ if (status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Sending Abort pattern down fails with status:%d..\n", status);
return status;
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "NOB Sent down :%d", lenwritten);
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "NOB Sent down :%d", lenwritten);
}
- //mdelay(25);
+ /* mdelay(25); */
- timeout= jiffies + msecs_to_jiffies(50) ;
- while( timeout > jiffies )
- {
+ timeout = jiffies + msecs_to_jiffies(50);
+ while (timeout > jiffies) {
itr++ ;
rdmalt(Adapter, CHIP_ID_REG, &chip_id, sizeof(UINT));
- if(0xbece3200==(chip_id&~(0xF0)))
- {
+ if (0xbece3200 == (chip_id&~(0xF0)))
chip_id = chip_id&~(0xF0);
- }
- if(chip_id == Adapter->chip_id)
+ if (chip_id == Adapter->chip_id)
break;
}
- if(timeout < jiffies )
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL,"Not able to read chip-id even after 25 msec");
- }
+ if (timeout < jiffies)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Not able to read chip-id even after 25 msec");
else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL,"Number of completed iteration to read chip-id :%lu", itr);
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Number of completed iteration to read chip-id :%lu", itr);
- status = wrmalt(Adapter,SW_ABORT_IDLEMODE_LOC, &Pattern, sizeof(status));
- if(status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"WRM to Register SW_ABORT_IDLEMODE_LOC failed..");
+ status = wrmalt(Adapter, SW_ABORT_IDLEMODE_LOC, &Pattern, sizeof(status));
+ if (status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "WRM to Register SW_ABORT_IDLEMODE_LOC failed..");
return status;
}
}
@@ -249,13 +220,10 @@ static int InterfaceAbortIdlemode(struct bcm_mini_adapter *Adapter, unsigned int
int InterfaceIdleModeWakeup(struct bcm_mini_adapter *Adapter)
{
ULONG Status = 0;
- if(Adapter->bTriedToWakeUpFromlowPowerMode)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Wake up already attempted.. ignoring\n");
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL,"Writing Low Power Mode Abort pattern to the Device\n");
+ if (Adapter->bTriedToWakeUpFromlowPowerMode) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Wake up already attempted.. ignoring\n");
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "Writing Low Power Mode Abort pattern to the Device\n");
Adapter->bTriedToWakeUpFromlowPowerMode = TRUE;
InterfaceAbortIdlemode(Adapter, Adapter->usIdleModePattern);
@@ -269,33 +237,30 @@ void InterfaceHandleShutdownModeWakeup(struct bcm_mini_adapter *Adapter)
INT Status = 0;
int bytes;
- if(Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING)
- {
- // clear idlemode interrupt.
+ if (Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING) {
+ /* clear idlemode interrupt. */
uiRegVal = 0;
- Status =wrmalt(Adapter,DEBUG_INTERRUPT_GENERATOR_REGISTOR, &uiRegVal, sizeof(uiRegVal));
- if(Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"WRM to DEBUG_INTERRUPT_GENERATOR_REGISTOR Failed with err :%d", Status);
+ Status = wrmalt(Adapter, DEBUG_INTERRUPT_GENERATOR_REGISTOR, &uiRegVal, sizeof(uiRegVal));
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,"WRM to DEBUG_INTERRUPT_GENERATOR_REGISTOR Failed with err :%d", Status);
return;
}
}
- else
- {
+ else {
- //clear Interrupt EP registers.
- bytes = rdmalt(Adapter,DEVICE_INT_OUT_EP_REG0, &uiRegVal, sizeof(uiRegVal));
+ /* clear Interrupt EP registers. */
+ bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG0, &uiRegVal, sizeof(uiRegVal));
if (bytes < 0) {
Status = bytes;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"RDM of DEVICE_INT_OUT_EP_REG0 failed with Err :%d", Status);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM of DEVICE_INT_OUT_EP_REG0 failed with Err :%d", Status);
return;
}
- bytes = rdmalt(Adapter,DEVICE_INT_OUT_EP_REG1, &uiRegVal, sizeof(uiRegVal));
+ bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG1, &uiRegVal, sizeof(uiRegVal));
if (bytes < 0) {
Status = bytes;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"RDM of DEVICE_INT_OUT_EP_REG1 failed with Err :%d", Status);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM of DEVICE_INT_OUT_EP_REG1 failed with Err :%d", Status);
return;
}
}
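
The chip-id polling loop above arms a 50 ms deadline with msecs_to_jiffies() and then compares jiffies directly, which is not wraparound-safe; the kernel's usual idiom is time_before()/time_after(). A minimal sketch of the same poll written with time_before(), assuming the rdmalt() helper and CHIP_ID_REG as used in the hunk (the function name and return convention are illustrative):

/* Illustrative sketch only -- not part of the patch. */
static int poll_chip_id(struct bcm_mini_adapter *Adapter, unsigned int expected)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(50);
	unsigned int chip_id = 0;

	while (time_before(jiffies, deadline)) {
		rdmalt(Adapter, CHIP_ID_REG, &chip_id, sizeof(chip_id));
		if ((chip_id & ~0xF0u) == 0xbece3200)
			chip_id &= ~0xF0u;	/* strip the revision nibble, as the hunk does */
		if (chip_id == expected)
			return 0;		/* device answered with its chip id */
	}
	return -ETIMEDOUT;
}
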
diff --git a/drivers/staging/bcm/Ioctl.h b/drivers/staging/bcm/Ioctl.h
index e253c08..797f862 100644
--- a/drivers/staging/bcm/Ioctl.h
+++ b/drivers/staging/bcm/Ioctl.h
@@ -175,7 +175,7 @@ struct bcm_flash2x_copy_section {
/*
* This section provide the complete bitmap of the Flash.
- * using this map lib/APP will isssue read/write command.
+ * Using this map, lib/APP will issue read/write commands.
* Fields are defined as :
* Bit [0] = section is present //1:present, 0: Not present
* Bit [1] = section is valid //1: valid, 0: not valid
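
The comment block above describes a per-section flash bitmap in which bit 0 reports presence and bit 1 reports validity. A small sketch of how an application might test those bits, assuming nothing beyond the bit positions stated in the comment (the macro and variable names are made up for illustration):

/* Illustrative sketch only -- bit positions taken from the comment above. */
#define BCM_SECTION_PRESENT	(1u << 0)	/* 1: present, 0: not present */
#define BCM_SECTION_VALID	(1u << 1)	/* 1: valid,   0: not valid   */

static int section_usable(unsigned int section_bits)
{
	return (section_bits & BCM_SECTION_PRESENT) &&
	       (section_bits & BCM_SECTION_VALID);
}
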
diff --git a/drivers/staging/bcm/LeakyBucket.c b/drivers/staging/bcm/LeakyBucket.c
index 877cf0b..bc48616 100644
--- a/drivers/staging/bcm/LeakyBucket.c
+++ b/drivers/staging/bcm/LeakyBucket.c
@@ -17,47 +17,42 @@
static VOID UpdateTokenCount(register struct bcm_mini_adapter *Adapter)
{
- ULONG liCurrentTime;
- INT i = 0;
+ ULONG liCurrentTime;
+ INT i = 0;
struct timeval tv;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL,
"=====>\n");
- if(NULL == Adapter)
- {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS,
+ if (NULL == Adapter) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS,
DBG_LVL_ALL, "Adapter found NULL!\n");
return;
}
do_gettimeofday(&tv);
- for(i = 0; i < NO_OF_QUEUES; i++)
- {
- if(TRUE == Adapter->PackInfo[i].bValid &&
- (1 == Adapter->PackInfo[i].ucDirection))
- {
+ for (i = 0; i < NO_OF_QUEUES; i++) {
+ if (TRUE == Adapter->PackInfo[i].bValid &&
+ (1 == Adapter->PackInfo[i].ucDirection)) {
liCurrentTime = ((tv.tv_sec-
Adapter->PackInfo[i].stLastUpdateTokenAt.tv_sec)*1000 +
(tv.tv_usec-Adapter->PackInfo[i].stLastUpdateTokenAt.tv_usec)/
1000);
- if(0!=liCurrentTime)
- {
+ if (0 != liCurrentTime) {
Adapter->PackInfo[i].uiCurrentTokenCount += (ULONG)
((Adapter->PackInfo[i].uiMaxAllowedRate) *
((ULONG)((liCurrentTime)))/1000);
memcpy(&Adapter->PackInfo[i].stLastUpdateTokenAt,
&tv, sizeof(struct timeval));
Adapter->PackInfo[i].liLastUpdateTokenAt = liCurrentTime;
- if((Adapter->PackInfo[i].uiCurrentTokenCount) >=
- Adapter->PackInfo[i].uiMaxBucketSize)
- {
+ if (Adapter->PackInfo[i].uiCurrentTokenCount >=
+ Adapter->PackInfo[i].uiMaxBucketSize) {
Adapter->PackInfo[i].uiCurrentTokenCount =
Adapter->PackInfo[i].uiMaxBucketSize;
}
}
}
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "<=====\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "<=====\n");
return;
}
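
UpdateTokenCount() above refills each active queue's token bucket by the allowed rate multiplied by the elapsed milliseconds divided by 1000, then clamps the count to the bucket size. A stripped-down sketch of just that arithmetic, with field names mapped in comments to the ones in the hunk (the struct itself is illustrative):

/* Illustrative sketch only -- not part of the patch. */
struct token_bucket {
	unsigned long tokens;		/* uiCurrentTokenCount */
	unsigned long rate;		/* uiMaxAllowedRate (units per second) */
	unsigned long bucket_size;	/* uiMaxBucketSize */
};

static void refill_tokens(struct token_bucket *b, unsigned long elapsed_ms)
{
	if (!elapsed_ms)
		return;
	b->tokens += b->rate * elapsed_ms / 1000;	/* rate * seconds elapsed */
	if (b->tokens >= b->bucket_size)
		b->tokens = b->bucket_size;		/* never overfill the bucket */
}
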
@@ -79,33 +74,26 @@ static VOID UpdateTokenCount(register struct bcm_mini_adapter *Adapter)
***********************************************************************/
static ULONG GetSFTokenCount(struct bcm_mini_adapter *Adapter, struct bcm_packet_info *psSF)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IsPacketAllowedForFlow ===>");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IsPacketAllowedForFlow ===>");
/* Validate the parameters */
- if(NULL == Adapter || (psSF < Adapter->PackInfo &&
- (uintptr_t)psSF > (uintptr_t) &Adapter->PackInfo[HiPriority]))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IPAFF: Got wrong Parameters:Adapter: %p, QIndex: %zd\n", Adapter, (psSF-Adapter->PackInfo));
+ if (NULL == Adapter || (psSF < Adapter->PackInfo &&
+ (uintptr_t)psSF > (uintptr_t) &Adapter->PackInfo[HiPriority])) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IPAFF: Got wrong Parameters:Adapter: %p, QIndex: %zd\n", Adapter, (psSF-Adapter->PackInfo));
return 0;
}
- if(FALSE != psSF->bValid && psSF->ucDirection)
- {
- if(0 != psSF->uiCurrentTokenCount)
- {
+ if (FALSE != psSF->bValid && psSF->ucDirection) {
+ if (0 != psSF->uiCurrentTokenCount) {
return psSF->uiCurrentTokenCount;
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "Not enough tokens in queue %zd Available %u\n",
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "Not enough tokens in queue %zd Available %u\n",
psSF-Adapter->PackInfo, psSF->uiCurrentTokenCount);
psSF->uiPendedLast = 1;
}
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IPAFF: Queue %zd not valid\n", psSF-Adapter->PackInfo);
}
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IPAFF: Queue %zd not valid\n", psSF-Adapter->PackInfo);
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IsPacketAllowedForFlow <===");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IsPacketAllowedForFlow <===");
return 0;
}
@@ -116,33 +104,29 @@ This function despatches packet from the specified queue.
*/
static INT SendPacketFromQueue(struct bcm_mini_adapter *Adapter,/**<Logical Adapter*/
struct bcm_packet_info *psSF, /**<Queue identifier*/
- struct sk_buff* Packet) /**<Pointer to the packet to be sent*/
+ struct sk_buff *Packet) /**<Pointer to the packet to be sent*/
{
- INT Status=STATUS_FAILURE;
- UINT uiIndex =0,PktLen = 0;
+ INT Status = STATUS_FAILURE;
+ UINT uiIndex = 0, PktLen = 0;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, SEND_QUEUE, DBG_LVL_ALL, "=====>");
- if(!Adapter || !Packet || !psSF)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, SEND_QUEUE, DBG_LVL_ALL, "Got NULL Adapter or Packet");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, SEND_QUEUE, DBG_LVL_ALL, "=====>");
+ if (!Adapter || !Packet || !psSF) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, SEND_QUEUE, DBG_LVL_ALL, "Got NULL Adapter or Packet");
return -EINVAL;
}
- if(psSF->liDrainCalculated==0)
- {
+ if (psSF->liDrainCalculated == 0)
psSF->liDrainCalculated = jiffies;
- }
- ///send the packet to the fifo..
+ /* send the packet to the fifo.. */
PktLen = Packet->len;
Status = SetupNextSend(Adapter, Packet, psSF->usVCID_Value);
- if(Status == 0)
- {
- for(uiIndex = 0 ; uiIndex < MIBS_MAX_HIST_ENTRIES ; uiIndex++)
- { if((PktLen <= MIBS_PKTSIZEHIST_RANGE*(uiIndex+1)) && (PktLen > MIBS_PKTSIZEHIST_RANGE*(uiIndex)))
+ if (Status == 0) {
+ for (uiIndex = 0; uiIndex < MIBS_MAX_HIST_ENTRIES; uiIndex++) {
+ if ((PktLen <= MIBS_PKTSIZEHIST_RANGE*(uiIndex+1)) && (PktLen > MIBS_PKTSIZEHIST_RANGE*(uiIndex)))
Adapter->aTxPktSizeHist[uiIndex]++;
}
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, SEND_QUEUE, DBG_LVL_ALL, "<=====");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, SEND_QUEUE, DBG_LVL_ALL, "<=====");
return Status;
}
@@ -160,107 +144,93 @@ static INT SendPacketFromQueue(struct bcm_mini_adapter *Adapter,/**<Logical Adap
****************************************************************************/
static VOID CheckAndSendPacketFromIndex(struct bcm_mini_adapter *Adapter, struct bcm_packet_info *psSF)
{
- struct sk_buff *QueuePacket=NULL;
- char *pControlPacket = NULL;
- INT Status=0;
- int iPacketLen=0;
+ struct sk_buff *QueuePacket = NULL;
+ char *pControlPacket = NULL;
+ INT Status = 0;
+ int iPacketLen = 0;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "%zd ====>", (psSF-Adapter->PackInfo));
- if((psSF != &Adapter->PackInfo[HiPriority]) && Adapter->LinkUpStatus && atomic_read(&psSF->uiPerSFTxResourceCount))//Get data packet
- {
- if(!psSF->ucDirection )
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "%zd ====>", (psSF-Adapter->PackInfo));
+ if ((psSF != &Adapter->PackInfo[HiPriority]) && Adapter->LinkUpStatus && atomic_read(&psSF->uiPerSFTxResourceCount)) { /* Get data packet */
+ if (!psSF->ucDirection)
return;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "UpdateTokenCount ");
- if(Adapter->IdleMode || Adapter->bPreparingForLowPowerMode)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "UpdateTokenCount ");
+ if (Adapter->IdleMode || Adapter->bPreparingForLowPowerMode)
return; /* in idle mode */
- // Check for Free Descriptors
- if(atomic_read(&Adapter->CurrNumFreeTxDesc) <= MINIMUM_PENDING_DESCRIPTORS)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, " No Free Tx Descriptor(%d) is available for Data pkt..",atomic_read(&Adapter->CurrNumFreeTxDesc));
- return ;
+ /* Check for Free Descriptors */
+ if (atomic_read(&Adapter->CurrNumFreeTxDesc) <= MINIMUM_PENDING_DESCRIPTORS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, " No Free Tx Descriptor(%d) is available for Data pkt..", atomic_read(&Adapter->CurrNumFreeTxDesc));
+ return;
}
spin_lock_bh(&psSF->SFQueueLock);
- QueuePacket=psSF->FirstTxQueue;
+ QueuePacket = psSF->FirstTxQueue;
- if(QueuePacket)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Dequeuing Data Packet");
+ if (QueuePacket) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Dequeuing Data Packet");
- if(psSF->bEthCSSupport)
+ if (psSF->bEthCSSupport)
iPacketLen = QueuePacket->len;
else
iPacketLen = QueuePacket->len-ETH_HLEN;
- iPacketLen<<=3;
- if(iPacketLen <= GetSFTokenCount(Adapter, psSF))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Allowed bytes %d",
+ iPacketLen <<= 3;
+ if (iPacketLen <= GetSFTokenCount(Adapter, psSF)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Allowed bytes %d",
(iPacketLen >> 3));
- DEQUEUEPACKET(psSF->FirstTxQueue,psSF->LastTxQueue);
+ DEQUEUEPACKET(psSF->FirstTxQueue, psSF->LastTxQueue);
psSF->uiCurrentBytesOnHost -= (QueuePacket->len);
psSF->uiCurrentPacketsOnHost--;
atomic_dec(&Adapter->TotalPacketCount);
spin_unlock_bh(&psSF->SFQueueLock);
- Status = SendPacketFromQueue(Adapter, psSF, QueuePacket);
+ Status = SendPacketFromQueue(Adapter, psSF, QueuePacket);
psSF->uiPendedLast = FALSE;
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "For Queue: %zd\n", psSF-Adapter->PackInfo);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "\nAvailable Tokens = %d required = %d\n",
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "For Queue: %zd\n", psSF-Adapter->PackInfo);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "\nAvailable Tokens = %d required = %d\n",
psSF->uiCurrentTokenCount, iPacketLen);
- //this part indicates that because of non-availability of the tokens
- //pkt has not been send out hence setting the pending flag indicating the host to send it out
- //first next iteration .
+ /*
+ * Because tokens are not available, the pkt has not been sent out;
+ * set the pending flag so the host sends it out first in the next
+ * iteration.
+ */
psSF->uiPendedLast = TRUE;
spin_unlock_bh(&psSF->SFQueueLock);
}
- }
- else
- {
+ } else {
spin_unlock_bh(&psSF->SFQueueLock);
}
- }
- else
- {
-
- if((atomic_read(&Adapter->CurrNumFreeTxDesc) > 0 ) &&
- (atomic_read(&Adapter->index_rd_txcntrlpkt) !=
- atomic_read(&Adapter->index_wr_txcntrlpkt))
- )
- {
+ } else {
+
+ if ((atomic_read(&Adapter->CurrNumFreeTxDesc) > 0) &&
+ (atomic_read(&Adapter->index_rd_txcntrlpkt) !=
+ atomic_read(&Adapter->index_wr_txcntrlpkt))) {
pControlPacket = Adapter->txctlpacket
[(atomic_read(&Adapter->index_rd_txcntrlpkt)%MAX_CNTRL_PKTS)];
- if(pControlPacket)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Sending Control packet");
+ if (pControlPacket) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Sending Control packet");
Status = SendControlPacket(Adapter, pControlPacket);
- if(STATUS_SUCCESS==Status)
- {
+ if (STATUS_SUCCESS == Status) {
spin_lock_bh(&psSF->SFQueueLock);
psSF->NumOfPacketsSent++;
- psSF->uiSentBytes+=((struct bcm_leader *)pControlPacket)->PLength;
+ psSF->uiSentBytes += ((struct bcm_leader *)pControlPacket)->PLength;
psSF->uiSentPackets++;
atomic_dec(&Adapter->TotalPacketCount);
psSF->uiCurrentBytesOnHost -= ((struct bcm_leader *)pControlPacket)->PLength;
psSF->uiCurrentPacketsOnHost--;
atomic_inc(&Adapter->index_rd_txcntrlpkt);
spin_unlock_bh(&psSF->SFQueueLock);
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "SendControlPacket Failed\n");
}
- else
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "SendControlPacket Failed\n");
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, " Control Pkt is not available, Indexing is wrong....");
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, " Control Pkt is not available, Indexing is wrong....");
}
- }
+ }
}
}
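
One detail in the data path above: the dequeued packet length is shifted left by three (bytes to bits) before being compared with the flow's token count returned by GetSFTokenCount(), so tokens are accounted in bits. A small sketch of that admission check (names are illustrative; ETH_HLEN is the usual kernel constant):

/* Illustrative sketch only -- not part of the patch. */
static bool packet_fits_tokens(unsigned int pkt_len_bytes, bool eth_cs,
			       unsigned long tokens)
{
	unsigned int len = eth_cs ? pkt_len_bytes : pkt_len_bytes - ETH_HLEN;

	return ((unsigned long)len << 3) <= tokens;	/* bytes -> bits, as iPacketLen <<= 3 */
}
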
@@ -277,79 +247,71 @@ static VOID CheckAndSendPacketFromIndex(struct bcm_mini_adapter *Adapter, struct
********************************************************************/
VOID transmit_packets(struct bcm_mini_adapter *Adapter)
{
- UINT uiPrevTotalCount = 0;
+ UINT uiPrevTotalCount = 0;
int iIndex = 0;
- BOOLEAN exit_flag = TRUE ;
+ BOOLEAN exit_flag = TRUE;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "=====>");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "=====>");
- if(NULL == Adapter)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX,TX_PACKETS, DBG_LVL_ALL, "Got NULL Adapter");
+ if (NULL == Adapter) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Got NULL Adapter");
return;
}
- if(Adapter->device_removed == TRUE)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Device removed");
+ if (Adapter->device_removed == TRUE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Device removed");
return;
}
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "\nUpdateTokenCount ====>\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "\nUpdateTokenCount ====>\n");
UpdateTokenCount(Adapter);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "\nPruneQueueAllSF ====>\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "\nPruneQueueAllSF ====>\n");
PruneQueueAllSF(Adapter);
uiPrevTotalCount = atomic_read(&Adapter->TotalPacketCount);
- for(iIndex=HiPriority;iIndex>=0;iIndex--)
- {
- if( !uiPrevTotalCount || (TRUE == Adapter->device_removed))
+ for (iIndex = HiPriority; iIndex >= 0; iIndex--) {
+ if (!uiPrevTotalCount || (TRUE == Adapter->device_removed))
break;
- if(Adapter->PackInfo[iIndex].bValid &&
- Adapter->PackInfo[iIndex].uiPendedLast &&
- Adapter->PackInfo[iIndex].uiCurrentBytesOnHost)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Calling CheckAndSendPacketFromIndex..");
+ if (Adapter->PackInfo[iIndex].bValid &&
+ Adapter->PackInfo[iIndex].uiPendedLast &&
+ Adapter->PackInfo[iIndex].uiCurrentBytesOnHost) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Calling CheckAndSendPacketFromIndex..");
CheckAndSendPacketFromIndex(Adapter, &Adapter->PackInfo[iIndex]);
uiPrevTotalCount--;
}
}
- while(uiPrevTotalCount > 0 && !Adapter->device_removed)
- {
- exit_flag = TRUE ;
- //second iteration to parse non-pending queues
- for(iIndex=HiPriority;iIndex>=0;iIndex--)
- {
- if( !uiPrevTotalCount || (TRUE == Adapter->device_removed))
- break;
-
- if(Adapter->PackInfo[iIndex].bValid &&
- Adapter->PackInfo[iIndex].uiCurrentBytesOnHost &&
- !Adapter->PackInfo[iIndex].uiPendedLast )
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Calling CheckAndSendPacketFromIndex..");
+ while (uiPrevTotalCount > 0 && !Adapter->device_removed) {
+ exit_flag = TRUE;
+ /* second iteration to parse non-pending queues */
+ for (iIndex = HiPriority; iIndex >= 0; iIndex--) {
+ if (!uiPrevTotalCount || (TRUE == Adapter->device_removed))
+ break;
+
+ if (Adapter->PackInfo[iIndex].bValid &&
+ Adapter->PackInfo[iIndex].uiCurrentBytesOnHost &&
+ !Adapter->PackInfo[iIndex].uiPendedLast) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Calling CheckAndSendPacketFromIndex..");
CheckAndSendPacketFromIndex(Adapter, &Adapter->PackInfo[iIndex]);
uiPrevTotalCount--;
exit_flag = FALSE;
}
}
- if(Adapter->IdleMode || Adapter->bPreparingForLowPowerMode)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "In Idle Mode\n");
+ if (Adapter->IdleMode || Adapter->bPreparingForLowPowerMode) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "In Idle Mode\n");
break;
}
- if(exit_flag == TRUE )
- break ;
- }/* end of inner while loop */
+ if (exit_flag == TRUE)
+ break;
+ } /* end of inner while loop */
- update_per_cid_rx (Adapter);
+ update_per_cid_rx(Adapter);
Adapter->txtransmit_running = 0;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "<======");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "<======");
}
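
transmit_packets() above drains the queues in two passes: first the flows whose previous attempt was pended, then the remaining flows, repeating until the snapshot of the total packet count is exhausted, no queue makes progress, or the device goes idle. A compact sketch of that control flow, reusing the PackInfo field names from the hunk (the wrapper function itself is illustrative):

/* Illustrative sketch only -- not part of the patch. */
static void drain_two_pass(struct bcm_mini_adapter *Adapter, unsigned int snapshot)
{
	int i;
	bool progress;

	/* pass 1: queues whose previous attempt was pended */
	for (i = HiPriority; i >= 0 && snapshot; i--)
		if (Adapter->PackInfo[i].bValid &&
		    Adapter->PackInfo[i].uiPendedLast &&
		    Adapter->PackInfo[i].uiCurrentBytesOnHost) {
			CheckAndSendPacketFromIndex(Adapter, &Adapter->PackInfo[i]);
			snapshot--;
		}

	/* pass 2, repeated: non-pended queues, until nothing makes progress */
	do {
		progress = false;
		for (i = HiPriority; i >= 0 && snapshot; i--)
			if (Adapter->PackInfo[i].bValid &&
			    Adapter->PackInfo[i].uiCurrentBytesOnHost &&
			    !Adapter->PackInfo[i].uiPendedLast) {
				CheckAndSendPacketFromIndex(Adapter, &Adapter->PackInfo[i]);
				snapshot--;
				progress = true;
			}
		if (Adapter->IdleMode || Adapter->bPreparingForLowPowerMode)
			break;		/* device went idle */
	} while (snapshot && progress);
}
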
diff --git a/drivers/staging/bcm/Misc.c b/drivers/staging/bcm/Misc.c
index d23eeeb..4cfc2c3 100644
--- a/drivers/staging/bcm/Misc.c
+++ b/drivers/staging/bcm/Misc.c
@@ -210,7 +210,7 @@ exit_download:
* @ingroup ctrl_pkt_functions
* This function copies the contents of given buffer
* to the control packet and queues it for transmission.
- * @note Do not acquire the spinock, as it it already acquired.
+ * @note Do not acquire the spinlock, as it is already acquired.
* @return SUCCESS/FAILURE.
* Arguments:
* Logical Adapter
diff --git a/drivers/staging/bcm/Qos.c b/drivers/staging/bcm/Qos.c
index 8d142a5..2d4a77c 100644
--- a/drivers/staging/bcm/Qos.c
+++ b/drivers/staging/bcm/Qos.c
@@ -4,11 +4,11 @@ This file contains the routines related to Quality of Service.
*/
#include "headers.h"
-static void EThCSGetPktInfo(struct bcm_mini_adapter *Adapter,PVOID pvEthPayload, struct bcm_eth_packet_info *pstEthCsPktInfo);
-static BOOLEAN EThCSClassifyPkt(struct bcm_mini_adapter *Adapter,struct sk_buff* skb, struct bcm_eth_packet_info *pstEthCsPktInfo,struct bcm_classifier_rule *pstClassifierRule, B_UINT8 EthCSCupport);
+static void EThCSGetPktInfo(struct bcm_mini_adapter *Adapter, PVOID pvEthPayload, struct bcm_eth_packet_info *pstEthCsPktInfo);
+static BOOLEAN EThCSClassifyPkt(struct bcm_mini_adapter *Adapter, struct sk_buff *skb, struct bcm_eth_packet_info *pstEthCsPktInfo, struct bcm_classifier_rule *pstClassifierRule, B_UINT8 EthCSCupport);
static USHORT IpVersion4(struct bcm_mini_adapter *Adapter, struct iphdr *iphd,
- struct bcm_classifier_rule *pstClassifierRule );
+ struct bcm_classifier_rule *pstClassifierRule);
static VOID PruneQueue(struct bcm_mini_adapter *Adapter, INT iIndex);
@@ -20,30 +20,30 @@ static VOID PruneQueue(struct bcm_mini_adapter *Adapter, INT iIndex);
* matches with that of Queue.
*
* Parameters - pstClassifierRule: Pointer to the packet info structure.
-* - ulSrcIP : Source IP address from the packet.
+* - ulSrcIP : Source IP address from the packet.
*
* Returns - TRUE(If address matches) else FAIL .
*********************************************************************/
-BOOLEAN MatchSrcIpAddress(struct bcm_classifier_rule *pstClassifierRule,ULONG ulSrcIP)
+BOOLEAN MatchSrcIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulSrcIP)
{
- UCHAR ucLoopIndex=0;
-
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- ulSrcIP=ntohl(ulSrcIP);
- if(0 == pstClassifierRule->ucIPSourceAddressLength)
- return TRUE;
- for(ucLoopIndex=0; ucLoopIndex < (pstClassifierRule->ucIPSourceAddressLength);ucLoopIndex++)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Src Ip Address Mask:0x%x PacketIp:0x%x and Classification:0x%x", (UINT)pstClassifierRule->stSrcIpAddress.ulIpv4Mask[ucLoopIndex], (UINT)ulSrcIP, (UINT)pstClassifierRule->stSrcIpAddress.ulIpv6Addr[ucLoopIndex]);
- if((pstClassifierRule->stSrcIpAddress.ulIpv4Mask[ucLoopIndex] & ulSrcIP)==
- (pstClassifierRule->stSrcIpAddress.ulIpv4Addr[ucLoopIndex] & pstClassifierRule->stSrcIpAddress.ulIpv4Mask[ucLoopIndex] ))
- {
- return TRUE;
- }
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Src Ip Address Not Matched");
- return FALSE;
+ UCHAR ucLoopIndex = 0;
+
+ struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
+
+ ulSrcIP = ntohl(ulSrcIP);
+ if (0 == pstClassifierRule->ucIPSourceAddressLength)
+ return TRUE;
+ for (ucLoopIndex = 0; ucLoopIndex < (pstClassifierRule->ucIPSourceAddressLength); ucLoopIndex++)
+ {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Src Ip Address Mask:0x%x PacketIp:0x%x and Classification:0x%x", (UINT)pstClassifierRule->stSrcIpAddress.ulIpv4Mask[ucLoopIndex], (UINT)ulSrcIP, (UINT)pstClassifierRule->stSrcIpAddress.ulIpv6Addr[ucLoopIndex]);
+ if ((pstClassifierRule->stSrcIpAddress.ulIpv4Mask[ucLoopIndex] & ulSrcIP) ==
+ (pstClassifierRule->stSrcIpAddress.ulIpv4Addr[ucLoopIndex] & pstClassifierRule->stSrcIpAddress.ulIpv4Mask[ucLoopIndex]))
+ {
+ return TRUE;
+ }
+ }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Src Ip Address Not Matched");
+ return FALSE;
}
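
MatchSrcIpAddress() above is a masked compare: a rule with no configured addresses matches everything, otherwise the packet address ANDed with each rule mask must equal the rule address ANDed with the same mask. A compact sketch of that predicate in plain C (names are illustrative):

/* Illustrative sketch only -- not part of the patch. */
static bool ip_matches_rule(unsigned long pkt_ip,	/* already in host order */
			    const unsigned long *rule_ip,
			    const unsigned long *rule_mask,
			    unsigned int n_addrs)
{
	unsigned int i;

	if (n_addrs == 0)
		return true;	/* no addresses configured: wildcard match */
	for (i = 0; i < n_addrs; i++)
		if ((rule_mask[i] & pkt_ip) == (rule_mask[i] & rule_ip[i]))
			return true;
	return false;
}
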
@@ -54,30 +54,30 @@ BOOLEAN MatchSrcIpAddress(struct bcm_classifier_rule *pstClassifierRule,ULONG ul
* matches with that of Queue.
*
* Parameters - pstClassifierRule: Pointer to the packet info structure.
-* - ulDestIP : Destination IP address from the packet.
+* - ulDestIP : Destination IP address from the packet.
*
* Returns - TRUE(If address matches) else FAIL .
*********************************************************************/
-BOOLEAN MatchDestIpAddress(struct bcm_classifier_rule *pstClassifierRule,ULONG ulDestIP)
+BOOLEAN MatchDestIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulDestIP)
{
- UCHAR ucLoopIndex=0;
+ UCHAR ucLoopIndex = 0;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- ulDestIP=ntohl(ulDestIP);
- if(0 == pstClassifierRule->ucIPDestinationAddressLength)
- return TRUE;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Destination Ip Address 0x%x 0x%x 0x%x ", (UINT)ulDestIP, (UINT)pstClassifierRule->stDestIpAddress.ulIpv4Mask[ucLoopIndex], (UINT)pstClassifierRule->stDestIpAddress.ulIpv4Addr[ucLoopIndex]);
-
- for(ucLoopIndex=0;ucLoopIndex<(pstClassifierRule->ucIPDestinationAddressLength);ucLoopIndex++)
- {
- if((pstClassifierRule->stDestIpAddress.ulIpv4Mask[ucLoopIndex] & ulDestIP)==
- (pstClassifierRule->stDestIpAddress.ulIpv4Addr[ucLoopIndex] & pstClassifierRule->stDestIpAddress.ulIpv4Mask[ucLoopIndex]))
- {
- return TRUE;
- }
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Destination Ip Address Not Matched");
- return FALSE;
+ ulDestIP = ntohl(ulDestIP);
+ if (0 == pstClassifierRule->ucIPDestinationAddressLength)
+ return TRUE;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Destination Ip Address 0x%x 0x%x 0x%x ", (UINT)ulDestIP, (UINT)pstClassifierRule->stDestIpAddress.ulIpv4Mask[ucLoopIndex], (UINT)pstClassifierRule->stDestIpAddress.ulIpv4Addr[ucLoopIndex]);
+
+ for (ucLoopIndex = 0; ucLoopIndex < (pstClassifierRule->ucIPDestinationAddressLength); ucLoopIndex++)
+ {
+ if ((pstClassifierRule->stDestIpAddress.ulIpv4Mask[ucLoopIndex] & ulDestIP) ==
+ (pstClassifierRule->stDestIpAddress.ulIpv4Addr[ucLoopIndex] & pstClassifierRule->stDestIpAddress.ulIpv4Mask[ucLoopIndex]))
+ {
+ return TRUE;
+ }
+ }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Destination Ip Address Not Matched");
+ return FALSE;
}
@@ -87,23 +87,23 @@ BOOLEAN MatchDestIpAddress(struct bcm_classifier_rule *pstClassifierRule,ULONG u
* Description - Checks the TOS from the packet matches with that of queue.
*
* Parameters - pstClassifierRule : Pointer to the packet info structure.
-* - ucTypeOfService: TOS from the packet.
+* - ucTypeOfService: TOS from the packet.
*
* Returns - TRUE(If address matches) else FAIL.
**************************************************************************/
-BOOLEAN MatchTos(struct bcm_classifier_rule *pstClassifierRule,UCHAR ucTypeOfService)
+BOOLEAN MatchTos(struct bcm_classifier_rule *pstClassifierRule, UCHAR ucTypeOfService)
{
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- if( 3 != pstClassifierRule->ucIPTypeOfServiceLength )
- return TRUE;
-
- if(((pstClassifierRule->ucTosMask & ucTypeOfService)<=pstClassifierRule->ucTosHigh) && ((pstClassifierRule->ucTosMask & ucTypeOfService)>=pstClassifierRule->ucTosLow))
- {
- return TRUE;
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Type Of Service Not Matched");
- return FALSE;
+ if (3 != pstClassifierRule->ucIPTypeOfServiceLength)
+ return TRUE;
+
+ if (((pstClassifierRule->ucTosMask & ucTypeOfService) <= pstClassifierRule->ucTosHigh) && ((pstClassifierRule->ucTosMask & ucTypeOfService) >= pstClassifierRule->ucTosLow))
+ {
+ return TRUE;
+ }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Type Of Service Not Matched");
+ return FALSE;
}
@@ -113,26 +113,26 @@ BOOLEAN MatchTos(struct bcm_classifier_rule *pstClassifierRule,UCHAR ucTypeOfSer
* Description - Checks the protocol from the packet matches with that of queue.
*
* Parameters - pstClassifierRule: Pointer to the packet info structure.
-* - ucProtocol : Protocol from the packet.
+* - ucProtocol : Protocol from the packet.
*
* Returns - TRUE(If address matches) else FAIL.
****************************************************************************/
-bool MatchProtocol(struct bcm_classifier_rule *pstClassifierRule,UCHAR ucProtocol)
+bool MatchProtocol(struct bcm_classifier_rule *pstClassifierRule, UCHAR ucProtocol)
{
- UCHAR ucLoopIndex=0;
+ UCHAR ucLoopIndex = 0;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- if(0 == pstClassifierRule->ucProtocolLength)
- return TRUE;
- for(ucLoopIndex=0;ucLoopIndex<pstClassifierRule->ucProtocolLength;ucLoopIndex++)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Protocol:0x%X Classification Protocol:0x%X",ucProtocol,pstClassifierRule->ucProtocol[ucLoopIndex]);
- if(pstClassifierRule->ucProtocol[ucLoopIndex]==ucProtocol)
- {
- return TRUE;
- }
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Protocol Not Matched");
- return FALSE;
+ if (0 == pstClassifierRule->ucProtocolLength)
+ return TRUE;
+ for (ucLoopIndex = 0; ucLoopIndex < pstClassifierRule->ucProtocolLength; ucLoopIndex++)
+ {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Protocol:0x%X Classification Protocol:0x%X", ucProtocol, pstClassifierRule->ucProtocol[ucLoopIndex]);
+ if (pstClassifierRule->ucProtocol[ucLoopIndex] == ucProtocol)
+ {
+ return TRUE;
+ }
+ }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Protocol Not Matched");
+ return FALSE;
}
@@ -142,29 +142,29 @@ bool MatchProtocol(struct bcm_classifier_rule *pstClassifierRule,UCHAR ucProtoco
* Description - Checks, Source port from the packet matches with that of queue.
*
* Parameters - pstClassifierRule: Pointer to the packet info structure.
-* - ushSrcPort : Source port from the packet.
+* - ushSrcPort : Source port from the packet.
*
* Returns - TRUE(If address matches) else FAIL.
***************************************************************************/
-bool MatchSrcPort(struct bcm_classifier_rule *pstClassifierRule,USHORT ushSrcPort)
+bool MatchSrcPort(struct bcm_classifier_rule *pstClassifierRule, USHORT ushSrcPort)
{
- UCHAR ucLoopIndex=0;
+ UCHAR ucLoopIndex = 0;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- if(0 == pstClassifierRule->ucSrcPortRangeLength)
- return TRUE;
- for(ucLoopIndex=0;ucLoopIndex<pstClassifierRule->ucSrcPortRangeLength;ucLoopIndex++)
- {
- if(ushSrcPort <= pstClassifierRule->usSrcPortRangeHi[ucLoopIndex] &&
- ushSrcPort >= pstClassifierRule->usSrcPortRangeLo[ucLoopIndex])
- {
- return TRUE;
- }
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Src Port: %x Not Matched ",ushSrcPort);
- return FALSE;
+ if (0 == pstClassifierRule->ucSrcPortRangeLength)
+ return TRUE;
+ for (ucLoopIndex = 0; ucLoopIndex < pstClassifierRule->ucSrcPortRangeLength; ucLoopIndex++)
+ {
+ if (ushSrcPort <= pstClassifierRule->usSrcPortRangeHi[ucLoopIndex] &&
+ ushSrcPort >= pstClassifierRule->usSrcPortRangeLo[ucLoopIndex])
+ {
+ return TRUE;
+ }
+ }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Src Port: %x Not Matched ", ushSrcPort);
+ return FALSE;
}
@@ -174,30 +174,30 @@ bool MatchSrcPort(struct bcm_classifier_rule *pstClassifierRule,USHORT ushSrcPor
* Description - Checks, Destination port from packet matches with that of queue.
*
* Parameters - pstClassifierRule: Pointer to the packet info structure.
-* - ushDestPort : Destination port from the packet.
+* - ushDestPort : Destination port from the packet.
*
* Returns - TRUE(If address matches) else FAIL.
***************************************************************************/
-bool MatchDestPort(struct bcm_classifier_rule *pstClassifierRule,USHORT ushDestPort)
+bool MatchDestPort(struct bcm_classifier_rule *pstClassifierRule, USHORT ushDestPort)
{
- UCHAR ucLoopIndex=0;
+ UCHAR ucLoopIndex = 0;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- if(0 == pstClassifierRule->ucDestPortRangeLength)
- return TRUE;
-
- for(ucLoopIndex=0;ucLoopIndex<pstClassifierRule->ucDestPortRangeLength;ucLoopIndex++)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Matching Port:0x%X 0x%X 0x%X",ushDestPort,pstClassifierRule->usDestPortRangeLo[ucLoopIndex],pstClassifierRule->usDestPortRangeHi[ucLoopIndex]);
-
- if(ushDestPort <= pstClassifierRule->usDestPortRangeHi[ucLoopIndex] &&
- ushDestPort >= pstClassifierRule->usDestPortRangeLo[ucLoopIndex])
- {
- return TRUE;
- }
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Dest Port: %x Not Matched",ushDestPort);
- return FALSE;
+ if (0 == pstClassifierRule->ucDestPortRangeLength)
+ return TRUE;
+
+ for (ucLoopIndex = 0; ucLoopIndex < pstClassifierRule->ucDestPortRangeLength; ucLoopIndex++)
+ {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Matching Port:0x%X 0x%X 0x%X", ushDestPort, pstClassifierRule->usDestPortRangeLo[ucLoopIndex], pstClassifierRule->usDestPortRangeHi[ucLoopIndex]);
+
+ if (ushDestPort <= pstClassifierRule->usDestPortRangeHi[ucLoopIndex] &&
+ ushDestPort >= pstClassifierRule->usDestPortRangeLo[ucLoopIndex])
+ {
+ return TRUE;
+ }
+ }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Dest Port: %x Not Matched", ushDestPort);
+ return FALSE;
}
/**
@ingroup tx_functions
@@ -209,95 +209,95 @@ static USHORT IpVersion4(struct bcm_mini_adapter *Adapter,
struct bcm_classifier_rule *pstClassifierRule)
{
struct bcm_transport_header *xprt_hdr = NULL;
- BOOLEAN bClassificationSucceed=FALSE;
+ BOOLEAN bClassificationSucceed = FALSE;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "========>");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "========>");
- xprt_hdr=(struct bcm_transport_header *)((PUCHAR)iphd + sizeof(struct iphdr));
+ xprt_hdr = (struct bcm_transport_header *)((PUCHAR)iphd + sizeof(struct iphdr));
do {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Trying to see Direction = %d %d",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Trying to see Direction = %d %d",
pstClassifierRule->ucDirection,
pstClassifierRule->usVCID_Value);
//Checking classifier validity
- if(!pstClassifierRule->bUsed || pstClassifierRule->ucDirection == DOWNLINK_DIR)
+ if (!pstClassifierRule->bUsed || pstClassifierRule->ucDirection == DOWNLINK_DIR)
{
bClassificationSucceed = FALSE;
break;
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "is IPv6 check!");
- if(pstClassifierRule->bIpv6Protocol)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "is IPv6 check!");
+ if (pstClassifierRule->bIpv6Protocol)
break;
//**************Checking IP header parameter**************************//
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Trying to match Source IP Address");
- if(FALSE == (bClassificationSucceed =
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Trying to match Source IP Address");
+ if (FALSE == (bClassificationSucceed =
MatchSrcIpAddress(pstClassifierRule, iphd->saddr)))
break;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Source IP Address Matched");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Source IP Address Matched");
- if(FALSE == (bClassificationSucceed =
+ if (FALSE == (bClassificationSucceed =
MatchDestIpAddress(pstClassifierRule, iphd->daddr)))
break;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Destination IP Address Matched");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Destination IP Address Matched");
- if(FALSE == (bClassificationSucceed =
+ if (FALSE == (bClassificationSucceed =
MatchTos(pstClassifierRule, iphd->tos)))
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "TOS Match failed\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "TOS Match failed\n");
break;
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "TOS Matched");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "TOS Matched");
- if(FALSE == (bClassificationSucceed =
- MatchProtocol(pstClassifierRule,iphd->protocol)))
+ if (FALSE == (bClassificationSucceed =
+ MatchProtocol(pstClassifierRule, iphd->protocol)))
break;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Protocol Matched");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Protocol Matched");
//if protocol is not TCP or UDP then no need of comparing source port and destination port
- if(iphd->protocol!=TCP && iphd->protocol!=UDP)
+ if (iphd->protocol != TCP && iphd->protocol != UDP)
break;
//******************Checking Transport Layer Header field if present *****************//
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Source Port %04x",
- (iphd->protocol==UDP)?xprt_hdr->uhdr.source:xprt_hdr->thdr.source);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Source Port %04x",
+ (iphd->protocol == UDP) ? xprt_hdr->uhdr.source : xprt_hdr->thdr.source);
- if(FALSE == (bClassificationSucceed =
+ if (FALSE == (bClassificationSucceed =
MatchSrcPort(pstClassifierRule,
- ntohs((iphd->protocol == UDP)?
- xprt_hdr->uhdr.source:xprt_hdr->thdr.source))))
+ ntohs((iphd->protocol == UDP) ?
+ xprt_hdr->uhdr.source : xprt_hdr->thdr.source))))
break;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Src Port Matched");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Src Port Matched");
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Destination Port %04x",
- (iphd->protocol==UDP)?xprt_hdr->uhdr.dest:
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Destination Port %04x",
+ (iphd->protocol == UDP) ? xprt_hdr->uhdr.dest :
xprt_hdr->thdr.dest);
- if(FALSE == (bClassificationSucceed =
+ if (FALSE == (bClassificationSucceed =
MatchDestPort(pstClassifierRule,
- ntohs((iphd->protocol == UDP)?
- xprt_hdr->uhdr.dest:xprt_hdr->thdr.dest))))
+ ntohs((iphd->protocol == UDP) ?
+ xprt_hdr->uhdr.dest : xprt_hdr->thdr.dest))))
break;
- } while(0);
+ } while (0);
- if(TRUE==bClassificationSucceed)
+ if (TRUE == bClassificationSucceed)
{
INT iMatchedSFQueueIndex = 0;
- iMatchedSFQueueIndex = SearchSfid(Adapter,pstClassifierRule->ulSFID);
- if(iMatchedSFQueueIndex >= NO_OF_QUEUES)
+ iMatchedSFQueueIndex = SearchSfid(Adapter, pstClassifierRule->ulSFID);
+ if (iMatchedSFQueueIndex >= NO_OF_QUEUES)
{
bClassificationSucceed = FALSE;
}
else
{
- if(FALSE == Adapter->PackInfo[iMatchedSFQueueIndex].bActive)
+ if (FALSE == Adapter->PackInfo[iMatchedSFQueueIndex].bActive)
{
bClassificationSucceed = FALSE;
}
}
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "IpVersion4 <==========");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "IpVersion4 <==========");
return bClassificationSucceed;
}
@@ -306,9 +306,9 @@ VOID PruneQueueAllSF(struct bcm_mini_adapter *Adapter)
{
UINT iIndex = 0;
- for(iIndex = 0; iIndex < HiPriority; iIndex++)
+ for (iIndex = 0; iIndex < HiPriority; iIndex++)
{
- if(!Adapter->PackInfo[iIndex].bValid)
+ if (!Adapter->PackInfo[iIndex].bValid)
continue;
PruneQueue(Adapter, iIndex);
@@ -325,15 +325,15 @@ less than or equal to max queue size for the queue.
*/
static VOID PruneQueue(struct bcm_mini_adapter *Adapter, INT iIndex)
{
- struct sk_buff* PacketToDrop=NULL;
+ struct sk_buff *PacketToDrop = NULL;
struct net_device_stats *netstats;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, PRUNE_QUEUE, DBG_LVL_ALL, "=====> Index %d",iIndex);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, PRUNE_QUEUE, DBG_LVL_ALL, "=====> Index %d", iIndex);
- if(iIndex == HiPriority)
+ if (iIndex == HiPriority)
return;
- if(!Adapter || (iIndex < 0) || (iIndex > HiPriority))
+ if (!Adapter || (iIndex < 0) || (iIndex > HiPriority))
return;
/* To Store the netdevice statistic */
@@ -341,26 +341,26 @@ static VOID PruneQueue(struct bcm_mini_adapter *Adapter, INT iIndex)
spin_lock_bh(&Adapter->PackInfo[iIndex].SFQueueLock);
- while(1)
+ while (1)
// while((UINT)Adapter->PackInfo[iIndex].uiCurrentPacketsOnHost >
// SF_MAX_ALLOWED_PACKETS_TO_BACKUP)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, PRUNE_QUEUE, DBG_LVL_ALL, "uiCurrentBytesOnHost:%x uiMaxBucketSize :%x",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, PRUNE_QUEUE, DBG_LVL_ALL, "uiCurrentBytesOnHost:%x uiMaxBucketSize :%x",
Adapter->PackInfo[iIndex].uiCurrentBytesOnHost,
Adapter->PackInfo[iIndex].uiMaxBucketSize);
PacketToDrop = Adapter->PackInfo[iIndex].FirstTxQueue;
- if(PacketToDrop == NULL)
+ if (PacketToDrop == NULL)
break;
- if((Adapter->PackInfo[iIndex].uiCurrentPacketsOnHost < SF_MAX_ALLOWED_PACKETS_TO_BACKUP) &&
+ if ((Adapter->PackInfo[iIndex].uiCurrentPacketsOnHost < SF_MAX_ALLOWED_PACKETS_TO_BACKUP) &&
((1000*(jiffies - *((B_UINT32 *)(PacketToDrop->cb)+SKB_CB_LATENCY_OFFSET))/HZ) <= Adapter->PackInfo[iIndex].uiMaxLatency))
break;
- if(PacketToDrop)
+ if (PacketToDrop)
{
if (netif_msg_tx_err(Adapter))
- pr_info(PFX "%s: tx queue %d overlimit\n",
+ pr_info(PFX "%s: tx queue %d overlimit\n",
Adapter->dev->name, iIndex);
netstats->tx_dropped++;
@@ -378,7 +378,7 @@ static VOID PruneQueue(struct bcm_mini_adapter *Adapter, INT iIndex)
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, PRUNE_QUEUE, DBG_LVL_ALL, "Dropped Bytes:%x Dropped Packets:%x",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, PRUNE_QUEUE, DBG_LVL_ALL, "Dropped Bytes:%x Dropped Packets:%x",
Adapter->PackInfo[iIndex].uiDroppedCountBytes,
Adapter->PackInfo[iIndex].uiDroppedCountPackets);
@@ -387,29 +387,29 @@ static VOID PruneQueue(struct bcm_mini_adapter *Adapter, INT iIndex)
spin_unlock_bh(&Adapter->PackInfo[iIndex].SFQueueLock);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, PRUNE_QUEUE, DBG_LVL_ALL, "TotalPacketCount:%x",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, PRUNE_QUEUE, DBG_LVL_ALL, "TotalPacketCount:%x",
atomic_read(&Adapter->TotalPacketCount));
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, PRUNE_QUEUE, DBG_LVL_ALL, "<=====");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, PRUNE_QUEUE, DBG_LVL_ALL, "<=====");
}
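
The drop condition in PruneQueue() above keeps a packet only while the backlog is under SF_MAX_ALLOWED_PACKETS_TO_BACKUP and its age, converted from jiffies to milliseconds, is within the flow's maximum latency. A minimal sketch of that test (names are illustrative; jiffies_to_msecs() is the stock helper for the conversion the hunk open-codes as 1000*delta/HZ):

/* Illustrative sketch only -- not part of the patch. */
static bool keep_queued_packet(unsigned long enqueued_at_jiffies,	/* from skb->cb, per the hunk */
			       unsigned int pkts_on_host,
			       unsigned int max_latency_ms)
{
	unsigned long age_ms = jiffies_to_msecs(jiffies - enqueued_at_jiffies);

	return pkts_on_host < SF_MAX_ALLOWED_PACKETS_TO_BACKUP &&
	       age_ms <= max_latency_ms;
}
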
VOID flush_all_queues(struct bcm_mini_adapter *Adapter)
{
INT iQIndex;
UINT uiTotalPacketLength;
- struct sk_buff* PacketToDrop=NULL;
+ struct sk_buff *PacketToDrop = NULL;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "=====>");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "=====>");
// down(&Adapter->data_packet_queue_lock);
- for(iQIndex=LowPriority; iQIndex<HiPriority; iQIndex++)
+ for (iQIndex = LowPriority; iQIndex < HiPriority; iQIndex++)
{
struct net_device_stats *netstats = &Adapter->dev->stats;
spin_lock_bh(&Adapter->PackInfo[iQIndex].SFQueueLock);
- while(Adapter->PackInfo[iQIndex].FirstTxQueue)
+ while (Adapter->PackInfo[iQIndex].FirstTxQueue)
{
PacketToDrop = Adapter->PackInfo[iQIndex].FirstTxQueue;
- if(PacketToDrop)
+ if (PacketToDrop)
{
uiTotalPacketLength = PacketToDrop->len;
netstats->tx_dropped++;
@@ -431,7 +431,7 @@ VOID flush_all_queues(struct bcm_mini_adapter *Adapter)
Adapter->PackInfo[iQIndex].uiDroppedCountBytes += uiTotalPacketLength;
Adapter->PackInfo[iQIndex].uiDroppedCountPackets++;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Dropped Bytes:%x Dropped Packets:%x",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Dropped Bytes:%x Dropped Packets:%x",
Adapter->PackInfo[iQIndex].uiDroppedCountBytes,
Adapter->PackInfo[iQIndex].uiDroppedCountPackets);
atomic_dec(&Adapter->TotalPacketCount);
@@ -439,30 +439,30 @@ VOID flush_all_queues(struct bcm_mini_adapter *Adapter)
spin_unlock_bh(&Adapter->PackInfo[iQIndex].SFQueueLock);
}
// up(&Adapter->data_packet_queue_lock);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "<=====");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "<=====");
}
-USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter,struct sk_buff* skb)
+USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter, struct sk_buff *skb)
{
- INT uiLoopIndex=0;
+ INT uiLoopIndex = 0;
struct bcm_classifier_rule *pstClassifierRule = NULL;
struct bcm_eth_packet_info stEthCsPktInfo;
PVOID pvEThPayload = NULL;
- struct iphdr *pIpHeader = NULL;
- INT uiSfIndex=0;
- USHORT usIndex=Adapter->usBestEffortQueueIndex;
- BOOLEAN bFragmentedPkt=FALSE,bClassificationSucceed=FALSE;
- USHORT usCurrFragment =0;
+ struct iphdr *pIpHeader = NULL;
+ INT uiSfIndex = 0;
+ USHORT usIndex = Adapter->usBestEffortQueueIndex;
+ BOOLEAN bFragmentedPkt = FALSE, bClassificationSucceed = FALSE;
+ USHORT usCurrFragment = 0;
struct bcm_tcp_header *pTcpHeader;
UCHAR IpHeaderLength;
UCHAR TcpHeaderLength;
pvEThPayload = skb->data;
- *((UINT32*) (skb->cb) +SKB_CB_TCPACK_OFFSET ) = 0;
- EThCSGetPktInfo(Adapter,pvEThPayload,&stEthCsPktInfo);
+ *((UINT32 *)(skb->cb) + SKB_CB_TCPACK_OFFSET) = 0;
+ EThCSGetPktInfo(Adapter, pvEThPayload, &stEthCsPktInfo);
- switch(stEthCsPktInfo.eNwpktEthFrameType)
+ switch (stEthCsPktInfo.eNwpktEthFrameType)
{
case eEth802LLCFrame:
{
@@ -497,75 +497,75 @@ USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter,struct sk_buff* skb)
}
}
- if(stEthCsPktInfo.eNwpktIPFrameType == eIPv4Packet)
+ if (stEthCsPktInfo.eNwpktIPFrameType == eIPv4Packet)
{
usCurrFragment = (ntohs(pIpHeader->frag_off) & IP_OFFSET);
- if((ntohs(pIpHeader->frag_off) & IP_MF) || usCurrFragment)
+ if ((ntohs(pIpHeader->frag_off) & IP_MF) || usCurrFragment)
bFragmentedPkt = TRUE;
- if(bFragmentedPkt)
+ if (bFragmentedPkt)
{
//Fragmented Packet. Get Frag Classifier Entry.
- pstClassifierRule = GetFragIPClsEntry(Adapter,pIpHeader->id, pIpHeader->saddr);
- if(pstClassifierRule)
+ pstClassifierRule = GetFragIPClsEntry(Adapter, pIpHeader->id, pIpHeader->saddr);
+ if (pstClassifierRule)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL,"It is next Fragmented pkt");
- bClassificationSucceed=TRUE;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "It is next Fragmented pkt");
+ bClassificationSucceed = TRUE;
}
- if(!(ntohs(pIpHeader->frag_off) & IP_MF))
+ if (!(ntohs(pIpHeader->frag_off) & IP_MF))
{
//Fragmented Last packet . Remove Frag Classifier Entry
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL,"This is the last fragmented Pkt");
- DelFragIPClsEntry(Adapter,pIpHeader->id, pIpHeader->saddr);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "This is the last fragmented Pkt");
+ DelFragIPClsEntry(Adapter, pIpHeader->id, pIpHeader->saddr);
}
}
}
- for(uiLoopIndex = MAX_CLASSIFIERS - 1; uiLoopIndex >= 0; uiLoopIndex--)
+ for (uiLoopIndex = MAX_CLASSIFIERS - 1; uiLoopIndex >= 0; uiLoopIndex--)
{
- if(bClassificationSucceed)
+ if (bClassificationSucceed)
break;
//Iterate through all classifiers which are already in order of priority
//to classify the packet until match found
do
{
- if(FALSE==Adapter->astClassifierTable[uiLoopIndex].bUsed)
+ if (FALSE == Adapter->astClassifierTable[uiLoopIndex].bUsed)
{
- bClassificationSucceed=FALSE;
+ bClassificationSucceed = FALSE;
break;
}
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Adapter->PackInfo[%d].bvalid=True\n",uiLoopIndex);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Adapter->PackInfo[%d].bvalid=True\n", uiLoopIndex);
- if(0 == Adapter->astClassifierTable[uiLoopIndex].ucDirection)
+ if (0 == Adapter->astClassifierTable[uiLoopIndex].ucDirection)
{
- bClassificationSucceed=FALSE;//cannot be processed for classification.
+ bClassificationSucceed = FALSE; /* cannot be processed for classification */
break; // it is a down link connection
}
pstClassifierRule = &Adapter->astClassifierTable[uiLoopIndex];
- uiSfIndex = SearchSfid(Adapter,pstClassifierRule->ulSFID);
+ uiSfIndex = SearchSfid(Adapter, pstClassifierRule->ulSFID);
if (uiSfIndex >= NO_OF_QUEUES) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Queue Not Valid. SearchSfid for this classifier Failed\n");
break;
}
- if(Adapter->PackInfo[uiSfIndex].bEthCSSupport)
+ if (Adapter->PackInfo[uiSfIndex].bEthCSSupport)
{
- if(eEthUnsupportedFrame==stEthCsPktInfo.eNwpktEthFrameType)
+ if (eEthUnsupportedFrame == stEthCsPktInfo.eNwpktEthFrameType)
{
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, " ClassifyPacket : Packet Not a Valid Supported Ethernet Frame \n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, " ClassifyPacket : Packet Not a Valid Supported Ethernet Frame\n");
bClassificationSucceed = FALSE;
break;
}
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Performing ETH CS Classification on Classifier Rule ID : %x Service Flow ID : %lx\n",pstClassifierRule->uiClassifierRuleIndex,Adapter->PackInfo[uiSfIndex].ulSFID);
- bClassificationSucceed = EThCSClassifyPkt(Adapter,skb,&stEthCsPktInfo,pstClassifierRule, Adapter->PackInfo[uiSfIndex].bEthCSSupport);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Performing ETH CS Classification on Classifier Rule ID : %x Service Flow ID : %lx\n", pstClassifierRule->uiClassifierRuleIndex, Adapter->PackInfo[uiSfIndex].ulSFID);
+ bClassificationSucceed = EThCSClassifyPkt(Adapter, skb, &stEthCsPktInfo, pstClassifierRule, Adapter->PackInfo[uiSfIndex].bEthCSSupport);
- if(!bClassificationSucceed)
+ if (!bClassificationSucceed)
{
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "ClassifyPacket : Ethernet CS Classification Failed\n");
break;
@@ -574,9 +574,9 @@ USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter,struct sk_buff* skb)
else // No ETH Supported on this SF
{
- if(eEthOtherFrame != stEthCsPktInfo.eNwpktEthFrameType)
+ if (eEthOtherFrame != stEthCsPktInfo.eNwpktEthFrameType)
{
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, " ClassifyPacket : Packet Not a 802.3 Ethernet Frame... hence not allowed over non-ETH CS SF \n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, " ClassifyPacket : Packet Not a 802.3 Ethernet Frame... hence not allowed over non-ETH CS SF\n");
bClassificationSucceed = FALSE;
break;
}
@@ -584,51 +584,51 @@ USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter,struct sk_buff* skb)
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Proceeding to IP CS Classification");
- if(Adapter->PackInfo[uiSfIndex].bIPCSSupport)
+ if (Adapter->PackInfo[uiSfIndex].bIPCSSupport)
{
- if(stEthCsPktInfo.eNwpktIPFrameType == eNonIPPacket)
+ if (stEthCsPktInfo.eNwpktIPFrameType == eNonIPPacket)
{
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, " ClassifyPacket : Packet is Not an IP Packet \n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, " ClassifyPacket : Packet is Not an IP Packet\n");
bClassificationSucceed = FALSE;
break;
}
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Dump IP Header : \n");
- DumpFullPacket((PUCHAR)pIpHeader,20);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Dump IP Header :\n");
+ DumpFullPacket((PUCHAR)pIpHeader, 20);
- if(stEthCsPktInfo.eNwpktIPFrameType == eIPv4Packet)
- bClassificationSucceed = IpVersion4(Adapter,pIpHeader,pstClassifierRule);
- else if(stEthCsPktInfo.eNwpktIPFrameType == eIPv6Packet)
- bClassificationSucceed = IpVersion6(Adapter,pIpHeader,pstClassifierRule);
+ if (stEthCsPktInfo.eNwpktIPFrameType == eIPv4Packet)
+ bClassificationSucceed = IpVersion4(Adapter, pIpHeader, pstClassifierRule);
+ else if (stEthCsPktInfo.eNwpktIPFrameType == eIPv6Packet)
+ bClassificationSucceed = IpVersion6(Adapter, pIpHeader, pstClassifierRule);
}
- }while(0);
+ } while (0);
}
- if(bClassificationSucceed == TRUE)
+ if (bClassificationSucceed == TRUE)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "CF id : %d, SF ID is =%lu",pstClassifierRule->uiClassifierRuleIndex, pstClassifierRule->ulSFID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "CF id : %d, SF ID is =%lu", pstClassifierRule->uiClassifierRuleIndex, pstClassifierRule->ulSFID);
//Store The matched Classifier in SKB
*((UINT32*)(skb->cb)+SKB_CB_CLASSIFICATION_OFFSET) = pstClassifierRule->uiClassifierRuleIndex;
- if((TCP == pIpHeader->protocol ) && !bFragmentedPkt && (ETH_AND_IP_HEADER_LEN + TCP_HEADER_LEN <= skb->len) )
+ if ((TCP == pIpHeader->protocol) && !bFragmentedPkt && (ETH_AND_IP_HEADER_LEN + TCP_HEADER_LEN <= skb->len))
{
IpHeaderLength = pIpHeader->ihl;
pTcpHeader = (struct bcm_tcp_header *)(((PUCHAR)pIpHeader)+(IpHeaderLength*4));
TcpHeaderLength = GET_TCP_HEADER_LEN(pTcpHeader->HeaderLength);
- if((pTcpHeader->ucFlags & TCP_ACK) &&
+ if ((pTcpHeader->ucFlags & TCP_ACK) &&
(ntohs(pIpHeader->tot_len) == (IpHeaderLength*4)+(TcpHeaderLength*4)))
{
- *((UINT32*) (skb->cb) +SKB_CB_TCPACK_OFFSET ) = TCP_ACK;
+ *((UINT32*) (skb->cb) + SKB_CB_TCPACK_OFFSET) = TCP_ACK;
}
}
usIndex = SearchSfid(Adapter, pstClassifierRule->ulSFID);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "index is =%d", usIndex);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "index is =%d", usIndex);
//If this is the first fragment of a Fragmented pkt, add this CF. Only This CF should be used for all other fragment of this Pkt.
- if(bFragmentedPkt && (usCurrFragment == 0))
+ if (bFragmentedPkt && (usCurrFragment == 0))
{
//First Fragment of Fragmented Packet. Create Frag CLS Entry
struct bcm_fragmented_packet_info stFragPktInfo;
@@ -637,77 +637,77 @@ USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter,struct sk_buff* skb)
stFragPktInfo.usIpIdentification = pIpHeader->id;
stFragPktInfo.pstMatchedClassifierEntry = pstClassifierRule;
stFragPktInfo.bOutOfOrderFragment = FALSE;
- AddFragIPClsEntry(Adapter,&stFragPktInfo);
+ AddFragIPClsEntry(Adapter, &stFragPktInfo);
}
}
- if(bClassificationSucceed)
+ if (bClassificationSucceed)
return usIndex;
else
return INVALID_QUEUE_INDEX;
}
-static BOOLEAN EthCSMatchSrcMACAddress(struct bcm_classifier_rule *pstClassifierRule,PUCHAR Mac)
+static BOOLEAN EthCSMatchSrcMACAddress(struct bcm_classifier_rule *pstClassifierRule, PUCHAR Mac)
{
- UINT i=0;
+ UINT i = 0;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- if(pstClassifierRule->ucEthCSSrcMACLen==0)
+ if (pstClassifierRule->ucEthCSSrcMACLen == 0)
return TRUE;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s \n",__FUNCTION__);
- for(i=0;i<MAC_ADDRESS_SIZE;i++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s\n", __FUNCTION__);
+ for (i = 0; i < MAC_ADDRESS_SIZE; i++)
{
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "SRC MAC[%x] = %x ClassifierRuleSrcMAC = %x Mask : %x\n",i,Mac[i],pstClassifierRule->au8EThCSSrcMAC[i],pstClassifierRule->au8EThCSSrcMACMask[i]);
- if((pstClassifierRule->au8EThCSSrcMAC[i] & pstClassifierRule->au8EThCSSrcMACMask[i])!=
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "SRC MAC[%x] = %x ClassifierRuleSrcMAC = %x Mask : %x\n", i, Mac[i], pstClassifierRule->au8EThCSSrcMAC[i], pstClassifierRule->au8EThCSSrcMACMask[i]);
+ if ((pstClassifierRule->au8EThCSSrcMAC[i] & pstClassifierRule->au8EThCSSrcMACMask[i]) !=
(Mac[i] & pstClassifierRule->au8EThCSSrcMACMask[i]))
return FALSE;
}
return TRUE;
}
-static BOOLEAN EthCSMatchDestMACAddress(struct bcm_classifier_rule *pstClassifierRule,PUCHAR Mac)
+static BOOLEAN EthCSMatchDestMACAddress(struct bcm_classifier_rule *pstClassifierRule, PUCHAR Mac)
{
- UINT i=0;
+ UINT i = 0;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- if(pstClassifierRule->ucEthCSDestMACLen==0)
+ if (pstClassifierRule->ucEthCSDestMACLen == 0)
return TRUE;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s \n",__FUNCTION__);
- for(i=0;i<MAC_ADDRESS_SIZE;i++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s\n", __FUNCTION__);
+ for (i = 0; i < MAC_ADDRESS_SIZE; i++)
{
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "SRC MAC[%x] = %x ClassifierRuleSrcMAC = %x Mask : %x\n",i,Mac[i],pstClassifierRule->au8EThCSDestMAC[i],pstClassifierRule->au8EThCSDestMACMask[i]);
- if((pstClassifierRule->au8EThCSDestMAC[i] & pstClassifierRule->au8EThCSDestMACMask[i])!=
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "SRC MAC[%x] = %x ClassifierRuleSrcMAC = %x Mask : %x\n", i, Mac[i], pstClassifierRule->au8EThCSDestMAC[i], pstClassifierRule->au8EThCSDestMACMask[i]);
+ if ((pstClassifierRule->au8EThCSDestMAC[i] & pstClassifierRule->au8EThCSDestMACMask[i]) !=
(Mac[i] & pstClassifierRule->au8EThCSDestMACMask[i]))
return FALSE;
}
return TRUE;
}
-static BOOLEAN EthCSMatchEThTypeSAP(struct bcm_classifier_rule *pstClassifierRule,struct sk_buff* skb, struct bcm_eth_packet_info *pstEthCsPktInfo)
+static BOOLEAN EthCSMatchEThTypeSAP(struct bcm_classifier_rule *pstClassifierRule, struct sk_buff* skb, struct bcm_eth_packet_info *pstEthCsPktInfo)
{
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- if((pstClassifierRule->ucEtherTypeLen==0)||
+ if ((pstClassifierRule->ucEtherTypeLen == 0) ||
(pstClassifierRule->au8EthCSEtherType[0] == 0))
return TRUE;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s SrcEtherType:%x CLS EtherType[0]:%x\n",__FUNCTION__,pstEthCsPktInfo->usEtherType,pstClassifierRule->au8EthCSEtherType[0]);
- if(pstClassifierRule->au8EthCSEtherType[0] == 1)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s SrcEtherType:%x CLS EtherType[0]:%x\n", __FUNCTION__, pstEthCsPktInfo->usEtherType, pstClassifierRule->au8EthCSEtherType[0]);
+ if (pstClassifierRule->au8EthCSEtherType[0] == 1)
{
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s CLS EtherType[1]:%x EtherType[2]:%x\n",__FUNCTION__,pstClassifierRule->au8EthCSEtherType[1],pstClassifierRule->au8EthCSEtherType[2]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s CLS EtherType[1]:%x EtherType[2]:%x\n", __FUNCTION__, pstClassifierRule->au8EthCSEtherType[1], pstClassifierRule->au8EthCSEtherType[2]);
- if(memcmp(&pstEthCsPktInfo->usEtherType,&pstClassifierRule->au8EthCSEtherType[1],2)==0)
+ if (memcmp(&pstEthCsPktInfo->usEtherType, &pstClassifierRule->au8EthCSEtherType[1], 2) == 0)
return TRUE;
else
return FALSE;
}
- if(pstClassifierRule->au8EthCSEtherType[0] == 2)
+ if (pstClassifierRule->au8EthCSEtherType[0] == 2)
{
- if(eEth802LLCFrame != pstEthCsPktInfo->eNwpktEthFrameType)
+ if (eEth802LLCFrame != pstEthCsPktInfo->eNwpktEthFrameType)
return FALSE;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s EthCS DSAP:%x EtherType[2]:%x\n",__FUNCTION__,pstEthCsPktInfo->ucDSAP,pstClassifierRule->au8EthCSEtherType[2]);
- if(pstEthCsPktInfo->ucDSAP == pstClassifierRule->au8EthCSEtherType[2])
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s EthCS DSAP:%x EtherType[2]:%x\n", __FUNCTION__, pstEthCsPktInfo->ucDSAP, pstClassifierRule->au8EthCSEtherType[2]);
+ if (pstEthCsPktInfo->ucDSAP == pstClassifierRule->au8EthCSEtherType[2])
return TRUE;
else
return FALSE;
@@ -718,27 +718,27 @@ static BOOLEAN EthCSMatchEThTypeSAP(struct bcm_classifier_rule *pstClassifierRul
}
-static BOOLEAN EthCSMatchVLANRules(struct bcm_classifier_rule *pstClassifierRule,struct sk_buff* skb, struct bcm_eth_packet_info *pstEthCsPktInfo)
+static BOOLEAN EthCSMatchVLANRules(struct bcm_classifier_rule *pstClassifierRule, struct sk_buff* skb, struct bcm_eth_packet_info *pstEthCsPktInfo)
{
BOOLEAN bClassificationSucceed = FALSE;
USHORT usVLANID;
B_UINT8 uPriority = 0;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s CLS UserPrio:%x CLS VLANID:%x\n",__FUNCTION__,ntohs(*((USHORT *)pstClassifierRule->usUserPriority)),pstClassifierRule->usVLANID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s CLS UserPrio:%x CLS VLANID:%x\n", __FUNCTION__, ntohs(*((USHORT *)pstClassifierRule->usUserPriority)), pstClassifierRule->usVLANID);
/* In case FW didn't receive the TLV, the priority field should be ignored */
- if(pstClassifierRule->usValidityBitMap & (1<<PKT_CLASSIFICATION_USER_PRIORITY_VALID))
+ if (pstClassifierRule->usValidityBitMap & (1<<PKT_CLASSIFICATION_USER_PRIORITY_VALID))
{
- if(pstEthCsPktInfo->eNwpktEthFrameType!=eEth802QVLANFrame)
+ if (pstEthCsPktInfo->eNwpktEthFrameType != eEth802QVLANFrame)
return FALSE;
uPriority = (ntohs(*(USHORT *)(skb->data + sizeof(struct bcm_eth_header))) & 0xF000) >> 13;
- if((uPriority >= pstClassifierRule->usUserPriority[0]) && (uPriority <= pstClassifierRule->usUserPriority[1]))
+ if ((uPriority >= pstClassifierRule->usUserPriority[0]) && (uPriority <= pstClassifierRule->usUserPriority[1]))
bClassificationSucceed = TRUE;
- if(!bClassificationSucceed)
+ if (!bClassificationSucceed)
return FALSE;
}
@@ -746,19 +746,19 @@ static BOOLEAN EthCSMatchVLANRules(struct bcm_classifier_rule *pstClassifierRule
bClassificationSucceed = FALSE;
- if(pstClassifierRule->usValidityBitMap & (1<<PKT_CLASSIFICATION_VLANID_VALID))
+ if (pstClassifierRule->usValidityBitMap & (1<<PKT_CLASSIFICATION_VLANID_VALID))
{
- if(pstEthCsPktInfo->eNwpktEthFrameType!=eEth802QVLANFrame)
+ if (pstEthCsPktInfo->eNwpktEthFrameType != eEth802QVLANFrame)
return FALSE;
usVLANID = ntohs(*(USHORT *)(skb->data + sizeof(struct bcm_eth_header))) & 0xFFF;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s Pkt VLANID %x Priority: %d\n",__FUNCTION__,usVLANID, uPriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s Pkt VLANID %x Priority: %d\n", __FUNCTION__, usVLANID, uPriority);
- if(usVLANID == ((pstClassifierRule->usVLANID & 0xFFF0) >> 4))
+ if (usVLANID == ((pstClassifierRule->usVLANID & 0xFFF0) >> 4))
bClassificationSucceed = TRUE;
- if(!bClassificationSucceed)
+ if (!bClassificationSucceed)
return FALSE;
}
@@ -768,50 +768,50 @@ static BOOLEAN EthCSMatchVLANRules(struct bcm_classifier_rule *pstClassifierRule
}
-static BOOLEAN EThCSClassifyPkt(struct bcm_mini_adapter *Adapter,struct sk_buff* skb,
+static BOOLEAN EThCSClassifyPkt(struct bcm_mini_adapter *Adapter, struct sk_buff* skb,
struct bcm_eth_packet_info *pstEthCsPktInfo,
struct bcm_classifier_rule *pstClassifierRule,
B_UINT8 EthCSCupport)
{
BOOLEAN bClassificationSucceed = FALSE;
- bClassificationSucceed = EthCSMatchSrcMACAddress(pstClassifierRule,((struct bcm_eth_header *)(skb->data))->au8SourceAddress);
- if(!bClassificationSucceed)
+ bClassificationSucceed = EthCSMatchSrcMACAddress(pstClassifierRule, ((struct bcm_eth_header *)(skb->data))->au8SourceAddress);
+ if (!bClassificationSucceed)
return FALSE;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "ETH CS SrcMAC Matched\n");
- bClassificationSucceed = EthCSMatchDestMACAddress(pstClassifierRule,((struct bcm_eth_header *)(skb->data))->au8DestinationAddress);
- if(!bClassificationSucceed)
+ bClassificationSucceed = EthCSMatchDestMACAddress(pstClassifierRule, ((struct bcm_eth_header *)(skb->data))->au8DestinationAddress);
+ if (!bClassificationSucceed)
return FALSE;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "ETH CS DestMAC Matched\n");
//classify on ETHType/802.2SAP TLV
- bClassificationSucceed = EthCSMatchEThTypeSAP(pstClassifierRule,skb,pstEthCsPktInfo);
- if(!bClassificationSucceed)
+ bClassificationSucceed = EthCSMatchEThTypeSAP(pstClassifierRule, skb, pstEthCsPktInfo);
+ if (!bClassificationSucceed)
return FALSE;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "ETH CS EthType/802.2SAP Matched\n");
//classify on 802.1VLAN Header Parameters
- bClassificationSucceed = EthCSMatchVLANRules(pstClassifierRule,skb,pstEthCsPktInfo);
- if(!bClassificationSucceed)
+ bClassificationSucceed = EthCSMatchVLANRules(pstClassifierRule, skb, pstEthCsPktInfo);
+ if (!bClassificationSucceed)
return FALSE;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "ETH CS 802.1 VLAN Rules Matched\n");
return bClassificationSucceed;
}
-static void EThCSGetPktInfo(struct bcm_mini_adapter *Adapter,PVOID pvEthPayload,
+static void EThCSGetPktInfo(struct bcm_mini_adapter *Adapter, PVOID pvEthPayload,
struct bcm_eth_packet_info *pstEthCsPktInfo)
{
USHORT u16Etype = ntohs(((struct bcm_eth_header *)pvEthPayload)->u16Etype);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "EthCSGetPktInfo : Eth Hdr Type : %X\n",u16Etype);
- if(u16Etype > 0x5dc)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "EthCSGetPktInfo : Eth Hdr Type : %X\n", u16Etype);
+ if (u16Etype > 0x5dc)
{
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "EthCSGetPktInfo : ETH2 Frame \n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "EthCSGetPktInfo : ETH2 Frame\n");
//ETH2 Frame
- if(u16Etype == ETHERNET_FRAMETYPE_802QVLAN)
+ if (u16Etype == ETHERNET_FRAMETYPE_802QVLAN)
{
//802.1Q VLAN Header
pstEthCsPktInfo->eNwpktEthFrameType = eEth802QVLANFrame;
@@ -828,27 +828,27 @@ static void EThCSGetPktInfo(struct bcm_mini_adapter *Adapter,PVOID pvEthPayload,
else
{
//802.2 LLC
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "802.2 LLC Frame \n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "802.2 LLC Frame\n");
pstEthCsPktInfo->eNwpktEthFrameType = eEth802LLCFrame;
pstEthCsPktInfo->ucDSAP = ((struct bcm_eth_llc_frame *)pvEthPayload)->DSAP;
- if(pstEthCsPktInfo->ucDSAP == 0xAA && ((struct bcm_eth_llc_frame *)pvEthPayload)->SSAP == 0xAA)
+ if (pstEthCsPktInfo->ucDSAP == 0xAA && ((struct bcm_eth_llc_frame *)pvEthPayload)->SSAP == 0xAA)
{
//SNAP Frame
pstEthCsPktInfo->eNwpktEthFrameType = eEth802LLCSNAPFrame;
u16Etype = ((struct bcm_eth_llc_snap_frame *)pvEthPayload)->usEtherType;
}
}
- if(u16Etype == ETHERNET_FRAMETYPE_IPV4)
+ if (u16Etype == ETHERNET_FRAMETYPE_IPV4)
pstEthCsPktInfo->eNwpktIPFrameType = eIPv4Packet;
- else if(u16Etype == ETHERNET_FRAMETYPE_IPV6)
+ else if (u16Etype == ETHERNET_FRAMETYPE_IPV6)
pstEthCsPktInfo->eNwpktIPFrameType = eIPv6Packet;
else
pstEthCsPktInfo->eNwpktIPFrameType = eNonIPPacket;
pstEthCsPktInfo->usEtherType = ((struct bcm_eth_header *)pvEthPayload)->u16Etype;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "EthCsPktInfo->eNwpktIPFrameType : %x\n",pstEthCsPktInfo->eNwpktIPFrameType);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "EthCsPktInfo->eNwpktEthFrameType : %x\n",pstEthCsPktInfo->eNwpktEthFrameType);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "EthCsPktInfo->usEtherType : %x\n",pstEthCsPktInfo->usEtherType);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "EthCsPktInfo->eNwpktIPFrameType : %x\n", pstEthCsPktInfo->eNwpktIPFrameType);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "EthCsPktInfo->eNwpktEthFrameType : %x\n", pstEthCsPktInfo->eNwpktEthFrameType);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "EthCsPktInfo->usEtherType : %x\n", pstEthCsPktInfo->usEtherType);
}
diff --git a/drivers/staging/bcm/Version.h b/drivers/staging/bcm/Version.h
deleted file mode 100644
index a07b956..0000000
--- a/drivers/staging/bcm/Version.h
+++ /dev/null
@@ -1,35 +0,0 @@
-
-/*Copyright (c) 2005 Beceem Communications Inc.
-
-Module Name:
-
- Version.h
-
-Abstract:
-
-
---*/
-
-#ifndef VERSION_H
-#define VERSION_H
-
-
-#define VER_FILETYPE VFT_DRV
-#define VER_FILESUBTYPE VFT2_DRV_NETWORK
-
-
-#define VER_FILEVERSION 5.2.45
-#define VER_FILEVERSION_STR "5.2.45"
-
-#undef VER_PRODUCTVERSION
-#define VER_PRODUCTVERSION VER_FILEVERSION
-
-#undef VER_PRODUCTVERSION_STR
-#define VER_PRODUCTVERSION_STR VER_FILEVERSION_STR
-
-
-
-
-//#include "common.ver"
-
-#endif //VERSION_H
diff --git a/drivers/staging/bcm/headers.h b/drivers/staging/bcm/headers.h
index da47db8..7fd21c6 100644
--- a/drivers/staging/bcm/headers.h
+++ b/drivers/staging/bcm/headers.h
@@ -38,7 +38,6 @@
#include <net/ip.h>
#include "Typedefs.h"
-#include "Version.h"
#include "Macros.h"
#include "HostMIBSInterface.h"
#include "cntrl_SignalingInterface.h"
@@ -71,7 +70,7 @@
#define DEV_NAME "tarang"
#define DRV_DESCRIPTION "Beceem Communications Inc. WiMAX driver"
#define DRV_COPYRIGHT "Copyright 2010. Beceem Communications Inc"
-#define DRV_VERSION VER_FILEVERSION_STR
+#define DRV_VERSION "5.2.45"
#define PFX DRV_NAME " "
extern struct class *bcm_class;
diff --git a/drivers/staging/bcm/nvm.c b/drivers/staging/bcm/nvm.c
index bea1330..91a5715 100644
--- a/drivers/staging/bcm/nvm.c
+++ b/drivers/staging/bcm/nvm.c
@@ -2966,7 +2966,7 @@ int BcmFlash2xBulkWrite(struct bcm_mini_adapter *Adapter,
* @Adapter :-Drivers private Data Structure
*
* Return Value:-
- * Return STATUS_SUCESS if get success in setting the right DSD else negaive error code
+ *	Return STATUS_SUCCESS if it succeeds in setting the right DSD, else a negative error code
*
*/
diff --git a/drivers/staging/bcm/vendorspecificextn.c b/drivers/staging/bcm/vendorspecificextn.c
index be1f91d..d38a06f 100644
--- a/drivers/staging/bcm/vendorspecificextn.c
+++ b/drivers/staging/bcm/vendorspecificextn.c
@@ -1,70 +1,70 @@
#include "headers.h"
-//-----------------------------------------------------------------------------
-// Procedure: vendorextnGetSectionInfo
-//
-// Description: Finds the type of NVM used.
-//
-// Arguments:
-// Adapter - ptr to Adapter object instance
-// pNVMType - ptr to NVM type.
-// Returns:
-// STATUS_SUCCESS/STATUS_FAILURE
-//
-//-----------------------------------------------------------------------------
+/*
+ * Procedure: vendorextnGetSectionInfo
+ *
+ * Description: Finds the type of NVM used.
+ *
+ * Arguments:
+ * Adapter - ptr to Adapter object instance
+ * pNVMType - ptr to NVM type.
+ * Returns:
+ * STATUS_SUCCESS/STATUS_FAILURE
+ *
+ */
INT vendorextnGetSectionInfo(PVOID pContext, struct bcm_flash2x_vendor_info *pVendorInfo)
{
return STATUS_FAILURE;
}
-//-----------------------------------------------------------------------------
-// Procedure: vendorextnInit
-//
-// Description: Initializing the vendor extension NVM interface
-//
-// Arguments:
-// Adapter - Pointer to MINI Adapter Structure.
-
-// Returns:
-// STATUS_SUCCESS/STATUS_FAILURE
-//
-//-----------------------------------------------------------------------------
+/*
+ * Procedure: vendorextnInit
+ *
+ * Description: Initializing the vendor extension NVM interface
+ *
+ * Arguments:
+ * Adapter - Pointer to MINI Adapter Structure
+ * Returns:
+ * STATUS_SUCCESS/STATUS_FAILURE
+ *
+ *
+ */
INT vendorextnInit(struct bcm_mini_adapter *Adapter)
{
return STATUS_SUCCESS;
}
-//-----------------------------------------------------------------------------
-// Procedure: vendorextnExit
-//
-// Description: Free the resource associated with vendor extension NVM interface
-//
-// Arguments:
-// Adapter - Pointer to MINI Adapter Structure.
-
-// Returns:
-// STATUS_SUCCESS/STATUS_FAILURE
-//
-//-----------------------------------------------------------------------------
+/*
+ * Procedure: vendorextnExit
+ *
+ * Description: Free the resource associated with vendor extension NVM interface
+ *
+ * Arguments:
+ *
+ * Returns:
+ * STATUS_SUCCESS/STATUS_FAILURE
+ *
+ *
+ */
INT vendorextnExit(struct bcm_mini_adapter *Adapter)
{
return STATUS_SUCCESS;
}
-//------------------------------------------------------------------------
-// Procedure: vendorextnIoctl
-//
-// Description: execute the vendor extension specific ioctl
-//
-//Arguments:
-// Adapter -Beceem private Adapter Structure
-// cmd -vendor extension specific Ioctl commad
-// arg -input parameter sent by vendor
-//
-// Returns:
-// CONTINUE_COMMON_PATH in case it is not meant to be processed by vendor ioctls
-// STATUS_SUCCESS/STATUS_FAILURE as per the IOCTL return value
-//
-//--------------------------------------------------------------------------
+/*
+ * Procedure: vendorextnIoctl
+ *
+ * Description: execute the vendor extension specific ioctl
+ *
+ * Arguments:
+ * Adapter -Beceem private Adapter Structure
+ *	cmd		-vendor extension specific Ioctl command
+ * arg -input parameter sent by vendor
+ *
+ * Returns:
+ * CONTINUE_COMMON_PATH in case it is not meant to be processed by vendor ioctls
+ * STATUS_SUCCESS/STATUS_FAILURE as per the IOCTL return value
+ */
+
INT vendorextnIoctl(struct bcm_mini_adapter *Adapter, UINT cmd, ULONG arg)
{
return CONTINUE_COMMON_PATH;
@@ -72,22 +72,21 @@ INT vendorextnIoctl(struct bcm_mini_adapter *Adapter, UINT cmd, ULONG arg)
-//------------------------------------------------------------------
-// Procedure: vendorextnReadSection
-//
-// Description: Reads from a section of NVM
-//
-// Arguments:
-// pContext - ptr to Adapter object instance
-// pBuffer - Read the data from Vendor Area to this buffer
-// SectionVal - Value of type of Section
-// Offset - Read from the Offset of the Vendor Section.
-// numOfBytes - Read numOfBytes from the Vendor section to Buffer
-//
-// Returns:
-// STATUS_SUCCESS/STATUS_FAILURE
-//
-//------------------------------------------------------------------
+/*
+ * Procedure: vendorextnReadSection
+ *
+ * Description: Reads from a section of NVM
+ *
+ * Arguments:
+ * pContext - ptr to Adapter object instance
+ * pBuffer - Read the data from Vendor Area to this buffer
+ * SectionVal - Value of type of Section
+ * Offset - Read from the Offset of the Vendor Section.
+ * numOfBytes - Read numOfBytes from the Vendor section to Buffer
+ *
+ * Returns:
+ * STATUS_SUCCESS/STATUS_FAILURE
+ */
INT vendorextnReadSection(PVOID pContext, PUCHAR pBuffer, enum bcm_flash2x_section_val SectionVal,
UINT offset, UINT numOfBytes)
@@ -97,23 +96,22 @@ INT vendorextnReadSection(PVOID pContext, PUCHAR pBuffer, enum bcm_flash2x_sect
-//------------------------------------------------------------------
-// Procedure: vendorextnWriteSection
-//
-// Description: Write to a Section of NVM
-//
-// Arguments:
-// pContext - ptr to Adapter object instance
-// pBuffer - Write the data provided in the buffer
-// SectionVal - Value of type of Section
-// Offset - Writes to the Offset of the Vendor Section.
-// numOfBytes - Write num Bytes after reading from pBuffer.
-// bVerify - the Buffer Written should be verified.
-//
-// Returns:
-// STATUS_SUCCESS/STATUS_FAILURE
-//
-//------------------------------------------------------------------
+/*
+ * Procedure: vendorextnWriteSection
+ *
+ * Description: Write to a Section of NVM
+ *
+ * Arguments:
+ * pContext - ptr to Adapter object instance
+ * pBuffer - Write the data provided in the buffer
+ * SectionVal - Value of type of Section
+ * Offset - Writes to the Offset of the Vendor Section.
+ * numOfBytes - Write num Bytes after reading from pBuffer.
+ * bVerify - the Buffer Written should be verified.
+ *
+ * Returns:
+ * STATUS_SUCCESS/STATUS_FAILURE
+ */
INT vendorextnWriteSection(PVOID pContext, PUCHAR pBuffer, enum bcm_flash2x_section_val SectionVal,
UINT offset, UINT numOfBytes, BOOLEAN bVerify)
{
@@ -122,25 +120,23 @@ INT vendorextnWriteSection(PVOID pContext, PUCHAR pBuffer, enum bcm_flash2x_sec
-//------------------------------------------------------------------
-// Procedure: vendorextnWriteSectionWithoutErase
-//
-// Description: Write to a Section of NVM without erasing the sector
-//
-// Arguments:
-// pContext - ptr to Adapter object instance
-// pBuffer - Write the data provided in the buffer
-// SectionVal - Value of type of Section
-// Offset - Writes to the Offset of the Vendor Section.
-// numOfBytes - Write num Bytes after reading from pBuffer.
-//
-// Returns:
-// STATUS_SUCCESS/STATUS_FAILURE
-//
-//------------------------------------------------------------------
+/*
+ * Procedure: vendorextnWriteSectionWithoutErase
+ *
+ * Description: Write to a Section of NVM without erasing the sector
+ *
+ * Arguments:
+ * pContext - ptr to Adapter object instance
+ * pBuffer - Write the data provided in the buffer
+ * SectionVal - Value of type of Section
+ * Offset - Writes to the Offset of the Vendor Section.
+ * numOfBytes - Write num Bytes after reading from pBuffer.
+ *
+ * Returns:
+ * STATUS_SUCCESS/STATUS_FAILURE
+ */
INT vendorextnWriteSectionWithoutErase(PVOID pContext, PUCHAR pBuffer, enum bcm_flash2x_section_val SectionVal,
UINT offset, UINT numOfBytes)
{
return STATUS_FAILURE;
}
-
diff --git a/drivers/staging/btmtk_usb/Kconfig b/drivers/staging/btmtk_usb/Kconfig
new file mode 100644
index 0000000..a425ebd
--- /dev/null
+++ b/drivers/staging/btmtk_usb/Kconfig
@@ -0,0 +1,11 @@
+config USB_BTMTK
+ tristate "Mediatek Bluetooth support"
+ depends on USB && BT && m
+ ---help---
+	  Say Y here if you wish to control an MTK USB Bluetooth device.
+
+	  This option depends on 'USB' support being enabled.
+
+ To compile this driver as a module, choose M here: the
+ module will be called btmtk_usb.
+
diff --git a/drivers/staging/btmtk_usb/Makefile b/drivers/staging/btmtk_usb/Makefile
new file mode 100644
index 0000000..4d6c9d7
--- /dev/null
+++ b/drivers/staging/btmtk_usb/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_USB_BTMTK) += btmtk_usb.o
diff --git a/drivers/staging/btmtk_usb/README b/drivers/staging/btmtk_usb/README
new file mode 100644
index 0000000..c046c8e
--- /dev/null
+++ b/drivers/staging/btmtk_usb/README
@@ -0,0 +1,14 @@
+-build driver modules
+ make
+
+-install driver modules
+ make install
+
+-remove driver modules
+ make clean
+
+-dynamic debug message
+ turn on CONFIG_DYNAMIC_DEBUG compiler flag for current kernel
+ mount -t debugfs none /sys/kernel/debug/
+	echo "module module_name +p" > /sys/kernel/debug/dynamic_debug/control (turn on debug messages; module name such as btmtk_usb)
+	echo "module module_name -p" > /sys/kernel/debug/dynamic_debug/control (turn off debug messages; module name such as btmtk_usb)
diff --git a/drivers/staging/btmtk_usb/TODO b/drivers/staging/btmtk_usb/TODO
new file mode 100644
index 0000000..a71d129
--- /dev/null
+++ b/drivers/staging/btmtk_usb/TODO
@@ -0,0 +1,10 @@
+TODO:
+ - checkpatch.pl clean
+ - determine if the driver should not be using a duplicate
+ version of the usb-bluetooth interface code, but should
+ be merged into the drivers/bluetooth/ directory and
+ infrastructure instead.
+ - review by the bluetooth developer community
+
+Please send any patches for this driver to Yu-Chen, Cho <acho@suse.com> and
+jay.hung@mediatek.com
diff --git a/drivers/staging/btmtk_usb/btmtk_usb.c b/drivers/staging/btmtk_usb/btmtk_usb.c
new file mode 100644
index 0000000..0e783e8
--- /dev/null
+++ b/drivers/staging/btmtk_usb/btmtk_usb.c
@@ -0,0 +1,1784 @@
+/*
+ * MediaTek Bluetooth USB Driver
+ *
+ * Copyright (C) 2013, MediaTek co.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * or on the worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/completion.h>
+#include <linux/firmware.h>
+#include <linux/usb.h>
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "btmtk_usb.h"
+
+#define VERSION "1.0.4"
+#define MT7650_FIRMWARE "mt7650.bin"
+#define MT7662_FIRMWARE "mt7662.bin"
+
+static struct usb_driver btmtk_usb_driver;
+
+
+static int btmtk_usb_load_rom_patch(struct btmtk_usb_data *);
+static int btmtk_usb_load_fw(struct btmtk_usb_data *);
+
+static void hex_dump(char *str, u8 *src_buf, u32 src_buf_len)
+{
+ unsigned char *pt;
+ int x;
+
+ pt = src_buf;
+
+ BT_DBG("%s: %p, len = %d\n", str, src_buf, src_buf_len);
+
+ for (x = 0; x < src_buf_len; x++) {
+ if (x % 16 == 0)
+ BT_DBG("0x%04x : ", x);
+ BT_DBG("%02x ", ((unsigned char)pt[x]));
+ if (x % 16 == 15)
+ BT_DBG("\n");
+ }
+
+ BT_DBG("\n");
+}
+
+static int btmtk_usb_reset(struct usb_device *udev)
+{
+ int ret;
+
+ BT_DBG("%s\n", __func__);
+
+ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x01, DEVICE_VENDOR_REQUEST_OUT,
+ 0x01, 0x00, NULL, 0x00, CONTROL_TIMEOUT_JIFFIES);
+
+ if (ret < 0) {
+ BT_ERR("%s error(%d)\n", __func__, ret);
+ return ret;
+ }
+
+ if (ret > 0)
+ ret = 0;
+
+ return ret;
+}
+
+static int btmtk_usb_io_read32(struct btmtk_usb_data *data, u32 reg, u32 *val)
+{
+ u8 request = data->r_request;
+ struct usb_device *udev = data->udev;
+ int ret;
+
+ ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request, DEVICE_VENDOR_REQUEST_IN,
+ 0x0, reg, data->io_buf, 4,
+ CONTROL_TIMEOUT_JIFFIES);
+
+ if (ret < 0) {
+ *val = 0xffffffff;
+ BT_ERR("%s error(%d), reg=%x, value=%x\n", __func__, ret, reg, *val);
+ return ret;
+ }
+
+ memmove(val, data->io_buf, 4);
+
+ *val = le32_to_cpu(*val);
+
+ if (ret > 0)
+ ret = 0;
+
+ return ret;
+}
+
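+/* Write a 32-bit register as two 16-bit vendor control transfers (low half at reg, high half at reg + 2). */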
+static int btmtk_usb_io_write32(struct btmtk_usb_data *data, u32 reg, u32 val)
+{
+ u16 value, index;
+ u8 request = data->w_request;
+ struct usb_device *udev = data->udev;
+ int ret;
+
+ index = (u16)reg;
+ value = val & 0x0000ffff;
+
+ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request, DEVICE_VENDOR_REQUEST_OUT,
+ value, index, NULL, 0,
+ CONTROL_TIMEOUT_JIFFIES);
+
+ if (ret < 0) {
+ BT_ERR("%s error(%d), reg=%x, value=%x\n", __func__, ret, reg, val);
+ return ret;
+ }
+
+ index = (u16)(reg + 2);
+ value = (val & 0xffff0000) >> 16;
+
+ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ request, DEVICE_VENDOR_REQUEST_OUT,
+ value, index, NULL, 0, CONTROL_TIMEOUT_JIFFIES);
+
+ if (ret < 0) {
+ BT_ERR("%s error(%d), reg=%x, value=%x\n", __func__, ret, reg, val);
+ return ret;
+ }
+
+ if (ret > 0)
+ ret = 0;
+
+ return ret;
+}
+
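+/*
+ * Select the register bank (SYSCTL or WLAN) that subsequent vendor I/O
+ * accesses target by switching the control-request codes.
+ */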
+static int btmtk_usb_switch_iobase(struct btmtk_usb_data *data, int base)
+{
+ int ret = 0;
+
+ switch (base) {
+ case SYSCTL:
+ data->w_request = 0x42;
+ data->r_request = 0x47;
+ break;
+ case WLAN:
+ data->w_request = 0x02;
+ data->r_request = 0x07;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
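+/*
+ * Read the chip ID and, depending on the chip, request and load either the
+ * firmware image (MT7630/MT7650) or the ROM patch (MT7632/MT7662).
+ */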
+static void btmtk_usb_cap_init(struct btmtk_usb_data *data)
+{
+ const struct firmware *firmware;
+ struct usb_device *udev = data->udev;
+ int ret;
+
+ btmtk_usb_io_read32(data, 0x00, &data->chip_id);
+
+ BT_DBG("chip id = %x\n", data->chip_id);
+
+ if (is_mt7630(data) || is_mt7650(data)) {
+ data->need_load_fw = 1;
+ data->need_load_rom_patch = 0;
+ ret = request_firmware(&firmware, MT7650_FIRMWARE, &udev->dev);
+ if (ret < 0) {
+ if (ret == -ENOENT) {
+				BT_ERR("Firmware file \"%s\" not found\n", MT7650_FIRMWARE);
+			} else {
+				BT_ERR("Firmware file \"%s\" request failed (err=%d)\n",
+						MT7650_FIRMWARE, ret);
+			}
+		} else {
+			BT_DBG("Firmware file \"%s\" Found\n", MT7650_FIRMWARE);
+ /* load firmware here */
+ data->firmware = firmware;
+ btmtk_usb_load_fw(data);
+ }
+ release_firmware(firmware);
+ } else if (is_mt7632(data) || is_mt7662(data)) {
+ data->need_load_fw = 0;
+ data->need_load_rom_patch = 1;
+ data->rom_patch_offset = 0x90000;
+ ret = request_firmware(&firmware, MT7662_FIRMWARE, &udev->dev);
+ if (ret < 0) {
+ if (ret == -ENOENT) {
+ BT_ERR("Firmware file \"%s\" not found\n", MT7662_FIRMWARE);
+ } else {
+ BT_ERR("Firmware file \"%s\" request failed (err=%d)\n",
+ MT7662_FIRMWARE, ret);
+ }
+ } else {
+ BT_DBG("Firmware file \"%s\" Found\n", MT7662_FIRMWARE);
+ /* load rom patch here */
+ data->firmware = firmware;
+ data->rom_patch_len = firmware->size;
+ btmtk_usb_load_rom_patch(data);
+ }
+ release_firmware(firmware);
+ } else {
+		BT_ERR("unknown chip(%x)\n", data->chip_id);
+ }
+}
+
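+/*
+ * 16-bit ones'-complement (Internet-style) checksum over len bytes; used to
+ * verify the uploaded ROM patch against the value reported by the MCU.
+ */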
+static u16 checksume16(u8 *pData, int len)
+{
+ int sum = 0;
+
+ while (len > 1) {
+ sum += *((u16 *)pData);
+
+ pData = pData + 2;
+
+ if (sum & 0x80000000)
+ sum = (sum & 0xFFFF) + (sum >> 16);
+
+ len -= 2;
+ }
+
+ if (len)
+ sum += *((u8 *)pData);
+
+ while (sum >> 16) {
+ sum = (sum & 0xFFFF) + (sum >> 16);
+ }
+
+ return ~sum;
+}
+
+static int btmtk_usb_chk_crc(struct btmtk_usb_data *data, u32 checksum_len)
+{
+ int ret = 0;
+ struct usb_device *udev = data->udev;
+
+ BT_DBG("%s\n", __func__);
+
+ memmove(data->io_buf, &data->rom_patch_offset, 4);
+ memmove(&data->io_buf[4], &checksum_len, 4);
+
+ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x1, DEVICE_VENDOR_REQUEST_IN,
+ 0x20, 0x00, data->io_buf, 8,
+ CONTROL_TIMEOUT_JIFFIES);
+
+ if (ret < 0) {
+ BT_ERR("%s error(%d)\n", __func__, ret);
+ }
+
+ return ret;
+}
+
+static u16 btmtk_usb_get_crc(struct btmtk_usb_data *data)
+{
+ int ret = 0;
+ struct usb_device *udev = data->udev;
+ u16 crc, count = 0;
+
+ BT_DBG("%s\n", __func__);
+
+ while (1) {
+ ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ 0x01, DEVICE_VENDOR_REQUEST_IN,
+ 0x21, 0x00, data->io_buf, 2,
+ CONTROL_TIMEOUT_JIFFIES);
+
+ if (ret < 0) {
+ crc = 0xFFFF;
+ BT_ERR("%s error(%d)\n", __func__, ret);
+ }
+
+ memmove(&crc, data->io_buf, 2);
+
+ crc = le16_to_cpu(crc);
+
+ if (crc != 0xFFFF)
+ break;
+
+ mdelay(100);
+
+ if (count++ > 100) {
+ BT_ERR("Query CRC over %d times\n", count);
+ break;
+ }
+ }
+
+ return crc;
+}
+
+static int btmtk_usb_reset_wmt(struct btmtk_usb_data *data)
+{
+ int ret = 0;
+
+ /* reset command */
+ u8 cmd[8] = {0x6F, 0xFC, 0x05, 0x01, 0x07, 0x01, 0x00, 0x04};
+
+ memmove(data->io_buf, cmd, 8);
+
+ BT_DBG("%s\n", __func__);
+
+ ret = usb_control_msg(data->udev, usb_sndctrlpipe(data->udev, 0), 0x01,
+ DEVICE_CLASS_REQUEST_OUT, 0x12, 0x00, data->io_buf,
+ 8, CONTROL_TIMEOUT_JIFFIES);
+
+ if (ret)
+ BT_ERR("%s:(%d)\n", __func__, ret);
+
+ return ret;
+}
+
+static void load_rom_patch_complete(struct urb *urb)
+{
+
+ struct completion *sent_to_mcu_done = (struct completion *)urb->context;
+
+ complete(sent_to_mcu_done);
+}
+
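+/*
+ * Push the ROM patch to the MCU over the bulk-out endpoint: the image is sent
+ * in UPLOAD_PATCH_UNIT-sized chunks, each wrapped in an HCI vendor command
+ * plus a WMT header and a phase byte, then verified by comparing a local
+ * checksum with the one read back from the firmware.
+ */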
+static int btmtk_usb_load_rom_patch(struct btmtk_usb_data *data)
+{
+ u32 loop = 0;
+ u32 value;
+ s32 sent_len;
+ int ret = 0, total_checksum = 0;
+ struct urb *urb;
+ u32 patch_len = 0;
+ u32 cur_len = 0;
+ dma_addr_t data_dma;
+ struct completion sent_to_mcu_done;
+ int first_block = 1;
+ unsigned char phase;
+ void *buf;
+ char *pos;
+ unsigned int pipe = usb_sndbulkpipe(data->udev, data->bulk_tx_ep->bEndpointAddress);
+
+ if (!data->firmware) {
+ BT_ERR("%s:please assign a rom patch\n", __func__);
+ return -1;
+ }
+
+load_patch_protect:
+ btmtk_usb_switch_iobase(data, WLAN);
+ btmtk_usb_io_read32(data, SEMAPHORE_03, &value);
+ loop++;
+
+ if (((value & 0x01) == 0x00) && (loop < 600)) {
+ mdelay(1);
+ goto load_patch_protect;
+ }
+
+ btmtk_usb_io_write32(data, 0x1004, 0x2c);
+
+ btmtk_usb_switch_iobase(data, SYSCTL);
+
+ btmtk_usb_io_write32(data, 0x1c, 0x30);
+
+ /* Enable USB_DMA_CFG */
+ btmtk_usb_io_write32(data, 0x9018, 0x00c00020);
+
+ btmtk_usb_switch_iobase(data, WLAN);
+
+ /* check ROM patch if upgrade */
+ btmtk_usb_io_read32(data, COM_REG0, &value);
+
+ if ((value & 0x02) == 0x02)
+ goto error0;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+
+ if (!urb) {
+ ret = -ENOMEM;
+ goto error0;
+ }
+
+ buf = usb_alloc_coherent(data->udev, UPLOAD_PATCH_UNIT, GFP_ATOMIC, &data_dma);
+
+ if (!buf) {
+ ret = -ENOMEM;
+ goto error1;
+ }
+
+ pos = buf;
+ BT_DBG("loading rom patch");
+
+ init_completion(&sent_to_mcu_done);
+
+ cur_len = 0x00;
+ patch_len = data->rom_patch_len - PATCH_INFO_SIZE;
+
+ /* loading rom patch */
+ while (1) {
+ s32 sent_len_max = UPLOAD_PATCH_UNIT - PATCH_HEADER_SIZE;
+ sent_len = (patch_len - cur_len) >= sent_len_max ? sent_len_max : (patch_len - cur_len);
+
+ BT_DBG("patch_len = %d\n", patch_len);
+ BT_DBG("cur_len = %d\n", cur_len);
+ BT_DBG("sent_len = %d\n", sent_len);
+
+ if (sent_len > 0) {
+ if (first_block == 1) {
+ if (sent_len < sent_len_max)
+ phase = PATCH_PHASE3;
+ else
+ phase = PATCH_PHASE1;
+ first_block = 0;
+ } else if (sent_len == sent_len_max) {
+ phase = PATCH_PHASE2;
+ } else {
+ phase = PATCH_PHASE3;
+ }
+
+ /* prepare HCI header */
+ pos[0] = 0x6F;
+ pos[1] = 0xFC;
+ pos[2] = (sent_len + 5) & 0xFF;
+ pos[3] = ((sent_len + 5) >> 8) & 0xFF;
+
+ /* prepare WMT header */
+ pos[4] = 0x01;
+ pos[5] = 0x01;
+ pos[6] = (sent_len + 1) & 0xFF;
+ pos[7] = ((sent_len + 1) >> 8) & 0xFF;
+
+ pos[8] = phase;
+
+ memcpy(&pos[9], data->firmware->data + PATCH_INFO_SIZE + cur_len, sent_len);
+
+ BT_DBG("sent_len + PATCH_HEADER_SIZE = %d, phase = %d\n",
+ sent_len + PATCH_HEADER_SIZE, phase);
+
+ usb_fill_bulk_urb(urb,
+ data->udev,
+ pipe,
+ buf,
+ sent_len + PATCH_HEADER_SIZE,
+ load_rom_patch_complete,
+ &sent_to_mcu_done);
+
+ urb->transfer_dma = data_dma;
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+
+ if (ret)
+ goto error2;
+
+ if (!wait_for_completion_timeout(&sent_to_mcu_done, msecs_to_jiffies(1000))) {
+ usb_kill_urb(urb);
+ BT_ERR("upload rom_patch timeout\n");
+ goto error2;
+ }
+
+ BT_DBG(".");
+
+ mdelay(200);
+
+ cur_len += sent_len;
+
+ } else {
+ break;
+ }
+ }
+
+ total_checksum = checksume16((u8 *)data->firmware->data + PATCH_INFO_SIZE, patch_len);
+
+ BT_DBG("Send checksum req..\n");
+
+ btmtk_usb_chk_crc(data, patch_len);
+
+ mdelay(20);
+
+ if (total_checksum != btmtk_usb_get_crc(data)) {
+ BT_ERR("checksum fail!, local(0x%x) <> fw(0x%x)\n",
+ total_checksum, btmtk_usb_get_crc(data));
+ ret = -1;
+ goto error2;
+ }
+
+ mdelay(20);
+
+ ret = btmtk_usb_reset_wmt(data);
+
+ mdelay(20);
+
+error2:
+ usb_free_coherent(data->udev, UPLOAD_PATCH_UNIT, buf, data_dma);
+error1:
+ usb_free_urb(urb);
+error0:
+ btmtk_usb_io_write32(data, SEMAPHORE_03, 0x1);
+ return ret;
+}
+
+
+static int load_fw_iv(struct btmtk_usb_data *data)
+{
+ int ret;
+ struct usb_device *udev = data->udev;
+ char *buf = kmalloc(64, GFP_ATOMIC);
+
+ memmove(buf, data->firmware->data + 32, 64);
+
+ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x01,
+ DEVICE_VENDOR_REQUEST_OUT, 0x12, 0x0, buf, 64,
+ CONTROL_TIMEOUT_JIFFIES);
+
+ if (ret < 0) {
+ BT_ERR("%s error(%d) step4\n", __func__, ret);
+ kfree(buf);
+ return ret;
+ }
+
+ if (ret > 0)
+ ret = 0;
+
+ kfree(buf);
+
+ return ret;
+}
+
+static void load_fw_complete(struct urb *urb)
+{
+
+ struct completion *sent_to_mcu_done = (struct completion *)urb->context;
+
+ complete(sent_to_mcu_done);
+}
+
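+/*
+ * Download the ILM and DLM firmware sections through the on-chip PDMA:
+ * section lengths are taken from the 32-byte image header, data is sent in
+ * 14336-byte chunks with a 4-byte length header, and finally the 64-byte
+ * interrupt vector is loaded and COM_REG0 is polled until the MCU is ready.
+ */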
+static int btmtk_usb_load_fw(struct btmtk_usb_data *data)
+{
+ struct usb_device *udev = data->udev;
+ struct urb *urb;
+ void *buf;
+ u32 cur_len = 0;
+ u32 packet_header = 0;
+ u32 value;
+ u32 ilm_len = 0, dlm_len = 0;
+ u16 fw_ver, build_ver;
+ u32 loop = 0;
+ dma_addr_t data_dma;
+ int ret = 0, sent_len;
+ struct completion sent_to_mcu_done;
+ unsigned int pipe = usb_sndbulkpipe(data->udev, data->bulk_tx_ep->bEndpointAddress);
+
+ if (!data->firmware) {
+ BT_ERR("%s:please assign a fw\n", __func__);
+ return -1;
+ }
+
+ BT_DBG("bulk_tx_ep = %x\n", data->bulk_tx_ep->bEndpointAddress);
+
+loadfw_protect:
+ btmtk_usb_switch_iobase(data, WLAN);
+ btmtk_usb_io_read32(data, SEMAPHORE_00, &value);
+ loop++;
+
+ if (((value & 0x1) == 0) && (loop < 10000))
+ goto loadfw_protect;
+
+ /* check MCU if ready */
+ btmtk_usb_io_read32(data, COM_REG0, &value);
+
+ if ((value & 0x01) == 0x01)
+ goto error0;
+
+ /* Enable MPDMA TX and EP2 load FW mode */
+ btmtk_usb_io_write32(data, 0x238, 0x1c000000);
+
+ btmtk_usb_reset(udev);
+ mdelay(100);
+
+ ilm_len = (*(data->firmware->data + 3) << 24)
+ | (*(data->firmware->data + 2) << 16)
+ | (*(data->firmware->data + 1) << 8)
+ | (*data->firmware->data);
+
+ dlm_len = (*(data->firmware->data + 7) << 24)
+ | (*(data->firmware->data + 6) << 16)
+ | (*(data->firmware->data + 5) << 8)
+ | (*(data->firmware->data + 4));
+
+ fw_ver = (*(data->firmware->data + 11) << 8) | (*(data->firmware->data + 10));
+
+ build_ver = (*(data->firmware->data + 9) << 8) | (*(data->firmware->data + 8));
+
+ BT_DBG("fw version:%d.%d.%02d ",
+ (fw_ver & 0xf000) >> 8,
+ (fw_ver & 0x0f00) >> 8,
+ (fw_ver & 0x00ff));
+
+ BT_DBG("build:%x\n", build_ver);
+
+ BT_DBG("build Time =");
+
+ for (loop = 0; loop < 16; loop++)
+ BT_DBG("%c", *(data->firmware->data + 16 + loop));
+
+ BT_DBG("\n");
+
+ BT_DBG("ILM length = %d(bytes)\n", ilm_len);
+ BT_DBG("DLM length = %d(bytes)\n", dlm_len);
+
+ btmtk_usb_switch_iobase(data, SYSCTL);
+
+ /* U2M_PDMA rx_ring_base_ptr */
+ btmtk_usb_io_write32(data, 0x790, 0x400230);
+
+ /* U2M_PDMA rx_ring_max_cnt */
+ btmtk_usb_io_write32(data, 0x794, 0x1);
+
+ /* U2M_PDMA cpu_idx */
+ btmtk_usb_io_write32(data, 0x798, 0x1);
+
+ /* U2M_PDMA enable */
+ btmtk_usb_io_write32(data, 0x704, 0x44);
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+
+ if (!urb) {
+ ret = -ENOMEM;
+ goto error1;
+ }
+
+ buf = usb_alloc_coherent(udev, 14592, GFP_ATOMIC, &data_dma);
+
+ if (!buf) {
+ ret = -ENOMEM;
+ goto error2;
+ }
+
+ BT_DBG("loading fw");
+
+ init_completion(&sent_to_mcu_done);
+
+ btmtk_usb_switch_iobase(data, SYSCTL);
+
+ cur_len = 0x40;
+
+ /* Loading ILM */
+ while (1) {
+ sent_len = (ilm_len - cur_len) >= 14336 ? 14336 : (ilm_len - cur_len);
+
+ if (sent_len > 0) {
+ packet_header &= ~(0xffffffff);
+ packet_header |= (sent_len << 16);
+ packet_header = cpu_to_le32(packet_header);
+
+ memmove(buf, &packet_header, 4);
+ memmove(buf + 4, data->firmware->data + 32 + cur_len, sent_len);
+
+ /* U2M_PDMA descriptor */
+ btmtk_usb_io_write32(data, 0x230, cur_len);
+
+ while ((sent_len % 4) != 0) {
+ sent_len++;
+ }
+
+ /* U2M_PDMA length */
+ btmtk_usb_io_write32(data, 0x234, sent_len << 16);
+
+ usb_fill_bulk_urb(urb,
+ udev,
+ pipe,
+ buf,
+ sent_len + 4,
+ load_fw_complete,
+ &sent_to_mcu_done);
+
+ urb->transfer_dma = data_dma;
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+
+ if (ret)
+ goto error3;
+
+ if (!wait_for_completion_timeout(&sent_to_mcu_done, msecs_to_jiffies(1000))) {
+ usb_kill_urb(urb);
+ BT_ERR("upload ilm fw timeout\n");
+ goto error3;
+ }
+
+ BT_DBG(".");
+
+ mdelay(200);
+
+ cur_len += sent_len;
+ } else {
+ break;
+ }
+ }
+
+ init_completion(&sent_to_mcu_done);
+ cur_len = 0x00;
+
+ /* Loading DLM */
+ while (1) {
+ sent_len = (dlm_len - cur_len) >= 14336 ? 14336 : (dlm_len - cur_len);
+
+ if (sent_len > 0) {
+ packet_header &= ~(0xffffffff);
+ packet_header |= (sent_len << 16);
+ packet_header = cpu_to_le32(packet_header);
+
+ memmove(buf, &packet_header, 4);
+ memmove(buf + 4, data->firmware->data + 32 + ilm_len + cur_len, sent_len);
+
+ /* U2M_PDMA descriptor */
+ btmtk_usb_io_write32(data, 0x230, 0x80000 + cur_len);
+
+ while ((sent_len % 4) != 0) {
+ BT_DBG("sent_len is not divided by 4\n");
+ sent_len++;
+ }
+
+ /* U2M_PDMA length */
+ btmtk_usb_io_write32(data, 0x234, sent_len << 16);
+
+ usb_fill_bulk_urb(urb,
+ udev,
+ pipe,
+ buf,
+ sent_len + 4,
+ load_fw_complete,
+ &sent_to_mcu_done);
+
+ urb->transfer_dma = data_dma;
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+
+ if (ret)
+ goto error3;
+
+ if (!wait_for_completion_timeout(&sent_to_mcu_done, msecs_to_jiffies(1000))) {
+ usb_kill_urb(urb);
+ BT_ERR("upload dlm fw timeout\n");
+ goto error3;
+ }
+
+ BT_DBG(".");
+
+ mdelay(500);
+
+ cur_len += sent_len;
+
+ } else {
+ break;
+ }
+ }
+
+ /* upload 64bytes interrupt vector */
+ ret = load_fw_iv(data);
+ mdelay(100);
+
+ btmtk_usb_switch_iobase(data, WLAN);
+
+ /* check MCU if ready */
+ loop = 0;
+
+ do {
+ btmtk_usb_io_read32(data, COM_REG0, &value);
+
+ if (value == 0x01)
+ break;
+
+ mdelay(10);
+ loop++;
+ } while (loop <= 100);
+
+	if (loop > 100) {
+		BT_ERR("MCU not ready after polling 100 times\n");
+		ret = -ENODEV;
+	}
+
+error3:
+ usb_free_coherent(udev, 14592, buf, data_dma);
+error2:
+ usb_free_urb(urb);
+error1:
+	/* Disable load fw mode */
+ btmtk_usb_io_read32(data, 0x238, &value);
+ value = value & ~(0x10000000);
+ btmtk_usb_io_write32(data, 0x238, value);
+error0:
+ btmtk_usb_io_write32(data, SEMAPHORE_00, 0x1);
+ return ret;
+}
+
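+/*
+ * Bump the in-flight TX count unless a suspend is pending; a non-zero return
+ * tells the caller to defer the URB via the waker work instead.
+ */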
+static int inc_tx(struct btmtk_usb_data *data)
+{
+ unsigned long flags;
+ int rv;
+
+ spin_lock_irqsave(&data->txlock, flags);
+ rv = test_bit(BTUSB_SUSPENDING, &data->flags);
+ if (!rv)
+ data->tx_in_flight++;
+ spin_unlock_irqrestore(&data->txlock, flags);
+
+ return rv;
+}
+
+static void btmtk_usb_intr_complete(struct urb *urb)
+{
+ struct hci_dev *hdev = urb->context;
+ struct btmtk_usb_data *data = hci_get_drvdata(hdev);
+ int err;
+
+ BT_DBG("%s: %s urb %p status %d count %d\n", __func__, hdev->name,
+ urb, urb->status, urb->actual_length);
+
+ if (!test_bit(HCI_RUNNING, &hdev->flags))
+ return;
+
+ if (urb->status == 0) {
+ hdev->stat.byte_rx += urb->actual_length;
+
+ hex_dump("hci event", urb->transfer_buffer, urb->actual_length);
+
+ if (hci_recv_fragment(hdev, HCI_EVENT_PKT,
+ urb->transfer_buffer,
+ urb->actual_length) < 0) {
+ BT_ERR("%s corrupted event packet", hdev->name);
+ hdev->stat.err_rx++;
+ }
+ }
+
+ if (!test_bit(BTUSB_INTR_RUNNING, &data->flags))
+ return;
+
+ usb_mark_last_busy(data->udev);
+ usb_anchor_urb(urb, &data->intr_anchor);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+
+ if (err < 0) {
+ /* -EPERM: urb is being killed;
+ * -ENODEV: device got disconnected */
+ if (err != -EPERM && err != -ENODEV)
+ BT_ERR("%s urb %p failed to resubmit (%d)",
+ hdev->name, urb, -err);
+ usb_unanchor_urb(urb);
+ }
+}
+
+static int btmtk_usb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
+{
+ struct btmtk_usb_data *data = hci_get_drvdata(hdev);
+ struct urb *urb;
+ unsigned char *buf;
+ unsigned int pipe;
+ int err, size;
+
+ BT_DBG("%s\n", __func__);
+
+ if (!data->intr_ep)
+ return -ENODEV;
+
+ urb = usb_alloc_urb(0, mem_flags);
+ if (!urb)
+ return -ENOMEM;
+
+ size = le16_to_cpu(data->intr_ep->wMaxPacketSize);
+
+ buf = kmalloc(size, mem_flags);
+ if (!buf) {
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ pipe = usb_rcvintpipe(data->udev, data->intr_ep->bEndpointAddress);
+
+ usb_fill_int_urb(urb, data->udev, pipe, buf, size,
+ btmtk_usb_intr_complete, hdev,
+ data->intr_ep->bInterval);
+
+ urb->transfer_flags |= URB_FREE_BUFFER;
+
+ usb_anchor_urb(urb, &data->intr_anchor);
+
+ err = usb_submit_urb(urb, mem_flags);
+ if (err < 0) {
+ if (err != -EPERM && err != -ENODEV)
+ BT_ERR("%s urb %p submission failed (%d)",
+ hdev->name, urb, -err);
+ usb_unanchor_urb(urb);
+ }
+
+ usb_free_urb(urb);
+
+ return err;
+
+}
+
+static void btmtk_usb_bulk_in_complete(struct urb *urb)
+{
+ struct hci_dev *hdev = urb->context;
+ struct btmtk_usb_data *data = hci_get_drvdata(hdev);
+ int err;
+
+ BT_DBG("%s:%s urb %p status %d count %d", __func__, hdev->name,
+ urb, urb->status, urb->actual_length);
+
+ if (!test_bit(HCI_RUNNING, &hdev->flags)) {
+ return;
+ }
+
+ if (urb->status == 0) {
+ hdev->stat.byte_rx += urb->actual_length;
+
+ if (hci_recv_fragment(hdev, HCI_ACLDATA_PKT,
+ urb->transfer_buffer,
+ urb->actual_length) < 0) {
+ BT_ERR("%s corrupted ACL packet", hdev->name);
+ hdev->stat.err_rx++;
+ }
+ }
+
+ if (!test_bit(BTUSB_BULK_RUNNING, &data->flags))
+ return;
+
+ usb_anchor_urb(urb, &data->bulk_anchor);
+ usb_mark_last_busy(data->udev);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err < 0) {
+ /* -EPERM: urb is being killed;
+ * -ENODEV: device got disconnected */
+ if (err != -EPERM && err != -ENODEV)
+ BT_ERR("%s urb %p failed to resubmit (%d)",
+ hdev->name, urb, -err);
+ usb_unanchor_urb(urb);
+ }
+}
+
+static int btmtk_usb_submit_bulk_in_urb(struct hci_dev *hdev, gfp_t mem_flags)
+{
+ struct btmtk_usb_data *data = hci_get_drvdata(hdev);
+ struct urb *urb;
+ unsigned char *buf;
+ unsigned int pipe;
+ int err, size = HCI_MAX_FRAME_SIZE;
+
+ BT_DBG("%s:%s\n", __func__, hdev->name);
+
+ if (!data->bulk_rx_ep)
+ return -ENODEV;
+
+ urb = usb_alloc_urb(0, mem_flags);
+ if (!urb)
+ return -ENOMEM;
+
+ buf = kmalloc(size, mem_flags);
+ if (!buf) {
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ pipe = usb_rcvbulkpipe(data->udev, data->bulk_rx_ep->bEndpointAddress);
+
+ usb_fill_bulk_urb(urb, data->udev, pipe,
+ buf, size, btmtk_usb_bulk_in_complete, hdev);
+
+ urb->transfer_flags |= URB_FREE_BUFFER;
+
+ usb_mark_last_busy(data->udev);
+ usb_anchor_urb(urb, &data->bulk_anchor);
+
+ err = usb_submit_urb(urb, mem_flags);
+ if (err < 0) {
+ if (err != -EPERM && err != -ENODEV)
+ BT_ERR("%s urb %p submission failed (%d)",
+ hdev->name, urb, -err);
+ usb_unanchor_urb(urb);
+ }
+
+ usb_free_urb(urb);
+
+ return err;
+}
+
+static void btmtk_usb_isoc_in_complete(struct urb *urb)
+
+{
+ struct hci_dev *hdev = urb->context;
+ struct btmtk_usb_data *data = hci_get_drvdata(hdev);
+ int i, err;
+
+ BT_DBG("%s: %s urb %p status %d count %d", __func__, hdev->name,
+ urb, urb->status, urb->actual_length);
+
+ if (!test_bit(HCI_RUNNING, &hdev->flags))
+ return;
+
+ if (urb->status == 0) {
+ for (i = 0; i < urb->number_of_packets; i++) {
+ unsigned int offset = urb->iso_frame_desc[i].offset;
+ unsigned int length = urb->iso_frame_desc[i].actual_length;
+
+ if (urb->iso_frame_desc[i].status)
+ continue;
+
+ hdev->stat.byte_rx += length;
+
+ if (hci_recv_fragment(hdev, HCI_SCODATA_PKT,
+ urb->transfer_buffer + offset,
+ length) < 0) {
+ BT_ERR("%s corrupted SCO packet", hdev->name);
+ hdev->stat.err_rx++;
+ }
+ }
+ }
+
+ if (!test_bit(BTUSB_ISOC_RUNNING, &data->flags))
+ return;
+
+ usb_anchor_urb(urb, &data->isoc_anchor);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err < 0) {
+ /* -EPERM: urb is being killed;
+ * -ENODEV: device got disconnected */
+ if (err != -EPERM && err != -ENODEV)
+ BT_ERR("%s urb %p failed to resubmit (%d)",
+ hdev->name, urb, -err);
+ usb_unanchor_urb(urb);
+ }
+}
+
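+/*
+ * Split an isochronous transfer of len bytes into at most
+ * BTUSB_MAX_ISOC_FRAMES frame descriptors of up to mtu bytes each.
+ */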
+static inline void __fill_isoc_descriptor(struct urb *urb, int len, int mtu)
+{
+ int i, offset = 0;
+
+ BT_DBG("len %d mtu %d", len, mtu);
+
+ for (i = 0; i < BTUSB_MAX_ISOC_FRAMES && len >= mtu;
+ i++, offset += mtu, len -= mtu) {
+ urb->iso_frame_desc[i].offset = offset;
+ urb->iso_frame_desc[i].length = mtu;
+ }
+
+ if (len && i < BTUSB_MAX_ISOC_FRAMES) {
+ urb->iso_frame_desc[i].offset = offset;
+ urb->iso_frame_desc[i].length = len;
+ i++;
+ }
+
+ urb->number_of_packets = i;
+}
+
+static int btmtk_usb_submit_isoc_in_urb(struct hci_dev *hdev, gfp_t mem_flags)
+{
+ struct btmtk_usb_data *data = hci_get_drvdata(hdev);
+ struct urb *urb;
+ unsigned char *buf;
+ unsigned int pipe;
+ int err, size;
+
+ BT_DBG("%s\n", __func__);
+
+ if (!data->isoc_rx_ep)
+ return -ENODEV;
+
+ urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, mem_flags);
+ if (!urb)
+ return -ENOMEM;
+
+ size = le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize) *
+ BTUSB_MAX_ISOC_FRAMES;
+
+ buf = kmalloc(size, mem_flags);
+ if (!buf) {
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress);
+
+ usb_fill_int_urb(urb, data->udev, pipe, buf, size, btmtk_usb_isoc_in_complete,
+ hdev, data->isoc_rx_ep->bInterval);
+
+ urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP;
+
+ __fill_isoc_descriptor(urb, size,
+ le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize));
+
+ usb_anchor_urb(urb, &data->isoc_anchor);
+
+ err = usb_submit_urb(urb, mem_flags);
+ if (err < 0) {
+ if (err != -EPERM && err != -ENODEV)
+ BT_ERR("%s urb %p submission failed (%d)",
+ hdev->name, urb, -err);
+ usb_unanchor_urb(urb);
+ }
+
+ usb_free_urb(urb);
+
+ return err;
+}
+
+static int btmtk_usb_open(struct hci_dev *hdev)
+{
+ struct btmtk_usb_data *data = hci_get_drvdata(hdev);
+ int err;
+
+ BT_DBG("%s\n", __func__);
+
+ err = usb_autopm_get_interface(data->intf);
+ if (err < 0)
+ return err;
+
+ data->intf->needs_remote_wakeup = 1;
+
+ if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
+ goto done;
+
+ if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
+ goto done;
+
+ err = btmtk_usb_submit_intr_urb(hdev, GFP_KERNEL);
+ if (err < 0)
+ goto failed;
+
+ err = btmtk_usb_submit_bulk_in_urb(hdev, GFP_KERNEL);
+ if (err < 0) {
+ usb_kill_anchored_urbs(&data->intr_anchor);
+ goto failed;
+ }
+
+ set_bit(BTUSB_BULK_RUNNING, &data->flags);
+ btmtk_usb_submit_bulk_in_urb(hdev, GFP_KERNEL);
+
+done:
+ usb_autopm_put_interface(data->intf);
+ return 0;
+
+failed:
+ clear_bit(BTUSB_INTR_RUNNING, &data->flags);
+ clear_bit(HCI_RUNNING, &hdev->flags);
+ usb_autopm_put_interface(data->intf);
+ return err;
+}
+
+static void btmtk_usb_stop_traffic(struct btmtk_usb_data *data)
+{
+ BT_DBG("%s\n", __func__);
+
+ usb_kill_anchored_urbs(&data->intr_anchor);
+ usb_kill_anchored_urbs(&data->bulk_anchor);
+ usb_kill_anchored_urbs(&data->isoc_anchor);
+}
+
+static int btmtk_usb_close(struct hci_dev *hdev)
+{
+ struct btmtk_usb_data *data = hci_get_drvdata(hdev);
+ int err;
+
+ BT_DBG("%s\n", __func__);
+
+ if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
+ return 0;
+
+ cancel_work_sync(&data->work);
+ cancel_work_sync(&data->waker);
+
+ clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
+ clear_bit(BTUSB_BULK_RUNNING, &data->flags);
+ clear_bit(BTUSB_INTR_RUNNING, &data->flags);
+
+ btmtk_usb_stop_traffic(data);
+
+ err = usb_autopm_get_interface(data->intf);
+ if (err < 0)
+ goto failed;
+
+ data->intf->needs_remote_wakeup = 0;
+ usb_autopm_put_interface(data->intf);
+
+failed:
+ usb_scuttle_anchored_urbs(&data->deferred);
+ return 0;
+}
+
+static int btmtk_usb_flush(struct hci_dev *hdev)
+{
+ struct btmtk_usb_data *data = hci_get_drvdata(hdev);
+
+ BT_DBG("%s\n", __func__);
+
+ usb_kill_anchored_urbs(&data->tx_anchor);
+
+ return 0;
+}
+
+static void btmtk_usb_tx_complete(struct urb *urb)
+{
+ struct sk_buff *skb = urb->context;
+ struct hci_dev *hdev = (struct hci_dev *)skb->dev;
+ struct btmtk_usb_data *data = hci_get_drvdata(hdev);
+
+ BT_DBG("%s: %s urb %p status %d count %d\n", __func__, hdev->name,
+ urb, urb->status, urb->actual_length);
+
+ if (!test_bit(HCI_RUNNING, &hdev->flags))
+ goto done;
+
+ if (!urb->status)
+ hdev->stat.byte_tx += urb->transfer_buffer_length;
+ else
+ hdev->stat.err_tx++;
+
+done:
+ spin_lock(&data->txlock);
+ data->tx_in_flight--;
+ spin_unlock(&data->txlock);
+
+ kfree(urb->setup_packet);
+
+ kfree_skb(skb);
+}
+
+static void btmtk_usb_isoc_tx_complete(struct urb *urb)
+{
+ struct sk_buff *skb = urb->context;
+ struct hci_dev *hdev = (struct hci_dev *) skb->dev;
+
+ BT_DBG("%s: %s urb %p status %d count %d", __func__, hdev->name,
+ urb, urb->status, urb->actual_length);
+
+ if (!test_bit(HCI_RUNNING, &hdev->flags))
+ goto done;
+
+ if (!urb->status)
+ hdev->stat.byte_tx += urb->transfer_buffer_length;
+ else
+ hdev->stat.err_tx++;
+
+done:
+ kfree(urb->setup_packet);
+
+ kfree_skb(skb);
+}
+
+static int btmtk_usb_send_frame(struct sk_buff *skb)
+{
+ struct hci_dev *hdev = (struct hci_dev *)skb->dev;
+ struct btmtk_usb_data *data = hci_get_drvdata(hdev);
+ struct usb_ctrlrequest *dr;
+ struct urb *urb;
+ unsigned int pipe;
+ int err;
+
+ BT_DBG("%s\n", __func__);
+
+ if (!test_bit(HCI_RUNNING, &hdev->flags))
+ return -EBUSY;
+
+ switch (bt_cb(skb)->pkt_type) {
+ case HCI_COMMAND_PKT:
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb)
+ return -ENOMEM;
+
+ dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
+ if (!dr) {
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ dr->bRequestType = data->cmdreq_type;
+ dr->bRequest = 0;
+ dr->wIndex = 0;
+ dr->wValue = 0;
+ dr->wLength = __cpu_to_le16(skb->len);
+
+ pipe = usb_sndctrlpipe(data->udev, 0x00);
+
+ if (test_bit(HCI_RUNNING, &hdev->flags)) {
+ u16 op_code;
+ memcpy(&op_code, skb->data, 2);
+ BT_DBG("ogf = %x\n", (op_code & 0xfc00) >> 10);
+ BT_DBG("ocf = %x\n", op_code & 0x03ff);
+ hex_dump("hci command", skb->data, skb->len);
+
+ }
+
+ usb_fill_control_urb(urb, data->udev, pipe, (void *) dr,
+ skb->data, skb->len, btmtk_usb_tx_complete, skb);
+
+ hdev->stat.cmd_tx++;
+ break;
+
+ case HCI_ACLDATA_PKT:
+ if (!data->bulk_tx_ep)
+ return -ENODEV;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb)
+ return -ENOMEM;
+
+ pipe = usb_sndbulkpipe(data->udev,
+ data->bulk_tx_ep->bEndpointAddress);
+
+ usb_fill_bulk_urb(urb, data->udev, pipe,
+ skb->data, skb->len, btmtk_usb_tx_complete, skb);
+
+ hdev->stat.acl_tx++;
+ BT_DBG("HCI_ACLDATA_PKT:\n");
+ break;
+
+ case HCI_SCODATA_PKT:
+ if (!data->isoc_tx_ep || hdev->conn_hash.sco_num < 1)
+ return -ENODEV;
+
+ urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_ATOMIC);
+ if (!urb)
+ return -ENOMEM;
+
+ pipe = usb_sndisocpipe(data->udev,
+ data->isoc_tx_ep->bEndpointAddress);
+
+ usb_fill_int_urb(urb, data->udev, pipe,
+ skb->data, skb->len, btmtk_usb_isoc_tx_complete,
+ skb, data->isoc_tx_ep->bInterval);
+
+ urb->transfer_flags = URB_ISO_ASAP;
+
+ __fill_isoc_descriptor(urb, skb->len,
+ le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize));
+
+ hdev->stat.sco_tx++;
+ BT_DBG("HCI_SCODATA_PKT:\n");
+ goto skip_waking;
+
+ default:
+ return -EILSEQ;
+ }
+
+ err = inc_tx(data);
+
+ if (err) {
+ usb_anchor_urb(urb, &data->deferred);
+ schedule_work(&data->waker);
+ err = 0;
+ goto done;
+ }
+
+skip_waking:
+ usb_anchor_urb(urb, &data->tx_anchor);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err < 0) {
+ if (err != -EPERM && err != -ENODEV)
+ BT_ERR("%s urb %p submission failed (%d)",
+ hdev->name, urb, -err);
+ kfree(urb->setup_packet);
+ usb_unanchor_urb(urb);
+ } else {
+ usb_mark_last_busy(data->udev);
+ }
+
+done:
+ usb_free_urb(urb);
+ return err;
+}
+
+static void btmtk_usb_notify(struct hci_dev *hdev, unsigned int evt)
+{
+ struct btmtk_usb_data *data = hci_get_drvdata(hdev);
+
+ BT_DBG("%s evt %d", hdev->name, evt);
+
+ if (hdev->conn_hash.sco_num != data->sco_num) {
+ data->sco_num = hdev->conn_hash.sco_num;
+ schedule_work(&data->work);
+ }
+}
+
+static inline int __set_isoc_interface(struct hci_dev *hdev, int altsetting)
+{
+ struct btmtk_usb_data *data = hci_get_drvdata(hdev);
+ struct usb_interface *intf = data->isoc;
+ struct usb_endpoint_descriptor *ep_desc;
+ int i, err;
+
+ if (!data->isoc)
+ return -ENODEV;
+
+ err = usb_set_interface(data->udev, 1, altsetting);
+ if (err < 0) {
+ BT_ERR("%s setting interface failed (%d)", hdev->name, -err);
+ return err;
+ }
+
+ data->isoc_altsetting = altsetting;
+
+ data->isoc_tx_ep = NULL;
+ data->isoc_rx_ep = NULL;
+
+ for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
+ ep_desc = &intf->cur_altsetting->endpoint[i].desc;
+
+ if (!data->isoc_tx_ep && usb_endpoint_is_isoc_out(ep_desc)) {
+ data->isoc_tx_ep = ep_desc;
+ continue;
+ }
+
+ if (!data->isoc_rx_ep && usb_endpoint_is_isoc_in(ep_desc)) {
+ data->isoc_rx_ep = ep_desc;
+ continue;
+ }
+ }
+
+ if (!data->isoc_tx_ep || !data->isoc_rx_ep) {
+ BT_ERR("%s invalid SCO descriptors", hdev->name);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void btmtk_usb_work(struct work_struct *work)
+{
+ struct btmtk_usb_data *data = container_of(work, struct btmtk_usb_data, work);
+ struct hci_dev *hdev = data->hdev;
+ int new_alts;
+ int err;
+
+ BT_DBG("%s\n", __func__);
+
+ if (hdev->conn_hash.sco_num > 0) {
+ if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) {
+ err = usb_autopm_get_interface(data->isoc ? data->isoc : data->intf);
+ if (err < 0) {
+ clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
+ usb_kill_anchored_urbs(&data->isoc_anchor);
+ return;
+ }
+
+ set_bit(BTUSB_DID_ISO_RESUME, &data->flags);
+ }
+
+ if (hdev->voice_setting & 0x0020) {
+ static const int alts[3] = { 2, 4, 5 };
+ new_alts = alts[hdev->conn_hash.sco_num - 1];
+ } else {
+ new_alts = hdev->conn_hash.sco_num;
+ }
+
+ if (data->isoc_altsetting != new_alts) {
+ clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
+ usb_kill_anchored_urbs(&data->isoc_anchor);
+
+ if (__set_isoc_interface(hdev, new_alts) < 0)
+ return;
+ }
+
+ if (!test_and_set_bit(BTUSB_ISOC_RUNNING, &data->flags)) {
+ if (btmtk_usb_submit_isoc_in_urb(hdev, GFP_KERNEL) < 0)
+ clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
+ else
+ btmtk_usb_submit_isoc_in_urb(hdev, GFP_KERNEL);
+ }
+ } else {
+ clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
+ usb_kill_anchored_urbs(&data->isoc_anchor);
+
+ __set_isoc_interface(hdev, 0);
+
+ if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags))
+ usb_autopm_put_interface(data->isoc ? data->isoc : data->intf);
+ }
+}
+
+static void btmtk_usb_waker(struct work_struct *work)
+{
+ struct btmtk_usb_data *data = container_of(work, struct btmtk_usb_data, waker);
+ int err;
+
+ err = usb_autopm_get_interface(data->intf);
+
+ if (err < 0)
+ return;
+
+ usb_autopm_put_interface(data->intf);
+}
+
+static int btmtk_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct btmtk_usb_data *data;
+ struct usb_endpoint_descriptor *ep_desc;
+ int i, err;
+ struct hci_dev *hdev;
+
+ /* interface numbers are hardcoded in the spec */
+ if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
+ return -ENODEV;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+
+ if (!data)
+ return -ENOMEM;
+
+ for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
+ ep_desc = &intf->cur_altsetting->endpoint[i].desc;
+
+ if (!data->intr_ep && usb_endpoint_is_int_in(ep_desc)) {
+ data->intr_ep = ep_desc;
+ continue;
+ }
+
+ if (!data->bulk_tx_ep && usb_endpoint_is_bulk_out(ep_desc)) {
+ data->bulk_tx_ep = ep_desc;
+ continue;
+ }
+
+ if (!data->bulk_rx_ep && usb_endpoint_is_bulk_in(ep_desc)) {
+ data->bulk_rx_ep = ep_desc;
+ continue;
+ }
+ }
+
+ if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep) {
+ kfree(data);
+ return -ENODEV;
+ }
+
+ data->cmdreq_type = USB_TYPE_CLASS;
+
+ data->udev = interface_to_usbdev(intf);
+ data->intf = intf;
+
+ spin_lock_init(&data->lock);
+ INIT_WORK(&data->work, btmtk_usb_work);
+ INIT_WORK(&data->waker, btmtk_usb_waker);
+ spin_lock_init(&data->txlock);
+
+ init_usb_anchor(&data->tx_anchor);
+ init_usb_anchor(&data->intr_anchor);
+ init_usb_anchor(&data->bulk_anchor);
+ init_usb_anchor(&data->isoc_anchor);
+ init_usb_anchor(&data->deferred);
+
+ hdev = hci_alloc_dev();
+ if (!hdev) {
+ kfree(data);
+ return -ENOMEM;
+ }
+
+ hdev->bus = HCI_USB;
+
+ hci_set_drvdata(hdev, data);
+
+ data->hdev = hdev;
+
+ SET_HCIDEV_DEV(hdev, &intf->dev);
+
+ hdev->open = btmtk_usb_open;
+ hdev->close = btmtk_usb_close;
+ hdev->flush = btmtk_usb_flush;
+ hdev->send = btmtk_usb_send_frame;
+ hdev->notify = btmtk_usb_notify;
+
+ /* Interface numbers are hardcoded in the specification */
+ data->isoc = usb_ifnum_to_if(data->udev, 1);
+
+ if (data->isoc) {
+ err = usb_driver_claim_interface(&btmtk_usb_driver,
+ data->isoc, data);
+ if (err < 0) {
+ hci_free_dev(hdev);
+ kfree(data);
+ return err;
+ }
+ }
+
+ data->io_buf = kmalloc(256, GFP_KERNEL);
+ if (!data->io_buf) {
+ hci_free_dev(hdev);
+ kfree(data);
+ return -ENOMEM;
+ }
+
+ btmtk_usb_switch_iobase(data, WLAN);
+
+ btmtk_usb_cap_init(data);
+
+ err = hci_register_dev(hdev);
+ if (err < 0) {
+ hci_free_dev(hdev);
+ kfree(data);
+ return err;
+ }
+
+ usb_set_intfdata(intf, data);
+
+ return 0;
+}
+
+static void btmtk_usb_disconnect(struct usb_interface *intf)
+{
+ struct btmtk_usb_data *data = usb_get_intfdata(intf);
+ struct hci_dev *hdev;
+
+ BT_DBG("%s\n", __func__);
+
+ if (!data)
+ return;
+
+ hdev = data->hdev;
+ usb_set_intfdata(data->intf, NULL);
+
+ if (data->isoc)
+ usb_set_intfdata(data->isoc, NULL);
+
+ hci_unregister_dev(hdev);
+
+ if (intf == data->isoc)
+ usb_driver_release_interface(&btmtk_usb_driver, data->intf);
+ else if (data->isoc)
+ usb_driver_release_interface(&btmtk_usb_driver, data->isoc);
+
+ hci_free_dev(hdev);
+
+ kfree(data->io_buf);
+
+ kfree(data);
+}
+
+#ifdef CONFIG_PM
+static int btmtk_usb_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct btmtk_usb_data *data = usb_get_intfdata(intf);
+
+ BT_DBG("%s\n", __func__);
+
+ if (data->suspend_count++)
+ return 0;
+
+ spin_lock_irq(&data->txlock);
+ if (!(PMSG_IS_AUTO(message) && data->tx_in_flight)) {
+ set_bit(BTUSB_SUSPENDING, &data->flags);
+ spin_unlock_irq(&data->txlock);
+ } else {
+ spin_unlock_irq(&data->txlock);
+ data->suspend_count--;
+ return -EBUSY;
+ }
+
+ cancel_work_sync(&data->work);
+
+ btmtk_usb_stop_traffic(data);
+ usb_kill_anchored_urbs(&data->tx_anchor);
+
+ return 0;
+}
+
+static void play_deferred(struct btmtk_usb_data *data)
+{
+ struct urb *urb;
+ int err;
+
+ while ((urb = usb_get_from_anchor(&data->deferred))) {
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err < 0)
+ break;
+
+ data->tx_in_flight++;
+ }
+
+ usb_scuttle_anchored_urbs(&data->deferred);
+}
+
+static int btmtk_usb_resume(struct usb_interface *intf)
+{
+ struct btmtk_usb_data *data = usb_get_intfdata(intf);
+ struct hci_dev *hdev = data->hdev;
+ int err = 0;
+
+ BT_DBG("%s\n", __func__);
+
+ if (--data->suspend_count)
+ return 0;
+
+ if (!test_bit(HCI_RUNNING, &hdev->flags))
+ goto done;
+
+ if (test_bit(BTUSB_INTR_RUNNING, &data->flags)) {
+ err = btmtk_usb_submit_intr_urb(hdev, GFP_NOIO);
+ if (err < 0) {
+ clear_bit(BTUSB_INTR_RUNNING, &data->flags);
+ goto failed;
+ }
+ }
+
+ if (test_bit(BTUSB_BULK_RUNNING, &data->flags)) {
+ err = btmtk_usb_submit_bulk_in_urb(hdev, GFP_NOIO);
+ if (err < 0) {
+ clear_bit(BTUSB_BULK_RUNNING, &data->flags);
+ goto failed;
+ }
+
+ btmtk_usb_submit_bulk_in_urb(hdev, GFP_NOIO);
+ }
+
+ if (test_bit(BTUSB_ISOC_RUNNING, &data->flags)) {
+ if (btmtk_usb_submit_isoc_in_urb(hdev, GFP_NOIO) < 0)
+ clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
+ else
+ btmtk_usb_submit_isoc_in_urb(hdev, GFP_NOIO);
+ }
+
+ spin_lock_irq(&data->txlock);
+ play_deferred(data);
+ clear_bit(BTUSB_SUSPENDING, &data->flags);
+ spin_unlock_irq(&data->txlock);
+ schedule_work(&data->work);
+
+ return 0;
+
+failed:
+ usb_scuttle_anchored_urbs(&data->deferred);
+done:
+ spin_lock_irq(&data->txlock);
+ clear_bit(BTUSB_SUSPENDING, &data->flags);
+ spin_unlock_irq(&data->txlock);
+
+ return err;
+}
+#endif
+
+static struct usb_device_id btmtk_usb_table[] = {
+ /* Mediatek MT7650 */
+ { USB_DEVICE(0x0e8d, 0x7650) },
+ { USB_DEVICE(0x0e8d, 0x7630) },
+ { USB_DEVICE(0x0e8d, 0x763e) },
+ /* Mediatek MT7662 */
+ { USB_DEVICE(0x0e8d, 0x7662) },
+ { USB_DEVICE(0x0e8d, 0x7632) },
+ { } /* Terminating entry */
+};
+
+static struct usb_driver btmtk_usb_driver = {
+ .name = "btmtk_usb",
+ .probe = btmtk_usb_probe,
+ .disconnect = btmtk_usb_disconnect,
+#ifdef CONFIG_PM
+ .suspend = btmtk_usb_suspend,
+ .resume = btmtk_usb_resume,
+#endif
+ .id_table = btmtk_usb_table,
+ .supports_autosuspend = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+
+module_usb_driver(btmtk_usb_driver);
+
+MODULE_DESCRIPTION("Mediatek Bluetooth USB driver ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(MT7650_FIRMWARE);
+MODULE_FIRMWARE(MT7662_FIRMWARE);
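A brief aside on the command path in btmtk_usb_send_frame() above: the OGF/OCF values it logs are the opcode group field and opcode command field packed into the first two bytes of every HCI command. A minimal, self-contained sketch of that split (the helper name and the pr_debug() output format are illustrative, not taken from the patch):

    #include <linux/types.h>
    #include <linux/printk.h>

    /* Illustration only: an HCI command begins with a little-endian 16-bit
     * opcode; the upper 6 bits are the OGF, the lower 10 bits the OCF.
     */
    static void hci_opcode_split_example(const u8 *hdr)
    {
            u16 op_code = hdr[0] | (hdr[1] << 8);   /* assemble the LE opcode */
            u16 ogf = (op_code & 0xfc00) >> 10;     /* opcode group field */
            u16 ocf = op_code & 0x03ff;             /* opcode command field */

            pr_debug("ogf=0x%02x ocf=0x%03x\n", ogf, ocf);
    }

The driver itself copies the opcode straight out of skb->data with memcpy(), which gives the same result on little-endian hosts; the shift and mask pair is identical either way.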
diff --git a/drivers/staging/btmtk_usb/btmtk_usb.h b/drivers/staging/btmtk_usb/btmtk_usb.h
new file mode 100644
index 0000000..12f0d3b
--- /dev/null
+++ b/drivers/staging/btmtk_usb/btmtk_usb.h
@@ -0,0 +1,138 @@
+/*
+ * MediaTek Bluetooth USB Driver
+ *
+ * Copyright (C) 2013, MediaTek co.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * or on the worldwide web at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ */
+
+#ifndef __BTMTK_USB_H__
+#define __BTMTK_USB_H__
+
+/* Memory map for MTK BT */
+
+/* SYS Control */
+#define SYSCTL 0x400000
+
+/* WLAN */
+#define WLAN 0x410000
+
+/* MCUCTL */
+#define INT_LEVEL 0x0718
+#define COM_REG0 0x0730
+#define SEMAPHORE_00 0x07B0
+#define SEMAPHORE_01 0x07B4
+#define SEMAPHORE_02 0x07B8
+#define SEMAPHORE_03 0x07BC
+
+/* Chip definition */
+
+#define CONTROL_TIMEOUT_JIFFIES ((300 * HZ) / 100)
+#define DEVICE_VENDOR_REQUEST_OUT 0x40
+#define DEVICE_VENDOR_REQUEST_IN 0xc0
+#define DEVICE_CLASS_REQUEST_OUT 0x20
+
+#define BTUSB_MAX_ISOC_FRAMES 10
+#define BTUSB_INTR_RUNNING 0
+#define BTUSB_BULK_RUNNING 1
+#define BTUSB_ISOC_RUNNING 2
+#define BTUSB_SUSPENDING 3
+#define BTUSB_DID_ISO_RESUME 4
+
+/* ROM Patch */
+#define PATCH_HCI_HEADER_SIZE 4
+#define PATCH_WMT_HEADER_SIZE 5
+#define PATCH_HEADER_SIZE (PATCH_HCI_HEADER_SIZE + PATCH_WMT_HEADER_SIZE)
+#define UPLOAD_PATCH_UNIT 2048
+#define PATCH_INFO_SIZE 30
+#define PATCH_PHASE1 1
+#define PATCH_PHASE2 2
+#define PATCH_PHASE3 3
+
+struct btmtk_usb_data {
+ struct hci_dev *hdev;
+ struct usb_device *udev;
+ struct usb_interface *intf;
+ struct usb_interface *isoc;
+
+ spinlock_t lock;
+
+ unsigned long flags;
+ struct work_struct work;
+ struct work_struct waker;
+
+ struct usb_anchor tx_anchor;
+ struct usb_anchor intr_anchor;
+ struct usb_anchor bulk_anchor;
+ struct usb_anchor isoc_anchor;
+ struct usb_anchor deferred;
+ int tx_in_flight;
+ spinlock_t txlock;
+
+ struct usb_endpoint_descriptor *intr_ep;
+ struct usb_endpoint_descriptor *bulk_tx_ep;
+ struct usb_endpoint_descriptor *bulk_rx_ep;
+ struct usb_endpoint_descriptor *isoc_tx_ep;
+ struct usb_endpoint_descriptor *isoc_rx_ep;
+
+ __u8 cmdreq_type;
+
+ unsigned int sco_num;
+ int isoc_altsetting;
+ int suspend_count;
+
+ /* request for different io operation */
+ u8 w_request;
+ u8 r_request;
+
+ /* io buffer for usb control transfer */
+ char *io_buf;
+
+ struct semaphore fw_upload_sem;
+
+ /* unsigned char *fw_image; */
+ /* unsigned char *rom_patch; */
+ const struct firmware *firmware;
+ u32 chip_id;
+ u8 need_load_fw;
+ u8 need_load_rom_patch;
+ u32 rom_patch_offset;
+ u32 rom_patch_len;
+};
+
+static inline int is_mt7630(struct btmtk_usb_data *data)
+{
+ return ((data->chip_id & 0xffff0000) == 0x76300000);
+}
+
+static inline int is_mt7650(struct btmtk_usb_data *data)
+{
+ return ((data->chip_id & 0xffff0000) == 0x76500000);
+}
+
+static inline int is_mt7632(struct btmtk_usb_data *data)
+{
+ return ((data->chip_id & 0xffff0000) == 0x76320000);
+}
+
+static inline int is_mt7662(struct btmtk_usb_data *data)
+{
+ return ((data->chip_id & 0xffff0000) == 0x76620000);
+}
+
+#endif
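The is_mt76xx() helpers above key on the top 16 bits of chip_id. As a hedged sketch of how a caller might map that chip family onto the firmware blobs declared at the end of btmtk_usb.c (the function name and the mapping are assumptions for illustration, not the driver's own load path):

    /* Illustration only: choose a firmware name by chip family. */
    static const char *btmtk_usb_fw_name_example(struct btmtk_usb_data *data)
    {
            if (is_mt7650(data) || is_mt7630(data))
                    return MT7650_FIRMWARE;
            if (is_mt7662(data) || is_mt7632(data))
                    return MT7662_FIRMWARE;
            return NULL;    /* unknown chip family */
    }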
diff --git a/drivers/staging/ced1401/ced_ioc.c b/drivers/staging/ced1401/ced_ioc.c
index 82a333f..2dbaf39e 100644
--- a/drivers/staging/ced1401/ced_ioc.c
+++ b/drivers/staging/ced1401/ced_ioc.c
@@ -37,13 +37,14 @@
**
** Empties the Output buffer and sets int lines. Used from user level only
****************************************************************************/
-void FlushOutBuff(DEVICE_EXTENSION * pdx)
+static void FlushOutBuff(DEVICE_EXTENSION *pdx)
{
dev_dbg(&pdx->interface->dev, "%s currentState=%d", __func__,
pdx->sCurrentState);
if (pdx->sCurrentState == U14ERR_TIME) /* Do nothing if hardware in trouble */
return;
-// CharSend_Cancel(pdx); /* Kill off any pending I/O */
+ /* Kill off any pending I/O */
+ /* CharSend_Cancel(pdx); */
spin_lock_irq(&pdx->charOutLock);
pdx->dwNumOutput = 0;
pdx->dwOutBuffGet = 0;
@@ -57,13 +58,14 @@ void FlushOutBuff(DEVICE_EXTENSION * pdx)
**
** Empties the input buffer and sets int lines
****************************************************************************/
-void FlushInBuff(DEVICE_EXTENSION * pdx)
+static void FlushInBuff(DEVICE_EXTENSION *pdx)
{
dev_dbg(&pdx->interface->dev, "%s currentState=%d", __func__,
pdx->sCurrentState);
if (pdx->sCurrentState == U14ERR_TIME) /* Do nothing if hardware in trouble */
return;
-// CharRead_Cancel(pDevObject); /* Kill off any pending I/O */
+ /* Kill off any pending I/O */
+ /* CharRead_Cancel(pDevObject); */
spin_lock_irq(&pdx->charInLock);
pdx->dwNumInput = 0;
pdx->dwInBuffGet = 0;
@@ -77,11 +79,11 @@ void FlushInBuff(DEVICE_EXTENSION * pdx)
** Utility routine to copy chars into the output buffer and fire them off.
** called from user mode, holds charOutLock.
****************************************************************************/
-static int PutChars(DEVICE_EXTENSION * pdx, const char *pCh,
+static int PutChars(DEVICE_EXTENSION *pdx, const char *pCh,
unsigned int uCount)
{
int iReturn;
- spin_lock_irq(&pdx->charOutLock); // get the output spin lock
+ spin_lock_irq(&pdx->charOutLock); /* get the output spin lock */
if ((OUTBUF_SZ - pdx->dwNumOutput) >= uCount) {
unsigned int u;
for (u = 0; u < uCount; u++) {
@@ -91,9 +93,9 @@ static int PutChars(DEVICE_EXTENSION * pdx, const char *pCh,
}
pdx->dwNumOutput += uCount;
spin_unlock_irq(&pdx->charOutLock);
- iReturn = SendChars(pdx); // ...give a chance to transmit data
+ iReturn = SendChars(pdx); /* ...give a chance to transmit data */
} else {
- iReturn = U14ERR_NOOUT; // no room at the out (ha-ha)
+ iReturn = U14ERR_NOOUT; /* no room at the out (ha-ha) */
spin_unlock_irq(&pdx->charOutLock);
}
return iReturn;
@@ -104,26 +106,25 @@ static int PutChars(DEVICE_EXTENSION * pdx, const char *pCh,
** trigger an output transfer if this is appropriate. User mode.
** Holds the io_mutex
*****************************************************************************/
-int SendString(DEVICE_EXTENSION * pdx, const char __user * pData,
+int SendString(DEVICE_EXTENSION *pdx, const char __user *pData,
unsigned int n)
{
- int iReturn = U14ERR_NOERROR; // assume all will be well
- char buffer[OUTBUF_SZ + 1]; // space in our address space for characters
- if (n > OUTBUF_SZ) // check space in local buffer...
- return U14ERR_NOOUT; // ...too many characters
+ int iReturn = U14ERR_NOERROR; /* assume all will be well */
+ char buffer[OUTBUF_SZ + 1]; /* space in our address space for characters */
+ if (n > OUTBUF_SZ) /* check space in local buffer... */
+ return U14ERR_NOOUT; /* ...too many characters */
if (copy_from_user(buffer, pData, n))
return -EFAULT;
- buffer[n] = 0; // terminate for debug purposes
+ buffer[n] = 0; /* terminate for debug purposes */
- mutex_lock(&pdx->io_mutex); // Protect disconnect from new i/o
- if (n > 0) // do nothing if nowt to do!
- {
+ mutex_lock(&pdx->io_mutex); /* Protect disconnect from new i/o */
+ if (n > 0) { /* do nothing if nowt to do! */
dev_dbg(&pdx->interface->dev, "%s n=%d>%s<", __func__, n,
buffer);
iReturn = PutChars(pdx, buffer, n);
}
- Allowi(pdx); // make sure we have input int
+ Allowi(pdx); /* make sure we have input int */
mutex_unlock(&pdx->io_mutex);
return iReturn;
@@ -134,13 +135,13 @@ int SendString(DEVICE_EXTENSION * pdx, const char __user * pData,
**
** Sends a single character to the 1401. User mode, holds io_mutex.
****************************************************************************/
-int SendChar(DEVICE_EXTENSION * pdx, char c)
+int SendChar(DEVICE_EXTENSION *pdx, char c)
{
int iReturn;
- mutex_lock(&pdx->io_mutex); // Protect disconnect from new i/o
+ mutex_lock(&pdx->io_mutex); /* Protect disconnect from new i/o */
iReturn = PutChars(pdx, &c, 1);
dev_dbg(&pdx->interface->dev, "SendChar >%c< (0x%02x)", c, c);
- Allowi(pdx); // Make sure char reads are running
+ Allowi(pdx); /* Make sure char reads are running */
mutex_unlock(&pdx->io_mutex);
return iReturn;
}
@@ -171,20 +172,20 @@ int SendChar(DEVICE_EXTENSION * pdx, char c)
**
** return error code (U14ERR_NOERROR for OK)
*/
-int Get1401State(DEVICE_EXTENSION * pdx, __u32 * state, __u32 * error)
+int Get1401State(DEVICE_EXTENSION *pdx, __u32 *state, __u32 *error)
{
int nGot;
dev_dbg(&pdx->interface->dev, "Get1401State() entry");
- *state = 0xFFFFFFFF; // Start off with invalid state
+ *state = 0xFFFFFFFF; /* Start off with invalid state */
nGot = usb_control_msg(pdx->udev, usb_rcvctrlpipe(pdx->udev, 0),
GET_STATUS, (D_TO_H | VENDOR | DEVREQ), 0, 0,
pdx->statBuf, sizeof(pdx->statBuf), HZ);
if (nGot != sizeof(pdx->statBuf)) {
dev_err(&pdx->interface->dev,
"Get1401State() FAILED, return code %d", nGot);
- pdx->sCurrentState = U14ERR_TIME; // Indicate that things are very wrong indeed
- *state = 0; // Force status values to a known state
+ pdx->sCurrentState = U14ERR_TIME; /* Indicate that things are very wrong indeed */
+ *state = 0; /* Force status values to a known state */
*error = 0;
} else {
int nDevice;
@@ -192,17 +193,16 @@ int Get1401State(DEVICE_EXTENSION * pdx, __u32 * state, __u32 * error)
"Get1401State() Success, state: 0x%x, 0x%x",
pdx->statBuf[0], pdx->statBuf[1]);
- *state = pdx->statBuf[0]; // Return the state values to the calling code
+ *state = pdx->statBuf[0]; /* Return the state values to the calling code */
*error = pdx->statBuf[1];
- nDevice = pdx->udev->descriptor.bcdDevice >> 8; // 1401 type code value
- switch (nDevice) // so we can clean up current state
- {
+ nDevice = pdx->udev->descriptor.bcdDevice >> 8; /* 1401 type code value */
+ switch (nDevice) { /* so we can clean up current state */
case 0:
pdx->sCurrentState = U14ERR_U1401;
break;
- default: // allow lots of device codes for future 1401s
+ default: /* allow lots of device codes for future 1401s */
if ((nDevice >= 1) && (nDevice <= 23))
pdx->sCurrentState = (short)(nDevice + 6);
else
@@ -219,7 +219,7 @@ int Get1401State(DEVICE_EXTENSION * pdx, __u32 * state, __u32 * error)
**
** Kills off staged read\write request from the USB if one is pending.
****************************************************************************/
-int ReadWrite_Cancel(DEVICE_EXTENSION * pdx)
+int ReadWrite_Cancel(DEVICE_EXTENSION *pdx)
{
dev_dbg(&pdx->interface->dev, "ReadWrite_Cancel entry %d",
pdx->bStagedUrbPending);
@@ -227,24 +227,23 @@ int ReadWrite_Cancel(DEVICE_EXTENSION * pdx)
int ntStatus = STATUS_SUCCESS;
bool bResult = false;
unsigned int i;
- // We can fill this in when we know how we will implement the staged transfer stuff
+ /* We can fill this in when we know how we will implement the staged transfer stuff */
spin_lock_irq(&pdx->stagedLock);
- if (pdx->bStagedUrbPending) // anything to be cancelled? May need more...
- {
+ if (pdx->bStagedUrbPending) { /* anything to be cancelled? May need more... */
dev_info(&pdx->interface->dev,
"ReadWrite_Cancel about to cancel Urb");
-
- // KeClearEvent(&pdx->StagingDoneEvent); // Clear the staging done flag
+ /* Clear the staging done flag */
+ /* KeClearEvent(&pdx->StagingDoneEvent); */
USB_ASSERT(pdx->pStagedIrp != NULL);
- // Release the spinlock first otherwise the completion routine may hang
- // on the spinlock while this function hangs waiting for the event.
+ /* Release the spinlock first otherwise the completion routine may hang */
+ /* on the spinlock while this function hangs waiting for the event. */
spin_unlock_irq(&pdx->stagedLock);
- bResult = IoCancelIrp(pdx->pStagedIrp); // Actually do the cancel
+ bResult = IoCancelIrp(pdx->pStagedIrp); /* Actually do the cancel */
if (bResult) {
LARGE_INTEGER timeout;
- timeout.QuadPart = -10000000; // Use a timeout of 1 second
+ timeout.QuadPart = -10000000; /* Use a timeout of 1 second */
dev_info(&pdx->interface->dev,
"ReadWrite_Cancel about to wait till done");
ntStatus =
@@ -274,14 +273,14 @@ int ReadWrite_Cancel(DEVICE_EXTENSION * pdx)
** InSelfTest - utility to check in self test. Return 1 for ST, 0 for not or
** a -ve error code if we failed for some reason.
***************************************************************************/
-static int InSelfTest(DEVICE_EXTENSION * pdx, unsigned int *pState)
+static int InSelfTest(DEVICE_EXTENSION *pdx, unsigned int *pState)
{
unsigned int state, error;
- int iReturn = Get1401State(pdx, &state, &error); // see if in self-test
- if (iReturn == U14ERR_NOERROR) // if all still OK
- iReturn = (state == (unsigned int)-1) || // TX problem or...
- ((state & 0xff) == 0x80); // ...self test
- *pState = state; // return actual state
+ int iReturn = Get1401State(pdx, &state, &error); /* see if in self-test */
+ if (iReturn == U14ERR_NOERROR) /* if all still OK */
+ iReturn = (state == (unsigned int)-1) || /* TX problem or... */
+ ((state & 0xff) == 0x80); /* ...self test */
+ *pState = state; /* return actual state */
return iReturn;
}
@@ -303,48 +302,45 @@ static int InSelfTest(DEVICE_EXTENSION * pdx, unsigned int *pState)
**
** Returns TRUE if a 1401 detected and OK, else FALSE
****************************************************************************/
-bool Is1401(DEVICE_EXTENSION * pdx)
+bool Is1401(DEVICE_EXTENSION *pdx)
{
int iReturn;
dev_dbg(&pdx->interface->dev, "%s", __func__);
- ced_draw_down(pdx); // wait for, then kill outstanding Urbs
- FlushInBuff(pdx); // Clear out input buffer & pipe
- FlushOutBuff(pdx); // Clear output buffer & pipe
+ ced_draw_down(pdx); /* wait for, then kill outstanding Urbs */
+ FlushInBuff(pdx); /* Clear out input buffer & pipe */
+ FlushOutBuff(pdx); /* Clear output buffer & pipe */
- // The next call returns 0 if OK, but has returned 1 in the past, meaning that
- // usb_unlock_device() is needed... now it always is
+ /* The next call returns 0 if OK, but has returned 1 in the past, meaning that */
+ /* usb_unlock_device() is needed... now it always is */
iReturn = usb_lock_device_for_reset(pdx->udev, pdx->interface);
- // release the io_mutex because if we don't, we will deadlock due to system
- // calls back into the driver.
- mutex_unlock(&pdx->io_mutex); // locked, so we will not get system calls
- if (iReturn >= 0) // if we failed
- {
- iReturn = usb_reset_device(pdx->udev); // try to do the reset
- usb_unlock_device(pdx->udev); // undo the lock
+ /* release the io_mutex because if we don't, we will deadlock due to system */
+ /* calls back into the driver. */
+ mutex_unlock(&pdx->io_mutex); /* locked, so we will not get system calls */
+ if (iReturn >= 0) { /* if we failed */
+ iReturn = usb_reset_device(pdx->udev); /* try to do the reset */
+ usb_unlock_device(pdx->udev); /* undo the lock */
}
- mutex_lock(&pdx->io_mutex); // hold stuff off while we wait
- pdx->dwDMAFlag = MODE_CHAR; // Clear DMA mode flag regardless!
- if (iReturn == 0) // if all is OK still
- {
+ mutex_lock(&pdx->io_mutex); /* hold stuff off while we wait */
+ pdx->dwDMAFlag = MODE_CHAR; /* Clear DMA mode flag regardless! */
+ if (iReturn == 0) { /* if all is OK still */
unsigned int state;
- iReturn = InSelfTest(pdx, &state); // see if likely in self test
- if (iReturn > 0) // do we need to wait for self-test?
- {
- unsigned long ulTimeOut = jiffies + 30 * HZ; // when to give up
+ iReturn = InSelfTest(pdx, &state); /* see if likely in self test */
+ if (iReturn > 0) { /* do we need to wait for self-test? */
+ unsigned long ulTimeOut = jiffies + 30 * HZ; /* when to give up */
while ((iReturn > 0) && time_before(jiffies, ulTimeOut)) {
- schedule(); // let other stuff run
- iReturn = InSelfTest(pdx, &state); // see if done yet
+ schedule(); /* let other stuff run */
+ iReturn = InSelfTest(pdx, &state); /* see if done yet */
}
}
- if (iReturn == 0) // if all is OK...
- iReturn = state == 0; // then success is that the state is 0
+ if (iReturn == 0) /* if all is OK... */
+ iReturn = state == 0; /* then success is that the state is 0 */
} else
- iReturn = 0; // we failed
- pdx->bForceReset = false; // Clear forced reset flag now
+ iReturn = 0; /* we failed */
+ pdx->bForceReset = false; /* Clear forced reset flag now */
return iReturn > 0;
}
@@ -363,45 +359,42 @@ bool Is1401(DEVICE_EXTENSION * pdx)
**
** The return value is TRUE if a useable 1401 is found, FALSE if not
*/
-bool QuickCheck(DEVICE_EXTENSION * pdx, bool bTestBuff, bool bCanReset)
+bool QuickCheck(DEVICE_EXTENSION *pdx, bool bTestBuff, bool bCanReset)
{
- bool bRet = false; // assume it will fail and we will reset
+ bool bRet = false; /* assume it will fail and we will reset */
bool bShortTest;
- bShortTest = ((pdx->dwDMAFlag == MODE_CHAR) && // no DMA running
- (!pdx->bForceReset) && // Not had a real reset forced
- (pdx->sCurrentState >= U14ERR_STD)); // No 1401 errors stored
+ bShortTest = ((pdx->dwDMAFlag == MODE_CHAR) && /* no DMA running */
+ (!pdx->bForceReset) && /* Not had a real reset forced */
+ (pdx->sCurrentState >= U14ERR_STD)); /* No 1401 errors stored */
dev_dbg(&pdx->interface->dev,
"%s DMAFlag:%d, state:%d, force:%d, testBuff:%d, short:%d",
__func__, pdx->dwDMAFlag, pdx->sCurrentState, pdx->bForceReset,
bTestBuff, bShortTest);
- if ((bTestBuff) && // Buffer check requested, and...
- (pdx->dwNumInput || pdx->dwNumOutput)) // ...characters were in the buffer?
- {
- bShortTest = false; // Then do the full test
+ if ((bTestBuff) && /* Buffer check requested, and... */
+ (pdx->dwNumInput || pdx->dwNumOutput)) { /* ...characters were in the buffer? */
+ bShortTest = false; /* Then do the full test */
dev_dbg(&pdx->interface->dev,
"%s will reset as buffers not empty", __func__);
}
- if (bShortTest || !bCanReset) // Still OK to try the short test?
- { // Always test if no reset - we want state update
+ if (bShortTest || !bCanReset) { /* Still OK to try the short test? */
+ /* Always test if no reset - we want state update */
unsigned int state, error;
dev_dbg(&pdx->interface->dev, "%s->Get1401State", __func__);
- if (Get1401State(pdx, &state, &error) == U14ERR_NOERROR) // Check on the 1401 state
- {
- if ((state & 0xFF) == 0) // If call worked, check the status value
- bRet = true; // If that was zero, all is OK, no reset needed
+ if (Get1401State(pdx, &state, &error) == U14ERR_NOERROR) { /* Check on the 1401 state */
+ if ((state & 0xFF) == 0) /* If call worked, check the status value */
+ bRet = true; /* If that was zero, all is OK, no reset needed */
}
}
- if (!bRet && bCanReset) // If all not OK, then
- {
+ if (!bRet && bCanReset) { /* If all not OK, then */
dev_info(&pdx->interface->dev, "%s->Is1401 %d %d %d %d",
__func__, bShortTest, pdx->sCurrentState, bTestBuff,
pdx->bForceReset);
- bRet = Is1401(pdx); // do full test
+ bRet = Is1401(pdx); /* do full test */
}
return bRet;
@@ -412,11 +405,11 @@ bool QuickCheck(DEVICE_EXTENSION * pdx, bool bTestBuff, bool bCanReset)
**
** Resets the 1401 and empties the i/o buffers
*****************************************************************************/
-int Reset1401(DEVICE_EXTENSION * pdx)
+int Reset1401(DEVICE_EXTENSION *pdx)
{
- mutex_lock(&pdx->io_mutex); // Protect disconnect from new i/o
+ mutex_lock(&pdx->io_mutex); /* Protect disconnect from new i/o */
dev_dbg(&pdx->interface->dev, "About to call QuickCheck");
- QuickCheck(pdx, true, true); // Check 1401, reset if not OK
+ QuickCheck(pdx, true, true); /* Check 1401, reset if not OK */
mutex_unlock(&pdx->io_mutex);
return U14ERR_NOERROR;
}
@@ -426,30 +419,29 @@ int Reset1401(DEVICE_EXTENSION * pdx)
**
** Gets a single character from the 1401
****************************************************************************/
-int GetChar(DEVICE_EXTENSION * pdx)
+int GetChar(DEVICE_EXTENSION *pdx)
{
- int iReturn = U14ERR_NOIN; // assume we will get nothing
- mutex_lock(&pdx->io_mutex); // Protect disconnect from new i/o
+ int iReturn = U14ERR_NOIN; /* assume we will get nothing */
+ mutex_lock(&pdx->io_mutex); /* Protect disconnect from new i/o */
dev_dbg(&pdx->interface->dev, "GetChar");
- Allowi(pdx); // Make sure char reads are running
- SendChars(pdx); // and send any buffered chars
+ Allowi(pdx); /* Make sure char reads are running */
+ SendChars(pdx); /* and send any buffered chars */
spin_lock_irq(&pdx->charInLock);
- if (pdx->dwNumInput > 0) // worth looking
- {
+ if (pdx->dwNumInput > 0) { /* worth looking */
iReturn = pdx->inputBuffer[pdx->dwInBuffGet++];
if (pdx->dwInBuffGet >= INBUF_SZ)
pdx->dwInBuffGet = 0;
pdx->dwNumInput--;
} else
- iReturn = U14ERR_NOIN; // no input data to read
+ iReturn = U14ERR_NOIN; /* no input data to read */
spin_unlock_irq(&pdx->charInLock);
- Allowi(pdx); // Make sure char reads are running
+ Allowi(pdx); /* Make sure char reads are running */
- mutex_unlock(&pdx->io_mutex); // Protect disconnect from new i/o
+ mutex_unlock(&pdx->io_mutex); /* Protect disconnect from new i/o */
return iReturn;
}
@@ -464,46 +456,43 @@ int GetChar(DEVICE_EXTENSION * pdx)
** returns the count of characters (including the terminator, or 0 if none
** or a negative error code.
****************************************************************************/
-int GetString(DEVICE_EXTENSION * pdx, char __user * pUser, int n)
+int GetString(DEVICE_EXTENSION *pdx, char __user *pUser, int n)
{
- int nAvailable; // character in the buffer
+ int nAvailable; /* character in the buffer */
int iReturn = U14ERR_NOIN;
if (n <= 0)
return -ENOMEM;
- mutex_lock(&pdx->io_mutex); // Protect disconnect from new i/o
- Allowi(pdx); // Make sure char reads are running
- SendChars(pdx); // and send any buffered chars
+ mutex_lock(&pdx->io_mutex); /* Protect disconnect from new i/o */
+ Allowi(pdx); /* Make sure char reads are running */
+ SendChars(pdx); /* and send any buffered chars */
spin_lock_irq(&pdx->charInLock);
- nAvailable = pdx->dwNumInput; // characters available now
- if (nAvailable > n) // read max of space in pUser...
- nAvailable = n; // ...or input characters
+ nAvailable = pdx->dwNumInput; /* characters available now */
+ if (nAvailable > n) /* read max of space in pUser... */
+ nAvailable = n; /* ...or input characters */
- if (nAvailable > 0) // worth looking?
- {
- char buffer[INBUF_SZ + 1]; // space for a linear copy of data
+ if (nAvailable > 0) { /* worth looking? */
+ char buffer[INBUF_SZ + 1]; /* space for a linear copy of data */
int nGot = 0;
- int nCopyToUser; // number to copy to user
+ int nCopyToUser; /* number to copy to user */
char cData;
do {
cData = pdx->inputBuffer[pdx->dwInBuffGet++];
- if (cData == CR_CHAR) // replace CR with zero
+ if (cData == CR_CHAR) /* replace CR with zero */
cData = (char)0;
if (pdx->dwInBuffGet >= INBUF_SZ)
- pdx->dwInBuffGet = 0; // wrap buffer pointer
+ pdx->dwInBuffGet = 0; /* wrap buffer pointer */
- buffer[nGot++] = cData; // save the output
- }
- while ((nGot < nAvailable) && cData);
-
- nCopyToUser = nGot; // what to copy...
- if (cData) // do we need null
- {
- buffer[nGot] = (char)0; // make it tidy
- if (nGot < n) // if space in user buffer...
- ++nCopyToUser; // ...copy the 0 as well.
+ buffer[nGot++] = cData; /* save the output */
+ } while ((nGot < nAvailable) && cData);
+
+ nCopyToUser = nGot; /* what to copy... */
+ if (cData) { /* do we need null */
+ buffer[nGot] = (char)0; /* make it tidy */
+ if (nGot < n) /* if space in user buffer... */
+ ++nCopyToUser; /* ...copy the 0 as well. */
}
pdx->dwNumInput -= nGot;
@@ -514,12 +503,12 @@ int GetString(DEVICE_EXTENSION * pdx, char __user * pUser, int n)
if (copy_to_user(pUser, buffer, nCopyToUser))
iReturn = -EFAULT;
else
- iReturn = nGot; // report characters read
+ iReturn = nGot; /* report characters read */
} else
spin_unlock_irq(&pdx->charInLock);
- Allowi(pdx); // Make sure char reads are running
- mutex_unlock(&pdx->io_mutex); // Protect disconnect from new i/o
+ Allowi(pdx); /* Make sure char reads are running */
+ mutex_unlock(&pdx->io_mutex); /* Protect disconnect from new i/o */
return iReturn;
}
@@ -527,14 +516,14 @@ int GetString(DEVICE_EXTENSION * pdx, char __user * pUser, int n)
/*******************************************************************************
** Get count of characters in the input buffer.
*******************************************************************************/
-int Stat1401(DEVICE_EXTENSION * pdx)
+int Stat1401(DEVICE_EXTENSION *pdx)
{
int iReturn;
- mutex_lock(&pdx->io_mutex); // Protect disconnect from new i/o
- Allowi(pdx); // make sure we allow pending chars
- SendChars(pdx); // in both directions
- iReturn = pdx->dwNumInput; // no lock as single read
- mutex_unlock(&pdx->io_mutex); // Protect disconnect from new i/o
+ mutex_lock(&pdx->io_mutex); /* Protect disconnect from new i/o */
+ Allowi(pdx); /* make sure we allow pending chars */
+ SendChars(pdx); /* in both directions */
+ iReturn = pdx->dwNumInput; /* no lock as single read */
+ mutex_unlock(&pdx->io_mutex); /* Protect disconnect from new i/o */
return iReturn;
}
@@ -545,32 +534,30 @@ int Stat1401(DEVICE_EXTENSION * pdx)
** any fancy interlocks as we only read the interrupt routine data, and the
** system is arranged so nothing can be destroyed.
****************************************************************************/
-int LineCount(DEVICE_EXTENSION * pdx)
+int LineCount(DEVICE_EXTENSION *pdx)
{
- int iReturn = 0; // will be count of line ends
+ int iReturn = 0; /* will be count of line ends */
- mutex_lock(&pdx->io_mutex); // Protect disconnect from new i/o
- Allowi(pdx); // Make sure char reads are running
- SendChars(pdx); // and send any buffered chars
- spin_lock_irq(&pdx->charInLock); // Get protection
+ mutex_lock(&pdx->io_mutex); /* Protect disconnect from new i/o */
+ Allowi(pdx); /* Make sure char reads are running */
+ SendChars(pdx); /* and send any buffered chars */
+ spin_lock_irq(&pdx->charInLock); /* Get protection */
- if (pdx->dwNumInput > 0) // worth looking?
- {
- unsigned int dwIndex = pdx->dwInBuffGet; // start at first available
- unsigned int dwEnd = pdx->dwInBuffPut; // Position for search end
+ if (pdx->dwNumInput > 0) { /* worth looking? */
+ unsigned int dwIndex = pdx->dwInBuffGet; /* start at first available */
+ unsigned int dwEnd = pdx->dwInBuffPut; /* Position for search end */
do {
if (pdx->inputBuffer[dwIndex++] == CR_CHAR)
- ++iReturn; // inc count if CR
+ ++iReturn; /* inc count if CR */
- if (dwIndex >= INBUF_SZ) // see if we fall off buff
+ if (dwIndex >= INBUF_SZ) /* see if we fall off buff */
dwIndex = 0;
- }
- while (dwIndex != dwEnd); // go to last available
+ } while (dwIndex != dwEnd); /* go to last available */
}
spin_unlock_irq(&pdx->charInLock);
dev_dbg(&pdx->interface->dev, "LineCount returned %d", iReturn);
- mutex_unlock(&pdx->io_mutex); // Protect disconnect from new i/o
+ mutex_unlock(&pdx->io_mutex); /* Protect disconnect from new i/o */
return iReturn;
}
@@ -579,14 +566,14 @@ int LineCount(DEVICE_EXTENSION * pdx)
**
** Gets the space in the output buffer. Called from user code.
*****************************************************************************/
-int GetOutBufSpace(DEVICE_EXTENSION * pdx)
+int GetOutBufSpace(DEVICE_EXTENSION *pdx)
{
int iReturn;
- mutex_lock(&pdx->io_mutex); // Protect disconnect from new i/o
- SendChars(pdx); // send any buffered chars
- iReturn = (int)(OUTBUF_SZ - pdx->dwNumOutput); // no lock needed for single read
+ mutex_lock(&pdx->io_mutex); /* Protect disconnect from new i/o */
+ SendChars(pdx); /* send any buffered chars */
+ iReturn = (int)(OUTBUF_SZ - pdx->dwNumOutput); /* no lock needed for single read */
dev_dbg(&pdx->interface->dev, "OutBufSpace %d", iReturn);
- mutex_unlock(&pdx->io_mutex); // Protect disconnect from new i/o
+ mutex_unlock(&pdx->io_mutex); /* Protect disconnect from new i/o */
return iReturn;
}
@@ -597,7 +584,7 @@ int GetOutBufSpace(DEVICE_EXTENSION * pdx)
** Clears up a transfer area. This is always called in the context of a user
** request, never from a call-back.
****************************************************************************/
-int ClearArea(DEVICE_EXTENSION * pdx, int nArea)
+int ClearArea(DEVICE_EXTENSION *pdx, int nArea)
{
int iReturn = U14ERR_NOERROR;
@@ -606,14 +593,14 @@ int ClearArea(DEVICE_EXTENSION * pdx, int nArea)
dev_err(&pdx->interface->dev, "%s Attempt to clear area %d",
__func__, nArea);
} else {
- TRANSAREA *pTA = &pdx->rTransDef[nArea]; // to save typing
- if (!pTA->bUsed) // if not used...
- iReturn = U14ERR_NOTSET; // ...nothing to be done
+ TRANSAREA *pTA = &pdx->rTransDef[nArea]; /* to save typing */
+ if (!pTA->bUsed) /* if not used... */
+ iReturn = U14ERR_NOTSET; /* ...nothing to be done */
else {
- // We must save the memory we return as we shouldn't mess with memory while
- // holding a spin lock.
- struct page **pPages = 0; // save page address list
- int nPages = 0; // and number of pages
+ /* We must save the memory we return as we shouldn't mess with memory while */
+ /* holding a spin lock. */
+ struct page **pPages = NULL; /* save page address list */
+ int nPages = 0; /* and number of pages */
int np;
dev_dbg(&pdx->interface->dev, "%s area %d", __func__,
@@ -621,33 +608,32 @@ int ClearArea(DEVICE_EXTENSION * pdx, int nArea)
spin_lock_irq(&pdx->stagedLock);
if ((pdx->StagedId == nArea)
&& (pdx->dwDMAFlag > MODE_CHAR)) {
- iReturn = U14ERR_UNLOCKFAIL; // cannot delete as in use
+ iReturn = U14ERR_UNLOCKFAIL; /* cannot delete as in use */
dev_err(&pdx->interface->dev,
"%s call on area %d while active",
__func__, nArea);
} else {
- pPages = pTA->pPages; // save page address list
- nPages = pTA->nPages; // and page count
- if (pTA->dwEventSz) // if events flagging in use
- wake_up_interruptible(&pTA->wqEvent); // release anything that was waiting
+ pPages = pTA->pPages; /* save page address list */
+ nPages = pTA->nPages; /* and page count */
+ if (pTA->dwEventSz) /* if events flagging in use */
+ wake_up_interruptible(&pTA->wqEvent); /* release anything that was waiting */
if (pdx->bXFerWaiting
&& (pdx->rDMAInfo.wIdent == nArea))
- pdx->bXFerWaiting = false; // Cannot have pending xfer if area cleared
+ pdx->bXFerWaiting = false; /* Cannot have pending xfer if area cleared */
- // Clean out the TRANSAREA except for the wait queue, which is at the end
- // This sets bUsed to false and dwEventSz to 0 to say area not used and no events.
+ /* Clean out the TRANSAREA except for the wait queue, which is at the end */
+ /* This sets bUsed to false and dwEventSz to 0 to say area not used and no events. */
memset(pTA, 0,
sizeof(TRANSAREA) -
sizeof(wait_queue_head_t));
}
spin_unlock_irq(&pdx->stagedLock);
- if (pPages) // if we decided to release the memory
- {
- // Now we must undo the pinning down of the pages. We will assume the worst and mark
- // all the pages as dirty. Don't be tempted to move this up above as you must not be
- // holding a spin lock to do this stuff as it is not atomic.
+ if (pPages) { /* if we decided to release the memory */
+ /* Now we must undo the pinning down of the pages. We will assume the worst and mark */
+ /* all the pages as dirty. Don't be tempted to move this up above as you must not be */
+ /* holding a spin lock to do this stuff as it is not atomic. */
dev_dbg(&pdx->interface->dev, "%s nPages=%d",
__func__, nPages);
@@ -674,29 +660,29 @@ int ClearArea(DEVICE_EXTENSION * pdx, int nArea)
** Sets up a transfer area - the functional part. Called by both
** SetTransfer and SetCircular.
****************************************************************************/
-static int SetArea(DEVICE_EXTENSION * pdx, int nArea, char __user * puBuf,
+static int SetArea(DEVICE_EXTENSION *pdx, int nArea, char __user *puBuf,
unsigned int dwLength, bool bCircular, bool bCircToHost)
{
- // Start by working out the page aligned start of the area and the size
- // of the area in pages, allowing for the start not being aligned and the
- // end needing to be rounded up to a page boundary.
+ /* Start by working out the page aligned start of the area and the size */
+ /* of the area in pages, allowing for the start not being aligned and the */
+ /* end needing to be rounded up to a page boundary. */
unsigned long ulStart = ((unsigned long)puBuf) & PAGE_MASK;
unsigned int ulOffset = ((unsigned long)puBuf) & (PAGE_SIZE - 1);
int len = (dwLength + ulOffset + PAGE_SIZE - 1) >> PAGE_SHIFT;
- TRANSAREA *pTA = &pdx->rTransDef[nArea]; // to save typing
- struct page **pPages = 0; // space for page tables
- int nPages = 0; // and number of pages
+ TRANSAREA *pTA = &pdx->rTransDef[nArea]; /* to save typing */
+ struct page **pPages = NULL; /* space for page tables */
+ int nPages = 0; /* and number of pages */
- int iReturn = ClearArea(pdx, nArea); // see if OK to use this area
- if ((iReturn != U14ERR_NOTSET) && // if not area unused and...
- (iReturn != U14ERR_NOERROR)) // ...not all OK, then...
- return iReturn; // ...we cannot use this area
+ int iReturn = ClearArea(pdx, nArea); /* see if OK to use this area */
+ if ((iReturn != U14ERR_NOTSET) && /* if not area unused and... */
+ (iReturn != U14ERR_NOERROR)) /* ...not all OK, then... */
+ return iReturn; /* ...we cannot use this area */
- if (!access_ok(VERIFY_WRITE, puBuf, dwLength)) // if we cannot access the memory...
- return -EFAULT; // ...then we are done
+ if (!access_ok(VERIFY_WRITE, puBuf, dwLength)) /* if we cannot access the memory... */
+ return -EFAULT; /* ...then we are done */
- // Now allocate space to hold the page pointer and virtual address pointer tables
+ /* Now allocate space to hold the page pointer and virtual address pointer tables */
pPages = kmalloc(len * sizeof(struct page *), GFP_KERNEL);
if (!pPages) {
iReturn = U14ERR_NOMEMORY;
@@ -705,24 +691,23 @@ static int SetArea(DEVICE_EXTENSION * pdx, int nArea, char __user * puBuf,
dev_dbg(&pdx->interface->dev, "%s %p, length=%06x, circular %d",
__func__, puBuf, dwLength, bCircular);
- // To pin down user pages we must first acquire the mapping semaphore.
- down_read(&current->mm->mmap_sem); // get memory map semaphore
- nPages =
- get_user_pages(current, current->mm, ulStart, len, 1, 0, pPages, 0);
- up_read(&current->mm->mmap_sem); // release the semaphore
+ /* To pin down user pages we must first acquire the mapping semaphore. */
+ down_read(&current->mm->mmap_sem); /* get memory map semaphore */
+ nPages = get_user_pages(current, current->mm, ulStart, len, 1, 0,
+ pPages, NULL);
+ up_read(&current->mm->mmap_sem); /* release the semaphore */
dev_dbg(&pdx->interface->dev, "%s nPages = %d", __func__, nPages);
- if (nPages > 0) // if we succeeded
- {
- // If you are tempted to use page_address (from LDD3), forget it. You MUST use
- // kmap() or kmap_atomic() to get a virtual address. page_address will give you
- // (null) or at least it does in this context with an x86 machine.
+ if (nPages > 0) { /* if we succeeded */
+ /* If you are tempted to use page_address (from LDD3), forget it. You MUST use */
+ /* kmap() or kmap_atomic() to get a virtual address. page_address will give you */
+ /* (null) or at least it does in this context with an x86 machine. */
spin_lock_irq(&pdx->stagedLock);
- pTA->lpvBuff = puBuf; // keep start of region (user address)
- pTA->dwBaseOffset = ulOffset; // save offset in first page to start of xfer
- pTA->dwLength = dwLength; // Size of the region in bytes
- pTA->pPages = pPages; // list of pages that are used by buffer
- pTA->nPages = nPages; // number of pages
+ pTA->lpvBuff = puBuf; /* keep start of region (user address) */
+ pTA->dwBaseOffset = ulOffset; /* save offset in first page to start of xfer */
+ pTA->dwLength = dwLength; /* Size of the region in bytes */
+ pTA->pPages = pPages; /* list of pages that are used by buffer */
+ pTA->nPages = nPages; /* number of pages */
pTA->bCircular = bCircular;
pTA->bCircToHost = bCircToHost;
@@ -731,10 +716,10 @@ static int SetArea(DEVICE_EXTENSION * pdx, int nArea, char __user * puBuf,
pTA->aBlocks[0].dwSize = 0;
pTA->aBlocks[1].dwOffset = 0;
pTA->aBlocks[1].dwSize = 0;
- pTA->bUsed = true; // This is now a used block
+ pTA->bUsed = true; /* This is now a used block */
spin_unlock_irq(&pdx->stagedLock);
- iReturn = U14ERR_NOERROR; // say all was well
+ iReturn = U14ERR_NOERROR; /* say all was well */
} else {
iReturn = U14ERR_LOCKFAIL;
goto error;
@@ -754,7 +739,7 @@ error:
** unset it. Unsetting will fail if the area is booked, and a transfer to that
** area is in progress. Otherwise, we will release the area and re-assign it.
****************************************************************************/
-int SetTransfer(DEVICE_EXTENSION * pdx, TRANSFERDESC __user * pTD)
+int SetTransfer(DEVICE_EXTENSION *pdx, TRANSFERDESC __user *pTD)
{
int iReturn;
TRANSFERDESC td;
@@ -765,9 +750,9 @@ int SetTransfer(DEVICE_EXTENSION * pdx, TRANSFERDESC __user * pTD)
mutex_lock(&pdx->io_mutex);
dev_dbg(&pdx->interface->dev, "%s area:%d, size:%08x", __func__,
td.wAreaNum, td.dwLength);
- // The strange cast is done so that we don't get warnings in 32-bit linux about the size of the
- // pointer. The pointer is always passed as a 64-bit object so that we don't have problems using
- // a 32-bit program on a 64-bit system. unsigned long is 64-bits on a 64-bit system.
+ /* The strange cast is done so that we don't get warnings in 32-bit linux about the size of the */
+ /* pointer. The pointer is always passed as a 64-bit object so that we don't have problems using */
+ /* a 32-bit program on a 64-bit system. unsigned long is 64-bits on a 64-bit system. */
iReturn =
SetArea(pdx, td.wAreaNum,
(char __user *)((unsigned long)td.lpvBuff), td.dwLength,
@@ -780,7 +765,7 @@ int SetTransfer(DEVICE_EXTENSION * pdx, TRANSFERDESC __user * pTD)
** UnSetTransfer
** Erases a transfer area record
****************************************************************************/
-int UnsetTransfer(DEVICE_EXTENSION * pdx, int nArea)
+int UnsetTransfer(DEVICE_EXTENSION *pdx, int nArea)
{
int iReturn;
mutex_lock(&pdx->io_mutex);
@@ -797,27 +782,26 @@ int UnsetTransfer(DEVICE_EXTENSION * pdx, int nArea)
** pretend that whatever the user asked for was achieved, so we return 1 if
** try to create one, and 0 if they ask to remove (assuming all else was OK).
****************************************************************************/
-int SetEvent(DEVICE_EXTENSION * pdx, TRANSFEREVENT __user * pTE)
+int SetEvent(DEVICE_EXTENSION *pdx, TRANSFEREVENT __user *pTE)
{
int iReturn = U14ERR_NOERROR;
TRANSFEREVENT te;
- // get a local copy of the data
+ /* get a local copy of the data */
if (copy_from_user(&te, pTE, sizeof(te)))
return -EFAULT;
- if (te.wAreaNum >= MAX_TRANSAREAS) // the area must exist
+ if (te.wAreaNum >= MAX_TRANSAREAS) /* the area must exist */
return U14ERR_BADAREA;
else {
TRANSAREA *pTA = &pdx->rTransDef[te.wAreaNum];
- mutex_lock(&pdx->io_mutex); // make sure we have no competitor
+ mutex_lock(&pdx->io_mutex); /* make sure we have no competitor */
spin_lock_irq(&pdx->stagedLock);
- if (pTA->bUsed) // area must be in use
- {
- pTA->dwEventSt = te.dwStart; // set area regions
- pTA->dwEventSz = te.dwLength; // set size (0 cancels it)
- pTA->bEventToHost = te.wFlags & 1; // set the direction
- pTA->iWakeUp = 0; // zero the wake up count
+ if (pTA->bUsed) { /* area must be in use */
+ pTA->dwEventSt = te.dwStart; /* set area regions */
+ pTA->dwEventSz = te.dwLength; /* set size (0 cancels it) */
+ pTA->bEventToHost = te.wFlags & 1; /* set the direction */
+ pTA->iWakeUp = 0; /* zero the wake up count */
} else
iReturn = U14ERR_NOTSET;
spin_unlock_irq(&pdx->stagedLock);
@@ -833,7 +817,7 @@ int SetEvent(DEVICE_EXTENSION * pdx, TRANSFEREVENT __user * pTE)
** of times that a block met the event condition since we last cleared it or
** 0 if timed out, or -ve error (bad area or not set, or signal).
****************************************************************************/
-int WaitEvent(DEVICE_EXTENSION * pdx, int nArea, int msTimeOut)
+int WaitEvent(DEVICE_EXTENSION *pdx, int nArea, int msTimeOut)
{
int iReturn;
if ((unsigned)nArea >= MAX_TRANSAREAS)
@@ -841,15 +825,15 @@ int WaitEvent(DEVICE_EXTENSION * pdx, int nArea, int msTimeOut)
else {
int iWait;
TRANSAREA *pTA = &pdx->rTransDef[nArea];
- msTimeOut = (msTimeOut * HZ + 999) / 1000; // convert timeout to jiffies
-
- // We cannot wait holding the mutex, but we check the flags while holding
- // it. This may well be pointless as another thread could get in between
- // releasing it and the wait call. However, this would have to clear the
- // iWakeUp flag. However, the !pTA->bUsed may help us in this case.
- mutex_lock(&pdx->io_mutex); // make sure we have no competitor
- if (!pTA->bUsed || !pTA->dwEventSz) // check something to wait for...
- return U14ERR_NOTSET; // ...else we do nothing
+ msTimeOut = (msTimeOut * HZ + 999) / 1000; /* convert timeout to jiffies */
+
+ /* We cannot wait holding the mutex, but we check the flags while holding */
+ /* it. This may well be pointless as another thread could get in between */
+ /* releasing it and the wait call. However, this would have to clear the */
+ /* iWakeUp flag. However, the !pTA->bUsed may help us in this case. */
+ mutex_lock(&pdx->io_mutex); /* make sure we have no competitor */
+ if (!pTA->bUsed || !pTA->dwEventSz) /* check something to wait for... */
+ return U14ERR_NOTSET; /* ...else we do nothing */
mutex_unlock(&pdx->io_mutex);
if (msTimeOut)
@@ -863,12 +847,12 @@ int WaitEvent(DEVICE_EXTENSION * pdx, int nArea, int msTimeOut)
wait_event_interruptible(pTA->wqEvent, pTA->iWakeUp
|| !pTA->bUsed);
if (iWait)
- iReturn = -ERESTARTSYS; // oops - we have had a SIGNAL
+ iReturn = -ERESTARTSYS; /* oops - we have had a SIGNAL */
else
- iReturn = pTA->iWakeUp; // else the wakeup count
+ iReturn = pTA->iWakeUp; /* else the wakeup count */
spin_lock_irq(&pdx->stagedLock);
- pTA->iWakeUp = 0; // clear the flag
+ pTA->iWakeUp = 0; /* clear the flag */
spin_unlock_irq(&pdx->stagedLock);
}
return iReturn;
@@ -880,17 +864,17 @@ int WaitEvent(DEVICE_EXTENSION * pdx, int nArea, int msTimeOut)
** number of times a block completed since the last call, or 0 if none or a
** negative error.
****************************************************************************/
-int TestEvent(DEVICE_EXTENSION * pdx, int nArea)
+int TestEvent(DEVICE_EXTENSION *pdx, int nArea)
{
int iReturn;
if ((unsigned)nArea >= MAX_TRANSAREAS)
iReturn = U14ERR_BADAREA;
else {
TRANSAREA *pTA = &pdx->rTransDef[nArea];
- mutex_lock(&pdx->io_mutex); // make sure we have no competitor
+ mutex_lock(&pdx->io_mutex); /* make sure we have no competitor */
spin_lock_irq(&pdx->stagedLock);
- iReturn = pTA->iWakeUp; // get wakeup count since last call
- pTA->iWakeUp = 0; // clear the count
+ iReturn = pTA->iWakeUp; /* get wakeup count since last call */
+ pTA->iWakeUp = 0; /* clear the count */
spin_unlock_irq(&pdx->stagedLock);
mutex_unlock(&pdx->io_mutex);
}
@@ -901,17 +885,17 @@ int TestEvent(DEVICE_EXTENSION * pdx, int nArea)
** GetTransferInfo
** Puts the current state of the 1401 in a TGET_TX_BLOCK.
*****************************************************************************/
-int GetTransfer(DEVICE_EXTENSION * pdx, TGET_TX_BLOCK __user * pTX)
+int GetTransfer(DEVICE_EXTENSION *pdx, TGET_TX_BLOCK __user *pTX)
{
int iReturn = U14ERR_NOERROR;
unsigned int dwIdent;
mutex_lock(&pdx->io_mutex);
- dwIdent = pdx->StagedId; // area ident for last xfer
+ dwIdent = pdx->StagedId; /* area ident for last xfer */
if (dwIdent >= MAX_TRANSAREAS)
iReturn = U14ERR_BADAREA;
else {
- // Return the best information we have - we don't have physical addresses
+ /* Return the best information we have - we don't have physical addresses */
TGET_TX_BLOCK *tx;
tx = kzalloc(sizeof(*tx), GFP_KERNEL);
@@ -921,8 +905,8 @@ int GetTransfer(DEVICE_EXTENSION * pdx, TGET_TX_BLOCK __user * pTX)
}
tx->size = pdx->rTransDef[dwIdent].dwLength;
tx->linear = (long long)((long)pdx->rTransDef[dwIdent].lpvBuff);
- tx->avail = GET_TX_MAXENTRIES; // how many blocks we could return
- tx->used = 1; // number we actually return
+ tx->avail = GET_TX_MAXENTRIES; /* how many blocks we could return */
+ tx->used = 1; /* number we actually return */
tx->entries[0].physical =
(long long)(tx->linear + pdx->StagedOffset);
tx->entries[0].size = tx->size;
@@ -940,7 +924,7 @@ int GetTransfer(DEVICE_EXTENSION * pdx, TGET_TX_BLOCK __user * pTX)
**
** Empties the host i/o buffers
****************************************************************************/
-int KillIO1401(DEVICE_EXTENSION * pdx)
+int KillIO1401(DEVICE_EXTENSION *pdx)
{
dev_dbg(&pdx->interface->dev, "%s", __func__);
mutex_lock(&pdx->io_mutex);
@@ -955,7 +939,7 @@ int KillIO1401(DEVICE_EXTENSION * pdx)
** Returns a 0 or a 1 for whether DMA is happening. No point holding a mutex
** for this as it only does one read.
*****************************************************************************/
-int BlkTransState(DEVICE_EXTENSION * pdx)
+int BlkTransState(DEVICE_EXTENSION *pdx)
{
int iReturn = pdx->dwDMAFlag != MODE_CHAR;
dev_dbg(&pdx->interface->dev, "%s = %d", __func__, iReturn);
@@ -967,12 +951,12 @@ int BlkTransState(DEVICE_EXTENSION * pdx)
**
** Puts the current state of the 1401 in the Irp return buffer.
*****************************************************************************/
-int StateOf1401(DEVICE_EXTENSION * pdx)
+int StateOf1401(DEVICE_EXTENSION *pdx)
{
int iReturn;
mutex_lock(&pdx->io_mutex);
- QuickCheck(pdx, false, false); // get state up to date, no reset
+ QuickCheck(pdx, false, false); /* get state up to date, no reset */
iReturn = pdx->sCurrentState;
mutex_unlock(&pdx->io_mutex);
@@ -987,20 +971,23 @@ int StateOf1401(DEVICE_EXTENSION * pdx)
** Initiates a self-test cycle. The assumption is that we have no interrupts
** active, so we should make sure that this is the case.
*****************************************************************************/
-int StartSelfTest(DEVICE_EXTENSION * pdx)
+int StartSelfTest(DEVICE_EXTENSION *pdx)
{
int nGot;
mutex_lock(&pdx->io_mutex);
dev_dbg(&pdx->interface->dev, "%s", __func__);
- ced_draw_down(pdx); // wait for, then kill outstanding Urbs
- FlushInBuff(pdx); // Clear out input buffer & pipe
- FlushOutBuff(pdx); // Clear output buffer & pipe
-// ReadWrite_Cancel(pDeviceObject); /* so things stay tidy */
+ ced_draw_down(pdx); /* wait for, then kill outstanding Urbs */
+ FlushInBuff(pdx); /* Clear out input buffer & pipe */
+ FlushOutBuff(pdx); /* Clear output buffer & pipe */
+ /* so things stay tidy */
+ /* ReadWrite_Cancel(pDeviceObject); */
pdx->dwDMAFlag = MODE_CHAR; /* Clear DMA mode flags here */
- nGot = usb_control_msg(pdx->udev, usb_rcvctrlpipe(pdx->udev, 0), DB_SELFTEST, (H_TO_D | VENDOR | DEVREQ), 0, 0, 0, 0, HZ); // allow 1 second timeout
- pdx->ulSelfTestTime = jiffies + HZ * 30; // 30 seconds into the future
+ nGot = usb_control_msg(pdx->udev, usb_rcvctrlpipe(pdx->udev, 0),
+ DB_SELFTEST, (H_TO_D | VENDOR | DEVREQ),
+ 0, 0, NULL, 0, HZ); /* allow 1 second timeout */
+ pdx->ulSelfTestTime = jiffies + HZ * 30; /* 30 seconds into the future */
mutex_unlock(&pdx->io_mutex);
if (nGot < 0)
@@ -1013,53 +1000,49 @@ int StartSelfTest(DEVICE_EXTENSION * pdx)
**
** Check progress of a self-test cycle
****************************************************************************/
-int CheckSelfTest(DEVICE_EXTENSION * pdx, TGET_SELFTEST __user * pGST)
+int CheckSelfTest(DEVICE_EXTENSION *pdx, TGET_SELFTEST __user *pGST)
{
unsigned int state, error;
int iReturn;
- TGET_SELFTEST gst; // local work space
- memset(&gst, 0, sizeof(gst)); // clear out the space (sets code 0)
+ TGET_SELFTEST gst; /* local work space */
+ memset(&gst, 0, sizeof(gst)); /* clear out the space (sets code 0) */
mutex_lock(&pdx->io_mutex);
dev_dbg(&pdx->interface->dev, "%s", __func__);
iReturn = Get1401State(pdx, &state, &error);
- if (iReturn == U14ERR_NOERROR) // Only accept zero if it happens twice
+ if (iReturn == U14ERR_NOERROR) /* Only accept zero if it happens twice */
iReturn = Get1401State(pdx, &state, &error);
- if (iReturn != U14ERR_NOERROR) // Self-test can cause comms errors
- { // so we assume still testing
+ if (iReturn != U14ERR_NOERROR) { /* Self-test can cause comms errors */
+ /* so we assume still testing */
dev_err(&pdx->interface->dev,
"%s Get1401State=%d, assuming still testing", __func__,
iReturn);
- state = 0x80; // Force still-testing, no error
+ state = 0x80; /* Force still-testing, no error */
error = 0;
iReturn = U14ERR_NOERROR;
}
- if ((state == -1) && (error == -1)) // If Get1401State had problems
- {
+ if ((state == -1) && (error == -1)) { /* If Get1401State had problems */
dev_err(&pdx->interface->dev,
"%s Get1401State failed, assuming still testing",
__func__);
- state = 0x80; // Force still-testing, no error
+ state = 0x80; /* Force still-testing, no error */
error = 0;
}
- if ((state & 0xFF) == 0x80) // If we are still in self-test
- {
- if (state & 0x00FF0000) // Have we got an error?
- {
- gst.code = (state & 0x00FF0000) >> 16; // read the error code
- gst.x = error & 0x0000FFFF; // Error data X
- gst.y = (error & 0xFFFF0000) >> 16; // and data Y
+ if ((state & 0xFF) == 0x80) { /* If we are still in self-test */
+ if (state & 0x00FF0000) { /* Have we got an error? */
+ gst.code = (state & 0x00FF0000) >> 16; /* read the error code */
+ gst.x = error & 0x0000FFFF; /* Error data X */
+ gst.y = (error & 0xFFFF0000) >> 16; /* and data Y */
dev_dbg(&pdx->interface->dev, "Self-test error code %d",
gst.code);
- } else // No error, check for timeout
- {
- unsigned long ulNow = jiffies; // get current time
+ } else { /* No error, check for timeout */
+ unsigned long ulNow = jiffies; /* get current time */
if (time_after(ulNow, pdx->ulSelfTestTime)) {
- gst.code = -2; // Flag the timeout
+ gst.code = -2; /* Flag the timeout */
dev_dbg(&pdx->interface->dev,
"Self-test timed-out");
} else
@@ -1067,16 +1050,16 @@ int CheckSelfTest(DEVICE_EXTENSION * pdx, TGET_SELFTEST __user * pGST)
"Self-test on-going");
}
} else {
- gst.code = -1; // Flag the test is done
+ gst.code = -1; /* Flag the test is done */
dev_dbg(&pdx->interface->dev, "Self-test done");
}
- if (gst.code < 0) // If we have a problem or finished
- { // If using the 2890 we should reset properly
+ if (gst.code < 0) { /* If we have a problem or finished */
+ /* If using the 2890 we should reset properly */
if ((pdx->nPipes == 4) && (pdx->s1401Type <= TYPEPOWER))
- Is1401(pdx); // Get 1401 reset and OK
+ Is1401(pdx); /* Get 1401 reset and OK */
else
- QuickCheck(pdx, true, true); // Otherwise check without reset unless problems
+ QuickCheck(pdx, true, true); /* Otherwise check without reset unless problems */
}
mutex_unlock(&pdx->io_mutex);
@@ -1091,7 +1074,7 @@ int CheckSelfTest(DEVICE_EXTENSION * pdx, TGET_SELFTEST __user * pGST)
**
** Returns code for standard, plus, micro1401, power1401 or none
****************************************************************************/
-int TypeOf1401(DEVICE_EXTENSION * pdx)
+int TypeOf1401(DEVICE_EXTENSION *pdx)
{
int iReturn = TYPEUNKNOWN;
mutex_lock(&pdx->io_mutex);
@@ -1100,7 +1083,7 @@ int TypeOf1401(DEVICE_EXTENSION * pdx)
switch (pdx->s1401Type) {
case TYPE1401:
iReturn = U14ERR_STD;
- break; // Handle these types directly
+ break; /* Handle these types directly */
case TYPEPLUS:
iReturn = U14ERR_PLUS;
break;
@@ -1109,9 +1092,9 @@ int TypeOf1401(DEVICE_EXTENSION * pdx)
break;
default:
if ((pdx->s1401Type >= TYPEPOWER) && (pdx->s1401Type <= 25))
- iReturn = pdx->s1401Type + 4; // We can calculate types
- else // for up-coming 1401 designs
- iReturn = TYPEUNKNOWN; // Don't know or not there
+ iReturn = pdx->s1401Type + 4; /* We can calculate types */
+		else		/* for upcoming 1401 designs */
+ iReturn = TYPEUNKNOWN; /* Don't know or not there */
}
dev_dbg(&pdx->interface->dev, "%s %d", __func__, iReturn);
mutex_unlock(&pdx->io_mutex);
@@ -1124,13 +1107,13 @@ int TypeOf1401(DEVICE_EXTENSION * pdx)
**
** Returns flags on block transfer abilities
****************************************************************************/
-int TransferFlags(DEVICE_EXTENSION * pdx)
+int TransferFlags(DEVICE_EXTENSION *pdx)
{
- int iReturn = U14TF_MULTIA | U14TF_DIAG | // we always have multiple DMA area
- U14TF_NOTIFY | U14TF_CIRCTH; // diagnostics, notify and circular
+	int iReturn = U14TF_MULTIA | U14TF_DIAG |	/* we always have multiple DMA areas */
+ U14TF_NOTIFY | U14TF_CIRCTH; /* diagnostics, notify and circular */
dev_dbg(&pdx->interface->dev, "%s", __func__);
mutex_lock(&pdx->io_mutex);
- if (pdx->bIsUSB2) // Set flag for USB2 if appropriate
+ if (pdx->bIsUSB2) /* Set flag for USB2 if appropriate */
iReturn |= U14TF_USB2;
mutex_unlock(&pdx->io_mutex);
@@ -1142,12 +1125,16 @@ int TransferFlags(DEVICE_EXTENSION * pdx)
** Issues a debug\diagnostic command to the 1401 along with a 32-bit datum
** This is a utility command used for dbg operations.
*/
-static int DbgCmd1401(DEVICE_EXTENSION * pdx, unsigned char cmd,
+static int DbgCmd1401(DEVICE_EXTENSION *pdx, unsigned char cmd,
unsigned int data)
{
int iReturn;
dev_dbg(&pdx->interface->dev, "%s entry", __func__);
- iReturn = usb_control_msg(pdx->udev, usb_sndctrlpipe(pdx->udev, 0), cmd, (H_TO_D | VENDOR | DEVREQ), (unsigned short)data, (unsigned short)(data >> 16), 0, 0, HZ); // allow 1 second timeout
+ iReturn = usb_control_msg(pdx->udev, usb_sndctrlpipe(pdx->udev, 0), cmd,
+ (H_TO_D | VENDOR | DEVREQ),
+ (unsigned short)data,
+ (unsigned short)(data >> 16), NULL, 0, HZ);
+ /* allow 1 second timeout */
if (iReturn < 0)
dev_err(&pdx->interface->dev, "%s fail code=%d", __func__,
iReturn);
@@ -1160,7 +1147,7 @@ static int DbgCmd1401(DEVICE_EXTENSION * pdx, unsigned char cmd,
**
** Execute the diagnostic peek operation. Uses address, width and repeats.
****************************************************************************/
-int DbgPeek(DEVICE_EXTENSION * pdx, TDBGBLOCK __user * pDB)
+int DbgPeek(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB)
{
int iReturn;
TDBGBLOCK db;
@@ -1189,7 +1176,7 @@ int DbgPeek(DEVICE_EXTENSION * pdx, TDBGBLOCK __user * pDB)
** Execute the diagnostic poke operation. Parameters are in the CSBLOCK struct
** in order address, size, repeats and value to poke.
****************************************************************************/
-int DbgPoke(DEVICE_EXTENSION * pdx, TDBGBLOCK __user * pDB)
+int DbgPoke(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB)
{
int iReturn;
TDBGBLOCK db;
@@ -1218,7 +1205,7 @@ int DbgPoke(DEVICE_EXTENSION * pdx, TDBGBLOCK __user * pDB)
** Execute the diagnostic ramp data operation. Parameters are in the CSBLOCK struct
** in order address, default, enable mask, size and repeats.
****************************************************************************/
-int DbgRampData(DEVICE_EXTENSION * pdx, TDBGBLOCK __user * pDB)
+int DbgRampData(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB)
{
int iReturn;
TDBGBLOCK db;
@@ -1250,7 +1237,7 @@ int DbgRampData(DEVICE_EXTENSION * pdx, TDBGBLOCK __user * pDB)
**
** Execute the diagnostic ramp address operation
****************************************************************************/
-int DbgRampAddr(DEVICE_EXTENSION * pdx, TDBGBLOCK __user * pDB)
+int DbgRampAddr(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB)
{
int iReturn;
TDBGBLOCK db;
@@ -1280,16 +1267,16 @@ int DbgRampAddr(DEVICE_EXTENSION * pdx, TDBGBLOCK __user * pDB)
**
** Retrieve the data resulting from the last debug Peek operation
****************************************************************************/
-int DbgGetData(DEVICE_EXTENSION * pdx, TDBGBLOCK __user * pDB)
+int DbgGetData(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB)
{
int iReturn;
TDBGBLOCK db;
- memset(&db, 0, sizeof(db)); // fill returned block with 0s
+ memset(&db, 0, sizeof(db)); /* fill returned block with 0s */
mutex_lock(&pdx->io_mutex);
dev_dbg(&pdx->interface->dev, "%s", __func__);
- // Read back the last peeked value from the 1401.
+ /* Read back the last peeked value from the 1401. */
iReturn = usb_control_msg(pdx->udev, usb_rcvctrlpipe(pdx->udev, 0),
DB_DATA, (D_TO_H | VENDOR | DEVREQ), 0, 0,
&db.iData, sizeof(db.iData), HZ);
@@ -1313,7 +1300,7 @@ int DbgGetData(DEVICE_EXTENSION * pdx, TDBGBLOCK __user * pDB)
** Stop any never-ending debug loop, we just call Get1401State for USB
**
****************************************************************************/
-int DbgStopLoop(DEVICE_EXTENSION * pdx)
+int DbgStopLoop(DEVICE_EXTENSION *pdx)
{
int iReturn;
unsigned int uState, uErr;
@@ -1334,7 +1321,7 @@ int DbgStopLoop(DEVICE_EXTENSION * pdx)
** booked and a transfer to that area is in progress. Otherwise, we will
** release the area and re-assign it.
****************************************************************************/
-int SetCircular(DEVICE_EXTENSION * pdx, TRANSFERDESC __user * pTD)
+int SetCircular(DEVICE_EXTENSION *pdx, TRANSFERDESC __user *pTD)
{
int iReturn;
bool bToHost;
@@ -1346,11 +1333,11 @@ int SetCircular(DEVICE_EXTENSION * pdx, TRANSFERDESC __user * pTD)
mutex_lock(&pdx->io_mutex);
dev_dbg(&pdx->interface->dev, "%s area:%d, size:%08x", __func__,
td.wAreaNum, td.dwLength);
- bToHost = td.eSize != 0; // this is used as the tohost flag
+ bToHost = td.eSize != 0; /* this is used as the tohost flag */
- // The strange cast is done so that we don't get warnings in 32-bit linux about the size of the
- // pointer. The pointer is always passed as a 64-bit object so that we don't have problems using
- // a 32-bit program on a 64-bit system. unsigned long is 64-bits on a 64-bit system.
+ /* The strange cast is done so that we don't get warnings in 32-bit linux about the size of the */
+ /* pointer. The pointer is always passed as a 64-bit object so that we don't have problems using */
+ /* a 32-bit program on a 64-bit system. unsigned long is 64-bits on a 64-bit system. */
iReturn =
SetArea(pdx, td.wAreaNum,
(char __user *)((unsigned long)td.lpvBuff), td.dwLength,
@@ -1364,7 +1351,7 @@ int SetCircular(DEVICE_EXTENSION * pdx, TRANSFERDESC __user * pTD)
**
** Return the next available block of circularly-transferred data.
****************************************************************************/
-int GetCircBlock(DEVICE_EXTENSION * pdx, TCIRCBLOCK __user * pCB)
+int GetCircBlock(DEVICE_EXTENSION *pdx, TCIRCBLOCK __user *pCB)
{
int iReturn = U14ERR_NOERROR;
unsigned int nArea;
@@ -1377,20 +1364,17 @@ int GetCircBlock(DEVICE_EXTENSION * pdx, TCIRCBLOCK __user * pCB)
mutex_lock(&pdx->io_mutex);
- nArea = cb.nArea; // Retrieve parameters first
- cb.dwOffset = 0; // set default result (nothing)
+ nArea = cb.nArea; /* Retrieve parameters first */
+ cb.dwOffset = 0; /* set default result (nothing) */
cb.dwSize = 0;
- if (nArea < MAX_TRANSAREAS) // The area number must be OK
- {
- TRANSAREA *pArea = &pdx->rTransDef[nArea]; // Pointer to relevant info
- spin_lock_irq(&pdx->stagedLock); // Lock others out
+ if (nArea < MAX_TRANSAREAS) { /* The area number must be OK */
+ TRANSAREA *pArea = &pdx->rTransDef[nArea]; /* Pointer to relevant info */
+ spin_lock_irq(&pdx->stagedLock); /* Lock others out */
- if ((pArea->bUsed) && (pArea->bCircular) && // Must be circular area
- (pArea->bCircToHost)) // For now at least must be to host
- {
- if (pArea->aBlocks[0].dwSize > 0) // Got anything?
- {
+ if ((pArea->bUsed) && (pArea->bCircular) && /* Must be circular area */
+ (pArea->bCircToHost)) { /* For now at least must be to host */
+ if (pArea->aBlocks[0].dwSize > 0) { /* Got anything? */
cb.dwOffset = pArea->aBlocks[0].dwOffset;
cb.dwSize = pArea->aBlocks[0].dwSize;
dev_dbg(&pdx->interface->dev,
@@ -1416,7 +1400,7 @@ int GetCircBlock(DEVICE_EXTENSION * pdx, TCIRCBLOCK __user * pCB)
**
** Frees a block of circularly-transferred data and returns the next one.
****************************************************************************/
-int FreeCircBlock(DEVICE_EXTENSION * pdx, TCIRCBLOCK __user * pCB)
+int FreeCircBlock(DEVICE_EXTENSION *pdx, TCIRCBLOCK __user *pCB)
{
int iReturn = U14ERR_NOERROR;
unsigned int nArea, uStart, uSize;
@@ -1429,33 +1413,28 @@ int FreeCircBlock(DEVICE_EXTENSION * pdx, TCIRCBLOCK __user * pCB)
mutex_lock(&pdx->io_mutex);
- nArea = cb.nArea; // Retrieve parameters first
+ nArea = cb.nArea; /* Retrieve parameters first */
uStart = cb.dwOffset;
uSize = cb.dwSize;
- cb.dwOffset = 0; // then set default result (nothing)
+ cb.dwOffset = 0; /* then set default result (nothing) */
cb.dwSize = 0;
- if (nArea < MAX_TRANSAREAS) // The area number must be OK
- {
- TRANSAREA *pArea = &pdx->rTransDef[nArea]; // Pointer to relevant info
- spin_lock_irq(&pdx->stagedLock); // Lock others out
+ if (nArea < MAX_TRANSAREAS) { /* The area number must be OK */
+ TRANSAREA *pArea = &pdx->rTransDef[nArea]; /* Pointer to relevant info */
+ spin_lock_irq(&pdx->stagedLock); /* Lock others out */
- if ((pArea->bUsed) && (pArea->bCircular) && // Must be circular area
- (pArea->bCircToHost)) // For now at least must be to host
- {
+ if ((pArea->bUsed) && (pArea->bCircular) && /* Must be circular area */
+ (pArea->bCircToHost)) { /* For now at least must be to host */
bool bWaiting = false;
- if ((pArea->aBlocks[0].dwSize >= uSize) && // Got anything?
- (pArea->aBlocks[0].dwOffset == uStart)) // Must be legal data
- {
+ if ((pArea->aBlocks[0].dwSize >= uSize) && /* Got anything? */
+ (pArea->aBlocks[0].dwOffset == uStart)) { /* Must be legal data */
pArea->aBlocks[0].dwSize -= uSize;
pArea->aBlocks[0].dwOffset += uSize;
- if (pArea->aBlocks[0].dwSize == 0) // Have we emptied this block?
- {
- if (pArea->aBlocks[1].dwSize) // Is there a second block?
- {
- pArea->aBlocks[0] = pArea->aBlocks[1]; // Copy down block 2 data
- pArea->aBlocks[1].dwSize = 0; // and mark the second block as unused
+ if (pArea->aBlocks[0].dwSize == 0) { /* Have we emptied this block? */
+ if (pArea->aBlocks[1].dwSize) { /* Is there a second block? */
+ pArea->aBlocks[0] = pArea->aBlocks[1]; /* Copy down block 2 data */
+ pArea->aBlocks[1].dwSize = 0; /* and mark the second block as unused */
pArea->aBlocks[1].dwOffset = 0;
} else
pArea->aBlocks[0].dwOffset = 0;
@@ -1468,9 +1447,8 @@ int FreeCircBlock(DEVICE_EXTENSION * pdx, TCIRCBLOCK __user * pCB)
pArea->aBlocks[0].dwOffset,
pdx->bXFerWaiting);
- // Return the next available block of memory as well
- if (pArea->aBlocks[0].dwSize > 0) // Got anything?
- {
+ /* Return the next available block of memory as well */
+ if (pArea->aBlocks[0].dwSize > 0) { /* Got anything? */
cb.dwOffset =
pArea->aBlocks[0].dwOffset;
cb.dwSize = pArea->aBlocks[0].dwSize;
@@ -1492,9 +1470,8 @@ int FreeCircBlock(DEVICE_EXTENSION * pdx, TCIRCBLOCK __user * pCB)
iReturn = U14ERR_NOMEMORY;
}
- // If we have one, kick off pending transfer
- if (bWaiting) // Got a block xfer waiting?
- {
+ /* If we have one, kick off pending transfer */
+ if (bWaiting) { /* Got a block xfer waiting? */
int RWMStat =
ReadWriteMem(pdx, !pdx->rDMAInfo.bOutWard,
pdx->rDMAInfo.wIdent,
diff --git a/drivers/staging/ced1401/ced_ioctl.h b/drivers/staging/ced1401/ced_ioctl.h
index 0895c94..aa68878 100644
--- a/drivers/staging/ced1401/ced_ioctl.h
+++ b/drivers/staging/ced1401/ced_ioctl.h
@@ -35,7 +35,7 @@ typedef struct TransferDesc {
short eSize; /* element size - is tohost flag for circular */
} TRANSFERDESC;
-typedef TRANSFERDESC * LPTRANSFERDESC;
+typedef TRANSFERDESC *LPTRANSFERDESC;
typedef struct TransferEvent {
unsigned int dwStart; /* offset into the area */
diff --git a/drivers/staging/ced1401/machine.h b/drivers/staging/ced1401/machine.h
index af07379..dbd4036d 100644
--- a/drivers/staging/ced1401/machine.h
+++ b/drivers/staging/ced1401/machine.h
@@ -77,20 +77,13 @@
#endif
#if defined(LINUX) || defined(MAXOSX)
- #define FAR
+ #define FAR
- typedef int BOOL; // To match Windows
- typedef char * LPSTR;
- typedef const char * LPCSTR;
- typedef unsigned short WORD;
- typedef unsigned int DWORD;
- typedef unsigned char BYTE;
- typedef BYTE BOOLEAN;
- typedef unsigned char UCHAR;
- #define __packed __attribute__((packed))
- typedef BYTE * LPBYTE;
- #define HIWORD(x) (WORD)(((x)>>16) & 0xffff)
- #define LOWORD(x) (WORD)((x) & 0xffff)
+ typedef int BOOL; /* To match Windows */
+ typedef unsigned char BYTE;
+ #define __packed __attribute__((packed))
+ #define HIWORD(x) (unsigned short)(((x)>>16) & 0xffff)
+ #define LOWORD(x) (unsigned short)((x) & 0xffff)
#endif
#ifdef _IS_WINDOWS_
@@ -104,21 +97,20 @@
** a synonym.
*/
#ifdef GNUC
- #define DllExport __attribute__((dllexport))
- #define DllImport __attribute__((dllimport))
+ #define DllExport __attribute__((dllexport))
+ #define DllImport __attribute__((dllimport))
#endif
#ifndef DllExport
#ifdef _IS_WINDOWS_
- #define DllExport __declspec(dllexport)
- #define DllImport __declspec(dllimport)
+ #define DllExport __declspec(dllexport)
+ #define DllImport __declspec(dllimport)
#else
- #define DllExport
- #define DllImport
+ #define DllExport
+ #define DllImport
#endif
#endif /* _IS_WINDOWS_ */
-
#ifndef TRUE
#define TRUE 1
#define FALSE 0
diff --git a/drivers/staging/ced1401/usb1401.c b/drivers/staging/ced1401/usb1401.c
index 254131d..97c55f9 100644
--- a/drivers/staging/ced1401/usb1401.c
+++ b/drivers/staging/ced1401/usb1401.c
@@ -126,18 +126,18 @@ static void ced_delete(struct kref *kref)
{
DEVICE_EXTENSION *pdx = to_DEVICE_EXTENSION(kref);
- // Free up the output buffer, then free the output urb. Note that the interface member
- // of pdx will probably be NULL, so cannot be used to get to dev.
+ /* Free up the output buffer, then free the output urb. Note that the interface member */
+ /* of pdx will probably be NULL, so cannot be used to get to dev. */
usb_free_coherent(pdx->udev, OUTBUF_SZ, pdx->pCoherCharOut,
pdx->pUrbCharOut->transfer_dma);
usb_free_urb(pdx->pUrbCharOut);
- // Do the same for chan input
+ /* Do the same for chan input */
usb_free_coherent(pdx->udev, INBUF_SZ, pdx->pCoherCharIn,
pdx->pUrbCharIn->transfer_dma);
usb_free_urb(pdx->pUrbCharIn);
- // Do the same for the block transfers
+ /* Do the same for the block transfers */
usb_free_coherent(pdx->udev, STAGED_SZ, pdx->pCoherStagedIO,
pdx->pStagedUrb->transfer_dma);
usb_free_urb(pdx->pStagedUrb);
@@ -146,7 +146,7 @@ static void ced_delete(struct kref *kref)
kfree(pdx);
}
-// This is the driver end of the open() call from user space.
+/* This is the driver end of the open() call from user space. */
static int ced_open(struct inode *inode, struct file *file)
{
DEVICE_EXTENSION *pdx;
@@ -184,7 +184,7 @@ static int ced_open(struct inode *inode, struct file *file)
kref_put(&pdx->kref, ced_delete);
goto exit;
}
- } else { //uncomment this block if you want exclusive open
+ } else { /* uncomment this block if you want exclusive open */
dev_err(&interface->dev, "%s fail: already open", __func__);
retval = -EBUSY;
pdx->open_count--;
@@ -210,11 +210,11 @@ static int ced_release(struct inode *inode, struct file *file)
dev_dbg(&pdx->interface->dev, "%s called", __func__);
mutex_lock(&pdx->io_mutex);
- if (!--pdx->open_count && pdx->interface) // Allow autosuspend
+ if (!--pdx->open_count && pdx->interface) /* Allow autosuspend */
usb_autopm_put_interface(pdx->interface);
mutex_unlock(&pdx->io_mutex);
- kref_put(&pdx->kref, ced_delete); // decrement the count on our device
+ kref_put(&pdx->kref, ced_delete); /* decrement the count on our device */
return 0;
}
@@ -252,9 +252,9 @@ static int ced_flush(struct file *file, fl_owner_t id)
** not help with a device extension held by a file.
** return true if can accept new io requests, else false
*/
-static bool CanAcceptIoRequests(DEVICE_EXTENSION * pdx)
+static bool CanAcceptIoRequests(DEVICE_EXTENSION *pdx)
{
- return pdx && pdx->interface; // Can we accept IO requests
+ return pdx && pdx->interface; /* Can we accept IO requests */
}
/****************************************************************************
@@ -264,9 +264,9 @@ static bool CanAcceptIoRequests(DEVICE_EXTENSION * pdx)
static void ced_writechar_callback(struct urb *pUrb)
{
DEVICE_EXTENSION *pdx = pUrb->context;
- int nGot = pUrb->actual_length; // what we transferred
+ int nGot = pUrb->actual_length; /* what we transferred */
- if (pUrb->status) { // sync/async unlink faults aren't errors
+ if (pUrb->status) { /* sync/async unlink faults aren't errors */
if (!
(pUrb->status == -ENOENT || pUrb->status == -ECONNRESET
|| pUrb->status == -ESHUTDOWN)) {
@@ -278,36 +278,35 @@ static void ced_writechar_callback(struct urb *pUrb)
spin_lock(&pdx->err_lock);
pdx->errors = pUrb->status;
spin_unlock(&pdx->err_lock);
- nGot = 0; // and tidy up again if so
+ nGot = 0; /* and tidy up again if so */
- spin_lock(&pdx->charOutLock); // already at irq level
- pdx->dwOutBuffGet = 0; // Reset the output buffer
+ spin_lock(&pdx->charOutLock); /* already at irq level */
+ pdx->dwOutBuffGet = 0; /* Reset the output buffer */
pdx->dwOutBuffPut = 0;
- pdx->dwNumOutput = 0; // Clear the char count
- pdx->bPipeError[0] = 1; // Flag an error for later
- pdx->bSendCharsPending = false; // Allow other threads again
- spin_unlock(&pdx->charOutLock); // already at irq level
+ pdx->dwNumOutput = 0; /* Clear the char count */
+ pdx->bPipeError[0] = 1; /* Flag an error for later */
+ pdx->bSendCharsPending = false; /* Allow other threads again */
+ spin_unlock(&pdx->charOutLock); /* already at irq level */
dev_dbg(&pdx->interface->dev,
"%s - char out done, 0 chars sent", __func__);
} else {
dev_dbg(&pdx->interface->dev,
"%s - char out done, %d chars sent", __func__, nGot);
- spin_lock(&pdx->charOutLock); // already at irq level
- pdx->dwNumOutput -= nGot; // Now adjust the char send buffer
- pdx->dwOutBuffGet += nGot; // to match what we did
- if (pdx->dwOutBuffGet >= OUTBUF_SZ) // Can't do this any earlier as data could be overwritten
+ spin_lock(&pdx->charOutLock); /* already at irq level */
+ pdx->dwNumOutput -= nGot; /* Now adjust the char send buffer */
+ pdx->dwOutBuffGet += nGot; /* to match what we did */
+ if (pdx->dwOutBuffGet >= OUTBUF_SZ) /* Can't do this any earlier as data could be overwritten */
pdx->dwOutBuffGet = 0;
- if (pdx->dwNumOutput > 0) // if more to be done...
- {
- int nPipe = 0; // The pipe number to use
+ if (pdx->dwNumOutput > 0) { /* if more to be done... */
+ int nPipe = 0; /* The pipe number to use */
int iReturn;
char *pDat = &pdx->outputBuffer[pdx->dwOutBuffGet];
- unsigned int dwCount = pdx->dwNumOutput; // maximum to send
- if ((pdx->dwOutBuffGet + dwCount) > OUTBUF_SZ) // does it cross buffer end?
+ unsigned int dwCount = pdx->dwNumOutput; /* maximum to send */
+ if ((pdx->dwOutBuffGet + dwCount) > OUTBUF_SZ) /* does it cross buffer end? */
dwCount = OUTBUF_SZ - pdx->dwOutBuffGet;
- spin_unlock(&pdx->charOutLock); // we are done with stuff that changes
- memcpy(pdx->pCoherCharOut, pDat, dwCount); // copy output data to the buffer
+ spin_unlock(&pdx->charOutLock); /* we are done with stuff that changes */
+ memcpy(pdx->pCoherCharOut, pDat, dwCount); /* copy output data to the buffer */
usb_fill_bulk_urb(pdx->pUrbCharOut, pdx->udev,
usb_sndbulkpipe(pdx->udev,
pdx->epAddr[0]),
@@ -315,22 +314,22 @@ static void ced_writechar_callback(struct urb *pUrb)
ced_writechar_callback, pdx);
pdx->pUrbCharOut->transfer_flags |=
URB_NO_TRANSFER_DMA_MAP;
- usb_anchor_urb(pdx->pUrbCharOut, &pdx->submitted); // in case we need to kill it
+ usb_anchor_urb(pdx->pUrbCharOut, &pdx->submitted); /* in case we need to kill it */
iReturn = usb_submit_urb(pdx->pUrbCharOut, GFP_ATOMIC);
dev_dbg(&pdx->interface->dev, "%s n=%d>%s<", __func__,
dwCount, pDat);
- spin_lock(&pdx->charOutLock); // grab lock for errors
+ spin_lock(&pdx->charOutLock); /* grab lock for errors */
if (iReturn) {
- pdx->bPipeError[nPipe] = 1; // Flag an error to be handled later
- pdx->bSendCharsPending = false; // Allow other threads again
+ pdx->bPipeError[nPipe] = 1; /* Flag an error to be handled later */
+ pdx->bSendCharsPending = false; /* Allow other threads again */
usb_unanchor_urb(pdx->pUrbCharOut);
dev_err(&pdx->interface->dev,
"%s usb_submit_urb() returned %d",
__func__, iReturn);
}
} else
- pdx->bSendCharsPending = false; // Allow other threads again
- spin_unlock(&pdx->charOutLock); // already at irq level
+ pdx->bSendCharsPending = false; /* Allow other threads again */
+ spin_unlock(&pdx->charOutLock); /* already at irq level */
}
}
@@ -339,44 +338,43 @@ static void ced_writechar_callback(struct urb *pUrb)
** Transmit the characters in the output buffer to the 1401. This may need
** breaking down into multiple transfers.
****************************************************************************/
-int SendChars(DEVICE_EXTENSION * pdx)
+int SendChars(DEVICE_EXTENSION *pdx)
{
int iReturn = U14ERR_NOERROR;
- spin_lock_irq(&pdx->charOutLock); // Protect ourselves
+ spin_lock_irq(&pdx->charOutLock); /* Protect ourselves */
- if ((!pdx->bSendCharsPending) && // Not currently sending
- (pdx->dwNumOutput > 0) && // has characters to output
- (CanAcceptIoRequests(pdx))) // and current activity is OK
- {
- unsigned int dwCount = pdx->dwNumOutput; // Get a copy of the character count
- pdx->bSendCharsPending = true; // Set flag to lock out other threads
+ if ((!pdx->bSendCharsPending) && /* Not currently sending */
+ (pdx->dwNumOutput > 0) && /* has characters to output */
+ (CanAcceptIoRequests(pdx))) { /* and current activity is OK */
+ unsigned int dwCount = pdx->dwNumOutput; /* Get a copy of the character count */
+ pdx->bSendCharsPending = true; /* Set flag to lock out other threads */
dev_dbg(&pdx->interface->dev,
"Send %d chars to 1401, EP0 flag %d\n", dwCount,
pdx->nPipes == 3);
- // If we have only 3 end points we must send the characters to the 1401 using EP0.
+ /* If we have only 3 end points we must send the characters to the 1401 using EP0. */
if (pdx->nPipes == 3) {
- // For EP0 character transmissions to the 1401, we have to hang about until they
- // are gone, as otherwise without more character IO activity they will never go.
- unsigned int count = dwCount; // Local char counter
- unsigned int index = 0; // The index into the char buffer
+ /* For EP0 character transmissions to the 1401, we have to hang about until they */
+ /* are gone, as otherwise without more character IO activity they will never go. */
+ unsigned int count = dwCount; /* Local char counter */
+ unsigned int index = 0; /* The index into the char buffer */
- spin_unlock_irq(&pdx->charOutLock); // Free spinlock as we call USBD
+ spin_unlock_irq(&pdx->charOutLock); /* Free spinlock as we call USBD */
while ((count > 0) && (iReturn == U14ERR_NOERROR)) {
- // We have to break the transfer up into 64-byte chunks because of a 2270 problem
- int n = count > 64 ? 64 : count; // Chars for this xfer, max of 64
+ /* We have to break the transfer up into 64-byte chunks because of a 2270 problem */
+ int n = count > 64 ? 64 : count; /* Chars for this xfer, max of 64 */
int nSent = usb_control_msg(pdx->udev,
- usb_sndctrlpipe(pdx->udev, 0), // use end point 0
- DB_CHARS, // bRequest
- (H_TO_D | VENDOR | DEVREQ), // to the device, vendor request to the device
- 0, 0, // value and index are both 0
- &pdx->outputBuffer[index], // where to send from
- n, // how much to send
- 1000); // timeout in jiffies
+ usb_sndctrlpipe(pdx->udev, 0), /* use end point 0 */
+ DB_CHARS, /* bRequest */
+ (H_TO_D | VENDOR | DEVREQ), /* to the device, vendor request to the device */
+ 0, 0, /* value and index are both 0 */
+ &pdx->outputBuffer[index], /* where to send from */
+ n, /* how much to send */
+					    1000);	/* timeout in milliseconds */
if (nSent <= 0) {
- iReturn = nSent ? nSent : -ETIMEDOUT; // if 0 chars says we timed out
+ iReturn = nSent ? nSent : -ETIMEDOUT; /* if 0 chars says we timed out */
dev_err(&pdx->interface->dev,
"Send %d chars by EP0 failed: %d",
n, iReturn);
@@ -388,19 +386,19 @@ int SendChars(DEVICE_EXTENSION * pdx)
}
}
- spin_lock_irq(&pdx->charOutLock); // Protect pdx changes, released by general code
- pdx->dwOutBuffGet = 0; // so reset the output buffer
+ spin_lock_irq(&pdx->charOutLock); /* Protect pdx changes, released by general code */
+ pdx->dwOutBuffGet = 0; /* so reset the output buffer */
pdx->dwOutBuffPut = 0;
- pdx->dwNumOutput = 0; // and clear the buffer count
- pdx->bSendCharsPending = false; // Allow other threads again
- } else { // Here for sending chars normally - we hold the spin lock
- int nPipe = 0; // The pipe number to use
+ pdx->dwNumOutput = 0; /* and clear the buffer count */
+ pdx->bSendCharsPending = false; /* Allow other threads again */
+ } else { /* Here for sending chars normally - we hold the spin lock */
+ int nPipe = 0; /* The pipe number to use */
char *pDat = &pdx->outputBuffer[pdx->dwOutBuffGet];
- if ((pdx->dwOutBuffGet + dwCount) > OUTBUF_SZ) // does it cross buffer end?
+ if ((pdx->dwOutBuffGet + dwCount) > OUTBUF_SZ) /* does it cross buffer end? */
dwCount = OUTBUF_SZ - pdx->dwOutBuffGet;
- spin_unlock_irq(&pdx->charOutLock); // we are done with stuff that changes
- memcpy(pdx->pCoherCharOut, pDat, dwCount); // copy output data to the buffer
+ spin_unlock_irq(&pdx->charOutLock); /* we are done with stuff that changes */
+ memcpy(pdx->pCoherCharOut, pDat, dwCount); /* copy output data to the buffer */
usb_fill_bulk_urb(pdx->pUrbCharOut, pdx->udev,
usb_sndbulkpipe(pdx->udev,
pdx->epAddr[0]),
@@ -410,11 +408,11 @@ int SendChars(DEVICE_EXTENSION * pdx)
URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(pdx->pUrbCharOut, &pdx->submitted);
iReturn = usb_submit_urb(pdx->pUrbCharOut, GFP_KERNEL);
- spin_lock_irq(&pdx->charOutLock); // grab lock for errors
+ spin_lock_irq(&pdx->charOutLock); /* grab lock for errors */
if (iReturn) {
- pdx->bPipeError[nPipe] = 1; // Flag an error to be handled later
- pdx->bSendCharsPending = false; // Allow other threads again
- usb_unanchor_urb(pdx->pUrbCharOut); // remove from list of active urbs
+ pdx->bPipeError[nPipe] = 1; /* Flag an error to be handled later */
+ pdx->bSendCharsPending = false; /* Allow other threads again */
+ usb_unanchor_urb(pdx->pUrbCharOut); /* remove from list of active urbs */
}
}
} else if (pdx->bSendCharsPending && (pdx->dwNumOutput > 0))
@@ -422,7 +420,7 @@ int SendChars(DEVICE_EXTENSION * pdx)
"SendChars bSendCharsPending:true");
dev_dbg(&pdx->interface->dev, "SendChars exit code: %d", iReturn);
- spin_unlock_irq(&pdx->charOutLock); // Now let go of the spinlock
+ spin_unlock_irq(&pdx->charOutLock); /* Now let go of the spinlock */
return iReturn;
}
@@ -440,14 +438,14 @@ int SendChars(DEVICE_EXTENSION * pdx)
** pdx Is our device extension which holds all we know about the transfer.
** n The number of bytes to move one way or the other.
***************************************************************************/
-static void CopyUserSpace(DEVICE_EXTENSION * pdx, int n)
+static void CopyUserSpace(DEVICE_EXTENSION *pdx, int n)
{
unsigned int nArea = pdx->StagedId;
if (nArea < MAX_TRANSAREAS) {
- TRANSAREA *pArea = &pdx->rTransDef[nArea]; // area to be used
+ TRANSAREA *pArea = &pdx->rTransDef[nArea]; /* area to be used */
unsigned int dwOffset =
pdx->StagedDone + pdx->StagedOffset + pArea->dwBaseOffset;
- char *pCoherBuf = pdx->pCoherStagedIO; // coherent buffer
+ char *pCoherBuf = pdx->pCoherStagedIO; /* coherent buffer */
if (!pArea->bUsed) {
dev_err(&pdx->interface->dev, "%s area %d unused",
__func__, nArea);
@@ -455,15 +453,15 @@ static void CopyUserSpace(DEVICE_EXTENSION * pdx, int n)
}
while (n) {
- int nPage = dwOffset >> PAGE_SHIFT; // page number in table
+ int nPage = dwOffset >> PAGE_SHIFT; /* page number in table */
if (nPage < pArea->nPages) {
char *pvAddress =
(char *)kmap_atomic(pArea->pPages[nPage]);
if (pvAddress) {
- unsigned int uiPageOff = dwOffset & (PAGE_SIZE - 1); // offset into the page
- size_t uiXfer = PAGE_SIZE - uiPageOff; // max to transfer on this page
- if (uiXfer > n) // limit byte count if too much
- uiXfer = n; // for the page
+ unsigned int uiPageOff = dwOffset & (PAGE_SIZE - 1); /* offset into the page */
+ size_t uiXfer = PAGE_SIZE - uiPageOff; /* max to transfer on this page */
+ if (uiXfer > n) /* limit byte count if too much */
+ uiXfer = n; /* for the page */
if (pdx->StagedRead)
memcpy(pvAddress + uiPageOff,
pCoherBuf, uiXfer);
@@ -494,8 +492,8 @@ static void CopyUserSpace(DEVICE_EXTENSION * pdx, int n)
nArea);
}
-// Forward declarations for stuff used circularly
-static int StageChunk(DEVICE_EXTENSION * pdx);
+/* Forward declarations for stuff used circularly */
+static int StageChunk(DEVICE_EXTENSION *pdx);
/***************************************************************************
** ReadWrite_Complete
**
@@ -504,14 +502,14 @@ static int StageChunk(DEVICE_EXTENSION * pdx);
static void staged_callback(struct urb *pUrb)
{
DEVICE_EXTENSION *pdx = pUrb->context;
- unsigned int nGot = pUrb->actual_length; // what we transferred
+ unsigned int nGot = pUrb->actual_length; /* what we transferred */
bool bCancel = false;
- bool bRestartCharInput; // used at the end
+ bool bRestartCharInput; /* used at the end */
- spin_lock(&pdx->stagedLock); // stop ReadWriteMem() action while this routine is running
- pdx->bStagedUrbPending = false; // clear the flag for staged IRP pending
+ spin_lock(&pdx->stagedLock); /* stop ReadWriteMem() action while this routine is running */
+ pdx->bStagedUrbPending = false; /* clear the flag for staged IRP pending */
- if (pUrb->status) { // sync/async unlink faults aren't errors
+ if (pUrb->status) { /* sync/async unlink faults aren't errors */
if (!
(pUrb->status == -ENOENT || pUrb->status == -ECONNRESET
|| pUrb->status == -ESHUTDOWN)) {
@@ -525,40 +523,37 @@ static void staged_callback(struct urb *pUrb)
spin_lock(&pdx->err_lock);
pdx->errors = pUrb->status;
spin_unlock(&pdx->err_lock);
- nGot = 0; // and tidy up again if so
+ nGot = 0; /* and tidy up again if so */
bCancel = true;
} else {
dev_dbg(&pdx->interface->dev, "%s %d chars xferred", __func__,
nGot);
- if (pdx->StagedRead) // if reading, save to user space
- CopyUserSpace(pdx, nGot); // copy from buffer to user
+ if (pdx->StagedRead) /* if reading, save to user space */
+ CopyUserSpace(pdx, nGot); /* copy from buffer to user */
if (nGot == 0)
dev_dbg(&pdx->interface->dev, "%s ZLP", __func__);
}
- // Update the transfer length based on the TransferBufferLength value in the URB
+ /* Update the transfer length based on the TransferBufferLength value in the URB */
pdx->StagedDone += nGot;
dev_dbg(&pdx->interface->dev, "%s, done %d bytes of %d", __func__,
pdx->StagedDone, pdx->StagedLength);
- if ((pdx->StagedDone == pdx->StagedLength) || // If no more to do
- (bCancel)) // or this IRP was cancelled
- {
- TRANSAREA *pArea = &pdx->rTransDef[pdx->StagedId]; // Transfer area info
+ if ((pdx->StagedDone == pdx->StagedLength) || /* If no more to do */
+ (bCancel)) { /* or this IRP was cancelled */
+ TRANSAREA *pArea = &pdx->rTransDef[pdx->StagedId]; /* Transfer area info */
dev_dbg(&pdx->interface->dev,
"%s transfer done, bytes %d, cancel %d", __func__,
pdx->StagedDone, bCancel);
- // Here is where we sort out what to do with this transfer if using a circular buffer. We have
- // a completed transfer that can be assumed to fit into the transfer area. We should be able to
- // add this to the end of a growing block or to use it to start a new block unless the code
- // that calculates the offset to use (in ReadWriteMem) is totally duff.
- if ((pArea->bCircular) && (pArea->bCircToHost) && (!bCancel) && // Time to sort out circular buffer info?
- (pdx->StagedRead)) // Only for tohost transfers for now
- {
- if (pArea->aBlocks[1].dwSize > 0) // If block 1 is in use we must append to it
- {
+ /* Here is where we sort out what to do with this transfer if using a circular buffer. We have */
+ /* a completed transfer that can be assumed to fit into the transfer area. We should be able to */
+ /* add this to the end of a growing block or to use it to start a new block unless the code */
+ /* that calculates the offset to use (in ReadWriteMem) is totally duff. */
+ if ((pArea->bCircular) && (pArea->bCircToHost) && (!bCancel) && /* Time to sort out circular buffer info? */
+ (pdx->StagedRead)) { /* Only for tohost transfers for now */
+ if (pArea->aBlocks[1].dwSize > 0) { /* If block 1 is in use we must append to it */
if (pdx->StagedOffset ==
(pArea->aBlocks[1].dwOffset +
pArea->aBlocks[1].dwSize)) {
@@ -569,7 +564,7 @@ static void staged_callback(struct urb *pUrb)
pArea->aBlocks[1].dwSize,
pArea->aBlocks[1].dwOffset);
} else {
- // Here things have gone very, very, wrong, but I cannot see how this can actually be achieved
+				/* Here things have gone very, very wrong, but I cannot see how this can actually be achieved */
pArea->aBlocks[1].dwOffset =
pdx->StagedOffset;
pArea->aBlocks[1].dwSize =
@@ -580,22 +575,20 @@ static void staged_callback(struct urb *pUrb)
pArea->aBlocks[1].dwSize,
pArea->aBlocks[1].dwOffset);
}
- } else // If block 1 is not used, we try to add to block 0
- {
- if (pArea->aBlocks[0].dwSize > 0) // Got stored block 0 information?
- { // Must append onto the existing block 0
+ } else { /* If block 1 is not used, we try to add to block 0 */
+ if (pArea->aBlocks[0].dwSize > 0) { /* Got stored block 0 information? */
+ /* Must append onto the existing block 0 */
if (pdx->StagedOffset ==
(pArea->aBlocks[0].dwOffset +
pArea->aBlocks[0].dwSize)) {
- pArea->aBlocks[0].dwSize += pdx->StagedLength; // Just add this transfer in
+ pArea->aBlocks[0].dwSize += pdx->StagedLength; /* Just add this transfer in */
dev_dbg(&pdx->interface->dev,
"RWM_Complete, circ block 0 now %d bytes at %d",
pArea->aBlocks[0].
dwSize,
pArea->aBlocks[0].
dwOffset);
- } else // If it doesn't append, put into new block 1
- {
+ } else { /* If it doesn't append, put into new block 1 */
pArea->aBlocks[1].dwOffset =
pdx->StagedOffset;
pArea->aBlocks[1].dwSize =
@@ -607,8 +600,7 @@ static void staged_callback(struct urb *pUrb)
pArea->aBlocks[1].
dwOffset);
}
- } else // No info stored yet, just save in block 0
- {
+ } else { /* No info stored yet, just save in block 0 */
pArea->aBlocks[0].dwOffset =
pdx->StagedOffset;
pArea->aBlocks[0].dwSize =
@@ -621,21 +613,19 @@ static void staged_callback(struct urb *pUrb)
}
}
- if (!bCancel) // Don't generate an event if cancelled
- {
+ if (!bCancel) { /* Don't generate an event if cancelled */
dev_dbg(&pdx->interface->dev,
"RWM_Complete, bCircular %d, bToHost %d, eStart %d, eSize %d",
pArea->bCircular, pArea->bEventToHost,
pArea->dwEventSt, pArea->dwEventSz);
- if ((pArea->dwEventSz) && // Set a user-mode event...
- (pdx->StagedRead == pArea->bEventToHost)) // ...on transfers in this direction?
- {
- int iWakeUp = 0; // assume
- // If we have completed the right sort of DMA transfer then set the event to notify
- // the user code to wake up anyone that is waiting.
- if ((pArea->bCircular) && // Circular areas use a simpler test
- (pArea->bCircToHost)) // only in supported direction
- { // Is total data waiting up to size limit?
+ if ((pArea->dwEventSz) && /* Set a user-mode event... */
+ (pdx->StagedRead == pArea->bEventToHost)) { /* ...on transfers in this direction? */
+ int iWakeUp = 0; /* assume */
+ /* If we have completed the right sort of DMA transfer then set the event to notify */
+ /* the user code to wake up anyone that is waiting. */
+ if ((pArea->bCircular) && /* Circular areas use a simpler test */
+ (pArea->bCircToHost)) { /* only in supported direction */
+ /* Is total data waiting up to size limit? */
unsigned int dwTotal =
pArea->aBlocks[0].dwSize +
pArea->aBlocks[1].dwSize;
@@ -653,19 +643,17 @@ static void staged_callback(struct urb *pUrb)
if (iWakeUp) {
dev_dbg(&pdx->interface->dev,
"About to set event to notify app");
- wake_up_interruptible(&pArea->wqEvent); // wake up waiting processes
- ++pArea->iWakeUp; // increment wakeup count
+ wake_up_interruptible(&pArea->wqEvent); /* wake up waiting processes */
+ ++pArea->iWakeUp; /* increment wakeup count */
}
}
}
- pdx->dwDMAFlag = MODE_CHAR; // Switch back to char mode before ReadWriteMem call
+ pdx->dwDMAFlag = MODE_CHAR; /* Switch back to char mode before ReadWriteMem call */
- if (!bCancel) // Don't look for waiting transfer if cancelled
- {
- // If we have a transfer waiting, kick it off
- if (pdx->bXFerWaiting) // Got a block xfer waiting?
- {
+ if (!bCancel) { /* Don't look for waiting transfer if cancelled */
+ /* If we have a transfer waiting, kick it off */
+ if (pdx->bXFerWaiting) { /* Got a block xfer waiting? */
int iReturn;
dev_info(&pdx->interface->dev,
"*** RWM_Complete *** pending transfer will now be set up!!!");
@@ -682,22 +670,22 @@ static void staged_callback(struct urb *pUrb)
}
}
- } else // Here for more to do
- StageChunk(pdx); // fire off the next bit
+ } else /* Here for more to do */
+ StageChunk(pdx); /* fire off the next bit */
- // While we hold the stagedLock, see if we should reallow character input ints
- // Don't allow if cancelled, or if a new block has started or if there is a waiting block.
- // This feels wrong as we should ask which spin lock protects dwDMAFlag.
+ /* While we hold the stagedLock, see if we should reallow character input ints */
+ /* Don't allow if cancelled, or if a new block has started or if there is a waiting block. */
+ /* This feels wrong as we should ask which spin lock protects dwDMAFlag. */
bRestartCharInput = !bCancel && (pdx->dwDMAFlag == MODE_CHAR)
&& !pdx->bXFerWaiting;
- spin_unlock(&pdx->stagedLock); // Finally release the lock again
+ spin_unlock(&pdx->stagedLock); /* Finally release the lock again */
- // This is not correct as dwDMAFlag is protected by the staged lock, but it is treated
- // in Allowi as if it were protected by the char lock. In any case, most systems will
- // not be upset by char input during DMA... sigh. Needs sorting out.
- if (bRestartCharInput) // may be out of date, but...
- Allowi(pdx); // ...Allowi tests a lock too.
+ /* This is not correct as dwDMAFlag is protected by the staged lock, but it is treated */
+ /* in Allowi as if it were protected by the char lock. In any case, most systems will */
+ /* not be upset by char input during DMA... sigh. Needs sorting out. */
+ if (bRestartCharInput) /* may be out of date, but... */
+ Allowi(pdx); /* ...Allowi tests a lock too. */
dev_dbg(&pdx->interface->dev, "%s done", __func__);
}
@@ -709,29 +697,28 @@ static void staged_callback(struct urb *pUrb)
** The calling code must have acquired the staging spinlock before calling
** this function, and is responsible for releasing it. We are at callback level.
****************************************************************************/
-static int StageChunk(DEVICE_EXTENSION * pdx)
+static int StageChunk(DEVICE_EXTENSION *pdx)
{
int iReturn = U14ERR_NOERROR;
unsigned int ChunkSize;
- int nPipe = pdx->StagedRead ? 3 : 2; // The pipe number to use for reads or writes
+ int nPipe = pdx->StagedRead ? 3 : 2; /* The pipe number to use for reads or writes */
if (pdx->nPipes == 3)
- nPipe--; // Adjust for the 3-pipe case
- if (nPipe < 0) // and trap case that should never happen
+ nPipe--; /* Adjust for the 3-pipe case */
+ if (nPipe < 0) /* and trap case that should never happen */
return U14ERR_FAIL;
- if (!CanAcceptIoRequests(pdx)) // got sudden remove?
- {
+ if (!CanAcceptIoRequests(pdx)) { /* got sudden remove? */
dev_info(&pdx->interface->dev, "%s sudden remove, giving up",
__func__);
- return U14ERR_FAIL; // could do with a better error
+ return U14ERR_FAIL; /* could do with a better error */
}
- ChunkSize = (pdx->StagedLength - pdx->StagedDone); // transfer length remaining
- if (ChunkSize > STAGED_SZ) // make sure to keep legal
- ChunkSize = STAGED_SZ; // limit to max allowed
+ ChunkSize = (pdx->StagedLength - pdx->StagedDone); /* transfer length remaining */
+ if (ChunkSize > STAGED_SZ) /* make sure to keep legal */
+ ChunkSize = STAGED_SZ; /* limit to max allowed */
- if (!pdx->StagedRead) // if writing...
- CopyUserSpace(pdx, ChunkSize); // ...copy data into the buffer
+ if (!pdx->StagedRead) /* if writing... */
+ CopyUserSpace(pdx, ChunkSize); /* ...copy data into the buffer */
usb_fill_bulk_urb(pdx->pStagedUrb, pdx->udev,
pdx->StagedRead ? usb_rcvbulkpipe(pdx->udev,
@@ -740,15 +727,15 @@ static int StageChunk(DEVICE_EXTENSION * pdx)
usb_sndbulkpipe(pdx->udev, pdx->epAddr[nPipe]),
pdx->pCoherStagedIO, ChunkSize, staged_callback, pdx);
pdx->pStagedUrb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
- usb_anchor_urb(pdx->pStagedUrb, &pdx->submitted); // in case we need to kill it
+ usb_anchor_urb(pdx->pStagedUrb, &pdx->submitted); /* in case we need to kill it */
iReturn = usb_submit_urb(pdx->pStagedUrb, GFP_ATOMIC);
if (iReturn) {
- usb_unanchor_urb(pdx->pStagedUrb); // kill it
- pdx->bPipeError[nPipe] = 1; // Flag an error to be handled later
+ usb_unanchor_urb(pdx->pStagedUrb); /* kill it */
+ pdx->bPipeError[nPipe] = 1; /* Flag an error to be handled later */
dev_err(&pdx->interface->dev, "%s submit urb failed, code %d",
__func__, iReturn);
} else
- pdx->bStagedUrbPending = true; // Set the flag for staged URB pending
+ pdx->bStagedUrbPending = true; /* Set the flag for staged URB pending */
dev_dbg(&pdx->interface->dev, "%s done so far:%d, this size:%d",
__func__, pdx->StagedDone, ChunkSize);
@@ -772,13 +759,12 @@ static int StageChunk(DEVICE_EXTENSION * pdx)
** transfer.
** dwLen - the number of bytes to transfer.
*/
-int ReadWriteMem(DEVICE_EXTENSION * pdx, bool Read, unsigned short wIdent,
+int ReadWriteMem(DEVICE_EXTENSION *pdx, bool Read, unsigned short wIdent,
unsigned int dwOffs, unsigned int dwLen)
{
- TRANSAREA *pArea = &pdx->rTransDef[wIdent]; // Transfer area info
+ TRANSAREA *pArea = &pdx->rTransDef[wIdent]; /* Transfer area info */
- if (!CanAcceptIoRequests(pdx)) // Are we in a state to accept new requests?
- {
+ if (!CanAcceptIoRequests(pdx)) { /* Are we in a state to accept new requests? */
dev_err(&pdx->interface->dev, "%s can't accept requests",
__func__);
return U14ERR_FAIL;
@@ -788,56 +774,51 @@ int ReadWriteMem(DEVICE_EXTENSION * pdx, bool Read, unsigned short wIdent,
"%s xfer %d bytes to %s, offset %d, area %d", __func__, dwLen,
Read ? "host" : "1401", dwOffs, wIdent);
- // Amazingly, we can get an escape sequence back before the current staged Urb is done, so we
- // have to check for this situation and, if so, wait until all is OK.
+ /* Amazingly, we can get an escape sequence back before the current staged Urb is done, so we */
+ /* have to check for this situation and, if so, wait until all is OK. */
if (pdx->bStagedUrbPending) {
- pdx->bXFerWaiting = true; // Flag we are waiting
+ pdx->bXFerWaiting = true; /* Flag we are waiting */
dev_info(&pdx->interface->dev,
"%s xfer is waiting, as previous staged pending",
__func__);
return U14ERR_NOERROR;
}
- if (dwLen == 0) // allow 0-len read or write; just return success
- {
+ if (dwLen == 0) { /* allow 0-len read or write; just return success */
dev_dbg(&pdx->interface->dev,
"%s OK; zero-len read/write request", __func__);
return U14ERR_NOERROR;
}
- if ((pArea->bCircular) && // Circular transfer?
- (pArea->bCircToHost) && (Read)) // In a supported direction
- { // If so, we sort out offset ourself
- bool bWait = false; // Flag for transfer having to wait
+ if ((pArea->bCircular) && /* Circular transfer? */
+ (pArea->bCircToHost) && (Read)) { /* In a supported direction */
+ /* If so, we sort out offset ourself */
+ bool bWait = false; /* Flag for transfer having to wait */
dev_dbg(&pdx->interface->dev,
"Circular buffers are %d at %d and %d at %d",
pArea->aBlocks[0].dwSize, pArea->aBlocks[0].dwOffset,
pArea->aBlocks[1].dwSize, pArea->aBlocks[1].dwOffset);
- if (pArea->aBlocks[1].dwSize > 0) // Using the second block already?
- {
- dwOffs = pArea->aBlocks[1].dwOffset + pArea->aBlocks[1].dwSize; // take offset from that
- bWait = (dwOffs + dwLen) > pArea->aBlocks[0].dwOffset; // Wait if will overwrite block 0?
- bWait |= (dwOffs + dwLen) > pArea->dwLength; // or if it overflows the buffer
- } else // Area 1 not in use, try to use area 0
- {
- if (pArea->aBlocks[0].dwSize == 0) // Reset block 0 if not in use
+ if (pArea->aBlocks[1].dwSize > 0) { /* Using the second block already? */
+ dwOffs = pArea->aBlocks[1].dwOffset + pArea->aBlocks[1].dwSize; /* take offset from that */
+ bWait = (dwOffs + dwLen) > pArea->aBlocks[0].dwOffset; /* Wait if will overwrite block 0? */
+ bWait |= (dwOffs + dwLen) > pArea->dwLength; /* or if it overflows the buffer */
+ } else { /* Area 1 not in use, try to use area 0 */
+ if (pArea->aBlocks[0].dwSize == 0) /* Reset block 0 if not in use */
pArea->aBlocks[0].dwOffset = 0;
dwOffs =
pArea->aBlocks[0].dwOffset +
pArea->aBlocks[0].dwSize;
- if ((dwOffs + dwLen) > pArea->dwLength) // Off the end of the buffer?
- {
- pArea->aBlocks[1].dwOffset = 0; // Set up to use second block
+ if ((dwOffs + dwLen) > pArea->dwLength) { /* Off the end of the buffer? */
+ pArea->aBlocks[1].dwOffset = 0; /* Set up to use second block */
dwOffs = 0;
- bWait = (dwOffs + dwLen) > pArea->aBlocks[0].dwOffset; // Wait if will overwrite block 0?
- bWait |= (dwOffs + dwLen) > pArea->dwLength; // or if it overflows the buffer
+ bWait = (dwOffs + dwLen) > pArea->aBlocks[0].dwOffset; /* Wait if will overwrite block 0? */
+ bWait |= (dwOffs + dwLen) > pArea->dwLength; /* or if it overflows the buffer */
}
}
- if (bWait) // This transfer will have to wait?
- {
- pdx->bXFerWaiting = true; // Flag we are waiting
+ if (bWait) { /* This transfer will have to wait? */
+ pdx->bXFerWaiting = true; /* Flag we are waiting */
dev_dbg(&pdx->interface->dev,
"%s xfer waiting for circular buffer space",
__func__);
@@ -848,17 +829,17 @@ int ReadWriteMem(DEVICE_EXTENSION * pdx, bool Read, unsigned short wIdent,
"%s circular xfer, %d bytes starting at %d", __func__,
dwLen, dwOffs);
}
- // Save the parameters for the read\write transfer
- pdx->StagedRead = Read; // Save the parameters for this read
- pdx->StagedId = wIdent; // ID allows us to get transfer area info
- pdx->StagedOffset = dwOffs; // The area within the transfer area
+ /* Save the parameters for the read\write transfer */
+ pdx->StagedRead = Read; /* Save the parameters for this read */
+ pdx->StagedId = wIdent; /* ID allows us to get transfer area info */
+ pdx->StagedOffset = dwOffs; /* The area within the transfer area */
pdx->StagedLength = dwLen;
- pdx->StagedDone = 0; // Initialise the byte count
- pdx->dwDMAFlag = MODE_LINEAR; // Set DMA mode flag at this point
- pdx->bXFerWaiting = false; // Clearly not a transfer waiting now
+ pdx->StagedDone = 0; /* Initialise the byte count */
+ pdx->dwDMAFlag = MODE_LINEAR; /* Set DMA mode flag at this point */
+ pdx->bXFerWaiting = false; /* Clearly not a transfer waiting now */
-// KeClearEvent(&pdx->StagingDoneEvent); // Clear the transfer done event
- StageChunk(pdx); // fire off the first chunk
+/* KeClearEvent(&pdx->StagingDoneEvent); // Clear the transfer done event */
+ StageChunk(pdx); /* fire off the first chunk */
return U14ERR_NOERROR;
}
@@ -877,12 +858,11 @@ static bool ReadChar(unsigned char *pChar, char *pBuf, unsigned int *pdDone,
bool bRead = false;
unsigned int dDone = *pdDone;
- if (dDone < dGot) // If there is more data
- {
- *pChar = (unsigned char)pBuf[dDone]; // Extract the next char
- dDone++; // Increment the done count
+ if (dDone < dGot) { /* If there is more data */
+ *pChar = (unsigned char)pBuf[dDone]; /* Extract the next char */
+ dDone++; /* Increment the done count */
*pdDone = dDone;
- bRead = true; // and flag success
+ bRead = true; /* and flag success */
}
return bRead;
@@ -962,32 +942,32 @@ static bool ReadHuff(volatile unsigned int *pDWord, char *pBuf,
** we start handling the data at offset zero.
**
*****************************************************************************/
-static bool ReadDMAInfo(volatile DMADESC * pDmaDesc, DEVICE_EXTENSION * pdx,
+static bool ReadDMAInfo(volatile DMADESC *pDmaDesc, DEVICE_EXTENSION *pdx,
char *pBuf, unsigned int dwCount)
{
- bool bResult = false; // assume we won't succeed
+ bool bResult = false; /* assume we won't succeed */
unsigned char ucData;
- unsigned int dDone = 0; // We haven't parsed anything so far
+ unsigned int dDone = 0; /* We haven't parsed anything so far */
dev_dbg(&pdx->interface->dev, "%s", __func__);
if (ReadChar(&ucData, pBuf, &dDone, dwCount)) {
- unsigned char ucTransCode = (ucData & 0x0F); // get code for transfer type
- unsigned short wIdent = ((ucData >> 4) & 0x07); // and area identifier
+ unsigned char ucTransCode = (ucData & 0x0F); /* get code for transfer type */
+ unsigned short wIdent = ((ucData >> 4) & 0x07); /* and area identifier */
- // fill in the structure we were given
- pDmaDesc->wTransType = ucTransCode; // type of transfer
- pDmaDesc->wIdent = wIdent; // area to use
- pDmaDesc->dwSize = 0; // initialise other bits
+ /* fill in the structure we were given */
+ pDmaDesc->wTransType = ucTransCode; /* type of transfer */
+ pDmaDesc->wIdent = wIdent; /* area to use */
+ pDmaDesc->dwSize = 0; /* initialise other bits */
pDmaDesc->dwOffset = 0;
dev_dbg(&pdx->interface->dev, "%s type: %d ident: %d", __func__,
pDmaDesc->wTransType, pDmaDesc->wIdent);
- pDmaDesc->bOutWard = (ucTransCode != TM_EXTTOHOST); // set transfer direction
+ pDmaDesc->bOutWard = (ucTransCode != TM_EXTTOHOST); /* set transfer direction */
switch (ucTransCode) {
- case TM_EXTTOHOST: // Extended linear transfer modes (the only ones!)
+ case TM_EXTTOHOST: /* Extended linear transfer modes (the only ones!) */
case TM_EXTTO1401:
{
bResult =
@@ -1001,14 +981,14 @@ static bool ReadDMAInfo(volatile DMADESC * pDmaDesc, DEVICE_EXTENSION * pdx,
__func__, pDmaDesc->dwOffset,
pDmaDesc->dwSize);
- if ((wIdent >= MAX_TRANSAREAS) || // Illegal area number, or...
- (!pdx->rTransDef[wIdent].bUsed) || // area not set up, or...
- (pDmaDesc->dwOffset > pdx->rTransDef[wIdent].dwLength) || // range/size
+ if ((wIdent >= MAX_TRANSAREAS) || /* Illegal area number, or... */
+ (!pdx->rTransDef[wIdent].bUsed) || /* area not set up, or... */
+ (pDmaDesc->dwOffset > pdx->rTransDef[wIdent].dwLength) || /* range/size */
((pDmaDesc->dwOffset +
pDmaDesc->dwSize) >
(pdx->rTransDef[wIdent].
dwLength))) {
- bResult = false; // bad parameter(s)
+ bResult = false; /* bad parameter(s) */
dev_dbg(&pdx->interface->dev,
"%s bad param - id %d, bUsed %d, offset %d, size %d, area length %d",
__func__, wIdent,
@@ -1028,7 +1008,7 @@ static bool ReadDMAInfo(volatile DMADESC * pDmaDesc, DEVICE_EXTENSION * pdx,
} else
bResult = false;
- if (!bResult) // now check parameters for validity
+ if (!bResult) /* now check parameters for validity */
dev_err(&pdx->interface->dev, "%s error reading Esc sequence",
__func__);
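
For reference, the first byte of the escape sequence that ReadDMAInfo() parses above packs the transfer code into the low nibble and the transfer-area identifier into bits 4 to 6. A minimal stand-alone sketch of that decode, using the TM_EXTTOHOST/TM_EXTTO1401 values from usb1401.h (the sample byte 0x28 is purely illustrative):

    #include <stdio.h>

    #define TM_EXTTOHOST 8   /* extended tohost, as in usb1401.h */
    #define TM_EXTTO1401 9   /* extended to1401, as in usb1401.h */

    int main(void)
    {
        unsigned char ucData = 0x28;                 /* hypothetical header byte */
        unsigned int code = ucData & 0x0F;           /* transfer type -> 8 */
        unsigned int ident = (ucData >> 4) & 0x07;   /* area identifier -> 2 */

        printf("area %u: %s\n", ident,
               code == TM_EXTTOHOST ? "1401 -> host" :
               code == TM_EXTTO1401 ? "host -> 1401" : "unknown transfer type");
        return 0;
    }

So a header byte of 0x28 selects transfer area 2 with an extended to-host transfer, which ReadDMAInfo() then validates against the corresponding rTransDef[] entry.
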
@@ -1049,30 +1029,29 @@ static bool ReadDMAInfo(volatile DMADESC * pDmaDesc, DEVICE_EXTENSION * pdx,
** this is known to be at least 2 or we will not be called.
**
****************************************************************************/
-static int Handle1401Esc(DEVICE_EXTENSION * pdx, char *pCh,
+static int Handle1401Esc(DEVICE_EXTENSION *pdx, char *pCh,
unsigned int dwCount)
{
int iReturn = U14ERR_FAIL;
- // I have no idea what this next test is about. '?' is 0x3f, which is area 3, code
- // 15. At the moment, this is not used, so it does no harm, but unless someone can
- // tell me what this is for, it should be removed from this and the Windows driver.
- if (pCh[0] == '?') // Is this an information response
- { // Parse and save the information
+ /* I have no idea what this next test is about. '?' is 0x3f, which is area 3, code */
+ /* 15. At the moment, this is not used, so it does no harm, but unless someone can */
+ /* tell me what this is for, it should be removed from this and the Windows driver. */
+ if (pCh[0] == '?') { /* Is this an information response */
+ /* Parse and save the information */
} else {
- spin_lock(&pdx->stagedLock); // Lock others out
+ spin_lock(&pdx->stagedLock); /* Lock others out */
- if (ReadDMAInfo(&pdx->rDMAInfo, pdx, pCh, dwCount)) // Get DMA parameters
- {
- unsigned short wTransType = pdx->rDMAInfo.wTransType; // check transfer type
+ if (ReadDMAInfo(&pdx->rDMAInfo, pdx, pCh, dwCount)) { /* Get DMA parameters */
+ unsigned short wTransType = pdx->rDMAInfo.wTransType; /* check transfer type */
dev_dbg(&pdx->interface->dev,
"%s xfer to %s, offset %d, length %d", __func__,
pdx->rDMAInfo.bOutWard ? "1401" : "host",
pdx->rDMAInfo.dwOffset, pdx->rDMAInfo.dwSize);
- if (pdx->bXFerWaiting) // Check here for badly out of kilter...
- { // This can never happen, really
+ if (pdx->bXFerWaiting) { /* Check here for badly out of kilter... */
+ /* This can never happen, really */
dev_err(&pdx->interface->dev,
"ERROR: DMA setup while transfer still waiting");
spin_unlock(&pdx->stagedLock);
@@ -1090,16 +1069,16 @@ static int Handle1401Esc(DEVICE_EXTENSION * pdx, char *pCh,
dev_err(&pdx->interface->dev,
"%s ReadWriteMem() failed %d",
__func__, iReturn);
- } else // This covers non-linear transfer setup
+ } else /* This covers non-linear transfer setup */
dev_err(&pdx->interface->dev,
"%s Unknown block xfer type %d",
__func__, wTransType);
}
- } else // Failed to read parameters
+ } else /* Failed to read parameters */
dev_err(&pdx->interface->dev, "%s ReadDMAInfo() fail",
__func__);
- spin_unlock(&pdx->stagedLock); // OK here
+ spin_unlock(&pdx->stagedLock); /* OK here */
}
dev_dbg(&pdx->interface->dev, "%s returns %d", __func__, iReturn);
@@ -1113,12 +1092,11 @@ static int Handle1401Esc(DEVICE_EXTENSION * pdx, char *pCh,
static void ced_readchar_callback(struct urb *pUrb)
{
DEVICE_EXTENSION *pdx = pUrb->context;
- int nGot = pUrb->actual_length; // what we transferred
+ int nGot = pUrb->actual_length; /* what we transferred */
- if (pUrb->status) // Do we have a problem to handle?
- {
- int nPipe = pdx->nPipes == 4 ? 1 : 0; // The pipe number to use for error
- // sync/async unlink faults aren't errors... just saying device removed or stopped
+ if (pUrb->status) { /* Do we have a problem to handle? */
+ int nPipe = pdx->nPipes == 4 ? 1 : 0; /* The pipe number to use for error */
+ /* sync/async unlink faults aren't errors... just saying device removed or stopped */
if (!
(pUrb->status == -ENOENT || pUrb->status == -ECONNRESET
|| pUrb->status == -ESHUTDOWN)) {
@@ -1133,27 +1111,26 @@ static void ced_readchar_callback(struct urb *pUrb)
spin_lock(&pdx->err_lock);
pdx->errors = pUrb->status;
spin_unlock(&pdx->err_lock);
- nGot = 0; // and tidy up again if so
+ nGot = 0; /* and tidy up again if so */
- spin_lock(&pdx->charInLock); // already at irq level
- pdx->bPipeError[nPipe] = 1; // Flag an error for later
+ spin_lock(&pdx->charInLock); /* already at irq level */
+ pdx->bPipeError[nPipe] = 1; /* Flag an error for later */
} else {
- if ((nGot > 1) && ((pdx->pCoherCharIn[0] & 0x7f) == 0x1b)) // Esc sequence?
- {
- Handle1401Esc(pdx, &pdx->pCoherCharIn[1], nGot - 1); // handle it
- spin_lock(&pdx->charInLock); // already at irq level
+ if ((nGot > 1) && ((pdx->pCoherCharIn[0] & 0x7f) == 0x1b)) { /* Esc sequence? */
+ Handle1401Esc(pdx, &pdx->pCoherCharIn[1], nGot - 1); /* handle it */
+ spin_lock(&pdx->charInLock); /* already at irq level */
} else {
- spin_lock(&pdx->charInLock); // already at irq level
+ spin_lock(&pdx->charInLock); /* already at irq level */
if (nGot > 0) {
unsigned int i;
if (nGot < INBUF_SZ) {
- pdx->pCoherCharIn[nGot] = 0; // tidy the string
+ pdx->pCoherCharIn[nGot] = 0; /* tidy the string */
dev_dbg(&pdx->interface->dev,
"%s got %d chars >%s<",
__func__, nGot,
pdx->pCoherCharIn);
}
- // We know that whatever we read must fit in the input buffer
+ /* We know that whatever we read must fit in the input buffer */
for (i = 0; i < nGot; i++) {
pdx->inputBuffer[pdx->dwInBuffPut++] =
pdx->pCoherCharIn[i] & 0x7F;
@@ -1162,17 +1139,17 @@ static void ced_readchar_callback(struct urb *pUrb)
}
if ((pdx->dwNumInput + nGot) <= INBUF_SZ)
- pdx->dwNumInput += nGot; // Adjust the buffer count accordingly
+ pdx->dwNumInput += nGot; /* Adjust the buffer count accordingly */
} else
dev_dbg(&pdx->interface->dev, "%s read ZLP",
__func__);
}
}
- pdx->bReadCharsPending = false; // No longer have a pending read
- spin_unlock(&pdx->charInLock); // already at irq level
+ pdx->bReadCharsPending = false; /* No longer have a pending read */
+ spin_unlock(&pdx->charInLock); /* already at irq level */
- Allowi(pdx); // see if we can do the next one
+ Allowi(pdx); /* see if we can do the next one */
}
/****************************************************************************
@@ -1182,25 +1159,25 @@ static void ced_readchar_callback(struct urb *pUrb)
** we can pick up any inward transfers. This can be called in multiple contexts
** so we use the irqsave version of the spinlock.
****************************************************************************/
-int Allowi(DEVICE_EXTENSION * pdx)
+int Allowi(DEVICE_EXTENSION *pdx)
{
int iReturn = U14ERR_NOERROR;
unsigned long flags;
- spin_lock_irqsave(&pdx->charInLock, flags); // can be called in multiple contexts
-
- // We don't want char input running while DMA is in progress as we know that this
- // can cause sequencing problems for the 2270. So don't. It will also allow the
- // ERR response to get back to the host code too early on some PCs, even if there
- // is no actual driver failure, so we don't allow this at all.
- if (!pdx->bInDrawDown && // stop input if
- !pdx->bReadCharsPending && // If no read request outstanding
- (pdx->dwNumInput < (INBUF_SZ / 2)) && // and there is some space
- (pdx->dwDMAFlag == MODE_CHAR) && // not doing any DMA
- (!pdx->bXFerWaiting) && // no xfer waiting to start
- (CanAcceptIoRequests(pdx))) // and activity is generally OK
- { // then off we go
- unsigned int nMax = INBUF_SZ - pdx->dwNumInput; // max we could read
- int nPipe = pdx->nPipes == 4 ? 1 : 0; // The pipe number to use
+ spin_lock_irqsave(&pdx->charInLock, flags); /* can be called in multiple contexts */
+
+ /* We don't want char input running while DMA is in progress as we know that this */
+ /* can cause sequencing problems for the 2270. So don't. It will also allow the */
+ /* ERR response to get back to the host code too early on some PCs, even if there */
+ /* is no actual driver failure, so we don't allow this at all. */
+ if (!pdx->bInDrawDown && /* stop input if */
+ !pdx->bReadCharsPending && /* If no read request outstanding */
+ (pdx->dwNumInput < (INBUF_SZ / 2)) && /* and there is some space */
+ (pdx->dwDMAFlag == MODE_CHAR) && /* not doing any DMA */
+ (!pdx->bXFerWaiting) && /* no xfer waiting to start */
+ (CanAcceptIoRequests(pdx))) { /* and activity is generally OK */
+ /* then off we go */
+ unsigned int nMax = INBUF_SZ - pdx->dwNumInput; /* max we could read */
+ int nPipe = pdx->nPipes == 4 ? 1 : 0; /* The pipe number to use */
dev_dbg(&pdx->interface->dev, "%s %d chars in input buffer",
__func__, pdx->dwNumInput);
@@ -1209,16 +1186,16 @@ int Allowi(DEVICE_EXTENSION * pdx)
usb_rcvintpipe(pdx->udev, pdx->epAddr[nPipe]),
pdx->pCoherCharIn, nMax, ced_readchar_callback,
pdx, pdx->bInterval);
- pdx->pUrbCharIn->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; // short xfers are OK by default
- usb_anchor_urb(pdx->pUrbCharIn, &pdx->submitted); // in case we need to kill it
+ pdx->pUrbCharIn->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; /* short xfers are OK by default */
+ usb_anchor_urb(pdx->pUrbCharIn, &pdx->submitted); /* in case we need to kill it */
iReturn = usb_submit_urb(pdx->pUrbCharIn, GFP_ATOMIC);
if (iReturn) {
- usb_unanchor_urb(pdx->pUrbCharIn); // remove from list of active Urbs
- pdx->bPipeError[nPipe] = 1; // Flag an error to be handled later
+ usb_unanchor_urb(pdx->pUrbCharIn); /* remove from list of active Urbs */
+ pdx->bPipeError[nPipe] = 1; /* Flag an error to be handled later */
dev_err(&pdx->interface->dev,
"%s submit urb failed: %d", __func__, iReturn);
} else
- pdx->bReadCharsPending = true; // Flag that we are active here
+ pdx->bReadCharsPending = true; /* Flag that we are active here */
}
spin_unlock_irqrestore(&pdx->charInLock, flags);
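
The hunk above shows the driver's usual pattern for the character-input URB: fill an interrupt URB, anchor it so disconnect can cancel it, submit with GFP_ATOMIC from the locked context, and unanchor it again if the submit fails. A reduced sketch of that idiom in isolation (struct my_dev and my_submit_int_in() are illustrative names, not part of this driver):

    #include <linux/usb.h>

    struct my_dev {                        /* illustrative per-device state */
        struct usb_device *udev;
        struct usb_anchor  submitted;      /* init_usb_anchor() at probe time */
        struct urb        *urb_in;
        u8                *buf;
        unsigned int       buf_len;
        u8                 ep_addr;
        u8                 interval;
    };

    static int my_submit_int_in(struct my_dev *d, void (*cb)(struct urb *))
    {
        int ret;

        usb_fill_int_urb(d->urb_in, d->udev,
                         usb_rcvintpipe(d->udev, d->ep_addr),
                         d->buf, d->buf_len, cb, d, d->interval);

        usb_anchor_urb(d->urb_in, &d->submitted);  /* so a later draw-down can kill it */
        ret = usb_submit_urb(d->urb_in, GFP_ATOMIC);
        if (ret)
            usb_unanchor_urb(d->urb_in);           /* roll back: it never went out */
        return ret;
    }
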
@@ -1238,15 +1215,15 @@ static long ced_ioctl(struct file *file, unsigned int cmd, unsigned long ulArg)
{
int err = 0;
DEVICE_EXTENSION *pdx = file->private_data;
- if (!CanAcceptIoRequests(pdx)) // check we still exist
+ if (!CanAcceptIoRequests(pdx)) /* check we still exist */
return -ENODEV;
- // Check that access is allowed, where is is needed. Anything that would have an indeterminate
- // size will be checked by the specific command.
- if (_IOC_DIR(cmd) & _IOC_READ) // read from point of view of user...
- err = !access_ok(VERIFY_WRITE, (void __user *)ulArg, _IOC_SIZE(cmd)); // is kernel write
- else if (_IOC_DIR(cmd) & _IOC_WRITE) // and write from point of view of user...
- err = !access_ok(VERIFY_READ, (void __user *)ulArg, _IOC_SIZE(cmd)); // is kernel read
+ /* Check that access is allowed, where it is needed. Anything that would have an indeterminate */

+ /* size will be checked by the specific command. */
+ if (_IOC_DIR(cmd) & _IOC_READ) /* read from point of view of user... */
+ err = !access_ok(VERIFY_WRITE, (void __user *)ulArg, _IOC_SIZE(cmd)); /* is kernel write */
+ else if (_IOC_DIR(cmd) & _IOC_WRITE) /* and write from point of view of user... */
+ err = !access_ok(VERIFY_READ, (void __user *)ulArg, _IOC_SIZE(cmd)); /* is kernel read */
if (err)
return -EFAULT;
@@ -1289,7 +1266,7 @@ static long ced_ioctl(struct file *file, unsigned int cmd, unsigned long ulArg)
return -1;
case _IOC_NR(IOCTL_CED_GETDRIVERREVISION):
- return (2 << 24) | (DRIVERMAJREV << 16) | DRIVERMINREV; // USB | MAJOR | MINOR
+ return (2 << 24) | (DRIVERMAJREV << 16) | DRIVERMINREV; /* USB | MAJOR | MINOR */
case _IOC_NR(IOCTL_CED_GETTRANSFER):
return GetTransfer(pdx, (TGET_TX_BLOCK __user *) ulArg);
@@ -1335,7 +1312,7 @@ static long ced_ioctl(struct file *file, unsigned int cmd, unsigned long ulArg)
return DbgStopLoop(pdx);
case _IOC_NR(IOCTL_CED_FULLRESET):
- pdx->bForceReset = true; // Set a flag for a full reset
+ pdx->bForceReset = true; /* Set a flag for a full reset */
break;
case _IOC_NR(IOCTL_CED_SETCIRCULAR):
@@ -1378,8 +1355,8 @@ static struct usb_class_driver ced_class = {
.minor_base = USB_CED_MINOR_BASE,
};
-// Check that the device that matches a 1401 vendor and product ID is OK to use and
-// initialise our DEVICE_EXTENSION.
+/* Check that the device that matches a 1401 vendor and product ID is OK to use and */
+/* initialise our DEVICE_EXTENSION. */
static int ced_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
@@ -1389,23 +1366,22 @@ static int ced_probe(struct usb_interface *interface,
int i, bcdDevice;
int retval = -ENOMEM;
- // allocate memory for our device extension and initialize it
+ /* allocate memory for our device extension and initialize it */
pdx = kzalloc(sizeof(*pdx), GFP_KERNEL);
if (!pdx)
goto error;
- for (i = 0; i < MAX_TRANSAREAS; ++i) // Initialise the wait queues
- {
+ for (i = 0; i < MAX_TRANSAREAS; ++i) { /* Initialise the wait queues */
init_waitqueue_head(&pdx->rTransDef[i].wqEvent);
}
- // Put initialises for our stuff here. Note that all of *pdx is zero, so
- // no need to explicitly zero it.
+ /* Put initialises for our stuff here. Note that all of *pdx is zero, so */
+ /* no need to explicitly zero it. */
spin_lock_init(&pdx->charOutLock);
spin_lock_init(&pdx->charInLock);
spin_lock_init(&pdx->stagedLock);
- // Initialises from the skeleton stuff
+ /* Initialises from the skeleton stuff */
kref_init(&pdx->kref);
mutex_init(&pdx->io_mutex);
spin_lock_init(&pdx->err_lock);
@@ -1414,7 +1390,7 @@ static int ced_probe(struct usb_interface *interface,
pdx->udev = usb_get_dev(interface_to_usbdev(interface));
pdx->interface = interface;
- // Attempt to identify the device
+ /* Attempt to identify the device */
bcdDevice = pdx->udev->descriptor.bcdDevice;
i = (bcdDevice >> 8);
if (i == 0)
@@ -1426,8 +1402,8 @@ static int ced_probe(struct usb_interface *interface,
__func__, bcdDevice);
goto error;
}
- // set up the endpoint information. We only care about the number of EP as
- // we know that we are dealing with a 1401 device.
+ /* set up the endpoint information. We only care about the number of EP as */
+ /* we know that we are dealing with a 1401 device. */
iface_desc = interface->cur_altsetting;
pdx->nPipes = iface_desc->desc.bNumEndpoints;
dev_info(&interface->dev, "1401Type=%d with %d End Points",
@@ -1435,10 +1411,10 @@ static int ced_probe(struct usb_interface *interface,
if ((pdx->nPipes < 3) || (pdx->nPipes > 4))
goto error;
- // Allocate the URBs we hold for performing transfers
- pdx->pUrbCharOut = usb_alloc_urb(0, GFP_KERNEL); // character output URB
- pdx->pUrbCharIn = usb_alloc_urb(0, GFP_KERNEL); // character input URB
- pdx->pStagedUrb = usb_alloc_urb(0, GFP_KERNEL); // block transfer URB
+ /* Allocate the URBs we hold for performing transfers */
+ pdx->pUrbCharOut = usb_alloc_urb(0, GFP_KERNEL); /* character output URB */
+ pdx->pUrbCharIn = usb_alloc_urb(0, GFP_KERNEL); /* character input URB */
+ pdx->pStagedUrb = usb_alloc_urb(0, GFP_KERNEL); /* block transfer URB */
if (!pdx->pUrbCharOut || !pdx->pUrbCharIn || !pdx->pStagedUrb) {
dev_err(&interface->dev, "%s URB alloc failed", __func__);
goto error;
@@ -1464,15 +1440,14 @@ static int ced_probe(struct usb_interface *interface,
pdx->epAddr[i] = endpoint->bEndpointAddress;
dev_info(&interface->dev, "Pipe %d, ep address %02x", i,
pdx->epAddr[i]);
- if (((pdx->nPipes == 3) && (i == 0)) || // if char input end point
+ if (((pdx->nPipes == 3) && (i == 0)) || /* if char input end point */
((pdx->nPipes == 4) && (i == 1))) {
- pdx->bInterval = endpoint->bInterval; // save the endpoint interrupt interval
+ pdx->bInterval = endpoint->bInterval; /* save the endpoint interrupt interval */
dev_info(&interface->dev, "Pipe %d, bInterval = %d", i,
pdx->bInterval);
}
- // Detect USB2 by checking last ep size (64 if USB1)
- if (i == pdx->nPipes - 1) // if this is the last ep (bulk)
- {
+ /* Detect USB2 by checking last ep size (64 if USB1) */
+ if (i == pdx->nPipes - 1) { /* if this is the last ep (bulk) */
pdx->bIsUSB2 =
le16_to_cpu(endpoint->wMaxPacketSize) > 64;
dev_info(&pdx->interface->dev, "USB%d",
@@ -1501,7 +1476,7 @@ static int ced_probe(struct usb_interface *interface,
error:
if (pdx)
- kref_put(&pdx->kref, ced_delete); // frees allocated memory
+ kref_put(&pdx->kref, ced_delete); /* frees allocated memory */
return retval;
}
@@ -1511,39 +1486,39 @@ static void ced_disconnect(struct usb_interface *interface)
int minor = interface->minor;
int i;
- usb_set_intfdata(interface, NULL); // remove the pdx from the interface
- usb_deregister_dev(interface, &ced_class); // give back our minor device number
+ usb_set_intfdata(interface, NULL); /* remove the pdx from the interface */
+ usb_deregister_dev(interface, &ced_class); /* give back our minor device number */
- mutex_lock(&pdx->io_mutex); // stop more I/O starting while...
- ced_draw_down(pdx); // ...wait for then kill any io
+ mutex_lock(&pdx->io_mutex); /* stop more I/O starting while... */
+ ced_draw_down(pdx); /* ...wait for then kill any io */
for (i = 0; i < MAX_TRANSAREAS; ++i) {
- int iErr = ClearArea(pdx, i); // ...release any used memory
+ int iErr = ClearArea(pdx, i); /* ...release any used memory */
if (iErr == U14ERR_UNLOCKFAIL)
dev_err(&pdx->interface->dev, "%s Area %d was in used",
__func__, i);
}
- pdx->interface = NULL; // ...we kill off link to interface
+ pdx->interface = NULL; /* ...we kill off link to interface */
mutex_unlock(&pdx->io_mutex);
usb_kill_anchored_urbs(&pdx->submitted);
- kref_put(&pdx->kref, ced_delete); // decrement our usage count
+ kref_put(&pdx->kref, ced_delete); /* decrement our usage count */
dev_info(&interface->dev, "USB cedusb #%d now disconnected", minor);
}
-// Wait for all the urbs we know of to be done with, then kill off any that
-// are left. NBNB we will need to have a mechanism to stop circular xfers
-// from trying to fire off more urbs. We will wait up to 3 seconds for Urbs
-// to be done.
-void ced_draw_down(DEVICE_EXTENSION * pdx)
+/* Wait for all the urbs we know of to be done with, then kill off any that */
+/* are left. NBNB we will need to have a mechanism to stop circular xfers */
+/* from trying to fire off more urbs. We will wait up to 3 seconds for Urbs */
+/* to be done. */
+void ced_draw_down(DEVICE_EXTENSION *pdx)
{
int time;
dev_dbg(&pdx->interface->dev, "%s called", __func__);
pdx->bInDrawDown = true;
time = usb_wait_anchor_empty_timeout(&pdx->submitted, 3000);
- if (!time) { // if we timed out we kill the urbs
+ if (!time) { /* if we timed out we kill the urbs */
usb_kill_anchored_urbs(&pdx->submitted);
dev_err(&pdx->interface->dev, "%s timed out", __func__);
}
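
ced_draw_down() above leans on two USB-core anchor helpers: usb_wait_anchor_empty_timeout(), which waits until every anchored URB has completed or the millisecond budget runs out (returning 0 on timeout), and usb_kill_anchored_urbs(), which forcibly cancels whatever is still outstanding. The same wait-then-kill idiom in a minimal form (the 3000 ms budget mirrors the function above; the helper name is illustrative):

    #include <linux/usb.h>

    static void my_draw_down(struct usb_anchor *submitted)
    {
        /* Wait up to 3 s for in-flight URBs; 0 means the timeout expired. */
        if (!usb_wait_anchor_empty_timeout(submitted, 3000))
            usb_kill_anchored_urbs(submitted);   /* give up and cancel the rest */
    }
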
diff --git a/drivers/staging/ced1401/usb1401.h b/drivers/staging/ced1401/usb1401.h
index 8fc6958..f031e3a 100644
--- a/drivers/staging/ced1401/usb1401.h
+++ b/drivers/staging/ced1401/usb1401.h
@@ -26,31 +26,32 @@
#define UINT unsigned int
#endif
-/// Device type codes, but these don't need to be extended - a succession is assumed
-/// These are set for usb from the bcdDevice field (suitably mangled). Future devices
-/// will be added in order of device creation to the list, so the names here are just
-/// to help use remember which device is which. The U14ERR_... values follow the same
-/// pattern for modern devices.
-#define TYPEUNKNOWN -1 // dont know
-#define TYPE1401 0 // standard 1401
-#define TYPEPLUS 1 // 1401 plus
-#define TYPEU1401 2 // u1401
-#define TYPEPOWER 3 // Power1401
-#define TYPEU14012 4 // u1401 mkII
-#define TYPEPOWER2 5 // Power1401 mk II
-#define TYPEMICRO3 6 // Micro1401-3
-#define TYPEPOWER3 7 // Power1401-3
-
-/// Some useful defines of constants. DONT FORGET to change the version in the
-/// resources whenever you change it here!.
-#define DRIVERMAJREV 2 // driver revision level major (match windows)
-#define DRIVERMINREV 0 // driver revision level minor
-
-/// Definitions of the various block transfer command codes
-#define TM_EXTTOHOST 8 // extended tohost
-#define TM_EXTTO1401 9 // extended to1401
-
-/// Definitions of values in usbReqtype. Used in sorting out setup actions
+/** Device type codes, but these don't need to be extended - a succession is assumed
+** These are set for usb from the bcdDevice field (suitably mangled). Future devices
+** will be added in order of device creation to the list, so the names here are just
+** to help us remember which device is which. The U14ERR_... values follow the same
+** pattern for modern devices.
+**/
+#define TYPEUNKNOWN -1 /* dont know */
+#define TYPE1401 0 /* standard 1401 */
+#define TYPEPLUS 1 /* 1401 plus */
+#define TYPEU1401 2 /* u1401 */
+#define TYPEPOWER 3 /* Power1401 */
+#define TYPEU14012 4 /* u1401 mkII */
+#define TYPEPOWER2 5 /* Power1401 mk II */
+#define TYPEMICRO3 6 /* Micro1401-3 */
+#define TYPEPOWER3 7 /* Power1401-3 */
+
+/* Some useful defines of constants. DONT FORGET to change the version in the */
+/* resources whenever you change it here!. */
+#define DRIVERMAJREV 2 /* driver revision level major (match windows) */
+#define DRIVERMINREV 0 /* driver revision level minor */
+
+/* Definitions of the various block transfer command codes */
+#define TM_EXTTOHOST 8 /* extended tohost */
+#define TM_EXTTO1401 9 /* extended to1401 */
+
+/* Definitions of values in usbReqtype. Used in sorting out setup actions */
#define H_TO_D 0x00
#define D_TO_H 0x80
#define VENDOR 0x40
@@ -58,7 +59,7 @@
#define INTREQ 0x01
#define ENDREQ 0x02
-/// Definition of values in usbRequest, again used to sort out setup
+/* Definition of values in usbRequest, again used to sort out setup */
#define GET_STATUS 0x00
#define CLEAR_FEATURE 0x01
#define SET_FEATURE 0x03
@@ -71,8 +72,8 @@
#define SET_INTERFACE 0x0b
#define SYNCH_FRAME 0x0c
-/// Definitions of the various debug command codes understood by the 1401. These
-/// are used in various vendor-specific commands to achieve the desired effect
+/* Definitions of the various debug command codes understood by the 1401. These */
+/* are used in various vendor-specific commands to achieve the desired effect */
#define DB_GRAB 0x50 /* Grab is a NOP for USB */
#define DB_FREE 0x51 /* Free is a NOP for the USB */
#define DB_SETADD 0x52 /* Set debug address (double) */
@@ -91,139 +92,135 @@
#define CR_CHAR 0x0D /* The carriage return character */
#define CR_CHAR_80 0x8d /* and with bit 7 set */
-/// A structure holding information about a block of memory for use in circular transfers
-typedef struct circBlk
-{
- volatile UINT dwOffset; /* Offset within area of block start */
- volatile UINT dwSize; /* Size of the block, in bytes (0 = unused) */
+/* A structure holding information about a block of memory for use in circular transfers */
+typedef struct circBlk {
+ volatile UINT dwOffset; /* Offset within area of block start */
+ volatile UINT dwSize; /* Size of the block, in bytes (0 = unused) */
} CIRCBLK;
-/// A structure holding all of the information about a transfer area - an area of
-/// memory set up for use either as a source or destination in DMA transfers.
-typedef struct transarea
-{
- void* lpvBuff; // User address of xfer area saved for completeness
- UINT dwBaseOffset; // offset to start of xfer area in first page
- UINT dwLength; // Length of xfer area, in bytes
- struct page **pPages; // Points at array of locked down pages
- int nPages; // number of pages that are locked down
- bool bUsed; // Is this structure in use?
- bool bCircular; // Is this area for circular transfers?
- bool bCircToHost; // Flag for direction of circular transfer
- bool bEventToHost; // Set event on transfer to host?
- int iWakeUp; // Set 1 on event, cleared by TestEvent()
- UINT dwEventSt; // Defines section within xfer area for...
- UINT dwEventSz; // ...notification by the event SZ is 0 if unset
- CIRCBLK aBlocks[2]; // Info on a pair of circular blocks
- wait_queue_head_t wqEvent; // The wait queue for events in this area MUST BE LAST
+/* A structure holding all of the information about a transfer area - an area of */
+/* memory set up for use either as a source or destination in DMA transfers. */
+typedef struct transarea {
+ void *lpvBuff; /* User address of xfer area saved for completeness */
+ UINT dwBaseOffset; /* offset to start of xfer area in first page */
+ UINT dwLength; /* Length of xfer area, in bytes */
+ struct page **pPages; /* Points at array of locked down pages */
+ int nPages; /* number of pages that are locked down */
+ bool bUsed; /* Is this structure in use? */
+ bool bCircular; /* Is this area for circular transfers? */
+ bool bCircToHost; /* Flag for direction of circular transfer */
+ bool bEventToHost; /* Set event on transfer to host? */
+ int iWakeUp; /* Set 1 on event, cleared by TestEvent() */
+ UINT dwEventSt; /* Defines section within xfer area for... */
+ UINT dwEventSz; /* ...notification by the event SZ is 0 if unset */
+ CIRCBLK aBlocks[2]; /* Info on a pair of circular blocks */
+ wait_queue_head_t wqEvent; /* The wait queue for events in this area MUST BE LAST */
} TRANSAREA;
-/// The DMADESC structure is used to hold information on the transfer in progress. It
-/// is set up by ReadDMAInfo, using information sent by the 1401 in an escape sequence.
-typedef struct dmadesc
-{
- unsigned short wTransType; /* transfer type as TM_xxx above */
- unsigned short wIdent; /* identifier word */
- unsigned int dwSize; /* bytes to transfer */
- unsigned int dwOffset; /* offset into transfer area for trans */
- bool bOutWard; /* true when data is going TO 1401 */
+/* The DMADESC structure is used to hold information on the transfer in progress. It */
+/* is set up by ReadDMAInfo, using information sent by the 1401 in an escape sequence. */
+typedef struct dmadesc {
+ unsigned short wTransType; /* transfer type as TM_xxx above */
+ unsigned short wIdent; /* identifier word */
+ unsigned int dwSize; /* bytes to transfer */
+ unsigned int dwOffset; /* offset into transfer area for trans */
+ bool bOutWard; /* true when data is going TO 1401 */
} DMADESC;
#define INBUF_SZ 256 /* input buffer size */
#define OUTBUF_SZ 256 /* output buffer size */
-#define STAGED_SZ 0x10000 // size of coherent buffer for staged transfers
-
-/// Structure to hold all of our device specific stuff. We are making this as similar as we
-/// can to the Windows driver to help in our understanding of what is going on.
-typedef struct _DEVICE_EXTENSION
-{
- char inputBuffer[INBUF_SZ]; /* The two buffers */
- char outputBuffer[OUTBUF_SZ]; /* accessed by the host functions */
- volatile unsigned int dwNumInput; /* num of chars in input buffer */
- volatile unsigned int dwInBuffGet; /* where to get from input buffer */
- volatile unsigned int dwInBuffPut; /* where to put into input buffer */
- volatile unsigned int dwNumOutput; /* num of chars in output buffer */
- volatile unsigned int dwOutBuffGet; /* where to get from output buffer*/
- volatile unsigned int dwOutBuffPut; /* where to put into output buffer*/
-
- volatile bool bSendCharsPending; /* Flag to indicate sendchar active */
- volatile bool bReadCharsPending; /* Flag to indicate a read is primed */
- char* pCoherCharOut; /* special aligned buffer for chars to 1401 */
- struct urb* pUrbCharOut; /* urb used for chars to 1401 */
- char* pCoherCharIn; /* special aligned buffer for chars to host */
- struct urb* pUrbCharIn; /* urb used for chars to host */
-
- spinlock_t charOutLock; /* to protect the outputBuffer and outputting */
- spinlock_t charInLock; /* to protect the inputBuffer and char reads */
- __u8 bInterval; /* Interrupt end point interval */
-
- volatile unsigned int dwDMAFlag; /* state of DMA */
- TRANSAREA rTransDef[MAX_TRANSAREAS];/* transfer area info */
- volatile DMADESC rDMAInfo; // info on current DMA transfer
- volatile bool bXFerWaiting; // Flag set if DMA transfer stalled
- volatile bool bInDrawDown; // Flag that we want to halt transfers
-
- // Parameters relating to a block read\write that is in progress. Some of these values
- // are equivalent to values in rDMAInfo. The values here are those in use, while those
- // in rDMAInfo are those received from the 1401 via an escape sequence. If another
- // escape sequence arrives before the previous xfer ends, rDMAInfo values are updated while these
- // are used to finish off the current transfer.
- volatile short StagedId; // The transfer area id for this transfer
- volatile bool StagedRead; // Flag TRUE for read from 1401, FALSE for write
- volatile unsigned int StagedLength; // Total length of this transfer
- volatile unsigned int StagedOffset; // Offset within memory area for transfer start
- volatile unsigned int StagedDone; // Bytes transferred so far
- volatile bool bStagedUrbPending; // Flag to indicate active
- char* pCoherStagedIO; // buffer used for block transfers
- struct urb* pStagedUrb; // The URB to use
- spinlock_t stagedLock; // protects ReadWriteMem() and circular buffer stuff
-
- short s1401Type; // type of 1401 attached
- short sCurrentState; // current error state
- bool bIsUSB2; // type of the interface we connect to
- bool bForceReset; // Flag to make sure we get a real reset
- __u32 statBuf[2]; // buffer for 1401 state info
-
- unsigned long ulSelfTestTime; // used to timeout self test
-
- int nPipes; // Should be 3 or 4 depending on 1401 usb chip
- int bPipeError[4]; // set non-zero if an error on one of the pipe
- __u8 epAddr[4]; // addresses of the 3/4 end points
-
- struct usb_device *udev; // the usb device for this device
- struct usb_interface *interface; // the interface for this device, NULL if removed
- struct usb_anchor submitted; // in case we need to retract our submissions
- struct mutex io_mutex; // synchronize I/O with disconnect, one user-mode caller at a time
-
- int errors; // the last request tanked
- int open_count; // count the number of openers
- spinlock_t err_lock; // lock for errors
- struct kref kref;
-}DEVICE_EXTENSION, *PDEVICE_EXTENSION;
+#define STAGED_SZ 0x10000 /* size of coherent buffer for staged transfers */
+
+/* Structure to hold all of our device specific stuff. We are making this as similar as we */
+/* can to the Windows driver to help in our understanding of what is going on. */
+typedef struct _DEVICE_EXTENSION {
+ char inputBuffer[INBUF_SZ]; /* The two buffers */
+ char outputBuffer[OUTBUF_SZ]; /* accessed by the host functions */
+ volatile unsigned int dwNumInput; /* num of chars in input buffer */
+ volatile unsigned int dwInBuffGet; /* where to get from input buffer */
+ volatile unsigned int dwInBuffPut; /* where to put into input buffer */
+ volatile unsigned int dwNumOutput; /* num of chars in output buffer */
+ volatile unsigned int dwOutBuffGet; /* where to get from output buffer*/
+ volatile unsigned int dwOutBuffPut; /* where to put into output buffer*/
+
+ volatile bool bSendCharsPending; /* Flag to indicate sendchar active */
+ volatile bool bReadCharsPending; /* Flag to indicate a read is primed */
+ char *pCoherCharOut; /* special aligned buffer for chars to 1401 */
+ struct urb *pUrbCharOut; /* urb used for chars to 1401 */
+ char *pCoherCharIn; /* special aligned buffer for chars to host */
+ struct urb *pUrbCharIn; /* urb used for chars to host */
+
+ spinlock_t charOutLock; /* to protect the outputBuffer and outputting */
+ spinlock_t charInLock; /* to protect the inputBuffer and char reads */
+ __u8 bInterval; /* Interrupt end point interval */
+
+ volatile unsigned int dwDMAFlag; /* state of DMA */
+ TRANSAREA rTransDef[MAX_TRANSAREAS];/* transfer area info */
+ volatile DMADESC rDMAInfo; /* info on current DMA transfer */
+ volatile bool bXFerWaiting; /* Flag set if DMA transfer stalled */
+ volatile bool bInDrawDown; /* Flag that we want to halt transfers */
+
+ /* Parameters relating to a block read/write that is in progress. Some of these values */
+ /* are equivalent to values in rDMAInfo. The values here are those in use, while those */
+ /* in rDMAInfo are those received from the 1401 via an escape sequence. If another */
+ /* escape sequence arrives before the previous xfer ends, rDMAInfo values are updated while these */
+ /* are used to finish off the current transfer. */
+ volatile short StagedId; /* The transfer area id for this transfer */
+ volatile bool StagedRead; /* Flag TRUE for read from 1401, FALSE for write */
+ volatile unsigned int StagedLength; /* Total length of this transfer */
+ volatile unsigned int StagedOffset; /* Offset within memory area for transfer start */
+ volatile unsigned int StagedDone; /* Bytes transferred so far */
+ volatile bool bStagedUrbPending; /* Flag to indicate active */
+ char *pCoherStagedIO; /* buffer used for block transfers */
+ struct urb *pStagedUrb; /* The URB to use */
+ spinlock_t stagedLock; /* protects ReadWriteMem() and circular buffer stuff */
+
+ short s1401Type; /* type of 1401 attached */
+ short sCurrentState; /* current error state */
+ bool bIsUSB2; /* type of the interface we connect to */
+ bool bForceReset; /* Flag to make sure we get a real reset */
+ __u32 statBuf[2]; /* buffer for 1401 state info */
+
+ unsigned long ulSelfTestTime; /* used to timeout self test */
+
+ int nPipes; /* Should be 3 or 4 depending on 1401 usb chip */
+ int bPipeError[4]; /* set non-zero if an error on one of the pipes */
+ __u8 epAddr[4]; /* addresses of the 3/4 end points */
+
+ struct usb_device *udev; /* the usb device for this device */
+ struct usb_interface *interface; /* the interface for this device, NULL if removed */
+ struct usb_anchor submitted; /* in case we need to retract our submissions */
+ struct mutex io_mutex; /* synchronize I/O with disconnect, one user-mode caller at a time */
+
+ int errors; /* the last request tanked */
+ int open_count; /* count the number of openers */
+ spinlock_t err_lock; /* lock for errors */
+ struct kref kref;
+} DEVICE_EXTENSION, *PDEVICE_EXTENSION;
#define to_DEVICE_EXTENSION(d) container_of(d, DEVICE_EXTENSION, kref)
-/// Definitions of routimes used between compilation object files
-// in usb1401.c
-extern int Allowi(DEVICE_EXTENSION* pdx);
-extern int SendChars(DEVICE_EXTENSION* pdx);
+/* Definitions of routines used between compilation object files */
+/* in usb1401.c */
+extern int Allowi(DEVICE_EXTENSION *pdx);
+extern int SendChars(DEVICE_EXTENSION *pdx);
extern void ced_draw_down(DEVICE_EXTENSION *pdx);
extern int ReadWriteMem(DEVICE_EXTENSION *pdx, bool Read, unsigned short wIdent,
- unsigned int dwOffs, unsigned int dwLen);
+ unsigned int dwOffs, unsigned int dwLen);
-// in ced_ioc.c
+/* in ced_ioc.c */
extern int ClearArea(DEVICE_EXTENSION *pdx, int nArea);
-extern int SendString(DEVICE_EXTENSION* pdx, const char __user* pData, unsigned int n);
+extern int SendString(DEVICE_EXTENSION *pdx, const char __user *pData, unsigned int n);
extern int SendChar(DEVICE_EXTENSION *pdx, char c);
-extern int Get1401State(DEVICE_EXTENSION* pdx, __u32* state, __u32* error);
+extern int Get1401State(DEVICE_EXTENSION *pdx, __u32 *state, __u32 *error);
extern int ReadWrite_Cancel(DEVICE_EXTENSION *pdx);
-extern bool Is1401(DEVICE_EXTENSION* pdx);
-extern bool QuickCheck(DEVICE_EXTENSION* pdx, bool bTestBuff, bool bCanReset);
+extern bool Is1401(DEVICE_EXTENSION *pdx);
+extern bool QuickCheck(DEVICE_EXTENSION *pdx, bool bTestBuff, bool bCanReset);
extern int Reset1401(DEVICE_EXTENSION *pdx);
extern int GetChar(DEVICE_EXTENSION *pdx);
-extern int GetString(DEVICE_EXTENSION *pdx, char __user* pUser, int n);
+extern int GetString(DEVICE_EXTENSION *pdx, char __user *pUser, int n);
extern int SetTransfer(DEVICE_EXTENSION *pdx, TRANSFERDESC __user *pTD);
extern int UnsetTransfer(DEVICE_EXTENSION *pdx, int nArea);
-extern int SetEvent(DEVICE_EXTENSION *pdx, TRANSFEREVENT __user*pTE);
+extern int SetEvent(DEVICE_EXTENSION *pdx, TRANSFEREVENT __user *pTE);
extern int Stat1401(DEVICE_EXTENSION *pdx);
extern int LineCount(DEVICE_EXTENSION *pdx);
extern int GetOutBufSpace(DEVICE_EXTENSION *pdx);
@@ -235,15 +232,15 @@ extern int StartSelfTest(DEVICE_EXTENSION *pdx);
extern int CheckSelfTest(DEVICE_EXTENSION *pdx, TGET_SELFTEST __user *pGST);
extern int TypeOf1401(DEVICE_EXTENSION *pdx);
extern int TransferFlags(DEVICE_EXTENSION *pdx);
-extern int DbgPeek(DEVICE_EXTENSION *pdx, TDBGBLOCK __user* pDB);
+extern int DbgPeek(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB);
extern int DbgPoke(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB);
extern int DbgRampData(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB);
extern int DbgRampAddr(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB);
extern int DbgGetData(DEVICE_EXTENSION *pdx, TDBGBLOCK __user *pDB);
extern int DbgStopLoop(DEVICE_EXTENSION *pdx);
extern int SetCircular(DEVICE_EXTENSION *pdx, TRANSFERDESC __user *pTD);
-extern int GetCircBlock(DEVICE_EXTENSION *pdx, TCIRCBLOCK __user* pCB);
-extern int FreeCircBlock(DEVICE_EXTENSION *pdx, TCIRCBLOCK __user* pCB);
+extern int GetCircBlock(DEVICE_EXTENSION *pdx, TCIRCBLOCK __user *pCB);
+extern int FreeCircBlock(DEVICE_EXTENSION *pdx, TCIRCBLOCK __user *pCB);
extern int WaitEvent(DEVICE_EXTENSION *pdx, int nArea, int msTimeOut);
extern int TestEvent(DEVICE_EXTENSION *pdx, int nArea);
#endif
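
DRIVERMAJREV and DRIVERMINREV above are what IOCTL_CED_GETDRIVERREVISION packs into its return value in ced_ioctl(), together with a leading 2 that marks the USB transport. A small self-contained sketch of how a caller could unpack that word (the sample program is illustrative, not part of the driver):

    #include <stdio.h>

    #define DRIVERMAJREV 2   /* from usb1401.h */
    #define DRIVERMINREV 0   /* from usb1401.h */

    int main(void)
    {
        /* Packing used by the ioctl: USB | MAJOR | MINOR */
        unsigned int rev = (2u << 24) | (DRIVERMAJREV << 16) | DRIVERMINREV;

        printf("raw 0x%08x -> transport %u, driver %u.%u\n",
               rev, rev >> 24, (rev >> 16) & 0xffu, rev & 0xffffu);
        return 0;
    }

With the values defined here this prints 0x02020000, i.e. driver revision 2.0 over USB.
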
diff --git a/drivers/staging/ced1401/use1401.h b/drivers/staging/ced1401/use1401.h
index 86294e2..b7997c9 100644
--- a/drivers/staging/ced1401/use1401.h
+++ b/drivers/staging/ced1401/use1401.h
@@ -11,16 +11,16 @@
#define __USE1401_H__
#include "machine.h"
-// Some definitions to make things compatible. If you want to use Use1401 directly
-// from a Windows program you should define U14_NOT_DLL, in which case you also
-// MUST make sure that your application startup code calls U14InitLib().
-// DLL_USE1401 is defined when you are building the Use1401 dll, not otherwise.
+/* Some definitions to make things compatible. If you want to use Use1401 directly */
+/* from a Windows program you should define U14_NOT_DLL, in which case you also */
+/* MUST make sure that your application startup code calls U14InitLib(). */
+/* DLL_USE1401 is defined when you are building the Use1401 dll, not otherwise. */
#ifdef _IS_WINDOWS_
#ifndef U14_NOT_DLL
#ifdef DLL_USE1401
-#define U14API(retType) retType DllExport __stdcall
+#define U14API(retType) (retType DllExport __stdcall)
#else
-#define U14API(retType) retType DllImport __stdcall
+#define U14API(retType) (retType DllImport __stdcall)
#endif
#endif
@@ -36,7 +36,7 @@
#ifdef _QT
#ifndef U14_NOT_DLL
#undef U14API
-#define U14API(retType) retType __declspec(dllimport) __stdcall
+#define U14API(retType) (retType __declspec(dllimport) __stdcall)
#endif
#undef U14LONG
#define U14LONG int
@@ -50,20 +50,20 @@
#define U14LONG long
#endif
-/// Error codes: We need them here as user space can see them.
-#define U14ERR_NOERROR 0 // no problems
+/* Error codes: We need them here as user space can see them. */
+#define U14ERR_NOERROR 0 /* no problems */
-/// Device error codes, but these don't need to be extended - a succession is assumed
-#define U14ERR_STD 4 // standard 1401 connected
-#define U14ERR_U1401 5 // u1401 connected
-#define U14ERR_PLUS 6 // 1401 plus connected
-#define U14ERR_POWER 7 // Power1401 connected
-#define U14ERR_U14012 8 // u1401 mkII connected
+/* Device error codes, but these don't need to be extended - a succession is assumed */
+#define U14ERR_STD 4 /* standard 1401 connected */
+#define U14ERR_U1401 5 /* u1401 connected */
+#define U14ERR_PLUS 6 /* 1401 plus connected */
+#define U14ERR_POWER 7 /* Power1401 connected */
+#define U14ERR_U14012 8 /* u1401 mkII connected */
#define U14ERR_POWER2 9
#define U14ERR_U14013 10
#define U14ERR_POWER3 11
-/// NBNB Error numbers need shifting as some linux error codes start at 512
+/* NBNB Error numbers need shifting as some linux error codes start at 512 */
#define U14ERR(n) (n+U14ERRBASE)
#define U14ERR_OFF U14ERR(0) /* 1401 there but switched off */
#define U14ERR_NC U14ERR(-1) /* 1401 not connected */
@@ -113,7 +113,7 @@
#define U14ERR_DRIVCOMMS U14ERR(-110) /* failed talking to driver */
#define U14ERR_OUTOFMEMORY U14ERR(-111) /* needed memory and couldnt get it*/
-/// 1401 type codes.
+/* 1401 type codes. */
#define U14TYPE1401 0 /* standard 1401 */
#define U14TYPEPLUS 1 /* 1401 plus */
#define U14TYPEU1401 2 /* u1401 */
@@ -124,9 +124,9 @@
#define U14TYPEPOWER3 7 /* power1401-3 */
#define U14TYPEUNKNOWN -1 /* dont know */
-/// Transfer flags to allow driver capabilities to be interrogated
+/* Transfer flags to allow driver capabilities to be interrogated */
-/// Constants for transfer flags
+/* Constants for transfer flags */
#define U14TF_USEDMA 1 /* Transfer flag for use DMA */
#define U14TF_MULTIA 2 /* Transfer flag for multi areas */
#define U14TF_FIFO 4 /* for FIFO interface card */
@@ -138,18 +138,18 @@
#define U14TF_DIAG 256 /* Diagnostics/debug functions */
#define U14TF_CIRC14 512 /* Circular-mode to 1401 */
-/// Definitions of element sizes for DMA transfers - to allow byte-swapping
+/* Definitions of element sizes for DMA transfers - to allow byte-swapping */
#define ESZBYTES 0 /* BYTE element size value */
-#define ESZWORDS 1 /* WORD element size value */
+#define ESZWORDS 1 /* unsigned short element size value */
#define ESZLONGS 2 /* long element size value */
#define ESZUNKNOWN 0 /* unknown element size value */
-/// These define required access types for the debug/diagnostics function
+/* These define required access types for the debug/diagnostics function */
#define BYTE_SIZE 1 /* 8-bit access */
#define WORD_SIZE 2 /* 16-bit access */
#define LONG_SIZE 3 /* 32-bit access */
-/// Stuff used by U14_GetTransfer
+/* Stuff used by U14_GetTransfer */
#define GET_TX_MAXENTRIES 257 /* (max length / page size + 1) */
#ifdef _IS_WINDOWS_
@@ -157,19 +157,19 @@
typedef struct /* used for U14_GetTransfer results */
{ /* Info on a single mapped block */
- U14LONG physical;
- U14LONG size;
+ U14LONG physical;
+ U14LONG size;
} TXENTRY;
typedef struct TGetTxBlock /* used for U14_GetTransfer results */
{ /* matches structure in VXD */
- U14LONG size;
- U14LONG linear;
- short seg;
- short reserved;
- short avail; /* number of available entries */
- short used; /* number of used entries */
- TXENTRY entries[GET_TX_MAXENTRIES]; /* Array of mapped block info */
+ U14LONG size;
+ U14LONG linear;
+ short seg;
+ short reserved;
+ short avail; /* number of available entries */
+ short used; /* number of used entries */
+ TXENTRY entries[GET_TX_MAXENTRIES]; /* Array of mapped block info */
} TGET_TX_BLOCK;
typedef TGET_TX_BLOCK *LPGET_TX_BLOCK;
@@ -180,19 +180,19 @@ typedef TGET_TX_BLOCK *LPGET_TX_BLOCK;
#ifdef LINUX
typedef struct /* used for U14_GetTransfer results */
{ /* Info on a single mapped block */
- long long physical;
- long size;
+ long long physical;
+ long size;
} TXENTRY;
typedef struct TGetTxBlock /* used for U14_GetTransfer results */
{ /* matches structure in VXD */
- long long linear; /* linear address */
- long size; /* total size of the mapped area, holds id when called */
- short seg; /* segment of the address for Win16 */
- short reserved;
- short avail; /* number of available entries */
- short used; /* number of used entries */
- TXENTRY entries[GET_TX_MAXENTRIES]; /* Array of mapped block info */
+ long long linear; /* linear address */
+ long size; /* total size of the mapped area, holds id when called */
+ short seg; /* segment of the address for Win16 */
+ short reserved;
+ short avail; /* number of available entries */
+ short used; /* number of used entries */
+ TXENTRY entries[GET_TX_MAXENTRIES]; /* Array of mapped block info */
} TGET_TX_BLOCK;
#endif
@@ -200,84 +200,84 @@ typedef struct TGetTxBlock /* used for U14_GetTransfer results */
extern "C" {
#endif
-U14API(int) U14WhenToTimeOut(short hand); // when to timeout in ms
-U14API(short) U14PassedTime(int iTime); // non-zero if iTime passed
+U14API(int) U14WhenToTimeOut(short hand); /* when to timeout in ms */
+U14API(short) U14PassedTime(int iTime); /* non-zero if iTime passed */
-U14API(short) U14LastErrCode(short hand);
+U14API(short) U14LastErrCode(short hand);
-U14API(short) U14Open1401(short n1401);
-U14API(short) U14Close1401(short hand);
-U14API(short) U14Reset1401(short hand);
-U14API(short) U14ForceReset(short hand);
-U14API(short) U14TypeOf1401(short hand);
-U14API(short) U14NameOf1401(short hand, char* pBuf, WORD wMax);
+U14API(short) U14Open1401(short n1401);
+U14API(short) U14Close1401(short hand);
+U14API(short) U14Reset1401(short hand);
+U14API(short) U14ForceReset(short hand);
+U14API(short) U14TypeOf1401(short hand);
+U14API(short) U14NameOf1401(short hand, char *pBuf, unsigned short wMax);
-U14API(short) U14Stat1401(short hand);
-U14API(short) U14CharCount(short hand);
-U14API(short) U14LineCount(short hand);
+U14API(short) U14Stat1401(short hand);
+U14API(short) U14CharCount(short hand);
+U14API(short) U14LineCount(short hand);
-U14API(short) U14SendString(short hand, const char* pString);
-U14API(short) U14GetString(short hand, char* pBuffer, WORD wMaxLen);
-U14API(short) U14SendChar(short hand, char cChar);
-U14API(short) U14GetChar(short hand, char* pcChar);
+U14API(short) U14SendString(short hand, const char *pString);
+U14API(short) U14GetString(short hand, char *pBuffer, unsigned short wMaxLen);
+U14API(short) U14SendChar(short hand, char cChar);
+U14API(short) U14GetChar(short hand, char *pcChar);
-U14API(short) U14LdCmd(short hand, const char* command);
-U14API(DWORD) U14Ld(short hand, const char* vl, const char* str);
+U14API(short) U14LdCmd(short hand, const char *command);
+U14API(unsigned int) U14Ld(short hand, const char *vl, const char *str);
-U14API(short) U14SetTransArea(short hand, WORD wArea, void *pvBuff,
- DWORD dwLength, short eSz);
-U14API(short) U14UnSetTransfer(short hand, WORD wArea);
-U14API(short) U14SetTransferEvent(short hand, WORD wArea, BOOL bEvent,
- BOOL bToHost, DWORD dwStart, DWORD dwLength);
-U14API(int) U14TestTransferEvent(short hand, WORD wArea);
-U14API(int) U14WaitTransferEvent(short hand, WORD wArea, int msTimeOut);
-U14API(short) U14GetTransfer(short hand, TGET_TX_BLOCK *pTransBlock);
+U14API(short) U14SetTransArea(short hand, unsigned short wArea, void *pvBuff,
+ unsigned int dwLength, short eSz);
+U14API(short) U14UnSetTransfer(short hand, unsigned short wArea);
+U14API(short) U14SetTransferEvent(short hand, unsigned short wArea, BOOL bEvent,
+ BOOL bToHost, unsigned int dwStart, unsigned int dwLength);
+U14API(int) U14TestTransferEvent(short hand, unsigned short wArea);
+U14API(int) U14WaitTransferEvent(short hand, unsigned short wArea, int msTimeOut);
+U14API(short) U14GetTransfer(short hand, TGET_TX_BLOCK *pTransBlock);
-U14API(short) U14ToHost(short hand, char* pAddrHost,DWORD dwSize,DWORD dw1401,
- short eSz);
-U14API(short) U14To1401(short hand, const char* pAddrHost,DWORD dwSize,DWORD dw1401,
- short eSz);
+U14API(short) U14ToHost(short hand, char *pAddrHost, unsigned int dwSize, unsigned int dw1401,
+ short eSz);
+U14API(short) U14To1401(short hand, const char *pAddrHost, unsigned int dwSize, unsigned int dw1401,
+ short eSz);
-U14API(short) U14SetCircular(short hand, WORD wArea, BOOL bToHost, void *pvBuff,
- DWORD dwLength);
+U14API(short) U14SetCircular(short hand, unsigned short wArea, BOOL bToHost, void *pvBuff,
+ unsigned int dwLength);
-U14API(int) U14GetCircBlk(short hand, WORD wArea, DWORD *pdwOffs);
-U14API(int) U14FreeCircBlk(short hand, WORD wArea, DWORD dwOffs, DWORD dwSize,
- DWORD *pdwOffs);
+U14API(int) U14GetCircBlk(short hand, unsigned short wArea, unsigned int *pdwOffs);
+U14API(int) U14FreeCircBlk(short hand, unsigned short wArea, unsigned int dwOffs, unsigned int dwSize,
+ unsigned int *pdwOffs);
-U14API(short) U14StrToLongs(const char* pszBuff, U14LONG *palNums, short sMaxLongs);
-U14API(short) U14LongsFrom1401(short hand, U14LONG *palBuff, short sMaxLongs);
+U14API(short) U14StrToLongs(const char *pszBuff, U14LONG *palNums, short sMaxLongs);
+U14API(short) U14LongsFrom1401(short hand, U14LONG *palBuff, short sMaxLongs);
U14API(void) U14SetTimeout(short hand, int lTimeout);
U14API(int) U14GetTimeout(short hand);
-U14API(short) U14OutBufSpace(short hand);
+U14API(short) U14OutBufSpace(short hand);
U14API(int) U14BaseAddr1401(short hand);
U14API(int) U14DriverVersion(short hand);
U14API(int) U14DriverType(short hand);
-U14API(short) U14DriverName(short hand, char* pBuf, WORD wMax);
-U14API(short) U14GetUserMemorySize(short hand, DWORD *pMemorySize);
-U14API(short) U14KillIO1401(short hand);
-
-U14API(short) U14BlkTransState(short hand);
-U14API(short) U14StateOf1401(short hand);
-
-U14API(short) U14Grab1401(short hand);
-U14API(short) U14Free1401(short hand);
-U14API(short) U14Peek1401(short hand, DWORD dwAddr, int nSize, int nRepeats);
-U14API(short) U14Poke1401(short hand, DWORD dwAddr, DWORD dwValue, int nSize, int nRepeats);
-U14API(short) U14Ramp1401(short hand, DWORD dwAddr, DWORD dwDef, DWORD dwEnable, int nSize, int nRepeats);
-U14API(short) U14RampAddr(short hand, DWORD dwDef, DWORD dwEnable, int nSize, int nRepeats);
-U14API(short) U14StopDebugLoop(short hand);
-U14API(short) U14GetDebugData(short hand, U14LONG *plValue);
-
-U14API(short) U14StartSelfTest(short hand);
-U14API(short) U14CheckSelfTest(short hand, U14LONG *pData);
-U14API(short) U14TransferFlags(short hand);
-U14API(void) U14GetErrorString(short nErr, char* pStr, WORD wMax);
+U14API(short) U14DriverName(short hand, char *pBuf, unsigned short wMax);
+U14API(short) U14GetUserMemorySize(short hand, unsigned int *pMemorySize);
+U14API(short) U14KillIO1401(short hand);
+
+U14API(short) U14BlkTransState(short hand);
+U14API(short) U14StateOf1401(short hand);
+
+U14API(short) U14Grab1401(short hand);
+U14API(short) U14Free1401(short hand);
+U14API(short) U14Peek1401(short hand, unsigned int dwAddr, int nSize, int nRepeats);
+U14API(short) U14Poke1401(short hand, unsigned int dwAddr, unsigned int dwValue, int nSize, int nRepeats);
+U14API(short) U14Ramp1401(short hand, unsigned int dwAddr, unsigned int dwDef, unsigned int dwEnable, int nSize, int nRepeats);
+U14API(short) U14RampAddr(short hand, unsigned int dwDef, unsigned int dwEnable, int nSize, int nRepeats);
+U14API(short) U14StopDebugLoop(short hand);
+U14API(short) U14GetDebugData(short hand, U14LONG *plValue);
+
+U14API(short) U14StartSelfTest(short hand);
+U14API(short) U14CheckSelfTest(short hand, U14LONG *pData);
+U14API(short) U14TransferFlags(short hand);
+U14API(void) U14GetErrorString(short nErr, char *pStr, unsigned short wMax);
U14API(int) U14MonitorRev(short hand);
U14API(void) U14CloseAll(void);
-U14API(short) U14WorkingSet(DWORD dwMinKb, DWORD dwMaxKb);
+U14API(short) U14WorkingSet(unsigned int dwMinKb, unsigned int dwMaxKb);
U14API(int) U14InitLib(void);
#ifdef __cplusplus
@@ -285,3 +285,4 @@ U14API(int) U14InitLib(void);
#endif
#endif /* End of ifndef __USE1401_H__ */
+
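
For orientation, the prototypes above are the user-space face of the library (implemented for Linux in userspace/use1401.c). A minimal usage sketch, assuming the library is linked in and a 1401 is attached; a negative return from U14Open1401() is treated as failure here, matching the negative U14ERR_... codes, while the exact handle semantics beyond that are not spelled out in this header:

    #include <stdio.h>
    #include "use1401.h"

    int main(void)
    {
        short hand = U14Open1401(0);           /* 0 = first available 1401 */
        if (hand < 0) {
            fprintf(stderr, "open failed: %d\n", hand);
            return 1;
        }

        printf("device type code: %d\n", U14TypeOf1401(hand));
        printf("transfer flags:   %d\n", U14TransferFlags(hand));

        U14Close1401(hand);
        return 0;
    }
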
diff --git a/drivers/staging/ced1401/use14_ioc.h b/drivers/staging/ced1401/use14_ioc.h
index 15ca638..97d7913 100644
--- a/drivers/staging/ced1401/use14_ioc.h
+++ b/drivers/staging/ced1401/use14_ioc.h
@@ -19,283 +19,282 @@
** The IOCTL function codes from 0x80 to 0xFF are for developer use.
*/
#define FILE_DEVICE_CED1401 0x8001
-#define FNNUMBASE 0x800
-
-#define U14_OPEN1401 CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_CLOSE1401 CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+1, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_SENDSTRING CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+2, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_RESET1401 CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+3, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_GETCHAR CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+4, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_SENDCHAR CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+5, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_STAT1401 CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+6, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_LINECOUNT CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+7, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_GETSTRING CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+8, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_REGCALLBACK CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+9, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_GETMONITORBUF CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+10, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_SETTRANSFER CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+11, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_UNSETTRANSFER CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+12, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_SETTRANSEVENT CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+13, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_GETOUTBUFSPACE CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+14, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_GETBASEADDRESS CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+15, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_GETDRIVERREVISION CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+16, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_GETTRANSFER CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+17, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_KILLIO1401 CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+18, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_BLKTRANSSTATE CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+19, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_BYTECOUNT CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+20, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_ZEROBLOCKCOUNT CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+21, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_STOPCIRCULAR CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+22, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_STATEOF1401 CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+23, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_REGISTERS1401 CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+24, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_GRAB1401 CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+25, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_FREE1401 CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+26, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_STEP1401 CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+27, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_SET1401REGISTERS CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+28, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_STEPTILL1401 CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+29, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_SETORIN CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+30, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_STARTSELFTEST CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+31, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_CHECKSELFTEST CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+32, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_TYPEOF1401 CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+33, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_TRANSFERFLAGS CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+34, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_DBGPEEK CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+35, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_DBGPOKE CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+36, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_DBGRAMPDATA CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+37, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_DBGRAMPADDR CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+38, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_DBGGETDATA CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+39, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_DBGSTOPLOOP CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+40, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_FULLRESET CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+41, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_SETCIRCULAR CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+42, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_GETCIRCBLK CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+43, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-#define U14_FREECIRCBLK CTL_CODE( FILE_DEVICE_CED1401, \
- FNNUMBASE+44, \
- METHOD_BUFFERED, \
- FILE_ANY_ACCESS)
-
-//--------------- Structures that are shared with the driver -------------
+#define FNNUMBASE 0x800
+
+#define U14_OPEN1401 CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_CLOSE1401 CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+1, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_SENDSTRING CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+2, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_RESET1401 CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+3, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_GETCHAR CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+4, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_SENDCHAR CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+5, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_STAT1401 CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+6, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_LINECOUNT CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+7, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_GETSTRING CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+8, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_REGCALLBACK CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+9, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_GETMONITORBUF CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+10, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_SETTRANSFER CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+11, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_UNSETTRANSFER CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+12, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_SETTRANSEVENT CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+13, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_GETOUTBUFSPACE CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+14, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_GETBASEADDRESS CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+15, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_GETDRIVERREVISION CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+16, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_GETTRANSFER CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+17, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_KILLIO1401 CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+18, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_BLKTRANSSTATE CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+19, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_BYTECOUNT CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+20, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_ZEROBLOCKCOUNT CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+21, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_STOPCIRCULAR CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+22, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_STATEOF1401 CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+23, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_REGISTERS1401 CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+24, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_GRAB1401 CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+25, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_FREE1401 CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+26, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_STEP1401 CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+27, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_SET1401REGISTERS CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+28, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_STEPTILL1401 CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+29, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_SETORIN CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+30, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_STARTSELFTEST CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+31, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_CHECKSELFTEST CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+32, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_TYPEOF1401 CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+33, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_TRANSFERFLAGS CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+34, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_DBGPEEK CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+35, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_DBGPOKE CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+36, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_DBGRAMPDATA CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+37, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_DBGRAMPADDR CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+38, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_DBGGETDATA CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+39, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_DBGSTOPLOOP CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+40, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_FULLRESET CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+41, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_SETCIRCULAR CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+42, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_GETCIRCBLK CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+43, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+#define U14_FREECIRCBLK CTL_CODE(FILE_DEVICE_CED1401, \
+ FNNUMBASE+44, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+
+/*--------------- Structures that are shared with the driver ------------- */
#pragma pack(1)
typedef struct /* used for get/set standard 1401 registers */
{
- short sPC;
- char A;
- char X;
- char Y;
- char stat;
- char rubbish;
+ short sPC;
+ char A;
+ char X;
+ char Y;
+ char stat;
+ char rubbish;
} T1401REGISTERS;
typedef union /* to communicate with 1401 driver status & control funcs */
{
- char chrs[22];
- short ints[11];
- long longs[5];
- T1401REGISTERS registers;
+ char chrs[22];
+ short ints[11];
+ long longs[5];
+ T1401REGISTERS registers;
} TCSBLOCK;
typedef TCSBLOCK* LPTCSBLOCK;
-typedef struct paramBlk
-{
- short sState;
- TCSBLOCK csBlock;
+typedef struct paramBlk {
+ short sState;
+ TCSBLOCK csBlock;
} PARAMBLK;
typedef PARAMBLK* PPARAMBLK;
typedef struct TransferDesc /* Structure and type for SetTransArea */
{
- WORD wArea; /* number of transfer area to set up */
- void FAR * lpvBuff; /* address of transfer area */
- DWORD dwLength; /* length of area to set up */
- short eSize; /* size to move (for swapping on MAC) */
+ unsigned short wArea; /* number of transfer area to set up */
+ void FAR *lpvBuff; /* address of transfer area */
+ unsigned int dwLength; /* length of area to set up */
+ short eSize; /* size to move (for swapping on MAC) */
} TRANSFERDESC;
-typedef TRANSFERDESC FAR * LPTRANSFERDESC;
+typedef TRANSFERDESC FAR *LPTRANSFERDESC;
/* This is the structure used to set up a transfer area */
typedef struct VXTransferDesc /* use1401.c and use1432x.x use only */
{
- WORD wArea; /* number of transfer area to set up */
- WORD wAddrSel; /* 16 bit selector for area */
- DWORD dwAddrOfs; /* 32 bit offset for area start */
- DWORD dwLength; /* length of area to set up */
+ unsigned short wArea; /* number of transfer area to set up */
+ unsigned short wAddrSel; /* 16 bit selector for area */
+ unsigned int dwAddrOfs; /* 32 bit offset for area start */
+ unsigned int dwLength; /* length of area to set up */
} VXTRANSFERDESC;
#pragma pack()
-#endif \ No newline at end of file
+#endif
diff --git a/drivers/staging/ced1401/userspace/use1401.c b/drivers/staging/ced1401/userspace/use1401.c
index 38e7c1c..c9bc2eb 100644
--- a/drivers/staging/ced1401/userspace/use1401.c
+++ b/drivers/staging/ced1401/userspace/use1401.c
@@ -36,7 +36,7 @@
** Under Windows 9x and NT, Use1401 uses DeviceIoControl to get access to
** the 1401 driver. This has parameters for the device handle, the function
** code, an input pointer and byte count, an output pointer and byte count
-** and a pointer to a DWORD to hold the output byte count. Note that input
+** and a pointer to an unsigned int to hold the output byte count. Note that input
** and output are from the point-of-view of the driver, so the output stuff
** is used to read values from the 1401, not send to the 1401. The use of
** these parameters varies with the function in use and the operating
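For illustration, a minimal sketch of that calling pattern for a simple status request, modelled on the U14Status1401() path; the device handle and the error variable are hypothetical locals:

   PARAMBLK rData;                       /* driver returns state + TCSBLOCK here */
   unsigned int dwBytes = 0;             /* filled in with the output byte count */
   short sErr;

   if (DeviceIoControl(hDevice, U14_STAT1401,
                       NULL, 0,                   /* no input block needed      */
                       &rData, sizeof(PARAMBLK),  /* output block for the state */
                       &dwBytes, NULL) &&
       (dwBytes >= sizeof(PARAMBLK)))
       sErr = rData.sState;              /* state reported by the driver */
   else
       sErr = U14ERR_DRIVCOMMS;          /* could not talk to the driver */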
@@ -250,7 +250,7 @@ static int iAttached = 0; // counts process attaches so ca
static HANDLE aHand1401[MAX1401] = {0}; // handles for 1401s
static HANDLE aXferEvent[MAX1401] = {0}; // transfer events for the 1401s
static LPVOID apAreas[MAX1401][MAX_TRANSAREAS]; // Locked areas
-static DWORD auAreas[MAX1401][MAX_TRANSAREAS]; // Size of locked areas
+static unsigned int auAreas[MAX1401][MAX_TRANSAREAS]; // Size of locked areas
static BOOL bWindows9x = FALSE; // if we are Windows 95 or better
#ifdef _WIN64
#define USE_NT_DIOC(ind) TRUE
@@ -276,8 +276,8 @@ static int aHand1401[MAX1401] = {0}; // handles for 1401s
typedef struct CmdHead // defines header block on command
{ // for PC commands
char acBasic[5]; // BASIC information - needed to align things
- WORD wBasicSz; // size as seen by BASIC
- WORD wCmdSize; // size of the following info
+ unsigned short wBasicSz; // size as seen by BASIC
+ unsigned short wCmdSize; // size of the following info
} __packed CMDHEAD;
#pragma pack() // back to normal
@@ -311,7 +311,7 @@ static short CheckHandle(short h)
****************************************************************************/
static short U14Status1401(short sHand, LONG lCode, TCSBLOCK* pBlk)
{
- DWORD dwBytes = 0;
+ unsigned int dwBytes = 0;
if ((sHand < 0) || (sHand >= MAX1401)) /* Check parameters */
return U14ERR_BADHAND;
@@ -345,7 +345,7 @@ static short U14Status1401(short sHand, LONG lCode, TCSBLOCK* pBlk)
****************************************************************************/
static short U14Control1401(short sHand, LONG lCode, TCSBLOCK* pBlk)
{
- DWORD dwBytes = 0;
+ unsigned int dwBytes = 0;
if ((sHand < 0) || (sHand >= MAX1401)) /* Check parameters */
return U14ERR_BADHAND;
@@ -455,7 +455,7 @@ static void TranslateString(char* pStr)
****************************************************************************/
U14API(short) U14StrToLongs(const char* pszBuff, U14LONG *palNums, short sMaxLongs)
{
- WORD wChInd = 0; // index into source
+ unsigned short wChInd = 0; // index into source
short sLgInd = 0; // index into result longs
while (pszBuff[wChInd] && // until we get to end of string...
@@ -681,7 +681,7 @@ U14API(int) U14DriverType(short hand)
** U14DriverName
** Returns the driver type as 3 character (ISA, PCI, USB or HSS))
****************************************************************************/
-U14API(short) U14DriverName(short hand, char* pBuf, WORD wMax)
+U14API(short) U14DriverName(short hand, char* pBuf, unsigned short wMax)
{
char* pName;
*pBuf = 0; // Start off with a blank string
@@ -779,7 +779,7 @@ U14API(short) U14Free1401(short hand)
** is called. After the peek is done, use U14GetDebugData to retrieve
** the results of the peek.
****************************************************************************/
-U14API(short) U14Peek1401(short hand, DWORD dwAddr, int nSize, int nRepeats)
+U14API(short) U14Peek1401(short hand, unsigned int dwAddr, int nSize, int nRepeats)
{
short sErr = CheckHandle(hand);
if (sErr == U14ERR_NOERROR)
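A rough usage sketch of the sequence the comment describes (peek, then collect the result); the address, size and repeat count are placeholders and the U14GetDebugData() signature is assumed from its name:

   U14LONG lPeeked;                                  /* result of the peek        */
   short sErr = U14Peek1401(hand, 0x1000, 4, 1);     /* one 32-bit peek at 0x1000 */
   if (sErr == U14ERR_NOERROR)
       sErr = U14GetDebugData(hand, &lPeeked);       /* fetch what was read       */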
@@ -813,7 +813,7 @@ U14API(short) U14Peek1401(short hand, DWORD dwAddr, int nSize, int nRepeats)
** If lRepeats is zero, the loop will continue until U14StopDebugLoop
** is called.
****************************************************************************/
-U14API(short) U14Poke1401(short hand, DWORD dwAddr, DWORD dwValue,
+U14API(short) U14Poke1401(short hand, unsigned int dwAddr, unsigned int dwValue,
int nSize, int nRepeats)
{
short sErr = CheckHandle(hand);
@@ -849,7 +849,7 @@ U14API(short) U14Poke1401(short hand, DWORD dwAddr, DWORD dwValue,
** DESCRIPTION Cause the 1401 to loop, writing a ramp to a location.
** If lRepeats is zero, the loop will continue until U14StopDebugLoop.
****************************************************************************/
-U14API(short) U14Ramp1401(short hand, DWORD dwAddr, DWORD dwDef, DWORD dwEnable,
+U14API(short) U14Ramp1401(short hand, unsigned int dwAddr, unsigned int dwDef, unsigned int dwEnable,
int nSize, int nRepeats)
{
short sErr = CheckHandle(hand);
@@ -887,7 +887,7 @@ U14API(short) U14Ramp1401(short hand, DWORD dwAddr, DWORD dwDef, DWORD dwEnable,
** DESCRIPTION Cause the 1401 to loop, reading from a ramping location.
** If lRepeats is zero, the loop will continue until U14StopDebugLoop
****************************************************************************/
-U14API(short) U14RampAddr(short hand, DWORD dwDef, DWORD dwEnable,
+U14API(short) U14RampAddr(short hand, unsigned int dwDef, unsigned int dwEnable,
int nSize, int nRepeats)
{
short sErr = CheckHandle(hand);
@@ -1024,7 +1024,7 @@ U14API(short) U14CheckSelfTest(short hand, U14LONG *pData)
/****************************************************************************
** U14GetUserMemorySize
****************************************************************************/
-U14API(short) U14GetUserMemorySize(short hand, DWORD *pMemorySize)
+U14API(short) U14GetUserMemorySize(short hand, unsigned int *pMemorySize)
{
// The original 1401 used a different command for getting the size
short sErr = U14SendString(hand, (asType1401[hand] == U14TYPE1401) ? "MEMTOP;" : "MEMTOP,?;");
@@ -1061,7 +1061,7 @@ U14API(short) U14TypeOf1401(short hand)
** U14NameOf1401
** Returns the type of the 1401 as a string, blank if unknown
****************************************************************************/
-U14API(short) U14NameOf1401(short hand, char* pBuf, WORD wMax)
+U14API(short) U14NameOf1401(short hand, char* pBuf, unsigned short wMax)
{
short sErr = CheckHandle(hand);
if (sErr == U14ERR_NOERROR)
@@ -1207,7 +1207,7 @@ static short U14TryToOpen(int n1401, long* plRetVal, short* psHandle)
{
short sErr = U14ERR_NOERROR;
HANDLE hDevice = INVALID_HANDLE_VALUE;
- DWORD dwErr = 0;
+ unsigned int dwErr = 0;
int nFirst, nLast, nDev = 0; /* Used for the search for a 1401 */
BOOL bOldName = FALSE; /* start by looking for a modern driver */
@@ -1262,7 +1262,7 @@ static short U14TryToOpen(int n1401, long* plRetVal, short* psHandle)
}
else
{
- DWORD dwe = GetLastError(); /* Get error code otherwise */
+ unsigned int dwe = GetLastError(); /* Get error code otherwise */
if ((dwe != ERROR_FILE_NOT_FOUND) || (dwErr == 0))
dwErr = dwe; /* Ignore repeats of 'not found' */
}
@@ -1454,7 +1454,7 @@ U14API(short) U14Close1401(short hand)
U14Reset1401(hand); // in case an active transfer running
for (j = 0; j < MAX_TRANSAREAS; ++j) // Locate locked areas
if (iAreaMask & (1 << j)) // And kill off any transfers
- U14UnSetTransfer(hand, (WORD)j);
+ U14UnSetTransfer(hand, (unsigned short)j);
}
#ifdef _IS_WINDOWS_
@@ -1581,7 +1581,7 @@ U14API(short) U14SendString(short hand, const char* pString)
if (bSpaceToSend)
{
PARAMBLK rData;
- DWORD dwBytes;
+ unsigned int dwBytes;
char tstr[MAXSTRLEN+5]; /* Buffer for chars */
if ((hand < 0) || (hand >= MAX1401))
@@ -1592,18 +1592,18 @@ U14API(short) U14SendString(short hand, const char* pString)
#ifndef _WIN64
if (!USE_NT_DIOC(hand)) /* Using WIN 95 driver access? */
{
- int iOK = DeviceIoControl(aHand1401[hand], (DWORD)U14_SENDSTRING,
+ int iOK = DeviceIoControl(aHand1401[hand], (unsigned int)U14_SENDSTRING,
NULL, 0, tstr, nChars,
&dwBytes, NULL);
if (iOK)
- sErr = (dwBytes >= (DWORD)nChars) ? U14ERR_NOERROR : U14ERR_DRIVCOMMS;
+ sErr = (dwBytes >= (unsigned int)nChars) ? U14ERR_NOERROR : U14ERR_DRIVCOMMS;
else
sErr = (short)GetLastError();
}
else
#endif
{
- int iOK = DeviceIoControl(aHand1401[hand],(DWORD)U14_SENDSTRING,
+ int iOK = DeviceIoControl(aHand1401[hand],(unsigned int)U14_SENDSTRING,
tstr, nChars,
&rData,sizeof(PARAMBLK),&dwBytes,NULL);
if (iOK && (dwBytes >= sizeof(PARAMBLK)))
@@ -1697,7 +1697,7 @@ U14API(short) U14SendChar(short hand, char cChar)
** error code. Any error from the device causes us to set up for
** a full reset.
****************************************************************************/
-U14API(short) U14GetString(short hand, char* pBuffer, WORD wMaxLen)
+U14API(short) U14GetString(short hand, char* pBuffer, unsigned short wMaxLen)
{
short sErr = CheckHandle(hand);
if (sErr != U14ERR_NOERROR) // If an error...
@@ -1726,8 +1726,8 @@ U14API(short) U14GetString(short hand, char* pBuffer, WORD wMaxLen)
{
if (asLastRetCode[hand] == U14ERR_NOERROR) /* all ok so far */
{
- DWORD dwBytes = 0;
- *((WORD *)pBuffer) = wMaxLen; /* set up length */
+ unsigned int dwBytes = 0;
+ *((unsigned short *)pBuffer) = wMaxLen; /* set up length */
#ifndef _WIN64
if (!USE_NT_DIOC(hand)) /* Win 95 DIOC here ? */
{
@@ -1737,9 +1737,9 @@ U14API(short) U14GetString(short hand, char* pBuffer, WORD wMaxLen)
if (wMaxLen > MAXSTRLEN) /* Truncate length */
wMaxLen = MAXSTRLEN;
- *((WORD *)tstr) = wMaxLen; /* set len */
+ *((unsigned short *)tstr) = wMaxLen; /* set len */
- iOK = DeviceIoControl(aHand1401[hand],(DWORD)U14_GETSTRING,
+ iOK = DeviceIoControl(aHand1401[hand],(unsigned int)U14_GETSTRING,
NULL, 0, tstr, wMaxLen+sizeof(short),
&dwBytes, NULL);
if (iOK) /* Device IO control OK ? */
@@ -1768,7 +1768,7 @@ U14API(short) U14GetString(short hand, char* pBuffer, WORD wMaxLen)
char* pMem = (char*)GlobalLock(hMem);
if (pMem)
{
- int iOK = DeviceIoControl(aHand1401[hand],(DWORD)U14_GETSTRING,
+ int iOK = DeviceIoControl(aHand1401[hand],(unsigned int)U14_GETSTRING,
NULL, 0, pMem, wMaxLen+sizeof(short),
&dwBytes, NULL);
if (iOK) /* Device IO control OK ? */
@@ -1946,7 +1946,7 @@ U14API(short) U14LineCount(short hand)
** other functions after getting an error and before using
** this function.
****************************************************************************/
-U14API(void) U14GetErrorString(short nErr, char* pStr, WORD wMax)
+U14API(void) U14GetErrorString(short nErr, char* pStr, unsigned short wMax)
{
char wstr[150];
@@ -2105,7 +2105,7 @@ U14API(void) U14GetErrorString(short nErr, char* pStr, WORD wMax)
break;
}
- if ((WORD)strlen(wstr) >= wMax-1) /* Check for string being too long */
+ if ((unsigned short)strlen(wstr) >= wMax-1) /* Check for string being too long */
wstr[wMax-1] = 0; /* and truncate it if so */
strcpy(pStr, wstr); /* Return the error string */
}
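A short usage sketch, calling it straight after a failing library call as the note above suggests; the command string and the buffer size are placeholders:

   char szErr[150];
   short sErr = U14SendString(hand, "ERR;");          /* any U14 call that failed */
   if (sErr != U14ERR_NOERROR)
       U14GetErrorString(sErr, szErr, sizeof(szErr)); /* readable text for sErr   */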
@@ -2120,8 +2120,8 @@ U14API(short) U14GetTransfer(short hand, TGET_TX_BLOCK *pTransBlock)
#ifdef _IS_WINDOWS_
if (sErr == U14ERR_NOERROR)
{
- DWORD dwBytes = 0;
- BOOL bOK = DeviceIoControl(aHand1401[hand], (DWORD)U14_GETTRANSFER, NULL, 0, pTransBlock,
+ unsigned int dwBytes = 0;
+ BOOL bOK = DeviceIoControl(aHand1401[hand], (unsigned int)U14_GETTRANSFER, NULL, 0, pTransBlock,
sizeof(TGET_TX_BLOCK), &dwBytes, NULL);
if (bOK && (dwBytes >= sizeof(TGET_TX_BLOCK)))
@@ -2145,12 +2145,12 @@ U14API(short) U14GetTransfer(short hand, TGET_TX_BLOCK *pTransBlock)
// 1 unable to access process (insufficient rights?)
// 2 unable to read process working set
// 3 unable to set process working set - bad parameters?
-U14API(short) U14WorkingSet(DWORD dwMinKb, DWORD dwMaxKb)
+U14API(short) U14WorkingSet(unsigned int dwMinKb, unsigned int dwMaxKb)
{
#ifdef _IS_WINDOWS_
short sRetVal = 0; // 0 means all is OK
HANDLE hProcess;
- DWORD dwVer = GetVersion();
+ unsigned int dwVer = GetVersion();
if (dwVer & 0x80000000) // is this not NT?
return 0; // then give up right now
@@ -2164,8 +2164,8 @@ U14API(short) U14WorkingSet(DWORD dwMinKb, DWORD dwMaxKb)
SIZE_T dwMinSize,dwMaxSize;
if (GetProcessWorkingSetSize(hProcess, &dwMinSize, &dwMaxSize))
{
- DWORD dwMin = dwMinKb << 10; // convert from kb to bytes
- DWORD dwMax = dwMaxKb << 10;
+ unsigned int dwMin = dwMinKb << 10; // convert from kb to bytes
+ unsigned int dwMax = dwMaxKb << 10;
// if we get here, we have managed to read the current size
if (dwMin > dwMinSize) // need to change sizes?
@@ -2200,7 +2200,7 @@ U14API(short) U14WorkingSet(DWORD dwMinKb, DWORD dwMaxKb)
** U14UnSetTransfer Cancels a transfer area
** wArea The index of a block previously used in by SetTransfer
*****************************************************************************/
-U14API(short) U14UnSetTransfer(short hand, WORD wArea)
+U14API(short) U14UnSetTransfer(short hand, unsigned short wArea)
{
short sErr = CheckHandle(hand);
#ifdef _IS_WINDOWS_
@@ -2223,13 +2223,13 @@ U14API(short) U14UnSetTransfer(short hand, WORD wArea)
/****************************************************************************
** U14SetTransArea Sets an area up to be used for transfers
-** WORD wArea The area number to set up
+** unsigned short wArea The area number to set up
** void *pvBuff The address of the buffer for the data.
-** DWORD dwLength The length of the buffer for the data
+** unsigned int dwLength The length of the buffer for the data
** short eSz The element size (used for byte swapping on the Mac)
****************************************************************************/
-U14API(short) U14SetTransArea(short hand, WORD wArea, void *pvBuff,
- DWORD dwLength, short eSz)
+U14API(short) U14SetTransArea(short hand, unsigned short wArea, void *pvBuff,
+ unsigned int dwLength, short eSz)
{
TRANSFERDESC td;
short sErr = CheckHandle(hand);
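A minimal sketch of the set-up/tear-down pair described by the parameter comment above; the area number, buffer and size are placeholders, with ESZBYTES selecting plain byte-sized elements:

   char acBuf[16384];                                       /* host-side buffer    */
   short sErr = U14SetTransArea(hand, 0, acBuf, sizeof(acBuf),
                                ESZBYTES);                  /* no element swapping */
   if (sErr == U14ERR_NOERROR) {
       /* ... run commands that move data through area 0 ... */
       U14UnSetTransfer(hand, 0);                           /* release the area    */
   }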
@@ -2254,7 +2254,7 @@ U14API(short) U14SetTransArea(short hand, WORD wArea, void *pvBuff,
#ifndef _WIN64
if (!USE_NT_DIOC(hand)) /* Use Win 9x DIOC? */
{
- DWORD dwBytes;
+ unsigned int dwBytes;
VXTRANSFERDESC vxDesc; /* Structure to pass to VXD */
vxDesc.wArea = wArea; /* Copy across simple params */
vxDesc.dwLength = dwLength;
@@ -2264,10 +2264,10 @@ U14API(short) U14SetTransArea(short hand, WORD wArea, void *pvBuff,
sErr = U14ERR_DRIVTOOOLD;
else
{
- vxDesc.dwAddrOfs = (DWORD)pvBuff; /* 32 bit offset */
+ vxDesc.dwAddrOfs = (unsigned int)pvBuff; /* 32 bit offset */
vxDesc.wAddrSel = 0;
- if (DeviceIoControl(aHand1401[hand], (DWORD)U14_SETTRANSFER,
+ if (DeviceIoControl(aHand1401[hand], (unsigned int)U14_SETTRANSFER,
pvBuff,dwLength, /* Will translate pointer */
&vxDesc,sizeof(VXTRANSFERDESC),
&dwBytes,NULL))
@@ -2285,13 +2285,13 @@ U14API(short) U14SetTransArea(short hand, WORD wArea, void *pvBuff,
#endif
{
PARAMBLK rWork;
- DWORD dwBytes;
+ unsigned int dwBytes;
td.wArea = wArea; /* Pure NT - put data into struct */
td.lpvBuff = pvBuff;
td.dwLength = dwLength;
td.eSize = 0; // Dummy element size
- if (DeviceIoControl(aHand1401[hand],(DWORD)U14_SETTRANSFER,
+ if (DeviceIoControl(aHand1401[hand],(unsigned int)U14_SETTRANSFER,
&td,sizeof(TRANSFERDESC),
&rWork,sizeof(PARAMBLK),&dwBytes,NULL))
{
@@ -2344,8 +2344,8 @@ U14API(short) U14SetTransArea(short hand, WORD wArea, void *pvBuff,
** Returns 1 if an event handle exists, 0 if all OK and no event handle or
** a negative code for an error.
****************************************************************************/
-U14API(short) U14SetTransferEvent(short hand, WORD wArea, BOOL bEvent,
- BOOL bToHost, DWORD dwStart, DWORD dwLength)
+U14API(short) U14SetTransferEvent(short hand, unsigned short wArea, BOOL bEvent,
+ BOOL bToHost, unsigned int dwStart, unsigned int dwLength)
{
#ifdef _IS_WINDOWS_
TCSBLOCK csBlock;
@@ -2416,7 +2416,7 @@ U14API(short) U14SetTransferEvent(short hand, WORD wArea, BOOL bEvent,
** Would a U14WaitTransferEvent() call return immediately? return 1 if so,
** 0 if not or a negative code if a problem.
****************************************************************************/
-U14API(int) U14TestTransferEvent(short hand, WORD wArea)
+U14API(int) U14TestTransferEvent(short hand, unsigned short wArea)
{
#ifdef _IS_WINDOWS_
int iErr = CheckHandle(hand);
@@ -2441,7 +2441,7 @@ U14API(int) U14TestTransferEvent(short hand, WORD wArea)
** Returns If no event handle then return immediately. Else return 1 if
** timed out or 0=event, and a negative code if a problem.
****************************************************************************/
-U14API(int) U14WaitTransferEvent(short hand, WORD wArea, int msTimeOut)
+U14API(int) U14WaitTransferEvent(short hand, unsigned short wArea, int msTimeOut)
{
#ifdef _IS_WINDOWS_
int iErr = CheckHandle(hand);
@@ -2466,13 +2466,13 @@ U14API(int) U14WaitTransferEvent(short hand, WORD wArea, int msTimeOut)
/****************************************************************************
** U14SetCircular Sets an area up for circular DMA transfers
-** WORD wArea The area number to set up
+** unsigned short wArea The area number to set up
** BOOL bToHost Sets the direction of data transfer
** void *pvBuff The address of the buffer for the data
-** DWORD dwLength The length of the buffer for the data
+** unsigned int dwLength The length of the buffer for the data
****************************************************************************/
-U14API(short) U14SetCircular(short hand, WORD wArea, BOOL bToHost,
- void *pvBuff, DWORD dwLength)
+U14API(short) U14SetCircular(short hand, unsigned short wArea, BOOL bToHost,
+ void *pvBuff, unsigned int dwLength)
{
short sErr = CheckHandle(hand);
if (sErr != U14ERR_NOERROR)
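Following the parameter list above, setting up a circular area might look like this sketch; the direction flag, buffer and length are placeholders:

   static char acCirc[0x8000];                       /* circular DMA buffer       */
   short sErr = U14SetCircular(hand, 0, TRUE,        /* area 0, data to the host  */
                               acCirc, sizeof(acCirc));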
@@ -2495,14 +2495,14 @@ U14API(short) U14SetCircular(short hand, WORD wArea, BOOL bToHost,
else
{
PARAMBLK rWork;
- DWORD dwBytes;
+ unsigned int dwBytes;
TRANSFERDESC txDesc;
txDesc.wArea = wArea; /* Pure NT - put data into struct */
txDesc.lpvBuff = pvBuff;
txDesc.dwLength = dwLength;
txDesc.eSize = (short)bToHost; /* Use this for direction flag */
- if (DeviceIoControl(aHand1401[hand],(DWORD)U14_SETCIRCULAR,
+ if (DeviceIoControl(aHand1401[hand],(unsigned int)U14_SETCIRCULAR,
&txDesc, sizeof(TRANSFERDESC),
&rWork, sizeof(PARAMBLK),&dwBytes,NULL))
{
@@ -2542,7 +2542,7 @@ U14API(short) U14SetCircular(short hand, WORD wArea, BOOL bToHost,
** Function GetCircBlk returns the size (& start offset) of the next
** available block of circular data.
****************************************************************************/
-U14API(int) U14GetCircBlk(short hand, WORD wArea, DWORD *pdwOffs)
+U14API(int) U14GetCircBlk(short hand, unsigned short wArea, unsigned int *pdwOffs)
{
int lErr = CheckHandle(hand);
if (lErr != U14ERR_NOERROR)
@@ -2555,10 +2555,10 @@ U14API(int) U14GetCircBlk(short hand, WORD wArea, DWORD *pdwOffs)
#ifdef _IS_WINDOWS_
PARAMBLK rWork;
TCSBLOCK csBlock;
- DWORD dwBytes;
+ unsigned int dwBytes;
csBlock.longs[0] = wArea; // Area number into control block
rWork.sState = U14ERR_DRIVCOMMS;
- if (DeviceIoControl(aHand1401[hand], (DWORD)U14_GETCIRCBLK, &csBlock, sizeof(TCSBLOCK), &rWork, sizeof(PARAMBLK), &dwBytes, NULL) &&
+ if (DeviceIoControl(aHand1401[hand], (unsigned int)U14_GETCIRCBLK, &csBlock, sizeof(TCSBLOCK), &rWork, sizeof(PARAMBLK), &dwBytes, NULL) &&
(dwBytes >= sizeof(PARAMBLK)))
lErr = rWork.sState;
else
@@ -2591,8 +2591,8 @@ U14API(int) U14GetCircBlk(short hand, WORD wArea, DWORD *pdwOffs)
** resuse for circular transfers and returns the size (& start
** offset) of the next available block of circular data.
****************************************************************************/
-U14API(int) U14FreeCircBlk(short hand, WORD wArea, DWORD dwOffs, DWORD dwSize,
- DWORD *pdwOffs)
+U14API(int) U14FreeCircBlk(short hand, unsigned short wArea, unsigned int dwOffs, unsigned int dwSize,
+ unsigned int *pdwOffs)
{
int lErr = CheckHandle(hand);
if (lErr != U14ERR_NOERROR)
@@ -2603,12 +2603,12 @@ U14API(int) U14FreeCircBlk(short hand, WORD wArea, DWORD dwOffs, DWORD dwSize,
#ifdef _IS_WINDOWS_
PARAMBLK rWork;
TCSBLOCK csBlock;
- DWORD dwBytes;
+ unsigned int dwBytes;
csBlock.longs[0] = wArea; // Area number into control block
csBlock.longs[1] = dwOffs;
csBlock.longs[2] = dwSize;
rWork.sState = U14ERR_DRIVCOMMS;
- if (DeviceIoControl(aHand1401[hand], (DWORD)U14_FREECIRCBLK, &csBlock, sizeof(TCSBLOCK),
+ if (DeviceIoControl(aHand1401[hand], (unsigned int)U14_FREECIRCBLK, &csBlock, sizeof(TCSBLOCK),
&rWork, sizeof(PARAMBLK), &dwBytes, NULL) &&
(dwBytes >= sizeof(PARAMBLK)))
lErr = rWork.sState;
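Taken together with U14GetCircBlk() above, a consumer loop along these lines is the intended pattern (a sketch only; block processing and error handling are elided):

   unsigned int dwOffs;
   int iSize = U14GetCircBlk(hand, 0, &dwOffs);        /* next filled block, if any */
   while (iSize > 0) {
       /* ... consume iSize bytes at offset dwOffs in the circular buffer ... */
       iSize = U14FreeCircBlk(hand, 0, dwOffs, iSize,  /* return the block and ask  */
                              &dwOffs);                /* for the next one          */
   }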
@@ -2647,7 +2647,7 @@ U14API(int) U14FreeCircBlk(short hand, WORD wArea, DWORD dwOffs, DWORD dwSize,
** which it should be to get a pointer
*****************************************************************************/
static short Transfer(short hand, BOOL bTo1401, char* pData,
- DWORD dwSize, DWORD dw1401, short eSz)
+ unsigned int dwSize, unsigned int dw1401, short eSz)
{
char strcopy[MAXSTRLEN+1]; // to hold copy of work string
short sResult = U14SetTransArea(hand, 0, (void *)pData, dwSize, eSz);
@@ -2670,8 +2670,8 @@ static short Transfer(short hand, BOOL bTo1401, char* pData,
/****************************************************************************
** Function ToHost transfers data into the host from the 1401
****************************************************************************/
-U14API(short) U14ToHost(short hand, char* pAddrHost, DWORD dwSize,
- DWORD dw1401, short eSz)
+U14API(short) U14ToHost(short hand, char* pAddrHost, unsigned int dwSize,
+ unsigned int dw1401, short eSz)
{
short sErr = CheckHandle(hand);
if ((sErr == U14ERR_NOERROR) && dwSize) // TOHOST is a constant
@@ -2682,8 +2682,8 @@ U14API(short) U14ToHost(short hand, char* pAddrHost, DWORD dwSize,
/****************************************************************************
** Function To1401 transfers data into the 1401 from the host
****************************************************************************/
-U14API(short) U14To1401(short hand, const char* pAddrHost,DWORD dwSize,
- DWORD dw1401, short eSz)
+U14API(short) U14To1401(short hand, const char* pAddrHost,unsigned int dwSize,
+ unsigned int dw1401, short eSz)
{
short sErr = CheckHandle(hand);
if ((sErr == U14ERR_NOERROR) && dwSize) // TO1401 is a constant
@@ -2707,7 +2707,7 @@ U14API(short) U14To1401(short hand, const char* pAddrHost,DWORD dwSize,
#define file_close(h) close(h)
#define file_seek(h, pos) lseek(h, pos, SEEK_SET)
#define file_read(h, buffer, size) (read(h, buffer, size) == (ssize_t)size)
-static DWORD GetModuleFileName(void* dummy, char* buffer, int max)
+static unsigned int GetModuleFileName(void* dummy, char* buffer, int max)
{
// The following works for Linux systems with a /proc file system.
char szProcPath[32];
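The hunk above notes that the emulation relies on a /proc file system; a stand-alone sketch of that idea (not necessarily what this code does) is:

   /* needs <unistd.h>; resolve our own executable path via the /proc symlink */
   char szPath[256];
   ssize_t n = readlink("/proc/self/exe", szPath, sizeof(szPath) - 1);
   if (n > 0)
       szPath[n] = '\0';            /* readlink() does not NUL-terminate */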
@@ -2766,7 +2766,7 @@ U14API(short) U14LdCmd(short hand, const char* command)
// application was run from.
if (!bGotIt) // Still not got it?
{
- DWORD dwLen = GetModuleFileName(NULL, filnam, FNSZ); // Get app path
+ unsigned int dwLen = GetModuleFileName(NULL, filnam, FNSZ); // Get app path
if (dwLen > 0) // and use it as path if found
{
char* pStr = strrchr(filnam, PATHSEP); // Point to last separator
@@ -2821,7 +2821,7 @@ U14API(short) U14LdCmd(short hand, const char* command)
file_seek(iFHandle, sizeof(CMDHEAD));
if (file_read(iFHandle, pMem, (UINT)nComSize))
{
- sErr = U14SetTransArea(hand, 0, (void *)pMem, (DWORD)nComSize, ESZBYTES);
+ sErr = U14SetTransArea(hand, 0, (void *)pMem, (unsigned int)nComSize, ESZBYTES);
if (sErr == U14ERR_NOERROR)
{
sprintf(strcopy, "CLOAD,0,$%X;", (int)nComSize);
@@ -2858,9 +2858,9 @@ U14API(short) U14LdCmd(short hand, const char* command)
** Returns NOERROR code or a long with error in lo word and index of
** command that failed in high word
****************************************************************************/
-U14API(DWORD) U14Ld(short hand, const char* vl, const char* str)
+U14API(unsigned int) U14Ld(short hand, const char* vl, const char* str)
{
- DWORD dwIndex = 0; // index to current command
+ unsigned int dwIndex = 0; // index to current command
long lErr = U14ERR_NOERROR; // what the error was that went wrong
char strcopy[MAXSTRLEN+1]; // stores unmodified str parameter
char szFExt[8]; // The command file extension
@@ -2939,7 +2939,7 @@ U14API(DWORD) U14Ld(short hand, const char* vl, const char* str)
return lErr;
}
else
- return ((dwIndex<<16) | ((DWORD)lErr & 0x0000FFFF));
+ return ((dwIndex<<16) | ((unsigned int)lErr & 0x0000FFFF));
}
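Given the packing described in the header comment (error code in the low word, index of the failing command in the high word), a caller can unpack the result roughly as follows; both string arguments are placeholders:

   unsigned int dwRet = U14Ld(hand, "", "ADCMEM");     /* placeholder arguments      */
   if (dwRet != U14ERR_NOERROR) {
       short sErr = (short)(dwRet & 0x0000FFFF);       /* what went wrong            */
       unsigned int dwBad = dwRet >> 16;               /* which command it failed on */
       /* ... report sErr for command index dwBad ... */
   }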
// Initialise the library (if not initialised) and return the library version
@@ -2951,7 +2951,7 @@ U14API(int) U14InitLib(void)
int i;
#ifdef _IS_WINDOWS_
int j;
- DWORD dwVersion = GetVersion();
+ unsigned int dwVersion = GetVersion();
bWindows9x = FALSE; // Assume not Win9x
if (dwVersion & 0x80000000) // if not windows NT
@@ -2993,12 +2993,12 @@ U14API(int) U14InitLib(void)
#ifdef _IS_WINDOWS_
#ifndef U14_NOT_DLL
/****************************************************************************
-** FUNCTION: DllMain(HANDLE, DWORD, LPVOID)
+** FUNCTION: DllMain(HANDLE, unsigned int, LPVOID)
** LibMain is called by Windows when the DLL is initialized, Thread Attached,
** and other times. Refer to SDK documentation, as to the different ways this
** may be called.
****************************************************************************/
-INT APIENTRY DllMain(HANDLE hInst, DWORD ul_reason_being_called, LPVOID lpReserved)
+INT APIENTRY DllMain(HANDLE hInst, unsigned int ul_reason_being_called, LPVOID lpReserved)
{
int iRetVal = 1;
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 87e852a..f73287e 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -96,11 +96,19 @@ config COMEDI_SKEL
To compile this driver as a module, choose M here: the module will be
called skel.
+config COMEDI_SSV_DNP
+ tristate "SSV Embedded Systems DIL/Net-PC support"
+ depends on X86_32 || COMPILE_TEST
+ ---help---
+ Enable support for SSV Embedded Systems DIL/Net-PC
+
+ To compile this driver as a module, choose M here: the module will be
+ called ssv_dnp.
+
endif # COMEDI_MISC_DRIVERS
menuconfig COMEDI_ISA_DRIVERS
bool "Comedi ISA and PC/104 drivers"
- depends on ISA
---help---
Enable comedi ISA and PC/104 drivers to be built
@@ -110,15 +118,6 @@ menuconfig COMEDI_ISA_DRIVERS
if COMEDI_ISA_DRIVERS
-config COMEDI_ACL7225B
- tristate "ADlink NuDAQ ACL-7225b and compatibles support"
- ---help---
- Enable support for ADlink NuDAQ ACL-7225b and compatibles,
- ADlink ACL-7225b (acl7225b), ICP P16R16DIO (p16r16dio)
-
- To compile this driver as a module, choose M here: the module will be
- called acl7225b.
-
config COMEDI_PCL711
tristate "Advantech PCL-711/711b and ADlink ACL-8112 ISA card support"
---help---
@@ -131,19 +130,21 @@ config COMEDI_PCL724
tristate "Advantech PCL-722/724/731 and ADlink ACL-7122/7124/PET-48DIO"
select COMEDI_8255
---help---
- Enable support for Advantech PCL-724, PCL-722, PCL-731 and
- ADlink ACL-7122, ACL-7124, PET-48DIO ISA cards
+ Enable support for ISA and PC/104 based 8255 digital i/o boards. This
+ driver provides a legacy comedi driver wrapper for the generic 8255
+ support driver.
- To compile this driver as a module, choose M here: the module will be
- called pcl724.
-
-config COMEDI_PCL725
- tristate "Advantech PCL-725 and compatible ISA card support"
- ---help---
- Enable support for Advantech PCL-725 and compatible ISA cards.
+ Supported boards include:
+ Advantech PCL-724 24 channels
+ Advantech PCL-722 144 (or 96) channels
+ Advantech PCL-731 48 channels
+ ADlink ACL-7122 144 (or 96) channels
+ ADlink ACL-7124 24 channels
+ ADlink PET-48DIO 48 channels
+ WinSystems PCM-IO48 48 channels (PC/104)
To compile this driver as a module, choose M here: the module will be
- called pcl725.
+ called pcl724.
config COMEDI_PCL726
tristate "Advantech PCL-726 and compatible ISA card support"
@@ -154,10 +155,21 @@ config COMEDI_PCL726
called pcl726.
config COMEDI_PCL730
- tristate "Advantech PCL-730 and ADlink ACL-7130 ISA card support"
+ tristate "Simple Digital I/O board support (8-bit ports)"
---help---
- Enable support for Advantech PCL-730, ICP ISO-730 and ADlink
- ACL-7130 ISA cards
+ Enable support for various simple ISA or PC/104 Digital I/O boards.
+ These boards all use 8-bit I/O ports.
+
+ Advantech PCL-730 isolated - 16 in/16 out ttl - 16 in/16 out
+ ICP ISO-730 isolated - 16 in/16 out ttl - 16 in/16 out
+ ADlink ACL-7130 isolated - 16 in/16 out ttl - 16 in/16 out
+ Advantech PCM-3730 isolated - 8 in/8 out ttl - 16 in/16 out
+ Advantech PCL-725 isolated - 8 in/8 out
+ ICP P8R8-DIO isolated - 8 in/8 out
+ ADlink ACL-7225b isolated - 16 in/16 out
+ ICP P16R16-DIO isolated - 16 in/16 out
+ Advantech PCL-733 isolated - 32 in
+ Advantech PCL-734 isolated - 32 out
To compile this driver as a module, choose M here: the module will be
called pcl730.
@@ -201,14 +213,6 @@ config COMEDI_PCM3724
To compile this driver as a module, choose M here: the module will be
called pcm3724.
-config COMEDI_PCM3730
- tristate "Advantech PCM-3730 and clone PC/104 board support"
- ---help---
- Enable support for Advantech PCM-3730 and clone PC/104 boards
-
- To compile this driver as a module, choose M here: the module will be
- called pcm3730.
-
config COMEDI_AMPLC_DIO200_ISA
tristate "Amplicon PC212E/PC214E/PC215E/PC218E/PC272E"
select COMEDI_AMPLC_DIO200
@@ -391,6 +395,14 @@ config COMEDI_DMM32AT
To compile this driver as a module, choose M here: the module will be
called dmm32at.
+config COMEDI_UNIOXX5
+ tristate "Fastwel UNIOxx-5 analog and digital io board support"
+ ---help---
+ Enable support for Fastwel UNIOxx-5 (analog and digital i/o) boards
+
+ To compile this driver as a module, choose M here: the module will be
+ called unioxx5.
+
config COMEDI_FL512
tristate "FL512 ISA card support"
---help---
@@ -417,6 +429,15 @@ config COMEDI_AIO_IIRO_16
To compile this driver as a module, choose M here: the module will be
called aio_iiro_16.
+config COMEDI_II_PCI20KC
+ tristate "Intelligent Instruments PCI-20001C carrier support"
+ ---help---
+ Enable support for Intelligent Instruments PCI-20001C carrier
+ PCI-20001, PCI-20006 and PCI-20341
+
+ To compile this driver as a module, choose M here: the module will be
+ called ii_pci20kc.
+
config COMEDI_C6XDIGIO
tristate "Mechatronic Systems Inc. C6x_DIGIO DSP daughter card support"
---help---
@@ -462,7 +483,6 @@ config COMEDI_NI_AT_AO
config COMEDI_NI_ATMIO
tristate "NI AT-MIO E series ISA-PNP card support"
- depends on ISAPNP
select COMEDI_8255
select COMEDI_NI_TIO
---help---
@@ -475,11 +495,10 @@ config COMEDI_NI_ATMIO
called ni_atmio.
config COMEDI_NI_ATMIO16D
- tristate "NI AT-MIO16/AT-MIO16D series ISA-PNP card support"
- depends on ISAPNP
+ tristate "NI AT-MIO-16/AT-MIO-16D series ISA card support"
select COMEDI_8255
---help---
- Enable support for National Instruments AT-MIO16/AT-MIO16D cards.
+ Enable support for National Instruments AT-MIO-16/AT-MIO-16D cards.
To compile this driver as a module, choose M here: the module will be
called ni_atmio16d.
@@ -487,7 +506,7 @@ config COMEDI_NI_ATMIO16D
config COMEDI_NI_LABPC_ISA
tristate "NI Lab-PC and compatibles ISA support"
select COMEDI_NI_LABPC
- depends on VIRT_TO_BUS
+ select COMEDI_NI_LABPC_ISADMA if ISA_DMA_API && VIRT_TO_BUS
---help---
Enable support for National Instruments Lab-PC and compatibles
Lab-PC-1200, Lab-PC-1200AI, Lab-PC+.
@@ -543,12 +562,19 @@ config COMEDI_POC
tristate "Generic driver for very simple devices"
---help---
Enable generic support for very simple / POC (Piece of Crap) boards,
- Keithley Metrabyte DAC-02 (dac02), Advantech PCL-733 (pcl733) and
- PCL-734 (pcl734)
+ Keithley Metrabyte DAC-02 (dac02).
To compile this driver as a module, choose M here: the module will be
called poc.
+config COMEDI_S526
+ tristate "Sensoray s526 support"
+ ---help---
+ Enable support for Sensoray s526
+
+ To compile this driver as a module, choose M here: the module will be
+ called s526.
+
endif # COMEDI_ISA_DRIVERS
menuconfig COMEDI_PCI_DRIVERS
@@ -846,14 +872,6 @@ config COMEDI_DYNA_PCI10XX
To compile this driver as a module, choose M here: the module will be
called dyna_pci10xx.
-config COMEDI_UNIOXX5
- tristate "Fastwel UNIOxx-5 analog and digital io board support"
- ---help---
- Enable support for Fastwel UNIOxx-5 (analog and digital i/o) boards
-
- To compile this driver as a module, choose M here: the module will be
- called unioxx5.
-
config COMEDI_GSC_HPDI
tristate "General Standards PCI-HPDI32 / PMC-HPDI32 support"
select COMEDI_FC
@@ -873,15 +891,6 @@ config COMEDI_ICP_MULTI
To compile this driver as a module, choose M here: the module will be
called icp_multi.
-config COMEDI_II_PCI20KC
- tristate "Intelligent Instruments PCI-20001C carrier support"
- ---help---
- Enable support for Intelligent Instruments PCI-20001C carrier
- PCI-20001, PCI-20006 and PCI-20341
-
- To compile this driver as a module, choose M here: the module will be
- called ii_pci20kc.
-
config COMEDI_DAQBOARD2000
tristate "IOtech DAQboard/2000 support"
select COMEDI_8255
@@ -1076,14 +1085,6 @@ config COMEDI_RTD520
To compile this driver as a module, choose M here: the module will be
called rtd520.
-config COMEDI_S526
- tristate "Sensoray s526 support"
- ---help---
- Enable support for Sensoray s526
-
- To compile this driver as a module, choose M here: the module will be
- called s526.
-
config COMEDI_S626
tristate "Sensoray 626 support"
select COMEDI_FC
@@ -1093,14 +1094,6 @@ config COMEDI_S626
To compile this driver as a module, choose M here: the module will be
called s626.
-config COMEDI_SSV_DNP
- tristate "SSV Embedded Systems DIL/Net-PC support"
- ---help---
- Enable support for SSV Embedded Systems DIL/Net-PC
-
- To compile this driver as a module, choose M here: the module will be
- called ssv_dnp.
-
config COMEDI_MITE
depends on HAS_DMA
tristate
@@ -1277,6 +1270,9 @@ config COMEDI_NI_LABPC
select COMEDI_8255
select COMEDI_FC
+config COMEDI_NI_LABPC_ISADMA
+ tristate
+
config COMEDI_NI_TIO
tristate
diff --git a/drivers/staging/comedi/TODO b/drivers/staging/comedi/TODO
index b10f739..fa8da9a 100644
--- a/drivers/staging/comedi/TODO
+++ b/drivers/staging/comedi/TODO
@@ -9,4 +9,4 @@ TODO:
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
copy:
Ian Abbott <abbotti@mev.co.uk>
- Frank Mori Hess <fmhess@users.sourceforge.net>
+ H Hartley Sweeten <hsweeten@visionengravers.com>
diff --git a/drivers/staging/comedi/comedi.h b/drivers/staging/comedi/comedi.h
index 4233605..6bbbe5b 100644
--- a/drivers/staging/comedi/comedi.h
+++ b/drivers/staging/comedi/comedi.h
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
#ifndef _COMEDI_H
diff --git a/drivers/staging/comedi/comedi_buf.c b/drivers/staging/comedi/comedi_buf.c
index d4be0e6..94b2385f 100644
--- a/drivers/staging/comedi/comedi_buf.c
+++ b/drivers/staging/comedi/comedi_buf.c
@@ -13,12 +13,10 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/vmalloc.h>
+
#include "comedidev.h"
#include "comedi_internal.h"
diff --git a/drivers/staging/comedi/comedi_compat32.c b/drivers/staging/comedi/comedi_compat32.c
index ad208cd..2dfb06a 100644
--- a/drivers/staging/comedi/comedi_compat32.c
+++ b/drivers/staging/comedi/comedi_compat32.c
@@ -17,11 +17,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
#include <linux/uaccess.h>
diff --git a/drivers/staging/comedi/comedi_compat32.h b/drivers/staging/comedi/comedi_compat32.h
index 60cf51c..28e3c30 100644
--- a/drivers/staging/comedi/comedi_compat32.h
+++ b/drivers/staging/comedi/comedi_compat32.h
@@ -17,11 +17,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
#ifndef _COMEDI_COMPAT32_H
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 924c54c..1636c7c 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
#undef DEBUG
@@ -31,7 +26,6 @@
#include <linux/sched.h>
#include <linux/fcntl.h>
#include <linux/delay.h>
-#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmod.h>
@@ -267,7 +261,7 @@ static int resize_async_buffer(struct comedi_device *dev,
/* sysfs attribute files */
-static ssize_t show_max_read_buffer_kb(struct device *csdev,
+static ssize_t max_read_buffer_kb_show(struct device *csdev,
struct device_attribute *attr, char *buf)
{
unsigned int minor = MINOR(csdev->devt);
@@ -288,7 +282,7 @@ static ssize_t show_max_read_buffer_kb(struct device *csdev,
return snprintf(buf, PAGE_SIZE, "%i\n", size);
}
-static ssize_t store_max_read_buffer_kb(struct device *csdev,
+static ssize_t max_read_buffer_kb_store(struct device *csdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -319,8 +313,9 @@ static ssize_t store_max_read_buffer_kb(struct device *csdev,
return err ? err : count;
}
+static DEVICE_ATTR_RW(max_read_buffer_kb);
-static ssize_t show_read_buffer_kb(struct device *csdev,
+static ssize_t read_buffer_kb_show(struct device *csdev,
struct device_attribute *attr, char *buf)
{
unsigned int minor = MINOR(csdev->devt);
@@ -341,7 +336,7 @@ static ssize_t show_read_buffer_kb(struct device *csdev,
return snprintf(buf, PAGE_SIZE, "%i\n", size);
}
-static ssize_t store_read_buffer_kb(struct device *csdev,
+static ssize_t read_buffer_kb_store(struct device *csdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -372,8 +367,9 @@ static ssize_t store_read_buffer_kb(struct device *csdev,
return err ? err : count;
}
+static DEVICE_ATTR_RW(read_buffer_kb);
-static ssize_t show_max_write_buffer_kb(struct device *csdev,
+static ssize_t max_write_buffer_kb_show(struct device *csdev,
struct device_attribute *attr,
char *buf)
{
@@ -395,7 +391,7 @@ static ssize_t show_max_write_buffer_kb(struct device *csdev,
return snprintf(buf, PAGE_SIZE, "%i\n", size);
}
-static ssize_t store_max_write_buffer_kb(struct device *csdev,
+static ssize_t max_write_buffer_kb_store(struct device *csdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -426,8 +422,9 @@ static ssize_t store_max_write_buffer_kb(struct device *csdev,
return err ? err : count;
}
+static DEVICE_ATTR_RW(max_write_buffer_kb);
-static ssize_t show_write_buffer_kb(struct device *csdev,
+static ssize_t write_buffer_kb_show(struct device *csdev,
struct device_attribute *attr, char *buf)
{
unsigned int minor = MINOR(csdev->devt);
@@ -448,7 +445,7 @@ static ssize_t show_write_buffer_kb(struct device *csdev,
return snprintf(buf, PAGE_SIZE, "%i\n", size);
}
-static ssize_t store_write_buffer_kb(struct device *csdev,
+static ssize_t write_buffer_kb_store(struct device *csdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -479,18 +476,16 @@ static ssize_t store_write_buffer_kb(struct device *csdev,
return err ? err : count;
}
+static DEVICE_ATTR_RW(write_buffer_kb);
-static struct device_attribute comedi_dev_attrs[] = {
- __ATTR(max_read_buffer_kb, S_IRUGO | S_IWUSR,
- show_max_read_buffer_kb, store_max_read_buffer_kb),
- __ATTR(read_buffer_kb, S_IRUGO | S_IWUSR | S_IWGRP,
- show_read_buffer_kb, store_read_buffer_kb),
- __ATTR(max_write_buffer_kb, S_IRUGO | S_IWUSR,
- show_max_write_buffer_kb, store_max_write_buffer_kb),
- __ATTR(write_buffer_kb, S_IRUGO | S_IWUSR | S_IWGRP,
- show_write_buffer_kb, store_write_buffer_kb),
- __ATTR_NULL
+static struct attribute *comedi_dev_attrs[] = {
+ &dev_attr_max_read_buffer_kb.attr,
+ &dev_attr_read_buffer_kb.attr,
+ &dev_attr_max_write_buffer_kb.attr,
+ &dev_attr_write_buffer_kb.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(comedi_dev);
static void comedi_set_subdevice_runflags(struct comedi_subdevice *s,
unsigned mask, unsigned bits)
@@ -536,6 +531,23 @@ static bool comedi_is_subdevice_idle(struct comedi_subdevice *s)
return (runflags & (SRF_ERROR | SRF_RUNNING)) ? false : true;
}
+/**
+ * comedi_alloc_spriv() - Allocate memory for the subdevice private data.
+ * @s: comedi_subdevice struct
+ * @size: size of the memory to allocate
+ *
+ * This also sets the subdevice runflags to allow the core to automatically
+ * free the private data during the detach.
+ */
+void *comedi_alloc_spriv(struct comedi_subdevice *s, size_t size)
+{
+ s->private = kzalloc(size, GFP_KERNEL);
+ if (s->private)
+ comedi_set_subdevice_runflags(s, ~0, SRF_FREE_SPRIV);
+ return s->private;
+}
+EXPORT_SYMBOL_GPL(comedi_alloc_spriv);
+
/*
This function restores a subdevice to an idle state.
*/
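A minimal sketch of how a driver attach routine might use this helper; the private-data struct and field are hypothetical:

	struct skel_spriv {			/* hypothetical per-subdevice state */
		unsigned int last_chan;
	};

	struct skel_spriv *spriv;

	spriv = comedi_alloc_spriv(s, sizeof(*spriv));
	if (!spriv)
		return -ENOMEM;
	spriv->last_chan = 0;			/* the core frees spriv on detach */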
@@ -665,7 +677,7 @@ static int do_bufconfig_ioctl(struct comedi_device *dev,
if (copy_from_user(&bc, arg, sizeof(bc)))
return -EFAULT;
- if (bc.subdevice >= dev->n_subdevices || bc.subdevice < 0)
+ if (bc.subdevice >= dev->n_subdevices)
return -EINVAL;
s = &dev->subdevices[bc.subdevice];
@@ -918,7 +930,7 @@ static int do_bufinfo_ioctl(struct comedi_device *dev,
if (copy_from_user(&bi, arg, sizeof(bi)))
return -EFAULT;
- if (bi.subdevice >= dev->n_subdevices || bi.subdevice < 0)
+ if (bi.subdevice >= dev->n_subdevices)
return -EINVAL;
s = &dev->subdevices[bi.subdevice];
@@ -1401,22 +1413,19 @@ static int do_cmd_ioctl(struct comedi_device *dev,
DPRINTK("subdevice busy\n");
return -EBUSY;
}
- s->busy = file;
/* make sure channel/gain list isn't too long */
if (cmd.chanlist_len > s->len_chanlist) {
DPRINTK("channel/gain list too long %u > %d\n",
cmd.chanlist_len, s->len_chanlist);
- ret = -EINVAL;
- goto cleanup;
+ return -EINVAL;
}
/* make sure channel/gain list isn't too short */
if (cmd.chanlist_len < 1) {
DPRINTK("channel/gain list too short %u < 1\n",
cmd.chanlist_len);
- ret = -EINVAL;
- goto cleanup;
+ return -EINVAL;
}
async->cmd = cmd;
@@ -1426,8 +1435,7 @@ static int do_cmd_ioctl(struct comedi_device *dev,
kmalloc(async->cmd.chanlist_len * sizeof(int), GFP_KERNEL);
if (!async->cmd.chanlist) {
DPRINTK("allocation failed\n");
- ret = -ENOMEM;
- goto cleanup;
+ return -ENOMEM;
}
if (copy_from_user(async->cmd.chanlist, user_chanlist,
@@ -1479,6 +1487,9 @@ static int do_cmd_ioctl(struct comedi_device *dev,
comedi_set_subdevice_runflags(s, ~0, SRF_USER | SRF_RUNNING);
+ /* set s->busy _after_ setting SRF_RUNNING flag to avoid race with
+ * comedi_read() or comedi_write() */
+ s->busy = file;
ret = s->do_cmd(dev, s);
if (ret == 0)
return 0;
@@ -1693,6 +1704,7 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg,
void *file)
{
struct comedi_subdevice *s;
+ int ret;
if (arg >= dev->n_subdevices)
return -EINVAL;
@@ -1709,7 +1721,11 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg,
if (s->busy != file)
return -EBUSY;
- return do_cancel(dev, s);
+ ret = do_cancel(dev, s);
+ if (comedi_get_subdevice_runflags(s) & SRF_USER)
+ wake_up_interruptible(&s->async->wait_head);
+
+ return ret;
}
/*
@@ -2041,11 +2057,13 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
if (!comedi_is_subdevice_running(s)) {
if (count == 0) {
+ mutex_lock(&dev->mutex);
if (comedi_is_subdevice_in_error(s))
retval = -EPIPE;
else
retval = 0;
do_become_nonbusy(dev, s);
+ mutex_unlock(&dev->mutex);
}
break;
}
@@ -2144,11 +2162,13 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
if (n == 0) {
if (!comedi_is_subdevice_running(s)) {
+ mutex_lock(&dev->mutex);
do_become_nonbusy(dev, s);
if (comedi_is_subdevice_in_error(s))
retval = -EPIPE;
else
retval = 0;
+ mutex_unlock(&dev->mutex);
break;
}
if (file->f_flags & O_NONBLOCK) {
@@ -2186,9 +2206,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
buf += n;
break; /* makes device work like a pipe */
}
- if (comedi_is_subdevice_idle(s) &&
- async->buf_read_count - async->buf_write_count == 0) {
- do_become_nonbusy(dev, s);
+ if (comedi_is_subdevice_idle(s)) {
+ mutex_lock(&dev->mutex);
+ if (async->buf_read_count - async->buf_write_count == 0)
+ do_become_nonbusy(dev, s);
+ mutex_unlock(&dev->mutex);
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&async->wait_head, &wait);
@@ -2317,9 +2339,6 @@ static int comedi_close(struct inode *inode, struct file *file)
mutex_unlock(&dev->mutex);
- if (file->f_flags & FASYNC)
- comedi_fasync(-1, file, 0);
-
return 0;
}
@@ -2545,7 +2564,7 @@ static int __init comedi_init(void)
return PTR_ERR(comedi_class);
}
- comedi_class->dev_attrs = comedi_dev_attrs;
+ comedi_class->dev_groups = comedi_dev_groups;
/* XXX requires /proc interface */
comedi_proc_init();
diff --git a/drivers/staging/comedi/comedi_internal.h b/drivers/staging/comedi/comedi_internal.h
index d5e03e5..fda1a7b 100644
--- a/drivers/staging/comedi/comedi_internal.h
+++ b/drivers/staging/comedi/comedi_internal.h
@@ -24,6 +24,7 @@ extern unsigned int comedi_default_buf_maxsize_kb;
/* drivers.c */
extern struct comedi_driver *comedi_drivers;
+extern struct mutex comedi_drivers_list_lock;
int insn_inval(struct comedi_device *, struct comedi_subdevice *,
struct comedi_insn *, unsigned int *);
diff --git a/drivers/staging/comedi/comedi_pci.c b/drivers/staging/comedi/comedi_pci.c
index 5fad084..abbc0e4 100644
--- a/drivers/staging/comedi/comedi_pci.c
+++ b/drivers/staging/comedi/comedi_pci.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/pci.h>
diff --git a/drivers/staging/comedi/comedi_pcmcia.c b/drivers/staging/comedi/comedi_pcmcia.c
index 453ff3b..9d49d5d 100644
--- a/drivers/staging/comedi/comedi_pcmcia.c
+++ b/drivers/staging/comedi/comedi_pcmcia.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/comedi/comedi_usb.c b/drivers/staging/comedi/comedi_usb.c
index 9d9716a..13f18be 100644
--- a/drivers/staging/comedi/comedi_usb.c
+++ b/drivers/staging/comedi/comedi_usb.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/usb.h>
@@ -35,6 +31,18 @@ struct usb_interface *comedi_to_usb_interface(struct comedi_device *dev)
EXPORT_SYMBOL_GPL(comedi_to_usb_interface);
/**
+ * comedi_to_usb_dev() - comedi_device pointer to usb_device pointer.
+ * @dev: comedi_device struct
+ */
+struct usb_device *comedi_to_usb_dev(struct comedi_device *dev)
+{
+ struct usb_interface *intf = comedi_to_usb_interface(dev);
+
+ return intf ? interface_to_usbdev(intf) : NULL;
+}
+EXPORT_SYMBOL_GPL(comedi_to_usb_dev);
+
+/**
* comedi_usb_auto_config() - Configure/probe a comedi USB driver.
* @intf: usb_interface struct
* @driver: comedi_driver struct
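A short sketch of the new helper from a USB comedi driver's attach path; the log message is illustrative only:

	struct usb_device *usb = comedi_to_usb_dev(dev);

	if (!usb)
		return -ENODEV;
	dev_info(dev->class_dev, "bound to USB device %04x:%04x\n",
		 le16_to_cpu(usb->descriptor.idVendor),
		 le16_to_cpu(usb->descriptor.idProduct));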
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index cdd4720..2e19f65 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -14,32 +14,12 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
#ifndef _COMEDIDEV_H
#define _COMEDIDEV_H
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/kdev_t.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <linux/wait.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
-#include <linux/timer.h>
#include "comedi.h"
@@ -270,11 +250,14 @@ enum subdevice_runflags {
/* indicates an COMEDI_CB_ERROR event has occurred since the last
* command was started */
SRF_ERROR = 0x00000004,
- SRF_RUNNING = 0x08000000
+ SRF_RUNNING = 0x08000000,
+ SRF_FREE_SPRIV = 0x80000000, /* free s->private on detach */
};
bool comedi_is_subdevice_running(struct comedi_subdevice *s);
+void *comedi_alloc_spriv(struct comedi_subdevice *s, size_t size);
+
int comedi_check_chanlist(struct comedi_subdevice *s,
int n,
unsigned int *chanlist);
@@ -312,6 +295,18 @@ struct comedi_lrange {
struct comedi_krange range[GCC_ZERO_LENGTH_ARRAY];
};
+static inline bool comedi_range_is_bipolar(struct comedi_subdevice *s,
+ unsigned int range)
+{
+ return s->range_table->range[range].min < 0;
+}
+
+static inline bool comedi_range_is_unipolar(struct comedi_subdevice *s,
+ unsigned int range)
+{
+ return s->range_table->range[range].min >= 0;
+}
+
/* some silly little inline functions */
static inline unsigned int bytes_per_sample(const struct comedi_subdevice *subd)
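The comedi_range_is_bipolar()/comedi_range_is_unipolar() helpers let drivers branch on the selected range without reaching into range_table themselves. A sketch of the usual pattern in an analog-input (*insn_read); the surrounding function and my_read_hw() are hypothetical, only the helper call matches this header:

/* Hypothetical (*insn_read): munge two's complement data for bipolar ranges. */
static int my_ai_insn_read(struct comedi_device *dev,
			   struct comedi_subdevice *s,
			   struct comedi_insn *insn, unsigned int *data)
{
	unsigned int range = CR_RANGE(insn->chanspec);
	int i;

	for (i = 0; i < insn->n; i++) {
		unsigned int val = my_read_hw(dev);	/* assumed hardware read */

		/* flip the sign bit: two's complement -> offset binary */
		if (comedi_range_is_bipolar(s, range))
			val ^= (s->maxdata + 1) >> 1;

		data[i] = val;
	}
	return insn->n;
}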
@@ -347,9 +342,19 @@ void comedi_buf_memcpy_from(struct comedi_async *async, unsigned int offset,
/* drivers.c - general comedi driver functions */
+int comedi_dio_insn_config(struct comedi_device *, struct comedi_subdevice *,
+ struct comedi_insn *, unsigned int *data,
+ unsigned int mask);
+
+void *comedi_alloc_devpriv(struct comedi_device *, size_t);
int comedi_alloc_subdevices(struct comedi_device *, int);
-void comedi_spriv_free(struct comedi_device *, int subdev_num);
+int comedi_load_firmware(struct comedi_device *, struct device *,
+ const char *name,
+ int (*cb)(struct comedi_device *,
+ const u8 *data, size_t size,
+ unsigned long context),
+ unsigned long context);
int __comedi_request_region(struct comedi_device *,
unsigned long start, unsigned long len);
@@ -362,7 +367,7 @@ int comedi_auto_config(struct device *, struct comedi_driver *,
void comedi_auto_unconfig(struct device *);
int comedi_driver_register(struct comedi_driver *);
-int comedi_driver_unregister(struct comedi_driver *);
+void comedi_driver_unregister(struct comedi_driver *);
/**
* module_comedi_driver() - Helper macro for registering a comedi driver
@@ -385,7 +390,6 @@ int comedi_driver_unregister(struct comedi_driver *);
*/
#define PCI_VENDOR_ID_KOLTER 0x1001
#define PCI_VENDOR_ID_ICP 0x104c
-#define PCI_VENDOR_ID_AMCC 0x10e8
#define PCI_VENDOR_ID_DT 0x1116
#define PCI_VENDOR_ID_IOTECH 0x1616
#define PCI_VENDOR_ID_CONTEC 0x1221
@@ -489,6 +493,7 @@ struct usb_driver;
struct usb_interface;
struct usb_interface *comedi_to_usb_interface(struct comedi_device *);
+struct usb_device *comedi_to_usb_dev(struct comedi_device *);
int comedi_usb_auto_config(struct usb_interface *, struct comedi_driver *,
unsigned long context);
diff --git a/drivers/staging/comedi/comedilib.h b/drivers/staging/comedi/comedilib.h
index ca92c43..56baf85 100644
--- a/drivers/staging/comedi/comedilib.h
+++ b/drivers/staging/comedi/comedilib.h
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
#ifndef _LINUX_COMEDILIB_H
@@ -26,10 +21,13 @@
struct comedi_device *comedi_open(const char *path);
int comedi_close(struct comedi_device *dev);
+int comedi_dio_get_config(struct comedi_device *dev, unsigned int subdev,
+ unsigned int chan, unsigned int *io);
int comedi_dio_config(struct comedi_device *dev, unsigned int subdev,
unsigned int chan, unsigned int io);
-int comedi_dio_bitfield(struct comedi_device *dev, unsigned int subdev,
- unsigned int mask, unsigned int *bits);
+int comedi_dio_bitfield2(struct comedi_device *dev, unsigned int subdev,
+ unsigned int mask, unsigned int *bits,
+ unsigned int base_channel);
int comedi_find_subdevice_by_type(struct comedi_device *dev, int type,
unsigned int subd);
int comedi_get_n_channels(struct comedi_device *dev, unsigned int subdevice);
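For in-kernel users of kcomedilib, comedi_dio_get_config() is new and comedi_dio_bitfield2() replaces comedi_dio_bitfield() with an explicit base_channel. A sketch of a kernel-side caller, assuming a hypothetical board on /dev/comedi0 with a DIO subdevice 0:

/* Hypothetical kcomedilib user: set channel 0 to output and drive it high. */
static int my_dio_toggle(void)
{
	struct comedi_device *dev;
	unsigned int bits = 0x1;
	int ret;

	dev = comedi_open("/dev/comedi0");	/* assumed device node */
	if (!dev)
		return -ENODEV;

	ret = comedi_dio_config(dev, 0, 0, COMEDI_OUTPUT);
	if (ret >= 0)
		ret = comedi_dio_bitfield2(dev, 0, 0x1, &bits, 0);

	comedi_close(dev);
	return ret < 0 ? ret : 0;
}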
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 06d190f..317a821 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
#include <linux/device.h>
@@ -28,7 +23,6 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fcntl.h>
-#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>
@@ -38,11 +32,13 @@
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/interrupt.h>
+#include <linux/firmware.h>
#include "comedidev.h"
#include "comedi_internal.h"
struct comedi_driver *comedi_drivers;
+DEFINE_MUTEX(comedi_drivers_list_lock);
int comedi_set_hw_dev(struct comedi_device *dev, struct device *hw_dev)
{
@@ -61,6 +57,18 @@ static void comedi_clear_hw_dev(struct comedi_device *dev)
dev->hw_dev = NULL;
}
+/**
+ * comedi_alloc_devpriv() - Allocate memory for the device private data.
+ * @dev: comedi_device struct
+ * @size: size of the memory to allocate
+ */
+void *comedi_alloc_devpriv(struct comedi_device *dev, size_t size)
+{
+ dev->private = kzalloc(size, GFP_KERNEL);
+ return dev->private;
+}
+EXPORT_SYMBOL_GPL(comedi_alloc_devpriv);
+
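comedi_alloc_devpriv() wraps the kzalloc() plus dev->private assignment that attach routines kept repeating; the 8255_pci.c hunk below shows the in-tree conversion. A minimal sketch with a hypothetical private structure:

/* Hypothetical attach fragment: allocate core-tracked private data. */
struct my_private {
	void __iomem *mmio_base;
};

static int my_alloc_private(struct comedi_device *dev)
{
	struct my_private *devpriv;

	devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
	if (!devpriv)
		return -ENOMEM;

	/* zero-initialized, and also reachable later as dev->private */
	return 0;
}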
int comedi_alloc_subdevices(struct comedi_device *dev, int num_subdevices)
{
struct comedi_subdevice *s;
@@ -87,18 +95,6 @@ int comedi_alloc_subdevices(struct comedi_device *dev, int num_subdevices)
}
EXPORT_SYMBOL_GPL(comedi_alloc_subdevices);
-void comedi_spriv_free(struct comedi_device *dev, int subdev_num)
-{
- struct comedi_subdevice *s;
-
- if (dev->subdevices && subdev_num < dev->n_subdevices) {
- s = &dev->subdevices[subdev_num];
- kfree(s->private);
- s->private = NULL;
- }
-}
-EXPORT_SYMBOL_GPL(comedi_spriv_free);
-
static void cleanup_device(struct comedi_device *dev)
{
int i;
@@ -107,6 +103,8 @@ static void cleanup_device(struct comedi_device *dev)
if (dev->subdevices) {
for (i = 0; i < dev->n_subdevices; i++) {
s = &dev->subdevices[i];
+ if (s->runflags & SRF_FREE_SPRIV)
+ kfree(s->private);
comedi_free_subdevice_minor(s);
if (s->async) {
comedi_buf_alloc(dev, s, 0);
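Subdevice private data obtained from comedi_alloc_spriv() carries the new SRF_FREE_SPRIV flag, so cleanup_device() frees it here and drivers can drop their comedi_spriv_free() calls, as the 8255.c hunk further down does. A sketch of the allocation side with a hypothetical subdevice setup function:

/* Hypothetical subdevice init: private data freed by the core on detach. */
struct my_spriv {
	unsigned long iobase;
};

static int my_subdev_init(struct comedi_subdevice *s, unsigned long iobase)
{
	struct my_spriv *spriv;

	spriv = comedi_alloc_spriv(s, sizeof(*spriv));
	if (!spriv)
		return -ENOMEM;

	spriv->iobase = iobase;
	/* no kfree() needed in (*detach): SRF_FREE_SPRIV handles it */
	return 0;
}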
@@ -152,6 +150,46 @@ int insn_inval(struct comedi_device *dev, struct comedi_subdevice *s,
return -EINVAL;
}
+/**
+ * comedi_dio_insn_config() - boilerplate (*insn_config) for DIO subdevices.
+ * @dev: comedi_device struct
+ * @s: comedi_subdevice struct
+ * @insn: comedi_insn struct
+ * @data: parameters for the @insn
+ * @mask: io_bits mask for grouped channels
+ */
+int comedi_dio_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data,
+ unsigned int mask)
+{
+ unsigned int chan_mask = 1 << CR_CHAN(insn->chanspec);
+
+ if (!mask)
+ mask = chan_mask;
+
+ switch (data[0]) {
+ case INSN_CONFIG_DIO_INPUT:
+ s->io_bits &= ~mask;
+ break;
+
+ case INSN_CONFIG_DIO_OUTPUT:
+ s->io_bits |= mask;
+ break;
+
+ case INSN_CONFIG_DIO_QUERY:
+ data[1] = (s->io_bits & mask) ? COMEDI_OUTPUT : COMEDI_INPUT;
+ return insn->n;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(comedi_dio_insn_config);
+
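comedi_dio_insn_config() factors out the INSN_CONFIG_DIO_INPUT/OUTPUT/QUERY switch that DIO drivers duplicated; the 8255.c hunk further down shows the in-tree conversion. A sketch of a driver (*insn_config) built on it, with a hypothetical hardware-update helper and per-channel direction control (mask = 0):

/* Hypothetical DIO (*insn_config) using the new boilerplate helper. */
static int my_dio_insn_config(struct comedi_device *dev,
			      struct comedi_subdevice *s,
			      struct comedi_insn *insn, unsigned int *data)
{
	int ret;

	/* mask = 0: each channel's direction is programmed individually */
	ret = comedi_dio_insn_config(dev, s, insn, data, 0);
	if (ret)
		return ret;	/* error, or a completed DIO_QUERY */

	my_update_dio_direction(dev, s->io_bits);	/* assumed helper */
	return insn->n;
}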
static int insn_rw_emulate_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
@@ -352,6 +390,38 @@ static void comedi_report_boards(struct comedi_driver *driv)
}
/**
+ * comedi_load_firmware() - Request and load firmware for a device.
+ * @dev: comedi_device struct
+ * @device: device struct passed to request_firmware()
+ * @name: the name of the firmware image
+ * @cb: callback to upload the firmware image
+ * @context: private context from the driver
+ */
+int comedi_load_firmware(struct comedi_device *dev,
+ struct device *device,
+ const char *name,
+ int (*cb)(struct comedi_device *dev,
+ const u8 *data, size_t size,
+ unsigned long context),
+ unsigned long context)
+{
+ const struct firmware *fw;
+ int ret;
+
+ if (!cb)
+ return -EINVAL;
+
+ ret = request_firmware(&fw, name, device);
+ if (ret == 0) {
+ ret = cb(dev, fw->data, fw->size, context);
+ release_firmware(fw);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(comedi_load_firmware);
+
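comedi_load_firmware() pairs request_firmware()/release_firmware() with a driver-supplied upload callback, so drivers only implement the device-specific transfer. A sketch of the callback side; my_write_firmware_to_hw() and the "my_fw.bin" name are placeholders, not part of this patch:

/* Hypothetical firmware upload callback and a call site in auto_attach. */
static int my_upload_firmware(struct comedi_device *dev,
			      const u8 *data, size_t size,
			      unsigned long context)
{
	/* context could select between several images; unused here */
	return my_write_firmware_to_hw(dev, data, size);
}

static int my_request_firmware(struct comedi_device *dev,
			       struct usb_interface *intf)
{
	return comedi_load_firmware(dev, &intf->dev, "my_fw.bin",
				    my_upload_firmware, 0);
}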
+/**
* __comedi_request_region() - Request an I/O region for a legacy driver.
* @dev: comedi_device struct
* @start: base address of the I/O region
@@ -424,6 +494,7 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (dev->attached)
return -EBUSY;
+ mutex_lock(&comedi_drivers_list_lock);
for (driv = comedi_drivers; driv; driv = driv->next) {
if (!try_module_get(driv->module))
continue;
@@ -444,7 +515,8 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
comedi_report_boards(driv);
module_put(driv->module);
}
- return -EIO;
+ ret = -EIO;
+ goto out;
}
if (driv->attach == NULL) {
/* driver does not support manual configuration */
@@ -452,7 +524,8 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
"driver '%s' does not support attach using comedi_config\n",
driv->driver_name);
module_put(driv->module);
- return -ENOSYS;
+ ret = -ENOSYS;
+ goto out;
}
/* initialize dev->driver here so
* comedi_error() can be called from attach */
@@ -464,9 +537,11 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
ret = comedi_device_postconfig(dev);
if (ret < 0) {
comedi_device_detach(dev);
- module_put(dev->driver->module);
+ module_put(driv->module);
}
/* On success, the driver module count has been incremented. */
+out:
+ mutex_unlock(&comedi_drivers_list_lock);
return ret;
}
@@ -523,18 +598,34 @@ EXPORT_SYMBOL_GPL(comedi_auto_unconfig);
int comedi_driver_register(struct comedi_driver *driver)
{
+ mutex_lock(&comedi_drivers_list_lock);
driver->next = comedi_drivers;
comedi_drivers = driver;
+ mutex_unlock(&comedi_drivers_list_lock);
return 0;
}
EXPORT_SYMBOL_GPL(comedi_driver_register);
-int comedi_driver_unregister(struct comedi_driver *driver)
+void comedi_driver_unregister(struct comedi_driver *driver)
{
struct comedi_driver *prev;
int i;
+ /* unlink the driver */
+ mutex_lock(&comedi_drivers_list_lock);
+ if (comedi_drivers == driver) {
+ comedi_drivers = driver->next;
+ } else {
+ for (prev = comedi_drivers; prev->next; prev = prev->next) {
+ if (prev->next == driver) {
+ prev->next = driver->next;
+ break;
+ }
+ }
+ }
+ mutex_unlock(&comedi_drivers_list_lock);
+
/* check for devices using this driver */
for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
struct comedi_device *dev = comedi_dev_from_minor(i);
@@ -552,18 +643,5 @@ int comedi_driver_unregister(struct comedi_driver *driver)
}
mutex_unlock(&dev->mutex);
}
-
- if (comedi_drivers == driver) {
- comedi_drivers = driver->next;
- return 0;
- }
-
- for (prev = comedi_drivers; prev->next; prev = prev->next) {
- if (prev->next == driver) {
- prev->next = driver->next;
- return 0;
- }
- }
- return -EINVAL;
}
EXPORT_SYMBOL_GPL(comedi_driver_unregister);
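With comedi_driver_unregister() returning void, module exit paths have no error left to propagate, and most drivers keep using the module_comedi_driver() helper macro so they never touch register/unregister directly. A sketch of that boilerplate for a hypothetical legacy driver:

/* Hypothetical driver registration: the unregister path cannot fail now. */
static struct comedi_driver my_driver = {
	.driver_name	= "my_board",		/* placeholder name */
	.module		= THIS_MODULE,
	.attach		= my_attach,		/* assumed attach handler */
	.detach		= comedi_legacy_detach,
};
module_comedi_driver(my_driver);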
diff --git a/drivers/staging/comedi/drivers/8253.h b/drivers/staging/comedi/drivers/8253.h
index 429e0d6..3abedcd 100644
--- a/drivers/staging/comedi/drivers/8253.h
+++ b/drivers/staging/comedi/drivers/8253.h
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
#ifndef _8253_H
diff --git a/drivers/staging/comedi/drivers/8255.c b/drivers/staging/comedi/drivers/8255.c
index 1d48aa6..2f070fd 100644
--- a/drivers/staging/comedi/drivers/8255.c
+++ b/drivers/staging/comedi/drivers/8255.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: 8255
@@ -78,11 +73,9 @@ I/O port base address can be found in the output of 'lspci -v'.
will copy the latched value to a Comedi buffer.
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-#include <linux/slab.h>
-
#include "comedi_fc.h"
#include "8255.h"
@@ -191,39 +184,29 @@ static void subdev_8255_do_config(struct comedi_device *dev,
static int subdev_8255_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
+ unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int mask;
- unsigned int bits;
+ int ret;
- mask = 1 << CR_CHAN(insn->chanspec);
- if (mask & 0x0000ff)
- bits = 0x0000ff;
- else if (mask & 0x00ff00)
- bits = 0x00ff00;
- else if (mask & 0x0f0000)
- bits = 0x0f0000;
+ if (chan < 8)
+ mask = 0x0000ff;
+ else if (chan < 16)
+ mask = 0x00ff00;
+ else if (chan < 20)
+ mask = 0x0f0000;
else
- bits = 0xf00000;
-
- switch (data[0]) {
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~bits;
- break;
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= bits;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (s->io_bits & bits) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- }
+ mask = 0xf00000;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
subdev_8255_do_config(dev, s);
- return 1;
+ return insn->n;
}
static int subdev_8255_cmdtest(struct comedi_device *dev,
@@ -290,15 +273,13 @@ int subdev_8255_init(struct comedi_device *dev, struct comedi_subdevice *s,
{
struct subdev_8255_private *spriv;
- spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
+ spriv = comedi_alloc_spriv(s, sizeof(*spriv));
if (!spriv)
return -ENOMEM;
spriv->iobase = iobase;
spriv->io = io ? io : subdev_8255_io;
- s->private = spriv;
-
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->n_chan = 24;
@@ -391,7 +372,6 @@ static void dev_8255_detach(struct comedi_device *dev)
spriv = s->private;
release_region(spriv->iobase, _8255_SIZE);
}
- comedi_spriv_free(dev, i);
}
}
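The 8255 subdevice helper keeps its external interface: board drivers still pass an optional I/O callback and a base address, and after this patch they need no subdevice cleanup of their own. A sketch of a hypothetical caller, assuming the historical argument order subdev_8255_init(dev, s, io, iobase):

/* Hypothetical board attach: one DIO subdevice backed by an 8255. */
static int my_setup_8255(struct comedi_device *dev, unsigned long iobase)
{
	struct comedi_subdevice *s = &dev->subdevices[0];

	/* NULL io callback selects the default port-I/O accessor */
	return subdev_8255_init(dev, s, NULL, iobase);
}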
diff --git a/drivers/staging/comedi/drivers/8255.h b/drivers/staging/comedi/drivers/8255.h
index 0f6e749..4f16ea7 100644
--- a/drivers/staging/comedi/drivers/8255.h
+++ b/drivers/staging/comedi/drivers/8255.h
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
#ifndef _8255_H
diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c
index 76dec96..432e3f9 100644
--- a/drivers/staging/comedi/drivers/8255_pci.c
+++ b/drivers/staging/comedi/drivers/8255_pci.c
@@ -19,10 +19,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
@@ -54,6 +50,7 @@ Interrupt support for these boards is also not currently supported.
Configuration Options: not applicable, uses PCI auto config
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
@@ -190,10 +187,9 @@ static int pci_8255_auto_attach(struct comedi_device *dev,
dev->board_ptr = board;
dev->board_name = board->name;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
@@ -242,10 +238,7 @@ static int pci_8255_auto_attach(struct comedi_device *dev,
static void pci_8255_detach(struct comedi_device *dev)
{
struct pci_8255_private *devpriv = dev->private;
- int i;
- for (i = 0; i < dev->n_subdevices; i++)
- comedi_spriv_free(dev, i);
if (devpriv && devpriv->mmio_base)
iounmap(devpriv->mmio_base);
comedi_pci_disable(dev);
diff --git a/drivers/staging/comedi/drivers/Makefile b/drivers/staging/comedi/drivers/Makefile
index 57e984f..94cbd26 100644
--- a/drivers/staging/comedi/drivers/Makefile
+++ b/drivers/staging/comedi/drivers/Makefile
@@ -11,19 +11,16 @@ obj-$(CONFIG_COMEDI_SERIAL2002) += serial2002.o
obj-$(CONFIG_COMEDI_SKEL) += skel.o
# Comedi ISA drivers
-obj-$(CONFIG_COMEDI_ACL7225B) += acl7225b.o
obj-$(CONFIG_COMEDI_AMPLC_DIO200_ISA) += amplc_dio200.o
obj-$(CONFIG_COMEDI_AMPLC_PC263_ISA) += amplc_pc263.o
obj-$(CONFIG_COMEDI_PCL711) += pcl711.o
obj-$(CONFIG_COMEDI_PCL724) += pcl724.o
-obj-$(CONFIG_COMEDI_PCL725) += pcl725.o
obj-$(CONFIG_COMEDI_PCL726) += pcl726.o
obj-$(CONFIG_COMEDI_PCL730) += pcl730.o
obj-$(CONFIG_COMEDI_PCL812) += pcl812.o
obj-$(CONFIG_COMEDI_PCL816) += pcl816.o
obj-$(CONFIG_COMEDI_PCL818) += pcl818.o
obj-$(CONFIG_COMEDI_PCM3724) += pcm3724.o
-obj-$(CONFIG_COMEDI_PCM3730) += pcm3730.o
obj-$(CONFIG_COMEDI_RTI800) += rti800.o
obj-$(CONFIG_COMEDI_RTI802) += rti802.o
obj-$(CONFIG_COMEDI_DAS16M1) += das16m1.o
@@ -42,6 +39,7 @@ obj-$(CONFIG_COMEDI_DMM32AT) += dmm32at.o
obj-$(CONFIG_COMEDI_FL512) += fl512.o
obj-$(CONFIG_COMEDI_AIO_AIO12_8) += aio_aio12_8.o
obj-$(CONFIG_COMEDI_AIO_IIRO_16) += aio_iiro_16.o
+obj-$(CONFIG_COMEDI_II_PCI20KC) += ii_pci20kc.o
obj-$(CONFIG_COMEDI_C6XDIGIO) += c6xdigio.o
obj-$(CONFIG_COMEDI_MPC624) += mpc624.o
obj-$(CONFIG_COMEDI_ADQ12B) += adq12b.o
@@ -55,6 +53,7 @@ obj-$(CONFIG_COMEDI_PCMMIO) += pcmmio.o
obj-$(CONFIG_COMEDI_PCMUIO) += pcmuio.o
obj-$(CONFIG_COMEDI_MULTIQ3) += multiq3.o
obj-$(CONFIG_COMEDI_POC) += poc.o
+obj-$(CONFIG_COMEDI_S526) += s526.o
# Comedi PCI drivers
obj-$(CONFIG_COMEDI_8255_PCI) += 8255_pci.o
@@ -91,7 +90,6 @@ obj-$(CONFIG_COMEDI_DYNA_PCI10XX) += dyna_pci10xx.o
obj-$(CONFIG_COMEDI_UNIOXX5) += unioxx5.o
obj-$(CONFIG_COMEDI_GSC_HPDI) += gsc_hpdi.o
obj-$(CONFIG_COMEDI_ICP_MULTI) += icp_multi.o
-obj-$(CONFIG_COMEDI_II_PCI20KC) += ii_pci20kc.o
obj-$(CONFIG_COMEDI_DAQBOARD2000) += daqboard2000.o
obj-$(CONFIG_COMEDI_JR3_PCI) += jr3_pci.o
obj-$(CONFIG_COMEDI_KE_COUNTER) += ke_counter.o
@@ -110,7 +108,6 @@ obj-$(CONFIG_COMEDI_NI_LABPC_PCI) += ni_labpc_pci.o
obj-$(CONFIG_COMEDI_NI_PCIDIO) += ni_pcidio.o
obj-$(CONFIG_COMEDI_NI_PCIMIO) += ni_pcimio.o
obj-$(CONFIG_COMEDI_RTD520) += rtd520.o
-obj-$(CONFIG_COMEDI_S526) += s526.o
obj-$(CONFIG_COMEDI_S626) += s626.o
obj-$(CONFIG_COMEDI_SSV_DNP) += ssv_dnp.o
@@ -135,6 +132,7 @@ obj-$(CONFIG_COMEDI_MITE) += mite.o
obj-$(CONFIG_COMEDI_NI_TIO) += ni_tio.o
obj-$(CONFIG_COMEDI_NI_TIOCMD) += ni_tiocmd.o
obj-$(CONFIG_COMEDI_NI_LABPC) += ni_labpc.o
+obj-$(CONFIG_COMEDI_NI_LABPC_ISADMA) += ni_labpc_isadma.o
obj-$(CONFIG_COMEDI_8255) += 8255.o
obj-$(CONFIG_COMEDI_AMPLC_DIO200) += amplc_dio200_common.o
diff --git a/drivers/staging/comedi/drivers/acl7225b.c b/drivers/staging/comedi/drivers/acl7225b.c
deleted file mode 100644
index 9e2c7ae..0000000
--- a/drivers/staging/comedi/drivers/acl7225b.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * comedi/drivers/acl7225b.c
- * Driver for Adlink NuDAQ ACL-7225b and clones
- * José Luis Sánchez
- */
-/*
-Driver: acl7225b
-Description: Adlink NuDAQ ACL-7225b & compatibles
-Author: José Luis Sánchez (jsanchezv@teleline.es)
-Status: testing
-Devices: [Adlink] ACL-7225b (acl7225b), [ICP] P16R16DIO (p16r16dio)
-*/
-
-#include "../comedidev.h"
-
-#include <linux/ioport.h>
-
-#define ACL7225_RIO_LO 0 /* Relays input/output low byte (R0-R7) */
-#define ACL7225_RIO_HI 1 /* Relays input/output high byte (R8-R15) */
-#define ACL7225_DI_LO 2 /* Digital input low byte (DI0-DI7) */
-#define ACL7225_DI_HI 3 /* Digital input high byte (DI8-DI15) */
-
-struct acl7225b_boardinfo {
- const char *name;
- int io_range;
-};
-
-static const struct acl7225b_boardinfo acl7225b_boards[] = {
- {
- .name = "acl7225b",
- .io_range = 8, /* only 4 are used */
- }, {
- .name = "p16r16dio",
- .io_range = 4,
- },
-};
-
-static int acl7225b_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned long reg = (unsigned long)s->private;
- unsigned int mask = data[0];
- unsigned int bits = data[1];
-
- if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
-
- if (mask & 0x00ff)
- outb(s->state & 0xff, dev->iobase + reg);
- if (mask & 0xff00)
- outb((s->state >> 8), dev->iobase + reg + 1);
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int acl7225b_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned long reg = (unsigned long)s->private;
-
- data[1] = inb(dev->iobase + reg) |
- (inb(dev->iobase + reg + 1) << 8);
-
- return insn->n;
-}
-
-static int acl7225b_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- const struct acl7225b_boardinfo *board = comedi_board(dev);
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], board->io_range);
- if (ret)
- return ret;
-
- ret = comedi_alloc_subdevices(dev, 3);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- /* Relays outputs */
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->maxdata = 1;
- s->n_chan = 16;
- s->insn_bits = acl7225b_do_insn_bits;
- s->range_table = &range_digital;
- s->private = (void *)ACL7225_RIO_LO;
-
- s = &dev->subdevices[1];
- /* Relays status */
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->maxdata = 1;
- s->n_chan = 16;
- s->insn_bits = acl7225b_di_insn_bits;
- s->range_table = &range_digital;
- s->private = (void *)ACL7225_RIO_LO;
-
- s = &dev->subdevices[2];
- /* Isolated digital inputs */
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->maxdata = 1;
- s->n_chan = 16;
- s->insn_bits = acl7225b_di_insn_bits;
- s->range_table = &range_digital;
- s->private = (void *)ACL7225_DI_LO;
-
- return 0;
-}
-
-static struct comedi_driver acl7225b_driver = {
- .driver_name = "acl7225b",
- .module = THIS_MODULE,
- .attach = acl7225b_attach,
- .detach = comedi_legacy_detach,
- .board_name = &acl7225b_boards[0].name,
- .num_names = ARRAY_SIZE(acl7225b_boards),
- .offset = sizeof(struct acl7225b_boardinfo),
-};
-module_comedi_driver(acl7225b_driver);
-
-MODULE_DESCRIPTION("Comedi: NuDAQ ACL-7225B, 16 Relay & 16 Isolated DI Card");
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi-data/APCI1710_82x54.c b/drivers/staging/comedi/drivers/addi-data/APCI1710_82x54.c
deleted file mode 100644
index d070208..0000000
--- a/drivers/staging/comedi/drivers/addi-data/APCI1710_82x54.c
+++ /dev/null
@@ -1,1068 +0,0 @@
-/*
- * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
- *
- * ADDI-DATA GmbH
- * Dieselstrasse 3
- * D-77833 Ottersweier
- * Tel: +19(0)7223/9493-0
- * Fax: +49(0)7223/9493-92
- * http://www.addi-data.com
- * info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-/*
- | Description : APCI-1710 82X54 timer module |
-*/
-
-#define APCI1710_PCI_BUS_CLOCK 0
-#define APCI1710_FRONT_CONNECTOR_INPUT 1
-#define APCI1710_TIMER_READVALUE 0
-#define APCI1710_TIMER_GETOUTPUTLEVEL 1
-#define APCI1710_TIMER_GETPROGRESSSTATUS 2
-#define APCI1710_TIMER_WRITEVALUE 3
-
-#define APCI1710_TIMER_READINTERRUPT 1
-#define APCI1710_TIMER_READALLTIMER 2
-
-#ifndef APCI1710_10MHZ
-#define APCI1710_10MHZ 10
-#endif
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_InitTimer |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_TimerNbr, |
-| unsigned char_ b_TimerMode, |
-| ULONG_ ul_ReloadValue, |
-| unsigned char_ b_InputClockSelection, |
-| unsigned char_ b_InputClockLevel, |
-| unsigned char_ b_OutputLevel, |
-| unsigned char_ b_HardwareGateLevel)
-int i_InsnConfig_InitTimer(struct comedi_device *dev,struct comedi_subdevice *s,
- struct comedi_insn *insn,unsigned int *data)
-|
-+----------------------------------------------------------------------------+
-| Task : Configure the Timer (b_TimerNbr) operating mode |
-| (b_TimerMode) from selected module (b_ModulNbr). |
-| You must calling this function be for you call any |
-| other function witch access of the timer. |
-| |
-| |
-| Timer mode description table |
-| |
-|+--------+-----------------------------+--------------+--------------------+|
-||Selected+ Mode description +u_ReloadValue | Hardware gate input||
-|| mode | | description | action ||
-|+--------+-----------------------------+--------------+--------------------+|
-|| |Mode 0 is typically used | | ||
-|| |for event counting. After | | ||
-|| |the initialisation, OUT | | ||
-|| |is initially low, and | | ||
-|| 0 |will remain low until the |Start counting| Hardware gate ||
-|| |counter reaches zero. | value | ||
-|| |OUT then goes high and | | ||
-|| |remains high until a new | | ||
-|| |count is written. See | | ||
-|| |"i_APCI1710_WriteTimerValue" | | ||
-|| |function. | | ||
-|+--------+-----------------------------+--------------+--------------------+|
-|| |Mode 1 is similar to mode 0 | | ||
-|| |except for the gate input | | ||
-|| 1 |action. The gate input is not|Start counting| Hardware trigger ||
-|| |used for enabled or disabled | value | ||
-|| |the timer. | | ||
-|| |The gate input is used for | | ||
-|| |triggered the timer. | | ||
-|+--------+-----------------------------+--------------+--------------------+|
-|| |This mode functions like a | | ||
-|| |divide-by-ul_ReloadValue | | ||
-|| |counter. It is typically used| | ||
-|| |to generate a real time clock| | ||
-|| |interrupt. OUT will initially| | ||
-|| 2 |be high after the | Division | Hardware gate ||
-|| |initialisation. When the | factor | ||
-|| |initial count has decremented| | ||
-|| |to 1, OUT goes low for one | | ||
-|| |CLK pule. OUT then goes high | | ||
-|| |again, the counter reloads | | ||
-|| |the initial count | | ||
-|| |(ul_ReloadValue) and the | | ||
-|| |process is repeated. | | ||
-|| |This action can generated a | | ||
-|| |interrupt. See function | | ||
-|| |"i_APCI1710_SetBoardInt- | | ||
-|| |RoutineX" | | ||
-|| |and "i_APCI1710_EnableTimer" | | ||
-|+--------+-----------------------------+--------------+--------------------+|
-|| |Mode 3 is typically used for | | ||
-|| |baud rate generation. This | | ||
-|| |mode is similar to mode 2 | | ||
-|| |except for the duty cycle of | | ||
-|| 3 |OUT. OUT will initially be | Division | Hardware gate ||
-|| |high after the initialisation| factor | ||
-|| |When half the initial count | | ||
-|| |(ul_ReloadValue) has expired,| | ||
-|| |OUT goes low for the | | ||
-|| |remainder of the count. The | | ||
-|| |mode is periodic; the | | ||
-|| |sequence above is repeated | | ||
-|| |indefinitely. | | ||
-|+--------+-----------------------------+--------------+--------------------+|
-|| |OUT will be initially high | | ||
-|| |after the initialisation. | | ||
-|| |When the initial count | | ||
-|| 4 |expires OUT will go low for |Start counting| Hardware gate ||
-|| |one CLK pulse and then go | value | ||
-|| |high again. | | ||
-|| |The counting sequences is | | ||
-|| |triggered by writing a new | | ||
-|| |value. See | | ||
-|| |"i_APCI1710_WriteTimerValue" | | ||
-|| |function. If a new count is | | ||
-|| |written during counting, | | ||
-|| |it will be loaded on the | | ||
-|| |next CLK pulse | | ||
-|+--------+-----------------------------+--------------+--------------------+|
-|| |Mode 5 is similar to mode 4 | | ||
-|| |except for the gate input | | ||
-|| |action. The gate input is not| | ||
-|| 5 |used for enabled or disabled |Start counting| Hardware trigger ||
-|| |the timer. The gate input is | value | ||
-|| |used for triggered the timer.| | ||
-|+--------+-----------------------------+--------------+--------------------+|
-| |
-| |
-| |
-| Input clock selection table |
-| |
-| +--------------------------------+------------------------------------+ |
-| | b_InputClockSelection | Description | |
-| | parameter | | |
-| +--------------------------------+------------------------------------+ |
-| | APCI1710_PCI_BUS_CLOCK | For the timer input clock, the PCI | |
-| | | bus clock / 4 is used. This PCI bus| |
-| | | clock can be 30MHz or 33MHz. For | |
-| | | Timer 0 only this selection are | |
-| | | available. | |
-| +--------------------------------+------------------------------------+ |
-| | APCI1710_ FRONT_CONNECTOR_INPUT| Of the front connector you have the| |
-| | | possibility to inject a input clock| |
-| | | for Timer 1 or Timer 2. The source | |
-| | | from this clock can eat the output | |
-| | | clock from Timer 0 or any other | |
-| | | clock source. | |
-| +--------------------------------+------------------------------------+ |
-| |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board |
-| APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to |
-| configure (0 to 3) |
-| unsigned char_ b_TimerNbr : Timer number to |
-| configure (0 to 2) |
-| unsigned char_ b_TimerMode : Timer mode selection |
-| (0 to 5) |
-| 0: Interrupt on terminal|
-| count |
-| 1: Hardware |
-| retriggerable one- |
-| shot |
-| 2: Rate generator |
-| 3: Square wave mode |
-| 4: Software triggered |
-| strobe |
-| 5: Hardware triggered |
-| strobe |
-| See timer mode |
-| description table. |
-| ULONG_ ul_ReloadValue : Start counting value |
-| or division factor |
-| See timer mode |
-| description table. |
-| unsigned char_ b_InputClockSelection : Selection from input |
-| timer clock. |
-| See input clock |
-| selection table. |
-| unsigned char_ b_InputClockLevel : Selection from input |
-| clock level. |
-| 0 : Low active |
-| (Input inverted) |
-| 1 : High active |
-| unsigned char_ b_OutputLevel, : Selection from output |
-| clock level. |
-| 0 : Low active |
-| 1 : High active |
-| (Output inverted) |
-| unsigned char_ b_HardwareGateLevel : Selection from |
-| hardware gate level. |
-| 0 : Low active |
-| (Input inverted) |
-| 1 : High active |
-| If you will not used |
-| the hardware gate set |
-| this value to 0.
-|b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_TimerNbr = (unsigned char) CR_CHAN(insn->chanspec);
- b_TimerMode = (unsigned char) data[0];
- ul_ReloadValue = (unsigned int) data[1];
- b_InputClockSelection =(unsigned char) data[2];
- b_InputClockLevel =(unsigned char) data[3];
- b_OutputLevel =(unsigned char) data[4];
- b_HardwareGateLevel =(unsigned char) data[5];
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: Timer selection wrong |
-| -4: The module is not a TIMER module |
-| -5: Timer mode selection is wrong |
-| -6: Input timer clock selection is wrong |
-| -7: Selection from input clock level is wrong |
-| -8: Selection from output clock level is wrong |
-| -9: Selection from hardware gate level is wrong |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_InsnConfigInitTimer(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned char b_ModulNbr;
- unsigned char b_TimerNbr;
- unsigned char b_TimerMode;
- unsigned int ul_ReloadValue;
- unsigned char b_InputClockSelection;
- unsigned char b_InputClockLevel;
- unsigned char b_OutputLevel;
- unsigned char b_HardwareGateLevel;
-
- /* BEGIN JK 27.10.2003 : Add the possibility to use a 40 Mhz quartz */
- unsigned int dw_Test = 0;
- /* END JK 27.10.2003 : Add the possibility to use a 40 Mhz quartz */
-
- i_ReturnValue = insn->n;
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_TimerNbr = (unsigned char) CR_CHAN(insn->chanspec);
- b_TimerMode = (unsigned char) data[0];
- ul_ReloadValue = (unsigned int) data[1];
- b_InputClockSelection = (unsigned char) data[2];
- b_InputClockLevel = (unsigned char) data[3];
- b_OutputLevel = (unsigned char) data[4];
- b_HardwareGateLevel = (unsigned char) data[5];
-
- /* Test the module number */
- if (b_ModulNbr < 4) {
- /* Test if 82X54 timer */
- if ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_82X54_TIMER) {
- /* Test the timer number */
-
- if (b_TimerNbr <= 2) {
- /* Test the timer mode */
- if (b_TimerMode <= 5) {
- /* BEGIN JK 27.10.2003 : Add the possibility to use a 40 Mhz quartz */
- /* Test te imput clock selection */
- /*
- if (((b_TimerNbr == 0) && (b_InputClockSelection == 0)) ||
- ((b_TimerNbr != 0) && ((b_InputClockSelection == 0) || (b_InputClockSelection == 1))))
- */
-
- if (((b_TimerNbr == 0) &&
- (b_InputClockSelection == APCI1710_PCI_BUS_CLOCK)) ||
- ((b_TimerNbr == 0) &&
- (b_InputClockSelection == APCI1710_10MHZ)) ||
- ((b_TimerNbr != 0) &&
- ((b_InputClockSelection == APCI1710_PCI_BUS_CLOCK) ||
- (b_InputClockSelection == APCI1710_FRONT_CONNECTOR_INPUT) ||
- (b_InputClockSelection == APCI1710_10MHZ)))) {
- /* BEGIN JK 27.10.2003 : Add the possibility to use a 40 Mhz quartz */
- if (((b_InputClockSelection == APCI1710_10MHZ) &&
- ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0x0000FFFFUL) >= 0x3131)) ||
- (b_InputClockSelection != APCI1710_10MHZ)) {
- /* END JK 27.10.2003 : Add the possibility to use a 40 Mhz quartz */
- /* Test the input clock level selection */
-
- if ((b_InputClockLevel == 0) ||
- (b_InputClockLevel == 1)) {
- /* Test the output clock level selection */
- if ((b_OutputLevel == 0) || (b_OutputLevel == 1)) {
- /* Test the hardware gate level selection */
- if ((b_HardwareGateLevel == 0) || (b_HardwareGateLevel == 1)) {
- /* BEGIN JK 27.10.03 : Add the possibility to use a 40 Mhz quartz */
- /* Test if version > 1.1 and clock selection = 10MHz */
- if ((b_InputClockSelection == APCI1710_10MHZ) && ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0x0000FFFFUL) > 0x3131)) {
- /* Test if 40MHz quartz on board */
- dw_Test = inl(devpriv->s_BoardInfos.ui_Address + (16 + (b_TimerNbr * 4) + (64 * b_ModulNbr)));
-
- dw_Test = (dw_Test >> 16) & 1;
- } else {
- dw_Test = 1;
- }
-
- /* Test if detection OK */
- if (dw_Test == 1) {
- /* END JK 27.10.03 : Add the possibility to use a 40 Mhz quartz */
- /* Initialisation OK */
- devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].b_82X54Init = 1;
-
- /* Save the input clock selection */
- devpriv-> s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].b_InputClockSelection = b_InputClockSelection;
-
- /* Save the input clock level */
- devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].b_InputClockLevel = ~b_InputClockLevel & 1;
-
- /* Save the output level */
- devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].b_OutputLevel = ~b_OutputLevel & 1;
-
- /* Save the gate level */
- devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].b_HardwareGateLevel = b_HardwareGateLevel;
-
- /* Set the configuration word and disable the timer */
- /* BEGIN JK 27.10.03 : Add the possibility to use a 40 Mhz quartz */
- /*
- devpriv->s_ModuleInfo [b_ModulNbr].
- s_82X54ModuleInfo.
- s_82X54TimerInfo [b_TimerNbr].
- dw_ConfigurationWord = (unsigned int) (((b_HardwareGateLevel << 0) & 0x1) |
- ((b_InputClockLevel << 1) & 0x2) |
- (((~b_OutputLevel & 1) << 2) & 0x4) |
- ((b_InputClockSelection << 4) & 0x10));
- */
- /* Test if 10MHz selected */
- if (b_InputClockSelection == APCI1710_10MHZ) {
- b_InputClockSelection = 2;
- }
-
- devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord = (unsigned int)(((b_HardwareGateLevel << 0) & 0x1) | ((b_InputClockLevel << 1) & 0x2) | (((~b_OutputLevel & 1) << 2) & 0x4) | ((b_InputClockSelection << 4) & 0x30));
- /* END JK 27.10.03 : Add the possibility to use a 40 Mhz quartz */
- outl(devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord, devpriv->s_BoardInfos.ui_Address + 32 + (b_TimerNbr * 4) + (64 * b_ModulNbr));
-
- /* Initialise the 82X54 Timer */
- outl((unsigned int) b_TimerMode, devpriv->s_BoardInfos.ui_Address + 16 + (b_TimerNbr * 4) + (64 * b_ModulNbr));
-
- /* Write the reload value */
- outl(ul_ReloadValue, devpriv->s_BoardInfos.ui_Address + 0 + (b_TimerNbr * 4) + (64 * b_ModulNbr));
- /* BEGIN JK 27.10.03 : Add the possibility to use a 40 Mhz quartz */
- } /* if (dw_Test == 1) */
- else {
- /* Input timer clock selection is wrong */
- i_ReturnValue = -6;
- } /* if (dw_Test == 1) */
- /* END JK 27.10.03 : Add the possibility to use a 40 Mhz quartz */
- } /* if ((b_HardwareGateLevel == 0) || (b_HardwareGateLevel == 1)) */
- else {
- /* Selection from hardware gate level is wrong */
- DPRINTK("Selection from hardware gate level is wrong\n");
- i_ReturnValue = -9;
- } /* if ((b_HardwareGateLevel == 0) || (b_HardwareGateLevel == 1)) */
- } /* if ((b_OutputLevel == 0) || (b_OutputLevel == 1)) */
- else {
- /* Selection from output clock level is wrong */
- DPRINTK("Selection from output clock level is wrong\n");
- i_ReturnValue = -8;
- } /* if ((b_OutputLevel == 0) || (b_OutputLevel == 1)) */
- } /* if ((b_InputClockLevel == 0) || (b_InputClockLevel == 1)) */
- else {
- /* Selection from input clock level is wrong */
- DPRINTK("Selection from input clock level is wrong\n");
- i_ReturnValue = -7;
- } /* if ((b_InputClockLevel == 0) || (b_InputClockLevel == 1)) */
- } else {
- /* Input timer clock selection is wrong */
- DPRINTK("Input timer clock selection is wrong\n");
- i_ReturnValue = -6;
- }
- } else {
- /* Input timer clock selection is wrong */
- DPRINTK("Input timer clock selection is wrong\n");
- i_ReturnValue = -6;
- }
- } /* if ((b_TimerMode >= 0) && (b_TimerMode <= 5)) */
- else {
- /* Timer mode selection is wrong */
- DPRINTK("Timer mode selection is wrong\n");
- i_ReturnValue = -5;
- } /* if ((b_TimerMode >= 0) && (b_TimerMode <= 5)) */
- } /* if ((b_TimerNbr >= 0) && (b_TimerNbr <= 2)) */
- else {
- /* Timer selection wrong */
- DPRINTK("Timer selection wrong\n");
- i_ReturnValue = -3;
- } /* if ((b_TimerNbr >= 0) && (b_TimerNbr <= 2)) */
- } else {
- /* The module is not a TIMER module */
- DPRINTK("The module is not a TIMER module\n");
- i_ReturnValue = -4;
- }
- } else {
- /* Module number error */
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_EnableTimer |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_TimerNbr, |
-| unsigned char_ b_InterruptEnable)
-int i_APCI1710_InsnWriteEnableDisableTimer(struct comedi_device *dev,struct comedi_subdevice *s,
- struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Enable OR Disable the Timer (b_TimerNbr) from selected module |
-| (b_ModulNbr). You must calling the |
-| "i_APCI1710_InitTimer" function be for you call this |
-| function. If you enable the timer interrupt, the timer |
-| generate a interrupt after the timer value reach |
-| the zero. See function "i_APCI1710_SetBoardIntRoutineX"|
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board |
-| APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number |
-| (0 to 3) |
-| unsigned char_ b_TimerNbr : Timer number to enable |
-| (0 to 2) |
-| unsigned char_ b_InterruptEnable : Enable or disable the |
-| timer interrupt. |
-| APCI1710_ENABLE : |
-| Enable the timer interrupt |
-| APCI1710_DISABLE : |
-| Disable the timer interrupt|
-i_ReturnValue=insn->n;
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_TimerNbr = (unsigned char) CR_CHAN(insn->chanspec);
- b_ActionType = (unsigned char) data[0]; /* enable disable */
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: Timer selection wrong |
-| -4: The module is not a TIMER module |
-| -5: Timer not initialised see function |
-| "i_APCI1710_InitTimer" |
-| -6: Interrupt parameter is wrong |
-| -7: Interrupt function not initialised. |
-| See function "i_APCI1710_SetBoardIntRoutineX" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_InsnWriteEnableDisableTimer(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_DummyRead;
- unsigned char b_ModulNbr;
- unsigned char b_TimerNbr;
- unsigned char b_ActionType;
- unsigned char b_InterruptEnable;
-
- i_ReturnValue = insn->n;
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_TimerNbr = (unsigned char) CR_CHAN(insn->chanspec);
- b_ActionType = (unsigned char) data[0]; /* enable disable */
-
- /* Test the module number */
- if (b_ModulNbr < 4) {
- /* Test if 82X54 timer */
- if ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_82X54_TIMER) {
- /* Test the timer number */
- if (b_TimerNbr <= 2) {
- /* Test if timer initialised */
- if (devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].b_82X54Init == 1) {
-
- switch (b_ActionType) {
- case APCI1710_ENABLE:
- b_InterruptEnable = (unsigned char) data[1];
- /* Test the interrupt selection */
- if ((b_InterruptEnable == APCI1710_ENABLE) ||
- (b_InterruptEnable == APCI1710_DISABLE)) {
- if (b_InterruptEnable == APCI1710_ENABLE) {
-
- dw_DummyRead = inl(devpriv->s_BoardInfos.ui_Address + 12 + (b_TimerNbr * 4) + (64 * b_ModulNbr));
-
- /* Enable the interrupt */
- devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord = devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord | 0x8;
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord, devpriv->s_BoardInfos.ui_Address + 32 + (b_TimerNbr * 4) + (64 * b_ModulNbr));
- devpriv->tsk_Current = current; /* Save the current process task structure */
-
- } /* if (b_InterruptEnable == APCI1710_ENABLE) */
- else {
- /* Disable the interrupt */
- devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord = devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord & 0xF7;
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord, devpriv->s_BoardInfos.ui_Address + 32 + (b_TimerNbr * 4) + (64 * b_ModulNbr));
-
- /* Save the interrupt flag */
- devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.b_InterruptMask = devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.b_InterruptMask & (0xFF - (1 << b_TimerNbr));
- } /* if (b_InterruptEnable == APCI1710_ENABLE) */
-
- /* Test if error occur */
- if (i_ReturnValue >= 0) {
- /* Save the interrupt flag */
- devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.b_InterruptMask = devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.b_InterruptMask | ((1 & b_InterruptEnable) << b_TimerNbr);
-
- /* Enable the timer */
- outl(1, devpriv->s_BoardInfos.ui_Address + 44 + (b_TimerNbr * 4) + (64 * b_ModulNbr));
- }
- } else {
- /* Interrupt parameter is wrong */
- DPRINTK("\n");
- i_ReturnValue = -6;
- }
- break;
- case APCI1710_DISABLE:
- /* Test the interrupt flag */
- if (((devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.b_InterruptMask >> b_TimerNbr) & 1) == 1) {
- /* Disable the interrupt */
-
- devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr]. dw_ConfigurationWord = devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord & 0xF7;
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord, devpriv->s_BoardInfos.ui_Address + 32 + (b_TimerNbr * 4) + (64 * b_ModulNbr));
-
- /* Save the interrupt flag */
- devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.b_InterruptMask = devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.b_InterruptMask & (0xFF - (1 << b_TimerNbr));
- }
-
- /* Disable the timer */
- outl(0, devpriv->s_BoardInfos.ui_Address + 44 + (b_TimerNbr * 4) + (64 * b_ModulNbr));
- break;
- } /* Switch end */
- } else {
- /* Timer not initialised see function */
- DPRINTK ("Timer not initialised see function\n");
- i_ReturnValue = -5;
- }
- } else {
- /* Timer selection wrong */
- DPRINTK("Timer selection wrong\n");
- i_ReturnValue = -3;
- } /* if ((b_TimerNbr >= 0) && (b_TimerNbr <= 2)) */
- } else {
- /* The module is not a TIMER module */
- DPRINTK("The module is not a TIMER module\n");
- i_ReturnValue = -4;
- }
- } else {
- /* Module number error */
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ReadAllTimerValue |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| PULONG_ pul_TimerValueArray)
-int i_APCI1710_InsnReadAllTimerValue(struct comedi_device *dev,struct comedi_subdevice *s,
- struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Return the all timer values from selected timer |
-| module (b_ModulNbr). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board |
-| APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : PULONG_ pul_TimerValueArray : Timer value array. |
-| Element 0 contain the timer 0 value. |
-| Element 1 contain the timer 1 value. |
-| Element 2 contain the timer 2 value. |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a TIMER module |
-| -4: Timer 0 not initialised see function |
-| "i_APCI1710_InitTimer" |
-| -5: Timer 1 not initialised see function |
-| "i_APCI1710_InitTimer" |
-| -6: Timer 2 not initialised see function |
-| "i_APCI1710_InitTimer" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_InsnReadAllTimerValue(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned char b_ModulNbr, b_ReadType;
- unsigned int *pul_TimerValueArray;
-
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_ReadType = CR_CHAN(insn->chanspec);
- pul_TimerValueArray = (unsigned int *) data;
- i_ReturnValue = insn->n;
-
- switch (b_ReadType) {
- case APCI1710_TIMER_READINTERRUPT:
-
- data[0] = devpriv->s_InterruptParameters.s_FIFOInterruptParameters[devpriv->s_InterruptParameters.ui_Read].b_OldModuleMask;
- data[1] = devpriv->s_InterruptParameters.s_FIFOInterruptParameters[devpriv->s_InterruptParameters.ui_Read].ul_OldInterruptMask;
- data[2] = devpriv->s_InterruptParameters.s_FIFOInterruptParameters[devpriv->s_InterruptParameters.ui_Read].ul_OldCounterLatchValue;
-
- /* Increment the read FIFO */
- devpriv->s_InterruptParameters.ui_Read = (devpriv->s_InterruptParameters.ui_Read + 1) % APCI1710_SAVE_INTERRUPT;
-
- break;
-
- case APCI1710_TIMER_READALLTIMER:
- /* Test the module number */
- if (b_ModulNbr < 4) {
- /* Test if 82X54 timer */
- if ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_82X54_TIMER) {
- /* Test if timer 0 iniutialised */
- if (devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[0].b_82X54Init == 1) {
- /* Test if timer 1 iniutialised */
- if (devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[1].b_82X54Init == 1) {
- /* Test if timer 2 iniutialised */
- if (devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[2].b_82X54Init == 1) {
- /* Latch all counter */
- outl(0x17, devpriv->s_BoardInfos.ui_Address + 12 + (64 * b_ModulNbr));
-
- /* Read the timer 0 value */
- pul_TimerValueArray[0] = inl(devpriv->s_BoardInfos.ui_Address + 0 + (64 * b_ModulNbr));
-
- /* Read the timer 1 value */
- pul_TimerValueArray[1] = inl(devpriv->s_BoardInfos.ui_Address + 4 + (64 * b_ModulNbr));
-
- /* Read the timer 2 value */
- pul_TimerValueArray[2] = inl(devpriv->s_BoardInfos.ui_Address + 8 + (64 * b_ModulNbr));
- } else {
- /* Timer 2 not initialised see function */
- DPRINTK("Timer 2 not initialised see function\n");
- i_ReturnValue = -6;
- }
- } else {
- /* Timer 1 not initialised see function */
- DPRINTK("Timer 1 not initialised see function\n");
- i_ReturnValue = -5;
- }
- } else {
- /* Timer 0 not initialised see function */
- DPRINTK("Timer 0 not initialised see function\n");
- i_ReturnValue = -4;
- }
- } else {
- /* The module is not a TIMER module */
- DPRINTK("The module is not a TIMER module\n");
- i_ReturnValue = -3;
- }
- } else {
- /* Module number error */
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- } /* End of Switch */
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ReadTimerValue |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_TimerNbr, |
-| PULONG_ pul_TimerValue) |
-+----------------------------------------------------------------------------+
-| Task : Return the timer value from selected digital timer |
-| (b_TimerNbr) from selected timer module (b_ModulNbr). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board |
-| APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number |
-| (0 to 3) |
-| unsigned char_ b_TimerNbr : Timer number to read |
-| (0 to 2) |
-+----------------------------------------------------------------------------+
-| Output Parameters : PULONG_ pul_TimerValue : Timer value |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: Timer selection wrong |
-| -4: The module is not a TIMER module |
-| -5: Timer not initialised see function |
-| "i_APCI1710_InitTimer" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_ReadTimerValue(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_TimerNbr,
- unsigned int *pul_TimerValue)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /* Test the module number */
- if (b_ModulNbr < 4) {
- /* Test if 82X54 timer */
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_82X54_TIMER) {
- /* Test the timer number */
- if (b_TimerNbr <= 2) {
- /* Test if timer initialised */
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_82X54ModuleInfo.
- s_82X54TimerInfo[b_TimerNbr].
- b_82X54Init == 1) {
- /* Latch the timer value */
- outl((2 << b_TimerNbr) | 0xD0,
- devpriv->s_BoardInfos.
- ui_Address + 12 +
- (64 * b_ModulNbr));
-
- /* Read the counter value */
- *pul_TimerValue =
- inl(devpriv->s_BoardInfos.
- ui_Address + (b_TimerNbr * 4) +
- (64 * b_ModulNbr));
- } else {
- /* Timer not initialised see function */
- DPRINTK("Timer not initialised see function\n");
- i_ReturnValue = -5;
- }
- } else {
- /* Timer selection wrong */
- DPRINTK("Timer selection wrong\n");
- i_ReturnValue = -3;
- } /* if ((b_TimerNbr >= 0) && (b_TimerNbr <= 2)) */
- } else {
- /* The module is not a TIMER module */
- DPRINTK("The module is not a TIMER module\n");
- i_ReturnValue = -4;
- }
- } else {
- /* Module number error */
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
- /*
- +----------------------------------------------------------------------------+
- | Function Name : _INT_ i_APCI1710_GetTimerOutputLevel |
- | (unsigned char_ b_BoardHandle, |
- | unsigned char_ b_ModulNbr, |
- | unsigned char_ b_TimerNbr, |
- | unsigned char *_ pb_OutputLevel) |
- +----------------------------------------------------------------------------+
- | Task : Return the output signal level (pb_OutputLevel) from |
- | selected digital timer (b_TimerNbr) from selected timer|
- | module (b_ModulNbr). |
- +----------------------------------------------------------------------------+
- | Input Parameters : unsigned char_ b_BoardHandle : Handle of board |
- | APCI-1710 |
- | unsigned char_ b_ModulNbr : Selected module number |
- | (0 to 3) |
- | unsigned char_ b_TimerNbr : Timer number to test |
- | (0 to 2) |
- +----------------------------------------------------------------------------+
- | Output Parameters : unsigned char *_ pb_OutputLevel : Output signal level |
- | 0 : The output is low |
- | 1 : The output is high |
- +----------------------------------------------------------------------------+
- | Return Value : 0: No error |
- | -1: The handle parameter of the board is wrong |
- | -2: Module selection wrong |
- | -3: Timer selection wrong |
- | -4: The module is not a TIMER module |
- | -5: Timer not initialised see function |
- | "i_APCI1710_InitTimer" |
- +----------------------------------------------------------------------------+
- */
-static int i_APCI1710_GetTimerOutputLevel(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_TimerNbr,
- unsigned char *pb_OutputLevel)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_TimerStatus;
-
- /* Test the module number */
- if (b_ModulNbr < 4) {
- /* Test if 82X54 timer */
- if ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_82X54_TIMER) {
- /* Test the timer number */
- if (b_TimerNbr <= 2) {
- /* Test if timer initialised */
- if (devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].b_82X54Init == 1) {
- /* Latch the timer value */
- outl((2 << b_TimerNbr) | 0xE0, devpriv->s_BoardInfos.ui_Address + 12 + (64 * b_ModulNbr));
-
- /* Read the timer status */
- dw_TimerStatus = inl(devpriv->s_BoardInfos.ui_Address + 16 + (b_TimerNbr * 4) + (64 * b_ModulNbr));
-
- *pb_OutputLevel = (unsigned char) (((dw_TimerStatus >> 7) & 1) ^ devpriv-> s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].b_OutputLevel);
- } else {
- /* Timer not initialised see function */
- DPRINTK("Timer not initialised see function\n");
- i_ReturnValue = -5;
- }
- } else {
- /* Timer selection wrong */
- DPRINTK("Timer selection wrong\n");
- i_ReturnValue = -3;
- } /* if ((b_TimerNbr >= 0) && (b_TimerNbr <= 2)) */
- } else {
- /* The module is not a TIMER module */
- DPRINTK("The module is not a TIMER module\n");
- i_ReturnValue = -4;
- }
- } else {
- /* Module number error */
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_GetTimerProgressStatus |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_TimerNbr, |
-| unsigned char *_ pb_TimerStatus) |
-+----------------------------------------------------------------------------+
-| Task : Return the progress status (pb_TimerStatus) from |
-| selected digital timer (b_TimerNbr) from selected timer|
-| module (b_ModulNbr). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board |
-| APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number |
-| (0 to 3) |
-| unsigned char_ b_TimerNbr : Timer number to test |
-| (0 to 2) |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_TimerStatus : Output signal level |
-| 0 : Timer not in progress |
-| 1 : Timer in progress |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: Timer selection wrong |
-| -4: The module is not a TIMER module |
-| -5: Timer not initialised see function |
-| "i_APCI1710_InitTimer" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_GetTimerProgressStatus(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_TimerNbr,
- unsigned char *pb_TimerStatus)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_TimerStatus;
-
- /* Test the module number */
- if (b_ModulNbr < 4) {
- /* Test if 82X54 timer */
-
- if ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_82X54_TIMER) {
- /* Test the timer number */
- if (b_TimerNbr <= 2) {
- /* Test if timer initialised */
- if (devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].b_82X54Init == 1) {
- /* Latch the timer value */
- outl((2 << b_TimerNbr) | 0xE0, devpriv->s_BoardInfos.ui_Address + 12 + (64 * b_ModulNbr));
-
- /* Read the timer status */
- dw_TimerStatus = inl(devpriv->s_BoardInfos.ui_Address + 16 + (b_TimerNbr * 4) + (64 * b_ModulNbr));
-
- *pb_TimerStatus = (unsigned char) ((dw_TimerStatus) >> 8) & 1;
- printk("ProgressStatus : %d", *pb_TimerStatus);
- } else {
- /* Timer not initialised see function */
- i_ReturnValue = -5;
- }
- } else {
- /* Timer selection wrong */
- i_ReturnValue = -3;
- } /* if ((b_TimerNbr >= 0) && (b_TimerNbr <= 2)) */
- } else {
- /* The module is not a TIMER module */
-
- i_ReturnValue = -4;
- }
- } else {
- /* Module number error */
-
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_WriteTimerValue |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_TimerNbr, |
-| ULONG_ ul_WriteValue) |
-+----------------------------------------------------------------------------+
-| Task : Write the value (ul_WriteValue) into the selected timer|
-| (b_TimerNbr) from selected timer module (b_ModulNbr). |
-| The action depends on the timer mode selection. |
-| See timer mode description table. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board |
-| APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number |
-| (0 to 3) |
-| unsigned char_ b_TimerNbr : Timer number to write |
-| (0 to 2) |
-| ULONG_ ul_WriteValue : Value to write |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: Timer selection wrong |
-| -4: The module is not a TIMER module |
-| -5: Timer not initialised see function |
-| "i_APCI1710_InitTimer" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_WriteTimerValue(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_TimerNbr,
- unsigned int ul_WriteValue)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /* Test the module number */
- if (b_ModulNbr < 4) {
- /* Test if 82X54 timer */
- if ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_82X54_TIMER) {
- /* Test the timer number */
- if (b_TimerNbr <= 2) {
- /* Test if timer initialised */
- if (devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].b_82X54Init == 1) {
- /* Write the value */
- outl(ul_WriteValue, devpriv->s_BoardInfos.ui_Address + (b_TimerNbr * 4) + (64 * b_ModulNbr));
- } else {
- /* Timer not initialised see function */
- DPRINTK("Timer not initialised see function\n");
- i_ReturnValue = -5;
- }
- } else {
- /* Timer selection wrong */
- DPRINTK("Timer selection wrong\n");
- i_ReturnValue = -3;
- } /* if ((b_TimerNbr >= 0) && (b_TimerNbr <= 2)) */
- } else {
- /* The module is not a TIMER module */
- DPRINTK("The module is not a TIMER module\n");
- i_ReturnValue = -4;
- }
- } else {
- /* Module number error */
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name :INT i_APCI1710_InsnBitsTimer(struct comedi_device *dev,
-struct comedi_subdevice *s,struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Read write functions for Timer |
-+----------------------------------------------------------------------------+
-| Input Parameters :
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value :
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_InsnBitsTimer(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned char b_BitsType;
- int i_ReturnValue = 0;
- b_BitsType = data[0];
-
- printk("\n82X54");
-
- switch (b_BitsType) {
- case APCI1710_TIMER_READVALUE:
- i_ReturnValue = i_APCI1710_ReadTimerValue(dev,
- (unsigned char)CR_AREF(insn->chanspec),
- (unsigned char)CR_CHAN(insn->chanspec),
- (unsigned int *) &data[0]);
- break;
-
- case APCI1710_TIMER_GETOUTPUTLEVEL:
- i_ReturnValue = i_APCI1710_GetTimerOutputLevel(dev,
- (unsigned char)CR_AREF(insn->chanspec),
- (unsigned char)CR_CHAN(insn->chanspec),
- (unsigned char *) &data[0]);
- break;
-
- case APCI1710_TIMER_GETPROGRESSSTATUS:
- i_ReturnValue = i_APCI1710_GetTimerProgressStatus(dev,
- (unsigned char)CR_AREF(insn->chanspec),
- (unsigned char)CR_CHAN(insn->chanspec),
- (unsigned char *)&data[0]);
- break;
-
- case APCI1710_TIMER_WRITEVALUE:
- i_ReturnValue = i_APCI1710_WriteTimerValue(dev,
- (unsigned char)CR_AREF(insn->chanspec),
- (unsigned char)CR_CHAN(insn->chanspec),
- (unsigned int)data[1]);
-
- break;
-
- default:
- printk("Bits Config Parameter Wrong\n");
- i_ReturnValue = -1;
- }
-
- if (i_ReturnValue >= 0)
- i_ReturnValue = insn->n;
- return i_ReturnValue;
-}
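-/*
- * Illustrative usage sketch (editor's addition, not part of the original
- * ADDI-DATA source): how a comedilib application could reach the insn_bits
- * handler above, assuming it is registered as the timer subdevice's
- * insn_bits callback. The module number travels in the AREF field of the
- * chanspec, the timer number in the CHAN field, and data[0] selects the
- * operation (for APCI1710_TIMER_WRITEVALUE, data[1] carries the value to
- * write). The APCI1710_TIMER_* names are the ones used in the switch above
- * and are assumed to be available from the driver's 82X54 header; the
- * subdevice index is an assumption as well.
- */
-#if 0 /* example only: not compiled as part of the driver */
-#include <string.h>
-#include <comedilib.h>
-
-static int read_timer_output_level(comedi_t *it, unsigned int subdev,
-                                   unsigned int module, unsigned int timer)
-{
-	lsampl_t data[2] = { APCI1710_TIMER_GETOUTPUTLEVEL, 0 };
-	comedi_insn insn;
-
-	memset(&insn, 0, sizeof(insn));
-	insn.insn = INSN_BITS;
-	insn.n = 2;
-	insn.data = data;
-	insn.subdev = subdev;
-	insn.chanspec = CR_PACK(timer, 0, module); /* CHAN = timer, AREF = module */
-
-	if (comedi_do_insn(it, &insn) < 0)
-		return -1;
-	return (int)data[0]; /* 0 = output low, 1 = output high */
-}
-#endif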
diff --git a/drivers/staging/comedi/drivers/addi-data/APCI1710_Chrono.c b/drivers/staging/comedi/drivers/addi-data/APCI1710_Chrono.c
deleted file mode 100644
index 5bd7fe6..0000000
--- a/drivers/staging/comedi/drivers/addi-data/APCI1710_Chrono.c
+++ /dev/null
@@ -1,2054 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
- Tel: +49(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-You should also find the complete GPL in the COPYING file accompanying this source code.
-
-@endverbatim
-*/
-/*
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-----------------------------------------------------------------------+
- | Project : API APCI1710 | Compiler : gcc |
- | Module name : CHRONO.C | Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-----------------------------------------------------------------------+
- | Description : APCI-1710 chronometer module |
- | |
- | |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +-----------------------------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | 29/06/98 | S. Weber | Digital input / output implementation |
- |----------|-----------|------------------------------------------------|
- | 08/05/00 | Guinot C | - 0400/0228 All Function in RING 0 |
- | | | available |
- +-----------------------------------------------------------------------+
- | | | |
- | | | |
- +-----------------------------------------------------------------------+
-*/
-
-#define APCI1710_30MHZ 30
-#define APCI1710_33MHZ 33
-#define APCI1710_40MHZ 40
-
-#define APCI1710_SINGLE 0
-#define APCI1710_CONTINUOUS 1
-
-#define APCI1710_CHRONO_PROGRESS_STATUS 0
-#define APCI1710_CHRONO_READVALUE 1
-#define APCI1710_CHRONO_CONVERTVALUE 2
-#define APCI1710_CHRONO_READINTERRUPT 3
-
-#define APCI1710_CHRONO_SET_CHANNELON 0
-#define APCI1710_CHRONO_SET_CHANNELOFF 1
-#define APCI1710_CHRONO_READ_CHANNEL 2
-#define APCI1710_CHRONO_READ_PORT 3
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_InitChrono |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_ChronoMode, |
-| unsigned char_ b_PCIInputClock, |
-| unsigned char_ b_TimingUnit, |
-| ULONG_ ul_TimingInterval, |
-| PULONG_ pul_RealTimingInterval)
-
-+----------------------------------------------------------------------------+
-| Task : Configure the chronometer operating mode (b_ChronoMode)|
-| from selected module (b_ModulNbr). |
-| The ul_TimingInterval and ul_TimingUnit determine the |
-| timing base for the measurement. |
-| pul_RealTimingInterval returns the real timing |
-| value. You must call this function before you call |
-| any other function that accesses the chronometer. |
-| |
-| With this functionality of the APCI-1710 you can |
-| measure the time between two events. |
-| |
-| Modes 0 and 1 are suited to period measurement. |
-| Modes 2 and 3 are suited to frequency measurement. |
-| Modes 4 to 7 are suited to measuring the time |
-| between two events. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr CR_AREF(insn->chanspec) : Module number to configure |
-| (0 to 3) |
-| unsigned char_ b_ChronoMode data[0] : Chronometer action mode |
-| (0 to 7). |
-| unsigned char_ b_PCIInputClock data[1] : Selection from PCI bus clock|
-| - APCI1710_30MHZ : |
-| The PC have a PCI bus |
-| clock from 30 MHz |
-| - APCI1710_33MHZ : |
-| The PC have a PCI bus |
-| clock from 33 MHz |
-| - APCI1710_40MHZ |
-| The APCI-1710 have a |
-| integrated 40Mhz |
-| quartz. |
-| unsigned char_ b_TimingUnit data[2] : Base timing unity (0 to 4) |
-| 0 : ns |
-| 1 : µs |
-| 2 : ms |
-| 3 : s |
-| 4 : mn |
-| ULONG_ ul_TimingInterval : data[3] Base timing value. |
-+----------------------------------------------------------------------------+
-| Output Parameters : PULONG_ pul_RealTimingInterval : Real base timing |
-| value.
-| data[0]
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a Chronometer module |
-| -4: Chronometer mode selection is wrong |
-| -5: The selected PCI input clock is wrong |
-| -6: Timing unity selection is wrong |
-| -7: Base timing selection is wrong |
-| -8: You cannot use the 40MHz clock selection with |
-| this board |
-| -9: You cannot use the 40MHz clock selection with |
-| this CHRONOS version |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_InsnConfigInitChrono(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int ul_TimerValue = 0;
- unsigned int ul_TimingInterval = 0;
- unsigned int ul_RealTimingInterval = 0;
- double d_RealTimingInterval = 0;
- unsigned int dw_ModeArray[8] =
- { 0x01, 0x05, 0x00, 0x04, 0x02, 0x0E, 0x0A, 0x06 };
- unsigned char b_ModulNbr, b_ChronoMode, b_PCIInputClock, b_TimingUnit;
-
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_ChronoMode = (unsigned char) data[0];
- b_PCIInputClock = (unsigned char) data[1];
- b_TimingUnit = (unsigned char) data[2];
- ul_TimingInterval = (unsigned int) data[3];
- i_ReturnValue = insn->n;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***********************/
- /* Test if chronometer */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_CHRONOMETER) {
- /*****************************/
- /* Test the chronometer mode */
- /*****************************/
-
- if (b_ChronoMode <= 7) {
- /**************************/
- /* Test the PCI bus clock */
- /**************************/
-
- if ((b_PCIInputClock == APCI1710_30MHZ) ||
- (b_PCIInputClock == APCI1710_33MHZ) ||
- (b_PCIInputClock == APCI1710_40MHZ)) {
- /*************************/
- /* Test the timing unity */
- /*************************/
-
- if (b_TimingUnit <= 4) {
- /**********************************/
- /* Test the base timing selection */
- /**********************************/
-
- if (((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 0) && (ul_TimingInterval >= 66) && (ul_TimingInterval <= 0xFFFFFFFFUL)) || ((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 1) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 143165576UL)) || ((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 2) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 143165UL)) || ((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 3) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 143UL)) || ((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 4) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 2UL)) || ((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 0) && (ul_TimingInterval >= 60) && (ul_TimingInterval <= 0xFFFFFFFFUL)) || ((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 1) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 130150240UL)) || ((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 2) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 130150UL)) || ((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 3) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 130UL)) || ((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 4) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 2UL)) || ((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 0) && (ul_TimingInterval >= 50) && (ul_TimingInterval <= 0xFFFFFFFFUL)) || ((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 1) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 107374182UL)) || ((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 2) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 107374UL)) || ((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 3) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 107UL)) || ((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 4) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 1UL))) {
- /**************************/
- /* Test the board version */
- /**************************/
-
- if (((b_PCIInputClock == APCI1710_40MHZ) && (devpriv->s_BoardInfos.b_BoardVersion > 0)) || (b_PCIInputClock != APCI1710_40MHZ)) {
- /************************/
- /* Test the TOR version */
- /************************/
-
- if (((b_PCIInputClock == APCI1710_40MHZ) && ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF) >= 0x3131)) || (b_PCIInputClock != APCI1710_40MHZ)) {
- fpu_begin();
-
- /*****************************************/
- /* Calculate the timer 0 division factor */
- /*****************************************/
-
- switch (b_TimingUnit) {
- /* ns */
- case 0:
- /* Timer 0 factor */
- ul_TimerValue = (unsigned int) (ul_TimingInterval * (0.001 * b_PCIInputClock));
-
- /* Round the value */
- if ((double)((double)ul_TimingInterval * (0.001 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
- ul_TimerValue = ul_TimerValue + 1;
- }
-
- /* Calculate the real timing */
- ul_RealTimingInterval = (unsigned int) (ul_TimerValue / (0.001 * (double)b_PCIInputClock));
- d_RealTimingInterval = (double)ul_TimerValue / (0.001 * (double)b_PCIInputClock);
-
- if ((double)((double)ul_TimerValue / (0.001 * (double)b_PCIInputClock)) >= (double)((double)ul_RealTimingInterval + 0.5)) {
- ul_RealTimingInterval = ul_RealTimingInterval + 1;
- }
-
- ul_TimingInterval = ul_TimingInterval - 1;
- ul_TimerValue = ul_TimerValue - 2;
- if (b_PCIInputClock != APCI1710_40MHZ) {
- ul_TimerValue = (unsigned int) ((double)(ul_TimerValue) * 0.99392);
- }
-
- break;
-
- /* µs */
- case 1:
- /* Timer 0 factor */
- ul_TimerValue = (unsigned int) (ul_TimingInterval * (1.0 * b_PCIInputClock));
-
- /* Round the value */
- if ((double)((double)ul_TimingInterval * (1.0 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
- ul_TimerValue = ul_TimerValue + 1;
- }
-
- /* Calculate the real timing */
- ul_RealTimingInterval = (unsigned int) (ul_TimerValue / (1.0 * (double)b_PCIInputClock));
- d_RealTimingInterval = (double)ul_TimerValue / ((double)1.0 * (double)b_PCIInputClock);
-
- if ((double)((double)ul_TimerValue / (1.0 * (double)b_PCIInputClock)) >= (double)((double)ul_RealTimingInterval + 0.5)) {
- ul_RealTimingInterval = ul_RealTimingInterval + 1;
- }
-
- ul_TimingInterval = ul_TimingInterval - 1;
- ul_TimerValue = ul_TimerValue - 2;
- if (b_PCIInputClock != APCI1710_40MHZ) {
- ul_TimerValue = (unsigned int) ((double)(ul_TimerValue) * 0.99392);
- }
-
- break;
-
- /* ms */
- case 2:
- /* Timer 0 factor */
- ul_TimerValue = ul_TimingInterval * (1000 * b_PCIInputClock);
-
- /* Round the value */
- if ((double)((double)ul_TimingInterval * (1000.0 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
- ul_TimerValue = ul_TimerValue + 1;
- }
-
- /* Calculate the real timing */
- ul_RealTimingInterval = (unsigned int) (ul_TimerValue / (1000.0 * (double)b_PCIInputClock));
- d_RealTimingInterval = (double)ul_TimerValue / (1000.0 * (double)b_PCIInputClock);
-
- if ((double)((double)ul_TimerValue / (1000.0 * (double)b_PCIInputClock)) >= (double)((double)ul_RealTimingInterval + 0.5)) {
- ul_RealTimingInterval = ul_RealTimingInterval + 1;
- }
-
- ul_TimingInterval = ul_TimingInterval - 1;
- ul_TimerValue = ul_TimerValue - 2;
- if (b_PCIInputClock != APCI1710_40MHZ) {
- ul_TimerValue = (unsigned int) ((double)(ul_TimerValue) * 0.99392);
- }
-
- break;
-
- /* s */
- case 3:
- /* Timer 0 factor */
- ul_TimerValue = (unsigned int) (ul_TimingInterval * (1000000.0 * b_PCIInputClock));
-
- /* Round the value */
- if ((double)((double)ul_TimingInterval * (1000000.0 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
- ul_TimerValue = ul_TimerValue + 1;
- }
-
- /* Calculate the real timing */
- ul_RealTimingInterval = (unsigned int) (ul_TimerValue / (1000000.0 * (double)b_PCIInputClock));
- d_RealTimingInterval = (double)ul_TimerValue / (1000000.0 * (double)b_PCIInputClock);
-
- if ((double)((double)ul_TimerValue / (1000000.0 * (double)b_PCIInputClock)) >= (double)((double)ul_RealTimingInterval + 0.5)) {
- ul_RealTimingInterval = ul_RealTimingInterval + 1;
- }
-
- ul_TimingInterval = ul_TimingInterval - 1;
- ul_TimerValue = ul_TimerValue - 2;
- if (b_PCIInputClock != APCI1710_40MHZ) {
- ul_TimerValue = (unsigned int) ((double)(ul_TimerValue) * 0.99392);
- }
-
- break;
-
- /* mn */
- case 4:
- /* Timer 0 factor */
- ul_TimerValue = (unsigned int) ((ul_TimingInterval * 60) * (1000000.0 * b_PCIInputClock));
-
- /* Round the value */
- if ((double)((double)(ul_TimingInterval * 60.0) * (1000000.0 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
- ul_TimerValue = ul_TimerValue + 1;
- }
-
- /* Calculate the real timing */
- ul_RealTimingInterval = (unsigned int) (ul_TimerValue / (1000000.0 * (double)b_PCIInputClock)) / 60;
- d_RealTimingInterval = ((double)ul_TimerValue / (0.001 * (double)b_PCIInputClock)) / 60.0;
-
- if ((double)(((double)ul_TimerValue / (1000000.0 * (double)b_PCIInputClock)) / 60.0) >= (double)((double)ul_RealTimingInterval + 0.5)) {
- ul_RealTimingInterval = ul_RealTimingInterval + 1;
- }
-
- ul_TimingInterval = ul_TimingInterval - 1;
- ul_TimerValue = ul_TimerValue - 2;
- if (b_PCIInputClock != APCI1710_40MHZ) {
- ul_TimerValue = (unsigned int) ((double)(ul_TimerValue) * 0.99392);
- }
-
- break;
- }
-
- fpu_end();
-
- /****************************/
- /* Save the PCI input clock */
- /****************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.b_PCIInputClock = b_PCIInputClock;
-
- /*************************/
- /* Save the timing unity */
- /*************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.b_TimingUnit = b_TimingUnit;
-
- /************************/
- /* Save the base timing */
- /************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.d_TimingInterval = d_RealTimingInterval;
-
- /****************************/
- /* Set the chronometer mode */
- /****************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg = dw_ModeArray[b_ChronoMode];
-
- /***********************/
- /* Test if 40 MHz used */
- /***********************/
-
- if (b_PCIInputClock == APCI1710_40MHZ) {
- devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg |= 0x80;
- }
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg, devpriv->s_BoardInfos.ui_Address + 16 + (64 * b_ModulNbr));
-
- /***********************/
- /* Write timer 0 value */
- /***********************/
-
- outl(ul_TimerValue, devpriv->s_BoardInfos.ui_Address + (64 * b_ModulNbr));
-
- /*********************/
- /* Chronometer init. */
- /*********************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.b_ChronoInit = 1;
- } else {
- /***********************************************/
- /* TOR version error for 40MHz clock selection */
- /***********************************************/
-
- DPRINTK("TOR version error for 40MHz clock selection\n");
- i_ReturnValue = -9;
- }
- } else {
- /**************************************************************/
- /* You can not use the 40MHz clock selection with this board */
- /**************************************************************/
-
- DPRINTK("You can not used the 40MHz clock selection with this board\n");
- i_ReturnValue =
- -8;
- }
- } else {
- /**********************************/
- /* Base timing selection is wrong */
- /**********************************/
-
- DPRINTK("Base timing selection is wrong\n");
- i_ReturnValue = -7;
- }
- } /* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */
- else {
- /***********************************/
- /* Timing unity selection is wrong */
- /***********************************/
-
- DPRINTK("Timing unity selection is wrong\n");
- i_ReturnValue = -6;
- } /* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */
- } /* if ((b_PCIInputClock == APCI1710_30MHZ) || (b_PCIInputClock == APCI1710_33MHZ)) */
- else {
- /*****************************************/
- /* The selected PCI input clock is wrong */
- /*****************************************/
-
- DPRINTK("The selected PCI input clock is wrong\n");
- i_ReturnValue = -5;
- } /* if ((b_PCIInputClock == APCI1710_30MHZ) || (b_PCIInputClock == APCI1710_33MHZ)) */
- } /* if (b_ChronoMode >= 0 && b_ChronoMode <= 7) */
- else {
- /***************************************/
- /* Chronometer mode selection is wrong */
- /***************************************/
-
- DPRINTK("Chronometer mode selection is wrong\n");
- i_ReturnValue = -4;
- } /* if (b_ChronoMode >= 0 && b_ChronoMode <= 7) */
- } else {
- /******************************************/
- /* The module is not a Chronometer module */
- /******************************************/
-
- DPRINTK("The module is not a Chronometer module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
- data[0] = ul_RealTimingInterval;
- return i_ReturnValue;
-}
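-/*
- * Illustrative configuration sketch (editor's addition, not from the
- * original source): the insn_config handler above expects
- * data[0] = chronometer mode (0..7), data[1] = PCI clock selector,
- * data[2] = timing unit (0 = ns .. 4 = mn), data[3] = timing interval,
- * and it returns the real timing interval in data[0]. For example, with
- * APCI1710_33MHZ, unit 1 (us) and an interval of 20, the initial timer 0
- * factor computed above is 20 * 33 = 660 ticks (before the -2 and 0.99392
- * corrections applied to the value written to the timer) and the returned
- * real interval is 660 / 33 = 20 us. The subdevice index is an assumption.
- */
-#if 0 /* example only: not compiled as part of the driver */
-#include <string.h>
-#include <comedilib.h>
-
-static int init_chrono_module(comedi_t *it, unsigned int subdev,
-                              unsigned int module)
-{
-	/* mode 0, 33 MHz PCI clock, unit = us, interval = 20 us */
-	lsampl_t data[4] = { 0, APCI1710_33MHZ, 1, 20 };
-	comedi_insn insn;
-
-	memset(&insn, 0, sizeof(insn));
-	insn.insn = INSN_CONFIG;
-	insn.n = 4;
-	insn.data = data;
-	insn.subdev = subdev;
-	insn.chanspec = CR_PACK(0, 0, module); /* AREF carries the module number */
-
-	if (comedi_do_insn(it, &insn) < 0)
-		return -1;
-	return (int)data[0]; /* real timing interval, here 20 us */
-}
-#endif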
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_EnableChrono |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_CycleMode, |
-| unsigned char_ b_InterruptEnable)
-int i_APCI1710_InsnWriteEnableDisableChrono(struct comedi_device *dev,
-struct comedi_subdevice *s,struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Enable the chronometer from selected module |
-| (b_ModulNbr). You must call the |
-| "i_APCI1710_InitChrono" function before you call this |
-| function. |
-| If you enable the chronometer interrupt, the |
-| chronometer generates an interrupt after the stop |
-| signal. See function "i_APCI1710_SetBoardIntRoutineX" |
-| and the Interrupt mask description chapter of this |
-| manual. |
-| The b_CycleMode parameter determines whether a single |
-| cycle or several cycles are measured. |
-
-| Disable the chronometer of the selected module |
-| (b_ModulNbr). If you disable the chronometer after a |
-| start signal has occurred and restart it with the |
-| "i_APCI1710_EnableChrono" function, that start signal |
-| is ignored if no stop signal has occurred. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr CR_AREF(chanspec) : Selected module number (0 to 3) |
- data[0] : Enable/disable the chronometer
-| unsigned char_ b_CycleMode : Selected the chronometer |
-| data[1] acquisition mode |
-| unsigned char_ b_InterruptEnable : Enable or disable the |
-| data[2] chronometer interrupt. |
-| APCI1710_ENABLE: |
-| Enable the chronometer |
-| interrupt |
-| APCI1710_DISABLE: |
-| Disable the chronometer |
-| interrupt |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a Chronometer module |
-| -4: Chronometer not initialised see function |
-| "i_APCI1710_InitChrono" |
-| -5: Chronometer acquisition mode cycle is wrong |
-| -6: Interrupt parameter is wrong |
-| -7: Interrupt function not initialised. |
-| See function "i_APCI1710_SetBoardIntRoutineX"
- -8: data[0] wrong input |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_InsnWriteEnableDisableChrono(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned char b_ModulNbr, b_CycleMode, b_InterruptEnable, b_Action;
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_Action = (unsigned char) data[0];
- b_CycleMode = (unsigned char) data[1];
- b_InterruptEnable = (unsigned char) data[2];
- i_ReturnValue = insn->n;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***********************/
- /* Test if chronometer */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_CHRONOMETER) {
- /***********************************/
- /* Test if chronometer initialised */
- /***********************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_ChronoModuleInfo.b_ChronoInit == 1) {
-
- switch (b_Action) {
-
- case APCI1710_ENABLE:
-
- /*********************************/
- /* Test the cycle mode parameter */
- /*********************************/
-
- if ((b_CycleMode == APCI1710_SINGLE)
- || (b_CycleMode ==
- APCI1710_CONTINUOUS)) {
- /***************************/
- /* Test the interrupt flag */
- /***************************/
-
- if ((b_InterruptEnable ==
- APCI1710_ENABLE)
- || (b_InterruptEnable ==
- APCI1710_DISABLE))
- {
-
- /***************************/
- /* Save the interrupt flag */
- /***************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.b_InterruptMask = b_InterruptEnable;
-
- /***********************/
- /* Save the cycle mode */
- /***********************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.b_CycleMode = b_CycleMode;
-
- devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg =
- (devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg & 0x8F) |
- ((1 & b_InterruptEnable) << 5) | ((1 & b_CycleMode) << 6) | 0x10;
-
- /*****************************/
- /* Test if interrupt enabled */
- /*****************************/
-
- if (b_InterruptEnable == APCI1710_ENABLE) {
- /****************************/
- /* Clear the interrupt flag */
- /****************************/
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg, devpriv->s_BoardInfos.ui_Address + 32 + (64 * b_ModulNbr));
- devpriv->tsk_Current = current; /* Save the current process task structure */
- }
-
- /***********************************/
- /* Enable or disable the interrupt */
- /* Enable the chronometer */
- /***********************************/
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg, devpriv->s_BoardInfos.ui_Address + 16 + (64 * b_ModulNbr));
-
- /*************************/
- /* Clear status register */
- /*************************/
-
- outl(0, devpriv->s_BoardInfos.ui_Address + 36 + (64 * b_ModulNbr));
-
- } /* if ((b_InterruptEnable == APCI1710_ENABLE) || (b_InterruptEnable == APCI1710_DISABLE)) */
- else {
- /********************************/
- /* Interrupt parameter is wrong */
- /********************************/
-
- DPRINTK("Interrupt parameter is wrong\n");
- i_ReturnValue = -6;
- } /* if ((b_InterruptEnable == APCI1710_ENABLE) || (b_InterruptEnable == APCI1710_DISABLE)) */
- } /* if ((b_CycleMode == APCI1710_SINGLE) || (b_CycleMode == APCI1710_CONTINUOUS)) */
- else {
- /***********************************************/
- /* Chronometer acquisition mode cycle is wrong */
- /***********************************************/
-
- DPRINTK("Chronometer acquisition mode cycle is wrong\n");
- i_ReturnValue = -5;
- } /* if ((b_CycleMode == APCI1710_SINGLE) || (b_CycleMode == APCI1710_CONTINUOUS)) */
- break;
-
- case APCI1710_DISABLE:
-
- devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.b_InterruptMask = 0;
-
- devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg =
- devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg & 0x2F;
-
- /***************************/
- /* Disable the interrupt */
- /* Disable the chronometer */
- /***************************/
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_ChronoModuleInfo.dw_ConfigReg,
- devpriv->s_BoardInfos.
- ui_Address + 16 +
- (64 * b_ModulNbr));
-
- /***************************/
- /* Test if continuous mode */
- /***************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_ChronoModuleInfo.
- b_CycleMode ==
- APCI1710_CONTINUOUS) {
- /*************************/
- /* Clear status register */
- /*************************/
-
- outl(0, devpriv->s_BoardInfos.
- ui_Address + 36 +
- (64 * b_ModulNbr));
- }
- break;
-
- default:
- DPRINTK("Inputs wrong! Enable or Disable chrono\n");
- i_ReturnValue = -8;
- } /* switch ENABLE/DISABLE */
- } else {
- /*******************************/
- /* Chronometer not initialised */
- /*******************************/
-
- DPRINTK("Chronometer not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /******************************************/
- /* The module is not a Chronometer module */
- /******************************************/
-
- DPRINTK("The module is not a Chronometer module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
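-/*
- * Illustrative enable sketch (editor's addition, not from the original
- * source): the insn_write handler above takes data[0] = enable/disable,
- * data[1] = cycle mode (APCI1710_SINGLE or APCI1710_CONTINUOUS) and
- * data[2] = interrupt enable flag. APCI1710_ENABLE/APCI1710_DISABLE come
- * from the common addi-data headers (not shown in this file); the
- * subdevice index is an assumption.
- */
-#if 0 /* example only: not compiled as part of the driver */
-#include <string.h>
-#include <comedilib.h>
-
-static int start_single_measurement(comedi_t *it, unsigned int subdev,
-                                    unsigned int module)
-{
-	/* enable, single cycle, no interrupt */
-	lsampl_t data[3] = { APCI1710_ENABLE, APCI1710_SINGLE, APCI1710_DISABLE };
-	comedi_insn insn;
-
-	memset(&insn, 0, sizeof(insn));
-	insn.insn = INSN_WRITE;
-	insn.n = 3;
-	insn.data = data;
-	insn.subdev = subdev;
-	insn.chanspec = CR_PACK(0, 0, module); /* AREF carries the module number */
-
-	return comedi_do_insn(it, &insn) < 0 ? -1 : 0;
-}
-#endif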
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_GetChronoProgressStatus |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char *_ pb_ChronoStatus) |
-+----------------------------------------------------------------------------+
-| Task : Return the chronometer status (pb_ChronoStatus) from |
-| selected chronometer module (b_ModulNbr). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : PULONG_ pb_ChronoStatus : Return the chronometer |
-| status. |
-| 0 : Measurement not started.|
-| No start signal occur. |
-| 1 : Measurement started. |
-| A start signal occur. |
-| 2 : Measurement stopped. |
-| A stop signal occur. |
-| The measurement is |
-| terminated. |
-| 3 : An overflow occurred. |
-| Change the base timing |
-| with the function |
-| "i_APCI1710_InitChrono" |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a Chronometer module |
-| -4: Chronometer not initialised see function |
-| "i_APCI1710_InitChrono" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_GetChronoProgressStatus(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char *pb_ChronoStatus)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_Status;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***********************/
- /* Test if chronometer */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_CHRONOMETER) {
- /***********************************/
- /* Test if chronometer initialised */
- /***********************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_ChronoModuleInfo.b_ChronoInit == 1) {
-
- dw_Status = inl(devpriv->s_BoardInfos.
- ui_Address + 8 + (64 * b_ModulNbr));
-
- /********************/
- /* Test if overflow */
- /********************/
-
- if ((dw_Status & 8) == 8) {
- /******************/
- /* Overflow occur */
- /******************/
-
- *pb_ChronoStatus = 3;
- } /* if ((dw_Status & 8) == 8) */
- else {
- /*******************************/
- /* Test if measurement stopped */
- /*******************************/
-
- if ((dw_Status & 2) == 2) {
- /***********************/
- /* A stop signal occur */
- /***********************/
-
- *pb_ChronoStatus = 2;
- } /* if ((dw_Status & 2) == 2) */
- else {
- /*******************************/
- /* Test if measurement started */
- /*******************************/
-
- if ((dw_Status & 1) == 1) {
- /************************/
- /* A start signal occur */
- /************************/
-
- *pb_ChronoStatus = 1;
- } /* if ((dw_Status & 1) == 1) */
- else {
- /***************************/
- /* Measurement not started */
- /***************************/
-
- *pb_ChronoStatus = 0;
- } /* if ((dw_Status & 1) == 1) */
- } /* if ((dw_Status & 2) == 2) */
- } /* if ((dw_Status & 8) == 8) */
- } else {
- /*******************************/
- /* Chronometer not initialised */
- /*******************************/
- DPRINTK("Chronometer not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /******************************************/
- /* The module is not a Chronometer module */
- /******************************************/
- DPRINTK("The module is not a Chronometer module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ReadChronoValue |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned int_ ui_TimeOut, |
-| unsigned char *_ pb_ChronoStatus, |
-| PULONG_ pul_ChronoValue) |
-+----------------------------------------------------------------------------+
-| Task : Return the chronometer status (pb_ChronoStatus) and the|
-| timing value (pul_ChronoValue) after a stop signal |
-| occur from selected chronometer module (b_ModulNbr). |
-| This function is only available if you have disabled |
-| the interrupt functionality. See function |
-| "i_APCI1710_EnableChrono" and the Interrupt mask |
-| description chapter. |
-| You can test the chronometer status with the |
-| "i_APCI1710_GetChronoProgressStatus" function. |
-| |
-| The value returned in the pul_ChronoValue parameter |
-| is not the real measured timing. |
-| You must use the "i_APCI1710_ConvertChronoValue" |
-| function or perform this operation to calculate the |
-| timing: |
-| |
-| Timing = pul_ChronoValue * pul_RealTimingInterval. |
-| |
-| pul_RealTimingInterval is the value returned by the |
-| "i_APCI1710_InitChrono" function and the time unit is |
-| the b_TimingUnit passed to "i_APCI1710_InitChrono". |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : PULONG_ pb_ChronoStatus : Return the chronometer |
-| status. |
-| 0 : Measurement not started.|
-| No start signal occur. |
-| 1 : Measurement started. |
-| A start signal occur. |
-| 2 : Measurement stopped. |
-| A stop signal occur. |
-| The measurement is |
-| terminated. |
-| 3 : An overflow occurred. |
-| Change the base timing |
-| with the function |
-| "i_APCI1710_InitChrono" |
-| unsigned int * pul_ChronoValue : Chronometer timing value. |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a Chronometer module |
-| -4: Chronometer not initialised see function |
-| "i_APCI1710_InitChrono" |
-| -5: Timeout parameter is wrong (0 to 65535) |
-| -6: Interrupt routine installed. You can not read |
-| directly the chronometer measured timing. |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_ReadChronoValue(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned int ui_TimeOut,
- unsigned char *pb_ChronoStatus,
- unsigned int *pul_ChronoValue)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_Status;
- unsigned int dw_TimeOut = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***********************/
- /* Test if chronometer */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_CHRONOMETER) {
- /***********************************/
- /* Test if chronometer initialised */
- /***********************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_ChronoModuleInfo.b_ChronoInit == 1) {
- /*****************************/
- /* Test the timout parameter */
- /*****************************/
-
- if (ui_TimeOut <= 65535UL) {
-
- for (;;) {
- /*******************/
- /* Read the status */
- /*******************/
-
- dw_Status = inl(devpriv->s_BoardInfos.ui_Address + 8 + (64 * b_ModulNbr));
-
- /********************/
- /* Test if overflow */
- /********************/
-
- if ((dw_Status & 8) == 8) {
- /******************/
- /* Overflow occur */
- /******************/
-
- *pb_ChronoStatus = 3;
-
- /***************************/
- /* Test if continuous mode */
- /***************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.b_CycleMode == APCI1710_CONTINUOUS) {
- /*************************/
- /* Clear status register */
- /*************************/
-
- outl(0, devpriv->s_BoardInfos.ui_Address + 36 + (64 * b_ModulNbr));
- }
-
- break;
- } /* if ((dw_Status & 8) == 8) */
- else {
- /*******************************/
- /* Test if measurement stopped */
- /*******************************/
-
- if ((dw_Status & 2) ==
- 2) {
- /***********************/
- /* A stop signal occur */
- /***********************/
-
- *pb_ChronoStatus
- = 2;
-
- /***************************/
- /* Test if continnous mode */
- /***************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.b_CycleMode == APCI1710_CONTINUOUS) {
- /*************************/
- /* Clear status register */
- /*************************/
-
- outl(0, devpriv->s_BoardInfos.ui_Address + 36 + (64 * b_ModulNbr));
- }
- break;
- } /* if ((dw_Status & 2) == 2) */
- else {
- /*******************************/
- /* Test if measurement started */
- /*******************************/
-
- if ((dw_Status & 1) == 1) {
- /************************/
- /* A start signal occur */
- /************************/
-
- *pb_ChronoStatus = 1;
- } /* if ((dw_Status & 1) == 1) */
- else {
- /***************************/
- /* Measurement not started */
- /***************************/
-
- *pb_ChronoStatus = 0;
- } /* if ((dw_Status & 1) == 1) */
- } /* if ((dw_Status & 2) == 2) */
- } /* if ((dw_Status & 8) == 8) */
-
- if (dw_TimeOut == ui_TimeOut) {
- /*****************/
- /* Timeout occur */
- /*****************/
-
- break;
- } else {
- /*************************/
- /* Increment the timeout */
- /*************************/
-
- dw_TimeOut = dw_TimeOut + 1;
- mdelay(1000);
-
- }
- } /* for (;;) */
-
- /*****************************/
- /* Test if stop signal occur */
- /*****************************/
-
- if (*pb_ChronoStatus == 2) {
- /**********************************/
- /* Read the measured timing value */
- /**********************************/
-
- *pul_ChronoValue = inl(devpriv->s_BoardInfos.ui_Address + 4 + (64 * b_ModulNbr));
-
- if (*pul_ChronoValue != 0) {
- *pul_ChronoValue = *pul_ChronoValue - 1;
- }
- } else {
- /*************************/
- /* Test if timeout occur */
- /*************************/
-
- if ((*pb_ChronoStatus != 3)
- && (dw_TimeOut ==
- ui_TimeOut)
- && (ui_TimeOut != 0)) {
- /*****************/
- /* Timeout occur */
- /*****************/
-
- *pb_ChronoStatus = 4;
- }
- }
-
- } else {
- /******************************/
- /* Timeout parameter is wrong */
- /******************************/
- DPRINTK("Timeout parameter is wrong\n");
- i_ReturnValue = -5;
- }
- } else {
- /*******************************/
- /* Chronometer not initialised */
- /*******************************/
- DPRINTK("Chronometer not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /******************************************/
- /* The module is not a Chronometer module */
- /******************************************/
- DPRINTK("The module is not a Chronometer module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
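-/*
- * Illustrative conversion sketch (editor's addition, not from the original
- * source): as the comment block above states, the raw chronometer value is
- * not a time; it must be scaled by the real timing interval returned by
- * "i_APCI1710_InitChrono". For example, with a real interval of 20 us and
- * a raw value of 2500, the measured time is 2500 * 20 us = 50000 us = 50 ms.
- */
-#if 0 /* example only: not compiled as part of the driver */
-/* Measured time, expressed in the b_TimingUnit chosen at init time. */
-static unsigned long long chrono_raw_to_time(unsigned int chrono_value,
-                                             unsigned int real_timing_interval)
-{
-	return (unsigned long long)chrono_value * real_timing_interval;
-}
-#endif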
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ConvertChronoValue |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| ULONG_ ul_ChronoValue, |
-| PULONG_ pul_Hour, |
-| unsigned char *_ pb_Minute, |
-| unsigned char *_ pb_Second, |
-| unsigned int *_ pui_MilliSecond, |
-| unsigned int *_ pui_MicroSecond, |
-| unsigned int *_ pui_NanoSecond) |
-+----------------------------------------------------------------------------+
-| Task : Convert the chronometer measured timing |
-| (ul_ChronoValue) into h, mn, s, ms, µs, ns. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3)|
-| ULONG_ ul_ChronoValue : Measured chronometer timing |
-| value. |
-| See"i_APCI1710_ReadChronoValue"|
-+----------------------------------------------------------------------------+
-| Output Parameters : PULONG_ pul_Hour : Chronometer timing hour |
-| unsigned char *_ pb_Minute : Chronometer timing minute |
-| unsigned char *_ pb_Second : Chronometer timing second |
-| unsigned int *_ pui_MilliSecond : Chronometer timing milli |
-| second |
-| unsigned int *_ pui_MicroSecond : Chronometer timing micro |
-| second |
-| unsigned int *_ pui_NanoSecond : Chronometer timing nano |
-| second |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a Chronometer module |
-| -4: Chronometer not initialised see function |
-| "i_APCI1710_InitChrono" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_ConvertChronoValue(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned int ul_ChronoValue,
- unsigned int *pul_Hour,
- unsigned char *pb_Minute,
- unsigned char *pb_Second,
- unsigned int *pui_MilliSecond,
- unsigned int *pui_MicroSecond,
- unsigned int *pui_NanoSecond)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- double d_Hour;
- double d_Minute;
- double d_Second;
- double d_MilliSecond;
- double d_MicroSecond;
- double d_NanoSecond;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***********************/
- /* Test if chronometer */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_CHRONOMETER) {
- /***********************************/
- /* Test if chronometer initialised */
- /***********************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_ChronoModuleInfo.b_ChronoInit == 1) {
- fpu_begin();
-
- d_Hour = (double)ul_ChronoValue *(double)
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_ChronoModuleInfo.d_TimingInterval;
-
- switch (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_ChronoModuleInfo.b_TimingUnit) {
- case 0:
- d_Hour = d_Hour / (double)1000.0;
- /* fall through */
-
- case 1:
- d_Hour = d_Hour / (double)1000.0;
- /* fall through */
-
- case 2:
- d_Hour = d_Hour / (double)1000.0;
- /* fall through */
-
- case 3:
- d_Hour = d_Hour / (double)60.0;
- /* fall through */
-
- case 4:
- /**********************/
- /* Calculate the hour */
- /**********************/
-
- d_Hour = d_Hour / (double)60.0;
- *pul_Hour = (unsigned int) d_Hour;
-
- /************************/
- /* Calculate the minute */
- /************************/
-
- d_Minute = d_Hour - *pul_Hour;
- d_Minute = d_Minute * 60;
- *pb_Minute = (unsigned char) d_Minute;
-
- /************************/
- /* Calculate the second */
- /************************/
-
- d_Second = d_Minute - *pb_Minute;
- d_Second = d_Second * 60;
- *pb_Second = (unsigned char) d_Second;
-
- /*****************************/
- /* Calculate the milli second */
- /*****************************/
-
- d_MilliSecond = d_Second - *pb_Second;
- d_MilliSecond = d_MilliSecond * 1000;
- *pui_MilliSecond = (unsigned int) d_MilliSecond;
-
- /******************************/
- /* Calculate the micro second */
- /******************************/
-
- d_MicroSecond =
- d_MilliSecond -
- *pui_MilliSecond;
- d_MicroSecond = d_MicroSecond * 1000;
- *pui_MicroSecond = (unsigned int) d_MicroSecond;
-
- /******************************/
- /* Calculate the nano second */
- /******************************/
-
- d_NanoSecond =
- d_MicroSecond -
- *pui_MicroSecond;
- d_NanoSecond = d_NanoSecond * 1000;
- *pui_NanoSecond = (unsigned int) d_NanoSecond;
- break;
- }
-
- fpu_end();
- } else {
- /*******************************/
- /* Chronometer not initialised */
- /*******************************/
- DPRINTK("Chronometer not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /******************************************/
- /* The module is not a Chronometer module */
- /******************************************/
- DPRINTK("The module is not a Chronometer module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
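-/*
- * Worked example (editor's addition, not from the original source) for the
- * fall-through unit cascade above: assume the timing unit chosen at init
- * time is seconds (case 3) and ul_ChronoValue * d_TimingInterval comes to
- * 3723.5 s. Case 3 divides by 60 (62.058... min), case 4 divides by 60
- * again (1.0343... h), so the outputs are 1 h, 2 min, 3 s, 500 ms, 0 us,
- * 0 ns.
- */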
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name :INT i_APCI1710_InsnReadChrono(struct comedi_device *dev,struct comedi_subdevice *s,
-struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Read functions for Timer |
-+----------------------------------------------------------------------------+
-| Input Parameters :
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value :
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_InsnReadChrono(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned char b_ReadType;
- int i_ReturnValue = insn->n;
-
- b_ReadType = CR_CHAN(insn->chanspec);
-
- switch (b_ReadType) {
- case APCI1710_CHRONO_PROGRESS_STATUS:
- i_ReturnValue = i_APCI1710_GetChronoProgressStatus(dev,
- (unsigned char) CR_AREF(insn->chanspec), (unsigned char *) &data[0]);
- break;
-
- case APCI1710_CHRONO_READVALUE:
- i_ReturnValue = i_APCI1710_ReadChronoValue(dev,
- (unsigned char) CR_AREF(insn->chanspec),
- (unsigned int) insn->unused[0],
- (unsigned char *) &data[0], (unsigned int *) &data[1]);
- break;
-
- case APCI1710_CHRONO_CONVERTVALUE:
- i_ReturnValue = i_APCI1710_ConvertChronoValue(dev,
- (unsigned char) CR_AREF(insn->chanspec),
- (unsigned int) insn->unused[0],
- (unsigned int *) &data[0],
- (unsigned char *) &data[1],
- (unsigned char *) &data[2],
- (unsigned int *) &data[3],
- (unsigned int *) &data[4], (unsigned int *) &data[5]);
- break;
-
- case APCI1710_CHRONO_READINTERRUPT:
- printk("In Chrono Read Interrupt\n");
-
- data[0] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].b_OldModuleMask;
- data[1] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].ul_OldInterruptMask;
- data[2] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].ul_OldCounterLatchValue;
-
- /***************************/
- /* Increment the read FIFO */
- /***************************/
-
- devpriv->s_InterruptParameters.ui_Read =
- (devpriv->s_InterruptParameters.ui_Read + 1) % APCI1710_SAVE_INTERRUPT;
- break;
-
- default:
- printk("ReadType Parameter wrong\n");
- }
-
- if (i_ReturnValue >= 0)
- i_ReturnValue = insn->n;
- return i_ReturnValue;
-
-}
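-/*
- * Illustrative read sketch (editor's addition, not from the original
- * source): the insn_read handler above dispatches on CR_CHAN(chanspec)
- * using the APCI1710_CHRONO_* defines from the top of this file, with the
- * module number again in the AREF field. The subdevice index is an
- * assumption.
- */
-#if 0 /* example only: not compiled as part of the driver */
-#include <string.h>
-#include <comedilib.h>
-
-static int read_chrono_progress_status(comedi_t *it, unsigned int subdev,
-                                       unsigned int module)
-{
-	lsampl_t data[2] = { 0, 0 };
-	comedi_insn insn;
-
-	memset(&insn, 0, sizeof(insn));
-	insn.insn = INSN_READ;
-	insn.n = 2;
-	insn.data = data;
-	insn.subdev = subdev;
-	/* CHAN selects the read type, AREF the module */
-	insn.chanspec = CR_PACK(APCI1710_CHRONO_PROGRESS_STATUS, 0, module);
-
-	if (comedi_do_insn(it, &insn) < 0)
-		return -1;
-	return (int)data[0]; /* 0..3, see i_APCI1710_GetChronoProgressStatus */
-}
-#endif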
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1710_InsnBitsChronoDigitalIO(struct comedi_device *dev,struct comedi_subdevice *s,
- struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Sets the output which has been passed with the |
-| parameter b_Channel. Setting an output means setting an|
-| output high. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3)|
-| unsigned char_ b_OutputChannel : Selection from digital output |
-| CR_CHAN() channel (0 to 2) |
-| 0 : Channel H |
-| 1 : Channel A |
-| 2 : Channel B |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a Chronometer module |
-| -4: The selected digital output is wrong |
-| -5: Chronometer not initialised see function |
-| "i_APCI1710_InitChrono" |
-+----------------------------------------------------------------------------+
-*/
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_SetChronoChlOff |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_OutputChannel) |
-+----------------------------------------------------------------------------+
-| Task              : Resets the output which has been passed with the      |
-| parameter b_Channel. Resetting an output means setting |
-| an output low. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710
- data[0] : Chl ON, Chl OFF , Chl Read , Port Read
-
-| unsigned char_ b_ModulNbr CR_AREF : Selected module number (0 to 3)|
-| unsigned char_ b_OutputChannel CR_CHAN : Selection from digital output |
-| channel (0 to 2) |
-| 0 : Channel H |
-| 1 : Channel A |
-| 2 : Channel B |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a Chronometer module |
-| -4: The selected digital output is wrong |
-| -5: Chronometer not initialised see function |
-| "i_APCI1710_InitChrono" |
-+----------------------------------------------------------------------------+
-*/
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ReadChronoChlValue |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_InputChannel, |
-| unsigned char *_ pb_ChannelStatus) |
-+----------------------------------------------------------------------------+
-| Task : Return the status from selected digital input |
-| (b_InputChannel) from selected chronometer |
-| module (b_ModulNbr). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3)|
-| unsigned char_ b_InputChannel : Selection from digital input |
-| channel (0 to 2) |
-| CR_CHAN() 0 : Channel E |
-| 1 : Channel F |
-| 2 : Channel G |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_ChannelStatus : Digital input channel status.|
-| data[0] 0 : Channel is not active |
-| 1 : Channel is active |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a Chronometer module |
-| -4: The selected digital input is wrong |
-| -5: Chronometer not initialised see function |
-| "i_APCI1710_InitChrono" |
-+----------------------------------------------------------------------------+
-*/
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ReadChronoPortValue |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char *_ pb_PortValue) |
-+----------------------------------------------------------------------------+
-| Task : Return the status from digital inputs port from |
-| selected (b_ModulNbr) chronometer module. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3)|
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_PortValue : Digital inputs port status.
-| data[0]
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a Chronometer module |
-| -4: Chronometer not initialised see function |
-| "i_APCI1710_InitChrono" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_InsnBitsChronoDigitalIO(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned char b_ModulNbr, b_OutputChannel, b_InputChannel, b_IOType;
- unsigned int dw_Status;
- unsigned char *pb_ChannelStatus;
- unsigned char *pb_PortValue;
-
- b_ModulNbr = CR_AREF(insn->chanspec);
- i_ReturnValue = insn->n;
- b_IOType = (unsigned char) data[0];
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***********************/
- /* Test if chronometer */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_CHRONOMETER) {
- /***********************************/
- /* Test if chronometer initialised */
- /***********************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_ChronoModuleInfo.b_ChronoInit == 1) {
- /***********************************/
- /* Test the digital output channel */
- /***********************************/
- switch (b_IOType) {
-
- case APCI1710_CHRONO_SET_CHANNELOFF:
-
- b_OutputChannel =
- (unsigned char) CR_CHAN(insn->chanspec);
- if (b_OutputChannel <= 2) {
-
- outl(0, devpriv->s_BoardInfos.
- ui_Address + 20 +
- (b_OutputChannel * 4) +
- (64 * b_ModulNbr));
- } /* if ((b_OutputChannel >= 0) && (b_OutputChannel <= 2)) */
- else {
- /****************************************/
- /* The selected digital output is wrong */
- /****************************************/
-
- DPRINTK("The selected digital output is wrong\n");
- i_ReturnValue = -4;
-
- } /* if ((b_OutputChannel >= 0) && (b_OutputChannel <= 2)) */
-
- break;
-
- case APCI1710_CHRONO_SET_CHANNELON:
-
- b_OutputChannel =
- (unsigned char) CR_CHAN(insn->chanspec);
- if (b_OutputChannel <= 2) {
-
- outl(1, devpriv->s_BoardInfos.
- ui_Address + 20 +
- (b_OutputChannel * 4) +
- (64 * b_ModulNbr));
- } /* if ((b_OutputChannel >= 0) && (b_OutputChannel <= 2)) */
- else {
- /****************************************/
- /* The selected digital output is wrong */
- /****************************************/
-
- DPRINTK("The selected digital output is wrong\n");
- i_ReturnValue = -4;
-
- } /* if ((b_OutputChannel >= 0) && (b_OutputChannel <= 2)) */
-
- break;
-
- case APCI1710_CHRONO_READ_CHANNEL:
- /**********************************/
- /* Test the digital input channel */
- /**********************************/
- pb_ChannelStatus = (unsigned char *) &data[0];
- b_InputChannel =
- (unsigned char) CR_CHAN(insn->chanspec);
-
- if (b_InputChannel <= 2) {
-
- dw_Status =
- inl(devpriv->
- s_BoardInfos.
- ui_Address + 12 +
- (64 * b_ModulNbr));
-
- *pb_ChannelStatus =
- (unsigned char) (((dw_Status >>
- b_InputChannel)
- & 1) ^ 1);
- } /* if ((b_InputChannel >= 0) && (b_InputChannel <= 2)) */
- else {
- /***************************************/
- /* The selected digital input is wrong */
- /***************************************/
-
- DPRINTK("The selected digital input is wrong\n");
- i_ReturnValue = -4;
- } /* if ((b_InputChannel >= 0) && (b_InputChannel <= 2)) */
-
- break;
-
- case APCI1710_CHRONO_READ_PORT:
-
- pb_PortValue = (unsigned char *) &data[0];
-
- dw_Status =
- inl(devpriv->s_BoardInfos.
- ui_Address + 12 +
- (64 * b_ModulNbr));
-
- *pb_PortValue =
- (unsigned char) ((dw_Status & 0x7) ^ 7);
- break;
- }
- } else {
- /*******************************/
- /* Chronometer not initialised */
- /*******************************/
-
- DPRINTK("Chronometer not initialised\n");
- i_ReturnValue = -5;
- }
- } else {
- /******************************************/
- /* The module is not a Chronometer module */
- /******************************************/
-
- DPRINTK("The module is not a Chronometer module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
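The insn_bits handler above multiplexes four chronometer digital I/O operations through data[0], again with the module number in the aref field and the channel in the chan field. The sketch below shows one way user space could drive an output; the operation codes, subdevice index, and the INSN_BITS wiring are assumptions, since their definitions are not part of this hunk.

/* Hedged sketch; the two macros are placeholders that must match the driver's
 * APCI1710_CHRONO_SET_CHANNELON/OFF values. */
#include <string.h>
#include <comedilib.h>

#define CHRONO_SET_CHANNELON   1   /* assumption */
#define CHRONO_SET_CHANNELOFF  2   /* assumption */

/* Drive one chrono digital output (0 = H, 1 = A, 2 = B) on module 0..3. */
static int chrono_set_output(comedi_t *dev, unsigned int subdev,
			     unsigned int module, unsigned int chan, int on)
{
	comedi_insn insn;
	lsampl_t data[1];

	data[0] = on ? CHRONO_SET_CHANNELON : CHRONO_SET_CHANNELOFF;

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_BITS;  /* data layout follows the handler above, not
				 * the generic mask/bits INSN_BITS convention */
	insn.n = 1;
	insn.data = data;
	insn.subdev = subdev;
	insn.chanspec = CR_PACK(chan, 0, module);

	return comedi_do_insn(dev, &insn);
}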
diff --git a/drivers/staging/comedi/drivers/addi-data/APCI1710_Dig_io.c b/drivers/staging/comedi/drivers/addi-data/APCI1710_Dig_io.c
deleted file mode 100644
index 6b38ce7..0000000
--- a/drivers/staging/comedi/drivers/addi-data/APCI1710_Dig_io.c
+++ /dev/null
@@ -1,1041 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
- Tel: +49(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-You should also find the complete GPL in the COPYING file accompanying this source code.
-
-@endverbatim
-*/
-/*
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-----------------------------------------------------------------------+
- | Project : API APCI1710 | Compiler : gcc |
- | Module name : DIG_IO.C | Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-----------------------------------------------------------------------+
- | Description : APCI-1710 digital I/O module |
- | |
- | |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +-----------------------------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | 16/06/98 | S. Weber | Digital input / output implementation |
- |----------|-----------|------------------------------------------------|
- | 08/05/00 | Guinot C | - 0400/0228 All Function in RING 0 |
- | | | available |
- +-----------------------------------------------------------------------+
- | | | |
- | | | |
- +-----------------------------------------------------------------------+
-*/
-
-/* Digital Output ON or OFF */
-#define APCI1710_ON 1
-#define APCI1710_OFF 0
-
-/* Digital I/O */
-#define APCI1710_INPUT 0
-#define APCI1710_OUTPUT 1
-
-#define APCI1710_DIGIO_MEMORYONOFF 0x10
-#define APCI1710_DIGIO_INIT 0x11
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1710_InsnConfigDigitalIO(struct comedi_device *dev, |
-| struct comedi_subdevice *s,struct comedi_insn *insn,unsigned int *data)|
-+----------------------------------------------------------------------------+
-| Task : Configure the digital I/O operating mode from selected |
-| module (b_ModulNbr). You must calling this function be|
-|                     module  (b_ModulNbr). You must call this function     |
-|                     before calling any other function that accesses the   |
-|                     digital I/O.                                          |
-| Input Parameters : |
-| unsigned char_ b_ModulNbr data[0]: Module number to |
-| configure (0 to 3) |
-| unsigned char_ b_ChannelAMode data[1] : Channel A mode selection |
-| 0 : Channel used for digital |
-| input |
-| 1 : Channel used for digital |
-| output |
-| unsigned char_ b_ChannelBMode data[2] : Channel B mode selection |
-| 0 : Channel used for digital |
-| input |
-| 1 : Channel used for digital |
-| output |
- data[0] memory on/off
-Activates and deactivates the digital output memory.
-                      After having called this function with memory on,     |
-| the outputs you have previously activated with the function are not reset.
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The module parameter is wrong |
-| -3: The module is not a digital I/O module |
-| -4: Bi-directional channel A configuration error |
-| -5: Bi-directional channel B configuration error |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI1710_InsnConfigDigitalIO(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned char b_ModulNbr, b_ChannelAMode, b_ChannelBMode;
- unsigned char b_MemoryOnOff, b_ConfigType;
- int i_ReturnValue = 0;
- unsigned int dw_WriteConfig = 0;
-
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_ConfigType = (unsigned char) data[0]; /* Memory or Init */
- b_ChannelAMode = (unsigned char) data[1];
- b_ChannelBMode = (unsigned char) data[2];
- b_MemoryOnOff = (unsigned char) data[1]; /* if memory operation */
- i_ReturnValue = insn->n;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr >= 4) {
- DPRINTK("Module Number invalid\n");
- i_ReturnValue = -2;
- return i_ReturnValue;
- }
- switch (b_ConfigType) {
- case APCI1710_DIGIO_MEMORYONOFF:
-
- if (b_MemoryOnOff) /* If Memory ON */
- {
- /****************************/
- /* Set the output memory on */
- /****************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_DigitalIOInfo.b_OutputMemoryEnabled = 1;
-
- /***************************/
- /* Clear the output memory */
- /***************************/
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_DigitalIOInfo.dw_OutputMemory = 0;
- } else /* If memory off */
- {
- /*****************************/
- /* Set the output memory off */
- /*****************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_DigitalIOInfo.b_OutputMemoryEnabled = 0;
- }
- break;
-
- case APCI1710_DIGIO_INIT:
-
- /*******************************/
- /* Test if digital I/O counter */
- /*******************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_DIGITAL_IO) {
-
- /***************************************************/
- /* Test the bi-directional channel A configuration */
- /***************************************************/
-
- if ((b_ChannelAMode == 0) || (b_ChannelAMode == 1)) {
- /***************************************************/
- /* Test the bi-directional channel B configuration */
- /***************************************************/
-
- if ((b_ChannelBMode == 0)
- || (b_ChannelBMode == 1)) {
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_DigitalIOInfo.b_DigitalInit =
- 1;
-
- /********************************/
- /* Save channel A configuration */
- /********************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_DigitalIOInfo.
- b_ChannelAMode = b_ChannelAMode;
-
- /********************************/
- /* Save channel B configuration */
- /********************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_DigitalIOInfo.
- b_ChannelBMode = b_ChannelBMode;
-
- /*****************************************/
- /* Set the channel A and B configuration */
- /*****************************************/
-
- dw_WriteConfig =
- (unsigned int) (b_ChannelAMode |
- (b_ChannelBMode * 2));
-
- /***************************/
- /* Write the configuration */
- /***************************/
-
- outl(dw_WriteConfig,
- devpriv->s_BoardInfos.
- ui_Address + 4 +
- (64 * b_ModulNbr));
-
- } else {
- /************************************************/
- /* Bi-directional channel B configuration error */
- /************************************************/
- DPRINTK("Bi-directional channel B configuration error\n");
- i_ReturnValue = -5;
- }
-
- } else {
- /************************************************/
- /* Bi-directional channel A configuration error */
- /************************************************/
- DPRINTK("Bi-directional channel A configuration error\n");
- i_ReturnValue = -4;
-
- }
-
- } else {
- /******************************************/
- /* The module is not a digital I/O module */
- /******************************************/
- DPRINTK("The module is not a digital I/O module\n");
- i_ReturnValue = -3;
- }
- } /* end of Switch */
- printk("Return Value %d\n", i_ReturnValue);
- return i_ReturnValue;
-}
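Tying the pieces together: the config handler above expects the module number in CR_AREF(chanspec), a sub-command in data[0] (APCI1710_DIGIO_MEMORYONOFF 0x10 or APCI1710_DIGIO_INIT 0x11, defined earlier in this file), and the channel A/B directions in data[1]/data[2]. The following is a hedged comedilib sketch of the init call; the subdevice index is an assumption, and the INSN_CONFIG wiring is inferred from the handler's name.

/* Hedged sketch of the digital I/O init sequence described above. */
#include <string.h>
#include <comedilib.h>

static int digio_init(comedi_t *dev, unsigned int subdev, unsigned int module,
		      unsigned int chan_a_output, unsigned int chan_b_output)
{
	comedi_insn insn;
	lsampl_t data[3];

	data[0] = 0x11;           /* APCI1710_DIGIO_INIT */
	data[1] = chan_a_output;  /* channel A: 0 = input, 1 = output */
	data[2] = chan_b_output;  /* channel B: 0 = input, 1 = output */

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_CONFIG;  /* assumption based on the handler's name */
	insn.n = 3;
	insn.data = data;
	insn.subdev = subdev;     /* assumption: subdevice index not in this hunk */
	insn.chanspec = CR_PACK(0, 0, module);  /* module 0..3 in the aref field */

	return comedi_do_insn(dev, &insn);
}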
-
-/*
-+----------------------------------------------------------------------------+
-| INPUT FUNCTIONS |
-+----------------------------------------------------------------------------+
-*/
-
-/*
-+----------------------------------------------------------------------------+
-
-|INT i_APCI1710_InsnReadDigitalIOChlValue(struct comedi_device *dev,comedi_subdevice
-*s, struct comedi_insn *insn,unsigned int *data)
-
-+----------------------------------------------------------------------------+
-| Task : Read the status from selected digital I/O digital input|
-| (b_InputChannel) |
-+----------------------------------------------------------------------------|
-| Input Parameters    :                                                      |
-| unsigned char_ b_ModulNbr CR_AREF(chanspec) : Selected module number |
-| (0 to 3) |
-| unsigned char_ b_InputChannel CR_CHAN(chanspec) : Selection from digital |
-| input ( 0 to 6) |
-| 0 : Channel C |
-| 1 : Channel D |
-| 2 : Channel E |
-| 3 : Channel F |
-| 4 : Channel G |
-| 5 : Channel A |
-|                                                6 : Channel B              |
-+----------------------------------------------------------------------------+
-| Output Parameters : data[0] : Digital input channel |
-| status |
-|                                                0 : Channel is not active|
-|                                                1 : Channel is active    |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The module parameter is wrong |
-| -3: The module is not a digital I/O module |
-| -4: The selected digital I/O digital input is wrong |
-| -5: Digital I/O not initialised |
-| -6: The digital channel A is used for output |
-| -7: The digital channel B is used for output |
-+----------------------------------------------------------------------------+
-*/
-
-/* _INT_ i_APCI1710_ReadDigitalIOChlValue (unsigned char_ b_BoardHandle, */
-/*
-* unsigned char_ b_ModulNbr, unsigned char_ b_InputChannel,
-* unsigned char *_ pb_ChannelStatus)
-*/
-static int i_APCI1710_InsnReadDigitalIOChlValue(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_StatusReg;
- unsigned char b_ModulNbr, b_InputChannel;
- unsigned char *pb_ChannelStatus;
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_InputChannel = (unsigned char) CR_CHAN(insn->chanspec);
- data[0] = 0;
- pb_ChannelStatus = (unsigned char *) &data[0];
- i_ReturnValue = insn->n;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if digital I/O counter */
- /*******************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_DIGITAL_IO) {
- /******************************************/
-                        /* Test the digital input channel number  */
- /******************************************/
-
- if (b_InputChannel <= 6) {
- /**********************************************/
- /* Test if the digital I/O module initialised */
- /**********************************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_DigitalIOInfo.b_DigitalInit == 1) {
- /**********************************/
- /* Test if channel A or channel B */
- /**********************************/
-
- if (b_InputChannel > 4) {
- /*********************/
- /* Test if channel A */
- /*********************/
-
- if (b_InputChannel == 5) {
- /***************************/
- /* Test the channel A mode */
- /***************************/
-
- if (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- b_ChannelAMode
- != 0) {
- /********************************************/
- /* The digital channel A is used for output */
- /********************************************/
-
- i_ReturnValue =
- -6;
- }
- } /* if (b_InputChannel == 5) */
- else {
- /***************************/
- /* Test the channel B mode */
- /***************************/
-
- if (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- b_ChannelBMode
- != 0) {
- /********************************************/
- /* The digital channel B is used for output */
- /********************************************/
-
- i_ReturnValue =
- -7;
- }
- } /* if (b_InputChannel == 5) */
- } /* if (b_InputChannel > 4) */
-
- /***********************/
- /* Test if error occur */
- /***********************/
-
- if (i_ReturnValue >= 0) {
- /**************************/
- /* Read all digital input */
- /**************************/
-
-/*
-* INPDW (ps_APCI1710Variable-> s_Board [b_BoardHandle].
-* s_BoardInfos. ui_Address + (64 * b_ModulNbr), &dw_StatusReg);
-*/
-
- dw_StatusReg =
- inl(devpriv->
- s_BoardInfos.
- ui_Address +
- (64 * b_ModulNbr));
-
- *pb_ChannelStatus =
- (unsigned char) ((dw_StatusReg ^
- 0x1C) >>
- b_InputChannel) & 1;
-
- } /* if (i_ReturnValue == 0) */
- } else {
- /*******************************/
- /* Digital I/O not initialised */
- /*******************************/
- DPRINTK("Digital I/O not initialised\n");
- i_ReturnValue = -5;
- }
- } else {
- /********************************/
- /* Selected digital input error */
- /********************************/
- DPRINTK("Selected digital input error\n");
- i_ReturnValue = -4;
- }
- } else {
- /******************************************/
- /* The module is not a digital I/O module */
- /******************************************/
- DPRINTK("The module is not a digital I/O module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
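For the read path just shown, channels 0 to 6 map to inputs C, D, E, F, G, A and B, the module number again travels in the aref field, and the handler writes 0 or 1 into data[0]. A hedged helper sketch follows (subdevice index and the INSN_READ wiring are assumptions):

/* Hedged sketch: read one digital input of an APCI-1710 digital I/O module. */
#include <string.h>
#include <comedilib.h>

static int digio_read_channel(comedi_t *dev, unsigned int subdev,
			      unsigned int module, unsigned int chan,
			      lsampl_t *status)
{
	comedi_insn insn;

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_READ;   /* assumption based on the handler's name */
	insn.n = 1;
	insn.data = status;      /* handler stores 0 (inactive) or 1 (active) */
	insn.subdev = subdev;    /* assumption */
	insn.chanspec = CR_PACK(chan, 0, module);  /* chan 0..6, module 0..3 */

	return comedi_do_insn(dev, &insn);
}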
-
-/*
-+----------------------------------------------------------------------------+
-| OUTPUT FUNCTIONS |
-+----------------------------------------------------------------------------+
-*/
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1710_InsnWriteDigitalIOChlOnOff(comedi_device
-|*dev,struct comedi_subdevice *s,struct comedi_insn *insn,unsigned int *data)
-
-+----------------------------------------------------------------------------+
-| Task              : Sets or resets the output which has been passed with the |
-|                     parameter b_Channel. Setting an output means setting |
-|                     an output high.                                       |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr (aref ) : Selected module number (0 to 3)|
-| unsigned char_ b_OutputChannel (CR_CHAN) : Selection from digital output |
-| channel (0 to 2) |
-| 0 : Channel H |
-| 1 : Channel A |
-| 2 : Channel B |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The module parameter is wrong |
-| -3: The module is not a digital I/O module |
-| -4: The selected digital output is wrong |
-| -5: digital I/O not initialised see function |
-| " i_APCI1710_InitDigitalIO" |
-| -6: The digital channel A is used for input |
-| -7: The digital channel B is used for input
- -8: Digital Output Memory OFF. |
-| Use previously the function |
-| "i_APCI1710_SetDigitalIOMemoryOn". |
-+----------------------------------------------------------------------------+
-*/
-
-/*
-* _INT_ i_APCI1710_SetDigitalIOChlOn (unsigned char_ b_BoardHandle,
-* unsigned char_ b_ModulNbr, unsigned char_ b_OutputChannel)
-*/
-static int i_APCI1710_InsnWriteDigitalIOChlOnOff(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_WriteValue = 0;
- unsigned char b_ModulNbr, b_OutputChannel;
- i_ReturnValue = insn->n;
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_OutputChannel = CR_CHAN(insn->chanspec);
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if digital I/O counter */
- /*******************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_DIGITAL_IO) {
- /**********************************************/
- /* Test if the digital I/O module initialised */
- /**********************************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_DigitalIOInfo.b_DigitalInit == 1) {
- /******************************************/
- /* Test the digital output channel number */
- /******************************************/
-
- switch (b_OutputChannel) {
- /*************/
- /* Channel H */
- /*************/
-
- case 0:
- break;
-
- /*************/
- /* Channel A */
- /*************/
-
- case 1:
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_DigitalIOInfo.
- b_ChannelAMode != 1) {
- /*******************************************/
- /* The digital channel A is used for input */
- /*******************************************/
-
- i_ReturnValue = -6;
- }
- break;
-
- /*************/
- /* Channel B */
- /*************/
-
- case 2:
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_DigitalIOInfo.
- b_ChannelBMode != 1) {
- /*******************************************/
- /* The digital channel B is used for input */
- /*******************************************/
-
- i_ReturnValue = -7;
- }
- break;
-
- default:
- /****************************************/
- /* The selected digital output is wrong */
- /****************************************/
-
- i_ReturnValue = -4;
- break;
- }
-
- /***********************/
- /* Test if error occur */
- /***********************/
-
- if (i_ReturnValue >= 0) {
-
- /*********************************/
- /* Test if set channel ON */
- /*********************************/
- if (data[0]) {
- /*********************************/
- /* Test if output memory enabled */
- /*********************************/
-
- if (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- b_OutputMemoryEnabled ==
- 1) {
- dw_WriteValue =
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- dw_OutputMemory
- | (1 <<
- b_OutputChannel);
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- dw_OutputMemory
- = dw_WriteValue;
- } else {
- dw_WriteValue =
- 1 <<
- b_OutputChannel;
- }
- } /* set channel off */
- else {
- if (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- b_OutputMemoryEnabled ==
- 1) {
- dw_WriteValue =
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- dw_OutputMemory
- & (0xFFFFFFFFUL
- -
- (1 << b_OutputChannel));
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- dw_OutputMemory
- = dw_WriteValue;
- } else {
- /*****************************/
- /* Digital Output Memory OFF */
- /*****************************/
- /* +Use previously the function "i_APCI1710_SetDigitalIOMemoryOn" */
- i_ReturnValue = -8;
- }
-
- }
- /*******************/
- /* Write the value */
- /*******************/
-
- /* OUTPDW (ps_APCI1710Variable->
- * s_Board [b_BoardHandle].
- * s_BoardInfos. ui_Address + (64 * b_ModulNbr),
- * dw_WriteValue);
- */
- outl(dw_WriteValue,
- devpriv->s_BoardInfos.
- ui_Address + (64 * b_ModulNbr));
- }
- } else {
- /*******************************/
- /* Digital I/O not initialised */
- /*******************************/
-
- i_ReturnValue = -5;
- }
- } else {
- /******************************************/
- /* The module is not a digital I/O module */
- /******************************************/
-
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
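The write handler above tests data[0] to decide between switching the selected output (0 = H, 1 = A, 2 = B) on or off, and fails with -8 when an OFF is requested while the output memory is disabled. A hedged helper sketch; the subdevice index and the INSN_WRITE wiring are assumptions.

/* Hedged sketch: switch one digital I/O output channel on or off. */
#include <string.h>
#include <comedilib.h>

static int digio_set_channel(comedi_t *dev, unsigned int subdev,
			     unsigned int module, unsigned int chan, int on)
{
	comedi_insn insn;
	lsampl_t data[1];

	data[0] = on ? 1 : 0;    /* handler checks data[0]: non-zero = ON */

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_WRITE;  /* assumption based on the handler's name */
	insn.n = 1;
	insn.data = data;
	insn.subdev = subdev;    /* assumption */
	insn.chanspec = CR_PACK(chan, 0, module);  /* 0 = H, 1 = A, 2 = B */

	return comedi_do_insn(dev, &insn);
}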
-
-/*
-+----------------------------------------------------------------------------+
-
-|INT i_APCI1710_InsnBitsDigitalIOPortOnOff(struct comedi_device *dev,comedi_subdevice
- *s, struct comedi_insn *insn,unsigned int *data)
-+----------------------------------------------------------------------------+
-| Task : write:
- Sets or resets one or several outputs from port. |
-| Setting an output means setting an output high. |
-| If you have switched OFF the digital output memory |
-|                     (OFF), all the other outputs are set to "0".
-
-| read:
- Read the status from digital input port |
-| from selected digital I/O module (b_ModulNbr)
-+----------------------------------------------------------------------------+
-| Input Parameters :
- unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr CR_AREF(aref) : Selected module number (0 to 3)|
-| unsigned char_ b_PortValue CR_CHAN(chanspec) : Output Value ( 0 To 7 )
-| data[0] read or write port
-| data[1] if write then indicate ON or OFF
-
-| if read : data[1] will return port status.
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value :
-
-| INPUT :
-
- 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The module parameter is wrong |
-| -3: The module is not a digital I/O module |
-| -4: Digital I/O not initialised
-
- OUTPUT: 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The module parameter is wrong |
-| -3: The module is not a digital I/O module |
-| -4: Output value wrong |
-| -5: digital I/O not initialised see function |
-| " i_APCI1710_InitDigitalIO" |
-| -6: The digital channel A is used for input |
-| -7: The digital channel B is used for input
- -8: Digital Output Memory OFF. |
-| Use previously the function |
-| "i_APCI1710_SetDigitalIOMemoryOn". |
-+----------------------------------------------------------------------------+
-*/
-
-/*
- * _INT_ i_APCI1710_SetDigitalIOPortOn (unsigned char_
- * b_BoardHandle, unsigned char_ b_ModulNbr, unsigned char_
- * b_PortValue)
-*/
-static int i_APCI1710_InsnBitsDigitalIOPortOnOff(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_WriteValue = 0;
- unsigned int dw_StatusReg;
- unsigned char b_ModulNbr, b_PortValue;
- unsigned char b_PortOperation, b_PortOnOFF;
-
- unsigned char *pb_PortValue;
-
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_PortOperation = (unsigned char) data[0]; /* Input or output */
- b_PortOnOFF = (unsigned char) data[1]; /* if output then On or Off */
- b_PortValue = (unsigned char) data[2]; /* if output then value */
- i_ReturnValue = insn->n;
- pb_PortValue = (unsigned char *) &data[0];
-/* if input then read value */
-
- switch (b_PortOperation) {
- case APCI1710_INPUT:
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if digital I/O counter */
- /*******************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_DIGITAL_IO) {
- /**********************************************/
- /* Test if the digital I/O module initialised */
- /**********************************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_DigitalIOInfo.b_DigitalInit == 1) {
- /**************************/
- /* Read all digital input */
- /**************************/
-
- /* INPDW (ps_APCI1710Variable->
- * s_Board [b_BoardHandle].
- * s_BoardInfos.
- * ui_Address + (64 * b_ModulNbr),
- * &dw_StatusReg);
- */
-
- dw_StatusReg =
- inl(devpriv->s_BoardInfos.
- ui_Address + (64 * b_ModulNbr));
- *pb_PortValue =
- (unsigned char) (dw_StatusReg ^ 0x1C);
-
- } else {
- /*******************************/
- /* Digital I/O not initialised */
- /*******************************/
-
- i_ReturnValue = -4;
- }
- } else {
- /******************************************/
- /* The module is not a digital I/O module */
- /******************************************/
-
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- i_ReturnValue = -2;
- }
-
- break;
-
- case APCI1710_OUTPUT:
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if digital I/O counter */
- /*******************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_DIGITAL_IO) {
- /**********************************************/
- /* Test if the digital I/O module initialised */
- /**********************************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_DigitalIOInfo.b_DigitalInit == 1) {
- /***********************/
- /* Test the port value */
- /***********************/
-
- if (b_PortValue <= 7) {
- /***********************************/
- /* Test the digital output channel */
- /***********************************/
-
- /**************************/
- /* Test if channel A used */
- /**************************/
-
- if ((b_PortValue & 2) == 2) {
- if (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- b_ChannelAMode
- != 1) {
- /*******************************************/
- /* The digital channel A is used for input */
- /*******************************************/
-
- i_ReturnValue =
- -6;
- }
- } /* if ((b_PortValue & 2) == 2) */
-
- /**************************/
- /* Test if channel B used */
- /**************************/
-
- if ((b_PortValue & 4) == 4) {
- if (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- b_ChannelBMode
- != 1) {
- /*******************************************/
- /* The digital channel B is used for input */
- /*******************************************/
-
- i_ReturnValue =
- -7;
- }
- } /* if ((b_PortValue & 4) == 4) */
-
- /***********************/
- /* Test if error occur */
- /***********************/
-
- if (i_ReturnValue >= 0) {
-
- /* if(data[1]) { */
-
- switch (b_PortOnOFF) {
- /*********************************/
- /* Test if set Port ON */
- /*********************************/
-
- case APCI1710_ON:
-
- /*********************************/
- /* Test if output memory enabled */
- /*********************************/
-
- if (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- b_OutputMemoryEnabled
- == 1) {
- dw_WriteValue
- =
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- dw_OutputMemory
- |
- b_PortValue;
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- dw_OutputMemory
- =
- dw_WriteValue;
- } else {
- dw_WriteValue
- =
- b_PortValue;
- }
- break;
-
- /* If Set PORT OFF */
- case APCI1710_OFF:
-
- /*********************************/
- /* Test if output memory enabled */
- /*********************************/
-
- if (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- b_OutputMemoryEnabled
- == 1) {
- dw_WriteValue
- =
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- dw_OutputMemory
- &
- (0xFFFFFFFFUL
- -
- b_PortValue);
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- dw_OutputMemory
- =
- dw_WriteValue;
- } else {
- /*****************************/
- /* Digital Output Memory OFF */
- /*****************************/
-
- i_ReturnValue
- =
- -8;
- }
- } /* switch */
-
- /*******************/
- /* Write the value */
- /*******************/
-
- /* OUTPDW (ps_APCI1710Variable->
- * s_Board [b_BoardHandle].
- * s_BoardInfos.
- * ui_Address + (64 * b_ModulNbr),
- * dw_WriteValue); */
-
- outl(dw_WriteValue,
- devpriv->
- s_BoardInfos.
- ui_Address +
- (64 * b_ModulNbr));
- }
- } else {
- /**********************/
- /* Output value wrong */
- /**********************/
-
- i_ReturnValue = -4;
- }
- } else {
- /*******************************/
- /* Digital I/O not initialised */
- /*******************************/
-
- i_ReturnValue = -5;
- }
- } else {
- /******************************************/
- /* The module is not a digital I/O module */
- /******************************************/
-
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- i_ReturnValue = -2;
- }
- break;
-
- default:
- i_ReturnValue = -9;
- DPRINTK("NO INPUT/OUTPUT specified\n");
- } /* switch INPUT / OUTPUT */
- return i_ReturnValue;
-}
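The port handler above uses the data[] layout documented in its header: data[0] selects APCI1710_INPUT (0) or APCI1710_OUTPUT (1), data[1] selects APCI1710_ON/APCI1710_OFF for writes, and data[2] carries the 3-bit port value (bit 1 = channel A, bit 2 = channel B). Below is a hedged write helper; the subdevice index and the INSN_BITS wiring are assumptions.

/* Hedged sketch: switch a 3-bit pattern of the digital I/O port on or off. */
#include <string.h>
#include <comedilib.h>

static int digio_port_write(comedi_t *dev, unsigned int subdev,
			    unsigned int module, int on, unsigned int mask)
{
	comedi_insn insn;
	lsampl_t data[3];

	data[0] = 1;            /* APCI1710_OUTPUT */
	data[1] = on ? 1 : 0;   /* APCI1710_ON / APCI1710_OFF */
	data[2] = mask & 0x7;   /* port value 0..7 */

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_BITS;  /* assumption based on the handler's name */
	insn.n = 3;
	insn.data = data;
	insn.subdev = subdev;   /* assumption */
	insn.chanspec = CR_PACK(0, 0, module);  /* module 0..3 in the aref field */

	return comedi_do_insn(dev, &insn);
}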
diff --git a/drivers/staging/comedi/drivers/addi-data/APCI1710_INCCPT.c b/drivers/staging/comedi/drivers/addi-data/APCI1710_INCCPT.c
deleted file mode 100644
index 70a7f95..0000000
--- a/drivers/staging/comedi/drivers/addi-data/APCI1710_INCCPT.c
+++ /dev/null
@@ -1,5465 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
- Tel: +49(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-You should also find the complete GPL in the COPYING file accompanying this source code.
-
-@endverbatim
-*/
-/*
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-----------------------------------------------------------------------+
- | Project : API APCI1710 | Compiler : gcc |
- | Module name : INC_CPT.C | Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-----------------------------------------------------------------------+
- | Description : APCI-1710 incremental counter module |
- | |
- | |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +-----------------------------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | | | |
- |----------|-----------|------------------------------------------------|
- | 08/05/00 | Guinot C | - 0400/0228 All Function in RING 0 |
- | | | available |
- +-----------------------------------------------------------------------+
- | 29/06/01 | Guinot C. | - 1100/0231 -> 0701/0232 |
- | | | See i_APCI1710_DisableFrequencyMeasurement |
- +-----------------------------------------------------------------------+
-*/
-
-#define APCI1710_16BIT_COUNTER 0x10
-#define APCI1710_32BIT_COUNTER 0x0
-#define APCI1710_QUADRUPLE_MODE 0x0
-#define APCI1710_DOUBLE_MODE 0x3
-#define APCI1710_SIMPLE_MODE 0xF
-#define APCI1710_DIRECT_MODE 0x80
-#define APCI1710_HYSTERESIS_ON 0x60
-#define APCI1710_HYSTERESIS_OFF 0x0
-#define APCI1710_INCREMENT 0x60
-#define APCI1710_DECREMENT 0x0
-#define APCI1710_LATCH_COUNTER 0x1
-#define APCI1710_CLEAR_COUNTER 0x0
-#define APCI1710_LOW 0x0
-#define APCI1710_HIGH 0x1
-
-/*********************/
-/* Version 0600-0229 */
-/*********************/
-#define APCI1710_HIGH_EDGE_CLEAR_COUNTER 0x0
-#define APCI1710_HIGH_EDGE_LATCH_COUNTER 0x1
-#define APCI1710_LOW_EDGE_CLEAR_COUNTER 0x2
-#define APCI1710_LOW_EDGE_LATCH_COUNTER 0x3
-#define APCI1710_HIGH_EDGE_LATCH_AND_CLEAR_COUNTER 0x4
-#define APCI1710_LOW_EDGE_LATCH_AND_CLEAR_COUNTER 0x5
-#define APCI1710_SOURCE_0 0x0
-#define APCI1710_SOURCE_1 0x1
-
-#define APCI1710_30MHZ 30
-#define APCI1710_33MHZ 33
-#define APCI1710_40MHZ 40
-
-#define APCI1710_ENABLE_LATCH_INT 0x80
-#define APCI1710_DISABLE_LATCH_INT (~APCI1710_ENABLE_LATCH_INT)
-
-#define APCI1710_INDEX_LATCH_COUNTER 0x10
-#define APCI1710_INDEX_AUTO_MODE 0x8
-#define APCI1710_ENABLE_INDEX 0x4
-#define APCI1710_DISABLE_INDEX (~APCI1710_ENABLE_INDEX)
-#define APCI1710_ENABLE_LATCH_AND_CLEAR 0x8
-#define APCI1710_DISABLE_LATCH_AND_CLEAR (~APCI1710_ENABLE_LATCH_AND_CLEAR)
-#define APCI1710_SET_LOW_INDEX_LEVEL 0x4
-#define APCI1710_SET_HIGH_INDEX_LEVEL (~APCI1710_SET_LOW_INDEX_LEVEL)
-#define APCI1710_INVERT_INDEX_RFERENCE 0x2
-#define APCI1710_DEFAULT_INDEX_RFERENCE (~APCI1710_INVERT_INDEX_RFERENCE)
-
-#define APCI1710_ENABLE_INDEX_INT 0x1
-#define APCI1710_DISABLE_INDEX_INT (~APCI1710_ENABLE_INDEX_INT)
-
-#define APCI1710_ENABLE_FREQUENCY 0x4
-#define APCI1710_DISABLE_FREQUENCY (~APCI1710_ENABLE_FREQUENCY)
-
-#define APCI1710_ENABLE_FREQUENCY_INT 0x8
-#define APCI1710_DISABLE_FREQUENCY_INT (~APCI1710_ENABLE_FREQUENCY_INT)
-
-#define APCI1710_ENABLE_40MHZ_FREQUENCY 0x40
-#define APCI1710_DISABLE_40MHZ_FREQUENCY (~APCI1710_ENABLE_40MHZ_FREQUENCY)
-
-#define APCI1710_ENABLE_40MHZ_FILTER 0x80
-#define APCI1710_DISABLE_40MHZ_FILTER (~APCI1710_ENABLE_40MHZ_FILTER)
-
-#define APCI1710_ENABLE_COMPARE_INT 0x2
-#define APCI1710_DISABLE_COMPARE_INT (~APCI1710_ENABLE_COMPARE_INT)
-
-#define APCI1710_ENABLE_INDEX_ACTION 0x20
-#define APCI1710_DISABLE_INDEX_ACTION (~APCI1710_ENABLE_INDEX_ACTION)
-#define APCI1710_REFERENCE_HIGH 0x40
-#define APCI1710_REFERENCE_LOW (~APCI1710_REFERENCE_HIGH)
-
-#define APCI1710_TOR_GATE_LOW 0x40
-#define APCI1710_TOR_GATE_HIGH (~APCI1710_TOR_GATE_LOW)
-
-/* INSN CONFIG */
-#define APCI1710_INCCPT_INITCOUNTER 100
-#define APCI1710_INCCPT_COUNTERAUTOTEST 101
-#define APCI1710_INCCPT_INITINDEX 102
-#define APCI1710_INCCPT_INITREFERENCE 103
-#define APCI1710_INCCPT_INITEXTERNALSTROBE 104
-#define APCI1710_INCCPT_INITCOMPARELOGIC 105
-#define APCI1710_INCCPT_INITFREQUENCYMEASUREMENT 106
-
-/* INSN READ */
-#define APCI1710_INCCPT_READLATCHREGISTERSTATUS 200
-#define APCI1710_INCCPT_READLATCHREGISTERVALUE 201
-#define APCI1710_INCCPT_READ16BITCOUNTERVALUE 202
-#define APCI1710_INCCPT_READ32BITCOUNTERVALUE 203
-#define APCI1710_INCCPT_GETINDEXSTATUS 204
-#define APCI1710_INCCPT_GETREFERENCESTATUS 205
-#define APCI1710_INCCPT_GETUASSTATUS 206
-#define APCI1710_INCCPT_GETCBSTATUS 207
-#define APCI1710_INCCPT_GET16BITCBSTATUS 208
-#define APCI1710_INCCPT_GETUDSTATUS 209
-#define APCI1710_INCCPT_GETINTERRUPTUDLATCHEDSTATUS 210
-#define APCI1710_INCCPT_READFREQUENCYMEASUREMENT 211
-#define APCI1710_INCCPT_READINTERRUPT 212
-
-/* INSN BITS */
-#define APCI1710_INCCPT_CLEARCOUNTERVALUE 300
-#define APCI1710_INCCPT_CLEARALLCOUNTERVALUE 301
-#define APCI1710_INCCPT_SETINPUTFILTER 302
-#define APCI1710_INCCPT_LATCHCOUNTER 303
-#define APCI1710_INCCPT_SETINDEXANDREFERENCESOURCE 304
-#define APCI1710_INCCPT_SETDIGITALCHLON 305
-#define APCI1710_INCCPT_SETDIGITALCHLOFF 306
-
-/* INSN WRITE */
-#define APCI1710_INCCPT_ENABLELATCHINTERRUPT 400
-#define APCI1710_INCCPT_DISABLELATCHINTERRUPT 401
-#define APCI1710_INCCPT_WRITE16BITCOUNTERVALUE 402
-#define APCI1710_INCCPT_WRITE32BITCOUNTERVALUE 403
-#define APCI1710_INCCPT_ENABLEINDEX 404
-#define APCI1710_INCCPT_DISABLEINDEX 405
-#define APCI1710_INCCPT_ENABLECOMPARELOGIC 406
-#define APCI1710_INCCPT_DISABLECOMPARELOGIC 407
-#define APCI1710_INCCPT_ENABLEFREQUENCYMEASUREMENT 408
-#define APCI1710_INCCPT_DISABLEFREQUENCYMEASUREMENT 409
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_InitCounter |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_CounterRange, |
-| unsigned char_ b_FirstCounterModus, |
-| unsigned char_ b_FirstCounterOption, |
-| unsigned char_ b_SecondCounterModus, |
-| unsigned char_ b_SecondCounterOption) |
-+----------------------------------------------------------------------------+
-| Task : Configure the counter operating mode from selected |
-|                     module (b_ModulNbr). You must call this function      |
-|                     before calling any other function that accesses the   |
-|                     counters.                                             |
-| |
-| Counter range |
-| ------------- |
-| +------------------------------------+-----------------------------------+ |
-| | Parameter Passed value | Description | |
-| |------------------------------------+-----------------------------------| |
-| |b_ModulNbr APCI1710_16BIT_COUNTER | The module is configured for | |
-| | | two 16-bit counter. | |
-| | | - b_FirstCounterModus and | |
-| | | b_FirstCounterOption | |
-| | | configure the first 16 bit | |
-| | | counter. | |
-| | | - b_SecondCounterModus and | |
-| | | b_SecondCounterOption | |
-| | | configure the second 16 bit | |
-| | | counter. | |
-| |------------------------------------+-----------------------------------| |
-| |b_ModulNbr APCI1710_32BIT_COUNTER | The module is configured for one | |
-| | | 32-bit counter. | |
-| | | - b_FirstCounterModus and | |
-| | | b_FirstCounterOption | |
-| | | configure the 32 bit counter. | |
-| | | - b_SecondCounterModus and | |
-| | | b_SecondCounterOption | |
-| | | are not used and have no | |
-| | | importance. | |
-| +------------------------------------+-----------------------------------+ |
-| |
-| Counter operating mode |
-| ---------------------- |
-| |
-| +--------------------+-------------------------+-------------------------+ |
-| | Parameter | Passed value | Description | |
-| |--------------------+-------------------------+-------------------------| |
-| |b_FirstCounterModus | APCI1710_QUADRUPLE_MODE | In the quadruple mode, | |
-| | or | | the edge analysis | |
-| |b_SecondCounterModus| | circuit generates a | |
-| | | | counting pulse from | |
-| | | | each edge of 2 signals | |
-| | | | which are phase shifted | |
-| | | | in relation to each | |
-| | | | other. | |
-| |--------------------+-------------------------+-------------------------| |
-| |b_FirstCounterModus | APCI1710_DOUBLE_MODE | Functions in the same | |
-| | or | | way as the quadruple | |
-| |b_SecondCounterModus| | mode, except that only | |
-| | | | two of the four edges | |
-| | | | are analysed per | |
-| | | | period | |
-| |--------------------+-------------------------+-------------------------| |
-| |b_FirstCounterModus | APCI1710_SIMPLE_MODE | Functions in the same | |
-| | or | | way as the quadruple | |
-| |b_SecondCounterModus| | mode, except that only | |
-| | | | one of the four edges | |
-| | | | is analysed per | |
-| | | | period. | |
-| |--------------------+-------------------------+-------------------------| |
-| |b_FirstCounterModus | APCI1710_DIRECT_MODE    | In the direct mode both | |
-| |        or          |                         | edge analysis circuits  | |
-| |b_SecondCounterModus|                         | are inactive.           | |
-| |                    |                         | The inputs A, B in the  | |
-| |                    |                         | 32-bit mode or A, B and | |
-| |                    |                         | C, D in the 16-bit mode | |
-| |                    |                         | each represent one clock| |
-| |                    |                         | pulse gate circuit.     | |
-| |                    |                         | Thereby frequency and   | |
-| |                    |                         | pulse duration          | |
-| |                    |                         | measurements can be     | |
-| |                    |                         | performed.              | |
-| +--------------------+-------------------------+-------------------------+ |
-| |
-| |
-| IMPORTANT! |
-|   If you have configured the module for two 16-bit counters, a mixed       |
-|   mode with a counter in quadruple/double/simple mode                      |
-|   and the other counter in direct mode is not possible!                    |
-| |
-| |
-| Counter operating option for quadruple/double/simple mode |
-| --------------------------------------------------------- |
-| |
-| +----------------------+-------------------------+------------------------+|
-| | Parameter | Passed value | Description ||
-| |----------------------+-------------------------+------------------------||
-| |b_FirstCounterOption  | APCI1710_HYSTERESIS_ON  | One hysteresis circuit ||
-| |       or             |                         | is available in both   ||
-| |b_SecondCounterOption |                         | edge analysis circuits.||
-| |                      |                         | It suppresses the first||
-| |                      |                         | counting pulse each    ||
-| |                      |                         | time the direction of  ||
-| |                      |                         | rotation changes.      ||
-| |----------------------+-------------------------+------------------------||
-| |b_FirstCounterOption  | APCI1710_HYSTERESIS_OFF | The first counting     ||
-| |       or             |                         | pulse is not suppressed||
-| |b_SecondCounterOption |                         | after a change of      ||
-| |                      |                         | rotation.              ||
-| +----------------------+-------------------------+------------------------+|
-| |
-| |
-| IMPORTANT! |
-|      This option is only available if you have selected the direct mode.   |
-| |
-| |
-| Counter operating option for direct mode |
-| ---------------------------------------- |
-| |
-| +----------------------+--------------------+----------------------------+ |
-| | Parameter | Passed value | Description | |
-| |----------------------+--------------------+----------------------------| |
-| |b_FirstCounterOption  | APCI1710_INCREMENT | The counter increments for | |
-| | or | | each counting pulse | |
-| |b_SecondCounterOption | | | |
-| |----------------------+--------------------+----------------------------| |
-| |b_FirstCounterOption  | APCI1710_DECREMENT | The counter decrements for | |
-| | or | | each counting pulse | |
-| |b_SecondCounterOption | | | |
-| +----------------------+--------------------+----------------------------+ |
-| |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710|
-| unsigned char_ b_ModulNbr : Module number to |
-| configure (0 to 3) |
-| unsigned char_ b_CounterRange : Selection form counter |
-| range. |
-| unsigned char_ b_FirstCounterModus : First counter operating |
-| mode. |
-| unsigned char_ b_FirstCounterOption : First counter option. |
-| unsigned char_ b_SecondCounterModus : Second counter operating |
-| mode. |
-| unsigned char_ b_SecondCounterOption : Second counter option. |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The module is not a counter module |
-| -3: The selected counter range is wrong. |
-| -4: The selected first counter operating mode is wrong. |
-| -5: The selected first counter operating option is wrong|
-| -6: The selected second counter operating mode is wrong.|
-| -7: The selected second counter operating option is |
-| wrong. |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_InitCounter(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_CounterRange,
- unsigned char b_FirstCounterModus,
- unsigned char b_FirstCounterOption,
- unsigned char b_SecondCounterModus,
- unsigned char b_SecondCounterOption)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /*******************************/
- /* Test if incremental counter */
- /*******************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) ==
- APCI1710_INCREMENTAL_COUNTER) {
- /**************************/
- /* Test the counter range */
- /**************************/
-
- if (b_CounterRange == APCI1710_16BIT_COUNTER
- || b_CounterRange == APCI1710_32BIT_COUNTER) {
- /********************************/
- /* Test the first counter modus */
- /********************************/
-
- if (b_FirstCounterModus == APCI1710_QUADRUPLE_MODE ||
- b_FirstCounterModus == APCI1710_DOUBLE_MODE ||
- b_FirstCounterModus == APCI1710_SIMPLE_MODE ||
- b_FirstCounterModus == APCI1710_DIRECT_MODE) {
- /*********************************/
- /* Test the first counter option */
- /*********************************/
-
- if ((b_FirstCounterModus == APCI1710_DIRECT_MODE
- && (b_FirstCounterOption ==
- APCI1710_INCREMENT
- || b_FirstCounterOption
- == APCI1710_DECREMENT))
- || (b_FirstCounterModus !=
- APCI1710_DIRECT_MODE
- && (b_FirstCounterOption ==
- APCI1710_HYSTERESIS_ON
- || b_FirstCounterOption
- ==
- APCI1710_HYSTERESIS_OFF)))
- {
- /**************************/
- /* Test if 16-bit counter */
- /**************************/
-
- if (b_CounterRange ==
- APCI1710_16BIT_COUNTER) {
- /*********************************/
- /* Test the second counter modus */
- /*********************************/
-
- if ((b_FirstCounterModus !=
- APCI1710_DIRECT_MODE
- &&
- (b_SecondCounterModus
- ==
- APCI1710_QUADRUPLE_MODE
- ||
- b_SecondCounterModus
- ==
- APCI1710_DOUBLE_MODE
- ||
- b_SecondCounterModus
- ==
- APCI1710_SIMPLE_MODE))
- || (b_FirstCounterModus
- ==
- APCI1710_DIRECT_MODE
- &&
- b_SecondCounterModus
- ==
- APCI1710_DIRECT_MODE))
- {
- /**********************************/
- /* Test the second counter option */
- /**********************************/
-
- if ((b_SecondCounterModus == APCI1710_DIRECT_MODE && (b_SecondCounterOption == APCI1710_INCREMENT || b_SecondCounterOption == APCI1710_DECREMENT)) || (b_SecondCounterModus != APCI1710_DIRECT_MODE && (b_SecondCounterOption == APCI1710_HYSTERESIS_ON || b_SecondCounterOption == APCI1710_HYSTERESIS_OFF))) {
- i_ReturnValue =
- 0;
- } else {
- /*********************************************************/
- /* The selected second counter operating option is wrong */
- /*********************************************************/
-
- DPRINTK("The selected second counter operating option is wrong\n");
- i_ReturnValue =
- -7;
- }
- } else {
- /*******************************************************/
- /* The selected second counter operating mode is wrong */
- /*******************************************************/
-
- DPRINTK("The selected second counter operating mode is wrong\n");
- i_ReturnValue = -6;
- }
- }
- } else {
- /********************************************************/
- /* The selected first counter operating option is wrong */
- /********************************************************/
-
- DPRINTK("The selected first counter operating option is wrong\n");
- i_ReturnValue = -5;
- }
- } else {
- /******************************************************/
- /* The selected first counter operating mode is wrong */
- /******************************************************/
- DPRINTK("The selected first counter operating mode is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /***************************************/
- /* The selected counter range is wrong */
- /***************************************/
-
- DPRINTK("The selected counter range is wrong\n");
- i_ReturnValue = -3;
- }
-
- /*************************/
- /* Test if a error occur */
- /*************************/
-
- if (i_ReturnValue == 0) {
- /**************************/
- /* Test if 16-Bit counter */
- /* Test if 32-bit counter */
-
- if (b_CounterRange == APCI1710_32BIT_COUNTER) {
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister1 = b_CounterRange |
- b_FirstCounterModus |
- b_FirstCounterOption;
- } else {
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister1 = b_CounterRange |
- (b_FirstCounterModus & 0x5) |
- (b_FirstCounterOption & 0x20) |
- (b_SecondCounterModus & 0xA) |
- (b_SecondCounterOption & 0x40);
-
- /***********************/
- /* Test if direct mode */
- /***********************/
-
- if (b_FirstCounterModus == APCI1710_DIRECT_MODE) {
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister1 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister1 |
- APCI1710_DIRECT_MODE;
- }
- }
-
- /***************************/
- /* Write the configuration */
- /***************************/
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4,
- devpriv->s_BoardInfos.
- ui_Address + 20 + (64 * b_ModulNbr));
-
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.b_CounterInit = 1;
- }
- } else {
- /**************************************/
- /* The module is not a counter module */
- /**************************************/
-
- DPRINTK("The module is not a counter module\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
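
A minimal host-side sketch of the mode-register-1 packing performed by the tail of i_APCI1710_InitCounter() above. The b_* arguments stand for already-validated APCI1710_* constants whose numeric values are not reproduced here, and the constants passed in main() are purely illustrative.

#include <stdint.h>
#include <stdio.h>

static uint8_t pack_mode_register1(int is_32bit, uint8_t range,
                                   uint8_t first_modus, uint8_t first_option,
                                   uint8_t second_modus, uint8_t second_option,
                                   uint8_t direct_mode)
{
    uint8_t reg;

    if (is_32bit)                       /* single 32-bit counter */
        return range | first_modus | first_option;

    /* 16-bit counter pair: each counter contributes its own bit group */
    reg = range | (first_modus & 0x5) | (first_option & 0x20) |
          (second_modus & 0xA) | (second_option & 0x40);

    if (first_modus == direct_mode)     /* direct mode is OR-ed in last */
        reg |= direct_mode;

    return reg;
}

int main(void)
{
    printf("mode register 1: 0x%02x\n",
           pack_mode_register1(0, 0x00, 0x01, 0x20, 0x02, 0x40, 0x80));
    return 0;
}
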
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_CounterAutoTest |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char *_ pb_TestStatus) |
-+----------------------------------------------------------------------------+
-| Task : A test mode is intended for testing the component and |
-| the connected periphery. All the 8-bit counter chains |
-| are operated internally as down counters. |
-| Independently from the external signals, |
-| all the four 8-bit counter chains are decremented in |
-| parallel by each negative clock pulse edge of CLKX. |
-| |
-| Counter auto test conclusion |
-| ---------------------------- |
-| +-----------------+-----------------------------+ |
-| | pb_TestStatus | Error description | |
-| | mask | | |
-| |-----------------+-----------------------------| |
-| | 0000 | No error detected | |
-| |-----------------|-----------------------------| |
-| | 0001 | Error detected of counter 0 | |
-| |-----------------|-----------------------------| |
-| | 0010 | Error detected of counter 1 | |
-| |-----------------|-----------------------------| |
-| | 0100 | Error detected of counter 2 | |
-| |-----------------|-----------------------------| |
-| | 1000 | Error detected of counter 3 | |
-| +-----------------+-----------------------------+ |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 | |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_TestStatus : Auto test conclusion. See table|
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_CounterAutoTest(struct comedi_device *dev,
- unsigned char *pb_TestStatus)
-{
- struct addi_private *devpriv = dev->private;
- unsigned char b_ModulCpt = 0;
- int i_ReturnValue = 0;
- unsigned int dw_LathchValue;
-
- *pb_TestStatus = 0;
-
- /********************************/
- /* Test if counter module found */
- /********************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[0] & 0xFFFF0000UL) ==
- APCI1710_INCREMENTAL_COUNTER
- || (devpriv->s_BoardInfos.
- dw_MolduleConfiguration[1] & 0xFFFF0000UL) ==
- APCI1710_INCREMENTAL_COUNTER
- || (devpriv->s_BoardInfos.
- dw_MolduleConfiguration[2] & 0xFFFF0000UL) ==
- APCI1710_INCREMENTAL_COUNTER
- || (devpriv->s_BoardInfos.
- dw_MolduleConfiguration[3] & 0xFFFF0000UL) ==
- APCI1710_INCREMENTAL_COUNTER) {
- for (b_ModulCpt = 0; b_ModulCpt < 4; b_ModulCpt++) {
- /*******************************/
- /* Test if incremental counter */
- /*******************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulCpt] &
- 0xFFFF0000UL) ==
- APCI1710_INCREMENTAL_COUNTER) {
- /******************/
- /* Start the test */
- /******************/
-
- outl(3, devpriv->s_BoardInfos.
- ui_Address + 16 + (64 * b_ModulCpt));
-
-				/*********************/
-				/* Latch the counter */
-				/*********************/
-
- outl(1, devpriv->s_BoardInfos.
- ui_Address + (64 * b_ModulCpt));
-
- /************************/
- /* Read the latch value */
- /************************/
-
- dw_LathchValue = inl(devpriv->s_BoardInfos.
- ui_Address + 4 + (64 * b_ModulCpt));
-
- if ((dw_LathchValue & 0xFF) !=
- ((dw_LathchValue >> 8) & 0xFF)
- && (dw_LathchValue & 0xFF) !=
- ((dw_LathchValue >> 16) & 0xFF)
- && (dw_LathchValue & 0xFF) !=
- ((dw_LathchValue >> 24) & 0xFF)) {
- *pb_TestStatus =
- *pb_TestStatus | (1 <<
- b_ModulCpt);
- }
-
- /*****************/
- /* Stop the test */
- /*****************/
-
- outl(0, devpriv->s_BoardInfos.
- ui_Address + 16 + (64 * b_ModulCpt));
- }
- }
- } else {
- /***************************/
- /* No counter module found */
- /***************************/
-
- DPRINTK("No counter module found\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
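
The auto test above latches each counter and compares the four byte lanes of the latched 32-bit word, since the four 8-bit chains are decremented in parallel; a module's bit is set in *pb_TestStatus when byte 0 disagrees with the other three. A standalone sketch that reproduces exactly the comparison written in the source:

#include <stdint.h>
#include <stdio.h>

/* Flag a module when byte 0 of its latched value differs from bytes 1, 2
 * and 3 (the '&&' combination used in the driver code above). */
static unsigned int auto_test_status(const uint32_t latch[4])
{
    unsigned int status = 0;
    int module;

    for (module = 0; module < 4; module++) {
        uint32_t v = latch[module];
        uint8_t b0 = v & 0xFF;

        if (b0 != ((v >> 8) & 0xFF) &&
            b0 != ((v >> 16) & 0xFF) &&
            b0 != ((v >> 24) & 0xFF))
            status |= 1u << module;
    }
    return status;
}

int main(void)
{
    uint32_t latch[4] = { 0xAAAAAAAA, 0xAAAAAAAB, 0xAAAAAA55, 0x55AAAA55 };

    printf("test status: 0x%x\n", auto_test_status(latch));
    return 0;
}
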
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_InitIndex (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_ReferenceAction, |
-| unsigned char_ b_IndexOperation, |
-| unsigned char_ b_AutoMode, |
-| unsigned char_ b_InterruptEnable) |
-+----------------------------------------------------------------------------+
-| Task              : Initialises the index of the selected module          |
-|                     (b_ModulNbr). When an INDEX pulse occurs you can       |
-|                     either clear the 32-bit counter or latch its current   |
-|                     32-bit value into the first latch register; the        |
-|                     b_IndexOperation parameter selects the INDEX action.   |
-|                     If the automatic mode is enabled, each INDEX action    |
-|                     is cleared automatically; otherwise you must read the  |
-|                     index status ("i_APCI1710_ReadIndexStatus") after      |
-|                     each INDEX action.                                     |
-| |
-| |
-| Index action |
-| ------------ |
-| |
-| +------------------------+------------------------------------+ |
-| | b_IndexOperation | Operation | |
-| |------------------------+------------------------------------| |
-| |APCI1710_LATCH_COUNTER | After a index signal, the counter | |
-| | | value (32-Bit) is latched in to | |
-| | | the first latch register | |
-| |------------------------|------------------------------------| |
-| |APCI1710_CLEAR_COUNTER | After a index signal, the counter | |
-| | | value is cleared (32-Bit) | |
-| +------------------------+------------------------------------+ |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-|                     unsigned char_ b_ReferenceAction : Determines whether  |
-|                                               the reference must be set    |
-|                                               for the index to be accepted |
-|                                               APCI1710_ENABLE :            |
-|                                                  Reference must be set to  |
-|                                                  accept the index          |
-|                                               APCI1710_DISABLE :           |
-|                                                  Reference is ignored      |
-| unsigned char_ b_IndexOperation : Index operating mode. |
-| See table. |
-| unsigned char_ b_AutoMode : Enable or disable the |
-| automatic index reset. |
-| APCI1710_ENABLE : |
-| Enable the automatic mode |
-| APCI1710_DISABLE : |
-| Disable the automatic mode |
-| unsigned char_ b_InterruptEnable : Enable or disable the |
-| interrupt. |
-| APCI1710_ENABLE : |
-| Enable the interrupt |
-| APCI1710_DISABLE : |
-| Disable the interrupt |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4 The reference action parameter is wrong |
-| -5: The index operating mode parameter is wrong |
-| -6: The auto mode parameter is wrong |
-| -7: Interrupt parameter is wrong |
-| -8: Interrupt function not initialised. |
-| See function "i_APCI1710_SetBoardIntRoutineX" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_InitIndex(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_ReferenceAction,
- unsigned char b_IndexOperation,
- unsigned char b_AutoMode,
- unsigned char b_InterruptEnable)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /********************************/
- /* Test the reference parameter */
- /********************************/
-
- if (b_ReferenceAction == APCI1710_ENABLE ||
- b_ReferenceAction == APCI1710_DISABLE) {
- /****************************/
- /* Test the index parameter */
- /****************************/
-
- if (b_IndexOperation ==
- APCI1710_HIGH_EDGE_LATCH_COUNTER
- || b_IndexOperation ==
- APCI1710_LOW_EDGE_LATCH_COUNTER
- || b_IndexOperation ==
- APCI1710_HIGH_EDGE_CLEAR_COUNTER
- || b_IndexOperation ==
- APCI1710_LOW_EDGE_CLEAR_COUNTER
- || b_IndexOperation ==
- APCI1710_HIGH_EDGE_LATCH_AND_CLEAR_COUNTER
- || b_IndexOperation ==
- APCI1710_LOW_EDGE_LATCH_AND_CLEAR_COUNTER)
- {
- /********************************/
- /* Test the auto mode parameter */
- /********************************/
-
- if (b_AutoMode == APCI1710_ENABLE ||
- b_AutoMode == APCI1710_DISABLE)
- {
- /***************************/
- /* Test the interrupt mode */
- /***************************/
-
- if (b_InterruptEnable ==
- APCI1710_ENABLE
- || b_InterruptEnable ==
- APCI1710_DISABLE) {
-
-					/**********************************/
-					/* Make the configuration command */
-					/**********************************/
-
-					if (b_ReferenceAction == APCI1710_ENABLE) {
-						devpriv->s_ModuleInfo[b_ModulNbr].
-							s_SiemensCounterInfo.
-							s_ModeRegister.
-							s_ByteModeRegister.
-							b_ModeRegister2 |=
-							APCI1710_ENABLE_INDEX_ACTION;
-					} else {
-						devpriv->s_ModuleInfo[b_ModulNbr].
-							s_SiemensCounterInfo.
-							s_ModeRegister.
-							s_ByteModeRegister.
-							b_ModeRegister2 &=
-							APCI1710_DISABLE_INDEX_ACTION;
-					}
-
-					/****************************************/
-					/* Test if low level latch and/or clear */
-					/****************************************/
-
-					if (b_IndexOperation == APCI1710_LOW_EDGE_LATCH_COUNTER
-						|| b_IndexOperation == APCI1710_LOW_EDGE_CLEAR_COUNTER
-						|| b_IndexOperation == APCI1710_LOW_EDGE_LATCH_AND_CLEAR_COUNTER) {
-						/*************************************/
-						/* Set the index level to low (DQ26) */
-						/*************************************/
-
-						devpriv->s_ModuleInfo[b_ModulNbr].
-							s_SiemensCounterInfo.
-							s_ModeRegister.
-							s_ByteModeRegister.
-							b_ModeRegister4 |=
-							APCI1710_SET_LOW_INDEX_LEVEL;
-					} else {
-						/**************************************/
-						/* Set the index level to high (DQ26) */
-						/**************************************/
-
-						devpriv->s_ModuleInfo[b_ModulNbr].
-							s_SiemensCounterInfo.
-							s_ModeRegister.
-							s_ByteModeRegister.
-							b_ModeRegister4 &=
-							APCI1710_SET_HIGH_INDEX_LEVEL;
-					}
-
-					/***********************************/
-					/* Test if latch and clear counter */
-					/***********************************/
-
-					if (b_IndexOperation == APCI1710_HIGH_EDGE_LATCH_AND_CLEAR_COUNTER
-						|| b_IndexOperation == APCI1710_LOW_EDGE_LATCH_AND_CLEAR_COUNTER) {
-						/***************************************/
-						/* Set the latch and clear flag (DQ27) */
-						/***************************************/
-
-						devpriv->s_ModuleInfo[b_ModulNbr].
-							s_SiemensCounterInfo.
-							s_ModeRegister.
-							s_ByteModeRegister.
-							b_ModeRegister4 |=
-							APCI1710_ENABLE_LATCH_AND_CLEAR;
-					} else {
-						/*****************************************/
-						/* Clear the latch and clear flag (DQ27) */
-						/*****************************************/
-
-						devpriv->s_ModuleInfo[b_ModulNbr].
-							s_SiemensCounterInfo.
-							s_ModeRegister.
-							s_ByteModeRegister.
-							b_ModeRegister4 &=
-							APCI1710_DISABLE_LATCH_AND_CLEAR;
-
-						/*************************/
-						/* Test if latch counter */
-						/*************************/
-
-						if (b_IndexOperation == APCI1710_HIGH_EDGE_LATCH_COUNTER
-							|| b_IndexOperation == APCI1710_LOW_EDGE_LATCH_COUNTER) {
-							/*********************************/
-							/* Enable the latch from counter */
-							/*********************************/
-
-							devpriv->s_ModuleInfo[b_ModulNbr].
-								s_SiemensCounterInfo.
-								s_ModeRegister.
-								s_ByteModeRegister.
-								b_ModeRegister2 |=
-								APCI1710_INDEX_LATCH_COUNTER;
-						} else {
-							/*********************************/
-							/* Enable the clear from counter */
-							/*********************************/
-
-							devpriv->s_ModuleInfo[b_ModulNbr].
-								s_SiemensCounterInfo.
-								s_ModeRegister.
-								s_ByteModeRegister.
-								b_ModeRegister2 &=
-								~APCI1710_INDEX_LATCH_COUNTER;
-						}
-					}
-
-					if (b_AutoMode == APCI1710_DISABLE) {
-						devpriv->s_ModuleInfo[b_ModulNbr].
-							s_SiemensCounterInfo.
-							s_ModeRegister.
-							s_ByteModeRegister.
-							b_ModeRegister2 |=
-							APCI1710_INDEX_AUTO_MODE;
-					} else {
-						devpriv->s_ModuleInfo[b_ModulNbr].
-							s_SiemensCounterInfo.
-							s_ModeRegister.
-							s_ByteModeRegister.
-							b_ModeRegister2 &=
-							~APCI1710_INDEX_AUTO_MODE;
-					}
-
-					if (b_InterruptEnable == APCI1710_ENABLE) {
-						devpriv->s_ModuleInfo[b_ModulNbr].
-							s_SiemensCounterInfo.
-							s_ModeRegister.
-							s_ByteModeRegister.
-							b_ModeRegister3 |=
-							APCI1710_ENABLE_INDEX_INT;
-					} else {
-						devpriv->s_ModuleInfo[b_ModulNbr].
-							s_SiemensCounterInfo.
-							s_ModeRegister.
-							s_ByteModeRegister.
-							b_ModeRegister3 &=
-							APCI1710_DISABLE_INDEX_INT;
-					}
-
-					devpriv->s_ModuleInfo[b_ModulNbr].
-						s_SiemensCounterInfo.
-						s_InitFlag.b_IndexInit = 1;
-
- } else {
- /********************************/
- /* Interrupt parameter is wrong */
- /********************************/
- DPRINTK("Interrupt parameter is wrong\n");
- i_ReturnValue = -7;
- }
- } else {
- /************************************/
- /* The auto mode parameter is wrong */
- /************************************/
-
- DPRINTK("The auto mode parameter is wrong\n");
- i_ReturnValue = -6;
- }
- } else {
- /***********************************************/
- /* The index operating mode parameter is wrong */
- /***********************************************/
-
- DPRINTK("The index operating mode parameter is wrong\n");
- i_ReturnValue = -5;
- }
- } else {
- /*******************************************/
- /* The reference action parameter is wrong */
- /*******************************************/
-
- DPRINTK("The reference action parameter is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
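
Every option in i_APCI1710_InitIndex() is applied to a shadow copy of the byte-wide mode registers: enable by OR-ing an APCI1710_ENABLE_* mask, disable by AND-ing the matching APCI1710_DISABLE_* mask (presumably the complement of the enable bit). A hypothetical pair of helpers illustrating that read-modify-write pattern; the helper names and the masks in main() are not the driver's.

#include <stdint.h>
#include <stdio.h>

static inline void mode_reg_enable(uint8_t *reg, uint8_t enable_mask)
{
    *reg |= enable_mask;
}

static inline void mode_reg_disable(uint8_t *reg, uint8_t disable_mask)
{
    *reg &= disable_mask;
}

int main(void)
{
    uint8_t reg2 = 0;

    mode_reg_enable(&reg2, 0x02);   /* e.g. enable one option bit      */
    mode_reg_disable(&reg2, 0xFD);  /* ...and disable it again (~0x02) */
    printf("reg2 = 0x%02x\n", reg2);
    return 0;
}
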
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_InitReference |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_ReferenceLevel) |
-+----------------------------------------------------------------------------+
-| Task : Initialise the reference corresponding to the selected |
-| module (b_ModulNbr). |
-| |
-| Reference level |
-| --------------- |
-| +--------------------+-------------------------+ |
-| | b_ReferenceLevel | Operation | |
-| +--------------------+-------------------------+ |
-| | APCI1710_LOW | Reference occur if "0" | |
-| |--------------------|-------------------------| |
-| | APCI1710_HIGH | Reference occur if "1" | |
-| +--------------------+-------------------------+ |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-| unsigned char_ b_ReferenceLevel : Reference level. |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The selected module number parameter is wrong |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: Reference level parameter is wrong |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_InitReference(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_ReferenceLevel)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /**************************************/
- /* Test the reference level parameter */
- /**************************************/
-
- if (b_ReferenceLevel == 0 || b_ReferenceLevel == 1) {
- if (b_ReferenceLevel == 1) {
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 |
- APCI1710_REFERENCE_HIGH;
- } else {
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 &
- APCI1710_REFERENCE_LOW;
- }
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4,
- devpriv->s_BoardInfos.ui_Address + 20 +
- (64 * b_ModulNbr));
-
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.b_ReferenceInit = 1;
- } else {
- /**************************************/
- /* Reference level parameter is wrong */
- /**************************************/
-
- DPRINTK("Reference level parameter is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
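
i_APCI1710_InitReference() updates b_ModeRegister2 but then writes dw_ModeRegister1_2_3_4 to the board in one outl() at offset 20 + 64 * module, so the four byte registers and the 32-bit word apparently alias the same storage. A sketch of that layout, assuming s_ModeRegister is a union of the packed bytes and the word (a guess based on how the code uses it):

#include <stdint.h>
#include <stdio.h>

union mode_register {
    struct {
        uint8_t b_ModeRegister1;
        uint8_t b_ModeRegister2;
        uint8_t b_ModeRegister3;
        uint8_t b_ModeRegister4;
    } bytes;
    uint32_t dw_ModeRegister1_2_3_4;
};

int main(void)
{
    union mode_register m = { .bytes = { 0x11, 0x22, 0x33, 0x44 } };

    /* on a little-endian host this prints 0x44332211 */
    printf("word written to the board: 0x%08x\n", m.dw_ModeRegister1_2_3_4);
    return 0;
}
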
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_InitExternalStrobe |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_ExternalStrobe, |
-| unsigned char_ b_ExternalStrobeLevel) |
-+----------------------------------------------------------------------------+
-| Task : Initialises the external strobe level corresponding to |
-| the selected module (b_ModulNbr). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-| unsigned char_ b_ExternalStrobe : External strobe selection |
-| 0 : External strobe A |
-| 1 : External strobe B |
-| unsigned char_ b_ExternalStrobeLevel : External strobe level |
-| APCI1710_LOW : |
-| External latch occurs if "0" |
-| APCI1710_HIGH : |
-| External latch occurs if "1" |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The selected module number is wrong |
-| -3: Counter not initialised. |
-| See function "i_APCI1710_InitCounter" |
-| -4: External strobe selection is wrong |
-| -5: External strobe level parameter is wrong |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_InitExternalStrobe(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_ExternalStrobe,
- unsigned char b_ExternalStrobeLevel)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /**************************************/
- /* Test the external strobe selection */
- /**************************************/
-
- if (b_ExternalStrobe == 0 || b_ExternalStrobe == 1) {
- /******************/
- /* Test the level */
- /******************/
-
- if ((b_ExternalStrobeLevel == APCI1710_HIGH) ||
- ((b_ExternalStrobeLevel == APCI1710_LOW
- && (devpriv->
- s_BoardInfos.
- dw_MolduleConfiguration
- [b_ModulNbr] &
- 0xFFFF) >=
- 0x3135))) {
- /*****************/
- /* Set the level */
- /*****************/
-
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister4 = (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister4 & (0xFF -
- (0x10 << b_ExternalStrobe))) | ((b_ExternalStrobeLevel ^ 1) << (4 + b_ExternalStrobe));
- } else {
- /********************************************/
- /* External strobe level parameter is wrong */
- /********************************************/
-
- DPRINTK("External strobe level parameter is wrong\n");
- i_ReturnValue = -5;
- }
- } /* if (b_ExternalStrobe == 0 || b_ExternalStrobe == 1) */
- else {
- /**************************************/
- /* External strobe selection is wrong */
- /**************************************/
-
- DPRINTK("External strobe selection is wrong\n");
- i_ReturnValue = -4;
- } /* if (b_ExternalStrobe == 0 || b_ExternalStrobe == 1) */
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
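
The external strobe level computation above stores the inverted level in bit 4 (strobe A) or bit 5 (strobe B) of mode register 4. A small runnable sketch of that bit manipulation; it assumes APCI1710_HIGH and APCI1710_LOW resolve to 1 and 0, which is not shown in this excerpt.

#include <stdint.h>
#include <stdio.h>

static uint8_t set_strobe_level(uint8_t reg4, unsigned strobe, unsigned level)
{
    reg4 &= 0xFF - (0x10 << strobe);     /* clear bit 4 or bit 5      */
    reg4 |= (level ^ 1) << (4 + strobe); /* store the inverted level  */
    return reg4;
}

int main(void)
{
    printf("0x%02x\n", set_strobe_level(0x00, 1 /* strobe B */, 0 /* low */));
    return 0;
}
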
-
- /*
- +----------------------------------------------------------------------------+
- | Function Name : _INT_ i_APCI1710_InitCompareLogic |
- | (unsigned char_ b_BoardHandle, |
- | unsigned char_ b_ModulNbr, |
- | unsigned int_ ui_CompareValue) |
- +----------------------------------------------------------------------------+
- | Task              : Sets the 32-bit compare value. When the incremental   |
- |                    counter reaches the compare value (ui_CompareValue),   |
- |                    an interrupt is generated.                             |
- +----------------------------------------------------------------------------+
- | Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
- | unsigned char_ b_ModulNbr : Module number to configure |
- | (0 to 3) |
- | unsigned int_ ui_CompareValue : 32-Bit compare value |
- +----------------------------------------------------------------------------+
- | Output Parameters : -
- +----------------------------------------------------------------------------+
- | Return Value : 0: No error |
- | -1: The handle parameter of the board is wrong |
- | -2: No counter module found |
- | -3: Counter not initialised see function |
- | "i_APCI1710_InitCounter" |
- +----------------------------------------------------------------------------+
- */
-static int i_APCI1710_InitCompareLogic(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned int ui_CompareValue)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
-
- outl(ui_CompareValue, devpriv->s_BoardInfos.
- ui_Address + 28 + (64 * b_ModulNbr));
-
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.b_CompareLogicInit = 1;
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
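
Each function module occupies a 64-byte register window, and the offsets used by the inl()/outl() calls in this file repeat from function to function. A consolidated sketch of that map; the symbolic names are descriptive only, not the driver's.

#include <stdio.h>

#define APCI1710_MODULE_STRIDE  64
#define REG_LATCH_CMD           0   /* write: latch counter into latch reg 0/1 */
#define REG_LATCH_VALUE         4   /* read: latched 32-bit counter value      */
#define REG_COUNTER_CMD         16  /* write: clear counter / test mode        */
#define REG_MODE                20  /* write: mode registers 1..4 as one word  */
#define REG_COMPARE             28  /* write: 32-bit compare value             */
#define REG_TIMER               32  /* write: frequency-measurement timer      */
#define REG_STATUS              36  /* read: status, bit 0 = 40MHz quartz flag */

static unsigned long module_reg(unsigned long base, unsigned module,
                                unsigned offset)
{
    return base + offset + APCI1710_MODULE_STRIDE * module;
}

int main(void)
{
    printf("compare register of module 2: base + %lu\n",
           module_reg(0, 2, REG_COMPARE));
    return 0;
}
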
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_InitFrequencyMeasurement |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_PCIInputClock, |
-| unsigned char_ b_TimingUnity, |
-| ULONG_ ul_TimingInterval, |
-| PULONG_ pul_RealTimingInterval) |
-+----------------------------------------------------------------------------+
-| Task : Sets the time for the frequency measurement. |
-| Configures the selected TOR incremental counter of the |
-| selected module (b_ModulNbr). The ul_TimingInterval and|
-| ul_TimingUnity determine the time base for the |
-| measurement. The pul_RealTimingInterval returns the |
-| real time value. You must call up this function before |
-| you call up any other function which gives access to |
-| the frequency measurement. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Number of the module to be |
-| configured (0 to 3) |
-| unsigned char_ b_PCIInputClock : Selection of the PCI bus |
-| clock |
-| - APCI1710_30MHZ : |
-| The PC has a PCI bus clock |
-| of 30 MHz |
-| - APCI1710_33MHZ : |
-| The PC has a PCI bus clock |
-| of 33 MHz |
-| unsigned char_ b_TimingUnity : Base time unit (0 to 2) |
-| 0 : ns |
-|                                               1 : µs                       |
-| 2 : ms |
-| ULONG_ ul_TimingInterval: Base time value. |
-+----------------------------------------------------------------------------+
-| Output Parameters : PULONG_ pul_RealTimingInterval : Real base time value. |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The selected module number is wrong |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: The selected PCI input clock is wrong |
-| -5: Timing unity selection is wrong |
-| -6: Base timing selection is wrong |
-| -7: 40MHz quartz not on board |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_InitFrequencyMeasurement(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_PCIInputClock,
- unsigned char b_TimingUnity,
- unsigned int ul_TimingInterval,
- unsigned int *pul_RealTimingInterval)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int ul_TimerValue = 0;
- double d_RealTimingInterval;
- unsigned int dw_Status = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /**************************/
- /* Test the PCI bus clock */
- /**************************/
-
- if ((b_PCIInputClock == APCI1710_30MHZ) ||
- (b_PCIInputClock == APCI1710_33MHZ) ||
- (b_PCIInputClock == APCI1710_40MHZ)) {
- /************************/
- /* Test the timing unit */
- /************************/
-
- if (b_TimingUnity <= 2) {
- /**********************************/
- /* Test the base timing selection */
- /**********************************/
-
-				if (((b_PCIInputClock == APCI1710_30MHZ)
-						&& (b_TimingUnity == 0)
-						&& (ul_TimingInterval >= 266)
-						&& (ul_TimingInterval <= 8738133UL))
-					|| ((b_PCIInputClock == APCI1710_30MHZ)
-						&& (b_TimingUnity == 1)
-						&& (ul_TimingInterval >= 1)
-						&& (ul_TimingInterval <= 8738UL))
-					|| ((b_PCIInputClock == APCI1710_30MHZ)
-						&& (b_TimingUnity == 2)
-						&& (ul_TimingInterval >= 1)
-						&& (ul_TimingInterval <= 8UL))
-					|| ((b_PCIInputClock == APCI1710_33MHZ)
-						&& (b_TimingUnity == 0)
-						&& (ul_TimingInterval >= 242)
-						&& (ul_TimingInterval <= 7943757UL))
-					|| ((b_PCIInputClock == APCI1710_33MHZ)
-						&& (b_TimingUnity == 1)
-						&& (ul_TimingInterval >= 1)
-						&& (ul_TimingInterval <= 7943UL))
-					|| ((b_PCIInputClock == APCI1710_33MHZ)
-						&& (b_TimingUnity == 2)
-						&& (ul_TimingInterval >= 1)
-						&& (ul_TimingInterval <= 7UL))
-					|| ((b_PCIInputClock == APCI1710_40MHZ)
-						&& (b_TimingUnity == 0)
-						&& (ul_TimingInterval >= 200)
-						&& (ul_TimingInterval <= 6553500UL))
-					|| ((b_PCIInputClock == APCI1710_40MHZ)
-						&& (b_TimingUnity == 1)
-						&& (ul_TimingInterval >= 1)
-						&& (ul_TimingInterval <= 6553UL))
-					|| ((b_PCIInputClock == APCI1710_40MHZ)
-						&& (b_TimingUnity == 2)
-						&& (ul_TimingInterval >= 1)
-						&& (ul_TimingInterval <= 6UL))) {
- /**********************/
- /* Test if 40MHz used */
- /**********************/
-
- if (b_PCIInputClock ==
- APCI1710_40MHZ) {
- /******************************/
- /* Test if firmware >= Rev1.5 */
- /******************************/
-
- if ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF) >= 0x3135) {
- /*********************************/
- /* Test if 40MHz quartz on board */
- /*********************************/
-
- /*INPDW (ps_APCI1710Variable->
- s_Board [b_BoardHandle].
- s_BoardInfos.
- ui_Address + 36 + (64 * b_ModulNbr), &dw_Status); */
-					dw_Status = inl(devpriv->s_BoardInfos.
-						ui_Address + 36 +
-						(64 * b_ModulNbr));
-
-					/******************************/
-					/* Test the quartz flag (DQ0) */
-					/******************************/
-
-					if ((dw_Status & 1) != 1) {
-						/*****************************/
-						/* 40MHz quartz not on board */
-						/*****************************/
-
-						DPRINTK("40MHz quartz not on board\n");
-						i_ReturnValue = -7;
-					}
-				} else {
-					/*****************************/
-					/* 40MHz quartz not on board */
-					/*****************************/
-					DPRINTK("40MHz quartz not on board\n");
-					i_ReturnValue = -7;
-				}
- } /* if (b_PCIInputClock == APCI1710_40MHZ) */
-
-				/*****************************/
-				/* Test if no error occurred */
-				/*****************************/
-
- if (i_ReturnValue == 0) {
- /****************************/
- /* Test the INC_CPT version */
- /****************************/
-
- if ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF) >= 0x3131) {
-
- /**********************/
- /* Test if 40MHz used */
- /**********************/
-
-					if (b_PCIInputClock == APCI1710_40MHZ) {
-						/**********************************/
-						/* Enable the 40MHz quartz (DQ30) */
-						/**********************************/
-
-						devpriv->s_ModuleInfo[b_ModulNbr].
-							s_SiemensCounterInfo.
-							s_ModeRegister.
-							s_ByteModeRegister.
-							b_ModeRegister4 |=
-							APCI1710_ENABLE_40MHZ_FREQUENCY;
-					} else {
-						/***********************************/
-						/* Disable the 40MHz quartz (DQ30) */
-						/***********************************/
-
-						devpriv->s_ModuleInfo[b_ModulNbr].
-							s_SiemensCounterInfo.
-							s_ModeRegister.
-							s_ByteModeRegister.
-							b_ModeRegister4 &=
-							APCI1710_DISABLE_40MHZ_FREQUENCY;
-					}
-
-					/*********************************/
-					/* Calculate the division factor */
-					/*********************************/
-
- fpu_begin();
- switch (b_TimingUnity) {
- /******/
- /* ns */
- /******/
-
- case 0:
-
-						/******************/
-						/* Timer 0 factor */
-						/******************/
-
-						ul_TimerValue = (unsigned int)
-							(ul_TimingInterval *
-							(0.00025 * b_PCIInputClock));
-
-						/*******************/
-						/* Round the value */
-						/*******************/
-
-						if ((double)ul_TimingInterval *
-							(0.00025 * (double)b_PCIInputClock) >=
-							(double)ul_TimerValue + 0.5) {
-							ul_TimerValue = ul_TimerValue + 1;
-						}
-
-						/*****************************/
-						/* Calculate the real timing */
-						/*****************************/
-
-						*pul_RealTimingInterval = (unsigned int)
-							(ul_TimerValue /
-							(0.00025 * (double)b_PCIInputClock));
-						d_RealTimingInterval =
-							(double)ul_TimerValue /
-							(0.00025 * (double)b_PCIInputClock);
-
-						if ((double)ul_TimerValue /
-							(0.00025 * (double)b_PCIInputClock) >=
-							(double)*pul_RealTimingInterval + 0.5) {
-							*pul_RealTimingInterval =
-								*pul_RealTimingInterval + 1;
-						}
-
-						ul_TimingInterval = ul_TimingInterval - 1;
-						ul_TimerValue = ul_TimerValue - 2;
-
-						break;
-
- /******/
-					/* µs */
- /******/
-
- case 1:
-
-						/******************/
-						/* Timer 0 factor */
-						/******************/
-
-						ul_TimerValue = (unsigned int)
-							(ul_TimingInterval *
-							(0.25 * b_PCIInputClock));
-
-						/*******************/
-						/* Round the value */
-						/*******************/
-
-						if ((double)ul_TimingInterval *
-							(0.25 * (double)b_PCIInputClock) >=
-							(double)ul_TimerValue + 0.5) {
-							ul_TimerValue = ul_TimerValue + 1;
-						}
-
-						/*****************************/
-						/* Calculate the real timing */
-						/*****************************/
-
-						*pul_RealTimingInterval = (unsigned int)
-							(ul_TimerValue /
-							(0.25 * (double)b_PCIInputClock));
-						d_RealTimingInterval =
-							(double)ul_TimerValue /
-							(0.25 * (double)b_PCIInputClock);
-
-						if ((double)ul_TimerValue /
-							(0.25 * (double)b_PCIInputClock) >=
-							(double)*pul_RealTimingInterval + 0.5) {
-							*pul_RealTimingInterval =
-								*pul_RealTimingInterval + 1;
-						}
-
-						ul_TimingInterval = ul_TimingInterval - 1;
-						ul_TimerValue = ul_TimerValue - 2;
-
-						break;
-
- /******/
- /* ms */
- /******/
-
- case 2:
-
-						/******************/
-						/* Timer 0 factor */
-						/******************/
-
-						ul_TimerValue =
-							ul_TimingInterval *
-							(250.0 * b_PCIInputClock);
-
-						/*******************/
-						/* Round the value */
-						/*******************/
-
-						if ((double)ul_TimingInterval *
-							(250.0 * (double)b_PCIInputClock) >=
-							(double)ul_TimerValue + 0.5) {
-							ul_TimerValue = ul_TimerValue + 1;
-						}
-
-						/*****************************/
-						/* Calculate the real timing */
-						/*****************************/
-
-						*pul_RealTimingInterval = (unsigned int)
-							(ul_TimerValue /
-							(250.0 * (double)b_PCIInputClock));
-						d_RealTimingInterval =
-							(double)ul_TimerValue /
-							(250.0 * (double)b_PCIInputClock);
-
-						if ((double)ul_TimerValue /
-							(250.0 * (double)b_PCIInputClock) >=
-							(double)*pul_RealTimingInterval + 0.5) {
-							*pul_RealTimingInterval =
-								*pul_RealTimingInterval + 1;
-						}
-
-						ul_TimingInterval = ul_TimingInterval - 1;
-						ul_TimerValue = ul_TimerValue - 2;
-
-						break;
- }
-
- fpu_end();
- /*************************/
- /* Write the timer value */
- /*************************/
-
- outl(ul_TimerValue, devpriv->s_BoardInfos.ui_Address + 32 + (64 * b_ModulNbr));
-
- /*******************************/
- /* Set the initialisation flag */
- /*******************************/
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.
- b_FrequencyMeasurementInit
- = 1;
- } else {
- /***************************/
- /* Counter not initialised */
- /***************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue =
- -3;
- }
- } /* if (i_ReturnValue == 0) */
- } else {
- /**********************************/
- /* Base timing selection is wrong */
- /**********************************/
-
- DPRINTK("Base timing selection is wrong\n");
- i_ReturnValue = -6;
- }
- } else {
- /***********************************/
- /* Timing unity selection is wrong */
- /***********************************/
-
- DPRINTK("Timing unity selection is wrong\n");
- i_ReturnValue = -5;
- }
- } else {
- /*****************************************/
- /* The selected PCI input clock is wrong */
- /*****************************************/
-
- DPRINTK("The selected PCI input clock is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
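
The three switch cases above differ only in the tick factor (0.00025 * MHz for ns, 0.25 * MHz for µs, 250 * MHz for ms): compute the tick count, round to nearest, derive the rounded-back "real" interval, and write the tick count minus 2 to the timer register at offset 32. A simplified host-side sketch of that arithmetic; the function name is illustrative only.

#include <stdint.h>
#include <stdio.h>

static uint32_t timer_value(uint32_t interval, double factor,
                            uint32_t *real_interval)
{
    uint32_t ticks = (uint32_t)(interval * factor);
    uint32_t real;

    if (interval * factor >= ticks + 0.5)   /* round to nearest tick */
        ticks++;

    real = (uint32_t)(ticks / factor);      /* interval actually obtained */
    if (ticks / factor >= real + 0.5)
        real++;
    *real_interval = real;

    return ticks - 2;                       /* value written at offset 32 */
}

int main(void)
{
    uint32_t real;
    uint32_t val = timer_value(1000, 0.25 * 33, &real); /* 1000 µs @ 33 MHz */

    printf("timer value %u, real interval %u\n", val, real);
    return 0;
}
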
-
-/*
- * Configuration function for INC_CPT
- */
-static int i_APCI1710_InsnConfigINCCPT(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_ConfigType;
- int i_ReturnValue = 0;
-
- ui_ConfigType = CR_CHAN(insn->chanspec);
-
- printk("\nINC_CPT");
-
- devpriv->tsk_Current = current; /* Save the current process task structure */
- switch (ui_ConfigType) {
- case APCI1710_INCCPT_INITCOUNTER:
- i_ReturnValue = i_APCI1710_InitCounter(dev,
- CR_AREF(insn->chanspec),
- (unsigned char) data[0],
- (unsigned char) data[1],
- (unsigned char) data[2], (unsigned char) data[3], (unsigned char) data[4]);
- break;
-
- case APCI1710_INCCPT_COUNTERAUTOTEST:
- i_ReturnValue = i_APCI1710_CounterAutoTest(dev,
- (unsigned char *) &data[0]);
- break;
-
- case APCI1710_INCCPT_INITINDEX:
- i_ReturnValue = i_APCI1710_InitIndex(dev,
- CR_AREF(insn->chanspec),
- (unsigned char) data[0],
- (unsigned char) data[1], (unsigned char) data[2], (unsigned char) data[3]);
- break;
-
- case APCI1710_INCCPT_INITREFERENCE:
- i_ReturnValue = i_APCI1710_InitReference(dev,
- CR_AREF(insn->chanspec), (unsigned char) data[0]);
- break;
-
- case APCI1710_INCCPT_INITEXTERNALSTROBE:
- i_ReturnValue = i_APCI1710_InitExternalStrobe(dev,
- CR_AREF(insn->chanspec),
- (unsigned char) data[0], (unsigned char) data[1]);
- break;
-
- case APCI1710_INCCPT_INITCOMPARELOGIC:
- i_ReturnValue = i_APCI1710_InitCompareLogic(dev,
- CR_AREF(insn->chanspec), (unsigned int) data[0]);
- break;
-
- case APCI1710_INCCPT_INITFREQUENCYMEASUREMENT:
- i_ReturnValue = i_APCI1710_InitFrequencyMeasurement(dev,
- CR_AREF(insn->chanspec),
- (unsigned char) data[0],
- (unsigned char) data[1], (unsigned int) data[2], (unsigned int *) &data[0]);
- break;
-
- default:
- printk("Insn Config : Config Parameter Wrong\n");
-
- }
-
- if (i_ReturnValue >= 0)
- i_ReturnValue = insn->n;
- return i_ReturnValue;
-}
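
The dispatcher above routes an INSN_CONFIG instruction by reading the sub-function from the channel field of the chanspec and the module number from its aref field, with data[] carrying the sub-function's parameters. The macros below mirror comedi's CR_PACK()/CR_CHAN()/CR_AREF() bit layout (channel in bits 0..15, aref in bits 24..25) so the extraction can be tried on a host; they are redefined here only for the sketch.

#include <stdio.h>

#define CR_PACK(chan, rng, aref) \
    ((((aref) & 0x3) << 24) | (((rng) & 0xff) << 16) | (chan))
#define CR_CHAN(a)  ((a) & 0xffff)
#define CR_AREF(a)  (((a) >> 24) & 0x3)

int main(void)
{
    unsigned int chanspec = CR_PACK(2 /* config type */, 0, 1 /* module */);

    printf("config type %u on module %u\n",
           CR_CHAN(chanspec), CR_AREF(chanspec));
    return 0;
}
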
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ClearCounterValue |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr) |
-+----------------------------------------------------------------------------+
-| Task : Clear the counter value from selected module |
-| (b_ModulNbr). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The selected module number parameter is wrong |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_ClearCounterValue(struct comedi_device *dev,
- unsigned char b_ModulNbr)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*********************/
- /* Clear the counter */
- /*********************/
-
- outl(1, devpriv->s_BoardInfos.
- ui_Address + 16 + (64 * b_ModulNbr));
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ClearAllCounterValue |
-| (unsigned char_ b_BoardHandle) |
-+----------------------------------------------------------------------------+
-| Task : Clear all counter value. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_ClearAllCounterValue(struct comedi_device *dev)
-{
- struct addi_private *devpriv = dev->private;
- unsigned char b_ModulCpt = 0;
- int i_ReturnValue = 0;
-
- /********************************/
- /* Test if counter module found */
- /********************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[0] & 0xFFFF0000UL) ==
- APCI1710_INCREMENTAL_COUNTER
- || (devpriv->s_BoardInfos.
- dw_MolduleConfiguration[1] & 0xFFFF0000UL) ==
- APCI1710_INCREMENTAL_COUNTER
- || (devpriv->s_BoardInfos.
- dw_MolduleConfiguration[2] & 0xFFFF0000UL) ==
- APCI1710_INCREMENTAL_COUNTER
- || (devpriv->s_BoardInfos.
- dw_MolduleConfiguration[3] & 0xFFFF0000UL) ==
- APCI1710_INCREMENTAL_COUNTER) {
- for (b_ModulCpt = 0; b_ModulCpt < 4; b_ModulCpt++) {
- /*******************************/
- /* Test if incremental counter */
- /*******************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulCpt] &
- 0xFFFF0000UL) ==
- APCI1710_INCREMENTAL_COUNTER) {
- /*********************/
- /* Clear the counter */
- /*********************/
-
- outl(1, devpriv->s_BoardInfos.
- ui_Address + 16 + (64 * b_ModulCpt));
- }
- }
- } else {
- /***************************/
- /* No counter module found */
- /***************************/
-
- DPRINTK("No counter module found\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
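
The per-module command registers are driven with a handful of magic values in this file: offset 16 takes 1 to clear the counter and 3/0 to start/stop the auto test, while offset 0 takes 1 << (4 * latch_reg) to latch the counter into latch register 0 or 1 (as used by i_APCI1710_LatchCounter() below). A sketch with outl() replaced by a print stub so it runs on a host:

#include <stdint.h>
#include <stdio.h>

static void outl_stub(uint32_t value, unsigned long addr)
{
    printf("outl(0x%x, base+%lu)\n", value, addr);
}

static void clear_counter(unsigned long base, unsigned module)
{
    outl_stub(1, base + 16 + 64 * module);
}

static void latch_counter(unsigned long base, unsigned module,
                          unsigned latch_reg)
{
    outl_stub(1u << (latch_reg * 4), base + 64 * module);
}

int main(void)
{
    clear_counter(0, 3);
    latch_counter(0, 0, 1);
    return 0;
}
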
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_SetInputFilter |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_Module, |
-| unsigned char_ b_PCIInputClock, |
-| unsigned char_ b_Filter) |
-+----------------------------------------------------------------------------+
-| Task              : Disables or enables the software filter of the        |
-|                     selected module (b_ModulNbr). b_Filter determines the  |
-|                     filter time.                                           |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Number of the module to be |
-| configured (0 to 3) |
-| unsigned char_ b_PCIInputClock : Selection of the PCI bus |
-| clock |
-| - APCI1710_30MHZ : |
-| The PC has a PCI bus clock |
-| of 30 MHz |
-| - APCI1710_33MHZ : |
-| The PC has a PCI bus clock |
-| of 33 MHz |
-| - APCI1710_40MHZ : |
-| The APCI1710 has a 40MHz |
-| quartz |
-| unsigned char_ b_Filter : Filter selection |
-| |
-| 30 MHz |
-| ------ |
-| 0: Software filter not used |
-| 1: Filter from 266ns (3.750000MHz) |
-| 2: Filter from 400ns (2.500000MHz) |
-| 3: Filter from 533ns (1.876170MHz) |
-| 4: Filter from 666ns (1.501501MHz) |
-| 5: Filter from 800ns (1.250000MHz) |
-| 6: Filter from 933ns (1.071800MHz) |
-| 7: Filter from 1066ns (0.938080MHz) |
-| 8: Filter from 1200ns (0.833333MHz) |
-| 9: Filter from 1333ns (0.750000MHz) |
-| 10: Filter from 1466ns (0.682100MHz) |
-| 11: Filter from 1600ns (0.625000MHz) |
-| 12: Filter from 1733ns (0.577777MHz) |
-| 13: Filter from 1866ns (0.535900MHz) |
-| 14: Filter from 2000ns (0.500000MHz) |
-| 15: Filter from 2133ns (0.468800MHz) |
-| |
-| 33 MHz |
-| ------ |
-| 0: Software filter not used |
-| 1: Filter from 242ns (4.125000MHz) |
-| 2: Filter from 363ns (2.754820MHz) |
-| 3: Filter from 484ns (2.066115MHz) |
-| 4: Filter from 605ns (1.652892MHz) |
-| 5: Filter from 726ns (1.357741MHz) |
-| 6: Filter from 847ns (1.180637MHz) |
-| 7: Filter from 968ns (1.033055MHz) |
-| 8: Filter from 1089ns (0.918273MHz) |
-| 9: Filter from 1210ns (0.826446MHz) |
-| 10: Filter from 1331ns (0.751314MHz) |
-| 11: Filter from 1452ns (0.688705MHz) |
-| 12: Filter from 1573ns (0.635727MHz) |
-| 13: Filter from 1694ns (0.590318MHz) |
-| 14: Filter from 1815ns (0.550964MHz) |
-| 15: Filter from 1936ns (0.516528MHz) |
-| |
-| 40 MHz |
-| ------ |
-| 0: Software filter not used |
-| 1: Filter from 200ns (5.000000MHz) |
-| 2: Filter from 300ns (3.333333MHz) |
-| 3: Filter from 400ns (2.500000MHz) |
-| 4: Filter from 500ns (2.000000MHz) |
-| 5: Filter from 600ns (1.666666MHz) |
-| 6: Filter from 700ns (1.428500MHz) |
-| 7: Filter from 800ns (1.250000MHz) |
-| 8: Filter from 900ns (1.111111MHz) |
-| 9: Filter from 1000ns (1.000000MHz) |
-| 10: Filter from 1100ns (0.909090MHz) |
-| 11: Filter from 1200ns (0.833333MHz) |
-| 12: Filter from 1300ns (0.769200MHz) |
-| 13: Filter from 1400ns (0.714200MHz) |
-| 14: Filter from 1500ns (0.666666MHz) |
-| 15: Filter from 1600ns (0.625000MHz) |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The selected module number is wrong |
-| -3: The module is not a counter module |
-| -4: The selected PCI input clock is wrong |
-| -5: The selected filter value is wrong |
-| -6: 40MHz quartz not on board |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_SetInputFilter(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_PCIInputClock,
- unsigned char b_Filter)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_Status = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if incremental counter */
- /*******************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_INCREMENTAL_COUNTER) {
- /******************************/
- /* Test if firmware >= Rev1.5 */
- /******************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF) >= 0x3135) {
- /**************************/
- /* Test the PCI bus clock */
- /**************************/
-
- if ((b_PCIInputClock == APCI1710_30MHZ) ||
- (b_PCIInputClock == APCI1710_33MHZ) ||
- (b_PCIInputClock == APCI1710_40MHZ)) {
- /*************************/
- /* Test the filter value */
- /*************************/
-
- if (b_Filter < 16) {
- /**********************/
- /* Test if 40MHz used */
- /**********************/
-
- if (b_PCIInputClock ==
- APCI1710_40MHZ) {
- /*********************************/
- /* Test if 40MHz quartz on board */
- /*********************************/
-
- dw_Status =
- inl(devpriv->
- s_BoardInfos.
- ui_Address +
- 36 +
- (64 * b_ModulNbr));
-
- /******************************/
- /* Test the quartz flag (DQ0) */
- /******************************/
-
- if ((dw_Status & 1) !=
- 1) {
- /*****************************/
- /* 40MHz quartz not on board */
- /*****************************/
-
- DPRINTK("40MHz quartz not on board\n");
- i_ReturnValue =
- -6;
- }
- } /* if (b_PCIInputClock == APCI1710_40MHZ) */
-
-					/*****************************/
-					/* Test if no error occurred */
-					/*****************************/
-
- if (i_ReturnValue == 0) {
- /**********************/
- /* Test if 40MHz used */
- /**********************/
-
-					if (b_PCIInputClock == APCI1710_40MHZ) {
-						/**********************************/
-						/* Enable the 40MHz quartz (DQ31) */
-						/**********************************/
-
-						devpriv->s_ModuleInfo[b_ModulNbr].
-							s_SiemensCounterInfo.
-							s_ModeRegister.
-							s_ByteModeRegister.
-							b_ModeRegister4 |=
-							APCI1710_ENABLE_40MHZ_FILTER;
-					} else {
-						/***********************************/
-						/* Disable the 40MHz quartz (DQ31) */
-						/***********************************/
-
-						devpriv->s_ModuleInfo[b_ModulNbr].
-							s_SiemensCounterInfo.
-							s_ModeRegister.
-							s_ByteModeRegister.
-							b_ModeRegister4 &=
-							APCI1710_DISABLE_40MHZ_FILTER;
-					}
-
- /************************/
- /* Set the filter value */
- /************************/
-
-					devpriv->s_ModuleInfo[b_ModulNbr].
-						s_SiemensCounterInfo.
-						s_ModeRegister.
-						s_ByteModeRegister.
-						b_ModeRegister3 =
-						(devpriv->s_ModuleInfo[b_ModulNbr].
-						s_SiemensCounterInfo.
-						s_ModeRegister.
-						s_ByteModeRegister.
-						b_ModeRegister3 & 0x1F) |
-						((b_Filter & 0x7) << 5);
-
-					devpriv->s_ModuleInfo[b_ModulNbr].
-						s_SiemensCounterInfo.
-						s_ModeRegister.
-						s_ByteModeRegister.
-						b_ModeRegister4 =
-						(devpriv->s_ModuleInfo[b_ModulNbr].
-						s_SiemensCounterInfo.
-						s_ModeRegister.
-						s_ByteModeRegister.
-						b_ModeRegister4 & 0xFE) |
-						((b_Filter & 0x8) >> 3);
-
- /***************************/
- /* Write the configuration */
- /***************************/
-
- outl(devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4,
- devpriv->
- s_BoardInfos.
- ui_Address +
- 20 +
- (64 * b_ModulNbr));
- } /* if (i_ReturnValue == 0) */
- } /* if (b_Filter < 16) */
- else {
- /**************************************/
- /* The selected filter value is wrong */
- /**************************************/
-
- DPRINTK("The selected filter value is wrong\n");
- i_ReturnValue = -5;
- } /* if (b_Filter < 16) */
- } /* if ((b_PCIInputClock == APCI1710_30MHZ) || (b_PCIInputClock == APCI1710_33MHZ) || (b_PCIInputClock == APCI1710_40MHZ)) */
- else {
- /*****************************************/
- /* The selected PCI input clock is wrong */
- /*****************************************/
-
- DPRINTK("The selected PCI input clock is wrong\n");
-			i_ReturnValue = -4;
- } /* if ((b_PCIInputClock == APCI1710_30MHZ) || (b_PCIInputClock == APCI1710_33MHZ) || (b_PCIInputClock == APCI1710_40MHZ)) */
- } else {
- /**************************************/
- /* The module is not a counter module */
- /**************************************/
-
- DPRINTK("The module is not a counter module\n");
- i_ReturnValue = -3;
- }
- } else {
- /**************************************/
- /* The module is not a counter module */
- /**************************************/
-
- DPRINTK("The module is not a counter module\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
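
i_APCI1710_SetInputFilter() splits the 4-bit filter value across two mode registers exactly as in the code above: bits 0..2 land in bits 5..7 of mode register 3, bit 3 lands in bit 0 of mode register 4. A runnable sketch of that packing:

#include <stdint.h>
#include <stdio.h>

static void set_filter(uint8_t *reg3, uint8_t *reg4, uint8_t filter)
{
    *reg3 = (*reg3 & 0x1F) | ((filter & 0x7) << 5);
    *reg4 = (*reg4 & 0xFE) | ((filter & 0x8) >> 3);
}

int main(void)
{
    uint8_t reg3 = 0, reg4 = 0;

    set_filter(&reg3, &reg4, 0xA);  /* filter value 10 */
    printf("reg3=0x%02x reg4=0x%02x\n", reg3, reg4);
    return 0;
}
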
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_LatchCounter (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_LatchReg) |
-+----------------------------------------------------------------------------+
-| Task              : Latches the current value of the selected module      |
-|                     (b_ModulNbr) into the selected latch register          |
-|                     (b_LatchReg).                                          |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-| unsigned char_ b_LatchReg : Selected latch register |
-| 0 : for the first latch register |
-| 1 : for the second latch register |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: The selected latch register parameter is wrong |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_LatchCounter(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_LatchReg)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*************************************/
- /* Test the latch register parameter */
- /*************************************/
-
- if (b_LatchReg < 2) {
- /*********************/
- /* Latch the counter */
- /*********************/
-
- outl(1 << (b_LatchReg * 4),
- devpriv->s_BoardInfos.ui_Address +
- (64 * b_ModulNbr));
- } else {
- /**************************************************/
- /* The selected latch register parameter is wrong */
- /**************************************************/
-
- DPRINTK("The selected latch register parameter is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
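Each counter module in this file occupies a 64-byte I/O window, and a software latch is a single command bit written at the window base: bit 0 latches into latch register 0, bit 4 into latch register 1, which is what the outl above encodes as 1 << (b_LatchReg * 4). A small sketch of the address and command arithmetic, with a made-up I/O base:

#include <stdio.h>

/* Per-module I/O window arithmetic used throughout this file:
 * each module is 64 bytes wide; the software latch command is
 * (1 << (latch_reg * 4)) written at the window base. */
static unsigned int module_reg(unsigned int base, unsigned char module,
                               unsigned int offset)
{
        return base + (64 * module) + offset;
}

int main(void)
{
        unsigned int base = 0xD000;     /* hypothetical I/O base address */

        printf("module 2 base: 0x%04X, latch-1 command: 0x%02X\n",
               module_reg(base, 2, 0), 1 << (1 * 4));
        return 0;
}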
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_SetIndexAndReferenceSource |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_SourceSelection) |
-+----------------------------------------------------------------------------+
-| Task : Determine the hardware source for the index and the |
-| reference logic. By default, the index logic is |
-| connected to the difference input C and the reference |
-| logic is connected to the 24V input E |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-| unsigned char_ b_SourceSelection : APCI1710_SOURCE_0 : |
-| The index logic is connected |
-| to the difference input C and|
-| the reference logic is |
-| connected to the 24V input E.|
-| This is the default |
-| configuration. |
-| APCI1710_SOURCE_1 : |
-| The reference logic is |
-| connected to the difference |
-| input C and the index logic |
-| is connected to the 24V |
-| input E |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The selected module number is wrong |
-| -3: The module is not a counter module. |
-| -4: The source selection is wrong |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_SetIndexAndReferenceSource(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_SourceSelection)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if incremental counter */
- /*******************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_INCREMENTAL_COUNTER) {
- /******************************/
- /* Test if firmware >= Rev1.5 */
- /******************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF) >= 0x3135) {
- /*****************************/
- /* Test the source selection */
- /*****************************/
-
- if (b_SourceSelection == APCI1710_SOURCE_0 ||
- b_SourceSelection == APCI1710_SOURCE_1)
- {
- /******************************************/
- /* Test if invert the index and reference */
- /******************************************/
-
- if (b_SourceSelection ==
- APCI1710_SOURCE_1) {
- /********************************************/
- /* Invert index and reference source (DQ25) */
- /********************************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister4 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister4 |
- APCI1710_INVERT_INDEX_RFERENCE;
- } else {
- /****************************************/
- /* Set the default configuration (DQ25) */
- /****************************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister4 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister4 &
- APCI1710_DEFAULT_INDEX_RFERENCE;
- }
- } /* if (b_SourceSelection == APCI1710_SOURCE_0 ||b_SourceSelection == APCI1710_SOURCE_1) */
- else {
- /*********************************/
- /* The source selection is wrong */
- /*********************************/
-
- DPRINTK("The source selection is wrong\n");
- i_ReturnValue = -4;
- } /* if (b_SourceSelection == APCI1710_SOURCE_0 ||b_SourceSelection == APCI1710_SOURCE_1) */
- } else {
- /**********************************************/
- /* The module firmware is too old (< Rev 1.5) */
- /**********************************************/
-
- DPRINTK("The module firmware is too old (< Rev 1.5)\n");
- i_ReturnValue = -3;
- }
- } else {
- /**************************************/
- /* The module is not a counter module */
- /**************************************/
-
- DPRINTK("The module is not a counter module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***************************************/
- /* The selected module number is wrong */
- /***************************************/
-
- DPRINTK("The selected module number is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
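The firmware test above compares the low 16 bits of the module configuration word against 0x3135, which are the ASCII codes of '1' and '5', i.e. firmware revision 1.5. A sketch of that comparison, assuming the two-ASCII-digit encoding; the module ID in the example value is made up:

#include <stdio.h>

/* The low word of the module configuration appears to hold the firmware
 * revision as two ASCII digits, so 0x3135 reads as "15" (revision 1.5). */
static int firmware_is_at_least_1_5(unsigned int module_config)
{
        return (module_config & 0xFFFF) >= 0x3135;
}

int main(void)
{
        unsigned int cfg = 0x53430000 | ('1' << 8) | '6';  /* hypothetical module, rev 1.6 */

        printf("firmware >= 1.5: %d\n", firmware_is_at_least_1_5(cfg));
        return 0;
}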
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_SetDigitalChlOn |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr) |
-+----------------------------------------------------------------------------+
-| Task : Sets the digital output H. Setting an output means |
-| setting an output high. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Number of the module to be |
-| configured (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The selected module number is wrong |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_SetDigitalChlOn(struct comedi_device *dev,
- unsigned char b_ModulNbr)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.b_ModeRegister3 | 0x10;
-
- /*********************/
- /* Set the output On */
- /*********************/
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4, devpriv->s_BoardInfos.
- ui_Address + 20 + (64 * b_ModulNbr));
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_SetDigitalChlOff |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr) |
-+----------------------------------------------------------------------------+
-| Task : Resets the digital output H. Resetting an output means |
-| setting an output low. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Number of the module to be |
-| configured (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The selected module number is wrong |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_SetDigitalChlOff(struct comedi_device *dev,
- unsigned char b_ModulNbr)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.b_ModeRegister3 & 0xEF;
-
- /**********************/
- /* Set the output Off */
- /**********************/
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4, devpriv->s_BoardInfos.
- ui_Address + 20 + (64 * b_ModulNbr));
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
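i_APCI1710_SetDigitalChlOn and i_APCI1710_SetDigitalChlOff differ only in whether bit 4 (0x10) of mode register 3 is set or cleared before the cached 32-bit mode register word is written back at offset 20 of the module window. A standalone sketch of that toggle:

#include <stdio.h>

/* Set (on != 0) or clear (on == 0) the digital output H bit,
 * bit 4 of mode register 3, mirroring the two functions above. */
static unsigned char set_digital_chl(unsigned char mode_reg3, int on)
{
        return on ? (mode_reg3 | 0x10) : (mode_reg3 & 0xEF);
}

int main(void)
{
        printf("on:  0x%02X\n", set_digital_chl(0x05, 1));      /* 0x15 */
        printf("off: 0x%02X\n", set_digital_chl(0x15, 0));      /* 0x05 */
        return 0;
}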
-/*
- * Set & Clear Functions for INC_CPT
- */
-static int i_APCI1710_InsnBitsINCCPT(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_BitsType;
- int i_ReturnValue = 0;
-
- ui_BitsType = CR_CHAN(insn->chanspec);
- devpriv->tsk_Current = current; /* Save the current process task structure */
-
- switch (ui_BitsType) {
- case APCI1710_INCCPT_CLEARCOUNTERVALUE:
- i_ReturnValue = i_APCI1710_ClearCounterValue(dev,
- (unsigned char) CR_AREF(insn->chanspec));
- break;
-
- case APCI1710_INCCPT_CLEARALLCOUNTERVALUE:
- i_ReturnValue = i_APCI1710_ClearAllCounterValue(dev);
- break;
-
- case APCI1710_INCCPT_SETINPUTFILTER:
- i_ReturnValue = i_APCI1710_SetInputFilter(dev,
- (unsigned char) CR_AREF(insn->chanspec),
- (unsigned char) data[0], (unsigned char) data[1]);
- break;
-
- case APCI1710_INCCPT_LATCHCOUNTER:
- i_ReturnValue = i_APCI1710_LatchCounter(dev,
- (unsigned char) CR_AREF(insn->chanspec), (unsigned char) data[0]);
- break;
-
- case APCI1710_INCCPT_SETINDEXANDREFERENCESOURCE:
- i_ReturnValue = i_APCI1710_SetIndexAndReferenceSource(dev,
- (unsigned char) CR_AREF(insn->chanspec), (unsigned char) data[0]);
- break;
-
- case APCI1710_INCCPT_SETDIGITALCHLON:
- i_ReturnValue = i_APCI1710_SetDigitalChlOn(dev,
- (unsigned char) CR_AREF(insn->chanspec));
- break;
-
- case APCI1710_INCCPT_SETDIGITALCHLOFF:
- i_ReturnValue = i_APCI1710_SetDigitalChlOff(dev,
- (unsigned char) CR_AREF(insn->chanspec));
- break;
-
- default:
- printk("Bits Config Parameter Wrong\n");
- }
-
- if (i_ReturnValue >= 0)
- i_ReturnValue = insn->n;
- return i_ReturnValue;
-}
-
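The dispatcher above takes the sub-function from CR_CHAN of the chanspec, the module number (0 to 3) from CR_AREF, and any extra arguments from data[]. A hypothetical user-space sketch using comedilib, assuming this handler is wired up as the subdevice's insn_bits hook and that the subdevice index and the APCI1710_INCCPT_* selector value below match the driver's private header:

#include <stdio.h>
#include <string.h>
#include <comedilib.h>

#define SUBDEV                  0       /* assumed INC_CPT subdevice index */
#define INCCPT_LATCHCOUNTER     3       /* assumed selector value */

int main(void)
{
        comedi_t *dev = comedi_open("/dev/comedi0");
        comedi_insn insn;
        lsampl_t data[2] = { 0, 0 };    /* data[0]: latch register 0 */

        if (!dev)
                return 1;

        memset(&insn, 0, sizeof(insn));
        insn.insn = INSN_BITS;
        insn.subdev = SUBDEV;
        /* CHAN selects the sub-function; AREF carries the module number,
         * which fits because CR_PACK keeps two bits for the aref field. */
        insn.chanspec = CR_PACK(INCCPT_LATCHCOUNTER, 0, 1);
        insn.n = 2;
        insn.data = data;

        if (comedi_do_insn(dev, &insn) < 0)
                comedi_perror("comedi_do_insn");

        comedi_close(dev);
        return 0;
}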
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_EnableLatchInterrupt |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr) |
-+----------------------------------------------------------------------------+
-| Task : Enable the latch interrupt from selected module |
-| (b_ModulNbr). Each software or hardware latch |
-| generates an interrupt. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: Interrupt routine not installed see function |
-| "i_APCI1710_SetBoardIntRoutine" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_EnableLatchInterrupt(struct comedi_device *dev,
- unsigned char b_ModulNbr)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
-
- /********************/
- /* Enable interrupt */
- /********************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 | APCI1710_ENABLE_LATCH_INT;
-
- /***************************/
- /* Write the configuration */
- /***************************/
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4, devpriv->s_BoardInfos.
- ui_Address + 20 + (64 * b_ModulNbr));
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_DisableLatchInterrupt |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr) |
-+----------------------------------------------------------------------------+
-| Task : Disable the latch interrupt from selected module |
-| (b_ModulNbr). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: Interrupt routine not installed see function |
-| "i_APCI1710_SetBoardIntRoutine" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_DisableLatchInterrupt(struct comedi_device *dev,
- unsigned char b_ModulNbr)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
-
- /***************************/
- /* Write the configuration */
- /***************************/
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4 &
- ((APCI1710_DISABLE_LATCH_INT << 8) | 0xFF),
- devpriv->s_BoardInfos.ui_Address + 20 +
- (64 * b_ModulNbr));
-
- mdelay(1000);
-
- /*********************/
- /* Disable interrupt */
- /*********************/
-
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 & APCI1710_DISABLE_LATCH_INT;
-
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_Write16BitCounterValue |
-| (unsigned char_ b_BoardHandle |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_SelectedCounter, |
-| unsigned int_ ui_WriteValue) |
-+----------------------------------------------------------------------------+
-| Task : Write a 16-Bit value (ui_WriteValue) into the selected |
-| 16-Bit counter (b_SelectedCounter) from selected module|
-| (b_ModulNbr). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-| unsigned char_ b_SelectedCounter : Selected 16-Bit counter |
-| (0 or 1) |
-| unsigned int_ ui_WriteValue : 16-Bit write value |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: The selected 16-Bit counter parameter is wrong |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_Write16BitCounterValue(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_SelectedCounter,
- unsigned int ui_WriteValue)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /******************************/
- /* Test the counter selection */
- /******************************/
-
- if (b_SelectedCounter < 2) {
- /*******************/
- /* Write the value */
- /*******************/
-
- outl((unsigned int) ((unsigned int) (ui_WriteValue) << (16 *
- b_SelectedCounter)),
- devpriv->s_BoardInfos.ui_Address + 8 +
- (b_SelectedCounter * 4) +
- (64 * b_ModulNbr));
- } else {
- /**************************************************/
- /* The selected 16-Bit counter parameter is wrong */
- /**************************************************/
-
- DPRINTK("The selected 16-Bit counter parameter is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
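The write above positions the 16-bit value in the low half for counter 0 and in the high half for counter 1 by shifting it left by 16 * b_SelectedCounter; the 16-bit read path further down undoes this with a matching shift and mask. A standalone sketch of both directions:

#include <stdio.h>

/* Mirror of the 16-bit counter handling: place a value into the half
 * selected by the counter index, and extract it again the way the
 * 16-bit read path does. */
static unsigned int place_16bit(unsigned int value, unsigned char counter)
{
        return (value & 0xFFFFU) << (16 * counter);
}

static unsigned int extract_16bit(unsigned int word, unsigned char counter)
{
        return (word >> (16 * counter)) & 0xFFFFU;
}

int main(void)
{
        unsigned int packed = place_16bit(0x1234, 1);

        printf("packed = 0x%08X, counter 1 = 0x%04X\n",
               packed, extract_16bit(packed, 1));
        return 0;
}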
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_Write32BitCounterValue |
-| (unsigned char_ b_BoardHandle |
-| unsigned char_ b_ModulNbr, |
-| ULONG_ ul_WriteValue) |
-+----------------------------------------------------------------------------+
-| Task : Write a 32-Bit value (ul_WriteValue) into the selected |
-| module (b_ModulNbr). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-| ULONG_ ul_WriteValue : 32-Bit write value |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_Write32BitCounterValue(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned int ul_WriteValue)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*******************/
- /* Write the value */
- /*******************/
-
- outl(ul_WriteValue, devpriv->s_BoardInfos.
- ui_Address + 4 + (64 * b_ModulNbr));
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_EnableIndex (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr) |
-+----------------------------------------------------------------------------+
-| Task : Enable the INDEX actions |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: Index not initialised see function |
-| "i_APCI1710_InitIndex" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_EnableIndex(struct comedi_device *dev,
- unsigned char b_ModulNbr)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int ul_InterruptLatchReg;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*****************************/
- /* Test if index initialised */
- /*****************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_IndexInit) {
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 | APCI1710_ENABLE_INDEX;
-
- ul_InterruptLatchReg =
- inl(devpriv->s_BoardInfos.ui_Address +
- 24 + (64 * b_ModulNbr));
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4,
- devpriv->s_BoardInfos.ui_Address + 20 +
- (64 * b_ModulNbr));
- } else {
- /*************************************************************/
- /* Index not initialised see function "i_APCI1710_InitIndex" */
- /*************************************************************/
-
- DPRINTK("Index not initialised \n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_DisableIndex (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr) |
-+----------------------------------------------------------------------------+
-| Task : Disable the INDEX actions |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: Index not initialised see function |
-| "i_APCI1710_InitIndex" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_DisableIndex(struct comedi_device *dev,
- unsigned char b_ModulNbr)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*****************************/
- /* Test if index initialised */
- /*****************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_IndexInit) {
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 &
- APCI1710_DISABLE_INDEX;
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4,
- devpriv->s_BoardInfos.ui_Address + 20 +
- (64 * b_ModulNbr));
- } else {
- /*************************************************************/
- /* Index not initialised see function "i_APCI1710_InitIndex" */
- /*************************************************************/
-
- DPRINTK("Index not initialised \n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_EnableCompareLogic |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr) |
-+----------------------------------------------------------------------------+
-| Task : Enable the 32-Bit compare logic. When the |
-| incremental counter reaches the compare value, an |
-| interrupt is generated. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: Compare logic not initialised. |
-| See function "i_APCI1710_InitCompareLogic" |
-| -5: Interrupt function not initialised. |
-| See function "i_APCI1710_SetBoardIntRoutineX" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_EnableCompareLogic(struct comedi_device *dev,
- unsigned char b_ModulNbr)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*************************************/
- /* Test if compare logic initialised */
- /*************************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.b_CompareLogicInit == 1) {
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 |
- APCI1710_ENABLE_COMPARE_INT;
-
- /***************************/
- /* Write the configuration */
- /***************************/
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4,
- devpriv->s_BoardInfos.ui_Address + 20 +
- (64 * b_ModulNbr));
- } else {
- /*********************************/
- /* Compare logic not initialised */
- /*********************************/
-
- DPRINTK("Compare logic not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_DisableCompareLogic |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr) |
-+----------------------------------------------------------------------------+
-| Task : Disable the 32-Bit compare logic. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: Compare logic not initialised. |
-| See function "i_APCI1710_InitCompareLogic" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_DisableCompareLogic(struct comedi_device *dev,
- unsigned char b_ModulNbr)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*************************************/
- /* Test if compare logic initialised */
- /*************************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.b_CompareLogicInit == 1) {
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 &
- APCI1710_DISABLE_COMPARE_INT;
-
- /***************************/
- /* Write the configuration */
- /***************************/
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4,
- devpriv->s_BoardInfos.ui_Address + 20 +
- (64 * b_ModulNbr));
- } else {
- /*********************************/
- /* Compare logic not initialised */
- /*********************************/
-
- DPRINTK("Compare logic not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
- /*
- +----------------------------------------------------------------------------+
- | Function Name : _INT_ i_APCI1710_EnableFrequencyMeasurement |
- | (unsigned char_ b_BoardHandle, |
- | unsigned char_ b_ModulNbr, |
- | unsigned char_ b_InterruptEnable) |
- +----------------------------------------------------------------------------+
- | Task : Enables the frequency measurement function |
- +----------------------------------------------------------------------------+
- | Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
- | unsigned char_ b_ModulNbr : Number of the module to be |
- | configured (0 to 3) |
- | unsigned char_ b_InterruptEnable: Enable or disable the |
- | interrupt. |
- | APCI1710_ENABLE: |
- | Enable the interrupt |
- | APCI1710_DISABLE: |
- | Disable the interrupt |
- +----------------------------------------------------------------------------+
- | Output Parameters : - |
- +----------------------------------------------------------------------------+
- | Return Value : 0: No error |
- | -1: The handle parameter of the board is wrong |
- | -2: The selected module number is wrong |
- | -3: Counter not initialised see function |
- | "i_APCI1710_InitCounter" |
- | -4: Frequency measurement logic not initialised. |
- | See function "i_APCI1710_InitFrequencyMeasurement" |
- | -5: Interrupt parameter is wrong |
- | -6: Interrupt function not initialised. |
- +----------------------------------------------------------------------------+
- */
-static int i_APCI1710_EnableFrequencyMeasurement(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_InterruptEnable)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /********************************************/
- /* Test if frequency measurement initialised */
- /********************************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.b_FrequencyMeasurementInit == 1) {
- /***************************/
- /* Test the interrupt mode */
- /***************************/
-
- if ((b_InterruptEnable == APCI1710_DISABLE) ||
- (b_InterruptEnable == APCI1710_ENABLE))
- {
-
- /************************************/
- /* Enable the frequency measurement */
- /************************************/
-
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 |
- APCI1710_ENABLE_FREQUENCY;
-
- /*********************************************/
- /* Disable or enable the frequency interrupt */
- /*********************************************/
-
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 = (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 &
- APCI1710_DISABLE_FREQUENCY_INT)
- | (b_InterruptEnable << 3);
-
- /***************************/
- /* Write the configuration */
- /***************************/
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4,
- devpriv->s_BoardInfos.
- ui_Address + 20 +
- (64 * b_ModulNbr));
-
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.
- b_FrequencyMeasurementEnable =
- 1;
- } else {
- /********************************/
- /* Interrupt parameter is wrong */
- /********************************/
-
- DPRINTK("Interrupt parameter is wrong\n");
- i_ReturnValue = -5;
- }
- } else {
- /***********************************************/
- /* Frequency measurement logic not initialised */
- /***********************************************/
-
- DPRINTK("Frequency measurement logic not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
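After setting the frequency-enable bit, the code above folds the caller's 0/1 interrupt flag into bit 3 of mode register 3: the register is masked with APCI1710_DISABLE_FREQUENCY_INT and the flag is shifted into place. A sketch of that update, assuming APCI1710_ENABLE is 1, APCI1710_DISABLE is 0 and the mask clears only bit 3:

#include <stdio.h>

/* Fold the interrupt-enable flag (0 or 1) into bit 3 of mode register 3,
 * under the assumption that APCI1710_DISABLE_FREQUENCY_INT clears only
 * that bit. */
static unsigned char set_freq_irq_bit(unsigned char mode_reg3,
                                      unsigned char interrupt_enable)
{
        return (mode_reg3 & ~(1u << 3)) | ((interrupt_enable & 1u) << 3);
}

int main(void)
{
        printf("enabled:  0x%02X\n", set_freq_irq_bit(0x40, 1));        /* 0x48 */
        printf("disabled: 0x%02X\n", set_freq_irq_bit(0x48, 0));        /* 0x40 */
        return 0;
}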
- /*
- +----------------------------------------------------------------------------+
- | Function Name : _INT_ i_APCI1710_DisableFrequencyMeasurement |
- | (unsigned char_ b_BoardHandle, |
- | unsigned char_ b_ModulNbr) |
- +----------------------------------------------------------------------------+
- | Task : Disables the frequency measurement function |
- +----------------------------------------------------------------------------+
- | Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
- | unsigned char_ b_ModulNbr : Number of the module to be |
- | configured (0 to 3) |
- +----------------------------------------------------------------------------+
- | Output Parameters : - |
- +----------------------------------------------------------------------------+
- | Return Value : 0: No error |
- | -1: The handle parameter of the board is wrong |
- | -2: The selected module number is wrong |
- | -3: Counter not initialised see function |
- | "i_APCI1710_InitCounter" |
- | -4: Frequency measurement logic not initialised. |
- | See function "i_APCI1710_InitFrequencyMeasurement" |
- +----------------------------------------------------------------------------+
- */
-static int i_APCI1710_DisableFrequencyMeasurement(struct comedi_device *dev,
- unsigned char b_ModulNbr)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /********************************************/
- /* Test if frequency measurement initialised */
- /********************************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.b_FrequencyMeasurementInit == 1) {
- /*************************************/
- /* Disable the frequency measurement */
- /*************************************/
-
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 &
- APCI1710_DISABLE_FREQUENCY
- /* Begin CG 29/06/01 CG 1100/0231 -> 0701/0232 Frequency measurement IRQ must be cleared */
- & APCI1710_DISABLE_FREQUENCY_INT;
- /* End CG 29/06/01 CG 1100/0231 -> 0701/0232 Frequency measurement IRQ must be cleared */
-
- /***************************/
- /* Write the configuration */
- /***************************/
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4,
- devpriv->s_BoardInfos.ui_Address + 20 +
- (64 * b_ModulNbr));
-
- /*************************************/
- /* Disable the frequency measurement */
- /*************************************/
-
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.
- b_FrequencyMeasurementEnable = 0;
- } else {
- /***********************************************/
- /* Frequency measurement logic not initialised */
- /***********************************************/
-
- DPRINTK("Frequency measurement logic not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
- * Enable Disable functions for INC_CPT
- */
-static int i_APCI1710_InsnWriteINCCPT(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_WriteType;
- int i_ReturnValue = 0;
-
- ui_WriteType = CR_CHAN(insn->chanspec);
- devpriv->tsk_Current = current; /* Save the current process task structure */
-
- switch (ui_WriteType) {
- case APCI1710_INCCPT_ENABLELATCHINTERRUPT:
- i_ReturnValue = i_APCI1710_EnableLatchInterrupt(dev,
- (unsigned char) CR_AREF(insn->chanspec));
- break;
-
- case APCI1710_INCCPT_DISABLELATCHINTERRUPT:
- i_ReturnValue = i_APCI1710_DisableLatchInterrupt(dev,
- (unsigned char) CR_AREF(insn->chanspec));
- break;
-
- case APCI1710_INCCPT_WRITE16BITCOUNTERVALUE:
- i_ReturnValue = i_APCI1710_Write16BitCounterValue(dev,
- (unsigned char) CR_AREF(insn->chanspec),
- (unsigned char) data[0], (unsigned int) data[1]);
- break;
-
- case APCI1710_INCCPT_WRITE32BITCOUNTERVALUE:
- i_ReturnValue = i_APCI1710_Write32BitCounterValue(dev,
- (unsigned char) CR_AREF(insn->chanspec), (unsigned int) data[0]);
-
- break;
-
- case APCI1710_INCCPT_ENABLEINDEX:
- i_APCI1710_EnableIndex(dev, (unsigned char) CR_AREF(insn->chanspec));
- break;
-
- case APCI1710_INCCPT_DISABLEINDEX:
- i_ReturnValue = i_APCI1710_DisableIndex(dev,
- (unsigned char) CR_AREF(insn->chanspec));
- break;
-
- case APCI1710_INCCPT_ENABLECOMPARELOGIC:
- i_ReturnValue = i_APCI1710_EnableCompareLogic(dev,
- (unsigned char) CR_AREF(insn->chanspec));
- break;
-
- case APCI1710_INCCPT_DISABLECOMPARELOGIC:
- i_ReturnValue = i_APCI1710_DisableCompareLogic(dev,
- (unsigned char) CR_AREF(insn->chanspec));
- break;
-
- case APCI1710_INCCPT_ENABLEFREQUENCYMEASUREMENT:
- i_ReturnValue = i_APCI1710_EnableFrequencyMeasurement(dev,
- (unsigned char) CR_AREF(insn->chanspec), (unsigned char) data[0]);
- break;
-
- case APCI1710_INCCPT_DISABLEFREQUENCYMEASUREMENT:
- i_ReturnValue = i_APCI1710_DisableFrequencyMeasurement(dev,
- (unsigned char) CR_AREF(insn->chanspec));
- break;
-
- default:
- printk("Write Config Parameter Wrong\n");
- }
-
- if (i_ReturnValue >= 0)
- i_ReturnValue = insn->n;
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ReadLatchRegisterStatus |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_LatchReg, |
-| unsigned char *_ pb_LatchStatus) |
-+----------------------------------------------------------------------------+
-| Task : Read the latch register status from selected module |
-| (b_ModulNbr) and selected latch register (b_LatchReg). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-| unsigned char_ b_LatchReg : Selected latch register |
-| 0 : for the first latch register |
-| 1 : for the second latch register |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_LatchStatus : Latch register status. |
-| 0 : No latch occur |
-| 1 : A software latch occur |
-| 2 : A hardware latch occur |
-| 3 : A software and hardware |
-| latch occur |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: The selected latch register parameter is wrong |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_ReadLatchRegisterStatus(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_LatchReg,
- unsigned char *pb_LatchStatus)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_LatchReg;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*************************************/
- /* Test the latch register parameter */
- /*************************************/
-
- if (b_LatchReg < 2) {
- dw_LatchReg = inl(devpriv->s_BoardInfos.
- ui_Address + (64 * b_ModulNbr));
-
- *pb_LatchStatus =
- (unsigned char) ((dw_LatchReg >> (b_LatchReg *
- 4)) & 0x3);
- } else {
- /**************************************************/
- /* The selected latch register parameter is wrong */
- /**************************************************/
-
- DPRINTK("The selected latch register parameter is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
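The status word read above carries two bits per latch register (bits 0..1 for latch 0, bits 4..5 for latch 1), with the meanings documented in the header: 0 = no latch, 1 = software latch, 2 = hardware latch, 3 = both. A small decoding sketch with a made-up status word:

#include <stdio.h>

/* Decode the 2-bit status of one latch register from the module's
 * latch status word (latch 0 in bits 0..1, latch 1 in bits 4..5). */
static unsigned char latch_status(unsigned int status_word,
                                  unsigned char latch_reg)
{
        return (status_word >> (latch_reg * 4)) & 0x3;
}

int main(void)
{
        static const char *const names[] = {
                "no latch", "software latch", "hardware latch",
                "software and hardware latch"
        };
        unsigned int word = 0x21;       /* example: latch 0 = sw, latch 1 = hw */

        printf("latch 0: %s\n", names[latch_status(word, 0)]);
        printf("latch 1: %s\n", names[latch_status(word, 1)]);
        return 0;
}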
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ReadLatchRegisterValue |
-| (unsigned char_ b_BoardHandle,|
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_LatchReg, |
-| PULONG_ pul_LatchValue) |
-+----------------------------------------------------------------------------+
-| Task : Read the latch register value from selected module |
-| (b_ModulNbr) and selected latch register (b_LatchReg). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-| unsigned char_ b_LatchReg : Selected latch register |
-| 0 : for the first latch register |
-| 1 : for the second latch register |
-+----------------------------------------------------------------------------+
-| Output Parameters : PULONG_ pul_LatchValue : Latch register value |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: The selected latch register parameter is wrong |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_ReadLatchRegisterValue(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_LatchReg,
- unsigned int *pul_LatchValue)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*************************************/
- /* Test the latch register parameter */
- /*************************************/
-
- if (b_LatchReg < 2) {
- *pul_LatchValue = inl(devpriv->s_BoardInfos.
- ui_Address + ((b_LatchReg + 1) * 4) +
- (64 * b_ModulNbr));
-
- } else {
- /**************************************************/
- /* The selected latch register parameter is wrong */
- /**************************************************/
-
- DPRINTK("The selected latch register parameter is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_Read16BitCounterValue |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_SelectedCounter, |
-| unsigned int *_ pui_CounterValue) |
-+----------------------------------------------------------------------------+
-| Task : Latch the selected 16-Bit counter (b_SelectedCounter) |
-| from selected module (b_ModulNbr) into the first |
-| latch register and return the latched value. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-| unsigned char_ b_SelectedCounter : Selected 16-Bit counter |
-| (0 or 1) |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned int *_ pui_CounterValue : 16-Bit counter value |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: The selected 16-Bit counter parameter is wrong |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_Read16BitCounterValue(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_SelectedCounter,
- unsigned int *pui_CounterValue)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_LatchValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /******************************/
- /* Test the counter selection */
- /******************************/
-
- if (b_SelectedCounter < 2) {
- /*********************/
- /* Latch the counter */
- /*********************/
-
- outl(1, devpriv->s_BoardInfos.
- ui_Address + (64 * b_ModulNbr));
-
- /************************/
- /* Read the latch value */
- /************************/
-
-				dw_LatchValue = inl(devpriv->s_BoardInfos.
-					ui_Address + 4 + (64 * b_ModulNbr));
-
-				*pui_CounterValue =
-					(unsigned int) ((dw_LatchValue >> (16 *
-					b_SelectedCounter)) & 0xFFFFU);
- } else {
- /**************************************************/
- /* The selected 16-Bit counter parameter is wrong */
- /**************************************************/
-
- DPRINTK("The selected 16-Bit counter parameter is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
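The 16-bit read above first latches the counter by writing 1 to the module base, then reads a single 32-bit latch word at offset 4 and selects one half of it. The half-word selection can be checked in isolation; a small sketch (the function name is ours):

    #include <stdio.h>

    /* Pick 16-bit counter 0 or 1 out of a latched 32-bit word, as the
     * deleted Read16BitCounterValue does with (value >> (16 * sel)) & 0xFFFF. */
    static unsigned int pick_16bit_counter(unsigned int latched, unsigned int selected)
    {
            return (latched >> (16 * selected)) & 0xFFFFU;
    }

    int main(void)
    {
            unsigned int latched = 0xBEEF1234;  /* counter 1 = 0xBEEF, counter 0 = 0x1234 */

            printf("counter 0 = 0x%04x\n", pick_16bit_counter(latched, 0));
            printf("counter 1 = 0x%04x\n", pick_16bit_counter(latched, 1));
            return 0;
    }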
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_Read32BitCounterValue |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| PULONG_ pul_CounterValue) |
-+----------------------------------------------------------------------------+
-| Task : Latch the 32-Bit counter from selected module |
-| (b_ModulNbr) in to the first latch register and return |
-| the latched value. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : PULONG_ pul_CounterValue : 32-Bit counter value |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_Read32BitCounterValue(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned int *pul_CounterValue)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*********************/
-				/* Latch the counter */
- /*********************/
-
- outl(1, devpriv->s_BoardInfos.
- ui_Address + (64 * b_ModulNbr));
-
- /************************/
- /* Read the latch value */
- /************************/
-
- *pul_CounterValue = inl(devpriv->s_BoardInfos.
- ui_Address + 4 + (64 * b_ModulNbr));
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_GetIndexStatus (unsigned char_ b_BoardHandle,|
-| unsigned char_ b_ModulNbr, |
-| unsigned char *_ pb_IndexStatus)|
-+----------------------------------------------------------------------------+
-| Task : Return the index status |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_IndexStatus : 0 : No INDEX occurred         |
-|                                                       1 : An INDEX occurred         |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: Index not initialised see function |
-| "i_APCI1710_InitIndex" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_GetIndexStatus(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char *pb_IndexStatus)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_StatusReg = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*****************************/
- /* Test if index initialised */
- /*****************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_IndexInit) {
- dw_StatusReg = inl(devpriv->s_BoardInfos.
- ui_Address + 12 + (64 * b_ModulNbr));
-
- *pb_IndexStatus = (unsigned char) (dw_StatusReg & 1);
- } else {
- /*************************************************************/
- /* Index not initialised see function "i_APCI1710_InitIndex" */
- /*************************************************************/
-
- DPRINTK("Index not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_GetReferenceStatus |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char *_ pb_ReferenceStatus) |
-+----------------------------------------------------------------------------+
-| Task : Return the reference status |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_ReferenceStatus : 0 : No REFERENCE occurred |
-|                                                           1 : A REFERENCE occurred  |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: Reference not initialised see function |
-| "i_APCI1710_InitReference" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_GetReferenceStatus(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char *pb_ReferenceStatus)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_StatusReg = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*********************************/
- /* Test if reference initialised */
- /*********************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.b_ReferenceInit) {
- dw_StatusReg = inl(devpriv->s_BoardInfos.
- ui_Address + 24 + (64 * b_ModulNbr));
-
- *pb_ReferenceStatus =
- (unsigned char) (~dw_StatusReg & 1);
- } else {
- /*********************************************************************/
- /* Reference not initialised see function "i_APCI1710_InitReference" */
- /*********************************************************************/
-
- DPRINTK("Reference not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_GetUASStatus |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char *_ pb_UASStatus) |
-+----------------------------------------------------------------------------+
-| Task : Return the error signal (UAS) status |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_UASStatus : 0 : UAS is low "0" |
-| 1 : UAS is high "1" |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_GetUASStatus(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char *pb_UASStatus)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_StatusReg = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- dw_StatusReg = inl(devpriv->s_BoardInfos.
- ui_Address + 24 + (64 * b_ModulNbr));
-
- *pb_UASStatus = (unsigned char) ((dw_StatusReg >> 1) & 1);
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
-
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_GetCBStatus |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char *_ pb_CBStatus) |
-+----------------------------------------------------------------------------+
-| Task : Return the counter overflow status |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_CBStatus : 0 : Counter no overflow |
-| 1 : Counter overflow |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_GetCBStatus(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char *pb_CBStatus)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_StatusReg = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- dw_StatusReg = inl(devpriv->s_BoardInfos.
- ui_Address + 16 + (64 * b_ModulNbr));
-
- *pb_CBStatus = (unsigned char) (dw_StatusReg & 1);
-
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_Get16BitCBStatus |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char *_ pb_CBStatusCounter0, |
-| unsigned char *_ pb_CBStatusCounter1) |
-+----------------------------------------------------------------------------+
-| Task : Returns the counter overflow (counter initialised to |
-| 2*16-bit) status from selected incremental counter |
-| module |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_CBStatusCounter0 : 0 : No overflow occurred |
-|                                                                for the first 16-bit |
-|                                                                counter              |
-|                                                            1 : Overflow occurred for|
-|                                                                the first 16-bit     |
-|                                                                counter              |
-|                     unsigned char *_ pb_CBStatusCounter1 : 0 : No overflow occurred |
-|                                                                for the second 16-bit|
-|                                                                counter              |
-|                                                            1 : Overflow occurred for|
-|                                                                the second 16-bit    |
-|                                                                counter              |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: Counter not initialised to 2*16-bit mode. |
-| See function "i_APCI1710_InitCounter" |
-| -5: Firmware revision error |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_Get16BitCBStatus(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char *pb_CBStatusCounter0,
- unsigned char *pb_CBStatusCounter1)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_StatusReg = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*************************/
- /* Test if 2*16-Bit mode */
- /*************************/
-
- if ((devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister1 & 0x10) == 0x10) {
- /*****************************/
- /* Test the Firmware version */
- /*****************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration
- [b_ModulNbr] & 0xFFFF) >=
- 0x3136) {
- dw_StatusReg =
- inl(devpriv->s_BoardInfos.
- ui_Address + 16 +
- (64 * b_ModulNbr));
-
- *pb_CBStatusCounter1 =
- (unsigned char) ((dw_StatusReg >> 0) &
- 1);
- *pb_CBStatusCounter0 =
- (unsigned char) ((dw_StatusReg >> 1) &
- 1);
- } /* if ((ps_APCI1710Variable->s_Board [b_BoardHandle].s_BoardInfos.dw_MolduleConfiguration [b_ModulNbr] & 0xFFFF) >= 0x3136) */
- else {
- /****************************/
- /* Firmware revision error */
- /****************************/
-
- i_ReturnValue = -5;
- } /* if ((ps_APCI1710Variable->s_Board [b_BoardHandle].s_BoardInfos.dw_MolduleConfiguration [b_ModulNbr] & 0xFFFF) >= 0x3136) */
- } /* if ((ps_APCI1710Variable->s_Board [b_BoardHandle].s_ModuleInfo [b_ModulNbr].s_SiemensCounterInfo.s_ModeRegister.s_ByteModeRegister.b_ModeRegister1 & 0x10) == 0x10) */
- else {
- /********************************************/
- /* Counter not initialised to 2*16-bit mode */
- /* "i_APCI1710_InitCounter" */
- /********************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -4;
- } /* if ((ps_APCI1710Variable->s_Board [b_BoardHandle].s_ModuleInfo [b_ModulNbr].s_SiemensCounterInfo.s_ModeRegister.s_ByteModeRegister.b_ModeRegister1 & 0x10) == 0x10) */
- } /* if (ps_APCI1710Variable->s_Board [b_BoardHandle].s_ModuleInfo [b_ModulNbr].s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) */
- else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- } /* if (ps_APCI1710Variable->s_Board [b_BoardHandle].s_ModuleInfo [b_ModulNbr].s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) */
- } /* if (b_ModulNbr < 4) */
- else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- } /* if (b_ModulNbr < 4) */
-
- return i_ReturnValue;
-}
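Get16BitCBStatus above gates the read on two conditions taken from the deleted code: mode register 1 must have bit 0x10 set (counter initialised to 2*16-bit mode) and the low 16 bits of the module configuration word must be at least 0x3136 (firmware revision). A small sketch of those two tests on plain integers (the function and variable names are ours):

    #include <stdio.h>

    /* Both predicates mirror the checks in the deleted Get16BitCBStatus. */
    static int is_dual_16bit_mode(unsigned char mode_register1)
    {
            return (mode_register1 & 0x10) == 0x10;
    }

    static int firmware_supports_16bit_cb(unsigned int module_configuration)
    {
            return (module_configuration & 0xFFFF) >= 0x3136;
    }

    int main(void)
    {
            printf("mode 0x14: dual 16-bit? %d\n", is_dual_16bit_mode(0x14));
            printf("config 0x40003135: supported? %d\n", firmware_supports_16bit_cb(0x40003135));
            printf("config 0x40003136: supported? %d\n", firmware_supports_16bit_cb(0x40003136));
            return 0;
    }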
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_GetUDStatus |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char *_ pb_UDStatus) |
-+----------------------------------------------------------------------------+
-| Task : Return the counter progress status |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_UDStatus : 0 : Counter progress in the |
-| selected mode down |
-| 1 : Counter progress in the |
-| selected mode up |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_GetUDStatus(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char *pb_UDStatus)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_StatusReg = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- dw_StatusReg = inl(devpriv->s_BoardInfos.
- ui_Address + 24 + (64 * b_ModulNbr));
-
- *pb_UDStatus = (unsigned char) ((dw_StatusReg >> 2) & 1);
-
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
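Three of the functions above read the same status register at offset 24 inside the module window and pick different bits: GetReferenceStatus returns the inverted bit 0, GetUASStatus bit 1, and GetUDStatus bit 2. A sketch decoding one such status word in one pass (the struct and function names are ours):

    #include <stdio.h>

    struct cpt_status {
            unsigned char reference;    /* 1: a REFERENCE occurred (inverted bit 0) */
            unsigned char uas;          /* 1: UAS error signal is high (bit 1) */
            unsigned char up;           /* 1: counter counting up (bit 2) */
    };

    /* Decode the register at module offset 24 the way the deleted
     * GetReferenceStatus/GetUASStatus/GetUDStatus functions do. */
    static struct cpt_status decode_status24(unsigned int reg)
    {
            struct cpt_status s;

            s.reference = (unsigned char) (~reg & 1);
            s.uas = (unsigned char) ((reg >> 1) & 1);
            s.up = (unsigned char) ((reg >> 2) & 1);
            return s;
    }

    int main(void)
    {
            struct cpt_status s = decode_status24(0x06);

            printf("reference=%u uas=%u up=%u\n", s.reference, s.uas, s.up);
            return 0;
    }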
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_GetInterruptUDLatchedStatus |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char *_ pb_UDStatus) |
-+----------------------------------------------------------------------------+
-| Task              : Return the counter progress latched status after an     |
-|                     index interrupt has occurred.                           |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_UDStatus : 0 : Counter progress in the |
-| selected mode down |
-| 1 : Counter progress in the |
-| selected mode up |
-|                                       2 : No index interrupt occurred |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: Interrupt function not initialised. |
-| See function "i_APCI1710_SetBoardIntRoutineX" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_GetInterruptUDLatchedStatus(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char *pb_UDStatus)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_StatusReg = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*********************************/
- /* Test if index interrupt occur */
- /*********************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.b_IndexInterruptOccur == 1) {
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.b_IndexInterruptOccur = 0;
-
- dw_StatusReg = inl(devpriv->s_BoardInfos.
- ui_Address + 12 + (64 * b_ModulNbr));
-
- *pb_UDStatus = (unsigned char) ((dw_StatusReg >> 1) & 1);
- } else {
- /****************************/
- /* No index interrupt occur */
- /****************************/
-
- *pb_UDStatus = 2;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
- /*
- +----------------------------------------------------------------------------+
- | Function Name : _INT_ i_APCI1710_ReadFrequencyMeasurement |
- | (unsigned char_ b_BoardHandle, |
- | unsigned char_ b_ModulNbr, |
- | unsigned char *_ pb_Status, |
- | PULONG_ pul_ReadValue) |
- +----------------------------------------------------------------------------+
- | Task : Returns the status (pb_Status) and the number of |
- | increments in the set time. |
- | See function " i_APCI1710_InitFrequencyMeasurement " |
- +----------------------------------------------------------------------------+
- | Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
- | unsigned char_ b_ModulNbr : Number of the module to be |
- | configured (0 to 3) |
- +----------------------------------------------------------------------------+
- | Output Parameters : unsigned char *_ pb_Status : Returns the frequency |
- | measurement status |
- | 0 : Counting cycle not |
- | started. |
- | 1 : Counting cycle started. |
- | 2 : Counting cycle stopped. |
- | The measurement cycle is |
- | completed. |
- | unsigned char *_ pb_UDStatus : 0 : Counter progress in the |
- | selected mode down |
- | 1 : Counter progress in the |
- | selected mode up |
- | PULONG_ pul_ReadValue : Return the number of |
- | increments in the defined |
- | time base. |
- +----------------------------------------------------------------------------+
- | Return Value : 0: No error |
- | -1: The handle parameter of the board is wrong |
- | -2: The selected module number is wrong |
- | -3: Counter not initialised see function |
- | "i_APCI1710_InitCounter" |
- | -4: Frequency measurement logic not initialised. |
- | See function "i_APCI1710_InitFrequencyMeasurement" |
- +----------------------------------------------------------------------------+
- */
-static int i_APCI1710_ReadFrequencyMeasurement(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char *pb_Status,
- unsigned char *pb_UDStatus,
- unsigned int *pul_ReadValue)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int ui_16BitValue;
- unsigned int dw_StatusReg;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /********************************************/
- /* Test if frequency measurement initialised */
- /********************************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.b_FrequencyMeasurementInit == 1) {
- /******************/
- /* Test if enable */
- /******************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.
- b_FrequencyMeasurementEnable == 1) {
- /*******************/
- /* Read the status */
- /*******************/
-
- dw_StatusReg =
- inl(devpriv->s_BoardInfos.
- ui_Address + 32 +
- (64 * b_ModulNbr));
-
- /**************************/
- /* Test if frequency stop */
- /**************************/
-
- if (dw_StatusReg & 1) {
- *pb_Status = 2;
- *pb_UDStatus =
- (unsigned char) ((dw_StatusReg >>
- 1) & 3);
-
- /******************/
- /* Read the value */
- /******************/
-
- *pul_ReadValue =
- inl(devpriv->
- s_BoardInfos.
- ui_Address + 28 +
- (64 * b_ModulNbr));
-
-						if (*pb_UDStatus == 0) {
-							/* Test the counter mode */
-							if ((devpriv->s_ModuleInfo[b_ModulNbr].
-								s_SiemensCounterInfo.s_ModeRegister.
-								s_ByteModeRegister.b_ModeRegister1 &
-								APCI1710_16BIT_COUNTER) ==
-								APCI1710_16BIT_COUNTER) {
-								/* Test if 16-bit counter 1 pulse occur */
-								if ((*pul_ReadValue & 0xFFFFU) != 0) {
-									ui_16BitValue =
-										(unsigned int) *pul_ReadValue & 0xFFFFU;
-									*pul_ReadValue =
-										(*pul_ReadValue & 0xFFFF0000UL) |
-										(0xFFFFU - ui_16BitValue);
-								}
-
-								/* Test if 16-bit counter 2 pulse occur */
-								if ((*pul_ReadValue & 0xFFFF0000UL) != 0) {
-									ui_16BitValue =
-										(unsigned int) ((*pul_ReadValue >> 16) & 0xFFFFU);
-									*pul_ReadValue =
-										(*pul_ReadValue & 0xFFFFUL) |
-										((0xFFFFU - ui_16BitValue) << 16);
-								}
-							} else {
-								if (*pul_ReadValue != 0)
-									*pul_ReadValue =
-										0xFFFFFFFFUL - *pul_ReadValue;
-							}
-						} else {
-							if (*pb_UDStatus == 1) {
-								/* Test if 16-bit counter 2 pulse occur */
-								if ((*pul_ReadValue & 0xFFFF0000UL) != 0) {
-									ui_16BitValue =
-										(unsigned int) ((*pul_ReadValue >> 16) & 0xFFFFU);
-									*pul_ReadValue =
-										(*pul_ReadValue & 0xFFFFUL) |
-										((0xFFFFU - ui_16BitValue) << 16);
-								}
-							} else {
-								if (*pb_UDStatus == 2) {
-									/* Test if 16-bit counter 1 pulse occur */
-									if ((*pul_ReadValue & 0xFFFFU) != 0) {
-										ui_16BitValue =
-											(unsigned int) *pul_ReadValue & 0xFFFFU;
-										*pul_ReadValue =
-											(*pul_ReadValue & 0xFFFF0000UL) |
-											(0xFFFFU - ui_16BitValue);
-									}
-								}
-							}
-						}
- } else {
- *pb_Status = 1;
- *pb_UDStatus = 0;
- }
- } else {
- *pb_Status = 0;
- *pb_UDStatus = 0;
- }
- } else {
- /***********************************************/
- /* Frequency measurement logic not initialised */
- /***********************************************/
-
- DPRINTK("Frequency measurement logic not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
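The correction block inside i_APCI1710_ReadFrequencyMeasurement replaces each non-zero down-counting half with its complement relative to the counter width: 0xFFFF - value per 16-bit half in 2*16-bit mode, 0xFFFFFFFF - value in 32-bit mode, chosen from the direction bits of the status word. A standalone sketch of the per-half complement (the helper name is ours):

    #include <stdio.h>

    /* Complement a non-zero 16-bit half of the latched frequency value,
     * as the deleted ReadFrequencyMeasurement does for down-counting halves. */
    static unsigned int complement_half(unsigned int value, int high_half)
    {
            unsigned int half;

            if (high_half) {
                    half = (value >> 16) & 0xFFFFU;
                    if (half != 0)
                            value = (value & 0xFFFFU) | ((0xFFFFU - half) << 16);
            } else {
                    half = value & 0xFFFFU;
                    if (half != 0)
                            value = (value & 0xFFFF0000U) | (0xFFFFU - half);
            }
            return value;
    }

    int main(void)
    {
            printf("0x00020003 -> 0x%08x (low half)\n", complement_half(0x00020003, 0));
            printf("0x00020003 -> 0x%08x (high half)\n", complement_half(0x00020003, 1));
            return 0;
    }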
-/*
- * Read and Get functions for INC_CPT
- */
-static int i_APCI1710_InsnReadINCCPT(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_ReadType;
- int i_ReturnValue = 0;
-
- ui_ReadType = CR_CHAN(insn->chanspec);
-
- devpriv->tsk_Current = current; /* Save the current process task structure */
- switch (ui_ReadType) {
- case APCI1710_INCCPT_READLATCHREGISTERSTATUS:
- i_ReturnValue = i_APCI1710_ReadLatchRegisterStatus(dev,
- (unsigned char) CR_AREF(insn->chanspec),
- (unsigned char) CR_RANGE(insn->chanspec), (unsigned char *) &data[0]);
- break;
-
- case APCI1710_INCCPT_READLATCHREGISTERVALUE:
- i_ReturnValue = i_APCI1710_ReadLatchRegisterValue(dev,
- (unsigned char) CR_AREF(insn->chanspec),
- (unsigned char) CR_RANGE(insn->chanspec), (unsigned int *) &data[0]);
- printk("Latch Register Value %d\n", data[0]);
- break;
-
- case APCI1710_INCCPT_READ16BITCOUNTERVALUE:
- i_ReturnValue = i_APCI1710_Read16BitCounterValue(dev,
- (unsigned char) CR_AREF(insn->chanspec),
- (unsigned char) CR_RANGE(insn->chanspec), (unsigned int *) &data[0]);
- break;
-
- case APCI1710_INCCPT_READ32BITCOUNTERVALUE:
- i_ReturnValue = i_APCI1710_Read32BitCounterValue(dev,
- (unsigned char) CR_AREF(insn->chanspec), (unsigned int *) &data[0]);
- break;
-
- case APCI1710_INCCPT_GETINDEXSTATUS:
- i_ReturnValue = i_APCI1710_GetIndexStatus(dev,
- (unsigned char) CR_AREF(insn->chanspec), (unsigned char *) &data[0]);
- break;
-
- case APCI1710_INCCPT_GETREFERENCESTATUS:
- i_ReturnValue = i_APCI1710_GetReferenceStatus(dev,
- (unsigned char) CR_AREF(insn->chanspec), (unsigned char *) &data[0]);
- break;
-
- case APCI1710_INCCPT_GETUASSTATUS:
- i_ReturnValue = i_APCI1710_GetUASStatus(dev,
- (unsigned char) CR_AREF(insn->chanspec), (unsigned char *) &data[0]);
- break;
-
- case APCI1710_INCCPT_GETCBSTATUS:
- i_ReturnValue = i_APCI1710_GetCBStatus(dev,
- (unsigned char) CR_AREF(insn->chanspec), (unsigned char *) &data[0]);
- break;
-
- case APCI1710_INCCPT_GET16BITCBSTATUS:
- i_ReturnValue = i_APCI1710_Get16BitCBStatus(dev,
- (unsigned char) CR_AREF(insn->chanspec),
- (unsigned char *) &data[0], (unsigned char *) &data[1]);
- break;
-
- case APCI1710_INCCPT_GETUDSTATUS:
- i_ReturnValue = i_APCI1710_GetUDStatus(dev,
- (unsigned char) CR_AREF(insn->chanspec), (unsigned char *) &data[0]);
-
- break;
-
- case APCI1710_INCCPT_GETINTERRUPTUDLATCHEDSTATUS:
- i_ReturnValue = i_APCI1710_GetInterruptUDLatchedStatus(dev,
- (unsigned char) CR_AREF(insn->chanspec), (unsigned char *) &data[0]);
- break;
-
- case APCI1710_INCCPT_READFREQUENCYMEASUREMENT:
- i_ReturnValue = i_APCI1710_ReadFrequencyMeasurement(dev,
- (unsigned char) CR_AREF(insn->chanspec),
- (unsigned char *) &data[0],
- (unsigned char *) &data[1], (unsigned int *) &data[2]);
- break;
-
- case APCI1710_INCCPT_READINTERRUPT:
- data[0] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].b_OldModuleMask;
- data[1] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].ul_OldInterruptMask;
- data[2] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].ul_OldCounterLatchValue;
-
-	/***************************/
- /* Increment the read FIFO */
- /***************************/
-
- devpriv->
- s_InterruptParameters.
- ui_Read = (devpriv->s_InterruptParameters.
- ui_Read + 1) % APCI1710_SAVE_INTERRUPT;
-
- break;
-
- default:
- printk("ReadType Parameter wrong\n");
- }
-
- if (i_ReturnValue >= 0)
- i_ReturnValue = insn->n;
- return i_ReturnValue;
-
-}
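i_APCI1710_InsnReadINCCPT decodes the chanspec as: CR_CHAN selects the read type, CR_AREF the module number, and CR_RANGE the latch register or 16-bit counter where one is needed; results are returned in data[]. A hedged userspace sketch using comedilib; the device path, subdevice index, and the numeric value of the APCI1710_INCCPT_READLATCHREGISTERVALUE read type are assumptions, not taken from this file:

    #include <stdio.h>
    #include <string.h>
    #include <comedilib.h>

    int main(void)
    {
            comedi_t *dev = comedi_open("/dev/comedi0");    /* example path */
            comedi_insn insn;
            lsampl_t data[3];
            unsigned int subdev = 0;        /* assumed: the incremental counter subdevice */
            unsigned int read_type = 1;     /* assumed: READLATCHREGISTERVALUE, via CR_CHAN */
            unsigned int module = 0;        /* module number, decoded via CR_AREF */
            unsigned int latch = 0;         /* latch register, decoded via CR_RANGE */

            if (!dev)
                    return 1;

            memset(&insn, 0, sizeof(insn));
            insn.insn = INSN_READ;
            insn.n = 1;
            insn.data = data;
            insn.subdev = subdev;
            insn.chanspec = CR_PACK(read_type, latch, module);

            if (comedi_do_insn(dev, &insn) < 0)
                    return 1;

            printf("latch register value: %u\n", data[0]);
            comedi_close(dev);
            return 0;
    }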
diff --git a/drivers/staging/comedi/drivers/addi-data/APCI1710_Inp_cpt.c b/drivers/staging/comedi/drivers/addi-data/APCI1710_Inp_cpt.c
deleted file mode 100644
index be0c6ad..0000000
--- a/drivers/staging/comedi/drivers/addi-data/APCI1710_Inp_cpt.c
+++ /dev/null
@@ -1,870 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
-	Tel: +49(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-You should also find the complete GPL in the COPYING file accompanying this source code.
-
-@endverbatim
-*/
-/*
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-----------------------------------------------------------------------+
- | Project : API APCI1710 | Compiler : gcc |
- | Module name : Inp_CPT.C | Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-----------------------------------------------------------------------+
- | Description : APCI-1710 pulse encoder module |
- | |
- | |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +-----------------------------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | | | |
- |----------|-----------|------------------------------------------------|
- | 08/05/00 | Guinot C | - 0400/0228 All Function in RING 0 |
- | | | available |
- +-----------------------------------------------------------------------+
-*/
-
-#define APCI1710_SINGLE 0
-#define APCI1710_CONTINUOUS 1
-
-#define APCI1710_PULSEENCODER_READ 0
-#define APCI1710_PULSEENCODER_WRITE 1
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_InitPulseEncoder |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_PulseEncoderNbr, |
-| unsigned char_ b_InputLevelSelection, |
-| unsigned char_ b_TriggerOutputAction, |
-| ULONG_ ul_StartValue) |
-+----------------------------------------------------------------------------+
-| Task              : Configure the pulse encoder operating mode selected via |
-|                      b_ModulNbr and b_PulseEncoderNbr. The pulse encoder     |
-|                      decrements the counter value by 1 after each pulse.     |
-|                                                                              |
-|                      You must call this function before calling any other    |
-|                      function that accesses the pulse encoders.              |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710|
-| unsigned char_ b_ModulNbr : Module number to |
-| configure (0 to 3) |
-| unsigned char_ b_PulseEncoderNbr : Pulse encoder selection |
-| (0 to 3) |
-| unsigned char_ b_InputLevelSelection : Input level selection |
-| (0 or 1) |
-|                                               0 : Set the pulse encoder|
-|                                                   to count low level   |
-|                                                   pulses.              |
-|                                               1 : Set the pulse encoder|
-|                                                   to count high level  |
-|                                                   pulses.              |
-| unsigned char_ b_TriggerOutputAction : Digital TRIGGER output |
-| action |
-| 0 : No action |
-| 1 : Set the trigger |
-| output to "1" |
-| (high) after the |
-| passage from 1 to|
-| 0 from pulse |
-| encoder. |
-| 2 : Set the trigger |
-| output to "0" |
-| (low) after the |
-| passage from 1 to|
-| 0 from pulse |
-| encoder |
-| ULONG_ ul_StartValue : Pulse encoder start value|
-| (1 to 4294967295)
- b_ModulNbr =(unsigned char) CR_AREF(insn->chanspec);
- b_PulseEncoderNbr =(unsigned char) data[0];
- b_InputLevelSelection =(unsigned char) data[1];
- b_TriggerOutputAction =(unsigned char) data[2];
- ul_StartValue =(unsigned int) data[3];
- |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The module is not a pulse encoder module |
-| -3: Pulse encoder selection is wrong |
-| -4: Input level selection is wrong |
-| -5: Digital TRIGGER output action selection is wrong |
-| -6: Pulse encoder start value is wrong |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI1710_InsnConfigInitPulseEncoder(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_IntRegister;
- unsigned char b_ModulNbr;
- unsigned char b_PulseEncoderNbr;
- unsigned char b_InputLevelSelection;
- unsigned char b_TriggerOutputAction;
- unsigned int ul_StartValue;
-
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_PulseEncoderNbr = (unsigned char) data[0];
- b_InputLevelSelection = (unsigned char) data[1];
- b_TriggerOutputAction = (unsigned char) data[2];
- ul_StartValue = (unsigned int) data[3];
-
- i_ReturnValue = insn->n;
-
- /***********************************/
- /* Test the selected module number */
- /***********************************/
-
- if (b_ModulNbr <= 3) {
- /*************************/
- /* Test if pulse encoder */
- /*************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- APCI1710_PULSE_ENCODER) ==
- APCI1710_PULSE_ENCODER) {
- /******************************************/
- /* Test the selected pulse encoder number */
- /******************************************/
-
- if (b_PulseEncoderNbr <= 3) {
- /************************/
- /* Test the input level */
- /************************/
-
- if ((b_InputLevelSelection == 0)
- || (b_InputLevelSelection == 1)) {
-					/********************************************/
-					/* Test the output TRIGGER action selection */
-					/********************************************/
-
- if ((b_TriggerOutputAction <= 2)
- || (b_PulseEncoderNbr > 0)) {
- if (ul_StartValue > 1) {
-
- dw_IntRegister =
- inl(devpriv->
- s_BoardInfos.
- ui_Address +
- 20 +
- (64 * b_ModulNbr));
-
- /***********************/
- /* Set the start value */
- /***********************/
-
- outl(ul_StartValue,
- devpriv->
- s_BoardInfos.
- ui_Address +
- (b_PulseEncoderNbr
- * 4) +
- (64 * b_ModulNbr));
-
- /***********************/
- /* Set the input level */
- /***********************/
-							devpriv->s_ModuleInfo[b_ModulNbr].
-								s_PulseEncoderModuleInfo.dw_SetRegister =
-								(devpriv->s_ModuleInfo[b_ModulNbr].
-								s_PulseEncoderModuleInfo.dw_SetRegister &
-								(0xFFFFFFFFUL - (1UL << (8 + b_PulseEncoderNbr)))) |
-								((1UL & (~b_InputLevelSelection)) << (8 + b_PulseEncoderNbr));
-
-							/* Test if output trigger used */
-							if ((b_TriggerOutputAction > 0) && (b_PulseEncoderNbr > 1)) {
-								/* Enable the output action */
-								devpriv->s_ModuleInfo[b_ModulNbr].
-									s_PulseEncoderModuleInfo.dw_SetRegister |=
-									1UL << (4 + b_PulseEncoderNbr);
-
-								/* Set the output TRIGGER action */
-								devpriv->s_ModuleInfo[b_ModulNbr].
-									s_PulseEncoderModuleInfo.dw_SetRegister =
-									(devpriv->s_ModuleInfo[b_ModulNbr].
-									s_PulseEncoderModuleInfo.dw_SetRegister &
-									(0xFFFFFFFFUL - (1UL << (12 + b_PulseEncoderNbr)))) |
-									((1UL & (b_TriggerOutputAction - 1)) << (12 + b_PulseEncoderNbr));
-							} else {
-								/* Disable the output action */
-								devpriv->s_ModuleInfo[b_ModulNbr].
-									s_PulseEncoderModuleInfo.dw_SetRegister &=
-									(0xFFFFFFFFUL - (1UL << (4 + b_PulseEncoderNbr)));
-							}
-
-							/* Set the configuration */
-							outl(devpriv->s_ModuleInfo[b_ModulNbr].
-								s_PulseEncoderModuleInfo.dw_SetRegister,
-								devpriv->s_BoardInfos.ui_Address + 20 +
-								(64 * b_ModulNbr));
-
-							devpriv->s_ModuleInfo[b_ModulNbr].
-								s_PulseEncoderModuleInfo.
-								s_PulseEncoderInfo[b_PulseEncoderNbr].
-								b_PulseEncoderInit = 1;
- } else {
- /**************************************/
- /* Pulse encoder start value is wrong */
- /**************************************/
-
- DPRINTK("Pulse encoder start value is wrong\n");
- i_ReturnValue = -6;
- }
- } else {
- /****************************************************/
- /* Digital TRIGGER output action selection is wrong */
- /****************************************************/
-
- DPRINTK("Digital TRIGGER output action selection is wrong\n");
- i_ReturnValue = -5;
- }
- } else {
- /**********************************/
- /* Input level selection is wrong */
- /**********************************/
-
- DPRINTK("Input level selection is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /************************************/
- /* Pulse encoder selection is wrong */
- /************************************/
-
- DPRINTK("Pulse encoder selection is wrong\n");
- i_ReturnValue = -3;
- }
- } else {
- /********************************************/
- /* The module is not a pulse encoder module */
- /********************************************/
-
- DPRINTK("The module is not a pulse encoder module\n");
- i_ReturnValue = -2;
- }
- } else {
- /********************************************/
- /* The module is not a pulse encoder module */
- /********************************************/
-
- DPRINTK("The module is not a pulse encoder module\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
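From the deleted init code, the pulse encoder "set" register (written to module offset 20) packs, per encoder n: bit 8+n holds the inverted input level selection, bit 4+n enables the trigger output action, and bit 12+n holds the action polarity (b_TriggerOutputAction - 1). A sketch composing such a value from scratch; the helper name is ours, it skips the read-modify-write of the live register, and it applies the action bits whenever the action is non-zero, whereas the deleted code additionally checks the encoder number:

    #include <stdio.h>

    /* Build the per-encoder bits of the pulse encoder set register as the
     * deleted i_APCI1710_InsnConfigInitPulseEncoder does. */
    static unsigned int encoder_set_bits(unsigned int n, unsigned int input_level,
                                         unsigned int trigger_action)
    {
            unsigned int bits = 0;

            bits |= (1U & ~input_level) << (8 + n);         /* input level selection */
            if (trigger_action > 0) {
                    bits |= 1U << (4 + n);                  /* enable output action */
                    bits |= (1U & (trigger_action - 1)) << (12 + n); /* action polarity */
            }
            return bits;
    }

    int main(void)
    {
            printf("encoder 2, level 0, action 2 -> 0x%04x\n", encoder_set_bits(2, 0, 2));
            return 0;
    }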
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_EnablePulseEncoder |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_PulseEncoderNbr, |
-| unsigned char_ b_CycleSelection, |
-| unsigned char_ b_InterruptHandling) |
-+----------------------------------------------------------------------------+
-| Task              : Enable or disable the selected pulse encoder            |
-|                     (b_PulseEncoderNbr) from the selected module            |
-|                     (b_ModulNbr). Each input pulse decrements the pulse     |
-|                     encoder counter value by 1. If the interrupt is enabled |
-|                     (b_InterruptHandling), an interrupt is generated when   |
-|                     the pulse encoder has run down.                         |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710|
-| unsigned char_ b_ModulNbr : Module number to |
-| configure (0 to 3) |
-| unsigned char_ b_PulseEncoderNbr : Pulse encoder selection |
-| (0 to 3) |
-| unsigned char_ b_CycleSelection : APCI1710_CONTINUOUS: |
-| Each time the |
-| counting value is set|
-| on "0", the pulse |
-| encoder load the |
-| start value after |
-| the next pulse. |
-| APCI1710_SINGLE: |
-| If the counter is set|
-| on "0", the pulse |
-| encoder is stopped. |
-| unsigned char_ b_InterruptHandling : Interrupts can be |
-| generated, when the pulse|
-| encoder has run down. |
-| With this parameter the |
-| user decides if |
-| interrupts are used or |
-| not. |
-| APCI1710_ENABLE: |
-| Interrupts are enabled |
-| APCI1710_DISABLE: |
-| Interrupts are disabled
-
- b_ModulNbr =(unsigned char) CR_AREF(insn->chanspec);
- b_Action =(unsigned char) data[0];
- b_PulseEncoderNbr =(unsigned char) data[1];
- b_CycleSelection =(unsigned char) data[2];
- b_InterruptHandling =(unsigned char) data[3];|
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection is wrong |
-| -3: Pulse encoder selection is wrong |
-| -4: Pulse encoder not initialised. |
-| See function "i_APCI1710_InitPulseEncoder" |
-| -5: Cycle selection mode is wrong |
-| -6: Interrupt handling mode is wrong |
-| -7: Interrupt routine not installed. |
-| See function "i_APCI1710_SetBoardIntRoutineX" |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI1710_InsnWriteEnableDisablePulseEncoder(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned char b_ModulNbr;
- unsigned char b_PulseEncoderNbr;
- unsigned char b_CycleSelection;
- unsigned char b_InterruptHandling;
- unsigned char b_Action;
-
- i_ReturnValue = insn->n;
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_Action = (unsigned char) data[0];
- b_PulseEncoderNbr = (unsigned char) data[1];
- b_CycleSelection = (unsigned char) data[2];
- b_InterruptHandling = (unsigned char) data[3];
-
- /***********************************/
- /* Test the selected module number */
- /***********************************/
-
- if (b_ModulNbr <= 3) {
- /******************************************/
- /* Test the selected pulse encoder number */
- /******************************************/
-
- if (b_PulseEncoderNbr <= 3) {
- /*************************************/
- /* Test if pulse encoder initialised */
- /*************************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_PulseEncoderModuleInfo.
- s_PulseEncoderInfo[b_PulseEncoderNbr].
- b_PulseEncoderInit == 1) {
- switch (b_Action) {
-
- case APCI1710_ENABLE:
- /****************************/
- /* Test the cycle selection */
- /****************************/
-
- if (b_CycleSelection ==
- APCI1710_CONTINUOUS
- || b_CycleSelection ==
- APCI1710_SINGLE) {
- /*******************************/
- /* Test the interrupt handling */
- /*******************************/
-
- if (b_InterruptHandling ==
- APCI1710_ENABLE
- || b_InterruptHandling
- == APCI1710_DISABLE) {
- /******************************/
- /* Test if interrupt not used */
- /******************************/
-
- if (b_InterruptHandling
- ==
- APCI1710_DISABLE)
- {
- /*************************/
- /* Disable the interrupt */
- /*************************/
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_PulseEncoderModuleInfo.
- dw_SetRegister
- =
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_PulseEncoderModuleInfo.
- dw_SetRegister
- &
- (0xFFFFFFFFUL
- -
- (1UL << b_PulseEncoderNbr));
- } else {
-
- /************************/
- /* Enable the interrupt */
- /************************/
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_PulseEncoderModuleInfo.
- dw_SetRegister
- =
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_PulseEncoderModuleInfo.
- dw_SetRegister
- | (1UL
- <<
- b_PulseEncoderNbr);
- devpriv->tsk_Current = current; /* Save the current process task structure */
-
- }
-
- if (i_ReturnValue >= 0) {
- /***********************************/
- /* Enable or disable the interrupt */
- /***********************************/
-
- outl(devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_PulseEncoderModuleInfo.
- dw_SetRegister,
- devpriv->
- s_BoardInfos.
- ui_Address
- + 20 +
- (64 * b_ModulNbr));
-
- /****************************/
- /* Enable the pulse encoder */
- /****************************/
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_PulseEncoderModuleInfo.
- dw_ControlRegister
- =
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_PulseEncoderModuleInfo.
- dw_ControlRegister
- | (1UL
- <<
- b_PulseEncoderNbr);
-
- /**********************/
- /* Set the cycle mode */
- /**********************/
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_PulseEncoderModuleInfo.
- dw_ControlRegister
- =
- (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_PulseEncoderModuleInfo.
- dw_ControlRegister
- &
- (0xFFFFFFFFUL
- -
- (1 << (b_PulseEncoderNbr + 4)))) | ((b_CycleSelection & 1UL) << (4 + b_PulseEncoderNbr));
-
- /****************************/
- /* Enable the pulse encoder */
- /****************************/
-
- outl(devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_PulseEncoderModuleInfo.
- dw_ControlRegister,
- devpriv->
- s_BoardInfos.
- ui_Address
- + 16 +
- (64 * b_ModulNbr));
- }
- } else {
- /************************************/
- /* Interrupt handling mode is wrong */
- /************************************/
-
- DPRINTK("Interrupt handling mode is wrong\n");
- i_ReturnValue = -6;
- }
- } else {
- /*********************************/
- /* Cycle selection mode is wrong */
- /*********************************/
-
- DPRINTK("Cycle selection mode is wrong\n");
- i_ReturnValue = -5;
- }
- break;
-
- case APCI1710_DISABLE:
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_PulseEncoderModuleInfo.
- dw_ControlRegister =
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_PulseEncoderModuleInfo.
- dw_ControlRegister &
- (0xFFFFFFFFUL -
- (1UL << b_PulseEncoderNbr));
-
- /*****************************/
- /* Disable the pulse encoder */
- /*****************************/
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_PulseEncoderModuleInfo.
- dw_ControlRegister,
- devpriv->s_BoardInfos.
- ui_Address + 16 +
- (64 * b_ModulNbr));
-
- break;
- } /* switch End */
-
- } else {
- /*********************************/
- /* Pulse encoder not initialised */
- /*********************************/
-
- DPRINTK("Pulse encoder not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /************************************/
- /* Pulse encoder selection is wrong */
- /************************************/
-
- DPRINTK("Pulse encoder selection is wrong\n");
- i_ReturnValue = -3;
- }
- } else {
- /*****************************/
- /* Module selection is wrong */
- /*****************************/
-
- DPRINTK("Module selection is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
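The enable path above sets bit n of the control register to enable encoder n and stores the cycle selection in bit 4+n before writing the register to module offset 16; the disable path clears bit n. A small sketch of that bit bookkeeping on a shadow copy of the register (the helper names are ours; the APCI1710_SINGLE/APCI1710_CONTINUOUS values are the ones defined earlier in this file):

    #include <stdio.h>

    #define APCI1710_SINGLE         0
    #define APCI1710_CONTINUOUS     1

    /* Update a shadow of the pulse encoder control register (module offset 16)
     * the way the deleted enable/disable insn handler does. */
    static unsigned int encoder_enable(unsigned int ctrl, unsigned int n, unsigned int cycle)
    {
            ctrl |= 1U << n;                                        /* enable encoder n */
            ctrl = (ctrl & ~(1U << (n + 4))) | ((cycle & 1U) << (n + 4)); /* cycle mode */
            return ctrl;
    }

    static unsigned int encoder_disable(unsigned int ctrl, unsigned int n)
    {
            return ctrl & ~(1U << n);
    }

    int main(void)
    {
            unsigned int ctrl = 0;

            ctrl = encoder_enable(ctrl, 1, APCI1710_CONTINUOUS);
            printf("after enable : 0x%02x\n", ctrl);        /* 0x22 */
            ctrl = encoder_disable(ctrl, 1);
            printf("after disable: 0x%02x\n", ctrl);        /* 0x20 */
            return 0;
    }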
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ReadPulseEncoderStatus |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_PulseEncoderNbr, |
-| unsigned char *_ pb_Status) |
-+----------------------------------------------------------------------------+
-| Task APCI1710_PULSEENCODER_READ : Reads the pulse encoder status            |
-|                     and value from the selected pulse encoder               |
-|                     (b_PulseEncoderNbr) of the selected module (b_ModulNbr).|
-+----------------------------------------------------------------------------+
- unsigned char b_Type; data[0]
- APCI1710_PULSEENCODER_WRITE
-                      Writes a 32-bit value (ul_WriteValue) into the selected |
-|                     pulse encoder (b_PulseEncoderNbr) of the selected module|
-|                     (b_ModulNbr). This operation sets the new pulse encoder |
-|                     start value.
- APCI1710_PULSEENCODER_READ
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710|
-| CRAREF() unsigned char_ b_ModulNbr : Module number to |
-| configure (0 to 3) |
-| data[1] unsigned char_ b_PulseEncoderNbr : Pulse encoder selection |
-| (0 to 3)
- APCI1710_PULSEENCODER_WRITE
- data[2] ULONG_ ul_WriteValue : 32-bit value to be |
-| written |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_Status : Pulse encoder status. |
-|                                                    0 : No overflow occurred|
-|                                                    1 : Overflow occurred
- PULONG_ pul_ReadValue : Pulse encoder value | |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection is wrong |
-| -3: Pulse encoder selection is wrong |
-| -4: Pulse encoder not initialised. |
-| See function "i_APCI1710_InitPulseEncoder" |
-+----------------------------------------------------------------------------+
-*/
-
-/*_INT_ i_APCI1710_ReadPulseEncoderStatus (unsigned char_ b_BoardHandle,
- unsigned char_ b_ModulNbr,
- unsigned char_ b_PulseEncoderNbr,
-
- unsigned char *_ pb_Status)
- */
-static int i_APCI1710_InsnBitsReadWritePulseEncoder(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_StatusRegister;
- unsigned char b_ModulNbr;
- unsigned char b_PulseEncoderNbr;
- unsigned char *pb_Status;
- unsigned char b_Type;
- unsigned int *pul_ReadValue;
- unsigned int ul_WriteValue;
-
- i_ReturnValue = insn->n;
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_Type = (unsigned char) data[0];
- b_PulseEncoderNbr = (unsigned char) data[1];
- pb_Status = (unsigned char *) &data[0];
- pul_ReadValue = (unsigned int *) &data[1];
-
- /***********************************/
- /* Test the selected module number */
- /***********************************/
-
- if (b_ModulNbr <= 3) {
- /******************************************/
- /* Test the selected pulse encoder number */
- /******************************************/
-
- if (b_PulseEncoderNbr <= 3) {
- /*************************************/
- /* Test if pulse encoder initialised */
- /*************************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_PulseEncoderModuleInfo.
- s_PulseEncoderInfo[b_PulseEncoderNbr].
- b_PulseEncoderInit == 1) {
-
- switch (b_Type) {
- case APCI1710_PULSEENCODER_READ:
- /****************************/
- /* Read the status register */
- /****************************/
-
- dw_StatusRegister =
- inl(devpriv->s_BoardInfos.
- ui_Address + 16 +
- (64 * b_ModulNbr));
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_PulseEncoderModuleInfo.
- dw_StatusRegister = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_PulseEncoderModuleInfo.
- dw_StatusRegister |
- dw_StatusRegister;
-
- *pb_Status =
- (unsigned char) (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_PulseEncoderModuleInfo.
- dw_StatusRegister >> (1 +
- b_PulseEncoderNbr)) & 1;
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_PulseEncoderModuleInfo.
- dw_StatusRegister =
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_PulseEncoderModuleInfo.
- dw_StatusRegister &
- (0xFFFFFFFFUL - (1 << (1 +
- b_PulseEncoderNbr)));
-
- /******************/
- /* Read the value */
- /******************/
-
- *pul_ReadValue =
- inl(devpriv->s_BoardInfos.
- ui_Address +
- (4 * b_PulseEncoderNbr) +
- (64 * b_ModulNbr));
- break;
-
- case APCI1710_PULSEENCODER_WRITE:
- ul_WriteValue = (unsigned int) data[2];
- /*******************/
- /* Write the value */
- /*******************/
-
- outl(ul_WriteValue,
- devpriv->s_BoardInfos.
- ui_Address +
- (4 * b_PulseEncoderNbr) +
- (64 * b_ModulNbr));
-
- } /* end of switch */
- } else {
- /*********************************/
- /* Pulse encoder not initialised */
- /*********************************/
-
- DPRINTK("Pulse encoder not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /************************************/
- /* Pulse encoder selection is wrong */
- /************************************/
-
- DPRINTK("Pulse encoder selection is wrong\n");
- i_ReturnValue = -3;
- }
- } else {
- /*****************************/
- /* Module selection is wrong */
- /*****************************/
-
- DPRINTK("Module selection is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
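
The APCI1710_PULSEENCODER_READ branch above keeps the per-encoder overflow flags in a cached copy of the status register: the flag for encoder n lives in bit (1 + n), is OR-ed in from every hardware read, reported, and then cleared again (the 0xFFFFFFFFUL - (1 << (1 + n)) mask is simply ~(1 << (1 + n))). A minimal standalone sketch of that bookkeeping; read_overflow() is a hypothetical helper name, not a driver symbol, and it does no hardware access.

#include <stdio.h>

static int read_overflow(unsigned int *cached_status,
                         unsigned int hw_status,
                         unsigned char encoder_nbr)
{
        int overflow;

        *cached_status |= hw_status;                    /* accumulate new overflow flags */
        overflow = (*cached_status >> (1 + encoder_nbr)) & 1;
        *cached_status &= ~(1u << (1 + encoder_nbr));   /* clear the flag just reported  */

        return overflow;
}

int main(void)
{
        unsigned int cached = 0;

        /* Hardware status with the overflow flag of encoder 2 set (bit 3). */
        printf("encoder 2 overflow: %d\n", read_overflow(&cached, 1u << 3, 2));
        /* A second read finds the flag already cleared. */
        printf("encoder 2 overflow: %d\n", read_overflow(&cached, 0, 2));
        return 0;
}
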
-static int i_APCI1710_InsnReadInterruptPulseEncoder(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
-
- data[0] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].b_OldModuleMask;
- data[1] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].ul_OldInterruptMask;
- data[2] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].ul_OldCounterLatchValue;
-
- /***************************/
- /* Increment the read FIFO */
- /***************************/
-
- devpriv->s_InterruptParameters.
- ui_Read = (devpriv->
- s_InterruptParameters.ui_Read + 1) % APCI1710_SAVE_INTERRUPT;
-
- return insn->n;
-
-}
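
i_APCI1710_InsnReadInterruptPulseEncoder() drains a small ring buffer of saved interrupt records: each call copies out the oldest entry and advances ui_Read modulo APCI1710_SAVE_INTERRUPT. A minimal sketch of that consumption pattern, assuming illustrative stand-ins (FIFO_DEPTH, struct irq_record, pop_irq_record) for the driver's names:

#include <stdio.h>

#define FIFO_DEPTH 32           /* stand-in for APCI1710_SAVE_INTERRUPT */

struct irq_record {             /* stand-in for s_FIFOInterruptParameters */
        unsigned int module_mask;
        unsigned int interrupt_mask;
        unsigned int counter_latch;
};

static struct irq_record fifo[FIFO_DEPTH];
static unsigned int read_idx;   /* stand-in for ui_Read */

static struct irq_record pop_irq_record(void)
{
        struct irq_record rec = fifo[read_idx];

        read_idx = (read_idx + 1) % FIFO_DEPTH;  /* wrap around the ring */
        return rec;
}

int main(void)
{
        struct irq_record rec;

        fifo[0].counter_latch = 0x1234;
        rec = pop_irq_record();
        printf("latch=0x%x, next read index=%u\n", rec.counter_latch, read_idx);
        return 0;
}
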
diff --git a/drivers/staging/comedi/drivers/addi-data/APCI1710_Pwm.c b/drivers/staging/comedi/drivers/addi-data/APCI1710_Pwm.c
deleted file mode 100644
index a211e78..0000000
--- a/drivers/staging/comedi/drivers/addi-data/APCI1710_Pwm.c
+++ /dev/null
@@ -1,3586 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
-        Tel: +49(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-You should also find the complete GPL in the COPYING file accompanying this source code.
-
-@endverbatim
-*/
-/*
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-----------------------------------------------------------------------+
- | Project : API APCI1710 | Compiler : gcc |
- | Module name : PWM.C | Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-----------------------------------------------------------------------+
- | Description :   APCI-1710 Pulse width modulation module               |
- | |
- | |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +-----------------------------------------------------------------------+
- | Date | Author | Description of updates |
- +-----------------------------------------------------------------------+
- | 08/05/00 | Guinot C | - 0400/0228 All Function in RING 0 |
- | | | available |
- +-----------------------------------------------------------------------+
-*/
-
-#define APCI1710_30MHZ 30
-#define APCI1710_33MHZ 33
-#define APCI1710_40MHZ 40
-
-#define APCI1710_PWM_INIT 0
-#define APCI1710_PWM_GETINITDATA 1
-
-#define APCI1710_PWM_DISABLE 0
-#define APCI1710_PWM_ENABLE 1
-#define APCI1710_PWM_NEWTIMING 2
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_InitPWM |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_PWM, |
-| unsigned char_ b_ClockSelection, |
-| unsigned char_ b_TimingUnit, |
-| ULONG_ ul_LowTiming, |
-| ULONG_ ul_HighTiming, |
-| PULONG_ pul_RealLowTiming, |
-| PULONG_ pul_RealHighTiming) |
-+----------------------------------------------------------------------------+
-| Task : Configure the selected PWM (b_PWM) from selected module|
-| (b_ModulNbr). The ul_LowTiming, ul_HighTiming and |
-| ul_TimingUnit determine the low/high timing base for |
-| the period. pul_RealLowTiming, pul_RealHighTiming |
-| return the real timing value. |
-| You must call this function before you call any |
-| other function which accesses the PWM. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure|
-| (0 to 3) |
-| unsigned char_ b_PWM : Selected PWM (0 or 1). |
-| unsigned char_ b_ClockSelection : Selection from PCI bus |
-| clock |
-| - APCI1710_30MHZ : |
-| The PC has a 30 MHz |
-| PCI bus clock |
-| - APCI1710_33MHZ : |
-| The PC has a 33 MHz |
-| PCI bus clock |
-| - APCI1710_40MHZ |
-| The APCI-1710 has an |
-| integrated 40 MHz |
-| quartz. |
-| unsigned char_ b_TimingUnit : Base timing Unit (0 to 4) |
-| 0 : ns |
-| 1 : µs |
-| 2 : ms |
-| 3 : s |
-| 4 : mn |
-| ULONG_ ul_LowTiming : Low base timing value. |
-| ULONG_ ul_HighTiming : High base timing value. |
-+----------------------------------------------------------------------------+
-| Output Parameters : PULONG_ pul_RealLowTiming : Real low base timing |
-| value. |
-| PULONG_ pul_RealHighTiming : Real high base timing |
-| value. |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a PWM module |
-| -4: PWM selection is wrong |
-| -5: The selected input clock is wrong |
-| -6: Timing Unit selection is wrong |
-| -7: Low base timing selection is wrong |
-| -8: High base timing selection is wrong |
-| -9: You cannot use the 40MHz clock selection with |
-| this board |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_InitPWM(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_PWM,
- unsigned char b_ClockSelection,
- unsigned char b_TimingUnit,
- unsigned int ul_LowTiming,
- unsigned int ul_HighTiming,
- unsigned int *pul_RealLowTiming,
- unsigned int *pul_RealHighTiming)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int ul_LowTimerValue = 0;
- unsigned int ul_HighTimerValue = 0;
- unsigned int dw_Command;
- double d_RealLowTiming = 0;
- double d_RealHighTiming = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***************/
- /* Test if PWM */
- /***************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_PWM) {
- /**************************/
- /* Test the PWM selection */
- /**************************/
-
- if (b_PWM <= 1) {
- /******************/
- /* Test the clock */
- /******************/
-
- if ((b_ClockSelection == APCI1710_30MHZ) ||
- (b_ClockSelection == APCI1710_33MHZ) ||
- (b_ClockSelection == APCI1710_40MHZ)) {
- /************************/
- /* Test the timing unit */
- /************************/
-
- if (b_TimingUnit <= 4) {
- /*********************************/
- /* Test the low timing selection */
- /*********************************/
-
- if (((b_ClockSelection ==
- APCI1710_30MHZ)
- && (b_TimingUnit
- == 0)
- && (ul_LowTiming
- >= 266)
- && (ul_LowTiming
- <=
- 0xFFFFFFFFUL))
- || ((b_ClockSelection ==
- APCI1710_30MHZ)
- && (b_TimingUnit
- == 1)
- && (ul_LowTiming
- >= 1)
- && (ul_LowTiming
- <=
- 571230650UL))
- || ((b_ClockSelection ==
- APCI1710_30MHZ)
- && (b_TimingUnit
- == 2)
- && (ul_LowTiming
- >= 1)
- && (ul_LowTiming
- <=
- 571230UL))
- || ((b_ClockSelection ==
- APCI1710_30MHZ)
- && (b_TimingUnit
- == 3)
- && (ul_LowTiming
- >= 1)
- && (ul_LowTiming
- <=
- 571UL))
- || ((b_ClockSelection ==
- APCI1710_30MHZ)
- && (b_TimingUnit
- == 4)
- && (ul_LowTiming
- >= 1)
- && (ul_LowTiming
- <= 9UL))
- || ((b_ClockSelection ==
- APCI1710_33MHZ)
- && (b_TimingUnit
- == 0)
- && (ul_LowTiming
- >= 242)
- && (ul_LowTiming
- <=
- 0xFFFFFFFFUL))
- || ((b_ClockSelection ==
- APCI1710_33MHZ)
- && (b_TimingUnit
- == 1)
- && (ul_LowTiming
- >= 1)
- && (ul_LowTiming
- <=
- 519691043UL))
- || ((b_ClockSelection ==
- APCI1710_33MHZ)
- && (b_TimingUnit
- == 2)
- && (ul_LowTiming
- >= 1)
- && (ul_LowTiming
- <=
- 519691UL))
- || ((b_ClockSelection ==
- APCI1710_33MHZ)
- && (b_TimingUnit
- == 3)
- && (ul_LowTiming
- >= 1)
- && (ul_LowTiming
- <=
- 520UL))
- || ((b_ClockSelection ==
- APCI1710_33MHZ)
- && (b_TimingUnit
- == 4)
- && (ul_LowTiming
- >= 1)
- && (ul_LowTiming
- <= 8UL))
- || ((b_ClockSelection ==
- APCI1710_40MHZ)
- && (b_TimingUnit
- == 0)
- && (ul_LowTiming
- >= 200)
- && (ul_LowTiming
- <=
- 0xFFFFFFFFUL))
- || ((b_ClockSelection ==
- APCI1710_40MHZ)
- && (b_TimingUnit
- == 1)
- && (ul_LowTiming
- >= 1)
- && (ul_LowTiming
- <=
- 429496729UL))
- || ((b_ClockSelection ==
- APCI1710_40MHZ)
- && (b_TimingUnit
- == 2)
- && (ul_LowTiming
- >= 1)
- && (ul_LowTiming
- <=
- 429496UL))
- || ((b_ClockSelection ==
- APCI1710_40MHZ)
- && (b_TimingUnit
- == 3)
- && (ul_LowTiming
- >= 1)
- && (ul_LowTiming
- <=
- 429UL))
- || ((b_ClockSelection ==
- APCI1710_40MHZ)
- && (b_TimingUnit
- == 4)
- && (ul_LowTiming
- >= 1)
- && (ul_LowTiming
- <=
- 7UL))) {
- /**********************************/
- /* Test the High timing selection */
- /**********************************/
-
- if (((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 0) && (ul_HighTiming >= 266) && (ul_HighTiming <= 0xFFFFFFFFUL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 1) && (ul_HighTiming >= 1) && (ul_HighTiming <= 571230650UL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 2) && (ul_HighTiming >= 1) && (ul_HighTiming <= 571230UL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 3) && (ul_HighTiming >= 1) && (ul_HighTiming <= 571UL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 4) && (ul_HighTiming >= 1) && (ul_HighTiming <= 9UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 0) && (ul_HighTiming >= 242) && (ul_HighTiming <= 0xFFFFFFFFUL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 1) && (ul_HighTiming >= 1) && (ul_HighTiming <= 519691043UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 2) && (ul_HighTiming >= 1) && (ul_HighTiming <= 519691UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 3) && (ul_HighTiming >= 1) && (ul_HighTiming <= 520UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 4) && (ul_HighTiming >= 1) && (ul_HighTiming <= 8UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 0) && (ul_HighTiming >= 200) && (ul_HighTiming <= 0xFFFFFFFFUL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 1) && (ul_HighTiming >= 1) && (ul_HighTiming <= 429496729UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 2) && (ul_HighTiming >= 1) && (ul_HighTiming <= 429496UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 3) && (ul_HighTiming >= 1) && (ul_HighTiming <= 429UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 4) && (ul_HighTiming >= 1) && (ul_HighTiming <= 7UL))) {
- /**************************/
- /* Test the board version */
- /**************************/
-
- if (((b_ClockSelection == APCI1710_40MHZ) && (devpriv->s_BoardInfos.b_BoardVersion > 0)) || (b_ClockSelection != APCI1710_40MHZ)) {
-
- /************************************/
- /* Calculate the low division factor */
- /************************************/
-
- fpu_begin
- ();
-
- switch (b_TimingUnit) {
- /******/
- /* ns */
- /******/
-
- case 0:
-
- /******************/
- /* Timer 0 factor */
- /******************/
-
- ul_LowTimerValue
- =
- (unsigned int)
- (ul_LowTiming
- *
- (0.00025 * b_ClockSelection));
-
- /*******************/
- /* Round the value */
- /*******************/
-
- if ((double)((double)ul_LowTiming * (0.00025 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) {
- ul_LowTimerValue
- =
- ul_LowTimerValue
- +
- 1;
- }
-
- /*****************************/
- /* Calculate the real timing */
- /*****************************/
-
- *pul_RealLowTiming
- =
- (unsigned int)
- (ul_LowTimerValue
- /
- (0.00025 * (double)b_ClockSelection));
- d_RealLowTiming
- =
- (double)
- ul_LowTimerValue
- /
- (0.00025
- *
- (double)
- b_ClockSelection);
-
- if ((double)((double)ul_LowTimerValue / (0.00025 * (double)b_ClockSelection)) >= (double)((double)*pul_RealLowTiming + 0.5)) {
- *pul_RealLowTiming
- =
- *pul_RealLowTiming
- +
- 1;
- }
-
- ul_LowTiming
- =
- ul_LowTiming
- -
- 1;
- ul_LowTimerValue
- =
- ul_LowTimerValue
- -
- 2;
-
- if (b_ClockSelection != APCI1710_40MHZ) {
- ul_LowTimerValue
- =
- (unsigned int)
- (
- (double)
- (ul_LowTimerValue)
- *
- 1.007752288);
- }
-
- break;
-
- /******/
- /* µs */
- /******/
-
- case 1:
-
- /******************/
- /* Timer 0 factor */
- /******************/
-
- ul_LowTimerValue
- =
- (unsigned int)
- (ul_LowTiming
- *
- (0.25 * b_ClockSelection));
-
- /*******************/
- /* Round the value */
- /*******************/
-
- if ((double)((double)ul_LowTiming * (0.25 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) {
- ul_LowTimerValue
- =
- ul_LowTimerValue
- +
- 1;
- }
-
- /*****************************/
- /* Calculate the real timing */
- /*****************************/
-
- *pul_RealLowTiming
- =
- (unsigned int)
- (ul_LowTimerValue
- /
- (0.25 * (double)b_ClockSelection));
- d_RealLowTiming
- =
- (double)
- ul_LowTimerValue
- /
- (
- (double)
- 0.25
- *
- (double)
- b_ClockSelection);
-
- if ((double)((double)ul_LowTimerValue / (0.25 * (double)b_ClockSelection)) >= (double)((double)*pul_RealLowTiming + 0.5)) {
- *pul_RealLowTiming
- =
- *pul_RealLowTiming
- +
- 1;
- }
-
- ul_LowTiming
- =
- ul_LowTiming
- -
- 1;
- ul_LowTimerValue
- =
- ul_LowTimerValue
- -
- 2;
-
- if (b_ClockSelection != APCI1710_40MHZ) {
- ul_LowTimerValue
- =
- (unsigned int)
- (
- (double)
- (ul_LowTimerValue)
- *
- 1.007752288);
- }
-
- break;
-
- /******/
- /* ms */
- /******/
-
- case 2:
-
- /******************/
- /* Timer 0 factor */
- /******************/
-
- ul_LowTimerValue
- =
- ul_LowTiming
- *
- (250.0
- *
- b_ClockSelection);
-
- /*******************/
- /* Round the value */
- /*******************/
-
- if ((double)((double)ul_LowTiming * (250.0 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) {
- ul_LowTimerValue
- =
- ul_LowTimerValue
- +
- 1;
- }
-
- /*****************************/
- /* Calculate the real timing */
- /*****************************/
-
- *pul_RealLowTiming
- =
- (unsigned int)
- (ul_LowTimerValue
- /
- (250.0 * (double)b_ClockSelection));
- d_RealLowTiming
- =
- (double)
- ul_LowTimerValue
- /
- (250.0
- *
- (double)
- b_ClockSelection);
-
- if ((double)((double)ul_LowTimerValue / (250.0 * (double)b_ClockSelection)) >= (double)((double)*pul_RealLowTiming + 0.5)) {
- *pul_RealLowTiming
- =
- *pul_RealLowTiming
- +
- 1;
- }
-
- ul_LowTiming
- =
- ul_LowTiming
- -
- 1;
- ul_LowTimerValue
- =
- ul_LowTimerValue
- -
- 2;
-
- if (b_ClockSelection != APCI1710_40MHZ) {
- ul_LowTimerValue
- =
- (unsigned int)
- (
- (double)
- (ul_LowTimerValue)
- *
- 1.007752288);
- }
-
- break;
-
- /*****/
- /* s */
- /*****/
-
- case 3:
- /******************/
- /* Timer 0 factor */
- /******************/
-
- ul_LowTimerValue
- =
- (unsigned int)
- (ul_LowTiming
- *
- (250000.0
- *
- b_ClockSelection));
-
- /*******************/
- /* Round the value */
- /*******************/
-
- if ((double)((double)ul_LowTiming * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) {
- ul_LowTimerValue
- =
- ul_LowTimerValue
- +
- 1;
- }
-
- /*****************************/
- /* Calculate the real timing */
- /*****************************/
-
- *pul_RealLowTiming
- =
- (unsigned int)
- (ul_LowTimerValue
- /
- (250000.0
- *
- (double)
- b_ClockSelection));
- d_RealLowTiming
- =
- (double)
- ul_LowTimerValue
- /
- (250000.0
- *
- (double)
- b_ClockSelection);
-
- if ((double)((double)ul_LowTimerValue / (250000.0 * (double)b_ClockSelection)) >= (double)((double)*pul_RealLowTiming + 0.5)) {
- *pul_RealLowTiming
- =
- *pul_RealLowTiming
- +
- 1;
- }
-
- ul_LowTiming
- =
- ul_LowTiming
- -
- 1;
- ul_LowTimerValue
- =
- ul_LowTimerValue
- -
- 2;
-
- if (b_ClockSelection != APCI1710_40MHZ) {
- ul_LowTimerValue
- =
- (unsigned int)
- (
- (double)
- (ul_LowTimerValue)
- *
- 1.007752288);
- }
-
- break;
-
- /******/
- /* mn */
- /******/
-
- case 4:
-
- /******************/
- /* Timer 0 factor */
- /******************/
-
- ul_LowTimerValue
- =
- (unsigned int)
- (
- (ul_LowTiming
- *
- 60)
- *
- (250000.0
- *
- b_ClockSelection));
-
- /*******************/
- /* Round the value */
- /*******************/
-
- if ((double)((double)(ul_LowTiming * 60.0) * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) {
- ul_LowTimerValue
- =
- ul_LowTimerValue
- +
- 1;
- }
-
- /*****************************/
- /* Calculate the real timing */
- /*****************************/
-
- *pul_RealLowTiming
- =
- (unsigned int)
- (ul_LowTimerValue
- /
- (250000.0
- *
- (double)
- b_ClockSelection))
- /
- 60;
- d_RealLowTiming
- =
- (
- (double)
- ul_LowTimerValue
- /
- (250000.0
- *
- (double)
- b_ClockSelection))
- /
- 60.0;
-
- if ((double)(((double)ul_LowTimerValue / (250000.0 * (double)b_ClockSelection)) / 60.0) >= (double)((double)*pul_RealLowTiming + 0.5)) {
- *pul_RealLowTiming
- =
- *pul_RealLowTiming
- +
- 1;
- }
-
- ul_LowTiming
- =
- ul_LowTiming
- -
- 1;
- ul_LowTimerValue
- =
- ul_LowTimerValue
- -
- 2;
-
- if (b_ClockSelection != APCI1710_40MHZ) {
- ul_LowTimerValue
- =
- (unsigned int)
- (
- (double)
- (ul_LowTimerValue)
- *
- 1.007752288);
- }
-
- break;
- }
-
- /*************************************/
- /* Calculate the high division factor */
- /*************************************/
-
- switch (b_TimingUnit) {
- /******/
- /* ns */
- /******/
-
- case 0:
-
- /******************/
- /* Timer 0 factor */
- /******************/
-
- ul_HighTimerValue
- =
- (unsigned int)
- (ul_HighTiming
- *
- (0.00025 * b_ClockSelection));
-
- /*******************/
- /* Round the value */
- /*******************/
-
- if ((double)((double)ul_HighTiming * (0.00025 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) {
- ul_HighTimerValue
- =
- ul_HighTimerValue
- +
- 1;
- }
-
- /*****************************/
- /* Calculate the real timing */
- /*****************************/
-
- *pul_RealHighTiming
- =
- (unsigned int)
- (ul_HighTimerValue
- /
- (0.00025 * (double)b_ClockSelection));
- d_RealHighTiming
- =
- (double)
- ul_HighTimerValue
- /
- (0.00025
- *
- (double)
- b_ClockSelection);
-
- if ((double)((double)ul_HighTimerValue / (0.00025 * (double)b_ClockSelection)) >= (double)((double)*pul_RealHighTiming + 0.5)) {
- *pul_RealHighTiming
- =
- *pul_RealHighTiming
- +
- 1;
- }
-
- ul_HighTiming
- =
- ul_HighTiming
- -
- 1;
- ul_HighTimerValue
- =
- ul_HighTimerValue
- -
- 2;
-
- if (b_ClockSelection != APCI1710_40MHZ) {
- ul_HighTimerValue
- =
- (unsigned int)
- (
- (double)
- (ul_HighTimerValue)
- *
- 1.007752288);
- }
-
- break;
-
- /******/
- /* µs */
- /******/
-
- case 1:
-
- /******************/
- /* Timer 0 factor */
- /******************/
-
- ul_HighTimerValue
- =
- (unsigned int)
- (ul_HighTiming
- *
- (0.25 * b_ClockSelection));
-
- /*******************/
- /* Round the value */
- /*******************/
-
- if ((double)((double)ul_HighTiming * (0.25 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) {
- ul_HighTimerValue
- =
- ul_HighTimerValue
- +
- 1;
- }
-
- /*****************************/
- /* Calculate the real timing */
- /*****************************/
-
- *pul_RealHighTiming
- =
- (unsigned int)
- (ul_HighTimerValue
- /
- (0.25 * (double)b_ClockSelection));
- d_RealHighTiming
- =
- (double)
- ul_HighTimerValue
- /
- (
- (double)
- 0.25
- *
- (double)
- b_ClockSelection);
-
- if ((double)((double)ul_HighTimerValue / (0.25 * (double)b_ClockSelection)) >= (double)((double)*pul_RealHighTiming + 0.5)) {
- *pul_RealHighTiming
- =
- *pul_RealHighTiming
- +
- 1;
- }
-
- ul_HighTiming
- =
- ul_HighTiming
- -
- 1;
- ul_HighTimerValue
- =
- ul_HighTimerValue
- -
- 2;
-
- if (b_ClockSelection != APCI1710_40MHZ) {
- ul_HighTimerValue
- =
- (unsigned int)
- (
- (double)
- (ul_HighTimerValue)
- *
- 1.007752288);
- }
-
- break;
-
- /******/
- /* ms */
- /******/
-
- case 2:
-
- /******************/
- /* Timer 0 factor */
- /******************/
-
- ul_HighTimerValue
- =
- ul_HighTiming
- *
- (250.0
- *
- b_ClockSelection);
-
- /*******************/
- /* Round the value */
- /*******************/
-
- if ((double)((double)ul_HighTiming * (250.0 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) {
- ul_HighTimerValue
- =
- ul_HighTimerValue
- +
- 1;
- }
-
- /*****************************/
- /* Calculate the real timing */
- /*****************************/
-
- *pul_RealHighTiming
- =
- (unsigned int)
- (ul_HighTimerValue
- /
- (250.0 * (double)b_ClockSelection));
- d_RealHighTiming
- =
- (double)
- ul_HighTimerValue
- /
- (250.0
- *
- (double)
- b_ClockSelection);
-
- if ((double)((double)ul_HighTimerValue / (250.0 * (double)b_ClockSelection)) >= (double)((double)*pul_RealHighTiming + 0.5)) {
- *pul_RealHighTiming
- =
- *pul_RealHighTiming
- +
- 1;
- }
-
- ul_HighTiming
- =
- ul_HighTiming
- -
- 1;
- ul_HighTimerValue
- =
- ul_HighTimerValue
- -
- 2;
-
- if (b_ClockSelection != APCI1710_40MHZ) {
- ul_HighTimerValue
- =
- (unsigned int)
- (
- (double)
- (ul_HighTimerValue)
- *
- 1.007752288);
- }
-
- break;
-
- /*****/
- /* s */
- /*****/
-
- case 3:
-
- /******************/
- /* Timer 0 factor */
- /******************/
-
- ul_HighTimerValue
- =
- (unsigned int)
- (ul_HighTiming
- *
- (250000.0
- *
- b_ClockSelection));
-
- /*******************/
- /* Round the value */
- /*******************/
-
- if ((double)((double)ul_HighTiming * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) {
- ul_HighTimerValue
- =
- ul_HighTimerValue
- +
- 1;
- }
-
- /*****************************/
- /* Calculate the real timing */
- /*****************************/
-
- *pul_RealHighTiming
- =
- (unsigned int)
- (ul_HighTimerValue
- /
- (250000.0
- *
- (double)
- b_ClockSelection));
- d_RealHighTiming
- =
- (double)
- ul_HighTimerValue
- /
- (250000.0
- *
- (double)
- b_ClockSelection);
-
- if ((double)((double)ul_HighTimerValue / (250000.0 * (double)b_ClockSelection)) >= (double)((double)*pul_RealHighTiming + 0.5)) {
- *pul_RealHighTiming
- =
- *pul_RealHighTiming
- +
- 1;
- }
-
- ul_HighTiming
- =
- ul_HighTiming
- -
- 1;
- ul_HighTimerValue
- =
- ul_HighTimerValue
- -
- 2;
-
- if (b_ClockSelection != APCI1710_40MHZ) {
- ul_HighTimerValue
- =
- (unsigned int)
- (
- (double)
- (ul_HighTimerValue)
- *
- 1.007752288);
- }
-
- break;
-
- /******/
- /* mn */
- /******/
-
- case 4:
-
- /******************/
- /* Timer 0 factor */
- /******************/
-
- ul_HighTimerValue
- =
- (unsigned int)
- (
- (ul_HighTiming
- *
- 60)
- *
- (250000.0
- *
- b_ClockSelection));
-
- /*******************/
- /* Round the value */
- /*******************/
-
- if ((double)((double)(ul_HighTiming * 60.0) * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) {
- ul_HighTimerValue
- =
- ul_HighTimerValue
- +
- 1;
- }
-
- /*****************************/
- /* Calculate the real timing */
- /*****************************/
-
- *pul_RealHighTiming
- =
- (unsigned int)
- (ul_HighTimerValue
- /
- (250000.0
- *
- (double)
- b_ClockSelection))
- /
- 60;
- d_RealHighTiming
- =
- (
- (double)
- ul_HighTimerValue
- /
- (250000.0
- *
- (double)
- b_ClockSelection))
- /
- 60.0;
-
- if ((double)(((double)ul_HighTimerValue / (250000.0 * (double)b_ClockSelection)) / 60.0) >= (double)((double)*pul_RealHighTiming + 0.5)) {
- *pul_RealHighTiming
- =
- *pul_RealHighTiming
- +
- 1;
- }
-
- ul_HighTiming
- =
- ul_HighTiming
- -
- 1;
- ul_HighTimerValue
- =
- ul_HighTimerValue
- -
- 2;
-
- if (b_ClockSelection != APCI1710_40MHZ) {
- ul_HighTimerValue
- =
- (unsigned int)
- (
- (double)
- (ul_HighTimerValue)
- *
- 1.007752288);
- }
-
- break;
- }
-
- fpu_end();
- /****************************/
- /* Save the clock selection */
- /****************************/
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_PWMModuleInfo.
- b_ClockSelection
- =
- b_ClockSelection;
-
- /************************/
- /* Save the timing unit */
- /************************/
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_PWMModuleInfo.
- s_PWMInfo
- [b_PWM].
- b_TimingUnit
- =
- b_TimingUnit;
-
- /****************************/
- /* Save the low base timing */
- /****************************/
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_PWMModuleInfo.
- s_PWMInfo
- [b_PWM].
- d_LowTiming
- =
- d_RealLowTiming;
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_PWMModuleInfo.
- s_PWMInfo
- [b_PWM].
- ul_RealLowTiming
- =
- *pul_RealLowTiming;
-
- /****************************/
- /* Save the high base timing */
- /****************************/
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_PWMModuleInfo.
- s_PWMInfo
- [b_PWM].
- d_HighTiming
- =
- d_RealHighTiming;
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_PWMModuleInfo.
- s_PWMInfo
- [b_PWM].
- ul_RealHighTiming
- =
- *pul_RealHighTiming;
-
- /************************/
- /* Write the low timing */
- /************************/
-
- outl(ul_LowTimerValue, devpriv->s_BoardInfos.ui_Address + 0 + (20 * b_PWM) + (64 * b_ModulNbr));
-
- /*************************/
- /* Write the high timing */
- /*************************/
-
- outl(ul_HighTimerValue, devpriv->s_BoardInfos.ui_Address + 4 + (20 * b_PWM) + (64 * b_ModulNbr));
-
- /***************************/
- /* Set the clock selection */
- /***************************/
-
- dw_Command
- =
- inl
- (devpriv->
- s_BoardInfos.
- ui_Address
- +
- 8
- +
- (20 * b_PWM) + (64 * b_ModulNbr));
-
- dw_Command
- =
- dw_Command
- &
- 0x7F;
-
- if (b_ClockSelection == APCI1710_40MHZ) {
- dw_Command
- =
- dw_Command
- |
- 0x80;
- }
-
- /***************************/
- /* Set the clock selection */
- /***************************/
-
- outl(dw_Command, devpriv->s_BoardInfos.ui_Address + 8 + (20 * b_PWM) + (64 * b_ModulNbr));
-
- /*************/
- /* PWM init. */
- /*************/
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_PWMModuleInfo.
- s_PWMInfo
- [b_PWM].
- b_PWMInit
- =
- 1;
- } else {
- /***************************************************/
- /* You cannot use the 40MHz clock selection with */
- /* this board */
- /***************************************************/
- DPRINTK("You cannot use the 40MHz clock selection with this board\n");
- i_ReturnValue
- =
- -9;
- }
- } else {
- /***************************************/
- /* High base timing selection is wrong */
- /***************************************/
- DPRINTK("High base timing selection is wrong\n");
- i_ReturnValue =
- -8;
- }
- } else {
- /**************************************/
- /* Low base timing selection is wrong */
- /**************************************/
- DPRINTK("Low base timing selection is wrong\n");
- i_ReturnValue = -7;
- }
- } /* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */
- else {
- /**********************************/
- /* Timing unit selection is wrong */
- /**********************************/
- DPRINTK("Timing unit selection is wrong\n");
- i_ReturnValue = -6;
- } /* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */
- } /* if ((b_ClockSelection == APCI1710_30MHZ) || (b_ClockSelection == APCI1710_33MHZ) || (b_ClockSelection == APCI1710_40MHZ)) */
- else {
- /*******************************/
- /* The selected clock is wrong */
- /*******************************/
- DPRINTK("The selected clock is wrong\n");
- i_ReturnValue = -5;
- } /* if ((b_ClockSelection == APCI1710_30MHZ) || (b_ClockSelection == APCI1710_33MHZ) || (b_ClockSelection == APCI1710_40MHZ)) */
- } /* if (b_PWM >= 0 && b_PWM <= 1) */
- else {
- /******************************/
- /* Tor PWM selection is wrong */
- /******************************/
- DPRINTK("Tor PWM selection is wrong\n");
- i_ReturnValue = -4;
- } /* if (b_PWM >= 0 && b_PWM <= 1) */
- } else {
- /**********************************/
- /* The module is not a PWM module */
- /**********************************/
- DPRINTK("The module is not a PWM module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
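
Every case of the two switch statements above performs the same calculation with a different unit factor: scale the requested timing by the factor and the PCI clock in MHz, round to the nearest tick, subtract a fixed 2-tick offset, and apply the 1.007752288 correction when the 30 or 33 MHz clock is used. A condensed standalone restatement of that arithmetic; pwm_timer_value() is an illustrative helper, not a driver function.

#include <stdio.h>

#define APCI1710_40MHZ  40

/* Ticks per timing unit, per MHz of PCI clock: ns, us, ms, s, mn. */
static const double unit_factor[5] = {
        0.00025, 0.25, 250.0, 250000.0, 60.0 * 250000.0
};

static unsigned int pwm_timer_value(unsigned int timing,
                                    unsigned int unit,          /* 0..4 */
                                    unsigned int clock_mhz)     /* 30, 33 or 40 */
{
        double ticks = (double)timing * unit_factor[unit] * (double)clock_mhz;
        unsigned int value = (unsigned int)(ticks + 0.5);       /* round half up */

        value -= 2;                             /* fixed hardware offset */
        if (clock_mhz != APCI1710_40MHZ)        /* 30/33 MHz correction  */
                value = (unsigned int)((double)value * 1.007752288);

        return value;
}

int main(void)
{
        /* Example: a 100 us low timing on a 33 MHz PCI clock. */
        printf("timer value = %u\n", pwm_timer_value(100, 1, 33));
        return 0;
}
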
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_GetPWMInitialisation |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_PWM, |
-| unsigned char *_ pb_TimingUnit, |
-| PULONG_ pul_LowTiming, |
-| PULONG_ pul_HighTiming, |
-| unsigned char *_ pb_StartLevel, |
-| unsigned char *_ pb_StopMode, |
-| unsigned char *_ pb_StopLevel, |
-| unsigned char *_ pb_ExternGate, |
-| unsigned char *_ pb_InterruptEnable, |
-| unsigned char *_ pb_Enable) |
-+----------------------------------------------------------------------------+
-| Task : Return the PWM (b_PWM) initialisation from selected |
-| module (b_ModulNbr). You must call the |
-| "i_APCI1710_InitPWM" function before you call this |
-| function. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3) |
-| unsigned char_ b_PWM : Selected PWM (0 or 1) |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_TimingUnit : Base timing Unit (0 to 4) |
-| 0 : ns |
-| 1 : µs |
-| 2 : ms |
-| 3 : s |
-| 4 : mn |
-| PULONG_ pul_LowTiming : Low base timing value. |
-| PULONG_ pul_HighTiming : High base timing value. |
-| unsigned char *_ pb_StartLevel : Start period level |
-| selection |
-| 0 : The period start |
-| with a low level |
-| 1 : The period start |
-| with a high level|
-| unsigned char *_ pb_StopMode : Stop mode selection |
-| 0 : The PWM is stopped |
-| directly after the |
-| "i_APCI1710_DisablePWM"|
-| function and break the|
-| last period |
-| 1 : After the |
-| "i_APCI1710_DisablePWM"|
-| function the PWM is |
-| stopped at the end |
-| from last period cycle|
-| unsigned char *_ pb_StopLevel : Stop PWM level selection |
-| 0 : The output signal |
-| keep the level after|
-| the |
-| "i_APCI1710_DisablePWM"|
-| function |
-| 1 : The output signal is|
-| set to low after the|
-| "i_APCI1710_DisablePWM"|
-| function |
-| 2 : The output signal is|
-| set to high after |
-| the |
-| "i_APCI1710_DisablePWM"|
-| function |
-| unsigned char *_ pb_ExternGate : Extern gate action |
-| selection |
-| 0 : Extern gate signal |
-| not used. |
-| 1 : Extern gate signal |
-| used. |
-| unsigned char *_ pb_InterruptEnable : Enable or disable the PWM |
-| interrupt. |
-| - APCI1710_ENABLE : |
-| Enable the PWM interrupt|
-| An interrupt occurs after |
-| each period |
-| - APCI1710_DISABLE : |
-| Disable the PWM |
-| interrupt |
-| unsigned char *_ pb_Enable : Indicates if the PWM is |
-| enabled or not |
-| 0 : PWM not enabled |
-| 1 : PWM enabled |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a PWM module |
-| -4: PWM selection is wrong |
-| -5: PWM not initialised see function |
-| "i_APCI1710_InitPWM" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_GetPWMInitialisation(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_PWM,
- unsigned char *pb_TimingUnit,
- unsigned int *pul_LowTiming,
- unsigned int *pul_HighTiming,
- unsigned char *pb_StartLevel,
- unsigned char *pb_StopMode,
- unsigned char *pb_StopLevel,
- unsigned char *pb_ExternGate,
- unsigned char *pb_InterruptEnable,
- unsigned char *pb_Enable)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_Status;
- unsigned int dw_Command;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***************/
- /* Test if PWM */
- /***************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_PWM) {
- /**************************/
- /* Test the PWM selection */
- /**************************/
-
- if (b_PWM <= 1) {
- /***************************/
- /* Test if PWM initialised */
- /***************************/
-
- dw_Status = inl(devpriv->s_BoardInfos.
- ui_Address + 12 + (20 * b_PWM) +
- (64 * b_ModulNbr));
-
- if (dw_Status & 0x10) {
- /***********************/
- /* Read the low timing */
- /***********************/
-
- *pul_LowTiming =
- inl(devpriv->s_BoardInfos.
- ui_Address + 0 + (20 * b_PWM) +
- (64 * b_ModulNbr));
-
- /************************/
- /* Read the high timing */
- /************************/
-
- *pul_HighTiming =
- inl(devpriv->s_BoardInfos.
- ui_Address + 4 + (20 * b_PWM) +
- (64 * b_ModulNbr));
-
- /********************/
- /* Read the command */
- /********************/
-
- dw_Command = inl(devpriv->s_BoardInfos.
- ui_Address + 8 + (20 * b_PWM) +
- (64 * b_ModulNbr));
-
- *pb_StartLevel =
- (unsigned char) ((dw_Command >> 5) & 1);
- *pb_StopMode =
- (unsigned char) ((dw_Command >> 0) & 1);
- *pb_StopLevel =
- (unsigned char) ((dw_Command >> 1) & 1);
- *pb_ExternGate =
- (unsigned char) ((dw_Command >> 4) & 1);
- *pb_InterruptEnable =
- (unsigned char) ((dw_Command >> 3) & 1);
-
- if (*pb_StopLevel) {
- *pb_StopLevel =
- *pb_StopLevel +
- (unsigned char) ((dw_Command >>
- 2) & 1);
- }
-
- /********************/
- /* Read the command */
- /********************/
-
- dw_Command = inl(devpriv->s_BoardInfos.
- ui_Address + 8 + (20 * b_PWM) +
- (64 * b_ModulNbr));
-
- *pb_Enable =
- (unsigned char) ((dw_Command >> 0) & 1);
-
- *pb_TimingUnit = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_PWMModuleInfo.
- s_PWMInfo[b_PWM].b_TimingUnit;
- } /* if (dw_Status & 0x10) */
- else {
- /***********************/
- /* PWM not initialised */
- /***********************/
- DPRINTK("PWM not initialised\n");
- i_ReturnValue = -5;
- } /* if (dw_Status & 0x10) */
- } /* if (b_PWM >= 0 && b_PWM <= 1) */
- else {
- /******************************/
- /* Tor PWM selection is wrong */
- /******************************/
- DPRINTK("Tor PWM selection is wrong\n");
- i_ReturnValue = -4;
- } /* if (b_PWM >= 0 && b_PWM <= 1) */
- } else {
- /**********************************/
- /* The module is not a PWM module */
- /**********************************/
- DPRINTK("The module is not a PWM module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
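
The command register at offset 8 that is decoded above (and written by i_APCI1710_EnablePWM below) packs the whole PWM mode into individual bits. A standalone decoding sketch; struct pwm_command and decode_pwm_command() are illustrative names, while the bit positions follow the reads and writes in this file.

#include <stdio.h>

struct pwm_command {
        unsigned char stop_mode;        /* bit 0: stop at end of period        */
        unsigned char stop_level;       /* bits 1-2: 0 keep, 1 low, 2 high     */
        unsigned char irq_enable;       /* bit 3: interrupt after each period  */
        unsigned char extern_gate;      /* bit 4: external gate used           */
        unsigned char start_level;      /* bit 5: period starts with high level*/
        unsigned char clock_40mhz;      /* bit 7: 40 MHz quartz selected       */
};

static struct pwm_command decode_pwm_command(unsigned int reg)
{
        struct pwm_command c;

        c.stop_mode   = (reg >> 0) & 1;
        c.stop_level  = (reg >> 1) & 1;
        if (c.stop_level)
                c.stop_level += (reg >> 2) & 1; /* 1 = forced low, 2 = forced high */
        c.irq_enable  = (reg >> 3) & 1;
        c.extern_gate = (reg >> 4) & 1;
        c.start_level = (reg >> 5) & 1;
        c.clock_40mhz = (reg >> 7) & 1;
        return c;
}

int main(void)
{
        struct pwm_command c = decode_pwm_command(0xA8);        /* example value */

        printf("start_level=%d irq=%d clock40=%d\n",
               c.start_level, c.irq_enable, c.clock_40mhz);
        return 0;
}
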
-/*
- * Pwm Init and Get Pwm Initialisation
- */
-static int i_APCI1710_InsnConfigPWM(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned char b_ConfigType;
- int i_ReturnValue = 0;
- b_ConfigType = CR_CHAN(insn->chanspec);
-
- switch (b_ConfigType) {
- case APCI1710_PWM_INIT:
- i_ReturnValue = i_APCI1710_InitPWM(dev, (unsigned char) CR_AREF(insn->chanspec), /* b_ModulNbr */
- (unsigned char) data[0], /* b_PWM */
- (unsigned char) data[1], /* b_ClockSelection */
- (unsigned char) data[2], /* b_TimingUnit */
- (unsigned int) data[3], /* ul_LowTiming */
- (unsigned int) data[4], /* ul_HighTiming */
- (unsigned int *) &data[0], /* pul_RealLowTiming */
- (unsigned int *) &data[1] /* pul_RealHighTiming */
- );
- break;
-
- case APCI1710_PWM_GETINITDATA:
- i_ReturnValue = i_APCI1710_GetPWMInitialisation(dev, (unsigned char) CR_AREF(insn->chanspec), /* b_ModulNbr */
- (unsigned char) data[0], /* b_PWM */
- (unsigned char *) &data[0], /* pb_TimingUnit */
- (unsigned int *) &data[1], /* pul_LowTiming */
- (unsigned int *) &data[2], /* pul_HighTiming */
- (unsigned char *) &data[3], /* pb_StartLevel */
- (unsigned char *) &data[4], /* pb_StopMode */
- (unsigned char *) &data[5], /* pb_StopLevel */
- (unsigned char *) &data[6], /* pb_ExternGate */
- (unsigned char *) &data[7], /* pb_InterruptEnable */
- (unsigned char *) &data[8] /* pb_Enable */
- );
- break;
-
- default:
- printk(" Config Parameter Wrong\n");
- }
-
- if (i_ReturnValue >= 0)
- i_ReturnValue = insn->n;
- return i_ReturnValue;
-}
-
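
From user space, this config handler is reached through a comedi INSN_CONFIG instruction whose chanspec selects the config type (CR_CHAN) and the module number (CR_AREF), with the remaining parameters passed in data[]. The following comedilib sketch of an APCI1710_PWM_INIT request is illustrative only: the device path and subdevice index are placeholders, and the numeric values merely mirror the layout expected by the handler above.

#include <stdio.h>
#include <string.h>
#include <comedilib.h>

int main(void)
{
        comedi_t *dev = comedi_open("/dev/comedi0");    /* placeholder device path */
        comedi_insn insn;
        lsampl_t data[5];

        if (!dev)
                return 1;

        data[0] = 0;    /* b_PWM: PWM 0                      */
        data[1] = 33;   /* b_ClockSelection: APCI1710_33MHZ  */
        data[2] = 1;    /* b_TimingUnit: microseconds        */
        data[3] = 100;  /* ul_LowTiming                      */
        data[4] = 100;  /* ul_HighTiming                     */

        memset(&insn, 0, sizeof(insn));
        insn.insn = INSN_CONFIG;
        insn.subdev = 0;                        /* placeholder subdevice index      */
        insn.chanspec = CR_PACK(0, 0, 2);       /* chan 0 = APCI1710_PWM_INIT,
                                                   aref 2 = module number 2         */
        insn.n = 5;
        insn.data = data;

        if (comedi_do_insn(dev, &insn) < 0)
                fprintf(stderr, "PWM init insn failed\n");
        else
                printf("real low/high timing: %u %u\n",
                       (unsigned int)data[0], (unsigned int)data[1]);

        comedi_close(dev);
        return 0;
}
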
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_EnablePWM |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_PWM, |
-| unsigned char_ b_StartLevel, |
-| unsigned char_ b_StopMode, |
-| unsigned char_ b_StopLevel, |
-| unsigned char_ b_ExternGate, |
-| unsigned char_ b_InterruptEnable) |
-+----------------------------------------------------------------------------+
-| Task : Enable the selected PWM (b_PWM) from selected module |
-| (b_ModulNbr). You must call the "i_APCI1710_InitPWM"|
-| function before you call this function. |
-| If you enable the PWM interrupt, the PWM generates an|
-| interrupt after each period. |
-| See function "i_APCI1710_SetBoardIntRoutineX" and the |
-| Interrupt mask description chapter. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number |
-| (0 to 3) |
-| unsigned char_ b_PWM : Selected PWM (0 or 1) |
-| unsigned char_ b_StartLevel : Start period level selection |
-| 0 : The period start with a |
-| low level |
-| 1 : The period start with a |
-| high level |
-| unsigned char_ b_StopMode : Stop mode selection |
-| 0 : The PWM is stopped |
-| directly after the |
-| "i_APCI1710_DisablePWM" |
-| function and break the |
-| last period |
-| 1 : After the |
-| "i_APCI1710_DisablePWM" |
-| function the PWM is |
-| stopped at the end from|
-| last period cycle. |
-| unsigned char_ b_StopLevel : Stop PWM level selection |
-| 0 : The output signal keep |
-| the level after the |
-| "i_APCI1710_DisablePWM" |
-| function |
-| 1 : The output signal is set|
-| to low after the |
-| "i_APCI1710_DisablePWM" |
-| function |
-| 2 : The output signal is set|
-| to high after the |
-| "i_APCI1710_DisablePWM" |
-| function |
-| unsigned char_ b_ExternGate : Extern gate action selection |
-| 0 : Extern gate signal not |
-| used. |
-| 1 : Extern gate signal used.|
-| unsigned char_ b_InterruptEnable : Enable or disable the PWM |
-| interrupt. |
-| - APCI1710_ENABLE : |
-| Enable the PWM interrupt |
-| An interrupt occurs after |
-| each period |
-| - APCI1710_DISABLE : |
-| Disable the PWM interrupt |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a PWM module |
-| -4: PWM selection is wrong |
-| -5: PWM not initialised see function |
-| "i_APCI1710_InitPWM" |
-| -6: PWM start level selection is wrong |
-| -7: PWM stop mode selection is wrong |
-| -8: PWM stop level selection is wrong |
-| -9: Extern gate signal selection is wrong |
-| -10: Interrupt parameter is wrong |
-| -11: Interrupt function not initialised. |
-| See function "i_APCI1710_SetBoardIntRoutineX" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_EnablePWM(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_PWM,
- unsigned char b_StartLevel,
- unsigned char b_StopMode,
- unsigned char b_StopLevel,
- unsigned char b_ExternGate,
- unsigned char b_InterruptEnable)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_Status;
- unsigned int dw_Command;
-
- devpriv->tsk_Current = current; /* Save the current process task structure */
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***************/
- /* Test if PWM */
- /***************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_PWM) {
- /**************************/
- /* Test the PWM selection */
- /**************************/
-
- if (b_PWM <= 1) {
- /***************************/
- /* Test if PWM initialised */
- /***************************/
-
- dw_Status = inl(devpriv->s_BoardInfos.
- ui_Address + 12 + (20 * b_PWM) +
- (64 * b_ModulNbr));
-
- if (dw_Status & 0x10) {
- /**********************************/
- /* Test the start level selection */
- /**********************************/
-
- if (b_StartLevel <= 1) {
- /**********************/
- /* Test the stop mode */
- /**********************/
-
- if (b_StopMode <= 1) {
- /***********************/
- /* Test the stop level */
- /***********************/
-
- if (b_StopLevel <= 2) {
- /*****************************/
- /* Test the extern gate mode */
- /*****************************/
-
- if (b_ExternGate
- <= 1) {
- /*****************************/
- /* Test the interrupt action */
- /*****************************/
-
- if (b_InterruptEnable == APCI1710_ENABLE || b_InterruptEnable == APCI1710_DISABLE) {
- /******************************************/
- /* Test if interrupt function initialised */
- /******************************************/
-
- /********************/
- /* Read the command */
- /********************/
-
- dw_Command
- =
- inl
- (devpriv->
- s_BoardInfos.
- ui_Address
- +
- 8
- +
- (20 * b_PWM) + (64 * b_ModulNbr));
-
- dw_Command
- =
- dw_Command
- &
- 0x80;
-
- /********************/
- /* Make the command */
- /********************/
-
- dw_Command
- =
- dw_Command
- |
- b_StopMode
- |
- (b_InterruptEnable
- <<
- 3)
- |
- (b_ExternGate
- <<
- 4)
- |
- (b_StartLevel
- <<
- 5);
-
- if (b_StopLevel & 3) {
- dw_Command
- =
- dw_Command
- |
- 2;
-
- if (b_StopLevel & 2) {
- dw_Command
- =
- dw_Command
- |
- 4;
- }
- }
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_PWMModuleInfo.
- s_PWMInfo
- [b_PWM].
- b_InterruptEnable
- =
- b_InterruptEnable;
-
- /*******************/
- /* Set the command */
- /*******************/
-
- outl(dw_Command, devpriv->s_BoardInfos.ui_Address + 8 + (20 * b_PWM) + (64 * b_ModulNbr));
-
- /******************/
- /* Enable the PWM */
- /******************/
- outl(1, devpriv->s_BoardInfos.ui_Address + 12 + (20 * b_PWM) + (64 * b_ModulNbr));
- } /* if (b_InterruptEnable == APCI1710_ENABLE || b_InterruptEnable == APCI1710_DISABLE) */
- else {
- /********************************/
- /* Interrupt parameter is wrong */
- /********************************/
- DPRINTK("Interrupt parameter is wrong\n");
- i_ReturnValue
- =
- -10;
- } /* if (b_InterruptEnable == APCI1710_ENABLE || b_InterruptEnable == APCI1710_DISABLE) */
- } /* if (b_ExternGate >= 0 && b_ExternGate <= 1) */
- else {
- /*****************************************/
- /* Extern gate signal selection is wrong */
- /*****************************************/
- DPRINTK("Extern gate signal selection is wrong\n");
- i_ReturnValue
- =
- -9;
- } /* if (b_ExternGate >= 0 && b_ExternGate <= 1) */
- } /* if (b_StopLevel >= 0 && b_StopLevel <= 2) */
- else {
- /*************************************/
- /* PWM stop level selection is wrong */
- /*************************************/
- DPRINTK("PWM stop level selection is wrong\n");
- i_ReturnValue =
- -8;
- } /* if (b_StopLevel >= 0 && b_StopLevel <= 2) */
- } /* if (b_StopMode >= 0 && b_StopMode <= 1) */
- else {
- /************************************/
- /* PWM stop mode selection is wrong */
- /************************************/
- DPRINTK("PWM stop mode selection is wrong\n");
- i_ReturnValue = -7;
- } /* if (b_StopMode >= 0 && b_StopMode <= 1) */
- } /* if (b_StartLevel >= 0 && b_StartLevel <= 1) */
- else {
- /**************************************/
- /* PWM start level selection is wrong */
- /**************************************/
- DPRINTK("PWM start level selection is wrong\n");
- i_ReturnValue = -6;
- } /* if (b_StartLevel >= 0 && b_StartLevel <= 1) */
- } /* if (dw_Status & 0x10) */
- else {
- /***********************/
- /* PWM not initialised */
- /***********************/
- DPRINTK("PWM not initialised\n");
- i_ReturnValue = -5;
- } /* if (dw_Status & 0x10) */
- } /* if (b_PWM >= 0 && b_PWM <= 1) */
- else {
- /******************************/
- /* Tor PWM selection is wrong */
- /******************************/
- DPRINTK("Tor PWM selection is wrong\n");
- i_ReturnValue = -4;
- } /* if (b_PWM >= 0 && b_PWM <= 1) */
- } else {
- /**********************************/
- /* The module is not a PWM module */
- /**********************************/
- DPRINTK("The module is not a PWM module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_DisablePWM (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_PWM) |
-+----------------------------------------------------------------------------+
-| Task : Disable the selected PWM (b_PWM) from selected module |
-| (b_ModulNbr). The output signal level depends on the |
-| initialisation by the "i_APCI1710_EnablePWM". |
-| See the b_StartLevel, b_StopMode and b_StopLevel |
-| parameters from this function. |
-+----------------------------------------------------------------------------+
-| Input Parameters :BYTE_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3) |
-| unsigned char_ b_PWM : Selected PWM (0 or 1) |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a PWM module |
-| -4: PWM selection is wrong |
-| -5: PWM not initialised see function |
-| "i_APCI1710_InitPWM" |
-| -6: PWM not enabled see function |
-| "i_APCI1710_EnablePWM" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_DisablePWM(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_PWM)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_Status;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***************/
- /* Test if PWM */
- /***************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_PWM) {
- /**************************/
- /* Test the PWM selection */
- /**************************/
-
- if (b_PWM <= 1) {
- /***************************/
- /* Test if PWM initialised */
- /***************************/
-
- dw_Status = inl(devpriv->s_BoardInfos.
- ui_Address + 12 + (20 * b_PWM) +
- (64 * b_ModulNbr));
-
- if (dw_Status & 0x10) {
- /***********************/
- /* Test if PWM enabled */
- /***********************/
-
- if (dw_Status & 0x1) {
- /*******************/
- /* Disable the PWM */
- /*******************/
- outl(0, devpriv->s_BoardInfos.
- ui_Address + 12 +
- (20 * b_PWM) +
- (64 * b_ModulNbr));
- } /* if (dw_Status & 0x1) */
- else {
- /*******************/
- /* PWM not enabled */
- /*******************/
- DPRINTK("PWM not enabled\n");
- i_ReturnValue = -6;
- } /* if (dw_Status & 0x1) */
- } /* if (dw_Status & 0x10) */
- else {
- /***********************/
- /* PWM not initialised */
- /***********************/
- DPRINTK(" PWM not initialised\n");
- i_ReturnValue = -5;
- } /* if (dw_Status & 0x10) */
- } /* if (b_PWM >= 0 && b_PWM <= 1) */
- else {
- /******************************/
- /* Tor PWM selection is wrong */
- /******************************/
- DPRINTK("Tor PWM selection is wrong\n");
- i_ReturnValue = -4;
- } /* if (b_PWM >= 0 && b_PWM <= 1) */
- } else {
- /**********************************/
- /* The module is not a PWM module */
- /**********************************/
- DPRINTK("The module is not a PWM module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
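
All of the PWM helpers above compute their I/O addresses as base + offset + (20 * b_PWM) + (64 * b_ModulNbr), with offsets 0, 4, 8 and 12 for the low timing, high timing, command and status/enable registers. A small standalone sketch of that address arithmetic; pwm_reg_addr() and the enum names are illustrative, not driver symbols.

#include <stdio.h>

enum pwm_reg {
        PWM_REG_LOW_TIMING  = 0,        /* low-phase timer value  */
        PWM_REG_HIGH_TIMING = 4,        /* high-phase timer value */
        PWM_REG_COMMAND     = 8,        /* mode / clock selection */
        PWM_REG_STATUS      = 12,       /* init flag, enable bit  */
};

static unsigned int pwm_reg_addr(unsigned int base, unsigned int module,
                                 unsigned int pwm, enum pwm_reg reg)
{
        return base + reg + 20 * pwm + 64 * module;
}

int main(void)
{
        /* Command register of PWM 1 on module 2, for a base address of 0x1000. */
        printf("0x%x\n", pwm_reg_addr(0x1000, 2, 1, PWM_REG_COMMAND));
        return 0;
}
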
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_SetNewPWMTiming |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_PWM, |
-| unsigned char_ b_ClockSelection, |
-| unsigned char_ b_TimingUnit, |
-| ULONG_ ul_LowTiming, |
-| ULONG_ ul_HighTiming) |
-+----------------------------------------------------------------------------+
-| Task : Set a new timing. The ul_LowTiming, ul_HighTiming and |
-| ul_TimingUnit determine the low/high timing base for |
-| the period. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure|
-| (0 to 3) |
-| unsigned char_ b_PWM : Selected PWM (0 or 1). |
-| unsigned char_ b_TimingUnit : Base timing Unit (0 to 4) |
-| 0 : ns |
-| 1 : µs |
-| 2 : ms |
-| 3 : s |
-| 4 : mn |
-| ULONG_ ul_LowTiming : Low base timing value. |
-| ULONG_ ul_HighTiming : High base timing value. |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a PWM module |
-| -4: PWM selection is wrong |
-| -5: PWM not initialised |
-| -6: Timing Unit selection is wrong |
-| -7: Low base timing selection is wrong |
-| -8: High base timing selection is wrong |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_SetNewPWMTiming(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_PWM,
- unsigned char b_TimingUnit,
- unsigned int ul_LowTiming,
- unsigned int ul_HighTiming)
-{
- struct addi_private *devpriv = dev->private;
- unsigned char b_ClockSelection;
- int i_ReturnValue = 0;
- unsigned int ul_LowTimerValue = 0;
- unsigned int ul_HighTimerValue = 0;
- unsigned int ul_RealLowTiming = 0;
- unsigned int ul_RealHighTiming = 0;
- unsigned int dw_Status;
- unsigned int dw_Command;
- double d_RealLowTiming = 0;
- double d_RealHighTiming = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***************/
- /* Test if PWM */
- /***************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_PWM) {
- /**************************/
- /* Test the PWM selection */
- /**************************/
-
- if (b_PWM <= 1) {
- /***************************/
- /* Test if PWM initialised */
- /***************************/
-
- dw_Status = inl(devpriv->s_BoardInfos.
- ui_Address + 12 + (20 * b_PWM) +
- (64 * b_ModulNbr));
-
- if (dw_Status & 0x10) {
- b_ClockSelection = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_PWMModuleInfo.
- b_ClockSelection;
-
- /************************/
- /* Test the timing unit */
- /************************/
-
- if (b_TimingUnit <= 4) {
- /*********************************/
- /* Test the low timing selection */
- /*********************************/
-
- if (((b_ClockSelection ==
- APCI1710_30MHZ)
- && (b_TimingUnit
- == 0)
- && (ul_LowTiming
- >= 266)
- && (ul_LowTiming
- <=
- 0xFFFFFFFFUL))
- || ((b_ClockSelection ==
- APCI1710_30MHZ)
- && (b_TimingUnit
- == 1)
- && (ul_LowTiming
- >= 1)
- && (ul_LowTiming
- <=
- 571230650UL))
- || ((b_ClockSelection ==
- APCI1710_30MHZ)
- && (b_TimingUnit
- == 2)
- && (ul_LowTiming
- >= 1)
- && (ul_LowTiming
- <=
- 571230UL))
- || ((b_ClockSelection ==
- APCI1710_30MHZ)
- && (b_TimingUnit
- == 3)
- && (ul_LowTiming
- >= 1)
- && (ul_LowTiming
- <=
- 571UL))
- || ((b_ClockSelection ==
- APCI1710_30MHZ)
- && (b_TimingUnit
- == 4)
- && (ul_LowTiming
- >= 1)
- && (ul_LowTiming
- <= 9UL))
- || ((b_ClockSelection ==
- APCI1710_33MHZ)
- && (b_TimingUnit
- == 0)
- && (ul_LowTiming
- >= 242)
- && (ul_LowTiming
- <=
- 0xFFFFFFFFUL))
- || ((b_ClockSelection ==
- APCI1710_33MHZ)
- && (b_TimingUnit
- == 1)
- && (ul_LowTiming
- >= 1)
- && (ul_LowTiming
- <=
- 519691043UL))
- || ((b_ClockSelection ==
- APCI1710_33MHZ)
- && (b_TimingUnit
- == 2)
- && (ul_LowTiming
- >= 1)
- && (ul_LowTiming
- <=
- 519691UL))
- || ((b_ClockSelection ==
- APCI1710_33MHZ)
- && (b_TimingUnit
- == 3)
- && (ul_LowTiming
- >= 1)
- && (ul_LowTiming
- <=
- 520UL))
- || ((b_ClockSelection ==
- APCI1710_33MHZ)
- && (b_TimingUnit
- == 4)
- && (ul_LowTiming
- >= 1)
- && (ul_LowTiming
- <= 8UL))
- || ((b_ClockSelection ==
- APCI1710_40MHZ)
- && (b_TimingUnit
- == 0)
- && (ul_LowTiming
- >= 200)
- && (ul_LowTiming
- <=
- 0xFFFFFFFFUL))
- || ((b_ClockSelection ==
- APCI1710_40MHZ)
- && (b_TimingUnit
- == 1)
- && (ul_LowTiming
- >= 1)
- && (ul_LowTiming
- <=
- 429496729UL))
- || ((b_ClockSelection ==
- APCI1710_40MHZ)
- && (b_TimingUnit
- == 2)
- && (ul_LowTiming
- >= 1)
- && (ul_LowTiming
- <=
- 429496UL))
- || ((b_ClockSelection ==
- APCI1710_40MHZ)
- && (b_TimingUnit
- == 3)
- && (ul_LowTiming
- >= 1)
- && (ul_LowTiming
- <=
- 429UL))
- || ((b_ClockSelection ==
- APCI1710_40MHZ)
- && (b_TimingUnit
- == 4)
- && (ul_LowTiming
- >= 1)
- && (ul_LowTiming
- <=
- 7UL))) {
- /**********************************/
- /* Test the High timing selection */
- /**********************************/
-
- if (((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 0) && (ul_HighTiming >= 266) && (ul_HighTiming <= 0xFFFFFFFFUL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 1) && (ul_HighTiming >= 1) && (ul_HighTiming <= 571230650UL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 2) && (ul_HighTiming >= 1) && (ul_HighTiming <= 571230UL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 3) && (ul_HighTiming >= 1) && (ul_HighTiming <= 571UL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 4) && (ul_HighTiming >= 1) && (ul_HighTiming <= 9UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 0) && (ul_HighTiming >= 242) && (ul_HighTiming <= 0xFFFFFFFFUL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 1) && (ul_HighTiming >= 1) && (ul_HighTiming <= 519691043UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 2) && (ul_HighTiming >= 1) && (ul_HighTiming <= 519691UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 3) && (ul_HighTiming >= 1) && (ul_HighTiming <= 520UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 4) && (ul_HighTiming >= 1) && (ul_HighTiming <= 8UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 0) && (ul_HighTiming >= 200) && (ul_HighTiming <= 0xFFFFFFFFUL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 1) && (ul_HighTiming >= 1) && (ul_HighTiming <= 429496729UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 2) && (ul_HighTiming >= 1) && (ul_HighTiming <= 429496UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 3) && (ul_HighTiming >= 1) && (ul_HighTiming <= 429UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 4) && (ul_HighTiming >= 1) && (ul_HighTiming <= 7UL))) {
-									/*************************************/
-									/* Calculate the low division factor */
-									/*************************************/
-
- fpu_begin();
- switch (b_TimingUnit) {
- /******/
- /* ns */
- /******/
-
- case 0:
-
- /******************/
- /* Timer 0 factor */
- /******************/
-
- ul_LowTimerValue
- =
- (unsigned int)
- (ul_LowTiming
- *
- (0.00025 * b_ClockSelection));
-
- /*******************/
- /* Round the value */
- /*******************/
-
- if ((double)((double)ul_LowTiming * (0.00025 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) {
- ul_LowTimerValue
- =
- ul_LowTimerValue
- +
- 1;
- }
-
- /*****************************/
- /* Calculate the real timing */
- /*****************************/
-
- ul_RealLowTiming
- =
- (unsigned int)
- (ul_LowTimerValue
- /
- (0.00025 * (double)b_ClockSelection));
- d_RealLowTiming
- =
- (double)
- ul_LowTimerValue
- /
- (0.00025
- *
- (double)
- b_ClockSelection);
-
- if ((double)((double)ul_LowTimerValue / (0.00025 * (double)b_ClockSelection)) >= (double)((double)ul_RealLowTiming + 0.5)) {
- ul_RealLowTiming
- =
- ul_RealLowTiming
- +
- 1;
- }
-
- ul_LowTiming
- =
- ul_LowTiming
- -
- 1;
- ul_LowTimerValue
- =
- ul_LowTimerValue
- -
- 2;
-
- if (b_ClockSelection != APCI1710_40MHZ) {
- ul_LowTimerValue
- =
- (unsigned int)
- (
- (double)
- (ul_LowTimerValue)
- *
- 1.007752288);
- }
-
- break;
-
- /******/
-									/* µs */
- /******/
-
- case 1:
-
- /******************/
- /* Timer 0 factor */
- /******************/
-
- ul_LowTimerValue
- =
- (unsigned int)
- (ul_LowTiming
- *
- (0.25 * b_ClockSelection));
-
- /*******************/
- /* Round the value */
- /*******************/
-
- if ((double)((double)ul_LowTiming * (0.25 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) {
- ul_LowTimerValue
- =
- ul_LowTimerValue
- +
- 1;
- }
-
- /*****************************/
- /* Calculate the real timing */
- /*****************************/
-
- ul_RealLowTiming
- =
- (unsigned int)
- (ul_LowTimerValue
- /
- (0.25 * (double)b_ClockSelection));
- d_RealLowTiming
- =
- (double)
- ul_LowTimerValue
- /
- (
- (double)
- 0.25
- *
- (double)
- b_ClockSelection);
-
- if ((double)((double)ul_LowTimerValue / (0.25 * (double)b_ClockSelection)) >= (double)((double)ul_RealLowTiming + 0.5)) {
- ul_RealLowTiming
- =
- ul_RealLowTiming
- +
- 1;
- }
-
- ul_LowTiming
- =
- ul_LowTiming
- -
- 1;
- ul_LowTimerValue
- =
- ul_LowTimerValue
- -
- 2;
-
- if (b_ClockSelection != APCI1710_40MHZ) {
- ul_LowTimerValue
- =
- (unsigned int)
- (
- (double)
- (ul_LowTimerValue)
- *
- 1.007752288);
- }
-
- break;
-
- /******/
- /* ms */
- /******/
-
- case 2:
-
- /******************/
- /* Timer 0 factor */
- /******************/
-
- ul_LowTimerValue
- =
- ul_LowTiming
- *
- (250.0
- *
- b_ClockSelection);
-
- /*******************/
- /* Round the value */
- /*******************/
-
- if ((double)((double)ul_LowTiming * (250.0 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) {
- ul_LowTimerValue
- =
- ul_LowTimerValue
- +
- 1;
- }
-
- /*****************************/
- /* Calculate the real timing */
- /*****************************/
-
- ul_RealLowTiming
- =
- (unsigned int)
- (ul_LowTimerValue
- /
- (250.0 * (double)b_ClockSelection));
- d_RealLowTiming
- =
- (double)
- ul_LowTimerValue
- /
- (250.0
- *
- (double)
- b_ClockSelection);
-
- if ((double)((double)ul_LowTimerValue / (250.0 * (double)b_ClockSelection)) >= (double)((double)ul_RealLowTiming + 0.5)) {
- ul_RealLowTiming
- =
- ul_RealLowTiming
- +
- 1;
- }
-
- ul_LowTiming
- =
- ul_LowTiming
- -
- 1;
- ul_LowTimerValue
- =
- ul_LowTimerValue
- -
- 2;
-
- if (b_ClockSelection != APCI1710_40MHZ) {
- ul_LowTimerValue
- =
- (unsigned int)
- (
- (double)
- (ul_LowTimerValue)
- *
- 1.007752288);
- }
-
- break;
-
- /*****/
- /* s */
- /*****/
-
- case 3:
-
- /******************/
- /* Timer 0 factor */
- /******************/
-
- ul_LowTimerValue
- =
- (unsigned int)
- (ul_LowTiming
- *
- (250000.0
- *
- b_ClockSelection));
-
- /*******************/
- /* Round the value */
- /*******************/
-
- if ((double)((double)ul_LowTiming * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) {
- ul_LowTimerValue
- =
- ul_LowTimerValue
- +
- 1;
- }
-
- /*****************************/
- /* Calculate the real timing */
- /*****************************/
-
- ul_RealLowTiming
- =
- (unsigned int)
- (ul_LowTimerValue
- /
- (250000.0
- *
- (double)
- b_ClockSelection));
- d_RealLowTiming
- =
- (double)
- ul_LowTimerValue
- /
- (250000.0
- *
- (double)
- b_ClockSelection);
-
- if ((double)((double)ul_LowTimerValue / (250000.0 * (double)b_ClockSelection)) >= (double)((double)ul_RealLowTiming + 0.5)) {
- ul_RealLowTiming
- =
- ul_RealLowTiming
- +
- 1;
- }
-
- ul_LowTiming
- =
- ul_LowTiming
- -
- 1;
- ul_LowTimerValue
- =
- ul_LowTimerValue
- -
- 2;
-
- if (b_ClockSelection != APCI1710_40MHZ) {
- ul_LowTimerValue
- =
- (unsigned int)
- (
- (double)
- (ul_LowTimerValue)
- *
- 1.007752288);
- }
-
- break;
-
- /******/
- /* mn */
- /******/
-
- case 4:
-
- /******************/
- /* Timer 0 factor */
- /******************/
-
- ul_LowTimerValue
- =
- (unsigned int)
- (
- (ul_LowTiming
- *
- 60)
- *
- (250000.0
- *
- b_ClockSelection));
-
- /*******************/
- /* Round the value */
- /*******************/
-
- if ((double)((double)(ul_LowTiming * 60.0) * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5))) {
- ul_LowTimerValue
- =
- ul_LowTimerValue
- +
- 1;
- }
-
- /*****************************/
- /* Calculate the real timing */
- /*****************************/
-
- ul_RealLowTiming
- =
- (unsigned int)
- (ul_LowTimerValue
- /
- (250000.0
- *
- (double)
- b_ClockSelection))
- /
- 60;
- d_RealLowTiming
- =
- (
- (double)
- ul_LowTimerValue
- /
- (250000.0
- *
- (double)
- b_ClockSelection))
- /
- 60.0;
-
- if ((double)(((double)ul_LowTimerValue / (250000.0 * (double)b_ClockSelection)) / 60.0) >= (double)((double)ul_RealLowTiming + 0.5)) {
- ul_RealLowTiming
- =
- ul_RealLowTiming
- +
- 1;
- }
-
- ul_LowTiming
- =
- ul_LowTiming
- -
- 1;
- ul_LowTimerValue
- =
- ul_LowTimerValue
- -
- 2;
-
- if (b_ClockSelection != APCI1710_40MHZ) {
- ul_LowTimerValue
- =
- (unsigned int)
- (
- (double)
- (ul_LowTimerValue)
- *
- 1.007752288);
- }
-
- break;
- }
-
-									/**************************************/
-									/* Calculate the high division factor */
-									/**************************************/
-
- switch (b_TimingUnit) {
- /******/
- /* ns */
- /******/
-
- case 0:
-
- /******************/
- /* Timer 0 factor */
- /******************/
-
- ul_HighTimerValue
- =
- (unsigned int)
- (ul_HighTiming
- *
- (0.00025 * b_ClockSelection));
-
- /*******************/
- /* Round the value */
- /*******************/
-
- if ((double)((double)ul_HighTiming * (0.00025 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) {
- ul_HighTimerValue
- =
- ul_HighTimerValue
- +
- 1;
- }
-
- /*****************************/
- /* Calculate the real timing */
- /*****************************/
-
- ul_RealHighTiming
- =
- (unsigned int)
- (ul_HighTimerValue
- /
- (0.00025 * (double)b_ClockSelection));
- d_RealHighTiming
- =
- (double)
- ul_HighTimerValue
- /
- (0.00025
- *
- (double)
- b_ClockSelection);
-
- if ((double)((double)ul_HighTimerValue / (0.00025 * (double)b_ClockSelection)) >= (double)((double)ul_RealHighTiming + 0.5)) {
- ul_RealHighTiming
- =
- ul_RealHighTiming
- +
- 1;
- }
-
- ul_HighTiming
- =
- ul_HighTiming
- -
- 1;
- ul_HighTimerValue
- =
- ul_HighTimerValue
- -
- 2;
-
- if (b_ClockSelection != APCI1710_40MHZ) {
- ul_HighTimerValue
- =
- (unsigned int)
- (
- (double)
- (ul_HighTimerValue)
- *
- 1.007752288);
- }
-
- break;
-
- /******/
-									/* µs */
- /******/
-
- case 1:
-
- /******************/
- /* Timer 0 factor */
- /******************/
-
- ul_HighTimerValue
- =
- (unsigned int)
- (ul_HighTiming
- *
- (0.25 * b_ClockSelection));
-
- /*******************/
- /* Round the value */
- /*******************/
-
- if ((double)((double)ul_HighTiming * (0.25 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) {
- ul_HighTimerValue
- =
- ul_HighTimerValue
- +
- 1;
- }
-
- /*****************************/
- /* Calculate the real timing */
- /*****************************/
-
- ul_RealHighTiming
- =
- (unsigned int)
- (ul_HighTimerValue
- /
- (0.25 * (double)b_ClockSelection));
- d_RealHighTiming
- =
- (double)
- ul_HighTimerValue
- /
- (
- (double)
- 0.25
- *
- (double)
- b_ClockSelection);
-
- if ((double)((double)ul_HighTimerValue / (0.25 * (double)b_ClockSelection)) >= (double)((double)ul_RealHighTiming + 0.5)) {
- ul_RealHighTiming
- =
- ul_RealHighTiming
- +
- 1;
- }
-
- ul_HighTiming
- =
- ul_HighTiming
- -
- 1;
- ul_HighTimerValue
- =
- ul_HighTimerValue
- -
- 2;
-
- if (b_ClockSelection != APCI1710_40MHZ) {
- ul_HighTimerValue
- =
- (unsigned int)
- (
- (double)
- (ul_HighTimerValue)
- *
- 1.007752288);
- }
-
- break;
-
- /******/
- /* ms */
- /******/
-
- case 2:
-
- /******************/
- /* Timer 0 factor */
- /******************/
-
- ul_HighTimerValue
- =
- ul_HighTiming
- *
- (250.0
- *
- b_ClockSelection);
-
- /*******************/
- /* Round the value */
- /*******************/
-
- if ((double)((double)ul_HighTiming * (250.0 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) {
- ul_HighTimerValue
- =
- ul_HighTimerValue
- +
- 1;
- }
-
- /*****************************/
- /* Calculate the real timing */
- /*****************************/
-
- ul_RealHighTiming
- =
- (unsigned int)
- (ul_HighTimerValue
- /
- (250.0 * (double)b_ClockSelection));
- d_RealHighTiming
- =
- (double)
- ul_HighTimerValue
- /
- (250.0
- *
- (double)
- b_ClockSelection);
-
- if ((double)((double)ul_HighTimerValue / (250.0 * (double)b_ClockSelection)) >= (double)((double)ul_RealHighTiming + 0.5)) {
- ul_RealHighTiming
- =
- ul_RealHighTiming
- +
- 1;
- }
-
- ul_HighTiming
- =
- ul_HighTiming
- -
- 1;
- ul_HighTimerValue
- =
- ul_HighTimerValue
- -
- 2;
-
- if (b_ClockSelection != APCI1710_40MHZ) {
- ul_HighTimerValue
- =
- (unsigned int)
- (
- (double)
- (ul_HighTimerValue)
- *
- 1.007752288);
- }
-
- break;
-
- /*****/
- /* s */
- /*****/
-
- case 3:
-
- /******************/
- /* Timer 0 factor */
- /******************/
-
- ul_HighTimerValue
- =
- (unsigned int)
- (ul_HighTiming
- *
- (250000.0
- *
- b_ClockSelection));
-
- /*******************/
- /* Round the value */
- /*******************/
-
- if ((double)((double)ul_HighTiming * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) {
- ul_HighTimerValue
- =
- ul_HighTimerValue
- +
- 1;
- }
-
- /*****************************/
- /* Calculate the real timing */
- /*****************************/
-
- ul_RealHighTiming
- =
- (unsigned int)
- (ul_HighTimerValue
- /
- (250000.0
- *
- (double)
- b_ClockSelection));
- d_RealHighTiming
- =
- (double)
- ul_HighTimerValue
- /
- (250000.0
- *
- (double)
- b_ClockSelection);
-
- if ((double)((double)ul_HighTimerValue / (250000.0 * (double)b_ClockSelection)) >= (double)((double)ul_RealHighTiming + 0.5)) {
- ul_RealHighTiming
- =
- ul_RealHighTiming
- +
- 1;
- }
-
- ul_HighTiming
- =
- ul_HighTiming
- -
- 1;
- ul_HighTimerValue
- =
- ul_HighTimerValue
- -
- 2;
-
- if (b_ClockSelection != APCI1710_40MHZ) {
- ul_HighTimerValue
- =
- (unsigned int)
- (
- (double)
- (ul_HighTimerValue)
- *
- 1.007752288);
- }
-
- break;
-
- /******/
- /* mn */
- /******/
-
- case 4:
-
- /******************/
- /* Timer 0 factor */
- /******************/
-
- ul_HighTimerValue
- =
- (unsigned int)
- (
- (ul_HighTiming
- *
- 60)
- *
- (250000.0
- *
- b_ClockSelection));
-
- /*******************/
- /* Round the value */
- /*******************/
-
- if ((double)((double)(ul_HighTiming * 60.0) * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) {
- ul_HighTimerValue
- =
- ul_HighTimerValue
- +
- 1;
- }
-
- /*****************************/
- /* Calculate the real timing */
- /*****************************/
-
- ul_RealHighTiming
- =
- (unsigned int)
- (ul_HighTimerValue
- /
- (250000.0
- *
- (double)
- b_ClockSelection))
- /
- 60;
- d_RealHighTiming
- =
- (
- (double)
- ul_HighTimerValue
- /
- (250000.0
- *
- (double)
- b_ClockSelection))
- /
- 60.0;
-
- if ((double)(((double)ul_HighTimerValue / (250000.0 * (double)b_ClockSelection)) / 60.0) >= (double)((double)ul_RealHighTiming + 0.5)) {
- ul_RealHighTiming
- =
- ul_RealHighTiming
- +
- 1;
- }
-
- ul_HighTiming
- =
- ul_HighTiming
- -
- 1;
- ul_HighTimerValue
- =
- ul_HighTimerValue
- -
- 2;
-
- if (b_ClockSelection != APCI1710_40MHZ) {
- ul_HighTimerValue
- =
- (unsigned int)
- (
- (double)
- (ul_HighTimerValue)
- *
- 1.007752288);
- }
-
- break;
- }
-
- fpu_end();
-
- /************************/
- /* Save the timing unit */
- /************************/
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_PWMModuleInfo.
- s_PWMInfo
- [b_PWM].
- b_TimingUnit
- =
- b_TimingUnit;
-
- /****************************/
- /* Save the low base timing */
- /****************************/
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_PWMModuleInfo.
- s_PWMInfo
- [b_PWM].
- d_LowTiming
- =
- d_RealLowTiming;
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_PWMModuleInfo.
- s_PWMInfo
- [b_PWM].
- ul_RealLowTiming
- =
- ul_RealLowTiming;
-
-									/*****************************/
-									/* Save the high base timing */
-									/*****************************/
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_PWMModuleInfo.
- s_PWMInfo
- [b_PWM].
- d_HighTiming
- =
- d_RealHighTiming;
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_PWMModuleInfo.
- s_PWMInfo
- [b_PWM].
- ul_RealHighTiming
- =
- ul_RealHighTiming;
-
- /************************/
- /* Write the low timing */
- /************************/
-
- outl(ul_LowTimerValue, devpriv->s_BoardInfos.ui_Address + 0 + (20 * b_PWM) + (64 * b_ModulNbr));
-
- /*************************/
- /* Write the high timing */
- /*************************/
-
- outl(ul_HighTimerValue, devpriv->s_BoardInfos.ui_Address + 4 + (20 * b_PWM) + (64 * b_ModulNbr));
-
- /***************************/
- /* Set the clock selection */
- /***************************/
-
- dw_Command =
- inl
- (devpriv->
- s_BoardInfos.
- ui_Address
- + 8 +
- (20 * b_PWM) + (64 * b_ModulNbr));
-
- dw_Command =
- dw_Command
- & 0x7F;
-
- if (b_ClockSelection == APCI1710_40MHZ) {
- dw_Command
- =
- dw_Command
- |
- 0x80;
- }
-
- /***************************/
- /* Set the clock selection */
- /***************************/
-
- outl(dw_Command,
- devpriv->
- s_BoardInfos.
- ui_Address
- + 8 +
- (20 * b_PWM) + (64 * b_ModulNbr));
- } else {
- /***************************************/
- /* High base timing selection is wrong */
- /***************************************/
- DPRINTK("High base timing selection is wrong\n");
- i_ReturnValue =
- -8;
- }
- } else {
- /**************************************/
- /* Low base timing selection is wrong */
- /**************************************/
- DPRINTK("Low base timing selection is wrong\n");
- i_ReturnValue = -7;
- }
- } /* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */
- else {
- /**********************************/
- /* Timing unit selection is wrong */
- /**********************************/
- DPRINTK("Timing unit selection is wrong\n");
- i_ReturnValue = -6;
- } /* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */
- } /* if (dw_Status & 0x10) */
- else {
- /***********************/
- /* PWM not initialised */
- /***********************/
- DPRINTK("PWM not initialised\n");
- i_ReturnValue = -5;
- } /* if (dw_Status & 0x10) */
- } /* if (b_PWM >= 0 && b_PWM <= 1) */
- else {
- /******************************/
- /* Tor PWM selection is wrong */
- /******************************/
- DPRINTK("Tor PWM selection is wrong\n");
- i_ReturnValue = -4;
- } /* if (b_PWM >= 0 && b_PWM <= 1) */
- } else {
- /**********************************/
- /* The module is not a PWM module */
- /**********************************/
- DPRINTK("The module is not a PWM module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
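
Each arm of the two switch statements above performs the same conversion, only the tick factor changes (0.00025*clock per ns, 0.25*clock per µs, 250*clock per ms, 250000*clock per s and 60*250000*clock per mn, with the clock given in MHz). A condensed sketch of that conversion follows; the helper name is hypothetical, the real-timing readback is omitted, and in the driver the floating-point work is bracketed by fpu_begin()/fpu_end().

static unsigned int apci1710_units_to_ticks(unsigned int timing,
					    double ticks_per_unit,
					    unsigned char clock_mhz)
{
	unsigned int value = (unsigned int)(timing * ticks_per_unit);

	/* round to the nearest integer instead of truncating */
	if (timing * ticks_per_unit >= (double)value + 0.5)
		value++;

	/* the register is programmed with the tick count minus 2 */
	value -= 2;

	/* 30/33 MHz boards apply an empirical correction factor */
	if (clock_mhz != APCI1710_40MHZ)
		value = (unsigned int)((double)value * 1.007752288);

	return value;
}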
-
-/*
- * PWM enable, disable and set new timing
- */
-static int i_APCI1710_InsnWritePWM(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned char b_WriteType;
- int i_ReturnValue = 0;
- b_WriteType = CR_CHAN(insn->chanspec);
-
- switch (b_WriteType) {
- case APCI1710_PWM_ENABLE:
- i_ReturnValue = i_APCI1710_EnablePWM(dev,
- (unsigned char) CR_AREF(insn->chanspec),
- (unsigned char) data[0],
- (unsigned char) data[1],
- (unsigned char) data[2],
- (unsigned char) data[3], (unsigned char) data[4], (unsigned char) data[5]);
- break;
-
- case APCI1710_PWM_DISABLE:
- i_ReturnValue = i_APCI1710_DisablePWM(dev,
- (unsigned char) CR_AREF(insn->chanspec), (unsigned char) data[0]);
- break;
-
- case APCI1710_PWM_NEWTIMING:
- i_ReturnValue = i_APCI1710_SetNewPWMTiming(dev,
- (unsigned char) CR_AREF(insn->chanspec),
- (unsigned char) data[0],
- (unsigned char) data[1], (unsigned int) data[2], (unsigned int) data[3]);
- break;
-
- default:
- printk("Write Config Parameter Wrong\n");
- }
-
- if (i_ReturnValue >= 0)
- i_ReturnValue = insn->n;
- return i_ReturnValue;
-}
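
The dispatcher above carries the sub-command in the channel field of the chanspec, the module number in the AREF field, and the remaining arguments in data[]. A hypothetical user-space sketch with comedilib (the subdevice number, the visibility of APCI1710_PWM_NEWTIMING outside the driver, and the helper name are assumptions):

#include <string.h>
#include <comedilib.h>

/* APCI1710_PWM_NEWTIMING comes from the driver's private header */
int pwm_set_new_timing(comedi_t *it, unsigned int subdev, unsigned int module,
		       unsigned int pwm, unsigned int unit,
		       unsigned int low, unsigned int high)
{
	lsampl_t data[4] = { pwm, unit, low, high };
	comedi_insn insn;

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_WRITE;
	insn.subdev = subdev;
	/* channel = sub-command, aref = module number (see CR_CHAN/CR_AREF above) */
	insn.chanspec = CR_PACK(APCI1710_PWM_NEWTIMING, 0, module);
	insn.n = 4;
	insn.data = data;

	return comedi_do_insn(it, &insn);
}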
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_GetPWMStatus |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_PWM, |
-| unsigned char *_ pb_PWMOutputStatus, |
-| unsigned char *_ pb_ExternGateStatus) |
-+----------------------------------------------------------------------------+
-| Task : Return the status from selected PWM (b_PWM) from |
-| selected module (b_ModulNbr). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_PWM : Selected PWM (0 or 1) |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3)
- b_ModulNbr =(unsigned char) CR_AREF(insn->chanspec);
- b_PWM =(unsigned char) data[0];
-
- |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_PWMOutputStatus : Return the PWM output |
-| level status. |
-| 0 : The PWM output level|
-| is low. |
-| 1 : The PWM output level|
-| is high. |
-| unsigned char *_ pb_ExternGateStatus : Return the extern gate |
-| level status. |
-| 0 : The extern gate is |
-| low. |
-| 1 : The extern gate is |
-| high.
- pb_PWMOutputStatus =(unsigned char *) data[0];
- pb_ExternGateStatus =(unsigned char *) data[1]; |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a PWM module |
-| -4: PWM selection is wrong |
-| -5: PWM not initialised see function |
-| "i_APCI1710_InitPWM" |
-| -6: PWM not enabled see function "i_APCI1710_EnablePWM"|
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_InsnReadGetPWMStatus(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_Status;
- unsigned char b_ModulNbr;
- unsigned char b_PWM;
- unsigned char *pb_PWMOutputStatus;
- unsigned char *pb_ExternGateStatus;
-
- i_ReturnValue = insn->n;
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_PWM = (unsigned char) CR_CHAN(insn->chanspec);
- pb_PWMOutputStatus = (unsigned char *) &data[0];
- pb_ExternGateStatus = (unsigned char *) &data[1];
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***************/
- /* Test if PWM */
- /***************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_PWM) {
- /**************************/
- /* Test the PWM selection */
- /**************************/
-
- if (b_PWM <= 1) {
- /***************************/
- /* Test if PWM initialised */
- /***************************/
-
- dw_Status = inl(devpriv->s_BoardInfos.
- ui_Address + 12 + (20 * b_PWM) +
- (64 * b_ModulNbr));
-
- if (dw_Status & 0x10) {
- /***********************/
- /* Test if PWM enabled */
- /***********************/
-
- if (dw_Status & 0x1) {
- *pb_PWMOutputStatus =
- (unsigned char) ((dw_Status >> 7)
- & 1);
- *pb_ExternGateStatus =
- (unsigned char) ((dw_Status >> 6)
- & 1);
- } /* if (dw_Status & 0x1) */
- else {
- /*******************/
- /* PWM not enabled */
- /*******************/
-
- DPRINTK("PWM not enabled \n");
- i_ReturnValue = -6;
- } /* if (dw_Status & 0x1) */
- } /* if (dw_Status & 0x10) */
- else {
- /***********************/
- /* PWM not initialised */
- /***********************/
-
- DPRINTK("PWM not initialised\n");
- i_ReturnValue = -5;
- } /* if (dw_Status & 0x10) */
- } /* if (b_PWM >= 0 && b_PWM <= 1) */
- else {
- /******************************/
- /* Tor PWM selection is wrong */
- /******************************/
-
- DPRINTK("Tor PWM selection is wrong\n");
- i_ReturnValue = -4;
- } /* if (b_PWM >= 0 && b_PWM <= 1) */
- } else {
- /**********************************/
- /* The module is not a PWM module */
- /**********************************/
-
- DPRINTK("The module is not a PWM module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
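
For reference, the status word read at offset 12 above decodes as follows; only the bit positions come from the code, the macro names are illustrative:

#define PWM_STATUS_ENABLED(s)		((s) & 0x01)		/* bit 0: PWM enabled */
#define PWM_STATUS_INITIALISED(s)	((s) & 0x10)		/* bit 4: PWM initialised */
#define PWM_STATUS_EXTERN_GATE(s)	(((s) >> 6) & 1)	/* bit 6: extern gate level */
#define PWM_STATUS_OUTPUT(s)		(((s) >> 7) & 1)	/* bit 7: PWM output level */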
-
-static int i_APCI1710_InsnBitsReadPWMInterrupt(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
-
- data[0] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].b_OldModuleMask;
- data[1] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].ul_OldInterruptMask;
- data[2] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].ul_OldCounterLatchValue;
-
-	/***************************/
- /* Increment the read FIFO */
- /***************************/
-
- devpriv->
- s_InterruptParameters.
- ui_Read = (devpriv->
- s_InterruptParameters.ui_Read + 1) % APCI1710_SAVE_INTERRUPT;
-
- return insn->n;
-
-}
diff --git a/drivers/staging/comedi/drivers/addi-data/APCI1710_Ssi.c b/drivers/staging/comedi/drivers/addi-data/APCI1710_Ssi.c
deleted file mode 100644
index 97e7eec..0000000
--- a/drivers/staging/comedi/drivers/addi-data/APCI1710_Ssi.c
+++ /dev/null
@@ -1,849 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
-	Tel: +49(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-You should also find the complete GPL in the COPYING file accompanying this source code.
-
-@endverbatim
-*/
-/*
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-----------------------------------------------------------------------+
- | Project : API APCI1710 | Compiler : gcc |
- | Module name : SSI.C | Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-----------------------------------------------------------------------+
- | Description : APCI-1710 SSI counter module |
- +-----------------------------------------------------------------------+
- | several changes done by S. Weber in 1998 and C. Guinot in 2000 |
- +-----------------------------------------------------------------------+
-*/
-
-#define APCI1710_30MHZ 30
-#define APCI1710_33MHZ 33
-#define APCI1710_40MHZ 40
-
-#define APCI1710_BINARY_MODE 0x1
-#define APCI1710_GRAY_MODE 0x0
-
-#define APCI1710_SSI_READ1VALUE 1
-#define APCI1710_SSI_READALLVALUE 2
-
-#define APCI1710_SSI_SET_CHANNELON 0
-#define APCI1710_SSI_SET_CHANNELOFF 1
-#define APCI1710_SSI_READ_1CHANNEL 2
-#define APCI1710_SSI_READ_ALLCHANNEL 3
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_InitSSI |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_SSIProfile, |
-| unsigned char_ b_PositionTurnLength, |
-| unsigned char_ b_TurnCptLength, |
-| unsigned char_ b_PCIInputClock, |
-| ULONG_ ul_SSIOutputClock, |
-| unsigned char_ b_SSICountingMode) |
-+----------------------------------------------------------------------------+
-| Task : Configure the SSI operating mode from selected module |
-|                     (b_ModulNbr). You must call this function before you   |
-|                     call any other function which accesses the SSI.        |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710|
-| unsigned char_ b_ModulNbr : Module number to |
-| configure (0 to 3) |
-| unsigned char_ b_SSIProfile : Selection from SSI |
-| profile length (2 to 32).|
-| unsigned char_ b_PositionTurnLength : Selection from SSI |
-| position data length |
-| (1 to 31). |
-| unsigned char_ b_TurnCptLength : Selection from SSI turn |
-| counter data length |
-| (1 to 31). |
-| unsigned char b_PCIInputClock : Selection from PCI bus |
-| clock |
-| - APCI1710_30MHZ : |
-| The PC have a PCI bus |
-| clock from 30 MHz |
-| - APCI1710_33MHZ : |
-| The PC have a PCI bus |
-| clock from 33 MHz |
-| ULONG_ ul_SSIOutputClock : Selection from SSI output|
-| clock. |
-| From 229 to 5 000 000 Hz|
-| for 30 MHz selection. |
-| From 252 to 5 000 000 Hz|
-| for 33 MHz selection. |
-| unsigned char b_SSICountingMode : SSI counting mode |
-| selection |
-| - APCI1710_BINARY_MODE : |
-| Binary counting mode. |
-| - APCI1710_GRAY_MODE : |
-| Gray counting mode.
-
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_SSIProfile = (unsigned char) data[0];
- b_PositionTurnLength= (unsigned char) data[1];
- b_TurnCptLength = (unsigned char) data[2];
- b_PCIInputClock = (unsigned char) data[3];
- ul_SSIOutputClock = (unsigned int) data[4];
- b_SSICountingMode = (unsigned char) data[5]; |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The module parameter is wrong |
-| -3: The module is not a SSI module |
-| -4: The selected SSI profile length is wrong |
-| -5: The selected SSI position data length is wrong |
-| -6: The selected SSI turn counter data length is wrong |
-| -7: The selected PCI input clock is wrong |
-| -8: The selected SSI output clock is wrong |
-| -9: The selected SSI counting mode parameter is wrong |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI1710_InsnConfigInitSSI(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int ui_TimerValue;
- unsigned char b_ModulNbr, b_SSIProfile, b_PositionTurnLength, b_TurnCptLength,
- b_PCIInputClock, b_SSICountingMode;
- unsigned int ul_SSIOutputClock;
-
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_SSIProfile = (unsigned char) data[0];
- b_PositionTurnLength = (unsigned char) data[1];
- b_TurnCptLength = (unsigned char) data[2];
- b_PCIInputClock = (unsigned char) data[3];
- ul_SSIOutputClock = (unsigned int) data[4];
- b_SSICountingMode = (unsigned char) data[5];
-
- i_ReturnValue = insn->n;
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***********************/
- /* Test if SSI counter */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_SSI_COUNTER) {
- /*******************************/
- /* Test the SSI profile length */
- /*******************************/
-
-			/* CG 22/03/00 b_SSIProfile >= 2 instead of b_SSIProfile > 2 */
- if (b_SSIProfile >= 2 && b_SSIProfile < 33) {
- /*************************************/
- /* Test the SSI position data length */
- /*************************************/
-
- if (b_PositionTurnLength > 0
- && b_PositionTurnLength < 32) {
- /*****************************************/
- /* Test the SSI turn counter data length */
- /*****************************************/
-
- if (b_TurnCptLength > 0
- && b_TurnCptLength < 32) {
- /***************************/
- /* Test the profile length */
- /***************************/
-
- if ((b_TurnCptLength +
- b_PositionTurnLength)
- <= b_SSIProfile) {
- /****************************/
- /* Test the PCI input clock */
- /****************************/
-
- if (b_PCIInputClock ==
- APCI1710_30MHZ
- ||
- b_PCIInputClock
- ==
- APCI1710_33MHZ)
- {
- /*************************/
- /* Test the output clock */
- /*************************/
-
- if ((b_PCIInputClock == APCI1710_30MHZ && (ul_SSIOutputClock > 228 && ul_SSIOutputClock <= 5000000UL)) || (b_PCIInputClock == APCI1710_33MHZ && (ul_SSIOutputClock > 251 && ul_SSIOutputClock <= 5000000UL))) {
- if (b_SSICountingMode == APCI1710_BINARY_MODE || b_SSICountingMode == APCI1710_GRAY_MODE) {
- /**********************/
- /* Save configuration */
- /**********************/
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_SSICounterInfo.
- b_SSIProfile
- =
- b_SSIProfile;
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_SSICounterInfo.
- b_PositionTurnLength
- =
- b_PositionTurnLength;
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_SSICounterInfo.
- b_TurnCptLength
- =
- b_TurnCptLength;
-
- /*********************************/
- /* Initialise the profile length */
- /*********************************/
-
- if (b_SSICountingMode == APCI1710_BINARY_MODE) {
-
- outl(b_SSIProfile + 1, devpriv->s_BoardInfos.ui_Address + 4 + (64 * b_ModulNbr));
- } else {
-
- outl(b_SSIProfile, devpriv->s_BoardInfos.ui_Address + 4 + (64 * b_ModulNbr));
- }
-
- /******************************/
- /* Calculate the output clock */
- /******************************/
-
- ui_TimerValue
- =
- (unsigned int)
- (
- ((unsigned int) (b_PCIInputClock) * 500000UL) / ul_SSIOutputClock);
-
- /************************/
- /* Initialise the timer */
- /************************/
-
- outl(ui_TimerValue, devpriv->s_BoardInfos.ui_Address + (64 * b_ModulNbr));
-
- /********************************/
- /* Initialise the counting mode */
- /********************************/
-
- outl(7 * b_SSICountingMode, devpriv->s_BoardInfos.ui_Address + 12 + (64 * b_ModulNbr));
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_SSICounterInfo.
- b_SSIInit
- =
- 1;
- } else {
- /*****************************************************/
- /* The selected SSI counting mode parameter is wrong */
- /*****************************************************/
-
- DPRINTK("The selected SSI counting mode parameter is wrong\n");
- i_ReturnValue
- =
- -9;
- }
- } else {
- /******************************************/
- /* The selected SSI output clock is wrong */
- /******************************************/
-
- DPRINTK("The selected SSI output clock is wrong\n");
- i_ReturnValue
- =
- -8;
- }
- } else {
- /*****************************************/
- /* The selected PCI input clock is wrong */
- /*****************************************/
-
- DPRINTK("The selected PCI input clock is wrong\n");
- i_ReturnValue =
- -7;
- }
- } else {
- /********************************************/
- /* The selected SSI profile length is wrong */
- /********************************************/
-
- DPRINTK("The selected SSI profile length is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /******************************************************/
- /* The selected SSI turn counter data length is wrong */
- /******************************************************/
-
- DPRINTK("The selected SSI turn counter data length is wrong\n");
- i_ReturnValue = -6;
- }
- } else {
- /**************************************************/
- /* The selected SSI position data length is wrong */
- /**************************************************/
-
- DPRINTK("The selected SSI position data length is wrong\n");
- i_ReturnValue = -5;
- }
- } else {
- /********************************************/
- /* The selected SSI profile length is wrong */
- /********************************************/
-
- DPRINTK("The selected SSI profile length is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /**********************************/
- /* The module is not a SSI module */
- /**********************************/
-
- DPRINTK("The module is not a SSI module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
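
The divisor written to the module base address above is simply half the PCI clock, in Hz, divided by the requested SSI output clock. A small sketch with a hypothetical helper name and a worked value:

/* divisor = (PCI clock / 2) / SSI output clock, both in Hz */
static unsigned int ssi_clock_divisor(unsigned char pci_clock_mhz,
				      unsigned int ssi_output_hz)
{
	return (pci_clock_mhz * 500000UL) / ssi_output_hz;
}

/* e.g. a 33 MHz bus and a 100 kHz SSI clock give 16500000 / 100000 = 165 */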
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_Read1SSIValue |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_SelectedSSI, |
-| PULONG_ pul_Position, |
-| PULONG_ pul_TurnCpt)
- int i_APCI1710_ReadSSIValue(struct comedi_device *dev,struct comedi_subdevice *s,
- struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task :
-
-
- Read the selected SSI counter (b_SelectedSSI) from |
-| selected module (b_ModulNbr).
- or Read all SSI counter (b_SelectedSSI) from |
-| selected module (b_ModulNbr). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710|
-| unsigned char_ b_ModulNbr : Module number to |
-| configure (0 to 3) |
-| unsigned char_ b_SelectedSSI : Selection from SSI |
-| counter (0 to 2)
-
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_SelectedSSI = (unsigned char) CR_CHAN(insn->chanspec); (in case of single ssi)
- b_ReadType = (unsigned char) CR_RANGE(insn->chanspec);
-|
-+----------------------------------------------------------------------------+
-| Output Parameters : PULONG_ pul_Position : SSI position in the turn |
-| PULONG_ pul_TurnCpt : Number of turns
-
-pul_Position = (unsigned int *) &data[0];
- pul_TurnCpt = (unsigned int *) &data[1]; |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The module parameter is wrong |
-| -3: The module is not a SSI module |
-| -4: SSI not initialised see function |
-| "i_APCI1710_InitSSI" |
-| -5: The selected SSI is wrong |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI1710_InsnReadSSIValue(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned char b_Cpt;
- unsigned char b_Length;
- unsigned char b_Schift;
- unsigned char b_SSICpt;
- unsigned int dw_And;
- unsigned int dw_And1;
- unsigned int dw_And2;
- unsigned int dw_StatusReg;
- unsigned int dw_CounterValue;
- unsigned char b_ModulNbr;
- unsigned char b_SelectedSSI;
- unsigned char b_ReadType;
- unsigned int *pul_Position;
- unsigned int *pul_TurnCpt;
- unsigned int *pul_Position1;
- unsigned int *pul_TurnCpt1;
-
- i_ReturnValue = insn->n;
- pul_Position1 = (unsigned int *) &data[0];
-/* For Read1 */
- pul_TurnCpt1 = (unsigned int *) &data[1];
-/* For Read all */
- pul_Position = (unsigned int *) &data[0]; /* 0-2 */
- pul_TurnCpt = (unsigned int *) &data[3]; /* 3-5 */
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_SelectedSSI = (unsigned char) CR_CHAN(insn->chanspec);
- b_ReadType = (unsigned char) CR_RANGE(insn->chanspec);
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***********************/
- /* Test if SSI counter */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_SSI_COUNTER) {
- /***************************/
- /* Test if SSI initialised */
- /***************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_SSICounterInfo.b_SSIInit == 1) {
-
- switch (b_ReadType) {
-
- case APCI1710_SSI_READ1VALUE:
- /****************************************/
- /* Test the selected SSI counter number */
- /****************************************/
-
- if (b_SelectedSSI < 3) {
- /************************/
- /* Start the conversion */
- /************************/
-
- outl(0, devpriv->s_BoardInfos.
- ui_Address + 8 +
- (64 * b_ModulNbr));
-
- do {
- /*******************/
- /* Read the status */
- /*******************/
-
- dw_StatusReg =
- inl(devpriv->
- s_BoardInfos.
- ui_Address +
- (64 * b_ModulNbr));
- } while ((dw_StatusReg & 0x1)
- != 0);
-
- /******************************/
- /* Read the SSI counter value */
- /******************************/
-
- dw_CounterValue =
- inl(devpriv->
- s_BoardInfos.
- ui_Address + 4 +
- (b_SelectedSSI * 4) +
- (64 * b_ModulNbr));
-
- b_Length =
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_SSICounterInfo.
- b_SSIProfile / 2;
-
- if ((b_Length * 2) !=
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_SSICounterInfo.
- b_SSIProfile) {
- b_Length++;
- }
-
- b_Schift =
- b_Length -
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_SSICounterInfo.
- b_PositionTurnLength;
-
- *pul_Position1 =
- dw_CounterValue >>
- b_Schift;
-
- dw_And = 1;
-
- for (b_Cpt = 0;
- b_Cpt <
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_SSICounterInfo.
- b_PositionTurnLength;
- b_Cpt++) {
- dw_And = dw_And * 2;
- }
-
- *pul_Position1 =
- *pul_Position1 &
- ((dw_And) - 1);
-
- *pul_TurnCpt1 =
- dw_CounterValue >>
- b_Length;
-
- dw_And = 1;
-
- for (b_Cpt = 0;
- b_Cpt <
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_SSICounterInfo.
- b_TurnCptLength;
- b_Cpt++) {
- dw_And = dw_And * 2;
- }
-
- *pul_TurnCpt1 =
- *pul_TurnCpt1 &
- ((dw_And) - 1);
- } else {
- /*****************************/
- /* The selected SSI is wrong */
- /*****************************/
-
- DPRINTK("The selected SSI is wrong\n");
- i_ReturnValue = -5;
- }
- break;
-
- case APCI1710_SSI_READALLVALUE:
- dw_And1 = 1;
-
- for (b_Cpt = 0;
- b_Cpt <
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SSICounterInfo.
- b_PositionTurnLength; b_Cpt++) {
- dw_And1 = dw_And1 * 2;
- }
-
- dw_And2 = 1;
-
- for (b_Cpt = 0;
- b_Cpt <
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SSICounterInfo.
- b_TurnCptLength; b_Cpt++) {
- dw_And2 = dw_And2 * 2;
- }
-
- /************************/
- /* Start the conversion */
- /************************/
-
- outl(0, devpriv->s_BoardInfos.
- ui_Address + 8 +
- (64 * b_ModulNbr));
-
- do {
- /*******************/
- /* Read the status */
- /*******************/
-
- dw_StatusReg =
- inl(devpriv->
- s_BoardInfos.
- ui_Address +
- (64 * b_ModulNbr));
- } while ((dw_StatusReg & 0x1) != 0);
-
- for (b_SSICpt = 0; b_SSICpt < 3;
- b_SSICpt++) {
- /******************************/
- /* Read the SSI counter value */
- /******************************/
-
- dw_CounterValue =
- inl(devpriv->
- s_BoardInfos.
- ui_Address + 4 +
- (b_SSICpt * 4) +
- (64 * b_ModulNbr));
-
- b_Length =
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_SSICounterInfo.
- b_SSIProfile / 2;
-
- if ((b_Length * 2) !=
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_SSICounterInfo.
- b_SSIProfile) {
- b_Length++;
- }
-
- b_Schift =
- b_Length -
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_SSICounterInfo.
- b_PositionTurnLength;
-
- pul_Position[b_SSICpt] =
- dw_CounterValue >>
- b_Schift;
- pul_Position[b_SSICpt] =
- pul_Position[b_SSICpt] &
- ((dw_And1) - 1);
-
- pul_TurnCpt[b_SSICpt] =
- dw_CounterValue >>
- b_Length;
- pul_TurnCpt[b_SSICpt] =
- pul_TurnCpt[b_SSICpt] &
- ((dw_And2) - 1);
- }
- break;
-
- default:
- printk("Read Type Inputs Wrong\n");
-
- } /* switch ending */
-
- } else {
- /***********************/
- /* SSI not initialised */
- /***********************/
-
- DPRINTK("SSI not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /**********************************/
- /* The module is not a SSI module */
- /**********************************/
-
- DPRINTK("The module is not a SSI module\n");
- i_ReturnValue = -3;
-
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
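
Both read paths above unpack a raw SSI frame the same way: half the profile length, rounded up, separates the turn counter from the position bits. A compact sketch of that decoding (names are illustrative, the shifts and masks mirror the loops above):

static void ssi_decode(unsigned int raw, unsigned char profile_len,
		       unsigned char pos_len, unsigned char turn_len,
		       unsigned int *position, unsigned int *turns)
{
	unsigned char half = (profile_len + 1) / 2;	/* ceil(profile / 2) */

	*position = (raw >> (half - pos_len)) & ((1U << pos_len) - 1);
	*turns = (raw >> half) & ((1U << turn_len) - 1);
}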
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ReadSSI1DigitalInput |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_InputChannel, |
-| unsigned char *_ pb_ChannelStatus) |
-+----------------------------------------------------------------------------+
-| Task :
- (0) Set the digital output from selected SSI module |
-| (b_ModuleNbr) ON
- (1) Set the digital output from selected SSI module |
-| (b_ModuleNbr) OFF
- (2)Read the status from selected SSI digital input |
-| (b_InputChannel)
- (3)Read the status from all SSI digital inputs from |
-| selected SSI module (b_ModulNbr) |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710|
-| unsigned char_ b_ModulNbr CR_AREF : Module number to |
-| configure (0 to 3) |
-| unsigned char_ b_InputChannel CR_CHAN : Selection from digital |
-| data[0] which IOTYPE input ( 0 to 2) |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_ChannelStatus : Digital input channel |
-| data[0] status |
-|                                          0 : Channel is not active         |
-|                                          1 : Channel is active             |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The module parameter is wrong |
-| -3: The module is not a SSI module |
-| -4: The selected SSI digital input is wrong |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI1710_InsnBitsSSIDigitalIO(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_StatusReg;
- unsigned char b_ModulNbr;
- unsigned char b_InputChannel;
- unsigned char *pb_ChannelStatus;
- unsigned char *pb_InputStatus;
- unsigned char b_IOType;
-
- i_ReturnValue = insn->n;
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_IOType = (unsigned char) data[0];
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***********************/
- /* Test if SSI counter */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_SSI_COUNTER) {
- switch (b_IOType) {
- case APCI1710_SSI_SET_CHANNELON:
- /*****************************/
- /* Set the digital output ON */
- /*****************************/
-
- outl(1, devpriv->s_BoardInfos.ui_Address + 16 +
- (64 * b_ModulNbr));
- break;
-
- case APCI1710_SSI_SET_CHANNELOFF:
- /******************************/
- /* Set the digital output OFF */
- /******************************/
-
- outl(0, devpriv->s_BoardInfos.ui_Address + 16 +
- (64 * b_ModulNbr));
- break;
-
- case APCI1710_SSI_READ_1CHANNEL:
-			/*****************************************/
-			/* Test the digital input channel number */
-			/*****************************************/
-
- b_InputChannel = (unsigned char) CR_CHAN(insn->chanspec);
- pb_ChannelStatus = (unsigned char *) &data[0];
-
- if (b_InputChannel <= 2) {
- /**************************/
- /* Read all digital input */
- /**************************/
-
- dw_StatusReg =
- inl(devpriv->s_BoardInfos.
- ui_Address + (64 * b_ModulNbr));
- *pb_ChannelStatus =
- (unsigned char) (((~dw_StatusReg) >> (4 +
- b_InputChannel))
- & 1);
- } else {
- /********************************/
- /* Selected digital input error */
- /********************************/
-
- DPRINTK("Selected digital input error\n");
- i_ReturnValue = -4;
- }
- break;
-
- case APCI1710_SSI_READ_ALLCHANNEL:
- /**************************/
- /* Read all digital input */
- /**************************/
- pb_InputStatus = (unsigned char *) &data[0];
-
- dw_StatusReg =
- inl(devpriv->s_BoardInfos.ui_Address +
- (64 * b_ModulNbr));
- *pb_InputStatus =
- (unsigned char) (((~dw_StatusReg) >> 4) & 7);
- break;
-
- default:
- printk("IO type wrong\n");
-
- } /* switch end */
- } else {
- /**********************************/
- /* The module is not a SSI module */
- /**********************************/
-
- DPRINTK("The module is not a SSI module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
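
As the two read cases above show, the three SSI digital inputs appear inverted in bits 4..6 of the module status register, while the single digital output is driven through offset 16. A minimal decode sketch with hypothetical macro names:

#define SSI_DI_CHANNEL(status, chan)	((~(status) >> (4 + (chan))) & 1)	/* chan: 0..2 */
#define SSI_DI_ALL(status)		((~(status) >> 4) & 7)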
diff --git a/drivers/staging/comedi/drivers/addi-data/APCI1710_Tor.c b/drivers/staging/comedi/drivers/addi-data/APCI1710_Tor.c
deleted file mode 100644
index 3bc9826..0000000
--- a/drivers/staging/comedi/drivers/addi-data/APCI1710_Tor.c
+++ /dev/null
@@ -1,2069 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
-	Tel: +49(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-You should also find the complete GPL in the COPYING file accompanying this source code.
-
-@endverbatim
-*/
-/*
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-----------------------------------------------------------------------+
- | Project : API APCI1710 | Compiler : gcc |
- | Module name : TOR.C | Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-----------------------------------------------------------------------+
- | Description : APCI-1710 tor counter module |
- | |
- | |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +-----------------------------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | 27/01/99 | S. Weber | 40 MHz implementation |
- +-----------------------------------------------------------------------+
- | 28/04/00 | S. Weber | Simple,double and quadruple mode implementation|
- | | | Extern clock implementation |
- +-----------------------------------------------------------------------+
- | 08/05/00 | Guinot C | - 0400/0228 All Function in RING 0 |
- | | | available |
- +-----------------------------------------------------------------------+
-*/
-
-#define APCI1710_30MHZ 30
-#define APCI1710_33MHZ 33
-#define APCI1710_40MHZ 40
-
-#define APCI1710_GATE_INPUT 10
-
-#define APCI1710_TOR_SIMPLE_MODE 2
-#define APCI1710_TOR_DOUBLE_MODE 3
-#define APCI1710_TOR_QUADRUPLE_MODE 4
-
-#define APCI1710_SINGLE 0
-#define APCI1710_CONTINUOUS 1
-
-#define APCI1710_TOR_GETPROGRESSSTATUS 0
-#define APCI1710_TOR_GETCOUNTERVALUE 1
-#define APCI1710_TOR_READINTERRUPT 2
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_InitTorCounter |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_TorCounter, |
-| unsigned char_ b_PCIInputClock, |
-| unsigned char_ b_TimingUnit, |
-| ULONG_ ul_TimingInterval, |
-| PULONG_ pul_RealTimingInterval) |
-+----------------------------------------------------------------------------+
-| Task : Configure the selected tor counter (b_TorCounter) |
-| from selected module (b_ModulNbr). |
-| The ul_TimingInterval and ul_TimingUnit determine the |
-| timing base for the measurement. |
-|                     The pul_RealTimingInterval returns the real timing     |
-|                     value. You must call this function before you call     |
-|                     any other function which accesses the tor counter.     |
-| |
-+----------------------------------------------------------------------------+
-| Input Parameters : |
-|
- CR_AREF unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-| data[0] unsigned char_ b_TorCounter : Tor counter selection |
-| (0 or 1). |
-| data[1] unsigned char_ b_PCIInputClock : Selection from PCI bus clock|
-| - APCI1710_30MHZ : |
-| The PC have a PCI bus |
-| clock from 30 MHz |
-| - APCI1710_33MHZ : |
-| The PC have a PCI bus |
-| clock from 33 MHz |
-| - APCI1710_40MHZ |
-|                                                 The APCI-1710 has an      |
-|                                                 integrated 40 MHz         |
-|                                                 quartz.                   |
-| - APCI1710_GATE_INPUT |
-|                                                 Use the gate input for the|
-|                                                 base clock. If you have   |
-|                                                 selected this option, then|
-|                                                 it is not possible to use |
-|                                                 the gate input to enable  |
-|                                                 the acquisition           |
-| data[2] unsigned char_ b_TimingUnit : Base timing unit (0 to 4) |
-| 0 : ns |
-| 1 : µs |
-| 2 : ms |
-| 3 : s |
-| 4 : mn |
-| data[3] ULONG_ ul_TimingInterval : Base timing value. |
-+----------------------------------------------------------------------------+
-| Output Parameters : PULONG_ pul_RealTimingInterval : Real base timing |
-| data[0] value. |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a tor counter module |
-| -4: Tor counter selection is wrong |
-| -5: The selected PCI input clock is wrong |
-| -6: Timing unit selection is wrong |
-| -7: Base timing selection is wrong |
-|                     -8: You cannot use the 40MHz clock selection with      |
-|                         this board                                         |
-|                     -9: You cannot use the 40MHz clock selection with      |
-|                         this TOR version                                   |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI1710_InsnConfigInitTorCounter(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int ul_TimerValue = 0;
- unsigned int dw_Command;
- double d_RealTimingInterval = 0;
- unsigned char b_ModulNbr;
- unsigned char b_TorCounter;
- unsigned char b_PCIInputClock;
- unsigned char b_TimingUnit;
- unsigned int ul_TimingInterval;
- unsigned int ul_RealTimingInterval = 0;
-
- i_ReturnValue = insn->n;
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
-
- b_TorCounter = (unsigned char) data[0];
- b_PCIInputClock = (unsigned char) data[1];
- b_TimingUnit = (unsigned char) data[2];
- ul_TimingInterval = (unsigned int) data[3];
- printk("INPUT clock %d\n", b_PCIInputClock);
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***********************/
- /* Test if tor counter */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_TOR_COUNTER) {
- /**********************************/
- /* Test the tor counter selection */
- /**********************************/
-
- if (b_TorCounter <= 1) {
- /**************************/
- /* Test the PCI bus clock */
- /**************************/
-
- if ((b_PCIInputClock == APCI1710_30MHZ) ||
- (b_PCIInputClock == APCI1710_33MHZ) ||
- (b_PCIInputClock == APCI1710_40MHZ) ||
- (b_PCIInputClock ==
- APCI1710_GATE_INPUT)) {
- /************************/
- /* Test the timing unit */
- /************************/
-
- if ((b_TimingUnit <= 4)
- || (b_PCIInputClock ==
- APCI1710_GATE_INPUT)) {
- /**********************************/
- /* Test the base timing selection */
- /**********************************/
-
- if (((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 0) && (ul_TimingInterval >= 133) && (ul_TimingInterval <= 0xFFFFFFFFUL)) || ((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 1) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 571230650UL)) || ((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 2) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 571230UL)) || ((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 3) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 571UL)) || ((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 4) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 9UL)) || ((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 0) && (ul_TimingInterval >= 121) && (ul_TimingInterval <= 0xFFFFFFFFUL)) || ((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 1) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 519691043UL)) || ((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 2) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 519691UL)) || ((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 3) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 520UL)) || ((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 4) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 8UL)) || ((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 0) && (ul_TimingInterval >= 100) && (ul_TimingInterval <= 0xFFFFFFFFUL)) || ((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 1) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 429496729UL)) || ((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 2) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 429496UL)) || ((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 3) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 429UL)) || ((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 4) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 7UL)) || ((b_PCIInputClock == APCI1710_GATE_INPUT) && (ul_TimingInterval >= 2))) {
- /**************************/
- /* Test the board version */
- /**************************/
-
- if (((b_PCIInputClock == APCI1710_40MHZ) && (devpriv->s_BoardInfos.b_BoardVersion > 0)) || (b_PCIInputClock != APCI1710_40MHZ)) {
- /************************/
- /* Test the TOR version */
- /************************/
-
- if (((b_PCIInputClock == APCI1710_40MHZ) && ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF) >= 0x3131)) || ((b_PCIInputClock == APCI1710_GATE_INPUT) && ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF) >= 0x3132)) || (b_PCIInputClock == APCI1710_30MHZ) || (b_PCIInputClock == APCI1710_33MHZ)) {
- /*********************************/
- /* Test if not extern clock used */
- /*********************************/
-
- if (b_PCIInputClock != APCI1710_GATE_INPUT) {
-					fpu_begin();
-
-					/* Calculate the timer 0 division factor */
-
-					switch (b_TimingUnit) {
-					/* ns */
-					case 0:
-						/* Timer 0 factor */
-						ul_TimerValue = (unsigned int) (ul_TimingInterval * (0.00025 * b_PCIInputClock));
-
-						/* Round the value */
-						if ((double)((double)ul_TimingInterval * (0.00025 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
-							ul_TimerValue = ul_TimerValue + 1;
-						}
-
-						/* Calculate the real timing */
-						ul_RealTimingInterval = (unsigned int) (ul_TimerValue / (0.00025 * (double)b_PCIInputClock));
-						d_RealTimingInterval = (double) ul_TimerValue / (0.00025 * (double) b_PCIInputClock);
-
-						if ((double)((double)ul_TimerValue / (0.00025 * (double)b_PCIInputClock)) >= (double)((double)ul_RealTimingInterval + 0.5)) {
-							ul_RealTimingInterval = ul_RealTimingInterval + 1;
-						}
-
-						ul_TimingInterval = ul_TimingInterval - 1;
-						ul_TimerValue = ul_TimerValue - 2;
-
-						if (b_PCIInputClock != APCI1710_40MHZ) {
-							ul_TimerValue = (unsigned int) ((double) (ul_TimerValue) * 1.007752288);
-						}
-
-						break;
-
-					/* µs */
-					case 1:
-						/* Timer 0 factor */
-						ul_TimerValue = (unsigned int) (ul_TimingInterval * (0.25 * b_PCIInputClock));
-
-						/* Round the value */
-						if ((double)((double)ul_TimingInterval * (0.25 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
-							ul_TimerValue = ul_TimerValue + 1;
-						}
-
-						/* Calculate the real timing */
-						ul_RealTimingInterval = (unsigned int) (ul_TimerValue / (0.25 * (double)b_PCIInputClock));
-						d_RealTimingInterval = (double) ul_TimerValue / ((double) 0.25 * (double) b_PCIInputClock);
-
-						if ((double)((double)ul_TimerValue / (0.25 * (double)b_PCIInputClock)) >= (double)((double)ul_RealTimingInterval + 0.5)) {
-							ul_RealTimingInterval = ul_RealTimingInterval + 1;
-						}
-
-						ul_TimingInterval = ul_TimingInterval - 1;
-						ul_TimerValue = ul_TimerValue - 2;
-
-						if (b_PCIInputClock != APCI1710_40MHZ) {
-							ul_TimerValue = (unsigned int) ((double) (ul_TimerValue) * 1.007752288);
-						}
-
-						break;
-
-					/* ms */
-					case 2:
-						/* Timer 0 factor */
-						ul_TimerValue = ul_TimingInterval * (250.0 * b_PCIInputClock);
-
-						/* Round the value */
-						if ((double)((double)ul_TimingInterval * (250.0 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
-							ul_TimerValue = ul_TimerValue + 1;
-						}
-
-						/* Calculate the real timing */
-						ul_RealTimingInterval = (unsigned int) (ul_TimerValue / (250.0 * (double)b_PCIInputClock));
-						d_RealTimingInterval = (double) ul_TimerValue / (250.0 * (double) b_PCIInputClock);
-
-						if ((double)((double)ul_TimerValue / (250.0 * (double)b_PCIInputClock)) >= (double)((double)ul_RealTimingInterval + 0.5)) {
-							ul_RealTimingInterval = ul_RealTimingInterval + 1;
-						}
-
-						ul_TimingInterval = ul_TimingInterval - 1;
-						ul_TimerValue = ul_TimerValue - 2;
-
-						if (b_PCIInputClock != APCI1710_40MHZ) {
-							ul_TimerValue = (unsigned int) ((double) (ul_TimerValue) * 1.007752288);
-						}
-
-						break;
-
-					/* s */
-					case 3:
-						/* Timer 0 factor */
-						ul_TimerValue = (unsigned int) (ul_TimingInterval * (250000.0 * b_PCIInputClock));
-
-						/* Round the value */
-						if ((double)((double)ul_TimingInterval * (250000.0 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
-							ul_TimerValue = ul_TimerValue + 1;
-						}
-
-						/* Calculate the real timing */
-						ul_RealTimingInterval = (unsigned int) (ul_TimerValue / (250000.0 * (double) b_PCIInputClock));
-						d_RealTimingInterval = (double) ul_TimerValue / (250000.0 * (double) b_PCIInputClock);
-
-						if ((double)((double)ul_TimerValue / (250000.0 * (double)b_PCIInputClock)) >= (double)((double)ul_RealTimingInterval + 0.5)) {
-							ul_RealTimingInterval = ul_RealTimingInterval + 1;
-						}
-
-						ul_TimingInterval = ul_TimingInterval - 1;
-						ul_TimerValue = ul_TimerValue - 2;
-
-						if (b_PCIInputClock != APCI1710_40MHZ) {
-							ul_TimerValue = (unsigned int) ((double) (ul_TimerValue) * 1.007752288);
-						}
-
-						break;
-
-					/* mn */
-					case 4:
-						/* Timer 0 factor */
-						ul_TimerValue = (unsigned int) ((ul_TimingInterval * 60) * (250000.0 * b_PCIInputClock));
-
-						/* Round the value */
-						if ((double)((double)(ul_TimingInterval * 60.0) * (250000.0 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
-							ul_TimerValue = ul_TimerValue + 1;
-						}
-
-						/* Calculate the real timing */
-						ul_RealTimingInterval = (unsigned int) (ul_TimerValue / (250000.0 * (double) b_PCIInputClock)) / 60;
-						d_RealTimingInterval = ((double) ul_TimerValue / (250000.0 * (double) b_PCIInputClock)) / 60.0;
-
-						if ((double)(((double)ul_TimerValue / (250000.0 * (double)b_PCIInputClock)) / 60.0) >= (double)((double)ul_RealTimingInterval + 0.5)) {
-							ul_RealTimingInterval = ul_RealTimingInterval + 1;
-						}
-
-						ul_TimingInterval = ul_TimingInterval - 1;
-						ul_TimerValue = ul_TimerValue - 2;
-
-						if (b_PCIInputClock != APCI1710_40MHZ) {
-							ul_TimerValue = (unsigned int) ((double) (ul_TimerValue) * 1.007752288);
-						}
-
-						break;
-					}
-
- fpu_end();
- } /* if (b_PCIInputClock != APCI1710_GATE_INPUT) */
- else {
- /*************************************************************/
- /* 2 Clock used for the overflow and the reload from counter */
- /*************************************************************/
-
-						ul_TimerValue = ul_TimingInterval - 2;
- } /* if (b_PCIInputClock != APCI1710_GATE_INPUT) */
-
-					/* Save the PCI input clock */
-					devpriv->s_ModuleInfo[b_ModulNbr].
-						s_TorCounterModuleInfo.
-						b_PCIInputClock = b_PCIInputClock;
-
-					/* Save the timing unit */
-					devpriv->s_ModuleInfo[b_ModulNbr].
-						s_TorCounterModuleInfo.
-						s_TorCounterInfo[b_TorCounter].
-						b_TimingUnit = b_TimingUnit;
-
-					/* Save the base timing */
-					devpriv->s_ModuleInfo[b_ModulNbr].
-						s_TorCounterModuleInfo.
-						s_TorCounterInfo[b_TorCounter].
-						d_TimingInterval = d_RealTimingInterval;
-
-					devpriv->s_ModuleInfo[b_ModulNbr].
-						s_TorCounterModuleInfo.
-						s_TorCounterInfo[b_TorCounter].
-						ul_RealTimingInterval = ul_RealTimingInterval;
-
-					/* Get the command */
-					dw_Command = inl(devpriv->s_BoardInfos.ui_Address + 4 +
-						(16 * b_TorCounter) + (64 * b_ModulNbr));
-
-					dw_Command = (dw_Command >> 4) & 0xF;
-
-					/* Test if 40 MHz */
-					if (b_PCIInputClock == APCI1710_40MHZ) {
-						/* Set the 40 MHz selection */
-						dw_Command = dw_Command | 0x10;
-					}
-
-					/* Test if extern clock used */
-					if (b_PCIInputClock == APCI1710_GATE_INPUT) {
-						/* Set the extern clock selection */
-						dw_Command = dw_Command | 0x20;
-					}
-
-					/* Write the new command */
-					outl(dw_Command, devpriv->s_BoardInfos.ui_Address + 4 + (16 * b_TorCounter) + (64 * b_ModulNbr));
-
-					/* Disable the tor */
-					outl(0, devpriv->s_BoardInfos.ui_Address + 8 + (16 * b_TorCounter) + (64 * b_ModulNbr));
-
-					/* Set the timer 1 value */
-					outl(ul_TimerValue, devpriv->s_BoardInfos.ui_Address + 0 + (16 * b_TorCounter) + (64 * b_ModulNbr));
-
-					/* Tor counter init. */
-					devpriv->s_ModuleInfo[b_ModulNbr].
-						s_TorCounterModuleInfo.
-						s_TorCounterInfo[b_TorCounter].
-						b_TorCounterInit = 1;
-				} else {
-					/* TOR version error for 40MHz clock selection */
-
-					DPRINTK("TOR version error for 40MHz clock selection\n");
-					i_ReturnValue = -9;
-				}
-			} else {
-				/* You cannot use the 40MHz clock selection with this board */
-
-				DPRINTK("You cannot use the 40MHz clock selection with this board\n");
-				i_ReturnValue = -8;
-			}
- } else {
- /**********************************/
- /* Base timing selection is wrong */
- /**********************************/
-
- DPRINTK("Base timing selection is wrong\n");
- i_ReturnValue = -7;
- }
- } /* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */
- else {
- /**********************************/
- /* Timing unit selection is wrong */
- /**********************************/
-
- DPRINTK("Timing unit selection is wrong\n");
- i_ReturnValue = -6;
- } /* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */
-			} /* if PCI input clock is 30MHz, 33MHz, 40MHz or APCI1710_GATE_INPUT */
-			else {
-				/* The selected PCI input clock is wrong */
-
-				DPRINTK("The selected PCI input clock is wrong\n");
-				i_ReturnValue = -5;
-			} /* if PCI input clock is 30MHz, 33MHz, 40MHz or APCI1710_GATE_INPUT */
-		} /* if (b_TorCounter <= 1) */
-		else {
-			/* Tor counter selection is wrong */
-
-			DPRINTK("Tor counter selection is wrong\n");
-			i_ReturnValue = -4;
-		} /* if (b_TorCounter <= 1) */
- } else {
- /******************************************/
- /* The module is not a tor counter module */
- /******************************************/
-
- DPRINTK("The module is not a tor counter module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
- data[0] = (unsigned int) ul_RealTimingInterval;
- return i_ReturnValue;
-}
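
The ns/µs/ms/s/mn cases above all follow one pattern: scale the requested interval by a unit-dependent factor, round to the nearest tick, recompute the interval actually obtained, subtract two ticks and, for clocks other than 40 MHz, apply a fixed 1.007752288 correction. Below is a minimal stand-alone sketch of that arithmetic for the µs case; the helper name and the printf demonstration are illustrative, while the 0.25 * clock factor and the correction constant are copied verbatim from the case bodies above.

	#include <stdio.h>

	/* Mirror of the "Timer 0 factor" / "Round the value" /
	 * "Calculate the real timing" steps of case 1 (µs) above. */
	static unsigned int tor_timer_value_us(unsigned int interval_us,
					       unsigned int clock_mhz,
					       unsigned int *real_interval_us)
	{
		double factor = 0.25 * (double)clock_mhz;
		unsigned int ticks = (unsigned int)(interval_us * factor);
		unsigned int real;

		/* round the tick count to the nearest integer */
		if ((double)interval_us * factor >= (double)ticks + 0.5)
			ticks++;

		/* interval the rounded tick count actually corresponds to */
		real = (unsigned int)(ticks / factor);
		if ((double)ticks / factor >= (double)real + 0.5)
			real++;
		*real_interval_us = real;

		/* the driver subtracts two ticks and, for non-40 MHz clocks,
		 * applies the fixed 1.007752288 correction before writing */
		ticks -= 2;
		if (clock_mhz != 40)
			ticks = (unsigned int)((double)ticks * 1.007752288);

		return ticks;
	}

	int main(void)
	{
		unsigned int real;
		unsigned int ticks = tor_timer_value_us(1000, 33, &real);

		printf("1000 us at 33 MHz -> ticks=%u, real interval=%u us\n",
		       ticks, real);
		return 0;
	}
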
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_EnableTorCounter |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_TorCounter, |
-| unsigned char_ b_InputMode, |
-| unsigned char_ b_ExternGate, |
-| unsigned char_ b_CycleMode, |
-| unsigned char_ b_InterruptEnable) |
-+----------------------------------------------------------------------------+
-| Task              : Enable the tor counter (b_TorCounter) from selected   |
-|                     module (b_ModulNbr). You must call the                |
-|                     "i_APCI1710_InitTorCounter" function before you call  |
-|                     this function.                                        |
-|                     If you enable the tor counter interrupt, the          |
-|                     tor counter generates an interrupt after the timing   |
-|                     cycle. See function "i_APCI1710_SetBoardIntRoutineX"  |
-|                     and the interrupt mask description chapter of this    |
-|                     manual.                                               |
-|                     The b_CycleMode parameter determines whether a single |
-|                     cycle or several cycles are measured.                 |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3) |
-| unsigned char_ b_TorCounter : Tor counter selection (0 or 1). |
-| unsigned char_ b_InputMode : Input signal level selection |
-| 0 : Tor count each low level |
-| 1 : Tor count each high level|
-| unsigned char_ b_ExternGate : Extern gate action selection |
-| 0 : Extern gate signal not |
-| used |
-| 1 : Extern gate signal used. |
-| If you selected the |
-| single mode, each high |
-| level signal start the |
-| counter. |
-| If you selected the |
-| continuous mode, the |
-| first high level signal |
-| start the tor counter |
-| |
-|                                 APCI1710_TOR_QUADRUPLE_MODE :  |
-| In the quadruple mode, the edge|
-| analysis circuit generates a |
-| counting pulse from each edge |
-| of 2 signals which are phase |
-| shifted in relation to each |
-| other. |
-| The gate input is used for the |
-| signal B |
-| |
-| APCI1710_TOR_DOUBLE_MODE: |
-| Functions in the same way as |
-| the quadruple mode, except that|
-| only two of the four edges are |
-| analysed per period. |
-| The gate input is used for the |
-| signal B |
-| |
-| APCI1710_TOR_SIMPLE_MODE: |
-| Functions in the same way as |
-| the quadruple mode, except that|
-| only one of the four edges is |
-| analysed per period. |
-| The gate input is used for the |
-| signal B |
-| |
-| unsigned char_ b_CycleMode : Selected the tor counter |
-| acquisition mode |
-| unsigned char_ b_InterruptEnable : Enable or disable the |
-| tor counter interrupt. |
-| APCI1710_ENABLE: |
-| Enable the tor counter |
-| interrupt |
-| APCI1710_DISABLE: |
-| Disable the tor counter |
-| interrupt |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a tor counter module |
-| -4: Tor counter selection is wrong |
-| -5: Tor counter not initialised see function |
-| "i_APCI1710_InitTorCounter" |
-| -6: Tor input signal selection is wrong |
-| -7: Extern gate signal mode is wrong |
-| -8: Tor counter acquisition mode cycle is wrong |
-| -9: Interrupt parameter is wrong |
-| -10:Interrupt function not initialised. |
-| See function "i_APCI1710_SetBoardIntRoutineX" |
-+----------------------------------------------------------------------------+
-*/
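
A hypothetical user-space sketch (comedilib) of how the enable/disable handler further below expects its data[] to be packed, mirroring the assignments at the top of i_APCI1710_InsnWriteEnableDisableTorCounter. The device handle, the subdevice number and the numeric values of the APCI1710_* mode constants are assumptions that must come from elsewhere; only the ordering of the six data words is taken from this file.

	#include <string.h>
	#include <comedilib.h>

	static int tor_counter_enable_disable(comedi_t *dev, unsigned int subdev,
					      unsigned int module, unsigned int counter,
					      unsigned int action, unsigned int input_mode,
					      unsigned int extern_gate, unsigned int cycle_mode,
					      unsigned int irq_enable)
	{
		lsampl_t data[6];
		comedi_insn insn;

		data[0] = action;	/* APCI1710_ENABLE or APCI1710_DISABLE     */
		data[1] = counter;	/* tor counter selection (0 or 1)          */
		data[2] = input_mode;	/* 0, 1 or one of the APCI1710_TOR_*_MODEs */
		data[3] = extern_gate;	/* extern gate used (0 or 1)               */
		data[4] = cycle_mode;	/* APCI1710_SINGLE or APCI1710_CONTINUOUS  */
		data[5] = irq_enable;	/* APCI1710_ENABLE or APCI1710_DISABLE     */

		memset(&insn, 0, sizeof(insn));
		insn.insn = INSN_WRITE;
		insn.n = 6;
		insn.data = data;
		insn.subdev = subdev;
		insn.chanspec = CR_PACK(0, 0, module);	/* module number travels in the aref field */

		return comedi_do_insn(dev, &insn);
	}
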
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_DisableTorCounter |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_TorCounter) |
-+----------------------------------------------------------------------------+
-| Task              : Disable the tor counter (b_TorCounter) from selected  |
-|                     module (b_ModulNbr). If you disable the tor counter   |
-|                     after a start cycle has occurred and you restart the  |
-|                     tor counter with the "i_APCI1710_EnableTorCounter"    |
-|                     function, the status register is cleared.             |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3) |
-| unsigned char_ b_TorCounter : Tor counter selection (0 or 1). |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a tor counter module |
-| -4: Tor counter selection is wrong |
-| -5: Tor counter not initialised see function |
-| "i_APCI1710_InitTorCounter" |
-| -6: Tor counter not enabled see function |
-| "i_APCI1710_EnableTorCounter" |
-+----------------------------------------------------------------------------+
-*/
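
Register addressing used throughout these tor-counter handlers: every module occupies 64 bytes of I/O space and each of its two tor counters a 16-byte slice. The offsets below are inferred from the inl()/outl() calls in this file; the enum and helper names are illustrative only, not from the driver.

	enum tor_reg {
		TOR_REG_VALUE      = 0,		/* timer value / latched counter value          */
		TOR_REG_COMMAND    = 4,		/* command / configuration, progress status     */
		TOR_REG_GATE       = 8,		/* software gate; bit 0 = enabled, bit 4 = init */
		TOR_REG_IRQ_STATUS = 12,	/* interrupt status, read to clear              */
	};

	static unsigned long tor_reg_addr(unsigned long base, unsigned int module,
					  unsigned int counter, enum tor_reg reg)
	{
		return base + (64 * module) + (16 * counter) + reg;
	}
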
-
-static int i_APCI1710_InsnWriteEnableDisableTorCounter(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_Status;
- unsigned int dw_DummyRead;
- unsigned int dw_ConfigReg;
- unsigned char b_ModulNbr, b_Action;
- unsigned char b_TorCounter;
- unsigned char b_InputMode;
- unsigned char b_ExternGate;
- unsigned char b_CycleMode;
- unsigned char b_InterruptEnable;
-
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_Action = (unsigned char) data[0]; /* enable or disable */
- b_TorCounter = (unsigned char) data[1];
- b_InputMode = (unsigned char) data[2];
- b_ExternGate = (unsigned char) data[3];
- b_CycleMode = (unsigned char) data[4];
- b_InterruptEnable = (unsigned char) data[5];
- i_ReturnValue = insn->n;
- devpriv->tsk_Current = current; /* Save the current process task structure */
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***********************/
- /* Test if tor counter */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_TOR_COUNTER) {
- /**********************************/
- /* Test the tor counter selection */
- /**********************************/
-
- if (b_TorCounter <= 1) {
- switch (b_Action) /* Enable or Disable */
- {
- case APCI1710_ENABLE:
- /***********************************/
- /* Test if tor counter initialised */
- /***********************************/
-
- dw_Status =
- inl(devpriv->s_BoardInfos.
- ui_Address + 8 +
- (16 * b_TorCounter) +
- (64 * b_ModulNbr));
-
- if (dw_Status & 0x10) {
- /******************************/
- /* Test the input signal mode */
- /******************************/
-
- if (b_InputMode == 0 ||
- b_InputMode == 1 ||
- b_InputMode ==
- APCI1710_TOR_SIMPLE_MODE
- || b_InputMode ==
- APCI1710_TOR_DOUBLE_MODE
- || b_InputMode ==
- APCI1710_TOR_QUADRUPLE_MODE)
- {
- /************************************/
- /* Test the extern gate signal mode */
- /************************************/
-
- if (b_ExternGate == 0
- || b_ExternGate
- == 1
- || b_InputMode >
- 1) {
- /*********************************/
- /* Test the cycle mode parameter */
- /*********************************/
-
- if ((b_CycleMode == APCI1710_SINGLE) || (b_CycleMode == APCI1710_CONTINUOUS)) {
- /***************************/
- /* Test the interrupt flag */
- /***************************/
-
- if ((b_InterruptEnable == APCI1710_ENABLE) || (b_InterruptEnable == APCI1710_DISABLE)) {
-
-							/* Save the interrupt mode */
-							devpriv->s_ModuleInfo[b_ModulNbr].
-								s_TorCounterModuleInfo.
-								s_TorCounterInfo[b_TorCounter].
-								b_InterruptEnable = b_InterruptEnable;
-
-							/* Get the command */
-							dw_ConfigReg = inl(devpriv->s_BoardInfos.ui_Address + 4 +
-								(16 * b_TorCounter) + (64 * b_ModulNbr));
-
-							dw_ConfigReg = (dw_ConfigReg >> 4) & 0x30;
-
-							/* Test if not direct mode used */
-							if (b_InputMode > 1) {
-								/* Extern gate can not be used */
-								b_ExternGate = 0;
-
-								/* Enable the extern gate for the signal B */
-								dw_ConfigReg = dw_ConfigReg | 0x40;
-
-								/* Test if simple mode */
-								if (b_InputMode == APCI1710_TOR_SIMPLE_MODE) {
-									/* Enable the simple mode */
-									dw_ConfigReg = dw_ConfigReg | 0x780;
-								} /* if (b_InputMode == APCI1710_TOR_SIMPLE_MODE) */
-
-								/* Test if double mode */
-								if (b_InputMode == APCI1710_TOR_DOUBLE_MODE) {
-									/* Enable the double mode */
-									dw_ConfigReg = dw_ConfigReg | 0x180;
-								} /* if (b_InputMode == APCI1710_TOR_DOUBLE_MODE) */
-
-								b_InputMode = 0;
-							} /* if (b_InputMode > 1) */
-
-							/* Set the command */
-							dw_ConfigReg = dw_ConfigReg | b_CycleMode |
-								(b_InterruptEnable * 2) |
-								(b_InputMode * 4) |
-								(b_ExternGate * 8);
-
-							/* Clear the status register */
-							dw_DummyRead = inl(devpriv->s_BoardInfos.ui_Address + 0 +
-								(16 * b_TorCounter) + (64 * b_ModulNbr));
-
-							/* Clear the interrupt status register */
-							dw_DummyRead = inl(devpriv->s_BoardInfos.ui_Address + 12 +
-								(16 * b_TorCounter) + (64 * b_ModulNbr));
-
-							/* Set the command */
-							outl(dw_ConfigReg, devpriv->s_BoardInfos.ui_Address + 4 + (16 * b_TorCounter) + (64 * b_ModulNbr));
-
-							/* Set the gate */
-							outl(1, devpriv->s_BoardInfos.ui_Address + 8 + (16 * b_TorCounter) + (64 * b_ModulNbr));
-
- } /* if ((b_InterruptEnable == APCI1710_ENABLE) || (b_InterruptEnable == APCI1710_DISABLE)) */
-						else {
-							/* Interrupt parameter is wrong */
-
-							DPRINTK("Interrupt parameter is wrong\n");
-							i_ReturnValue = -9;
-						} /* if ((b_InterruptEnable == APCI1710_ENABLE) || (b_InterruptEnable == APCI1710_DISABLE)) */
-					} /* if ((b_CycleMode == APCI1710_SINGLE) || (b_CycleMode == APCI1710_CONTINUOUS)) */
-					else {
-						/* Tor counter acquisition mode cycle is wrong */
-
-						DPRINTK("Tor counter acquisition mode cycle is wrong\n");
-						i_ReturnValue = -8;
-					} /* if ((b_CycleMode == APCI1710_SINGLE) || (b_CycleMode == APCI1710_CONTINUOUS)) */
-				} /* if (b_ExternGate >= 0 && b_ExternGate <= 1) */
-				else {
-					/* Extern gate input mode is wrong */
-
-					DPRINTK("Extern gate input mode is wrong\n");
-					i_ReturnValue = -7;
-				} /* if (b_ExternGate >= 0 && b_ExternGate <= 1) */
- } /* if (b_InputMode >= 0 && b_InputMode <= 1) */
- else {
- /***************************************/
- /* Tor input signal selection is wrong */
- /***************************************/
-
- DPRINTK("Tor input signal selection is wrong\n");
- i_ReturnValue = -6;
- }
- } else {
- /*******************************/
- /* Tor counter not initialised */
- /*******************************/
-
- DPRINTK("Tor counter not initialised\n");
- i_ReturnValue = -5;
- }
- break;
-
- case APCI1710_DISABLE:
- /***********************************/
- /* Test if tor counter initialised */
- /***********************************/
-
- dw_Status = inl(devpriv->s_BoardInfos.
- ui_Address + 8 +
- (16 * b_TorCounter) +
- (64 * b_ModulNbr));
-
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (dw_Status & 0x10) {
- /***************************/
- /* Test if counter enabled */
- /***************************/
-
- if (dw_Status & 0x1) {
-						/* Clear the interrupt mode */
-						devpriv->s_ModuleInfo[b_ModulNbr].
-							s_TorCounterModuleInfo.
-							s_TorCounterInfo[b_TorCounter].
-							b_InterruptEnable = APCI1710_DISABLE;
-
-						/* Clear the gate */
-						outl(0, devpriv->s_BoardInfos.ui_Address + 8 +
-							(16 * b_TorCounter) + (64 * b_ModulNbr));
- } /* if (dw_Status & 0x1) */
- else {
- /***************************/
- /* Tor counter not enabled */
- /***************************/
-
- DPRINTK("Tor counter not enabled \n");
- i_ReturnValue = -6;
- } /* if (dw_Status & 0x1) */
- } /* if (dw_Status & 0x10) */
- else {
- /*******************************/
- /* Tor counter not initialised */
- /*******************************/
-
- DPRINTK("Tor counter not initialised\n");
- i_ReturnValue = -5;
-					} /* if (dw_Status & 0x10) */
-
- } /* switch */
- } /* if (b_TorCounter <= 1) */
- else {
- /**********************************/
- /* Tor counter selection is wrong */
- /**********************************/
-
- DPRINTK("Tor counter selection is wrong\n");
- i_ReturnValue = -4;
- } /* if (b_TorCounter <= 1) */
- } else {
- /******************************************/
- /* The module is not a tor counter module */
- /******************************************/
-
- DPRINTK("The module is not a tor counter module \n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error \n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
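
Sketch of the configuration word the enable path above builds before writing it to the command register (offset 4). The low nibble is the OR of b_CycleMode, b_InterruptEnable * 2, b_InputMode * 4 and b_ExternGate * 8; the mask values below are the literal constants used in this file, but the macro and helper names are illustrative, not from the driver.

	#define TOR_CFG_CLK_40MHZ		0x010	/* 40 MHz selection (init path above)      */
	#define TOR_CFG_EXTERN_CLOCK		0x020	/* extern clock via gate (init path above) */
	#define TOR_CFG_GATE_IS_SIGNAL_B	0x040	/* gate input carries signal B             */
	#define TOR_CFG_DOUBLE_MODE		0x180	/* double edge analysis                    */
	#define TOR_CFG_SIMPLE_MODE		0x780	/* simple edge analysis                    */

	/* Low-nibble composition exactly as performed by the enable handler. */
	static unsigned int tor_cfg_low_nibble(unsigned int cycle_mode,
					       unsigned int irq_enable,
					       unsigned int input_mode,
					       unsigned int extern_gate)
	{
		return cycle_mode | (irq_enable * 2) | (input_mode * 4) | (extern_gate * 8);
	}
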
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_GetTorCounterInitialisation |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_TorCounter, |
-| unsigned char *_ pb_TimingUnit, |
-| PULONG_ pul_TimingInterval, |
-| unsigned char *_ pb_InputMode, |
-| unsigned char *_ pb_ExternGate, |
-| unsigned char *_ pb_CycleMode, |
-| unsigned char *_ pb_Enable, |
-| unsigned char *_ pb_InterruptEnable)|
-+----------------------------------------------------------------------------+
-| Task              : Return the initialisation parameters of the tor       |
-|                     counter (b_TorCounter) from the selected module       |
-|                     (b_ModulNbr). You must call the                       |
-|                     "i_APCI1710_InitTorCounter" function before you call  |
-|                     this function.                                        |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3) |
-| unsigned char_ b_TorCounter : Tor counter selection (0 or 1)
-
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_TorCounter = CR_CHAN(insn->chanspec);
-. |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_TimingUnit : Base timing unit (0 to 4) |
-| 0 : ns |
-| 1 : µs |
-| 2 : ms |
-| 3 : s |
-| 4 : mn |
-| PULONG_ pul_TimingInterval : Base timing value. |
-| unsigned char *_ pb_InputMode : Input signal level |
-| selection |
-| 0 : Tor count each low level |
-| 1 : Tor count each high level|
-| unsigned char *_ pb_ExternGate : Extern gate action |
-| selection |
-| 0 : Extern gate signal not |
-| used |
-| 1 : Extern gate signal used|
-| unsigned char *_ pb_CycleMode : Tor counter acquisition |
-| mode |
-| unsigned char *_ pb_Enable : Indicate if the tor counter|
-| is enabled or no |
-| 0 : Tor counter disabled |
-| 1 : Tor counter enabled |
-| unsigned char *_ pb_InterruptEnable : Enable or disable the |
-| tor counter interrupt. |
-| APCI1710_ENABLE: |
-| Enable the tor counter |
-| interrupt |
-| APCI1710_DISABLE: |
-| Disable the tor counter |
-| interrupt
- pb_TimingUnit = (unsigned char *) &data[0];
- pul_TimingInterval = (unsigned int *) &data[1];
- pb_InputMode = (unsigned char *) &data[2];
- pb_ExternGate = (unsigned char *) &data[3];
- pb_CycleMode = (unsigned char *) &data[4];
- pb_Enable = (unsigned char *) &data[5];
- pb_InterruptEnable = (unsigned char *) &data[6];
- |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a tor counter module |
-| -4: Tor counter selection is wrong |
-| -5: Tor counter not initialised see function |
-| "i_APCI1710_InitTorCounter" |
-+----------------------------------------------------------------------------+
-*/
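
Sketch of the command-register decoding the handler below performs when reporting the current configuration. The bit positions are taken from the shifts and masks in that handler; the struct and function names are illustrative only, and the APCI1710_TOR_*_MODE values are passed in rather than assumed.

	struct tor_config {
		unsigned char cycle_mode;
		unsigned char irq_enable;
		unsigned char input_mode;	/* 0/1 or one of the APCI1710_TOR_*_MODE values */
		unsigned char extern_gate;
	};

	static void tor_decode_command(unsigned int cmd, struct tor_config *cfg,
				       unsigned char simple_mode, unsigned char double_mode,
				       unsigned char quadruple_mode)
	{
		cfg->cycle_mode = (cmd >> 4) & 1;
		cfg->irq_enable = (cmd >> 5) & 1;

		if (cmd & 0x600) {		/* gate input used for clock or signal B */
			cfg->extern_gate = 0;
			if (cmd & 0x400) {	/* gate input carries signal B */
				if ((cmd & 0x7800) == 0x7800)
					cfg->input_mode = simple_mode;
				else if ((cmd & 0x7800) == 0x1800)
					cfg->input_mode = double_mode;
				else if ((cmd & 0x7800) == 0x0000)
					cfg->input_mode = quadruple_mode;
			} else {
				cfg->input_mode = 1;
			}
		} else {
			cfg->input_mode = (cmd >> 6) & 1;
			cfg->extern_gate = (cmd >> 7) & 1;
		}
	}
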
-
-static int i_APCI1710_InsnReadGetTorCounterInitialisation(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_Status;
- unsigned char b_ModulNbr;
- unsigned char b_TorCounter;
- unsigned char *pb_TimingUnit;
- unsigned int *pul_TimingInterval;
- unsigned char *pb_InputMode;
- unsigned char *pb_ExternGate;
- unsigned char *pb_CycleMode;
- unsigned char *pb_Enable;
- unsigned char *pb_InterruptEnable;
-
- i_ReturnValue = insn->n;
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_TorCounter = CR_CHAN(insn->chanspec);
-
- pb_TimingUnit = (unsigned char *) &data[0];
- pul_TimingInterval = (unsigned int *) &data[1];
- pb_InputMode = (unsigned char *) &data[2];
- pb_ExternGate = (unsigned char *) &data[3];
- pb_CycleMode = (unsigned char *) &data[4];
- pb_Enable = (unsigned char *) &data[5];
- pb_InterruptEnable = (unsigned char *) &data[6];
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***********************/
- /* Test if tor counter */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_TOR_COUNTER) {
- /**********************************/
- /* Test the tor counter selection */
- /**********************************/
-
- if (b_TorCounter <= 1) {
-
- /***********************************/
- /* Test if tor counter initialised */
- /***********************************/
-
- dw_Status = inl(devpriv->s_BoardInfos.
- ui_Address + 8 + (16 * b_TorCounter) +
- (64 * b_ModulNbr));
-
- if (dw_Status & 0x10) {
- *pb_Enable = dw_Status & 1;
-
-					/*******************/
-					/* Get the command */
-					/*******************/
-
- dw_Status = inl(devpriv->s_BoardInfos.
- ui_Address + 4 +
- (16 * b_TorCounter) +
- (64 * b_ModulNbr));
-
- *pb_CycleMode =
- (unsigned char) ((dw_Status >> 4) & 1);
- *pb_InterruptEnable =
- (unsigned char) ((dw_Status >> 5) & 1);
-
- /******************************************************/
- /* Test if extern gate used for clock or for signal B */
- /******************************************************/
-
- if (dw_Status & 0x600) {
- /*****************************************/
- /* Test if extern gate used for signal B */
- /*****************************************/
-
- if (dw_Status & 0x400) {
- /***********************/
- /* Test if simple mode */
- /***********************/
-
- if ((dw_Status & 0x7800)
- == 0x7800) {
- *pb_InputMode =
- APCI1710_TOR_SIMPLE_MODE;
- }
-
- /***********************/
- /* Test if double mode */
- /***********************/
-
- if ((dw_Status & 0x7800)
- == 0x1800) {
- *pb_InputMode =
- APCI1710_TOR_DOUBLE_MODE;
- }
-
- /**************************/
- /* Test if quadruple mode */
- /**************************/
-
- if ((dw_Status & 0x7800)
- == 0x0000) {
- *pb_InputMode =
- APCI1710_TOR_QUADRUPLE_MODE;
- }
- } /* if (dw_Status & 0x400) */
- else {
- *pb_InputMode = 1;
- } /* // if (dw_Status & 0x400) */
-
- /************************/
- /* Extern gate not used */
- /************************/
-
- *pb_ExternGate = 0;
- } /* if (dw_Status & 0x600) */
- else {
- *pb_InputMode =
- (unsigned char) ((dw_Status >> 6)
- & 1);
- *pb_ExternGate =
- (unsigned char) ((dw_Status >> 7)
- & 1);
- } /* if (dw_Status & 0x600) */
-
- *pb_TimingUnit =
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_TorCounterModuleInfo.
- s_TorCounterInfo[b_TorCounter].
- b_TimingUnit;
-
- *pul_TimingInterval =
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_TorCounterModuleInfo.
- s_TorCounterInfo[b_TorCounter].
- ul_RealTimingInterval;
- } else {
- /*******************************/
- /* Tor counter not initialised */
- /*******************************/
-
- DPRINTK("Tor counter not initialised\n");
- i_ReturnValue = -5;
- }
-
- } /* if (b_TorCounter <= 1) */
- else {
- /**********************************/
- /* Tor counter selection is wrong */
- /**********************************/
-
- DPRINTK("Tor counter selection is wrong \n");
- i_ReturnValue = -4;
- } /* if (b_TorCounter <= 1) */
- } else {
- /******************************************/
- /* The module is not a tor counter module */
- /******************************************/
-
- DPRINTK("The module is not a tor counter module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ReadTorCounterValue |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_TorCounter, |
-| unsigned int_ ui_TimeOut, |
-| unsigned char *_ pb_TorCounterStatus, |
-| PULONG_ pul_TorCounterValue) |
-+----------------------------------------------------------------------------+
-| Task              : case APCI1710_TOR_GETPROGRESSSTATUS:                  |
-|                        Return the tor counter (b_TorCounter) status       |
-|                        (pb_TorCounterStatus) from the selected tor counter|
-|                        module (b_ModulNbr).                               |
-|                     case APCI1710_TOR_GETCOUNTERVALUE:                    |
-|                        Return the tor counter (b_TorCounter) status       |
-|                        (pb_TorCounterStatus) and the timing value         |
-|                        (pul_TorCounterValue) after a counting cycle stop  |
-|                        from the selected tor counter module (b_ModulNbr). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3) |
-| unsigned char_ b_TorCounter : Tor counter selection (0 or 1).
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_ReadType = (unsigned char) data[0];
- b_TorCounter = (unsigned char) data[1];
- ui_TimeOut = (unsigned int) data[2]; |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_TorCounterStatus : Return the tor counter |
-| status. |
-|                                                0 : Counting cycle not     |
-|                                                    started. Software gate |
-|                                                    not set.               |
-|                                                1 : Counting cycle started.|
-|                                                    Software gate set.     |
-|                                                2 : Counting cycle stopped.|
-|                                                    The counting cycle is  |
-|                                                    terminated.            |
-|                                                3 : An overflow occurred.  |
-|                                                    You must change the    |
-|                                                    base timing with the   |
-|                                                    function               |
-|                                                    "i_APCI1710_InitTorCounter"|
-|                                                4 : Timeout occurred       |
-| unsigned int * pul_TorCounterValue : Tor counter value.
- pb_TorCounterStatus=(unsigned char *) &data[0];
- pul_TorCounterValue=(unsigned int *) &data[1]; |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a tor counter module |
-| -4: Tor counter selection is wrong |
-| -5: Tor counter not initialised see function |
-| "i_APCI1710_InitTorCounter" |
-| -6: Tor counter not enabled see function |
-| "i_APCI1710_EnableTorCounter" |
-| -7: Timeout parameter is wrong (0 to 65535) |
-+----------------------------------------------------------------------------+
-*/
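
Sketch of the progress-status decoding performed by the APCI1710_TOR_GETPROGRESSSTATUS branch below: bit 0 of the command register's low nibble means started, bit 1 stopped and bit 2 overflow. The function name is illustrative only.

	static unsigned char tor_progress_status(unsigned int cmd_reg)
	{
		unsigned int status = cmd_reg & 0xF;

		if (!(status & 0x1))
			return 0;	/* counting cycle not started, software gate not set */
		if (!(status & 0x2))
			return 1;	/* counting cycle started */
		if (status & 0x4)
			return 3;	/* overflow: choose a larger base timing */
		return 2;		/* counting cycle stopped / terminated */
	}
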
-
-static int i_APCI1710_InsnBitsGetTorCounterProgressStatusAndValue(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_Status;
- unsigned int dw_TimeOut = 0;
- unsigned char b_ModulNbr;
- unsigned char b_TorCounter;
- unsigned char b_ReadType;
- unsigned int ui_TimeOut;
- unsigned char *pb_TorCounterStatus;
- unsigned int *pul_TorCounterValue;
-
- i_ReturnValue = insn->n;
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_ReadType = (unsigned char) data[0];
- b_TorCounter = (unsigned char) data[1];
- ui_TimeOut = (unsigned int) data[2];
- pb_TorCounterStatus = (unsigned char *) &data[0];
- pul_TorCounterValue = (unsigned int *) &data[1];
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ReadType == APCI1710_TOR_READINTERRUPT) {
-
- data[0] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].b_OldModuleMask;
- data[1] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].ul_OldInterruptMask;
- data[2] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].ul_OldCounterLatchValue;
-
-		/* Increment the read FIFO */
-		devpriv->s_InterruptParameters.ui_Read =
-			(devpriv->s_InterruptParameters.ui_Read + 1) %
-			APCI1710_SAVE_INTERRUPT;
-
- return insn->n;
- }
-
- if (b_ModulNbr < 4) {
- /***********************/
- /* Test if tor counter */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_TOR_COUNTER) {
- /**********************************/
- /* Test the tor counter selection */
- /**********************************/
-
- if (b_TorCounter <= 1) {
- /***********************************/
- /* Test if tor counter initialised */
- /***********************************/
-
- dw_Status = inl(devpriv->s_BoardInfos.
- ui_Address + 8 + (16 * b_TorCounter) +
- (64 * b_ModulNbr));
-
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (dw_Status & 0x10) {
- /***************************/
- /* Test if counter enabled */
- /***************************/
-
- if (dw_Status & 0x1) {
-
- switch (b_ReadType) {
-
- case APCI1710_TOR_GETPROGRESSSTATUS:
- /*******************/
- /* Read the status */
- /*******************/
-
- dw_Status =
- inl(devpriv->
- s_BoardInfos.
- ui_Address + 4 +
- (16 * b_TorCounter) + (64 * b_ModulNbr));
-
- dw_Status =
- dw_Status & 0xF;
-
- /*****************/
- /* Test if start */
- /*****************/
-
-					if (dw_Status & 1) {
-						if (dw_Status & 2) {
-							if (dw_Status & 4) {
-								/* Tor counter overflow */
-								*pb_TorCounterStatus = 3;
-							} else {
-								/* Tor counter stopped */
-								*pb_TorCounterStatus = 2;
-							}
-						} else {
-							/* Tor counter started */
-							*pb_TorCounterStatus = 1;
-						}
-					} else {
-						/* Tor counter not started */
-						*pb_TorCounterStatus = 0;
-					}
- break;
-
- case APCI1710_TOR_GETCOUNTERVALUE:
-
-					/* Test the timeout parameter */
-					if ((ui_TimeOut >= 0) && (ui_TimeOut <= 65535UL)) {
-						for (;;) {
-							/* Read the status */
-							dw_Status = inl(devpriv->s_BoardInfos.ui_Address + 4 +
-								(16 * b_TorCounter) + (64 * b_ModulNbr));
-
-							/* Test if overflow */
-							if ((dw_Status & 4) == 4) {
-								/* Overflow occurred */
-								*pb_TorCounterStatus = 3;
-
-								/* Read the value */
-								*pul_TorCounterValue = inl(devpriv->s_BoardInfos.ui_Address + 0 +
-									(16 * b_TorCounter) + (64 * b_ModulNbr));
-								break;
-							} /* if ((dw_Status & 4) == 4) */
-							else {
-								/* Test if measurement stopped */
-								if ((dw_Status & 2) == 2) {
-									/* A stop signal occurred */
-									*pb_TorCounterStatus = 2;
-
-									/* Read the value */
-									*pul_TorCounterValue = inl(devpriv->s_BoardInfos.ui_Address + 0 +
-										(16 * b_TorCounter) + (64 * b_ModulNbr));
-
-									break;
-								} /* if ((dw_Status & 2) == 2) */
-								else {
-									/* Test if measurement started */
-									if ((dw_Status & 1) == 1) {
-										/* A start signal occurred */
-										*pb_TorCounterStatus = 1;
-									} /* if ((dw_Status & 1) == 1) */
-									else {
-										/* Measurement not started */
-										*pb_TorCounterStatus = 0;
-									} /* if ((dw_Status & 1) == 1) */
-								} /* if ((dw_Status & 2) == 2) */
-							} /* if ((dw_Status & 4) == 4) */
-
-							if (dw_TimeOut == ui_TimeOut) {
-								/* Timeout occurred */
-								break;
-							} else {
-								/* Increment the timeout */
-								dw_TimeOut = dw_TimeOut + 1;
-
-								mdelay(1000);
-							}
-						} /* for (;;) */
-
-						/* Test if timeout occurred */
-						if ((*pb_TorCounterStatus != 3) && (dw_TimeOut == ui_TimeOut) && (ui_TimeOut != 0)) {
-							/* Timeout occurred */
-							*pb_TorCounterStatus = 4;
-						}
-					} else {
-						/* Timeout parameter is wrong */
-
-						DPRINTK("Timeout parameter is wrong\n");
-						i_ReturnValue = -7;
-					}
- break;
-
- default:
- printk("Inputs wrong\n");
- } /* switch end */
- } /* if (dw_Status & 0x1) */
- else {
- /***************************/
- /* Tor counter not enabled */
- /***************************/
-
- DPRINTK("Tor counter not enabled\n");
- i_ReturnValue = -6;
- } /* if (dw_Status & 0x1) */
- } else {
- /*******************************/
- /* Tor counter not initialised */
- /*******************************/
-
- DPRINTK("Tor counter not initialised\n");
- i_ReturnValue = -5;
- }
- } /* if (b_TorCounter <= 1) */
- else {
- /**********************************/
- /* Tor counter selection is wrong */
- /**********************************/
-
- DPRINTK("Tor counter selection is wrong\n");
- i_ReturnValue = -4;
- } /* if (b_TorCounter <= 1) */
- } else {
- /******************************************/
- /* The module is not a tor counter module */
- /******************************************/
-
- DPRINTK("The module is not a tor counter module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
diff --git a/drivers/staging/comedi/drivers/addi-data/APCI1710_Ttl.c b/drivers/staging/comedi/drivers/addi-data/APCI1710_Ttl.c
deleted file mode 100644
index c8238b8..0000000
--- a/drivers/staging/comedi/drivers/addi-data/APCI1710_Ttl.c
+++ /dev/null
@@ -1,1048 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
-	Tel: +49(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-You should also find the complete GPL in the COPYING file accompanying this source code.
-
-@endverbatim
-*/
-/*
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-----------------------------------------------------------------------+
- | Project : API APCI1710 | Compiler : gcc |
- | Module name : TTL.C | Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-----------------------------------------------------------------------+
- | Description : APCI-1710 TTL I/O module |
- | |
- | |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +-----------------------------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | 13/05/98 | S. Weber | TTL digital input / output implementation |
- |----------|-----------|------------------------------------------------|
- | 08/05/00 | Guinot C | - 0400/0228 All Function in RING 0 |
- | | | available |
- +-----------------------------------------------------------------------+
- | | | |
- | | | |
- +-----------------------------------------------------------------------+
-*/
-
-#define APCI1710_TTL_INIT 0
-#define APCI1710_TTL_INITDIRECTION 1
-
-#define APCI1710_TTL_READCHANNEL 0
-#define APCI1710_TTL_READPORT 1
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_InitTTLIODirection |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_PortAMode, |
-| unsigned char_ b_PortBMode, |
-| unsigned char_ b_PortCMode, |
-| unsigned char_ b_PortDMode) |
-+----------------------------------------------------------------------------+
-| Task              : APCI1710_TTL_INIT (using defaults):                   |
-|                     Configure the TTL I/O operating mode of the selected  |
-|                     module (b_ModulNbr). You must call this function      |
-|                     before you call any other function that accesses the  |
-|                     TTL I/O.                                              |
-|                     APCI1710_TTL_INITDIRECTION (user inputs for direction)|
-
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710|
-| unsigned char_ b_ModulNbr : Module number to |
-| configure (0 to 3)
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_InitType = (unsigned char) data[0];
- b_PortAMode = (unsigned char) data[1];
- b_PortBMode = (unsigned char) data[2];
- b_PortCMode = (unsigned char) data[3];
- b_PortDMode = (unsigned char) data[4];|
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The module parameter is wrong |
-| -3: The module is not a TTL module |
-| -4: Function not available for this version |
-| -5: Port A mode selection is wrong |
-| -6: Port B mode selection is wrong |
-| -7: Port C mode selection is wrong |
-| -8: Port D mode selection is wrong |
-+----------------------------------------------------------------------------+
-*/
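
Sketch of the direction word the configuration handler below writes to the register at offset 20 of the module's 64-byte I/O window: one bit per TTL port, with a set bit meaning the port is configured as output (the default-init path writes 0x8, i.e. ports A/B/C input and port D output). The helper name is illustrative only.

	static unsigned int ttl_direction_word(unsigned int port_a, unsigned int port_b,
					       unsigned int port_c, unsigned int port_d)
	{
		/* each argument is 0 (input) or 1 (output), as in data[1]..data[4] */
		return (port_a << 0) | (port_b << 1) | (port_c << 2) | (port_d << 3);
	}
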
-
-static int i_APCI1710_InsnConfigInitTTLIO(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned char b_ModulNbr;
- unsigned char b_InitType;
- unsigned char b_PortAMode;
- unsigned char b_PortBMode;
- unsigned char b_PortCMode;
- unsigned char b_PortDMode;
-
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_InitType = (unsigned char) data[0];
- i_ReturnValue = insn->n;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /**************************/
- /* Test if TTL I/O module */
- /**************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_TTL_IO) {
- switch (b_InitType) {
- case APCI1710_TTL_INIT:
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_TTLIOInfo.b_TTLInit = 1;
-
- /***************************/
- /* Set TTL port A to input */
- /***************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_TTLIOInfo.b_PortConfiguration[0] = 0;
-
- /***************************/
- /* Set TTL port B to input */
- /***************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_TTLIOInfo.b_PortConfiguration[1] = 0;
-
- /***************************/
- /* Set TTL port C to input */
- /***************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_TTLIOInfo.b_PortConfiguration[2] = 0;
-
- /****************************/
- /* Set TTL port D to output */
- /****************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_TTLIOInfo.b_PortConfiguration[3] = 1;
-
- /*************************/
- /* Set the configuration */
- /*************************/
-
- outl(0x8,
- devpriv->s_BoardInfos.ui_Address + 20 +
- (64 * b_ModulNbr));
- break;
-
- case APCI1710_TTL_INITDIRECTION:
-
- b_PortAMode = (unsigned char) data[1];
- b_PortBMode = (unsigned char) data[2];
- b_PortCMode = (unsigned char) data[3];
- b_PortDMode = (unsigned char) data[4];
-
- /********************/
- /* Test the version */
- /********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration
- [b_ModulNbr] & 0xFFFF) >=
- 0x3230) {
- /************************/
- /* Test the port A mode */
- /************************/
-
- if ((b_PortAMode == 0)
- || (b_PortAMode == 1)) {
- /************************/
- /* Test the port B mode */
- /************************/
-
- if ((b_PortBMode == 0)
- || (b_PortBMode == 1)) {
- /************************/
- /* Test the port C mode */
- /************************/
-
- if ((b_PortCMode == 0)
- || (b_PortCMode
- == 1)) {
- /************************/
- /* Test the port D mode */
- /************************/
-
- if ((b_PortDMode == 0) || (b_PortDMode == 1)) {
-						devpriv->s_ModuleInfo[b_ModulNbr].
-							s_TTLIOInfo.b_TTLInit = 1;
-
-						/* Set TTL port A mode */
-						devpriv->s_ModuleInfo[b_ModulNbr].
-							s_TTLIOInfo.b_PortConfiguration[0] = b_PortAMode;
-
-						/* Set TTL port B mode */
-						devpriv->s_ModuleInfo[b_ModulNbr].
-							s_TTLIOInfo.b_PortConfiguration[1] = b_PortBMode;
-
-						/* Set TTL port C mode */
-						devpriv->s_ModuleInfo[b_ModulNbr].
-							s_TTLIOInfo.b_PortConfiguration[2] = b_PortCMode;
-
-						/* Set TTL port D mode */
-						devpriv->s_ModuleInfo[b_ModulNbr].
-							s_TTLIOInfo.b_PortConfiguration[3] = b_PortDMode;
-
-						/* Set the configuration */
-						outl((b_PortAMode << 0) | (b_PortBMode << 1) | (b_PortCMode << 2) | (b_PortDMode << 3),
-							devpriv->s_BoardInfos.ui_Address + 20 + (64 * b_ModulNbr));
- } else {
- /**********************************/
- /* Port D mode selection is wrong */
- /**********************************/
-
- DPRINTK("Port D mode selection is wrong\n");
-							i_ReturnValue = -8;
- }
- } else {
- /**********************************/
- /* Port C mode selection is wrong */
- /**********************************/
-
- DPRINTK("Port C mode selection is wrong\n");
-							i_ReturnValue = -7;
- }
- } else {
- /**********************************/
- /* Port B mode selection is wrong */
- /**********************************/
-
- DPRINTK("Port B mode selection is wrong\n");
- i_ReturnValue = -6;
- }
- } else {
- /**********************************/
- /* Port A mode selection is wrong */
- /**********************************/
-
- DPRINTK("Port A mode selection is wrong\n");
- i_ReturnValue = -5;
- }
- } else {
- /*******************************************/
- /* Function not available for this version */
- /*******************************************/
-
- DPRINTK("Function not available for this version\n");
- i_ReturnValue = -4;
- }
- break;
-
- DPRINTK("\n");
- default:
- printk("Bad Config Type\n");
- } /* switch end */
- } else {
- /**********************************/
- /* The module is not a TTL module */
- /**********************************/
-
- DPRINTK("The module is not a TTL module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| INPUT FUNCTIONS |
-+----------------------------------------------------------------------------+
-*/
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ReadTTLIOChannelValue |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_SelectedPort, |
-| unsigned char_ b_InputChannel, |
-| unsigned char *_ pb_ChannelStatus) |
-+----------------------------------------------------------------------------+
-| Task : Read the status from selected TTL digital input |
-| (b_InputChannel)
-+----------------------------------------------------------------------------+
-| Task : Read the status from digital input port |
-| (b_SelectedPort) from selected TTL module (b_ModulNbr) |
-+----------------------------------------------------------------------------+
-
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710|
-| unsigned char_ b_ModulNbr : Module number to |
-| configure (0 to 7) |
-| unsigned char_ b_SelectedPort, : Selection from TTL I/O |
-| port (0 to 2) |
-| 0 : Port A selection |
-| 1 : Port B selection |
-| 2 : Port C selection |
-| 3 : Port D selection |
-| unsigned char_ b_InputChannel : Selection from digital |
-| input ( 0 to 2)
-APCI1710_TTL_READCHANNEL
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_SelectedPort= CR_RANGE(insn->chanspec);
- b_InputChannel= CR_CHAN(insn->chanspec);
- b_ReadType = (unsigned char) data[0];
-
- APCI1710_TTL_READPORT|
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_SelectedPort= CR_RANGE(insn->chanspec);
- b_ReadType = (unsigned char) data[0];
-
-+----------------------------------------------------------------------------+
-| Output Parameters : data[0]
-
- unsigned char *_ pb_ChannelStatus : Digital input channel |
-| status |
-|                                               0 : Channel is not active  |
-|                                               1 : Channel is active      |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The module parameter is wrong |
-| -3: The module is not a TTL module |
-| -4: The selected TTL input port is wrong |
-| -5: The selected TTL digital input is wrong |
-| -6: TTL I/O not initialised |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI1710_InsnBitsReadTTLIO(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_StatusReg;
- unsigned char b_ModulNbr;
- unsigned char b_SelectedPort;
- unsigned char b_InputChannel;
- unsigned char b_ReadType;
- unsigned char *pb_ChannelStatus;
- unsigned char *pb_PortValue;
-
- i_ReturnValue = insn->n;
- b_ReadType = (unsigned char) data[0];
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_SelectedPort = CR_RANGE(insn->chanspec);
- b_InputChannel = CR_CHAN(insn->chanspec);
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /**************************/
- /* Test if TTL I/O module */
- /**************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_TTL_IO) {
- switch (b_ReadType) {
-
- case APCI1710_TTL_READCHANNEL:
- pb_ChannelStatus = (unsigned char *) &data[0];
- /********************************/
- /* Test the TTL I/O port number */
- /********************************/
-
- if (((b_SelectedPort <= 2)
- && ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration
- [b_ModulNbr] &
- 0xFFFF) ==
- 0x3130))
- || ((b_SelectedPort <= 3)
- && ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration
- [b_ModulNbr] &
- 0xFFFF) >=
- 0x3230))) {
- /******************************************/
-					/* Test the digital input channel number */
- /******************************************/
-
- if (((b_InputChannel <= 7)
- && (b_SelectedPort < 3))
- || ((b_InputChannel <= 1)
- && (b_SelectedPort ==
- 3))) {
- /******************************************/
- /* Test if the TTL I/O module initialised */
- /******************************************/
-
- if (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_TTLIOInfo.b_TTLInit ==
- 1) {
- /***********************************/
- /* Test if TTL port used for input */
- /***********************************/
-
- if (((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF) == 0x3130) || (((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF) >= 0x3230) && (devpriv->s_ModuleInfo[b_ModulNbr].s_TTLIOInfo.b_PortConfiguration[b_SelectedPort] == 0))) {
- /**************************/
- /* Read all digital input */
- /**************************/
-
- dw_StatusReg =
- inl
- (devpriv->
- s_BoardInfos.
- ui_Address
- +
- (64 * b_ModulNbr));
-
- *pb_ChannelStatus
- =
- (unsigned char) (
- (dw_StatusReg
- >>
- (8 * b_SelectedPort)) >> b_InputChannel) & 1;
- } else {
- /*******************************/
- /* Selected TTL I/O port error */
- /*******************************/
-
- DPRINTK("Selected TTL I/O port error\n");
- i_ReturnValue =
- -4;
- }
- } else {
- /***************************/
- /* TTL I/O not initialised */
- /***************************/
-
- DPRINTK("TTL I/O not initialised\n");
- i_ReturnValue = -6;
- }
- } else {
- /********************************/
- /* Selected digital input error */
- /********************************/
-
- DPRINTK("Selected digital input error\n");
- i_ReturnValue = -5;
- }
- } else {
- /*******************************/
- /* Selected TTL I/O port error */
- /*******************************/
-
- DPRINTK("Selected TTL I/O port error\n");
- i_ReturnValue = -4;
- }
- break;
-
- case APCI1710_TTL_READPORT:
- pb_PortValue = (unsigned char *) &data[0];
- /********************************/
- /* Test the TTL I/O port number */
- /********************************/
-
- if (((b_SelectedPort <= 2)
- && ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration
- [b_ModulNbr] &
- 0xFFFF) ==
- 0x3130))
- || ((b_SelectedPort <= 3)
- && ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration
- [b_ModulNbr] &
- 0xFFFF) >=
- 0x3230))) {
- /******************************************/
- /* Test if the TTL I/O module initialised */
- /******************************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_TTLIOInfo.b_TTLInit == 1) {
- /***********************************/
- /* Test if TTL port used for input */
- /***********************************/
-
- if (((devpriv->s_BoardInfos.
- dw_MolduleConfiguration
- [b_ModulNbr]
- &
- 0xFFFF)
- == 0x3130)
- || (((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF) >= 0x3230) && (devpriv->s_ModuleInfo[b_ModulNbr].s_TTLIOInfo.b_PortConfiguration[b_SelectedPort] == 0))) {
- /**************************/
- /* Read all digital input */
- /**************************/
-
- dw_StatusReg =
- inl(devpriv->
- s_BoardInfos.
- ui_Address +
- (64 * b_ModulNbr));
-
- *pb_PortValue =
- (unsigned char) (
- (dw_StatusReg >>
- (8 * b_SelectedPort)) & 0xFF);
- } else {
- /*******************************/
- /* Selected TTL I/O port error */
- /*******************************/
-
- DPRINTK("Selected TTL I/O port error\n");
- i_ReturnValue = -4;
- }
- } else {
- /***************************/
- /* TTL I/O not initialised */
- /***************************/
-
- DPRINTK("TTL I/O not initialised\n");
- i_ReturnValue = -5;
- }
- } else {
- /*******************************/
- /* Selected TTL I/O port error */
- /*******************************/
-
- DPRINTK("Selected TTL I/O port error\n");
- i_ReturnValue = -4;
- }
- break;
-
- default:
- printk("Bad ReadType\n");
-
- } /* End Switch */
- } else {
- /**********************************/
- /* The module is not a TTL module */
- /**********************************/
-
- DPRINTK("The module is not a TTL module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
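For reference, this is roughly how the handler above is reached from user space: the attach code further down in this patch registers it as the TTLIO subdevice's insn_bits handler, and data[0] selects between the APCI1710_TTL_READCHANNEL and APCI1710_TTL_READPORT paths. A minimal sketch, assuming the subdevice index and the numeric values of those driver-private constants are known (they live in the driver sources, not in comedi.h), and noting that the driver packs the module number into the AREF field, which the comedi core may validate against the subdevice's AREF flags:

	#include <string.h>
	#include <comedilib.h>

	/* Sketch only: "subdev" is the TTLIO subdevice index and "read_type"
	 * is the driver-private APCI1710_TTL_READCHANNEL / READPORT value,
	 * both assumptions here. */
	static int read_ttl(comedi_t *dev, unsigned int subdev,
			    unsigned int read_type, unsigned int module,
			    unsigned int port, unsigned int chan)
	{
		comedi_insn insn;
		lsampl_t data[1] = { read_type };	/* becomes b_ReadType */

		memset(&insn, 0, sizeof(insn));
		insn.insn = INSN_BITS;
		insn.n = 1;
		insn.data = data;
		insn.subdev = subdev;
		/* Mirrors the decode in the handler: CR_AREF -> module,
		 * CR_RANGE -> port, CR_CHAN -> input channel. */
		insn.chanspec = CR_PACK(chan, port, module);

		if (comedi_do_insn(dev, &insn) < 0)
			return -1;
		/* The handler writes the result through an unsigned char
		 * pointer, so only the low byte of data[0] is meaningful:
		 * channel read -> 0/1, port read -> 8-bit port image. */
		return data[0] & 0xff;
	}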
-/*
-+----------------------------------------------------------------------------+
-| Function Name     : int i_APCI1710_InsnReadTTLIOAllPortValue                |
-|                                       (struct comedi_device *dev,           |
-|                                        struct comedi_subdevice *s,          |
-|                                        struct comedi_insn *insn,            |
-|                                        unsigned int *data)                  |
-+----------------------------------------------------------------------------+
-| Task : Read the status from all digital input ports |
-| (port A, port B and port C) from selected TTL |
-| module (b_ModulNbr) |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710|
-| unsigned char_ b_ModulNbr : Module number to |
-| configure (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : PULONG_ pul_PortValue : Digital TTL inputs port |
-| status |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The module parameter is wrong |
-| -3: The module is not a TTL module |
-| -4: TTL I/O not initialised |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI1710_InsnReadTTLIOAllPortValue(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_StatusReg;
- unsigned char b_ModulNbr;
- unsigned int *pul_PortValue;
-
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- i_ReturnValue = insn->n;
- pul_PortValue = (unsigned int *) &data[0];
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /**************************/
- /* Test if TTL I/O module */
- /**************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_TTL_IO) {
- /******************************************/
- /* Test if the TTL I/O module initialised */
- /******************************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_TTLIOInfo.b_TTLInit == 1) {
- /**************************/
- /* Read all digital input */
- /**************************/
-
- dw_StatusReg = inl(devpriv->s_BoardInfos.
- ui_Address + (64 * b_ModulNbr));
-
- /**********************/
- /* Test if TTL Rev1.0 */
- /**********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration
- [b_ModulNbr] & 0xFFFF) ==
- 0x3130) {
- *pul_PortValue =
- dw_StatusReg & 0xFFFFFFUL;
- } else {
- /**************************************/
- /* Test if port A not used for output */
- /**************************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_TTLIOInfo.
- b_PortConfiguration[0] == 1) {
- *pul_PortValue =
- dw_StatusReg &
- 0x3FFFF00UL;
- }
-
- /**************************************/
- /* Test if port B not used for output */
- /**************************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_TTLIOInfo.
- b_PortConfiguration[1] == 1) {
- *pul_PortValue =
- dw_StatusReg &
- 0x3FF00FFUL;
- }
-
- /**************************************/
- /* Test if port C not used for output */
- /**************************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_TTLIOInfo.
- b_PortConfiguration[2] == 1) {
- *pul_PortValue =
- dw_StatusReg &
- 0x300FFFFUL;
- }
-
- /**************************************/
- /* Test if port D not used for output */
- /**************************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_TTLIOInfo.
- b_PortConfiguration[3] == 1) {
- *pul_PortValue =
- dw_StatusReg &
- 0xFFFFFFUL;
- }
- }
- } else {
- /***************************/
- /* TTL I/O not initialised */
- /***************************/
- DPRINTK("TTL I/O not initialised\n");
- i_ReturnValue = -5;
- }
- } else {
- /**********************************/
- /* The module is not a TTL module */
- /**********************************/
- DPRINTK("The module is not a TTL module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
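The attach code registers this handler as the TTLIO subdevice's insn_read, so the combined input image of one module can be fetched with a plain INSN_READ; only the AREF field of the chanspec (the module number) is used. A minimal sketch under the same assumptions as the previous one:

	#include <string.h>
	#include <comedilib.h>

	/* Sketch only: "subdev" is the assumed TTLIO subdevice index. */
	static int read_ttl_all_ports(comedi_t *dev, unsigned int subdev,
				      unsigned int module, unsigned int *ports)
	{
		comedi_insn insn;
		lsampl_t data[1] = { 0 };

		memset(&insn, 0, sizeof(insn));
		insn.insn = INSN_READ;
		insn.n = 1;
		insn.data = data;
		insn.subdev = subdev;
		insn.chanspec = CR_PACK(0, 0, module);	/* only CR_AREF is used */

		if (comedi_do_insn(dev, &insn) < 0)
			return -1;
		/* Bit layout per the handler: bits 0-7 port A, 8-15 port B,
		 * 16-23 port C, 24-25 port D; ports configured for output
		 * are masked off by the handler. */
		*ports = data[0];
		return 0;
	}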
-/*
-+----------------------------------------------------------------------------+
-| OUTPUT FUNCTIONS |
-+----------------------------------------------------------------------------+
-*/
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name     : _INT_ i_APCI1710_SetTTLIOChlOn                          |
-|                                       (unsigned char_ b_BoardHandle,        |
-|                                        unsigned char_ b_ModulNbr,           |
-|                                        unsigned char_ b_OutputChannel)      |
-|                     int i_APCI1710_InsnWriteSetTTLIOChlOnOff                |
-|                                       (struct comedi_device *dev,           |
-|                                        struct comedi_subdevice *s,          |
-|                                        struct comedi_insn *insn,            |
-|                                        unsigned int *data)                  |
-+----------------------------------------------------------------------------+
-| Task              : Sets or resets the output which has been passed with the|
-|                     parameter b_Channel. Setting an output means setting    |
-|                     an output high.                                         |
-+----------------------------------------------------------------------------+
-| Input Parameters  : unsigned char_ b_BoardHandle   : Handle of board APCI-1710|
-|                     unsigned char_ b_ModulNbr      : Selected module number |
-|                                                      (0 to 3)               |
-|                     unsigned char_ b_OutputChannel : Digital output channel |
-|                                                      selection              |
-|                                                      0       : PD0          |
-|                                                      1       : PD1          |
-|                                                      2 to 9  : PA           |
-|                                                      10 to 17: PB           |
-|                                                      18 to 25: PC           |
-|                                                                             |
-|                     b_ModulNbr      = CR_AREF(insn->chanspec)               |
-|                     b_OutputChannel = CR_CHAN(insn->chanspec)               |
-|                     ui_State        = data[0]   (ON or OFF)                 |
-+----------------------------------------------------------------------------+
-| Output Parameters : -                                                       |
-+----------------------------------------------------------------------------+
-| Return Value      :  0: No error                                            |
-|                     -1: The handle parameter of the board is wrong          |
-|                     -2: The module parameter is wrong                       |
-|                     -3: The module is not a TTL I/O module                  |
-|                     -4: The selected digital output is wrong                |
-|                     -5: TTL I/O not initialised, see function               |
-|                         "i_APCI1710_InitTTLIO"                              |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI1710_InsnWriteSetTTLIOChlOnOff(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_StatusReg = 0;
- unsigned char b_ModulNbr;
- unsigned char b_OutputChannel;
- unsigned int ui_State;
-
- i_ReturnValue = insn->n;
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_OutputChannel = CR_CHAN(insn->chanspec);
- ui_State = data[0]; /* ON or OFF */
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /**************************/
- /* Test if TTL I/O module */
- /**************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_TTL_IO) {
- /******************************************/
- /* Test if the TTL I/O module initialised */
- /******************************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_TTLIOInfo.b_TTLInit == 1) {
- /***********************************/
- /* Test the TTL I/O channel number */
- /***********************************/
-
- if (((b_OutputChannel <= 1)
- && ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration
- [b_ModulNbr] &
- 0xFFFF) ==
- 0x3130))
- || ((b_OutputChannel <= 25)
- && ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration
- [b_ModulNbr] &
- 0xFFFF) >=
- 0x3230))) {
- /****************************************************/
- /* Test if the selected channel is a output channel */
- /****************************************************/
-
- if (((b_OutputChannel <= 1)
- && (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_TTLIOInfo.
- b_PortConfiguration
- [3] == 1))
- || ((b_OutputChannel >= 2)
- && (b_OutputChannel <=
- 9)
- && (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_TTLIOInfo.
- b_PortConfiguration
- [0] == 1))
- || ((b_OutputChannel >= 10)
- && (b_OutputChannel <=
- 17)
- && (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_TTLIOInfo.
- b_PortConfiguration
- [1] == 1))
- || ((b_OutputChannel >= 18)
- && (b_OutputChannel <=
- 25)
- && (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_TTLIOInfo.
- b_PortConfiguration
- [2] == 1))) {
- /************************/
- /* Test if PD0 selected */
- /************************/
-
- if (b_OutputChannel == 0) {
-
- outl(ui_State,
- devpriv->
- s_BoardInfos.
- ui_Address +
- (64 * b_ModulNbr));
- } else {
- /************************/
- /* Test if PD1 selected */
- /************************/
-
- if (b_OutputChannel ==
- 1) {
-
- outl(ui_State,
- devpriv->
- s_BoardInfos.
- ui_Address
- + 4 +
- (64 * b_ModulNbr));
- } else {
- b_OutputChannel
- =
- b_OutputChannel
- - 2;
-
- /********************/
- /* Read all channel */
- /********************/
-
- dw_StatusReg =
- inl
- (devpriv->
- s_BoardInfos.
- ui_Address
- +
- (64 * b_ModulNbr));
- if (ui_State) /* ON */
- {
- dw_StatusReg
- =
- (dw_StatusReg
- >>
- ((b_OutputChannel / 8) * 8)) & 0xFF;
- dw_StatusReg
- =
- dw_StatusReg
- |
- (1
- <<
- (b_OutputChannel
- %
- 8));
- } else /* Off */
- {
- dw_StatusReg
- =
- (dw_StatusReg
- >>
- ((b_OutputChannel / 8) * 8)) & 0xFF;
- dw_StatusReg
- =
- dw_StatusReg
- &
- (0xFF
- -
- (1 << (b_OutputChannel % 8)));
-
- }
-
- /****************************/
- /* Set the new output value */
- /****************************/
-
- outl(dw_StatusReg, devpriv->s_BoardInfos.ui_Address + 8 + ((b_OutputChannel / 8) * 4) + (64 * b_ModulNbr));
- }
- }
- } else {
- /************************************/
- /* The selected TTL output is wrong */
- /************************************/
-
- DPRINTK(" The selected TTL output is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /************************************/
- /* The selected TTL output is wrong */
- /************************************/
-
- DPRINTK("The selected TTL output is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /***************************/
- /* TTL I/O not initialised */
- /***************************/
-
- DPRINTK("TTL I/O not initialised\n");
- i_ReturnValue = -5;
- }
- } else {
- /**************************************/
- /* The module is not a TTL I/O module */
- /**************************************/
-
- DPRINTK("The module is not a TTL I/O module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
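This handler is registered as the TTLIO subdevice's insn_write, so a single output channel is driven by an INSN_WRITE whose data[0] carries the ON/OFF state. A minimal sketch, again assuming the subdevice index and that the port carrying the channel was configured for output by the TTL init instruction; note the subdevice is registered with n_chan = 8, so the core's chanlist check may reject channel numbers above 7 even though the handler decodes up to 25:

	#include <string.h>
	#include <comedilib.h>

	/* Sketch only: "subdev" is the assumed TTLIO subdevice index. */
	static int set_ttl_output(comedi_t *dev, unsigned int subdev,
				  unsigned int module, unsigned int channel,
				  int on)
	{
		comedi_insn insn;
		lsampl_t data[1];

		data[0] = on ? 1 : 0;		/* ui_State: ON or OFF */
		memset(&insn, 0, sizeof(insn));
		insn.insn = INSN_WRITE;
		insn.n = 1;
		insn.data = data;
		insn.subdev = subdev;
		/* CR_AREF -> module, CR_CHAN -> channel
		 * (0/1 = PD0/PD1, 2-9 = PA, 10-17 = PB, 18-25 = PC). */
		insn.chanspec = CR_PACK(channel, 0, module);

		return comedi_do_insn(dev, &insn) < 0 ? -1 : 0;
	}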
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_common.c b/drivers/staging/comedi/drivers/addi-data/addi_common.c
index 0c3db57..63dff77 100644
--- a/drivers/staging/comedi/drivers/addi-data/addi_common.c
+++ b/drivers/staging/comedi/drivers/addi-data/addi_common.c
@@ -20,13 +20,6 @@ This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
-You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc.,
-59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-You should also find the complete GPL in the COPYING file accompanying this
-source code.
-
@endverbatim
*/
/*
@@ -46,10 +39,6 @@ source code.
+-----------------------------------------------------------------------+
*/
-#ifndef COMEDI_SUBD_TTLIO
-#define COMEDI_SUBD_TTLIO 11 /* Digital Input Output But TTL */
-#endif
-
static int i_ADDIDATA_InsnReadEeprom(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
@@ -96,32 +85,22 @@ static int addi_auto_attach(struct comedi_device *dev,
dev->board_name = this_board->pc_DriverName;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
return ret;
- if (!this_board->pc_EepromChip ||
- strcmp(this_board->pc_EepromChip, ADDIDATA_9054)) {
- /* board does not have an eeprom or is not ADDIDATA_9054 */
- if (this_board->i_IorangeBase1)
- dev->iobase = pci_resource_start(pcidev, 1);
- else
- dev->iobase = pci_resource_start(pcidev, 0);
-
- devpriv->iobase = dev->iobase;
- devpriv->i_IobaseAmcc = pci_resource_start(pcidev, 0);
- devpriv->i_IobaseAddon = pci_resource_start(pcidev, 2);
- } else {
- /* board has an ADDIDATA_9054 eeprom */
- dev->iobase = pci_resource_start(pcidev, 2);
- devpriv->iobase = pci_resource_start(pcidev, 2);
- devpriv->dw_AiBase = pci_ioremap_bar(pcidev, 3);
- }
+ if (this_board->i_IorangeBase1)
+ dev->iobase = pci_resource_start(pcidev, 1);
+ else
+ dev->iobase = pci_resource_start(pcidev, 0);
+
+ devpriv->iobase = dev->iobase;
+ devpriv->i_IobaseAmcc = pci_resource_start(pcidev, 0);
+ devpriv->i_IobaseAddon = pci_resource_start(pcidev, 2);
devpriv->i_IobaseReserved = pci_resource_start(pcidev, 3);
/* Initialize parameters that can be overridden in EEPROM */
@@ -132,7 +111,6 @@ static int addi_auto_attach(struct comedi_device *dev,
devpriv->s_EeParameters.i_NbrDiChannel = this_board->i_NbrDiChannel;
devpriv->s_EeParameters.i_NbrDoChannel = this_board->i_NbrDoChannel;
devpriv->s_EeParameters.i_DoMaxdata = this_board->i_DoMaxdata;
- devpriv->s_EeParameters.i_Dma = this_board->i_Dma;
devpriv->s_EeParameters.i_Timer = this_board->i_Timer;
devpriv->s_EeParameters.ui_MinAcquisitiontimeNs =
this_board->ui_MinAcquisitiontimeNs;
@@ -191,9 +169,6 @@ static int addi_auto_attach(struct comedi_device *dev,
s->len_chanlist = this_board->i_AiChannelList;
s->range_table = this_board->pr_AiRangelist;
- /* Set the initialisation flag */
- devpriv->b_AiInitialisation = 1;
-
s->insn_config = this_board->ai_config;
s->insn_read = this_board->ai_read;
s->insn_write = this_board->ai_write;
@@ -215,8 +190,6 @@ static int addi_auto_attach(struct comedi_device *dev,
s->maxdata = devpriv->s_EeParameters.i_AoMaxdata;
s->len_chanlist =
devpriv->s_EeParameters.i_NbrAoChannel;
- s->range_table = this_board->pr_AoRangelist;
- s->insn_config = this_board->ao_config;
s->insn_write = this_board->ao_write;
} else {
s->type = COMEDI_SUBD_UNUSED;
@@ -281,22 +254,7 @@ static int addi_auto_attach(struct comedi_device *dev,
/* Allocate and Initialise TTL */
s = &dev->subdevices[5];
- if (this_board->i_NbrTTLChannel) {
- s->type = COMEDI_SUBD_TTLIO;
- s->subdev_flags =
- SDF_WRITEABLE | SDF_READABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = this_board->i_NbrTTLChannel;
- s->maxdata = 1;
- s->io_bits = 0; /* all bits input */
- s->len_chanlist = this_board->i_NbrTTLChannel;
- s->range_table = &range_digital;
- s->insn_config = this_board->ttl_config;
- s->insn_bits = this_board->ttl_bits;
- s->insn_read = this_board->ttl_read;
- s->insn_write = this_board->ttl_write;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
- }
+ s->type = COMEDI_SUBD_UNUSED;
/* EEPROM */
s = &dev->subdevices[6];
@@ -323,8 +281,6 @@ static void i_ADDI_Detach(struct comedi_device *dev)
i_ADDI_Reset(dev);
if (dev->irq)
free_irq(dev->irq, dev);
- if (devpriv->dw_AiBase)
- iounmap(devpriv->dw_AiBase);
}
comedi_pci_disable(dev);
}
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_common.h b/drivers/staging/comedi/drivers/addi-data/addi_common.h
index c034bf1..dfd1e66 100644
--- a/drivers/staging/comedi/drivers/addi-data/addi_common.h
+++ b/drivers/staging/comedi/drivers/addi-data/addi_common.h
@@ -18,12 +18,8 @@
#include <linux/sched.h>
#include <linux/interrupt.h>
-#define LOBYTE(W) (unsigned char)((W) & 0xFF)
-#define HIBYTE(W) (unsigned char)(((W) >> 8) & 0xFF)
-#define MAKEWORD(H, L) (unsigned short)((L) | ((H) << 8))
#define LOWORD(W) (unsigned short)((W) & 0xFFFF)
#define HIWORD(W) (unsigned short)(((W) >> 16) & 0xFFFF)
-#define MAKEDWORD(H, L) (unsigned int)((L) | ((H) << 16))
#define ADDI_ENABLE 1
#define ADDI_DISABLE 0
@@ -33,8 +29,6 @@
#define ADDIDATA_NO_EEPROM 0
#define ADDIDATA_93C76 "93C76"
#define ADDIDATA_S5920 "S5920"
-#define ADDIDATA_S5933 "S5933"
-#define ADDIDATA_9054 "9054"
/* ADDIDATA Enable Disable */
#define ADDIDATA_ENABLE 1
@@ -55,17 +49,12 @@ struct addi_board {
int i_AiMaxdata; /* resolution of A/D */
int i_AoMaxdata; /* resolution of D/A */
const struct comedi_lrange *pr_AiRangelist; /* rangelist for A/D */
- const struct comedi_lrange *pr_AoRangelist; /* rangelist for D/A */
int i_NbrDiChannel; /* Number of DI channels */
int i_NbrDoChannel; /* Number of DO channels */
int i_DoMaxdata; /* data to set all channels high */
- int i_NbrTTLChannel; /* Number of TTL channels */
-
- int i_Dma; /* dma present or not */
int i_Timer; /* timer subdevice present or not */
- unsigned char b_AvailableConvertUnit;
unsigned int ui_MinAcquisitiontimeNs; /* Minimum Acquisition in Nano secs */
unsigned int ui_MinDelaytimeNs; /* Minimum Delay in Nano secs */
@@ -90,12 +79,8 @@ struct addi_board {
int (*ai_cancel)(struct comedi_device *, struct comedi_subdevice *);
/* Analog Output */
- int (*ao_config)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
int (*ao_write)(struct comedi_device *, struct comedi_subdevice *,
struct comedi_insn *, unsigned int *);
- int (*ao_bits)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
/* Digital Input */
int (*di_config)(struct comedi_device *, struct comedi_subdevice *,
@@ -126,247 +111,50 @@ struct addi_board {
struct comedi_insn *, unsigned int *);
int (*timer_bits)(struct comedi_device *, struct comedi_subdevice *,
struct comedi_insn *, unsigned int *);
-
- /* TTL IO */
- int (*ttl_config)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*ttl_bits)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*ttl_read)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
- int (*ttl_write)(struct comedi_device *, struct comedi_subdevice *,
- struct comedi_insn *, unsigned int *);
-};
-
-/* MODULE INFO STRUCTURE */
-
-union str_ModuleInfo {
- /* Incremental counter infos */
- struct {
- union {
- struct {
- unsigned char b_ModeRegister1;
- unsigned char b_ModeRegister2;
- unsigned char b_ModeRegister3;
- unsigned char b_ModeRegister4;
- } s_ByteModeRegister;
- unsigned int dw_ModeRegister1_2_3_4;
- } s_ModeRegister;
-
- struct {
- unsigned int b_IndexInit:1;
- unsigned int b_CounterInit:1;
- unsigned int b_ReferenceInit:1;
- unsigned int b_IndexInterruptOccur:1;
- unsigned int b_CompareLogicInit:1;
- unsigned int b_FrequencyMeasurementInit:1;
- unsigned int b_FrequencyMeasurementEnable:1;
- } s_InitFlag;
-
- } s_SiemensCounterInfo;
-
- /* SSI infos */
- struct {
- unsigned char b_SSIProfile;
- unsigned char b_PositionTurnLength;
- unsigned char b_TurnCptLength;
- unsigned char b_SSIInit;
- } s_SSICounterInfo;
-
- /* TTL I/O infos */
- struct {
- unsigned char b_TTLInit;
- unsigned char b_PortConfiguration[4];
- } s_TTLIOInfo;
-
- /* Digital I/O infos */
- struct {
- unsigned char b_DigitalInit;
- unsigned char b_ChannelAMode;
- unsigned char b_ChannelBMode;
- unsigned char b_OutputMemoryEnabled;
- unsigned int dw_OutputMemory;
- } s_DigitalIOInfo;
-
- /*********************/
- /* 82X54 timer infos */
- /*********************/
-
- struct {
- struct {
- unsigned char b_82X54Init;
- unsigned char b_InputClockSelection;
- unsigned char b_InputClockLevel;
- unsigned char b_OutputLevel;
- unsigned char b_HardwareGateLevel;
- unsigned int dw_ConfigurationWord;
- } s_82X54TimerInfo[3];
- unsigned char b_InterruptMask;
- } s_82X54ModuleInfo;
-
- /*********************/
- /* Chronometer infos */
- /*********************/
-
- struct {
- unsigned char b_ChronoInit;
- unsigned char b_InterruptMask;
- unsigned char b_PCIInputClock;
- unsigned char b_TimingUnit;
- unsigned char b_CycleMode;
- double d_TimingInterval;
- unsigned int dw_ConfigReg;
- } s_ChronoModuleInfo;
-
- /***********************/
- /* Pulse encoder infos */
- /***********************/
-
- struct {
- struct {
- unsigned char b_PulseEncoderInit;
- } s_PulseEncoderInfo[4];
- unsigned int dw_SetRegister;
- unsigned int dw_ControlRegister;
- unsigned int dw_StatusRegister;
- } s_PulseEncoderModuleInfo;
-
-	/* Tor counter infos */
- struct {
- struct {
- unsigned char b_TorCounterInit;
- unsigned char b_TimingUnit;
- unsigned char b_InterruptEnable;
- double d_TimingInterval;
- unsigned int ul_RealTimingInterval;
- } s_TorCounterInfo[2];
- unsigned char b_PCIInputClock;
- } s_TorCounterModuleInfo;
-
- /* PWM infos */
- struct {
- struct {
- unsigned char b_PWMInit;
- unsigned char b_TimingUnit;
- unsigned char b_InterruptEnable;
- double d_LowTiming;
- double d_HighTiming;
- unsigned int ul_RealLowTiming;
- unsigned int ul_RealHighTiming;
- } s_PWMInfo[2];
- unsigned char b_ClockSelection;
- } s_PWMModuleInfo;
-
- /* ETM infos */
- struct {
- struct {
- unsigned char b_ETMEnable;
- unsigned char b_ETMInterrupt;
- } s_ETMInfo[2];
- unsigned char b_ETMInit;
- unsigned char b_TimingUnit;
- unsigned char b_ClockSelection;
- double d_TimingInterval;
- unsigned int ul_Timing;
- } s_ETMModuleInfo;
-
- /* CDA infos */
- struct {
- unsigned char b_CDAEnable;
- unsigned char b_CDAInterrupt;
- unsigned char b_CDAInit;
- unsigned char b_FctSelection;
- unsigned char b_CDAReadFIFOOverflow;
- } s_CDAModuleInfo;
-
};
-/* Private structure for the addi_apci3120 driver */
struct addi_private {
-
int iobase;
int i_IobaseAmcc; /* base+size for AMCC chip */
int i_IobaseAddon; /* addon base address */
int i_IobaseReserved;
- void __iomem *dw_AiBase;
unsigned char b_AiContinuous; /* we do unlimited AI */
- unsigned char b_AiInitialisation;
unsigned int ui_AiActualScan; /* how many scans we finished */
- unsigned int ui_AiBufferPtr; /* data buffer ptr in samples */
unsigned int ui_AiNbrofChannels; /* how many channels is measured */
unsigned int ui_AiScanLength; /* Length of actual scanlist */
- unsigned int ui_AiActualScanPosition; /* position in actual scan */
unsigned int *pui_AiChannelList; /* actual chanlist */
unsigned int ui_AiChannelList[32]; /* actual chanlist */
- unsigned char b_AiChannelConfiguration[32]; /* actual chanlist */
unsigned int ui_AiReadData[32];
- unsigned int dw_AiInitialised;
unsigned int ui_AiTimer0; /* Timer Constant for Timer0 */
unsigned int ui_AiTimer1; /* Timer constant for Timer1 */
unsigned int ui_AiFlags;
unsigned int ui_AiDataLength;
- short *AiData; /* Pointer to sample data */
unsigned int ui_AiNbrofScans; /* number of scans to do */
unsigned short us_UseDma; /* To use Dma or not */
unsigned char b_DmaDoubleBuffer; /* we can use double buffering */
unsigned int ui_DmaActualBuffer; /* which buffer is used now */
- /* UPDATE-0.7.57->0.7.68 */
- /* unsigned int ul_DmaBufferVirtual[2]; pointers to begin of DMA buffer */
short *ul_DmaBufferVirtual[2]; /* pointers to begin of DMA buffer */
unsigned int ul_DmaBufferHw[2]; /* hw address of DMA buff */
unsigned int ui_DmaBufferSize[2]; /* size of dma buffer in bytes */
unsigned int ui_DmaBufferUsesize[2]; /* which size we may now used for transfer */
- unsigned int ui_DmaBufferSamples[2]; /* size in samples */
unsigned int ui_DmaBufferPages[2]; /* number of pages in buffer */
unsigned char b_DigitalOutputRegister; /* Digital Output Register */
unsigned char b_OutputMemoryStatus;
- unsigned char b_AnalogInputChannelNbr; /* Analog input channel Nbr */
-	unsigned char b_AnalogOutputChannelNbr;	/* Analog output channel Nbr */
unsigned char b_TimerSelectMode; /* Contain data written at iobase + 0C */
unsigned char b_ModeSelectRegister; /* Contain data written at iobase + 0E */
unsigned short us_OutputRegister; /* Contain data written at iobase + 0 */
- unsigned char b_InterruptState;
- unsigned char b_TimerInit; /* Specify if InitTimerWatchdog was load */
- unsigned char b_TimerStarted; /* Specify if timer 2 is running or not */
unsigned char b_Timer2Mode; /* Specify the timer 2 mode */
unsigned char b_Timer2Interrupt; /* Timer2 interrupt enable or disable */
unsigned char b_AiCyclicAcquisition; /* indicate cyclic acquisition */
unsigned char b_InterruptMode; /* eoc eos or dma */
unsigned char b_EocEosInterrupt; /* Enable disable eoc eos interrupt */
unsigned int ui_EocEosConversionTime;
- unsigned char b_EocEosConversionTimeBase;
unsigned char b_SingelDiff;
unsigned char b_ExttrigEnable; /* To enable or disable external trigger */
/* Pointer to the current process */
struct task_struct *tsk_Current;
- /* Hardware board infos for 1710 */
- struct {
- unsigned int ui_Address; /* Board address */
- unsigned int ui_FlashAddress;
- unsigned char b_InterruptNbr; /* Board interrupt number */
- unsigned char b_SlotNumber; /* PCI slot number */
- unsigned char b_BoardVersion;
- unsigned int dw_MolduleConfiguration[4]; /* Module config */
- } s_BoardInfos;
-
- /* Interrupt infos */
- struct {
- unsigned int ul_InterruptOccur; /* 0 : No interrupt occur */
- /* > 0 : Interrupt occur */
- unsigned int ui_Read; /* Read FIFO */
- unsigned int ui_Write; /* Write FIFO */
- struct {
- unsigned char b_OldModuleMask;
- unsigned int ul_OldInterruptMask; /* Interrupt mask */
- unsigned int ul_OldCounterLatchValue; /* Interrupt counter value */
- } s_FIFOInterruptParameters[APCI1710_SAVE_INTERRUPT];
- } s_InterruptParameters;
-
- union str_ModuleInfo s_ModuleInfo[4];
- unsigned int ul_TTLPortConfiguration[10];
-
/* Parameters read from EEPROM overriding static board info */
struct {
int i_NbrAiChannel; /* num of A/D chans */
@@ -376,7 +164,6 @@ struct addi_private {
int i_NbrDiChannel; /* Number of DI channels */
int i_NbrDoChannel; /* Number of DO channels */
int i_DoMaxdata; /* data to set all channels high */
- int i_Dma; /* dma present or not */
int i_Timer; /* timer subdevice present or not */
unsigned int ui_MinAcquisitiontimeNs;
/* Minimum Acquisition in Nano secs */
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_eeprom.c b/drivers/staging/comedi/drivers/addi-data/addi_eeprom.c
index 5124ac9..aafc172 100644
--- a/drivers/staging/comedi/drivers/addi-data/addi_eeprom.c
+++ b/drivers/staging/comedi/drivers/addi-data/addi_eeprom.c
@@ -20,15 +20,10 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * You should also find the complete GPL in the COPYING file accompanying
- * this source code.
*/
+#include <linux/delay.h>
+
#define NVRAM_USER_DATA_START 0x100
#define NVCMD_BEGIN_READ (0x7 << 5) /* nvRam begin read command */
@@ -302,7 +297,7 @@ static void addi_eeprom_read_ai_info(struct comedi_device *dev,
devpriv->s_EeParameters.ui_MinDelaytimeNs = tmp * 1000;
tmp = addi_eeprom_readw(iobase, type, addr + 20);
- devpriv->s_EeParameters.i_Dma = (tmp >> 13) & 0x01;
+ /* dma = (tmp >> 13) & 0x01; */
tmp = addi_eeprom_readw(iobase, type, addr + 72) & 0xff;
if (tmp) { /* > 0 */
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_APCI1710.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_APCI1710.c
deleted file mode 100644
index b05f850..0000000
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_APCI1710.c
+++ /dev/null
@@ -1,1318 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
-        Tel: +49(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-You should also find the complete GPL in the COPYING file accompanying this source code.
-
-@endverbatim
-*/
-/*
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-------------------------------+---------------------------------------+
- | Project : APCI-1710 | Compiler : GCC |
- | Module name : hwdrv_apci1710.c| Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-------------------------------+---------------------------------------+
- | Description : Hardware Layer Access For APCI-1710 |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +----------+-----------+------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | | | |
- | | | |
- | | | |
- +----------+-----------+------------------------------------------------+
-*/
-
-#define COMEDI_SUBD_TTLIO 11 /* Digital Input Output But TTL */
-#define COMEDI_SUBD_PWM 12 /* Pulse width Measurement */
-#define COMEDI_SUBD_SSI 13 /* Synchronous serial interface */
-#define COMEDI_SUBD_TOR 14 /* Tor counter */
-#define COMEDI_SUBD_CHRONO 15 /* Chrono meter */
-#define COMEDI_SUBD_PULSEENCODER 16 /* Pulse Encoder INP CPT */
-#define COMEDI_SUBD_INCREMENTALCOUNTER 17 /* Incremental Counter */
-
-#define APCI1710_BOARD_NAME "apci1710"
-#define APCI1710_BOARD_DEVICE_ID 0x818F
-#define APCI1710_ADDRESS_RANGE 256
-#define APCI1710_CONFIG_ADDRESS_RANGE 8
-#define APCI1710_INCREMENTAL_COUNTER 0x53430000UL
-#define APCI1710_SSI_COUNTER 0x53490000UL
-#define APCI1710_TTL_IO 0x544C0000UL
-#define APCI1710_DIGITAL_IO 0x44490000UL
-#define APCI1710_82X54_TIMER 0x49430000UL
-#define APCI1710_CHRONOMETER 0x43480000UL
-#define APCI1710_PULSE_ENCODER 0x495A0000UL
-#define APCI1710_TOR_COUNTER 0x544F0000UL
-#define APCI1710_PWM 0x50570000UL
-#define APCI1710_ETM 0x45540000UL
-#define APCI1710_CDA 0x43440000UL
-#define APCI1710_DISABLE 0
-#define APCI1710_ENABLE 1
-#define APCI1710_SYNCHRONOUS_MODE 1
-#define APCI1710_ASYNCHRONOUS_MODE 0
-
-#include "APCI1710_Inp_cpt.c"
-
-#include "APCI1710_Ssi.c"
-#include "APCI1710_Tor.c"
-#include "APCI1710_Ttl.c"
-#include "APCI1710_Dig_io.c"
-#include "APCI1710_82x54.c"
-#include "APCI1710_Chrono.c"
-#include "APCI1710_Pwm.c"
-#include "APCI1710_INCCPT.c"
-
-static const struct comedi_lrange range_apci1710_ttl = {
- 4, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2),
- BIP_RANGE(1)
- }
-};
-
-static const struct comedi_lrange range_apci1710_ssi = {
- 4, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2),
- BIP_RANGE(1)
- }
-};
-
-static const struct comedi_lrange range_apci1710_inccpt = {
- 4, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2),
- BIP_RANGE(1)
- }
-};
-
-static void i_ADDI_AttachPCI1710(struct comedi_device *dev)
-{
- struct comedi_subdevice *s;
- int ret = 0;
- int n_subdevices = 9;
-
- ret = comedi_alloc_subdevices(dev, n_subdevices);
- if (ret)
- return;
-
- /* Allocate and Initialise Timer Subdevice Structures */
- s = &dev->subdevices[0];
-
- s->type = COMEDI_SUBD_TIMER;
- s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 3;
- s->maxdata = 0;
- s->len_chanlist = 3;
- s->range_table = &range_digital;
- s->insn_write = i_APCI1710_InsnWriteEnableDisableTimer;
- s->insn_read = i_APCI1710_InsnReadAllTimerValue;
- s->insn_config = i_APCI1710_InsnConfigInitTimer;
- s->insn_bits = i_APCI1710_InsnBitsTimer;
-
- /* Allocate and Initialise DIO Subdevice Structures */
- s = &dev->subdevices[1];
-
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags =
- SDF_WRITEABLE | SDF_READABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 7;
- s->maxdata = 1;
- s->len_chanlist = 7;
- s->range_table = &range_digital;
- s->insn_config = i_APCI1710_InsnConfigDigitalIO;
- s->insn_read = i_APCI1710_InsnReadDigitalIOChlValue;
- s->insn_bits = i_APCI1710_InsnBitsDigitalIOPortOnOff;
- s->insn_write = i_APCI1710_InsnWriteDigitalIOChlOnOff;
-
- /* Allocate and Initialise Chrono Subdevice Structures */
- s = &dev->subdevices[2];
-
- s->type = COMEDI_SUBD_CHRONO;
- s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 4;
- s->maxdata = 0;
- s->len_chanlist = 4;
- s->range_table = &range_digital;
- s->insn_write = i_APCI1710_InsnWriteEnableDisableChrono;
- s->insn_read = i_APCI1710_InsnReadChrono;
- s->insn_config = i_APCI1710_InsnConfigInitChrono;
- s->insn_bits = i_APCI1710_InsnBitsChronoDigitalIO;
-
- /* Allocate and Initialise PWM Subdevice Structures */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_PWM;
- s->subdev_flags =
- SDF_WRITEABLE | SDF_READABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 3;
- s->maxdata = 1;
- s->len_chanlist = 3;
- s->range_table = &range_digital;
- s->io_bits = 0; /* all bits input */
- s->insn_config = i_APCI1710_InsnConfigPWM;
- s->insn_read = i_APCI1710_InsnReadGetPWMStatus;
- s->insn_write = i_APCI1710_InsnWritePWM;
- s->insn_bits = i_APCI1710_InsnBitsReadPWMInterrupt;
-
- /* Allocate and Initialise TTLIO Subdevice Structures */
- s = &dev->subdevices[4];
- s->type = COMEDI_SUBD_TTLIO;
- s->subdev_flags =
- SDF_WRITEABLE | SDF_READABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 8;
- s->maxdata = 1;
- s->len_chanlist = 8;
- s->range_table = &range_apci1710_ttl; /* to pass arguments in range */
- s->insn_config = i_APCI1710_InsnConfigInitTTLIO;
- s->insn_bits = i_APCI1710_InsnBitsReadTTLIO;
- s->insn_write = i_APCI1710_InsnWriteSetTTLIOChlOnOff;
- s->insn_read = i_APCI1710_InsnReadTTLIOAllPortValue;
-
- /* Allocate and Initialise TOR Subdevice Structures */
- s = &dev->subdevices[5];
- s->type = COMEDI_SUBD_TOR;
- s->subdev_flags =
- SDF_WRITEABLE | SDF_READABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 8;
- s->maxdata = 1;
- s->len_chanlist = 8;
- s->range_table = &range_digital;
- s->io_bits = 0; /* all bits input */
- s->insn_config = i_APCI1710_InsnConfigInitTorCounter;
- s->insn_read = i_APCI1710_InsnReadGetTorCounterInitialisation;
- s->insn_write = i_APCI1710_InsnWriteEnableDisableTorCounter;
- s->insn_bits = i_APCI1710_InsnBitsGetTorCounterProgressStatusAndValue;
-
- /* Allocate and Initialise SSI Subdevice Structures */
- s = &dev->subdevices[6];
- s->type = COMEDI_SUBD_SSI;
- s->subdev_flags =
- SDF_WRITEABLE | SDF_READABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 4;
- s->maxdata = 1;
- s->len_chanlist = 4;
- s->range_table = &range_apci1710_ssi;
- s->insn_config = i_APCI1710_InsnConfigInitSSI;
- s->insn_read = i_APCI1710_InsnReadSSIValue;
- s->insn_bits = i_APCI1710_InsnBitsSSIDigitalIO;
-
- /* Allocate and Initialise PULSEENCODER Subdevice Structures */
- s = &dev->subdevices[7];
- s->type = COMEDI_SUBD_PULSEENCODER;
- s->subdev_flags =
- SDF_WRITEABLE | SDF_READABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 4;
- s->maxdata = 1;
- s->len_chanlist = 4;
- s->range_table = &range_digital;
- s->insn_config = i_APCI1710_InsnConfigInitPulseEncoder;
- s->insn_write = i_APCI1710_InsnWriteEnableDisablePulseEncoder;
- s->insn_bits = i_APCI1710_InsnBitsReadWritePulseEncoder;
- s->insn_read = i_APCI1710_InsnReadInterruptPulseEncoder;
-
- /* Allocate and Initialise INCREMENTALCOUNTER Subdevice Structures */
- s = &dev->subdevices[8];
- s->type = COMEDI_SUBD_INCREMENTALCOUNTER;
- s->subdev_flags =
- SDF_WRITEABLE | SDF_READABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 500;
- s->maxdata = 1;
- s->len_chanlist = 500;
- s->range_table = &range_apci1710_inccpt;
- s->insn_config = i_APCI1710_InsnConfigINCCPT;
- s->insn_write = i_APCI1710_InsnWriteINCCPT;
- s->insn_read = i_APCI1710_InsnReadINCCPT;
- s->insn_bits = i_APCI1710_InsnBitsINCCPT;
-}
-
-static int i_APCI1710_Reset(struct comedi_device *dev)
-{
- struct addi_private *devpriv = dev->private;
- int ret;
- unsigned int dw_Dummy;
-
- /*********************************/
- /* Read all module configuration */
- /*********************************/
- ret = inl(devpriv->s_BoardInfos.ui_Address + 60);
- devpriv->s_BoardInfos.dw_MolduleConfiguration[0] = ret;
-
- ret = inl(devpriv->s_BoardInfos.ui_Address + 124);
- devpriv->s_BoardInfos.dw_MolduleConfiguration[1] = ret;
-
- ret = inl(devpriv->s_BoardInfos.ui_Address + 188);
- devpriv->s_BoardInfos.dw_MolduleConfiguration[2] = ret;
-
- ret = inl(devpriv->s_BoardInfos.ui_Address + 252);
- devpriv->s_BoardInfos.dw_MolduleConfiguration[3] = ret;
-
- /* outl(0x80808082,devpriv->s_BoardInfos.ui_Address+0x60); */
- outl(0x83838383, devpriv->s_BoardInfos.ui_Address + 0x60);
-
- devpriv->s_BoardInfos.b_BoardVersion = 1;
-
- /* Enable the interrupt for the controller */
- dw_Dummy = inl(devpriv->s_BoardInfos.ui_Address + 0x38);
- outl(dw_Dummy | 0x2000, devpriv->s_BoardInfos.ui_Address + 0x38);
-
- return 0;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function's Name : __void__ v_APCI1710_InterruptFunction |
-| (unsigned char b_Interrupt, __CPPARGS) |
-+----------------------------------------------------------------------------+
-| Task : APCI-1710 interrupt function |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char b_Interrupt : Interrupt number |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0 : OK |
-| -1 : Error |
-+----------------------------------------------------------------------------+
-*/
-
-static void v_APCI1710_Interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct addi_private *devpriv = dev->private;
- unsigned char b_ModuleCpt = 0;
- unsigned char b_InterruptFlag = 0;
- unsigned char b_PWMCpt = 0;
- unsigned char b_TorCounterCpt = 0;
- unsigned char b_PulseIncoderCpt = 0;
- unsigned int ui_16BitValue;
- unsigned int ul_InterruptLatchReg = 0;
- unsigned int ul_LatchRegisterValue = 0;
- unsigned int ul_82X54InterruptStatus;
- unsigned int ul_StatusRegister;
-
- union str_ModuleInfo *ps_ModuleInfo;
-
- printk("APCI1710 Interrupt\n");
- for (b_ModuleCpt = 0; b_ModuleCpt < 4; b_ModuleCpt++, ps_ModuleInfo++) {
-
- /**************************/
- /* 1199/0225 to 0100/0226 */
- /**************************/
- ps_ModuleInfo = &devpriv->s_ModuleInfo[b_ModuleCpt];
-
- /***********************/
- /* Test if 82X54 timer */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModuleCpt] &
- 0xFFFF0000UL) == APCI1710_82X54_TIMER) {
-
- /* printk("TIMER Interrupt Occurred\n"); */
- ul_82X54InterruptStatus = inl(devpriv->s_BoardInfos.
- ui_Address + 12 + (64 * b_ModuleCpt));
-
- /***************************/
- /* Test if interrupt occur */
- /***************************/
-
- if ((ul_82X54InterruptStatus & ps_ModuleInfo->
- s_82X54ModuleInfo.
- b_InterruptMask) != 0) {
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.
- ui_Write].
- ul_OldInterruptMask =
- (ul_82X54InterruptStatus &
- ps_ModuleInfo->s_82X54ModuleInfo.
- b_InterruptMask) << 4;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.
- ui_Write].
- b_OldModuleMask = 1 << b_ModuleCpt;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.
- ui_Write].ul_OldCounterLatchValue = 0;
-
- devpriv->
- s_InterruptParameters.
- ul_InterruptOccur++;
-
- /****************************/
- /* Increment the write FIFO */
- /****************************/
-
- devpriv->
- s_InterruptParameters.
- ui_Write = (devpriv->
- s_InterruptParameters.
- ui_Write + 1) % APCI1710_SAVE_INTERRUPT;
-
- b_InterruptFlag = 1;
-
- /**********************/
- /* Call user function */
- /**********************/
-				/* Send a signal from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_Current, 0);
-
- } /* if ((ul_82X54InterruptStatus & 0x7) != 0) */
- } /* 82X54 timer */
-
- /***************************/
- /* Test if increm. counter */
- /***************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModuleCpt] &
- 0xFFFF0000UL) == APCI1710_INCREMENTAL_COUNTER) {
-
- ul_InterruptLatchReg = inl(devpriv->s_BoardInfos.
- ui_Address + (64 * b_ModuleCpt));
-
- /*********************/
- /* Test if interrupt */
- /*********************/
-
- if ((ul_InterruptLatchReg & 0x22) && (ps_ModuleInfo->
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 & 0x80)) {
- /************************************/
- /* Test if strobe latch I interrupt */
- /************************************/
-
- if (ul_InterruptLatchReg & 2) {
- ul_LatchRegisterValue =
- inl(devpriv->s_BoardInfos.
- ui_Address + 4 +
- (64 * b_ModuleCpt));
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].ul_OldInterruptMask =
- 1UL;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].b_OldModuleMask =
- 1 << b_ModuleCpt;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].
- ul_OldCounterLatchValue =
- ul_LatchRegisterValue;
-
- devpriv->
- s_InterruptParameters.
- ul_InterruptOccur++;
-
- /****************************/
- /* 0899/0224 to 1199/0225 */
- /****************************/
- /* Increment the write FIFO */
- /****************************/
-
- devpriv->
- s_InterruptParameters.
- ui_Write = (devpriv->
- s_InterruptParameters.
- ui_Write +
- 1) % APCI1710_SAVE_INTERRUPT;
-
- b_InterruptFlag = 1;
-
- /**********************/
- /* Call user function */
- /**********************/
-					/* Send a signal from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_Current,
- 0);
-
- }
-
- /*************************************/
- /* Test if strobe latch II interrupt */
- /*************************************/
-
- if (ul_InterruptLatchReg & 0x20) {
-
- ul_LatchRegisterValue =
- inl(devpriv->s_BoardInfos.
- ui_Address + 8 +
- (64 * b_ModuleCpt));
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].ul_OldInterruptMask =
- 2UL;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].b_OldModuleMask =
- 1 << b_ModuleCpt;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].
- ul_OldCounterLatchValue =
- ul_LatchRegisterValue;
-
- devpriv->
- s_InterruptParameters.
- ul_InterruptOccur++;
-
- /****************************/
- /* 0899/0224 to 1199/0225 */
- /****************************/
- /* Increment the write FIFO */
- /****************************/
-
- devpriv->
- s_InterruptParameters.
- ui_Write = (devpriv->
- s_InterruptParameters.
- ui_Write +
- 1) % APCI1710_SAVE_INTERRUPT;
-
- b_InterruptFlag = 1;
-
- /**********************/
- /* Call user function */
- /**********************/
-					/* Send a signal from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_Current,
- 0);
-
- }
- }
-
- ul_InterruptLatchReg = inl(devpriv->s_BoardInfos.
- ui_Address + 24 + (64 * b_ModuleCpt));
-
- /***************************/
- /* Test if index interrupt */
- /***************************/
-
- if (ul_InterruptLatchReg & 0x8) {
- ps_ModuleInfo->
- s_SiemensCounterInfo.
- s_InitFlag.b_IndexInterruptOccur = 1;
-
- if (ps_ModuleInfo->
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 &
- APCI1710_INDEX_AUTO_MODE) {
-
- outl(ps_ModuleInfo->
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4,
- devpriv->s_BoardInfos.
- ui_Address + 20 +
- (64 * b_ModuleCpt));
- }
-
- /*****************************/
- /* Test if interrupt enabled */
- /*****************************/
-
- if ((ps_ModuleInfo->
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 &
- APCI1710_ENABLE_INDEX_INT) ==
- APCI1710_ENABLE_INDEX_INT) {
- devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].ul_OldInterruptMask =
- 4UL;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].b_OldModuleMask =
- 1 << b_ModuleCpt;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].
- ul_OldCounterLatchValue =
- ul_LatchRegisterValue;
-
- devpriv->
- s_InterruptParameters.
- ul_InterruptOccur++;
-
- /****************************/
- /* 0899/0224 to 1199/0225 */
- /****************************/
- /* Increment the write FIFO */
- /****************************/
-
- devpriv->
- s_InterruptParameters.
- ui_Write = (devpriv->
- s_InterruptParameters.
- ui_Write +
- 1) % APCI1710_SAVE_INTERRUPT;
-
- b_InterruptFlag = 1;
-
- /**********************/
- /* Call user function */
- /**********************/
-					/* Send a signal from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_Current,
- 0);
-
- }
- }
-
- /*****************************/
- /* Test if compare interrupt */
- /*****************************/
-
- if (ul_InterruptLatchReg & 0x10) {
- /*****************************/
- /* Test if interrupt enabled */
- /*****************************/
-
- if ((ps_ModuleInfo->
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 &
- APCI1710_ENABLE_COMPARE_INT) ==
- APCI1710_ENABLE_COMPARE_INT) {
- devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].ul_OldInterruptMask =
- 8UL;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].b_OldModuleMask =
- 1 << b_ModuleCpt;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].
- ul_OldCounterLatchValue =
- ul_LatchRegisterValue;
-
- devpriv->
- s_InterruptParameters.
- ul_InterruptOccur++;
-
- /****************************/
- /* 0899/0224 to 1199/0225 */
- /****************************/
- /* Increment the write FIFO */
- /****************************/
-
- devpriv->
- s_InterruptParameters.
- ui_Write = (devpriv->
- s_InterruptParameters.
- ui_Write +
- 1) % APCI1710_SAVE_INTERRUPT;
-
- b_InterruptFlag = 1;
-
- /**********************/
- /* Call user function */
- /**********************/
-					/* Send a signal from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_Current,
- 0);
-
- }
- }
-
- /*******************************************/
- /* Test if frequency measurement interrupt */
- /*******************************************/
-
- if (ul_InterruptLatchReg & 0x20) {
- /*******************/
- /* Read the status */
- /*******************/
-
- ul_StatusRegister = inl(devpriv->s_BoardInfos.
- ui_Address + 32 + (64 * b_ModuleCpt));
-
- /******************/
- /* Read the value */
- /******************/
-
- ul_LatchRegisterValue =
- inl(devpriv->s_BoardInfos.ui_Address +
- 28 + (64 * b_ModuleCpt));
-
- switch ((ul_StatusRegister >> 1) & 3) {
- case 0:
- /*************************/
- /* Test the counter mode */
- /*************************/
-
- if ((devpriv->s_ModuleInfo[b_ModuleCpt].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister1 &
- APCI1710_16BIT_COUNTER)
- == APCI1710_16BIT_COUNTER) {
- /****************************************/
- /* Test if 16-bit counter 1 pulse occur */
- /****************************************/
-
- if ((ul_LatchRegisterValue &
- 0xFFFFU) != 0) {
- ui_16BitValue =
- (unsigned int)
- ul_LatchRegisterValue
- & 0xFFFFU;
- ul_LatchRegisterValue =
- (ul_LatchRegisterValue
- & 0xFFFF0000UL)
- | (0xFFFFU -
- ui_16BitValue);
- }
-
- /****************************************/
- /* Test if 16-bit counter 2 pulse occur */
- /****************************************/
-
- if ((ul_LatchRegisterValue &
- 0xFFFF0000UL) !=
- 0) {
- ui_16BitValue =
- (unsigned int) (
- (ul_LatchRegisterValue
- >> 16) &
- 0xFFFFU);
- ul_LatchRegisterValue =
- (ul_LatchRegisterValue
- & 0xFFFFUL) |
- ((0xFFFFU -
- ui_16BitValue)
- << 16);
- }
- } else {
- if (ul_LatchRegisterValue != 0) {
- ul_LatchRegisterValue =
- 0xFFFFFFFFUL -
- ul_LatchRegisterValue;
- }
- }
- break;
-
- case 1:
- /****************************************/
- /* Test if 16-bit counter 2 pulse occur */
- /****************************************/
-
- if ((ul_LatchRegisterValue &
- 0xFFFF0000UL) != 0) {
- ui_16BitValue =
- (unsigned int) (
- (ul_LatchRegisterValue
- >> 16) &
- 0xFFFFU);
- ul_LatchRegisterValue =
- (ul_LatchRegisterValue &
- 0xFFFFUL) | ((0xFFFFU -
- ui_16BitValue)
- << 16);
- }
- break;
-
- case 2:
- /****************************************/
- /* Test if 16-bit counter 1 pulse occur */
- /****************************************/
-
- if ((ul_LatchRegisterValue & 0xFFFFU) !=
- 0) {
- ui_16BitValue =
- (unsigned int)
- ul_LatchRegisterValue &
- 0xFFFFU;
- ul_LatchRegisterValue =
- (ul_LatchRegisterValue &
- 0xFFFF0000UL) | (0xFFFFU
- - ui_16BitValue);
- }
- break;
- }
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.
- ui_Write].
- ul_OldInterruptMask = 0x10000UL;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.
- ui_Write].
- b_OldModuleMask = 1 << b_ModuleCpt;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.
- ui_Write].
- ul_OldCounterLatchValue =
- ul_LatchRegisterValue;
-
- devpriv->
- s_InterruptParameters.
- ul_InterruptOccur++;
-
- /****************************/
- /* 0899/0224 to 1199/0225 */
- /****************************/
- /* Increment the write FIFO */
- /****************************/
-
- devpriv->
- s_InterruptParameters.
- ui_Write = (devpriv->
- s_InterruptParameters.
- ui_Write + 1) % APCI1710_SAVE_INTERRUPT;
-
- b_InterruptFlag = 1;
-
- /**********************/
- /* Call user function */
- /**********************/
-				/* Send a signal from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_Current, 0);
-
- }
- } /* Incremental counter */
-
- /***************/
- /* Test if CDA */
- /***************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModuleCpt] &
- 0xFFFF0000UL) == APCI1710_CDA) {
- /******************************************/
- /* Test if CDA enable and functionality 0 */
- /******************************************/
-
- if ((devpriv->s_ModuleInfo[b_ModuleCpt].
- s_CDAModuleInfo.
- b_CDAEnable == APCI1710_ENABLE)
- && (devpriv->s_ModuleInfo[b_ModuleCpt].
- s_CDAModuleInfo.b_FctSelection == 0)) {
- /****************************/
- /* Get the interrupt status */
- /****************************/
-
- ul_StatusRegister = inl(devpriv->s_BoardInfos.
- ui_Address + 16 + (64 * b_ModuleCpt));
- /***************************/
-        /* Test if interrupt occurred */
- /***************************/
-
- if (ul_StatusRegister & 1) {
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].ul_OldInterruptMask =
- 0x80000UL;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].b_OldModuleMask =
- 1 << b_ModuleCpt;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].
- ul_OldCounterLatchValue = 0;
-
- devpriv->
- s_InterruptParameters.
- ul_InterruptOccur++;
-
- /****************************/
- /* Increment the write FIFO */
- /****************************/
-
- devpriv->
- s_InterruptParameters.
- ui_Write = (devpriv->
- s_InterruptParameters.
- ui_Write +
- 1) % APCI1710_SAVE_INTERRUPT;
-
- b_InterruptFlag = 1;
-
- /**********************/
- /* Call user function */
- /**********************/
-
-         /* Send a signal from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_Current,
- 0);
-
- } /* if (ul_StatusRegister & 1) */
-
- }
- } /* CDA */
-
- /***********************/
- /* Test if PWM counter */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModuleCpt] &
- 0xFFFF0000UL) == APCI1710_PWM) {
- for (b_PWMCpt = 0; b_PWMCpt < 2; b_PWMCpt++) {
- /*************************************/
- /* Test if PWM interrupt initialised */
- /*************************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModuleCpt].
- s_PWMModuleInfo.
- s_PWMInfo[b_PWMCpt].
- b_InterruptEnable == APCI1710_ENABLE) {
- /*****************************/
- /* Read the interrupt status */
- /*****************************/
-
- ul_StatusRegister =
- inl(devpriv->s_BoardInfos.
- ui_Address + 16 +
- (20 * b_PWMCpt) +
- (64 * b_ModuleCpt));
-
- /***************************/
-          /* Test if interrupt occurred */
- /***************************/
-
- if (ul_StatusRegister & 0x1) {
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->
- s_InterruptParameters.
- ui_Write].
- ul_OldInterruptMask =
- 0x4000UL << b_PWMCpt;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->
- s_InterruptParameters.
- ui_Write].
- b_OldModuleMask =
- 1 << b_ModuleCpt;
-
- devpriv->
- s_InterruptParameters.
- ul_InterruptOccur++;
-
- /****************************/
- /* Increment the write FIFO */
- /****************************/
-
- devpriv->
- s_InterruptParameters.
- ui_Write = (devpriv->
- s_InterruptParameters.
- ui_Write +
- 1) %
- APCI1710_SAVE_INTERRUPT;
-
- b_InterruptFlag = 1;
-
- /**********************/
- /* Call user function */
- /**********************/
-          /* Send a signal from kernel to user space */
- send_sig(SIGIO,
- devpriv->tsk_Current,
- 0);
-
- } /* if (ul_StatusRegister & 0x1) */
- } /* if (APCI1710_ENABLE) */
-        } /* for (b_PWMCpt = 0; b_PWMCpt < 2; b_PWMCpt++) */
- } /* PWM counter */
-
- /***********************/
- /* Test if tor counter */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModuleCpt] &
- 0xFFFF0000UL) == APCI1710_TOR_COUNTER) {
- for (b_TorCounterCpt = 0; b_TorCounterCpt < 2;
- b_TorCounterCpt++) {
- /*************************************/
- /* Test if tor interrupt initialised */
- /*************************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModuleCpt].
- s_TorCounterModuleInfo.
- s_TorCounterInfo[b_TorCounterCpt].
- b_InterruptEnable == APCI1710_ENABLE) {
- /*****************************/
- /* Read the interrupt status */
- /*****************************/
-
- ul_StatusRegister =
- inl(devpriv->s_BoardInfos.
- ui_Address + 12 +
- (16 * b_TorCounterCpt) +
- (64 * b_ModuleCpt));
-
- /***************************/
-          /* Test if interrupt occurred */
- /***************************/
-
- if (ul_StatusRegister & 0x1) {
- /******************************/
- /* Read the tor counter value */
- /******************************/
-
- ul_LatchRegisterValue =
- inl(devpriv->
- s_BoardInfos.
- ui_Address + 0 +
- (16 * b_TorCounterCpt) +
- (64 * b_ModuleCpt));
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->
- s_InterruptParameters.
- ui_Write].
- ul_OldInterruptMask =
- 0x1000UL <<
- b_TorCounterCpt;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->
- s_InterruptParameters.
- ui_Write].
- b_OldModuleMask =
- 1 << b_ModuleCpt;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->
- s_InterruptParameters.
- ui_Write].
- ul_OldCounterLatchValue
- = ul_LatchRegisterValue;
-
- devpriv->
- s_InterruptParameters.
- ul_InterruptOccur++;
-
- /****************************/
- /* Increment the write FIFO */
- /****************************/
-
- devpriv->
- s_InterruptParameters.
- ui_Write = (devpriv->
- s_InterruptParameters.
- ui_Write +
- 1) %
- APCI1710_SAVE_INTERRUPT;
-
- b_InterruptFlag = 1;
-
- /**********************/
- /* Call user function */
- /**********************/
-
-           /* Send a signal from kernel to user space */
- send_sig(SIGIO,
- devpriv->tsk_Current,
- 0);
- } /* if (ul_StatusRegister & 0x1) */
- } /* if (APCI1710_ENABLE) */
-        } /* for (b_TorCounterCpt = 0; b_TorCounterCpt < 2; b_TorCounterCpt++) */
- } /* Tor counter */
-
- /***********************/
- /* Test if chronometer */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModuleCpt] &
- 0xFFFF0000UL) == APCI1710_CHRONOMETER) {
-
- /* printk("APCI1710 Chrono Interrupt\n"); */
- /*****************************/
- /* Read the interrupt status */
- /*****************************/
-
- ul_InterruptLatchReg = inl(devpriv->s_BoardInfos.
- ui_Address + 12 + (64 * b_ModuleCpt));
-
- /***************************/
-        /* Test if interrupt occurred */
- /***************************/
-
- if ((ul_InterruptLatchReg & 0x8) == 0x8) {
- /****************************/
- /* Clear the interrupt flag */
- /****************************/
-
- outl(0, devpriv->s_BoardInfos.
- ui_Address + 32 + (64 * b_ModuleCpt));
-
- /***************************/
- /* Test if continuous mode */
- /***************************/
-
- if (ps_ModuleInfo->
- s_ChronoModuleInfo.
- b_CycleMode == APCI1710_ENABLE) {
- /********************/
- /* Clear the status */
- /********************/
-
- outl(0, devpriv->s_BoardInfos.
- ui_Address + 36 +
- (64 * b_ModuleCpt));
- }
-
- /*************************/
- /* Read the timing value */
- /*************************/
-
- ul_LatchRegisterValue =
- inl(devpriv->s_BoardInfos.ui_Address +
- 4 + (64 * b_ModuleCpt));
-
- /*****************************/
- /* Test if interrupt enabled */
- /*****************************/
-
- if (ps_ModuleInfo->
- s_ChronoModuleInfo.b_InterruptMask) {
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].ul_OldInterruptMask =
- 0x80;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].b_OldModuleMask =
- 1 << b_ModuleCpt;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].
- ul_OldCounterLatchValue =
- ul_LatchRegisterValue;
-
- devpriv->
- s_InterruptParameters.
- ul_InterruptOccur++;
-
- /****************************/
- /* Increment the write FIFO */
- /****************************/
-
- devpriv->
- s_InterruptParameters.
- ui_Write = (devpriv->
- s_InterruptParameters.
- ui_Write +
- 1) % APCI1710_SAVE_INTERRUPT;
-
- b_InterruptFlag = 1;
-
- /**********************/
- /* Call user function */
- /**********************/
-         /* Send a signal from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_Current,
- 0);
-
- }
- }
- } /* Chronometer */
-
- /*************************/
- /* Test if pulse encoder */
- /*************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModuleCpt] &
- 0xFFFF0000UL) == APCI1710_PULSE_ENCODER) {
- /****************************/
- /* Read the status register */
- /****************************/
-
- ul_StatusRegister = inl(devpriv->s_BoardInfos.
- ui_Address + 20 + (64 * b_ModuleCpt));
-
- if (ul_StatusRegister & 0xF) {
- for (b_PulseIncoderCpt = 0;
- b_PulseIncoderCpt < 4;
- b_PulseIncoderCpt++) {
- /*************************************/
- /* Test if pulse encoder initialised */
- /*************************************/
-
- if ((ps_ModuleInfo->
- s_PulseEncoderModuleInfo.
- s_PulseEncoderInfo
- [b_PulseIncoderCpt].
- b_PulseEncoderInit == 1)
- && (((ps_ModuleInfo->s_PulseEncoderModuleInfo.dw_SetRegister >> b_PulseIncoderCpt) & 1) == 1) && (((ul_StatusRegister >> (b_PulseIncoderCpt)) & 1) == 1)) {
- devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->
- s_InterruptParameters.
- ui_Write].
- ul_OldInterruptMask =
- 0x100UL <<
- b_PulseIncoderCpt;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->
- s_InterruptParameters.
- ui_Write].
- b_OldModuleMask =
- 1 << b_ModuleCpt;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->
- s_InterruptParameters.
- ui_Write].
- ul_OldCounterLatchValue
- = ul_LatchRegisterValue;
-
- devpriv->
- s_InterruptParameters.
- ul_InterruptOccur++;
-
- /****************************/
- /* 0899/0224 to 1199/0225 */
- /****************************/
- /* Increment the write FIFO */
- /****************************/
-
- devpriv->
- s_InterruptParameters.
- ui_Write = (devpriv->
- s_InterruptParameters.
- ui_Write +
- 1) %
- APCI1710_SAVE_INTERRUPT;
-
- b_InterruptFlag = 1;
-
- /**********************/
- /* Call user function */
- /**********************/
-          /* Send a signal from kernel to user space */
- send_sig(SIGIO,
- devpriv->tsk_Current,
- 0);
-
- }
- }
- }
- } /* pulse encoder */
-
- }
- return;
-
-}
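
The handler deleted above records every interrupt source the same way: it stores the interrupt mask, the module mask and the latched counter value in a fixed-size FIFO at index ui_Write, advances ui_Write modulo APCI1710_SAVE_INTERRUPT, and then raises SIGIO on devpriv->tsk_Current so user space can drain the queue. The sketch below isolates that ring-buffer bookkeeping; the structure layout, field names and FIFO depth are illustrative stand-ins, not the driver's actual types.

	/* Illustrative sketch only: a simplified stand-in for the driver's
	 * s_InterruptParameters bookkeeping.  Names and the FIFO depth are
	 * hypothetical; the wrap-around mirrors ui_Write % APCI1710_SAVE_INTERRUPT. */
	#define SKETCH_SAVE_INTERRUPT	32

	struct sketch_irq_event {
		unsigned long mask;	/* which interrupt source fired */
		unsigned char module;	/* 1 << module index */
		unsigned long latch;	/* latched counter value, if any */
	};

	struct sketch_irq_fifo {
		struct sketch_irq_event ev[SKETCH_SAVE_INTERRUPT];
		unsigned int write;	/* next slot to fill */
		unsigned long count;	/* total interrupts seen */
	};

	static void sketch_push_irq_event(struct sketch_irq_fifo *f,
					  unsigned long mask,
					  unsigned char module,
					  unsigned long latch)
	{
		f->ev[f->write].mask = mask;
		f->ev[f->write].module = module;
		f->ev[f->write].latch = latch;
		f->count++;
		/* wrap the write index so the oldest entry is overwritten */
		f->write = (f->write + 1) % SKETCH_SAVE_INTERRUPT;
	}

After each push the real handler sets b_InterruptFlag and signals the reader with send_sig(SIGIO, devpriv->tsk_Current, 0).
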
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c
index 3d66e48..1128c22 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci035.c
@@ -15,10 +15,6 @@ This program is free software; you can redistribute it and/or modify it under th
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-You should also find the complete GPL in the COPYING file accompanying this source code.
-
@endverbatim
*/
/*
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c
index 24c4c98..0549105 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1500.c
@@ -15,10 +15,6 @@ This program is free software; you can redistribute it and/or modify it under th
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-You should also find the complete GPL in the COPYING file accompanying this source code.
-
@endverbatim
*/
/*
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
index fc31c4b..e3cc429 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
@@ -15,10 +15,6 @@ This program is free software; you can redistribute it and/or modify it under th
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-You should also find the complete GPL in the COPYING file accompanying this source code.
-
@endverbatim
*/
/*
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
index 74065ba..1449b92 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
@@ -15,10 +15,6 @@ This program is free software; you can redistribute it and/or modify it under th
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-You should also find the complete GPL in the COPYING file accompanying this source code.
-
@endverbatim
*/
/*
@@ -44,6 +40,8 @@ You should also find the complete GPL in the COPYING file accompanying this sour
+----------+-----------+------------------------------------------------+
*/
+#include <linux/delay.h>
+
/*
* ADDON RELATED ADDITIONS
*/
@@ -724,9 +722,7 @@ static int i_APCI3120_StopCyclicAcquisition(struct comedi_device *dev,
inb(dev->iobase + APCI3120_RESET_FIFO);
inw(dev->iobase + APCI3120_RD_STATUS);
devpriv->ui_AiActualScan = 0;
- devpriv->ui_AiActualScanPosition = 0;
s->async->cur_chan = 0;
- devpriv->ui_AiBufferPtr = 0;
devpriv->b_AiContinuous = 0;
devpriv->ui_DmaActualBuffer = 0;
@@ -895,9 +891,7 @@ static int i_APCI3120_CyclicAnalogInput(int mode,
/* END JK 07.05.04: Comparison between WIN32 and Linux driver */
devpriv->ui_AiActualScan = 0;
- devpriv->ui_AiActualScanPosition = 0;
s->async->cur_chan = 0;
- devpriv->ui_AiBufferPtr = 0;
devpriv->ui_DmaActualBuffer = 0;
/* value for timer2 minus -2 has to be done .....dunno y?? */
@@ -1351,8 +1345,6 @@ static int i_APCI3120_CommandAnalogInput(struct comedi_device *dev,
devpriv->ui_AiScanLength = cmd->scan_end_arg;
devpriv->pui_AiChannelList = cmd->chanlist;
- /* UPDATE-0.7.57->0.7.68devpriv->AiData=s->async->data; */
- devpriv->AiData = s->async->prealloc_buf;
/* UPDATE-0.7.57->0.7.68devpriv->ui_AiDataLength=s->async->data_len; */
devpriv->ui_AiDataLength = s->async->prealloc_bufsz;
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
index c790873..32dce03 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
@@ -15,10 +15,6 @@ This program is free software; you can redistribute it and/or modify it under th
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-You should also find the complete GPL in the COPYING file accompanying this source code.
-
@endverbatim
*/
/*
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3xxx.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3xxx.c
deleted file mode 100644
index a45a2a2..0000000
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3xxx.c
+++ /dev/null
@@ -1,1376 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
-    Tel: +49(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-You should also find the complete GPL in the COPYING file accompanying this source code.
-
-@endverbatim
-*/
-/*
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstrasse 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-----------------------------------------------------------------------+
- | Project : APCI-3XXX | Compiler : GCC |
- | Module name : hwdrv_apci3xxx.c| Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: S. Weber | Date : 15/09/2005 |
- +-----------------------------------------------------------------------+
- | Description :APCI3XXX Module. Hardware abstraction Layer for APCI3XXX|
- +-----------------------------------------------------------------------+
- | UPDATE'S |
- +-----------------------------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | | | |
- | | | |
- +----------+-----------+------------------------------------------------+
-*/
-
-#ifndef COMEDI_SUBD_TTLIO
-#define COMEDI_SUBD_TTLIO 11 /* Digital Input Output But TTL */
-#endif
-
-#define APCI3XXX_SINGLE 0
-#define APCI3XXX_DIFF 1
-#define APCI3XXX_CONFIGURATION 0
-
-#define APCI3XXX_TTL_INIT_DIRECTION_PORT2 0
-
-static const struct comedi_lrange range_apci3XXX_ai = {
- 8, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2),
- BIP_RANGE(1),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2),
- UNI_RANGE(1)
- }
-};
-
-static const struct comedi_lrange range_apci3XXX_ao = {
- 2, {
- BIP_RANGE(10),
- UNI_RANGE(10)
- }
-};
-
-/*
-+----------------------------------------------------------------------------+
-| ANALOG INPUT FUNCTIONS |
-+----------------------------------------------------------------------------+
-*/
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI3XXX_TestConversionStarted |
-| (struct comedi_device *dev) |
-+----------------------------------------------------------------------------+
-| Task Test if any conversion started |
-+----------------------------------------------------------------------------+
-| Input Parameters : - |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0 : Conversion not started |
-| 1 : Conversion started |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI3XXX_TestConversionStarted(struct comedi_device *dev)
-{
- struct addi_private *devpriv = dev->private;
-
- if ((readl(devpriv->dw_AiBase + 8) & 0x80000UL) == 0x80000UL)
- return 1;
- else
- return 0;
-
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI3XXX_AnalogInputConfigOperatingMode |
-| (struct comedi_device *dev, |
-| struct comedi_subdevice *s, |
-| struct comedi_insn *insn, |
-| unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task Converting mode and convert time selection |
-+----------------------------------------------------------------------------+
-| Input Parameters : b_SingleDiff = (unsigned char) data[1]; |
-| b_TimeBase = (unsigned char) data[2]; (0: ns, 1:micros 2:ms)|
-| dw_ReloadValue = (unsigned int) data[3]; |
-| ........ |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value :>0 : No error |
-| -1 : Single/Diff selection error |
-| -2 : Convert time base unity selection error |
-| -3 : Convert time value selection error |
-| -10: Any conversion started |
-| .... |
-| -100 : Config command error |
-| -101 : Data size error |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI3XXX_AnalogInputConfigOperatingMode(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- const struct addi_board *this_board = comedi_board(dev);
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = insn->n;
- unsigned char b_TimeBase = 0;
- unsigned char b_SingleDiff = 0;
- unsigned int dw_ReloadValue = 0;
- unsigned int dw_TestReloadValue = 0;
-
- /************************/
- /* Test the buffer size */
- /************************/
-
- if (insn->n == 4) {
- /****************************/
-   /* Get the Single/Diff flag */
- /****************************/
-
- b_SingleDiff = (unsigned char) data[1];
-
- /****************************/
-   /* Get the time base unit */
- /****************************/
-
- b_TimeBase = (unsigned char) data[2];
-
- /*************************************/
- /* Get the convert time reload value */
- /*************************************/
-
- dw_ReloadValue = (unsigned int) data[3];
-
- /**********************/
- /* Test the time base */
- /**********************/
-
- if ((this_board->b_AvailableConvertUnit & (1 << b_TimeBase)) !=
- 0) {
- /*******************************/
- /* Test the convert time value */
- /*******************************/
-
- if (dw_ReloadValue <= 65535) {
- dw_TestReloadValue = dw_ReloadValue;
-
- if (b_TimeBase == 1) {
- dw_TestReloadValue =
- dw_TestReloadValue * 1000UL;
- }
- if (b_TimeBase == 2) {
- dw_TestReloadValue =
- dw_TestReloadValue * 1000000UL;
- }
-
- /*******************************/
- /* Test the convert time value */
- /*******************************/
-
- if (dw_TestReloadValue >=
- devpriv->s_EeParameters.
- ui_MinAcquisitiontimeNs) {
- if ((b_SingleDiff == APCI3XXX_SINGLE)
- || (b_SingleDiff ==
- APCI3XXX_DIFF)) {
- if (((b_SingleDiff == APCI3XXX_SINGLE)
- && (devpriv->s_EeParameters.i_NbrAiChannel == 0))
- || ((b_SingleDiff == APCI3XXX_DIFF)
- && (this_board->i_NbrAiChannelDiff == 0))
- ) {
- /*******************************/
- /* Single/Diff selection error */
- /*******************************/
-
- printk("Single/Diff selection error\n");
- i_ReturnValue = -1;
- } else {
- /**********************************/
- /* Test if conversion not started */
- /**********************************/
-
- if (i_APCI3XXX_TestConversionStarted(dev) == 0) {
- devpriv->
- ui_EocEosConversionTime
- =
- (unsigned int)
- dw_ReloadValue;
- devpriv->
- b_EocEosConversionTimeBase
- =
- b_TimeBase;
- devpriv->
- b_SingelDiff
- =
- b_SingleDiff;
- devpriv->
- b_AiInitialisation
- = 1;
-
- /*******************************/
- /* Set the convert timing unit */
- /*******************************/
-
- writel((unsigned int)b_TimeBase,
- devpriv->dw_AiBase + 36);
-
- /**************************/
- /* Set the convert timing */
- /*************************/
-
- writel(dw_ReloadValue, devpriv->dw_AiBase + 32);
- } else {
- /**************************/
- /* Any conversion started */
- /**************************/
-
- printk("Any conversion started\n");
- i_ReturnValue =
- -10;
- }
- }
- } else {
- /*******************************/
- /* Single/Diff selection error */
- /*******************************/
-
- printk("Single/Diff selection error\n");
- i_ReturnValue = -1;
- }
- } else {
- /************************/
- /* Time selection error */
- /************************/
-
- printk("Convert time value selection error\n");
- i_ReturnValue = -3;
- }
- } else {
- /************************/
- /* Time selection error */
- /************************/
-
- printk("Convert time value selection error\n");
- i_ReturnValue = -3;
- }
- } else {
- /*****************************/
- /* Time base selection error */
- /*****************************/
-
- printk("Convert time base unity selection error\n");
- i_ReturnValue = -2;
- }
- } else {
- /*******************/
- /* Data size error */
- /*******************/
-
- printk("Buffer size error\n");
- i_ReturnValue = -101;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI3XXX_InsnConfigAnalogInput |
-| (struct comedi_device *dev, |
-| struct comedi_subdevice *s, |
-| struct comedi_insn *insn, |
-| unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task Converting mode and convert time selection |
-+----------------------------------------------------------------------------+
-| Input Parameters : b_ConvertMode = (unsigned char) data[0]; |
-| b_TimeBase = (unsigned char) data[1]; (0: ns, 1:micros 2:ms)|
-| dw_ReloadValue = (unsigned int) data[2]; |
-| ........ |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value :>0: No error |
-| .... |
-| -100 : Config command error |
-| -101 : Data size error |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI3XXX_InsnConfigAnalogInput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- int i_ReturnValue = insn->n;
-
- /************************/
- /* Test the buffer size */
- /************************/
-
- if (insn->n >= 1) {
- switch ((unsigned char) data[0]) {
- case APCI3XXX_CONFIGURATION:
- i_ReturnValue =
- i_APCI3XXX_AnalogInputConfigOperatingMode(dev,
- s, insn, data);
- break;
-
- default:
- i_ReturnValue = -100;
- printk("Config command error %d\n", data[0]);
- break;
- }
- } else {
- /*******************/
- /* Data size error */
- /*******************/
-
- printk("Buffer size error\n");
- i_ReturnValue = -101;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI3XXX_InsnReadAnalogInput |
-| (struct comedi_device *dev, |
-| struct comedi_subdevice *s, |
-| struct comedi_insn *insn, |
-| unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task Read 1 analog input |
-+----------------------------------------------------------------------------+
-| Input Parameters : b_Range = CR_RANGE(insn->chanspec); |
-| b_Channel = CR_CHAN(insn->chanspec); |
-| dw_NbrOfAcquisition = insn->n; |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value :>0: No error |
-| -3 : Channel selection error |
-|                    -4 : Configuration selection error                      |
-| -10: Any conversion started |
-| .... |
-| -100 : Config command error |
-| -101 : Data size error |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI3XXX_InsnReadAnalogInput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- const struct addi_board *this_board = comedi_board(dev);
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = insn->n;
- unsigned char b_Configuration = (unsigned char) CR_RANGE(insn->chanspec);
- unsigned char b_Channel = (unsigned char) CR_CHAN(insn->chanspec);
- unsigned int dw_Temp = 0;
- unsigned int dw_Configuration = 0;
- unsigned int dw_AcquisitionCpt = 0;
- unsigned char b_Interrupt = 0;
-
- /*************************************/
- /* Test if operating mode configured */
- /*************************************/
-
- if (devpriv->b_AiInitialisation) {
- /***************************/
- /* Test the channel number */
- /***************************/
-
- if (((b_Channel < devpriv->s_EeParameters.i_NbrAiChannel)
- && (devpriv->b_SingelDiff == APCI3XXX_SINGLE))
- || ((b_Channel < this_board->i_NbrAiChannelDiff)
- && (devpriv->b_SingelDiff == APCI3XXX_DIFF))) {
- /**********************************/
- /* Test the channel configuration */
- /**********************************/
-
- if (b_Configuration > 7) {
- /***************************/
- /* Channel not initialised */
- /***************************/
-
- i_ReturnValue = -4;
- printk("Channel %d range %d selection error\n",
- b_Channel, b_Configuration);
- }
- } else {
- /***************************/
- /* Channel selection error */
- /***************************/
-
- i_ReturnValue = -3;
- printk("Channel %d selection error\n", b_Channel);
- }
-
- /**************************/
-   /* Test if no error occurred */
- /**************************/
-
- if (i_ReturnValue >= 0) {
- /************************/
- /* Test the buffer size */
- /************************/
-
- if ((b_Interrupt != 0) || ((b_Interrupt == 0)
- && (insn->n >= 1))) {
- /**********************************/
- /* Test if conversion not started */
- /**********************************/
-
- if (i_APCI3XXX_TestConversionStarted(dev) == 0) {
- /******************/
- /* Clear the FIFO */
- /******************/
-
- writel(0x10000UL, devpriv->dw_AiBase + 12);
-
- /*******************************/
- /* Get and save the delay mode */
- /*******************************/
-
- dw_Temp = readl(devpriv->dw_AiBase + 4);
- dw_Temp = dw_Temp & 0xFFFFFEF0UL;
-
- /***********************************/
- /* Channel configuration selection */
- /***********************************/
-
- writel(dw_Temp, devpriv->dw_AiBase + 4);
-
- /**************************/
- /* Make the configuration */
- /**************************/
-
- dw_Configuration =
- (b_Configuration & 3) |
- ((unsigned int) (b_Configuration >> 2)
- << 6) | ((unsigned int) devpriv->
- b_SingelDiff << 7);
-
- /***************************/
- /* Write the configuration */
- /***************************/
-
- writel(dw_Configuration,
- devpriv->dw_AiBase + 0);
-
- /*********************/
- /* Channel selection */
- /*********************/
-
- writel(dw_Temp | 0x100UL,
- devpriv->dw_AiBase + 4);
- writel((unsigned int) b_Channel,
- devpriv->dw_AiBase + 0);
-
- /***********************/
-      /* Restore delay mode */
- /***********************/
-
- writel(dw_Temp, devpriv->dw_AiBase + 4);
-
- /***********************************/
- /* Set the number of sequence to 1 */
- /***********************************/
-
- writel(1, devpriv->dw_AiBase + 48);
-
- /***************************/
- /* Save the interrupt flag */
- /***************************/
-
- devpriv->b_EocEosInterrupt =
- b_Interrupt;
-
- /*******************************/
- /* Save the number of channels */
- /*******************************/
-
- devpriv->ui_AiNbrofChannels = 1;
-
- /******************************/
- /* Test if interrupt not used */
- /******************************/
-
- if (b_Interrupt == 0) {
- for (dw_AcquisitionCpt = 0;
- dw_AcquisitionCpt <
- insn->n;
- dw_AcquisitionCpt++) {
- /************************/
- /* Start the conversion */
- /************************/
-
- writel(0x80000UL, devpriv->dw_AiBase + 8);
-
- /****************/
-       /* Wait for the EOS */
- /****************/
-
- do {
- dw_Temp = readl(devpriv->dw_AiBase + 20);
- dw_Temp = dw_Temp & 1;
- } while (dw_Temp != 1);
-
- /*************************/
- /* Read the analog value */
- /*************************/
-
- data[dw_AcquisitionCpt] = (unsigned int)readl(devpriv->dw_AiBase + 28);
- }
- } else {
- /************************/
- /* Start the conversion */
- /************************/
-
- writel(0x180000UL, devpriv->dw_AiBase + 8);
- }
- } else {
- /**************************/
- /* Any conversion started */
- /**************************/
-
- printk("Any conversion started\n");
- i_ReturnValue = -10;
- }
- } else {
- /*******************/
- /* Data size error */
- /*******************/
-
- printk("Buffer size error\n");
- i_ReturnValue = -101;
- }
- }
- } else {
- /***************************/
-   /* Operating mode not configured */
- /***************************/
-
- printk("Operating mode not configured\n");
- i_ReturnValue = -1;
- }
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function name : void v_APCI3XXX_Interrupt (int irq, |
-| void *d) |
-+----------------------------------------------------------------------------+
-| Task :Interrupt handler for APCI3XXX |
-| When interrupt occurs this gets called. |
-| First it finds which interrupt has been generated and |
-| handles corresponding interrupt |
-+----------------------------------------------------------------------------+
-| Input Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : - |
-+----------------------------------------------------------------------------+
-*/
-
-static void v_APCI3XXX_Interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct addi_private *devpriv = dev->private;
- unsigned char b_CopyCpt = 0;
- unsigned int dw_Status = 0;
-
- /***************************/
-  /* Test if interrupt occurred */
- /***************************/
-
- dw_Status = readl(devpriv->dw_AiBase + 16);
- if ( (dw_Status & 0x2UL) == 0x2UL) {
- /***********************/
- /* Reset the interrupt */
- /***********************/
-
- writel(dw_Status, devpriv->dw_AiBase + 16);
-
- /*****************************/
- /* Test if interrupt enabled */
- /*****************************/
-
- if (devpriv->b_EocEosInterrupt == 1) {
- /********************************/
- /* Read all analog inputs value */
- /********************************/
-
- for (b_CopyCpt = 0;
- b_CopyCpt < devpriv->ui_AiNbrofChannels;
- b_CopyCpt++) {
- devpriv->ui_AiReadData[b_CopyCpt] =
- (unsigned int)readl(devpriv->dw_AiBase + 28);
- }
-
- /**************************/
- /* Set the interrupt flag */
- /**************************/
-
- devpriv->b_EocEosInterrupt = 2;
-
- /**********************************************/
-   /* Send a signal from kernel to user space    */
- /**********************************************/
-
- send_sig(SIGIO, devpriv->tsk_Current, 0);
- }
- }
-}
-
-/*
-+----------------------------------------------------------------------------+
-| ANALOG OUTPUT SUBDEVICE |
-+----------------------------------------------------------------------------+
-*/
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI3XXX_InsnWriteAnalogOutput |
-| (struct comedi_device *dev, |
-| struct comedi_subdevice *s, |
-| struct comedi_insn *insn, |
-| unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task              Write 1 analog output                                    |
-+----------------------------------------------------------------------------+
-| Input Parameters : b_Range = CR_RANGE(insn->chanspec); |
-| b_Channel = CR_CHAN(insn->chanspec); |
-| data[0] = analog value; |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value :>0: No error |
-| -3 : Channel selection error |
-|                    -4 : Configuration selection error                      |
-| .... |
-| -101 : Data size error |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI3XXX_InsnWriteAnalogOutput(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned char b_Range = (unsigned char) CR_RANGE(insn->chanspec);
- unsigned char b_Channel = (unsigned char) CR_CHAN(insn->chanspec);
- unsigned int dw_Status = 0;
- int i_ReturnValue = insn->n;
-
- /************************/
- /* Test the buffer size */
- /************************/
-
- if (insn->n >= 1) {
- /***************************/
- /* Test the channel number */
- /***************************/
-
- if (b_Channel < devpriv->s_EeParameters.i_NbrAoChannel) {
- /**********************************/
- /* Test the channel configuration */
- /**********************************/
-
- if (b_Range < 2) {
- /***************************/
- /* Set the range selection */
- /***************************/
-
- writel(b_Range, devpriv->dw_AiBase + 96);
-
- /**************************************************/
- /* Write the analog value to the selected channel */
- /**************************************************/
-
- writel((data[0] << 8) | b_Channel,
- devpriv->dw_AiBase + 100);
-
- /****************************/
-     /* Wait for the end of transfer */
- /****************************/
-
- do {
- dw_Status = readl(devpriv->dw_AiBase + 96);
- } while ((dw_Status & 0x100) != 0x100);
- } else {
- /***************************/
- /* Channel not initialised */
- /***************************/
-
- i_ReturnValue = -4;
- printk("Channel %d range %d selection error\n",
- b_Channel, b_Range);
- }
- } else {
- /***************************/
- /* Channel selection error */
- /***************************/
-
- i_ReturnValue = -3;
- printk("Channel %d selection error\n", b_Channel);
- }
- } else {
- /*******************/
- /* Data size error */
- /*******************/
-
- printk("Buffer size error\n");
- i_ReturnValue = -101;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| TTL FUNCTIONS |
-+----------------------------------------------------------------------------+
-*/
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI3XXX_InsnConfigInitTTLIO |
-| (struct comedi_device *dev, |
-| struct comedi_subdevice *s, |
-| struct comedi_insn *insn, |
-| unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task              You must call this function before calling any other     |
-|                   function which accesses the TTL I/O ports.               |
-| APCI3XXX_TTL_INIT_DIRECTION_PORT2(user inputs for direction)|
-+----------------------------------------------------------------------------+
-| Input Parameters : b_InitType = (unsigned char) data[0]; |
-| b_Port2Mode = (unsigned char) data[1]; |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value :>0: No error |
-| -1: Port 2 mode selection is wrong |
-| .... |
-| -100 : Config command error |
-| -101 : Data size error |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI3XXX_InsnConfigInitTTLIO(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = insn->n;
- unsigned char b_Command = 0;
-
- /************************/
- /* Test the buffer size */
- /************************/
-
- if (insn->n >= 1) {
- /*******************/
- /* Get the command */
-  /*******************/
-
- b_Command = (unsigned char) data[0];
-
- /********************/
- /* Test the command */
- /********************/
-
- if (b_Command == APCI3XXX_TTL_INIT_DIRECTION_PORT2) {
- /***************************************/
- /* Test the initialisation buffer size */
- /***************************************/
-
- if ((b_Command == APCI3XXX_TTL_INIT_DIRECTION_PORT2)
- && (insn->n != 2)) {
- /*******************/
- /* Data size error */
- /*******************/
-
- printk("Buffer size error\n");
- i_ReturnValue = -101;
- }
- } else {
- /************************/
- /* Config command error */
- /************************/
-
- printk("Command selection error\n");
- i_ReturnValue = -100;
- }
- } else {
- /*******************/
- /* Data size error */
- /*******************/
-
- printk("Buffer size error\n");
- i_ReturnValue = -101;
- }
-
- /*********************************************************************************/
- /* Test if no error occurred and APCI3XXX_TTL_INIT_DIRECTION_PORT2 command selected */
- /*********************************************************************************/
-
- if ((i_ReturnValue >= 0)
- && (b_Command == APCI3XXX_TTL_INIT_DIRECTION_PORT2)) {
- /**********************/
- /* Test the direction */
- /**********************/
-
- if ((data[1] == 0) || (data[1] == 0xFF)) {
- /**************************/
- /* Save the configuration */
- /**************************/
-
- devpriv->ul_TTLPortConfiguration[0] =
- devpriv->ul_TTLPortConfiguration[0] | data[1];
- } else {
- /************************/
- /* Port direction error */
- /************************/
-
- printk("Port 2 direction selection error\n");
- i_ReturnValue = -1;
- }
- }
-
- /**************************/
- /* Test if no error occurred */
- /**************************/
-
- if (i_ReturnValue >= 0) {
- /***********************************/
-  /* Test if TTL port initialisation */
- /***********************************/
-
- if (b_Command == APCI3XXX_TTL_INIT_DIRECTION_PORT2) {
- /*************************/
- /* Set the configuration */
- /*************************/
-
- outl(data[1], devpriv->iobase + 224);
- }
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| TTL INPUT FUNCTIONS |
-+----------------------------------------------------------------------------+
-*/
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI3XXX_InsnBitsTTLIO |
-| (struct comedi_device *dev, |
-| struct comedi_subdevice *s, |
-| struct comedi_insn *insn, |
-| unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task              : Write the selected output mask and read the status of  |
-|                     all TTL channels                                        |
-+----------------------------------------------------------------------------+
-| Input Parameters : dw_ChannelMask = data [0]; |
-| dw_BitMask = data [1]; |
-+----------------------------------------------------------------------------+
-| Output Parameters : data[1] : All TTL channel states                        |
-+----------------------------------------------------------------------------+
-| Return Value : >0 : No error |
-| -4 : Channel mask error |
-| -101 : Data size error |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI3XXX_InsnBitsTTLIO(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = insn->n;
- unsigned char b_ChannelCpt = 0;
- unsigned int dw_ChannelMask = 0;
- unsigned int dw_BitMask = 0;
- unsigned int dw_Status = 0;
-
- /************************/
- /* Test the buffer size */
- /************************/
-
- if (insn->n >= 2) {
- /*******************************/
-  /* Get the channel and bit masks */
- /*******************************/
-
- dw_ChannelMask = data[0];
- dw_BitMask = data[1];
-
- /*************************/
- /* Test the channel mask */
- /*************************/
-
- if (((dw_ChannelMask & 0XFF00FF00) == 0) &&
- (((devpriv->ul_TTLPortConfiguration[0] & 0xFF) == 0xFF)
- || (((devpriv->ul_TTLPortConfiguration[0] &
- 0xFF) == 0)
- && ((dw_ChannelMask & 0XFF0000) ==
- 0)))) {
- /*********************************/
- /* Test if set/reset any channel */
- /*********************************/
-
- if (dw_ChannelMask) {
- /****************************************/
-    /* Test if set/reset any port 0 channels */
- /****************************************/
-
- if (dw_ChannelMask & 0xFF) {
- /*******************************************/
- /* Read port 0 (first digital output port) */
- /*******************************************/
-
- dw_Status = inl(devpriv->iobase + 80);
-
- for (b_ChannelCpt = 0; b_ChannelCpt < 8;
- b_ChannelCpt++) {
- if ((dw_ChannelMask >>
- b_ChannelCpt) &
- 1) {
- dw_Status =
- (dw_Status &
- (0xFF - (1 << b_ChannelCpt))) | (dw_BitMask & (1 << b_ChannelCpt));
- }
- }
-
- outl(dw_Status, devpriv->iobase + 80);
- }
-
- /****************************************/
-    /* Test if set/reset any port 2 channels */
- /****************************************/
-
- if (dw_ChannelMask & 0xFF0000) {
- dw_BitMask = dw_BitMask >> 16;
- dw_ChannelMask = dw_ChannelMask >> 16;
-
- /********************************************/
- /* Read port 2 (second digital output port) */
- /********************************************/
-
- dw_Status = inl(devpriv->iobase + 112);
-
- for (b_ChannelCpt = 0; b_ChannelCpt < 8;
- b_ChannelCpt++) {
- if ((dw_ChannelMask >>
- b_ChannelCpt) &
- 1) {
- dw_Status =
- (dw_Status &
- (0xFF - (1 << b_ChannelCpt))) | (dw_BitMask & (1 << b_ChannelCpt));
- }
- }
-
- outl(dw_Status, devpriv->iobase + 112);
- }
- }
-
- /*******************************************/
- /* Read port 0 (first digital output port) */
- /*******************************************/
-
- data[1] = inl(devpriv->iobase + 80);
-
- /******************************************/
- /* Read port 1 (first digital input port) */
- /******************************************/
-
- data[1] = data[1] | (inl(devpriv->iobase + 64) << 8);
-
- /************************/
- /* Test if port 2 input */
- /************************/
-
- if ((devpriv->ul_TTLPortConfiguration[0] & 0xFF) == 0) {
- data[1] =
- data[1] | (inl(devpriv->iobase +
- 96) << 16);
- } else {
- data[1] =
- data[1] | (inl(devpriv->iobase +
- 112) << 16);
- }
- } else {
- /************************/
-   /* Channel mask error */
- /************************/
-
- printk("Channel mask error\n");
- i_ReturnValue = -4;
- }
- } else {
- /*******************/
- /* Data size error */
- /*******************/
-
- printk("Buffer size error\n");
- i_ReturnValue = -101;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI3XXX_InsnReadTTLIO |
-| (struct comedi_device *dev, |
-| struct comedi_subdevice *s, |
-| struct comedi_insn *insn, |
-| unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task              : Read the status of the selected channel                |
-+----------------------------------------------------------------------------+
-| Input Parameters : b_Channel = CR_CHAN(insn->chanspec) |
-+----------------------------------------------------------------------------+
-| Output Parameters : data[0] : Selected TTL channel state |
-+----------------------------------------------------------------------------+
-| Return Value : 0 : No error |
-| -3 : Channel selection error |
-| -101 : Data size error |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI3XXX_InsnReadTTLIO(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned char b_Channel = (unsigned char) CR_CHAN(insn->chanspec);
- int i_ReturnValue = insn->n;
- unsigned int *pls_ReadData = data;
-
- /************************/
- /* Test the buffer size */
- /************************/
-
- if (insn->n >= 1) {
- /***********************/
- /* Test if read port 0 */
- /***********************/
-
- if (b_Channel < 8) {
- /*******************************************/
- /* Read port 0 (first digital output port) */
- /*******************************************/
-
- pls_ReadData[0] = inl(devpriv->iobase + 80);
- pls_ReadData[0] = (pls_ReadData[0] >> b_Channel) & 1;
- } else {
- /***********************/
- /* Test if read port 1 */
- /***********************/
-
- if ((b_Channel > 7) && (b_Channel < 16)) {
- /******************************************/
- /* Read port 1 (first digital input port) */
- /******************************************/
-
- pls_ReadData[0] = inl(devpriv->iobase + 64);
- pls_ReadData[0] =
- (pls_ReadData[0] >> (b_Channel -
- 8)) & 1;
- } else {
- /***********************/
- /* Test if read port 2 */
- /***********************/
-
- if ((b_Channel > 15) && (b_Channel < 24)) {
- /************************/
- /* Test if port 2 input */
- /************************/
-
- if ((devpriv->ul_TTLPortConfiguration[0]
- & 0xFF) == 0) {
- pls_ReadData[0] =
- inl(devpriv->iobase +
- 96);
- pls_ReadData[0] =
- (pls_ReadData[0] >>
- (b_Channel - 16)) & 1;
- } else {
- pls_ReadData[0] =
- inl(devpriv->iobase +
- 112);
- pls_ReadData[0] =
- (pls_ReadData[0] >>
- (b_Channel - 16)) & 1;
- }
- } else {
- /***************************/
- /* Channel selection error */
- /***************************/
-
- i_ReturnValue = -3;
- printk("Channel %d selection error\n",
- b_Channel);
- }
- }
- }
- } else {
- /*******************/
- /* Data size error */
- /*******************/
-
- printk("Buffer size error\n");
- i_ReturnValue = -101;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| TTL OUTPUT FUNCTIONS |
-+----------------------------------------------------------------------------+
-*/
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI3XXX_InsnWriteTTLIO |
-| (struct comedi_device *dev, |
-| struct comedi_subdevice *s, |
-| struct comedi_insn *insn, |
-| unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task              : Set the state of the selected TTL output channel       |
-+----------------------------------------------------------------------------+
-| Input Parameters : b_Channel = CR_CHAN(insn->chanspec) |
-| b_State = data [0] |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0 : No error |
-| -3 : Channel selection error |
-| -101 : Data size error |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI3XXX_InsnWriteTTLIO(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = insn->n;
- unsigned char b_Channel = (unsigned char) CR_CHAN(insn->chanspec);
- unsigned char b_State = 0;
- unsigned int dw_Status = 0;
-
- /************************/
- /* Test the buffer size */
- /************************/
-
- if (insn->n >= 1) {
- b_State = (unsigned char) data[0];
-
- /***********************/
- /* Test if read port 0 */
- /***********************/
-
- if (b_Channel < 8) {
- /*****************************************************************************/
- /* Read port 0 (first digital output port) and set/reset the selected channel */
- /*****************************************************************************/
-
- dw_Status = inl(devpriv->iobase + 80);
- dw_Status =
- (dw_Status & (0xFF -
- (1 << b_Channel))) | ((b_State & 1) <<
- b_Channel);
- outl(dw_Status, devpriv->iobase + 80);
- } else {
- /***********************/
- /* Test if read port 2 */
- /***********************/
-
- if ((b_Channel > 15) && (b_Channel < 24)) {
- /*************************/
- /* Test if port 2 output */
- /*************************/
-
- if ((devpriv->ul_TTLPortConfiguration[0] & 0xFF)
- == 0xFF) {
- /*****************************************************************************/
-      /* Read port 2 (second digital output port) and set/reset the selected channel */
- /*****************************************************************************/
-
- dw_Status = inl(devpriv->iobase + 112);
- dw_Status =
- (dw_Status & (0xFF -
- (1 << (b_Channel -
- 16)))) |
- ((b_State & 1) << (b_Channel -
- 16));
- outl(dw_Status, devpriv->iobase + 112);
- } else {
- /***************************/
- /* Channel selection error */
- /***************************/
-
- i_ReturnValue = -3;
- printk("Channel %d selection error\n",
- b_Channel);
- }
- } else {
- /***************************/
- /* Channel selection error */
- /***************************/
-
- i_ReturnValue = -3;
- printk("Channel %d selection error\n",
- b_Channel);
- }
- }
- } else {
- /*******************/
- /* Data size error */
- /*******************/
-
- printk("Buffer size error\n");
- i_ReturnValue = -101;
- }
-
- return i_ReturnValue;
-}
-
-static int apci3xxx_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
-
- data[1] = inl(devpriv->iobase + 32) & 0xf;
-
- return insn->n;
-}
-
-static int apci3xxx_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int mask = data[0];
- unsigned int bits = data[1];
-
- s->state = inl(devpriv->iobase + 48) & 0xf;
- if (mask) {
- s->state &= ~mask;
- s->state |= (bits & mask);
-
- outl(s->state, devpriv->iobase + 48);
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name   : int i_APCI3XXX_Reset(struct comedi_device *dev)          |
-+----------------------------------------------------------------------------+
-| Task :resets all the registers |
-+----------------------------------------------------------------------------+
-| Input Parameters : struct comedi_device *dev |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : - |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI3XXX_Reset(struct comedi_device *dev)
-{
- struct addi_private *devpriv = dev->private;
- unsigned char b_Cpt = 0;
-
- /*************************/
- /* Disable the interrupt */
- /*************************/
-
- disable_irq(dev->irq);
-
- /****************************/
- /* Reset the interrupt flag */
- /****************************/
-
- devpriv->b_EocEosInterrupt = 0;
-
- /***************************/
- /* Clear the start command */
- /***************************/
-
- writel(0, devpriv->dw_AiBase + 8);
-
- /*****************************/
- /* Reset the interrupt flags */
- /*****************************/
-
- writel(readl(devpriv->dw_AiBase + 16), devpriv->dw_AiBase + 16);
-
- /*****************/
- /* clear the EOS */
- /*****************/
-
- readl(devpriv->dw_AiBase + 20);
-
- /******************/
- /* Clear the FIFO */
- /******************/
-
- for (b_Cpt = 0; b_Cpt < 16; b_Cpt++) {
- readl(devpriv->dw_AiBase + 28);
- }
-
- /************************/
- /* Enable the interrupt */
- /************************/
-
- enable_irq(dev->irq);
-
- return 0;
-}
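
The TTL helpers in the file removed above (i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnWriteTTLIO and apci3xxx_do_insn_bits) all update an output port with the same read-modify-write step: read the current 8-bit state, clear the channels named in the mask, merge in the requested bit values and write the result back. A minimal sketch of that step follows; it takes the state read from the port as a parameter instead of calling inl()/outl(), so the function itself is purely illustrative.

	/* Illustrative sketch only: the mask/bits update used by the deleted
	 * TTL helpers.  'current_state' stands in for the inl() read of the
	 * port; the caller would outl() the returned value and also report
	 * it back in data[1]. */
	static unsigned int sketch_update_port_bits(unsigned int current_state,
						    unsigned int mask,
						    unsigned int bits)
	{
		unsigned int state = current_state & 0xFF;	/* 8-bit TTL port */

		state &= ~mask;			/* clear the addressed channels */
		state |= bits & mask;		/* set them to the requested levels */

		return state;
	}
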
diff --git a/drivers/staging/comedi/drivers/addi_apci_035.c b/drivers/staging/comedi/drivers/addi_apci_035.c
index 43c2c10..8d229b2 100644
--- a/drivers/staging/comedi/drivers/addi_apci_035.c
+++ b/drivers/staging/comedi/drivers/addi_apci_035.c
@@ -1,3 +1,4 @@
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c
index 3d4878f..34ab067 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1032.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1032.c
@@ -20,15 +20,9 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * You should also find the complete GPL in the COPYING file accompanying this
- * source code.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
@@ -296,10 +290,9 @@ static int apci1032_auto_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c
index b52cfe0..ae9ded6 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1500.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1500.c
@@ -1,3 +1,4 @@
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
diff --git a/drivers/staging/comedi/drivers/addi_apci_1516.c b/drivers/staging/comedi/drivers/addi_apci_1516.c
index ed01c56..08674c1 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1516.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1516.c
@@ -20,15 +20,9 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * You should also find the complete GPL in the COPYING file accompanying
- * this source code.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
@@ -143,10 +137,9 @@ static int apci1516_auto_attach(struct comedi_device *dev,
dev->board_ptr = this_board;
dev->board_name = this_board->name;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
@@ -203,7 +196,6 @@ static void apci1516_detach(struct comedi_device *dev)
{
if (dev->iobase)
apci1516_reset(dev);
- comedi_spriv_free(dev, 2);
comedi_pci_disable(dev);
}
diff --git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c
index 22bace6..c5717d6 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1564.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1564.c
@@ -1,3 +1,4 @@
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
diff --git a/drivers/staging/comedi/drivers/addi_apci_16xx.c b/drivers/staging/comedi/drivers/addi_apci_16xx.c
index 4c6a9b5..9652374 100644
--- a/drivers/staging/comedi/drivers/addi_apci_16xx.c
+++ b/drivers/staging/comedi/drivers/addi_apci_16xx.c
@@ -20,15 +20,9 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * You should also find the complete GPL in the COPYING file accompanying
- * this source code.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
@@ -66,36 +60,22 @@ static int apci16xx_insn_config(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- unsigned int chan_mask = 1 << CR_CHAN(insn->chanspec);
- unsigned int bits;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int mask;
+ int ret;
- /*
- * Each 8-bit "port" is configurable as either input or
- * output. Changing the configuration of any channel in
- * a port changes the entire port.
- */
- if (chan_mask & 0x000000ff)
- bits = 0x000000ff;
- else if (chan_mask & 0x0000ff00)
- bits = 0x0000ff00;
- else if (chan_mask & 0x00ff0000)
- bits = 0x00ff0000;
+ if (chan < 8)
+ mask = 0x000000ff;
+ else if (chan < 16)
+ mask = 0x0000ff00;
+ else if (chan < 24)
+ mask = 0x00ff0000;
else
- bits = 0xff000000;
-
- switch (data[0]) {
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~bits;
- break;
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= bits;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (s->io_bits & bits) ? COMEDI_INPUT : COMEDI_OUTPUT;
- return insn->n;
- default:
- return -EINVAL;
- }
+ mask = 0xff000000;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
outl(s->io_bits, dev->iobase + APCI16XX_DIR_REG(s->index));
diff --git a/drivers/staging/comedi/drivers/addi_apci_1710.c b/drivers/staging/comedi/drivers/addi_apci_1710.c
deleted file mode 100644
index c9e6471..0000000
--- a/drivers/staging/comedi/drivers/addi_apci_1710.c
+++ /dev/null
@@ -1,99 +0,0 @@
-#include <linux/pci.h>
-
-#include <asm/i387.h>
-
-#include "../comedidev.h"
-#include "comedi_fc.h"
-#include "amcc_s5933.h"
-
-#include "addi-data/addi_common.h"
-
-static void fpu_begin(void)
-{
- kernel_fpu_begin();
-}
-
-static void fpu_end(void)
-{
- kernel_fpu_end();
-}
-
-#include "addi-data/addi_eeprom.c"
-#include "addi-data/hwdrv_APCI1710.c"
-
-static irqreturn_t v_ADDI_Interrupt(int irq, void *d)
-{
- v_APCI1710_Interrupt(irq, d);
- return IRQ_RETVAL(1);
-}
-
-static int apci1710_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct addi_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
- if (!devpriv)
- return -ENOMEM;
- dev->private = devpriv;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
- devpriv->s_BoardInfos.ui_Address = pci_resource_start(pcidev, 2);
-
- if (pcidev->irq > 0) {
- ret = request_irq(pcidev->irq, v_ADDI_Interrupt, IRQF_SHARED,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = pcidev->irq;
- }
-
- i_ADDI_AttachPCI1710(dev);
-
- i_APCI1710_Reset(dev);
- return 0;
-}
-
-static void apci1710_detach(struct comedi_device *dev)
-{
- if (dev->iobase)
- i_APCI1710_Reset(dev);
- if (dev->irq)
- free_irq(dev->irq, dev);
- comedi_pci_disable(dev);
-}
-
-static struct comedi_driver apci1710_driver = {
- .driver_name = "addi_apci_1710",
- .module = THIS_MODULE,
- .auto_attach = apci1710_auto_attach,
- .detach = apci1710_detach,
-};
-
-static int apci1710_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &apci1710_driver, id->driver_data);
-}
-
-static DEFINE_PCI_DEVICE_TABLE(apci1710_pci_table) = {
- { PCI_DEVICE(PCI_VENDOR_ID_AMCC, APCI1710_BOARD_DEVICE_ID) },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, apci1710_pci_table);
-
-static struct pci_driver apci1710_pci_driver = {
- .name = "addi_apci_1710",
- .id_table = apci1710_pci_table,
- .probe = apci1710_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(apci1710_driver, apci1710_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_apci_2032.c b/drivers/staging/comedi/drivers/addi_apci_2032.c
index b666637..6b0ea16 100644
--- a/drivers/staging/comedi/drivers/addi_apci_2032.c
+++ b/drivers/staging/comedi/drivers/addi_apci_2032.c
@@ -20,17 +20,12 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * You should also find the complete GPL in the COPYING file accompanying
- * this source code.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
+#include <linux/slab.h>
#include "../comedidev.h"
#include "addi_watchdog.h"
@@ -354,7 +349,6 @@ static void apci2032_detach(struct comedi_device *dev)
free_irq(dev->irq, dev);
if (dev->read_subdev)
kfree(dev->read_subdev->private);
- comedi_spriv_free(dev, 1);
comedi_pci_disable(dev);
}
diff --git a/drivers/staging/comedi/drivers/addi_apci_2200.c b/drivers/staging/comedi/drivers/addi_apci_2200.c
index 1cdc08d..92ac8ec 100644
--- a/drivers/staging/comedi/drivers/addi_apci_2200.c
+++ b/drivers/staging/comedi/drivers/addi_apci_2200.c
@@ -20,15 +20,9 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * You should also find the complete GPL in the COPYING file accompanying
- * this source code.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
@@ -130,7 +124,6 @@ static void apci2200_detach(struct comedi_device *dev)
{
if (dev->iobase)
apci2200_reset(dev);
- comedi_spriv_free(dev, 2);
comedi_pci_disable(dev);
}
diff --git a/drivers/staging/comedi/drivers/addi_apci_3120.c b/drivers/staging/comedi/drivers/addi_apci_3120.c
index 317a26d..d804957 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3120.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3120.c
@@ -1,3 +1,4 @@
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
@@ -65,10 +66,9 @@ static int apci3120_auto_attach(struct comedi_device *dev,
dev->board_ptr = this_board;
dev->board_name = this_board->pc_DriverName;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
@@ -103,8 +103,6 @@ static int apci3120_auto_attach(struct comedi_device *dev,
if (devpriv->ul_DmaBufferVirtual[i]) {
devpriv->ui_DmaBufferPages[i] = pages;
devpriv->ui_DmaBufferSize[i] = PAGE_SIZE * pages;
- devpriv->ui_DmaBufferSamples[i] =
- devpriv->ui_DmaBufferSize[i] >> 1;
devpriv->ul_DmaBufferHw[i] =
virt_to_bus((void *)devpriv->
ul_DmaBufferVirtual[i]);
@@ -138,9 +136,6 @@ static int apci3120_auto_attach(struct comedi_device *dev,
s->len_chanlist = this_board->i_AiChannelList;
s->range_table = &range_apci3120_ai;
- /* Set the initialisation flag */
- devpriv->b_AiInitialisation = 1;
-
s->insn_config = i_APCI3120_InsnConfigAnalogInput;
s->insn_read = i_APCI3120_InsnReadAnalogInput;
s->do_cmdtest = i_APCI3120_CommandTestAnalogInput;
diff --git a/drivers/staging/comedi/drivers/addi_apci_3200.c b/drivers/staging/comedi/drivers/addi_apci_3200.c
index 17b540d..1213d5a 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3200.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3200.c
@@ -1,3 +1,4 @@
+#include <linux/module.h>
#include <linux/pci.h>
#include <asm/i387.h>
diff --git a/drivers/staging/comedi/drivers/addi_apci_3501.c b/drivers/staging/comedi/drivers/addi_apci_3501.c
index a0cf6ec..d9650ff 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3501.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3501.c
@@ -20,15 +20,9 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * You should also find the complete GPL in the COPYING file accompanying
- * this source code.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
@@ -339,10 +333,9 @@ static int apci3501_auto_attach(struct comedi_device *dev,
int ao_n_chan;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/addi_apci_3xxx.c b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
index ec4d6ca..cf5dd10 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3xxx.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
@@ -1,14 +1,58 @@
+/*
+ * addi_apci_3xxx.c
+ * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
+ * Project manager: S. Weber
+ *
+ * ADDI-DATA GmbH
+ * Dieselstrasse 3
+ * D-77833 Ottersweier
+ * Tel: +49(0)7223/9493-0
+ * Fax: +49(0)7223/9493-92
+ * http://www.addi-data.com
+ * info@addi-data.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/interrupt.h>
#include "../comedidev.h"
+
#include "comedi_fc.h"
-#include "amcc_s5933.h"
-#include "addi-data/addi_common.h"
+#define CONV_UNIT_NS (1 << 0)
+#define CONV_UNIT_US (1 << 1)
+#define CONV_UNIT_MS (1 << 2)
-#include "addi-data/addi_eeprom.c"
-#include "addi-data/hwdrv_apci3xxx.c"
-#include "addi-data/addi_common.c"
+static const struct comedi_lrange apci3xxx_ai_range = {
+ 8, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2),
+ BIP_RANGE(1),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2),
+ UNI_RANGE(1)
+ }
+};
+
+static const struct comedi_lrange apci3xxx_ao_range = {
+ 2, {
+ BIP_RANGE(10),
+ UNI_RANGE(10)
+ }
+};
enum apci3xxx_boardid {
BOARD_APCI3000_16,
@@ -38,651 +82,842 @@ enum apci3xxx_boardid {
BOARD_APCI3500,
};
-static const struct addi_board apci3xxx_boardtypes[] = {
+struct apci3xxx_boardinfo {
+ const char *name;
+ int ai_subdev_flags;
+ int ai_n_chan;
+ unsigned int ai_maxdata;
+ unsigned char ai_conv_units;
+ unsigned int ai_min_acq_ns;
+ unsigned int has_ao:1;
+ unsigned int has_dig_in:1;
+ unsigned int has_dig_out:1;
+ unsigned int has_ttl_io:1;
+};
+
+static const struct apci3xxx_boardinfo apci3xxx_boardtypes[] = {
[BOARD_APCI3000_16] = {
- .pc_DriverName = "apci3000-16",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .pc_EepromChip = ADDIDATA_9054,
- .i_NbrAiChannel = 16,
- .i_NbrAiChannelDiff = 8,
- .i_AiChannelList = 16,
- .i_AiMaxdata = 4095,
- .pr_AiRangelist = &range_apci3XXX_ai,
- .i_NbrTTLChannel = 24,
- .b_AvailableConvertUnit = 6,
- .ui_MinAcquisitiontimeNs = 10000,
- .interrupt = v_APCI3XXX_Interrupt,
- .reset = i_APCI3XXX_Reset,
- .ai_config = i_APCI3XXX_InsnConfigAnalogInput,
- .ai_read = i_APCI3XXX_InsnReadAnalogInput,
- .ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
- .ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
- .ttl_read = i_APCI3XXX_InsnReadTTLIO,
- .ttl_write = i_APCI3XXX_InsnWriteTTLIO,
+ .name = "apci3000-16",
+ .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
+ .ai_n_chan = 16,
+ .ai_maxdata = 0x0fff,
+ .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
+ .ai_min_acq_ns = 10000,
+ .has_ttl_io = 1,
},
[BOARD_APCI3000_8] = {
- .pc_DriverName = "apci3000-8",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .pc_EepromChip = ADDIDATA_9054,
- .i_NbrAiChannel = 8,
- .i_NbrAiChannelDiff = 4,
- .i_AiChannelList = 8,
- .i_AiMaxdata = 4095,
- .pr_AiRangelist = &range_apci3XXX_ai,
- .i_NbrTTLChannel = 24,
- .b_AvailableConvertUnit = 6,
- .ui_MinAcquisitiontimeNs = 10000,
- .interrupt = v_APCI3XXX_Interrupt,
- .reset = i_APCI3XXX_Reset,
- .ai_config = i_APCI3XXX_InsnConfigAnalogInput,
- .ai_read = i_APCI3XXX_InsnReadAnalogInput,
- .ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
- .ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
- .ttl_read = i_APCI3XXX_InsnReadTTLIO,
- .ttl_write = i_APCI3XXX_InsnWriteTTLIO,
+ .name = "apci3000-8",
+ .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
+ .ai_n_chan = 8,
+ .ai_maxdata = 0x0fff,
+ .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
+ .ai_min_acq_ns = 10000,
+ .has_ttl_io = 1,
},
[BOARD_APCI3000_4] = {
- .pc_DriverName = "apci3000-4",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .pc_EepromChip = ADDIDATA_9054,
- .i_NbrAiChannel = 4,
- .i_NbrAiChannelDiff = 2,
- .i_AiChannelList = 4,
- .i_AiMaxdata = 4095,
- .pr_AiRangelist = &range_apci3XXX_ai,
- .i_NbrTTLChannel = 24,
- .b_AvailableConvertUnit = 6,
- .ui_MinAcquisitiontimeNs = 10000,
- .interrupt = v_APCI3XXX_Interrupt,
- .reset = i_APCI3XXX_Reset,
- .ai_config = i_APCI3XXX_InsnConfigAnalogInput,
- .ai_read = i_APCI3XXX_InsnReadAnalogInput,
- .ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
- .ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
- .ttl_read = i_APCI3XXX_InsnReadTTLIO,
- .ttl_write = i_APCI3XXX_InsnWriteTTLIO,
+ .name = "apci3000-4",
+ .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
+ .ai_n_chan = 4,
+ .ai_maxdata = 0x0fff,
+ .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
+ .ai_min_acq_ns = 10000,
+ .has_ttl_io = 1,
},
[BOARD_APCI3006_16] = {
- .pc_DriverName = "apci3006-16",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .pc_EepromChip = ADDIDATA_9054,
- .i_NbrAiChannel = 16,
- .i_NbrAiChannelDiff = 8,
- .i_AiChannelList = 16,
- .i_AiMaxdata = 65535,
- .pr_AiRangelist = &range_apci3XXX_ai,
- .i_NbrTTLChannel = 24,
- .b_AvailableConvertUnit = 6,
- .ui_MinAcquisitiontimeNs = 10000,
- .interrupt = v_APCI3XXX_Interrupt,
- .reset = i_APCI3XXX_Reset,
- .ai_config = i_APCI3XXX_InsnConfigAnalogInput,
- .ai_read = i_APCI3XXX_InsnReadAnalogInput,
- .ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
- .ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
- .ttl_read = i_APCI3XXX_InsnReadTTLIO,
- .ttl_write = i_APCI3XXX_InsnWriteTTLIO,
+ .name = "apci3006-16",
+ .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
+ .ai_n_chan = 16,
+ .ai_maxdata = 0xffff,
+ .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
+ .ai_min_acq_ns = 10000,
+ .has_ttl_io = 1,
},
[BOARD_APCI3006_8] = {
- .pc_DriverName = "apci3006-8",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .pc_EepromChip = ADDIDATA_9054,
- .i_NbrAiChannel = 8,
- .i_NbrAiChannelDiff = 4,
- .i_AiChannelList = 8,
- .i_AiMaxdata = 65535,
- .pr_AiRangelist = &range_apci3XXX_ai,
- .i_NbrTTLChannel = 24,
- .b_AvailableConvertUnit = 6,
- .ui_MinAcquisitiontimeNs = 10000,
- .interrupt = v_APCI3XXX_Interrupt,
- .reset = i_APCI3XXX_Reset,
- .ai_config = i_APCI3XXX_InsnConfigAnalogInput,
- .ai_read = i_APCI3XXX_InsnReadAnalogInput,
- .ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
- .ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
- .ttl_read = i_APCI3XXX_InsnReadTTLIO,
- .ttl_write = i_APCI3XXX_InsnWriteTTLIO,
+ .name = "apci3006-8",
+ .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
+ .ai_n_chan = 8,
+ .ai_maxdata = 0xffff,
+ .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
+ .ai_min_acq_ns = 10000,
+ .has_ttl_io = 1,
},
[BOARD_APCI3006_4] = {
- .pc_DriverName = "apci3006-4",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .pc_EepromChip = ADDIDATA_9054,
- .i_NbrAiChannel = 4,
- .i_NbrAiChannelDiff = 2,
- .i_AiChannelList = 4,
- .i_AiMaxdata = 65535,
- .pr_AiRangelist = &range_apci3XXX_ai,
- .i_NbrTTLChannel = 24,
- .b_AvailableConvertUnit = 6,
- .ui_MinAcquisitiontimeNs = 10000,
- .interrupt = v_APCI3XXX_Interrupt,
- .reset = i_APCI3XXX_Reset,
- .ai_config = i_APCI3XXX_InsnConfigAnalogInput,
- .ai_read = i_APCI3XXX_InsnReadAnalogInput,
- .ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
- .ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
- .ttl_read = i_APCI3XXX_InsnReadTTLIO,
- .ttl_write = i_APCI3XXX_InsnWriteTTLIO,
+ .name = "apci3006-4",
+ .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
+ .ai_n_chan = 4,
+ .ai_maxdata = 0xffff,
+ .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
+ .ai_min_acq_ns = 10000,
+ .has_ttl_io = 1,
},
[BOARD_APCI3010_16] = {
- .pc_DriverName = "apci3010-16",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .pc_EepromChip = ADDIDATA_9054,
- .i_NbrAiChannel = 16,
- .i_NbrAiChannelDiff = 8,
- .i_AiChannelList = 16,
- .i_AiMaxdata = 4095,
- .pr_AiRangelist = &range_apci3XXX_ai,
- .i_NbrDiChannel = 4,
- .i_NbrDoChannel = 4,
- .i_DoMaxdata = 1,
- .i_NbrTTLChannel = 24,
- .b_AvailableConvertUnit = 6,
- .ui_MinAcquisitiontimeNs = 5000,
- .interrupt = v_APCI3XXX_Interrupt,
- .reset = i_APCI3XXX_Reset,
- .ai_config = i_APCI3XXX_InsnConfigAnalogInput,
- .ai_read = i_APCI3XXX_InsnReadAnalogInput,
- .di_bits = apci3xxx_di_insn_bits,
- .do_bits = apci3xxx_do_insn_bits,
- .ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
- .ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
- .ttl_read = i_APCI3XXX_InsnReadTTLIO,
- .ttl_write = i_APCI3XXX_InsnWriteTTLIO,
+ .name = "apci3010-16",
+ .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
+ .ai_n_chan = 16,
+ .ai_maxdata = 0x0fff,
+ .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
+ .ai_min_acq_ns = 5000,
+ .has_dig_in = 1,
+ .has_dig_out = 1,
+ .has_ttl_io = 1,
},
[BOARD_APCI3010_8] = {
- .pc_DriverName = "apci3010-8",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .pc_EepromChip = ADDIDATA_9054,
- .i_NbrAiChannel = 8,
- .i_NbrAiChannelDiff = 4,
- .i_AiChannelList = 8,
- .i_AiMaxdata = 4095,
- .pr_AiRangelist = &range_apci3XXX_ai,
- .i_NbrDiChannel = 4,
- .i_NbrDoChannel = 4,
- .i_DoMaxdata = 1,
- .i_NbrTTLChannel = 24,
- .b_AvailableConvertUnit = 6,
- .ui_MinAcquisitiontimeNs = 5000,
- .interrupt = v_APCI3XXX_Interrupt,
- .reset = i_APCI3XXX_Reset,
- .ai_config = i_APCI3XXX_InsnConfigAnalogInput,
- .ai_read = i_APCI3XXX_InsnReadAnalogInput,
- .di_bits = apci3xxx_di_insn_bits,
- .do_bits = apci3xxx_do_insn_bits,
- .ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
- .ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
- .ttl_read = i_APCI3XXX_InsnReadTTLIO,
- .ttl_write = i_APCI3XXX_InsnWriteTTLIO,
+ .name = "apci3010-8",
+ .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
+ .ai_n_chan = 8,
+ .ai_maxdata = 0x0fff,
+ .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
+ .ai_min_acq_ns = 5000,
+ .has_dig_in = 1,
+ .has_dig_out = 1,
+ .has_ttl_io = 1,
},
[BOARD_APCI3010_4] = {
- .pc_DriverName = "apci3010-4",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .pc_EepromChip = ADDIDATA_9054,
- .i_NbrAiChannel = 4,
- .i_NbrAiChannelDiff = 2,
- .i_AiChannelList = 4,
- .i_AiMaxdata = 4095,
- .pr_AiRangelist = &range_apci3XXX_ai,
- .i_NbrDiChannel = 4,
- .i_NbrDoChannel = 4,
- .i_DoMaxdata = 1,
- .i_NbrTTLChannel = 24,
- .b_AvailableConvertUnit = 6,
- .ui_MinAcquisitiontimeNs = 5000,
- .interrupt = v_APCI3XXX_Interrupt,
- .reset = i_APCI3XXX_Reset,
- .ai_config = i_APCI3XXX_InsnConfigAnalogInput,
- .ai_read = i_APCI3XXX_InsnReadAnalogInput,
- .di_bits = apci3xxx_di_insn_bits,
- .do_bits = apci3xxx_do_insn_bits,
- .ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
- .ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
- .ttl_read = i_APCI3XXX_InsnReadTTLIO,
- .ttl_write = i_APCI3XXX_InsnWriteTTLIO,
+ .name = "apci3010-4",
+ .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
+ .ai_n_chan = 4,
+ .ai_maxdata = 0x0fff,
+ .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
+ .ai_min_acq_ns = 5000,
+ .has_dig_in = 1,
+ .has_dig_out = 1,
+ .has_ttl_io = 1,
},
[BOARD_APCI3016_16] = {
- .pc_DriverName = "apci3016-16",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .pc_EepromChip = ADDIDATA_9054,
- .i_NbrAiChannel = 16,
- .i_NbrAiChannelDiff = 8,
- .i_AiChannelList = 16,
- .i_AiMaxdata = 65535,
- .pr_AiRangelist = &range_apci3XXX_ai,
- .i_NbrDiChannel = 4,
- .i_NbrDoChannel = 4,
- .i_DoMaxdata = 1,
- .i_NbrTTLChannel = 24,
- .b_AvailableConvertUnit = 6,
- .ui_MinAcquisitiontimeNs = 5000,
- .interrupt = v_APCI3XXX_Interrupt,
- .reset = i_APCI3XXX_Reset,
- .ai_config = i_APCI3XXX_InsnConfigAnalogInput,
- .ai_read = i_APCI3XXX_InsnReadAnalogInput,
- .di_bits = apci3xxx_di_insn_bits,
- .do_bits = apci3xxx_do_insn_bits,
- .ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
- .ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
- .ttl_read = i_APCI3XXX_InsnReadTTLIO,
- .ttl_write = i_APCI3XXX_InsnWriteTTLIO,
+ .name = "apci3016-16",
+ .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
+ .ai_n_chan = 16,
+ .ai_maxdata = 0xffff,
+ .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
+ .ai_min_acq_ns = 5000,
+ .has_dig_in = 1,
+ .has_dig_out = 1,
+ .has_ttl_io = 1,
},
[BOARD_APCI3016_8] = {
- .pc_DriverName = "apci3016-8",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .pc_EepromChip = ADDIDATA_9054,
- .i_NbrAiChannel = 8,
- .i_NbrAiChannelDiff = 4,
- .i_AiChannelList = 8,
- .i_AiMaxdata = 65535,
- .pr_AiRangelist = &range_apci3XXX_ai,
- .i_NbrDiChannel = 4,
- .i_NbrDoChannel = 4,
- .i_DoMaxdata = 1,
- .i_NbrTTLChannel = 24,
- .b_AvailableConvertUnit = 6,
- .ui_MinAcquisitiontimeNs = 5000,
- .interrupt = v_APCI3XXX_Interrupt,
- .reset = i_APCI3XXX_Reset,
- .ai_config = i_APCI3XXX_InsnConfigAnalogInput,
- .ai_read = i_APCI3XXX_InsnReadAnalogInput,
- .di_bits = apci3xxx_di_insn_bits,
- .do_bits = apci3xxx_do_insn_bits,
- .ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
- .ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
- .ttl_read = i_APCI3XXX_InsnReadTTLIO,
- .ttl_write = i_APCI3XXX_InsnWriteTTLIO,
+ .name = "apci3016-8",
+ .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
+ .ai_n_chan = 8,
+ .ai_maxdata = 0xffff,
+ .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
+ .ai_min_acq_ns = 5000,
+ .has_dig_in = 1,
+ .has_dig_out = 1,
+ .has_ttl_io = 1,
},
[BOARD_APCI3016_4] = {
- .pc_DriverName = "apci3016-4",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .pc_EepromChip = ADDIDATA_9054,
- .i_NbrAiChannel = 4,
- .i_NbrAiChannelDiff = 2,
- .i_AiChannelList = 4,
- .i_AiMaxdata = 65535,
- .pr_AiRangelist = &range_apci3XXX_ai,
- .i_NbrDiChannel = 4,
- .i_NbrDoChannel = 4,
- .i_DoMaxdata = 1,
- .i_NbrTTLChannel = 24,
- .b_AvailableConvertUnit = 6,
- .ui_MinAcquisitiontimeNs = 5000,
- .interrupt = v_APCI3XXX_Interrupt,
- .reset = i_APCI3XXX_Reset,
- .ai_config = i_APCI3XXX_InsnConfigAnalogInput,
- .ai_read = i_APCI3XXX_InsnReadAnalogInput,
- .di_bits = apci3xxx_di_insn_bits,
- .do_bits = apci3xxx_do_insn_bits,
- .ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
- .ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
- .ttl_read = i_APCI3XXX_InsnReadTTLIO,
- .ttl_write = i_APCI3XXX_InsnWriteTTLIO,
+ .name = "apci3016-4",
+ .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
+ .ai_n_chan = 4,
+ .ai_maxdata = 0xffff,
+ .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
+ .ai_min_acq_ns = 5000,
+ .has_dig_in = 1,
+ .has_dig_out = 1,
+ .has_ttl_io = 1,
},
[BOARD_APCI3100_16_4] = {
- .pc_DriverName = "apci3100-16-4",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .pc_EepromChip = ADDIDATA_9054,
- .i_NbrAiChannel = 16,
- .i_NbrAiChannelDiff = 8,
- .i_AiChannelList = 16,
- .i_NbrAoChannel = 4,
- .i_AiMaxdata = 4095,
- .i_AoMaxdata = 4095,
- .pr_AiRangelist = &range_apci3XXX_ai,
- .pr_AoRangelist = &range_apci3XXX_ao,
- .i_NbrTTLChannel = 24,
- .b_AvailableConvertUnit = 6,
- .ui_MinAcquisitiontimeNs = 10000,
- .interrupt = v_APCI3XXX_Interrupt,
- .reset = i_APCI3XXX_Reset,
- .ai_config = i_APCI3XXX_InsnConfigAnalogInput,
- .ai_read = i_APCI3XXX_InsnReadAnalogInput,
- .ao_write = i_APCI3XXX_InsnWriteAnalogOutput,
- .ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
- .ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
- .ttl_read = i_APCI3XXX_InsnReadTTLIO,
- .ttl_write = i_APCI3XXX_InsnWriteTTLIO,
+ .name = "apci3100-16-4",
+ .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
+ .ai_n_chan = 16,
+ .ai_maxdata = 0x0fff,
+ .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
+ .ai_min_acq_ns = 10000,
+ .has_ao = 1,
+ .has_ttl_io = 1,
},
[BOARD_APCI3100_8_4] = {
- .pc_DriverName = "apci3100-8-4",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .pc_EepromChip = ADDIDATA_9054,
- .i_NbrAiChannel = 8,
- .i_NbrAiChannelDiff = 4,
- .i_AiChannelList = 8,
- .i_NbrAoChannel = 4,
- .i_AiMaxdata = 4095,
- .i_AoMaxdata = 4095,
- .pr_AiRangelist = &range_apci3XXX_ai,
- .pr_AoRangelist = &range_apci3XXX_ao,
- .i_NbrTTLChannel = 24,
- .b_AvailableConvertUnit = 6,
- .ui_MinAcquisitiontimeNs = 10000,
- .interrupt = v_APCI3XXX_Interrupt,
- .reset = i_APCI3XXX_Reset,
- .ai_config = i_APCI3XXX_InsnConfigAnalogInput,
- .ai_read = i_APCI3XXX_InsnReadAnalogInput,
- .ao_write = i_APCI3XXX_InsnWriteAnalogOutput,
- .ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
- .ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
- .ttl_read = i_APCI3XXX_InsnReadTTLIO,
- .ttl_write = i_APCI3XXX_InsnWriteTTLIO,
+ .name = "apci3100-8-4",
+ .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
+ .ai_n_chan = 8,
+ .ai_maxdata = 0x0fff,
+ .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
+ .ai_min_acq_ns = 10000,
+ .has_ao = 1,
+ .has_ttl_io = 1,
},
[BOARD_APCI3106_16_4] = {
- .pc_DriverName = "apci3106-16-4",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .pc_EepromChip = ADDIDATA_9054,
- .i_NbrAiChannel = 16,
- .i_NbrAiChannelDiff = 8,
- .i_AiChannelList = 16,
- .i_NbrAoChannel = 4,
- .i_AiMaxdata = 65535,
- .i_AoMaxdata = 4095,
- .pr_AiRangelist = &range_apci3XXX_ai,
- .pr_AoRangelist = &range_apci3XXX_ao,
- .i_NbrTTLChannel = 24,
- .b_AvailableConvertUnit = 6,
- .ui_MinAcquisitiontimeNs = 10000,
- .interrupt = v_APCI3XXX_Interrupt,
- .reset = i_APCI3XXX_Reset,
- .ai_config = i_APCI3XXX_InsnConfigAnalogInput,
- .ai_read = i_APCI3XXX_InsnReadAnalogInput,
- .ao_write = i_APCI3XXX_InsnWriteAnalogOutput,
- .ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
- .ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
- .ttl_read = i_APCI3XXX_InsnReadTTLIO,
- .ttl_write = i_APCI3XXX_InsnWriteTTLIO,
+ .name = "apci3106-16-4",
+ .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
+ .ai_n_chan = 16,
+ .ai_maxdata = 0xffff,
+ .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
+ .ai_min_acq_ns = 10000,
+ .has_ao = 1,
+ .has_ttl_io = 1,
},
[BOARD_APCI3106_8_4] = {
- .pc_DriverName = "apci3106-8-4",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .pc_EepromChip = ADDIDATA_9054,
- .i_NbrAiChannel = 8,
- .i_NbrAiChannelDiff = 4,
- .i_AiChannelList = 8,
- .i_NbrAoChannel = 4,
- .i_AiMaxdata = 65535,
- .i_AoMaxdata = 4095,
- .pr_AiRangelist = &range_apci3XXX_ai,
- .pr_AoRangelist = &range_apci3XXX_ao,
- .i_NbrTTLChannel = 24,
- .b_AvailableConvertUnit = 6,
- .ui_MinAcquisitiontimeNs = 10000,
- .interrupt = v_APCI3XXX_Interrupt,
- .reset = i_APCI3XXX_Reset,
- .ai_config = i_APCI3XXX_InsnConfigAnalogInput,
- .ai_read = i_APCI3XXX_InsnReadAnalogInput,
- .ao_write = i_APCI3XXX_InsnWriteAnalogOutput,
- .ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
- .ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
- .ttl_read = i_APCI3XXX_InsnReadTTLIO,
- .ttl_write = i_APCI3XXX_InsnWriteTTLIO,
+ .name = "apci3106-8-4",
+ .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
+ .ai_n_chan = 8,
+ .ai_maxdata = 0xffff,
+ .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
+ .ai_min_acq_ns = 10000,
+ .has_ao = 1,
+ .has_ttl_io = 1,
},
[BOARD_APCI3110_16_4] = {
- .pc_DriverName = "apci3110-16-4",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .pc_EepromChip = ADDIDATA_9054,
- .i_NbrAiChannel = 16,
- .i_NbrAiChannelDiff = 8,
- .i_AiChannelList = 16,
- .i_NbrAoChannel = 4,
- .i_AiMaxdata = 4095,
- .i_AoMaxdata = 4095,
- .pr_AiRangelist = &range_apci3XXX_ai,
- .pr_AoRangelist = &range_apci3XXX_ao,
- .i_NbrDiChannel = 4,
- .i_NbrDoChannel = 4,
- .i_DoMaxdata = 1,
- .i_NbrTTLChannel = 24,
- .b_AvailableConvertUnit = 6,
- .ui_MinAcquisitiontimeNs = 5000,
- .interrupt = v_APCI3XXX_Interrupt,
- .reset = i_APCI3XXX_Reset,
- .ai_config = i_APCI3XXX_InsnConfigAnalogInput,
- .ai_read = i_APCI3XXX_InsnReadAnalogInput,
- .ao_write = i_APCI3XXX_InsnWriteAnalogOutput,
- .di_bits = apci3xxx_di_insn_bits,
- .do_bits = apci3xxx_do_insn_bits,
- .ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
- .ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
- .ttl_read = i_APCI3XXX_InsnReadTTLIO,
- .ttl_write = i_APCI3XXX_InsnWriteTTLIO,
+ .name = "apci3110-16-4",
+ .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
+ .ai_n_chan = 16,
+ .ai_maxdata = 0x0fff,
+ .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
+ .ai_min_acq_ns = 5000,
+ .has_ao = 1,
+ .has_dig_in = 1,
+ .has_dig_out = 1,
+ .has_ttl_io = 1,
},
[BOARD_APCI3110_8_4] = {
- .pc_DriverName = "apci3110-8-4",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .pc_EepromChip = ADDIDATA_9054,
- .i_NbrAiChannel = 8,
- .i_NbrAiChannelDiff = 4,
- .i_AiChannelList = 8,
- .i_NbrAoChannel = 4,
- .i_AiMaxdata = 4095,
- .i_AoMaxdata = 4095,
- .pr_AiRangelist = &range_apci3XXX_ai,
- .pr_AoRangelist = &range_apci3XXX_ao,
- .i_NbrDiChannel = 4,
- .i_NbrDoChannel = 4,
- .i_DoMaxdata = 1,
- .i_NbrTTLChannel = 24,
- .b_AvailableConvertUnit = 6,
- .ui_MinAcquisitiontimeNs = 5000,
- .interrupt = v_APCI3XXX_Interrupt,
- .reset = i_APCI3XXX_Reset,
- .ai_config = i_APCI3XXX_InsnConfigAnalogInput,
- .ai_read = i_APCI3XXX_InsnReadAnalogInput,
- .ao_write = i_APCI3XXX_InsnWriteAnalogOutput,
- .di_bits = apci3xxx_di_insn_bits,
- .do_bits = apci3xxx_do_insn_bits,
- .ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
- .ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
- .ttl_read = i_APCI3XXX_InsnReadTTLIO,
- .ttl_write = i_APCI3XXX_InsnWriteTTLIO,
+ .name = "apci3110-8-4",
+ .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
+ .ai_n_chan = 8,
+ .ai_maxdata = 0x0fff,
+ .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
+ .ai_min_acq_ns = 5000,
+ .has_ao = 1,
+ .has_dig_in = 1,
+ .has_dig_out = 1,
+ .has_ttl_io = 1,
},
[BOARD_APCI3116_16_4] = {
- .pc_DriverName = "apci3116-16-4",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .pc_EepromChip = ADDIDATA_9054,
- .i_NbrAiChannel = 16,
- .i_NbrAiChannelDiff = 8,
- .i_AiChannelList = 16,
- .i_NbrAoChannel = 4,
- .i_AiMaxdata = 65535,
- .i_AoMaxdata = 4095,
- .pr_AiRangelist = &range_apci3XXX_ai,
- .pr_AoRangelist = &range_apci3XXX_ao,
- .i_NbrDiChannel = 4,
- .i_NbrDoChannel = 4,
- .i_DoMaxdata = 1,
- .i_NbrTTLChannel = 24,
- .b_AvailableConvertUnit = 6,
- .ui_MinAcquisitiontimeNs = 5000,
- .interrupt = v_APCI3XXX_Interrupt,
- .reset = i_APCI3XXX_Reset,
- .ai_config = i_APCI3XXX_InsnConfigAnalogInput,
- .ai_read = i_APCI3XXX_InsnReadAnalogInput,
- .ao_write = i_APCI3XXX_InsnWriteAnalogOutput,
- .di_bits = apci3xxx_di_insn_bits,
- .do_bits = apci3xxx_do_insn_bits,
- .ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
- .ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
- .ttl_read = i_APCI3XXX_InsnReadTTLIO,
- .ttl_write = i_APCI3XXX_InsnWriteTTLIO,
+ .name = "apci3116-16-4",
+ .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
+ .ai_n_chan = 16,
+ .ai_maxdata = 0xffff,
+ .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
+ .ai_min_acq_ns = 5000,
+ .has_ao = 1,
+ .has_dig_in = 1,
+ .has_dig_out = 1,
+ .has_ttl_io = 1,
},
[BOARD_APCI3116_8_4] = {
- .pc_DriverName = "apci3116-8-4",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .pc_EepromChip = ADDIDATA_9054,
- .i_NbrAiChannel = 8,
- .i_NbrAiChannelDiff = 4,
- .i_AiChannelList = 8,
- .i_NbrAoChannel = 4,
- .i_AiMaxdata = 65535,
- .i_AoMaxdata = 4095,
- .pr_AiRangelist = &range_apci3XXX_ai,
- .pr_AoRangelist = &range_apci3XXX_ao,
- .i_NbrDiChannel = 4,
- .i_NbrDoChannel = 4,
- .i_DoMaxdata = 1,
- .i_NbrTTLChannel = 24,
- .b_AvailableConvertUnit = 6,
- .ui_MinAcquisitiontimeNs = 5000,
- .interrupt = v_APCI3XXX_Interrupt,
- .reset = i_APCI3XXX_Reset,
- .ai_config = i_APCI3XXX_InsnConfigAnalogInput,
- .ai_read = i_APCI3XXX_InsnReadAnalogInput,
- .ao_write = i_APCI3XXX_InsnWriteAnalogOutput,
- .di_bits = apci3xxx_di_insn_bits,
- .do_bits = apci3xxx_do_insn_bits,
- .ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
- .ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
- .ttl_read = i_APCI3XXX_InsnReadTTLIO,
- .ttl_write = i_APCI3XXX_InsnWriteTTLIO,
+ .name = "apci3116-8-4",
+ .ai_subdev_flags = SDF_COMMON | SDF_GROUND | SDF_DIFF,
+ .ai_n_chan = 8,
+ .ai_maxdata = 0xffff,
+ .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
+ .ai_min_acq_ns = 5000,
+ .has_ao = 1,
+ .has_dig_in = 1,
+ .has_dig_out = 1,
+ .has_ttl_io = 1,
},
[BOARD_APCI3003] = {
- .pc_DriverName = "apci3003",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .pc_EepromChip = ADDIDATA_9054,
- .i_NbrAiChannelDiff = 4,
- .i_AiChannelList = 4,
- .i_AiMaxdata = 65535,
- .pr_AiRangelist = &range_apci3XXX_ai,
- .i_NbrDiChannel = 4,
- .i_NbrDoChannel = 4,
- .i_DoMaxdata = 1,
- .b_AvailableConvertUnit = 7,
- .ui_MinAcquisitiontimeNs = 2500,
- .interrupt = v_APCI3XXX_Interrupt,
- .reset = i_APCI3XXX_Reset,
- .ai_config = i_APCI3XXX_InsnConfigAnalogInput,
- .ai_read = i_APCI3XXX_InsnReadAnalogInput,
- .di_bits = apci3xxx_di_insn_bits,
- .do_bits = apci3xxx_do_insn_bits,
+ .name = "apci3003",
+ .ai_subdev_flags = SDF_DIFF,
+ .ai_n_chan = 4,
+ .ai_maxdata = 0xffff,
+ .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US |
+ CONV_UNIT_NS,
+ .ai_min_acq_ns = 2500,
+ .has_dig_in = 1,
+ .has_dig_out = 1,
},
[BOARD_APCI3002_16] = {
- .pc_DriverName = "apci3002-16",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .pc_EepromChip = ADDIDATA_9054,
- .i_NbrAiChannelDiff = 16,
- .i_AiChannelList = 16,
- .i_AiMaxdata = 65535,
- .pr_AiRangelist = &range_apci3XXX_ai,
- .i_NbrDiChannel = 4,
- .i_NbrDoChannel = 4,
- .i_DoMaxdata = 1,
- .b_AvailableConvertUnit = 6,
- .ui_MinAcquisitiontimeNs = 5000,
- .interrupt = v_APCI3XXX_Interrupt,
- .reset = i_APCI3XXX_Reset,
- .ai_config = i_APCI3XXX_InsnConfigAnalogInput,
- .ai_read = i_APCI3XXX_InsnReadAnalogInput,
- .di_bits = apci3xxx_di_insn_bits,
- .do_bits = apci3xxx_do_insn_bits,
+ .name = "apci3002-16",
+ .ai_subdev_flags = SDF_DIFF,
+ .ai_n_chan = 16,
+ .ai_maxdata = 0xffff,
+ .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
+ .ai_min_acq_ns = 5000,
+ .has_dig_in = 1,
+ .has_dig_out = 1,
},
[BOARD_APCI3002_8] = {
- .pc_DriverName = "apci3002-8",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .pc_EepromChip = ADDIDATA_9054,
- .i_NbrAiChannelDiff = 8,
- .i_AiChannelList = 8,
- .i_AiMaxdata = 65535,
- .pr_AiRangelist = &range_apci3XXX_ai,
- .i_NbrDiChannel = 4,
- .i_NbrDoChannel = 4,
- .i_DoMaxdata = 1,
- .b_AvailableConvertUnit = 6,
- .ui_MinAcquisitiontimeNs = 5000,
- .interrupt = v_APCI3XXX_Interrupt,
- .reset = i_APCI3XXX_Reset,
- .ai_config = i_APCI3XXX_InsnConfigAnalogInput,
- .ai_read = i_APCI3XXX_InsnReadAnalogInput,
- .di_bits = apci3xxx_di_insn_bits,
- .do_bits = apci3xxx_do_insn_bits,
+ .name = "apci3002-8",
+ .ai_subdev_flags = SDF_DIFF,
+ .ai_n_chan = 8,
+ .ai_maxdata = 0xffff,
+ .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
+ .ai_min_acq_ns = 5000,
+ .has_dig_in = 1,
+ .has_dig_out = 1,
},
[BOARD_APCI3002_4] = {
- .pc_DriverName = "apci3002-4",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .pc_EepromChip = ADDIDATA_9054,
- .i_NbrAiChannelDiff = 4,
- .i_AiChannelList = 4,
- .i_AiMaxdata = 65535,
- .pr_AiRangelist = &range_apci3XXX_ai,
- .i_NbrDiChannel = 4,
- .i_NbrDoChannel = 4,
- .i_DoMaxdata = 1,
- .b_AvailableConvertUnit = 6,
- .ui_MinAcquisitiontimeNs = 5000,
- .interrupt = v_APCI3XXX_Interrupt,
- .reset = i_APCI3XXX_Reset,
- .ai_config = i_APCI3XXX_InsnConfigAnalogInput,
- .ai_read = i_APCI3XXX_InsnReadAnalogInput,
- .di_bits = apci3xxx_di_insn_bits,
- .do_bits = apci3xxx_do_insn_bits,
+ .name = "apci3002-4",
+ .ai_subdev_flags = SDF_DIFF,
+ .ai_n_chan = 4,
+ .ai_maxdata = 0xffff,
+ .ai_conv_units = CONV_UNIT_MS | CONV_UNIT_US,
+ .ai_min_acq_ns = 5000,
+ .has_dig_in = 1,
+ .has_dig_out = 1,
},
[BOARD_APCI3500] = {
- .pc_DriverName = "apci3500",
- .i_IorangeBase1 = 256,
- .i_PCIEeprom = ADDIDATA_NO_EEPROM,
- .pc_EepromChip = ADDIDATA_9054,
- .i_NbrAoChannel = 4,
- .i_AoMaxdata = 4095,
- .pr_AoRangelist = &range_apci3XXX_ao,
- .i_NbrTTLChannel = 24,
- .interrupt = v_APCI3XXX_Interrupt,
- .reset = i_APCI3XXX_Reset,
- .ao_write = i_APCI3XXX_InsnWriteAnalogOutput,
- .ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
- .ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
- .ttl_read = i_APCI3XXX_InsnReadTTLIO,
- .ttl_write = i_APCI3XXX_InsnWriteTTLIO,
+ .name = "apci3500",
+ .has_ao = 1,
+ .has_ttl_io = 1,
},
};
+struct apci3xxx_private {
+ void __iomem *mmio;
+ unsigned int ai_timer;
+ unsigned char ai_time_base;
+};
+
+static irqreturn_t apci3xxx_irq_handler(int irq, void *d)
+{
+ struct comedi_device *dev = d;
+ struct apci3xxx_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
+ unsigned int status;
+ unsigned int val;
+
+	/* Test if an interrupt occurred */
+ status = readl(devpriv->mmio + 16);
+ if ((status & 0x2) == 0x2) {
+ /* Reset the interrupt */
+ writel(status, devpriv->mmio + 16);
+
+ val = readl(devpriv->mmio + 28);
+ comedi_buf_put(s->async, val);
+
+ s->async->events |= COMEDI_CB_EOA;
+ comedi_event(dev, s);
+
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+static int apci3xxx_ai_started(struct comedi_device *dev)
+{
+ struct apci3xxx_private *devpriv = dev->private;
+
+ if ((readl(devpriv->mmio + 8) & 0x80000) == 0x80000)
+ return 1;
+ else
+ return 0;
+
+}
+
+static int apci3xxx_ai_setup(struct comedi_device *dev, unsigned int chanspec)
+{
+ struct apci3xxx_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(chanspec);
+ unsigned int range = CR_RANGE(chanspec);
+ unsigned int aref = CR_AREF(chanspec);
+ unsigned int delay_mode;
+ unsigned int val;
+
+ if (apci3xxx_ai_started(dev))
+ return -EBUSY;
+
+ /* Clear the FIFO */
+ writel(0x10000, devpriv->mmio + 12);
+
+ /* Get and save the delay mode */
+ delay_mode = readl(devpriv->mmio + 4);
+ delay_mode &= 0xfffffef0;
+
+ /* Channel configuration selection */
+ writel(delay_mode, devpriv->mmio + 4);
+
+ /* Make the configuration */
+ val = (range & 3) | ((range >> 2) << 6) |
+ ((aref == AREF_DIFF) << 7);
+ writel(val, devpriv->mmio + 0);
+
+ /* Channel selection */
+ writel(delay_mode | 0x100, devpriv->mmio + 4);
+ writel(chan, devpriv->mmio + 0);
+
+ /* Restore delay mode */
+ writel(delay_mode, devpriv->mmio + 4);
+
+	/* Set the number of sequences to 1 */
+ writel(1, devpriv->mmio + 48);
+
+ return 0;
+}
+
+static int apci3xxx_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci3xxx_private *devpriv = dev->private;
+ unsigned int val;
+ int ret;
+ int i;
+
+ ret = apci3xxx_ai_setup(dev, insn->chanspec);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < insn->n; i++) {
+ /* Start the conversion */
+ writel(0x80000, devpriv->mmio + 8);
+
+		/* Wait for the EOS */
+ do {
+ val = readl(devpriv->mmio + 20);
+ val &= 0x1;
+ } while (!val);
+
+ /* Read the analog value */
+ data[i] = readl(devpriv->mmio + 28);
+ }
+
+ return insn->n;
+}
+
+static int apci3xxx_ai_ns_to_timer(struct comedi_device *dev,
+ unsigned int *ns, int round_mode)
+{
+ const struct apci3xxx_boardinfo *board = comedi_board(dev);
+ struct apci3xxx_private *devpriv = dev->private;
+ unsigned int base;
+ unsigned int timer;
+ int time_base;
+
+ /* time_base: 0 = ns, 1 = us, 2 = ms */
+ for (time_base = 0; time_base < 3; time_base++) {
+ /* skip unsupported time bases */
+ if (!(board->ai_conv_units & (1 << time_base)))
+ continue;
+
+ switch (time_base) {
+ case 0:
+ base = 1;
+ break;
+ case 1:
+ base = 1000;
+ break;
+ case 2:
+ base = 1000000;
+ break;
+ }
+
+ switch (round_mode) {
+ case TRIG_ROUND_NEAREST:
+ default:
+ timer = (*ns + base / 2) / base;
+ break;
+ case TRIG_ROUND_DOWN:
+ timer = *ns / base;
+ break;
+ case TRIG_ROUND_UP:
+ timer = (*ns + base - 1) / base;
+ break;
+ }
+
+ if (timer < 0x10000) {
+ devpriv->ai_time_base = time_base;
+ devpriv->ai_timer = timer;
+			*ns = timer * base;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static int apci3xxx_ai_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
+{
+ const struct apci3xxx_boardinfo *board = comedi_board(dev);
+ int err = 0;
+ unsigned int tmp;
+
+ /* Step 1 : check if triggers are trivially valid */
+
+ err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW);
+ err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_FOLLOW);
+ err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_TIMER);
+ err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
+ err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
+
+ if (err)
+ return 1;
+
+ /* Step 2a : make sure trigger sources are unique */
+
+ err |= cfc_check_trigger_is_unique(cmd->stop_src);
+
+ /* Step 2b : and mutually compatible */
+
+ if (err)
+ return 2;
+
+ /* Step 3: check if arguments are trivially valid */
+
+ err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
+ err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
+ err |= cfc_check_trigger_arg_min(&cmd->convert_arg,
+ board->ai_min_acq_ns);
+ err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
+
+ if (cmd->stop_src == TRIG_COUNT)
+ err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 1);
+ else /* TRIG_NONE */
+ err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
+
+ if (err)
+ return 3;
+
+ /* step 4: fix up any arguments */
+
+ /*
+ * FIXME: The hardware supports multiple scan modes but the original
+ * addi-data driver only supported reading a single channel with
+ * interrupts. Need a proper datasheet to fix this.
+ *
+ * The following scan modes are supported by the hardware:
+ * 1) Single software scan
+ * 2) Single hardware triggered scan
+ * 3) Continuous software scan
+ * 4) Continuous software scan with timer delay
+ * 5) Continuous hardware triggered scan
+ * 6) Continuous hardware triggered scan with timer delay
+ *
+ * For now, limit the chanlist to a single channel.
+ */
+ if (cmd->chanlist_len > 1) {
+ cmd->chanlist_len = 1;
+ err |= -EINVAL;
+ }
+
+ tmp = cmd->convert_arg;
+ err |= apci3xxx_ai_ns_to_timer(dev, &cmd->convert_arg,
+ cmd->flags & TRIG_ROUND_MASK);
+ if (tmp != cmd->convert_arg)
+ err |= -EINVAL;
+
+ if (err)
+ return 4;
+
+ return 0;
+}
+
+static int apci3xxx_ai_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct apci3xxx_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ int ret;
+
+ ret = apci3xxx_ai_setup(dev, cmd->chanlist[0]);
+ if (ret)
+ return ret;
+
+ /* Set the convert timing unit */
+ writel(devpriv->ai_time_base, devpriv->mmio + 36);
+
+ /* Set the convert timing */
+ writel(devpriv->ai_timer, devpriv->mmio + 32);
+
+ /* Start the conversion */
+ writel(0x180000, devpriv->mmio + 8);
+
+ return 0;
+}
+
+static int apci3xxx_ai_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ return 0;
+}
+
+static int apci3xxx_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci3xxx_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int range = CR_RANGE(insn->chanspec);
+ unsigned int status;
+ int i;
+
+ for (i = 0; i < insn->n; i++) {
+ /* Set the range selection */
+ writel(range, devpriv->mmio + 96);
+
+ /* Write the analog value to the selected channel */
+ writel((data[i] << 8) | chan, devpriv->mmio + 100);
+
+		/* Wait for the end of the transfer */
+ do {
+ status = readl(devpriv->mmio + 96);
+ } while ((status & 0x100) != 0x100);
+ }
+
+ return insn->n;
+}
+
+static int apci3xxx_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ data[1] = inl(dev->iobase + 32) & 0xf;
+
+ return insn->n;
+}
+
+static int apci3xxx_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int mask = data[0];
+ unsigned int bits = data[1];
+
+ s->state = inl(dev->iobase + 48) & 0xf;
+ if (mask) {
+ s->state &= ~mask;
+ s->state |= (bits & mask);
+
+ outl(s->state, dev->iobase + 48);
+ }
+
+ data[1] = s->state;
+
+ return insn->n;
+}
+
+static int apci3xxx_dio_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int mask;
+ int ret;
+
+ /*
+ * Port 0 (channels 0-7) are always inputs
+ * Port 1 (channels 8-15) are always outputs
+ * Port 2 (channels 16-23) are programmable i/o
+ */
+ if (chan < 16) {
+ if (data[0] != INSN_CONFIG_DIO_QUERY)
+ return -EINVAL;
+ } else {
+ /* changing any channel in port 2 changes the entire port */
+ mask = 0xff0000;
+ }
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
+
+ /* update port 2 configuration */
+ outl((s->io_bits >> 24) & 0xff, dev->iobase + 224);
+
+ return insn->n;
+}
+
+static int apci3xxx_dio_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int mask = data[0];
+ unsigned int bits = data[1];
+ unsigned int val;
+
+ /* only update output channels */
+ mask &= s->io_bits;
+ if (mask) {
+ s->state &= ~mask;
+ s->state |= (bits & mask);
+
+ if (mask & 0xff)
+ outl(s->state & 0xff, dev->iobase + 80);
+ if (mask & 0xff0000)
+ outl((s->state >> 16) & 0xff, dev->iobase + 112);
+ }
+
+ val = inl(dev->iobase + 80);
+ val |= (inl(dev->iobase + 64) << 8);
+ if (s->io_bits & 0xff0000)
+ val |= (inl(dev->iobase + 112) << 16);
+ else
+ val |= (inl(dev->iobase + 96) << 16);
+
+ data[1] = val;
+
+ return insn->n;
+}
+
+static int apci3xxx_reset(struct comedi_device *dev)
+{
+ struct apci3xxx_private *devpriv = dev->private;
+ unsigned int val;
+ int i;
+
+ /* Disable the interrupt */
+ disable_irq(dev->irq);
+
+ /* Clear the start command */
+ writel(0, devpriv->mmio + 8);
+
+ /* Reset the interrupt flags */
+ val = readl(devpriv->mmio + 16);
+ writel(val, devpriv->mmio + 16);
+
+ /* clear the EOS */
+ readl(devpriv->mmio + 20);
+
+ /* Clear the FIFO */
+ for (i = 0; i < 16; i++)
+ val = readl(devpriv->mmio + 28);
+
+ /* Enable the interrupt */
+ enable_irq(dev->irq);
+
+ return 0;
+}
+
static int apci3xxx_auto_attach(struct comedi_device *dev,
unsigned long context)
{
- const struct addi_board *board = NULL;
+ struct pci_dev *pcidev = comedi_to_pci_dev(dev);
+ const struct apci3xxx_boardinfo *board = NULL;
+ struct apci3xxx_private *devpriv;
+ struct comedi_subdevice *s;
+ int n_subdevices;
+ int subdev;
+ int ret;
if (context < ARRAY_SIZE(apci3xxx_boardtypes))
board = &apci3xxx_boardtypes[context];
if (!board)
return -ENODEV;
dev->board_ptr = board;
+ dev->board_name = board->name;
+
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
+ if (!devpriv)
+ return -ENOMEM;
+
+ ret = comedi_pci_enable(dev);
+ if (ret)
+ return ret;
+
+ dev->iobase = pci_resource_start(pcidev, 2);
+ devpriv->mmio = pci_ioremap_bar(pcidev, 3);
+
+ if (pcidev->irq > 0) {
+ ret = request_irq(pcidev->irq, apci3xxx_irq_handler,
+ IRQF_SHARED, dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = pcidev->irq;
+ }
+
+	n_subdevices = (board->ai_n_chan ? 1 : 0) + board->has_ao +
+ board->has_dig_in + board->has_dig_out +
+ board->has_ttl_io;
+ ret = comedi_alloc_subdevices(dev, n_subdevices);
+ if (ret)
+ return ret;
+
+ subdev = 0;
+
+ /* Analog Input subdevice */
+ if (board->ai_n_chan) {
+ s = &dev->subdevices[subdev];
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | board->ai_subdev_flags;
+ s->n_chan = board->ai_n_chan;
+ s->maxdata = board->ai_maxdata;
+ s->len_chanlist = s->n_chan;
+ s->range_table = &apci3xxx_ai_range;
+ s->insn_read = apci3xxx_ai_insn_read;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->do_cmdtest = apci3xxx_ai_cmdtest;
+ s->do_cmd = apci3xxx_ai_cmd;
+ s->cancel = apci3xxx_ai_cancel;
+ }
+
+ subdev++;
+ }
+
+ /* Analog Output subdevice */
+ if (board->has_ao) {
+ s = &dev->subdevices[subdev];
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
+ s->n_chan = 4;
+ s->maxdata = 0x0fff;
+ s->range_table = &apci3xxx_ao_range;
+ s->insn_write = apci3xxx_ao_insn_write;
+
+ subdev++;
+ }
+
+ /* Digital Input subdevice */
+ if (board->has_dig_in) {
+ s = &dev->subdevices[subdev];
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 4;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = apci3xxx_di_insn_bits;
+
+ subdev++;
+ }
+
+ /* Digital Output subdevice */
+ if (board->has_dig_out) {
+ s = &dev->subdevices[subdev];
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITEABLE;
+ s->n_chan = 4;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = apci3xxx_do_insn_bits;
+
+ subdev++;
+ }
+
+ /* TTL Digital I/O subdevice */
+ if (board->has_ttl_io) {
+ s = &dev->subdevices[subdev];
+ s->type = COMEDI_SUBD_DIO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITEABLE;
+ s->n_chan = 24;
+ s->maxdata = 1;
+ s->io_bits = 0xff; /* channels 0-7 are always outputs */
+ s->range_table = &range_digital;
+ s->insn_config = apci3xxx_dio_insn_config;
+ s->insn_bits = apci3xxx_dio_insn_bits;
+
+ subdev++;
+ }
+
+ apci3xxx_reset(dev);
+ return 0;
+}
+
+static void apci3xxx_detach(struct comedi_device *dev)
+{
+ struct apci3xxx_private *devpriv = dev->private;
- return addi_auto_attach(dev, context);
+ if (devpriv) {
+ if (dev->iobase)
+ apci3xxx_reset(dev);
+ if (dev->irq)
+ free_irq(dev->irq, dev);
+ if (devpriv->mmio)
+ iounmap(devpriv->mmio);
+ }
+ comedi_pci_disable(dev);
}
static struct comedi_driver apci3xxx_driver = {
.driver_name = "addi_apci_3xxx",
.module = THIS_MODULE,
.auto_attach = apci3xxx_auto_attach,
- .detach = i_ADDI_Detach,
+ .detach = apci3xxx_detach,
};
static int apci3xxx_pci_probe(struct pci_dev *dev,
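
The new apci3xxx_ai_ns_to_timer() walks the board's supported time bases (ns, us, ms), rounds cmd->convert_arg according to the TRIG_ROUND flags, and keeps the first base whose divider fits the 16-bit timer register; apci3xxx_ai_cmdtest() then flags any resulting adjustment of convert_arg as a step-4 error so the caller can resubmit with the fixed-up value. A self-contained illustration of that rounding, outside the driver (the 0x10000 limit and the ns/us/ms bases come from the hunk above; everything else is illustrative):

#include <stdio.h>

/* time bases available to e.g. an apci3003-style board: ns, us, ms */
static const unsigned int bases[] = { 1, 1000, 1000000 };

/* round *ns to the nearest multiple of the smallest base whose
 * divider still fits in the 16-bit timer register (< 0x10000) */
static int sketch_ns_to_timer(unsigned int *ns,
			      unsigned int *timer, unsigned int *base_out)
{
	int i;

	for (i = 0; i < 3; i++) {
		unsigned int base = bases[i];
		unsigned int timer_val = (*ns + base / 2) / base;

		if (timer_val < 0x10000) {
			*timer = timer_val;
			*base_out = base;
			*ns = timer_val * base;	/* rounded value written back */
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	unsigned int ns = 123456789, timer, base;

	if (!sketch_ns_to_timer(&ns, &timer, &base))
		/* 123456789 ns does not fit as a ns or us divider,
		 * so it rounds to 123 ms: timer=123, base=1000000 */
		printf("timer=%u base=%uns -> %uns\n", timer, base, ns);
	return 0;
}
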
diff --git a/drivers/staging/comedi/drivers/addi_watchdog.c b/drivers/staging/comedi/drivers/addi_watchdog.c
index 1666b5f..23031fe 100644
--- a/drivers/staging/comedi/drivers/addi_watchdog.c
+++ b/drivers/staging/comedi/drivers/addi_watchdog.c
@@ -16,12 +16,9 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/module.h>
#include "../comedidev.h"
#include "addi_watchdog.h"
@@ -130,14 +127,12 @@ int addi_watchdog_init(struct comedi_subdevice *s, unsigned long iobase)
{
struct addi_watchdog_private *spriv;
- spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
+ spriv = comedi_alloc_spriv(s, sizeof(*spriv));
if (!spriv)
return -ENOMEM;
spriv->iobase = iobase;
- s->private = spriv;
-
s->type = COMEDI_SUBD_TIMER;
s->subdev_flags = SDF_WRITEABLE;
s->n_chan = 1;
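
A pattern that repeats through the remaining conversions in this series: the open-coded kzalloc() plus dev->private (or s->private) assignment collapses into comedi_alloc_devpriv() or comedi_alloc_spriv(), which allocate the private data and leave its lifetime to the comedi core — which is also why the comedi_spriv_free() calls disappear from the detach handlers above. A minimal before/after sketch of the devpriv case, with no behaviour implied beyond what the hunks show:

	/* before: allocate, check, then hook up dev->private by hand */
	devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
	if (!devpriv)
		return -ENOMEM;
	dev->private = devpriv;

	/* after: one helper call allocates and attaches the private data */
	devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
	if (!devpriv)
		return -ENOMEM;
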
diff --git a/drivers/staging/comedi/drivers/adl_pci6208.c b/drivers/staging/comedi/drivers/adl_pci6208.c
index 8a438ff..a67ad57 100644
--- a/drivers/staging/comedi/drivers/adl_pci6208.c
+++ b/drivers/staging/comedi/drivers/adl_pci6208.c
@@ -20,10 +20,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: adl_pci6208
@@ -42,6 +38,7 @@ References:
- adl_pci9118.c
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
@@ -176,10 +173,9 @@ static int pci6208_auto_attach(struct comedi_device *dev,
dev->board_ptr = boardinfo;
dev->board_name = boardinfo->name;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/adl_pci7x3x.c b/drivers/staging/comedi/drivers/adl_pci7x3x.c
index e396074..81b7203 100644
--- a/drivers/staging/comedi/drivers/adl_pci7x3x.c
+++ b/drivers/staging/comedi/drivers/adl_pci7x3x.c
@@ -19,10 +19,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
@@ -48,6 +44,7 @@ driver.
Configuration Options: not applicable, uses comedi PCI auto config
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
diff --git a/drivers/staging/comedi/drivers/adl_pci8164.c b/drivers/staging/comedi/drivers/adl_pci8164.c
index b3ec60a..b3d0092 100644
--- a/drivers/staging/comedi/drivers/adl_pci8164.c
+++ b/drivers/staging/comedi/drivers/adl_pci8164.c
@@ -13,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
@@ -31,6 +27,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
diff --git a/drivers/staging/comedi/drivers/adl_pci9111.c b/drivers/staging/comedi/drivers/adl_pci9111.c
index 6247fdc..78cea19 100644
--- a/drivers/staging/comedi/drivers/adl_pci9111.c
+++ b/drivers/staging/comedi/drivers/adl_pci9111.c
@@ -17,10 +17,6 @@ This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
@@ -68,6 +64,7 @@ TODO:
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -859,10 +856,9 @@ static int pci9111_auto_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
int ret;
- dev_private = kzalloc(sizeof(*dev_private), GFP_KERNEL);
+ dev_private = comedi_alloc_devpriv(dev, sizeof(*dev_private));
if (!dev_private)
return -ENOMEM;
- dev->private = dev_private;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/adl_pci9118.c b/drivers/staging/comedi/drivers/adl_pci9118.c
index cb4ef2d..22196ad 100644
--- a/drivers/staging/comedi/drivers/adl_pci9118.c
+++ b/drivers/staging/comedi/drivers/adl_pci9118.c
@@ -77,6 +77,7 @@ Configuration options:
* manual attachment.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/gfp.h>
@@ -2140,10 +2141,9 @@ static int pci9118_attach(struct comedi_device *dev,
softsshdelay = it->options[4];
hw_err_mask = it->options[5];
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
pcidev = pci9118_find_pci(dev, it);
if (!pcidev)
@@ -2160,10 +2160,9 @@ static int pci9118_auto_attach(struct comedi_device *dev,
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
struct pci9118_private *devpriv;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
dev->board_ptr = pci9118_find_boardinfo(pcidev);
if (dev->board_ptr == NULL) {
diff --git a/drivers/staging/comedi/drivers/adq12b.c b/drivers/staging/comedi/drivers/adq12b.c
index 71142e3..cdf5ba2 100644
--- a/drivers/staging/comedi/drivers/adq12b.c
+++ b/drivers/staging/comedi/drivers/adq12b.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: adq12b
@@ -78,6 +73,9 @@ If you do not specify any options, they will default to
*/
+#include <linux/module.h>
+#include <linux/delay.h>
+
#include "../comedidev.h"
/* address scheme (page 2.17 of the manual) */
@@ -219,10 +217,9 @@ static int adq12b_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
devpriv->unipolar = it->options[1];
devpriv->differential = it->options[2];
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index f847bbc..f84df46 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -41,6 +41,7 @@ Configuration options:
device will be used.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
@@ -1233,10 +1234,9 @@ static int pci1710_auto_attach(struct comedi_device *dev,
dev->board_ptr = this_board;
dev->board_name = this_board->name;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/adv_pci1723.c b/drivers/staging/comedi/drivers/adv_pci1723.c
index ccc114d..b793d69 100644
--- a/drivers/staging/comedi/drivers/adv_pci1723.c
+++ b/drivers/staging/comedi/drivers/adv_pci1723.c
@@ -1,4 +1,4 @@
-/*******************************************************************************
+/*
comedi/drivers/pci1723.c
COMEDI - Linux Control and Measurement Device Interface
@@ -13,12 +13,7 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*******************************************************************************/
+*/
/*
Driver: adv_pci1723
Description: Advantech PCI-1723
@@ -48,6 +43,7 @@ TODO:
3. Implement calibration.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
@@ -184,38 +180,29 @@ static int pci1723_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
+ unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int mask;
- unsigned int bits;
- unsigned short dio_mode;
+ unsigned short mode;
+ int ret;
- mask = 1 << CR_CHAN(insn->chanspec);
- if (mask & 0x00FF)
- bits = 0x00FF;
+ if (chan < 8)
+ mask = 0x00ff;
else
- bits = 0xFF00;
+ mask = 0xff00;
- switch (data[0]) {
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~bits;
- break;
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= bits;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (s->io_bits & bits) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- default:
- return -EINVAL;
- }
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
/* update hardware DIO mode */
- dio_mode = 0x0000; /* low byte output, high byte output */
- if ((s->io_bits & 0x00FF) == 0)
- dio_mode |= 0x0001; /* low byte input */
- if ((s->io_bits & 0xFF00) == 0)
- dio_mode |= 0x0002; /* high byte input */
- outw(dio_mode, dev->iobase + PCI1723_DIGITAL_IO_PORT_SET);
- return 1;
+ mode = 0x0000; /* assume output */
+ if (!(s->io_bits & 0x00ff))
+ mode |= 0x0001; /* low byte input */
+ if (!(s->io_bits & 0xff00))
+ mode |= 0x0002; /* high byte input */
+ outw(mode, dev->iobase + PCI1723_DIGITAL_IO_PORT_SET);
+
+ return insn->n;
}
/*
@@ -242,10 +229,9 @@ static int pci1723_auto_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
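The pci1723_dio_insn_config() rewrite above is the other recurring conversion in this series: the per-driver switch over INSN_CONFIG_DIO_INPUT/OUTPUT/QUERY collapses into comedi_dio_insn_config(), called with a mask covering the group of channels that share a direction. Conceptually the helper does something like this (simplified sketch, not a quote of the core source); drivers whose lines are individually programmable pass a mask of 0 and the helper falls back to the single-channel mask, as the cb_pcidas64 hunk further down does:

int comedi_dio_insn_config(struct comedi_device *dev,
			   struct comedi_subdevice *s,
			   struct comedi_insn *insn,
			   unsigned int *data,
			   unsigned int mask)
{
	unsigned int chan_mask = 1 << CR_CHAN(insn->chanspec);

	if (!mask)
		mask = chan_mask;

	switch (data[0]) {
	case INSN_CONFIG_DIO_INPUT:
		s->io_bits &= ~mask;
		break;
	case INSN_CONFIG_DIO_OUTPUT:
		s->io_bits |= mask;
		break;
	case INSN_CONFIG_DIO_QUERY:
		data[1] = (s->io_bits & mask) ? COMEDI_OUTPUT : COMEDI_INPUT;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

The driver then pushes s->io_bits to the hardware and returns insn->n itself, which is why each converted handler ends with a register write followed by return insn->n.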
diff --git a/drivers/staging/comedi/drivers/adv_pci1724.c b/drivers/staging/comedi/drivers/adv_pci1724.c
index e60f125..009a303 100644
--- a/drivers/staging/comedi/drivers/adv_pci1724.c
+++ b/drivers/staging/comedi/drivers/adv_pci1724.c
@@ -17,12 +17,7 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-************************************************************************/
+*/
/*
@@ -57,6 +52,8 @@ supported PCI devices are configured as comedi devices automatically.
*/
+#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/pci.h>
#include "../comedidev.h"
@@ -128,10 +125,6 @@ static const struct comedi_lrange ao_ranges_1724 = { 4,
}
};
-static const struct comedi_lrange *const ao_range_list_1724[NUM_AO_CHANNELS] = {
- [0 ... NUM_AO_CHANNELS - 1] = &ao_ranges_1724,
-};
-
/* this structure is for data unique to this hardware driver. */
struct adv_pci1724_private {
int ao_value[NUM_AO_CHANNELS];
@@ -311,7 +304,7 @@ static int setup_subdevices(struct comedi_device *dev)
s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_GROUND;
s->n_chan = NUM_AO_CHANNELS;
s->maxdata = 0x3fff;
- s->range_table_list = ao_range_list_1724;
+ s->range_table = &ao_ranges_1724;
s->insn_read = ao_readback_insn;
s->insn_write = ao_winsn;
@@ -345,10 +338,9 @@ static int adv_pci1724_auto_attach(struct comedi_device *dev,
int retval;
unsigned int board_id;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
/* init software copies of output values to indicate we don't know
* what the output value is since it has never been written. */
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
index f70c6747..f091fa0 100644
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ b/drivers/staging/comedi/drivers/adv_pci_dio.c
@@ -29,6 +29,7 @@ Configuration options:
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
@@ -1107,10 +1108,9 @@ static int pci_dio_auto_attach(struct comedi_device *dev,
dev->board_ptr = this_board;
dev->board_name = this_board->name;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
@@ -1173,19 +1173,11 @@ static int pci_dio_auto_attach(struct comedi_device *dev,
static void pci_dio_detach(struct comedi_device *dev)
{
struct pci_dio_private *devpriv = dev->private;
- struct comedi_subdevice *s;
- int i;
if (devpriv) {
if (devpriv->valid)
pci_dio_reset(dev);
}
- for (i = 0; i < dev->n_subdevices; i++) {
- s = &dev->subdevices[i];
- if (s->type == COMEDI_SUBD_DIO)
- comedi_spriv_free(dev, i);
- s->private = NULL; /* some private data is static */
- }
comedi_pci_disable(dev);
}
diff --git a/drivers/staging/comedi/drivers/aio_aio12_8.c b/drivers/staging/comedi/drivers/aio_aio12_8.c
index e2dc08a..abb2849 100644
--- a/drivers/staging/comedi/drivers/aio_aio12_8.c
+++ b/drivers/staging/comedi/drivers/aio_aio12_8.c
@@ -14,10 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
@@ -39,8 +35,8 @@ Notes:
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
#include "8255.h"
/*
@@ -206,10 +202,9 @@ static int aio_aio12_8_attach(struct comedi_device *dev,
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
@@ -259,17 +254,11 @@ static int aio_aio12_8_attach(struct comedi_device *dev,
return 0;
}
-static void aio_aio12_8_detach(struct comedi_device *dev)
-{
- comedi_spriv_free(dev, 2);
- comedi_legacy_detach(dev);
-}
-
static struct comedi_driver aio_aio12_8_driver = {
.driver_name = "aio_aio12_8",
.module = THIS_MODULE,
.attach = aio_aio12_8_attach,
- .detach = aio_aio12_8_detach,
+ .detach = comedi_legacy_detach,
.board_name = &board_types[0].name,
.num_names = ARRAY_SIZE(board_types),
.offset = sizeof(struct aio12_8_boardtype),
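The aio_aio12_8 hunk goes a step further: with the subdevice private data now owned by the core, nothing driver-specific is left to do at detach, so the wrapper is dropped and .detach points straight at comedi_legacy_detach. Roughly, that core helper just undoes what the legacy attach requested (sketch from memory of the 3.13-era core, not a quote):

void comedi_legacy_detach(struct comedi_device *dev)
{
	if (dev->irq) {
		free_irq(dev->irq, dev);
		dev->irq = 0;
	}
	if (dev->iobase && dev->iolen) {
		release_region(dev->iobase, dev->iolen);
		dev->iobase = 0;
	}
}

The cb_pcidda and cb_pcimdda hunks below do the same for PCI boards, pointing .detach directly at comedi_pci_disable.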
diff --git a/drivers/staging/comedi/drivers/aio_iiro_16.c b/drivers/staging/comedi/drivers/aio_iiro_16.c
index 126854c..afe87cc 100644
--- a/drivers/staging/comedi/drivers/aio_iiro_16.c
+++ b/drivers/staging/comedi/drivers/aio_iiro_16.c
@@ -14,10 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
@@ -34,8 +30,8 @@ Configuration Options:
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
#define AIO_IIRO_16_SIZE 0x08
#define AIO_IIRO_16_RELAY_0_7 0x00
diff --git a/drivers/staging/comedi/drivers/amplc_dio200.c b/drivers/staging/comedi/drivers/amplc_dio200.c
index 297750b..dc1dee7 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200.c
@@ -17,11 +17,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
* Driver: amplc_dio200
@@ -197,8 +192,7 @@
* order they appear in the channel list.
*/
-#include <linux/slab.h>
-
+#include <linux/module.h>
#include "../comedidev.h"
#include "amplc_dio200.h"
@@ -277,10 +271,9 @@ static int dio200_attach(struct comedi_device *dev, struct comedi_devconfig *it)
irq = it->options[1];
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_request_region(dev, it->options[0], thisboard->mainsize);
if (ret)
diff --git a/drivers/staging/comedi/drivers/amplc_dio200.h b/drivers/staging/comedi/drivers/amplc_dio200.h
index cf2e726..43160b9 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200.h
+++ b/drivers/staging/comedi/drivers/amplc_dio200.h
@@ -18,11 +18,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
#ifndef AMPLC_DIO200_H_INCLUDED
diff --git a/drivers/staging/comedi/drivers/amplc_dio200_common.c b/drivers/staging/comedi/drivers/amplc_dio200_common.c
index 3403e5c..c1f723e 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200_common.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200_common.c
@@ -17,15 +17,10 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/slab.h>
#include "../comedidev.h"
@@ -561,7 +556,7 @@ dio200_subdev_intr_init(struct comedi_device *dev, struct comedi_subdevice *s,
const struct dio200_layout *layout = dio200_dev_layout(dev);
struct dio200_subdev_intr *subpriv;
- subpriv = kzalloc(sizeof(*subpriv), GFP_KERNEL);
+ subpriv = comedi_alloc_spriv(s, sizeof(*subpriv));
if (!subpriv)
return -ENOMEM;
@@ -573,7 +568,6 @@ dio200_subdev_intr_init(struct comedi_device *dev, struct comedi_subdevice *s,
/* Disable interrupt sources. */
dio200_write8(dev, subpriv->ofs, 0);
- s->private = subpriv;
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
if (layout->has_int_sce) {
@@ -888,11 +882,10 @@ dio200_subdev_8254_init(struct comedi_device *dev, struct comedi_subdevice *s,
struct dio200_subdev_8254 *subpriv;
unsigned int chan;
- subpriv = kzalloc(sizeof(*subpriv), GFP_KERNEL);
+ subpriv = comedi_alloc_spriv(s, sizeof(*subpriv));
if (!subpriv)
return -ENOMEM;
- s->private = subpriv;
s->type = COMEDI_SUBD_COUNTER;
s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
s->n_chan = 3;
@@ -983,34 +976,26 @@ static int dio200_subdev_8255_config(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
+ unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int mask;
- unsigned int bits;
-
- mask = 1 << CR_CHAN(insn->chanspec);
- if (mask & 0x0000ff)
- bits = 0x0000ff;
- else if (mask & 0x00ff00)
- bits = 0x00ff00;
- else if (mask & 0x0f0000)
- bits = 0x0f0000;
+ int ret;
+
+ if (chan < 8)
+ mask = 0x0000ff;
+ else if (chan < 16)
+ mask = 0x00ff00;
+ else if (chan < 20)
+ mask = 0x0f0000;
else
- bits = 0xf00000;
- switch (data[0]) {
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~bits;
- break;
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= bits;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (s->io_bits & bits) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- }
+ mask = 0xf00000;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
+
dio200_subdev_8255_set_dir(dev, s);
- return 1;
+
+ return insn->n;
}
/*
@@ -1024,11 +1009,12 @@ static int dio200_subdev_8255_init(struct comedi_device *dev,
{
struct dio200_subdev_8255 *subpriv;
- subpriv = kzalloc(sizeof(*subpriv), GFP_KERNEL);
+ subpriv = comedi_alloc_spriv(s, sizeof(*subpriv));
if (!subpriv)
return -ENOMEM;
+
subpriv->ofs = offset;
- s->private = subpriv;
+
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->n_chan = 24;
@@ -1230,28 +1216,11 @@ void amplc_dio200_common_detach(struct comedi_device *dev)
{
const struct dio200_board *thisboard = comedi_board(dev);
struct dio200_private *devpriv = dev->private;
- const struct dio200_layout *layout;
- unsigned n;
if (!thisboard || !devpriv)
return;
if (dev->irq)
free_irq(dev->irq, dev);
- if (dev->subdevices) {
- layout = dio200_board_layout(thisboard);
- for (n = 0; n < dev->n_subdevices; n++) {
- switch (layout->sdtype[n]) {
- case sd_8254:
- case sd_8255:
- case sd_intr:
- comedi_spriv_free(dev, n);
- break;
- case sd_timer:
- default:
- break;
- }
- }
- }
}
EXPORT_SYMBOL_GPL(amplc_dio200_common_detach);
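The dio200_subdev_8255_config() rewrite shows why the mask passed to comedi_dio_insn_config() is a group mask: on an 8255-style DIO block the direction is programmed per port (port A, port B, and the two nibbles of port C), so configuring any one channel has to flip its whole group. The channel-to-mask mapping used above, pulled out as a helper for clarity:

static unsigned int dio200_8255_group_mask(unsigned int chan)
{
	if (chan < 8)
		return 0x0000ff;	/* port A */
	if (chan < 16)
		return 0x00ff00;	/* port B */
	if (chan < 20)
		return 0x0f0000;	/* port C, lower nibble */
	return 0xf00000;		/* port C, upper nibble */
}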
diff --git a/drivers/staging/comedi/drivers/amplc_dio200_pci.c b/drivers/staging/comedi/drivers/amplc_dio200_pci.c
index 4be44e8..a810a24 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200_pci.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200_pci.c
@@ -16,11 +16,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
* Driver: amplc_dio200_pci
@@ -225,9 +220,9 @@
* order they appear in the channel list.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
-#include <linux/slab.h>
#include "../comedidev.h"
@@ -385,10 +380,9 @@ static int dio200_pci_auto_attach(struct comedi_device *dev,
dev_info(dev->class_dev, "%s: attach pci %s (%s)\n",
dev->driver->driver_name, pci_name(pci_dev), dev->board_name);
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/amplc_pc236.c b/drivers/staging/comedi/drivers/amplc_pc236.c
index 115ecd5..98075f9 100644
--- a/drivers/staging/comedi/drivers/amplc_pc236.c
+++ b/drivers/staging/comedi/drivers/amplc_pc236.c
@@ -16,11 +16,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: amplc_pc236
@@ -52,6 +47,7 @@ the IRQ jumper. If no interrupt is connected, then subdevice 1 is
unused.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
@@ -472,10 +468,9 @@ static int pc236_attach(struct comedi_device *dev, struct comedi_devconfig *it)
struct pc236_private *devpriv;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
/* Process options according to bus type. */
if (is_isa_board(thisboard)) {
@@ -515,10 +510,9 @@ static int pc236_auto_attach(struct comedi_device *dev,
dev_info(dev->class_dev, PC236_DRIVER_NAME ": attach pci %s\n",
pci_name(pci_dev));
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
dev->board_ptr = pc236_find_pci_board(pci_dev);
if (dev->board_ptr == NULL) {
@@ -543,7 +537,6 @@ static void pc236_detach(struct comedi_device *dev)
return;
if (dev->iobase)
pc236_intr_disable(dev);
- comedi_spriv_free(dev, 0);
if (is_isa_board(thisboard)) {
comedi_legacy_detach(dev);
} else if (is_pci_board(thisboard)) {
diff --git a/drivers/staging/comedi/drivers/amplc_pc263.c b/drivers/staging/comedi/drivers/amplc_pc263.c
index 94a752d..e710804 100644
--- a/drivers/staging/comedi/drivers/amplc_pc263.c
+++ b/drivers/staging/comedi/drivers/amplc_pc263.c
@@ -16,11 +16,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: amplc_pc263
@@ -38,6 +33,7 @@ connected to a reed-relay. Relay contacts are closed when output is 1.
The state of the outputs can be read.
*/
+#include <linux/module.h>
#include "../comedidev.h"
#define PC263_DRIVER_NAME "amplc_pc263"
diff --git a/drivers/staging/comedi/drivers/amplc_pci224.c b/drivers/staging/comedi/drivers/amplc_pci224.c
index 4d7eab9..179de53 100644
--- a/drivers/staging/comedi/drivers/amplc_pci224.c
+++ b/drivers/staging/comedi/drivers/amplc_pci224.c
@@ -16,11 +16,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: amplc_pci224
@@ -103,6 +98,7 @@ Caveats:
correctly.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
@@ -1424,10 +1420,9 @@ static int pci224_attach(struct comedi_device *dev, struct comedi_devconfig *it)
dev_info(dev->class_dev, DRIVER_NAME ": attach\n");
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
pci_dev = pci224_find_pci_dev(dev, it);
if (!pci_dev)
@@ -1445,10 +1440,9 @@ pci224_auto_attach(struct comedi_device *dev, unsigned long context_unused)
dev_info(dev->class_dev, DRIVER_NAME ": attach pci %s\n",
pci_name(pci_dev));
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
dev->board_ptr = pci224_find_pci_board(pci_dev);
if (dev->board_ptr == NULL) {
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index 49200fb..43059c2 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -16,10 +16,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: amplc_pci230
@@ -188,6 +184,7 @@ Support for PCI230+/260+, more triggered scan functionality, and workarounds
for (or detection of) various hardware problems added by Ian Abbott.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -2619,10 +2616,9 @@ static int pci230_alloc_private(struct comedi_device *dev)
{
struct pci230_private *devpriv;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
spin_lock_init(&devpriv->isr_spinlock);
spin_lock_init(&devpriv->res_spinlock);
@@ -2834,7 +2830,6 @@ static void pci230_detach(struct comedi_device *dev)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- comedi_spriv_free(dev, 2);
if (dev->irq)
free_irq(dev->irq, dev);
comedi_pci_disable(dev);
diff --git a/drivers/staging/comedi/drivers/amplc_pci263.c b/drivers/staging/comedi/drivers/amplc_pci263.c
index 8b57533..145bb48 100644
--- a/drivers/staging/comedi/drivers/amplc_pci263.c
+++ b/drivers/staging/comedi/drivers/amplc_pci263.c
@@ -16,11 +16,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: amplc_pci263
@@ -37,6 +32,7 @@ connected to a reed-relay. Relay contacts are closed when output is 1.
The state of the outputs can be read.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
diff --git a/drivers/staging/comedi/drivers/c6xdigio.c b/drivers/staging/comedi/drivers/c6xdigio.c
index 92376dc..217aa19c 100644
--- a/drivers/staging/comedi/drivers/c6xdigio.c
+++ b/drivers/staging/comedi/drivers/c6xdigio.c
@@ -16,11 +16,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: c6xdigio
@@ -40,8 +35,6 @@ http://robot0.ge.uiuc.edu/~spong/mecha/
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/timer.h>
diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c
index f874fff..0ce93da 100644
--- a/drivers/staging/comedi/drivers/cb_das16_cs.c
+++ b/drivers/staging/comedi/drivers/cb_das16_cs.c
@@ -15,10 +15,6 @@
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
PCMCIA support code for this driver is adapted from the dummy_cs.c
driver of the Linux PCMCIA Card Services package.
@@ -38,8 +34,8 @@ Status: experimental
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/slab.h>
#include <linux/delay.h>
#include "../comedidev.h"
@@ -345,33 +341,22 @@ static int das16cs_dio_insn_bits(struct comedi_device *dev,
static int das16cs_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct das16cs_private *devpriv = dev->private;
- int chan = CR_CHAN(insn->chanspec);
- int bits;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int mask;
+ int ret;
if (chan < 4)
- bits = 0x0f;
+ mask = 0x0f;
else
- bits = 0xf0;
+ mask = 0xf0;
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= bits;
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= bits;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] =
- (s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- break;
- }
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
devpriv->status2 &= ~0x00c0;
devpriv->status2 |= (s->io_bits & 0xf0) ? 0x0080 : 0;
@@ -424,10 +409,9 @@ static int das16cs_auto_attach(struct comedi_device *dev,
return ret;
dev->irq = link->irq;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_alloc_subdevices(dev, 3);
if (ret)
diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c
index 53dd298..41d89ee 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas.c
@@ -19,12 +19,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-************************************************************************
*/
/*
Driver: cb_pcidas
@@ -67,6 +61,7 @@ TODO:
analog triggering on 1602 series
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -1450,10 +1445,9 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev,
dev->board_ptr = thisboard;
dev->board_name = thisboard->name;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
@@ -1608,7 +1602,6 @@ static void cb_pcidas_detach(struct comedi_device *dev)
}
if (dev->irq)
free_irq(dev->irq, dev);
- comedi_spriv_free(dev, 2);
comedi_pci_disable(dev);
}
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
index c3e5495..388dbd7 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
@@ -28,12 +28,7 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-************************************************************************/
+*/
/*
* Driver: cb_pcidas64
@@ -87,6 +82,7 @@ TODO:
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -3514,31 +3510,20 @@ static int do_wbits(struct comedi_device *dev, struct comedi_subdevice *s,
static int dio_60xx_config_insn(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct pcidas64_private *devpriv = dev->private;
- unsigned int mask;
-
- mask = 1 << CR_CHAN(insn->chanspec);
+ int ret;
- switch (data[0]) {
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~mask;
- break;
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= mask;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (s->io_bits & mask) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return 2;
- default:
- return -EINVAL;
- }
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
writeb(s->io_bits,
devpriv->dio_counter_iobase + DIO_DIRECTION_60XX_REG);
- return 1;
+ return insn->n;
}
static int dio_60xx_wbits(struct comedi_device *dev, struct comedi_subdevice *s,
@@ -4039,10 +4024,9 @@ static int auto_attach(struct comedi_device *dev,
return -ENODEV;
dev->board_ptr = thisboard;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
retval = comedi_pci_enable(dev);
if (retval)
@@ -4163,7 +4147,6 @@ static void detach(struct comedi_device *dev)
devpriv->ao_dma_desc_bus_addr);
}
}
- comedi_spriv_free(dev, 4);
comedi_pci_disable(dev);
}
diff --git a/drivers/staging/comedi/drivers/cb_pcidda.c b/drivers/staging/comedi/drivers/cb_pcidda.c
index f9b4598..94f1158 100644
--- a/drivers/staging/comedi/drivers/cb_pcidda.c
+++ b/drivers/staging/comedi/drivers/cb_pcidda.c
@@ -17,10 +17,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
@@ -41,6 +37,7 @@
* Only simple analog output writing is supported.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
@@ -352,10 +349,9 @@ static int cb_pcidda_auto_attach(struct comedi_device *dev,
dev->board_ptr = thisboard;
dev->board_name = thisboard->name;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
@@ -397,18 +393,11 @@ static int cb_pcidda_auto_attach(struct comedi_device *dev,
return 0;
}
-static void cb_pcidda_detach(struct comedi_device *dev)
-{
- comedi_spriv_free(dev, 1);
- comedi_spriv_free(dev, 2);
- comedi_pci_disable(dev);
-}
-
static struct comedi_driver cb_pcidda_driver = {
.driver_name = "cb_pcidda",
.module = THIS_MODULE,
.auto_attach = cb_pcidda_auto_attach,
- .detach = cb_pcidda_detach,
+ .detach = comedi_pci_disable,
};
static int cb_pcidda_pci_probe(struct pci_dev *dev,
diff --git a/drivers/staging/comedi/drivers/cb_pcimdas.c b/drivers/staging/comedi/drivers/cb_pcimdas.c
index 29813c9..30520d4 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdas.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdas.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: cb_pcimdas
@@ -40,8 +35,8 @@ No interrupts, multi channel or FIFO AI, although the card looks like it could s
See http://www.mccdaq.com/PDFs/Manuals/pcim-das1602-16.pdf for more details.
*/
+#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/delay.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
@@ -215,10 +210,9 @@ static int cb_pcimdas_auto_attach(struct comedi_device *dev,
unsigned long iobase_8255;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/cb_pcimdda.c b/drivers/staging/comedi/drivers/cb_pcimdda.c
index 88f03ae..edf17b6 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdda.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdda.c
@@ -15,11 +15,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: cb_pcimdda
@@ -79,6 +74,7 @@ Configuration Options: not applicable, uses PCI auto config
-Calin Culianu <calin@ajvar.org>
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
@@ -161,10 +157,9 @@ static int cb_pcimdda_auto_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
@@ -197,17 +192,11 @@ static int cb_pcimdda_auto_attach(struct comedi_device *dev,
return 1;
}
-static void cb_pcimdda_detach(struct comedi_device *dev)
-{
- comedi_spriv_free(dev, 1);
- comedi_pci_disable(dev);
-}
-
static struct comedi_driver cb_pcimdda_driver = {
.driver_name = "cb_pcimdda",
.module = THIS_MODULE,
.auto_attach = cb_pcimdda_auto_attach,
- .detach = cb_pcimdda_detach,
+ .detach = comedi_pci_disable,
};
static int cb_pcimdda_pci_probe(struct pci_dev *dev,
diff --git a/drivers/staging/comedi/drivers/comedi_bond.c b/drivers/staging/comedi/drivers/comedi_bond.c
index 1bb5381..51a59e5 100644
--- a/drivers/staging/comedi/drivers/comedi_bond.c
+++ b/drivers/staging/comedi/drivers/comedi_bond.c
@@ -1,135 +1,131 @@
/*
- comedi/drivers/comedi_bond.c
- A Comedi driver to 'bond' or merge multiple drivers and devices as one.
+ * comedi_bond.c
+ * A Comedi driver to 'bond' or merge multiple drivers and devices as one.
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2005 Calin A. Culianu <calin@ajvar.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- Copyright (C) 2005 Calin A. Culianu <calin@ajvar.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
/*
-Driver: comedi_bond
-Description: A driver to 'bond' (merge) multiple subdevices from multiple
- devices together as one.
-Devices:
-Author: ds
-Updated: Mon, 10 Oct 00:18:25 -0500
-Status: works
-
-This driver allows you to 'bond' (merge) multiple comedi subdevices
-(coming from possibly difference boards and/or drivers) together. For
-example, if you had a board with 2 different DIO subdevices, and
-another with 1 DIO subdevice, you could 'bond' them with this driver
-so that they look like one big fat DIO subdevice. This makes writing
-applications slightly easier as you don't have to worry about managing
-different subdevices in the application -- you just worry about
-indexing one linear array of channel id's.
-
-Right now only DIO subdevices are supported as that's the personal itch
-I am scratching with this driver. If you want to add support for AI and AO
-subdevs, go right on ahead and do so!
-
-Commands aren't supported -- although it would be cool if they were.
-
-Configuration Options:
- List of comedi-minors to bond. All subdevices of the same type
- within each minor will be concatenated together in the order given here.
-*/
-
+ * Driver: comedi_bond
+ * Description: A driver to 'bond' (merge) multiple subdevices from multiple
+ * devices together as one.
+ * Devices:
+ * Author: ds
+ * Updated: Mon, 10 Oct 00:18:25 -0500
+ * Status: works
+ *
+ * This driver allows you to 'bond' (merge) multiple comedi subdevices
+ * (coming from possibly difference boards and/or drivers) together. For
+ * example, if you had a board with 2 different DIO subdevices, and
+ * another with 1 DIO subdevice, you could 'bond' them with this driver
+ * so that they look like one big fat DIO subdevice. This makes writing
+ * applications slightly easier as you don't have to worry about managing
+ * different subdevices in the application -- you just worry about
+ * indexing one linear array of channel id's.
+ *
+ * Right now only DIO subdevices are supported as that's the personal itch
+ * I am scratching with this driver. If you want to add support for AI and AO
+ * subdevs, go right on ahead and do so!
+ *
+ * Commands aren't supported -- although it would be cool if they were.
+ *
+ * Configuration Options:
+ * List of comedi-minors to bond. All subdevices of the same type
+ * within each minor will be concatenated together in the order given here.
+ */
+
+#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include "../comedi.h"
#include "../comedilib.h"
#include "../comedidev.h"
-/* The maxiumum number of channels per subdevice. */
-#define MAX_CHANS 256
-
-struct BondedDevice {
+struct bonded_device {
struct comedi_device *dev;
unsigned minor;
unsigned subdev;
- unsigned subdev_type;
unsigned nchans;
- unsigned chanid_offset; /* The offset into our unified linear
- channel-id's of chanid 0 on this
- subdevice. */
};
-/* this structure is for data unique to this hardware driver. If
- several hardware drivers keep similar information in this structure,
- feel free to suggest moving the variable to the struct comedi_device struct. */
struct comedi_bond_private {
# define MAX_BOARD_NAME 256
char name[MAX_BOARD_NAME];
- struct BondedDevice **devs;
+ struct bonded_device **devs;
unsigned ndevs;
- struct BondedDevice *chanIdDevMap[MAX_CHANS];
unsigned nchans;
};
-/* DIO devices are slightly special. Although it is possible to
- * implement the insn_read/insn_write interface, it is much more
- * useful to applications if you implement the insn_bits interface.
- * This allows packed reading/writing of the DIO channels. The
- * comedi core can convert between insn_bits and insn_read/write */
static int bonding_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct comedi_bond_private *devpriv = dev->private;
-#define LSAMPL_BITS (sizeof(unsigned int)*8)
- unsigned nchans = LSAMPL_BITS, num_done = 0, i;
-
- if (devpriv->nchans < nchans)
- nchans = devpriv->nchans;
-
- /* The insn data is a mask in data[0] and the new data
- * in data[1], each channel cooresponding to a bit. */
- for (i = 0; num_done < nchans && i < devpriv->ndevs; ++i) {
- struct BondedDevice *bdev = devpriv->devs[i];
- /* Grab the channel mask and data of only the bits corresponding
- to this subdevice.. need to shift them to zero position of
- course. */
- /* Bits corresponding to this subdev. */
- unsigned int subdevMask = ((1 << bdev->nchans) - 1);
- unsigned int writeMask, dataBits;
-
- /* Argh, we have >= LSAMPL_BITS chans.. take all bits */
- if (bdev->nchans >= LSAMPL_BITS)
- subdevMask = (unsigned int)(-1);
-
- writeMask = (data[0] >> num_done) & subdevMask;
- dataBits = (data[1] >> num_done) & subdevMask;
-
- /* Read/Write the new digital lines */
- if (comedi_dio_bitfield(bdev->dev, bdev->subdev, writeMask,
- &dataBits) != 2)
- return -EINVAL;
-
- /* Make room for the new bits in data[1], the return value */
- data[1] &= ~(subdevMask << num_done);
- /* Put the bits in the return value */
- data[1] |= (dataBits & subdevMask) << num_done;
- /* Save the new bits to the saved state.. */
- s->state = data[1];
-
- num_done += bdev->nchans;
- }
+ unsigned int n_left, n_done, base_chan;
+ unsigned int write_mask, data_bits;
+ struct bonded_device **devs;
+
+ write_mask = data[0];
+ data_bits = data[1];
+ base_chan = CR_CHAN(insn->chanspec);
+ /* do a maximum of 32 channels, starting from base_chan. */
+ n_left = devpriv->nchans - base_chan;
+ if (n_left > 32)
+ n_left = 32;
+
+ n_done = 0;
+ devs = devpriv->devs;
+ do {
+ struct bonded_device *bdev = *devs++;
+
+ if (base_chan < bdev->nchans) {
+ /* base channel falls within bonded device */
+ unsigned int b_chans, b_mask, b_write_mask, b_data_bits;
+ int ret;
+
+ /*
+ * Get num channels to do for bonded device and set
+ * up mask and data bits for bonded device.
+ */
+ b_chans = bdev->nchans - base_chan;
+ if (b_chans > n_left)
+ b_chans = n_left;
+ b_mask = (1U << b_chans) - 1;
+ b_write_mask = (write_mask >> n_done) & b_mask;
+ b_data_bits = (data_bits >> n_done) & b_mask;
+ /* Read/Write the new digital lines. */
+ ret = comedi_dio_bitfield2(bdev->dev, bdev->subdev,
+ b_write_mask, &b_data_bits,
+ base_chan);
+ if (ret < 0)
+ return ret;
+ /* Place read bits into data[1]. */
+ data[1] &= ~(b_mask << n_done);
+ data[1] |= (b_data_bits & b_mask) << n_done;
+ /*
+ * Set up for following bonded device (if still have
+ * channels to read/write).
+ */
+ base_chan = 0;
+ n_done += b_chans;
+ n_left -= b_chans;
+ } else {
+ /* Skip bonded devices before base channel. */
+ base_chan -= bdev->nchans;
+ }
+ } while (n_left);
return insn->n;
}
@@ -139,99 +135,91 @@ static int bonding_dio_insn_config(struct comedi_device *dev,
struct comedi_insn *insn, unsigned int *data)
{
struct comedi_bond_private *devpriv = dev->private;
- int chan = CR_CHAN(insn->chanspec), ret, io_bits = s->io_bits;
- unsigned int io;
- struct BondedDevice *bdev;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int ret;
+ struct bonded_device *bdev;
+ struct bonded_device **devs;
- if (chan < 0 || chan >= devpriv->nchans)
- return -EINVAL;
- bdev = devpriv->chanIdDevMap[chan];
+ /*
+ * Locate bonded subdevice and adjust channel.
+ */
+ devs = devpriv->devs;
+ for (bdev = *devs++; chan >= bdev->nchans; bdev = *devs++)
+ chan -= bdev->nchans;
- /* The input or output configuration of each digital line is
+ /*
+ * The input or output configuration of each digital line is
* configured by a special insn_config instruction. chanspec
* contains the channel to be changed, and data[0] contains the
- * value COMEDI_INPUT or COMEDI_OUTPUT. */
+ * configuration instruction INSN_CONFIG_DIO_OUTPUT,
+ * INSN_CONFIG_DIO_INPUT or INSN_CONFIG_DIO_QUERY.
+ *
+ * Note that INSN_CONFIG_DIO_OUTPUT == COMEDI_OUTPUT,
+ * and INSN_CONFIG_DIO_INPUT == COMEDI_INPUT. This is deliberate ;)
+ */
switch (data[0]) {
case INSN_CONFIG_DIO_OUTPUT:
- io = COMEDI_OUTPUT; /* is this really necessary? */
- io_bits |= 1 << chan;
- break;
case INSN_CONFIG_DIO_INPUT:
- io = COMEDI_INPUT; /* is this really necessary? */
- io_bits &= ~(1 << chan);
+ ret = comedi_dio_config(bdev->dev, bdev->subdev, chan, data[0]);
break;
case INSN_CONFIG_DIO_QUERY:
- data[1] =
- (io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
+ ret = comedi_dio_get_config(bdev->dev, bdev->subdev, chan,
+ &data[1]);
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
break;
}
- /* 'real' channel id for this subdev.. */
- chan -= bdev->chanid_offset;
- ret = comedi_dio_config(bdev->dev, bdev->subdev, chan, io);
- if (ret != 1)
- return -EINVAL;
- /* Finally, save the new io_bits values since we didn't get
- an error above. */
- s->io_bits = io_bits;
- return insn->n;
-}
-
-static void *Realloc(const void *oldmem, size_t newlen, size_t oldlen)
-{
- void *newmem = kmalloc(newlen, GFP_KERNEL);
-
- if (newmem && oldmem)
- memcpy(newmem, oldmem, min(oldlen, newlen));
- kfree(oldmem);
- return newmem;
+ if (ret >= 0)
+ ret = insn->n;
+ return ret;
}
-static int doDevConfig(struct comedi_device *dev, struct comedi_devconfig *it)
+static int do_dev_config(struct comedi_device *dev, struct comedi_devconfig *it)
{
struct comedi_bond_private *devpriv = dev->private;
+ DECLARE_BITMAP(devs_opened, COMEDI_NUM_BOARD_MINORS);
int i;
- struct comedi_device *devs_opened[COMEDI_NUM_BOARD_MINORS];
- memset(devs_opened, 0, sizeof(devs_opened));
+ memset(&devs_opened, 0, sizeof(devs_opened));
devpriv->name[0] = 0;
- /* Loop through all comedi devices specified on the command-line,
- building our device list */
+ /*
+ * Loop through all comedi devices specified on the command-line,
+ * building our device list.
+ */
for (i = 0; i < COMEDI_NDEVCONFOPTS && (!i || it->options[i]); ++i) {
- char file[] = "/dev/comediXXXXXX";
+ char file[sizeof("/dev/comediXXXXXX")];
int minor = it->options[i];
struct comedi_device *d;
- int sdev = -1, nchans, tmp;
- struct BondedDevice *bdev = NULL;
+ int sdev = -1, nchans;
+ struct bonded_device *bdev;
+ struct bonded_device **devs;
if (minor < 0 || minor >= COMEDI_NUM_BOARD_MINORS) {
dev_err(dev->class_dev,
"Minor %d is invalid!\n", minor);
- return 0;
+ return -EINVAL;
}
if (minor == dev->minor) {
dev_err(dev->class_dev,
"Cannot bond this driver to itself!\n");
- return 0;
+ return -EINVAL;
}
- if (devs_opened[minor]) {
+ if (test_and_set_bit(minor, devs_opened)) {
dev_err(dev->class_dev,
"Minor %d specified more than once!\n", minor);
- return 0;
+ return -EINVAL;
}
snprintf(file, sizeof(file), "/dev/comedi%u", minor);
file[sizeof(file) - 1] = 0;
- d = devs_opened[minor] = comedi_open(file);
+ d = comedi_open(file);
if (!d) {
dev_err(dev->class_dev,
"Minor %u could not be opened\n", minor);
- return 0;
+ return -ENODEV;
}
/* Do DIO, as that's all we support now.. */
@@ -242,45 +230,41 @@ static int doDevConfig(struct comedi_device *dev, struct comedi_devconfig *it)
dev_err(dev->class_dev,
"comedi_get_n_channels() returned %d on minor %u subdev %d!\n",
nchans, minor, sdev);
- return 0;
+ return -EINVAL;
}
bdev = kmalloc(sizeof(*bdev), GFP_KERNEL);
if (!bdev)
- return 0;
+ return -ENOMEM;
bdev->dev = d;
bdev->minor = minor;
bdev->subdev = sdev;
- bdev->subdev_type = COMEDI_SUBD_DIO;
bdev->nchans = nchans;
- bdev->chanid_offset = devpriv->nchans;
-
- /* map channel id's to BondedDevice * pointer.. */
- while (nchans--)
- devpriv->chanIdDevMap[devpriv->nchans++] = bdev;
+ devpriv->nchans += nchans;
- /* Now put bdev pointer at end of devpriv->devs array
- * list.. */
+ /*
+ * Now put bdev pointer at end of devpriv->devs array
+ * list..
+ */
/* ergh.. ugly.. we need to realloc :( */
- tmp = devpriv->ndevs * sizeof(bdev);
- devpriv->devs =
- Realloc(devpriv->devs,
- ++devpriv->ndevs * sizeof(bdev), tmp);
- if (!devpriv->devs) {
+ devs = krealloc(devpriv->devs,
+ (devpriv->ndevs + 1) * sizeof(*devs),
+ GFP_KERNEL);
+ if (!devs) {
dev_err(dev->class_dev,
"Could not allocate memory. Out of memory?\n");
- return 0;
+ return -ENOMEM;
}
-
- devpriv->devs[devpriv->ndevs - 1] = bdev;
+ devpriv->devs = devs;
+ devpriv->devs[devpriv->ndevs++] = bdev;
{
- /** Append dev:subdev to devpriv->name */
+ /* Append dev:subdev to devpriv->name */
char buf[20];
int left =
MAX_BOARD_NAME - strlen(devpriv->name) - 1;
- snprintf(buf, sizeof(buf), "%d:%d ", dev->minor,
- bdev->subdev);
+ snprintf(buf, sizeof(buf), "%d:%d ",
+ bdev->minor, bdev->subdev);
buf[sizeof(buf) - 1] = 0;
strncat(devpriv->name, buf, left);
}
@@ -290,10 +274,10 @@ static int doDevConfig(struct comedi_device *dev, struct comedi_devconfig *it)
if (!devpriv->nchans) {
dev_err(dev->class_dev, "No channels found!\n");
- return 0;
+ return -EINVAL;
}
- return 1;
+ return 0;
}
static int bonding_attach(struct comedi_device *dev,
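do_dev_config() above also retires the homegrown Realloc() wrapper in favour of krealloc(). Unlike the wrapper, krealloc() preserves the original buffer when the allocation fails, so the already-built device list is not lost on an out-of-memory error. The growth pattern it enables is the usual one (sketch reusing the structures above):

static int append_bonded_device(struct comedi_bond_private *devpriv,
				struct bonded_device *bdev)
{
	struct bonded_device **devs;

	devs = krealloc(devpriv->devs,
			(devpriv->ndevs + 1) * sizeof(*devs), GFP_KERNEL);
	if (!devs)
		return -ENOMEM;		/* devpriv->devs is still valid here */
	devpriv->devs = devs;
	devpriv->devs[devpriv->ndevs++] = bdev;
	return 0;
}

The same rewrite swaps the stack array of opened devices for a DECLARE_BITMAP plus test_and_set_bit(), and the detach hunk below drops an unsigned long mask that would misbehave once minors reach BITS_PER_LONG.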
@@ -303,16 +287,16 @@ static int bonding_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
/*
* Setup our bonding from config params.. sets up our private struct..
*/
- if (!doDevConfig(dev, it))
- return -EINVAL;
+ ret = do_dev_config(dev, it);
+ if (ret)
+ return ret;
dev->board_name = devpriv->name;
@@ -334,31 +318,29 @@ static int bonding_attach(struct comedi_device *dev,
dev->driver->driver_name, dev->board_name,
devpriv->nchans, devpriv->ndevs);
- return 1;
+ return 0;
}
static void bonding_detach(struct comedi_device *dev)
{
struct comedi_bond_private *devpriv = dev->private;
- unsigned long devs_closed = 0;
- if (devpriv) {
- while (devpriv->ndevs-- && devpriv->devs) {
- struct BondedDevice *bdev;
+ if (devpriv && devpriv->devs) {
+ DECLARE_BITMAP(devs_closed, COMEDI_NUM_BOARD_MINORS);
+
+ memset(&devs_closed, 0, sizeof(devs_closed));
+ while (devpriv->ndevs--) {
+ struct bonded_device *bdev;
bdev = devpriv->devs[devpriv->ndevs];
if (!bdev)
continue;
- if (!(devs_closed & (0x1 << bdev->minor))) {
+ if (!test_and_set_bit(bdev->minor, devs_closed))
comedi_close(bdev->dev);
- devs_closed |= (0x1 << bdev->minor);
- }
kfree(bdev);
}
kfree(devpriv->devs);
devpriv->devs = NULL;
- kfree(devpriv);
- dev->private = NULL;
}
}
@@ -371,7 +353,5 @@ static struct comedi_driver bonding_driver = {
module_comedi_driver(bonding_driver);
MODULE_AUTHOR("Calin A. Culianu");
-MODULE_DESCRIPTION("comedi_bond: A driver for COMEDI to bond multiple COMEDI "
- "devices together as one. In the words of John Lennon: "
- "'And the world will live as one...'");
+MODULE_DESCRIPTION("comedi_bond: A driver for COMEDI to bond multiple COMEDI devices together as one.");
MODULE_LICENSE("GPL");
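The rewritten driver description says the bonded board exposes the DIO channels of all underlying devices as one linear array. From user space that looks like a single large DIO subdevice; a short comedilib sketch of the effect (the device path, subdevice number and channel index are assumptions for illustration — the bonded device itself is created with comedi_config, passing the list of minors to merge as options):

#include <stdio.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *dev;
	unsigned int bits = 1u << 5;	/* drive channel 5 high */

	dev = comedi_open("/dev/comedi3");	/* the bonded device */
	if (!dev)
		return 1;

	/* Channel numbering runs linearly across all bonded subdevices. */
	comedi_dio_config(dev, 0, 5, COMEDI_OUTPUT);
	comedi_dio_bitfield2(dev, 0, 1u << 5, &bits, 0);
	printf("channels 0-31 read back as 0x%08x\n", bits);

	comedi_close(dev);
	return 0;
}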
diff --git a/drivers/staging/comedi/drivers/comedi_fc.c b/drivers/staging/comedi/drivers/comedi_fc.c
index 37dc796..26d9dbc 100644
--- a/drivers/staging/comedi/drivers/comedi_fc.c
+++ b/drivers/staging/comedi/drivers/comedi_fc.c
@@ -17,13 +17,9 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
+*/
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-************************************************************************/
-
+#include <linux/module.h>
#include "../comedidev.h"
#include "comedi_fc.h"
diff --git a/drivers/staging/comedi/drivers/comedi_fc.h b/drivers/staging/comedi/drivers/comedi_fc.h
index 31afab7..a4dea7c 100644
--- a/drivers/staging/comedi/drivers/comedi_fc.h
+++ b/drivers/staging/comedi/drivers/comedi_fc.h
@@ -17,12 +17,7 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-************************************************************************/
+*/
#ifndef _COMEDI_FC_H
#define _COMEDI_FC_H
diff --git a/drivers/staging/comedi/drivers/comedi_parport.c b/drivers/staging/comedi/drivers/comedi_parport.c
index 3e061cc..f28a15f 100644
--- a/drivers/staging/comedi/drivers/comedi_parport.c
+++ b/drivers/staging/comedi/drivers/comedi_parport.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: comedi_parport
@@ -81,9 +76,9 @@ pin, which can be used to wake up tasks.
or http://www.linux-magazin.de/ausgabe/1999/10/IO/io.html
*/
+#include <linux/module.h>
#include "../comedidev.h"
#include <linux/interrupt.h>
-#include <linux/ioport.h>
#include "comedi_fc.h"
@@ -284,10 +279,9 @@ static int parport_attach(struct comedi_device *dev,
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
s = &dev->subdevices[0];
s->type = COMEDI_SUBD_DIO;
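Editorial note: this hunk and several below convert the open-coded kzalloc()/dev->private assignment to comedi_alloc_devpriv(). A minimal sketch of the resulting attach shape (struct example_private and example_attach() are hypothetical, not part of the patch):

struct example_private {
	unsigned int dummy;	/* placeholder private state */
};

static int example_attach(struct comedi_device *dev,
			  struct comedi_devconfig *it)
{
	struct example_private *devpriv;

	/* the comedi core frees this allocation when the device is
	 * detached, so no explicit kfree()/dev->private = NULL is needed */
	devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
	if (!devpriv)
		return -ENOMEM;

	return 0;
}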
diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
index c1d8e86..16c0780 100644
--- a/drivers/staging/comedi/drivers/comedi_test.c
+++ b/drivers/staging/comedi/drivers/comedi_test.c
@@ -21,12 +21,7 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-************************************************************************/
+*/
/*
Driver: comedi_test
Description: generates fake waveforms
@@ -50,6 +45,7 @@ zero volts).
*/
+#include <linux/module.h>
#include "../comedidev.h"
#include <asm/div64.h>
@@ -384,10 +380,9 @@ static int waveform_attach(struct comedi_device *dev,
int i;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
/* set default amplitude and period */
if (amplitude <= 0)
diff --git a/drivers/staging/comedi/drivers/contec_pci_dio.c b/drivers/staging/comedi/drivers/contec_pci_dio.c
index f2230bf..e781716 100644
--- a/drivers/staging/comedi/drivers/contec_pci_dio.c
+++ b/drivers/staging/comedi/drivers/contec_pci_dio.c
@@ -13,11 +13,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: contec_pci_dio
@@ -30,6 +25,7 @@ Status: works
Configuration Options: not applicable, uses comedi PCI auto config
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index b87f95c..de920cc 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: daqboard2000
@@ -107,10 +102,10 @@ Configuration options: not applicable, uses PCI auto config
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include <linux/firmware.h>
#include "../comedidev.h"
@@ -524,7 +519,8 @@ static int daqboard2000_writeCPLD(struct comedi_device *dev, int data)
}
static int initialize_daqboard2000(struct comedi_device *dev,
- const u8 *cpld_array, size_t len)
+ const u8 *cpld_array, size_t len,
+ unsigned long context)
{
struct daqboard2000_private *devpriv = dev->private;
int result = -EIO;
@@ -565,22 +561,6 @@ static int initialize_daqboard2000(struct comedi_device *dev,
return result;
}
-static int daqboard2000_upload_firmware(struct comedi_device *dev)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct firmware *fw;
- int ret;
-
- ret = request_firmware(&fw, DAQBOARD2000_FIRMWARE, &pcidev->dev);
- if (ret)
- return ret;
-
- ret = initialize_daqboard2000(dev, fw->data, fw->size);
- release_firmware(fw);
-
- return ret;
-}
-
static void daqboard2000_adcStopDmaTransfer(struct comedi_device *dev)
{
}
@@ -704,10 +684,9 @@ static int daqboard2000_auto_attach(struct comedi_device *dev,
dev->board_ptr = board;
dev->board_name = board->name;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
result = comedi_pci_enable(dev);
if (result)
@@ -724,7 +703,9 @@ static int daqboard2000_auto_attach(struct comedi_device *dev,
readl(devpriv->plx + 0x6c);
- result = daqboard2000_upload_firmware(dev);
+ result = comedi_load_firmware(dev, &comedi_to_pci_dev(dev)->dev,
+ DAQBOARD2000_FIRMWARE,
+ initialize_daqboard2000, 0);
if (result < 0)
return result;
@@ -766,7 +747,6 @@ static void daqboard2000_detach(struct comedi_device *dev)
{
struct daqboard2000_private *devpriv = dev->private;
- comedi_spriv_free(dev, 2);
if (dev->irq)
free_irq(dev->irq, dev);
if (devpriv) {
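Editorial note: the daqboard2000_upload_firmware() removal above works because comedi_load_firmware() wraps the request_firmware()/release_firmware() bookkeeping and hands the image to a driver callback with a context value. A hedged sketch of that pattern (my_write_firmware(), my_attach_firmware() and MY_FW_NAME are placeholders, not part of the patch):

static int my_write_firmware(struct comedi_device *dev,
			     const u8 *data, size_t size,
			     unsigned long context)
{
	/* push "data" to the hardware here; return 0 or a negative errno */
	return 0;
}

static int my_attach_firmware(struct comedi_device *dev)
{
	return comedi_load_firmware(dev, &comedi_to_pci_dev(dev)->dev,
				    MY_FW_NAME, my_write_firmware, 0);
}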
diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c
index ba12c1d..5f66970 100644
--- a/drivers/staging/comedi/drivers/das08.c
+++ b/drivers/staging/comedi/drivers/das08.c
@@ -16,12 +16,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *****************************************************************
*/
/*
@@ -39,7 +33,7 @@
* cheap das08 hardware doesn't really support them.
*/
-#include <linux/delay.h>
+#include <linux/module.h>
#include "../comedidev.h"
@@ -566,12 +560,6 @@ int das08_common_attach(struct comedi_device *dev, unsigned long iobase)
}
EXPORT_SYMBOL_GPL(das08_common_attach);
-void das08_common_detach(struct comedi_device *dev)
-{
- comedi_spriv_free(dev, 4);
-}
-EXPORT_SYMBOL_GPL(das08_common_detach);
-
static int __init das08_init(void)
{
return 0;
diff --git a/drivers/staging/comedi/drivers/das08.h b/drivers/staging/comedi/drivers/das08.h
index 89bb8d6..cce1b58 100644
--- a/drivers/staging/comedi/drivers/das08.h
+++ b/drivers/staging/comedi/drivers/das08.h
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
#ifndef _DAS08_H
@@ -52,6 +47,5 @@ struct das08_private_struct {
};
int das08_common_attach(struct comedi_device *dev, unsigned long iobase);
-void das08_common_detach(struct comedi_device *dev);
#endif /* _DAS08_H */
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c
index d9f3e92..f3ccc2c 100644
--- a/drivers/staging/comedi/drivers/das08_cs.c
+++ b/drivers/staging/comedi/drivers/das08_cs.c
@@ -16,19 +16,12 @@
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
PCMCIA support code for this driver is adapted from the dummy_cs.c
driver of the Linux PCMCIA Card Services package.
The initial developer of the original code is David A. Hinds
<dahinds@users.sourceforge.net>. Portions created by David A. Hinds
are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
-
-*****************************************************************
-
*/
/*
Driver: das08_cs
@@ -46,8 +39,7 @@ Options (for pcm-das08):
Command support does not exist, but could be added for this board.
*/
-#include <linux/delay.h>
-#include <linux/slab.h>
+#include <linux/module.h>
#include "../comedidev.h"
@@ -85,25 +77,18 @@ static int das08_cs_auto_attach(struct comedi_device *dev,
return ret;
iobase = link->resource[0]->start;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
return das08_common_attach(dev, iobase);
}
-static void das08_cs_detach(struct comedi_device *dev)
-{
- das08_common_detach(dev);
- comedi_pcmcia_disable(dev);
-}
-
static struct comedi_driver driver_das08_cs = {
.driver_name = "das08_cs",
.module = THIS_MODULE,
.auto_attach = das08_cs_auto_attach,
- .detach = das08_cs_detach,
+ .detach = comedi_pcmcia_disable,
};
static int das08_pcmcia_attach(struct pcmcia_device *link)
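Editorial note: this hunk and the das08_isa/das08_pci hunks below all drop one-line detach wrappers; when a driver has nothing of its own to undo, .detach can point straight at the matching core helper. A sketch (example_cs_auto_attach() is hypothetical):

static struct comedi_driver example_cs_driver = {
	.driver_name	= "example_cs",
	.module		= THIS_MODULE,
	.auto_attach	= example_cs_auto_attach,
	/* comedi_pcmcia_disable(), comedi_pci_disable() or
	 * comedi_legacy_detach() can be assigned directly as .detach */
	.detach		= comedi_pcmcia_disable,
};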
diff --git a/drivers/staging/comedi/drivers/das08_isa.c b/drivers/staging/comedi/drivers/das08_isa.c
index f09f696..4fb03d3 100644
--- a/drivers/staging/comedi/drivers/das08_isa.c
+++ b/drivers/staging/comedi/drivers/das08_isa.c
@@ -16,10 +16,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
@@ -47,6 +43,7 @@
* [0] - base io address
*/
+#include <linux/module.h>
#include "../comedidev.h"
#include "das08.h"
@@ -181,10 +178,9 @@ static int das08_isa_attach(struct comedi_device *dev,
struct das08_private_struct *devpriv;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_request_region(dev, it->options[0], thisboard->iosize);
if (ret)
@@ -193,17 +189,11 @@ static int das08_isa_attach(struct comedi_device *dev,
return das08_common_attach(dev, dev->iobase);
}
-static void das08_isa_detach(struct comedi_device *dev)
-{
- das08_common_detach(dev);
- comedi_legacy_detach(dev);
-}
-
static struct comedi_driver das08_isa_driver = {
.driver_name = "isa-das08",
.module = THIS_MODULE,
.attach = das08_isa_attach,
- .detach = das08_isa_detach,
+ .detach = comedi_legacy_detach,
.board_name = &das08_isa_boards[0].name,
.num_names = ARRAY_SIZE(das08_isa_boards),
.offset = sizeof(das08_isa_boards[0]),
diff --git a/drivers/staging/comedi/drivers/das08_pci.c b/drivers/staging/comedi/drivers/das08_pci.c
index 53fa943..3a6d3725 100644
--- a/drivers/staging/comedi/drivers/das08_pci.c
+++ b/drivers/staging/comedi/drivers/das08_pci.c
@@ -16,10 +16,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
@@ -35,6 +31,7 @@
* Configuration Options: not applicable, uses PCI auto config
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
@@ -63,10 +60,9 @@ static int das08_pci_auto_attach(struct comedi_device *dev,
struct das08_private_struct *devpriv;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
/* The das08 driver needs the board_ptr */
dev->board_ptr = &das08_pci_boards[0];
@@ -79,17 +75,11 @@ static int das08_pci_auto_attach(struct comedi_device *dev,
return das08_common_attach(dev, dev->iobase);
}
-static void das08_pci_detach(struct comedi_device *dev)
-{
- das08_common_detach(dev);
- comedi_pci_disable(dev);
-}
-
static struct comedi_driver das08_pci_comedi_driver = {
.driver_name = "pci-das08",
.module = THIS_MODULE,
.auto_attach = das08_pci_auto_attach,
- .detach = das08_pci_detach,
+ .detach = comedi_pci_disable,
};
static int das08_pci_probe(struct pci_dev *dev,
diff --git a/drivers/staging/comedi/drivers/das16.c b/drivers/staging/comedi/drivers/das16.c
index 762b5a6..1b0793f 100644
--- a/drivers/staging/comedi/drivers/das16.c
+++ b/drivers/staging/comedi/drivers/das16.c
@@ -1,86 +1,89 @@
/*
- comedi/drivers/das16.c
- DAS16 driver
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- Copyright (C) 2000 Chris R. Baugher <baugher@enteract.com>
- Copyright (C) 2001,2002 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * das16.c
+ * DAS16 driver
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2000 Chris R. Baugher <baugher@enteract.com>
+ * Copyright (C) 2001,2002 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
-************************************************************************
-*/
/*
-Driver: das16
-Description: DAS16 compatible boards
-Author: Sam Moore, Warren Jasper, ds, Chris Baugher, Frank Hess, Roman Fietze
-Devices: [Keithley Metrabyte] DAS-16 (das-16), DAS-16G (das-16g),
- DAS-16F (das-16f), DAS-1201 (das-1201), DAS-1202 (das-1202),
- DAS-1401 (das-1401), DAS-1402 (das-1402), DAS-1601 (das-1601),
- DAS-1602 (das-1602),
- [ComputerBoards] PC104-DAS16/JR (pc104-das16jr),
- PC104-DAS16JR/16 (pc104-das16jr/16),
- CIO-DAS16JR/16 (cio-das16jr/16),
- CIO-DAS16/JR (cio-das16/jr), CIO-DAS1401/12 (cio-das1401/12),
- CIO-DAS1402/12 (cio-das1402/12), CIO-DAS1402/16 (cio-das1402/16),
- CIO-DAS1601/12 (cio-das1601/12), CIO-DAS1602/12 (cio-das1602/12),
- CIO-DAS1602/16 (cio-das1602/16), CIO-DAS16/330 (cio-das16/330)
-Status: works
-Updated: 2003-10-12
-
-A rewrite of the das16 and das1600 drivers.
-Options:
- [0] - base io address
- [1] - irq (does nothing, irq is not used anymore)
- [2] - dma (optional, required for comedi_command support)
- [3] - master clock speed in MHz (optional, 1 or 10, ignored if
- board can probe clock, defaults to 1)
- [4] - analog input range lowest voltage in microvolts (optional,
- only useful if your board does not have software
- programmable gain)
- [5] - analog input range highest voltage in microvolts (optional,
- only useful if board does not have software programmable
- gain)
- [6] - analog output range lowest voltage in microvolts (optional)
- [7] - analog output range highest voltage in microvolts (optional)
- [8] - use timer mode for DMA. Timer mode is needed e.g. for
- buggy DMA controllers in NS CS5530A (Geode Companion), and for
- 'jr' cards that lack a hardware fifo. This option is no
- longer needed, since timer mode is _always_ used.
-
-Passing a zero for an option is the same as leaving it unspecified.
+ * Driver: das16
+ * Description: DAS16 compatible boards
+ * Author: Sam Moore, Warren Jasper, ds, Chris Baugher, Frank Hess, Roman Fietze
+ * Devices: (Keithley Metrabyte) DAS-16 [das-16]
+ * (Keithley Metrabyte) DAS-16G [das-16g]
+ * (Keithley Metrabyte) DAS-16F [das-16f]
+ * (Keithley Metrabyte) DAS-1201 [das-1201]
+ * (Keithley Metrabyte) DAS-1202 [das-1202]
+ * (Keithley Metrabyte) DAS-1401 [das-1401]
+ * (Keithley Metrabyte) DAS-1402 [das-1402]
+ * (Keithley Metrabyte) DAS-1601 [das-1601]
+ * (Keithley Metrabyte) DAS-1602 [das-1602]
+ * (ComputerBoards) PC104-DAS16/JR [pc104-das16jr]
+ * (ComputerBoards) PC104-DAS16JR/16 [pc104-das16jr/16]
+ * (ComputerBoards) CIO-DAS16 [cio-das16]
+ * (ComputerBoards) CIO-DAS16F [cio-das16/f]
+ * (ComputerBoards) CIO-DAS16/JR [cio-das16/jr]
+ * (ComputerBoards) CIO-DAS16JR/16 [cio-das16jr/16]
+ * (ComputerBoards) CIO-DAS1401/12 [cio-das1401/12]
+ * (ComputerBoards) CIO-DAS1402/12 [cio-das1402/12]
+ * (ComputerBoards) CIO-DAS1402/16 [cio-das1402/16]
+ * (ComputerBoards) CIO-DAS1601/12 [cio-das1601/12]
+ * (ComputerBoards) CIO-DAS1602/12 [cio-das1602/12]
+ * (ComputerBoards) CIO-DAS1602/16 [cio-das1602/16]
+ * (ComputerBoards) CIO-DAS16/330 [cio-das16/330]
+ * Status: works
+ * Updated: 2003-10-12
+ *
+ * A rewrite of the das16 and das1600 drivers.
+ *
+ * Options:
+ * [0] - base io address
+ * [1] - irq (does nothing, irq is not used anymore)
+ * [2] - dma channel (optional, required for comedi_command support)
+ * [3] - master clock speed in MHz (optional, 1 or 10, ignored if
+ * board can probe clock, defaults to 1)
+ * [4] - analog input range lowest voltage in microvolts (optional,
+ * only useful if your board does not have software
+ * programmable gain)
+ * [5] - analog input range highest voltage in microvolts (optional,
+ * only useful if board does not have software programmable
+ * gain)
+ * [6] - analog output range lowest voltage in microvolts (optional)
+ * [7] - analog output range highest voltage in microvolts (optional)
+ *
+ * Passing a zero for an option is the same as leaving it unspecified.
+ */
-*/
/*
+ * Testing and debugging help provided by Daniel Koch.
+ *
+ * Keithley Manuals:
+ * 2309.PDF (das16)
+ * 4919.PDF (das1400, 1600)
+ * 4922.PDF (das-1400)
+ * 4923.PDF (das1200, 1400, 1600)
+ *
+ * Computer boards manuals also available from their website
+ * www.measurementcomputing.com
+ */
-Testing and debugging help provided by Daniel Koch.
-
-Keithley Manuals:
- 2309.PDF (das16)
- 4919.PDF (das1400, 1600)
- 4922.PDF (das-1400)
- 4923.PDF (das1200, 1400, 1600)
-
-Computer boards manuals also available from their website
-www.measurementcomputing.com
-
-*/
-
-#include <linux/pci.h>
+#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
#include <linux/interrupt.h>
#include <asm/dma.h>
@@ -91,214 +94,112 @@ www.measurementcomputing.com
#include "8255.h"
#include "comedi_fc.h"
-#undef DEBUG
-/* #define DEBUG */
-
-#ifdef DEBUG
-#define DEBUG_PRINT(format, args...) \
- printk(KERN_DEBUG "das16: " format, ## args)
-#else
-#define DEBUG_PRINT(format, args...)
-#endif
-
-#define DAS16_SIZE 20 /* number of ioports */
#define DAS16_DMA_SIZE 0xff00 /* size in bytes of allocated dma buffer */
/*
- cio-das16.pdf
-
- "das16"
- "das16/f"
-
- 0 a/d bits 0-3 start 12 bit
- 1 a/d bits 4-11 unused
- 2 mux read mux set
- 3 di 4 bit do 4 bit
- 4 unused ao0_lsb
- 5 unused ao0_msb
- 6 unused ao1_lsb
- 7 unused ao1_msb
- 8 status eoc uni/bip interrupt reset
- 9 dma, int, trig ctrl set dma, int
- a pacer control unused
- b reserved reserved
- cdef 8254
- 0123 8255
-
-*/
-
-/*
- cio-das16jr.pdf
-
- "das16jr"
-
- 0 a/d bits 0-3 start 12 bit
- 1 a/d bits 4-11 unused
- 2 mux read mux set
- 3 di 4 bit do 4 bit
- 4567 unused unused
- 8 status eoc uni/bip interrupt reset
- 9 dma, int, trig ctrl set dma, int
- a pacer control unused
- b gain status gain control
- cdef 8254
-
-*/
-
-/*
- cio-das16jr_16.pdf
-
- "das16jr_16"
-
- 0 a/d bits 0-7 start 16 bit
- 1 a/d bits 8-15 unused
- 2 mux read mux set
- 3 di 4 bit do 4 bit
- 4567 unused unused
- 8 status eoc uni/bip interrupt reset
- 9 dma, int, trig ctrl set dma, int
- a pacer control unused
- b gain status gain control
- cdef 8254
-
-*/
-/*
- cio-das160x-1x.pdf
-
- "das1601/12"
- "das1602/12"
- "das1602/16"
-
- 0 a/d bits 0-3 start 12 bit
- 1 a/d bits 4-11 unused
- 2 mux read mux set
- 3 di 4 bit do 4 bit
- 4 unused ao0_lsb
- 5 unused ao0_msb
- 6 unused ao1_lsb
- 7 unused ao1_msb
- 8 status eoc uni/bip interrupt reset
- 9 dma, int, trig ctrl set dma, int
- a pacer control unused
- b gain status gain control
- cdef 8254
- 400 8255
- 404 unused conversion enable
- 405 unused burst enable
- 406 unused das1600 enable
- 407 status
-
-*/
-
-/* size in bytes of a sample from board */
-static const int sample_size = 2;
-
-#define DAS16_TRIG 0
-#define DAS16_AI_LSB 0
-#define DAS16_AI_MSB 1
-#define DAS16_MUX 2
-#define DAS16_DIO 3
-#define DAS16_AO_LSB(x) ((x) ? 6 : 4)
-#define DAS16_AO_MSB(x) ((x) ? 7 : 5)
-#define DAS16_STATUS 8
-#define BUSY (1<<7)
-#define UNIPOLAR (1<<6)
-#define DAS16_MUXBIT (1<<5)
-#define DAS16_INT (1<<4)
-#define DAS16_CONTROL 9
-#define DAS16_INTE (1<<7)
-#define DAS16_IRQ(x) (((x) & 0x7) << 4)
-#define DMA_ENABLE (1<<2)
-#define PACING_MASK 0x3
-#define INT_PACER 0x03
-#define EXT_PACER 0x02
-#define DAS16_SOFT 0x00
-#define DAS16_PACER 0x0A
-#define DAS16_CTR0 (1<<1)
-#define DAS16_TRIG0 (1<<0)
-#define BURST_LEN_BITS(x) (((x) & 0xf) << 4)
-#define DAS16_GAIN 0x0B
-#define DAS16_CNTR0_DATA 0x0C
-#define DAS16_CNTR1_DATA 0x0D
-#define DAS16_CNTR2_DATA 0x0E
-#define DAS16_CNTR_CONTROL 0x0F
-#define DAS16_TERM_CNT 0x00
-#define DAS16_ONE_SHOT 0x02
-#define DAS16_RATE_GEN 0x04
-#define DAS16_CNTR_LSB_MSB 0x30
-#define DAS16_CNTR0 0x00
-#define DAS16_CNTR1 0x40
-#define DAS16_CNTR2 0x80
-
-#define DAS1600_CONV 0x404
-#define DAS1600_CONV_DISABLE 0x40
-#define DAS1600_BURST 0x405
-#define DAS1600_BURST_VAL 0x40
-#define DAS1600_ENABLE 0x406
-#define DAS1600_ENABLE_VAL 0x40
-#define DAS1600_STATUS_B 0x407
-#define DAS1600_BME 0x40
-#define DAS1600_ME 0x20
-#define DAS1600_CD 0x10
-#define DAS1600_WS 0x02
-#define DAS1600_CLK_10MHZ 0x01
-
-static const struct comedi_lrange range_das1x01_bip = { 4, {
- BIP_RANGE(10),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- BIP_RANGE(0.01),
- }
+ * Register I/O map
+ */
+#define DAS16_TRIG_REG 0x00
+#define DAS16_AI_LSB_REG 0x00
+#define DAS16_AI_MSB_REG 0x01
+#define DAS16_MUX_REG 0x02
+#define DAS16_DIO_REG 0x03
+#define DAS16_AO_LSB_REG(x) ((x) ? 0x06 : 0x04)
+#define DAS16_AO_MSB_REG(x) ((x) ? 0x07 : 0x05)
+#define DAS16_STATUS_REG 0x08
+#define DAS16_STATUS_BUSY (1 << 7)
+#define DAS16_STATUS_UNIPOLAR (1 << 6)
+#define DAS16_STATUS_MUXBIT (1 << 5)
+#define DAS16_STATUS_INT (1 << 4)
+#define DAS16_CTRL_REG 0x09
+#define DAS16_CTRL_INTE (1 << 7)
+#define DAS16_CTRL_IRQ(x) (((x) & 0x7) << 4)
+#define DAS16_CTRL_DMAE (1 << 2)
+#define DAS16_CTRL_PACING_MASK (3 << 0)
+#define DAS16_CTRL_INT_PACER (3 << 0)
+#define DAS16_CTRL_EXT_PACER (2 << 0)
+#define DAS16_CTRL_SOFT_PACER (0 << 0)
+#define DAS16_PACER_REG 0x0a
+#define DAS16_PACER_BURST_LEN(x) (((x) & 0xf) << 4)
+#define DAS16_PACER_CTR0 (1 << 1)
+#define DAS16_PACER_TRIG0 (1 << 0)
+#define DAS16_GAIN_REG 0x0b
+#define DAS16_TIMER_BASE_REG 0x0c /* to 0x0f */
+
+#define DAS1600_CONV_REG 0x404
+#define DAS1600_CONV_DISABLE (1 << 6)
+#define DAS1600_BURST_REG 0x405
+#define DAS1600_BURST_VAL (1 << 6)
+#define DAS1600_ENABLE_REG 0x406
+#define DAS1600_ENABLE_VAL (1 << 6)
+#define DAS1600_STATUS_REG 0x407
+#define DAS1600_STATUS_BME (1 << 6)
+#define DAS1600_STATUS_ME (1 << 5)
+#define DAS1600_STATUS_CD (1 << 4)
+#define DAS1600_STATUS_WS (1 << 1)
+#define DAS1600_STATUS_CLK_10MHZ (1 << 0)
+
+static const struct comedi_lrange range_das1x01_bip = {
+ 4, {
+ BIP_RANGE(10),
+ BIP_RANGE(1),
+ BIP_RANGE(0.1),
+ BIP_RANGE(0.01)
+ }
};
-static const struct comedi_lrange range_das1x01_unip = { 4, {
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.01),
- }
+static const struct comedi_lrange range_das1x01_unip = {
+ 4, {
+ UNI_RANGE(10),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1),
+ UNI_RANGE(0.01)
+ }
};
-static const struct comedi_lrange range_das1x02_bip = { 4, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- }
+static const struct comedi_lrange range_das1x02_bip = {
+ 4, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25)
+ }
};
-static const struct comedi_lrange range_das1x02_unip = { 4, {
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- }
+static const struct comedi_lrange range_das1x02_unip = {
+ 4, {
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
-static const struct comedi_lrange range_das16jr = { 9, {
- /* also used by 16/330 */
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- }
+static const struct comedi_lrange range_das16jr = {
+ 9, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
-static const struct comedi_lrange range_das16jr_16 = { 8, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- }
+static const struct comedi_lrange range_das16jr_16 = {
+ 8, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
static const int das16jr_gainlist[] = { 8, 0, 1, 2, 3, 4, 5, 6, 7 };
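Editorial note on how the range table and gain list above cooperate (values read off the tables themselves; the helper is hypothetical): CR_RANGE() of a chanspec indexes range_das16jr, and das16jr_gainlist[] translates that index into the code written to DAS16_GAIN_REG, so range 4 (+/-0.625 V) programs gain code 3.

static inline int das16jr_gain_code(unsigned int range)
{
	return das16jr_gainlist[range];	/* e.g. range 4 -> code 3 */
}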
@@ -336,30 +237,211 @@ static const struct comedi_lrange *const das16_ai_bip_lranges[] = {
&range_das1x02_bip,
};
-struct munge_info {
- uint8_t byte;
- unsigned have_byte:1;
-};
-
struct das16_board {
const char *name;
- void *ai;
- unsigned int ai_nbits;
+ unsigned int ai_maxdata;
unsigned int ai_speed; /* max conversion speed in nanosec */
unsigned int ai_pg;
- void *ao;
- unsigned int ao_nbits;
- void *di;
- void *do_;
+ unsigned int has_ao:1;
+ unsigned int has_8255:1;
unsigned int i8255_offset;
- unsigned int i8254_offset;
unsigned int size;
unsigned int id;
};
-#define DAS16_TIMEOUT 1000
+static const struct das16_board das16_boards[] = {
+ {
+ .name = "das-16",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 15000,
+ .ai_pg = das16_pg_none,
+ .has_ao = 1,
+ .has_8255 = 1,
+ .i8255_offset = 0x10,
+ .size = 0x14,
+ .id = 0x00,
+ }, {
+ .name = "das-16g",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 15000,
+ .ai_pg = das16_pg_none,
+ .has_ao = 1,
+ .has_8255 = 1,
+ .i8255_offset = 0x10,
+ .size = 0x14,
+ .id = 0x00,
+ }, {
+ .name = "das-16f",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 8500,
+ .ai_pg = das16_pg_none,
+ .has_ao = 1,
+ .has_8255 = 1,
+ .i8255_offset = 0x10,
+ .size = 0x14,
+ .id = 0x00,
+ }, {
+ .name = "cio-das16",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 20000,
+ .ai_pg = das16_pg_none,
+ .has_ao = 1,
+ .has_8255 = 1,
+ .i8255_offset = 0x10,
+ .size = 0x14,
+ .id = 0x80,
+ }, {
+ .name = "cio-das16/f",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 10000,
+ .ai_pg = das16_pg_none,
+ .has_ao = 1,
+ .has_8255 = 1,
+ .i8255_offset = 0x10,
+ .size = 0x14,
+ .id = 0x80,
+ }, {
+ .name = "cio-das16/jr",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 7692,
+ .ai_pg = das16_pg_16jr,
+ .size = 0x10,
+ .id = 0x00,
+ }, {
+ .name = "pc104-das16jr",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 3300,
+ .ai_pg = das16_pg_16jr,
+ .size = 0x10,
+ .id = 0x00,
+ }, {
+ .name = "cio-das16jr/16",
+ .ai_maxdata = 0xffff,
+ .ai_speed = 10000,
+ .ai_pg = das16_pg_16jr_16,
+ .size = 0x10,
+ .id = 0x00,
+ }, {
+ .name = "pc104-das16jr/16",
+ .ai_maxdata = 0xffff,
+ .ai_speed = 10000,
+ .ai_pg = das16_pg_16jr_16,
+ .size = 0x10,
+ .id = 0x00,
+ }, {
+ .name = "das-1201",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 20000,
+ .ai_pg = das16_pg_none,
+ .has_8255 = 1,
+ .i8255_offset = 0x400,
+ .size = 0x408,
+ .id = 0x20,
+ }, {
+ .name = "das-1202",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 10000,
+ .ai_pg = das16_pg_none,
+ .has_8255 = 1,
+ .i8255_offset = 0x400,
+ .size = 0x408,
+ .id = 0x20,
+ }, {
+ .name = "das-1401",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 10000,
+ .ai_pg = das16_pg_1601,
+ .size = 0x408,
+ .id = 0xc0,
+ }, {
+ .name = "das-1402",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 10000,
+ .ai_pg = das16_pg_1602,
+ .size = 0x408,
+ .id = 0xc0,
+ }, {
+ .name = "das-1601",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 10000,
+ .ai_pg = das16_pg_1601,
+ .has_ao = 1,
+ .has_8255 = 1,
+ .i8255_offset = 0x400,
+ .size = 0x408,
+ .id = 0xc0,
+ }, {
+ .name = "das-1602",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 10000,
+ .ai_pg = das16_pg_1602,
+ .has_ao = 1,
+ .has_8255 = 1,
+ .i8255_offset = 0x400,
+ .size = 0x408,
+ .id = 0xc0,
+ }, {
+ .name = "cio-das1401/12",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 6250,
+ .ai_pg = das16_pg_1601,
+ .size = 0x408,
+ .id = 0xc0,
+ }, {
+ .name = "cio-das1402/12",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 6250,
+ .ai_pg = das16_pg_1602,
+ .size = 0x408,
+ .id = 0xc0,
+ }, {
+ .name = "cio-das1402/16",
+ .ai_maxdata = 0xffff,
+ .ai_speed = 10000,
+ .ai_pg = das16_pg_1602,
+ .size = 0x408,
+ .id = 0xc0,
+ }, {
+ .name = "cio-das1601/12",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 6250,
+ .ai_pg = das16_pg_1601,
+ .has_ao = 1,
+ .has_8255 = 1,
+ .i8255_offset = 0x400,
+ .size = 0x408,
+ .id = 0xc0,
+ }, {
+ .name = "cio-das1602/12",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 10000,
+ .ai_pg = das16_pg_1602,
+ .has_ao = 1,
+ .has_8255 = 1,
+ .i8255_offset = 0x400,
+ .size = 0x408,
+ .id = 0xc0,
+ }, {
+ .name = "cio-das1602/16",
+ .ai_maxdata = 0xffff,
+ .ai_speed = 10000,
+ .ai_pg = das16_pg_1602,
+ .has_ao = 1,
+ .has_8255 = 1,
+ .i8255_offset = 0x400,
+ .size = 0x408,
+ .id = 0xc0,
+ }, {
+ .name = "cio-das16/330",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 3030,
+ .ai_pg = das16_pg_16jr,
+ .size = 0x14,
+ .id = 0xf0,
+ },
+};
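Editorial note: the das16_boards[] table above is matched against the configured board name by the comedi core through the driver's .board_name/.num_names/.offset fields, the same pattern visible for das08_isa earlier in this patch. A sketch of how das16 wires it up (das16_detach() is assumed; it is not shown in these hunks):

static struct comedi_driver das16_driver_sketch = {
	.driver_name	= "das16",
	.module		= THIS_MODULE,
	.attach		= das16_attach,
	.detach		= das16_detach,
	.board_name	= &das16_boards[0].name,
	.num_names	= ARRAY_SIZE(das16_boards),
	.offset		= sizeof(das16_boards[0]),
};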
/* Period for timer interrupt in jiffies. It's a function
* to deal with possibility of dynamic HZ patches */
@@ -369,34 +451,155 @@ static inline int timer_period(void)
}
struct das16_private_struct {
- unsigned int ai_unipolar; /* unipolar flag */
- unsigned int ai_singleended; /* single ended flag */
- unsigned int clockbase; /* master clock speed in ns */
- volatile unsigned int control_state; /* dma, interrupt and trigger control bits */
- volatile unsigned long adc_byte_count; /* number of bytes remaining */
- /* divisor dividing master clock to get conversion frequency */
- unsigned int divisor1;
- /* divisor dividing master clock to get conversion frequency */
- unsigned int divisor2;
- unsigned int dma_chan; /* dma channel */
- uint16_t *dma_buffer[2];
- dma_addr_t dma_buffer_addr[2];
- unsigned int current_buffer;
- volatile unsigned int dma_transfer_size; /* target number of bytes to transfer per dma shot */
- /**
- * user-defined analog input and output ranges
- * defined from config options
- */
- struct comedi_lrange *user_ai_range_table;
- struct comedi_lrange *user_ao_range_table;
-
- struct timer_list timer; /* for timed interrupt */
- volatile short timer_running;
- volatile short timer_mode; /* true if using timer mode */
-
- unsigned long extra_iobase;
+ unsigned int clockbase;
+ unsigned int ctrl_reg;
+ unsigned long adc_byte_count;
+ unsigned int divisor1;
+ unsigned int divisor2;
+ unsigned int dma_chan;
+ uint16_t *dma_buffer[2];
+ dma_addr_t dma_buffer_addr[2];
+ unsigned int current_buffer;
+ unsigned int dma_transfer_size;
+ struct comedi_lrange *user_ai_range_table;
+ struct comedi_lrange *user_ao_range_table;
+ struct timer_list timer;
+ short timer_running;
+ unsigned long extra_iobase;
+ unsigned int can_burst:1;
};
+static void das16_ai_enable(struct comedi_device *dev,
+ unsigned int mode, unsigned int src)
+{
+ struct das16_private_struct *devpriv = dev->private;
+
+ devpriv->ctrl_reg &= ~(DAS16_CTRL_INTE |
+ DAS16_CTRL_DMAE |
+ DAS16_CTRL_PACING_MASK);
+ devpriv->ctrl_reg |= mode;
+
+ if (src == TRIG_EXT)
+ devpriv->ctrl_reg |= DAS16_CTRL_EXT_PACER;
+ else
+ devpriv->ctrl_reg |= DAS16_CTRL_INT_PACER;
+ outb(devpriv->ctrl_reg, dev->iobase + DAS16_CTRL_REG);
+}
+
+static void das16_ai_disable(struct comedi_device *dev)
+{
+ struct das16_private_struct *devpriv = dev->private;
+
+ /* disable interrupts, dma and pacer clocked conversions */
+ devpriv->ctrl_reg &= ~(DAS16_CTRL_INTE |
+ DAS16_CTRL_DMAE |
+ DAS16_CTRL_PACING_MASK);
+ outb(devpriv->ctrl_reg, dev->iobase + DAS16_CTRL_REG);
+}
+
+/*
+ * The pc104-das16jr (at least) has problems if the dma transfer is
+ * interrupted in the middle of transferring a 16 bit sample, so this
+ * function takes care to get an even transfer count after disabling
+ * the dma channel.
+ */
+static int disable_dma_on_even(struct comedi_device *dev)
+{
+ struct das16_private_struct *devpriv = dev->private;
+ int residue;
+ int i;
+ static const int disable_limit = 100;
+ static const int enable_timeout = 100;
+
+ disable_dma(devpriv->dma_chan);
+ residue = get_dma_residue(devpriv->dma_chan);
+ for (i = 0; i < disable_limit && (residue % 2); ++i) {
+ int j;
+ enable_dma(devpriv->dma_chan);
+ for (j = 0; j < enable_timeout; ++j) {
+ int new_residue;
+ udelay(2);
+ new_residue = get_dma_residue(devpriv->dma_chan);
+ if (new_residue != residue)
+ break;
+ }
+ disable_dma(devpriv->dma_chan);
+ residue = get_dma_residue(devpriv->dma_chan);
+ }
+ if (i == disable_limit) {
+ dev_err(dev->class_dev,
+ "failed to get an even dma transfer, could be trouble\n");
+ }
+ return residue;
+}
+
+static void das16_interrupt(struct comedi_device *dev)
+{
+ struct das16_private_struct *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
+ unsigned long spin_flags;
+ unsigned long dma_flags;
+ int num_bytes, residue;
+ int buffer_index;
+
+ spin_lock_irqsave(&dev->spinlock, spin_flags);
+ if (!(devpriv->ctrl_reg & DAS16_CTRL_DMAE)) {
+ spin_unlock_irqrestore(&dev->spinlock, spin_flags);
+ return;
+ }
+
+ dma_flags = claim_dma_lock();
+ clear_dma_ff(devpriv->dma_chan);
+ residue = disable_dma_on_even(dev);
+
+ /* figure out how many points to read */
+ if (residue > devpriv->dma_transfer_size) {
+ dev_err(dev->class_dev, "residue > transfer size!\n");
+ async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
+ num_bytes = 0;
+ } else
+ num_bytes = devpriv->dma_transfer_size - residue;
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ num_bytes >= devpriv->adc_byte_count) {
+ num_bytes = devpriv->adc_byte_count;
+ async->events |= COMEDI_CB_EOA;
+ }
+
+ buffer_index = devpriv->current_buffer;
+ devpriv->current_buffer = (devpriv->current_buffer + 1) % 2;
+ devpriv->adc_byte_count -= num_bytes;
+
+ /* re-enable dma */
+ if ((async->events & COMEDI_CB_EOA) == 0) {
+ set_dma_addr(devpriv->dma_chan,
+ devpriv->dma_buffer_addr[devpriv->current_buffer]);
+ set_dma_count(devpriv->dma_chan, devpriv->dma_transfer_size);
+ enable_dma(devpriv->dma_chan);
+ }
+ release_dma_lock(dma_flags);
+
+ spin_unlock_irqrestore(&dev->spinlock, spin_flags);
+
+ cfc_write_array_to_buffer(s,
+ devpriv->dma_buffer[buffer_index], num_bytes);
+
+ cfc_handle_events(dev, s);
+}
+
+static void das16_timer_interrupt(unsigned long arg)
+{
+ struct comedi_device *dev = (struct comedi_device *)arg;
+ struct das16_private_struct *devpriv = dev->private;
+
+ das16_interrupt(dev);
+
+ if (devpriv->timer_running)
+ mod_timer(&devpriv->timer, jiffies + timer_period());
+}
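Editorial note: das16_timer_interrupt() above is driven by a plain kernel timer rather than a hardware IRQ. A minimal sketch of the arming sequence, assuming the 3.x timer API of this tree; das16_arm_timer() is a hypothetical helper combining the timer setup done in das16_attach() outside these hunks with the arming that das16_cmd_exec() performs below:

static void das16_arm_timer(struct comedi_device *dev)
{
	struct das16_private_struct *devpriv = dev->private;

	init_timer(&devpriv->timer);
	devpriv->timer.function = das16_timer_interrupt;
	devpriv->timer.data = (unsigned long)dev;

	devpriv->timer_running = 1;
	devpriv->timer.expires = jiffies + timer_period();
	add_timer(&devpriv->timer);
}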
+
static int das16_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
@@ -411,15 +614,13 @@ static int das16_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW);
mask = TRIG_FOLLOW;
- /* if board supports burst mode */
- if (board->size > 0x400)
+ if (devpriv->can_burst)
mask |= TRIG_TIMER | TRIG_EXT;
err |= cfc_check_trigger_src(&cmd->scan_begin_src, mask);
tmp = cmd->convert_src;
mask = TRIG_TIMER | TRIG_EXT;
- /* if board supports burst mode */
- if (board->size > 0x400)
+ if (devpriv->can_burst)
mask |= TRIG_NOW;
err |= cfc_check_trigger_src(&cmd->convert_src, mask);
@@ -475,9 +676,9 @@ static int das16_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int tmp = cmd->scan_begin_arg;
/* set divisors, correct timing arguments */
i8253_cascade_ns_to_timer_2div(devpriv->clockbase,
- &(devpriv->divisor1),
- &(devpriv->divisor2),
- &(cmd->scan_begin_arg),
+ &devpriv->divisor1,
+ &devpriv->divisor2,
+ &cmd->scan_begin_arg,
cmd->flags & TRIG_ROUND_MASK);
err += (tmp != cmd->scan_begin_arg);
}
@@ -485,9 +686,9 @@ static int das16_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int tmp = cmd->convert_arg;
/* set divisors, correct timing arguments */
i8253_cascade_ns_to_timer_2div(devpriv->clockbase,
- &(devpriv->divisor1),
- &(devpriv->divisor2),
- &(cmd->convert_arg),
+ &devpriv->divisor1,
+ &devpriv->divisor2,
+ &cmd->convert_arg,
cmd->flags & TRIG_ROUND_MASK);
err += (tmp != cmd->convert_arg);
}
@@ -501,16 +702,13 @@ static int das16_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
for (i = 1; i < cmd->chanlist_len; i++) {
if (CR_CHAN(cmd->chanlist[i]) !=
(start_chan + i) % s->n_chan) {
- comedi_error(dev,
- "entries in chanlist must be "
- "consecutive channels, "
- "counting upwards\n");
+ dev_err(dev->class_dev,
+ "entries in chanlist must be consecutive channels, counting upwards\n");
err++;
}
if (CR_RANGE(cmd->chanlist[i]) != gain) {
- comedi_error(dev,
- "entries in chanlist must all "
- "have the same gain\n");
+ dev_err(dev->class_dev,
+ "entries in chanlist must all have the same gain\n");
err++;
}
}
@@ -521,61 +719,21 @@ static int das16_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
return 0;
}
-/* utility function that suggests a dma transfer size in bytes */
-static unsigned int das16_suggest_transfer_size(struct comedi_device *dev,
- const struct comedi_cmd *cmd)
-{
- struct das16_private_struct *devpriv = dev->private;
- unsigned int size;
- unsigned int freq;
-
- /* if we are using timer interrupt, we don't care how long it
- * will take to complete transfer since it will be interrupted
- * by timer interrupt */
- if (devpriv->timer_mode)
- return DAS16_DMA_SIZE;
-
- /* otherwise, we are relying on dma terminal count interrupt,
- * so pick a reasonable size */
- if (cmd->convert_src == TRIG_TIMER)
- freq = 1000000000 / cmd->convert_arg;
- else if (cmd->scan_begin_src == TRIG_TIMER)
- freq = (1000000000 / cmd->scan_begin_arg) * cmd->chanlist_len;
- /* return some default value */
- else
- freq = 0xffffffff;
-
- if (cmd->flags & TRIG_WAKE_EOS) {
- size = sample_size * cmd->chanlist_len;
- } else {
- /* make buffer fill in no more than 1/3 second */
- size = (freq / 3) * sample_size;
- }
-
- /* set a minimum and maximum size allowed */
- if (size > DAS16_DMA_SIZE)
- size = DAS16_DMA_SIZE - DAS16_DMA_SIZE % sample_size;
- else if (size < sample_size)
- size = sample_size;
-
- if (cmd->stop_src == TRIG_COUNT && size > devpriv->adc_byte_count)
- size = devpriv->adc_byte_count;
-
- return size;
-}
-
static unsigned int das16_set_pacer(struct comedi_device *dev, unsigned int ns,
int rounding_flags)
{
struct das16_private_struct *devpriv = dev->private;
+ unsigned long timer_base = dev->iobase + DAS16_TIMER_BASE_REG;
- i8253_cascade_ns_to_timer_2div(devpriv->clockbase, &(devpriv->divisor1),
- &(devpriv->divisor2), &ns,
+ i8253_cascade_ns_to_timer_2div(devpriv->clockbase,
+ &devpriv->divisor1,
+ &devpriv->divisor2,
+ &ns,
rounding_flags & TRIG_ROUND_MASK);
/* Write the values of ctr1 and ctr2 into counters 1 and 2 */
- i8254_load(dev->iobase + DAS16_CNTR0_DATA, 0, 1, devpriv->divisor1, 2);
- i8254_load(dev->iobase + DAS16_CNTR0_DATA, 0, 2, devpriv->divisor2, 2);
+ i8254_load(timer_base, 0, 1, devpriv->divisor1, 2);
+ i8254_load(timer_base, 0, 2, devpriv->divisor2, 2);
return ns;
}
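Editorial note, a quick numeric check of das16_set_pacer(): with the 10 MHz pacer clock, devpriv->clockbase is 100 (ns per tick), so a requested 1 ms conversion period needs divisor1 * divisor2 = 1000000 / 100 = 10000, for example 100 x 100. The hypothetical helper below just restates the resulting period:

static unsigned int das16_pacer_period_ns(unsigned int clockbase,
					  unsigned int div1,
					  unsigned int div2)
{
	/* e.g. 100 ns * 100 * 100 = 1000000 ns = 1 ms */
	return clockbase * div1 * div2;
}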
@@ -590,30 +748,22 @@ static int das16_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s)
unsigned long flags;
int range;
- if (devpriv->dma_chan == 0 || (dev->irq == 0
- && devpriv->timer_mode == 0)) {
- comedi_error(dev,
- "irq (or use of 'timer mode') dma required to "
- "execute comedi_cmd");
- return -1;
- }
if (cmd->flags & TRIG_RT) {
- comedi_error(dev, "isa dma transfers cannot be performed with "
- "TRIG_RT, aborting");
+ dev_err(dev->class_dev,
+ "isa dma transfers cannot be performed with TRIG_RT, aborting\n");
return -1;
}
devpriv->adc_byte_count =
cmd->stop_arg * cmd->chanlist_len * sizeof(uint16_t);
- /* disable conversions for das1600 mode */
- if (board->size > 0x400)
- outb(DAS1600_CONV_DISABLE, dev->iobase + DAS1600_CONV);
+ if (devpriv->can_burst)
+ outb(DAS1600_CONV_DISABLE, dev->iobase + DAS1600_CONV_REG);
/* set scan limits */
byte = CR_CHAN(cmd->chanlist[0]);
byte |= CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1]) << 4;
- outb(byte, dev->iobase + DAS16_MUX);
+ outb(byte, dev->iobase + DAS16_MUX_REG);
/* set gain (this is also burst rate register but according to
* computer boards manual, burst rate does nothing, even on
@@ -621,28 +771,27 @@ static int das16_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s)
if (board->ai_pg != das16_pg_none) {
range = CR_RANGE(cmd->chanlist[0]);
outb((das16_gainlists[board->ai_pg])[range],
- dev->iobase + DAS16_GAIN);
+ dev->iobase + DAS16_GAIN_REG);
}
/* set counter mode and counts */
cmd->convert_arg =
das16_set_pacer(dev, cmd->convert_arg,
cmd->flags & TRIG_ROUND_MASK);
- DEBUG_PRINT("pacer period: %d ns\n", cmd->convert_arg);
/* enable counters */
byte = 0;
- /* Enable burst mode if appropriate. */
- if (board->size > 0x400) {
+ if (devpriv->can_burst) {
if (cmd->convert_src == TRIG_NOW) {
- outb(DAS1600_BURST_VAL, dev->iobase + DAS1600_BURST);
+ outb(DAS1600_BURST_VAL,
+ dev->iobase + DAS1600_BURST_REG);
/* set burst length */
- byte |= BURST_LEN_BITS(cmd->chanlist_len - 1);
+ byte |= DAS16_PACER_BURST_LEN(cmd->chanlist_len - 1);
} else {
- outb(0, dev->iobase + DAS1600_BURST);
+ outb(0, dev->iobase + DAS1600_BURST_REG);
}
}
- outb(byte, dev->iobase + DAS16_PACER);
+ outb(byte, dev->iobase + DAS16_PACER_REG);
/* set up dma transfer */
flags = claim_dma_lock();
@@ -653,465 +802,220 @@ static int das16_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->current_buffer = 0;
set_dma_addr(devpriv->dma_chan,
devpriv->dma_buffer_addr[devpriv->current_buffer]);
- /* set appropriate size of transfer */
- devpriv->dma_transfer_size = das16_suggest_transfer_size(dev, cmd);
+ devpriv->dma_transfer_size = DAS16_DMA_SIZE;
set_dma_count(devpriv->dma_chan, devpriv->dma_transfer_size);
enable_dma(devpriv->dma_chan);
release_dma_lock(flags);
/* set up interrupt */
- if (devpriv->timer_mode) {
- devpriv->timer_running = 1;
- devpriv->timer.expires = jiffies + timer_period();
- add_timer(&devpriv->timer);
- devpriv->control_state &= ~DAS16_INTE;
- } else {
- /* clear interrupt bit */
- outb(0x00, dev->iobase + DAS16_STATUS);
- /* enable interrupts */
- devpriv->control_state |= DAS16_INTE;
- }
- devpriv->control_state |= DMA_ENABLE;
- devpriv->control_state &= ~PACING_MASK;
- if (cmd->convert_src == TRIG_EXT)
- devpriv->control_state |= EXT_PACER;
- else
- devpriv->control_state |= INT_PACER;
- outb(devpriv->control_state, dev->iobase + DAS16_CONTROL);
+ devpriv->timer_running = 1;
+ devpriv->timer.expires = jiffies + timer_period();
+ add_timer(&devpriv->timer);
- /* Enable conversions if using das1600 mode */
- if (board->size > 0x400)
- outb(0, dev->iobase + DAS1600_CONV);
+ das16_ai_enable(dev, DAS16_CTRL_DMAE, cmd->convert_src);
+ if (devpriv->can_burst)
+ outb(0, dev->iobase + DAS1600_CONV_REG);
return 0;
}
static int das16_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
- const struct das16_board *board = comedi_board(dev);
struct das16_private_struct *devpriv = dev->private;
unsigned long flags;
spin_lock_irqsave(&dev->spinlock, flags);
- /* disable interrupts, dma and pacer clocked conversions */
- devpriv->control_state &= ~DAS16_INTE & ~PACING_MASK & ~DMA_ENABLE;
- outb(devpriv->control_state, dev->iobase + DAS16_CONTROL);
- if (devpriv->dma_chan)
- disable_dma(devpriv->dma_chan);
+
+ das16_ai_disable(dev);
+ disable_dma(devpriv->dma_chan);
/* disable SW timer */
- if (devpriv->timer_mode && devpriv->timer_running) {
+ if (devpriv->timer_running) {
devpriv->timer_running = 0;
del_timer(&devpriv->timer);
}
- /* disable burst mode */
- if (board->size > 0x400)
- outb(0, dev->iobase + DAS1600_BURST);
-
+ if (devpriv->can_burst)
+ outb(0, dev->iobase + DAS1600_BURST_REG);
spin_unlock_irqrestore(&dev->spinlock, flags);
return 0;
}
-static void das16_reset(struct comedi_device *dev)
+static void das16_ai_munge(struct comedi_device *dev,
+ struct comedi_subdevice *s, void *array,
+ unsigned int num_bytes,
+ unsigned int start_chan_index)
{
- outb(0, dev->iobase + DAS16_STATUS);
- outb(0, dev->iobase + DAS16_CONTROL);
- outb(0, dev->iobase + DAS16_PACER);
- outb(0, dev->iobase + DAS16_CNTR_CONTROL);
+ unsigned int i, num_samples = num_bytes / sizeof(short);
+ short *data = array;
+
+ for (i = 0; i < num_samples; i++) {
+ data[i] = le16_to_cpu(data[i]);
+ if (s->maxdata == 0x0fff)
+ data[i] >>= 4;
+ data[i] &= s->maxdata;
+ }
}
-static int das16_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int das16_ai_wait_for_conv(struct comedi_device *dev,
+ unsigned int timeout)
+{
+ unsigned int status;
+ int i;
+
+ for (i = 0; i < timeout; i++) {
+ status = inb(dev->iobase + DAS16_STATUS_REG);
+ if (!(status & DAS16_STATUS_BUSY))
+ return 0;
+ }
+ return -ETIME;
+}
+
+static int das16_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
const struct das16_board *board = comedi_board(dev);
- struct das16_private_struct *devpriv = dev->private;
- int i, n;
- int range;
- int chan;
- int msb, lsb;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int range = CR_RANGE(insn->chanspec);
+ unsigned int val;
+ int ret;
+ int i;
- /* disable interrupts and pacing */
- devpriv->control_state &= ~DAS16_INTE & ~DMA_ENABLE & ~PACING_MASK;
- outb(devpriv->control_state, dev->iobase + DAS16_CONTROL);
+ das16_ai_disable(dev);
/* set multiplexer */
- chan = CR_CHAN(insn->chanspec);
- chan |= CR_CHAN(insn->chanspec) << 4;
- outb(chan, dev->iobase + DAS16_MUX);
+ outb(chan | (chan << 4), dev->iobase + DAS16_MUX_REG);
/* set gain */
if (board->ai_pg != das16_pg_none) {
- range = CR_RANGE(insn->chanspec);
outb((das16_gainlists[board->ai_pg])[range],
- dev->iobase + DAS16_GAIN);
+ dev->iobase + DAS16_GAIN_REG);
}
- for (n = 0; n < insn->n; n++) {
+ for (i = 0; i < insn->n; i++) {
/* trigger conversion */
- outb_p(0, dev->iobase + DAS16_TRIG);
-
- for (i = 0; i < DAS16_TIMEOUT; i++) {
- if (!(inb(dev->iobase + DAS16_STATUS) & BUSY))
- break;
- }
- if (i == DAS16_TIMEOUT) {
- printk("das16: timeout\n");
- return -ETIME;
- }
- msb = inb(dev->iobase + DAS16_AI_MSB);
- lsb = inb(dev->iobase + DAS16_AI_LSB);
- if (board->ai_nbits == 12)
- data[n] = ((lsb >> 4) & 0xf) | (msb << 4);
- else
- data[n] = lsb | (msb << 8);
-
- }
-
- return n;
-}
+ outb_p(0, dev->iobase + DAS16_TRIG_REG);
-static int das16_di_rbits(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- unsigned int bits;
-
- bits = inb(dev->iobase + DAS16_DIO) & 0xf;
- data[1] = bits;
- data[0] = 0;
-
- return insn->n;
-}
-
-static int das16_do_wbits(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- unsigned int wbits;
+ ret = das16_ai_wait_for_conv(dev, 1000);
+ if (ret)
+ return ret;
- /* only set bits that have been masked */
- data[0] &= 0xf;
- wbits = s->state;
- /* zero bits that have been masked */
- wbits &= ~data[0];
- /* set masked bits */
- wbits |= data[0] & data[1];
- s->state = wbits;
- data[1] = wbits;
+ val = inb(dev->iobase + DAS16_AI_MSB_REG) << 8;
+ val |= inb(dev->iobase + DAS16_AI_LSB_REG);
+ if (s->maxdata == 0x0fff)
+ val >>= 4;
+ val &= s->maxdata;
- outb(s->state, dev->iobase + DAS16_DIO);
+ data[i] = val;
+ }
return insn->n;
}
-static int das16_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int das16_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- const struct das16_board *board = comedi_board(dev);
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val;
int i;
- int lsb, msb;
- int chan;
-
- chan = CR_CHAN(insn->chanspec);
for (i = 0; i < insn->n; i++) {
- if (board->ao_nbits == 12) {
- lsb = (data[i] << 4) & 0xff;
- msb = (data[i] >> 4) & 0xff;
- } else {
- lsb = data[i] & 0xff;
- msb = (data[i] >> 8) & 0xff;
- }
- outb(lsb, dev->iobase + DAS16_AO_LSB(chan));
- outb(msb, dev->iobase + DAS16_AO_MSB(chan));
- }
-
- return i;
-}
-
-/* the pc104-das16jr (at least) has problems if the dma
- transfer is interrupted in the middle of transferring
- a 16 bit sample, so this function takes care to get
- an even transfer count after disabling dma
- channel.
-*/
-static int disable_dma_on_even(struct comedi_device *dev)
-{
- struct das16_private_struct *devpriv = dev->private;
- int residue;
- int i;
- static const int disable_limit = 100;
- static const int enable_timeout = 100;
+ val = data[i];
+ val <<= 4;
- disable_dma(devpriv->dma_chan);
- residue = get_dma_residue(devpriv->dma_chan);
- for (i = 0; i < disable_limit && (residue % 2); ++i) {
- int j;
- enable_dma(devpriv->dma_chan);
- for (j = 0; j < enable_timeout; ++j) {
- int new_residue;
- udelay(2);
- new_residue = get_dma_residue(devpriv->dma_chan);
- if (new_residue != residue)
- break;
- }
- disable_dma(devpriv->dma_chan);
- residue = get_dma_residue(devpriv->dma_chan);
- }
- if (i == disable_limit) {
- comedi_error(dev, "failed to get an even dma transfer, "
- "could be trouble.");
+ outb(val & 0xff, dev->iobase + DAS16_AO_LSB_REG(chan));
+ outb((val >> 8) & 0xff, dev->iobase + DAS16_AO_MSB_REG(chan));
}
- return residue;
+
+ return insn->n;
}
-static void das16_interrupt(struct comedi_device *dev)
+static int das16_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- const struct das16_board *board = comedi_board(dev);
- struct das16_private_struct *devpriv = dev->private;
- unsigned long dma_flags, spin_flags;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async;
- struct comedi_cmd *cmd;
- int num_bytes, residue;
- int buffer_index;
-
- if (!dev->attached) {
- comedi_error(dev, "premature interrupt");
- return;
- }
- /* initialize async here to make sure it is not NULL */
- async = s->async;
- cmd = &async->cmd;
-
- if (devpriv->dma_chan == 0) {
- comedi_error(dev, "interrupt with no dma channel?");
- return;
- }
-
- spin_lock_irqsave(&dev->spinlock, spin_flags);
- if ((devpriv->control_state & DMA_ENABLE) == 0) {
- spin_unlock_irqrestore(&dev->spinlock, spin_flags);
- DEBUG_PRINT("interrupt while dma disabled?\n");
- return;
- }
-
- dma_flags = claim_dma_lock();
- clear_dma_ff(devpriv->dma_chan);
- residue = disable_dma_on_even(dev);
-
- /* figure out how many points to read */
- if (residue > devpriv->dma_transfer_size) {
- comedi_error(dev, "residue > transfer size!\n");
- async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- num_bytes = 0;
- } else
- num_bytes = devpriv->dma_transfer_size - residue;
+ data[1] = inb(dev->iobase + DAS16_DIO_REG) & 0xf;
- if (cmd->stop_src == TRIG_COUNT &&
- num_bytes >= devpriv->adc_byte_count) {
- num_bytes = devpriv->adc_byte_count;
- async->events |= COMEDI_CB_EOA;
- }
-
- buffer_index = devpriv->current_buffer;
- devpriv->current_buffer = (devpriv->current_buffer + 1) % 2;
- devpriv->adc_byte_count -= num_bytes;
-
- /* figure out how many bytes for next transfer */
- if (cmd->stop_src == TRIG_COUNT && devpriv->timer_mode == 0 &&
- devpriv->dma_transfer_size > devpriv->adc_byte_count)
- devpriv->dma_transfer_size = devpriv->adc_byte_count;
-
- /* re-enable dma */
- if ((async->events & COMEDI_CB_EOA) == 0) {
- set_dma_addr(devpriv->dma_chan,
- devpriv->dma_buffer_addr[devpriv->current_buffer]);
- set_dma_count(devpriv->dma_chan, devpriv->dma_transfer_size);
- enable_dma(devpriv->dma_chan);
- /* reenable conversions for das1600 mode, (stupid hardware) */
- if (board->size > 0x400 && devpriv->timer_mode == 0)
- outb(0x00, dev->iobase + DAS1600_CONV);
-
- }
- release_dma_lock(dma_flags);
-
- spin_unlock_irqrestore(&dev->spinlock, spin_flags);
-
- cfc_write_array_to_buffer(s,
- devpriv->dma_buffer[buffer_index], num_bytes);
-
- cfc_handle_events(dev, s);
+ return insn->n;
}
-static irqreturn_t das16_dma_interrupt(int irq, void *d)
+static int das16_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int status;
- struct comedi_device *dev = d;
+ unsigned int mask = data[0];
+ unsigned int bits = data[1];
- status = inb(dev->iobase + DAS16_STATUS);
+ if (mask) {
+ s->state &= ~mask;
+ s->state |= (bits & mask);
- if ((status & DAS16_INT) == 0) {
- DEBUG_PRINT("spurious interrupt\n");
- return IRQ_NONE;
+ outb(s->state, dev->iobase + DAS16_DIO_REG);
}
- /* clear interrupt */
- outb(0x00, dev->iobase + DAS16_STATUS);
- das16_interrupt(dev);
- return IRQ_HANDLED;
-}
-
-static void das16_timer_interrupt(unsigned long arg)
-{
- struct comedi_device *dev = (struct comedi_device *)arg;
- struct das16_private_struct *devpriv = dev->private;
-
- das16_interrupt(dev);
-
- if (devpriv->timer_running)
- mod_timer(&devpriv->timer, jiffies + timer_period());
-}
+ data[1] = s->state;
-static void reg_dump(struct comedi_device *dev)
-{
- DEBUG_PRINT("********DAS1600 REGISTER DUMP********\n");
- DEBUG_PRINT("DAS16_MUX: %x\n", inb(dev->iobase + DAS16_MUX));
- DEBUG_PRINT("DAS16_DIO: %x\n", inb(dev->iobase + DAS16_DIO));
- DEBUG_PRINT("DAS16_STATUS: %x\n", inb(dev->iobase + DAS16_STATUS));
- DEBUG_PRINT("DAS16_CONTROL: %x\n", inb(dev->iobase + DAS16_CONTROL));
- DEBUG_PRINT("DAS16_PACER: %x\n", inb(dev->iobase + DAS16_PACER));
- DEBUG_PRINT("DAS16_GAIN: %x\n", inb(dev->iobase + DAS16_GAIN));
- DEBUG_PRINT("DAS16_CNTR_CONTROL: %x\n",
- inb(dev->iobase + DAS16_CNTR_CONTROL));
- DEBUG_PRINT("DAS1600_CONV: %x\n", inb(dev->iobase + DAS1600_CONV));
- DEBUG_PRINT("DAS1600_BURST: %x\n", inb(dev->iobase + DAS1600_BURST));
- DEBUG_PRINT("DAS1600_ENABLE: %x\n", inb(dev->iobase + DAS1600_ENABLE));
- DEBUG_PRINT("DAS1600_STATUS_B: %x\n",
- inb(dev->iobase + DAS1600_STATUS_B));
+ return insn->n;
}
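Editorial note: das16_do_insn_bits() follows the standard comedi (*insn_bits) convention, in which data[0] is a mask of channels to update, data[1] carries the new values, and the current state is always returned in data[1]. A generic sketch with the hardware write left as a comment:

static int example_do_insn_bits(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn,
				unsigned int *data)
{
	if (data[0]) {
		s->state &= ~data[0];
		s->state |= data[0] & data[1];
		/* write s->state to the output register here */
	}
	data[1] = s->state;

	return insn->n;
}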
static int das16_probe(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct das16_board *board = comedi_board(dev);
- struct das16_private_struct *devpriv = dev->private;
- int status;
int diobits;
- /* status is available on all boards */
-
- status = inb(dev->iobase + DAS16_STATUS);
-
- if ((status & UNIPOLAR))
- devpriv->ai_unipolar = 1;
- else
- devpriv->ai_unipolar = 0;
-
-
- if ((status & DAS16_MUXBIT))
- devpriv->ai_singleended = 1;
- else
- devpriv->ai_singleended = 0;
-
-
/* diobits indicates boards */
-
- diobits = inb(dev->iobase + DAS16_DIO) & 0xf0;
-
- printk(KERN_INFO " id bits are 0x%02x\n", diobits);
+ diobits = inb(dev->iobase + DAS16_DIO_REG) & 0xf0;
if (board->id != diobits) {
- printk(KERN_INFO " requested board's id bits are 0x%x (ignore)\n",
- board->id);
- }
-
- return 0;
-}
-
-static int das1600_mode_detect(struct comedi_device *dev)
-{
- struct das16_private_struct *devpriv = dev->private;
- int status = 0;
-
- status = inb(dev->iobase + DAS1600_STATUS_B);
-
- if (status & DAS1600_CLK_10MHZ) {
- devpriv->clockbase = 100;
- printk(KERN_INFO " 10MHz pacer clock\n");
- } else {
- devpriv->clockbase = 1000;
- printk(KERN_INFO " 1MHz pacer clock\n");
+ dev_err(dev->class_dev,
+ "requested board's id bits are incorrect (0x%x != 0x%x)\n",
+ board->id, diobits);
+ return -EINVAL;
}
- reg_dump(dev);
-
return 0;
}
-static void das16_ai_munge(struct comedi_device *dev,
- struct comedi_subdevice *s, void *array,
- unsigned int num_bytes,
- unsigned int start_chan_index)
+static void das16_reset(struct comedi_device *dev)
{
- const struct das16_board *board = comedi_board(dev);
- unsigned int i, num_samples = num_bytes / sizeof(short);
- short *data = array;
-
- for (i = 0; i < num_samples; i++) {
- data[i] = le16_to_cpu(data[i]);
- if (board->ai_nbits == 12)
- data[i] = (data[i] >> 4) & 0xfff;
-
- }
+ outb(0, dev->iobase + DAS16_STATUS_REG);
+ outb(0, dev->iobase + DAS16_CTRL_REG);
+ outb(0, dev->iobase + DAS16_PACER_REG);
+ outb(0, dev->iobase + DAS16_TIMER_BASE_REG + i8254_control_reg);
}
-/*
- *
- * Options list:
- * 0 I/O base
- * 1 IRQ
- * 2 DMA
- * 3 Clock speed (in MHz)
- */
static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct das16_board *board = comedi_board(dev);
struct das16_private_struct *devpriv;
struct comedi_subdevice *s;
+ struct comedi_lrange *lrange;
+ struct comedi_krange *krange;
+ unsigned int dma_chan = it->options[2];
+ unsigned int status;
int ret;
- unsigned int irq;
- unsigned int dma_chan;
- int timer_mode;
- unsigned long flags;
- struct comedi_krange *user_ai_range, *user_ao_range;
-
-#if 0
- irq = it->options[1];
- timer_mode = it->options[8];
-#endif
- /* always use time_mode since using irq can drop samples while
- * waiting for dma done interrupt (due to hardware limitations) */
- irq = 0;
- timer_mode = 1;
- if (timer_mode)
- irq = 0;
/* check that clock setting is valid */
if (it->options[3]) {
if (it->options[3] != 0 &&
it->options[3] != 1 && it->options[3] != 10) {
- printk
- ("\n Invalid option. Master clock must be set "
- "to 1 or 10 (MHz)\n");
+ dev_err(dev->class_dev,
+ "Invalid option. Master clock must be set to 1 or 10 (MHz)\n");
return -EINVAL;
}
}
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
if (board->size < 0x400) {
ret = comedi_request_region(dev, it->options[0], board->size);
@@ -1127,207 +1031,183 @@ static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
devpriv->extra_iobase = dev->iobase + 0x400;
+ devpriv->can_burst = 1;
}
/* probe id bits to make sure they are consistent */
- if (das16_probe(dev, it)) {
- printk(KERN_ERR " id bits do not match selected board, aborting\n");
+ if (das16_probe(dev, it))
return -EINVAL;
- }
/* get master clock speed */
- if (board->size < 0x400) {
+ if (devpriv->can_burst) {
+ status = inb(dev->iobase + DAS1600_STATUS_REG);
+
+ if (status & DAS1600_STATUS_CLK_10MHZ)
+ devpriv->clockbase = 100;
+ else
+ devpriv->clockbase = 1000;
+ } else {
if (it->options[3])
devpriv->clockbase = 1000 / it->options[3];
else
devpriv->clockbase = 1000; /* 1 MHz default */
- } else {
- das1600_mode_detect(dev);
- }
-
- /* now for the irq */
- if (irq > 1 && irq < 8) {
- ret = request_irq(irq, das16_dma_interrupt, 0,
- dev->board_name, dev);
-
- if (ret < 0)
- return ret;
- dev->irq = irq;
- printk(KERN_INFO " ( irq = %u )", irq);
- } else if (irq == 0) {
- printk(" ( no irq )");
- } else {
- printk(" invalid irq\n");
- return -EINVAL;
}
- /* initialize dma */
- dma_chan = it->options[2];
+ /* initialize dma */
if (dma_chan == 1 || dma_chan == 3) {
- /* allocate dma buffers */
+ unsigned long flags;
int i;
- for (i = 0; i < 2; i++) {
- devpriv->dma_buffer[i] = pci_alloc_consistent(
- NULL, DAS16_DMA_SIZE,
- &devpriv->dma_buffer_addr[i]);
- if (devpriv->dma_buffer[i] == NULL)
- return -ENOMEM;
- }
if (request_dma(dma_chan, dev->board_name)) {
- printk(KERN_ERR " failed to allocate dma channel %i\n",
- dma_chan);
+ dev_err(dev->class_dev,
+ "failed to request dma channel %i\n",
+ dma_chan);
return -EINVAL;
}
devpriv->dma_chan = dma_chan;
+
+ /* allocate dma buffers */
+ for (i = 0; i < 2; i++) {
+ void *p;
+
+ p = pci_alloc_consistent(NULL, DAS16_DMA_SIZE,
+ &devpriv->dma_buffer_addr[i]);
+ if (!p)
+ return -ENOMEM;
+ devpriv->dma_buffer[i] = p;
+ }
+
flags = claim_dma_lock();
disable_dma(devpriv->dma_chan);
set_dma_mode(devpriv->dma_chan, DMA_MODE_READ);
release_dma_lock(flags);
- printk(KERN_INFO " ( dma = %u)\n", dma_chan);
- } else if (dma_chan == 0) {
- printk(KERN_INFO " ( no dma )\n");
- } else {
- printk(KERN_ERR " invalid dma channel\n");
- return -EINVAL;
+
+ init_timer(&devpriv->timer);
+ devpriv->timer.function = das16_timer_interrupt;
+ devpriv->timer.data = (unsigned long)dev;
}
- /* get any user-defined input range */
+ /* get any user-defined input range */
if (board->ai_pg == das16_pg_none &&
(it->options[4] || it->options[5])) {
- /* allocate single-range range table */
- devpriv->user_ai_range_table =
- kmalloc(sizeof(struct comedi_lrange) +
- sizeof(struct comedi_krange), GFP_KERNEL);
- /* initialize ai range */
- devpriv->user_ai_range_table->length = 1;
- user_ai_range = devpriv->user_ai_range_table->range;
- user_ai_range->min = it->options[4];
- user_ai_range->max = it->options[5];
- user_ai_range->flags = UNIT_volt;
- }
- /* get any user-defined output range */
- if (it->options[6] || it->options[7]) {
- /* allocate single-range range table */
- devpriv->user_ao_range_table =
- kmalloc(sizeof(struct comedi_lrange) +
- sizeof(struct comedi_krange), GFP_KERNEL);
- /* initialize ao range */
- devpriv->user_ao_range_table->length = 1;
- user_ao_range = devpriv->user_ao_range_table->range;
- user_ao_range->min = it->options[6];
- user_ao_range->max = it->options[7];
- user_ao_range->flags = UNIT_volt;
+ /* allocate single-range range table */
+ lrange = kzalloc(sizeof(*lrange) + sizeof(*krange), GFP_KERNEL);
+ if (!lrange)
+ return -ENOMEM;
+
+ /* initialize ai range */
+ devpriv->user_ai_range_table = lrange;
+ lrange->length = 1;
+ krange = devpriv->user_ai_range_table->range;
+ krange->min = it->options[4];
+ krange->max = it->options[5];
+ krange->flags = UNIT_volt;
}
- if (timer_mode) {
- init_timer(&(devpriv->timer));
- devpriv->timer.function = das16_timer_interrupt;
- devpriv->timer.data = (unsigned long)dev;
+ /* get any user-defined output range */
+ if (it->options[6] || it->options[7]) {
+ /* allocate single-range range table */
+ lrange = kzalloc(sizeof(*lrange) + sizeof(*krange), GFP_KERNEL);
+ if (!lrange)
+ return -ENOMEM;
+
+ /* initialize ao range */
+ devpriv->user_ao_range_table = lrange;
+ lrange->length = 1;
+ krange = devpriv->user_ao_range_table->range;
+ krange->min = it->options[6];
+ krange->max = it->options[7];
+ krange->flags = UNIT_volt;
}
- devpriv->timer_mode = timer_mode ? 1 : 0;
- ret = comedi_alloc_subdevices(dev, 5);
+ ret = comedi_alloc_subdevices(dev, 4 + board->has_8255);
if (ret)
return ret;
+ status = inb(dev->iobase + DAS16_STATUS_REG);
+
+ /* Analog Input subdevice */
s = &dev->subdevices[0];
- dev->read_subdev = s;
- /* ai */
- if (board->ai) {
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
- if (devpriv->ai_singleended) {
- s->n_chan = 16;
- s->len_chanlist = 16;
- s->subdev_flags |= SDF_GROUND;
- } else {
- s->n_chan = 8;
- s->len_chanlist = 8;
- s->subdev_flags |= SDF_DIFF;
- }
- s->maxdata = (1 << board->ai_nbits) - 1;
- if (devpriv->user_ai_range_table) { /* user defined ai range */
- s->range_table = devpriv->user_ai_range_table;
- } else if (devpriv->ai_unipolar) {
- s->range_table = das16_ai_uni_lranges[board->ai_pg];
- } else {
- s->range_table = das16_ai_bip_lranges[board->ai_pg];
- }
- s->insn_read = board->ai;
- s->do_cmdtest = das16_cmd_test;
- s->do_cmd = das16_cmd_exec;
- s->cancel = das16_cancel;
- s->munge = das16_ai_munge;
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE;
+ if (status & DAS16_STATUS_MUXBIT) {
+ s->subdev_flags |= SDF_GROUND;
+ s->n_chan = 16;
} else {
- s->type = COMEDI_SUBD_UNUSED;
+ s->subdev_flags |= SDF_DIFF;
+ s->n_chan = 8;
}
-
- s = &dev->subdevices[1];
- /* ao */
- if (board->ao) {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 2;
- s->maxdata = (1 << board->ao_nbits) - 1;
- /* user defined ao range */
- if (devpriv->user_ao_range_table)
- s->range_table = devpriv->user_ao_range_table;
- else
- s->range_table = &range_unknown;
-
- s->insn_write = board->ao;
+ s->len_chanlist = s->n_chan;
+ s->maxdata = board->ai_maxdata;
+ if (devpriv->user_ai_range_table) { /* user defined ai range */
+ s->range_table = devpriv->user_ai_range_table;
+ } else if (status & DAS16_STATUS_UNIPOLAR) {
+ s->range_table = das16_ai_uni_lranges[board->ai_pg];
} else {
- s->type = COMEDI_SUBD_UNUSED;
+ s->range_table = das16_ai_bip_lranges[board->ai_pg];
}
-
- s = &dev->subdevices[2];
- /* di */
- if (board->di) {
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = board->di;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
+ s->insn_read = das16_ai_insn_read;
+ if (devpriv->dma_chan) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->do_cmdtest = das16_cmd_test;
+ s->do_cmd = das16_cmd_exec;
+ s->cancel = das16_cancel;
+ s->munge = das16_ai_munge;
}
- s = &dev->subdevices[3];
- /* do */
- if (board->do_) {
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = board->do_;
- /* initialize digital output lines */
- outb(s->state, dev->iobase + DAS16_DIO);
+ /* Analog Output subdevice */
+ s = &dev->subdevices[1];
+ if (board->has_ao) {
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 2;
+ s->maxdata = 0x0fff;
+ s->range_table = devpriv->user_ao_range_table;
+ s->insn_write = das16_ao_insn_write;
} else {
- s->type = COMEDI_SUBD_UNUSED;
+ s->type = COMEDI_SUBD_UNUSED;
}
- s = &dev->subdevices[4];
- /* 8255 */
- if (board->i8255_offset != 0) {
- subdev_8255_init(dev, s, NULL, (dev->iobase +
- board->i8255_offset));
- } else {
- s->type = COMEDI_SUBD_UNUSED;
+ /* Digital Input subdevice */
+ s = &dev->subdevices[2];
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 4;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = das16_di_insn_bits;
+
+ /* Digital Output subdevice */
+ s = &dev->subdevices[3];
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 4;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = das16_do_insn_bits;
+
+ /* initialize digital output lines */
+ outb(s->state, dev->iobase + DAS16_DIO_REG);
+
+ /* 8255 Digital I/O subdevice */
+ if (board->has_8255) {
+ s = &dev->subdevices[4];
+ ret = subdev_8255_init(dev, s, NULL,
+ dev->iobase + board->i8255_offset);
+ if (ret)
+ return ret;
}
das16_reset(dev);
/* set the interrupt level */
- devpriv->control_state = DAS16_IRQ(dev->irq);
- outb(devpriv->control_state, dev->iobase + DAS16_CONTROL);
-
- /* turn on das1600 mode if available */
- if (board->size > 0x400) {
- outb(DAS1600_ENABLE_VAL, dev->iobase + DAS1600_ENABLE);
- outb(0, dev->iobase + DAS1600_CONV);
- outb(0, dev->iobase + DAS1600_BURST);
+ devpriv->ctrl_reg = DAS16_CTRL_IRQ(dev->irq);
+ outb(devpriv->ctrl_reg, dev->iobase + DAS16_CTRL_REG);
+
+ if (devpriv->can_burst) {
+ outb(DAS1600_ENABLE_VAL, dev->iobase + DAS1600_ENABLE_REG);
+ outb(0, dev->iobase + DAS1600_CONV_REG);
+ outb(0, dev->iobase + DAS1600_BURST_REG);
}
return 0;
@@ -1337,11 +1217,12 @@ static void das16_detach(struct comedi_device *dev)
{
const struct das16_board *board = comedi_board(dev);
struct das16_private_struct *devpriv = dev->private;
+ int i;
- das16_reset(dev);
- comedi_spriv_free(dev, 4);
if (devpriv) {
- int i;
+ if (dev->iobase)
+ das16_reset(dev);
+
for (i = 0; i < 2; i++) {
if (devpriv->dma_buffer[i])
pci_free_consistent(NULL, DAS16_DMA_SIZE,
@@ -1353,312 +1234,15 @@ static void das16_detach(struct comedi_device *dev)
free_dma(devpriv->dma_chan);
kfree(devpriv->user_ai_range_table);
kfree(devpriv->user_ao_range_table);
+
+ if (devpriv->extra_iobase)
+ release_region(devpriv->extra_iobase,
+ board->size & 0x3ff);
}
- if (devpriv->extra_iobase)
- release_region(devpriv->extra_iobase, board->size & 0x3ff);
+
comedi_legacy_detach(dev);
}
-static const struct das16_board das16_boards[] = {
- {
- .name = "das-16",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 15000,
- .ai_pg = das16_pg_none,
- .ao = das16_ao_winsn,
- .ao_nbits = 12,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x10,
- .i8254_offset = 0x0c,
- .size = 0x14,
- .id = 0x00,
- }, {
- .name = "das-16g",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 15000,
- .ai_pg = das16_pg_none,
- .ao = das16_ao_winsn,
- .ao_nbits = 12,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x10,
- .i8254_offset = 0x0c,
- .size = 0x14,
- .id = 0x00,
- }, {
- .name = "das-16f",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 8500,
- .ai_pg = das16_pg_none,
- .ao = das16_ao_winsn,
- .ao_nbits = 12,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x10,
- .i8254_offset = 0x0c,
- .size = 0x14,
- .id = 0x00,
- }, {
- .name = "cio-das16",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 20000,
- .ai_pg = das16_pg_none,
- .ao = das16_ao_winsn,
- .ao_nbits = 12,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x10,
- .i8254_offset = 0x0c,
- .size = 0x14,
- .id = 0x80,
- }, {
- .name = "cio-das16/f",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 10000,
- .ai_pg = das16_pg_none,
- .ao = das16_ao_winsn,
- .ao_nbits = 12,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x10,
- .i8254_offset = 0x0c,
- .size = 0x14,
- .id = 0x80,
- }, {
- .name = "cio-das16/jr",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 7692,
- .ai_pg = das16_pg_16jr,
- .ao = NULL,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0,
- .i8254_offset = 0x0c,
- .size = 0x10,
- .id = 0x00,
- }, {
- .name = "pc104-das16jr",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 3300,
- .ai_pg = das16_pg_16jr,
- .ao = NULL,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0,
- .i8254_offset = 0x0c,
- .size = 0x10,
- .id = 0x00,
- }, {
- .name = "cio-das16jr/16",
- .ai = das16_ai_rinsn,
- .ai_nbits = 16,
- .ai_speed = 10000,
- .ai_pg = das16_pg_16jr_16,
- .ao = NULL,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0,
- .i8254_offset = 0x0c,
- .size = 0x10,
- .id = 0x00,
- }, {
- .name = "pc104-das16jr/16",
- .ai = das16_ai_rinsn,
- .ai_nbits = 16,
- .ai_speed = 10000,
- .ai_pg = das16_pg_16jr_16,
- .ao = NULL,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0,
- .i8254_offset = 0x0c,
- .size = 0x10,
- .id = 0x00,
- }, {
- .name = "das-1201",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 20000,
- .ai_pg = das16_pg_none,
- .ao = NULL,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x400,
- .i8254_offset = 0x0c,
- .size = 0x408,
- .id = 0x20,
- }, {
- .name = "das-1202",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 10000,
- .ai_pg = das16_pg_none,
- .ao = NULL,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x400,
- .i8254_offset = 0x0c,
- .size = 0x408,
- .id = 0x20,
- }, {
- .name = "das-1401",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 10000,
- .ai_pg = das16_pg_1601,
- .ao = NULL,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x0,
- .i8254_offset = 0x0c,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "das-1402",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 10000,
- .ai_pg = das16_pg_1602,
- .ao = NULL,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x0,
- .i8254_offset = 0x0c,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "das-1601",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 10000,
- .ai_pg = das16_pg_1601,
- .ao = das16_ao_winsn,
- .ao_nbits = 12,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x400,
- .i8254_offset = 0x0c,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "das-1602",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 10000,
- .ai_pg = das16_pg_1602,
- .ao = das16_ao_winsn,
- .ao_nbits = 12,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x400,
- .i8254_offset = 0x0c,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "cio-das1401/12",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 6250,
- .ai_pg = das16_pg_1601,
- .ao = NULL,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0,
- .i8254_offset = 0x0c,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "cio-das1402/12",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 6250,
- .ai_pg = das16_pg_1602,
- .ao = NULL,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0,
- .i8254_offset = 0x0c,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "cio-das1402/16",
- .ai = das16_ai_rinsn,
- .ai_nbits = 16,
- .ai_speed = 10000,
- .ai_pg = das16_pg_1602,
- .ao = NULL,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0,
- .i8254_offset = 0x0c,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "cio-das1601/12",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 6250,
- .ai_pg = das16_pg_1601,
- .ao = das16_ao_winsn,
- .ao_nbits = 12,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x400,
- .i8254_offset = 0x0c,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "cio-das1602/12",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 10000,
- .ai_pg = das16_pg_1602,
- .ao = das16_ao_winsn,
- .ao_nbits = 12,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x400,
- .i8254_offset = 0x0c,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "cio-das1602/16",
- .ai = das16_ai_rinsn,
- .ai_nbits = 16,
- .ai_speed = 10000,
- .ai_pg = das16_pg_1602,
- .ao = das16_ao_winsn,
- .ao_nbits = 12,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x400,
- .i8254_offset = 0x0c,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "cio-das16/330",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 3030,
- .ai_pg = das16_pg_16jr,
- .ao = NULL,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0,
- .i8254_offset = 0x0c,
- .size = 0x14,
- .id = 0xf0,
- },
-};
-
static struct comedi_driver das16_driver = {
.driver_name = "das16",
.module = THIS_MODULE,
@@ -1671,5 +1255,5 @@ static struct comedi_driver das16_driver = {
module_comedi_driver(das16_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for DAS16 compatible boards");
MODULE_LICENSE("GPL");
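The private-data allocation change made in das16_attach() above repeats in every driver below: the open-coded kzalloc() plus dev->private assignment becomes a single comedi_alloc_devpriv() call. A minimal sketch of the converted shape (illustrative only; example_private and example_attach are placeholder names, not part of this patch):

struct example_private {
	unsigned int clockbase;		/* placeholder field */
};

static int example_attach(struct comedi_device *dev,
			  struct comedi_devconfig *it)
{
	struct example_private *devpriv;

	/*
	 * comedi_alloc_devpriv() kzalloc()s the private storage and hooks
	 * it up as dev->private in one step.
	 */
	devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
	if (!devpriv)
		return -ENOMEM;

	return 0;
}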
diff --git a/drivers/staging/comedi/drivers/das16m1.c b/drivers/staging/comedi/drivers/das16m1.c
index 9cb9c3b..b943c44 100644
--- a/drivers/staging/comedi/drivers/das16m1.c
+++ b/drivers/staging/comedi/drivers/das16m1.c
@@ -17,12 +17,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-************************************************************************
*/
/*
Driver: das16m1
@@ -58,7 +52,7 @@ Options:
irq can be omitted, although the cmd interface will not work without it.
*/
-#include <linux/ioport.h>
+#include <linux/module.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
@@ -573,10 +567,9 @@ static int das16m1_attach(struct comedi_device *dev,
int ret;
unsigned int irq;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_request_region(dev, it->options[0], DAS16M1_SIZE);
if (ret)
@@ -672,7 +665,6 @@ static void das16m1_detach(struct comedi_device *dev)
{
struct das16m1_private_struct *devpriv = dev->private;
- comedi_spriv_free(dev, 3);
if (devpriv && devpriv->extra_iobase)
release_region(devpriv->extra_iobase, DAS16M1_SIZE2);
comedi_legacy_detach(dev);
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index abf7638..5b30029 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -15,12 +15,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-************************************************************************
*/
/*
Driver: das1800
@@ -100,12 +94,12 @@ TODO:
read insn for analog out
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
#include <asm/dma.h>
#include "8253.h"
@@ -1517,10 +1511,9 @@ static int das1800_attach(struct comedi_device *dev,
int board;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_request_region(dev, it->options[0], DAS1800_SIZE);
if (ret)
diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c
index 11424fb..fb25cb8 100644
--- a/drivers/staging/comedi/drivers/das6402.c
+++ b/drivers/staging/comedi/drivers/das6402.c
@@ -22,11 +22,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: das6402
@@ -38,11 +33,10 @@ Devices: [Keithley Metrabyte] DAS6402 (das6402)
This driver has suffered bitrot.
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-
#define DAS6402_SIZE 16
#define N_WORDS (3000*64)
@@ -299,10 +293,9 @@ static int das6402_attach(struct comedi_device *dev,
dev->irq = irq;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_alloc_subdevices(dev, 1);
if (ret)
diff --git a/drivers/staging/comedi/drivers/das800.c b/drivers/staging/comedi/drivers/das800.c
index 9ce6cbc..11e1611 100644
--- a/drivers/staging/comedi/drivers/das800.c
+++ b/drivers/staging/comedi/drivers/das800.c
@@ -15,12 +15,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-************************************************************************
*/
/*
Driver: das800
@@ -62,10 +56,10 @@ cmd triggers supported:
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
#include <linux/delay.h>
#include "8253.h"
@@ -706,10 +700,9 @@ static int das800_attach(struct comedi_device *dev, struct comedi_devconfig *it)
int board;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_request_region(dev, it->options[0], DAS800_SIZE);
if (ret)
diff --git a/drivers/staging/comedi/drivers/dmm32at.c b/drivers/staging/comedi/drivers/dmm32at.c
index 6c85dd2..118a4fd 100644
--- a/drivers/staging/comedi/drivers/dmm32at.c
+++ b/drivers/staging/comedi/drivers/dmm32at.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: dmm32at
@@ -37,9 +32,10 @@ Configuration Options:
comedi_config /dev/comedi0 dmm32at baseaddr,irq
*/
+#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
#include "comedi_fc.h"
@@ -652,31 +648,34 @@ static int dmm32at_dio_insn_bits(struct comedi_device *dev,
static int dmm32at_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct dmm32at_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int mask;
unsigned char chanbit;
- int chan = CR_CHAN(insn->chanspec);
-
- if (insn->n != 1)
- return -EINVAL;
+ int ret;
- if (chan < 8)
+ if (chan < 8) {
+ mask = 0x0000ff;
chanbit = DMM32AT_DIRA;
- else if (chan < 16)
+ } else if (chan < 16) {
+ mask = 0x00ff00;
chanbit = DMM32AT_DIRB;
- else if (chan < 20)
+ } else if (chan < 20) {
+ mask = 0x0f0000;
chanbit = DMM32AT_DIRCL;
- else
+ } else {
+ mask = 0xf00000;
chanbit = DMM32AT_DIRCH;
+ }
- /* The input or output configuration of each digital line is
- * configured by a special insn_config instruction. chanspec
- * contains the channel to be changed, and data[0] contains the
- * value COMEDI_INPUT or COMEDI_OUTPUT. */
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
- /* if output clear the bit, otherwise set it */
- if (data[0] == COMEDI_OUTPUT)
+ if (data[0] == INSN_CONFIG_DIO_OUTPUT)
devpriv->dio_config &= ~chanbit;
else
devpriv->dio_config |= chanbit;
@@ -685,7 +684,7 @@ static int dmm32at_dio_insn_config(struct comedi_device *dev,
/* set the DIO's to the new configuration setting */
outb(devpriv->dio_config, dev->iobase + DMM32AT_DIOCONF);
- return 1;
+ return insn->n;
}
static int dmm32at_attach(struct comedi_device *dev,
@@ -758,10 +757,9 @@ static int dmm32at_attach(struct comedi_device *dev,
dev->irq = irq;
}
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_alloc_subdevices(dev, 3);
if (ret)
diff --git a/drivers/staging/comedi/drivers/dt2801.c b/drivers/staging/comedi/drivers/dt2801.c
index 8f5006d..38918a1 100644
--- a/drivers/staging/comedi/drivers/dt2801.c
+++ b/drivers/staging/comedi/drivers/dt2801.c
@@ -29,9 +29,9 @@ Configuration options:
[5] - D/A 1 range (same choices)
*/
+#include <linux/module.h>
#include "../comedidev.h"
#include <linux/delay.h>
-#include <linux/ioport.h>
#define DT2801_TIMEOUT 1000
@@ -551,32 +551,19 @@ static int dt2801_dio_insn_bits(struct comedi_device *dev,
static int dt2801_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int which = 0;
+ int ret;
- if (s == &dev->subdevices[3])
- which = 1;
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0xff);
+ if (ret)
+ return ret;
- /* configure */
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits = 0xff;
- dt2801_writecmd(dev, DT_C_SET_DIGOUT);
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits = 0;
- dt2801_writecmd(dev, DT_C_SET_DIGIN);
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] = s->io_bits ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- default:
- return -EINVAL;
- }
- dt2801_writedata(dev, which);
+ dt2801_writecmd(dev, s->io_bits ? DT_C_SET_DIGOUT : DT_C_SET_DIGIN);
+ dt2801_writedata(dev, (s == &dev->subdevices[3]) ? 1 : 0);
- return 1;
+ return insn->n;
}
/*
@@ -627,10 +614,9 @@ havetype:
if (ret)
goto out;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
dev->board_name = board->name;
diff --git a/drivers/staging/comedi/drivers/dt2811.c b/drivers/staging/comedi/drivers/dt2811.c
index 8757b54..a41a571 100644
--- a/drivers/staging/comedi/drivers/dt2811.c
+++ b/drivers/staging/comedi/drivers/dt2811.c
@@ -18,10 +18,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: dt2811
@@ -45,11 +41,10 @@ Configuration options:
[4] - D/A 1 range (same choices)
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-
static const struct comedi_lrange range_dt2811_pgh_ai_5_unipolar = {
4, {
RANGE(0, 5),
@@ -454,10 +449,9 @@ static int dt2811_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
switch (it->options[2]) {
case 0:
diff --git a/drivers/staging/comedi/drivers/dt2814.c b/drivers/staging/comedi/drivers/dt2814.c
index 7c95b3b..6514b9e 100644
--- a/drivers/staging/comedi/drivers/dt2814.c
+++ b/drivers/staging/comedi/drivers/dt2814.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: dt2814
@@ -39,10 +34,10 @@ a power of 10, from 1 to 10^7, of which only 3 or 4 are useful. In
addition, the clock does not seem to be very accurate.
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
#include <linux/delay.h>
#include "comedi_fc.h"
@@ -303,10 +298,9 @@ static int dt2814_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
s = &dev->subdevices[0];
dev->read_subdev = s;
diff --git a/drivers/staging/comedi/drivers/dt2815.c b/drivers/staging/comedi/drivers/dt2815.c
index b24e876..34040f0 100644
--- a/drivers/staging/comedi/drivers/dt2815.c
+++ b/drivers/staging/comedi/drivers/dt2815.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: dt2815
@@ -56,9 +51,9 @@ Configuration options:
[12] - Analog output 7 range configuration (same options)
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
#include <linux/delay.h>
#define DT2815_SIZE 2
@@ -170,10 +165,9 @@ static int dt2815_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
s = &dev->subdevices[0];
/* ao subdevice */
diff --git a/drivers/staging/comedi/drivers/dt2817.c b/drivers/staging/comedi/drivers/dt2817.c
index b5c8e82..f4a8529 100644
--- a/drivers/staging/comedi/drivers/dt2817.c
+++ b/drivers/staging/comedi/drivers/dt2817.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: dt2817
@@ -38,10 +33,9 @@ Configuration options:
[0] - I/O port base base address
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-
#define DT2817_SIZE 5
#define DT2817_CR 0
@@ -49,28 +43,26 @@ Configuration options:
static int dt2817_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int mask;
- int chan;
- int oe = 0;
-
- if (insn->n != 1)
- return -EINVAL;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int oe = 0;
+ unsigned int mask;
+ int ret;
- chan = CR_CHAN(insn->chanspec);
if (chan < 8)
- mask = 0xff;
+ mask = 0x000000ff;
else if (chan < 16)
- mask = 0xff00;
+ mask = 0x0000ff00;
else if (chan < 24)
- mask = 0xff0000;
+ mask = 0x00ff0000;
else
mask = 0xff000000;
- if (data[0])
- s->io_bits |= mask;
- else
- s->io_bits &= ~mask;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
if (s->io_bits & 0x000000ff)
oe |= 0x1;
@@ -83,7 +75,7 @@ static int dt2817_dio_insn_config(struct comedi_device *dev,
outb(oe, dev->iobase + DT2817_CR);
- return 1;
+ return insn->n;
}
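The dio_insn_config rework seen here for dt2817 (and for dmm32at, dt2801, dt282x and dt3000 elsewhere in this diff) hands the INSN_CONFIG_DIO_INPUT/OUTPUT/QUERY handling to the comedi_dio_insn_config() core helper, leaving the driver to program the hardware from the updated s->io_bits. A condensed sketch of that pattern (example_update_dir() is a placeholder helper, not part of the patch):

static int example_dio_insn_config(struct comedi_device *dev,
				   struct comedi_subdevice *s,
				   struct comedi_insn *insn,
				   unsigned int *data)
{
	unsigned int chan = CR_CHAN(insn->chanspec);
	unsigned int mask = (chan < 8) ? 0x00ff : 0xff00;	/* port group */
	int ret;

	/* core helper: updates s->io_bits (or answers a query) for 'mask' */
	ret = comedi_dio_insn_config(dev, s, insn, data, mask);
	if (ret)
		return ret;

	/* program the new direction bits into the hardware */
	example_update_dir(dev, s->io_bits);

	return insn->n;
}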
static int dt2817_dio_insn_bits(struct comedi_device *dev,
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
index 90f2de9..da3ee85 100644
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ b/drivers/staging/comedi/drivers/dt282x.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: dt282x
@@ -56,13 +51,16 @@ Notes:
be fixed to check for this situation and return an error.
*/
+#include <linux/module.h>
#include "../comedidev.h"
+#include <linux/delay.h>
#include <linux/gfp.h>
-#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+
#include <asm/dma.h>
+
#include "comedi_fc.h"
#define DEBUG
@@ -269,8 +267,9 @@ struct dt282x_private {
} \
udelay(5); \
} \
- if (_i) \
+ if (_i) { \
b \
+ } \
} while (0)
static int prep_ai_dma(struct comedi_device *dev, int chan, int size);
@@ -983,29 +982,32 @@ static int dt282x_dio_insn_bits(struct comedi_device *dev,
static int dt282x_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct dt282x_private *devpriv = dev->private;
- int mask;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int mask;
+ int ret;
- mask = (CR_CHAN(insn->chanspec) < 8) ? 0x00ff : 0xff00;
- if (data[0])
- s->io_bits |= mask;
+ if (chan < 8)
+ mask = 0x00ff;
else
- s->io_bits &= ~mask;
+ mask = 0xff00;
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
+
+ devpriv->dacsr &= ~(DT2821_LBOE | DT2821_HBOE);
if (s->io_bits & 0x00ff)
devpriv->dacsr |= DT2821_LBOE;
- else
- devpriv->dacsr &= ~DT2821_LBOE;
if (s->io_bits & 0xff00)
devpriv->dacsr |= DT2821_HBOE;
- else
- devpriv->dacsr &= ~DT2821_HBOE;
outw(devpriv->dacsr, dev->iobase + DT2821_DACSR);
- return 1;
+ return insn->n;
}
static const struct comedi_lrange *const ai_range_table[] = {
@@ -1193,10 +1195,9 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
#endif
}
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = dt282x_grab_dma(dev, it->options[opt_dma1],
it->options[opt_dma2]);
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index 7e03929..64ef875 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: dt3000
@@ -55,6 +50,7 @@ AO commands are not supported.
#define DEBUG 1
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -646,32 +642,23 @@ static void dt3k_dio_config(struct comedi_device *dev, int bits)
static int dt3k_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int mask;
-
- mask = (CR_CHAN(insn->chanspec) < 4) ? 0x0f : 0xf0;
-
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= mask;
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~mask;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] =
- (s->
- io_bits & (1 << CR_CHAN(insn->chanspec))) ? COMEDI_OUTPUT :
- COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- break;
- }
- mask = (s->io_bits & 0x01) | ((s->io_bits & 0x10) >> 3);
- dt3k_dio_config(dev, mask);
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int mask;
+ int ret;
+
+ if (chan < 4)
+ mask = 0x0f;
+ else
+ mask = 0xf0;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
+
+ dt3k_dio_config(dev, (s->io_bits & 0x01) | ((s->io_bits & 0x10) >> 3));
return insn->n;
}
@@ -727,10 +714,9 @@ static int dt3000_auto_attach(struct comedi_device *dev,
dev->board_ptr = this_board;
dev->board_name = this_board->name;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret < 0)
diff --git a/drivers/staging/comedi/drivers/dt9812.c b/drivers/staging/comedi/drivers/dt9812.c
index 81eb5ed..b5e6f33 100644
--- a/drivers/staging/comedi/drivers/dt9812.c
+++ b/drivers/staging/comedi/drivers/dt9812.c
@@ -15,11 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
/*
@@ -43,14 +38,10 @@ for my needs.
* says P1).
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/kref.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
@@ -60,6 +51,9 @@ for my needs.
#define DT9812_MAX_WRITE_CMD_PIPE_SIZE 32
#define DT9812_MAX_READ_CMD_PIPE_SIZE 32
+/* usb_bulk_msg() timeout in milliseconds */
+#define DT9812_USB_TIMEOUT 1000
+
/*
  * See Silicon Laboratories C8051F020/1/2/3 manual
*/
@@ -193,8 +187,8 @@ enum {
};
struct dt9812_flash_data {
- u16 numbytes;
- u16 address;
+ __le16 numbytes;
+ __le16 address;
};
#define DT9812_MAX_NUM_MULTI_BYTE_RDS \
@@ -235,94 +229,32 @@ struct dt9812_rmw_multi {
};
struct dt9812_usb_cmd {
- u32 cmd;
+ __le32 cmd;
union {
struct dt9812_flash_data flash_data_info;
struct dt9812_read_multi read_multi_info;
struct dt9812_write_multi write_multi_info;
struct dt9812_rmw_multi rmw_multi_info;
} u;
-#if 0
- WRITE_BYTE_INFO WriteByteInfo;
- READ_BYTE_INFO ReadByteInfo;
- WRITE_MULTI_INFO WriteMultiInfo;
- READ_MULTI_INFO ReadMultiInfo;
- RMW_BYTE_INFO RMWByteInfo;
- RMW_MULTI_INFO RMWMultiInfo;
- DAC_THRESHOLD_INFO DacThresholdInfo;
- INT_ON_CHANGE_MASK_INFO IntOnChangeMaskInfo;
- CGL_INFO CglInfo;
- SUBSYSTEM_INFO SubsystemInfo;
- CAL_POT_CMD CalPotCmd;
- WRITE_DEV_BYTE_INFO WriteDevByteInfo;
- READ_DEV_BYTE_INFO ReadDevByteInfo;
- WRITE_DEV_MULTI_INFO WriteDevMultiInfo;
- READ_DEV_MULTI_INFO ReadDevMultiInfo;
- READ_SINGLE_VALUE_INFO ReadSingleValueInfo;
- WRITE_SINGLE_VALUE_INFO WriteSingleValueInfo;
-#endif
-};
-
-#define DT9812_NUM_SLOTS 16
-
-static DEFINE_SEMAPHORE(dt9812_mutex);
-
-static const struct usb_device_id dt9812_table[] = {
- {USB_DEVICE(0x0867, 0x9812)},
- {} /* Terminating entry */
};
-MODULE_DEVICE_TABLE(usb, dt9812_table);
-
-struct usb_dt9812 {
- struct slot_dt9812 *slot;
- struct usb_device *udev;
- struct usb_interface *interface;
- u16 vendor;
- u16 product;
- u16 device;
- u32 serial;
+struct dt9812_private {
+ struct semaphore sem;
struct {
__u8 addr;
size_t size;
- } message_pipe, command_write, command_read, write_stream, read_stream;
- struct kref kref;
- u16 analog_out_shadow[2];
- u8 digital_out_shadow;
-};
-
-struct comedi_dt9812 {
- struct slot_dt9812 *slot;
- u32 serial;
-};
-
-struct slot_dt9812 {
- struct semaphore mutex;
- u32 serial;
- struct usb_dt9812 *usb;
- struct comedi_dt9812 *comedi;
+ } cmd_wr, cmd_rd;
+ u16 device;
+ u16 ao_shadow[2];
};
-static struct slot_dt9812 dt9812[DT9812_NUM_SLOTS];
-
-static inline struct usb_dt9812 *to_dt9812_dev(struct kref *d)
-{
- return container_of(d, struct usb_dt9812, kref);
-}
-
-static void dt9812_delete(struct kref *kref)
-{
- struct usb_dt9812 *dev = to_dt9812_dev(kref);
-
- usb_put_dev(dev->udev);
- kfree(dev);
-}
-
-static int dt9812_read_info(struct usb_dt9812 *dev, int offset, void *buf,
- size_t buf_size)
+static int dt9812_read_info(struct comedi_device *dev,
+ int offset, void *buf, size_t buf_size)
{
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct dt9812_private *devpriv = dev->private;
struct dt9812_usb_cmd cmd;
- int count, retval;
+ int count, ret;
cmd.cmd = cpu_to_le32(DT9812_R_FLASH_DATA);
cmd.u.flash_data_info.address =
@@ -330,25 +262,23 @@ static int dt9812_read_info(struct usb_dt9812 *dev, int offset, void *buf,
cmd.u.flash_data_info.numbytes = cpu_to_le16(buf_size);
/* DT9812 only responds to 32 byte writes!! */
- count = 32;
- retval = usb_bulk_msg(dev->udev,
- usb_sndbulkpipe(dev->udev,
- dev->command_write.addr),
- &cmd, 32, &count, HZ * 1);
- if (retval)
- return retval;
- retval = usb_bulk_msg(dev->udev,
- usb_rcvbulkpipe(dev->udev,
- dev->command_read.addr),
- buf, buf_size, &count, HZ * 1);
- return retval;
+ ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr),
+ &cmd, 32, &count, DT9812_USB_TIMEOUT);
+ if (ret)
+ return ret;
+
+ return usb_bulk_msg(usb, usb_rcvbulkpipe(usb, devpriv->cmd_rd.addr),
+ buf, buf_size, &count, DT9812_USB_TIMEOUT);
}
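Worth noting for the helpers in this file: usb_bulk_msg() takes its timeout in milliseconds, so the old HZ * 1 argument only amounted to one second on HZ=1000 kernels; the DT9812_USB_TIMEOUT constant makes the one-second intent explicit. The command/response shape shared by these helpers, as a standalone sketch (example_usb_cmd() is a placeholder, not part of the patch):

/* Send one fixed 32 byte command block; optionally read back a reply. */
static int example_usb_cmd(struct usb_device *usb, u8 ep_out, u8 ep_in,
			   struct dt9812_usb_cmd *cmd,
			   void *resp, int resp_len)
{
	int count;	/* actual transfer length, filled in by usb_bulk_msg() */
	int ret;

	/* the device only accepts 32 byte command writes */
	ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, ep_out),
			   cmd, 32, &count, DT9812_USB_TIMEOUT);
	if (ret)
		return ret;

	if (!resp)
		return 0;

	return usb_bulk_msg(usb, usb_rcvbulkpipe(usb, ep_in),
			    resp, resp_len, &count, DT9812_USB_TIMEOUT);
}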
-static int dt9812_read_multiple_registers(struct usb_dt9812 *dev, int reg_count,
- u8 *address, u8 *value)
+static int dt9812_read_multiple_registers(struct comedi_device *dev,
+ int reg_count, u8 *address,
+ u8 *value)
{
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct dt9812_private *devpriv = dev->private;
struct dt9812_usb_cmd cmd;
- int i, count, retval;
+ int i, count, ret;
cmd.cmd = cpu_to_le32(DT9812_R_MULTI_BYTE_REG);
cmd.u.read_multi_info.count = reg_count;
@@ -356,26 +286,23 @@ static int dt9812_read_multiple_registers(struct usb_dt9812 *dev, int reg_count,
cmd.u.read_multi_info.address[i] = address[i];
/* DT9812 only responds to 32 byte writes!! */
- count = 32;
- retval = usb_bulk_msg(dev->udev,
- usb_sndbulkpipe(dev->udev,
- dev->command_write.addr),
- &cmd, 32, &count, HZ * 1);
- if (retval)
- return retval;
- retval = usb_bulk_msg(dev->udev,
- usb_rcvbulkpipe(dev->udev,
- dev->command_read.addr),
- value, reg_count, &count, HZ * 1);
- return retval;
+ ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr),
+ &cmd, 32, &count, DT9812_USB_TIMEOUT);
+ if (ret)
+ return ret;
+
+ return usb_bulk_msg(usb, usb_rcvbulkpipe(usb, devpriv->cmd_rd.addr),
+ value, reg_count, &count, DT9812_USB_TIMEOUT);
}
-static int dt9812_write_multiple_registers(struct usb_dt9812 *dev,
+static int dt9812_write_multiple_registers(struct comedi_device *dev,
int reg_count, u8 *address,
u8 *value)
{
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct dt9812_private *devpriv = dev->private;
struct dt9812_usb_cmd cmd;
- int i, count, retval;
+ int i, count;
cmd.cmd = cpu_to_le32(DT9812_W_MULTI_BYTE_REG);
cmd.u.read_multi_info.count = reg_count;
@@ -383,19 +310,20 @@ static int dt9812_write_multiple_registers(struct usb_dt9812 *dev,
cmd.u.write_multi_info.write[i].address = address[i];
cmd.u.write_multi_info.write[i].value = value[i];
}
+
/* DT9812 only responds to 32 byte writes!! */
- retval = usb_bulk_msg(dev->udev,
- usb_sndbulkpipe(dev->udev,
- dev->command_write.addr),
- &cmd, 32, &count, HZ * 1);
- return retval;
+ return usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr),
+ &cmd, 32, &count, DT9812_USB_TIMEOUT);
}
-static int dt9812_rmw_multiple_registers(struct usb_dt9812 *dev, int reg_count,
+static int dt9812_rmw_multiple_registers(struct comedi_device *dev,
+ int reg_count,
struct dt9812_rmw_byte *rmw)
{
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct dt9812_private *devpriv = dev->private;
struct dt9812_usb_cmd cmd;
- int i, count, retval;
+ int i, count;
cmd.cmd = cpu_to_le32(DT9812_RMW_MULTI_BYTE_REG);
cmd.u.rmw_multi_info.count = reg_count;
@@ -403,76 +331,52 @@ static int dt9812_rmw_multiple_registers(struct usb_dt9812 *dev, int reg_count,
cmd.u.rmw_multi_info.rmw[i] = rmw[i];
/* DT9812 only responds to 32 byte writes!! */
- retval = usb_bulk_msg(dev->udev,
- usb_sndbulkpipe(dev->udev,
- dev->command_write.addr),
- &cmd, 32, &count, HZ * 1);
- return retval;
+ return usb_bulk_msg(usb, usb_sndbulkpipe(usb, devpriv->cmd_wr.addr),
+ &cmd, 32, &count, DT9812_USB_TIMEOUT);
}
-static int dt9812_digital_in(struct slot_dt9812 *slot, u8 *bits)
+static int dt9812_digital_in(struct comedi_device *dev, u8 *bits)
{
- int result = -ENODEV;
-
- down(&slot->mutex);
- if (slot->usb) {
- u8 reg[2] = { F020_SFR_P3, F020_SFR_P1 };
- u8 value[2];
+ struct dt9812_private *devpriv = dev->private;
+ u8 reg[2] = { F020_SFR_P3, F020_SFR_P1 };
+ u8 value[2];
+ int ret;
- result = dt9812_read_multiple_registers(slot->usb, 2, reg,
- value);
- if (result == 0) {
- /*
- * bits 0-6 in F020_SFR_P3 are bits 0-6 in the digital
- * input port bit 3 in F020_SFR_P1 is bit 7 in the
- * digital input port
- */
- *bits = (value[0] & 0x7f) | ((value[1] & 0x08) << 4);
- /* printk("%2.2x, %2.2x -> %2.2x\n",
- value[0], value[1], *bits); */
- }
+ down(&devpriv->sem);
+ ret = dt9812_read_multiple_registers(dev, 2, reg, value);
+ if (ret == 0) {
+ /*
+ * bits 0-6 in F020_SFR_P3 are bits 0-6 in the digital
+		 * input port; bit 3 in F020_SFR_P1 is bit 7 in the
+ * digital input port
+ */
+ *bits = (value[0] & 0x7f) | ((value[1] & 0x08) << 4);
}
- up(&slot->mutex);
+ up(&devpriv->sem);
- return result;
+ return ret;
}
-static int dt9812_digital_out(struct slot_dt9812 *slot, u8 bits)
+static int dt9812_digital_out(struct comedi_device *dev, u8 bits)
{
- int result = -ENODEV;
-
- down(&slot->mutex);
- if (slot->usb) {
- u8 reg[1];
- u8 value[1];
-
- reg[0] = F020_SFR_P2;
- value[0] = bits;
- result = dt9812_write_multiple_registers(slot->usb, 1, reg,
- value);
- slot->usb->digital_out_shadow = bits;
- }
- up(&slot->mutex);
- return result;
-}
+ struct dt9812_private *devpriv = dev->private;
+ u8 reg[1] = { F020_SFR_P2 };
+ u8 value[1] = { bits };
+ int ret;
-static int dt9812_digital_out_shadow(struct slot_dt9812 *slot, u8 *bits)
-{
- int result = -ENODEV;
+ down(&devpriv->sem);
+ ret = dt9812_write_multiple_registers(dev, 1, reg, value);
+ up(&devpriv->sem);
- down(&slot->mutex);
- if (slot->usb) {
- *bits = slot->usb->digital_out_shadow;
- result = 0;
- }
- up(&slot->mutex);
- return result;
+ return ret;
}
-static void dt9812_configure_mux(struct usb_dt9812 *dev,
+static void dt9812_configure_mux(struct comedi_device *dev,
struct dt9812_rmw_byte *rmw, int channel)
{
- if (dev->device == DT9812_DEVID_DT9812_10) {
+ struct dt9812_private *devpriv = dev->private;
+
+ if (devpriv->device == DT9812_DEVID_DT9812_10) {
/* In the DT9812/10V MUX is selected by P1.5-7 */
rmw->address = F020_SFR_P1;
rmw->and_mask = 0xe0;
@@ -485,18 +389,21 @@ static void dt9812_configure_mux(struct usb_dt9812 *dev,
}
}
-static void dt9812_configure_gain(struct usb_dt9812 *dev,
+static void dt9812_configure_gain(struct comedi_device *dev,
struct dt9812_rmw_byte *rmw,
enum dt9812_gain gain)
{
- if (dev->device == DT9812_DEVID_DT9812_10) {
- /* In the DT9812/10V, there is an external gain of 0.5 */
+ struct dt9812_private *devpriv = dev->private;
+
+ /* In the DT9812/10V, there is an external gain of 0.5 */
+ if (devpriv->device == DT9812_DEVID_DT9812_10)
gain <<= 1;
- }
rmw->address = F020_SFR_ADC0CF;
rmw->and_mask = F020_MASK_ADC0CF_AMP0GN2 |
- F020_MASK_ADC0CF_AMP0GN1 | F020_MASK_ADC0CF_AMP0GN0;
+ F020_MASK_ADC0CF_AMP0GN1 |
+ F020_MASK_ADC0CF_AMP0GN0;
+
switch (gain) {
/*
* 000 -> Gain = 1
@@ -508,8 +415,10 @@ static void dt9812_configure_gain(struct usb_dt9812 *dev,
*/
case DT9812_GAIN_0PT5:
rmw->or_value = F020_MASK_ADC0CF_AMP0GN2 |
- F020_MASK_ADC0CF_AMP0GN1;
+ F020_MASK_ADC0CF_AMP0GN1;
break;
+ default:
+ /* this should never happen, just use a gain of 1 */
case DT9812_GAIN_1:
rmw->or_value = 0x00;
break;
@@ -521,20 +430,18 @@ static void dt9812_configure_gain(struct usb_dt9812 *dev,
break;
case DT9812_GAIN_8:
rmw->or_value = F020_MASK_ADC0CF_AMP0GN1 |
- F020_MASK_ADC0CF_AMP0GN0;
+ F020_MASK_ADC0CF_AMP0GN0;
break;
case DT9812_GAIN_16:
rmw->or_value = F020_MASK_ADC0CF_AMP0GN2;
break;
- default:
- dev_err(&dev->interface->dev, "Illegal gain %d\n", gain);
-
}
}
-static int dt9812_analog_in(struct slot_dt9812 *slot, int channel, u16 *value,
- enum dt9812_gain gain)
+static int dt9812_analog_in(struct comedi_device *dev,
+ int channel, u16 *value, enum dt9812_gain gain)
{
+ struct dt9812_private *devpriv = dev->private;
struct dt9812_rmw_byte rmw[3];
u8 reg[3] = {
F020_SFR_ADC0CN,
@@ -542,31 +449,30 @@ static int dt9812_analog_in(struct slot_dt9812 *slot, int channel, u16 *value,
F020_SFR_ADC0L
};
u8 val[3];
- int result = -ENODEV;
+ int ret;
- down(&slot->mutex);
- if (!slot->usb)
- goto exit;
+ down(&devpriv->sem);
/* 1 select the gain */
- dt9812_configure_gain(slot->usb, &rmw[0], gain);
+ dt9812_configure_gain(dev, &rmw[0], gain);
/* 2 set the MUX to select the channel */
- dt9812_configure_mux(slot->usb, &rmw[1], channel);
+ dt9812_configure_mux(dev, &rmw[1], channel);
/* 3 start conversion */
rmw[2].address = F020_SFR_ADC0CN;
rmw[2].and_mask = 0xff;
rmw[2].or_value = F020_MASK_ADC0CN_AD0EN | F020_MASK_ADC0CN_AD0BUSY;
- result = dt9812_rmw_multiple_registers(slot->usb, 3, rmw);
- if (result)
+ ret = dt9812_rmw_multiple_registers(dev, 3, rmw);
+ if (ret)
goto exit;
/* read the status and ADC */
- result = dt9812_read_multiple_registers(slot->usb, 3, reg, val);
- if (result)
+ ret = dt9812_read_multiple_registers(dev, 3, reg, val);
+ if (ret)
goto exit;
+
/*
 	 * An ADC conversion takes 16 SAR clock cycles, i.e. about 9us.
* Therefore, between the instant that AD0BUSY was set via
@@ -578,7 +484,7 @@ static int dt9812_analog_in(struct slot_dt9812 *slot, int channel, u16 *value,
*/
if ((val[0] & (F020_MASK_ADC0CN_AD0INT | F020_MASK_ADC0CN_AD0BUSY)) ==
F020_MASK_ADC0CN_AD0INT) {
- switch (slot->usb->device) {
+ switch (devpriv->device) {
case DT9812_DEVID_DT9812_10:
/*
* For DT9812-10V the personality module set the
@@ -594,548 +500,392 @@ static int dt9812_analog_in(struct slot_dt9812 *slot, int channel, u16 *value,
}
exit:
- up(&slot->mutex);
- return result;
+ up(&devpriv->sem);
+
+ return ret;
}
-static int dt9812_analog_out_shadow(struct slot_dt9812 *slot, int channel,
- u16 *value)
+static int dt9812_analog_out(struct comedi_device *dev, int channel, u16 value)
{
- int result = -ENODEV;
+ struct dt9812_private *devpriv = dev->private;
+ struct dt9812_rmw_byte rmw[3];
+ int ret;
+
+ down(&devpriv->sem);
+
+ switch (channel) {
+ case 0:
+ /* 1. Set DAC mode */
+ rmw[0].address = F020_SFR_DAC0CN;
+ rmw[0].and_mask = 0xff;
+ rmw[0].or_value = F020_MASK_DACxCN_DACxEN;
+
+ /* 2 load low byte of DAC value first */
+ rmw[1].address = F020_SFR_DAC0L;
+ rmw[1].and_mask = 0xff;
+ rmw[1].or_value = value & 0xff;
+
+		/* 3 load high byte of DAC value next, to latch the
+		   12-bit value */
+ rmw[2].address = F020_SFR_DAC0H;
+ rmw[2].and_mask = 0xff;
+ rmw[2].or_value = (value >> 8) & 0xf;
+ break;
- down(&slot->mutex);
- if (slot->usb) {
- *value = slot->usb->analog_out_shadow[channel];
- result = 0;
+ case 1:
+ /* 1. Set DAC mode */
+ rmw[0].address = F020_SFR_DAC1CN;
+ rmw[0].and_mask = 0xff;
+ rmw[0].or_value = F020_MASK_DACxCN_DACxEN;
+
+ /* 2 load low byte of DAC value first */
+ rmw[1].address = F020_SFR_DAC1L;
+ rmw[1].and_mask = 0xff;
+ rmw[1].or_value = value & 0xff;
+
+		/* 3 load high byte of DAC value next, to latch the
+		   12-bit value */
+ rmw[2].address = F020_SFR_DAC1H;
+ rmw[2].and_mask = 0xff;
+ rmw[2].or_value = (value >> 8) & 0xf;
+ break;
}
- up(&slot->mutex);
+ ret = dt9812_rmw_multiple_registers(dev, 3, rmw);
+ devpriv->ao_shadow[channel] = value;
+
+ up(&devpriv->sem);
- return result;
+ return ret;
}
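The two channel arms of the switch above differ only in which DAC registers they touch (DAC0CN/DAC0L/DAC0H versus DAC1CN/DAC1L/DAC1H); a table-driven helper is one way the duplication could be factored. Sketch only, under the assumption that channel is 0 or 1 (example_fill_dac_rmw() is not part of the patch):

static void example_fill_dac_rmw(struct dt9812_rmw_byte *rmw,
				 unsigned int channel, u16 value)
{
	static const struct {
		u8 cn, lsb, msb;
	} dac_reg[2] = {
		{ F020_SFR_DAC0CN, F020_SFR_DAC0L, F020_SFR_DAC0H },
		{ F020_SFR_DAC1CN, F020_SFR_DAC1L, F020_SFR_DAC1H },
	};

	/* 1. enable the DAC */
	rmw[0].address = dac_reg[channel].cn;
	rmw[0].and_mask = 0xff;
	rmw[0].or_value = F020_MASK_DACxCN_DACxEN;

	/* 2. load the low byte first */
	rmw[1].address = dac_reg[channel].lsb;
	rmw[1].and_mask = 0xff;
	rmw[1].or_value = value & 0xff;

	/* 3. load the high byte last; this latches the 12-bit value */
	rmw[2].address = dac_reg[channel].msb;
	rmw[2].and_mask = 0xff;
	rmw[2].or_value = (value >> 8) & 0xf;
}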
-static int dt9812_analog_out(struct slot_dt9812 *slot, int channel, u16 value)
+static int dt9812_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int result = -ENODEV;
+ u8 bits = 0;
+ int ret;
- down(&slot->mutex);
- if (slot->usb) {
- struct dt9812_rmw_byte rmw[3];
+ ret = dt9812_digital_in(dev, &bits);
+ if (ret)
+ return ret;
- switch (channel) {
- case 0:
- /* 1. Set DAC mode */
- rmw[0].address = F020_SFR_DAC0CN;
- rmw[0].and_mask = 0xff;
- rmw[0].or_value = F020_MASK_DACxCN_DACxEN;
-
- /* 2 load low byte of DAC value first */
- rmw[1].address = F020_SFR_DAC0L;
- rmw[1].and_mask = 0xff;
- rmw[1].or_value = value & 0xff;
-
- /* 3 load high byte of DAC value next to latch the
- 12-bit value */
- rmw[2].address = F020_SFR_DAC0H;
- rmw[2].and_mask = 0xff;
- rmw[2].or_value = (value >> 8) & 0xf;
- break;
+ data[1] = bits;
- case 1:
- /* 1. Set DAC mode */
- rmw[0].address = F020_SFR_DAC1CN;
- rmw[0].and_mask = 0xff;
- rmw[0].or_value = F020_MASK_DACxCN_DACxEN;
-
- /* 2 load low byte of DAC value first */
- rmw[1].address = F020_SFR_DAC1L;
- rmw[1].and_mask = 0xff;
- rmw[1].or_value = value & 0xff;
-
- /* 3 load high byte of DAC value next to latch the
- 12-bit value */
- rmw[2].address = F020_SFR_DAC1H;
- rmw[2].and_mask = 0xff;
- rmw[2].or_value = (value >> 8) & 0xf;
- break;
- }
- result = dt9812_rmw_multiple_registers(slot->usb, 3, rmw);
- slot->usb->analog_out_shadow[channel] = value;
+ return insn->n;
+}
+
+static int dt9812_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int mask = data[0];
+ unsigned int bits = data[1];
+
+ if (mask) {
+ s->state &= ~mask;
+ s->state |= (bits & mask);
+
+ dt9812_digital_out(dev, s->state);
}
- up(&slot->mutex);
- return result;
+ data[1] = s->state;
+
+ return insn->n;
}
-/*
- * USB framework functions
- */
+static int dt9812_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ u16 val = 0;
+ int ret;
+ int i;
-static int dt9812_probe(struct usb_interface *interface,
- const struct usb_device_id *id)
+ for (i = 0; i < insn->n; i++) {
+ ret = dt9812_analog_in(dev, chan, &val, DT9812_GAIN_1);
+ if (ret)
+ return ret;
+ data[i] = val;
+ }
+
+ return insn->n;
+}
+
+static int dt9812_ao_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int retval = -ENOMEM;
- struct usb_dt9812 *dev = NULL;
- struct usb_host_interface *iface_desc;
- struct usb_endpoint_descriptor *endpoint;
+ struct dt9812_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
int i;
- u8 fw;
- /* allocate memory for our device state and initialize it */
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (dev == NULL)
- goto error;
+ down(&devpriv->sem);
+ for (i = 0; i < insn->n; i++)
+ data[i] = devpriv->ao_shadow[chan];
+ up(&devpriv->sem);
+
+ return insn->n;
+}
+
+static int dt9812_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int ret;
+ int i;
- kref_init(&dev->kref);
+ for (i = 0; i < insn->n; i++) {
+ ret = dt9812_analog_out(dev, chan, data[i]);
+ if (ret)
+ return ret;
+ }
- dev->udev = usb_get_dev(interface_to_usbdev(interface));
- dev->interface = interface;
+ return insn->n;
+}
- /* Check endpoints */
- iface_desc = interface->cur_altsetting;
+static int dt9812_find_endpoints(struct comedi_device *dev)
+{
+ struct usb_interface *intf = comedi_to_usb_interface(dev);
+ struct usb_host_interface *host = intf->cur_altsetting;
+ struct dt9812_private *devpriv = dev->private;
+ struct usb_endpoint_descriptor *ep;
+ int i;
- if (iface_desc->desc.bNumEndpoints != 5) {
- dev_err(&interface->dev, "Wrong number of endpoints.\n");
- retval = -ENODEV;
- goto error;
+ if (host->desc.bNumEndpoints != 5) {
+ dev_err(dev->class_dev, "Wrong number of endpoints\n");
+ return -ENODEV;
}
- for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
- int direction = -1;
- endpoint = &iface_desc->endpoint[i].desc;
+ for (i = 0; i < host->desc.bNumEndpoints; ++i) {
+ int dir = -1;
+ ep = &host->endpoint[i].desc;
switch (i) {
case 0:
- direction = USB_DIR_IN;
- dev->message_pipe.addr = endpoint->bEndpointAddress;
- dev->message_pipe.size =
- le16_to_cpu(endpoint->wMaxPacketSize);
-
+ /* unused message pipe */
+ dir = USB_DIR_IN;
break;
case 1:
- direction = USB_DIR_OUT;
- dev->command_write.addr = endpoint->bEndpointAddress;
- dev->command_write.size =
- le16_to_cpu(endpoint->wMaxPacketSize);
+ dir = USB_DIR_OUT;
+ devpriv->cmd_wr.addr = ep->bEndpointAddress;
+ devpriv->cmd_wr.size = le16_to_cpu(ep->wMaxPacketSize);
break;
case 2:
- direction = USB_DIR_IN;
- dev->command_read.addr = endpoint->bEndpointAddress;
- dev->command_read.size =
- le16_to_cpu(endpoint->wMaxPacketSize);
+ dir = USB_DIR_IN;
+ devpriv->cmd_rd.addr = ep->bEndpointAddress;
+ devpriv->cmd_rd.size = le16_to_cpu(ep->wMaxPacketSize);
break;
case 3:
- direction = USB_DIR_OUT;
- dev->write_stream.addr = endpoint->bEndpointAddress;
- dev->write_stream.size =
- le16_to_cpu(endpoint->wMaxPacketSize);
+ /* unused write stream */
+ dir = USB_DIR_OUT;
break;
case 4:
- direction = USB_DIR_IN;
- dev->read_stream.addr = endpoint->bEndpointAddress;
- dev->read_stream.size =
- le16_to_cpu(endpoint->wMaxPacketSize);
+ /* unused read stream */
+ dir = USB_DIR_IN;
break;
}
- if ((endpoint->bEndpointAddress & USB_DIR_IN) != direction) {
- dev_err(&interface->dev,
- "Endpoint has wrong direction.\n");
- retval = -ENODEV;
- goto error;
+ if ((ep->bEndpointAddress & USB_DIR_IN) != dir) {
+ dev_err(dev->class_dev,
+ "Endpoint has wrong direction\n");
+ return -ENODEV;
}
}
- if (dt9812_read_info(dev, 0, &fw, sizeof(fw)) != 0) {
+ return 0;
+}
+
+static int dt9812_reset_device(struct comedi_device *dev)
+{
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct dt9812_private *devpriv = dev->private;
+ u32 serial;
+ u16 vendor;
+ u16 product;
+ u8 tmp8;
+ __le16 tmp16;
+ __le32 tmp32;
+ int ret;
+ int i;
+
+ ret = dt9812_read_info(dev, 0, &tmp8, sizeof(tmp8));
+ if (ret) {
/*
* Seems like a configuration reset is necessary if driver is
* reloaded while device is attached
*/
- usb_reset_configuration(dev->udev);
+ usb_reset_configuration(usb);
for (i = 0; i < 10; i++) {
- retval = dt9812_read_info(dev, 1, &fw, sizeof(fw));
- if (retval == 0) {
- dev_info(&interface->dev,
- "usb_reset_configuration succeeded "
- "after %d iterations\n", i);
+ ret = dt9812_read_info(dev, 1, &tmp8, sizeof(tmp8));
+ if (ret == 0)
break;
- }
}
- }
-
- if (dt9812_read_info(dev, 1, &dev->vendor, sizeof(dev->vendor)) != 0) {
- dev_err(&interface->dev, "Failed to read vendor.\n");
- retval = -ENODEV;
- goto error;
- }
- if (dt9812_read_info(dev, 3, &dev->product, sizeof(dev->product)) != 0) {
- dev_err(&interface->dev, "Failed to read product.\n");
- retval = -ENODEV;
- goto error;
- }
- if (dt9812_read_info(dev, 5, &dev->device, sizeof(dev->device)) != 0) {
- dev_err(&interface->dev, "Failed to read device.\n");
- retval = -ENODEV;
- goto error;
- }
- if (dt9812_read_info(dev, 7, &dev->serial, sizeof(dev->serial)) != 0) {
- dev_err(&interface->dev, "Failed to read serial.\n");
- retval = -ENODEV;
- goto error;
- }
-
- dev->vendor = le16_to_cpu(dev->vendor);
- dev->product = le16_to_cpu(dev->product);
- dev->device = le16_to_cpu(dev->device);
- dev->serial = le32_to_cpu(dev->serial);
- switch (dev->device) {
- case DT9812_DEVID_DT9812_10:
- dev->analog_out_shadow[0] = 0x0800;
- dev->analog_out_shadow[1] = 0x800;
- break;
- case DT9812_DEVID_DT9812_2PT5:
- dev->analog_out_shadow[0] = 0x0000;
- dev->analog_out_shadow[1] = 0x0000;
- break;
- }
- dev->digital_out_shadow = 0;
-
- /* save our data pointer in this interface device */
- usb_set_intfdata(interface, dev);
-
- /* let the user know what node this device is now attached to */
- dev_info(&interface->dev, "USB DT9812 (%4.4x.%4.4x.%4.4x) #0x%8.8x\n",
- dev->vendor, dev->product, dev->device, dev->serial);
-
- down(&dt9812_mutex);
- {
- /* Find a slot for the USB device */
- struct slot_dt9812 *first = NULL;
- struct slot_dt9812 *best = NULL;
-
- for (i = 0; i < DT9812_NUM_SLOTS; i++) {
- if (!first && !dt9812[i].usb && dt9812[i].serial == 0)
- first = &dt9812[i];
- if (!best && dt9812[i].serial == dev->serial)
- best = &dt9812[i];
- }
-
- if (!best)
- best = first;
-
- if (best) {
- down(&best->mutex);
- best->usb = dev;
- dev->slot = best;
- up(&best->mutex);
+ if (ret) {
+ dev_err(dev->class_dev,
+ "unable to reset configuration\n");
+ return ret;
}
}
- up(&dt9812_mutex);
-
- return 0;
-
-error:
- if (dev)
- kref_put(&dev->kref, dt9812_delete);
- return retval;
-}
-static void dt9812_disconnect(struct usb_interface *interface)
-{
- struct usb_dt9812 *dev;
- int minor = interface->minor;
-
- down(&dt9812_mutex);
- dev = usb_get_intfdata(interface);
- if (dev->slot) {
- down(&dev->slot->mutex);
- dev->slot->usb = NULL;
- up(&dev->slot->mutex);
- dev->slot = NULL;
+ ret = dt9812_read_info(dev, 1, &tmp16, sizeof(tmp16));
+ if (ret) {
+ dev_err(dev->class_dev, "failed to read vendor id\n");
+ return ret;
}
- usb_set_intfdata(interface, NULL);
- up(&dt9812_mutex);
-
- /* queue final destruction */
- kref_put(&dev->kref, dt9812_delete);
+ vendor = le16_to_cpu(tmp16);
- dev_info(&interface->dev, "USB Dt9812 #%d now disconnected\n", minor);
-}
-
-static struct usb_driver dt9812_usb_driver = {
- .name = "dt9812",
- .probe = dt9812_probe,
- .disconnect = dt9812_disconnect,
- .id_table = dt9812_table,
-};
-
-/*
- * Comedi functions
- */
-
-static int dt9812_comedi_open(struct comedi_device *dev)
-{
- struct comedi_dt9812 *devpriv = dev->private;
- int result = -ENODEV;
-
- down(&devpriv->slot->mutex);
- if (devpriv->slot->usb) {
- /* We have an attached device, fill in current range info */
- struct comedi_subdevice *s;
-
- s = &dev->subdevices[0];
- s->n_chan = 8;
- s->maxdata = 1;
-
- s = &dev->subdevices[1];
- s->n_chan = 8;
- s->maxdata = 1;
-
- s = &dev->subdevices[2];
- s->n_chan = 8;
- switch (devpriv->slot->usb->device) {
- case 0:{
- s->maxdata = 4095;
- s->range_table = &range_bipolar10;
- }
- break;
- case 1:{
- s->maxdata = 4095;
- s->range_table = &range_unipolar2_5;
- }
- break;
- }
-
- s = &dev->subdevices[3];
- s->n_chan = 2;
- switch (devpriv->slot->usb->device) {
- case 0:{
- s->maxdata = 4095;
- s->range_table = &range_bipolar10;
- }
- break;
- case 1:{
- s->maxdata = 4095;
- s->range_table = &range_unipolar2_5;
- }
- break;
- }
- result = 0;
+ ret = dt9812_read_info(dev, 3, &tmp16, sizeof(tmp16));
+ if (ret) {
+ dev_err(dev->class_dev, "failed to read product id\n");
+ return ret;
}
- up(&devpriv->slot->mutex);
- return result;
-}
+ product = le16_to_cpu(tmp16);
-static int dt9812_di_rinsn(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn,
- unsigned int *data)
-{
- struct comedi_dt9812 *devpriv = dev->private;
- unsigned int channel = CR_CHAN(insn->chanspec);
- int n;
- u8 bits = 0;
-
- dt9812_digital_in(devpriv->slot, &bits);
- for (n = 0; n < insn->n; n++)
- data[n] = ((1 << channel) & bits) != 0;
- return n;
-}
-
-static int dt9812_do_winsn(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn,
- unsigned int *data)
-{
- struct comedi_dt9812 *devpriv = dev->private;
- unsigned int channel = CR_CHAN(insn->chanspec);
- int n;
- u8 bits = 0;
-
- dt9812_digital_out_shadow(devpriv->slot, &bits);
- for (n = 0; n < insn->n; n++) {
- u8 mask = 1 << channel;
-
- bits &= ~mask;
- if (data[n])
- bits |= mask;
+ ret = dt9812_read_info(dev, 5, &tmp16, sizeof(tmp16));
+ if (ret) {
+ dev_err(dev->class_dev, "failed to read device id\n");
+ return ret;
}
- dt9812_digital_out(devpriv->slot, bits);
- return n;
-}
-
-static int dt9812_ai_rinsn(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn,
- unsigned int *data)
-{
- struct comedi_dt9812 *devpriv = dev->private;
- unsigned int channel = CR_CHAN(insn->chanspec);
- int n;
-
- for (n = 0; n < insn->n; n++) {
- u16 value = 0;
+ devpriv->device = le16_to_cpu(tmp16);
- dt9812_analog_in(devpriv->slot, channel, &value, DT9812_GAIN_1);
- data[n] = value;
+ ret = dt9812_read_info(dev, 7, &tmp32, sizeof(tmp32));
+ if (ret) {
+ dev_err(dev->class_dev, "failed to read serial number\n");
+ return ret;
}
- return n;
-}
+ serial = le32_to_cpu(tmp32);
-static int dt9812_ao_rinsn(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn,
- unsigned int *data)
-{
- struct comedi_dt9812 *devpriv = dev->private;
- unsigned int channel = CR_CHAN(insn->chanspec);
- int n;
- u16 value;
-
- for (n = 0; n < insn->n; n++) {
- value = 0;
- dt9812_analog_out_shadow(devpriv->slot, channel, &value);
- data[n] = value;
- }
- return n;
-}
+ /* let the user know what node this device is now attached to */
+ dev_info(dev->class_dev, "USB DT9812 (%4.4x.%4.4x.%4.4x) #0x%8.8x\n",
+ vendor, product, devpriv->device, serial);
-static int dt9812_ao_winsn(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn,
- unsigned int *data)
-{
- struct comedi_dt9812 *devpriv = dev->private;
- unsigned int channel = CR_CHAN(insn->chanspec);
- int n;
+ if (devpriv->device != DT9812_DEVID_DT9812_10 &&
+ devpriv->device != DT9812_DEVID_DT9812_2PT5) {
+ dev_err(dev->class_dev, "Unsupported device!\n");
+ return -EINVAL;
+ }
- for (n = 0; n < insn->n; n++)
- dt9812_analog_out(devpriv->slot, channel, data[n]);
- return n;
+ return 0;
}
-static int dt9812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
+static int dt9812_auto_attach(struct comedi_device *dev,
+ unsigned long context)
{
- struct comedi_dt9812 *devpriv;
- int i;
+ struct usb_interface *intf = comedi_to_usb_interface(dev);
+ struct dt9812_private *devpriv;
struct comedi_subdevice *s;
+ bool is_unipolar;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
- /*
- * Special open routine, since USB unit may be unattached at
- * comedi_config time, hence range can not be determined
- */
- dev->open = dt9812_comedi_open;
+ sema_init(&devpriv->sem, 1);
+ usb_set_intfdata(intf, devpriv);
- devpriv->serial = it->options[0];
+ ret = dt9812_find_endpoints(dev);
+ if (ret)
+ return ret;
+
+ ret = dt9812_reset_device(dev);
+ if (ret)
+ return ret;
+
+ is_unipolar = (devpriv->device == DT9812_DEVID_DT9812_2PT5);
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
return ret;
- /* digital input subdevice */
+ /* Digital Input subdevice */
s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 0;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_read = &dt9812_di_rinsn;
-
- /* digital output subdevice */
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 8;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = dt9812_di_insn_bits;
+
+ /* Digital Output subdevice */
s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITEABLE;
- s->n_chan = 0;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_write = &dt9812_do_winsn;
-
- /* analog input subdevice */
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITEABLE;
+ s->n_chan = 8;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = dt9812_do_insn_bits;
+
+ /* Analog Input subdevice */
s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- s->n_chan = 0;
- s->maxdata = 1;
- s->range_table = NULL;
- s->insn_read = &dt9812_ai_rinsn;
-
- /* analog output subdevice */
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND;
+ s->n_chan = 8;
+ s->maxdata = 0x0fff;
+ s->range_table = is_unipolar ? &range_unipolar2_5 : &range_bipolar10;
+ s->insn_read = dt9812_ai_insn_read;
+
+ /* Analog Output subdevice */
s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITEABLE;
- s->n_chan = 0;
- s->maxdata = 1;
- s->range_table = NULL;
- s->insn_write = &dt9812_ao_winsn;
- s->insn_read = &dt9812_ao_rinsn;
-
- dev_info(dev->class_dev, "successfully attached to dt9812.\n");
-
- down(&dt9812_mutex);
- /* Find a slot for the comedi device */
- {
- struct slot_dt9812 *first = NULL;
- struct slot_dt9812 *best = NULL;
- for (i = 0; i < DT9812_NUM_SLOTS; i++) {
- if (!first && !dt9812[i].comedi) {
- /* First free slot from comedi side */
- first = &dt9812[i];
- }
- if (!best &&
- dt9812[i].usb &&
- dt9812[i].usb->serial == devpriv->serial) {
- /* We have an attaced device with matching ID */
- best = &dt9812[i];
- }
- }
- if (!best)
- best = first;
- if (best) {
- down(&best->mutex);
- best->comedi = devpriv;
- best->serial = devpriv->serial;
- devpriv->slot = best;
- up(&best->mutex);
- }
- }
- up(&dt9812_mutex);
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITEABLE;
+ s->n_chan = 2;
+ s->maxdata = 0x0fff;
+ s->range_table = is_unipolar ? &range_unipolar2_5 : &range_bipolar10;
+ s->insn_write = dt9812_ao_insn_write;
+ s->insn_read = dt9812_ao_insn_read;
+
+ devpriv->ao_shadow[0] = is_unipolar ? 0x0000 : 0x0800;
+ devpriv->ao_shadow[1] = is_unipolar ? 0x0000 : 0x0800;
return 0;
}
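Since the subdevice layout and ranges are now fixed here at attach time (subdevice 2 is the 8-channel AI, bipolar 10 V on the DT9812-10 or unipolar 2.5 V on the DT9812-2PT5), a plain synchronous read needs no special-case open handling. A small comedilib sketch; the device node is an assumption:

	#include <stdio.h>
	#include <comedilib.h>

	int main(void)
	{
		comedi_t *dev = comedi_open("/dev/comedi0");
		lsampl_t sample;

		if (!dev)
			return 1;
		/* subdevice 2 = AI, channel 0, range 0 (the only range), ground ref */
		if (comedi_data_read(dev, 2, 0, 0, AREF_GROUND, &sample) < 0)
			return 1;
		printf("raw AI sample: %u (maxdata %u)\n",
		       sample, comedi_get_maxdata(dev, 2, 0));
		comedi_close(dev);
		return 0;
	}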
static void dt9812_detach(struct comedi_device *dev)
{
- /* Nothing to cleanup */
-}
+ struct usb_interface *intf = comedi_to_usb_interface(dev);
+ struct dt9812_private *devpriv = dev->private;
-static struct comedi_driver dt9812_comedi_driver = {
- .module = THIS_MODULE,
- .driver_name = "dt9812",
- .attach = dt9812_attach,
- .detach = dt9812_detach,
-};
+ if (!devpriv)
+ return;
-static int __init usb_dt9812_init(void)
-{
- int i;
+ down(&devpriv->sem);
- /* Initialize all driver slots */
- for (i = 0; i < DT9812_NUM_SLOTS; i++) {
- sema_init(&dt9812[i].mutex, 1);
- dt9812[i].serial = 0;
- dt9812[i].usb = NULL;
- dt9812[i].comedi = NULL;
- }
- dt9812[12].serial = 0x0;
+ usb_set_intfdata(intf, NULL);
- return comedi_usb_driver_register(&dt9812_comedi_driver,
- &dt9812_usb_driver);
+ up(&devpriv->sem);
}
-static void __exit usb_dt9812_exit(void)
+static struct comedi_driver dt9812_driver = {
+ .driver_name = "dt9812",
+ .module = THIS_MODULE,
+ .auto_attach = dt9812_auto_attach,
+ .detach = dt9812_detach,
+};
+
+static int dt9812_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
{
- comedi_usb_driver_unregister(&dt9812_comedi_driver, &dt9812_usb_driver);
+ return comedi_usb_auto_config(intf, &dt9812_driver, id->driver_info);
}
-module_init(usb_dt9812_init);
-module_exit(usb_dt9812_exit);
+static const struct usb_device_id dt9812_usb_table[] = {
+ { USB_DEVICE(0x0867, 0x9812) },
+ { }
+};
+MODULE_DEVICE_TABLE(usb, dt9812_usb_table);
+
+static struct usb_driver dt9812_usb_driver = {
+ .name = "dt9812",
+ .id_table = dt9812_usb_table,
+ .probe = dt9812_usb_probe,
+ .disconnect = comedi_usb_auto_unconfig,
+};
+module_comedi_usb_driver(dt9812_driver, dt9812_usb_driver);
MODULE_AUTHOR("Anders Blomdell <anders.blomdell@control.lth.se>");
MODULE_DESCRIPTION("Comedi DT9812 driver");
diff --git a/drivers/staging/comedi/drivers/dyna_pci10xx.c b/drivers/staging/comedi/drivers/dyna_pci10xx.c
index 93ec8e4..fd525f4 100644
--- a/drivers/staging/comedi/drivers/dyna_pci10xx.c
+++ b/drivers/staging/comedi/drivers/dyna_pci10xx.c
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
@@ -37,6 +33,8 @@
their cards in their manuals.
*/
+#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/mutex.h>
@@ -187,10 +185,9 @@ static int dyna_pci10xx_auto_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/fl512.c b/drivers/staging/comedi/drivers/fl512.c
index ff6f0bd..8d70f64 100644
--- a/drivers/staging/comedi/drivers/fl512.c
+++ b/drivers/staging/comedi/drivers/fl512.c
@@ -18,10 +18,10 @@ Configuration options:
#define DEBUG 0
+#include <linux/module.h>
#include "../comedidev.h"
#include <linux/delay.h>
-#include <linux/ioport.h>
#define FL512_SIZE 16 /* the size of the used memory */
struct fl512_private {
@@ -118,10 +118,9 @@ static int fl512_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_alloc_subdevices(dev, 2);
if (ret)
diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c
index 0c061df..559bf55 100644
--- a/drivers/staging/comedi/drivers/gsc_hpdi.c
+++ b/drivers/staging/comedi/drivers/gsc_hpdi.c
@@ -18,12 +18,7 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-************************************************************************/
+*/
/*
* Driver: gsc_hpdi
@@ -47,6 +42,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -228,37 +224,26 @@ struct hpdi_private {
volatile uint32_t bits[24];
/* number of bytes at which to generate COMEDI_CB_BLOCK events */
volatile unsigned int block_size;
- unsigned dio_config_output:1;
};
static int dio_config_insn(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
unsigned int *data)
{
- struct hpdi_private *devpriv = dev->private;
+ int ret;
switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- devpriv->dio_config_output = 1;
- return insn->n;
- break;
- case INSN_CONFIG_DIO_INPUT:
- devpriv->dio_config_output = 0;
- return insn->n;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] =
- devpriv->dio_config_output ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
case INSN_CONFIG_BLOCK_SIZE:
return dio_config_block_size(dev, data);
- break;
default:
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0xffffffff);
+ if (ret)
+ return ret;
break;
}
- return -EINVAL;
+ return insn->n;
}
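The open-coded INSN_CONFIG_DIO_INPUT/OUTPUT/QUERY cases are gone because comedi_dio_insn_config() now keeps the direction in s->io_bits; with the 0xffffffff mask all 32 channels switch together, matching the old single dio_config_output flag, which is why hpdi_cmd_test() and hpdi_cmd() below test s->io_bits. From userspace the same path is driven by the usual comedilib direction call, sketched here with an assumed device node and subdevice number:

	#include <comedilib.h>

	int main(void)
	{
		comedi_t *dev = comedi_open("/dev/comedi0");

		if (!dev)
			return 1;
		/* INSN_CONFIG_DIO_INPUT for channel 0 of DIO subdevice 0;
		 * on gsc_hpdi this switches the whole 32-bit port to input */
		if (comedi_dio_config(dev, 0, 0, COMEDI_INPUT) < 0)
			return 1;
		comedi_close(dev);
		return 0;
	}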
static void disable_plx_interrupts(struct comedi_device *dev)
@@ -488,10 +473,9 @@ static int hpdi_auto_attach(struct comedi_device *dev,
dev->board_ptr = thisboard;
dev->board_name = thisboard->name;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
retval = comedi_pci_enable(dev);
if (retval)
@@ -678,9 +662,7 @@ static int di_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
static int hpdi_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
- struct hpdi_private *devpriv = dev->private;
-
- if (devpriv->dio_config_output)
+ if (s->io_bits)
return -EINVAL;
else
return di_cmd_test(dev, s, cmd);
@@ -751,9 +733,7 @@ static int di_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
static int hpdi_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
- struct hpdi_private *devpriv = dev->private;
-
- if (devpriv->dio_config_output)
+ if (s->io_bits)
return -EINVAL;
else
return di_cmd(dev, s);
diff --git a/drivers/staging/comedi/drivers/icp_multi.c b/drivers/staging/comedi/drivers/icp_multi.c
index 08ab9d6..3889d23 100644
--- a/drivers/staging/comedi/drivers/icp_multi.c
+++ b/drivers/staging/comedi/drivers/icp_multi.c
@@ -13,11 +13,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
@@ -47,6 +42,7 @@ There are 4 x 12-bit Analogue Outputs. Ranges : 5V, 10V, +/-5V, +/-10V
Configuration options: not applicable, uses PCI auto config
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -500,10 +496,9 @@ static int icp_multi_auto_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/ii_pci20kc.c b/drivers/staging/comedi/drivers/ii_pci20kc.c
index ee7537d..5c3a318 100644
--- a/drivers/staging/comedi/drivers/ii_pci20kc.c
+++ b/drivers/staging/comedi/drivers/ii_pci20kc.c
@@ -1,662 +1,537 @@
/*
- * comedi/drivers/ii_pci20kc.c
- * Driver for Intelligent Instruments PCI-20001C carrier board
- * and modules.
+ * ii_pci20kc.c
+ * Driver for Intelligent Instruments PCI-20001C carrier board and modules.
*
- * Copyright (C) 2000 Markus Kempf <kempf@matsci.uni-sb.de>
- * with suggestions from David Schleef
- * 16.06.2000
- *
- * Linux device driver for COMEDI
- * Intelligent Instrumentation
- * PCI-20001 C-2A Carrier Board
- * PCI-20341 M-1A 16-Bit analog input module
- * - differential
- * - range (-5V - +5V)
- * - 16 bit
- * PCI-20006 M-2 16-Bit analog output module
- * - ranges (-10V - +10V) (0V - +10V) (-5V - +5V)
- * - 16 bit
- *
- * only ONE PCI-20341 module possible
- * only ONE PCI-20006 module possible
- * no extern trigger implemented
- *
- * NOT WORKING (but soon) only 4 on-board differential channels supported
- * NOT WORKING (but soon) only ONE di-port and ONE do-port supported
- * instead of 4 digital ports
- * di-port == Port 0
- * do-port == Port 1
- *
- * The state of this driver is only a starting point for a complete
- * COMEDI-driver. The final driver should support all features of the
- * carrier board and modules.
+ * Copyright (C) 2000 Markus Kempf <kempf@matsci.uni-sb.de>
+ * with suggestions from David Schleef 16.06.2000
+ */
+
+/*
+ * Driver: ii_pci20kc
+ * Description: Intelligent Instruments PCI-20001C carrier board
+ * Devices: (Intelligent Instrumentation) PCI-20001C [ii_pci20kc]
+ * Author: Markus Kempf <kempf@matsci.uni-sb.de>
+ * Status: works
*
- * The test configuration:
+ * Supports the PCI-20001C-1a and PCI-20001C-2a carrier boards. The
+ * -2a version has 32 on-board DIO channels. Three add-on modules
+ * can be added to the carrier board for additional functionality.
*
- * kernel 2.2.14 with RTAI v1.2 and patch-2.2.14rthal2
- * COMEDI 0.7.45
- * COMEDILIB 0.7.9
+ * Supported add-on modules:
+ * PCI-20006M-1 1 channel, 16-bit analog output module
+ * PCI-20006M-2 2 channel, 16-bit analog output module
+ * PCI-20341M-1A 4 channel, 16-bit analog input module
*
+ * Options:
+ * 0 Board base address
+ * 1 IRQ (not used)
*/
-/*
-Driver: ii_pci20kc
-Description: Intelligent Instruments PCI-20001C carrier board
-Author: Markus Kempf <kempf@matsci.uni-sb.de>
-Devices: [Intelligent Instrumentation] PCI-20001C (ii_pci20kc)
-Status: works
-
-Supports the PCI-20001 C-2a Carrier board, and could probably support
-the other carrier boards with small modifications. Modules supported
-are:
- PCI-20006 M-2 16-bit analog output module
- PCI-20341 M-1A 16-bit analog input module
-
-Options:
- 0 Board base address
- 1 IRQ
- 2 first option for module 1
- 3 second option for module 1
- 4 first option for module 2
- 5 second option for module 2
- 6 first option for module 3
- 7 second option for module 3
-
-options for PCI-20006M:
- first: Analog output channel 0 range configuration
- 0 bipolar 10 (-10V -- +10V)
- 1 unipolar 10 (0V -- +10V)
- 2 bipolar 5 (-5V -- 5V)
- second: Analog output channel 1 range configuration
-
-options for PCI-20341M:
- first: Analog input gain configuration
- 0 1
- 1 10
- 2 100
- 3 200
-*/
+#include <linux/module.h>
#include "../comedidev.h"
-#define PCI20000_ID 0x1d
-#define PCI20341_ID 0x77
-#define PCI20006_ID 0xe3
-#define PCI20xxx_EMPTY_ID 0xff
-
-#define PCI20000_OFFSET 0x100
-#define PCI20000_MODULES 3
-
-#define PCI20000_DIO_0 0x80
-#define PCI20000_DIO_1 0x81
-#define PCI20000_DIO_2 0xc0
-#define PCI20000_DIO_3 0xc1
-#define PCI20000_DIO_CONTROL_01 0x83 /* port 0, 1 control */
-#define PCI20000_DIO_CONTROL_23 0xc3 /* port 2, 3 control */
-#define PCI20000_DIO_BUFFER 0x82 /* buffer direction & enable */
-#define PCI20000_DIO_EOC 0xef /* even port, control output */
-#define PCI20000_DIO_OOC 0xfd /* odd port, control output */
-#define PCI20000_DIO_EIC 0x90 /* even port, control input */
-#define PCI20000_DIO_OIC 0x82 /* odd port, control input */
-#define DIO_CAND 0x12 /* and bit 1 & 4 of control */
-#define DIO_BE 0x01 /* buffer: port enable */
-#define DIO_BO 0x04 /* buffer: output */
-#define DIO_BI 0x05 /* buffer: input */
-#define DIO_PS_0 0x00 /* buffer: port shift 0 */
-#define DIO_PS_1 0x01 /* buffer: port shift 1 */
-#define DIO_PS_2 0x04 /* buffer: port shift 2 */
-#define DIO_PS_3 0x05 /* buffer: port shift 3 */
-
-#define PCI20006_LCHAN0 0x0d
-#define PCI20006_STROBE0 0x0b
-#define PCI20006_LCHAN1 0x15
-#define PCI20006_STROBE1 0x13
-
-#define PCI20341_INIT 0x04
-#define PCI20341_REPMODE 0x00 /* single shot mode */
-#define PCI20341_PACER 0x00 /* Hardware Pacer disabled */
-#define PCI20341_CHAN_NR 0x04 /* number of input channels */
-#define PCI20341_CONFIG_REG 0x10
-#define PCI20341_MOD_STATUS 0x01
-#define PCI20341_OPT_REG 0x11
-#define PCI20341_SET_TIME_REG 0x15
-#define PCI20341_LCHAN_ADDR_REG 0x13
-#define PCI20341_CHAN_LIST 0x80
-#define PCI20341_CC_RESET 0x1b
-#define PCI20341_CHAN_RESET 0x19
-#define PCI20341_SOFT_PACER 0x04
-#define PCI20341_STATUS_REG 0x12
-#define PCI20341_LDATA 0x02
-#define PCI20341_DAISY_CHAIN 0x20 /* On-board inputs only */
-#define PCI20341_MUX 0x04 /* Enable on-board MUX */
-#define PCI20341_SCANLIST 0x80 /* Channel/Gain Scan List */
-
-union pci20xxx_subdev_private {
- void __iomem *iobase;
- struct {
- void __iomem *iobase;
- const struct comedi_lrange *ao_range_list[2];
- /* range of channels of ao module */
- unsigned int last_data[2];
- } pci20006;
- struct {
- void __iomem *iobase;
- int timebase;
- int settling_time;
- int ai_gain;
- } pci20341;
+/*
+ * Register I/O map
+ */
+#define II20K_MOD_OFFSET 0x100
+#define II20K_ID_REG 0x00
+#define II20K_ID_MOD1_EMPTY (1 << 7)
+#define II20K_ID_MOD2_EMPTY (1 << 6)
+#define II20K_ID_MOD3_EMPTY (1 << 5)
+#define II20K_ID_MASK 0x1f
+#define II20K_ID_PCI20001C_1A 0x1b /* no on-board DIO */
+#define II20K_ID_PCI20001C_2A 0x1d /* on-board DIO */
+#define II20K_MOD_STATUS_REG 0x40
+#define II20K_MOD_STATUS_IRQ_MOD1 (1 << 7)
+#define II20K_MOD_STATUS_IRQ_MOD2 (1 << 6)
+#define II20K_MOD_STATUS_IRQ_MOD3 (1 << 5)
+#define II20K_DIO0_REG 0x80
+#define II20K_DIO1_REG 0x81
+#define II20K_DIR_ENA_REG 0x82
+#define II20K_DIR_DIO3_OUT (1 << 7)
+#define II20K_DIR_DIO2_OUT (1 << 6)
+#define II20K_BUF_DISAB_DIO3 (1 << 5)
+#define II20K_BUF_DISAB_DIO2 (1 << 4)
+#define II20K_DIR_DIO1_OUT (1 << 3)
+#define II20K_DIR_DIO0_OUT (1 << 2)
+#define II20K_BUF_DISAB_DIO1 (1 << 1)
+#define II20K_BUF_DISAB_DIO0 (1 << 0)
+#define II20K_CTRL01_REG 0x83
+#define II20K_CTRL01_SET (1 << 7)
+#define II20K_CTRL01_DIO0_IN (1 << 4)
+#define II20K_CTRL01_DIO1_IN (1 << 1)
+#define II20K_DIO2_REG 0xc0
+#define II20K_DIO3_REG 0xc1
+#define II20K_CTRL23_REG 0xc3
+#define II20K_CTRL23_SET (1 << 7)
+#define II20K_CTRL23_DIO2_IN (1 << 4)
+#define II20K_CTRL23_DIO3_IN (1 << 1)
+
+#define II20K_ID_PCI20006M_1 0xe2 /* 1 AO channel */
+#define II20K_ID_PCI20006M_2 0xe3 /* 2 AO channels */
+#define II20K_AO_STRB_REG(x) (0x0b + ((x) * 0x08))
+#define II20K_AO_LSB_REG(x) (0x0d + ((x) * 0x08))
+#define II20K_AO_MSB_REG(x) (0x0e + ((x) * 0x08))
+#define II20K_AO_STRB_BOTH_REG 0x1b
+
+#define II20K_ID_PCI20341M_1 0x77 /* 4 AI channels */
+#define II20K_AI_STATUS_CMD_REG 0x01
+#define II20K_AI_STATUS_CMD_BUSY (1 << 7)
+#define II20K_AI_STATUS_CMD_HW_ENA (1 << 1)
+#define II20K_AI_STATUS_CMD_EXT_START (1 << 0)
+#define II20K_AI_LSB_REG 0x02
+#define II20K_AI_MSB_REG 0x03
+#define II20K_AI_PACER_RESET_REG 0x04
+#define II20K_AI_16BIT_DATA_REG 0x06
+#define II20K_AI_CONF_REG 0x10
+#define II20K_AI_CONF_ENA (1 << 2)
+#define II20K_AI_OPT_REG 0x11
+#define II20K_AI_OPT_TRIG_ENA (1 << 5)
+#define II20K_AI_OPT_TRIG_INV (1 << 4)
+#define II20K_AI_OPT_TIMEBASE(x) (((x) & 0x3) << 1)
+#define II20K_AI_OPT_BURST_MODE (1 << 0)
+#define II20K_AI_STATUS_REG 0x12
+#define II20K_AI_STATUS_INT (1 << 7)
+#define II20K_AI_STATUS_TRIG (1 << 6)
+#define II20K_AI_STATUS_TRIG_ENA (1 << 5)
+#define II20K_AI_STATUS_PACER_ERR (1 << 2)
+#define II20K_AI_STATUS_DATA_ERR (1 << 1)
+#define II20K_AI_STATUS_SET_TIME_ERR (1 << 0)
+#define II20K_AI_LAST_CHAN_ADDR_REG 0x13
+#define II20K_AI_CUR_ADDR_REG 0x14
+#define II20K_AI_SET_TIME_REG 0x15
+#define II20K_AI_DELAY_LSB_REG 0x16
+#define II20K_AI_DELAY_MSB_REG 0x17
+#define II20K_AI_CHAN_ADV_REG 0x18
+#define II20K_AI_CHAN_RESET_REG 0x19
+#define II20K_AI_START_TRIG_REG 0x1a
+#define II20K_AI_COUNT_RESET_REG 0x1b
+#define II20K_AI_CHANLIST_REG 0x80
+#define II20K_AI_CHANLIST_ONBOARD_ONLY (1 << 5)
+#define II20K_AI_CHANLIST_GAIN(x) (((x) & 0x3) << 3)
+#define II20K_AI_CHANLIST_MUX_ENA (1 << 2)
+#define II20K_AI_CHANLIST_CHAN(x) (((x) & 0x3) << 0)
+#define II20K_AI_CHANLIST_LEN 0x80
+
+/* the AO range is set by jumpers on the 20006M module */
+static const struct comedi_lrange ii20k_ao_ranges = {
+ 3, {
+ BIP_RANGE(5), /* Chan 0 - W1/W3 in Chan 1 - W2/W4 in */
+ UNI_RANGE(10), /* Chan 0 - W1/W3 out Chan 1 - W2/W4 in */
+ BIP_RANGE(10) /* Chan 0 - W1/W3 in Chan 1 - W2/W4 out */
+ }
};
-struct pci20xxx_private {
-
- void __iomem *ioaddr;
- union pci20xxx_subdev_private subdev_private[PCI20000_MODULES];
+static const struct comedi_lrange ii20k_ai_ranges = {
+ 4, {
+ BIP_RANGE(5), /* gain 1 */
+ BIP_RANGE(0.5), /* gain 10 */
+ BIP_RANGE(0.05), /* gain 100 */
+ BIP_RANGE(0.025) /* gain 200 */
+ },
};
-#define CHAN (CR_CHAN(it->chanlist[0]))
+struct ii20k_ao_private {
+ unsigned int last_data[2];
+};
-static int pci20006_init(struct comedi_device *dev, struct comedi_subdevice *s,
- int opt0, int opt1);
-static int pci20341_init(struct comedi_device *dev, struct comedi_subdevice *s,
- int opt0, int opt1);
-static int pci20xxx_dio_init(struct comedi_device *dev,
- struct comedi_subdevice *s);
+struct ii20k_private {
+ void __iomem *ioaddr;
+};
-/*
- options[0] Board base address
- options[1] IRQ
- options[2] first option for module 1
- options[3] second option for module 1
- options[4] first option for module 2
- options[5] second option for module 2
- options[6] first option for module 3
- options[7] second option for module 3
-
- options for PCI-20341M:
- first Analog input gain configuration
- 0 == 1
- 1 == 10
- 2 == 100
- 3 == 200
-
- options for PCI-20006M:
- first Analog output channel 0 range configuration
- 0 == bipolar 10 (-10V -- +10V)
- 1 == unipolar 10V (0V -- +10V)
- 2 == bipolar 5V (-5V -- +5V)
- second Analog output channel 1 range configuration
- 0 == bipolar 10 (-10V -- +10V)
- 1 == unipolar 10V (0V -- +10V)
- 2 == bipolar 5V (-5V -- +5V)
-*/
-static int pci20xxx_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
+static void __iomem *ii20k_module_iobase(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- struct pci20xxx_private *devpriv;
- unsigned char i;
- int ret;
- int id;
- struct comedi_subdevice *s;
- union pci20xxx_subdev_private *sdp;
-
- ret = comedi_alloc_subdevices(dev, 1 + PCI20000_MODULES);
- if (ret)
- return ret;
+ struct ii20k_private *devpriv = dev->private;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
- if (!devpriv)
- return -ENOMEM;
- dev->private = devpriv;
-
- devpriv->ioaddr = (void __iomem *)(unsigned long)it->options[0];
+ return devpriv->ioaddr + (s->index + 1) * II20K_MOD_OFFSET;
+}
- /* Check PCI-20001 C-2A Carrier Board ID */
- if ((readb(devpriv->ioaddr) & PCI20000_ID) != PCI20000_ID) {
- dev_warn(dev->class_dev,
- "PCI-20001 C-2A Carrier Board at base=0x%p not found !\n",
- devpriv->ioaddr);
- return -EINVAL;
- }
- dev_info(dev->class_dev, "PCI-20001 C-2A at base=0x%p\n",
- devpriv->ioaddr);
-
- for (i = 0; i < PCI20000_MODULES; i++) {
- s = &dev->subdevices[i];
- id = readb(devpriv->ioaddr + (i + 1) * PCI20000_OFFSET);
- s->private = devpriv->subdev_private + i;
- sdp = s->private;
- switch (id) {
- case PCI20006_ID:
- sdp->pci20006.iobase =
- devpriv->ioaddr + (i + 1) * PCI20000_OFFSET;
- pci20006_init(dev, s, it->options[2 * i + 2],
- it->options[2 * i + 3]);
- dev_info(dev->class_dev,
- "PCI-20006 module in slot %d\n", i + 1);
- break;
- case PCI20341_ID:
- sdp->pci20341.iobase =
- devpriv->ioaddr + (i + 1) * PCI20000_OFFSET;
- pci20341_init(dev, s, it->options[2 * i + 2],
- it->options[2 * i + 3]);
- dev_info(dev->class_dev,
- "PCI-20341 module in slot %d\n", i + 1);
- break;
- default:
- dev_warn(dev->class_dev,
- "unknown module code 0x%02x in slot %d: module disabled\n",
- id, i); /* XXX this looks like a bug! i + 1 ?? */
- /* fall through */
- case PCI20xxx_EMPTY_ID:
- s->type = COMEDI_SUBD_UNUSED;
- break;
- }
- }
+static int ii20k_ao_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct ii20k_ao_private *ao_spriv = s->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int i;
- /* initialize struct pci20xxx_private */
- pci20xxx_dio_init(dev, &dev->subdevices[PCI20000_MODULES]);
+ for (i = 0; i < insn->n; i++)
+ data[i] = ao_spriv->last_data[chan];
- return 1;
+ return insn->n;
}
-static void pci20xxx_detach(struct comedi_device *dev)
+static int ii20k_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- /* Nothing to cleanup */
-}
+ struct ii20k_ao_private *ao_spriv = s->private;
+ void __iomem *iobase = ii20k_module_iobase(dev, s);
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val = ao_spriv->last_data[chan];
+ int i;
-/* pci20006m */
+ for (i = 0; i < insn->n; i++) {
+ val = data[i];
-static int pci20006_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data);
-static int pci20006_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data);
+ /* munge data */
+ val += ((s->maxdata + 1) >> 1);
+ val &= s->maxdata;
-static const struct comedi_lrange *pci20006_range_list[] = {
- &range_bipolar10,
- &range_unipolar10,
- &range_bipolar5,
-};
+ writeb(val & 0xff, iobase + II20K_AO_LSB_REG(chan));
+ writeb((val >> 8) & 0xff, iobase + II20K_AO_MSB_REG(chan));
+ writeb(0x00, iobase + II20K_AO_STRB_REG(chan));
+ }
-static int pci20006_init(struct comedi_device *dev, struct comedi_subdevice *s,
- int opt0, int opt1)
-{
- union pci20xxx_subdev_private *sdp = s->private;
-
- if (opt0 < 0 || opt0 > 2)
- opt0 = 0;
- if (opt1 < 0 || opt1 > 2)
- opt1 = 0;
-
- sdp->pci20006.ao_range_list[0] = pci20006_range_list[opt0];
- sdp->pci20006.ao_range_list[1] = pci20006_range_list[opt1];
-
- /* ao subdevice */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 2;
- s->len_chanlist = 2;
- s->insn_read = pci20006_insn_read;
- s->insn_write = pci20006_insn_write;
- s->maxdata = 0xffff;
- s->range_table_list = sdp->pci20006.ao_range_list;
- return 0;
+ ao_spriv->last_data[chan] = val;
+
+ return insn->n;
}
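The munge step maps between COMEDI's offset-binary samples and the module's two's-complement data by adding half the range and wrapping, the same +0x8000 transform the removed pci20006/pci20341 code used, and ii20k_ai_insn_read() further down applies it in the opposite direction. A stand-alone check of the arithmetic (plain C, not driver code):

	#include <stdio.h>

	static unsigned int munge(unsigned int val, unsigned int maxdata)
	{
		val += (maxdata + 1) >> 1;	/* add half the range */
		return val & maxdata;		/* wrap to the sample width */
	}

	int main(void)
	{
		/* 16-bit samples: 0x0000 -> 0x8000, 0x8000 -> 0x0000, 0xffff -> 0x7fff */
		printf("%#x %#x %#x\n",
		       munge(0x0000, 0xffff),
		       munge(0x8000, 0xffff),
		       munge(0xffff, 0xffff));
		return 0;
	}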
-static int pci20006_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int ii20k_ai_wait_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ int timeout)
{
- union pci20xxx_subdev_private *sdp = s->private;
+ void __iomem *iobase = ii20k_module_iobase(dev, s);
+ unsigned char status;
- data[0] = sdp->pci20006.last_data[CR_CHAN(insn->chanspec)];
+ do {
+ status = readb(iobase + II20K_AI_STATUS_REG);
+ if ((status & II20K_AI_STATUS_INT) == 0)
+ return 0;
+ } while (timeout--);
- return 1;
+ return -ETIME;
}
-static int pci20006_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static void ii20k_ai_setup(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int chanspec)
{
- union pci20xxx_subdev_private *sdp = s->private;
- int hi, lo;
- unsigned int boarddata;
-
- sdp->pci20006.last_data[CR_CHAN(insn->chanspec)] = data[0];
- boarddata = (((unsigned int)data[0] + 0x8000) & 0xffff);
- /* comedi-data -> board-data */
- lo = (boarddata & 0xff);
- hi = ((boarddata >> 8) & 0xff);
-
- switch (CR_CHAN(insn->chanspec)) {
- case 0:
- writeb(lo, sdp->iobase + PCI20006_LCHAN0);
- writeb(hi, sdp->iobase + PCI20006_LCHAN0 + 1);
- writeb(0x00, sdp->iobase + PCI20006_STROBE0);
- break;
- case 1:
- writeb(lo, sdp->iobase + PCI20006_LCHAN1);
- writeb(hi, sdp->iobase + PCI20006_LCHAN1 + 1);
- writeb(0x00, sdp->iobase + PCI20006_STROBE1);
- break;
- default:
- dev_warn(dev->class_dev, "ao channel Error!\n");
- return -EINVAL;
- }
-
- return 1;
-}
+ void __iomem *iobase = ii20k_module_iobase(dev, s);
+ unsigned int chan = CR_CHAN(chanspec);
+ unsigned int range = CR_RANGE(chanspec);
+ unsigned char val;
-/* PCI20341M */
+ /* initialize module */
+ writeb(II20K_AI_CONF_ENA, iobase + II20K_AI_CONF_REG);
-static int pci20341_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data);
+ /* software conversion */
+ writeb(0, iobase + II20K_AI_STATUS_CMD_REG);
-static const int pci20341_timebase[] = { 0x00, 0x00, 0x00, 0x04 };
-static const int pci20341_settling_time[] = { 0x58, 0x58, 0x93, 0x99 };
+ /* set the time base for the settling time counter based on the gain */
+ val = (range < 3) ? II20K_AI_OPT_TIMEBASE(0) : II20K_AI_OPT_TIMEBASE(2);
+ writeb(val, iobase + II20K_AI_OPT_REG);
-static const struct comedi_lrange range_bipolar0_5 = {
- 1,
- {BIP_RANGE(0.5)}
-};
+ /* set the settling time counter based on the gain */
+ val = (range < 2) ? 0x58 : (range < 3) ? 0x93 : 0x99;
+ writeb(val, iobase + II20K_AI_SET_TIME_REG);
-static const struct comedi_lrange range_bipolar0_05 = {
- 1,
- {BIP_RANGE(0.05)}
-};
+ /* set number of input channels */
+ writeb(1, iobase + II20K_AI_LAST_CHAN_ADDR_REG);
-static const struct comedi_lrange range_bipolar0_025 = {
- 1,
- {BIP_RANGE(0.025)}
-};
+ /* set the channel list byte */
+ val = II20K_AI_CHANLIST_ONBOARD_ONLY |
+ II20K_AI_CHANLIST_MUX_ENA |
+ II20K_AI_CHANLIST_GAIN(range) |
+ II20K_AI_CHANLIST_CHAN(chan);
+ writeb(val, iobase + II20K_AI_CHANLIST_REG);
-static const struct comedi_lrange *const pci20341_ranges[] = {
- &range_bipolar5,
- &range_bipolar0_5,
- &range_bipolar0_05,
- &range_bipolar0_025,
-};
+ /* reset settling time counter and trigger delay counter */
+ writeb(0, iobase + II20K_AI_COUNT_RESET_REG);
-static int pci20341_init(struct comedi_device *dev, struct comedi_subdevice *s,
- int opt0, int opt1)
-{
- union pci20xxx_subdev_private *sdp = s->private;
- int option;
-
- /* options handling */
- if (opt0 < 0 || opt0 > 3)
- opt0 = 0;
- sdp->pci20341.timebase = pci20341_timebase[opt0];
- sdp->pci20341.settling_time = pci20341_settling_time[opt0];
-
- /* ai subdevice */
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = PCI20341_CHAN_NR;
- s->len_chanlist = PCI20341_SCANLIST;
- s->insn_read = pci20341_insn_read;
- s->maxdata = 0xffff;
- s->range_table = pci20341_ranges[opt0];
-
- /* depends on gain, trigger, repetition mode */
- option = sdp->pci20341.timebase | PCI20341_REPMODE;
-
- /* initialize Module */
- writeb(PCI20341_INIT, sdp->iobase + PCI20341_CONFIG_REG);
- /* set Pacer */
- writeb(PCI20341_PACER, sdp->iobase + PCI20341_MOD_STATUS);
- /* option register */
- writeb(option, sdp->iobase + PCI20341_OPT_REG);
- /* settling time counter */
- writeb(sdp->pci20341.settling_time,
- sdp->iobase + PCI20341_SET_TIME_REG);
- /* trigger not implemented */
- return 0;
+ /* reset channel scanner */
+ writeb(0, iobase + II20K_AI_CHAN_RESET_REG);
}
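For reference, the range index picked by the caller drives the settling-time and timebase writes above exactly as the removed pci20341_settling_time[] and pci20341_timebase[] tables did:

	range  gain  II20K_AI_SET_TIME_REG  II20K_AI_OPT_TIMEBASE()
	  0      1          0x58                    0
	  1     10          0x58                    0
	  2    100          0x93                    0
	  3    200          0x99                    2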
-static int pci20341_insn_read(struct comedi_device *dev,
+static int ii20k_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- union pci20xxx_subdev_private *sdp = s->private;
- unsigned int i = 0, j = 0;
- int lo, hi;
- unsigned char eoc; /* end of conversion */
- unsigned int clb; /* channel list byte */
- unsigned int boarddata;
-
- /* write number of input channels */
- writeb(1, sdp->iobase + PCI20341_LCHAN_ADDR_REG);
- clb = PCI20341_DAISY_CHAIN | PCI20341_MUX | (sdp->pci20341.ai_gain << 3)
- | CR_CHAN(insn->chanspec);
- writeb(clb, sdp->iobase + PCI20341_CHAN_LIST);
+ void __iomem *iobase = ii20k_module_iobase(dev, s);
+ int ret;
+ int i;
- /* reset settling time counter and trigger delay counter */
- writeb(0x00, sdp->iobase + PCI20341_CC_RESET);
+ ii20k_ai_setup(dev, s, insn->chanspec);
- writeb(0x00, sdp->iobase + PCI20341_CHAN_RESET);
+ for (i = 0; i < insn->n; i++) {
+ unsigned int val;
- /* generate Pacer */
+ /* generate a software start convert signal */
+ readb(iobase + II20K_AI_PACER_RESET_REG);
- for (i = 0; i < insn->n; i++) {
- /* data polling isn't the niciest way to get the data, I know,
- * but there are only 6 cycles (mean) and it is easier than
- * the whole interrupt stuff
- */
- j = 0;
- /* generate Pacer */
- readb(sdp->iobase + PCI20341_SOFT_PACER);
-
- eoc = readb(sdp->iobase + PCI20341_STATUS_REG);
- /* poll Interrupt Flag */
- while ((eoc < 0x80) && j < 100) {
- j++;
- eoc = readb(sdp->iobase + PCI20341_STATUS_REG);
- }
- if (j >= 100) {
- dev_warn(dev->class_dev,
- "AI interrupt channel %i polling exit !\n", i);
- return -EINVAL;
- }
- lo = readb(sdp->iobase + PCI20341_LDATA);
- hi = readb(sdp->iobase + PCI20341_LDATA + 1);
- boarddata = lo + 0x100 * hi;
-
- /* board-data -> comedi-data */
- data[i] = (short)((boarddata + 0x8000) & 0xffff);
- }
+ ret = ii20k_ai_wait_eoc(dev, s, 100);
+ if (ret)
+ return ret;
- return i;
-}
+ val = readb(iobase + II20K_AI_LSB_REG);
+ val |= (readb(iobase + II20K_AI_MSB_REG) << 8);
+
+ /* munge two's complement data */
+ val += ((s->maxdata + 1) >> 1);
+ val &= s->maxdata;
-/* native DIO */
+ data[i] = val;
+ }
-static void pci20xxx_dio_config(struct comedi_device *dev,
- struct comedi_subdevice *s);
-static int pci20xxx_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data);
-static int pci20xxx_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data);
+ return insn->n;
+}
-/* initialize struct pci20xxx_private */
-static int pci20xxx_dio_init(struct comedi_device *dev,
+static void ii20k_dio_config(struct comedi_device *dev,
struct comedi_subdevice *s)
{
+ struct ii20k_private *devpriv = dev->private;
+ unsigned char ctrl01 = 0;
+ unsigned char ctrl23 = 0;
+ unsigned char dir_ena = 0;
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 32;
- s->insn_bits = pci20xxx_dio_insn_bits;
- s->insn_config = pci20xxx_dio_insn_config;
- s->maxdata = 1;
- s->len_chanlist = 32;
- s->range_table = &range_digital;
- s->io_bits = 0;
+ /* port 0 - channels 0-7 */
+ if (s->io_bits & 0x000000ff) {
+ /* output port */
+ ctrl01 &= ~II20K_CTRL01_DIO0_IN;
+ dir_ena &= ~II20K_BUF_DISAB_DIO0;
+ dir_ena |= II20K_DIR_DIO0_OUT;
+ } else {
+ /* input port */
+ ctrl01 |= II20K_CTRL01_DIO0_IN;
+ dir_ena &= ~II20K_DIR_DIO0_OUT;
+ }
- /* digital I/O lines default to input on board reset. */
- pci20xxx_dio_config(dev, s);
+ /* port 1 - channels 8-15 */
+ if (s->io_bits & 0x0000ff00) {
+ /* output port */
+ ctrl01 &= ~II20K_CTRL01_DIO1_IN;
+ dir_ena &= ~II20K_BUF_DISAB_DIO1;
+ dir_ena |= II20K_DIR_DIO1_OUT;
+ } else {
+ /* input port */
+ ctrl01 |= II20K_CTRL01_DIO1_IN;
+ dir_ena &= ~II20K_DIR_DIO1_OUT;
+ }
- return 0;
+ /* port 2 - channels 16-23 */
+ if (s->io_bits & 0x00ff0000) {
+ /* output port */
+ ctrl23 &= ~II20K_CTRL23_DIO2_IN;
+ dir_ena &= ~II20K_BUF_DISAB_DIO2;
+ dir_ena |= II20K_DIR_DIO2_OUT;
+ } else {
+ /* input port */
+ ctrl23 |= II20K_CTRL23_DIO2_IN;
+ dir_ena &= ~II20K_DIR_DIO2_OUT;
+ }
+
+ /* port 3 - channels 24-31 */
+ if (s->io_bits & 0xff000000) {
+ /* output port */
+ ctrl23 &= ~II20K_CTRL23_DIO3_IN;
+ dir_ena &= ~II20K_BUF_DISAB_DIO3;
+ dir_ena |= II20K_DIR_DIO3_OUT;
+ } else {
+ /* input port */
+ ctrl23 |= II20K_CTRL23_DIO3_IN;
+ dir_ena &= ~II20K_DIR_DIO3_OUT;
+ }
+
+ ctrl01 |= II20K_CTRL01_SET;
+ ctrl23 |= II20K_CTRL23_SET;
+
+ /* order is important */
+ writeb(ctrl01, devpriv->ioaddr + II20K_CTRL01_REG);
+ writeb(ctrl23, devpriv->ioaddr + II20K_CTRL23_REG);
+ writeb(dir_ena, devpriv->ioaddr + II20K_DIR_ENA_REG);
}
-static int pci20xxx_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int ii20k_dio_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int mask, bits;
-
- mask = 1 << CR_CHAN(insn->chanspec);
- if (mask & 0x000000ff)
- bits = 0x000000ff;
- else if (mask & 0x0000ff00)
- bits = 0x0000ff00;
- else if (mask & 0x00ff0000)
- bits = 0x00ff0000;
- else
- bits = 0xff000000;
- if (data[0])
- s->io_bits |= bits;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int mask;
+ int ret;
+
+ if (chan < 8)
+ mask = 0x000000ff;
+ else if (chan < 16)
+ mask = 0x0000ff00;
+ else if (chan < 24)
+ mask = 0x00ff0000;
else
- s->io_bits &= ~bits;
- pci20xxx_dio_config(dev, s);
+ mask = 0xff000000;
- return 1;
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
+
+ ii20k_dio_config(dev, s);
+
+ return insn->n;
}
-static int pci20xxx_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int ii20k_dio_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct pci20xxx_private *devpriv = dev->private;
- unsigned int mask = data[0];
-
- s->state &= ~mask;
- s->state |= (mask & data[1]);
-
- mask &= s->io_bits;
- if (mask & 0x000000ff)
- writeb((s->state >> 0) & 0xff,
- devpriv->ioaddr + PCI20000_DIO_0);
- if (mask & 0x0000ff00)
- writeb((s->state >> 8) & 0xff,
- devpriv->ioaddr + PCI20000_DIO_1);
- if (mask & 0x00ff0000)
- writeb((s->state >> 16) & 0xff,
- devpriv->ioaddr + PCI20000_DIO_2);
- if (mask & 0xff000000)
- writeb((s->state >> 24) & 0xff,
- devpriv->ioaddr + PCI20000_DIO_3);
-
- data[1] = readb(devpriv->ioaddr + PCI20000_DIO_0);
- data[1] |= readb(devpriv->ioaddr + PCI20000_DIO_1) << 8;
- data[1] |= readb(devpriv->ioaddr + PCI20000_DIO_2) << 16;
- data[1] |= readb(devpriv->ioaddr + PCI20000_DIO_3) << 24;
+ struct ii20k_private *devpriv = dev->private;
+ unsigned int mask = data[0] & s->io_bits; /* outputs only */
+ unsigned int bits = data[1];
+
+ if (mask) {
+ s->state &= ~mask;
+ s->state |= (bits & mask);
+
+ if (mask & 0x000000ff)
+ writeb((s->state >> 0) & 0xff,
+ devpriv->ioaddr + II20K_DIO0_REG);
+ if (mask & 0x0000ff00)
+ writeb((s->state >> 8) & 0xff,
+ devpriv->ioaddr + II20K_DIO1_REG);
+ if (mask & 0x00ff0000)
+ writeb((s->state >> 16) & 0xff,
+ devpriv->ioaddr + II20K_DIO2_REG);
+ if (mask & 0xff000000)
+ writeb((s->state >> 24) & 0xff,
+ devpriv->ioaddr + II20K_DIO3_REG);
+ }
+
+ data[1] = readb(devpriv->ioaddr + II20K_DIO0_REG);
+ data[1] |= readb(devpriv->ioaddr + II20K_DIO1_REG) << 8;
+ data[1] |= readb(devpriv->ioaddr + II20K_DIO2_REG) << 16;
+ data[1] |= readb(devpriv->ioaddr + II20K_DIO3_REG) << 24;
return insn->n;
}
-static void pci20xxx_dio_config(struct comedi_device *dev,
- struct comedi_subdevice *s)
+static int ii20k_init_module(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct ii20k_ao_private *ao_spriv;
+ void __iomem *iobase = ii20k_module_iobase(dev, s);
+ unsigned char id;
+
+ id = readb(iobase + II20K_ID_REG);
+ switch (id) {
+ case II20K_ID_PCI20006M_1:
+ case II20K_ID_PCI20006M_2:
+ ao_spriv = comedi_alloc_spriv(s, sizeof(*ao_spriv));
+ if (!ao_spriv)
+ return -ENOMEM;
+
+ /* Analog Output subdevice */
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = (id == II20K_ID_PCI20006M_2) ? 2 : 1;
+ s->maxdata = 0xffff;
+ s->range_table = &ii20k_ao_ranges;
+ s->insn_read = ii20k_ao_insn_read;
+ s->insn_write = ii20k_ao_insn_write;
+ break;
+ case II20K_ID_PCI20341M_1:
+ /* Analog Input subdevice */
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_DIFF;
+ s->n_chan = 4;
+ s->maxdata = 0xffff;
+ s->range_table = &ii20k_ai_ranges;
+ s->insn_read = ii20k_ai_insn_read;
+ break;
+ default:
+ s->type = COMEDI_SUBD_UNUSED;
+ break;
+ }
+
+ return 0;
+}
+
+static int ii20k_attach(struct comedi_device *dev,
+ struct comedi_devconfig *it)
{
- struct pci20xxx_private *devpriv = dev->private;
- unsigned char control_01;
- unsigned char control_23;
- unsigned char buffer;
+ struct ii20k_private *devpriv;
+ struct comedi_subdevice *s;
+ unsigned char id;
+ bool has_dio = false;
+ int ret;
- control_01 = readb(devpriv->ioaddr + PCI20000_DIO_CONTROL_01);
- control_23 = readb(devpriv->ioaddr + PCI20000_DIO_CONTROL_23);
- buffer = readb(devpriv->ioaddr + PCI20000_DIO_BUFFER);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
+ if (!devpriv)
+ return -ENOMEM;
- if (s->io_bits & 0x000000ff) {
- /* output port 0 */
- control_01 &= PCI20000_DIO_EOC;
- buffer = (buffer & (~(DIO_BE << DIO_PS_0))) | (DIO_BO <<
- DIO_PS_0);
- } else {
- /* input port 0 */
- control_01 = (control_01 & DIO_CAND) | PCI20000_DIO_EIC;
- buffer = (buffer & (~(DIO_BI << DIO_PS_0)));
+ devpriv->ioaddr = (void __iomem *)(unsigned long)it->options[0];
+
+ id = readb(devpriv->ioaddr + II20K_ID_REG);
+ switch (id & II20K_ID_MASK) {
+ case II20K_ID_PCI20001C_1A:
+ break;
+ case II20K_ID_PCI20001C_2A:
+ has_dio = true;
+ break;
+ default:
+ return -ENODEV;
}
- if (s->io_bits & 0x0000ff00) {
- /* output port 1 */
- control_01 &= PCI20000_DIO_OOC;
- buffer = (buffer & (~(DIO_BE << DIO_PS_1))) | (DIO_BO <<
- DIO_PS_1);
+
+ ret = comedi_alloc_subdevices(dev, 4);
+ if (ret)
+ return ret;
+
+ s = &dev->subdevices[0];
+ if (id & II20K_ID_MOD1_EMPTY) {
+ s->type = COMEDI_SUBD_UNUSED;
} else {
- /* input port 1 */
- control_01 = (control_01 & DIO_CAND) | PCI20000_DIO_OIC;
- buffer = (buffer & (~(DIO_BI << DIO_PS_1)));
+ ret = ii20k_init_module(dev, s);
+ if (ret)
+ return ret;
}
- if (s->io_bits & 0x00ff0000) {
- /* output port 2 */
- control_23 &= PCI20000_DIO_EOC;
- buffer = (buffer & (~(DIO_BE << DIO_PS_2))) | (DIO_BO <<
- DIO_PS_2);
+
+ s = &dev->subdevices[1];
+ if (id & II20K_ID_MOD2_EMPTY) {
+ s->type = COMEDI_SUBD_UNUSED;
} else {
- /* input port 2 */
- control_23 = (control_23 & DIO_CAND) | PCI20000_DIO_EIC;
- buffer = (buffer & (~(DIO_BI << DIO_PS_2)));
+ ret = ii20k_init_module(dev, s);
+ if (ret)
+ return ret;
}
- if (s->io_bits & 0xff000000) {
- /* output port 3 */
- control_23 &= PCI20000_DIO_OOC;
- buffer = (buffer & (~(DIO_BE << DIO_PS_3))) | (DIO_BO <<
- DIO_PS_3);
+
+ s = &dev->subdevices[2];
+ if (id & II20K_ID_MOD3_EMPTY) {
+ s->type = COMEDI_SUBD_UNUSED;
} else {
- /* input port 3 */
- control_23 = (control_23 & DIO_CAND) | PCI20000_DIO_OIC;
- buffer = (buffer & (~(DIO_BI << DIO_PS_3)));
+ ret = ii20k_init_module(dev, s);
+ if (ret)
+ return ret;
}
- writeb(control_01, devpriv->ioaddr + PCI20000_DIO_CONTROL_01);
- writeb(control_23, devpriv->ioaddr + PCI20000_DIO_CONTROL_23);
- writeb(buffer, devpriv->ioaddr + PCI20000_DIO_BUFFER);
-}
-#if 0
-static void pci20xxx_do(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct pci20xxx_private *devpriv = dev->private;
-
- /* XXX if the channel is configured for input, does this
- do bad things? */
- /* XXX it would be a good idea to only update the registers
- that _need_ to be updated. This requires changes to
- comedi, however. */
- writeb((s->state >> 0) & 0xff, devpriv->ioaddr + PCI20000_DIO_0);
- writeb((s->state >> 8) & 0xff, devpriv->ioaddr + PCI20000_DIO_1);
- writeb((s->state >> 16) & 0xff, devpriv->ioaddr + PCI20000_DIO_2);
- writeb((s->state >> 24) & 0xff, devpriv->ioaddr + PCI20000_DIO_3);
-}
-
-static unsigned int pci20xxx_di(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pci20xxx_private *devpriv = dev->private;
- unsigned int bits;
-
- /* XXX same note as above */
- bits = readb(devpriv->ioaddr + PCI20000_DIO_0);
- bits |= readb(devpriv->ioaddr + PCI20000_DIO_1) << 8;
- bits |= readb(devpriv->ioaddr + PCI20000_DIO_2) << 16;
- bits |= readb(devpriv->ioaddr + PCI20000_DIO_3) << 24;
+ /* Digital I/O subdevice */
+ s = &dev->subdevices[3];
+ if (has_dio) {
+ s->type = COMEDI_SUBD_DIO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->n_chan = 32;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = ii20k_dio_insn_bits;
+ s->insn_config = ii20k_dio_insn_config;
+
+ /* default all channels to input */
+ ii20k_dio_config(dev, s);
+ } else {
+ s->type = COMEDI_SUBD_UNUSED;
+ }
- return bits;
+ return 0;
}
-#endif
-static struct comedi_driver pci20xxx_driver = {
+static struct comedi_driver ii20k_driver = {
.driver_name = "ii_pci20kc",
.module = THIS_MODULE,
- .attach = pci20xxx_attach,
- .detach = pci20xxx_detach,
+ .attach = ii20k_attach,
+ .detach = comedi_legacy_detach,
};
-module_comedi_driver(pci20xxx_driver);
+module_comedi_driver(ii20k_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index 90b303a..b52d58e 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
* Driver: jr3_pci
@@ -43,10 +38,10 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ctype.h>
-#include <linux/firmware.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/timer.h>
@@ -97,37 +92,6 @@ struct jr3_pci_subdev_private {
int retries;
};
-/* Hotplug firmware loading stuff */
-static int comedi_load_firmware(struct comedi_device *dev, const char *name,
- int (*cb)(struct comedi_device *dev,
- const u8 *data, size_t size))
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- int result = 0;
- const struct firmware *fw;
- char *firmware_path;
- static const char *prefix = "comedi/";
-
- firmware_path = kmalloc(strlen(prefix) + strlen(name) + 1, GFP_KERNEL);
- if (!firmware_path) {
- result = -ENOMEM;
- } else {
- firmware_path[0] = '\0';
- strcat(firmware_path, prefix);
- strcat(firmware_path, name);
- result = request_firmware(&fw, firmware_path, &pcidev->dev);
- if (result == 0) {
- if (!cb)
- result = -EINVAL;
- else
- result = cb(dev, fw->data, fw->size);
- release_firmware(fw);
- }
- kfree(firmware_path);
- }
- return result;
-}
-
static struct poll_delay_t poll_delay_min_max(int min, int max)
{
struct poll_delay_t result;
@@ -362,8 +326,9 @@ static int read_idm_word(const u8 *data, size_t size, int *pos,
return result;
}
-static int jr3_download_firmware(struct comedi_device *dev, const u8 *data,
- size_t size)
+static int jr3_download_firmware(struct comedi_device *dev,
+ const u8 *data, size_t size,
+ unsigned long context)
{
/*
* IDM file format is:
@@ -674,10 +639,9 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
return -EINVAL;
}
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
init_timer(&devpriv->timer);
switch (pcidev->device) {
@@ -768,7 +732,9 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
/* Reset DSP card */
writel(0, &devpriv->iobase->channel[0].reset);
- result = comedi_load_firmware(dev, "jr3pci.idm", jr3_download_firmware);
+ result = comedi_load_firmware(dev, &comedi_to_pci_dev(dev)->dev,
+ "comedi/jr3pci.idm",
+ jr3_download_firmware, 0);

	dev_dbg(dev->class_dev, "Firmware load %d\n", result);
if (result < 0)
@@ -778,8 +744,9 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
* format:
* model serial Fx Fy Fz Mx My Mz\n
*
- * comedi_load_firmware(dev, "jr3_offsets_table",
- * jr3_download_firmware);
+ * comedi_load_firmware(dev, &comedi_to_pci_dev(dev)->dev,
+ * "comedi/jr3_offsets_table",
+ * jr3_download_firmware, 1);
*/
/*
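
The jr3_pci hunks above drop the driver's private request_firmware() wrapper in favour of the shared comedi_load_firmware() helper. Two details worth noting from the diff: the download callback now takes an extra unsigned long context argument, and the "comedi/" prefix moves into the firmware name passed by the caller. A reduced sketch of the new calling convention follows; the demo_* names and the firmware file name are placeholders, and the usual <linux/module.h> and "../comedidev.h" includes are assumed.

static int demo_fw_download(struct comedi_device *dev,
			    const u8 *data, size_t size,
			    unsigned long context)
{
	/* parse 'data' and write the image to the hardware here */
	return 0;
}

static int demo_request_fw(struct comedi_device *dev)
{
	struct pci_dev *pcidev = comedi_to_pci_dev(dev);

	/* the helper requests the file and invokes the callback */
	return comedi_load_firmware(dev, &pcidev->dev, "comedi/demo.idm",
				    demo_fw_download, 0);
}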
diff --git a/drivers/staging/comedi/drivers/ke_counter.c b/drivers/staging/comedi/drivers/ke_counter.c
index e0e6475..15589f6 100644
--- a/drivers/staging/comedi/drivers/ke_counter.c
+++ b/drivers/staging/comedi/drivers/ke_counter.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: ke_counter
@@ -34,6 +29,7 @@ This driver is a simple driver to read the counter values from
Kolter Electronic PCI Counter Card.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c
index 641e693..8f4afad 100644
--- a/drivers/staging/comedi/drivers/me4000.c
+++ b/drivers/staging/comedi/drivers/me4000.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: me4000
@@ -45,6 +40,7 @@ broken.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -1362,98 +1358,57 @@ static int me4000_dio_insn_bits(struct comedi_device *dev,
static int me4000_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- unsigned long tmp;
- int chan = CR_CHAN(insn->chanspec);
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int mask;
+ unsigned int tmp;
+ int ret;
- switch (data[0]) {
- default:
- return -EINVAL;
- case INSN_CONFIG_DIO_QUERY:
- data[1] =
- (s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- case INSN_CONFIG_DIO_INPUT:
- case INSN_CONFIG_DIO_OUTPUT:
- break;
- }
+ if (chan < 8)
+ mask = 0x000000ff;
+ else if (chan < 16)
+ mask = 0x0000ff00;
+ else if (chan < 24)
+ mask = 0x00ff0000;
+ else
+ mask = 0xff000000;
- /*
- * The input or output configuration of each digital line is
- * configured by a special insn_config instruction. chanspec
- * contains the channel to be changed, and data[0] contains the
- * value INSN_CONFIG_DIO_INPUT or INSN_CONFIG_DIO_OUTPUT.
- * On the ME-4000 it is only possible to switch port wise (8 bit)
- */
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
tmp = inl(dev->iobase + ME4000_DIO_CTRL_REG);
+ tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_0 | ME4000_DIO_CTRL_BIT_MODE_1 |
+ ME4000_DIO_CTRL_BIT_MODE_2 | ME4000_DIO_CTRL_BIT_MODE_3 |
+ ME4000_DIO_CTRL_BIT_MODE_4 | ME4000_DIO_CTRL_BIT_MODE_5 |
+ ME4000_DIO_CTRL_BIT_MODE_6 | ME4000_DIO_CTRL_BIT_MODE_7);
+ if (s->io_bits & 0x000000ff)
+ tmp |= ME4000_DIO_CTRL_BIT_MODE_0;
+ if (s->io_bits & 0x0000ff00)
+ tmp |= ME4000_DIO_CTRL_BIT_MODE_2;
+ if (s->io_bits & 0x00ff0000)
+ tmp |= ME4000_DIO_CTRL_BIT_MODE_4;
+ if (s->io_bits & 0xff000000)
+ tmp |= ME4000_DIO_CTRL_BIT_MODE_6;
- if (data[0] == INSN_CONFIG_DIO_OUTPUT) {
- if (chan < 8) {
- s->io_bits |= 0xFF;
- tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_0 |
- ME4000_DIO_CTRL_BIT_MODE_1);
- tmp |= ME4000_DIO_CTRL_BIT_MODE_0;
- } else if (chan < 16) {
- /*
- * Chech for optoisolated ME-4000 version.
- * If one the first port is a fixed output
- * port and the second is a fixed input port.
- */
- if (!inl(dev->iobase + ME4000_DIO_DIR_REG))
- return -ENODEV;
-
- s->io_bits |= 0xFF00;
- tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_2 |
- ME4000_DIO_CTRL_BIT_MODE_3);
- tmp |= ME4000_DIO_CTRL_BIT_MODE_2;
- } else if (chan < 24) {
- s->io_bits |= 0xFF0000;
- tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_4 |
- ME4000_DIO_CTRL_BIT_MODE_5);
- tmp |= ME4000_DIO_CTRL_BIT_MODE_4;
- } else if (chan < 32) {
- s->io_bits |= 0xFF000000;
- tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_6 |
- ME4000_DIO_CTRL_BIT_MODE_7);
- tmp |= ME4000_DIO_CTRL_BIT_MODE_6;
- } else {
- return -EINVAL;
- }
- } else {
- if (chan < 8) {
- /*
- * Chech for optoisolated ME-4000 version.
- * If one the first port is a fixed output
- * port and the second is a fixed input port.
- */
- if (!inl(dev->iobase + ME4000_DIO_DIR_REG))
- return -ENODEV;
-
- s->io_bits &= ~0xFF;
- tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_0 |
- ME4000_DIO_CTRL_BIT_MODE_1);
- } else if (chan < 16) {
- s->io_bits &= ~0xFF00;
- tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_2 |
- ME4000_DIO_CTRL_BIT_MODE_3);
- } else if (chan < 24) {
- s->io_bits &= ~0xFF0000;
- tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_4 |
- ME4000_DIO_CTRL_BIT_MODE_5);
- } else if (chan < 32) {
- s->io_bits &= ~0xFF000000;
- tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_6 |
- ME4000_DIO_CTRL_BIT_MODE_7);
- } else {
- return -EINVAL;
- }
+ /*
+ * Check for optoisolated ME-4000 version.
+	 * If so, the first port is a fixed output
+	 * port and the second is a fixed input port.
+ */
+ if (inl(dev->iobase + ME4000_DIO_DIR_REG)) {
+ s->io_bits |= 0x000000ff;
+ s->io_bits &= ~0x0000ff00;
+ tmp |= ME4000_DIO_CTRL_BIT_MODE_0;
+ tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_2 |
+ ME4000_DIO_CTRL_BIT_MODE_3);
}
outl(tmp, dev->iobase + ME4000_DIO_CTRL_REG);
- return 1;
+ return insn->n;
}
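
The rewritten me4000_dio_insn_config() above shows the intended use of comedi_dio_insn_config(): the driver passes a mask covering the whole 8-bit port containing the addressed channel (direction on this hardware can only be switched port-wise), lets the helper update s->io_bits and answer INSN_CONFIG_DIO_QUERY, then programs the hardware from the result. A condensed sketch of just the mask selection and helper call is below, with illustrative demo_* names; the same mask-then-helper shape recurs later in this series for me_daq (16-bit ports), ni_at_ao and ni_atmio16d (4-bit nibbles), and ni_daq_700 (mask 0, fixed directions).

static int demo_dio_insn_config(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn,
				unsigned int *data)
{
	unsigned int chan = CR_CHAN(insn->chanspec);
	unsigned int mask = 0xffu << (8 * (chan / 8));	/* whole 8-bit port */
	int ret;

	ret = comedi_dio_insn_config(dev, s, insn, data, mask);
	if (ret)
		return ret;

	/* program the per-port direction bits from s->io_bits here */

	return insn->n;
}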
/*=============================================================================
@@ -1549,10 +1504,9 @@ static int me4000_auto_attach(struct comedi_device *dev,
dev->board_ptr = thisboard;
dev->board_name = thisboard->name;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = comedi_alloc_devpriv(dev, sizeof(*info));
if (!info)
return -ENOMEM;
- dev->private = info;
result = comedi_pci_enable(dev);
if (result)
diff --git a/drivers/staging/comedi/drivers/me_daq.c b/drivers/staging/comedi/drivers/me_daq.c
index 09f2a9fe..a6f6d4a 100644
--- a/drivers/staging/comedi/drivers/me_daq.c
+++ b/drivers/staging/comedi/drivers/me_daq.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
@@ -34,10 +30,10 @@
* Analog Input, Analog Output, Digital I/O
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
-#include <linux/firmware.h>
#include "../comedidev.h"
@@ -190,38 +186,30 @@ static int me_dio_insn_config(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct me_private_data *dev_private = dev->private;
- unsigned int mask = 1 << CR_CHAN(insn->chanspec);
- unsigned int bits;
- unsigned int port;
+ struct me_private_data *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int mask;
+ int ret;
- if (mask & 0x0000ffff) {
- bits = 0x0000ffff;
- port = ENABLE_PORT_A;
- } else {
- bits = 0xffff0000;
- port = ENABLE_PORT_B;
- }
+ if (chan < 16)
+ mask = 0x0000ffff;
+ else
+ mask = 0xffff0000;
- switch (data[0]) {
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~bits;
- dev_private->control_2 &= ~port;
- break;
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= bits;
- dev_private->control_2 |= port;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (s->io_bits & bits) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- }
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
- /* Update the port configuration */
- writew(dev_private->control_2, dev_private->me_regbase + ME_CONTROL_2);
+ if (s->io_bits & 0x0000ffff)
+ devpriv->control_2 |= ENABLE_PORT_A;
+ else
+ devpriv->control_2 &= ~ENABLE_PORT_A;
+ if (s->io_bits & 0xffff0000)
+ devpriv->control_2 |= ENABLE_PORT_B;
+ else
+ devpriv->control_2 &= ~ENABLE_PORT_B;
+
+ writew(devpriv->control_2, devpriv->me_regbase + ME_CONTROL_2);
return insn->n;
}
@@ -391,7 +379,8 @@ static int me_ao_insn_read(struct comedi_device *dev,
}
static int me2600_xilinx_download(struct comedi_device *dev,
- const u8 *data, size_t size)
+ const u8 *data, size_t size,
+ unsigned long context)
{
struct me_private_data *dev_private = dev->private;
unsigned int value;
@@ -460,22 +449,6 @@ static int me2600_xilinx_download(struct comedi_device *dev,
return 0;
}
-static int me2600_upload_firmware(struct comedi_device *dev)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- const struct firmware *fw;
- int ret;
-
- ret = request_firmware(&fw, ME2600_FIRMWARE, &pcidev->dev);
- if (ret)
- return ret;
-
- ret = me2600_xilinx_download(dev, fw->data, fw->size);
- release_firmware(fw);
-
- return ret;
-}
-
static int me_reset(struct comedi_device *dev)
{
struct me_private_data *dev_private = dev->private;
@@ -510,10 +483,9 @@ static int me_auto_attach(struct comedi_device *dev,
dev->board_ptr = board;
dev->board_name = board->name;
- dev_private = kzalloc(sizeof(*dev_private), GFP_KERNEL);
+ dev_private = comedi_alloc_devpriv(dev, sizeof(*dev_private));
if (!dev_private)
return -ENOMEM;
- dev->private = dev_private;
ret = comedi_pci_enable(dev);
if (ret)
@@ -529,7 +501,9 @@ static int me_auto_attach(struct comedi_device *dev,
/* Download firmware and reset card */
if (board->needs_firmware) {
- ret = me2600_upload_firmware(dev);
+ ret = comedi_load_firmware(dev, &comedi_to_pci_dev(dev)->dev,
+ ME2600_FIRMWARE,
+ me2600_xilinx_download, 0);
if (ret < 0)
return ret;
}
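
Most attach routines in this series also swap the open-coded kzalloc() plus dev->private assignment for comedi_alloc_devpriv(), which allocates the private data and hooks it up to the device in one step, so the explicit dev->private line disappears everywhere. A minimal sketch of the pattern, with a hypothetical private struct and demo_* names:

struct demo_private {
	unsigned int ctrl_reg;	/* cached copy of a control register */
};

static int demo_auto_attach(struct comedi_device *dev, unsigned long context)
{
	struct demo_private *devpriv;

	devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
	if (!devpriv)
		return -ENOMEM;

	/* devpriv is now reachable via dev->private in other callbacks */
	return 0;
}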
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index 523c656..35cb4ac 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
@@ -51,6 +46,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
diff --git a/drivers/staging/comedi/drivers/mite.h b/drivers/staging/comedi/drivers/mite.h
index 255b8ba..8423b8b 100644
--- a/drivers/staging/comedi/drivers/mite.h
+++ b/drivers/staging/comedi/drivers/mite.h
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
#ifndef _MITE_H_
@@ -26,6 +21,7 @@
#include <linux/pci.h>
#include <linux/log2.h>
+#include <linux/slab.h>
#include "../comedidev.h"
/* #define DEBUG_MITE */
diff --git a/drivers/staging/comedi/drivers/mpc624.c b/drivers/staging/comedi/drivers/mpc624.c
index 4717be4..acbaeee 100644
--- a/drivers/staging/comedi/drivers/mpc624.c
+++ b/drivers/staging/comedi/drivers/mpc624.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: mpc624
@@ -56,9 +51,9 @@ Configuration Options:
1 -10.1V .. +10.1V
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
#include <linux/delay.h>
/* Consecutive I/O port addresses */
@@ -291,10 +286,9 @@ static int mpc624_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
switch (it->options[1]) {
case 0:
diff --git a/drivers/staging/comedi/drivers/multiq3.c b/drivers/staging/comedi/drivers/multiq3.c
index 7a82920..9d75ea4 100644
--- a/drivers/staging/comedi/drivers/multiq3.c
+++ b/drivers/staging/comedi/drivers/multiq3.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: multiq3
@@ -29,11 +24,10 @@ Devices: [Quanser Consulting] MultiQ-3 (multiq3)
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-
#define MULTIQ3_SIZE 16
/*
@@ -237,10 +231,9 @@ static int multiq3_attach(struct comedi_device *dev,
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
s = &dev->subdevices[0];
/* ai subdevice */
diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c
index d10f777..c2745f2 100644
--- a/drivers/staging/comedi/drivers/ni_6527.c
+++ b/drivers/staging/comedi/drivers/ni_6527.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: ni_6527
@@ -41,6 +36,7 @@ Updated: Sat, 25 Jan 2003 13:24:40 -0800
#define DEBUG 1
#define DEBUG_FLAGS
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
@@ -340,10 +336,9 @@ static int ni6527_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
devpriv->mite = mite_alloc(pcidev);
if (!devpriv->mite)
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
index 3f71f0f..853f62b 100644
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ b/drivers/staging/comedi/drivers/ni_65xx.c
@@ -17,11 +17,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: ni_65xx
@@ -51,9 +46,9 @@ except maybe the 6514.
#define DEBUG 1
#define DEBUG_FLAGS
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
-#include <linux/slab.h>
#include "../comedidev.h"
@@ -286,15 +281,6 @@ static inline struct ni_65xx_subdevice_private *sprivate(struct comedi_subdevice
return subdev->private;
}
-static struct ni_65xx_subdevice_private *ni_65xx_alloc_subdevice_private(void)
-{
- struct ni_65xx_subdevice_private *subdev_private =
- kzalloc(sizeof(struct ni_65xx_subdevice_private), GFP_KERNEL);
- if (subdev_private == NULL)
- return NULL;
- return subdev_private;
-}
-
static int ni_65xx_config_filter(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
@@ -383,28 +369,23 @@ static int ni_65xx_dio_insn_bits(struct comedi_device *dev,
{
const struct ni_65xx_board *board = comedi_board(dev);
struct ni_65xx_private *devpriv = dev->private;
- unsigned base_bitfield_channel;
- const unsigned max_ports_per_bitfield = 5;
+ int base_bitfield_channel;
unsigned read_bits = 0;
- unsigned j;
+ int last_port_offset = ni_65xx_port_by_channel(s->n_chan - 1);
+ int port_offset;
base_bitfield_channel = CR_CHAN(insn->chanspec);
- for (j = 0; j < max_ports_per_bitfield; ++j) {
- const unsigned port_offset =
- ni_65xx_port_by_channel(base_bitfield_channel) + j;
- const unsigned port =
- sprivate(s)->base_port + port_offset;
- unsigned base_port_channel;
+ for (port_offset = ni_65xx_port_by_channel(base_bitfield_channel);
+ port_offset <= last_port_offset; port_offset++) {
+ unsigned port = sprivate(s)->base_port + port_offset;
+ int base_port_channel = port_offset * ni_65xx_channels_per_port;
unsigned port_mask, port_data, port_read_bits;
- int bitshift;
- if (port >= ni_65xx_total_num_ports(board))
+ int bitshift = base_port_channel - base_bitfield_channel;
+
+ if (bitshift >= 32)
break;
- base_port_channel = port_offset * ni_65xx_channels_per_port;
port_mask = data[0];
port_data = data[1];
- bitshift = base_port_channel - base_bitfield_channel;
- if (bitshift >= 32 || bitshift <= -32)
- break;
if (bitshift > 0) {
port_mask >>= bitshift;
port_data >>= bitshift;
@@ -589,6 +570,7 @@ static int ni_65xx_auto_attach(struct comedi_device *dev,
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct ni_65xx_board *board = NULL;
struct ni_65xx_private *devpriv;
+ struct ni_65xx_subdevice_private *spriv;
struct comedi_subdevice *s;
unsigned i;
int ret;
@@ -604,10 +586,9 @@ static int ni_65xx_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
devpriv->mite = mite_alloc(pcidev);
if (!devpriv->mite)
@@ -637,10 +618,10 @@ static int ni_65xx_auto_attach(struct comedi_device *dev,
s->maxdata = 1;
s->insn_config = ni_65xx_dio_insn_config;
s->insn_bits = ni_65xx_dio_insn_bits;
- s->private = ni_65xx_alloc_subdevice_private();
- if (s->private == NULL)
+ spriv = comedi_alloc_spriv(s, sizeof(*spriv));
+ if (!spriv)
return -ENOMEM;
- sprivate(s)->base_port = 0;
+ spriv->base_port = 0;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
@@ -654,10 +635,10 @@ static int ni_65xx_auto_attach(struct comedi_device *dev,
s->range_table = &range_digital;
s->maxdata = 1;
s->insn_bits = ni_65xx_dio_insn_bits;
- s->private = ni_65xx_alloc_subdevice_private();
- if (s->private == NULL)
+ spriv = comedi_alloc_spriv(s, sizeof(*spriv));
+ if (!spriv)
return -ENOMEM;
- sprivate(s)->base_port = board->num_di_ports;
+ spriv->base_port = board->num_di_ports;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
@@ -672,10 +653,10 @@ static int ni_65xx_auto_attach(struct comedi_device *dev,
s->maxdata = 1;
s->insn_config = ni_65xx_dio_insn_config;
s->insn_bits = ni_65xx_dio_insn_bits;
- s->private = ni_65xx_alloc_subdevice_private();
- if (s->private == NULL)
+ spriv = comedi_alloc_spriv(s, sizeof(*spriv));
+ if (!spriv)
return -ENOMEM;
- sprivate(s)->base_port = 0;
+ spriv->base_port = 0;
for (i = 0; i < board->num_dio_ports; ++i) {
/* configure all ports for input */
writeb(0x1,
@@ -730,7 +711,6 @@ static int ni_65xx_auto_attach(struct comedi_device *dev,
static void ni_65xx_detach(struct comedi_device *dev)
{
struct ni_65xx_private *devpriv = dev->private;
- int i;
if (devpriv && devpriv->mite && devpriv->mite->daq_io_addr) {
writeb(0x00,
@@ -739,8 +719,6 @@ static void ni_65xx_detach(struct comedi_device *dev)
}
if (dev->irq)
free_irq(dev->irq, dev);
- for (i = 0; i < dev->n_subdevices; ++i)
- comedi_spriv_free(dev, i);
if (devpriv) {
if (devpriv->mite) {
mite_unsetup(devpriv->mite);
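
ni_65xx gets the analogous treatment for per-subdevice state: comedi_alloc_spriv() replaces the hand-rolled ni_65xx_alloc_subdevice_private(), and the comedi_spriv_free() loop drops out of the detach routine, the implication being that the core now releases that memory. A short sketch of the pattern, with an illustrative struct layout and demo_* names:

struct demo_spriv {
	unsigned int base_port;
};

static int demo_init_dio_subdev(struct comedi_subdevice *s,
				unsigned int base_port)
{
	struct demo_spriv *spriv;

	spriv = comedi_alloc_spriv(s, sizeof(*spriv));
	if (!spriv)
		return -ENOMEM;

	spriv->base_port = base_port;
	return 0;
}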
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index 5cdda7f..3607336 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -11,10 +11,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
@@ -38,6 +34,7 @@
* DAQ 6601/6602 User Manual (NI 322137B-01)
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
@@ -933,10 +930,9 @@ static int ni_660x_allocate_private(struct comedi_device *dev)
struct ni_660x_private *devpriv;
unsigned i;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
spin_lock_init(&devpriv->mite_channel_lock);
spin_lock_init(&devpriv->interrupt_lock);
diff --git a/drivers/staging/comedi/drivers/ni_670x.c b/drivers/staging/comedi/drivers/ni_670x.c
index 42ab6db..e2926ce 100644
--- a/drivers/staging/comedi/drivers/ni_670x.c
+++ b/drivers/staging/comedi/drivers/ni_670x.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: ni_670x
@@ -41,9 +36,9 @@ Commands are not supported.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
-#include <linux/slab.h>
#include "../comedidev.h"
@@ -163,27 +158,16 @@ static int ni_670x_dio_insn_bits(struct comedi_device *dev,
static int ni_670x_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct ni_670x_private *devpriv = dev->private;
- int chan = CR_CHAN(insn->chanspec);
+ int ret;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= 1 << chan;
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~(1 << chan);
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] =
- (s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- break;
- }
writel(s->io_bits, devpriv->mite->daq_io_addr + DIO_PORT0_DIR_OFFSET);
return insn->n;
@@ -210,10 +194,9 @@ static int ni_670x_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
devpriv->mite = mite_alloc(pcidev);
if (!devpriv->mite)
diff --git a/drivers/staging/comedi/drivers/ni_at_a2150.c b/drivers/staging/comedi/drivers/ni_at_a2150.c
index 2d37516..2512ce8 100644
--- a/drivers/staging/comedi/drivers/ni_at_a2150.c
+++ b/drivers/staging/comedi/drivers/ni_at_a2150.c
@@ -15,12 +15,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-************************************************************************
*/
/*
Driver: ni_at_a2150
@@ -64,12 +58,14 @@ TRIG_WAKE_EOS
*/
+#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
#include <linux/io.h>
+
#include <asm/dma.h>
#include "8253.h"
@@ -725,10 +721,9 @@ static int a2150_attach(struct comedi_device *dev, struct comedi_devconfig *it)
int i;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_request_region(dev, it->options[0], A2150_SIZE);
if (ret)
diff --git a/drivers/staging/comedi/drivers/ni_at_ao.c b/drivers/staging/comedi/drivers/ni_at_ao.c
index 7e5783a..b9122fd 100644
--- a/drivers/staging/comedi/drivers/ni_at_ao.c
+++ b/drivers/staging/comedi/drivers/ni_at_ao.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: ni_at_ao
@@ -41,10 +36,9 @@ Configuration options:
* document 320379.pdf.
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-
/* board registers */
/* registers with _2_ are accessed when GRP2WR is set in CFG1 */
@@ -254,42 +248,35 @@ static int atao_dio_insn_bits(struct comedi_device *dev,
static int atao_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct atao_private *devpriv = dev->private;
- int chan = CR_CHAN(insn->chanspec);
- unsigned int mask, bit;
-
- /* The input or output configuration of each digital line is
- * configured by a special insn_config instruction. chanspec
- * contains the channel to be changed, and data[0] contains the
- * value COMEDI_INPUT or COMEDI_OUTPUT. */
-
- mask = (chan < 4) ? 0x0f : 0xf0;
- bit = (chan < 4) ? DOUTEN1 : DOUTEN2;
-
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= mask;
- devpriv->cfg3 |= bit;
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~mask;
- devpriv->cfg3 &= ~bit;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] =
- (s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- break;
- }
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int mask;
+ int ret;
+
+ if (chan < 4)
+ mask = 0x0f;
+ else
+ mask = 0xf0;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
+
+ if (s->io_bits & 0x0f)
+ devpriv->cfg3 |= DOUTEN1;
+ else
+ devpriv->cfg3 &= ~DOUTEN1;
+ if (s->io_bits & 0xf0)
+ devpriv->cfg3 |= DOUTEN2;
+ else
+ devpriv->cfg3 &= ~DOUTEN2;
outw(devpriv->cfg3, dev->iobase + ATAO_CFG3);
- return 1;
+ return insn->n;
}
/*
@@ -346,10 +333,9 @@ static int atao_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
diff --git a/drivers/staging/comedi/drivers/ni_atmio.c b/drivers/staging/comedi/drivers/ni_atmio.c
index 4ced7ba..856c73d 100644
--- a/drivers/staging/comedi/drivers/ni_atmio.c
+++ b/drivers/staging/comedi/drivers/ni_atmio.c
@@ -14,10 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: ni_atmio
@@ -93,10 +89,10 @@ are not supported.
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
-#include <linux/delay.h>
#include <linux/isapnp.h>
#include "ni_stc.h"
diff --git a/drivers/staging/comedi/drivers/ni_atmio16d.c b/drivers/staging/comedi/drivers/ni_atmio16d.c
index 6c97a09..bb3491f 100644
--- a/drivers/staging/comedi/drivers/ni_atmio16d.c
+++ b/drivers/staging/comedi/drivers/ni_atmio16d.c
@@ -12,11 +12,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: ni_atmio16d
@@ -35,11 +30,10 @@ Devices: [National Instruments] AT-MIO-16 (atmio16), AT-MIO-16D (atmio16d)
*
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-
#include "comedi_fc.h"
#include "8255.h"
@@ -582,15 +576,19 @@ static int atmio16d_dio_insn_config(struct comedi_device *dev,
unsigned int *data)
{
struct atmio16d_private *devpriv = dev->private;
- int i;
- int mask;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int mask;
+ int ret;
+
+ if (chan < 4)
+ mask = 0x0f;
+ else
+ mask = 0xf0;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
- for (i = 0; i < insn->n; i++) {
- mask = (CR_CHAN(insn->chanspec) < 4) ? 0x0f : 0xf0;
- s->io_bits &= ~mask;
- if (data[i])
- s->io_bits |= mask;
- }
devpriv->com_reg_2_state &= ~(COMREG2_DOUTEN0 | COMREG2_DOUTEN1);
if (s->io_bits & 0x0f)
devpriv->com_reg_2_state |= COMREG2_DOUTEN0;
@@ -598,7 +596,7 @@ static int atmio16d_dio_insn_config(struct comedi_device *dev,
devpriv->com_reg_2_state |= COMREG2_DOUTEN1;
outw(devpriv->com_reg_2_state, dev->iobase + COM_REG_2);
- return i;
+ return insn->n;
}
/*
@@ -650,10 +648,9 @@ static int atmio16d_attach(struct comedi_device *dev,
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
/* reset the atmio16d hardware */
reset_atmio16d(dev);
@@ -767,7 +764,6 @@ static int atmio16d_attach(struct comedi_device *dev,
static void atmio16d_detach(struct comedi_device *dev)
{
- comedi_spriv_free(dev, 3);
reset_atmio16d(dev);
comedi_legacy_detach(dev);
}
diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
index d067ef7..404f83d 100644
--- a/drivers/staging/comedi/drivers/ni_daq_700.c
+++ b/drivers/staging/comedi/drivers/ni_daq_700.c
@@ -15,11 +15,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
/*
@@ -40,7 +35,7 @@ port, bit 0; channel 8 corresponds to the input port, bit 0.
Digital direction configuration: channels 0-7 output, 8-15 input (8255 device
emu as port A output, port B input, port C N/A).
-Analog: The input range is 0 to 4095 for -10 to +10 volts
+Analog: The input range is 0 to 4095 for -10 to +10 volts
IRQ is assigned but not used.
Version 0.1 Original DIO only driver
@@ -50,9 +45,9 @@ Manuals: Register level: http://www.ni.com/pdf/manuals/340698.pdf
User Manual: http://www.ni.com/pdf/manuals/320676d.pdf
*/
-#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/interrupt.h>
-#include <linux/slab.h>
#include "../comedidev.h"
@@ -95,21 +90,17 @@ static int daq700_dio_insn_bits(struct comedi_device *dev,
static int daq700_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- unsigned int chan = 1 << CR_CHAN(insn->chanspec);
-
- switch (data[0]) {
- case INSN_CONFIG_DIO_INPUT:
- break;
- case INSN_CONFIG_DIO_OUTPUT:
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (s->io_bits & chan) ? COMEDI_OUTPUT : COMEDI_INPUT;
- break;
- default:
- return -EINVAL;
- }
+ int ret;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
+
+ /* The DIO channels are not configurable, fix the io_bits */
+ s->io_bits = 0x00ff;
return insn->n;
}
@@ -183,7 +174,7 @@ static int daq700_ai_rinsn(struct comedi_device *dev,
*/
static void daq700_ai_config(struct comedi_device *dev,
struct comedi_subdevice *s)
-{
+{
unsigned long iobase = dev->iobase;
outb(0x80, iobase + CMD_R1); /* disable scanning, ADC to chan 0 */
diff --git a/drivers/staging/comedi/drivers/ni_daq_dio24.c b/drivers/staging/comedi/drivers/ni_daq_dio24.c
index 9b7805f..335ea34 100644
--- a/drivers/staging/comedi/drivers/ni_daq_dio24.c
+++ b/drivers/staging/comedi/drivers/ni_daq_dio24.c
@@ -18,12 +18,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-************************************************************************
*/
/*
Driver: ni_daq_dio24
@@ -37,6 +31,7 @@ This is just a wrapper around the 8255.o driver to properly handle
the PCMCIA interface.
*/
+#include <linux/module.h>
#include "../comedidev.h"
#include <pcmcia/cistpl.h>
@@ -71,17 +66,11 @@ static int dio24_auto_attach(struct comedi_device *dev,
return 0;
}
-static void dio24_detach(struct comedi_device *dev)
-{
- comedi_spriv_free(dev, 0);
- comedi_pcmcia_disable(dev);
-}
-
static struct comedi_driver driver_dio24 = {
.driver_name = "ni_daq_dio24",
.module = THIS_MODULE,
.auto_attach = dio24_auto_attach,
- .detach = dio24_detach,
+ .detach = comedi_pcmcia_disable,
};
static int dio24_cs_attach(struct pcmcia_device *link)
diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
index 77a7bb6..1add114 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.c
+++ b/drivers/staging/comedi/drivers/ni_labpc.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
@@ -61,6 +57,7 @@
* 320502b (lab-pc+)
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/io.h>
@@ -68,79 +65,12 @@
#include "../comedidev.h"
-#include <asm/dma.h>
-
#include "8253.h"
#include "8255.h"
#include "comedi_fc.h"
#include "ni_labpc.h"
-
-/*
- * Register map (all registers are 8-bit)
- */
-#define STAT1_REG 0x00 /* R: Status 1 reg */
-#define STAT1_DAVAIL (1 << 0)
-#define STAT1_OVERRUN (1 << 1)
-#define STAT1_OVERFLOW (1 << 2)
-#define STAT1_CNTINT (1 << 3)
-#define STAT1_GATA0 (1 << 5)
-#define STAT1_EXTGATA0 (1 << 6)
-#define CMD1_REG 0x00 /* W: Command 1 reg */
-#define CMD1_MA(x) (((x) & 0x7) << 0)
-#define CMD1_TWOSCMP (1 << 3)
-#define CMD1_GAIN_MASK (7 << 4)
-#define CMD1_SCANEN (1 << 7)
-#define CMD2_REG 0x01 /* W: Command 2 reg */
-#define CMD2_PRETRIG (1 << 0)
-#define CMD2_HWTRIG (1 << 1)
-#define CMD2_SWTRIG (1 << 2)
-#define CMD2_TBSEL (1 << 3)
-#define CMD2_2SDAC0 (1 << 4)
-#define CMD2_2SDAC1 (1 << 5)
-#define CMD2_LDAC(x) (1 << (6 + (x)))
-#define CMD3_REG 0x02 /* W: Command 3 reg */
-#define CMD3_DMAEN (1 << 0)
-#define CMD3_DIOINTEN (1 << 1)
-#define CMD3_DMATCINTEN (1 << 2)
-#define CMD3_CNTINTEN (1 << 3)
-#define CMD3_ERRINTEN (1 << 4)
-#define CMD3_FIFOINTEN (1 << 5)
-#define ADC_START_CONVERT_REG 0x03 /* W: Start Convert reg */
-#define DAC_LSB_REG(x) (0x04 + 2 * (x)) /* W: DAC0/1 LSB reg */
-#define DAC_MSB_REG(x) (0x05 + 2 * (x)) /* W: DAC0/1 MSB reg */
-#define ADC_FIFO_CLEAR_REG 0x08 /* W: A/D FIFO Clear reg */
-#define ADC_FIFO_REG 0x0a /* R: A/D FIFO reg */
-#define DMATC_CLEAR_REG 0x0a /* W: DMA Interrupt Clear reg */
-#define TIMER_CLEAR_REG 0x0c /* W: Timer Interrupt Clear reg */
-#define CMD6_REG 0x0e /* W: Command 6 reg */
-#define CMD6_NRSE (1 << 0)
-#define CMD6_ADCUNI (1 << 1)
-#define CMD6_DACUNI(x) (1 << (2 + (x)))
-#define CMD6_HFINTEN (1 << 5)
-#define CMD6_DQINTEN (1 << 6)
-#define CMD6_SCANUP (1 << 7)
-#define CMD4_REG 0x0f /* W: Command 3 reg */
-#define CMD4_INTSCAN (1 << 0)
-#define CMD4_EOIRCV (1 << 1)
-#define CMD4_ECLKDRV (1 << 2)
-#define CMD4_SEDIFF (1 << 3)
-#define CMD4_ECLKRCV (1 << 4)
-#define DIO_BASE_REG 0x10 /* R/W: 8255 DIO base reg */
-#define COUNTER_A_BASE_REG 0x14 /* R/W: 8253 Counter A base reg */
-#define COUNTER_B_BASE_REG 0x18 /* R/W: 8253 Counter B base reg */
-#define CMD5_REG 0x1c /* W: Command 5 reg */
-#define CMD5_WRTPRT (1 << 2)
-#define CMD5_DITHEREN (1 << 3)
-#define CMD5_CALDACLD (1 << 4)
-#define CMD5_SCLK (1 << 5)
-#define CMD5_SDATA (1 << 6)
-#define CMD5_EEPROMCS (1 << 7)
-#define STAT2_REG 0x1d /* R: Status 2 reg */
-#define STAT2_PROMOUT (1 << 0)
-#define STAT2_OUTA1 (1 << 1)
-#define STAT2_FIFONHF (1 << 2)
-#define INTERVAL_COUNT_REG 0x1e /* W: Interval Counter Data reg */
-#define INTERVAL_STROBE_REG 0x1f /* W: Interval Counter Strobe reg */
+#include "ni_labpc_regs.h"
+#include "ni_labpc_isadma.h"
#define LABPC_SIZE 0x20 /* size of ISA io region */
#define LABPC_TIMER_BASE 500 /* 2 MHz master clock */
@@ -153,11 +83,6 @@ enum scan_mode {
MODE_MULT_CHAN_DOWN,
};
-static const int labpc_plus_ai_gain_bits[] = {
- 0x00, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70,
- 0x00, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70,
-};
-
static const struct comedi_lrange range_labpc_plus_ai = {
16, {
BIP_RANGE(5),
@@ -179,13 +104,7 @@ static const struct comedi_lrange range_labpc_plus_ai = {
}
};
-const int labpc_1200_ai_gain_bits[] = {
- 0x00, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70,
- 0x00, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70,
-};
-EXPORT_SYMBOL_GPL(labpc_1200_ai_gain_bits);
-
-const struct comedi_lrange range_labpc_1200_ai = {
+static const struct comedi_lrange range_labpc_1200_ai = {
14, {
BIP_RANGE(5),
BIP_RANGE(2.5),
@@ -203,7 +122,6 @@ const struct comedi_lrange range_labpc_1200_ai = {
UNI_RANGE(0.1)
}
};
-EXPORT_SYMBOL_GPL(range_labpc_1200_ai);
static const struct comedi_lrange range_labpc_ao = {
2, {
@@ -239,34 +157,22 @@ static const struct labpc_boardinfo labpc_boards[] = {
{
.name = "lab-pc-1200",
.ai_speed = 10000,
- .register_layout = labpc_1200_layout,
- .has_ao = 1,
- .ai_range_table = &range_labpc_1200_ai,
- .ai_range_code = labpc_1200_ai_gain_bits,
.ai_scan_up = 1,
+ .has_ao = 1,
+ .is_labpc1200 = 1,
}, {
.name = "lab-pc-1200ai",
.ai_speed = 10000,
- .register_layout = labpc_1200_layout,
- .ai_range_table = &range_labpc_1200_ai,
- .ai_range_code = labpc_1200_ai_gain_bits,
.ai_scan_up = 1,
+ .is_labpc1200 = 1,
}, {
.name = "lab-pc+",
.ai_speed = 12000,
- .register_layout = labpc_plus_layout,
.has_ao = 1,
- .ai_range_table = &range_labpc_plus_ai,
- .ai_range_code = labpc_plus_ai_gain_bits,
},
};
#endif
-/* size in bytes of dma buffer */
-static const int dma_buffer_size = 0xff00;
-/* 2 bytes per sample */
-static const int sample_size = 2;
-
static int labpc_counter_load(struct comedi_device *dev,
unsigned long base_address,
unsigned int counter_number,
@@ -326,12 +232,21 @@ static void labpc_ai_set_chan_and_gain(struct comedi_device *dev,
const struct labpc_boardinfo *board = comedi_board(dev);
struct labpc_private *devpriv = dev->private;
+ if (board->is_labpc1200) {
+ /*
+ * The LabPC-1200 boards do not have a gain
+ * of '0x10'. Skip the range values that would
+ * result in this gain.
+ */
+ range += (range > 0) + (range > 7);
+ }
+
/* munge channel bits for differential/scan disabled mode */
if ((mode == MODE_SINGLE_CHAN || mode == MODE_SINGLE_CHAN_INTERVAL) &&
aref == AREF_DIFF)
chan *= 2;
devpriv->cmd1 = CMD1_MA(chan);
- devpriv->cmd1 |= board->ai_range_code[range];
+ devpriv->cmd1 |= CMD1_GAIN(range);
devpriv->write_byte(devpriv->cmd1, dev->iobase + CMD1_REG);
}
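
To see what the range adjustment above does: range 0 is left alone, ranges 1-7 are bumped up by one (1 becomes 2, ..., 7 becomes 8), and ranges 8-13, the start of the second half of the 14-entry range table, are bumped up by two (8 becomes 10, ..., 13 becomes 15):

	range:     0  1  2  3  4  5  6  7  8  9 10 11 12 13
	adjusted:  0  2  3  4  5  6  7  8 10 11 12 13 14 15

Assuming CMD1_GAIN() keeps only the 3-bit gain field (CMD1_GAIN_MASK was 7 << 4 in the register map removed from this file), this reproduces the old labpc_1200_ai_gain_bits table, which listed 0x00, 0x20, 0x30 ... 0x70 twice with no 0x10 step.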
@@ -347,7 +262,7 @@ static void labpc_setup_cmd6_reg(struct comedi_device *dev,
const struct labpc_boardinfo *board = comedi_board(dev);
struct labpc_private *devpriv = dev->private;
- if (board->register_layout != labpc_1200_layout)
+ if (!board->is_labpc1200)
return;
/* reference inputs to ground or common? */
@@ -465,32 +380,6 @@ static int labpc_ai_insn_read(struct comedi_device *dev,
return insn->n;
}
-#ifdef CONFIG_ISA_DMA_API
-/* utility function that suggests a dma transfer size in bytes */
-static unsigned int labpc_suggest_transfer_size(const struct comedi_cmd *cmd)
-{
- unsigned int size;
- unsigned int freq;
-
- if (cmd->convert_src == TRIG_TIMER)
- freq = 1000000000 / cmd->convert_arg;
- /* return some default value */
- else
- freq = 0xffffffff;
-
- /* make buffer fill in no more than 1/3 second */
- size = (freq / 3) * sample_size;
-
- /* set a minimum and maximum size allowed */
- if (size > dma_buffer_size)
- size = dma_buffer_size - dma_buffer_size % sample_size;
- else if (size < sample_size)
- size = sample_size;
-
- return size;
-}
-#endif
-
static bool labpc_use_continuous_mode(const struct comedi_cmd *cmd,
enum scan_mode mode)
{
@@ -759,7 +648,7 @@ static int labpc_ai_cmdtest(struct comedi_device *dev,
err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
stop_mask = TRIG_COUNT | TRIG_NONE;
- if (board->register_layout == labpc_1200_layout)
+ if (board->is_labpc1200)
stop_mask |= TRIG_EXT;
err |= cfc_check_trigger_src(&cmd->stop_src, stop_mask);
@@ -883,25 +772,20 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
return ret;
}
-#ifdef CONFIG_ISA_DMA_API
- /* figure out what method we will use to transfer data */
- if (devpriv->dma_chan && /* need a dma channel allocated */
- /*
- * dma unsafe at RT priority,
- * and too much setup time for TRIG_WAKE_EOS for
- */
- (cmd->flags & (TRIG_WAKE_EOS | TRIG_RT)) == 0) {
+ /* figure out what method we will use to transfer data */
+ if (labpc_have_dma_chan(dev) &&
+ /* dma unsafe at RT priority,
+ * and too much setup time for TRIG_WAKE_EOS */
+ (cmd->flags & (TRIG_WAKE_EOS | TRIG_RT)) == 0)
xfer = isa_dma_transfer;
- /* pc-plus has no fifo-half full interrupt */
- } else
-#endif
- if (board->register_layout == labpc_1200_layout &&
- /* wake-end-of-scan should interrupt on fifo not empty */
- (cmd->flags & TRIG_WAKE_EOS) == 0 &&
- /* make sure we are taking more than just a few points */
- (cmd->stop_src != TRIG_COUNT || devpriv->count > 256)) {
+ else if (/* pc-plus has no fifo-half full interrupt */
+ board->is_labpc1200 &&
+ /* wake-end-of-scan should interrupt on fifo not empty */
+ (cmd->flags & TRIG_WAKE_EOS) == 0 &&
+ /* make sure we are taking more than just a few points */
+ (cmd->stop_src != TRIG_COUNT || devpriv->count > 256))
xfer = fifo_half_full_transfer;
- } else
+ else
xfer = fifo_not_empty_transfer;
devpriv->current_transfer = xfer;
@@ -966,40 +850,14 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
labpc_clear_adc_fifo(dev);
-#ifdef CONFIG_ISA_DMA_API
- /* set up dma transfer */
- if (xfer == isa_dma_transfer) {
- unsigned long irq_flags;
-
- irq_flags = claim_dma_lock();
- disable_dma(devpriv->dma_chan);
- /* clear flip-flop to make sure 2-byte registers for
- * count and address get set correctly */
- clear_dma_ff(devpriv->dma_chan);
- set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
- /* set appropriate size of transfer */
- devpriv->dma_transfer_size = labpc_suggest_transfer_size(cmd);
- if (cmd->stop_src == TRIG_COUNT &&
- devpriv->count * sample_size < devpriv->dma_transfer_size) {
- devpriv->dma_transfer_size =
- devpriv->count * sample_size;
- }
- set_dma_count(devpriv->dma_chan, devpriv->dma_transfer_size);
- enable_dma(devpriv->dma_chan);
- release_dma_lock(irq_flags);
- /* enable board's dma */
- devpriv->cmd3 |= (CMD3_DMAEN | CMD3_DMATCINTEN);
- } else
- devpriv->cmd3 &= ~(CMD3_DMAEN | CMD3_DMATCINTEN);
-#endif
+ if (xfer == isa_dma_transfer)
+ labpc_setup_dma(dev, s);
/* enable error interrupts */
devpriv->cmd3 |= CMD3_ERRINTEN;
/* enable fifo not empty interrupt? */
if (xfer == fifo_not_empty_transfer)
devpriv->cmd3 |= CMD3_FIFOINTEN;
- else
- devpriv->cmd3 &= ~CMD3_FIFOINTEN;
devpriv->write_byte(devpriv->cmd3, dev->iobase + CMD3_REG);
/* setup any external triggering/pacing (cmd4 register) */
@@ -1040,74 +898,6 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
return 0;
}
-#ifdef CONFIG_ISA_DMA_API
-static void labpc_drain_dma(struct comedi_device *dev)
-{
- struct labpc_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async = s->async;
- int status;
- unsigned long flags;
- unsigned int max_points, num_points, residue, leftover;
- int i;
-
- status = devpriv->stat1;
-
- flags = claim_dma_lock();
- disable_dma(devpriv->dma_chan);
- /* clear flip-flop to make sure 2-byte registers for
- * count and address get set correctly */
- clear_dma_ff(devpriv->dma_chan);
-
- /* figure out how many points to read */
- max_points = devpriv->dma_transfer_size / sample_size;
- /* residue is the number of points left to be done on the dma
- * transfer. It should always be zero at this point unless
- * the stop_src is set to external triggering.
- */
- residue = get_dma_residue(devpriv->dma_chan) / sample_size;
- num_points = max_points - residue;
- if (devpriv->count < num_points && async->cmd.stop_src == TRIG_COUNT)
- num_points = devpriv->count;
-
- /* figure out how many points will be stored next time */
- leftover = 0;
- if (async->cmd.stop_src != TRIG_COUNT) {
- leftover = devpriv->dma_transfer_size / sample_size;
- } else if (devpriv->count > num_points) {
- leftover = devpriv->count - num_points;
- if (leftover > max_points)
- leftover = max_points;
- }
-
- /* write data to comedi buffer */
- for (i = 0; i < num_points; i++)
- cfc_write_to_buffer(s, devpriv->dma_buffer[i]);
-
- if (async->cmd.stop_src == TRIG_COUNT)
- devpriv->count -= num_points;
-
- /* set address and count for next transfer */
- set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
- set_dma_count(devpriv->dma_chan, leftover * sample_size);
- release_dma_lock(flags);
-
- async->events |= COMEDI_CB_BLOCK;
-}
-
-static void handle_isa_dma(struct comedi_device *dev)
-{
- struct labpc_private *devpriv = dev->private;
-
- labpc_drain_dma(dev);
-
- enable_dma(devpriv->dma_chan);
-
- /* clear dma tc interrupt */
- devpriv->write_byte(0x1, dev->iobase + DMATC_CLEAR_REG);
-}
-#endif
-
/* read all available samples from ai fifo */
static int labpc_drain_fifo(struct comedi_device *dev)
{
@@ -1144,12 +934,10 @@ static int labpc_drain_fifo(struct comedi_device *dev)
* when acquisition is terminated by stop_src == TRIG_EXT). */
static void labpc_drain_dregs(struct comedi_device *dev)
{
-#ifdef CONFIG_ISA_DMA_API
struct labpc_private *devpriv = dev->private;
if (devpriv->current_transfer == isa_dma_transfer)
labpc_drain_dma(dev);
-#endif
labpc_drain_fifo(dev);
}
@@ -1175,7 +963,7 @@ static irqreturn_t labpc_interrupt(int irq, void *d)
/* read board status */
devpriv->stat1 = devpriv->read_byte(dev->iobase + STAT1_REG);
- if (board->register_layout == labpc_1200_layout)
+ if (board->is_labpc1200)
devpriv->stat2 = devpriv->read_byte(dev->iobase + STAT2_REG);
if ((devpriv->stat1 & (STAT1_GATA0 | STAT1_CNTINT | STAT1_OVERFLOW |
@@ -1194,19 +982,9 @@ static irqreturn_t labpc_interrupt(int irq, void *d)
return IRQ_HANDLED;
}
-#ifdef CONFIG_ISA_DMA_API
- if (devpriv->current_transfer == isa_dma_transfer) {
- /*
- * if a dma terminal count of external stop trigger
- * has occurred
- */
- if (devpriv->stat1 & STAT1_GATA0 ||
- (board->register_layout == labpc_1200_layout
- && devpriv->stat2 & STAT2_OUTA1)) {
- handle_isa_dma(dev);
- }
- } else
-#endif
+ if (devpriv->current_transfer == isa_dma_transfer)
+ labpc_handle_dma_status(dev);
+ else
labpc_drain_fifo(dev);
if (devpriv->stat1 & STAT1_CNTINT) {
@@ -1266,7 +1044,7 @@ static int labpc_ao_insn_write(struct comedi_device *dev,
spin_unlock_irqrestore(&dev->spinlock, flags);
/* set range */
- if (board->register_layout == labpc_1200_layout) {
+ if (board->is_labpc1200) {
range = CR_RANGE(insn->chanspec);
if (labpc_range_is_unipolar(s, range))
devpriv->cmd6 |= CMD6_DACUNI(channel);
@@ -1603,7 +1381,7 @@ int labpc_common_attach(struct comedi_device *dev,
devpriv->write_byte(devpriv->cmd2, dev->iobase + CMD2_REG);
devpriv->write_byte(devpriv->cmd3, dev->iobase + CMD3_REG);
devpriv->write_byte(devpriv->cmd4, dev->iobase + CMD4_REG);
- if (board->register_layout == labpc_1200_layout) {
+ if (board->is_labpc1200) {
devpriv->write_byte(devpriv->cmd5, dev->iobase + CMD5_REG);
devpriv->write_byte(devpriv->cmd6, dev->iobase + CMD6_REG);
}
@@ -1626,7 +1404,8 @@ int labpc_common_attach(struct comedi_device *dev,
s->n_chan = 8;
s->len_chanlist = 8;
s->maxdata = 0x0fff;
- s->range_table = board->ai_range_table;
+ s->range_table = board->is_labpc1200
+ ? &range_labpc_1200_ai : &range_labpc_plus_ai;
s->insn_read = labpc_ai_insn_read;
if (dev->irq) {
dev->read_subdev = s;
@@ -1671,7 +1450,7 @@ int labpc_common_attach(struct comedi_device *dev,
/* calibration subdevices for boards that have one */
s = &dev->subdevices[3];
- if (board->register_layout == labpc_1200_layout) {
+ if (board->is_labpc1200) {
s->type = COMEDI_SUBD_CALIB;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
s->n_chan = 16;
@@ -1686,7 +1465,7 @@ int labpc_common_attach(struct comedi_device *dev,
/* EEPROM */
s = &dev->subdevices[4];
- if (board->register_layout == labpc_1200_layout) {
+ if (board->is_labpc1200) {
s->type = COMEDI_SUBD_MEMORY;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
s->n_chan = EEPROM_SIZE;
@@ -1703,12 +1482,6 @@ int labpc_common_attach(struct comedi_device *dev,
}
EXPORT_SYMBOL_GPL(labpc_common_attach);
-void labpc_common_detach(struct comedi_device *dev)
-{
- comedi_spriv_free(dev, 2);
-}
-EXPORT_SYMBOL_GPL(labpc_common_detach);
-
#if IS_ENABLED(CONFIG_COMEDI_NI_LABPC_ISA)
static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
@@ -1717,10 +1490,9 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
unsigned int dma_chan = it->options[2];
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_request_region(dev, it->options[0], LABPC_SIZE);
if (ret)
@@ -1730,29 +1502,8 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
-#ifdef CONFIG_ISA_DMA_API
- if (dev->irq && (dma_chan == 1 || dma_chan == 3)) {
- devpriv->dma_buffer = kmalloc(dma_buffer_size,
- GFP_KERNEL | GFP_DMA);
- if (devpriv->dma_buffer) {
- ret = request_dma(dma_chan, dev->board_name);
- if (ret == 0) {
- unsigned long dma_flags;
-
- devpriv->dma_chan = dma_chan;
- devpriv->dma_addr =
- virt_to_bus(devpriv->dma_buffer);
-
- dma_flags = claim_dma_lock();
- disable_dma(devpriv->dma_chan);
- set_dma_mode(devpriv->dma_chan, DMA_MODE_READ);
- release_dma_lock(dma_flags);
- } else {
- kfree(devpriv->dma_buffer);
- }
- }
- }
-#endif
+ if (dev->irq)
+ labpc_init_dma_chan(dev, dma_chan);
return 0;
}
@@ -1761,13 +1512,9 @@ static void labpc_detach(struct comedi_device *dev)
{
struct labpc_private *devpriv = dev->private;
- labpc_common_detach(dev);
+ if (devpriv)
+ labpc_free_dma_chan(dev);
- if (devpriv) {
- kfree(devpriv->dma_buffer);
- if (devpriv->dma_chan)
- free_dma(devpriv->dma_chan);
- }
comedi_legacy_detach(dev);
}
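
As a quick sanity check on labpc_suggest_transfer_size() (removed here and re-created in ni_labpc_isadma.c below): with a 10 us convert period (convert_arg = 10000 ns) the sample rate is 100000 samples/s, so a third of a second of data is 33333 samples, or 66666 bytes at 2 bytes per sample; that exceeds the 0xff00-byte DMA buffer, so the transfer size is clamped to 65280 bytes. At 1 kHz (convert_arg = 1000000 ns) the same rule gives 333 samples, a 666-byte transfer, so the DMA-terminal-count interrupt rate stays near three per second in both cases.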
diff --git a/drivers/staging/comedi/drivers/ni_labpc.h b/drivers/staging/comedi/drivers/ni_labpc.h
index 4b691f5..486589f 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.h
+++ b/drivers/staging/comedi/drivers/ni_labpc.h
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
#ifndef _NI_LABPC_H
@@ -27,27 +22,17 @@
#define EEPROM_SIZE 256 /* 256 byte eeprom */
#define NUM_AO_CHAN 2 /* boards have two analog output channels */
-enum labpc_register_layout { labpc_plus_layout, labpc_1200_layout };
enum transfer_type { fifo_not_empty_transfer, fifo_half_full_transfer,
isa_dma_transfer
};
struct labpc_boardinfo {
const char *name;
- int device_id; /* device id for pci and pcmcia boards */
- int ai_speed; /* maximum input speed in nanoseconds */
-
- /* 1200 has extra registers compared to pc+ */
- enum labpc_register_layout register_layout;
- int has_ao; /* has analog output true/false */
- const struct comedi_lrange *ai_range_table;
- const int *ai_range_code;
-
- /* board can auto scan up in ai channels, not just down */
- unsigned ai_scan_up:1;
-
- /* uses memory mapped io instead of ioports */
- unsigned has_mmio:1;
+ int ai_speed; /* maximum input speed in ns */
+ unsigned ai_scan_up:1; /* can auto scan up in ai channels */
+ unsigned has_ao:1; /* has analog outputs */
+ unsigned is_labpc1200:1; /* has extra regs compared to pc+ */
+ unsigned has_mmio:1; /* uses memory mapped io */
};
struct labpc_private {
@@ -101,9 +86,5 @@ struct labpc_private {
int labpc_common_attach(struct comedi_device *dev,
unsigned int irq, unsigned long isr_flags);
-void labpc_common_detach(struct comedi_device *dev);
-
-extern const int labpc_1200_ai_gain_bits[];
-extern const struct comedi_lrange range_labpc_1200_ai;
#endif /* _NI_LABPC_H */
diff --git a/drivers/staging/comedi/drivers/ni_labpc_cs.c b/drivers/staging/comedi/drivers/ni_labpc_cs.c
index 9e3737c..0a8b322 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_cs.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_cs.c
@@ -18,12 +18,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-************************************************************************
*/
/*
Driver: ni_labpc_cs
@@ -59,10 +53,10 @@ NI manuals:
*/
+#include <linux/module.h>
#include "../comedidev.h"
#include <linux/delay.h>
-#include <linux/slab.h>
#include "8253.h"
#include "8255.h"
@@ -76,12 +70,9 @@ NI manuals:
static const struct labpc_boardinfo labpc_cs_boards[] = {
{
.name = "daqcard-1200",
- .device_id = 0x103,
.ai_speed = 10000,
- .register_layout = labpc_1200_layout,
.has_ao = 1,
- .ai_range_table = &range_labpc_1200_ai,
- .ai_range_code = labpc_1200_ai_gain_bits,
+ .is_labpc1200 = 1,
},
};
@@ -105,25 +96,18 @@ static int labpc_auto_attach(struct comedi_device *dev,
if (!link->irq)
return -EINVAL;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
return labpc_common_attach(dev, link->irq, IRQF_SHARED);
}
-static void labpc_detach(struct comedi_device *dev)
-{
- labpc_common_detach(dev);
- comedi_pcmcia_disable(dev);
-}
-
static struct comedi_driver driver_labpc_cs = {
.driver_name = "ni_labpc_cs",
.module = THIS_MODULE,
.auto_attach = labpc_auto_attach,
- .detach = labpc_detach,
+ .detach = comedi_pcmcia_disable,
};
static int labpc_cs_attach(struct pcmcia_device *link)
diff --git a/drivers/staging/comedi/drivers/ni_labpc_isadma.c b/drivers/staging/comedi/drivers/ni_labpc_isadma.c
new file mode 100644
index 0000000..2149596
--- /dev/null
+++ b/drivers/staging/comedi/drivers/ni_labpc_isadma.c
@@ -0,0 +1,226 @@
+/*
+ * comedi/drivers/ni_labpc_isadma.c
+ * ISA DMA support for National Instruments Lab-PC series boards and
+ * compatibles.
+ *
+ * Extracted from ni_labpc.c:
+ * Copyright (C) 2001-2003 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "../comedidev.h"
+
+#include <asm/dma.h>
+
+#include "comedi_fc.h"
+#include "ni_labpc.h"
+#include "ni_labpc_regs.h"
+#include "ni_labpc_isadma.h"
+
+/* size in bytes of dma buffer */
+static const int dma_buffer_size = 0xff00;
+/* 2 bytes per sample */
+static const int sample_size = 2;
+
+/* utility function that suggests a dma transfer size in bytes */
+static unsigned int labpc_suggest_transfer_size(const struct comedi_cmd *cmd)
+{
+ unsigned int size;
+ unsigned int freq;
+
+ if (cmd->convert_src == TRIG_TIMER)
+ freq = 1000000000 / cmd->convert_arg;
+ else
+ /* return some default value */
+ freq = 0xffffffff;
+
+ /* make buffer fill in no more than 1/3 second */
+ size = (freq / 3) * sample_size;
+
+ /* set a minimum and maximum size allowed */
+ if (size > dma_buffer_size)
+ size = dma_buffer_size - dma_buffer_size % sample_size;
+ else if (size < sample_size)
+ size = sample_size;
+
+ return size;
+}
+
+void labpc_setup_dma(struct comedi_device *dev, struct comedi_subdevice *s)
+{
+ struct labpc_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned long irq_flags;
+
+ irq_flags = claim_dma_lock();
+ disable_dma(devpriv->dma_chan);
+ /* clear flip-flop to make sure 2-byte registers for
+ * count and address get set correctly */
+ clear_dma_ff(devpriv->dma_chan);
+ set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
+ /* set appropriate size of transfer */
+ devpriv->dma_transfer_size = labpc_suggest_transfer_size(cmd);
+ if (cmd->stop_src == TRIG_COUNT &&
+ devpriv->count * sample_size < devpriv->dma_transfer_size)
+ devpriv->dma_transfer_size = devpriv->count * sample_size;
+ set_dma_count(devpriv->dma_chan, devpriv->dma_transfer_size);
+ enable_dma(devpriv->dma_chan);
+ release_dma_lock(irq_flags);
+ /* set CMD3 bits for caller to enable DMA and interrupt */
+ devpriv->cmd3 |= (CMD3_DMAEN | CMD3_DMATCINTEN);
+}
+EXPORT_SYMBOL_GPL(labpc_setup_dma);
+
+void labpc_drain_dma(struct comedi_device *dev)
+{
+ struct labpc_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_async *async = s->async;
+ int status;
+ unsigned long flags;
+ unsigned int max_points, num_points, residue, leftover;
+ int i;
+
+ status = devpriv->stat1;
+
+ flags = claim_dma_lock();
+ disable_dma(devpriv->dma_chan);
+ /* clear flip-flop to make sure 2-byte registers for
+ * count and address get set correctly */
+ clear_dma_ff(devpriv->dma_chan);
+
+ /* figure out how many points to read */
+ max_points = devpriv->dma_transfer_size / sample_size;
+ /* residue is the number of points left to be done on the dma
+ * transfer. It should always be zero at this point unless
+ * the stop_src is set to external triggering.
+ */
+ residue = get_dma_residue(devpriv->dma_chan) / sample_size;
+ num_points = max_points - residue;
+ if (devpriv->count < num_points && async->cmd.stop_src == TRIG_COUNT)
+ num_points = devpriv->count;
+
+ /* figure out how many points will be stored next time */
+ leftover = 0;
+ if (async->cmd.stop_src != TRIG_COUNT) {
+ leftover = devpriv->dma_transfer_size / sample_size;
+ } else if (devpriv->count > num_points) {
+ leftover = devpriv->count - num_points;
+ if (leftover > max_points)
+ leftover = max_points;
+ }
+
+ /* write data to comedi buffer */
+ for (i = 0; i < num_points; i++)
+ cfc_write_to_buffer(s, devpriv->dma_buffer[i]);
+
+ if (async->cmd.stop_src == TRIG_COUNT)
+ devpriv->count -= num_points;
+
+ /* set address and count for next transfer */
+ set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
+ set_dma_count(devpriv->dma_chan, leftover * sample_size);
+ release_dma_lock(flags);
+
+ async->events |= COMEDI_CB_BLOCK;
+}
+EXPORT_SYMBOL_GPL(labpc_drain_dma);
+
+static void handle_isa_dma(struct comedi_device *dev)
+{
+ struct labpc_private *devpriv = dev->private;
+
+ labpc_drain_dma(dev);
+
+ enable_dma(devpriv->dma_chan);
+
+ /* clear dma tc interrupt */
+ devpriv->write_byte(0x1, dev->iobase + DMATC_CLEAR_REG);
+}
+
+void labpc_handle_dma_status(struct comedi_device *dev)
+{
+ const struct labpc_boardinfo *board = comedi_board(dev);
+ struct labpc_private *devpriv = dev->private;
+
+ /*
+ * if a dma terminal count of external stop trigger
+	 * if a dma terminal count or external stop trigger
+ */
+ if (devpriv->stat1 & STAT1_GATA0 ||
+ (board->is_labpc1200 && devpriv->stat2 & STAT2_OUTA1))
+ handle_isa_dma(dev);
+}
+EXPORT_SYMBOL_GPL(labpc_handle_dma_status);
+
+int labpc_init_dma_chan(struct comedi_device *dev, unsigned int dma_chan)
+{
+ struct labpc_private *devpriv = dev->private;
+ void *dma_buffer;
+ unsigned long dma_flags;
+ int ret;
+
+ if (dma_chan != 1 && dma_chan != 3)
+ return -EINVAL;
+
+ dma_buffer = kmalloc(dma_buffer_size, GFP_KERNEL | GFP_DMA);
+ if (!dma_buffer)
+ return -ENOMEM;
+
+ ret = request_dma(dma_chan, dev->board_name);
+ if (ret) {
+ kfree(dma_buffer);
+ return ret;
+ }
+
+ devpriv->dma_buffer = dma_buffer;
+ devpriv->dma_chan = dma_chan;
+ devpriv->dma_addr = virt_to_bus(devpriv->dma_buffer);
+
+ dma_flags = claim_dma_lock();
+ disable_dma(devpriv->dma_chan);
+ set_dma_mode(devpriv->dma_chan, DMA_MODE_READ);
+ release_dma_lock(dma_flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(labpc_init_dma_chan);
+
+void labpc_free_dma_chan(struct comedi_device *dev)
+{
+ struct labpc_private *devpriv = dev->private;
+
+ kfree(devpriv->dma_buffer);
+ devpriv->dma_buffer = NULL;
+ if (devpriv->dma_chan) {
+ free_dma(devpriv->dma_chan);
+ devpriv->dma_chan = 0;
+ }
+}
+EXPORT_SYMBOL_GPL(labpc_free_dma_chan);
+
+static int __init ni_labpc_isadma_init_module(void)
+{
+ return 0;
+}
+module_init(ni_labpc_isadma_init_module);
+
+static void __exit ni_labpc_isadma_cleanup_module(void)
+{
+}
+module_exit(ni_labpc_isadma_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi NI Lab-PC ISA DMA support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_labpc_isadma.h b/drivers/staging/comedi/drivers/ni_labpc_isadma.h
new file mode 100644
index 0000000..771af4b
--- /dev/null
+++ b/drivers/staging/comedi/drivers/ni_labpc_isadma.h
@@ -0,0 +1,57 @@
+/*
+ * ni_labpc ISA DMA support.
+*/
+
+#ifndef _NI_LABPC_ISADMA_H
+#define _NI_LABPC_ISADMA_H
+
+#define NI_LABPC_HAVE_ISA_DMA IS_ENABLED(CONFIG_COMEDI_NI_LABPC_ISADMA)
+
+#if NI_LABPC_HAVE_ISA_DMA
+
+static inline bool labpc_have_dma_chan(struct comedi_device *dev)
+{
+ struct labpc_private *devpriv = dev->private;
+
+ return (bool)devpriv->dma_chan;
+}
+
+int labpc_init_dma_chan(struct comedi_device *dev, unsigned int dma_chan);
+void labpc_free_dma_chan(struct comedi_device *dev);
+void labpc_setup_dma(struct comedi_device *dev, struct comedi_subdevice *s);
+void labpc_drain_dma(struct comedi_device *dev);
+void labpc_handle_dma_status(struct comedi_device *dev);
+
+#else
+
+static inline bool labpc_have_dma_chan(struct comedi_device *dev)
+{
+ return false;
+}
+
+static inline int labpc_init_dma_chan(struct comedi_device *dev,
+ unsigned int dma_chan)
+{
+ return -ENOTSUPP;
+}
+
+static inline void labpc_free_dma_chan(struct comedi_device *dev)
+{
+}
+
+static inline void labpc_setup_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+}
+
+static inline void labpc_drain_dma(struct comedi_device *dev)
+{
+}
+
+static inline void labpc_handle_dma_status(struct comedi_device *dev)
+{
+}
+
+#endif
+
+#endif /* _NI_LABPC_ISADMA_H */
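The header above uses the usual "optional helper module" pattern: when CONFIG_COMEDI_NI_LABPC_ISADMA is enabled the real prototypes are visible, otherwise static inline stubs keep callers building without #ifdef clutter. A generic sketch of the same pattern with made-up foo_* names (assumptions, not part of the patch):

#ifndef _FOO_OPTIONAL_H
#define _FOO_OPTIONAL_H

#include <linux/kconfig.h>
#include <linux/errno.h>

#if IS_ENABLED(CONFIG_FOO_OPTIONAL)
int foo_optional_init(void);
void foo_optional_exit(void);
#else
static inline int foo_optional_init(void)
{
        return -ENOTSUPP;       /* caller treats this as "feature absent" */
}

static inline void foo_optional_exit(void)
{
}
#endif

#endif /* _FOO_OPTIONAL_H */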
diff --git a/drivers/staging/comedi/drivers/ni_labpc_pci.c b/drivers/staging/comedi/drivers/ni_labpc_pci.c
index 8e916f8..8be681f 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_pci.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_pci.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
@@ -33,8 +29,8 @@
* 340914a (pci-1200)
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/slab.h>
#include <linux/pci.h>
#include "../comedidev.h"
@@ -50,11 +46,9 @@ static const struct labpc_boardinfo labpc_pci_boards[] = {
[BOARD_NI_PCI1200] = {
.name = "ni_pci-1200",
.ai_speed = 10000,
- .register_layout = labpc_1200_layout,
- .has_ao = 1,
- .ai_range_table = &range_labpc_1200_ai,
- .ai_range_code = labpc_1200_ai_gain_bits,
.ai_scan_up = 1,
+ .has_ao = 1,
+ .is_labpc1200 = 1,
.has_mmio = 1,
},
};
@@ -78,10 +72,9 @@ static int labpc_pci_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
devpriv->mite = mite_alloc(pcidev);
if (!devpriv->mite)
@@ -98,8 +91,6 @@ static void labpc_pci_detach(struct comedi_device *dev)
{
struct labpc_private *devpriv = dev->private;
- labpc_common_detach(dev);
-
if (devpriv && devpriv->mite) {
mite_unsetup(devpriv->mite);
mite_free(devpriv->mite);
diff --git a/drivers/staging/comedi/drivers/ni_labpc_regs.h b/drivers/staging/comedi/drivers/ni_labpc_regs.h
new file mode 100644
index 0000000..2a274a3
--- /dev/null
+++ b/drivers/staging/comedi/drivers/ni_labpc_regs.h
@@ -0,0 +1,75 @@
+/*
+ * ni_labpc register definitions.
+*/
+
+#ifndef _NI_LABPC_REGS_H
+#define _NI_LABPC_REGS_H
+
+/*
+ * Register map (all registers are 8-bit)
+ */
+#define STAT1_REG 0x00 /* R: Status 1 reg */
+#define STAT1_DAVAIL (1 << 0)
+#define STAT1_OVERRUN (1 << 1)
+#define STAT1_OVERFLOW (1 << 2)
+#define STAT1_CNTINT (1 << 3)
+#define STAT1_GATA0 (1 << 5)
+#define STAT1_EXTGATA0 (1 << 6)
+#define CMD1_REG 0x00 /* W: Command 1 reg */
+#define CMD1_MA(x) (((x) & 0x7) << 0)
+#define CMD1_TWOSCMP (1 << 3)
+#define CMD1_GAIN(x) (((x) & 0x7) << 4)
+#define CMD1_SCANEN (1 << 7)
+#define CMD2_REG 0x01 /* W: Command 2 reg */
+#define CMD2_PRETRIG (1 << 0)
+#define CMD2_HWTRIG (1 << 1)
+#define CMD2_SWTRIG (1 << 2)
+#define CMD2_TBSEL (1 << 3)
+#define CMD2_2SDAC0 (1 << 4)
+#define CMD2_2SDAC1 (1 << 5)
+#define CMD2_LDAC(x) (1 << (6 + (x)))
+#define CMD3_REG 0x02 /* W: Command 3 reg */
+#define CMD3_DMAEN (1 << 0)
+#define CMD3_DIOINTEN (1 << 1)
+#define CMD3_DMATCINTEN (1 << 2)
+#define CMD3_CNTINTEN (1 << 3)
+#define CMD3_ERRINTEN (1 << 4)
+#define CMD3_FIFOINTEN (1 << 5)
+#define ADC_START_CONVERT_REG 0x03 /* W: Start Convert reg */
+#define DAC_LSB_REG(x) (0x04 + 2 * (x)) /* W: DAC0/1 LSB reg */
+#define DAC_MSB_REG(x) (0x05 + 2 * (x)) /* W: DAC0/1 MSB reg */
+#define ADC_FIFO_CLEAR_REG 0x08 /* W: A/D FIFO Clear reg */
+#define ADC_FIFO_REG 0x0a /* R: A/D FIFO reg */
+#define DMATC_CLEAR_REG 0x0a /* W: DMA Interrupt Clear reg */
+#define TIMER_CLEAR_REG 0x0c /* W: Timer Interrupt Clear reg */
+#define CMD6_REG 0x0e /* W: Command 6 reg */
+#define CMD6_NRSE (1 << 0)
+#define CMD6_ADCUNI (1 << 1)
+#define CMD6_DACUNI(x) (1 << (2 + (x)))
+#define CMD6_HFINTEN (1 << 5)
+#define CMD6_DQINTEN (1 << 6)
+#define CMD6_SCANUP (1 << 7)
+#define CMD4_REG 0x0f /* W: Command 4 reg */
+#define CMD4_INTSCAN (1 << 0)
+#define CMD4_EOIRCV (1 << 1)
+#define CMD4_ECLKDRV (1 << 2)
+#define CMD4_SEDIFF (1 << 3)
+#define CMD4_ECLKRCV (1 << 4)
+#define DIO_BASE_REG 0x10 /* R/W: 8255 DIO base reg */
+#define COUNTER_A_BASE_REG 0x14 /* R/W: 8253 Counter A base reg */
+#define COUNTER_B_BASE_REG 0x18 /* R/W: 8253 Counter B base reg */
+#define CMD5_REG 0x1c /* W: Command 5 reg */
+#define CMD5_WRTPRT (1 << 2)
+#define CMD5_DITHEREN (1 << 3)
+#define CMD5_CALDACLD (1 << 4)
+#define CMD5_SCLK (1 << 5)
+#define CMD5_SDATA (1 << 6)
+#define CMD5_EEPROMCS (1 << 7)
+#define STAT2_REG 0x1d /* R: Status 2 reg */
+#define STAT2_PROMOUT (1 << 0)
+#define STAT2_OUTA1 (1 << 1)
+#define STAT2_FIFONHF (1 << 2)
+#define INTERVAL_COUNT_REG 0x1e /* W: Interval Counter Data reg */
+#define INTERVAL_STROBE_REG 0x1f /* W: Interval Counter Strobe reg */
+
+#endif /* _NI_LABPC_REGS_H */
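A short usage sketch for the bit helpers above (not part of the patch): the parameterised macros compose a register value that is then written through the driver's byte accessor. Here 'gain_code' is assumed to already be the board-specific hardware gain code, and example_set_cmd1() is a hypothetical helper.

#include "../comedidev.h"
#include "ni_labpc.h"
#include "ni_labpc_regs.h"

static void example_set_cmd1(struct comedi_device *dev,
                             unsigned int chan, unsigned int gain_code)
{
        struct labpc_private *devpriv = dev->private;
        unsigned int cmd1;

        cmd1 = CMD1_MA(chan) |          /* multiplexer address (channel) */
               CMD1_GAIN(gain_code) |   /* hardware gain code */
               CMD1_SCANEN;             /* enable channel scanning */

        devpriv->write_byte(cmd1, dev->iobase + CMD1_REG);
}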
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 8c5dee9..4e02770 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -15,11 +15,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
@@ -63,6 +58,7 @@
#include <linux/interrupt.h>
#include <linux/sched.h>
+#include <linux/delay.h>
#include "8255.h"
#include "mite.h"
#include "comedi_fc.h"
@@ -3532,37 +3528,21 @@ static int ni_ao_reset(struct comedi_device *dev, struct comedi_subdevice *s)
static int ni_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct ni_private *devpriv = dev->private;
+ int ret;
-#ifdef DEBUG_DIO
- printk("ni_dio_insn_config() chan=%d io=%d\n",
- CR_CHAN(insn->chanspec), data[0]);
-#endif
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= 1 << CR_CHAN(insn->chanspec);
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~(1 << CR_CHAN(insn->chanspec));
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] =
- (s->
- io_bits & (1 << CR_CHAN(insn->chanspec))) ? COMEDI_OUTPUT :
- COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- }
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
devpriv->dio_control &= ~DIO_Pins_Dir_Mask;
devpriv->dio_control |= DIO_Pins_Dir(s->io_bits);
devpriv->stc_writew(dev, devpriv->dio_control, DIO_Control_Register);
- return 1;
+ return insn->n;
}
static int ni_dio_insn_bits(struct comedi_device *dev,
@@ -3600,32 +3580,15 @@ static int ni_m_series_dio_insn_config(struct comedi_device *dev,
unsigned int *data)
{
struct ni_private *devpriv __maybe_unused = dev->private;
+ int ret;
-#ifdef DEBUG_DIO
- printk("ni_m_series_dio_insn_config() chan=%d io=%d\n",
- CR_CHAN(insn->chanspec), data[0]);
-#endif
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= 1 << CR_CHAN(insn->chanspec);
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~(1 << CR_CHAN(insn->chanspec));
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] =
- (s->
- io_bits & (1 << CR_CHAN(insn->chanspec))) ? COMEDI_OUTPUT :
- COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- }
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
ni_writel(s->io_bits, M_Offset_DIO_Direction);
- return 1;
+ return insn->n;
}
static int ni_m_series_dio_insn_bits(struct comedi_device *dev,
@@ -4077,7 +4040,6 @@ static void mio_common_detach(struct comedi_device *dev)
ni_gpct_device_destroy(devpriv->counter_dev);
}
}
- comedi_spriv_free(dev, NI_8255_DIO_SUBDEV);
}
static void init_ao_67xx(struct comedi_device *dev, struct comedi_subdevice *s)
@@ -4369,10 +4331,9 @@ static int ni_alloc_private(struct comedi_device *dev)
{
struct ni_private *devpriv;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
spin_lock_init(&devpriv->window_lock);
spin_lock_init(&devpriv->soft_reg_copy_lock);
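Both DIO hunks above replace an open-coded INSN_CONFIG_DIO_* switch with the core helper comedi_dio_insn_config(), which updates s->io_bits (and answers INSN_CONFIG_DIO_QUERY) so the driver only has to push the new direction mask to the hardware. The shape of the converted handler, as a stand-alone sketch with a hypothetical my_write_direction():

static int my_dio_insn_config(struct comedi_device *dev,
                              struct comedi_subdevice *s,
                              struct comedi_insn *insn,
                              unsigned int *data)
{
        int ret;

        ret = comedi_dio_insn_config(dev, s, insn, data, 0);
        if (ret)
                return ret;

        my_write_direction(dev, s->io_bits);    /* hypothetical register write */

        return insn->n;
}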
diff --git a/drivers/staging/comedi/drivers/ni_mio_cs.c b/drivers/staging/comedi/drivers/ni_mio_cs.c
index 888be7b..229a273 100644
--- a/drivers/staging/comedi/drivers/ni_mio_cs.c
+++ b/drivers/staging/comedi/drivers/ni_mio_cs.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: ni_mio_cs
@@ -41,6 +36,7 @@ See the notes in the ni_atmio.o driver.
*/
+#include <linux/module.h>
#include "../comedidev.h"
#include <linux/delay.h>
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index b5f340c..fad81bc 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: ni_pcidio
@@ -55,10 +50,10 @@ comedi_nonfree_firmware tarball available from http://www.comedi.org
/* #define DEBUG 1 */
/* #define DEBUG_FLAGS */
+#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
-#include <linux/firmware.h>
#include "../comedidev.h"
@@ -645,32 +640,19 @@ static void debug_int(struct comedi_device *dev)
static int ni_pcidio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct nidio96_private *devpriv = dev->private;
+ int ret;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
- if (insn->n != 1)
- return -EINVAL;
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= 1 << CR_CHAN(insn->chanspec);
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~(1 << CR_CHAN(insn->chanspec));
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] =
- (s->
- io_bits & (1 << CR_CHAN(insn->chanspec))) ? COMEDI_OUTPUT :
- COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- }
writel(s->io_bits, devpriv->mite->daq_io_addr + Port_Pin_Directions(0));
- return 1;
+ return insn->n;
}
static int ni_pcidio_insn_bits(struct comedi_device *dev,
@@ -971,11 +953,13 @@ static int ni_pcidio_change(struct comedi_device *dev,
return 0;
}
-static int pci_6534_load_fpga(struct comedi_device *dev, int fpga_index,
- const u8 *data, size_t data_len)
+static int pci_6534_load_fpga(struct comedi_device *dev,
+ const u8 *data, size_t data_len,
+ unsigned long context)
{
struct nidio96_private *devpriv = dev->private;
static const int timeout = 1000;
+ int fpga_index = context;
int i;
size_t j;
@@ -1033,7 +1017,7 @@ static int pci_6534_load_fpga(struct comedi_device *dev, int fpga_index,
static int pci_6534_reset_fpga(struct comedi_device *dev, int fpga_index)
{
- return pci_6534_load_fpga(dev, fpga_index, NULL, 0);
+ return pci_6534_load_fpga(dev, NULL, 0, fpga_index);
}
static int pci_6534_reset_fpgas(struct comedi_device *dev)
@@ -1067,13 +1051,12 @@ static void pci_6534_init_main_fpga(struct comedi_device *dev)
static int pci_6534_upload_firmware(struct comedi_device *dev)
{
struct nidio96_private *devpriv = dev->private;
- int ret;
- const struct firmware *fw;
static const char *const fw_file[3] = {
FW_PCI_6534_SCARAB_DI, /* loaded into scarab A for DI */
FW_PCI_6534_SCARAB_DO, /* loaded into scarab B for DO */
FW_PCI_6534_MAIN, /* loaded into main FPGA */
};
+ int ret;
int n;
ret = pci_6534_reset_fpgas(dev);
@@ -1081,14 +1064,11 @@ static int pci_6534_upload_firmware(struct comedi_device *dev)
return ret;
/* load main FPGA first, then the two scarabs */
for (n = 2; n >= 0; n--) {
- ret = request_firmware(&fw, fw_file[n],
- &devpriv->mite->pcidev->dev);
- if (ret == 0) {
- ret = pci_6534_load_fpga(dev, n, fw->data, fw->size);
- if (ret == 0 && n == 2)
- pci_6534_init_main_fpga(dev);
- release_firmware(fw);
- }
+ ret = comedi_load_firmware(dev, &devpriv->mite->pcidev->dev,
+ fw_file[n],
+ pci_6534_load_fpga, n);
+ if (ret == 0 && n == 2)
+ pci_6534_init_main_fpga(dev);
if (ret < 0)
break;
}
@@ -1116,10 +1096,9 @@ static int nidio_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
spin_lock_init(&devpriv->mite_channel_lock);
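The firmware hunk above swaps the open-coded request_firmware()/release_firmware() sequence for comedi_load_firmware(), which requests the file, calls the driver's loader callback, and releases the firmware; the old fpga_index argument now travels in the callback's 'context' parameter. A sketch of the pattern with hypothetical my_* names and firmware file:

static int my_load_cb(struct comedi_device *dev,
                      const u8 *data, size_t size,
                      unsigned long context)
{
        /* 'context' carries what used to be an extra argument (an index) */
        /* ... write 'data'/'size' to the device here ... */
        return 0;
}

static int my_upload_firmware(struct comedi_device *dev, struct device *hw_dev)
{
        /* requests "my_fw.bin", calls my_load_cb(), releases the firmware */
        return comedi_load_firmware(dev, hw_dev, "my_fw.bin", my_load_cb, 0);
}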
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 634d023..536be83 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -14,10 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: ni_pcimio
@@ -110,6 +106,7 @@ Bugs:
*/
+#include <linux/module.h>
#include <linux/delay.h>
#include "../comedidev.h"
diff --git a/drivers/staging/comedi/drivers/ni_stc.h b/drivers/staging/comedi/drivers/ni_stc.h
index 0a613c0..11bf0aa 100644
--- a/drivers/staging/comedi/drivers/ni_stc.h
+++ b/drivers/staging/comedi/drivers/ni_stc.h
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
index 2252877..9b120c7 100644
--- a/drivers/staging/comedi/drivers/ni_tio.c
+++ b/drivers/staging/comedi/drivers/ni_tio.c
@@ -13,10 +13,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
@@ -48,6 +44,9 @@ TODO:
Support use of both banks X and Y
*/
+#include <linux/module.h>
+#include <linux/slab.h>
+
#include "ni_tio_internal.h"
static uint64_t ni_tio_clock_period_ps(const struct ni_gpct *counter,
diff --git a/drivers/staging/comedi/drivers/ni_tio.h b/drivers/staging/comedi/drivers/ni_tio.h
index 8572996..7e13697 100644
--- a/drivers/staging/comedi/drivers/ni_tio.h
+++ b/drivers/staging/comedi/drivers/ni_tio.h
@@ -13,11 +13,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
#ifndef _COMEDI_NI_TIO_H
diff --git a/drivers/staging/comedi/drivers/ni_tio_internal.h b/drivers/staging/comedi/drivers/ni_tio_internal.h
index 5e00212..b009876 100644
--- a/drivers/staging/comedi/drivers/ni_tio_internal.h
+++ b/drivers/staging/comedi/drivers/ni_tio_internal.h
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
#ifndef _COMEDI_NI_TIO_INTERNAL_H
diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
index 13747f3..45691ef 100644
--- a/drivers/staging/comedi/drivers/ni_tiocmd.c
+++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
@@ -13,10 +13,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
@@ -48,6 +44,7 @@ TODO:
Support use of both banks X and Y
*/
+#include <linux/module.h>
#include "comedi_fc.h"
#include "ni_tio_internal.h"
#include "mite.h"
diff --git a/drivers/staging/comedi/drivers/pcl711.c b/drivers/staging/comedi/drivers/pcl711.c
index 8be2a4c..e859f85 100644
--- a/drivers/staging/comedi/drivers/pcl711.c
+++ b/drivers/staging/comedi/drivers/pcl711.c
@@ -17,11 +17,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: pcl711
@@ -58,10 +53,10 @@ supported.
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
#include <linux/delay.h>
#include "comedi_fc.h"
@@ -479,10 +474,9 @@ static int pcl711_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
s = &dev->subdevices[0];
/* AI subdevice */
diff --git a/drivers/staging/comedi/drivers/pcl724.c b/drivers/staging/comedi/drivers/pcl724.c
index 4f033d8..8af13e7 100644
--- a/drivers/staging/comedi/drivers/pcl724.c
+++ b/drivers/staging/comedi/drivers/pcl724.c
@@ -1,85 +1,82 @@
/*
- comedi/drivers/pcl724.c
-
- Michal Dobes <dobes@tesnet.cz>
-
- hardware driver for Advantech cards:
- card: PCL-724, PCL-722, PCL-731
- driver: pcl724, pcl722, pcl731
- and ADLink cards:
- card: ACL-7122, ACL-7124, PET-48DIO
- driver: acl7122, acl7124, pet48dio
-
- Options for PCL-724, PCL-731, ACL-7124 and PET-48DIO:
- [0] - IO Base
-
- Options for PCL-722 and ACL-7122:
- [0] - IO Base
- [1] - IRQ (0=disable IRQ) IRQ isn't supported at this time!
- [2] -number of DIO:
- 0, 144: 144 DIO configuration
- 1, 96: 96 DIO configuration
-*/
-/*
-Driver: pcl724
-Description: Advantech PCL-724, PCL-722, PCL-731 ADLink ACL-7122, ACL-7124,
- PET-48DIO
-Author: Michal Dobes <dobes@tesnet.cz>
-Devices: [Advantech] PCL-724 (pcl724), PCL-722 (pcl722), PCL-731 (pcl731),
- [ADLink] ACL-7122 (acl7122), ACL-7124 (acl7124), PET-48DIO (pet48dio)
-Status: untested
-
-This is driver for digital I/O boards PCL-722/724/731 with 144/24/48 DIO
-and for digital I/O boards ACL-7122/7124/PET-48DIO with 144/24/48 DIO.
-It need 8255.o for operations and only immediate mode is supported.
-See the source for configuration details.
-*/
+ * pcl724.c
+ * Comedi driver for 8255 based ISA and PC/104 DIO boards
+ *
+ * Michal Dobes <dobes@tesnet.cz>
+ */
+
/*
- * check_driver overrides:
- * struct comedi_insn
+ * Driver: pcl724
+ * Description: Comedi driver for 8255 based ISA and PC/104 DIO boards
+ * Devices: (Advantech) PCL-724 [pcl724]
+ * (Advantech) PCL-722 [pcl722]
+ * (Advantech) PCL-731 [pcl731]
+ * (ADLink) ACL-7122 [acl7122]
+ * (ADLink) ACL-7124 [acl7124]
+ * (ADLink) PET-48DIO [pet48dio]
+ * (WinSystems) PCM-IO48 [pcmio48]
+ * Author: Michal Dobes <dobes@tesnet.cz>
+ * Status: untested
+ *
+ * Configuration options:
+ * [0] - IO Base
+ * [1] - IRQ (not supported)
+ * [2] - number of DIO (pcl722 and acl7122 boards)
+ * 0, 144: 144 DIO configuration
+ * 1, 96: 96 DIO configuration
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-#include <linux/delay.h>
-
#include "8255.h"
-#define PCL722_SIZE 32
-#define PCL722_96_SIZE 16
-#define PCL724_SIZE 4
-#define PCL731_SIZE 8
-#define PET48_SIZE 2
-
#define SIZE_8255 4
-/* #define PCL724_IRQ 1 no IRQ support now */
-
struct pcl724_board {
-
- const char *name; /* board name */
- int dio; /* num of DIO */
- int numofports; /* num of 8255 subdevices */
- unsigned int IRQbits; /* allowed interrupts */
- unsigned int io_range; /* len of IO space */
- char can_have96;
- char is_pet48;
+ const char *name;
+ unsigned int io_range;
+ unsigned int can_have96:1;
+ unsigned int is_pet48:1;
+ int numofports;
};
-static int subdev_8255_cb(int dir, int port, int data, unsigned long arg)
-{
- unsigned long iobase = arg;
-
- if (dir) {
- outb(data, iobase + port);
- return 0;
- } else {
- return inb(iobase + port);
- }
-}
+static const struct pcl724_board boardtypes[] = {
+ {
+ .name = "pcl724",
+ .io_range = 0x04,
+ .numofports = 1, /* 24 DIO channels */
+ }, {
+ .name = "pcl722",
+ .io_range = 0x20,
+ .can_have96 = 1,
+ .numofports = 6, /* 144 (or 96) DIO channels */
+ }, {
+ .name = "pcl731",
+ .io_range = 0x08,
+ .numofports = 2, /* 48 DIO channels */
+ }, {
+ .name = "acl7122",
+ .io_range = 0x20,
+ .can_have96 = 1,
+ .numofports = 6, /* 144 (or 96) DIO channels */
+ }, {
+ .name = "acl7124",
+ .io_range = 0x04,
+ .numofports = 1, /* 24 DIO channels */
+ }, {
+ .name = "pet48dio",
+ .io_range = 0x02,
+ .is_pet48 = 1,
+ .numofports = 2, /* 48 DIO channels */
+ }, {
+ .name = "pcmio48",
+ .io_range = 0x08,
+ .numofports = 2, /* 48 DIO channels */
+ },
+};
-static int subdev_8255mapped_cb(int dir, int port, int data,
+static int pcl724_8255mapped_io(int dir, int port, int data,
unsigned long iobase)
{
int movport = SIZE_8255 * (iobase >> 12);
@@ -96,57 +93,30 @@ static int subdev_8255mapped_cb(int dir, int port, int data,
}
}
-static int pcl724_attach(struct comedi_device *dev, struct comedi_devconfig *it)
+static int pcl724_attach(struct comedi_device *dev,
+ struct comedi_devconfig *it)
{
const struct pcl724_board *board = comedi_board(dev);
struct comedi_subdevice *s;
+ unsigned long iobase;
unsigned int iorange;
- int ret, i, n_subdevices;
-#ifdef PCL724_IRQ
- unsigned int irq;
-#endif
+ int n_subdevices;
+ int ret;
+ int i;
iorange = board->io_range;
- if ((board->can_have96) &&
- ((it->options[1] == 1) || (it->options[1] == 96)))
- iorange = PCL722_96_SIZE; /* PCL-724 in 96 DIO configuration */
- ret = comedi_request_region(dev, it->options[0], iorange);
- if (ret)
- return ret;
+ n_subdevices = board->numofports;
-#ifdef PCL724_IRQ
- irq = 0;
- if (board->IRQbits != 0) { /* board support IRQ */
- irq = it->options[1];
- if (irq) { /* we want to use IRQ */
- if (((1 << irq) & board->IRQbits) == 0) {
- printk(KERN_WARNING
- ", IRQ %u is out of allowed range, "
- "DISABLING IT", irq);
- irq = 0; /* Bad IRQ */
- } else {
- if (request_irq(irq, interrupt_pcl724, 0,
- dev->board_name, dev)) {
- printk(KERN_WARNING
- ", unable to allocate IRQ %u, "
- "DISABLING IT", irq);
- irq = 0; /* Can't use IRQ */
- } else {
- printk(", irq=%u", irq);
- }
- }
- }
+ /* Handle PCL-724 in 96 DIO configuration */
+ if (board->can_have96 &&
+ (it->options[2] == 1 || it->options[2] == 96)) {
+ iorange = 0x10;
+ n_subdevices = 4;
}
- dev->irq = irq;
-#endif
-
- printk("\n");
-
- n_subdevices = board->numofports;
- if ((board->can_have96) && ((it->options[1] == 1)
- || (it->options[1] == 96)))
- n_subdevices = 4; /* PCL-724 in 96 DIO configuration */
+ ret = comedi_request_region(dev, it->options[0], iorange);
+ if (ret)
+ return ret;
ret = comedi_alloc_subdevices(dev, n_subdevices);
if (ret)
@@ -155,41 +125,25 @@ static int pcl724_attach(struct comedi_device *dev, struct comedi_devconfig *it)
for (i = 0; i < dev->n_subdevices; i++) {
s = &dev->subdevices[i];
if (board->is_pet48) {
- subdev_8255_init(dev, s, subdev_8255mapped_cb,
- (unsigned long)(dev->iobase +
- i * 0x1000));
- } else
- subdev_8255_init(dev, s, subdev_8255_cb,
- (unsigned long)(dev->iobase +
- SIZE_8255 * i));
+ iobase = dev->iobase + (i * 0x1000);
+ ret = subdev_8255_init(dev, s, pcl724_8255mapped_io,
+ iobase);
+ } else {
+ iobase = dev->iobase + (i * SIZE_8255);
+ ret = subdev_8255_init(dev, s, NULL, iobase);
+ }
+ if (ret)
+ return ret;
}
return 0;
}
-static void pcl724_detach(struct comedi_device *dev)
-{
- int i;
-
- for (i = 0; i < dev->n_subdevices; i++)
- comedi_spriv_free(dev, i);
- comedi_legacy_detach(dev);
-}
-
-static const struct pcl724_board boardtypes[] = {
- { "pcl724", 24, 1, 0x00fc, PCL724_SIZE, 0, 0, },
- { "pcl722", 144, 6, 0x00fc, PCL722_SIZE, 1, 0, },
- { "pcl731", 48, 2, 0x9cfc, PCL731_SIZE, 0, 0, },
- { "acl7122", 144, 6, 0x9ee8, PCL722_SIZE, 1, 0, },
- { "acl7124", 24, 1, 0x00fc, PCL724_SIZE, 0, 0, },
- { "pet48dio", 48, 2, 0x9eb8, PET48_SIZE, 0, 1, },
-};
-
static struct comedi_driver pcl724_driver = {
.driver_name = "pcl724",
.module = THIS_MODULE,
.attach = pcl724_attach,
- .detach = pcl724_detach,
+ .detach = comedi_legacy_detach,
.board_name = &boardtypes[0].name,
.num_names = ARRAY_SIZE(boardtypes),
.offset = sizeof(struct pcl724_board),
@@ -197,5 +151,5 @@ static struct comedi_driver pcl724_driver = {
module_comedi_driver(pcl724_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for 8255 based ISA and PC/104 DIO boards");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcl725.c b/drivers/staging/comedi/drivers/pcl725.c
deleted file mode 100644
index 6b02f06..0000000
--- a/drivers/staging/comedi/drivers/pcl725.c
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * comedi/drivers/pcl725.c
- * Driver for PCL725 and clones
- * David A. Schleef
- */
-/*
-Driver: pcl725
-Description: Advantech PCL-725 (& compatibles)
-Author: ds
-Status: unknown
-Devices: [Advantech] PCL-725 (pcl725)
-*/
-
-#include "../comedidev.h"
-
-#include <linux/ioport.h>
-
-#define PCL725_SIZE 2
-
-#define PCL725_DO 0
-#define PCL725_DI 1
-
-static int pcl725_do_insn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= (data[0] & data[1]);
- outb(s->state, dev->iobase + PCL725_DO);
- }
-
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int pcl725_di_insn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- data[1] = inb(dev->iobase + PCL725_DI);
-
- return insn->n;
-}
-
-static int pcl725_attach(struct comedi_device *dev, struct comedi_devconfig *it)
-{
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], PCL725_SIZE);
- if (ret)
- return ret;
-
- ret = comedi_alloc_subdevices(dev, 2);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- /* do */
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->maxdata = 1;
- s->n_chan = 8;
- s->insn_bits = pcl725_do_insn;
- s->range_table = &range_digital;
-
- s = &dev->subdevices[1];
- /* di */
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->maxdata = 1;
- s->n_chan = 8;
- s->insn_bits = pcl725_di_insn;
- s->range_table = &range_digital;
-
- printk(KERN_INFO "\n");
-
- return 0;
-}
-
-static struct comedi_driver pcl725_driver = {
- .driver_name = "pcl725",
- .module = THIS_MODULE,
- .attach = pcl725_attach,
- .detach = comedi_legacy_detach,
-};
-module_comedi_driver(pcl725_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcl726.c b/drivers/staging/comedi/drivers/pcl726.c
index 4aa9943..a4d0bcc 100644
--- a/drivers/staging/comedi/drivers/pcl726.c
+++ b/drivers/staging/comedi/drivers/pcl726.c
@@ -20,11 +20,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: pcl726
@@ -67,10 +62,9 @@ Interrupts are not supported.
their web page. (http://www.cir.com/)
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-
#undef ACL6126_IRQ /* no interrupt support (yet) */
#define PCL726_SIZE 16
@@ -234,10 +228,9 @@ static int pcl726_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
for (i = 0; i < 12; i++) {
devpriv->bipolar[i] = 0;
diff --git a/drivers/staging/comedi/drivers/pcl730.c b/drivers/staging/comedi/drivers/pcl730.c
index 2879db7..2a659f2 100644
--- a/drivers/staging/comedi/drivers/pcl730.c
+++ b/drivers/staging/comedi/drivers/pcl730.c
@@ -3,135 +3,298 @@
* Driver for Advantech PCL-730 and clones
* José Luis Sánchez
*/
-/*
-Driver: pcl730
-Description: Advantech PCL-730 (& compatibles)
-Author: José Luis Sánchez (jsanchezv@teleline.es)
-Status: untested
-Devices: [Advantech] PCL-730 (pcl730), [ICP] ISO-730 (iso730),
- [Adlink] ACL-7130 (acl7130)
-Interrupts are not supported.
-The ACL-7130 card have an 8254 timer/counter not supported by this driver.
-*/
+/*
+ * Driver: pcl730
+ * Description: Advantech PCL-730 (& compatibles)
+ * Devices: (Advantech) PCL-730 [pcl730]
+ * (ICP) ISO-730 [iso730]
+ * (Adlink) ACL-7130 [acl7130]
+ * (Advantech) PCM-3730 [pcm3730]
+ * (Advantech) PCL-725 [pcl725]
+ * (ICP) P8R8-DIO [p8r8dio]
+ * (Adlink) ACL-7225b [acl7225b]
+ * (ICP) P16R16-DIO [p16r16dio]
+ * (Advantech) PCL-733 [pcl733]
+ * (Advantech) PCL-734 [pcl734]
+ * Author: José Luis Sánchez (jsanchezv@teleline.es)
+ * Status: untested
+ *
+ * Configuration options:
+ * [0] - I/O port base
+ *
+ * Interrupts are not supported.
+ * The ACL-7130 card has an 8254 timer/counter not supported by this driver.
+ */
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-
-#define PCL730_SIZE 4
-#define ACL7130_SIZE 8
-#define PCL730_IDIO_LO 0 /* Isolated Digital I/O low byte (ID0-ID7) */
-#define PCL730_IDIO_HI 1 /* Isolated Digital I/O high byte (ID8-ID15) */
-#define PCL730_DIO_LO 2 /* TTL Digital I/O low byte (D0-D7) */
-#define PCL730_DIO_HI 3 /* TTL Digital I/O high byte (D8-D15) */
+/*
+ * Register map
+ *
+ * The register map varies slightly depending on the board type but
+ * all registers are 8-bit.
+ *
+ * The boardinfo 'io_range' is used to allow comedi to request the
+ * proper range required by the board.
+ *
+ * The comedi_subdevice 'private' data is used to pass the register
+ * offset to the (*insn_bits) functions to read/write the correct
+ * registers.
+ *
+ * The basic register mapping looks like this:
+ *
+ * BASE+0 Isolated outputs 0-7 (write) / inputs 0-7 (read)
+ * BASE+1 Isolated outputs 8-15 (write) / inputs 8-15 (read)
+ * BASE+2 TTL outputs 0-7 (write) / inputs 0-7 (read)
+ * BASE+3 TTL outputs 8-15 (write) / inputs 8-15 (read)
+ *
+ * The pcm3730 board does not have register BASE+1.
+ *
+ * The pcl725 and p8r8dio only have registers BASE+0 and BASE+1:
+ *
+ * BASE+0 Isolated outputs 0-7 (write) (read back on p8r8dio)
+ * BASE+1 Isolated inputs 0-7 (read)
+ *
+ * The acl7225b and p16r16dio boards have this register mapping:
+ *
+ * BASE+0 Isolated outputs 0-7 (write) (read back)
+ * BASE+1 Isolated outputs 8-15 (write) (read back)
+ * BASE+2 Isolated inputs 0-7 (read)
+ * BASE+3 Isolated inputs 8-15 (read)
+ *
+ * The pcl733 and pcl734 boards have this register mapping:
+ *
+ * BASE+0 Isolated outputs 0-7 (write) or inputs 0-7 (read)
+ * BASE+1 Isolated outputs 8-15 (write) or inputs 8-15 (read)
+ * BASE+2 Isolated outputs 16-23 (write) or inputs 16-23 (read)
+ * BASE+3 Isolated outputs 24-31 (write) or inputs 24-31 (read)
+ */
struct pcl730_board {
+ const char *name;
+ unsigned int io_range;
+ unsigned is_pcl725:1;
+ unsigned is_acl7225b:1;
+ unsigned has_readback:1;
+ unsigned has_ttl_io:1;
+ int n_subdevs;
+ int n_iso_out_chan;
+ int n_iso_in_chan;
+ int n_ttl_chan;
+};
- const char *name; /* board name */
- unsigned int io_range; /* len of I/O space */
+static const struct pcl730_board pcl730_boards[] = {
+ {
+ .name = "pcl730",
+ .io_range = 0x04,
+ .has_ttl_io = 1,
+ .n_subdevs = 4,
+ .n_iso_out_chan = 16,
+ .n_iso_in_chan = 16,
+ .n_ttl_chan = 16,
+ }, {
+ .name = "iso730",
+ .io_range = 0x04,
+ .n_subdevs = 4,
+ .n_iso_out_chan = 16,
+ .n_iso_in_chan = 16,
+ .n_ttl_chan = 16,
+ }, {
+ .name = "acl7130",
+ .io_range = 0x08,
+ .has_ttl_io = 1,
+ .n_subdevs = 4,
+ .n_iso_out_chan = 16,
+ .n_iso_in_chan = 16,
+ .n_ttl_chan = 16,
+ }, {
+ .name = "pcm3730",
+ .io_range = 0x04,
+ .has_ttl_io = 1,
+ .n_subdevs = 4,
+ .n_iso_out_chan = 8,
+ .n_iso_in_chan = 8,
+ .n_ttl_chan = 16,
+ }, {
+ .name = "pcl725",
+ .io_range = 0x02,
+ .is_pcl725 = 1,
+ .n_subdevs = 2,
+ .n_iso_out_chan = 8,
+ .n_iso_in_chan = 8,
+ }, {
+ .name = "p8r8dio",
+ .io_range = 0x02,
+ .is_pcl725 = 1,
+ .has_readback = 1,
+ .n_subdevs = 2,
+ .n_iso_out_chan = 8,
+ .n_iso_in_chan = 8,
+ }, {
+ .name = "acl7225b",
+ .io_range = 0x08, /* only 4 are used */
+ .is_acl7225b = 1,
+ .has_readback = 1,
+ .n_subdevs = 2,
+ .n_iso_out_chan = 16,
+ .n_iso_in_chan = 16,
+ }, {
+ .name = "p16r16dio",
+ .io_range = 0x04,
+ .is_acl7225b = 1,
+ .has_readback = 1,
+ .n_subdevs = 2,
+ .n_iso_out_chan = 16,
+ .n_iso_in_chan = 16,
+ }, {
+ .name = "pcl733",
+ .io_range = 0x04,
+ .n_subdevs = 1,
+ .n_iso_in_chan = 32,
+ }, {
+ .name = "pcl734",
+ .io_range = 0x04,
+ .n_subdevs = 1,
+ .n_iso_out_chan = 32,
+ },
};
-static int pcl730_do_insn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int pcl730_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= (data[0] & data[1]);
+ unsigned long reg = (unsigned long)s->private;
+ unsigned int mask = data[0];
+ unsigned int bits = data[1];
+
+ if (mask) {
+ s->state &= ~mask;
+ s->state |= (bits & mask);
+
+ if (mask & 0x00ff)
+ outb(s->state & 0xff, dev->iobase + reg);
+ if ((mask & 0xff00) && (s->n_chan > 8))
+ outb((s->state >> 8) & 0xff, dev->iobase + reg + 1);
+ if ((mask & 0xff0000) && (s->n_chan > 16))
+ outb((s->state >> 16) & 0xff, dev->iobase + reg + 2);
+ if ((mask & 0xff000000) && (s->n_chan > 24))
+ outb((s->state >> 24) & 0xff, dev->iobase + reg + 3);
}
- if (data[0] & 0x00ff)
- outb(s->state & 0xff,
- dev->iobase + ((unsigned long)s->private));
- if (data[0] & 0xff00)
- outb((s->state >> 8),
- dev->iobase + ((unsigned long)s->private) + 1);
data[1] = s->state;
return insn->n;
}
-static int pcl730_di_insn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static unsigned int pcl730_get_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ unsigned long reg = (unsigned long)s->private;
+ unsigned int val;
+
+ val = inb(dev->iobase + reg);
+ if (s->n_chan > 8)
+ val |= (inb(dev->iobase + reg + 1) << 8);
+ if (s->n_chan > 16)
+ val |= (inb(dev->iobase + reg + 2) << 16);
+ if (s->n_chan > 24)
+ val |= (inb(dev->iobase + reg + 3) << 24);
+
+ return val;
+}
+
+static int pcl730_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- data[1] = inb(dev->iobase + ((unsigned long)s->private)) |
- (inb(dev->iobase + ((unsigned long)s->private) + 1) << 8);
+ data[1] = pcl730_get_bits(dev, s);
return insn->n;
}
-static int pcl730_attach(struct comedi_device *dev, struct comedi_devconfig *it)
+static int pcl730_attach(struct comedi_device *dev,
+ struct comedi_devconfig *it)
{
const struct pcl730_board *board = comedi_board(dev);
struct comedi_subdevice *s;
+ int subdev;
int ret;
ret = comedi_request_region(dev, it->options[0], board->io_range);
if (ret)
return ret;
- ret = comedi_alloc_subdevices(dev, 4);
+ ret = comedi_alloc_subdevices(dev, board->n_subdevs);
if (ret)
return ret;
- s = &dev->subdevices[0];
- /* Isolated do */
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->maxdata = 1;
- s->n_chan = 16;
- s->insn_bits = pcl730_do_insn;
- s->range_table = &range_digital;
- s->private = (void *)PCL730_IDIO_LO;
-
- s = &dev->subdevices[1];
- /* Isolated di */
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->maxdata = 1;
- s->n_chan = 16;
- s->insn_bits = pcl730_di_insn;
- s->range_table = &range_digital;
- s->private = (void *)PCL730_IDIO_LO;
-
- s = &dev->subdevices[2];
- /* TTL do */
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->maxdata = 1;
- s->n_chan = 16;
- s->insn_bits = pcl730_do_insn;
- s->range_table = &range_digital;
- s->private = (void *)PCL730_DIO_LO;
-
- s = &dev->subdevices[3];
- /* TTL di */
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->maxdata = 1;
- s->n_chan = 16;
- s->insn_bits = pcl730_di_insn;
- s->range_table = &range_digital;
- s->private = (void *)PCL730_DIO_LO;
-
- printk(KERN_INFO "\n");
+ subdev = 0;
+
+ if (board->n_iso_out_chan) {
+ /* Isolated Digital Outputs */
+ s = &dev->subdevices[subdev++];
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = board->n_iso_out_chan;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pcl730_do_insn_bits;
+ s->private = (void *)0;
+
+ /* get the initial state if supported */
+ if (board->has_readback)
+ s->state = pcl730_get_bits(dev, s);
+ }
+
+ if (board->n_iso_in_chan) {
+ /* Isolated Digital Inputs */
+ s = &dev->subdevices[subdev++];
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = board->n_iso_in_chan;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pcl730_di_insn_bits;
+ s->private = board->is_acl7225b ? (void *)2 :
+ board->is_pcl725 ? (void *)1 : (void *)0;
+ }
+
+ if (board->has_ttl_io) {
+ /* TTL Digital Outputs */
+ s = &dev->subdevices[subdev++];
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = board->n_ttl_chan;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pcl730_do_insn_bits;
+ s->private = (void *)2;
+
+ /* TTL Digital Inputs */
+ s = &dev->subdevices[subdev++];
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = board->n_ttl_chan;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pcl730_di_insn_bits;
+ s->private = (void *)2;
+ }
return 0;
}
-static const struct pcl730_board boardtypes[] = {
- { "pcl730", PCL730_SIZE, },
- { "iso730", PCL730_SIZE, },
- { "acl7130", ACL7130_SIZE, },
-};
-
static struct comedi_driver pcl730_driver = {
.driver_name = "pcl730",
.module = THIS_MODULE,
.attach = pcl730_attach,
.detach = comedi_legacy_detach,
- .board_name = &boardtypes[0].name,
- .num_names = ARRAY_SIZE(boardtypes),
+ .board_name = &pcl730_boards[0].name,
+ .num_names = ARRAY_SIZE(pcl730_boards),
.offset = sizeof(struct pcl730_board),
};
module_comedi_driver(pcl730_driver);
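The rewritten pcl730 insn_bits helpers above keep the register offset in s->private and split a wide channel mask across consecutive byte-wide ports. As a worked illustration (hypothetical helper, not in the patch), the function below returns which ports a given mask touches; e.g. mask = 1 << 18 on a 32-channel board yields 0x4, meaning only BASE + reg + 2 is written.

static unsigned int ports_touched(unsigned int mask, int n_chan)
{
        unsigned int ports = 0;

        if (mask & 0x000000ff)
                ports |= 1 << 0;        /* BASE + reg + 0 */
        if ((mask & 0x0000ff00) && n_chan > 8)
                ports |= 1 << 1;        /* BASE + reg + 1 */
        if ((mask & 0x00ff0000) && n_chan > 16)
                ports |= 1 << 2;        /* BASE + reg + 2 */
        if ((mask & 0xff000000) && n_chan > 24)
                ports |= 1 << 3;        /* BASE + reg + 3 */

        return ports;
}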
diff --git a/drivers/staging/comedi/drivers/pcl812.c b/drivers/staging/comedi/drivers/pcl812.c
index cd02786..03a0989 100644
--- a/drivers/staging/comedi/drivers/pcl812.c
+++ b/drivers/staging/comedi/drivers/pcl812.c
@@ -108,12 +108,12 @@
* 3= 20V unipolar inputs
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include "../comedidev.h"
#include <linux/delay.h>
-#include <linux/ioport.h>
#include <linux/io.h>
#include <asm/dma.h>
@@ -1110,10 +1110,9 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
irq = 0;
if (board->IRQbits != 0) { /* board support IRQ */
@@ -1405,6 +1404,7 @@ no_dma:
if (it->options[3] > 0)
/* we use external trigger */
devpriv->use_ext_trg = 1;
+ break;
case boardA821:
devpriv->max_812_ai_mode0_rangewait = 1;
devpriv->mode_reg_int = (irq << 4) & 0xf0;
diff --git a/drivers/staging/comedi/drivers/pcl816.c b/drivers/staging/comedi/drivers/pcl816.c
index 91bd207..f031349 100644
--- a/drivers/staging/comedi/drivers/pcl816.c
+++ b/drivers/staging/comedi/drivers/pcl816.c
@@ -32,9 +32,9 @@ Configuration Options:
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -922,10 +922,9 @@ static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it)
return -EIO;
}
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
/* grab our IRQ */
irq = 0;
diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c
index 91cb1bd..a52ba82 100644
--- a/drivers/staging/comedi/drivers/pcl818.c
+++ b/drivers/staging/comedi/drivers/pcl818.c
@@ -98,7 +98,7 @@ A word or two about DMA. Driver support DMA operations at two ways:
*/
-#include <linux/ioport.h>
+#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -1227,10 +1227,9 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
unsigned long pages;
struct comedi_subdevice *s;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
devpriv->io_range = board->io_range;
if ((board->fifo) && (it->options[2] == -1)) {
diff --git a/drivers/staging/comedi/drivers/pcm3724.c b/drivers/staging/comedi/drivers/pcm3724.c
index 4ef0df3..cc1dc7f 100644
--- a/drivers/staging/comedi/drivers/pcm3724.c
+++ b/drivers/staging/comedi/drivers/pcm3724.c
@@ -28,11 +28,9 @@ Copy/pasted/hacked from pcm724.c
* struct comedi_insn
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-#include <linux/delay.h>
-
#include "8255.h"
#define PCM3724_SIZE 16
@@ -186,39 +184,30 @@ static void enable_chan(struct comedi_device *dev, struct comedi_subdevice *s,
/* overriding the 8255 insn config */
static int subdev_3724_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
+ unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int mask;
- unsigned int bits;
-
- mask = 1 << CR_CHAN(insn->chanspec);
- if (mask & 0x0000ff)
- bits = 0x0000ff;
- else if (mask & 0x00ff00)
- bits = 0x00ff00;
- else if (mask & 0x0f0000)
- bits = 0x0f0000;
+ int ret;
+
+ if (chan < 8)
+ mask = 0x0000ff;
+ else if (chan < 16)
+ mask = 0x00ff00;
+ else if (chan < 20)
+ mask = 0x0f0000;
else
- bits = 0xf00000;
-
- switch (data[0]) {
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~bits;
- break;
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= bits;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (s->io_bits & bits) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- }
+ mask = 0xf00000;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
do_3724_config(dev, s, insn->chanspec);
enable_chan(dev, s, insn->chanspec);
- return 1;
+
+ return insn->n;
}
static int pcm3724_attach(struct comedi_device *dev,
@@ -228,10 +217,9 @@ static int pcm3724_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
int ret, i;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = comedi_alloc_devpriv(dev, sizeof(*priv));
if (!priv)
return -ENOMEM;
- dev->private = priv;
ret = comedi_request_region(dev, it->options[0], PCM3724_SIZE);
if (ret)
@@ -250,20 +238,11 @@ static int pcm3724_attach(struct comedi_device *dev,
return 0;
}
-static void pcm3724_detach(struct comedi_device *dev)
-{
- int i;
-
- for (i = 0; i < dev->n_subdevices; i++)
- comedi_spriv_free(dev, i);
- comedi_legacy_detach(dev);
-}
-
static struct comedi_driver pcm3724_driver = {
.driver_name = "pcm3724",
.module = THIS_MODULE,
.attach = pcm3724_attach,
- .detach = pcm3724_detach,
+ .detach = comedi_legacy_detach,
};
module_comedi_driver(pcm3724_driver);
diff --git a/drivers/staging/comedi/drivers/pcm3730.c b/drivers/staging/comedi/drivers/pcm3730.c
deleted file mode 100644
index 3a3ce2c7..0000000
--- a/drivers/staging/comedi/drivers/pcm3730.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * comedi/drivers/pcm3730.c
- * Driver for PCM3730 and clones
- * Blaine Lee
- * from pcl725 by David S.
- */
-/*
-Driver: pcm3730
-Description: PCM3730
-Author: Blaine Lee
-Devices: [Advantech] PCM-3730 (pcm3730)
-Status: unknown
-
-Configuration options:
- [0] - I/O port base
-*/
-
-#include "../comedidev.h"
-
-#include <linux/ioport.h>
-
-#define PCM3730_SIZE 4 /* consecutive io port addresses */
-
-#define PCM3730_DOA 0 /* offsets for each port */
-#define PCM3730_DOB 2
-#define PCM3730_DOC 3
-#define PCM3730_DIA 0
-#define PCM3730_DIB 2
-#define PCM3730_DIC 3
-
-static int pcm3730_do_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= (data[0] & data[1]);
- outb(s->state, dev->iobase + (unsigned long)(s->private));
- }
- data[1] = s->state;
-
- return insn->n;
-}
-
-static int pcm3730_di_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- data[1] = inb(dev->iobase + (unsigned long)(s->private));
- return insn->n;
-}
-
-static int pcm3730_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
-{
- struct comedi_subdevice *s;
- int ret;
-
- ret = comedi_request_region(dev, it->options[0], PCM3730_SIZE);
- if (ret)
- return ret;
-
- ret = comedi_alloc_subdevices(dev, 6);
- if (ret)
- return ret;
-
- s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->maxdata = 1;
- s->n_chan = 8;
- s->insn_bits = pcm3730_do_insn_bits;
- s->range_table = &range_digital;
- s->private = (void *)PCM3730_DOA;
-
- s = &dev->subdevices[1];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->maxdata = 1;
- s->n_chan = 8;
- s->insn_bits = pcm3730_do_insn_bits;
- s->range_table = &range_digital;
- s->private = (void *)PCM3730_DOB;
-
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->maxdata = 1;
- s->n_chan = 8;
- s->insn_bits = pcm3730_do_insn_bits;
- s->range_table = &range_digital;
- s->private = (void *)PCM3730_DOC;
-
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->maxdata = 1;
- s->n_chan = 8;
- s->insn_bits = pcm3730_di_insn_bits;
- s->range_table = &range_digital;
- s->private = (void *)PCM3730_DIA;
-
- s = &dev->subdevices[4];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->maxdata = 1;
- s->n_chan = 8;
- s->insn_bits = pcm3730_di_insn_bits;
- s->range_table = &range_digital;
- s->private = (void *)PCM3730_DIB;
-
- s = &dev->subdevices[5];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->maxdata = 1;
- s->n_chan = 8;
- s->insn_bits = pcm3730_di_insn_bits;
- s->range_table = &range_digital;
- s->private = (void *)PCM3730_DIC;
-
- printk(KERN_INFO "\n");
-
- return 0;
-}
-
-static struct comedi_driver pcm3730_driver = {
- .driver_name = "pcm3730",
- .module = THIS_MODULE,
- .attach = pcm3730_attach,
- .detach = comedi_legacy_detach,
-};
-module_comedi_driver(pcm3730_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcmad.c b/drivers/staging/comedi/drivers/pcmad.c
index b7c932e..423f236 100644
--- a/drivers/staging/comedi/drivers/pcmad.c
+++ b/drivers/staging/comedi/drivers/pcmad.c
@@ -1,52 +1,45 @@
/*
- comedi/drivers/pcmad.c
- Hardware driver for Winsystems PCM-A/D12 and PCM-A/D16
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000,2001 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * pcmad.c
+ * Hardware driver for Winsystems PCM-A/D12 and PCM-A/D16
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000,2001 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
-*/
/*
-Driver: pcmad
-Description: Winsystems PCM-A/D12, PCM-A/D16
-Author: ds
-Devices: [Winsystems] PCM-A/D12 (pcmad12), PCM-A/D16 (pcmad16)
-Status: untested
-
-This driver was written on a bet that I couldn't write a driver
-in less than 2 hours. I won the bet, but never got paid. =(
-
-Configuration options:
- [0] - I/O port base
- [1] - unused
- [2] - Analog input reference
- 0 = single ended
- 1 = differential
- [3] - Analog input encoding (must match jumpers)
- 0 = straight binary
- 1 = two's complement
-*/
-
-#include <linux/interrupt.h>
-#include "../comedidev.h"
-
-#include <linux/ioport.h>
+ * Driver: pcmad
+ * Description: Winsystems PCM-A/D12, PCM-A/D16
+ * Devices: (Winsystems) PCM-A/D12 [pcmad12]
+ * (Winsystems) PCM-A/D16 [pcmad16]
+ * Author: ds
+ * Status: untested
+ *
+ * This driver was written on a bet that I couldn't write a driver
+ * in less than 2 hours. I won the bet, but never got paid. =(
+ *
+ * Configuration options:
+ * [0] - I/O port base
+ * [1] - IRQ (unused)
+ * [2] - Analog input reference (must match jumpers)
+ * 0 = single-ended (16 channels)
+ * 1 = differential (8 channels)
+ * [3] - Analog input encoding (must match jumpers)
+ * 0 = straight binary (0-5V input range)
+ * 1 = two's complement (+-10V input range)
+ */
-#define PCMAD_SIZE 4
+#include <linux/module.h>
+#include "../comedidev.h"
#define PCMAD_STATUS 0
#define PCMAD_LSB 1
@@ -55,60 +48,82 @@ Configuration options:
struct pcmad_board_struct {
const char *name;
- int n_ai_bits;
+ unsigned int ai_maxdata;
};
-struct pcmad_priv_struct {
- int differential;
- int twos_comp;
+static const struct pcmad_board_struct pcmad_boards[] = {
+ {
+ .name = "pcmad12",
+ .ai_maxdata = 0x0fff,
+ }, {
+ .name = "pcmad16",
+ .ai_maxdata = 0xffff,
+ },
};
#define TIMEOUT 100
+static int pcmad_ai_wait_for_eoc(struct comedi_device *dev,
+ int timeout)
+{
+ int i;
+
+ for (i = 0; i < timeout; i++) {
+ if ((inb(dev->iobase + PCMAD_STATUS) & 0x3) == 0x3)
+ return 0;
+ }
+ return -ETIME;
+}
+
+static bool pcmad_range_is_bipolar(struct comedi_subdevice *s,
+ unsigned int range)
+{
+ return s->range_table->range[range].min < 0;
+}
+
static int pcmad_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- const struct pcmad_board_struct *board = comedi_board(dev);
- struct pcmad_priv_struct *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int range = CR_RANGE(insn->chanspec);
+ unsigned int val;
+ int ret;
int i;
- int chan;
- int n;
- chan = CR_CHAN(insn->chanspec);
-
- for (n = 0; n < insn->n; n++) {
+ for (i = 0; i < insn->n; i++) {
outb(chan, dev->iobase + PCMAD_CONVERT);
- for (i = 0; i < TIMEOUT; i++) {
- if ((inb(dev->iobase + PCMAD_STATUS) & 0x3) == 0x3)
- break;
+ ret = pcmad_ai_wait_for_eoc(dev, TIMEOUT);
+ if (ret)
+ return ret;
+
+ val = inb(dev->iobase + PCMAD_LSB) |
+ (inb(dev->iobase + PCMAD_MSB) << 8);
+
+ /* data is shifted on the pcmad12, fix it */
+ if (s->maxdata == 0x0fff)
+ val >>= 4;
+
+ if (pcmad_range_is_bipolar(s, range)) {
+ /* munge the two's complement value */
+ val ^= ((s->maxdata + 1) >> 1);
}
- data[n] = inb(dev->iobase + PCMAD_LSB);
- data[n] |= (inb(dev->iobase + PCMAD_MSB) << 8);
- if (devpriv->twos_comp)
- data[n] ^= (1 << (board->n_ai_bits - 1));
+ data[i] = val;
}
- return n;
+ return insn->n;
}
-/*
- * options:
- * 0 i/o base
- * 1 unused
- * 2 0=single ended 1=differential
- * 3 0=straight binary 1=two's comp
- */
static int pcmad_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct pcmad_board_struct *board = comedi_board(dev);
- struct pcmad_priv_struct *devpriv;
struct comedi_subdevice *s;
int ret;
- ret = comedi_request_region(dev, it->options[0], PCMAD_SIZE);
+ ret = comedi_request_region(dev, it->options[0], 0x04);
if (ret)
return ret;
@@ -116,32 +131,25 @@ static int pcmad_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
- if (!devpriv)
- return -ENOMEM;
- dev->private = devpriv;
-
s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | AREF_GROUND;
- s->n_chan = 16; /* XXX */
- s->len_chanlist = 1;
- s->insn_read = pcmad_ai_insn_read;
- s->maxdata = (1 << board->n_ai_bits) - 1;
- s->range_table = &range_unknown;
+ s->type = COMEDI_SUBD_AI;
+ if (it->options[1]) {
+ /* 8 differential channels */
+ s->subdev_flags = SDF_READABLE | AREF_DIFF;
+ s->n_chan = 8;
+ } else {
+ /* 16 single-ended channels */
+ s->subdev_flags = SDF_READABLE | AREF_GROUND;
+ s->n_chan = 16;
+ }
+ s->len_chanlist = 1;
+ s->maxdata = board->ai_maxdata;
+ s->range_table = it->options[2] ? &range_bipolar10 : &range_unipolar5;
+ s->insn_read = pcmad_ai_insn_read;
return 0;
}
-static const struct pcmad_board_struct pcmad_boards[] = {
- {
- .name = "pcmad12",
- .n_ai_bits = 12,
- }, {
- .name = "pcmad16",
- .n_ai_bits = 16,
- },
-};
static struct comedi_driver pcmad_driver = {
.driver_name = "pcmad",
.module = THIS_MODULE,
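The bipolar munge added above maps the board's two's-complement sample onto comedi's unsigned offset-binary scale by flipping the sign bit. A standalone sketch with a hypothetical helper name (not part of the patch), including a worked 12-bit example:

/*
 * Convert a two's-complement ADC sample to comedi's unsigned
 * offset-binary convention by flipping the sign bit.
 *
 * 12-bit example (maxdata = 0x0fff, sign bit = 0x800):
 *   0x800 (-2048, most negative) -> 0x000 (bottom of range)
 *   0x000 (0, mid-scale)         -> 0x800 (middle of range)
 *   0x7ff (+2047, most positive) -> 0xfff (top of range)
 */
static unsigned int twos_comp_to_comedi(unsigned int val, unsigned int maxdata)
{
	return val ^ ((maxdata + 1) >> 1);
}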
diff --git a/drivers/staging/comedi/drivers/pcmda12.c b/drivers/staging/comedi/drivers/pcmda12.c
index 61e7fd1..1c7a135 100644
--- a/drivers/staging/comedi/drivers/pcmda12.c
+++ b/drivers/staging/comedi/drivers/pcmda12.c
@@ -1,152 +1,131 @@
/*
- comedi/drivers/pcmda12.c
- Driver for Winsystems PC-104 based PCM-D/A-12 8-channel AO board.
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2006 Calin A. Culianu <calin@ajvar.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-/*
-Driver: pcmda12
-Description: A driver for the Winsystems PCM-D/A-12
-Devices: [Winsystems] PCM-D/A-12 (pcmda12)
-Author: Calin Culianu <calin@ajvar.org>
-Updated: Fri, 13 Jan 2006 12:01:01 -0500
-Status: works
-
-A driver for the relatively straightforward-to-program PCM-D/A-12.
-This board doesn't support commands, and the only way to set its
-analog output range is to jumper the board. As such,
-comedi_data_write() ignores the range value specified.
-
-The board uses 16 consecutive I/O addresses starting at the I/O port
-base address. Each address corresponds to the LSB then MSB of a
-particular channel from 0-7.
-
-Note that the board is not ISA-PNP capable and thus
-needs the I/O port comedi_config parameter.
-
-Note that passing a nonzero value as the second config option will
-enable "simultaneous xfer" mode for this board, in which AO writes
-will not take effect until a subsequent read of any AO channel. This
-is so that one can speed up programming by preloading all AO registers
-with values before simultaneously setting them to take effect with one
-read command.
-
-Configuration Options:
- [0] - I/O port base address
- [1] - Do Simultaneous Xfer (see description)
-*/
+ * pcmda12.c
+ * Driver for Winsystems PC-104 based PCM-D/A-12 8-channel AO board.
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2006 Calin A. Culianu <calin@ajvar.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Driver: pcmda12
+ * Description: A driver for the Winsystems PCM-D/A-12
+ * Devices: (Winsystems) PCM-D/A-12 [pcmda12]
+ * Author: Calin Culianu <calin@ajvar.org>
+ * Updated: Fri, 13 Jan 2006 12:01:01 -0500
+ * Status: works
+ *
+ * A driver for the relatively straightforward-to-program PCM-D/A-12.
+ * This board doesn't support commands, and the only way to set its
+ * analog output range is to jumper the board. As such,
+ * comedi_data_write() ignores the range value specified.
+ *
+ * The board uses 16 consecutive I/O addresses starting at the I/O port
+ * base address. Each address corresponds to the LSB then MSB of a
+ * particular channel from 0-7.
+ *
+ * Note that the board is not ISA-PNP capable and thus needs the I/O
+ * port comedi_config parameter.
+ *
+ * Note that passing a nonzero value as the second config option will
+ * enable "simultaneous xfer" mode for this board, in which AO writes
+ * will not take effect until a subsequent read of any AO channel. This
+ * is so that one can speed up programming by preloading all AO registers
+ * with values before simultaneously setting them to take effect with one
+ * read command.
+ *
+ * Configuration Options:
+ * [0] - I/O port base address
+ * [1] - Do Simultaneous Xfer (see description)
+ */
+
+#include <linux/module.h>
#include "../comedidev.h"
-#define CHANS 8
-#define IOSIZE 16
-#define LSB(x) ((unsigned char)((x) & 0xff))
-#define MSB(x) ((unsigned char)((((unsigned short)(x))>>8) & 0xff))
-#define LSB_PORT(chan) (dev->iobase + (chan)*2)
-#define MSB_PORT(chan) (LSB_PORT(chan)+1)
-#define BITS 12
-
-/* note these have no effect and are merely here for reference..
- these are configured by jumpering the board! */
+/* AO range is not configurable, it's set by jumpers on the board */
static const struct comedi_lrange pcmda12_ranges = {
- 3,
- {
- UNI_RANGE(5), UNI_RANGE(10), BIP_RANGE(5)
- }
+ 3, {
+ UNI_RANGE(5),
+ UNI_RANGE(10),
+ BIP_RANGE(5)
+ }
};
struct pcmda12_private {
-
- unsigned int ao_readback[CHANS];
+ unsigned int ao_readback[8];
int simultaneous_xfer_mode;
};
-static void zero_chans(struct comedi_device *dev)
-{ /* sets up an
- ASIC chip to defaults */
- int i;
- for (i = 0; i < CHANS; ++i) {
-/* /\* do this as one instruction?? *\/ */
-/* outw(0, LSB_PORT(chan)); */
- outb(0, LSB_PORT(i));
- outb(0, MSB_PORT(i));
- }
- inb(LSB_PORT(0)); /* update chans. */
-}
-
-static int ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int pcmda12_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct pcmda12_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val = devpriv->ao_readback[chan];
+ unsigned long ioreg = dev->iobase + (chan * 2);
int i;
- int chan = CR_CHAN(insn->chanspec);
- /* Writing a list of values to an AO channel is probably not
- * very useful, but that's how the interface is defined. */
for (i = 0; i < insn->n; ++i) {
-
-/* /\* do this as one instruction?? *\/ */
-/* outw(data[i], LSB_PORT(chan)); */
-
- /* Need to do this as two instructions due to 8-bit bus?? */
- /* first, load the low byte */
- outb(LSB(data[i]), LSB_PORT(chan));
- /* next, write the high byte */
- outb(MSB(data[i]), MSB_PORT(chan));
-
- /* save shadow register */
- devpriv->ao_readback[chan] = data[i];
-
+ val = data[i];
+ outb(val & 0xff, ioreg);
+ outb((val >> 8) & 0xff, ioreg + 1);
+
+ /*
+ * Initiate transfer if not in simultaneous xfer
+ * mode by reading one of the AO registers.
+ */
if (!devpriv->simultaneous_xfer_mode)
- inb(LSB_PORT(chan));
+ inb(ioreg);
}
+ devpriv->ao_readback[chan] = val;
- /* return the number of samples written */
- return i;
+ return insn->n;
}
-/* AO subdevices should have a read insn as well as a write insn.
-
- Usually this means copying a value stored in devpriv->ao_readback.
- However, since this driver supports simultaneous xfer then sometimes
- this function actually accomplishes work.
-
- Simultaneaous xfer mode is accomplished by loading ALL the values
- you want for AO in all the channels, then READing off one of the AO
- registers to initiate the instantaneous simultaneous update of all
- DAC outputs, which makes all AO channels update simultaneously.
- This is useful for some control applications, I would imagine.
-*/
-static int ao_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int pcmda12_ao_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct pcmda12_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
int i;
- int chan = CR_CHAN(insn->chanspec);
- for (i = 0; i < insn->n; i++) {
- if (devpriv->simultaneous_xfer_mode)
- inb(LSB_PORT(chan));
- /* read back shadow register */
+ /*
+ * Initiate simultaneous xfer mode by reading one of the
+ * AO registers. All analog outputs will then be updated.
+ */
+ if (devpriv->simultaneous_xfer_mode)
+ inb(dev->iobase);
+
+ for (i = 0; i < insn->n; i++)
data[i] = devpriv->ao_readback[chan];
- }
- return i;
+ return insn->n;
+}
+
+static void pcmda12_ao_reset(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ int i;
+
+ for (i = 0; i < s->n_chan; ++i) {
+ outb(0, dev->iobase + (i * 2));
+ outb(0, dev->iobase + (i * 2) + 1);
+ }
+ /* Initiate transfer by reading one of the AO registers. */
+ inb(dev->iobase);
}
static int pcmda12_attach(struct comedi_device *dev,
@@ -156,14 +135,13 @@ static int pcmda12_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
int ret;
- ret = comedi_request_region(dev, it->options[0], IOSIZE);
+ ret = comedi_request_region(dev, it->options[0], 0x10);
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
devpriv->simultaneous_xfer_mode = it->options[1];
@@ -172,18 +150,17 @@ static int pcmda12_attach(struct comedi_device *dev,
return ret;
s = &dev->subdevices[0];
- s->private = NULL;
- s->maxdata = (0x1 << BITS) - 1;
- s->range_table = &pcmda12_ranges;
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = CHANS;
- s->insn_write = &ao_winsn;
- s->insn_read = &ao_rinsn;
-
- zero_chans(dev); /* clear out all the registers, basically */
-
- return 1;
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->n_chan = 8;
+ s->maxdata = 0x0fff;
+ s->range_table = &pcmda12_ranges;
+ s->insn_write = pcmda12_ao_insn_write;
+ s->insn_read = pcmda12_ao_insn_read;
+
+ pcmda12_ao_reset(dev, s);
+
+ return 0;
}
static struct comedi_driver pcmda12_driver = {
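The "simultaneous xfer" mode described in the pcmda12 comment block can be exercised from user space roughly as below. This is a hedged comedilib sketch, not part of the patch: the device path, the AO subdevice number, and the mid-scale value are assumptions, and the mode must have been enabled through the second comedi_config option.

#include <comedilib.h>

int main(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");	/* assumed path */
	unsigned int subdev = 0;			/* assumed AO subdevice */
	lsampl_t readback;
	unsigned int chan;

	if (!dev)
		return 1;

	/* Preload all 8 DAC registers; the outputs do not change yet. */
	for (chan = 0; chan < 8; chan++)
		comedi_data_write(dev, subdev, chan, 0, AREF_GROUND, 0x800);

	/*
	 * In simultaneous-xfer mode, reading any AO channel latches all
	 * preloaded values to the outputs at once.
	 */
	comedi_data_read(dev, subdev, 0, 0, AREF_GROUND, &readback);

	comedi_close(dev);
	return 0;
}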
diff --git a/drivers/staging/comedi/drivers/pcmmio.c b/drivers/staging/comedi/drivers/pcmmio.c
index 5a236cd..574443d 100644
--- a/drivers/staging/comedi/drivers/pcmmio.c
+++ b/drivers/staging/comedi/drivers/pcmmio.c
@@ -14,10 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: pcmmio
@@ -76,6 +72,7 @@ Configuration Options:
leave out if you don't need this feature)
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
@@ -313,68 +310,27 @@ static int pcmmio_dio_insn_bits(struct comedi_device *dev,
return insn->n;
}
-/* The input or output configuration of each digital line is
- * configured by a special insn_config instruction. chanspec
- * contains the channel to be changed, and data[0] contains the
- * value COMEDI_INPUT or COMEDI_OUTPUT. */
static int pcmmio_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int chan = CR_CHAN(insn->chanspec), byte_no = chan / 8, bit_no =
- chan % 8;
- unsigned long ioaddr;
- unsigned char byte;
-
- /* Compute ioaddr for this channel */
- ioaddr = subpriv->iobases[byte_no];
-
- /* NOTE:
- writing a 0 an IO channel's bit sets the channel to INPUT
- and pulls the line high as well
-
- writing a 1 to an IO channel's bit pulls the line low
-
- All channels are implicitly always in OUTPUT mode -- but when
- they are high they can be considered to be in INPUT mode..
-
- Thus, we only force channels low if the config request was INPUT,
- otherwise we do nothing to the hardware. */
-
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- /* save to io_bits -- don't actually do anything since
- all input channels are also output channels... */
- s->io_bits |= 1 << chan;
- break;
- case INSN_CONFIG_DIO_INPUT:
- /* write a 0 to the actual register representing the channel
- to set it to 'input'. 0 means "float high". */
- byte = inb(ioaddr);
- byte &= ~(1 << bit_no);
- /**< set input channel to '0' */
-
- /*
- * write out byte -- this is the only time we actually affect
- * the hardware as all channels are implicitly output
- * -- but input channels are set to float-high
- */
- outb(byte, ioaddr);
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int byte_no = chan / 8;
+ int bit_no = chan % 8;
+ int ret;
- /* save to io_bits */
- s->io_bits &= ~(1 << chan);
- break;
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
- case INSN_CONFIG_DIO_QUERY:
- /* retrieve from shadow register */
- data[1] =
- (s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
+ if (data[0] == INSN_CONFIG_DIO_INPUT) {
+ unsigned long ioaddr = subpriv->iobases[byte_no];
+ unsigned char val;
- default:
- return -EINVAL;
- break;
+ val = inb(ioaddr);
+ val &= ~(1 << bit_no);
+ outb(val, ioaddr);
}
return insn->n;
@@ -1043,10 +999,9 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
for (asic = 0; asic < MAX_ASICS; ++asic) {
devpriv->asics[asic].num = asic;
@@ -1201,12 +1156,13 @@ static void pcmmio_detach(struct comedi_device *dev)
struct pcmmio_private *devpriv = dev->private;
int i;
- for (i = 0; i < MAX_ASICS; ++i) {
- if (devpriv && devpriv->asics[i].irq)
- free_irq(devpriv->asics[i].irq, dev);
- }
- if (devpriv && devpriv->sprivs)
+ if (devpriv) {
+ for (i = 0; i < MAX_ASICS; ++i) {
+ if (devpriv->asics[i].irq)
+ free_irq(devpriv->asics[i].irq, dev);
+ }
kfree(devpriv->sprivs);
+ }
comedi_legacy_detach(dev);
}
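Because every pcmmio (and pcmuio, below) line is individually programmable, the per-channel direction handling above now goes through comedi_dio_insn_config() before the hardware is touched. Seen from user space, the per-channel configuration and bit access look roughly like the comedilib sketch below; the device path and channel numbers are assumptions, not part of the patch.

#include <comedilib.h>

/* assumed: /dev/comedi0 is a pcmmio/pcmuio board and subdevice 0 is DIO */
static int dio_example(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");
	unsigned int bit;

	if (!dev)
		return -1;

	/* channel 0 as output: drive it low, then high */
	comedi_dio_config(dev, 0, 0, COMEDI_OUTPUT);
	comedi_dio_write(dev, 0, 0, 0);
	comedi_dio_write(dev, 0, 0, 1);

	/* channel 1 as input (the driver floats the line high) */
	comedi_dio_config(dev, 0, 1, COMEDI_INPUT);
	comedi_dio_read(dev, 0, 1, &bit);

	comedi_close(dev);
	return (int)bit;
}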
diff --git a/drivers/staging/comedi/drivers/pcmuio.c b/drivers/staging/comedi/drivers/pcmuio.c
index 0c98e26..67e2bb1 100644
--- a/drivers/staging/comedi/drivers/pcmuio.c
+++ b/drivers/staging/comedi/drivers/pcmuio.c
@@ -1,80 +1,79 @@
/*
- comedi/drivers/pcmuio.c
- Driver for Winsystems PC-104 based 48-channel and 96-channel DIO boards.
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2006 Calin A. Culianu <calin@ajvar.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
+ * pcmuio.c
+ * Comedi driver for Winsystems PC-104 based 48/96-channel DIO boards.
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2006 Calin A. Culianu <calin@ajvar.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
/*
-Driver: pcmuio
-Description: A driver for the PCM-UIO48A and PCM-UIO96A boards from Winsystems.
-Devices: [Winsystems] PCM-UIO48A (pcmuio48), PCM-UIO96A (pcmuio96)
-Author: Calin Culianu <calin@ajvar.org>
-Updated: Fri, 13 Jan 2006 12:01:01 -0500
-Status: works
-
-A driver for the relatively straightforward-to-program PCM-UIO48A and
-PCM-UIO96A boards from Winsystems. These boards use either one or two
-(in the 96-DIO version) WS16C48 ASIC HighDensity I/O Chips (HDIO).
-This chip is interesting in that each I/O line is individually
-programmable for INPUT or OUTPUT (thus comedi_dio_config can be done
-on a per-channel basis). Also, each chip supports edge-triggered
-interrupts for the first 24 I/O lines. Of course, since the
-96-channel version of the board has two ASICs, it can detect polarity
-changes on up to 48 I/O lines. Since this is essentially an (non-PnP)
-ISA board, I/O Address and IRQ selection are done through jumpers on
-the board. You need to pass that information to this driver as the
-first and second comedi_config option, respectively. Note that the
-48-channel version uses 16 bytes of IO memory and the 96-channel
-version uses 32-bytes (in case you are worried about conflicts). The
-48-channel board is split into two 24-channel comedi subdevices.
-The 96-channel board is split into 4 24-channel DIO subdevices.
-
-Note that IRQ support has been added, but it is untested.
-
-To use edge-detection IRQ support, pass the IRQs of both ASICS
-(for the 96 channel version) or just 1 ASIC (for 48-channel version).
-Then, use use comedi_commands with TRIG_NOW.
-Your callback will be called each time an edge is triggered, and the data
-values will be two sample_t's, which should be concatenated to form one
-32-bit unsigned int. This value is the mask of channels that had
-edges detected from your channel list. Note that the bits positions
-in the mask correspond to positions in your chanlist when you specified
-the command and *not* channel id's!
-
-To set the polarity of the edge-detection interrupts pass a nonzero value for
-either CR_RANGE or CR_AREF for edge-up polarity, or a zero value for both
-CR_RANGE and CR_AREF if you want edge-down polarity.
-
-In the 48-channel version:
-
-On subdev 0, the first 24 channels channels are edge-detect channels.
-
-In the 96-channel board you have the collowing channels that can do edge detection:
-
-subdev 0, channels 0-24 (first 24 channels of 1st ASIC)
-subdev 2, channels 0-24 (first 24 channels of 2nd ASIC)
-
-Configuration Options:
- [0] - I/O port base address
- [1] - IRQ (for first ASIC, or first 24 channels)
- [2] - IRQ for second ASIC (pcmuio96 only - IRQ for chans 48-72 .. can be the same as first irq!)
-*/
+ * Driver: pcmuio
+ * Description: Winsystems PC-104 based 48/96-channel DIO boards.
+ * Devices: (Winsystems) PCM-UIO48A [pcmuio48]
+ * (Winsystems) PCM-UIO96A [pcmuio96]
+ * Author: Calin Culianu <calin@ajvar.org>
+ * Updated: Fri, 13 Jan 2006 12:01:01 -0500
+ * Status: works
+ *
+ * A driver for the relatively straightforward-to-program PCM-UIO48A and
+ * PCM-UIO96A boards from Winsystems. These boards use either one or two
+ * (in the 96-DIO version) WS16C48 ASIC HighDensity I/O Chips (HDIO). This
+ * chip is interesting in that each I/O line is individually programmable
+ * for INPUT or OUTPUT (thus comedi_dio_config can be done on a per-channel
+ * basis). Also, each chip supports edge-triggered interrupts for the first
+ * 24 I/O lines. Of course, since the 96-channel version of the board has
+ * two ASICs, it can detect polarity changes on up to 48 I/O lines. Since
+ * this is essentially a (non-PnP) ISA board, I/O Address and IRQ selection
+ * are done through jumpers on the board. You need to pass that information
+ * to this driver as the first and second comedi_config option, respectively.
+ * Note that the 48-channel version uses 16 bytes of IO memory and the 96-
+ * channel version uses 32 bytes (in case you are worried about conflicts).
+ * The 48-channel board is split into two 24-channel comedi subdevices. The
+ * 96-channel board is split into four 24-channel DIO subdevices.
+ *
+ * Note that IRQ support has been added, but it is untested.
+ *
+ * To use edge-detection IRQ support, pass the IRQs of both ASICs (for the
+ * 96-channel version) or just one ASIC (for the 48-channel version). Then, use
+ * comedi_commands with TRIG_NOW. Your callback will be called each time an
+ * edge is triggered, and the data values will be two sample_t's, which
+ * should be concatenated to form one 32-bit unsigned int. This value is
+ * the mask of channels that had edges detected from your channel list. Note
+ * that the bit positions in the mask correspond to positions in your
+ * chanlist when you specified the command and *not* channel id's!
+ *
+ * To set the polarity of the edge-detection interrupts pass a nonzero value
+ * for either CR_RANGE or CR_AREF for edge-up polarity, or a zero value for
+ * both CR_RANGE and CR_AREF if you want edge-down polarity.
+ *
+ * In the 48-channel version:
+ *
+ * On subdev 0, the first 24 channels are edge-detect channels.
+ *
+ * In the 96-channel board you have the following channels that can do edge
+ * detection:
+ *
+ * subdev 0, channels 0-23 (first 24 channels of 1st ASIC)
+ * subdev 2, channels 0-23 (first 24 channels of 2nd ASIC)
+ *
+ * Configuration Options:
+ * [0] - I/O port base address
+ * [1] - IRQ (for first ASIC, or first 24 channels)
+ * [2] - IRQ (for second ASIC, pcmuio96 only - IRQ for chans 48-72
+ * can be the same as first irq!)
+ */
+#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
@@ -82,94 +81,62 @@ Configuration Options:
#include "comedi_fc.h"
-#define CHANS_PER_PORT 8
-#define PORTS_PER_ASIC 6
-#define INTR_PORTS_PER_ASIC 3
-#define MAX_CHANS_PER_SUBDEV 24 /* number of channels per comedi subdevice */
-#define PORTS_PER_SUBDEV (MAX_CHANS_PER_SUBDEV/CHANS_PER_PORT)
-#define CHANS_PER_ASIC (CHANS_PER_PORT*PORTS_PER_ASIC)
-#define INTR_CHANS_PER_ASIC 24
-#define INTR_PORTS_PER_SUBDEV (INTR_CHANS_PER_ASIC/CHANS_PER_PORT)
-#define MAX_DIO_CHANS (PORTS_PER_ASIC*2*CHANS_PER_PORT)
-#define MAX_ASICS (MAX_DIO_CHANS/CHANS_PER_ASIC)
-#define CALC_N_SUBDEVS(nchans) ((nchans)/MAX_CHANS_PER_SUBDEV + (!!((nchans)%MAX_CHANS_PER_SUBDEV)) /*+ (nchans > INTR_CHANS_PER_ASIC ? 2 : 1)*/)
-/* IO Memory sizes */
-#define ASIC_IOSIZE (0x10)
-#define PCMUIO48_IOSIZE ASIC_IOSIZE
-#define PCMUIO96_IOSIZE (ASIC_IOSIZE*2)
-
-/* Some offsets - these are all in the 16byte IO memory offset from
- the base address. Note that there is a paging scheme to swap out
- offsets 0x8-0xA using the PAGELOCK register. See the table below.
-
- Register(s) Pages R/W? Description
- --------------------------------------------------------------
- REG_PORTx All R/W Read/Write/Configure IO
- REG_INT_PENDING All ReadOnly Quickly see which INT_IDx has int.
- REG_PAGELOCK All WriteOnly Select a page
- REG_POLx Pg. 1 only WriteOnly Select edge-detection polarity
- REG_ENABx Pg. 2 only WriteOnly Enable/Disable edge-detect. int.
- REG_INT_IDx Pg. 3 only R/W See which ports/bits have ints.
- */
-#define REG_PORT0 0x0
-#define REG_PORT1 0x1
-#define REG_PORT2 0x2
-#define REG_PORT3 0x3
-#define REG_PORT4 0x4
-#define REG_PORT5 0x5
-#define REG_INT_PENDING 0x6
-#define REG_PAGELOCK 0x7 /* page selector register, upper 2 bits select a page
- and bits 0-5 are used to 'lock down' a particular
- port above to make it readonly. */
-#define REG_POL0 0x8
-#define REG_POL1 0x9
-#define REG_POL2 0xA
-#define REG_ENAB0 0x8
-#define REG_ENAB1 0x9
-#define REG_ENAB2 0xA
-#define REG_INT_ID0 0x8
-#define REG_INT_ID1 0x9
-#define REG_INT_ID2 0xA
-
-#define NUM_PAGED_REGS 3
-#define NUM_PAGES 4
-#define FIRST_PAGED_REG 0x8
-#define REG_PAGE_BITOFFSET 6
-#define REG_LOCK_BITOFFSET 0
-#define REG_PAGE_MASK (~((0x1<<REG_PAGE_BITOFFSET)-1))
-#define REG_LOCK_MASK ~(REG_PAGE_MASK)
-#define PAGE_POL 1
-#define PAGE_ENAB 2
-#define PAGE_INT_ID 3
-
/*
- * Board descriptions for two imaginary boards. Describing the
- * boards in this way is optional, and completely driver-dependent.
- * Some drivers use arrays such as this, other do not.
+ * Register I/O map
+ *
+ * Offset Page 0 Page 1 Page 2 Page 3
+ * ------ ----------- ----------- ----------- -----------
+ * 0x00 Port 0 I/O Port 0 I/O Port 0 I/O Port 0 I/O
+ * 0x01 Port 1 I/O Port 1 I/O Port 1 I/O Port 1 I/O
+ * 0x02 Port 2 I/O Port 2 I/O Port 2 I/O Port 2 I/O
+ * 0x03 Port 3 I/O Port 3 I/O Port 3 I/O Port 3 I/O
+ * 0x04 Port 4 I/O Port 4 I/O Port 4 I/O Port 4 I/O
+ * 0x05 Port 5 I/O Port 5 I/O Port 5 I/O Port 5 I/O
+ * 0x06 INT_PENDING INT_PENDING INT_PENDING INT_PENDING
+ * 0x07 Page/Lock Page/Lock Page/Lock Page/Lock
+ * 0x08 N/A POL_0 ENAB_0 INT_ID0
+ * 0x09 N/A POL_1 ENAB_1 INT_ID1
+ * 0x0a N/A POL_2 ENAB_2 INT_ID2
*/
+#define PCMUIO_PORT_REG(x) (0x00 + (x))
+#define PCMUIO_INT_PENDING_REG 0x06
+#define PCMUIO_PAGE_LOCK_REG 0x07
+#define PCMUIO_LOCK_PORT(x) ((1 << (x)) & 0x3f)
+#define PCMUIO_PAGE(x) (((x) & 0x3) << 6)
+#define PCMUIO_PAGE_MASK PCMUIO_PAGE(3)
+#define PCMUIO_PAGE_POL 1
+#define PCMUIO_PAGE_ENAB 2
+#define PCMUIO_PAGE_INT_ID 3
+#define PCMUIO_PAGE_REG(x) (0x08 + (x))
+
+#define PCMUIO_ASIC_IOSIZE 0x10
+#define PCMUIO_MAX_ASICS 2
+
struct pcmuio_board {
const char *name;
const int num_asics;
- const int num_channels_per_port;
- const int num_ports;
};
-/* this structure is for data unique to this subdevice. */
-struct pcmuio_subdev_private {
- /* mapping of halfwords (bytes) in port/chanarray to iobase */
- unsigned long iobases[PORTS_PER_SUBDEV];
+static const struct pcmuio_board pcmuio_boards[] = {
+ {
+ .name = "pcmuio48",
+ .num_asics = 1,
+ }, {
+ .name = "pcmuio96",
+ .num_asics = 2,
+ },
+};
+struct pcmuio_subdev_private {
/* The below is only used for intr subdevices */
struct {
- int asic; /* if non-negative, this subdev has an interrupt asic */
- int first_chan; /* if nonnegative, the first channel id for
- interrupts. */
- int num_asic_chans; /* the number of asic channels in this subdev
- that have interrutps */
- int asic_chan; /* if nonnegative, the first channel id with
- respect to the asic that has interrupts */
- int enabled_mask; /* subdev-relative channel mask for channels
- we are interested in */
+ /* if non-negative, this subdev has an interrupt asic */
+ int asic;
+ /*
+ * subdev-relative channel mask for channels
+ * we are interested in
+ */
+ int enabled_mask;
int active;
int stop_count;
int continuous;
@@ -177,425 +144,260 @@ struct pcmuio_subdev_private {
} intr;
};
-/* this structure is for data unique to this hardware driver. If
- several hardware drivers keep similar information in this structure,
- feel free to suggest moving the variable to the struct comedi_device struct. */
struct pcmuio_private {
struct {
- unsigned char pagelock; /* current page and lock */
- unsigned char pol[NUM_PAGED_REGS]; /* shadow of POLx registers */
- unsigned char enab[NUM_PAGED_REGS]; /* shadow of ENABx registers */
- int num;
- unsigned long iobase;
unsigned int irq;
spinlock_t spinlock;
- } asics[MAX_ASICS];
+ } asics[PCMUIO_MAX_ASICS];
struct pcmuio_subdev_private *sprivs;
};
-#define subpriv ((struct pcmuio_subdev_private *)s->private)
+static void pcmuio_write(struct comedi_device *dev, unsigned int val,
+ int asic, int page, int port)
+{
+ unsigned long iobase = dev->iobase + (asic * PCMUIO_ASIC_IOSIZE);
-/* DIO devices are slightly special. Although it is possible to
- * implement the insn_read/insn_write interface, it is much more
- * useful to applications if you implement the insn_bits interface.
- * This allows packed reading/writing of the DIO channels. The
- * comedi core can convert between insn_bits and insn_read/write */
+ if (page == 0) {
+ /* Port registers are valid for any page */
+ outb(val & 0xff, iobase + PCMUIO_PORT_REG(port + 0));
+ outb((val >> 8) & 0xff, iobase + PCMUIO_PORT_REG(port + 1));
+ outb((val >> 16) & 0xff, iobase + PCMUIO_PORT_REG(port + 2));
+ } else {
+ outb(PCMUIO_PAGE(page), iobase + PCMUIO_PAGE_LOCK_REG);
+ outb(val & 0xff, iobase + PCMUIO_PAGE_REG(0));
+ outb((val >> 8) & 0xff, iobase + PCMUIO_PAGE_REG(1));
+ outb((val >> 16) & 0xff, iobase + PCMUIO_PAGE_REG(2));
+ }
+}
+
+static unsigned int pcmuio_read(struct comedi_device *dev,
+ int asic, int page, int port)
+{
+ unsigned long iobase = dev->iobase + (asic * PCMUIO_ASIC_IOSIZE);
+ unsigned int val;
+
+ if (page == 0) {
+ /* Port registers are valid for any page */
+ val = inb(iobase + PCMUIO_PORT_REG(port + 0));
+ val |= (inb(iobase + PCMUIO_PORT_REG(port + 1)) << 8);
+ val |= (inb(iobase + PCMUIO_PORT_REG(port + 2)) << 16);
+ } else {
+ outb(PCMUIO_PAGE(page), iobase + PCMUIO_PAGE_LOCK_REG);
+ val = inb(iobase + PCMUIO_PAGE_REG(0));
+ val |= (inb(iobase + PCMUIO_PAGE_REG(1)) << 8);
+ val |= (inb(iobase + PCMUIO_PAGE_REG(2)) << 16);
+ }
+
+ return val;
+}
+
+/*
+ * Each channel can be individually programmed for input or output.
+ * Writing a '0' to a channel causes the corresponding output pin
+ * to go to a high-z state (pulled high by an external 10K resistor).
+ * This allows it to be used as an input. When used in the input mode,
+ * a read reflects the inverted state of the I/O pin, such that a
+ * high on the pin will read as a '0' in the register. Writing a '1'
+ * to a bit position causes the pin to sink current (up to 12mA),
+ * effectively pulling it low.
+ */
static int pcmuio_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
- int byte_no;
-
- /* NOTE:
- reading a 0 means this channel was high
- writine a 0 sets the channel high
- reading a 1 means this channel was low
- writing a 1 means set this channel low
-
- Therefore everything is always inverted. */
-
- /* The insn data is a mask in data[0] and the new data
- * in data[1], each channel cooresponding to a bit. */
-
-#ifdef DAMMIT_ITS_BROKEN
- /* DEBUG */
- dev_dbg(dev->class_dev, "write mask: %08x data: %08x\n", data[0],
- data[1]);
-#endif
-
- s->state = 0;
-
- for (byte_no = 0; byte_no < s->n_chan / CHANS_PER_PORT; ++byte_no) {
- /* address of 8-bit port */
- unsigned long ioaddr = subpriv->iobases[byte_no],
- /* bit offset of port in 32-bit doubleword */
- offset = byte_no * 8;
- /* this 8-bit port's data */
- unsigned char byte = 0,
- /* The write mask for this port (if any) */
- write_mask_byte = (data[0] >> offset) & 0xff,
- /* The data byte for this port */
- data_byte = (data[1] >> offset) & 0xff;
-
- byte = inb(ioaddr); /* read all 8-bits for this port */
-
-#ifdef DAMMIT_ITS_BROKEN
- /* DEBUG */
- printk
- ("byte %d wmb %02x db %02x offset %02d io %04x, data_in %02x ",
- byte_no, (unsigned)write_mask_byte, (unsigned)data_byte,
- offset, ioaddr, (unsigned)byte);
-#endif
-
- if (write_mask_byte) {
- /* this byte has some write_bits -- so set the output lines */
- byte &= ~write_mask_byte; /* clear bits for write mask */
- byte |= ~data_byte & write_mask_byte; /* set to inverted data_byte */
- /* Write out the new digital output state */
- outb(byte, ioaddr);
- }
-#ifdef DAMMIT_ITS_BROKEN
- /* DEBUG */
- dev_dbg(dev->class_dev, "data_out_byte %02x\n", (unsigned)byte);
-#endif
- /* save the digital input lines for this byte.. */
- s->state |= ((unsigned int)byte) << offset;
- }
+ unsigned int mask = data[0] & s->io_bits; /* outputs only */
+ unsigned int bits = data[1];
+ int asic = s->index / 2;
+ int port = (s->index % 2) ? 3 : 0;
+ unsigned int val;
+
+ /* get inverted state of the channels from the port */
+ val = pcmuio_read(dev, asic, 0, port);
- /* now return the DIO lines to data[1] - note they came inverted! */
- data[1] = ~s->state;
+ /* get the true state of the channels */
+ s->state = val ^ ((0x1 << s->n_chan) - 1);
-#ifdef DAMMIT_ITS_BROKEN
- /* DEBUG */
- dev_dbg(dev->class_dev, "s->state %08x data_out %08x\n", s->state,
- data[1]);
-#endif
+ if (mask) {
+ s->state &= ~mask;
+ s->state |= (mask & bits);
+
+ /* invert the state and update the channels */
+ val = s->state ^ ((0x1 << s->n_chan) - 1);
+ pcmuio_write(dev, val, asic, 0, port);
+ }
+
+ data[1] = s->state;
return insn->n;
}
-/* The input or output configuration of each digital line is
- * configured by a special insn_config instruction. chanspec
- * contains the channel to be changed, and data[0] contains the
- * value COMEDI_INPUT or COMEDI_OUTPUT. */
static int pcmuio_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int chan = CR_CHAN(insn->chanspec), byte_no = chan / 8, bit_no =
- chan % 8;
- unsigned long ioaddr;
- unsigned char byte;
-
- /* Compute ioaddr for this channel */
- ioaddr = subpriv->iobases[byte_no];
-
- /* NOTE:
- writing a 0 an IO channel's bit sets the channel to INPUT
- and pulls the line high as well
+ int asic = s->index / 2;
+ int port = (s->index % 2) ? 3 : 0;
+ int ret;
- writing a 1 to an IO channel's bit pulls the line low
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
- All channels are implicitly always in OUTPUT mode -- but when
- they are high they can be considered to be in INPUT mode..
+ if (data[0] == INSN_CONFIG_DIO_INPUT)
+ pcmuio_write(dev, s->io_bits, asic, 0, port);
- Thus, we only force channels low if the config request was INPUT,
- otherwise we do nothing to the hardware. */
+ return insn->n;
+}
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- /* save to io_bits -- don't actually do anything since
- all input channels are also output channels... */
- s->io_bits |= 1 << chan;
- break;
- case INSN_CONFIG_DIO_INPUT:
- /* write a 0 to the actual register representing the channel
- to set it to 'input'. 0 means "float high". */
- byte = inb(ioaddr);
- byte &= ~(1 << bit_no);
- /**< set input channel to '0' */
-
- /* write out byte -- this is the only time we actually affect the
- hardware as all channels are implicitly output -- but input
- channels are set to float-high */
- outb(byte, ioaddr);
-
- /* save to io_bits */
- s->io_bits &= ~(1 << chan);
- break;
+static void pcmuio_reset(struct comedi_device *dev)
+{
+ const struct pcmuio_board *board = comedi_board(dev);
+ int asic;
- case INSN_CONFIG_DIO_QUERY:
- /* retrieve from shadow register */
- data[1] =
- (s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
+ for (asic = 0; asic < board->num_asics; ++asic) {
+ /* first, clear all the DIO port bits */
+ pcmuio_write(dev, 0, asic, 0, 0);
+ pcmuio_write(dev, 0, asic, 0, 3);
- default:
- return -EINVAL;
- break;
+ /* Next, clear all the paged registers for each page */
+ pcmuio_write(dev, 0, asic, PCMUIO_PAGE_POL, 0);
+ pcmuio_write(dev, 0, asic, PCMUIO_PAGE_ENAB, 0);
+ pcmuio_write(dev, 0, asic, PCMUIO_PAGE_INT_ID, 0);
}
-
- return insn->n;
}
-static void switch_page(struct comedi_device *dev, int asic, int page)
+static void pcmuio_stop_intr(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- const struct pcmuio_board *board = comedi_board(dev);
- struct pcmuio_private *devpriv = dev->private;
+ struct pcmuio_subdev_private *subpriv = s->private;
+ int asic;
- if (asic < 0 || asic >= board->num_asics)
- return; /* paranoia */
- if (page < 0 || page >= NUM_PAGES)
- return; /* more paranoia */
+ asic = subpriv->intr.asic;
+ if (asic < 0)
+ return; /* not an interrupt subdev */
- devpriv->asics[asic].pagelock &= ~REG_PAGE_MASK;
- devpriv->asics[asic].pagelock |= page << REG_PAGE_BITOFFSET;
+ subpriv->intr.enabled_mask = 0;
+ subpriv->intr.active = 0;
+ s->async->inttrig = NULL;
- /* now write out the shadow register */
- outb(devpriv->asics[asic].pagelock,
- dev->iobase + ASIC_IOSIZE * asic + REG_PAGELOCK);
+ /* disable all intrs for this subdev.. */
+ pcmuio_write(dev, 0, asic, PCMUIO_PAGE_ENAB, 0);
}
-static void init_asics(struct comedi_device *dev)
-{ /* sets up an
- ASIC chip to defaults */
- const struct pcmuio_board *board = comedi_board(dev);
- int asic;
-
- for (asic = 0; asic < board->num_asics; ++asic) {
- int port, page;
- unsigned long baseaddr = dev->iobase + asic * ASIC_IOSIZE;
+static void pcmuio_handle_intr_subdev(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned triggered)
+{
+ struct pcmuio_subdev_private *subpriv = s->private;
+ unsigned int len = s->async->cmd.chanlist_len;
+ unsigned oldevents = s->async->events;
+ unsigned int val = 0;
+ unsigned long flags;
+ unsigned mytrig;
+ unsigned int i;
- switch_page(dev, asic, 0); /* switch back to page 0 */
+ spin_lock_irqsave(&subpriv->intr.spinlock, flags);
- /* first, clear all the DIO port bits */
- for (port = 0; port < PORTS_PER_ASIC; ++port)
- outb(0, baseaddr + REG_PORT0 + port);
+ if (!subpriv->intr.active)
+ goto done;
- /* Next, clear all the paged registers for each page */
- for (page = 1; page < NUM_PAGES; ++page) {
- int reg;
- /* now clear all the paged registers */
- switch_page(dev, asic, page);
- for (reg = FIRST_PAGED_REG;
- reg < FIRST_PAGED_REG + NUM_PAGED_REGS; ++reg)
- outb(0, baseaddr + reg);
- }
+ mytrig = triggered;
+ mytrig &= ((0x1 << s->n_chan) - 1);
- /* DEBUG set rising edge interrupts on port0 of both asics */
- /*switch_page(dev, asic, PAGE_POL);
- outb(0xff, baseaddr + REG_POL0);
- switch_page(dev, asic, PAGE_ENAB);
- outb(0xff, baseaddr + REG_ENAB0); */
- /* END DEBUG */
+ if (!(mytrig & subpriv->intr.enabled_mask))
+ goto done;
- switch_page(dev, asic, 0); /* switch back to default page 0 */
+ for (i = 0; i < len; i++) {
+ unsigned int chan = CR_CHAN(s->async->cmd.chanlist[i]);
+ if (mytrig & (1U << chan))
+ val |= (1U << i);
+ }
+ /* Write the scan to the buffer. */
+ if (comedi_buf_put(s->async, ((short *)&val)[0]) &&
+ comedi_buf_put(s->async, ((short *)&val)[1])) {
+ s->async->events |= (COMEDI_CB_BLOCK | COMEDI_CB_EOS);
+ } else {
+ /* Overflow! Stop acquisition!! */
+ /* TODO: STOP_ACQUISITION_CALL_HERE!! */
+ pcmuio_stop_intr(dev, s);
}
-}
-#ifdef notused
-static void lock_port(struct comedi_device *dev, int asic, int port)
-{
- const struct pcmuio_board *board = comedi_board(dev);
- struct pcmuio_private *devpriv = dev->private;
+ /* Check for end of acquisition. */
+ if (!subpriv->intr.continuous) {
+ /* stop_src == TRIG_COUNT */
+ if (subpriv->intr.stop_count > 0) {
+ subpriv->intr.stop_count--;
+ if (subpriv->intr.stop_count == 0) {
+ s->async->events |= COMEDI_CB_EOA;
+ /* TODO: STOP_ACQUISITION_CALL_HERE!! */
+ pcmuio_stop_intr(dev, s);
+ }
+ }
+ }
- if (asic < 0 || asic >= board->num_asics)
- return; /* paranoia */
- if (port < 0 || port >= PORTS_PER_ASIC)
- return; /* more paranoia */
+done:
+ spin_unlock_irqrestore(&subpriv->intr.spinlock, flags);
- devpriv->asics[asic].pagelock |= 0x1 << port;
- /* now write out the shadow register */
- outb(devpriv->asics[asic].pagelock,
- dev->iobase + ASIC_IOSIZE * asic + REG_PAGELOCK);
+ if (oldevents != s->async->events)
+ comedi_event(dev, s);
}
-static void unlock_port(struct comedi_device *dev, int asic, int port)
+static int pcmuio_handle_asic_interrupt(struct comedi_device *dev, int asic)
{
- const struct pcmuio_board *board = comedi_board(dev);
struct pcmuio_private *devpriv = dev->private;
+ struct pcmuio_subdev_private *subpriv;
+ unsigned long iobase = dev->iobase + (asic * PCMUIO_ASIC_IOSIZE);
+ unsigned int triggered = 0;
+ int got1 = 0;
+ unsigned long flags;
+ unsigned char int_pend;
+ int i;
- if (asic < 0 || asic >= board->num_asics)
- return; /* paranoia */
- if (port < 0 || port >= PORTS_PER_ASIC)
- return; /* more paranoia */
- devpriv->asics[asic].pagelock &= ~(0x1 << port) | REG_LOCK_MASK;
- /* now write out the shadow register */
- outb(devpriv->asics[asic].pagelock,
- dev->iobase + ASIC_IOSIZE * asic + REG_PAGELOCK);
-}
-#endif /* notused */
+ spin_lock_irqsave(&devpriv->asics[asic].spinlock, flags);
-static void pcmuio_stop_intr(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- int nports, firstport, asic, port;
- struct pcmuio_private *devpriv = dev->private;
+ int_pend = inb(iobase + PCMUIO_INT_PENDING_REG) & 0x07;
+ if (int_pend) {
+ triggered = pcmuio_read(dev, asic, PCMUIO_PAGE_INT_ID, 0);
+ pcmuio_write(dev, 0, asic, PCMUIO_PAGE_INT_ID, 0);
- asic = subpriv->intr.asic;
- if (asic < 0)
- return; /* not an interrupt subdev */
+ ++got1;
+ }
- subpriv->intr.enabled_mask = 0;
- subpriv->intr.active = 0;
- s->async->inttrig = NULL;
- nports = subpriv->intr.num_asic_chans / CHANS_PER_PORT;
- firstport = subpriv->intr.asic_chan / CHANS_PER_PORT;
- switch_page(dev, asic, PAGE_ENAB);
- for (port = firstport; port < firstport + nports; ++port) {
- /* disable all intrs for this subdev.. */
- outb(0, devpriv->asics[asic].iobase + REG_ENAB0 + port);
+ spin_unlock_irqrestore(&devpriv->asics[asic].spinlock, flags);
+
+ if (triggered) {
+ struct comedi_subdevice *s;
+ /* TODO here: dispatch io lines to subdevs with commands.. */
+ for (i = 0; i < dev->n_subdevices; i++) {
+ s = &dev->subdevices[i];
+ subpriv = s->private;
+ if (subpriv->intr.asic == asic) {
+ /*
+ * This is an interrupt subdev, and it
+ * matches this asic!
+ */
+ pcmuio_handle_intr_subdev(dev, s,
+ triggered);
+ }
+ }
}
+ return got1;
}
-static irqreturn_t interrupt_pcmuio(int irq, void *d)
+static irqreturn_t pcmuio_interrupt(int irq, void *d)
{
- int asic, got1 = 0;
- struct comedi_device *dev = (struct comedi_device *)d;
+ struct comedi_device *dev = d;
struct pcmuio_private *devpriv = dev->private;
- int i;
+ int got1 = 0;
+ int asic;
- for (asic = 0; asic < MAX_ASICS; ++asic) {
+ for (asic = 0; asic < PCMUIO_MAX_ASICS; ++asic) {
if (irq == devpriv->asics[asic].irq) {
- unsigned long flags;
- unsigned triggered = 0;
- unsigned long iobase = devpriv->asics[asic].iobase;
/* it is an interrupt for ASIC #asic */
- unsigned char int_pend;
-
- spin_lock_irqsave(&devpriv->asics[asic].spinlock,
- flags);
-
- int_pend = inb(iobase + REG_INT_PENDING) & 0x07;
-
- if (int_pend) {
- int port;
- for (port = 0; port < INTR_PORTS_PER_ASIC;
- ++port) {
- if (int_pend & (0x1 << port)) {
- unsigned char
- io_lines_with_edges = 0;
- switch_page(dev, asic,
- PAGE_INT_ID);
- io_lines_with_edges =
- inb(iobase +
- REG_INT_ID0 + port);
-
- if (io_lines_with_edges)
- /* clear pending interrupt */
- outb(0, iobase +
- REG_INT_ID0 +
- port);
-
- triggered |=
- io_lines_with_edges <<
- port * 8;
- }
- }
-
- ++got1;
- }
-
- spin_unlock_irqrestore(&devpriv->asics[asic].spinlock,
- flags);
-
- if (triggered) {
- struct comedi_subdevice *s;
- /* TODO here: dispatch io lines to subdevs with commands.. */
- printk
- ("PCMUIO DEBUG: got edge detect interrupt %d asic %d which_chans: %06x\n",
- irq, asic, triggered);
- for (i = 0; i < dev->n_subdevices; i++) {
- s = &dev->subdevices[i];
- if (subpriv->intr.asic == asic) { /* this is an interrupt subdev, and it matches this asic! */
- unsigned long flags;
- unsigned oldevents;
-
- spin_lock_irqsave(&subpriv->
- intr.spinlock,
- flags);
-
- oldevents = s->async->events;
-
- if (subpriv->intr.active) {
- unsigned mytrig =
- ((triggered >>
- subpriv->intr.asic_chan)
- &
- ((0x1 << subpriv->
- intr.
- num_asic_chans) -
- 1)) << subpriv->
- intr.first_chan;
- if (mytrig &
- subpriv->intr.enabled_mask)
- {
- unsigned int val
- = 0;
- unsigned int n,
- ch, len;
-
- len =
- s->
- async->cmd.chanlist_len;
- for (n = 0;
- n < len;
- n++) {
- ch = CR_CHAN(s->async->cmd.chanlist[n]);
- if (mytrig & (1U << ch)) {
- val |= (1U << n);
- }
- }
- /* Write the scan to the buffer. */
- if (comedi_buf_put(s->async, ((short *)&val)[0])
- &&
- comedi_buf_put
- (s->async,
- ((short *)
- &val)[1]))
- {
- s->async->events |= (COMEDI_CB_BLOCK | COMEDI_CB_EOS);
- } else {
- /* Overflow! Stop acquisition!! */
- /* TODO: STOP_ACQUISITION_CALL_HERE!! */
- pcmuio_stop_intr
- (dev,
- s);
- }
-
- /* Check for end of acquisition. */
- if (!subpriv->intr.continuous) {
- /* stop_src == TRIG_COUNT */
- if (subpriv->intr.stop_count > 0) {
- subpriv->intr.stop_count--;
- if (subpriv->intr.stop_count == 0) {
- s->async->events |= COMEDI_CB_EOA;
- /* TODO: STOP_ACQUISITION_CALL_HERE!! */
- pcmuio_stop_intr
- (dev,
- s);
- }
- }
- }
- }
- }
-
- spin_unlock_irqrestore
- (&subpriv->intr.spinlock,
- flags);
-
- if (oldevents !=
- s->async->events) {
- comedi_event(dev, s);
- }
-
- }
-
- }
- }
-
+ if (pcmuio_handle_asic_interrupt(dev, asic))
+ got1++;
}
}
if (!got1)
@@ -606,7 +408,7 @@ static irqreturn_t interrupt_pcmuio(int irq, void *d)
static int pcmuio_start_intr(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct pcmuio_private *devpriv = dev->private;
+ struct pcmuio_subdev_private *subpriv = s->private;
if (!subpriv->intr.continuous && subpriv->intr.stop_count == 0) {
/* An empty acquisition! */
@@ -615,7 +417,7 @@ static int pcmuio_start_intr(struct comedi_device *dev,
return 1;
} else {
unsigned bits = 0, pol_bits = 0, n;
- int nports, firstport, asic, port;
+ int asic;
struct comedi_cmd *cmd = &s->async->cmd;
asic = subpriv->intr.asic;
@@ -624,8 +426,6 @@ static int pcmuio_start_intr(struct comedi_device *dev,
subdev */
subpriv->intr.enabled_mask = 0;
subpriv->intr.active = 1;
- nports = subpriv->intr.num_asic_chans / CHANS_PER_PORT;
- firstport = subpriv->intr.asic_chan / CHANS_PER_PORT;
if (cmd->chanlist) {
for (n = 0; n < cmd->chanlist_len; n++) {
bits |= (1U << CR_CHAN(cmd->chanlist[n]));
@@ -635,31 +435,19 @@ static int pcmuio_start_intr(struct comedi_device *dev,
<< CR_CHAN(cmd->chanlist[n]);
}
}
- bits &= ((0x1 << subpriv->intr.num_asic_chans) -
- 1) << subpriv->intr.first_chan;
+ bits &= ((0x1 << s->n_chan) - 1);
subpriv->intr.enabled_mask = bits;
- switch_page(dev, asic, PAGE_ENAB);
- for (port = firstport; port < firstport + nports; ++port) {
- unsigned enab =
- bits >> (subpriv->intr.first_chan + (port -
- firstport) *
- 8) & 0xff, pol =
- pol_bits >> (subpriv->intr.first_chan +
- (port - firstport) * 8) & 0xff;
- /* set enab intrs for this subdev.. */
- outb(enab,
- devpriv->asics[asic].iobase + REG_ENAB0 + port);
- switch_page(dev, asic, PAGE_POL);
- outb(pol,
- devpriv->asics[asic].iobase + REG_ENAB0 + port);
- }
+ /* set pol and enab intrs for this subdev.. */
+ pcmuio_write(dev, pol_bits, asic, PCMUIO_PAGE_POL, 0);
+ pcmuio_write(dev, bits, asic, PCMUIO_PAGE_ENAB, 0);
}
return 0;
}
static int pcmuio_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
+ struct pcmuio_subdev_private *subpriv = s->private;
unsigned long flags;
spin_lock_irqsave(&subpriv->intr.spinlock, flags);
@@ -677,6 +465,7 @@ static int
pcmuio_inttrig_start_intr(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int trignum)
{
+ struct pcmuio_subdev_private *subpriv = s->private;
unsigned long flags;
int event = 0;
@@ -701,6 +490,7 @@ pcmuio_inttrig_start_intr(struct comedi_device *dev, struct comedi_subdevice *s,
*/
static int pcmuio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
+ struct pcmuio_subdev_private *subpriv = s->private;
struct comedi_cmd *cmd = &s->async->cmd;
unsigned long flags;
int event = 0;
@@ -797,39 +587,30 @@ static int pcmuio_cmdtest(struct comedi_device *dev,
static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct pcmuio_board *board = comedi_board(dev);
- struct pcmuio_private *devpriv;
struct comedi_subdevice *s;
- int sdev_no, chans_left, n_subdevs, port, asic, thisasic_chanct = 0;
- unsigned int irq[MAX_ASICS];
+ struct pcmuio_private *devpriv;
+ struct pcmuio_subdev_private *subpriv;
+ int sdev_no, n_subdevs, asic;
+ unsigned int irq[PCMUIO_MAX_ASICS];
int ret;
irq[0] = it->options[1];
irq[1] = it->options[2];
ret = comedi_request_region(dev, it->options[0],
- board->num_asics * ASIC_IOSIZE);
+ board->num_asics * PCMUIO_ASIC_IOSIZE);
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
-
- for (asic = 0; asic < MAX_ASICS; ++asic) {
- devpriv->asics[asic].num = asic;
- devpriv->asics[asic].iobase = dev->iobase + asic * ASIC_IOSIZE;
- devpriv->asics[asic].irq = 0; /* this gets actually set at the end of
- this function when we
- request_irqs */
+
+ for (asic = 0; asic < PCMUIO_MAX_ASICS; ++asic)
spin_lock_init(&devpriv->asics[asic].spinlock);
- }
- chans_left = CHANS_PER_ASIC * board->num_asics;
- n_subdevs = CALC_N_SUBDEVS(chans_left);
- devpriv->sprivs = kcalloc(n_subdevs,
- sizeof(struct pcmuio_subdev_private),
- GFP_KERNEL);
+ n_subdevs = board->num_asics * 2;
+ devpriv->sprivs = kcalloc(n_subdevs, sizeof(*subpriv), GFP_KERNEL);
if (!devpriv->sprivs)
return -ENOMEM;
@@ -837,74 +618,40 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- port = 0;
- asic = 0;
for (sdev_no = 0; sdev_no < (int)dev->n_subdevices; ++sdev_no) {
- int byte_no;
-
s = &dev->subdevices[sdev_no];
- s->private = &devpriv->sprivs[sdev_no];
+ subpriv = &devpriv->sprivs[sdev_no];
+ s->private = subpriv;
s->maxdata = 1;
s->range_table = &range_digital;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->type = COMEDI_SUBD_DIO;
s->insn_bits = pcmuio_dio_insn_bits;
s->insn_config = pcmuio_dio_insn_config;
- s->n_chan = min(chans_left, MAX_CHANS_PER_SUBDEV);
- subpriv->intr.asic = -1;
- subpriv->intr.first_chan = -1;
- subpriv->intr.asic_chan = -1;
- subpriv->intr.num_asic_chans = -1;
- subpriv->intr.active = 0;
- s->len_chanlist = 1;
-
- /* save the ioport address for each 'port' of 8 channels in the
- subdevice */
- for (byte_no = 0; byte_no < PORTS_PER_SUBDEV; ++byte_no, ++port) {
- if (port >= PORTS_PER_ASIC) {
- port = 0;
- ++asic;
- thisasic_chanct = 0;
- }
- subpriv->iobases[byte_no] =
- devpriv->asics[asic].iobase + port;
-
- if (thisasic_chanct <
- CHANS_PER_PORT * INTR_PORTS_PER_ASIC
- && subpriv->intr.asic < 0) {
- /* this is an interrupt subdevice, so setup the struct */
- subpriv->intr.asic = asic;
- subpriv->intr.active = 0;
- subpriv->intr.stop_count = 0;
- subpriv->intr.first_chan = byte_no * 8;
- subpriv->intr.asic_chan = thisasic_chanct;
- subpriv->intr.num_asic_chans =
- s->n_chan - subpriv->intr.first_chan;
- dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->cancel = pcmuio_cancel;
- s->do_cmd = pcmuio_cmd;
- s->do_cmdtest = pcmuio_cmdtest;
- s->len_chanlist = subpriv->intr.num_asic_chans;
- }
- thisasic_chanct += CHANS_PER_PORT;
+ s->n_chan = 24;
+
+ /* subdevices 0 and 2 support interrupts */
+ if ((sdev_no % 2) == 0) {
+ /* setup the interrupt subdevice */
+ subpriv->intr.asic = sdev_no / 2;
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->cancel = pcmuio_cancel;
+ s->do_cmd = pcmuio_cmd;
+ s->do_cmdtest = pcmuio_cmdtest;
+ s->len_chanlist = s->n_chan;
+ } else {
+ subpriv->intr.asic = -1;
+ s->len_chanlist = 1;
}
spin_lock_init(&subpriv->intr.spinlock);
-
- chans_left -= s->n_chan;
-
- if (!chans_left) {
- asic = 0; /* reset the asic to our first asic, to do intr subdevs */
- port = 0;
- }
-
}
- init_asics(dev); /* clear out all the registers, basically */
+ pcmuio_reset(dev);
- for (asic = 0; irq[0] && asic < MAX_ASICS; ++asic) {
+ for (asic = 0; irq[0] && asic < PCMUIO_MAX_ASICS; ++asic) {
if (irq[asic]
- && request_irq(irq[asic], interrupt_pcmuio,
+ && request_irq(irq[asic], pcmuio_interrupt,
IRQF_SHARED, board->name, dev)) {
int i;
/* unroll the allocated irqs.. */
@@ -917,17 +664,7 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
devpriv->asics[asic].irq = irq[asic];
}
- if (irq[0]) {
- dev_dbg(dev->class_dev, "irq: %u\n", irq[0]);
- if (irq[1] && board->num_asics == 2)
- dev_dbg(dev->class_dev, "second ASIC irq: %u\n",
- irq[1]);
- } else {
- dev_dbg(dev->class_dev, "(IRQ mode disabled)\n");
- }
-
-
- return 1;
+ return 0;
}
static void pcmuio_detach(struct comedi_device *dev)
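With the rework above, each ASIC contributes two 24-channel DIO subdevices and only the even-numbered one of each pair handles interrupts, so its owning ASIC is simply sdev_no / 2. A minimal standalone C sketch of that mapping (pcmuio96 layout assumed from the hunk above):

#include <stdio.h>

/* Map a pcmuio subdevice index to its interrupt ASIC, or -1 when the
 * subdevice has no interrupt support, following the scheme used in the
 * reworked pcmuio_attach(). Illustrative only. */
static int pcmuio_subdev_to_irq_asic(int sdev_no)
{
	return (sdev_no % 2) == 0 ? sdev_no / 2 : -1;
}

int main(void)
{
	int sdev_no;

	/* pcmuio96: 2 ASICs -> 4 subdevices of 24 channels each */
	for (sdev_no = 0; sdev_no < 4; sdev_no++)
		printf("subdevice %d -> irq asic %d\n",
		       sdev_no, pcmuio_subdev_to_irq_asic(sdev_no));
	return 0;
}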
@@ -935,27 +672,16 @@ static void pcmuio_detach(struct comedi_device *dev)
struct pcmuio_private *devpriv = dev->private;
int i;
- for (i = 0; i < MAX_ASICS; ++i) {
- if (devpriv->asics[i].irq)
- free_irq(devpriv->asics[i].irq, dev);
- }
- if (devpriv && devpriv->sprivs)
+ if (devpriv) {
+ for (i = 0; i < PCMUIO_MAX_ASICS; ++i) {
+ if (devpriv->asics[i].irq)
+ free_irq(devpriv->asics[i].irq, dev);
+ }
kfree(devpriv->sprivs);
+ }
comedi_legacy_detach(dev);
}
-static const struct pcmuio_board pcmuio_boards[] = {
- {
- .name = "pcmuio48",
- .num_asics = 1,
- .num_ports = 6,
- }, {
- .name = "pcmuio96",
- .num_asics = 2,
- .num_ports = 12,
- },
-};
-
static struct comedi_driver pcmuio_driver = {
.driver_name = "pcmuio",
.module = THIS_MODULE,
diff --git a/drivers/staging/comedi/drivers/plx9052.h b/drivers/staging/comedi/drivers/plx9052.h
index ff76fbb..fbcf250 100644
--- a/drivers/staging/comedi/drivers/plx9052.h
+++ b/drivers/staging/comedi/drivers/plx9052.h
@@ -16,11 +16,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
#ifndef _PLX9052_H_
diff --git a/drivers/staging/comedi/drivers/poc.c b/drivers/staging/comedi/drivers/poc.c
index b55a16b..2ae4ee1 100644
--- a/drivers/staging/comedi/drivers/poc.c
+++ b/drivers/staging/comedi/drivers/poc.c
@@ -13,34 +13,26 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: poc
Description: Generic driver for very simple devices
Author: ds
-Devices: [Keithley Metrabyte] DAC-02 (dac02), [Advantech] PCL-733 (pcl733),
- PCL-734 (pcl734)
+Devices: [Keithley Metrabyte] DAC-02 (dac02)
Updated: Sat, 16 Mar 2002 17:34:48 -0800
Status: unknown
This driver is intended to support very simple ISA-based devices,
including:
dac02 - Keithley DAC-02 analog output board
- pcl733 - Advantech PCL-733
- pcl734 - Advantech PCL-734
Configuration options:
[0] - I/O port base
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-
struct boarddef_struct {
const char *name;
unsigned int iosize;
@@ -101,39 +93,6 @@ static int dac02_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
return 1;
}
-static int pcl733_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- data[1] = inb(dev->iobase + 0);
- data[1] |= (inb(dev->iobase + 1) << 8);
- data[1] |= (inb(dev->iobase + 2) << 16);
- data[1] |= (inb(dev->iobase + 3) << 24);
-
- return insn->n;
-}
-
-static int pcl734_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- if (data[0]) {
- s->state &= ~data[0];
- s->state |= (data[0] & data[1]);
- if ((data[0] >> 0) & 0xff)
- outb((s->state >> 0) & 0xff, dev->iobase + 0);
- if ((data[0] >> 8) & 0xff)
- outb((s->state >> 8) & 0xff, dev->iobase + 1);
- if ((data[0] >> 16) & 0xff)
- outb((s->state >> 16) & 0xff, dev->iobase + 2);
- if ((data[0] >> 24) & 0xff)
- outb((s->state >> 24) & 0xff, dev->iobase + 3);
- }
- data[1] = s->state;
-
- return insn->n;
-}
-
static int poc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct boarddef_struct *board = comedi_board(dev);
@@ -149,10 +108,9 @@ static int poc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
/* analog output subdevice */
s = &dev->subdevices[0];
@@ -180,22 +138,6 @@ static const struct boarddef_struct boards[] = {
.winsn = dac02_ao_winsn,
.rinsn = readback_insn,
.range = &range_unknown,
- }, {
- .name = "pcl733",
- .iosize = 4,
- .type = COMEDI_SUBD_DI,
- .n_chan = 32,
- .n_bits = 1,
- .insnbits = pcl733_insn_bits,
- .range = &range_digital,
- }, {
- .name = "pcl734",
- .iosize = 4,
- .type = COMEDI_SUBD_DO,
- .n_chan = 32,
- .n_bits = 1,
- .insnbits = pcl734_insn_bits,
- .range = &range_digital,
},
};
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index e092ce8..9775d36 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -47,6 +47,7 @@ Status: works
Devices: [Quatech] DAQP-208 (daqp), DAQP-308
*/
+#include <linux/module.h>
#include "../comedidev.h"
#include <linux/semaphore.h>
@@ -715,10 +716,9 @@ static int daqp_auto_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
link->config_flags |= CONF_AUTO_SET_IO | CONF_ENABLE_IRQ;
ret = comedi_pcmcia_enable(dev, NULL);
diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c
index 30a1728..93c980c 100644
--- a/drivers/staging/comedi/drivers/rtd520.c
+++ b/drivers/staging/comedi/drivers/rtd520.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
@@ -99,6 +95,7 @@
* Digital-IO and Analog-Out only support instruction mode.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -1241,23 +1238,11 @@ static int rtd_dio_insn_config(struct comedi_device *dev,
unsigned int *data)
{
struct rtd_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask = 1 << chan;
+ int ret;
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= mask;
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~mask;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (s->io_bits & mask) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- }
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
/* TODO support digital match interrupts and strobes */
@@ -1342,10 +1327,9 @@ static int rtd_auto_attach(struct comedi_device *dev,
dev->board_ptr = board;
dev->board_name = board->name;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/rti800.c b/drivers/staging/comedi/drivers/rti800.c
index f4163fd..cbb4ba5 100644
--- a/drivers/staging/comedi/drivers/rti800.c
+++ b/drivers/staging/comedi/drivers/rti800.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
@@ -53,11 +49,11 @@
* [8] - DAC 1 encoding (same as DAC 0)
*/
+#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-
/*
* Register map
*/
@@ -302,10 +298,9 @@ static int rti800_attach(struct comedi_device *dev, struct comedi_devconfig *it)
inb(dev->iobase + RTI800_ADCHI);
outb(0, dev->iobase + RTI800_CLRFLAGS);
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
devpriv->adc_2comp = (it->options[4] == 0);
devpriv->dac_2comp[0] = (it->options[6] == 0);
diff --git a/drivers/staging/comedi/drivers/rti802.c b/drivers/staging/comedi/drivers/rti802.c
index 46dbbe6..a3fa2a4 100644
--- a/drivers/staging/comedi/drivers/rti802.c
+++ b/drivers/staging/comedi/drivers/rti802.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: rti802
@@ -37,10 +32,9 @@ Configuration Options:
[17] - dac#7 ...
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-
#define RTI802_SIZE 4
#define RTI802_SELECT 0
@@ -98,10 +92,9 @@ static int rti802_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_alloc_subdevices(dev, 1);
if (ret)
diff --git a/drivers/staging/comedi/drivers/s526.c b/drivers/staging/comedi/drivers/s526.c
index d240ce8..d629463 100644
--- a/drivers/staging/comedi/drivers/s526.c
+++ b/drivers/staging/comedi/drivers/s526.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: s526
@@ -41,8 +36,8 @@ comedi_config /dev/comedi0 s526 0x2C0,0x3
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
#include <asm/byteorder.h>
#define S526_SIZE 64
@@ -520,32 +515,35 @@ static int s526_dio_insn_bits(struct comedi_device *dev,
static int s526_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
- int group, mask;
+ unsigned int mask;
+ int ret;
+
+ if (chan < 4)
+ mask = 0x0f;
+ else
+ mask = 0xf0;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
+
+ /* bits 10/11 set the group 1/2's mode */
+ if (s->io_bits & 0x0f)
+ s->state |= (1 << 10);
+ else
+ s->state &= ~(1 << 10);
+ if (s->io_bits & 0xf0)
+ s->state |= (1 << 11);
+ else
+ s->state &= ~(1 << 11);
- group = chan >> 2;
- mask = 0xF << (group << 2);
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- /* bit 10/11 set the group 1/2's mode */
- s->state |= 1 << (group + 10);
- s->io_bits |= mask;
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->state &= ~(1 << (group + 10)); /* 1 is output, 0 is input. */
- s->io_bits &= ~mask;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (s->io_bits & mask) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- default:
- return -EINVAL;
- }
outw(s->state, dev->iobase + REG_DIO);
- return 1;
+ return insn->n;
}
static int s526_attach(struct comedi_device *dev, struct comedi_devconfig *it)
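The reworked s526_dio_insn_config() above picks mask 0x0f for channels 0-3 and 0xf0 for channels 4-7, then mirrors the resulting io_bits into the two group-direction bits (10 and 11) of the DIO control word. A minimal standalone C sketch of that bit bookkeeping, with the register write left out:

#include <stdio.h>

/* Mirror an 8-bit io_bits direction mask (1 = output) into the two
 * group-mode bits of a 16-bit DIO control word, as the patched
 * s526_dio_insn_config() does after comedi_dio_insn_config(). */
static unsigned int s526_update_group_modes(unsigned int state,
					    unsigned int io_bits)
{
	if (io_bits & 0x0f)		/* any of channels 0-3 is an output */
		state |= 1 << 10;
	else
		state &= ~(1 << 10);
	if (io_bits & 0xf0)		/* any of channels 4-7 is an output */
		state |= 1 << 11;
	else
		state &= ~(1 << 11);
	return state;
}

int main(void)
{
	printf("io_bits 0x01 -> state 0x%04x\n", s526_update_group_modes(0, 0x01));
	printf("io_bits 0x10 -> state 0x%04x\n", s526_update_group_modes(0, 0x10));
	printf("io_bits 0x00 -> state 0x%04x\n", s526_update_group_modes(3 << 10, 0x00));
	return 0;
}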
@@ -558,10 +556,9 @@ static int s526_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index 0cf4b3d..d22b95d 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -17,11 +17,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
@@ -64,6 +59,8 @@ INSN_CONFIG instructions:
comedi_do_insn(cf,&insn); //executing configuration
*/
+#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -1665,24 +1662,12 @@ static int s626_dio_insn_config(struct comedi_device *dev,
unsigned int *data)
{
unsigned long group = (unsigned long)s->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask = 1 << chan;
+ int ret;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
- switch (data[0]) {
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (s->io_bits & mask) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
- case COMEDI_INPUT:
- s->io_bits &= ~mask;
- break;
- case COMEDI_OUTPUT:
- s->io_bits |= mask;
- break;
- default:
- return -EINVAL;
- break;
- }
DEBIwrite(dev, LP_WRDOUT(group), s->io_bits);
return insn->n;
@@ -2590,10 +2575,9 @@ static int s626_auto_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/s626.h b/drivers/staging/comedi/drivers/s626.h
index 99cd57b..a85e6bd 100644
--- a/drivers/staging/comedi/drivers/s626.h
+++ b/drivers/staging/comedi/drivers/s626.h
@@ -17,11 +17,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
@@ -70,8 +65,6 @@
#define FALSE (0)
#endif
-#include <linux/slab.h>
-
#define S626_SIZE 0x0200
#define DMABUF_SIZE 4096 /* 4k pages */
diff --git a/drivers/staging/comedi/drivers/serial2002.c b/drivers/staging/comedi/drivers/serial2002.c
index 8900086..441813f 100644
--- a/drivers/staging/comedi/drivers/serial2002.c
+++ b/drivers/staging/comedi/drivers/serial2002.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
@@ -31,10 +26,10 @@ Status: in development
*/
+#include <linux/module.h>
#include "../comedidev.h"
#include <linux/delay.h>
-#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -724,10 +719,9 @@ static int serial2002_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
devpriv->port = it->options[0];
devpriv->speed = it->options[1];
diff --git a/drivers/staging/comedi/drivers/skel.c b/drivers/staging/comedi/drivers/skel.c
index dbc8c54d..9e96495 100644
--- a/drivers/staging/comedi/drivers/skel.c
+++ b/drivers/staging/comedi/drivers/skel.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: skel
@@ -72,6 +67,7 @@ Configuration Options:
* options that are used with comedi_config.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
@@ -366,31 +362,27 @@ static int skel_dio_insn_bits(struct comedi_device *dev,
static int skel_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int chan = CR_CHAN(insn->chanspec);
+ int ret;
- /* The input or output configuration of each digital line is
- * configured by a special insn_config instruction. chanspec
- * contains the channel to be changed, and data[0] contains the
- * value COMEDI_INPUT or COMEDI_OUTPUT. */
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= 1 << chan;
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~(1 << chan);
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] =
- (s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- break;
- }
- /* outw(s->io_bits,dev->iobase + SKEL_DIO_CONFIG); */
+ /*
+ * The input or output configuration of each digital line is
+ * configured by special insn_config instructions.
+ *
+ * chanspec contains the channel to be changed
+ * data[0] contains the instruction to perform on the channel
+ *
+ * Normally the core-provided comedi_dio_insn_config() function
+ * can be used to handle the boilerplate.
+ */
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
+
+ /* Update the hardware to the new configuration */
+ /* outw(s->io_bits, dev->iobase + SKEL_DIO_CONFIG); */
return insn->n;
}
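The comment above refers to the per-channel switch that comedi_dio_insn_config() now factors out of each driver. A simplified standalone C sketch of that boilerplate, using local placeholder names rather than the real comedi constants:

#include <stdio.h>

/* Illustrative stand-in for the per-driver switch removed by these patches;
 * the enum values and struct are placeholders, not the real comedi ones. */
enum dio_cfg { DIO_CFG_INPUT, DIO_CFG_OUTPUT, DIO_CFG_QUERY };

struct fake_subdev {
	unsigned int io_bits;	/* one bit per channel: 1 = output, 0 = input */
};

static int dio_insn_config_boilerplate(struct fake_subdev *s,
				       unsigned int chan, enum dio_cfg cfg,
				       unsigned int *is_output)
{
	unsigned int mask = 1u << chan;

	switch (cfg) {
	case DIO_CFG_OUTPUT:
		s->io_bits |= mask;
		break;
	case DIO_CFG_INPUT:
		s->io_bits &= ~mask;
		break;
	case DIO_CFG_QUERY:
		*is_output = (s->io_bits & mask) ? 1 : 0;
		break;
	}
	return 0;
}

int main(void)
{
	struct fake_subdev s = { 0 };
	unsigned int dir;

	dio_insn_config_boilerplate(&s, 3, DIO_CFG_OUTPUT, NULL);
	dio_insn_config_boilerplate(&s, 3, DIO_CFG_QUERY, &dir);
	printf("channel 3 direction: %s\n", dir ? "output" : "input");
	return 0;
}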
@@ -489,10 +481,9 @@ static int skel_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* dev->board_name = thisboard->name; */
/* Allocate the private data */
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
/*
* Supported boards are usually either auto-attached via the
@@ -563,10 +554,9 @@ static int skel_auto_attach(struct comedi_device *dev,
dev->board_name = thisboard->name;
/* Allocate the private data */
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
/* Enable the PCI device. */
ret = comedi_pci_enable(dev);
diff --git a/drivers/staging/comedi/drivers/ssv_dnp.c b/drivers/staging/comedi/drivers/ssv_dnp.c
index a76df09..11758a5 100644
--- a/drivers/staging/comedi/drivers/ssv_dnp.c
+++ b/drivers/staging/comedi/drivers/ssv_dnp.c
@@ -15,11 +15,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: ssv_dnp
@@ -31,6 +26,7 @@ Status: unknown
/* include files ----------------------------------------------------------- */
+#include <linux/module.h>
#include "../comedidev.h"
/* Some global definitions: the registers of the DNP ----------------------- */
@@ -97,68 +93,48 @@ static int dnp_dio_insn_bits(struct comedi_device *dev,
}
-/* ------------------------------------------------------------------------- */
-/* Configure the direction of the bidirectional digital i/o pins. chanspec */
-/* contains the channel to be changed and data[0] contains either */
-/* COMEDI_INPUT or COMEDI_OUTPUT. */
-/* ------------------------------------------------------------------------- */
-
static int dnp_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int mask;
+ unsigned int val;
+ int ret;
- u8 register_buffer;
-
- /* reduces chanspec to lower 16 bits */
- int chan = CR_CHAN(insn->chanspec);
-
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- case INSN_CONFIG_DIO_INPUT:
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] =
- (inb(CSCDR) & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- break;
- }
- /* Test: which port does the channel belong to? */
-
- /* We have to pay attention with port C: this is the meaning of PCMR: */
- /* Bit in PCMR: 7 6 5 4 3 2 1 0 */
- /* Corresponding port C pin: d 3 d 2 d 1 d 0 d= don't touch */
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
- if ((chan >= 0) && (chan <= 7)) {
- /* this is port A */
+ if (chan < 8) { /* Port A */
+ mask = 1 << chan;
outb(PAMR, CSCIR);
- } else if ((chan >= 8) && (chan <= 15)) {
- /* this is port B */
- chan -= 8;
+ } else if (chan < 16) { /* Port B */
+ mask = 1 << (chan - 8);
outb(PBMR, CSCIR);
- } else if ((chan >= 16) && (chan <= 19)) {
- /* this is port C; multiplication with 2 brings bits into */
- /* correct position for PCMR! */
- chan -= 16;
- chan *= 2;
+ } else { /* Port C */
+ /*
+ * We have to pay attention to port C.
+ * This is the meaning of PCMR:
+ * Bit in PCMR: 7 6 5 4 3 2 1 0
+ * Corresponding port C pin: d 3 d 2 d 1 d 0 d= don't touch
+ *
+ * Multiplication by 2 brings bits into correct position
+ * for PCMR!
+ */
+ mask = 1 << ((chan - 16) * 2);
outb(PCMR, CSCIR);
- } else {
- return -EINVAL;
}
- /* read 'old' direction of the port and set bits (out=1, in=0) */
- register_buffer = inb(CSCDR);
+ val = inb(CSCDR);
if (data[0] == COMEDI_OUTPUT)
- register_buffer |= (1 << chan);
+ val |= mask;
else
- register_buffer &= ~(1 << chan);
+ val &= ~mask;
+ outb(val, CSCDR);
- outb(register_buffer, CSCDR);
-
- return 1;
+ return insn->n;
}
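For the Port C branch above, only the even bits of PCMR carry direction information, which is why the mask is shifted by (chan - 16) * 2. A small standalone C sketch of the mask selection, assuming the channel layout from the hunk (0-7 port A, 8-15 port B, 16-19 port C):

#include <stdio.h>

/* Compute the direction-register bit mask for an ssv_dnp channel, mirroring
 * the logic in the patched dnp_dio_insn_config(); illustrative only. */
static unsigned int dnp_dir_mask(unsigned int chan)
{
	if (chan < 8)			/* port A: PAMR, one bit per pin */
		return 1u << chan;
	if (chan < 16)			/* port B: PBMR, one bit per pin */
		return 1u << (chan - 8);
	/* port C: PCMR only uses bits 0, 2, 4, 6 for pins 0-3 */
	return 1u << ((chan - 16) * 2);
}

int main(void)
{
	unsigned int chan;

	for (chan = 16; chan < 20; chan++)
		printf("port C pin %u -> PCMR mask 0x%02x\n",
		       chan - 16, dnp_dir_mask(chan));
	return 0;
}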
diff --git a/drivers/staging/comedi/drivers/unioxx5.c b/drivers/staging/comedi/drivers/unioxx5.c
index 0c243477..93eec2f 100644
--- a/drivers/staging/comedi/drivers/unioxx5.c
+++ b/drivers/staging/comedi/drivers/unioxx5.c
@@ -18,10 +18,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
- * You should have received a copy of the GNU General Public License *
- * along with this program; if not, write to the Free Software *
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *
- * *
***************************************************************************/
/*
@@ -44,9 +40,9 @@ Devices: [Fastwel] UNIOxx-5 (unioxx5),
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/delay.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-#include <linux/slab.h>
#define DRIVER_NAME "unioxx5"
#define UNIOXX5_SIZE 0x10
@@ -375,15 +371,13 @@ static int __unioxx5_subdev_init(struct comedi_device *dev,
int i, to, ndef_flag = 0;
int ret;
- usp = kzalloc(sizeof(*usp), GFP_KERNEL);
- if (usp == NULL)
+ usp = comedi_alloc_spriv(s, sizeof(*usp));
+ if (!usp)
return -ENOMEM;
ret = __comedi_request_region(dev, iobase, UNIOXX5_SIZE);
- if (ret) {
- kfree(usp);
+ if (ret)
return ret;
- }
usp->usp_iobase = iobase;
/* defining modules types */
@@ -417,7 +411,6 @@ static int __unioxx5_subdev_init(struct comedi_device *dev,
/* initial subdevice for digital or analog i/o */
s->type = COMEDI_SUBD_DIO;
- s->private = usp;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
s->n_chan = UNIOXX5_NUM_OF_CHANS;
s->maxdata = 0xFFF;
@@ -478,15 +471,15 @@ static int unioxx5_attach(struct comedi_device *dev,
static void unioxx5_detach(struct comedi_device *dev)
{
+ struct comedi_subdevice *s;
+ struct unioxx5_subd_priv *spriv;
int i;
- struct comedi_subdevice *subdev;
- struct unioxx5_subd_priv *usp;
for (i = 0; i < dev->n_subdevices; i++) {
- subdev = &dev->subdevices[i];
- usp = subdev->private;
- release_region(usp->usp_iobase, UNIOXX5_SIZE);
- kfree(subdev->private);
+ s = &dev->subdevices[i];
+ spriv = s->private;
+ if (spriv && spriv->usp_iobase)
+ release_region(spriv->usp_iobase, UNIOXX5_SIZE);
}
}
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
index 6f5da67..701ad1a 100644
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -11,11 +11,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: usbdux
@@ -83,9 +78,6 @@ sampling rate. If you sample two channels you get 4kHz and so on.
*
*/
-/* generates loads of debug info */
-/* #define NOISY_DUX_DEBUGBUG */
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -94,48 +86,34 @@ sampling rate. If you sample two channels you get 4kHz and so on.
#include <linux/usb.h>
#include <linux/fcntl.h>
#include <linux/compiler.h>
-#include <linux/firmware.h>
#include "../comedidev.h"
#include "comedi_fc.h"
-/* timeout for the USB-transfer in ms*/
-#define BULK_TIMEOUT 1000
-
-/* constants for "firmware" upload and download */
-#define FIRMWARE "usbdux_firmware.bin"
-#define USBDUXSUB_FIRMWARE 0xA0
-#define VENDOR_DIR_IN 0xC0
-#define VENDOR_DIR_OUT 0x40
-
-/* internal addresses of the 8051 processor */
-#define USBDUXSUB_CPUCS 0xE600
-
-/*
- * the minor device number, major is 180 only for debugging purposes and to
- * upload special firmware (programming the eeprom etc) which is not compatible
- * with the comedi framwork
- */
-#define USBDUXSUB_MINOR 32
-
-/* max lenghth of the transfer-buffer for software upload */
-#define TB_LEN 0x2000
-
-/* Input endpoint number: ISO/IRQ */
-#define ISOINEP 6
-
-/* Output endpoint number: ISO/IRQ */
-#define ISOOUTEP 2
-
-/* This EP sends DUX commands to USBDUX */
-#define COMMAND_OUT_EP 1
-
-/* This EP receives the DUX commands from USBDUX */
-#define COMMAND_IN_EP 8
-
-/* Output endpoint for PWM */
-#define PWM_EP 4
+/* constants for firmware upload and download */
+#define USBDUX_FIRMWARE "usbdux_firmware.bin"
+#define USBDUX_FIRMWARE_MAX_LEN 0x2000
+#define USBDUX_FIRMWARE_CMD 0xa0
+#define VENDOR_DIR_IN 0xc0
+#define VENDOR_DIR_OUT 0x40
+#define USBDUX_CPU_CS 0xe600
+
+/* usbdux bulk transfer commands */
+#define USBDUX_CMD_MULT_AI 0
+#define USBDUX_CMD_AO 1
+#define USBDUX_CMD_DIO_CFG 2
+#define USBDUX_CMD_DIO_BITS 3
+#define USBDUX_CMD_SINGLE_AI 4
+#define USBDUX_CMD_TIMER_RD 5
+#define USBDUX_CMD_TIMER_WR 6
+#define USBDUX_CMD_PWM_ON 7
+#define USBDUX_CMD_PWM_OFF 8
+
+#define USBDUX_NUM_AO_CHAN 4
+
+/* timeout for the USB-transfer in ms */
+#define BULK_TIMEOUT 1000
/* 300Hz max frequency under PWM */
#define MIN_PWM_PERIOD ((long)(1E9/300))
@@ -143,9 +121,6 @@ sampling rate. If you sample two channels you get 4kHz and so on.
/* Default PWM frequency */
#define PWM_DEFAULT_PERIOD ((long)(1E9/100))
-/* Number of channels */
-#define NUMCHANNELS 8
-
/* Size of one A/D value */
#define SIZEADIN ((sizeof(int16_t)))
@@ -158,9 +133,6 @@ sampling rate. If you sample two channels you get 4kHz and so on.
/* 16 bytes. */
#define SIZEINSNBUF 16
-/* Number of DA channels */
-#define NUMOUTCHANNELS 8
-
/* size of one value for the D/A converter: channel and value */
#define SIZEDAOUT ((sizeof(int8_t)+sizeof(int16_t)))
@@ -191,101 +163,56 @@ sampling rate. If you sample two channels you get 4kHz and so on.
/* must have more buffers due to buggy USB ctr */
#define NUMOFOUTBUFFERSHIGH 10
-/* Total number of usbdux devices */
-#define NUMUSBDUX 16
-
-/* Analogue in subdevice */
-#define SUBDEV_AD 0
-
-/* Analogue out subdevice */
-#define SUBDEV_DA 1
-
-/* Digital I/O */
-#define SUBDEV_DIO 2
-
-/* counter */
-#define SUBDEV_COUNTER 3
-
-/* timer aka pwm output */
-#define SUBDEV_PWM 4
-
/* number of retries to get the right dux command */
#define RETRIES 10
-/**************************************************/
-/* comedi constants */
-static const struct comedi_lrange range_usbdux_ai_range = { 4, {
- BIP_RANGE
- (4.096),
- BIP_RANGE(4.096
- / 2),
- UNI_RANGE
- (4.096),
- UNI_RANGE(4.096
- / 2)
- }
+static const struct comedi_lrange range_usbdux_ai_range = {
+ 4, {
+ BIP_RANGE(4.096),
+ BIP_RANGE(4.096 / 2),
+ UNI_RANGE(4.096),
+ UNI_RANGE(4.096 / 2)
+ }
};
-static const struct comedi_lrange range_usbdux_ao_range = { 2, {
- BIP_RANGE
- (4.096),
- UNI_RANGE
- (4.096),
- }
+static const struct comedi_lrange range_usbdux_ao_range = {
+ 2, {
+ BIP_RANGE(4.096),
+ UNI_RANGE(4.096)
+ }
};
-/*
- * private structure of one subdevice
- */
-
-/*
- * This is the structure which holds all the data of
- * this driver one sub device just now: A/D
- */
-struct usbduxsub {
- /* attached? */
- int attached;
- /* is it associated with a subdevice? */
- int probed;
- /* pointer to the usb-device */
- struct usb_device *usbdev;
+struct usbdux_private {
/* actual number of in-buffers */
- int num_in_buffers;
+ int n_ai_urbs;
/* actual number of out-buffers */
- int num_out_buffers;
+ int n_ao_urbs;
/* ISO-transfer handling: buffers */
- struct urb **urb_in;
- struct urb **urb_out;
+ struct urb **ai_urbs;
+ struct urb **ao_urbs;
/* pwm-transfer handling */
- struct urb *urb_pwm;
+ struct urb *pwm_urb;
/* PWM period */
unsigned int pwm_period;
/* PWM internal delay for the GPIF in the FX2 */
- int8_t pwn_delay;
+ int8_t pwm_delay;
/* size of the PWM buffer which holds the bit pattern */
- int size_pwm_buf;
+ int pwm_buf_sz;
/* input buffer for the ISO-transfer */
- int16_t *in_buffer;
+ int16_t *in_buf;
/* input buffer for single insn */
- int16_t *insn_buffer;
- /* output buffer for single DA outputs */
- int16_t *out_buffer;
- /* interface number */
- int ifnum;
- /* interface structure in 2.6 */
- struct usb_interface *interface;
- /* comedi device for the interrupt context */
- struct comedi_device *comedidev;
- /* is it USB_SPEED_HIGH or not? */
- short int high_speed;
- /* asynchronous command is running */
- short int ai_cmd_running;
- short int ao_cmd_running;
- /* pwm is running */
- short int pwm_cmd_running;
- /* continous acquisition */
- short int ai_continous;
- short int ao_continous;
+ int16_t *insn_buf;
+
+ int8_t ao_chanlist[USBDUX_NUM_AO_CHAN];
+ unsigned int ao_readback[USBDUX_NUM_AO_CHAN];
+
+ unsigned int high_speed:1;
+ unsigned int ai_cmd_running:1;
+ unsigned int ai_continous:1;
+ unsigned int ao_cmd_running:1;
+ unsigned int ao_continous:1;
+ unsigned int pwm_cmd_running:1;
+
/* number of samples to acquire */
int ai_sample_count;
int ao_sample_count;
@@ -297,132 +224,62 @@ struct usbduxsub {
unsigned int ao_counter;
/* interval in frames/uframes */
unsigned int ai_interval;
- /* D/A commands */
- int8_t *dac_commands;
/* commands */
int8_t *dux_commands;
struct semaphore sem;
};
-/*
- * The pointer to the private usb-data of the driver is also the private data
- * for the comedi-device. This has to be global as the usb subsystem needs
- * global variables. The other reason is that this structure must be there
- * _before_ any comedi command is issued. The usb subsystem must be initialised
- * before comedi can access it.
- */
-static struct usbduxsub usbduxsub[NUMUSBDUX];
-
-static DEFINE_SEMAPHORE(start_stop_sem);
-
-/*
- * Stops the data acquision
- * It should be safe to call this function from any context
- */
-static int usbduxsub_unlink_inurbs(struct usbduxsub *usbduxsub_tmp)
+static void usbdux_unlink_urbs(struct urb **urbs, int num_urbs)
{
- int i = 0;
- int err = 0;
+ int i;
- if (usbduxsub_tmp && usbduxsub_tmp->urb_in) {
- for (i = 0; i < usbduxsub_tmp->num_in_buffers; i++) {
- if (usbduxsub_tmp->urb_in[i]) {
- /* We wait here until all transfers have been
- * cancelled. */
- usb_kill_urb(usbduxsub_tmp->urb_in[i]);
- }
- dev_dbg(&usbduxsub_tmp->interface->dev,
- "comedi: usbdux: unlinked InURB %d, err=%d\n",
- i, err);
- }
- }
- return err;
+ for (i = 0; i < num_urbs; i++)
+ usb_kill_urb(urbs[i]);
}
-/*
- * This will stop a running acquisition operation
- * Is called from within this driver from both the
- * interrupt context and from comedi
- */
-static int usbdux_ai_stop(struct usbduxsub *this_usbduxsub, int do_unlink)
+static void usbdux_ai_stop(struct comedi_device *dev, int do_unlink)
{
- int ret = 0;
-
- if (!this_usbduxsub) {
- pr_err("comedi?: usbdux_ai_stop: this_usbduxsub=NULL!\n");
- return -EFAULT;
- }
- dev_dbg(&this_usbduxsub->interface->dev, "comedi: usbdux_ai_stop\n");
-
- if (do_unlink) {
- /* stop aquistion */
- ret = usbduxsub_unlink_inurbs(this_usbduxsub);
- }
+ struct usbdux_private *devpriv = dev->private;
- this_usbduxsub->ai_cmd_running = 0;
+ if (do_unlink && devpriv->ai_urbs)
+ usbdux_unlink_urbs(devpriv->ai_urbs, devpriv->n_ai_urbs);
- return ret;
+ devpriv->ai_cmd_running = 0;
}
-/*
- * This will cancel a running acquisition operation.
- * This is called by comedi but never from inside the driver.
- */
static int usbdux_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct usbduxsub *this_usbduxsub;
- int res = 0;
-
- /* force unlink of all urbs */
- this_usbduxsub = dev->private;
- if (!this_usbduxsub)
- return -EFAULT;
-
- dev_dbg(&this_usbduxsub->interface->dev, "comedi: usbdux_ai_cancel\n");
+ struct usbdux_private *devpriv = dev->private;
/* prevent other CPUs from submitting new commands just now */
- down(&this_usbduxsub->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
+ down(&devpriv->sem);
/* unlink only if the urb really has been submitted */
- res = usbdux_ai_stop(this_usbduxsub, this_usbduxsub->ai_cmd_running);
- up(&this_usbduxsub->sem);
- return res;
+ usbdux_ai_stop(dev, devpriv->ai_cmd_running);
+ up(&devpriv->sem);
+
+ return 0;
}
/* analogue IN - interrupt service routine */
static void usbduxsub_ai_isoc_irq(struct urb *urb)
{
+ struct comedi_device *dev = urb->context;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct usbdux_private *devpriv = dev->private;
int i, err, n;
- struct usbduxsub *this_usbduxsub;
- struct comedi_device *this_comedidev;
- struct comedi_subdevice *s;
-
- /* the context variable points to the subdevice */
- this_comedidev = urb->context;
- /* the private structure of the subdevice is struct usbduxsub */
- this_usbduxsub = this_comedidev->private;
- /* subdevice which is the AD converter */
- s = &this_comedidev->subdevices[SUBDEV_AD];
/* first we test if something unusual has just happened */
switch (urb->status) {
case 0:
/* copy the result in the transfer buffer */
- memcpy(this_usbduxsub->in_buffer,
- urb->transfer_buffer, SIZEINBUF);
+ memcpy(devpriv->in_buf, urb->transfer_buffer, SIZEINBUF);
break;
case -EILSEQ:
/* error in the ISOchronous data */
/* we don't copy the data into the transfer buffer */
/* and recycle the last data byte */
- dev_dbg(&urb->dev->dev,
- "comedi%d: usbdux: CRC error in ISO IN stream.\n",
- this_usbduxsub->comedidev->minor);
-
+ dev_dbg(dev->class_dev, "CRC error in ISO IN stream\n");
break;
case -ECONNRESET:
@@ -430,29 +287,27 @@ static void usbduxsub_ai_isoc_irq(struct urb *urb)
case -ESHUTDOWN:
case -ECONNABORTED:
/* happens after an unlink command */
- if (this_usbduxsub->ai_cmd_running) {
- /* we are still running a command */
- /* tell this comedi */
+ if (devpriv->ai_cmd_running) {
s->async->events |= COMEDI_CB_EOA;
s->async->events |= COMEDI_CB_ERROR;
- comedi_event(this_usbduxsub->comedidev, s);
+ comedi_event(dev, s);
/* stop the transfer w/o unlink */
- usbdux_ai_stop(this_usbduxsub, 0);
+ usbdux_ai_stop(dev, 0);
}
return;
default:
/* a real error on the bus */
/* pass error to comedi if we are really running a command */
- if (this_usbduxsub->ai_cmd_running) {
- dev_err(&urb->dev->dev,
- "Non-zero urb status received in ai intr "
- "context: %d\n", urb->status);
+ if (devpriv->ai_cmd_running) {
+ dev_err(dev->class_dev,
+ "Non-zero urb status received in ai intr context: %d\n",
+ urb->status);
s->async->events |= COMEDI_CB_EOA;
s->async->events |= COMEDI_CB_ERROR;
- comedi_event(this_usbduxsub->comedidev, s);
+ comedi_event(dev, s);
/* don't do an unlink here */
- usbdux_ai_stop(this_usbduxsub, 0);
+ usbdux_ai_stop(dev, 0);
}
return;
}
@@ -461,7 +316,7 @@ static void usbduxsub_ai_isoc_irq(struct urb *urb)
* at this point we are reasonably sure that nothing dodgy has happened
* are we running a command?
*/
- if (unlikely((!(this_usbduxsub->ai_cmd_running)))) {
+ if (unlikely(!devpriv->ai_cmd_running)) {
/*
* not running a command, do not continue execution if no
* asynchronous command is running in particular not resubmit
@@ -469,144 +324,101 @@ static void usbduxsub_ai_isoc_irq(struct urb *urb)
return;
}
- urb->dev = this_usbduxsub->usbdev;
+ urb->dev = comedi_to_usb_dev(dev);
/* resubmit the urb */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (unlikely(err < 0)) {
- dev_err(&urb->dev->dev,
- "comedi_: urb resubmit failed in int-context! err=%d\n",
- err);
+ dev_err(dev->class_dev,
+ "urb resubmit failed in int-context! err=%d\n", err);
if (err == -EL2NSYNC)
- dev_err(&urb->dev->dev,
- "buggy USB host controller or bug in IRQ "
- "handler!\n");
+ dev_err(dev->class_dev,
+ "buggy USB host controller or bug in IRQ handler!\n");
s->async->events |= COMEDI_CB_EOA;
s->async->events |= COMEDI_CB_ERROR;
- comedi_event(this_usbduxsub->comedidev, s);
+ comedi_event(dev, s);
/* don't do an unlink here */
- usbdux_ai_stop(this_usbduxsub, 0);
+ usbdux_ai_stop(dev, 0);
return;
}
- this_usbduxsub->ai_counter--;
- if (likely(this_usbduxsub->ai_counter > 0))
+ devpriv->ai_counter--;
+ if (likely(devpriv->ai_counter > 0))
return;
/* timer zero, transfer measurements to comedi */
- this_usbduxsub->ai_counter = this_usbduxsub->ai_timer;
+ devpriv->ai_counter = devpriv->ai_timer;
/* test, if we transmit only a fixed number of samples */
- if (!(this_usbduxsub->ai_continous)) {
+ if (!devpriv->ai_continous) {
/* not continuous, fixed number of samples */
- this_usbduxsub->ai_sample_count--;
+ devpriv->ai_sample_count--;
/* all samples received? */
- if (this_usbduxsub->ai_sample_count < 0) {
+ if (devpriv->ai_sample_count < 0) {
/* prevent a resubmit next time */
- usbdux_ai_stop(this_usbduxsub, 0);
+ usbdux_ai_stop(dev, 0);
/* tell comedi that the acquisition is over */
s->async->events |= COMEDI_CB_EOA;
- comedi_event(this_usbduxsub->comedidev, s);
+ comedi_event(dev, s);
return;
}
}
/* get the data from the USB bus and hand it over to comedi */
n = s->async->cmd.chanlist_len;
for (i = 0; i < n; i++) {
+ unsigned int range = CR_RANGE(s->async->cmd.chanlist[i]);
+ int16_t val = le16_to_cpu(devpriv->in_buf[i]);
+
+ /* bipolar data is two's-complement */
+ if (comedi_range_is_bipolar(s, range))
+ val ^= ((s->maxdata + 1) >> 1);
+
/* transfer data */
- if (CR_RANGE(s->async->cmd.chanlist[i]) <= 1) {
- err = comedi_buf_put
- (s->async,
- le16_to_cpu(this_usbduxsub->in_buffer[i]) ^ 0x800);
- } else {
- err = comedi_buf_put
- (s->async,
- le16_to_cpu(this_usbduxsub->in_buffer[i]));
- }
+ err = comedi_buf_put(s->async, val);
if (unlikely(err == 0)) {
/* buffer overflow */
- usbdux_ai_stop(this_usbduxsub, 0);
+ usbdux_ai_stop(dev, 0);
return;
}
}
/* tell comedi that data is there */
s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
- comedi_event(this_usbduxsub->comedidev, s);
+ comedi_event(dev, s);
}
-static int usbduxsub_unlink_outurbs(struct usbduxsub *usbduxsub_tmp)
+static void usbdux_ao_stop(struct comedi_device *dev, int do_unlink)
{
- int i = 0;
- int err = 0;
-
- if (usbduxsub_tmp && usbduxsub_tmp->urb_out) {
- for (i = 0; i < usbduxsub_tmp->num_out_buffers; i++) {
- if (usbduxsub_tmp->urb_out[i])
- usb_kill_urb(usbduxsub_tmp->urb_out[i]);
-
- dev_dbg(&usbduxsub_tmp->interface->dev,
- "comedi: usbdux: unlinked OutURB %d: res=%d\n",
- i, err);
- }
- }
- return err;
-}
-
-/* This will cancel a running acquisition operation
- * in any context.
- */
-static int usbdux_ao_stop(struct usbduxsub *this_usbduxsub, int do_unlink)
-{
- int ret = 0;
-
- if (!this_usbduxsub)
- return -EFAULT;
- dev_dbg(&this_usbduxsub->interface->dev, "comedi: usbdux_ao_cancel\n");
-
- if (do_unlink)
- ret = usbduxsub_unlink_outurbs(this_usbduxsub);
+ struct usbdux_private *devpriv = dev->private;
- this_usbduxsub->ao_cmd_running = 0;
+ if (do_unlink && devpriv->ao_urbs)
+ usbdux_unlink_urbs(devpriv->ao_urbs, devpriv->n_ao_urbs);
- return ret;
+ devpriv->ao_cmd_running = 0;
}
-/* force unlink, is called by comedi */
static int usbdux_ao_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct usbduxsub *this_usbduxsub = dev->private;
- int res = 0;
-
- if (!this_usbduxsub)
- return -EFAULT;
+ struct usbdux_private *devpriv = dev->private;
/* prevent other CPUs from submitting a command just now */
- down(&this_usbduxsub->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
+ down(&devpriv->sem);
/* unlink only if it is really running */
- res = usbdux_ao_stop(this_usbduxsub, this_usbduxsub->ao_cmd_running);
- up(&this_usbduxsub->sem);
- return res;
+ usbdux_ao_stop(dev, devpriv->ao_cmd_running);
+ up(&devpriv->sem);
+
+ return 0;
}
static void usbduxsub_ao_isoc_irq(struct urb *urb)
{
- int i, ret;
+ struct comedi_device *dev = urb->context;
+ struct comedi_subdevice *s = dev->write_subdev;
+ struct usbdux_private *devpriv = dev->private;
int8_t *datap;
- struct usbduxsub *this_usbduxsub;
- struct comedi_device *this_comedidev;
- struct comedi_subdevice *s;
-
- /* the context variable points to the subdevice */
- this_comedidev = urb->context;
- /* the private structure of the subdevice is struct usbduxsub */
- this_usbduxsub = this_comedidev->private;
-
- s = &this_comedidev->subdevices[SUBDEV_DA];
+ int len;
+ int ret;
+ int i;
switch (urb->status) {
case 0:
@@ -619,318 +431,131 @@ static void usbduxsub_ao_isoc_irq(struct urb *urb)
case -ECONNABORTED:
/* after an unlink command, unplug, ... etc */
/* no unlink needed here. Already shutting down. */
- if (this_usbduxsub->ao_cmd_running) {
+ if (devpriv->ao_cmd_running) {
s->async->events |= COMEDI_CB_EOA;
- comedi_event(this_usbduxsub->comedidev, s);
- usbdux_ao_stop(this_usbduxsub, 0);
+ comedi_event(dev, s);
+ usbdux_ao_stop(dev, 0);
}
return;
default:
/* a real error */
- if (this_usbduxsub->ao_cmd_running) {
- dev_err(&urb->dev->dev,
- "comedi_: Non-zero urb status received in ao "
- "intr context: %d\n", urb->status);
+ if (devpriv->ao_cmd_running) {
+ dev_err(dev->class_dev,
+ "Non-zero urb status received in ao intr context: %d\n",
+ urb->status);
s->async->events |= COMEDI_CB_ERROR;
s->async->events |= COMEDI_CB_EOA;
- comedi_event(this_usbduxsub->comedidev, s);
+ comedi_event(dev, s);
/* we do an unlink if we are in the high speed mode */
- usbdux_ao_stop(this_usbduxsub, 0);
+ usbdux_ao_stop(dev, 0);
}
return;
}
/* are we actually running? */
- if (!(this_usbduxsub->ao_cmd_running))
+ if (!devpriv->ao_cmd_running)
return;
/* normal operation: executing a command in this subdevice */
- this_usbduxsub->ao_counter--;
- if ((int)this_usbduxsub->ao_counter <= 0) {
+ devpriv->ao_counter--;
+ if ((int)devpriv->ao_counter <= 0) {
/* timer zero */
- this_usbduxsub->ao_counter = this_usbduxsub->ao_timer;
+ devpriv->ao_counter = devpriv->ao_timer;
/* handle non-continuous acquisition */
- if (!(this_usbduxsub->ao_continous)) {
+ if (!devpriv->ao_continous) {
/* fixed number of samples */
- this_usbduxsub->ao_sample_count--;
- if (this_usbduxsub->ao_sample_count < 0) {
+ devpriv->ao_sample_count--;
+ if (devpriv->ao_sample_count < 0) {
/* all samples transmitted */
- usbdux_ao_stop(this_usbduxsub, 0);
+ usbdux_ao_stop(dev, 0);
s->async->events |= COMEDI_CB_EOA;
- comedi_event(this_usbduxsub->comedidev, s);
+ comedi_event(dev, s);
/* no resubmit of the urb */
return;
}
}
+
/* transmit data to the USB bus */
- ((uint8_t *) (urb->transfer_buffer))[0] =
- s->async->cmd.chanlist_len;
+ datap = urb->transfer_buffer;
+ len = s->async->cmd.chanlist_len;
+ *datap++ = len;
for (i = 0; i < s->async->cmd.chanlist_len; i++) {
- short temp;
- if (i >= NUMOUTCHANNELS)
- break;
+ unsigned int chan = devpriv->ao_chanlist[i];
+ short val;
- /* pointer to the DA */
- datap =
- (&(((int8_t *) urb->transfer_buffer)[i * 3 + 1]));
- /* get the data from comedi */
- ret = comedi_buf_get(s->async, &temp);
- datap[0] = temp;
- datap[1] = temp >> 8;
- datap[2] = this_usbduxsub->dac_commands[i];
- /* printk("data[0]=%x, data[1]=%x, data[2]=%x\n", */
- /* datap[0],datap[1],datap[2]); */
+ ret = comedi_buf_get(s->async, &val);
if (ret < 0) {
- dev_err(&urb->dev->dev,
- "comedi: buffer underflow\n");
- s->async->events |= COMEDI_CB_EOA;
- s->async->events |= COMEDI_CB_OVERFLOW;
+ dev_err(dev->class_dev, "buffer underflow\n");
+ s->async->events |= (COMEDI_CB_EOA |
+ COMEDI_CB_OVERFLOW);
}
- /* transmit data to comedi */
+ /* pointer to the DA */
+ *datap++ = val & 0xff;
+ *datap++ = (val >> 8) & 0xff;
+ *datap++ = chan;
+ devpriv->ao_readback[chan] = val;
+
s->async->events |= COMEDI_CB_BLOCK;
- comedi_event(this_usbduxsub->comedidev, s);
+ comedi_event(dev, s);
}
}
urb->transfer_buffer_length = SIZEOUTBUF;
- urb->dev = this_usbduxsub->usbdev;
+ urb->dev = comedi_to_usb_dev(dev);
urb->status = 0;
- if (this_usbduxsub->ao_cmd_running) {
- if (this_usbduxsub->high_speed) {
- /* uframes */
- urb->interval = 8;
- } else {
- /* frames */
- urb->interval = 1;
- }
+ if (devpriv->ao_cmd_running) {
+ if (devpriv->high_speed)
+ urb->interval = 8; /* uframes */
+ else
+ urb->interval = 1; /* frames */
urb->number_of_packets = 1;
urb->iso_frame_desc[0].offset = 0;
urb->iso_frame_desc[0].length = SIZEOUTBUF;
urb->iso_frame_desc[0].status = 0;
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret < 0) {
- dev_err(&urb->dev->dev,
- "comedi_: ao urb resubm failed in int-cont. "
- "ret=%d", ret);
+ dev_err(dev->class_dev,
+ "ao urb resubm failed in int-cont. ret=%d",
+ ret);
if (ret == EL2NSYNC)
- dev_err(&urb->dev->dev,
- "buggy USB host controller or bug in "
- "IRQ handling!\n");
+ dev_err(dev->class_dev,
+ "buggy USB host controller or bug in IRQ handling!\n");
s->async->events |= COMEDI_CB_EOA;
s->async->events |= COMEDI_CB_ERROR;
- comedi_event(this_usbduxsub->comedidev, s);
+ comedi_event(dev, s);
/* don't do an unlink here */
- usbdux_ao_stop(this_usbduxsub, 0);
+ usbdux_ao_stop(dev, 0);
}
}
}
-static int usbduxsub_start(struct usbduxsub *usbduxsub)
-{
- int errcode = 0;
- uint8_t *local_transfer_buffer;
-
- local_transfer_buffer = kmalloc(1, GFP_KERNEL);
- if (!local_transfer_buffer)
- return -ENOMEM;
-
- /* 7f92 to zero */
- *local_transfer_buffer = 0;
- errcode = usb_control_msg(usbduxsub->usbdev,
- /* create a pipe for a control transfer */
- usb_sndctrlpipe(usbduxsub->usbdev, 0),
- /* bRequest, "Firmware" */
- USBDUXSUB_FIRMWARE,
- /* bmRequestType */
- VENDOR_DIR_OUT,
- /* Value */
- USBDUXSUB_CPUCS,
- /* Index */
- 0x0000,
- /* address of the transfer buffer */
- local_transfer_buffer,
- /* Length */
- 1,
- /* Timeout */
- BULK_TIMEOUT);
- if (errcode < 0)
- dev_err(&usbduxsub->interface->dev,
- "comedi_: control msg failed (start)\n");
-
- kfree(local_transfer_buffer);
- return errcode;
-}
-
-static int usbduxsub_stop(struct usbduxsub *usbduxsub)
-{
- int errcode = 0;
- uint8_t *local_transfer_buffer;
-
- local_transfer_buffer = kmalloc(1, GFP_KERNEL);
- if (!local_transfer_buffer)
- return -ENOMEM;
-
- /* 7f92 to one */
- *local_transfer_buffer = 1;
- errcode = usb_control_msg(usbduxsub->usbdev,
- usb_sndctrlpipe(usbduxsub->usbdev, 0),
- /* bRequest, "Firmware" */
- USBDUXSUB_FIRMWARE,
- /* bmRequestType */
- VENDOR_DIR_OUT,
- /* Value */
- USBDUXSUB_CPUCS,
- /* Index */
- 0x0000, local_transfer_buffer,
- /* Length */
- 1,
- /* Timeout */
- BULK_TIMEOUT);
- if (errcode < 0)
- dev_err(&usbduxsub->interface->dev,
- "comedi_: control msg failed (stop)\n");
-
- kfree(local_transfer_buffer);
- return errcode;
-}
-
-static int usbduxsub_upload(struct usbduxsub *usbduxsub,
- uint8_t *local_transfer_buffer,
- unsigned int start_addr, unsigned int len)
-{
- int errcode;
-
- errcode = usb_control_msg(usbduxsub->usbdev,
- usb_sndctrlpipe(usbduxsub->usbdev, 0),
- /* brequest, firmware */
- USBDUXSUB_FIRMWARE,
- /* bmRequestType */
- VENDOR_DIR_OUT,
- /* value */
- start_addr,
- /* index */
- 0x0000,
- /* our local safe buffer */
- local_transfer_buffer,
- /* length */
- len,
- /* timeout */
- BULK_TIMEOUT);
- dev_dbg(&usbduxsub->interface->dev, "comedi_: result=%d\n", errcode);
- if (errcode < 0) {
- dev_err(&usbduxsub->interface->dev, "comedi_: upload failed\n");
- return errcode;
- }
- return 0;
-}
-
-#define FIRMWARE_MAX_LEN 0x2000
-
-static int firmware_upload(struct usbduxsub *usbduxsub,
- const u8 *firmware_binary, int size_firmware)
+static int usbdux_submit_urbs(struct comedi_device *dev,
+ struct urb **urbs, int num_urbs,
+ int input_urb)
{
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct usbdux_private *devpriv = dev->private;
+ struct urb *urb;
int ret;
- uint8_t *fw_buf;
-
- if (!firmware_binary)
- return 0;
-
- if (size_firmware > FIRMWARE_MAX_LEN) {
- dev_err(&usbduxsub->interface->dev,
- "usbdux firmware binary it too large for FX2.\n");
- return -ENOMEM;
- }
-
- /* we generate a local buffer for the firmware */
- fw_buf = kmemdup(firmware_binary, size_firmware, GFP_KERNEL);
- if (!fw_buf) {
- dev_err(&usbduxsub->interface->dev,
- "comedi_: mem alloc for firmware failed\n");
- return -ENOMEM;
- }
-
- ret = usbduxsub_stop(usbduxsub);
- if (ret < 0) {
- dev_err(&usbduxsub->interface->dev,
- "comedi_: can not stop firmware\n");
- kfree(fw_buf);
- return ret;
- }
-
- ret = usbduxsub_upload(usbduxsub, fw_buf, 0, size_firmware);
- if (ret < 0) {
- dev_err(&usbduxsub->interface->dev,
- "comedi_: firmware upload failed\n");
- kfree(fw_buf);
- return ret;
- }
- ret = usbduxsub_start(usbduxsub);
- if (ret < 0) {
- dev_err(&usbduxsub->interface->dev,
- "comedi_: can not start firmware\n");
- kfree(fw_buf);
- return ret;
- }
- kfree(fw_buf);
- return 0;
-}
-
-static int usbduxsub_submit_inurbs(struct usbduxsub *usbduxsub)
-{
- int i, err_flag;
-
- if (!usbduxsub)
- return -EFAULT;
+ int i;
/* Submit all URBs and start the transfer on the bus */
- for (i = 0; i < usbduxsub->num_in_buffers; i++) {
- /* in case of a resubmission after an unlink... */
- usbduxsub->urb_in[i]->interval = usbduxsub->ai_interval;
- usbduxsub->urb_in[i]->context = usbduxsub->comedidev;
- usbduxsub->urb_in[i]->dev = usbduxsub->usbdev;
- usbduxsub->urb_in[i]->status = 0;
- usbduxsub->urb_in[i]->transfer_flags = URB_ISO_ASAP;
- dev_dbg(&usbduxsub->interface->dev,
- "comedi%d: submitting in-urb[%d]: %p,%p intv=%d\n",
- usbduxsub->comedidev->minor, i,
- (usbduxsub->urb_in[i]->context),
- (usbduxsub->urb_in[i]->dev),
- (usbduxsub->urb_in[i]->interval));
- err_flag = usb_submit_urb(usbduxsub->urb_in[i], GFP_ATOMIC);
- if (err_flag) {
- dev_err(&usbduxsub->interface->dev,
- "comedi_: ai: usb_submit_urb(%d) error %d\n",
- i, err_flag);
- return err_flag;
- }
- }
- return 0;
-}
+ for (i = 0; i < num_urbs; i++) {
+ urb = urbs[i];
-static int usbduxsub_submit_outurbs(struct usbduxsub *usbduxsub)
-{
- int i, err_flag;
-
- if (!usbduxsub)
- return -EFAULT;
-
- for (i = 0; i < usbduxsub->num_out_buffers; i++) {
- dev_dbg(&usbduxsub->interface->dev,
- "comedi_: submitting out-urb[%d]\n", i);
/* in case of a resubmission after an unlink... */
- usbduxsub->urb_out[i]->context = usbduxsub->comedidev;
- usbduxsub->urb_out[i]->dev = usbduxsub->usbdev;
- usbduxsub->urb_out[i]->status = 0;
- usbduxsub->urb_out[i]->transfer_flags = URB_ISO_ASAP;
- err_flag = usb_submit_urb(usbduxsub->urb_out[i], GFP_ATOMIC);
- if (err_flag) {
- dev_err(&usbduxsub->interface->dev,
- "comedi_: ao: usb_submit_urb(%d) error %d\n",
- i, err_flag);
- return err_flag;
- }
+ if (input_urb)
+ urb->interval = devpriv->ai_interval;
+ urb->context = dev;
+ urb->dev = usb;
+ urb->status = 0;
+ urb->transfer_flags = URB_ISO_ASAP;
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret)
+ return ret;
}
return 0;
}
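Earlier in this hunk the rewritten AI interrupt handler converts bipolar samples with val ^= ((s->maxdata + 1) >> 1) instead of the hard-coded 0x800. A self-contained C sketch of that two's-complement to offset-binary conversion; the 12-bit maxdata is an assumed example value:

#include <stdint.h>
#include <stdio.h>

/* Convert a two's-complement ADC word to the unsigned comedi sample range
 * 0..maxdata by flipping the sign bit, as the patched usbdux AI ISR does. */
static unsigned int twos_complement_to_offset(int16_t raw, unsigned int maxdata)
{
	return ((unsigned int)raw ^ ((maxdata + 1) >> 1)) & maxdata;
}

int main(void)
{
	const unsigned int maxdata = 0xfff;	/* 12-bit converter, assumed */

	printf("-2048 -> %u\n", twos_complement_to_offset(-2048, maxdata)); /* 0 */
	printf("    0 -> %u\n", twos_complement_to_offset(0, maxdata));     /* 2048 */
	printf(" 2047 -> %u\n", twos_complement_to_offset(2047, maxdata));  /* 4095 */
	return 0;
}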
@@ -938,13 +563,10 @@ static int usbduxsub_submit_outurbs(struct usbduxsub *usbduxsub)
static int usbdux_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
- struct usbduxsub *this_usbduxsub = dev->private;
+ struct usbdux_private *this_usbduxsub = dev->private;
int err = 0, i;
unsigned int tmp_timer;
- if (!(this_usbduxsub->probed))
- return -ENODEV;
-
/* Step 1 : check if triggers are trivially valid */
err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT);
@@ -1034,221 +656,143 @@ static int8_t create_adc_command(unsigned int chan, int range)
return (chan << 4) | ((p == 1) << 2) | ((r == 1) << 3);
}
-/* bulk transfers to usbdux */
+static int send_dux_commands(struct comedi_device *dev, int cmd_type)
+{
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct usbdux_private *devpriv = dev->private;
+ int nsent;
-#define SENDADCOMMANDS 0
-#define SENDDACOMMANDS 1
-#define SENDDIOCONFIGCOMMAND 2
-#define SENDDIOBITSCOMMAND 3
-#define SENDSINGLEAD 4
-#define READCOUNTERCOMMAND 5
-#define WRITECOUNTERCOMMAND 6
-#define SENDPWMON 7
-#define SENDPWMOFF 8
+ devpriv->dux_commands[0] = cmd_type;
-static int send_dux_commands(struct usbduxsub *this_usbduxsub, int cmd_type)
-{
- int result, nsent;
-
- this_usbduxsub->dux_commands[0] = cmd_type;
-#ifdef NOISY_DUX_DEBUGBUG
- printk(KERN_DEBUG "comedi%d: usbdux: dux_commands: ",
- this_usbduxsub->comedidev->minor);
- for (result = 0; result < SIZEOFDUXBUFFER; result++)
- printk(" %02x", this_usbduxsub->dux_commands[result]);
- printk("\n");
-#endif
- result = usb_bulk_msg(this_usbduxsub->usbdev,
- usb_sndbulkpipe(this_usbduxsub->usbdev,
- COMMAND_OUT_EP),
- this_usbduxsub->dux_commands, SIZEOFDUXBUFFER,
- &nsent, BULK_TIMEOUT);
- if (result < 0)
- dev_err(&this_usbduxsub->interface->dev, "comedi%d: "
- "could not transmit dux_command to the usb-device, "
- "err=%d\n", this_usbduxsub->comedidev->minor, result);
-
- return result;
+ return usb_bulk_msg(usb, usb_sndbulkpipe(usb, 1),
+ devpriv->dux_commands, SIZEOFDUXBUFFER,
+ &nsent, BULK_TIMEOUT);
}
-static int receive_dux_commands(struct usbduxsub *this_usbduxsub, int command)
+static int receive_dux_commands(struct comedi_device *dev, int command)
{
- int result = (-EFAULT);
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct usbdux_private *devpriv = dev->private;
+ int ret;
int nrec;
int i;
for (i = 0; i < RETRIES; i++) {
- result = usb_bulk_msg(this_usbduxsub->usbdev,
- usb_rcvbulkpipe(this_usbduxsub->usbdev,
- COMMAND_IN_EP),
- this_usbduxsub->insn_buffer, SIZEINSNBUF,
+ ret = usb_bulk_msg(usb, usb_rcvbulkpipe(usb, 8),
+ devpriv->insn_buf, SIZEINSNBUF,
&nrec, BULK_TIMEOUT);
- if (result < 0) {
- dev_err(&this_usbduxsub->interface->dev, "comedi%d: "
- "insn: USB error %d while receiving DUX command"
- "\n", this_usbduxsub->comedidev->minor, result);
- return result;
- }
- if (le16_to_cpu(this_usbduxsub->insn_buffer[0]) == command)
- return result;
+ if (ret < 0)
+ return ret;
+ if (le16_to_cpu(devpriv->insn_buf[0]) == command)
+ return ret;
}
- /* this is only reached if the data has been requested a couple of
- * times */
- dev_err(&this_usbduxsub->interface->dev, "comedi%d: insn: "
- "wrong data returned from firmware: want cmd %d, got cmd %d.\n",
- this_usbduxsub->comedidev->minor, command,
- le16_to_cpu(this_usbduxsub->insn_buffer[0]));
+ /* command not received */
return -EFAULT;
}
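/*
 * Usage sketch: a hypothetical helper (not in the driver) showing how the
 * insn handlers below pair the two helpers above while holding
 * devpriv->sem.  The command id written to dux_commands[0] is echoed back
 * by the firmware in insn_buf[0], which is what the retry loop in
 * receive_dux_commands() waits for.
 */
static int usbdux_query_example(struct comedi_device *dev, int cmd_type,
				unsigned int *val)
{
	struct usbdux_private *devpriv = dev->private;
	int ret;

	/* caller holds devpriv->sem; payload bytes go into dux_commands[1..] */
	ret = send_dux_commands(dev, cmd_type);
	if (ret < 0)
		return ret;

	ret = receive_dux_commands(dev, cmd_type);
	if (ret < 0)
		return ret;

	/* the 16-bit little-endian reply follows the echoed command id */
	*val = le16_to_cpu(devpriv->insn_buf[1]);
	return 0;
}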
static int usbdux_ai_inttrig(struct comedi_device *dev,
- struct comedi_subdevice *s, unsigned int trignum)
+ struct comedi_subdevice *s,
+ unsigned int trignum)
{
- int ret;
- struct usbduxsub *this_usbduxsub = dev->private;
- if (!this_usbduxsub)
- return -EFAULT;
+ struct usbdux_private *devpriv = dev->private;
+ int ret = -EINVAL;
- down(&this_usbduxsub->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: usbdux_ai_inttrig\n", dev->minor);
-
- if (trignum != 0) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: usbdux_ai_inttrig: invalid trignum\n",
- dev->minor);
- up(&this_usbduxsub->sem);
- return -EINVAL;
- }
- if (!(this_usbduxsub->ai_cmd_running)) {
- this_usbduxsub->ai_cmd_running = 1;
- ret = usbduxsub_submit_inurbs(this_usbduxsub);
+ down(&devpriv->sem);
+
+ if (trignum != 0)
+ goto ai_trig_exit;
+
+ if (!devpriv->ai_cmd_running) {
+ devpriv->ai_cmd_running = 1;
+ ret = usbdux_submit_urbs(dev, devpriv->ai_urbs,
+ devpriv->n_ai_urbs, 1);
if (ret < 0) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: usbdux_ai_inttrig: "
- "urbSubmit: err=%d\n", dev->minor, ret);
- this_usbduxsub->ai_cmd_running = 0;
- up(&this_usbduxsub->sem);
- return ret;
+ devpriv->ai_cmd_running = 0;
+ goto ai_trig_exit;
}
s->async->inttrig = NULL;
} else {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: ai_inttrig but acqu is already running\n",
- dev->minor);
+ ret = -EBUSY;
}
- up(&this_usbduxsub->sem);
- return 1;
+
+ai_trig_exit:
+ up(&devpriv->sem);
+ return ret;
}
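/*
 * Note on the TRIG_INT path (illustrative flow, not stated in the patch
 * itself): when usbdux_ai_cmd() below is set up with start_src == TRIG_INT
 * it only installs usbdux_ai_inttrig() above as s->async->inttrig; the
 * URBs are not submitted until user space issues an INSN_INTTRIG
 * instruction, at which point comedi calls back into usbdux_ai_inttrig()
 * and the acquisition starts.
 */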
static int usbdux_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
+ struct usbdux_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int chan, range;
- int i, ret;
- struct usbduxsub *this_usbduxsub = dev->private;
- int result;
-
- if (!this_usbduxsub)
- return -EFAULT;
-
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: usbdux_ai_cmd\n", dev->minor);
+ int len = cmd->chanlist_len;
+ int ret = -EBUSY;
+ int i;
/* block other CPUs from starting an ai_cmd */
- down(&this_usbduxsub->sem);
+ down(&devpriv->sem);
+
+ if (devpriv->ai_cmd_running)
+ goto ai_cmd_exit;
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
- if (this_usbduxsub->ai_cmd_running) {
- dev_err(&this_usbduxsub->interface->dev, "comedi%d: "
- "ai_cmd not possible. Another ai_cmd is running.\n",
- dev->minor);
- up(&this_usbduxsub->sem);
- return -EBUSY;
- }
/* set current channel of the running acquisition to zero */
s->async->cur_chan = 0;
- this_usbduxsub->dux_commands[1] = cmd->chanlist_len;
- for (i = 0; i < cmd->chanlist_len; ++i) {
- chan = CR_CHAN(cmd->chanlist[i]);
- range = CR_RANGE(cmd->chanlist[i]);
- if (i >= NUMCHANNELS) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: channel list too long\n",
- dev->minor);
- break;
- }
- this_usbduxsub->dux_commands[i + 2] =
- create_adc_command(chan, range);
- }
-
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi %d: sending commands to the usb device: size=%u\n",
- dev->minor, NUMCHANNELS);
+ devpriv->dux_commands[1] = len;
+ for (i = 0; i < len; ++i) {
+ unsigned int chan = CR_CHAN(cmd->chanlist[i]);
+ unsigned int range = CR_RANGE(cmd->chanlist[i]);
- result = send_dux_commands(this_usbduxsub, SENDADCOMMANDS);
- if (result < 0) {
- up(&this_usbduxsub->sem);
- return result;
+ devpriv->dux_commands[i + 2] = create_adc_command(chan, range);
}
- if (this_usbduxsub->high_speed) {
+ ret = send_dux_commands(dev, USBDUX_CMD_MULT_AI);
+ if (ret < 0)
+ goto ai_cmd_exit;
+
+ if (devpriv->high_speed) {
/*
* every channel gets a time window of 125us. Thus, if we
* sample all 8 channels we need 1ms. If we sample only one
* channel we need only 125us
*/
- this_usbduxsub->ai_interval = 1;
+ devpriv->ai_interval = 1;
/* find a power of 2 for the interval */
- while ((this_usbduxsub->ai_interval) < (cmd->chanlist_len)) {
- this_usbduxsub->ai_interval =
- (this_usbduxsub->ai_interval) * 2;
- }
- this_usbduxsub->ai_timer = cmd->scan_begin_arg / (125000 *
- (this_usbduxsub->
- ai_interval));
+ while (devpriv->ai_interval < len)
+ devpriv->ai_interval *= 2;
+
+ devpriv->ai_timer = cmd->scan_begin_arg /
+ (125000 * devpriv->ai_interval);
} else {
/* interval always 1ms */
- this_usbduxsub->ai_interval = 1;
- this_usbduxsub->ai_timer = cmd->scan_begin_arg / 1000000;
+ devpriv->ai_interval = 1;
+ devpriv->ai_timer = cmd->scan_begin_arg / 1000000;
}
- if (this_usbduxsub->ai_timer < 1) {
- dev_err(&this_usbduxsub->interface->dev, "comedi%d: ai_cmd: "
- "timer=%d, scan_begin_arg=%d. "
- "Not properly tested by cmdtest?\n", dev->minor,
- this_usbduxsub->ai_timer, cmd->scan_begin_arg);
- up(&this_usbduxsub->sem);
- return -EINVAL;
+ if (devpriv->ai_timer < 1) {
+ ret = -EINVAL;
+ goto ai_cmd_exit;
}
- this_usbduxsub->ai_counter = this_usbduxsub->ai_timer;
+
+ devpriv->ai_counter = devpriv->ai_timer;
if (cmd->stop_src == TRIG_COUNT) {
/* data arrives as one packet */
- this_usbduxsub->ai_sample_count = cmd->stop_arg;
- this_usbduxsub->ai_continous = 0;
+ devpriv->ai_sample_count = cmd->stop_arg;
+ devpriv->ai_continous = 0;
} else {
/* continous acquisition */
- this_usbduxsub->ai_continous = 1;
- this_usbduxsub->ai_sample_count = 0;
+ devpriv->ai_continous = 1;
+ devpriv->ai_sample_count = 0;
}
if (cmd->start_src == TRIG_NOW) {
/* enable this acquisition operation */
- this_usbduxsub->ai_cmd_running = 1;
- ret = usbduxsub_submit_inurbs(this_usbduxsub);
+ devpriv->ai_cmd_running = 1;
+ ret = usbdux_submit_urbs(dev, devpriv->ai_urbs,
+ devpriv->n_ai_urbs, 1);
if (ret < 0) {
- this_usbduxsub->ai_cmd_running = 0;
+ devpriv->ai_cmd_running = 0;
/* fixme: unlink here?? */
- up(&this_usbduxsub->sem);
- return ret;
+ goto ai_cmd_exit;
}
s->async->inttrig = NULL;
} else {
@@ -1257,202 +801,156 @@ static int usbdux_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
/* wait for an internal signal */
s->async->inttrig = usbdux_ai_inttrig;
}
- up(&this_usbduxsub->sem);
- return 0;
+
+ai_cmd_exit:
+ up(&devpriv->sem);
+
+ return ret;
}
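/*
 * Worked example for the timer arithmetic in usbdux_ai_cmd() above
 * (illustrative numbers only): in high-speed mode a 5-channel chanlist
 * rounds ai_interval up to the next power of two, i.e. 8 micro-frames of
 * 125 us = 1 ms per scan slot.  Requesting scan_begin_arg = 4000000 ns
 * then gives ai_timer = 4000000 / (125000 * 8) = 4, so ai_counter counts
 * down from 4 and one scan is delivered roughly every 4 ms.
 */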
/* Mode 0 is used to get a single conversion on demand */
static int usbdux_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
+ struct usbdux_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int range = CR_RANGE(insn->chanspec);
+ unsigned int val;
+ int ret = -EBUSY;
int i;
- unsigned int one = 0;
- int chan, range;
- int err;
- struct usbduxsub *this_usbduxsub = dev->private;
- if (!this_usbduxsub)
- return 0;
+ down(&devpriv->sem);
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: ai_insn_read, insn->n=%d, insn->subdev=%d\n",
- dev->minor, insn->n, insn->subdev);
+ if (devpriv->ai_cmd_running)
+ goto ai_read_exit;
- down(&this_usbduxsub->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
- if (this_usbduxsub->ai_cmd_running) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: ai_insn_read not possible. "
- "Async Command is running.\n", dev->minor);
- up(&this_usbduxsub->sem);
- return 0;
- }
-
- /* sample one channel */
- chan = CR_CHAN(insn->chanspec);
- range = CR_RANGE(insn->chanspec);
/* set command for the first channel */
- this_usbduxsub->dux_commands[1] = create_adc_command(chan, range);
+ devpriv->dux_commands[1] = create_adc_command(chan, range);
/* adc commands */
- err = send_dux_commands(this_usbduxsub, SENDSINGLEAD);
- if (err < 0) {
- up(&this_usbduxsub->sem);
- return err;
- }
+ ret = send_dux_commands(dev, USBDUX_CMD_SINGLE_AI);
+ if (ret < 0)
+ goto ai_read_exit;
for (i = 0; i < insn->n; i++) {
- err = receive_dux_commands(this_usbduxsub, SENDSINGLEAD);
- if (err < 0) {
- up(&this_usbduxsub->sem);
- return 0;
- }
- one = le16_to_cpu(this_usbduxsub->insn_buffer[1]);
- if (CR_RANGE(insn->chanspec) <= 1)
- one = one ^ 0x800;
+ ret = receive_dux_commands(dev, USBDUX_CMD_SINGLE_AI);
+ if (ret < 0)
+ goto ai_read_exit;
+
+ val = le16_to_cpu(devpriv->insn_buf[1]);
- data[i] = one;
+ /* bipolar data is two's-complement */
+ if (comedi_range_is_bipolar(s, range))
+ val ^= ((s->maxdata + 1) >> 1);
+
+ data[i] = val;
}
- up(&this_usbduxsub->sem);
- return i;
-}
-/************************************/
-/* analog out */
+ai_read_exit:
+ up(&devpriv->sem);
+
+ return ret ? ret : insn->n;
+}
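/*
 * Illustration of the bipolar conversion in usbdux_ai_insn_read() above
 * (example values): with maxdata = 0xfff the XOR constant is 0x800, which
 * maps the ADC's two's-complement samples onto comedi's offset-binary
 * range:
 *   raw 0x800 (-2048) -> 0x000 (most negative)
 *   raw 0x000 (    0) -> 0x800 (mid-scale)
 *   raw 0x7ff (+2047) -> 0xfff (most positive)
 */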
static int usbdux_ao_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
+ struct usbdux_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
int i;
- int chan = CR_CHAN(insn->chanspec);
- struct usbduxsub *this_usbduxsub = dev->private;
-
- if (!this_usbduxsub)
- return -EFAULT;
- down(&this_usbduxsub->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
+ down(&devpriv->sem);
for (i = 0; i < insn->n; i++)
- data[i] = this_usbduxsub->out_buffer[chan];
+ data[i] = devpriv->ao_readback[chan];
+ up(&devpriv->sem);
- up(&this_usbduxsub->sem);
- return i;
+ return insn->n;
}
static int usbdux_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int i, err;
- int chan = CR_CHAN(insn->chanspec);
- struct usbduxsub *this_usbduxsub = dev->private;
+ struct usbdux_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val = devpriv->ao_readback[chan];
+ int16_t *p = (int16_t *)&devpriv->dux_commands[2];
+ int ret = -EBUSY;
+ int i;
- if (!this_usbduxsub)
- return -EFAULT;
+ down(&devpriv->sem);
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: ao_insn_write\n", dev->minor);
+ if (devpriv->ao_cmd_running)
+ goto ao_write_exit;
- down(&this_usbduxsub->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
- if (this_usbduxsub->ao_cmd_running) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: ao_insn_write: "
- "ERROR: asynchronous ao_cmd is running\n", dev->minor);
- up(&this_usbduxsub->sem);
- return 0;
- }
+ /* number of channels: 1 */
+ devpriv->dux_commands[1] = 1;
+ /* channel number */
+ devpriv->dux_commands[4] = chan << 6;
for (i = 0; i < insn->n; i++) {
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: ao_insn_write: data[chan=%d,i=%d]=%d\n",
- dev->minor, chan, i, data[i]);
+ val = data[i];
- /* number of channels: 1 */
- this_usbduxsub->dux_commands[1] = 1;
/* one 16 bit value */
- *((int16_t *) (this_usbduxsub->dux_commands + 2)) =
- cpu_to_le16(data[i]);
- this_usbduxsub->out_buffer[chan] = data[i];
- /* channel number */
- this_usbduxsub->dux_commands[4] = (chan << 6);
- err = send_dux_commands(this_usbduxsub, SENDDACOMMANDS);
- if (err < 0) {
- up(&this_usbduxsub->sem);
- return err;
- }
+ *p = cpu_to_le16(val);
+
+ ret = send_dux_commands(dev, USBDUX_CMD_AO);
+ if (ret < 0)
+ goto ao_write_exit;
}
- up(&this_usbduxsub->sem);
+ devpriv->ao_readback[chan] = val;
+
+ao_write_exit:
+ up(&devpriv->sem);
- return i;
+ return ret ? ret : insn->n;
}
static int usbdux_ao_inttrig(struct comedi_device *dev,
- struct comedi_subdevice *s, unsigned int trignum)
+ struct comedi_subdevice *s,
+ unsigned int trignum)
{
- int ret;
- struct usbduxsub *this_usbduxsub = dev->private;
+ struct usbdux_private *devpriv = dev->private;
+ int ret = -EINVAL;
- if (!this_usbduxsub)
- return -EFAULT;
+ down(&devpriv->sem);
- down(&this_usbduxsub->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
- if (trignum != 0) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: usbdux_ao_inttrig: invalid trignum\n",
- dev->minor);
- up(&this_usbduxsub->sem);
- return -EINVAL;
- }
- if (!(this_usbduxsub->ao_cmd_running)) {
- this_usbduxsub->ao_cmd_running = 1;
- ret = usbduxsub_submit_outurbs(this_usbduxsub);
+ if (trignum != 0)
+ goto ao_trig_exit;
+
+ if (!devpriv->ao_cmd_running) {
+ devpriv->ao_cmd_running = 1;
+ ret = usbdux_submit_urbs(dev, devpriv->ao_urbs,
+ devpriv->n_ao_urbs, 0);
if (ret < 0) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: usbdux_ao_inttrig: submitURB: "
- "err=%d\n", dev->minor, ret);
- this_usbduxsub->ao_cmd_running = 0;
- up(&this_usbduxsub->sem);
- return ret;
+ devpriv->ao_cmd_running = 0;
+ goto ao_trig_exit;
}
s->async->inttrig = NULL;
} else {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: ao_inttrig but acqu is already running.\n",
- dev->minor);
+ ret = -EBUSY;
}
- up(&this_usbduxsub->sem);
- return 1;
+
+ao_trig_exit:
+ up(&devpriv->sem);
+ return ret;
}
static int usbdux_ao_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
- struct usbduxsub *this_usbduxsub = dev->private;
+ struct usbdux_private *this_usbduxsub = dev->private;
int err = 0;
unsigned int flags;
if (!this_usbduxsub)
return -EFAULT;
- if (!(this_usbduxsub->probed))
- return -ENODEV;
-
/* Step 1 : check if triggers are trivially valid */
err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT);
@@ -1529,99 +1027,72 @@ static int usbdux_ao_cmdtest(struct comedi_device *dev,
static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
+ struct usbdux_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int chan, gain;
- int i, ret;
- struct usbduxsub *this_usbduxsub = dev->private;
+ int ret = -EBUSY;
+ int i;
- if (!this_usbduxsub)
- return -EFAULT;
+ down(&devpriv->sem);
- down(&this_usbduxsub->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: %s\n", dev->minor, __func__);
+ if (devpriv->ao_cmd_running)
+ goto ao_cmd_exit;
/* set current channel of the running acquisition to zero */
s->async->cur_chan = 0;
+
for (i = 0; i < cmd->chanlist_len; ++i) {
- chan = CR_CHAN(cmd->chanlist[i]);
- gain = CR_RANGE(cmd->chanlist[i]);
- if (i >= NUMOUTCHANNELS) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: %s: channel list too long\n",
- dev->minor, __func__);
- break;
- }
- this_usbduxsub->dac_commands[i] = (chan << 6);
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: dac command for ch %d is %x\n",
- dev->minor, i, this_usbduxsub->dac_commands[i]);
+ unsigned int chan = CR_CHAN(cmd->chanlist[i]);
+
+ devpriv->ao_chanlist[i] = chan << 6;
}
/* we count in steps of 1ms (125us) */
/* 125us mode not used yet */
- if (0) { /* (this_usbduxsub->high_speed) */
+ if (0) { /* (devpriv->high_speed) */
/* 125us */
/* timing of the conversion itself: every 125 us */
- this_usbduxsub->ao_timer = cmd->convert_arg / 125000;
+ devpriv->ao_timer = cmd->convert_arg / 125000;
} else {
/* 1ms */
/* timing of the scan: we get all channels at once */
- this_usbduxsub->ao_timer = cmd->scan_begin_arg / 1000000;
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: scan_begin_src=%d, scan_begin_arg=%d, "
- "convert_src=%d, convert_arg=%d\n", dev->minor,
- cmd->scan_begin_src, cmd->scan_begin_arg,
- cmd->convert_src, cmd->convert_arg);
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: ao_timer=%d (ms)\n",
- dev->minor, this_usbduxsub->ao_timer);
- if (this_usbduxsub->ao_timer < 1) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: usbdux: ao_timer=%d, "
- "scan_begin_arg=%d. "
- "Not properly tested by cmdtest?\n",
- dev->minor, this_usbduxsub->ao_timer,
- cmd->scan_begin_arg);
- up(&this_usbduxsub->sem);
- return -EINVAL;
+ devpriv->ao_timer = cmd->scan_begin_arg / 1000000;
+ if (devpriv->ao_timer < 1) {
+ ret = -EINVAL;
+ goto ao_cmd_exit;
}
}
- this_usbduxsub->ao_counter = this_usbduxsub->ao_timer;
+
+ devpriv->ao_counter = devpriv->ao_timer;
if (cmd->stop_src == TRIG_COUNT) {
/* not continuous */
/* counter */
/* high speed also scans everything at once */
- if (0) { /* (this_usbduxsub->high_speed) */
- this_usbduxsub->ao_sample_count =
- (cmd->stop_arg) * (cmd->scan_end_arg);
+ if (0) { /* (devpriv->high_speed) */
+ devpriv->ao_sample_count = cmd->stop_arg *
+ cmd->scan_end_arg;
} else {
/* there's no scan as the scan has been */
/* perf inside the FX2 */
/* data arrives as one packet */
- this_usbduxsub->ao_sample_count = cmd->stop_arg;
+ devpriv->ao_sample_count = cmd->stop_arg;
}
- this_usbduxsub->ao_continous = 0;
+ devpriv->ao_continous = 0;
} else {
/* continous acquisition */
- this_usbduxsub->ao_continous = 1;
- this_usbduxsub->ao_sample_count = 0;
+ devpriv->ao_continous = 1;
+ devpriv->ao_sample_count = 0;
}
if (cmd->start_src == TRIG_NOW) {
/* enable this acquisition operation */
- this_usbduxsub->ao_cmd_running = 1;
- ret = usbduxsub_submit_outurbs(this_usbduxsub);
+ devpriv->ao_cmd_running = 1;
+ ret = usbdux_submit_urbs(dev, devpriv->ao_urbs,
+ devpriv->n_ao_urbs, 0);
if (ret < 0) {
- this_usbduxsub->ao_cmd_running = 0;
+ devpriv->ao_cmd_running = 0;
/* fixme: unlink here?? */
- up(&this_usbduxsub->sem);
- return ret;
+ goto ao_cmd_exit;
}
s->async->inttrig = NULL;
} else {
@@ -1631,149 +1102,123 @@ static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
s->async->inttrig = usbdux_ao_inttrig;
}
- up(&this_usbduxsub->sem);
- return 0;
+ao_cmd_exit:
+ up(&devpriv->sem);
+
+ return ret;
}
static int usbdux_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int chan = CR_CHAN(insn->chanspec);
+ int ret;
- /* The input or output configuration of each digital line is
- * configured by a special insn_config instruction. chanspec
- * contains the channel to be changed, and data[0] contains the
- * value COMEDI_INPUT or COMEDI_OUTPUT. */
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= 1 << chan; /* 1 means Out */
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~(1 << chan);
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] =
- (s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
- break;
- default:
- return -EINVAL;
- break;
- }
- /* we don't tell the firmware here as it would take 8 frames */
- /* to submit the information. We do it in the insn_bits. */
+ /*
+ * We don't tell the firmware here as it would take 8 frames
+	 * to submit the information. We do it in the insn_bits handler.
+ */
return insn->n;
}
static int usbdux_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct usbduxsub *this_usbduxsub = dev->private;
- int err;
+ struct usbdux_private *devpriv = dev->private;
+ unsigned int mask = data[0];
+ unsigned int bits = data[1];
+ int ret;
- if (!this_usbduxsub)
- return -EFAULT;
+ down(&devpriv->sem);
- down(&this_usbduxsub->sem);
+ s->state &= ~mask;
+ s->state |= (bits & mask);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
+ devpriv->dux_commands[1] = s->io_bits;
+ devpriv->dux_commands[2] = s->state;
- /* The insn data is a mask in data[0] and the new data
- * in data[1], each channel cooresponding to a bit. */
- s->state &= ~data[0];
- s->state |= data[0] & data[1];
- this_usbduxsub->dux_commands[1] = s->io_bits;
- this_usbduxsub->dux_commands[2] = s->state;
-
- /* This command also tells the firmware to return */
- /* the digital input lines */
- err = send_dux_commands(this_usbduxsub, SENDDIOBITSCOMMAND);
- if (err < 0) {
- up(&this_usbduxsub->sem);
- return err;
- }
- err = receive_dux_commands(this_usbduxsub, SENDDIOBITSCOMMAND);
- if (err < 0) {
- up(&this_usbduxsub->sem);
- return err;
- }
+ /*
+ * This command also tells the firmware to return
+ * the digital input lines.
+ */
+ ret = send_dux_commands(dev, USBDUX_CMD_DIO_BITS);
+ if (ret < 0)
+ goto dio_exit;
+ ret = receive_dux_commands(dev, USBDUX_CMD_DIO_BITS);
+ if (ret < 0)
+ goto dio_exit;
- data[1] = le16_to_cpu(this_usbduxsub->insn_buffer[1]);
- up(&this_usbduxsub->sem);
- return insn->n;
+ data[1] = le16_to_cpu(devpriv->insn_buf[1]);
+
+dio_exit:
+ up(&devpriv->sem);
+
+ return ret ? ret : insn->n;
}
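/*
 * Worked example for the insn_bits handler above (illustrative values):
 * with data[0] = 0x0f (mask) and data[1] = 0x05 (bits), s->state becomes
 * (old_state & ~0x0f) | 0x05 and is sent to the firmware together with
 * the direction bits in s->io_bits; the reply copied back into data[1]
 * also reflects the current level of the lines configured as inputs.
 */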
-/* reads the 4 counters, only two are used just now */
static int usbdux_counter_read(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct usbduxsub *this_usbduxsub = dev->private;
- int chan = insn->chanspec;
- int err;
+ struct usbdux_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int ret = 0;
+ int i;
- if (!this_usbduxsub)
- return -EFAULT;
+ down(&devpriv->sem);
- down(&this_usbduxsub->sem);
+ for (i = 0; i < insn->n; i++) {
+ ret = send_dux_commands(dev, USBDUX_CMD_TIMER_RD);
+ if (ret < 0)
+ goto counter_read_exit;
+ ret = receive_dux_commands(dev, USBDUX_CMD_TIMER_RD);
+ if (ret < 0)
+ goto counter_read_exit;
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
+ data[i] = le16_to_cpu(devpriv->insn_buf[chan + 1]);
}
- err = send_dux_commands(this_usbduxsub, READCOUNTERCOMMAND);
- if (err < 0) {
- up(&this_usbduxsub->sem);
- return err;
- }
+counter_read_exit:
+ up(&devpriv->sem);
- err = receive_dux_commands(this_usbduxsub, READCOUNTERCOMMAND);
- if (err < 0) {
- up(&this_usbduxsub->sem);
- return err;
- }
-
- data[0] = le16_to_cpu(this_usbduxsub->insn_buffer[chan + 1]);
- up(&this_usbduxsub->sem);
- return 1;
+ return ret ? ret : insn->n;
}
static int usbdux_counter_write(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct usbduxsub *this_usbduxsub = dev->private;
- int err;
-
- if (!this_usbduxsub)
- return -EFAULT;
+ struct usbdux_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int16_t *p = (int16_t *)&devpriv->dux_commands[2];
+ int ret = 0;
+ int i;
- down(&this_usbduxsub->sem);
+ down(&devpriv->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
+ devpriv->dux_commands[1] = chan;
- this_usbduxsub->dux_commands[1] = insn->chanspec;
- *((int16_t *) (this_usbduxsub->dux_commands + 2)) = cpu_to_le16(*data);
+ for (i = 0; i < insn->n; i++) {
+ *p = cpu_to_le16(data[i]);
- err = send_dux_commands(this_usbduxsub, WRITECOUNTERCOMMAND);
- if (err < 0) {
- up(&this_usbduxsub->sem);
- return err;
+ ret = send_dux_commands(dev, USBDUX_CMD_TIMER_WR);
+ if (ret < 0)
+ break;
}
- up(&this_usbduxsub->sem);
+ up(&devpriv->sem);
- return 1;
+ return ret ? ret : insn->n;
}
static int usbdux_counter_config(struct comedi_device *dev,
@@ -1784,73 +1229,43 @@ static int usbdux_counter_config(struct comedi_device *dev,
return 2;
}
-/***********************************/
-/* PWM */
-
-static int usbduxsub_unlink_pwm_urbs(struct usbduxsub *usbduxsub_tmp)
+static void usbduxsub_unlink_pwm_urbs(struct comedi_device *dev)
{
- int err = 0;
+ struct usbdux_private *devpriv = dev->private;
- if (usbduxsub_tmp && usbduxsub_tmp->urb_pwm) {
- if (usbduxsub_tmp->urb_pwm)
- usb_kill_urb(usbduxsub_tmp->urb_pwm);
- dev_dbg(&usbduxsub_tmp->interface->dev,
- "comedi: unlinked PwmURB: res=%d\n", err);
- }
- return err;
+ usb_kill_urb(devpriv->pwm_urb);
}
-/* This cancels a running acquisition operation
- * in any context.
- */
-static int usbdux_pwm_stop(struct usbduxsub *this_usbduxsub, int do_unlink)
+static void usbdux_pwm_stop(struct comedi_device *dev, int do_unlink)
{
- int ret = 0;
-
- if (!this_usbduxsub)
- return -EFAULT;
+ struct usbdux_private *devpriv = dev->private;
- dev_dbg(&this_usbduxsub->interface->dev, "comedi: %s\n", __func__);
if (do_unlink)
- ret = usbduxsub_unlink_pwm_urbs(this_usbduxsub);
+ usbduxsub_unlink_pwm_urbs(dev);
- this_usbduxsub->pwm_cmd_running = 0;
-
- return ret;
+ devpriv->pwm_cmd_running = 0;
}
-/* force unlink - is called by comedi */
static int usbdux_pwm_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct usbduxsub *this_usbduxsub = dev->private;
- int res = 0;
+ struct usbdux_private *devpriv = dev->private;
+ int ret;
+ down(&devpriv->sem);
/* unlink only if it is really running */
- res = usbdux_pwm_stop(this_usbduxsub, this_usbduxsub->pwm_cmd_running);
-
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi %d: sending pwm off command to the usb device.\n",
- dev->minor);
+ usbdux_pwm_stop(dev, devpriv->pwm_cmd_running);
+ ret = send_dux_commands(dev, USBDUX_CMD_PWM_OFF);
+ up(&devpriv->sem);
- return send_dux_commands(this_usbduxsub, SENDPWMOFF);
+ return ret;
}
static void usbduxsub_pwm_irq(struct urb *urb)
{
+ struct comedi_device *dev = urb->context;
+ struct usbdux_private *devpriv = dev->private;
int ret;
- struct usbduxsub *this_usbduxsub;
- struct comedi_device *this_comedidev;
- struct comedi_subdevice *s;
-
- /* printk(KERN_DEBUG "PWM: IRQ\n"); */
-
- /* the context variable points to the subdevice */
- this_comedidev = urb->context;
- /* the private structure of the subdevice is struct usbduxsub */
- this_usbduxsub = this_comedidev->private;
-
- s = &this_comedidev->subdevices[SUBDEV_DA];
switch (urb->status) {
case 0:
@@ -1865,220 +1280,171 @@ static void usbduxsub_pwm_irq(struct urb *urb)
* after an unlink command, unplug, ... etc
* no unlink needed here. Already shutting down.
*/
- if (this_usbduxsub->pwm_cmd_running)
- usbdux_pwm_stop(this_usbduxsub, 0);
+ if (devpriv->pwm_cmd_running)
+ usbdux_pwm_stop(dev, 0);
return;
default:
/* a real error */
- if (this_usbduxsub->pwm_cmd_running) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi_: Non-zero urb status received in "
- "pwm intr context: %d\n", urb->status);
- usbdux_pwm_stop(this_usbduxsub, 0);
+ if (devpriv->pwm_cmd_running) {
+ dev_err(dev->class_dev,
+ "Non-zero urb status received in pwm intr context: %d\n",
+ urb->status);
+ usbdux_pwm_stop(dev, 0);
}
return;
}
/* are we actually running? */
- if (!(this_usbduxsub->pwm_cmd_running))
+ if (!devpriv->pwm_cmd_running)
return;
- urb->transfer_buffer_length = this_usbduxsub->size_pwm_buf;
- urb->dev = this_usbduxsub->usbdev;
+ urb->transfer_buffer_length = devpriv->pwm_buf_sz;
+ urb->dev = comedi_to_usb_dev(dev);
urb->status = 0;
- if (this_usbduxsub->pwm_cmd_running) {
+ if (devpriv->pwm_cmd_running) {
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret < 0) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi_: pwm urb resubm failed in int-cont. "
- "ret=%d", ret);
+ dev_err(dev->class_dev,
+				"pwm urb resubmit failed in interrupt context, ret=%d",
+ ret);
if (ret == EL2NSYNC)
- dev_err(&this_usbduxsub->interface->dev,
- "buggy USB host controller or bug in "
- "IRQ handling!\n");
+ dev_err(dev->class_dev,
+ "buggy USB host controller or bug in IRQ handling!\n");
/* don't do an unlink here */
- usbdux_pwm_stop(this_usbduxsub, 0);
+ usbdux_pwm_stop(dev, 0);
}
}
}
-static int usbduxsub_submit_pwm_urbs(struct usbduxsub *usbduxsub)
+static int usbduxsub_submit_pwm_urbs(struct comedi_device *dev)
{
- int err_flag;
-
- if (!usbduxsub)
- return -EFAULT;
-
- dev_dbg(&usbduxsub->interface->dev, "comedi_: submitting pwm-urb\n");
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct usbdux_private *devpriv = dev->private;
+ struct urb *urb = devpriv->pwm_urb;
/* in case of a resubmission after an unlink... */
- usb_fill_bulk_urb(usbduxsub->urb_pwm,
- usbduxsub->usbdev,
- usb_sndbulkpipe(usbduxsub->usbdev, PWM_EP),
- usbduxsub->urb_pwm->transfer_buffer,
- usbduxsub->size_pwm_buf, usbduxsub_pwm_irq,
- usbduxsub->comedidev);
-
- err_flag = usb_submit_urb(usbduxsub->urb_pwm, GFP_ATOMIC);
- if (err_flag) {
- dev_err(&usbduxsub->interface->dev,
- "comedi_: usbdux: pwm: usb_submit_urb error %d\n",
- err_flag);
- return err_flag;
- }
- return 0;
+ usb_fill_bulk_urb(urb, usb, usb_sndbulkpipe(usb, 4),
+ urb->transfer_buffer,
+ devpriv->pwm_buf_sz,
+ usbduxsub_pwm_irq,
+ dev);
+
+ return usb_submit_urb(urb, GFP_ATOMIC);
}
static int usbdux_pwm_period(struct comedi_device *dev,
- struct comedi_subdevice *s, unsigned int period)
+ struct comedi_subdevice *s,
+ unsigned int period)
{
- struct usbduxsub *this_usbduxsub = dev->private;
+ struct usbdux_private *devpriv = dev->private;
int fx2delay = 255;
if (period < MIN_PWM_PERIOD) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: illegal period setting for pwm.\n",
- dev->minor);
return -EAGAIN;
} else {
- fx2delay = period / ((int)(6 * 512 * (1.0 / 0.033))) - 6;
- if (fx2delay > 255) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: period %d for pwm is too low.\n",
- dev->minor, period);
+ fx2delay = (period / (6 * 512 * 1000 / 33)) - 6;
+ if (fx2delay > 255)
return -EAGAIN;
- }
}
- this_usbduxsub->pwn_delay = fx2delay;
- this_usbduxsub->pwm_period = period;
- dev_dbg(&this_usbduxsub->interface->dev, "%s: frequ=%d, period=%d\n",
- __func__, period, fx2delay);
+ devpriv->pwm_delay = fx2delay;
+ devpriv->pwm_period = period;
+
return 0;
}
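/*
 * Worked example for the fx2delay arithmetic above (illustrative values,
 * assuming 'period' is given in nanoseconds): the integer expression
 * 6 * 512 * 1000 / 33 evaluates to 93090, i.e. one delay step is roughly
 * 93 us.  A requested period of 10 ms then yields
 *   fx2delay = 10000000 / 93090 - 6 = 107 - 6 = 101
 * which fits the 8-bit delay value; periods above roughly 24 ms would
 * push fx2delay past 255 and are rejected with -EAGAIN.
 */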
-/* is called from insn so there's no need to do all the sanity checks */
static int usbdux_pwm_start(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- int ret, i;
- struct usbduxsub *this_usbduxsub = dev->private;
+ struct usbdux_private *devpriv = dev->private;
+ int ret = 0;
- dev_dbg(&this_usbduxsub->interface->dev, "comedi%d: %s\n",
- dev->minor, __func__);
+ down(&devpriv->sem);
- if (this_usbduxsub->pwm_cmd_running) {
- /* already running */
- return 0;
- }
+ if (devpriv->pwm_cmd_running)
+ goto pwm_start_exit;
- this_usbduxsub->dux_commands[1] = ((int8_t) this_usbduxsub->pwn_delay);
- ret = send_dux_commands(this_usbduxsub, SENDPWMON);
+ devpriv->dux_commands[1] = devpriv->pwm_delay;
+ ret = send_dux_commands(dev, USBDUX_CMD_PWM_ON);
if (ret < 0)
- return ret;
+ goto pwm_start_exit;
/* initialise the buffer */
- for (i = 0; i < this_usbduxsub->size_pwm_buf; i++)
- ((char *)(this_usbduxsub->urb_pwm->transfer_buffer))[i] = 0;
+ memset(devpriv->pwm_urb->transfer_buffer, 0, devpriv->pwm_buf_sz);
- this_usbduxsub->pwm_cmd_running = 1;
- ret = usbduxsub_submit_pwm_urbs(this_usbduxsub);
- if (ret < 0) {
- this_usbduxsub->pwm_cmd_running = 0;
- return ret;
- }
- return 0;
+ devpriv->pwm_cmd_running = 1;
+ ret = usbduxsub_submit_pwm_urbs(dev);
+ if (ret < 0)
+ devpriv->pwm_cmd_running = 0;
+
+pwm_start_exit:
+ up(&devpriv->sem);
+
+ return ret;
}
-/* generates the bit pattern for PWM with the optional sign bit */
-static int usbdux_pwm_pattern(struct comedi_device *dev,
- struct comedi_subdevice *s, int channel,
- unsigned int value, unsigned int sign)
+static void usbdux_pwm_pattern(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int chan,
+ unsigned int value,
+ unsigned int sign)
{
- struct usbduxsub *this_usbduxsub = dev->private;
- int i, szbuf;
- char *p_buf;
- char pwm_mask;
- char sgn_mask;
- char c;
-
- if (!this_usbduxsub)
- return -EFAULT;
+ struct usbdux_private *devpriv = dev->private;
+ char pwm_mask = (1 << chan); /* DIO bit for the PWM data */
+ char sgn_mask = (16 << chan); /* DIO bit for the sign */
+ char *buf = (char *)(devpriv->pwm_urb->transfer_buffer);
+ int szbuf = devpriv->pwm_buf_sz;
+ int i;
- /* this is the DIO bit which carries the PWM data */
- pwm_mask = (1 << channel);
- /* this is the DIO bit which carries the optional direction bit */
- sgn_mask = (16 << channel);
- /* this is the buffer which will be filled with the with bit */
- /* pattern for one period */
- szbuf = this_usbduxsub->size_pwm_buf;
- p_buf = (char *)(this_usbduxsub->urb_pwm->transfer_buffer);
for (i = 0; i < szbuf; i++) {
- c = *p_buf;
- /* reset bits */
- c = c & (~pwm_mask);
- /* set the bit as long as the index is lower than the value */
+ char c = *buf;
+
+ c &= ~pwm_mask;
if (i < value)
- c = c | pwm_mask;
- /* set the optional sign bit for a relay */
- if (!sign) {
- /* positive value */
- c = c & (~sgn_mask);
- } else {
- /* negative value */
- c = c | sgn_mask;
- }
- *(p_buf++) = c;
+ c |= pwm_mask;
+ if (!sign)
+ c &= ~sgn_mask;
+ else
+ c |= sgn_mask;
+ *buf++ = c;
}
- return 1;
}
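/*
 * Illustration of the pattern generator above (example values): with the
 * 512-byte high-speed PWM buffer, usbdux_pwm_pattern(dev, s, 0, 128, 1)
 * sets bit 0 (pwm_mask = 0x01) in the first 128 bytes and clears it in
 * the remaining 384, giving a 25% duty cycle on channel 0, while bit 4
 * (sgn_mask = 0x10) is set in every byte to signal the negative sign.
 */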
static int usbdux_pwm_write(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct usbduxsub *this_usbduxsub = dev->private;
-
- if (!this_usbduxsub)
- return -EFAULT;
+ unsigned int chan = CR_CHAN(insn->chanspec);
- if ((insn->n) != 1) {
- /*
- * doesn't make sense to have more than one value here because
- * it would just overwrite the PWM buffer a couple of times
- */
+ /*
+ * It doesn't make sense to support more than one value here
+ * because it would just overwrite the PWM buffer.
+ */
+ if (insn->n != 1)
return -EINVAL;
- }
/*
- * the sign is set via a special INSN only, this gives us 8 bits for
- * normal operation
- * relay sign 0 by default
+	 * The sign is set via a special INSN only; this gives us 8 bits
+	 * for normal operation. The sign is 0 by default.
*/
- return usbdux_pwm_pattern(dev, s, CR_CHAN(insn->chanspec), data[0], 0);
-}
+ usbdux_pwm_pattern(dev, s, chan, data[0], 0);
-static int usbdux_pwm_read(struct comedi_device *x1,
- struct comedi_subdevice *x2, struct comedi_insn *x3,
- unsigned int *x4)
-{
- /* not needed */
- return -EINVAL;
-};
+ return insn->n;
+}
-/* switches on/off PWM */
static int usbdux_pwm_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct usbduxsub *this_usbduxsub = dev->private;
+ struct usbdux_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+
switch (data[0]) {
case INSN_CONFIG_ARM:
- /* switch it on */
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: %s: pwm on\n", dev->minor, __func__);
/*
* if not zero the PWM is limited to a certain time which is
* not supported here
@@ -2087,33 +1453,22 @@ static int usbdux_pwm_config(struct comedi_device *dev,
return -EINVAL;
return usbdux_pwm_start(dev, s);
case INSN_CONFIG_DISARM:
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: %s: pwm off\n", dev->minor, __func__);
return usbdux_pwm_cancel(dev, s);
case INSN_CONFIG_GET_PWM_STATUS:
- /*
- * to check if the USB transmission has failed or in case PWM
- * was limited to n cycles to check if it has terminated
- */
- data[1] = this_usbduxsub->pwm_cmd_running;
+ data[1] = devpriv->pwm_cmd_running;
return 0;
case INSN_CONFIG_PWM_SET_PERIOD:
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: %s: setting period\n", dev->minor, __func__);
return usbdux_pwm_period(dev, s, data[1]);
case INSN_CONFIG_PWM_GET_PERIOD:
- data[1] = this_usbduxsub->pwm_period;
+ data[1] = devpriv->pwm_period;
return 0;
case INSN_CONFIG_PWM_SET_H_BRIDGE:
- /* value in the first byte and the sign in the second for a
- relay */
- return usbdux_pwm_pattern(dev, s,
- /* the channel number */
- CR_CHAN(insn->chanspec),
- /* actual PWM data */
- data[1],
- /* just a sign */
- (data[2] != 0));
+ /*
+ * data[1] = value
+ * data[2] = sign (for a relay)
+ */
+ usbdux_pwm_pattern(dev, s, chan, data[1], (data[2] != 0));
+ return 0;
case INSN_CONFIG_PWM_GET_H_BRIDGE:
/* values are not kept in this driver, nothing to return here */
return -EINVAL;
@@ -2121,547 +1476,344 @@ static int usbdux_pwm_config(struct comedi_device *dev,
return -EINVAL;
}
-/* end of PWM */
-/*****************************************************************/
-
-static void tidy_up(struct usbduxsub *usbduxsub_tmp)
-{
- int i;
-
- if (!usbduxsub_tmp)
- return;
- dev_dbg(&usbduxsub_tmp->interface->dev, "comedi_: tiding up\n");
-
- /* shows the usb subsystem that the driver is down */
- if (usbduxsub_tmp->interface)
- usb_set_intfdata(usbduxsub_tmp->interface, NULL);
-
- usbduxsub_tmp->probed = 0;
-
- if (usbduxsub_tmp->urb_in) {
- if (usbduxsub_tmp->ai_cmd_running) {
- usbduxsub_tmp->ai_cmd_running = 0;
- usbduxsub_unlink_inurbs(usbduxsub_tmp);
- }
- for (i = 0; i < usbduxsub_tmp->num_in_buffers; i++) {
- kfree(usbduxsub_tmp->urb_in[i]->transfer_buffer);
- usbduxsub_tmp->urb_in[i]->transfer_buffer = NULL;
- usb_kill_urb(usbduxsub_tmp->urb_in[i]);
- usb_free_urb(usbduxsub_tmp->urb_in[i]);
- usbduxsub_tmp->urb_in[i] = NULL;
- }
- kfree(usbduxsub_tmp->urb_in);
- usbduxsub_tmp->urb_in = NULL;
- }
- if (usbduxsub_tmp->urb_out) {
- if (usbduxsub_tmp->ao_cmd_running) {
- usbduxsub_tmp->ao_cmd_running = 0;
- usbduxsub_unlink_outurbs(usbduxsub_tmp);
- }
- for (i = 0; i < usbduxsub_tmp->num_out_buffers; i++) {
- kfree(usbduxsub_tmp->urb_out[i]->transfer_buffer);
- usbduxsub_tmp->urb_out[i]->transfer_buffer = NULL;
- if (usbduxsub_tmp->urb_out[i]) {
- usb_kill_urb(usbduxsub_tmp->urb_out[i]);
- usb_free_urb(usbduxsub_tmp->urb_out[i]);
- usbduxsub_tmp->urb_out[i] = NULL;
- }
- }
- kfree(usbduxsub_tmp->urb_out);
- usbduxsub_tmp->urb_out = NULL;
- }
- if (usbduxsub_tmp->urb_pwm) {
- if (usbduxsub_tmp->pwm_cmd_running) {
- usbduxsub_tmp->pwm_cmd_running = 0;
- usbduxsub_unlink_pwm_urbs(usbduxsub_tmp);
- }
- kfree(usbduxsub_tmp->urb_pwm->transfer_buffer);
- usbduxsub_tmp->urb_pwm->transfer_buffer = NULL;
- usb_kill_urb(usbduxsub_tmp->urb_pwm);
- usb_free_urb(usbduxsub_tmp->urb_pwm);
- usbduxsub_tmp->urb_pwm = NULL;
- }
- kfree(usbduxsub_tmp->in_buffer);
- usbduxsub_tmp->in_buffer = NULL;
- kfree(usbduxsub_tmp->insn_buffer);
- usbduxsub_tmp->insn_buffer = NULL;
- kfree(usbduxsub_tmp->out_buffer);
- usbduxsub_tmp->out_buffer = NULL;
- kfree(usbduxsub_tmp->dac_commands);
- usbduxsub_tmp->dac_commands = NULL;
- kfree(usbduxsub_tmp->dux_commands);
- usbduxsub_tmp->dux_commands = NULL;
- usbduxsub_tmp->ai_cmd_running = 0;
- usbduxsub_tmp->ao_cmd_running = 0;
- usbduxsub_tmp->pwm_cmd_running = 0;
-}
-
-static int usbdux_attach_common(struct comedi_device *dev,
- struct usbduxsub *udev)
+static int usbdux_firmware_upload(struct comedi_device *dev,
+ const u8 *data, size_t size,
+ unsigned long context)
{
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ uint8_t *buf;
+ uint8_t *tmp;
int ret;
- struct comedi_subdevice *s = NULL;
- int n_subdevs;
-
- down(&udev->sem);
- /* pointer back to the corresponding comedi device */
- udev->comedidev = dev;
- /* set number of subdevices */
- if (udev->high_speed) {
- /* with pwm */
- n_subdevs = 5;
- } else {
- /* without pwm */
- n_subdevs = 4;
- }
+ if (!data)
+ return 0;
- ret = comedi_alloc_subdevices(dev, n_subdevs);
- if (ret) {
- up(&udev->sem);
- return ret;
+ if (size > USBDUX_FIRMWARE_MAX_LEN) {
+ dev_err(dev->class_dev,
+			"usbdux firmware binary is too large for FX2.\n");
+ return -ENOMEM;
}
- /* private structure is also simply the usb-structure */
- dev->private = udev;
+ /* we generate a local buffer for the firmware */
+ buf = kmemdup(data, size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- /* the first subdevice is the A/D converter */
- s = &dev->subdevices[SUBDEV_AD];
- /* the URBs get the comedi subdevice */
- /* which is responsible for reading */
- /* this is the subdevice which reads data */
- dev->read_subdev = s;
- /* the subdevice receives as private structure the */
- /* usb-structure */
- s->private = NULL;
- /* analog input */
- s->type = COMEDI_SUBD_AI;
- /* readable and ref is to ground */
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ;
- /* 8 channels */
- s->n_chan = 8;
- /* length of the channellist */
- s->len_chanlist = 8;
- /* callback functions */
- s->insn_read = usbdux_ai_insn_read;
- s->do_cmdtest = usbdux_ai_cmdtest;
- s->do_cmd = usbdux_ai_cmd;
- s->cancel = usbdux_ai_cancel;
- /* max value from the A/D converter (12bit) */
- s->maxdata = 0xfff;
- /* range table to convert to physical units */
- s->range_table = (&range_usbdux_ai_range);
-
- /* analog out */
- s = &dev->subdevices[SUBDEV_DA];
- /* analog out */
- s->type = COMEDI_SUBD_AO;
- /* backward pointer */
- dev->write_subdev = s;
- /* the subdevice receives as private structure the */
- /* usb-structure */
- s->private = NULL;
- /* are writable */
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_CMD_WRITE;
- /* 4 channels */
- s->n_chan = 4;
- /* length of the channellist */
- s->len_chanlist = 4;
- /* 12 bit resolution */
- s->maxdata = 0x0fff;
- /* bipolar range */
- s->range_table = (&range_usbdux_ao_range);
- /* callback */
- s->do_cmdtest = usbdux_ao_cmdtest;
- s->do_cmd = usbdux_ao_cmd;
- s->cancel = usbdux_ao_cancel;
- s->insn_read = usbdux_ao_insn_read;
- s->insn_write = usbdux_ao_insn_write;
-
- /* digital I/O */
- s = &dev->subdevices[SUBDEV_DIO];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 8;
- s->maxdata = 1;
- s->range_table = (&range_digital);
- s->insn_bits = usbdux_dio_insn_bits;
- s->insn_config = usbdux_dio_insn_config;
- /* we don't use it */
- s->private = NULL;
-
- /* counter */
- s = &dev->subdevices[SUBDEV_COUNTER];
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
- s->n_chan = 4;
- s->maxdata = 0xFFFF;
- s->insn_read = usbdux_counter_read;
- s->insn_write = usbdux_counter_write;
- s->insn_config = usbdux_counter_config;
-
- if (udev->high_speed) {
- /* timer / pwm */
- s = &dev->subdevices[SUBDEV_PWM];
- s->type = COMEDI_SUBD_PWM;
- s->subdev_flags = SDF_WRITABLE | SDF_PWM_HBRIDGE;
- s->n_chan = 8;
- /* this defines the max duty cycle resolution */
- s->maxdata = udev->size_pwm_buf;
- s->insn_write = usbdux_pwm_write;
- s->insn_read = usbdux_pwm_read;
- s->insn_config = usbdux_pwm_config;
- usbdux_pwm_period(dev, s, PWM_DEFAULT_PERIOD);
+ /* we need a malloc'ed buffer for usb_control_msg() */
+ tmp = kmalloc(1, GFP_KERNEL);
+ if (!tmp) {
+ kfree(buf);
+ return -ENOMEM;
}
- /* finally decide that it's attached */
- udev->attached = 1;
- up(&udev->sem);
+ /* stop the current firmware on the device */
+ *tmp = 1; /* 7f92 to one */
+ ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0),
+ USBDUX_FIRMWARE_CMD,
+ VENDOR_DIR_OUT,
+ USBDUX_CPU_CS, 0x0000,
+ tmp, 1,
+ BULK_TIMEOUT);
+ if (ret < 0) {
+ dev_err(dev->class_dev, "can not stop firmware\n");
+ goto done;
+ }
- dev_info(&udev->interface->dev, "comedi%d: attached to usbdux.\n",
- dev->minor);
+ /* upload the new firmware to the device */
+ ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0),
+ USBDUX_FIRMWARE_CMD,
+ VENDOR_DIR_OUT,
+ 0, 0x0000,
+ buf, size,
+ BULK_TIMEOUT);
+ if (ret < 0) {
+ dev_err(dev->class_dev, "firmware upload failed\n");
+ goto done;
+ }
+
+ /* start the new firmware on the device */
+ *tmp = 0; /* 7f92 to zero */
+ ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0),
+ USBDUX_FIRMWARE_CMD,
+ VENDOR_DIR_OUT,
+ USBDUX_CPU_CS, 0x0000,
+ tmp, 1,
+ BULK_TIMEOUT);
+ if (ret < 0)
+ dev_err(dev->class_dev, "can not start firmware\n");
- return 0;
+done:
+ kfree(tmp);
+ kfree(buf);
+ return ret;
}
-static int usbdux_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
+static int usbdux_alloc_usb_buffers(struct comedi_device *dev)
{
- struct usb_interface *uinterf = comedi_to_usb_interface(dev);
- int ret;
- struct usbduxsub *this_usbduxsub;
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct usbdux_private *devpriv = dev->private;
+ struct urb *urb;
+ int i;
- dev->private = NULL;
+ devpriv->dux_commands = kzalloc(SIZEOFDUXBUFFER, GFP_KERNEL);
+ devpriv->in_buf = kzalloc(SIZEINBUF, GFP_KERNEL);
+ devpriv->insn_buf = kzalloc(SIZEINSNBUF, GFP_KERNEL);
+ devpriv->ai_urbs = kcalloc(devpriv->n_ai_urbs, sizeof(void *),
+ GFP_KERNEL);
+ devpriv->ao_urbs = kcalloc(devpriv->n_ao_urbs, sizeof(void *),
+ GFP_KERNEL);
+ if (!devpriv->dux_commands || !devpriv->in_buf || !devpriv->insn_buf ||
+ !devpriv->ai_urbs || !devpriv->ao_urbs)
+ return -ENOMEM;
- down(&start_stop_sem);
- this_usbduxsub = usb_get_intfdata(uinterf);
- if (!this_usbduxsub || !this_usbduxsub->probed) {
- dev_err(dev->class_dev,
- "usbdux: error: auto_attach failed, not connected\n");
- ret = -ENODEV;
- } else if (this_usbduxsub->attached) {
- dev_err(dev->class_dev,
- "error: auto_attach failed, already attached\n");
- ret = -ENODEV;
- } else
- ret = usbdux_attach_common(dev, this_usbduxsub);
- up(&start_stop_sem);
- return ret;
-}
+ for (i = 0; i < devpriv->n_ai_urbs; i++) {
+ /* one frame: 1ms */
+ urb = usb_alloc_urb(1, GFP_KERNEL);
+ if (!urb)
+ return -ENOMEM;
+ devpriv->ai_urbs[i] = urb;
+
+ urb->dev = usb;
+ urb->context = dev;
+ urb->pipe = usb_rcvisocpipe(usb, 6);
+ urb->transfer_flags = URB_ISO_ASAP;
+ urb->transfer_buffer = kzalloc(SIZEINBUF, GFP_KERNEL);
+ if (!urb->transfer_buffer)
+ return -ENOMEM;
-static void usbdux_detach(struct comedi_device *dev)
-{
- struct usbduxsub *usb = dev->private;
-
- if (usb) {
- down(&usb->sem);
- dev->private = NULL;
- usb->attached = 0;
- usb->comedidev = NULL;
- up(&usb->sem);
+ urb->complete = usbduxsub_ai_isoc_irq;
+ urb->number_of_packets = 1;
+ urb->transfer_buffer_length = SIZEINBUF;
+ urb->iso_frame_desc[0].offset = 0;
+ urb->iso_frame_desc[0].length = SIZEINBUF;
}
-}
-static struct comedi_driver usbdux_driver = {
- .driver_name = "usbdux",
- .module = THIS_MODULE,
- .auto_attach = usbdux_auto_attach,
- .detach = usbdux_detach,
-};
-
-static void usbdux_firmware_request_complete_handler(const struct firmware *fw,
- void *context)
-{
- struct usbduxsub *usbduxsub_tmp = context;
- struct usb_interface *uinterf = usbduxsub_tmp->interface;
- int ret;
+ for (i = 0; i < devpriv->n_ao_urbs; i++) {
+ /* one frame: 1ms */
+ urb = usb_alloc_urb(1, GFP_KERNEL);
+ if (!urb)
+ return -ENOMEM;
+ devpriv->ao_urbs[i] = urb;
+
+ urb->dev = usb;
+ urb->context = dev;
+ urb->pipe = usb_sndisocpipe(usb, 2);
+ urb->transfer_flags = URB_ISO_ASAP;
+ urb->transfer_buffer = kzalloc(SIZEOUTBUF, GFP_KERNEL);
+ if (!urb->transfer_buffer)
+ return -ENOMEM;
- if (fw == NULL) {
- dev_err(&uinterf->dev,
- "Firmware complete handler without firmware!\n");
- return;
+ urb->complete = usbduxsub_ao_isoc_irq;
+ urb->number_of_packets = 1;
+ urb->transfer_buffer_length = SIZEOUTBUF;
+ urb->iso_frame_desc[0].offset = 0;
+ urb->iso_frame_desc[0].length = SIZEOUTBUF;
+ if (devpriv->high_speed)
+ urb->interval = 8; /* uframes */
+ else
+ urb->interval = 1; /* frames */
}
- /*
- * we need to upload the firmware here because fw will be
- * freed once we've left this function
- */
- ret = firmware_upload(usbduxsub_tmp, fw->data, fw->size);
+ /* pwm */
+ if (devpriv->pwm_buf_sz) {
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb)
+ return -ENOMEM;
+ devpriv->pwm_urb = urb;
- if (ret) {
- dev_err(&uinterf->dev,
- "Could not upload firmware (err=%d)\n", ret);
- goto out;
+ /* max bulk ep size in high speed */
+ urb->transfer_buffer = kzalloc(devpriv->pwm_buf_sz,
+ GFP_KERNEL);
+ if (!urb->transfer_buffer)
+ return -ENOMEM;
}
- comedi_usb_auto_config(uinterf, &usbdux_driver, 0);
- out:
- release_firmware(fw);
+
+ return 0;
}
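/*
 * Detail on the URB intervals chosen in usbdux_alloc_usb_buffers() above:
 * a high-speed bus is divided into 125 us micro-frames, so urb->interval
 * of 8 gives 8 * 125 us = 1 ms between isochronous packets, matching the
 * full-speed interval of 1 frame (1 ms).  Either way each URB carries a
 * single isochronous packet per millisecond.
 */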
-static int usbdux_usb_probe(struct usb_interface *uinterf,
- const struct usb_device_id *id)
+static void usbdux_free_usb_buffers(struct comedi_device *dev)
{
- struct usb_device *udev = interface_to_usbdev(uinterf);
- struct device *dev = &uinterf->dev;
+ struct usbdux_private *devpriv = dev->private;
+ struct urb *urb;
int i;
- int index;
- int ret;
-
- dev_dbg(dev, "comedi_: usbdux_: "
- "finding a free structure for the usb-device\n");
- down(&start_stop_sem);
- /* look for a free place in the usbdux array */
- index = -1;
- for (i = 0; i < NUMUSBDUX; i++) {
- if (!(usbduxsub[i].probed)) {
- index = i;
- break;
- }
- }
-
- /* no more space */
- if (index == -1) {
- dev_err(dev, "Too many usbdux-devices connected.\n");
- up(&start_stop_sem);
- return -EMFILE;
- }
- dev_dbg(dev, "comedi_: usbdux: "
- "usbduxsub[%d] is ready to connect to comedi.\n", index);
-
- sema_init(&(usbduxsub[index].sem), 1);
- /* save a pointer to the usb device */
- usbduxsub[index].usbdev = udev;
-
- /* 2.6: save the interface itself */
- usbduxsub[index].interface = uinterf;
- /* get the interface number from the interface */
- usbduxsub[index].ifnum = uinterf->altsetting->desc.bInterfaceNumber;
- /* hand the private data over to the usb subsystem */
- /* will be needed for disconnect */
- usb_set_intfdata(uinterf, &(usbduxsub[index]));
-
- dev_dbg(dev, "comedi_: usbdux: ifnum=%d\n", usbduxsub[index].ifnum);
-
- /* test if it is high speed (USB 2.0) */
- usbduxsub[index].high_speed =
- (usbduxsub[index].usbdev->speed == USB_SPEED_HIGH);
-
- /* create space for the commands of the DA converter */
- usbduxsub[index].dac_commands = kzalloc(NUMOUTCHANNELS, GFP_KERNEL);
- if (!usbduxsub[index].dac_commands) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- /* create space for the commands going to the usb device */
- usbduxsub[index].dux_commands = kzalloc(SIZEOFDUXBUFFER, GFP_KERNEL);
- if (!usbduxsub[index].dux_commands) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- /* create space for the in buffer and set it to zero */
- usbduxsub[index].in_buffer = kzalloc(SIZEINBUF, GFP_KERNEL);
- if (!(usbduxsub[index].in_buffer)) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- /* create space of the instruction buffer */
- usbduxsub[index].insn_buffer = kzalloc(SIZEINSNBUF, GFP_KERNEL);
- if (!(usbduxsub[index].insn_buffer)) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- /* create space for the outbuffer */
- usbduxsub[index].out_buffer = kzalloc(SIZEOUTBUF, GFP_KERNEL);
- if (!(usbduxsub[index].out_buffer)) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- /* setting to alternate setting 3: enabling iso ep and bulk ep. */
- i = usb_set_interface(usbduxsub[index].usbdev,
- usbduxsub[index].ifnum, 3);
- if (i < 0) {
- dev_err(dev, "comedi_: usbdux%d: "
- "could not set alternate setting 3 in high speed.\n",
- index);
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENODEV;
- }
- if (usbduxsub[index].high_speed)
- usbduxsub[index].num_in_buffers = NUMOFINBUFFERSHIGH;
- else
- usbduxsub[index].num_in_buffers = NUMOFINBUFFERSFULL;
-
- usbduxsub[index].urb_in =
- kcalloc(usbduxsub[index].num_in_buffers, sizeof(struct urb *),
- GFP_KERNEL);
- if (!(usbduxsub[index].urb_in)) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- for (i = 0; i < usbduxsub[index].num_in_buffers; i++) {
- /* one frame: 1ms */
- usbduxsub[index].urb_in[i] = usb_alloc_urb(1, GFP_KERNEL);
- if (usbduxsub[index].urb_in[i] == NULL) {
- dev_err(dev, "comedi_: usbdux%d: "
- "Could not alloc. urb(%d)\n", index, i);
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
+ urb = devpriv->pwm_urb;
+ if (urb) {
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
+ }
+ if (devpriv->ao_urbs) {
+ for (i = 0; i < devpriv->n_ao_urbs; i++) {
+ urb = devpriv->ao_urbs[i];
+ if (urb) {
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
+ }
}
- usbduxsub[index].urb_in[i]->dev = usbduxsub[index].usbdev;
- /* will be filled later with a pointer to the comedi-device */
- /* and ONLY then the urb should be submitted */
- usbduxsub[index].urb_in[i]->context = NULL;
- usbduxsub[index].urb_in[i]->pipe =
- usb_rcvisocpipe(usbduxsub[index].usbdev, ISOINEP);
- usbduxsub[index].urb_in[i]->transfer_flags = URB_ISO_ASAP;
- usbduxsub[index].urb_in[i]->transfer_buffer =
- kzalloc(SIZEINBUF, GFP_KERNEL);
- if (!(usbduxsub[index].urb_in[i]->transfer_buffer)) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
+ kfree(devpriv->ao_urbs);
+ }
+ if (devpriv->ai_urbs) {
+ for (i = 0; i < devpriv->n_ai_urbs; i++) {
+ urb = devpriv->ai_urbs[i];
+ if (urb) {
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
+ }
}
- usbduxsub[index].urb_in[i]->complete = usbduxsub_ai_isoc_irq;
- usbduxsub[index].urb_in[i]->number_of_packets = 1;
- usbduxsub[index].urb_in[i]->transfer_buffer_length = SIZEINBUF;
- usbduxsub[index].urb_in[i]->iso_frame_desc[0].offset = 0;
- usbduxsub[index].urb_in[i]->iso_frame_desc[0].length = SIZEINBUF;
+ kfree(devpriv->ai_urbs);
}
+ kfree(devpriv->insn_buf);
+ kfree(devpriv->in_buf);
+ kfree(devpriv->dux_commands);
+}
- /* out */
- if (usbduxsub[index].high_speed)
- usbduxsub[index].num_out_buffers = NUMOFOUTBUFFERSHIGH;
- else
- usbduxsub[index].num_out_buffers = NUMOFOUTBUFFERSFULL;
-
- usbduxsub[index].urb_out =
- kcalloc(usbduxsub[index].num_out_buffers, sizeof(struct urb *),
- GFP_KERNEL);
- if (!(usbduxsub[index].urb_out)) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
+static int usbdux_auto_attach(struct comedi_device *dev,
+ unsigned long context_unused)
+{
+ struct usb_interface *intf = comedi_to_usb_interface(dev);
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct usbdux_private *devpriv;
+ struct comedi_subdevice *s;
+ int ret;
+
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
+ if (!devpriv)
return -ENOMEM;
- }
- for (i = 0; i < usbduxsub[index].num_out_buffers; i++) {
- /* one frame: 1ms */
- usbduxsub[index].urb_out[i] = usb_alloc_urb(1, GFP_KERNEL);
- if (usbduxsub[index].urb_out[i] == NULL) {
- dev_err(dev, "comedi_: usbdux%d: "
- "Could not alloc. urb(%d)\n", index, i);
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- usbduxsub[index].urb_out[i]->dev = usbduxsub[index].usbdev;
- /* will be filled later with a pointer to the comedi-device */
- /* and ONLY then the urb should be submitted */
- usbduxsub[index].urb_out[i]->context = NULL;
- usbduxsub[index].urb_out[i]->pipe =
- usb_sndisocpipe(usbduxsub[index].usbdev, ISOOUTEP);
- usbduxsub[index].urb_out[i]->transfer_flags = URB_ISO_ASAP;
- usbduxsub[index].urb_out[i]->transfer_buffer =
- kzalloc(SIZEOUTBUF, GFP_KERNEL);
- if (!(usbduxsub[index].urb_out[i]->transfer_buffer)) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- usbduxsub[index].urb_out[i]->complete = usbduxsub_ao_isoc_irq;
- usbduxsub[index].urb_out[i]->number_of_packets = 1;
- usbduxsub[index].urb_out[i]->transfer_buffer_length = SIZEOUTBUF;
- usbduxsub[index].urb_out[i]->iso_frame_desc[0].offset = 0;
- usbduxsub[index].urb_out[i]->iso_frame_desc[0].length =
- SIZEOUTBUF;
- if (usbduxsub[index].high_speed) {
- /* uframes */
- usbduxsub[index].urb_out[i]->interval = 8;
- } else {
- /* frames */
- usbduxsub[index].urb_out[i]->interval = 1;
- }
- }
- /* pwm */
- if (usbduxsub[index].high_speed) {
- /* max bulk ep size in high speed */
- usbduxsub[index].size_pwm_buf = 512;
- usbduxsub[index].urb_pwm = usb_alloc_urb(0, GFP_KERNEL);
- if (usbduxsub[index].urb_pwm == NULL) {
- dev_err(dev, "comedi_: usbdux%d: "
- "Could not alloc. pwm urb\n", index);
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- usbduxsub[index].urb_pwm->transfer_buffer =
- kzalloc(usbduxsub[index].size_pwm_buf, GFP_KERNEL);
- if (!(usbduxsub[index].urb_pwm->transfer_buffer)) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
+ sema_init(&devpriv->sem, 1);
+
+ usb_set_intfdata(intf, devpriv);
+
+ devpriv->high_speed = (usb->speed == USB_SPEED_HIGH);
+ if (devpriv->high_speed) {
+ devpriv->n_ai_urbs = NUMOFINBUFFERSHIGH;
+ devpriv->n_ao_urbs = NUMOFOUTBUFFERSHIGH;
+ devpriv->pwm_buf_sz = 512;
} else {
- usbduxsub[index].urb_pwm = NULL;
- usbduxsub[index].size_pwm_buf = 0;
+ devpriv->n_ai_urbs = NUMOFINBUFFERSFULL;
+ devpriv->n_ao_urbs = NUMOFOUTBUFFERSFULL;
}
- usbduxsub[index].ai_cmd_running = 0;
- usbduxsub[index].ao_cmd_running = 0;
- usbduxsub[index].pwm_cmd_running = 0;
+ ret = usbdux_alloc_usb_buffers(dev);
+ if (ret)
+ return ret;
- /* we've reached the bottom of the function */
- usbduxsub[index].probed = 1;
- up(&start_stop_sem);
+ /* setting to alternate setting 3: enabling iso ep and bulk ep. */
+ ret = usb_set_interface(usb, intf->altsetting->desc.bInterfaceNumber,
+ 3);
+ if (ret < 0) {
+ dev_err(dev->class_dev,
+ "could not set alternate setting 3 in high speed\n");
+ return ret;
+ }
- ret = request_firmware_nowait(THIS_MODULE,
- FW_ACTION_HOTPLUG,
- FIRMWARE,
- &udev->dev,
- GFP_KERNEL,
- usbduxsub + index,
- usbdux_firmware_request_complete_handler);
+ ret = comedi_load_firmware(dev, &usb->dev, USBDUX_FIRMWARE,
+ usbdux_firmware_upload, 0);
+ if (ret < 0)
+ return ret;
- if (ret) {
- dev_err(dev, "Could not load firmware (err=%d)\n", ret);
+ ret = comedi_alloc_subdevices(dev, (devpriv->high_speed) ? 5 : 4);
+ if (ret)
return ret;
+
+ /* Analog Input subdevice */
+ s = &dev->subdevices[0];
+ dev->read_subdev = s;
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ;
+ s->n_chan = 8;
+ s->maxdata = 0x0fff;
+ s->len_chanlist = 8;
+ s->range_table = &range_usbdux_ai_range;
+ s->insn_read = usbdux_ai_insn_read;
+ s->do_cmdtest = usbdux_ai_cmdtest;
+ s->do_cmd = usbdux_ai_cmd;
+ s->cancel = usbdux_ai_cancel;
+
+ /* Analog Output subdevice */
+ s = &dev->subdevices[1];
+ dev->write_subdev = s;
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_CMD_WRITE;
+ s->n_chan = USBDUX_NUM_AO_CHAN;
+ s->maxdata = 0x0fff;
+ s->len_chanlist = s->n_chan;
+ s->range_table = &range_usbdux_ao_range;
+ s->do_cmdtest = usbdux_ao_cmdtest;
+ s->do_cmd = usbdux_ao_cmd;
+ s->cancel = usbdux_ao_cancel;
+ s->insn_read = usbdux_ao_insn_read;
+ s->insn_write = usbdux_ao_insn_write;
+
+ /* Digital I/O subdevice */
+ s = &dev->subdevices[2];
+ s->type = COMEDI_SUBD_DIO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->n_chan = 8;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = usbdux_dio_insn_bits;
+ s->insn_config = usbdux_dio_insn_config;
+
+ /* Counter subdevice */
+ s = &dev->subdevices[3];
+ s->type = COMEDI_SUBD_COUNTER;
+ s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
+ s->n_chan = 4;
+ s->maxdata = 0xffff;
+ s->insn_read = usbdux_counter_read;
+ s->insn_write = usbdux_counter_write;
+ s->insn_config = usbdux_counter_config;
+
+ if (devpriv->high_speed) {
+ /* PWM subdevice */
+ s = &dev->subdevices[4];
+ s->type = COMEDI_SUBD_PWM;
+ s->subdev_flags = SDF_WRITABLE | SDF_PWM_HBRIDGE;
+ s->n_chan = 8;
+ s->maxdata = devpriv->pwm_buf_sz;
+ s->insn_write = usbdux_pwm_write;
+ s->insn_config = usbdux_pwm_config;
+
+ usbdux_pwm_period(dev, s, PWM_DEFAULT_PERIOD);
}
- dev_info(dev, "comedi_: usbdux%d "
- "has been successfully initialised.\n", index);
- /* success */
return 0;
}
-static void usbdux_usb_disconnect(struct usb_interface *intf)
+static void usbdux_detach(struct comedi_device *dev)
{
- struct usbduxsub *usbduxsub_tmp = usb_get_intfdata(intf);
- struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_interface *intf = comedi_to_usb_interface(dev);
+ struct usbdux_private *devpriv = dev->private;
- if (!usbduxsub_tmp) {
- dev_err(&intf->dev,
- "comedi_: disconnect called with null pointer.\n");
- return;
- }
- if (usbduxsub_tmp->usbdev != udev) {
- dev_err(&intf->dev, "comedi_: BUG! called with wrong ptr!!!\n");
+ usb_set_intfdata(intf, NULL);
+
+ if (!devpriv)
return;
- }
- comedi_usb_auto_unconfig(intf);
- down(&start_stop_sem);
- down(&usbduxsub_tmp->sem);
- tidy_up(usbduxsub_tmp);
- up(&usbduxsub_tmp->sem);
- up(&start_stop_sem);
- dev_dbg(&intf->dev, "comedi_: disconnected from the usb\n");
+
+ down(&devpriv->sem);
+
+ /* force unlink all urbs */
+ usbdux_pwm_stop(dev, 1);
+ usbdux_ao_stop(dev, 1);
+ usbdux_ai_stop(dev, 1);
+
+ usbdux_free_usb_buffers(dev);
+
+ up(&devpriv->sem);
+}
+
+static struct comedi_driver usbdux_driver = {
+ .driver_name = "usbdux",
+ .module = THIS_MODULE,
+ .auto_attach = usbdux_auto_attach,
+ .detach = usbdux_detach,
+};
+
+static int usbdux_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ return comedi_usb_auto_config(intf, &usbdux_driver, 0);
}
static const struct usb_device_id usbdux_usb_table[] = {
@@ -2669,13 +1821,12 @@ static const struct usb_device_id usbdux_usb_table[] = {
{ USB_DEVICE(0x13d8, 0x0002) },
{ }
};
-
MODULE_DEVICE_TABLE(usb, usbdux_usb_table);
static struct usb_driver usbdux_usb_driver = {
.name = "usbdux",
.probe = usbdux_usb_probe,
- .disconnect = usbdux_usb_disconnect,
+ .disconnect = comedi_usb_auto_unconfig,
.id_table = usbdux_usb_table,
};
module_comedi_usb_driver(usbdux_driver, usbdux_usb_driver);
@@ -2683,4 +1834,4 @@ module_comedi_usb_driver(usbdux_driver, usbdux_usb_driver);
MODULE_AUTHOR("Bernd Porr, BerndPorr@f2s.com");
MODULE_DESCRIPTION("Stirling/ITL USB-DUX -- Bernd.Porr@f2s.com");
MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(FIRMWARE);
+MODULE_FIRMWARE(USBDUX_FIRMWARE);
diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
index 7f95af3..9707dd1 100644
--- a/drivers/staging/comedi/drivers/usbduxfast.c
+++ b/drivers/staging/comedi/drivers/usbduxfast.c
@@ -10,10 +10,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
@@ -40,7 +36,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
-#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -60,6 +55,7 @@
* constants for "firmware" upload and download
*/
#define FIRMWARE "usbduxfast_firmware.bin"
+#define FIRMWARE_MAX_LEN 0x2000
#define USBDUXFASTSUB_FIRMWARE 0xA0
#define VENDOR_DIR_IN 0xC0
#define VENDOR_DIR_OUT 0x40
@@ -112,7 +108,7 @@
/*
* size of the buffer for the dux commands in bytes
*/
-#define SIZEOFDUXBUFFER 256
+#define SIZEOFDUXBUF 256
/*
* number of in-URBs which receive the data: min=5
@@ -120,16 +116,6 @@
#define NUMOFINBUFFERSHIGH 10
/*
- * total number of usbduxfast devices
- */
-#define NUMUSBDUXFAST 16
-
-/*
- * analogue in subdevice
- */
-#define SUBDEV_AD 0
-
-/*
* min delay steps for more than one channel
* basically when the mux gives up ;-)
*
@@ -161,143 +147,83 @@ static const struct comedi_lrange range_usbduxfast_ai_range = {
* this is the structure which holds all the data of this driver
* one sub device just now: A/D
*/
-struct usbduxfastsub_s {
- int attached; /* is attached? */
- int probed; /* is it associated with a subdevice? */
- struct usb_device *usbdev; /* pointer to the usb-device */
- struct urb *urbIn; /* BULK-transfer handling: urb */
- int8_t *transfer_buffer;
- int16_t *insnBuffer; /* input buffer for single insn */
- int ifnum; /* interface number */
- struct usb_interface *interface; /* interface structure */
- /* comedi device for the interrupt context */
- struct comedi_device *comedidev;
+struct usbduxfast_private {
+ struct urb *urb; /* BULK-transfer handling: urb */
+ uint8_t *duxbuf;
+ int8_t *inbuf;
short int ai_cmd_running; /* asynchronous command is running */
	short int ai_continous;	/* continuous acquisition */
long int ai_sample_count; /* number of samples to acquire */
- uint8_t *dux_commands; /* commands */
int ignore; /* counter which ignores the first
buffers */
struct semaphore sem;
};
/*
- * The pointer to the private usb-data of the driver
- * is also the private data for the comedi-device.
- * This has to be global as the usb subsystem needs
- * global variables. The other reason is that this
- * structure must be there _before_ any comedi
- * command is issued. The usb subsystem must be
- * initialised before comedi can access it.
- */
-static struct usbduxfastsub_s usbduxfastsub[NUMUSBDUXFAST];
-
-static DEFINE_SEMAPHORE(start_stop_sem);
-
-/*
* bulk transfers to usbduxfast
*/
#define SENDADCOMMANDS 0
#define SENDINITEP6 1
-static int send_dux_commands(struct usbduxfastsub_s *udfs, int cmd_type)
+static int usbduxfast_send_cmd(struct comedi_device *dev, int cmd_type)
{
- int tmp, nsent;
-
- udfs->dux_commands[0] = cmd_type;
-
-#ifdef CONFIG_COMEDI_DEBUG
- printk(KERN_DEBUG "comedi%d: usbduxfast: dux_commands: ",
- udfs->comedidev->minor);
- for (tmp = 0; tmp < SIZEOFDUXBUFFER; tmp++)
- printk(" %02x", udfs->dux_commands[tmp]);
- printk("\n");
-#endif
-
- tmp = usb_bulk_msg(udfs->usbdev,
- usb_sndbulkpipe(udfs->usbdev, CHANNELLISTEP),
- udfs->dux_commands, SIZEOFDUXBUFFER, &nsent, 10000);
- if (tmp < 0)
- dev_err(&udfs->interface->dev,
- "could not transmit dux_commands to the usb-device, err=%d\n",
- tmp);
- return tmp;
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct usbduxfast_private *devpriv = dev->private;
+ int nsent;
+ int ret;
+
+ devpriv->duxbuf[0] = cmd_type;
+
+ ret = usb_bulk_msg(usb, usb_sndbulkpipe(usb, CHANNELLISTEP),
+ devpriv->duxbuf, SIZEOFDUXBUF,
+ &nsent, 10000);
+ if (ret < 0)
+ dev_err(dev->class_dev,
+ "could not transmit command to the usb-device, err=%d\n",
+ ret);
+ return ret;
}
-/*
- * Stops the data acquision.
- * It should be safe to call this function from any context.
- */
-static int usbduxfastsub_unlink_InURBs(struct usbduxfastsub_s *udfs)
+static void usbduxfast_cmd_data(struct comedi_device *dev, int index,
+ uint8_t len, uint8_t op, uint8_t out,
+ uint8_t log)
{
- int j = 0;
- int err = 0;
+ struct usbduxfast_private *devpriv = dev->private;
- if (udfs && udfs->urbIn) {
- udfs->ai_cmd_running = 0;
- /* waits until a running transfer is over */
- usb_kill_urb(udfs->urbIn);
- j = 0;
- }
-#ifdef CONFIG_COMEDI_DEBUG
- printk(KERN_DEBUG "comedi: usbduxfast: unlinked InURB: res=%d\n", j);
-#endif
- return err;
+ /* Set the GPIF bytes, the first byte is the command byte */
+ devpriv->duxbuf[1 + 0x00 + index] = len;
+ devpriv->duxbuf[1 + 0x08 + index] = op;
+ devpriv->duxbuf[1 + 0x10 + index] = out;
+ devpriv->duxbuf[1 + 0x18 + index] = log;
}
-/*
- * This will stop a running acquisition operation.
- * Is called from within this driver from both the
- * interrupt context and from comedi.
- */
-static int usbduxfast_ai_stop(struct usbduxfastsub_s *udfs, int do_unlink)
+static int usbduxfast_ai_stop(struct comedi_device *dev, int do_unlink)
{
- int ret = 0;
+ struct usbduxfast_private *devpriv = dev->private;
- if (!udfs) {
- pr_err("%s: udfs=NULL!\n", __func__);
- return -EFAULT;
- }
-#ifdef CONFIG_COMEDI_DEBUG
- printk(KERN_DEBUG "comedi: usbduxfast_ai_stop\n");
-#endif
+	/* stop acquisition */
+ devpriv->ai_cmd_running = 0;
- udfs->ai_cmd_running = 0;
-
- if (do_unlink)
- /* stop aquistion */
- ret = usbduxfastsub_unlink_InURBs(udfs);
+ if (do_unlink && devpriv->urb) {
+ /* kill the running transfer */
+ usb_kill_urb(devpriv->urb);
+ }
- return ret;
+ return 0;
}
-/*
- * This will cancel a running acquisition operation.
- * This is called by comedi but never from inside the driver.
- */
static int usbduxfast_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct usbduxfastsub_s *udfs;
+ struct usbduxfast_private *devpriv = dev->private;
int ret;
- /* force unlink of all urbs */
-#ifdef CONFIG_COMEDI_DEBUG
- printk(KERN_DEBUG "comedi: usbduxfast_ai_cancel\n");
-#endif
- udfs = dev->private;
- if (!udfs) {
- dev_err(dev->class_dev, "%s: udfs=NULL\n", __func__);
+ if (!devpriv)
return -EFAULT;
- }
- down(&udfs->sem);
- if (!udfs->probed) {
- up(&udfs->sem);
- return -ENODEV;
- }
- /* unlink */
- ret = usbduxfast_ai_stop(udfs, 1);
- up(&udfs->sem);
+
+ down(&devpriv->sem);
+ ret = usbduxfast_ai_stop(dev, 1);
+ up(&devpriv->sem);
return ret;
}
@@ -306,32 +232,17 @@ static int usbduxfast_ai_cancel(struct comedi_device *dev,
* analogue IN
* interrupt service routine
*/
-static void usbduxfastsub_ai_Irq(struct urb *urb)
+static void usbduxfast_ai_interrupt(struct urb *urb)
{
+ struct comedi_device *dev = urb->context;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_async *async = s->async;
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct usbduxfast_private *devpriv = dev->private;
int n, err;
- struct usbduxfastsub_s *udfs;
- struct comedi_device *this_comedidev;
- struct comedi_subdevice *s;
- /* sanity checks - is the urb there? */
- if (!urb) {
- pr_err("ao int-handler called with urb=NULL!\n");
- return;
- }
- /* the context variable points to the subdevice */
- this_comedidev = urb->context;
- if (!this_comedidev) {
- pr_err("urb context is a NULL pointer!\n");
- return;
- }
- /* the private structure of the subdevice is usbduxfastsub_s */
- udfs = this_comedidev->private;
- if (!udfs) {
- pr_err("private of comedi subdev is a NULL pointer!\n");
- return;
- }
/* are we running a command? */
- if (unlikely(!udfs->ai_cmd_running)) {
+ if (unlikely(!devpriv->ai_cmd_running)) {
/*
* not running a command
* do not continue execution if no asynchronous command
@@ -340,13 +251,6 @@ static void usbduxfastsub_ai_Irq(struct urb *urb)
return;
}
- if (unlikely(!udfs->attached)) {
- /* no comedi device there */
- return;
- }
- /* subdevice which is the AD converter */
- s = &this_comedidev->subdevices[SUBDEV_AD];
-
/* first we test if something unusual has just happened */
switch (urb->status) {
case 0:
@@ -361,189 +265,93 @@ static void usbduxfastsub_ai_Irq(struct urb *urb)
case -ESHUTDOWN:
case -ECONNABORTED:
/* tell this comedi */
- s->async->events |= COMEDI_CB_EOA;
- s->async->events |= COMEDI_CB_ERROR;
- comedi_event(udfs->comedidev, s);
+ async->events |= COMEDI_CB_EOA;
+ async->events |= COMEDI_CB_ERROR;
+ comedi_event(dev, s);
/* stop the transfer w/o unlink */
- usbduxfast_ai_stop(udfs, 0);
+ usbduxfast_ai_stop(dev, 0);
return;
default:
pr_err("non-zero urb status received in ai intr context: %d\n",
urb->status);
- s->async->events |= COMEDI_CB_EOA;
- s->async->events |= COMEDI_CB_ERROR;
- comedi_event(udfs->comedidev, s);
- usbduxfast_ai_stop(udfs, 0);
+ async->events |= COMEDI_CB_EOA;
+ async->events |= COMEDI_CB_ERROR;
+ comedi_event(dev, s);
+ usbduxfast_ai_stop(dev, 0);
return;
}
- if (!udfs->ignore) {
- if (!udfs->ai_continous) {
+ if (!devpriv->ignore) {
+ if (!devpriv->ai_continous) {
/* not continuous, fixed number of samples */
n = urb->actual_length / sizeof(uint16_t);
- if (unlikely(udfs->ai_sample_count < n)) {
- /*
- * we have send only a fraction of the bytes
- * received
- */
+ if (unlikely(devpriv->ai_sample_count < n)) {
+ unsigned int num_bytes;
+
+ /* partial sample received */
+ num_bytes = devpriv->ai_sample_count *
+ sizeof(uint16_t);
cfc_write_array_to_buffer(s,
urb->transfer_buffer,
- udfs->ai_sample_count
- * sizeof(uint16_t));
- usbduxfast_ai_stop(udfs, 0);
+ num_bytes);
+ usbduxfast_ai_stop(dev, 0);
 				/* tell comedi that the acquisition is over */
- s->async->events |= COMEDI_CB_EOA;
- comedi_event(udfs->comedidev, s);
+ async->events |= COMEDI_CB_EOA;
+ comedi_event(dev, s);
return;
}
- udfs->ai_sample_count -= n;
+ devpriv->ai_sample_count -= n;
}
/* write the full buffer to comedi */
err = cfc_write_array_to_buffer(s, urb->transfer_buffer,
urb->actual_length);
if (unlikely(err == 0)) {
/* buffer overflow */
- usbduxfast_ai_stop(udfs, 0);
+ usbduxfast_ai_stop(dev, 0);
return;
}
/* tell comedi that data is there */
- comedi_event(udfs->comedidev, s);
-
+ comedi_event(dev, s);
} else {
/* ignore this packet */
- udfs->ignore--;
+ devpriv->ignore--;
}
/*
* command is still running
* resubmit urb for BULK transfer
*/
- urb->dev = udfs->usbdev;
+ urb->dev = usb;
urb->status = 0;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
- dev_err(&urb->dev->dev,
+ dev_err(dev->class_dev,
"urb resubm failed: %d", err);
- s->async->events |= COMEDI_CB_EOA;
- s->async->events |= COMEDI_CB_ERROR;
- comedi_event(udfs->comedidev, s);
- usbduxfast_ai_stop(udfs, 0);
- }
-}
-
-static int usbduxfastsub_start(struct usbduxfastsub_s *udfs)
-{
- int ret;
- unsigned char *local_transfer_buffer;
-
- local_transfer_buffer = kmalloc(1, GFP_KERNEL);
- if (!local_transfer_buffer)
- return -ENOMEM;
-
- /* 7f92 to zero */
- *local_transfer_buffer = 0;
- /* bRequest, "Firmware" */
- ret = usb_control_msg(udfs->usbdev, usb_sndctrlpipe(udfs->usbdev, 0),
- USBDUXFASTSUB_FIRMWARE,
- VENDOR_DIR_OUT, /* bmRequestType */
- USBDUXFASTSUB_CPUCS, /* Value */
- 0x0000, /* Index */
- /* address of the transfer buffer */
- local_transfer_buffer,
- 1, /* Length */
- EZTIMEOUT); /* Timeout */
- if (ret < 0)
- dev_err(&udfs->interface->dev,
- "control msg failed (start)\n");
-
- kfree(local_transfer_buffer);
- return ret;
-}
-
-static int usbduxfastsub_stop(struct usbduxfastsub_s *udfs)
-{
- int ret;
- unsigned char *local_transfer_buffer;
-
- local_transfer_buffer = kmalloc(1, GFP_KERNEL);
- if (!local_transfer_buffer)
- return -ENOMEM;
-
- /* 7f92 to one */
- *local_transfer_buffer = 1;
- /* bRequest, "Firmware" */
- ret = usb_control_msg(udfs->usbdev, usb_sndctrlpipe(udfs->usbdev, 0),
- USBDUXFASTSUB_FIRMWARE,
- VENDOR_DIR_OUT, /* bmRequestType */
- USBDUXFASTSUB_CPUCS, /* Value */
- 0x0000, /* Index */
- local_transfer_buffer, 1, /* Length */
- EZTIMEOUT); /* Timeout */
- if (ret < 0)
- dev_err(&udfs->interface->dev,
- "control msg failed (stop)\n");
-
- kfree(local_transfer_buffer);
- return ret;
-}
-
-static int usbduxfastsub_upload(struct usbduxfastsub_s *udfs,
- unsigned char *local_transfer_buffer,
- unsigned int startAddr, unsigned int len)
-{
- int ret;
-
-#ifdef CONFIG_COMEDI_DEBUG
- printk(KERN_DEBUG "comedi: usbduxfast: uploading %d bytes", len);
- printk(KERN_DEBUG " to addr %d, first byte=%d.\n",
- startAddr, local_transfer_buffer[0]);
-#endif
- /* brequest, firmware */
- ret = usb_control_msg(udfs->usbdev, usb_sndctrlpipe(udfs->usbdev, 0),
- USBDUXFASTSUB_FIRMWARE,
- VENDOR_DIR_OUT, /* bmRequestType */
- startAddr, /* value */
- 0x0000, /* index */
- /* our local safe buffer */
- local_transfer_buffer,
- len, /* length */
- EZTIMEOUT); /* timeout */
-
-#ifdef CONFIG_COMEDI_DEBUG
- printk(KERN_DEBUG "comedi_: usbduxfast: result=%d\n", ret);
-#endif
-
- if (ret < 0) {
- dev_err(&udfs->interface->dev, "uppload failed\n");
- return ret;
+ async->events |= COMEDI_CB_EOA;
+ async->events |= COMEDI_CB_ERROR;
+ comedi_event(dev, s);
+ usbduxfast_ai_stop(dev, 0);
}
-
- return 0;
}
-static int usbduxfastsub_submit_InURBs(struct usbduxfastsub_s *udfs)
+static int usbduxfast_submit_urb(struct comedi_device *dev)
{
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct usbduxfast_private *devpriv = dev->private;
int ret;
- if (!udfs)
+ if (!devpriv)
return -EFAULT;
- usb_fill_bulk_urb(udfs->urbIn, udfs->usbdev,
- usb_rcvbulkpipe(udfs->usbdev, BULKINEP),
- udfs->transfer_buffer,
- SIZEINBUF, usbduxfastsub_ai_Irq, udfs->comedidev);
-
-#ifdef CONFIG_COMEDI_DEBUG
- printk(KERN_DEBUG "comedi%d: usbduxfast: submitting in-urb: "
- "0x%p,0x%p\n", udfs->comedidev->minor, udfs->urbIn->context,
- udfs->urbIn->dev);
-#endif
- ret = usb_submit_urb(udfs->urbIn, GFP_ATOMIC);
+ usb_fill_bulk_urb(devpriv->urb, usb, usb_rcvbulkpipe(usb, BULKINEP),
+ devpriv->inbuf, SIZEINBUF,
+ usbduxfast_ai_interrupt, dev);
+
+ ret = usb_submit_urb(devpriv->urb, GFP_ATOMIC);
if (ret) {
- dev_err(&udfs->interface->dev,
- "ai: usb_submit_urb error %d\n", ret);
+ dev_err(dev->class_dev, "usb_submit_urb error %d\n", ret);
return ret;
}
return 0;
@@ -553,13 +361,9 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
- struct usbduxfastsub_s *udfs = dev->private;
int err = 0;
long int steps, tmp;
- int minSamplPer;
-
- if (!udfs->probed)
- return -ENODEV;
+ int min_sample_period;
/* Step 1 : check if triggers are trivially valid */
@@ -601,14 +405,14 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev,
err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
if (cmd->chanlist_len == 1)
- minSamplPer = 1;
+ min_sample_period = 1;
else
- minSamplPer = MIN_SAMPLING_PERIOD;
+ min_sample_period = MIN_SAMPLING_PERIOD;
if (cmd->convert_src == TRIG_TIMER) {
steps = cmd->convert_arg * 30;
- if (steps < (minSamplPer * 1000))
- steps = minSamplPer * 1000;
+ if (steps < (min_sample_period * 1000))
+ steps = min_sample_period * 1000;
if (steps > (MAX_SAMPLING_PERIOD * 1000))
steps = MAX_SAMPLING_PERIOD * 1000;
@@ -650,80 +454,53 @@ static int usbduxfast_ai_inttrig(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int trignum)
{
+ struct usbduxfast_private *devpriv = dev->private;
int ret;
- struct usbduxfastsub_s *udfs = dev->private;
- if (!udfs)
+ if (!devpriv)
return -EFAULT;
- down(&udfs->sem);
- if (!udfs->probed) {
- up(&udfs->sem);
- return -ENODEV;
- }
-#ifdef CONFIG_COMEDI_DEBUG
- printk(KERN_DEBUG "comedi%d: usbduxfast_ai_inttrig\n", dev->minor);
-#endif
+ down(&devpriv->sem);
if (trignum != 0) {
- dev_err(dev->class_dev, "%s: invalid trignum\n", __func__);
- up(&udfs->sem);
+ dev_err(dev->class_dev, "invalid trignum\n");
+ up(&devpriv->sem);
return -EINVAL;
}
- if (!udfs->ai_cmd_running) {
- udfs->ai_cmd_running = 1;
- ret = usbduxfastsub_submit_InURBs(udfs);
+ if (!devpriv->ai_cmd_running) {
+ devpriv->ai_cmd_running = 1;
+ ret = usbduxfast_submit_urb(dev);
if (ret < 0) {
- dev_err(dev->class_dev,
- "%s: urbSubmit: err=%d\n", __func__, ret);
- udfs->ai_cmd_running = 0;
- up(&udfs->sem);
+ dev_err(dev->class_dev, "urbSubmit: err=%d\n", ret);
+ devpriv->ai_cmd_running = 0;
+ up(&devpriv->sem);
return ret;
}
s->async->inttrig = NULL;
} else {
- dev_err(dev->class_dev,
- "ai_inttrig but acqu is already running\n");
+ dev_err(dev->class_dev, "ai is already running\n");
}
- up(&udfs->sem);
+ up(&devpriv->sem);
return 1;
}
-/*
- * offsets for the GPIF bytes
- * the first byte is the command byte
- */
-#define LENBASE (1+0x00)
-#define OPBASE (1+0x08)
-#define OUTBASE (1+0x10)
-#define LOGBASE (1+0x18)
-
static int usbduxfast_ai_cmd(struct comedi_device *dev,
struct comedi_subdevice *s)
{
+ struct usbduxfast_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
unsigned int chan, gain, rngmask = 0xff;
int i, j, ret;
- struct usbduxfastsub_s *udfs;
int result;
long steps, steps_tmp;
-#ifdef CONFIG_COMEDI_DEBUG
- printk(KERN_DEBUG "comedi%d: usbduxfast_ai_cmd\n", dev->minor);
-#endif
- udfs = dev->private;
- if (!udfs)
+ if (!devpriv)
return -EFAULT;
- down(&udfs->sem);
- if (!udfs->probed) {
- up(&udfs->sem);
- return -ENODEV;
- }
- if (udfs->ai_cmd_running) {
- dev_err(dev->class_dev,
- "ai_cmd not possible. Another ai_cmd is running.\n");
- up(&udfs->sem);
+ down(&devpriv->sem);
+ if (devpriv->ai_cmd_running) {
+ dev_err(dev->class_dev, "ai_cmd not possible\n");
+ up(&devpriv->sem);
return -EBUSY;
}
/* set current channel of the running acquisition to zero */
@@ -733,7 +510,7 @@ static int usbduxfast_ai_cmd(struct comedi_device *dev,
* ignore the first buffers from the device if there
* is an error condition
*/
- udfs->ignore = PACKETS_TO_IGNORE;
+ devpriv->ignore = PACKETS_TO_IGNORE;
if (cmd->chanlist_len > 0) {
gain = CR_RANGE(cmd->chanlist[0]);
@@ -741,20 +518,19 @@ static int usbduxfast_ai_cmd(struct comedi_device *dev,
chan = CR_CHAN(cmd->chanlist[i]);
if (chan != i) {
dev_err(dev->class_dev,
- "cmd is accepting only consecutive channels.\n");
- up(&udfs->sem);
+ "channels are not consecutive\n");
+ up(&devpriv->sem);
return -EINVAL;
}
if ((gain != CR_RANGE(cmd->chanlist[i]))
&& (cmd->chanlist_len > 3)) {
dev_err(dev->class_dev,
- "the gain must be the same for all channels.\n");
- up(&udfs->sem);
+ "gain must be the same for all channels\n");
+ up(&devpriv->sem);
return -EINVAL;
}
if (i >= NUMCHANNELS) {
- dev_err(dev->class_dev,
- "channel list too long\n");
+ dev_err(dev->class_dev, "chanlist too long\n");
break;
}
}
@@ -762,8 +538,8 @@ static int usbduxfast_ai_cmd(struct comedi_device *dev,
steps = 0;
if (cmd->scan_begin_src == TRIG_TIMER) {
dev_err(dev->class_dev,
- "scan_begin_src==TRIG_TIMER not valid.\n");
- up(&udfs->sem);
+ "scan_begin_src==TRIG_TIMER not valid\n");
+ up(&devpriv->sem);
return -EINVAL;
}
if (cmd->convert_src == TRIG_TIMER)
@@ -771,27 +547,23 @@ static int usbduxfast_ai_cmd(struct comedi_device *dev,
if ((steps < MIN_SAMPLING_PERIOD) && (cmd->chanlist_len != 1)) {
dev_err(dev->class_dev,
- "ai_cmd: steps=%ld, scan_begin_arg=%d. Not properly tested by cmdtest?\n",
+ "steps=%ld, scan_begin_arg=%d. Not properly tested by cmdtest?\n",
steps, cmd->scan_begin_arg);
- up(&udfs->sem);
+ up(&devpriv->sem);
return -EINVAL;
}
if (steps > MAX_SAMPLING_PERIOD) {
- dev_err(dev->class_dev, "ai_cmd: sampling rate too low.\n");
- up(&udfs->sem);
+ dev_err(dev->class_dev, "sampling rate too low\n");
+ up(&devpriv->sem);
return -EINVAL;
}
if ((cmd->start_src == TRIG_EXT) && (cmd->chanlist_len != 1)
&& (cmd->chanlist_len != 16)) {
dev_err(dev->class_dev,
- "ai_cmd: TRIG_EXT only with 1 or 16 channels possible.\n");
- up(&udfs->sem);
+ "TRIG_EXT only with 1 or 16 channels possible\n");
+ up(&devpriv->sem);
return -EINVAL;
}
-#ifdef CONFIG_COMEDI_DEBUG
- printk(KERN_DEBUG "comedi%d: usbduxfast: steps=%ld, convert_arg=%u\n",
- dev->minor, steps, cmd->convert_arg);
-#endif
switch (cmd->chanlist_len) {
case 1:
@@ -812,17 +584,11 @@ static int usbduxfast_ai_cmd(struct comedi_device *dev,
/* we loop here until ready has been set */
if (cmd->start_src == TRIG_EXT) {
/* branch back to state 0 */
- udfs->dux_commands[LENBASE + 0] = 0x01;
 			/* decision state w/o data */
- udfs->dux_commands[OPBASE + 0] = 0x01;
- udfs->dux_commands[OUTBASE + 0] = 0xFF & rngmask;
/* RDY0 = 0 */
- udfs->dux_commands[LOGBASE + 0] = 0x00;
+ usbduxfast_cmd_data(dev, 0, 0x01, 0x01, rngmask, 0x00);
} else { /* we just proceed to state 1 */
- udfs->dux_commands[LENBASE + 0] = 1;
- udfs->dux_commands[OPBASE + 0] = 0;
- udfs->dux_commands[OUTBASE + 0] = 0xFF & rngmask;
- udfs->dux_commands[LOGBASE + 0] = 0;
+ usbduxfast_cmd_data(dev, 0, 0x01, 0x00, rngmask, 0x00);
}
if (steps < MIN_SAMPLING_PERIOD) {
@@ -835,33 +601,25 @@ static int usbduxfast_ai_cmd(struct comedi_device *dev,
*/
/* branch back to state 1 */
- udfs->dux_commands[LENBASE + 1] = 0x89;
 				/* decision state with data */
- udfs->dux_commands[OPBASE + 1] = 0x03;
- udfs->dux_commands[OUTBASE + 1] =
- 0xFF & rngmask;
/* doesn't matter */
- udfs->dux_commands[LOGBASE + 1] = 0xFF;
+ usbduxfast_cmd_data(dev, 1,
+ 0x89, 0x03, rngmask, 0xff);
} else {
/*
* we loop through two states: data and delay
* max rate is 15MHz
*/
- udfs->dux_commands[LENBASE + 1] = steps - 1;
/* data */
- udfs->dux_commands[OPBASE + 1] = 0x02;
- udfs->dux_commands[OUTBASE + 1] =
- 0xFF & rngmask;
/* doesn't matter */
- udfs->dux_commands[LOGBASE + 1] = 0;
+ usbduxfast_cmd_data(dev, 1, steps - 1,
+ 0x02, rngmask, 0x00);
+
/* branch back to state 1 */
- udfs->dux_commands[LENBASE + 2] = 0x09;
 				/* decision state w/o data */
- udfs->dux_commands[OPBASE + 2] = 0x01;
- udfs->dux_commands[OUTBASE + 2] =
- 0xFF & rngmask;
/* doesn't matter */
- udfs->dux_commands[LOGBASE + 2] = 0xFF;
+ usbduxfast_cmd_data(dev, 2,
+ 0x09, 0x01, rngmask, 0xff);
}
} else {
/*
@@ -873,26 +631,20 @@ static int usbduxfast_ai_cmd(struct comedi_device *dev,
steps = steps - 1;
/* do the first part of the delay */
- udfs->dux_commands[LENBASE + 1] = steps / 2;
- udfs->dux_commands[OPBASE + 1] = 0;
- udfs->dux_commands[OUTBASE + 1] = 0xFF & rngmask;
- udfs->dux_commands[LOGBASE + 1] = 0;
+ usbduxfast_cmd_data(dev, 1,
+ steps / 2, 0x00, rngmask, 0x00);
/* and the second part */
- udfs->dux_commands[LENBASE + 2] = steps - steps / 2;
- udfs->dux_commands[OPBASE + 2] = 0;
- udfs->dux_commands[OUTBASE + 2] = 0xFF & rngmask;
- udfs->dux_commands[LOGBASE + 2] = 0;
+ usbduxfast_cmd_data(dev, 2, steps - steps / 2,
+ 0x00, rngmask, 0x00);
/* get the data and branch back */
/* branch back to state 1 */
- udfs->dux_commands[LENBASE + 3] = 0x09;
 			/* decision state w data */
- udfs->dux_commands[OPBASE + 3] = 0x03;
- udfs->dux_commands[OUTBASE + 3] = 0xFF & rngmask;
/* doesn't matter */
- udfs->dux_commands[LOGBASE + 3] = 0xFF;
+ usbduxfast_cmd_data(dev, 3,
+ 0x09, 0x03, rngmask, 0xff);
}
break;
@@ -907,11 +659,8 @@ static int usbduxfast_ai_cmd(struct comedi_device *dev,
else
rngmask = 0xff;
- udfs->dux_commands[LENBASE + 0] = 1;
/* data */
- udfs->dux_commands[OPBASE + 0] = 0x02;
- udfs->dux_commands[OUTBASE + 0] = 0xFF & rngmask;
- udfs->dux_commands[LOGBASE + 0] = 0;
+ usbduxfast_cmd_data(dev, 0, 0x01, 0x02, rngmask, 0x00);
/* we have 1 state with duration 1: state 0 */
steps_tmp = steps - 1;
@@ -922,23 +671,16 @@ static int usbduxfast_ai_cmd(struct comedi_device *dev,
rngmask = 0xff;
/* do the first part of the delay */
- udfs->dux_commands[LENBASE + 1] = steps_tmp / 2;
- udfs->dux_commands[OPBASE + 1] = 0;
/* count */
- udfs->dux_commands[OUTBASE + 1] = 0xFE & rngmask;
- udfs->dux_commands[LOGBASE + 1] = 0;
+ usbduxfast_cmd_data(dev, 1, steps_tmp / 2,
+ 0x00, 0xfe & rngmask, 0x00);
/* and the second part */
- udfs->dux_commands[LENBASE + 2] = steps_tmp - steps_tmp / 2;
- udfs->dux_commands[OPBASE + 2] = 0;
- udfs->dux_commands[OUTBASE + 2] = 0xFF & rngmask;
- udfs->dux_commands[LOGBASE + 2] = 0;
+ usbduxfast_cmd_data(dev, 2, steps_tmp - steps_tmp / 2,
+ 0x00, rngmask, 0x00);
- udfs->dux_commands[LENBASE + 3] = 1;
/* data */
- udfs->dux_commands[OPBASE + 3] = 0x02;
- udfs->dux_commands[OUTBASE + 3] = 0xFF & rngmask;
- udfs->dux_commands[LOGBASE + 3] = 0;
+ usbduxfast_cmd_data(dev, 3, 0x01, 0x02, rngmask, 0x00);
/*
* we have 2 states with duration 1: step 6 and
@@ -952,22 +694,15 @@ static int usbduxfast_ai_cmd(struct comedi_device *dev,
rngmask = 0xff;
/* do the first part of the delay */
- udfs->dux_commands[LENBASE + 4] = steps_tmp / 2;
- udfs->dux_commands[OPBASE + 4] = 0;
/* reset */
- udfs->dux_commands[OUTBASE + 4] = (0xFF - 0x02) & rngmask;
- udfs->dux_commands[LOGBASE + 4] = 0;
+ usbduxfast_cmd_data(dev, 4, steps_tmp / 2,
+ 0x00, (0xff - 0x02) & rngmask, 0x00);
/* and the second part */
- udfs->dux_commands[LENBASE + 5] = steps_tmp - steps_tmp / 2;
- udfs->dux_commands[OPBASE + 5] = 0;
- udfs->dux_commands[OUTBASE + 5] = 0xFF & rngmask;
- udfs->dux_commands[LOGBASE + 5] = 0;
-
- udfs->dux_commands[LENBASE + 6] = 1;
- udfs->dux_commands[OPBASE + 6] = 0;
- udfs->dux_commands[OUTBASE + 6] = 0xFF & rngmask;
- udfs->dux_commands[LOGBASE + 6] = 0;
+ usbduxfast_cmd_data(dev, 5, steps_tmp - steps_tmp / 2,
+ 0x00, rngmask, 0x00);
+
+ usbduxfast_cmd_data(dev, 6, 0x01, 0x00, rngmask, 0x00);
break;
case 3:
@@ -975,6 +710,8 @@ static int usbduxfast_ai_cmd(struct comedi_device *dev,
* three channels
*/
for (j = 0; j < 1; j++) {
+ int index = j * 2;
+
if (CR_RANGE(cmd->chanlist[j]) > 0)
rngmask = 0xff - 0x04;
else
@@ -983,12 +720,10 @@ static int usbduxfast_ai_cmd(struct comedi_device *dev,
* commit data to the FIFO and do the first part
* of the delay
*/
- udfs->dux_commands[LENBASE + j * 2] = steps / 2;
/* data */
- udfs->dux_commands[OPBASE + j * 2] = 0x02;
/* no change */
- udfs->dux_commands[OUTBASE + j * 2] = 0xFF & rngmask;
- udfs->dux_commands[LOGBASE + j * 2] = 0;
+ usbduxfast_cmd_data(dev, index, steps / 2,
+ 0x02, rngmask, 0x00);
if (CR_RANGE(cmd->chanlist[j + 1]) > 0)
rngmask = 0xff - 0x04;
@@ -996,25 +731,19 @@ static int usbduxfast_ai_cmd(struct comedi_device *dev,
rngmask = 0xff;
/* do the second part of the delay */
- udfs->dux_commands[LENBASE + j * 2 + 1] =
- steps - steps / 2;
/* no data */
- udfs->dux_commands[OPBASE + j * 2 + 1] = 0;
/* count */
- udfs->dux_commands[OUTBASE + j * 2 + 1] =
- 0xFE & rngmask;
- udfs->dux_commands[LOGBASE + j * 2 + 1] = 0;
+ usbduxfast_cmd_data(dev, index + 1, steps - steps / 2,
+ 0x00, 0xfe & rngmask, 0x00);
}
 		/* 2 steps with duration 1: the idle step and step 6: */
steps_tmp = steps - 2;
/* commit data to the FIFO and do the first part of the delay */
- udfs->dux_commands[LENBASE + 4] = steps_tmp / 2;
/* data */
- udfs->dux_commands[OPBASE + 4] = 0x02;
- udfs->dux_commands[OUTBASE + 4] = 0xFF & rngmask;
- udfs->dux_commands[LOGBASE + 4] = 0;
+ usbduxfast_cmd_data(dev, 4, steps_tmp / 2,
+ 0x02, rngmask, 0x00);
if (CR_RANGE(cmd->chanlist[0]) > 0)
rngmask = 0xff - 0x04;
@@ -1022,17 +751,12 @@ static int usbduxfast_ai_cmd(struct comedi_device *dev,
rngmask = 0xff;
/* do the second part of the delay */
- udfs->dux_commands[LENBASE + 5] = steps_tmp - steps_tmp / 2;
/* no data */
- udfs->dux_commands[OPBASE + 5] = 0;
/* reset */
- udfs->dux_commands[OUTBASE + 5] = (0xFF - 0x02) & rngmask;
- udfs->dux_commands[LOGBASE + 5] = 0;
+ usbduxfast_cmd_data(dev, 5, steps_tmp - steps_tmp / 2,
+ 0x00, (0xff - 0x02) & rngmask, 0x00);
- udfs->dux_commands[LENBASE + 6] = 1;
- udfs->dux_commands[OPBASE + 6] = 0;
- udfs->dux_commands[OUTBASE + 6] = 0xFF & rngmask;
- udfs->dux_commands[LOGBASE + 6] = 0;
+ usbduxfast_cmd_data(dev, 6, 0x01, 0x00, rngmask, 0x00);
case 16:
if (CR_RANGE(cmd->chanlist[0]) > 0)
@@ -1046,101 +770,79 @@ static int usbduxfast_ai_cmd(struct comedi_device *dev,
*/
/* branch back to state 0 */
- udfs->dux_commands[LENBASE + 0] = 0x01;
 			/* decision state w/o data */
- udfs->dux_commands[OPBASE + 0] = 0x01;
/* reset */
- udfs->dux_commands[OUTBASE + 0] =
- (0xFF - 0x02) & rngmask;
/* RDY0 = 0 */
- udfs->dux_commands[LOGBASE + 0] = 0x00;
+ usbduxfast_cmd_data(dev, 0, 0x01, 0x01,
+ (0xff - 0x02) & rngmask, 0x00);
} else {
/*
* we just proceed to state 1
*/
/* 30us reset pulse */
- udfs->dux_commands[LENBASE + 0] = 255;
- udfs->dux_commands[OPBASE + 0] = 0;
/* reset */
- udfs->dux_commands[OUTBASE + 0] =
- (0xFF - 0x02) & rngmask;
- udfs->dux_commands[LOGBASE + 0] = 0;
+ usbduxfast_cmd_data(dev, 0, 0xff, 0x00,
+ (0xff - 0x02) & rngmask, 0x00);
}
/* commit data to the FIFO */
- udfs->dux_commands[LENBASE + 1] = 1;
/* data */
- udfs->dux_commands[OPBASE + 1] = 0x02;
- udfs->dux_commands[OUTBASE + 1] = 0xFF & rngmask;
- udfs->dux_commands[LOGBASE + 1] = 0;
+ usbduxfast_cmd_data(dev, 1, 0x01, 0x02, rngmask, 0x00);
/* we have 2 states with duration 1 */
steps = steps - 2;
/* do the first part of the delay */
- udfs->dux_commands[LENBASE + 2] = steps / 2;
- udfs->dux_commands[OPBASE + 2] = 0;
- udfs->dux_commands[OUTBASE + 2] = 0xFE & rngmask;
- udfs->dux_commands[LOGBASE + 2] = 0;
+ usbduxfast_cmd_data(dev, 2, steps / 2,
+ 0x00, 0xfe & rngmask, 0x00);
/* and the second part */
- udfs->dux_commands[LENBASE + 3] = steps - steps / 2;
- udfs->dux_commands[OPBASE + 3] = 0;
- udfs->dux_commands[OUTBASE + 3] = 0xFF & rngmask;
- udfs->dux_commands[LOGBASE + 3] = 0;
+ usbduxfast_cmd_data(dev, 3, steps - steps / 2,
+ 0x00, rngmask, 0x00);
/* branch back to state 1 */
- udfs->dux_commands[LENBASE + 4] = 0x09;
 		/* decision state w/o data */
- udfs->dux_commands[OPBASE + 4] = 0x01;
- udfs->dux_commands[OUTBASE + 4] = 0xFF & rngmask;
/* doesn't matter */
- udfs->dux_commands[LOGBASE + 4] = 0xFF;
+ usbduxfast_cmd_data(dev, 4, 0x09, 0x01, rngmask, 0xff);
break;
default:
dev_err(dev->class_dev, "unsupported combination of channels\n");
- up(&udfs->sem);
+ up(&devpriv->sem);
return -EFAULT;
}
-#ifdef CONFIG_COMEDI_DEBUG
- printk(KERN_DEBUG "comedi %d: sending commands to the usb device\n",
- dev->minor);
-#endif
/* 0 means that the AD commands are sent */
- result = send_dux_commands(udfs, SENDADCOMMANDS);
+ result = usbduxfast_send_cmd(dev, SENDADCOMMANDS);
if (result < 0) {
- dev_err(dev->class_dev,
- "adc command could not be submitted. Aborting...\n");
- up(&udfs->sem);
+ up(&devpriv->sem);
return result;
}
if (cmd->stop_src == TRIG_COUNT) {
- udfs->ai_sample_count = cmd->stop_arg * cmd->scan_end_arg;
- if (udfs->ai_sample_count < 1) {
+ devpriv->ai_sample_count = cmd->stop_arg * cmd->scan_end_arg;
+ if (devpriv->ai_sample_count < 1) {
dev_err(dev->class_dev,
- "(cmd->stop_arg)*(cmd->scan_end_arg)<1, aborting.\n");
- up(&udfs->sem);
+ "(cmd->stop_arg)*(cmd->scan_end_arg)<1, aborting\n");
+ up(&devpriv->sem);
return -EFAULT;
}
- udfs->ai_continous = 0;
+ devpriv->ai_continous = 0;
} else {
 		/* continuous acquisition */
- udfs->ai_continous = 1;
- udfs->ai_sample_count = 0;
+ devpriv->ai_continous = 1;
+ devpriv->ai_sample_count = 0;
}
if ((cmd->start_src == TRIG_NOW) || (cmd->start_src == TRIG_EXT)) {
/* enable this acquisition operation */
- udfs->ai_cmd_running = 1;
- ret = usbduxfastsub_submit_InURBs(udfs);
+ devpriv->ai_cmd_running = 1;
+ ret = usbduxfast_submit_urb(dev);
if (ret < 0) {
- udfs->ai_cmd_running = 0;
+ devpriv->ai_cmd_running = 0;
/* fixme: unlink here?? */
- up(&udfs->sem);
+ up(&devpriv->sem);
return ret;
}
s->async->inttrig = NULL;
@@ -1152,7 +854,7 @@ static int usbduxfast_ai_cmd(struct comedi_device *dev,
*/
s->async->inttrig = usbduxfast_ai_inttrig;
}
- up(&udfs->sem);
+ up(&devpriv->sem);
return 0;
}
@@ -1162,490 +864,282 @@ static int usbduxfast_ai_cmd(struct comedi_device *dev,
*/
static int usbduxfast_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct usbduxfast_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int range = CR_RANGE(insn->chanspec);
+ uint8_t rngmask = range ? (0xff - 0x04) : 0xff;
int i, j, n, actual_length;
- int chan, range, rngmask;
- int err;
- struct usbduxfastsub_s *udfs;
+ int ret;
- udfs = dev->private;
- if (!udfs) {
- dev_err(dev->class_dev, "%s: no usb dev.\n", __func__);
- return -ENODEV;
- }
-#ifdef CONFIG_COMEDI_DEBUG
- printk(KERN_DEBUG "comedi%d: ai_insn_read, insn->n=%d, "
- "insn->subdev=%d\n", dev->minor, insn->n, insn->subdev);
-#endif
- down(&udfs->sem);
- if (!udfs->probed) {
- up(&udfs->sem);
- return -ENODEV;
- }
- if (udfs->ai_cmd_running) {
+ down(&devpriv->sem);
+
+ if (devpriv->ai_cmd_running) {
dev_err(dev->class_dev,
- "ai_insn_read not possible. Async Command is running.\n");
- up(&udfs->sem);
+ "ai_insn_read not possible, async cmd is running\n");
+ up(&devpriv->sem);
return -EBUSY;
}
- /* sample one channel */
- chan = CR_CHAN(insn->chanspec);
- range = CR_RANGE(insn->chanspec);
- /* set command for the first channel */
- if (range > 0)
- rngmask = 0xff - 0x04;
- else
- rngmask = 0xff;
+ /* set command for the first channel */
/* commit data to the FIFO */
- udfs->dux_commands[LENBASE + 0] = 1;
/* data */
- udfs->dux_commands[OPBASE + 0] = 0x02;
- udfs->dux_commands[OUTBASE + 0] = 0xFF & rngmask;
- udfs->dux_commands[LOGBASE + 0] = 0;
+ usbduxfast_cmd_data(dev, 0, 0x01, 0x02, rngmask, 0x00);
/* do the first part of the delay */
- udfs->dux_commands[LENBASE + 1] = 12;
- udfs->dux_commands[OPBASE + 1] = 0;
- udfs->dux_commands[OUTBASE + 1] = 0xFE & rngmask;
- udfs->dux_commands[LOGBASE + 1] = 0;
-
- udfs->dux_commands[LENBASE + 2] = 1;
- udfs->dux_commands[OPBASE + 2] = 0;
- udfs->dux_commands[OUTBASE + 2] = 0xFE & rngmask;
- udfs->dux_commands[LOGBASE + 2] = 0;
-
- udfs->dux_commands[LENBASE + 3] = 1;
- udfs->dux_commands[OPBASE + 3] = 0;
- udfs->dux_commands[OUTBASE + 3] = 0xFE & rngmask;
- udfs->dux_commands[LOGBASE + 3] = 0;
-
- udfs->dux_commands[LENBASE + 4] = 1;
- udfs->dux_commands[OPBASE + 4] = 0;
- udfs->dux_commands[OUTBASE + 4] = 0xFE & rngmask;
- udfs->dux_commands[LOGBASE + 4] = 0;
+ usbduxfast_cmd_data(dev, 1, 0x0c, 0x00, 0xfe & rngmask, 0x00);
+ usbduxfast_cmd_data(dev, 2, 0x01, 0x00, 0xfe & rngmask, 0x00);
+ usbduxfast_cmd_data(dev, 3, 0x01, 0x00, 0xfe & rngmask, 0x00);
+ usbduxfast_cmd_data(dev, 4, 0x01, 0x00, 0xfe & rngmask, 0x00);
/* second part */
- udfs->dux_commands[LENBASE + 5] = 12;
- udfs->dux_commands[OPBASE + 5] = 0;
- udfs->dux_commands[OUTBASE + 5] = 0xFF & rngmask;
- udfs->dux_commands[LOGBASE + 5] = 0;
-
- udfs->dux_commands[LENBASE + 6] = 1;
- udfs->dux_commands[OPBASE + 6] = 0;
- udfs->dux_commands[OUTBASE + 6] = 0xFF & rngmask;
- udfs->dux_commands[LOGBASE + 0] = 0;
-
-#ifdef CONFIG_COMEDI_DEBUG
- printk(KERN_DEBUG "comedi %d: sending commands to the usb device\n",
- dev->minor);
-#endif
- /* 0 means that the AD commands are sent */
- err = send_dux_commands(udfs, SENDADCOMMANDS);
- if (err < 0) {
- dev_err(dev->class_dev,
- "adc command could not be submitted. Aborting...\n");
- up(&udfs->sem);
- return err;
+ usbduxfast_cmd_data(dev, 5, 0x0c, 0x00, rngmask, 0x00);
+ usbduxfast_cmd_data(dev, 6, 0x01, 0x00, rngmask, 0x00);
+
+ ret = usbduxfast_send_cmd(dev, SENDADCOMMANDS);
+ if (ret < 0) {
+ up(&devpriv->sem);
+ return ret;
}
-#ifdef CONFIG_COMEDI_DEBUG
- printk(KERN_DEBUG "comedi%d: usbduxfast: submitting in-urb: "
- "0x%p,0x%p\n", udfs->comedidev->minor, udfs->urbIn->context,
- udfs->urbIn->dev);
-#endif
+
for (i = 0; i < PACKETS_TO_IGNORE; i++) {
- err = usb_bulk_msg(udfs->usbdev,
- usb_rcvbulkpipe(udfs->usbdev, BULKINEP),
- udfs->transfer_buffer, SIZEINBUF,
+ ret = usb_bulk_msg(usb, usb_rcvbulkpipe(usb, BULKINEP),
+ devpriv->inbuf, SIZEINBUF,
&actual_length, 10000);
- if (err < 0) {
- dev_err(dev->class_dev, "insn timeout. No data.\n");
- up(&udfs->sem);
- return err;
+ if (ret < 0) {
+ dev_err(dev->class_dev, "insn timeout, no data\n");
+ up(&devpriv->sem);
+ return ret;
}
}
- /* data points */
+
for (i = 0; i < insn->n;) {
- err = usb_bulk_msg(udfs->usbdev,
- usb_rcvbulkpipe(udfs->usbdev, BULKINEP),
- udfs->transfer_buffer, SIZEINBUF,
+ ret = usb_bulk_msg(usb, usb_rcvbulkpipe(usb, BULKINEP),
+ devpriv->inbuf, SIZEINBUF,
&actual_length, 10000);
- if (err < 0) {
- dev_err(dev->class_dev, "insn data error: %d\n", err);
- up(&udfs->sem);
- return err;
+ if (ret < 0) {
+ dev_err(dev->class_dev, "insn data error: %d\n", ret);
+ up(&devpriv->sem);
+ return ret;
}
n = actual_length / sizeof(uint16_t);
if ((n % 16) != 0) {
- dev_err(dev->class_dev, "insn data packet corrupted.\n");
- up(&udfs->sem);
+ dev_err(dev->class_dev, "insn data packet corrupted\n");
+ up(&devpriv->sem);
return -EINVAL;
}
for (j = chan; (j < n) && (i < insn->n); j = j + 16) {
- data[i] = ((uint16_t *) (udfs->transfer_buffer))[j];
+ data[i] = ((uint16_t *) (devpriv->inbuf))[j];
i++;
}
}
- up(&udfs->sem);
- return i;
-}
-
-#define FIRMWARE_MAX_LEN 0x2000
-
-static int firmwareUpload(struct usbduxfastsub_s *usbduxfastsub,
- const u8 *firmwareBinary, int sizeFirmware)
-{
- int ret;
- uint8_t *fwBuf;
-
- if (!firmwareBinary)
- return 0;
-
- if (sizeFirmware > FIRMWARE_MAX_LEN) {
- dev_err(&usbduxfastsub->interface->dev,
- "comedi_: usbduxfast firmware binary it too large for FX2.\n");
- return -ENOMEM;
- }
-
- /* we generate a local buffer for the firmware */
- fwBuf = kmemdup(firmwareBinary, sizeFirmware, GFP_KERNEL);
- if (!fwBuf) {
- dev_err(&usbduxfastsub->interface->dev,
- "comedi_: mem alloc for firmware failed\n");
- return -ENOMEM;
- }
-
- ret = usbduxfastsub_stop(usbduxfastsub);
- if (ret < 0) {
- dev_err(&usbduxfastsub->interface->dev,
- "comedi_: can not stop firmware\n");
- kfree(fwBuf);
- return ret;
- }
-
- ret = usbduxfastsub_upload(usbduxfastsub, fwBuf, 0, sizeFirmware);
- if (ret < 0) {
- dev_err(&usbduxfastsub->interface->dev,
- "comedi_: firmware upload failed\n");
- kfree(fwBuf);
- return ret;
- }
- ret = usbduxfastsub_start(usbduxfastsub);
- if (ret < 0) {
- dev_err(&usbduxfastsub->interface->dev,
- "comedi_: can not start firmware\n");
- kfree(fwBuf);
- return ret;
- }
- kfree(fwBuf);
- return 0;
-}
-
-static void tidy_up(struct usbduxfastsub_s *udfs)
-{
-#ifdef CONFIG_COMEDI_DEBUG
- printk(KERN_DEBUG "comedi_: usbduxfast: tiding up\n");
-#endif
-
- if (!udfs)
- return;
- /* shows the usb subsystem that the driver is down */
- if (udfs->interface)
- usb_set_intfdata(udfs->interface, NULL);
+ up(&devpriv->sem);
- udfs->probed = 0;
-
- if (udfs->urbIn) {
- /* waits until a running transfer is over */
- usb_kill_urb(udfs->urbIn);
-
- kfree(udfs->transfer_buffer);
- udfs->transfer_buffer = NULL;
-
- usb_free_urb(udfs->urbIn);
- udfs->urbIn = NULL;
- }
-
- kfree(udfs->insnBuffer);
- udfs->insnBuffer = NULL;
-
- kfree(udfs->dux_commands);
- udfs->dux_commands = NULL;
-
- udfs->ai_cmd_running = 0;
+ return insn->n;
}
-static int usbduxfast_attach_common(struct comedi_device *dev,
- struct usbduxfastsub_s *udfs)
+static int usbduxfast_attach_common(struct comedi_device *dev)
{
- int ret;
+ struct usbduxfast_private *devpriv = dev->private;
struct comedi_subdevice *s;
+ int ret;
- down(&udfs->sem);
- /* pointer back to the corresponding comedi device */
- udfs->comedidev = dev;
+ down(&devpriv->sem);
ret = comedi_alloc_subdevices(dev, 1);
if (ret) {
- up(&udfs->sem);
+ up(&devpriv->sem);
return ret;
}
- /* private structure is also simply the usb-structure */
- dev->private = udfs;
- /* the first subdevice is the A/D converter */
- s = &dev->subdevices[SUBDEV_AD];
- /*
- * the URBs get the comedi subdevice which is responsible for reading
- * this is the subdevice which reads data
- */
+
+ /* Analog Input subdevice */
+ s = &dev->subdevices[0];
dev->read_subdev = s;
- /* the subdevice receives as private structure the usb-structure */
- s->private = NULL;
- /* analog input */
- s->type = COMEDI_SUBD_AI;
- /* readable and ref is to ground */
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ;
- /* 16 channels */
- s->n_chan = 16;
- /* length of the channellist */
- s->len_chanlist = 16;
- /* callback functions */
- s->insn_read = usbduxfast_ai_insn_read;
- s->do_cmdtest = usbduxfast_ai_cmdtest;
- s->do_cmd = usbduxfast_ai_cmd;
- s->cancel = usbduxfast_ai_cancel;
- /* max value from the A/D converter (12bit+1 bit for overflow) */
- s->maxdata = 0x1000;
- /* range table to convert to physical units */
- s->range_table = &range_usbduxfast_ai_range;
- /* finally decide that it's attached */
- udfs->attached = 1;
- up(&udfs->sem);
- dev_info(dev->class_dev, "successfully attached to usbduxfast.\n");
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ;
+ s->n_chan = 16;
+ s->len_chanlist = 16;
+ s->insn_read = usbduxfast_ai_insn_read;
+ s->do_cmdtest = usbduxfast_ai_cmdtest;
+ s->do_cmd = usbduxfast_ai_cmd;
+ s->cancel = usbduxfast_ai_cancel;
+ s->maxdata = 0x1000;
+ s->range_table = &range_usbduxfast_ai_range;
+
+ up(&devpriv->sem);
+
return 0;
}
-static int usbduxfast_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
+static int usbduxfast_upload_firmware(struct comedi_device *dev,
+ const u8 *data, size_t size,
+ unsigned long context)
{
- struct usb_interface *uinterf = comedi_to_usb_interface(dev);
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ uint8_t *buf;
+ unsigned char *tmp;
int ret;
- struct usbduxfastsub_s *udfs;
- dev->private = NULL;
- down(&start_stop_sem);
- udfs = usb_get_intfdata(uinterf);
- if (!udfs || !udfs->probed) {
- dev_err(dev->class_dev,
- "usbduxfast: error: auto_attach failed, not connected\n");
- ret = -ENODEV;
- } else if (udfs->attached) {
- dev_err(dev->class_dev,
- "usbduxfast: error: auto_attach failed, already attached\n");
- ret = -ENODEV;
- } else
- ret = usbduxfast_attach_common(dev, udfs);
- up(&start_stop_sem);
- return ret;
-}
+ if (!data)
+ return 0;
-static void usbduxfast_detach(struct comedi_device *dev)
-{
- struct usbduxfastsub_s *usb = dev->private;
-
- if (usb) {
- down(&usb->sem);
- down(&start_stop_sem);
- dev->private = NULL;
- usb->attached = 0;
- usb->comedidev = NULL;
- up(&start_stop_sem);
- up(&usb->sem);
+ if (size > FIRMWARE_MAX_LEN) {
+ dev_err(dev->class_dev, "firmware binary too large for FX2\n");
+ return -ENOMEM;
}
-}
-
-static struct comedi_driver usbduxfast_driver = {
- .driver_name = "usbduxfast",
- .module = THIS_MODULE,
- .auto_attach = usbduxfast_auto_attach,
- .detach = usbduxfast_detach,
-};
-static void usbduxfast_firmware_request_complete_handler(const struct firmware
- *fw, void *context)
-{
- struct usbduxfastsub_s *usbduxfastsub_tmp = context;
- struct usb_interface *uinterf = usbduxfastsub_tmp->interface;
- int ret;
+ /* we generate a local buffer for the firmware */
+ buf = kmemdup(data, size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- if (fw == NULL)
- return;
+ /* we need a malloc'ed buffer for usb_control_msg() */
+ tmp = kmalloc(1, GFP_KERNEL);
+ if (!tmp) {
+ kfree(buf);
+ return -ENOMEM;
+ }
- /*
- * we need to upload the firmware here because fw will be
- * freed once we've left this function
- */
- ret = firmwareUpload(usbduxfastsub_tmp, fw->data, fw->size);
+ /* stop the current firmware on the device */
+ *tmp = 1; /* 7f92 to one */
+ ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0),
+ USBDUXFASTSUB_FIRMWARE,
+ VENDOR_DIR_OUT,
+ USBDUXFASTSUB_CPUCS, 0x0000,
+ tmp, 1,
+ EZTIMEOUT);
+ if (ret < 0) {
+ dev_err(dev->class_dev, "can not stop firmware\n");
+ goto done;
+ }
- if (ret) {
- dev_err(&uinterf->dev,
- "Could not upload firmware (err=%d)\n", ret);
- goto out;
+ /* upload the new firmware to the device */
+ ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0),
+ USBDUXFASTSUB_FIRMWARE,
+ VENDOR_DIR_OUT,
+ 0, 0x0000,
+ buf, size,
+ EZTIMEOUT);
+ if (ret < 0) {
+ dev_err(dev->class_dev, "firmware upload failed\n");
+ goto done;
}
- comedi_usb_auto_config(uinterf, &usbduxfast_driver, 0);
- out:
- release_firmware(fw);
+ /* start the new firmware on the device */
+ *tmp = 0; /* 7f92 to zero */
+ ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0),
+ USBDUXFASTSUB_FIRMWARE,
+ VENDOR_DIR_OUT,
+ USBDUXFASTSUB_CPUCS, 0x0000,
+ tmp, 1,
+ EZTIMEOUT);
+ if (ret < 0)
+ dev_err(dev->class_dev, "can not start firmware\n");
+
+done:
+ kfree(tmp);
+ kfree(buf);
+ return ret;
}
-static int usbduxfast_usb_probe(struct usb_interface *uinterf,
- const struct usb_device_id *id)
+static int usbduxfast_auto_attach(struct comedi_device *dev,
+ unsigned long context_unused)
{
- struct usb_device *udev = interface_to_usbdev(uinterf);
- int i;
- int index;
+ struct usb_interface *intf = comedi_to_usb_interface(dev);
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct usbduxfast_private *devpriv;
int ret;
- if (udev->speed != USB_SPEED_HIGH) {
- dev_err(&uinterf->dev,
+ if (usb->speed != USB_SPEED_HIGH) {
+ dev_err(dev->class_dev,
"This driver needs USB 2.0 to operate. Aborting...\n");
return -ENODEV;
}
-#ifdef CONFIG_COMEDI_DEBUG
- printk(KERN_DEBUG "comedi_: usbduxfast_: finding a free structure for "
- "the usb-device\n");
-#endif
- down(&start_stop_sem);
- /* look for a free place in the usbduxfast array */
- index = -1;
- for (i = 0; i < NUMUSBDUXFAST; i++) {
- if (!usbduxfastsub[i].probed) {
- index = i;
- break;
- }
- }
- /* no more space */
- if (index == -1) {
- dev_err(&uinterf->dev,
- "Too many usbduxfast-devices connected.\n");
- up(&start_stop_sem);
- return -EMFILE;
- }
-#ifdef CONFIG_COMEDI_DEBUG
- printk(KERN_DEBUG "comedi_: usbduxfast: usbduxfastsub[%d] is ready to "
- "connect to comedi.\n", index);
-#endif
-
- sema_init(&(usbduxfastsub[index].sem), 1);
- /* save a pointer to the usb device */
- usbduxfastsub[index].usbdev = udev;
-
- /* save the interface itself */
- usbduxfastsub[index].interface = uinterf;
- /* get the interface number from the interface */
- usbduxfastsub[index].ifnum = uinterf->altsetting->desc.bInterfaceNumber;
- /*
- * hand the private data over to the usb subsystem
- * will be needed for disconnect
- */
- usb_set_intfdata(uinterf, &(usbduxfastsub[index]));
-
-#ifdef CONFIG_COMEDI_DEBUG
- printk(KERN_DEBUG "comedi_: usbduxfast: ifnum=%d\n",
- usbduxfastsub[index].ifnum);
-#endif
- /* create space for the commands going to the usb device */
- usbduxfastsub[index].dux_commands = kmalloc(SIZEOFDUXBUFFER,
- GFP_KERNEL);
- if (!usbduxfastsub[index].dux_commands) {
- tidy_up(&(usbduxfastsub[index]));
- up(&start_stop_sem);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
+ if (!devpriv)
return -ENOMEM;
- }
- /* create space of the instruction buffer */
- usbduxfastsub[index].insnBuffer = kmalloc(SIZEINSNBUF, GFP_KERNEL);
- if (!usbduxfastsub[index].insnBuffer) {
- tidy_up(&(usbduxfastsub[index]));
- up(&start_stop_sem);
+
+ sema_init(&devpriv->sem, 1);
+ usb_set_intfdata(intf, devpriv);
+
+ devpriv->duxbuf = kmalloc(SIZEOFDUXBUF, GFP_KERNEL);
+ if (!devpriv->duxbuf)
return -ENOMEM;
- }
- /* setting to alternate setting 1: enabling bulk ep */
- i = usb_set_interface(usbduxfastsub[index].usbdev,
- usbduxfastsub[index].ifnum, 1);
- if (i < 0) {
- dev_err(&uinterf->dev,
- "usbduxfast%d: could not switch to alternate setting 1.\n",
- index);
- tidy_up(&(usbduxfastsub[index]));
- up(&start_stop_sem);
+
+ ret = usb_set_interface(usb,
+ intf->altsetting->desc.bInterfaceNumber, 1);
+ if (ret < 0) {
+ dev_err(dev->class_dev,
+ "could not switch to alternate setting 1\n");
return -ENODEV;
}
- usbduxfastsub[index].urbIn = usb_alloc_urb(0, GFP_KERNEL);
- if (!usbduxfastsub[index].urbIn) {
- dev_err(&uinterf->dev,
- "usbduxfast%d: Could not alloc. urb\n", index);
- tidy_up(&(usbduxfastsub[index]));
- up(&start_stop_sem);
+
+ devpriv->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!devpriv->urb) {
+ dev_err(dev->class_dev, "Could not alloc. urb\n");
return -ENOMEM;
}
- usbduxfastsub[index].transfer_buffer = kmalloc(SIZEINBUF, GFP_KERNEL);
- if (!usbduxfastsub[index].transfer_buffer) {
- tidy_up(&(usbduxfastsub[index]));
- up(&start_stop_sem);
+
+ devpriv->inbuf = kmalloc(SIZEINBUF, GFP_KERNEL);
+ if (!devpriv->inbuf)
return -ENOMEM;
- }
- /* we've reached the bottom of the function */
- usbduxfastsub[index].probed = 1;
- up(&start_stop_sem);
-
- ret = request_firmware_nowait(THIS_MODULE,
- FW_ACTION_HOTPLUG,
- FIRMWARE,
- &udev->dev,
- GFP_KERNEL,
- usbduxfastsub + index,
- usbduxfast_firmware_request_complete_handler);
- if (ret) {
- dev_err(&uinterf->dev, "could not load firmware (err=%d)\n", ret);
+ ret = comedi_load_firmware(dev, &usb->dev, FIRMWARE,
+ usbduxfast_upload_firmware, 0);
+ if (ret)
return ret;
- }
- dev_info(&uinterf->dev,
- "usbduxfast%d has been successfully initialized.\n", index);
- /* success */
- return 0;
+ return usbduxfast_attach_common(dev);
}
-static void usbduxfast_usb_disconnect(struct usb_interface *intf)
+static void usbduxfast_detach(struct comedi_device *dev)
{
- struct usbduxfastsub_s *udfs = usb_get_intfdata(intf);
- struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_interface *intf = comedi_to_usb_interface(dev);
+ struct usbduxfast_private *devpriv = dev->private;
- if (!udfs) {
- dev_err(&intf->dev, "disconnect called with null pointer.\n");
- return;
- }
- if (udfs->usbdev != udev) {
- dev_err(&intf->dev, "BUG! called with wrong ptr!!!\n");
+ if (!devpriv)
return;
+
+ down(&devpriv->sem);
+
+ usb_set_intfdata(intf, NULL);
+
+ if (devpriv->urb) {
+ /* waits until a running transfer is over */
+ usb_kill_urb(devpriv->urb);
+
+ kfree(devpriv->inbuf);
+ devpriv->inbuf = NULL;
+
+ usb_free_urb(devpriv->urb);
+ devpriv->urb = NULL;
}
- comedi_usb_auto_unconfig(intf);
+ kfree(devpriv->duxbuf);
+ devpriv->duxbuf = NULL;
- down(&start_stop_sem);
- down(&udfs->sem);
- tidy_up(udfs);
- up(&udfs->sem);
- up(&start_stop_sem);
+ devpriv->ai_cmd_running = 0;
-#ifdef CONFIG_COMEDI_DEBUG
- printk(KERN_DEBUG "comedi_: usbduxfast: disconnected from the usb\n");
-#endif
+ up(&devpriv->sem);
+}
+
+static struct comedi_driver usbduxfast_driver = {
+ .driver_name = "usbduxfast",
+ .module = THIS_MODULE,
+ .auto_attach = usbduxfast_auto_attach,
+ .detach = usbduxfast_detach,
+};
+
+static int usbduxfast_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ return comedi_usb_auto_config(intf, &usbduxfast_driver, 0);
}
static const struct usb_device_id usbduxfast_usb_table[] = {
@@ -1657,12 +1151,9 @@ static const struct usb_device_id usbduxfast_usb_table[] = {
MODULE_DEVICE_TABLE(usb, usbduxfast_usb_table);
static struct usb_driver usbduxfast_usb_driver = {
-#ifdef COMEDI_HAVE_USB_DRIVER_OWNER
- .owner = THIS_MODULE,
-#endif
.name = "usbduxfast",
.probe = usbduxfast_usb_probe,
- .disconnect = usbduxfast_usb_disconnect,
+ .disconnect = comedi_usb_auto_unconfig,
.id_table = usbduxfast_usb_table,
};
module_comedi_usb_driver(usbduxfast_driver, usbduxfast_usb_driver);
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index d3bc1b9..c47f408 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -1,30 +1,27 @@
/*
- comedi/drivers/usbdux.c
- Copyright (C) 2011 Bernd Porr, Bernd.Porr@f2s.com
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
+ * usbduxsigma.c
+ * Copyright (C) 2011 Bernd Porr, Bernd.Porr@f2s.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
+
/*
-Driver: usbduxsigma
-Description: University of Stirling USB DAQ & INCITE Technology Limited
-Devices: [ITL] USB-DUX (usbduxsigma.o)
-Author: Bernd Porr <BerndPorr@f2s.com>
-Updated: 8 Nov 2011
-Status: testing
-*/
+ * Driver: usbduxsigma
+ * Description: University of Stirling USB DAQ & INCITE Technology Limited
+ * Devices: (ITL) USB-DUX [usbduxsigma]
+ * Author: Bernd Porr <BerndPorr@f2s.com>
+ * Updated: 8 Nov 2011
+ * Status: testing
+ */
+
/*
* I must give credit here to Chris Baugher who
* wrote the driver for AT-MIO-16d. I used some parts of this
@@ -44,9 +41,6 @@ Status: testing
* 0.6: corrected wrong input range
*/
-/* generates loads of debug info */
-/* #define NOISY_DUX_DEBUGBUG */
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -55,7 +49,7 @@ Status: testing
#include <linux/usb.h>
#include <linux/fcntl.h>
#include <linux/compiler.h>
-#include <linux/firmware.h>
+
#include "comedi_fc.h"
#include "../comedidev.h"
@@ -63,39 +57,15 @@ Status: testing
#define BULK_TIMEOUT 1000
/* constants for "firmware" upload and download */
-#define FIRMWARE "usbduxsigma_firmware.bin"
-#define USBDUXSUB_FIRMWARE 0xA0
-#define VENDOR_DIR_IN 0xC0
-#define VENDOR_DIR_OUT 0x40
+#define FIRMWARE "usbduxsigma_firmware.bin"
+#define FIRMWARE_MAX_LEN 0x4000
+#define USBDUXSUB_FIRMWARE 0xa0
+#define VENDOR_DIR_IN 0xc0
+#define VENDOR_DIR_OUT 0x40
/* internal addresses of the 8051 processor */
#define USBDUXSUB_CPUCS 0xE600
-/*
- * the minor device number, major is 180 only for debugging purposes and to
- * upload special firmware (programming the eeprom etc) which is not
- * compatible with the comedi framwork
- */
-#define USBDUXSUB_MINOR 32
-
-/* max lenghth of the transfer-buffer for software upload */
-#define TB_LEN 0x2000
-
-/* Input endpoint number: ISO/IRQ */
-#define ISOINEP 6
-
-/* Output endpoint number: ISO/IRQ */
-#define ISOOUTEP 2
-
-/* This EP sends DUX commands to USBDUX */
-#define COMMAND_OUT_EP 1
-
-/* This EP receives the DUX commands from USBDUX */
-#define COMMAND_IN_EP 8
-
-/* Output endpoint for PWM */
-#define PWM_EP 4
-
/* 300Hz max frequ under PWM */
#define MIN_PWM_PERIOD ((long)(1E9/300))
@@ -105,6 +75,8 @@ Status: testing
/* Number of channels (16 AD and offset)*/
#define NUMCHANNELS 16
+#define USBDUXSIGMA_NUM_AO_CHAN 4
+
/* Size of one A/D value */
#define SIZEADIN ((sizeof(int32_t)))
@@ -150,84 +122,55 @@ Status: testing
/* must have more buffers due to buggy USB ctr */
#define NUMOFOUTBUFFERSHIGH 10
-/* Total number of usbdux devices */
-#define NUMUSBDUX 16
-
-/* Analogue in subdevice */
-#define SUBDEV_AD 0
-
-/* Analogue out subdevice */
-#define SUBDEV_DA 1
-
-/* Digital I/O */
-#define SUBDEV_DIO 2
-
-/* timer aka pwm output */
-#define SUBDEV_PWM 3
-
/* number of retries to get the right dux command */
#define RETRIES 10
-/**************************************************/
-/* comedi constants */
-static const struct comedi_lrange range_usbdux_ai_range = { 1, {
- BIP_RANGE
- (2.65/2.0)
- }
-};
+/* bulk transfer commands to usbduxsigma */
+#define USBBUXSIGMA_AD_CMD 0
+#define USBDUXSIGMA_DA_CMD 1
+#define USBDUXSIGMA_DIO_CFG_CMD 2
+#define USBDUXSIGMA_DIO_BITS_CMD 3
+#define USBDUXSIGMA_SINGLE_AD_CMD 4
+#define USBDUXSIGMA_PWM_ON_CMD 7
+#define USBDUXSIGMA_PWM_OFF_CMD 8
-/*
- * private structure of one subdevice
- */
+static const struct comedi_lrange usbduxsigma_ai_range = {
+ 1, {
+ BIP_RANGE(2.65 / 2.0)
+ }
+};
-/*
- * This is the structure which holds all the data of
- * this driver one sub device just now: A/D
- */
-struct usbduxsub {
- /* attached? */
- int attached;
- /* is it associated with a subdevice? */
- int probed;
- /* pointer to the usb-device */
- struct usb_device *usbdev;
+struct usbduxsigma_private {
/* actual number of in-buffers */
- int numOfInBuffers;
+ int n_ai_urbs;
/* actual number of out-buffers */
- int numOfOutBuffers;
+ int n_ao_urbs;
/* ISO-transfer handling: buffers */
- struct urb **urbIn;
- struct urb **urbOut;
+ struct urb **ai_urbs;
+ struct urb **ao_urbs;
/* pwm-transfer handling */
- struct urb *urbPwm;
+ struct urb *pwm_urb;
/* PWM period */
- unsigned int pwmPeriod;
+ unsigned int pwm_period;
/* PWM internal delay for the GPIF in the FX2 */
- uint8_t pwmDelay;
+ uint8_t pwm_delay;
/* size of the PWM buffer which holds the bit pattern */
- int sizePwmBuf;
+ int pwm_buf_sz;
/* input buffer for the ISO-transfer */
- int32_t *inBuffer;
+ int32_t *in_buf;
/* input buffer for single insn */
- int8_t *insnBuffer;
- /* output buffer for single DA outputs */
- int16_t *outBuffer;
- /* interface number */
- int ifnum;
- /* interface structure in 2.6 */
- struct usb_interface *interface;
- /* comedi device for the interrupt context */
- struct comedi_device *comedidev;
- /* is it USB_SPEED_HIGH or not? */
- short int high_speed;
- /* asynchronous command is running */
- short int ai_cmd_running;
- short int ao_cmd_running;
- /* pwm is running */
- short int pwm_cmd_running;
- /* continuous acquisition */
- short int ai_continuous;
- short int ao_continuous;
+ int8_t *insn_buf;
+
+ int8_t ao_chanlist[USBDUXSIGMA_NUM_AO_CHAN];
+ unsigned int ao_readback[USBDUXSIGMA_NUM_AO_CHAN];
+
+ unsigned high_speed:1;
+ unsigned ai_cmd_running:1;
+ unsigned ai_continuous:1;
+ unsigned ao_cmd_running:1;
+ unsigned ao_continuous:1;
+ unsigned pwm_cmd_running:1;
+
/* number of samples to acquire */
int ai_sample_count;
int ao_sample_count;
@@ -239,133 +182,65 @@ struct usbduxsub {
unsigned int ao_counter;
/* interval in frames/uframes */
unsigned int ai_interval;
- /* D/A commands */
- uint8_t *dac_commands;
/* commands */
uint8_t *dux_commands;
struct semaphore sem;
};
-/*
- * The pointer to the private usb-data of the driver is also the private data
- * for the comedi-device. This has to be global as the usb subsystem needs
- * global variables. The other reason is that this structure must be there
- * _before_ any comedi command is issued. The usb subsystem must be initialised
- * before comedi can access it.
- */
-static struct usbduxsub usbduxsub[NUMUSBDUX];
-
-static DEFINE_SEMAPHORE(start_stop_sem);
-
-/*
- * Stops the data acquision
- * It should be safe to call this function from any context
- */
-static int usbduxsub_unlink_InURBs(struct usbduxsub *usbduxsub_tmp)
+static void usbduxsigma_unlink_urbs(struct urb **urbs, int num_urbs)
{
- int i = 0;
- int err = 0;
+ int i;
- if (usbduxsub_tmp && usbduxsub_tmp->urbIn) {
- for (i = 0; i < usbduxsub_tmp->numOfInBuffers; i++) {
- if (usbduxsub_tmp->urbIn[i]) {
- /* We wait here until all transfers have been
- * cancelled. */
- usb_kill_urb(usbduxsub_tmp->urbIn[i]);
- }
- dev_dbg(&usbduxsub_tmp->interface->dev,
- "comedi: usbdux: unlinked InURB %d, err=%d\n",
- i, err);
- }
- }
- return err;
+ for (i = 0; i < num_urbs; i++)
+ usb_kill_urb(urbs[i]);
}
-/*
- * This will stop a running acquisition operation
- * Is called from within this driver from both the
- * interrupt context and from comedi
- */
-static int usbdux_ai_stop(struct usbduxsub *this_usbduxsub, int do_unlink)
+static void usbduxsigma_ai_stop(struct comedi_device *dev, int do_unlink)
{
- int ret = 0;
+ struct usbduxsigma_private *devpriv = dev->private;
- if (!this_usbduxsub) {
- pr_err("comedi?: usbdux_ai_stop: this_usbduxsub=NULL!\n");
- return -EFAULT;
- }
- dev_dbg(&this_usbduxsub->interface->dev, "comedi: usbdux_ai_stop\n");
-
- if (do_unlink) {
- /* stop aquistion */
- ret = usbduxsub_unlink_InURBs(this_usbduxsub);
- }
-
- this_usbduxsub->ai_cmd_running = 0;
+ if (do_unlink && devpriv->ai_urbs)
+ usbduxsigma_unlink_urbs(devpriv->ai_urbs, devpriv->n_ai_urbs);
- return ret;
+ devpriv->ai_cmd_running = 0;
}
-/*
- * This will cancel a running acquisition operation.
- * This is called by comedi but never from inside the driver.
- */
-static int usbdux_ai_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
+static int usbduxsigma_ai_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- struct usbduxsub *this_usbduxsub;
- int res = 0;
-
- /* force unlink of all urbs */
- this_usbduxsub = dev->private;
- if (!this_usbduxsub)
- return -EFAULT;
+ struct usbduxsigma_private *devpriv = dev->private;
- dev_dbg(&this_usbduxsub->interface->dev, "comedi: usbdux_ai_cancel\n");
+ down(&devpriv->sem);
+ /* unlink only if it is really running */
+ usbduxsigma_ai_stop(dev, devpriv->ai_cmd_running);
+ up(&devpriv->sem);
- /* prevent other CPUs from submitting new commands just now */
- down(&this_usbduxsub->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
- /* unlink only if the urb really has been submitted */
- res = usbdux_ai_stop(this_usbduxsub, this_usbduxsub->ai_cmd_running);
- up(&this_usbduxsub->sem);
- return res;
+ return 0;
}
-/* analogue IN - interrupt service routine */
-static void usbduxsub_ai_IsocIrq(struct urb *urb)
+static void usbduxsigma_ai_urb_complete(struct urb *urb)
{
- int i, err, n;
- struct usbduxsub *this_usbduxsub;
- struct comedi_device *this_comedidev;
- struct comedi_subdevice *s;
- int32_t v;
+ struct comedi_device *dev = urb->context;
+ struct usbduxsigma_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
unsigned int dio_state;
-
- /* the context variable points to the comedi device */
- this_comedidev = urb->context;
- /* the private structure of the subdevice is struct usbduxsub */
- this_usbduxsub = this_comedidev->private;
- /* subdevice which is the AD converter */
- s = &this_comedidev->subdevices[SUBDEV_AD];
+ int32_t val;
+ int ret;
+ int i;
/* first we test if something unusual has just happened */
switch (urb->status) {
case 0:
/* copy the result in the transfer buffer */
- memcpy(this_usbduxsub->inBuffer,
- urb->transfer_buffer, SIZEINBUF);
+ memcpy(devpriv->in_buf, urb->transfer_buffer, SIZEINBUF);
break;
case -EILSEQ:
- /* error in the ISOchronous data */
- /* we don't copy the data into the transfer buffer */
- /* and recycle the last data byte */
- dev_dbg(&urb->dev->dev,
- "comedi%d: usbdux: CRC error in ISO IN stream.\n",
- this_usbduxsub->comedidev->minor);
+ /*
+ * error in the ISOchronous data
+ * we don't copy the data into the transfer buffer
+ * and recycle the last data byte
+ */
+ dev_dbg(dev->class_dev, "CRC error in ISO IN stream\n");
break;
@@ -374,185 +249,121 @@ static void usbduxsub_ai_IsocIrq(struct urb *urb)
case -ESHUTDOWN:
case -ECONNABORTED:
/* happens after an unlink command */
- if (this_usbduxsub->ai_cmd_running) {
- /* we are still running a command */
- /* tell this comedi */
- s->async->events |= COMEDI_CB_EOA;
- s->async->events |= COMEDI_CB_ERROR;
- comedi_event(this_usbduxsub->comedidev, s);
- /* stop the transfer w/o unlink */
- usbdux_ai_stop(this_usbduxsub, 0);
+ if (devpriv->ai_cmd_running) {
+ usbduxsigma_ai_stop(dev, 0); /* w/o unlink */
+ /* we are still running a command, tell comedi */
+ s->async->events |= (COMEDI_CB_EOA | COMEDI_CB_ERROR);
+ comedi_event(dev, s);
}
return;
default:
- /* a real error on the bus */
- /* pass error to comedi if we are really running a command */
- if (this_usbduxsub->ai_cmd_running) {
- dev_err(&urb->dev->dev,
- "Non-zero urb status received in ai intr "
- "context: %d\n", urb->status);
- s->async->events |= COMEDI_CB_EOA;
- s->async->events |= COMEDI_CB_ERROR;
- comedi_event(this_usbduxsub->comedidev, s);
- /* don't do an unlink here */
- usbdux_ai_stop(this_usbduxsub, 0);
+ /*
+ * a real error on the bus
+ * pass error to comedi if we are really running a command
+ */
+ if (devpriv->ai_cmd_running) {
+ dev_err(dev->class_dev,
+ "%s: non-zero urb status (%d)\n",
+ __func__, urb->status);
+ usbduxsigma_ai_stop(dev, 0); /* w/o unlink */
+ s->async->events |= (COMEDI_CB_EOA | COMEDI_CB_ERROR);
+ comedi_event(dev, s);
}
return;
}
- /*
- * at this point we are reasonably sure that nothing dodgy has happened
- * are we running a command?
- */
- if (unlikely((!(this_usbduxsub->ai_cmd_running)))) {
- /*
- * not running a command, do not continue execution if no
- * asynchronous command is running in particular not resubmit
- */
+ if (unlikely(!devpriv->ai_cmd_running))
return;
- }
- urb->dev = this_usbduxsub->usbdev;
-
- /* resubmit the urb */
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (unlikely(err < 0)) {
- dev_err(&urb->dev->dev,
- "comedi_: urb resubmit failed in int-context!"
- "err=%d\n",
- err);
- if (err == -EL2NSYNC)
- dev_err(&urb->dev->dev,
- "buggy USB host controller or bug in IRQ "
- "handler!\n");
- s->async->events |= COMEDI_CB_EOA;
- s->async->events |= COMEDI_CB_ERROR;
- comedi_event(this_usbduxsub->comedidev, s);
- /* don't do an unlink here */
- usbdux_ai_stop(this_usbduxsub, 0);
+ urb->dev = comedi_to_usb_dev(dev);
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (unlikely(ret < 0)) {
+ dev_err(dev->class_dev, "%s: urb resubmit failed (%d)\n",
+ __func__, ret);
+ if (ret == -EL2NSYNC)
+ dev_err(dev->class_dev,
+ "buggy USB host controller or bug in IRQ handler\n");
+ usbduxsigma_ai_stop(dev, 0); /* w/o unlink */
+ s->async->events |= (COMEDI_CB_EOA | COMEDI_CB_ERROR);
+ comedi_event(dev, s);
return;
}
/* get the state of the dio pins to allow external trigger */
- dio_state = be32_to_cpu(this_usbduxsub->inBuffer[0]);
+ dio_state = be32_to_cpu(devpriv->in_buf[0]);
- this_usbduxsub->ai_counter--;
- if (likely(this_usbduxsub->ai_counter > 0))
+ devpriv->ai_counter--;
+ if (likely(devpriv->ai_counter > 0))
return;
/* timer zero, transfer measurements to comedi */
- this_usbduxsub->ai_counter = this_usbduxsub->ai_timer;
+ devpriv->ai_counter = devpriv->ai_timer;
- /* test, if we transmit only a fixed number of samples */
- if (!(this_usbduxsub->ai_continuous)) {
+ if (!devpriv->ai_continuous) {
/* not continuous, fixed number of samples */
- this_usbduxsub->ai_sample_count--;
- /* all samples received? */
- if (this_usbduxsub->ai_sample_count < 0) {
- /* prevent a resubmit next time */
- usbdux_ai_stop(this_usbduxsub, 0);
- /* say comedi that the acquistion is over */
+ devpriv->ai_sample_count--;
+ if (devpriv->ai_sample_count < 0) {
+ usbduxsigma_ai_stop(dev, 0); /* w/o unlink */
+ /* acquisition is over, tell comedi */
s->async->events |= COMEDI_CB_EOA;
- comedi_event(this_usbduxsub->comedidev, s);
+ comedi_event(dev, s);
return;
}
}
+
/* get the data from the USB bus and hand it over to comedi */
- n = s->async->cmd.chanlist_len;
- for (i = 0; i < n; i++) {
+ for (i = 0; i < s->async->cmd.chanlist_len; i++) {
/* transfer data, note first byte is the DIO state */
- v = be32_to_cpu(this_usbduxsub->inBuffer[i+1]);
- /* strip status byte */
- v = v & 0x00ffffff;
- /* convert to unsigned */
- v = v ^ 0x00800000;
- /* write the byte to the buffer */
- err = cfc_write_array_to_buffer(s, &v, sizeof(uint32_t));
- if (unlikely(err == 0)) {
+ val = be32_to_cpu(devpriv->in_buf[i+1]);
+ val &= 0x00ffffff; /* strip status byte */
+ val ^= 0x00800000; /* convert to unsigned */
+
+ ret = cfc_write_array_to_buffer(s, &val, sizeof(uint32_t));
+ if (unlikely(ret == 0)) {
/* buffer overflow */
- usbdux_ai_stop(this_usbduxsub, 0);
+ usbduxsigma_ai_stop(dev, 0); /* w/o unlink */
return;
}
}
/* tell comedi that data is there */
- s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
- comedi_event(this_usbduxsub->comedidev, s);
-}
-
-static int usbduxsub_unlink_OutURBs(struct usbduxsub *usbduxsub_tmp)
-{
- int i = 0;
- int err = 0;
-
- if (usbduxsub_tmp && usbduxsub_tmp->urbOut) {
- for (i = 0; i < usbduxsub_tmp->numOfOutBuffers; i++) {
- if (usbduxsub_tmp->urbOut[i])
- usb_kill_urb(usbduxsub_tmp->urbOut[i]);
-
- dev_dbg(&usbduxsub_tmp->interface->dev,
- "comedi: usbdux: unlinked OutURB %d: res=%d\n",
- i, err);
- }
- }
- return err;
+ s->async->events |= (COMEDI_CB_BLOCK | COMEDI_CB_EOS);
+ comedi_event(dev, s);
}
-/* This will cancel a running acquisition operation
- * in any context.
- */
-static int usbdux_ao_stop(struct usbduxsub *this_usbduxsub, int do_unlink)
+static void usbduxsigma_ao_stop(struct comedi_device *dev, int do_unlink)
{
- int ret = 0;
+ struct usbduxsigma_private *devpriv = dev->private;
- if (!this_usbduxsub)
- return -EFAULT;
- dev_dbg(&this_usbduxsub->interface->dev, "comedi: usbdux_ao_cancel\n");
+ if (do_unlink && devpriv->ao_urbs)
+ usbduxsigma_unlink_urbs(devpriv->ao_urbs, devpriv->n_ao_urbs);
- if (do_unlink)
- ret = usbduxsub_unlink_OutURBs(this_usbduxsub);
-
- this_usbduxsub->ao_cmd_running = 0;
-
- return ret;
+ devpriv->ao_cmd_running = 0;
}
-/* force unlink, is called by comedi */
-static int usbdux_ao_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
+static int usbduxsigma_ao_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- struct usbduxsub *this_usbduxsub = dev->private;
- int res = 0;
+ struct usbduxsigma_private *devpriv = dev->private;
- if (!this_usbduxsub)
- return -EFAULT;
-
- /* prevent other CPUs from submitting a command just now */
- down(&this_usbduxsub->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
+ down(&devpriv->sem);
/* unlink only if it is really running */
- res = usbdux_ao_stop(this_usbduxsub, this_usbduxsub->ao_cmd_running);
- up(&this_usbduxsub->sem);
- return res;
+ usbduxsigma_ao_stop(dev, devpriv->ao_cmd_running);
+ up(&devpriv->sem);
+
+ return 0;
}
-static void usbduxsub_ao_IsocIrq(struct urb *urb)
+static void usbduxsigma_ao_urb_complete(struct urb *urb)
{
- int i, ret;
+ struct comedi_device *dev = urb->context;
+ struct usbduxsigma_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->write_subdev;
uint8_t *datap;
- struct usbduxsub *this_usbduxsub;
- struct comedi_device *this_comedidev;
- struct comedi_subdevice *s;
-
- /* the context variable points to the subdevice */
- this_comedidev = urb->context;
- /* the private structure of the subdevice is struct usbduxsub */
- this_usbduxsub = this_comedidev->private;
-
- s = &this_comedidev->subdevices[SUBDEV_DA];
+ int len;
+ int ret;
+ int i;
switch (urb->status) {
case 0:
@@ -563,347 +374,141 @@ static void usbduxsub_ao_IsocIrq(struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
case -ECONNABORTED:
- /* after an unlink command, unplug, ... etc */
- /* no unlink needed here. Already shutting down. */
- if (this_usbduxsub->ao_cmd_running) {
+ /* happens after an unlink command */
+ if (devpriv->ao_cmd_running) {
+ usbduxsigma_ao_stop(dev, 0); /* w/o unlink */
s->async->events |= COMEDI_CB_EOA;
- comedi_event(this_usbduxsub->comedidev, s);
- usbdux_ao_stop(this_usbduxsub, 0);
+ comedi_event(dev, s);
}
return;
default:
/* a real error */
- if (this_usbduxsub->ao_cmd_running) {
- dev_err(&urb->dev->dev,
- "comedi_: Non-zero urb status received in ao "
- "intr context: %d\n", urb->status);
- s->async->events |= COMEDI_CB_ERROR;
- s->async->events |= COMEDI_CB_EOA;
- comedi_event(this_usbduxsub->comedidev, s);
- /* we do an unlink if we are in the high speed mode */
- usbdux_ao_stop(this_usbduxsub, 0);
+ if (devpriv->ao_cmd_running) {
+ dev_err(dev->class_dev,
+ "%s: non-zero urb status (%d)\n",
+ __func__, urb->status);
+ usbduxsigma_ao_stop(dev, 0); /* w/o unlink */
+ s->async->events |= (COMEDI_CB_ERROR | COMEDI_CB_EOA);
+ comedi_event(dev, s);
}
return;
}
- /* are we actually running? */
- if (!(this_usbduxsub->ao_cmd_running))
+ if (!devpriv->ao_cmd_running)
return;
- /* normal operation: executing a command in this subdevice */
- this_usbduxsub->ao_counter--;
- if ((int)this_usbduxsub->ao_counter <= 0) {
- /* timer zero */
- this_usbduxsub->ao_counter = this_usbduxsub->ao_timer;
-
- /* handle non continuous acquisition */
- if (!(this_usbduxsub->ao_continuous)) {
- /* fixed number of samples */
- this_usbduxsub->ao_sample_count--;
- if (this_usbduxsub->ao_sample_count < 0) {
- /* all samples transmitted */
- usbdux_ao_stop(this_usbduxsub, 0);
+ devpriv->ao_counter--;
+ if ((int)devpriv->ao_counter <= 0) {
+ /* timer zero, transfer from comedi */
+ devpriv->ao_counter = devpriv->ao_timer;
+
+ if (!devpriv->ao_continuous) {
+ /* not continuous, fixed number of samples */
+ devpriv->ao_sample_count--;
+ if (devpriv->ao_sample_count < 0) {
+ usbduxsigma_ao_stop(dev, 0); /* w/o unlink */
+ /* acquisition is over, tell comedi */
s->async->events |= COMEDI_CB_EOA;
- comedi_event(this_usbduxsub->comedidev, s);
- /* no resubmit of the urb */
+ comedi_event(dev, s);
return;
}
}
+
/* transmit data to the USB bus */
- ((uint8_t *) (urb->transfer_buffer))[0] =
- s->async->cmd.chanlist_len;
- for (i = 0; i < s->async->cmd.chanlist_len; i++) {
- short temp;
- if (i >= NUMOUTCHANNELS)
- break;
-
- /* pointer to the DA */
- datap =
- (&(((uint8_t *) urb->transfer_buffer)[i * 2 + 1]));
- /* get the data from comedi */
- ret = comedi_buf_get(s->async, &temp);
- datap[0] = temp;
- datap[1] = this_usbduxsub->dac_commands[i];
- /* printk("data[0]=%x, data[1]=%x, data[2]=%x\n", */
- /* datap[0],datap[1],datap[2]); */
+ datap = urb->transfer_buffer;
+ len = s->async->cmd.chanlist_len;
+ *datap++ = len;
+ for (i = 0; i < len; i++) {
+ unsigned int chan = devpriv->ao_chanlist[i];
+ short val;
+
+ ret = comedi_buf_get(s->async, &val);
if (ret < 0) {
- dev_err(&urb->dev->dev,
- "comedi: buffer underflow\n");
- s->async->events |= COMEDI_CB_EOA;
- s->async->events |= COMEDI_CB_OVERFLOW;
+ dev_err(dev->class_dev, "buffer underflow\n");
+ s->async->events |= (COMEDI_CB_EOA |
+ COMEDI_CB_OVERFLOW);
}
- /* transmit data to comedi */
+ *datap++ = val;
+ *datap++ = chan;
+ devpriv->ao_readback[chan] = val;
+
s->async->events |= COMEDI_CB_BLOCK;
- comedi_event(this_usbduxsub->comedidev, s);
+ comedi_event(dev, s);
}
}
+
urb->transfer_buffer_length = SIZEOUTBUF;
- urb->dev = this_usbduxsub->usbdev;
+ urb->dev = comedi_to_usb_dev(dev);
urb->status = 0;
- if (this_usbduxsub->ao_cmd_running) {
- if (this_usbduxsub->high_speed) {
- /* uframes */
- urb->interval = 8;
- } else {
- /* frames */
- urb->interval = 1;
- }
- urb->number_of_packets = 1;
- urb->iso_frame_desc[0].offset = 0;
- urb->iso_frame_desc[0].length = SIZEOUTBUF;
- urb->iso_frame_desc[0].status = 0;
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (ret < 0) {
- dev_err(&urb->dev->dev,
- "comedi_: ao urb resubm failed in int-cont. "
- "ret=%d", ret);
- if (ret == EL2NSYNC)
- dev_err(&urb->dev->dev,
- "buggy USB host controller or bug in "
- "IRQ handling!\n");
-
- s->async->events |= COMEDI_CB_EOA;
- s->async->events |= COMEDI_CB_ERROR;
- comedi_event(this_usbduxsub->comedidev, s);
- /* don't do an unlink here */
- usbdux_ao_stop(this_usbduxsub, 0);
- }
- }
-}
-
-static int usbduxsub_start(struct usbduxsub *usbduxsub)
-{
- int errcode = 0;
- uint8_t *local_transfer_buffer;
-
- local_transfer_buffer = kmalloc(16, GFP_KERNEL);
- if (!local_transfer_buffer)
- return -ENOMEM;
-
- /* 7f92 to zero */
- local_transfer_buffer[0] = 0;
- errcode = usb_control_msg(usbduxsub->usbdev,
- /* create a pipe for a control transfer */
- usb_sndctrlpipe(usbduxsub->usbdev, 0),
- /* bRequest, "Firmware" */
- USBDUXSUB_FIRMWARE,
- /* bmRequestType */
- VENDOR_DIR_OUT,
- /* Value */
- USBDUXSUB_CPUCS,
- /* Index */
- 0x0000,
- /* address of the transfer buffer */
- local_transfer_buffer,
- /* Length */
- 1,
- /* Timeout */
- BULK_TIMEOUT);
- if (errcode < 0)
- dev_err(&usbduxsub->interface->dev,
- "comedi_: control msg failed (start)\n");
-
- kfree(local_transfer_buffer);
- return errcode;
-}
-
-static int usbduxsub_stop(struct usbduxsub *usbduxsub)
-{
- int errcode = 0;
- uint8_t *local_transfer_buffer;
-
- local_transfer_buffer = kmalloc(16, GFP_KERNEL);
- if (!local_transfer_buffer)
- return -ENOMEM;
-
- /* 7f92 to one */
- local_transfer_buffer[0] = 1;
- errcode = usb_control_msg(usbduxsub->usbdev,
- usb_sndctrlpipe(usbduxsub->usbdev, 0),
- /* bRequest, "Firmware" */
- USBDUXSUB_FIRMWARE,
- /* bmRequestType */
- VENDOR_DIR_OUT,
- /* Value */
- USBDUXSUB_CPUCS,
- /* Index */
- 0x0000, local_transfer_buffer,
- /* Length */
- 1,
- /* Timeout */
- BULK_TIMEOUT);
- if (errcode < 0)
- dev_err(&usbduxsub->interface->dev,
- "comedi_: control msg failed (stop)\n");
-
- kfree(local_transfer_buffer);
- return errcode;
-}
-
-static int usbduxsub_upload(struct usbduxsub *usbduxsub,
- uint8_t *local_transfer_buffer,
- unsigned int startAddr, unsigned int len)
-{
- int errcode;
-
- errcode = usb_control_msg(usbduxsub->usbdev,
- usb_sndctrlpipe(usbduxsub->usbdev, 0),
- /* brequest, firmware */
- USBDUXSUB_FIRMWARE,
- /* bmRequestType */
- VENDOR_DIR_OUT,
- /* value */
- startAddr,
- /* index */
- 0x0000,
- /* our local safe buffer */
- local_transfer_buffer,
- /* length */
- len,
- /* timeout */
- BULK_TIMEOUT);
- dev_dbg(&usbduxsub->interface->dev, "comedi_: result=%d\n", errcode);
- if (errcode < 0) {
- dev_err(&usbduxsub->interface->dev,
- "comedi_: upload failed\n");
- return errcode;
- }
- return 0;
-}
-
-/* the FX2LP has twice as much as the standard FX2 */
-#define FIRMWARE_MAX_LEN 0x4000
-
-static int firmwareUpload(struct usbduxsub *usbduxsub,
- const u8 *firmwareBinary, int sizeFirmware)
-{
- int ret;
- uint8_t *fwBuf;
-
- if (!firmwareBinary)
- return 0;
-
- if (sizeFirmware > FIRMWARE_MAX_LEN) {
- dev_err(&usbduxsub->interface->dev,
- "usbduxsigma firmware binary it too large for FX2.\n");
- return -ENOMEM;
- }
-
- /* we generate a local buffer for the firmware */
- fwBuf = kmemdup(firmwareBinary, sizeFirmware, GFP_KERNEL);
- if (!fwBuf) {
- dev_err(&usbduxsub->interface->dev,
- "comedi_: mem alloc for firmware failed\n");
- return -ENOMEM;
- }
-
- ret = usbduxsub_stop(usbduxsub);
- if (ret < 0) {
- dev_err(&usbduxsub->interface->dev,
- "comedi_: can not stop firmware\n");
- kfree(fwBuf);
- return ret;
- }
-
- ret = usbduxsub_upload(usbduxsub, fwBuf, 0, sizeFirmware);
- if (ret < 0) {
- dev_err(&usbduxsub->interface->dev,
- "comedi_: firmware upload failed\n");
- kfree(fwBuf);
- return ret;
- }
- ret = usbduxsub_start(usbduxsub);
+ if (devpriv->high_speed)
+ urb->interval = 8; /* uframes */
+ else
+ urb->interval = 1; /* frames */
+ urb->number_of_packets = 1;
+ urb->iso_frame_desc[0].offset = 0;
+ urb->iso_frame_desc[0].length = SIZEOUTBUF;
+ urb->iso_frame_desc[0].status = 0;
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret < 0) {
- dev_err(&usbduxsub->interface->dev,
- "comedi_: can not start firmware\n");
- kfree(fwBuf);
- return ret;
+ dev_err(dev->class_dev,
+ "%s: urb resubmit failed (%d)\n",
+ __func__, ret);
+ if (ret == -EL2NSYNC)
+ dev_err(dev->class_dev,
+ "buggy USB host controller or bug in IRQ handler\n");
+ usbduxsigma_ao_stop(dev, 0); /* w/o unlink */
+ s->async->events |= (COMEDI_CB_EOA | COMEDI_CB_ERROR);
+ comedi_event(dev, s);
}
- kfree(fwBuf);
- return 0;
}
-static int usbduxsub_submit_InURBs(struct usbduxsub *usbduxsub)
+static int usbduxsigma_submit_urbs(struct comedi_device *dev,
+ struct urb **urbs, int num_urbs,
+ int input_urb)
{
- int i, errFlag;
-
- if (!usbduxsub)
- return -EFAULT;
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct usbduxsigma_private *devpriv = dev->private;
+ struct urb *urb;
+ int ret;
+ int i;
/* Submit all URBs and start the transfer on the bus */
- for (i = 0; i < usbduxsub->numOfInBuffers; i++) {
- /* in case of a resubmission after an unlink... */
- usbduxsub->urbIn[i]->interval = usbduxsub->ai_interval;
- usbduxsub->urbIn[i]->context = usbduxsub->comedidev;
- usbduxsub->urbIn[i]->dev = usbduxsub->usbdev;
- usbduxsub->urbIn[i]->status = 0;
- usbduxsub->urbIn[i]->transfer_flags = URB_ISO_ASAP;
- dev_dbg(&usbduxsub->interface->dev,
- "comedi%d: submitting in-urb[%d]: %p,%p intv=%d\n",
- usbduxsub->comedidev->minor, i,
- (usbduxsub->urbIn[i]->context),
- (usbduxsub->urbIn[i]->dev),
- (usbduxsub->urbIn[i]->interval));
- errFlag = usb_submit_urb(usbduxsub->urbIn[i], GFP_ATOMIC);
- if (errFlag) {
- dev_err(&usbduxsub->interface->dev,
- "comedi_: ai: usb_submit_urb(%d) error %d\n",
- i, errFlag);
- return errFlag;
- }
- }
- return 0;
-}
-
-static int usbduxsub_submit_OutURBs(struct usbduxsub *usbduxsub)
-{
- int i, errFlag;
+ for (i = 0; i < num_urbs; i++) {
+ urb = urbs[i];
- if (!usbduxsub)
- return -EFAULT;
-
- for (i = 0; i < usbduxsub->numOfOutBuffers; i++) {
- dev_dbg(&usbduxsub->interface->dev,
- "comedi_: submitting out-urb[%d]\n", i);
/* in case of a resubmission after an unlink... */
- usbduxsub->urbOut[i]->context = usbduxsub->comedidev;
- usbduxsub->urbOut[i]->dev = usbduxsub->usbdev;
- usbduxsub->urbOut[i]->status = 0;
- usbduxsub->urbOut[i]->transfer_flags = URB_ISO_ASAP;
- errFlag = usb_submit_urb(usbduxsub->urbOut[i], GFP_ATOMIC);
- if (errFlag) {
- dev_err(&usbduxsub->interface->dev,
- "comedi_: ao: usb_submit_urb(%d) error %d\n",
- i, errFlag);
- return errFlag;
- }
+ if (input_urb)
+ urb->interval = devpriv->ai_interval;
+ urb->context = dev;
+ urb->dev = usb;
+ urb->status = 0;
+ urb->transfer_flags = URB_ISO_ASAP;
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret)
+ return ret;
}
return 0;
}
-static int chanToInterval(int nChannels)
+static int usbduxsigma_chans_to_interval(int num_chan)
{
- if (nChannels <= 2)
- /* 4kHz */
- return 2;
- if (nChannels <= 8)
- /* 2kHz */
- return 4;
- /* 1kHz */
- return 8;
+ if (num_chan <= 2)
+ return 2; /* 4kHz */
+ if (num_chan <= 8)
+ return 4; /* 2kHz */
+ return 8; /* 1kHz */
}
-static int usbdux_ai_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
+static int usbduxsigma_ai_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
{
- struct usbduxsub *this_usbduxsub = dev->private;
- int err = 0, i;
- unsigned int tmpTimer;
-
- if (!(this_usbduxsub->probed))
- return -ENODEV;
+ struct usbduxsigma_private *devpriv = dev->private;
+ int high_speed = devpriv->high_speed;
+ int interval = usbduxsigma_chans_to_interval(cmd->chanlist_len);
+ int err = 0;
/* Step 1 : check if triggers are trivially valid */
@@ -934,34 +539,28 @@ static int usbdux_ai_cmdtest(struct comedi_device *dev,
err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
if (cmd->scan_begin_src == TRIG_TIMER) {
- if (this_usbduxsub->high_speed) {
+ unsigned int tmp;
+
+ if (high_speed) {
/*
* In high speed mode microframes are possible.
* However, during one microframe we can roughly
* sample two channels. Thus, the more channels
* are in the channel list the more time we need.
*/
- i = chanToInterval(cmd->chanlist_len);
err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg,
- (1000000 / 8 * i));
- /* now calc the real sampling rate with all the
- * rounding errors */
- tmpTimer =
- ((unsigned int)(cmd->scan_begin_arg / 125000)) *
- 125000;
+ (1000000 / 8 * interval));
+
+ tmp = (cmd->scan_begin_arg / 125000) * 125000;
} else {
/* full speed */
/* 1kHz scans every USB frame */
err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg,
1000000);
- /*
- * calc the real sampling rate with the rounding errors
- */
- tmpTimer = ((unsigned int)(cmd->scan_begin_arg /
- 1000000)) * 1000000;
+
+ tmp = (cmd->scan_begin_arg / 1000000) * 1000000;
}
- err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg,
- tmpTimer);
+ err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, tmp);
}
err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
@@ -976,6 +575,37 @@ static int usbdux_ai_cmdtest(struct comedi_device *dev,
if (err)
return 3;
+ /* Step 4: fix up any arguments */
+
+ if (high_speed) {
+ /*
+ * every 2 channels get a time window of 125us. Thus, if we
+ * sample all 16 channels we need 1ms. If we sample only one
+ * channel we need only 125us
+ */
+ devpriv->ai_interval = interval;
+ devpriv->ai_timer = cmd->scan_begin_arg / (125000 * interval);
+ } else {
+ /* interval always 1ms */
+ devpriv->ai_interval = 1;
+ devpriv->ai_timer = cmd->scan_begin_arg / 1000000;
+ }
+ if (devpriv->ai_timer < 1)
+ err |= -EINVAL;
+
+ if (cmd->stop_src == TRIG_COUNT) {
+ /* data arrives as one packet */
+ devpriv->ai_sample_count = cmd->stop_arg;
+ devpriv->ai_continuous = 0;
+ } else {
+ /* continuous acquisition */
+ devpriv->ai_continuous = 1;
+ devpriv->ai_sample_count = 0;
+ }
+
+ if (err)
+ return 4;
+
return 0;
}
@@ -993,536 +623,280 @@ static void create_adc_command(unsigned int chan,
(*muxsg1) = (*muxsg1) | (1 << (chan-8));
}
+static int usbbuxsigma_send_cmd(struct comedi_device *dev, int cmd_type)
+{
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct usbduxsigma_private *devpriv = dev->private;
+ int nsent;
-/* bulk transfers to usbdux */
-
-#define SENDADCOMMANDS 0
-#define SENDDACOMMANDS 1
-#define SENDDIOCONFIGCOMMAND 2
-#define SENDDIOBITSCOMMAND 3
-#define SENDSINGLEAD 4
-#define SENDPWMON 7
-#define SENDPWMOFF 8
+ devpriv->dux_commands[0] = cmd_type;
-static int send_dux_commands(struct usbduxsub *this_usbduxsub, int cmd_type)
-{
- int result, nsent;
-
- this_usbduxsub->dux_commands[0] = cmd_type;
-#ifdef NOISY_DUX_DEBUGBUG
- printk(KERN_DEBUG "comedi%d: usbdux: dux_commands: ",
- this_usbduxsub->comedidev->minor);
- for (result = 0; result < SIZEOFDUXBUFFER; result++)
- printk(" %02x", this_usbduxsub->dux_commands[result]);
- printk("\n");
-#endif
- result = usb_bulk_msg(this_usbduxsub->usbdev,
- usb_sndbulkpipe(this_usbduxsub->usbdev,
- COMMAND_OUT_EP),
- this_usbduxsub->dux_commands, SIZEOFDUXBUFFER,
- &nsent, BULK_TIMEOUT);
- if (result < 0)
- dev_err(&this_usbduxsub->interface->dev, "comedi%d: "
- "could not transmit dux_command to the usb-device, "
- "err=%d\n", this_usbduxsub->comedidev->minor, result);
-
- return result;
+ return usb_bulk_msg(usb, usb_sndbulkpipe(usb, 1),
+ devpriv->dux_commands, SIZEOFDUXBUFFER,
+ &nsent, BULK_TIMEOUT);
}
-static int receive_dux_commands(struct usbduxsub *this_usbduxsub, int command)
+static int usbduxsigma_receive_cmd(struct comedi_device *dev, int command)
{
- int result = (-EFAULT);
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct usbduxsigma_private *devpriv = dev->private;
int nrec;
+ int ret;
int i;
for (i = 0; i < RETRIES; i++) {
- result = usb_bulk_msg(this_usbduxsub->usbdev,
- usb_rcvbulkpipe(this_usbduxsub->usbdev,
- COMMAND_IN_EP),
- this_usbduxsub->insnBuffer, SIZEINSNBUF,
- &nrec, BULK_TIMEOUT);
- if (result < 0) {
- dev_err(&this_usbduxsub->interface->dev, "comedi%d: "
- "insn: USB error %d "
- "while receiving DUX command"
- "\n", this_usbduxsub->comedidev->minor,
- result);
- return result;
- }
- if (this_usbduxsub->insnBuffer[0] == command)
- return result;
+ ret = usb_bulk_msg(usb, usb_rcvbulkpipe(usb, 8),
+ devpriv->insn_buf, SIZEINSNBUF,
+ &nrec, BULK_TIMEOUT);
+ if (ret < 0)
+ return ret;
+
+ if (devpriv->insn_buf[0] == command)
+ return 0;
}
- /* this is only reached if the data has been requested a couple of
- * times */
- dev_err(&this_usbduxsub->interface->dev, "comedi%d: insn: "
- "wrong data returned from firmware: want %d, got %d.\n",
- this_usbduxsub->comedidev->minor, command,
- this_usbduxsub->insnBuffer[0]);
+ /*
+ * This is only reached if the data has been requested a
+ * couple of times and the command was not received.
+ */
return -EFAULT;
}
-static int usbdux_ai_inttrig(struct comedi_device *dev,
- struct comedi_subdevice *s, unsigned int trignum)
+static int usbduxsigma_ai_inttrig(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int trignum)
{
+ struct usbduxsigma_private *devpriv = dev->private;
int ret;
- struct usbduxsub *this_usbduxsub = dev->private;
- if (!this_usbduxsub)
- return -EFAULT;
-
- down(&this_usbduxsub->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: usbdux_ai_inttrig\n", dev->minor);
-
- if (trignum != 0) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: usbdux_ai_inttrig: invalid trignum\n",
- dev->minor);
- up(&this_usbduxsub->sem);
+
+ if (trignum != 0)
return -EINVAL;
- }
- if (!(this_usbduxsub->ai_cmd_running)) {
- this_usbduxsub->ai_cmd_running = 1;
- ret = usbduxsub_submit_InURBs(this_usbduxsub);
+
+ down(&devpriv->sem);
+ if (!devpriv->ai_cmd_running) {
+ devpriv->ai_cmd_running = 1;
+ ret = usbduxsigma_submit_urbs(dev, devpriv->ai_urbs,
+ devpriv->n_ai_urbs, 1);
if (ret < 0) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: usbdux_ai_inttrig: "
- "urbSubmit: err=%d\n", dev->minor, ret);
- this_usbduxsub->ai_cmd_running = 0;
- up(&this_usbduxsub->sem);
+ devpriv->ai_cmd_running = 0;
+ up(&devpriv->sem);
return ret;
}
s->async->inttrig = NULL;
- } else {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: ai_inttrig but acqu is already running\n",
- dev->minor);
}
- up(&this_usbduxsub->sem);
+ up(&devpriv->sem);
+
return 1;
}
-static int usbdux_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
+static int usbduxsigma_ai_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
+ struct usbduxsigma_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int chan;
- int i, ret;
- struct usbduxsub *this_usbduxsub = dev->private;
- int result;
+ unsigned int len = cmd->chanlist_len;
uint8_t muxsg0 = 0;
uint8_t muxsg1 = 0;
uint8_t sysred = 0;
+ int ret;
+ int i;
- if (!this_usbduxsub)
- return -EFAULT;
-
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: usbdux_ai_cmd\n", dev->minor);
-
- /* block other CPUs from starting an ai_cmd */
- down(&this_usbduxsub->sem);
+ down(&devpriv->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
- if (this_usbduxsub->ai_cmd_running) {
- dev_err(&this_usbduxsub->interface->dev, "comedi%d: "
- "ai_cmd not possible. Another ai_cmd is running.\n",
- dev->minor);
- up(&this_usbduxsub->sem);
- return -EBUSY;
- }
/* set current channel of the running acquisition to zero */
s->async->cur_chan = 0;
+ for (i = 0; i < len; i++) {
+ unsigned int chan = CR_CHAN(cmd->chanlist[i]);
- /* first the number of channels per time step */
- this_usbduxsub->dux_commands[1] = cmd->chanlist_len;
-
- /* CONFIG0 */
- this_usbduxsub->dux_commands[2] = 0x12;
-
- /* CONFIG1: 23kHz sampling rate, delay = 0us, */
- this_usbduxsub->dux_commands[3] = 0x03;
-
- /* CONFIG3: differential channels off */
- this_usbduxsub->dux_commands[4] = 0x00;
-
- for (i = 0; i < cmd->chanlist_len; i++) {
- chan = CR_CHAN(cmd->chanlist[i]);
create_adc_command(chan, &muxsg0, &muxsg1);
- if (i >= NUMCHANNELS) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: channel list too long\n",
- dev->minor);
- break;
- }
- }
- this_usbduxsub->dux_commands[5] = muxsg0;
- this_usbduxsub->dux_commands[6] = muxsg1;
- this_usbduxsub->dux_commands[7] = sysred;
-
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi %d: sending commands to the usb device: size=%u\n",
- dev->minor, NUMCHANNELS);
-
- result = send_dux_commands(this_usbduxsub, SENDADCOMMANDS);
- if (result < 0) {
- up(&this_usbduxsub->sem);
- return result;
}
- if (this_usbduxsub->high_speed) {
- /*
- * every 2 channels get a time window of 125us. Thus, if we
- * sample all 16 channels we need 1ms. If we sample only one
- * channel we need only 125us
- */
- this_usbduxsub->ai_interval =
- chanToInterval(cmd->chanlist_len);
- this_usbduxsub->ai_timer = cmd->scan_begin_arg / (125000 *
- (this_usbduxsub->
- ai_interval));
- } else {
- /* interval always 1ms */
- this_usbduxsub->ai_interval = 1;
- this_usbduxsub->ai_timer = cmd->scan_begin_arg / 1000000;
- }
- if (this_usbduxsub->ai_timer < 1) {
- dev_err(&this_usbduxsub->interface->dev, "comedi%d: ai_cmd: "
- "timer=%d, scan_begin_arg=%d. "
- "Not properly tested by cmdtest?\n", dev->minor,
- this_usbduxsub->ai_timer, cmd->scan_begin_arg);
- up(&this_usbduxsub->sem);
- return -EINVAL;
- }
- this_usbduxsub->ai_counter = this_usbduxsub->ai_timer;
+ devpriv->dux_commands[1] = len; /* num channels per time step */
+ devpriv->dux_commands[2] = 0x12; /* CONFIG0 */
+ devpriv->dux_commands[3] = 0x03; /* CONFIG1: 23kHz sample, delay 0us */
+ devpriv->dux_commands[4] = 0x00; /* CONFIG3: diff. channels off */
+ devpriv->dux_commands[5] = muxsg0;
+ devpriv->dux_commands[6] = muxsg1;
+ devpriv->dux_commands[7] = sysred;
- if (cmd->stop_src == TRIG_COUNT) {
- /* data arrives as one packet */
- this_usbduxsub->ai_sample_count = cmd->stop_arg;
- this_usbduxsub->ai_continuous = 0;
- } else {
- /* continuous acquisition */
- this_usbduxsub->ai_continuous = 1;
- this_usbduxsub->ai_sample_count = 0;
+ ret = usbbuxsigma_send_cmd(dev, USBBUXSIGMA_AD_CMD);
+ if (ret < 0) {
+ up(&devpriv->sem);
+ return ret;
}
+ devpriv->ai_counter = devpriv->ai_timer;
+
if (cmd->start_src == TRIG_NOW) {
/* enable this acquisition operation */
- this_usbduxsub->ai_cmd_running = 1;
- ret = usbduxsub_submit_InURBs(this_usbduxsub);
+ devpriv->ai_cmd_running = 1;
+ ret = usbduxsigma_submit_urbs(dev, devpriv->ai_urbs,
+ devpriv->n_ai_urbs, 1);
if (ret < 0) {
- this_usbduxsub->ai_cmd_running = 0;
- /* fixme: unlink here?? */
- up(&this_usbduxsub->sem);
+ devpriv->ai_cmd_running = 0;
+ up(&devpriv->sem);
return ret;
}
s->async->inttrig = NULL;
- } else {
- /* TRIG_INT */
- /* don't enable the acquision operation */
- /* wait for an internal signal */
- s->async->inttrig = usbdux_ai_inttrig;
+ } else { /* TRIG_INT */
+ /* wait for an internal signal and submit the urbs later */
+ s->async->inttrig = usbduxsigma_ai_inttrig;
}
- up(&this_usbduxsub->sem);
+
+ up(&devpriv->sem);
+
return 0;
}
-/* Mode 0 is used to get a single conversion on demand */
-static int usbdux_ai_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int usbduxsigma_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int i;
- int32_t one = 0;
- int chan;
- int err;
- struct usbduxsub *this_usbduxsub = dev->private;
+ struct usbduxsigma_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
uint8_t muxsg0 = 0;
uint8_t muxsg1 = 0;
uint8_t sysred = 0;
+ int ret;
+ int i;
- if (!this_usbduxsub)
- return 0;
-
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: ai_insn_read, insn->n=%d, insn->subdev=%d\n",
- dev->minor, insn->n, insn->subdev);
-
- down(&this_usbduxsub->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
- if (this_usbduxsub->ai_cmd_running) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: ai_insn_read not possible. "
- "Async Command is running.\n", dev->minor);
- up(&this_usbduxsub->sem);
- return 0;
+ down(&devpriv->sem);
+ if (devpriv->ai_cmd_running) {
+ up(&devpriv->sem);
+ return -EBUSY;
}
- /* sample one channel */
- /* CONFIG0: chopper on */
- this_usbduxsub->dux_commands[1] = 0x16;
-
- /* CONFIG1: 2kHz sampling rate */
- this_usbduxsub->dux_commands[2] = 0x80;
-
- /* CONFIG3: differential channels off */
- this_usbduxsub->dux_commands[3] = 0x00;
-
- chan = CR_CHAN(insn->chanspec);
create_adc_command(chan, &muxsg0, &muxsg1);
- this_usbduxsub->dux_commands[4] = muxsg0;
- this_usbduxsub->dux_commands[5] = muxsg1;
- this_usbduxsub->dux_commands[6] = sysred;
+ /* Mode 0 is used to get a single conversion on demand */
+ devpriv->dux_commands[1] = 0x16; /* CONFIG0: chopper on */
+ devpriv->dux_commands[2] = 0x80; /* CONFIG1: 2kHz sampling rate */
+ devpriv->dux_commands[3] = 0x00; /* CONFIG3: diff. channels off */
+ devpriv->dux_commands[4] = muxsg0;
+ devpriv->dux_commands[5] = muxsg1;
+ devpriv->dux_commands[6] = sysred;
/* adc commands */
- err = send_dux_commands(this_usbduxsub, SENDSINGLEAD);
- if (err < 0) {
- up(&this_usbduxsub->sem);
- return err;
+ ret = usbbuxsigma_send_cmd(dev, USBDUXSIGMA_SINGLE_AD_CMD);
+ if (ret < 0) {
+ up(&devpriv->sem);
+ return ret;
}
for (i = 0; i < insn->n; i++) {
- err = receive_dux_commands(this_usbduxsub, SENDSINGLEAD);
- if (err < 0) {
- up(&this_usbduxsub->sem);
- return 0;
- }
- /* 32 bits big endian from the A/D converter */
- one = be32_to_cpu(*((int32_t *)
- ((this_usbduxsub->insnBuffer)+1)));
- /* mask out the status byte */
- one = one & 0x00ffffff;
- /* turn it into an unsigned integer */
- one = one ^ 0x00800000;
- data[i] = one;
- }
- up(&this_usbduxsub->sem);
- return i;
-}
-
-
-
+ int32_t val;
-static int usbdux_getstatusinfo(struct comedi_device *dev, int chan)
-{
- struct usbduxsub *this_usbduxsub = dev->private;
- uint8_t sysred = 0;
- uint32_t one;
- int err;
-
- if (!this_usbduxsub)
- return 0;
+ ret = usbduxsigma_receive_cmd(dev, USBDUXSIGMA_SINGLE_AD_CMD);
+ if (ret < 0) {
+ up(&devpriv->sem);
+ return ret;
+ }
- if (this_usbduxsub->ai_cmd_running) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: status read not possible. "
- "Async Command is running.\n", dev->minor);
- return 0;
- }
+ /* 32 bits big endian from the A/D converter */
+ val = be32_to_cpu(*((int32_t *)((devpriv->insn_buf) + 1)));
+ val &= 0x00ffffff; /* strip status byte */
+ val ^= 0x00800000; /* convert to unsigned */
- /* CONFIG0 */
- this_usbduxsub->dux_commands[1] = 0x12;
-
- /* CONFIG1: 2kHz sampling rate */
- this_usbduxsub->dux_commands[2] = 0x80;
-
- /* CONFIG3: differential channels off */
- this_usbduxsub->dux_commands[3] = 0x00;
-
- if (chan == 1) {
- /* ADC offset */
- sysred = sysred | 1;
- } else if (chan == 2) {
- /* VCC */
- sysred = sysred | 4;
- } else if (chan == 3) {
- /* temperature */
- sysred = sysred | 8;
- } else if (chan == 4) {
- /* gain */
- sysred = sysred | 16;
- } else if (chan == 5) {
- /* ref */
- sysred = sysred | 32;
+ data[i] = val;
}
+ up(&devpriv->sem);
- this_usbduxsub->dux_commands[4] = 0;
- this_usbduxsub->dux_commands[5] = 0;
- this_usbduxsub->dux_commands[6] = sysred;
-
- /* adc commands */
- err = send_dux_commands(this_usbduxsub, SENDSINGLEAD);
- if (err < 0)
- return err;
-
- err = receive_dux_commands(this_usbduxsub, SENDSINGLEAD);
- if (err < 0)
- return err;
-
- /* 32 bits big endian from the A/D converter */
- one = be32_to_cpu(*((int32_t *)((this_usbduxsub->insnBuffer)+1)));
- /* mask out the status byte */
- one = one & 0x00ffffff;
- one = one ^ 0x00800000;
-
- return (int)one;
+ return insn->n;
}
-
-
-
-
-
-/************************************/
-/* analog out */
-
-static int usbdux_ao_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int usbduxsigma_ao_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
+ struct usbduxsigma_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
int i;
- int chan = CR_CHAN(insn->chanspec);
- struct usbduxsub *this_usbduxsub = dev->private;
-
- if (!this_usbduxsub)
- return -EFAULT;
- down(&this_usbduxsub->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
+ down(&devpriv->sem);
for (i = 0; i < insn->n; i++)
- data[i] = this_usbduxsub->outBuffer[chan];
+ data[i] = devpriv->ao_readback[chan];
+ up(&devpriv->sem);
- up(&this_usbduxsub->sem);
- return i;
+ return insn->n;
}
-static int usbdux_ao_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int usbduxsigma_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int i, err;
- int chan = CR_CHAN(insn->chanspec);
- struct usbduxsub *this_usbduxsub = dev->private;
-
- if (!this_usbduxsub)
- return -EFAULT;
-
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: ao_insn_write\n", dev->minor);
+ struct usbduxsigma_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int ret;
+ int i;
- down(&this_usbduxsub->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
- if (this_usbduxsub->ao_cmd_running) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: ao_insn_write: "
- "ERROR: asynchronous ao_cmd is running\n", dev->minor);
- up(&this_usbduxsub->sem);
- return 0;
+ down(&devpriv->sem);
+ if (devpriv->ao_cmd_running) {
+ up(&devpriv->sem);
+ return -EBUSY;
}
for (i = 0; i < insn->n; i++) {
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: ao_insn_write: data[chan=%d,i=%d]=%d\n",
- dev->minor, chan, i, data[i]);
-
- /* number of channels: 1 */
- this_usbduxsub->dux_commands[1] = 1;
- /* channel number */
- this_usbduxsub->dux_commands[2] = data[i];
- this_usbduxsub->outBuffer[chan] = data[i];
- this_usbduxsub->dux_commands[3] = chan;
- err = send_dux_commands(this_usbduxsub, SENDDACOMMANDS);
- if (err < 0) {
- up(&this_usbduxsub->sem);
- return err;
+ devpriv->dux_commands[1] = 1; /* num channels */
+ devpriv->dux_commands[2] = data[i]; /* value */
+ devpriv->dux_commands[3] = chan; /* channel number */
+ ret = usbbuxsigma_send_cmd(dev, USBDUXSIGMA_DA_CMD);
+ if (ret < 0) {
+ up(&devpriv->sem);
+ return ret;
}
+ devpriv->ao_readback[chan] = data[i];
}
- up(&this_usbduxsub->sem);
+ up(&devpriv->sem);
- return i;
+ return insn->n;
}
-static int usbdux_ao_inttrig(struct comedi_device *dev,
- struct comedi_subdevice *s, unsigned int trignum)
+static int usbduxsigma_ao_inttrig(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int trignum)
{
+ struct usbduxsigma_private *devpriv = dev->private;
int ret;
- struct usbduxsub *this_usbduxsub = dev->private;
-
- if (!this_usbduxsub)
- return -EFAULT;
- down(&this_usbduxsub->sem);
+ if (trignum != 0)
+ return -EINVAL;
- if (!(this_usbduxsub->probed)) {
- ret = -ENODEV;
- goto out;
- }
- if (trignum != 0) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: usbdux_ao_inttrig: invalid trignum\n",
- dev->minor);
- ret = -EINVAL;
- goto out;
- }
- if (!(this_usbduxsub->ao_cmd_running)) {
- this_usbduxsub->ao_cmd_running = 1;
- ret = usbduxsub_submit_OutURBs(this_usbduxsub);
+ down(&devpriv->sem);
+ if (!devpriv->ao_cmd_running) {
+ devpriv->ao_cmd_running = 1;
+ ret = usbduxsigma_submit_urbs(dev, devpriv->ao_urbs,
+ devpriv->n_ao_urbs, 0);
if (ret < 0) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: usbdux_ao_inttrig: submitURB: "
- "err=%d\n", dev->minor, ret);
- this_usbduxsub->ao_cmd_running = 0;
- goto out;
+ devpriv->ao_cmd_running = 0;
+ up(&devpriv->sem);
+ return ret;
}
s->async->inttrig = NULL;
- } else {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: ao_inttrig but acqu is already running.\n",
- dev->minor);
}
- ret = 1;
-out:
- up(&this_usbduxsub->sem);
- return ret;
+ up(&devpriv->sem);
+
+ return 1;
}
-static int usbdux_ao_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
+static int usbduxsigma_ao_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
{
- struct usbduxsub *this_usbduxsub = dev->private;
+ struct usbduxsigma_private *devpriv = dev->private;
int err = 0;
+ int high_speed;
unsigned int flags;
- if (!this_usbduxsub)
- return -EFAULT;
-
- if (!(this_usbduxsub->probed))
- return -ENODEV;
-
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: usbdux_ao_cmdtest\n", dev->minor);
+ /* high speed conversions are not used yet */
+ high_speed = 0; /* (devpriv->high_speed) */
/* Step 1 : check if triggers are trivially valid */
err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT);
- if (0) { /* (this_usbduxsub->high_speed) */
+ if (high_speed) {
/*
* start immediately a new scan
+ * the sampling rate is set by the conversion rate
@@ -1538,8 +912,10 @@ static int usbdux_ao_cmdtest(struct comedi_device *dev,
err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
- if (err)
+ if (err) {
+ up(&devpriv->sem);
return 1;
+ }
/* Step 2a : make sure trigger sources are unique */
@@ -1578,272 +954,175 @@ static int usbdux_ao_cmdtest(struct comedi_device *dev,
if (err)
return 3;
- return 0;
-}
+ /* Step 4: fix up any arguments */
-static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int chan, gain;
- int i, ret;
- struct usbduxsub *this_usbduxsub = dev->private;
-
- if (!this_usbduxsub)
- return -EFAULT;
-
- down(&this_usbduxsub->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: %s\n", dev->minor, __func__);
-
- /* set current channel of the running acquisition to zero */
- s->async->cur_chan = 0;
- for (i = 0; i < cmd->chanlist_len; ++i) {
- chan = CR_CHAN(cmd->chanlist[i]);
- gain = CR_RANGE(cmd->chanlist[i]);
- if (i >= NUMOUTCHANNELS) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: %s: channel list too long\n",
- dev->minor, __func__);
- break;
- }
- this_usbduxsub->dac_commands[i] = chan;
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: dac command for ch %d is %x\n",
- dev->minor, i, this_usbduxsub->dac_commands[i]);
- }
-
- /* we count in steps of 1ms (125us) */
- /* 125us mode not used yet */
- if (0) { /* (this_usbduxsub->high_speed) */
- /* 125us */
+ /* we count in timer steps */
+ if (high_speed) {
/* timing of the conversion itself: every 125 us */
- this_usbduxsub->ao_timer = cmd->convert_arg / 125000;
+ devpriv->ao_timer = cmd->convert_arg / 125000;
} else {
- /* 1ms */
- /* timing of the scan: we get all channels at once */
- this_usbduxsub->ao_timer = cmd->scan_begin_arg / 1000000;
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: scan_begin_src=%d, scan_begin_arg=%d, "
- "convert_src=%d, convert_arg=%d\n", dev->minor,
- cmd->scan_begin_src, cmd->scan_begin_arg,
- cmd->convert_src, cmd->convert_arg);
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: ao_timer=%d (ms)\n",
- dev->minor, this_usbduxsub->ao_timer);
- if (this_usbduxsub->ao_timer < 1) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: usbdux: ao_timer=%d, "
- "scan_begin_arg=%d. "
- "Not properly tested by cmdtest?\n",
- dev->minor, this_usbduxsub->ao_timer,
- cmd->scan_begin_arg);
- up(&this_usbduxsub->sem);
- return -EINVAL;
- }
+ /*
+ * timing of the scan: every 1ms
+ * we get all channels at once
+ */
+ devpriv->ao_timer = cmd->scan_begin_arg / 1000000;
}
- this_usbduxsub->ao_counter = this_usbduxsub->ao_timer;
+ if (devpriv->ao_timer < 1)
+ err |= -EINVAL;
if (cmd->stop_src == TRIG_COUNT) {
- /* not continuous */
- /* counter */
- /* high speed also scans everything at once */
- if (0) { /* (this_usbduxsub->high_speed) */
- this_usbduxsub->ao_sample_count =
- (cmd->stop_arg) * (cmd->scan_end_arg);
+ /* not continuous, use counter */
+ if (high_speed) {
+ /* high speed also scans everything at once */
+ devpriv->ao_sample_count = cmd->stop_arg *
+ cmd->scan_end_arg;
} else {
- /* there's no scan as the scan has been */
- /* perf inside the FX2 */
- /* data arrives as one packet */
- this_usbduxsub->ao_sample_count = cmd->stop_arg;
+ /*
+ * There's no scan as the scan has been
+ * handled inside the FX2. Data arrives as
+ * one packet.
+ */
+ devpriv->ao_sample_count = cmd->stop_arg;
}
- this_usbduxsub->ao_continuous = 0;
+ devpriv->ao_continuous = 0;
} else {
/* continuous acquisition */
- this_usbduxsub->ao_continuous = 1;
- this_usbduxsub->ao_sample_count = 0;
+ devpriv->ao_continuous = 1;
+ devpriv->ao_sample_count = 0;
}
+ if (err)
+ return 4;
+
+ return 0;
+}
+
+static int usbduxsigma_ao_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct usbduxsigma_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ int ret;
+ int i;
+
+ down(&devpriv->sem);
+
+ /* set current channel of the running acquisition to zero */
+ s->async->cur_chan = 0;
+ for (i = 0; i < cmd->chanlist_len; ++i)
+ devpriv->ao_chanlist[i] = CR_CHAN(cmd->chanlist[i]);
+
+ devpriv->ao_counter = devpriv->ao_timer;
+
if (cmd->start_src == TRIG_NOW) {
/* enable this acquisition operation */
- this_usbduxsub->ao_cmd_running = 1;
- ret = usbduxsub_submit_OutURBs(this_usbduxsub);
+ devpriv->ao_cmd_running = 1;
+ ret = usbduxsigma_submit_urbs(dev, devpriv->ao_urbs,
+ devpriv->n_ao_urbs, 0);
if (ret < 0) {
- this_usbduxsub->ao_cmd_running = 0;
- /* fixme: unlink here?? */
- up(&this_usbduxsub->sem);
+ devpriv->ao_cmd_running = 0;
+ up(&devpriv->sem);
return ret;
}
s->async->inttrig = NULL;
- } else {
- /* TRIG_INT */
- /* submit the urbs later */
- /* wait for an internal signal */
- s->async->inttrig = usbdux_ao_inttrig;
+ } else { /* TRIG_INT */
+ /* wait for an internal signal and submit the urbs later */
+ s->async->inttrig = usbduxsigma_ao_inttrig;
}
- up(&this_usbduxsub->sem);
+ up(&devpriv->sem);
+
return 0;
}
-static int usbdux_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int usbduxsigma_dio_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int chan = CR_CHAN(insn->chanspec);
+ int ret;
- /* The input or output configuration of each digital line is
- * configured by a special insn_config instruction. chanspec
- * contains the channel to be changed, and data[0] contains the
- * value COMEDI_INPUT or COMEDI_OUTPUT. */
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= 1 << chan; /* 1 means Out */
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~(1 << chan);
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] =
- (s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
- break;
- default:
- return -EINVAL;
- break;
- }
- /* we don't tell the firmware here as it would take 8 frames */
- /* to submit the information. We do it in the insn_bits. */
+ /*
+ * We don't tell the firmware here as it would take 8 frames
+ * to submit the information. We do it in the (*insn_bits).
+ */
return insn->n;
}
-static int usbdux_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int usbduxsigma_dio_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
+ struct usbduxsigma_private *devpriv = dev->private;
+ unsigned int mask = data[0];
+ unsigned int bits = data[1];
+ int ret;
- struct usbduxsub *this_usbduxsub = dev->private;
- int err;
+ down(&devpriv->sem);
- if (!this_usbduxsub)
- return -EFAULT;
+ s->state &= ~mask;
+ s->state |= (bits & mask);
- down(&this_usbduxsub->sem);
+ devpriv->dux_commands[1] = s->io_bits & 0xff;
+ devpriv->dux_commands[4] = s->state & 0xff;
+ devpriv->dux_commands[2] = (s->io_bits >> 8) & 0xff;
+ devpriv->dux_commands[5] = (s->state >> 8) & 0xff;
+ devpriv->dux_commands[3] = (s->io_bits >> 16) & 0xff;
+ devpriv->dux_commands[6] = (s->state >> 16) & 0xff;
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
+ ret = usbbuxsigma_send_cmd(dev, USBDUXSIGMA_DIO_BITS_CMD);
+ if (ret < 0)
+ goto done;
+ ret = usbduxsigma_receive_cmd(dev, USBDUXSIGMA_DIO_BITS_CMD);
+ if (ret < 0)
+ goto done;
- /* The insn data is a mask in data[0] and the new data
- * in data[1], each channel cooresponding to a bit. */
- s->state &= ~data[0];
- s->state |= data[0] & data[1];
- /* The commands are 8 bits wide */
- this_usbduxsub->dux_commands[1] = (s->io_bits) & 0x000000FF;
- this_usbduxsub->dux_commands[4] = (s->state) & 0x000000FF;
- this_usbduxsub->dux_commands[2] = ((s->io_bits) & 0x0000FF00) >> 8;
- this_usbduxsub->dux_commands[5] = ((s->state) & 0x0000FF00) >> 8;
- this_usbduxsub->dux_commands[3] = ((s->io_bits) & 0x00FF0000) >> 16;
- this_usbduxsub->dux_commands[6] = ((s->state) & 0x00FF0000) >> 16;
-
- /* This command also tells the firmware to return */
- /* the digital input lines */
- err = send_dux_commands(this_usbduxsub, SENDDIOBITSCOMMAND);
- if (err < 0) {
- up(&this_usbduxsub->sem);
- return err;
- }
- err = receive_dux_commands(this_usbduxsub, SENDDIOBITSCOMMAND);
- if (err < 0) {
- up(&this_usbduxsub->sem);
- return err;
- }
+ s->state = devpriv->insn_buf[1] |
+ (devpriv->insn_buf[2] << 8) |
+ (devpriv->insn_buf[3] << 16);
- data[1] = (((unsigned int)(this_usbduxsub->insnBuffer[1]))&0xff) |
- ((((unsigned int)(this_usbduxsub->insnBuffer[2]))&0xff) << 8) |
- ((((unsigned int)(this_usbduxsub->insnBuffer[3]))&0xff) << 16);
+ data[1] = s->state;
+ ret = insn->n;
- s->state = data[1];
+done:
+ up(&devpriv->sem);
- up(&this_usbduxsub->sem);
- return insn->n;
+ return ret;
}
-/***********************************/
-/* PWM */
-
-static int usbduxsub_unlink_PwmURBs(struct usbduxsub *usbduxsub_tmp)
+static void usbduxsigma_pwm_stop(struct comedi_device *dev, int do_unlink)
{
- int err = 0;
+ struct usbduxsigma_private *devpriv = dev->private;
- if (usbduxsub_tmp && usbduxsub_tmp->urbPwm) {
- if (usbduxsub_tmp->urbPwm)
- usb_kill_urb(usbduxsub_tmp->urbPwm);
- dev_dbg(&usbduxsub_tmp->interface->dev,
- "comedi: unlinked PwmURB: res=%d\n", err);
+ if (do_unlink) {
+ if (devpriv->pwm_urb)
+ usb_kill_urb(devpriv->pwm_urb);
}
- return err;
-}
-
-/* This cancels a running acquisition operation
- * in any context.
- */
-static int usbdux_pwm_stop(struct usbduxsub *this_usbduxsub, int do_unlink)
-{
- int ret = 0;
-
- if (!this_usbduxsub)
- return -EFAULT;
-
- dev_dbg(&this_usbduxsub->interface->dev, "comedi: %s\n", __func__);
- if (do_unlink)
- ret = usbduxsub_unlink_PwmURBs(this_usbduxsub);
- this_usbduxsub->pwm_cmd_running = 0;
-
- return ret;
+ devpriv->pwm_cmd_running = 0;
}
-/* force unlink - is called by comedi */
-static int usbdux_pwm_cancel(struct comedi_device *dev,
- struct comedi_subdevice *s)
+static int usbduxsigma_pwm_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- struct usbduxsub *this_usbduxsub = dev->private;
- int res = 0;
+ struct usbduxsigma_private *devpriv = dev->private;
/* unlink only if it is really running */
- res = usbdux_pwm_stop(this_usbduxsub, this_usbduxsub->pwm_cmd_running);
+ usbduxsigma_pwm_stop(dev, devpriv->pwm_cmd_running);
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi %d: sending pwm off command to the usb device.\n",
- dev->minor);
- res = send_dux_commands(this_usbduxsub, SENDPWMOFF);
- if (res < 0)
- return res;
-
- return res;
+ return usbbuxsigma_send_cmd(dev, USBDUXSIGMA_PWM_OFF_CMD);
}
-static void usbduxsub_pwm_irq(struct urb *urb)
+static void usbduxsigma_pwm_urb_complete(struct urb *urb)
{
+ struct comedi_device *dev = urb->context;
+ struct usbduxsigma_private *devpriv = dev->private;
int ret;
- struct usbduxsub *this_usbduxsub;
- struct comedi_device *this_comedidev;
- struct comedi_subdevice *s;
-
- /* printk(KERN_DEBUG "PWM: IRQ\n"); */
-
- /* the context variable points to the subdevice */
- this_comedidev = urb->context;
- /* the private structure of the subdevice is struct usbduxsub */
- this_usbduxsub = this_comedidev->private;
-
- s = &this_comedidev->subdevices[SUBDEV_DA];
switch (urb->status) {
case 0:
@@ -1854,260 +1133,182 @@ static void usbduxsub_pwm_irq(struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
case -ECONNABORTED:
- /*
- * after an unlink command, unplug, ... etc
- * no unlink needed here. Already shutting down.
- */
- if (this_usbduxsub->pwm_cmd_running)
- usbdux_pwm_stop(this_usbduxsub, 0);
-
+ /* happens after an unlink command */
+ if (devpriv->pwm_cmd_running)
+ usbduxsigma_pwm_stop(dev, 0); /* w/o unlink */
return;
default:
/* a real error */
- if (this_usbduxsub->pwm_cmd_running) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi_: Non-zero urb status received in "
- "pwm intr context: %d\n", urb->status);
- usbdux_pwm_stop(this_usbduxsub, 0);
+ if (devpriv->pwm_cmd_running) {
+ dev_err(dev->class_dev,
+ "%s: non-zero urb status (%d)\n",
+ __func__, urb->status);
+ usbduxsigma_pwm_stop(dev, 0); /* w/o unlink */
}
return;
}
- /* are we actually running? */
- if (!(this_usbduxsub->pwm_cmd_running))
+ if (!devpriv->pwm_cmd_running)
return;
- urb->transfer_buffer_length = this_usbduxsub->sizePwmBuf;
- urb->dev = this_usbduxsub->usbdev;
+ urb->transfer_buffer_length = devpriv->pwm_buf_sz;
+ urb->dev = comedi_to_usb_dev(dev);
urb->status = 0;
- if (this_usbduxsub->pwm_cmd_running) {
- ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (ret < 0) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi_: pwm urb resubm failed in int-cont. "
- "ret=%d", ret);
- if (ret == EL2NSYNC)
- dev_err(&this_usbduxsub->interface->dev,
- "buggy USB host controller or bug in "
- "IRQ handling!\n");
-
- /* don't do an unlink here */
- usbdux_pwm_stop(this_usbduxsub, 0);
- }
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret < 0) {
+ dev_err(dev->class_dev, "%s: urb resubmit failed (%d)\n",
+ __func__, ret);
+ if (ret == EL2NSYNC)
+ dev_err(dev->class_dev,
+ "buggy USB host controller or bug in IRQ handler\n");
+ usbduxsigma_pwm_stop(dev, 0); /* w/o unlink */
}
}
-static int usbduxsub_submit_PwmURBs(struct usbduxsub *usbduxsub)
+static int usbduxsigma_submit_pwm_urb(struct comedi_device *dev)
{
- int errFlag;
-
- if (!usbduxsub)
- return -EFAULT;
-
- dev_dbg(&usbduxsub->interface->dev, "comedi_: submitting pwm-urb\n");
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct usbduxsigma_private *devpriv = dev->private;
+ struct urb *urb = devpriv->pwm_urb;
/* in case of a resubmission after an unlink... */
- usb_fill_bulk_urb(usbduxsub->urbPwm,
- usbduxsub->usbdev,
- usb_sndbulkpipe(usbduxsub->usbdev, PWM_EP),
- usbduxsub->urbPwm->transfer_buffer,
- usbduxsub->sizePwmBuf, usbduxsub_pwm_irq,
- usbduxsub->comedidev);
-
- errFlag = usb_submit_urb(usbduxsub->urbPwm, GFP_ATOMIC);
- if (errFlag) {
- dev_err(&usbduxsub->interface->dev,
- "comedi_: usbduxsigma: pwm: usb_submit_urb error %d\n",
- errFlag);
- return errFlag;
- }
- return 0;
+ usb_fill_bulk_urb(urb, usb, usb_sndbulkpipe(usb, 4),
+ urb->transfer_buffer, devpriv->pwm_buf_sz,
+ usbduxsigma_pwm_urb_complete, dev);
+
+ return usb_submit_urb(urb, GFP_ATOMIC);
}
-static int usbdux_pwm_period(struct comedi_device *dev,
- struct comedi_subdevice *s, unsigned int period)
+static int usbduxsigma_pwm_period(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int period)
{
- struct usbduxsub *this_usbduxsub = dev->private;
+ struct usbduxsigma_private *devpriv = dev->private;
int fx2delay = 255;
if (period < MIN_PWM_PERIOD) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: illegal period setting for pwm.\n",
- dev->minor);
return -EAGAIN;
} else {
- fx2delay = period / ((int)(6 * 512 * (1.0 / 0.033))) - 6;
- if (fx2delay > 255) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: period %d for pwm is too low.\n",
- dev->minor, period);
+ fx2delay = (period / (6 * 512 * 1000 / 33)) - 6;
+ if (fx2delay > 255)
return -EAGAIN;
- }
}
- this_usbduxsub->pwmDelay = fx2delay;
- this_usbduxsub->pwmPeriod = period;
- dev_dbg(&this_usbduxsub->interface->dev, "%s: frequ=%d, period=%d\n",
- __func__, period, fx2delay);
+ devpriv->pwm_delay = fx2delay;
+ devpriv->pwm_period = period;
return 0;
}
-/* is called from insn so there's no need to do all the sanity checks */
-static int usbdux_pwm_start(struct comedi_device *dev,
- struct comedi_subdevice *s)
+static int usbduxsigma_pwm_start(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- int ret, i;
- struct usbduxsub *this_usbduxsub = dev->private;
-
- dev_dbg(&this_usbduxsub->interface->dev, "comedi%d: %s\n",
- dev->minor, __func__);
+ struct usbduxsigma_private *devpriv = dev->private;
+ int ret;
- if (this_usbduxsub->pwm_cmd_running) {
- /* already running */
+ if (devpriv->pwm_cmd_running)
return 0;
- }
- this_usbduxsub->dux_commands[1] = ((uint8_t) this_usbduxsub->pwmDelay);
- ret = send_dux_commands(this_usbduxsub, SENDPWMON);
+ devpriv->dux_commands[1] = devpriv->pwm_delay;
+ ret = usbbuxsigma_send_cmd(dev, USBDUXSIGMA_PWM_ON_CMD);
if (ret < 0)
return ret;
- /* initialise the buffer */
- for (i = 0; i < this_usbduxsub->sizePwmBuf; i++)
- ((char *)(this_usbduxsub->urbPwm->transfer_buffer))[i] = 0;
+ memset(devpriv->pwm_urb->transfer_buffer, 0, devpriv->pwm_buf_sz);
- this_usbduxsub->pwm_cmd_running = 1;
- ret = usbduxsub_submit_PwmURBs(this_usbduxsub);
+ devpriv->pwm_cmd_running = 1;
+ ret = usbduxsigma_submit_pwm_urb(dev);
if (ret < 0) {
- this_usbduxsub->pwm_cmd_running = 0;
+ devpriv->pwm_cmd_running = 0;
return ret;
}
+
return 0;
}
-/* generates the bit pattern for PWM with the optional sign bit */
-static int usbdux_pwm_pattern(struct comedi_device *dev,
- struct comedi_subdevice *s, int channel,
- unsigned int value, unsigned int sign)
+static void usbduxsigma_pwm_pattern(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int chan,
+ unsigned int value,
+ unsigned int sign)
{
- struct usbduxsub *this_usbduxsub = dev->private;
- int i, szbuf;
- char *pBuf;
- char pwm_mask;
- char sgn_mask;
- char c;
-
- if (!this_usbduxsub)
- return -EFAULT;
-
- /* this is the DIO bit which carries the PWM data */
- pwm_mask = (1 << channel);
- /* this is the DIO bit which carries the optional direction bit */
- sgn_mask = (16 << channel);
- /* this is the buffer which will be filled with the with bit */
- /* pattern for one period */
- szbuf = this_usbduxsub->sizePwmBuf;
- pBuf = (char *)(this_usbduxsub->urbPwm->transfer_buffer);
+ struct usbduxsigma_private *devpriv = dev->private;
+ char pwm_mask = (1 << chan); /* DIO bit for the PWM data */
+ char sgn_mask = (16 << chan); /* DIO bit for the sign */
+ char *buf = (char *)(devpriv->pwm_urb->transfer_buffer);
+ int szbuf = devpriv->pwm_buf_sz;
+ int i;
+
for (i = 0; i < szbuf; i++) {
- c = *pBuf;
- /* reset bits */
- c = c & (~pwm_mask);
- /* set the bit as long as the index is lower than the value */
+ char c = *buf;
+
+ c &= ~pwm_mask;
if (i < value)
- c = c | pwm_mask;
- /* set the optional sign bit for a relay */
- if (!sign) {
- /* positive value */
- c = c & (~sgn_mask);
- } else {
- /* negative value */
- c = c | sgn_mask;
- }
- *(pBuf++) = c;
+ c |= pwm_mask;
+ if (!sign)
+ c &= ~sgn_mask;
+ else
+ c |= sgn_mask;
+ *buf++ = c;
}
- return 1;
}
-static int usbdux_pwm_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int usbduxsigma_pwm_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct usbduxsub *this_usbduxsub = dev->private;
-
- if (!this_usbduxsub)
- return -EFAULT;
+ unsigned int chan = CR_CHAN(insn->chanspec);
- if ((insn->n) != 1) {
- /*
- * doesn't make sense to have more than one value here because
- * it would just overwrite the PWM buffer a couple of times
- */
+ /*
+ * It doesn't make sense to support more than one value here
+ * because it would just overwrite the PWM buffer.
+ */
+ if (insn->n != 1)
return -EINVAL;
- }
/*
- * the sign is set via a special INSN only, this gives us 8 bits for
- * normal operation
- * relay sign 0 by default
+	 * The sign is set via a special INSN only; this gives us 8 bits
+	 * for normal operation. The sign is 0 by default.
*/
- return usbdux_pwm_pattern(dev, s, CR_CHAN(insn->chanspec), data[0], 0);
+ usbduxsigma_pwm_pattern(dev, s, chan, data[0], 0);
+
+ return insn->n;
}
-static int usbdux_pwm_read(struct comedi_device *x1,
- struct comedi_subdevice *x2, struct comedi_insn *x3,
- unsigned int *x4)
+static int usbduxsigma_pwm_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- /* not needed */
- return -EINVAL;
-};
+ struct usbduxsigma_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
-/* switches on/off PWM */
-static int usbdux_pwm_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- struct usbduxsub *this_usbduxsub = dev->private;
switch (data[0]) {
case INSN_CONFIG_ARM:
- /* switch it on */
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: %s: pwm on\n", dev->minor, __func__);
/*
* if not zero the PWM is limited to a certain time which is
* not supported here
*/
if (data[1] != 0)
return -EINVAL;
- return usbdux_pwm_start(dev, s);
+ return usbduxsigma_pwm_start(dev, s);
case INSN_CONFIG_DISARM:
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: %s: pwm off\n", dev->minor, __func__);
- return usbdux_pwm_cancel(dev, s);
+ return usbduxsigma_pwm_cancel(dev, s);
case INSN_CONFIG_GET_PWM_STATUS:
- /*
- * to check if the USB transmission has failed or in case PWM
- * was limited to n cycles to check if it has terminated
- */
- data[1] = this_usbduxsub->pwm_cmd_running;
+ data[1] = devpriv->pwm_cmd_running;
return 0;
case INSN_CONFIG_PWM_SET_PERIOD:
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: %s: setting period\n", dev->minor,
- __func__);
- return usbdux_pwm_period(dev, s, data[1]);
+ return usbduxsigma_pwm_period(dev, s, data[1]);
case INSN_CONFIG_PWM_GET_PERIOD:
- data[1] = this_usbduxsub->pwmPeriod;
+ data[1] = devpriv->pwm_period;
return 0;
case INSN_CONFIG_PWM_SET_H_BRIDGE:
- /* value in the first byte and the sign in the second for a
- relay */
- return usbdux_pwm_pattern(dev, s,
- /* the channel number */
- CR_CHAN(insn->chanspec),
- /* actual PWM data */
- data[1],
- /* just a sign */
- (data[2] != 0));
+ /*
+ * data[1] = value
+ * data[2] = sign (for a relay)
+ */
+ usbduxsigma_pwm_pattern(dev, s, chan, data[1], (data[2] != 0));
+ return 0;
case INSN_CONFIG_PWM_GET_H_BRIDGE:
/* values are not kept in this driver, nothing to return */
return -EINVAL;
@@ -2115,542 +1316,390 @@ static int usbdux_pwm_config(struct comedi_device *dev,
return -EINVAL;
}
-/* end of PWM */
-/*****************************************************************/
-
-static void tidy_up(struct usbduxsub *usbduxsub_tmp)
+static int usbduxsigma_getstatusinfo(struct comedi_device *dev, int chan)
{
- int i;
-
- if (!usbduxsub_tmp)
- return;
- dev_dbg(&usbduxsub_tmp->interface->dev, "comedi_: tiding up\n");
-
- /* shows the usb subsystem that the driver is down */
- if (usbduxsub_tmp->interface)
- usb_set_intfdata(usbduxsub_tmp->interface, NULL);
-
- usbduxsub_tmp->probed = 0;
+ struct usbduxsigma_private *devpriv = dev->private;
+ uint8_t sysred;
+ uint32_t val;
+ int ret;
- if (usbduxsub_tmp->urbIn) {
- if (usbduxsub_tmp->ai_cmd_running) {
- usbduxsub_tmp->ai_cmd_running = 0;
- usbduxsub_unlink_InURBs(usbduxsub_tmp);
- }
- for (i = 0; i < usbduxsub_tmp->numOfInBuffers; i++) {
- kfree(usbduxsub_tmp->urbIn[i]->transfer_buffer);
- usbduxsub_tmp->urbIn[i]->transfer_buffer = NULL;
- usb_kill_urb(usbduxsub_tmp->urbIn[i]);
- usb_free_urb(usbduxsub_tmp->urbIn[i]);
- usbduxsub_tmp->urbIn[i] = NULL;
- }
- kfree(usbduxsub_tmp->urbIn);
- usbduxsub_tmp->urbIn = NULL;
- }
- if (usbduxsub_tmp->urbOut) {
- if (usbduxsub_tmp->ao_cmd_running) {
- usbduxsub_tmp->ao_cmd_running = 0;
- usbduxsub_unlink_OutURBs(usbduxsub_tmp);
- }
- for (i = 0; i < usbduxsub_tmp->numOfOutBuffers; i++) {
- if (usbduxsub_tmp->urbOut[i]->transfer_buffer) {
- kfree(usbduxsub_tmp->
- urbOut[i]->transfer_buffer);
- usbduxsub_tmp->urbOut[i]->transfer_buffer =
- NULL;
- }
- if (usbduxsub_tmp->urbOut[i]) {
- usb_kill_urb(usbduxsub_tmp->urbOut[i]);
- usb_free_urb(usbduxsub_tmp->urbOut[i]);
- usbduxsub_tmp->urbOut[i] = NULL;
- }
- }
- kfree(usbduxsub_tmp->urbOut);
- usbduxsub_tmp->urbOut = NULL;
- }
- if (usbduxsub_tmp->urbPwm) {
- if (usbduxsub_tmp->pwm_cmd_running) {
- usbduxsub_tmp->pwm_cmd_running = 0;
- usbduxsub_unlink_PwmURBs(usbduxsub_tmp);
- }
- kfree(usbduxsub_tmp->urbPwm->transfer_buffer);
- usbduxsub_tmp->urbPwm->transfer_buffer = NULL;
- usb_kill_urb(usbduxsub_tmp->urbPwm);
- usb_free_urb(usbduxsub_tmp->urbPwm);
- usbduxsub_tmp->urbPwm = NULL;
+ switch (chan) {
+ default:
+ case 0:
+ sysred = 0; /* ADC zero */
+ break;
+ case 1:
+ sysred = 1; /* ADC offset */
+ break;
+ case 2:
+ sysred = 4; /* VCC */
+ break;
+ case 3:
+ sysred = 8; /* temperature */
+ break;
+ case 4:
+ sysred = 16; /* gain */
+ break;
+ case 5:
+ sysred = 32; /* ref */
+ break;
}
- kfree(usbduxsub_tmp->inBuffer);
- usbduxsub_tmp->inBuffer = NULL;
- kfree(usbduxsub_tmp->insnBuffer);
- usbduxsub_tmp->insnBuffer = NULL;
- kfree(usbduxsub_tmp->outBuffer);
- usbduxsub_tmp->outBuffer = NULL;
- kfree(usbduxsub_tmp->dac_commands);
- usbduxsub_tmp->dac_commands = NULL;
- kfree(usbduxsub_tmp->dux_commands);
- usbduxsub_tmp->dux_commands = NULL;
- usbduxsub_tmp->ai_cmd_running = 0;
- usbduxsub_tmp->ao_cmd_running = 0;
- usbduxsub_tmp->pwm_cmd_running = 0;
-}
-
-static int usbduxsigma_attach_common(struct comedi_device *dev,
- struct usbduxsub *uds)
-{
- int ret;
- struct comedi_subdevice *s;
- int n_subdevs;
- int offset;
- down(&uds->sem);
- /* pointer back to the corresponding comedi device */
- uds->comedidev = dev;
+ devpriv->dux_commands[1] = 0x12; /* CONFIG0 */
+ devpriv->dux_commands[2] = 0x80; /* CONFIG1: 2kHz sampling rate */
+ devpriv->dux_commands[3] = 0x00; /* CONFIG3: diff. channels off */
+ devpriv->dux_commands[4] = 0;
+ devpriv->dux_commands[5] = 0;
+ devpriv->dux_commands[6] = sysred;
+ ret = usbbuxsigma_send_cmd(dev, USBDUXSIGMA_SINGLE_AD_CMD);
+ if (ret < 0)
+ return ret;
- /* set number of subdevices */
- if (uds->high_speed)
- n_subdevs = 4; /* with pwm */
- else
- n_subdevs = 3; /* without pwm */
- ret = comedi_alloc_subdevices(dev, n_subdevs);
- if (ret) {
- up(&uds->sem);
+ ret = usbduxsigma_receive_cmd(dev, USBDUXSIGMA_SINGLE_AD_CMD);
+ if (ret < 0)
return ret;
- }
- /* private structure is also simply the usb-structure */
- dev->private = uds;
- /* the first subdevice is the A/D converter */
- s = &dev->subdevices[SUBDEV_AD];
- /* the URBs get the comedi subdevice */
- /* which is responsible for reading */
- /* this is the subdevice which reads data */
- dev->read_subdev = s;
- /* the subdevice receives as private structure the */
- /* usb-structure */
- s->private = NULL;
- /* analog input */
- s->type = COMEDI_SUBD_AI;
- /* readable and ref is to ground, 32 bit wide data! */
- s->subdev_flags = SDF_READABLE | SDF_GROUND |
- SDF_CMD_READ | SDF_LSAMPL;
- /* 16 A/D channels */
- s->n_chan = NUMCHANNELS;
- /* length of the channellist */
- s->len_chanlist = NUMCHANNELS;
- /* callback functions */
- s->insn_read = usbdux_ai_insn_read;
- s->do_cmdtest = usbdux_ai_cmdtest;
- s->do_cmd = usbdux_ai_cmd;
- s->cancel = usbdux_ai_cancel;
- /* max value from the A/D converter (24bit) */
- s->maxdata = 0x00FFFFFF;
- /* range table to convert to physical units */
- s->range_table = (&range_usbdux_ai_range);
- /* analog output subdevice */
- s = &dev->subdevices[SUBDEV_DA];
- /* analog out */
- s->type = COMEDI_SUBD_AO;
- /* backward pointer */
- dev->write_subdev = s;
- /* the subdevice receives as private structure the */
- /* usb-structure */
- s->private = NULL;
- /* are writable */
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_CMD_WRITE;
- /* 4 channels */
- s->n_chan = 4;
- /* length of the channellist */
- s->len_chanlist = 4;
- /* 8 bit resolution */
- s->maxdata = 0x00ff;
- /* unipolar range */
- s->range_table = &range_unipolar2_5;
- /* callback */
- s->do_cmdtest = usbdux_ao_cmdtest;
- s->do_cmd = usbdux_ao_cmd;
- s->cancel = usbdux_ao_cancel;
- s->insn_read = usbdux_ao_insn_read;
- s->insn_write = usbdux_ao_insn_write;
- /* digital I/O subdevice */
- s = &dev->subdevices[SUBDEV_DIO];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- /* 8 external and 16 internal channels */
- s->n_chan = 24;
- s->maxdata = 1;
- s->range_table = (&range_digital);
- s->insn_bits = usbdux_dio_insn_bits;
- s->insn_config = usbdux_dio_insn_config;
- /* we don't use it */
- s->private = NULL;
- if (uds->high_speed) {
- /* timer / pwm subdevice */
- s = &dev->subdevices[SUBDEV_PWM];
- s->type = COMEDI_SUBD_PWM;
- s->subdev_flags = SDF_WRITABLE | SDF_PWM_HBRIDGE;
- s->n_chan = 8;
- /* this defines the max duty cycle resolution */
- s->maxdata = uds->sizePwmBuf;
- s->insn_write = usbdux_pwm_write;
- s->insn_read = usbdux_pwm_read;
- s->insn_config = usbdux_pwm_config;
- usbdux_pwm_period(dev, s, PWM_DEFAULT_PERIOD);
- }
- /* finally decide that it's attached */
- uds->attached = 1;
- up(&uds->sem);
- offset = usbdux_getstatusinfo(dev, 0);
- if (offset < 0)
- dev_err(&uds->interface->dev,
- "Communication to USBDUXSIGMA failed! Check firmware and cabling.");
- dev_info(&uds->interface->dev,
- "comedi%d: attached, ADC_zero = %x\n", dev->minor, offset);
- return 0;
+
+ /* 32 bits big endian from the A/D converter */
+ val = be32_to_cpu(*((int32_t *)((devpriv->insn_buf)+1)));
+ val &= 0x00ffffff; /* strip status byte */
+ val ^= 0x00800000; /* convert to unsigned */
+
+ return (int)val;
}
-static int usbduxsigma_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
+static int usbduxsigma_firmware_upload(struct comedi_device *dev,
+ const u8 *data, size_t size,
+ unsigned long context)
{
- struct usb_interface *uinterf = comedi_to_usb_interface(dev);
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ uint8_t *buf;
+ uint8_t *tmp;
int ret;
- struct usbduxsub *uds;
- dev->private = NULL;
- down(&start_stop_sem);
- uds = usb_get_intfdata(uinterf);
- if (!uds || !uds->probed) {
- dev_err(dev->class_dev,
- "usbduxsigma: error: auto_attach failed, not connected\n");
- ret = -ENODEV;
- } else if (uds->attached) {
- dev_err(dev->class_dev,
- "usbduxsigma: error: auto_attach failed, already attached\n");
- ret = -ENODEV;
- } else
- ret = usbduxsigma_attach_common(dev, uds);
- up(&start_stop_sem);
- return ret;
-}
+ if (!data)
+ return 0;
-static void usbduxsigma_detach(struct comedi_device *dev)
-{
- struct usbduxsub *usb = dev->private;
-
- if (usb) {
- down(&usb->sem);
- dev->private = NULL;
- usb->attached = 0;
- usb->comedidev = NULL;
- up(&usb->sem);
+ if (size > FIRMWARE_MAX_LEN) {
+ dev_err(dev->class_dev, "firmware binary too large for FX2\n");
+ return -ENOMEM;
}
-}
-static struct comedi_driver usbduxsigma_driver = {
- .driver_name = "usbduxsigma",
- .module = THIS_MODULE,
- .auto_attach = usbduxsigma_auto_attach,
- .detach = usbduxsigma_detach,
-};
+ /* we generate a local buffer for the firmware */
+ buf = kmemdup(data, size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
-static void usbdux_firmware_request_complete_handler(const struct firmware *fw,
- void *context)
-{
- struct usbduxsub *usbduxsub_tmp = context;
- struct usb_interface *uinterf = usbduxsub_tmp->interface;
- int ret;
+ /* we need a malloc'ed buffer for usb_control_msg() */
+ tmp = kmalloc(1, GFP_KERNEL);
+ if (!tmp) {
+ kfree(buf);
+ return -ENOMEM;
+ }
- if (fw == NULL) {
- dev_err(&uinterf->dev,
- "Firmware complete handler without firmware!\n");
- return;
+ /* stop the current firmware on the device */
+ *tmp = 1; /* 7f92 to one */
+ ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0),
+ USBDUXSUB_FIRMWARE,
+ VENDOR_DIR_OUT,
+ USBDUXSUB_CPUCS, 0x0000,
+ tmp, 1,
+ BULK_TIMEOUT);
+ if (ret < 0) {
+ dev_err(dev->class_dev, "can not stop firmware\n");
+ goto done;
}
- /*
- * we need to upload the firmware here because fw will be
- * freed once we've left this function
- */
- ret = firmwareUpload(usbduxsub_tmp, fw->data, fw->size);
+ /* upload the new firmware to the device */
+ ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0),
+ USBDUXSUB_FIRMWARE,
+ VENDOR_DIR_OUT,
+ 0, 0x0000,
+ buf, size,
+ BULK_TIMEOUT);
+ if (ret < 0) {
+ dev_err(dev->class_dev, "firmware upload failed\n");
+ goto done;
+ }
+
+ /* start the new firmware on the device */
+ *tmp = 0; /* 7f92 to zero */
+ ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0),
+ USBDUXSUB_FIRMWARE,
+ VENDOR_DIR_OUT,
+ USBDUXSUB_CPUCS, 0x0000,
+ tmp, 1,
+ BULK_TIMEOUT);
+ if (ret < 0)
+ dev_err(dev->class_dev, "can not start firmware\n");
- if (ret) {
- dev_err(&uinterf->dev,
- "Could not upload firmware (err=%d)\n", ret);
- goto out;
- }
- comedi_usb_auto_config(uinterf, &usbduxsigma_driver, 0);
-out:
- release_firmware(fw);
+done:
+ kfree(tmp);
+ kfree(buf);
+ return ret;
}
-static int usbduxsigma_usb_probe(struct usb_interface *uinterf,
- const struct usb_device_id *id)
+static int usbduxsigma_alloc_usb_buffers(struct comedi_device *dev)
{
- struct usb_device *udev = interface_to_usbdev(uinterf);
- struct device *dev = &uinterf->dev;
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct usbduxsigma_private *devpriv = dev->private;
+ struct urb *urb;
int i;
- int index;
- int ret;
-
- dev_dbg(dev, "comedi_: usbdux_: "
- "finding a free structure for the usb-device\n");
-
- down(&start_stop_sem);
- /* look for a free place in the usbdux array */
- index = -1;
- for (i = 0; i < NUMUSBDUX; i++) {
- if (!(usbduxsub[i].probed)) {
- index = i;
- break;
- }
- }
- /* no more space */
- if (index == -1) {
- dev_err(dev, "Too many usbduxsigma-devices connected.\n");
- up(&start_stop_sem);
- return -EMFILE;
- }
- dev_dbg(dev, "comedi_: usbdux: "
- "usbduxsub[%d] is ready to connect to comedi.\n", index);
-
- sema_init(&(usbduxsub[index].sem), 1);
- /* save a pointer to the usb device */
- usbduxsub[index].usbdev = udev;
-
- /* save the interface itself */
- usbduxsub[index].interface = uinterf;
- /* get the interface number from the interface */
- usbduxsub[index].ifnum = uinterf->altsetting->desc.bInterfaceNumber;
- /* hand the private data over to the usb subsystem */
- /* will be needed for disconnect */
- usb_set_intfdata(uinterf, &(usbduxsub[index]));
-
- dev_dbg(dev, "comedi_: usbdux: ifnum=%d\n", usbduxsub[index].ifnum);
-
- /* test if it is high speed (USB 2.0) */
- usbduxsub[index].high_speed =
- (usbduxsub[index].usbdev->speed == USB_SPEED_HIGH);
-
- /* create space for the commands of the DA converter */
- usbduxsub[index].dac_commands = kzalloc(NUMOUTCHANNELS, GFP_KERNEL);
- if (!usbduxsub[index].dac_commands) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- /* create space for the commands going to the usb device */
- usbduxsub[index].dux_commands = kzalloc(SIZEOFDUXBUFFER, GFP_KERNEL);
- if (!usbduxsub[index].dux_commands) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
+ devpriv->dux_commands = kzalloc(SIZEOFDUXBUFFER, GFP_KERNEL);
+ devpriv->in_buf = kzalloc(SIZEINBUF, GFP_KERNEL);
+ devpriv->insn_buf = kzalloc(SIZEINSNBUF, GFP_KERNEL);
+ devpriv->ai_urbs = kcalloc(devpriv->n_ai_urbs, sizeof(*urb),
+ GFP_KERNEL);
+ devpriv->ao_urbs = kcalloc(devpriv->n_ao_urbs, sizeof(*urb),
+ GFP_KERNEL);
+ if (!devpriv->dux_commands || !devpriv->in_buf || !devpriv->insn_buf ||
+ !devpriv->ai_urbs || !devpriv->ao_urbs)
return -ENOMEM;
- }
- /* create space for the in buffer and set it to zero */
- usbduxsub[index].inBuffer = kzalloc(SIZEINBUF, GFP_KERNEL);
- if (!(usbduxsub[index].inBuffer)) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- /* create space of the instruction buffer */
- usbduxsub[index].insnBuffer = kzalloc(SIZEINSNBUF, GFP_KERNEL);
- if (!(usbduxsub[index].insnBuffer)) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- /* create space for the outbuffer */
- usbduxsub[index].outBuffer = kzalloc(SIZEOUTBUF, GFP_KERNEL);
- if (!(usbduxsub[index].outBuffer)) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- /* setting to alternate setting 3: enabling iso ep and bulk ep. */
- i = usb_set_interface(usbduxsub[index].usbdev,
- usbduxsub[index].ifnum, 3);
- if (i < 0) {
- dev_err(dev, "comedi_: usbduxsigma%d: "
- "could not set alternate setting 3 in high speed.\n",
- index);
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENODEV;
- }
- if (usbduxsub[index].high_speed)
- usbduxsub[index].numOfInBuffers = NUMOFINBUFFERSHIGH;
- else
- usbduxsub[index].numOfInBuffers = NUMOFINBUFFERSFULL;
-
- usbduxsub[index].urbIn = kcalloc(usbduxsub[index].numOfInBuffers,
- sizeof(struct urb *),
- GFP_KERNEL);
- if (!(usbduxsub[index].urbIn)) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- for (i = 0; i < usbduxsub[index].numOfInBuffers; i++) {
+
+ for (i = 0; i < devpriv->n_ai_urbs; i++) {
/* one frame: 1ms */
- usbduxsub[index].urbIn[i] = usb_alloc_urb(1, GFP_KERNEL);
- if (usbduxsub[index].urbIn[i] == NULL) {
- dev_err(dev, "comedi_: usbduxsigma%d: "
- "Could not alloc. urb(%d)\n", index, i);
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
+ urb = usb_alloc_urb(1, GFP_KERNEL);
+ if (!urb)
return -ENOMEM;
- }
- usbduxsub[index].urbIn[i]->dev = usbduxsub[index].usbdev;
+ devpriv->ai_urbs[i] = urb;
+ urb->dev = usb;
/* will be filled later with a pointer to the comedi-device */
/* and ONLY then the urb should be submitted */
- usbduxsub[index].urbIn[i]->context = NULL;
- usbduxsub[index].urbIn[i]->pipe =
- usb_rcvisocpipe(usbduxsub[index].usbdev, ISOINEP);
- usbduxsub[index].urbIn[i]->transfer_flags = URB_ISO_ASAP;
- usbduxsub[index].urbIn[i]->transfer_buffer =
- kzalloc(SIZEINBUF, GFP_KERNEL);
- if (!(usbduxsub[index].urbIn[i]->transfer_buffer)) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
+ urb->context = NULL;
+ urb->pipe = usb_rcvisocpipe(usb, 6);
+ urb->transfer_flags = URB_ISO_ASAP;
+ urb->transfer_buffer = kzalloc(SIZEINBUF, GFP_KERNEL);
+ if (!urb->transfer_buffer)
return -ENOMEM;
- }
- usbduxsub[index].urbIn[i]->complete = usbduxsub_ai_IsocIrq;
- usbduxsub[index].urbIn[i]->number_of_packets = 1;
- usbduxsub[index].urbIn[i]->transfer_buffer_length = SIZEINBUF;
- usbduxsub[index].urbIn[i]->iso_frame_desc[0].offset = 0;
- usbduxsub[index].urbIn[i]->iso_frame_desc[0].length =
- SIZEINBUF;
+ urb->complete = usbduxsigma_ai_urb_complete;
+ urb->number_of_packets = 1;
+ urb->transfer_buffer_length = SIZEINBUF;
+ urb->iso_frame_desc[0].offset = 0;
+ urb->iso_frame_desc[0].length = SIZEINBUF;
}
- /* out */
- if (usbduxsub[index].high_speed)
- usbduxsub[index].numOfOutBuffers = NUMOFOUTBUFFERSHIGH;
- else
- usbduxsub[index].numOfOutBuffers = NUMOFOUTBUFFERSFULL;
-
- usbduxsub[index].urbOut = kcalloc(usbduxsub[index].numOfOutBuffers,
- sizeof(struct urb *), GFP_KERNEL);
- if (!(usbduxsub[index].urbOut)) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- for (i = 0; i < usbduxsub[index].numOfOutBuffers; i++) {
+ for (i = 0; i < devpriv->n_ao_urbs; i++) {
/* one frame: 1ms */
- usbduxsub[index].urbOut[i] = usb_alloc_urb(1, GFP_KERNEL);
- if (usbduxsub[index].urbOut[i] == NULL) {
- dev_err(dev, "comedi_: usbduxsigma%d: "
- "Could not alloc. urb(%d)\n", index, i);
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
+ urb = usb_alloc_urb(1, GFP_KERNEL);
+ if (!urb)
return -ENOMEM;
- }
- usbduxsub[index].urbOut[i]->dev = usbduxsub[index].usbdev;
+ devpriv->ao_urbs[i] = urb;
+ urb->dev = usb;
/* will be filled later with a pointer to the comedi-device */
/* and ONLY then the urb should be submitted */
- usbduxsub[index].urbOut[i]->context = NULL;
- usbduxsub[index].urbOut[i]->pipe =
- usb_sndisocpipe(usbduxsub[index].usbdev, ISOOUTEP);
- usbduxsub[index].urbOut[i]->transfer_flags = URB_ISO_ASAP;
- usbduxsub[index].urbOut[i]->transfer_buffer =
- kzalloc(SIZEOUTBUF, GFP_KERNEL);
- if (!(usbduxsub[index].urbOut[i]->transfer_buffer)) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
+ urb->context = NULL;
+ urb->pipe = usb_sndisocpipe(usb, 2);
+ urb->transfer_flags = URB_ISO_ASAP;
+ urb->transfer_buffer = kzalloc(SIZEOUTBUF, GFP_KERNEL);
+ if (!urb->transfer_buffer)
return -ENOMEM;
- }
- usbduxsub[index].urbOut[i]->complete = usbduxsub_ao_IsocIrq;
- usbduxsub[index].urbOut[i]->number_of_packets = 1;
- usbduxsub[index].urbOut[i]->transfer_buffer_length =
- SIZEOUTBUF;
- usbduxsub[index].urbOut[i]->iso_frame_desc[0].offset = 0;
- usbduxsub[index].urbOut[i]->iso_frame_desc[0].length =
- SIZEOUTBUF;
- if (usbduxsub[index].high_speed) {
- /* uframes */
- usbduxsub[index].urbOut[i]->interval = 8;
- } else {
- /* frames */
- usbduxsub[index].urbOut[i]->interval = 1;
- }
+ urb->complete = usbduxsigma_ao_urb_complete;
+ urb->number_of_packets = 1;
+ urb->transfer_buffer_length = SIZEOUTBUF;
+ urb->iso_frame_desc[0].offset = 0;
+ urb->iso_frame_desc[0].length = SIZEOUTBUF;
+ if (devpriv->high_speed)
+ urb->interval = 8; /* uframes */
+ else
+ urb->interval = 1; /* frames */
}
- /* pwm */
- if (usbduxsub[index].high_speed) {
- /* max bulk ep size in high speed */
- usbduxsub[index].sizePwmBuf = 512;
- usbduxsub[index].urbPwm = usb_alloc_urb(0, GFP_KERNEL);
- if (usbduxsub[index].urbPwm == NULL) {
- dev_err(dev, "comedi_: usbduxsigma%d: "
- "Could not alloc. pwm urb\n", index);
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
+ if (devpriv->pwm_buf_sz) {
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb)
return -ENOMEM;
- }
- usbduxsub[index].urbPwm->transfer_buffer =
- kzalloc(usbduxsub[index].sizePwmBuf, GFP_KERNEL);
- if (!(usbduxsub[index].urbPwm->transfer_buffer)) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
+ devpriv->pwm_urb = urb;
+
+ urb->transfer_buffer = kzalloc(devpriv->pwm_buf_sz,
+ GFP_KERNEL);
+ if (!urb->transfer_buffer)
return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void usbduxsigma_free_usb_buffers(struct comedi_device *dev)
+{
+ struct usbduxsigma_private *devpriv = dev->private;
+ struct urb *urb;
+ int i;
+
+ urb = devpriv->pwm_urb;
+ if (urb) {
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
+ }
+ if (devpriv->ao_urbs) {
+ for (i = 0; i < devpriv->n_ao_urbs; i++) {
+ urb = devpriv->ao_urbs[i];
+ if (urb) {
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
+ }
+ }
+ kfree(devpriv->ao_urbs);
+ }
+ if (devpriv->ai_urbs) {
+ for (i = 0; i < devpriv->n_ai_urbs; i++) {
+ urb = devpriv->ai_urbs[i];
+ if (urb) {
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
+ }
}
+ kfree(devpriv->ai_urbs);
+ }
+ kfree(devpriv->insn_buf);
+ kfree(devpriv->in_buf);
+ kfree(devpriv->dux_commands);
+}
+
+static int usbduxsigma_auto_attach(struct comedi_device *dev,
+ unsigned long context_unused)
+{
+ struct usb_interface *intf = comedi_to_usb_interface(dev);
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct usbduxsigma_private *devpriv;
+ struct comedi_subdevice *s;
+ int offset;
+ int ret;
+
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
+ if (!devpriv)
+ return -ENOMEM;
+
+ sema_init(&devpriv->sem, 1);
+
+ usb_set_intfdata(intf, devpriv);
+
+ devpriv->high_speed = (usb->speed == USB_SPEED_HIGH);
+ if (devpriv->high_speed) {
+ devpriv->n_ai_urbs = NUMOFINBUFFERSHIGH;
+ devpriv->n_ao_urbs = NUMOFOUTBUFFERSHIGH;
+ devpriv->pwm_buf_sz = 512;
} else {
- usbduxsub[index].urbPwm = NULL;
- usbduxsub[index].sizePwmBuf = 0;
+ devpriv->n_ai_urbs = NUMOFINBUFFERSFULL;
+ devpriv->n_ao_urbs = NUMOFOUTBUFFERSFULL;
}
- usbduxsub[index].ai_cmd_running = 0;
- usbduxsub[index].ao_cmd_running = 0;
- usbduxsub[index].pwm_cmd_running = 0;
-
- /* we've reached the bottom of the function */
- usbduxsub[index].probed = 1;
- up(&start_stop_sem);
-
- ret = request_firmware_nowait(THIS_MODULE,
- FW_ACTION_HOTPLUG,
- FIRMWARE,
- &udev->dev,
- GFP_KERNEL,
- usbduxsub + index,
- usbdux_firmware_request_complete_handler
- );
-
- if (ret) {
- dev_err(dev, "Could not load firmware (err=%d)\n", ret);
+ ret = usbduxsigma_alloc_usb_buffers(dev);
+ if (ret)
+ return ret;
+
+ /* setting to alternate setting 3: enabling iso ep and bulk ep. */
+ ret = usb_set_interface(usb, intf->altsetting->desc.bInterfaceNumber,
+ 3);
+ if (ret < 0) {
+ dev_err(dev->class_dev,
+ "could not set alternate setting 3 in high speed\n");
return ret;
}
- dev_info(dev, "comedi_: successfully initialised.\n");
- /* success */
+ ret = comedi_load_firmware(dev, &usb->dev, FIRMWARE,
+ usbduxsigma_firmware_upload, 0);
+ if (ret)
+ return ret;
+
+ ret = comedi_alloc_subdevices(dev, (devpriv->high_speed) ? 4 : 3);
+ if (ret)
+ return ret;
+
+ /* Analog Input subdevice */
+ s = &dev->subdevices[0];
+ dev->read_subdev = s;
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ | SDF_LSAMPL;
+ s->n_chan = NUMCHANNELS;
+ s->len_chanlist = NUMCHANNELS;
+ s->maxdata = 0x00ffffff;
+ s->range_table = &usbduxsigma_ai_range;
+ s->insn_read = usbduxsigma_ai_insn_read;
+ s->do_cmdtest = usbduxsigma_ai_cmdtest;
+ s->do_cmd = usbduxsigma_ai_cmd;
+ s->cancel = usbduxsigma_ai_cancel;
+
+ /* Analog Output subdevice */
+ s = &dev->subdevices[1];
+ dev->write_subdev = s;
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_CMD_WRITE;
+ s->n_chan = USBDUXSIGMA_NUM_AO_CHAN;
+ s->len_chanlist = s->n_chan;
+ s->maxdata = 0x00ff;
+ s->range_table = &range_unipolar2_5;
+ s->insn_write = usbduxsigma_ao_insn_write;
+ s->insn_read = usbduxsigma_ao_insn_read;
+ s->do_cmdtest = usbduxsigma_ao_cmdtest;
+ s->do_cmd = usbduxsigma_ao_cmd;
+ s->cancel = usbduxsigma_ao_cancel;
+
+ /* Digital I/O subdevice */
+ s = &dev->subdevices[2];
+ s->type = COMEDI_SUBD_DIO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->n_chan = 24;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = usbduxsigma_dio_insn_bits;
+ s->insn_config = usbduxsigma_dio_insn_config;
+
+ if (devpriv->high_speed) {
+ /* Timer / pwm subdevice */
+ s = &dev->subdevices[3];
+ s->type = COMEDI_SUBD_PWM;
+ s->subdev_flags = SDF_WRITABLE | SDF_PWM_HBRIDGE;
+ s->n_chan = 8;
+ s->maxdata = devpriv->pwm_buf_sz;
+ s->insn_write = usbduxsigma_pwm_write;
+ s->insn_config = usbduxsigma_pwm_config;
+
+ usbduxsigma_pwm_period(dev, s, PWM_DEFAULT_PERIOD);
+ }
+
+ offset = usbduxsigma_getstatusinfo(dev, 0);
+ if (offset < 0)
+ dev_err(dev->class_dev,
+ "Communication to USBDUXSIGMA failed! Check firmware and cabling\n");
+
+ dev_info(dev->class_dev, "attached, ADC_zero = %x\n", offset);
+
return 0;
}
-static void usbduxsigma_usb_disconnect(struct usb_interface *intf)
+static void usbduxsigma_detach(struct comedi_device *dev)
{
- struct usbduxsub *usbduxsub_tmp = usb_get_intfdata(intf);
- struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_interface *intf = comedi_to_usb_interface(dev);
+ struct usbduxsigma_private *devpriv = dev->private;
- if (!usbduxsub_tmp) {
- dev_err(&intf->dev,
- "comedi_: disconnect called with null pointer.\n");
- return;
- }
- if (usbduxsub_tmp->usbdev != udev) {
- dev_err(&intf->dev, "comedi_: BUG! wrong ptr!\n");
+ usb_set_intfdata(intf, NULL);
+
+ if (!devpriv)
return;
- }
- if (usbduxsub_tmp->ai_cmd_running)
- /* we are still running a command */
- usbdux_ai_stop(usbduxsub_tmp, 1);
- if (usbduxsub_tmp->ao_cmd_running)
- /* we are still running a command */
- usbdux_ao_stop(usbduxsub_tmp, 1);
- comedi_usb_auto_unconfig(intf);
- down(&start_stop_sem);
- down(&usbduxsub_tmp->sem);
- tidy_up(usbduxsub_tmp);
- up(&usbduxsub_tmp->sem);
- up(&start_stop_sem);
- dev_info(&intf->dev, "comedi_: disconnected from the usb\n");
+
+ down(&devpriv->sem);
+
+ /* force unlink all urbs */
+ usbduxsigma_ai_stop(dev, 1);
+ usbduxsigma_ao_stop(dev, 1);
+ usbduxsigma_pwm_stop(dev, 1);
+
+ usbduxsigma_free_usb_buffers(dev);
+
+ up(&devpriv->sem);
+}
+
+static struct comedi_driver usbduxsigma_driver = {
+ .driver_name = "usbduxsigma",
+ .module = THIS_MODULE,
+ .auto_attach = usbduxsigma_auto_attach,
+ .detach = usbduxsigma_detach,
+};
+
+static int usbduxsigma_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ return comedi_usb_auto_config(intf, &usbduxsigma_driver, 0);
}
static const struct usb_device_id usbduxsigma_usb_table[] = {
@@ -2664,7 +1713,7 @@ MODULE_DEVICE_TABLE(usb, usbduxsigma_usb_table);
static struct usb_driver usbduxsigma_usb_driver = {
.name = "usbduxsigma",
.probe = usbduxsigma_usb_probe,
- .disconnect = usbduxsigma_usb_disconnect,
+ .disconnect = comedi_usb_auto_unconfig,
.id_table = usbduxsigma_usb_table,
};
module_comedi_usb_driver(usbduxsigma_driver, usbduxsigma_usb_driver);
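A note on the sample conversion in usbduxsigma_getstatusinfo() above: the board returns a big-endian 32-bit word whose top byte is status and whose low 24 bits are the signed A/D reading; masking strips the status byte and flipping the sign bit moves the reading onto comedi's unsigned 0..0xffffff scale. Below is a minimal standalone sketch of that arithmetic, not part of the patch; the helper name and the assumption that the buffer points at the four bytes following the command byte (insn_buf + 1) are illustrative only.

#include <stdint.h>
#include <stdio.h>

static uint32_t usbduxsigma_sample(const uint8_t *p)
{
	/* assumed layout: status byte first, then the 24-bit sample, big endian */
	uint32_t val = ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
		       ((uint32_t)p[2] << 8) | (uint32_t)p[3];

	val &= 0x00ffffff;	/* strip the status byte */
	val ^= 0x00800000;	/* re-centre: signed sample -> unsigned range */
	return val;
}

int main(void)
{
	const uint8_t zero[4] = { 0xaa, 0x00, 0x00, 0x00 };	/* 0 counts */
	const uint8_t one[4]  = { 0xaa, 0x00, 0x00, 0x01 };	/* +1 count */

	/* prints 800000 and 800001: mid-scale is the unsigned zero point */
	printf("%06x\n%06x\n", usbduxsigma_sample(zero), usbduxsigma_sample(one));
	return 0;
}

Compiled as plain C this prints 800000 and 800001, which is the kind of value the attach path above reports as ADC_zero.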
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
index 2be5087..06efa16 100644
--- a/drivers/staging/comedi/drivers/vmk80xx.c
+++ b/drivers/staging/comedi/drivers/vmk80xx.c
@@ -16,11 +16,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
Driver: vmk80xx
@@ -159,8 +154,6 @@ static const struct vmk80xx_board vmk80xx_boardinfo[] = {
};
struct vmk80xx_private {
- struct usb_device *usb;
- struct usb_interface *intf;
struct usb_endpoint_descriptor *ep_rx;
struct usb_endpoint_descriptor *ep_tx;
struct firmware_version fw;
@@ -170,9 +163,10 @@ struct vmk80xx_private {
enum vmk80xx_model model;
};
-static int vmk80xx_check_data_link(struct vmk80xx_private *devpriv)
+static int vmk80xx_check_data_link(struct comedi_device *dev)
{
- struct usb_device *usb = devpriv->usb;
+ struct vmk80xx_private *devpriv = dev->private;
+ struct usb_device *usb = comedi_to_usb_dev(dev);
unsigned int tx_pipe;
unsigned int rx_pipe;
unsigned char tx[1];
@@ -194,9 +188,10 @@ static int vmk80xx_check_data_link(struct vmk80xx_private *devpriv)
return (int)rx[1];
}
-static void vmk80xx_read_eeprom(struct vmk80xx_private *devpriv, int flag)
+static void vmk80xx_read_eeprom(struct comedi_device *dev, int flag)
{
- struct usb_device *usb = devpriv->usb;
+ struct vmk80xx_private *devpriv = dev->private;
+ struct usb_device *usb = comedi_to_usb_dev(dev);
unsigned int tx_pipe;
unsigned int rx_pipe;
unsigned char tx[1];
@@ -223,9 +218,10 @@ static void vmk80xx_read_eeprom(struct vmk80xx_private *devpriv, int flag)
strncpy(devpriv->fw.ic6_vers, rx + 25, 24);
}
-static void vmk80xx_do_bulk_msg(struct vmk80xx_private *devpriv)
+static void vmk80xx_do_bulk_msg(struct comedi_device *dev)
{
- struct usb_device *usb = devpriv->usb;
+ struct vmk80xx_private *devpriv = dev->private;
+ struct usb_device *usb = comedi_to_usb_dev(dev);
__u8 tx_addr;
__u8 rx_addr;
unsigned int tx_pipe;
@@ -248,21 +244,18 @@ static void vmk80xx_do_bulk_msg(struct vmk80xx_private *devpriv)
usb_bulk_msg(usb, rx_pipe, devpriv->usb_rx_buf, size, NULL, HZ * 10);
}
-static int vmk80xx_read_packet(struct vmk80xx_private *devpriv)
+static int vmk80xx_read_packet(struct comedi_device *dev)
{
- struct usb_device *usb;
+ struct vmk80xx_private *devpriv = dev->private;
+ struct usb_device *usb = comedi_to_usb_dev(dev);
struct usb_endpoint_descriptor *ep;
unsigned int pipe;
- if (!devpriv->intf)
- return -ENODEV;
-
if (devpriv->model == VMK8061_MODEL) {
- vmk80xx_do_bulk_msg(devpriv);
+ vmk80xx_do_bulk_msg(dev);
return 0;
}
- usb = devpriv->usb;
ep = devpriv->ep_rx;
pipe = usb_rcvintpipe(usb, ep->bEndpointAddress);
return usb_interrupt_msg(usb, pipe, devpriv->usb_rx_buf,
@@ -270,23 +263,20 @@ static int vmk80xx_read_packet(struct vmk80xx_private *devpriv)
HZ * 10);
}
-static int vmk80xx_write_packet(struct vmk80xx_private *devpriv, int cmd)
+static int vmk80xx_write_packet(struct comedi_device *dev, int cmd)
{
- struct usb_device *usb;
+ struct vmk80xx_private *devpriv = dev->private;
+ struct usb_device *usb = comedi_to_usb_dev(dev);
struct usb_endpoint_descriptor *ep;
unsigned int pipe;
- if (!devpriv->intf)
- return -ENODEV;
-
devpriv->usb_tx_buf[0] = cmd;
if (devpriv->model == VMK8061_MODEL) {
- vmk80xx_do_bulk_msg(devpriv);
+ vmk80xx_do_bulk_msg(dev);
return 0;
}
- usb = devpriv->usb;
ep = devpriv->ep_tx;
pipe = usb_sndintpipe(usb, ep->bEndpointAddress);
return usb_interrupt_msg(usb, pipe, devpriv->usb_tx_buf,
@@ -294,18 +284,19 @@ static int vmk80xx_write_packet(struct vmk80xx_private *devpriv, int cmd)
HZ * 10);
}
-static int vmk80xx_reset_device(struct vmk80xx_private *devpriv)
+static int vmk80xx_reset_device(struct comedi_device *dev)
{
+ struct vmk80xx_private *devpriv = dev->private;
size_t size;
int retval;
size = le16_to_cpu(devpriv->ep_tx->wMaxPacketSize);
memset(devpriv->usb_tx_buf, 0, size);
- retval = vmk80xx_write_packet(devpriv, VMK8055_CMD_RST);
+ retval = vmk80xx_write_packet(dev, VMK8055_CMD_RST);
if (retval)
return retval;
/* set outputs to known state as we cannot read them */
- return vmk80xx_write_packet(devpriv, VMK8055_CMD_WRT_AD);
+ return vmk80xx_write_packet(dev, VMK8055_CMD_WRT_AD);
}
static int vmk80xx_ai_insn_read(struct comedi_device *dev,
@@ -338,7 +329,7 @@ static int vmk80xx_ai_insn_read(struct comedi_device *dev,
}
for (n = 0; n < insn->n; n++) {
- if (vmk80xx_read_packet(devpriv))
+ if (vmk80xx_read_packet(dev))
break;
if (devpriv->model == VMK8055_MODEL) {
@@ -388,7 +379,7 @@ static int vmk80xx_ao_insn_write(struct comedi_device *dev,
for (n = 0; n < insn->n; n++) {
devpriv->usb_tx_buf[reg] = data[n];
- if (vmk80xx_write_packet(devpriv, cmd))
+ if (vmk80xx_write_packet(dev, cmd))
break;
}
@@ -415,7 +406,7 @@ static int vmk80xx_ao_insn_read(struct comedi_device *dev,
devpriv->usb_tx_buf[0] = VMK8061_CMD_RD_AO;
for (n = 0; n < insn->n; n++) {
- if (vmk80xx_read_packet(devpriv))
+ if (vmk80xx_read_packet(dev))
break;
data[n] = devpriv->usb_rx_buf[reg + chan];
@@ -447,7 +438,7 @@ static int vmk80xx_di_insn_bits(struct comedi_device *dev,
reg = VMK8055_DI_REG;
}
- retval = vmk80xx_read_packet(devpriv);
+ retval = vmk80xx_read_packet(dev);
if (!retval) {
if (devpriv->model == VMK8055_MODEL)
@@ -492,7 +483,7 @@ static int vmk80xx_do_insn_bits(struct comedi_device *dev,
tx_buf[reg] &= ~data[0];
tx_buf[reg] |= (data[0] & data[1]);
- retval = vmk80xx_write_packet(devpriv, cmd);
+ retval = vmk80xx_write_packet(dev, cmd);
if (retval)
goto out;
@@ -501,7 +492,7 @@ static int vmk80xx_do_insn_bits(struct comedi_device *dev,
if (devpriv->model == VMK8061_MODEL) {
tx_buf[0] = VMK8061_CMD_RD_DO;
- retval = vmk80xx_read_packet(devpriv);
+ retval = vmk80xx_read_packet(dev);
if (!retval) {
data[1] = rx_buf[reg];
@@ -547,7 +538,7 @@ static int vmk80xx_cnt_insn_read(struct comedi_device *dev,
}
for (n = 0; n < insn->n; n++) {
- if (vmk80xx_read_packet(devpriv))
+ if (vmk80xx_read_packet(dev))
break;
if (devpriv->model == VMK8055_MODEL)
@@ -597,7 +588,7 @@ static int vmk80xx_cnt_insn_config(struct comedi_device *dev,
}
for (n = 0; n < insn->n; n++)
- if (vmk80xx_write_packet(devpriv, cmd))
+ if (vmk80xx_write_packet(dev, cmd))
break;
up(&devpriv->limit_sem);
@@ -640,7 +631,7 @@ static int vmk80xx_cnt_insn_write(struct comedi_device *dev,
devpriv->usb_tx_buf[6 + chan] = val;
- if (vmk80xx_write_packet(devpriv, cmd))
+ if (vmk80xx_write_packet(dev, cmd))
break;
}
@@ -671,7 +662,7 @@ static int vmk80xx_pwm_insn_read(struct comedi_device *dev,
tx_buf[0] = VMK8061_CMD_RD_PWM;
for (n = 0; n < insn->n; n++) {
- if (vmk80xx_read_packet(devpriv))
+ if (vmk80xx_read_packet(dev))
break;
data[n] = rx_buf[reg[0]] + 4 * rx_buf[reg[1]];
@@ -719,7 +710,7 @@ static int vmk80xx_pwm_insn_write(struct comedi_device *dev,
tx_buf[reg[0]] = (unsigned char)(data[n] & 0x03);
tx_buf[reg[1]] = (unsigned char)(data[n] >> 2) & 0xff;
- if (vmk80xx_write_packet(devpriv, cmd))
+ if (vmk80xx_write_packet(dev, cmd))
break;
}
@@ -731,7 +722,7 @@ static int vmk80xx_pwm_insn_write(struct comedi_device *dev,
static int vmk80xx_find_usb_endpoints(struct comedi_device *dev)
{
struct vmk80xx_private *devpriv = dev->private;
- struct usb_interface *intf = devpriv->intf;
+ struct usb_interface *intf = comedi_to_usb_interface(dev);
struct usb_host_interface *iface_desc = intf->cur_altsetting;
struct usb_endpoint_descriptor *ep_desc;
int i;
@@ -884,13 +875,10 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
dev->board_ptr = boardinfo;
dev->board_name = boardinfo->name;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
- devpriv->usb = interface_to_usbdev(intf);
- devpriv->intf = intf;
devpriv->model = boardinfo->model;
ret = vmk80xx_find_usb_endpoints(dev);
@@ -906,23 +894,24 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
usb_set_intfdata(intf, devpriv);
if (devpriv->model == VMK8061_MODEL) {
- vmk80xx_read_eeprom(devpriv, IC3_VERSION);
+ vmk80xx_read_eeprom(dev, IC3_VERSION);
dev_info(&intf->dev, "%s\n", devpriv->fw.ic3_vers);
- if (vmk80xx_check_data_link(devpriv)) {
- vmk80xx_read_eeprom(devpriv, IC6_VERSION);
+ if (vmk80xx_check_data_link(dev)) {
+ vmk80xx_read_eeprom(dev, IC6_VERSION);
dev_info(&intf->dev, "%s\n", devpriv->fw.ic6_vers);
}
}
if (devpriv->model == VMK8055_MODEL)
- vmk80xx_reset_device(devpriv);
+ vmk80xx_reset_device(dev);
return vmk80xx_init_subdevices(dev);
}
static void vmk80xx_detach(struct comedi_device *dev)
{
+ struct usb_interface *intf = comedi_to_usb_interface(dev);
struct vmk80xx_private *devpriv = dev->private;
if (!devpriv)
@@ -930,7 +919,7 @@ static void vmk80xx_detach(struct comedi_device *dev)
down(&devpriv->limit_sem);
- usb_set_intfdata(devpriv->intf, NULL);
+ usb_set_intfdata(intf, NULL);
kfree(devpriv->usb_rx_buf);
kfree(devpriv->usb_tx_buf);
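The vmk80xx conversion follows the same pattern as usbduxsigma above: helpers take the comedi_device and derive the USB handles with comedi_to_usb_dev() instead of caching struct usb_device / usb_interface pointers in the private data. A condensed sketch of a converted helper, folding vmk80xx_reset_device() and vmk80xx_write_packet() together; the function name is hypothetical, the VMK8061 bulk-transfer path is omitted, and the driver's own headers and private struct are assumed.

static int vmk80xx_example_reset(struct comedi_device *dev)
{
	struct vmk80xx_private *devpriv = dev->private;
	struct usb_device *usb = comedi_to_usb_dev(dev);	/* derived from dev->hw_dev */
	size_t size = le16_to_cpu(devpriv->ep_tx->wMaxPacketSize);
	unsigned int pipe = usb_sndintpipe(usb, devpriv->ep_tx->bEndpointAddress);

	/* a zeroed interrupt packet with the reset command in byte 0 */
	memset(devpriv->usb_tx_buf, 0, size);
	devpriv->usb_tx_buf[0] = VMK8055_CMD_RST;

	return usb_interrupt_msg(usb, pipe, devpriv->usb_tx_buf, size,
				 NULL, HZ * 10);
}

With the usb_device no longer stored in vmk80xx_private, the "if (!devpriv->intf) return -ENODEV" guards become unnecessary, which is why the hunks above drop them.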
diff --git a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
index 3231a48..cd60677 100644
--- a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
+++ b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
#include <linux/module.h>
@@ -27,8 +22,6 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fcntl.h>
-#include <linux/delay.h>
-#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/io.h>
@@ -130,6 +123,27 @@ error:
return ret;
}
+int comedi_dio_get_config(struct comedi_device *dev, unsigned int subdev,
+ unsigned int chan, unsigned int *io)
+{
+ struct comedi_insn insn;
+ unsigned int data[2];
+ int ret;
+
+ memset(&insn, 0, sizeof(insn));
+ insn.insn = INSN_CONFIG;
+ insn.n = 2;
+ insn.subdev = subdev;
+ insn.chanspec = CR_PACK(chan, 0, 0);
+ data[0] = INSN_CONFIG_DIO_QUERY;
+ data[1] = 0;
+ ret = comedi_do_insn(dev, &insn, data);
+ if (ret >= 0)
+ *io = data[1];
+ return ret;
+}
+EXPORT_SYMBOL_GPL(comedi_dio_get_config);
+
int comedi_dio_config(struct comedi_device *dev, unsigned int subdev,
unsigned int chan, unsigned int io)
{
@@ -145,28 +159,53 @@ int comedi_dio_config(struct comedi_device *dev, unsigned int subdev,
}
EXPORT_SYMBOL_GPL(comedi_dio_config);
-int comedi_dio_bitfield(struct comedi_device *dev, unsigned int subdev,
- unsigned int mask, unsigned int *bits)
+int comedi_dio_bitfield2(struct comedi_device *dev, unsigned int subdev,
+ unsigned int mask, unsigned int *bits,
+ unsigned int base_channel)
{
struct comedi_insn insn;
unsigned int data[2];
+ unsigned int n_chan;
+ unsigned int shift;
int ret;
+ if (subdev >= dev->n_subdevices)
+ return -EINVAL;
+
+ base_channel = CR_CHAN(base_channel);
+ n_chan = comedi_get_n_channels(dev, subdev);
+ if (base_channel >= n_chan)
+ return -EINVAL;
+
memset(&insn, 0, sizeof(insn));
insn.insn = INSN_BITS;
+ insn.chanspec = base_channel;
insn.n = 2;
insn.subdev = subdev;
data[0] = mask;
data[1] = *bits;
- ret = comedi_do_insn(dev, &insn, data);
-
- *bits = data[1];
+ /*
+ * Most drivers ignore the base channel in insn->chanspec.
+ * Fix this here if the subdevice has <= 32 channels.
+ */
+ if (n_chan <= 32) {
+ shift = base_channel;
+ if (shift) {
+ insn.chanspec = 0;
+ data[0] <<= shift;
+ data[1] <<= shift;
+ }
+ } else {
+ shift = 0;
+ }
+ ret = comedi_do_insn(dev, &insn, data);
+ *bits = data[1] >> shift;
return ret;
}
-EXPORT_SYMBOL_GPL(comedi_dio_bitfield);
+EXPORT_SYMBOL_GPL(comedi_dio_bitfield2);
int comedi_find_subdevice_by_type(struct comedi_device *dev, int type,
unsigned int subd)
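The kcomedilib hunk above adds comedi_dio_get_config() and replaces comedi_dio_bitfield() with comedi_dio_bitfield2(), which takes a base channel and shifts the mask/bits for subdevices of up to 32 channels. A short sketch of an in-kernel consumer of these helpers; the device node, subdevice number and include paths are assumptions for the example, not taken from the patch.

#include <linux/module.h>
#include "../comedi.h"
#include "../comedilib.h"

static int __init dio_example_init(void)
{
	struct comedi_device *dev;
	unsigned int io = 0;
	unsigned int bits = 0x01;
	int ret;

	dev = comedi_open("/dev/comedi0");	/* assumed device node */
	if (!dev)
		return -ENODEV;

	/* make channel 0 of subdevice 2 an output, then read the mode back */
	ret = comedi_dio_config(dev, 2, 0, COMEDI_OUTPUT);
	if (ret >= 0)
		ret = comedi_dio_get_config(dev, 2, 0, &io);

	/* drive channel 0 high; the mask selects which bits are written */
	if (ret >= 0)
		ret = comedi_dio_bitfield2(dev, 2, 0x01, &bits, 0);

	comedi_close(dev);
	return ret < 0 ? ret : 0;
}

static void __exit dio_example_exit(void)
{
}

module_init(dio_example_init);
module_exit(dio_example_exit);
MODULE_LICENSE("GPL");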
diff --git a/drivers/staging/comedi/proc.c b/drivers/staging/comedi/proc.c
index 886c202..ade0003 100644
--- a/drivers/staging/comedi/proc.c
+++ b/drivers/staging/comedi/proc.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
/*
@@ -60,6 +55,7 @@ static int comedi_read(struct seq_file *m, void *v)
if (!devices_q)
seq_puts(m, "no devices\n");
+ mutex_lock(&comedi_drivers_list_lock);
for (driv = comedi_drivers; driv; driv = driv->next) {
seq_printf(m, "%s:\n", driv->driver_name);
for (i = 0; i < driv->num_names; i++)
@@ -70,6 +66,7 @@ static int comedi_read(struct seq_file *m, void *v)
if (!driv->num_names)
seq_printf(m, " %s\n", driv->driver_name);
}
+ mutex_unlock(&comedi_drivers_list_lock);
return 0;
}
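The proc change above wraps the walk of the global comedi_drivers list in comedi_drivers_list_lock, so a driver cannot be unregistered and freed while /proc/comedi is being generated. A generic sketch of the pattern, with made-up names rather than comedi symbols:

#include <linux/mutex.h>
#include <linux/seq_file.h>

struct example_driver {
	const char *name;
	struct example_driver *next;
};

static DEFINE_MUTEX(example_list_lock);
static struct example_driver *example_drivers;	/* head of a singly linked list */

/* readers hold the same lock the register/unregister paths take */
static void example_dump(struct seq_file *m)
{
	struct example_driver *drv;

	mutex_lock(&example_list_lock);
	for (drv = example_drivers; drv; drv = drv->next)
		seq_printf(m, "%s\n", drv->name);
	mutex_unlock(&example_list_lock);
}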
diff --git a/drivers/staging/comedi/range.c b/drivers/staging/comedi/range.c
index 1dc391b..8fde554 100644
--- a/drivers/staging/comedi/range.c
+++ b/drivers/staging/comedi/range.c
@@ -14,11 +14,6 @@
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
*/
#include <linux/uaccess.h>
@@ -132,38 +127,35 @@ static int aref_invalid(struct comedi_subdevice *s, unsigned int chanspec)
return 1;
}
-/*
- This function checks each element in a channel/gain list to make
- make sure it is valid.
+/**
+ * comedi_check_chanlist() - Validate each element in a chanlist.
+ * @s: comedi_subdevice struct
+ * @n: number of elements in the chanlist
+ * @chanlist: the chanlist to validate
*/
int comedi_check_chanlist(struct comedi_subdevice *s, int n,
unsigned int *chanlist)
{
struct comedi_device *dev = s->device;
- int i;
- int chan;
+ unsigned int chanspec;
+ int chan, range_len, i;
- if (s->range_table) {
- for (i = 0; i < n; i++)
- if (CR_CHAN(chanlist[i]) >= s->n_chan ||
- CR_RANGE(chanlist[i]) >= s->range_table->length
- || aref_invalid(s, chanlist[i])) {
- dev_warn(dev->class_dev,
- "bad chanlist[%d]=0x%08x in_chan=%d range length=%d\n",
- i, chanlist[i], s->n_chan,
- s->range_table->length);
- return -EINVAL;
- }
- } else if (s->range_table_list) {
+ if (s->range_table || s->range_table_list) {
for (i = 0; i < n; i++) {
- chan = CR_CHAN(chanlist[i]);
+ chanspec = chanlist[i];
+ chan = CR_CHAN(chanspec);
+ if (s->range_table)
+ range_len = s->range_table->length;
+ else if (s->range_table_list && chan < s->n_chan)
+ range_len = s->range_table_list[chan]->length;
+ else
+ range_len = 0;
if (chan >= s->n_chan ||
- CR_RANGE(chanlist[i]) >=
- s->range_table_list[chan]->length
- || aref_invalid(s, chanlist[i])) {
+ CR_RANGE(chanspec) >= range_len ||
+ aref_invalid(s, chanspec)) {
dev_warn(dev->class_dev,
- "bad chanlist[%d]=0x%08x\n",
- i, chanlist[i]);
+ "bad chanlist[%d]=0x%08x chan=%d range length=%d\n",
+ i, chanspec, chan, range_len);
return -EINVAL;
}
}
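comedi_check_chanlist() above validates each packed chanlist entry against the subdevice's channel count and range table length (plus the aref check, which is unchanged). A standalone sketch of what an entry looks like and what the bounds check rejects; the CR_* macros are reproduced only to keep the example self-contained and are meant to mirror the comedi.h definitions.

#include <stdio.h>

#define CR_PACK(chan, rng, aref) ((((aref) & 0x3) << 24) | (((rng) & 0xff) << 16) | (chan))
#define CR_CHAN(a)	((a) & 0xffff)
#define CR_RANGE(a)	(((a) >> 16) & 0xff)

static int check_entry(unsigned int chanspec, unsigned int n_chan,
		       unsigned int range_len)
{
	if (CR_CHAN(chanspec) >= n_chan)
		return -1;	/* channel number out of range */
	if (CR_RANGE(chanspec) >= range_len)
		return -1;	/* range index beyond the range table */
	return 0;
}

int main(void)
{
	/* a subdevice with 16 channels and a single range entry */
	unsigned int good = CR_PACK(3, 0, 0);	/* channel 3, range 0 */
	unsigned int bad = CR_PACK(3, 5, 0);	/* range 5 does not exist */

	printf("good: %d, bad: %d\n", check_entry(good, 16, 1),
	       check_entry(bad, 16, 1));	/* good: 0, bad: -1 */
	return 0;
}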
diff --git a/drivers/staging/cptm1217/clearpad_tm1217.c b/drivers/staging/cptm1217/clearpad_tm1217.c
index e96eee3..42a5f5c 100644
--- a/drivers/staging/cptm1217/clearpad_tm1217.c
+++ b/drivers/staging/cptm1217/clearpad_tm1217.c
@@ -547,10 +547,8 @@ fail_gpio:
fail:
/* Clean up before returning failure */
for (i = 0; i < TOUCH_SUPPORTED; i++) {
- if (ts->cp_input_info[i].input) {
+ if (ts->cp_input_info[i].input)
input_unregister_device(ts->cp_input_info[i].input);
- input_free_device(ts->cp_input_info[i].input);
- }
}
kfree(ts);
return retval;
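
The cptm1217 hunk above drops input_free_device() from the failure path because input_unregister_device() already releases the final reference and frees the device; freeing it a second time would be a use-after-free. A minimal sketch of the general input-device cleanup rule (the probe function and its flow are invented for illustration):

/* Sketch only -- general rule, not code from this driver. */
#include <linux/input.h>

static int example_probe(void)
{
	struct input_dev *dev;
	int err;

	dev = input_allocate_device();
	if (!dev)
		return -ENOMEM;

	err = input_register_device(dev);
	if (err) {
		input_free_device(dev);		/* never registered: free it */
		return err;
	}

	/* ... later teardown ... */
	input_unregister_device(dev);		/* registered: this also frees */
	return 0;
}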
diff --git a/drivers/staging/crystalhd/bc_dts_glob_lnx.h b/drivers/staging/crystalhd/bc_dts_glob_lnx.h
index fd1a6e6..981708f 100644
--- a/drivers/staging/crystalhd/bc_dts_glob_lnx.h
+++ b/drivers/staging/crystalhd/bc_dts_glob_lnx.h
@@ -58,11 +58,11 @@
* between the driver and the application.
*/
enum BC_DTS_GLOBALS {
- BC_MAX_FW_CMD_BUFF_SZ = 0x40, /* FW passthrough cmd/rsp buffer size */
+ BC_MAX_FW_CMD_BUFF_SZ = 0x40, /* FW passthrough cmd/rsp buffer size */
PCI_CFG_SIZE = 256, /* PCI config size buffer */
BC_IOCTL_DATA_POOL_SIZE = 8, /* BC_IOCTL_DATA Pool size */
- BC_LINK_MAX_OPENS = 3, /* Maximum simultaneous opens*/
- BC_LINK_MAX_SGLS = 1024, /* Maximum SG elements 4M/4K */
+ BC_LINK_MAX_OPENS = 3, /* Maximum simultaneous opens*/
+ BC_LINK_MAX_SGLS = 1024, /* Maximum SG elements 4M/4K */
BC_TX_LIST_CNT = 2, /* Max Tx DMA Rings */
BC_RX_LIST_CNT = 8, /* Max Rx DMA Rings*/
BC_PROC_OUTPUT_TIMEOUT = 3000, /* Milliseconds */
@@ -240,11 +240,14 @@ enum BC_DRV_CMD {
DRV_CMD_ADD_RXBUFFS, /* Add Rx side buffers to driver pool */
DRV_CMD_FETCH_RXBUFF, /* Get Rx DMAed buffer */
DRV_CMD_START_RX_CAP, /* Start Rx Buffer Capture */
- DRV_CMD_FLUSH_RX_CAP, /* Stop the capture for now...we will enhance this later*/
+ DRV_CMD_FLUSH_RX_CAP, /* Stop the capture for now...
+ we will enhance this later*/
DRV_CMD_GET_DRV_STAT, /* Get Driver Internal Statistics */
DRV_CMD_RST_DRV_STAT, /* Reset Driver Internal Statistics */
- DRV_CMD_NOTIFY_MODE, /* Notify the Mode to driver in which the application is Operating*/
- DRV_CMD_CHANGE_CLOCK, /* Change the core clock to either save power or improve performance */
+ DRV_CMD_NOTIFY_MODE, /* Notify the Mode to driver
+ in which the application is Operating*/
+ DRV_CMD_CHANGE_CLOCK, /* Change the core clock to either save power
+ or improve performance */
/* MUST be the last one.. */
DRV_CMD_END, /* End of the List.. */
@@ -283,8 +286,8 @@ struct crystalhd_ioctl_data {
struct BC_IOCTL_DATA udata; /* IOCTL from App..*/
uint32_t u_id; /* Driver specific user ID */
uint32_t cmd; /* Cmd ID for driver's use. */
- void *add_cdata; /* Additional command specific data..*/
- uint32_t add_cdata_sz; /* Additional command specific data size */
+ void *add_cdata; /* Additional command specific data..*/
+ uint32_t add_cdata_sz; /* Additional command specific data size */
struct crystalhd_ioctl_data *next; /* List/Fifo management */
};
diff --git a/drivers/staging/crystalhd/crystalhd_cmds.c b/drivers/staging/crystalhd/crystalhd_cmds.c
index ed99daa6..07a2f24 100644
--- a/drivers/staging/crystalhd/crystalhd_cmds.c
+++ b/drivers/staging/crystalhd/crystalhd_cmds.c
@@ -94,8 +94,7 @@ static enum BC_STATUS bc_cproc_notify_mode(struct crystalhd_cmd *ctx,
for (i = 0; i < BC_LINK_MAX_OPENS; i++) {
if (ctx->user[i].mode == DTS_DIAG_MODE ||
ctx->user[i].mode == DTS_PLAYBACK_MODE) {
- BCMLOG_ERR("multiple playback sessions are not "
- "supported..\n");
+ BCMLOG_ERR("multiple playback sessions are not supported..\n");
return BC_STS_ERR_USAGE;
}
}
@@ -472,8 +471,8 @@ static enum BC_STATUS bc_cproc_hw_txdma(struct crystalhd_cmd *ctx,
}
/* Helper function to check on user buffers */
-static enum BC_STATUS bc_cproc_check_inbuffs(bool pin, void *ubuff, uint32_t ub_sz,
- uint32_t uv_off, bool en_422)
+static enum BC_STATUS bc_cproc_check_inbuffs(bool pin, void *ubuff,
+ uint32_t ub_sz, uint32_t uv_off, bool en_422)
{
if (!ubuff || !ub_sz) {
BCMLOG_ERR("%s->Invalid Arg %p %x\n",
@@ -483,8 +482,9 @@ static enum BC_STATUS bc_cproc_check_inbuffs(bool pin, void *ubuff, uint32_t ub_
/* Check for alignment */
if (((uintptr_t)ubuff) & 0x03) {
- BCMLOG_ERR("%s-->Un-aligned address not implemented yet.. %p\n",
- ((pin) ? "TX" : "RX"), ubuff);
+ BCMLOG_ERR(
+ "%s-->Un-aligned address not implemented yet.. %p\n",
+ ((pin) ? "TX" : "RX"), ubuff);
return BC_STS_NOT_IMPL;
}
if (pin)
@@ -572,7 +572,8 @@ static enum BC_STATUS bc_cproc_add_cap_buff(struct crystalhd_cmd *ctx,
if (!dio_hnd)
return BC_STS_ERROR;
- sts = crystalhd_hw_add_cap_buffer(&ctx->hw_ctx, dio_hnd, (ctx->state == BC_LINK_READY));
+ sts = crystalhd_hw_add_cap_buffer(&ctx->hw_ctx, dio_hnd,
+ (ctx->state == BC_LINK_READY));
if ((sts != BC_STS_SUCCESS) && (sts != BC_STS_BUSY)) {
crystalhd_unmap_dio(ctx->adp, dio_hnd);
return sts;
@@ -618,7 +619,8 @@ static enum BC_STATUS bc_cproc_fetch_frame(struct crystalhd_cmd *ctx,
sts = crystalhd_hw_get_cap_buffer(&ctx->hw_ctx, &frame->PibInfo, &dio);
if (sts != BC_STS_SUCCESS)
- return (ctx->state & BC_LINK_SUSPEND) ? BC_STS_IO_USER_ABORT : sts;
+ return (ctx->state & BC_LINK_SUSPEND) ?
+ BC_STS_IO_USER_ABORT : sts;
frame->Flags = dio->uinfo.comp_flags;
@@ -673,7 +675,8 @@ static enum BC_STATUS bc_cproc_flush_cap_buffs(struct crystalhd_cmd *ctx,
frame = &idata->udata.u.DecOutData;
for (count = 0; count < BC_RX_LIST_CNT; count++) {
- sts = crystalhd_hw_get_cap_buffer(&ctx->hw_ctx, &frame->PibInfo, &dio);
+ sts = crystalhd_hw_get_cap_buffer(&ctx->hw_ctx,
+ &frame->PibInfo, &dio);
if (sts != BC_STS_SUCCESS)
break;
@@ -916,7 +919,8 @@ enum BC_STATUS crystalhd_user_open(struct crystalhd_cmd *ctx,
* Closer application handle and release app specific
* resources.
*/
-enum BC_STATUS crystalhd_user_close(struct crystalhd_cmd *ctx, struct crystalhd_user *uc)
+enum BC_STATUS crystalhd_user_close(struct crystalhd_cmd *ctx,
+ struct crystalhd_user *uc)
{
uint32_t mode = uc->mode;
@@ -1008,8 +1012,8 @@ enum BC_STATUS crystalhd_delete_cmd_context(struct crystalhd_cmd *ctx)
* mode of operation and returns the function pointer
* from the cproc table.
*/
-crystalhd_cmd_proc crystalhd_get_cmd_proc(struct crystalhd_cmd *ctx, uint32_t cmd,
- struct crystalhd_user *uc)
+crystalhd_cmd_proc crystalhd_get_cmd_proc(struct crystalhd_cmd *ctx,
+ uint32_t cmd, struct crystalhd_user *uc)
{
crystalhd_cmd_proc cproc = NULL;
unsigned int i, tbl_sz;
@@ -1024,7 +1028,8 @@ crystalhd_cmd_proc crystalhd_get_cmd_proc(struct crystalhd_cmd *ctx, uint32_t cm
return NULL;
}
- tbl_sz = sizeof(g_crystalhd_cproc_tbl) / sizeof(struct crystalhd_cmd_tbl);
+ tbl_sz = sizeof(g_crystalhd_cproc_tbl) /
+ sizeof(struct crystalhd_cmd_tbl);
for (i = 0; i < tbl_sz; i++) {
if (g_crystalhd_cproc_tbl[i].cmd_id == cmd) {
if ((uc->mode == DTS_MONITOR_MODE) &&
diff --git a/drivers/staging/crystalhd/crystalhd_cmds.h b/drivers/staging/crystalhd/crystalhd_cmds.h
index 4066ba3..377cd9d 100644
--- a/drivers/staging/crystalhd/crystalhd_cmds.h
+++ b/drivers/staging/crystalhd/crystalhd_cmds.h
@@ -66,7 +66,8 @@ struct crystalhd_cmd {
struct crystalhd_hw hw_ctx;
};
-typedef enum BC_STATUS(*crystalhd_cmd_proc)(struct crystalhd_cmd *, struct crystalhd_ioctl_data *);
+typedef enum BC_STATUS(*crystalhd_cmd_proc)(struct crystalhd_cmd *,
+ struct crystalhd_ioctl_data *);
struct crystalhd_cmd_tbl {
uint32_t cmd_id;
@@ -74,13 +75,17 @@ struct crystalhd_cmd_tbl {
uint32_t block_mon;
};
-enum BC_STATUS crystalhd_suspend(struct crystalhd_cmd *ctx, struct crystalhd_ioctl_data *idata);
+enum BC_STATUS crystalhd_suspend(struct crystalhd_cmd *ctx,
+ struct crystalhd_ioctl_data *idata);
enum BC_STATUS crystalhd_resume(struct crystalhd_cmd *ctx);
-crystalhd_cmd_proc crystalhd_get_cmd_proc(struct crystalhd_cmd *ctx, uint32_t cmd,
- struct crystalhd_user *uc);
-enum BC_STATUS crystalhd_user_open(struct crystalhd_cmd *ctx, struct crystalhd_user **user_ctx);
-enum BC_STATUS crystalhd_user_close(struct crystalhd_cmd *ctx, struct crystalhd_user *uc);
-enum BC_STATUS crystalhd_setup_cmd_context(struct crystalhd_cmd *ctx, struct crystalhd_adp *adp);
+crystalhd_cmd_proc crystalhd_get_cmd_proc(struct crystalhd_cmd *ctx,
+ uint32_t cmd, struct crystalhd_user *uc);
+enum BC_STATUS crystalhd_user_open(struct crystalhd_cmd *ctx,
+ struct crystalhd_user **user_ctx);
+enum BC_STATUS crystalhd_user_close(struct crystalhd_cmd *ctx,
+ struct crystalhd_user *uc);
+enum BC_STATUS crystalhd_setup_cmd_context(struct crystalhd_cmd *ctx,
+ struct crystalhd_adp *adp);
enum BC_STATUS crystalhd_delete_cmd_context(struct crystalhd_cmd *ctx);
bool crystalhd_cmd_interrupt(struct crystalhd_cmd *ctx);
diff --git a/drivers/staging/crystalhd/crystalhd_fw_if.h b/drivers/staging/crystalhd/crystalhd_fw_if.h
index 9e2831e..4b363a5 100644
--- a/drivers/staging/crystalhd/crystalhd_fw_if.h
+++ b/drivers/staging/crystalhd/crystalhd_fw_if.h
@@ -106,7 +106,8 @@ struct ppb_vc1 {
struct fgt_sei {
struct fgt_sei *next;
- unsigned char model_values[3][MAX_FGT_VALUE_INTERVAL][MAX_FGT_MODEL_VALUE];
+ unsigned char
+ model_values[3][MAX_FGT_VALUE_INTERVAL][MAX_FGT_MODEL_VALUE];
unsigned char upper_bound[3][MAX_FGT_VALUE_INTERVAL];
unsigned char lower_bound[3][MAX_FGT_VALUE_INTERVAL];
@@ -125,10 +126,12 @@ struct fgt_sei {
unsigned char blending_mode_id; /* Blending mode. */
unsigned char log2_scale_factor; /* Log2 scale factor (2-7). */
- unsigned char comp_flag[3]; /* Components [0,2] parameters present flag. */
- unsigned char num_intervals_minus1[3]; /* Number of intensity level intervals. */
+ unsigned char comp_flag[3]; /* Components [0,2]
+ parameters present flag. */
+ unsigned char num_intervals_minus1[3]; /* Number of
+ intensity level intervals. */
unsigned char num_model_values[3]; /* Number of model values. */
- uint16_t repetition_period; /* Repetition period (0-16384) */
+ uint16_t repetition_period; /* Repetition period (0-16384) */
};
@@ -266,40 +269,40 @@ enum c011_ts_cmd {
/* Decoding commands */
eCMD_C011_DEC_CHAN_OPEN = eCMD_C011_CMD_BASE + 0x100,
- eCMD_C011_DEC_CHAN_CLOSE = eCMD_C011_CMD_BASE + 0x101,
- eCMD_C011_DEC_CHAN_ACTIVATE = eCMD_C011_CMD_BASE + 0x102,
- eCMD_C011_DEC_CHAN_STATUS = eCMD_C011_CMD_BASE + 0x103,
- eCMD_C011_DEC_CHAN_FLUSH = eCMD_C011_CMD_BASE + 0x104,
+ eCMD_C011_DEC_CHAN_CLOSE = eCMD_C011_CMD_BASE + 0x101,
+ eCMD_C011_DEC_CHAN_ACTIVATE = eCMD_C011_CMD_BASE + 0x102,
+ eCMD_C011_DEC_CHAN_STATUS = eCMD_C011_CMD_BASE + 0x103,
+ eCMD_C011_DEC_CHAN_FLUSH = eCMD_C011_CMD_BASE + 0x104,
eCMD_C011_DEC_CHAN_TRICK_PLAY = eCMD_C011_CMD_BASE + 0x105,
- eCMD_C011_DEC_CHAN_TS_PIDS = eCMD_C011_CMD_BASE + 0x106,
+ eCMD_C011_DEC_CHAN_TS_PIDS = eCMD_C011_CMD_BASE + 0x106,
eCMD_C011_DEC_CHAN_PS_STREAM_ID = eCMD_C011_CMD_BASE + 0x107,
eCMD_C011_DEC_CHAN_INPUT_PARAMS = eCMD_C011_CMD_BASE + 0x108,
eCMD_C011_DEC_CHAN_VIDEO_OUTPUT = eCMD_C011_CMD_BASE + 0x109,
- eCMD_C011_DEC_CHAN_OUTPUT_FORMAT = eCMD_C011_CMD_BASE + 0x10A,
- eCMD_C011_DEC_CHAN_SCALING_FILTERS = eCMD_C011_CMD_BASE + 0x10B,
- eCMD_C011_DEC_CHAN_OSD_MODE = eCMD_C011_CMD_BASE + 0x10D,
+ eCMD_C011_DEC_CHAN_OUTPUT_FORMAT = eCMD_C011_CMD_BASE + 0x10A,
+ eCMD_C011_DEC_CHAN_SCALING_FILTERS = eCMD_C011_CMD_BASE + 0x10B,
+ eCMD_C011_DEC_CHAN_OSD_MODE = eCMD_C011_CMD_BASE + 0x10D,
eCMD_C011_DEC_CHAN_DROP = eCMD_C011_CMD_BASE + 0x10E,
- eCMD_C011_DEC_CHAN_RELEASE = eCMD_C011_CMD_BASE + 0x10F,
- eCMD_C011_DEC_CHAN_STREAM_SETTINGS = eCMD_C011_CMD_BASE + 0x110,
+ eCMD_C011_DEC_CHAN_RELEASE = eCMD_C011_CMD_BASE + 0x10F,
+ eCMD_C011_DEC_CHAN_STREAM_SETTINGS = eCMD_C011_CMD_BASE + 0x110,
eCMD_C011_DEC_CHAN_PAUSE_OUTPUT = eCMD_C011_CMD_BASE + 0x111,
- eCMD_C011_DEC_CHAN_CHANGE = eCMD_C011_CMD_BASE + 0x112,
- eCMD_C011_DEC_CHAN_SET_STC = eCMD_C011_CMD_BASE + 0x113,
- eCMD_C011_DEC_CHAN_SET_PTS = eCMD_C011_CMD_BASE + 0x114,
- eCMD_C011_DEC_CHAN_CC_MODE = eCMD_C011_CMD_BASE + 0x115,
- eCMD_C011_DEC_CREATE_AUDIO_CONTEXT = eCMD_C011_CMD_BASE + 0x116,
- eCMD_C011_DEC_COPY_AUDIO_CONTEXT = eCMD_C011_CMD_BASE + 0x117,
- eCMD_C011_DEC_DELETE_AUDIO_CONTEXT = eCMD_C011_CMD_BASE + 0x118,
- eCMD_C011_DEC_CHAN_SET_DECYPTION = eCMD_C011_CMD_BASE + 0x119,
+ eCMD_C011_DEC_CHAN_CHANGE = eCMD_C011_CMD_BASE + 0x112,
+ eCMD_C011_DEC_CHAN_SET_STC = eCMD_C011_CMD_BASE + 0x113,
+ eCMD_C011_DEC_CHAN_SET_PTS = eCMD_C011_CMD_BASE + 0x114,
+ eCMD_C011_DEC_CHAN_CC_MODE = eCMD_C011_CMD_BASE + 0x115,
+ eCMD_C011_DEC_CREATE_AUDIO_CONTEXT = eCMD_C011_CMD_BASE + 0x116,
+ eCMD_C011_DEC_COPY_AUDIO_CONTEXT = eCMD_C011_CMD_BASE + 0x117,
+ eCMD_C011_DEC_DELETE_AUDIO_CONTEXT = eCMD_C011_CMD_BASE + 0x118,
+ eCMD_C011_DEC_CHAN_SET_DECYPTION = eCMD_C011_CMD_BASE + 0x119,
eCMD_C011_DEC_CHAN_START_VIDEO = eCMD_C011_CMD_BASE + 0x11A,
eCMD_C011_DEC_CHAN_STOP_VIDEO = eCMD_C011_CMD_BASE + 0x11B,
eCMD_C011_DEC_CHAN_PIC_CAPTURE = eCMD_C011_CMD_BASE + 0x11C,
- eCMD_C011_DEC_CHAN_PAUSE = eCMD_C011_CMD_BASE + 0x11D,
+ eCMD_C011_DEC_CHAN_PAUSE = eCMD_C011_CMD_BASE + 0x11D,
eCMD_C011_DEC_CHAN_PAUSE_STATE = eCMD_C011_CMD_BASE + 0x11E,
- eCMD_C011_DEC_CHAN_SET_SLOWM_RATE = eCMD_C011_CMD_BASE + 0x11F,
- eCMD_C011_DEC_CHAN_GET_SLOWM_RATE = eCMD_C011_CMD_BASE + 0x120,
+ eCMD_C011_DEC_CHAN_SET_SLOWM_RATE = eCMD_C011_CMD_BASE + 0x11F,
+ eCMD_C011_DEC_CHAN_GET_SLOWM_RATE = eCMD_C011_CMD_BASE + 0x120,
eCMD_C011_DEC_CHAN_SET_FF_RATE = eCMD_C011_CMD_BASE + 0x121,
eCMD_C011_DEC_CHAN_GET_FF_RATE = eCMD_C011_CMD_BASE + 0x122,
- eCMD_C011_DEC_CHAN_FRAME_ADVANCE = eCMD_C011_CMD_BASE + 0x123,
+ eCMD_C011_DEC_CHAN_FRAME_ADVANCE = eCMD_C011_CMD_BASE + 0x123,
eCMD_C011_DEC_CHAN_SET_SKIP_PIC_MODE = eCMD_C011_CMD_BASE + 0x124,
eCMD_C011_DEC_CHAN_GET_SKIP_PIC_MODE = eCMD_C011_CMD_BASE + 0x125,
eCMD_C011_DEC_CHAN_FILL_PIC_BUF = eCMD_C011_CMD_BASE + 0x126,
@@ -308,15 +311,16 @@ enum c011_ts_cmd {
eCMD_C011_DEC_CHAN_SET_BRCM_TRICK_MODE = eCMD_C011_CMD_BASE + 0x129,
eCMD_C011_DEC_CHAN_GET_BRCM_TRICK_MODE = eCMD_C011_CMD_BASE + 0x12A,
eCMD_C011_DEC_CHAN_REVERSE_FIELD_STATUS = eCMD_C011_CMD_BASE + 0x12B,
- eCMD_C011_DEC_CHAN_I_PICTURE_FOUND = eCMD_C011_CMD_BASE + 0x12C,
- eCMD_C011_DEC_CHAN_SET_PARAMETER = eCMD_C011_CMD_BASE + 0x12D,
+ eCMD_C011_DEC_CHAN_I_PICTURE_FOUND = eCMD_C011_CMD_BASE + 0x12C,
+ eCMD_C011_DEC_CHAN_SET_PARAMETER = eCMD_C011_CMD_BASE + 0x12D,
eCMD_C011_DEC_CHAN_SET_USER_DATA_MODE = eCMD_C011_CMD_BASE + 0x12E,
- eCMD_C011_DEC_CHAN_SET_PAUSE_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x12F,
- eCMD_C011_DEC_CHAN_SET_SLOW_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x130,
+ eCMD_C011_DEC_CHAN_SET_PAUSE_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x12F,
+ eCMD_C011_DEC_CHAN_SET_SLOW_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x130,
eCMD_C011_DEC_CHAN_SET_FF_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x131,
- eCMD_C011_DEC_CHAN_SET_DISPLAY_TIMING_MODE = eCMD_C011_CMD_BASE + 0x132,
- eCMD_C011_DEC_CHAN_SET_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x133,
- eCMD_C011_DEC_CHAN_GET_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x134,
+ eCMD_C011_DEC_CHAN_SET_DISPLAY_TIMING_MODE = eCMD_C011_CMD_BASE +
+ 0x132,
+ eCMD_C011_DEC_CHAN_SET_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x133,
+ eCMD_C011_DEC_CHAN_GET_DISPLAY_MODE = eCMD_C011_CMD_BASE + 0x134,
eCMD_C011_DEC_CHAN_SET_REVERSE_FIELD = eCMD_C011_CMD_BASE + 0x135,
eCMD_C011_DEC_CHAN_STREAM_OPEN = eCMD_C011_CMD_BASE + 0x136,
eCMD_C011_DEC_CHAN_SET_PCR_PID = eCMD_C011_CMD_BASE + 0x137,
@@ -328,19 +332,22 @@ enum c011_ts_cmd {
eCMD_C011_DEC_CHAN_GET_DISPLAY_ORDER = eCMD_C011_CMD_BASE + 0x143,
eCMD_C011_DEC_CHAN_SET_HOST_TRICK_MODE = eCMD_C011_CMD_BASE + 0x144,
eCMD_C011_DEC_CHAN_SET_OPERATION_MODE = eCMD_C011_CMD_BASE + 0x145,
- eCMD_C011_DEC_CHAN_DISPLAY_PAUSE_UNTO_PTS = eCMD_C011_CMD_BASE + 0x146,
- eCMD_C011_DEC_CHAN_SET_PTS_STC_DIFF_THRESHOLD = eCMD_C011_CMD_BASE + 0x147,
+ eCMD_C011_DEC_CHAN_DISPLAY_PAUSE_UNTO_PTS = eCMD_C011_CMD_BASE + 0x146,
+ eCMD_C011_DEC_CHAN_SET_PTS_STC_DIFF_THRESHOLD = eCMD_C011_CMD_BASE +
+ 0x147,
eCMD_C011_DEC_CHAN_SEND_COMPRESSED_BUF = eCMD_C011_CMD_BASE + 0x148,
eCMD_C011_DEC_CHAN_SET_CLIPPING = eCMD_C011_CMD_BASE + 0x149,
eCMD_C011_DEC_CHAN_SET_PARAMETERS_FOR_HARD_RESET_INTERRUPT_TO_HOST
= eCMD_C011_CMD_BASE + 0x150,
/* Decoder RevD commands */
- eCMD_C011_DEC_CHAN_SET_CSC = eCMD_C011_CMD_BASE + 0x180, /* color space conversion */
+ eCMD_C011_DEC_CHAN_SET_CSC = eCMD_C011_CMD_BASE + 0x180, /* color
+ space conversion */
eCMD_C011_DEC_CHAN_SET_RANGE_REMAP = eCMD_C011_CMD_BASE + 0x181,
eCMD_C011_DEC_CHAN_SET_FGT = eCMD_C011_CMD_BASE + 0x182,
/* Note: 0x183 not implemented yet in Rev D main */
- eCMD_C011_DEC_CHAN_SET_LASTPICTURE_PADDING = eCMD_C011_CMD_BASE + 0x183,
+ eCMD_C011_DEC_CHAN_SET_LASTPICTURE_PADDING = eCMD_C011_CMD_BASE +
+ 0x183,
/* Decoder 7412 commands (7412-only) */
eCMD_C011_DEC_CHAN_SET_CONTENT_KEY = eCMD_C011_CMD_BASE + 0x190,
diff --git a/drivers/staging/crystalhd/crystalhd_hw.c b/drivers/staging/crystalhd/crystalhd_hw.c
index e617d2f..5845e89 100644
--- a/drivers/staging/crystalhd/crystalhd_hw.c
+++ b/drivers/staging/crystalhd/crystalhd_hw.c
@@ -94,15 +94,19 @@ static bool crystalhd_bring_out_of_rst(struct crystalhd_adp *adp)
* Enable clocks while 7412 reset is asserted, delay
* De-assert 7412 reset
*/
- rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
+ rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp,
+ MISC_PERST_DECODER_CTRL);
rst_deco_cntrl.stop_bcm_7412_clk = 0;
rst_deco_cntrl.bcm7412_rst = 1;
- crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
+ crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL,
+ rst_deco_cntrl.whole_reg);
msleep_interruptible(10);
- rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
+ rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp,
+ MISC_PERST_DECODER_CTRL);
rst_deco_cntrl.bcm7412_rst = 0;
- crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
+ crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL,
+ rst_deco_cntrl.whole_reg);
msleep_interruptible(50);
/* Disable OTP_CONTENT_MISC to 0 to disable all secure modes */
@@ -132,9 +136,11 @@ static bool crystalhd_put_in_reset(struct crystalhd_adp *adp)
* Assert 7412 reset, delay
* Assert 7412 stop clock
*/
- rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp, MISC_PERST_DECODER_CTRL);
+ rst_deco_cntrl.whole_reg = crystalhd_reg_rd(adp,
+ MISC_PERST_DECODER_CTRL);
rst_deco_cntrl.stop_bcm_7412_clk = 1;
- crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL, rst_deco_cntrl.whole_reg);
+ crystalhd_reg_wr(adp, MISC_PERST_DECODER_CTRL,
+ rst_deco_cntrl.whole_reg);
msleep_interruptible(50);
/* Bus Arbiter Timeout: GISB_ARBITER_TIMER
@@ -213,7 +219,8 @@ static void crystalhd_clear_errors(struct crystalhd_adp *adp)
{
uint32_t reg;
- /* FIXME: jarod: wouldn't we want to write a 0 to the reg? Or does the write clear the bits specified? */
+ /* FIXME: jarod: wouldn't we want to write a 0 to the reg?
+ Or does the write clear the bits specified? */
reg = crystalhd_reg_rd(adp, MISC1_Y_RX_ERROR_STATUS);
if (reg)
crystalhd_reg_wr(adp, MISC1_Y_RX_ERROR_STATUS, reg);
@@ -263,10 +270,12 @@ static bool crystalhd_load_firmware_config(struct crystalhd_adp *adp)
crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (BC_DRAM_FW_CFG_ADDR >> 19));
crystalhd_reg_wr(adp, AES_CMD, 0);
- crystalhd_reg_wr(adp, AES_CONFIG_INFO, (BC_DRAM_FW_CFG_ADDR & 0x7FFFF));
+ crystalhd_reg_wr(adp, AES_CONFIG_INFO,
+ (BC_DRAM_FW_CFG_ADDR & 0x7FFFF));
crystalhd_reg_wr(adp, AES_CMD, 0x1);
- /* FIXME: jarod: I've seen this fail, and introducing extra delays helps... */
+ /* FIXME: jarod: I've seen this fail,
+ and introducing extra delays helps... */
for (i = 0; i < 100; ++i) {
reg = crystalhd_reg_rd(adp, AES_STATUS);
if (reg & 0x1)
@@ -349,7 +358,8 @@ static bool crystalhd_stop_device(struct crystalhd_adp *adp)
return true;
}
-static struct crystalhd_rx_dma_pkt *crystalhd_hw_alloc_rx_pkt(struct crystalhd_hw *hw)
+static struct crystalhd_rx_dma_pkt *crystalhd_hw_alloc_rx_pkt(
+ struct crystalhd_hw *hw)
{
unsigned long flags = 0;
struct crystalhd_rx_dma_pkt *temp = NULL;
@@ -484,8 +494,8 @@ hw_create_ioq_err:
}
-static bool crystalhd_code_in_full(struct crystalhd_adp *adp, uint32_t needed_sz,
- bool b_188_byte_pkts, uint8_t flags)
+static bool crystalhd_code_in_full(struct crystalhd_adp *adp,
+ uint32_t needed_sz, bool b_188_byte_pkts, uint8_t flags)
{
uint32_t base, end, writep, readp;
uint32_t cpbSize, cpbFullness, fifoSize;
@@ -525,7 +535,7 @@ static bool crystalhd_code_in_full(struct crystalhd_adp *adp, uint32_t needed_sz
}
static enum BC_STATUS crystalhd_hw_tx_req_complete(struct crystalhd_hw *hw,
- uint32_t list_id, enum BC_STATUS cs)
+ uint32_t list_id, enum BC_STATUS cs)
{
struct tx_dma_pkt *tx_req;
@@ -536,7 +546,8 @@ static enum BC_STATUS crystalhd_hw_tx_req_complete(struct crystalhd_hw *hw,
hw->pwr_lock--;
- tx_req = (struct tx_dma_pkt *)crystalhd_dioq_find_and_fetch(hw->tx_actq, list_id);
+ tx_req = (struct tx_dma_pkt *)crystalhd_dioq_find_and_fetch(
+ hw->tx_actq, list_id);
if (!tx_req) {
if (cs != BC_STS_IO_USER_ABORT)
BCMLOG_ERR("Find and Fetch Did not find req\n");
@@ -559,7 +570,8 @@ static enum BC_STATUS crystalhd_hw_tx_req_complete(struct crystalhd_hw *hw,
return crystalhd_dioq_add(hw->tx_freeq, tx_req, false, 0);
}
-static bool crystalhd_tx_list0_handler(struct crystalhd_hw *hw, uint32_t err_sts)
+static bool crystalhd_tx_list0_handler(struct crystalhd_hw *hw,
+ uint32_t err_sts)
{
uint32_t err_mask, tmp;
unsigned long flags = 0;
@@ -591,7 +603,8 @@ static bool crystalhd_tx_list0_handler(struct crystalhd_hw *hw, uint32_t err_sts
return true;
}
-static bool crystalhd_tx_list1_handler(struct crystalhd_hw *hw, uint32_t err_sts)
+static bool crystalhd_tx_list1_handler(struct crystalhd_hw *hw,
+ uint32_t err_sts)
{
uint32_t err_mask, tmp;
unsigned long flags = 0;
@@ -663,14 +676,15 @@ static void crystalhd_hw_dump_desc(struct dma_descriptor *p_dma_desc,
if (!p_dma_desc || !cnt)
return;
- /* FIXME: jarod: perhaps a modparam desc_debug to enable this, rather than
- * setting ll (log level, I presume) to non-zero? */
+ /* FIXME: jarod: perhaps a modparam desc_debug to enable this,
+ rather than setting ll (log level, I presume) to non-zero? */
if (!ll)
return;
for (ix = ul_desc_index; ix < (ul_desc_index + cnt); ix++) {
- BCMLOG(ll, "%s[%d] Buff[%x:%x] Next:[%x:%x] XferSz:%x Intr:%x,Last:%x\n",
- ((p_dma_desc[ul_desc_index].dma_dir) ? "TDesc" : "RDesc"),
+ BCMLOG(ll,
+ "%s[%d] Buff[%x:%x] Next:[%x:%x] XferSz:%x Intr:%x,Last:%x\n",
+ ((p_dma_desc[ul_desc_index].dma_dir) ? "TDesc" : "RDesc"),
ul_desc_index,
p_dma_desc[ul_desc_index].buff_addr_high,
p_dma_desc[ul_desc_index].buff_addr_low,
@@ -707,7 +721,8 @@ static enum BC_STATUS crystalhd_hw_fill_desc(struct crystalhd_dio_req *ioreq,
/* Get SGLE length */
len = crystalhd_get_sgle_len(ioreq, sg_ix);
if (len % 4) {
- BCMLOG_ERR(" len in sg %d %d %d\n", len, sg_ix, sg_cnt);
+ BCMLOG_ERR(" len in sg %d %d %d\n", len, sg_ix,
+ sg_cnt);
return BC_STS_NOT_IMPL;
}
/* Setup DMA desc with Phy addr & Length at current index. */
@@ -722,7 +737,8 @@ static enum BC_STATUS crystalhd_hw_fill_desc(struct crystalhd_dio_req *ioreq,
desc[ix].dma_dir = ioreq->uinfo.dir_tx;
/* Chain DMA descriptor. */
- addr_temp.full_addr = desc_phy_addr + sizeof(struct dma_descriptor);
+ addr_temp.full_addr = desc_phy_addr +
+ sizeof(struct dma_descriptor);
desc[ix].next_desc_addr_low = addr_temp.low_part;
desc[ix].next_desc_addr_high = addr_temp.high_part;
@@ -731,8 +747,9 @@ static enum BC_STATUS crystalhd_hw_fill_desc(struct crystalhd_dio_req *ioreq,
/* Debug.. */
if ((!len) || (len > crystalhd_get_sgle_len(ioreq, sg_ix))) {
- BCMLOG_ERR("inv-len(%x) Ix(%d) count:%x xfr_sz:%x sg_cnt:%d\n",
- len, ix, count, xfr_sz, sg_cnt);
+ BCMLOG_ERR(
+ "inv-len(%x) Ix(%d) count:%x xfr_sz:%x sg_cnt:%d\n",
+ len, ix, count, xfr_sz, sg_cnt);
return BC_STS_ERROR;
}
/* Length expects Multiple of 4 */
@@ -774,7 +791,8 @@ static enum BC_STATUS crystalhd_hw_fill_desc(struct crystalhd_dio_req *ioreq,
return BC_STS_SUCCESS;
}
-static enum BC_STATUS crystalhd_xlat_sgl_to_dma_desc(struct crystalhd_dio_req *ioreq,
+static enum BC_STATUS crystalhd_xlat_sgl_to_dma_desc(
+ struct crystalhd_dio_req *ioreq,
struct dma_desc_mem *pdesc_mem,
uint32_t *uv_desc_index)
{
@@ -887,12 +905,14 @@ static enum BC_STATUS crystalhd_stop_tx_dma_engine(struct crystalhd_hw *hw)
while ((l1 || l2) && cnt) {
if (l1) {
- l1 = crystalhd_reg_rd(hw->adp, MISC1_TX_FIRST_DESC_L_ADDR_LIST0);
+ l1 = crystalhd_reg_rd(hw->adp,
+ MISC1_TX_FIRST_DESC_L_ADDR_LIST0);
l1 &= DMA_START_BIT;
}
if (l2) {
- l2 = crystalhd_reg_rd(hw->adp, MISC1_TX_FIRST_DESC_L_ADDR_LIST1);
+ l2 = crystalhd_reg_rd(hw->adp,
+ MISC1_TX_FIRST_DESC_L_ADDR_LIST1);
l2 &= DMA_START_BIT;
}
@@ -986,7 +1006,8 @@ static uint32_t crystalhd_get_addr_from_pib_Q(struct crystalhd_hw *hw)
return addr_entry;
}
-static bool crystalhd_rel_addr_to_pib_Q(struct crystalhd_hw *hw, uint32_t addr_to_rel)
+static bool crystalhd_rel_addr_to_pib_Q(struct crystalhd_hw *hw,
+ uint32_t addr_to_rel)
{
uint32_t Q_addr;
uint32_t r_offset, w_offset, n_offset;
@@ -1021,7 +1042,8 @@ static bool crystalhd_rel_addr_to_pib_Q(struct crystalhd_hw *hw, uint32_t addr_t
return true;
}
-static void cpy_pib_to_app(struct c011_pib *src_pib, struct BC_PIC_INFO_BLOCK *dst_pib)
+static void cpy_pib_to_app(struct c011_pib *src_pib,
+ struct BC_PIC_INFO_BLOCK *dst_pib)
{
if (!src_pib || !dst_pib) {
BCMLOG_ERR("Invalid Arguments\n");
@@ -1039,7 +1061,7 @@ static void cpy_pib_to_app(struct c011_pib *src_pib, struct BC_PIC_INFO_BLOCK *d
dst_pib->aspect_ratio = src_pib->ppb.aspect_ratio;
dst_pib->colour_primaries = src_pib->ppb.colour_primaries;
dst_pib->picture_meta_payload = src_pib->ppb.picture_meta_payload;
- dst_pib->frame_rate = src_pib->resolution ;
+ dst_pib->frame_rate = src_pib->resolution;
return;
}
@@ -1063,11 +1085,13 @@ static void crystalhd_hw_proc_pib(struct crystalhd_hw *hw)
(uint32_t *)&src_pib);
if (src_pib.bFormatChange) {
- rx_pkt = (struct crystalhd_rx_dma_pkt *)crystalhd_dioq_fetch(hw->rx_freeq);
+ rx_pkt = (struct crystalhd_rx_dma_pkt *)
+ crystalhd_dioq_fetch(hw->rx_freeq);
if (!rx_pkt)
return;
rx_pkt->flags = 0;
- rx_pkt->flags |= COMP_FLAG_PIB_VALID | COMP_FLAG_FMT_CHANGE;
+ rx_pkt->flags |= COMP_FLAG_PIB_VALID |
+ COMP_FLAG_FMT_CHANGE;
AppPib = &rx_pkt->pib;
cpy_pib_to_app(&src_pib, AppPib);
@@ -1084,7 +1108,8 @@ static void crystalhd_hw_proc_pib(struct crystalhd_hw *hw)
rx_pkt->pib.pulldown,
rx_pkt->pib.ycom);
- crystalhd_dioq_add(hw->rx_rdyq, (void *)rx_pkt, true, rx_pkt->pkt_tag);
+ crystalhd_dioq_add(hw->rx_rdyq, (void *)rx_pkt, true,
+ rx_pkt->pkt_tag);
}
@@ -1096,16 +1121,20 @@ static void crystalhd_start_rx_dma_engine(struct crystalhd_hw *hw)
{
uint32_t dma_cntrl;
- dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
+ dma_cntrl = crystalhd_reg_rd(hw->adp,
+ MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
if (!(dma_cntrl & DMA_START_BIT)) {
dma_cntrl |= DMA_START_BIT;
- crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
+ crystalhd_reg_wr(hw->adp,
+ MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
}
- dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
+ dma_cntrl = crystalhd_reg_rd(hw->adp,
+ MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
if (!(dma_cntrl & DMA_START_BIT)) {
dma_cntrl |= DMA_START_BIT;
- crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
+ crystalhd_reg_wr(hw->adp,
+ MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
}
return;
@@ -1116,44 +1145,52 @@ static void crystalhd_stop_rx_dma_engine(struct crystalhd_hw *hw)
uint32_t dma_cntrl = 0, count = 30;
uint32_t l0y = 1, l0uv = 1, l1y = 1, l1uv = 1;
- dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
+ dma_cntrl = crystalhd_reg_rd(hw->adp,
+ MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
if ((dma_cntrl & DMA_START_BIT)) {
dma_cntrl &= ~DMA_START_BIT;
- crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
+ crystalhd_reg_wr(hw->adp,
+ MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
}
- dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
+ dma_cntrl = crystalhd_reg_rd(hw->adp,
+ MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
if ((dma_cntrl & DMA_START_BIT)) {
dma_cntrl &= ~DMA_START_BIT;
- crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
+ crystalhd_reg_wr(hw->adp,
+ MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
}
/* Poll for 3seconds (30 * 100ms) on both the lists..*/
while ((l0y || l0uv || l1y || l1uv) && count) {
if (l0y) {
- l0y = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0);
+ l0y = crystalhd_reg_rd(hw->adp,
+ MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST0);
l0y &= DMA_START_BIT;
if (!l0y)
hw->rx_list_sts[0] &= ~rx_waiting_y_intr;
}
if (l1y) {
- l1y = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1);
+ l1y = crystalhd_reg_rd(hw->adp,
+ MISC1_Y_RX_FIRST_DESC_L_ADDR_LIST1);
l1y &= DMA_START_BIT;
if (!l1y)
hw->rx_list_sts[1] &= ~rx_waiting_y_intr;
}
if (l0uv) {
- l0uv = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0);
+ l0uv = crystalhd_reg_rd(hw->adp,
+ MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST0);
l0uv &= DMA_START_BIT;
if (!l0uv)
hw->rx_list_sts[0] &= ~rx_waiting_uv_intr;
}
if (l1uv) {
- l1uv = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1);
+ l1uv = crystalhd_reg_rd(hw->adp,
+ MISC1_UV_RX_FIRST_DESC_L_ADDR_LIST1);
l1uv &= DMA_START_BIT;
if (!l1uv)
hw->rx_list_sts[1] &= ~rx_waiting_uv_intr;
@@ -1168,7 +1205,8 @@ static void crystalhd_stop_rx_dma_engine(struct crystalhd_hw *hw)
count, hw->rx_list_sts[0], hw->rx_list_sts[1]);
}
-static enum BC_STATUS crystalhd_hw_prog_rxdma(struct crystalhd_hw *hw, struct crystalhd_rx_dma_pkt *rx_pkt)
+static enum BC_STATUS crystalhd_hw_prog_rxdma(struct crystalhd_hw *hw,
+ struct crystalhd_rx_dma_pkt *rx_pkt)
{
uint32_t y_low_addr_reg, y_high_addr_reg;
uint32_t uv_low_addr_reg, uv_high_addr_reg;
@@ -1186,7 +1224,8 @@ static enum BC_STATUS crystalhd_hw_prog_rxdma(struct crystalhd_hw *hw, struct cr
}
spin_lock_irqsave(&hw->rx_lock, flags);
- /* FIXME: jarod: sts_free is an enum for 0, in crystalhd_hw.h... yuk... */
+ /* FIXME: jarod: sts_free is an enum for 0,
+ in crystalhd_hw.h... yuk... */
if (sts_free != hw->rx_list_sts[hw->rx_list_post_index]) {
spin_unlock_irqrestore(&hw->rx_lock, flags);
return BC_STS_BUSY;
@@ -1210,7 +1249,8 @@ static enum BC_STATUS crystalhd_hw_prog_rxdma(struct crystalhd_hw *hw, struct cr
hw->rx_list_post_index = (hw->rx_list_post_index + 1) % DMA_ENGINE_CNT;
spin_unlock_irqrestore(&hw->rx_lock, flags);
- crystalhd_dioq_add(hw->rx_actq, (void *)rx_pkt, false, rx_pkt->pkt_tag);
+ crystalhd_dioq_add(hw->rx_actq, (void *)rx_pkt, false,
+ rx_pkt->pkt_tag);
crystalhd_start_rx_dma_engine(hw);
/* Program the Y descriptor */
@@ -1221,8 +1261,10 @@ static enum BC_STATUS crystalhd_hw_prog_rxdma(struct crystalhd_hw *hw, struct cr
if (rx_pkt->uv_phy_addr) {
/* Program the UV descriptor */
desc_addr.full_addr = rx_pkt->uv_phy_addr;
- crystalhd_reg_wr(hw->adp, uv_high_addr_reg, desc_addr.high_part);
- crystalhd_reg_wr(hw->adp, uv_low_addr_reg, desc_addr.low_part | 0x01);
+ crystalhd_reg_wr(hw->adp, uv_high_addr_reg,
+ desc_addr.high_part);
+ crystalhd_reg_wr(hw->adp, uv_low_addr_reg,
+ desc_addr.low_part | 0x01);
}
return BC_STS_SUCCESS;
@@ -1268,16 +1310,20 @@ static void crystalhd_hw_finalize_pause(struct crystalhd_hw *hw)
hw->stop_pending = 0;
- dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
+ dma_cntrl = crystalhd_reg_rd(hw->adp,
+ MISC1_Y_RX_SW_DESC_LIST_CTRL_STS);
if (dma_cntrl & DMA_START_BIT) {
dma_cntrl &= ~DMA_START_BIT;
- crystalhd_reg_wr(hw->adp, MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
+ crystalhd_reg_wr(hw->adp,
+ MISC1_Y_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
}
- dma_cntrl = crystalhd_reg_rd(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
+ dma_cntrl = crystalhd_reg_rd(hw->adp,
+ MISC1_UV_RX_SW_DESC_LIST_CTRL_STS);
if (dma_cntrl & DMA_START_BIT) {
dma_cntrl &= ~DMA_START_BIT;
- crystalhd_reg_wr(hw->adp, MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
+ crystalhd_reg_wr(hw->adp,
+ MISC1_UV_RX_SW_DESC_LIST_CTRL_STS, dma_cntrl);
}
hw->rx_list_post_index = 0;
@@ -1287,8 +1333,8 @@ static void crystalhd_hw_finalize_pause(struct crystalhd_hw *hw)
crystalhd_reg_wr(hw->adp, PCIE_DLL_DATA_LINK_CONTROL, aspm);
}
-static enum BC_STATUS crystalhd_rx_pkt_done(struct crystalhd_hw *hw, uint32_t list_index,
- enum BC_STATUS comp_sts)
+static enum BC_STATUS crystalhd_rx_pkt_done(struct crystalhd_hw *hw,
+ uint32_t list_index, enum BC_STATUS comp_sts)
{
struct crystalhd_rx_dma_pkt *rx_pkt = NULL;
uint32_t y_dw_dnsz, uv_dw_dnsz;
@@ -1302,7 +1348,8 @@ static enum BC_STATUS crystalhd_rx_pkt_done(struct crystalhd_hw *hw, uint32_t li
rx_pkt = crystalhd_dioq_find_and_fetch(hw->rx_actq,
hw->rx_pkt_tag_seed + list_index);
if (!rx_pkt) {
- BCMLOG_ERR("Act-Q:PostIx:%x L0Sts:%x L1Sts:%x current L:%x tag:%x comp:%x\n",
+ BCMLOG_ERR(
+ "Act-Q:PostIx:%x L0Sts:%x L1Sts:%x current L:%x tag:%x comp:%x\n",
hw->rx_list_post_index, hw->rx_list_sts[0],
hw->rx_list_sts[1], list_index,
hw->rx_pkt_tag_seed + list_index, comp_sts);
@@ -1324,8 +1371,8 @@ static enum BC_STATUS crystalhd_rx_pkt_done(struct crystalhd_hw *hw, uint32_t li
return crystalhd_hw_post_cap_buff(hw, rx_pkt);
}
-static bool crystalhd_rx_list0_handler(struct crystalhd_hw *hw, uint32_t int_sts,
- uint32_t y_err_sts, uint32_t uv_err_sts)
+static bool crystalhd_rx_list0_handler(struct crystalhd_hw *hw,
+ uint32_t int_sts, uint32_t y_err_sts, uint32_t uv_err_sts)
{
uint32_t tmp;
enum list_sts tmp_lsts;
@@ -1367,7 +1414,8 @@ static bool crystalhd_rx_list0_handler(struct crystalhd_hw *hw, uint32_t int_sts
tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK;
}
- if (uv_err_sts & MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) {
+ if (uv_err_sts &
+ MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK) {
hw->rx_list_sts[0] &= ~rx_uv_mask;
hw->rx_list_sts[0] |= rx_uv_error;
tmp &= ~MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK;
@@ -1392,8 +1440,8 @@ static bool crystalhd_rx_list0_handler(struct crystalhd_hw *hw, uint32_t int_sts
return (tmp_lsts != hw->rx_list_sts[0]);
}
-static bool crystalhd_rx_list1_handler(struct crystalhd_hw *hw, uint32_t int_sts,
- uint32_t y_err_sts, uint32_t uv_err_sts)
+static bool crystalhd_rx_list1_handler(struct crystalhd_hw *hw,
+ uint32_t int_sts, uint32_t y_err_sts, uint32_t uv_err_sts)
{
uint32_t tmp;
enum list_sts tmp_lsts;
@@ -1486,9 +1534,11 @@ static void crystalhd_rx_isr(struct crystalhd_hw *hw, uint32_t intr_sts)
/* Update States..*/
spin_lock_irqsave(&hw->rx_lock, flags);
if (i == 0)
- ret = crystalhd_rx_list0_handler(hw, intr_sts, y_err_sts, uv_err_sts);
+ ret = crystalhd_rx_list0_handler(hw, intr_sts,
+ y_err_sts, uv_err_sts);
else
- ret = crystalhd_rx_list1_handler(hw, intr_sts, y_err_sts, uv_err_sts);
+ ret = crystalhd_rx_list1_handler(hw, intr_sts,
+ y_err_sts, uv_err_sts);
if (ret) {
switch (hw->rx_list_sts[i]) {
case sts_free:
@@ -1501,11 +1551,12 @@ static void crystalhd_rx_isr(struct crystalhd_hw *hw, uint32_t intr_sts)
/* We got error on both or Y or uv. */
hw->stats.rx_errors++;
crystalhd_get_dnsz(hw, i, &y_dn_sz, &uv_dn_sz);
- /* FIXME: jarod: this is where my mini pci-e card is tripping up */
- BCMLOG(BCMLOG_DBG, "list_index:%x rx[%d] Y:%x "
- "UV:%x Int:%x YDnSz:%x UVDnSz:%x\n",
+ /* FIXME: jarod: this is where
+ my mini pci-e card is tripping up */
+ BCMLOG(BCMLOG_DBG, "list_index:%x rx[%d] Y:%x UV:%x Int:%x YDnSz:%x UVDnSz:%x\n",
i, hw->stats.rx_errors, y_err_sts,
- uv_err_sts, intr_sts, y_dn_sz, uv_dn_sz);
+ uv_err_sts, intr_sts, y_dn_sz,
+ uv_dn_sz);
hw->rx_list_sts[i] = sts_free;
comp_sts = BC_STS_ERROR;
break;
@@ -1567,14 +1618,17 @@ static enum BC_STATUS crystalhd_put_ddr2sleep(struct crystalhd_hw *hw)
union link_misc_perst_decoder_ctrl rst_cntrl_reg;
/* Pulse reset pin of 7412 (MISC_PERST_DECODER_CTRL) */
- rst_cntrl_reg.whole_reg = crystalhd_reg_rd(hw->adp, MISC_PERST_DECODER_CTRL);
+ rst_cntrl_reg.whole_reg = crystalhd_reg_rd(hw->adp,
+ MISC_PERST_DECODER_CTRL);
rst_cntrl_reg.bcm_7412_rst = 1;
- crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL, rst_cntrl_reg.whole_reg);
+ crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL,
+ rst_cntrl_reg.whole_reg);
msleep_interruptible(50);
rst_cntrl_reg.bcm_7412_rst = 0;
- crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL, rst_cntrl_reg.whole_reg);
+ crystalhd_reg_wr(hw->adp, MISC_PERST_DECODER_CTRL,
+ rst_cntrl_reg.whole_reg);
/* Close all banks, put DDR in idle */
bc_dec_reg_wr(hw->adp, SDRAM_PRECHARGE, 0);
@@ -1622,7 +1676,8 @@ static enum BC_STATUS crystalhd_put_ddr2sleep(struct crystalhd_hw *hw)
**
*************************************************/
-enum BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp, void *buffer, uint32_t sz)
+enum BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp, void *buffer,
+ uint32_t sz)
{
uint32_t reg_data, cnt, *temp_buff;
uint32_t fw_sig_len = 36;
@@ -1828,7 +1883,8 @@ bool crystalhd_hw_interrupt(struct crystalhd_adp *adp, struct crystalhd_hw *hw)
crystalhd_hw_proc_pib(hw);
bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, deco_intr);
- /* FIXME: jarod: No udelay? might this be the real reason mini pci-e cards were stalling out? */
+ /* FIXME: jarod: No udelay? might this be
+ the real reason mini pci-e cards were stalling out? */
bc_dec_reg_wr(adp, Stream2Host_Intr_Sts, 0);
rc = 1;
}
@@ -1852,7 +1908,8 @@ bool crystalhd_hw_interrupt(struct crystalhd_adp *adp, struct crystalhd_hw *hw)
return rc;
}
-enum BC_STATUS crystalhd_hw_open(struct crystalhd_hw *hw, struct crystalhd_adp *adp)
+enum BC_STATUS crystalhd_hw_open(struct crystalhd_hw *hw,
+ struct crystalhd_adp *adp)
{
if (!hw || !adp) {
BCMLOG_ERR("Invalid Arguments\n");
@@ -1967,7 +2024,8 @@ enum BC_STATUS crystalhd_hw_setup_dma_rings(struct crystalhd_hw *hw)
}
rpkt->desc_mem.pdma_desc_start = mem;
rpkt->desc_mem.phy_addr = phy_addr;
- rpkt->desc_mem.sz = BC_LINK_MAX_SGLS * sizeof(struct dma_descriptor);
+ rpkt->desc_mem.sz = BC_LINK_MAX_SGLS *
+ sizeof(struct dma_descriptor);
rpkt->pkt_tag = hw->rx_pkt_tag_seed + i;
crystalhd_hw_free_rx_pkt(hw, rpkt);
}
@@ -2013,7 +2071,8 @@ enum BC_STATUS crystalhd_hw_free_dma_rings(struct crystalhd_hw *hw)
return BC_STS_SUCCESS;
}
-enum BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw, struct crystalhd_dio_req *ioreq,
+enum BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw,
+ struct crystalhd_dio_req *ioreq,
hw_comp_callback call_back,
wait_queue_head_t *cb_event, uint32_t *list_id,
uint8_t data_flags)
@@ -2047,7 +2106,8 @@ enum BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw, struct crystalhd_di
}
/* Get a list from TxFreeQ */
- tx_dma_packet = (struct tx_dma_pkt *)crystalhd_dioq_fetch(hw->tx_freeq);
+ tx_dma_packet = (struct tx_dma_pkt *)crystalhd_dioq_fetch(
+ hw->tx_freeq);
if (!tx_dma_packet) {
BCMLOG_ERR("No empty elements..\n");
return BC_STS_ERR_USAGE;
@@ -2105,7 +2165,8 @@ enum BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw, struct crystalhd_di
crystalhd_start_tx_dma_engine(hw);
crystalhd_reg_wr(hw->adp, first_desc_u_addr, desc_addr.high_part);
- crystalhd_reg_wr(hw->adp, first_desc_l_addr, desc_addr.low_part | 0x01);
+ crystalhd_reg_wr(hw->adp, first_desc_l_addr, desc_addr.low_part |
+ 0x01);
/* Be sure we set the valid bit ^^^^ */
return BC_STS_SUCCESS;
@@ -2120,7 +2181,8 @@ enum BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw, struct crystalhd_di
*
* FIX_ME: Not Tested the actual condition..
*/
-enum BC_STATUS crystalhd_hw_cancel_tx(struct crystalhd_hw *hw, uint32_t list_id)
+enum BC_STATUS crystalhd_hw_cancel_tx(struct crystalhd_hw *hw,
+ uint32_t list_id)
{
if (!hw || !list_id) {
BCMLOG_ERR("Invalid Arguments\n");
@@ -2134,7 +2196,7 @@ enum BC_STATUS crystalhd_hw_cancel_tx(struct crystalhd_hw *hw, uint32_t list_id)
}
enum BC_STATUS crystalhd_hw_add_cap_buffer(struct crystalhd_hw *hw,
- struct crystalhd_dio_req *ioreq, bool en_post)
+ struct crystalhd_dio_req *ioreq, bool en_post)
{
struct crystalhd_rx_dma_pkt *rpkt;
uint32_t tag, uv_desc_ix = 0;
@@ -2154,7 +2216,8 @@ enum BC_STATUS crystalhd_hw_add_cap_buffer(struct crystalhd_hw *hw,
rpkt->dio_req = ioreq;
tag = rpkt->pkt_tag;
- sts = crystalhd_xlat_sgl_to_dma_desc(ioreq, &rpkt->desc_mem, &uv_desc_ix);
+ sts = crystalhd_xlat_sgl_to_dma_desc(ioreq, &rpkt->desc_mem,
+ &uv_desc_ix);
if (sts != BC_STS_SUCCESS)
return sts;
@@ -2163,7 +2226,7 @@ enum BC_STATUS crystalhd_hw_add_cap_buffer(struct crystalhd_hw *hw,
/* Store the address of UV in the rx packet for post*/
if (uv_desc_ix)
rpkt->uv_phy_addr = rpkt->desc_mem.phy_addr +
- (sizeof(struct dma_descriptor) * (uv_desc_ix + 1));
+ (sizeof(struct dma_descriptor) * (uv_desc_ix + 1));
if (en_post)
sts = crystalhd_hw_post_cap_buff(hw, rpkt);
@@ -2190,7 +2253,8 @@ enum BC_STATUS crystalhd_hw_get_cap_buffer(struct crystalhd_hw *hw,
rpkt = crystalhd_dioq_fetch_wait(hw->rx_rdyq, timeout, &sig_pending);
if (!rpkt) {
if (sig_pending) {
- BCMLOG(BCMLOG_INFO, "wait on frame time out %d\n", sig_pending);
+ BCMLOG(BCMLOG_INFO, "wait on frame time out %d\n",
+ sig_pending);
return BC_STS_IO_USER_ABORT;
} else {
return BC_STS_TIMEOUT;
@@ -2305,7 +2369,8 @@ enum BC_STATUS crystalhd_hw_suspend(struct crystalhd_hw *hw)
return BC_STS_SUCCESS;
}
-void crystalhd_hw_stats(struct crystalhd_hw *hw, struct crystalhd_hw_stats *stats)
+void crystalhd_hw_stats(struct crystalhd_hw *hw,
+ struct crystalhd_hw_stats *stats)
{
if (!hw) {
BCMLOG_ERR("Invalid Arguments\n");
@@ -2378,7 +2443,8 @@ enum BC_STATUS crystalhd_hw_set_core_clock(struct crystalhd_hw *hw)
if (reg & 0x00020000) {
hw->prev_n = n;
- /* FIXME: jarod: outputting a random "C" is... confusing... */
+ /* FIXME: jarod: outputting
+ a random "C" is... confusing... */
BCMLOG(BCMLOG_INFO, "C");
return BC_STS_SUCCESS;
} else {
diff --git a/drivers/staging/crystalhd/crystalhd_hw.h b/drivers/staging/crystalhd/crystalhd_hw.h
index 2d0e6c6..3780944 100644
--- a/drivers/staging/crystalhd/crystalhd_hw.h
+++ b/drivers/staging/crystalhd/crystalhd_hw.h
@@ -46,7 +46,7 @@
#define Cpu2HstMbx1 0x00100F04
#define MbxStat1 0x00100F08
#define Stream2Host_Intr_Sts 0x00100F24
-#define C011_RET_SUCCESS 0x0 /* Reutrn status of firmware command. */
+#define C011_RET_SUCCESS 0x0 /* Reutrn status of firmware command. */
/* TS input status register */
#define TS_StreamAFIFOStatus 0x0010044C
@@ -103,7 +103,7 @@
#define BC_FWIMG_ST_ADDR 0x00000000
/* FIXME: jarod: there's a kernel function that'll do this for us... */
#define rotr32_1(x, n) (((x) >> n) | ((x) << (32 - n)))
-#define bswap_32_1(x) ((rotr32_1((x), 24) & 0x00ff00ff) | (rotr32_1((x), 8) & 0xff00ff00))
+#define bswap_32_1(x) ((rotr32_1((x), 24) & 0x00ff00ff) | (rotr32_1((x), 8) & 0xff00ff00))
#define DecHt_HostSwReset 0x340000
#define BC_DRAM_FW_CFG_ADDR 0x001c2000
@@ -136,9 +136,11 @@ union intr_mask_reg {
union link_misc_perst_deco_ctrl {
struct {
- uint32_t bcm7412_rst:1; /* 1 -> BCM7412 is held in reset. Reset value 1.*/
+ uint32_t bcm7412_rst:1; /* 1 -> BCM7412 is held
+ in reset. Reset value 1.*/
uint32_t reserved0:3; /* Reserved.No Effect*/
- uint32_t stop_bcm_7412_clk:1; /* 1 ->Stops branch of 27MHz clk used to clk BCM7412*/
+ uint32_t stop_bcm_7412_clk:1; /* 1 ->Stops branch of
+ 27MHz clk used to clk BCM7412*/
uint32_t reserved1:27; /* Reseved. No Effect*/
};
@@ -148,13 +150,18 @@ union link_misc_perst_deco_ctrl {
union link_misc_perst_clk_ctrl {
struct {
- uint32_t sel_alt_clk:1; /* When set, selects a 6.75MHz clock as the source of core_clk */
- uint32_t stop_core_clk:1; /* When set, stops the branch of core_clk that is not needed for low power operation */
- uint32_t pll_pwr_dn:1; /* When set, powers down the main PLL. The alternate clock bit should be set
- to select an alternate clock before setting this bit.*/
+ uint32_t sel_alt_clk:1; /* When set, selects a
+ 6.75MHz clock as the source of core_clk */
+ uint32_t stop_core_clk:1; /* When set, stops the branch
+ of core_clk that is not needed for low power operation */
+ uint32_t pll_pwr_dn:1; /* When set, powers down the
+ main PLL. The alternate clock bit should be set to
+ select an alternate clock before setting this bit.*/
uint32_t reserved0:5; /* Reserved */
- uint32_t pll_mult:8; /* This setting controls the multiplier for the PLL. */
- uint32_t pll_div:4; /* This setting controls the divider for the PLL. */
+ uint32_t pll_mult:8; /* This setting controls
+ the multiplier for the PLL. */
+ uint32_t pll_div:4; /* This setting controls
+ the divider for the PLL. */
uint32_t reserved1:12; /* Reserved */
};
@@ -164,9 +171,11 @@ union link_misc_perst_clk_ctrl {
union link_misc_perst_decoder_ctrl {
struct {
- uint32_t bcm_7412_rst:1; /* 1 -> BCM7412 is held in reset. Reset value 1.*/
+ uint32_t bcm_7412_rst:1; /* 1 -> BCM7412 is held
+ in reset. Reset value 1.*/
uint32_t res0:3; /* Reserved.No Effect*/
- uint32_t stop_7412_clk:1; /* 1 ->Stops branch of 27MHz clk used to clk BCM7412*/
+ uint32_t stop_7412_clk:1; /* 1 ->Stops branch of 27MHz
+ clk used to clk BCM7412*/
uint32_t res1:27; /* Reseved. No Effect */
};
@@ -225,10 +234,12 @@ struct dma_descriptor { /* 8 32-bit values */
* The virtual address will determine what should be freed.
*/
struct dma_desc_mem {
- struct dma_descriptor *pdma_desc_start; /* 32-bytes for dma descriptor. should be first element */
- dma_addr_t phy_addr; /* physical address of each DMA desc */
+ struct dma_descriptor *pdma_desc_start; /* 32-bytes for dma
+ descriptor. should be first element */
+ dma_addr_t phy_addr; /* physical address
+ of each DMA desc */
uint32_t sz;
- struct _dma_desc_mem_ *Next; /* points to Next Descriptor in chain */
+ struct _dma_desc_mem_ *Next; /* points to Next Descriptor in chain */
};
@@ -323,50 +334,54 @@ struct crystalhd_hw {
#define CLOCK_PRESET 175
/* DMA engine register BIT mask wrappers.. */
-#define DMA_START_BIT MISC1_TX_SW_DESC_LIST_CTRL_STS_TX_DMA_RUN_STOP_MASK
-
-#define GET_RX_INTR_MASK (INTR_INTR_STATUS_L1_UV_RX_DMA_ERR_INTR_MASK | \
- INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_MASK | \
- INTR_INTR_STATUS_L1_Y_RX_DMA_ERR_INTR_MASK | \
- INTR_INTR_STATUS_L1_Y_RX_DMA_DONE_INTR_MASK | \
- INTR_INTR_STATUS_L0_UV_RX_DMA_ERR_INTR_MASK | \
- INTR_INTR_STATUS_L0_UV_RX_DMA_DONE_INTR_MASK | \
- INTR_INTR_STATUS_L0_Y_RX_DMA_ERR_INTR_MASK | \
- INTR_INTR_STATUS_L0_Y_RX_DMA_DONE_INTR_MASK)
-
-#define GET_Y0_ERR_MSK (MISC1_Y_RX_ERROR_STATUS_RX_L0_OVERRUN_ERROR_MASK | \
- MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK | \
- MISC1_Y_RX_ERROR_STATUS_RX_L0_DESC_TX_ABORT_ERRORS_MASK | \
- MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK)
-
-#define GET_UV0_ERR_MSK (MISC1_UV_RX_ERROR_STATUS_RX_L0_OVERRUN_ERROR_MASK | \
- MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK | \
- MISC1_UV_RX_ERROR_STATUS_RX_L0_DESC_TX_ABORT_ERRORS_MASK | \
- MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK)
-
-#define GET_Y1_ERR_MSK (MISC1_Y_RX_ERROR_STATUS_RX_L1_OVERRUN_ERROR_MASK | \
- MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK | \
- MISC1_Y_RX_ERROR_STATUS_RX_L1_DESC_TX_ABORT_ERRORS_MASK | \
- MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK)
-
-#define GET_UV1_ERR_MSK (MISC1_UV_RX_ERROR_STATUS_RX_L1_OVERRUN_ERROR_MASK | \
- MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK | \
- MISC1_UV_RX_ERROR_STATUS_RX_L1_DESC_TX_ABORT_ERRORS_MASK | \
- MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK)
+#define DMA_START_BIT MISC1_TX_SW_DESC_LIST_CTRL_STS_TX_DMA_RUN_STOP_MASK
+
+#define GET_RX_INTR_MASK (INTR_INTR_STATUS_L1_UV_RX_DMA_ERR_INTR_MASK | \
+ INTR_INTR_STATUS_L1_UV_RX_DMA_DONE_INTR_MASK | \
+ INTR_INTR_STATUS_L1_Y_RX_DMA_ERR_INTR_MASK | \
+ INTR_INTR_STATUS_L1_Y_RX_DMA_DONE_INTR_MASK | \
+ INTR_INTR_STATUS_L0_UV_RX_DMA_ERR_INTR_MASK | \
+ INTR_INTR_STATUS_L0_UV_RX_DMA_DONE_INTR_MASK | \
+ INTR_INTR_STATUS_L0_Y_RX_DMA_ERR_INTR_MASK | \
+ INTR_INTR_STATUS_L0_Y_RX_DMA_DONE_INTR_MASK)
+
+#define GET_Y0_ERR_MSK (MISC1_Y_RX_ERROR_STATUS_RX_L0_OVERRUN_ERROR_MASK | \
+ MISC1_Y_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK | \
+ MISC1_Y_RX_ERROR_STATUS_RX_L0_DESC_TX_ABORT_ERRORS_MASK | \
+ MISC1_Y_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK)
+
+#define GET_UV0_ERR_MSK (MISC1_UV_RX_ERROR_STATUS_RX_L0_OVERRUN_ERROR_MASK | \
+ MISC1_UV_RX_ERROR_STATUS_RX_L0_UNDERRUN_ERROR_MASK | \
+ MISC1_UV_RX_ERROR_STATUS_RX_L0_DESC_TX_ABORT_ERRORS_MASK | \
+ MISC1_UV_RX_ERROR_STATUS_RX_L0_FIFO_FULL_ERRORS_MASK)
+
+#define GET_Y1_ERR_MSK (MISC1_Y_RX_ERROR_STATUS_RX_L1_OVERRUN_ERROR_MASK | \
+ MISC1_Y_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK | \
+ MISC1_Y_RX_ERROR_STATUS_RX_L1_DESC_TX_ABORT_ERRORS_MASK | \
+ MISC1_Y_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK)
+
+#define GET_UV1_ERR_MSK (MISC1_UV_RX_ERROR_STATUS_RX_L1_OVERRUN_ERROR_MASK | \
+ MISC1_UV_RX_ERROR_STATUS_RX_L1_UNDERRUN_ERROR_MASK | \
+ MISC1_UV_RX_ERROR_STATUS_RX_L1_DESC_TX_ABORT_ERRORS_MASK | \
+ MISC1_UV_RX_ERROR_STATUS_RX_L1_FIFO_FULL_ERRORS_MASK)
/**** API Exposed to the other layers ****/
enum BC_STATUS crystalhd_download_fw(struct crystalhd_adp *adp,
void *buffer, uint32_t sz);
-enum BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw, struct BC_FW_CMD *fw_cmd);
-bool crystalhd_hw_interrupt(struct crystalhd_adp *adp, struct crystalhd_hw *hw);
-enum BC_STATUS crystalhd_hw_open(struct crystalhd_hw *, struct crystalhd_adp *);
+enum BC_STATUS crystalhd_do_fw_cmd(struct crystalhd_hw *hw,
+ struct BC_FW_CMD *fw_cmd);
+bool crystalhd_hw_interrupt(struct crystalhd_adp *adp,
+ struct crystalhd_hw *hw);
+enum BC_STATUS crystalhd_hw_open(struct crystalhd_hw *,
+ struct crystalhd_adp *);
enum BC_STATUS crystalhd_hw_close(struct crystalhd_hw *);
enum BC_STATUS crystalhd_hw_setup_dma_rings(struct crystalhd_hw *);
enum BC_STATUS crystalhd_hw_free_dma_rings(struct crystalhd_hw *);
-enum BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw, struct crystalhd_dio_req *ioreq,
+enum BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw,
+ struct crystalhd_dio_req *ioreq,
hw_comp_callback call_back,
wait_queue_head_t *cb_event,
uint32_t *list_id, uint8_t data_flags);
@@ -374,15 +389,17 @@ enum BC_STATUS crystalhd_hw_post_tx(struct crystalhd_hw *hw, struct crystalhd_di
enum BC_STATUS crystalhd_hw_pause(struct crystalhd_hw *hw);
enum BC_STATUS crystalhd_hw_unpause(struct crystalhd_hw *hw);
enum BC_STATUS crystalhd_hw_suspend(struct crystalhd_hw *hw);
-enum BC_STATUS crystalhd_hw_cancel_tx(struct crystalhd_hw *hw, uint32_t list_id);
+enum BC_STATUS crystalhd_hw_cancel_tx(struct crystalhd_hw *hw,
+ uint32_t list_id);
enum BC_STATUS crystalhd_hw_add_cap_buffer(struct crystalhd_hw *hw,
- struct crystalhd_dio_req *ioreq, bool en_post);
+ struct crystalhd_dio_req *ioreq, bool en_post);
enum BC_STATUS crystalhd_hw_get_cap_buffer(struct crystalhd_hw *hw,
struct BC_PIC_INFO_BLOCK *pib,
struct crystalhd_dio_req **ioreq);
enum BC_STATUS crystalhd_hw_stop_capture(struct crystalhd_hw *hw);
enum BC_STATUS crystalhd_hw_start_capture(struct crystalhd_hw *hw);
-void crystalhd_hw_stats(struct crystalhd_hw *hw, struct crystalhd_hw_stats *stats);
+void crystalhd_hw_stats(struct crystalhd_hw *hw,
+ struct crystalhd_hw_stats *stats);
/* API to program the core clock on the decoder */
enum BC_STATUS crystalhd_hw_set_core_clock(struct crystalhd_hw *);
diff --git a/drivers/staging/crystalhd/crystalhd_lnx.c b/drivers/staging/crystalhd/crystalhd_lnx.c
index 85f51fb..b17fbf8 100644
--- a/drivers/staging/crystalhd/crystalhd_lnx.c
+++ b/drivers/staging/crystalhd/crystalhd_lnx.c
@@ -75,7 +75,8 @@ static int chd_dec_disable_int(struct crystalhd_adp *adp)
return 0;
}
-struct crystalhd_ioctl_data *chd_dec_alloc_iodata(struct crystalhd_adp *adp, bool isr)
+struct crystalhd_ioctl_data *chd_dec_alloc_iodata(struct crystalhd_adp *adp,
+ bool isr)
{
unsigned long flags = 0;
struct crystalhd_ioctl_data *temp;
@@ -95,8 +96,8 @@ struct crystalhd_ioctl_data *chd_dec_alloc_iodata(struct crystalhd_adp *adp, boo
return temp;
}
-void chd_dec_free_iodata(struct crystalhd_adp *adp, struct crystalhd_ioctl_data *iodata,
- bool isr)
+void chd_dec_free_iodata(struct crystalhd_adp *adp,
+ struct crystalhd_ioctl_data *iodata, bool isr)
{
unsigned long flags = 0;
@@ -109,7 +110,8 @@ void chd_dec_free_iodata(struct crystalhd_adp *adp, struct crystalhd_ioctl_data
spin_unlock_irqrestore(&adp->lock, flags);
}
-static inline int crystalhd_user_data(unsigned long ud, void *dr, int size, int set)
+static inline int crystalhd_user_data(unsigned long ud, void *dr,
+ int size, int set)
{
int rc;
@@ -131,8 +133,8 @@ static inline int crystalhd_user_data(unsigned long ud, void *dr, int size, int
return rc;
}
-static int chd_dec_fetch_cdata(struct crystalhd_adp *adp, struct crystalhd_ioctl_data *io,
- uint32_t m_sz, unsigned long ua)
+static int chd_dec_fetch_cdata(struct crystalhd_adp *adp,
+ struct crystalhd_ioctl_data *io, uint32_t m_sz, unsigned long ua)
{
unsigned long ua_off;
int rc = 0;
@@ -163,7 +165,7 @@ static int chd_dec_fetch_cdata(struct crystalhd_adp *adp, struct crystalhd_ioctl
}
static int chd_dec_release_cdata(struct crystalhd_adp *adp,
- struct crystalhd_ioctl_data *io, unsigned long ua)
+ struct crystalhd_ioctl_data *io, unsigned long ua)
{
unsigned long ua_off;
int rc;
@@ -178,8 +180,9 @@ static int chd_dec_release_cdata(struct crystalhd_adp *adp,
rc = crystalhd_user_data(ua_off, io->add_cdata,
io->add_cdata_sz, 1);
if (rc) {
- BCMLOG_ERR("failed to push add_cdata sz:%x ua_off:%x\n",
- io->add_cdata_sz, (unsigned int)ua_off);
+ BCMLOG_ERR(
+ "failed to push add_cdata sz:%x ua_off:%x\n",
+ io->add_cdata_sz, (unsigned int)ua_off);
return -ENODATA;
}
}
@@ -252,10 +255,7 @@ static int chd_dec_api_cmd(struct crystalhd_adp *adp, unsigned long ua,
rc = chd_dec_proc_user_data(adp, temp, ua, 1);
}
- if (temp) {
- chd_dec_free_iodata(adp, temp, 0);
- temp = NULL;
- }
+ chd_dec_free_iodata(adp, temp, 0);
return rc;
}
@@ -378,8 +378,8 @@ static int chd_dec_init_chdev(struct crystalhd_adp *adp)
goto class_create_fail;
}
- dev = device_create(crystalhd_class, NULL, MKDEV(adp->chd_dec_major, 0),
- NULL, "crystalhd");
+ dev = device_create(crystalhd_class, NULL,
+ MKDEV(adp->chd_dec_major, 0), NULL, "crystalhd");
if (IS_ERR(dev)) {
rc = PTR_ERR(dev);
BCMLOG_ERR("failed to create device\n");
@@ -394,7 +394,8 @@ static int chd_dec_init_chdev(struct crystalhd_adp *adp)
/* Allocate general purpose ioctl pool. */
for (i = 0; i < CHD_IODATA_POOL_SZ; i++) {
- temp = kzalloc(sizeof(struct crystalhd_ioctl_data), GFP_KERNEL);
+ temp = kzalloc(sizeof(struct crystalhd_ioctl_data),
+ GFP_KERNEL);
if (!temp) {
BCMLOG_ERR("ioctl data pool kzalloc failed\n");
rc = -ENOMEM;
@@ -544,8 +545,7 @@ static int chd_dec_pci_probe(struct pci_dev *pdev,
int rc;
enum BC_STATUS sts = BC_STS_SUCCESS;
- BCMLOG(BCMLOG_DBG, "PCI_INFO: Vendor:0x%04x Device:0x%04x "
- "s_vendor:0x%04x s_device: 0x%04x\n",
+ BCMLOG(BCMLOG_DBG, "PCI_INFO: Vendor:0x%04x Device:0x%04x s_vendor:0x%04x s_device: 0x%04x\n",
pdev->vendor, pdev->device, pdev->subsystem_vendor,
pdev->subsystem_device);
diff --git a/drivers/staging/crystalhd/crystalhd_lnx.h b/drivers/staging/crystalhd/crystalhd_lnx.h
index a9e3633..bac572a 100644
--- a/drivers/staging/crystalhd/crystalhd_lnx.h
+++ b/drivers/staging/crystalhd/crystalhd_lnx.h
@@ -77,8 +77,8 @@ struct crystalhd_adp {
int chd_dec_major;
unsigned int cfg_users;
- struct crystalhd_ioctl_data *idata_free_head; /* ioctl data pool */
- struct crystalhd_elem *elem_pool_head; /* Queue element pool */
+ struct crystalhd_ioctl_data *idata_free_head; /* ioctl data pool */
+ struct crystalhd_elem *elem_pool_head; /* Queue element pool */
struct crystalhd_cmd cmds;
diff --git a/drivers/staging/crystalhd/crystalhd_misc.c b/drivers/staging/crystalhd/crystalhd_misc.c
index a5f109c..51f6980 100644
--- a/drivers/staging/crystalhd/crystalhd_misc.c
+++ b/drivers/staging/crystalhd/crystalhd_misc.c
@@ -30,19 +30,22 @@
uint32_t g_linklog_level;
-static inline uint32_t crystalhd_dram_rd(struct crystalhd_adp *adp, uint32_t mem_off)
+static inline uint32_t crystalhd_dram_rd(struct crystalhd_adp *adp,
+ uint32_t mem_off)
{
crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (mem_off >> 19));
return bc_dec_reg_rd(adp, (0x00380000 | (mem_off & 0x0007FFFF)));
}
-static inline void crystalhd_dram_wr(struct crystalhd_adp *adp, uint32_t mem_off, uint32_t val)
+static inline void crystalhd_dram_wr(struct crystalhd_adp *adp,
+ uint32_t mem_off, uint32_t val)
{
crystalhd_reg_wr(adp, DCI_DRAM_BASE_ADDR, (mem_off >> 19));
bc_dec_reg_wr(adp, (0x00380000 | (mem_off & 0x0007FFFF)), val);
}
-static inline enum BC_STATUS bc_chk_dram_range(struct crystalhd_adp *adp, uint32_t start_off, uint32_t cnt)
+static inline enum BC_STATUS bc_chk_dram_range(struct crystalhd_adp *adp,
+ uint32_t start_off, uint32_t cnt)
{
return BC_STS_SUCCESS;
}
@@ -66,7 +69,8 @@ static struct crystalhd_dio_req *crystalhd_alloc_dio(struct crystalhd_adp *adp)
return temp;
}
-static void crystalhd_free_dio(struct crystalhd_adp *adp, struct crystalhd_dio_req *dio)
+static void crystalhd_free_dio(struct crystalhd_adp *adp,
+ struct crystalhd_dio_req *dio)
{
unsigned long flags = 0;
@@ -99,7 +103,8 @@ static struct crystalhd_elem *crystalhd_alloc_elem(struct crystalhd_adp *adp)
return temp;
}
-static void crystalhd_free_elem(struct crystalhd_adp *adp, struct crystalhd_elem *elem)
+static void crystalhd_free_elem(struct crystalhd_adp *adp,
+ struct crystalhd_elem *elem)
{
unsigned long flags = 0;
@@ -120,7 +125,8 @@ static inline void crystalhd_set_sg(struct scatterlist *sg, struct page *page,
#endif
}
-static inline void crystalhd_init_sg(struct scatterlist *sg, unsigned int entries)
+static inline void crystalhd_init_sg(struct scatterlist *sg,
+ unsigned int entries)
{
/* http://lkml.org/lkml/2007/11/27/68 */
sg_init_table(sg, entries);
@@ -208,7 +214,8 @@ uint32_t crystalhd_reg_rd(struct crystalhd_adp *adp, uint32_t reg_off)
* configuration space.
*
*/
-void crystalhd_reg_wr(struct crystalhd_adp *adp, uint32_t reg_off, uint32_t val)
+void crystalhd_reg_wr(struct crystalhd_adp *adp, uint32_t reg_off,
+ uint32_t val)
{
if (!adp || (reg_off > adp->pci_i2o_len)) {
BCMLOG_ERR("link_wr_reg_off outof range: 0x%08x\n", reg_off);
@@ -469,7 +476,8 @@ enum BC_STATUS crystalhd_create_dioq(struct crystalhd_adp *adp,
* by calling the call back provided during creation.
*
*/
-void crystalhd_delete_dioq(struct crystalhd_adp *adp, struct crystalhd_dioq *dioq)
+void crystalhd_delete_dioq(struct crystalhd_adp *adp,
+ struct crystalhd_dioq *dioq)
{
void *temp;
@@ -639,7 +647,8 @@ void *crystalhd_dioq_fetch_wait(struct crystalhd_dioq *ioq, uint32_t to_secs,
while ((ioq->count == 0) && count) {
spin_unlock_irqrestore(&ioq->lock, flags);
- crystalhd_wait_on_event(&ioq->event, (ioq->count > 0), 1000, rc, 0);
+ crystalhd_wait_on_event(&ioq->event,
+ (ioq->count > 0), 1000, rc, 0);
if (rc == 0) {
goto out;
} else if (rc == -EINTR) {
@@ -678,7 +687,8 @@ enum BC_STATUS crystalhd_map_dio(struct crystalhd_adp *adp, void *ubuff,
struct crystalhd_dio_req **dio_hnd)
{
struct crystalhd_dio_req *dio;
- /* FIXME: jarod: should some of these unsigned longs be uint32_t or uintptr_t? */
+ /* FIXME: jarod: should some of these
+ unsigned longs be uint32_t or uintptr_t? */
unsigned long start = 0, end = 0, uaddr = 0, count = 0;
unsigned long spsz = 0, uv_start = 0;
int i = 0, rw = 0, res = 0, nr_pages = 0, skip_fb_sg = 0;
@@ -723,7 +733,8 @@ enum BC_STATUS crystalhd_map_dio(struct crystalhd_adp *adp, void *ubuff,
if (uv_offset) {
uv_start = (uaddr + (unsigned long)uv_offset) >> PAGE_SHIFT;
dio->uinfo.uv_sg_ix = uv_start - start;
- dio->uinfo.uv_sg_off = ((uaddr + (unsigned long)uv_offset) & ~PAGE_MASK);
+ dio->uinfo.uv_sg_off = ((uaddr + (unsigned long)uv_offset) &
+ ~PAGE_MASK);
}
dio->fb_size = ubuff_sz & 0x03;
@@ -819,7 +830,8 @@ enum BC_STATUS crystalhd_map_dio(struct crystalhd_adp *adp, void *ubuff,
*
* This routine is to unmap the user buffer pages.
*/
-enum BC_STATUS crystalhd_unmap_dio(struct crystalhd_adp *adp, struct crystalhd_dio_req *dio)
+enum BC_STATUS crystalhd_unmap_dio(struct crystalhd_adp *adp,
+ struct crystalhd_dio_req *dio)
{
struct page *page = NULL;
int j = 0;
@@ -841,7 +853,8 @@ enum BC_STATUS crystalhd_unmap_dio(struct crystalhd_adp *adp, struct crystalhd_d
}
}
if (dio->sig == crystalhd_dio_sg_mapped)
- pci_unmap_sg(adp->pdev, dio->sg, dio->page_cnt, dio->direction);
+ pci_unmap_sg(adp->pdev, dio->sg, dio->page_cnt,
+ dio->direction);
crystalhd_free_dio(adp, dio);
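
crystalhd_dram_rd() and crystalhd_dram_wr() above reach decoder DRAM through a sliding register window: the upper bits of the offset (mem_off >> 19) are programmed into DCI_DRAM_BASE_ADDR to select a 512 KB window, and the low 19 bits are added to the 0x00380000 aperture for the actual register access. A small standalone program that just reproduces that address split (the constants come from the hunk above; the helper itself is hypothetical):

#include <stdint.h>
#include <stdio.h>

/*
 * Split a decoder DRAM offset the way crystalhd_dram_rd() does:
 * bits [31:19] select the 512 KB window written to DCI_DRAM_BASE_ADDR,
 * bits [18:0] index into the 0x00380000 aperture.
 */
static void dram_addr_split(uint32_t mem_off)
{
	uint32_t window = mem_off >> 19;
	uint32_t reg = 0x00380000 | (mem_off & 0x0007FFFF);

	printf("mem_off 0x%08x -> window 0x%04x, register 0x%08x\n",
	       mem_off, window, reg);
}

int main(void)
{
	dram_addr_split(0x00000000);	/* window 0, start of aperture */
	dram_addr_split(0x0007FFFF);	/* last byte of window 0 */
	dram_addr_split(0x00080000);	/* first byte of window 1 */
	return 0;
}
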
diff --git a/drivers/staging/crystalhd/crystalhd_misc.h b/drivers/staging/crystalhd/crystalhd_misc.h
index 8cdaa7a..aa736c8 100644
--- a/drivers/staging/crystalhd/crystalhd_misc.h
+++ b/drivers/staging/crystalhd/crystalhd_misc.h
@@ -127,12 +127,16 @@ uint32_t crystalhd_reg_rd(struct crystalhd_adp *, uint32_t);
void crystalhd_reg_wr(struct crystalhd_adp *, uint32_t, uint32_t);
/*========= Decoder (7412) memory access routines..=================*/
-enum BC_STATUS crystalhd_mem_rd(struct crystalhd_adp *, uint32_t, uint32_t, uint32_t *);
-enum BC_STATUS crystalhd_mem_wr(struct crystalhd_adp *, uint32_t, uint32_t, uint32_t *);
+enum BC_STATUS crystalhd_mem_rd(struct crystalhd_adp *,
+ uint32_t, uint32_t, uint32_t *);
+enum BC_STATUS crystalhd_mem_wr(struct crystalhd_adp *,
+ uint32_t, uint32_t, uint32_t *);
/*==========Link (70012) PCIe Config access routines.================*/
-enum BC_STATUS crystalhd_pci_cfg_rd(struct crystalhd_adp *, uint32_t, uint32_t, uint32_t *);
-enum BC_STATUS crystalhd_pci_cfg_wr(struct crystalhd_adp *, uint32_t, uint32_t, uint32_t);
+enum BC_STATUS crystalhd_pci_cfg_rd(struct crystalhd_adp *,
+ uint32_t, uint32_t, uint32_t *);
+enum BC_STATUS crystalhd_pci_cfg_wr(struct crystalhd_adp *,
+ uint32_t, uint32_t, uint32_t);
/*========= Linux Kernel Interface routines. ======================= */
void *bc_kern_dma_alloc(struct crystalhd_adp *, uint32_t, dma_addr_t *);
@@ -168,20 +172,26 @@ do { \
/*================ Direct IO mapping routines ==================*/
extern int crystalhd_create_dio_pool(struct crystalhd_adp *, uint32_t);
extern void crystalhd_destroy_dio_pool(struct crystalhd_adp *);
-extern enum BC_STATUS crystalhd_map_dio(struct crystalhd_adp *, void *, uint32_t,
- uint32_t, bool, bool, struct crystalhd_dio_req**);
+extern enum BC_STATUS crystalhd_map_dio(struct crystalhd_adp *, void *,
+ uint32_t, uint32_t, bool, bool, struct crystalhd_dio_req**);
-extern enum BC_STATUS crystalhd_unmap_dio(struct crystalhd_adp *, struct crystalhd_dio_req*);
-#define crystalhd_get_sgle_paddr(_dio, _ix) (cpu_to_le64(sg_dma_address(&_dio->sg[_ix])))
-#define crystalhd_get_sgle_len(_dio, _ix) (cpu_to_le32(sg_dma_len(&_dio->sg[_ix])))
+extern enum BC_STATUS crystalhd_unmap_dio(struct crystalhd_adp *,
+ struct crystalhd_dio_req*);
+#define crystalhd_get_sgle_paddr(_dio, _ix) (sg_dma_address(&_dio->sg[_ix]))
+#define crystalhd_get_sgle_len(_dio, _ix) (sg_dma_len(&_dio->sg[_ix]))
/*================ General Purpose Queues ==================*/
-extern enum BC_STATUS crystalhd_create_dioq(struct crystalhd_adp *, struct crystalhd_dioq **, crystalhd_data_free_cb , void *);
-extern void crystalhd_delete_dioq(struct crystalhd_adp *, struct crystalhd_dioq *);
-extern enum BC_STATUS crystalhd_dioq_add(struct crystalhd_dioq *ioq, void *data, bool wake, uint32_t tag);
+extern enum BC_STATUS crystalhd_create_dioq(struct crystalhd_adp *,
+ struct crystalhd_dioq **, crystalhd_data_free_cb , void *);
+extern void crystalhd_delete_dioq(struct crystalhd_adp *,
+ struct crystalhd_dioq *);
+extern enum BC_STATUS crystalhd_dioq_add(struct crystalhd_dioq *ioq,
+ void *data, bool wake, uint32_t tag);
extern void *crystalhd_dioq_fetch(struct crystalhd_dioq *ioq);
-extern void *crystalhd_dioq_find_and_fetch(struct crystalhd_dioq *ioq, uint32_t tag);
-extern void *crystalhd_dioq_fetch_wait(struct crystalhd_dioq *ioq, uint32_t to_secs, uint32_t *sig_pend);
+extern void *crystalhd_dioq_find_and_fetch(struct crystalhd_dioq *ioq,
+ uint32_t tag);
+extern void *crystalhd_dioq_fetch_wait(struct crystalhd_dioq *ioq,
+ uint32_t to_secs, uint32_t *sig_pend);
#define crystalhd_dioq_count(_ioq) ((_ioq) ? _ioq->count : 0)
@@ -190,7 +200,8 @@ extern void crystalhd_delete_elem_pool(struct crystalhd_adp *);
/*================ Debug routines/macros .. ================================*/
-extern void crystalhd_show_buffer(uint32_t off, uint8_t *buff, uint32_t dwcount);
+extern void crystalhd_show_buffer(uint32_t off, uint8_t *buff,
+ uint32_t dwcount);
enum _chd_log_levels {
BCMLOG_ERROR = 0x80000000, /* Don't disable this option */
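
Besides the line-wrapping, the hunk above changes crystalhd_get_sgle_paddr() and crystalhd_get_sgle_len() to return sg_dma_address()/sg_dma_len() directly instead of passing them through cpu_to_le64()/cpu_to_le32(). A hedged sketch of how such accessors are typically consumed when filling hardware descriptors from a DMA-mapped scatterlist; struct my_desc and fill_descriptors() are invented for illustration, sg_count is assumed to be the segment count returned by pci_map_sg(), and the crystalhd headers are assumed to be on the include path:

#include <linux/types.h>
#include "crystalhd_misc.h"

/* Hypothetical descriptor layout, not the real crystalhd DMA format. */
struct my_desc {
	u64 addr;
	u32 len;
};

/*
 * Record the bus address and length of each mapped segment of a dio
 * request.  Returns the number of descriptors written.
 */
static int fill_descriptors(struct crystalhd_dio_req *dio, int sg_count,
			    struct my_desc *desc, int max_desc)
{
	int i;

	for (i = 0; i < sg_count && i < max_desc; i++) {
		desc[i].addr = crystalhd_get_sgle_paddr(dio, i);
		desc[i].len = crystalhd_get_sgle_len(dio, i);
	}
	return i;
}
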
diff --git a/drivers/staging/csr/Kconfig b/drivers/staging/csr/Kconfig
deleted file mode 100644
index ad2a109..0000000
--- a/drivers/staging/csr/Kconfig
+++ /dev/null
@@ -1,9 +0,0 @@
-config CSR_WIFI
- tristate "CSR wireless driver"
- depends on MMC && CFG80211_WEXT && INET
- select WIRELESS_EXT
- select WEXT_PRIV
- help
- Driver for the CSR wireless SDIO device.
-
- If unsure, select N.
diff --git a/drivers/staging/csr/LICENSE.txt b/drivers/staging/csr/LICENSE.txt
deleted file mode 100644
index 364853e..0000000
--- a/drivers/staging/csr/LICENSE.txt
+++ /dev/null
@@ -1,39 +0,0 @@
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-Except as contained in this notice, the names of above-listed
-copyright holders and the names of any contributors shall not be used
-in advertising or otherwise to promote the sale, use or other dealings
-in this Software without prior written authorization.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
-CONTRIBUTORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
-OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-Alternatively, this software may be distributed under the terms of the
-GNU General Public License ("GPL") version 2 as published
-by the Free Software Foundation.
-
-As a special exception, if other files instantiate templates or use
-macros or inline functions from this file, or you compile this file
-and link it with other works to produce a work based on this file,
-this file does not by itself cause the resulting work to be covered by
-the GNU General Public License. However the source code for this file
-must still be made available in accordance with section (3) of the GNU
-General Public License.
-
-This exception does not invalidate any other reasons why a work based
-on this file might be covered by the GNU General Public License.
diff --git a/drivers/staging/csr/Makefile b/drivers/staging/csr/Makefile
deleted file mode 100644
index dbd135a..0000000
--- a/drivers/staging/csr/Makefile
+++ /dev/null
@@ -1,73 +0,0 @@
-ccflags-y := -DCSR_SME_USERSPACE -DCSR_SUPPORT_SME -DREMOTE_SYS_SAP -DCSR_WIFI_SECURITY_WAPI_ENABLE -DENABLE_SHUTDOWN -DUNIFI_DEBUG
-ccflags-y += -DSDIO_EXPORTS_STRUCT_DEVICE -DCSR_WIFI_SUPPORT_MMC_DRIVER -DCSR_WIFI_SINGLE_FUNCTION -DCSR_WIFI_SPLIT_PATCH
-ccflags-y += -DCSR_SUPPORT_WEXT -DREMOTE_SYS_SAP -DREMOTE_MGT_SAP -DCSR_WIFI_SECURITY_WAPI_ENABLE -DCSR_WIFI_SECURITY_WAPI_QOSCTRL_MIC_WORKAROUND -DENABLE_SHUTDOWN -DCSR_WIFI_NME_ENABLE -DCSR_WIFI_AP_ENABLE -DCSR_SUPPORT_WEXT_AP -DCSR_WIFI_REQUEUE_PACKET_TO_HAL
-
-obj-$(CONFIG_CSR_WIFI) += csr_wifi.o
-obj-$(CONFIG_CSR_WIFI) += csr_helper.o
-
-csr_wifi-y := bh.o \
- data_tx.o \
- drv.o \
- firmware.o \
- inet.o \
- init_hw.o \
- io.o \
- monitor.o \
- netdev.o \
- os.o \
- putest.o \
- sdio_events.o \
- sdio_mmc.o \
- sdio_stubs.o \
- sme_blocking.o \
- ul_int.o \
- unifi_dbg.o \
- unifi_event.o \
- unifi_pdu_processing.o \
- unifi_sme.o \
- csr_wifi_hip_card_sdio.o \
- csr_wifi_hip_card_sdio_intr.o \
- csr_wifi_hip_card_sdio_mem.o \
- csr_wifi_hip_chiphelper.o \
- csr_wifi_hip_download.o \
- csr_wifi_hip_dump.o \
- csr_wifi_hip_packing.o \
- csr_wifi_hip_send.o \
- csr_wifi_hip_signals.o \
- csr_wifi_hip_ta_sampling.o \
- csr_wifi_hip_udi.o \
- csr_wifi_hip_unifi_signal_names.o \
- csr_wifi_hip_xbv.o \
- csr_wifi_nme_ap_converter_init.o \
- csr_wifi_nme_ap_free_downstream_contents.o \
- csr_wifi_nme_ap_free_upstream_contents.o \
- csr_wifi_nme_ap_serialize.o \
- csr_wifi_nme_ap_sef.o \
- csr_wifi_router_ctrl_sef.o \
- csr_wifi_router_sef.o \
- csr_wifi_router_transport.o \
- csr_wifi_sme_sef.o \
- csr_wifi_sme_converter_init.o \
- csr_wifi_sme_free_downstream_contents.o \
- csr_wifi_sme_free_upstream_contents.o \
- csr_wifi_sme_serialize.o \
- csr_wifi_router_ctrl_converter_init.o \
- csr_wifi_router_ctrl_free_downstream_contents.o \
- csr_wifi_router_ctrl_free_upstream_contents.o \
- csr_wifi_router_ctrl_serialize.o \
- csr_wifi_router_converter_init.o \
- csr_wifi_router_free_downstream_contents.o \
- csr_wifi_router_free_upstream_contents.o \
- csr_wifi_router_serialize.o \
- sme_mgt.o \
- sme_sys.o \
- sme_userspace.o \
- sme_wext.o \
- wext_events.o
-
-csr_helper-y := csr_time.o \
- csr_util.o \
- csr_framework_ext.o \
- csr_wifi_serialize_primitive_types.o \
- csr_serialize_primitive_types.o \
- csr_msgconv.o
diff --git a/drivers/staging/csr/bh.c b/drivers/staging/csr/bh.c
deleted file mode 100644
index b53a9e2..0000000
--- a/drivers/staging/csr/bh.c
+++ /dev/null
@@ -1,397 +0,0 @@
-/*
- * ---------------------------------------------------------------------------
- * FILE: bh.c
- *
- * PURPOSE:
- * Provides an implementation for the driver bottom-half.
- * It is part of the porting exercise in Linux.
- *
- * Copyright (C) 2005-2009 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ---------------------------------------------------------------------------
- */
-#include "csr_wifi_hip_unifi.h"
-#include "unifi_priv.h"
-#include <linux/sched/rt.h>
-
-/*
- * ---------------------------------------------------------------------------
- * uf_start_thread
- *
- * Helper function to start a new thread.
- *
- * Arguments:
- * priv Pointer to OS driver structure for the device.
- * thread Pointer to the thread object
- * func The thread function
- *
- * Returns:
- * 0 on success or else a Linux error code.
- * ---------------------------------------------------------------------------
- */
-int uf_start_thread(unifi_priv_t *priv,
- struct uf_thread *thread, int (*func)(void *))
-{
- if (thread->thread_task != NULL) {
- unifi_error(priv, "%s thread already started\n", thread->name);
- return 0;
- }
-
- /* Start the kernel thread that handles all h/w accesses. */
- thread->thread_task = kthread_run(func, priv, "%s", thread->name);
- if (IS_ERR(thread->thread_task))
- return PTR_ERR(thread->thread_task);
-
-	/* Module parameter overrides the thread priority */
- if (bh_priority != -1) {
- if (bh_priority >= 0 && bh_priority <= MAX_RT_PRIO) {
- struct sched_param param;
- priv->bh_thread.prio = bh_priority;
- unifi_trace(priv, UDBG1,
- "%s thread (RT) priority = %d\n",
- thread->name, bh_priority);
- param.sched_priority = bh_priority;
- sched_setscheduler(thread->thread_task,
- SCHED_FIFO, &param);
- } else if (bh_priority > MAX_RT_PRIO &&
- bh_priority <= MAX_PRIO) {
- priv->bh_thread.prio = bh_priority;
- unifi_trace(priv, UDBG1, "%s thread priority = %d\n",
- thread->name,
- PRIO_TO_NICE(bh_priority));
- set_user_nice(thread->thread_task,
- PRIO_TO_NICE(bh_priority));
- } else {
- priv->bh_thread.prio = DEFAULT_PRIO;
- unifi_warning(priv,
- "%s thread unsupported (%d) priority\n",
- thread->name, bh_priority);
- }
- } else
- priv->bh_thread.prio = DEFAULT_PRIO;
- unifi_trace(priv, UDBG2, "Started %s thread\n", thread->name);
-
- return 0;
-} /* uf_start_thread() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * uf_stop_thread
- *
- * Helper function to stop a thread.
- *
- * Arguments:
- * priv Pointer to OS driver structure for the device.
- * thread Pointer to the thread object
- *
- * Returns:
- *
- * ---------------------------------------------------------------------------
- */
-void uf_stop_thread(unifi_priv_t *priv, struct uf_thread *thread)
-{
- if (!thread->thread_task) {
- unifi_notice(priv, "%s thread is already stopped\n",
- thread->name);
- return;
- }
-
- unifi_trace(priv, UDBG2, "Stopping %s thread\n", thread->name);
-
- kthread_stop(thread->thread_task);
- thread->thread_task = NULL;
-
-} /* uf_stop_thread() */
-
-
-
-/*
- * ---------------------------------------------------------------------------
- * uf_wait_for_thread_to_stop
- *
- * Helper function to wait until a thread is stopped.
- *
- * Arguments:
- * priv Pointer to OS driver structure for the device.
- *
- * Returns:
- *
- * ---------------------------------------------------------------------------
- */
-void
-uf_wait_for_thread_to_stop(unifi_priv_t *priv, struct uf_thread *thread)
-{
- /*
- * kthread_stop() cannot handle the thread exiting while
- * kthread_should_stop() is false, so sleep until kthread_stop()
- * wakes us up
- */
- unifi_trace(priv, UDBG2, "%s waiting for the stop signal.\n",
- thread->name);
- set_current_state(TASK_INTERRUPTIBLE);
- if (!kthread_should_stop()) {
- unifi_trace(priv, UDBG2, "%s schedule....\n", thread->name);
- schedule();
- }
-
- thread->thread_task = NULL;
- unifi_trace(priv, UDBG2, "%s exiting....\n", thread->name);
-} /* uf_wait_for_thread_to_stop() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * handle_bh_error
- *
- * This function reports an error returned from the HIP core bottom-half.
- * It is normally implemented during the porting exercise and passes the error
- * to the SME using unifi_sys_wifi_off_ind().
- * The SME will try to reset the device and go through
- * the initialisation of the UniFi.
- *
- * Arguments:
- * priv Pointer to OS driver structure for the device.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-static void
-handle_bh_error(unifi_priv_t *priv)
-{
- netInterface_priv_t *interfacePriv;
- u8 conf_param = CONFIG_IND_ERROR;
- u8 interfaceTag;
-
-
- /* Block unifi_run_bh() until the error has been handled. */
- priv->bh_thread.block_thread = 1;
-
- /* Consider UniFi to be uninitialised */
- priv->init_progress = UNIFI_INIT_NONE;
-
- /* Stop the network traffic */
- for (interfaceTag = 0;
- interfaceTag < CSR_WIFI_NUM_INTERFACES; interfaceTag++) {
- interfacePriv = priv->interfacePriv[interfaceTag];
- if (interfacePriv->netdev_registered)
- netif_carrier_off(priv->netdev[interfaceTag]);
- }
-
-#ifdef CSR_NATIVE_LINUX
- /* Force any client waiting on an mlme_wait_for_reply() to abort. */
- uf_abort_mlme(priv);
-
- /* Cancel any pending workqueue tasks */
- flush_workqueue(priv->unifi_workqueue);
-
-#endif /* CSR_NATIVE_LINUX */
-
- unifi_error(priv,
- "handle_bh_error: fatal error is reported to the SME.\n");
- /* Notify the clients (SME or unifi_manager) for the error. */
- ul_log_config_ind(priv, &conf_param, sizeof(u8));
-
-} /* handle_bh_error() */
-
-
-
-/*
- * ---------------------------------------------------------------------------
- * bh_thread_function
- *
- * All hardware access happens in this thread.
- * This means there is no need for locks on the hardware and we don't need
- * to worry about reentrancy with the SDIO library.
- * Provides an example implementation of how to call unifi_bh(), which
- * is part of the HIP core API.
- *
- * It processes the events generated by unifi_run_bh() to serialise calls
- * to unifi_bh(). It also demonstrates how the timeout parameter passed in
- * and returned from unifi_bh() needs to be handled.
- *
- * Arguments:
- * arg Pointer to OS driver structure for the device.
- *
- * Returns:
- * None.
- *
- * Notes:
- * When the bottom half of the driver needs to process signals, events,
- * or simply the host status (i.e. sleep mode), it invokes unifi_run_bh().
- * Since we need all SDIO transactions to be in a single thread, the
- * unifi_run_bh() will wake up this thread to process it.
- *
- * ---------------------------------------------------------------------------
- */
-static int bh_thread_function(void *arg)
-{
- unifi_priv_t *priv = (unifi_priv_t *)arg;
- CsrResult csrResult;
- long ret;
- u32 timeout, t;
- struct uf_thread *this_thread;
-
- unifi_trace(priv, UDBG2, "bh_thread_function starting\n");
-
- this_thread = &priv->bh_thread;
-
- t = timeout = 0;
- while (!kthread_should_stop()) {
- /* wait until an error occurs, or we need to process something. */
- unifi_trace(priv, UDBG3, "bh_thread goes to sleep.\n");
-
- if (timeout > 0) {
- /* Convert t in ms to jiffies */
- t = msecs_to_jiffies(timeout);
- ret = wait_event_interruptible_timeout(this_thread->wakeup_q,
- (this_thread->wakeup_flag && !this_thread->block_thread) ||
- kthread_should_stop(),
- t);
- timeout = (ret > 0) ? jiffies_to_msecs(ret) : 0;
- } else {
- ret = wait_event_interruptible(this_thread->wakeup_q,
- (this_thread->wakeup_flag && !this_thread->block_thread) ||
- kthread_should_stop());
- }
-
- if (kthread_should_stop()) {
- unifi_trace(priv, UDBG2, "bh_thread: signalled to exit\n");
- break;
- }
-
- if (ret < 0) {
- unifi_notice(priv,
- "bh_thread: wait_event returned %d, thread will exit\n",
- ret);
- uf_wait_for_thread_to_stop(priv, this_thread);
- break;
- }
-
- this_thread->wakeup_flag = 0;
-
- unifi_trace(priv, UDBG3, "bh_thread calls unifi_bh().\n");
-
- CsrSdioClaim(priv->sdio);
- csrResult = unifi_bh(priv->card, &timeout);
- if(csrResult != CSR_RESULT_SUCCESS) {
- if (csrResult == CSR_WIFI_HIP_RESULT_NO_DEVICE) {
- CsrSdioRelease(priv->sdio);
- uf_wait_for_thread_to_stop(priv, this_thread);
- break;
- }
- /* Errors must be delivered to the error task */
- handle_bh_error(priv);
- }
- CsrSdioRelease(priv->sdio);
- }
-
- /*
- * I would normally try to call csr_sdio_remove_irq() here to make sure
- * that we do not get any interrupts while this thread is not running.
- * However, the MMC/SDIO driver tries to kill its interrupt thread.
- * The kernel threads implementation does not allow killing threads
- * from a thread that has itself been signalled to stop.
- * So, instead, always call csr_sdio_linux_remove_irq() after calling
- * uf_stop_thread() to kill this thread.
- */
-
- unifi_trace(priv, UDBG2, "bh_thread exiting....\n");
- return 0;
-} /* bh_thread_function() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * uf_init_bh
- *
- * Helper function to start the bottom half of the driver.
- * All we need to do here is start the I/O bh thread.
- *
- * Arguments:
- * priv Pointer to OS driver structure for the device.
- *
- * Returns:
- * 0 on success or else a Linux error code.
- * ---------------------------------------------------------------------------
- */
- int
-uf_init_bh(unifi_priv_t *priv)
-{
- int r;
-
- /* Enable mlme interface. */
- priv->io_aborted = 0;
-
-
- /* Start the BH thread */
- r = uf_start_thread(priv, &priv->bh_thread, bh_thread_function);
- if (r) {
- unifi_error(priv,
- "uf_init_bh: failed to start the BH thread.\n");
- return r;
- }
-
- /* Allow interrupts */
- r = csr_sdio_linux_install_irq(priv->sdio);
- if (r) {
- unifi_error(priv,
- "uf_init_bh: failed to install the IRQ.\n");
-
- uf_stop_thread(priv, &priv->bh_thread);
- }
-
- return r;
-} /* uf_init_bh() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_run_bh
- *
- * Part of the HIP core lib API, implemented in the porting exercise.
- * The bottom half of the driver calls this function when
- * it wants to process anything that requires access to unifi.
- * We need to call unifi_bh() which in this implementation is done
- * by waking up the I/O thread.
- *
- * Arguments:
- * ospriv Pointer to OS driver structure for the device.
- *
- * Returns:
- * 0 on success or else a Linux error code.
- *
- * Notes:
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_run_bh(void *ospriv)
-{
- unifi_priv_t *priv = ospriv;
-
- /*
- * If an error has occurred, we silently discard all messages from the bh
- * until the error has been processed and the unifi has been reinitialised.
- */
- if (priv->bh_thread.block_thread == 1) {
- unifi_trace(priv, UDBG3, "unifi_run_bh: discard message.\n");
- /*
- * Do not try to acknowledge a pending interrupt here.
- * This function is called by unifi_send_signal(), which in turn can be
- * running at an atomic or 'disabled irq' level if a signal is sent
- * from a workqueue task (e.g. when multicast addresses are set).
- * We cannot hold the SDIO lock because it might sleep.
- */
- return CSR_RESULT_FAILURE;
- }
-
- priv->bh_thread.wakeup_flag = 1;
- /* wake up I/O thread */
- wake_up_interruptible(&priv->bh_thread.wakeup_q);
-
- return CSR_RESULT_SUCCESS;
-} /* unifi_run_bh() */
-
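
bh.c above is the driver's single-threaded bottom half: one kthread owns all SDIO access, sleeps on a waitqueue, and is kicked by unifi_run_bh() whenever there is work, with unifi_bh() optionally returning a timeout for the next wait. The real code additionally raises the thread to an RT priority and blocks itself while a fatal error is handled; the stripped-down sketch below shows only the core wake/wait loop, using a hypothetical struct my_bh and do_work() callback rather than the CSR types:

#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/wait.h>

/* Hypothetical bottom-half state; the driver keeps this in unifi_priv_t. */
struct my_bh {
	wait_queue_head_t wakeup_q;
	int wakeup_flag;
	struct task_struct *task;
};

/* Stand-in for unifi_bh(): does the work, returns next timeout in ms (0 = none). */
static u32 do_work(struct my_bh *bh)
{
	return 0;
}

static int my_bh_thread(void *arg)
{
	struct my_bh *bh = arg;
	u32 timeout_ms = 0;

	while (!kthread_should_stop()) {
		if (timeout_ms)
			wait_event_interruptible_timeout(bh->wakeup_q,
				bh->wakeup_flag || kthread_should_stop(),
				msecs_to_jiffies(timeout_ms));
		else
			wait_event_interruptible(bh->wakeup_q,
				bh->wakeup_flag || kthread_should_stop());

		if (kthread_should_stop())
			break;

		bh->wakeup_flag = 0;
		timeout_ms = do_work(bh);	/* all hardware access happens here */
	}
	return 0;
}

/* unifi_run_bh() equivalent: safe from atomic context, never sleeps. */
static void my_bh_kick(struct my_bh *bh)
{
	bh->wakeup_flag = 1;
	wake_up_interruptible(&bh->wakeup_q);
}

static int my_bh_start(struct my_bh *bh)
{
	init_waitqueue_head(&bh->wakeup_q);
	bh->wakeup_flag = 0;
	bh->task = kthread_run(my_bh_thread, bh, "my_bh");
	if (IS_ERR(bh->task))
		return PTR_ERR(bh->task);
	return 0;
}
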
diff --git a/drivers/staging/csr/csr_framework_ext.c b/drivers/staging/csr/csr_framework_ext.c
deleted file mode 100644
index 2aabb6c..0000000
--- a/drivers/staging/csr/csr_framework_ext.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2010
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/kthread.h>
-#include <linux/module.h>
-#include <linux/freezer.h>
-#include <linux/semaphore.h>
-#include <linux/slab.h>
-#include <linux/bitops.h>
-
-#include "csr_framework_ext.h"
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrThreadSleep
- *
- * DESCRIPTION
- * Sleep for a given period.
- *
- * RETURNS
- * void
- *
- *----------------------------------------------------------------------------*/
-void CsrThreadSleep(u16 sleepTimeInMs)
-{
- unsigned long t;
-
- /* Convert t in ms to jiffies and round up */
- t = ((sleepTimeInMs * HZ) + 999) / 1000;
- schedule_timeout_uninterruptible(t);
-}
-EXPORT_SYMBOL_GPL(CsrThreadSleep);
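
CsrThreadSleep() converts milliseconds to jiffies by hand with ((ms * HZ) + 999) / 1000, a ceiling division that guarantees at least one full tick, before calling schedule_timeout_uninterruptible(); msecs_to_jiffies() would normally be used for the same conversion. A quick userspace check of the rounding, with HZ picked purely for illustration:

#include <stdio.h>

#define HZ 100	/* illustrative tick rate: one jiffy = 10 ms */

/* Same round-up conversion CsrThreadSleep() performs. */
static unsigned long ms_to_jiffies_roundup(unsigned long ms)
{
	return (ms * HZ + 999) / 1000;
}

int main(void)
{
	printf("1 ms    -> %lu jiffies\n", ms_to_jiffies_roundup(1));	/* 1, never 0 */
	printf("10 ms   -> %lu jiffies\n", ms_to_jiffies_roundup(10));	/* 1 */
	printf("15 ms   -> %lu jiffies\n", ms_to_jiffies_roundup(15));	/* 2, rounded up */
	printf("1000 ms -> %lu jiffies\n", ms_to_jiffies_roundup(1000));/* 100 */
	return 0;
}
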
diff --git a/drivers/staging/csr/csr_framework_ext.h b/drivers/staging/csr/csr_framework_ext.h
deleted file mode 100644
index e8ae490..0000000
--- a/drivers/staging/csr/csr_framework_ext.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef CSR_FRAMEWORK_EXT_H__
-#define CSR_FRAMEWORK_EXT_H__
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2010
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-#include "csr_result.h"
-#include "csr_framework_ext_types.h"
-
-/* Result codes */
-#define CSR_FE_RESULT_NO_MORE_EVENTS ((CsrResult) 0x0001)
-#define CSR_FE_RESULT_INVALID_POINTER ((CsrResult) 0x0002)
-#define CSR_FE_RESULT_INVALID_HANDLE ((CsrResult) 0x0003)
-#define CSR_FE_RESULT_NO_MORE_MUTEXES ((CsrResult) 0x0004)
-#define CSR_FE_RESULT_TIMEOUT ((CsrResult) 0x0005)
-#define CSR_FE_RESULT_NO_MORE_THREADS ((CsrResult) 0x0006)
-
-/* Thread priorities */
-#define CSR_THREAD_PRIORITY_HIGHEST ((u16) 0)
-#define CSR_THREAD_PRIORITY_HIGH ((u16) 1)
-#define CSR_THREAD_PRIORITY_NORMAL ((u16) 2)
-#define CSR_THREAD_PRIORITY_LOW ((u16) 3)
-#define CSR_THREAD_PRIORITY_LOWEST ((u16) 4)
-
-#define CSR_EVENT_WAIT_INFINITE ((u16) 0xFFFF)
-
-void CsrThreadSleep(u16 sleepTimeInMs);
-
-#endif
diff --git a/drivers/staging/csr/csr_framework_ext_types.h b/drivers/staging/csr/csr_framework_ext_types.h
deleted file mode 100644
index 575598c..0000000
--- a/drivers/staging/csr/csr_framework_ext_types.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef CSR_FRAMEWORK_EXT_TYPES_H__
-#define CSR_FRAMEWORK_EXT_TYPES_H__
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2010
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-#ifdef __KERNEL__
-#include <linux/kthread.h>
-#include <linux/semaphore.h>
-#else
-#include <pthread.h>
-#endif
-
-#ifdef __KERNEL__
-
-typedef struct semaphore CsrMutexHandle;
-
-#else /* __KERNEL __ */
-
-typedef pthread_mutex_t CsrMutexHandle;
-
-#endif /* __KERNEL__ */
-
-#endif
diff --git a/drivers/staging/csr/csr_log.h b/drivers/staging/csr/csr_log.h
deleted file mode 100644
index 9829410..0000000
--- a/drivers/staging/csr/csr_log.h
+++ /dev/null
@@ -1,223 +0,0 @@
-#ifndef CSR_LOG_H__
-#define CSR_LOG_H__
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2010
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-#include "csr_sched.h"
-#include "csr_prim_defs.h"
-#include "csr_msgconv.h"
-
-/*
- * Log filtering
- */
-
-/*----------------------------------------------------*/
-/* Filtering on environment specific log levels */
-/*----------------------------------------------------*/
-typedef u32 CsrLogLevelEnvironment;
-#define CSR_LOG_LEVEL_ENVIRONMENT_OFF ((CsrLogLevelEnvironment) 0x00000000) /* No environment data/events are logged */
-#define CSR_LOG_LEVEL_ENVIRONMENT_BCI_ACL ((CsrLogLevelEnvironment) 0x00000001) /* BlueCore Channel Interface HCI Acl data are logged */
-#define CSR_LOG_LEVEL_ENVIRONMENT_BCI_HCI ((CsrLogLevelEnvironment) 0x00000002) /* BlueCore Channel Interface HCI Cmd/Evt data are logged */
-#define CSR_LOG_LEVEL_ENVIRONMENT_BCI_SCO ((CsrLogLevelEnvironment) 0x00000004) /* BlueCore Channel Interface HCI Sco data are logged */
-#define CSR_LOG_LEVEL_ENVIRONMENT_BCI_VENDOR ((CsrLogLevelEnvironment) 0x00000008) /* BlueCore Channel Interface HCI Vendor specific data are logged (This includes BCCMD, HQ, VM etc) */
-#define CSR_LOG_LEVEL_ENVIRONMENT_TRANSPORTS ((CsrLogLevelEnvironment) 0x00000010) /* Transport protocol data is logged (This includes transport protocols like BCSP, H4 etc.) */
-#define CSR_LOG_LEVEL_ENVIRONMENT_BGINT_REG ((CsrLogLevelEnvironment) 0x00000020) /* Background Interrupt registration events are logged */
-#define CSR_LOG_LEVEL_ENVIRONMENT_BGINT_UNREG ((CsrLogLevelEnvironment) 0x00000040) /* Background Interrupt unregistration events are logged */
-#define CSR_LOG_LEVEL_ENVIRONMENT_BGINT_SET ((CsrLogLevelEnvironment) 0x00000080) /* Background Interrupt set events are logged */
-#define CSR_LOG_LEVEL_ENVIRONMENT_BGINT_START ((CsrLogLevelEnvironment) 0x00000100) /* Background Interrupt start events are logged */
-#define CSR_LOG_LEVEL_ENVIRONMENT_BGINT_DONE ((CsrLogLevelEnvironment) 0x00000200) /* Background Interrupt done events are logged */
-#define CSR_LOG_LEVEL_ENVIRONMENT_PROTO ((CsrLogLevelEnvironment) 0x00000400) /* Transport protocol events are logged */
-#define CSR_LOG_LEVEL_ENVIRONMENT_PROTO_LOC ((CsrLogLevelEnvironment) 0x00000800) /* The location where the transport protocol event occurred is logged. NB: This is a supplement to CSR_LOG_LEVEL_ENVIRONMENT_PROTO, it has no effect without it */
-/* The bit masks between here are reserved for future usage */
-#define CSR_LOG_LEVEL_ENVIRONMENT_ALL ((CsrLogLevelEnvironment) 0xFFFFFFFF) /* All possible environment data/events are logged WARNING: By using this define the application also accepts future possible environment data/events in the logs */
-
-/*----------------------------------------------------*/
-/* Filtering on task specific log levels */
-/*----------------------------------------------------*/
-typedef u32 CsrLogLevelTask;
-#define CSR_LOG_LEVEL_TASK_OFF ((CsrLogLevelTask) 0x00000000) /* No events are logged for this task */
-#define CSR_LOG_LEVEL_TASK_TEXT ((CsrLogLevelTask) 0x00000001) /* Text strings printed by a task are logged NB: This bit does not affect the CSR_LOG_TEXT_LEVEL interface. This has to be configured separately */
-#define CSR_LOG_LEVEL_TASK_TEXT_LOC ((CsrLogLevelTask) 0x00000002) /* The location where the text string call occurred is logged. NB: This is a supplement to CSR_LOG_LEVEL_TASK_TEXT, it has no effect without it */
-#define CSR_LOG_LEVEL_TASK_STATE ((CsrLogLevelTask) 0x00000004) /* FSM state transitions in a task are logged */
-#define CSR_LOG_LEVEL_TASK_STATE_NAME ((CsrLogLevelTask) 0x00000008) /* The name of each state in an FSM state transition is logged. NB: This is a supplement to CSR_LOG_LEVEL_TASK_STATE, it has no effect without it */
-#define CSR_LOG_LEVEL_TASK_STATE_LOC ((CsrLogLevelTask) 0x00000010) /* The location where the FSM state transition occurred is logged. NB: This is a supplement to CSR_LOG_LEVEL_TASK_STATE, it has no effect without it */
-#define CSR_LOG_LEVEL_TASK_TASK_SWITCH ((CsrLogLevelTask) 0x00000020) /* Activation and deactivation of a task are logged */
-#define CSR_LOG_LEVEL_TASK_MESSAGE_PUT ((CsrLogLevelTask) 0x00000080) /* Message put operations are logged */
-#define CSR_LOG_LEVEL_TASK_MESSAGE_PUT_LOC ((CsrLogLevelTask) 0x00000100) /* The location where a message was sent is logged. NB: This is a supplement to CSR_LOG_LEVEL_TASK_MESSAGE_PUT, it has no effect without it */
-#define CSR_LOG_LEVEL_TASK_MESSAGE_GET ((CsrLogLevelTask) 0x00000200) /* Message get operations are logged */
-#define CSR_LOG_LEVEL_TASK_MESSAGE_QUEUE_PUSH ((CsrLogLevelTask) 0x00000400) /* Message push operations are logged */
-#define CSR_LOG_LEVEL_TASK_MESSAGE_QUEUE_POP ((CsrLogLevelTask) 0x00000800) /* Message pop operations are logged */
-#define CSR_LOG_LEVEL_TASK_PRIM_ONLY_TYPE ((CsrLogLevelTask) 0x00001000) /* Only the type of primitives in messages are logged. By default the entire primitive is serialized and logged */
-#define CSR_LOG_LEVEL_TASK_PRIM_APPLY_LIMIT ((CsrLogLevelTask) 0x00002000) /* An upper limit (defined by CSR_LOG_PRIM_SIZE_UPPER_LIMIT) is applied to how much of a primitive in a message is logged. NB: This limit is only applied if CSR_LOG_LEVEL_TASK_PRIM_ONLY_TYPE is _not_ defined */
-#define CSR_LOG_LEVEL_TASK_TIMER_IN ((CsrLogLevelTask) 0x00004000) /* TimedEventIn events are logged */
-#define CSR_LOG_LEVEL_TASK_TIMER_IN_LOC ((CsrLogLevelTask) 0x00008000) /* The location where a timer was started is logged. NB: This is a supplement to CSR_LOG_LEVEL_TASK_TIMER_IN, it has no effect without it */
-#define CSR_LOG_LEVEL_TASK_TIMER_CANCEL ((CsrLogLevelTask) 0x00010000) /* TimedEventCancel events are logged */
-#define CSR_LOG_LEVEL_TASK_TIMER_CANCEL_LOC ((CsrLogLevelTask) 0x00020000) /* The location where a timer was cancelled is logged. NB: This is a supplement to CSR_LOG_LEVEL_TASK_TIMER_CANCEL, it has no effect without it */
-#define CSR_LOG_LEVEL_TASK_TIMER_FIRE ((CsrLogLevelTask) 0x00040000) /* TimedEventFire events are logged */
-#define CSR_LOG_LEVEL_TASK_TIMER_DONE ((CsrLogLevelTask) 0x00080000) /* TimedEventDone events are logged */
-/* The bit masks between here are reserved for future usage */
-#define CSR_LOG_LEVEL_TASK_ALL ((CsrLogLevelTask) 0xFFFFFFFF & ~(CSR_LOG_LEVEL_TASK_PRIM_ONLY_TYPE | CSR_LOG_LEVEL_TASK_PRIM_APPLY_LIMIT)) /* All info possible to log for a task are logged. WARNING: By using this define the application also accepts future possible task data/events in the logs */
-
-u8 CsrLogEnvironmentIsFiltered(CsrLogLevelEnvironment level);
-CsrLogLevelTask CsrLogTaskFilterGet(CsrSchedQid taskId);
-u8 CsrLogTaskIsFiltered(CsrSchedQid taskId, CsrLogLevelTask level);
-
-/*
- * Logging stuff
- */
-#define CSR_LOG_STRINGIFY_REAL(a) (#a)
-#define CSR_LOG_STRINGIFY(a) CSR_LOG_STRINGIFY_REAL(a)
-
-typedef struct {
- u16 primitiveType;
- const char *primitiveName;
- CsrMsgConvMsgEntry *messageConv; /* Private - do not use */
-} CsrLogPrimitiveInformation;
-
-typedef struct {
- const char *techVer;
- u32 primitiveInfoCount;
- CsrLogPrimitiveInformation *primitiveInfo;
-} CsrLogTechInformation;
-
-/*---------------------------------*/
-/* Tech logging */
-/*---------------------------------*/
-typedef u8 bitmask8_t;
-typedef u16 bitmask16_t;
-typedef u32 bitmask32_t;
-
-#ifdef CSR_LOG_ENABLE
-#ifdef CSR_LOG_INCLUDE_FILE_NAME_AND_LINE_NUMBER
-/* DEPRECATED - replaced by csr_log_text.h */
-#define CSR_LOG_TEXT(text) \
- do { \
- if (!CsrLogTaskIsFiltered(CsrSchedTaskQueueGet(), CSR_LOG_LEVEL_TASK_TEXT)) { \
- CsrLogTaskText(text, __LINE__, __FILE__); \
- } \
- } while (0)
-#else
-/* DEPRECATED - replaced by csr_log_text.h */
-#define CSR_LOG_TEXT(text) \
- do { \
- if (!CsrLogTaskIsFiltered(CsrSchedTaskQueueGet(), CSR_LOG_LEVEL_TASK_TEXT)) { \
- CsrLogTaskText(text, 0, NULL); \
- } \
- } while (0)
-#endif
-#else
-#define CSR_LOG_TEXT(text)
-#endif
-
-/* DEPRECATED - replaced by csr_log_text.h */
-void CsrLogTaskText(const char *text,
- u32 line,
- const char *file);
-
-#define CSR_LOG_STATE_TRANSITION_MASK_FSM_NAME (0x001)
-#define CSR_LOG_STATE_TRANSITION_MASK_NEXT_STATE (0x002)
-#define CSR_LOG_STATE_TRANSITION_MASK_NEXT_STATE_STR (0x004)
-#define CSR_LOG_STATE_TRANSITION_MASK_PREV_STATE (0x008)
-#define CSR_LOG_STATE_TRANSITION_MASK_PREV_STATE_STR (0x010)
-#define CSR_LOG_STATE_TRANSITION_MASK_EVENT (0x020)
-#define CSR_LOG_STATE_TRANSITION_MASK_EVENT_STR (0x040)
-
-/* DEPRECATED - replaced by csr_log_text.h */
-void CsrLogStateTransition(bitmask16_t mask,
- u32 identifier,
- const char *fsm_name,
- u32 prev_state,
- const char *prev_state_str,
- u32 in_event,
- const char *in_event_str,
- u32 next_state,
- const char *next_state_str,
- u32 line,
- const char *file);
-
-/*---------------------------------*/
-/* BSP logging */
-/*---------------------------------*/
-void CsrLogSchedInit(u8 thread_id);
-void CsrLogSchedDeinit(u8 thread_id);
-
-void CsrLogSchedStart(u8 thread_id);
-void CsrLogSchedStop(u8 thread_id);
-
-void CsrLogInitTask(u8 thread_id, CsrSchedQid tskid, const char *tskName);
-void CsrLogDeinitTask(u16 task_id);
-
-void CsrLogActivate(CsrSchedQid tskid);
-void CsrLogDeactivate(CsrSchedQid tskid);
-
-#define SYNERGY_SERIALIZER_TYPE_DUMP (0x000)
-#define SYNERGY_SERIALIZER_TYPE_SER (0x001)
-
-void CsrLogMessagePut(u32 line,
- const char *file,
- CsrSchedQid src_task_id,
- CsrSchedQid dst_taskid,
- CsrSchedMsgId msg_id,
- u16 prim_type,
- const void *msg);
-
-void CsrLogMessageGet(CsrSchedQid src_task_id,
- CsrSchedQid dst_taskid,
- u8 get_res,
- CsrSchedMsgId msg_id,
- u16 prim_type,
- const void *msg);
-
-void CsrLogTimedEventIn(u32 line,
- const char *file,
- CsrSchedQid task_id,
- CsrSchedTid tid,
- u32 requested_delay,
- u16 fniarg,
- const void *fnvarg);
-
-void CsrLogTimedEventFire(CsrSchedQid task_id,
- CsrSchedTid tid);
-
-void CsrLogTimedEventDone(CsrSchedQid task_id,
- CsrSchedTid tid);
-
-void CsrLogTimedEventCancel(u32 line,
- const char *file,
- CsrSchedQid task_id,
- CsrSchedTid tid,
- u8 cancel_res);
-
-void CsrLogBgintRegister(u8 thread_id,
- CsrSchedBgint irq,
- const char *callback,
- const void *ptr);
-void CsrLogBgintUnregister(CsrSchedBgint irq);
-void CsrLogBgintSet(CsrSchedBgint irq);
-void CsrLogBgintServiceStart(CsrSchedBgint irq);
-void CsrLogBgintServiceDone(CsrSchedBgint irq);
-
-void CsrLogExceptionStateEvent(u16 prim_type,
- CsrPrim msg_type,
- u16 state,
- u32 line,
- const char *file);
-void CsrLogExceptionGeneral(u16 prim_type,
- u16 state,
- const char *text,
- u32 line,
- const char *file);
-void CsrLogExceptionWarning(u16 prim_type,
- u16 state,
- const char *text,
- u32 line,
- const char *file);
-
-#endif
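
The log levels above are plain 32-bit masks; the CSR_LOG_TEXT macro earlier in this header logs only when CsrLogTaskIsFiltered() returns false, i.e. the function is treated as returning non-zero when a level is filtered out. A minimal sketch of the same mask test, with invented level names standing in for the CsrLogLevelTask bits:

#include <stdint.h>
#include <stdio.h>

/* Invented per-task mask bits, mirroring the CsrLogLevelTask idea. */
#define LOG_TEXT	0x00000001u
#define LOG_STATE	0x00000004u
#define LOG_TIMER	0x00004000u

static uint32_t task_mask = LOG_TEXT | LOG_STATE;	/* timer events filtered out */

/* Non-zero when the level is filtered, i.e. must not be logged. */
static int log_is_filtered(uint32_t level)
{
	return (task_mask & level) == 0;
}

int main(void)
{
	if (!log_is_filtered(LOG_TEXT))
		printf("text event logged\n");
	if (!log_is_filtered(LOG_TIMER))
		printf("timer event logged\n");	/* filtered, not printed */
	return 0;
}
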
diff --git a/drivers/staging/csr/csr_log_configure.h b/drivers/staging/csr/csr_log_configure.h
deleted file mode 100644
index 283647c..0000000
--- a/drivers/staging/csr/csr_log_configure.h
+++ /dev/null
@@ -1,39 +0,0 @@
-#ifndef CSR_LOG_CONFIGURE_H__
-#define CSR_LOG_CONFIGURE_H__
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2010
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
- *****************************************************************************/
-
-#include "csr_log.h"
-
-/*--------------------------------------------*/
-/* Filtering on log text warning levels */
-/*--------------------------------------------*/
-typedef u32 CsrLogLevelText;
-#define CSR_LOG_LEVEL_TEXT_OFF ((CsrLogLevelText) 0x0000)
-
-#define CSR_LOG_LEVEL_TEXT_CRITICAL ((CsrLogLevelText) 0x0001)
-#define CSR_LOG_LEVEL_TEXT_ERROR ((CsrLogLevelText) 0x0002)
-#define CSR_LOG_LEVEL_TEXT_WARNING ((CsrLogLevelText) 0x0004)
-#define CSR_LOG_LEVEL_TEXT_INFO ((CsrLogLevelText) 0x0008)
-#define CSR_LOG_LEVEL_TEXT_DEBUG ((CsrLogLevelText) 0x0010)
-
-#define CSR_LOG_LEVEL_TEXT_ALL ((CsrLogLevelText) 0xFFFF)
-
-/* The log text interface is used by both scheduler tasks and components outside the scheduler context.
- * Therefore a CsrLogTextTaskId is introduced. It is effectively treated as two u16's. The lower
- * 16 bits correspond one-to-one with the scheduler queueId's (CsrSchedQid), so these bits cannot be used
- * by components outside scheduler tasks. The upper 16 bits are allocated for use by components outside the
- * scheduler, such as drivers. Components in this range are defined independently by each technology. To avoid
- * clashes, the technologies are only allowed to assign values within the same restrictive range as applies to
- * primitive identifiers, e.g. for the framework, components outside the scheduler are only allowed to assign
- * taskId's in the range 0x0600xxxx to 0x06FFxxxx. And so on for other technologies. */
-typedef u32 CsrLogTextTaskId;
-
-#endif
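
The comment above treats CsrLogTextTaskId as two packed u16's: the low half mirrors the scheduler queue ID (CsrSchedQid), the high half identifies non-scheduler components such as drivers. A minimal sketch of that packing; the helper macros are invented for illustration and are not part of the CSR headers:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t CsrLogTextTaskId;

/* Illustrative helpers, not defined by CSR. */
#define TASKID_PACK(component, qid)	(((uint32_t)(component) << 16) | (uint16_t)(qid))
#define TASKID_QUEUE(id)		((uint16_t)((id) & 0xFFFFu))	/* CsrSchedQid half */
#define TASKID_COMPONENT(id)		((uint16_t)((id) >> 16))	/* non-scheduler half */

int main(void)
{
	/* e.g. a framework component outside the scheduler, 0x0600xxxx range */
	CsrLogTextTaskId id = TASKID_PACK(0x0600, 0x0001);

	printf("taskId 0x%08x: component 0x%04x, queue 0x%04x\n",
	       id, TASKID_COMPONENT(id), TASKID_QUEUE(id));
	return 0;
}
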
diff --git a/drivers/staging/csr/csr_log_text.h b/drivers/staging/csr/csr_log_text.h
deleted file mode 100644
index cfcf64a..0000000
--- a/drivers/staging/csr/csr_log_text.h
+++ /dev/null
@@ -1,124 +0,0 @@
-#ifndef CSR_LOG_TEXT_H__
-#define CSR_LOG_TEXT_H__
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2010
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-#include "csr_log_configure.h"
-
-typedef struct CsrLogSubOrigin
-{
- u16 subOriginNumber; /* Id of the given SubOrigin */
- const char *subOriginName; /* Prefix Text for this SubOrigin */
-} CsrLogSubOrigin;
-
-/* Register a task which is going to use the CSR_LOG_TEXT_XXX interface */
-#ifdef CSR_LOG_ENABLE
-void CsrLogTextRegister(CsrLogTextTaskId taskId, const char *taskName, u16 subOriginsLength, const CsrLogSubOrigin *subOrigins);
-#else
-#define CsrLogTextRegister(taskId, taskName, subOriginsLength, subOrigins)
-#endif
-
-/* CRITICAL: Conditions that are threatening to the integrity/stability of the
- system as a whole. */
-#if defined(CSR_LOG_ENABLE) && !defined(CSR_LOG_LEVEL_TEXT_CRITICAL_DISABLE)
-void CsrLogTextCritical(CsrLogTextTaskId taskId, u16 subOrigin, const char *formatString, ...);
-void CsrLogTextBufferCritical(CsrLogTextTaskId taskId, u16 subOrigin, size_t bufferLength, const void *buffer, const char *formatString, ...);
-#define CSR_LOG_TEXT_CRITICAL(taskId_subOrigin_formatString_varargs) CsrLogTextCritical taskId_subOrigin_formatString_varargs
-#define CSR_LOG_TEXT_CONDITIONAL_CRITICAL(condition, logtextargs) {if (condition) {CSR_LOG_TEXT_CRITICAL(logtextargs);}}
-#define CSR_LOG_TEXT_BUFFER_CRITICAL(taskId_subOrigin_length_buffer_formatString_varargs) CsrLogTextBufferCritical taskId_subOrigin_length_buffer_formatString_varargs
-#define CSR_LOG_TEXT_BUFFER_CONDITIONAL_CRITICAL(condition, logtextbufferargs) {if (condition) {CSR_LOG_TEXT_BUFFER_CRITICAL(logtextbufferargs);}}
-#else
-#define CSR_LOG_TEXT_CRITICAL(taskId_subOrigin_formatString_varargs)
-#define CSR_LOG_TEXT_CONDITIONAL_CRITICAL(condition, logtextargs)
-#define CSR_LOG_TEXT_BUFFER_CRITICAL(taskId_subOrigin_length_buffer_formatString_varargs)
-#define CSR_LOG_TEXT_BUFFER_CONDITIONAL_CRITICAL(condition, logtextbufferargs)
-#endif
-
-/* ERROR: Malfunction of a component rendering it unable to operate correctly,
- causing lack of functionality but not loss of system integrity/stability. */
-#if defined(CSR_LOG_ENABLE) && !defined(CSR_LOG_LEVEL_TEXT_ERROR_DISABLE)
-void CsrLogTextError(CsrLogTextTaskId taskId, u16 subOrigin, const char *formatString, ...);
-void CsrLogTextBufferError(CsrLogTextTaskId taskId, u16 subOrigin, size_t bufferLength, const void *buffer, const char *formatString, ...);
-#define CSR_LOG_TEXT_ERROR(taskId_subOrigin_formatString_varargs) CsrLogTextError taskId_subOrigin_formatString_varargs
-#define CSR_LOG_TEXT_CONDITIONAL_ERROR(condition, logtextargs) {if (condition) {CSR_LOG_TEXT_ERROR(logtextargs);}}
-#define CSR_LOG_TEXT_BUFFER_ERROR(taskId_subOrigin_length_buffer_formatString_varargs) CsrLogTextBufferError taskId_subOrigin_length_buffer_formatString_varargs
-#define CSR_LOG_TEXT_BUFFER_CONDITIONAL_ERROR(condition, logtextbufferargs) {if (condition) {CSR_LOG_TEXT_BUFFER_ERROR(logtextbufferargs);}}
-#else
-#define CSR_LOG_TEXT_ERROR(taskId_subOrigin_formatString_varargs)
-#define CSR_LOG_TEXT_CONDITIONAL_ERROR(condition, logtextargs)
-#define CSR_LOG_TEXT_BUFFER_ERROR(taskId_subOrigin_length_buffer_formatString_varargs)
-#define CSR_LOG_TEXT_BUFFER_CONDITIONAL_ERROR(condition, logtextbufferargs)
-#endif
-
-/* WARNING: Conditions that are unexpected and indicative of possible problems
- or violations of specifications, where the result of such deviations does not
- lead to malfunction of the component. */
-#if defined(CSR_LOG_ENABLE) && !defined(CSR_LOG_LEVEL_TEXT_WARNING_DISABLE)
-void CsrLogTextWarning(CsrLogTextTaskId taskId, u16 subOrigin, const char *formatString, ...);
-void CsrLogTextBufferWarning(CsrLogTextTaskId taskId, u16 subOrigin, size_t bufferLength, const void *buffer, const char *formatString, ...);
-#define CSR_LOG_TEXT_WARNING(taskId_subOrigin_formatString_varargs) CsrLogTextWarning taskId_subOrigin_formatString_varargs
-#define CSR_LOG_TEXT_CONDITIONAL_WARNING(condition, logtextargs) {if (condition) {CSR_LOG_TEXT_WARNING(logtextargs);}}
-#define CSR_LOG_TEXT_BUFFER_WARNING(taskId_subOrigin_length_buffer_formatString_varargs) CsrLogTextBufferWarning taskId_subOrigin_length_buffer_formatString_varargs
-#define CSR_LOG_TEXT_BUFFER_CONDITIONAL_WARNING(condition, logtextbufferargs) {if (condition) {CSR_LOG_TEXT_BUFFER_WARNING(logtextbufferargs);}}
-#else
-#define CSR_LOG_TEXT_WARNING(taskId_subOrigin_formatString_varargs)
-#define CSR_LOG_TEXT_CONDITIONAL_WARNING(condition, logtextargs)
-#define CSR_LOG_TEXT_BUFFER_WARNING(taskId_subOrigin_length_buffer_formatString_varargs)
-#define CSR_LOG_TEXT_BUFFER_CONDITIONAL_WARNING(condition, logtextbufferargs)
-#endif
-
-/* INFO: Important events that may aid in determining the conditions under which
- the more severe conditions are encountered. */
-#if defined(CSR_LOG_ENABLE) && !defined(CSR_LOG_LEVEL_TEXT_INFO_DISABLE)
-void CsrLogTextInfo(CsrLogTextTaskId taskId, u16 subOrigin, const char *formatString, ...);
-void CsrLogTextBufferInfo(CsrLogTextTaskId taskId, u16 subOrigin, size_t bufferLength, const void *buffer, const char *formatString, ...);
-#define CSR_LOG_TEXT_INFO(taskId_subOrigin_formatString_varargs) CsrLogTextInfo taskId_subOrigin_formatString_varargs
-#define CSR_LOG_TEXT_CONDITIONAL_INFO(condition, logtextargs) {if (condition) {CSR_LOG_TEXT_INFO(logtextargs);}}
-#define CSR_LOG_TEXT_BUFFER_INFO(taskId_subOrigin_length_buffer_formatString_varargs) CsrLogTextBufferInfo taskId_subOrigin_length_buffer_formatString_varargs
-#define CSR_LOG_TEXT_BUFFER_CONDITIONAL_INFO(condition, logtextbufferargs) {if (condition) {CSR_LOG_TEXT_BUFFER_INFO(logtextbufferargs);}}
-#else
-#define CSR_LOG_TEXT_INFO(taskId_subOrigin_formatString_varargs)
-#define CSR_LOG_TEXT_CONDITIONAL_INFO(condition, logtextargs)
-#define CSR_LOG_TEXT_BUFFER_INFO(taskId_subOrigin_length_buffer_formatString_varargs)
-#define CSR_LOG_TEXT_BUFFER_CONDITIONAL_INFO(condition, logtextbufferargs)
-#endif
-
-/* DEBUG: Similar to INFO, but dedicated to events that occur more frequently. */
-#if defined(CSR_LOG_ENABLE) && !defined(CSR_LOG_LEVEL_TEXT_DEBUG_DISABLE)
-void CsrLogTextDebug(CsrLogTextTaskId taskId, u16 subOrigin, const char *formatString, ...);
-void CsrLogTextBufferDebug(CsrLogTextTaskId taskId, u16 subOrigin, size_t bufferLength, const void *buffer, const char *formatString, ...);
-#define CSR_LOG_TEXT_DEBUG(taskId_subOrigin_formatString_varargs) CsrLogTextDebug taskId_subOrigin_formatString_varargs
-#define CSR_LOG_TEXT_CONDITIONAL_DEBUG(condition, logtextargs) {if (condition) {CSR_LOG_TEXT_DEBUG(logtextargs);}}
-#define CSR_LOG_TEXT_BUFFER_DEBUG(taskId_subOrigin_length_buffer_formatString_varargs) CsrLogTextBufferDebug taskId_subOrigin_length_buffer_formatString_varargs
-#define CSR_LOG_TEXT_BUFFER_CONDITIONAL_DEBUG(condition, logtextbufferargs) {if (condition) {CSR_LOG_TEXT_BUFFER_DEBUG(logtextbufferargs);}}
-#else
-#define CSR_LOG_TEXT_DEBUG(taskId_subOrigin_formatString_varargs)
-#define CSR_LOG_TEXT_CONDITIONAL_DEBUG(condition, logtextargs)
-#define CSR_LOG_TEXT_BUFFER_DEBUG(taskId_subOrigin_length_buffer_formatString_varargs)
-#define CSR_LOG_TEXT_BUFFER_CONDITIONAL_DEBUG(condition, logtextbufferargs)
-#endif
-
-/* CSR_LOG_TEXT_ASSERT (CRITICAL) */
-#ifdef CSR_LOG_ENABLE
-#define CSR_LOG_TEXT_ASSERT(origin, suborigin, condition) \
- {if (!(condition)) {CSR_LOG_TEXT_CRITICAL((origin, suborigin, "Assertion \"%s\" failed at %s:%u", #condition, __FILE__, __LINE__));}}
-#else
-#define CSR_LOG_TEXT_ASSERT(origin, suborigin, condition)
-#endif
-
-/* CSR_LOG_TEXT_UNHANDLED_PRIM (CRITICAL) */
-#ifdef CSR_LOG_ENABLE
-#define CSR_LOG_TEXT_UNHANDLED_PRIMITIVE(origin, suborigin, primClass, primType) \
- CSR_LOG_TEXT_CRITICAL((origin, suborigin, "Unhandled primitive 0x%04X:0x%04X at %s:%u", primClass, primType, __FILE__, __LINE__))
-#else
-#define CSR_LOG_TEXT_UNHANDLED_PRIMITIVE(origin, suborigin, primClass, primType)
-#endif
-
-#endif
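
The CSR_LOG_TEXT_XXX macros above take their arguments wrapped in an extra pair of parentheses so that the entire call, variadic arguments included, can compile away to nothing when CSR_LOG_ENABLE is not defined. A usage sketch assuming the headers above are still available; MY_TASK_ID, MY_SUBORIGIN and the sub-origin table are placeholders, not CSR definitions:

#include <linux/kernel.h>
#include "csr_log_text.h"

#define MY_TASK_ID	((CsrLogTextTaskId) 0x06000001)	/* hypothetical task id */
#define MY_SUBORIGIN	0				/* hypothetical sub-origin */

static const CsrLogSubOrigin my_sub_origins[] = {
	{ MY_SUBORIGIN, "core" },
};

static void my_log_init(void)
{
	/* Registers the task name and sub-origin prefixes with the logger. */
	CsrLogTextRegister(MY_TASK_ID, "my_task",
			   ARRAY_SIZE(my_sub_origins), my_sub_origins);

	/* Note the double parentheses: the macro forwards the whole list. */
	CSR_LOG_TEXT_WARNING((MY_TASK_ID, MY_SUBORIGIN,
			      "unexpected state %d", 42));
}
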
diff --git a/drivers/staging/csr/csr_macro.h b/drivers/staging/csr/csr_macro.h
deleted file mode 100644
index c47f1d9..0000000
--- a/drivers/staging/csr/csr_macro.h
+++ /dev/null
@@ -1,39 +0,0 @@
-#ifndef CSR_MACRO_H__
-#define CSR_MACRO_H__
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2010
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-#include <linux/types.h>
-
-#define FALSE (0)
-#define TRUE (1)
-
-/*------------------------------------------------------------------*/
-/* Endian conversion */
-/*------------------------------------------------------------------*/
-#define CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr) (((u16) ((u8 *) (ptr))[0]) | ((u16) ((u8 *) (ptr))[1]) << 8)
-#define CSR_GET_UINT32_FROM_LITTLE_ENDIAN(ptr) (((u32) ((u8 *) (ptr))[0]) | ((u32) ((u8 *) (ptr))[1]) << 8 | \
- ((u32) ((u8 *) (ptr))[2]) << 16 | ((u32) ((u8 *) (ptr))[3]) << 24)
-#define CSR_COPY_UINT16_TO_LITTLE_ENDIAN(uint, ptr) ((u8 *) (ptr))[0] = ((u8) ((uint) & 0x00FF)); \
- ((u8 *) (ptr))[1] = ((u8) ((uint) >> 8))
-#define CSR_COPY_UINT32_TO_LITTLE_ENDIAN(uint, ptr) ((u8 *) (ptr))[0] = ((u8) ((uint) & 0x000000FF)); \
- ((u8 *) (ptr))[1] = ((u8) (((uint) >> 8) & 0x000000FF)); \
- ((u8 *) (ptr))[2] = ((u8) (((uint) >> 16) & 0x000000FF)); \
- ((u8 *) (ptr))[3] = ((u8) (((uint) >> 24) & 0x000000FF))
-
-/*------------------------------------------------------------------*/
-/* Misc */
-/*------------------------------------------------------------------*/
-/* Use this macro on unused local variables that cannot be removed (such as
- unused function parameters). This will quell warnings from certain compilers
- and static code analysis tools like Lint and Valgrind. */
-#define CSR_UNUSED(x) ((void) (x))
-
-#endif
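
The endian macros above build and split integers one byte at a time through a u8 pointer, so they work on unaligned buffers and are independent of host byte order (in-kernel code would typically reach for get_unaligned_le16()/put_unaligned_le16() instead). A small round-trip self-test using local copies of the two u16 macros:

#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;
typedef uint16_t u16;

/* Copies of the u16 macros from csr_macro.h above. */
#define CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr) (((u16) ((u8 *) (ptr))[0]) | ((u16) ((u8 *) (ptr))[1]) << 8)
#define CSR_COPY_UINT16_TO_LITTLE_ENDIAN(uint, ptr) ((u8 *) (ptr))[0] = ((u8) ((uint) & 0x00FF)); \
	((u8 *) (ptr))[1] = ((u8) ((uint) >> 8))

int main(void)
{
	u8 buf[2];
	u16 in = 0xBEEF, out;

	CSR_COPY_UINT16_TO_LITTLE_ENDIAN(in, buf);	/* buf = { 0xEF, 0xBE } */
	out = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(buf);

	printf("bytes %02x %02x -> 0x%04x\n", buf[0], buf[1], out);
	return 0;
}
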
diff --git a/drivers/staging/csr/csr_msg_transport.h b/drivers/staging/csr/csr_msg_transport.h
deleted file mode 100644
index 8d88e78..0000000
--- a/drivers/staging/csr/csr_msg_transport.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef CSR_MSG_TRANSPORT_H__
-#define CSR_MSG_TRANSPORT_H__
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2010
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-#ifndef CsrMsgTransport
-#define CsrMsgTransport CsrSchedMessagePut
-#endif
-
-#endif /* CSR_MSG_TRANSPORT */
diff --git a/drivers/staging/csr/csr_msgconv.c b/drivers/staging/csr/csr_msgconv.c
deleted file mode 100644
index db5e845..0000000
--- a/drivers/staging/csr/csr_msgconv.c
+++ /dev/null
@@ -1,291 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2010
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include "csr_sched.h"
-#include "csr_msgconv.h"
-#include "csr_macro.h"
-
-static CsrMsgConvEntry *converter;
-
-CsrMsgConvPrimEntry *CsrMsgConvFind(u16 primType)
-{
- CsrMsgConvPrimEntry *ptr = NULL;
-
- if (converter)
- {
- ptr = converter->profile_converters;
- while (ptr)
- {
- if (ptr->primType == primType)
- {
- break;
- }
- else
- {
- ptr = ptr->next;
- }
- }
- }
-
- return ptr;
-}
-
-static const CsrMsgConvMsgEntry *find_msg_converter(CsrMsgConvPrimEntry *ptr, u16 msgType)
-{
- const CsrMsgConvMsgEntry *cv = ptr->conv;
- if (ptr->lookupFunc)
- {
- return (const CsrMsgConvMsgEntry *) ptr->lookupFunc((CsrMsgConvMsgEntry *) cv, msgType);
- }
-
- while (cv)
- {
- if (cv->serFunc == NULL)
- {
- /* We've reached the end of the chain */
- cv = NULL;
- break;
- }
-
- if (cv->msgType == msgType)
- {
- break;
- }
- else
- {
- cv++;
- }
- }
-
- return cv;
-}
-
-static void *deserialize_data(u16 primType,
- size_t length,
- u8 *data)
-{
- CsrMsgConvPrimEntry *ptr;
- u8 *ret;
-
- ptr = CsrMsgConvFind(primType);
-
- if (ptr)
- {
- const CsrMsgConvMsgEntry *cv;
- u16 msgId = 0;
- size_t offset = 0;
- CsrUint16Des(&msgId, data, &offset);
-
- cv = find_msg_converter(ptr, msgId);
- if (cv)
- {
- ret = cv->deserFunc(data, length);
- }
- else
- {
- ret = NULL;
- }
- }
- else
- {
- ret = NULL;
- }
-
- return ret;
-}
-
-static size_t sizeof_message(u16 primType, void *msg)
-{
- CsrMsgConvPrimEntry *ptr = CsrMsgConvFind(primType);
- size_t ret;
-
- if (ptr)
- {
- const CsrMsgConvMsgEntry *cv;
- u16 msgId = *(u16 *) msg;
-
- cv = find_msg_converter(ptr, msgId);
- if (cv)
- {
- ret = cv->sizeofFunc(msg);
- }
- else
- {
- ret = 0;
- }
- }
- else
- {
- ret = 0;
- }
-
- return ret;
-}
-
-static u8 free_message(u16 primType, u8 *data)
-{
- CsrMsgConvPrimEntry *ptr;
- u8 ret;
-
- ptr = CsrMsgConvFind(primType);
-
- if (ptr)
- {
- const CsrMsgConvMsgEntry *cv;
- u16 msgId = *(u16 *) data;
-
- cv = find_msg_converter(ptr, msgId);
- if (cv)
- {
- cv->freeFunc(data);
- ret = TRUE;
- }
- else
- {
- ret = FALSE;
- }
- }
- else
- {
- ret = FALSE;
- }
-
- return ret;
-}
-
-static u8 *serialize_message(u16 primType,
- void *msg,
- size_t *length,
- u8 *buffer)
-{
- CsrMsgConvPrimEntry *ptr;
- u8 *ret;
-
- ptr = CsrMsgConvFind(primType);
-
- *length = 0;
-
- if (ptr)
- {
- const CsrMsgConvMsgEntry *cv;
-
- cv = find_msg_converter(ptr, *(u16 *) msg);
- if (cv)
- {
- ret = cv->serFunc(buffer, length, msg);
- }
- else
- {
- ret = NULL;
- }
- }
- else
- {
- ret = NULL;
- }
-
- return ret;
-}
-
-size_t CsrMsgConvSizeof(u16 primType, void *msg)
-{
- return sizeof_message(primType, msg);
-}
-
-u8 *CsrMsgConvSerialize(u8 *buffer, size_t maxBufferOffset, size_t *offset, u16 primType, void *msg)
-{
- if (converter)
- {
- size_t serializedLength;
- u8 *bufSerialized;
- u8 *bufOffset = &buffer[*offset];
- bufSerialized = converter->serialize_message(primType, msg, &serializedLength, bufOffset);
- *offset += serializedLength;
- return bufSerialized;
- }
- else
- {
- return NULL;
- }
-}
-
-/* Insert profile converter at head of converter list. */
-void CsrMsgConvInsert(u16 primType, const CsrMsgConvMsgEntry *ce)
-{
- CsrMsgConvPrimEntry *pc;
- pc = CsrMsgConvFind(primType);
-
- if (pc)
- {
- /* Already registered. Do nothing */
- }
- else
- {
- pc = kmalloc(sizeof(*pc), GFP_KERNEL);
- pc->primType = primType;
- pc->conv = ce;
- pc->lookupFunc = NULL;
- pc->next = converter->profile_converters;
- converter->profile_converters = pc;
- }
-}
-EXPORT_SYMBOL_GPL(CsrMsgConvInsert);
-
-CsrMsgConvMsgEntry *CsrMsgConvFindEntry(u16 primType, u16 msgType)
-{
- CsrMsgConvPrimEntry *ptr = CsrMsgConvFind(primType);
- if (ptr)
- {
- return (CsrMsgConvMsgEntry *) find_msg_converter(ptr, msgType);
- }
- return NULL;
-}
-EXPORT_SYMBOL_GPL(CsrMsgConvFindEntry);
-
-CsrMsgConvMsgEntry *CsrMsgConvFindEntryByMsg(u16 primType, const void *msg)
-{
- CsrMsgConvPrimEntry *ptr = CsrMsgConvFind(primType);
- if (ptr && msg)
- {
- u16 msgType = *((u16 *) msg);
- return (CsrMsgConvMsgEntry *) find_msg_converter(ptr, msgType);
- }
- return NULL;
-}
-
-void CsrMsgConvCustomLookupRegister(u16 primType, CsrMsgCustomLookupFunc *lookupFunc)
-{
- CsrMsgConvPrimEntry *ptr = CsrMsgConvFind(primType);
- if (ptr)
- {
- ptr->lookupFunc = lookupFunc;
- }
-}
-EXPORT_SYMBOL_GPL(CsrMsgConvCustomLookupRegister);
-
-CsrMsgConvEntry *CsrMsgConvInit(void)
-{
- if (!converter)
- {
- converter = kmalloc(sizeof(CsrMsgConvEntry), GFP_KERNEL);
-
- converter->profile_converters = NULL;
- converter->free_message = free_message;
- converter->sizeof_message = sizeof_message;
- converter->serialize_message = serialize_message;
- converter->deserialize_data = deserialize_data;
- }
-
- return converter;
-}
-EXPORT_SYMBOL_GPL(CsrMsgConvInit);
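The registry removed above maps a primitive type to a table of per-message converters: CsrMsgConvInit() creates the global converter singleton, CsrMsgConvInsert() prepends a primitive's table, and the CsrMsgConvSizeof()/CsrMsgConvSerialize() wrappers dispatch on the u16 message id stored in the first field of each message. A minimal usage sketch follows; the primitive number, message id and callback set are hypothetical, not part of the removed code, and error handling is omitted.

/* Illustrative only: primitive number, message id and callbacks are made up. */
#define MY_PRIM         ((u16) 0x1234)
#define MY_MSG_ID       ((u16) 0x0001)

static size_t my_msg_sizeof(void *msg);
static u8 *my_msg_ser(u8 *buffer, size_t *length, void *msg);
static void *my_msg_des(u8 *buffer, size_t length);
static void my_msg_free(void *msg);

static const CsrMsgConvMsgEntry my_msg_entries[] = {
        /* msgType,  sizeofFunc,    serFunc,    deserFunc,  freeFunc */
        { MY_MSG_ID, my_msg_sizeof, my_msg_ser, my_msg_des, my_msg_free },
};

static void my_send_example(void *msg)          /* msg begins with u16 == MY_MSG_ID */
{
        size_t size, offset = 0;
        u8 *buf;

        CsrMsgConvInit();                       /* creates the global converter */
        CsrMsgConvInsert(MY_PRIM, my_msg_entries);

        size = CsrMsgConvSizeof(MY_PRIM, msg);  /* via the sizeofFunc callback */
        buf = kmalloc(size, GFP_KERNEL);
        if (buf) {
                CsrMsgConvSerialize(buf, size, &offset, MY_PRIM, msg);
                /* ... hand 'buf' (offset bytes) to the transport ... */
                kfree(buf);
        }
}

How find_msg_converter() walks a table (and therefore how a real table must be terminated) is defined earlier in this file, outside this hunk, so the single-entry table shown is only indicative.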
diff --git a/drivers/staging/csr/csr_msgconv.h b/drivers/staging/csr/csr_msgconv.h
deleted file mode 100644
index 7e4dd38..0000000
--- a/drivers/staging/csr/csr_msgconv.h
+++ /dev/null
@@ -1,78 +0,0 @@
-#ifndef CSR_MSGCONV_H__
-#define CSR_MSGCONV_H__
-
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2010
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-#include <linux/types.h>
-#include "csr_prim_defs.h"
-#include "csr_sched.h"
-
-typedef size_t (CsrMsgSizeofFunc)(void *msg);
-typedef u8 *(CsrMsgSerializeFunc)(u8 *buffer, size_t *length, void *msg);
-typedef void (CsrMsgFreeFunc)(void *msg);
-typedef void *(CsrMsgDeserializeFunc)(u8 *buffer, size_t length);
-
-/* Converter entry for one message type */
-typedef struct CsrMsgConvMsgEntry
-{
- u16 msgType;
- CsrMsgSizeofFunc *sizeofFunc;
- CsrMsgSerializeFunc *serFunc;
- CsrMsgDeserializeFunc *deserFunc;
- CsrMsgFreeFunc *freeFunc;
-} CsrMsgConvMsgEntry;
-
-/* Optional lookup function */
-typedef CsrMsgConvMsgEntry *(CsrMsgCustomLookupFunc)(CsrMsgConvMsgEntry *ce, u16 msgType);
-
-/* All converter entries for one specific primitive */
-typedef struct CsrMsgConvPrimEntry
-{
- u16 primType;
- const CsrMsgConvMsgEntry *conv;
- CsrMsgCustomLookupFunc *lookupFunc;
- struct CsrMsgConvPrimEntry *next;
-} CsrMsgConvPrimEntry;
-
-typedef struct
-{
- CsrMsgConvPrimEntry *profile_converters;
- void *(*deserialize_data)(u16 primType, size_t length, u8 * data);
- u8 (*free_message)(u16 primType, u8 *data);
- size_t (*sizeof_message)(u16 primType, void *msg);
- u8 *(*serialize_message)(u16 primType, void *msg,
- size_t * length,
- u8 * buffer);
-} CsrMsgConvEntry;
-
-size_t CsrMsgConvSizeof(u16 primType, void *msg);
-u8 *CsrMsgConvSerialize(u8 *buffer, size_t maxBufferOffset, size_t *offset, u16 primType, void *msg);
-void CsrMsgConvCustomLookupRegister(u16 primType, CsrMsgCustomLookupFunc *lookupFunc);
-void CsrMsgConvInsert(u16 primType, const CsrMsgConvMsgEntry *ce);
-CsrMsgConvPrimEntry *CsrMsgConvFind(u16 primType);
-CsrMsgConvMsgEntry *CsrMsgConvFindEntry(u16 primType, u16 msgType);
-CsrMsgConvMsgEntry *CsrMsgConvFindEntryByMsg(u16 primType, const void *msg);
-CsrMsgConvEntry *CsrMsgConvInit(void);
-
-/* Prototypes for primitive type serializers */
-void CsrUint8Ser(u8 *buffer, size_t *offset, u8 value);
-void CsrUint16Ser(u8 *buffer, size_t *offset, u16 value);
-void CsrUint32Ser(u8 *buffer, size_t *offset, u32 value);
-void CsrMemCpySer(u8 *buffer, size_t *offset, const void *value, size_t length);
-void CsrCharStringSer(u8 *buffer, size_t *offset, const char *value);
-
-void CsrUint8Des(u8 *value, u8 *buffer, size_t *offset);
-void CsrUint16Des(u16 *value, u8 *buffer, size_t *offset);
-void CsrUint32Des(u32 *value, u8 *buffer, size_t *offset);
-void CsrMemCpyDes(void *value, u8 *buffer, size_t *offset, size_t length);
-void CsrCharStringDes(char **value, u8 *buffer, size_t *offset);
-
-#endif
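The four callback typedefs at the top of this header define the per-message contract: sizeofFunc reports the serialised size, serFunc and deserFunc convert between the in-memory struct and the byte stream while advancing the supplied length/offset, and freeFunc releases a deserialised message. A sketch of one such set for a hypothetical two-field message, built on the CsrUintXXSer/Des helpers declared above; the struct, field sizes and names are illustrative only.

/* Hypothetical message: the framework reads the leading u16 as the msgType. */
typedef struct {
        u16 type;
        u16 flags;
        u32 value;
} MyExampleMsg;

static size_t my_example_sizeof(void *msg)
{
        return 2 + 2 + 4;                       /* wire size: type + flags + value */
}

static u8 *my_example_ser(u8 *buffer, size_t *length, void *msg)
{
        MyExampleMsg *m = msg;                  /* *length arrives as 0 */

        CsrUint16Ser(buffer, length, m->type);
        CsrUint16Ser(buffer, length, m->flags);
        CsrUint32Ser(buffer, length, m->value);
        return buffer;
}

static void *my_example_des(u8 *buffer, size_t length)
{
        MyExampleMsg *m = kmalloc(sizeof(*m), GFP_KERNEL);
        size_t offset = 0;

        if (m) {
                CsrUint16Des(&m->type, buffer, &offset);
                CsrUint16Des(&m->flags, buffer, &offset);
                CsrUint32Des(&m->value, buffer, &offset);
        }
        return m;
}

static void my_example_free(void *msg)
{
        kfree(msg);
}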
diff --git a/drivers/staging/csr/csr_prim_defs.h b/drivers/staging/csr/csr_prim_defs.h
deleted file mode 100644
index 81a1eaa..0000000
--- a/drivers/staging/csr/csr_prim_defs.h
+++ /dev/null
@@ -1,55 +0,0 @@
-#ifndef CSR_PRIM_DEFS_H__
-#define CSR_PRIM_DEFS_H__
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2010
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/************************************************************************************
- * Segmentation of primitives in upstream and downstream segment
- ************************************************************************************/
-typedef u16 CsrPrim;
-#define CSR_PRIM_UPSTREAM ((CsrPrim) (0x8000))
-
-/************************************************************************************
- * Primitive definitions for Synergy framework
- ************************************************************************************/
-#define CSR_SYNERGY_EVENT_CLASS_BASE ((u16) (0x0600))
-
-#define CSR_HCI_PRIM ((u16) (0x0000 | CSR_SYNERGY_EVENT_CLASS_BASE))
-#define CSR_BCCMD_PRIM ((u16) (0x0001 | CSR_SYNERGY_EVENT_CLASS_BASE))
-#define CSR_HQ_PRIM ((u16) (0x0002 | CSR_SYNERGY_EVENT_CLASS_BASE))
-#define CSR_VM_PRIM ((u16) (0x0003 | CSR_SYNERGY_EVENT_CLASS_BASE))
-#define CSR_TM_BLUECORE_PRIM ((u16) (0x0004 | CSR_SYNERGY_EVENT_CLASS_BASE))
-#define CSR_FP_PRIM ((u16) (0x0005 | CSR_SYNERGY_EVENT_CLASS_BASE))
-#define CSR_IP_SOCKET_PRIM ((u16) (0x0006 | CSR_SYNERGY_EVENT_CLASS_BASE))
-#define CSR_IP_ETHER_PRIM ((u16) (0x0007 | CSR_SYNERGY_EVENT_CLASS_BASE))
-#define CSR_IP_IFCONFIG_PRIM ((u16) (0x0008 | CSR_SYNERGY_EVENT_CLASS_BASE))
-#define CSR_IP_INTERNAL_PRIM ((u16) (0x0009 | CSR_SYNERGY_EVENT_CLASS_BASE))
-#define CSR_FSAL_PRIM ((u16) (0x000A | CSR_SYNERGY_EVENT_CLASS_BASE))
-#define CSR_DATA_STORE_PRIM ((u16) (0x000B | CSR_SYNERGY_EVENT_CLASS_BASE))
-#define CSR_AM_PRIM ((u16) (0x000C | CSR_SYNERGY_EVENT_CLASS_BASE))
-#define CSR_TLS_PRIM ((u16) (0x000D | CSR_SYNERGY_EVENT_CLASS_BASE))
-#define CSR_DHCP_SERVER_PRIM ((u16) (0x000E | CSR_SYNERGY_EVENT_CLASS_BASE))
-#define CSR_TFTP_PRIM ((u16) (0x000F | CSR_SYNERGY_EVENT_CLASS_BASE))
-#define CSR_DSPM_PRIM ((u16) (0x0010 | CSR_SYNERGY_EVENT_CLASS_BASE))
-#define CSR_TLS_INTERNAL_PRIM ((u16) (0x0011 | CSR_SYNERGY_EVENT_CLASS_BASE))
-
-#define NUMBER_OF_CSR_FW_EVENTS (CSR_DSPM_PRIM - CSR_SYNERGY_EVENT_CLASS_BASE + 1)
-
-#define CSR_SYNERGY_EVENT_CLASS_MISC_BASE ((u16) (0x06A0))
-
-#define CSR_UI_PRIM ((u16) (0x0000 | CSR_SYNERGY_EVENT_CLASS_MISC_BASE))
-#define CSR_APP_PRIM ((u16) (0x0001 | CSR_SYNERGY_EVENT_CLASS_MISC_BASE))
-#define CSR_SDIO_PROBE_PRIM ((u16) (0x0002 | CSR_SYNERGY_EVENT_CLASS_MISC_BASE))
-
-#define NUMBER_OF_CSR_FW_MISC_EVENTS (CSR_SDIO_PROBE_PRIM - CSR_SYNERGY_EVENT_CLASS_MISC_BASE + 1)
-
-#define CSR_ENV_PRIM ((u16) (0x00FF | CSR_SYNERGY_EVENT_CLASS_MISC_BASE))
-
-#endif /* CSR_PRIM_DEFS_H__ */
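The layout above splits the 16-bit primitive space twice: bit 15 (CSR_PRIM_UPSTREAM) separates upstream from downstream traffic, and each subsystem's event class is an offset from one of the two class bases. Two illustrative helpers, not part of the removed header, show how such values are typically tested and indexed:

static inline int csr_prim_is_upstream(CsrPrim prim)
{
        return (prim & CSR_PRIM_UPSTREAM) != 0;
}

static inline u16 csr_fw_event_index(u16 eventClass)
{
        /* 0 .. NUMBER_OF_CSR_FW_EVENTS - 1 for the main event class block */
        return eventClass - CSR_SYNERGY_EVENT_CLASS_BASE;
}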
diff --git a/drivers/staging/csr/csr_result.h b/drivers/staging/csr/csr_result.h
deleted file mode 100644
index cbb607d..0000000
--- a/drivers/staging/csr/csr_result.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef CSR_RESULT_H__
-#define CSR_RESULT_H__
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2010
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-typedef u16 CsrResult;
-#define CSR_RESULT_SUCCESS ((CsrResult) 0x0000)
-#define CSR_RESULT_FAILURE ((CsrResult) 0xFFFF)
-
-#endif
diff --git a/drivers/staging/csr/csr_sched.h b/drivers/staging/csr/csr_sched.h
deleted file mode 100644
index c7d672c..0000000
--- a/drivers/staging/csr/csr_sched.h
+++ /dev/null
@@ -1,85 +0,0 @@
-#ifndef CSR_SCHED_H__
-#define CSR_SCHED_H__
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2010
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-#include <linux/types.h>
-#include "csr_time.h"
-
-/* An identifier issued by the scheduler. */
-typedef u32 CsrSchedIdentifier;
-
-/* A task identifier */
-typedef u16 CsrSchedTaskId;
-
-/* A queue identifier */
-typedef u16 CsrSchedQid;
-
-/* A message identifier */
-typedef CsrSchedIdentifier CsrSchedMsgId;
-
-/* A timer event identifier */
-typedef CsrSchedIdentifier CsrSchedTid;
-#define CSR_SCHED_TID_INVALID ((CsrSchedTid) 0)
-
-/* Time constants. */
-#define CSR_SCHED_TIME_MAX (0xFFFFFFFF)
-#define CSR_SCHED_MILLISECOND (1000)
-#define CSR_SCHED_SECOND (1000 * CSR_SCHED_MILLISECOND)
-#define CSR_SCHED_MINUTE (60 * CSR_SCHED_SECOND)
-
-/* Queue and primitive that identifies the environment */
-#define CSR_SCHED_TASK_ID 0xFFFF
-#define CSR_SCHED_PRIM (CSR_SCHED_TASK_ID)
-#define CSR_SCHED_EXCLUDED_MODULE_QUEUE 0xFFFF
-
-/*
- * Background interrupt definitions
- */
-typedef u16 CsrSchedBgint;
-#define CSR_SCHED_BGINT_INVALID ((CsrSchedBgint) 0xFFFF)
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrSchedMessagePut
- *
- * DESCRIPTION
- * Sends a message consisting of the integer "mi" and the void * pointer
- * "mv" to the message queue "q".
- *
- * "mi" and "mv" are neither inspected nor changed by the scheduler - the
- * task that owns "q" is expected to make sense of the values. "mv" may
- * be null.
- *
- * NOTE
- * If "mv" is not null then it will typically be a chunk of kmalloc()ed
- * memory, though there is no need for it to be so. Tasks should normally
- * obey the convention that when a message built with kmalloc()ed memory
- * is given to CsrSchedMessagePut() then ownership of the memory is ceded to the
- * scheduler - and eventually to the recipient task. I.e., the receiver of
- * the message will be expected to kfree() the message storage.
- *
- * RETURNS
- * void.
- *
- *----------------------------------------------------------------------------*/
-#if defined(CSR_LOG_ENABLE) && defined(CSR_LOG_INCLUDE_FILE_NAME_AND_LINE_NUMBER)
-void CsrSchedMessagePutStringLog(CsrSchedQid q,
- u16 mi,
- void *mv,
- u32 line,
- const char *file);
-#define CsrSchedMessagePut(q, mi, mv) CsrSchedMessagePutStringLog((q), (mi), (mv), __LINE__, __FILE__)
-#else
-void CsrSchedMessagePut(CsrSchedQid q,
- u16 mi,
- void *mv);
-#endif
-
-#endif
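The NOTE above fixes the ownership convention for queued messages: the sender kmalloc()s the payload, hands it to CsrSchedMessagePut(), and the receiving task kfree()s it. A sketch of both ends with a hypothetical queue id, message id and payload layout:

/* Hypothetical queue id, message id and payload; illustration only. */
#define MY_TASK_QUEUE   ((CsrSchedQid) 0x0001)
#define MY_MSG_ID       ((u16) 0x1234)

struct my_payload {
        u32 value;
};

static void my_sender(void)
{
        struct my_payload *p = kmalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
                return;
        p->value = 42;
        /* Ownership of 'p' passes to the scheduler, then to the task that
         * owns MY_TASK_QUEUE. */
        CsrSchedMessagePut(MY_TASK_QUEUE, MY_MSG_ID, p);
}

static void my_receiver(u16 mi, void *mv)       /* called by the owning task */
{
        struct my_payload *p = mv;

        if (mi == MY_MSG_ID)
                kfree(p);                       /* receiver frees, per the NOTE */
}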
diff --git a/drivers/staging/csr/csr_sdio.h b/drivers/staging/csr/csr_sdio.h
deleted file mode 100644
index 0971d13..0000000
--- a/drivers/staging/csr/csr_sdio.h
+++ /dev/null
@@ -1,723 +0,0 @@
-#ifndef CSR_SDIO_H__
-#define CSR_SDIO_H__
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2010
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-#include "csr_result.h"
-
-/* Result Codes */
-#define CSR_SDIO_RESULT_INVALID_VALUE ((CsrResult) 1) /* Invalid argument value */
-#define CSR_SDIO_RESULT_NO_DEVICE ((CsrResult) 2) /* The specified device is no longer present */
-#define CSR_SDIO_RESULT_CRC_ERROR ((CsrResult) 3) /* The transmitted/received data or command response contained a CRC error */
-#define CSR_SDIO_RESULT_TIMEOUT ((CsrResult) 4) /* No command response or data received from device, or function enable/disable did not succeed within timeout period */
-#define CSR_SDIO_RESULT_NOT_RESET ((CsrResult) 5) /* The device was not reset */
-
-/* Features (for use in features member of CsrSdioFunction) */
-#define CSR_SDIO_FEATURE_BYTE_MODE 0x00000001 /* Transfer sizes do not have to be a multiple of block size */
-#define CSR_SDIO_FEATURE_DMA_CAPABLE_MEM_REQUIRED 0x00000002 /* Bulk operations require DMA friendly memory */
-
-/* CsrSdioFunctionId wildcards (for use in CsrSdioFunctionId members) */
-#define CSR_SDIO_ANY_MANF_ID 0xFFFF
-#define CSR_SDIO_ANY_CARD_ID 0xFFFF
-#define CSR_SDIO_ANY_SDIO_FUNCTION 0xFF
-#define CSR_SDIO_ANY_SDIO_INTERFACE 0xFF
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrSdioFunctionId
- *
- * DESCRIPTION
- * This structure describes one or more functions of a device, based on
- * four qualitative measures. The CsrSdioFunctionId wildcard defines can be
- *      four identifying fields. The CsrSdioFunctionId wildcard defines can be
- *
- * MEMBERS
- * manfId - Vendor ID (or CSR_SDIO_ANY_MANF_ID).
- * cardId - Device ID (or CSR_SDIO_ANY_CARD_ID).
- * sdioFunction - SDIO Function number (or CSR_SDIO_ANY_SDIO_FUNCTION).
- * sdioInterface - SDIO Standard Interface Code (or CSR_SDIO_ANY_SDIO_INTERFACE)
- *
- *----------------------------------------------------------------------------*/
-typedef struct
-{
- u16 manfId; /* Vendor ID to match or CSR_SDIO_ANY_MANF_ID */
- u16 cardId; /* Device ID to match or CSR_SDIO_ANY_CARD_ID */
- u8 sdioFunction; /* SDIO Function number to match or CSR_SDIO_ANY_SDIO_FUNCTION */
- u8 sdioInterface; /* SDIO Standard Interface Code to match or CSR_SDIO_ANY_SDIO_INTERFACE */
-} CsrSdioFunctionId;
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrSdioFunction
- *
- * DESCRIPTION
- * This structure represents a single function on a device.
- *
- * MEMBERS
- * sdioId - A CsrSdioFunctionId describing this particular function. The
- *            subfields shall not contain any CsrSdioFunctionId wildcards. The
- * subfields shall describe the specific single function
- * represented by this structure.
- * blockSize - Actual configured block size, or 0 if unconfigured.
- * features - Bit mask with any of CSR_SDIO_FEATURE_* set.
- * device - Handle of device containing the function. If two functions have
- * the same device handle, they reside on the same device.
- * driverData - For use by the Function Driver. The SDIO Driver shall not
- * attempt to dereference the pointer.
- * priv - For use by the SDIO Driver. The Function Driver shall not attempt
- * to dereference the pointer.
- *
- *
- *----------------------------------------------------------------------------*/
-typedef struct
-{
- CsrSdioFunctionId sdioId;
- u16 blockSize; /* Actual configured block size, or 0 if unconfigured */
- u32 features; /* Bit mask with any of CSR_SDIO_FEATURE_* set */
- void *device; /* Handle of device containing the function */
- void *driverData; /* For use by the Function Driver */
- void *priv; /* For use by the SDIO Driver */
-} CsrSdioFunction;
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrSdioInsertedCallback, CsrSdioRemovedCallback
- *
- * DESCRIPTION
- * CsrSdioInsertedCallback is called when a function becomes available to
- * a registered Function Driver that supports the function.
- * CsrSdioRemovedCallback is called when a function is no longer available
- * to a Function Driver, either because the device has been removed, or the
- * Function Driver has been unregistered.
- *
- * NOTE: These functions are implemented by the Function Driver, and are
- * passed as function pointers in the CsrSdioFunctionDriver struct.
- *
- * PARAMETERS
- * function - Pointer to struct representing the function.
- *
- *----------------------------------------------------------------------------*/
-typedef void (*CsrSdioInsertedCallback)(CsrSdioFunction *function);
-typedef void (*CsrSdioRemovedCallback)(CsrSdioFunction *function);
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrSdioInterruptDsrCallback, CsrSdioInterruptCallback
- *
- * DESCRIPTION
- *      CsrSdioInterruptCallback is called when an interrupt occurs on
- *      the device associated with the specified function.
- *
- * NOTE: These functions are implemented by the Function Driver, and are
- * passed as function pointers in the CsrSdioFunctionDriver struct.
- *
- * PARAMETERS
- * function - Pointer to struct representing the function.
- *
- * RETURNS (only CsrSdioInterruptCallback)
- * A pointer to a CsrSdioInterruptDsrCallback function.
- *
- *----------------------------------------------------------------------------*/
-typedef void (*CsrSdioInterruptDsrCallback)(CsrSdioFunction *function);
-typedef CsrSdioInterruptDsrCallback (*CsrSdioInterruptCallback)(CsrSdioFunction *function);
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrSdioSuspendCallback, CsrSdioResumeCallback
- *
- * DESCRIPTION
- * CsrSdioSuspendCallback is called when the system is preparing to go
- * into a suspended state. CsrSdioResumeCallback is called when the system
- * has entered an active state again.
- *
- * NOTE: These functions are implemented by the Function Driver, and are
- * passed as function pointers in the CsrSdioFunctionDriver struct.
- *
- * PARAMETERS
- * function - Pointer to struct representing the function.
- *
- *----------------------------------------------------------------------------*/
-typedef void (*CsrSdioSuspendCallback)(CsrSdioFunction *function);
-typedef void (*CsrSdioResumeCallback)(CsrSdioFunction *function);
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrSdioAsyncCallback, CsrSdioAsyncDsrCallback
- *
- * DESCRIPTION
- * CsrSdioAsyncCallback is called when an asynchronous operation completes.
- *
- * NOTE: These functions are implemented by the Function Driver, and are
- * passed as function pointers in the function calls that initiate
- * the operation.
- *
- * PARAMETERS
- * function - Pointer to struct representing the function.
- * result - The result of the operation that completed. See the description
- * of the initiating function for possible result values.
- *
- * RETURNS (only CsrSdioAsyncCallback)
- * A pointer to a CsrSdioAsyncDsrCallback function.
- *
- *----------------------------------------------------------------------------*/
-typedef void (*CsrSdioAsyncDsrCallback)(CsrSdioFunction *function, CsrResult result);
-typedef CsrSdioAsyncDsrCallback (*CsrSdioAsyncCallback)(CsrSdioFunction *function, CsrResult result);
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrSdioFunctionDriver
- *
- * DESCRIPTION
- * Structure representing a Function Driver.
- *
- * MEMBERS
- * inserted - Callback, see description of CsrSdioInsertedCallback.
- * removed - Callback, see description of CsrSdioRemovedCallback.
- * intr - Callback, see description of CsrSdioInterruptCallback.
- * suspend - Callback, see description of CsrSdioSuspendCallback.
- * resume - Callback, see description of CsrSdioResumeCallback.
- * ids - Array of CsrSdioFunctionId describing one or more functions that
- * are supported by the Function Driver.
- * idsCount - Length of the ids array.
- * priv - For use by the SDIO Driver. The Function Driver may initialise
- * it to NULL, but shall otherwise not access the pointer or attempt
- * to dereference it.
- *
- *----------------------------------------------------------------------------*/
-typedef struct
-{
- CsrSdioInsertedCallback inserted;
- CsrSdioRemovedCallback removed;
- CsrSdioInterruptCallback intr;
- CsrSdioSuspendCallback suspend;
- CsrSdioResumeCallback resume;
- CsrSdioFunctionId *ids;
- u8 idsCount;
- void *priv; /* For use by the SDIO Driver */
-} CsrSdioFunctionDriver;
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrSdioFunctionDriverRegister
- *
- * DESCRIPTION
- * Register a Function Driver.
- *
- * PARAMETERS
- * functionDriver - Pointer to struct describing the Function Driver.
- *
- * RETURNS
- * CSR_RESULT_SUCCESS - The Function Driver was successfully
- * registered.
- * CSR_RESULT_FAILURE - Unable to register the function driver,
- * because of an unspecified/unknown error. The
- * Function Driver has not been registered.
- * CSR_SDIO_RESULT_INVALID_VALUE - The specified Function Driver pointer
- * does not point at a valid Function
- * Driver structure, or some of the members
- * contain invalid entries.
- *
- *----------------------------------------------------------------------------*/
-CsrResult CsrSdioFunctionDriverRegister(CsrSdioFunctionDriver *functionDriver);
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrSdioFunctionDriverUnregister
- *
- * DESCRIPTION
- * Unregister a previously registered Function Driver.
- *
- * PARAMETERS
- * functionDriver - pointer to struct describing the Function Driver.
- *
- *----------------------------------------------------------------------------*/
-void CsrSdioFunctionDriverUnregister(CsrSdioFunctionDriver *functionDriver);
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrSdioFunctionEnable, CsrSdioFunctionDisable
- *
- * DESCRIPTION
- * Enable/disable the specified function by setting/clearing the
- * corresponding bit in the I/O Enable register in function 0, and then
- * periodically reading the related bit in the I/O Ready register until it
- * is set/clear, limited by an implementation defined timeout.
- *
- * PARAMETERS
- * function - Pointer to struct representing the function.
- *
- * RETURNS
- * CSR_RESULT_SUCCESS - The specified function was enabled/disabled.
- * CSR_RESULT_FAILURE - Unspecified/unknown error.
- * CSR_SDIO_RESULT_NO_DEVICE - The device does not exist anymore.
- * CSR_SDIO_RESULT_CRC_ERROR - A CRC error occurred. The state of the
- * related bit in the I/O Enable register is
- * undefined.
- * CSR_SDIO_RESULT_TIMEOUT - No response from the device, or the related
- * bit in the I/O ready register was not
- * set/cleared within the timeout period.
- *
- * NOTE: If the SDIO R5 response is available, and either of the
- * FUNCTION_NUMBER or OUT_OF_RANGE bits are set,
- * CSR_SDIO_RESULT_INVALID_VALUE shall be returned. If the ERROR bit
- * is set (but none of FUNCTION_NUMBER or OUT_OF_RANGE),
- * CSR_RESULT_FAILURE shall be returned. The ILLEGAL_COMMAND and
- * COM_CRC_ERROR bits shall be ignored.
- *
- * If the CSPI response is available, and any of the
- * FUNCTION_DISABLED or CLOCK_DISABLED bits are set,
- * CSR_SDIO_RESULT_INVALID_VALUE will be returned.
- *
- *----------------------------------------------------------------------------*/
-CsrResult CsrSdioFunctionEnable(CsrSdioFunction *function);
-CsrResult CsrSdioFunctionDisable(CsrSdioFunction *function);
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrSdioInterruptEnable, CsrSdioInterruptDisable
- *
- * DESCRIPTION
- * Enable/disable the interrupt for the specified function by
- * setting/clearing the corresponding bit in the INT Enable register in
- * function 0.
- *
- * PARAMETERS
- * function - Pointer to struct representing the function.
- *
- * RETURNS
- * CSR_RESULT_SUCCESS - The specified function was enabled/disabled.
- * CSR_RESULT_FAILURE - Unspecified/unknown error.
- * CSR_SDIO_RESULT_NO_DEVICE - The device does not exist anymore.
- * CSR_SDIO_RESULT_CRC_ERROR - A CRC error occurred. The state of the
- * related bit in the INT Enable register is
- * unchanged.
- * CSR_SDIO_RESULT_INVALID_VALUE - The specified function cannot be
- * enabled/disabled, because it either
- * does not exist or it is not possible to
- * individually enable/disable functions.
- * CSR_SDIO_RESULT_TIMEOUT - No response from the device.
- *
- * NOTE: If the SDIO R5 response is available, and either of the
- * FUNCTION_NUMBER or OUT_OF_RANGE bits are set,
- * CSR_SDIO_RESULT_INVALID_VALUE shall be returned. If the ERROR bit
- * is set (but none of FUNCTION_NUMBER or OUT_OF_RANGE),
- * CSR_RESULT_FAILURE shall be returned. The ILLEGAL_COMMAND and
- * COM_CRC_ERROR bits shall be ignored.
- *
- * If the CSPI response is available, and any of the
- * FUNCTION_DISABLED or CLOCK_DISABLED bits are set,
- * CSR_SDIO_RESULT_INVALID_VALUE will be returned.
- *
- *----------------------------------------------------------------------------*/
-CsrResult CsrSdioInterruptEnable(CsrSdioFunction *function);
-CsrResult CsrSdioInterruptDisable(CsrSdioFunction *function);
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrSdioInterruptAcknowledge
- *
- * DESCRIPTION
- * Acknowledge that a signalled interrupt has been handled. Shall only
- * be called once, and exactly once for each signalled interrupt to the
- * corresponding function.
- *
- * PARAMETERS
- * function - Pointer to struct representing the function to which the
- * event was signalled.
- *
- *----------------------------------------------------------------------------*/
-void CsrSdioInterruptAcknowledge(CsrSdioFunction *function);
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrSdioInsertedAcknowledge, CsrSdioRemovedAcknowledge
- *
- * DESCRIPTION
- * Acknowledge that a signalled inserted/removed event has been handled.
- * Shall only be called once, and exactly once for each signalled event to
- * the corresponding function.
- *
- * PARAMETERS
- * function - Pointer to struct representing the function to which the
- *                 inserted/removed event was signalled.
- * result (CsrSdioInsertedAcknowledge only)
- * CSR_RESULT_SUCCESS - The Function Driver has accepted the
- * function, and the function is attached to
- * the Function Driver until the
- * CsrSdioRemovedCallback is called and
- * acknowledged.
- * CSR_RESULT_FAILURE - Unable to accept the function. The
- * function is not attached to the Function
- * Driver, and it may be passed to another
- * Function Driver which supports the
- * function.
- *
- *----------------------------------------------------------------------------*/
-void CsrSdioInsertedAcknowledge(CsrSdioFunction *function, CsrResult result);
-void CsrSdioRemovedAcknowledge(CsrSdioFunction *function);
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrSdioSuspendAcknowledge, CsrSdioResumeAcknowledge
- *
- * DESCRIPTION
- *      Acknowledge that a signalled suspend/resume event has been handled. Shall only
- * be called once, and exactly once for each signalled event to the
- * corresponding function.
- *
- * PARAMETERS
- * function - Pointer to struct representing the function to which the
- * event was signalled.
- * result
- * CSR_RESULT_SUCCESS - Successfully suspended/resumed.
- * CSR_RESULT_FAILURE - Unspecified/unknown error.
- *
- *----------------------------------------------------------------------------*/
-void CsrSdioSuspendAcknowledge(CsrSdioFunction *function, CsrResult result);
-void CsrSdioResumeAcknowledge(CsrSdioFunction *function, CsrResult result);
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrSdioBlockSizeSet
- *
- * DESCRIPTION
- * Set the block size to use for the function. The actual configured block
- * size shall be the minimum of:
- * 1) Maximum block size supported by the function.
- * 2) Maximum block size supported by the host controller.
- * 3) The block size specified by the blockSize argument.
- *
- * When this function returns, the actual configured block size is
- * available in the blockSize member of the function struct.
- *
- * PARAMETERS
- * function - Pointer to struct representing the function.
- * blockSize - Block size to use for the function. Valid range is 1 to
- * 2048.
- *
- * RETURNS
- * CSR_RESULT_SUCCESS - The block size register on the chip
- * was updated.
- * CSR_RESULT_FAILURE - Unspecified/unknown error.
- * CSR_SDIO_RESULT_INVALID_VALUE - One or more arguments were invalid.
- * CSR_SDIO_RESULT_NO_DEVICE - The device does not exist anymore.
- * CSR_SDIO_RESULT_CRC_ERROR - A CRC error occurred. The configured block
- * size is undefined.
- * CSR_SDIO_RESULT_TIMEOUT - No response from the device.
- *
- * NOTE: If the SDIO R5 response is available, and the FUNCTION_NUMBER
- *            bit is set, CSR_SDIO_RESULT_INVALID_VALUE shall be returned.
- * If the ERROR bit is set (but not FUNCTION_NUMBER),
- * CSR_RESULT_FAILURE shall be returned. The ILLEGAL_COMMAND and
- * COM_CRC_ERROR bits shall be ignored.
- *
- * If the CSPI response is available, and any of the
- * FUNCTION_DISABLED or CLOCK_DISABLED bits are set,
- * CSR_SDIO_RESULT_INVALID_VALUE will be returned.
- *
- * NOTE: Setting the block size requires two individual operations. The
- * implementation shall ignore the OUT_OF_RANGE bit of the SDIO R5
- * response for the first operation, as the partially configured
- * block size may be out of range, even if the final block size
- * (after the second operation) is in the valid range.
- *
- *----------------------------------------------------------------------------*/
-CsrResult CsrSdioBlockSizeSet(CsrSdioFunction *function, u16 blockSize);
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrSdioMaxBusClockFrequencySet
- *
- * DESCRIPTION
- * Set the maximum clock frequency to use for the device associated with
- * the specified function. The actual configured clock frequency for the
- * device shall be the minimum of:
- * 1) Maximum clock frequency supported by the device.
- * 2) Maximum clock frequency supported by the host controller.
- * 3) Maximum clock frequency specified for any function on the same
- * device.
- *
- * If the clock frequency exceeds 25MHz, it is the responsibility of the
- * SDIO driver to enable high speed mode on the device, using the standard
- * defined procedure, before increasing the frequency beyond the limit.
- *
- * Note that the clock frequency configured affects all functions on the
- * same device.
- *
- * PARAMETERS
- * function - Pointer to struct representing the function.
- * maxFrequency - The maximum clock frequency for the function in Hertz.
- *
- * RETURNS
- * CSR_RESULT_SUCCESS - The maximum clock frequency was successfully
- * set for the function.
- * CSR_RESULT_FAILURE - Unspecified/unknown error.
- * CSR_SDIO_RESULT_INVALID_VALUE - One or more arguments were invalid.
- * CSR_SDIO_RESULT_NO_DEVICE - The device does not exist anymore.
- *
- * NOTE: If the SDIO R5 response is available, and the FUNCTION_NUMBER
- *            bit is set, CSR_SDIO_RESULT_INVALID_VALUE shall be returned.
- * If the ERROR bit is set (but not FUNCTION_NUMBER),
- * CSR_RESULT_FAILURE shall be returned. The ILLEGAL_COMMAND and
- * COM_CRC_ERROR bits shall be ignored.
- *
- * If the CSPI response is available, and any of the
- * FUNCTION_DISABLED or CLOCK_DISABLED bits are set,
- * CSR_SDIO_RESULT_INVALID_VALUE will be returned.
- *
- *
- *----------------------------------------------------------------------------*/
-CsrResult CsrSdioMaxBusClockFrequencySet(CsrSdioFunction *function, u32 maxFrequency);
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrSdioRead8, CsrSdioWrite8, CsrSdioRead8Async, CsrSdioWrite8Async
- *
- * DESCRIPTION
- * Read/write an 8bit value from/to the specified register address.
- *
- * PARAMETERS
- * function - Pointer to struct representing the function.
- * address - Register address within the function.
- * data - The data to read/write.
- * callback - The function to call on operation completion.
- *
- * RETURNS
- * CSR_RESULT_SUCCESS - The data was successfully read/written.
- * CSR_RESULT_FAILURE - Unspecified/unknown error.
- * CSR_SDIO_RESULT_INVALID_VALUE - One or more arguments were invalid.
- * CSR_SDIO_RESULT_NO_DEVICE - The device does not exist anymore.
- * CSR_SDIO_RESULT_CRC_ERROR - A CRC error occurred. No data read/written.
- * CSR_SDIO_RESULT_TIMEOUT - No response from the device.
- *
- * NOTE: If the SDIO R5 response is available, and either of the
- * FUNCTION_NUMBER or OUT_OF_RANGE bits are set,
- * CSR_SDIO_RESULT_INVALID_VALUE shall be returned. If the ERROR bit
- * is set (but none of FUNCTION_NUMBER or OUT_OF_RANGE),
- * CSR_RESULT_FAILURE shall be returned. The ILLEGAL_COMMAND and
- * COM_CRC_ERROR bits shall be ignored.
- *
- * If the CSPI response is available, and any of the
- * FUNCTION_DISABLED or CLOCK_DISABLED bits are set,
- * CSR_SDIO_RESULT_INVALID_VALUE will be returned.
- *
- * NOTE: The CsrSdioRead8Async and CsrSdioWrite8Async functions return
- * immediately, and the supplied callback function is called when the
- * operation is complete. The result value is given as an argument to
- * the callback function.
- *
- *----------------------------------------------------------------------------*/
-CsrResult CsrSdioRead8(CsrSdioFunction *function, u32 address, u8 *data);
-CsrResult CsrSdioWrite8(CsrSdioFunction *function, u32 address, u8 data);
-void CsrSdioRead8Async(CsrSdioFunction *function, u32 address, u8 *data, CsrSdioAsyncCallback callback);
-void CsrSdioWrite8Async(CsrSdioFunction *function, u32 address, u8 data, CsrSdioAsyncCallback callback);
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrSdioRead16, CsrSdioWrite16, CsrSdioRead16Async, CsrSdioWrite16Async
- *
- * DESCRIPTION
- * Read/write a 16bit value from/to the specified register address.
- *
- * PARAMETERS
- * function - Pointer to struct representing the function.
- * address - Register address within the function.
- * data - The data to read/write.
- * callback - The function to call on operation completion.
- *
- * RETURNS
- * CSR_RESULT_SUCCESS - The data was successfully read/written.
- * CSR_RESULT_FAILURE - Unspecified/unknown error.
- * CSR_SDIO_RESULT_INVALID_VALUE - One or more arguments were invalid.
- * CSR_SDIO_RESULT_NO_DEVICE - The device does not exist anymore.
- * CSR_SDIO_RESULT_CRC_ERROR - A CRC error occurred. Data may have been
- * partially read/written.
- * CSR_SDIO_RESULT_TIMEOUT - No response from the device.
- *
- * NOTE: If the SDIO R5 response is available, and either of the
- * FUNCTION_NUMBER or OUT_OF_RANGE bits are set,
- * CSR_SDIO_RESULT_INVALID_VALUE shall be returned. If the ERROR bit
- * is set (but none of FUNCTION_NUMBER or OUT_OF_RANGE),
- * CSR_RESULT_FAILURE shall be returned. The ILLEGAL_COMMAND and
- * COM_CRC_ERROR bits shall be ignored.
- *
- * If the CSPI response is available, and any of the
- * FUNCTION_DISABLED or CLOCK_DISABLED bits are set,
- * CSR_SDIO_RESULT_INVALID_VALUE will be returned.
- *
- * NOTE: The CsrSdioRead16Async and CsrSdioWrite16Async functions return
- * immediately, and the supplied callback function is called when the
- * operation is complete. The result value is given as an argument to
- * the callback function.
- *
- *----------------------------------------------------------------------------*/
-CsrResult CsrSdioRead16(CsrSdioFunction *function, u32 address, u16 *data);
-CsrResult CsrSdioWrite16(CsrSdioFunction *function, u32 address, u16 data);
-void CsrSdioRead16Async(CsrSdioFunction *function, u32 address, u16 *data, CsrSdioAsyncCallback callback);
-void CsrSdioWrite16Async(CsrSdioFunction *function, u32 address, u16 data, CsrSdioAsyncCallback callback);
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrSdioF0Read8, CsrSdioF0Write8, CsrSdioF0Read8Async,
- * CsrSdioF0Write8Async
- *
- * DESCRIPTION
- * Read/write an 8bit value from/to the specified register address in
- * function 0.
- *
- * PARAMETERS
- * function - Pointer to struct representing the function.
- * address - Register address within the function.
- * data - The data to read/write.
- * callback - The function to call on operation completion.
- *
- * RETURNS
- * CSR_RESULT_SUCCESS - The data was successfully read/written.
- * CSR_RESULT_FAILURE - Unspecified/unknown error.
- * CSR_SDIO_RESULT_INVALID_VALUE - One or more arguments were invalid.
- * CSR_SDIO_RESULT_NO_DEVICE - The device does not exist anymore.
- * CSR_SDIO_RESULT_CRC_ERROR - A CRC error occurred. No data read/written.
- * CSR_SDIO_RESULT_TIMEOUT - No response from the device.
- *
- * NOTE: If the SDIO R5 response is available, and either of the
- * FUNCTION_NUMBER or OUT_OF_RANGE bits are set,
- * CSR_SDIO_RESULT_INVALID_VALUE shall be returned. If the ERROR bit
- * is set (but none of FUNCTION_NUMBER or OUT_OF_RANGE),
- * CSR_RESULT_FAILURE shall be returned. The ILLEGAL_COMMAND and
- * COM_CRC_ERROR bits shall be ignored.
- *
- * If the CSPI response is available, and any of the
- * FUNCTION_DISABLED or CLOCK_DISABLED bits are set,
- * CSR_SDIO_RESULT_INVALID_VALUE will be returned.
- *
- * NOTE: The CsrSdioF0Read8Async and CsrSdioF0Write8Async functions return
- * immediately, and the supplied callback function is called when the
- * operation is complete. The result value is given as an argument to
- * the callback function.
- *
- *----------------------------------------------------------------------------*/
-CsrResult CsrSdioF0Read8(CsrSdioFunction *function, u32 address, u8 *data);
-CsrResult CsrSdioF0Write8(CsrSdioFunction *function, u32 address, u8 data);
-void CsrSdioF0Read8Async(CsrSdioFunction *function, u32 address, u8 *data, CsrSdioAsyncCallback callback);
-void CsrSdioF0Write8Async(CsrSdioFunction *function, u32 address, u8 data, CsrSdioAsyncCallback callback);
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrSdioRead, CsrSdioWrite, CsrSdioReadAsync, CsrSdioWriteAsync
- *
- * DESCRIPTION
- * Read/write a specified number of bytes from/to the specified register
- * address.
- *
- * PARAMETERS
- * function - Pointer to struct representing the function.
- * address - Register address within the function.
- * data - The data to read/write.
- * length - Number of byte to read/write.
- * callback - The function to call on operation completion.
- *
- * RETURNS
- * CSR_RESULT_SUCCESS - The data was successfully read/written.
- * CSR_RESULT_FAILURE - Unspecified/unknown error.
- * CSR_SDIO_RESULT_INVALID_VALUE - One or more arguments were invalid.
- * CSR_SDIO_RESULT_NO_DEVICE - The device does not exist anymore.
- * CSR_SDIO_RESULT_CRC_ERROR - A CRC error occurred. Data may have been
- * partially read/written.
- * CSR_SDIO_RESULT_TIMEOUT - No response from the device.
- *
- * NOTE: If the SDIO R5 response is available, and either of the
- * FUNCTION_NUMBER or OUT_OF_RANGE bits are set,
- * CSR_SDIO_RESULT_INVALID_VALUE shall be returned. If the ERROR bit
- * is set (but none of FUNCTION_NUMBER or OUT_OF_RANGE),
- * CSR_RESULT_FAILURE shall be returned. The ILLEGAL_COMMAND and
- * COM_CRC_ERROR bits shall be ignored.
- *
- * If the CSPI response is available, and any of the
- * FUNCTION_DISABLED or CLOCK_DISABLED bits are set,
- * CSR_SDIO_RESULT_INVALID_VALUE will be returned.
- *
- * NOTE: The CsrSdioReadAsync and CsrSdioWriteAsync functions return
- * immediately, and the supplied callback function is called when the
- * operation is complete. The result value is given as an argument to
- * the callback function.
- *
- *----------------------------------------------------------------------------*/
-CsrResult CsrSdioRead(CsrSdioFunction *function, u32 address, void *data, u32 length);
-CsrResult CsrSdioWrite(CsrSdioFunction *function, u32 address, const void *data, u32 length);
-void CsrSdioReadAsync(CsrSdioFunction *function, u32 address, void *data, u32 length, CsrSdioAsyncCallback callback);
-void CsrSdioWriteAsync(CsrSdioFunction *function, u32 address, const void *data, u32 length, CsrSdioAsyncCallback callback);
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrSdioPowerOn, CsrSdioPowerOff
- *
- * DESCRIPTION
- * Power on/off the device.
- *
- * PARAMETERS
- * function - Pointer to struct representing the function that resides on
- * the device to power on/off.
- *
- * RETURNS (only CsrSdioPowerOn)
- * CSR_RESULT_SUCCESS - Power was successfully reapplied and the device
- * has been reinitialised.
- * CSR_RESULT_FAILURE - Unspecified/unknown error.
- * CSR_SDIO_RESULT_NO_DEVICE - The device does not exist anymore.
- * CSR_SDIO_RESULT_CRC_ERROR - A CRC error occurred during reinitialisation.
- * CSR_SDIO_RESULT_TIMEOUT - No response from the device during
- * reinitialisation.
- * CSR_SDIO_RESULT_NOT_RESET - The power was not removed by the
- * CsrSdioPowerOff call. The state of the
- * device is unchanged.
- *
- *----------------------------------------------------------------------------*/
-CsrResult CsrSdioPowerOn(CsrSdioFunction *function);
-void CsrSdioPowerOff(CsrSdioFunction *function);
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrSdioHardReset
- *
- * DESCRIPTION
- * Perform a hardware reset of the device.
- *
- * PARAMETERS
- * function - Pointer to struct representing the function that resides on
- * the device to hard reset.
- *
- * RETURNS
- * CSR_RESULT_SUCCESS - Reset was successfully performed and the device
- * has been reinitialised.
- * CSR_RESULT_FAILURE - Unspecified/unknown error.
- * CSR_SDIO_RESULT_NO_DEVICE - The device does not exist anymore.
- * CSR_SDIO_RESULT_CRC_ERROR - A CRC error occurred during reinitialisation.
- * CSR_SDIO_RESULT_TIMEOUT - No response from the device during
- * reinitialisation.
- * CSR_SDIO_RESULT_NOT_RESET - The reset was not applied because it is not
- * supported. The state of the device is
- * unchanged.
- *
- *----------------------------------------------------------------------------*/
-CsrResult CsrSdioHardReset(CsrSdioFunction *function);
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrSdioFunctionActive, CsrSdioFunctionIdle
- *
- * DESCRIPTION
- *
- * PARAMETERS
- * function - Pointer to struct representing the function.
- *
- *----------------------------------------------------------------------------*/
-void CsrSdioFunctionActive(CsrSdioFunction *function);
-void CsrSdioFunctionIdle(CsrSdioFunction *function);
-
-#endif
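The header above is organised around a CsrSdioFunctionDriver whose callbacks the SDIO glue invokes on insertion, removal, interrupt, suspend and resume, with each signalled event acknowledged exactly once. A condensed Function Driver skeleton follows; the device IDs, register address and callback bodies are hypothetical and most error handling is omitted.

/* Hypothetical register address inside the function; illustration only. */
#define MY_INT_STATUS_REG       0x0008

static void my_inserted(CsrSdioFunction *function)
{
        if (CsrSdioFunctionEnable(function) == CSR_RESULT_SUCCESS &&
            CsrSdioBlockSizeSet(function, 512) == CSR_RESULT_SUCCESS) {
                CsrSdioInterruptEnable(function);
                CsrSdioInsertedAcknowledge(function, CSR_RESULT_SUCCESS);
        } else {
                CsrSdioInsertedAcknowledge(function, CSR_RESULT_FAILURE);
        }
}

static void my_removed(CsrSdioFunction *function)
{
        CsrSdioRemovedAcknowledge(function);
}

static void my_intr_dsr(CsrSdioFunction *function)
{
        u8 status;

        CsrSdioRead8(function, MY_INT_STATUS_REG, &status);
        /* ... act on 'status' ... */
        CsrSdioInterruptAcknowledge(function);  /* exactly once per interrupt */
}

static CsrSdioInterruptDsrCallback my_intr(CsrSdioFunction *function)
{
        return my_intr_dsr;                     /* defer the real work to the DSR */
}

static void my_suspend(CsrSdioFunction *function)
{
        CsrSdioSuspendAcknowledge(function, CSR_RESULT_SUCCESS);
}

static void my_resume(CsrSdioFunction *function)
{
        CsrSdioResumeAcknowledge(function, CSR_RESULT_SUCCESS);
}

static CsrSdioFunctionId my_ids[] = {
        { CSR_SDIO_ANY_MANF_ID, CSR_SDIO_ANY_CARD_ID,
          CSR_SDIO_ANY_SDIO_FUNCTION, CSR_SDIO_ANY_SDIO_INTERFACE },
};

static CsrSdioFunctionDriver my_function_driver = {
        .inserted = my_inserted,
        .removed  = my_removed,
        .intr     = my_intr,
        .suspend  = my_suspend,
        .resume   = my_resume,
        .ids      = my_ids,
        .idsCount = 1,
};

Registration would normally happen from module init via CsrSdioFunctionDriverRegister(&my_function_driver), mirrored by CsrSdioFunctionDriverUnregister() on exit.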
diff --git a/drivers/staging/csr/csr_serialize_primitive_types.c b/drivers/staging/csr/csr_serialize_primitive_types.c
deleted file mode 100644
index 9713b9a..0000000
--- a/drivers/staging/csr/csr_serialize_primitive_types.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2010
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include "csr_prim_defs.h"
-#include "csr_msgconv.h"
-#include "csr_macro.h"
-
-void CsrUint8Des(u8 *value, u8 *buffer, size_t *offset)
-{
- *value = buffer[*offset];
- *offset += sizeof(*value);
-}
-EXPORT_SYMBOL_GPL(CsrUint8Des);
-
-void CsrUint16Des(u16 *value, u8 *buffer, size_t *offset)
-{
- *value = (buffer[*offset + 0] << 0) |
- (buffer[*offset + 1] << 8);
- *offset += sizeof(*value);
-}
-EXPORT_SYMBOL_GPL(CsrUint16Des);
-
-void CsrUint32Des(u32 *value, u8 *buffer, size_t *offset)
-{
- *value = (buffer[*offset + 0] << 0) |
- (buffer[*offset + 1] << 8) |
- (buffer[*offset + 2] << 16) |
- (buffer[*offset + 3] << 24);
- *offset += sizeof(*value);
-}
-EXPORT_SYMBOL_GPL(CsrUint32Des);
-
-void CsrMemCpyDes(void *value, u8 *buffer, size_t *offset, size_t length)
-{
- memcpy(value, &buffer[*offset], length);
- *offset += length;
-}
-EXPORT_SYMBOL_GPL(CsrMemCpyDes);
-
-void CsrCharStringDes(char **value, u8 *buffer, size_t *offset)
-{
- *value = kstrdup((char *) &buffer[*offset], GFP_KERNEL);
- *offset += strlen(*value) + 1;
-}
-EXPORT_SYMBOL_GPL(CsrCharStringDes);
-
-void CsrUint8Ser(u8 *buffer, size_t *offset, u8 value)
-{
- buffer[*offset] = value;
- *offset += sizeof(value);
-}
-EXPORT_SYMBOL_GPL(CsrUint8Ser);
-
-void CsrUint16Ser(u8 *buffer, size_t *offset, u16 value)
-{
- buffer[*offset + 0] = (u8) ((value >> 0) & 0xFF);
- buffer[*offset + 1] = (u8) ((value >> 8) & 0xFF);
- *offset += sizeof(value);
-}
-EXPORT_SYMBOL_GPL(CsrUint16Ser);
-
-void CsrUint32Ser(u8 *buffer, size_t *offset, u32 value)
-{
- buffer[*offset + 0] = (u8) ((value >> 0) & 0xFF);
- buffer[*offset + 1] = (u8) ((value >> 8) & 0xFF);
- buffer[*offset + 2] = (u8) ((value >> 16) & 0xFF);
- buffer[*offset + 3] = (u8) ((value >> 24) & 0xFF);
- *offset += sizeof(value);
-}
-EXPORT_SYMBOL_GPL(CsrUint32Ser);
-
-void CsrMemCpySer(u8 *buffer, size_t *offset, const void *value, size_t length)
-{
- memcpy(&buffer[*offset], value, length);
- *offset += length;
-}
-EXPORT_SYMBOL_GPL(CsrMemCpySer);
-
-void CsrCharStringSer(u8 *buffer, size_t *offset, const char *value)
-{
- if (value)
- {
- strcpy(((char *) &buffer[*offset]), value);
- *offset += strlen(value) + 1;
- }
- else
- {
- CsrUint8Ser(buffer, offset, 0);
- }
-}
-EXPORT_SYMBOL_GPL(CsrCharStringSer);
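The helpers above pin the wire format to little-endian regardless of host byte order, with each call advancing *offset past the field it wrote or read. A short round-trip sketch (buffer size and values are arbitrary):

static void my_roundtrip_example(void)
{
        u8 buf[6];
        size_t in = 0, out = 0;
        u16 a;
        u32 b;

        CsrUint16Ser(buf, &in, 0x1234);         /* buf[0] = 0x34, buf[1] = 0x12 */
        CsrUint32Ser(buf, &in, 0xAABBCCDD);     /* buf[2] = 0xDD ... buf[5] = 0xAA */

        CsrUint16Des(&a, buf, &out);            /* a == 0x1234, out == 2 */
        CsrUint32Des(&b, buf, &out);            /* b == 0xAABBCCDD, out == 6 */
}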
diff --git a/drivers/staging/csr/csr_time.c b/drivers/staging/csr/csr_time.c
deleted file mode 100644
index 01179e4..0000000
--- a/drivers/staging/csr/csr_time.c
+++ /dev/null
@@ -1,33 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2010
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/time.h>
-#include <linux/module.h>
-
-#include "csr_time.h"
-
-u32 CsrTimeGet(u32 *high)
-{
- struct timespec ts;
- u64 time;
- u32 low;
-
- ts = current_kernel_time();
- time = (u64) ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
-
- if (high != NULL)
- *high = (u32) ((time >> 32) & 0xFFFFFFFF);
-
- low = (u32) (time & 0xFFFFFFFF);
-
- return low;
-}
-EXPORT_SYMBOL_GPL(CsrTimeGet);
diff --git a/drivers/staging/csr/csr_time.h b/drivers/staging/csr/csr_time.h
deleted file mode 100644
index fc29e8e..0000000
--- a/drivers/staging/csr/csr_time.h
+++ /dev/null
@@ -1,76 +0,0 @@
-#ifndef CSR_TIME_H__
-#define CSR_TIME_H__
-/*****************************************************************************
-
-(c) Cambridge Silicon Radio Limited 2010
-All rights reserved and confidential information of CSR
-
-Refer to LICENSE.txt included with this source for details
-on the license terms.
-
-*****************************************************************************/
-
-#include <linux/types.h>
-
-/*******************************************************************************
-
-NAME
- CsrTimeGet
-
-DESCRIPTION
- Returns the current system time in a low and a high part. The low part
- is expressed in microseconds. The high part is incremented when the low
- part wraps to provide an extended range.
-
- The caller may provide a NULL pointer as the high parameter.
- In this case the function just returns the low part and ignores the
- high parameter.
-
- Although the time is expressed in microseconds the actual resolution is
- platform dependent and can be less. It is recommended that the
- resolution is at least 10 milliseconds.
-
-PARAMETERS
- high - Pointer to variable that will receive the high part of the
- current system time. Passing NULL is valid.
-
-RETURNS
- Low part of current system time in microseconds.
-
-*******************************************************************************/
-u32 CsrTimeGet(u32 *high);
-
-
-/*------------------------------------------------------------------*/
-/* CsrTime Macros */
-/*------------------------------------------------------------------*/
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrTimeAdd
- *
- * DESCRIPTION
- * Add two time values. Adding the numbers can overflow the range of a
- * CsrTime, so the user must be cautious.
- *
- * RETURNS
- * CsrTime - the sum of "t1" and "t2".
- *
- *----------------------------------------------------------------------------*/
-#define CsrTimeAdd(t1, t2) ((t1) + (t2))
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrTimeSub
- *
- * DESCRIPTION
- * Subtract two time values. Subtracting the numbers can provoke an
- * underflow, so the user must be cautious.
- *
- * RETURNS
- * CsrTime - "t1" - "t2".
- *
- *----------------------------------------------------------------------------*/
-#define CsrTimeSub(t1, t2) ((s32) (t1) - (s32) (t2))
-
-#endif
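CsrTimeGet() returns a microsecond tick whose 32-bit low part wraps, and CsrTimeSub() subtracts in signed arithmetic so that a single wrap still yields the correct difference. A small sketch of measuring an elapsed interval (illustrative only):

static u32 my_elapsed_us_example(void)
{
        u32 start, now;

        start = CsrTimeGet(NULL);               /* high part not needed here */
        /* ... do some work ... */
        now = CsrTimeGet(NULL);

        return (u32) CsrTimeSub(now, start);    /* correct across one wrap */
}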
diff --git a/drivers/staging/csr/csr_util.c b/drivers/staging/csr/csr_util.c
deleted file mode 100644
index c3aa9d5..0000000
--- a/drivers/staging/csr/csr_util.c
+++ /dev/null
@@ -1,15 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2010
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-#include <linux/module.h>
-
-MODULE_DESCRIPTION("CSR Operating System Kernel Abstraction");
-MODULE_AUTHOR("Cambridge Silicon Radio Ltd.");
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/staging/csr/csr_wifi_common.h b/drivers/staging/csr/csr_wifi_common.h
deleted file mode 100644
index efc43a5..0000000
--- a/drivers/staging/csr/csr_wifi_common.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-#ifndef CSR_WIFI_COMMON_H__
-#define CSR_WIFI_COMMON_H__
-
-#include <linux/types.h>
-#include "csr_result.h"
-
-/* MAC address */
-typedef struct
-{
- u8 a[6];
-} CsrWifiMacAddress;
-
-/* IPv4 address */
-typedef struct
-{
- u8 a[4];
-} CsrWifiIp4Address;
-
-/* IPv6 address */
-typedef struct
-{
- u8 a[16];
-} CsrWifiIp6Address;
-
-typedef struct
-{
- u8 ssid[32];
- u8 length;
-} CsrWifiSsid;
-
-/*******************************************************************************
-
- DESCRIPTION
- Result values used on the Wifi Interfaces
-
- VALUES
- CSR_RESULT_SUCCESS
- - The request/procedure succeeded
- CSR_RESULT_FAILURE
- - The request/procedure did not succeed because of an error
- CSR_WIFI_RESULT_NOT_FOUND
- - The request did not succeed because some resource was not
- found.
- CSR_WIFI_RESULT_TIMED_OUT
- - The request/procedure did not succeed because of a time out
- CSR_WIFI_RESULT_CANCELLED
- - The request was canceled due to another conflicting
- request that was issued before this one was completed
- CSR_WIFI_RESULT_INVALID_PARAMETER
- - The request/procedure did not succeed because it had an
- invalid parameter
- CSR_WIFI_RESULT_NO_ROOM
- - The request did not succeed due to a lack of resources,
- e.g. out of memory problem.
- CSR_WIFI_RESULT_UNSUPPORTED
- - The request/procedure did not succeed because the feature
- is not supported yet
- CSR_WIFI_RESULT_UNAVAILABLE
- - The request cannot be processed at this time
- CSR_WIFI_RESULT_WIFI_OFF
- - The requested action is not available because Wi-Fi is
- currently off
- CSR_WIFI_RESULT_SECURITY_ERROR
- - The request/procedure did not succeed because of a security
- error
- CSR_WIFI_RESULT_MIB_SET_FAILURE
- - MIB Set Failure: either the MIB OID to be written to does
- not exist or the MIB Value is invalid.
- CSR_WIFI_RESULT_INVALID_INTERFACE_TAG
- - The supplied Interface Tag is not valid.
- CSR_WIFI_RESULT_P2P_NOA_CONFIG_CONFLICT
- - The new NOA configuration conflicts with the existing NOA configuration
- hence not accepted"
-*******************************************************************************/
-#define CSR_WIFI_RESULT_NOT_FOUND ((CsrResult) 0x0001)
-#define CSR_WIFI_RESULT_TIMED_OUT ((CsrResult) 0x0002)
-#define CSR_WIFI_RESULT_CANCELLED ((CsrResult) 0x0003)
-#define CSR_WIFI_RESULT_INVALID_PARAMETER ((CsrResult) 0x0004)
-#define CSR_WIFI_RESULT_NO_ROOM ((CsrResult) 0x0005)
-#define CSR_WIFI_RESULT_UNSUPPORTED ((CsrResult) 0x0006)
-#define CSR_WIFI_RESULT_UNAVAILABLE ((CsrResult) 0x0007)
-#define CSR_WIFI_RESULT_WIFI_OFF ((CsrResult) 0x0008)
-#define CSR_WIFI_RESULT_SECURITY_ERROR ((CsrResult) 0x0009)
-#define CSR_WIFI_RESULT_MIB_SET_FAILURE ((CsrResult) 0x000A)
-#define CSR_WIFI_RESULT_INVALID_INTERFACE_TAG ((CsrResult) 0x000B)
-#define CSR_WIFI_RESULT_P2P_NOA_CONFIG_CONFLICT ((CsrResult) 0x000C)
-
-#define CSR_WIFI_VERSION "5.1.0.0"
-
-#endif
-
diff --git a/drivers/staging/csr/csr_wifi_fsm.h b/drivers/staging/csr/csr_wifi_fsm.h
deleted file mode 100644
index fc5c5aa..0000000
--- a/drivers/staging/csr/csr_wifi_fsm.h
+++ /dev/null
@@ -1,240 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-#ifndef CSR_WIFI_FSM_H
-#define CSR_WIFI_FSM_H
-
-#include "csr_prim_defs.h"
-#include "csr_log_text.h"
-#include "csr_wifi_fsm_event.h"
-
-/* including this file for CsrWifiInterfaceMode */
-#include "csr_wifi_common.h"
-
-#define CSR_WIFI_FSM_ENV (0xFFFF)
-
-/**
- * @brief
- * Toplevel FSM context data
- *
- * @par Description
- * Holds ALL FSM static and dynamic data for a FSM
- */
-typedef struct CsrWifiFsmContext CsrWifiFsmContext;
-
-/**
- * @brief
- *   FSM External Wakeup Callback Function Pointer
- *
- * @par Description
- * Defines the external wakeup function for the FSM
- *   to call when an external event is injected into the system
- *
- * @param[in] context : External context
- *
- * @return
- * void
- */
-typedef void (*CsrWifiFsmExternalWakupCallbackPtr)(void *context);
-
-/**
- * @brief
- * Initialises a top level FSM context
- *
- * @par Description
- * Initialises the FSM Context to an initial state and allocates
- * space for "maxProcesses" number of instances
- *
- * @param[in]    applicationContext : Internal fsm application context
- * @param[in]    externalContext    : External context
- * @param[in]    maxProcesses       : Max processes to allocate room for
- * @param[in]    loggingTaskId      : Task id to use for log text output
- *
- * @return
- * CsrWifiFsmContext* fsm context
- */
-extern CsrWifiFsmContext* CsrWifiFsmInit(void *applicationContext, void *externalContext, u16 maxProcesses, CsrLogTextTaskId loggingTaskId);
-
-/**
- * @brief
- *   Resets the FSMs back to their initial conditions
- *
- * @par Description
- * This function is used to free any dynamic resources allocated for the
- * given context by CsrWifiFsmInit().
- * The FSM's reset function is called to cleanup any fsm specific memory
- * The reset function does NOT need to free the fsm data pointer as
- * CsrWifiFsmShutdown() will do it.
- *   The FSM's init function is called again to reinitialise the FSM context.
- * CsrWifiFsmReset() should NEVER be called when CsrWifiFsmExecute() is running.
- *
- * @param[in] context : FSM context
- *
- * @return
- * void
- */
-extern void CsrWifiFsmReset(CsrWifiFsmContext *context);
-
-/**
- * @brief
- * Frees resources allocated by CsrWifiFsmInit
- *
- * @par Description
- * This function is used to free any dynamic resources allocated for the
- * given context by CsrWifiFsmInit(), prior to complete termination of
- * the program.
- * The FSM's reset function is called to cleanup any fsm specific memory.
- * The reset function does NOT need to free the fsm data pointer as
- * CsrWifiFsmShutdown() will do it.
- * CsrWifiFsmShutdown() should NEVER be called when CsrWifiFsmExecute() is running.
- *
- * @param[in] context : FSM context
- *
- * @return
- * void
- */
-extern void CsrWifiFsmShutdown(CsrWifiFsmContext *context);
-
-/**
- * @brief
- * Executes the fsm context
- *
- * @par Description
- * Executes the FSM context and runs until ALL events in the context are processed.
- *   When no more events are left to process, CsrWifiFsmExecute() returns a time
- *   specifying when to next call CsrWifiFsmExecute().
- * Scheduling, threading, blocking and external event notification are outside
- * the scope of the FSM and CsrWifiFsmExecute().
- *
- * @param[in] context : FSM context
- *
- * @return
- * u32 Time in ms until next timeout or 0xFFFFFFFF for no timer set
- */
-extern u32 CsrWifiFsmExecute(CsrWifiFsmContext *context);
-
-/**
- * @brief
- * Adds an event to the FSM context's external event queue for processing
- *
- * @par Description
- * Adds an event to the contexts external queue
- * This is thread safe and adds an event to the fsm's external event queue.
- *
- * @param[in] context : FSM context
- * @param[in] event : event to add to the event queue
- * @param[in] source : source of the event (this can be a synergy task queue or an fsm instance id)
- * @param[in] destination : destination of the event (This can be a fsm instance id or CSR_WIFI_FSM_ENV)
- * @param[in] primtype    : primitive type of the event
- * @param[in] id          : event id
- *
- * @return
- * void
- */
-extern void CsrWifiFsmSendEventExternal(CsrWifiFsmContext *context, CsrWifiFsmEvent *event, u16 source, u16 destination, CsrPrim primtype, u16 id);
-
-/**
- * @brief
- * Adds an Alien event to the FSM context's external event queue for processing
- *
- * @par Description
- *   Wraps an alien event in a CsrWifiFsmAlienEvent and adds it to the
- *   context's external event queue.
- *   This macro is thread safe.
- *
- * @param[in]    context     : FSM context
- * @param[in]    alienEvent  : alien event to wrap and add to the event queue
- * @param[in]    source      : source of the event (this can be a synergy task queue or an fsm instance id)
- * @param[in]    destination : destination of the event (this can be a fsm instance id or CSR_WIFI_FSM_ENV)
- * @param[in]    primtype    : primitive type of the event
- * @param[in]    id          : event id
- */
-#define CsrWifiFsmSendAlienEventExternal(_context, _alienEvent, _source, _destination, _primtype, _id) \
- { \
- CsrWifiFsmAlienEvent *_evt = kmalloc(sizeof(CsrWifiFsmAlienEvent), GFP_KERNEL); \
- _evt->alienEvent = _alienEvent; \
- CsrWifiFsmSendEventExternal(_context, (CsrWifiFsmEvent *)_evt, _source, _destination, _primtype, _id); \
- }
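As a rough illustration of how the macro above might be used (a sketch only: the MY_TASK_QUEUE and MY_PRIM names are invented, and note that the macro dereferences the kmalloc() result without a NULL check):

#define MY_TASK_QUEUE 0x0001   /* hypothetical source queue id */
#define MY_PRIM       0x0001   /* hypothetical primitive type */

static void example_forward_legacy_message(CsrWifiFsmContext *fsmContext, void *legacyMsg)
{
    /* Wraps legacyMsg in a CsrWifiFsmAlienEvent and queues it for the FSM.
     * Per the comment above, the destination may be an fsm instance id or
     * CSR_WIFI_FSM_ENV. */
    CsrWifiFsmSendAlienEventExternal(fsmContext, legacyMsg, MY_TASK_QUEUE,
                                     CSR_WIFI_FSM_ENV, MY_PRIM, 1 /* event id */);
}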
-
-
-/**
- * @brief
- * Current time of day in ms
- *
- * @param[in] context : FSM context
- *
- * @return
- * u32 32 bit ms tick
- */
-extern u32 CsrWifiFsmGetTimeOfDayMs(CsrWifiFsmContext *context);
-
-/**
- * @brief
- * Gets the time until the next FSM timer expiry
- *
- * @par Description
- *   Returns the time in ms until the next timeout, or 0xFFFFFFFF if no timers are set.
- *
- * @param[in] context : FSM context
- *
- * @return
- * u32 Time in ms until next timeout or 0xFFFFFFFF for no timer set
- */
-extern u32 CsrWifiFsmGetNextTimeout(CsrWifiFsmContext *context);
-
-/**
- * @brief
- * Fast forwards the fsm timers by ms Milliseconds
- *
- * @param[in] context : FSM context
- * @param[in] ms : Milliseconds to fast forward by
- *
- * @return
- * void
- */
-extern void CsrWifiFsmFastForward(CsrWifiFsmContext *context, u16 ms);
-
-/**
- * @brief
- *   Shifts the current time of day by ms milliseconds
- *
- * @par Description
- *   Useful for speeding up tests where time needs to pass
- *
- * @param[in] context : FSM context
- * @param[in] ms : ms to adjust time by
- *
- * @return
- * void
- */
-extern void CsrWifiFsmTestAdvanceTime(CsrWifiFsmContext *context, u32 ms);
-
-/**
- * @brief
- * Check if the fsm has events to process
- *
- * @param[in] context : FSM context
- *
- * @return
- * u8 returns TRUE if there are events for the FSM to process
- */
-extern u8 CsrWifiFsmHasEvents(CsrWifiFsmContext *context);
-
-/**
- * @brief
- *   Installs the context's wakeup callback function
- *
- * @param[in] context : FSM context
- * @param[in] callback : Callback function pointer
- *
- * @return
- * void
- */
-extern void CsrWifiFsmInstallWakeupCallback(CsrWifiFsmContext *context, CsrWifiFsmExternalWakupCallbackPtr callback);
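Taken together, the calls above suggest a host-side scheduling loop along these lines. This is a minimal sketch under the assumption that the installed wakeup callback fires when an external event is queued; host_sleep_ms() and the wake flag are inventions of this example, not part of the API.

extern void host_sleep_ms(u32 ms);          /* assumed host sleep primitive */

static volatile u8 example_wake_pending;    /* set by the wakeup callback */

static void example_wakeup(void *context)
{
    (void)context;
    example_wake_pending = 1;
}

static void example_driver_loop(CsrWifiFsmContext *fsmContext)
{
    CsrWifiFsmInstallWakeupCallback(fsmContext, example_wakeup);

    for (;;)
    {
        /* Runs until all queued events are processed, then reports when the
         * next timer is due (0xFFFFFFFF means no timer is set). */
        u32 nextMs = CsrWifiFsmExecute(fsmContext);

        if (!CsrWifiFsmHasEvents(fsmContext) && !example_wake_pending)
        {
            host_sleep_ms(nextMs == 0xFFFFFFFF ? 100 : nextMs);
        }
        example_wake_pending = 0;
    }
}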
-
-#endif /* CSR_WIFI_FSM_H */
-
diff --git a/drivers/staging/csr/csr_wifi_fsm_event.h b/drivers/staging/csr/csr_wifi_fsm_event.h
deleted file mode 100644
index 0690ca9..0000000
--- a/drivers/staging/csr/csr_wifi_fsm_event.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-#ifndef CSR_WIFI_FSM_EVENT_H
-#define CSR_WIFI_FSM_EVENT_H
-
-#include "csr_prim_defs.h"
-#include "csr_sched.h"
-
-/**
- * @brief
- * FSM event header.
- *
- * @par Description
- * All events MUST have this struct as the FIRST member.
- * The next member is used internally for linked lists
- */
-typedef struct CsrWifiFsmEvent
-{
- CsrPrim type;
- u16 primtype;
- CsrSchedQid destination;
- CsrSchedQid source;
-
- /* Private pointer to allow an optimal Event list */
- /* NOTE: Ignore this pointer.
- * Do not waste code initializing OR freeing it.
- * The pointer is used internally in the CsrWifiFsm code
- * to avoid a second malloc when queuing events.
- */
- struct CsrWifiFsmEvent *next;
-} CsrWifiFsmEvent;
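A minimal, invented example of an event type that satisfies the "FIRST member" rule described above:

typedef struct
{
    CsrWifiFsmEvent common;    /* must be the first member */
    u16             channel;   /* example payload field */
    u8              bssid[6];  /* example payload field */
} ExampleConnectReq;           /* name and fields are illustrative only */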
-
-#endif /* CSR_WIFI_FSM_EVENT_H */
-
diff --git a/drivers/staging/csr/csr_wifi_fsm_types.h b/drivers/staging/csr/csr_wifi_fsm_types.h
deleted file mode 100644
index d21c60a..0000000
--- a/drivers/staging/csr/csr_wifi_fsm_types.h
+++ /dev/null
@@ -1,430 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-#ifndef CSR_WIFI_FSM_TYPES_H
-#define CSR_WIFI_FSM_TYPES_H
-
-#include <linux/types.h>
-#include "csr_macro.h"
-#include "csr_sched.h"
-
-#ifdef CSR_WIFI_FSM_MUTEX_ENABLE
-#include "csr_framework_ext.h"
-#endif
-
-#include "csr_wifi_fsm.h"
-
-#define CSR_WIFI_FSM_MAX_TRANSITION_HISTORY 10
-
-/**
- * @brief
- * FSM event list header.
- *
- * @par Description
- * Singly linked list of events.
- */
-typedef struct CsrWifiFsmEventList
-{
- CsrWifiFsmEvent *first;
- CsrWifiFsmEvent *last;
-} CsrWifiFsmEventList;
-
-
-/**
- * @brief
- * FSM timer id.
- *
- * @par Description
- * Composite Id made up of the type, dest and a unique id so
- * CsrWifiFsmRemoveTimer knows where to look when removing the timer
- */
-typedef struct CsrWifiFsmTimerId
-{
- CsrPrim type;
- u16 primtype;
- CsrSchedQid destination;
- u16 uniqueid;
-} CsrWifiFsmTimerId;
-
-/**
- * @brief
- * FSM timer header.
- *
- * @par Description
- *   All timers MUST have this struct as the FIRST member.
- *   The first members of the structure MUST remain compatible
- *   with CsrWifiFsmEvent so that timers are just specialised events.
- */
-typedef struct CsrWifiFsmTimer
-{
- CsrPrim type;
- u16 primtype;
- CsrSchedQid destination;
- CsrSchedQid source;
-
- /* Private pointer to allow an optimal Event list */
- struct CsrWifiFsmTimer *next;
-
- CsrWifiFsmTimerId timerid;
- u32 timeoutTimeMs;
-} CsrWifiFsmTimer;
-
-
-/**
- * @brief
- * Fsm Alien Event
- *
- * @par Description
- * Allows the wrapping of alien events that do not use CsrWifiFsmEvent
- * as the first member of the Event struct
- */
-typedef struct
-{
- CsrWifiFsmEvent event;
- void *alienEvent;
-} CsrWifiFsmAlienEvent;
-
-
-/**
- * @brief
- * FSM timer list header.
- *
- * @par Description
- * Singly linked list of timers.
- */
-typedef struct CsrWifiFsmTimerList
-{
- CsrWifiFsmTimer *first;
- CsrWifiFsmTimer *last;
- u16 nexttimerid;
-} CsrWifiFsmTimerList;
-
-/**
- * @brief
- * Process Entry Function Pointer
- *
- * @par Description
- *   Defines the entry function for a process.
- * Called at process initialisation.
- *
- * @param[in] context : FSM context
- *
- * @return
- * void
- */
-typedef void (*CsrWifiFsmProcEntryFnPtr)(CsrWifiFsmContext *context);
-
-/**
- * @brief
- * Process Transition Function Pointer
- *
- * @par Description
- *   Defines a transition function for a process.
- *   Called when an event causes a transition on a process.
- *
- * @param[in] CsrWifiFsmContext* : FSM context
- * @param[in] void* : FSM data (can be NULL)
- * @param[in] const CsrWifiFsmEvent* : event to process
- *
- * @return
- * void
- */
-typedef void (*CsrWifiFsmTransitionFnPtr)(CsrWifiFsmContext *context, void *fsmData, const CsrWifiFsmEvent *event);
-
-/**
- * @brief
- * Process reset/shutdown Function Pointer
- *
- * @par Description
- *   Defines the reset/shutdown function for a process.
- *   Called to reset or shut down an FSM.
- *
- * @param[in] context : FSM context
- *
- * @return
- * void
- */
-typedef void (*CsrWifiFsmProcResetFnPtr)(CsrWifiFsmContext *context);
-
-/**
- * @brief
- * FSM Default Destination CallbackFunction Pointer
- *
- * @par Description
- * Defines the default destination function for the FSM
- * to call when an event does not have a valid destination.
- *
- * @param[in]    context : External context
- * @param[in]    event   : event for which a destination is required
- *
- * @return
- * u16 a valid destination OR CSR_WIFI_FSM_ENV
- */
-typedef u16 (*CsrWifiFsmDestLookupCallbackPtr)(void *context, const CsrWifiFsmEvent *event);
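A minimal sketch of such a callback (EXAMPLE_MANAGER_INSTANCE_ID is an invented instance id; a real implementation would inspect the event before choosing a destination):

#define EXAMPLE_MANAGER_INSTANCE_ID 1    /* hypothetical fsm instance id */

static u16 example_dest_lookup(void *context, const CsrWifiFsmEvent *event)
{
    (void)context;
    (void)event;
    /* Route any event without a valid destination to one well-known instance. */
    return EXAMPLE_MANAGER_INSTANCE_ID;
}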
-
-
-#ifdef CSR_WIFI_FSM_DUMP_ENABLE
-/**
- * @brief
- * Trace Dump Function Pointer
- *
- * @par Description
- * Called when we want to trace the FSM
- *
- * @param[in] context : FSM context
- * @param[in]    fsmData : FSM instance data
- *
- * @return
- * void
- */
-typedef void (*CsrWifiFsmDumpFnPtr)(CsrWifiFsmContext *context, void *fsmData);
-#endif
-
-/**
- * @brief
- * Event ID to transition function entry
- *
- * @par Description
- * Event ID to Transition Entry in a state table.
- */
-typedef struct
-{
- u32 eventid;
- CsrWifiFsmTransitionFnPtr transition;
-#ifdef CSR_LOG_ENABLE
- const char *transitionName;
-#endif
-} CsrWifiFsmEventEntry;
-
-/**
- * @brief
- * Single State's Transition Table
- *
- * @par Description
- *   Stores data for a single state's event-to-transition-function
- *   mapping
- */
-typedef struct
-{
- const u8 numEntries;
- const u8 saveAll;
- const CsrWifiFsmEventEntry *eventEntryArray; /* array of transition function pointers for state */
-#ifdef CSR_LOG_ENABLE
- u16 stateNumber;
- const char *stateName;
-#endif
-} CsrWifiFsmTableEntry;
-
-/**
- * @brief
- *   Process State Transition table
- *
- * @par Description
- *   Stores data for a process's state-to-transition table
- */
-typedef struct
-{
- u16 numStates; /* number of states */
- const CsrWifiFsmTableEntry *aStateEventMatrix; /* state event matrix */
-} CsrWifiFsmTransitionFunctionTable;
-
-/**
- * @brief
- * Const Process definition
- *
- * @par Description
- * Constant process specification.
- * This is ALL the non dynamic data that defines
- * a process.
- */
-typedef struct
-{
- const char *processName;
- const u32 processId;
- const CsrWifiFsmTransitionFunctionTable transitionTable;
- const CsrWifiFsmTableEntry unhandledTransitions;
- const CsrWifiFsmTableEntry ignoreFunctions;
- const CsrWifiFsmProcEntryFnPtr entryFn;
- const CsrWifiFsmProcResetFnPtr resetFn;
-#ifdef CSR_WIFI_FSM_DUMP_ENABLE
- const CsrWifiFsmDumpFnPtr dumpFn; /* Called to dump fsm specific trace if not NULL */
-#endif
-} CsrWifiFsmProcessStateMachine;
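For illustration, a tiny hand-written instance of these tables might look as follows. This is a sketch only, assuming CSR_LOG_ENABLE is not defined (so the optional name/number fields are omitted); the event id and handler are invented.

#define EXAMPLE_START_REQ_ID 0x0001u    /* hypothetical event id */

static void example_idle_start(CsrWifiFsmContext *context, void *fsmData,
                               const CsrWifiFsmEvent *event)
{
    /* Handle the event; state changes go through the normal FSM calls. */
}

static const CsrWifiFsmEventEntry exampleIdleTransitions[] =
{
    { EXAMPLE_START_REQ_ID, example_idle_start },
};

static const CsrWifiFsmTableEntry exampleStateTable[] =
{
    /* numEntries, saveAll, eventEntryArray */
    { 1, 0, exampleIdleTransitions },
};

static const CsrWifiFsmTransitionFunctionTable exampleTransitionTable =
{
    1,                  /* numStates */
    exampleStateTable,  /* state event matrix: one row per state */
};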
-
-#ifdef CSR_WIFI_FSM_DUMP_ENABLE
-/**
- * @brief
- * Storage for state transition info
- */
-typedef struct
-{
- u16 transitionNumber;
- CsrWifiFsmEvent event;
- u16 fromState;
- u16 toState;
- CsrWifiFsmTransitionFnPtr transitionFn;
-    u16                       transitionCount;                   /* number of consecutive times this transition was seen */
-#ifdef CSR_LOG_ENABLE
- const char *transitionName;
-#endif
-} CsrWifiFsmTransitionRecord;
-
-/**
- * @brief
- * Storage for the last state X transitions
- */
-typedef struct
-{
- u16 numTransitions;
- CsrWifiFsmTransitionRecord records[CSR_WIFI_FSM_MAX_TRANSITION_HISTORY];
-} CsrWifiFsmTransitionRecords;
-#endif
-
-/**
- * @brief
- * Dynamic Process data
- *
- * @par Description
- * Dynamic process data that is used to keep track of the
- * state and data for a process instance
- */
-typedef struct
-{
- const CsrWifiFsmProcessStateMachine *fsmInfo; /* state machine info that is constant regardless of context */
- u16 instanceId; /* Runtime process id */
- u16 state; /* Current state */
- void *params; /* Instance user data */
- CsrWifiFsmEventList savedEventQueue; /* The saved event queue */
- struct CsrWifiFsmInstanceEntry *subFsm; /* Sub Fsm instance data */
- struct CsrWifiFsmInstanceEntry *subFsmCaller; /* The Fsm instance that created the SubFsm and should be used for callbacks*/
-#ifdef CSR_WIFI_FSM_DUMP_ENABLE
- CsrWifiFsmTransitionRecords transitionRecords; /* Last X transitions in the FSM */
-#endif
-} CsrWifiFsmInstanceEntry;
-
-/**
- * @brief
- * OnCreate Callback Function Pointer
- *
- * @par Description
- * Called when an fsm is created.
- *
- * @param[in] extContext : External context
- * @param[in] instance : FSM instance
- *
- * @return
- * void
- */
-typedef void (*CsrWifiFsmOnCreateFnPtr)(void *extContext, const CsrWifiFsmInstanceEntry *instance);
-
-/**
- * @brief
- * OnTransition Callback Function Pointer
- *
- * @par Description
- * Called when an event is processed by a fsm
- *
- * @param[in] extContext : External context
- * @param[in] eventEntryArray : Entry data
- * @param[in] event : Event
- *
- * @return
- * void
- */
-typedef void (*CsrWifiFsmOnTransitionFnPtr)(void *extContext, const CsrWifiFsmEventEntry *eventEntryArray, const CsrWifiFsmEvent *event);
-
-/**
- * @brief
- * OnStateChange Callback Function Pointer
- *
- * @par Description
- * Called when CsrWifiFsmNextState is called
- *
- * @param[in] extContext : External context
- *
- * @return
- * void
- */
-typedef void (*CsrWifiFsmOnStateChangeFnPtr)(void *extContext, u16 nextstate);
-
-/**
- * @brief
- * OnIgnore,OnError or OnInvalid Callback Function Pointer
- *
- * @par Description
- * Called when an event is processed by a fsm
- *
- * @param[in] extContext : External context
- * @param[in] event : Event
- *
- * @return
- * void
- */
-typedef void (*CsrWifiFsmOnEventFnPtr)(void *extContext, const CsrWifiFsmEvent *event);
-
-/**
- * @brief
- * Toplevel FSM context data
- *
- * @par Description
- * Holds ALL FSM static and dynamic data for a FSM
- */
-struct CsrWifiFsmContext
-{
- CsrWifiFsmEventList eventQueue; /* The internal event queue */
- CsrWifiFsmEventList externalEventQueue; /* The external event queue */
-#ifdef CSR_WIFI_FSM_MUTEX_ENABLE
- CsrMutexHandle externalEventQueueLock; /* The external event queue mutex */
-#endif
- u32 timeOffset; /* Amount to adjust the TimeOfDayMs by */
- CsrWifiFsmTimerList timerQueue; /* The internal timer queue */
- u8 useTempSaveList; /* Should the temp save list be used */
- CsrWifiFsmEventList tempSaveList; /* The temp save event queue */
- CsrWifiFsmEvent *eventForwardedOrSaved; /* The event that was forwarded or Saved */
- u16 maxProcesses; /* Size of instanceArray */
- u16 numProcesses; /* Current number allocated in instanceArray */
- CsrWifiFsmInstanceEntry *instanceArray; /* Array of processes for this component */
- CsrWifiFsmInstanceEntry *ownerInstance; /* The Process that owns currentInstance (SubFsm support) */
- CsrWifiFsmInstanceEntry *currentInstance; /* Current Process that is executing */
- CsrWifiFsmExternalWakupCallbackPtr externalEventFn; /* External event Callback */
- CsrWifiFsmOnEventFnPtr appIgnoreCallback; /* Application Ignore event Callback */
- CsrWifiFsmDestLookupCallbackPtr appEvtDstCallback; /* Application Lookup event Destination Function*/
-
- void *applicationContext; /* Internal fsm application context */
- void *externalContext; /* External context (set by the user of the fsm)*/
- CsrLogTextTaskId loggingTaskId; /* Task Id to use in any logging output */
-
-#ifndef CSR_WIFI_FSM_SCHEDULER_DISABLED
- CsrSchedTid schedTimerId; /* Scheduler TimerId for use in Scheduler Tasks */
- u32 schedTimerNexttimeoutMs; /* Next timeout time for the current timer */
-#endif
-
-#ifdef CSR_WIFI_FSM_MUTEX_ENABLE
-#ifdef CSR_WIFI_FSM_TRANSITION_LOCK
- CsrMutexHandle transitionLock; /* Lock when calling transition functions */
-#endif
-#endif
-
-#ifdef CSR_LOG_ENABLE
- CsrWifiFsmOnCreateFnPtr onCreate; /* Debug Transition Callback */
- CsrWifiFsmOnTransitionFnPtr onTransition; /* Debug Transition Callback */
-    CsrWifiFsmOnTransitionFnPtr         onUnhandedCallback;          /* Unhandled event Callback */
- CsrWifiFsmOnStateChangeFnPtr onStateChange; /* Debug State Change Callback */
- CsrWifiFsmOnEventFnPtr onIgnoreCallback; /* Ignore event Callback */
- CsrWifiFsmOnEventFnPtr onSaveCallback; /* Save event Callback */
- CsrWifiFsmOnEventFnPtr onErrorCallback; /* Error event Callback */
- CsrWifiFsmOnEventFnPtr onInvalidCallback; /* Invalid event Callback */
-#endif
-#ifdef CSR_WIFI_FSM_DUMP_ENABLE
- u16 masterTransitionNumber; /* Increments on every transition */
-#endif
-};
-
-#endif /* CSR_WIFI_FSM_TYPES_H */
diff --git a/drivers/staging/csr/csr_wifi_hip_card.h b/drivers/staging/csr/csr_wifi_hip_card.h
deleted file mode 100644
index bd47f60..0000000
--- a/drivers/staging/csr/csr_wifi_hip_card.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/*
- ******************************************************************************
- * FILE : csr_wifi_hip_card.h
- *
- * PURPOSE : Defines abstract interface for hardware specific functions.
- * Note, this is a different file from one of the same name in the
- * Windows driver.
- *
- *****************************************************************************
- */
-#ifndef __CARD_H__
-#define __CARD_H__
-
-#include "csr_wifi_hip_card_sdio.h"
-#include "csr_wifi_hip_signals.h"
-#include "csr_wifi_hip_unifi_udi.h"
-
-
-/*****************************************************************************
- * CardEnableInt -
- */
-CsrResult CardEnableInt(card_t *card);
-
-/*****************************************************************************
- * CardGenInt -
- */
-CsrResult CardGenInt(card_t *card);
-
-/*****************************************************************************
- * CardPendingInt -
- */
-CsrResult CardPendingInt(card_t *card, u8 *pintr);
-
-/*****************************************************************************
- * CardDisableInt -
- */
-CsrResult CardDisableInt(card_t *card);
-
-/*****************************************************************************
- * CardClearInt -
- */
-CsrResult CardClearInt(card_t *card);
-
-/*****************************************************************************
- * CardDisable -
- */
-void CardDisable(card_t *card);
-
-/*****************************************************************************
- * CardIntEnabled -
- */
-CsrResult CardIntEnabled(card_t *card, u8 *enabled);
-
-/*****************************************************************************
- * CardGetDataSlotSize
- */
-u16 CardGetDataSlotSize(card_t *card);
-
-/*****************************************************************************
- * CardWriteBulkData -
- */
-CsrResult CardWriteBulkData(card_t *card, card_signal_t *csptr, unifi_TrafficQueue queue);
-
-
-/*****************************************************************************
- * CardClearFromHostDataSlot -
- */
-void CardClearFromHostDataSlot(card_t *card, const s16 aSlotNum);
-
-#ifdef CSR_WIFI_REQUEUE_PACKET_TO_HAL
-/*****************************************************************************
- * CardClearFromHostDataSlotWithoutFreeingBulkData - Clear the data slot
- * without freeing the bulk data
- */
-
-void CardClearFromHostDataSlotWithoutFreeingBulkData(card_t *card, const s16 aSlotNum);
-#endif
-
-/*****************************************************************************
- * CardGetFreeFromHostDataSlots -
- */
-u16 CardGetFreeFromHostDataSlots(card_t *card);
-
-u16 CardAreAllFromHostDataSlotsEmpty(card_t *card);
-
-CsrResult card_start_processor(card_t *card, enum unifi_dbg_processors_select which);
-
-CsrResult card_wait_for_firmware_to_start(card_t *card, u32 *paddr);
-
-CsrResult unifi_dl_firmware(card_t *card, void *arg);
-CsrResult unifi_dl_patch(card_t *card, void *arg, u32 boot_ctrl);
-CsrResult unifi_do_loader_op(card_t *card, u32 op_addr, u8 opcode);
-void* unifi_dl_fw_read_start(card_t *card, s8 is_fw);
-
-CsrResult unifi_coredump_handle_request(card_t *card);
-
-CsrResult ConvertCsrSdioToCsrHipResult(card_t *card, CsrResult csrResult);
-#ifdef CSR_WIFI_HIP_DEBUG_OFFLINE
-void unifi_debug_log_to_buf(const char *fmt, ...);
-void unifi_debug_string_to_buf(const char *str);
-void unifi_debug_hex_to_buf(const char *buff, u16 length);
-#endif
-
-#endif /* __CARD_H__ */
diff --git a/drivers/staging/csr/csr_wifi_hip_card_sdio.c b/drivers/staging/csr/csr_wifi_hip_card_sdio.c
deleted file mode 100644
index d542532..0000000
--- a/drivers/staging/csr/csr_wifi_hip_card_sdio.c
+++ /dev/null
@@ -1,4001 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/*
- * ---------------------------------------------------------------------------
- * FILE: csr_wifi_hip_card_sdio.c
- *
- * PURPOSE: Implementation of the Card API for SDIO.
- *
- * NOTES:
- * CardInit() is called from the SDIO probe callback when a card is
- * inserted. This performs the basic SDIO initialisation, enabling i/o
- * etc.
- *
- * ---------------------------------------------------------------------------
- */
-#include <linux/slab.h>
-#include "csr_wifi_hip_unifi.h"
-#include "csr_wifi_hip_conversions.h"
-#include "csr_wifi_hip_unifiversion.h"
-#include "csr_wifi_hip_card.h"
-#include "csr_wifi_hip_card_sdio.h"
-#include "csr_wifi_hip_chiphelper.h"
-
-
-/* Time to wait between attempts to read MAILBOX0 */
-#define MAILBOX1_TIMEOUT 10 /* in millisecs */
-#define MAILBOX1_ATTEMPTS 200 /* 2 seconds */
-
-#define MAILBOX2_TIMEOUT 5 /* in millisecs */
-#define MAILBOX2_ATTEMPTS 10 /* 50ms */
-
-#define RESET_SETTLE_DELAY 25 /* in millisecs */
-
-static CsrResult card_init_slots(card_t *card);
-static CsrResult card_hw_init(card_t *card);
-static CsrResult firmware_present_in_flash(card_t *card);
-static void bootstrap_chip_hw(card_t *card);
-static CsrResult unifi_reset_hardware(card_t *card);
-static CsrResult unifi_hip_init(card_t *card);
-static CsrResult card_access_panic(card_t *card);
-static CsrResult unifi_read_chip_version(card_t *card);
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_alloc_card
- *
- * Allocate and initialise the card context structure.
- *
- * Arguments:
- * sdio Pointer to SDIO context pointer to pass to low
- * level i/o functions.
- * ospriv Pointer to O/S private struct to pass when calling
- * callbacks to the higher level system.
- *
- * Returns:
- * Pointer to card struct, which represents the driver context or
- * NULL if the allocation failed.
- * ---------------------------------------------------------------------------
- */
-card_t* unifi_alloc_card(CsrSdioFunction *sdio, void *ospriv)
-{
- card_t *card;
- u32 i;
-
-
- card = kzalloc(sizeof(card_t), GFP_KERNEL);
- if (card == NULL)
- {
- return NULL;
- }
-
- card->sdio_if = sdio;
- card->ospriv = ospriv;
-
- card->unifi_interrupt_seq = 1;
-
- /* Make these invalid. */
- card->proc_select = (u32)(-1);
- card->dmem_page = (u32)(-1);
- card->pmem_page = (u32)(-1);
-
- card->bh_reason_host = 0;
- card->bh_reason_unifi = 0;
-
- for (i = 0; i < sizeof(card->tx_q_paused_flag) / sizeof(card->tx_q_paused_flag[0]); i++)
- {
- card->tx_q_paused_flag[i] = 0;
- }
- card->memory_resources_allocated = 0;
-
- card->low_power_mode = UNIFI_LOW_POWER_DISABLED;
- card->periodic_wake_mode = UNIFI_PERIODIC_WAKE_HOST_DISABLED;
-
- card->host_state = UNIFI_HOST_STATE_AWAKE;
- card->intmode = CSR_WIFI_INTMODE_DEFAULT;
-
- /*
- * Memory resources for buffers are allocated when the chip is initialised
- * because we need configuration information from the firmware.
- */
-
- /*
- * Initialise wait queues and lists
- */
- card->fh_command_queue.q_body = card->fh_command_q_body;
- card->fh_command_queue.q_length = UNIFI_SOFT_COMMAND_Q_LENGTH;
-
- for (i = 0; i < UNIFI_NO_OF_TX_QS; i++)
- {
- card->fh_traffic_queue[i].q_body = card->fh_traffic_q_body[i];
- card->fh_traffic_queue[i].q_length = UNIFI_SOFT_TRAFFIC_Q_LENGTH;
- }
-
-
- /* Initialise mini-coredump pointers in case no coredump buffers
- * are requested by the OS layer.
- */
- card->request_coredump_on_reset = 0;
- card->dump_next_write = NULL;
- card->dump_cur_read = NULL;
- card->dump_buf = NULL;
-
-#ifdef UNIFI_DEBUG
- /* Determine offset of LSB in pointer for later alignment sanity check.
- * Synergy integer types have specific widths, which cause compiler
- * warnings when casting pointer types, e.g. on 64-bit systems.
- */
- {
- u32 val = 0x01234567;
-
- if (*((u8 *)&val) == 0x01)
- {
- card->lsb = sizeof(void *) - 1; /* BE */
- }
- else
- {
- card->lsb = 0; /* LE */
- }
- }
-#endif
- return card;
-} /* unifi_alloc_card() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_init_card
- *
- * Reset the hardware and perform HIP initialization
- *
- * Arguments:
- * card Pointer to card struct
- *
- * Returns:
- * CsrResult code
- * CSR_RESULT_SUCCESS if successful
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_init_card(card_t *card, s32 led_mask)
-{
- CsrResult r;
-
-
- if (card == NULL)
- {
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- r = unifi_init(card);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
-
- r = unifi_hip_init(card);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to start host protocol.\n");
- return r;
- }
-
- return CSR_RESULT_SUCCESS;
-}
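A rough sketch of how an OS glue layer might drive the two entry points above at probe time; the sdio and ospriv arguments are assumed to come from the SDIO/OS layer, and error handling is elided.

static void example_probe(CsrSdioFunction *sdio, void *ospriv)
{
    card_t *card = unifi_alloc_card(sdio, ospriv);

    if (card == NULL)
    {
        return;    /* allocation failed */
    }

    if (unifi_init_card(card, 0 /* led_mask */) != CSR_RESULT_SUCCESS)
    {
        /* teardown / report the error to the OS layer here */
    }
}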
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_init
- *
- * Init the hardware.
- *
- * Arguments:
- * card Pointer to card struct
- *
- * Returns:
- * CsrResult code
- * CSR_RESULT_SUCCESS if successful
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_init(card_t *card)
-{
- CsrResult r;
- CsrResult csrResult;
-
- if (card == NULL)
- {
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- /*
- * Disable the SDIO interrupts while initialising UniFi.
- * Re-enable them when f/w is running.
- */
- csrResult = CsrSdioInterruptDisable(card->sdio_if);
- if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
- {
- return CSR_WIFI_HIP_RESULT_NO_DEVICE;
- }
-
- /*
- * UniFi's PLL may start with a slow clock (~ 1 MHz) so initially
- * set the SDIO bus clock to a similar value or SDIO accesses may
- * fail.
- */
- csrResult = CsrSdioMaxBusClockFrequencySet(card->sdio_if, UNIFI_SDIO_CLOCK_SAFE_HZ);
- if (csrResult != CSR_RESULT_SUCCESS)
- {
- r = ConvertCsrSdioToCsrHipResult(card, csrResult);
- return r;
- }
- card->sdio_clock_speed = UNIFI_SDIO_CLOCK_SAFE_HZ;
-
- /*
- * Reset UniFi. Note, this only resets the WLAN function part of the chip,
- * the SDIO interface is not reset.
- */
- unifi_trace(card->ospriv, UDBG1, "Resetting UniFi\n");
- r = unifi_reset_hardware(card);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to reset UniFi\n");
- return r;
- }
-
- /* Reset the power save mode, to be active until the MLME-reset is complete */
- r = unifi_configure_low_power_mode(card,
- UNIFI_LOW_POWER_DISABLED, UNIFI_PERIODIC_WAKE_HOST_DISABLED);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to set power save mode\n");
- return r;
- }
-
- /*
- * Set initial value of page registers.
- * The page registers will be maintained by unifi_read...() and
- * unifi_write...().
- */
- card->proc_select = (u32)(-1);
- card->dmem_page = (u32)(-1);
- card->pmem_page = (u32)(-1);
- r = unifi_write_direct16(card, ChipHelper_HOST_WINDOW3_PAGE(card->helper) * 2, 0);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to write SHARED_DMEM_PAGE\n");
- return r;
- }
- r = unifi_write_direct16(card, ChipHelper_HOST_WINDOW2_PAGE(card->helper) * 2, 0);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to write PROG_MEM2_PAGE\n");
- return r;
- }
-
- /*
- * If the driver has reset UniFi due to previous SDIO failure, this may
- * have been due to a chip watchdog reset. In this case, the driver may
- * have requested a mini-coredump which needs to be captured now the
- * SDIO interface is alive.
- */
- (void)unifi_coredump_handle_request(card);
-
- /*
- * Probe to see if the UniFi has ROM/flash to boot from. CSR6xxx should do.
- */
- r = firmware_present_in_flash(card);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r == CSR_WIFI_HIP_RESULT_NOT_FOUND)
- {
- unifi_error(card->ospriv, "No firmware found\n");
- }
- else if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Probe for Flash failed\n");
- }
-
- return r;
-} /* unifi_init() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_download
- *
- * Load the firmware.
- *
- * Arguments:
- * card Pointer to card struct
- * led_mask Loader LED mask
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success
- * CsrResult error code on failure.
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_download(card_t *card, s32 led_mask)
-{
- CsrResult r;
- void *dlpriv;
-
- if (card == NULL)
- {
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- /* Set the loader led mask */
- card->loader_led_mask = led_mask;
-
- /* Get the firmware file information */
- unifi_trace(card->ospriv, UDBG1, "downloading firmware...\n");
-
- dlpriv = unifi_dl_fw_read_start(card, UNIFI_FW_STA);
- if (dlpriv == NULL)
- {
- return CSR_WIFI_HIP_RESULT_NOT_FOUND;
- }
-
- /* Download the firmware. */
- r = unifi_dl_firmware(card, dlpriv);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to download firmware\n");
- return r;
- }
-
- /* Free the firmware file information. */
- unifi_fw_read_stop(card->ospriv, dlpriv);
-
- return CSR_RESULT_SUCCESS;
-} /* unifi_download() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_hip_init
- *
- * This function performs the f/w initialisation sequence as described
- * in the Unifi Host Interface Protocol Specification.
- * It allocates memory for host-side slot data and signal queues.
- *
- * Arguments:
- * card Pointer to card struct
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success or else a CSR error code
- *
- * Notes:
- * The firmware must have been downloaded.
- * ---------------------------------------------------------------------------
- */
-static CsrResult unifi_hip_init(card_t *card)
-{
- CsrResult r;
- CsrResult csrResult;
-
- r = card_hw_init(card);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to establish communication with UniFi\n");
- return r;
- }
-#ifdef CSR_PRE_ALLOC_NET_DATA
- /* if there is any preallocated netdata left from the prev session free it now */
- prealloc_netdata_free(card);
-#endif
- /*
- * Allocate memory for host-side slot data and signal queues.
- * We need the config info read from the firmware to know how much
- * memory to allocate.
- */
- r = card_init_slots(card);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Init slots failed: %d\n", r);
- return r;
- }
-
- unifi_trace(card->ospriv, UDBG2, "Sending first UniFi interrupt\n");
-
- r = unifi_set_host_state(card, UNIFI_HOST_STATE_AWAKE);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
-
- /* Enable the SDIO interrupts now that the f/w is running. */
- csrResult = CsrSdioInterruptEnable(card->sdio_if);
- if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
- {
- return CSR_WIFI_HIP_RESULT_NO_DEVICE;
- }
-
- /* Signal the UniFi to start handling messages */
- r = CardGenInt(card);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
-
- return CSR_RESULT_SUCCESS;
-} /* unifi_hip_init() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * _build_sdio_config_data
- *
- * Unpack the SDIO configuration information from a buffer read from
- * UniFi into a host structure.
- * The data is byte-swapped for a big-endian host if necessary by the
- * UNPACK... macros.
- *
- * Arguments:
- * card Pointer to card struct
- * cfg_data Destination structure to unpack into.
- * cfg_data_buf Source buffer to read from. This should be the raw
- * data read from UniFi.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-static void _build_sdio_config_data(sdio_config_data_t *cfg_data,
- const u8 *cfg_data_buf)
-{
- s16 offset = 0;
-
- cfg_data->version = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
- offset += SIZEOF_UINT16;
-
- cfg_data->sdio_ctrl_offset = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
- offset += SIZEOF_UINT16;
-
- cfg_data->fromhost_sigbuf_handle = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
- offset += SIZEOF_UINT16;
-
- cfg_data->tohost_sigbuf_handle = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
- offset += SIZEOF_UINT16;
-
- cfg_data->num_fromhost_sig_frags = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
- offset += SIZEOF_UINT16;
-
- cfg_data->num_tohost_sig_frags = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
- offset += SIZEOF_UINT16;
-
- cfg_data->num_fromhost_data_slots = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
- offset += SIZEOF_UINT16;
-
- cfg_data->num_tohost_data_slots = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
- offset += SIZEOF_UINT16;
-
- cfg_data->data_slot_size = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
- offset += SIZEOF_UINT16;
-
- cfg_data->initialised = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
- offset += SIZEOF_UINT16;
-
- cfg_data->overlay_size = CSR_GET_UINT32_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
- offset += SIZEOF_UINT32;
-
- cfg_data->data_slot_round = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
- offset += SIZEOF_UINT16;
-
- cfg_data->sig_frag_size = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
- offset += SIZEOF_UINT16;
-
- cfg_data->tohost_signal_padding = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cfg_data_buf + offset);
-} /* _build_sdio_config_data() */
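For reference, the CSR_GET_UINT16/32_FROM_LITTLE_ENDIAN macros used above assemble values from consecutive little-endian bytes. The helpers below sketch the equivalent operation; this is an assumption about the macros' behaviour, not their actual definition.

static inline u16 example_get_le16(const u8 *p)
{
    return (u16)(p[0] | ((u16)p[1] << 8));
}

static inline u32 example_get_le32(const u8 *p)
{
    return (u32)p[0] | ((u32)p[1] << 8) | ((u32)p[2] << 16) | ((u32)p[3] << 24);
}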
-
-
-/*
- * - Function ----------------------------------------------------------------
- * card_hw_init()
- *
- * Perform the initialisation procedure described in the UniFi Host
- * Interface Protocol document (section 3.3.8) and read the run-time
- * configuration information from the UniFi. This is stuff like number
- * configuration information from the UniFi, such as the number
- * of bulk data slots.
- * The card enumeration and SD initialisation has already been done by
- * the SDIO library, see card_sdio_init().
- *
- * The initialisation is done when firmware is ready, i.e. this may need
- * to be called after a f/w download operation.
- *
- * The initialisation procedure goes like this:
- * - Wait for UniFi to start-up by polling SHARED_MAILBOX1
- * - Find the symbol table and look up SLT_SDIO_SLOT_CONFIG
- * - Read the config structure
- * - Check the "SDIO initialised" flag, if not zero do a h/w reset and
- * start again
- * - Decide the number of bulk data slots to allocate, allocate them and
- * set "SDIO initialised" flag (and generate an interrupt) to say so.
- *
- * Arguments:
- * card Pointer to card struct
- *
- * Returns:
- *      CSR_RESULT_SUCCESS on success,
- * a CSR error code on failure
- *
- * Notes:
- * All data in the f/w is stored in a little endian format, without any
- * padding bytes. Every read from this memory has to be transformed in
- *      host (cpu specific) format, before it is stored in the driver's parameters
- *      and/or structures. Although unifi_card_read16() and unifi_read32() do perform
- * the conversion internally, unifi_readn() does not.
- * ---------------------------------------------------------------------------
- */
-static CsrResult card_hw_init(card_t *card)
-{
- u32 slut_address;
- u16 initialised;
- u16 finger_print;
- symbol_t slut;
- sdio_config_data_t *cfg_data;
- u8 cfg_data_buf[SDIO_CONFIG_DATA_SIZE];
- CsrResult r;
- void *dlpriv;
- s16 major, minor;
- s16 search_4slut_again;
- CsrResult csrResult;
-
- /*
- * The device revision from the TPLMID_MANF and TPLMID_CARD fields
- * of the CIS are available as
- * card->sdio_if->pDevice->ManfID
- * card->sdio_if->pDevice->AppID
- */
-
- /*
- * Run in a loop so we can patch.
- */
- do
- {
- /* Reset these each time around the loop. */
- search_4slut_again = 0;
- cfg_data = NULL;
-
- r = card_wait_for_firmware_to_start(card, &slut_address);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Firmware hasn't started\n");
- return r;
- }
- unifi_trace(card->ospriv, UDBG4, "SLUT addr 0x%lX\n", slut_address);
-
- /*
- * Firmware has started, but doesn't know full clock configuration yet
- * as some of the information may be in the MIB. Therefore we set an
- * initial SDIO clock speed, faster than UNIFI_SDIO_CLOCK_SAFE_HZ, for
- * the patch download and subsequent firmware initialisation, and
- * full speed UNIFI_SDIO_CLOCK_MAX_HZ will be set once the f/w tells us
- * that it is ready.
- */
- csrResult = CsrSdioMaxBusClockFrequencySet(card->sdio_if, UNIFI_SDIO_CLOCK_INIT_HZ);
- if (csrResult != CSR_RESULT_SUCCESS)
- {
- r = ConvertCsrSdioToCsrHipResult(card, csrResult);
- return r;
- }
- card->sdio_clock_speed = UNIFI_SDIO_CLOCK_INIT_HZ;
-
- /*
- * Check the SLUT fingerprint.
- * The slut_address is a generic pointer so we must use unifi_card_read16().
- */
- unifi_trace(card->ospriv, UDBG4, "Looking for SLUT finger print\n");
- finger_print = 0;
- r = unifi_card_read16(card, slut_address, &finger_print);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to read SLUT finger print\n");
- return r;
- }
-
- if (finger_print != SLUT_FINGERPRINT)
- {
- unifi_error(card->ospriv, "Failed to find Symbol lookup table fingerprint\n");
- return CSR_RESULT_FAILURE;
- }
-
-        /* Symbol table starts immediately after the fingerprint */
- slut_address += 2;
-
- /* Search the table until either the end marker is found, or the
- * loading of patch firmware invalidates the current table.
- */
- while (!search_4slut_again)
- {
- u16 s;
- u32 l;
-
- r = unifi_card_read16(card, slut_address, &s);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
- slut_address += 2;
-
- if (s == CSR_SLT_END)
- {
- unifi_trace(card->ospriv, UDBG3, " found CSR_SLT_END\n");
- break;
- }
-
- r = unifi_read32(card, slut_address, &l);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
- slut_address += 4;
-
- slut.id = s;
- slut.obj = l;
-
- unifi_trace(card->ospriv, UDBG3, " found SLUT id %02d.%08lx\n", slut.id, slut.obj);
- switch (slut.id)
- {
- case CSR_SLT_SDIO_SLOT_CONFIG:
- cfg_data = &card->config_data;
- /*
- * unifi_card_readn reads n bytes from the card, where data is stored
- * in a little endian format, without any padding bytes. So, we
- * can not just pass the cfg_data pointer or use the
- * sizeof(sdio_config_data_t) since the structure in the host can
- * be big endian formatted or have padding bytes for alignment.
- * We use a char buffer to read the data from the card.
- */
- r = unifi_card_readn(card, slut.obj, cfg_data_buf, SDIO_CONFIG_DATA_SIZE);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to read config data\n");
- return r;
- }
- /* .. and then we copy the data to the host structure */
- _build_sdio_config_data(cfg_data, cfg_data_buf);
-
-                    /* Make sure the from-host data slots are what we expect:
-                       we reserve 2 for commands and there should be at least
-                       1 left for each access category */
- if ((cfg_data->num_fromhost_data_slots < UNIFI_RESERVED_COMMAND_SLOTS)
- || (cfg_data->num_fromhost_data_slots - UNIFI_RESERVED_COMMAND_SLOTS) / UNIFI_NO_OF_TX_QS == 0)
- {
- unifi_error(card->ospriv, "From host data slots %d\n", cfg_data->num_fromhost_data_slots);
- unifi_error(card->ospriv, "need to be (queues * x + 2) (UNIFI_RESERVED_COMMAND_SLOTS for commands)\n");
- return CSR_RESULT_FAILURE;
- }
-
- /* Configure SDIO to-block-size padding */
- if (card->sdio_io_block_pad)
- {
- /*
- * Firmware limits the maximum padding size via data_slot_round.
- * Therefore when padding to whole block sizes, the block size
- * must be configured correctly by adjusting CSR_WIFI_HIP_SDIO_BLOCK_SIZE.
- */
- if (cfg_data->data_slot_round < card->sdio_io_block_size)
- {
- unifi_error(card->ospriv,
- "Configuration error: Block size of %d exceeds f/w data_slot_round of %d\n",
- card->sdio_io_block_size, cfg_data->data_slot_round);
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- /*
- * To force the To-Host signals to be rounded up to the SDIO block
- * size, we need to write the To-Host Signal Padding Fragments
- * field of the SDIO configuration in UniFi.
- */
- if ((card->sdio_io_block_size % cfg_data->sig_frag_size) != 0)
- {
- unifi_error(card->ospriv, "Configuration error: Can not pad to-host signals.\n");
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
- cfg_data->tohost_signal_padding = (u16) (card->sdio_io_block_size / cfg_data->sig_frag_size);
- unifi_info(card->ospriv, "SDIO block size %d requires %d padding chunks\n",
- card->sdio_io_block_size, cfg_data->tohost_signal_padding);
- r = unifi_card_write16(card, slut.obj + SDIO_TO_HOST_SIG_PADDING_OFFSET, cfg_data->tohost_signal_padding);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to write To-Host Signal Padding Fragments\n");
- return r;
- }
- }
-
- /* Reconstruct the Generic Pointer address of the
- * SDIO Control Data Struct.
- */
- card->sdio_ctrl_addr = cfg_data->sdio_ctrl_offset | (UNIFI_SH_DMEM << 24);
- card->init_flag_addr = slut.obj + SDIO_INIT_FLAG_OFFSET;
- break;
-
- case CSR_SLT_BUILD_ID_NUMBER:
- {
- u32 n;
- r = unifi_read32(card, slut.obj, &n);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to read build id\n");
- return r;
- }
- card->build_id = n;
- }
- break;
-
- case CSR_SLT_BUILD_ID_STRING:
- r = unifi_readnz(card, slut.obj, card->build_id_string,
- sizeof(card->build_id_string));
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to read build string\n");
- return r;
- }
- break;
-
- case CSR_SLT_PERSISTENT_STORE_DB:
- break;
-
- case CSR_SLT_BOOT_LOADER_CONTROL:
-
- /* This command copies most of the station firmware
- * image from ROM into program RAM. It also clears
-                     * out the zeroed data and sets up the initialised
- * data. */
- r = unifi_do_loader_op(card, slut.obj + 6, UNIFI_BOOT_LOADER_LOAD_STA);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to write loader load image command\n");
- return r;
- }
-
- dlpriv = unifi_dl_fw_read_start(card, UNIFI_FW_STA);
-
- /* dlpriv might be NULL, we still need to do the do_loader_op step. */
- if (dlpriv != NULL)
- {
- /* Download the firmware. */
- r = unifi_dl_patch(card, dlpriv, slut.obj);
-
- /* Free the firmware file information. */
- unifi_fw_read_stop(card->ospriv, dlpriv);
-
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to patch firmware\n");
- return r;
- }
- }
-
- /* This command starts the firmware image that we want (the
- * station by default) with any patches required applied. */
- r = unifi_do_loader_op(card, slut.obj + 6, UNIFI_BOOT_LOADER_RESTART);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to write loader restart command\n");
- return r;
- }
-
- /* The now running patch f/w defines a new SLUT data structure -
- * the current one is no longer valid. We must drop out of the
- * processing loop and enumerate the new SLUT (which may appear
- * at a different offset).
- */
- search_4slut_again = 1;
- break;
-
- case CSR_SLT_PANIC_DATA_PHY:
- card->panic_data_phy_addr = slut.obj;
- break;
-
- case CSR_SLT_PANIC_DATA_MAC:
- card->panic_data_mac_addr = slut.obj;
- break;
-
- default:
- /* do nothing */
- break;
- }
- } /* while */
- } while (search_4slut_again);
-
- /* Did we find the Config Data ? */
- if (cfg_data == NULL)
- {
- unifi_error(card->ospriv, "Failed to find SDIO_SLOT_CONFIG Symbol\n");
- return CSR_RESULT_FAILURE;
- }
-
- /*
-     * Has this card already been initialised?
- * If so, return an error so we do a h/w reset and start again.
- */
- r = unifi_card_read16(card, card->init_flag_addr, &initialised);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to read init flag at %08lx\n",
- card->init_flag_addr);
- return r;
- }
- if (initialised != 0)
- {
- return CSR_RESULT_FAILURE;
- }
-
-
- /*
- * Now check the UniFi firmware version
- */
- major = (cfg_data->version >> 8) & 0xFF;
- minor = cfg_data->version & 0xFF;
- unifi_info(card->ospriv, "UniFi f/w protocol version %d.%d (driver %d.%d)\n",
- major, minor,
- UNIFI_HIP_MAJOR_VERSION, UNIFI_HIP_MINOR_VERSION);
-
- unifi_info(card->ospriv, "Firmware build %u: %s\n",
- card->build_id, card->build_id_string);
-
- if (major != UNIFI_HIP_MAJOR_VERSION)
- {
- unifi_error(card->ospriv, "UniFi f/w protocol major version (%d) is different from driver (v%d.%d)\n",
- major, UNIFI_HIP_MAJOR_VERSION, UNIFI_HIP_MINOR_VERSION);
-#ifndef CSR_WIFI_DISABLE_HIP_VERSION_CHECK
- return CSR_RESULT_FAILURE;
-#endif
- }
- if (minor < UNIFI_HIP_MINOR_VERSION)
- {
- unifi_error(card->ospriv, "UniFi f/w protocol version (v%d.%d) is older than minimum required by driver (v%d.%d).\n",
- major, minor,
- UNIFI_HIP_MAJOR_VERSION, UNIFI_HIP_MINOR_VERSION);
-#ifndef CSR_WIFI_DISABLE_HIP_VERSION_CHECK
- return CSR_RESULT_FAILURE;
-#endif
- }
-
- /* Read panic codes from a previous firmware panic. If the firmware has
- * not panicked since power was applied (e.g. power-off hard reset)
- * the stored panic codes will not be updated.
- */
- unifi_read_panic(card);
-
- return CSR_RESULT_SUCCESS;
-} /* card_hw_init() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * card_wait_for_unifi_to_reset
- *
- * Waits for a reset to complete by polling the WLAN function enable
- * bit (which is cleared on reset).
- *
- * Arguments:
- * card Pointer to card struct
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, CSR error code on failure.
- * ---------------------------------------------------------------------------
- */
-static CsrResult card_wait_for_unifi_to_reset(card_t *card)
-{
- s16 i;
- CsrResult r;
- u8 io_enable;
- CsrResult csrResult;
-
- r = CSR_RESULT_SUCCESS;
- for (i = 0; i < MAILBOX2_ATTEMPTS; i++)
- {
- unifi_trace(card->ospriv, UDBG1, "waiting for reset to complete, attempt %d\n", i);
- if (card->chip_id > SDIO_CARD_ID_UNIFI_2)
- {
- /* It's quite likely that this read will timeout for the
- * first few tries - especially if we have reset via
- * DBG_RESET.
- */
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
- unifi_debug_log_to_buf("m0@%02X=", SDIO_IO_READY);
-#endif
- csrResult = CsrSdioF0Read8(card->sdio_if, SDIO_IO_READY, &io_enable);
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
- if (csrResult != CSR_RESULT_SUCCESS)
- {
- unifi_debug_log_to_buf("error=%X\n", csrResult);
- }
- else
- {
- unifi_debug_log_to_buf("%X\n", io_enable);
- }
-#endif
- if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
- {
- return CSR_WIFI_HIP_RESULT_NO_DEVICE;
- }
- r = CSR_RESULT_SUCCESS;
- if (csrResult != CSR_RESULT_SUCCESS)
- {
- r = ConvertCsrSdioToCsrHipResult(card, csrResult);
- }
- }
- else
- {
- r = sdio_read_f0(card, SDIO_IO_ENABLE, &io_enable);
- }
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r == CSR_RESULT_SUCCESS)
- {
- u16 mbox2;
- s16 enabled = io_enable & (1 << card->function);
-
- if (!enabled)
- {
- unifi_trace(card->ospriv, UDBG1,
- "Reset complete (function %d is disabled) in ~ %u msecs\n",
- card->function, i * MAILBOX2_TIMEOUT);
-
- /* Enable WLAN function and verify MAILBOX2 is zero'd */
- csrResult = CsrSdioFunctionEnable(card->sdio_if);
- if (csrResult != CSR_RESULT_SUCCESS)
- {
- r = ConvertCsrSdioToCsrHipResult(card, csrResult);
- unifi_error(card->ospriv, "CsrSdioFunctionEnable failed %d\n", r);
- break;
- }
- }
-
- r = unifi_read_direct16(card, ChipHelper_SDIO_HIP_HANDSHAKE(card->helper) * 2, &mbox2);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "read HIP_HANDSHAKE failed %d\n", r);
- break;
- }
- if (mbox2 != 0)
- {
- unifi_error(card->ospriv, "MAILBOX2 non-zero after reset (mbox2 = %04x)\n", mbox2);
- r = CSR_RESULT_FAILURE;
- }
- break;
- }
- else
- {
- if (card->chip_id > SDIO_CARD_ID_UNIFI_2)
- {
- /* We ignore read failures for the first few reads,
- * they are probably benign. */
- if (i > MAILBOX2_ATTEMPTS / 4)
- {
- unifi_trace(card->ospriv, UDBG1, "Failed to read CCCR IO Ready register while polling for reset\n");
- }
- }
- else
- {
- unifi_trace(card->ospriv, UDBG1, "Failed to read CCCR IO Enable register while polling for reset\n");
- }
- }
- CsrThreadSleep(MAILBOX2_TIMEOUT);
- }
-
- if (r == CSR_RESULT_SUCCESS && i == MAILBOX2_ATTEMPTS)
- {
- unifi_trace(card->ospriv, UDBG1, "Timeout waiting for UniFi to complete reset\n");
- r = CSR_RESULT_FAILURE;
- }
-
- return r;
-} /* card_wait_for_unifi_to_reset() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * card_wait_for_unifi_to_disable
- *
- * Waits for the function to become disabled by polling the
- * IO_READY bit.
- *
- * Arguments:
- * card Pointer to card struct
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, CSR error code on failure.
- *
- * Notes: This function can only be used with
- * card->chip_id > SDIO_CARD_ID_UNIFI_2
- * ---------------------------------------------------------------------------
- */
-static CsrResult card_wait_for_unifi_to_disable(card_t *card)
-{
- s16 i;
- CsrResult r;
- u8 io_enable;
- CsrResult csrResult;
-
- if (card->chip_id <= SDIO_CARD_ID_UNIFI_2)
- {
- unifi_error(card->ospriv,
- "Function reset method not supported for chip_id=%d\n",
- card->chip_id);
- return CSR_RESULT_FAILURE;
- }
-
- r = CSR_RESULT_SUCCESS;
- for (i = 0; i < MAILBOX2_ATTEMPTS; i++)
- {
- unifi_trace(card->ospriv, UDBG1, "waiting for disable to complete, attempt %d\n", i);
-
- /*
- * It's quite likely that this read will timeout for the
- * first few tries - especially if we have reset via
- * DBG_RESET.
- */
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
- unifi_debug_log_to_buf("r0@%02X=", SDIO_IO_READY);
-#endif
- csrResult = CsrSdioF0Read8(card->sdio_if, SDIO_IO_READY, &io_enable);
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
- if (csrResult != CSR_RESULT_SUCCESS)
- {
- unifi_debug_log_to_buf("error=%X\n", csrResult);
- }
- else
- {
- unifi_debug_log_to_buf("%X\n", io_enable);
- }
-#endif
- if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
- {
- return CSR_WIFI_HIP_RESULT_NO_DEVICE;
- }
- if (csrResult == CSR_RESULT_SUCCESS)
- {
- s16 enabled = io_enable & (1 << card->function);
- r = CSR_RESULT_SUCCESS;
- if (!enabled)
- {
- unifi_trace(card->ospriv, UDBG1,
- "Disable complete (function %d is disabled) in ~ %u msecs\n",
- card->function, i * MAILBOX2_TIMEOUT);
-
- break;
- }
- }
- else
- {
- /*
- * We ignore read failures for the first few reads,
- * they are probably benign.
- */
- r = ConvertCsrSdioToCsrHipResult(card, csrResult);
- if (i > (MAILBOX2_ATTEMPTS / 4))
- {
- unifi_trace(card->ospriv, UDBG1,
- "Failed to read CCCR IO Ready register while polling for disable\n");
- }
- }
- CsrThreadSleep(MAILBOX2_TIMEOUT);
- }
-
- if ((r == CSR_RESULT_SUCCESS) && (i == MAILBOX2_ATTEMPTS))
- {
- unifi_trace(card->ospriv, UDBG1, "Timeout waiting for UniFi to complete disable\n");
- r = CSR_RESULT_FAILURE;
- }
-
- return r;
-} /* card_wait_for_unifi_to_disable() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * card_wait_for_firmware_to_start
- *
- * Polls the MAILBOX1 register for a non-zero value.
- * Then reads MAILBOX0 and forms the two values into a 32-bit address
- * which is returned to the caller.
- *
- * Arguments:
- * card Pointer to card struct
- * paddr Pointer to receive the UniFi address formed
- * by concatenating MAILBOX1 and MAILBOX0.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, CSR error code on failure.
- * ---------------------------------------------------------------------------
- */
-CsrResult card_wait_for_firmware_to_start(card_t *card, u32 *paddr)
-{
- s32 i;
- u16 mbox0, mbox1;
- CsrResult r;
-
- /*
- * Wait for UniFi to initialise its data structures by polling
- * the SHARED_MAILBOX1 register.
- * Experience shows this is typically 120ms.
- */
- CsrThreadSleep(MAILBOX1_TIMEOUT);
-
- mbox1 = 0;
- unifi_trace(card->ospriv, UDBG1, "waiting for MAILBOX1 to be non-zero...\n");
- for (i = 0; i < MAILBOX1_ATTEMPTS; i++)
- {
- r = unifi_read_direct16(card, ChipHelper_MAILBOX1(card->helper) * 2, &mbox1);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- /* These reads can fail if UniFi isn't up yet, so try again */
- unifi_warning(card->ospriv, "Failed to read UniFi Mailbox1 register\n");
- }
-
- if ((r == CSR_RESULT_SUCCESS) && (mbox1 != 0))
- {
- unifi_trace(card->ospriv, UDBG1, "MAILBOX1 ready (0x%04X) in %u millisecs\n",
- mbox1, i * MAILBOX1_TIMEOUT);
-
- /* Read the MAILBOX1 again in case we caught the value as it
- * changed. */
- r = unifi_read_direct16(card, ChipHelper_MAILBOX1(card->helper) * 2, &mbox1);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to read UniFi Mailbox1 register for second time\n");
- return r;
- }
- unifi_trace(card->ospriv, UDBG1, "MAILBOX1 value=0x%04X\n", mbox1);
-
- break;
- }
-
- CsrThreadSleep(MAILBOX1_TIMEOUT);
- if ((i % 100) == 99)
- {
- unifi_trace(card->ospriv, UDBG2, "MAILBOX1 not ready (0x%X), still trying...\n", mbox1);
- }
- }
-
- if ((r == CSR_RESULT_SUCCESS) && (mbox1 == 0))
- {
- unifi_trace(card->ospriv, UDBG1, "Timeout waiting for firmware to start, Mailbox1 still 0 after %d ms\n",
- MAILBOX1_ATTEMPTS * MAILBOX1_TIMEOUT);
- return CSR_RESULT_FAILURE;
- }
-
-
- /*
- * Complete the reset handshake by setting MAILBOX2 to 0xFFFF
- */
- r = unifi_write_direct16(card, ChipHelper_SDIO_HIP_HANDSHAKE(card->helper) * 2, 0xFFFF);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to write f/w startup handshake to MAILBOX2\n");
- return r;
- }
-
-
- /*
- * Read the Symbol Look Up Table (SLUT) offset.
- * Top 16 bits are in mbox1, read the lower 16 bits from mbox0.
- */
- mbox0 = 0;
- r = unifi_read_direct16(card, ChipHelper_MAILBOX0(card->helper) * 2, &mbox0);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to read UniFi Mailbox0 register\n");
- return r;
- }
-
- *paddr = (((u32)mbox1 << 16) | mbox0);
-
- return CSR_RESULT_SUCCESS;
-} /* card_wait_for_firmware_to_start() */
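A quick worked example of the address composition at the end of the function, with invented register values: if MAILBOX1 reads 0x0001 and MAILBOX0 reads 0x2340, the returned address is *paddr = ((u32)0x0001 << 16) | 0x2340 = 0x00012340.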
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_capture_panic
- *
- * Attempt to capture panic codes from the firmware. This may involve
- * warm reset of the chip to regain access following a watchdog reset.
- *
- * Arguments:
- * card Pointer to card struct
- *
- * Returns:
- * CSR_RESULT_SUCCESS if panic codes were captured, or none available
- * CSR_RESULT_FAILURE if the driver could not access function 1
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_capture_panic(card_t *card)
-{
-
-    /* The firmware must previously have initialised so that the panic addresses
-     * could be read from the SLUT
- */
- if (!card->panic_data_phy_addr || !card->panic_data_mac_addr)
- {
- return CSR_RESULT_SUCCESS;
- }
-
- /* Ensure we can access function 1 following a panic/watchdog reset */
- if (card_access_panic(card) == CSR_RESULT_SUCCESS)
- {
- /* Read the panic codes */
- unifi_read_panic(card);
- }
- else
- {
- unifi_info(card->ospriv, "Unable to read panic codes");
- }
-
- return CSR_RESULT_SUCCESS;
-}
-
-
-/*
- * ---------------------------------------------------------------------------
- * card_access_panic
- * Attempt to read the WLAN SDIO function in order to read panic codes
- * and perform various reset steps to regain access if the read fails.
- *
- * Arguments:
- * card Pointer to card struct
- *
- * Returns:
- * CSR_RESULT_SUCCESS if panic codes can be read
- * CSR error code if panic codes can not be read
- * ---------------------------------------------------------------------------
- */
-static CsrResult card_access_panic(card_t *card)
-{
- u16 data_u16 = 0;
- s32 i;
- CsrResult r, sr;
-
- /* A chip version of zero means that the version never got successfully read
- * during reset. In this case give up because it will not be possible to
- * verify the chip version.
- */
- if (!card->chip_version)
- {
- unifi_info(card->ospriv, "Unknown chip version\n");
- return CSR_RESULT_FAILURE;
- }
-
- /* Ensure chip is awake or access to function 1 will fail */
- r = unifi_set_host_state(card, UNIFI_HOST_STATE_AWAKE);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "unifi_set_host_state() failed %d\n", r);
- return CSR_RESULT_FAILURE; /* Card is probably unpowered */
- }
- CsrThreadSleep(20);
-
- for (i = 0; i < 3; i++)
- {
- sr = CsrSdioRead16(card->sdio_if, CHIP_HELPER_UNIFI_GBL_CHIP_VERSION * 2, &data_u16);
- if (sr != CSR_RESULT_SUCCESS || data_u16 != card->chip_version)
- {
- unifi_info(card->ospriv, "Failed to read valid chip version sr=%d (0x%04x want 0x%04x) try %d\n",
- sr, data_u16, card->chip_version, i);
-
- /* Set clock speed low */
- sr = CsrSdioMaxBusClockFrequencySet(card->sdio_if, UNIFI_SDIO_CLOCK_SAFE_HZ);
- if (sr != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "CsrSdioMaxBusClockFrequencySet() failed1 %d\n", sr);
- r = ConvertCsrSdioToCsrHipResult(card, sr);
- }
- card->sdio_clock_speed = UNIFI_SDIO_CLOCK_SAFE_HZ;
-
- /* First try re-enabling function in case a f/w watchdog reset disabled it */
- if (i == 0)
- {
- unifi_info(card->ospriv, "Try function enable\n");
- sr = CsrSdioFunctionEnable(card->sdio_if);
- if (sr != CSR_RESULT_SUCCESS)
- {
- r = ConvertCsrSdioToCsrHipResult(card, sr);
- unifi_error(card->ospriv, "CsrSdioFunctionEnable failed %d (HIP %d)\n", sr, r);
- }
- continue;
- }
-
- /* Second try, set awake */
- unifi_info(card->ospriv, "Try set awake\n");
-
- /* Ensure chip is awake */
- r = unifi_set_host_state(card, UNIFI_HOST_STATE_AWAKE);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "unifi_set_host_state() failed2 %d\n", r);
- }
-
- /* Set clock speed low in case setting the host state raised it, which
- * would only happen if host state was previously TORPID
- */
- sr = CsrSdioMaxBusClockFrequencySet(card->sdio_if, UNIFI_SDIO_CLOCK_SAFE_HZ);
- if (sr != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "CsrSdioMaxBusClockFrequencySet() failed2 %d\n", sr);
- }
- card->sdio_clock_speed = UNIFI_SDIO_CLOCK_SAFE_HZ;
-
- if (i == 1)
- {
- continue;
- }
-
- /* Perform a s/w reset to preserve as much of the card state as possible
- * (mainly the preserve RAM). The context will be lost for coredump, but as we
- * were unable to access the WLAN function to read the panic codes, the coredump
- * would also have failed without a reset.
- */
- unifi_info(card->ospriv, "Try s/w reset\n");
-
- r = unifi_card_hard_reset(card);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "unifi_card_hard_reset() failed %d\n", r);
- }
- }
- else
- {
- if (i > 0)
- {
- unifi_info(card->ospriv, "Read chip version 0x%x after %d retries\n", data_u16, i);
- }
- break;
- }
- }
-
- r = ConvertCsrSdioToCsrHipResult(card, sr);
- return r;
-}
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_read_panic
- * Reads, saves and prints the panic codes stored by the firmware in UniFi's
- * preserve RAM for the last panic that occurred since the chip was powered.
- * Nothing is saved if the panic codes read back as zero.
- *
- * Arguments:
- * card Pointer to card struct
- *
- * Returns:
- * ---------------------------------------------------------------------------
- */
-void unifi_read_panic(card_t *card)
-{
- CsrResult r;
- u16 p_code, p_arg;
-
- /* The firmware must have previously initialised, so that the panic addresses
- * have been read from the SLUT
- */
- if (!card->panic_data_phy_addr || !card->panic_data_mac_addr)
- {
- return;
- }
-
- /* Get the panic data from PHY */
- r = unifi_card_read16(card, card->panic_data_phy_addr, &p_code);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "capture_panic: unifi_read16 %08x failed %d\n", card->panic_data_phy_addr, r);
- p_code = 0;
- }
- if (p_code)
- {
- r = unifi_card_read16(card, card->panic_data_phy_addr + 2, &p_arg);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "capture_panic: unifi_read16 %08x failed %d\n", card->panic_data_phy_addr + 2, r);
- }
- unifi_error(card->ospriv, "Last UniFi PHY PANIC %04x arg %04x\n", p_code, p_arg);
- card->last_phy_panic_code = p_code;
- card->last_phy_panic_arg = p_arg;
- }
-
- /* Get the panic data from MAC */
- r = unifi_card_read16(card, card->panic_data_mac_addr, &p_code);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "capture_panic: unifi_read16 %08x failed %d\n", card->panic_data_mac_addr, r);
- p_code = 0;
- }
- if (p_code)
- {
- r = unifi_card_read16(card, card->panic_data_mac_addr + 2, &p_arg);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "capture_panic: unifi_read16 %08x failed %d\n", card->panic_data_mac_addr + 2, r);
- }
- unifi_error(card->ospriv, "Last UniFi MAC PANIC %04x arg %04x\n", p_code, p_arg);
- card->last_mac_panic_code = p_code;
- card->last_mac_panic_arg = p_arg;
- }
-
-}
-
-
-/*
- * ---------------------------------------------------------------------------
- * card_allocate_memory_resources
- *
- * Allocates memory for the from-host, to-host bulk data slots,
- * soft queue buffers and bulk data buffers.
- *
- * Arguments:
- * card Pointer to card struct
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, CSR error code on failure.
- * ---------------------------------------------------------------------------
- */
-static CsrResult card_allocate_memory_resources(card_t *card)
-{
- s16 n, i, k, r;
- sdio_config_data_t *cfg_data;
-
- /* Reset any state carried forward from a previous life */
- card->fh_command_queue.q_rd_ptr = 0;
- card->fh_command_queue.q_wr_ptr = 0;
- (void)scnprintf(card->fh_command_queue.name, UNIFI_QUEUE_NAME_MAX_LENGTH,
- "fh_cmd_q");
- for (i = 0; i < UNIFI_NO_OF_TX_QS; i++)
- {
- card->fh_traffic_queue[i].q_rd_ptr = 0;
- card->fh_traffic_queue[i].q_wr_ptr = 0;
- (void)scnprintf(card->fh_traffic_queue[i].name,
- UNIFI_QUEUE_NAME_MAX_LENGTH, "fh_data_q%d", i);
- }
-#ifndef CSR_WIFI_HIP_TA_DISABLE
- unifi_ta_sampling_init(card);
-#endif
- /* Convenience short-cut */
- cfg_data = &card->config_data;
-
- /*
- * Allocate memory for the from-host and to-host signal buffers.
- */
- card->fh_buffer.buf = kmalloc(UNIFI_FH_BUF_SIZE, GFP_KERNEL);
- if (card->fh_buffer.buf == NULL)
- {
- unifi_error(card->ospriv, "Failed to allocate memory for F-H signals\n");
- return CSR_WIFI_HIP_RESULT_NO_MEMORY;
- }
- card->fh_buffer.bufsize = UNIFI_FH_BUF_SIZE;
- card->fh_buffer.ptr = card->fh_buffer.buf;
- card->fh_buffer.count = 0;
-
- card->th_buffer.buf = kmalloc(UNIFI_FH_BUF_SIZE, GFP_KERNEL);
- if (card->th_buffer.buf == NULL)
- {
- unifi_error(card->ospriv, "Failed to allocate memory for T-H signals\n");
- return CSR_WIFI_HIP_RESULT_NO_MEMORY;
- }
- card->th_buffer.bufsize = UNIFI_FH_BUF_SIZE;
- card->th_buffer.ptr = card->th_buffer.buf;
- card->th_buffer.count = 0;
-
-
- /*
- * Allocate memory for the from-host and to-host bulk data slots.
- * This is done as separate kmallocs because lots of smaller
- * allocations are more likely to succeed than one huge one.
- */
-
- /* Allocate memory for the array of pointers */
- n = cfg_data->num_fromhost_data_slots;
-
- unifi_trace(card->ospriv, UDBG3, "Alloc from-host resources, %d slots.\n", n);
- card->from_host_data = kmalloc(n * sizeof(slot_desc_t), GFP_KERNEL);
- if (card->from_host_data == NULL)
- {
- unifi_error(card->ospriv, "Failed to allocate memory for F-H bulk data array\n");
- return CSR_WIFI_HIP_RESULT_NO_MEMORY;
- }
-
- /* Initialise from-host bulk data slots */
- for (i = 0; i < n; i++)
- {
- UNIFI_INIT_BULK_DATA(&card->from_host_data[i].bd);
- }
-
- /* Allocate memory for the array used for slot host tag mapping */
- card->fh_slot_host_tag_record = kmalloc(n * sizeof(u32), GFP_KERNEL);
-
- if (card->fh_slot_host_tag_record == NULL)
- {
- unifi_error(card->ospriv, "Failed to allocate memory for F-H slot host tag mapping array\n");
- return CSR_WIFI_HIP_RESULT_NO_MEMORY;
- }
-
- /* Initialise host tag entries for from-host bulk data slots */
- for (i = 0; i < n; i++)
- {
- card->fh_slot_host_tag_record[i] = CSR_WIFI_HIP_RESERVED_HOST_TAG;
- }
-
-
- /* Allocate memory for the array of pointers */
- n = cfg_data->num_tohost_data_slots;
-
- unifi_trace(card->ospriv, UDBG3, "Alloc to-host resources, %d slots.\n", n);
- card->to_host_data = kmalloc(n * sizeof(bulk_data_desc_t), GFP_KERNEL);
- if (card->to_host_data == NULL)
- {
- unifi_error(card->ospriv, "Failed to allocate memory for T-H bulk data array\n");
- return CSR_WIFI_HIP_RESULT_NO_MEMORY;
- }
-
- /* Initialise to-host bulk data slots */
- for (i = 0; i < n; i++)
- {
- UNIFI_INIT_BULK_DATA(&card->to_host_data[i]);
- }
-
- /*
- * Initialise buffers for soft Q
- */
- for (i = 0; i < UNIFI_SOFT_COMMAND_Q_LENGTH; i++)
- {
- for (r = 0; r < UNIFI_MAX_DATA_REFERENCES; r++)
- {
- UNIFI_INIT_BULK_DATA(&card->fh_command_q_body[i].bulkdata[r]);
- }
- }
-
- for (k = 0; k < UNIFI_NO_OF_TX_QS; k++)
- {
- for (i = 0; i < UNIFI_SOFT_TRAFFIC_Q_LENGTH; i++)
- {
- for (r = 0; r < UNIFI_MAX_DATA_REFERENCES; r++)
- {
- UNIFI_INIT_BULK_DATA(&card->fh_traffic_q_body[k][i].bulkdata[r]);
- }
- }
- }
-
- card->memory_resources_allocated = 1;
-
- return CSR_RESULT_SUCCESS;
-} /* card_allocate_memory_resources() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_free_bulk_data
- *
- * Free the data associated with a bulk data structure.
- *
- * Arguments:
- * card Pointer to card struct
- * bulk_data_slot Pointer to bulk data structure
- *
- * Returns:
- * None.
- *
- * ---------------------------------------------------------------------------
- */
-static void unifi_free_bulk_data(card_t *card, bulk_data_desc_t *bulk_data_slot)
-{
- if (bulk_data_slot->data_length != 0)
- {
- unifi_net_data_free(card->ospriv, bulk_data_slot);
- }
-} /* unifi_free_bulk_data() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * card_free_memory_resources
- *
- * Frees memory allocated for the from-host, to-host bulk data slots,
- * soft queue buffers and bulk data buffers.
- *
- * Arguments:
- * card Pointer to card struct
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-static void card_free_memory_resources(card_t *card)
-{
-
- unifi_trace(card->ospriv, UDBG1, "Freeing card memory resources.\n");
-
- /* Clear our internal queues */
- unifi_cancel_pending_signals(card);
-
-
- kfree(card->to_host_data);
- card->to_host_data = NULL;
-
- kfree(card->from_host_data);
- card->from_host_data = NULL;
-
- /* free the memory for slot host tag mapping array */
- kfree(card->fh_slot_host_tag_record);
- card->fh_slot_host_tag_record = NULL;
-
- kfree(card->fh_buffer.buf);
- card->fh_buffer.ptr = card->fh_buffer.buf = NULL;
- card->fh_buffer.bufsize = 0;
- card->fh_buffer.count = 0;
-
- kfree(card->th_buffer.buf);
- card->th_buffer.ptr = card->th_buffer.buf = NULL;
- card->th_buffer.bufsize = 0;
- card->th_buffer.count = 0;
-
-
- card->memory_resources_allocated = 0;
-
-} /* card_free_memory_resources() */
-
-
-static void card_init_soft_queues(card_t *card)
-{
- s16 i;
-
- unifi_trace(card->ospriv, UDBG1, "Initialising internal signal queues.\n");
- /* Reset any state carried forward from a previous life */
- card->fh_command_queue.q_rd_ptr = 0;
- card->fh_command_queue.q_wr_ptr = 0;
- (void)scnprintf(card->fh_command_queue.name, UNIFI_QUEUE_NAME_MAX_LENGTH,
- "fh_cmd_q");
- for (i = 0; i < UNIFI_NO_OF_TX_QS; i++)
- {
- card->fh_traffic_queue[i].q_rd_ptr = 0;
- card->fh_traffic_queue[i].q_wr_ptr = 0;
- (void)scnprintf(card->fh_traffic_queue[i].name,
- UNIFI_QUEUE_NAME_MAX_LENGTH, "fh_data_q%d", i);
- }
-#ifndef CSR_WIFI_HIP_TA_DISABLE
- unifi_ta_sampling_init(card);
-#endif
-}
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_cancel_pending_signals
- *
- * Free the signals and associated bulk data, pending in the core.
- *
- * Arguments:
- * card Pointer to card struct
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-void unifi_cancel_pending_signals(card_t *card)
-{
- s16 i, n, r;
-
- unifi_trace(card->ospriv, UDBG1, "Canceling pending signals.\n");
-
- if (card->to_host_data)
- {
- /*
- * Free any bulk data buffers allocated for the t-h slots
- * This will clear all buffers that did not make it to
- * unifi_receive_event() before the cancel was requested.
- */
- n = card->config_data.num_tohost_data_slots;
- unifi_trace(card->ospriv, UDBG3, "Freeing to-host resources, %d slots.\n", n);
- for (i = 0; i < n; i++)
- {
- unifi_free_bulk_data(card, &card->to_host_data[i]);
- }
- }
-
- /*
- * If any of the from-host bulk data has reached the card->from_host_data
- * but not UniFi, we need to free the buffers here.
- */
- if (card->from_host_data)
- {
- /* Free any bulk data buffers allocated for the f-h slots */
- n = card->config_data.num_fromhost_data_slots;
- unifi_trace(card->ospriv, UDBG3, "Freeing from-host resources, %d slots.\n", n);
- for (i = 0; i < n; i++)
- {
- unifi_free_bulk_data(card, &card->from_host_data[i].bd);
- }
-
- for (i = 0; i < UNIFI_NO_OF_TX_QS; i++)
- {
- card->dynamic_slot_data.from_host_used_slots[i] = 0;
- card->dynamic_slot_data.from_host_max_slots[i] = 0;
- card->dynamic_slot_data.from_host_reserved_slots[i] = 0;
- }
- }
-
- /*
- * Free any bulk data buffers allocated in the soft queues.
- * This covers the case where a bulk data pointer has reached the soft queue
- * but not the card->from_host_data.
- */
- unifi_trace(card->ospriv, UDBG3, "Freeing cmd q resources.\n");
- for (i = 0; i < UNIFI_SOFT_COMMAND_Q_LENGTH; i++)
- {
- for (r = 0; r < UNIFI_MAX_DATA_REFERENCES; r++)
- {
- unifi_free_bulk_data(card, &card->fh_command_q_body[i].bulkdata[r]);
- }
- }
-
- unifi_trace(card->ospriv, UDBG3, "Freeing traffic q resources.\n");
- for (n = 0; n < UNIFI_NO_OF_TX_QS; n++)
- {
- for (i = 0; i < UNIFI_SOFT_TRAFFIC_Q_LENGTH; i++)
- {
- for (r = 0; r < UNIFI_MAX_DATA_REFERENCES; r++)
- {
- unifi_free_bulk_data(card, &card->fh_traffic_q_body[n][i].bulkdata[r]);
- }
- }
- }
-
- card_init_soft_queues(card);
-
-} /* unifi_cancel_pending_signals() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_free_card
- *
- * Free the memory allocated for the card structure and buffers.
- *
- * Notes:
- * The porting layer is responsible for freeing any mini-coredump buffers
- * allocated when it called unifi_coredump_init(), by calling
- * unifi_coredump_free() before calling this function.
- *
- * Arguments:
- * card Pointer to card struct
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-void unifi_free_card(card_t *card)
-{
-#ifdef CSR_PRE_ALLOC_NET_DATA
- prealloc_netdata_free(card);
-#endif
- /* Free any memory allocated. */
- card_free_memory_resources(card);
-
- /* Warn if caller didn't free coredump buffers */
- if (card->dump_buf)
- {
- unifi_error(card->ospriv, "Caller should call unifi_coredump_free()\n");
- unifi_coredump_free(card); /* free anyway to prevent memory leak */
- }
-
- kfree(card);
-
-} /* unifi_free_card() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * card_init_slots
- *
- * Allocate memory for host-side slot data and signal queues.
- *
- * Arguments:
- * card Pointer to card object
- *
- * Returns:
- * CSR error code.
- * ---------------------------------------------------------------------------
- */
-static CsrResult card_init_slots(card_t *card)
-{
- CsrResult r;
- u8 i;
-
- /* Allocate the buffers we need, only once. */
- if (card->memory_resources_allocated == 1)
- {
- card_free_memory_resources(card);
- }
- else
- {
- /* Initialise our internal command and traffic queues */
- card_init_soft_queues(card);
- }
-
- r = card_allocate_memory_resources(card);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to allocate card memory resources.\n");
- card_free_memory_resources(card);
- return r;
- }
-
- if (card->sdio_ctrl_addr == 0)
- {
- unifi_error(card->ospriv, "Failed to find config struct!\n");
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- /*
- * Set initial counts.
- */
-
- card->from_host_data_head = 0;
-
- /* Get initial signal counts from UniFi, in case it has not been reset. */
- {
- u16 s;
-
- /* Get the from-host-signals-written count */
- r = unifi_card_read16(card, card->sdio_ctrl_addr + 0, &s);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to read from-host sig written count\n");
- return r;
- }
- card->from_host_signals_w = (s16)s;
-
- /* Get the to-host-signals-written count */
- r = unifi_card_read16(card, card->sdio_ctrl_addr + 6, &s);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to read to-host sig read count\n");
- return r;
- }
- card->to_host_signals_r = (s16)s;
- }
-
- /* Set Initialised flag. */
- r = unifi_card_write16(card, card->init_flag_addr, 0x0001);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to write initialised flag\n");
- return r;
- }
-
- /* Dynamic queue reservation */
- memset(&card->dynamic_slot_data, 0, sizeof(card_dynamic_slot_t));
-
- for (i = 0; i < UNIFI_NO_OF_TX_QS; i++)
- {
- card->dynamic_slot_data.from_host_max_slots[i] = card->config_data.num_fromhost_data_slots -
- UNIFI_RESERVED_COMMAND_SLOTS;
- card->dynamic_slot_data.queue_stable[i] = FALSE;
- }
-
- card->dynamic_slot_data.packets_interval = UNIFI_PACKETS_INTERVAL;
-
- return CSR_RESULT_SUCCESS;
-} /* card_init_slots() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_set_udi_hook
- *
- * Registers the udi hook that reports the sent signals to the core.
- *
- * Arguments:
- * card Pointer to the card context struct
- * udi_fn Pointer to the callback function.
- *
- * Returns:
- * CSR_WIFI_HIP_RESULT_INVALID_VALUE if the card pointer is invalid,
- * CSR_RESULT_SUCCESS on success.
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_set_udi_hook(card_t *card, udi_func_t udi_fn)
-{
- if (card == NULL)
- {
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- if (card->udi_hook == NULL)
- {
- card->udi_hook = udi_fn;
- }
-
- return CSR_RESULT_SUCCESS;
-} /* unifi_set_udi_hook() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_remove_udi_hook
- *
- * Removes the udi hook that reports the sent signals from the core.
- *
- * Arguments:
- * card Pointer to the card context struct
- * udi_fn Pointer to the callback function.
- *
- * Returns:
- * CSR_WIFI_HIP_RESULT_INVALID_VALUE if the card pointer is invalid,
- * CSR_RESULT_SUCCESS on success.
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_remove_udi_hook(card_t *card, udi_func_t udi_fn)
-{
- if (card == NULL)
- {
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- if (card->udi_hook == udi_fn)
- {
- card->udi_hook = NULL;
- }
-
- return CSR_RESULT_SUCCESS;
-} /* unifi_remove_udi_hook() */
-
-
-static void CardReassignDynamicReservation(card_t *card)
-{
- u8 i;
-
- unifi_trace(card->ospriv, UDBG5, "Packets Txed %d %d %d %d\n",
- card->dynamic_slot_data.packets_txed[0],
- card->dynamic_slot_data.packets_txed[1],
- card->dynamic_slot_data.packets_txed[2],
- card->dynamic_slot_data.packets_txed[3]);
-
- /* Clear reservation and recalculate max slots */
- for (i = 0; i < UNIFI_NO_OF_TX_QS; i++)
- {
- card->dynamic_slot_data.queue_stable[i] = FALSE;
- card->dynamic_slot_data.from_host_reserved_slots[i] = 0;
- card->dynamic_slot_data.from_host_max_slots[i] = card->config_data.num_fromhost_data_slots -
- UNIFI_RESERVED_COMMAND_SLOTS;
- card->dynamic_slot_data.packets_txed[i] = 0;
-
- unifi_trace(card->ospriv, UDBG5, "CardReassignDynamicReservation: queue %d reserved %d Max %d\n", i,
- card->dynamic_slot_data.from_host_reserved_slots[i],
- card->dynamic_slot_data.from_host_max_slots[i]);
- }
-
- card->dynamic_slot_data.total_packets_txed = 0;
-}
-
-
-/* Algorithm to dynamically reserve slots. The logic is based mainly on the outstanding queue
- * length. Slots are reserved for particular queues during an interval and cleared after the interval.
- * Each queue has three associated variables: a) used slots - the number of slots currently occupied
- * by the queue; b) reserved slots - the number of slots reserved specifically for the queue; c) max slots - the
- * total slots that this queue can actually use (may be higher than the reserved slots and depends on the slots
- * reserved for other queues).
- * This function is called when there are no slots available for a queue. It checks whether there are enough
- * unreserved slots to satisfy the request. If so, these slots are reserved for the queue.
- * If there are not enough unreserved slots, a fair share for each queue is calculated from the total slots
- * and the number of active queues (any queue with an existing reservation is considered active). Queues needing
- * less than their fair share keep their previously reserved slots. The remaining slots are
- * distributed evenly among the queues that need more than the fair share.
- *
- * A better scheme would take the current bandwidth per AC into consideration when reserving slots. An
- * implementation could consider the relative time/service period for slots in an AC. If the firmware
- * services other ACs faster than a particular AC (packets wait in its slots longer), then it is fair to reserve
- * fewer slots for that AC.
- */
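-/*
- * Worked example (illustrative figures only, not taken from any particular
- * configuration): assume 14 data slots remain after the command reservation
- * and two active queues, one already holding 10 reserved slots and one now
- * requesting 8. Since 10 + 8 > 14, the fair share is 14 / 2 = 7 slots. Both
- * queues need more than the fair share, so there are no unmovable slots and
- * each excess-demand queue is cut back to (14 - 0) / 2 = 7 reserved slots,
- * with no remainder left to distribute. Each queue's max is then the 14 data
- * slots minus the 7 reserved for the other queue, i.e. 7.
- */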
-static void CardCheckDynamicReservation(card_t *card, unifi_TrafficQueue queue)
-{
- u16 q_len, active_queues = 0, excess_queue_slots, div_extra_slots,
- queue_fair_share, reserved_slots = 0, q, excess_need_queues = 0, unmovable_slots = 0;
- s32 i;
- q_t *sigq;
- u16 num_data_slots = card->config_data.num_fromhost_data_slots - UNIFI_RESERVED_COMMAND_SLOTS;
-
- /* Calculate the pending queue length */
- sigq = &card->fh_traffic_queue[queue];
- q_len = CSR_WIFI_HIP_Q_SLOTS_USED(sigq);
-
- if (q_len <= card->dynamic_slot_data.from_host_reserved_slots[queue])
- {
- unifi_trace(card->ospriv, UDBG5, "queue %d q_len %d already has that many reserved slots, exiting\n", queue, q_len);
- return;
- }
-
- /* Upper limit */
- if (q_len > num_data_slots)
- {
- q_len = num_data_slots;
- }
-
- for (i = 0; i < UNIFI_NO_OF_TX_QS; i++)
- {
- if (i != (s32)queue)
- {
- reserved_slots += card->dynamic_slot_data.from_host_reserved_slots[i];
- }
- if ((i == (s32)queue) || (card->dynamic_slot_data.from_host_reserved_slots[i] > 0))
- {
- active_queues++;
- }
- }
-
- unifi_trace(card->ospriv, UDBG5, "CardCheckDynamicReservation: queue %d q_len %d\n", queue, q_len);
- unifi_trace(card->ospriv, UDBG5, "Active queues %d reserved slots on other queues %d\n",
- active_queues, reserved_slots);
-
- if (reserved_slots + q_len <= num_data_slots)
- {
- card->dynamic_slot_data.from_host_reserved_slots[queue] = q_len;
- if (q_len == num_data_slots)
- {
- /* This is the common case when just 1 stream is going */
- card->dynamic_slot_data.queue_stable[queue] = TRUE;
- }
- }
- else
- {
- queue_fair_share = num_data_slots / active_queues;
- unifi_trace(card->ospriv, UDBG5, "queue fair share %d\n", queue_fair_share);
-
- /* Evenly distribute slots among active queues */
- /* Find the queues that need more than their fair share. Also count the slots allocated
- * to queues using less than their fair share; these slots cannot be reallocated (unmovable slots) */
-
- card->dynamic_slot_data.from_host_reserved_slots[queue] = q_len;
-
- for (i = 0; i < UNIFI_NO_OF_TX_QS; i++)
- {
- if (card->dynamic_slot_data.from_host_reserved_slots[i] > queue_fair_share)
- {
- excess_need_queues++;
- }
- else
- {
- unmovable_slots += card->dynamic_slot_data.from_host_reserved_slots[i];
- }
- }
-
- unifi_trace(card->ospriv, UDBG5, "Excess need queues %d\n", excess_need_queues);
-
- /* Now find the slots per excess demand queue */
- excess_queue_slots = (num_data_slots - unmovable_slots) / excess_need_queues;
- div_extra_slots = (num_data_slots - unmovable_slots) - excess_queue_slots * excess_need_queues;
- for (i = UNIFI_NO_OF_TX_QS - 1; i >= 0; i--)
- {
- if (card->dynamic_slot_data.from_host_reserved_slots[i] > excess_queue_slots)
- {
- card->dynamic_slot_data.from_host_reserved_slots[i] = excess_queue_slots;
- if (div_extra_slots > 0)
- {
- card->dynamic_slot_data.from_host_reserved_slots[i]++;
- div_extra_slots--;
- }
- /* No more slots will be allocated to this queue during the current interval */
- card->dynamic_slot_data.queue_stable[i] = TRUE;
- unifi_trace(card->ospriv, UDBG5, "queue stable %d\n", i);
- }
- }
- }
-
- /* Redistribute max slots */
- for (i = 0; i < UNIFI_NO_OF_TX_QS; i++)
- {
- reserved_slots = 0;
- for (q = 0; q < UNIFI_NO_OF_TX_QS; q++)
- {
- if (i != q)
- {
- reserved_slots += card->dynamic_slot_data.from_host_reserved_slots[q];
- }
- }
-
- card->dynamic_slot_data.from_host_max_slots[i] = num_data_slots - reserved_slots;
- unifi_trace(card->ospriv, UDBG5, "queue %d reserved %d Max %d\n", i,
- card->dynamic_slot_data.from_host_reserved_slots[i],
- card->dynamic_slot_data.from_host_max_slots[i]);
- }
-
-}
-
-
-/*
- * ---------------------------------------------------------------------------
- * CardClearFromHostDataSlot
- *
- * Clear the given data slot, making it available again.
- *
- * Arguments:
- * card Pointer to Card object
- * slot Index of the data slot to clear.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-void CardClearFromHostDataSlot(card_t *card, const s16 slot)
-{
- u8 queue = card->from_host_data[slot].queue;
- const void *os_data_ptr = card->from_host_data[slot].bd.os_data_ptr;
-
- if (card->from_host_data[slot].bd.data_length == 0)
- {
- unifi_warning(card->ospriv,
- "Surprise: request to clear an already free FH data slot: %d\n",
- slot);
- return;
- }
-
- if (os_data_ptr == NULL)
- {
- unifi_warning(card->ospriv,
- "Clearing FH data slot %d: has null payload, len=%d\n",
- slot, card->from_host_data[slot].bd.data_length);
- }
-
- /* Free card->from_host_data[slot].bd.os_net_ptr here. */
- /* Mark slot as free by setting length to 0. */
- unifi_free_bulk_data(card, &card->from_host_data[slot].bd);
- if (queue < UNIFI_NO_OF_TX_QS)
- {
- if (card->dynamic_slot_data.from_host_used_slots[queue] == 0)
- {
- unifi_error(card->ospriv, "Goofed up used slots q = %d used slots = %d\n",
- queue,
- card->dynamic_slot_data.from_host_used_slots[queue]);
- }
- else
- {
- card->dynamic_slot_data.from_host_used_slots[queue]--;
- }
- card->dynamic_slot_data.packets_txed[queue]++;
- card->dynamic_slot_data.total_packets_txed++;
- if (card->dynamic_slot_data.total_packets_txed >= card->dynamic_slot_data.packets_interval)
- {
- CardReassignDynamicReservation(card);
- }
- }
-
- unifi_trace(card->ospriv, UDBG4, "CardClearFromHostDataSlot: slot %d recycled %p\n", slot, os_data_ptr);
-
-} /* CardClearFromHostDataSlot() */
-
-
-#ifdef CSR_WIFI_REQUEUE_PACKET_TO_HAL
-/*
- * ---------------------------------------------------------------------------
- * CardClearFromHostDataSlotWithoutFreeingBulkData
- *
- * Clear the given data slot without freeing the bulk data.
- *
- * Arguments:
- * card Pointer to Card object
- * slot Index of the data slot to clear.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-void CardClearFromHostDataSlotWithoutFreeingBulkData(card_t *card, const s16 slot)
-{
- u8 queue = card->from_host_data[slot].queue;
-
- /* Initialise the from_host data slot so it can be re-used,
- * Set length field in from_host_data array to 0.
- */
- UNIFI_INIT_BULK_DATA(&card->from_host_data[slot].bd);
-
- if (queue < UNIFI_NO_OF_TX_QS)
- {
- if (card->dynamic_slot_data.from_host_used_slots[queue] == 0)
- {
- unifi_error(card->ospriv, "Goofed up used slots q = %d used slots = %d\n",
- queue,
- card->dynamic_slot_data.from_host_used_slots[queue]);
- }
- else
- {
- card->dynamic_slot_data.from_host_used_slots[queue]--;
- }
- card->dynamic_slot_data.packets_txed[queue]++;
- card->dynamic_slot_data.total_packets_txed++;
- if (card->dynamic_slot_data.total_packets_txed >=
- card->dynamic_slot_data.packets_interval)
- {
- CardReassignDynamicReservation(card);
- }
- }
-} /* CardClearFromHostDataSlotWithoutFreeingBulkData() */
-
-
-#endif
-
-u16 CardGetDataSlotSize(card_t *card)
-{
- return card->config_data.data_slot_size;
-} /* CardGetDataSlotSize() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * CardGetFreeFromHostDataSlots
- *
- * Retrieve the number of from-host bulk data slots available.
- *
- * Arguments:
- * card Pointer to the card context struct
- *
- * Returns:
- * Number of free from-host bulk data slots.
- * ---------------------------------------------------------------------------
- */
-u16 CardGetFreeFromHostDataSlots(card_t *card)
-{
- u16 i, n = 0;
-
- /* First two slots reserved for MLME */
- for (i = 0; i < card->config_data.num_fromhost_data_slots; i++)
- {
- if (card->from_host_data[i].bd.data_length == 0)
- {
- /* Free slot */
- n++;
- }
- }
-
- return n;
-} /* CardGetFreeFromHostDataSlots() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * CardAreAllFromHostDataSlotsEmpty
- *
- * Returns the state of from-host bulk data slots.
- *
- * Arguments:
- * card Pointer to the card context struct
- *
- * Returns:
- * 1 The from-host bulk data slots are all empty (available).
- * 0 Some or all the from-host bulk data slots are in use.
- * ---------------------------------------------------------------------------
- */
-u16 CardAreAllFromHostDataSlotsEmpty(card_t *card)
-{
- u16 i;
-
- for (i = 0; i < card->config_data.num_fromhost_data_slots; i++)
- {
- if (card->from_host_data[i].bd.data_length != 0)
- {
- return 0;
- }
- }
-
- return 1;
-} /* CardAreAllFromHostDataSlotsEmpty() */
-
-
-static CsrResult unifi_identify_hw(card_t *card)
-{
-
- card->chip_id = card->sdio_if->sdioId.cardId;
- card->function = card->sdio_if->sdioId.sdioFunction;
- card->sdio_io_block_size = card->sdio_if->blockSize;
-
- /* If SDIO controller doesn't support byte mode CMD53, pad transfers to block sizes */
- card->sdio_io_block_pad = (card->sdio_if->features & CSR_SDIO_FEATURE_BYTE_MODE)?FALSE : TRUE;
-
- /*
- * Setup the chip helper so that we can access the registers (and
- * also tell what sub-type of HIP we should use).
- */
- card->helper = ChipHelper_GetVersionSdio((u8)card->chip_id);
- if (!card->helper)
- {
- unifi_error(card->ospriv, "Null ChipHelper\n");
- }
-
- unifi_info(card->ospriv, "Chip ID 0x%02X Function %u Block Size %u Name %s(%s)\n",
- card->chip_id, card->function, card->sdio_io_block_size,
- ChipHelper_MarketingName(card->helper),
- ChipHelper_FriendlyName(card->helper));
-
- return CSR_RESULT_SUCCESS;
-} /* unifi_identify_hw() */
-
-
-static CsrResult unifi_prepare_hw(card_t *card)
-{
- CsrResult r;
- CsrResult csrResult;
- enum unifi_host_state old_state = card->host_state;
-
- r = unifi_identify_hw(card);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to identify hw\n");
- return r;
- }
-
- unifi_trace(card->ospriv, UDBG1,
- "%s mode SDIO\n", card->sdio_io_block_pad?"Block" : "Byte");
- /*
- * Chip must be awake or blocks that are asleep may not get
- * reset. We can only do this after we have read the chip_id.
- */
- r = unifi_set_host_state(card, UNIFI_HOST_STATE_AWAKE);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
-
- if (old_state == UNIFI_HOST_STATE_TORPID)
- {
- /* Ensure the initial clock rate is set; if a reset occurred when the chip was
- * TORPID, unifi_set_host_state() may have raised it to MAX.
- */
- csrResult = CsrSdioMaxBusClockFrequencySet(card->sdio_if, UNIFI_SDIO_CLOCK_INIT_HZ);
- if (csrResult != CSR_RESULT_SUCCESS)
- {
- r = ConvertCsrSdioToCsrHipResult(card, csrResult);
- return r;
- }
- card->sdio_clock_speed = UNIFI_SDIO_CLOCK_INIT_HZ;
- }
-
- /*
- * The WLAN function must be enabled to access MAILBOX2 and DEBUG_RST
- * registers.
- */
- csrResult = CsrSdioFunctionEnable(card->sdio_if);
- if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
- {
- return CSR_WIFI_HIP_RESULT_NO_DEVICE;
- }
- if (csrResult != CSR_RESULT_SUCCESS)
- {
- r = ConvertCsrSdioToCsrHipResult(card, csrResult);
- /* Can't enable WLAN function. Try resetting the SDIO block. */
- unifi_error(card->ospriv, "Failed to re-enable function %d.\n", card->function);
- return r;
- }
-
- /*
- * Poke some registers to make sure the PLL has started,
- * otherwise memory accesses are likely to fail.
- */
- bootstrap_chip_hw(card);
-
- /* Try to read the chip version from register. */
- r = unifi_read_chip_version(card);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
-
- return CSR_RESULT_SUCCESS;
-} /* unifi_prepare_hw() */
-
-
-static CsrResult unifi_read_chip_version(card_t *card)
-{
- u32 gbl_chip_version;
- CsrResult r;
- u16 ver;
-
- gbl_chip_version = ChipHelper_GBL_CHIP_VERSION(card->helper);
-
- /* Try to read the chip version from register. */
- if (gbl_chip_version != 0)
- {
- r = unifi_read_direct16(card, gbl_chip_version * 2, &ver);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to read GBL_CHIP_VERSION\n");
- return r;
- }
- card->chip_version = ver;
- }
- else
- {
- unifi_info(card->ospriv, "Unknown Chip ID, cannot locate GBL_CHIP_VERSION\n");
- r = CSR_RESULT_FAILURE;
- }
-
- unifi_info(card->ospriv, "Chip Version 0x%04X\n", card->chip_version);
-
- return r;
-} /* unifi_read_chip_version() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_reset_hardware
- *
- * Execute the UniFi reset sequence.
- *
- * Note: This may fail if the chip is going TORPID so retry at
- * least once.
- *
- * Arguments:
- * card - pointer to card context structure
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, CSR error otherwise.
- *
- * Notes:
- * Some platforms (e.g. Windows Vista) do not allow access to registers
- * that are necessary for a software soft reset.
- * ---------------------------------------------------------------------------
- */
-static CsrResult unifi_reset_hardware(card_t *card)
-{
- CsrResult r;
- u16 new_block_size = UNIFI_IO_BLOCK_SIZE;
- CsrResult csrResult;
-
- /* Errors returned by unifi_prepare_hw() are not critical at this point */
- r = unifi_prepare_hw(card);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
-
- /* First try SDIO controller reset, which may power cycle the UniFi, assert
- * its reset line, or not be implemented depending on the platform.
- */
- unifi_info(card->ospriv, "Calling CsrSdioHardReset\n");
- csrResult = CsrSdioHardReset(card->sdio_if);
- if (csrResult == CSR_RESULT_SUCCESS)
- {
- unifi_info(card->ospriv, "CsrSdioHardReset succeeded on resetting UniFi\n");
- r = unifi_prepare_hw(card);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "unifi_prepare_hw failed after hard reset\n");
- return r;
- }
- }
- else if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
- {
- return CSR_WIFI_HIP_RESULT_NO_DEVICE;
- }
- else
- {
- /* Falling back to software hard reset methods */
- unifi_info(card->ospriv, "Falling back to software hard reset\n");
- r = unifi_card_hard_reset(card);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "software hard reset failed\n");
- return r;
- }
-
- /* If we fell back to unifi_card_hard_reset() methods, chip version may
- * not have been read. (Note in the unlikely event that it is zero,
- * it will be harmlessly read again)
- */
- if (card->chip_version == 0)
- {
- r = unifi_read_chip_version(card);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
- }
- }
-
-#ifdef CSR_WIFI_HIP_SDIO_BLOCK_SIZE
- new_block_size = CSR_WIFI_HIP_SDIO_BLOCK_SIZE;
-#endif
-
- /* After hard reset, we need to restore the SDIO block size */
- csrResult = CsrSdioBlockSizeSet(card->sdio_if, new_block_size);
- r = ConvertCsrSdioToCsrHipResult(card, csrResult);
-
- /* Warn if a different block size was achieved by the transport */
- if (card->sdio_if->blockSize != new_block_size)
- {
- unifi_info(card->ospriv,
- "Actually got block size %d\n", card->sdio_if->blockSize);
- }
-
- /* sdio_io_block_size always needs to be updated from the achieved block size,
- * as it is used by the OS layer to allocate memory in unifi_net_malloc().
- * Controllers which don't support block mode (e.g. CSPI) will report a
- * block size of zero.
- */
- if (card->sdio_if->blockSize == 0)
- {
- unifi_info(card->ospriv, "Block size 0, block mode not available\n");
-
- /* Set sdio_io_block_size to 1 so that unifi_net_data_malloc() has a
- * sensible rounding value. Elsewhere padding will already be
- * disabled because the controller supports byte mode.
- */
- card->sdio_io_block_size = 1;
-
- /* Controller features must declare support for byte mode */
- if (!(card->sdio_if->features & CSR_SDIO_FEATURE_BYTE_MODE))
- {
- unifi_error(card->ospriv, "Requires byte mode\n");
- r = CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
- }
- else
- {
- /* Padding will be enabled if CSR_SDIO_FEATURE_BYTE_MODE isn't set */
- card->sdio_io_block_size = card->sdio_if->blockSize;
- }
-
-
- return r;
-} /* unifi_reset_hardware() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * card_reset_method_io_enable
- *
- * Issue a hard reset to the h/w by writing to IO_ENABLE.
- *
- * Arguments:
- * card Pointer to Card object
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success,
- * CSR_WIFI_HIP_RESULT_NO_DEVICE if the card was ejected
- * CSR_RESULT_FAILURE if an SDIO error occurred or if a response
- * was not seen in the expected time
- * ---------------------------------------------------------------------------
- */
-static CsrResult card_reset_method_io_enable(card_t *card)
-{
- CsrResult r;
- CsrResult csrResult;
-
- /*
- * This resets only function 1, so should be used in
- * preference to the method below (CSR_FUNC_EN)
- */
- unifi_trace(card->ospriv, UDBG1, "Hard reset (IO_ENABLE)\n");
-
- csrResult = CsrSdioFunctionDisable(card->sdio_if);
- if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
- {
- return CSR_WIFI_HIP_RESULT_NO_DEVICE;
- }
- if (csrResult != CSR_RESULT_SUCCESS)
- {
- r = ConvertCsrSdioToCsrHipResult(card, csrResult);
- unifi_warning(card->ospriv, "SDIO error writing IO_ENABLE: %d\n", r);
- }
- else
- {
- /* Delay here to let the reset take effect. */
- CsrThreadSleep(RESET_SETTLE_DELAY);
-
- r = card_wait_for_unifi_to_disable(card);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
-
- if (r == CSR_RESULT_SUCCESS)
- {
- r = card_wait_for_unifi_to_reset(card);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- }
- }
-
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_trace(card->ospriv, UDBG1, "Hard reset (CSR_FUNC_EN)\n");
-
- r = sdio_write_f0(card, SDIO_CSR_FUNC_EN, 0);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_warning(card->ospriv, "SDIO error writing SDIO_CSR_FUNC_EN: %d\n", r);
- return r;
- }
- else
- {
- /* Delay here to let the reset take effect. */
- CsrThreadSleep(RESET_SETTLE_DELAY);
-
- r = card_wait_for_unifi_to_reset(card);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- }
- }
-
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_warning(card->ospriv, "card_reset_method_io_enable failed to reset UniFi\n");
- }
-
- return r;
-} /* card_reset_method_io_enable() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * card_reset_method_dbg_reset
- *
- * Issue a hard reset to the h/w by writing to DBG_RESET.
- *
- * Arguments:
- * card Pointer to Card object
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success,
- * CSR_WIFI_HIP_RESULT_NO_DEVICE if the card was ejected
- * CSR_RESULT_FAILURE if an SDIO error occurred or if a response
- * was not seen in the expected time
- * ---------------------------------------------------------------------------
- */
-static CsrResult card_reset_method_dbg_reset(card_t *card)
-{
- CsrResult r;
-
- /*
- * Prepare UniFi for h/w reset
- */
- if (card->host_state == UNIFI_HOST_STATE_TORPID)
- {
- r = unifi_set_host_state(card, UNIFI_HOST_STATE_DROWSY);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to set UNIFI_HOST_STATE_DROWSY\n");
- return r;
- }
- CsrThreadSleep(5);
- }
-
- r = unifi_card_stop_processor(card, UNIFI_PROC_BOTH);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Can't stop processors\n");
- return r;
- }
-
- unifi_trace(card->ospriv, UDBG1, "Hard reset (DBG_RESET)\n");
-
- /*
- * This register write may fail. The debug reset resets
- * parts of the Function 0 sections of the chip, and
- * therefore the response cannot be sent back to the host.
- */
- r = unifi_write_direct_8_or_16(card, ChipHelper_DBG_RESET(card->helper) * 2, 1);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_warning(card->ospriv, "SDIO error writing DBG_RESET: %d\n", r);
- return r;
- }
-
- /* Delay here to let the reset take effect. */
- CsrThreadSleep(RESET_SETTLE_DELAY);
-
- r = card_wait_for_unifi_to_reset(card);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_warning(card->ospriv, "card_reset_method_dbg_reset failed to reset UniFi\n");
- }
-
- return r;
-} /* card_reset_method_dbg_reset() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_card_hard_reset
- *
- * Issue reset to hardware, by writing to registers on the card.
- * Power to the card is preserved.
- *
- * Arguments:
- * card Pointer to Card object
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success,
- * CSR_WIFI_HIP_RESULT_NO_DEVICE if the card was ejected
- * CSR_RESULT_FAILURE if an SDIO error occurred or if a response
- * was not seen in the expected time
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_card_hard_reset(card_t *card)
-{
- CsrResult r;
- const struct chip_helper_reset_values *init_data;
- u32 chunks;
-
- /* Clear cache of page registers */
- card->proc_select = (u32)(-1);
- card->dmem_page = (u32)(-1);
- card->pmem_page = (u32)(-1);
-
- /*
- * We need to have a valid card->helper before we use software hard reset.
- * If unifi_identify_hw() fails to get the card ID, it probably means
- * that there is no way to talk to the h/w.
- */
- r = unifi_identify_hw(card);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "unifi_card_hard_reset failed to identify h/w\n");
- return r;
- }
-
- /* Search for some reset code. */
- chunks = ChipHelper_HostResetSequence(card->helper, &init_data);
- if (chunks != 0)
- {
- unifi_error(card->ospriv,
- "Hard reset (Code download) is unsupported\n");
-
- return CSR_RESULT_FAILURE;
- }
-
- if (card->chip_id > SDIO_CARD_ID_UNIFI_2)
- {
- /* The HIP spec considers this a bus-specific reset.
- * This resets only function 1, so should be used in
- * preference to the method below (CSR_FUNC_EN)
- * If this method fails, it means that the f/w is probably
- * not running. In this case, try the DBG_RESET method.
- */
- r = card_reset_method_io_enable(card);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r == CSR_RESULT_SUCCESS)
- {
- return r;
- }
- }
-
- /* Software hard reset */
- r = card_reset_method_dbg_reset(card);
-
- return r;
-} /* unifi_card_hard_reset() */
-
-
-/*
- * ---------------------------------------------------------------------------
- *
- * CardGenInt
- *
- * Prod the card.
- * This function causes an internal interrupt to be raised in the
- * UniFi chip. It is used to signal the firmware that some action has
- * been completed.
- * The UniFi Host Interface asks that the value written is incremented
- * each time, for debugging purposes.
- *
- * Arguments:
- * card Pointer to Card object
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success,
- * CSR_WIFI_HIP_RESULT_NO_DEVICE if the card was ejected
- * CSR_RESULT_FAILURE if an SDIO error occurred or if a response
- * was not seen in the expected time
- * ---------------------------------------------------------------------------
- */
-CsrResult CardGenInt(card_t *card)
-{
- CsrResult r;
-
- if (card->chip_id > SDIO_CARD_ID_UNIFI_2)
- {
- r = sdio_write_f0(card, SDIO_CSR_FROM_HOST_SCRATCH0,
- (u8)card->unifi_interrupt_seq);
- }
- else
- {
- r = unifi_write_direct_8_or_16(card,
- ChipHelper_SHARED_IO_INTERRUPT(card->helper) * 2,
- (u8)card->unifi_interrupt_seq);
- }
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "SDIO error writing UNIFI_SHARED_IO_INTERRUPT: %d\n", r);
- return r;
- }
-
- card->unifi_interrupt_seq++;
-
- return CSR_RESULT_SUCCESS;
-} /* CardGenInt() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * CardEnableInt
- *
- * Enable the outgoing SDIO interrupt from UniFi to the host.
- *
- * Arguments:
- * card Pointer to Card object
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success,
- * CSR_WIFI_HIP_RESULT_NO_DEVICE if the card was ejected
- * CSR_RESULT_FAILURE if an SDIO error occurred,
- * ---------------------------------------------------------------------------
- */
-CsrResult CardEnableInt(card_t *card)
-{
- CsrResult r;
- u8 int_enable;
-
- r = sdio_read_f0(card, SDIO_INT_ENABLE, &int_enable);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "SDIO error reading SDIO_INT_ENABLE\n");
- return r;
- }
-
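- /* Set this function's interrupt enable bit and the master interrupt
- * enable bit (IENM).
- */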
- int_enable |= (1 << card->function) | UNIFI_SD_INT_ENABLE_IENM;
-
- r = sdio_write_f0(card, SDIO_INT_ENABLE, int_enable);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "SDIO error writing SDIO_INT_ENABLE\n");
- return r;
- }
-
- return CSR_RESULT_SUCCESS;
-} /* CardEnableInt() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * CardDisableInt
- *
- * Disable the outgoing SDIO interrupt from UniFi to the host.
- *
- * Arguments:
- * card Pointer to Card object
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success,
- * CSR_WIFI_HIP_RESULT_NO_DEVICE if the card was ejected
- * CSR_RESULT_FAILURE if an SDIO error occurred,
- * ---------------------------------------------------------------------------
- */
-CsrResult CardDisableInt(card_t *card)
-{
- CsrResult r;
- u8 int_enable;
-
- r = sdio_read_f0(card, SDIO_INT_ENABLE, &int_enable);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "SDIO error reading SDIO_INT_ENABLE\n");
- return r;
- }
-
- int_enable &= ~(1 << card->function);
-
- r = sdio_write_f0(card, SDIO_INT_ENABLE, int_enable);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "SDIO error writing SDIO_INT_ENABLE\n");
- return r;
- }
-
- return CSR_RESULT_SUCCESS;
-} /* CardDisableInt() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * CardPendingInt
- *
- * Determine whether UniFi is currently asserting the SDIO interrupt
- * request.
- *
- * Arguments:
- * card Pointer to Card object
- * pintr Pointer to location to write interrupt status,
- * TRUE if interrupt pending,
- * FALSE if no interrupt pending.
- * Returns:
- * CSR_RESULT_SUCCESS interrupt status read successfully
- * CSR_WIFI_HIP_RESULT_NO_DEVICE if the card was ejected
- * CSR_RESULT_FAILURE if an SDIO error occurred,
- * ---------------------------------------------------------------------------
- */
-CsrResult CardPendingInt(card_t *card, u8 *pintr)
-{
- CsrResult r;
- u8 pending;
-
- *pintr = FALSE;
-
- r = sdio_read_f0(card, SDIO_INT_PENDING, &pending);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "SDIO error reading SDIO_INT_PENDING\n");
- return r;
- }
-
- *pintr = (pending & (1 << card->function))?TRUE : FALSE;
-
- return CSR_RESULT_SUCCESS;
-} /* CardPendingInt() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * CardClearInt
- *
- * Clear the UniFi SDIO interrupt request.
- *
- * Arguments:
- * card Pointer to Card object
- *
- * Returns:
- * CSR_RESULT_SUCCESS if pending interrupt was cleared, or no pending interrupt.
- * CSR_WIFI_HIP_RESULT_NO_DEVICE if the card was ejected
- * CSR_RESULT_FAILURE if an SDIO error occurred,
- * ---------------------------------------------------------------------------
- */
-CsrResult CardClearInt(card_t *card)
-{
- CsrResult r;
- u8 intr;
-
- if (card->chip_id > SDIO_CARD_ID_UNIFI_2)
- {
- /* CardPendingInt() sets intr, if there is a pending interrupt */
- r = CardPendingInt(card, &intr);
- if (intr == FALSE)
- {
- return r;
- }
-
- r = sdio_write_f0(card, SDIO_CSR_HOST_INT_CLEAR, 1);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "SDIO error writing SDIO_CSR_HOST_INT_CLEAR\n");
- }
- }
- else
- {
- r = unifi_write_direct_8_or_16(card,
- ChipHelper_SDIO_HOST_INT(card->helper) * 2,
- 0);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "SDIO error writing UNIFI_SDIO_HOST_INT\n");
- }
- }
-
- return r;
-} /* CardClearInt() */
-
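-/*
- * Example (hypothetical usage sketch, not code taken from this driver):
- * a polled host-side check might combine the helpers above as follows:
- *
- *     u8 pending;
- *     if ((CardPendingInt(card, &pending) == CSR_RESULT_SUCCESS) && pending)
- *     {
- *         (void)CardClearInt(card);
- *         ... process the to-host signals ...
- *     }
- */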
-
-/*
- * ---------------------------------------------------------------------------
- * CardIntEnabled
- *
- * Determine whether the SDIO interrupt is enabled for the UniFi
- * function.
- *
- * Arguments:
- * card Pointer to Card object
- * enabled Pointer to location to write interrupt enable status,
- * TRUE if interrupts enabled,
- * FALSE if interrupts disabled.
- *
- * Returns:
- * CSR_RESULT_SUCCESS if the interrupt enable status was read successfully
- * CSR_WIFI_HIP_RESULT_NO_DEVICE if the card was ejected
- * CSR_RESULT_FAILURE if an SDIO error occurred,
- * ---------------------------------------------------------------------------
- */
-CsrResult CardIntEnabled(card_t *card, u8 *enabled)
-{
- CsrResult r;
- u8 int_enable;
-
- r = sdio_read_f0(card, SDIO_INT_ENABLE, &int_enable);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "SDIO error reading SDIO_INT_ENABLE\n");
- return r;
- }
-
- *enabled = (int_enable & (1 << card->function))?TRUE : FALSE;
-
- return CSR_RESULT_SUCCESS;
-} /* CardIntEnabled() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * CardWriteBulkData
- * Allocate slots in the pending bulkdata arrays and assign them to a signal's
- * bulkdata references. The slots are then ready for UniFi's bulkdata commands
- * to transfer the data to/from the host.
- *
- * Arguments:
- * card Pointer to Card object
- * csptr Pending signal pointer, including bulkdata ref
- * queue Traffic queue that this signal is using
- *
- * Returns:
- * CSR_RESULT_SUCCESS if a free slot was assigned
- * CSR_RESULT_FAILURE if no slot was available
- * ---------------------------------------------------------------------------
- */
-CsrResult CardWriteBulkData(card_t *card, card_signal_t *csptr, unifi_TrafficQueue queue)
-{
- u16 i, slots[UNIFI_MAX_DATA_REFERENCES], j = 0;
- u8 *packed_sigptr, num_slots_required = 0;
- bulk_data_desc_t *bulkdata = csptr->bulkdata;
- s16 h, nslots;
-
- /* Count the number of slots required */
- for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; i++)
- {
- if (bulkdata[i].data_length != 0)
- {
- num_slots_required++;
- }
- }
-
- /* Get the slot numbers */
- if (num_slots_required != 0)
- {
- /* Last 2 slots for MLME */
- if (queue == UNIFI_TRAFFIC_Q_MLME)
- {
- h = card->config_data.num_fromhost_data_slots - UNIFI_RESERVED_COMMAND_SLOTS;
- for (i = 0; i < card->config_data.num_fromhost_data_slots; i++)
- {
- if (card->from_host_data[h].bd.data_length == 0)
- {
- /* Free data slot, claim it */
- slots[j++] = h;
- if (j == num_slots_required)
- {
- break;
- }
- }
-
- if (++h >= card->config_data.num_fromhost_data_slots)
- {
- h = 0;
- }
- }
- }
- else
- {
- if (card->dynamic_slot_data.from_host_used_slots[queue]
- < card->dynamic_slot_data.from_host_max_slots[queue])
- {
- /* Data commands get a free slot only after a few checks */
- nslots = card->config_data.num_fromhost_data_slots - UNIFI_RESERVED_COMMAND_SLOTS;
-
- h = card->from_host_data_head;
-
- for (i = 0; i < nslots; i++)
- {
- if (card->from_host_data[h].bd.data_length == 0)
- {
- /* Free data slot, claim it */
- slots[j++] = h;
- if (j == num_slots_required)
- {
- break;
- }
- }
-
- if (++h >= nslots)
- {
- h = 0;
- }
- }
- card->from_host_data_head = h;
- }
- }
-
- /* The required number of slots is not available, bail out */
- if (j != num_slots_required)
- {
- unifi_trace(card->ospriv, UDBG5, "CardWriteBulkData: didn't find free slot/s\n");
-
- /* If we haven't already reached the stable state we can ask for reservation */
- if ((queue != UNIFI_TRAFFIC_Q_MLME) && (card->dynamic_slot_data.queue_stable[queue] == FALSE))
- {
- CardCheckDynamicReservation(card, queue);
- }
-
- for (i = 0; i < card->config_data.num_fromhost_data_slots; i++)
- {
- unifi_trace(card->ospriv, UDBG5, "fh data slot %d: %d\n", i, card->from_host_data[i].bd.data_length);
- }
- return CSR_RESULT_FAILURE;
- }
- }
-
- packed_sigptr = csptr->sigbuf;
-
- /* Fill in the slots with data */
- j = 0;
- for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; i++)
- {
- if (bulkdata[i].data_length == 0)
- {
- /* Zero-out the DATAREF in the signal */
- SET_PACKED_DATAREF_SLOT(packed_sigptr, i, 0);
- SET_PACKED_DATAREF_LEN(packed_sigptr, i, 0);
- }
- else
- {
- /*
- * Fill in the slot number in the SIGNAL structure but
- * preserve the offset already in there
- */
- SET_PACKED_DATAREF_SLOT(packed_sigptr, i, slots[j] | (((u16)packed_sigptr[SIZEOF_SIGNAL_HEADER + (i * SIZEOF_DATAREF) + 1]) << 8));
- SET_PACKED_DATAREF_LEN(packed_sigptr, i, bulkdata[i].data_length);
-
- /* Do not copy the data, just store the pointers to it */
- card->from_host_data[slots[j]].bd.os_data_ptr = bulkdata[i].os_data_ptr;
- card->from_host_data[slots[j]].bd.os_net_buf_ptr = bulkdata[i].os_net_buf_ptr;
- card->from_host_data[slots[j]].bd.data_length = bulkdata[i].data_length;
- card->from_host_data[slots[j]].bd.net_buf_length = bulkdata[i].net_buf_length;
- card->from_host_data[slots[j]].queue = queue;
-
- unifi_trace(card->ospriv, UDBG4, "CardWriteBulkData sig=0x%x, fh slot %d = %p\n",
- GET_SIGNAL_ID(packed_sigptr), i, bulkdata[i].os_data_ptr);
-
- /* Sanity-check that the bulk data desc being assigned to the slot
- * actually has a payload.
- */
- if (!bulkdata[i].os_data_ptr)
- {
- unifi_error(card->ospriv, "Assign null os_data_ptr (len=%d) fh slot %d, i=%d, q=%d, sig=0x%x",
- bulkdata[i].data_length, slots[j], i, queue, GET_SIGNAL_ID(packed_sigptr));
- }
-
- j++;
- if (queue < UNIFI_NO_OF_TX_QS)
- {
- card->dynamic_slot_data.from_host_used_slots[queue]++;
- }
- }
- }
-
- return CSR_RESULT_SUCCESS;
-} /* CardWriteBulkData() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * card_find_data_slot
- *
- * Dereference references to bulk data slots into pointers to real data.
- *
- * Arguments:
- * card Pointer to the card struct.
- * slot Slot number from a signal structure
- *
- * Returns:
- * Pointer to entry in bulk_data_slot array.
- * ---------------------------------------------------------------------------
- */
-bulk_data_desc_t* card_find_data_slot(card_t *card, s16 slot)
-{
- s16 sn;
- bulk_data_desc_t *bd;
-
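- /* The top bit of the slot reference gives the transfer direction
- * (SLOT_DIR_TO_HOST); the remaining bits are the slot index.
- */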
- sn = slot & 0x7FFF;
-
- /* ?? check sanity of slot number ?? */
-
- if (slot & SLOT_DIR_TO_HOST)
- {
- bd = &card->to_host_data[sn];
- }
- else
- {
- bd = &card->from_host_data[sn].bd;
- }
-
- return bd;
-} /* card_find_data_slot() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * firmware_present_in_flash
- *
- * Probe for external Flash that looks like it might contain firmware.
- *
- * If Flash is not present, reads always return 0x0008.
- * If Flash is present, but empty, reads return 0xFFFF.
- * Anything else is considered to be firmware.
- *
- * Arguments:
- * card Pointer to card struct
- *
- * Returns:
- * CSR_RESULT_SUCCESS firmware is present in ROM or flash
- * CSR_WIFI_HIP_RESULT_NOT_FOUND firmware is not present in ROM or flash
- * CSR_WIFI_HIP_RESULT_NO_DEVICE if the card was ejected
- * CSR_RESULT_FAILURE if an SDIO error occurred
- * ---------------------------------------------------------------------------
- */
-static CsrResult firmware_present_in_flash(card_t *card)
-{
- CsrResult r;
- u16 m1, m5;
-
- if (ChipHelper_HasRom(card->helper))
- {
- return CSR_RESULT_SUCCESS;
- }
- if (!ChipHelper_HasFlash(card->helper))
- {
- return CSR_WIFI_HIP_RESULT_NOT_FOUND;
- }
-
- /*
- * Examine the Flash locations that are the power-on default reset
- * vectors of the XAP processors.
- * These are words 1 and 5 in Flash.
- */
- r = unifi_card_read16(card, UNIFI_MAKE_GP(EXT_FLASH, 2), &m1);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
-
- r = unifi_card_read16(card, UNIFI_MAKE_GP(EXT_FLASH, 10), &m5);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
-
- /* Check for uninitialised/missing flash */
- if ((m1 == 0x0008) || (m1 == 0xFFFF) ||
- (m1 == 0x0004) || (m5 == 0x0004) ||
- (m5 == 0x0008) || (m5 == 0xFFFF))
- {
- return CSR_WIFI_HIP_RESULT_NOT_FOUND;
- }
-
- return CSR_RESULT_SUCCESS;
-} /* firmware_present_in_flash() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * bootstrap_chip_hw
- *
- * Perform chip-specific magic to "Get It Working" (TM). This will
- * increase the speed of the analogue PLLs and may enable some
- * on-chip regulators.
- *
- * Arguments:
- * card Pointer to card struct
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-static void bootstrap_chip_hw(card_t *card)
-{
- const struct chip_helper_init_values *vals;
- u32 i, len;
- void *sdio = card->sdio_if;
- CsrResult csrResult;
-
- len = ChipHelper_ClockStartupSequence(card->helper, &vals);
- if (len != 0)
- {
- for (i = 0; i < len; i++)
- {
- csrResult = CsrSdioWrite16(sdio, vals[i].addr * 2, vals[i].value);
- if (csrResult != CSR_RESULT_SUCCESS)
- {
- unifi_warning(card->ospriv, "Failed to write bootstrap value %d\n", i);
- /* Might not be fatal */
- }
-
- CsrThreadSleep(1);
- }
- }
-} /* bootstrap_chip_hw() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_card_stop_processor
- *
- * Stop the UniFi XAP processors.
- *
- * Arguments:
- * card Pointer to card struct
- * which One of UNIFI_PROC_MAC, UNIFI_PROC_PHY, UNIFI_PROC_BOTH
- *
- * Returns:
- * CSR_RESULT_SUCCESS if successful, or CSR error code
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_card_stop_processor(card_t *card, enum unifi_dbg_processors_select which)
-{
- CsrResult r = CSR_RESULT_SUCCESS;
- u8 status;
- s16 retry = 100;
-
- while (retry--)
- {
-        /* Select the requested XAP processor(s) */
- r = unifi_set_proc_select(card, which);
- if (r != CSR_RESULT_SUCCESS)
- {
- break;
- }
-
- /* Stop processors */
- r = unifi_write_direct16(card, ChipHelper_DBG_EMU_CMD(card->helper) * 2, 2);
- if (r != CSR_RESULT_SUCCESS)
- {
- break;
- }
-
- /* Read status */
- r = unifi_read_direct_8_or_16(card,
- ChipHelper_DBG_HOST_STOP_STATUS(card->helper) * 2,
- &status);
- if (r != CSR_RESULT_SUCCESS)
- {
- break;
- }
-
- if ((status & 1) == 1)
- {
- /* Success! */
- return CSR_RESULT_SUCCESS;
- }
-
- /* Processors didn't stop, try again */
- }
-
- if (r != CSR_RESULT_SUCCESS)
- {
- /* An SDIO error occurred */
- unifi_error(card->ospriv, "Failed to stop processors: SDIO error\n");
- }
- else
- {
-        /* If we reach here, we didn't see the stopped status in time. */
- unifi_error(card->ospriv, "Failed to stop processors: timeout waiting for stopped status\n");
- r = CSR_RESULT_FAILURE;
- }
-
- return r;
-} /* unifi_card_stop_processor() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * card_start_processor
- *
- * Start the UniFi XAP processors.
- *
- * Arguments:
- * card Pointer to card struct
- * which One of UNIFI_PROC_MAC, UNIFI_PROC_PHY, UNIFI_PROC_BOTH
- *
- * Returns:
- * CSR_RESULT_SUCCESS or CSR error code
- * ---------------------------------------------------------------------------
- */
-CsrResult card_start_processor(card_t *card, enum unifi_dbg_processors_select which)
-{
- CsrResult r;
-
-    /* Select the requested XAP processor(s) */
- r = unifi_set_proc_select(card, which);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "unifi_set_proc_select failed: %d.\n", r);
- return r;
- }
-
-
- r = unifi_write_direct_8_or_16(card,
- ChipHelper_DBG_EMU_CMD(card->helper) * 2, 8);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
-
- r = unifi_write_direct_8_or_16(card,
- ChipHelper_DBG_EMU_CMD(card->helper) * 2, 0);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
-
- return CSR_RESULT_SUCCESS;
-} /* card_start_processor() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_set_interrupt_mode
- *
- * Configure the interrupt processing mode used by the HIP
- *
- * Arguments:
- * card Pointer to card struct
- * mode Interrupt mode to apply
- *
- * Returns:
- * None
- * ---------------------------------------------------------------------------
- */
-void unifi_set_interrupt_mode(card_t *card, u32 mode)
-{
- if (mode == CSR_WIFI_INTMODE_RUN_BH_ONCE)
- {
- unifi_info(card->ospriv, "Scheduled interrupt mode");
- }
- card->intmode = mode;
-} /* unifi_set_interrupt_mode() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_start_processors
- *
- * Start all UniFi XAP processors.
- *
- * Arguments:
- * card Pointer to card struct
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, CSR error code on error
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_start_processors(card_t *card)
-{
- return card_start_processor(card, UNIFI_PROC_BOTH);
-} /* unifi_start_processors() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_request_max_sdio_clock
- *
- * Requests that the maximum SDIO clock rate is set at the next suitable
- * opportunity (e.g. when the BH next runs, so as not to interfere with
- * any current operation).
- *
- * Arguments:
- * card Pointer to card struct
- *
- * Returns:
- * None
- * ---------------------------------------------------------------------------
- */
-void unifi_request_max_sdio_clock(card_t *card)
-{
- card->request_max_clock = 1;
-} /* unifi_request_max_sdio_clock() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_set_host_state
- *
- * Set the host deep-sleep state.
- *
- * If transitioning to TORPID, the SDIO driver will be notified
- * that the SD bus will be unused (idle) and conversely, when
- * transitioning from TORPID that the bus will be used (active).
- *
- * Arguments:
- * card Pointer to card struct
- * state New deep-sleep state.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success
- * CSR_WIFI_HIP_RESULT_NO_DEVICE if the card was ejected
- * CSR_RESULT_FAILURE if an SDIO error occurred
- *
- * Notes:
- * We need to reduce the SDIO clock speed before trying to wake up the
- * chip. Actually, in the implementation below we reduce the clock speed
- * not just before we try to wake up the chip, but when we put the chip to
- * deep sleep. This means that if the f/w wakes up on its own, we waste
- * a reduce/increase cycle. However, trying to eliminate this overhead has
- * proved difficult, as the current state machine in the HIP lib does at
- * least a CMD52 to disable the interrupts before we configure the host
- * state.
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_set_host_state(card_t *card, enum unifi_host_state state)
-{
- CsrResult r = CSR_RESULT_SUCCESS;
- CsrResult csrResult;
- static const char *const states[] = {
- "AWAKE", "DROWSY", "TORPID"
- };
- static const u8 state_csr_host_wakeup[] = {
- 1, 3, 0
- };
- static const u8 state_io_abort[] = {
- 0, 2, 3
- };
-
- unifi_trace(card->ospriv, UDBG4, "State %s to %s\n",
- states[card->host_state], states[state]);
-
- if (card->host_state == UNIFI_HOST_STATE_TORPID)
- {
- CsrSdioFunctionActive(card->sdio_if);
- }
-
- /* Write the new state to UniFi. */
- if (card->chip_id > SDIO_CARD_ID_UNIFI_2)
- {
- r = sdio_write_f0(card, SDIO_CSR_HOST_WAKEUP,
- (u8)((card->function << 4) | state_csr_host_wakeup[state]));
- }
- else
- {
- r = sdio_write_f0(card, SDIO_IO_ABORT, state_io_abort[state]);
- }
-
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to write UniFi deep sleep state\n");
- }
- else
- {
- /*
- * If the chip was in state TORPID then we can now increase
- * the maximum bus clock speed.
- */
- if (card->host_state == UNIFI_HOST_STATE_TORPID)
- {
- csrResult = CsrSdioMaxBusClockFrequencySet(card->sdio_if,
- UNIFI_SDIO_CLOCK_MAX_HZ);
- r = ConvertCsrSdioToCsrHipResult(card, csrResult);
- /* Non-fatal error */
- if (r != CSR_RESULT_SUCCESS && r != CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- unifi_warning(card->ospriv,
- "Failed to increase the SDIO clock speed\n");
- }
- else
- {
- card->sdio_clock_speed = UNIFI_SDIO_CLOCK_MAX_HZ;
- }
- }
-
- /*
- * Cache the current state in the card structure to avoid
- * unnecessary SDIO reads.
- */
- card->host_state = state;
-
- if (state == UNIFI_HOST_STATE_TORPID)
- {
- /*
- * If the chip is now in state TORPID then we must now decrease
- * the maximum bus clock speed.
- */
- csrResult = CsrSdioMaxBusClockFrequencySet(card->sdio_if,
- UNIFI_SDIO_CLOCK_SAFE_HZ);
- r = ConvertCsrSdioToCsrHipResult(card, csrResult);
- if (r != CSR_RESULT_SUCCESS && r != CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- unifi_warning(card->ospriv,
- "Failed to decrease the SDIO clock speed\n");
- }
- else
- {
- card->sdio_clock_speed = UNIFI_SDIO_CLOCK_SAFE_HZ;
- }
- CsrSdioFunctionIdle(card->sdio_if);
- }
- }
-
- return r;
-} /* unifi_set_host_state() */
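/*
 * A minimal sketch of the deep-sleep handshake described in the notes above;
 * the helper below is hypothetical and error handling is omitted. In the real
 * driver these transitions are driven from unifi_bh() and the SDIO interrupt.
 */
static void example_deep_sleep_handshake(card_t *card)
{
    /* Host has traffic queued while the chip is in TORPID: ask it to wake */
    (void)unifi_set_host_state(card, UNIFI_HOST_STATE_DROWSY);

    /* ... the chip answers with an SDIO interrupt; move to AWAKE ... */
    (void)unifi_set_host_state(card, UNIFI_HOST_STATE_AWAKE);

    /* ... exchange signals; once idle again, allow deep sleep ... */
    (void)unifi_set_host_state(card, UNIFI_HOST_STATE_TORPID);
}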
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_card_info
- *
- * Update the card information data structure
- *
- * Arguments:
- * card Pointer to card struct
- * card_info Pointer to info structure to update
- *
- * Returns:
- * None
- * ---------------------------------------------------------------------------
- */
-void unifi_card_info(card_t *card, card_info_t *card_info)
-{
- card_info->chip_id = card->chip_id;
- card_info->chip_version = card->chip_version;
- card_info->fw_build = card->build_id;
- card_info->fw_hip_version = card->config_data.version;
- card_info->sdio_block_size = card->sdio_io_block_size;
-} /* unifi_card_info() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_check_io_status
- *
- * Check UniFi for spontaneous reset and pending interrupt.
- *
- * Arguments:
- * card Pointer to card struct
- * status Pointer to location to write chip status:
- * 0 if UniFi is running, and no interrupt pending
- * 1 if UniFi has spontaneously reset
- * 2 if there is a pending interrupt
- * Returns:
- * CSR_RESULT_SUCCESS if OK, or CSR error
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_check_io_status(card_t *card, s32 *status)
-{
- u8 io_en;
- CsrResult r;
- u8 pending;
-
- *status = 0;
-
- r = sdio_read_f0(card, SDIO_IO_ENABLE, &io_en);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to read SDIO_IO_ENABLE to check for spontaneous reset\n");
- return r;
- }
-
- if ((io_en & (1 << card->function)) == 0)
- {
- s32 fw_count;
- *status = 1;
- unifi_error(card->ospriv, "UniFi has spontaneously reset.\n");
-
- /*
- * These reads are very likely to fail. We want to know if the function is really
- * disabled or the SDIO driver just returns rubbish.
- */
- fw_count = unifi_read_shared_count(card, card->sdio_ctrl_addr + 4);
- if (fw_count < 0)
- {
- unifi_error(card->ospriv, "Failed to read to-host sig written count\n");
- }
- else
- {
-            unifi_error(card->ospriv, "thsw: %u (driver thinks it is %u)\n",
- fw_count, card->to_host_signals_w);
- }
- fw_count = unifi_read_shared_count(card, card->sdio_ctrl_addr + 2);
- if (fw_count < 0)
- {
- unifi_error(card->ospriv, "Failed to read from-host sig read count\n");
- }
- else
- {
-            unifi_error(card->ospriv, "fhsr: %u (driver thinks it is %u)\n",
- fw_count, card->from_host_signals_r);
- }
-
- return r;
- }
-
- unifi_info(card->ospriv, "UniFi function %d is enabled.\n", card->function);
-
- /* See if we missed an SDIO interrupt */
- r = CardPendingInt(card, &pending);
- if (pending)
- {
- unifi_error(card->ospriv, "There is an unhandled pending interrupt.\n");
- *status = 2;
- return r;
- }
-
- return r;
-} /* unifi_check_io_status() */
-
-
-void unifi_get_hip_qos_info(card_t *card, unifi_HipQosInfo *hipqosinfo)
-{
- s32 count_fhr;
- s16 t;
- u32 occupied_fh;
-
- q_t *sigq;
- u16 nslots, i;
-
- memset(hipqosinfo, 0, sizeof(unifi_HipQosInfo));
-
- nslots = card->config_data.num_fromhost_data_slots;
-
- for (i = 0; i < nslots; i++)
- {
- if (card->from_host_data[i].bd.data_length == 0)
- {
- hipqosinfo->free_fh_bulkdata_slots++;
- }
- }
-
- for (i = 0; i < UNIFI_NO_OF_TX_QS; i++)
- {
- sigq = &card->fh_traffic_queue[i];
- t = sigq->q_wr_ptr - sigq->q_rd_ptr;
- if (t < 0)
- {
- t += sigq->q_length;
- }
- hipqosinfo->free_fh_sig_queue_slots[i] = (sigq->q_length - t) - 1;
- }
-
- count_fhr = unifi_read_shared_count(card, card->sdio_ctrl_addr + 2);
- if (count_fhr < 0)
- {
- unifi_error(card->ospriv, "Failed to read from-host sig read count - %d\n", count_fhr);
- hipqosinfo->free_fh_fw_slots = 0xfa;
- return;
- }
-
- occupied_fh = (card->from_host_signals_w - count_fhr) % 128;
-
- hipqosinfo->free_fh_fw_slots = (u16)(card->config_data.num_fromhost_sig_frags - occupied_fh);
-}
-
-
-
-CsrResult ConvertCsrSdioToCsrHipResult(card_t *card, CsrResult csrResult)
-{
- CsrResult r = CSR_RESULT_FAILURE;
-
- switch (csrResult)
- {
- case CSR_RESULT_SUCCESS:
- r = CSR_RESULT_SUCCESS;
- break;
- /* Timeout errors */
- case CSR_SDIO_RESULT_TIMEOUT:
- /* Integrity errors */
- case CSR_SDIO_RESULT_CRC_ERROR:
- r = CSR_RESULT_FAILURE;
- break;
- case CSR_SDIO_RESULT_NO_DEVICE:
- r = CSR_WIFI_HIP_RESULT_NO_DEVICE;
- break;
- case CSR_SDIO_RESULT_INVALID_VALUE:
- r = CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- break;
- case CSR_RESULT_FAILURE:
- r = CSR_RESULT_FAILURE;
- break;
- default:
- unifi_warning(card->ospriv, "Unrecognised csrResult error code: %d\n", csrResult);
- break;
- }
-
- return r;
-} /* ConvertCsrSdioToCsrHipResult() */
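/*
 * A small illustrative wrapper (hypothetical, not part of the driver) showing
 * the usual pattern for mapping an SDIO library result onto a HIP result with
 * ConvertCsrSdioToCsrHipResult(); the address and value are placeholders.
 */
static CsrResult example_csr_write16(card_t *card, u32 addr, u16 value)
{
    CsrResult csrResult = CsrSdioWrite16(card->sdio_if, addr, value);

    if (csrResult != CSR_RESULT_SUCCESS)
    {
        return ConvertCsrSdioToCsrHipResult(card, csrResult);
    }

    return CSR_RESULT_SUCCESS;
}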
-
-
diff --git a/drivers/staging/csr/csr_wifi_hip_card_sdio.h b/drivers/staging/csr/csr_wifi_hip_card_sdio.h
deleted file mode 100644
index a9b9ec4..0000000
--- a/drivers/staging/csr/csr_wifi_hip_card_sdio.h
+++ /dev/null
@@ -1,694 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/*
- * ---------------------------------------------------------------------------
- *
- * FILE: csr_wifi_hip_card_sdio.h
- *
- * PURPOSE:
- * Internal header for Card API for SDIO.
- * ---------------------------------------------------------------------------
- */
-#ifndef __CARD_SDIO_H__
-#define __CARD_SDIO_H__
-
-#include "csr_wifi_hip_unifi.h"
-#include "csr_wifi_hip_unifi_udi.h"
-#include "csr_wifi_hip_unifihw.h"
-#include "csr_wifi_hip_unifiversion.h"
-#ifndef CSR_WIFI_HIP_TA_DISABLE
-#include "csr_wifi_hip_ta_sampling.h"
-#endif
-#include "csr_wifi_hip_xbv.h"
-#include "csr_wifi_hip_chiphelper.h"
-
-
-/*
- *
- * Configuration items.
- * Which of these should go in a platform unifi_config.h file?
- *
- */
-
-/*
- * When the traffic queues contain more signals than there is space for on
- * UniFi, a limiting algorithm comes into play.
- * Transmission from the network stack is resumed once a traffic queue has
- * enough free slots to buffer more traffic; the number of free slots
- * required before resuming is RESUME_XMIT_THRESHOLD.
- */
-#define RESUME_XMIT_THRESHOLD 4
-
-
-/*
- * When reading signals from UniFi, the host processes pending all signals
- * and then acknowledges them together in a single write to update the
- * to-host-chunks-read location.
- * When there is more than one bulk data transfer (e.g. one received data
- * packet and a request for the payload data of a transmitted packet), the
- * update can be delayed significantly. This ties up resources on chip.
- *
- * To remedy this problem, to-host-chunks-read is updated after processing
- * a signal if TO_HOST_FLUSH_THRESHOLD bytes of bulk data have been
- * transferred since the last update.
- */
-#define TO_HOST_FLUSH_THRESHOLD (500 * 5)
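/*
 * A minimal sketch of the accumulate-and-flush idea behind
 * TO_HOST_FLUSH_THRESHOLD; the counter and the helper that writes
 * to-host-chunks-read back to the chip are hypothetical. The real logic
 * lives in the interrupt-processing code, not in this header.
 */
static u32 example_bulk_bytes_since_update;

static void example_note_bulk_transfer(u32 transferred)
{
    example_bulk_bytes_since_update += transferred;

    if (example_bulk_bytes_since_update >= TO_HOST_FLUSH_THRESHOLD)
    {
        /* update to-host-chunks-read on the chip here (hypothetical helper) */
        example_bulk_bytes_since_update = 0;
    }
}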
-
-
-/* SDIO Card Common Control Registers */
-#define SDIO_CCCR_SDIO_REVISION (0x00)
-#define SDIO_SD_SPEC_REVISION (0x01)
-#define SDIO_IO_ENABLE (0x02)
-#define SDIO_IO_READY (0x03)
-#define SDIO_INT_ENABLE (0x04)
-#define SDIO_INT_PENDING (0x05)
-#define SDIO_IO_ABORT (0x06)
-#define SDIO_BUS_IFACE_CONTROL (0x07)
-#define SDIO_CARD_CAPABILOTY (0x08)
-#define SDIO_COMMON_CIS_POINTER (0x09)
-#define SDIO_BUS_SUSPEND (0x0C)
-#define SDIO_FUNCTION_SELECT (0x0D)
-#define SDIO_EXEC_FLAGS (0x0E)
-#define SDIO_READY_FLAGS (0x0F)
-#define SDIO_FN0_BLOCK_SIZE (0x10)
-#define SDIO_POWER_CONTROL (0x12)
-#define SDIO_VENDOR_START (0xF0)
-
-#define SDIO_CSR_HOST_WAKEUP (0xf0)
-#define SDIO_CSR_HOST_INT_CLEAR (0xf1)
-#define SDIO_CSR_FROM_HOST_SCRATCH0 (0xf2)
-#define SDIO_CSR_FROM_HOST_SCRATCH1 (0xf3)
-#define SDIO_CSR_TO_HOST_SCRATCH0 (0xf4)
-#define SDIO_CSR_TO_HOST_SCRATCH1 (0xf5)
-#define SDIO_CSR_FUNC_EN (0xf6)
-#define SDIO_CSR_CSPI_MODE (0xf7)
-#define SDIO_CSR_CSPI_STATUS (0xf8)
-#define SDIO_CSR_CSPI_PADDING (0xf9)
-
-
-#define UNIFI_SD_INT_ENABLE_IENM 0x0001 /* Master INT Enable */
-
-#ifdef CSR_PRE_ALLOC_NET_DATA
-#define BULK_DATA_PRE_ALLOC_NUM 16
-#endif
-
-/*
- * Structure to hold configuration information read from UniFi.
- */
-typedef struct
-{
- /*
- * The version of the SDIO signal queues and bulk data pools
- * configuration structure. The MSB is the major version number, used to
- * indicate incompatible changes. The LSB gives the minor revision number,
- * used to indicate changes that maintain backwards compatibility.
- */
- u16 version;
-
- /*
- * offset from the start of the shared data memory to the SD IO
- * control structure.
- */
- u16 sdio_ctrl_offset;
-
- /* Buffer handle of the from-host signal queue */
- u16 fromhost_sigbuf_handle;
-
- /* Buffer handle of the to-host signal queue */
- u16 tohost_sigbuf_handle;
-
- /*
- * Maximum number of signal primitive or bulk data command fragments that may be
- * pending in the to-hw signal queue.
- */
- u16 num_fromhost_sig_frags;
-
- /*
- * Number of signal primitive or bulk data command fragments that must be pending
- * in the to-host signal queue before the host will generate an interrupt
- * to indicate that it has read a signal. This will usually be the total
- * capacity of the to-host signal buffer less the size of the largest signal
- * primitive divided by the signal primitive fragment size, but may be set
- * to 1 to request interrupts every time that the host reads a signal.
- * Note that the hw may place more signals in the to-host signal queue
- * than indicated by this field.
- */
- u16 num_tohost_sig_frags;
-
- /*
- * Number of to-hw bulk data slots. Slots are numbered from 0 (zero) to
- * one less than the value in this field
- */
- u16 num_fromhost_data_slots;
-
- /*
- * Number of frm-hw bulk data slots. Slots are numbered from 0 (zero) to
- * one less than the value in this field
- */
- u16 num_tohost_data_slots;
-
- /*
- * Size of the bulk data slots (2 octets)
- * The size of the bulk data slots in octets. This will usually be
- * the size of the largest MSDU. The value should always be even.
- */
- u16 data_slot_size;
-
- /*
- * Indicates that the host has finished the initialisation sequence.
- * Initialised to 0x0000 by the firmware, and set to 0x0001 by us.
- */
- u16 initialised;
-
- /* Added by protocol version 0x0001 */
- u32 overlay_size;
-
- /* Added by protocol version 0x0300 */
- u16 data_slot_round;
- u16 sig_frag_size;
-
- /* Added by protocol version 0x0500 */
- u16 tohost_signal_padding;
-} sdio_config_data_t;
-
-/*
- * These values may change with versions of the Host Interface Protocol.
- */
-/*
- * Size of config info block pointed to by the CSR_SLT_SDIO_SLOT_CONFIG
- * entry in the f/w symbol table
- */
-#define SDIO_CONFIG_DATA_SIZE 30
-
-/* Offset of the INIT flag in the config info block. */
-#define SDIO_INIT_FLAG_OFFSET 0x12
-#define SDIO_TO_HOST_SIG_PADDING_OFFSET 0x1C
-
-
-/* Structure for a bulk data transfer command */
-typedef struct
-{
- u16 cmd_and_len; /* bits 12-15 cmd, bits 0-11 len */
- u16 data_slot; /* slot number, perhaps OR'd with SLOT_DIR_TO_HOST */
- u16 offset;
- u16 buffer_handle;
-} bulk_data_cmd_t;
-
-
-/* Bulk Data signal command values */
-#define SDIO_CMD_SIGNAL 0x00
-#define SDIO_CMD_TO_HOST_TRANSFER 0x01
-#define SDIO_CMD_TO_HOST_TRANSFER_ACK 0x02 /*deprecated*/
-#define SDIO_CMD_FROM_HOST_TRANSFER 0x03
-#define SDIO_CMD_FROM_HOST_TRANSFER_ACK 0x04 /*deprecated*/
-#define SDIO_CMD_CLEAR_SLOT 0x05
-#define SDIO_CMD_OVERLAY_TRANSFER 0x06
-#define SDIO_CMD_OVERLAY_TRANSFER_ACK 0x07 /*deprecated*/
-#define SDIO_CMD_FROM_HOST_AND_CLEAR 0x08
-#define SDIO_CMD_PADDING 0x0f
-
-#define SLOT_DIR_TO_HOST 0x8000
-
-
-/* Initialise bulkdata slot
- * params:
- * bulk_data_desc_t *bulk_data_slot
- */
-#define UNIFI_INIT_BULK_DATA(bulk_data_slot) \
- { \
- (bulk_data_slot)->os_data_ptr = NULL; \
- (bulk_data_slot)->data_length = 0; \
- (bulk_data_slot)->os_net_buf_ptr = NULL; \
- (bulk_data_slot)->net_buf_length = 0; \
- }
-
-/*
- * Structure to contain a SIGNAL datagram.
- * This is used to build signal queues between the main driver and the
- * i/o thread.
- * The fields are:
- *      sigbuf          Contains the HIP signal in wire-format (i.e. packed,
- * little-endian)
- * bulkdata Contains a copy of any associated bulk data
- * signal_length The size of the signal in the sigbuf
- */
-typedef struct card_signal
-{
- u8 sigbuf[UNIFI_PACKED_SIGBUF_SIZE];
-
- /* Length of the SIGNAL inside sigbuf */
- u16 signal_length;
-
- bulk_data_desc_t bulkdata[UNIFI_MAX_DATA_REFERENCES];
-} card_signal_t;
-
-
-/*
- * Control structure for a generic ring buffer.
- */
-#define UNIFI_QUEUE_NAME_MAX_LENGTH 16
-typedef struct
-{
- card_signal_t *q_body;
-
- /* Num elements in queue (capacity is one less than this!) */
- u16 q_length;
-
- u16 q_wr_ptr;
- u16 q_rd_ptr;
-
- char name[UNIFI_QUEUE_NAME_MAX_LENGTH];
-} q_t;
-
-
-#define UNIFI_RESERVED_COMMAND_SLOTS 2
-
-/* Considering approx 500 us per packet giving 0.5 secs */
-#define UNIFI_PACKETS_INTERVAL 1000
-
-/*
- * Dynamic slot reservation for QoS
- */
-typedef struct
-{
- u16 from_host_used_slots[UNIFI_NO_OF_TX_QS];
- u16 from_host_max_slots[UNIFI_NO_OF_TX_QS];
- u16 from_host_reserved_slots[UNIFI_NO_OF_TX_QS];
-
-    /* Parameters to determine if a queue was active.
-       If the number of packets sent is greater than the threshold
-       for the queue, the queue is considered active and no
-       re-reservation is done; it is important not to keep this
-       value too low */
- /* Packets sent during this interval */
- u16 packets_txed[UNIFI_NO_OF_TX_QS];
- u16 total_packets_txed;
-
- /* Number of packets to see if slots need to be reassigned */
- u16 packets_interval;
-
- /* Once a queue reaches a stable state, avoid processing */
- u8 queue_stable[UNIFI_NO_OF_TX_QS];
-} card_dynamic_slot_t;
-
-
-/* These are type-safe and don't write incorrect values to the
- * structure. */
-
-/* Return queue slots used count
- * params:
- * const q_t *q
- * returns:
- * u16
- */
-#define CSR_WIFI_HIP_Q_SLOTS_USED(q) \
- (((q)->q_wr_ptr - (q)->q_rd_ptr < 0)? \
- ((q)->q_wr_ptr - (q)->q_rd_ptr + (q)->q_length) : ((q)->q_wr_ptr - (q)->q_rd_ptr))
-
-/* Return queue slots free count
- * params:
- * const q_t *q
- * returns:
- * u16
- */
-#define CSR_WIFI_HIP_Q_SLOTS_FREE(q) \
- ((q)->q_length - CSR_WIFI_HIP_Q_SLOTS_USED((q)) - 1)
-
-/* Return slot signal data pointer
- * params:
- * const q_t *q
- * u16 slot
- * returns:
- * card_signal_t *
- */
-#define CSR_WIFI_HIP_Q_SLOT_DATA(q, slot) \
- ((q)->q_body + slot)
-
-/* Return queue next read slot
- * params:
- * const q_t *q
- * returns:
- * u16 slot offset
- */
-#define CSR_WIFI_HIP_Q_NEXT_R_SLOT(q) \
- ((q)->q_rd_ptr)
-
-/* Return queue next write slot
- * params:
- * const q_t *q
- * returns:
- * u16 slot offset
- */
-#define CSR_WIFI_HIP_Q_NEXT_W_SLOT(q) \
- ((q)->q_wr_ptr)
-
-/* Return updated queue pointer wrapped around its length
- * params:
- * const q_t *q
- * u16 x amount to add to queue pointer
- * returns:
- * u16 wrapped queue pointer
- */
-#define CSR_WIFI_HIP_Q_WRAP(q, x) \
- ((((x) >= (q)->q_length)?((x) % (q)->q_length) : (x)))
-
-/* Advance queue read pointer
- * params:
- * const q_t *q
- */
-#define CSR_WIFI_HIP_Q_INC_R(q) \
- ((q)->q_rd_ptr = CSR_WIFI_HIP_Q_WRAP((q), (q)->q_rd_ptr + 1))
-
-/* Advance queue write pointer
- * params:
- * const q_t *q
- */
-#define CSR_WIFI_HIP_Q_INC_W(q) \
- ((q)->q_wr_ptr = CSR_WIFI_HIP_Q_WRAP((q), (q)->q_wr_ptr + 1))
-
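/*
 * A minimal sketch (illustrative only) of enqueueing a signal with the
 * accessor macros above. The caller is assumed to have packed the signal
 * into sig and to guarantee sig_len <= UNIFI_PACKED_SIGBUF_SIZE.
 */
static CsrResult example_enqueue_signal(q_t *q, const u8 *sig, u16 sig_len)
{
    card_signal_t *slot;

    if (CSR_WIFI_HIP_Q_SLOTS_FREE(q) == 0)
    {
        return CSR_RESULT_FAILURE;                  /* queue is full */
    }

    slot = CSR_WIFI_HIP_Q_SLOT_DATA(q, CSR_WIFI_HIP_Q_NEXT_W_SLOT(q));
    memcpy(slot->sigbuf, sig, sig_len);
    slot->signal_length = sig_len;

    CSR_WIFI_HIP_Q_INC_W(q);                        /* publish the entry */

    return CSR_RESULT_SUCCESS;
}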
-enum unifi_host_state
-{
- UNIFI_HOST_STATE_AWAKE = 0,
- UNIFI_HOST_STATE_DROWSY = 1,
- UNIFI_HOST_STATE_TORPID = 2
-};
-
-typedef struct
-{
- bulk_data_desc_t bd;
- unifi_TrafficQueue queue; /* Used for dynamic slot reservation */
-} slot_desc_t;
-
-/*
- * Structure describing a UniFi SDIO card.
- */
-struct card
-{
- /*
- * Back pointer for the higher level OS code. This is passed as
- * an argument to callbacks (e.g. for received data and indications).
- */
- void *ospriv;
-
- /*
- * mapping of HIP slot to MA-PACKET.req host tag, the
- * array is indexed by slot numbers and each index stores
- * information of the last host tag it was used for
- */
- u32 *fh_slot_host_tag_record;
-
-
- /* Info read from Symbol Table during probe */
- u32 build_id;
- char build_id_string[128];
-
- /* Retrieve from SDIO driver. */
- u16 chip_id;
-
- /* Read from GBL_CHIP_VERSION. */
- u16 chip_version;
-
- /* From the SDIO driver (probably 1) */
- u8 function;
-
-    /* This is used to get the register addresses and other chip details. */
- ChipDescript *helper;
-
- /*
- * Bit mask of PIOs for the loader to waggle during download.
- * We assume these are connected to LEDs. The main firmware gets
- * the mask from a MIB entry.
- */
- s32 loader_led_mask;
-
- /*
- * Support for flow control. When the from-host queue of signals
- * is full, we ask the host upper layer to stop sending packets. When
- * the queue drains we tell it that it can send packets again.
- * We use this flag to remember the current state.
- */
-#define card_is_tx_q_paused(card, q) (card->tx_q_paused_flag[q])
-#define card_tx_q_unpause(card, q) (card->tx_q_paused_flag[q] = 0)
-#define card_tx_q_pause(card, q) (card->tx_q_paused_flag[q] = 1)
-
-    u16 tx_q_paused_flag[UNIFI_TRAFFIC_Q_MAX + 1 + UNIFI_NO_OF_TX_QS]; /* defensively sized: more than big enough */
-
- /* UDI callback for logging UniFi interactions */
- udi_func_t udi_hook;
-
- u8 bh_reason_host;
- u8 bh_reason_unifi;
-
- /* SDIO clock speed request from OS layer */
- u8 request_max_clock;
-
- /* Last SDIO clock frequency set */
- u32 sdio_clock_speed;
-
- /*
-     * Current host state (copy of the value in the IOABORT register)
-     * and a spinlock to protect it.
- */
- enum unifi_host_state host_state;
-
- enum unifi_low_power_mode low_power_mode;
- enum unifi_periodic_wake_mode periodic_wake_mode;
-
- /*
- * Ring buffer of signal structs for a queue of data packets from
- * the host.
- * The queue is empty when fh_data_q_num_rd == fh_data_q_num_wr.
- * To add a packet to the queue, copy it to index given by
- * (fh_data_q_num_wr%UNIFI_SOFT_Q_LENGTH) and advance fh_data_q_num_wr.
- * To take a packet from the queue, copy data from index given by
- * (fh_data_q_num_rd%UNIFI_SOFT_Q_LENGTH) and advance fh_data_q_num_rd.
-     * fh_data_q_num_rd and fh_data_q_num_wr are both modulo 256.
- */
- card_signal_t fh_command_q_body[UNIFI_SOFT_COMMAND_Q_LENGTH];
- q_t fh_command_queue;
-
- card_signal_t fh_traffic_q_body[UNIFI_NO_OF_TX_QS][UNIFI_SOFT_TRAFFIC_Q_LENGTH];
- q_t fh_traffic_queue[UNIFI_NO_OF_TX_QS];
-
- /*
- * Signal counts from UniFi SDIO Control Data Structure.
- * These are cached and synchronised with the UniFi before and after
- * a batch of operations.
- *
- * These are the modulo-256 count of signals written to or read from UniFi
- * The value is incremented for every signal.
- */
- s32 from_host_signals_w;
- s32 from_host_signals_r;
- s32 to_host_signals_r;
- s32 to_host_signals_w;
-
-
- /* Should specify buffer size as a number of signals */
- /*
- * Enough for 10 th and 10 fh data slots:
- * 1 * 10 * 8 = 80
- * 2 * 10 * 8 = 160
- */
-#define UNIFI_FH_BUF_SIZE 1024
- struct sigbuf
- {
- u8 *buf; /* buffer area */
- u8 *ptr; /* current pos */
- u16 count; /* signal count */
- u16 bufsize;
- } fh_buffer;
- struct sigbuf th_buffer;
-
-
- /*
- * Field to use for the incrementing value to write to the UniFi
- * SHARED_IO_INTERRUPT register.
- * Flag to say we need to generate an interrupt at end of processing.
- */
- u32 unifi_interrupt_seq;
- u8 generate_interrupt;
-
-
- /* Pointers to the bulk data slots */
- slot_desc_t *from_host_data;
- bulk_data_desc_t *to_host_data;
-
-
- /*
- * Index of the next (hopefully) free data slot.
- * This is an optimisation that starts searching at a more likely point
- * than the beginning.
- */
- s16 from_host_data_head;
-
- /* Dynamic slot allocation for queues */
- card_dynamic_slot_t dynamic_slot_data;
-
- /*
- * SDIO specific fields
- */
-
- /* Interface pointer for the SDIO library */
- CsrSdioFunction *sdio_if;
-
- /* Copy of config_data struct from the card */
- sdio_config_data_t config_data;
-
- /* SDIO address of the Initialised flag and Control Data struct */
- u32 init_flag_addr;
- u32 sdio_ctrl_addr;
-
- /* The last value written to the Shared Data Memory Page register */
- u32 proc_select;
- u32 dmem_page;
- u32 pmem_page;
-
- /* SDIO traffic counters limited to 32 bits for Synergy compatibility */
- u32 sdio_bytes_read;
- u32 sdio_bytes_written;
-
- u8 memory_resources_allocated;
-
- /* UniFi SDIO I/O Block size. */
- u16 sdio_io_block_size;
-
- /* Pad transfer sizes to SDIO block boundaries */
- u8 sdio_io_block_pad;
-
- /* Read from the XBV */
- struct FWOV fwov;
-
-#ifndef CSR_WIFI_HIP_TA_DISABLE
- /* TA sampling */
- ta_data_t ta_sampling;
-#endif
-
- /* Auto-coredump */
- s16 request_coredump_on_reset; /* request coredump on next reset */
- struct coredump_buf *dump_buf; /* root node */
- struct coredump_buf *dump_next_write; /* node to fill at next dump */
- struct coredump_buf *dump_cur_read; /* valid node to read, or NULL */
-
-#ifdef CSR_WIFI_HIP_DATA_PLANE_PROFILE
- struct cmd_profile
- {
- u32 cmd52_count;
- u32 cmd53_count;
- u32 tx_count;
- u32 tx_cfm_count;
- u32 rx_count;
- u32 bh_count;
- u32 process_count;
- u32 protocol_count;
-
- u32 cmd52_f0_r_count;
- u32 cmd52_f0_w_count;
- u32 cmd52_r8or16_count;
- u32 cmd52_w8or16_count;
- u32 cmd52_r16_count;
- u32 cmd52_w16_count;
- u32 cmd52_r32_count;
-
- u32 sdio_cmd_signal;
- u32 sdio_cmd_clear_slot;
- u32 sdio_cmd_to_host;
- u32 sdio_cmd_from_host;
- u32 sdio_cmd_from_host_and_clear;
- } hip_prof;
- struct cmd_profile cmd_prof;
-#endif
-
- /* Interrupt processing mode flags */
- u32 intmode;
-
-#ifdef UNIFI_DEBUG
- u8 lsb;
-#endif
-
- /* Historic firmware panic codes */
- u32 panic_data_phy_addr;
- u32 panic_data_mac_addr;
- u16 last_phy_panic_code;
- u16 last_phy_panic_arg;
- u16 last_mac_panic_code;
- u16 last_mac_panic_arg;
-#ifdef CSR_PRE_ALLOC_NET_DATA
- bulk_data_desc_t bulk_data_desc_list[BULK_DATA_PRE_ALLOC_NUM];
- u16 prealloc_netdata_r;
- u16 prealloc_netdata_w;
-#endif
-}; /* struct card */
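/*
 * A minimal sketch of from-host flow control using the card_tx_q_pause()
 * helpers defined inside the structure above. The function below is
 * hypothetical and only illustrates the intent of the pause flags; the real
 * driver also notifies the OS layer to stop/restart the network stack.
 */
static void example_flow_control(card_t *card, unifi_TrafficQueue q)
{
    q_t *sigq = &card->fh_traffic_queue[q];

    if (CSR_WIFI_HIP_Q_SLOTS_FREE(sigq) == 0)
    {
        card_tx_q_pause(card, q);       /* queue full: stop accepting packets */
    }
    else if (card_is_tx_q_paused(card, q) &&
             (CSR_WIFI_HIP_Q_SLOTS_FREE(sigq) >= RESUME_XMIT_THRESHOLD))
    {
        card_tx_q_unpause(card, q);     /* enough slots drained: resume */
    }
}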
-
-
-/* Reset types */
-enum unifi_reset_type
-{
- UNIFI_COLD_RESET = 1,
- UNIFI_WARM_RESET = 2
-};
-
-/*
- * unifi_set_host_state() implements signalling for waking UniFi from
- * deep sleep. The host indicates to UniFi that it is in one of three states:
- * Torpid - host has nothing to send, UniFi can go to sleep.
- * Drowsy - host has data to send to UniFi. UniFi will respond with an
- * SDIO interrupt. When hosts responds it moves to Awake.
- * Awake - host has data to transfer, UniFi must stay awake.
- * When host has finished, it moves to Torpid.
- */
-CsrResult unifi_set_host_state(card_t *card, enum unifi_host_state state);
-
-
-CsrResult unifi_set_proc_select(card_t *card, enum unifi_dbg_processors_select select);
-s32 card_read_signal_counts(card_t *card);
-bulk_data_desc_t* card_find_data_slot(card_t *card, s16 slot);
-
-
-CsrResult unifi_read32(card_t *card, u32 unifi_addr, u32 *pdata);
-CsrResult unifi_readnz(card_t *card, u32 unifi_addr,
- void *pdata, u16 len);
-s32 unifi_read_shared_count(card_t *card, u32 addr);
-
-CsrResult unifi_writen(card_t *card, u32 unifi_addr, void *pdata, u16 len);
-
-CsrResult unifi_bulk_rw(card_t *card, u32 handle,
- void *pdata, u32 len, s16 direction);
-CsrResult unifi_bulk_rw_noretry(card_t *card, u32 handle,
- void *pdata, u32 len, s16 direction);
-#define UNIFI_SDIO_READ 0
-#define UNIFI_SDIO_WRITE 1
-
-CsrResult unifi_read_8_or_16(card_t *card, u32 unifi_addr, u8 *pdata);
-CsrResult unifi_write_8_or_16(card_t *card, u32 unifi_addr, u8 data);
-CsrResult unifi_read_direct_8_or_16(card_t *card, u32 addr, u8 *pdata);
-CsrResult unifi_write_direct_8_or_16(card_t *card, u32 addr, u8 data);
-
-CsrResult unifi_read_direct16(card_t *card, u32 addr, u16 *pdata);
-CsrResult unifi_read_direct32(card_t *card, u32 addr, u32 *pdata);
-CsrResult unifi_read_directn(card_t *card, u32 addr, void *pdata, u16 len);
-
-CsrResult unifi_write_direct16(card_t *card, u32 addr, u16 data);
-CsrResult unifi_write_directn(card_t *card, u32 addr, void *pdata, u16 len);
-
-CsrResult sdio_read_f0(card_t *card, u32 addr, u8 *pdata);
-CsrResult sdio_write_f0(card_t *card, u32 addr, u8 data);
-
-void unifi_read_panic(card_t *card);
-#ifdef CSR_PRE_ALLOC_NET_DATA
-void prealloc_netdata_free(card_t *card);
-CsrResult prealloc_netdata_alloc(card_t *card);
-#endif
-/* For diagnostic use */
-void dump(void *mem, u16 len);
-void dump16(void *mem, u16 len);
-
-#endif /* __CARD_SDIO_H__ */
diff --git a/drivers/staging/csr/csr_wifi_hip_card_sdio_intr.c b/drivers/staging/csr/csr_wifi_hip_card_sdio_intr.c
deleted file mode 100644
index cfe186e..0000000
--- a/drivers/staging/csr/csr_wifi_hip_card_sdio_intr.c
+++ /dev/null
@@ -1,2595 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/*
- * ---------------------------------------------------------------------------
- * FILE: csr_wifi_hip_card_sdio_intr.c
- *
- * PURPOSE:
- * Interrupt processing for the UniFi SDIO driver.
- *
- * We may need another signal queue of responses to UniFi to hold
- * bulk data commands generated by read_to_host_signals().
- *
- * ---------------------------------------------------------------------------
- */
-#undef CSR_WIFI_HIP_NOISY
-
-#include "csr_wifi_hip_unifi.h"
-#include "csr_wifi_hip_conversions.h"
-#include "csr_wifi_hip_card.h"
-#include "csr_wifi_hip_xbv.h"
-
-
-/*
- * If the SDIO link is idle for this time (in milliseconds),
- * signal UniFi to go into Deep Sleep.
- * Valid return value of unifi_bh().
- */
-#define UNIFI_DEFAULT_HOST_IDLE_TIMEOUT 5
-/*
- * If the UniFi has not woken up for this time (in milliseconds),
- * signal the bottom half to take action.
- * Valid return value of unifi_bh().
- */
-#define UNIFI_DEFAULT_WAKE_TIMEOUT 1000
-
-
-static CsrResult process_bh(card_t *card);
-static CsrResult handle_host_protocol(card_t *card, u8 *processed_something);
-
-static CsrResult flush_fh_buffer(card_t *card);
-
-static CsrResult check_fh_sig_slots(card_t *card, u16 needed, s32 *space);
-
-static CsrResult read_to_host_signals(card_t *card, s32 *processed);
-static CsrResult process_to_host_signals(card_t *card, s32 *processed);
-
-static CsrResult process_bulk_data_command(card_t *card,
- const u8 *cmdptr,
- s16 cmd, u16 len);
-static CsrResult process_clear_slot_command(card_t *card,
- const u8 *cmdptr);
-static CsrResult process_fh_cmd_queue(card_t *card, s32 *processed);
-static CsrResult process_fh_traffic_queue(card_t *card, s32 *processed);
-static void restart_packet_flow(card_t *card);
-static CsrResult process_clock_request(card_t *card);
-
-#ifdef CSR_WIFI_HIP_NOISY
-s16 dump_fh_buf = 0;
-#endif /* CSR_WIFI_HIP_NOISY */
-
-#ifdef CSR_WIFI_HIP_DEBUG_OFFLINE
-
-/*
- * The unifi_debug_output buffer can be used to debug the HIP behaviour offline,
- * i.e. without using the tracing functions that change the timing.
- *
- * Call unifi_debug_log_to_buf() with printf arguments to store a string into
- * unifi_debug_output. When unifi_debug_buf_dump() is called, the contents of the
- * buffer are dumped with dump_str(), which has to be implemented in the
- * OS layer during the porting exercise. The offset printed holds the
- * position of the last character (always a zero).
- *
- */
-
-#define UNIFI_DEBUG_GBUFFER_SIZE 8192
-static char unifi_debug_output[UNIFI_DEBUG_GBUFFER_SIZE];
-static char *unifi_dbgbuf_ptr = unifi_debug_output;
-static char *unifi_dbgbuf_start = unifi_debug_output;
-
-static void append_char(char c)
-{
- /* write char and advance pointer */
- *unifi_dbgbuf_ptr++ = c;
- /* wrap pointer at end of buffer */
- if ((unifi_dbgbuf_ptr - unifi_debug_output) >= UNIFI_DEBUG_GBUFFER_SIZE)
- {
- unifi_dbgbuf_ptr = unifi_debug_output;
- }
-} /* append_char() */
-
-
-void unifi_debug_string_to_buf(const char *str)
-{
- const char *p = str;
- while (*p)
- {
- append_char(*p);
- p++;
- }
- /* Update start-of-buffer pointer */
- unifi_dbgbuf_start = unifi_dbgbuf_ptr + 1;
- if ((unifi_dbgbuf_start - unifi_debug_output) >= UNIFI_DEBUG_GBUFFER_SIZE)
- {
- unifi_dbgbuf_start = unifi_debug_output;
- }
-}
-
-
-void unifi_debug_log_to_buf(const char *fmt, ...)
-{
-#define DEBUG_BUFFER_SIZE 80
- static char s[DEBUG_BUFFER_SIZE];
- va_list args;
-
- va_start(args, fmt);
- vsnprintf(s, DEBUG_BUFFER_SIZE, fmt, args);
- va_end(args);
-
- unifi_debug_string_to_buf(s);
-} /* unifi_debug_log_to_buf() */
-
-
-/* Convert an unsigned 16-bit integer to a 4-character hex string */
-static void CsrUInt16ToHex(u16 number, char *str)
-{
- u16 index;
- u16 currentValue;
-
- for (index = 0; index < 4; index++)
- {
- currentValue = (u16) (number & 0x000F);
- number >>= 4;
- str[3 - index] = (char) (currentValue > 9 ? currentValue + 55 : currentValue + '0');
- }
- str[4] = '\0';
-}
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_debug_hex_to_buf
- *
- * puts the contents of the passed buffer into the debug buffer as a hex string
- *
- * Arguments:
- * buff buffer to print as hex
- * length number of chars to print
- *
- * Returns:
- * None.
- *
- * ---------------------------------------------------------------------------
- */
-void unifi_debug_hex_to_buf(const char *buff, u16 length)
-{
- char s[5];
- u16 i;
-
- for (i = 0; i < length; i = i + 2)
- {
- CsrUInt16ToHex(*((u16 *)(buff + i)), s);
- unifi_debug_string_to_buf(s);
- }
-}
-
-
-void unifi_debug_buf_dump(void)
-{
- s32 offset = unifi_dbgbuf_ptr - unifi_debug_output;
-
- unifi_error(NULL, "HIP debug buffer offset=%d\n", offset);
- dump_str(unifi_debug_output + offset, UNIFI_DEBUG_GBUFFER_SIZE - offset);
- dump_str(unifi_debug_output, offset);
-} /* unifi_debug_buf_dump() */
-
-
-#endif /* CSR_WIFI_HIP_DEBUG_OFFLINE */
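#ifdef CSR_WIFI_HIP_DEBUG_OFFLINE
/*
 * A minimal usage sketch (hypothetical) for the offline debug buffer above:
 * strings are accumulated cheaply while the HIP runs and are only printed
 * when unifi_debug_buf_dump() is called, typically from an error path.
 */
static void example_offline_trace(u16 signal_id)
{
    unifi_debug_log_to_buf("sig=0x%04x\n", signal_id);

    /* ... later, when something has gone wrong ... */
    unifi_debug_buf_dump();
}
#endif /* CSR_WIFI_HIP_DEBUG_OFFLINE */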
-
-#ifdef CSR_PRE_ALLOC_NET_DATA
-#define NETDATA_PRE_ALLOC_BUF_SIZE 8000
-
-void prealloc_netdata_free(card_t *card)
-{
- unifi_warning(card->ospriv, "prealloc_netdata_free: IN: w=%d r=%d\n", card->prealloc_netdata_w, card->prealloc_netdata_r);
-
- while (card->bulk_data_desc_list[card->prealloc_netdata_r].data_length != 0)
- {
- unifi_warning(card->ospriv, "prealloc_netdata_free: r=%d\n", card->prealloc_netdata_r);
-
- unifi_net_data_free(card->ospriv, &card->bulk_data_desc_list[card->prealloc_netdata_r]);
- card->prealloc_netdata_r++;
- card->prealloc_netdata_r %= BULK_DATA_PRE_ALLOC_NUM;
- }
- card->prealloc_netdata_r = card->prealloc_netdata_w = 0;
-
- unifi_warning(card->ospriv, "prealloc_netdata_free: OUT: w=%d r=%d\n", card->prealloc_netdata_w, card->prealloc_netdata_r);
-}
-
-
-CsrResult prealloc_netdata_alloc(card_t *card)
-{
- CsrResult r;
-
- unifi_trace(card->ospriv, UDBG5, "prealloc_netdata_alloc: IN: w=%d r=%d\n", card->prealloc_netdata_w, card->prealloc_netdata_r);
-
- while (card->bulk_data_desc_list[card->prealloc_netdata_w].data_length == 0)
- {
- r = unifi_net_data_malloc(card->ospriv, &card->bulk_data_desc_list[card->prealloc_netdata_w], NETDATA_PRE_ALLOC_BUF_SIZE);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "prealloc_netdata_alloc: Failed to allocate t-h bulk data\n");
- return CSR_RESULT_FAILURE;
- }
- card->prealloc_netdata_w++;
- card->prealloc_netdata_w %= BULK_DATA_PRE_ALLOC_NUM;
- }
- unifi_trace(card->ospriv, UDBG5, "prealloc_netdata_alloc: OUT: w=%d r=%d\n", card->prealloc_netdata_w, card->prealloc_netdata_r);
-
- return CSR_RESULT_SUCCESS;
-}
-
-
-static CsrResult prealloc_netdata_get(card_t *card, bulk_data_desc_t *bulk_data_slot, u32 size)
-{
- CsrResult r;
-
- unifi_trace(card->ospriv, UDBG5, "prealloc_netdata_get: IN: w=%d r=%d\n", card->prealloc_netdata_w, card->prealloc_netdata_r);
-
- if (card->bulk_data_desc_list[card->prealloc_netdata_r].data_length == 0)
- {
- unifi_error(card->ospriv, "prealloc_netdata_get: data_length = 0\n");
- }
-
- if ((size > NETDATA_PRE_ALLOC_BUF_SIZE) || (card->bulk_data_desc_list[card->prealloc_netdata_r].data_length == 0))
- {
- unifi_warning(card->ospriv, "prealloc_netdata_get: Calling net_data_malloc\n");
-
- r = unifi_net_data_malloc(card->ospriv, bulk_data_slot, size);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "prealloc_netdata_get: Failed to allocate t-h bulk data\n");
- return CSR_RESULT_FAILURE;
- }
- return CSR_RESULT_SUCCESS;
- }
-
- *bulk_data_slot = card->bulk_data_desc_list[card->prealloc_netdata_r];
- card->bulk_data_desc_list[card->prealloc_netdata_r].os_data_ptr = NULL;
- card->bulk_data_desc_list[card->prealloc_netdata_r].os_net_buf_ptr = NULL;
- card->bulk_data_desc_list[card->prealloc_netdata_r].net_buf_length = 0;
- card->bulk_data_desc_list[card->prealloc_netdata_r].data_length = 0;
-
- card->prealloc_netdata_r++;
- card->prealloc_netdata_r %= BULK_DATA_PRE_ALLOC_NUM;
-
- unifi_trace(card->ospriv, UDBG5, "prealloc_netdata_get: OUT: w=%d r=%d\n", card->prealloc_netdata_w, card->prealloc_netdata_r);
-
- return CSR_RESULT_SUCCESS;
-}
-
-
-#endif
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_sdio_interrupt_handler
- *
- * This function should be called by the OS-dependent code to handle
- * an SDIO interrupt from the UniFi.
- *
- * Arguments:
- * card Pointer to card context structure.
- *
- * Returns:
- * None.
- *
- * Notes: This function may be called in DRS context. In this case,
- * tracing with the unifi_trace(), etc, is not allowed.
- * ---------------------------------------------------------------------------
- */
-void unifi_sdio_interrupt_handler(card_t *card)
-{
- /*
- * Set the flag to say reason for waking was SDIO interrupt.
- * Then ask the OS layer to run the unifi_bh to give attention to the UniFi.
- */
- card->bh_reason_unifi = 1;
- (void)unifi_run_bh(card->ospriv);
-} /* unifi_sdio_interrupt_handler() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_configure_low_power_mode
- *
- * This function should be called by the OS-dependent code when
- * the deep sleep signaling needs to be enabled or disabled.
- *
- * Arguments:
- * card Pointer to card context structure.
- * low_power_mode Disable/Enable the deep sleep signaling
- * periodic_wake_mode UniFi wakes host periodically.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success or a CSR error code.
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_configure_low_power_mode(card_t *card,
- enum unifi_low_power_mode low_power_mode,
- enum unifi_periodic_wake_mode periodic_wake_mode)
-{
- card->low_power_mode = low_power_mode;
- card->periodic_wake_mode = periodic_wake_mode;
-
- unifi_trace(card->ospriv, UDBG1,
- "unifi_configure_low_power_mode: new mode = %s, wake_host = %s\n",
- (low_power_mode == UNIFI_LOW_POWER_DISABLED)?"disabled" : "enabled",
- (periodic_wake_mode == UNIFI_PERIODIC_WAKE_HOST_DISABLED)?"FALSE" : "TRUE");
-
- (void)unifi_run_bh(card->ospriv);
- return CSR_RESULT_SUCCESS;
-} /* unifi_configure_low_power_mode() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_force_low_power_mode
- *
- * This function should be called by the OS-dependent code when
- * UniFi needs to be set to the low power mode (e.g. on suspend)
- *
- * Arguments:
- * card Pointer to card context structure.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success or a CSR error code.
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_force_low_power_mode(card_t *card)
-{
- if (card->low_power_mode == UNIFI_LOW_POWER_DISABLED)
- {
-        unifi_error(card->ospriv, "Attempt to set mode to TORPID when low power mode is disabled\n");
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- return unifi_set_host_state(card, UNIFI_HOST_STATE_TORPID);
-} /* unifi_force_low_power_mode() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_bh
- *
- * This function should be called by the OS-dependent code when
- * host and/or UniFi has requested an exchange of messages.
- *
- * Arguments:
- * card Pointer to card context structure.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success or a CSR error code.
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_bh(card_t *card, u32 *remaining)
-{
- CsrResult r;
- CsrResult csrResult;
- u8 pending;
- s32 iostate, j;
- const enum unifi_low_power_mode low_power_mode = card->low_power_mode;
- u16 data_slots_used = 0;
-
-
- /* Process request to raise the maximum SDIO clock */
- r = process_clock_request(card);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Error setting maximum SDIO clock\n");
- goto exit;
- }
-
- /*
- * Why was the BH thread woken?
- * If it was an SDIO interrupt, UniFi is awake and we need to process it.
- * If it was a host process queueing data, then we need to awaken UniFi.
- *
- * Priority of flags is top down.
- *
- * ----------------------------------------------------------+
- * \state| AWAKE | DROWSY | TORPID |
- * flag\ | | | |
- * ---------+--------------+----------------+----------------|
- * | do the host | go to AWAKE and| go to AWAKE and|
- * unifi | protocol | do the host | do the host |
- * | | protocol | protocol |
- * ---------+--------------+----------------+----------------|
- * | do the host | | |
- * host | protocol | do nothing | go to DROWSY |
- * | | | |
- * ---------+--------------+----------------+----------------|
- * | | | should not |
- * timeout | go to TORPID | error, unifi | occur |
- * | | didn't wake up | do nothing |
- * ----------------------------------------------------------+
- *
- * Note that if we end up in the AWAKE state we always do the host protocol.
- */
-
- do
- {
- /*
- * When the host state is set to DROWSY, then we can not disable the
-         * When the host state is set to DROWSY, we cannot disable the
-         * interrupts, as UniFi can generate an interrupt even when the INT_ENABLE
-         * register has the interrupts disabled. Such an interrupt would be lost.
- if (card->host_state == UNIFI_HOST_STATE_DROWSY || card->host_state == UNIFI_HOST_STATE_TORPID)
- {
- u8 reason_unifi;
-
- /*
- * An interrupt may occur while or after we cache the reason.
- * This interrupt will cause the unifi_bh() to be scheduled again.
- * Any interrupt that has happened before the register is read
-             * and is considered spurious has to be acknowledged.
- */
- reason_unifi = card->bh_reason_unifi;
-
- /*
- * If an interrupt is received, check if it was a real one,
- * set the host state to AWAKE and run the BH.
- */
- r = CardPendingInt(card, &pending);
- if (r != CSR_RESULT_SUCCESS)
- {
- goto exit;
- }
-
- if (pending)
- {
- unifi_trace(card->ospriv, UDBG5,
- "UNIFI_HOST_STATE_%s: Set state to AWAKE.\n",
- (card->host_state == UNIFI_HOST_STATE_TORPID)?"TORPID" : "DROWSY");
-
- r = unifi_set_host_state(card, UNIFI_HOST_STATE_AWAKE);
- if (r == CSR_RESULT_SUCCESS)
- {
- (*remaining) = 0;
- break;
- }
- }
- else if (reason_unifi)
- {
- CsrSdioInterruptAcknowledge(card->sdio_if);
- }
-
- /*
-         * If the chip is in TORPID, and the host wants to wake it up,
- * set the host state to DROWSY and wait for the wake-up interrupt.
- */
- if ((card->host_state == UNIFI_HOST_STATE_TORPID) && card->bh_reason_host)
- {
- r = unifi_set_host_state(card, UNIFI_HOST_STATE_DROWSY);
- if (r == CSR_RESULT_SUCCESS)
- {
- /*
- * set the timeout value to UNIFI_DEFAULT_WAKE_TIMEOUT
- * to capture a wake error.
- */
- card->bh_reason_host = 0;
- (*remaining) = UNIFI_DEFAULT_WAKE_TIMEOUT;
- return CSR_RESULT_SUCCESS;
- }
-
- goto exit;
- }
-
- /*
- * If the chip is in DROWSY, and the timeout expires,
- * we need to reset the chip. This should never occur.
- * (If it does, check that the calling thread set "remaining"
- * according to the time remaining when unifi_bh() was called).
- */
- if ((card->host_state == UNIFI_HOST_STATE_DROWSY) && ((*remaining) == 0))
- {
- unifi_error(card->ospriv, "UniFi did not wake up on time...\n");
-
- /*
- * Check if Function1 has gone away or
- * if we missed an SDIO interrupt.
- */
- r = unifi_check_io_status(card, &iostate);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- goto exit;
- }
- /* Need to reset and reboot */
- return CSR_RESULT_FAILURE;
- }
- }
- else
- {
- if (card->bh_reason_unifi || card->bh_reason_host)
- {
- break;
- }
-
- if (((*remaining) == 0) && (low_power_mode == UNIFI_LOW_POWER_ENABLED))
- {
- r = unifi_set_host_state(card, UNIFI_HOST_STATE_TORPID);
- if (r == CSR_RESULT_SUCCESS)
- {
- (*remaining) = 0;
- return CSR_RESULT_SUCCESS;
- }
-
- goto exit;
- }
- }
-
- /* No need to run the host protocol */
- return CSR_RESULT_SUCCESS;
- } while (0);
-
-
- /* Disable the SDIO interrupts while doing SDIO ops */
- csrResult = CsrSdioInterruptDisable(card->sdio_if);
- if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
- {
- r = CSR_WIFI_HIP_RESULT_NO_DEVICE;
- goto exit;
- }
- if (csrResult != CSR_RESULT_SUCCESS)
- {
- r = ConvertCsrSdioToCsrHipResult(card, csrResult);
- unifi_error(card->ospriv, "Failed to disable SDIO interrupts. unifi_bh queues error.\n");
- goto exit;
- }
-
- /* Now that the interrupts are disabled, ack the interrupt */
- CsrSdioInterruptAcknowledge(card->sdio_if);
-
- /* Run the HIP */
- r = process_bh(card);
- if (r != CSR_RESULT_SUCCESS)
- {
- goto exit;
- }
-
- /*
- * If host is now idle, schedule a timer for the delay before we
- * let UniFi go into deep sleep.
- * If the timer goes off, we will move to TORPID state.
- * If UniFi raises an interrupt in the meantime, we will cancel
- * the timer and start a new one when we become idle.
- */
- for (j = 0; j < UNIFI_NO_OF_TX_QS; j++)
- {
- data_slots_used += CSR_WIFI_HIP_Q_SLOTS_USED(&card->fh_traffic_queue[j]);
- }
-
- if ((low_power_mode == UNIFI_LOW_POWER_ENABLED) && (data_slots_used == 0))
- {
-#ifndef CSR_WIFI_HIP_TA_DISABLE
- if (card->ta_sampling.traffic_type != CSR_WIFI_ROUTER_CTRL_TRAFFIC_TYPE_PERIODIC)
- {
-#endif
- /* return the UNIFI_DEFAULT_HOST_IDLE_TIMEOUT, so we can go to sleep. */
- unifi_trace(card->ospriv, UDBG5,
- "Traffic is not periodic, set timer for TORPID.\n");
- (*remaining) = UNIFI_DEFAULT_HOST_IDLE_TIMEOUT;
-#ifndef CSR_WIFI_HIP_TA_DISABLE
- }
- else
- {
- unifi_trace(card->ospriv, UDBG5,
- "Traffic is periodic, set unifi to TORPID immediately.\n");
- if (CardAreAllFromHostDataSlotsEmpty(card) == 1)
- {
- r = unifi_set_host_state(card, UNIFI_HOST_STATE_TORPID);
- if (r != CSR_RESULT_SUCCESS)
- {
- goto exit;
- }
- }
- }
-#endif
- }
-
- csrResult = CsrSdioInterruptEnable(card->sdio_if);
- if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
- {
- r = CSR_WIFI_HIP_RESULT_NO_DEVICE;
- }
- if (csrResult != CSR_RESULT_SUCCESS)
- {
- r = ConvertCsrSdioToCsrHipResult(card, csrResult);
- unifi_error(card->ospriv, "Failed to enable SDIO interrupt\n");
- }
-
-exit:
-
- unifi_trace(card->ospriv, UDBG4, "New state=%d\n", card->host_state);
-
- if (r != CSR_RESULT_SUCCESS)
- {
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
- unifi_debug_buf_dump();
-#endif
- /* If an interrupt has been raised, ack it here */
- if (card->bh_reason_unifi)
- {
- CsrSdioInterruptAcknowledge(card->sdio_if);
- }
-
- unifi_error(card->ospriv,
- "unifi_bh: state=%d %c, clock=%dkHz, interrupt=%d host=%d, power_save=%s\n",
- card->host_state,
- (card->host_state == UNIFI_HOST_STATE_AWAKE)?'A' : (card->host_state == UNIFI_HOST_STATE_DROWSY)?'D' : 'T',
- card->sdio_clock_speed / 1000,
- card->bh_reason_unifi, card->bh_reason_host,
- (low_power_mode == UNIFI_LOW_POWER_DISABLED)?"disabled" : "enabled");
-
- /* Try to capture firmware panic codes */
- (void)unifi_capture_panic(card);
-
- /* Ask for a mini-coredump when the driver has reset UniFi */
- (void)unifi_coredump_request_at_next_reset(card, 1);
- }
-
- return r;
-} /* unifi_bh() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * process_clock_request
- *
- * Handle request from the OS layer to increase the SDIO clock speed.
- * The fast clock is limited until the firmware has indicated that it has
- * completed initialisation to the OS layer.
- *
- * Arguments:
- * card Pointer to card context structure.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success or CSR error code.
- * ---------------------------------------------------------------------------
- */
-static CsrResult process_clock_request(card_t *card)
-{
- CsrResult r = CSR_RESULT_SUCCESS;
- CsrResult csrResult;
-
- if (!card->request_max_clock)
- {
- return CSR_RESULT_SUCCESS; /* No pending request */
- }
-
- /*
- * The SDIO clock speed request from the OS layer is only acted upon if
-     * the UniFi is awake. If it is in any other state, the clock speed will
- * transition through SAFE to MAX while the host wakes it up, and the
- * final speed reached will be UNIFI_SDIO_CLOCK_MAX_HZ.
- * This assumes that the SME never requests low power mode while the f/w
- * initialisation takes place.
- */
- if (card->host_state == UNIFI_HOST_STATE_AWAKE)
- {
- unifi_trace(card->ospriv, UDBG1, "Set SDIO max clock\n");
- csrResult = CsrSdioMaxBusClockFrequencySet(card->sdio_if, UNIFI_SDIO_CLOCK_MAX_HZ);
- if (csrResult != CSR_RESULT_SUCCESS)
- {
- r = ConvertCsrSdioToCsrHipResult(card, csrResult);
- }
- else
- {
- card->sdio_clock_speed = UNIFI_SDIO_CLOCK_MAX_HZ; /* log the new freq */
- }
- }
- else
- {
- unifi_trace(card->ospriv, UDBG1, "Will set SDIO max clock after wakeup\n");
- }
-
- /* Cancel the request now that it has been acted upon, or is about to be
- * by the wakeup mechanism
- */
- card->request_max_clock = 0;
-
- return r;
-}
-
-
-/*
- * ---------------------------------------------------------------------------
- * process_bh
- *
- * Exchange messages with UniFi
- *
- * Arguments:
- * card Pointer to card context structure.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success or CSR error code.
- * ---------------------------------------------------------------------------
- */
-static CsrResult process_bh(card_t *card)
-{
- CsrResult r;
- u8 more;
- more = FALSE;
-
- /* Process the reasons (interrupt, signals) */
- do
- {
- /*
- * Run in a while loop, to save clearing the interrupts
- * every time around the outside loop.
- */
- do
- {
- /* If configured to run the HIP just once, skip first loop */
- if (card->intmode & CSR_WIFI_INTMODE_RUN_BH_ONCE)
- {
- break;
- }
-
- r = handle_host_protocol(card, &more);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
-
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
- unifi_debug_log_to_buf("c52=%d c53=%d tx=%d txc=%d rx=%d s=%d t=%d fc=%d\n",
- card->cmd_prof.cmd52_count,
- card->cmd_prof.cmd53_count,
- card->cmd_prof.tx_count,
- card->cmd_prof.tx_cfm_count,
- card->cmd_prof.rx_count,
- card->cmd_prof.sdio_cmd_signal,
- card->cmd_prof.sdio_cmd_to_host,
- card->cmd_prof.sdio_cmd_from_host_and_clear
- );
-
- card->cmd_prof.cmd52_count = card->cmd_prof.cmd53_count = 0;
- card->cmd_prof.tx_count = card->cmd_prof.tx_cfm_count = card->cmd_prof.rx_count = 0;
-
- card->cmd_prof.cmd52_f0_r_count = 0;
- card->cmd_prof.cmd52_f0_w_count = 0;
- card->cmd_prof.cmd52_r8or16_count = 0;
- card->cmd_prof.cmd52_w8or16_count = 0;
- card->cmd_prof.cmd52_r16_count = 0;
- card->cmd_prof.cmd52_w16_count = 0;
- card->cmd_prof.cmd52_r32_count = 0;
-
- card->cmd_prof.sdio_cmd_signal = 0;
- card->cmd_prof.sdio_cmd_clear_slot = 0;
- card->cmd_prof.sdio_cmd_to_host = 0;
- card->cmd_prof.sdio_cmd_from_host = 0;
- card->cmd_prof.sdio_cmd_from_host_and_clear = 0;
-#endif
-
-
- } while (more || card->bh_reason_unifi || card->bh_reason_host);
-
- /* Acknowledge the h/w interrupt */
- r = CardClearInt(card);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to acknowledge interrupt.\n");
- return r;
- }
-
- /*
-         * UniFi may have tried to generate an interrupt while CardClearInt()
-         * was running, so we need to run the host protocol again to check
-         * whether there are any pending requests.
- */
- r = handle_host_protocol(card, &more);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
-
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
- unifi_debug_log_to_buf("c52=%d c53=%d tx=%d txc=%d rx=%d s=%d t=%d fc=%d\n",
- card->cmd_prof.cmd52_count,
- card->cmd_prof.cmd53_count,
- card->cmd_prof.tx_count,
- card->cmd_prof.tx_cfm_count,
- card->cmd_prof.rx_count,
- card->cmd_prof.sdio_cmd_signal,
- card->cmd_prof.sdio_cmd_to_host,
- card->cmd_prof.sdio_cmd_from_host_and_clear
- );
-
- card->cmd_prof.cmd52_count = card->cmd_prof.cmd53_count = 0;
- card->cmd_prof.tx_count = card->cmd_prof.tx_cfm_count = card->cmd_prof.rx_count = 0;
-
- card->cmd_prof.cmd52_f0_r_count = 0;
- card->cmd_prof.cmd52_f0_w_count = 0;
- card->cmd_prof.cmd52_r8or16_count = 0;
- card->cmd_prof.cmd52_w8or16_count = 0;
- card->cmd_prof.cmd52_r16_count = 0;
- card->cmd_prof.cmd52_w16_count = 0;
- card->cmd_prof.cmd52_r32_count = 0;
-
- card->cmd_prof.sdio_cmd_signal = 0;
- card->cmd_prof.sdio_cmd_clear_slot = 0;
- card->cmd_prof.sdio_cmd_to_host = 0;
- card->cmd_prof.sdio_cmd_from_host = 0;
- card->cmd_prof.sdio_cmd_from_host_and_clear = 0;
-#endif
- /* If configured to run the HIP just once, work is now done */
- if (card->intmode & CSR_WIFI_INTMODE_RUN_BH_ONCE)
- {
- break;
- }
-
- } while (more || card->bh_reason_unifi || card->bh_reason_host);
-
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
- if ((card->intmode & CSR_WIFI_INTMODE_RUN_BH_ONCE) == 0)
- {
- unifi_debug_log_to_buf("proc=%d\n",
- card->cmd_prof.process_count);
- }
-#endif
-
- return CSR_RESULT_SUCCESS;
-} /* process_bh() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * handle_host_protocol
- *
- * This function implements the Host Interface Protocol (HIP) as
- * described in the Host Interface Protocol Specification.
- *
- * Arguments:
- * card Pointer to card context structure.
- * processed_something Pointer to location to update processing status:
- * TRUE when data was transferred
- * FALSE when no data was transferred (queues empty)
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success or CSR error code.
- * ---------------------------------------------------------------------------
- */
-static CsrResult handle_host_protocol(card_t *card, u8 *processed_something)
-{
- CsrResult r;
- s32 done;
-
- *processed_something = FALSE;
-
-#ifdef CSR_WIFI_HIP_NOISY
- unifi_error(card->ospriv, " ======================== \n");
-#endif /* CSR_WIFI_HIP_NOISY */
-
-#ifdef CSR_WIFI_HIP_DATA_PLANE_PROFILE
- card->cmd_prof.process_count++;
-#endif
-
- card->bh_reason_unifi = card->bh_reason_host = 0;
- card->generate_interrupt = 0;
-
-
- /*
- * (Re)fill the T-H signal buffer
- */
- r = read_to_host_signals(card, &done);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Error occurred reading to-host signals\n");
- return r;
- }
- if (done > 0)
- {
- *processed_something = TRUE;
- }
-
- /*
- * Process any to-host signals.
- * Perform any requested CMD53 transfers here, but just queue any
- * bulk data command responses.
- */
- r = process_to_host_signals(card, &done);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Error occurred processing to-host signals\n");
- return r;
- }
-
- /* Now send any signals in the F-H queues */
- /* Give precedence to the command queue */
- r = process_fh_cmd_queue(card, &done);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Error occurred processing from-host signals\n");
- return r;
- }
- if (done > 0)
- {
- *processed_something = TRUE;
- }
-
- r = process_fh_traffic_queue(card, &done);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Error occurred processing from-host data signals\n");
- return r;
- }
- if (done > 0)
- {
- *processed_something = TRUE;
- }
-
- /* Flush out the batch of signals to the UniFi. */
- r = flush_fh_buffer(card);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to copy from-host signals to UniFi\n");
- return r;
- }
-
-
- /*
- * Send the host interrupt to say the queues have been modified.
- */
- if (card->generate_interrupt)
- {
- r = CardGenInt(card);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to notify UniFi that queues have been modified.\n");
- return r;
- }
- }
-
-#ifdef CSR_WIFI_RX_PATH_SPLIT
-#ifdef CSR_WIFI_RX_PATH_SPLIT_DONT_USE_WQ
- unifi_rx_queue_flush(card->ospriv);
-#endif
-#endif
-
- /* See if we can re-enable transmission now */
- restart_packet_flow(card);
-
-#ifdef CSR_PRE_ALLOC_NET_DATA
- r = prealloc_netdata_alloc(card);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "prealloc_netdata failed\n");
- return r;
- }
-#endif
-
- /*
-     * Don't put the thread to sleep if we just interacted with the chip;
-     * there might be more to do if we look again.
- */
- return r;
-} /* handle_host_protocol() */
-
-
-/*
- * Rounds the given signal length in bytes up to a whole number
- * of sig_frag_size chunks.
- */
-#define GET_CHUNKS_FOR(SIG_FRAG_SIZE, LENGTH) (((LENGTH) + ((SIG_FRAG_SIZE)-1)) / (SIG_FRAG_SIZE))
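-/*
- * Worked example of the rounding above, assuming a hypothetical fragment
- * size of 64 bytes: a 97-byte packed signal plus its 2-byte SDIO header
- * occupies GET_CHUNKS_FOR(64, 99) = (99 + 63) / 64 = 2 chunks, while a
- * 128-byte payload (130 bytes with the header) needs
- * (130 + 63) / 64 = 3 chunks.
- */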
-
-
-/*
- * ---------------------------------------------------------------------------
- * read_to_host_signals
- *
- * Read everything pending in the UniFi TH signal buffer.
- * Only do it if the local buffer is empty.
- *
- * Arguments:
- * card Pointer to card context struct
- * processed Number of signals read:
- * 0 if there were no signals pending,
- * 1 if we read at least one signal
- * Returns:
- * CSR error code if an error occurred.
- * ---------------------------------------------------------------------------
- */
-static CsrResult read_to_host_signals(card_t *card, s32 *processed)
-{
- s32 count_thw, count_thr;
- s32 unread_chunks, unread_bytes;
- CsrResult r;
-
- *processed = 0;
-
- /* Read any pending signals or bulk data commands */
- count_thw = unifi_read_shared_count(card, card->sdio_ctrl_addr + 4);
- if (count_thw < 0)
- {
- unifi_error(card->ospriv, "Failed to read to-host sig written count\n");
- return CSR_RESULT_FAILURE;
- }
- card->to_host_signals_w = count_thw; /* diag */
-
- count_thr = card->to_host_signals_r;
-
- if (count_thw == count_thr)
- {
- return CSR_RESULT_SUCCESS;
- }
-
- unread_chunks =
- (((count_thw - count_thr) + 128) % 128) - card->th_buffer.count;
-
- if (unread_chunks == 0)
- {
- return CSR_RESULT_SUCCESS;
- }
-
- unread_bytes = card->config_data.sig_frag_size * unread_chunks;
-
-
- r = unifi_bulk_rw(card,
- card->config_data.tohost_sigbuf_handle,
- card->th_buffer.ptr,
- unread_bytes,
- UNIFI_SDIO_READ);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to read ToHost signal\n");
- return r;
- }
-
- card->th_buffer.ptr += unread_bytes;
- card->th_buffer.count += (u16)unread_chunks;
-
- *processed = 1;
-
- return CSR_RESULT_SUCCESS;
-} /* read_to_host_signals() */
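-/*
- * Worked example of the wrap-around arithmetic above, with hypothetical
- * counter values (the shared signal counts are kept modulo 128): if
- * count_thw = 5 and count_thr = 120, then ((5 - 120) + 128) % 128 = 13
- * chunks have been written by the firmware but not yet read; subtracting
- * the chunks already held in th_buffer gives the number still to be
- * fetched over SDIO.
- */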
-
-
-/*
- * ---------------------------------------------------------------------------
- * update_to_host_signals_r
- *
- * Advance the shared-memory count of chunks read from the to-host
- * signal buffer.
- * Raise a UniFi internal interrupt to tell the firmware that the
- * count has changed.
- *
- * Arguments:
- * card Pointer to card context struct
- * pending Number of chunks remaining
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success or CSR error code
- * ---------------------------------------------------------------------------
- */
-static CsrResult update_to_host_signals_r(card_t *card, s16 pending)
-{
- CsrResult r;
-
- card->to_host_signals_r =
- (card->to_host_signals_r + (card->th_buffer.count - pending)) % 128;
- card->th_buffer.count = pending;
-
- /* Update the count of signals read */
- r = unifi_write_8_or_16(card, card->sdio_ctrl_addr + 6,
- (u8)card->to_host_signals_r);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to update to-host signals read\n");
- return r;
- }
-
- r = CardGenInt(card);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to notify UniFi that we processed to-host signals.\n");
- return r;
- }
-
- card->generate_interrupt = 0;
-
- return CSR_RESULT_SUCCESS;
-} /* update_to_host_signals_r() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * read_unpack_cmd
- *
- * Converts a wire-formatted command to the host bulk_data_cmd_t structure.
- *
- * Arguments:
- * ptr Pointer to the command
- * bulk_data_cmd Pointer to the host structure
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-static void read_unpack_cmd(const u8 *ptr, bulk_data_cmd_t *bulk_data_cmd)
-{
- s16 index = 0;
- bulk_data_cmd->cmd_and_len = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- bulk_data_cmd->data_slot = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- bulk_data_cmd->offset = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- bulk_data_cmd->buffer_handle = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
-} /* read_unpack_cmd */
-
-
-/*
- * ---------------------------------------------------------------------------
- * process_to_host_signals
- *
- * Read and dispatch signals from the UniFi
- *
- * Arguments:
- * card Pointer to card context struct
- * processed Pointer to location to write processing result:
- * 0 if there were no signals pending,
- * 1 if we read at least one signal
- *
- * Returns:
- * CSR error code if there was an error
- *
- * Notes:
- * Since bulk data transfers can take a long time, if we wait until
- * all are done before we acknowledge the signals, the UniFi runs out
- * of buffer space. Therefore we keep a count of the bytes transferred
- * in bulk data commands, and update the to-host-signals-read count
- * if we've done a large transfer.
- *
- *      All data in the f/w is stored in little-endian format, without any
- *      padding bytes. Every read from that memory has to be converted to
- *      the host (CPU-specific) format before we can process it. Therefore
- *      we use read_unpack_cmd() and read_unpack_signal() to convert the raw
- *      data contained in card->th_buffer.buf to host structures.
- *      Important: UDI clients use wire-formatted structures, so we need to
- *      pass the data to them exactly as it was read from the device.
- * ---------------------------------------------------------------------------
- */
-static CsrResult process_to_host_signals(card_t *card, s32 *processed)
-{
- s16 pending;
- s16 remaining;
- u8 *bufptr;
- bulk_data_param_t data_ptrs;
- s16 cmd;
- u16 sig_len;
- s16 i;
- u16 chunks_in_buf;
- u16 bytes_transferred = 0;
- CsrResult r = CSR_RESULT_SUCCESS;
-
- *processed = 0;
-
- pending = card->th_buffer.count;
-
- /* Are there new to-host signals? */
- unifi_trace(card->ospriv, UDBG4, "handling %d to-host chunks\n", pending);
-
- if (!pending)
- {
- return CSR_RESULT_SUCCESS;
- }
-
- /*
- * This is a pointer to the raw data we have read from the f/w.
- * Can be a signal or a command. Note that we need to convert
- * it to a host structure before we process it.
- */
- bufptr = card->th_buffer.buf;
-
- while (pending > 0)
- {
- s16 f_flush_count = 0;
-
- /*
- * Command and length are common to signal and bulk data msgs.
- * If command == 0 (i.e. a signal), len is number of bytes
- * *following* the 2-byte header.
- */
- cmd = bufptr[1] >> 4;
- sig_len = bufptr[0] + ((bufptr[1] & 0x0F) << 8);
-
-#ifdef CSR_WIFI_HIP_NOISY
- unifi_error(card->ospriv, "Received UniFi msg cmd=%d, len=%d\n",
- cmd, sig_len);
-#endif /* CSR_WIFI_HIP_NOISY */
-
- if ((sig_len == 0) &&
- ((cmd != SDIO_CMD_CLEAR_SLOT) && (cmd != SDIO_CMD_PADDING)))
- {
- unifi_error(card->ospriv, "incomplete signal or command: has size zero\n");
- return CSR_RESULT_FAILURE;
- }
- /*
- * Make sure the buffer contains a complete message.
- * Signals may occupy multiple chunks, bulk-data commands occupy
- * one chunk.
- */
- if (cmd == SDIO_CMD_SIGNAL)
- {
- chunks_in_buf = GET_CHUNKS_FOR(card->config_data.sig_frag_size, (u16)(sig_len + 2));
- }
- else
- {
- chunks_in_buf = 1;
- }
-
- if (chunks_in_buf > (u16)pending)
- {
- unifi_error(card->ospriv, "incomplete signal (0x%x?): need %d chunks, got %d\n",
- GET_SIGNAL_ID(bufptr + 2),
- chunks_in_buf, pending);
- unifi_error(card->ospriv, " thsw=%d, thsr=%d\n",
- card->to_host_signals_w,
- card->to_host_signals_r);
- return CSR_RESULT_FAILURE;
- }
-
-
- switch (cmd)
- {
- case SDIO_CMD_SIGNAL:
- /* This is a signal. Read the rest of it and then handle it. */
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
- card->cmd_prof.sdio_cmd_signal++;
-#endif
-
- for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; i++)
- {
- /* Retrieve dataRefs[i].DataLength */
- u16 data_len = GET_PACKED_DATAREF_LEN(bufptr + 2, i);
-
- /*
-                     * The bulk data length in the signal cannot be greater than
-                     * the maximum length allowed by the SDIO config structure.
- */
- if (data_len > card->config_data.data_slot_size)
- {
- unifi_error(card->ospriv,
- "Bulk Data length (%d) exceeds Maximum Bulk Data length (%d)\n",
- data_len, card->config_data.data_slot_size);
- return CSR_RESULT_FAILURE;
- }
-
- /*
- * Len here might not be the same as the length in the
- * bulk data slot. The slot length will always be even,
- * but len could be odd.
- */
- if (data_len != 0)
- {
- /* Retrieve dataRefs[i].SlotNumber */
- s16 slot = GET_PACKED_DATAREF_SLOT(bufptr + 2, i);
-
- if (slot >= card->config_data.num_tohost_data_slots)
- {
- unifi_error(card->ospriv, "!!!bad slot number in to-host signal: %d, sig 0x%X\n",
- slot, cmd);
- return CSR_RESULT_FAILURE;
- }
-
- data_ptrs.d[i].os_data_ptr = card->to_host_data[slot].os_data_ptr;
- data_ptrs.d[i].os_net_buf_ptr = card->to_host_data[slot].os_net_buf_ptr;
- data_ptrs.d[i].net_buf_length = card->to_host_data[slot].net_buf_length;
- data_ptrs.d[i].data_length = data_len;
- }
- else
- {
- UNIFI_INIT_BULK_DATA(&data_ptrs.d[i]);
- }
- }
-
- /*
-                 * Log the signal to the UDI before calling unifi_receive_event(),
-                 * as it can modify the bulk data.
- */
- if (card->udi_hook)
- {
- (*card->udi_hook)(card->ospriv, bufptr + 2, sig_len,
- &data_ptrs, UDI_LOG_TO_HOST);
- }
-
-#ifdef CSR_WIFI_HIP_DATA_PLANE_PROFILE
- if (GET_SIGNAL_ID(bufptr + 2) == CSR_MA_PACKET_CONFIRM_ID)
- {
- card->cmd_prof.tx_cfm_count++;
- }
- else if (GET_SIGNAL_ID(bufptr + 2) == CSR_MA_PACKET_INDICATION_ID)
- {
- if (data_ptrs.d[0].os_data_ptr)
- {
- if ((*data_ptrs.d[0].os_data_ptr) & 0x08)
- {
- card->cmd_prof.rx_count++;
- }
- }
- }
-#endif
- /*
-                 * Check if the signal is MA-PACKET.cfm and, if so, check the status.
-                 * If the status indicates failure, search through the slot records
-                 * to find whether any slot is still occupied for this host tag. This
-                 * can happen if the f/w signals the confirm with a failure status
-                 * before it has downloaded the bulk data. If a slot with that host
-                 * tag is found, the corresponding slot is cleared.
- */
-
- if (GET_SIGNAL_ID(bufptr + 2) == CSR_MA_PACKET_CONFIRM_ID)
- {
- /* Get host tag and transmission status */
- u32 host_tag = GET_PACKED_MA_PACKET_CONFIRM_HOST_TAG(bufptr + 2);
- u16 status = GET_PACKED_MA_PACKET_CONFIRM_TRANSMISSION_STATUS(bufptr + 2);
-
- unifi_trace(card->ospriv, UDBG4, "process_to_host_signals signal ID=%x host Tag=%x status=%x\n",
- GET_SIGNAL_ID(bufptr + 2), host_tag, status);
-
-                    /* If the transmission status indicates failure, search through
-                     * the slot records and clear any slot that has not yet been cleared
- */
-
- if (status && (card->fh_slot_host_tag_record))
- {
- u16 num_fh_slots = card->config_data.num_fromhost_data_slots;
-
-                        /* Search the list of slot records for a matching host tag.
-                         * If the slot has not yet been cleared, clear it here
- */
- for (i = 0; i < num_fh_slots; i++)
- {
- if (card->fh_slot_host_tag_record[i] == host_tag)
- {
-#ifdef CSR_WIFI_REQUEUE_PACKET_TO_HAL
- /* Invoke the HAL module function to requeue it back to HAL Queues */
- r = unifi_reque_ma_packet_request(card->ospriv, host_tag, status, &card->from_host_data[i].bd);
- card->fh_slot_host_tag_record[i] = CSR_WIFI_HIP_RESERVED_HOST_TAG;
- if (CSR_RESULT_SUCCESS != r)
- {
- unifi_trace(card->ospriv, UDBG5, "process_to_host_signals: Failed to requeue Packet(hTag:%x) back to HAL \n", host_tag);
- CardClearFromHostDataSlot(card, i);
- }
- else
- {
- CardClearFromHostDataSlotWithoutFreeingBulkData(card, i);
- }
-
-#else
- unifi_trace(card->ospriv, UDBG4, "process_to_host_signals Clear slot=%x host tag=%x\n", i, host_tag);
- card->fh_slot_host_tag_record[i] = CSR_WIFI_HIP_RESERVED_HOST_TAG;
-
- /* Set length field in from_host_data array to 0 */
- CardClearFromHostDataSlot(card, i);
-#endif
- break;
- }
- }
- }
- }
-
- /* Pass event to OS layer */
- unifi_receive_event(card->ospriv, bufptr + 2, sig_len, &data_ptrs);
-
- /* Initialise the to_host data, so it can be re-used. */
- for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; i++)
- {
- /* The slot is only valid if the length is non-zero. */
- if (GET_PACKED_DATAREF_LEN(bufptr + 2, i) != 0)
- {
- s16 slot = GET_PACKED_DATAREF_SLOT(bufptr + 2, i);
- if (slot < card->config_data.num_tohost_data_slots)
- {
- UNIFI_INIT_BULK_DATA(&card->to_host_data[slot]);
- }
- }
- }
-
-#ifndef CSR_WIFI_DEFER_TH_FLUSH
- /*
- * If we have previously transferred a lot of data, ack
- * the signals read so far, so f/w can reclaim the buffer
- * memory sooner.
- */
- if (bytes_transferred >= TO_HOST_FLUSH_THRESHOLD)
- {
- f_flush_count = 1;
- }
-#endif
- break;
-
-
- case SDIO_CMD_CLEAR_SLOT:
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
- card->cmd_prof.sdio_cmd_clear_slot++;
-#endif
- /* This is a clear slot command. */
- if (sig_len != 0)
- {
- unifi_error(card->ospriv, "process_to_host_signals: clear slot, bad data len: 0x%X at offset %d\n",
- sig_len, bufptr - card->th_buffer.buf);
- return CSR_RESULT_FAILURE;
- }
-
- r = process_clear_slot_command(card, bufptr);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to process clear slot\n");
- return r;
- }
- break;
-
- case SDIO_CMD_TO_HOST_TRANSFER:
- case SDIO_CMD_FROM_HOST_TRANSFER:
- case SDIO_CMD_FROM_HOST_AND_CLEAR:
- case SDIO_CMD_OVERLAY_TRANSFER:
- /* This is a bulk data command. */
- if (sig_len & 1)
- {
- unifi_error(card->ospriv, "process_to_host_signals: bulk data, bad data len: 0x%X at offset %d\n",
- sig_len, bufptr - card->th_buffer.buf);
- return CSR_RESULT_FAILURE;
- }
-
- r = process_bulk_data_command(card, bufptr, cmd, sig_len);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to process bulk cmd\n");
- return r;
- }
- /* Count the bytes transferred */
- bytes_transferred += sig_len;
-
- if (cmd == SDIO_CMD_FROM_HOST_AND_CLEAR)
- {
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
- card->cmd_prof.sdio_cmd_from_host_and_clear++;
-#endif
-#ifndef CSR_WIFI_DEFER_TH_FLUSH
- f_flush_count = 1;
-#endif
- }
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
- else if (cmd == SDIO_CMD_FROM_HOST_TRANSFER)
- {
- card->cmd_prof.sdio_cmd_from_host++;
- }
- else if (cmd == SDIO_CMD_TO_HOST_TRANSFER)
- {
- card->cmd_prof.sdio_cmd_to_host++;
- }
-#endif
- break;
-
- case SDIO_CMD_PADDING:
- break;
-
- default:
- unifi_error(card->ospriv, "Unrecognised to-host command: %d\n", cmd);
- break;
- }
-
- bufptr += chunks_in_buf * card->config_data.sig_frag_size;
- pending -= chunks_in_buf;
-
- /*
- * Write out the host signal count when a significant
- * number of bytes of bulk data have been transferred or
- * when we have performed a CopyFromHostAndClear.
- */
- if (f_flush_count)
- {
- r = update_to_host_signals_r(card, pending);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
- bytes_transferred = 0;
- }
- }
-
- if (pending)
- {
- unifi_warning(card->ospriv, "proc_th_sigs: %d unprocessed\n", pending);
- }
-
- /* If we processed any signals, write the updated count to UniFi */
- if (card->th_buffer.count != pending)
- {
- r = update_to_host_signals_r(card, pending);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
- }
-
- /*
- * Reset the buffer pointer, copying down any un-processed signals.
- * This can happen if we enable the optimisation in read_to_host_signals()
- * that limits the length to whole blocks.
- */
- remaining = card->th_buffer.ptr - bufptr;
- if (remaining < 0)
- {
- unifi_error(card->ospriv, "Processing TH signals overran the buffer\n");
- return CSR_RESULT_FAILURE;
- }
- if (remaining > 0)
- {
- /* Use a safe copy because source and destination may overlap */
- u8 *d = card->th_buffer.buf;
- u8 *s = bufptr;
- s32 n = remaining;
- while (n--)
- {
- *d++ = *s++;
- }
- }
- card->th_buffer.ptr = card->th_buffer.buf + remaining;
-
-
- /* If we reach here then we processed something */
- *processed = 1;
- return CSR_RESULT_SUCCESS;
-} /* process_to_host_signals() */
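-/*
- * Worked example of the 2-byte chunk header decoded above (and built when
- * queuing from-host signals): the low 12 bits carry the length and the
- * top 4 bits the command. Assuming a hypothetical signal of 100 (0x064)
- * bytes:
- *     hdr[0] = 0x64;                    (length & 0xff)
- *     hdr[1] = ((100 >> 8) & 0xf) | (SDIO_CMD_SIGNAL << 4);
- * Decoding recovers cmd = hdr[1] >> 4 and
- * length = hdr[0] + ((hdr[1] & 0x0F) << 8) = 100.
- */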
-
-
-/*
- * ---------------------------------------------------------------------------
- * process_clear_slot_command
- *
- * Process a clear slot command from the UniFi.
- *
- * Arguments:
- * card Pointer to card context struct
- *      cmdptr          Pointer to the clear slot command msg from UniFi
- *
- * Returns:
- *      CSR_RESULT_SUCCESS on success, CSR error code on error
- * ---------------------------------------------------------------------------
- */
-static CsrResult process_clear_slot_command(card_t *card, const u8 *cmdptr)
-{
- u16 data_slot;
- s16 slot;
-
- data_slot = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(cmdptr + SIZEOF_UINT16);
-
- unifi_trace(card->ospriv, UDBG4, "Processing clear slot cmd, slot=0x%X\n",
- data_slot);
-
- slot = data_slot & 0x7FFF;
-
-#ifdef CSR_WIFI_HIP_NOISY
- unifi_error(card->ospriv, "CMD clear data slot 0x%04x\n", data_slot);
-#endif /* CSR_WIFI_HIP_NOISY */
-
- if (data_slot & SLOT_DIR_TO_HOST)
- {
- if (slot >= card->config_data.num_tohost_data_slots)
- {
- unifi_error(card->ospriv,
- "Invalid to-host data slot in SDIO_CMD_CLEAR_SLOT: %d\n",
- slot);
- return CSR_RESULT_FAILURE;
- }
- /* clear to-host data slot */
- unifi_warning(card->ospriv, "Unexpected clear to-host data slot cmd: 0x%04x\n",
- data_slot);
- }
- else
- {
- if (slot >= card->config_data.num_fromhost_data_slots)
- {
- unifi_error(card->ospriv,
- "Invalid from-host data slot in SDIO_CMD_CLEAR_SLOT: %d\n",
- slot);
- return CSR_RESULT_FAILURE;
- }
-
- /*
-         * The driver now owns clearing of all from-host slots (see the
-         * comment in process_fh_traffic_queue), so the clear slot command
-         * from the firmware is simply ignored and success is returned.
- */
- return CSR_RESULT_SUCCESS;
-
- /* Set length field in from_host_data array to 0 */
- /* CardClearFromHostDataSlot(card, slot); */
- }
-
- return CSR_RESULT_SUCCESS;
-} /* process_clear_slot_command() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * process_bulk_data_command
- *
- * Process a bulk data request from the UniFi.
- *
- * Arguments:
- * card Pointer to card context struct
- * bdcmd Pointer to bulk-data command msg from UniFi
- * cmd, len Decoded values of command and length from the msg header
- * Cmd will only be one of:
- * SDIO_CMD_TO_HOST_TRANSFER
- * SDIO_CMD_FROM_HOST_TRANSFER
- * SDIO_CMD_FROM_HOST_AND_CLEAR
- * SDIO_CMD_OVERLAY_TRANSFER
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, CSR error code on error
- * ---------------------------------------------------------------------------
- */
-static CsrResult process_bulk_data_command(card_t *card, const u8 *cmdptr,
- s16 cmd, u16 len)
-{
- bulk_data_desc_t *bdslot;
-#ifdef CSR_WIFI_ALIGNMENT_WORKAROUND
- u8 *host_bulk_data_slot;
-#endif
- bulk_data_cmd_t bdcmd;
- s16 offset;
- s16 slot;
- s16 dir;
- CsrResult r;
-
- read_unpack_cmd(cmdptr, &bdcmd);
-
- unifi_trace(card->ospriv, UDBG4, "Processing bulk data cmd %d %s, len=%d, slot=0x%X\n",
- cmd, lookup_bulkcmd_name(cmd), len, bdcmd.data_slot);
-
- /*
- * Round up the transfer length if required.
- * This is useful to force all transfers to be a multiple of the SDIO block
- * size, so the SDIO driver won't try to use a byte-mode CMD53. These are
- * broken on some hardware platforms.
- */
- if (card->sdio_io_block_pad)
- {
- len = (len + card->sdio_io_block_size - 1) & ~(card->sdio_io_block_size - 1);
- unifi_trace(card->ospriv, UDBG4, "Rounded bulk data length up to %d\n", len);
- }
-
- slot = bdcmd.data_slot & 0x7FFF;
-
- if (cmd == SDIO_CMD_OVERLAY_TRANSFER)
- {
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE; /* Not used on CSR6xxx */
- }
- else
- {
- if (bdcmd.data_slot & SLOT_DIR_TO_HOST)
- {
- /* Request is for to-host bulk data */
-
- /* Check sanity of slot number */
- if (slot >= card->config_data.num_tohost_data_slots)
- {
- unifi_error(card->ospriv,
- "Invalid to-host data slot in SDIO bulk xfr req: %d\n",
- slot);
- return CSR_RESULT_FAILURE;
- }
-
- /* Allocate memory for card->to_host_data[slot] bulk data here. */
-#ifdef CSR_PRE_ALLOC_NET_DATA
- r = prealloc_netdata_get(card, &card->to_host_data[slot], len);
-#else
- r = unifi_net_data_malloc(card->ospriv, &card->to_host_data[slot], len);
-#endif
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to allocate t-h bulk data\n");
- return CSR_RESULT_FAILURE;
- }
-
- bdslot = &card->to_host_data[slot];
-
- /* Make sure that the buffer is 4-bytes aligned */
- r = unifi_net_dma_align(card->ospriv, bdslot);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to align t-h bulk data buffer for DMA\n");
- return CSR_RESULT_FAILURE;
- }
- }
- else
- {
- /* Request is for from-host bulk data */
-
- if (slot >= card->config_data.num_fromhost_data_slots)
- {
- unifi_error(card->ospriv,
- "Invalid from-host data slot in SDIO bulk xfr req: %d\n",
- slot);
- return CSR_RESULT_FAILURE;
- }
- bdslot = &card->from_host_data[slot].bd;
- }
- offset = bdcmd.offset;
- }
- /* Do the transfer */
- dir = (cmd == SDIO_CMD_TO_HOST_TRANSFER)?
- UNIFI_SDIO_READ : UNIFI_SDIO_WRITE;
-
- unifi_trace(card->ospriv, UDBG4,
- "Bulk %c %s len=%d, handle %d - slot=%d %p+(%d)\n",
- (dir == UNIFI_SDIO_READ)?'R' : 'W',
- lookup_bulkcmd_name(cmd),
- len,
- bdcmd.buffer_handle,
- slot, bdslot->os_data_ptr, offset);
-#ifdef CSR_WIFI_HIP_NOISY
- unifi_error(card->ospriv, "Bulk %s len=%d, handle %d - slot=%d %p+(%d)\n",
- lookup_bulkcmd_name(cmd),
- len,
- bdcmd.buffer_handle,
- slot, bdslot->os_data_ptr, offset);
-#endif /* CSR_WIFI_HIP_NOISY */
-
-
- if (bdslot->os_data_ptr == NULL)
- {
- unifi_error(card->ospriv, "Null os_data_ptr - Bulk %s handle %d - slot=%d o=(%d)\n",
- lookup_bulkcmd_name(cmd),
- bdcmd.buffer_handle,
- slot,
- offset);
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
-#ifdef CSR_WIFI_ALIGNMENT_WORKAROUND
-    /* If os_data_ptr is not 4-byte aligned, allocate a new buffer and copy the
-       data into it, so that the address passed to unifi_bulk_rw() is 4-byte aligned */
-
- if (len != 0 && (dir == UNIFI_SDIO_WRITE) && (((ptrdiff_t)bdslot->os_data_ptr + offset) & 3))
- {
- host_bulk_data_slot = kmalloc(len, GFP_KERNEL);
-
- if (!host_bulk_data_slot)
- {
- unifi_error(card->ospriv, " failed to allocate request_data before unifi_bulk_rw\n");
- return -1;
- }
-
- memcpy((void *)host_bulk_data_slot,
- (void *)(bdslot->os_data_ptr + offset), len);
-
- r = unifi_bulk_rw(card,
- bdcmd.buffer_handle,
- (void *)host_bulk_data_slot,
- len,
- dir);
- }
- else
-#endif
- {
- r = unifi_bulk_rw(card,
- bdcmd.buffer_handle,
- (void *)(bdslot->os_data_ptr + offset),
- len,
- dir);
- }
-
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv,
- "Failed: %s hlen=%d, slen=%d, handle %d - slot=%d %p+0x%X\n",
- lookup_bulkcmd_name(cmd),
- len, /* Header length */
- bdslot->data_length, /* Length stored in slot */
- bdcmd.buffer_handle,
- slot, bdslot->os_data_ptr, offset);
- return r;
- }
-
- bdslot->data_length = len;
-
- if (cmd == SDIO_CMD_FROM_HOST_AND_CLEAR)
- {
- if (slot >= card->config_data.num_fromhost_data_slots)
- {
- unifi_error(card->ospriv,
- "Invalid from-host data slot in SDIO_CMD_FROM_HOST_AND_CLEAR: %d\n",
- slot);
- return CSR_RESULT_FAILURE;
- }
-
-#ifdef CSR_WIFI_ALIGNMENT_WORKAROUND
- /* moving this check before we clear host data slot */
- if ((len != 0) && (dir == UNIFI_SDIO_WRITE) && (((ptrdiff_t)bdslot->os_data_ptr + offset) & 3))
- {
- kfree(host_bulk_data_slot);
- }
-#endif
-
- if (card->fh_slot_host_tag_record)
- {
- unifi_trace(card->ospriv, UDBG5, "CopyFromHostAndClearSlot Reset entry for slot=%d\n", slot);
-
- /* reset the host tag entry for the corresponding slot */
- card->fh_slot_host_tag_record[slot] = CSR_WIFI_HIP_RESERVED_HOST_TAG;
- }
-
-
- /* Set length field in from_host_data array to 0 */
- CardClearFromHostDataSlot(card, slot);
- }
-
- return CSR_RESULT_SUCCESS;
-} /* process_bulk_data_command() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * check_fh_sig_slots
- *
- * Check whether there are <n> free signal slots available on UniFi.
- * This takes into account the signals already batched since the
- * from_host_signal counts were last read.
- * If the from_host_signal counts indicate not enough space, we read
- * the latest count from UniFi to see if some more have been freed.
- *
- * Arguments:
- *      card            Pointer to card context struct
- *      needed          Number of from-host signal fragments (chunks) required
- *      space_fh        Pointer to location to write the available space
- *
- * Returns:
- * CSR_RESULT_SUCCESS, otherwise CSR error code on error.
- * ---------------------------------------------------------------------------
- */
-static CsrResult check_fh_sig_slots(card_t *card, u16 needed, s32 *space_fh)
-{
- u32 count_fhw;
- u32 occupied_fh, slots_fh;
- s32 count_fhr;
-
- count_fhw = card->from_host_signals_w;
- count_fhr = card->from_host_signals_r;
- slots_fh = card->config_data.num_fromhost_sig_frags;
-
- /* Only read the space in from-host queue if necessary */
- occupied_fh = (count_fhw - count_fhr) % 128;
-
- if (slots_fh < occupied_fh)
- {
- *space_fh = 0;
- }
- else
- {
- *space_fh = slots_fh - occupied_fh;
- }
-
- if ((occupied_fh != 0) && (*space_fh < needed))
- {
- count_fhr = unifi_read_shared_count(card, card->sdio_ctrl_addr + 2);
- if (count_fhr < 0)
- {
- unifi_error(card->ospriv, "Failed to read from-host sig read count\n");
- return CSR_RESULT_FAILURE;
- }
- card->from_host_signals_r = count_fhr; /* diag */
-
- occupied_fh = (count_fhw - count_fhr) % 128;
- *space_fh = slots_fh - occupied_fh;
- }
-
- return CSR_RESULT_SUCCESS;
-} /* check_fh_sig_slots() */
-
-
-/*
-* If we are padding the From-Host signals to the SDIO block size,
-* we need to round up the needed_chunks to the SDIO block size.
-*/
-#define ROUND_UP_NEEDED_CHUNKS(_card, _needed_chunks) \
- { \
- u16 _chunks_per_block; \
- u16 _chunks_in_last_block; \
- \
- if (_card->sdio_io_block_pad) \
- { \
- _chunks_per_block = _card->sdio_io_block_size / _card->config_data.sig_frag_size; \
- _chunks_in_last_block = _needed_chunks % _chunks_per_block; \
- if (_chunks_in_last_block != 0) \
- { \
- _needed_chunks = _needed_chunks + (_chunks_per_block - _chunks_in_last_block); \
- } \
- } \
- }
-
-
-#define ROUND_UP_SPACE_CHUNKS(_card, _space_chunks) \
- { \
- u16 _chunks_per_block; \
- \
- if (_card->sdio_io_block_pad) \
- { \
- _chunks_per_block = _card->sdio_io_block_size / _card->config_data.sig_frag_size; \
- _space_chunks = ((_space_chunks / _chunks_per_block) * _chunks_per_block); \
- } \
- }
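-/*
- * Worked example of the two macros above, with hypothetical sizes: with
- * sig_frag_size = 64 and sdio_io_block_size = 512, there are 8 chunks per
- * block. A request needing 10 chunks is rounded up to 16 (two whole
- * blocks), while an available space of 13 chunks is rounded down to 8, so
- * only whole SDIO blocks are ever transferred.
- */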
-
-
-
-
-
-/*
- * ---------------------------------------------------------------------------
- * process_fh_cmd_queue
- *
- * Take signals off the from-host command queue and copy them to the UniFi.
- * Does nothing if the UniFi has no slots free.
- *
- * Arguments:
- * card Pointer to card context struct
- * processed Location to write:
- * 0 if there is nothing on the queue to process
- * 1 if a signal was successfully processed
- *
- * Returns:
- * CSR error code if an error occurred.
- *
- * Notes:
- * The from-host queue contains signal requests from the network driver
- * and any UDI clients interspersed. UDI clients' requests have been stored
- * in the from-host queue using the wire-format structures, as they arrive.
- * All other requests are stored in the from-host queue using the host
- * (cpu specific) structures. We use the is_packed member of the card_signal_t
- * structure that describes the queue to make the distinction.
- * ---------------------------------------------------------------------------
- */
-static CsrResult process_fh_cmd_queue(card_t *card, s32 *processed)
-{
- q_t *sigq = &card->fh_command_queue;
-
- CsrResult r;
- u16 pending_sigs;
- u16 pending_chunks;
- u16 needed_chunks;
- s32 space_chunks;
- u16 q_index;
-
- *processed = 0;
-
- /* Get the number of pending signals. */
- pending_sigs = CSR_WIFI_HIP_Q_SLOTS_USED(sigq);
- unifi_trace(card->ospriv, UDBG5, "proc_fh: %d pending\n", pending_sigs);
- if (pending_sigs == 0)
- {
- /* Nothing to do */
- return CSR_RESULT_SUCCESS;
- }
-
- /* Work out how many chunks we have waiting to send */
- for (pending_chunks = 0, q_index = CSR_WIFI_HIP_Q_NEXT_R_SLOT(sigq);
- q_index != CSR_WIFI_HIP_Q_NEXT_W_SLOT(sigq);
- q_index = CSR_WIFI_HIP_Q_WRAP(sigq, q_index + 1))
- {
- card_signal_t *csptr = CSR_WIFI_HIP_Q_SLOT_DATA(sigq, q_index);
-
- /*
- * Note that GET_CHUNKS_FOR() needs the size of the packed
- * (wire-formatted) structure
- */
- pending_chunks += GET_CHUNKS_FOR(card->config_data.sig_frag_size, (u16)(csptr->signal_length + 2));
- }
-
- /*
- * Check whether UniFi has space for all the buffered bulk-data
- * commands and signals as well.
- */
- needed_chunks = pending_chunks + card->fh_buffer.count;
-
- /* Round up to the block size if necessary */
- ROUND_UP_NEEDED_CHUNKS(card, needed_chunks);
-
- r = check_fh_sig_slots(card, needed_chunks, &space_chunks);
- if (r != CSR_RESULT_SUCCESS)
- {
- /* Error */
- unifi_error(card->ospriv, "Failed to read fh sig count\n");
- return r;
- }
-
-#ifdef CSR_WIFI_HIP_NOISY
- unifi_error(card->ospriv, "proc_fh: %d chunks free, need %d\n",
- space_chunks, needed_chunks);
-#endif /* CSR_WIFI_HIP_NOISY */
-
-
- /*
- * Coalesce as many from-host signals as possible
- * into a single block and write using a single CMD53
- */
- if (needed_chunks > (u16)space_chunks)
- {
- /* Round up to the block size if necessary */
- ROUND_UP_SPACE_CHUNKS(card, space_chunks);
-
- /*
- * If the f/w has less free chunks than those already pending
- * return immediately.
- */
- if ((u16)space_chunks <= card->fh_buffer.count)
- {
- /*
- * No room in UniFi for any signals after the buffered bulk
- * data commands have been sent.
- */
- unifi_error(card->ospriv, "not enough room to send signals, need %d chunks, %d free\n",
- card->fh_buffer.count, space_chunks);
- card->generate_interrupt = 1;
- return CSR_RESULT_SUCCESS;
- }
- pending_chunks = (u16)(space_chunks - card->fh_buffer.count);
- }
-
- while (pending_sigs-- && pending_chunks > 0)
- {
- card_signal_t *csptr;
- s16 i;
- u16 sig_chunks, total_length, free_chunks_in_fh_buffer;
- bulk_data_param_t bulkdata;
- u8 *packed_sigptr;
- u16 signal_length = 0;
-
- /* Retrieve the entry at the head of the queue */
- q_index = CSR_WIFI_HIP_Q_NEXT_R_SLOT(sigq);
-
- /* Get a pointer to the containing card_signal_t struct */
- csptr = CSR_WIFI_HIP_Q_SLOT_DATA(sigq, q_index);
-
- /* Get the new length of the packed signal */
- signal_length = csptr->signal_length;
-
- if ((signal_length & 1) || (signal_length > UNIFI_PACKED_SIGBUF_SIZE))
- {
- unifi_error(card->ospriv, "process_fh_queue: Bad len: %d\n", signal_length);
- return CSR_RESULT_FAILURE;
- }
-
- /* Need space for 2-byte SDIO protocol header + signal */
- sig_chunks = GET_CHUNKS_FOR(card->config_data.sig_frag_size, (u16)(signal_length + 2));
-
- free_chunks_in_fh_buffer = GET_CHUNKS_FOR(card->config_data.sig_frag_size,
- (u16)((card->fh_buffer.buf + UNIFI_FH_BUF_SIZE) - card->fh_buffer.ptr));
- if (free_chunks_in_fh_buffer < sig_chunks)
- {
- /* No more room */
- unifi_notice(card->ospriv, "proc_fh_cmd_q: no room in fh buffer for 0x%.4X, deferring\n",
- (u16)(GET_SIGNAL_ID(csptr->sigbuf)));
- break;
- }
-
- packed_sigptr = csptr->sigbuf;
-
- /* Claim and set up a from-host data slot */
- if (CSR_RESULT_FAILURE == CardWriteBulkData(card, csptr, UNIFI_TRAFFIC_Q_MLME))
- {
- unifi_notice(card->ospriv, "proc_fh_cmd_q: no fh data slots for 0x%.4X, deferring\n",
- (u16)(GET_SIGNAL_ID(csptr->sigbuf)));
- break;
- }
-
- for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; i++)
- {
- if (csptr->bulkdata[i].data_length == 0)
- {
- UNIFI_INIT_BULK_DATA(&bulkdata.d[i]);
- }
- else
- {
- bulkdata.d[i].os_data_ptr = csptr->bulkdata[i].os_data_ptr;
- bulkdata.d[i].data_length = csptr->bulkdata[i].data_length;
- }
-
- /* Pass the free responsibility to the lower layer. */
- UNIFI_INIT_BULK_DATA(&csptr->bulkdata[i]);
- }
-
- unifi_trace(card->ospriv, UDBG2, "Sending signal 0x%.4X\n",
- GET_SIGNAL_ID(packed_sigptr));
-#ifdef CSR_WIFI_HIP_NOISY
- unifi_error(card->ospriv, "Sending signal 0x%.4X\n",
- GET_SIGNAL_ID(packed_sigptr));
-#endif /* CSR_WIFI_HIP_NOISY */
-
-
- /* Append packed signal to F-H buffer */
- total_length = sig_chunks * card->config_data.sig_frag_size;
-
- card->fh_buffer.ptr[0] = (u8)(signal_length & 0xff);
- card->fh_buffer.ptr[1] =
- (u8)(((signal_length >> 8) & 0xf) | (SDIO_CMD_SIGNAL << 4));
-
- memcpy(card->fh_buffer.ptr + 2, packed_sigptr, signal_length);
- memset(card->fh_buffer.ptr + 2 + signal_length, 0,
- total_length - (2 + signal_length));
-
-#ifdef CSR_WIFI_HIP_NOISY
- unifi_error(card->ospriv, "proc_fh: fh_buffer %d bytes \n",
- signal_length + 2);
- dump(card->fh_buffer.ptr, signal_length + 2);
- unifi_trace(card->ospriv, UDBG1, " \n");
-#endif /* CSR_WIFI_HIP_NOISY */
-
- card->fh_buffer.ptr += total_length;
- card->fh_buffer.count += sig_chunks;
-
-#ifdef CSR_WIFI_HIP_NOISY
- unifi_error(card->ospriv, "Added %d to fh buf, len now %d, count %d\n",
- signal_length,
- card->fh_buffer.ptr - card->fh_buffer.buf,
- card->fh_buffer.count);
-#endif /* CSR_WIFI_HIP_NOISY */
-
- (*processed)++;
- pending_chunks -= sig_chunks;
-
- /* Log the signal to the UDI. */
- /* UDI will get the packed structure */
- /* Can not log the unpacked signal, unless we reconstruct it! */
- if (card->udi_hook)
- {
- (*card->udi_hook)(card->ospriv, packed_sigptr, signal_length,
- &bulkdata, UDI_LOG_FROM_HOST);
- }
-
- /* Remove entry from q */
- csptr->signal_length = 0;
- CSR_WIFI_HIP_Q_INC_R(sigq);
- }
-
- return CSR_RESULT_SUCCESS;
-} /* process_fh_cmd_queue() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * process_fh_traffic_queue
- *
- * Take signals off the from-host queue and copy them to the UniFi.
- * Does nothing if the UniFi has no slots free.
- *
- * Arguments:
- * card Pointer to card context struct
- * sigq Pointer to the traffic queue
- * processed Pointer to location to write:
- * 0 if there is nothing on the queue to process
- * 1 if a signal was successfully processed
- *
- * Returns:
- * CSR error code if an error occurred.
- *
- * Notes:
- * The from-host queue contains signal requests from the network driver
- * and any UDI clients interspersed.
- * ---------------------------------------------------------------------------
- */
-static CsrResult process_fh_traffic_queue(card_t *card, s32 *processed)
-{
- q_t *sigq = card->fh_traffic_queue;
-
- CsrResult r;
- s16 n = 0;
- s32 q_no;
- u16 pending_sigs = 0;
- u16 pending_chunks = 0;
- u16 needed_chunks;
- s32 space_chunks;
- u16 q_index;
- u32 host_tag = 0;
- u16 slot_num = 0;
-
- *processed = 0;
-
- /* calculate how many signals are in queues and how many chunks are needed. */
- for (n = UNIFI_NO_OF_TX_QS - 1; n >= 0; n--)
- {
- /* Get the number of pending signals. */
- pending_sigs += CSR_WIFI_HIP_Q_SLOTS_USED(&sigq[n]);
- unifi_trace(card->ospriv, UDBG5, "proc_fh%d: %d pending\n", n, pending_sigs);
-
- /* Work out how many chunks we have waiting to send */
- for (q_index = CSR_WIFI_HIP_Q_NEXT_R_SLOT(&sigq[n]);
- q_index != CSR_WIFI_HIP_Q_NEXT_W_SLOT(&sigq[n]);
- q_index = CSR_WIFI_HIP_Q_WRAP(&sigq[n], q_index + 1))
- {
- card_signal_t *csptr = CSR_WIFI_HIP_Q_SLOT_DATA(&sigq[n], q_index);
-
- /*
- * Note that GET_CHUNKS_FOR() needs the size of the packed
- * (wire-formatted) structure
- */
- pending_chunks += GET_CHUNKS_FOR(card->config_data.sig_frag_size, (u16)(csptr->signal_length + 2));
- }
- }
-
- /* If there are no pending signals, just return */
- if (pending_sigs == 0)
- {
- /* Nothing to do */
- return CSR_RESULT_SUCCESS;
- }
-
- /*
- * Check whether UniFi has space for all the buffered bulk-data
- * commands and signals as well.
- */
- needed_chunks = pending_chunks + card->fh_buffer.count;
-
- /* Round up to the block size if necessary */
- ROUND_UP_NEEDED_CHUNKS(card, needed_chunks);
-
- r = check_fh_sig_slots(card, needed_chunks, &space_chunks);
- if (r != CSR_RESULT_SUCCESS)
- {
- /* Error */
- unifi_error(card->ospriv, "Failed to read fh sig count\n");
- return r;
- }
-
-#ifdef CSR_WIFI_HIP_NOISY
- unifi_error(card->ospriv,
- "process_fh_traffic_queue: %d chunks free, need %d\n",
- space_chunks, needed_chunks);
- read_fhsr(card); /* debugging only */
-#endif /* CSR_WIFI_HIP_NOISY */
-
- /* Coalesce as many from-host signals as possible
- into a single block and write using a single CMD53 */
- if (needed_chunks > (u16)space_chunks)
- {
- /* Round up to the block size if necessary */
- ROUND_UP_SPACE_CHUNKS(card, space_chunks);
-
- if ((u16)space_chunks <= card->fh_buffer.count)
- {
- /*
- * No room in UniFi for any signals after the buffered bulk
- * data commands have been sent.
- */
- unifi_error(card->ospriv, "not enough room to send signals, need %d chunks, %d free\n",
- card->fh_buffer.count, space_chunks);
- card->generate_interrupt = 1;
- return 0;
- }
-
- pending_chunks = (u16)space_chunks - card->fh_buffer.count;
- }
-
- q_no = UNIFI_NO_OF_TX_QS - 1;
-
- /*
-     * pending_sigs will be exhausted if there is no restriction on the pending
-     * signals per queue. pending_chunks may be exhausted if there is a restriction.
-     * The q_no check will terminate the loop if there is a restriction and our
-     * round-robin algorithm fails to fill all chunks.
- */
- do
- {
- card_signal_t *csptr;
- u16 sig_chunks, total_length, free_chunks_in_fh_buffer;
- bulk_data_param_t bulkdata;
- u8 *packed_sigptr;
- u16 signal_length = 0;
-
- /* if this queue is empty go to next one. */
- if (CSR_WIFI_HIP_Q_SLOTS_USED(&sigq[q_no]) == 0)
- {
- q_no--;
- continue;
- }
-
- /* Retrieve the entry at the head of the queue */
- q_index = CSR_WIFI_HIP_Q_NEXT_R_SLOT(&sigq[q_no]);
-
- /* Get a pointer to the containing card_signal_t struct */
- csptr = CSR_WIFI_HIP_Q_SLOT_DATA(&sigq[q_no], q_index);
-
- /* Get the new length of the packed signal */
- signal_length = csptr->signal_length;
-
- if ((signal_length & 1) || (signal_length > UNIFI_PACKED_SIGBUF_SIZE))
- {
- unifi_error(card->ospriv, "process_fh_traffic_queue: Bad len: %d\n", signal_length);
- return CSR_RESULT_FAILURE;
- }
-
- /* Need space for 2-byte SDIO protocol header + signal */
- sig_chunks = GET_CHUNKS_FOR(card->config_data.sig_frag_size, (u16)(signal_length + 2));
- free_chunks_in_fh_buffer = GET_CHUNKS_FOR(card->config_data.sig_frag_size,
- (u16)((card->fh_buffer.buf + UNIFI_FH_BUF_SIZE) - card->fh_buffer.ptr));
- if (free_chunks_in_fh_buffer < sig_chunks)
- {
- /* No more room */
- unifi_notice(card->ospriv, "process_fh_traffic_queue: no more chunks.\n");
- break;
- }
-
- packed_sigptr = csptr->sigbuf;
- /* Claim and set up a from-host data slot */
- if (CSR_RESULT_FAILURE == CardWriteBulkData(card, csptr, (unifi_TrafficQueue)q_no))
- {
- q_no--;
- continue;
- }
-
- /* Sanity check: MA-PACKET.req must have a valid bulk data */
- if ((csptr->bulkdata[0].data_length == 0) || (csptr->bulkdata[0].os_data_ptr == NULL))
- {
- unifi_error(card->ospriv, "MA-PACKET.req with empty bulk data (%d bytes in %p)\n",
- csptr->bulkdata[0].data_length, csptr->bulkdata[0].os_data_ptr);
- dump(packed_sigptr, signal_length);
- return CSR_RESULT_FAILURE;
- }
-
- bulkdata.d[0].os_data_ptr = csptr->bulkdata[0].os_data_ptr;
- bulkdata.d[0].data_length = csptr->bulkdata[0].data_length;
- bulkdata.d[0].os_net_buf_ptr = csptr->bulkdata[0].os_net_buf_ptr;
- bulkdata.d[0].net_buf_length = csptr->bulkdata[0].net_buf_length;
-
-        /* The driver owns clearing of HIP slots in the following scenario:
-         * - the driver has issued a MA-PACKET.req signal
-         * - the f/w, after receiving the signal, decides it cannot send it out for some reason
-         * - so the f/w, without downloading the bulk data, just sends a confirmation with a failure status
-         * - and then sends a clear slot signal to the HIP
-         *
-         * In some cases, however, the clear slot signal never arrives and the slot is never freed.
-         *
-         * To handle this, the HIP keeps a record of the host tag for each occupied slot;
-         * based on that host tag and the slot state, the driver decides whether the slot
-         * has been cleared by a f/w signal or has to be freed by the driver.
- */
-
- if (card->fh_slot_host_tag_record)
- {
- /* Update the f-h slot record for the corresponding host tag */
- host_tag = GET_PACKED_MA_PACKET_REQUEST_HOST_TAG(packed_sigptr);
- slot_num = GET_PACKED_DATAREF_SLOT(packed_sigptr, 0) & 0x00FF;
-
- unifi_trace(card->ospriv, UDBG5,
- "process_fh_traffic_queue signal ID =%x fh slot=%x Host tag =%x\n",
- GET_SIGNAL_ID(packed_sigptr), slot_num, host_tag);
- card->fh_slot_host_tag_record[slot_num] = host_tag;
- }
- UNIFI_INIT_BULK_DATA(&bulkdata.d[1]);
- UNIFI_INIT_BULK_DATA(&csptr->bulkdata[0]);
- UNIFI_INIT_BULK_DATA(&csptr->bulkdata[1]);
-
-#ifdef CSR_WIFI_HIP_DATA_PLANE_PROFILE
- if (bulkdata.d[0].os_data_ptr)
- {
- if ((*bulkdata.d[0].os_data_ptr) & 0x08)
- {
- card->cmd_prof.tx_count++;
- }
- }
-#endif
- unifi_trace(card->ospriv, UDBG3, "Sending signal 0x%.4X\n",
- GET_SIGNAL_ID(packed_sigptr));
-#ifdef CSR_WIFI_HIP_NOISY
- unifi_error(card->ospriv, "Sending signal 0x%.4X\n",
- GET_SIGNAL_ID(packed_sigptr));
-#endif /* CSR_WIFI_HIP_NOISY */
-
- /* Append packed signal to F-H buffer */
- total_length = sig_chunks * card->config_data.sig_frag_size;
-
- card->fh_buffer.ptr[0] = (u8)(signal_length & 0xff);
- card->fh_buffer.ptr[1] =
- (u8)(((signal_length >> 8) & 0xf) | (SDIO_CMD_SIGNAL << 4));
-
- memcpy(card->fh_buffer.ptr + 2, packed_sigptr, signal_length);
- memset(card->fh_buffer.ptr + 2 + signal_length, 0,
- total_length - (2 + signal_length));
-
-#ifdef CSR_WIFI_HIP_NOISY
- unifi_error(card->ospriv, "proc_fh: fh_buffer %d bytes \n",
- signal_length + 2);
- dump(card->fh_buffer.ptr, signal_length + 2);
- unifi_trace(card->ospriv, UDBG1, " \n");
-#endif /* CSR_WIFI_HIP_NOISY */
-
- card->fh_buffer.ptr += total_length;
- card->fh_buffer.count += sig_chunks;
-
-#ifdef CSR_WIFI_HIP_NOISY
- unifi_error(card->ospriv, "Added %d to fh buf, len now %d, count %d\n",
- signal_length,
- card->fh_buffer.ptr - card->fh_buffer.buf,
- card->fh_buffer.count);
-#endif /* CSR_WIFI_HIP_NOISY */
-
- (*processed)++;
- pending_sigs--;
- pending_chunks -= sig_chunks;
-
- /* Log the signal to the UDI. */
- /* UDI will get the packed structure */
- /* Can not log the unpacked signal, unless we reconstruct it! */
- if (card->udi_hook)
- {
- (*card->udi_hook)(card->ospriv, packed_sigptr, signal_length,
- &bulkdata, UDI_LOG_FROM_HOST);
- }
-
- /* Remove entry from q */
- csptr->signal_length = 0;
- /* Note that the traffic queue has only one valid bulk data buffer. */
- csptr->bulkdata[0].data_length = 0;
-
- CSR_WIFI_HIP_Q_INC_R(&sigq[q_no]);
- } while ((pending_sigs > 0) && (pending_chunks > 0) && (q_no >= 0));
-
- return CSR_RESULT_SUCCESS;
-} /* process_fh_traffic_queue() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * flush_fh_buffer
- *
- * Write out the cached from-host signals to the UniFi.
- *
- * Arguments:
- * card Pointer to card context struct
- *
- * Returns:
- * CSR error code if an SDIO error occurred.
- * ---------------------------------------------------------------------------
- */
-static CsrResult flush_fh_buffer(card_t *card)
-{
- CsrResult r;
- u16 len;
- u16 sig_units;
- u16 data_round;
- u16 chunks_in_last_block;
- u16 padding_chunks;
- u16 i;
-
- len = card->fh_buffer.ptr - card->fh_buffer.buf;
-
-#ifdef CSR_WIFI_HIP_NOISY
- unifi_error(card->ospriv, "fh_buffer is at %p, ptr= %p\n",
- card->fh_buffer.buf, card->fh_buffer.ptr);
-#endif /* CSR_WIFI_HIP_NOISY */
-
- if (len == 0)
- {
- return CSR_RESULT_SUCCESS;
- }
-
-#ifdef CSR_WIFI_HIP_NOISY
- if (dump_fh_buf)
- {
- dump(card->fh_buffer.buf, len);
- dump_fh_buf = 0;
- }
-#endif /* CSR_WIFI_HIP_NOISY */
-
- if (card->sdio_io_block_pad)
- {
- /* Both of these are powers of 2 */
- sig_units = card->config_data.sig_frag_size;
- data_round = card->sdio_io_block_size;
-
- if (data_round > sig_units)
- {
- chunks_in_last_block = (len % data_round) / sig_units;
-
- if (chunks_in_last_block != 0)
- {
- padding_chunks = (data_round / sig_units) - chunks_in_last_block;
-
- memset(card->fh_buffer.ptr, 0, padding_chunks * sig_units);
- for (i = 0; i < padding_chunks; i++)
- {
- card->fh_buffer.ptr[1] = SDIO_CMD_PADDING << 4;
- card->fh_buffer.ptr += sig_units;
- }
-
- card->fh_buffer.count += padding_chunks;
- len += padding_chunks * sig_units;
- }
- }
- }
-
- r = unifi_bulk_rw(card,
- card->config_data.fromhost_sigbuf_handle,
- card->fh_buffer.buf,
- len, UNIFI_SDIO_WRITE);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to write fh signals: %u bytes, error %d\n", len, r);
- return r;
- }
-
- /* Update from-host-signals-written signal count */
- card->from_host_signals_w =
- (card->from_host_signals_w + card->fh_buffer.count) % 128u;
- r = unifi_write_8_or_16(card, card->sdio_ctrl_addr + 0,
- (u8)card->from_host_signals_w);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to write fh signal count %u with error %d\n",
- card->from_host_signals_w, r);
- return r;
- }
- card->generate_interrupt = 1;
-
- /* Reset the fh buffer pointer */
- card->fh_buffer.ptr = card->fh_buffer.buf;
- card->fh_buffer.count = 0;
-
-#ifdef CSR_WIFI_HIP_NOISY
- unifi_error(card->ospriv, "END flush: fh len %d, count %d\n",
- card->fh_buffer.ptr - card->fh_buffer.buf,
- card->fh_buffer.count);
-#endif /* CSR_WIFI_HIP_NOISY */
-
- return CSR_RESULT_SUCCESS;
-} /* flush_fh_buffer() */
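-/*
- * Worked example of the block padding above, with hypothetical sizes: with
- * sig_units = 64 and data_round = 512, a buffer of len = 1152 bytes has
- * 1152 % 512 = 128 bytes (2 chunks) in its last block, so 8 - 2 = 6
- * padding chunks (each tagged SDIO_CMD_PADDING) are appended, bringing
- * len to 1536 bytes, an exact multiple of the SDIO block size.
- */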
-
-
-/*
- * ---------------------------------------------------------------------------
- * restart_packet_flow
- *
- * This function is called before the bottom-half thread sleeps.
- * It checks whether both data and signal resources are available and
- * then calls the OS-layer function to re-enable packet transmission.
- *
- * Arguments:
- * card Pointer to card context struct
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-static void restart_packet_flow(card_t *card)
-{
- u8 q;
-
- /*
- * We only look at the fh_traffic_queue, because that is where packets from
- * the network stack are placed.
- */
- for (q = 0; q <= UNIFI_TRAFFIC_Q_VO; q++)
- {
- if (card_is_tx_q_paused(card, q) &&
- CSR_WIFI_HIP_Q_SLOTS_FREE(&card->fh_traffic_queue[q]) >= RESUME_XMIT_THRESHOLD)
- {
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
- unifi_debug_log_to_buf("U");
-#endif
- card_tx_q_unpause(card, q);
- unifi_restart_xmit(card->ospriv, (unifi_TrafficQueue)q);
- }
- }
-} /* restart_packet_flow() */
-
-
diff --git a/drivers/staging/csr/csr_wifi_hip_card_sdio_mem.c b/drivers/staging/csr/csr_wifi_hip_card_sdio_mem.c
deleted file mode 100644
index 17867f6..0000000
--- a/drivers/staging/csr/csr_wifi_hip_card_sdio_mem.c
+++ /dev/null
@@ -1,1713 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/*
- * ---------------------------------------------------------------------------
- * FILE: csr_wifi_hip_card_sdio_mem.c
- *
- * PURPOSE: Implementation of the Card API for SDIO.
- *
- * ---------------------------------------------------------------------------
- */
-#include "csr_wifi_hip_unifi.h"
-#include "csr_wifi_hip_card.h"
-
-#define SDIO_RETRIES 3
-#define CSR_WIFI_HIP_SDIO_TRACE_DATA_LENGTH 16
-
-
-#define retryable_sdio_error(_csrResult) (((_csrResult) == CSR_SDIO_RESULT_CRC_ERROR) || ((_csrResult) == CSR_SDIO_RESULT_TIMEOUT))
-
-
-/*
- * ---------------------------------------------------------------------------
- * retrying_read8
- * retrying_write8
- *
- * These functions provide the first level of retry for SDIO operations.
- * If an SDIO command fails because of a response timeout or CRC
- * error, it is retried immediately. If three attempts fail, we report
- * a failure.
- * If the command failed for any other reason, the failure is reported
- * immediately.
- *
- * Arguments:
- * card Pointer to card structure.
- * funcnum The SDIO function to access.
- * Function 0 is the Card Configuration Register space,
- * function 1/2 is the UniFi register space.
- * addr Address to access
- * pdata Pointer in which to return the value read.
- * data Value to write.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, non-zero error code on error:
- * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
- * CSR_RESULT_FAILURE an SDIO error occurred
- * ---------------------------------------------------------------------------
- */
-static CsrResult retrying_read8(card_t *card, s16 funcnum, u32 addr, u8 *pdata)
-{
- CsrSdioFunction *sdio = card->sdio_if;
- CsrResult r = CSR_RESULT_SUCCESS;
- s16 retries;
- CsrResult csrResult = CSR_RESULT_SUCCESS;
-
- retries = 0;
- while (retries++ < SDIO_RETRIES)
- {
- if (funcnum == 0)
- {
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
- unifi_debug_log_to_buf("r0@%02X", addr);
-#endif
- csrResult = CsrSdioF0Read8(sdio, addr, pdata);
- }
- else
- {
-#ifdef CSR_WIFI_TRANSPORT_CSPI
- unifi_error(card->ospriv,
- "retrying_read_f0_8: F1 8-bit reads are not allowed.\n");
- return CSR_RESULT_FAILURE;
-#else
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
- unifi_debug_log_to_buf("r@%02X", addr);
-#endif
- csrResult = CsrSdioRead8(sdio, addr, pdata);
-#endif
- }
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
- if (csrResult != CSR_RESULT_SUCCESS)
- {
- unifi_debug_log_to_buf("error=%X\n", csrResult);
- }
- else
- {
- unifi_debug_log_to_buf("=%X\n", *pdata);
- }
-#endif
- if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
- {
- return CSR_WIFI_HIP_RESULT_NO_DEVICE;
- }
- /*
- * Try again for retryable (CRC or TIMEOUT) errors,
- * break on success or fatal error
- */
- if (!retryable_sdio_error(csrResult))
- {
-#ifdef CSR_WIFI_HIP_DATA_PLANE_PROFILE
- card->cmd_prof.cmd52_count++;
-#endif
- break;
- }
- unifi_trace(card->ospriv, UDBG2, "retryable SDIO error reading F%d 0x%lX\n", funcnum, addr);
- }
-
- if ((csrResult == CSR_RESULT_SUCCESS) && (retries > 1))
- {
- unifi_warning(card->ospriv, "Read succeeded after %d attempts\n", retries);
- }
-
- if (csrResult != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to read from UniFi (addr 0x%lX) after %d tries\n",
- addr, retries - 1);
- /* Report any SDIO error as a general i/o error */
- r = CSR_RESULT_FAILURE;
- }
-
- return r;
-} /* retrying_read8() */
-
-
-static CsrResult retrying_write8(card_t *card, s16 funcnum, u32 addr, u8 data)
-{
- CsrSdioFunction *sdio = card->sdio_if;
- CsrResult r = CSR_RESULT_SUCCESS;
- s16 retries;
- CsrResult csrResult = CSR_RESULT_SUCCESS;
-
- retries = 0;
- while (retries++ < SDIO_RETRIES)
- {
- if (funcnum == 0)
- {
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
- unifi_debug_log_to_buf("w0@%02X=%X", addr, data);
-#endif
- csrResult = CsrSdioF0Write8(sdio, addr, data);
- }
- else
- {
-#ifdef CSR_WIFI_TRANSPORT_CSPI
- unifi_error(card->ospriv,
-                        "retrying_write8: F1 8-bit writes are not allowed.\n");
- return CSR_RESULT_FAILURE;
-#else
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
- unifi_debug_log_to_buf("w@%02X=%X", addr, data);
-#endif
- csrResult = CsrSdioWrite8(sdio, addr, data);
-#endif
- }
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
- if (csrResult != CSR_RESULT_SUCCESS)
- {
- unifi_debug_log_to_buf(",error=%X", csrResult);
- }
- unifi_debug_string_to_buf("\n");
-#endif
- if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
- {
- return CSR_WIFI_HIP_RESULT_NO_DEVICE;
- }
- /*
- * Try again for retryable (CRC or TIMEOUT) errors,
- * break on success or fatal error
- */
- if (!retryable_sdio_error(csrResult))
- {
-#ifdef CSR_WIFI_HIP_DATA_PLANE_PROFILE
- card->cmd_prof.cmd52_count++;
-#endif
- break;
- }
- unifi_trace(card->ospriv, UDBG2, "retryable SDIO error writing %02X to F%d 0x%lX\n",
- data, funcnum, addr);
- }
-
- if ((csrResult == CSR_RESULT_SUCCESS) && (retries > 1))
- {
- unifi_warning(card->ospriv, "Write succeeded after %d attempts\n", retries);
- }
-
- if (csrResult != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to write to UniFi (addr 0x%lX) after %d tries\n",
- addr, retries - 1);
- /* Report any SDIO error as a general i/o error */
- r = CSR_RESULT_FAILURE;
- }
-
- return r;
-} /* retrying_write8() */
-
-
-static CsrResult retrying_read16(card_t *card, s16 funcnum,
- u32 addr, u16 *pdata)
-{
- CsrSdioFunction *sdio = card->sdio_if;
- CsrResult r = CSR_RESULT_SUCCESS;
- s16 retries;
- CsrResult csrResult = CSR_RESULT_SUCCESS;
-
- retries = 0;
- while (retries++ < SDIO_RETRIES)
- {
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
- unifi_debug_log_to_buf("r@%02X", addr);
-#endif
- csrResult = CsrSdioRead16(sdio, addr, pdata);
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
- if (csrResult != CSR_RESULT_SUCCESS)
- {
- unifi_debug_log_to_buf("error=%X\n", csrResult);
- }
- else
- {
- unifi_debug_log_to_buf("=%X\n", *pdata);
- }
-#endif
- if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
- {
- return CSR_WIFI_HIP_RESULT_NO_DEVICE;
- }
-
- /*
- * Try again for retryable (CRC or TIMEOUT) errors,
- * break on success or fatal error
- */
- if (!retryable_sdio_error(csrResult))
- {
-#ifdef CSR_WIFI_HIP_DATA_PLANE_PROFILE
- card->cmd_prof.cmd52_count++;
-#endif
- break;
- }
- unifi_trace(card->ospriv, UDBG2, "retryable SDIO error reading F%d 0x%lX\n", funcnum, addr);
- }
-
- if ((csrResult == CSR_RESULT_SUCCESS) && (retries > 1))
- {
- unifi_warning(card->ospriv, "Read succeeded after %d attempts\n", retries);
- }
-
- if (csrResult != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to read from UniFi (addr 0x%lX) after %d tries\n",
- addr, retries - 1);
- /* Report any SDIO error as a general i/o error */
- r = CSR_RESULT_FAILURE;
- }
-
- return r;
-} /* retrying_read16() */
-
-
-static CsrResult retrying_write16(card_t *card, s16 funcnum,
- u32 addr, u16 data)
-{
- CsrSdioFunction *sdio = card->sdio_if;
- CsrResult r = CSR_RESULT_SUCCESS;
- s16 retries;
- CsrResult csrResult = CSR_RESULT_SUCCESS;
-
- retries = 0;
- while (retries++ < SDIO_RETRIES)
- {
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
- unifi_debug_log_to_buf("w@%02X=%X", addr, data);
-#endif
- csrResult = CsrSdioWrite16(sdio, addr, data);
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
- if (csrResult != CSR_RESULT_SUCCESS)
- {
- unifi_debug_log_to_buf(",error=%X", csrResult);
- }
- unifi_debug_string_to_buf("\n");
-#endif
- if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
- {
- return CSR_WIFI_HIP_RESULT_NO_DEVICE;
- }
-
- /*
- * Try again for retryable (CRC or TIMEOUT) errors,
- * break on success or fatal error
- */
- if (!retryable_sdio_error(csrResult))
- {
-#ifdef CSR_WIFI_HIP_DATA_PLANE_PROFILE
- card->cmd_prof.cmd52_count++;
-#endif
- break;
- }
- unifi_trace(card->ospriv, UDBG2, "retryable SDIO error writing %02X to F%d 0x%lX\n",
- data, funcnum, addr);
- }
-
- if ((csrResult == CSR_RESULT_SUCCESS) && (retries > 1))
- {
- unifi_warning(card->ospriv, "Write succeeded after %d attempts\n", retries);
- }
-
- if (csrResult != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to write to UniFi (addr 0x%lX) after %d tries\n",
- addr, retries - 1);
- /* Report any SDIO error as a general i/o error */
- r = CSR_RESULT_FAILURE;
- }
-
- return r;
-} /* retrying_write16() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * sdio_read_f0
- *
- * Reads a byte value from the CCCR (func 0) area of UniFi.
- *
- * Arguments:
- * card Pointer to card structure.
- * addr Address to read from
- * pdata Pointer in which to store the read value.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, non-zero error code on error:
- * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
- * CSR_RESULT_FAILURE an SDIO error occurred
- * ---------------------------------------------------------------------------
- */
-CsrResult sdio_read_f0(card_t *card, u32 addr, u8 *pdata)
-{
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
- card->cmd_prof.cmd52_f0_r_count++;
-#endif
- return retrying_read8(card, 0, addr, pdata);
-} /* sdio_read_f0() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * sdio_write_f0
- *
- * Writes a byte value to the CCCR (func 0) area of UniFi.
- *
- * Arguments:
- * card Pointer to card structure.
- *      addr            Address to write to
- * data Data value to write.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, non-zero error code on error:
- * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
- * CSR_RESULT_FAILURE an SDIO error occurred
- * ---------------------------------------------------------------------------
- */
-CsrResult sdio_write_f0(card_t *card, u32 addr, u8 data)
-{
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
- card->cmd_prof.cmd52_f0_w_count++;
-#endif
- return retrying_write8(card, 0, addr, data);
-} /* sdio_write_f0() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_read_direct_8_or_16
- *
- *      Read an 8-bit value from the UniFi SDIO interface.
- *
- * Arguments:
- * card Pointer to card structure.
- * addr Address to read from
- * pdata Pointer in which to return data.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, non-zero error code on error:
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_read_direct_8_or_16(card_t *card, u32 addr, u8 *pdata)
-{
-#ifdef CSR_WIFI_TRANSPORT_CSPI
- u16 w;
- CsrResult r;
-
- r = retrying_read16(card, card->function, addr, &w);
- *pdata = (u8)(w & 0xFF);
- return r;
-#else
- return retrying_read8(card, card->function, addr, pdata);
-#endif
-} /* unifi_read_direct_8_or_16() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_write_direct_8_or_16
- *
- * Write a byte value to the UniFi SDIO interface.
- *
- * Arguments:
- * card Pointer to card structure.
- * addr Address to write to
- * data Value to write.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, non-zero error code on error
- *
- * Notes:
- * If 8-bit write is used, the even address *must* be written second.
- * This is because writes to odd bytes are cached and not committed
- * to memory until the preceding even address is written.
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_write_direct_8_or_16(card_t *card, u32 addr, u8 data)
-{
- if (addr & 1)
- {
- unifi_warning(card->ospriv,
- "Warning: Byte write to an odd address (0x%lX) is dangerous\n",
- addr);
- }
-
-#ifdef CSR_WIFI_TRANSPORT_CSPI
- return retrying_write16(card, card->function, addr, (u16)data);
-#else
- return retrying_write8(card, card->function, addr, data);
-#endif
-} /* unifi_write_direct_8_or_16() */
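
/*
 * Editor-added sketch, not part of the original file: it only illustrates
 * the byte-ordering rule from the note above, assuming the plain SDIO
 * (non-CSPI) build of unifi_write_direct_8_or_16(). The helper name is
 * hypothetical; real callers use unifi_write_direct16() below for whole
 * words, and the odd-address write here deliberately triggers the warning.
 */
static CsrResult example_write_word_as_bytes(card_t *card, u32 even_addr, u16 value)
{
    CsrResult r;

    /* Cache the high byte at the odd address first... */
    r = unifi_write_direct_8_or_16(card, even_addr + 1, (u8)(value >> 8));
    if (r != CSR_RESULT_SUCCESS)
    {
        return r;
    }
    /* ...then the even-address write commits both bytes to memory. */
    return unifi_write_direct_8_or_16(card, even_addr, (u8)(value & 0xFF));
}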
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_read_direct16
- *
- * Read a 16-bit value from the UniFi SDIO interface.
- *
- * Arguments:
- * card Pointer to card structure.
- * addr Address to read from
- * pdata Pointer in which to return data.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, non-zero error code on error:
- * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
- * CSR_RESULT_FAILURE an SDIO error occurred
- *
- * Notes:
- * The even address *must* be read first. This is because reads from
- * odd bytes are cached and read from memory when the preceding
- * even address is read.
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_read_direct16(card_t *card, u32 addr, u16 *pdata)
-{
- return retrying_read16(card, card->function, addr, pdata);
-} /* unifi_read_direct16() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_write_direct16
- *
- * Write a 16-bit value to the UniFi SDIO interface.
- *
- * Arguments:
- * card Pointer to card structure.
- * addr Address to write to
- * data Value to write.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, non-zero error code on error:
- * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
- * CSR_RESULT_FAILURE an SDIO error occurred
- *
- * Notes:
- * The even address *must* be written second. This is because writes to
- * odd bytes are cached and not committed to memory until the preceding
- * even address is written.
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_write_direct16(card_t *card, u32 addr, u16 data)
-{
- return retrying_write16(card, card->function, addr, data);
-} /* unifi_write_direct16() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_read_direct32
- *
- * Read a 32-bit value from the UniFi SDIO interface.
- *
- * Arguments:
- * card Pointer to card structure.
- * addr Address to read from
- * pdata Pointer in which to return data.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, non-zero error code on error:
- * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
- * CSR_RESULT_FAILURE an SDIO error occurred
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_read_direct32(card_t *card, u32 addr, u32 *pdata)
-{
- CsrResult r;
- u16 w0, w1;
-
- r = retrying_read16(card, card->function, addr, &w0);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
-
- r = retrying_read16(card, card->function, addr + 2, &w1);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
-
- *pdata = ((u32)w1 << 16) | (u32)w0;
-
- return CSR_RESULT_SUCCESS;
-} /* unifi_read_direct32() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_read_directn_match
- *
- * Read multiple 8-bit values from the UniFi SDIO interface,
- * stopping when either we have read 'len' bytes or we have read
- *      an octet equal to 'match'. If 'match' is not a valid octet
- * then this function is the same as 'unifi_read_directn'.
- *
- * Arguments:
- * card Pointer to card structure.
- * addr Start address to read from.
- * pdata Pointer to which to write data.
- *      len             Maximum number of bytes to read
- * match The value to stop reading at.
- * num Pointer to buffer to write number of bytes read
- *
- * Returns:
- * number of octets read on success, negative error code on error:
- * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
- * CSR_RESULT_FAILURE an SDIO error occurred
- *
- * Notes:
- * The even address *must* be read first. This is because reads from
- * odd bytes are cached and read from memory when the preceding
- * even address is read.
- * ---------------------------------------------------------------------------
- */
-static CsrResult unifi_read_directn_match(card_t *card, u32 addr, void *pdata, u16 len, s8 m, u32 *num)
-{
- CsrResult r;
- u32 i;
- u8 *cptr;
- u16 w;
-
- *num = 0;
-
- cptr = (u8 *)pdata;
- for (i = 0; i < len; i += 2)
- {
- r = retrying_read16(card, card->function, addr, &w);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
-
- *cptr++ = ((u8)w & 0xFF);
- if ((m >= 0) && (((s8)w & 0xFF) == m))
- {
- break;
- }
-
- if (i + 1 == len)
- {
- /* The len is odd. Ignore the last high byte */
- break;
- }
-
- *cptr++ = ((u8)(w >> 8) & 0xFF);
- if ((m >= 0) && (((s8)(w >> 8) & 0xFF) == m))
- {
- break;
- }
-
- addr += 2;
- }
-
- *num = (s32)(cptr - (u8 *)pdata);
- return CSR_RESULT_SUCCESS;
-}
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_read_directn
- *
- * Read multiple 8-bit values from the UniFi SDIO interface.
- *
- * Arguments:
- * card Pointer to card structure.
- * addr Start address to read from.
- * pdata Pointer to which to write data.
- * len Number of bytes to read
- *
- * Returns:
- * 0 on success, non-zero error code on error:
- * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
- * CSR_RESULT_FAILURE an SDIO error occurred
- *
- * Notes:
- * The even address *must* be read first. This is because reads from
- * odd bytes are cached and read from memory when the preceding
- * even address is read.
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_read_directn(card_t *card, u32 addr, void *pdata, u16 len)
-{
- u32 num;
-
- return unifi_read_directn_match(card, addr, pdata, len, -1, &num);
-} /* unifi_read_directn() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_write_directn
- *
- * Write multiple 8-bit values to the UniFi SDIO interface.
- *
- * Arguments:
- * card Pointer to card structure.
- * addr Start address to write to.
- * pdata Source data pointer.
- * len Number of bytes to write, must be even.
- *
- * Returns:
- * 0 on success, non-zero error code on error:
- * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
- * CSR_RESULT_FAILURE an SDIO error occurred
- *
- * Notes:
- * The UniFi has a peculiar 16-bit bus architecture. Writes are only
- * committed to memory when an even address is accessed. Writes to
- * odd addresses are cached and only committed if the next write is
- * to the preceding address.
- * This means we must write data as pairs of bytes in reverse order.
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_write_directn(card_t *card, u32 addr, void *pdata, u16 len)
-{
- CsrResult r;
- u8 *cptr;
- s16 signed_len;
-
- cptr = (u8 *)pdata;
- signed_len = (s16)len;
- while (signed_len > 0)
- {
- /* This is UniFi-1 specific code. CSPI not supported so 8-bit write allowed */
- r = retrying_write16(card, card->function, addr, *cptr);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
-
- cptr += 2;
- addr += 2;
- signed_len -= 2;
- }
-
- return CSR_RESULT_SUCCESS;
-} /* unifi_write_directn() */
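
/*
 * Editor-added sketch, not part of the original file: unifi_write_directn()
 * expects an even length, so an odd-sized source buffer is copied into a
 * zero-padded scratch buffer first. The helper name, the fixed 64-byte
 * bound and the availability of memcpy()/memset() via the kernel headers
 * are assumptions of this illustration.
 */
static CsrResult example_write_padded(card_t *card, u32 sdio_addr,
                                      const u8 *src, u16 len)
{
    u8 scratch[64];
    u16 even_len = (u16)((len + 1) & ~1);

    if (even_len > sizeof(scratch))
    {
        return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
    }
    memset(scratch, 0, even_len);
    memcpy(scratch, src, len);
    return unifi_write_directn(card, sdio_addr, scratch, even_len);
}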
-
-
-/*
- * ---------------------------------------------------------------------------
- * set_dmem_page
- * set_pmem_page
- *
- * Set up the page register for the shared data memory window or program
- * memory window.
- *
- * Arguments:
- * card Pointer to card structure.
- * dmem_addr UniFi shared-data-memory address to access.
- * pmem_addr UniFi program memory address to access. This includes
- * External FLASH memory at 0x000000
- * Processor program memory at 0x200000
- * External SRAM at memory 0x400000
- * paddr Location to write an SDIO address (24-bit) for
- * use in a unifi_read_direct or unifi_write_direct call.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success
- * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
- * CSR_RESULT_FAILURE an SDIO error occurred
- * ---------------------------------------------------------------------------
- */
-static CsrResult set_dmem_page(card_t *card, u32 dmem_addr, u32 *paddr)
-{
- u16 page, addr;
- u32 len;
- CsrResult r;
-
- *paddr = 0;
-
- if (!ChipHelper_DecodeWindow(card->helper,
- CHIP_HELPER_WINDOW_3,
- CHIP_HELPER_WT_SHARED,
- dmem_addr / 2,
- &page, &addr, &len))
- {
- unifi_error(card->ospriv, "Failed to decode SHARED_DMEM_PAGE %08lx\n", dmem_addr);
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- if (page != card->dmem_page)
- {
- unifi_trace(card->ospriv, UDBG6, "setting dmem page=0x%X, addr=0x%lX\n", page, addr);
-
- /* change page register */
- r = unifi_write_direct16(card, ChipHelper_HOST_WINDOW3_PAGE(card->helper) * 2, page);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to write SHARED_DMEM_PAGE\n");
- return r;
- }
-
- card->dmem_page = page;
- }
-
- *paddr = ((s32)addr * 2) + (dmem_addr & 1);
-
- return CSR_RESULT_SUCCESS;
-} /* set_dmem_page() */
-
-
-static CsrResult set_pmem_page(card_t *card, u32 pmem_addr,
- enum chip_helper_window_type mem_type, u32 *paddr)
-{
- u16 page, addr;
- u32 len;
- CsrResult r;
-
- *paddr = 0;
-
- if (!ChipHelper_DecodeWindow(card->helper,
- CHIP_HELPER_WINDOW_2,
- mem_type,
- pmem_addr / 2,
- &page, &addr, &len))
- {
- unifi_error(card->ospriv, "Failed to decode PROG MEM PAGE %08lx %d\n", pmem_addr, mem_type);
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- if (page != card->pmem_page)
- {
- unifi_trace(card->ospriv, UDBG6, "setting pmem page=0x%X, addr=0x%lX\n", page, addr);
-
- /* change page register */
- r = unifi_write_direct16(card, ChipHelper_HOST_WINDOW2_PAGE(card->helper) * 2, page);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to write PROG MEM PAGE\n");
- return r;
- }
-
- card->pmem_page = page;
- }
-
- *paddr = ((s32)addr * 2) + (pmem_addr & 1);
-
- return CSR_RESULT_SUCCESS;
-} /* set_pmem_page() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * set_page
- *
- * Sets up the appropriate page register to access the given address.
- * Returns the sdio address at which the unifi address can be accessed.
- *
- * Arguments:
- * card Pointer to card structure.
- * generic_addr UniFi internal address to access, in Generic Pointer
- * format, i.e. top byte is space indicator.
- * paddr Location to write page address
- * SDIO address (24-bit) for use in a unifi_read_direct or
- * unifi_write_direct call
- *
- * Returns:
- * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
- * CSR_RESULT_FAILURE an SDIO error occurred
- * CSR_WIFI_HIP_RESULT_INVALID_VALUE the address is invalid
- * ---------------------------------------------------------------------------
- */
-static CsrResult set_page(card_t *card, u32 generic_addr, u32 *paddr)
-{
- s32 space;
- u32 addr;
- CsrResult r = CSR_RESULT_SUCCESS;
-
- if (!paddr)
- {
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
- *paddr = 0;
- space = UNIFI_GP_SPACE(generic_addr);
- addr = UNIFI_GP_OFFSET(generic_addr);
- switch (space)
- {
- case UNIFI_SH_DMEM:
- /* Shared Data Memory is accessed via the Shared Data Memory window */
- r = set_dmem_page(card, addr, paddr);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
- break;
-
- case UNIFI_EXT_FLASH:
- if (!ChipHelper_HasFlash(card->helper))
- {
- unifi_error(card->ospriv, "Bad address space for chip in generic pointer 0x%08lX (helper=0x%x)\n",
- generic_addr, card->helper);
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
- /* External FLASH is accessed via the Program Memory window */
- r = set_pmem_page(card, addr, CHIP_HELPER_WT_FLASH, paddr);
- break;
-
- case UNIFI_EXT_SRAM:
- if (!ChipHelper_HasExtSram(card->helper))
- {
-                unifi_error(card->ospriv, "Bad address space for chip in generic pointer 0x%08lX (helper=0x%x)\n",
- generic_addr, card->helper);
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
- /* External SRAM is accessed via the Program Memory window */
- r = set_pmem_page(card, addr, CHIP_HELPER_WT_EXT_SRAM, paddr);
- break;
-
- case UNIFI_REGISTERS:
- /* Registers are accessed directly */
- *paddr = addr;
- break;
-
- case UNIFI_PHY_DMEM:
- r = unifi_set_proc_select(card, UNIFI_PROC_PHY);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
- *paddr = ChipHelper_DATA_MEMORY_RAM_OFFSET(card->helper) * 2 + addr;
- break;
-
- case UNIFI_MAC_DMEM:
- r = unifi_set_proc_select(card, UNIFI_PROC_MAC);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
- *paddr = ChipHelper_DATA_MEMORY_RAM_OFFSET(card->helper) * 2 + addr;
- break;
-
- case UNIFI_BT_DMEM:
- if (!ChipHelper_HasBt(card->helper))
- {
- unifi_error(card->ospriv, "Bad address space for chip in generic pointer 0x%08lX (helper=0x%x)\n",
- generic_addr, card->helper);
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
- r = unifi_set_proc_select(card, UNIFI_PROC_BT);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
- *paddr = ChipHelper_DATA_MEMORY_RAM_OFFSET(card->helper) * 2 + addr;
- break;
-
- case UNIFI_PHY_PMEM:
- r = unifi_set_proc_select(card, UNIFI_PROC_PHY);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
- r = set_pmem_page(card, addr, CHIP_HELPER_WT_CODE_RAM, paddr);
- break;
-
- case UNIFI_MAC_PMEM:
- r = unifi_set_proc_select(card, UNIFI_PROC_MAC);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
- r = set_pmem_page(card, addr, CHIP_HELPER_WT_CODE_RAM, paddr);
- break;
-
- case UNIFI_BT_PMEM:
- if (!ChipHelper_HasBt(card->helper))
- {
- unifi_error(card->ospriv, "Bad address space for chip in generic pointer 0x%08lX (helper=0x%x)\n",
- generic_addr, card->helper);
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
- r = unifi_set_proc_select(card, UNIFI_PROC_BT);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
- r = set_pmem_page(card, addr, CHIP_HELPER_WT_CODE_RAM, paddr);
- break;
-
- case UNIFI_PHY_ROM:
- if (!ChipHelper_HasRom(card->helper))
- {
- unifi_error(card->ospriv, "Bad address space for chip in generic pointer 0x%08lX (helper=0x%x)\n",
- generic_addr, card->helper);
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
- r = unifi_set_proc_select(card, UNIFI_PROC_PHY);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
- r = set_pmem_page(card, addr, CHIP_HELPER_WT_ROM, paddr);
- break;
-
- case UNIFI_MAC_ROM:
- if (!ChipHelper_HasRom(card->helper))
- {
- unifi_error(card->ospriv, "Bad address space for chip in generic pointer 0x%08lX (helper=0x%x)\n",
- generic_addr, card->helper);
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
- r = unifi_set_proc_select(card, UNIFI_PROC_MAC);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
- r = set_pmem_page(card, addr, CHIP_HELPER_WT_ROM, paddr);
- break;
-
- case UNIFI_BT_ROM:
- if (!ChipHelper_HasRom(card->helper) || !ChipHelper_HasBt(card->helper))
- {
- unifi_error(card->ospriv, "Bad address space for chip in generic pointer 0x%08lX (helper=0x%x)\n",
- generic_addr, card->helper);
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
- r = unifi_set_proc_select(card, UNIFI_PROC_BT);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
- r = set_pmem_page(card, addr, CHIP_HELPER_WT_ROM, paddr);
- break;
-
- default:
- unifi_error(card->ospriv, "Bad address space %d in generic pointer 0x%08lX (helper=0x%x)\n",
- space, generic_addr, card->helper);
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- return r;
-} /* set_page() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_set_proc_select
- *
- *      Select which XAP processor (MAC, PHY or both) subsequent
- *      processor-relative memory accesses refer to. The Proc Select
- *      register is only written when the value differs from the cached
- *      card->proc_select.
- *
- * Arguments:
- * card Pointer to card structure.
- * select Which XAP core to select
- *
- * Returns:
- * 0 on success, non-zero error code on error:
- * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
- * CSR_RESULT_FAILURE an SDIO error occurred
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_set_proc_select(card_t *card, enum unifi_dbg_processors_select select)
-{
- CsrResult r;
-
-    /* Verify that the select value is allowed. */
- switch (select)
- {
- case UNIFI_PROC_MAC:
- case UNIFI_PROC_PHY:
- case UNIFI_PROC_BOTH:
- break;
-
-
- default:
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- if (card->proc_select != (u32)select)
- {
- r = unifi_write_direct16(card,
- ChipHelper_DBG_HOST_PROC_SELECT(card->helper) * 2,
- (u8)select);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to write to Proc Select register\n");
- return r;
- }
-
- card->proc_select = (u32)select;
- }
-
- return CSR_RESULT_SUCCESS;
-}
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_read_8_or_16
- *
- * Performs a byte read of the given address in shared data memory.
- * Set up the shared data memory page register as required.
- *
- * Arguments:
- * card Pointer to card structure.
- * unifi_addr UniFi shared-data-memory address to access.
- * pdata Pointer to a byte variable for the value read.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, non-zero error code on error:
- * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
- * CSR_RESULT_FAILURE an SDIO error occurred
- * CSR_WIFI_HIP_RESULT_INVALID_VALUE a bad generic pointer was specified
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_read_8_or_16(card_t *card, u32 unifi_addr, u8 *pdata)
-{
- u32 sdio_addr;
- CsrResult r;
-#ifdef CSR_WIFI_TRANSPORT_CSPI
- u16 w;
-#endif
-
- r = set_page(card, unifi_addr, &sdio_addr);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
-
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
- card->cmd_prof.cmd52_r8or16_count++;
-#endif
-#ifdef CSR_WIFI_TRANSPORT_CSPI
- r = retrying_read16(card, card->function, sdio_addr, &w);
- *pdata = (u8)(w & 0xFF);
- return r;
-#else
- return retrying_read8(card, card->function, sdio_addr, pdata);
-#endif
-} /* unifi_read_8_or_16() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_write_8_or_16
- *
- * Performs a byte write of the given address in shared data memory.
- * Set up the shared data memory page register as required.
- *
- * Arguments:
- * card Pointer to card context struct.
- * unifi_addr UniFi shared-data-memory address to access.
- * data Value to write.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, non-zero error code on error:
- * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
- * CSR_RESULT_FAILURE an SDIO error occurred
- * CSR_WIFI_HIP_RESULT_INVALID_VALUE a bad generic pointer was specified
- *
- * Notes:
- * Beware using unifi_write8() because byte writes are not safe on UniFi.
- * Writes to odd bytes are cached, writes to even bytes perform a 16-bit
- * write with the previously cached odd byte.
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_write_8_or_16(card_t *card, u32 unifi_addr, u8 data)
-{
- u32 sdio_addr;
- CsrResult r;
-#ifdef CSR_WIFI_TRANSPORT_CSPI
- u16 w;
-#endif
-
- r = set_page(card, unifi_addr, &sdio_addr);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
-
- if (sdio_addr & 1)
- {
- unifi_warning(card->ospriv,
- "Warning: Byte write to an odd address (0x%lX) is dangerous\n",
- sdio_addr);
- }
-
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
- card->cmd_prof.cmd52_w8or16_count++;
-#endif
-#ifdef CSR_WIFI_TRANSPORT_CSPI
- w = data;
- return retrying_write16(card, card->function, sdio_addr, w);
-#else
- return retrying_write8(card, card->function, sdio_addr, data);
-#endif
-} /* unifi_write_8_or_16() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_card_read16
- *
- * Performs a 16-bit read of the given address in shared data memory.
- * Set up the shared data memory page register as required.
- *
- * Arguments:
- * card Pointer to card structure.
- * unifi_addr UniFi shared-data-memory address to access.
- * pdata Pointer to a 16-bit int variable for the value read.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, non-zero error code on error:
- * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
- * CSR_RESULT_FAILURE an SDIO error occurred
- * CSR_WIFI_HIP_RESULT_INVALID_VALUE a bad generic pointer was specified
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_card_read16(card_t *card, u32 unifi_addr, u16 *pdata)
-{
- u32 sdio_addr;
- CsrResult r;
-
- r = set_page(card, unifi_addr, &sdio_addr);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
-
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
- card->cmd_prof.cmd52_r16_count++;
-#endif
- return unifi_read_direct16(card, sdio_addr, pdata);
-} /* unifi_card_read16() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_card_write16
- *
- * Performs a 16-bit write of the given address in shared data memory.
- * Set up the shared data memory page register as required.
- *
- * Arguments:
- * card Pointer to card structure.
- * unifi_addr UniFi shared-data-memory address to access.
- *      data            Value to write.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, non-zero error code on error:
- * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
- * CSR_RESULT_FAILURE an SDIO error occurred
- * CSR_WIFI_HIP_RESULT_INVALID_VALUE a bad generic pointer was specified
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_card_write16(card_t *card, u32 unifi_addr, u16 data)
-{
- u32 sdio_addr;
- CsrResult r;
-
- r = set_page(card, unifi_addr, &sdio_addr);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
-
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
- card->cmd_prof.cmd52_w16_count++;
-#endif
- return unifi_write_direct16(card, sdio_addr, data);
-} /* unifi_card_write16() */
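
/*
 * Editor-added sketch, not part of the original file: a 16-bit
 * read-modify-write of a shared-memory word built on the two helpers
 * above. Working on whole words avoids the odd-byte caching hazard that
 * byte writes are subject to. The helper name and the mask/value
 * parameters are illustrative.
 */
static CsrResult example_update_shared_word(card_t *card, u32 unifi_addr,
                                            u16 mask, u16 value)
{
    u16 w;
    CsrResult r;

    r = unifi_card_read16(card, unifi_addr, &w);
    if (r != CSR_RESULT_SUCCESS)
    {
        return r;
    }
    w = (u16)((w & ~mask) | (value & mask));
    return unifi_card_write16(card, unifi_addr, w);
}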
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_read32
- *
- * Performs a 32-bit read of the given address in shared data memory.
- * Set up the shared data memory page register as required.
- *
- * Arguments:
- * card Pointer to card structure.
- * unifi_addr UniFi shared-data-memory address to access.
- *      pdata           Pointer to a 32-bit int variable for the value read.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, non-zero error code on error:
- * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
- * CSR_RESULT_FAILURE an SDIO error occurred
- * CSR_WIFI_HIP_RESULT_INVALID_VALUE a bad generic pointer was specified
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_read32(card_t *card, u32 unifi_addr, u32 *pdata)
-{
- u32 sdio_addr;
- CsrResult r;
-
- r = set_page(card, unifi_addr, &sdio_addr);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
-
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
- card->cmd_prof.cmd52_r32_count++;
-#endif
- return unifi_read_direct32(card, sdio_addr, pdata);
-} /* unifi_read32() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_card_readn
- * unifi_readnz
- *
- * Read multiple 8-bit values from the UniFi SDIO interface.
- * This function interprets the address as a GenericPointer as
- * defined in the UniFi Host Interface Protocol Specification.
- * The readnz version of this function will stop when it reads a
- * zero octet.
- *
- * Arguments:
- * card Pointer to card structure.
- * unifi_addr UniFi shared-data-memory address to access.
- * pdata Pointer to which to write data.
- * len Number of bytes to read
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, non-zero error code on error:
- * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
- * CSR_RESULT_FAILURE an SDIO error occurred
- * CSR_WIFI_HIP_RESULT_INVALID_VALUE a bad generic pointer was specified
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_readn_match(card_t *card, u32 unifi_addr, void *pdata, u16 len, s8 match)
-{
- u32 sdio_addr;
- CsrResult r;
- u32 num;
-
- r = set_page(card, unifi_addr, &sdio_addr);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
-
- r = unifi_read_directn_match(card, sdio_addr, pdata, len, match, &num);
- return r;
-} /* unifi_readn_match() */
-
-
-CsrResult unifi_card_readn(card_t *card, u32 unifi_addr, void *pdata, u16 len)
-{
- return unifi_readn_match(card, unifi_addr, pdata, len, -1);
-} /* unifi_card_readn() */
-
-
-CsrResult unifi_readnz(card_t *card, u32 unifi_addr, void *pdata, u16 len)
-{
- return unifi_readn_match(card, unifi_addr, pdata, len, 0);
-} /* unifi_readnz() */
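
/*
 * Editor-added sketch, not part of the original file: reading a
 * NUL-terminated string (for example a build identifier) out of the chip
 * with unifi_readnz(), which stops at the first zero octet or after the
 * given length. Forcing a terminator covers the case where no zero was
 * found within the buffer. The helper name and buffer handling are
 * illustrative.
 */
static CsrResult example_read_string(card_t *card, u32 unifi_addr,
                                     char *buf, u16 buflen)
{
    CsrResult r;

    if (buflen == 0)
    {
        return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
    }
    r = unifi_readnz(card, unifi_addr, buf, (u16)(buflen - 1));
    if (r != CSR_RESULT_SUCCESS)
    {
        return r;
    }
    buf[buflen - 1] = '\0';
    return CSR_RESULT_SUCCESS;
}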
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_read_shared_count
- *
- * Read signal count locations, checking for an SDIO error. The
- * signal count locations only contain a valid number if the
- * highest bit isn't set.
- *
- * Arguments:
- * card Pointer to card context structure.
- * addr Shared-memory address to read.
- *
- * Returns:
- * Value read from memory (0-127) or -1 on error
- * ---------------------------------------------------------------------------
- */
-s32 unifi_read_shared_count(card_t *card, u32 addr)
-{
- u8 b;
- /* I've increased this count, because I have seen cases where
- * there were three reads in a row with the top bit set. I'm not
- * sure why this might have happened, but I can't see a problem
- * with increasing this limit. It's better to take a while to
- * recover than to fail. */
-#define SHARED_READ_RETRY_LIMIT 10
- s32 i;
-
- /*
- * Get the to-host-signals-written count.
- * The top-bit will be set if the firmware was in the process of
- * changing the value, in which case we read again.
- */
- /* Limit the number of repeats so we don't freeze */
- for (i = 0; i < SHARED_READ_RETRY_LIMIT; i++)
- {
- CsrResult r;
- r = unifi_read_8_or_16(card, addr, &b);
- if (r != CSR_RESULT_SUCCESS)
- {
- return -1;
- }
- if (!(b & 0x80))
- {
- /* There is a chance that the MSB may have contained invalid data
- * (overflow) at the time it was read. Therefore mask off the MSB.
- * This avoids a race between driver read and firmware write of the
-             * word; the value we need is in the lower 8 bits anyway.
- */
- return (s32)(b & 0xff);
- }
- }
-
- return -1; /* this function has changed in WMM mods */
-} /* unifi_read_shared_count() */
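
/*
 * Editor-added sketch, not part of the original file: a typical use of
 * unifi_read_shared_count() is to work out how many new signals the
 * firmware has queued since the last poll. The counters hold 0-127 and
 * wrap, hence the modulo-128 difference. 'count_addr' and the cached
 * 'last_count' are illustrative parameters.
 */
static s32 example_new_signal_count(card_t *card, u32 count_addr, u8 last_count)
{
    s32 current = unifi_read_shared_count(card, count_addr);

    if (current < 0)
    {
        return -1;      /* SDIO error, or the counter never stabilised */
    }
    return (current - (s32)last_count) & 0x7F;
}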
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_writen
- *
- * Write multiple 8-bit values to the UniFi SDIO interface using CMD52
- * This function interprets the address as a GenericPointer as
- * defined in the UniFi Host Interface Protocol Specification.
- *
- * Arguments:
- * card Pointer to card structure.
- * unifi_addr UniFi shared-data-memory address to access.
- *      pdata           Source data pointer.
- * len Number of bytes to write
- *
- * Returns:
- * 0 on success, non-zero error code on error:
- * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
- * CSR_RESULT_FAILURE an SDIO error occurred
- * CSR_WIFI_HIP_RESULT_INVALID_VALUE an odd length or length too big.
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_writen(card_t *card, u32 unifi_addr, void *pdata, u16 len)
-{
- u32 sdio_addr;
- CsrResult r;
-
- r = set_page(card, unifi_addr, &sdio_addr);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
-
- return unifi_write_directn(card, sdio_addr, pdata, len);
-} /* unifi_writen() */
-
-
-static CsrResult csr_sdio_block_rw(card_t *card, s16 funcnum,
- u32 addr, u8 *pdata,
- u16 count, s16 dir_is_write)
-{
- CsrResult csrResult;
-
- if (dir_is_write == UNIFI_SDIO_READ)
- {
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
- unifi_debug_log_to_buf("r@%02X#%X=", addr, count);
-#endif
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
- unifi_debug_log_to_buf("R");
-#endif
- csrResult = CsrSdioRead(card->sdio_if, addr, pdata, count);
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
- unifi_debug_log_to_buf("<");
-#endif
- }
- else
- {
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
- unifi_debug_log_to_buf("w@%02X#%X=", addr, count);
- unifi_debug_hex_to_buf(pdata, count > CSR_WIFI_HIP_SDIO_TRACE_DATA_LENGTH?CSR_WIFI_HIP_SDIO_TRACE_DATA_LENGTH : count);
-#endif
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
- unifi_debug_log_to_buf("W");
-#endif
- csrResult = CsrSdioWrite(card->sdio_if, addr, pdata, count);
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
- unifi_debug_log_to_buf(">");
-#endif
- }
-#ifdef CSR_WIFI_HIP_DATA_PLANE_PROFILE
- card->cmd_prof.cmd53_count++;
-#endif
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_SDIO_TRACE)
- if (csrResult != CSR_RESULT_SUCCESS)
- {
- unifi_debug_log_to_buf("error=%X", csrResult);
- }
- else if (dir_is_write == UNIFI_SDIO_READ)
- {
- unifi_debug_hex_to_buf(pdata, count > CSR_WIFI_HIP_SDIO_TRACE_DATA_LENGTH?CSR_WIFI_HIP_SDIO_TRACE_DATA_LENGTH : count);
- }
- unifi_debug_string_to_buf("\n");
-#endif
- return csrResult; /* CSR SDIO (not HIP) error code */
-}
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_bulk_rw
- *
- * Transfer bulk data to or from the UniFi SDIO interface.
- * This function is used to read or write signals and bulk data.
- *
- * Arguments:
- * card Pointer to card structure.
- * handle Value to put in the Register Address field of the CMD53 req.
- *      pdata           Pointer to the data buffer to transfer.
- * direction One of UNIFI_SDIO_READ or UNIFI_SDIO_WRITE
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, non-zero error code on error:
- * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
- * CSR_RESULT_FAILURE an SDIO error occurred
- *
- * Notes:
- * This function uses SDIO CMD53, which is the block transfer mode.
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_bulk_rw(card_t *card, u32 handle, void *pdata,
- u32 len, s16 direction)
-{
-#define CMD53_RETRIES 3
- /*
- * Ideally instead of sleeping, we want to busy wait.
- * Currently there is no framework API to do this. When it becomes available,
- * we can use it to busy wait using usecs
- */
-#define REWIND_RETRIES 15 /* when REWIND_DELAY==1msec, or 250 when REWIND_DELAY==50usecs */
-#define REWIND_POLLING_RETRIES 5
-#define REWIND_DELAY 1 /* msec or 50usecs */
- CsrResult csrResult; /* SDIO error code */
- CsrResult r = CSR_RESULT_SUCCESS; /* HIP error code */
- s16 retries = CMD53_RETRIES;
- s16 stat_retries;
- u8 stat;
- s16 dump_read;
-#ifdef UNIFI_DEBUG
- u8 *pdata_lsb = ((u8 *)&pdata) + card->lsb;
-#endif
-#ifdef CSR_WIFI_MAKE_FAKE_CMD53_ERRORS
- static s16 fake_error;
-#endif
-
- dump_read = 0;
-#ifdef UNIFI_DEBUG
- if (*pdata_lsb & 1)
- {
-        unifi_notice(card->ospriv, "CMD53 request on an unaligned buffer (addr: 0x%X) dir %s-Host\n",
- pdata, (direction == UNIFI_SDIO_READ)?"To" : "From");
- if (direction == UNIFI_SDIO_WRITE)
- {
- dump(pdata, (u16)len);
- }
- else
- {
- dump_read = 1;
- }
- }
-#endif
-
- /* Defensive checks */
- if (!pdata)
- {
- unifi_error(card->ospriv, "Null pdata for unifi_bulk_rw() len: %d\n", len);
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
- if ((len & 1) || (len > 0xffff))
- {
- unifi_error(card->ospriv, "Impossible CMD53 length requested: %d\n", len);
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- while (1)
- {
- csrResult = csr_sdio_block_rw(card, card->function, handle,
- (u8 *)pdata, (u16)len,
- direction);
- if (csrResult == CSR_SDIO_RESULT_NO_DEVICE)
- {
- return CSR_WIFI_HIP_RESULT_NO_DEVICE;
- }
-#ifdef CSR_WIFI_MAKE_FAKE_CMD53_ERRORS
- if (++fake_error > 100)
- {
- fake_error = 90;
-            unifi_warning(card->ospriv, "Faking a CMD53 error\n");
- if (csrResult == CSR_RESULT_SUCCESS)
- {
- csrResult = CSR_RESULT_FAILURE;
- }
- }
-#endif
- if (csrResult == CSR_RESULT_SUCCESS)
- {
- if (dump_read)
- {
- dump(pdata, (u16)len);
- }
- break;
- }
-
- /*
- * At this point the SDIO driver should have written the I/O Abort
- * register to notify UniFi that the command has failed.
- * UniFi-1 and UniFi-2 (not UF6xxx) use the same register to store the
- * Deep Sleep State. This means we have to restore the Deep Sleep
-         * State (AWAKE in any case, since we cannot perform a CMD53 in any other
- * state) by rewriting the I/O Abort register to its previous value.
- */
- if (card->chip_id <= SDIO_CARD_ID_UNIFI_2)
- {
- (void)unifi_set_host_state(card, UNIFI_HOST_STATE_AWAKE);
- }
-
- /* If csr_sdio_block_rw() failed in a non-retryable way, or retries exhausted
- * then stop retrying
- */
- if (!retryable_sdio_error(csrResult))
- {
- unifi_error(card->ospriv, "Fatal error in a CMD53 transfer\n");
- break;
- }
-
- /*
- * These happen from time to time, try again
- */
- if (--retries == 0)
- {
- break;
- }
-
- unifi_trace(card->ospriv, UDBG4,
- "Error in a CMD53 transfer, retrying (h:%d,l:%u)...\n",
- (s16)handle & 0xff, len);
-
- /* The transfer failed, rewind and try again */
- r = unifi_write_8_or_16(card, card->sdio_ctrl_addr + 8,
- (u8)(handle & 0xff));
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- /*
- * If we can't even do CMD52 (register read/write) then
- * stop here.
- */
- unifi_error(card->ospriv, "Failed to write REWIND cmd\n");
- return r;
- }
-
- /* Signal the UniFi to look for the rewind request. */
- r = CardGenInt(card);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
-
- /* Wait for UniFi to acknowledge the rewind */
- stat_retries = REWIND_RETRIES;
- while (1)
- {
- r = unifi_read_8_or_16(card, card->sdio_ctrl_addr + 8, &stat);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to read REWIND status\n");
- return CSR_RESULT_FAILURE;
- }
-
- if (stat == 0)
- {
- break;
- }
- if (--stat_retries == 0)
- {
- unifi_error(card->ospriv, "Timeout waiting for REWIND ready\n");
- return CSR_RESULT_FAILURE;
- }
-
- /* Poll for the ack a few times */
- if (stat_retries < REWIND_RETRIES - REWIND_POLLING_RETRIES)
- {
- CsrThreadSleep(REWIND_DELAY);
- }
- }
- }
-
- /* The call to csr_sdio_block_rw() still failed after retrying */
- if (csrResult != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Block %s failed after %d retries\n",
- (direction == UNIFI_SDIO_READ)?"read" : "write",
- CMD53_RETRIES - retries);
- /* Report any SDIO error as a general i/o error */
- return CSR_RESULT_FAILURE;
- }
-
- /* Collect some stats */
- if (direction == UNIFI_SDIO_READ)
- {
- card->sdio_bytes_read += len;
- }
- else
- {
- card->sdio_bytes_written += len;
- }
-
- return CSR_RESULT_SUCCESS;
-} /* unifi_bulk_rw() */
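
/*
 * Editor-added sketch, not part of the original file: a thin wrapper that
 * pulls a block of signal/bulk data from the chip with unifi_bulk_rw(),
 * repeating the same even-length and 16-bit-length constraints that the
 * function enforces internally. The handle would normally come from the
 * HIP slot management; here it is just a parameter, and the helper name is
 * hypothetical.
 */
static CsrResult example_bulk_read(card_t *card, u32 handle,
                                   void *buf, u32 len)
{
    if ((len & 1) || (len > 0xffff))
    {
        return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
    }
    return unifi_bulk_rw(card, handle, buf, len, UNIFI_SDIO_READ);
}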
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_bulk_rw_noretry
- *
- * Transfer bulk data to or from the UniFi SDIO interface.
- * This function is used to read or write signals and bulk data.
- *
- * Arguments:
- * card Pointer to card structure.
- * handle Value to put in the Register Address field of
- * the CMD53 req.
- *      pdata           Pointer to the data buffer to transfer.
- * direction One of UNIFI_SDIO_READ or UNIFI_SDIO_WRITE
- *
- * Returns:
- * 0 on success, non-zero error code on error:
- * CSR_WIFI_HIP_RESULT_NO_DEVICE card was ejected
- * CSR_RESULT_FAILURE an SDIO error occurred
- *
- * Notes:
- * This function uses SDIO CMD53, which is the block transfer mode.
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_bulk_rw_noretry(card_t *card, u32 handle, void *pdata,
- u32 len, s16 direction)
-{
- CsrResult csrResult;
-
- csrResult = csr_sdio_block_rw(card, card->function, handle,
- (u8 *)pdata, (u16)len, direction);
- if (csrResult != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Block %s failed\n",
- (direction == UNIFI_SDIO_READ)?"read" : "write");
- return csrResult;
- }
-
- return CSR_RESULT_SUCCESS;
-} /* unifi_bulk_rw_noretry() */
-
-
diff --git a/drivers/staging/csr/csr_wifi_hip_chiphelper.c b/drivers/staging/csr/csr_wifi_hip_chiphelper.c
deleted file mode 100644
index 5cf5b8a..0000000
--- a/drivers/staging/csr/csr_wifi_hip_chiphelper.c
+++ /dev/null
@@ -1,793 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-#include "csr_macro.h"
-#include "csr_wifi_hip_chiphelper_private.h"
-
-#ifndef nelem
-#define nelem(a) (sizeof(a) / sizeof(a[0]))
-#endif
-
-#define counted(foo) { nelem(foo), foo }
-#define null_counted() { 0, NULL }
-
-/* The init values are a set of register writes that we must
- perform when we first connect to the chip to get it working.
-   They switch on the correct clocks and possibly set the host
-   interface as a wakeup source. They should not be used if
-   proper HIP operation is required, but are useful before we
- do a code download. */
-static const struct chip_helper_init_values init_vals_v1[] = {
- { 0xFDBB, 0xFFFF },
- { 0xFDB6, 0x03FF },
- { 0xFDB1, 0x01E3 },
- { 0xFDB3, 0x0FFF },
- { 0xFEE3, 0x08F0 },
- { 0xFEE7, 0x3C3F },
- { 0xFEE6, 0x0050 },
- { 0xFDBA, 0x0000 }
-};
-
-static const struct chip_helper_init_values init_vals_v2[] = {
- { 0xFDB6, 0x0FFF },
- { 0xF023, 0x3F3F },
- { 0xFDB1, 0x01E3 },
- { 0xFDB3, 0x0FFF },
- { 0xF003, 0x08F0 },
- { 0xF007, 0x3C3F },
- { 0xF006, 0x0050 }
-};
-
-
-static const struct chip_helper_init_values init_vals_v22_v23[] = {
- { 0xF81C, 0x00FF },
- /*{ 0x????, 0x???? }, */
- { 0xF80C, 0x1FFF },
- { 0xFA25, 0x001F },
- { 0xF804, 0x00FF },
- { 0xF802, 0x0FFF },
- /*{ 0x????, 0x???? },
- { 0x????, 0x???? },
- { 0x????, 0x???? }*/
-};
-
-static const u16 reset_program_a_v1_or_v2[] = {
- 0x0000
-};
-static const u16 reset_program_b_v1_or_v2[] = {
- 0x0010, 0xFE00, 0xA021, 0xFF00, 0x8111, 0x0009, 0x0CA4, 0x0114,
- 0x0280, 0x04F8, 0xFE00, 0x6F25, 0x06E0, 0x0010, 0xFC00, 0x0121,
- 0xFC00, 0x0225, 0xFE00, 0x7125, 0xFE00, 0x6D11, 0x03F0, 0xFE00,
- 0x6E25, 0x0008, 0x00E0
-};
-
-static const struct chip_helper_reset_values reset_program_v1_or_v2[] =
-{
- {
- MAKE_GP(REGISTERS, 0x000C),
- nelem(reset_program_a_v1_or_v2),
- reset_program_a_v1_or_v2
- },
- {
- MAKE_GP(MAC_PMEM, 0x000000),
- nelem(reset_program_b_v1_or_v2),
- reset_program_b_v1_or_v2
- }
-};
-
-static const struct chip_map_address_t unifi_map_address_v1_v2[] =
-{
- { 0xFE9F, 0xFE7B }, /* PM1_BANK_SELECT */
- { 0xFE9E, 0xFE78 }, /* PM2_BANK_SELECT */
- { 0xFE9D, 0xFE7E }, /* SHARED_DMEM_PAGE */
- { 0xFE91, 0xFE90 }, /* PROC_SELECT */
- { 0xFE8D, 0xFE8C }, /* STOP_STATUS */
-};
-
-static const struct chip_map_address_t unifi_map_address_v22_v23[] =
-{
- { 0xF8F9, 0xF8AC }, /* GW1_CONFIG */
- { 0xF8FA, 0xF8AD }, /* GW2_CONFIG */
- { 0xF8FB, 0xF8AE }, /* GW3_CONFIG */
- { 0xF830, 0xF81E }, /* PROC_SELECT */
- { 0xF831, 0xF81F }, /* STOP_STATUS */
- { 0xF8FC, 0xF8AF }, /* IO_LOG_ADDRESS */
-};
-
-static const struct chip_device_regs_t unifi_device_regs_null =
-{
- 0xFE81, /* GBL_CHIP_VERSION */
- 0x0000, /* GBL_MISC_ENABLES */
- 0x0000, /* DBG_EMU_CMD */
- {
- 0x0000, /* HOST.DBG_PROC_SELECT */
- 0x0000, /* HOST.DBG_STOP_STATUS */
- 0x0000, /* HOST.WINDOW1_PAGE */
- 0x0000, /* HOST.WINDOW2_PAGE */
- 0x0000, /* HOST.WINDOW3_PAGE */
- 0x0000 /* HOST.IO_LOG_ADDR */
- },
- {
- 0x0000, /* SPI.DBG_PROC_SELECT */
- 0x0000, /* SPI.DBG_STOP_STATUS */
- 0x0000, /* SPI.WINDOW1_PAGE */
- 0x0000, /* SPI.WINDOW2_PAGE */
- 0x0000, /* SPI.WINDOW3_PAGE */
- 0x0000 /* SPI.IO_LOG_ADDR */
- },
- 0x0000, /* DBG_RESET */
- 0x0000, /* > DBG_RESET_VALUE */
- 0x0000, /* DBG_RESET_WARN */
- 0x0000, /* DBG_RESET_WARN_VALUE */
- 0x0000, /* DBG_RESET_RESULT */
- 0xFFE9, /* XAP_PCH */
- 0xFFEA, /* XAP_PCL */
- 0x0000, /* PROC_PC_SNOOP */
- 0x0000, /* WATCHDOG_DISABLE */
- 0x0000, /* MAILBOX0 */
- 0x0000, /* MAILBOX1 */
- 0x0000, /* MAILBOX2 */
- 0x0000, /* MAILBOX3 */
- 0x0000, /* SDIO_HOST_INT */
- 0x0000, /* SHARED_IO_INTERRUPT */
- 0x0000, /* SDIO HIP HANDSHAKE */
- 0x0000 /* COEX_STATUS */
-};
-
-/* UF105x */
-static const struct chip_device_regs_t unifi_device_regs_v1 =
-{
- 0xFE81, /* GBL_CHIP_VERSION */
- 0xFE87, /* GBL_MISC_ENABLES */
- 0xFE9C, /* DBG_EMU_CMD */
- {
- 0xFE90, /* HOST.DBG_PROC_SELECT */
- 0xFE8C, /* HOST.DBG_STOP_STATUS */
- 0xFE7B, /* HOST.WINDOW1_PAGE */
- 0xFE78, /* HOST.WINDOW2_PAGE */
- 0xFE7E, /* HOST.WINDOW3_PAGE */
- 0x0000 /* HOST.IO_LOG_ADDR */
- },
- {
- 0xFE91, /* SPI.DBG_PROC_SELECT */
- 0xFE8D, /* SPI.DBG_STOP_STATUS */
- 0xFE9F, /* SPI.WINDOW1_PAGE */
- 0xFE9E, /* SPI.WINDOW2_PAGE */
- 0xFE9D, /* SPI.WINDOW3_PAGE */
- 0x0000 /* SPI.IO_LOG_ADDR */
- },
- 0xFE92, /* DBG_RESET */
- 0x0001, /* > DBG_RESET_VALUE */
- 0xFDA0, /* DBG_RESET_WARN (HOST_SELECT) */
- 0x0000, /* DBG_RESET_WARN_VALUE */
- 0xFE92, /* DBG_RESET_RESULT */
- 0xFFE9, /* XAP_PCH */
- 0xFFEA, /* XAP_PCL */
- 0x0051, /* PROC_PC_SNOOP */
- 0xFE70, /* WATCHDOG_DISABLE */
- 0xFE6B, /* MAILBOX0 */
- 0xFE6A, /* MAILBOX1 */
- 0xFE69, /* MAILBOX2 */
- 0xFE68, /* MAILBOX3 */
- 0xFE67, /* SDIO_HOST_INT */
- 0xFE65, /* SHARED_IO_INTERRUPT */
- 0xFDE9, /* SDIO HIP HANDSHAKE */
- 0x0000 /* COEX_STATUS */
-};
-
-/* UF2... */
-static const struct chip_device_regs_t unifi_device_regs_v2 =
-{
- 0xFE81, /* GBL_CHIP_VERSION */
- 0xFE87, /* GBL_MISC_ENABLES */
- 0xFE9C, /* DBG_EMU_CMD */
- {
- 0xFE90, /* HOST.DBG_PROC_SELECT */
- 0xFE8C, /* HOST.DBG_STOP_STATUS */
- 0xFE7B, /* HOST.WINDOW1_PAGE */
- 0xFE78, /* HOST.WINDOW2_PAGE */
- 0xFE7E, /* HOST.WINDOW3_PAGE */
- 0x0000 /* HOST.IO_LOG_ADDR */
- },
- {
- 0xFE91, /* SPI.DBG_PROC_SELECT */
- 0xFE8D, /* SPI.DBG_STOP_STATUS */
- 0xFE9F, /* SPI.WINDOW1_PAGE */
- 0xFE9E, /* SPI.WINDOW2_PAGE */
- 0xFE9D, /* SPI.WINDOW3_PAGE */
- 0x0000 /* SPI.IO_LOG_ADDR */
- },
- 0xFE92, /* DBG_RESET */
- 0x0000, /* > DBG_RESET_VALUE */
- 0xFDE9, /* DBG_RESET_WARN (TEST_FLASH_DATA - SHARED_MAILBOX2B) */
- 0xFFFF, /* DBG_RESET_WARN_VALUE */
- 0xFDE9, /* DBG_RESET_RESULT (TEST_FLASH_DATA) */
- 0xFFE9, /* XAP_PCH */
- 0xFFEA, /* XAP_PCL */
- 0x0051, /* PROC_PC_SNOOP */
- 0xFE70, /* WATCHDOG_DISABLE */
- 0xFE6B, /* MAILBOX0 */
- 0xFE6A, /* MAILBOX1 */
- 0xFE69, /* MAILBOX2 */
- 0xFE68, /* MAILBOX3 */
- 0xFE67, /* SDIO_HOST_INT */
- 0xFE65, /* SHARED_IO_INTERRUPT */
- 0xFE69, /* SDIO HIP HANDSHAKE */
- 0x0000 /* COEX_STATUS */
-};
-
-/* UF60xx */
-static const struct chip_device_regs_t unifi_device_regs_v22_v23 =
-{
- 0xFE81, /* GBL_CHIP_VERSION */
- 0xF84F, /* GBL_MISC_ENABLES */
- 0xF81D, /* DBG_EMU_CMD */
- {
- 0xF81E, /* HOST.DBG_PROC_SELECT */
- 0xF81F, /* HOST.DBG_STOP_STATUS */
- 0xF8AC, /* HOST.WINDOW1_PAGE */
- 0xF8AD, /* HOST.WINDOW2_PAGE */
- 0xF8AE, /* HOST.WINDOW3_PAGE */
- 0xF8AF /* HOST.IO_LOG_ADDR */
- },
- {
- 0xF830, /* SPI.DBG_PROC_SELECT */
- 0xF831, /* SPI.DBG_STOP_STATUS */
- 0xF8F9, /* SPI.WINDOW1_PAGE */
- 0xF8FA, /* SPI.WINDOW2_PAGE */
- 0xF8FB, /* SPI.WINDOW3_PAGE */
- 0xF8FC /* SPI.IO_LOG_ADDR */
- },
- 0xF82F, /* DBG_RESET */
- 0x0001, /* > DBG_RESET_VALUE */
- 0x0000, /* DBG_RESET_WARN */
- 0x0000, /* DBG_RESET_WARN_VALUE */
- 0xF82F, /* DBG_RESET_RESULT */
- 0xFFE9, /* XAP_PCH */
- 0xFFEA, /* XAP_PCL */
- 0x001B, /* PROC_PC_SNOOP */
- 0x0055, /* WATCHDOG_DISABLE */
- 0xF84B, /* MAILBOX0 */
- 0xF84C, /* MAILBOX1 */
- 0xF84D, /* MAILBOX2 */
- 0xF84E, /* MAILBOX3 */
- 0xF92F, /* SDIO_HOST_INT */
-    0xF92B, /* SDIO_FROMHOST_SCRATCH0 / SHARED_IO_INTERRUPT */
- 0xF84D, /* SDIO HIP HANDSHAKE (MAILBOX2) */
- 0xF9FB /* COEX_STATUS */
-};
-
-/* Program memory window on UF105x. */
-static const struct window_shift_info_t prog_window_array_unifi_v1_v2[CHIP_HELPER_WT_COUNT] =
-{
- { TRUE, 11, 0x0200 }, /* CODE RAM */
- { TRUE, 11, 0x0000 }, /* FLASH */
- { TRUE, 11, 0x0400 }, /* External SRAM */
- { FALSE, 0, 0 }, /* ROM */
- { FALSE, 0, 0 } /* SHARED */
-};
-
-/* Shared memory window on UF105x. */
-static const struct window_shift_info_t shared_window_array_unifi_v1_v2[CHIP_HELPER_WT_COUNT] =
-{
- { FALSE, 0, 0 }, /* CODE RAM */
- { FALSE, 0, 0 }, /* FLASH */
- { FALSE, 0, 0 }, /* External SRAM */
- { FALSE, 0, 0 }, /* ROM */
- { TRUE, 11, 0x0000 } /* SHARED */
-};
-
-/* One of the Generic Windows on UF60xx and later. */
-static const struct window_shift_info_t generic_window_array_unifi_v22_v23[CHIP_HELPER_WT_COUNT] =
-{
- { TRUE, 11, 0x3800 }, /* CODE RAM */
- { FALSE, 0, 0 }, /* FLASH */
- { FALSE, 0, 0 }, /* External SRAM */
- { TRUE, 11, 0x2000 }, /* ROM */
- { TRUE, 11, 0x0000 } /* SHARED */
-};
-
-/* The three windows on UF105x. */
-static const struct window_info_t prog1_window_unifi_v1_v2 = { 0x0000, 0x2000, 0x0080, prog_window_array_unifi_v1_v2 };
-static const struct window_info_t prog2_window_unifi_v1_v2 = { 0x2000, 0x2000, 0x0000, prog_window_array_unifi_v1_v2 };
-static const struct window_info_t shared_window_unifi_v1_v2 = { 0x4000, 0x2000, 0x0000, shared_window_array_unifi_v1_v2 };
-
-/* The three windows on UF60xx and later. */
-static const struct window_info_t generic1_window_unifi_v22_v23 = { 0x0000, 0x2000, 0x0080, generic_window_array_unifi_v22_v23 };
-static const struct window_info_t generic2_window_unifi_v22_v23 = { 0x2000, 0x2000, 0x0000, generic_window_array_unifi_v22_v23 };
-static const struct window_info_t generic3_window_unifi_v22_v23 = { 0x4000, 0x2000, 0x0000, generic_window_array_unifi_v22_v23 };
-
-static const struct chip_device_desc_t chip_device_desc_null =
-{
- { FALSE, 0x0000, 0x0000, 0x00 },
- "",
- "",
- null_counted(), /* init */
- null_counted(), /* reset_prog */
- &unifi_device_regs_null, /* regs */
- {
- FALSE, /* has_flash */
- FALSE, /* has_ext_sram */
- FALSE, /* has_rom */
- FALSE, /* has_bt */
- FALSE, /* has_wlan */
- },
- null_counted(),
- /* prog_offset */
- {
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000
- },
- /* data_offset */
- {
- 0x0000 /* ram */
- },
- /* windows */
- {
- NULL,
- NULL,
- NULL
- }
-};
-
-static const struct chip_device_desc_t unifi_device_desc_v1 =
-{
- { FALSE, 0xf0ff, 0x1001, 0x01 }, /* UF105x R01 */
- "UF105x",
- "UniFi-1",
- counted(init_vals_v1), /* init */
- counted(reset_program_v1_or_v2), /* reset_prog */
- &unifi_device_regs_v1, /* regs */
- {
- TRUE, /* has_flash */
- TRUE, /* has_ext_sram */
- FALSE, /* has_rom */
- FALSE, /* has_bt */
- TRUE, /* has_wlan */
- },
- counted(unifi_map_address_v1_v2), /* map */
- /* prog_offset */
- {
- 0x00100000, /* ram */
- 0x00000000, /* rom (invalid) */
- 0x00000000, /* flash */
- 0x00200000, /* ext_ram */
- },
- /* data_offset */
- {
- 0x8000 /* ram */
- },
- /* windows */
- {
- &prog1_window_unifi_v1_v2,
- &prog2_window_unifi_v1_v2,
- &shared_window_unifi_v1_v2
- }
-};
-
-static const struct chip_device_desc_t unifi_device_desc_v2 =
-{
- { FALSE, 0xf0ff, 0x2001, 0x02 }, /* UF2... R02 */
- "UF2...",
- "UniFi-2",
- counted(init_vals_v2), /* init */
- counted(reset_program_v1_or_v2), /* reset_prog */
- &unifi_device_regs_v2, /* regs */
- {
- TRUE, /* has_flash */
- TRUE, /* has_ext_sram */
- FALSE, /* has_rom */
- FALSE, /* has_bt */
- TRUE, /* has_wlan */
- },
- counted(unifi_map_address_v1_v2), /* map */
- /* prog_offset */
- {
- 0x00100000, /* ram */
- 0x00000000, /* rom (invalid) */
- 0x00000000, /* flash */
- 0x00200000, /* ext_ram */
- },
- /* data_offset */
- {
- 0x8000 /* ram */
- },
- /* windows */
- {
- &prog1_window_unifi_v1_v2,
- &prog2_window_unifi_v1_v2,
- &shared_window_unifi_v1_v2
- }
-};
-
-static const struct chip_device_desc_t unifi_device_desc_v3 =
-{
- { FALSE, 0xf0ff, 0x3001, 0x02 }, /* UF2... R03 */
- "UF2...",
- "UniFi-3",
- counted(init_vals_v2), /* init */
- counted(reset_program_v1_or_v2), /* reset_prog */
- &unifi_device_regs_v2, /* regs */
- {
- TRUE, /* has_flash */
- TRUE, /* has_ext_sram */
- FALSE, /* has_rom */
- FALSE, /* has_bt */
- TRUE, /* has_wlan */
- },
- counted(unifi_map_address_v1_v2), /* map */
- /* prog_offset */
- {
- 0x00100000, /* ram */
- 0x00000000, /* rom (invalid) */
- 0x00000000, /* flash */
- 0x00200000, /* ext_ram */
- },
- /* data_offset */
- {
- 0x8000 /* ram */
- },
- /* windows */
- {
- &prog1_window_unifi_v1_v2,
- &prog2_window_unifi_v1_v2,
- &shared_window_unifi_v1_v2
- }
-};
-
-static const struct chip_device_desc_t unifi_device_desc_v22 =
-{
- { FALSE, 0x00ff, 0x0022, 0x07 }, /* UF60xx */
- "UF60xx",
- "UniFi-4",
- counted(init_vals_v22_v23), /* init */
- null_counted(), /* reset_prog */
- &unifi_device_regs_v22_v23, /* regs */
- {
- FALSE, /* has_flash */
- FALSE, /* has_ext_sram */
- TRUE, /* has_rom */
- FALSE, /* has_bt */
- TRUE, /* has_wlan */
- },
- counted(unifi_map_address_v22_v23), /* map */
- /* prog_offset */
- {
- 0x00C00000, /* ram */
- 0x00000000, /* rom */
- 0x00000000, /* flash (invalid) */
- 0x00000000, /* ext_ram (invalid) */
- },
- /* data_offset */
- {
- 0x8000 /* ram */
- },
- /* windows */
- {
- &generic1_window_unifi_v22_v23,
- &generic2_window_unifi_v22_v23,
- &generic3_window_unifi_v22_v23
- }
-};
-
-static const struct chip_device_desc_t unifi_device_desc_v23 =
-{
- { FALSE, 0x00ff, 0x0023, 0x08 }, /* UF.... */
- "UF....",
- "UF.... (5)",
- counted(init_vals_v22_v23), /* init */
- null_counted(), /* reset_prog */
- &unifi_device_regs_v22_v23, /* regs */
- {
- FALSE, /* has_flash */
- FALSE, /* has_ext_sram */
- TRUE, /* has_rom */
- TRUE, /* has_bt */
- TRUE, /* has_wlan */
- },
- counted(unifi_map_address_v22_v23),
- /* prog_offset */
- {
- 0x00C00000, /* ram */
- 0x00000000, /* rom */
- 0x00000000, /* flash (invalid) */
- 0x00000000, /* ext_sram (invalid) */
- },
- /* data_offset */
- {
- 0x8000 /* ram */
- },
- /* windows */
- {
- &generic1_window_unifi_v22_v23,
- &generic2_window_unifi_v22_v23,
- &generic3_window_unifi_v22_v23
- }
-};
-
-static const struct chip_device_desc_t hyd_wlan_subsys_desc_v1 =
-{
- { FALSE, 0x00ff, 0x0044, 0x00 }, /* UF.... */
- "HYD...",
- "HYD... ",
- counted(init_vals_v22_v23), /* init */
- null_counted(), /* reset_prog */
- &unifi_device_regs_v22_v23, /* regs */
- {
- FALSE, /* has_flash */
- FALSE, /* has_ext_sram */
- TRUE, /* has_rom */
- FALSE, /* has_bt */
- TRUE, /* has_wlan */
- },
- counted(unifi_map_address_v22_v23),
- /* prog_offset */
- {
- 0x00C00000, /* ram */
- 0x00000000, /* rom */
- 0x00000000, /* flash (invalid) */
- 0x00000000, /* ext_sram (invalid) */
- },
- /* data_offset */
- {
- 0x8000 /* ram */
- },
- /* windows */
- {
- &generic1_window_unifi_v22_v23,
- &generic2_window_unifi_v22_v23,
- &generic3_window_unifi_v22_v23
- }
-};
-
-
-/* This is the list of all chips that we know about. I'm
- assuming that the order here will be important - we
-   might have multiple entries with the same SDIO id for
- instance. The first one in this list will be the one
- that is returned if a search is done on only that id.
- The client will then have to call GetVersionXXX again
- but with more detailed info.
-
- I don't know if we need to signal this up to the client
- in some way?
-
- (We get the SDIO id before we know anything else about
- the chip. We might not be able to read any of the other
- registers at first, but we still need to know about the
- chip). */
-static const struct chip_device_desc_t *chip_ver_to_desc[] =
-{
- &unifi_device_desc_v1, /* UF105x R01 */
- &unifi_device_desc_v2, /* UF2... R02 */
- &unifi_device_desc_v3, /* UF2... R03 */
- &unifi_device_desc_v22, /* UF60xx */
- &unifi_device_desc_v23, /* UF.... */
- &hyd_wlan_subsys_desc_v1
-};
-
-ChipDescript* ChipHelper_GetVersionSdio(u8 sdio_ver)
-{
- u32 i;
-
- for (i = 0; i < nelem(chip_ver_to_desc); i++)
- {
- if (chip_ver_to_desc[i]->chip_version.sdio == sdio_ver)
- {
- return chip_ver_to_desc[i];
- }
- }
-
- return &chip_device_desc_null;
-}
-
-
-ChipDescript* ChipHelper_GetVersionAny(u16 from_FF9A, u16 from_FE81)
-{
- u32 i;
-
- if ((from_FF9A & 0xFF00) != 0)
- {
- for (i = 0; i < nelem(chip_ver_to_desc); i++)
- {
- if (chip_ver_to_desc[i]->chip_version.pre_bc7 &&
- ((from_FF9A & chip_ver_to_desc[i]->chip_version.mask) ==
- chip_ver_to_desc[i]->chip_version.result))
- {
- return chip_ver_to_desc[i];
- }
- }
- }
- else
- {
- for (i = 0; i < nelem(chip_ver_to_desc); i++)
- {
- if (!chip_ver_to_desc[i]->chip_version.pre_bc7 &&
- ((from_FE81 & chip_ver_to_desc[i]->chip_version.mask) ==
- chip_ver_to_desc[i]->chip_version.result))
- {
- return chip_ver_to_desc[i];
- }
- }
- }
-
- return &chip_device_desc_null;
-}
-
-
-ChipDescript* ChipHelper_GetVersionUniFi(u16 ver)
-{
- return ChipHelper_GetVersionAny(0x0000, ver);
-}
-
-
-ChipDescript *ChipHelper_Null(void)
-{
- return &chip_device_desc_null;
-}
-
-
-ChipDescript* ChipHelper_GetVersionBlueCore(enum chip_helper_bluecore_age bc_age, u16 version)
-{
- if (bc_age == chip_helper_bluecore_pre_bc7)
- {
- return ChipHelper_GetVersionAny(version, 0x0000);
- }
- else
- {
- return ChipHelper_GetVersionAny(0x0000, version);
- }
-}
-
-
-/* Expand the DEF0 functions into simple code to return the
- correct thing. The DEF1 functions expand to nothing in
- this X macro expansion. */
-#define CHIP_HELPER_DEF0_C_DEF(ret_type, name, info) \
- ret_type ChipHelper_ ## name(ChipDescript * chip_help) \
- { \
- return chip_help->info; \
- }
-#define CHIP_HELPER_DEF1_C_DEF(ret_type, name, type1, name1)
-
-CHIP_HELPER_LIST(C_DEF)
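As an illustration of the expansion just described, the FriendlyName entry of CHIP_HELPER_LIST, run through the C_DEF macros above, produces an accessor of this shape (the other DEF0 entries expand the same way; the DEF1 entries expand to nothing here because their bodies are hand written below):

    /* Illustrative expansion of one DEF0 entry; whitespace added for readability. */
    const char *ChipHelper_FriendlyName(ChipDescript *chip_help)
    {
        return chip_help->friendly_name;
    }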
-
-/*
- * Map register addresses between HOST and SPI access.
- */
-u16 ChipHelper_MapAddress_SPI2HOST(ChipDescript *chip_help, u16 addr)
-{
- u32 i;
- for (i = 0; i < chip_help->map.len; i++)
- {
- if (chip_help->map.vals[i].spi == addr)
- {
- return chip_help->map.vals[i].host;
- }
- }
- return addr;
-}
-
-
-u16 ChipHelper_MapAddress_HOST2SPI(ChipDescript *chip_help, u16 addr)
-{
- u32 i;
- for (i = 0; i < chip_help->map.len; i++)
- {
- if (chip_help->map.vals[i].host == addr)
- {
- return chip_help->map.vals[i].spi;
- }
- }
- return addr;
-}
-
-
-/* The address returned by this function is the start of the
- window in the address space, that is where we can start
- accessing data from. If a section of the window at the
- start is unusable because something else is cluttering up
- the address map then that is taken into account and this
-   function returns the address just past that. */
-u16 ChipHelper_WINDOW_ADDRESS(ChipDescript *chip_help,
- enum chip_helper_window_index window)
-{
- if (window < CHIP_HELPER_WINDOW_COUNT &&
- chip_help->windows[window] != NULL)
- {
- return chip_help->windows[window]->address + chip_help->windows[window]->blocked;
- }
- return 0;
-}
-
-
-/* This returns the size of the window minus any blocked section */
-u16 ChipHelper_WINDOW_SIZE(ChipDescript *chip_help,
- enum chip_helper_window_index window)
-{
- if (window < CHIP_HELPER_WINDOW_COUNT &&
- chip_help->windows[window] != NULL)
- {
- return chip_help->windows[window]->size - chip_help->windows[window]->blocked;
- }
- return 0;
-}
-
-
-/* Get the register writes we should do to make sure that
- the chip is running with most clocks on. */
-u32 ChipHelper_ClockStartupSequence(ChipDescript *chip_help,
- const struct chip_helper_init_values **val)
-{
- *val = chip_help->init.vals;
- return chip_help->init.len;
-}
-
-
-/* Get the set of values that we should write to the chip to perform a reset. */
-u32 ChipHelper_HostResetSequence(ChipDescript *chip_help,
- const struct chip_helper_reset_values **val)
-{
- *val = chip_help->reset_prog.vals;
- return chip_help->reset_prog.len;
-}
-
-
-/* Decode a windowed access to the chip. */
-s32 ChipHelper_DecodeWindow(ChipDescript *chip_help,
- enum chip_helper_window_index window,
- enum chip_helper_window_type type,
- u32 offset,
- u16 *page, u16 *addr, u32 *len)
-{
- const struct window_info_t *win;
- const struct window_shift_info_t *mode;
- u16 of, pg;
-
- if (window >= CHIP_HELPER_WINDOW_COUNT)
- {
- return FALSE;
- }
- if ((win = chip_help->windows[window]) == NULL)
- {
- return FALSE;
- }
- if (type >= CHIP_HELPER_WT_COUNT)
- {
- return FALSE;
- }
- if ((mode = &win->mode[type]) == NULL)
- {
- return FALSE;
- }
- if (!mode->allowed)
- {
- return FALSE;
- }
-
- pg = (u16)(offset >> mode->page_shift) + mode->page_offset;
- of = (u16)(offset & ((1 << mode->page_shift) - 1));
- /* If 'blocked' is zero this does nothing, else decrease
- the page register and increase the offset until we aren't
- in the blocked region of the window. */
- while (of < win->blocked)
- {
- of += 1 << mode->page_shift;
- pg--;
- }
- *page = pg;
- *addr = win->address + of;
- *len = win->size - of;
- return TRUE;
-}
-
-
diff --git a/drivers/staging/csr/csr_wifi_hip_chiphelper.h b/drivers/staging/csr/csr_wifi_hip_chiphelper.h
deleted file mode 100644
index 09b3aef..0000000
--- a/drivers/staging/csr/csr_wifi_hip_chiphelper.h
+++ /dev/null
@@ -1,407 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-#ifndef CSR_WIFI_HIP_CHIPHELPER_H__
-#define CSR_WIFI_HIP_CHIPHELPER_H__
-
-
-#include <linux/types.h>
-
-/* The age of the BlueCore chip. This is probably not useful; if
-   you know the age then you can probably work out the version directly. */
-enum chip_helper_bluecore_age
-{
- chip_helper_bluecore_pre_bc7,
- chip_helper_bluecore_bc7_or_later
-};
-
-/* We support up to three windowed regions at the moment.
- Don't reorder these - they're used to index into an array. */
-enum chip_helper_window_index
-{
- CHIP_HELPER_WINDOW_1 = 0,
- CHIP_HELPER_WINDOW_2 = 1,
- CHIP_HELPER_WINDOW_3 = 2,
- CHIP_HELPER_WINDOW_COUNT = 3
-};
-
-/* These are the things that we can access through a window.
- Don't reorder these - they're used to index into an array. */
-enum chip_helper_window_type
-{
- CHIP_HELPER_WT_CODE_RAM = 0,
- CHIP_HELPER_WT_FLASH = 1,
- CHIP_HELPER_WT_EXT_SRAM = 2,
- CHIP_HELPER_WT_ROM = 3,
- CHIP_HELPER_WT_SHARED = 4,
- CHIP_HELPER_WT_COUNT = 5
-};
-
-/* Commands to stop and start the XAP */
-enum chip_helper_dbg_emu_cmd_enum
-{
- CHIP_HELPER_DBG_EMU_CMD_XAP_STEP_MASK = 0x0001,
- CHIP_HELPER_DBG_EMU_CMD_XAP_RUN_B_MASK = 0x0002,
- CHIP_HELPER_DBG_EMU_CMD_XAP_BRK_MASK = 0x0004,
- CHIP_HELPER_DBG_EMU_CMD_XAP_WAKEUP_MASK = 0x0008
-};
-
-/* Bitmasks for Stop and sleep status: DBG_SPI_STOP_STATUS & DBG_HOST_STOP_STATUS */
-enum chip_helper_dbg_stop_status_enum
-{
- CHIP_HELPER_DBG_STOP_STATUS_NONE_MASK = 0x0000,
- CHIP_HELPER_DBG_STOP_STATUS_P0_MASK = 0x0001,
- CHIP_HELPER_DBG_STOP_STATUS_P1_MASK = 0x0002,
- CHIP_HELPER_DBG_STOP_STATUS_P2_MASK = 0x0004,
- CHIP_HELPER_DBG_STOP_STATUS_SLEEP_STATUS_P0_MASK = 0x0008,
- CHIP_HELPER_DBG_STOP_STATUS_SLEEP_STATUS_P1_MASK = 0x0010,
- CHIP_HELPER_DBG_STOP_STATUS_SLEEP_STATUS_P2_MASK = 0x0020,
- /* Legacy names/alias */
- CHIP_HELPER_DBG_STOP_STATUS_MAC_MASK = 0x0001,
- CHIP_HELPER_DBG_STOP_STATUS_PHY_MASK = 0x0002,
- CHIP_HELPER_DBG_STOP_STATUS_BT_MASK = 0x0004,
- CHIP_HELPER_DBG_STOP_STATUS_SLEEP_STATUS_MAC_MASK = 0x0008,
- CHIP_HELPER_DBG_STOP_STATUS_SLEEP_STATUS_PHY_MASK = 0x0010,
- CHIP_HELPER_DBG_STOP_STATUS_SLEEP_STATUS_BT_MASK = 0x0020
-};
-
-/* Codes to disable the watchdog */
-enum chip_helper_watchdog_disable_enum
-{
- CHIP_HELPER_WATCHDOG_DISABLE_CODE1 = 0x6734,
- CHIP_HELPER_WATCHDOG_DISABLE_CODE2 = 0xD6BF,
- CHIP_HELPER_WATCHDOG_DISABLE_CODE3 = 0xC31E
-};
-
-/* Other bits have changed between versions */
-enum chip_helper_gbl_misc_enum
-{
- CHIP_HELPER_GBL_MISC_SPI_STOP_OUT_EN_MASK = 0x0001,
- CHIP_HELPER_GBL_MISC_MMU_INIT_DONE_MASK = 0x0004
-};
-
-/* Coex status register, contains interrupt status and reset pullup status.
- * CHIP_HELPER_COEX_STATUS_RST_PULLS_MSB_MASK can be used to check
- * for WAPI on R03 chips and later. */
-enum chip_helper_coex_status_mask_enum
-{
- CHIP_HELPER_COEX_STATUS_RST_PULLS_LSB_MASK = 0x0001,
- CHIP_HELPER_COEX_STATUS_RST_PULLS_MSB_MASK = 0x0008,
- CHIP_HELPER_COEX_STATUS_WL_FEC_PINS_LSB_MASK = 0x0010,
- CHIP_HELPER_COEX_STATUS_WL_FEC_PINS_MSB_MASK = 0x0080,
- CHIP_HELPER_COEX_STATUS_INT_UART_MASK = 0x0100,
- CHIP_HELPER_COEX_STATUS_INT_BT_LEG_MASK = 0x0200
-};
-
-/* How to select the different CPUs */
-enum chip_helper_dbg_proc_sel_enum
-{
- CHIP_HELPER_DBG_PROC_SEL_MAC = 0,
- CHIP_HELPER_DBG_PROC_SEL_PHY = 1,
- CHIP_HELPER_DBG_PROC_SEL_BT = 2,
- CHIP_HELPER_DBG_PROC_SEL_NONE = 2,
- CHIP_HELPER_DBG_PROC_SEL_BOTH = 3
-};
-
-/* These are the only registers that we have to know the
- address of before we know the chip version. */
-enum chip_helper_fixed_registers
-{
-    /* This is the address of GBL_CHIP_VERSION on BC7,
- UF105x, UF60xx and
- anything later than that. */
- CHIP_HELPER_UNIFI_GBL_CHIP_VERSION = 0xFE81,
-
- CHIP_HELPER_OLD_BLUECORE_GBL_CHIP_VERSION = 0xFF9A
-
- /* This isn't used at the moment (but might be needed
- to distinguish the BlueCore sub version?) */
- /* CHIP_HELPER_OLD_BLUECORE_ANA_VERSION_ID = 0xFF7D */
-};
-
-/* Address-value pairs for defining initialisation values */
-struct chip_helper_init_values
-{
- u16 addr;
- u16 value;
-};
-
-/* A block of data that should be written to the device */
-struct chip_helper_reset_values
-{
- u32 gp_address;
- u32 len;
- const u16 *data;
-};
-
-/*
- * This is the C API.
- */
-
-/* opaque type */
-typedef const struct chip_device_desc_t ChipDescript;
-
-/* Return a NULL descriptor */
-ChipDescript* ChipHelper_Null(void);
-
-/* This should get the correct version for any CSR chip.
- The two parameters are what is read from addresses
- 0xFF9A and 0xFE81 (OLD_BLUECORE_GBL_CHIP_VERSION and
- UNIFI_GBL_CHIP_VERSION). These should give a unique identity
- for most (all?) chips.
-
- FF9A is the old GBL_CHIP_VERSION register. If the high
- eight bits are zero then the chip is a new (BC7 +) one
- and FE81 is the _new_ GBL_CHIP_VERSION register. */
-ChipDescript* ChipHelper_GetVersionAny(u16 from_FF9A, u16 from_FE81);
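As a rough usage sketch (read_chip_reg() is a hypothetical placeholder for whatever register access the caller already has; it is not part of this API):

    /* Sketch only: read_chip_reg() is assumed, not provided by this header. */
    u16 old_ver = read_chip_reg(CHIP_HELPER_OLD_BLUECORE_GBL_CHIP_VERSION); /* 0xFF9A */
    u16 new_ver = read_chip_reg(CHIP_HELPER_UNIFI_GBL_CHIP_VERSION);        /* 0xFE81 */
    ChipDescript *chip = ChipHelper_GetVersionAny(old_ver, new_ver);

    if (chip == ChipHelper_Null())
    {
        /* chip not recognised */
    }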
-
-/* The chip is a UniFi, but we don't know which type
- The parameter is the value of UNIFI_GBL_CHIP_VERSION (0xFE81) */
-ChipDescript* ChipHelper_GetVersionUniFi(u16 version);
-
-/* This gets the version from the SDIO device id. This only
-   gives quite a coarse-grained version, so we should update once
-   we have access to the function N registers. */
-ChipDescript* ChipHelper_GetVersionSdio(u8 sdio_version);
-
-/* The chip is some sort of BlueCore. If "age" is "pre_bc7" then
- "version" is what was read from FF9A. If "age" is bc7_or_later
- then "version" is read from FE81. If we don't know if we're pre
- or post BC7 then we should use "GetVersionAny". */
-ChipDescript* ChipHelper_GetVersionBlueCore(enum chip_helper_bluecore_age age,
- u16 version);
-
-/* The main functions of this class are built with an X macro. This
- means we can generate the C and C++ versions from the same source
- without the two diverging.
-
- The DEF0 functions are simple and take no parameters. The first
- parameter to the macro is the return type. The second parameter
- is the function name and the third parameter is where to get the
- info from (this is hidden from the user).
-
- The DEF1 functions take one parameter. This time the third macro
- parameter is the type of this parameter, and the fourth macro
- parameter is the name of the parameter. The bodies of these
- functions are hand written. */
-#define CHIP_HELPER_LIST(m) \
- CHIP_HELPER_DEF0(m, (const char *, FriendlyName, friendly_name)) \
- CHIP_HELPER_DEF0(m, (const char *, MarketingName, marketing_name)) \
- CHIP_HELPER_DEF0(m, (u16, DBG_EMU_CMD, regs->dbg_emu_cmd)) \
- CHIP_HELPER_DEF0(m, (u16, DBG_HOST_PROC_SELECT, regs->host.dbg_proc_select)) \
- CHIP_HELPER_DEF0(m, (u16, DBG_HOST_STOP_STATUS, regs->host.dbg_stop_status)) \
- CHIP_HELPER_DEF0(m, (u16, HOST_WINDOW1_PAGE, regs->host.window1_page)) \
- CHIP_HELPER_DEF0(m, (u16, HOST_WINDOW2_PAGE, regs->host.window2_page)) \
- CHIP_HELPER_DEF0(m, (u16, HOST_WINDOW3_PAGE, regs->host.window3_page)) \
- CHIP_HELPER_DEF0(m, (u16, HOST_IO_LOG_ADDR, regs->host.io_log_addr)) \
- CHIP_HELPER_DEF0(m, (u16, DBG_SPI_PROC_SELECT, regs->spi.dbg_proc_select)) \
- CHIP_HELPER_DEF0(m, (u16, DBG_SPI_STOP_STATUS, regs->spi.dbg_stop_status)) \
- CHIP_HELPER_DEF0(m, (u16, SPI_WINDOW1_PAGE, regs->spi.window1_page)) \
- CHIP_HELPER_DEF0(m, (u16, SPI_WINDOW2_PAGE, regs->spi.window2_page)) \
- CHIP_HELPER_DEF0(m, (u16, SPI_WINDOW3_PAGE, regs->spi.window3_page)) \
- CHIP_HELPER_DEF0(m, (u16, SPI_IO_LOG_ADDR, regs->spi.io_log_addr)) \
- CHIP_HELPER_DEF0(m, (u16, DBG_RESET, regs->dbg_reset)) \
- CHIP_HELPER_DEF0(m, (u16, DBG_RESET_VALUE, regs->dbg_reset_value)) \
- CHIP_HELPER_DEF0(m, (u16, DBG_RESET_WARN, regs->dbg_reset_warn)) \
- CHIP_HELPER_DEF0(m, (u16, DBG_RESET_WARN_VALUE, regs->dbg_reset_warn_value)) \
- CHIP_HELPER_DEF0(m, (u16, DBG_RESET_RESULT, regs->dbg_reset_result)) \
- CHIP_HELPER_DEF0(m, (u16, WATCHDOG_DISABLE, regs->watchdog_disable)) \
- CHIP_HELPER_DEF0(m, (u16, PROC_PC_SNOOP, regs->proc_pc_snoop)) \
- CHIP_HELPER_DEF0(m, (u16, GBL_CHIP_VERSION, regs->gbl_chip_version)) \
- CHIP_HELPER_DEF0(m, (u16, GBL_MISC_ENABLES, regs->gbl_misc_enables)) \
- CHIP_HELPER_DEF0(m, (u16, XAP_PCH, regs->xap_pch)) \
- CHIP_HELPER_DEF0(m, (u16, XAP_PCL, regs->xap_pcl)) \
- CHIP_HELPER_DEF0(m, (u16, MAILBOX0, regs->mailbox0)) \
- CHIP_HELPER_DEF0(m, (u16, MAILBOX1, regs->mailbox1)) \
- CHIP_HELPER_DEF0(m, (u16, MAILBOX2, regs->mailbox2)) \
- CHIP_HELPER_DEF0(m, (u16, MAILBOX3, regs->mailbox3)) \
- CHIP_HELPER_DEF0(m, (u16, SDIO_HIP_HANDSHAKE, regs->sdio_hip_handshake)) \
- CHIP_HELPER_DEF0(m, (u16, SDIO_HOST_INT, regs->sdio_host_int)) \
- CHIP_HELPER_DEF0(m, (u16, COEX_STATUS, regs->coex_status)) \
- CHIP_HELPER_DEF0(m, (u16, SHARED_IO_INTERRUPT, regs->shared_io_interrupt)) \
- CHIP_HELPER_DEF0(m, (u32, PROGRAM_MEMORY_RAM_OFFSET, prog_offset.ram)) \
- CHIP_HELPER_DEF0(m, (u32, PROGRAM_MEMORY_ROM_OFFSET, prog_offset.rom)) \
- CHIP_HELPER_DEF0(m, (u32, PROGRAM_MEMORY_FLASH_OFFSET, prog_offset.flash)) \
- CHIP_HELPER_DEF0(m, (u32, PROGRAM_MEMORY_EXT_SRAM_OFFSET, prog_offset.ext_sram)) \
- CHIP_HELPER_DEF0(m, (u16, DATA_MEMORY_RAM_OFFSET, data_offset.ram)) \
- CHIP_HELPER_DEF0(m, (s32, HasFlash, bools.has_flash)) \
- CHIP_HELPER_DEF0(m, (s32, HasExtSram, bools.has_ext_sram)) \
- CHIP_HELPER_DEF0(m, (s32, HasRom, bools.has_rom)) \
- CHIP_HELPER_DEF0(m, (s32, HasBt, bools.has_bt)) \
- CHIP_HELPER_DEF0(m, (s32, HasWLan, bools.has_wlan)) \
- CHIP_HELPER_DEF1(m, (u16, WINDOW_ADDRESS, enum chip_helper_window_index, window)) \
- CHIP_HELPER_DEF1(m, (u16, WINDOW_SIZE, enum chip_helper_window_index, window)) \
- CHIP_HELPER_DEF1(m, (u16, MapAddress_SPI2HOST, u16, addr)) \
- CHIP_HELPER_DEF1(m, (u16, MapAddress_HOST2SPI, u16, addr)) \
- CHIP_HELPER_DEF1(m, (u32, ClockStartupSequence, const struct chip_helper_init_values **, val)) \
- CHIP_HELPER_DEF1(m, (u32, HostResetSequence, const struct chip_helper_reset_values **, val))
-
-/* Some magic to help the expansion */
-#define CHIP_HELPER_DEF0(a, b) \
- CHIP_HELPER_DEF0_ ## a b
-#define CHIP_HELPER_DEF1(a, b) \
- CHIP_HELPER_DEF1_ ## a b
-
-/* Macros so that when we expand the list we get "C" function prototypes. */
-#define CHIP_HELPER_DEF0_C_DEC(ret_type, name, info) \
- ret_type ChipHelper_ ## name(ChipDescript * chip_help);
-#define CHIP_HELPER_DEF1_C_DEC(ret_type, name, type1, name1) \
- ret_type ChipHelper_ ## name(ChipDescript * chip_help, type1 name1);
-
-CHIP_HELPER_LIST(C_DEC)
-
-/* FriendlyName
- MarketingName
-
- These two functions return human readable strings that describe
- the chip. FriendlyName returns something that a software engineer
- at CSR might understand. MarketingName returns something more like
- an external name for a CSR chip.
-*/
-/* DBG_EMU_CMD
- WATCHDOG_DISABLE
- PROC_PC_SNOOP
- GBL_CHIP_VERSION
- XAP_PCH
- XAP_PCL
-
- These registers are used to control the XAPs.
-*/
-/* DBG_HOST_PROC_SELECT DBG_HOST_STOP_STATUS
- HOST_WINDOW1_PAGE HOST_WINDOW2_PAGE HOST_WINDOW3_PAGE
- HOST_IO_LOG_ADDR
- DBG_SPI_PROC_SELECT DBG_SPI_STOP_STATUS
- SPI_WINDOW1_PAGE SPI_WINDOW2_PAGE SPI_WINDOW3_PAGE
- SPI_IO_LOG_ADDR
-
-   These registers are used to control the XAPs and the memory
-   windows, normally while debugging the code on chip. There
-   are two versions of these registers, one for access via SPI
- and another for access via the host interface.
-*/
-/* DBG_RESET
- DBG_RESET_VALUE
- DBG_RESET_WARN
- DBG_RESET_WARN_VALUE
- DBG_RESET_RESULT
-
- These registers are used to reset the XAP. This can be
- quite complex for some chips. If DBG_RESET_WARN is non
- zero the DBG_RESET_WARN_VALUE should be written to address
-   DBG_RESET_WARN before the reset is performed. DBG_RESET_VALUE
- should then be written to DBG_RESET to make the reset happen.
- The DBG_RESET_RESULT register should contain 0 if the reset
- was successful.
-*/
-/* GBL_MISC_ENABLES
-
- This register controls some special chip features. It
-   should be used with care as it changes quite a lot between
- chip versions.
-*/
-/* MAILBOX0
- MAILBOX1
- MAILBOX2
- MAILBOX3
-
- The mailbox registers are for communication between the host
-   and the firmware. Their use is described in part by the host
-   interface protocol specification.
-*/
-/* SDIO_HIP_HANDSHAKE
-
- This is one of the more important SDIO HIP registers. On some
- chips it has the same value as one of the mailbox registers
- and on other chips it is different.
-*/
-/* SDIO_HOST_INT
- SHARED_IO_INTERRUPT
-
- These registers are used by some versions of the host interface
- protocol specification. Their names should probably be changed
- to hide the registers and to expose the functions more.
-*/
-/* COEX_STATUS
-
- Coex status register, contains interrupt status and reset
- pullup status. The latter is used to detect WAPI.
-*/
-/* PROGRAM_MEMORY_RAM_OFFSET
- PROGRAM_MEMORY_ROM_OFFSET
- PROGRAM_MEMORY_FLASH_OFFSET
- PROGRAM_MEMORY_EXT_SRAM_OFFSET
- DATA_MEMORY_RAM_OFFSET
-
- These are constants that describe the offset of the different
- memory types in the two different address spaces.
-*/
-/* HasFlash HasExtSram HasRom
- HasBt HasWLan
-
- These are a set of bools describing the chip.
-*/
-/* WINDOW_ADDRESS WINDOW_SIZE
-
- These two functions return the size and address of the windows.
- The address is the address of the lowest value in the address
- map that is part of the window and the size is the number of
- visible words.
-
- Some of the windows have their lowest portion covered by
-   registers. For these windows the address is the first address
-   after the registers and the size is the size excluding the part
- covered by registers.
-*/
-/* MapAddress_SPI2HOST
- MapAddress_HOST2SPI
-
- The debugging interface is duplicated on UniFi and later chips
-   so that there are two versions - one over the SPI interface and
- the other over the SDIO interface. These functions map the
- registers between these two interfaces.
-*/
-/* ClockStartupSequence
-
- This function returns the list of register value pairs that
- should be forced into UniFi to enable SPI communication. This
- set of registers is not needed if the firmware is running, but
- will be needed if the device is being booted from cold. These
-   register writes enable the clocks and set up the PLL to a basic
-   working state. SPI access might be unreliable until these writes
-   have occurred (and they may take multiple attempts).
-*/
-/* HostResetSequence
-
- This returns a number of chunks of data and generic pointers.
- All of the XAPs should be stopped. The data should be written
- to the generic pointers. The instruction pointer for the MAC
- should then be set to the start of program memory and then the
-   MAC should be set running ("go"). This will reset the chip in a reliable
- and orderly manner without resetting the SDIO interface. It
- is therefore not needed if the chip is being accessed by the
- SPI interface (the DBG_RESET_ mechanism can be used instead).
-*/
-
-/* The Decode Window function is more complex. For the window
- 'window' it tries to return the address and page register
- value needed to see offset 'offset' of memory type 'type'.
-
-   It returns 1 on success and 0 on failure. 'page' is what
-   should be written to the page register. 'addr' is the
-   address in the XAP's 16-bit address map to read from. 'len'
- is the length that we can read without having to change
- the page registers. */
-s32 ChipHelper_DecodeWindow(ChipDescript *chip_help,
- enum chip_helper_window_index window,
- enum chip_helper_window_type type,
- u32 offset,
- u16 *page, u16 *addr, u32 *len);
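A minimal calling sketch (the descriptor 'chip' comes from one of the ChipHelper_GetVersion* calls; the 0x0123 offset and the choice of window are arbitrary examples):

    u16 page, addr;
    u32 len;

    if (ChipHelper_DecodeWindow(chip, CHIP_HELPER_WINDOW_3,
                                CHIP_HELPER_WT_SHARED, 0x0123,
                                &page, &addr, &len))
    {
        /* Write 'page' to the window 3 page register (see the
           HOST_WINDOW3_PAGE / SPI_WINDOW3_PAGE accessors above), then
           read up to 'len' words starting at address 'addr'. */
    }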
-
-#endif
diff --git a/drivers/staging/csr/csr_wifi_hip_chiphelper_private.h b/drivers/staging/csr/csr_wifi_hip_chiphelper_private.h
deleted file mode 100644
index e5e5799..0000000
--- a/drivers/staging/csr/csr_wifi_hip_chiphelper_private.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-#ifndef CSR_WIFI_HIP_CHIPHELPER_PRIVATE_H__
-#define CSR_WIFI_HIP_CHIPHELPER_PRIVATE_H__
-
-
-#include "csr_wifi_hip_chiphelper.h"
-
-/* This GP stuff should be somewhere else? */
-
-/* Memory spaces encoded in top byte of Generic Pointer type */
-#define UNIFI_SH_DMEM 0x01 /* Shared Data Memory */
-#define UNIFI_EXT_FLASH 0x02 /* External FLASH */
-#define UNIFI_EXT_SRAM 0x03 /* External SRAM */
-#define UNIFI_REGISTERS 0x04 /* Registers */
-#define UNIFI_PHY_DMEM 0x10 /* PHY Data Memory */
-#define UNIFI_PHY_PMEM 0x11 /* PHY Program Memory */
-#define UNIFI_PHY_ROM 0x12 /* PHY ROM */
-#define UNIFI_MAC_DMEM 0x20 /* MAC Data Memory */
-#define UNIFI_MAC_PMEM 0x21 /* MAC Program Memory */
-#define UNIFI_MAC_ROM 0x22 /* MAC ROM */
-#define UNIFI_BT_DMEM 0x30 /* BT Data Memory */
-#define UNIFI_BT_PMEM 0x31 /* BT Program Memory */
-#define UNIFI_BT_ROM 0x32 /* BT ROM */
-
-#define MAKE_GP(R, O) (((UNIFI_ ## R) << 24) | (O))
-#define GP_OFFSET(GP) ((GP) & 0xFFFFFF)
-#define GP_SPACE(GP) (((GP) >> 24) & 0xFF)
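For example (the 0x1234 offset is arbitrary):

    u32 gp     = MAKE_GP(MAC_DMEM, 0x1234);  /* (UNIFI_MAC_DMEM << 24) | 0x1234 */
    u8  space  = GP_SPACE(gp);               /* 0x20, i.e. UNIFI_MAC_DMEM */
    u32 offset = GP_OFFSET(gp);              /* 0x001234 */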
-
-
-/* Address value pairs */
-struct val_array_t
-{
- u32 len;
- const struct chip_helper_init_values *vals;
-};
-
-/* Just a (counted) u16 array */
-struct data_array_t
-{
- u32 len;
- const u16 *vals;
-};
-
-struct reset_prog_t
-{
- u32 len;
- const struct chip_helper_reset_values *vals;
-};
-
-/* The addresses of registers that are equivalent but on
- different host transports. */
-struct chip_map_address_t
-{
- u16 spi, host;
-};
-
-struct map_array_t
-{
- u32 len;
- const struct chip_map_address_t *vals;
-};
-
-struct chip_device_regs_per_transport_t
-{
- u16 dbg_proc_select;
- u16 dbg_stop_status;
- u16 window1_page; /* PROG_PMEM1 or GW1 */
- u16 window2_page; /* PROG_PMEM2 or GW2 */
- u16 window3_page; /* SHARED or GW3 */
- u16 io_log_addr;
-};
-
-struct chip_device_regs_t
-{
- u16 gbl_chip_version;
- u16 gbl_misc_enables;
- u16 dbg_emu_cmd;
- struct chip_device_regs_per_transport_t host;
- struct chip_device_regs_per_transport_t spi;
- u16 dbg_reset;
- u16 dbg_reset_value;
- u16 dbg_reset_warn;
- u16 dbg_reset_warn_value;
- u16 dbg_reset_result;
- u16 xap_pch;
- u16 xap_pcl;
- u16 proc_pc_snoop;
- u16 watchdog_disable;
- u16 mailbox0;
- u16 mailbox1;
- u16 mailbox2;
- u16 mailbox3;
- u16 sdio_host_int;
- u16 shared_io_interrupt;
- u16 sdio_hip_handshake;
- u16 coex_status; /* Allows WAPI detection */
-};
-
-/* If allowed is false then this window does not provide this
- type of access.
-   This describes how an offset is turned into a "page" value.
-   The offset is shifted right by 'page_shift' and then has
-   'page_offset' added. The resulting value should then be
-   written to the page register. */
-struct window_shift_info_t
-{
- s32 allowed;
- u32 page_shift;
- u16 page_offset;
-};
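Worked example of the conversion, mirroring the arithmetic in ChipHelper_DecodeWindow() and using the ROM entry visible at the top of the deleted chiphelper.c hunk (page_shift 11, page_offset 0x2000); the 0x12345 offset is arbitrary:

    u16 pg = (u16)(0x12345 >> 11) + 0x2000;     /* 0x24 + 0x2000 = 0x2024 */
    u16 of = (u16)(0x12345 & ((1 << 11) - 1));  /* 0x345 */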
-
-/* Each window has an address and size. These are obvious. It then
- has a description for each type of memory that might be accessed
-   through it. There might also be a blocked region at the start of
-   the window ('blocked'), meaning that many addresses at the start
-   of the window are unusable. */
-struct window_info_t
-{
- u16 address;
- u16 size;
- u16 blocked;
- const struct window_shift_info_t *mode;
-};
-
-/* If GBL_CHIP_VERSION AND'ed with 'mask' is equal to 'result'
-   then this is the correct set of info. If pre_bc7 is true then the
-   address of GBL_CHIP_VERSION is FF9A, else it is FE81. */
-struct chip_version_t
-{
- s32 pre_bc7;
- u16 mask;
- u16 result;
- u8 sdio;
-};
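For instance, the UF60xx entry in the deleted chiphelper.c (unifi_device_desc_v22) has mask 0x00ff, result 0x0022 and pre_bc7 FALSE, so a hypothetical reading of 0x1B22 from FE81 selects it:

    /* 0x1B22 is an invented example value; mask/result come from unifi_device_desc_v22. */
    if ((0x1B22 & 0x00ff) == 0x0022)
    {
        /* matched - this descriptor describes the chip */
    }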
-
-struct chip_device_desc_t
-{
- struct chip_version_t chip_version;
-
- /* This is a text string that a human might find useful (BC02, UF105x) */
- const char *friendly_name;
- /* This is what we show to customers */
- const char *marketing_name;
-
- /* Initialisation values to write following a reset */
- struct val_array_t init;
-
- /* Binary sequence for hard reset */
- struct reset_prog_t reset_prog;
-
- /* The register map */
- const struct chip_device_regs_t *regs;
-
- /* Some misc. info on the chip */
- struct
- {
- u32 has_flash : 1;
- u32 has_ext_sram : 1;
- u32 has_rom : 1;
- u32 has_bt : 1;
- u32 has_wlan : 1;
- } bools;
-
- /* This table is used to remap register addresses depending on what
- host interface is used. On the BC7 and later chips there are
-       multiple sets of memory window registers, one for each host
-       interface (SDIO / SPI). The correct one is needed. */
- struct map_array_t map;
-
- /* The offsets into the program address space of the different types of memory.
- The RAM offset is probably the most useful. */
- struct
- {
- u32 ram;
- u32 rom;
- u32 flash;
- u32 ext_sram;
- } prog_offset;
-
- /* The offsets into the data address space of interesting things. */
- struct
- {
- u16 ram;
- /* maybe add shared / page tables? */
- } data_offset;
-
- /* Information on the different windows */
- const struct window_info_t *windows[CHIP_HELPER_WINDOW_COUNT];
-};
-
-#endif /* CSR_WIFI_HIP_CHIPHELPER_PRIVATE_H__ */
diff --git a/drivers/staging/csr/csr_wifi_hip_conversions.h b/drivers/staging/csr/csr_wifi_hip_conversions.h
deleted file mode 100644
index bf7a52e..0000000
--- a/drivers/staging/csr/csr_wifi_hip_conversions.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/*
- * ---------------------------------------------------------------------------
- *
- * FILE: csr_wifi_hip_conversions.h
- *
- * PURPOSE:
- * This header file provides the macros for converting to and from
- * wire format.
- * These macros *MUST* work for little-endian AND big-endian hosts.
- *
- * ---------------------------------------------------------------------------
- */
-#ifndef __CSR_WIFI_HIP_CONVERSIONS_H__
-#define __CSR_WIFI_HIP_CONVERSIONS_H__
-
-#define SIZEOF_UINT16 2
-#define SIZEOF_UINT32 4
-#define SIZEOF_UINT64 8
-
-#define SIZEOF_SIGNAL_HEADER 6
-#define SIZEOF_DATAREF 4
-
-
-/*
- * Macro to retrieve the signal ID from a wire-format signal.
- */
-#define GET_SIGNAL_ID(_buf) CSR_GET_UINT16_FROM_LITTLE_ENDIAN((_buf))
-
-/*
- * Macros to retrieve and set the DATAREF fields in a packed (i.e. wire-format)
- * HIP signal.
- */
-#define GET_PACKED_DATAREF_SLOT(_buf, _ref) \
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN(((_buf) + SIZEOF_SIGNAL_HEADER + ((_ref) * SIZEOF_DATAREF) + 0))
-
-#define GET_PACKED_DATAREF_LEN(_buf, _ref) \
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN(((_buf) + SIZEOF_SIGNAL_HEADER + ((_ref) * SIZEOF_DATAREF) + 2))
-
-#define SET_PACKED_DATAREF_SLOT(_buf, _ref, _slot) \
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN((_slot), ((_buf) + SIZEOF_SIGNAL_HEADER + ((_ref) * SIZEOF_DATAREF) + 0))
-
-#define SET_PACKED_DATAREF_LEN(_buf, _ref, _len) \
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN((_len), ((_buf) + SIZEOF_SIGNAL_HEADER + ((_ref) * SIZEOF_DATAREF) + 2))
-
-#define GET_PACKED_MA_PACKET_REQUEST_FRAME_PRIORITY(_buf) \
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN(((_buf) + SIZEOF_SIGNAL_HEADER + UNIFI_MAX_DATA_REFERENCES * SIZEOF_DATAREF + 8))
-
-#define GET_PACKED_MA_PACKET_REQUEST_HOST_TAG(_buf) \
- CSR_GET_UINT32_FROM_LITTLE_ENDIAN(((_buf) + SIZEOF_SIGNAL_HEADER + UNIFI_MAX_DATA_REFERENCES * SIZEOF_DATAREF + 4))
-
-#define GET_PACKED_MA_PACKET_CONFIRM_HOST_TAG(_buf) \
- CSR_GET_UINT32_FROM_LITTLE_ENDIAN(((_buf) + SIZEOF_SIGNAL_HEADER + UNIFI_MAX_DATA_REFERENCES * SIZEOF_DATAREF + 8))
-
-#define GET_PACKED_MA_PACKET_CONFIRM_TRANSMISSION_STATUS(_buf) \
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN(((_buf) + SIZEOF_SIGNAL_HEADER + UNIFI_MAX_DATA_REFERENCES * SIZEOF_DATAREF + 2))
-
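A usage sketch (CSR_GET_UINT16_FROM_LITTLE_ENDIAN and friends, plus UNIFI_MAX_DATA_REFERENCES, are defined in other CSR headers; 'wire' is assumed to point at a received, packed HIP signal):

    u16 sig_id = GET_SIGNAL_ID(wire);
    u16 slot0  = GET_PACKED_DATAREF_SLOT(wire, 0);  /* slot of data reference 0 */
    u16 len0   = GET_PACKED_DATAREF_LEN(wire, 0);   /* length of data reference 0 */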
-
-s32 get_packed_struct_size(const u8 *buf);
-CsrResult read_unpack_signal(const u8 *ptr, CSR_SIGNAL *sig);
-CsrResult write_pack(const CSR_SIGNAL *sig, u8 *ptr, u16 *sig_len);
-
-#endif /* __CSR_WIFI_HIP_CONVERSIONS_H__ */
-
diff --git a/drivers/staging/csr/csr_wifi_hip_download.c b/drivers/staging/csr/csr_wifi_hip_download.c
deleted file mode 100644
index 2f44a38..0000000
--- a/drivers/staging/csr/csr_wifi_hip_download.c
+++ /dev/null
@@ -1,819 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/*
- * ---------------------------------------------------------------------------
- * FILE: csr_wifi_hip_download.c
- *
- * PURPOSE:
- * Routines for downloading firmware to UniFi.
- *
- * ---------------------------------------------------------------------------
- */
-#include <linux/slab.h>
-#include "csr_wifi_hip_unifi.h"
-#include "csr_wifi_hip_unifiversion.h"
-#include "csr_wifi_hip_card.h"
-#include "csr_wifi_hip_xbv.h"
-
-#undef CSR_WIFI_IGNORE_PATCH_VERSION_MISMATCH
-
-static CsrResult do_patch_download(card_t *card, void *dlpriv,
- xbv1_t *pfwinfo, u32 boot_ctrl_addr);
-
-static CsrResult do_patch_convert_download(card_t *card,
- void *dlpriv, xbv1_t *pfwinfo);
-
-/*
- * ---------------------------------------------------------------------------
- * _find_in_slut
- *
- * Find the offset of the appropriate object in the SLUT of a card
- *
- * Arguments:
- * card Pointer to card struct
- * psym Pointer to symbol object.
- * id set up by caller
- * obj will be set up by this function
- * pslut Pointer to SLUT address, if 0xffffffff then it must be
- * read from the chip.
- * Returns:
- * CSR_RESULT_SUCCESS on success
- * Non-zero on error,
- * CSR_WIFI_HIP_RESULT_NOT_FOUND if not found
- * ---------------------------------------------------------------------------
- */
-static CsrResult _find_in_slut(card_t *card, symbol_t *psym, u32 *pslut)
-{
- u32 slut_address;
- u16 finger_print;
- CsrResult r;
- CsrResult csrResult;
-
- /* Get SLUT address */
- if (*pslut == 0xffffffff)
- {
- r = card_wait_for_firmware_to_start(card, &slut_address);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Firmware hasn't started\n");
- return r;
- }
- *pslut = slut_address;
-
- /*
- * Firmware has started so set the SDIO bus clock to the initial speed,
- * faster than UNIFI_SDIO_CLOCK_SAFE_HZ, to speed up the f/w download.
- */
- csrResult = CsrSdioMaxBusClockFrequencySet(card->sdio_if, UNIFI_SDIO_CLOCK_INIT_HZ);
- if (csrResult != CSR_RESULT_SUCCESS)
- {
- r = ConvertCsrSdioToCsrHipResult(card, csrResult);
- return r;
- }
- card->sdio_clock_speed = UNIFI_SDIO_CLOCK_INIT_HZ;
- }
- else
- {
- slut_address = *pslut; /* Use previously discovered address */
- }
- unifi_trace(card->ospriv, UDBG4, "SLUT addr: 0x%lX\n", slut_address);
-
- /*
- * Check the SLUT fingerprint.
- * The slut_address is a generic pointer so we must use unifi_card_read16().
- */
- unifi_trace(card->ospriv, UDBG4, "Looking for SLUT finger print\n");
- finger_print = 0;
- r = unifi_card_read16(card, slut_address, &finger_print);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to read SLUT finger print\n");
- return r;
- }
-
- if (finger_print != SLUT_FINGERPRINT)
- {
- unifi_error(card->ospriv, "Failed to find SLUT fingerprint\n");
- return CSR_RESULT_FAILURE;
- }
-
-    /* Symbol table starts immediately after the fingerprint */
- slut_address += 2;
-
- while (1)
- {
- u16 id;
- u32 obj;
-
- r = unifi_card_read16(card, slut_address, &id);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
- slut_address += 2;
-
- if (id == CSR_SLT_END)
- {
- /* End of table reached: not found */
- r = CSR_WIFI_HIP_RESULT_RANGE;
- break;
- }
-
- r = unifi_read32(card, slut_address, &obj);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
- slut_address += 4;
-
- unifi_trace(card->ospriv, UDBG3, " found SLUT id %02d.%08lx\n", id, obj);
-
- r = CSR_WIFI_HIP_RESULT_NOT_FOUND;
- /* Found search term? */
- if (id == psym->id)
- {
- unifi_trace(card->ospriv, UDBG1, " matched SLUT id %02d.%08lx\n", id, obj);
- psym->obj = obj;
- r = CSR_RESULT_SUCCESS;
- break;
- }
- }
-
- return r;
-}
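For reference, the SLUT layout that the loop above walks, as implied by the address increments in this function (a 16-bit fingerprint, then id/obj pairs until CSR_SLT_END):

    /*  slut_address + 0 : u16 fingerprint   (must equal SLUT_FINGERPRINT)
        slut_address + 2 : u16 id    --.
        slut_address + 4 : u32 obj     | repeated until id == CSR_SLT_END
        slut_address + 8 : u16 id    --'
        ...                                                               */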
-
-
-/*
- * ---------------------------------------------------------------------------
- * do_patch_convert_download
- *
- * Download the given firmware image to the UniFi, converting from FWDL
- * to PTDL XBV format.
- *
- * Arguments:
- * card Pointer to card struct
- * dlpriv Pointer to source firmware image
- * fwinfo Pointer to source firmware info struct
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, CSR error code on error
- *
- * Notes:
- * ---------------------------------------------------------------------------
- */
-static CsrResult do_patch_convert_download(card_t *card, void *dlpriv, xbv1_t *pfwinfo)
-{
- CsrResult r;
- u32 slut_base = 0xffffffff;
- void *pfw;
- u32 psize;
- symbol_t sym;
-
- /* Reset the chip to guarantee that the ROM loader is running */
- r = unifi_init(card);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv,
- "do_patch_convert_download: failed to re-init UniFi\n");
- return r;
- }
-
- /* If no unifi_helper is running, the firmware version must be read */
- if (card->build_id == 0)
- {
- u32 ver = 0;
- sym.id = CSR_SLT_BUILD_ID_NUMBER;
- sym.obj = 0; /* To be updated by _find_in_slut() */
-
- unifi_trace(card->ospriv, UDBG1, "Need f/w version\n");
-
- /* Find chip build id entry in SLUT */
- r = _find_in_slut(card, &sym, &slut_base);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to find CSR_SLT_BUILD_ID_NUMBER\n");
- return CSR_RESULT_FAILURE;
- }
-
- /* Read running f/w version */
- r = unifi_read32(card, sym.obj, &ver);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to read f/w id\n");
- return CSR_RESULT_FAILURE;
- }
- card->build_id = ver;
- }
-
- /* Convert the ptest firmware to a patch against the running firmware */
- pfw = xbv_to_patch(card, unifi_fw_read, dlpriv, pfwinfo, &psize);
- if (!pfw)
- {
- unifi_error(card->ospriv, "Failed to convert f/w to patch");
- return CSR_WIFI_HIP_RESULT_NO_MEMORY;
- }
- else
- {
- void *desc;
- sym.id = CSR_SLT_BOOT_LOADER_CONTROL;
- sym.obj = 0; /* To be updated by _find_in_slut() */
-
- /* Find boot loader control entry in SLUT */
- r = _find_in_slut(card, &sym, &slut_base);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to find BOOT_LOADER_CONTROL\n");
- kfree(pfw);
- return CSR_RESULT_FAILURE;
- }
-
- r = unifi_set_host_state(card, UNIFI_HOST_STATE_AWAKE);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to wake UniFi\n");
- }
-
- /* Get a dlpriv for the patch buffer so that unifi_fw_read() can
- * access it.
- */
- desc = unifi_fw_open_buffer(card->ospriv, pfw, psize);
- if (!desc)
- {
- kfree(pfw);
- return CSR_WIFI_HIP_RESULT_NO_MEMORY;
- }
-
- /* Download the patch */
- unifi_info(card->ospriv, "Downloading converted f/w as patch\n");
- r = unifi_dl_patch(card, desc, sym.obj);
- kfree(pfw);
- unifi_fw_close_buffer(card->ospriv, desc);
-
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Converted patch download failed\n");
- return r;
- }
- else
- {
- unifi_trace(card->ospriv, UDBG1, "Converted patch downloaded\n");
- }
-
- /* This command starts the firmware */
- r = unifi_do_loader_op(card, sym.obj + 6, UNIFI_BOOT_LOADER_RESTART);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to write loader restart cmd\n");
- }
-
- return r;
- }
-}
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_dl_firmware
- *
- * Download the given firmware image to the UniFi.
- *
- * Arguments:
- * card Pointer to card struct
- * dlpriv A context pointer from the calling function to be
- * passed when calling unifi_fw_read().
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success,
- * CSR_WIFI_HIP_RESULT_NO_MEMORY memory allocation failed
- * CSR_WIFI_HIP_RESULT_INVALID_VALUE error in XBV file
- * CSR_RESULT_FAILURE SDIO error
- *
- * Notes:
- * Stops and resets the chip, does the download and runs the new
- * firmware.
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_dl_firmware(card_t *card, void *dlpriv)
-{
- xbv1_t *fwinfo;
- CsrResult r;
-
- fwinfo = kmalloc(sizeof(xbv1_t), GFP_KERNEL);
- if (fwinfo == NULL)
- {
- unifi_error(card->ospriv, "Failed to allocate memory for firmware\n");
- return CSR_WIFI_HIP_RESULT_NO_MEMORY;
- }
-
- /*
- * Scan the firmware file to find the TLVs we are interested in.
- * These are:
- * - check we support the file format version in VERF
- * - SLTP Symbol Lookup Table Pointer
- * - FWDL firmware download segments
- * - FWOV firmware overlay segment
- * - VMEQ Register probe tests to verify matching h/w
- */
- r = xbv1_parse(card, unifi_fw_read, dlpriv, fwinfo);
- if (r != CSR_RESULT_SUCCESS || fwinfo->mode != xbv_firmware)
- {
- unifi_error(card->ospriv, "File type is %s, expected firmware.\n",
- fwinfo->mode == xbv_patch?"patch" : "unknown");
- kfree(fwinfo);
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- /* UF6xxx doesn't accept firmware, only patches. Therefore we convert
- * the file to patch format with version numbers matching the current
- * running firmware, and then download via the patch mechanism.
- * The sole purpose of this is to support production test firmware across
- * different ROM releases, the test firmware being provided in non-patch
- * format.
- */
- if (card->chip_id > SDIO_CARD_ID_UNIFI_2)
- {
- unifi_info(card->ospriv, "Must convert f/w to patch format\n");
- r = do_patch_convert_download(card, dlpriv, fwinfo);
- }
- else
- {
- /* Older UniFi chips allowed firmware to be directly loaded onto the
- * chip, which is no longer supported.
- */
- unifi_error(card->ospriv, "Only patch downloading supported\n");
- r = CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- kfree(fwinfo);
- return r;
-} /* unifi_dl_firmware() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_dl_patch
- *
- * Load the given patch set into UniFi.
- *
- * Arguments:
- * card Pointer to card struct
- * dlpriv The os specific handle to the firmware file.
- * boot_ctrl The address of the boot loader control structure.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success,
- * CSR_WIFI_HIP_RESULT_NO_MEMORY memory allocation failed
- * CSR_WIFI_HIP_RESULT_INVALID_VALUE error in XBV file
- * CSR_RESULT_FAILURE SDIO error
- *
- * Notes:
- * This ends up telling UniFi to restart.
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_dl_patch(card_t *card, void *dlpriv, u32 boot_ctrl)
-{
- xbv1_t *fwinfo;
- CsrResult r;
-
- unifi_info(card->ospriv, "unifi_dl_patch %p %08x\n", dlpriv, boot_ctrl);
-
- fwinfo = kmalloc(sizeof(xbv1_t), GFP_KERNEL);
- if (fwinfo == NULL)
- {
- unifi_error(card->ospriv, "Failed to allocate memory for patches\n");
- return CSR_WIFI_HIP_RESULT_NO_MEMORY;
- }
-
- /*
- * Scan the firmware file to find the TLVs we are interested in.
- * These are:
- * - check we support the file format version in VERF
- * - FWID The build ID of the ROM that we can patch
- * - PTDL patch download segments
- */
- r = xbv1_parse(card, unifi_fw_read, dlpriv, fwinfo);
- if (r != CSR_RESULT_SUCCESS || fwinfo->mode != xbv_patch)
- {
- kfree(fwinfo);
- unifi_error(card->ospriv, "Failed to read in patch file\n");
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- /*
- * We have to check the build id read from the SLUT against that
- * for the patch file. They have to match exactly.
- * "card->build_id" == XBV1.PTCH.FWID
- */
- if (card->build_id != fwinfo->build_id)
- {
- unifi_error(card->ospriv, "Wrong patch file for chip (chip = %lu, file = %lu)\n",
- card->build_id, fwinfo->build_id);
- kfree(fwinfo);
-#ifndef CSR_WIFI_IGNORE_PATCH_VERSION_MISMATCH
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
-#else
- fwinfo = NULL;
- dlpriv = NULL;
- return CSR_RESULT_SUCCESS;
-#endif
- }
-
- r = do_patch_download(card, dlpriv, fwinfo, boot_ctrl);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to patch image\n");
- }
-
- kfree(fwinfo);
-
- return r;
-} /* unifi_dl_patch() */
-
-
-void* unifi_dl_fw_read_start(card_t *card, s8 is_fw)
-{
- card_info_t card_info;
-
- unifi_card_info(card, &card_info);
- unifi_trace(card->ospriv, UDBG5,
- "id=%d, ver=0x%x, fw_build=%u, fw_hip=0x%x, block_size=%d\n",
- card_info.chip_id, card_info.chip_version,
- card_info.fw_build, card_info.fw_hip_version,
- card_info.sdio_block_size);
-
- return unifi_fw_read_start(card->ospriv, is_fw, &card_info);
-}
-
-
-/*
- * ---------------------------------------------------------------------------
- * safe_read_shared_location
- *
 - *      Read a shared memory location repeatedly until two consecutive
 - *      readings return the same value.
- *
- * Arguments:
- * card Pointer to card context struct.
- * unifi_addr UniFi shared-data-memory address to access.
- * pdata Pointer to a byte variable for the value read.
- *
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, CSR error code on failure
- * ---------------------------------------------------------------------------
- */
-static CsrResult safe_read_shared_location(card_t *card, u32 address, u8 *pdata)
-{
- CsrResult r;
- u16 limit = 1000;
- u8 b, b2;
-
- *pdata = 0;
-
- r = unifi_read_8_or_16(card, address, &b);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
-
- while (limit--)
- {
- r = unifi_read_8_or_16(card, address, &b2);
- if (r != CSR_RESULT_SUCCESS)
- {
- return r;
- }
-
- /* When we have a stable value, return it */
- if (b == b2)
- {
- *pdata = b;
- return CSR_RESULT_SUCCESS;
- }
-
- b = b2;
- }
-
- return CSR_RESULT_FAILURE;
-} /* safe_read_shared_location() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_do_loader_op
- *
- * Send a loader / boot_loader command to the UniFi and wait for
- * it to complete.
- *
- * Arguments:
- * card Pointer to card context struct.
- * op_addr The address of the loader operation control word.
- * opcode The operation to perform.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success
- * CSR_RESULT_FAILURE SDIO error or SDIO/XAP timeout
- * ---------------------------------------------------------------------------
- */
-
-/*
- * Ideally instead of sleeping, we want to busy wait.
- * Currently there is no framework API to do this. When it becomes available,
- * we can use it to busy wait using usecs
- */
-#define OPERATION_TIMEOUT_LOOPS (100) /* when OPERATION_TIMEOUT_DELAY==1, (500) otherwise */
-#define OPERATION_TIMEOUT_DELAY 1 /* msec, or 200usecs */
-
-CsrResult unifi_do_loader_op(card_t *card, u32 op_addr, u8 opcode)
-{
- CsrResult r;
- s16 op_retries;
-
- unifi_trace(card->ospriv, UDBG4, "Loader cmd 0x%0x -> 0x%08x\n", opcode, op_addr);
-
- /* Set the Operation command byte to the opcode */
- r = unifi_write_8_or_16(card, op_addr, opcode);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to write loader copy command\n");
- return r;
- }
-
- /* Wait for Operation command byte to be Idle */
- /* Typically takes ~100us */
- op_retries = 0;
- r = CSR_RESULT_SUCCESS;
- while (1)
- {
- u8 op;
-
- /*
- * Read the memory location until two successive reads give
- * the same value.
- * Then handle it.
- */
- r = safe_read_shared_location(card, op_addr, &op);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to read loader status\n");
- break;
- }
-
- if (op == UNIFI_LOADER_IDLE)
- {
- /* Success */
- break;
- }
-
- if (op != opcode)
- {
- unifi_error(card->ospriv, "Error reported by loader: 0x%X\n", op);
- r = CSR_RESULT_FAILURE;
- break;
- }
-
- /* Allow 500us timeout */
- if (++op_retries >= OPERATION_TIMEOUT_LOOPS)
- {
- unifi_error(card->ospriv, "Timeout waiting for loader to ack transfer\n");
- /* Stop XAPs to aid post-mortem */
- r = unifi_card_stop_processor(card, UNIFI_PROC_BOTH);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to stop UniFi processors\n");
- }
- else
- {
- r = CSR_RESULT_FAILURE;
- }
- break;
- }
- CsrThreadSleep(OPERATION_TIMEOUT_DELAY);
- } /* Loop exits with r != CSR_RESULT_SUCCESS on error */
-
- return r;
-} /* unifi_do_loader_op() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * send_ptdl_to_unifi
- *
- * Copy a patch block from userland to the UniFi.
- * This function reads data, 2K at a time, from userland and writes
- * it to the UniFi.
- *
- * Arguments:
- * card A pointer to the card structure
- * dlpriv The os specific handle for the firmware file
- *      ptdl            A pointer to the PTDL block
- * handle The buffer handle to use for the xfer
- * op_addr The address of the loader operation control word
- *
- * Returns:
- * Number of bytes sent (Positive) or negative value indicating
- * error code:
- * CSR_WIFI_HIP_RESULT_NO_MEMORY memory allocation failed
- * CSR_WIFI_HIP_RESULT_INVALID_VALUE error in XBV file
- * CSR_RESULT_FAILURE SDIO error
- * ---------------------------------------------------------------------------
- */
-static CsrResult send_ptdl_to_unifi(card_t *card, void *dlpriv,
- const struct PTDL *ptdl, u32 handle,
- u32 op_addr)
-{
- u32 offset;
- u8 *buf;
- s32 data_len;
- u32 write_len;
- CsrResult r;
- const u16 buf_size = 2 * 1024;
-
- offset = ptdl->dl_offset;
- data_len = ptdl->dl_size;
-
- if (data_len > buf_size)
- {
- unifi_error(card->ospriv, "PTDL block is too large (%u)\n",
- ptdl->dl_size);
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- buf = kmalloc(buf_size, GFP_KERNEL);
- if (buf == NULL)
- {
- unifi_error(card->ospriv, "Failed to allocate transfer buffer for firmware download\n");
- return CSR_WIFI_HIP_RESULT_NO_MEMORY;
- }
-
- r = CSR_RESULT_SUCCESS;
-
- if (unifi_fw_read(card->ospriv, dlpriv, offset, buf, data_len) != data_len)
- {
- unifi_error(card->ospriv, "Failed to read from file\n");
- }
- else
- {
- /* We can always round these if the host wants to */
- if (card->sdio_io_block_pad)
- {
- write_len = (data_len + (card->sdio_io_block_size - 1)) &
- ~(card->sdio_io_block_size - 1);
-
- /* Zero out the rest of the buffer (This isn't needed, but it
- * makes debugging things later much easier). */
- memset(buf + data_len, 0, write_len - data_len);
- }
- else
- {
- write_len = data_len;
- }
-
- r = unifi_bulk_rw_noretry(card, handle, buf, write_len, UNIFI_SDIO_WRITE);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "CMD53 failed writing %d bytes to handle %ld\n",
- data_len, handle);
- }
- else
- {
- /*
- * Can change the order of things to overlap read from file
- * with copy to unifi
- */
- r = unifi_do_loader_op(card, op_addr, UNIFI_BOOT_LOADER_PATCH);
- }
- }
-
- kfree(buf);
-
- if (r != CSR_RESULT_SUCCESS && r != CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- unifi_error(card->ospriv, "Failed to copy block of %u bytes to UniFi\n",
- ptdl->dl_size);
- }
-
- return r;
-} /* send_ptdl_to_unifi() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * do_patch_download
- *
- * This function downloads a set of patches to UniFi and then
- * causes it to restart.
- *
- * Arguments:
- * card Pointer to card struct.
- * dlpriv A context pointer from the calling function to be
- * used when reading the XBV file. This can be NULL
- * in which case not patches are applied.
- *                      in which case no patches are applied.
- * XBV file.
- * boot_ctrl_addr The address of the boot loader control structure.
- *
- * Returns:
- * 0 on success, or an error code
- *      CSR_WIFI_HIP_RESULT_INVALID_VALUE for a bad loader version number
- * ---------------------------------------------------------------------------
- */
-static CsrResult do_patch_download(card_t *card, void *dlpriv, xbv1_t *pfwinfo, u32 boot_ctrl_addr)
-{
- CsrResult r;
- s32 i;
- u16 loader_version;
- u16 handle;
- u32 total_bytes;
-
- /*
- * Read info from the SDIO Loader Control Data Structure
- */
- /* Check the loader version */
- r = unifi_card_read16(card, boot_ctrl_addr, &loader_version);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Patch download: Failed to read loader version\n");
- return r;
- }
- unifi_trace(card->ospriv, UDBG2, "Patch download: boot loader version 0x%04X\n", loader_version);
- switch (loader_version)
- {
- case 0x0000:
- break;
-
- default:
- unifi_error(card->ospriv, "Patch loader version (0x%04X) is not supported by this driver\n",
- loader_version);
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- /* Retrieve the handle to use with CMD53 */
- r = unifi_card_read16(card, boot_ctrl_addr + 4, &handle);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Patch download: Failed to read loader handle\n");
- return r;
- }
-
- /* Set the mask of LEDs to flash */
- if (card->loader_led_mask)
- {
- r = unifi_card_write16(card, boot_ctrl_addr + 2,
- (u16)card->loader_led_mask);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Patch download: Failed to write LED mask\n");
- return r;
- }
- }
-
- total_bytes = 0;
-
- /* Copy download data to UniFi memory */
- for (i = 0; i < pfwinfo->num_ptdl; i++)
- {
- unifi_trace(card->ospriv, UDBG3, "Patch download: %d Downloading for %d from offset %d\n",
- i,
- pfwinfo->ptdl[i].dl_size,
- pfwinfo->ptdl[i].dl_offset);
-
- r = send_ptdl_to_unifi(card, dlpriv, &pfwinfo->ptdl[i],
- handle, boot_ctrl_addr + 6);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- return r;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Patch failed after %u bytes\n",
- total_bytes);
- return r;
- }
- total_bytes += pfwinfo->ptdl[i].dl_size;
- }
-
- return CSR_RESULT_SUCCESS;
-} /* do_patch_download() */
-
-
diff --git a/drivers/staging/csr/csr_wifi_hip_dump.c b/drivers/staging/csr/csr_wifi_hip_dump.c
deleted file mode 100644
index 7b7eec49..0000000
--- a/drivers/staging/csr/csr_wifi_hip_dump.c
+++ /dev/null
@@ -1,837 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/*
- * ---------------------------------------------------------------------------
- * FILE: csr_wifi_hip_dump.c
- *
- * PURPOSE:
- * Routines for retrieving and buffering core status from the UniFi
- *
- * ---------------------------------------------------------------------------
- */
-#include <linux/slab.h>
-#include "csr_wifi_hip_unifi.h"
-#include "csr_wifi_hip_unifiversion.h"
-#include "csr_wifi_hip_card.h"
-
-/* Locations to capture in dump (XAP words) */
-#define HIP_CDUMP_FIRST_CPUREG (0xFFE0) /* First CPU register */
-#define HIP_CDUMP_FIRST_LO (0) /* Start of low address range */
-#define HIP_CDUMP_FIRST_HI_MAC (0x3C00) /* Start of MAC high area */
-#define HIP_CDUMP_FIRST_HI_PHY (0x1C00) /* Start of PHY high area */
-#define HIP_CDUMP_FIRST_SH (0) /* Start of shared memory area */
-
-#define HIP_CDUMP_NCPUREGS (10) /* No. of 16-bit XAP registers */
-#define HIP_CDUMP_NWORDS_LO (0x0100) /* Low area size in 16-bit words */
-#define HIP_CDUMP_NWORDS_HI (0x0400) /* High area size in 16-bit words */
-#define HIP_CDUMP_NWORDS_SH (0x0500) /* Shared memory area size, 16-bit words */
-
-#define HIP_CDUMP_NUM_ZONES 7 /* Number of UniFi memory areas to capture */
-
-/* Mini-coredump state */
-typedef struct coredump_buf
-{
- u16 count; /* serial number of dump */
- u32 timestamp; /* host's system time at capture */
- s16 requestor; /* request: 0=auto dump, 1=manual */
- u16 chip_ver;
- u32 fw_ver;
- u16 *zone[HIP_CDUMP_NUM_ZONES];
-
- struct coredump_buf *next; /* circular list */
- struct coredump_buf *prev; /* circular list */
-} coredump_buffer;
-
-/* Structure used to describe a zone of chip memory captured by mini-coredump */
-struct coredump_zone
-{
- unifi_coredump_space_t space; /* XAP memory space this zone covers */
- enum unifi_dbg_processors_select cpu; /* XAP CPU core selector */
- u32 gp; /* Generic Pointer to memory zone on XAP */
- u16 offset; /* 16-bit XAP word offset of zone in memory space */
- u16 length; /* Length of zone in XAP words */
-};
-
-static CsrResult unifi_coredump_from_sdio(card_t *card, coredump_buffer *dump_buf);
-static CsrResult unifi_coredump_read_zones(card_t *card, coredump_buffer *dump_buf);
-static CsrResult unifi_coredump_read_zone(card_t *card, u16 *zone,
- const struct coredump_zone *def);
-static s32 get_value_from_coredump(const coredump_buffer *dump,
- const unifi_coredump_space_t space, const u16 offset);
-
-/* Table of chip memory zones we capture on mini-coredump */
-static const struct coredump_zone zonedef_table[HIP_CDUMP_NUM_ZONES] = {
- { UNIFI_COREDUMP_MAC_REG, UNIFI_PROC_MAC, UNIFI_MAKE_GP(REGISTERS, HIP_CDUMP_FIRST_CPUREG * 2), HIP_CDUMP_FIRST_CPUREG, HIP_CDUMP_NCPUREGS },
- { UNIFI_COREDUMP_PHY_REG, UNIFI_PROC_PHY, UNIFI_MAKE_GP(REGISTERS, HIP_CDUMP_FIRST_CPUREG * 2), HIP_CDUMP_FIRST_CPUREG, HIP_CDUMP_NCPUREGS },
- { UNIFI_COREDUMP_SH_DMEM, UNIFI_PROC_INVALID, UNIFI_MAKE_GP(SH_DMEM, HIP_CDUMP_FIRST_SH * 2), HIP_CDUMP_FIRST_SH, HIP_CDUMP_NWORDS_SH },
- { UNIFI_COREDUMP_MAC_DMEM, UNIFI_PROC_MAC, UNIFI_MAKE_GP(MAC_DMEM, HIP_CDUMP_FIRST_LO * 2), HIP_CDUMP_FIRST_LO, HIP_CDUMP_NWORDS_LO },
- { UNIFI_COREDUMP_MAC_DMEM, UNIFI_PROC_MAC, UNIFI_MAKE_GP(MAC_DMEM, HIP_CDUMP_FIRST_HI_MAC * 2), HIP_CDUMP_FIRST_HI_MAC, HIP_CDUMP_NWORDS_HI },
- { UNIFI_COREDUMP_PHY_DMEM, UNIFI_PROC_PHY, UNIFI_MAKE_GP(PHY_DMEM, HIP_CDUMP_FIRST_LO * 2), HIP_CDUMP_FIRST_LO, HIP_CDUMP_NWORDS_LO },
- { UNIFI_COREDUMP_PHY_DMEM, UNIFI_PROC_PHY, UNIFI_MAKE_GP(PHY_DMEM, HIP_CDUMP_FIRST_HI_PHY * 2), HIP_CDUMP_FIRST_HI_PHY, HIP_CDUMP_NWORDS_HI },
-};
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_coredump_request_at_next_reset
- *
- * Request that a mini-coredump be performed when the driver has
- * completed resetting the UniFi device.
- *
- * Arguments:
- * card Pointer to card struct
- * enable If non-zero, sets the request.
- * If zero, cancels any pending request.
- *
- * Returns:
- * CSR_RESULT_SUCCESS or CSR HIP error code
- *
- * Notes:
- * This function is typically called once the driver has detected that
- * the UniFi device has become unresponsive due to a crash or an internal
- * watchdog reset. The driver must reset the device to regain communication
- * and, immediately after that, the mini-coredump can be captured.
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_coredump_request_at_next_reset(card_t *card, s8 enable)
-{
- CsrResult r;
-
- if (enable)
- {
- unifi_trace(card->ospriv, UDBG2, "Mini-coredump requested after reset\n");
- }
-
- if (card == NULL)
- {
- r = CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
- else
- {
- card->request_coredump_on_reset = enable ? 1 : 0;
- r = CSR_RESULT_SUCCESS;
- }
-
- return r;
-}
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_coredump_handle_request
- *
- * Performs a coredump now, if one was requested, and clears the request.
- *
- * Arguments:
- * card Pointer to card struct
- *
- * Returns:
- * CSR_RESULT_SUCCESS or CSR HIP error code
- *
- * Notes:
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_coredump_handle_request(card_t *card)
-{
- CsrResult r = CSR_RESULT_SUCCESS;
-
- if (card == NULL)
- {
- r = CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
- else
- {
- if (card->request_coredump_on_reset == 1)
- {
- card->request_coredump_on_reset = 0;
- r = unifi_coredump_capture(card, NULL);
- }
- }
-
- return r;
-}
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_coredump_capture
- *
- * Capture the current status of the UniFi device.
- * Various registers are buffered for future offline inspection.
- *
- * Arguments:
- * card Pointer to card struct
- * req Pointer to request struct, or NULL:
- * A coredump requested manually by the user app
- * will have a request struct pointer; an automatic
- * coredump will have a NULL pointer.
- * Returns:
- * CSR_RESULT_SUCCESS on success,
- * CSR_RESULT_FAILURE SDIO error
- * CSR_WIFI_HIP_RESULT_INVALID_VALUE Initialisation not complete
- *
- * Notes:
- * The result is a filled entry in the circular buffer of core dumps,
- * values from which can be extracted to userland via an ioctl.
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_coredump_capture(card_t *card, struct unifi_coredump_req *req)
-{
- CsrResult r = CSR_RESULT_SUCCESS;
- static u16 dump_seq_no = 1;
- u32 time_of_capture;
-
- if (card->dump_next_write == NULL)
- {
- r = CSR_RESULT_SUCCESS;
- goto done;
- }
-
- /* Reject forced capture before initialisation has happened */
- if (card->helper == NULL)
- {
- r = CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- goto done;
- }
-
-
- /*
- * Force a mini-coredump capture right now
- */
- time_of_capture = CsrTimeGet(NULL);
- unifi_info(card->ospriv, "Mini-coredump capture at t=%u\n", time_of_capture);
-
- /* Wake up the processors so we can talk to them */
- r = unifi_set_host_state(card, UNIFI_HOST_STATE_AWAKE);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to wake UniFi\n");
- goto done;
- }
- CsrThreadSleep(20);
-
- /* Stop both XAPs */
- unifi_trace(card->ospriv, UDBG4, "Stopping XAPs for coredump capture\n");
- r = unifi_card_stop_processor(card, UNIFI_PROC_BOTH);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to stop UniFi XAPs\n");
- goto done;
- }
-
- /* Dump core into the next available slot in the circular list */
- r = unifi_coredump_from_sdio(card, card->dump_next_write);
- if (r == CSR_RESULT_SUCCESS)
- {
- /* Record whether the dump was manual or automatic */
- card->dump_next_write->requestor = (req ? 1 : 0);
- card->dump_next_write->timestamp = time_of_capture;
- /* Advance to the next buffer */
- card->dump_next_write->count = dump_seq_no++;
- card->dump_cur_read = card->dump_next_write;
- card->dump_next_write = card->dump_next_write->next;
-
- /* Sequence no. of zero indicates slot not in use, so handle wrap */
- if (dump_seq_no == 0)
- {
- dump_seq_no = 1;
- }
-
- unifi_trace(card->ospriv, UDBG3,
- "Coredump (%p), SeqNo=%d, cur_read=%p, next_write=%p\n",
- req,
- card->dump_cur_read->count,
- card->dump_cur_read, card->dump_next_write);
- }
-
- /* Start both XAPs */
- unifi_trace(card->ospriv, UDBG4, "Restart XAPs after coredump\n");
- r = card_start_processor(card, UNIFI_PROC_BOTH);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Failed to start UniFi XAPs\n");
- goto done;
- }
-
-done:
- return r;
-} /* unifi_coredump_capture() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * get_value_from_coredump
- *
- * Retrieve a single 16-bit word from a zone of a buffered coredump.
- *
- * Arguments:
- * dump Pointer to buffered coredump data
- * space XAP memory space to retrieve from the buffer (there
- * may be more than one zone covering the same memory
- * space, but starting from different offsets).
- * offset Offset within the XAP memory space to be retrieved
- *
- * Returns:
- * >=0 Register value on success
- * <0 Register out of range of any captured zones
- *
- * Notes:
- * ---------------------------------------------------------------------------
- */
-static s32 get_value_from_coredump(const coredump_buffer *coreDump,
- const unifi_coredump_space_t space,
- const u16 offset_in_space)
-{
- s32 r = -1;
- u16 offset_in_zone;
- u32 zone_end_offset;
- s32 i;
- const struct coredump_zone *def = &zonedef_table[0];
-
- /* Search zone def table for a match with the requested memory space */
- for (i = 0; i < HIP_CDUMP_NUM_ZONES; i++, def++)
- {
- if (space == def->space)
- {
- zone_end_offset = def->offset + def->length;
-
- /* Is the space offset contained in this zone? */
- if (offset_in_space < zone_end_offset &&
- offset_in_space >= def->offset)
- {
- /* Calculate the offset of data within the zone buffer */
- offset_in_zone = offset_in_space - def->offset;
- r = (s32)*(coreDump->zone[i] + offset_in_zone);
-
- unifi_trace(NULL, UDBG6,
- "sp %d, offs 0x%04x = 0x%04x (in z%d 0x%04x->0x%04x)\n",
- space, offset_in_space, r,
- i, def->offset, zone_end_offset - 1);
- break;
- }
- }
- }
- return r;
-}
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_coredump_get_value
- *
- * Retrieve the value of a register buffered from a previous core dump,
- * so that it may be reported back to application code.
- *
- * Arguments:
- * card Pointer to card struct
- * req Pointer to request parameter partially filled. This
- * function puts in the values retrieved from the dump.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, or:
- * CSR_WIFI_HIP_RESULT_INVALID_VALUE Null parameter error
- * CSR_WIFI_HIP_RESULT_RANGE Register out of range
- * CSR_WIFI_HIP_RESULT_NOT_FOUND Dump index not (yet) captured
- *
- * Notes:
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_coredump_get_value(card_t *card, struct unifi_coredump_req *req)
-{
- CsrResult r;
- s32 i = 0;
- coredump_buffer *find_dump = NULL;
-
- if (req == NULL || card == NULL)
- {
- r = CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- goto done;
- }
- req->value = -1;
- if (card->dump_buf == NULL)
- {
- unifi_trace(card->ospriv, UDBG2, "No coredump buffers\n");
- r = CSR_WIFI_HIP_RESULT_NOT_FOUND; /* Coredumping disabled */
- goto done;
- }
- if (card->dump_cur_read == NULL)
- {
- unifi_trace(card->ospriv, UDBG4, "No coredumps captured\n");
- r = CSR_WIFI_HIP_RESULT_NOT_FOUND; /* No coredump yet captured */
- goto done;
- }
-
- /* Find the requested dump buffer */
- switch (req->index)
- {
- case 0: /* Newest */
- find_dump = card->dump_cur_read;
- break;
- case -1: /* Oldest: The next used slot forward */
- for (find_dump = card->dump_cur_read->next;
- (find_dump->count == 0) && (find_dump != card->dump_cur_read);
- find_dump = find_dump->next)
- {
- }
- break;
- default: /* Number of steps back from current read position */
- for (i = 0, find_dump = card->dump_cur_read;
- i < req->index;
- i++, find_dump = find_dump->prev)
- {
- /* Walk the list for the index'th entry, but
- * stop when about to wrap. */
- unifi_trace(card->ospriv, UDBG6,
- "%d: %d, @%p, p=%p, n=%p, cr=%p, h=%p\n",
- i, find_dump->count, find_dump, find_dump->prev,
- find_dump->next, card->dump_cur_read, card->dump_buf);
- if (find_dump->prev == card->dump_cur_read)
- {
- /* Wrapped but still not found, index out of range */
- if (i != req->index)
- {
- unifi_trace(card->ospriv, UDBG6,
- "Dump index %d not found %d\n", req->index, i);
- r = CSR_WIFI_HIP_RESULT_NOT_FOUND;
- goto done;
- }
- break;
- }
- }
- break;
- }
-
- /* Check if the slot is actually filled with a core dump */
- if (find_dump->count == 0)
- {
- unifi_trace(card->ospriv, UDBG4, "Not captured %d\n", req->index);
- r = CSR_WIFI_HIP_RESULT_NOT_FOUND;
- goto done;
- }
-
- unifi_trace(card->ospriv, UDBG6, "Req index %d, found seq %d at step %d\n",
- req->index, find_dump->count, i);
-
- /* Find the appropriate entry in the buffer */
- req->value = get_value_from_coredump(find_dump, req->space, (u16)req->offset);
- if (req->value < 0)
- {
- r = CSR_WIFI_HIP_RESULT_RANGE; /* Un-captured register */
- unifi_trace(card->ospriv, UDBG4,
- "Can't read space %d, reg 0x%x from coredump buffer %d\n",
- req->space, req->offset, req->index);
- }
- else
- {
- r = CSR_RESULT_SUCCESS;
- }
-
- /* Update the private request structure with the found values */
- req->chip_ver = find_dump->chip_ver;
- req->fw_ver = find_dump->fw_ver;
- req->timestamp = find_dump->timestamp;
- req->requestor = find_dump->requestor;
- req->serial = find_dump->count;
-
-done:
- return r;
-} /* unifi_coredump_get_value() */
-
-
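For context, a minimal usage sketch of the retrieval path above (illustrative only, not code from this driver): it fills only the unifi_coredump_req fields that this file itself reads and writes (index, space, offset, value, serial), and the helper name example_read_dumped_word is hypothetical.

/* Hypothetical caller: read one buffered word from the newest mini-coredump. */
static void example_read_dumped_word(card_t *card)
{
    struct unifi_coredump_req req;
    CsrResult r;

    req.index = 0;                      /* 0 = newest captured dump          */
    req.space = UNIFI_COREDUMP_SH_DMEM; /* memory space to read from         */
    req.offset = 0x0010;                /* XAP word offset within the space  */

    r = unifi_coredump_get_value(card, &req);
    if (r == CSR_RESULT_SUCCESS)
        unifi_info(card->ospriv, "dump %d: sp %d offs 0x%x = 0x%x\n",
                   req.serial, req.space, req.offset, req.value);
}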
-/*
- * ---------------------------------------------------------------------------
- * unifi_coredump_read_zone
- *
- * Captures a UniFi memory zone into a buffer on the host
- *
- * Arguments:
- * card Pointer to card struct
- * zonebuf Pointer to on-host buffer to dump the memory zone into
- * def Pointer to description of the memory zone to read from UniFi.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, or:
- * CSR_RESULT_FAILURE SDIO error
- * CSR_WIFI_HIP_RESULT_INVALID_VALUE Parameter error
- *
- * Notes:
- * It is assumed that the caller has already stopped the XAPs
- * ---------------------------------------------------------------------------
- */
-static CsrResult unifi_coredump_read_zone(card_t *card, u16 *zonebuf, const struct coredump_zone *def)
-{
- CsrResult r;
-
- if (zonebuf == NULL || def == NULL)
- {
- r = CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- goto done;
- }
-
- /* Select XAP CPU if necessary */
- if (def->cpu != UNIFI_PROC_INVALID)
- {
- if (def->cpu != UNIFI_PROC_MAC && def->cpu != UNIFI_PROC_PHY)
- {
- r = CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- goto done;
- }
- r = unifi_set_proc_select(card, def->cpu);
- if (r != CSR_RESULT_SUCCESS)
- {
- goto done;
- }
- }
-
- unifi_trace(card->ospriv, UDBG4,
- "Dump sp %d, offs 0x%04x, 0x%04x words @GP=%08x CPU %d\n",
- def->space, def->offset, def->length, def->gp, def->cpu);
-
- /* Read on-chip RAM (byte-wise) */
- r = unifi_card_readn(card, def->gp, zonebuf, (u16)(def->length * 2));
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- goto done;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Can't read UniFi shared data area\n");
- goto done;
- }
-
-done:
- return r;
-}
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_coredump_read_zones
- *
- * Walks through the table of on-chip memory zones defined in zonedef_table,
- * and reads each of them from the UniFi chip
- *
- * Arguments:
- * card Pointer to card struct
- * dump_buf Buffer into which register values will be dumped
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, or:
- * CSR_RESULT_FAILURE SDIO error
- * CSR_WIFI_HIP_RESULT_INVALID_VALUE Parameter error
- *
- * Notes:
- * It is assumed that the caller has already stopped the XAPs
- * ---------------------------------------------------------------------------
- */
-static CsrResult unifi_coredump_read_zones(card_t *card, coredump_buffer *dump_buf)
-{
- CsrResult r = CSR_RESULT_SUCCESS;
- s32 i;
-
- /* Walk the table of coredump zone definitions and read them from the chip */
- for (i = 0;
- (i < HIP_CDUMP_NUM_ZONES) && (r == 0);
- i++)
- {
- r = unifi_coredump_read_zone(card, dump_buf->zone[i], &zonedef_table[i]);
- }
-
- return r;
-}
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_coredump_from_sdio
- *
- * Capture the status of the UniFi processors, over SDIO
- *
- * Arguments:
- * card Pointer to card struct
- * dump_buf Buffer into which register values will be dumped
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, or:
- * CSR_RESULT_FAILURE SDIO error
- * CSR_WIFI_HIP_RESULT_INVALID_VALUE Parameter error
- *
- * Notes:
- * ---------------------------------------------------------------------------
- */
-static CsrResult unifi_coredump_from_sdio(card_t *card, coredump_buffer *dump_buf)
-{
- u16 val = 0; /* remains 0 if the chip version register cannot be read */
- CsrResult r;
- u32 sdio_addr;
-
- if (dump_buf == NULL)
- {
- r = CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- goto done;
- }
-
-
- /* Chip and firmware version */
- unifi_trace(card->ospriv, UDBG4, "Get chip version\n");
- sdio_addr = 2 * ChipHelper_GBL_CHIP_VERSION(card->helper);
- if (sdio_addr != 0)
- {
- r = unifi_read_direct16(card, sdio_addr, &val);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- goto done;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Can't read GBL_CHIP_VERSION\n");
- goto done;
- }
- }
- dump_buf->chip_ver = val;
- dump_buf->fw_ver = card->build_id;
-
- unifi_trace(card->ospriv, UDBG4, "chip_ver 0x%04x, fw_ver %u\n",
- dump_buf->chip_ver, dump_buf->fw_ver);
-
- /* Capture the memory zones required from UniFi */
- r = unifi_coredump_read_zones(card, dump_buf);
- if (r == CSR_WIFI_HIP_RESULT_NO_DEVICE)
- {
- goto done;
- }
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "Can't read UniFi memory areas\n");
- goto done;
- }
-
-done:
- return r;
-} /* unifi_coredump_from_sdio() */
-
-
-#ifndef UNIFI_DISABLE_COREDUMP
-/*
- * ---------------------------------------------------------------------------
- * new_coredump_node
- *
- * Allocates a coredump linked-list node, and links it to the previous.
- *
- * Arguments:
- * ospriv OS context
- * prevnode Previous node to link into
- *
- * Returns:
- * Pointer to valid coredump_buffer on success
- * NULL on memory allocation failure
- *
- * Notes:
- * Allocates "all or nothing"
- * ---------------------------------------------------------------------------
- */
-static
-coredump_buffer* new_coredump_node(void *ospriv, coredump_buffer *prevnode)
-{
- coredump_buffer *newnode = NULL;
- u16 *newzone = NULL;
- s32 i;
- u32 zone_size;
-
- /* Allocate node header */
- newnode = kzalloc(sizeof(coredump_buffer), GFP_KERNEL);
- if (newnode == NULL)
- {
- return NULL;
- }
-
- /* Allocate chip memory zone capture buffers */
- for (i = 0; i < HIP_CDUMP_NUM_ZONES; i++)
- {
- zone_size = sizeof(u16) * zonedef_table[i].length;
- newzone = kzalloc(zone_size, GFP_KERNEL);
- newnode->zone[i] = newzone;
- if (newzone == NULL)
- {
- unifi_error(ospriv, "Out of memory on coredump zone %d (%d words)\n",
- i, zonedef_table[i].length);
- break;
- }
- }
-
- /* Clean up if any zone alloc failed: this allocator is "all or nothing" */
- if (newzone == NULL)
- {
- for (i = 0; newnode->zone[i] != NULL; i++)
- {
- kfree(newnode->zone[i]);
- }
- kfree(newnode);
- return NULL;
- }
-
- /* Link to previous node */
- newnode->prev = prevnode;
- if (prevnode)
- {
- prevnode->next = newnode;
- }
- newnode->next = NULL;
-
- return newnode;
-}
-
-
-#endif /* UNIFI_DISABLE_COREDUMP */
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_coredump_init
- *
- * Allocates buffers for the automatic SDIO core dump
- *
- * Arguments:
- * card Pointer to card struct
- * num_dump_buffers Number of buffers to reserve for coredumps
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, or:
- * CSR_WIFI_HIP_RESULT_NO_MEMORY memory allocation failed
- *
- * Notes:
- * Allocates space in advance, to be used for the last n coredump buffers,
- * the intention being that the size is sufficient for at least one dump,
- * probably several.
- * It is advisable to have at least 2 coredump buffers, so that one can be
- * queried with the unifi_coredump tool while another is left free for
- * capturing.
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_coredump_init(card_t *card, u16 num_dump_buffers)
-{
-#ifndef UNIFI_DISABLE_COREDUMP
- void *ospriv = card->ospriv;
- coredump_buffer *prev = NULL;
- coredump_buffer *newnode = NULL;
- u32 i = 0;
-#endif
-
- card->request_coredump_on_reset = 0;
- card->dump_next_write = NULL;
- card->dump_cur_read = NULL;
- card->dump_buf = NULL;
-
-#ifndef UNIFI_DISABLE_COREDUMP
- unifi_trace(ospriv, UDBG1,
- "Allocate buffers for %d core dumps\n", num_dump_buffers);
- if (num_dump_buffers == 0)
- {
- goto done;
- }
-
- /* Root node */
- card->dump_buf = new_coredump_node(ospriv, NULL);
- if (card->dump_buf == NULL)
- {
- goto fail;
- }
- prev = card->dump_buf;
- newnode = card->dump_buf;
-
- /* Add each subsequent node at tail */
- for (i = 1; i < num_dump_buffers; i++)
- {
- newnode = new_coredump_node(ospriv, prev);
- if (newnode == NULL)
- {
- goto fail;
- }
- prev = newnode;
- }
-
- /* Link the first and last nodes to make the list circular */
- card->dump_buf->prev = newnode;
- newnode->next = card->dump_buf;
-
- /* Set initial r/w access pointers */
- card->dump_next_write = card->dump_buf;
- card->dump_cur_read = NULL;
-
- unifi_trace(ospriv, UDBG2, "Core dump configured (%d dumps max)\n", i);
-
-done:
-#endif
- return CSR_RESULT_SUCCESS;
-
-#ifndef UNIFI_DISABLE_COREDUMP
-fail:
- /* Unwind what we allocated so far */
- unifi_error(ospriv, "Out of memory allocating core dump node %d\n", i);
- unifi_coredump_free(card);
- return CSR_WIFI_HIP_RESULT_NO_MEMORY;
-#endif
-} /* unifi_coredump_init() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_coredump_free
- *
- * Free all memory dynamically allocated for core dump
- *
- * Arguments:
- * card Pointer to card struct
- *
- * Returns:
- * None
- *
- * Notes:
- * ---------------------------------------------------------------------------
- */
-void unifi_coredump_free(card_t *card)
-{
- void *ospriv = card->ospriv;
- coredump_buffer *node, *del_node;
- s16 i = 0;
- s16 j;
-
- unifi_trace(ospriv, UDBG2, "Core dump de-configured\n");
-
- if (card->dump_buf == NULL)
- {
- return;
- }
-
- node = card->dump_buf;
- do
- {
- /* Free payload zones */
- for (j = 0; j < HIP_CDUMP_NUM_ZONES; j++)
- {
- kfree(node->zone[j]);
- node->zone[j] = NULL;
- }
-
- /* Detach */
- del_node = node;
- node = node->next;
-
- /* Free header */
- kfree(del_node);
- i++;
- } while ((node != NULL) && (node != card->dump_buf));
-
- unifi_trace(ospriv, UDBG3, "Freed %d coredump buffers\n", i);
-
- card->dump_buf = NULL;
- card->dump_next_write = NULL;
- card->dump_cur_read = NULL;
-} /* unifi_coredump_free() */
-
-
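An aside on the file just removed (illustrative sketch, not driver code): the dump slots form a fixed-size circular list, a slot count of 0 means "empty", and the sequence number skips 0 when it wraps. The fragment below shows that advance rule, applied after a successful capture, using simplified stand-in types.

/* Simplified stand-in types; the real driver uses coredump_buffer and card_t. */
struct dump_slot {
    unsigned short count;   /* 0 marks an unused slot */
    struct dump_slot *next; /* circular list          */
    struct dump_slot *prev;
};

static unsigned short dump_seq = 1;

static void ring_advance(struct dump_slot **cur_read, struct dump_slot **next_write)
{
    (*next_write)->count = dump_seq++;  /* stamp the freshly written slot      */
    if (dump_seq == 0)                  /* 0 is reserved for "empty", so       */
        dump_seq = 1;                   /* skip it when the counter wraps      */
    *cur_read = *next_write;            /* newest dump becomes readable        */
    *next_write = (*next_write)->next;  /* the oldest slot is overwritten next */
}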
diff --git a/drivers/staging/csr/csr_wifi_hip_packing.c b/drivers/staging/csr/csr_wifi_hip_packing.c
deleted file mode 100644
index 0768aef..0000000
--- a/drivers/staging/csr/csr_wifi_hip_packing.c
+++ /dev/null
@@ -1,4804 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#include "csr_wifi_hip_signals.h"
-#include "csr_wifi_hip_unifi.h"
-#include "csr_wifi_hip_conversions.h"
-
-
-/*
- * ---------------------------------------------------------------------------
- * get_packed_struct_size
- *
- * Examine a buffer containing a UniFi signal in wire-format.
- * The first two bytes contain the signal ID, decode the signal ID and
- * return the size, in bytes, of the signal, not including any bulk
- * data.
- *
- * WARNING: This function is auto-generated, DO NOT EDIT!
- *
- * Arguments:
- * buf Pointer to buffer to decode.
- *
- * Returns:
- * 0 if the signal ID is not recognised (i.e. zero length),
- * otherwise the number of bytes occupied by the signal in the buffer.
- * This is useful for stepping past the signal to the object in the buffer.
- * ---------------------------------------------------------------------------
- */
-s32 get_packed_struct_size(const u8 *buf)
-{
- s32 size = 0;
- u16 sig_id;
-
- sig_id = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(buf);
-
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- switch (sig_id)
- {
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_PACKET_FILTER_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SETKEYS_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONFIG_QUEUE_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_AUTONOMOUS_SCAN_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_BLACKOUT_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_BLACKOUT_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_KEY_SEQUENCE_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SM_START_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_STOP_AGGREGATION_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += 48 / 8;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_TSPEC_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
- case CSR_DEBUG_WORD16_INDICATION_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
- case CSR_DEBUG_GENERIC_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
- case CSR_MA_PACKET_INDICATION_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT64;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
- case CSR_MLME_SET_TIM_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONNECTED_INDICATION_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += 48 / 8;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_RX_TRIGGER_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_TRIGGERED_GET_INDICATION_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SCAN_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT32;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DELETEKEYS_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_NEXT_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_CHANNEL_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_START_AGGREGATION_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += 48 / 8;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_HL_SYNC_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += 48 / 8;
- break;
-#endif
- case CSR_DEBUG_GENERIC_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_LEAVE_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_TRIGGERED_GET_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_MULTICAST_ADDRESS_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_RESET_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += 48 / 8;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SCAN_CANCEL_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TRIGGERED_GET_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_PACKET_FILTER_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT32;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_RX_TRIGGER_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONNECT_STATUS_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += 48 / 8;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_LEAVE_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONFIG_QUEUE_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_TSPEC_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
- case CSR_MLME_SET_TIM_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MEASURE_INDICATION_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_BLACKOUT_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_TRIGGERED_GET_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
- case CSR_DEBUG_GENERIC_INDICATION_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
- case CSR_MA_PACKET_CANCEL_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT32;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MODIFY_BSS_PARAMETER_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_PAUSE_AUTONOMOUS_SCAN_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
- case CSR_MA_PACKET_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT32;
- size += SIZEOF_UINT16;
- size += 48 / 8;
- size += SIZEOF_UINT16;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MODIFY_BSS_PARAMETER_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += 48 / 8;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_RX_TRIGGER_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
- case CSR_MA_VIF_AVAILABILITY_INDICATION_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_HL_SYNC_CANCEL_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += 48 / 8;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_AUTONOMOUS_SCAN_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_BLACKOUT_ENDED_INDICATION_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_AUTONOMOUS_SCAN_DONE_INDICATION_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_KEY_SEQUENCE_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += 48 / 8;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_CHANNEL_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += 48 / 8;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MEASURE_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TRIGGERED_GET_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_AUTONOMOUS_SCAN_LOSS_INDICATION_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += 48 / 8;
- break;
-#endif
- case CSR_MA_VIF_AVAILABILITY_RESPONSE_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TEMPLATE_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_POWERMGT_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_PERIODIC_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_NEXT_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_STOP_AGGREGATION_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += 48 / 8;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_RX_TRIGGER_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_BLACKOUT_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT32;
- size += SIZEOF_UINT32;
- size += SIZEOF_UINT32;
- size += 48 / 8;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DELETEKEYS_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += 48 / 8;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_RESET_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_HL_SYNC_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += 48 / 8;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_AUTONOMOUS_SCAN_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT32;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SM_START_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += 48 / 8;
- size += 48 / 8;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONNECT_STATUS_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_AUTONOMOUS_SCAN_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_PERIODIC_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SETKEYS_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += 48 / 8;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += 32 / 8;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_PAUSE_AUTONOMOUS_SCAN_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_POWERMGT_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
- case CSR_MA_PACKET_ERROR_INDICATION_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += 48 / 8;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_PERIODIC_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT32;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TSPEC_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT32;
- size += SIZEOF_UINT32;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_MULTICAST_ADDRESS_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TSPEC_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_HL_SYNC_CANCEL_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SCAN_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
- case CSR_DEBUG_STRING_INDICATION_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TEMPLATE_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_BLOCKACK_ERROR_INDICATION_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += 48 / 8;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MEASURE_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_START_AGGREGATION_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += 48 / 8;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_STOP_MEASURE_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
- case CSR_MA_PACKET_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT32;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_PERIODIC_CONFIRM_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_STOP_MEASURE_REQUEST_ID:
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- size += SIZEOF_UINT16;
- break;
-#endif
- default:
- size = 0;
- }
- return size;
-} /* get_packed_struct_size() */
-
-
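A small usage note on the function above (illustrative only, not part of the file): because the returned size excludes bulk data, a receive path can use it to step over the packed signal to whatever follows in the same buffer. The helper name skip_packed_signal and the buf/buf_len parameters are assumptions for the sketch.

/* Hypothetical helper: step past the wire-format signal in a receive buffer. */
static const u8 *skip_packed_signal(const u8 *buf, u32 buf_len, u32 *bulk_len)
{
    s32 sig_bytes = get_packed_struct_size(buf);

    if (sig_bytes == 0 || (u32)sig_bytes > buf_len)
        return NULL;                 /* unknown signal ID or truncated buffer */

    *bulk_len = buf_len - (u32)sig_bytes;
    return buf + sig_bytes;          /* start of any bulk data that follows   */
}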
-/*
- * ---------------------------------------------------------------------------
- * read_unpack_signal
- *
- * Unpack a wire-format signal into a host-native structure.
- * This function handles any necessary conversions for endianness and
- * places no restrictions on packing or alignment for the structure
- * definition.
- *
- * WARNING: This function is auto-generated, DO NOT EDIT!
- *
- * Arguments:
- * ptr Signal buffer to unpack.
- * sig Pointer to destination structure to populate.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success,
- * CSR_WIFI_HIP_RESULT_INVALID_VALUE if the signal ID is not recognised.
- * ---------------------------------------------------------------------------
- */
-CsrResult read_unpack_signal(const u8 *ptr, CSR_SIGNAL *sig)
-{
- s32 index = 0;
-
- sig->SignalPrimitiveHeader.SignalId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
-
- sig->SignalPrimitiveHeader.ReceiverProcessId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
-
- sig->SignalPrimitiveHeader.SenderProcessId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
-
- switch (sig->SignalPrimitiveHeader.SignalId)
- {
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_PACKET_FILTER_CONFIRM_ID:
- sig->u.MlmeSetPacketFilterConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetPacketFilterConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetPacketFilterConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetPacketFilterConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetPacketFilterConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetPacketFilterConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SETKEYS_CONFIRM_ID:
- sig->u.MlmeSetkeysConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetkeysConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetkeysConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetkeysConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetkeysConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetkeysConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONFIG_QUEUE_CONFIRM_ID:
- sig->u.MlmeConfigQueueConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConfigQueueConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConfigQueueConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConfigQueueConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConfigQueueConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_AUTONOMOUS_SCAN_CONFIRM_ID:
- sig->u.MlmeAddAutonomousScanConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddAutonomousScanConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddAutonomousScanConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddAutonomousScanConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddAutonomousScanConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddAutonomousScanConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddAutonomousScanConfirm.AutonomousScanId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_BLACKOUT_CONFIRM_ID:
- sig->u.MlmeAddBlackoutConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddBlackoutConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddBlackoutConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddBlackoutConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddBlackoutConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddBlackoutConfirm.BlackoutId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddBlackoutConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_BLACKOUT_REQUEST_ID:
- sig->u.MlmeDelBlackoutRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelBlackoutRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelBlackoutRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelBlackoutRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelBlackoutRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelBlackoutRequest.BlackoutId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_KEY_SEQUENCE_CONFIRM_ID:
- sig->u.MlmeGetKeySequenceConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetKeySequenceConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetKeySequenceConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetKeySequenceConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetKeySequenceConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetKeySequenceConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[0] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[1] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[2] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[3] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[4] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[5] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[6] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[7] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SM_START_CONFIRM_ID:
- sig->u.MlmeSmStartConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSmStartConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSmStartConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSmStartConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSmStartConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSmStartConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_STOP_AGGREGATION_CONFIRM_ID:
- sig->u.MlmeStopAggregationConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStopAggregationConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStopAggregationConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStopAggregationConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStopAggregationConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- memcpy(sig->u.MlmeStopAggregationConfirm.PeerQstaAddress.x, &ptr[index], 48 / 8);
- index += 48 / 8;
- sig->u.MlmeStopAggregationConfirm.UserPriority = (CSR_PRIORITY) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStopAggregationConfirm.Direction = (CSR_DIRECTION) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStopAggregationConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_TSPEC_REQUEST_ID:
- sig->u.MlmeDelTspecRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelTspecRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelTspecRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelTspecRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelTspecRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelTspecRequest.UserPriority = (CSR_PRIORITY) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelTspecRequest.Direction = (CSR_DIRECTION) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
- case CSR_DEBUG_WORD16_INDICATION_ID:
- sig->u.DebugWord16Indication.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugWord16Indication.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugWord16Indication.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugWord16Indication.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugWord16Indication.DebugWords[0] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugWord16Indication.DebugWords[1] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugWord16Indication.DebugWords[2] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugWord16Indication.DebugWords[3] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugWord16Indication.DebugWords[4] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugWord16Indication.DebugWords[5] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugWord16Indication.DebugWords[6] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugWord16Indication.DebugWords[7] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugWord16Indication.DebugWords[8] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugWord16Indication.DebugWords[9] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugWord16Indication.DebugWords[10] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugWord16Indication.DebugWords[11] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugWord16Indication.DebugWords[12] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugWord16Indication.DebugWords[13] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugWord16Indication.DebugWords[14] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugWord16Indication.DebugWords[15] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
- case CSR_DEBUG_GENERIC_CONFIRM_ID:
- sig->u.DebugGenericConfirm.DebugVariable.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericConfirm.DebugVariable.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericConfirm.DebugWords[0] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericConfirm.DebugWords[1] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericConfirm.DebugWords[2] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericConfirm.DebugWords[3] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericConfirm.DebugWords[4] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericConfirm.DebugWords[5] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericConfirm.DebugWords[6] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericConfirm.DebugWords[7] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
- case CSR_MA_PACKET_INDICATION_ID:
- sig->u.MaPacketIndication.Data.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketIndication.Data.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketIndication.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketIndication.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketIndication.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- memcpy(sig->u.MaPacketIndication.LocalTime.x, &ptr[index], 64 / 8);
- index += 64 / 8;
- sig->u.MaPacketIndication.Ifindex = (CSR_IFINTERFACE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketIndication.Channel = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketIndication.ReceptionStatus = (CSR_RECEPTION_STATUS) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketIndication.Rssi = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketIndication.Snr = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketIndication.ReceivedRate = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
- case CSR_MLME_SET_TIM_REQUEST_ID:
- sig->u.MlmeSetTimRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetTimRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetTimRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetTimRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetTimRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetTimRequest.AssociationId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetTimRequest.TimValue = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONNECTED_INDICATION_ID:
- sig->u.MlmeConnectedIndication.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConnectedIndication.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConnectedIndication.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConnectedIndication.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConnectedIndication.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConnectedIndication.ConnectionStatus = (CSR_CONNECTION_STATUS) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- memcpy(sig->u.MlmeConnectedIndication.PeerMacAddress.x, &ptr[index], 48 / 8);
- index += 48 / 8;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_RX_TRIGGER_REQUEST_ID:
- sig->u.MlmeDelRxTriggerRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelRxTriggerRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelRxTriggerRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelRxTriggerRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelRxTriggerRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelRxTriggerRequest.TriggerId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_TRIGGERED_GET_INDICATION_ID:
- sig->u.MlmeTriggeredGetIndication.MibAttributeValue.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeTriggeredGetIndication.MibAttributeValue.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeTriggeredGetIndication.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeTriggeredGetIndication.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeTriggeredGetIndication.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeTriggeredGetIndication.Status = (CSR_MIB_STATUS) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeTriggeredGetIndication.ErrorIndex = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeTriggeredGetIndication.TriggeredId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SCAN_REQUEST_ID:
- sig->u.MlmeScanRequest.ChannelList.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeScanRequest.ChannelList.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeScanRequest.InformationElements.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeScanRequest.InformationElements.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeScanRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeScanRequest.Ifindex = (CSR_IFINTERFACE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeScanRequest.ScanType = (CSR_SCAN_TYPE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeScanRequest.ProbeDelay = CSR_GET_UINT32_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT32;
- sig->u.MlmeScanRequest.MinChannelTime = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeScanRequest.MaxChannelTime = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DELETEKEYS_CONFIRM_ID:
- sig->u.MlmeDeletekeysConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDeletekeysConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDeletekeysConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDeletekeysConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDeletekeysConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDeletekeysConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_NEXT_REQUEST_ID:
- sig->u.MlmeGetNextRequest.MibAttribute.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetNextRequest.MibAttribute.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetNextRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetNextRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_CHANNEL_CONFIRM_ID:
- sig->u.MlmeSetChannelConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetChannelConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetChannelConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetChannelConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetChannelConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetChannelConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_START_AGGREGATION_REQUEST_ID:
- sig->u.MlmeStartAggregationRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStartAggregationRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStartAggregationRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStartAggregationRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStartAggregationRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- memcpy(sig->u.MlmeStartAggregationRequest.PeerQstaAddress.x, &ptr[index], 48 / 8);
- index += 48 / 8;
- sig->u.MlmeStartAggregationRequest.UserPriority = (CSR_PRIORITY) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStartAggregationRequest.Direction = (CSR_DIRECTION) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStartAggregationRequest.StartingSequenceNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStartAggregationRequest.BufferSize = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStartAggregationRequest.BlockAckTimeout = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_HL_SYNC_REQUEST_ID:
- sig->u.MlmeHlSyncRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeHlSyncRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeHlSyncRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeHlSyncRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- memcpy(sig->u.MlmeHlSyncRequest.GroupAddress.x, &ptr[index], 48 / 8);
- index += 48 / 8;
- break;
-#endif
- case CSR_DEBUG_GENERIC_REQUEST_ID:
- sig->u.DebugGenericRequest.DebugVariable.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericRequest.DebugVariable.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericRequest.DebugWords[0] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericRequest.DebugWords[1] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericRequest.DebugWords[2] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericRequest.DebugWords[3] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericRequest.DebugWords[4] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericRequest.DebugWords[5] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericRequest.DebugWords[6] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericRequest.DebugWords[7] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_LEAVE_CONFIRM_ID:
- sig->u.MlmeLeaveConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeLeaveConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeLeaveConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeLeaveConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeLeaveConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeLeaveConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_TRIGGERED_GET_REQUEST_ID:
- sig->u.MlmeDelTriggeredGetRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelTriggeredGetRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelTriggeredGetRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelTriggeredGetRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelTriggeredGetRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelTriggeredGetRequest.TriggeredId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_MULTICAST_ADDRESS_REQUEST_ID:
- sig->u.MlmeAddMulticastAddressRequest.Data.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddMulticastAddressRequest.Data.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddMulticastAddressRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddMulticastAddressRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddMulticastAddressRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddMulticastAddressRequest.NumberOfMulticastGroupAddresses = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_RESET_REQUEST_ID:
- sig->u.MlmeResetRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeResetRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeResetRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeResetRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- memcpy(sig->u.MlmeResetRequest.StaAddress.x, &ptr[index], 48 / 8);
- index += 48 / 8;
- sig->u.MlmeResetRequest.SetDefaultMib = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SCAN_CANCEL_REQUEST_ID:
- sig->u.MlmeScanCancelRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeScanCancelRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeScanCancelRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeScanCancelRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeScanCancelRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TRIGGERED_GET_CONFIRM_ID:
- sig->u.MlmeAddTriggeredGetConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTriggeredGetConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTriggeredGetConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTriggeredGetConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTriggeredGetConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTriggeredGetConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTriggeredGetConfirm.TriggeredId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_PACKET_FILTER_REQUEST_ID:
- sig->u.MlmeSetPacketFilterRequest.InformationElements.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetPacketFilterRequest.InformationElements.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetPacketFilterRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetPacketFilterRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetPacketFilterRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetPacketFilterRequest.PacketFilterMode = (CSR_PACKET_FILTER_MODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetPacketFilterRequest.ArpFilterAddress = CSR_GET_UINT32_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT32;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_RX_TRIGGER_CONFIRM_ID:
- sig->u.MlmeDelRxTriggerConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelRxTriggerConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelRxTriggerConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelRxTriggerConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelRxTriggerConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelRxTriggerConfirm.TriggerId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelRxTriggerConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONNECT_STATUS_REQUEST_ID:
- sig->u.MlmeConnectStatusRequest.InformationElements.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConnectStatusRequest.InformationElements.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConnectStatusRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConnectStatusRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConnectStatusRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConnectStatusRequest.ConnectionStatus = (CSR_CONNECTION_STATUS) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- memcpy(sig->u.MlmeConnectStatusRequest.StaAddress.x, &ptr[index], 48 / 8);
- index += 48 / 8;
- sig->u.MlmeConnectStatusRequest.AssociationId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConnectStatusRequest.AssociationCapabilityInformation = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_LEAVE_REQUEST_ID:
- sig->u.MlmeLeaveRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeLeaveRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeLeaveRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeLeaveRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeLeaveRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONFIG_QUEUE_REQUEST_ID:
- sig->u.MlmeConfigQueueRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConfigQueueRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConfigQueueRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConfigQueueRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConfigQueueRequest.QueueIndex = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConfigQueueRequest.Aifs = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConfigQueueRequest.Cwmin = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConfigQueueRequest.Cwmax = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConfigQueueRequest.TxopLimit = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_TSPEC_CONFIRM_ID:
- sig->u.MlmeDelTspecConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelTspecConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelTspecConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelTspecConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelTspecConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelTspecConfirm.UserPriority = (CSR_PRIORITY) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelTspecConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
- case CSR_MLME_SET_TIM_CONFIRM_ID:
- sig->u.MlmeSetTimConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetTimConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetTimConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetTimConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetTimConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetTimConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MEASURE_INDICATION_ID:
- sig->u.MlmeMeasureIndication.MeasurementReportSet.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeMeasureIndication.MeasurementReportSet.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeMeasureIndication.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeMeasureIndication.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeMeasureIndication.DialogToken = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_BLACKOUT_CONFIRM_ID:
- sig->u.MlmeDelBlackoutConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelBlackoutConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelBlackoutConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelBlackoutConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelBlackoutConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelBlackoutConfirm.BlackoutId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelBlackoutConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_TRIGGERED_GET_CONFIRM_ID:
- sig->u.MlmeDelTriggeredGetConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelTriggeredGetConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelTriggeredGetConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelTriggeredGetConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelTriggeredGetConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelTriggeredGetConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelTriggeredGetConfirm.TriggeredId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
- case CSR_DEBUG_GENERIC_INDICATION_ID:
- sig->u.DebugGenericIndication.DebugVariable.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericIndication.DebugVariable.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericIndication.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericIndication.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericIndication.DebugWords[0] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericIndication.DebugWords[1] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericIndication.DebugWords[2] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericIndication.DebugWords[3] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericIndication.DebugWords[4] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericIndication.DebugWords[5] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericIndication.DebugWords[6] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugGenericIndication.DebugWords[7] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
- case CSR_MA_PACKET_CANCEL_REQUEST_ID:
- sig->u.MaPacketCancelRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketCancelRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketCancelRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketCancelRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketCancelRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketCancelRequest.HostTag = CSR_GET_UINT32_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT32;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MODIFY_BSS_PARAMETER_CONFIRM_ID:
- sig->u.MlmeModifyBssParameterConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeModifyBssParameterConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeModifyBssParameterConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeModifyBssParameterConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeModifyBssParameterConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeModifyBssParameterConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_PAUSE_AUTONOMOUS_SCAN_CONFIRM_ID:
- sig->u.MlmePauseAutonomousScanConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmePauseAutonomousScanConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmePauseAutonomousScanConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmePauseAutonomousScanConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmePauseAutonomousScanConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmePauseAutonomousScanConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmePauseAutonomousScanConfirm.AutonomousScanId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
- case CSR_MA_PACKET_REQUEST_ID:
- sig->u.MaPacketRequest.Data.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketRequest.Data.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketRequest.TransmitRate = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketRequest.HostTag = CSR_GET_UINT32_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT32;
- sig->u.MaPacketRequest.Priority = (CSR_PRIORITY) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- memcpy(sig->u.MaPacketRequest.Ra.x, &ptr[index], 48 / 8);
- index += 48 / 8;
- sig->u.MaPacketRequest.TransmissionControl = (CSR_TRANSMISSION_CONTROL) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MODIFY_BSS_PARAMETER_REQUEST_ID:
- sig->u.MlmeModifyBssParameterRequest.Data.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeModifyBssParameterRequest.Data.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeModifyBssParameterRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeModifyBssParameterRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeModifyBssParameterRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeModifyBssParameterRequest.BeaconPeriod = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeModifyBssParameterRequest.DtimPeriod = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeModifyBssParameterRequest.CapabilityInformation = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- memcpy(sig->u.MlmeModifyBssParameterRequest.Bssid.x, &ptr[index], 48 / 8);
- index += 48 / 8;
- sig->u.MlmeModifyBssParameterRequest.RtsThreshold = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_RX_TRIGGER_REQUEST_ID:
- sig->u.MlmeAddRxTriggerRequest.InformationElements.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddRxTriggerRequest.InformationElements.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddRxTriggerRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddRxTriggerRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddRxTriggerRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddRxTriggerRequest.TriggerId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddRxTriggerRequest.Priority = (CSR_PRIORITY) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
- case CSR_MA_VIF_AVAILABILITY_INDICATION_ID:
- sig->u.MaVifAvailabilityIndication.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaVifAvailabilityIndication.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaVifAvailabilityIndication.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaVifAvailabilityIndication.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaVifAvailabilityIndication.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaVifAvailabilityIndication.Multicast = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_HL_SYNC_CANCEL_REQUEST_ID:
- sig->u.MlmeHlSyncCancelRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeHlSyncCancelRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeHlSyncCancelRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeHlSyncCancelRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- memcpy(sig->u.MlmeHlSyncCancelRequest.GroupAddress.x, &ptr[index], 48 / 8);
- index += 48 / 8;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_AUTONOMOUS_SCAN_REQUEST_ID:
- sig->u.MlmeDelAutonomousScanRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelAutonomousScanRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelAutonomousScanRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelAutonomousScanRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelAutonomousScanRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelAutonomousScanRequest.AutonomousScanId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_BLACKOUT_ENDED_INDICATION_ID:
- sig->u.MlmeBlackoutEndedIndication.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeBlackoutEndedIndication.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeBlackoutEndedIndication.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeBlackoutEndedIndication.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeBlackoutEndedIndication.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeBlackoutEndedIndication.BlackoutId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_AUTONOMOUS_SCAN_DONE_INDICATION_ID:
- sig->u.MlmeAutonomousScanDoneIndication.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAutonomousScanDoneIndication.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAutonomousScanDoneIndication.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAutonomousScanDoneIndication.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAutonomousScanDoneIndication.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAutonomousScanDoneIndication.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAutonomousScanDoneIndication.AutonomousScanId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_KEY_SEQUENCE_REQUEST_ID:
- sig->u.MlmeGetKeySequenceRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetKeySequenceRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetKeySequenceRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetKeySequenceRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetKeySequenceRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetKeySequenceRequest.KeyId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetKeySequenceRequest.KeyType = (CSR_KEY_TYPE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- memcpy(sig->u.MlmeGetKeySequenceRequest.Address.x, &ptr[index], 48 / 8);
- index += 48 / 8;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_CHANNEL_REQUEST_ID:
- sig->u.MlmeSetChannelRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetChannelRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetChannelRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetChannelRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetChannelRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetChannelRequest.Ifindex = (CSR_IFINTERFACE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetChannelRequest.Channel = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- memcpy(sig->u.MlmeSetChannelRequest.Address.x, &ptr[index], 48 / 8);
- index += 48 / 8;
- sig->u.MlmeSetChannelRequest.AvailabilityDuration = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetChannelRequest.AvailabilityInterval = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MEASURE_CONFIRM_ID:
- sig->u.MlmeMeasureConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeMeasureConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeMeasureConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeMeasureConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeMeasureConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeMeasureConfirm.DialogToken = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TRIGGERED_GET_REQUEST_ID:
- sig->u.MlmeAddTriggeredGetRequest.MibAttribute.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTriggeredGetRequest.MibAttribute.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTriggeredGetRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTriggeredGetRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTriggeredGetRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTriggeredGetRequest.TriggeredId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_AUTONOMOUS_SCAN_LOSS_INDICATION_ID:
- sig->u.MlmeAutonomousScanLossIndication.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAutonomousScanLossIndication.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAutonomousScanLossIndication.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAutonomousScanLossIndication.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAutonomousScanLossIndication.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- memcpy(sig->u.MlmeAutonomousScanLossIndication.Bssid.x, &ptr[index], 48 / 8);
- index += 48 / 8;
- break;
-#endif
- case CSR_MA_VIF_AVAILABILITY_RESPONSE_ID:
- sig->u.MaVifAvailabilityResponse.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaVifAvailabilityResponse.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaVifAvailabilityResponse.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaVifAvailabilityResponse.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaVifAvailabilityResponse.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaVifAvailabilityResponse.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TEMPLATE_REQUEST_ID:
- sig->u.MlmeAddTemplateRequest.Data1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTemplateRequest.Data1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTemplateRequest.Data2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTemplateRequest.Data2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTemplateRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTemplateRequest.FrameType = (CSR_FRAME_TYPE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTemplateRequest.MinTransmitRate = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_POWERMGT_CONFIRM_ID:
- sig->u.MlmePowermgtConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmePowermgtConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmePowermgtConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmePowermgtConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmePowermgtConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmePowermgtConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_PERIODIC_CONFIRM_ID:
- sig->u.MlmeAddPeriodicConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddPeriodicConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddPeriodicConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddPeriodicConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddPeriodicConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddPeriodicConfirm.PeriodicId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddPeriodicConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_CONFIRM_ID:
- sig->u.MlmeGetConfirm.MibAttributeValue.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetConfirm.MibAttributeValue.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetConfirm.Status = (CSR_MIB_STATUS) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetConfirm.ErrorIndex = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_NEXT_CONFIRM_ID:
- sig->u.MlmeGetNextConfirm.MibAttributeValue.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetNextConfirm.MibAttributeValue.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetNextConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetNextConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetNextConfirm.Status = (CSR_MIB_STATUS) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetNextConfirm.ErrorIndex = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_STOP_AGGREGATION_REQUEST_ID:
- sig->u.MlmeStopAggregationRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStopAggregationRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStopAggregationRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStopAggregationRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStopAggregationRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- memcpy(sig->u.MlmeStopAggregationRequest.PeerQstaAddress.x, &ptr[index], 48 / 8);
- index += 48 / 8;
- sig->u.MlmeStopAggregationRequest.UserPriority = (CSR_PRIORITY) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStopAggregationRequest.Direction = (CSR_DIRECTION) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_RX_TRIGGER_CONFIRM_ID:
- sig->u.MlmeAddRxTriggerConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddRxTriggerConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddRxTriggerConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddRxTriggerConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddRxTriggerConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddRxTriggerConfirm.TriggerId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddRxTriggerConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_BLACKOUT_REQUEST_ID:
- sig->u.MlmeAddBlackoutRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddBlackoutRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddBlackoutRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddBlackoutRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddBlackoutRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddBlackoutRequest.BlackoutId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddBlackoutRequest.BlackoutType = (CSR_BLACKOUT_TYPE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddBlackoutRequest.BlackoutSource = (CSR_BLACKOUT_SOURCE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddBlackoutRequest.BlackoutStartReference = CSR_GET_UINT32_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT32;
- sig->u.MlmeAddBlackoutRequest.BlackoutPeriod = CSR_GET_UINT32_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT32;
- sig->u.MlmeAddBlackoutRequest.BlackoutDuration = CSR_GET_UINT32_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT32;
- memcpy(sig->u.MlmeAddBlackoutRequest.PeerStaAddress.x, &ptr[index], 48 / 8);
- index += 48 / 8;
- sig->u.MlmeAddBlackoutRequest.BlackoutCount = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DELETEKEYS_REQUEST_ID:
- sig->u.MlmeDeletekeysRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDeletekeysRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDeletekeysRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDeletekeysRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDeletekeysRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDeletekeysRequest.KeyId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDeletekeysRequest.KeyType = (CSR_KEY_TYPE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- memcpy(sig->u.MlmeDeletekeysRequest.Address.x, &ptr[index], 48 / 8);
- index += 48 / 8;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_RESET_CONFIRM_ID:
- sig->u.MlmeResetConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeResetConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeResetConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeResetConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeResetConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_HL_SYNC_CONFIRM_ID:
- sig->u.MlmeHlSyncConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeHlSyncConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeHlSyncConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeHlSyncConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- memcpy(sig->u.MlmeHlSyncConfirm.GroupAddress.x, &ptr[index], 48 / 8);
- index += 48 / 8;
- sig->u.MlmeHlSyncConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_AUTONOMOUS_SCAN_REQUEST_ID:
- sig->u.MlmeAddAutonomousScanRequest.ChannelList.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddAutonomousScanRequest.ChannelList.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddAutonomousScanRequest.InformationElements.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddAutonomousScanRequest.InformationElements.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddAutonomousScanRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddAutonomousScanRequest.AutonomousScanId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddAutonomousScanRequest.Ifindex = (CSR_IFINTERFACE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddAutonomousScanRequest.ChannelStartingFactor = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddAutonomousScanRequest.ScanType = (CSR_SCAN_TYPE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddAutonomousScanRequest.ProbeDelay = CSR_GET_UINT32_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT32;
- sig->u.MlmeAddAutonomousScanRequest.MinChannelTime = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddAutonomousScanRequest.MaxChannelTime = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_REQUEST_ID:
- sig->u.MlmeSetRequest.MibAttributeValue.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetRequest.MibAttributeValue.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SM_START_REQUEST_ID:
- sig->u.MlmeSmStartRequest.Beacon.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSmStartRequest.Beacon.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSmStartRequest.BssParameters.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSmStartRequest.BssParameters.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSmStartRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSmStartRequest.Ifindex = (CSR_IFINTERFACE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSmStartRequest.Channel = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- memcpy(sig->u.MlmeSmStartRequest.InterfaceAddress.x, &ptr[index], 48 / 8);
- index += 48 / 8;
- memcpy(sig->u.MlmeSmStartRequest.Bssid.x, &ptr[index], 48 / 8);
- index += 48 / 8;
- sig->u.MlmeSmStartRequest.BeaconPeriod = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSmStartRequest.DtimPeriod = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSmStartRequest.CapabilityInformation = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONNECT_STATUS_CONFIRM_ID:
- sig->u.MlmeConnectStatusConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConnectStatusConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConnectStatusConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConnectStatusConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConnectStatusConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeConnectStatusConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_AUTONOMOUS_SCAN_CONFIRM_ID:
- sig->u.MlmeDelAutonomousScanConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelAutonomousScanConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelAutonomousScanConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelAutonomousScanConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelAutonomousScanConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelAutonomousScanConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelAutonomousScanConfirm.AutonomousScanId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_PERIODIC_REQUEST_ID:
- sig->u.MlmeDelPeriodicRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelPeriodicRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelPeriodicRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelPeriodicRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelPeriodicRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelPeriodicRequest.PeriodicId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SETKEYS_REQUEST_ID:
- sig->u.MlmeSetkeysRequest.Key.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetkeysRequest.Key.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetkeysRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetkeysRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetkeysRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetkeysRequest.Length = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetkeysRequest.KeyId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetkeysRequest.KeyType = (CSR_KEY_TYPE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- memcpy(sig->u.MlmeSetkeysRequest.Address.x, &ptr[index], 48 / 8);
- index += 48 / 8;
- sig->u.MlmeSetkeysRequest.SequenceNumber[0] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetkeysRequest.SequenceNumber[1] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetkeysRequest.SequenceNumber[2] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetkeysRequest.SequenceNumber[3] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetkeysRequest.SequenceNumber[4] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetkeysRequest.SequenceNumber[5] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetkeysRequest.SequenceNumber[6] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetkeysRequest.SequenceNumber[7] = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- memcpy(&sig->u.MlmeSetkeysRequest.CipherSuiteSelector, &ptr[index], 32 / 8);
- index += 32 / 8;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_PAUSE_AUTONOMOUS_SCAN_REQUEST_ID:
- sig->u.MlmePauseAutonomousScanRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmePauseAutonomousScanRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmePauseAutonomousScanRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmePauseAutonomousScanRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmePauseAutonomousScanRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmePauseAutonomousScanRequest.AutonomousScanId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmePauseAutonomousScanRequest.Pause = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_REQUEST_ID:
- sig->u.MlmeGetRequest.MibAttribute.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetRequest.MibAttribute.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeGetRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_POWERMGT_REQUEST_ID:
- sig->u.MlmePowermgtRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmePowermgtRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmePowermgtRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmePowermgtRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmePowermgtRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmePowermgtRequest.PowerManagementMode = (CSR_POWER_MANAGEMENT_MODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmePowermgtRequest.ReceiveDtims = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmePowermgtRequest.ListenInterval = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmePowermgtRequest.TrafficWindow = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
- case CSR_MA_PACKET_ERROR_INDICATION_ID:
- sig->u.MaPacketErrorIndication.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketErrorIndication.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketErrorIndication.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketErrorIndication.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketErrorIndication.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- memcpy(sig->u.MaPacketErrorIndication.PeerQstaAddress.x, &ptr[index], 48 / 8);
- index += 48 / 8;
- sig->u.MaPacketErrorIndication.UserPriority = (CSR_PRIORITY) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketErrorIndication.SequenceNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_PERIODIC_REQUEST_ID:
- sig->u.MlmeAddPeriodicRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddPeriodicRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddPeriodicRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddPeriodicRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddPeriodicRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddPeriodicRequest.PeriodicId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddPeriodicRequest.MaximumLatency = CSR_GET_UINT32_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT32;
- sig->u.MlmeAddPeriodicRequest.PeriodicSchedulingMode = (CSR_PERIODIC_SCHEDULING_MODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddPeriodicRequest.WakeHost = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddPeriodicRequest.UserPriority = (CSR_PRIORITY) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TSPEC_REQUEST_ID:
- sig->u.MlmeAddTspecRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTspecRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTspecRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTspecRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTspecRequest.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTspecRequest.UserPriority = (CSR_PRIORITY) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTspecRequest.Direction = (CSR_DIRECTION) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTspecRequest.PsScheme = (CSR_PS_SCHEME) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTspecRequest.MediumTime = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTspecRequest.ServiceStartTime = CSR_GET_UINT32_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT32;
- sig->u.MlmeAddTspecRequest.ServiceInterval = CSR_GET_UINT32_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT32;
- sig->u.MlmeAddTspecRequest.MinimumDataRate = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_MULTICAST_ADDRESS_CONFIRM_ID:
- sig->u.MlmeAddMulticastAddressConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddMulticastAddressConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddMulticastAddressConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddMulticastAddressConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddMulticastAddressConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddMulticastAddressConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TSPEC_CONFIRM_ID:
- sig->u.MlmeAddTspecConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTspecConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTspecConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTspecConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTspecConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTspecConfirm.UserPriority = (CSR_PRIORITY) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTspecConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_HL_SYNC_CANCEL_CONFIRM_ID:
- sig->u.MlmeHlSyncCancelConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeHlSyncCancelConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeHlSyncCancelConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeHlSyncCancelConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeHlSyncCancelConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SCAN_CONFIRM_ID:
- sig->u.MlmeScanConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeScanConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeScanConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeScanConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeScanConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeScanConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
- case CSR_DEBUG_STRING_INDICATION_ID:
- sig->u.DebugStringIndication.DebugMessage.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugStringIndication.DebugMessage.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugStringIndication.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.DebugStringIndication.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TEMPLATE_CONFIRM_ID:
- sig->u.MlmeAddTemplateConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTemplateConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTemplateConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTemplateConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTemplateConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTemplateConfirm.FrameType = (CSR_FRAME_TYPE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeAddTemplateConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_BLOCKACK_ERROR_INDICATION_ID:
- sig->u.MlmeBlockackErrorIndication.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeBlockackErrorIndication.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeBlockackErrorIndication.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeBlockackErrorIndication.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeBlockackErrorIndication.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeBlockackErrorIndication.ResultCode = (CSR_REASON_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- memcpy(sig->u.MlmeBlockackErrorIndication.PeerQstaAddress.x, &ptr[index], 48 / 8);
- index += 48 / 8;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_CONFIRM_ID:
- sig->u.MlmeSetConfirm.MibAttributeValue.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetConfirm.MibAttributeValue.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetConfirm.Status = (CSR_MIB_STATUS) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeSetConfirm.ErrorIndex = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MEASURE_REQUEST_ID:
- sig->u.MlmeMeasureRequest.MeasurementRequestSet.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeMeasureRequest.MeasurementRequestSet.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeMeasureRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeMeasureRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeMeasureRequest.DialogToken = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_START_AGGREGATION_CONFIRM_ID:
- sig->u.MlmeStartAggregationConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStartAggregationConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStartAggregationConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStartAggregationConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStartAggregationConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- memcpy(sig->u.MlmeStartAggregationConfirm.PeerQstaAddress.x, &ptr[index], 48 / 8);
- index += 48 / 8;
- sig->u.MlmeStartAggregationConfirm.UserPriority = (CSR_PRIORITY) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStartAggregationConfirm.Direction = (CSR_DIRECTION) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStartAggregationConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStartAggregationConfirm.SequenceNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_STOP_MEASURE_CONFIRM_ID:
- sig->u.MlmeStopMeasureConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStopMeasureConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStopMeasureConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStopMeasureConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStopMeasureConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStopMeasureConfirm.DialogToken = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
- case CSR_MA_PACKET_CONFIRM_ID:
- sig->u.MaPacketConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketConfirm.TransmissionStatus = (CSR_TRANSMISSION_STATUS) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketConfirm.RetryCount = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketConfirm.Rate = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MaPacketConfirm.HostTag = CSR_GET_UINT32_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT32;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_PERIODIC_CONFIRM_ID:
- sig->u.MlmeDelPeriodicConfirm.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelPeriodicConfirm.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelPeriodicConfirm.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelPeriodicConfirm.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelPeriodicConfirm.VirtualInterfaceIdentifier = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelPeriodicConfirm.PeriodicId = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeDelPeriodicConfirm.ResultCode = (CSR_RESULT_CODE) CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_STOP_MEASURE_REQUEST_ID:
- sig->u.MlmeStopMeasureRequest.Dummydataref1.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStopMeasureRequest.Dummydataref1.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStopMeasureRequest.Dummydataref2.SlotNumber = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStopMeasureRequest.Dummydataref2.DataLength = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- sig->u.MlmeStopMeasureRequest.DialogToken = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-
- default:
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
- return CSR_RESULT_SUCCESS;
-} /* read_unpack_signal() */
-
-
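Every case in read_unpack_signal() above follows the same generated idiom: read a fixed-width little-endian field at ptr + index, then advance index by the size of that field. The following is a minimal sketch of what the accessor macros amount to; the example_* helper names are illustrative only (the real macros are defined elsewhere in the driver), and the kernel's u8/u16/u32 types are assumed to be in scope.

/* Illustrative equivalents of the little-endian accessors used above. */
static inline u16 example_get_le16(const u8 *p)
{
	return (u16)p[0] | ((u16)p[1] << 8);
}

static inline u32 example_get_le32(const u8 *p)
{
	return (u32)p[0] | ((u32)p[1] << 8) |
	       ((u32)p[2] << 16) | ((u32)p[3] << 24);
}

/*
 * Per-field step, repeated by the generator for every signal member:
 *	field = example_get_le16(ptr + index);
 *	index += SIZEOF_UINT16;
 */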
-/*
- * ---------------------------------------------------------------------------
- * write_pack
- *
- * Convert a signal structure, in host-native format, to the
- * little-endian wire format specified in the UniFi Host Interface
- * Protocol Specification.
- *
- * WARNING: This function is auto-generated, DO NOT EDIT!
- *
- * Arguments:
- * sig Pointer to signal structure to pack.
- * ptr Destination buffer to pack into.
- * sig_len Returns the length of the packed signal, i.e. the
- * number of bytes written to ptr.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success,
- * CSR_WIFI_HIP_RESULT_INVALID_VALUE if the signal ID was not recognised.
- * ---------------------------------------------------------------------------
- */
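For orientation before the generated body below, here is a minimal caller-side sketch of the packing direction this comment describes. It is illustrative only: the 256-byte buffer bound and the tx callback are assumptions, not driver code; only write_pack(), CSR_SIGNAL, CsrResult and CSR_RESULT_SUCCESS come from this file and its headers.

/* Illustrative sketch only -- not part of the driver source. */
static CsrResult example_send(const CSR_SIGNAL *sig,
			      void (*tx)(const u8 *data, u16 len))
{
	u8 buf[256];	/* assumed upper bound on a packed signal */
	u16 len = 0;
	CsrResult r;

	r = write_pack(sig, buf, &len);
	if (r != CSR_RESULT_SUCCESS)
		return r;	/* e.g. unknown SignalId: nothing was written */

	tx(buf, len);	/* buf now holds 'len' bytes of little-endian wire format */
	return CSR_RESULT_SUCCESS;
}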
-CsrResult write_pack(const CSR_SIGNAL *sig, u8 *ptr, u16 *sig_len)
-{
- s16 index = 0;
-
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->SignalPrimitiveHeader.SignalId, ptr + index);
- index += SIZEOF_UINT16;
-
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->SignalPrimitiveHeader.ReceiverProcessId, ptr + index);
- index += SIZEOF_UINT16;
-
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->SignalPrimitiveHeader.SenderProcessId, ptr + index);
- index += SIZEOF_UINT16;
-
- switch (sig->SignalPrimitiveHeader.SignalId)
- {
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_PACKET_FILTER_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetPacketFilterConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetPacketFilterConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetPacketFilterConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetPacketFilterConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetPacketFilterConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetPacketFilterConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SETKEYS_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONFIG_QUEUE_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_AUTONOMOUS_SCAN_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanConfirm.AutonomousScanId, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_BLACKOUT_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutConfirm.BlackoutId, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_BLACKOUT_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelBlackoutRequest.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelBlackoutRequest.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelBlackoutRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelBlackoutRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelBlackoutRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelBlackoutRequest.BlackoutId, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_KEY_SEQUENCE_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[0], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[1], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[2], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[3], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[4], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[5], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[6], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceConfirm.SequenceNumber[7], ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SM_START_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_STOP_AGGREGATION_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- memcpy(ptr + index, sig->u.MlmeStopAggregationConfirm.PeerQstaAddress.x, 48 / 8);
- index += 48 / 8;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationConfirm.UserPriority, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationConfirm.Direction, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_TSPEC_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecRequest.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecRequest.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecRequest.UserPriority, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecRequest.Direction, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
- case CSR_DEBUG_WORD16_INDICATION_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[0], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[1], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[2], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[3], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[4], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[5], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[6], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[7], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[8], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[9], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[10], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[11], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[12], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[13], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[14], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugWord16Indication.DebugWords[15], ptr + index);
- index += SIZEOF_UINT16;
- break;
- case CSR_DEBUG_GENERIC_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericConfirm.DebugVariable.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericConfirm.DebugVariable.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericConfirm.DebugWords[0], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericConfirm.DebugWords[1], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericConfirm.DebugWords[2], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericConfirm.DebugWords[3], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericConfirm.DebugWords[4], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericConfirm.DebugWords[5], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericConfirm.DebugWords[6], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericConfirm.DebugWords[7], ptr + index);
- index += SIZEOF_UINT16;
- break;
- case CSR_MA_PACKET_INDICATION_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketIndication.Data.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketIndication.Data.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketIndication.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketIndication.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketIndication.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- memcpy(ptr + index, sig->u.MaPacketIndication.LocalTime.x, 64 / 8);
- index += 64 / 8;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketIndication.Ifindex, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketIndication.Channel, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketIndication.ReceptionStatus, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketIndication.Rssi, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketIndication.Snr, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketIndication.ReceivedRate, ptr + index);
- index += SIZEOF_UINT16;
- break;
- case CSR_MLME_SET_TIM_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetTimRequest.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetTimRequest.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetTimRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetTimRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetTimRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetTimRequest.AssociationId, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetTimRequest.TimValue, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONNECTED_INDICATION_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectedIndication.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectedIndication.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectedIndication.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectedIndication.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectedIndication.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectedIndication.ConnectionStatus, ptr + index);
- index += SIZEOF_UINT16;
- memcpy(ptr + index, sig->u.MlmeConnectedIndication.PeerMacAddress.x, 48 / 8);
- index += 48 / 8;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_RX_TRIGGER_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelRxTriggerRequest.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelRxTriggerRequest.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelRxTriggerRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelRxTriggerRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelRxTriggerRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelRxTriggerRequest.TriggerId, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_TRIGGERED_GET_INDICATION_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeTriggeredGetIndication.MibAttributeValue.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeTriggeredGetIndication.MibAttributeValue.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeTriggeredGetIndication.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeTriggeredGetIndication.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeTriggeredGetIndication.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeTriggeredGetIndication.Status, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeTriggeredGetIndication.ErrorIndex, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeTriggeredGetIndication.TriggeredId, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SCAN_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanRequest.ChannelList.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanRequest.ChannelList.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanRequest.InformationElements.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanRequest.InformationElements.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanRequest.Ifindex, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanRequest.ScanType, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT32_TO_LITTLE_ENDIAN(sig->u.MlmeScanRequest.ProbeDelay, ptr + index);
- index += SIZEOF_UINT32;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanRequest.MinChannelTime, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanRequest.MaxChannelTime, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DELETEKEYS_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDeletekeysConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDeletekeysConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDeletekeysConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDeletekeysConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDeletekeysConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDeletekeysConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_NEXT_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetNextRequest.MibAttribute.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetNextRequest.MibAttribute.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetNextRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetNextRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_CHANNEL_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_START_AGGREGATION_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationRequest.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationRequest.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- memcpy(ptr + index, sig->u.MlmeStartAggregationRequest.PeerQstaAddress.x, 48 / 8);
- index += 48 / 8;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationRequest.UserPriority, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationRequest.Direction, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationRequest.StartingSequenceNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationRequest.BufferSize, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationRequest.BlockAckTimeout, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_HL_SYNC_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncRequest.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncRequest.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- memcpy(ptr + index, sig->u.MlmeHlSyncRequest.GroupAddress.x, 48 / 8);
- index += 48 / 8;
- break;
-#endif
- case CSR_DEBUG_GENERIC_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericRequest.DebugVariable.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericRequest.DebugVariable.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericRequest.DebugWords[0], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericRequest.DebugWords[1], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericRequest.DebugWords[2], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericRequest.DebugWords[3], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericRequest.DebugWords[4], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericRequest.DebugWords[5], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericRequest.DebugWords[6], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericRequest.DebugWords[7], ptr + index);
- index += SIZEOF_UINT16;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_LEAVE_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeLeaveConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeLeaveConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeLeaveConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeLeaveConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeLeaveConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeLeaveConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_TRIGGERED_GET_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTriggeredGetRequest.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTriggeredGetRequest.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTriggeredGetRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTriggeredGetRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTriggeredGetRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTriggeredGetRequest.TriggeredId, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_MULTICAST_ADDRESS_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddMulticastAddressRequest.Data.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddMulticastAddressRequest.Data.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddMulticastAddressRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddMulticastAddressRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddMulticastAddressRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddMulticastAddressRequest.NumberOfMulticastGroupAddresses, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_RESET_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeResetRequest.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeResetRequest.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeResetRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeResetRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- memcpy(ptr + index, sig->u.MlmeResetRequest.StaAddress.x, 48 / 8);
- index += 48 / 8;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeResetRequest.SetDefaultMib, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SCAN_CANCEL_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanCancelRequest.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanCancelRequest.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanCancelRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanCancelRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanCancelRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TRIGGERED_GET_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTriggeredGetConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTriggeredGetConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTriggeredGetConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTriggeredGetConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTriggeredGetConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTriggeredGetConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTriggeredGetConfirm.TriggeredId, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_PACKET_FILTER_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetPacketFilterRequest.InformationElements.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetPacketFilterRequest.InformationElements.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetPacketFilterRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetPacketFilterRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetPacketFilterRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetPacketFilterRequest.PacketFilterMode, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT32_TO_LITTLE_ENDIAN(sig->u.MlmeSetPacketFilterRequest.ArpFilterAddress, ptr + index);
- index += SIZEOF_UINT32;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_RX_TRIGGER_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelRxTriggerConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelRxTriggerConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelRxTriggerConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelRxTriggerConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelRxTriggerConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelRxTriggerConfirm.TriggerId, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelRxTriggerConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONNECT_STATUS_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusRequest.InformationElements.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusRequest.InformationElements.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusRequest.ConnectionStatus, ptr + index);
- index += SIZEOF_UINT16;
- memcpy(ptr + index, sig->u.MlmeConnectStatusRequest.StaAddress.x, 48 / 8);
- index += 48 / 8;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusRequest.AssociationId, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusRequest.AssociationCapabilityInformation, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_LEAVE_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeLeaveRequest.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeLeaveRequest.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeLeaveRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeLeaveRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeLeaveRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONFIG_QUEUE_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueRequest.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueRequest.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueRequest.QueueIndex, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueRequest.Aifs, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueRequest.Cwmin, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueRequest.Cwmax, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConfigQueueRequest.TxopLimit, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_TSPEC_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecConfirm.UserPriority, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTspecConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
- case CSR_MLME_SET_TIM_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetTimConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetTimConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetTimConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetTimConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetTimConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetTimConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MEASURE_INDICATION_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureIndication.MeasurementReportSet.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureIndication.MeasurementReportSet.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureIndication.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureIndication.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureIndication.DialogToken, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_BLACKOUT_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelBlackoutConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelBlackoutConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelBlackoutConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelBlackoutConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelBlackoutConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelBlackoutConfirm.BlackoutId, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelBlackoutConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_TRIGGERED_GET_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTriggeredGetConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTriggeredGetConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTriggeredGetConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTriggeredGetConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTriggeredGetConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTriggeredGetConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelTriggeredGetConfirm.TriggeredId, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
- case CSR_DEBUG_GENERIC_INDICATION_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericIndication.DebugVariable.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericIndication.DebugVariable.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericIndication.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericIndication.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericIndication.DebugWords[0], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericIndication.DebugWords[1], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericIndication.DebugWords[2], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericIndication.DebugWords[3], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericIndication.DebugWords[4], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericIndication.DebugWords[5], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericIndication.DebugWords[6], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugGenericIndication.DebugWords[7], ptr + index);
- index += SIZEOF_UINT16;
- break;
- case CSR_MA_PACKET_CANCEL_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketCancelRequest.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketCancelRequest.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketCancelRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketCancelRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketCancelRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT32_TO_LITTLE_ENDIAN(sig->u.MaPacketCancelRequest.HostTag, ptr + index);
- index += SIZEOF_UINT32;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MODIFY_BSS_PARAMETER_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_PAUSE_AUTONOMOUS_SCAN_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanConfirm.AutonomousScanId, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
- case CSR_MA_PACKET_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketRequest.Data.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketRequest.Data.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketRequest.TransmitRate, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT32_TO_LITTLE_ENDIAN(sig->u.MaPacketRequest.HostTag, ptr + index);
- index += SIZEOF_UINT32;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketRequest.Priority, ptr + index);
- index += SIZEOF_UINT16;
- memcpy(ptr + index, sig->u.MaPacketRequest.Ra.x, 48 / 8);
- index += 48 / 8;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketRequest.TransmissionControl, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MODIFY_BSS_PARAMETER_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterRequest.Data.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterRequest.Data.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterRequest.BeaconPeriod, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterRequest.DtimPeriod, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterRequest.CapabilityInformation, ptr + index);
- index += SIZEOF_UINT16;
- memcpy(ptr + index, sig->u.MlmeModifyBssParameterRequest.Bssid.x, 48 / 8);
- index += 48 / 8;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeModifyBssParameterRequest.RtsThreshold, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_RX_TRIGGER_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerRequest.InformationElements.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerRequest.InformationElements.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerRequest.TriggerId, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerRequest.Priority, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
- case CSR_MA_VIF_AVAILABILITY_INDICATION_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaVifAvailabilityIndication.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaVifAvailabilityIndication.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaVifAvailabilityIndication.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaVifAvailabilityIndication.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaVifAvailabilityIndication.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaVifAvailabilityIndication.Multicast, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_HL_SYNC_CANCEL_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncCancelRequest.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncCancelRequest.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncCancelRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncCancelRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- memcpy(ptr + index, sig->u.MlmeHlSyncCancelRequest.GroupAddress.x, 48 / 8);
- index += 48 / 8;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_AUTONOMOUS_SCAN_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelAutonomousScanRequest.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelAutonomousScanRequest.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelAutonomousScanRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelAutonomousScanRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelAutonomousScanRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelAutonomousScanRequest.AutonomousScanId, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_BLACKOUT_ENDED_INDICATION_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeBlackoutEndedIndication.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeBlackoutEndedIndication.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeBlackoutEndedIndication.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeBlackoutEndedIndication.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeBlackoutEndedIndication.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeBlackoutEndedIndication.BlackoutId, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_AUTONOMOUS_SCAN_DONE_INDICATION_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAutonomousScanDoneIndication.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAutonomousScanDoneIndication.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAutonomousScanDoneIndication.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAutonomousScanDoneIndication.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAutonomousScanDoneIndication.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAutonomousScanDoneIndication.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAutonomousScanDoneIndication.AutonomousScanId, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_KEY_SEQUENCE_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceRequest.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceRequest.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceRequest.KeyId, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetKeySequenceRequest.KeyType, ptr + index);
- index += SIZEOF_UINT16;
- memcpy(ptr + index, sig->u.MlmeGetKeySequenceRequest.Address.x, 48 / 8);
- index += 48 / 8;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_CHANNEL_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelRequest.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelRequest.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelRequest.Ifindex, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelRequest.Channel, ptr + index);
- index += SIZEOF_UINT16;
- memcpy(ptr + index, sig->u.MlmeSetChannelRequest.Address.x, 48 / 8);
- index += 48 / 8;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelRequest.AvailabilityDuration, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetChannelRequest.AvailabilityInterval, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MEASURE_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureConfirm.DialogToken, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TRIGGERED_GET_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTriggeredGetRequest.MibAttribute.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTriggeredGetRequest.MibAttribute.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTriggeredGetRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTriggeredGetRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTriggeredGetRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTriggeredGetRequest.TriggeredId, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_AUTONOMOUS_SCAN_LOSS_INDICATION_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAutonomousScanLossIndication.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAutonomousScanLossIndication.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAutonomousScanLossIndication.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAutonomousScanLossIndication.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAutonomousScanLossIndication.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- memcpy(ptr + index, sig->u.MlmeAutonomousScanLossIndication.Bssid.x, 48 / 8);
- index += 48 / 8;
- break;
-#endif
- case CSR_MA_VIF_AVAILABILITY_RESPONSE_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaVifAvailabilityResponse.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaVifAvailabilityResponse.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaVifAvailabilityResponse.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaVifAvailabilityResponse.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaVifAvailabilityResponse.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaVifAvailabilityResponse.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TEMPLATE_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateRequest.Data1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateRequest.Data1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateRequest.Data2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateRequest.Data2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateRequest.FrameType, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateRequest.MinTransmitRate, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_POWERMGT_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_PERIODIC_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicConfirm.PeriodicId, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetConfirm.MibAttributeValue.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetConfirm.MibAttributeValue.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetConfirm.Status, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetConfirm.ErrorIndex, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_NEXT_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetNextConfirm.MibAttributeValue.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetNextConfirm.MibAttributeValue.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetNextConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetNextConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetNextConfirm.Status, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetNextConfirm.ErrorIndex, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_STOP_AGGREGATION_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationRequest.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationRequest.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- memcpy(ptr + index, sig->u.MlmeStopAggregationRequest.PeerQstaAddress.x, 48 / 8);
- index += 48 / 8;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationRequest.UserPriority, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopAggregationRequest.Direction, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_RX_TRIGGER_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerConfirm.TriggerId, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddRxTriggerConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_BLACKOUT_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutRequest.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutRequest.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutRequest.BlackoutId, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutRequest.BlackoutType, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutRequest.BlackoutSource, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT32_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutRequest.BlackoutStartReference, ptr + index);
- index += SIZEOF_UINT32;
- CSR_COPY_UINT32_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutRequest.BlackoutPeriod, ptr + index);
- index += SIZEOF_UINT32;
- CSR_COPY_UINT32_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutRequest.BlackoutDuration, ptr + index);
- index += SIZEOF_UINT32;
- memcpy(ptr + index, sig->u.MlmeAddBlackoutRequest.PeerStaAddress.x, 48 / 8);
- index += 48 / 8;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddBlackoutRequest.BlackoutCount, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DELETEKEYS_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDeletekeysRequest.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDeletekeysRequest.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDeletekeysRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDeletekeysRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDeletekeysRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDeletekeysRequest.KeyId, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDeletekeysRequest.KeyType, ptr + index);
- index += SIZEOF_UINT16;
- memcpy(ptr + index, sig->u.MlmeDeletekeysRequest.Address.x, 48 / 8);
- index += 48 / 8;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_RESET_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeResetConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeResetConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeResetConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeResetConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeResetConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_HL_SYNC_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- memcpy(ptr + index, sig->u.MlmeHlSyncConfirm.GroupAddress.x, 48 / 8);
- index += 48 / 8;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_AUTONOMOUS_SCAN_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanRequest.ChannelList.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanRequest.ChannelList.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanRequest.InformationElements.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanRequest.InformationElements.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanRequest.AutonomousScanId, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanRequest.Ifindex, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanRequest.ChannelStartingFactor, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanRequest.ScanType, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT32_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanRequest.ProbeDelay, ptr + index);
- index += SIZEOF_UINT32;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanRequest.MinChannelTime, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddAutonomousScanRequest.MaxChannelTime, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetRequest.MibAttributeValue.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetRequest.MibAttributeValue.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SM_START_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartRequest.Beacon.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartRequest.Beacon.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartRequest.BssParameters.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartRequest.BssParameters.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartRequest.Ifindex, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartRequest.Channel, ptr + index);
- index += SIZEOF_UINT16;
- memcpy(ptr + index, sig->u.MlmeSmStartRequest.InterfaceAddress.x, 48 / 8);
- index += 48 / 8;
- memcpy(ptr + index, sig->u.MlmeSmStartRequest.Bssid.x, 48 / 8);
- index += 48 / 8;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartRequest.BeaconPeriod, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartRequest.DtimPeriod, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSmStartRequest.CapabilityInformation, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONNECT_STATUS_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeConnectStatusConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_AUTONOMOUS_SCAN_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelAutonomousScanConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelAutonomousScanConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelAutonomousScanConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelAutonomousScanConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelAutonomousScanConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelAutonomousScanConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelAutonomousScanConfirm.AutonomousScanId, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_PERIODIC_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelPeriodicRequest.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelPeriodicRequest.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelPeriodicRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelPeriodicRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelPeriodicRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelPeriodicRequest.PeriodicId, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SETKEYS_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.Key.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.Key.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.Length, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.KeyId, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.KeyType, ptr + index);
- index += SIZEOF_UINT16;
- memcpy(ptr + index, sig->u.MlmeSetkeysRequest.Address.x, 48 / 8);
- index += 48 / 8;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.SequenceNumber[0], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.SequenceNumber[1], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.SequenceNumber[2], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.SequenceNumber[3], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.SequenceNumber[4], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.SequenceNumber[5], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.SequenceNumber[6], ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetkeysRequest.SequenceNumber[7], ptr + index);
- index += SIZEOF_UINT16;
- memcpy(ptr + index, &sig->u.MlmeSetkeysRequest.CipherSuiteSelector, 32 / 8);
- index += 32 / 8;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_PAUSE_AUTONOMOUS_SCAN_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanRequest.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanRequest.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanRequest.AutonomousScanId, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePauseAutonomousScanRequest.Pause, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetRequest.MibAttribute.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetRequest.MibAttribute.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeGetRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_POWERMGT_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtRequest.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtRequest.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtRequest.PowerManagementMode, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtRequest.ReceiveDtims, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtRequest.ListenInterval, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmePowermgtRequest.TrafficWindow, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
- case CSR_MA_PACKET_ERROR_INDICATION_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketErrorIndication.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketErrorIndication.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketErrorIndication.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketErrorIndication.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketErrorIndication.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- memcpy(ptr + index, sig->u.MaPacketErrorIndication.PeerQstaAddress.x, 48 / 8);
- index += 48 / 8;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketErrorIndication.UserPriority, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketErrorIndication.SequenceNumber, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_PERIODIC_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicRequest.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicRequest.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicRequest.PeriodicId, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT32_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicRequest.MaximumLatency, ptr + index);
- index += SIZEOF_UINT32;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicRequest.PeriodicSchedulingMode, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicRequest.WakeHost, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddPeriodicRequest.UserPriority, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TSPEC_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecRequest.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecRequest.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecRequest.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecRequest.UserPriority, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecRequest.Direction, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecRequest.PsScheme, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecRequest.MediumTime, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT32_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecRequest.ServiceStartTime, ptr + index);
- index += SIZEOF_UINT32;
- CSR_COPY_UINT32_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecRequest.ServiceInterval, ptr + index);
- index += SIZEOF_UINT32;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecRequest.MinimumDataRate, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_MULTICAST_ADDRESS_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddMulticastAddressConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddMulticastAddressConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddMulticastAddressConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddMulticastAddressConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddMulticastAddressConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddMulticastAddressConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TSPEC_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecConfirm.UserPriority, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTspecConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_HL_SYNC_CANCEL_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncCancelConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncCancelConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncCancelConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncCancelConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeHlSyncCancelConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SCAN_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeScanConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
- case CSR_DEBUG_STRING_INDICATION_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugStringIndication.DebugMessage.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugStringIndication.DebugMessage.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugStringIndication.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.DebugStringIndication.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TEMPLATE_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateConfirm.FrameType, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeAddTemplateConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_BLOCKACK_ERROR_INDICATION_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeBlockackErrorIndication.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeBlockackErrorIndication.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeBlockackErrorIndication.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeBlockackErrorIndication.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeBlockackErrorIndication.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeBlockackErrorIndication.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- memcpy(ptr + index, sig->u.MlmeBlockackErrorIndication.PeerQstaAddress.x, 48 / 8);
- index += 48 / 8;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetConfirm.MibAttributeValue.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetConfirm.MibAttributeValue.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetConfirm.Status, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeSetConfirm.ErrorIndex, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MEASURE_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureRequest.MeasurementRequestSet.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureRequest.MeasurementRequestSet.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeMeasureRequest.DialogToken, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_START_AGGREGATION_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- memcpy(ptr + index, sig->u.MlmeStartAggregationConfirm.PeerQstaAddress.x, 48 / 8);
- index += 48 / 8;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationConfirm.UserPriority, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationConfirm.Direction, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStartAggregationConfirm.SequenceNumber, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_STOP_MEASURE_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopMeasureConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopMeasureConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopMeasureConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopMeasureConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopMeasureConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopMeasureConfirm.DialogToken, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
- case CSR_MA_PACKET_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketConfirm.TransmissionStatus, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketConfirm.RetryCount, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MaPacketConfirm.Rate, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT32_TO_LITTLE_ENDIAN(sig->u.MaPacketConfirm.HostTag, ptr + index);
- index += SIZEOF_UINT32;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_PERIODIC_CONFIRM_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelPeriodicConfirm.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelPeriodicConfirm.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelPeriodicConfirm.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelPeriodicConfirm.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelPeriodicConfirm.VirtualInterfaceIdentifier, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelPeriodicConfirm.PeriodicId, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeDelPeriodicConfirm.ResultCode, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_STOP_MEASURE_REQUEST_ID:
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopMeasureRequest.Dummydataref1.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopMeasureRequest.Dummydataref1.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopMeasureRequest.Dummydataref2.SlotNumber, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopMeasureRequest.Dummydataref2.DataLength, ptr + index);
- index += SIZEOF_UINT16;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(sig->u.MlmeStopMeasureRequest.DialogToken, ptr + index);
- index += SIZEOF_UINT16;
- break;
-#endif
-
- default:
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- *sig_len = index;
-
- return CSR_RESULT_SUCCESS;
-} /* write_pack() */
-
-
diff --git a/drivers/staging/csr/csr_wifi_hip_send.c b/drivers/staging/csr/csr_wifi_hip_send.c
deleted file mode 100644
index 76429e5..0000000
--- a/drivers/staging/csr/csr_wifi_hip_send.c
+++ /dev/null
@@ -1,415 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/*
- * ***************************************************************************
- *
- * FILE: csr_wifi_hip_send.c
- *
- * PURPOSE:
- * Code for adding a signal request to the from-host queue.
- * When the driver bottom-half is run, it will take requests from the
- * queue and pass them to the UniFi.
- *
- * ***************************************************************************
- */
-#include "csr_wifi_hip_unifi.h"
-#include "csr_wifi_hip_conversions.h"
-#include "csr_wifi_hip_sigs.h"
-#include "csr_wifi_hip_card.h"
-
-unifi_TrafficQueue unifi_frame_priority_to_queue(CSR_PRIORITY priority)
-{
- switch (priority)
- {
- case CSR_QOS_UP0:
- case CSR_QOS_UP3:
- return UNIFI_TRAFFIC_Q_BE;
- case CSR_QOS_UP1:
- case CSR_QOS_UP2:
- return UNIFI_TRAFFIC_Q_BK;
- case CSR_QOS_UP4:
- case CSR_QOS_UP5:
- return UNIFI_TRAFFIC_Q_VI;
- case CSR_QOS_UP6:
- case CSR_QOS_UP7:
- case CSR_MANAGEMENT:
- return UNIFI_TRAFFIC_Q_VO;
- default:
- return UNIFI_TRAFFIC_Q_BE;
- }
-}
-
-
-CSR_PRIORITY unifi_get_default_downgrade_priority(unifi_TrafficQueue queue)
-{
- switch (queue)
- {
- case UNIFI_TRAFFIC_Q_BE:
- return CSR_QOS_UP0;
- case UNIFI_TRAFFIC_Q_BK:
- return CSR_QOS_UP1;
- case UNIFI_TRAFFIC_Q_VI:
- return CSR_QOS_UP5;
- case UNIFI_TRAFFIC_Q_VO:
- return CSR_QOS_UP6;
- default:
- return CSR_QOS_UP0;
- }
-}
-
-
-/*
- * ---------------------------------------------------------------------------
- * send_signal
- *
- * This function queues a signal for sending to UniFi. It first checks
- * that there is space on the fh_signal_queue for another entry, then
- * claims any bulk data slots required and copies data into them, and
- * finally increments the fh_signal_queue write count.
- *
- * The fh_signal_queue is later processed by the driver bottom half
- * (in unifi_bh()).
- *
- * This function calls unifi_pause_xmit() to pause the flow of data plane
- * packets when:
- * - the fh_signal_queue ring buffer is full
- * - there are fewer than UNIFI_MAX_DATA_REFERENCES (2) bulk data
- * slots available.
- *
- * Arguments:
- * card Pointer to card context structure
- * sigptr Pointer to the signal to write to UniFi.
- * siglen Number of bytes pointed to by sigptr.
- * bulkdata Array of pointers to the associated bulk data.
- * sigq The from-host queue to which the signal is added.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success
- * CSR_WIFI_HIP_RESULT_NO_SPACE if there were insufficient data slots or
- * no free signal queue entry
- *
- * Notes:
- * Calls unifi_pause_xmit() when the last slots are used.
- * ---------------------------------------------------------------------------
- */
-static CsrResult send_signal(card_t *card, const u8 *sigptr, u32 siglen,
- const bulk_data_param_t *bulkdata,
- q_t *sigq, u32 priority_q, u32 run_bh)
-{
- u16 i, data_slot_size;
- card_signal_t *csptr;
- s16 qe;
- CsrResult r;
- s16 debug_print = 0;
-
- data_slot_size = CardGetDataSlotSize(card);
-
- /* Check that the selected from-host signal queue has a free slot */
- if (!CSR_WIFI_HIP_Q_SLOTS_FREE(sigq))
- {
- unifi_trace(card->ospriv, UDBG3, "send_signal: %s full\n", sigq->name);
-
- return CSR_WIFI_HIP_RESULT_NO_SPACE;
- }
-
- /*
- * Now add the signal to the From Host signal queue
- */
- /* Get next slot on queue */
- qe = CSR_WIFI_HIP_Q_NEXT_W_SLOT(sigq);
- csptr = CSR_WIFI_HIP_Q_SLOT_DATA(sigq, qe);
-
- /* Make up the card_signal struct */
- csptr->signal_length = (u16)siglen;
- memcpy((void *)csptr->sigbuf, (void *)sigptr, siglen);
-
- for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; ++i)
- {
- if ((bulkdata != NULL) && (bulkdata->d[i].data_length != 0))
- {
- u32 datalen = bulkdata->d[i].data_length;
-
- /* Make sure data will fit in a bulk data slot */
- if (bulkdata->d[i].os_data_ptr == NULL)
- {
- unifi_error(card->ospriv, "send_signal - NULL bulkdata[%d]\n", i);
- debug_print++;
- csptr->bulkdata[i].data_length = 0;
- }
- else
- {
- if (datalen > data_slot_size)
- {
- unifi_error(card->ospriv,
- "send_signal - Invalid data length %u (@%p), "
- "truncating\n",
- datalen, bulkdata->d[i].os_data_ptr);
- datalen = data_slot_size;
- debug_print++;
- }
- /* Store the bulk data info in the soft queue. */
- csptr->bulkdata[i].os_data_ptr = (u8 *)bulkdata->d[i].os_data_ptr;
- csptr->bulkdata[i].os_net_buf_ptr = (u8 *)bulkdata->d[i].os_net_buf_ptr;
- csptr->bulkdata[i].net_buf_length = bulkdata->d[i].net_buf_length;
- csptr->bulkdata[i].data_length = datalen;
- }
- }
- else
- {
- UNIFI_INIT_BULK_DATA(&csptr->bulkdata[i]);
- }
- }
-
- if (debug_print)
- {
- const u8 *sig = sigptr;
-
- unifi_error(card->ospriv, "Signal(%d): %*ph\n", siglen,
- 16, sig);
- unifi_error(card->ospriv, "Bulkdata pointer %p(%d), %p(%d)\n",
- bulkdata != NULL?bulkdata->d[0].os_data_ptr : NULL,
- bulkdata != NULL?bulkdata->d[0].data_length : 0,
- bulkdata != NULL?bulkdata->d[1].os_data_ptr : NULL,
- bulkdata != NULL?bulkdata->d[1].data_length : 0);
- }
-
- /* Advance the written count to say there is a new entry */
- CSR_WIFI_HIP_Q_INC_W(sigq);
-
- /*
- * Set the flag to say reason for waking was a host request.
- * Then ask the OS layer to run the unifi_bh.
- */
- if (run_bh == 1)
- {
- card->bh_reason_host = 1;
- r = unifi_run_bh(card->ospriv);
- if (r != CSR_RESULT_SUCCESS)
- {
- unifi_error(card->ospriv, "failed to run bh.\n");
- card->bh_reason_host = 0;
-
- /*
- * The bulk data buffer will be freed by the caller.
- * We need to invalidate the description of the bulk data in our
- * soft queue, to prevent the core freeing the bulk data again later.
- */
- for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; ++i)
- {
- if (csptr->bulkdata[i].data_length != 0)
- {
- csptr->bulkdata[i].os_data_ptr = csptr->bulkdata[i].os_net_buf_ptr = NULL;
- csptr->bulkdata[i].net_buf_length = csptr->bulkdata[i].data_length = 0;
- }
- }
- return r;
- }
- }
- else
- {
- unifi_error(card->ospriv, "run_bh=%d, bh not called.\n", run_bh);
- }
-
- /*
- * Have we used up all the fh signal list entries?
- */
- if (CSR_WIFI_HIP_Q_SLOTS_FREE(sigq) == 0)
- {
- /* We have filled the queue, so stop the upper layer. The command queue
- * is an exception, as suspending due to that being full could delay
- * resume/retry until new commands or data are received.
- */
- if (sigq != &card->fh_command_queue)
- {
- /*
- * Must call unifi_pause_xmit() *before* setting the paused flag.
- *
- * If the flag were set first, this thread could be interrupted by the bh
- * thread between setting the flag and calling unifi_pause_xmit(); if the
- * bh thread then cleared the flag, we would end up paused but with the
- * flag clear. Setting the flag after the pause call guarantees that, even
- * if this thread is interrupted by the bh thread, the flag still ends up
- * set.
- *
- * The remaining risk is a deadlock: if the bh thread empties the queue and
- * clears the flag before this thread calls unifi_pause_xmit(), the bh
- * thread may never run again, because it waits for a packet that can never
- * arrive while xmit is paused. The queue would then be paused, with the
- * flag set to say it is paused, and nothing left to unpause it. (In
- * practice this would not persist for long, because something else, e.g.
- * rx, or tx on a different queue, would soon wake the bh thread.)
- *
- * To rule the deadlock out completely, after setting the flag we check
- * whether anything is left in the queue. If there is, the bh thread has
- * not emptied the queue yet; since it checks whether to unpause *after*
- * taking packets from the queue, it will make at least one more unpause
- * check, so all is well. If the queue is empty, the deadlock could occur,
- * so we unpause the queue here ourselves. The only side effect is that
- * unifi_restart_xmit() may (rarely) be called a second time unnecessarily,
- * which is harmless.
- */
-
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
- unifi_debug_log_to_buf("P");
-#endif
- unifi_pause_xmit(card->ospriv, (unifi_TrafficQueue)priority_q);
- card_tx_q_pause(card, priority_q);
- if (CSR_WIFI_HIP_Q_SLOTS_USED(sigq) == 0)
- {
- card_tx_q_unpause(card, priority_q);
- unifi_restart_xmit(card->ospriv, (unifi_TrafficQueue) priority_q);
- }
- }
- else
- {
- unifi_warning(card->ospriv,
- "send_signal: fh_cmd_q full, not pausing (run_bh=%d)\n",
- run_bh);
- }
- }
-
- return CSR_RESULT_SUCCESS;
-} /* send_signal() */
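
The queue handling in send_signal() follows a simple pattern: check for a free slot, copy the signal into the next write slot, then advance the write counter to publish the entry. The sketch below is a minimal, self-contained illustration of that pattern in plain C; it is not the driver's q_t implementation, and the names (soft_q, q_send, Q_LEN) are invented for the example.

/* Minimal ring-queue sketch mirroring send_signal()'s bookkeeping:
 * free-slot check, copy into the next write slot, then advance the
 * write counter to publish the entry. Illustrative only. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define Q_LEN 8u                       /* number of slots */

struct soft_q {
    uint32_t reads;                    /* entries consumed (bottom half) */
    uint32_t writes;                   /* entries published (sender) */
    uint8_t  slot[Q_LEN][64];          /* per-slot signal storage */
    uint16_t slot_len[Q_LEN];
};

static unsigned q_slots_free(const struct soft_q *q)
{
    return Q_LEN - (q->writes - q->reads);
}

static int q_send(struct soft_q *q, const uint8_t *sig, uint16_t len)
{
    uint32_t slot;

    if (q_slots_free(q) == 0 || len > sizeof(q->slot[0]))
        return -1;                     /* full: caller keeps ownership */

    slot = q->writes % Q_LEN;          /* next write slot */
    memcpy(q->slot[slot], sig, len);
    q->slot_len[slot] = len;
    q->writes++;                       /* publish the new entry last */
    return 0;
}

int main(void)
{
    struct soft_q q = { 0 };
    uint8_t sig[4] = { 0x10, 0x01, 0x00, 0x00 };
    int queued = 0;

    while (q_send(&q, sig, sizeof(sig)) == 0)
        queued++;
    printf("queued %d signals, %u slots free\n", queued, q_slots_free(&q));
    return 0;
}

When the last free slot is consumed, the real driver additionally pauses the corresponding transmit queue, which is what the unifi_pause_xmit()/card_tx_q_pause() logic at the end of send_signal() above handles.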
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_send_signal
- *
- * Invokes send_signal() to queue a signal in the command or traffic queue.
- * If the sigptr pointer is NULL, it pokes the bh to check whether UniFi is responsive.
- *
- * Arguments:
- * card Pointer to card context struct
- * sigptr Pointer to the signal to send to the device.
- * siglen Size of the signal in bytes
- * bulkdata Pointer to the bulk data of the signal
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success
- * CSR_WIFI_HIP_RESULT_NO_SPACE if there were insufficient data slots or no free signal queue entry
- *
- * Notes:
- * unifi_send_signal() is used to queue signals, created by the driver,
- * to the device. Signals are constructed using the UniFi packed structures.
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_send_signal(card_t *card, const u8 *sigptr, u32 siglen,
- const bulk_data_param_t *bulkdata)
-{
- q_t *sig_soft_q;
- u16 signal_id;
- CsrResult r;
- u32 run_bh;
- u32 priority_q;
-
- /* A NULL signal pointer is a request to check if UniFi is responsive */
- if (sigptr == NULL)
- {
- card->bh_reason_host = 1;
- return unifi_run_bh(card->ospriv);
- }
-
- priority_q = 0;
- run_bh = 1;
- signal_id = GET_SIGNAL_ID(sigptr);
- /*
- * If the signal is a CSR_MA_PACKET_REQUEST,
- * we send it using the traffic soft queue. Otherwise we use the command soft queue.
- */
- if (signal_id == CSR_MA_PACKET_REQUEST_ID)
- {
- u16 frame_priority;
-
- if (card->periodic_wake_mode == UNIFI_PERIODIC_WAKE_HOST_ENABLED)
- {
- run_bh = 0;
- }
-
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE) && defined (CSR_WIFI_HIP_DATA_PLANE_PROFILE)
- unifi_debug_log_to_buf("D");
-#endif
- /* Sanity check: MA-PACKET.req must carry valid bulk data */
- if ((bulkdata->d[0].data_length == 0) || (bulkdata->d[0].os_data_ptr == NULL))
- {
- unifi_error(card->ospriv, "MA-PACKET.req with empty bulk data (%d bytes in %p)\n",
- bulkdata->d[0].data_length, bulkdata->d[0].os_data_ptr);
- dump((void *)sigptr, siglen);
- return CSR_RESULT_FAILURE;
- }
-
- /* Map the frame priority to a traffic queue index. */
- frame_priority = GET_PACKED_MA_PACKET_REQUEST_FRAME_PRIORITY(sigptr);
- priority_q = unifi_frame_priority_to_queue((CSR_PRIORITY)frame_priority);
-
- sig_soft_q = &card->fh_traffic_queue[priority_q];
- }
- else
- {
- sig_soft_q = &card->fh_command_queue;
- }
-
- r = send_signal(card, sigptr, siglen, bulkdata, sig_soft_q, priority_q, run_bh);
- /* On error, the caller must free or requeue bulkdata buffers */
-
- return r;
-} /* unifi_send_signal() */
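
As a hedged illustration of the calling convention (not code from this file), the sketch below shows how an OS-layer caller might attach a frame as bulk data reference 0 and queue an already-packed signal. It assumes the csr_wifi_hip_* headers included above are in scope and that sig_buf already holds a signal built with the UniFi packed structures.

/* Hedged illustration only: assumes the csr_wifi_hip_* headers are in
 * scope and that sig_buf already contains a packed signal. */
static CsrResult queue_example_signal(card_t *card,
                                      const u8 *sig_buf, u32 sig_len,
                                      u8 *payload, u32 payload_len)
{
    bulk_data_param_t bd;
    CsrResult r;

    memset(&bd, 0, sizeof(bd));
    bd.d[0].os_data_ptr = payload;      /* frame as bulk data reference 0 */
    bd.d[0].data_length = payload_len;
    /* bd.d[1] left zeroed: second bulk data reference unused */

    r = unifi_send_signal(card, sig_buf, sig_len, &bd);

    /* On CSR_WIFI_HIP_RESULT_NO_SPACE (queue or data slots full) the
     * caller still owns the payload and must retry or free it later. */
    return r;
}

Passing a NULL signal pointer, e.g. unifi_send_signal(card, NULL, 0, NULL), does not queue anything; it only pokes the bottom half to check that UniFi is responsive.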
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_send_resources_available
- *
- * Examines whether there is available space to queue
- * a signal in the command or traffic queue
- *
- * Arguments:
- * card Pointer to card context struct
- * sigptr Pointer to signal.
- *
- * Returns:
- * CSR_RESULT_SUCCESS if resources available
- * CSR_WIFI_HIP_RESULT_NO_SPACE if there was no free signal queue entry
- *
- * Notes:
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_send_resources_available(card_t *card, const u8 *sigptr)
-{
- q_t *sig_soft_q;
- u16 signal_id = GET_SIGNAL_ID(sigptr);
-
- /*
- * If the signal is a CSR_MA_PACKET_REQUEST,
- * we send it using the traffic soft queue. Otherwise we use the command soft queue.
- */
- if (signal_id == CSR_MA_PACKET_REQUEST_ID)
- {
- u16 frame_priority;
- u32 priority_q;
-
- /* Map the frame priority to a traffic queue index. */
- frame_priority = GET_PACKED_MA_PACKET_REQUEST_FRAME_PRIORITY(sigptr);
- priority_q = unifi_frame_priority_to_queue((CSR_PRIORITY)frame_priority);
-
- sig_soft_q = &card->fh_traffic_queue[priority_q];
- }
- else
- {
- sig_soft_q = &card->fh_command_queue;
- }
-
- /* Check that the selected from-host signal queue has a free slot */
- if (!CSR_WIFI_HIP_Q_SLOTS_FREE(sig_soft_q))
- {
- unifi_notice(card->ospriv, "unifi_send_resources_available: %s full\n",
- sig_soft_q->name);
- return CSR_WIFI_HIP_RESULT_NO_SPACE;
- }
-
- return CSR_RESULT_SUCCESS;
-} /* unifi_send_resources_available() */
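
The comment above describes a check-then-send pattern that the OS layer can use as a cheap form of flow control. A brief hedged sketch follows (again not code from this file); note that the availability check and the send are not atomic, so the result of unifi_send_signal() must still be checked.

/* Hedged sketch: pre-check the soft queue, then send. Assumes the same
 * HIP headers and a packed signal as in the previous example. */
static CsrResult try_send(card_t *card, const u8 *sig, u32 sig_len,
                          const bulk_data_param_t *bd)
{
    if (unifi_send_resources_available(card, sig) != CSR_RESULT_SUCCESS)
        return CSR_WIFI_HIP_RESULT_NO_SPACE;    /* back off, retry later */

    /* The queue can still fill between the check and the send, so this
     * return value must be handled as well. */
    return unifi_send_signal(card, sig, sig_len, bd);
}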
-
-
diff --git a/drivers/staging/csr/csr_wifi_hip_signals.c b/drivers/staging/csr/csr_wifi_hip_signals.c
deleted file mode 100644
index 3c82132..0000000
--- a/drivers/staging/csr/csr_wifi_hip_signals.c
+++ /dev/null
@@ -1,1313 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-
-/* Generated by hip_dd_l_c_gen.pl */
-
-#include "csr_wifi_hip_signals.h"
-
-#include "csr_wifi_hip_unifi.h"
-
-s32 SigGetSize(const CSR_SIGNAL *aSignal)
-{
- switch (aSignal->SignalPrimitiveHeader.SignalId)
- {
- case CSR_MA_PACKET_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MA_PACKET_REQUEST);
- case CSR_MA_PACKET_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MA_PACKET_CONFIRM);
- case CSR_MA_PACKET_INDICATION_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MA_PACKET_INDICATION);
- case CSR_MA_PACKET_CANCEL_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MA_PACKET_CANCEL_REQUEST);
- case CSR_MA_VIF_AVAILABILITY_RESPONSE_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MA_VIF_AVAILABILITY_RESPONSE);
- case CSR_MA_VIF_AVAILABILITY_INDICATION_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MA_VIF_AVAILABILITY_INDICATION);
- case CSR_MA_PACKET_ERROR_INDICATION_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MA_PACKET_ERROR_INDICATION);
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_RESET_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_RESET_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_RESET_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_RESET_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_GET_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_GET_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SET_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SET_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_NEXT_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_GET_NEXT_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_NEXT_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_GET_NEXT_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_POWERMGT_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_POWERMGT_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_POWERMGT_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_POWERMGT_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SCAN_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SCAN_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SCAN_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SCAN_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_HL_SYNC_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_HL_SYNC_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_HL_SYNC_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_HL_SYNC_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MEASURE_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_MEASURE_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MEASURE_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_MEASURE_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MEASURE_INDICATION_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_MEASURE_INDICATION);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SETKEYS_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SETKEYS_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SETKEYS_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SETKEYS_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DELETEKEYS_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DELETEKEYS_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DELETEKEYS_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DELETEKEYS_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_AUTONOMOUS_SCAN_LOSS_INDICATION_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_AUTONOMOUS_SCAN_LOSS_INDICATION);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONNECTED_INDICATION_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_CONNECTED_INDICATION);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SCAN_CANCEL_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SCAN_CANCEL_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_HL_SYNC_CANCEL_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_HL_SYNC_CANCEL_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_HL_SYNC_CANCEL_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_HL_SYNC_CANCEL_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_PERIODIC_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_PERIODIC_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_PERIODIC_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_PERIODIC_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_PERIODIC_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DEL_PERIODIC_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_PERIODIC_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DEL_PERIODIC_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_AUTONOMOUS_SCAN_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_AUTONOMOUS_SCAN_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_AUTONOMOUS_SCAN_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_AUTONOMOUS_SCAN_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_AUTONOMOUS_SCAN_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DEL_AUTONOMOUS_SCAN_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_AUTONOMOUS_SCAN_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DEL_AUTONOMOUS_SCAN_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_PACKET_FILTER_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SET_PACKET_FILTER_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_PACKET_FILTER_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SET_PACKET_FILTER_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_STOP_MEASURE_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_STOP_MEASURE_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_STOP_MEASURE_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_STOP_MEASURE_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_PAUSE_AUTONOMOUS_SCAN_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_PAUSE_AUTONOMOUS_SCAN_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_PAUSE_AUTONOMOUS_SCAN_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_PAUSE_AUTONOMOUS_SCAN_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_AUTONOMOUS_SCAN_DONE_INDICATION_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_AUTONOMOUS_SCAN_DONE_INDICATION);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TRIGGERED_GET_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_TRIGGERED_GET_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TRIGGERED_GET_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_TRIGGERED_GET_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_TRIGGERED_GET_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DEL_TRIGGERED_GET_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_TRIGGERED_GET_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DEL_TRIGGERED_GET_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_TRIGGERED_GET_INDICATION_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_TRIGGERED_GET_INDICATION);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_BLACKOUT_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_BLACKOUT_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_BLACKOUT_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_BLACKOUT_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_BLACKOUT_ENDED_INDICATION_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_BLACKOUT_ENDED_INDICATION);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_BLACKOUT_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DEL_BLACKOUT_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_BLACKOUT_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DEL_BLACKOUT_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_RX_TRIGGER_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_RX_TRIGGER_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_RX_TRIGGER_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_RX_TRIGGER_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_RX_TRIGGER_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DEL_RX_TRIGGER_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_RX_TRIGGER_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DEL_RX_TRIGGER_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONNECT_STATUS_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_CONNECT_STATUS_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONNECT_STATUS_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_CONNECT_STATUS_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MODIFY_BSS_PARAMETER_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_MODIFY_BSS_PARAMETER_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MODIFY_BSS_PARAMETER_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_MODIFY_BSS_PARAMETER_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TEMPLATE_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_TEMPLATE_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TEMPLATE_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_TEMPLATE_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONFIG_QUEUE_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_CONFIG_QUEUE_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONFIG_QUEUE_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_CONFIG_QUEUE_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TSPEC_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_TSPEC_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TSPEC_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_TSPEC_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_TSPEC_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DEL_TSPEC_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_TSPEC_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_DEL_TSPEC_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_START_AGGREGATION_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_START_AGGREGATION_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_START_AGGREGATION_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_START_AGGREGATION_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_BLOCKACK_ERROR_INDICATION_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_BLOCKACK_ERROR_INDICATION);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_STOP_AGGREGATION_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_STOP_AGGREGATION_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_STOP_AGGREGATION_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_STOP_AGGREGATION_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SM_START_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SM_START_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SM_START_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SM_START_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_LEAVE_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_LEAVE_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_LEAVE_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_LEAVE_CONFIRM);
-#endif
- case CSR_MLME_SET_TIM_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SET_TIM_REQUEST);
- case CSR_MLME_SET_TIM_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SET_TIM_CONFIRM);
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_KEY_SEQUENCE_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_GET_KEY_SEQUENCE_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_KEY_SEQUENCE_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_GET_KEY_SEQUENCE_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_CHANNEL_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SET_CHANNEL_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_CHANNEL_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_SET_CHANNEL_CONFIRM);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_MULTICAST_ADDRESS_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_MULTICAST_ADDRESS_REQUEST);
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_MULTICAST_ADDRESS_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_MLME_ADD_MULTICAST_ADDRESS_CONFIRM);
-#endif
- case CSR_DEBUG_STRING_INDICATION_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_DEBUG_STRING_INDICATION);
- case CSR_DEBUG_WORD16_INDICATION_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_DEBUG_WORD16_INDICATION);
- case CSR_DEBUG_GENERIC_REQUEST_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_DEBUG_GENERIC_REQUEST);
- case CSR_DEBUG_GENERIC_CONFIRM_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_DEBUG_GENERIC_CONFIRM);
- case CSR_DEBUG_GENERIC_INDICATION_ID:
- return offsetof(struct CSR_SIGNAL_PRIMITIVE, u) + sizeof(CSR_DEBUG_GENERIC_INDICATION);
- default:
- return 0;
- }
-}
-
-
-s32 SigGetDataRefs(CSR_SIGNAL *aSignal, CSR_DATAREF **aDataRef)
-{
- s32 numRefs = 0;
-
- switch (aSignal->SignalPrimitiveHeader.SignalId)
- {
- case CSR_MA_PACKET_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MaPacketRequest.Data;
- aDataRef[numRefs++] = &aSignal->u.MaPacketRequest.Dummydataref2;
- break;
- case CSR_MA_PACKET_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MaPacketConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MaPacketConfirm.Dummydataref2;
- break;
- case CSR_MA_PACKET_INDICATION_ID:
- aDataRef[numRefs++] = &aSignal->u.MaPacketIndication.Data;
- aDataRef[numRefs++] = &aSignal->u.MaPacketIndication.Dummydataref2;
- break;
- case CSR_MA_PACKET_CANCEL_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MaPacketCancelRequest.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MaPacketCancelRequest.Dummydataref2;
- break;
- case CSR_MA_VIF_AVAILABILITY_RESPONSE_ID:
- aDataRef[numRefs++] = &aSignal->u.MaVifAvailabilityResponse.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MaVifAvailabilityResponse.Dummydataref2;
- break;
- case CSR_MA_VIF_AVAILABILITY_INDICATION_ID:
- aDataRef[numRefs++] = &aSignal->u.MaVifAvailabilityIndication.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MaVifAvailabilityIndication.Dummydataref2;
- break;
- case CSR_MA_PACKET_ERROR_INDICATION_ID:
- aDataRef[numRefs++] = &aSignal->u.MaPacketErrorIndication.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MaPacketErrorIndication.Dummydataref2;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_RESET_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeResetRequest.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeResetRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_RESET_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeResetConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeResetConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeGetRequest.MibAttribute;
- aDataRef[numRefs++] = &aSignal->u.MlmeGetRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeGetConfirm.MibAttributeValue;
- aDataRef[numRefs++] = &aSignal->u.MlmeGetConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeSetRequest.MibAttributeValue;
- aDataRef[numRefs++] = &aSignal->u.MlmeSetRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeSetConfirm.MibAttributeValue;
- aDataRef[numRefs++] = &aSignal->u.MlmeSetConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_NEXT_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeGetNextRequest.MibAttribute;
- aDataRef[numRefs++] = &aSignal->u.MlmeGetNextRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_NEXT_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeGetNextConfirm.MibAttributeValue;
- aDataRef[numRefs++] = &aSignal->u.MlmeGetNextConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_POWERMGT_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmePowermgtRequest.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmePowermgtRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_POWERMGT_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmePowermgtConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmePowermgtConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SCAN_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeScanRequest.ChannelList;
- aDataRef[numRefs++] = &aSignal->u.MlmeScanRequest.InformationElements;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SCAN_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeScanConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeScanConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_HL_SYNC_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeHlSyncRequest.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeHlSyncRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_HL_SYNC_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeHlSyncConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeHlSyncConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MEASURE_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeMeasureRequest.MeasurementRequestSet;
- aDataRef[numRefs++] = &aSignal->u.MlmeMeasureRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MEASURE_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeMeasureConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeMeasureConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MEASURE_INDICATION_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeMeasureIndication.MeasurementReportSet;
- aDataRef[numRefs++] = &aSignal->u.MlmeMeasureIndication.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SETKEYS_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeSetkeysRequest.Key;
- aDataRef[numRefs++] = &aSignal->u.MlmeSetkeysRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SETKEYS_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeSetkeysConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeSetkeysConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DELETEKEYS_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeDeletekeysRequest.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeDeletekeysRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DELETEKEYS_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeDeletekeysConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeDeletekeysConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_AUTONOMOUS_SCAN_LOSS_INDICATION_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeAutonomousScanLossIndication.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeAutonomousScanLossIndication.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONNECTED_INDICATION_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeConnectedIndication.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeConnectedIndication.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SCAN_CANCEL_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeScanCancelRequest.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeScanCancelRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_HL_SYNC_CANCEL_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeHlSyncCancelRequest.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeHlSyncCancelRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_HL_SYNC_CANCEL_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeHlSyncCancelConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeHlSyncCancelConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_PERIODIC_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeAddPeriodicRequest.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeAddPeriodicRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_PERIODIC_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeAddPeriodicConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeAddPeriodicConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_PERIODIC_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeDelPeriodicRequest.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeDelPeriodicRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_PERIODIC_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeDelPeriodicConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeDelPeriodicConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_AUTONOMOUS_SCAN_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeAddAutonomousScanRequest.ChannelList;
- aDataRef[numRefs++] = &aSignal->u.MlmeAddAutonomousScanRequest.InformationElements;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_AUTONOMOUS_SCAN_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeAddAutonomousScanConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeAddAutonomousScanConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_AUTONOMOUS_SCAN_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeDelAutonomousScanRequest.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeDelAutonomousScanRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_AUTONOMOUS_SCAN_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeDelAutonomousScanConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeDelAutonomousScanConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_PACKET_FILTER_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeSetPacketFilterRequest.InformationElements;
- aDataRef[numRefs++] = &aSignal->u.MlmeSetPacketFilterRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_PACKET_FILTER_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeSetPacketFilterConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeSetPacketFilterConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_STOP_MEASURE_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeStopMeasureRequest.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeStopMeasureRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_STOP_MEASURE_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeStopMeasureConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeStopMeasureConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_PAUSE_AUTONOMOUS_SCAN_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmePauseAutonomousScanRequest.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmePauseAutonomousScanRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_PAUSE_AUTONOMOUS_SCAN_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmePauseAutonomousScanConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmePauseAutonomousScanConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_AUTONOMOUS_SCAN_DONE_INDICATION_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeAutonomousScanDoneIndication.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeAutonomousScanDoneIndication.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TRIGGERED_GET_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeAddTriggeredGetRequest.MibAttribute;
- aDataRef[numRefs++] = &aSignal->u.MlmeAddTriggeredGetRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TRIGGERED_GET_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeAddTriggeredGetConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeAddTriggeredGetConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_TRIGGERED_GET_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeDelTriggeredGetRequest.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeDelTriggeredGetRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_TRIGGERED_GET_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeDelTriggeredGetConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeDelTriggeredGetConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_TRIGGERED_GET_INDICATION_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeTriggeredGetIndication.MibAttributeValue;
- aDataRef[numRefs++] = &aSignal->u.MlmeTriggeredGetIndication.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_BLACKOUT_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeAddBlackoutRequest.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeAddBlackoutRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_BLACKOUT_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeAddBlackoutConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeAddBlackoutConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_BLACKOUT_ENDED_INDICATION_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeBlackoutEndedIndication.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeBlackoutEndedIndication.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_BLACKOUT_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeDelBlackoutRequest.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeDelBlackoutRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_BLACKOUT_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeDelBlackoutConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeDelBlackoutConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_RX_TRIGGER_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeAddRxTriggerRequest.InformationElements;
- aDataRef[numRefs++] = &aSignal->u.MlmeAddRxTriggerRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_RX_TRIGGER_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeAddRxTriggerConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeAddRxTriggerConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_RX_TRIGGER_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeDelRxTriggerRequest.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeDelRxTriggerRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_RX_TRIGGER_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeDelRxTriggerConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeDelRxTriggerConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONNECT_STATUS_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeConnectStatusRequest.InformationElements;
- aDataRef[numRefs++] = &aSignal->u.MlmeConnectStatusRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONNECT_STATUS_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeConnectStatusConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeConnectStatusConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MODIFY_BSS_PARAMETER_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeModifyBssParameterRequest.Data;
- aDataRef[numRefs++] = &aSignal->u.MlmeModifyBssParameterRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MODIFY_BSS_PARAMETER_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeModifyBssParameterConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeModifyBssParameterConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TEMPLATE_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeAddTemplateRequest.Data1;
- aDataRef[numRefs++] = &aSignal->u.MlmeAddTemplateRequest.Data2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TEMPLATE_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeAddTemplateConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeAddTemplateConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONFIG_QUEUE_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeConfigQueueRequest.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeConfigQueueRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONFIG_QUEUE_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeConfigQueueConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeConfigQueueConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TSPEC_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeAddTspecRequest.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeAddTspecRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TSPEC_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeAddTspecConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeAddTspecConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_TSPEC_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeDelTspecRequest.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeDelTspecRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_TSPEC_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeDelTspecConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeDelTspecConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_START_AGGREGATION_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeStartAggregationRequest.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeStartAggregationRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_START_AGGREGATION_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeStartAggregationConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeStartAggregationConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_BLOCKACK_ERROR_INDICATION_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeBlockackErrorIndication.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeBlockackErrorIndication.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_STOP_AGGREGATION_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeStopAggregationRequest.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeStopAggregationRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_STOP_AGGREGATION_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeStopAggregationConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeStopAggregationConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SM_START_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeSmStartRequest.Beacon;
- aDataRef[numRefs++] = &aSignal->u.MlmeSmStartRequest.BssParameters;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SM_START_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeSmStartConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeSmStartConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_LEAVE_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeLeaveRequest.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeLeaveRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_LEAVE_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeLeaveConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeLeaveConfirm.Dummydataref2;
- break;
-#endif
- case CSR_MLME_SET_TIM_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeSetTimRequest.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeSetTimRequest.Dummydataref2;
- break;
- case CSR_MLME_SET_TIM_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeSetTimConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeSetTimConfirm.Dummydataref2;
- break;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_KEY_SEQUENCE_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeGetKeySequenceRequest.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeGetKeySequenceRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_KEY_SEQUENCE_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeGetKeySequenceConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeGetKeySequenceConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_CHANNEL_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeSetChannelRequest.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeSetChannelRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_CHANNEL_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeSetChannelConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeSetChannelConfirm.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_MULTICAST_ADDRESS_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeAddMulticastAddressRequest.Data;
- aDataRef[numRefs++] = &aSignal->u.MlmeAddMulticastAddressRequest.Dummydataref2;
- break;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_MULTICAST_ADDRESS_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.MlmeAddMulticastAddressConfirm.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.MlmeAddMulticastAddressConfirm.Dummydataref2;
- break;
-#endif
- case CSR_DEBUG_STRING_INDICATION_ID:
- aDataRef[numRefs++] = &aSignal->u.DebugStringIndication.DebugMessage;
- aDataRef[numRefs++] = &aSignal->u.DebugStringIndication.Dummydataref2;
- break;
- case CSR_DEBUG_WORD16_INDICATION_ID:
- aDataRef[numRefs++] = &aSignal->u.DebugWord16Indication.Dummydataref1;
- aDataRef[numRefs++] = &aSignal->u.DebugWord16Indication.Dummydataref2;
- break;
- case CSR_DEBUG_GENERIC_REQUEST_ID:
- aDataRef[numRefs++] = &aSignal->u.DebugGenericRequest.DebugVariable;
- aDataRef[numRefs++] = &aSignal->u.DebugGenericRequest.Dummydataref2;
- break;
- case CSR_DEBUG_GENERIC_CONFIRM_ID:
- aDataRef[numRefs++] = &aSignal->u.DebugGenericConfirm.DebugVariable;
- aDataRef[numRefs++] = &aSignal->u.DebugGenericConfirm.Dummydataref2;
- break;
- case CSR_DEBUG_GENERIC_INDICATION_ID:
- aDataRef[numRefs++] = &aSignal->u.DebugGenericIndication.DebugVariable;
- aDataRef[numRefs++] = &aSignal->u.DebugGenericIndication.Dummydataref2;
- break;
- default:
- return 0;
- }
- return numRefs;
-}
-
-
-u32 SigGetFilterPos(u16 aSigID)
-{
- switch (aSigID)
- {
- case CSR_MA_PACKET_REQUEST_ID:
- return 0x00000001;
- case CSR_MA_PACKET_CONFIRM_ID:
- return 0x00000002;
- case CSR_MA_PACKET_INDICATION_ID:
- return 0x00000004;
- case CSR_MA_PACKET_CANCEL_REQUEST_ID:
- return 0x00000008;
- case CSR_MA_VIF_AVAILABILITY_RESPONSE_ID:
- return 0x00000010;
- case CSR_MA_VIF_AVAILABILITY_INDICATION_ID:
- return 0x00000020;
- case CSR_MA_PACKET_ERROR_INDICATION_ID:
- return 0x00000040;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_RESET_REQUEST_ID:
- return 0x00000080;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_RESET_CONFIRM_ID:
- return 0x00000100;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_REQUEST_ID:
- return 0x00000200;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_CONFIRM_ID:
- return 0x00000400;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_REQUEST_ID:
- return 0x00000800;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_CONFIRM_ID:
- return 0x00001000;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_NEXT_REQUEST_ID:
- return 0x00002000;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_NEXT_CONFIRM_ID:
- return 0x00004000;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_POWERMGT_REQUEST_ID:
- return 0x00008000;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_POWERMGT_CONFIRM_ID:
- return 0x00010001;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SCAN_REQUEST_ID:
- return 0x00010002;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SCAN_CONFIRM_ID:
- return 0x00010004;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_HL_SYNC_REQUEST_ID:
- return 0x00010008;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_HL_SYNC_CONFIRM_ID:
- return 0x00010010;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MEASURE_REQUEST_ID:
- return 0x00010020;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MEASURE_CONFIRM_ID:
- return 0x00010040;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MEASURE_INDICATION_ID:
- return 0x00010080;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SETKEYS_REQUEST_ID:
- return 0x00010100;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SETKEYS_CONFIRM_ID:
- return 0x00010200;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DELETEKEYS_REQUEST_ID:
- return 0x00010400;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DELETEKEYS_CONFIRM_ID:
- return 0x00010800;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_AUTONOMOUS_SCAN_LOSS_INDICATION_ID:
- return 0x00011000;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONNECTED_INDICATION_ID:
- return 0x00012000;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SCAN_CANCEL_REQUEST_ID:
- return 0x00014000;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_HL_SYNC_CANCEL_REQUEST_ID:
- return 0x00018000;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_HL_SYNC_CANCEL_CONFIRM_ID:
- return 0x00020001;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_PERIODIC_REQUEST_ID:
- return 0x00020002;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_PERIODIC_CONFIRM_ID:
- return 0x00020004;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_PERIODIC_REQUEST_ID:
- return 0x00020008;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_PERIODIC_CONFIRM_ID:
- return 0x00020010;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_AUTONOMOUS_SCAN_REQUEST_ID:
- return 0x00020020;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_AUTONOMOUS_SCAN_CONFIRM_ID:
- return 0x00020040;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_AUTONOMOUS_SCAN_REQUEST_ID:
- return 0x00020080;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_AUTONOMOUS_SCAN_CONFIRM_ID:
- return 0x00020100;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_PACKET_FILTER_REQUEST_ID:
- return 0x00020200;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_PACKET_FILTER_CONFIRM_ID:
- return 0x00020400;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_STOP_MEASURE_REQUEST_ID:
- return 0x00020800;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_STOP_MEASURE_CONFIRM_ID:
- return 0x00021000;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_PAUSE_AUTONOMOUS_SCAN_REQUEST_ID:
- return 0x00022000;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_PAUSE_AUTONOMOUS_SCAN_CONFIRM_ID:
- return 0x00024000;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_AUTONOMOUS_SCAN_DONE_INDICATION_ID:
- return 0x00028000;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TRIGGERED_GET_REQUEST_ID:
- return 0x00030001;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TRIGGERED_GET_CONFIRM_ID:
- return 0x00030002;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_TRIGGERED_GET_REQUEST_ID:
- return 0x00030004;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_TRIGGERED_GET_CONFIRM_ID:
- return 0x00030008;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_TRIGGERED_GET_INDICATION_ID:
- return 0x00030010;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_BLACKOUT_REQUEST_ID:
- return 0x00030020;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_BLACKOUT_CONFIRM_ID:
- return 0x00030040;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_BLACKOUT_ENDED_INDICATION_ID:
- return 0x00030080;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_BLACKOUT_REQUEST_ID:
- return 0x00030100;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_BLACKOUT_CONFIRM_ID:
- return 0x00030200;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_RX_TRIGGER_REQUEST_ID:
- return 0x00030400;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_RX_TRIGGER_CONFIRM_ID:
- return 0x00030800;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_RX_TRIGGER_REQUEST_ID:
- return 0x00031000;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_RX_TRIGGER_CONFIRM_ID:
- return 0x00032000;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONNECT_STATUS_REQUEST_ID:
- return 0x00034000;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONNECT_STATUS_CONFIRM_ID:
- return 0x00038000;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MODIFY_BSS_PARAMETER_REQUEST_ID:
- return 0x00040001;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_MODIFY_BSS_PARAMETER_CONFIRM_ID:
- return 0x00040002;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TEMPLATE_REQUEST_ID:
- return 0x00040004;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TEMPLATE_CONFIRM_ID:
- return 0x00040008;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONFIG_QUEUE_REQUEST_ID:
- return 0x00040010;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_CONFIG_QUEUE_CONFIRM_ID:
- return 0x00040020;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TSPEC_REQUEST_ID:
- return 0x00040040;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_TSPEC_CONFIRM_ID:
- return 0x00040080;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_TSPEC_REQUEST_ID:
- return 0x00040100;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_DEL_TSPEC_CONFIRM_ID:
- return 0x00040200;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_START_AGGREGATION_REQUEST_ID:
- return 0x00040400;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_START_AGGREGATION_CONFIRM_ID:
- return 0x00040800;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_BLOCKACK_ERROR_INDICATION_ID:
- return 0x00041000;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_STOP_AGGREGATION_REQUEST_ID:
- return 0x00042000;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_STOP_AGGREGATION_CONFIRM_ID:
- return 0x00044000;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SM_START_REQUEST_ID:
- return 0x00048000;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SM_START_CONFIRM_ID:
- return 0x00050001;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_LEAVE_REQUEST_ID:
- return 0x00050002;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_LEAVE_CONFIRM_ID:
- return 0x00050004;
-#endif
- case CSR_MLME_SET_TIM_REQUEST_ID:
- return 0x00050008;
- case CSR_MLME_SET_TIM_CONFIRM_ID:
- return 0x00050010;
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_KEY_SEQUENCE_REQUEST_ID:
- return 0x00050020;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_GET_KEY_SEQUENCE_CONFIRM_ID:
- return 0x00050040;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_CHANNEL_REQUEST_ID:
- return 0x00050080;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_SET_CHANNEL_CONFIRM_ID:
- return 0x00050100;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_MULTICAST_ADDRESS_REQUEST_ID:
- return 0x00050200;
-#endif
-#ifdef CSR_WIFI_HIP_FULL_SIGNAL_SET
- case CSR_MLME_ADD_MULTICAST_ADDRESS_CONFIRM_ID:
- return 0x00050400;
-#endif
- case CSR_DEBUG_STRING_INDICATION_ID:
- return 0x00050800;
- case CSR_DEBUG_WORD16_INDICATION_ID:
- return 0x00051000;
- case CSR_DEBUG_GENERIC_REQUEST_ID:
- return 0x00052000;
- case CSR_DEBUG_GENERIC_CONFIRM_ID:
- return 0x00054000;
- case CSR_DEBUG_GENERIC_INDICATION_ID:
- return 0x00058000;
- default:
- break;
- }
- return 0xffffffff;
-}
-
-
diff --git a/drivers/staging/csr/csr_wifi_hip_signals.h b/drivers/staging/csr/csr_wifi_hip_signals.h
deleted file mode 100644
index ca4d077..0000000
--- a/drivers/staging/csr/csr_wifi_hip_signals.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/*
- *****************************************************************************
- *
- * FILE: csr_wifi_hip_signals.h
- *
- * PURPOSE:
- * Header file wrapping the auto-generated code in csr_wifi_hip_sigs.h
- * and csr_wifi_hip_signals.c -
- * csr_wifi_hip_sigs.h provides structures defining UniFi signals and
- * csr_wifi_hip_signals.c provides SigGetSize() and SigGetDataRefs().
- *
- *****************************************************************************
- */
-#ifndef __CSR_WIFI_HIP_SIGNALS_H__
-#define __CSR_WIFI_HIP_SIGNALS_H__
-
-#include <linux/types.h>
-#include "csr_wifi_hip_sigs.h"
-
-
-/****************************************************************************/
-/* INFORMATION ELEMENTS */
-/****************************************************************************/
-
-/* Information Element IDs - shouldn't be in here, but nowhere better yet */
-#define IE_SSID_ID 0
-#define IE_SUPPORTED_RATES_ID 1
-#define IE_FH_PARAM_SET_ID 2
-#define IE_DS_PARAM_SET_ID 3
-#define IE_CF_PARAM_SET_ID 4
-#define IE_TIM_ID 5
-#define IE_IBSS_PARAM_SET_ID 6
-#define IE_COUNTRY_ID 7
-#define IE_HOPPING_PATTERN_PARAMS_ID 8
-#define IE_HOPPING_PATTERN_TABLE_ID 9
-#define IE_REQUEST_ID 10
-#define IE_QBSS_LOAD_ID 11
-#define IE_EDCA_PARAM_SET_ID 12
-#define IE_TRAFFIC_SPEC_ID 13
-#define IE_TRAFFIC_CLASS_ID 14
-#define IE_SCHEDULE_ID 15
-#define IE_CHALLENGE_TEXT_ID 16
-#define IE_POWER_CONSTRAINT_ID 32
-#define IE_POWER_CAPABILITY_ID 33
-#define IE_TPC_REQUEST_ID 34
-#define IE_TPC_REPORT_ID 35
-#define IE_SUPPORTED_CHANNELS_ID 36
-#define IE_CHANNEL_SWITCH_ANNOUNCE_ID 37
-#define IE_MEASUREMENT_REQUEST_ID 38
-#define IE_MEASUREMENT_REPORT_ID 39
-#define IE_QUIET_ID 40
-#define IE_IBSS_DFS_ID 41
-#define IE_ERP_INFO_ID 42
-#define IE_TS_DELAY_ID 43
-#define IE_TCLAS_PROCESSING_ID 44
-#define IE_QOS_CAPABILITY_ID 46
-#define IE_RSN_ID 48
-#define IE_EXTENDED_SUPPORTED_RATES_ID 50
-#define IE_AP_CHANNEL_REPORT_ID 52
-#define IE_RCPI_ID 53
-#define IE_WPA_ID 221
-
-
-/* The maximum number of data references in a signal structure */
-#define UNIFI_MAX_DATA_REFERENCES 2
-
-/* The space to allow for a wire-format signal structure */
-#define UNIFI_PACKED_SIGBUF_SIZE 64
-
-
-/******************************************************************************/
-/* SIGNAL PARAMETER VALUES */
-/******************************************************************************/
-
-/* ifIndex */
-#define UNIFI_IF_2G4 1
-#define UNIFI_IF_5G 2
-
-/* SendProcessId */
-#define HOST_PROC_ID 0xc000
-
-#define SIG_CAP_ESS 0x0001
-#define SIG_CAP_IBSS 0x0002
-#define SIG_CAP_CF_POLLABLE 0x0004
-#define SIG_CAP_CF_POLL_REQUEST 0x0008
-#define SIG_CAP_PRIVACY 0x0010
-#define SIG_CAP_SHORT_PREAMBLE 0x0020
-#define SIG_CAP_DSSSOFDM 0x2000
-
-/******************************************************************************/
-/* FUNCTION DECLARATIONS */
-/******************************************************************************/
-
-/******************************************************************************
- * SigGetDataRefs - Retrieve pointers to data-refs from a signal.
- *
- * PARAMETERS:
- * aSignal - Pointer to signal to retrieve the data refs of.
- * aDataRef - Address of an array in which pointers to the signal's
- *            data references will be stored.
- *
- * RETURNS:
- * The number of data-refs in the signal.
- */
-s32 SigGetDataRefs(CSR_SIGNAL *aSignal, CSR_DATAREF **aDataRef);
-
-/******************************************************************************
- * SigGetSize - Retrieve the size (in bytes) of a given signal.
- *
- * PARAMETERS:
- * aSignal - Pointer to signal to retrieve size of.
- *
- * RETURNS:
- * The size (in bytes) of the given signal.
- */
-s32 SigGetSize(const CSR_SIGNAL *aSignal);
-
-#endif /* __CSR_WIFI_HIP_SIGNALS_H__ */
diff --git a/drivers/staging/csr/csr_wifi_hip_sigs.h b/drivers/staging/csr/csr_wifi_hip_sigs.h
deleted file mode 100644
index 6112cc3..0000000
--- a/drivers/staging/csr/csr_wifi_hip_sigs.h
+++ /dev/null
@@ -1,1417 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-
-/* Generated by hip_dd_l_h_gen.pl */
-
-#ifndef CSR_WIFI_HIP_SIGS_H
-#define CSR_WIFI_HIP_SIGS_H
-
-typedef s16 csr_place_holding_type;
-
-typedef u16 CSR_ASSOCIATION_ID;
-
-typedef u16 CSR_AUTONOMOUS_SCAN_ID;
-
-typedef u16 CSR_BEACON_PERIODS;
-
-typedef u16 CSR_BLACKOUT_ID;
-
-typedef enum CSR_BLACKOUT_SOURCE
-{
- CSR_DOT11_LOCAL = 0x0000,
- CSR_DOT11_REMOTE = 0x0001,
- CSR_OTHER_RADIO = 0x0002,
- CSR_NOT_LINKED = 0x0004
-} CSR_BLACKOUT_SOURCE;
-
-typedef enum CSR_BLACKOUT_TYPE
-{
- CSR_LOCAL_DEVICE_ONLY = 0x0001,
- CSR_SPECIFIED_PEER = 0x0002,
- CSR_CURRENT_CHANNEL = 0x0004,
- CSR_P2P = 0x0008
-} CSR_BLACKOUT_TYPE;
-
-typedef enum CSR_BOOT_LOADER_OPERATION
-{
- CSR_BOOT_LOADER_IDLE = 0x00,
- CSR_BOOT_LOADER_RESTART = 0x01,
- CSR_BOOT_LOADER_PATCH = 0x02,
- CSR_BOOT_LOADER_IMAGE_0 = 0x10,
- CSR_BOOT_LOADER_IMAGE_1 = 0x11,
- CSR_BOOT_LOADER_IMAGE_2 = 0x12,
- CSR_BOOT_LOADER_IMAGE_3 = 0x13
-} CSR_BOOT_LOADER_OPERATION;
-
-typedef u16 CSR_CAPABILITY_INFORMATION;
-
-typedef u16 CSR_CHANNEL_STARTING_FACTOR;
-
-typedef u32 CSR_CIPHER_SUITE_SELECTOR;
-
-typedef u32 CSR_CLIENT_TAG;
-
-typedef enum CSR_CONNECTION_STATUS
-{
- CSR_DISCONNECTED = 0x0000,
- CSR_CONNECTED_AWAKE = 0x0001
-} CSR_CONNECTION_STATUS;
-
-typedef s16 CSR_DECIBELS;
-
-typedef enum CSR_DIRECTION
-{
- CSR_TRANSMIT = 0x0000,
- CSR_RECEIVE = 0x0001,
- CSR_BIDIRECTIONAL = 0x0003
-} CSR_DIRECTION;
-
-typedef enum CSR_FRAME_TYPE
-{
- CSR_RESERVED = 0x0000,
- CSR_BEACON = 0x0001,
- CSR_PROBE_RESPONSE = 0x0002,
- CSR_BEACON_AND_PROBE_RESPONSE = 0x0003,
- CSR_PROBE_REQUEST = 0x0004
-} CSR_FRAME_TYPE;
-
-typedef u32 CSR_IPV4_ADDRESS;
-
-typedef enum CSR_IFINTERFACE
-{
- CSR_INDEX_2G4 = 0x0001,
- CSR_INDEX_5G = 0x0002
-} CSR_IFINTERFACE;
-
-typedef enum CSR_KEY_TYPE
-{
- CSR_GROUP = 0x0000,
- CSR_PAIRWISE = 0x0001,
- CSR_PEER_KEY = 0x0002,
- CSR_IGTK = 0x0003
-} CSR_KEY_TYPE;
-
-typedef enum CSR_LOADER_OPERATION
-{
- CSR_LOADER_IDLE = 0x0000,
- CSR_LOADER_COPY = 0x0001
-} CSR_LOADER_OPERATION;
-
-typedef struct CSR_MAC_ADDRESS
-{
- u8 x[6];
-} CSR_MACADDRESS;
-
-typedef enum CSR_MIB_STATUS
-{
- CSR_MIB_SUCCESSFUL = 0x0000,
- CSR_MIB_INVALID_PARAMETERS = 0x0001,
- CSR_MIB_WRITE_ONLY = 0x0002,
- CSR_MIB_READ_ONLY = 0x0003
-} CSR_MIB_STATUS;
-
-typedef enum CSR_MEMORY_SPACE
-{
- CSR_NONE = 0x00,
- CSR_SHARED_DATA_MEMORY = 0x01,
- CSR_EXTERNAL_FLASH_MEMORY = 0x02,
- CSR_EXTERNAL_SRAM = 0x03,
- CSR_REGISTERS = 0x04,
- CSR_PHY_PROCESSOR_DATA_MEMORY = 0x10,
- CSR_PHY_PROCESSOR_PROGRAM_MEMORY = 0x11,
- CSR_PHY_PROCESSOR_ROM = 0x12,
- CSR_MAC_PROCESSOR_DATA_MEMORY = 0x20,
- CSR_MAC_PROCESSOR_PROGRAM_MEMORY = 0x21,
- CSR_MAC_PROCESSOR_ROM = 0x22,
- CSR_BT_PROCESSOR_DATA_MEMORY = 0x30,
- CSR_BT_PROCESSOR_PROGRAM_MEMORY = 0x31,
- CSR_BT_PROCESSOR_ROM = 0x32
-} CSR_MEMORY_SPACE;
-
-typedef u16 CSR_MICROSECONDS16;
-
-typedef u32 CSR_MICROSECONDS32;
-
-typedef u16 CSR_NATURAL16;
-
-typedef enum CSR_PS_SCHEME
-{
- CSR_LEGACY_PS = 0x0001,
- CSR_U_APSD = 0x0002,
- CSR_S_APSD = 0x0004
-} CSR_PS_SCHEME;
-
-typedef enum CSR_PACKET_FILTER_MODE
-{
- CSR_PFM_OPT_OUT = 0x0000,
- CSR_PFM_OPT_IN = 0x0003
-} CSR_PACKET_FILTER_MODE;
-
-typedef u16 CSR_PERIODIC_ID;
-
-typedef enum CSR_PERIODIC_SCHEDULING_MODE
-{
- CSR_PSM_PERIODIC_SCHEDULE_PS_POLL = 0x0001,
- CSR_PSM_PERIODIC_SCHEDULE_PM_BIT = 0x0002,
- CSR_PSM_PERIODIC_SCHEDULE_UAPSD = 0x0004,
- CSR_PSM_PERIODIC_SCHEDULE_SAPSD = 0x0008
-} CSR_PERIODIC_SCHEDULING_MODE;
-
-typedef enum CSR_POWER_MANAGEMENT_MODE
-{
- CSR_PMM_ACTIVE_MODE = 0x0000,
- CSR_PMM_POWER_SAVE = 0x0001,
- CSR_PMM_FAST_POWER_SAVE = 0x0002
-} CSR_POWER_MANAGEMENT_MODE;
-
-typedef enum CSR_PRIORITY
-{
- CSR_QOS_UP0 = 0x0000,
- CSR_QOS_UP1 = 0x0001,
- CSR_QOS_UP2 = 0x0002,
- CSR_QOS_UP3 = 0x0003,
- CSR_QOS_UP4 = 0x0004,
- CSR_QOS_UP5 = 0x0005,
- CSR_QOS_UP6 = 0x0006,
- CSR_QOS_UP7 = 0x0007,
- CSR_CONTENTION = 0x8000,
- CSR_MANAGEMENT = 0x8010
-} CSR_PRIORITY;
-
-typedef enum CSR_REASON_CODE
-{
- CSR_UNSPECIFIED_REASON = 0x0001,
- CSR_INVALID_INFORMATION_ELEMENT = 0x000d,
- CSR_QOS_UNSPECIFIED_REASON = 0x0020,
- CSR_QOS_EXCESSIVE_NOT_ACK = 0x0022,
- CSR_QOS_TXOP_LIMIT_EXCEEDED = 0x0023,
- CSR_QSTA_LEAVING = 0x0024,
- CSR_UNKNOWN_BA = 0x0026,
- CSR_UNKNOWN_TS = 0x0026,
- CSR_TIMEOUT = 0x0027
-} CSR_REASON_CODE;
-
-typedef enum CSR_RECEPTION_STATUS
-{
- CSR_RX_SUCCESS = 0x0000,
- CSR_RX_FAILURE_UNSPECIFIED = 0x0001,
- CSR_MICHAEL_MIC_ERROR = 0x0002,
- CSR_DECRYPTION_ERROR = 0x0003,
- CSR_NO_TEMPORAL_KEY_AVAILABLE = 0x0004,
- CSR_UNSUPPORTED_MODULATION = 0x0011,
- CSR_BAD_FCS = 0x0012,
- CSR_BAD_SIGNAL = 0x0013
-} CSR_RECEPTION_STATUS;
-
-typedef enum CSR_RESULT_CODE
-{
- CSR_RC_SUCCESS = 0x0000,
- CSR_RC_UNSPECIFIED_FAILURE = 0x0001,
- CSR_RC_REFUSED = 0x0003,
- CSR_RC_INVALID_PARAMETERS = 0x0026,
- CSR_RC_REJECTED_INVALID_IE = 0x0028,
- CSR_RC_REJECTED_INVALID_GROUP_CIPHER = 0x0029,
- CSR_RC_REJECTED_INVALID_PAIRWISE_CIPHER = 0x002a,
- CSR_RC_TIMEOUT = 0x8000,
- CSR_RC_TOO_MANY_SIMULTANEOUS_REQUESTS = 0x8001,
- CSR_RC_BSS_ALREADY_STARTED_OR_JOINED = 0x8002,
- CSR_RC_NOT_SUPPORTED = 0x8003,
- CSR_RC_TRANSMISSION_FAILURE = 0x8004,
- CSR_RC_RESET_REQUIRED_BEFORE_START = 0x8006,
- CSR_RC_INSUFFICIENT_RESOURCE = 0x8007,
- CSR_RC_NO_BUFFERED_BROADCAST_MULTICAST_FRAMES = 0x8008,
- CSR_RC_INVALID_UNICAST_CIPHER = 0xf02f,
- CSR_RC_INVALID_MULTICAST_CIPHER = 0xf030
-} CSR_RESULT_CODE;
-
-typedef enum CSR_SCAN_TYPE
-{
- CSR_SC_ACTIVE_SCAN = 0x0000,
- CSR_SC_PASSIVE_SCAN = 0x0001
-} CSR_SCAN_TYPE;
-
-typedef enum CSR_SIGNAL_ID
-{
- CSR_MA_PACKET_REQUEST_ID = 0x0110,
- CSR_MA_PACKET_CONFIRM_ID = 0x0111,
- CSR_MA_PACKET_INDICATION_ID = 0x0113,
- CSR_MA_PACKET_CANCEL_REQUEST_ID = 0x0114,
- CSR_MA_VIF_AVAILABILITY_RESPONSE_ID = 0x0116,
- CSR_MA_VIF_AVAILABILITY_INDICATION_ID = 0x0117,
- CSR_MA_PACKET_ERROR_INDICATION_ID = 0x011b,
- CSR_MLME_RESET_REQUEST_ID = 0x0200,
- CSR_MLME_RESET_CONFIRM_ID = 0x0201,
- CSR_MLME_GET_REQUEST_ID = 0x0204,
- CSR_MLME_GET_CONFIRM_ID = 0x0205,
- CSR_MLME_SET_REQUEST_ID = 0x0208,
- CSR_MLME_SET_CONFIRM_ID = 0x0209,
- CSR_MLME_GET_NEXT_REQUEST_ID = 0x020c,
- CSR_MLME_GET_NEXT_CONFIRM_ID = 0x020d,
- CSR_MLME_POWERMGT_REQUEST_ID = 0x0210,
- CSR_MLME_POWERMGT_CONFIRM_ID = 0x0211,
- CSR_MLME_SCAN_REQUEST_ID = 0x0214,
- CSR_MLME_SCAN_CONFIRM_ID = 0x0215,
- CSR_MLME_HL_SYNC_REQUEST_ID = 0x0244,
- CSR_MLME_HL_SYNC_CONFIRM_ID = 0x0245,
- CSR_MLME_MEASURE_REQUEST_ID = 0x0258,
- CSR_MLME_MEASURE_CONFIRM_ID = 0x0259,
- CSR_MLME_MEASURE_INDICATION_ID = 0x025b,
- CSR_MLME_SETKEYS_REQUEST_ID = 0x0268,
- CSR_MLME_SETKEYS_CONFIRM_ID = 0x0269,
- CSR_MLME_DELETEKEYS_REQUEST_ID = 0x026c,
- CSR_MLME_DELETEKEYS_CONFIRM_ID = 0x026d,
- CSR_MLME_AUTONOMOUS_SCAN_LOSS_INDICATION_ID = 0x0287,
- CSR_MLME_CONNECTED_INDICATION_ID = 0x028b,
- CSR_MLME_SCAN_CANCEL_REQUEST_ID = 0x028c,
- CSR_MLME_HL_SYNC_CANCEL_REQUEST_ID = 0x0298,
- CSR_MLME_HL_SYNC_CANCEL_CONFIRM_ID = 0x0299,
- CSR_MLME_ADD_PERIODIC_REQUEST_ID = 0x02a0,
- CSR_MLME_ADD_PERIODIC_CONFIRM_ID = 0x02a1,
- CSR_MLME_DEL_PERIODIC_REQUEST_ID = 0x02a4,
- CSR_MLME_DEL_PERIODIC_CONFIRM_ID = 0x02a5,
- CSR_MLME_ADD_AUTONOMOUS_SCAN_REQUEST_ID = 0x02a8,
- CSR_MLME_ADD_AUTONOMOUS_SCAN_CONFIRM_ID = 0x02a9,
- CSR_MLME_DEL_AUTONOMOUS_SCAN_REQUEST_ID = 0x02ac,
- CSR_MLME_DEL_AUTONOMOUS_SCAN_CONFIRM_ID = 0x02ad,
- CSR_MLME_SET_PACKET_FILTER_REQUEST_ID = 0x02b8,
- CSR_MLME_SET_PACKET_FILTER_CONFIRM_ID = 0x02b9,
- CSR_MLME_STOP_MEASURE_REQUEST_ID = 0x02bc,
- CSR_MLME_STOP_MEASURE_CONFIRM_ID = 0x02bd,
- CSR_MLME_PAUSE_AUTONOMOUS_SCAN_REQUEST_ID = 0x02cc,
- CSR_MLME_PAUSE_AUTONOMOUS_SCAN_CONFIRM_ID = 0x02cd,
- CSR_MLME_AUTONOMOUS_SCAN_DONE_INDICATION_ID = 0x02db,
- CSR_MLME_ADD_TRIGGERED_GET_REQUEST_ID = 0x02dc,
- CSR_MLME_ADD_TRIGGERED_GET_CONFIRM_ID = 0x02dd,
- CSR_MLME_DEL_TRIGGERED_GET_REQUEST_ID = 0x02e0,
- CSR_MLME_DEL_TRIGGERED_GET_CONFIRM_ID = 0x02e1,
- CSR_MLME_TRIGGERED_GET_INDICATION_ID = 0x02e7,
- CSR_MLME_ADD_BLACKOUT_REQUEST_ID = 0x02f8,
- CSR_MLME_ADD_BLACKOUT_CONFIRM_ID = 0x02f9,
- CSR_MLME_BLACKOUT_ENDED_INDICATION_ID = 0x02fb,
- CSR_MLME_DEL_BLACKOUT_REQUEST_ID = 0x02fc,
- CSR_MLME_DEL_BLACKOUT_CONFIRM_ID = 0x02fd,
- CSR_MLME_ADD_RX_TRIGGER_REQUEST_ID = 0x0304,
- CSR_MLME_ADD_RX_TRIGGER_CONFIRM_ID = 0x0305,
- CSR_MLME_DEL_RX_TRIGGER_REQUEST_ID = 0x0308,
- CSR_MLME_DEL_RX_TRIGGER_CONFIRM_ID = 0x0309,
- CSR_MLME_CONNECT_STATUS_REQUEST_ID = 0x0310,
- CSR_MLME_CONNECT_STATUS_CONFIRM_ID = 0x0311,
- CSR_MLME_MODIFY_BSS_PARAMETER_REQUEST_ID = 0x0314,
- CSR_MLME_MODIFY_BSS_PARAMETER_CONFIRM_ID = 0x0315,
- CSR_MLME_ADD_TEMPLATE_REQUEST_ID = 0x0318,
- CSR_MLME_ADD_TEMPLATE_CONFIRM_ID = 0x0319,
- CSR_MLME_CONFIG_QUEUE_REQUEST_ID = 0x031c,
- CSR_MLME_CONFIG_QUEUE_CONFIRM_ID = 0x031d,
- CSR_MLME_ADD_TSPEC_REQUEST_ID = 0x0320,
- CSR_MLME_ADD_TSPEC_CONFIRM_ID = 0x0321,
- CSR_MLME_DEL_TSPEC_REQUEST_ID = 0x0324,
- CSR_MLME_DEL_TSPEC_CONFIRM_ID = 0x0325,
- CSR_MLME_START_AGGREGATION_REQUEST_ID = 0x0328,
- CSR_MLME_START_AGGREGATION_CONFIRM_ID = 0x0329,
- CSR_MLME_BLOCKACK_ERROR_INDICATION_ID = 0x032b,
- CSR_MLME_STOP_AGGREGATION_REQUEST_ID = 0x032c,
- CSR_MLME_STOP_AGGREGATION_CONFIRM_ID = 0x032d,
- CSR_MLME_SM_START_REQUEST_ID = 0x0334,
- CSR_MLME_SM_START_CONFIRM_ID = 0x0335,
- CSR_MLME_LEAVE_REQUEST_ID = 0x0338,
- CSR_MLME_LEAVE_CONFIRM_ID = 0x0339,
- CSR_MLME_SET_TIM_REQUEST_ID = 0x033c,
- CSR_MLME_SET_TIM_CONFIRM_ID = 0x033d,
- CSR_MLME_GET_KEY_SEQUENCE_REQUEST_ID = 0x0340,
- CSR_MLME_GET_KEY_SEQUENCE_CONFIRM_ID = 0x0341,
- CSR_MLME_SET_CHANNEL_REQUEST_ID = 0x034c,
- CSR_MLME_SET_CHANNEL_CONFIRM_ID = 0x034d,
- CSR_MLME_ADD_MULTICAST_ADDRESS_REQUEST_ID = 0x040c,
- CSR_MLME_ADD_MULTICAST_ADDRESS_CONFIRM_ID = 0x040d,
- CSR_DEBUG_STRING_INDICATION_ID = 0x0803,
- CSR_DEBUG_WORD16_INDICATION_ID = 0x0807,
- CSR_DEBUG_GENERIC_REQUEST_ID = 0x0808,
- CSR_DEBUG_GENERIC_CONFIRM_ID = 0x0809,
- CSR_DEBUG_GENERIC_INDICATION_ID = 0x080b
-} CSR_SIGNAL_ID;
-
-typedef u16 CSR_SIMPLE_POINTER;
-
-typedef u16 CSR_STARTING_SEQUENCE_NUMBER;
-
-typedef enum CSR_SYMBOL_ID
-{
- CSR_SLT_END = 0x0000,
- CSR_SLT_PCI_SLOT_CONFIG = 0x0001,
- CSR_SLT_SDIO_SLOT_CONFIG = 0x0002,
- CSR_SLT_BUILD_ID_NUMBER = 0x0003,
- CSR_SLT_BUILD_ID_STRING = 0x0004,
- CSR_SLT_PERSISTENT_STORE_DB = 0x0005,
- CSR_SLT_RESET_VECTOR_PHY = 0x0006,
- CSR_SLT_RESET_VECTOR_MAC = 0x0007,
- CSR_SLT_SDIO_LOADER_CONTROL = 0x0008,
- CSR_SLT_TEST_CMD = 0x0009,
- CSR_SLT_TEST_ALIVE_COUNTER = 0x000a,
- CSR_SLT_TEST_PARAMETERS = 0x000b,
- CSR_SLT_TEST_RESULTS = 0x000c,
- CSR_SLT_TEST_VERSION = 0x000d,
- CSR_SLT_MIB_PSID_RANGES = 0x000e,
- CSR_SLT_KIP_TABLE = 0x000f,
- CSR_SLT_PANIC_DATA_PHY = 0x0010,
- CSR_SLT_PANIC_DATA_MAC = 0x0011,
- CSR_SLT_BOOT_LOADER_CONTROL = 0x0012,
- CSR_SLT_SOFT_MAC = 0x0013
-} CSR_SYMBOL_ID;
-
-typedef struct CSR_TSF_TIME
-{
- u8 x[8];
-} CSR_TSF_TIME;
-
-typedef u16 CSR_TIME_UNITS;
-
-typedef enum CSR_TRANSMISSION_CONTROL
-{
- CSR_TRIGGERED = 0x0001,
- CSR_END_OF_SERVICE = 0x0002,
- CSR_NO_CONFIRM_REQUIRED = 0x0004,
- CSR_ALLOW_BA = 0x0008
-} CSR_TRANSMISSION_CONTROL;
-
-typedef enum CSR_TRANSMISSION_STATUS
-{
- CSR_TX_SUCCESSFUL = 0x0000,
- CSR_TX_RETRY_LIMIT = 0x0001,
- CSR_TX_LIFETIME = 0x0002,
- CSR_TX_NO_BSS = 0x0003,
- CSR_TX_EXCESSIVE_DATA_LENGTH = 0x0004,
- CSR_TX_UNSUPPORTED_PRIORITY = 0x0006,
- CSR_TX_UNAVAILABLE_PRIORITY = 0x0007,
- CSR_TX_UNAVAILABLE_KEY_MAPPING = 0x000a,
- CSR_TX_EDCA_TIMEOUT = 0x000b,
- CSR_TX_BLOCK_ACK_TIMEOUT = 0x000c,
- CSR_TX_FAIL_TRANSMISSION_VIF_INTERRUPTED = 0x000d,
- CSR_TX_REJECTED_PEER_STATION_SLEEPING = 0x000e,
- CSR_TX_REJECTED_DTIM_ENDED = 0x000f,
- CSR_TX_REJECTED_DTIM_STARTED = 0x0010
-} CSR_TRANSMISSION_STATUS;
-
-typedef u16 CSR_TRIGGER_ID;
-
-typedef u16 CSR_TRIGGERED_ID;
-
-typedef enum CSR_HIP_VERSIONS
-{
- CSR_HIP_ENG_VERSION = 0x0001,
- CSR_HIP_VERSION = 0x0900
-} CSR_HIP_VERSIONS;
-
-typedef u16 CSR_BUFFER_HANDLE;
-
-typedef u16 CSR_CHANNEL_NUMBER;
-
-typedef struct CSR_DATA_REFERENCE
-{
- u16 SlotNumber;
- u16 DataLength;
-} CSR_DATAREF;
-
-typedef u16 CSR_DIALOG_TOKEN;
-
-typedef struct CSR_GENERIC_POINTER
-{
- u32 MemoryOffset;
- CSR_MEMORY_SPACE MemorySpace;
-} CSR_GENERIC_POINTER;
-
-typedef struct CSR_MLME_CONFIG_QUEUE_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_RESULT_CODE ResultCode;
-} CSR_MLME_CONFIG_QUEUE_CONFIRM;
-
-typedef struct CSR_MLME_CONFIG_QUEUE_REQUEST
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_NATURAL16 QueueIndex;
- CSR_NATURAL16 Aifs;
- CSR_NATURAL16 Cwmin;
- CSR_NATURAL16 Cwmax;
- CSR_NATURAL16 TxopLimit;
-} CSR_MLME_CONFIG_QUEUE_REQUEST;
-
-typedef struct CSR_MLME_GET_CONFIRM
-{
- CSR_DATAREF MibAttributeValue;
- CSR_DATAREF Dummydataref2;
- CSR_MIB_STATUS Status;
- CSR_NATURAL16 ErrorIndex;
-} CSR_MLME_GET_CONFIRM;
-
-typedef struct CSR_MLME_GET_REQUEST
-{
- CSR_DATAREF MibAttribute;
- CSR_DATAREF Dummydataref2;
-} CSR_MLME_GET_REQUEST;
-
-typedef struct CSR_MLME_GET_NEXT_CONFIRM
-{
- CSR_DATAREF MibAttributeValue;
- CSR_DATAREF Dummydataref2;
- CSR_MIB_STATUS Status;
- CSR_NATURAL16 ErrorIndex;
-} CSR_MLME_GET_NEXT_CONFIRM;
-
-typedef struct CSR_MLME_GET_NEXT_REQUEST
-{
- CSR_DATAREF MibAttribute;
- CSR_DATAREF Dummydataref2;
-} CSR_MLME_GET_NEXT_REQUEST;
-
-typedef struct CSR_MLME_HL_SYNC_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_MACADDRESS GroupAddress;
- CSR_RESULT_CODE ResultCode;
-} CSR_MLME_HL_SYNC_CONFIRM;
-
-typedef struct CSR_MLME_HL_SYNC_REQUEST
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_MACADDRESS GroupAddress;
-} CSR_MLME_HL_SYNC_REQUEST;
-
-typedef struct CSR_MLME_HL_SYNC_CANCEL_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_RESULT_CODE ResultCode;
-} CSR_MLME_HL_SYNC_CANCEL_CONFIRM;
-
-typedef struct CSR_MLME_HL_SYNC_CANCEL_REQUEST
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_MACADDRESS GroupAddress;
-} CSR_MLME_HL_SYNC_CANCEL_REQUEST;
-
-typedef struct CSR_MLME_MEASURE_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_RESULT_CODE ResultCode;
- CSR_DIALOG_TOKEN DialogToken;
-} CSR_MLME_MEASURE_CONFIRM;
-
-typedef struct CSR_MLME_MEASURE_INDICATION
-{
- CSR_DATAREF MeasurementReportSet;
- CSR_DATAREF Dummydataref2;
- CSR_DIALOG_TOKEN DialogToken;
-} CSR_MLME_MEASURE_INDICATION;
-
-typedef struct CSR_MLME_MEASURE_REQUEST
-{
- CSR_DATAREF MeasurementRequestSet;
- CSR_DATAREF Dummydataref2;
- CSR_DIALOG_TOKEN DialogToken;
-} CSR_MLME_MEASURE_REQUEST;
-
-typedef struct CSR_MLME_RESET_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_RESULT_CODE ResultCode;
-} CSR_MLME_RESET_CONFIRM;
-
-typedef struct CSR_MLME_RESET_REQUEST
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_MACADDRESS StaAddress;
- s16 SetDefaultMib;
-} CSR_MLME_RESET_REQUEST;
-
-typedef struct CSR_MLME_SET_CONFIRM
-{
- CSR_DATAREF MibAttributeValue;
- CSR_DATAREF Dummydataref2;
- CSR_MIB_STATUS Status;
- CSR_NATURAL16 ErrorIndex;
-} CSR_MLME_SET_CONFIRM;
-
-typedef struct CSR_MLME_SET_REQUEST
-{
- CSR_DATAREF MibAttributeValue;
- CSR_DATAREF Dummydataref2;
-} CSR_MLME_SET_REQUEST;
-
-typedef struct CSR_MLME_STOP_MEASURE_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_RESULT_CODE ResultCode;
- CSR_DIALOG_TOKEN DialogToken;
-} CSR_MLME_STOP_MEASURE_CONFIRM;
-
-typedef struct CSR_MLME_STOP_MEASURE_REQUEST
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_DIALOG_TOKEN DialogToken;
-} CSR_MLME_STOP_MEASURE_REQUEST;
-
-typedef u16 CSR_PROCESS_ID;
-
-typedef u16 CSR_RATE;
-
-typedef u16 CSR_SEQUENCE_NUMBER;
-
-typedef struct CSR_SIGNAL_PRIMITIVE_HEADER
-{
- s16 SignalId;
- CSR_PROCESS_ID ReceiverProcessId;
- CSR_PROCESS_ID SenderProcessId;
-} CSR_SIGNAL_PRIMITIVE_HEADER;
-
-typedef u16 CSR_TRAFFIC_WINDOW;
-
-typedef u16 CSR_VIF_IDENTIFIER;
-
-typedef struct CSR_DEBUG_GENERIC_CONFIRM
-{
- CSR_DATAREF DebugVariable;
- CSR_DATAREF Dummydataref2;
- CSR_NATURAL16 DebugWords[8];
-} CSR_DEBUG_GENERIC_CONFIRM;
-
-typedef struct CSR_DEBUG_GENERIC_INDICATION
-{
- CSR_DATAREF DebugVariable;
- CSR_DATAREF Dummydataref2;
- CSR_NATURAL16 DebugWords[8];
-} CSR_DEBUG_GENERIC_INDICATION;
-
-typedef struct CSR_DEBUG_GENERIC_REQUEST
-{
- CSR_DATAREF DebugVariable;
- CSR_DATAREF Dummydataref2;
- CSR_NATURAL16 DebugWords[8];
-} CSR_DEBUG_GENERIC_REQUEST;
-
-typedef struct CSR_DEBUG_STRING_INDICATION
-{
- CSR_DATAREF DebugMessage;
- CSR_DATAREF Dummydataref2;
-} CSR_DEBUG_STRING_INDICATION;
-
-typedef struct CSR_DEBUG_WORD16_INDICATION
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_NATURAL16 DebugWords[16];
-} CSR_DEBUG_WORD16_INDICATION;
-
-typedef struct CSR_MA_PACKET_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_TRANSMISSION_STATUS TransmissionStatus;
- CSR_NATURAL16 RetryCount;
- CSR_RATE Rate;
- CSR_CLIENT_TAG HostTag;
-} CSR_MA_PACKET_CONFIRM;
-
-typedef struct CSR_MA_PACKET_INDICATION
-{
- CSR_DATAREF Data;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_TSF_TIME LocalTime;
- CSR_IFINTERFACE Ifindex;
- CSR_CHANNEL_NUMBER Channel;
- CSR_RECEPTION_STATUS ReceptionStatus;
- CSR_DECIBELS Rssi;
- CSR_DECIBELS Snr;
- CSR_RATE ReceivedRate;
-} CSR_MA_PACKET_INDICATION;
-
-typedef struct CSR_MA_PACKET_REQUEST
-{
- CSR_DATAREF Data;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_RATE TransmitRate;
- CSR_CLIENT_TAG HostTag;
- CSR_PRIORITY Priority;
- CSR_MACADDRESS Ra;
- CSR_TRANSMISSION_CONTROL TransmissionControl;
-} CSR_MA_PACKET_REQUEST;
-
-typedef struct CSR_MA_PACKET_CANCEL_REQUEST
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_CLIENT_TAG HostTag;
-} CSR_MA_PACKET_CANCEL_REQUEST;
-
-typedef struct CSR_MA_PACKET_ERROR_INDICATION
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_MACADDRESS PeerQstaAddress;
- CSR_PRIORITY UserPriority;
- CSR_SEQUENCE_NUMBER SequenceNumber;
-} CSR_MA_PACKET_ERROR_INDICATION;
-
-typedef struct CSR_MA_VIF_AVAILABILITY_INDICATION
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- s16 Multicast;
-} CSR_MA_VIF_AVAILABILITY_INDICATION;
-
-typedef struct CSR_MA_VIF_AVAILABILITY_RESPONSE
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_RESULT_CODE ResultCode;
-} CSR_MA_VIF_AVAILABILITY_RESPONSE;
-
-typedef struct CSR_MLME_ADD_AUTONOMOUS_SCAN_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_RESULT_CODE ResultCode;
- CSR_AUTONOMOUS_SCAN_ID AutonomousScanId;
-} CSR_MLME_ADD_AUTONOMOUS_SCAN_CONFIRM;
-
-typedef struct CSR_MLME_ADD_AUTONOMOUS_SCAN_REQUEST
-{
- CSR_DATAREF ChannelList;
- CSR_DATAREF InformationElements;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_AUTONOMOUS_SCAN_ID AutonomousScanId;
- CSR_IFINTERFACE Ifindex;
- CSR_CHANNEL_STARTING_FACTOR ChannelStartingFactor;
- CSR_SCAN_TYPE ScanType;
- CSR_MICROSECONDS32 ProbeDelay;
- CSR_TIME_UNITS MinChannelTime;
- CSR_TIME_UNITS MaxChannelTime;
-} CSR_MLME_ADD_AUTONOMOUS_SCAN_REQUEST;
-
-typedef struct CSR_MLME_ADD_BLACKOUT_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_BLACKOUT_ID BlackoutId;
- CSR_RESULT_CODE ResultCode;
-} CSR_MLME_ADD_BLACKOUT_CONFIRM;
-
-typedef struct CSR_MLME_ADD_BLACKOUT_REQUEST
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_BLACKOUT_ID BlackoutId;
- CSR_BLACKOUT_TYPE BlackoutType;
- CSR_BLACKOUT_SOURCE BlackoutSource;
- CSR_MICROSECONDS32 BlackoutStartReference;
- CSR_MICROSECONDS32 BlackoutPeriod;
- CSR_MICROSECONDS32 BlackoutDuration;
- CSR_MACADDRESS PeerStaAddress;
- CSR_NATURAL16 BlackoutCount;
-} CSR_MLME_ADD_BLACKOUT_REQUEST;
-
-typedef struct CSR_MLME_ADD_MULTICAST_ADDRESS_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_RESULT_CODE ResultCode;
-} CSR_MLME_ADD_MULTICAST_ADDRESS_CONFIRM;
-
-typedef struct CSR_MLME_ADD_MULTICAST_ADDRESS_REQUEST
-{
- CSR_DATAREF Data;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_NATURAL16 NumberOfMulticastGroupAddresses;
-} CSR_MLME_ADD_MULTICAST_ADDRESS_REQUEST;
-
-typedef struct CSR_MLME_ADD_PERIODIC_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_PERIODIC_ID PeriodicId;
- CSR_RESULT_CODE ResultCode;
-} CSR_MLME_ADD_PERIODIC_CONFIRM;
-
-typedef struct CSR_MLME_ADD_PERIODIC_REQUEST
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_PERIODIC_ID PeriodicId;
- CSR_MICROSECONDS32 MaximumLatency;
- CSR_PERIODIC_SCHEDULING_MODE PeriodicSchedulingMode;
- s16 WakeHost;
- CSR_PRIORITY UserPriority;
-} CSR_MLME_ADD_PERIODIC_REQUEST;
-
-typedef struct CSR_MLME_ADD_RX_TRIGGER_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_TRIGGER_ID TriggerId;
- CSR_RESULT_CODE ResultCode;
-} CSR_MLME_ADD_RX_TRIGGER_CONFIRM;
-
-typedef struct CSR_MLME_ADD_RX_TRIGGER_REQUEST
-{
- CSR_DATAREF InformationElements;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_TRIGGER_ID TriggerId;
- CSR_PRIORITY Priority;
-} CSR_MLME_ADD_RX_TRIGGER_REQUEST;
-
-typedef struct CSR_MLME_ADD_TEMPLATE_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_FRAME_TYPE FrameType;
- CSR_RESULT_CODE ResultCode;
-} CSR_MLME_ADD_TEMPLATE_CONFIRM;
-
-typedef struct CSR_MLME_ADD_TEMPLATE_REQUEST
-{
- CSR_DATAREF Data1;
- CSR_DATAREF Data2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_FRAME_TYPE FrameType;
- CSR_RATE MinTransmitRate;
-} CSR_MLME_ADD_TEMPLATE_REQUEST;
-
-typedef struct CSR_MLME_ADD_TRIGGERED_GET_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_RESULT_CODE ResultCode;
- CSR_TRIGGERED_ID TriggeredId;
-} CSR_MLME_ADD_TRIGGERED_GET_CONFIRM;
-
-typedef struct CSR_MLME_ADD_TRIGGERED_GET_REQUEST
-{
- CSR_DATAREF MibAttribute;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_TRIGGERED_ID TriggeredId;
-} CSR_MLME_ADD_TRIGGERED_GET_REQUEST;
-
-typedef struct CSR_MLME_ADD_TSPEC_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_PRIORITY UserPriority;
- CSR_RESULT_CODE ResultCode;
-} CSR_MLME_ADD_TSPEC_CONFIRM;
-
-typedef struct CSR_MLME_ADD_TSPEC_REQUEST
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_PRIORITY UserPriority;
- CSR_DIRECTION Direction;
- CSR_PS_SCHEME PsScheme;
- CSR_NATURAL16 MediumTime;
- CSR_MICROSECONDS32 ServiceStartTime;
- CSR_MICROSECONDS32 ServiceInterval;
- CSR_RATE MinimumDataRate;
-} CSR_MLME_ADD_TSPEC_REQUEST;
-
-typedef struct CSR_MLME_AUTONOMOUS_SCAN_DONE_INDICATION
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_RESULT_CODE ResultCode;
- CSR_AUTONOMOUS_SCAN_ID AutonomousScanId;
-} CSR_MLME_AUTONOMOUS_SCAN_DONE_INDICATION;
-
-typedef struct CSR_MLME_AUTONOMOUS_SCAN_LOSS_INDICATION
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_MACADDRESS Bssid;
-} CSR_MLME_AUTONOMOUS_SCAN_LOSS_INDICATION;
-
-typedef struct CSR_MLME_BLACKOUT_ENDED_INDICATION
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_BLACKOUT_ID BlackoutId;
-} CSR_MLME_BLACKOUT_ENDED_INDICATION;
-
-typedef struct CSR_MLME_BLOCKACK_ERROR_INDICATION
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_REASON_CODE ResultCode;
- CSR_MACADDRESS PeerQstaAddress;
-} CSR_MLME_BLOCKACK_ERROR_INDICATION;
-
-typedef struct CSR_MLME_CONNECTED_INDICATION
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_CONNECTION_STATUS ConnectionStatus;
- CSR_MACADDRESS PeerMacAddress;
-} CSR_MLME_CONNECTED_INDICATION;
-
-typedef struct CSR_MLME_CONNECT_STATUS_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_RESULT_CODE ResultCode;
-} CSR_MLME_CONNECT_STATUS_CONFIRM;
-
-typedef struct CSR_MLME_CONNECT_STATUS_REQUEST
-{
- CSR_DATAREF InformationElements;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_CONNECTION_STATUS ConnectionStatus;
- CSR_MACADDRESS StaAddress;
- CSR_ASSOCIATION_ID AssociationId;
- CSR_CAPABILITY_INFORMATION AssociationCapabilityInformation;
-} CSR_MLME_CONNECT_STATUS_REQUEST;
-
-typedef struct CSR_MLME_DELETEKEYS_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_RESULT_CODE ResultCode;
-} CSR_MLME_DELETEKEYS_CONFIRM;
-
-typedef struct CSR_MLME_DELETEKEYS_REQUEST
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_NATURAL16 KeyId;
- CSR_KEY_TYPE KeyType;
- CSR_MACADDRESS Address;
-} CSR_MLME_DELETEKEYS_REQUEST;
-
-typedef struct CSR_MLME_DEL_AUTONOMOUS_SCAN_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_RESULT_CODE ResultCode;
- CSR_AUTONOMOUS_SCAN_ID AutonomousScanId;
-} CSR_MLME_DEL_AUTONOMOUS_SCAN_CONFIRM;
-
-typedef struct CSR_MLME_DEL_AUTONOMOUS_SCAN_REQUEST
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_AUTONOMOUS_SCAN_ID AutonomousScanId;
-} CSR_MLME_DEL_AUTONOMOUS_SCAN_REQUEST;
-
-typedef struct CSR_MLME_DEL_BLACKOUT_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_BLACKOUT_ID BlackoutId;
- CSR_RESULT_CODE ResultCode;
-} CSR_MLME_DEL_BLACKOUT_CONFIRM;
-
-typedef struct CSR_MLME_DEL_BLACKOUT_REQUEST
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_BLACKOUT_ID BlackoutId;
-} CSR_MLME_DEL_BLACKOUT_REQUEST;
-
-typedef struct CSR_MLME_DEL_PERIODIC_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_PERIODIC_ID PeriodicId;
- CSR_RESULT_CODE ResultCode;
-} CSR_MLME_DEL_PERIODIC_CONFIRM;
-
-typedef struct CSR_MLME_DEL_PERIODIC_REQUEST
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_PERIODIC_ID PeriodicId;
-} CSR_MLME_DEL_PERIODIC_REQUEST;
-
-typedef struct CSR_MLME_DEL_RX_TRIGGER_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_TRIGGER_ID TriggerId;
- CSR_RESULT_CODE ResultCode;
-} CSR_MLME_DEL_RX_TRIGGER_CONFIRM;
-
-typedef struct CSR_MLME_DEL_RX_TRIGGER_REQUEST
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_TRIGGER_ID TriggerId;
-} CSR_MLME_DEL_RX_TRIGGER_REQUEST;
-
-typedef struct CSR_MLME_DEL_TRIGGERED_GET_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_RESULT_CODE ResultCode;
- CSR_TRIGGERED_ID TriggeredId;
-} CSR_MLME_DEL_TRIGGERED_GET_CONFIRM;
-
-typedef struct CSR_MLME_DEL_TRIGGERED_GET_REQUEST
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_TRIGGERED_ID TriggeredId;
-} CSR_MLME_DEL_TRIGGERED_GET_REQUEST;
-
-typedef struct CSR_MLME_DEL_TSPEC_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_PRIORITY UserPriority;
- CSR_RESULT_CODE ResultCode;
-} CSR_MLME_DEL_TSPEC_CONFIRM;
-
-typedef struct CSR_MLME_DEL_TSPEC_REQUEST
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_PRIORITY UserPriority;
- CSR_DIRECTION Direction;
-} CSR_MLME_DEL_TSPEC_REQUEST;
-
-typedef struct CSR_MLME_GET_KEY_SEQUENCE_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_RESULT_CODE ResultCode;
- CSR_NATURAL16 SequenceNumber[8];
-} CSR_MLME_GET_KEY_SEQUENCE_CONFIRM;
-
-typedef struct CSR_MLME_GET_KEY_SEQUENCE_REQUEST
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_NATURAL16 KeyId;
- CSR_KEY_TYPE KeyType;
- CSR_MACADDRESS Address;
-} CSR_MLME_GET_KEY_SEQUENCE_REQUEST;
-
-typedef struct CSR_MLME_LEAVE_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_RESULT_CODE ResultCode;
-} CSR_MLME_LEAVE_CONFIRM;
-
-typedef struct CSR_MLME_LEAVE_REQUEST
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
-} CSR_MLME_LEAVE_REQUEST;
-
-typedef struct CSR_MLME_MODIFY_BSS_PARAMETER_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_RESULT_CODE ResultCode;
-} CSR_MLME_MODIFY_BSS_PARAMETER_CONFIRM;
-
-typedef struct CSR_MLME_MODIFY_BSS_PARAMETER_REQUEST
-{
- CSR_DATAREF Data;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_TIME_UNITS BeaconPeriod;
- CSR_BEACON_PERIODS DtimPeriod;
- CSR_CAPABILITY_INFORMATION CapabilityInformation;
- CSR_MACADDRESS Bssid;
- CSR_NATURAL16 RtsThreshold;
-} CSR_MLME_MODIFY_BSS_PARAMETER_REQUEST;
-
-typedef struct CSR_MLME_PAUSE_AUTONOMOUS_SCAN_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_RESULT_CODE ResultCode;
- CSR_AUTONOMOUS_SCAN_ID AutonomousScanId;
-} CSR_MLME_PAUSE_AUTONOMOUS_SCAN_CONFIRM;
-
-typedef struct CSR_MLME_PAUSE_AUTONOMOUS_SCAN_REQUEST
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_AUTONOMOUS_SCAN_ID AutonomousScanId;
- s16 Pause;
-} CSR_MLME_PAUSE_AUTONOMOUS_SCAN_REQUEST;
-
-typedef struct CSR_MLME_POWERMGT_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_RESULT_CODE ResultCode;
-} CSR_MLME_POWERMGT_CONFIRM;
-
-typedef struct CSR_MLME_POWERMGT_REQUEST
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_POWER_MANAGEMENT_MODE PowerManagementMode;
- s16 ReceiveDtims;
- CSR_BEACON_PERIODS ListenInterval;
- CSR_TRAFFIC_WINDOW TrafficWindow;
-} CSR_MLME_POWERMGT_REQUEST;
-
-typedef struct CSR_MLME_SCAN_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_RESULT_CODE ResultCode;
-} CSR_MLME_SCAN_CONFIRM;
-
-typedef struct CSR_MLME_SCAN_REQUEST
-{
- CSR_DATAREF ChannelList;
- CSR_DATAREF InformationElements;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_IFINTERFACE Ifindex;
- CSR_SCAN_TYPE ScanType;
- CSR_MICROSECONDS32 ProbeDelay;
- CSR_TIME_UNITS MinChannelTime;
- CSR_TIME_UNITS MaxChannelTime;
-} CSR_MLME_SCAN_REQUEST;
-
-typedef struct CSR_MLME_SCAN_CANCEL_REQUEST
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
-} CSR_MLME_SCAN_CANCEL_REQUEST;
-
-typedef struct CSR_MLME_SETKEYS_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_RESULT_CODE ResultCode;
-} CSR_MLME_SETKEYS_CONFIRM;
-
-typedef struct CSR_MLME_SETKEYS_REQUEST
-{
- CSR_DATAREF Key;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_NATURAL16 Length;
- CSR_NATURAL16 KeyId;
- CSR_KEY_TYPE KeyType;
- CSR_MACADDRESS Address;
- CSR_NATURAL16 SequenceNumber[8];
- CSR_CIPHER_SUITE_SELECTOR CipherSuiteSelector;
-} CSR_MLME_SETKEYS_REQUEST;
-
-typedef struct CSR_MLME_SET_CHANNEL_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_RESULT_CODE ResultCode;
-} CSR_MLME_SET_CHANNEL_CONFIRM;
-
-typedef struct CSR_MLME_SET_CHANNEL_REQUEST
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_IFINTERFACE Ifindex;
- CSR_CHANNEL_NUMBER Channel;
- CSR_MACADDRESS Address;
- CSR_TIME_UNITS AvailabilityDuration;
- CSR_TIME_UNITS AvailabilityInterval;
-} CSR_MLME_SET_CHANNEL_REQUEST;
-
-typedef struct CSR_MLME_SET_PACKET_FILTER_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_RESULT_CODE ResultCode;
-} CSR_MLME_SET_PACKET_FILTER_CONFIRM;
-
-typedef struct CSR_MLME_SET_PACKET_FILTER_REQUEST
-{
- CSR_DATAREF InformationElements;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_PACKET_FILTER_MODE PacketFilterMode;
- CSR_IPV4_ADDRESS ArpFilterAddress;
-} CSR_MLME_SET_PACKET_FILTER_REQUEST;
-
-typedef struct CSR_MLME_SET_TIM_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_RESULT_CODE ResultCode;
-} CSR_MLME_SET_TIM_CONFIRM;
-
-typedef struct CSR_MLME_SET_TIM_REQUEST
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_ASSOCIATION_ID AssociationId;
- s16 TimValue;
-} CSR_MLME_SET_TIM_REQUEST;
-
-typedef struct CSR_MLME_SM_START_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_RESULT_CODE ResultCode;
-} CSR_MLME_SM_START_CONFIRM;
-
-typedef struct CSR_MLME_SM_START_REQUEST
-{
- CSR_DATAREF Beacon;
- CSR_DATAREF BssParameters;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_IFINTERFACE Ifindex;
- CSR_CHANNEL_NUMBER Channel;
- CSR_MACADDRESS InterfaceAddress;
- CSR_MACADDRESS Bssid;
- CSR_TIME_UNITS BeaconPeriod;
- CSR_BEACON_PERIODS DtimPeriod;
- CSR_CAPABILITY_INFORMATION CapabilityInformation;
-} CSR_MLME_SM_START_REQUEST;
-
-typedef struct CSR_MLME_START_AGGREGATION_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_MACADDRESS PeerQstaAddress;
- CSR_PRIORITY UserPriority;
- CSR_DIRECTION Direction;
- CSR_RESULT_CODE ResultCode;
- CSR_SEQUENCE_NUMBER SequenceNumber;
-} CSR_MLME_START_AGGREGATION_CONFIRM;
-
-typedef struct CSR_MLME_START_AGGREGATION_REQUEST
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_MACADDRESS PeerQstaAddress;
- CSR_PRIORITY UserPriority;
- CSR_DIRECTION Direction;
- CSR_STARTING_SEQUENCE_NUMBER StartingSequenceNumber;
- CSR_NATURAL16 BufferSize;
- CSR_TIME_UNITS BlockAckTimeout;
-} CSR_MLME_START_AGGREGATION_REQUEST;
-
-typedef struct CSR_MLME_STOP_AGGREGATION_CONFIRM
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_MACADDRESS PeerQstaAddress;
- CSR_PRIORITY UserPriority;
- CSR_DIRECTION Direction;
- CSR_RESULT_CODE ResultCode;
-} CSR_MLME_STOP_AGGREGATION_CONFIRM;
-
-typedef struct CSR_MLME_STOP_AGGREGATION_REQUEST
-{
- CSR_DATAREF Dummydataref1;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_MACADDRESS PeerQstaAddress;
- CSR_PRIORITY UserPriority;
- CSR_DIRECTION Direction;
-} CSR_MLME_STOP_AGGREGATION_REQUEST;
-
-typedef struct CSR_MLME_TRIGGERED_GET_INDICATION
-{
- CSR_DATAREF MibAttributeValue;
- CSR_DATAREF Dummydataref2;
- CSR_VIF_IDENTIFIER VirtualInterfaceIdentifier;
- CSR_MIB_STATUS Status;
- CSR_NATURAL16 ErrorIndex;
- CSR_TRIGGERED_ID TriggeredId;
-} CSR_MLME_TRIGGERED_GET_INDICATION;
-
-typedef struct CSR_SIGNAL_PRIMITIVE
-{
- CSR_SIGNAL_PRIMITIVE_HEADER SignalPrimitiveHeader;
- union
- {
- CSR_MA_PACKET_REQUEST MaPacketRequest;
- CSR_MA_PACKET_CONFIRM MaPacketConfirm;
- CSR_MA_PACKET_INDICATION MaPacketIndication;
- CSR_MA_PACKET_CANCEL_REQUEST MaPacketCancelRequest;
- CSR_MA_VIF_AVAILABILITY_RESPONSE MaVifAvailabilityResponse;
- CSR_MA_VIF_AVAILABILITY_INDICATION MaVifAvailabilityIndication;
- CSR_MA_PACKET_ERROR_INDICATION MaPacketErrorIndication;
- CSR_MLME_RESET_REQUEST MlmeResetRequest;
- CSR_MLME_RESET_CONFIRM MlmeResetConfirm;
- CSR_MLME_GET_REQUEST MlmeGetRequest;
- CSR_MLME_GET_CONFIRM MlmeGetConfirm;
- CSR_MLME_SET_REQUEST MlmeSetRequest;
- CSR_MLME_SET_CONFIRM MlmeSetConfirm;
- CSR_MLME_GET_NEXT_REQUEST MlmeGetNextRequest;
- CSR_MLME_GET_NEXT_CONFIRM MlmeGetNextConfirm;
- CSR_MLME_POWERMGT_REQUEST MlmePowermgtRequest;
- CSR_MLME_POWERMGT_CONFIRM MlmePowermgtConfirm;
- CSR_MLME_SCAN_REQUEST MlmeScanRequest;
- CSR_MLME_SCAN_CONFIRM MlmeScanConfirm;
- CSR_MLME_HL_SYNC_REQUEST MlmeHlSyncRequest;
- CSR_MLME_HL_SYNC_CONFIRM MlmeHlSyncConfirm;
- CSR_MLME_MEASURE_REQUEST MlmeMeasureRequest;
- CSR_MLME_MEASURE_CONFIRM MlmeMeasureConfirm;
- CSR_MLME_MEASURE_INDICATION MlmeMeasureIndication;
- CSR_MLME_SETKEYS_REQUEST MlmeSetkeysRequest;
- CSR_MLME_SETKEYS_CONFIRM MlmeSetkeysConfirm;
- CSR_MLME_DELETEKEYS_REQUEST MlmeDeletekeysRequest;
- CSR_MLME_DELETEKEYS_CONFIRM MlmeDeletekeysConfirm;
- CSR_MLME_AUTONOMOUS_SCAN_LOSS_INDICATION MlmeAutonomousScanLossIndication;
- CSR_MLME_CONNECTED_INDICATION MlmeConnectedIndication;
- CSR_MLME_SCAN_CANCEL_REQUEST MlmeScanCancelRequest;
- CSR_MLME_HL_SYNC_CANCEL_REQUEST MlmeHlSyncCancelRequest;
- CSR_MLME_HL_SYNC_CANCEL_CONFIRM MlmeHlSyncCancelConfirm;
- CSR_MLME_ADD_PERIODIC_REQUEST MlmeAddPeriodicRequest;
- CSR_MLME_ADD_PERIODIC_CONFIRM MlmeAddPeriodicConfirm;
- CSR_MLME_DEL_PERIODIC_REQUEST MlmeDelPeriodicRequest;
- CSR_MLME_DEL_PERIODIC_CONFIRM MlmeDelPeriodicConfirm;
- CSR_MLME_ADD_AUTONOMOUS_SCAN_REQUEST MlmeAddAutonomousScanRequest;
- CSR_MLME_ADD_AUTONOMOUS_SCAN_CONFIRM MlmeAddAutonomousScanConfirm;
- CSR_MLME_DEL_AUTONOMOUS_SCAN_REQUEST MlmeDelAutonomousScanRequest;
- CSR_MLME_DEL_AUTONOMOUS_SCAN_CONFIRM MlmeDelAutonomousScanConfirm;
- CSR_MLME_SET_PACKET_FILTER_REQUEST MlmeSetPacketFilterRequest;
- CSR_MLME_SET_PACKET_FILTER_CONFIRM MlmeSetPacketFilterConfirm;
- CSR_MLME_STOP_MEASURE_REQUEST MlmeStopMeasureRequest;
- CSR_MLME_STOP_MEASURE_CONFIRM MlmeStopMeasureConfirm;
- CSR_MLME_PAUSE_AUTONOMOUS_SCAN_REQUEST MlmePauseAutonomousScanRequest;
- CSR_MLME_PAUSE_AUTONOMOUS_SCAN_CONFIRM MlmePauseAutonomousScanConfirm;
- CSR_MLME_AUTONOMOUS_SCAN_DONE_INDICATION MlmeAutonomousScanDoneIndication;
- CSR_MLME_ADD_TRIGGERED_GET_REQUEST MlmeAddTriggeredGetRequest;
- CSR_MLME_ADD_TRIGGERED_GET_CONFIRM MlmeAddTriggeredGetConfirm;
- CSR_MLME_DEL_TRIGGERED_GET_REQUEST MlmeDelTriggeredGetRequest;
- CSR_MLME_DEL_TRIGGERED_GET_CONFIRM MlmeDelTriggeredGetConfirm;
- CSR_MLME_TRIGGERED_GET_INDICATION MlmeTriggeredGetIndication;
- CSR_MLME_ADD_BLACKOUT_REQUEST MlmeAddBlackoutRequest;
- CSR_MLME_ADD_BLACKOUT_CONFIRM MlmeAddBlackoutConfirm;
- CSR_MLME_BLACKOUT_ENDED_INDICATION MlmeBlackoutEndedIndication;
- CSR_MLME_DEL_BLACKOUT_REQUEST MlmeDelBlackoutRequest;
- CSR_MLME_DEL_BLACKOUT_CONFIRM MlmeDelBlackoutConfirm;
- CSR_MLME_ADD_RX_TRIGGER_REQUEST MlmeAddRxTriggerRequest;
- CSR_MLME_ADD_RX_TRIGGER_CONFIRM MlmeAddRxTriggerConfirm;
- CSR_MLME_DEL_RX_TRIGGER_REQUEST MlmeDelRxTriggerRequest;
- CSR_MLME_DEL_RX_TRIGGER_CONFIRM MlmeDelRxTriggerConfirm;
- CSR_MLME_CONNECT_STATUS_REQUEST MlmeConnectStatusRequest;
- CSR_MLME_CONNECT_STATUS_CONFIRM MlmeConnectStatusConfirm;
- CSR_MLME_MODIFY_BSS_PARAMETER_REQUEST MlmeModifyBssParameterRequest;
- CSR_MLME_MODIFY_BSS_PARAMETER_CONFIRM MlmeModifyBssParameterConfirm;
- CSR_MLME_ADD_TEMPLATE_REQUEST MlmeAddTemplateRequest;
- CSR_MLME_ADD_TEMPLATE_CONFIRM MlmeAddTemplateConfirm;
- CSR_MLME_CONFIG_QUEUE_REQUEST MlmeConfigQueueRequest;
- CSR_MLME_CONFIG_QUEUE_CONFIRM MlmeConfigQueueConfirm;
- CSR_MLME_ADD_TSPEC_REQUEST MlmeAddTspecRequest;
- CSR_MLME_ADD_TSPEC_CONFIRM MlmeAddTspecConfirm;
- CSR_MLME_DEL_TSPEC_REQUEST MlmeDelTspecRequest;
- CSR_MLME_DEL_TSPEC_CONFIRM MlmeDelTspecConfirm;
- CSR_MLME_START_AGGREGATION_REQUEST MlmeStartAggregationRequest;
- CSR_MLME_START_AGGREGATION_CONFIRM MlmeStartAggregationConfirm;
- CSR_MLME_BLOCKACK_ERROR_INDICATION MlmeBlockackErrorIndication;
- CSR_MLME_STOP_AGGREGATION_REQUEST MlmeStopAggregationRequest;
- CSR_MLME_STOP_AGGREGATION_CONFIRM MlmeStopAggregationConfirm;
- CSR_MLME_SM_START_REQUEST MlmeSmStartRequest;
- CSR_MLME_SM_START_CONFIRM MlmeSmStartConfirm;
- CSR_MLME_LEAVE_REQUEST MlmeLeaveRequest;
- CSR_MLME_LEAVE_CONFIRM MlmeLeaveConfirm;
- CSR_MLME_SET_TIM_REQUEST MlmeSetTimRequest;
- CSR_MLME_SET_TIM_CONFIRM MlmeSetTimConfirm;
- CSR_MLME_GET_KEY_SEQUENCE_REQUEST MlmeGetKeySequenceRequest;
- CSR_MLME_GET_KEY_SEQUENCE_CONFIRM MlmeGetKeySequenceConfirm;
- CSR_MLME_SET_CHANNEL_REQUEST MlmeSetChannelRequest;
- CSR_MLME_SET_CHANNEL_CONFIRM MlmeSetChannelConfirm;
- CSR_MLME_ADD_MULTICAST_ADDRESS_REQUEST MlmeAddMulticastAddressRequest;
- CSR_MLME_ADD_MULTICAST_ADDRESS_CONFIRM MlmeAddMulticastAddressConfirm;
- CSR_DEBUG_STRING_INDICATION DebugStringIndication;
- CSR_DEBUG_WORD16_INDICATION DebugWord16Indication;
- CSR_DEBUG_GENERIC_REQUEST DebugGenericRequest;
- CSR_DEBUG_GENERIC_CONFIRM DebugGenericConfirm;
- CSR_DEBUG_GENERIC_INDICATION DebugGenericIndication;
- } u;
-} CSR_SIGNAL;
-
-#define SIG_FILTER_SIZE 6
-
-u32 SigGetFilterPos(u16 aSigID);
-
-#endif
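
The request and confirm structures above are only ever carried inside the CSR_SIGNAL union. As a minimal sketch of how a caller might prepare an MLME-SCAN.request before serialising it and handing it to unifi_send_signal() (the numeric values and the handling of the primitive header are assumptions for illustration, not taken from the driver):

#include <linux/string.h>

/* Illustrative sketch only: fill in an MLME-SCAN.request inside a CSR_SIGNAL
 * using the fields declared above. */
static void example_build_scan_request(CSR_SIGNAL *sig)
{
	CSR_MLME_SCAN_REQUEST *req = &sig->u.MlmeScanRequest;

	memset(sig, 0, sizeof(*sig));
	/* sig->SignalPrimitiveHeader would be set to the MLME-SCAN.request
	 * signal id here; its layout is defined elsewhere in this header. */
	req->VirtualInterfaceIdentifier = 0;	/* assumed: first VIF */
	req->Ifindex = 0;			/* assumed interface index */
	req->ScanType = 0;			/* assumed: active scan code */
	req->ProbeDelay = 0;			/* microseconds */
	req->MinChannelTime = 10;		/* time units (TU) */
	req->MaxChannelTime = 40;		/* time units (TU) */
	/* ChannelList and InformationElements are CSR_DATAREFs that would
	 * reference bulk-data buffers; they stay zeroed in this sketch. */
}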
diff --git a/drivers/staging/csr/csr_wifi_hip_ta_sampling.c b/drivers/staging/csr/csr_wifi_hip_ta_sampling.c
deleted file mode 100644
index f1df36a..0000000
--- a/drivers/staging/csr/csr_wifi_hip_ta_sampling.c
+++ /dev/null
@@ -1,541 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/*
- * ---------------------------------------------------------------------------
- * FILE: csr_wifi_hip_ta_sampling.c
- *
- * PURPOSE:
- * The traffic analysis sampling module.
- * This gathers data which is sent to the SME and used to analyse
- * the traffic behaviour.
- *
- * Provides:
- * unifi_ta_sampling_init - Initialise the internal state
- * unifi_ta_sample - Sampling function, call this for every data packet
- *
- * Calls these external functions which must be provided:
- * unifi_ta_indicate_sampling - Pass sample data to the SME.
- * unifi_ta_indicate_protocol - Report certain data packet types to the SME.
- * ---------------------------------------------------------------------------
- */
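
To make the call flow described above concrete, here is a minimal sketch (not taken from the driver) of how an OS-layer receive path could feed the sampling module; unifi_ta_sampling_init(card) is assumed to have been called once at start-up, and example_sta_mac() is a hypothetical accessor for the chip's own MAC address:

#include <linux/if_ether.h>
#include <linux/jiffies.h>

static void example_rx_sample(card_t *card, const bulk_data_desc_t *data,
			      const u8 *eth_hdr, u16 rx_rate)
{
	unifi_ta_sample(card,
			CSR_WIFI_ROUTER_CTRL_PROTOCOL_DIRECTION_RX,
			data,
			eth_hdr + ETH_ALEN,		/* source MAC (saddr) */
			example_sta_mac(card),		/* hypothetical: the chip's own MAC */
			jiffies_to_msecs(jiffies),	/* timestamp in msec */
			rx_rate);
	/* Roughly once a second the module calls back into
	 * unifi_ta_indicate_sampling() / unifi_ta_indicate_protocol(),
	 * which the porting layer must provide. */
}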
-
-#include "csr_wifi_hip_card_sdio.h"
-
-/* Maximum number of Tx frames we store each CYCLE_1, for detecting period */
-#define TA_MAX_INTERVALS_IN_C1 100
-
-/* Number of intervals in CYCLE_1 (one second), for detecting periodic */
-/* Must match the size of CsrWifiRouterCtrlTrafficStats.intervals, minus 1 */
-#define TA_INTERVALS_NUM 10
-
-/* Step (in msecs) between intervals, for detecting periodic */
-/* We are only interested in periods up to 100ms, i.e. between beacons */
-/* This is correct for TA_INTERVALS_NUM=10 */
-#define TA_INTERVALS_STEP 10
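
As a worked example of how these constants are used further down in unifi_ta_sample(): the gap (in msec) between consecutive interesting Tx frames is rounded into a histogram bin of width TA_INTERVALS_STEP, and gaps much larger than TA_INTERVALS_NUM * TA_INTERVALS_STEP (about 100 ms) fall past the last bin and are ignored.

/* Sketch of the binning arithmetic used later in unifi_ta_sample();
 * e.g. example_interval_to_bin(25) == (25 + 5 - 1) / 10 == 2. */
static u32 example_interval_to_bin(u32 interval_ms)
{
	return (interval_ms + TA_INTERVALS_STEP / 2 - 1) / TA_INTERVALS_STEP;
}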
-
-
-enum ta_frame_identity
-{
- TA_FRAME_UNKNOWN,
- TA_FRAME_ETHERNET_UNINTERESTING,
- TA_FRAME_ETHERNET_INTERESTING
-};
-
-
-#define TA_ETHERNET_TYPE_OFFSET 6
-#define TA_LLC_HEADER_SIZE 8
-#define TA_IP_TYPE_OFFSET 17
-#define TA_UDP_SOURCE_PORT_OFFSET 28
-#define TA_UDP_DEST_PORT_OFFSET (TA_UDP_SOURCE_PORT_OFFSET + 2)
-#define TA_BOOTP_CLIENT_MAC_ADDR_OFFSET 64
-#define TA_DHCP_MESSAGE_TYPE_OFFSET 278
-#define TA_DHCP_MESSAGE_TYPE_ACK 0x05
-#define TA_PROTO_TYPE_IP 0x0800
-#define TA_PROTO_TYPE_EAP 0x888E
-#define TA_PROTO_TYPE_WAI 0x8864
-#define TA_PROTO_TYPE_ARP 0x0806
-#define TA_IP_TYPE_TCP 0x06
-#define TA_IP_TYPE_UDP 0x11
-#define TA_UDP_PORT_BOOTPC 0x0044
-#define TA_UDP_PORT_BOOTPS 0x0043
-#define TA_EAPOL_TYPE_OFFSET 9
-#define TA_EAPOL_TYPE_START 0x01
-
-#define snap_802_2 0xAAAA0300
-#define oui_rfc1042 0x00000000
-#define oui_8021h 0x0000f800
-static const u8 aironet_snap[5] = { 0x00, 0x40, 0x96, 0x00, 0x00 };
-
-
-/*
- * ---------------------------------------------------------------------------
- * ta_detect_protocol
- *
- * Internal only.
- * Detects a specific protocol in a frame and indicates a TA event.
- *
- * Arguments:
- * card The HIP core context.
- * direction The direction of the frame (tx or rx).
- * data Pointer to the structure that contains the data.
- * saddr Source MAC address of the frame.
- * sta_macaddr MAC address of the UniFi chip (local STA).
- *
- * Returns:
- * None
- * ---------------------------------------------------------------------------
- */
-static enum ta_frame_identity ta_detect_protocol(card_t *card, CsrWifiRouterCtrlProtocolDirection direction,
- const bulk_data_desc_t *data,
- const u8 *saddr,
- const u8 *sta_macaddr)
-{
- ta_data_t *tad = &card->ta_sampling;
- u16 proto;
- u16 source_port, dest_port;
- CsrWifiMacAddress srcAddress;
- u32 snap_hdr, oui_hdr;
-
- if (data->data_length < TA_LLC_HEADER_SIZE)
- {
- return TA_FRAME_UNKNOWN;
- }
-
- snap_hdr = (((u32)data->os_data_ptr[0]) << 24) |
- (((u32)data->os_data_ptr[1]) << 16) |
- (((u32)data->os_data_ptr[2]) << 8);
- if (snap_hdr != snap_802_2)
- {
- return TA_FRAME_UNKNOWN;
- }
-
- if (tad->packet_filter & CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_CUSTOM)
- {
- /*
- * Here we would use the custom filter to detect interesting frames.
- */
- }
-
- oui_hdr = (((u32)data->os_data_ptr[3]) << 24) |
- (((u32)data->os_data_ptr[4]) << 16) |
- (((u32)data->os_data_ptr[5]) << 8);
- if ((oui_hdr == oui_rfc1042) || (oui_hdr == oui_8021h))
- {
- proto = (data->os_data_ptr[TA_ETHERNET_TYPE_OFFSET] * 256) +
- data->os_data_ptr[TA_ETHERNET_TYPE_OFFSET + 1];
-
- /* The only interesting IP frames are the DHCP */
- if (proto == TA_PROTO_TYPE_IP)
- {
- if (data->data_length > TA_IP_TYPE_OFFSET)
- {
- if (tad->packet_filter & CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_CUSTOM)
- {
- ta_l4stats_t *ta_l4stats = &tad->ta_l4stats;
- u8 l4proto = data->os_data_ptr[TA_IP_TYPE_OFFSET];
-
- if (l4proto == TA_IP_TYPE_TCP)
- {
- if (direction == CSR_WIFI_ROUTER_CTRL_PROTOCOL_DIRECTION_TX)
- {
- ta_l4stats->txTcpBytesCount += data->data_length;
- }
- else
- {
- ta_l4stats->rxTcpBytesCount += data->data_length;
- }
- }
- else if (l4proto == TA_IP_TYPE_UDP)
- {
- if (direction == CSR_WIFI_ROUTER_CTRL_PROTOCOL_DIRECTION_TX)
- {
- ta_l4stats->txUdpBytesCount += data->data_length;
- }
- else
- {
- ta_l4stats->rxUdpBytesCount += data->data_length;
- }
- }
- }
-
- /* detect DHCP frames */
- if (tad->packet_filter & CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_DHCP)
- {
- /* DHCP frames are UDP frames with BOOTP ports */
- if (data->os_data_ptr[TA_IP_TYPE_OFFSET] == TA_IP_TYPE_UDP)
- {
- if (data->data_length > TA_UDP_DEST_PORT_OFFSET)
- {
- source_port = (data->os_data_ptr[TA_UDP_SOURCE_PORT_OFFSET] * 256) +
- data->os_data_ptr[TA_UDP_SOURCE_PORT_OFFSET + 1];
- dest_port = (data->os_data_ptr[TA_UDP_DEST_PORT_OFFSET] * 256) +
- data->os_data_ptr[TA_UDP_DEST_PORT_OFFSET + 1];
-
- if (((source_port == TA_UDP_PORT_BOOTPC) && (dest_port == TA_UDP_PORT_BOOTPS)) ||
- ((source_port == TA_UDP_PORT_BOOTPS) && (dest_port == TA_UDP_PORT_BOOTPC)))
- {
- /* The DHCP should have at least a message type (request, ack, nack, etc) */
- if (data->data_length > TA_DHCP_MESSAGE_TYPE_OFFSET + 6)
- {
- UNIFI_MAC_ADDRESS_COPY(srcAddress.a, saddr);
-
- if (direction == CSR_WIFI_ROUTER_CTRL_PROTOCOL_DIRECTION_TX)
- {
- unifi_ta_indicate_protocol(card->ospriv,
- CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_DHCP,
- direction,
- &srcAddress);
- return TA_FRAME_ETHERNET_UNINTERESTING;
- }
-
- /* DHCPACK is a special indication */
- if (UNIFI_MAC_ADDRESS_CMP(data->os_data_ptr + TA_BOOTP_CLIENT_MAC_ADDR_OFFSET, sta_macaddr) == TRUE)
- {
- if (data->os_data_ptr[TA_DHCP_MESSAGE_TYPE_OFFSET] == TA_DHCP_MESSAGE_TYPE_ACK)
- {
- unifi_ta_indicate_protocol(card->ospriv,
- CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_DHCP_ACK,
- direction,
- &srcAddress);
- }
- else
- {
- unifi_ta_indicate_protocol(card->ospriv,
- CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_DHCP,
- direction,
- &srcAddress);
- }
- }
- }
- }
- }
- }
- }
- }
-
- return TA_FRAME_ETHERNET_INTERESTING;
- }
-
- /* detect protocol type EAPOL or WAI (treated as equivalent here) */
- if (tad->packet_filter & CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_EAPOL)
- {
- if (TA_PROTO_TYPE_EAP == proto || TA_PROTO_TYPE_WAI == proto)
- {
- if ((TA_PROTO_TYPE_WAI == proto) || (direction != CSR_WIFI_ROUTER_CTRL_PROTOCOL_DIRECTION_TX) ||
- (data->os_data_ptr[TA_EAPOL_TYPE_OFFSET] == TA_EAPOL_TYPE_START))
- {
- UNIFI_MAC_ADDRESS_COPY(srcAddress.a, saddr);
- unifi_ta_indicate_protocol(card->ospriv,
- CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_EAPOL,
- direction, &srcAddress);
- }
- return TA_FRAME_ETHERNET_UNINTERESTING;
- }
- }
-
- /* detect protocol type 0x0806 (ARP) */
- if (tad->packet_filter & CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_ARP)
- {
- if (proto == TA_PROTO_TYPE_ARP)
- {
- UNIFI_MAC_ADDRESS_COPY(srcAddress.a, saddr);
- unifi_ta_indicate_protocol(card->ospriv,
- CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_ARP,
- direction, &srcAddress);
- return TA_FRAME_ETHERNET_UNINTERESTING;
- }
- }
-
- return TA_FRAME_ETHERNET_INTERESTING;
- }
- else if (tad->packet_filter & CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_AIRONET)
- {
- /* detect Aironet frames */
- if (!memcmp(data->os_data_ptr + 3, aironet_snap, 5))
- {
- UNIFI_MAC_ADDRESS_COPY(srcAddress.a, saddr);
- unifi_ta_indicate_protocol(card->ospriv, CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_AIRONET,
- direction, &srcAddress);
- }
- }
-
- return TA_FRAME_ETHERNET_UNINTERESTING;
-} /* ta_detect_protocol() */
-
-
-static void tas_reset_data(ta_data_t *tad)
-{
- s16 i;
-
- for (i = 0; i < (TA_INTERVALS_NUM + 1); i++)
- {
- tad->stats.intervals[i] = 0;
- }
-
- tad->stats.rxFramesNum = 0;
- tad->stats.txFramesNum = 0;
- tad->stats.rxBytesCount = 0;
- tad->stats.txBytesCount = 0;
- tad->stats.rxMeanRate = 0;
-
- tad->rx_sum_rate = 0;
-
- tad->ta_l4stats.rxTcpBytesCount = 0;
- tad->ta_l4stats.txTcpBytesCount = 0;
- tad->ta_l4stats.rxUdpBytesCount = 0;
- tad->ta_l4stats.txUdpBytesCount = 0;
-} /* tas_reset_data() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * API.
- * unifi_ta_sampling_init
- *
- * (Re)Initialise the Traffic Analysis sampling module.
- * Resets the counters and timestamps.
- *
- * Arguments:
- * card Pointer to the HIP core context; the TA sampling state
- * (card->ta_sampling) for this device instance is reset.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-void unifi_ta_sampling_init(card_t *card)
-{
- (void)unifi_ta_configure(card, CSR_WIFI_ROUTER_CTRL_TRAFFIC_CONFIG_TYPE_RESET, NULL);
-
- card->ta_sampling.packet_filter = CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_NONE;
- card->ta_sampling.traffic_type = CSR_WIFI_ROUTER_CTRL_TRAFFIC_TYPE_OCCASIONAL;
-} /* unifi_ta_sampling_init() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * API.
- * unifi_ta_sample
- *
- * Sample a data frame for the TA module.
- * This function stores all the useful information it can extract from
- * the frame and detects any specific protocols.
- *
- * Arguments:
- * card The HIP core context.
- * direction The direction of the frame (rx, tx)
- * data Pointer to the frame data
- * saddr Source MAC address of frame.
- * sta_macaddr MAC address of the UniFi chip (local STA).
- * timestamp Time (in msecs) that the frame was received.
- * rate Reported data rate for the rx frame (0 for tx frames)
- *
- * Returns:
- * None
- * ---------------------------------------------------------------------------
- */
-void unifi_ta_sample(card_t *card,
- CsrWifiRouterCtrlProtocolDirection direction,
- const bulk_data_desc_t *data,
- const u8 *saddr,
- const u8 *sta_macaddr,
- u32 timestamp,
- u16 rate)
-{
- ta_data_t *tad = &card->ta_sampling;
- enum ta_frame_identity identity;
- u32 time_delta;
-
-
-
- /* Step1: Check for specific frames */
- if (tad->packet_filter != CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_NONE)
- {
- identity = ta_detect_protocol(card, direction, data, saddr, sta_macaddr);
- }
- else
- {
- identity = TA_FRAME_ETHERNET_INTERESTING;
- }
-
-
- /* Step2: Update the information in the current record */
- if (direction == CSR_WIFI_ROUTER_CTRL_PROTOCOL_DIRECTION_RX)
- {
- /* Update the Rx packet count and the throughput count */
- tad->stats.rxFramesNum++;
- tad->stats.rxBytesCount += data->data_length;
-
- /* Accumulate packet Rx rates for later averaging */
- tad->rx_sum_rate += rate;
- }
- else
- {
- if (identity == TA_FRAME_ETHERNET_INTERESTING)
- {
- /*
- * Store the period between the last and the current frame.
- * There is no point storing more than TA_MAX_INTERVALS_IN_C1 periods;
- * beyond that the traffic is either bursty or continuous.
- */
- if (tad->stats.txFramesNum < TA_MAX_INTERVALS_IN_C1)
- {
- u32 interval;
- u32 index_in_intervals;
-
- interval = timestamp - tad->tx_last_ts;
- tad->tx_last_ts = timestamp;
- index_in_intervals = (interval + TA_INTERVALS_STEP / 2 - 1) / TA_INTERVALS_STEP;
-
- /* If the interval is interesting, update the t1_intervals count */
- if (index_in_intervals <= TA_INTERVALS_NUM)
- {
- unifi_trace(card->ospriv, UDBG5,
- "unifi_ta_sample: TX interval=%d index=%d\n",
- interval, index_in_intervals);
- tad->stats.intervals[index_in_intervals]++;
- }
- }
- }
-
- /* Update the Tx packet count... */
- tad->stats.txFramesNum++;
- /* ... and the number of bytes for throughput. */
- tad->stats.txBytesCount += data->data_length;
- }
-
- /*
- * If more than one second has elapsed since the last report, send
- * another one.
- */
- /* Unsigned subtraction handles wrap-around from 0xFFFFFFFF to 0 */
- time_delta = timestamp - tad->last_indication_time;
- if (time_delta >= 1000)
- {
- /*
- * rxFramesNum can be cleared in tas_reset_data() by another thread.
- * Use a temp to avoid division by zero.
- */
- u32 temp_rxFramesNum;
- temp_rxFramesNum = tad->stats.rxFramesNum;
-
- /* Calculate this interval's mean frame Rx rate from the sum */
- if (temp_rxFramesNum)
- {
- tad->stats.rxMeanRate = tad->rx_sum_rate / temp_rxFramesNum;
- }
- unifi_trace(card->ospriv, UDBG5,
- "unifi_ta_sample: RX fr=%lu, r=%u, sum=%lu, av=%lu\n",
- tad->stats.rxFramesNum, rate,
- tad->rx_sum_rate, tad->stats.rxMeanRate);
-
- /*
- * Send the information collected in the stats struct
- * to the SME and reset the counters.
- */
- if (tad->packet_filter & CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_CUSTOM)
- {
- u32 rxTcpThroughput = tad->ta_l4stats.rxTcpBytesCount / time_delta;
- u32 txTcpThroughput = tad->ta_l4stats.txTcpBytesCount / time_delta;
- u32 rxUdpThroughput = tad->ta_l4stats.rxUdpBytesCount / time_delta;
- u32 txUdpThroughput = tad->ta_l4stats.txUdpBytesCount / time_delta;
-
- unifi_ta_indicate_l4stats(card->ospriv,
- rxTcpThroughput,
- txTcpThroughput,
- rxUdpThroughput,
- txUdpThroughput
- );
- }
- unifi_ta_indicate_sampling(card->ospriv, &tad->stats);
- tas_reset_data(tad);
- tad->last_indication_time = timestamp;
- }
-} /* unifi_ta_sample() */
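
A note on units for the report built above: the byte counters are divided by time_delta, which is measured in milliseconds, so the values handed to unifi_ta_indicate_l4stats() are effectively kilobytes per second. A trivial sketch of that arithmetic:

/* Illustrative only: e.g. example_throughput(1500000, 1000) == 1500,
 * i.e. ~1500 kB/s (about 12 Mbit/s). */
static u32 example_throughput(u32 bytes_in_interval, u32 interval_ms)
{
	return bytes_in_interval / interval_ms;
}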
-
-
-/*
- * ---------------------------------------------------------------------------
- * External API.
- * unifi_ta_configure
- *
- * Configures the TA module parameters.
- *
- * Arguments:
- * card The HIP core context.
- * config_type The type of the configuration request
- * config Pointer to the configuration parameters.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, CSR error code otherwise
- * ---------------------------------------------------------------------------
- */
-CsrResult unifi_ta_configure(card_t *card,
- CsrWifiRouterCtrlTrafficConfigType config_type,
- const CsrWifiRouterCtrlTrafficConfig *config)
-{
- ta_data_t *tad = &card->ta_sampling;
-
- /* Reinitialise our data when we are reset */
- if (config_type == CSR_WIFI_ROUTER_CTRL_TRAFFIC_CONFIG_TYPE_RESET)
- {
- /* Reset the stats to zero */
- tas_reset_data(tad);
-
- /* Reset the timer variables */
- tad->tx_last_ts = 0;
- tad->last_indication_time = 0;
-
- return CSR_RESULT_SUCCESS;
- }
-
- if (config_type == CSR_WIFI_ROUTER_CTRL_TRAFFIC_CONFIG_TYPE_FILTER)
- {
- tad->packet_filter = config->packetFilter;
-
- if (tad->packet_filter & CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_CUSTOM)
- {
- tad->custom_filter = config->customFilter;
- }
-
- return CSR_RESULT_SUCCESS;
- }
-
- return CSR_RESULT_SUCCESS;
-} /* unifi_ta_configure() */
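
For illustration, a sketch of how a caller might enable DHCP and EAPOL/WAI detection through the filter path handled above; the field usage is inferred from this file, while the flag values themselves are defined elsewhere:

#include <linux/string.h>

static CsrResult example_enable_dhcp_eapol(card_t *card)
{
	CsrWifiRouterCtrlTrafficConfig cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.packetFilter = CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_DHCP |
			   CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_EAPOL;

	return unifi_ta_configure(card,
				  CSR_WIFI_ROUTER_CTRL_TRAFFIC_CONFIG_TYPE_FILTER,
				  &cfg);
}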
-
-
-/*
- * ---------------------------------------------------------------------------
- * External API.
- * unifi_ta_classification
- *
- * Configures the current TA classification.
- *
- * Arguments:
- * card The HIP core context.
- * traffic_type The classification type
- * period The traffic period if the type is periodic
- *
- * Returns:
- * None
- * ---------------------------------------------------------------------------
- */
-void unifi_ta_classification(card_t *card,
- CsrWifiRouterCtrlTrafficType traffic_type,
- u16 period)
-{
- unifi_trace(card->ospriv, UDBG3,
- "Changed current ta classification to: %d\n", traffic_type);
-
- card->ta_sampling.traffic_type = traffic_type;
-}
-
-
diff --git a/drivers/staging/csr/csr_wifi_hip_ta_sampling.h b/drivers/staging/csr/csr_wifi_hip_ta_sampling.h
deleted file mode 100644
index aa684c6..0000000
--- a/drivers/staging/csr/csr_wifi_hip_ta_sampling.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/*
- * ---------------------------------------------------------------------------
- * FILE: csr_wifi_hip_ta_sampling.h
- *
- * PURPOSE:
- * This file contains Traffic Analysis definitions common to the
- * sampling and analysis modules.
- *
- * ---------------------------------------------------------------------------
- */
-#ifndef __TA_SAMPLING_H__
-#define __TA_SAMPLING_H__
-
-#include "csr_wifi_hip_unifi.h"
-
-typedef struct ta_l4stats
-{
- u32 rxTcpBytesCount;
- u32 txTcpBytesCount;
- u32 rxUdpBytesCount;
- u32 txUdpBytesCount;
-} ta_l4stats_t;
-
-/*
- * Context structure to preserve state between calls.
- */
-
-typedef struct ta_data
-{
- /* Current packet filter configuration */
- u16 packet_filter;
-
- /* Current packet custom filter configuration */
- CsrWifiRouterCtrlTrafficFilter custom_filter;
-
- /* The timestamp of the last tx packet processed. */
- u32 tx_last_ts;
-
- /* The timestamp of the last packet processed. */
- u32 last_indication_time;
-
- /* Statistics */
- CsrWifiRouterCtrlTrafficStats stats;
-
- /* Current traffic classification */
- CsrWifiRouterCtrlTrafficType traffic_type;
-
- /* Sum of packet rx rates for this interval used to calculate mean */
- u32 rx_sum_rate;
- ta_l4stats_t ta_l4stats;
-} ta_data_t;
-
-
-void unifi_ta_sampling_init(card_t *card);
-
-#endif /* __TA_SAMPLING_H__ */
diff --git a/drivers/staging/csr/csr_wifi_hip_udi.c b/drivers/staging/csr/csr_wifi_hip_udi.c
deleted file mode 100644
index a6b006b..0000000
--- a/drivers/staging/csr/csr_wifi_hip_udi.c
+++ /dev/null
@@ -1,173 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/*
- * ---------------------------------------------------------------------------
- * FILE: csr_wifi_hip_udi.c
- *
- * PURPOSE:
- * Maintain a list of callbacks to log UniFi exchanges to one or more
- * debug/monitoring client applications.
- *
- * NOTES:
- * Just call the UDI driver log fn directly for now.
- * When done properly, each open() on the UDI device will install
- * a log function. We will call all log fns whenever a signal is written
- * to or read from the UniFi.
- *
- * ---------------------------------------------------------------------------
- */
-#include <linux/seq_file.h>
-#include "csr_wifi_hip_unifi.h"
-#include "csr_wifi_hip_card.h"
-
-
-static void unifi_print_unsafe_sdio_status(card_t *card, struct seq_file *m)
-{
-#ifdef CSR_UNSAFE_SDIO_ACCESS
- s32 iostate;
- CsrResult r;
- static const char *const states[] = {
- "AWAKE", "DROWSY", "TORPID"
- };
-#define SHARED_READ_RETRY_LIMIT 10
- u8 b;
- u16 i;
-
- seq_printf(m, "Host State: %s\n", states[card->host_state]);
-
- r = unifi_check_io_status(card, &iostate);
- if (iostate == 1) {
- seq_puts(m, "I/O Check: F1 disabled\n");
- } else {
- if (iostate == 2) {
- seq_puts(m, "I/O Check: pending interrupt\n");
-
- seq_printf(m, "BH reason interrupt = %d\n", card->bh_reason_unifi);
- seq_printf(m, "BH reason host = %d\n", card->bh_reason_host);
-
- for (i = 0; i < SHARED_READ_RETRY_LIMIT; i++) {
- r = unifi_read_8_or_16(card, card->sdio_ctrl_addr + 2, &b);
- if (r == CSR_RESULT_SUCCESS && !(b & 0x80)) {
- seq_printf(m, "fhsr: %u (driver thinks is %u)\n",
- b, card->from_host_signals_r);
- break;
- }
- }
-
- iostate = unifi_read_shared_count(card, card->sdio_ctrl_addr + 4);
- seq_printf(m, "thsw: %u (driver thinks is %u)\n",
- iostate, card->to_host_signals_w);
- }
- }
-#endif
-}
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_print_status
- *
- * Print status info to given character buffer.
- *
- * Arguments:
- * None.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-s32 unifi_print_status(card_t *card, struct seq_file *m)
-{
- sdio_config_data_t *cfg;
- u16 i, n;
-
- i = n = 0;
- seq_printf(m, "Chip ID %u\n", card->chip_id);
- seq_printf(m, "Chip Version %04X\n", card->chip_version);
- seq_printf(m, "HIP v%u.%u\n",
- (card->config_data.version >> 8) & 0xFF,
- card->config_data.version & 0xFF);
- seq_printf(m, "Build %u: %s\n", card->build_id, card->build_id_string);
-
- cfg = &card->config_data;
-
- seq_printf(m, "sdio ctrl offset %u\n", cfg->sdio_ctrl_offset);
- seq_printf(m, "fromhost sigbuf handle %u\n", cfg->fromhost_sigbuf_handle);
- seq_printf(m, "tohost_sigbuf_handle %u\n", cfg->tohost_sigbuf_handle);
- seq_printf(m, "num_fromhost_sig_frags %u\n", cfg->num_fromhost_sig_frags);
- seq_printf(m, "num_tohost_sig_frags %u\n", cfg->num_tohost_sig_frags);
- seq_printf(m, "num_fromhost_data_slots %u\n", cfg->num_fromhost_data_slots);
- seq_printf(m, "num_tohost_data_slots %u\n", cfg->num_tohost_data_slots);
- seq_printf(m, "data_slot_size %u\n", cfg->data_slot_size);
-
- /* Added by protocol version 0x0001 */
- seq_printf(m, "overlay_size %u\n", cfg->overlay_size);
-
- /* Added by protocol version 0x0300 */
- seq_printf(m, "data_slot_round %u\n", cfg->data_slot_round);
- seq_printf(m, "sig_frag_size %u\n", cfg->sig_frag_size);
-
- /* Added by protocol version 0x0300 */
- seq_printf(m, "tohost_sig_pad %u\n", cfg->tohost_signal_padding);
-
- seq_puts(m, "\nInternal state:\n");
-
- seq_printf(m, "Last PHY PANIC: %04x:%04x\n",
- card->last_phy_panic_code, card->last_phy_panic_arg);
- seq_printf(m, "Last MAC PANIC: %04x:%04x\n",
- card->last_mac_panic_code, card->last_mac_panic_arg);
-
- seq_printf(m, "fhsr: %hu\n", (u16)card->from_host_signals_r);
- seq_printf(m, "fhsw: %hu\n", (u16)card->from_host_signals_w);
- seq_printf(m, "thsr: %hu\n", (u16)card->to_host_signals_r);
- seq_printf(m, "thsw: %hu\n", (u16)card->to_host_signals_w);
- seq_printf(m, "fh buffer contains: %d signals, %td bytes\n",
- card->fh_buffer.count,
- card->fh_buffer.ptr - card->fh_buffer.buf);
-
- seq_puts(m, "paused: ");
- for (i = 0; i < ARRAY_SIZE(card->tx_q_paused_flag); i++)
- seq_printf(m, card->tx_q_paused_flag[i] ? "1" : "0");
- seq_putc(m, '\n');
-
- seq_printf(m, "fh command q: %u waiting, %u free of %u:\n",
- CSR_WIFI_HIP_Q_SLOTS_USED(&card->fh_command_queue),
- CSR_WIFI_HIP_Q_SLOTS_FREE(&card->fh_command_queue),
- UNIFI_SOFT_COMMAND_Q_LENGTH);
-
- for (i = 0; i < UNIFI_NO_OF_TX_QS; i++)
- seq_printf(m, "fh traffic q[%u]: %u waiting, %u free of %u:\n",
- i,
- CSR_WIFI_HIP_Q_SLOTS_USED(&card->fh_traffic_queue[i]),
- CSR_WIFI_HIP_Q_SLOTS_FREE(&card->fh_traffic_queue[i]),
- UNIFI_SOFT_TRAFFIC_Q_LENGTH);
-
- seq_printf(m, "fh data slots free: %u\n",
- card->from_host_data ? CardGetFreeFromHostDataSlots(card) : 0);
-
- seq_puts(m, "From host data slots:");
- n = card->config_data.num_fromhost_data_slots;
- for (i = 0; i < n && card->from_host_data; i++)
- seq_printf(m, " %hu", (u16)card->from_host_data[i].bd.data_length);
- seq_putc(m, '\n');
-
- seq_puts(m, "To host data slots:");
- n = card->config_data.num_tohost_data_slots;
- for (i = 0; i < n && card->to_host_data; i++)
- seq_printf(m, " %hu", (u16)card->to_host_data[i].data_length);
- seq_putc(m, '\n');
-
- unifi_print_unsafe_sdio_status(card, m);
-
- seq_puts(m, "\nStats:\n");
- seq_printf(m, "Total SDIO bytes: R=%u W=%u\n",
- card->sdio_bytes_read, card->sdio_bytes_written);
-
- seq_printf(m, "Interrupts generated on card: %u\n", card->unifi_interrupt_seq);
- return 0;
-}
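
Since unifi_print_status() is written against the seq_file interface, the OS layer can expose it through any seq_file-backed node. One possible wiring through debugfs is sketched below; this is an assumption for illustration, not necessarily how the driver actually hooked it up:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static int unifi_status_show(struct seq_file *m, void *v)
{
	return unifi_print_status(m->private, m);	/* card_t * stored in ->private */
}

static int unifi_status_open(struct inode *inode, struct file *file)
{
	return single_open(file, unifi_status_show, inode->i_private);
}

static const struct file_operations unifi_status_fops = {
	.owner   = THIS_MODULE,
	.open    = unifi_status_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/* e.g. debugfs_create_file("unifi_status", 0444, parent_dir, card,
 *			    &unifi_status_fops); */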
diff --git a/drivers/staging/csr/csr_wifi_hip_unifi.h b/drivers/staging/csr/csr_wifi_hip_unifi.h
deleted file mode 100644
index 1160a0e..0000000
--- a/drivers/staging/csr/csr_wifi_hip_unifi.h
+++ /dev/null
@@ -1,871 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/*
- * ---------------------------------------------------------------------------
- *
- * FILE : csr_wifi_hip_unifi.h
- *
- * PURPOSE : Public API for the UniFi HIP core library.
- *
- * ---------------------------------------------------------------------------
- */
-#ifndef __CSR_WIFI_HIP_UNIFI_H__
-#define __CSR_WIFI_HIP_UNIFI_H__ 1
-
-#ifndef CSR_WIFI_HIP_TA_DISABLE
-#include "csr_wifi_router_ctrl_prim.h"
-#include "csr_wifi_router_prim.h"
-#else
-#include "csr_time.h"
-#endif
-
-/* SDIO chip ID numbers */
-
-/* Manufacturer id */
-#define SDIO_MANF_ID_CSR 0x032a
-
-/* Device id */
-#define SDIO_CARD_ID_UNIFI_1 0x0001
-#define SDIO_CARD_ID_UNIFI_2 0x0002
-#define SDIO_CARD_ID_UNIFI_3 0x0007
-#define SDIO_CARD_ID_UNIFI_4 0x0008
-
-/* Function number for WLAN */
-#define SDIO_WLAN_FUNC_ID_UNIFI_1 0x0001
-#define SDIO_WLAN_FUNC_ID_UNIFI_2 0x0001
-#define SDIO_WLAN_FUNC_ID_UNIFI_3 0x0001
-#define SDIO_WLAN_FUNC_ID_UNIFI_4 0x0002
-
-/* Maximum SDIO bus clock supported. */
-#define UNIFI_SDIO_CLOCK_MAX_HZ 50000000 /* Hz */
-
-/*
- * Initialisation SDIO bus clock.
- *
- * The initialisation clock speed should be used from when the chip has been
- * reset until the first MLME-reset has been received (i.e. during firmware
- * initialisation), unless UNIFI_SDIO_CLOCK_SAFE_HZ applies.
- */
-#define UNIFI_SDIO_CLOCK_INIT_HZ 12500000 /* Hz */
-
-/*
- * Safe SDIO bus clock.
- *
- * The safe speed should be used when the chip is in deep sleep or
- * its state is unknown (just after reset / power on).
- */
-#define UNIFI_SDIO_CLOCK_SAFE_HZ 1000000 /* Hz */
-
-/* I/O default block size to use for UniFi. */
-#define UNIFI_IO_BLOCK_SIZE 64
-
-#define UNIFI_WOL_OFF 0
-#define UNIFI_WOL_SDIO 1
-#define UNIFI_WOL_PIO 2
-
-/* The number of Tx traffic queues */
-#define UNIFI_NO_OF_TX_QS 4
-
-#define CSR_WIFI_HIP_RESERVED_HOST_TAG 0xFFFFFFFF
-
-/*
- * The number of slots in the from-host queues.
- *
- * UNIFI_SOFT_TRAFFIC_Q_LENGTH is the number of slots in the traffic queues
- * and there will be UNIFI_NO_OF_TX_QS of them.
- * Traffic queues are used for data packets.
- *
- * UNIFI_SOFT_COMMAND_Q_LENGTH is the number of slots in the command queue.
- * The command queue is used for MLME management requests.
- *
- * Queues are ring buffers and so must always have 1 unused slot.
- */
-#define UNIFI_SOFT_TRAFFIC_Q_LENGTH (20 + 1)
-#define UNIFI_SOFT_COMMAND_Q_LENGTH (16 + 1)
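
The "+ 1" above reflects the usual ring-buffer convention of keeping one slot unused so that a full queue can be told apart from an empty one. A generic sketch of the resulting free-slot arithmetic (not the driver's actual queue macros):

static u16 example_q_slots_free(u16 write_idx, u16 read_idx, u16 length)
{
	/* With one slot always unused, at most length - 1 entries fit. */
	return (u16)((read_idx + length - write_idx - 1) % length);
}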
-
-#include "csr_framework_ext.h" /* from the synergy porting folder */
-#include "csr_sdio.h" /* from the synergy porting folder */
-#include "csr_macro.h" /* from the synergy porting folder */
-#include "csr_wifi_result.h"
-
-/* Utility MACROS. Note that UNIFI_MAC_ADDRESS_CMP returns TRUE on success */
-#define UNIFI_MAC_ADDRESS_COPY(dst, src) \
- do { (dst)[0] = (src)[0]; (dst)[1] = (src)[1]; \
- (dst)[2] = (src)[2]; (dst)[3] = (src)[3]; \
- (dst)[4] = (src)[4]; (dst)[5] = (src)[5]; \
- } while (0)
-
-#define UNIFI_MAC_ADDRESS_CMP(addr1, addr2) \
- (((addr1)[0] == (addr2)[0]) && ((addr1)[1] == (addr2)[1]) && \
- ((addr1)[2] == (addr2)[2]) && ((addr1)[3] == (addr2)[3]) && \
- ((addr1)[4] == (addr2)[4]) && ((addr1)[5] == (addr2)[5]))
-
-/* Traffic queue ordered according to priority
- * EAPOL/Uncontrolled port Queue should be the last
- */
-typedef enum
-{
- UNIFI_TRAFFIC_Q_BK = 0,
- UNIFI_TRAFFIC_Q_BE,
- UNIFI_TRAFFIC_Q_VI,
- UNIFI_TRAFFIC_Q_VO,
- UNIFI_TRAFFIC_Q_EAPOL, /* Non existent in HIP */
- UNIFI_TRAFFIC_Q_MAX, /* Non existent */
- UNIFI_TRAFFIC_Q_MLME /* Non existent */
-} unifi_TrafficQueue;
-
-/*
- * Structure describing a bulk data slot.
- * This structure is shared between the HIP core library and the OS
- * layer. See the definition of unifi_net_data_malloc() for more details.
- *
- * The data_length field is used to indicate empty/occupied state.
- * Needs to be defined before #include "unifi_os.h".
- */
-typedef struct _bulk_data_desc
-{
- const u8 *os_data_ptr;
- u32 data_length;
- const void *os_net_buf_ptr;
- u32 net_buf_length;
-} bulk_data_desc_t;
-
-/* Structure of an entry in the Symbol Look Up Table (SLUT). */
-typedef struct _symbol
-{
- u16 id;
- u32 obj;
-} symbol_t;
-
-/*
- * Header files need to be included from the current directory,
- * the SME library, the synergy framework and the OS layer.
- * A thin OS layer needs to be implemented in the porting exercise.
- *
- * Note that unifi_os.h should be included only in unifi.h
- */
-
-#include "unifi_os.h"
-
-/*
- * Contains the HIP core definitions selected in the porting exercise, such as
- * UNIFI_PAD_BULK_DATA_TO_BLOCK_SIZE and UNIFI_PAD_SIGNALS_TO_BLOCK_SIZE.
- * Implemented in the OS layer, as part of the porting exercise.
- */
-#include "unifi_config.h"
-
-#include "csr_wifi_hip_signals.h" /* from this dir */
-
-/*
- * The card structure is an opaque pointer that is used to pass context
- * to the upper-edge API functions.
- */
-typedef struct card card_t;
-
-
-/*
- * This structure describes all of the bulk data that 'might' be
- * associated with a signal.
- */
-typedef struct _bulk_data_param
-{
- bulk_data_desc_t d[UNIFI_MAX_DATA_REFERENCES];
-} bulk_data_param_t;
-
-
-/*
- * This structure describes the chip and HIP core lib
- * information that is exposed to the OS layer.
- */
-typedef struct _card_info
-{
- u16 chip_id;
- u16 chip_version;
- u32 fw_build;
- u16 fw_hip_version;
- u32 sdio_block_size;
-} card_info_t;
-
-
-/*
- * Mini-coredump definitions
- */
-/* Definition of XAP memory ranges used by the mini-coredump system.
- * Note that, these values are NOT the same as UNIFI_REGISTERS, etc
- * in unifihw.h which don't allow selection of register areas for each XAP.
- */
-typedef enum unifi_coredump_space
-{
- UNIFI_COREDUMP_MAC_REG,
- UNIFI_COREDUMP_PHY_REG,
- UNIFI_COREDUMP_SH_DMEM,
- UNIFI_COREDUMP_MAC_DMEM,
- UNIFI_COREDUMP_PHY_DMEM,
- UNIFI_COREDUMP_TRIGGER_MAGIC = 0xFEED
-} unifi_coredump_space_t;
-
-/* Structure used to request a register value from a mini-coredump buffer */
-typedef struct unifi_coredump_req
-{
- /* From user */
- s32 index; /* 0=newest, -1=oldest */
- unifi_coredump_space_t space; /* memory space */
- u32 offset; /* register offset in space */
- /* From driver */
- u32 drv_build; /* Driver build id */
- u32 chip_ver; /* Chip version */
- u32 fw_ver; /* Firmware version */
- s32 requestor; /* Requestor: 0=auto dump, 1=manual */
- u32 timestamp; /* time of capture by driver */
- u32 serial; /* capture serial number */
- s32 value; /* register value */
-} unifi_coredump_req_t; /* mini-coredump register value request */
-
-
-/**
- * @defgroup upperedge Upper edge API
- *
- * The following functions are implemented in the HIP core lib.
- */
-
-/**
- *
- * Initialise the HIP core lib.
- * Note that the OS layer must initialise the SDIO glue layer and obtain
- * an SDIO function context, prior to this call.
- *
- * @param sdiopriv the SDIO function context.
- *
- * @param ospriv the OS layer context.
- *
- * @return \p card_t the HIP core lib API context.
- *
- * @ingroup upperedge
- */
-card_t* unifi_alloc_card(CsrSdioFunction *sdiopriv, void *ospriv);
-
-
-/**
- *
- * Initialise the UniFi chip.
- *
- * @param card the HIP core lib API context.
- *
- * @param led_mask the led mask to apply to UniFi.
- *
- * @return \b 0 if UniFi is initialized.
- *
- * @return \b -CSR_EIO if an I/O error occurred while initializing UniFi
- *
- * @return \b -CSR_ENODEV if the card is no longer present.
- *
- * @ingroup upperedge
- */
-CsrResult unifi_init_card(card_t *card, s32 led_mask);
-
-/**
- *
- * De-Initialise the HIP core lib.
- *
- * @param card the HIP core lib API context.
- *
- * @ingroup upperedge
- */
-void unifi_free_card(card_t *card);
-
-/**
- *
- * Cancel all the signals pending in the HIP core lib.
- * Normally used during a system suspend when the power is retained on UniFi.
- *
- * @param card the HIP core lib API context.
- *
- * @ingroup upperedge
- */
-void unifi_cancel_pending_signals(card_t *card);
-
-/**
- *
- * Send a signal to UniFi.
- * Normally it is called from unifi_sys_hip_req() and the OS layer
- * Tx data plane.
- *
- * Note that the bulkdata buffers ownership is passed to the HIP core lib.
- * These buffers must be allocated using unifi_net_data_malloc().
- *
- * @param card the HIP core lib API context.
- *
- * @param sigptr pointer to the signal.
- *
- * @param siglen size of the signal.
- *
- * @param bulkdata pointer to the bulk data associated with the signal.
- *
- * @return \b 0 signal is sent.
- *
- * @return \b -CSR_EIO if an error occurred while sending the signal
- *
- * @return \b -CSR_ENODEV if the card is no longer present.
- *
- * @ingroup upperedge
- */
-CsrResult unifi_send_signal(card_t *card, const u8 *sigptr,
- u32 siglen,
- const bulk_data_param_t *bulkdata);
-
-/**
- *
- * Check if the HIP core lib has resources to send a signal.
- * Normally there is no need to use this function.
- *
- * @param card the HIP core lib API context.
- *
- * @param sigptr pointer to the signal.
- *
- * @return \b 0 if there are resources for the signal.
- *
- * @return \b -CSR_ENOSPC if there are not enough resources
- *
- * @ingroup upperedge
- */
-CsrResult unifi_send_resources_available(card_t *card, const u8 *sigptr);
-
-/**
- *
- * Read the UniFi chip and the HIP core lib information.
- *
- * @param card the HIP core lib API context.
- *
- * @param card_info pointer to save the information.
- *
- * @ingroup upperedge
- */
-void unifi_card_info(card_t *card, card_info_t *card_info);
-
-/**
- *
- * Print the UniFi I/O and Interrupt status.
- * Normally it is used for debug purposes only.
- *
- * @param card the HIP core lib API context.
- *
- * @param status buffer for the chip status
- *
- * @return \b 0 if the check was performed.
- *
- * @return \b -CSR_EIO if an error occurred while checking the status.
- *
- * @return \b -CSR_ENODEV if the card is no longer present.
- *
- * @ingroup upperedge
- */
-CsrResult unifi_check_io_status(card_t *card, s32 *status);
-
-
-/**
- *
- * Run the HIP core lib Bottom-Half.
- * Whenever the HIP core lib wants this function to be called
- * by the OS layer, it calls unifi_run_bh().
- *
- * @param card the HIP core lib API context.
- *
- * @param remaining pointer to return the time (in msecs) that this function
- * should be re-scheduled. A return value of 0 means that no re-scheduling
- * is required. If unifi_bh() is called before the timeout expires,
- * the caller must pass in the remaining time.
- *
- * @return \b 0 if no error occurred.
- *
- * @return \b -CSR_ENODEV if the card is no longer present.
- *
- * @return \b -CSR_E* if an error occurred while running the bottom half.
- *
- * @ingroup upperedge
- */
-CsrResult unifi_bh(card_t *card, u32 *remaining);
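
To illustrate the re-scheduling contract described for the remaining parameter, here is a simplified sketch of the kind of loop an OS-layer bottom-half thread could run; example_wait_for_work() is a hypothetical helper that blocks until new work arrives or the timeout expires, and a real implementation would also track how much of the timeout has already elapsed:

static void example_bh_thread(card_t *card)
{
	u32 timeout_ms = 0;

	for (;;) {
		if (unifi_bh(card, &timeout_ms) != CSR_RESULT_SUCCESS)
			break;				/* card gone or fatal error */

		/* timeout_ms == 0 means no timed re-schedule is needed;
		 * otherwise call unifi_bh() again within timeout_ms msec. */
		example_wait_for_work(card, timeout_ms);	/* hypothetical */
	}
}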
-
-
-/**
- * UniFi Low Power Mode (Deep Sleep Signaling)
- *
- * unifi_low_power_mode defines the UniFi Deep Sleep Signaling status.
- * Use with unifi_configure_low_power_mode() to enable/disable
- * the Deep Sleep Signaling.
- */
-enum unifi_low_power_mode
-{
- UNIFI_LOW_POWER_DISABLED,
- UNIFI_LOW_POWER_ENABLED
-};
-
-/**
- * Periodic Wake Host Mode
- *
- * unifi_periodic_wake_mode defines the Periodic Wake Host Mode.
- * It can only be set to UNIFI_PERIODIC_WAKE_HOST_ENABLED if
- * low_power_mode == UNIFI_LOW_POWER_ENABLED.
- */
-enum unifi_periodic_wake_mode
-{
- UNIFI_PERIODIC_WAKE_HOST_DISABLED,
- UNIFI_PERIODIC_WAKE_HOST_ENABLED
-};
-
-/**
- *
- * Configure the UniFi Low Power Mode (Deep Sleep Signaling) and the
- * Periodic Wake Host Mode.
- *
- * Typically, the SME is responsible for configuring these parameters,
- * so unifi_sys_configure_power_mode_req() is usually implemented
- * as a direct call to unifi_configure_low_power_mode().
- *
- * Note: When polling mode is used instead of interrupts,
- * low_power_mode must never be set to UNIFI_LOW_POWER_ENABLED.
- *
- * @param card the HIP core lib API context.
- *
- * @param low_power_mode the Low Power Mode.
- *
- * @param periodic_wake_mode the Periodic Wake Mode.
- *
- * @return \b 0 if no error occurred.
- *
- * @return \b -CSR_E* if the request failed.
- *
- * @ingroup upperedge
- */
-CsrResult unifi_configure_low_power_mode(card_t *card,
- enum unifi_low_power_mode low_power_mode,
- enum unifi_periodic_wake_mode periodic_wake_mode);
-
-/**
- *
- * Forces the UniFi chip to enter a Deep Sleep state.
- * This is normally called by the OS layer when the platform suspends.
- *
- * Note that if the UniFi Low Power Mode is disabled this call fails.
- *
- * @param card the HIP core lib API context.
- *
- * @return \b 0 if no error occurred.
- *
- * @return \b -CSR_ENODEV if the card is no longer present.
- *
- * @return \b -CSR_E* if the request failed.
- *
- * @ingroup upperedge
- */
-CsrResult unifi_force_low_power_mode(card_t *card);
-
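-/*
- * Illustrative sketch (not part of the original header): a possible OS-layer
- * suspend path, assuming interrupts (not polling) are in use and that Deep
- * Sleep Signaling is permitted. The wrapper name os_suspend_wifi() and the
- * call ordering are assumptions; the three calls are the documented API.
- */
-#if 0 /* example only */
-static CsrResult os_suspend_wifi(card_t *card)
-{
- CsrResult r;
-
- r = unifi_configure_low_power_mode(card,
- UNIFI_LOW_POWER_ENABLED,
- UNIFI_PERIODIC_WAKE_HOST_DISABLED);
- if (r != CSR_RESULT_SUCCESS)
- return r;
-
- /* Power is retained on UniFi, so drop any queued signals first. */
- unifi_cancel_pending_signals(card);
-
- /* Fails if the Low Power Mode is disabled. */
- return unifi_force_low_power_mode(card);
-}
-#endif
-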
-#ifndef CSR_WIFI_HIP_TA_DISABLE
-/**
- * Configure the Traffic Analysis sampling
- *
- * Enable or disable statistics gathering.
- * Enable or disable particular packet detection.
- *
- * @param card the HIP core context
- * @param config_type the item to configure
- * @param config pointer to struct containing config info
- *
- * @return \b 0 if configuration was successful
- *
- * @return \b -CSR_EINVAL if a parameter had an invalid value
- *
- * @ingroup upperedge
- */
-CsrResult unifi_ta_configure(card_t *card,
- CsrWifiRouterCtrlTrafficConfigType config_type,
- const CsrWifiRouterCtrlTrafficConfig *config);
-
-/**
- * Pass a packet for Traffic Analysis sampling
- *
- * @param card the HIP core context
- * @param direction the direction (Rx or Tx) of the frame.
- * @param data pointer to bulkdata struct containing the packet
- * @param saddr the source address of the packet
- * @param sta_macaddr the MAC address of the UniFi chip
- * @param timestamp the current time in msecs
- *
- * @ingroup upperedge
- */
-void unifi_ta_sample(card_t *card,
- CsrWifiRouterCtrlProtocolDirection direction,
- const bulk_data_desc_t *data,
- const u8 *saddr,
- const u8 *sta_macaddr,
- u32 timestamp,
- u16 rate);
-
-/**
- * Notify the HIP core lib of a detected Traffic Classification.
- * Typically, the SME is responsible for configuring these parameters,
- * so unifi_sys_traffic_classification_req() is usually implemented
- * as a direct call to unifi_ta_classification().
- *
- * @param card the HIP core context.
- * @param traffic_type the detected traffic type.
- * @param period The detected period of the traffic.
- *
- * @ingroup upperedge
- */
-void unifi_ta_classification(card_t *card,
- CsrWifiRouterCtrlTrafficType traffic_type,
- u16 period);
-
-#endif
-/**
- * Use software to hard reset the chip.
- * This is a subset of the unifi_init_card() functionality and should
- * only be used to reset a panicked chip before a coredump is taken.
- *
- * @param card the HIP core context.
- *
- * @ingroup upperedge
- */
-CsrResult unifi_card_hard_reset(card_t *card);
-
-
-CsrResult unifi_card_readn(card_t *card, u32 unifi_addr, void *pdata, u16 len);
-CsrResult unifi_card_read16(card_t *card, u32 unifi_addr, u16 *pdata);
-CsrResult unifi_card_write16(card_t *card, u32 unifi_addr, u16 data);
-
-
-enum unifi_dbg_processors_select
-{
- UNIFI_PROC_MAC,
- UNIFI_PROC_PHY,
- UNIFI_PROC_BT,
- UNIFI_PROC_BOTH,
- UNIFI_PROC_INVALID
-};
-
-CsrResult unifi_card_stop_processor(card_t *card, enum unifi_dbg_processors_select which);
-
-/**
- * Call-outs from the HIP core lib to the OS layer.
- * The following functions need to be implemented during the porting exercise.
- */
-
-/**
- * Selects the appropriate queue according to priority.
- * Helps maintain uniformity in queue selection between the HIP
- * and the OS layers.
- *
- * @param priority priority of the packet
- *
- * @return \b Traffic queue to which a packet of this priority belongs
- *
- * @ingroup upperedge
- */
-unifi_TrafficQueue
-unifi_frame_priority_to_queue(CSR_PRIORITY priority);
-
-/**
- * Returns the priority corresponding to a particular queue, used
- * when downgrading a packet to a lower AC.
- * Helps maintain uniformity in queue - priority mapping between the HIP
- * and the OS layers.
- *
- * @param queue the traffic queue to map.
- *
- * @return \b Highest priority corresponding to this queue
- *
- * @ingroup upperedge
- */
-CSR_PRIORITY unifi_get_default_downgrade_priority(unifi_TrafficQueue queue);
-
-/**
- *
- * Flow control callbacks.
- * unifi_pause_xmit() is called when the HIP core lib does not have any
- * resources to store data packets. The OS layer needs to pause
- * the Tx data plane until unifi_restart_xmit() is called.
- *
- * @param ospriv the OS layer context.
- *
- * @ingroup upperedge
- */
-void unifi_pause_xmit(void *ospriv, unifi_TrafficQueue queue);
-void unifi_restart_xmit(void *ospriv, unifi_TrafficQueue queue);
-
-/**
- *
- * Request to run the Bottom-Half.
- * The HIP core lib calls this function to request that unifi_bh()
- * needs to be run by the OS layer. It can be called at any time,
- * including while unifi_bh() is running.
- * Since unifi_bh() is not re-entrant, unifi_run_bh() usually sets
- * an event for a thread that then schedules a call to unifi_bh().
- *
- * @param ospriv the OS layer context.
- *
- * @ingroup upperedge
- */
-CsrResult unifi_run_bh(void *ospriv);
-
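-/*
- * Illustrative sketch (not part of the original header): a minimal
- * unifi_run_bh() that simply wakes the bottom-half thread sketched above.
- * 'struct os_priv' and os_signal_bh_event() are hypothetical.
- */
-#if 0 /* example only */
-CsrResult unifi_run_bh(void *ospriv)
-{
- struct os_priv *priv = ospriv;
-
- os_signal_bh_event(priv); /* wakes os_bh_thread(), which calls unifi_bh() */
- return CSR_RESULT_SUCCESS;
-}
-#endif
-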
-/**
- *
- * Delivers a signal received from UniFi to the OS layer.
- * Normally, the data signals should be delivered to the data plane
- * and all the rest to the SME (unifi_sys_hip_ind()).
- *
- * Note that the OS layer is responsible for freeing the bulkdata
- * buffers, using unifi_net_data_free().
- *
- * @param ospriv the OS layer context.
- *
- * @param sigdata pointer to the signal.
- *
- * @param siglen size of the signal.
- *
- * @param bulkdata pointer to the bulk data associated with the signal.
- *
- * @ingroup upperedge
- */
-void unifi_receive_event(void *ospriv,
- u8 *sigdata, u32 siglen,
- const bulk_data_param_t *bulkdata);
-
-#ifdef CSR_WIFI_REQUEUE_PACKET_TO_HAL
-/**
- *
- * Used to requeue a failed MA packet request back to the HAL queues.
- *
- * @param ospriv the OS layer context.
- *
- * @param host_tag host tag for the packet to requeue.
- *
- * @param bulkDataDesc pointer to the bulk data.
- *
- * @ingroup upperedge
- */
-CsrResult unifi_reque_ma_packet_request(void *ospriv, u32 host_tag,
- u16 status,
- bulk_data_desc_t *bulkDataDesc);
-
-#endif
-typedef struct
-{
- u16 free_fh_sig_queue_slots[UNIFI_NO_OF_TX_QS];
- u16 free_fh_bulkdata_slots;
- u16 free_fh_fw_slots;
-} unifi_HipQosInfo;
-
-void unifi_get_hip_qos_info(card_t *card, unifi_HipQosInfo *hipqosinfo);
-
-
-/**
- * Functions that read a portion of a firmware file.
- *
- * Note: If the UniFi chip runs the f/w from ROM, the HIP core may never
- * call these functions. Also, the HIP core may call these functions even if
- * a f/w file is not available. In this case, it is safe to fail the request.
- */
-#define UNIFI_FW_STA 1 /* Identify STA firmware file */
-
-/**
- *
- * Ask the OS layer to initialise a read from a f/w file.
- *
- * @param ospriv the OS layer context.
- *
- * @param is_fw if 0 the request is for the loader file, if 1 the request
- * is for a f/w file.
- *
- * @param info a card_info_t structure containing versions information.
- * Note that some members of the structure may not be initialised.
- *
- * @return \p NULL if the file is not available, or a pointer which contains
- * OS specific information for the file (typically the contents of the file)
- * that the HIP core uses when calling unifi_fw_read() and unifi_fw_read_stop()
- *
- * @ingroup upperedge
- */
-void* unifi_fw_read_start(void *ospriv, s8 is_fw, const card_info_t *info);
-
-/**
- *
- * Ask the OS layer to return a portion from a f/w file.
- *
- * @param ospriv the OS layer context.
- *
- * @param arg the OS pointer returned by unifi_fw_read_start().
- *
- * @param offset the offset in the f/w file to read from.
- *
- * @param buf the buffer to store the returned data.
- *
- * @param len the size in bytes of the requested read.
- *
- * @ingroup upperedge
- */
-s32 unifi_fw_read(void *ospriv, void *arg, u32 offset, void *buf, u32 len);
-
-/**
- *
- * Ask the OS layer to finish reading from a f/w file.
- *
- * @param ospriv the OS layer context.
- *
- * @param dlpriv the OS pointer returned by unifi_fw_read_start().
- *
- * @ingroup upperedge
- */
-void unifi_fw_read_stop(void *ospriv, void *dlpriv);
-
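-/*
- * Illustrative sketch (not part of the original header): a Linux OS layer
- * could back these three call-outs with the kernel firmware loader
- * (<linux/firmware.h>). The file name, the os_to_device() helper and the
- * decision to ignore 'is_fw'/'info' are assumptions made for brevity.
- */
-#if 0 /* example only */
-void* unifi_fw_read_start(void *ospriv, s8 is_fw, const card_info_t *info)
-{
- const struct firmware *fw = NULL;
-
- if (request_firmware(&fw, "unifi-sdio.xbv", os_to_device(ospriv)))
- return NULL; /* no file available: safe to fail the request */
- return (void *)fw;
-}
-
-s32 unifi_fw_read(void *ospriv, void *arg, u32 offset, void *buf, u32 len)
-{
- const struct firmware *fw = arg;
-
- if (offset >= fw->size)
- return 0; /* read past end of file */
- if (offset + len > fw->size)
- len = fw->size - offset;
- memcpy(buf, fw->data + offset, len);
- return (s32)len;
-}
-
-void unifi_fw_read_stop(void *ospriv, void *dlpriv)
-{
- release_firmware(dlpriv);
-}
-#endif
-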
-/**
- *
- * Ask the OS layer for a handle to a dynamically allocated firmware buffer
- * (primarily intended for production test images which may need conversion)
- *
- * @param ospriv the OS layer context.
- *
- * @param fwbuf pointer to dynamically allocated buffer
- *
- * @param len length of provided buffer in bytes
- *
- * @ingroup upperedge
- */
-void* unifi_fw_open_buffer(void *ospriv, void *fwbuf, u32 len);
-
-/**
- *
- * Release a handle to a dynamically allocated firmware buffer
- * (primarily intended for production test images which may need conversion)
- *
- * @param ospriv the OS layer context.
- *
- * @param fwbuf pointer to dynamically allocated buffer
- *
- * @ingroup upperedge
- */
-void unifi_fw_close_buffer(void *ospriv, void *fwbuf);
-
-#ifndef CSR_WIFI_HIP_TA_DISABLE
-/*
- * Driver must provide these.
- *
- * A simple implementation will just call
- * unifi_sys_traffic_protocol_ind() or unifi_sys_traffic_classification_ind()
- * respectively. See sme_csr_userspace/sme_userspace.c.
- */
-/**
- *
- * Indicates a detected packet of type packet_type.
- * Typically, this information is processed by the SME so
- * unifi_ta_indicate_protocol() needs to schedule a call to
- * unifi_sys_traffic_protocol_ind().
- *
- * @param ospriv the OS layer context.
- *
- * @param packet_type the detected packet type.
- *
- * @param direction the direction of the packet (Rx, Tx).
- *
- * @param src_addr the source address of the packet.
- *
- * @ingroup upperedge
- */
-void unifi_ta_indicate_protocol(void *ospriv,
- CsrWifiRouterCtrlTrafficPacketType packet_type,
- CsrWifiRouterCtrlProtocolDirection direction,
- const CsrWifiMacAddress *src_addr);
-
-/**
- *
- * Indicates statistics for the sample data over a period.
- * Typically, this information is processed by the SME so
- * unifi_ta_indicate_sampling() needs to schedule a call to
- * unifi_sys_traffic_sample_ind().
- *
- * @param ospriv the OS layer context.
- *
- * @param stats the pointer to the structure that contains the statistics.
- *
- * @ingroup upperedge
- */
-void unifi_ta_indicate_sampling(void *ospriv, CsrWifiRouterCtrlTrafficStats *stats);
-void unifi_ta_indicate_l4stats(void *ospriv,
- u32 rxTcpThroughput,
- u32 txTcpThroughput,
- u32 rxUdpThroughput,
- u32 txUdpThroughput);
-#endif
-
-void unifi_rx_queue_flush(void *ospriv);
-
-/**
- * Call-out from the SDIO glue layer.
- *
- * The glue layer needs to call unifi_sdio_interrupt_handler() every time
- * an interrupt occurs.
- *
- * @param card the HIP core context.
- *
- * @ingroup bottomedge
- */
-void unifi_sdio_interrupt_handler(card_t *card);
-
-
-/* HELPER FUNCTIONS */
-
-/*
- * unifi_init() and unifi_download() implement a subset of unifi_init_card functionality
- * that excludes HIP initialization.
- */
-CsrResult unifi_init(card_t *card);
-CsrResult unifi_download(card_t *card, s32 led_mask);
-
-/*
- * unifi_start_processors() ensures both on-chip processors are running
- */
-CsrResult unifi_start_processors(card_t *card);
-
-CsrResult unifi_capture_panic(card_t *card);
-
-/*
- * Configure HIP interrupt processing mode
- */
-#define CSR_WIFI_INTMODE_DEFAULT 0
-#define CSR_WIFI_INTMODE_RUN_BH_ONCE 1 /* Run BH once per interrupt */
-
-void unifi_set_interrupt_mode(card_t *card, u32 mode);
-
-/*
- * unifi_request_max_sdio_clock() requests that the maximum SDIO clock speed
- * is set at the next suitable opportunity.
- */
-void unifi_request_max_sdio_clock(card_t *card);
-
-
-/* Functions to lookup bulk data command names. */
-const char* lookup_bulkcmd_name(u16 id);
-
-/* Function to log HIP's global debug buffer */
-#ifdef CSR_WIFI_HIP_DEBUG_OFFLINE
-void unifi_debug_buf_dump(void);
-void unifi_debug_log_to_buf(const char *fmt, ...);
-void unifi_debug_hex_to_buf(const char *buff, u16 length);
-#endif
-
-/* Mini-coredump utility functions */
-CsrResult unifi_coredump_get_value(card_t *card, struct unifi_coredump_req *req);
-CsrResult unifi_coredump_capture(card_t *card, struct unifi_coredump_req *req);
-CsrResult unifi_coredump_request_at_next_reset(card_t *card, s8 enable);
-CsrResult unifi_coredump_init(card_t *card, u16 num_dump_buffers);
-void unifi_coredump_free(card_t *card);
-
-#endif /* __CSR_WIFI_HIP_UNIFI_H__ */
diff --git a/drivers/staging/csr/csr_wifi_hip_unifi_signal_names.c b/drivers/staging/csr/csr_wifi_hip_unifi_signal_names.c
deleted file mode 100644
index 9a35285..0000000
--- a/drivers/staging/csr/csr_wifi_hip_unifi_signal_names.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-#include "csr_wifi_hip_unifi.h"
-
-struct sig_name {
- s16 id;
- const char *name;
-};
-
-static const struct sig_name Unifi_bulkcmd_names[] = {
- { 0, "SignalCmd" },
- { 1, "CopyToHost" },
- { 2, "CopyToHostAck" },
- { 3, "CopyFromHost" },
- { 4, "CopyFromHostAck" },
- { 5, "ClearSlot" },
- { 6, "CopyOverlay" },
- { 7, "CopyOverlayAck" },
- { 8, "CopyFromHostAndClearSlot" },
- { 15, "Padding" }
-};
-
-const char *lookup_bulkcmd_name(u16 id)
-{
- if (id < 9)
- return Unifi_bulkcmd_names[id].name;
- if (id == 15)
- return "Padding";
-
- return "UNKNOWN";
-}
-
-
diff --git a/drivers/staging/csr/csr_wifi_hip_unifi_udi.h b/drivers/staging/csr/csr_wifi_hip_unifi_udi.h
deleted file mode 100644
index 4126e85..0000000
--- a/drivers/staging/csr/csr_wifi_hip_unifi_udi.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/*
- * ---------------------------------------------------------------------------
- * FILE: csr_wifi_hip_unifi_udi.h
- *
- * PURPOSE:
- * Declarations and definitions for the UniFi Debug Interface.
- *
- * ---------------------------------------------------------------------------
- */
-#ifndef __CSR_WIFI_HIP_UNIFI_UDI_H__
-#define __CSR_WIFI_HIP_UNIFI_UDI_H__
-
-#include "csr_wifi_hip_unifi.h"
-#include "csr_wifi_hip_signals.h"
-
-
-/*
- * Support for tracing the wire protocol.
- */
-enum udi_log_direction
-{
- UDI_LOG_FROM_HOST = 0x0000,
- UDI_LOG_TO_HOST = 0x0001
-};
-
-typedef void (*udi_func_t)(void *ospriv, u8 *sigdata,
- u32 signal_len,
- const bulk_data_param_t *bulkdata,
- enum udi_log_direction dir);
-
-CsrResult unifi_set_udi_hook(card_t *card, udi_func_t udi_fn);
-CsrResult unifi_remove_udi_hook(card_t *card, udi_func_t udi_fn);
-
-
-/*
- * Function to print current status info to a string.
- * This is used in the linux /proc interface and might be useful
- * in other systems.
- */
-s32 unifi_print_status(card_t *card, struct seq_file *m);
-
-#endif /* __CSR_WIFI_HIP_UNIFI_UDI_H__ */
diff --git a/drivers/staging/csr/csr_wifi_hip_unifihw.h b/drivers/staging/csr/csr_wifi_hip_unifihw.h
deleted file mode 100644
index 3f9fcbd..0000000
--- a/drivers/staging/csr/csr_wifi_hip_unifihw.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/*
- * ---------------------------------------------------------------------------
- *
- * File: csr_wifi_hip_unifihw.h
- *
- * Definitions of various chip registers, addresses, values etc.
- *
- * ---------------------------------------------------------------------------
- */
-#ifndef __UNIFIHW_H__
-#define __UNIFIHW_H__ 1
-
-/* Symbol Look Up Table fingerprint. IDs are in sigs.h */
-#define SLUT_FINGERPRINT 0xD397
-
-
-/* Values of LoaderOperation */
-#define UNIFI_LOADER_IDLE 0x00
-#define UNIFI_LOADER_COPY 0x01
-#define UNIFI_LOADER_ERROR_MASK 0xF0
-
-/* Values of BootLoaderOperation */
-#define UNIFI_BOOT_LOADER_IDLE 0x00
-#define UNIFI_BOOT_LOADER_RESTART 0x01
-#define UNIFI_BOOT_LOADER_PATCH 0x02
-#define UNIFI_BOOT_LOADER_LOAD_STA 0x10
-#define UNIFI_BOOT_LOADER_LOAD_PTEST 0x11
-
-
-/* Memory spaces encoded in top byte of Generic Pointer type */
-#define UNIFI_SH_DMEM 0x01 /* Shared Data Memory */
-#define UNIFI_EXT_FLASH 0x02 /* External FLASH */
-#define UNIFI_EXT_SRAM 0x03 /* External SRAM */
-#define UNIFI_REGISTERS 0x04 /* Registers */
-#define UNIFI_PHY_DMEM 0x10 /* PHY Data Memory */
-#define UNIFI_PHY_PMEM 0x11 /* PHY Program Memory */
-#define UNIFI_PHY_ROM 0x12 /* PHY ROM */
-#define UNIFI_MAC_DMEM 0x20 /* MAC Data Memory */
-#define UNIFI_MAC_PMEM 0x21 /* MAC Program Memory */
-#define UNIFI_MAC_ROM 0x22 /* MAC ROM */
-#define UNIFI_BT_DMEM 0x30 /* BT Data Memory */
-#define UNIFI_BT_PMEM 0x31 /* BT Program Memory */
-#define UNIFI_BT_ROM 0x32 /* BT ROM */
-
-#define UNIFI_MAKE_GP(R, O) (((UNIFI_ ## R) << 24) | (O))
-#define UNIFI_GP_OFFSET(GP) ((GP) & 0xFFFFFF)
-#define UNIFI_GP_SPACE(GP) (((GP) >> 24) & 0xFF)
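-
-/*
- * Example (illustrative): UNIFI_MAKE_GP(MAC_DMEM, 0x1234) builds the Generic
- * Pointer 0x20001234; UNIFI_GP_SPACE() recovers UNIFI_MAC_DMEM (0x20) and
- * UNIFI_GP_OFFSET() recovers 0x001234.
- */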
-
-#endif /* __UNIFIHW_H__ */
diff --git a/drivers/staging/csr/csr_wifi_hip_unifiversion.h b/drivers/staging/csr/csr_wifi_hip_unifiversion.h
deleted file mode 100644
index d1c6678..0000000
--- a/drivers/staging/csr/csr_wifi_hip_unifiversion.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/*
- * ---------------------------------------------------------------------------
- * FILE: unifiversion.h
- *
- * PURPOSE:
- * Version information for the portable UniFi driver.
- *
- * ---------------------------------------------------------------------------
- */
-
-#ifndef __UNIFIVERSION_H__
-#define __UNIFIVERSION_H__
-
-/*
- * The minimum version of Host Interface Protocol required by the driver.
- */
-#define UNIFI_HIP_MAJOR_VERSION 9
-#define UNIFI_HIP_MINOR_VERSION 1
-
-#endif /* __UNIFIVERSION_H__ */
diff --git a/drivers/staging/csr/csr_wifi_hip_xbv.c b/drivers/staging/csr/csr_wifi_hip_xbv.c
deleted file mode 100644
index 050a15f..0000000
--- a/drivers/staging/csr/csr_wifi_hip_xbv.c
+++ /dev/null
@@ -1,1076 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/*
- * ---------------------------------------------------------------------------
- * FILE: csr_wifi_hip_xbv.c
- *
- * PURPOSE:
- * Routines for downloading firmware to UniFi.
- *
- * UniFi firmware files use a nested TLV (Tag-Length-Value) format.
- *
- * ---------------------------------------------------------------------------
- */
-#include <linux/slab.h>
-
-#ifdef CSR_WIFI_XBV_TEST
-/* Standalone test harness */
-#include "unifi_xbv.h"
-#include "csr_wifi_hip_unifihw.h"
-#else
-/* Normal driver build */
-#include "csr_wifi_hip_unifiversion.h"
-#include "csr_wifi_hip_card.h"
-#define DBG_TAG(t)
-#endif
-
-#include "csr_wifi_hip_xbv.h"
-
-#define STREAM_CHECKSUM 0x6d34 /* Sum of uint16s in each patch stream */
-
-/* XBV sizes used in patch conversion
- */
-#define PTDL_MAX_SIZE 2048 /* Max bytes allowed per PTDL */
-#define PTDL_HDR_SIZE (4 + 2 + 6 + 2) /* sizeof(fw_id, sec_len, patch_cmd, csum) */
-
-/* Struct to represent a buffer for reading firmware file */
-
-typedef struct
-{
- void *dlpriv;
- s32 ioffset;
- fwreadfn_t iread;
-} ct_t;
-
-/* Struct to represent a TLV field */
-typedef struct
-{
- char t_name[4];
- u32 t_len;
-} tag_t;
-
-
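-/* Example (illustrative): on file, every TLV begins with an 8-byte header,
- * a 4-character tag followed by a 32-bit little-endian value length, so the
- * bytes 'V' 'E' 'R' 'F' 02 00 00 00 parse to t_name = "VERF", t_len = 2
- * (the two-byte file-format version that follows). */
-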
-#define TAG_EQ(i, v) (((i)[0] == (v)[0]) && \
- ((i)[1] == (v)[1]) && \
- ((i)[2] == (v)[2]) && \
- ((i)[3] == (v)[3]))
-
-/* We create a small stack on the stack that contains an enum
- * indicating the containing list segments, and the offset at which
- * those lists end. This enables a lot more error checking. */
-typedef enum
-{
- xbv_xbv1,
- /*xbv_info,*/
- xbv_fw,
- xbv_vers,
- xbv_vand,
- xbv_ptch,
- xbv_other
-} xbv_container;
-
-#define XBV_STACK_SIZE 6
-#define XBV_MAX_OFFS 0x7fffffff
-
-typedef struct
-{
- struct
- {
- xbv_container container;
- s32 ioffset_end;
- } s[XBV_STACK_SIZE];
- u32 ptr;
-} xbv_stack_t;
-
-static s32 read_tag(card_t *card, ct_t *ct, tag_t *tag);
-static s32 read_bytes(card_t *card, ct_t *ct, void *buf, u32 len);
-static s32 read_uint(card_t *card, ct_t *ct, u32 *u, u32 len);
-static s32 xbv_check(xbv1_t *fwinfo, const xbv_stack_t *stack,
- xbv_mode new_mode, xbv_container old_cont);
-static s32 xbv_push(xbv1_t *fwinfo, xbv_stack_t *stack,
- xbv_mode new_mode, xbv_container old_cont,
- xbv_container new_cont, u32 ioff);
-
-static u32 write_uint16(void *buf, const u32 offset,
- const u16 val);
-static u32 write_uint32(void *buf, const u32 offset,
- const u32 val);
-static u32 write_bytes(void *buf, const u32 offset,
- const u8 *data, const u32 len);
-static u32 write_tag(void *buf, const u32 offset,
- const char *tag_str);
-static u32 write_chunk(void *buf, const u32 offset,
- const char *tag_str,
- const u32 payload_len);
-static u16 calc_checksum(void *buf, const u32 offset,
- const u32 bytes_len);
-static u32 calc_patch_size(const xbv1_t *fwinfo);
-
-static u32 write_xbv_header(void *buf, const u32 offset,
- const u32 file_payload_length);
-static u32 write_ptch_header(void *buf, const u32 offset,
- const u32 fw_id);
-static u32 write_patchcmd(void *buf, const u32 offset,
- const u32 dst_genaddr, const u16 len);
-static u32 write_reset_ptdl(void *buf, const u32 offset,
- const xbv1_t *fwinfo, u32 fw_id);
-static u32 write_fwdl_to_ptdl(void *buf, const u32 offset,
- fwreadfn_t readfn, const struct FWDL *fwdl,
- const void *fw_buf, const u32 fw_id,
- void *rdbuf);
-
-/*
- * ---------------------------------------------------------------------------
- * parse_xbv1
- *
- * Scan the firmware file to find the TLVs we are interested in.
- * Actions performed:
- * - check we support the file format version in VERF
- * Store these TLVs if we have a firmware image:
- * - SLTP Symbol Lookup Table Pointer
- * - FWDL firmware download segments
- * - FWOL firmware overlay segment
- * - VMEQ Register probe tests to verify matching h/w
- * Store these TLVs if we have a patch file:
- * - FWID the firmware build ID that this file patches
- * - PTDL The actual patches
- *
- * The structure pointed to by fwinfo is cleared and
- * 'fwinfo->mode' is set to 'unknown'. The 'fwinfo->mode'
- * variable is set to 'firmware' or 'patch' once we know which
- * sort of XBV file we have.
- *
- * Arguments:
- * readfn Pointer to function to call to read from the file.
- * dlpriv Opaque pointer arg to pass to readfn.
- * fwinfo Pointer to fwinfo struct to fill in.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, CSR error code on failure
- * ---------------------------------------------------------------------------
- */
-CsrResult xbv1_parse(card_t *card, fwreadfn_t readfn, void *dlpriv, xbv1_t *fwinfo)
-{
- ct_t ct;
- tag_t tag;
- xbv_stack_t stack;
-
- ct.dlpriv = dlpriv;
- ct.ioffset = 0;
- ct.iread = readfn;
-
- memset(fwinfo, 0, sizeof(xbv1_t));
- fwinfo->mode = xbv_unknown;
-
- /* File must start with XBV1 triplet */
- if (read_tag(card, &ct, &tag) <= 0)
- {
- unifi_error(NULL, "File is not UniFi firmware\n");
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- DBG_TAG(tag.t_name);
-
- if (!TAG_EQ(tag.t_name, "XBV1"))
- {
- unifi_error(NULL, "File is not UniFi firmware (%s)\n", tag.t_name);
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- stack.ptr = 0;
- stack.s[stack.ptr].container = xbv_xbv1;
- stack.s[stack.ptr].ioffset_end = XBV_MAX_OFFS;
-
- /* Now scan the file */
- while (1)
- {
- s32 n;
-
- n = read_tag(card, &ct, &tag);
- if (n < 0)
- {
- unifi_error(NULL, "No tag\n");
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
- if (n == 0)
- {
- /* End of file */
- break;
- }
-
- DBG_TAG(tag.t_name);
-
- /* File format version */
- if (TAG_EQ(tag.t_name, "VERF"))
- {
- u32 version;
-
- if (xbv_check(fwinfo, &stack, xbv_unknown, xbv_xbv1) ||
- (tag.t_len != 2) ||
- read_uint(card, &ct, &version, 2))
- {
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
- if (version != 0)
- {
- unifi_error(NULL, "Unsupported firmware file version: %d.%d\n",
- version >> 8, version & 0xFF);
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
- }
- else if (TAG_EQ(tag.t_name, "LIST"))
- {
- char name[4];
- u32 list_end;
-
- list_end = ct.ioffset + tag.t_len;
-
- if (read_bytes(card, &ct, name, 4))
- {
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- DBG_TAG(name);
- if (TAG_EQ(name, "FW "))
- {
- if (xbv_push(fwinfo, &stack, xbv_firmware, xbv_xbv1, xbv_fw, list_end))
- {
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
- }
- else if (TAG_EQ(name, "VERS"))
- {
- if (xbv_push(fwinfo, &stack, xbv_firmware, xbv_fw, xbv_vers, list_end) ||
- (fwinfo->vers.num_vand != 0))
- {
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
- }
- else if (TAG_EQ(name, "VAND"))
- {
- struct VAND *vand;
-
- if (xbv_push(fwinfo, &stack, xbv_firmware, xbv_vers, xbv_vand, list_end) ||
- (fwinfo->vers.num_vand >= MAX_VAND))
- {
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- /* Get a new VAND */
- vand = fwinfo->vand + fwinfo->vers.num_vand++;
-
- /* Fill it in */
- vand->first = fwinfo->num_vmeq;
- vand->count = 0;
- }
- else if (TAG_EQ(name, "PTCH"))
- {
- if (xbv_push(fwinfo, &stack, xbv_patch, xbv_xbv1, xbv_ptch, list_end))
- {
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
- }
- else
- {
- /* Skip over any other lists. We don't bother to push
- * the new list type now as we would only pop it at
- * the end of the outer loop. */
- ct.ioffset += tag.t_len - 4;
- }
- }
- else if (TAG_EQ(tag.t_name, "SLTP"))
- {
- u32 addr;
-
- if (xbv_check(fwinfo, &stack, xbv_firmware, xbv_fw) ||
- (tag.t_len != 4) ||
- (fwinfo->slut_addr != 0) ||
- read_uint(card, &ct, &addr, 4))
- {
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- fwinfo->slut_addr = addr;
- }
- else if (TAG_EQ(tag.t_name, "FWDL"))
- {
- u32 addr;
- struct FWDL *fwdl;
-
- if (xbv_check(fwinfo, &stack, xbv_firmware, xbv_fw) ||
- (fwinfo->num_fwdl >= MAX_FWDL) ||
- (read_uint(card, &ct, &addr, 4)))
- {
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- fwdl = fwinfo->fwdl + fwinfo->num_fwdl++;
-
- fwdl->dl_size = tag.t_len - 4;
- fwdl->dl_addr = addr;
- fwdl->dl_offset = ct.ioffset;
-
- ct.ioffset += tag.t_len - 4;
- }
- else if (TAG_EQ(tag.t_name, "FWOV"))
- {
- if (xbv_check(fwinfo, &stack, xbv_firmware, xbv_fw) ||
- (fwinfo->fwov.dl_size != 0) ||
- (fwinfo->fwov.dl_offset != 0))
- {
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- fwinfo->fwov.dl_size = tag.t_len;
- fwinfo->fwov.dl_offset = ct.ioffset;
-
- ct.ioffset += tag.t_len;
- }
- else if (TAG_EQ(tag.t_name, "VMEQ"))
- {
- u32 temp[3];
- struct VAND *vand;
- struct VMEQ *vmeq;
-
- if (xbv_check(fwinfo, &stack, xbv_firmware, xbv_vand) ||
- (fwinfo->num_vmeq >= MAX_VMEQ) ||
- (fwinfo->vers.num_vand == 0) ||
- (tag.t_len != 8) ||
- read_uint(card, &ct, &temp[0], 4) ||
- read_uint(card, &ct, &temp[1], 2) ||
- read_uint(card, &ct, &temp[2], 2))
- {
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- /* Get the last VAND */
- vand = fwinfo->vand + (fwinfo->vers.num_vand - 1);
-
- /* Get a new VMEQ */
- vmeq = fwinfo->vmeq + fwinfo->num_vmeq++;
-
- /* Note that this VAND contains another VMEQ */
- vand->count++;
-
- /* Fill in the VMEQ */
- vmeq->addr = temp[0];
- vmeq->mask = (u16)temp[1];
- vmeq->value = (u16)temp[2];
- }
- else if (TAG_EQ(tag.t_name, "FWID"))
- {
- u32 build_id;
-
- if (xbv_check(fwinfo, &stack, xbv_patch, xbv_ptch) ||
- (tag.t_len != 4) ||
- (fwinfo->build_id != 0) ||
- read_uint(card, &ct, &build_id, 4))
- {
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- fwinfo->build_id = build_id;
- }
- else if (TAG_EQ(tag.t_name, "PTDL"))
- {
- struct PTDL *ptdl;
-
- if (xbv_check(fwinfo, &stack, xbv_patch, xbv_ptch) ||
- (fwinfo->num_ptdl >= MAX_PTDL))
- {
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- /* Allocate a new PTDL */
- ptdl = fwinfo->ptdl + fwinfo->num_ptdl++;
-
- ptdl->dl_size = tag.t_len;
- ptdl->dl_offset = ct.ioffset;
-
- ct.ioffset += tag.t_len;
- }
- else
- {
- /*
- * If we get here it is a tag we are not interested in,
- * just skip over it.
- */
- ct.ioffset += tag.t_len;
- }
-
- /* Check to see if we are at the end of the currently stacked
- * segment. We could finish more than one list at a time. */
- while (ct.ioffset >= stack.s[stack.ptr].ioffset_end)
- {
- if (ct.ioffset > stack.s[stack.ptr].ioffset_end)
- {
- unifi_error(NULL,
- "XBV file has overrun stack'd segment %d (%d > %d)\n",
- stack.ptr, ct.ioffset, stack.s[stack.ptr].ioffset_end);
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
- if (stack.ptr <= 0)
- {
- unifi_error(NULL, "XBV file has underrun stack pointer\n");
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
- stack.ptr--;
- }
- }
-
- if (stack.ptr != 0)
- {
- unifi_error(NULL, "Last list of XBV is not complete.\n");
- return CSR_WIFI_HIP_RESULT_INVALID_VALUE;
- }
-
- return CSR_RESULT_SUCCESS;
-} /* xbv1_parse() */
-
-
-/* Check that the XBV file is of a consistent sort (either firmware or
- * patch) and that we are in the correct containing list type. */
-static s32 xbv_check(xbv1_t *fwinfo, const xbv_stack_t *stack,
- xbv_mode new_mode, xbv_container old_cont)
-{
- /* If the new file mode is unknown the current packet could be in
- * either (any) type of XBV file, and we can't make a decision at
- * this time. */
- if (new_mode != xbv_unknown)
- {
- if (fwinfo->mode == xbv_unknown)
- {
- fwinfo->mode = new_mode;
- }
- else if (fwinfo->mode != new_mode)
- {
- return -1;
- }
- }
- /* If the current stack top doesn't match what we expect then the
- * file is corrupt. */
- if (stack->s[stack->ptr].container != old_cont)
- {
- return -1;
- }
- return 0;
-}
-
-
-/* Make checks as above and then enter a new list */
-static s32 xbv_push(xbv1_t *fwinfo, xbv_stack_t *stack,
- xbv_mode new_mode, xbv_container old_cont,
- xbv_container new_cont, u32 new_ioff)
-{
- if (xbv_check(fwinfo, stack, new_mode, old_cont))
- {
- return -1;
- }
-
- /* Check that our stack won't overflow. */
- if (stack->ptr >= (XBV_STACK_SIZE - 1))
- {
- return -1;
- }
-
- /* Add the new list element to the top of the stack. */
- stack->ptr++;
- stack->s[stack->ptr].container = new_cont;
- stack->s[stack->ptr].ioffset_end = new_ioff;
-
- return 0;
-}
-
-
-static u32 xbv2uint(u8 *ptr, s32 len)
-{
- u32 u = 0;
- s16 i;
-
- for (i = 0; i < len; i++)
- {
- u32 b;
- b = ptr[i];
- u += b << (i * 8);
- }
- return u;
-}
-
-
-static s32 read_tag(card_t *card, ct_t *ct, tag_t *tag)
-{
- u8 buf[8];
- s32 n;
-
- n = (*ct->iread)(card->ospriv, ct->dlpriv, ct->ioffset, buf, 8);
- if (n <= 0)
- {
- return n;
- }
-
- /* read the tag and length */
- if (n != 8)
- {
- return -1;
- }
-
- /* get section tag */
- memcpy(tag->t_name, buf, 4);
-
- /* get section length */
- tag->t_len = xbv2uint(buf + 4, 4);
-
- ct->ioffset += 8;
-
- return 8;
-} /* read_tag() */
-
-
-static s32 read_bytes(card_t *card, ct_t *ct, void *buf, u32 len)
-{
- /* read the tag value */
- if ((*ct->iread)(card->ospriv, ct->dlpriv, ct->ioffset, buf, len) != (s32)len)
- {
- return -1;
- }
-
- ct->ioffset += len;
-
- return 0;
-} /* read_bytes() */
-
-
-static s32 read_uint(card_t *card, ct_t *ct, u32 *u, u32 len)
-{
- u8 buf[4];
-
- /* Integer cannot be more than 4 bytes */
- if (len > 4)
- {
- return -1;
- }
-
- if (read_bytes(card, ct, buf, len))
- {
- return -1;
- }
-
- *u = xbv2uint(buf, len);
-
- return 0;
-} /* read_uint() */
-
-
-static u32 write_uint16(void *buf, const u32 offset, const u16 val)
-{
- u8 *dst = (u8 *)buf + offset;
- *dst++ = (u8)(val & 0xff); /* LSB first */
- *dst = (u8)(val >> 8);
- return sizeof(u16);
-}
-
-
-static u32 write_uint32(void *buf, const u32 offset, const u32 val)
-{
- (void)write_uint16(buf, offset + 0, (u16)(val & 0xffff));
- (void)write_uint16(buf, offset + 2, (u16)(val >> 16));
- return sizeof(u32);
-}
-
-
-static u32 write_bytes(void *buf, const u32 offset, const u8 *data, const u32 len)
-{
- u32 i;
- u8 *dst = (u8 *)buf + offset;
-
- for (i = 0; i < len; i++)
- {
- *dst++ = *((u8 *)data + i);
- }
- return len;
-}
-
-
-static u32 write_tag(void *buf, const u32 offset, const char *tag_str)
-{
- u8 *dst = (u8 *)buf + offset;
- memcpy(dst, tag_str, 4);
- return 4;
-}
-
-
-static u32 write_chunk(void *buf, const u32 offset, const char *tag_str, const u32 payload_len)
-{
- u32 written = 0;
- written += write_tag(buf, offset, tag_str);
- written += write_uint32(buf, written + offset, (u32)payload_len);
-
- return written;
-}
-
-
-static u16 calc_checksum(void *buf, const u32 offset, const u32 bytes_len)
-{
- u32 i;
- u8 *src = (u8 *)buf + offset;
- u16 sum = 0;
- u16 val;
-
- for (i = 0; i < bytes_len / 2; i++)
- {
- /* Contents copied to file is LE, host might not be */
- val = (u16) * src++; /* LSB */
- val += (u16)(*src++) << 8; /* MSB */
- sum += val;
- }
-
- /* Total of uint16s in the stream plus the stored check value
- * should equal STREAM_CHECKSUM when decoded.
- */
- return (STREAM_CHECKSUM - sum);
-}
-
-
-#define PTDL_RESET_DATA_SIZE 20 /* Size of reset vectors PTDL */
-
-static u32 calc_patch_size(const xbv1_t *fwinfo)
-{
- s16 i;
- u32 size = 0;
-
- /*
- * Work out how big an equivalent patch format file must be for this image.
- * This only needs to be approximate, so long as it's large enough.
- */
- if (fwinfo->mode != xbv_firmware)
- {
- return 0;
- }
-
- /* Payload (which will get put into a series of PTDLs) */
- for (i = 0; i < fwinfo->num_fwdl; i++)
- {
- size += fwinfo->fwdl[i].dl_size;
- }
-
- /* Another PTDL at the end containing reset vectors */
- size += PTDL_RESET_DATA_SIZE;
-
- /* PTDL headers. Add one for remainder, one for reset vectors */
- size += ((fwinfo->num_fwdl / PTDL_MAX_SIZE) + 2) * PTDL_HDR_SIZE;
-
- /* Another 1K sufficient to cover miscellaneous headers */
- size += 1024;
-
- return size;
-}
-
-
-static u32 write_xbv_header(void *buf, const u32 offset, const u32 file_payload_length)
-{
- u32 written = 0;
-
- /* The length value given to the XBV chunk is the length of all subsequent
- * contents of the file, excluding the 8 byte size of the XBV1 header itself
- * (The added 6 bytes thus accounts for the size of the VERF)
- */
- written += write_chunk(buf, offset + written, (char *)"XBV1", file_payload_length + 6);
-
- written += write_chunk(buf, offset + written, (char *)"VERF", 2);
- written += write_uint16(buf, offset + written, 0); /* File version */
-
- return written;
-}
-
-
-static u32 write_ptch_header(void *buf, const u32 offset, const u32 fw_id)
-{
- u32 written = 0;
-
- /* LIST is written with a zero length, to be updated later */
- written += write_chunk(buf, offset + written, (char *)"LIST", 0);
- written += write_tag(buf, offset + written, (char *)"PTCH"); /* List type */
-
- written += write_chunk(buf, offset + written, (char *)"FWID", 4);
- written += write_uint32(buf, offset + written, fw_id);
-
-
- return written;
-}
-
-
-#define UF_REGION_PHY 1
-#define UF_REGION_MAC 2
-#define UF_MEMPUT_MAC 0x0000
-#define UF_MEMPUT_PHY 0x1000
-
-static u32 write_patchcmd(void *buf, const u32 offset, const u32 dst_genaddr, const u16 len)
-{
- u32 written = 0;
- u32 region = (dst_genaddr >> 28);
- u16 cmd_and_len = UF_MEMPUT_MAC;
-
- if (region == UF_REGION_PHY)
- {
- cmd_and_len = UF_MEMPUT_PHY;
- }
- else if (region != UF_REGION_MAC)
- {
- return 0; /* invalid */
- }
-
- /* Write the command and data length */
- cmd_and_len |= len;
- written += write_uint16(buf, offset + written, cmd_and_len);
-
- /* Write the destination generic address */
- written += write_uint16(buf, offset + written, (u16)(dst_genaddr >> 16));
- written += write_uint16(buf, offset + written, (u16)(dst_genaddr & 0xffff));
-
- /* The data payload should be appended to the command */
- return written;
-}
-
-
-static u32 write_fwdl_to_ptdl(void *buf, const u32 offset, fwreadfn_t readfn,
- const struct FWDL *fwdl, const void *dlpriv,
- const u32 fw_id, void *fw_buf)
-{
- u32 written = 0;
- s16 chunks = 0;
- u32 left = fwdl->dl_size; /* Bytes left in this fwdl */
- u32 dl_addr = fwdl->dl_addr; /* Target address of fwdl image on XAP */
- u32 dl_offs = fwdl->dl_offset; /* Offset of fwdl image data in source */
- u16 csum;
- u32 csum_start_offs; /* first offset to include in checksum */
- u32 sec_data_len; /* section data byte count */
- u32 sec_len; /* section data + header byte count */
-
- /* FWDL maps to one or more PTDLs, as max size for a PTDL is 1K words */
- while (left)
- {
- /* Calculate amount to be transferred */
- sec_data_len = min_t(u32, left, PTDL_MAX_SIZE - PTDL_HDR_SIZE);
- sec_len = sec_data_len + PTDL_HDR_SIZE;
-
- /* Write PTDL header + entire PTDL size */
- written += write_chunk(buf, offset + written, (char *)"PTDL", sec_len);
- /* bug digest implies 4 bytes of padding here, but that seems wrong */
-
- /* Checksum starts here */
- csum_start_offs = offset + written;
-
- /* Patch-chunk header: fw_id. Note that this is in XAP word order */
- written += write_uint16(buf, offset + written, (u16)(fw_id >> 16));
- written += write_uint16(buf, offset + written, (u16)(fw_id & 0xffff));
-
- /* Patch-chunk header: section length in uint16s */
- written += write_uint16(buf, offset + written, (u16)(sec_len / 2));
-
-
- /* Write the appropriate patch command for the data's destination ptr */
- written += write_patchcmd(buf, offset + written, dl_addr, (u16)(sec_data_len / 2));
-
- /* Write the data itself (limited to the max chunk length) */
- if (readfn(NULL, (void *)dlpriv, dl_offs, fw_buf, sec_data_len) < 0)
- {
- return 0;
- }
-
- written += write_bytes(buf,
- offset + written,
- fw_buf,
- sec_data_len);
-
- /* u16 checksum calculated over data written */
- csum = calc_checksum(buf, csum_start_offs, written - (csum_start_offs - offset));
- written += write_uint16(buf, offset + written, csum);
-
- left -= sec_data_len;
- dl_addr += sec_data_len;
- dl_offs += sec_data_len;
- chunks++;
- }
-
- return written;
-}
-
-
-#define SEC_CMD_LEN ((4 + 2) * 2) /* sizeof(cmd, vector) per XAP */
-#define PTDL_VEC_HDR_SIZE (4 + 2 + 2) /* sizeof(fw_id, sec_len, csum) */
-#define UF_MAC_START_VEC 0x00c00000 /* Start address of image on MAC */
-#define UF_PHY_START_VEC 0x00c00000 /* Start address of image on PHY */
-#define UF_MAC_START_CMD 0x6000 /* MAC "Set start address" command */
-#define UF_PHY_START_CMD 0x7000 /* PHY "Set start address" command */
-
-static u32 write_reset_ptdl(void *buf, const u32 offset, const xbv1_t *fwinfo, u32 fw_id)
-{
- u32 written = 0;
- u16 csum;
- u32 csum_start_offs; /* first offset to include in checksum */
- u32 sec_len; /* section data + header byte count */
-
- sec_len = SEC_CMD_LEN + PTDL_VEC_HDR_SIZE; /* Total section byte length */
-
- /* Write PTDL header + entire PTDL size */
- written += write_chunk(buf, offset + written, (char *)"PTDL", sec_len);
-
- /* Checksum starts here */
- csum_start_offs = offset + written;
-
- /* Patch-chunk header: fw_id. Note that this is in XAP word order */
- written += write_uint16(buf, offset + written, (u16)(fw_id >> 16));
- written += write_uint16(buf, offset + written, (u16)(fw_id & 0xffff));
-
- /* Patch-chunk header: section length in uint16s */
- written += write_uint16(buf, offset + written, (u16)(sec_len / 2));
-
- /*
- * Restart addresses to be executed on subsequent loader restart command.
- */
-
- /* Setup the MAC start address, note word ordering */
- written += write_uint16(buf, offset + written, UF_MAC_START_CMD);
- written += write_uint16(buf, offset + written, (UF_MAC_START_VEC >> 16));
- written += write_uint16(buf, offset + written, (UF_MAC_START_VEC & 0xffff));
-
- /* Setup the PHY start address, note word ordering */
- written += write_uint16(buf, offset + written, UF_PHY_START_CMD);
- written += write_uint16(buf, offset + written, (UF_PHY_START_VEC >> 16));
- written += write_uint16(buf, offset + written, (UF_PHY_START_VEC & 0xffff));
-
- /* u16 checksum calculated over data written */
- csum = calc_checksum(buf, csum_start_offs, written - (csum_start_offs - offset));
- written += write_uint16(buf, offset + written, csum);
-
- return written;
-}
-
-
-/*
- * ---------------------------------------------------------------------------
- * read_slut
- *
- * Read the Symbol Look Up Table (SLUT) from a parsed firmware image.
- *
- * Arguments:
- * readfn Pointer to function to call to read from the file.
- * dlpriv Opaque pointer arg to pass to readfn.
- * addr Offset into firmware image of SLUT.
- * fwinfo Pointer to fwinfo struct to fill in.
- *
- * Returns:
- * Number of SLUT entries in the f/w, or -1 if the image was corrupt.
- * ---------------------------------------------------------------------------
- */
-s32 xbv1_read_slut(card_t *card, fwreadfn_t readfn, void *dlpriv, xbv1_t *fwinfo,
- symbol_t *slut, u32 slut_len)
-{
- s16 i;
- s32 offset;
- u32 magic;
- u32 count = 0;
- ct_t ct;
-
- if (fwinfo->mode != xbv_firmware)
- {
- return -1;
- }
-
- /* Find the d/l segment containing the SLUT */
- /* This relies on the SLUT being entirely contained in one segment */
- offset = -1;
- for (i = 0; i < fwinfo->num_fwdl; i++)
- {
- if ((fwinfo->slut_addr >= fwinfo->fwdl[i].dl_addr) &&
- (fwinfo->slut_addr < (fwinfo->fwdl[i].dl_addr + fwinfo->fwdl[i].dl_size)))
- {
- offset = fwinfo->fwdl[i].dl_offset +
- (fwinfo->slut_addr - fwinfo->fwdl[i].dl_addr);
- }
- }
- if (offset < 0)
- {
- return -1;
- }
-
- ct.dlpriv = dlpriv;
- ct.ioffset = offset;
- ct.iread = readfn;
-
- if (read_uint(card, &ct, &magic, 2))
- {
- return -1;
- }
- if (magic != SLUT_FINGERPRINT)
- {
- return -1;
- }
-
- while (count < slut_len)
- {
- u32 id, obj;
-
- /* Read Symbol Id */
- if (read_uint(card, &ct, &id, 2))
- {
- return -1;
- }
-
- /* Check for end of table marker */
- if (id == CSR_SLT_END)
- {
- break;
- }
-
- /* Read Symbol Value */
- if (read_uint(card, &ct, &obj, 4))
- {
- return -1;
- }
-
- slut[count].id = (u16)id;
- slut[count].obj = obj;
- count++;
- }
-
- return count;
-} /* read_slut() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * xbv_to_patch
- *
- * Convert (the relevant parts of) a firmware xbv file into a patch xbv
- *
- * Arguments:
- * card
- * fw_buf - pointer to xbv firmware image
- * fwinfo - structure describing the firmware image
- * size - pointer to location into which size of f/w is written.
- *
- * Returns:
- * Pointer to firmware image, or NULL on error. Caller must free this
- * buffer via kfree() once it's finished with.
- *
- * Notes:
- * The input fw_buf should have been checked via xbv1_parse prior to
- * calling this function, so the input image is assumed valid.
- * ---------------------------------------------------------------------------
- */
-#define PTCH_LIST_SIZE 16 /* sizeof PTCH+FWID chunk in LIST header */
-
-void* xbv_to_patch(card_t *card, fwreadfn_t readfn,
- const void *fw_buf, const xbv1_t *fwinfo, u32 *size)
-{
- void *patch_buf = NULL;
- u32 patch_buf_size;
- u32 payload_offs = 0; /* Start of XBV payload */
- s16 i;
- u32 patch_offs = 0;
- u32 list_len_offs = 0; /* Offset of PTDL LIST length parameter */
- u32 ptdl_start_offs = 0; /* Offset of first PTDL chunk */
- u32 fw_id;
- void *rdbuf;
-
- if (!fw_buf || !fwinfo || !card)
- {
- return NULL;
- }
-
- if (fwinfo->mode != xbv_firmware)
- {
- unifi_error(NULL, "Not a firmware file\n");
- return NULL;
- }
-
- /* Pre-allocate read buffer for chunk conversion */
- rdbuf = kmalloc(PTDL_MAX_SIZE, GFP_KERNEL);
- if (!rdbuf)
- {
- unifi_error(card, "Couldn't alloc conversion buffer\n");
- return NULL;
- }
-
- /* Loader requires patch file's build ID to match the running firmware's */
- fw_id = card->build_id;
-
- /* Firmware XBV1 contains VERF, optional INFO, SLUT(s), FWDL(s) */
- /* Other chunks should get skipped. */
- /* VERF should be sanity-checked against chip version */
-
- /* Patch XBV1 contains VERF, optional INFO, PTCH */
- /* PTCH contains FWID, optional INFO, PTDL(s), PTDL(start_vec) */
- /* Each FWDL is split into PTDLs (each is 1024 XAP words max) */
- /* Each PTDL contains running ROM f/w version, and checksum */
- /* MAC/PHY reset addresses (known) are added into a final PTDL */
-
- /* The input image has already been parsed, and loaded into fwinfo, so we
- * can use that to build the output image
- */
- patch_buf_size = calc_patch_size(fwinfo);
-
- patch_buf = kmalloc(patch_buf_size, GFP_KERNEL);
- if (!patch_buf)
- {
- kfree(rdbuf);
- unifi_error(NULL, "Can't malloc buffer for patch conversion\n");
- return NULL;
- }
-
- memset(patch_buf, 0xdd, patch_buf_size);
-
- /* Write XBV + VERF headers */
- patch_offs += write_xbv_header(patch_buf, patch_offs, 0);
- payload_offs = patch_offs;
-
- /* Write patch (LIST) header */
- list_len_offs = patch_offs + 4; /* Save LIST.length offset for later update */
- patch_offs += write_ptch_header(patch_buf, patch_offs, fw_id);
-
- /* Save start offset of the PTDL chunks */
- ptdl_start_offs = patch_offs;
-
- /* Write LIST of firmware PTDL blocks */
- for (i = 0; i < fwinfo->num_fwdl; i++)
- {
- patch_offs += write_fwdl_to_ptdl(patch_buf,
- patch_offs,
- readfn,
- &fwinfo->fwdl[i],
- fw_buf,
- fw_id,
- rdbuf);
- }
-
- /* Write restart-vector PTDL last */
- patch_offs += write_reset_ptdl(patch_buf, patch_offs, fwinfo, fw_id);
-
- /* Now the length is known, update the LIST.length */
- (void)write_uint32(patch_buf, list_len_offs,
- (patch_offs - ptdl_start_offs) + PTCH_LIST_SIZE);
-
- /* Re-write the XBV headers just to fill in the correct file size */
- (void)write_xbv_header(patch_buf, 0, (patch_offs - payload_offs));
-
- unifi_trace(card->ospriv, UDBG1, "XBV:PTCH size %u, fw_id %u\n",
- patch_offs, fw_id);
- if (size)
- {
- *size = patch_offs;
- }
- kfree(rdbuf);
-
- return patch_buf;
-}
-
-
diff --git a/drivers/staging/csr/csr_wifi_hip_xbv.h b/drivers/staging/csr/csr_wifi_hip_xbv.h
deleted file mode 100644
index 3c50723..0000000
--- a/drivers/staging/csr/csr_wifi_hip_xbv.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/*
- * ---------------------------------------------------------------------------
- * FILE: csr_wifi_hip_xbv.h
- *
- * PURPOSE:
- * Definitions and declarations for code to read XBV files - the UniFi
- * firmware download file format.
- *
- * ---------------------------------------------------------------------------
- */
-#ifndef __XBV_H__
-#define __XBV_H__
-
-#ifndef CSR_WIFI_XBV_TEST
-/* Driver includes */
-#include "csr_wifi_hip_unifi.h"
-#endif
-
-
-struct VMEQ
-{
- u32 addr;
- u16 mask;
- u16 value;
-};
-
-struct VAND
-{
- u32 first;
- u32 count;
-};
-
-struct VERS
-{
- u32 num_vand;
-};
-
-struct FWDL
-{
- u32 dl_addr;
- u32 dl_size;
- u32 dl_offset;
-};
-
-struct FWOV
-{
- u32 dl_size;
- u32 dl_offset;
-};
-
-struct PTDL
-{
- u32 dl_size;
- u32 dl_offset;
-};
-
-#define MAX_VMEQ 64
-#define MAX_VAND 64
-#define MAX_FWDL 256
-#define MAX_PTDL 256
-
-/* An XBV1 file can either contain firmware or patches (at the
- * moment). The 'mode' member of the xbv1_t structure tells us which
- * one is the case. */
-typedef enum
-{
- xbv_unknown,
- xbv_firmware,
- xbv_patch
-} xbv_mode;
-
-typedef struct
-{
- xbv_mode mode;
-
- /* Parts of a Firmware XBV1 */
-
- struct VMEQ vmeq[MAX_VMEQ];
- u32 num_vmeq;
- struct VAND vand[MAX_VAND];
- struct VERS vers;
-
- u32 slut_addr;
-
- /* F/W download image, possibly more than one part */
- struct FWDL fwdl[MAX_FWDL];
- s16 num_fwdl;
-
- /* F/W overlay image, addr not used */
- struct FWOV fwov;
-
- /* Parts of a Patch XBV1 */
-
- u32 build_id;
-
- struct PTDL ptdl[MAX_PTDL];
- s16 num_ptdl;
-} xbv1_t;
-
-
-typedef s32 (*fwreadfn_t)(void *ospriv, void *dlpriv, u32 offset, void *buf, u32 len);
-
-CsrResult xbv1_parse(card_t *card, fwreadfn_t readfn, void *dlpriv, xbv1_t *fwinfo);
-s32 xbv1_read_slut(card_t *card, fwreadfn_t readfn, void *dlpriv, xbv1_t *fwinfo,
- symbol_t *slut, u32 slut_len);
-void* xbv_to_patch(card_t *card, fwreadfn_t readfn, const void *fw_buf, const xbv1_t *fwinfo,
- u32 *size);
-
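-/*
- * Typical flow (illustrative): call xbv1_parse() first; if fwinfo->mode is
- * xbv_firmware and a patch-format download is required, convert the image
- * with xbv_to_patch() and kfree() the returned buffer when finished.
- */
-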
-#endif /* __XBV_H__ */
diff --git a/drivers/staging/csr/csr_wifi_hostio_prim.h b/drivers/staging/csr/csr_wifi_hostio_prim.h
deleted file mode 100644
index cfb3e27..0000000
--- a/drivers/staging/csr/csr_wifi_hostio_prim.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-
-#ifndef CSR_WIFI_HOSTIO_H
-#define CSR_WIFI_HOSTIO_H
-
-#define CSR_WIFI_HOSTIO_PRIM 0x0453
-
-#endif /* CSR_WIFI_HOSTIO_H */
-
diff --git a/drivers/staging/csr/csr_wifi_lib.h b/drivers/staging/csr/csr_wifi_lib.h
deleted file mode 100644
index 5fde0ef..0000000
--- a/drivers/staging/csr/csr_wifi_lib.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-#ifndef CSR_WIFI_LIB_H__
-#define CSR_WIFI_LIB_H__
-
-#include "csr_wifi_fsm_event.h"
-
-/*----------------------------------------------------------------------------*
- * CsrWifiFsmEventInit
- *
- * DESCRIPTION
- * Macro to initialise the members of a CsrWifiFsmEvent.
- *----------------------------------------------------------------------------*/
-#define CsrWifiFsmEventInit(evt, p_primtype, p_msgtype, p_dst, p_src) \
- (evt)->primtype = p_primtype; \
- (evt)->type = p_msgtype; \
- (evt)->destination = p_dst; \
- (evt)->source = p_src
-
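-/*
- * Example (illustrative), with primtype, msgtype, dst and src assumed to be
- * in scope and 'evt' pointing at the CsrWifiFsmEvent header of a message:
- *
- * CsrWifiFsmEventInit(evt, primtype, msgtype, dst, src);
- */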
-
-/*----------------------------------------------------------------------------*
- * CsrWifiEvent_struct
- *
- * DESCRIPTION
- * Generic message creator.
- * Allocates and fills in a message with the signature CsrWifiEvent
- *
- *----------------------------------------------------------------------------*/
-CsrWifiFsmEvent* CsrWifiEvent_struct(u16 primtype, u16 msgtype, CsrSchedQid dst, CsrSchedQid src);
-
-typedef struct
-{
- CsrWifiFsmEvent common;
- u8 value;
-} CsrWifiEventCsrUint8;
-
-/*----------------------------------------------------------------------------*
- * CsrWifiEventCsrUint8_struct
- *
- * DESCRIPTION
- * Generic message creator.
- * Allocates and fills in a message with the signature CsrWifiEventCsrUint8
- *
- *----------------------------------------------------------------------------*/
-CsrWifiEventCsrUint8* CsrWifiEventCsrUint8_struct(u16 primtype, u16 msgtype, CsrSchedQid dst, CsrSchedQid src, u8 value);
-
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 value;
-} CsrWifiEventCsrUint16;
-
-/*----------------------------------------------------------------------------*
- * CsrWifiEventCsrUint16_struct
- *
- * DESCRIPTION
- * Generic message creator.
- * Allocates and fills in a message with the signature CsrWifiEventCsrUint16
- *
- *----------------------------------------------------------------------------*/
-CsrWifiEventCsrUint16* CsrWifiEventCsrUint16_struct(u16 primtype, u16 msgtype, CsrSchedQid dst, CsrSchedQid src, u16 value);
-
-typedef struct
-{
- CsrWifiFsmEvent common;
- u32 value;
-} CsrWifiEventCsrUint32;
-
-/*----------------------------------------------------------------------------*
- * CsrWifiEventCsrUint32_struct
- *
- * DESCRIPTION
- * Generic message creator.
- * Allocates and fills in a message with the signature CsrWifiEventCsrUint32
- *
- *----------------------------------------------------------------------------*/
-CsrWifiEventCsrUint32* CsrWifiEventCsrUint32_struct(u16 primtype, u16 msgtype, CsrSchedQid dst, CsrSchedQid src, u32 value);
-
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 value16;
- u8 value8;
-} CsrWifiEventCsrUint16CsrUint8;
-
-/*----------------------------------------------------------------------------*
- * CsrWifiEventCsrUint16CsrUint8_struct
- *
- * DESCRIPTION
- * Generic message creator.
- * Allocates and fills in a message with the signature CsrWifiEventCsrUint16CsrUint8
- *
- *----------------------------------------------------------------------------*/
-CsrWifiEventCsrUint16CsrUint8* CsrWifiEventCsrUint16CsrUint8_struct(u16 primtype, u16 msgtype, CsrSchedQid dst, CsrSchedQid src, u16 value16, u8 value8);
-
-#endif /* CSR_WIFI_LIB_H__ */
diff --git a/drivers/staging/csr/csr_wifi_msgconv.h b/drivers/staging/csr/csr_wifi_msgconv.h
deleted file mode 100644
index f8b4029..0000000
--- a/drivers/staging/csr/csr_wifi_msgconv.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-#ifndef CSR_WIFI_MSGCONV_H__
-#define CSR_WIFI_MSGCONV_H__
-
-#include "csr_prim_defs.h"
-#include "csr_sched.h"
-
-void CsrUint16SerBigEndian(u8 *ptr, size_t *len, u16 v);
-void CsrUint24SerBigEndian(u8 *ptr, size_t *len, u32 v);
-void CsrUint32SerBigEndian(u8 *ptr, size_t *len, u32 v);
-
-void CsrUint16DesBigEndian(u16 *v, u8 *buffer, size_t *offset);
-void CsrUint24DesBigEndian(u32 *v, u8 *buffer, size_t *offset);
-void CsrUint32DesBigEndian(u32 *v, u8 *buffer, size_t *offset);
-
-void CsrUint24Ser(u8 *ptr, size_t *len, u32 v);
-void CsrUint24Des(u32 *v, u8 *buffer, size_t *offset);
-
-
-size_t CsrWifiEventSizeof(void *msg);
-u8* CsrWifiEventSer(u8 *ptr, size_t *len, void *msg);
-void* CsrWifiEventDes(u8 *buffer, size_t length);
-
-size_t CsrWifiEventCsrUint8Sizeof(void *msg);
-u8* CsrWifiEventCsrUint8Ser(u8 *ptr, size_t *len, void *msg);
-void* CsrWifiEventCsrUint8Des(u8 *buffer, size_t length);
-
-size_t CsrWifiEventCsrUint16Sizeof(void *msg);
-u8* CsrWifiEventCsrUint16Ser(u8 *ptr, size_t *len, void *msg);
-void* CsrWifiEventCsrUint16Des(u8 *buffer, size_t length);
-
-size_t CsrWifiEventCsrUint32Sizeof(void *msg);
-u8* CsrWifiEventCsrUint32Ser(u8 *ptr, size_t *len, void *msg);
-void* CsrWifiEventCsrUint32Des(u8 *buffer, size_t length);
-
-size_t CsrWifiEventCsrUint16CsrUint8Sizeof(void *msg);
-u8* CsrWifiEventCsrUint16CsrUint8Ser(u8 *ptr, size_t *len, void *msg);
-void* CsrWifiEventCsrUint16CsrUint8Des(u8 *buffer, size_t length);
-
-#endif /* CSR_WIFI_MSGCONV_H__ */
diff --git a/drivers/staging/csr/csr_wifi_nme_ap_converter_init.c b/drivers/staging/csr/csr_wifi_nme_ap_converter_init.c
deleted file mode 100644
index 0689d6f..0000000
--- a/drivers/staging/csr/csr_wifi_nme_ap_converter_init.c
+++ /dev/null
@@ -1,90 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#include "csr_msgconv.h"
-#include "csr_macro.h"
-
-#ifdef CSR_WIFI_NME_ENABLE
-#ifdef CSR_WIFI_AP_ENABLE
-
-#ifdef CSR_LOG_ENABLE
-#include "csr_log.h"
-#endif
-
-#ifndef EXCLUDE_CSR_WIFI_NME_AP_MODULE
-#include "csr_wifi_nme_ap_serialize.h"
-#include "csr_wifi_nme_ap_prim.h"
-
-static CsrMsgConvMsgEntry csrwifinmeap_conv_lut[] = {
- { CSR_WIFI_NME_AP_CONFIG_SET_REQ, CsrWifiNmeApConfigSetReqSizeof, CsrWifiNmeApConfigSetReqSer, CsrWifiNmeApConfigSetReqDes, CsrWifiNmeApConfigSetReqSerFree },
- { CSR_WIFI_NME_AP_WPS_REGISTER_REQ, CsrWifiNmeApWpsRegisterReqSizeof, CsrWifiNmeApWpsRegisterReqSer, CsrWifiNmeApWpsRegisterReqDes, CsrWifiNmeApWpsRegisterReqSerFree },
- { CSR_WIFI_NME_AP_START_REQ, CsrWifiNmeApStartReqSizeof, CsrWifiNmeApStartReqSer, CsrWifiNmeApStartReqDes, CsrWifiNmeApStartReqSerFree },
- { CSR_WIFI_NME_AP_STOP_REQ, CsrWifiNmeApStopReqSizeof, CsrWifiNmeApStopReqSer, CsrWifiNmeApStopReqDes, CsrWifiNmeApStopReqSerFree },
- { CSR_WIFI_NME_AP_WMM_PARAM_UPDATE_REQ, CsrWifiNmeApWmmParamUpdateReqSizeof, CsrWifiNmeApWmmParamUpdateReqSer, CsrWifiNmeApWmmParamUpdateReqDes, CsrWifiNmeApWmmParamUpdateReqSerFree },
- { CSR_WIFI_NME_AP_STA_REMOVE_REQ, CsrWifiNmeApStaRemoveReqSizeof, CsrWifiNmeApStaRemoveReqSer, CsrWifiNmeApStaRemoveReqDes, CsrWifiNmeApStaRemoveReqSerFree },
- { CSR_WIFI_NME_AP_CONFIG_SET_CFM, CsrWifiNmeApConfigSetCfmSizeof, CsrWifiNmeApConfigSetCfmSer, CsrWifiNmeApConfigSetCfmDes, CsrWifiNmeApConfigSetCfmSerFree },
- { CSR_WIFI_NME_AP_WPS_REGISTER_CFM, CsrWifiNmeApWpsRegisterCfmSizeof, CsrWifiNmeApWpsRegisterCfmSer, CsrWifiNmeApWpsRegisterCfmDes, CsrWifiNmeApWpsRegisterCfmSerFree },
- { CSR_WIFI_NME_AP_START_CFM, CsrWifiNmeApStartCfmSizeof, CsrWifiNmeApStartCfmSer, CsrWifiNmeApStartCfmDes, CsrWifiNmeApStartCfmSerFree },
- { CSR_WIFI_NME_AP_STOP_CFM, CsrWifiNmeApStopCfmSizeof, CsrWifiNmeApStopCfmSer, CsrWifiNmeApStopCfmDes, CsrWifiNmeApStopCfmSerFree },
- { CSR_WIFI_NME_AP_STOP_IND, CsrWifiNmeApStopIndSizeof, CsrWifiNmeApStopIndSer, CsrWifiNmeApStopIndDes, CsrWifiNmeApStopIndSerFree },
- { CSR_WIFI_NME_AP_WMM_PARAM_UPDATE_CFM, CsrWifiNmeApWmmParamUpdateCfmSizeof, CsrWifiNmeApWmmParamUpdateCfmSer, CsrWifiNmeApWmmParamUpdateCfmDes, CsrWifiNmeApWmmParamUpdateCfmSerFree },
- { CSR_WIFI_NME_AP_STATION_IND, CsrWifiNmeApStationIndSizeof, CsrWifiNmeApStationIndSer, CsrWifiNmeApStationIndDes, CsrWifiNmeApStationIndSerFree },
-
- { 0, NULL, NULL, NULL, NULL },
-};
-
-CsrMsgConvMsgEntry* CsrWifiNmeApConverterLookup(CsrMsgConvMsgEntry *ce, u16 msgType)
-{
- if (msgType & CSR_PRIM_UPSTREAM)
- {
- u16 idx = (msgType & ~CSR_PRIM_UPSTREAM) + CSR_WIFI_NME_AP_PRIM_DOWNSTREAM_COUNT;
- if (idx < (CSR_WIFI_NME_AP_PRIM_UPSTREAM_COUNT + CSR_WIFI_NME_AP_PRIM_DOWNSTREAM_COUNT) &&
- csrwifinmeap_conv_lut[idx].msgType == msgType)
- {
- return &csrwifinmeap_conv_lut[idx];
- }
- }
- else
- {
- if (msgType < CSR_WIFI_NME_AP_PRIM_DOWNSTREAM_COUNT &&
- csrwifinmeap_conv_lut[msgType].msgType == msgType)
- {
- return &csrwifinmeap_conv_lut[msgType];
- }
- }
- return NULL;
-}
-
-
-void CsrWifiNmeApConverterInit(void)
-{
- CsrMsgConvInsert(CSR_WIFI_NME_AP_PRIM, csrwifinmeap_conv_lut);
- CsrMsgConvCustomLookupRegister(CSR_WIFI_NME_AP_PRIM, CsrWifiNmeApConverterLookup);
-}
-
-
-#ifdef CSR_LOG_ENABLE
-static const CsrLogPrimitiveInformation csrwifinmeap_conv_info = {
- CSR_WIFI_NME_AP_PRIM,
- (char *)"CSR_WIFI_NME_AP_PRIM",
- csrwifinmeap_conv_lut
-};
-const CsrLogPrimitiveInformation* CsrWifiNmeApTechInfoGet(void)
-{
- return &csrwifinmeap_conv_info;
-}
-
-
-#endif /* CSR_LOG_ENABLE */
-#endif /* EXCLUDE_CSR_WIFI_NME_AP_MODULE */
-#endif /* CSR_WIFI_NME_ENABLE */
-#endif /* CSR_WIFI_AP_ENABLE */
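
A note on the lookup above: downstream primitives index the LUT directly,
while upstream primitives carry the CSR_PRIM_UPSTREAM bit and are offset by
the downstream count. With the values defined in csr_wifi_nme_ap_prim.h
further down in this patch, CSR_WIFI_NME_AP_START_CFM is
(0x0002 + CSR_PRIM_UPSTREAM); clearing the upstream bit and adding the six
downstream rows lands on LUT index 8, which is the START_CFM entry. A
hypothetical caller (everything except the lookup itself is a placeholder):

    /* Sketch only: the 'ce' argument is ignored by the lookup shown above,
     * so NULL is passed here. */
    static void example_nme_ap_lookup(void)
    {
        CsrMsgConvMsgEntry *entry =
            CsrWifiNmeApConverterLookup(NULL, CSR_WIFI_NME_AP_START_CFM);

        if (entry && entry->msgType == CSR_WIFI_NME_AP_START_CFM)
            pr_debug("found converter row for START_CFM\n");
    }
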
diff --git a/drivers/staging/csr/csr_wifi_nme_ap_converter_init.h b/drivers/staging/csr/csr_wifi_nme_ap_converter_init.h
deleted file mode 100644
index b89d7c7..0000000
--- a/drivers/staging/csr/csr_wifi_nme_ap_converter_init.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#ifndef CSR_WIFI_NME_AP_CONVERTER_INIT_H__
-#define CSR_WIFI_NME_AP_CONVERTER_INIT_H__
-
-#ifndef CSR_WIFI_NME_ENABLE
-#error CSR_WIFI_NME_ENABLE MUST be defined in order to use csr_wifi_nme_ap_converter_init.h
-#endif
-#ifndef CSR_WIFI_AP_ENABLE
-#error CSR_WIFI_AP_ENABLE MUST be defined in order to use csr_wifi_nme_ap_converter_init.h
-#endif
-
-#ifndef EXCLUDE_CSR_WIFI_NME_AP_MODULE
-
-#include "csr_msgconv.h"
-
-#ifdef CSR_LOG_ENABLE
-#include "csr_log.h"
-
-extern const CsrLogPrimitiveInformation* CsrWifiNmeApTechInfoGet(void);
-#endif /* CSR_LOG_ENABLE */
-
-extern void CsrWifiNmeApConverterInit(void);
-
-#else /* EXCLUDE_CSR_WIFI_NME_AP_MODULE */
-
-#define CsrWifiNmeApConverterInit()
-
-#endif /* EXCLUDE_CSR_WIFI_NME_AP_MODULE */
-
-#endif /* CSR_WIFI_NME_AP_CONVERTER_INIT_H__ */
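
For context, the header above lets callers invoke CsrWifiNmeApConverterInit()
unconditionally: when EXCLUDE_CSR_WIFI_NME_AP_MODULE is defined the call
compiles away to nothing. A hypothetical init path (the wrapper name below is
a placeholder) would simply do:

    /* Sketch only: registration happens once during driver start-up. */
    static void example_register_converters(void)
    {
        CsrWifiNmeApConverterInit(); /* no-op when the NME AP module is excluded */
    }
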
diff --git a/drivers/staging/csr/csr_wifi_nme_ap_free_downstream_contents.c b/drivers/staging/csr/csr_wifi_nme_ap_free_downstream_contents.c
deleted file mode 100644
index ab93588..0000000
--- a/drivers/staging/csr/csr_wifi_nme_ap_free_downstream_contents.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-#include <linux/slab.h>
-#include "csr_wifi_nme_ap_prim.h"
-#include "csr_wifi_nme_ap_lib.h"
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrWifiNmeApFreeDownstreamMessageContents
- *
- * DESCRIPTION
- *      Free the allocated memory in a CSR_WIFI_NME_AP downstream message.
- *      Does not free the message itself.
- * PARAMETERS
- * eventClass: only the value CSR_WIFI_NME_AP_PRIM will be handled
- * message: the message to free
- *----------------------------------------------------------------------------*/
-void CsrWifiNmeApFreeDownstreamMessageContents(u16 eventClass, void *message)
-{
- if (eventClass != CSR_WIFI_NME_AP_PRIM)
- {
- return;
- }
- if (NULL == message)
- {
- return;
- }
-
- switch (*((CsrWifiNmeApPrim *) message))
- {
- case CSR_WIFI_NME_AP_CONFIG_SET_REQ:
- {
- CsrWifiNmeApConfigSetReq *p = (CsrWifiNmeApConfigSetReq *)message;
- kfree(p->apMacConfig.macAddressList);
- p->apMacConfig.macAddressList = NULL;
- break;
- }
- case CSR_WIFI_NME_AP_START_REQ:
- {
- CsrWifiNmeApStartReq *p = (CsrWifiNmeApStartReq *)message;
- switch (p->apCredentials.authType)
- {
- case CSR_WIFI_SME_AP_AUTH_TYPE_PERSONAL:
- switch (p->apCredentials.nmeAuthType.authTypePersonal.pskOrPassphrase)
- {
- case CSR_WIFI_NME_AP_CREDENTIAL_TYPE_PASSPHRASE:
- kfree(p->apCredentials.nmeAuthType.authTypePersonal.authPers_credentials.passphrase.passphrase);
- p->apCredentials.nmeAuthType.authTypePersonal.authPers_credentials.passphrase.passphrase = NULL;
- break;
- default:
- break;
- }
- break;
- default:
- break;
- }
- {
- u16 i3;
- for (i3 = 0; i3 < p->p2pGoParam.operatingChanList.channelEntryListCount; i3++)
- {
- kfree(p->p2pGoParam.operatingChanList.channelEntryList[i3].operatingChannel);
- p->p2pGoParam.operatingChanList.channelEntryList[i3].operatingChannel = NULL;
- }
- }
- kfree(p->p2pGoParam.operatingChanList.channelEntryList);
- p->p2pGoParam.operatingChanList.channelEntryList = NULL;
- break;
- }
-
- default:
- break;
- }
-}
-
-
diff --git a/drivers/staging/csr/csr_wifi_nme_ap_free_upstream_contents.c b/drivers/staging/csr/csr_wifi_nme_ap_free_upstream_contents.c
deleted file mode 100644
index 2786a6b..0000000
--- a/drivers/staging/csr/csr_wifi_nme_ap_free_upstream_contents.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#include "csr_wifi_nme_ap_prim.h"
-#include "csr_wifi_nme_ap_lib.h"
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrWifiNmeApFreeUpstreamMessageContents
- *
- * DESCRIPTION
- *      Free the allocated memory in a CSR_WIFI_NME_AP upstream message.
- *      Does not free the message itself.
- * PARAMETERS
- * eventClass: only the value CSR_WIFI_NME_AP_PRIM will be handled
- * message: the message to free
- *----------------------------------------------------------------------------*/
-void CsrWifiNmeApFreeUpstreamMessageContents(u16 eventClass, void *message)
-{
- if (eventClass != CSR_WIFI_NME_AP_PRIM)
- {
- return;
- }
- if (NULL == message)
- {
- return;
- }
-}
-
-
diff --git a/drivers/staging/csr/csr_wifi_nme_ap_lib.h b/drivers/staging/csr/csr_wifi_nme_ap_lib.h
deleted file mode 100644
index 6d8df83..0000000
--- a/drivers/staging/csr/csr_wifi_nme_ap_lib.h
+++ /dev/null
@@ -1,495 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#ifndef CSR_WIFI_NME_AP_LIB_H__
-#define CSR_WIFI_NME_AP_LIB_H__
-
-#include "csr_sched.h"
-#include "csr_macro.h"
-#include "csr_msg_transport.h"
-
-#include "csr_wifi_lib.h"
-
-#include "csr_wifi_nme_ap_prim.h"
-#include "csr_wifi_nme_task.h"
-
-#ifndef CSR_WIFI_NME_ENABLE
-#error CSR_WIFI_NME_ENABLE MUST be defined in order to use csr_wifi_nme_ap_lib.h
-#endif
-#ifndef CSR_WIFI_AP_ENABLE
-#error CSR_WIFI_AP_ENABLE MUST be defined in order to use csr_wifi_nme_ap_lib.h
-#endif
-
-/*----------------------------------------------------------------------------*
- * CsrWifiNmeApFreeUpstreamMessageContents
- *
- * DESCRIPTION
- * Free the allocated memory in a CSR_WIFI_NME_AP upstream message. Does not
- * free the message itself, and can only be used for upstream messages.
- *
- * PARAMETERS
- *      eventClass: only CSR_WIFI_NME_AP_PRIM is handled; message: the message whose resources are freed
- *----------------------------------------------------------------------------*/
-void CsrWifiNmeApFreeUpstreamMessageContents(u16 eventClass, void *message);
-
-/*----------------------------------------------------------------------------*
- * CsrWifiNmeApFreeDownstreamMessageContents
- *
- * DESCRIPTION
- * Free the allocated memory in a CSR_WIFI_NME_AP downstream message. Does not
- * free the message itself, and can only be used for downstream messages.
- *
- * PARAMETERS
- * Deallocates the resources in a CSR_WIFI_NME_AP downstream message
- *----------------------------------------------------------------------------*/
-void CsrWifiNmeApFreeDownstreamMessageContents(u16 eventClass, void *message);
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApConfigSetReqSend
-
- DESCRIPTION
- This primitive passes AP configuration info for NME. This can be sent at
- any time but will be acted upon when the AP is started again. This
- information is common to both P2P GO and AP
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- apConfig - AP configuration for the NME.
- apMacConfig - MAC configuration to be acted on when
- CSR_WIFI_NME_AP_START.request is sent.
-
-*******************************************************************************/
-#define CsrWifiNmeApConfigSetReqCreate(msg__, dst__, src__, apConfig__, apMacConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeApConfigSetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_AP_PRIM, CSR_WIFI_NME_AP_CONFIG_SET_REQ, dst__, src__); \
- msg__->apConfig = (apConfig__); \
- msg__->apMacConfig = (apMacConfig__);
-
-#define CsrWifiNmeApConfigSetReqSendTo(dst__, src__, apConfig__, apMacConfig__) \
- { \
- CsrWifiNmeApConfigSetReq *msg__; \
- CsrWifiNmeApConfigSetReqCreate(msg__, dst__, src__, apConfig__, apMacConfig__); \
- CsrMsgTransport(dst__, CSR_WIFI_NME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiNmeApConfigSetReqSend(src__, apConfig__, apMacConfig__) \
- CsrWifiNmeApConfigSetReqSendTo(CSR_WIFI_NME_IFACEQUEUE, src__, apConfig__, apMacConfig__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApConfigSetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Status of the request.
-
-*******************************************************************************/
-#define CsrWifiNmeApConfigSetCfmCreate(msg__, dst__, src__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeApConfigSetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_AP_PRIM, CSR_WIFI_NME_AP_CONFIG_SET_CFM, dst__, src__); \
- msg__->status = (status__);
-
-#define CsrWifiNmeApConfigSetCfmSendTo(dst__, src__, status__) \
- { \
- CsrWifiNmeApConfigSetCfm *msg__; \
- CsrWifiNmeApConfigSetCfmCreate(msg__, dst__, src__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_NME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiNmeApConfigSetCfmSend(dst__, status__) \
- CsrWifiNmeApConfigSetCfmSendTo(dst__, CSR_WIFI_NME_IFACEQUEUE, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApStaRemoveReqSend
-
- DESCRIPTION
- This primitive disconnects a connected station. If keepBlocking is set to
- TRUE, the station with the specified MAC address is not allowed to
-      connect. If the requested station is not already connected, it may be
-      blocked based on the keepBlocking parameter.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
- staMacAddress - Mac Address of the station to be disconnected or blocked
- keepBlocking - If TRUE, the station is blocked. If FALSE and the station is
- connected, disconnect the station. If FALSE and the station
- is not connected, no action is taken.
-
-*******************************************************************************/
-#define CsrWifiNmeApStaRemoveReqCreate(msg__, dst__, src__, interfaceTag__, staMacAddress__, keepBlocking__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeApStaRemoveReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_AP_PRIM, CSR_WIFI_NME_AP_STA_REMOVE_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->staMacAddress = (staMacAddress__); \
- msg__->keepBlocking = (keepBlocking__);
-
-#define CsrWifiNmeApStaRemoveReqSendTo(dst__, src__, interfaceTag__, staMacAddress__, keepBlocking__) \
- { \
- CsrWifiNmeApStaRemoveReq *msg__; \
- CsrWifiNmeApStaRemoveReqCreate(msg__, dst__, src__, interfaceTag__, staMacAddress__, keepBlocking__); \
- CsrMsgTransport(dst__, CSR_WIFI_NME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiNmeApStaRemoveReqSend(src__, interfaceTag__, staMacAddress__, keepBlocking__) \
- CsrWifiNmeApStaRemoveReqSendTo(CSR_WIFI_NME_IFACEQUEUE, src__, interfaceTag__, staMacAddress__, keepBlocking__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApStartReqSend
-
- DESCRIPTION
-      This primitive requests NME to start the AP operation.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface identifier; unique identifier of an interface
- apType - AP Type specifies the Legacy AP or P2P GO operation
- cloakSsid - Indicates whether the SSID should be cloaked (hidden and
- not broadcast in beacon) or not
- ssid - Service Set Identifier
- ifIndex - Radio interface
- channel - Channel number of the channel to use
- apCredentials - Security credential configuration.
- maxConnections - Maximum number of stations/P2P clients allowed
- p2pGoParam - P2P specific GO parameters.
- wpsEnabled - Indicates whether WPS should be enabled or not
-
-*******************************************************************************/
-#define CsrWifiNmeApStartReqCreate(msg__, dst__, src__, interfaceTag__, apType__, cloakSsid__, ssid__, ifIndex__, channel__, apCredentials__, maxConnections__, p2pGoParam__, wpsEnabled__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeApStartReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_AP_PRIM, CSR_WIFI_NME_AP_START_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->apType = (apType__); \
- msg__->cloakSsid = (cloakSsid__); \
- msg__->ssid = (ssid__); \
- msg__->ifIndex = (ifIndex__); \
- msg__->channel = (channel__); \
- msg__->apCredentials = (apCredentials__); \
- msg__->maxConnections = (maxConnections__); \
- msg__->p2pGoParam = (p2pGoParam__); \
- msg__->wpsEnabled = (wpsEnabled__);
-
-#define CsrWifiNmeApStartReqSendTo(dst__, src__, interfaceTag__, apType__, cloakSsid__, ssid__, ifIndex__, channel__, apCredentials__, maxConnections__, p2pGoParam__, wpsEnabled__) \
- { \
- CsrWifiNmeApStartReq *msg__; \
- CsrWifiNmeApStartReqCreate(msg__, dst__, src__, interfaceTag__, apType__, cloakSsid__, ssid__, ifIndex__, channel__, apCredentials__, maxConnections__, p2pGoParam__, wpsEnabled__); \
- CsrMsgTransport(dst__, CSR_WIFI_NME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiNmeApStartReqSend(src__, interfaceTag__, apType__, cloakSsid__, ssid__, ifIndex__, channel__, apCredentials__, maxConnections__, p2pGoParam__, wpsEnabled__) \
- CsrWifiNmeApStartReqSendTo(CSR_WIFI_NME_IFACEQUEUE, src__, interfaceTag__, apType__, cloakSsid__, ssid__, ifIndex__, channel__, apCredentials__, maxConnections__, p2pGoParam__, wpsEnabled__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApStartCfmSend
-
- DESCRIPTION
- This primitive reports the result of CSR_WIFI_NME_AP_START.request.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface identifier; unique identifier of an interface
- status - Status of the request.
- ssid - Service Set Identifier
-
-*******************************************************************************/
-#define CsrWifiNmeApStartCfmCreate(msg__, dst__, src__, interfaceTag__, status__, ssid__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeApStartCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_AP_PRIM, CSR_WIFI_NME_AP_START_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__); \
- msg__->ssid = (ssid__);
-
-#define CsrWifiNmeApStartCfmSendTo(dst__, src__, interfaceTag__, status__, ssid__) \
- { \
- CsrWifiNmeApStartCfm *msg__; \
- CsrWifiNmeApStartCfmCreate(msg__, dst__, src__, interfaceTag__, status__, ssid__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_NME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiNmeApStartCfmSend(dst__, interfaceTag__, status__, ssid__) \
- CsrWifiNmeApStartCfmSendTo(dst__, CSR_WIFI_NME_IFACEQUEUE, interfaceTag__, status__, ssid__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApStationIndSend
-
- DESCRIPTION
- This primitive indicates that a station has joined or a previously joined
- station has left the BSS/group
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- mediaStatus - Indicates whether the station is connected or
- disconnected
- peerMacAddress - MAC address of the station
- peerDeviceAddress - P2P Device Address
-
-*******************************************************************************/
-#define CsrWifiNmeApStationIndCreate(msg__, dst__, src__, interfaceTag__, mediaStatus__, peerMacAddress__, peerDeviceAddress__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeApStationInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_AP_PRIM, CSR_WIFI_NME_AP_STATION_IND, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->mediaStatus = (mediaStatus__); \
- msg__->peerMacAddress = (peerMacAddress__); \
- msg__->peerDeviceAddress = (peerDeviceAddress__);
-
-#define CsrWifiNmeApStationIndSendTo(dst__, src__, interfaceTag__, mediaStatus__, peerMacAddress__, peerDeviceAddress__) \
- { \
- CsrWifiNmeApStationInd *msg__; \
- CsrWifiNmeApStationIndCreate(msg__, dst__, src__, interfaceTag__, mediaStatus__, peerMacAddress__, peerDeviceAddress__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_NME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiNmeApStationIndSend(dst__, interfaceTag__, mediaStatus__, peerMacAddress__, peerDeviceAddress__) \
- CsrWifiNmeApStationIndSendTo(dst__, CSR_WIFI_NME_IFACEQUEUE, interfaceTag__, mediaStatus__, peerMacAddress__, peerDeviceAddress__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApStopReqSend
-
- DESCRIPTION
- This primitive requests NME to stop the AP operation.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface identifier; unique identifier of an interface
-
-*******************************************************************************/
-#define CsrWifiNmeApStopReqCreate(msg__, dst__, src__, interfaceTag__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeApStopReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_AP_PRIM, CSR_WIFI_NME_AP_STOP_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__);
-
-#define CsrWifiNmeApStopReqSendTo(dst__, src__, interfaceTag__) \
- { \
- CsrWifiNmeApStopReq *msg__; \
- CsrWifiNmeApStopReqCreate(msg__, dst__, src__, interfaceTag__); \
- CsrMsgTransport(dst__, CSR_WIFI_NME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiNmeApStopReqSend(src__, interfaceTag__) \
- CsrWifiNmeApStopReqSendTo(CSR_WIFI_NME_IFACEQUEUE, src__, interfaceTag__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApStopIndSend
-
- DESCRIPTION
-      Indicates that the AP operation has stopped because of an unrecoverable
-      error after it was started successfully. NME sends this signal after
-      failing to restart the AP operation internally following an error.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- apType - Reports AP Type (P2PGO or AP)
- status - Error Status
-
-*******************************************************************************/
-#define CsrWifiNmeApStopIndCreate(msg__, dst__, src__, interfaceTag__, apType__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeApStopInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_AP_PRIM, CSR_WIFI_NME_AP_STOP_IND, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->apType = (apType__); \
- msg__->status = (status__);
-
-#define CsrWifiNmeApStopIndSendTo(dst__, src__, interfaceTag__, apType__, status__) \
- { \
- CsrWifiNmeApStopInd *msg__; \
- CsrWifiNmeApStopIndCreate(msg__, dst__, src__, interfaceTag__, apType__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_NME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiNmeApStopIndSend(dst__, interfaceTag__, apType__, status__) \
- CsrWifiNmeApStopIndSendTo(dst__, CSR_WIFI_NME_IFACEQUEUE, interfaceTag__, apType__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApStopCfmSend
-
- DESCRIPTION
- This primitive confirms that the AP operation is stopped. NME shall send
- this primitive in response to the request even if AP operation has
- already been stopped
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface identifier; unique identifier of an interface
- status - Status of the request.
-
-*******************************************************************************/
-#define CsrWifiNmeApStopCfmCreate(msg__, dst__, src__, interfaceTag__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeApStopCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_AP_PRIM, CSR_WIFI_NME_AP_STOP_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__);
-
-#define CsrWifiNmeApStopCfmSendTo(dst__, src__, interfaceTag__, status__) \
- { \
- CsrWifiNmeApStopCfm *msg__; \
- CsrWifiNmeApStopCfmCreate(msg__, dst__, src__, interfaceTag__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_NME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiNmeApStopCfmSend(dst__, interfaceTag__, status__) \
- CsrWifiNmeApStopCfmSendTo(dst__, CSR_WIFI_NME_IFACEQUEUE, interfaceTag__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApWmmParamUpdateReqSend
-
- DESCRIPTION
- Application uses this primitive to update the WMM parameters
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- wmmApParams - WMM Access point parameters per access category. The array
- index corresponds to the ACI
-      wmmApBcParams - WMM station parameters per access category to be advertised
-                      in the beacons and probe responses. The array index
-                      corresponds to the ACI
-
-*******************************************************************************/
-#define CsrWifiNmeApWmmParamUpdateReqCreate(msg__, dst__, src__, wmmApParams__, wmmApBcParams__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeApWmmParamUpdateReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_AP_PRIM, CSR_WIFI_NME_AP_WMM_PARAM_UPDATE_REQ, dst__, src__); \
- memcpy(msg__->wmmApParams, (wmmApParams__), sizeof(CsrWifiSmeWmmAcParams) * 4); \
- memcpy(msg__->wmmApBcParams, (wmmApBcParams__), sizeof(CsrWifiSmeWmmAcParams) * 4);
-
-#define CsrWifiNmeApWmmParamUpdateReqSendTo(dst__, src__, wmmApParams__, wmmApBcParams__) \
- { \
- CsrWifiNmeApWmmParamUpdateReq *msg__; \
- CsrWifiNmeApWmmParamUpdateReqCreate(msg__, dst__, src__, wmmApParams__, wmmApBcParams__); \
- CsrMsgTransport(dst__, CSR_WIFI_NME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiNmeApWmmParamUpdateReqSend(src__, wmmApParams__, wmmApBcParams__) \
- CsrWifiNmeApWmmParamUpdateReqSendTo(CSR_WIFI_NME_IFACEQUEUE, src__, wmmApParams__, wmmApBcParams__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApWmmParamUpdateCfmSend
-
- DESCRIPTION
-      A confirm for the WMM parameters update.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Status of the request.
-
-*******************************************************************************/
-#define CsrWifiNmeApWmmParamUpdateCfmCreate(msg__, dst__, src__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeApWmmParamUpdateCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_AP_PRIM, CSR_WIFI_NME_AP_WMM_PARAM_UPDATE_CFM, dst__, src__); \
- msg__->status = (status__);
-
-#define CsrWifiNmeApWmmParamUpdateCfmSendTo(dst__, src__, status__) \
- { \
- CsrWifiNmeApWmmParamUpdateCfm *msg__; \
- CsrWifiNmeApWmmParamUpdateCfmCreate(msg__, dst__, src__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_NME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiNmeApWmmParamUpdateCfmSend(dst__, status__) \
- CsrWifiNmeApWmmParamUpdateCfmSendTo(dst__, CSR_WIFI_NME_IFACEQUEUE, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApWpsRegisterReqSend
-
- DESCRIPTION
- This primitive allows the NME to accept the WPS registration from an
-      enrollee. Such a registration procedure can be cancelled by sending
- CSR_WIFI_NME_WPS_CANCEL.request.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an
- interface
- selectedDevicePasswordId - Selected password type
- selectedConfigMethod - Selected WPS configuration method type
-      pin                      - PIN value.
-                                 Relevant if the selected device password ID is
-                                 PIN. A 4-digit PIN is passed by sending the PIN
-                                 digits in pin[0]..pin[3], with the rest of the
-                                 contents filled with '-'.
-
-*******************************************************************************/
-#define CsrWifiNmeApWpsRegisterReqCreate(msg__, dst__, src__, interfaceTag__, selectedDevicePasswordId__, selectedConfigMethod__, pin__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeApWpsRegisterReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_AP_PRIM, CSR_WIFI_NME_AP_WPS_REGISTER_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->selectedDevicePasswordId = (selectedDevicePasswordId__); \
- msg__->selectedConfigMethod = (selectedConfigMethod__); \
- memcpy(msg__->pin, (pin__), sizeof(u8) * 8);
-
-#define CsrWifiNmeApWpsRegisterReqSendTo(dst__, src__, interfaceTag__, selectedDevicePasswordId__, selectedConfigMethod__, pin__) \
- { \
- CsrWifiNmeApWpsRegisterReq *msg__; \
- CsrWifiNmeApWpsRegisterReqCreate(msg__, dst__, src__, interfaceTag__, selectedDevicePasswordId__, selectedConfigMethod__, pin__); \
- CsrMsgTransport(dst__, CSR_WIFI_NME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiNmeApWpsRegisterReqSend(src__, interfaceTag__, selectedDevicePasswordId__, selectedConfigMethod__, pin__) \
- CsrWifiNmeApWpsRegisterReqSendTo(CSR_WIFI_NME_IFACEQUEUE, src__, interfaceTag__, selectedDevicePasswordId__, selectedConfigMethod__, pin__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApWpsRegisterCfmSend
-
- DESCRIPTION
- This primitive reports the result of WPS procedure.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface identifier; unique identifier of an interface
- status - Status of the request.
-
-*******************************************************************************/
-#define CsrWifiNmeApWpsRegisterCfmCreate(msg__, dst__, src__, interfaceTag__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeApWpsRegisterCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_AP_PRIM, CSR_WIFI_NME_AP_WPS_REGISTER_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__);
-
-#define CsrWifiNmeApWpsRegisterCfmSendTo(dst__, src__, interfaceTag__, status__) \
- { \
- CsrWifiNmeApWpsRegisterCfm *msg__; \
- CsrWifiNmeApWpsRegisterCfmCreate(msg__, dst__, src__, interfaceTag__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_NME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiNmeApWpsRegisterCfmSend(dst__, interfaceTag__, status__) \
- CsrWifiNmeApWpsRegisterCfmSendTo(dst__, CSR_WIFI_NME_IFACEQUEUE, interfaceTag__, status__)
-
-#endif /* CSR_WIFI_NME_AP_LIB_H__ */
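
The Send macros above bundle allocation (kmalloc), header initialisation
(CsrWifiFsmEventInit) and queueing (CsrMsgTransport or CsrSchedMessagePut)
into a single statement. As a hedged usage sketch - the queue id, interface
tag and MAC address below are placeholders, not values taken from this
patch - removing and blocking a station would look like:

    /* Sketch only: CsrWifiMacAddress exposes its octets as 'a[6]', as used
     * by the serialisation code elsewhere in this patch. */
    static void example_block_station(CsrSchedQid my_queue, u16 interface_tag)
    {
        static const u8 example_mac[6] = { 0x00, 0x02, 0x5b, 0x00, 0x00, 0x01 };
        CsrWifiMacAddress sta;

        memcpy(sta.a, example_mac, sizeof(sta.a));
        /* keepBlocking = 1: disconnect the station and refuse reconnection */
        CsrWifiNmeApStaRemoveReqSend(my_queue, interface_tag, sta, 1);
    }
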
diff --git a/drivers/staging/csr/csr_wifi_nme_ap_prim.h b/drivers/staging/csr/csr_wifi_nme_ap_prim.h
deleted file mode 100644
index b32bdbc..0000000
--- a/drivers/staging/csr/csr_wifi_nme_ap_prim.h
+++ /dev/null
@@ -1,494 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#ifndef CSR_WIFI_NME_AP_PRIM_H__
-#define CSR_WIFI_NME_AP_PRIM_H__
-
-#include <linux/types.h>
-#include "csr_prim_defs.h"
-#include "csr_sched.h"
-#include "csr_wifi_common.h"
-#include "csr_result.h"
-#include "csr_wifi_fsm_event.h"
-#include "csr_wifi_sme_ap_prim.h"
-#include "csr_wifi_nme_prim.h"
-
-#ifndef CSR_WIFI_NME_ENABLE
-#error CSR_WIFI_NME_ENABLE MUST be defined in order to use csr_wifi_nme_ap_prim.h
-#endif
-#ifndef CSR_WIFI_AP_ENABLE
-#error CSR_WIFI_AP_ENABLE MUST be defined in order to use csr_wifi_nme_ap_prim.h
-#endif
-
-#define CSR_WIFI_NME_AP_PRIM (0x0426)
-
-typedef CsrPrim CsrWifiNmeApPrim;
-
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApPersCredentialType
-
- DESCRIPTION
- NME Credential Types
-
- VALUES
- CSR_WIFI_NME_AP_CREDENTIAL_TYPE_PSK
- - Use PSK as credential.
- CSR_WIFI_NME_AP_CREDENTIAL_TYPE_PASSPHRASE
- - Use the specified passphrase as credential
-
-*******************************************************************************/
-typedef u8 CsrWifiNmeApPersCredentialType;
-#define CSR_WIFI_NME_AP_CREDENTIAL_TYPE_PSK ((CsrWifiNmeApPersCredentialType) 0x00)
-#define CSR_WIFI_NME_AP_CREDENTIAL_TYPE_PASSPHRASE ((CsrWifiNmeApPersCredentialType) 0x01)
-
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApConfig
-
- DESCRIPTION
- Structure holding AP config data.
-
- MEMBERS
- apGroupkeyTimeout - Access point group key timeout.
-    apStrictGtkRekey   - Access point strict GTK rekey flag. If set to TRUE, the
-                         AP shall rekey the GTK every time a connected STA leaves the BSS.
- apGmkTimeout - Access point GMK timeout
- apResponseTimeout - Response timeout
- apRetransLimit - Max allowed retransmissions
-
-*******************************************************************************/
-typedef struct
-{
- u16 apGroupkeyTimeout;
- u8 apStrictGtkRekey;
- u16 apGmkTimeout;
- u16 apResponseTimeout;
- u8 apRetransLimit;
-} CsrWifiNmeApConfig;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApAuthPers
-
- DESCRIPTION
-        Structure holding the personal (PSK or passphrase) authentication parameters.
- MEMBERS
-    authSupport          - Authentication support mask (as defined in the
-                           enumeration type).
- rsnCapabilities - RSN capabilities mask
- wapiCapabilities - WAPI capabilities mask
- pskOrPassphrase - Credential type value (as defined in the
- enumeration type).
-    authPers_credentials - Union containing the credentials; which member is
-                           valid depends on the pskOrPassphrase parameter.
- authPers_credentialspsk -
- authPers_credentialspassphrase -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiSmeApAuthSupportMask authSupport;
- CsrWifiSmeApRsnCapabilitiesMask rsnCapabilities;
- CsrWifiSmeApWapiCapabilitiesMask wapiCapabilities;
- CsrWifiNmeApPersCredentialType pskOrPassphrase;
- union {
- CsrWifiNmePsk psk;
- CsrWifiNmePassphrase passphrase;
- } authPers_credentials;
-} CsrWifiNmeApAuthPers;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApCredentials
-
- DESCRIPTION
- Structure containing the Credentials data.
-
- MEMBERS
- authType - Authentication type
- nmeAuthType - Authentication parameters
- nmeAuthTypeopenSystemEmpty -
- nmeAuthTypeauthwep -
- nmeAuthTypeauthTypePersonal -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiSmeApAuthType authType;
- union {
- CsrWifiSmeEmpty openSystemEmpty;
- CsrWifiSmeWepAuth authwep;
- CsrWifiNmeApAuthPers authTypePersonal;
- } nmeAuthType;
-} CsrWifiNmeApCredentials;
-
-
-/* Downstream */
-#define CSR_WIFI_NME_AP_PRIM_DOWNSTREAM_LOWEST (0x0000)
-
-#define CSR_WIFI_NME_AP_CONFIG_SET_REQ ((CsrWifiNmeApPrim) (0x0000 + CSR_WIFI_NME_AP_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_NME_AP_WPS_REGISTER_REQ ((CsrWifiNmeApPrim) (0x0001 + CSR_WIFI_NME_AP_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_NME_AP_START_REQ ((CsrWifiNmeApPrim) (0x0002 + CSR_WIFI_NME_AP_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_NME_AP_STOP_REQ ((CsrWifiNmeApPrim) (0x0003 + CSR_WIFI_NME_AP_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_NME_AP_WMM_PARAM_UPDATE_REQ ((CsrWifiNmeApPrim) (0x0004 + CSR_WIFI_NME_AP_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_NME_AP_STA_REMOVE_REQ ((CsrWifiNmeApPrim) (0x0005 + CSR_WIFI_NME_AP_PRIM_DOWNSTREAM_LOWEST))
-
-
-#define CSR_WIFI_NME_AP_PRIM_DOWNSTREAM_HIGHEST (0x0005 + CSR_WIFI_NME_AP_PRIM_DOWNSTREAM_LOWEST)
-
-/* Upstream */
-#define CSR_WIFI_NME_AP_PRIM_UPSTREAM_LOWEST (0x0000 + CSR_PRIM_UPSTREAM)
-
-#define CSR_WIFI_NME_AP_CONFIG_SET_CFM ((CsrWifiNmeApPrim)(0x0000 + CSR_WIFI_NME_AP_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_NME_AP_WPS_REGISTER_CFM ((CsrWifiNmeApPrim)(0x0001 + CSR_WIFI_NME_AP_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_NME_AP_START_CFM ((CsrWifiNmeApPrim)(0x0002 + CSR_WIFI_NME_AP_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_NME_AP_STOP_CFM ((CsrWifiNmeApPrim)(0x0003 + CSR_WIFI_NME_AP_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_NME_AP_STOP_IND ((CsrWifiNmeApPrim)(0x0004 + CSR_WIFI_NME_AP_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_NME_AP_WMM_PARAM_UPDATE_CFM ((CsrWifiNmeApPrim)(0x0005 + CSR_WIFI_NME_AP_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_NME_AP_STATION_IND ((CsrWifiNmeApPrim)(0x0006 + CSR_WIFI_NME_AP_PRIM_UPSTREAM_LOWEST))
-
-#define CSR_WIFI_NME_AP_PRIM_UPSTREAM_HIGHEST (0x0006 + CSR_WIFI_NME_AP_PRIM_UPSTREAM_LOWEST)
-
-#define CSR_WIFI_NME_AP_PRIM_DOWNSTREAM_COUNT (CSR_WIFI_NME_AP_PRIM_DOWNSTREAM_HIGHEST + 1 - CSR_WIFI_NME_AP_PRIM_DOWNSTREAM_LOWEST)
-#define CSR_WIFI_NME_AP_PRIM_UPSTREAM_COUNT (CSR_WIFI_NME_AP_PRIM_UPSTREAM_HIGHEST + 1 - CSR_WIFI_NME_AP_PRIM_UPSTREAM_LOWEST)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApConfigSetReq
-
- DESCRIPTION
- This primitive passes AP configuration info for NME. This can be sent at
- any time but will be acted upon when the AP is started again. This
- information is common to both P2P GO and AP
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- apConfig - AP configuration for the NME.
- apMacConfig - MAC configuration to be acted on when
- CSR_WIFI_NME_AP_START.request is sent.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiNmeApConfig apConfig;
- CsrWifiSmeApMacConfig apMacConfig;
-} CsrWifiNmeApConfigSetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApWpsRegisterReq
-
- DESCRIPTION
- This primitive allows the NME to accept the WPS registration from an
-    enrollee. Such a registration procedure can be cancelled by sending
- CSR_WIFI_NME_WPS_CANCEL.request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an
- interface
- selectedDevicePasswordId - Selected password type
- selectedConfigMethod - Selected WPS configuration method type
-    pin                      - PIN value.
-                               Relevant if the selected device password ID is
-                               PIN. A 4-digit PIN is passed by sending the PIN
-                               digits in pin[0]..pin[3], with the rest of the
-                               contents filled with '-'.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiSmeWpsDpid selectedDevicePasswordId;
- CsrWifiSmeWpsConfigType selectedConfigMethod;
- u8 pin[8];
-} CsrWifiNmeApWpsRegisterReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApStartReq
-
- DESCRIPTION
-    This primitive requests NME to start the AP operation.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface identifier; unique identifier of an interface
- apType - AP Type specifies the Legacy AP or P2P GO operation
- cloakSsid - Indicates whether the SSID should be cloaked (hidden and
- not broadcast in beacon) or not
- ssid - Service Set Identifier
- ifIndex - Radio interface
- channel - Channel number of the channel to use
- apCredentials - Security credential configuration.
- maxConnections - Maximum number of stations/P2P clients allowed
- p2pGoParam - P2P specific GO parameters.
- wpsEnabled - Indicates whether WPS should be enabled or not
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiSmeApType apType;
- u8 cloakSsid;
- CsrWifiSsid ssid;
- CsrWifiSmeRadioIF ifIndex;
- u8 channel;
- CsrWifiNmeApCredentials apCredentials;
- u8 maxConnections;
- CsrWifiSmeApP2pGoConfig p2pGoParam;
- u8 wpsEnabled;
-} CsrWifiNmeApStartReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApStopReq
-
- DESCRIPTION
- This primitive requests NME to stop the AP operation.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface identifier; unique identifier of an interface
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
-} CsrWifiNmeApStopReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApWmmParamUpdateReq
-
- DESCRIPTION
- Application uses this primitive to update the WMM parameters
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- wmmApParams - WMM Access point parameters per access category. The array
- index corresponds to the ACI
-    wmmApBcParams - WMM station parameters per access category to be advertised
-                    in the beacons and probe responses. The array index
-                    corresponds to the ACI
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiSmeWmmAcParams wmmApParams[4];
- CsrWifiSmeWmmAcParams wmmApBcParams[4];
-} CsrWifiNmeApWmmParamUpdateReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApStaRemoveReq
-
- DESCRIPTION
- This primitive disconnects a connected station. If keepBlocking is set to
- TRUE, the station with the specified MAC address is not allowed to
-    connect. If the requested station is not already connected, it may be
-    blocked based on the keepBlocking parameter.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- staMacAddress - Mac Address of the station to be disconnected or blocked
- keepBlocking - If TRUE, the station is blocked. If FALSE and the station is
- connected, disconnect the station. If FALSE and the station
- is not connected, no action is taken.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiMacAddress staMacAddress;
- u8 keepBlocking;
-} CsrWifiNmeApStaRemoveReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApConfigSetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Status of the request.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
-} CsrWifiNmeApConfigSetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApWpsRegisterCfm
-
- DESCRIPTION
- This primitive reports the result of WPS procedure.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface identifier; unique identifier of an interface
- status - Status of the request.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
-} CsrWifiNmeApWpsRegisterCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApStartCfm
-
- DESCRIPTION
- This primitive reports the result of CSR_WIFI_NME_AP_START.request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface identifier; unique identifier of an interface
- status - Status of the request.
- ssid - Service Set Identifier
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
- CsrWifiSsid ssid;
-} CsrWifiNmeApStartCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApStopCfm
-
- DESCRIPTION
- This primitive confirms that the AP operation is stopped. NME shall send
- this primitive in response to the request even if AP operation has
- already been stopped
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface identifier; unique identifier of an interface
- status - Status of the request.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
-} CsrWifiNmeApStopCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApStopInd
-
- DESCRIPTION
-    Indicates that the AP operation has stopped because of an unrecoverable
-    error after it was started successfully. NME sends this signal after
-    failing to restart the AP operation internally following an error.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- apType - Reports AP Type (P2PGO or AP)
- status - Error Status
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiSmeApType apType;
- CsrResult status;
-} CsrWifiNmeApStopInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApWmmParamUpdateCfm
-
- DESCRIPTION
-    A confirm for the WMM parameters update.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Status of the request.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
-} CsrWifiNmeApWmmParamUpdateCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeApStationInd
-
- DESCRIPTION
- This primitive indicates that a station has joined or a previously joined
- station has left the BSS/group
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- mediaStatus - Indicates whether the station is connected or
- disconnected
- peerMacAddress - MAC address of the station
- peerDeviceAddress - P2P Device Address
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiSmeMediaStatus mediaStatus;
- CsrWifiMacAddress peerMacAddress;
- CsrWifiMacAddress peerDeviceAddress;
-} CsrWifiNmeApStationInd;
-
-#endif /* CSR_WIFI_NME_AP_PRIM_H__ */
-
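
The count macros above resolve to six downstream and seven upstream
primitives, matching the thirteen populated rows of the converter LUT removed
earlier in this patch and the index arithmetic in
CsrWifiNmeApConverterLookup(). A compile-time check along these lines (not
present in the original code; BUILD_BUG_ON comes from <linux/bug.h>) would
document that dependency:

    /* Sketch only: fails the build if the primitive ranges ever change
     * without the converter LUT being regenerated to match. */
    static inline void csr_wifi_nme_ap_prim_count_check(void)
    {
        BUILD_BUG_ON(CSR_WIFI_NME_AP_PRIM_DOWNSTREAM_COUNT != 6);
        BUILD_BUG_ON(CSR_WIFI_NME_AP_PRIM_UPSTREAM_COUNT != 7);
    }
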
diff --git a/drivers/staging/csr/csr_wifi_nme_ap_sef.c b/drivers/staging/csr/csr_wifi_nme_ap_sef.c
deleted file mode 100644
index e048848..0000000
--- a/drivers/staging/csr/csr_wifi_nme_ap_sef.c
+++ /dev/null
@@ -1,30 +0,0 @@
-/*****************************************************************************
-
- FILE: csr_wifi_nme_sef.c
-
- (c) Cambridge Silicon Radio Limited 2010
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
- *****************************************************************************/
-#include "csr_wifi_nme_ap_sef.h"
-#include "unifi_priv.h"
-
-void CsrWifiNmeApUpstreamStateHandlers(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- switch(msg->type) {
- case CSR_WIFI_NME_AP_START_CFM:
- CsrWifiNmeApStartCfmHandler(drvpriv, msg);
- break;
- case CSR_WIFI_NME_AP_STOP_CFM:
- CsrWifiNmeApStopCfmHandler(drvpriv, msg);
- break;
- case CSR_WIFI_NME_AP_CONFIG_SET_CFM:
- CsrWifiNmeApConfigSetCfmHandler(drvpriv,msg);
- break;
- default:
- unifi_error(drvpriv, "CsrWifiNmeApUpstreamStateHandlers: unhandled NME_AP message type 0x%.4X\n",msg->type);
- break;
- }
-}
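
The dispatcher above hands each confirm to a handler implemented elsewhere in
the driver; those bodies are not part of this hunk. Purely to illustrate the
contract (the body below is a placeholder, not the removed implementation), a
handler casts the generic FSM event back to its primitive type:

    /* Sketch only: field names follow CsrWifiNmeApStartCfm as defined in
     * csr_wifi_nme_ap_prim.h; CSR_RESULT_SUCCESS comes from csr_result.h. */
    void CsrWifiNmeApStartCfmHandler(void *drvpriv, CsrWifiFsmEvent *msg)
    {
        CsrWifiNmeApStartCfm *cfm = (CsrWifiNmeApStartCfm *) msg;

        if (cfm->status != CSR_RESULT_SUCCESS)
            unifi_error(drvpriv,
                        "AP start failed on interface %u (status 0x%04x)\n",
                        cfm->interfaceTag, cfm->status);
    }
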
diff --git a/drivers/staging/csr/csr_wifi_nme_ap_sef.h b/drivers/staging/csr/csr_wifi_nme_ap_sef.h
deleted file mode 100644
index 3daaa09..0000000
--- a/drivers/staging/csr/csr_wifi_nme_ap_sef.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*****************************************************************************
- FILE: csr_wifi_nme_sef.h
- (c) Cambridge Silicon Radio Limited 2010
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-#ifndef CSR_WIFI_ROUTER_SEF_CSR_WIFI_NME_H__
-#define CSR_WIFI_ROUTER_SEF_CSR_WIFI_NME_H__
-
-#include "csr_wifi_nme_prim.h"
-
-void CsrWifiNmeApUpstreamStateHandlers(void* drvpriv, CsrWifiFsmEvent* msg);
-
-
-extern void CsrWifiNmeApConfigSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg);
-extern void CsrWifiNmeApStartCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg);
-extern void CsrWifiNmeApStopCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg);
-
-#endif /* CSR_WIFI_ROUTER_SEF_CSR_WIFI_NME_H__ */
diff --git a/drivers/staging/csr/csr_wifi_nme_ap_serialize.c b/drivers/staging/csr/csr_wifi_nme_ap_serialize.c
deleted file mode 100644
index 1a901a7..0000000
--- a/drivers/staging/csr/csr_wifi_nme_ap_serialize.c
+++ /dev/null
@@ -1,909 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-#include <linux/string.h>
-#include <linux/slab.h>
-#include "csr_msgconv.h"
-
-#ifdef CSR_WIFI_NME_ENABLE
-#ifdef CSR_WIFI_AP_ENABLE
-
-#include "csr_wifi_nme_ap_prim.h"
-#include "csr_wifi_nme_ap_serialize.h"
-
-void CsrWifiNmeApPfree(void *ptr)
-{
- kfree(ptr);
-}
-
-
-size_t CsrWifiNmeApConfigSetReqSizeof(void *msg)
-{
- CsrWifiNmeApConfigSetReq *primitive = (CsrWifiNmeApConfigSetReq *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 104) */
- bufferSize += 2; /* u16 primitive->apConfig.apGroupkeyTimeout */
- bufferSize += 1; /* u8 primitive->apConfig.apStrictGtkRekey */
- bufferSize += 2; /* u16 primitive->apConfig.apGmkTimeout */
- bufferSize += 2; /* u16 primitive->apConfig.apResponseTimeout */
- bufferSize += 1; /* u8 primitive->apConfig.apRetransLimit */
- bufferSize += 1; /* CsrWifiSmeApPhySupportMask primitive->apMacConfig.phySupportedBitmap */
- bufferSize += 2; /* u16 primitive->apMacConfig.beaconInterval */
- bufferSize += 1; /* u8 primitive->apMacConfig.dtimPeriod */
- bufferSize += 2; /* u16 primitive->apMacConfig.maxListenInterval */
- bufferSize += 1; /* u8 primitive->apMacConfig.supportedRatesCount */
- bufferSize += 20; /* u8 primitive->apMacConfig.supportedRates[20] */
- bufferSize += 1; /* CsrWifiSmePreambleType primitive->apMacConfig.preamble */
- bufferSize += 1; /* u8 primitive->apMacConfig.shortSlotTimeEnabled */
- bufferSize += 1; /* CsrWifiSmeCtsProtectionType primitive->apMacConfig.ctsProtectionType */
- bufferSize += 1; /* u8 primitive->apMacConfig.wmmEnabled */
- {
- u16 i2;
- for (i2 = 0; i2 < 4; i2++)
- {
- bufferSize += 1; /* u8 primitive->apMacConfig.wmmApParams[i2].cwMin */
- bufferSize += 1; /* u8 primitive->apMacConfig.wmmApParams[i2].cwMax */
- bufferSize += 1; /* u8 primitive->apMacConfig.wmmApParams[i2].aifs */
- bufferSize += 2; /* u16 primitive->apMacConfig.wmmApParams[i2].txopLimit */
- bufferSize += 1; /* u8 primitive->apMacConfig.wmmApParams[i2].admissionControlMandatory */
- }
- }
- {
- u16 i2;
- for (i2 = 0; i2 < 4; i2++)
- {
- bufferSize += 1; /* u8 primitive->apMacConfig.wmmApBcParams[i2].cwMin */
- bufferSize += 1; /* u8 primitive->apMacConfig.wmmApBcParams[i2].cwMax */
- bufferSize += 1; /* u8 primitive->apMacConfig.wmmApBcParams[i2].aifs */
- bufferSize += 2; /* u16 primitive->apMacConfig.wmmApBcParams[i2].txopLimit */
- bufferSize += 1; /* u8 primitive->apMacConfig.wmmApBcParams[i2].admissionControlMandatory */
- }
- }
- bufferSize += 1; /* CsrWifiSmeApAccessType primitive->apMacConfig.accessType */
- bufferSize += 1; /* u8 primitive->apMacConfig.macAddressListCount */
- {
- u16 i2;
- for (i2 = 0; i2 < primitive->apMacConfig.macAddressListCount; i2++)
- {
- bufferSize += 6; /* u8 primitive->apMacConfig.macAddressList[i2].a[6] */
- }
- }
- bufferSize += 1; /* u8 primitive->apMacConfig.apHtParams.greenfieldSupported */
- bufferSize += 1; /* u8 primitive->apMacConfig.apHtParams.shortGi20MHz */
- bufferSize += 1; /* u8 primitive->apMacConfig.apHtParams.rxStbc */
- bufferSize += 1; /* u8 primitive->apMacConfig.apHtParams.rifsModeAllowed */
- bufferSize += 1; /* u8 primitive->apMacConfig.apHtParams.htProtection */
- bufferSize += 1; /* u8 primitive->apMacConfig.apHtParams.dualCtsProtection */
- return bufferSize;
-}
-
-
-u8* CsrWifiNmeApConfigSetReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiNmeApConfigSetReq *primitive = (CsrWifiNmeApConfigSetReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->apConfig.apGroupkeyTimeout);
- CsrUint8Ser(ptr, len, (u8) primitive->apConfig.apStrictGtkRekey);
- CsrUint16Ser(ptr, len, (u16) primitive->apConfig.apGmkTimeout);
- CsrUint16Ser(ptr, len, (u16) primitive->apConfig.apResponseTimeout);
- CsrUint8Ser(ptr, len, (u8) primitive->apConfig.apRetransLimit);
- CsrUint8Ser(ptr, len, (u8) primitive->apMacConfig.phySupportedBitmap);
- CsrUint16Ser(ptr, len, (u16) primitive->apMacConfig.beaconInterval);
- CsrUint8Ser(ptr, len, (u8) primitive->apMacConfig.dtimPeriod);
- CsrUint16Ser(ptr, len, (u16) primitive->apMacConfig.maxListenInterval);
- CsrUint8Ser(ptr, len, (u8) primitive->apMacConfig.supportedRatesCount);
- CsrMemCpySer(ptr, len, (const void *) primitive->apMacConfig.supportedRates, ((u16) (20)));
- CsrUint8Ser(ptr, len, (u8) primitive->apMacConfig.preamble);
- CsrUint8Ser(ptr, len, (u8) primitive->apMacConfig.shortSlotTimeEnabled);
- CsrUint8Ser(ptr, len, (u8) primitive->apMacConfig.ctsProtectionType);
- CsrUint8Ser(ptr, len, (u8) primitive->apMacConfig.wmmEnabled);
- {
- u16 i2;
- for (i2 = 0; i2 < 4; i2++)
- {
- CsrUint8Ser(ptr, len, (u8) primitive->apMacConfig.wmmApParams[i2].cwMin);
- CsrUint8Ser(ptr, len, (u8) primitive->apMacConfig.wmmApParams[i2].cwMax);
- CsrUint8Ser(ptr, len, (u8) primitive->apMacConfig.wmmApParams[i2].aifs);
- CsrUint16Ser(ptr, len, (u16) primitive->apMacConfig.wmmApParams[i2].txopLimit);
- CsrUint8Ser(ptr, len, (u8) primitive->apMacConfig.wmmApParams[i2].admissionControlMandatory);
- }
- }
- {
- u16 i2;
- for (i2 = 0; i2 < 4; i2++)
- {
- CsrUint8Ser(ptr, len, (u8) primitive->apMacConfig.wmmApBcParams[i2].cwMin);
- CsrUint8Ser(ptr, len, (u8) primitive->apMacConfig.wmmApBcParams[i2].cwMax);
- CsrUint8Ser(ptr, len, (u8) primitive->apMacConfig.wmmApBcParams[i2].aifs);
- CsrUint16Ser(ptr, len, (u16) primitive->apMacConfig.wmmApBcParams[i2].txopLimit);
- CsrUint8Ser(ptr, len, (u8) primitive->apMacConfig.wmmApBcParams[i2].admissionControlMandatory);
- }
- }
- CsrUint8Ser(ptr, len, (u8) primitive->apMacConfig.accessType);
- CsrUint8Ser(ptr, len, (u8) primitive->apMacConfig.macAddressListCount);
- {
- u16 i2;
- for (i2 = 0; i2 < primitive->apMacConfig.macAddressListCount; i2++)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->apMacConfig.macAddressList[i2].a, ((u16) (6)));
- }
- }
- CsrUint8Ser(ptr, len, (u8) primitive->apMacConfig.apHtParams.greenfieldSupported);
- CsrUint8Ser(ptr, len, (u8) primitive->apMacConfig.apHtParams.shortGi20MHz);
- CsrUint8Ser(ptr, len, (u8) primitive->apMacConfig.apHtParams.rxStbc);
- CsrUint8Ser(ptr, len, (u8) primitive->apMacConfig.apHtParams.rifsModeAllowed);
- CsrUint8Ser(ptr, len, (u8) primitive->apMacConfig.apHtParams.htProtection);
- CsrUint8Ser(ptr, len, (u8) primitive->apMacConfig.apHtParams.dualCtsProtection);
- return(ptr);
-}
-
-
-void* CsrWifiNmeApConfigSetReqDes(u8 *buffer, size_t length)
-{
- CsrWifiNmeApConfigSetReq *primitive = kmalloc(sizeof(CsrWifiNmeApConfigSetReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->apConfig.apGroupkeyTimeout, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->apConfig.apStrictGtkRekey, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->apConfig.apGmkTimeout, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->apConfig.apResponseTimeout, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->apConfig.apRetransLimit, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->apMacConfig.phySupportedBitmap, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->apMacConfig.beaconInterval, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->apMacConfig.dtimPeriod, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->apMacConfig.maxListenInterval, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->apMacConfig.supportedRatesCount, buffer, &offset);
- CsrMemCpyDes(primitive->apMacConfig.supportedRates, buffer, &offset, ((u16) (20)));
- CsrUint8Des((u8 *) &primitive->apMacConfig.preamble, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->apMacConfig.shortSlotTimeEnabled, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->apMacConfig.ctsProtectionType, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->apMacConfig.wmmEnabled, buffer, &offset);
- {
- u16 i2;
- for (i2 = 0; i2 < 4; i2++)
- {
- CsrUint8Des((u8 *) &primitive->apMacConfig.wmmApParams[i2].cwMin, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->apMacConfig.wmmApParams[i2].cwMax, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->apMacConfig.wmmApParams[i2].aifs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->apMacConfig.wmmApParams[i2].txopLimit, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->apMacConfig.wmmApParams[i2].admissionControlMandatory, buffer, &offset);
- }
- }
- {
- u16 i2;
- for (i2 = 0; i2 < 4; i2++)
- {
- CsrUint8Des((u8 *) &primitive->apMacConfig.wmmApBcParams[i2].cwMin, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->apMacConfig.wmmApBcParams[i2].cwMax, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->apMacConfig.wmmApBcParams[i2].aifs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->apMacConfig.wmmApBcParams[i2].txopLimit, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->apMacConfig.wmmApBcParams[i2].admissionControlMandatory, buffer, &offset);
- }
- }
- CsrUint8Des((u8 *) &primitive->apMacConfig.accessType, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->apMacConfig.macAddressListCount, buffer, &offset);
- primitive->apMacConfig.macAddressList = NULL;
- if (primitive->apMacConfig.macAddressListCount)
- {
- primitive->apMacConfig.macAddressList = kmalloc(sizeof(CsrWifiMacAddress) * primitive->apMacConfig.macAddressListCount, GFP_KERNEL);
- }
- {
- u16 i2;
- for (i2 = 0; i2 < primitive->apMacConfig.macAddressListCount; i2++)
- {
- CsrMemCpyDes(primitive->apMacConfig.macAddressList[i2].a, buffer, &offset, ((u16) (6)));
- }
- }
- CsrUint8Des((u8 *) &primitive->apMacConfig.apHtParams.greenfieldSupported, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->apMacConfig.apHtParams.shortGi20MHz, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->apMacConfig.apHtParams.rxStbc, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->apMacConfig.apHtParams.rifsModeAllowed, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->apMacConfig.apHtParams.htProtection, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->apMacConfig.apHtParams.dualCtsProtection, buffer, &offset);
-
- return primitive;
-}
-
-
-void CsrWifiNmeApConfigSetReqSerFree(void *voidPrimitivePointer)
-{
- CsrWifiNmeApConfigSetReq *primitive = (CsrWifiNmeApConfigSetReq *) voidPrimitivePointer;
- kfree(primitive->apMacConfig.macAddressList);
- kfree(primitive);
-}
-
-
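The serialiser/deserialiser pair above shows the count-prefixed list convention used throughout these files: macAddressListCount is written as a single byte, followed by that many 6-byte entries; the deserialiser kmalloc()s the array only when the count is non-zero, and the matching SerFree releases it in one call. Below is a minimal stand-alone sketch of the same convention, with a hypothetical mac_addr_t/mac_list_t pair and plain malloc()/free() standing in for the kernel allocators.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct { uint8_t a[6]; } mac_addr_t;       /* hypothetical address type */

typedef struct {
    uint8_t     count;
    mac_addr_t *list;                              /* NULL when count == 0 */
} mac_list_t;

/* Deserialise: read the one-byte count, allocate, then copy count * 6 bytes. */
static int mac_list_des(mac_list_t *out, const uint8_t *buf, size_t *offset)
{
    out->count = buf[(*offset)++];
    out->list  = NULL;
    if (out->count) {
        out->list = malloc(sizeof(*out->list) * out->count);
        if (!out->list)
            return -1;
        memcpy(out->list, buf + *offset, (size_t)out->count * 6);
        *offset += (size_t)out->count * 6;
    }
    return 0;
}

/* Matching free: a single release for the whole array, as in ...SerFree above. */
static void mac_list_free(mac_list_t *l)
{
    free(l->list);
    l->list  = NULL;
    l->count = 0;
}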
-size_t CsrWifiNmeApWpsRegisterReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 17) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrWifiSmeWpsDpid primitive->selectedDevicePasswordId */
- bufferSize += 2; /* CsrWifiSmeWpsConfigType primitive->selectedConfigMethod */
- bufferSize += 8; /* u8 primitive->pin[8] */
- return bufferSize;
-}
-
-
-u8* CsrWifiNmeApWpsRegisterReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiNmeApWpsRegisterReq *primitive = (CsrWifiNmeApWpsRegisterReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->selectedDevicePasswordId);
- CsrUint16Ser(ptr, len, (u16) primitive->selectedConfigMethod);
- CsrMemCpySer(ptr, len, (const void *) primitive->pin, ((u16) (8)));
- return(ptr);
-}
-
-
-void* CsrWifiNmeApWpsRegisterReqDes(u8 *buffer, size_t length)
-{
- CsrWifiNmeApWpsRegisterReq *primitive = kmalloc(sizeof(CsrWifiNmeApWpsRegisterReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->selectedDevicePasswordId, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->selectedConfigMethod, buffer, &offset);
- CsrMemCpyDes(primitive->pin, buffer, &offset, ((u16) (8)));
-
- return primitive;
-}
-
-
-size_t CsrWifiNmeApStartReqSizeof(void *msg)
-{
- CsrWifiNmeApStartReq *primitive = (CsrWifiNmeApStartReq *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 112) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 1; /* CsrWifiSmeApType primitive->apType */
- bufferSize += 1; /* u8 primitive->cloakSsid */
- bufferSize += 32; /* u8 primitive->ssid.ssid[32] */
- bufferSize += 1; /* u8 primitive->ssid.length */
- bufferSize += 1; /* CsrWifiSmeRadioIF primitive->ifIndex */
- bufferSize += 1; /* u8 primitive->channel */
- bufferSize += 1; /* CsrWifiSmeApAuthType primitive->apCredentials.authType */
- switch (primitive->apCredentials.authType)
- {
- case CSR_WIFI_SME_AP_AUTH_TYPE_OPEN_SYSTEM:
- bufferSize += 1; /* u8 primitive->apCredentials.nmeAuthType.openSystemEmpty.empty */
- break;
- case CSR_WIFI_SME_AP_AUTH_TYPE_WEP:
- bufferSize += 1; /* CsrWifiSmeWepCredentialType primitive->apCredentials.nmeAuthType.authwep.wepKeyType */
- switch (primitive->apCredentials.nmeAuthType.authwep.wepKeyType)
- {
- case CSR_WIFI_SME_CREDENTIAL_TYPE_WEP128:
- bufferSize += 1; /* CsrWifiSmeWepAuthMode primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep128Key.wepAuthType */
- bufferSize += 1; /* u8 primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep128Key.selectedWepKey */
- bufferSize += 13; /* u8 primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep128Key.key1[13] */
- bufferSize += 13; /* u8 primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep128Key.key2[13] */
- bufferSize += 13; /* u8 primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep128Key.key3[13] */
- bufferSize += 13; /* u8 primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep128Key.key4[13] */
- break;
- case CSR_WIFI_SME_CREDENTIAL_TYPE_WEP64:
- bufferSize += 1; /* CsrWifiSmeWepAuthMode primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep64Key.wepAuthType */
- bufferSize += 1; /* u8 primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep64Key.selectedWepKey */
- bufferSize += 5; /* u8 primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep64Key.key1[5] */
- bufferSize += 5; /* u8 primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep64Key.key2[5] */
- bufferSize += 5; /* u8 primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep64Key.key3[5] */
- bufferSize += 5; /* u8 primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep64Key.key4[5] */
- break;
- default:
- break;
- }
- break;
- case CSR_WIFI_SME_AP_AUTH_TYPE_PERSONAL:
- bufferSize += 1; /* CsrWifiSmeApAuthSupportMask primitive->apCredentials.nmeAuthType.authTypePersonal.authSupport */
- bufferSize += 2; /* CsrWifiSmeApRsnCapabilitiesMask primitive->apCredentials.nmeAuthType.authTypePersonal.rsnCapabilities */
- bufferSize += 2; /* CsrWifiSmeApWapiCapabilitiesMask primitive->apCredentials.nmeAuthType.authTypePersonal.wapiCapabilities */
- bufferSize += 1; /* CsrWifiNmeApPersCredentialType primitive->apCredentials.nmeAuthType.authTypePersonal.pskOrPassphrase */
- switch (primitive->apCredentials.nmeAuthType.authTypePersonal.pskOrPassphrase)
- {
- case CSR_WIFI_NME_AP_CREDENTIAL_TYPE_PSK:
- bufferSize += 2; /* u16 primitive->apCredentials.nmeAuthType.authTypePersonal.authPers_credentials.psk.encryptionMode */
- bufferSize += 32; /* u8 primitive->apCredentials.nmeAuthType.authTypePersonal.authPers_credentials.psk.psk[32] */
- break;
- case CSR_WIFI_NME_AP_CREDENTIAL_TYPE_PASSPHRASE:
- bufferSize += 2; /* u16 primitive->apCredentials.nmeAuthType.authTypePersonal.authPers_credentials.passphrase.encryptionMode */
- bufferSize += (primitive->apCredentials.nmeAuthType.authTypePersonal.authPers_credentials.passphrase.passphrase ? strlen(primitive->apCredentials.nmeAuthType.authTypePersonal.authPers_credentials.passphrase.passphrase) : 0) + 1; /* char* primitive->apCredentials.nmeAuthType.authTypePersonal.authPers_credentials.passphrase.passphrase (0 byte len + 1 for NULL Term) */
- break;
- default:
- break;
- }
- break;
- default:
- break;
- }
- bufferSize += 1; /* u8 primitive->maxConnections */
- bufferSize += 1; /* CsrWifiSmeP2pGroupCapabilityMask primitive->p2pGoParam.groupCapability */
- bufferSize += 3; /* u8 primitive->p2pGoParam.operatingChanList.country[3] */
- bufferSize += 1; /* u8 primitive->p2pGoParam.operatingChanList.channelEntryListCount */
- {
- u16 i3;
- for (i3 = 0; i3 < primitive->p2pGoParam.operatingChanList.channelEntryListCount; i3++)
- {
- bufferSize += 1; /* u8 primitive->p2pGoParam.operatingChanList.channelEntryList[i3].operatingClass */
- bufferSize += 1; /* u8 primitive->p2pGoParam.operatingChanList.channelEntryList[i3].operatingChannelCount */
- bufferSize += primitive->p2pGoParam.operatingChanList.channelEntryList[i3].operatingChannelCount; /* u8 primitive->p2pGoParam.operatingChanList.channelEntryList[i3].operatingChannel */
- }
- }
- bufferSize += 1; /* u8 primitive->p2pGoParam.opPsEnabled */
- bufferSize += 1; /* u8 primitive->p2pGoParam.ctWindow */
- bufferSize += 1; /* CsrWifiSmeP2pNoaConfigMethod primitive->p2pGoParam.noaConfigMethod */
- bufferSize += 1; /* u8 primitive->p2pGoParam.allowNoaWithNonP2pDevices */
- bufferSize += 1; /* u8 primitive->wpsEnabled */
- return bufferSize;
-}
-
-
-u8* CsrWifiNmeApStartReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiNmeApStartReq *primitive = (CsrWifiNmeApStartReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint8Ser(ptr, len, (u8) primitive->apType);
- CsrUint8Ser(ptr, len, (u8) primitive->cloakSsid);
- CsrMemCpySer(ptr, len, (const void *) primitive->ssid.ssid, ((u16) (32)));
- CsrUint8Ser(ptr, len, (u8) primitive->ssid.length);
- CsrUint8Ser(ptr, len, (u8) primitive->ifIndex);
- CsrUint8Ser(ptr, len, (u8) primitive->channel);
- CsrUint8Ser(ptr, len, (u8) primitive->apCredentials.authType);
- switch (primitive->apCredentials.authType)
- {
- case CSR_WIFI_SME_AP_AUTH_TYPE_OPEN_SYSTEM:
- CsrUint8Ser(ptr, len, (u8) primitive->apCredentials.nmeAuthType.openSystemEmpty.empty);
- break;
- case CSR_WIFI_SME_AP_AUTH_TYPE_WEP:
- CsrUint8Ser(ptr, len, (u8) primitive->apCredentials.nmeAuthType.authwep.wepKeyType);
- switch (primitive->apCredentials.nmeAuthType.authwep.wepKeyType)
- {
- case CSR_WIFI_SME_CREDENTIAL_TYPE_WEP128:
- CsrUint8Ser(ptr, len, (u8) primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep128Key.wepAuthType);
- CsrUint8Ser(ptr, len, (u8) primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep128Key.selectedWepKey);
- CsrMemCpySer(ptr, len, (const void *) primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep128Key.key1, ((u16) (13)));
- CsrMemCpySer(ptr, len, (const void *) primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep128Key.key2, ((u16) (13)));
- CsrMemCpySer(ptr, len, (const void *) primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep128Key.key3, ((u16) (13)));
- CsrMemCpySer(ptr, len, (const void *) primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep128Key.key4, ((u16) (13)));
- break;
- case CSR_WIFI_SME_CREDENTIAL_TYPE_WEP64:
- CsrUint8Ser(ptr, len, (u8) primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep64Key.wepAuthType);
- CsrUint8Ser(ptr, len, (u8) primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep64Key.selectedWepKey);
- CsrMemCpySer(ptr, len, (const void *) primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep64Key.key1, ((u16) (5)));
- CsrMemCpySer(ptr, len, (const void *) primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep64Key.key2, ((u16) (5)));
- CsrMemCpySer(ptr, len, (const void *) primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep64Key.key3, ((u16) (5)));
- CsrMemCpySer(ptr, len, (const void *) primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep64Key.key4, ((u16) (5)));
- break;
- default:
- break;
- }
- break;
- case CSR_WIFI_SME_AP_AUTH_TYPE_PERSONAL:
- CsrUint8Ser(ptr, len, (u8) primitive->apCredentials.nmeAuthType.authTypePersonal.authSupport);
- CsrUint16Ser(ptr, len, (u16) primitive->apCredentials.nmeAuthType.authTypePersonal.rsnCapabilities);
- CsrUint16Ser(ptr, len, (u16) primitive->apCredentials.nmeAuthType.authTypePersonal.wapiCapabilities);
- CsrUint8Ser(ptr, len, (u8) primitive->apCredentials.nmeAuthType.authTypePersonal.pskOrPassphrase);
- switch (primitive->apCredentials.nmeAuthType.authTypePersonal.pskOrPassphrase)
- {
- case CSR_WIFI_NME_AP_CREDENTIAL_TYPE_PSK:
- CsrUint16Ser(ptr, len, (u16) primitive->apCredentials.nmeAuthType.authTypePersonal.authPers_credentials.psk.encryptionMode);
- CsrMemCpySer(ptr, len, (const void *) primitive->apCredentials.nmeAuthType.authTypePersonal.authPers_credentials.psk.psk, ((u16) (32)));
- break;
- case CSR_WIFI_NME_AP_CREDENTIAL_TYPE_PASSPHRASE:
- CsrUint16Ser(ptr, len, (u16) primitive->apCredentials.nmeAuthType.authTypePersonal.authPers_credentials.passphrase.encryptionMode);
- CsrCharStringSer(ptr, len, primitive->apCredentials.nmeAuthType.authTypePersonal.authPers_credentials.passphrase.passphrase);
- break;
- default:
- break;
- }
- break;
- default:
- break;
- }
- CsrUint8Ser(ptr, len, (u8) primitive->maxConnections);
- CsrUint8Ser(ptr, len, (u8) primitive->p2pGoParam.groupCapability);
- CsrMemCpySer(ptr, len, (const void *) primitive->p2pGoParam.operatingChanList.country, ((u16) (3)));
- CsrUint8Ser(ptr, len, (u8) primitive->p2pGoParam.operatingChanList.channelEntryListCount);
- {
- u16 i3;
- for (i3 = 0; i3 < primitive->p2pGoParam.operatingChanList.channelEntryListCount; i3++)
- {
- CsrUint8Ser(ptr, len, (u8) primitive->p2pGoParam.operatingChanList.channelEntryList[i3].operatingClass);
- CsrUint8Ser(ptr, len, (u8) primitive->p2pGoParam.operatingChanList.channelEntryList[i3].operatingChannelCount);
- if (primitive->p2pGoParam.operatingChanList.channelEntryList[i3].operatingChannelCount)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->p2pGoParam.operatingChanList.channelEntryList[i3].operatingChannel, ((u16) (primitive->p2pGoParam.operatingChanList.channelEntryList[i3].operatingChannelCount)));
- }
- }
- }
- CsrUint8Ser(ptr, len, (u8) primitive->p2pGoParam.opPsEnabled);
- CsrUint8Ser(ptr, len, (u8) primitive->p2pGoParam.ctWindow);
- CsrUint8Ser(ptr, len, (u8) primitive->p2pGoParam.noaConfigMethod);
- CsrUint8Ser(ptr, len, (u8) primitive->p2pGoParam.allowNoaWithNonP2pDevices);
- CsrUint8Ser(ptr, len, (u8) primitive->wpsEnabled);
- return(ptr);
-}
-
-
-void* CsrWifiNmeApStartReqDes(u8 *buffer, size_t length)
-{
- CsrWifiNmeApStartReq *primitive = kmalloc(sizeof(CsrWifiNmeApStartReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->apType, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->cloakSsid, buffer, &offset);
- CsrMemCpyDes(primitive->ssid.ssid, buffer, &offset, ((u16) (32)));
- CsrUint8Des((u8 *) &primitive->ssid.length, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->ifIndex, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->channel, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->apCredentials.authType, buffer, &offset);
- switch (primitive->apCredentials.authType)
- {
- case CSR_WIFI_SME_AP_AUTH_TYPE_OPEN_SYSTEM:
- CsrUint8Des((u8 *) &primitive->apCredentials.nmeAuthType.openSystemEmpty.empty, buffer, &offset);
- break;
- case CSR_WIFI_SME_AP_AUTH_TYPE_WEP:
- CsrUint8Des((u8 *) &primitive->apCredentials.nmeAuthType.authwep.wepKeyType, buffer, &offset);
- switch (primitive->apCredentials.nmeAuthType.authwep.wepKeyType)
- {
- case CSR_WIFI_SME_CREDENTIAL_TYPE_WEP128:
- CsrUint8Des((u8 *) &primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep128Key.wepAuthType, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep128Key.selectedWepKey, buffer, &offset);
- CsrMemCpyDes(primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep128Key.key1, buffer, &offset, ((u16) (13)));
- CsrMemCpyDes(primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep128Key.key2, buffer, &offset, ((u16) (13)));
- CsrMemCpyDes(primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep128Key.key3, buffer, &offset, ((u16) (13)));
- CsrMemCpyDes(primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep128Key.key4, buffer, &offset, ((u16) (13)));
- break;
- case CSR_WIFI_SME_CREDENTIAL_TYPE_WEP64:
- CsrUint8Des((u8 *) &primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep64Key.wepAuthType, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep64Key.selectedWepKey, buffer, &offset);
- CsrMemCpyDes(primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep64Key.key1, buffer, &offset, ((u16) (5)));
- CsrMemCpyDes(primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep64Key.key2, buffer, &offset, ((u16) (5)));
- CsrMemCpyDes(primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep64Key.key3, buffer, &offset, ((u16) (5)));
- CsrMemCpyDes(primitive->apCredentials.nmeAuthType.authwep.wepCredentials.wep64Key.key4, buffer, &offset, ((u16) (5)));
- break;
- default:
- break;
- }
- break;
- case CSR_WIFI_SME_AP_AUTH_TYPE_PERSONAL:
- CsrUint8Des((u8 *) &primitive->apCredentials.nmeAuthType.authTypePersonal.authSupport, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->apCredentials.nmeAuthType.authTypePersonal.rsnCapabilities, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->apCredentials.nmeAuthType.authTypePersonal.wapiCapabilities, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->apCredentials.nmeAuthType.authTypePersonal.pskOrPassphrase, buffer, &offset);
- switch (primitive->apCredentials.nmeAuthType.authTypePersonal.pskOrPassphrase)
- {
- case CSR_WIFI_NME_AP_CREDENTIAL_TYPE_PSK:
- CsrUint16Des((u16 *) &primitive->apCredentials.nmeAuthType.authTypePersonal.authPers_credentials.psk.encryptionMode, buffer, &offset);
- CsrMemCpyDes(primitive->apCredentials.nmeAuthType.authTypePersonal.authPers_credentials.psk.psk, buffer, &offset, ((u16) (32)));
- break;
- case CSR_WIFI_NME_AP_CREDENTIAL_TYPE_PASSPHRASE:
- CsrUint16Des((u16 *) &primitive->apCredentials.nmeAuthType.authTypePersonal.authPers_credentials.passphrase.encryptionMode, buffer, &offset);
- CsrCharStringDes(&primitive->apCredentials.nmeAuthType.authTypePersonal.authPers_credentials.passphrase.passphrase, buffer, &offset);
- break;
- default:
- break;
- }
- break;
- default:
- break;
- }
- CsrUint8Des((u8 *) &primitive->maxConnections, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->p2pGoParam.groupCapability, buffer, &offset);
- CsrMemCpyDes(primitive->p2pGoParam.operatingChanList.country, buffer, &offset, ((u16) (3)));
- CsrUint8Des((u8 *) &primitive->p2pGoParam.operatingChanList.channelEntryListCount, buffer, &offset);
- primitive->p2pGoParam.operatingChanList.channelEntryList = NULL;
- if (primitive->p2pGoParam.operatingChanList.channelEntryListCount)
- {
- primitive->p2pGoParam.operatingChanList.channelEntryList = kmalloc(sizeof(CsrWifiSmeApP2pOperatingChanEntry) * primitive->p2pGoParam.operatingChanList.channelEntryListCount, GFP_KERNEL);
- }
- {
- u16 i3;
- for (i3 = 0; i3 < primitive->p2pGoParam.operatingChanList.channelEntryListCount; i3++)
- {
- CsrUint8Des((u8 *) &primitive->p2pGoParam.operatingChanList.channelEntryList[i3].operatingClass, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->p2pGoParam.operatingChanList.channelEntryList[i3].operatingChannelCount, buffer, &offset);
- if (primitive->p2pGoParam.operatingChanList.channelEntryList[i3].operatingChannelCount)
- {
- primitive->p2pGoParam.operatingChanList.channelEntryList[i3].operatingChannel = kmalloc(primitive->p2pGoParam.operatingChanList.channelEntryList[i3].operatingChannelCount, GFP_KERNEL);
- CsrMemCpyDes(primitive->p2pGoParam.operatingChanList.channelEntryList[i3].operatingChannel, buffer, &offset, ((u16) (primitive->p2pGoParam.operatingChanList.channelEntryList[i3].operatingChannelCount)));
- }
- else
- {
- primitive->p2pGoParam.operatingChanList.channelEntryList[i3].operatingChannel = NULL;
- }
- }
- }
- CsrUint8Des((u8 *) &primitive->p2pGoParam.opPsEnabled, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->p2pGoParam.ctWindow, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->p2pGoParam.noaConfigMethod, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->p2pGoParam.allowNoaWithNonP2pDevices, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->wpsEnabled, buffer, &offset);
-
- return primitive;
-}
-
-
-void CsrWifiNmeApStartReqSerFree(void *voidPrimitivePointer)
-{
- CsrWifiNmeApStartReq *primitive = (CsrWifiNmeApStartReq *) voidPrimitivePointer;
- switch (primitive->apCredentials.authType)
- {
- case CSR_WIFI_SME_AP_AUTH_TYPE_PERSONAL:
- switch (primitive->apCredentials.nmeAuthType.authTypePersonal.pskOrPassphrase)
- {
- case CSR_WIFI_NME_AP_CREDENTIAL_TYPE_PASSPHRASE:
- kfree(primitive->apCredentials.nmeAuthType.authTypePersonal.authPers_credentials.passphrase.passphrase);
- break;
- default:
- break;
- }
- break;
- default:
- break;
- }
- {
- u16 i3;
- for (i3 = 0; i3 < primitive->p2pGoParam.operatingChanList.channelEntryListCount; i3++)
- {
- kfree(primitive->p2pGoParam.operatingChanList.channelEntryList[i3].operatingChannel);
- }
- }
- kfree(primitive->p2pGoParam.operatingChanList.channelEntryList);
- kfree(primitive);
-}
-
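CsrWifiNmeApStartReq above also illustrates the tag-plus-union convention: the authType byte is serialised first and only the fields of the selected credential variant follow it, so Sizeof, Ser, Des and SerFree all switch on the same tag. A hedged sketch of that technique follows, using an illustrative two-variant credential type rather than the CSR definitions.

#include <stdint.h>
#include <string.h>

enum cred_tag { CRED_OPEN = 0, CRED_PSK = 1 };     /* illustrative tag values */

typedef struct {
    uint8_t tag;                 /* which variant follows on the wire */
    union {
        uint8_t empty;           /* CRED_OPEN: single placeholder byte */
        uint8_t psk[32];         /* CRED_PSK: fixed 32-byte key */
    } u;
} cred_t;

/* Sizeof and Ser must agree on the per-variant layout, exactly like the
 * authType switches in the functions above. */
static size_t cred_sizeof(const cred_t *c)
{
    switch (c->tag) {
    case CRED_OPEN: return 1 + 1;
    case CRED_PSK:  return 1 + 32;
    default:        return 1;    /* unknown tags carry no payload */
    }
}

static size_t cred_ser(uint8_t *buf, const cred_t *c)
{
    size_t len = 0;

    buf[len++] = c->tag;
    switch (c->tag) {
    case CRED_OPEN:
        buf[len++] = c->u.empty;
        break;
    case CRED_PSK:
        memcpy(buf + len, c->u.psk, 32);
        len += 32;
        break;
    default:
        break;                   /* unknown tags carry no payload */
    }
    return len;
}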
-
-size_t CsrWifiNmeApWmmParamUpdateReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 51) */
- {
- u16 i1;
- for (i1 = 0; i1 < 4; i1++)
- {
- bufferSize += 1; /* u8 primitive->wmmApParams[i1].cwMin */
- bufferSize += 1; /* u8 primitive->wmmApParams[i1].cwMax */
- bufferSize += 1; /* u8 primitive->wmmApParams[i1].aifs */
- bufferSize += 2; /* u16 primitive->wmmApParams[i1].txopLimit */
- bufferSize += 1; /* u8 primitive->wmmApParams[i1].admissionControlMandatory */
- }
- }
- {
- u16 i1;
- for (i1 = 0; i1 < 4; i1++)
- {
- bufferSize += 1; /* u8 primitive->wmmApBcParams[i1].cwMin */
- bufferSize += 1; /* u8 primitive->wmmApBcParams[i1].cwMax */
- bufferSize += 1; /* u8 primitive->wmmApBcParams[i1].aifs */
- bufferSize += 2; /* u16 primitive->wmmApBcParams[i1].txopLimit */
- bufferSize += 1; /* u8 primitive->wmmApBcParams[i1].admissionControlMandatory */
- }
- }
- return bufferSize;
-}
-
-
-u8* CsrWifiNmeApWmmParamUpdateReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiNmeApWmmParamUpdateReq *primitive = (CsrWifiNmeApWmmParamUpdateReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- {
- u16 i1;
- for (i1 = 0; i1 < 4; i1++)
- {
- CsrUint8Ser(ptr, len, (u8) primitive->wmmApParams[i1].cwMin);
- CsrUint8Ser(ptr, len, (u8) primitive->wmmApParams[i1].cwMax);
- CsrUint8Ser(ptr, len, (u8) primitive->wmmApParams[i1].aifs);
- CsrUint16Ser(ptr, len, (u16) primitive->wmmApParams[i1].txopLimit);
- CsrUint8Ser(ptr, len, (u8) primitive->wmmApParams[i1].admissionControlMandatory);
- }
- }
- {
- u16 i1;
- for (i1 = 0; i1 < 4; i1++)
- {
- CsrUint8Ser(ptr, len, (u8) primitive->wmmApBcParams[i1].cwMin);
- CsrUint8Ser(ptr, len, (u8) primitive->wmmApBcParams[i1].cwMax);
- CsrUint8Ser(ptr, len, (u8) primitive->wmmApBcParams[i1].aifs);
- CsrUint16Ser(ptr, len, (u16) primitive->wmmApBcParams[i1].txopLimit);
- CsrUint8Ser(ptr, len, (u8) primitive->wmmApBcParams[i1].admissionControlMandatory);
- }
- }
- return(ptr);
-}
-
-
-void* CsrWifiNmeApWmmParamUpdateReqDes(u8 *buffer, size_t length)
-{
- CsrWifiNmeApWmmParamUpdateReq *primitive = kmalloc(sizeof(CsrWifiNmeApWmmParamUpdateReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- {
- u16 i1;
- for (i1 = 0; i1 < 4; i1++)
- {
- CsrUint8Des((u8 *) &primitive->wmmApParams[i1].cwMin, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->wmmApParams[i1].cwMax, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->wmmApParams[i1].aifs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->wmmApParams[i1].txopLimit, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->wmmApParams[i1].admissionControlMandatory, buffer, &offset);
- }
- }
- {
- u16 i1;
- for (i1 = 0; i1 < 4; i1++)
- {
- CsrUint8Des((u8 *) &primitive->wmmApBcParams[i1].cwMin, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->wmmApBcParams[i1].cwMax, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->wmmApBcParams[i1].aifs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->wmmApBcParams[i1].txopLimit, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->wmmApBcParams[i1].admissionControlMandatory, buffer, &offset);
- }
- }
-
- return primitive;
-}
-
-
-size_t CsrWifiNmeApStaRemoveReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 12) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 6; /* u8 primitive->staMacAddress.a[6] */
- bufferSize += 1; /* u8 primitive->keepBlocking */
- return bufferSize;
-}
-
-
-u8* CsrWifiNmeApStaRemoveReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiNmeApStaRemoveReq *primitive = (CsrWifiNmeApStaRemoveReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrMemCpySer(ptr, len, (const void *) primitive->staMacAddress.a, ((u16) (6)));
- CsrUint8Ser(ptr, len, (u8) primitive->keepBlocking);
- return(ptr);
-}
-
-
-void* CsrWifiNmeApStaRemoveReqDes(u8 *buffer, size_t length)
-{
- CsrWifiNmeApStaRemoveReq *primitive = kmalloc(sizeof(CsrWifiNmeApStaRemoveReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrMemCpyDes(primitive->staMacAddress.a, buffer, &offset, ((u16) (6)));
- CsrUint8Des((u8 *) &primitive->keepBlocking, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiNmeApWpsRegisterCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 7) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- return bufferSize;
-}
-
-
-u8* CsrWifiNmeApWpsRegisterCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiNmeApWpsRegisterCfm *primitive = (CsrWifiNmeApWpsRegisterCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- return(ptr);
-}
-
-
-void* CsrWifiNmeApWpsRegisterCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiNmeApWpsRegisterCfm *primitive = kmalloc(sizeof(CsrWifiNmeApWpsRegisterCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiNmeApStartCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 40) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 32; /* u8 primitive->ssid.ssid[32] */
- bufferSize += 1; /* u8 primitive->ssid.length */
- return bufferSize;
-}
-
-
-u8* CsrWifiNmeApStartCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiNmeApStartCfm *primitive = (CsrWifiNmeApStartCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrMemCpySer(ptr, len, (const void *) primitive->ssid.ssid, ((u16) (32)));
- CsrUint8Ser(ptr, len, (u8) primitive->ssid.length);
- return(ptr);
-}
-
-
-void* CsrWifiNmeApStartCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiNmeApStartCfm *primitive = kmalloc(sizeof(CsrWifiNmeApStartCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrMemCpyDes(primitive->ssid.ssid, buffer, &offset, ((u16) (32)));
- CsrUint8Des((u8 *) &primitive->ssid.length, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiNmeApStopCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 7) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- return bufferSize;
-}
-
-
-u8* CsrWifiNmeApStopCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiNmeApStopCfm *primitive = (CsrWifiNmeApStopCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- return(ptr);
-}
-
-
-void* CsrWifiNmeApStopCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiNmeApStopCfm *primitive = kmalloc(sizeof(CsrWifiNmeApStopCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiNmeApStopIndSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 8) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 1; /* CsrWifiSmeApType primitive->apType */
- bufferSize += 2; /* CsrResult primitive->status */
- return bufferSize;
-}
-
-
-u8* CsrWifiNmeApStopIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiNmeApStopInd *primitive = (CsrWifiNmeApStopInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint8Ser(ptr, len, (u8) primitive->apType);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- return(ptr);
-}
-
-
-void* CsrWifiNmeApStopIndDes(u8 *buffer, size_t length)
-{
- CsrWifiNmeApStopInd *primitive = kmalloc(sizeof(CsrWifiNmeApStopInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->apType, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiNmeApStationIndSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 18) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 1; /* CsrWifiSmeMediaStatus primitive->mediaStatus */
- bufferSize += 6; /* u8 primitive->peerMacAddress.a[6] */
- bufferSize += 6; /* u8 primitive->peerDeviceAddress.a[6] */
- return bufferSize;
-}
-
-
-u8* CsrWifiNmeApStationIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiNmeApStationInd *primitive = (CsrWifiNmeApStationInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint8Ser(ptr, len, (u8) primitive->mediaStatus);
- CsrMemCpySer(ptr, len, (const void *) primitive->peerMacAddress.a, ((u16) (6)));
- CsrMemCpySer(ptr, len, (const void *) primitive->peerDeviceAddress.a, ((u16) (6)));
- return(ptr);
-}
-
-
-void* CsrWifiNmeApStationIndDes(u8 *buffer, size_t length)
-{
- CsrWifiNmeApStationInd *primitive = kmalloc(sizeof(CsrWifiNmeApStationInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->mediaStatus, buffer, &offset);
- CsrMemCpyDes(primitive->peerMacAddress.a, buffer, &offset, ((u16) (6)));
- CsrMemCpyDes(primitive->peerDeviceAddress.a, buffer, &offset, ((u16) (6)));
-
- return primitive;
-}
-
-
-#endif /* CSR_WIFI_NME_ENABLE */
-#endif /* CSR_WIFI_AP_ENABLE */
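Every primitive in the deleted file follows the same quartet: a Sizeof function that walks the fields and totals their wire sizes, a Ser function that writes the fields in exactly that order while advancing *len, a Des function that allocates a fresh primitive and reads the fields back at a running offset, and, where heap members exist, a SerFree. The sketch below mirrors that round trip with a hypothetical ExampleReq and local helpers standing in for CsrUint8Ser/CsrUint16Ser and their Des counterparts; the byte order shown is illustrative only.

#include <stdint.h>
#include <stdlib.h>

typedef struct {
    uint16_t type;               /* message type, always serialised first */
    uint16_t interfaceTag;
    uint8_t  channel;
} ExampleReq;                    /* hypothetical primitive */

/* Local stand-ins for CsrUint16Ser/CsrUint8Ser: append a value, advance *len. */
static void u16_ser(uint8_t *buf, size_t *len, uint16_t v)
{
    buf[(*len)++] = (uint8_t)(v & 0xffu);   /* byte order is illustrative */
    buf[(*len)++] = (uint8_t)(v >> 8);
}

static void u8_ser(uint8_t *buf, size_t *len, uint8_t v)
{
    buf[(*len)++] = v;
}

static void u16_des(uint16_t *v, const uint8_t *buf, size_t *offset)
{
    *v = (uint16_t)(buf[*offset] | (buf[*offset + 1] << 8));
    *offset += 2;
}

static void u8_des(uint8_t *v, const uint8_t *buf, size_t *offset)
{
    *v = buf[(*offset)++];
}

/* Sizeof: total the wire size of every field, 2 bytes for the type first. */
static size_t example_req_sizeof(const ExampleReq *msg)
{
    (void)msg;
    return 2 /* type */ + 2 /* interfaceTag */ + 1 /* channel */;
}

/* Ser: write the fields in exactly the order Sizeof counted them. */
static uint8_t *example_req_ser(uint8_t *ptr, size_t *len, const ExampleReq *msg)
{
    *len = 0;
    u16_ser(ptr, len, msg->type);
    u16_ser(ptr, len, msg->interfaceTag);
    u8_ser(ptr, len, msg->channel);
    return ptr;
}

/* Des: allocate a fresh primitive and read the fields back at a running offset. */
static ExampleReq *example_req_des(const uint8_t *buffer, size_t length)
{
    ExampleReq *msg = malloc(sizeof(*msg));
    size_t offset = 0;

    (void)length;                /* unused here, as in the Des functions above */
    if (!msg)
        return NULL;
    u16_des(&msg->type, buffer, &offset);
    u16_des(&msg->interfaceTag, buffer, &offset);
    u8_des(&msg->channel, buffer, &offset);
    return msg;
}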
diff --git a/drivers/staging/csr/csr_wifi_nme_ap_serialize.h b/drivers/staging/csr/csr_wifi_nme_ap_serialize.h
deleted file mode 100644
index c04585e..0000000
--- a/drivers/staging/csr/csr_wifi_nme_ap_serialize.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#ifndef CSR_WIFI_NME_AP_SERIALIZE_H__
-#define CSR_WIFI_NME_AP_SERIALIZE_H__
-
-#include "csr_wifi_msgconv.h"
-
-#include "csr_wifi_nme_ap_prim.h"
-
-#ifndef CSR_WIFI_NME_ENABLE
-#error CSR_WIFI_NME_ENABLE MUST be defined in order to use csr_wifi_nme_ap_serialize.h
-#endif
-#ifndef CSR_WIFI_AP_ENABLE
-#error CSR_WIFI_AP_ENABLE MUST be defined in order to use csr_wifi_nme_ap_serialize.h
-#endif
-
-extern void CsrWifiNmeApPfree(void *ptr);
-
-extern u8* CsrWifiNmeApConfigSetReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeApConfigSetReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeApConfigSetReqSizeof(void *msg);
-extern void CsrWifiNmeApConfigSetReqSerFree(void *msg);
-
-extern u8* CsrWifiNmeApWpsRegisterReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeApWpsRegisterReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeApWpsRegisterReqSizeof(void *msg);
-#define CsrWifiNmeApWpsRegisterReqSerFree CsrWifiNmeApPfree
-
-extern u8* CsrWifiNmeApStartReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeApStartReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeApStartReqSizeof(void *msg);
-extern void CsrWifiNmeApStartReqSerFree(void *msg);
-
-#define CsrWifiNmeApStopReqSer CsrWifiEventCsrUint16Ser
-#define CsrWifiNmeApStopReqDes CsrWifiEventCsrUint16Des
-#define CsrWifiNmeApStopReqSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiNmeApStopReqSerFree CsrWifiNmeApPfree
-
-extern u8* CsrWifiNmeApWmmParamUpdateReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeApWmmParamUpdateReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeApWmmParamUpdateReqSizeof(void *msg);
-#define CsrWifiNmeApWmmParamUpdateReqSerFree CsrWifiNmeApPfree
-
-extern u8* CsrWifiNmeApStaRemoveReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeApStaRemoveReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeApStaRemoveReqSizeof(void *msg);
-#define CsrWifiNmeApStaRemoveReqSerFree CsrWifiNmeApPfree
-
-#define CsrWifiNmeApConfigSetCfmSer CsrWifiEventCsrUint16Ser
-#define CsrWifiNmeApConfigSetCfmDes CsrWifiEventCsrUint16Des
-#define CsrWifiNmeApConfigSetCfmSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiNmeApConfigSetCfmSerFree CsrWifiNmeApPfree
-
-extern u8* CsrWifiNmeApWpsRegisterCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeApWpsRegisterCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeApWpsRegisterCfmSizeof(void *msg);
-#define CsrWifiNmeApWpsRegisterCfmSerFree CsrWifiNmeApPfree
-
-extern u8* CsrWifiNmeApStartCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeApStartCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeApStartCfmSizeof(void *msg);
-#define CsrWifiNmeApStartCfmSerFree CsrWifiNmeApPfree
-
-extern u8* CsrWifiNmeApStopCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeApStopCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeApStopCfmSizeof(void *msg);
-#define CsrWifiNmeApStopCfmSerFree CsrWifiNmeApPfree
-
-extern u8* CsrWifiNmeApStopIndSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeApStopIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeApStopIndSizeof(void *msg);
-#define CsrWifiNmeApStopIndSerFree CsrWifiNmeApPfree
-
-#define CsrWifiNmeApWmmParamUpdateCfmSer CsrWifiEventCsrUint16Ser
-#define CsrWifiNmeApWmmParamUpdateCfmDes CsrWifiEventCsrUint16Des
-#define CsrWifiNmeApWmmParamUpdateCfmSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiNmeApWmmParamUpdateCfmSerFree CsrWifiNmeApPfree
-
-extern u8* CsrWifiNmeApStationIndSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeApStationIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeApStationIndSizeof(void *msg);
-#define CsrWifiNmeApStationIndSerFree CsrWifiNmeApPfree
-
-#endif /* CSR_WIFI_NME_AP_SERIALIZE_H__ */
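The header above pairs each primitive with its Ser/Des/Sizeof/SerFree handlers, aliasing fixed-layout messages (e.g. CsrWifiNmeApStopReq) to the generic CsrWifiEventCsrUint16 handlers and trivial frees to CsrWifiNmeApPfree. Below is a hypothetical sketch of how a converter table could tie such a quartet to a message type; the entry layout is an assumption, since the real CSR converter table is not part of this diff.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical converter-entry layout. */
typedef size_t   (*sizeof_fn)(void *msg);
typedef uint8_t *(*ser_fn)(uint8_t *ptr, size_t *len, void *msg);
typedef void    *(*des_fn)(uint8_t *buffer, size_t length);
typedef void     (*free_fn)(void *msg);

struct msg_converter {
    uint16_t  type;              /* primitive identifier, e.g. the ..._REQ value */
    sizeof_fn msg_sizeof;        /* e.g. CsrWifiNmeApStartReqSizeof */
    ser_fn    serialise;         /* e.g. CsrWifiNmeApStartReqSer */
    des_fn    deserialise;       /* e.g. CsrWifiNmeApStartReqDes */
    free_fn   free_msg;          /* e.g. CsrWifiNmeApStartReqSerFree */
};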
diff --git a/drivers/staging/csr/csr_wifi_nme_converter_init.h b/drivers/staging/csr/csr_wifi_nme_converter_init.h
deleted file mode 100644
index 85e6f5f..0000000
--- a/drivers/staging/csr/csr_wifi_nme_converter_init.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#ifndef CSR_WIFI_NME_CONVERTER_INIT_H__
-#define CSR_WIFI_NME_CONVERTER_INIT_H__
-
-#ifndef CSR_WIFI_NME_ENABLE
-#error CSR_WIFI_NME_ENABLE MUST be defined in order to use csr_wifi_nme_converter_init.h
-#endif
-
-#ifndef EXCLUDE_CSR_WIFI_NME_MODULE
-
-#include "csr_msgconv.h"
-
-#ifdef CSR_LOG_ENABLE
-#include "csr_log.h"
-
-extern const CsrLogPrimitiveInformation* CsrWifiNmeTechInfoGet(void);
-#endif /* CSR_LOG_ENABLE */
-
-extern void CsrWifiNmeConverterInit(void);
-
-#else /* EXCLUDE_CSR_WIFI_NME_MODULE */
-
-#define CsrWifiNmeConverterInit()
-
-#endif /* EXCLUDE_CSR_WIFI_NME_MODULE */
-
-#endif /* CSR_WIFI_NME_CONVERTER_INIT_H__ */
diff --git a/drivers/staging/csr/csr_wifi_nme_lib.h b/drivers/staging/csr/csr_wifi_nme_lib.h
deleted file mode 100644
index 5a1f132..0000000
--- a/drivers/staging/csr/csr_wifi_nme_lib.h
+++ /dev/null
@@ -1,991 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#ifndef CSR_WIFI_NME_LIB_H__
-#define CSR_WIFI_NME_LIB_H__
-
-#include "csr_sched.h"
-#include "csr_macro.h"
-#include "csr_msg_transport.h"
-
-#include "csr_wifi_lib.h"
-
-#include "csr_wifi_nme_prim.h"
-#include "csr_wifi_nme_task.h"
-
-
-#ifndef CSR_WIFI_NME_ENABLE
-#error CSR_WIFI_NME_ENABLE MUST be defined in order to use csr_wifi_nme_lib.h
-#endif
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeConnectionStatusGetReqSend
-
- DESCRIPTION
- Requests the current connection status of the NME.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
-
-*******************************************************************************/
-#define CsrWifiNmeConnectionStatusGetReqCreate(msg__, dst__, src__, interfaceTag__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeConnectionStatusGetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_CONNECTION_STATUS_GET_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__);
-
-#define CsrWifiNmeConnectionStatusGetReqSendTo(dst__, src__, interfaceTag__) \
- { \
- CsrWifiNmeConnectionStatusGetReq *msg__; \
- CsrWifiNmeConnectionStatusGetReqCreate(msg__, dst__, src__, interfaceTag__); \
- CsrMsgTransport(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeConnectionStatusGetReqSend(src__, interfaceTag__) \
- CsrWifiNmeConnectionStatusGetReqSendTo(CSR_WIFI_NME_IFACEQUEUE, src__, interfaceTag__)
-
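Each primitive in this header gets the same Create/SendTo/Send macro trio: Create allocates and fills the message, SendTo hands it to CsrMsgTransport for an explicit destination, and Send defaults the destination to CSR_WIFI_NME_IFACEQUEUE. A hypothetical caller sketch follows; the wrapper function and the u16 queue argument are assumptions, and it presumes this header (and the kernel's u16 typedef) is still available.

static void query_connection_status(u16 sender_queue, u16 interface_tag)
{
    /* The macro kmalloc()s the primitive, initialises the common FSM
     * header via CsrWifiFsmEventInit() and hands it to CsrMsgTransport()
     * for CSR_WIFI_NME_IFACEQUEUE; the matching
     * CSR_WIFI_NME_CONNECTION_STATUS_GET_CFM is later delivered back to
     * sender_queue by the NME task. */
    CsrWifiNmeConnectionStatusGetReqSend(sender_queue, interface_tag);
}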
-/*******************************************************************************
-
- NAME
- CsrWifiNmeConnectionStatusGetCfmSend
-
- DESCRIPTION
- Reports the connection status of the NME.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Indicates the success or otherwise of the requested
- operation.
- connectionStatus - NME current connection status
-
-*******************************************************************************/
-#define CsrWifiNmeConnectionStatusGetCfmCreate(msg__, dst__, src__, interfaceTag__, status__, connectionStatus__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeConnectionStatusGetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_CONNECTION_STATUS_GET_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__); \
- msg__->connectionStatus = (connectionStatus__);
-
-#define CsrWifiNmeConnectionStatusGetCfmSendTo(dst__, src__, interfaceTag__, status__, connectionStatus__) \
- { \
- CsrWifiNmeConnectionStatusGetCfm *msg__; \
- CsrWifiNmeConnectionStatusGetCfmCreate(msg__, dst__, src__, interfaceTag__, status__, connectionStatus__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeConnectionStatusGetCfmSend(dst__, interfaceTag__, status__, connectionStatus__) \
- CsrWifiNmeConnectionStatusGetCfmSendTo(dst__, CSR_WIFI_NME_IFACEQUEUE, interfaceTag__, status__, connectionStatus__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeEventMaskSetReqSend
-
- DESCRIPTION
- The wireless manager application may register with the NME to receive
- notification of interesting events. Indications will be sent only if the
- wireless manager explicitly registers to be notified of that event.
- indMask is a bit mask of values defined in CsrWifiNmeIndicationsMask.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- indMask - Set mask with values from CsrWifiNmeIndications
-
-*******************************************************************************/
-#define CsrWifiNmeEventMaskSetReqCreate(msg__, dst__, src__, indMask__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeEventMaskSetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_EVENT_MASK_SET_REQ, dst__, src__); \
- msg__->indMask = (indMask__);
-
-#define CsrWifiNmeEventMaskSetReqSendTo(dst__, src__, indMask__) \
- { \
- CsrWifiNmeEventMaskSetReq *msg__; \
- CsrWifiNmeEventMaskSetReqCreate(msg__, dst__, src__, indMask__); \
- CsrMsgTransport(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeEventMaskSetReqSend(src__, indMask__) \
- CsrWifiNmeEventMaskSetReqSendTo(CSR_WIFI_NME_IFACEQUEUE, src__, indMask__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeEventMaskSetCfmSend
-
- DESCRIPTION
- The NME calls the primitive to report the result of the request
- primitive.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
-
-*******************************************************************************/
-#define CsrWifiNmeEventMaskSetCfmCreate(msg__, dst__, src__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeEventMaskSetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_EVENT_MASK_SET_CFM, dst__, src__); \
- msg__->status = (status__);
-
-#define CsrWifiNmeEventMaskSetCfmSendTo(dst__, src__, status__) \
- { \
- CsrWifiNmeEventMaskSetCfm *msg__; \
- CsrWifiNmeEventMaskSetCfmCreate(msg__, dst__, src__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeEventMaskSetCfmSend(dst__, status__) \
- CsrWifiNmeEventMaskSetCfmSendTo(dst__, CSR_WIFI_NME_IFACEQUEUE, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeProfileConnectReqSend
-
- DESCRIPTION
- Requests the NME to attempt to connect to the specified profile.
- Overrides any current connection attempt.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
- profileIdentity - Identity (BSSID, SSID) of profile to be connected to.
- It must match an existing profile in the NME.
-
-*******************************************************************************/
-#define CsrWifiNmeProfileConnectReqCreate(msg__, dst__, src__, interfaceTag__, profileIdentity__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeProfileConnectReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_PROFILE_CONNECT_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->profileIdentity = (profileIdentity__);
-
-#define CsrWifiNmeProfileConnectReqSendTo(dst__, src__, interfaceTag__, profileIdentity__) \
- { \
- CsrWifiNmeProfileConnectReq *msg__; \
- CsrWifiNmeProfileConnectReqCreate(msg__, dst__, src__, interfaceTag__, profileIdentity__); \
- CsrMsgTransport(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeProfileConnectReqSend(src__, interfaceTag__, profileIdentity__) \
- CsrWifiNmeProfileConnectReqSendTo(CSR_WIFI_NME_IFACEQUEUE, src__, interfaceTag__, profileIdentity__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeProfileConnectCfmSend
-
- DESCRIPTION
- Reports the status of the NME PROFILE CONNECT REQ. If unsuccessful the
- connectAttempt parameters contain details of the APs that the NME
- attempted to connect to before reporting the failure of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an
- interface
- status - Indicates the success or otherwise of the requested
- operation.
- connectAttemptsCount - This parameter is relevant only if
- status!=CSR_WIFI_NME_STATUS_SUCCESS.
- Number of connection attempt elements provided with
- this primitive
- connectAttempts - This parameter is relevant only if
- status!=CSR_WIFI_NME_STATUS_SUCCESS.
- Points to the list of connection attempt elements
- provided with this primitive
- Each element of the list provides information about
- an AP on which the connection attempt was made and
- the error that occurred during the attempt.
-
-*******************************************************************************/
-#define CsrWifiNmeProfileConnectCfmCreate(msg__, dst__, src__, interfaceTag__, status__, connectAttemptsCount__, connectAttempts__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeProfileConnectCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_PROFILE_CONNECT_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__); \
- msg__->connectAttemptsCount = (connectAttemptsCount__); \
- msg__->connectAttempts = (connectAttempts__);
-
-#define CsrWifiNmeProfileConnectCfmSendTo(dst__, src__, interfaceTag__, status__, connectAttemptsCount__, connectAttempts__) \
- { \
- CsrWifiNmeProfileConnectCfm *msg__; \
- CsrWifiNmeProfileConnectCfmCreate(msg__, dst__, src__, interfaceTag__, status__, connectAttemptsCount__, connectAttempts__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeProfileConnectCfmSend(dst__, interfaceTag__, status__, connectAttemptsCount__, connectAttempts__) \
- CsrWifiNmeProfileConnectCfmSendTo(dst__, CSR_WIFI_NME_IFACEQUEUE, interfaceTag__, status__, connectAttemptsCount__, connectAttempts__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeProfileDeleteAllReqSend
-
- DESCRIPTION
- Deletes all profiles present in the NME, but does NOT modify the
- preferred profile list.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
-
-*******************************************************************************/
-#define CsrWifiNmeProfileDeleteAllReqCreate(msg__, dst__, src__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeProfileDeleteAllReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_PROFILE_DELETE_ALL_REQ, dst__, src__);
-
-#define CsrWifiNmeProfileDeleteAllReqSendTo(dst__, src__) \
- { \
- CsrWifiNmeProfileDeleteAllReq *msg__; \
- CsrWifiNmeProfileDeleteAllReqCreate(msg__, dst__, src__); \
- CsrMsgTransport(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeProfileDeleteAllReqSend(src__) \
- CsrWifiNmeProfileDeleteAllReqSendTo(CSR_WIFI_NME_IFACEQUEUE, src__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeProfileDeleteAllCfmSend
-
- DESCRIPTION
- Reports the status of the CSR_WIFI_NME_PROFILE_DELETE_ALL_REQ.
- Always returns CSR_WIFI_NME_STATUS_SUCCESS.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Indicates the success or otherwise of the requested operation, but
- in this case it is always set to success.
-
-*******************************************************************************/
-#define CsrWifiNmeProfileDeleteAllCfmCreate(msg__, dst__, src__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeProfileDeleteAllCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_PROFILE_DELETE_ALL_CFM, dst__, src__); \
- msg__->status = (status__);
-
-#define CsrWifiNmeProfileDeleteAllCfmSendTo(dst__, src__, status__) \
- { \
- CsrWifiNmeProfileDeleteAllCfm *msg__; \
- CsrWifiNmeProfileDeleteAllCfmCreate(msg__, dst__, src__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeProfileDeleteAllCfmSend(dst__, status__) \
- CsrWifiNmeProfileDeleteAllCfmSendTo(dst__, CSR_WIFI_NME_IFACEQUEUE, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeProfileDeleteReqSend
-
- DESCRIPTION
- Will delete the profile with a matching identity, but does NOT modify the
- preferred profile list.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- profileIdentity - Identity (BSSID, SSID) of profile to be deleted.
-
-*******************************************************************************/
-#define CsrWifiNmeProfileDeleteReqCreate(msg__, dst__, src__, profileIdentity__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeProfileDeleteReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_PROFILE_DELETE_REQ, dst__, src__); \
- msg__->profileIdentity = (profileIdentity__);
-
-#define CsrWifiNmeProfileDeleteReqSendTo(dst__, src__, profileIdentity__) \
- { \
- CsrWifiNmeProfileDeleteReq *msg__; \
- CsrWifiNmeProfileDeleteReqCreate(msg__, dst__, src__, profileIdentity__); \
- CsrMsgTransport(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeProfileDeleteReqSend(src__, profileIdentity__) \
- CsrWifiNmeProfileDeleteReqSendTo(CSR_WIFI_NME_IFACEQUEUE, src__, profileIdentity__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeProfileDeleteCfmSend
-
- DESCRIPTION
- Reports the status of the CSR_WIFI_NME_PROFILE_DELETE_REQ.
- Returns CSR_WIFI_NME_STATUS_NOT_FOUND if there is no matching profile.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Indicates the success or otherwise of the requested operation.
-
-*******************************************************************************/
-#define CsrWifiNmeProfileDeleteCfmCreate(msg__, dst__, src__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeProfileDeleteCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_PROFILE_DELETE_CFM, dst__, src__); \
- msg__->status = (status__);
-
-#define CsrWifiNmeProfileDeleteCfmSendTo(dst__, src__, status__) \
- { \
- CsrWifiNmeProfileDeleteCfm *msg__; \
- CsrWifiNmeProfileDeleteCfmCreate(msg__, dst__, src__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeProfileDeleteCfmSend(dst__, status__) \
- CsrWifiNmeProfileDeleteCfmSendTo(dst__, CSR_WIFI_NME_IFACEQUEUE, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeProfileDisconnectIndSend
-
- DESCRIPTION
- Indication generated from the NME (if an application subscribes to
- receive it) that informs that application that the current profile
- connection has disconnected. The indication will contain information
- about the APs via which it attempted to maintain the connection, i.e. in
- the case of failed roaming.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an
- interface
- connectAttemptsCount - Number of connection attempt elements provided with
- this primitive
- connectAttempts - Points to the list of connection attempt elements
- provided with this primitive
- Each element of the list provides information about
- an AP on which the connection attempt was made and
- the error that occurred during the attempt.
-
-*******************************************************************************/
-#define CsrWifiNmeProfileDisconnectIndCreate(msg__, dst__, src__, interfaceTag__, connectAttemptsCount__, connectAttempts__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeProfileDisconnectInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_PROFILE_DISCONNECT_IND, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->connectAttemptsCount = (connectAttemptsCount__); \
- msg__->connectAttempts = (connectAttempts__);
-
-#define CsrWifiNmeProfileDisconnectIndSendTo(dst__, src__, interfaceTag__, connectAttemptsCount__, connectAttempts__) \
- { \
- CsrWifiNmeProfileDisconnectInd *msg__; \
- CsrWifiNmeProfileDisconnectIndCreate(msg__, dst__, src__, interfaceTag__, connectAttemptsCount__, connectAttempts__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeProfileDisconnectIndSend(dst__, interfaceTag__, connectAttemptsCount__, connectAttempts__) \
- CsrWifiNmeProfileDisconnectIndSendTo(dst__, CSR_WIFI_NME_IFACEQUEUE, interfaceTag__, connectAttemptsCount__, connectAttempts__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeProfileOrderSetReqSend
-
- DESCRIPTION
- Defines the preferred order in which profiles present in the NME should be
- used during the NME auto-connect behaviour.
- If profileIdentitysCount == 0, it removes any existing preferred profile
- list already present in the NME, effectively disabling the auto-connect
- behaviour.
- NOTE: Profile identities that do not match any profile stored in the NME
- are ignored during the auto-connect procedure.
- NOTE: during auto-connect the NME will only attempt to join an existing
- adhoc network and it will never attempt to host an adhoc network; for
- hosting an adhoc network, use CSR_WIFI_NME_PROFILE_CONNECT_REQ
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an
- interface
- profileIdentitysCount - The number of profile identities in the list.
- profileIdentitys - Points to the list of profile identities.
-
-*******************************************************************************/
-#define CsrWifiNmeProfileOrderSetReqCreate(msg__, dst__, src__, interfaceTag__, profileIdentitysCount__, profileIdentitys__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeProfileOrderSetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_PROFILE_ORDER_SET_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->profileIdentitysCount = (profileIdentitysCount__); \
- msg__->profileIdentitys = (profileIdentitys__);
-
-#define CsrWifiNmeProfileOrderSetReqSendTo(dst__, src__, interfaceTag__, profileIdentitysCount__, profileIdentitys__) \
- { \
- CsrWifiNmeProfileOrderSetReq *msg__; \
- CsrWifiNmeProfileOrderSetReqCreate(msg__, dst__, src__, interfaceTag__, profileIdentitysCount__, profileIdentitys__); \
- CsrMsgTransport(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeProfileOrderSetReqSend(src__, interfaceTag__, profileIdentitysCount__, profileIdentitys__) \
- CsrWifiNmeProfileOrderSetReqSendTo(CSR_WIFI_NME_IFACEQUEUE, src__, interfaceTag__, profileIdentitysCount__, profileIdentitys__)
-
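-/* Illustrative usage sketch (not part of the original header): disabling the
- * NME auto-connect behaviour by setting an empty preferred profile list. The
- * application queue handle and u16 interface tag are assumptions; CsrSchedQid
- * is taken from csr_sched.h. */
-static void example_disable_auto_connect(CsrSchedQid appQueue, u16 interfaceTag)
-{
-    /* profileIdentitysCount == 0 removes any existing preferred profile
-     * list, so the NME no longer auto-connects. */
-    CsrWifiNmeProfileOrderSetReqSend(appQueue, interfaceTag, 0, NULL);
-}
-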
-/*******************************************************************************
-
- NAME
- CsrWifiNmeProfileOrderSetCfmSend
-
- DESCRIPTION
- Confirmation to UNIFI_NME_PROFILE_ORDER_SET.request.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Indicates the success or otherwise of the requested
- operation.
-
-*******************************************************************************/
-#define CsrWifiNmeProfileOrderSetCfmCreate(msg__, dst__, src__, interfaceTag__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeProfileOrderSetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_PROFILE_ORDER_SET_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__);
-
-#define CsrWifiNmeProfileOrderSetCfmSendTo(dst__, src__, interfaceTag__, status__) \
- { \
- CsrWifiNmeProfileOrderSetCfm *msg__; \
- CsrWifiNmeProfileOrderSetCfmCreate(msg__, dst__, src__, interfaceTag__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeProfileOrderSetCfmSend(dst__, interfaceTag__, status__) \
- CsrWifiNmeProfileOrderSetCfmSendTo(dst__, CSR_WIFI_NME_IFACEQUEUE, interfaceTag__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeProfileSetReqSend
-
- DESCRIPTION
- Creates or updates an existing profile in the NME that matches the unique
- identity of the profile. Each profile is identified by the combination of
- BSSID and SSID. The profile contains all the required credentials for
- attempting to connect to the network. Creating or updating a profile via
- the NME PROFILE SET REQ does NOT add the profile to the preferred profile
- list within the NME used for the NME auto-connect behaviour.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- profile - Specifies the identity and credentials of the network.
-
-*******************************************************************************/
-#define CsrWifiNmeProfileSetReqCreate(msg__, dst__, src__, profile__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeProfileSetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_PROFILE_SET_REQ, dst__, src__); \
- msg__->profile = (profile__);
-
-#define CsrWifiNmeProfileSetReqSendTo(dst__, src__, profile__) \
- { \
- CsrWifiNmeProfileSetReq *msg__; \
- CsrWifiNmeProfileSetReqCreate(msg__, dst__, src__, profile__); \
- CsrMsgTransport(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeProfileSetReqSend(src__, profile__) \
- CsrWifiNmeProfileSetReqSendTo(CSR_WIFI_NME_IFACEQUEUE, src__, profile__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeProfileSetCfmSend
-
- DESCRIPTION
- Reports the status of the NME PROFILE SET REQ; the request will only fail
-    if the details specified in the profile contain an invalid combination
-    of parameters, for example specifying the profile as cloaked but not
- specifying the SSID. The NME doesn't limit the number of profiles that
- may be created. The NME assumes that the entity configuring it is aware
- of the appropriate limits.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Indicates the success or otherwise of the requested operation.
-
-*******************************************************************************/
-#define CsrWifiNmeProfileSetCfmCreate(msg__, dst__, src__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeProfileSetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_PROFILE_SET_CFM, dst__, src__); \
- msg__->status = (status__);
-
-#define CsrWifiNmeProfileSetCfmSendTo(dst__, src__, status__) \
- { \
- CsrWifiNmeProfileSetCfm *msg__; \
- CsrWifiNmeProfileSetCfmCreate(msg__, dst__, src__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeProfileSetCfmSend(dst__, status__) \
- CsrWifiNmeProfileSetCfmSendTo(dst__, CSR_WIFI_NME_IFACEQUEUE, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeProfileUpdateIndSend
-
- DESCRIPTION
- Indication generated from the NME (if an application subscribes to
-    receive it) that informs the application that the contained profile has
-    changed.
-    For example, either the credentials (e.g. an EAP-FAST PAC file) or the
-    session data within the profile has changed.
- It is up to the application whether it stores this updated profile or
- not.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- profile - The identity and credentials of the network.
-
-*******************************************************************************/
-#define CsrWifiNmeProfileUpdateIndCreate(msg__, dst__, src__, interfaceTag__, profile__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeProfileUpdateInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_PROFILE_UPDATE_IND, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->profile = (profile__);
-
-#define CsrWifiNmeProfileUpdateIndSendTo(dst__, src__, interfaceTag__, profile__) \
- { \
- CsrWifiNmeProfileUpdateInd *msg__; \
- CsrWifiNmeProfileUpdateIndCreate(msg__, dst__, src__, interfaceTag__, profile__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeProfileUpdateIndSend(dst__, interfaceTag__, profile__) \
- CsrWifiNmeProfileUpdateIndSendTo(dst__, CSR_WIFI_NME_IFACEQUEUE, interfaceTag__, profile__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeSimGsmAuthIndSend
-
- DESCRIPTION
- Indication generated from the NME (if an application subscribes to
- receive it) that requests the UICC Manager to perform a GSM
- authentication on behalf of the NME. This indication is generated when
- the NME is attempting to connect to a profile configured for EAP-SIM. An
- application MUST register to receive this indication for the NME to
- support the EAP-SIM credential types. Otherwise the NME has no route to
- obtain the information from the UICC. EAP-SIM authentication requires 2
- or 3 GSM authentication rounds and therefore 2 or 3 RANDS (GSM Random
- Challenges) are included.
-
- PARAMETERS
- queue - Destination Task Queue
-    randsLength - A GSM RAND is 16 bytes long, hence valid values are 32 (2
-                  RANDs) or 48 (3 RANDs).
- rands - 2 or 3 RANDs values.
-
-*******************************************************************************/
-#define CsrWifiNmeSimGsmAuthIndCreate(msg__, dst__, src__, randsLength__, rands__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeSimGsmAuthInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_SIM_GSM_AUTH_IND, dst__, src__); \
- msg__->randsLength = (randsLength__); \
- msg__->rands = (rands__);
-
-#define CsrWifiNmeSimGsmAuthIndSendTo(dst__, src__, randsLength__, rands__) \
- { \
- CsrWifiNmeSimGsmAuthInd *msg__; \
- CsrWifiNmeSimGsmAuthIndCreate(msg__, dst__, src__, randsLength__, rands__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeSimGsmAuthIndSend(dst__, randsLength__, rands__) \
- CsrWifiNmeSimGsmAuthIndSendTo(dst__, CSR_WIFI_NME_IFACEQUEUE, randsLength__, rands__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeSimGsmAuthResSend
-
- DESCRIPTION
- Response from the application that received the NME SIM GSM AUTH IND. For
- each GSM authentication round a GSM Ciphering key (Kc) and a signed
- response (SRES) are produced. Since 2 or 3 GSM authentication rounds are
- used the 2 or 3 Kc's obtained respectively are combined into one buffer
- and similarly the 2 or 3 SRES's obtained are combined into another
- buffer. The order of Kc values (SRES values respectively) in their buffer
- is the same as that of their corresponding RAND values in the incoming
- indication.
-
- PARAMETERS
- status - Indicates the outcome of the requested operation:
- STATUS_SUCCESS or STATUS_ERROR
- kcsLength - Length in Bytes of Kc buffer. Legal values are: 16 or 24.
- kcs - Kc buffer holding 2 or 3 Kc values.
- sresLength - Length in Bytes of SRES buffer. Legal values are: 8 or 12.
- sres - SRES buffer holding 2 or 3 SRES values.
-
-*******************************************************************************/
-#define CsrWifiNmeSimGsmAuthResCreate(msg__, dst__, src__, status__, kcsLength__, kcs__, sresLength__, sres__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeSimGsmAuthRes), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_SIM_GSM_AUTH_RES, dst__, src__); \
- msg__->status = (status__); \
- msg__->kcsLength = (kcsLength__); \
- msg__->kcs = (kcs__); \
- msg__->sresLength = (sresLength__); \
- msg__->sres = (sres__);
-
-#define CsrWifiNmeSimGsmAuthResSendTo(dst__, src__, status__, kcsLength__, kcs__, sresLength__, sres__) \
- { \
- CsrWifiNmeSimGsmAuthRes *msg__; \
- CsrWifiNmeSimGsmAuthResCreate(msg__, dst__, src__, status__, kcsLength__, kcs__, sresLength__, sres__); \
- CsrMsgTransport(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeSimGsmAuthResSend(src__, status__, kcsLength__, kcs__, sresLength__, sres__) \
- CsrWifiNmeSimGsmAuthResSendTo(CSR_WIFI_NME_IFACEQUEUE, src__, status__, kcsLength__, kcs__, sresLength__, sres__)
-
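-/* Illustrative usage sketch (not part of the original header): packing the
- * per-round Kc (8 bytes each) and SRES (4 bytes each) values into single
- * buffers, in the same order as the RANDs of the indication, and sending the
- * response. The application queue and the run_gsm_algorithm() helper are
- * assumptions; error handling is omitted. */
-extern void run_gsm_algorithm(const u8 *rand, u8 *kc, u8 *sres); /* hypothetical */
-
-static void example_gsm_auth_response(CsrSchedQid appQueue,
-                                      const CsrWifiNmeSimGsmAuthInd *ind)
-{
-    u16 rounds = ind->randsLength / 16;         /* 2 or 3 GSM rounds      */
-    u8 *kcs  = kmalloc(rounds * 8, GFP_KERNEL); /* kcsLength is 16 or 24  */
-    u8 *sres = kmalloc(rounds * 4, GFP_KERNEL); /* sresLength is 8 or 12  */
-    u16 i;
-
-    for (i = 0; i < rounds; i++)
-        run_gsm_algorithm(&ind->rands[i * 16],  /* RAND for round i */
-                          &kcs[i * 8],          /* Kc for round i   */
-                          &sres[i * 4]);        /* SRES for round i */
-
-    CsrWifiNmeSimGsmAuthResSend(appQueue, CSR_RESULT_SUCCESS,
-                                rounds * 8, kcs, rounds * 4, sres);
-}
-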
-/*******************************************************************************
-
- NAME
- CsrWifiNmeSimImsiGetIndSend
-
- DESCRIPTION
- Indication generated from the NME (if an application subscribes to
- receive it) that requests the IMSI and UICC type from the UICC Manager.
- This indication is generated when the NME is attempting to connect to a
- profile configured for EAP-SIM/AKA. An application MUST register to
- receive this indication for the NME to support the EAP-SIM/AKA credential
- types. Otherwise the NME has no route to obtain the information from the
- UICC.
-
- PARAMETERS
- queue - Destination Task Queue
-
-*******************************************************************************/
-#define CsrWifiNmeSimImsiGetIndCreate(msg__, dst__, src__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeSimImsiGetInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_SIM_IMSI_GET_IND, dst__, src__);
-
-#define CsrWifiNmeSimImsiGetIndSendTo(dst__, src__) \
- { \
- CsrWifiNmeSimImsiGetInd *msg__; \
- CsrWifiNmeSimImsiGetIndCreate(msg__, dst__, src__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeSimImsiGetIndSend(dst__) \
- CsrWifiNmeSimImsiGetIndSendTo(dst__, CSR_WIFI_NME_IFACEQUEUE)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeSimImsiGetResSend
-
- DESCRIPTION
- Response from the application that received the NME SIM IMSI GET IND.
-
- PARAMETERS
- status - Indicates the outcome of the requested operation: STATUS_SUCCESS
- or STATUS_ERROR.
- imsi - The value of the IMSI obtained from the UICC.
- cardType - The UICC type (GSM only (SIM), UMTS only (USIM), Both).
-
-*******************************************************************************/
-#define CsrWifiNmeSimImsiGetResCreate(msg__, dst__, src__, status__, imsi__, cardType__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeSimImsiGetRes), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_SIM_IMSI_GET_RES, dst__, src__); \
- msg__->status = (status__); \
- msg__->imsi = (imsi__); \
- msg__->cardType = (cardType__);
-
-#define CsrWifiNmeSimImsiGetResSendTo(dst__, src__, status__, imsi__, cardType__) \
- { \
- CsrWifiNmeSimImsiGetRes *msg__; \
- CsrWifiNmeSimImsiGetResCreate(msg__, dst__, src__, status__, imsi__, cardType__); \
- CsrMsgTransport(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeSimImsiGetResSend(src__, status__, imsi__, cardType__) \
- CsrWifiNmeSimImsiGetResSendTo(CSR_WIFI_NME_IFACEQUEUE, src__, status__, imsi__, cardType__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeSimUmtsAuthIndSend
-
- DESCRIPTION
- Indication generated from the NME (if an application subscribes to
- receive it) that requests the UICC Manager to perform a UMTS
- authentication on behalf of the NME. This indication is generated when
- the NME is attempting to connect to a profile configured for EAP-AKA. An
- application MUST register to receive this indication for the NME to
- support the EAP-AKA credential types. Otherwise the NME has no route to
- obtain the information from the USIM. EAP-AKA requires one UMTS
-    authentication round and therefore only one RAND value and one AUTN
-    value are included.
-
- PARAMETERS
- queue - Destination Task Queue
- rand - UMTS RAND value.
- autn - UMTS AUTN value.
-
-*******************************************************************************/
-#define CsrWifiNmeSimUmtsAuthIndCreate(msg__, dst__, src__, rand__, autn__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeSimUmtsAuthInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_SIM_UMTS_AUTH_IND, dst__, src__); \
- memcpy(msg__->rand, (rand__), sizeof(u8) * 16); \
- memcpy(msg__->autn, (autn__), sizeof(u8) * 16);
-
-#define CsrWifiNmeSimUmtsAuthIndSendTo(dst__, src__, rand__, autn__) \
- { \
- CsrWifiNmeSimUmtsAuthInd *msg__; \
- CsrWifiNmeSimUmtsAuthIndCreate(msg__, dst__, src__, rand__, autn__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeSimUmtsAuthIndSend(dst__, rand__, autn__) \
- CsrWifiNmeSimUmtsAuthIndSendTo(dst__, CSR_WIFI_NME_IFACEQUEUE, rand__, autn__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeSimUmtsAuthResSend
-
- DESCRIPTION
- Response from the application that received the NME SIM UMTS AUTH IND.
- The values of umtsCipherKey, umtsIntegrityKey, resParameterLength and
-    resParameter are only meaningful when result = UMTS_AUTH_RESULT_SUCCESS.
- The value of auts is only meaningful when
- result=UMTS_AUTH_RESULT_SYNC_FAIL.
-
- PARAMETERS
- status - Indicates the outcome of the requested operation:
- STATUS_SUCCESS or STATUS_ERROR.
- result - The result of UMTS authentication as performed by the
- UICC which could be: Success, Authentication Reject or
- Synchronisation Failure. For all these 3 outcomes the
- value of status is success.
- umtsCipherKey - The UMTS Cipher Key as calculated and returned by the
- UICC.
- umtsIntegrityKey - The UMTS Integrity Key as calculated and returned by
- the UICC.
- resParameterLength - The length (in bytes) of the RES parameter (min=4; max
- = 16).
- resParameter - The RES parameter as calculated and returned by the
- UICC.
- auts - The AUTS parameter as calculated and returned by the
- UICC.
-
-*******************************************************************************/
-#define CsrWifiNmeSimUmtsAuthResCreate(msg__, dst__, src__, status__, result__, umtsCipherKey__, umtsIntegrityKey__, resParameterLength__, resParameter__, auts__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeSimUmtsAuthRes), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_SIM_UMTS_AUTH_RES, dst__, src__); \
- msg__->status = (status__); \
- msg__->result = (result__); \
- memcpy(msg__->umtsCipherKey, (umtsCipherKey__), sizeof(u8) * 16); \
- memcpy(msg__->umtsIntegrityKey, (umtsIntegrityKey__), sizeof(u8) * 16); \
- msg__->resParameterLength = (resParameterLength__); \
- msg__->resParameter = (resParameter__); \
- memcpy(msg__->auts, (auts__), sizeof(u8) * 14);
-
-#define CsrWifiNmeSimUmtsAuthResSendTo(dst__, src__, status__, result__, umtsCipherKey__, umtsIntegrityKey__, resParameterLength__, resParameter__, auts__) \
- { \
- CsrWifiNmeSimUmtsAuthRes *msg__; \
- CsrWifiNmeSimUmtsAuthResCreate(msg__, dst__, src__, status__, result__, umtsCipherKey__, umtsIntegrityKey__, resParameterLength__, resParameter__, auts__); \
- CsrMsgTransport(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeSimUmtsAuthResSend(src__, status__, result__, umtsCipherKey__, umtsIntegrityKey__, resParameterLength__, resParameter__, auts__) \
- CsrWifiNmeSimUmtsAuthResSendTo(CSR_WIFI_NME_IFACEQUEUE, src__, status__, result__, umtsCipherKey__, umtsIntegrityKey__, resParameterLength__, resParameter__, auts__)
-
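-/* Illustrative usage sketch (not part of the original header): responding
- * after the UICC has successfully run the UMTS algorithm. On success the
- * CK/IK/RES values are returned and the AUTS field is ignored; on a
- * synchronisation failure only auts would be meaningful. The application
- * queue and the sources of the parameters are assumptions. */
-static void example_umts_auth_success(CsrSchedQid appQueue,
-                                      const u8 ck[16], const u8 ik[16],
-                                      u8 *res, u8 resLength)
-{
-    u8 auts[14] = { 0 };    /* not meaningful when result == SUCCESS */
-
-    CsrWifiNmeSimUmtsAuthResSend(appQueue, CSR_RESULT_SUCCESS,
-                                 CSR_WIFI_NME_UMTS_AUTH_RESULT_SUCCESS,
-                                 ck, ik, resLength, res, auts);
-}
-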
-/*******************************************************************************
-
- NAME
- CsrWifiNmeWpsCancelReqSend
-
- DESCRIPTION
- Requests the NME to cancel any WPS procedure that it is currently
- performing. This includes WPS registrar activities started because of
- CSR_WIFI_NME_AP_REGISTER.request
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
-
-*******************************************************************************/
-#define CsrWifiNmeWpsCancelReqCreate(msg__, dst__, src__, interfaceTag__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeWpsCancelReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_WPS_CANCEL_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__);
-
-#define CsrWifiNmeWpsCancelReqSendTo(dst__, src__, interfaceTag__) \
- { \
- CsrWifiNmeWpsCancelReq *msg__; \
- CsrWifiNmeWpsCancelReqCreate(msg__, dst__, src__, interfaceTag__); \
- CsrMsgTransport(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeWpsCancelReqSend(src__, interfaceTag__) \
- CsrWifiNmeWpsCancelReqSendTo(CSR_WIFI_NME_IFACEQUEUE, src__, interfaceTag__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeWpsCancelCfmSend
-
- DESCRIPTION
-    Reports the status of the NME WPS CANCEL REQ; the request is always
-    successful.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Only returns CSR_WIFI_NME_STATUS_SUCCESS
-
-*******************************************************************************/
-#define CsrWifiNmeWpsCancelCfmCreate(msg__, dst__, src__, interfaceTag__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeWpsCancelCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_WPS_CANCEL_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__);
-
-#define CsrWifiNmeWpsCancelCfmSendTo(dst__, src__, interfaceTag__, status__) \
- { \
- CsrWifiNmeWpsCancelCfm *msg__; \
- CsrWifiNmeWpsCancelCfmCreate(msg__, dst__, src__, interfaceTag__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeWpsCancelCfmSend(dst__, interfaceTag__, status__) \
- CsrWifiNmeWpsCancelCfmSendTo(dst__, CSR_WIFI_NME_IFACEQUEUE, interfaceTag__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeWpsCfmSend
-
- DESCRIPTION
- Reports the status of the NME WPS REQ.
- If CSR_WIFI_NME_STATUS_SUCCESS, the profile parameter contains the
- identity and credentials of the AP.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Indicates the success or otherwise of the requested
- operation.
- profile - This parameter is relevant only if
- status==CSR_WIFI_NME_STATUS_SUCCESS.
- The identity and credentials of the network.
-
-*******************************************************************************/
-#define CsrWifiNmeWpsCfmCreate(msg__, dst__, src__, interfaceTag__, status__, profile__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeWpsCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_WPS_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__); \
- msg__->profile = (profile__);
-
-#define CsrWifiNmeWpsCfmSendTo(dst__, src__, interfaceTag__, status__, profile__) \
- { \
- CsrWifiNmeWpsCfm *msg__; \
- CsrWifiNmeWpsCfmCreate(msg__, dst__, src__, interfaceTag__, status__, profile__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeWpsCfmSend(dst__, interfaceTag__, status__, profile__) \
- CsrWifiNmeWpsCfmSendTo(dst__, CSR_WIFI_NME_IFACEQUEUE, interfaceTag__, status__, profile__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeWpsConfigSetReqSend
-
- DESCRIPTION
-    This primitive passes the WPS information for the device to the NME. It
-    may be accepted only if no interface is active.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- wpsConfig - WPS config.
-
-*******************************************************************************/
-#define CsrWifiNmeWpsConfigSetReqCreate(msg__, dst__, src__, wpsConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeWpsConfigSetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_WPS_CONFIG_SET_REQ, dst__, src__); \
- msg__->wpsConfig = (wpsConfig__);
-
-#define CsrWifiNmeWpsConfigSetReqSendTo(dst__, src__, wpsConfig__) \
- { \
- CsrWifiNmeWpsConfigSetReq *msg__; \
- CsrWifiNmeWpsConfigSetReqCreate(msg__, dst__, src__, wpsConfig__); \
- CsrMsgTransport(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeWpsConfigSetReqSend(src__, wpsConfig__) \
- CsrWifiNmeWpsConfigSetReqSendTo(CSR_WIFI_NME_IFACEQUEUE, src__, wpsConfig__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeWpsConfigSetCfmSend
-
- DESCRIPTION
-    Confirmation to the NME WPS CONFIG SET REQ; reports the status of the
-    request.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Status of the request.
-
-*******************************************************************************/
-#define CsrWifiNmeWpsConfigSetCfmCreate(msg__, dst__, src__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeWpsConfigSetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_WPS_CONFIG_SET_CFM, dst__, src__); \
- msg__->status = (status__);
-
-#define CsrWifiNmeWpsConfigSetCfmSendTo(dst__, src__, status__) \
- { \
- CsrWifiNmeWpsConfigSetCfm *msg__; \
- CsrWifiNmeWpsConfigSetCfmCreate(msg__, dst__, src__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeWpsConfigSetCfmSend(dst__, status__) \
- CsrWifiNmeWpsConfigSetCfmSendTo(dst__, CSR_WIFI_NME_IFACEQUEUE, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeWpsReqSend
-
- DESCRIPTION
- Requests the NME to look for WPS enabled APs and attempt to perform WPS
- to determine the appropriate security credentials to connect to the AP.
-    If the PIN == '00000000' then 'push button mode' is indicated; otherwise
-    the PIN has to match that of the AP. A 4-digit PIN is passed by sending
-    the PIN digits in pin[0]..pin[3] and filling the rest of the contents
-    with '-'.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
- pin - PIN value.
- ssid - Service Set identifier
- bssid - ID of Basic Service Set for which a WPS connection attempt is
- being made.
-
-*******************************************************************************/
-#define CsrWifiNmeWpsReqCreate(msg__, dst__, src__, interfaceTag__, pin__, ssid__, bssid__) \
- msg__ = kmalloc(sizeof(CsrWifiNmeWpsReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_NME_PRIM, CSR_WIFI_NME_WPS_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- memcpy(msg__->pin, (pin__), sizeof(u8) * 8); \
- msg__->ssid = (ssid__); \
- msg__->bssid = (bssid__);
-
-#define CsrWifiNmeWpsReqSendTo(dst__, src__, interfaceTag__, pin__, ssid__, bssid__) \
- { \
- CsrWifiNmeWpsReq *msg__; \
- CsrWifiNmeWpsReqCreate(msg__, dst__, src__, interfaceTag__, pin__, ssid__, bssid__); \
- CsrMsgTransport(dst__, CSR_WIFI_NME_PRIM, msg__); \
- }
-
-#define CsrWifiNmeWpsReqSend(src__, interfaceTag__, pin__, ssid__, bssid__) \
- CsrWifiNmeWpsReqSendTo(CSR_WIFI_NME_IFACEQUEUE, src__, interfaceTag__, pin__, ssid__, bssid__)
-
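-/* Illustrative usage sketch (not part of the original header): starting WPS
- * in push-button mode, with the alternative 4-digit PIN layout shown in a
- * comment. The SSID, BSSID and application queue values are assumptions. */
-static void example_wps_push_button(CsrSchedQid appQueue, u16 interfaceTag,
-                                    CsrWifiSsid ssid, CsrWifiMacAddress bssid)
-{
-    u8 pbcPin[8] = { '0', '0', '0', '0', '0', '0', '0', '0' };
-
-    /* For a 4-digit PIN the layout would instead be, for example:
-     *     u8 pin[8] = { '1', '2', '3', '4', '-', '-', '-', '-' };
-     */
-    CsrWifiNmeWpsReqSend(appQueue, interfaceTag, pbcPin, ssid, bssid);
-}
-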
-#endif /* CSR_WIFI_NME_LIB_H__ */
diff --git a/drivers/staging/csr/csr_wifi_nme_prim.h b/drivers/staging/csr/csr_wifi_nme_prim.h
deleted file mode 100644
index 9a7927a..0000000
--- a/drivers/staging/csr/csr_wifi_nme_prim.h
+++ /dev/null
@@ -1,1657 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#ifndef CSR_WIFI_NME_PRIM_H__
-#define CSR_WIFI_NME_PRIM_H__
-
-#include <linux/types.h>
-#include "csr_prim_defs.h"
-#include "csr_sched.h"
-#include "csr_wifi_common.h"
-#include "csr_result.h"
-#include "csr_wifi_fsm_event.h"
-#include "csr_wifi_sme_prim.h"
-
-#ifndef CSR_WIFI_NME_ENABLE
-#error CSR_WIFI_NME_ENABLE MUST be defined in order to use csr_wifi_nme_prim.h
-#endif
-
-#define CSR_WIFI_NME_PRIM (0x0424)
-
-typedef CsrPrim CsrWifiNmePrim;
-
-typedef void (*CsrWifiNmeFrameFreeFunction)(void *frame);
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeAuthMode
-
- DESCRIPTION
- WiFi Authentication Mode
-
- VALUES
- CSR_WIFI_NME_AUTH_MODE_80211_OPEN
- - Connects to an open system network (i.e. no authentication,
- no encryption) or to a WEP enabled network.
- CSR_WIFI_NME_AUTH_MODE_80211_SHARED
- - Connect to a WEP enabled network.
- CSR_WIFI_NME_AUTH_MODE_8021X_WPA
- - Connects to a WPA Enterprise enabled network.
- CSR_WIFI_NME_AUTH_MODE_8021X_WPAPSK
- - Connects to a WPA with Pre-Shared Key enabled network.
- CSR_WIFI_NME_AUTH_MODE_8021X_WPA2
- - Connects to a WPA2 Enterprise enabled network.
- CSR_WIFI_NME_AUTH_MODE_8021X_WPA2PSK
- - Connects to a WPA2 with Pre-Shared Key enabled network.
- CSR_WIFI_NME_AUTH_MODE_8021X_CCKM
- - Connects to a CCKM enabled network.
- CSR_WIFI_NME_AUTH_MODE_WAPI_WAI
- - Connects to a WAPI Enterprise enabled network.
- CSR_WIFI_NME_AUTH_MODE_WAPI_WAIPSK
- - Connects to a WAPI with Pre-Shared Key enabled network.
- CSR_WIFI_NME_AUTH_MODE_8021X_OTHER1X
- - For future use.
-
-*******************************************************************************/
-typedef u16 CsrWifiNmeAuthMode;
-#define CSR_WIFI_NME_AUTH_MODE_80211_OPEN ((CsrWifiNmeAuthMode) 0x0001)
-#define CSR_WIFI_NME_AUTH_MODE_80211_SHARED ((CsrWifiNmeAuthMode) 0x0002)
-#define CSR_WIFI_NME_AUTH_MODE_8021X_WPA ((CsrWifiNmeAuthMode) 0x0004)
-#define CSR_WIFI_NME_AUTH_MODE_8021X_WPAPSK ((CsrWifiNmeAuthMode) 0x0008)
-#define CSR_WIFI_NME_AUTH_MODE_8021X_WPA2 ((CsrWifiNmeAuthMode) 0x0010)
-#define CSR_WIFI_NME_AUTH_MODE_8021X_WPA2PSK ((CsrWifiNmeAuthMode) 0x0020)
-#define CSR_WIFI_NME_AUTH_MODE_8021X_CCKM ((CsrWifiNmeAuthMode) 0x0040)
-#define CSR_WIFI_NME_AUTH_MODE_WAPI_WAI ((CsrWifiNmeAuthMode) 0x0080)
-#define CSR_WIFI_NME_AUTH_MODE_WAPI_WAIPSK ((CsrWifiNmeAuthMode) 0x0100)
-#define CSR_WIFI_NME_AUTH_MODE_8021X_OTHER1X ((CsrWifiNmeAuthMode) 0x0200)
-
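-/* Illustrative sketch (not part of the original header): the authentication
- * mode values above are bit flags, so accepting either WPA-PSK or WPA2-PSK
- * networks can be expressed by OR-ing them; the credential structures below
- * carry such a combination as a CsrWifiSmeAuthModeMask. The macro name is an
- * example only. */
-#define EXAMPLE_PSK_AUTH_MODES \
-    (CSR_WIFI_NME_AUTH_MODE_8021X_WPAPSK | \
-     CSR_WIFI_NME_AUTH_MODE_8021X_WPA2PSK)
-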
-/*******************************************************************************
-
- NAME
- CsrWifiNmeBssType
-
- DESCRIPTION
- Type of BSS
-
- VALUES
- CSR_WIFI_NME_BSS_TYPE_INFRASTRUCTURE
- - Infrastructure BSS type where access to the network is via
- one or several Access Points.
- CSR_WIFI_NME_BSS_TYPE_ADHOC
- - Adhoc or Independent BSS Type where one Station acts as a
- host and future stations can join the adhoc network without
- needing an access point.
- CSR_WIFI_NME_BSS_TYPE_RESERVED
-               - To be in sync with the SME. This is not used.
- CSR_WIFI_NME_BSS_TYPE_P2P
- - P2P mode of operation.
-
-*******************************************************************************/
-typedef u8 CsrWifiNmeBssType;
-#define CSR_WIFI_NME_BSS_TYPE_INFRASTRUCTURE ((CsrWifiNmeBssType) 0x00)
-#define CSR_WIFI_NME_BSS_TYPE_ADHOC ((CsrWifiNmeBssType) 0x01)
-#define CSR_WIFI_NME_BSS_TYPE_RESERVED ((CsrWifiNmeBssType) 0x02)
-#define CSR_WIFI_NME_BSS_TYPE_P2P ((CsrWifiNmeBssType) 0x03)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeCcxOptionsMask
-
- DESCRIPTION
- Enumeration type defining possible mask values for setting CCX options.
-
- VALUES
- CSR_WIFI_NME_CCX_OPTION_NONE - No CCX option is set.
- CSR_WIFI_NME_CCX_OPTION_CCKM - CCX option cckm is set.
-
-*******************************************************************************/
-typedef u8 CsrWifiNmeCcxOptionsMask;
-#define CSR_WIFI_NME_CCX_OPTION_NONE ((CsrWifiNmeCcxOptionsMask) 0x00)
-#define CSR_WIFI_NME_CCX_OPTION_CCKM ((CsrWifiNmeCcxOptionsMask) 0x01)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeConfigAction
-
- DESCRIPTION
-
- VALUES
- CSR_WIFI_PIN_ENTRY_PUSH_BUTTON -
- CSR_WIFI_PIN_ENTRY_DISPLAY_PIN -
- CSR_WIFI_PIN_ENTRY_ENTER_PIN -
-
-*******************************************************************************/
-typedef u8 CsrWifiNmeConfigAction;
-#define CSR_WIFI_PIN_ENTRY_PUSH_BUTTON ((CsrWifiNmeConfigAction) 0x00)
-#define CSR_WIFI_PIN_ENTRY_DISPLAY_PIN ((CsrWifiNmeConfigAction) 0x01)
-#define CSR_WIFI_PIN_ENTRY_ENTER_PIN ((CsrWifiNmeConfigAction) 0x02)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeConnectionStatus
-
- DESCRIPTION
- Indicate the NME Connection Status when connecting or when disconnecting
-
- VALUES
- CSR_WIFI_NME_CONNECTION_STATUS_CONNECTION_STATUS_DISCONNECTED
- - NME is disconnected.
- CSR_WIFI_NME_CONNECTION_STATUS_CONNECTION_STATUS_CONNECTING
- - NME is in the process of connecting.
- CSR_WIFI_NME_CONNECTION_STATUS_CONNECTION_STATUS_AUTHENTICATING
- - NME is in the authentication stage of a connection attempt.
- CSR_WIFI_NME_CONNECTION_STATUS_CONNECTION_STATUS_CONNECTED
- - NME is connected.
- CSR_WIFI_NME_CONNECTION_STATUS_CONNECTION_STATUS_DISCONNECTING
- - NME is in the process of disconnecting.
-
-*******************************************************************************/
-typedef u8 CsrWifiNmeConnectionStatus;
-#define CSR_WIFI_NME_CONNECTION_STATUS_CONNECTION_STATUS_DISCONNECTED ((CsrWifiNmeConnectionStatus) 0x00)
-#define CSR_WIFI_NME_CONNECTION_STATUS_CONNECTION_STATUS_CONNECTING ((CsrWifiNmeConnectionStatus) 0x01)
-#define CSR_WIFI_NME_CONNECTION_STATUS_CONNECTION_STATUS_AUTHENTICATING ((CsrWifiNmeConnectionStatus) 0x02)
-#define CSR_WIFI_NME_CONNECTION_STATUS_CONNECTION_STATUS_CONNECTED ((CsrWifiNmeConnectionStatus) 0x03)
-#define CSR_WIFI_NME_CONNECTION_STATUS_CONNECTION_STATUS_DISCONNECTING ((CsrWifiNmeConnectionStatus) 0x04)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeCredentialType
-
- DESCRIPTION
- NME Credential Types
-
- VALUES
- CSR_WIFI_NME_CREDENTIAL_TYPE_OPEN_SYSTEM
- - Credential Type Open System.
- CSR_WIFI_NME_CREDENTIAL_TYPE_WEP64
- - Credential Type WEP-64
- CSR_WIFI_NME_CREDENTIAL_TYPE_WEP128
- - Credential Type WEP-128
- CSR_WIFI_NME_CREDENTIAL_TYPE_WPA_PSK
- - Credential Type WPA Pre-Shared Key
- CSR_WIFI_NME_CREDENTIAL_TYPE_WPA_PASSPHRASE
- - Credential Type WPA pass phrase
- CSR_WIFI_NME_CREDENTIAL_TYPE_WPA2_PSK
- - Credential Type WPA2 Pre-Shared Key.
- CSR_WIFI_NME_CREDENTIAL_TYPE_WPA2_PASSPHRASE
- - Credential Type WPA2 pass phrase
- CSR_WIFI_NME_CREDENTIAL_TYPE_WAPI_PSK
- - Credential Type WAPI Pre-Shared Key.
- CSR_WIFI_NME_CREDENTIAL_TYPE_WAPI_PASSPHRASE
- - Credential Type WAPI pass phrase
- CSR_WIFI_NME_CREDENTIAL_TYPE_WAPI
- - Credential Type WAPI certificates
- CSR_WIFI_NME_CREDENTIAL_TYPE_8021X
- - Credential Type 802.1X: the associated type supports
- FAST/LEAP/TLS/TTLS/PEAP/etc.
-
-*******************************************************************************/
-typedef u16 CsrWifiNmeCredentialType;
-#define CSR_WIFI_NME_CREDENTIAL_TYPE_OPEN_SYSTEM ((CsrWifiNmeCredentialType) 0x0000)
-#define CSR_WIFI_NME_CREDENTIAL_TYPE_WEP64 ((CsrWifiNmeCredentialType) 0x0001)
-#define CSR_WIFI_NME_CREDENTIAL_TYPE_WEP128 ((CsrWifiNmeCredentialType) 0x0002)
-#define CSR_WIFI_NME_CREDENTIAL_TYPE_WPA_PSK ((CsrWifiNmeCredentialType) 0x0003)
-#define CSR_WIFI_NME_CREDENTIAL_TYPE_WPA_PASSPHRASE ((CsrWifiNmeCredentialType) 0x0004)
-#define CSR_WIFI_NME_CREDENTIAL_TYPE_WPA2_PSK ((CsrWifiNmeCredentialType) 0x0005)
-#define CSR_WIFI_NME_CREDENTIAL_TYPE_WPA2_PASSPHRASE ((CsrWifiNmeCredentialType) 0x0006)
-#define CSR_WIFI_NME_CREDENTIAL_TYPE_WAPI_PSK ((CsrWifiNmeCredentialType) 0x0007)
-#define CSR_WIFI_NME_CREDENTIAL_TYPE_WAPI_PASSPHRASE ((CsrWifiNmeCredentialType) 0x0008)
-#define CSR_WIFI_NME_CREDENTIAL_TYPE_WAPI ((CsrWifiNmeCredentialType) 0x0009)
-#define CSR_WIFI_NME_CREDENTIAL_TYPE_8021X ((CsrWifiNmeCredentialType) 0x000A)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeEapMethod
-
- DESCRIPTION
- Outer EAP method with possibly inner method.
-
- VALUES
- CSR_WIFI_NME_EAP_METHOD_TLS
- - EAP-TLS Method.
- CSR_WIFI_NME_EAP_METHOD_TTLS_MSCHAPV2
- - EAP-TTLS Method with MSCHAPV2.
- CSR_WIFI_NME_EAP_METHOD_PEAP_GTC
- - EAP-PEAP Method with GTC.
- CSR_WIFI_NME_EAP_METHOD_PEAP_MSCHAPV2
- - EAP-PEAP Method with MSCHAPV2.
- CSR_WIFI_NME_EAP_METHOD_SIM
- - EAP-SIM Method.
- CSR_WIFI_NME_EAP_METHOD_AKA
- - EAP-AKA Method.
- CSR_WIFI_NME_EAP_METHOD_FAST_GTC
- - EAP-FAST Method with GTC.
- CSR_WIFI_NME_EAP_METHOD_FAST_MSCHAPV2
- - EAP-FAST Method with MSCHAPV2.
- CSR_WIFI_NME_EAP_METHOD_LEAP
- - EAP-LEAP Method.
-
-*******************************************************************************/
-typedef u16 CsrWifiNmeEapMethod;
-#define CSR_WIFI_NME_EAP_METHOD_TLS ((CsrWifiNmeEapMethod) 0x0001)
-#define CSR_WIFI_NME_EAP_METHOD_TTLS_MSCHAPV2 ((CsrWifiNmeEapMethod) 0x0002)
-#define CSR_WIFI_NME_EAP_METHOD_PEAP_GTC ((CsrWifiNmeEapMethod) 0x0004)
-#define CSR_WIFI_NME_EAP_METHOD_PEAP_MSCHAPV2 ((CsrWifiNmeEapMethod) 0x0008)
-#define CSR_WIFI_NME_EAP_METHOD_SIM ((CsrWifiNmeEapMethod) 0x0010)
-#define CSR_WIFI_NME_EAP_METHOD_AKA ((CsrWifiNmeEapMethod) 0x0020)
-#define CSR_WIFI_NME_EAP_METHOD_FAST_GTC ((CsrWifiNmeEapMethod) 0x0040)
-#define CSR_WIFI_NME_EAP_METHOD_FAST_MSCHAPV2 ((CsrWifiNmeEapMethod) 0x0080)
-#define CSR_WIFI_NME_EAP_METHOD_LEAP ((CsrWifiNmeEapMethod) 0x0100)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeEncryption
-
- DESCRIPTION
- WiFi Encryption method
-
- VALUES
- CSR_WIFI_NME_ENCRYPTION_CIPHER_NONE
-               - No encryption set.
- CSR_WIFI_NME_ENCRYPTION_CIPHER_PAIRWISE_WEP40
-                   - 40-bit WEP key for peer to peer communication.
-        CSR_WIFI_NME_ENCRYPTION_CIPHER_PAIRWISE_WEP104
-                   - 104-bit WEP key for peer to peer communication.
- CSR_WIFI_NME_ENCRYPTION_CIPHER_PAIRWISE_TKIP
- - TKIP key for peer to peer communication.
- CSR_WIFI_NME_ENCRYPTION_CIPHER_PAIRWISE_CCMP
- - CCMP key for peer to peer communication.
- CSR_WIFI_NME_ENCRYPTION_CIPHER_PAIRWISE_SMS4
- - SMS4 key for peer to peer communication.
- CSR_WIFI_NME_ENCRYPTION_CIPHER_GROUP_WEP40
-                   - 40-bit WEP key for broadcast messages.
-        CSR_WIFI_NME_ENCRYPTION_CIPHER_GROUP_WEP104
-                   - 104-bit WEP key for broadcast messages.
- CSR_WIFI_NME_ENCRYPTION_CIPHER_GROUP_TKIP
- - TKIP key for broadcast messages.
- CSR_WIFI_NME_ENCRYPTION_CIPHER_GROUP_CCMP
- - CCMP key for broadcast messages
- CSR_WIFI_NME_ENCRYPTION_CIPHER_GROUP_SMS4
- - SMS4 key for broadcast messages.
-
-*******************************************************************************/
-typedef u16 CsrWifiNmeEncryption;
-#define CSR_WIFI_NME_ENCRYPTION_CIPHER_NONE ((CsrWifiNmeEncryption) 0x0000)
-#define CSR_WIFI_NME_ENCRYPTION_CIPHER_PAIRWISE_WEP40 ((CsrWifiNmeEncryption) 0x0001)
-#define CSR_WIFI_NME_ENCRYPTION_CIPHER_PAIRWISE_WEP104 ((CsrWifiNmeEncryption) 0x0002)
-#define CSR_WIFI_NME_ENCRYPTION_CIPHER_PAIRWISE_TKIP ((CsrWifiNmeEncryption) 0x0004)
-#define CSR_WIFI_NME_ENCRYPTION_CIPHER_PAIRWISE_CCMP ((CsrWifiNmeEncryption) 0x0008)
-#define CSR_WIFI_NME_ENCRYPTION_CIPHER_PAIRWISE_SMS4 ((CsrWifiNmeEncryption) 0x0010)
-#define CSR_WIFI_NME_ENCRYPTION_CIPHER_GROUP_WEP40 ((CsrWifiNmeEncryption) 0x0020)
-#define CSR_WIFI_NME_ENCRYPTION_CIPHER_GROUP_WEP104 ((CsrWifiNmeEncryption) 0x0040)
-#define CSR_WIFI_NME_ENCRYPTION_CIPHER_GROUP_TKIP ((CsrWifiNmeEncryption) 0x0080)
-#define CSR_WIFI_NME_ENCRYPTION_CIPHER_GROUP_CCMP ((CsrWifiNmeEncryption) 0x0100)
-#define CSR_WIFI_NME_ENCRYPTION_CIPHER_GROUP_SMS4 ((CsrWifiNmeEncryption) 0x0200)
-
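-/* Illustrative sketch (not part of the original header): the cipher values
- * above are bit flags; a typical WPA2 selection (CCMP pairwise and group,
- * with TKIP group as a fallback) is expressed by OR-ing them. The combined
- * value has the same underlying type as CsrWifiNmeEncryptionMask, defined
- * further below. The macro name is an example only. */
-#define EXAMPLE_WPA2_CIPHERS \
-    (CSR_WIFI_NME_ENCRYPTION_CIPHER_PAIRWISE_CCMP | \
-     CSR_WIFI_NME_ENCRYPTION_CIPHER_GROUP_CCMP | \
-     CSR_WIFI_NME_ENCRYPTION_CIPHER_GROUP_TKIP)
-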
-/*******************************************************************************
-
- NAME
- CsrWifiNmeIndications
-
- DESCRIPTION
- NME indications
-
- VALUES
- CSR_WIFI_NME_INDICATIONS_IND_AP_STATION
- - NME AP Station Indication.
- CSR_WIFI_NME_INDICATIONS_IND_AP_STOP
- - NME AP Stop Indication.
- CSR_WIFI_NME_INDICATIONS_IND_SIM_UMTS_AUTH
- - NME UMTS Authentication Indication.
- CSR_WIFI_NME_INDICATIONS_IND_P2P_GROUP_START
- - NME P2P Group Start Indication.
- CSR_WIFI_NME_INDICATIONS_IND_P2P_GROUP_STATUS
- - NME P2P Group Status Indication.
- CSR_WIFI_NME_INDICATIONS_IND_P2P_GROUP_ROLE
- - NME P2P Group Role Indication.
- CSR_WIFI_NME_INDICATIONS_IND_PROFILE_DISCONNECT
- - NME Profile Disconnect Indication.
- CSR_WIFI_NME_INDICATIONS_IND_PROFILE_UPDATE
- - NME Profile Update Indication.
- CSR_WIFI_NME_INDICATIONS_IND_SIM_IMSI_GET
- - NME GET IMSI Indication.
- CSR_WIFI_NME_INDICATIONS_IND_SIM_GSM_AUTH
- - NME GSM Authentication Indication.
- CSR_WIFI_NME_INDICATIONS_ALL
- - Used to register for all available indications
-
-*******************************************************************************/
-typedef u32 CsrWifiNmeIndications;
-#define CSR_WIFI_NME_INDICATIONS_IND_AP_STATION ((CsrWifiNmeIndications) 0x00100000)
-#define CSR_WIFI_NME_INDICATIONS_IND_AP_STOP ((CsrWifiNmeIndications) 0x00200000)
-#define CSR_WIFI_NME_INDICATIONS_IND_SIM_UMTS_AUTH ((CsrWifiNmeIndications) 0x01000000)
-#define CSR_WIFI_NME_INDICATIONS_IND_P2P_GROUP_START ((CsrWifiNmeIndications) 0x02000000)
-#define CSR_WIFI_NME_INDICATIONS_IND_P2P_GROUP_STATUS ((CsrWifiNmeIndications) 0x04000000)
-#define CSR_WIFI_NME_INDICATIONS_IND_P2P_GROUP_ROLE ((CsrWifiNmeIndications) 0x08000000)
-#define CSR_WIFI_NME_INDICATIONS_IND_PROFILE_DISCONNECT ((CsrWifiNmeIndications) 0x10000000)
-#define CSR_WIFI_NME_INDICATIONS_IND_PROFILE_UPDATE ((CsrWifiNmeIndications) 0x20000000)
-#define CSR_WIFI_NME_INDICATIONS_IND_SIM_IMSI_GET ((CsrWifiNmeIndications) 0x40000000)
-#define CSR_WIFI_NME_INDICATIONS_IND_SIM_GSM_AUTH ((CsrWifiNmeIndications) 0x80000000)
-#define CSR_WIFI_NME_INDICATIONS_ALL ((CsrWifiNmeIndications) 0xFFFFFFFF)
-
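-/* Illustrative sketch (not part of the original header): the indication
- * values above are bit flags intended to be combined into a subscription
- * mask, for example the SIM-related indications needed for EAP-SIM/AKA
- * support; such a mask would typically accompany
- * CSR_WIFI_NME_EVENT_MASK_SET_REQ. The macro name is an example only. */
-#define EXAMPLE_SIM_INDICATIONS \
-    (CSR_WIFI_NME_INDICATIONS_IND_SIM_IMSI_GET | \
-     CSR_WIFI_NME_INDICATIONS_IND_SIM_GSM_AUTH | \
-     CSR_WIFI_NME_INDICATIONS_IND_SIM_UMTS_AUTH)
-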
-/*******************************************************************************
-
- NAME
- CsrWifiNmeSecError
-
- DESCRIPTION
-    NME Security Errors.
-    Placeholder for the security library abort reason.
-
- VALUES
- CSR_WIFI_NME_SEC_ERROR_SEC_ERROR_UNKNOWN
- - Unknown Security Error.
-
-*******************************************************************************/
-typedef u8 CsrWifiNmeSecError;
-#define CSR_WIFI_NME_SEC_ERROR_SEC_ERROR_UNKNOWN ((CsrWifiNmeSecError) 0x00)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeSimCardType
-
- DESCRIPTION
- (U)SIM Card (or UICC) types
-
- VALUES
- CSR_WIFI_NME_SIM_CARD_TYPE_2G - 2G SIM card, capable of performing GSM
- authentication only.
- CSR_WIFI_NME_SIM_CARD_TYPE_3G - UICC supporting USIM application, capable
- of performing UMTS authentication only.
- CSR_WIFI_NME_SIM_CARD_TYPE_2G3G - UICC supporting both USIM and SIM
- applications, capable of performing both
- UMTS and GSM authentications.
-
-*******************************************************************************/
-typedef u8 CsrWifiNmeSimCardType;
-#define CSR_WIFI_NME_SIM_CARD_TYPE_2G ((CsrWifiNmeSimCardType) 0x01)
-#define CSR_WIFI_NME_SIM_CARD_TYPE_3G ((CsrWifiNmeSimCardType) 0x02)
-#define CSR_WIFI_NME_SIM_CARD_TYPE_2G3G ((CsrWifiNmeSimCardType) 0x03)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeUmtsAuthResult
-
- DESCRIPTION
- Only relevant for UMTS Authentication. It indicates if the UICC has
- successfully authenticated the network or otherwise.
-
- VALUES
- CSR_WIFI_NME_UMTS_AUTH_RESULT_SUCCESS
- - Successful outcome from USIM indicating that the card has
- successfully authenticated the network.
- CSR_WIFI_NME_UMTS_AUTH_RESULT_SYNC_FAIL
-            - Unsuccessful outcome from USIM indicating that the card is
-              requesting the network to synchronise and retry. If no further
-              request is received, an NME timer will expire and the
-              authentication is aborted.
- CSR_WIFI_NME_UMTS_AUTH_RESULT_REJECT
- - Unsuccessful outcome from USIM indicating that the card has
- rejected the network and that the authentication is
- aborted.
-
-*******************************************************************************/
-typedef u8 CsrWifiNmeUmtsAuthResult;
-#define CSR_WIFI_NME_UMTS_AUTH_RESULT_SUCCESS ((CsrWifiNmeUmtsAuthResult) 0x00)
-#define CSR_WIFI_NME_UMTS_AUTH_RESULT_SYNC_FAIL ((CsrWifiNmeUmtsAuthResult) 0x01)
-#define CSR_WIFI_NME_UMTS_AUTH_RESULT_REJECT ((CsrWifiNmeUmtsAuthResult) 0x02)
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeWmmQosInfo
-
- DESCRIPTION
-    Defines bits for the QoS Info octet as defined in the WMM specification.
-    The values of this type are used across the NME/SME/Router APIs and they
-    must be kept consistent with the corresponding types in the .xml of the
-    other interfaces.
-
- VALUES
- CSR_WIFI_NME_WMM_QOS_INFO_AC_MAX_SP_ALL
- - WMM AP may deliver all buffered frames.
- CSR_WIFI_NME_WMM_QOS_INFO_AC_VO
- - To enable the triggering and delivery of QoS Voice.
- CSR_WIFI_NME_WMM_QOS_INFO_AC_VI
- - To enable the triggering and delivery of QoS Video.
- CSR_WIFI_NME_WMM_QOS_INFO_AC_BK
- - To enable the triggering and delivery of QoS Background.
- CSR_WIFI_NME_WMM_QOS_INFO_AC_BE
- - To enable the triggering and delivery of QoS Best Effort.
- CSR_WIFI_NME_WMM_QOS_INFO_AC_MAX_SP_TWO
- - WMM AP may deliver a maximum of 2 buffered frames per
- Unscheduled Service Period (USP).
- CSR_WIFI_NME_WMM_QOS_INFO_AC_MAX_SP_FOUR
- - WMM AP may deliver a maximum of 4 buffered frames per USP.
- CSR_WIFI_NME_WMM_QOS_INFO_AC_MAX_SP_SIX
- - WMM AP may deliver a maximum of 6 buffered frames per USP.
-
-*******************************************************************************/
-typedef u8 CsrWifiNmeWmmQosInfo;
-#define CSR_WIFI_NME_WMM_QOS_INFO_AC_MAX_SP_ALL ((CsrWifiNmeWmmQosInfo) 0x00)
-#define CSR_WIFI_NME_WMM_QOS_INFO_AC_VO ((CsrWifiNmeWmmQosInfo) 0x01)
-#define CSR_WIFI_NME_WMM_QOS_INFO_AC_VI ((CsrWifiNmeWmmQosInfo) 0x02)
-#define CSR_WIFI_NME_WMM_QOS_INFO_AC_BK ((CsrWifiNmeWmmQosInfo) 0x04)
-#define CSR_WIFI_NME_WMM_QOS_INFO_AC_BE ((CsrWifiNmeWmmQosInfo) 0x08)
-#define CSR_WIFI_NME_WMM_QOS_INFO_AC_MAX_SP_TWO ((CsrWifiNmeWmmQosInfo) 0x20)
-#define CSR_WIFI_NME_WMM_QOS_INFO_AC_MAX_SP_FOUR ((CsrWifiNmeWmmQosInfo) 0x40)
-#define CSR_WIFI_NME_WMM_QOS_INFO_AC_MAX_SP_SIX ((CsrWifiNmeWmmQosInfo) 0x60)
-
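-/* Illustrative sketch (not part of the original header): a QoS Info octet
- * combines the trigger/delivery-enabled access categories with one MAX_SP
- * value, e.g. voice and video enabled with at most two buffered frames
- * delivered per unscheduled service period. The macro name is an example
- * only. */
-#define EXAMPLE_WMM_QOS_INFO \
-    (CSR_WIFI_NME_WMM_QOS_INFO_AC_VO | \
-     CSR_WIFI_NME_WMM_QOS_INFO_AC_VI | \
-     CSR_WIFI_NME_WMM_QOS_INFO_AC_MAX_SP_TWO)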
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeEapMethodMask
-
- DESCRIPTION
- Mask type for use with the values defined by CsrWifiNmeEapMethod.
-
-*******************************************************************************/
-typedef u16 CsrWifiNmeEapMethodMask;
-/*******************************************************************************
-
- NAME
- CsrWifiNmeEncryptionMask
-
- DESCRIPTION
- Mask type for use with the values defined by CsrWifiNmeEncryption
-
-*******************************************************************************/
-typedef u16 CsrWifiNmeEncryptionMask;
-/*******************************************************************************
-
- NAME
- CsrWifiNmeIndicationsMask
-
- DESCRIPTION
- Mask type for use with the values defined by CsrWifiNmeIndications
-
-*******************************************************************************/
-typedef u32 CsrWifiNmeIndicationsMask;
-/*******************************************************************************
-
- NAME
- CsrWifiNmeNmeIndicationsMask
-
- DESCRIPTION
- Mask type for use with the values defined by CsrWifiNmeNmeIndications.
-    Used to overlap the unused portion of the unifi_IndicationsMask for
-    NME-specific indications.
-
-*******************************************************************************/
-typedef u32 CsrWifiNmeNmeIndicationsMask;
-/*******************************************************************************
-
- NAME
- CsrWifiNmeWmmQosInfoMask
-
- DESCRIPTION
- Mask type for use with the values defined by CsrWifiNmeWmmQosInfo
-
-*******************************************************************************/
-typedef u8 CsrWifiNmeWmmQosInfoMask;
-
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeEmpty
-
- DESCRIPTION
- Empty Structure to indicate that no credentials are available.
-
- MEMBERS
- empty - Only element of the empty structure (always set to 0).
-
-*******************************************************************************/
-typedef struct
-{
- u8 empty;
-} CsrWifiNmeEmpty;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmePassphrase
-
- DESCRIPTION
- Structure holding the ASCII Pass Phrase data.
-
- MEMBERS
- encryptionMode - Encryption type as defined in CsrWifiSmeEncryption.
- passphrase - Pass phrase ASCII value.
-
-*******************************************************************************/
-typedef struct
-{
- u16 encryptionMode;
- char *passphrase;
-} CsrWifiNmePassphrase;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmePsk
-
- DESCRIPTION
- Structure holding the Pre-Shared Key data.
-
- MEMBERS
- encryptionMode - Encryption type as defined in CsrWifiSmeEncryption.
- psk - Pre-Shared Key value.
-
-*******************************************************************************/
-typedef struct
-{
- u16 encryptionMode;
- u8 psk[32];
-} CsrWifiNmePsk;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeWapiCredentials
-
- DESCRIPTION
- Structure holding WAPI credentials data.
-
- MEMBERS
- certificateLength - Length in bytes of the following client certificate.
- certificate - The actual client certificate data (if present).
- DER/PEM format supported.
- privateKeyLength - Length in bytes of the following private key.
- privateKey - The actual private key. DER/PEM format.
- caCertificateLength - Length in bytes of the following certificate authority
- certificate.
- caCertificate - The actual certificate authority certificate data. If
-                          not supplied, the received certificate authority
-                          certificate is assumed to be valid; if present, the
-                          received certificate is validated against it. DER/PEM
- format supported.
-
-*******************************************************************************/
-typedef struct
-{
- u32 certificateLength;
- u8 *certificate;
- u16 privateKeyLength;
- u8 *privateKey;
- u32 caCertificateLength;
- u8 *caCertificate;
-} CsrWifiNmeWapiCredentials;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeConnectAttempt
-
- DESCRIPTION
- Structure holding Connection attempt data.
-
- MEMBERS
-    bssid         - ID of the Basic Service Set to which the connection
-                    attempt was made.
- status - Status returned to indicate the success or otherwise of the
- connection attempt.
- securityError - Security error status indicating the nature of the failure
- to connect.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiMacAddress bssid;
- CsrResult status;
- CsrWifiNmeSecError securityError;
-} CsrWifiNmeConnectAttempt;
-
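-/* Illustrative usage sketch (not part of the original header): walking the
- * connection-attempt list delivered with CSR_WIFI_NME_PROFILE_DISCONNECT_IND
- * (see csr_wifi_nme_lib.h above). The pr_info() logging and the exact field
- * widths are assumptions. */
-static void example_log_disconnect(const CsrWifiNmeProfileDisconnectInd *ind)
-{
-    unsigned int i;
-
-    for (i = 0; i < ind->connectAttemptsCount; i++) {
-        const CsrWifiNmeConnectAttempt *attempt = &ind->connectAttempts[i];
-
-        pr_info("attempt %u: status=%u securityError=%u\n",
-                i, (unsigned int)attempt->status,
-                (unsigned int)attempt->securityError);
-    }
-}
-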
-/*******************************************************************************
-
- NAME
- CsrWifiNmeEapCredentials
-
- DESCRIPTION
- Supports the use of multiple EAP methods via a single structure. The
- methods required are indicated by the value set in the eapMethodMask
-
- MEMBERS
- eapMethodMask
- - Bit mask of supported EAP methods
- Currently only supports the setting of one bit.
- Required for all the EAP methods.
- authMode
- - Bit mask representing the authentication types that may be
- supported by a suitable AP. An AP must support at least one
- of the authentication types specified to be considered for
- connection. Required for all EAP methods.
- encryptionMode
- - Bit mask representing the encryption types that may be
- supported by a suitable AP. An AP must support a suitable
- mix of the pairwise and group encryption types requested to
- be considered for connection. Required for all EAP methods.
- userName
- - User name. Required for all EAP methods except: SIM or AKA.
- userPassword
- - User Password. Required for all EAP methods except: TLS,
- SIM or AKA.
- authServerUserIdentity
- - Authentication server user Identity. Required for all EAP
- methods except: TLS, SIM, AKA or FAST.
- clientCertificateLength
- - Length in bytes of the following client certificate (if
- present). Only required for TLS.
- clientCertificate
- - The actual client certificate data (if present). Only
- required for TLS. DER/PEM format supported.
- certificateAuthorityCertificateLength
- - Length in bytes of the following certificate authority
- certificate (if present). Optional for TLS, TTLS, PEAP.
- certificateAuthorityCertificate
- - The actual certificate authority certificate data (if
- present). If not supplied the received certificate
- authority certificate is assumed to be valid, if present
- the received certificate is validated against it. Optional
- for TLS, TTLS, PEAP. DER/PEM format supported.
- privateKeyLength
- - Length in bytes of the following private key (if present).
- Only required for TLS.
- privateKey
- - The actual private key (if present). Only required for TLS.
-                    DER/PEM format; may be password protected.
- privateKeyPassword
- - Optional password to protect the private key.
- sessionLength
-                  - Length in bytes of the following session field. Supported
- for all EAP methods except: SIM or AKA.
- session
- - Session information to support faster re-authentication.
- Supported for all EAP methods except: SIM or AKA.
- allowPacProvisioning
-                  - If TRUE: PAC provisioning is allowed 'over-the-air';
- If FALSE: a PAC must be supplied.
- Only required for FAST.
- pacLength
-                  - Length of the following PAC field. If allowPacProvisioning is
- FALSE then the PAC MUST be supplied (i.e. non-zero). Only
- required for FAST.
- pac
- - The actual PAC data. If allowPacProvisioning is FALSE then
- the PAC MUST be supplied. Only required for FAST.
- pacPassword
- - Optional password to protect the PAC. Only required for
- FAST.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiNmeEapMethodMask eapMethodMask;
- CsrWifiSmeAuthModeMask authMode;
- CsrWifiNmeEncryptionMask encryptionMode;
- char *userName;
- char *userPassword;
- char *authServerUserIdentity;
- u32 clientCertificateLength;
- u8 *clientCertificate;
- u32 certificateAuthorityCertificateLength;
- u8 *certificateAuthorityCertificate;
- u16 privateKeyLength;
- u8 *privateKey;
- char *privateKeyPassword;
- u32 sessionLength;
- u8 *session;
- u8 allowPacProvisioning;
- u32 pacLength;
- u8 *pac;
- char *pacPassword;
-} CsrWifiNmeEapCredentials;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmePeerConfig
-
- DESCRIPTION
- Structure holding Peer Config data.
-
- MEMBERS
- p2pDeviceId -
- groupCapabilityMask -
- groupOwnerIntent -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiMacAddress p2pDeviceId;
- CsrWifiSmeP2pGroupCapabilityMask groupCapabilityMask;
- u8 groupOwnerIntent;
-} CsrWifiNmePeerConfig;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeProfileIdentity
-
- DESCRIPTION
-    The identity of a profile is defined as the unique combination of the
-    BSSID and SSID.
-
- MEMBERS
-    bssid - ID of the Basic Service Set, or the P2pDevice address of the GO,
-            for which a connection attempt was made.
- ssid - Service Set Id.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiMacAddress bssid;
- CsrWifiSsid ssid;
-} CsrWifiNmeProfileIdentity;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeWep128Keys
-
- DESCRIPTION
- Structure holding WEP Authentication Type and WEP keys that can be used
- when using WEP128.
-
- MEMBERS
- wepAuthType - Mask to select the WEP authentication type (Open or Shared)
- selectedWepKey - Index to one of the four keys below indicating the
- currently used WEP key.
- key1 - Value for key number 1.
- key2 - Value for key number 2.
- key3 - Value for key number 3.
- key4 - Value for key number 4.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiSmeAuthModeMask wepAuthType;
- u8 selectedWepKey;
- u8 key1[13];
- u8 key2[13];
- u8 key3[13];
- u8 key4[13];
-} CsrWifiNmeWep128Keys;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeWep64Keys
-
- DESCRIPTION
- Structure for holding WEP Authentication Type and WEP keys that can be
- used when using WEP64.
-
- MEMBERS
- wepAuthType - Mask to select the WEP authentication type (Open or Shared)
- selectedWepKey - Index to one of the four keys below indicating the
- currently used WEP key.
- key1 - Value for key number 1.
- key2 - Value for key number 2.
- key3 - Value for key number 3.
- key4 - Value for key number 4.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiSmeAuthModeMask wepAuthType;
- u8 selectedWepKey;
- u8 key1[5];
- u8 key2[5];
- u8 key3[5];
- u8 key4[5];
-} CsrWifiNmeWep64Keys;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeCredentials
-
- DESCRIPTION
- Structure containing the Credentials data.
-
- MEMBERS
- credentialType - Credential type value (as defined in the
- enumeration type).
- credential - Union containing credentials which depends on
- credentialType parameter.
- credentialeap -
- credentialwapiPassphrase -
- credentialwpa2Passphrase -
- credentialwpa2Psk -
- credentialwapiPsk -
- credentialwpaPassphrase -
- credentialwapi -
- credentialwep128Key -
- credentialwpaPsk -
- credentialopenSystem -
- credentialwep64Key -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiNmeCredentialType credentialType;
- union {
- CsrWifiNmeEapCredentials eap;
- CsrWifiNmePassphrase wapiPassphrase;
- CsrWifiNmePassphrase wpa2Passphrase;
- CsrWifiNmePsk wpa2Psk;
- CsrWifiNmePsk wapiPsk;
- CsrWifiNmePassphrase wpaPassphrase;
- CsrWifiNmeWapiCredentials wapi;
- CsrWifiNmeWep128Keys wep128Key;
- CsrWifiNmePsk wpaPsk;
- CsrWifiNmeEmpty openSystem;
- CsrWifiNmeWep64Keys wep64Key;
- } credential;
-} CsrWifiNmeCredentials;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeProfile
-
- DESCRIPTION
- Structure containing the Profile data.
-
- MEMBERS
- profileIdentity - Profile Identity.
- wmmQosInfoMask - Mask for WMM QoS information.
- bssType - Type of BSS (Infrastructure or Adhoc).
- channelNo - Channel Number.
-    ccxOptionsMask  - Options mask for Cisco Compatible Extensions.
- cloakedSsid - Flag to decide whether the SSID is cloaked (not
- transmitted) or not.
- credentials - Credentials data.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiNmeProfileIdentity profileIdentity;
- CsrWifiNmeWmmQosInfoMask wmmQosInfoMask;
- CsrWifiNmeBssType bssType;
- u8 channelNo;
- u8 ccxOptionsMask;
- u8 cloakedSsid;
- CsrWifiNmeCredentials credentials;
-} CsrWifiNmeProfile;
-
-
-/* Downstream */
-#define CSR_WIFI_NME_PRIM_DOWNSTREAM_LOWEST (0x0000)
-
-#define CSR_WIFI_NME_PROFILE_SET_REQ ((CsrWifiNmePrim) (0x0000 + CSR_WIFI_NME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_NME_PROFILE_DELETE_REQ ((CsrWifiNmePrim) (0x0001 + CSR_WIFI_NME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_NME_PROFILE_DELETE_ALL_REQ ((CsrWifiNmePrim) (0x0002 + CSR_WIFI_NME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_NME_PROFILE_ORDER_SET_REQ ((CsrWifiNmePrim) (0x0003 + CSR_WIFI_NME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_NME_PROFILE_CONNECT_REQ ((CsrWifiNmePrim) (0x0004 + CSR_WIFI_NME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_NME_WPS_REQ ((CsrWifiNmePrim) (0x0005 + CSR_WIFI_NME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_NME_WPS_CANCEL_REQ ((CsrWifiNmePrim) (0x0006 + CSR_WIFI_NME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_NME_CONNECTION_STATUS_GET_REQ ((CsrWifiNmePrim) (0x0007 + CSR_WIFI_NME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_NME_SIM_IMSI_GET_RES ((CsrWifiNmePrim) (0x0008 + CSR_WIFI_NME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_NME_SIM_GSM_AUTH_RES ((CsrWifiNmePrim) (0x0009 + CSR_WIFI_NME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_NME_SIM_UMTS_AUTH_RES ((CsrWifiNmePrim) (0x000A + CSR_WIFI_NME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_NME_WPS_CONFIG_SET_REQ ((CsrWifiNmePrim) (0x000B + CSR_WIFI_NME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_NME_EVENT_MASK_SET_REQ ((CsrWifiNmePrim) (0x000C + CSR_WIFI_NME_PRIM_DOWNSTREAM_LOWEST))
-
-
-#define CSR_WIFI_NME_PRIM_DOWNSTREAM_HIGHEST (0x000C + CSR_WIFI_NME_PRIM_DOWNSTREAM_LOWEST)
-
-/* Upstream */
-#define CSR_WIFI_NME_PRIM_UPSTREAM_LOWEST (0x0000 + CSR_PRIM_UPSTREAM)
-
-#define CSR_WIFI_NME_PROFILE_SET_CFM ((CsrWifiNmePrim)(0x0000 + CSR_WIFI_NME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_NME_PROFILE_DELETE_CFM ((CsrWifiNmePrim)(0x0001 + CSR_WIFI_NME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_NME_PROFILE_DELETE_ALL_CFM ((CsrWifiNmePrim)(0x0002 + CSR_WIFI_NME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_NME_PROFILE_ORDER_SET_CFM ((CsrWifiNmePrim)(0x0003 + CSR_WIFI_NME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_NME_PROFILE_CONNECT_CFM ((CsrWifiNmePrim)(0x0004 + CSR_WIFI_NME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_NME_WPS_CFM ((CsrWifiNmePrim)(0x0005 + CSR_WIFI_NME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_NME_WPS_CANCEL_CFM ((CsrWifiNmePrim)(0x0006 + CSR_WIFI_NME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_NME_CONNECTION_STATUS_GET_CFM ((CsrWifiNmePrim)(0x0007 + CSR_WIFI_NME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_NME_PROFILE_UPDATE_IND ((CsrWifiNmePrim)(0x0008 + CSR_WIFI_NME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_NME_PROFILE_DISCONNECT_IND ((CsrWifiNmePrim)(0x0009 + CSR_WIFI_NME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_NME_SIM_IMSI_GET_IND ((CsrWifiNmePrim)(0x000A + CSR_WIFI_NME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_NME_SIM_GSM_AUTH_IND ((CsrWifiNmePrim)(0x000B + CSR_WIFI_NME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_NME_SIM_UMTS_AUTH_IND ((CsrWifiNmePrim)(0x000C + CSR_WIFI_NME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_NME_WPS_CONFIG_SET_CFM ((CsrWifiNmePrim)(0x000D + CSR_WIFI_NME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_NME_EVENT_MASK_SET_CFM ((CsrWifiNmePrim)(0x000E + CSR_WIFI_NME_PRIM_UPSTREAM_LOWEST))
-
-#define CSR_WIFI_NME_PRIM_UPSTREAM_HIGHEST (0x000E + CSR_WIFI_NME_PRIM_UPSTREAM_LOWEST)
-
-#define CSR_WIFI_NME_PRIM_DOWNSTREAM_COUNT (CSR_WIFI_NME_PRIM_DOWNSTREAM_HIGHEST + 1 - CSR_WIFI_NME_PRIM_DOWNSTREAM_LOWEST)
-#define CSR_WIFI_NME_PRIM_UPSTREAM_COUNT (CSR_WIFI_NME_PRIM_UPSTREAM_HIGHEST + 1 - CSR_WIFI_NME_PRIM_UPSTREAM_LOWEST)
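-
-/*
- * Sketch of how the two ID ranges above are typically consumed: downstream
- * types index a lookup table directly, while upstream types are rebased by
- * masking off CSR_PRIM_UPSTREAM and adding the downstream count. This
- * mirrors the converter lookup functions found elsewhere in this patch.
- */
-static inline u16 csr_wifi_nme_prim_to_lut_index_sketch(u16 msgType)
-{
-    if (msgType & CSR_PRIM_UPSTREAM)
-        return (u16)((msgType & ~CSR_PRIM_UPSTREAM) +
-                     CSR_WIFI_NME_PRIM_DOWNSTREAM_COUNT);
-    return msgType;
-}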
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeProfileSetReq
-
- DESCRIPTION
- Creates a new profile, or updates the existing profile in the NME that
- matches the unique identity of the profile. Each profile is identified by
- the combination of BSSID and SSID. The profile contains all the
- credentials required for attempting to connect to the network. Creating
- or updating a profile via the NME PROFILE SET REQ does NOT add the
- profile to the preferred profile list within the NME that is used for the
- NME auto-connect behaviour.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- profile - Specifies the identity and credentials of the network.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiNmeProfile profile;
-} CsrWifiNmeProfileSetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeProfileDeleteReq
-
- DESCRIPTION
- Will delete the profile with a matching identity, but does NOT modify the
- preferred profile list.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- profileIdentity - Identity (BSSID, SSID) of profile to be deleted.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiNmeProfileIdentity profileIdentity;
-} CsrWifiNmeProfileDeleteReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeProfileDeleteAllReq
-
- DESCRIPTION
- Deletes all profiles present in the NME, but does NOT modify the
- preferred profile list.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
-} CsrWifiNmeProfileDeleteAllReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeProfileOrderSetReq
-
- DESCRIPTION
- Defines the preferred order that profiles present in the NME should be
- used during the NME auto-connect behaviour.
- If profileIdentitysCount == 0, it removes any existing preferred profile
- list already present in the NME, effectively disabling the auto-connect
- behaviour.
- NOTE: Profile identities that do not match any profile stored in the NME
- are ignored during the auto-connect procedure.
- NOTE: During auto-connect the NME will only attempt to join an existing
- adhoc network; it will never attempt to host an adhoc network. For
- hosting an adhoc network, use CSR_WIFI_NME_PROFILE_CONNECT_REQ.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an
- interface
- profileIdentitysCount - The number of profile identities in the list.
- profileIdentitys - Points to the list of profile identities.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- u8 profileIdentitysCount;
- CsrWifiNmeProfileIdentity *profileIdentitys;
-} CsrWifiNmeProfileOrderSetReq;
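-
-/*
- * Minimal sketch of filling the order-set request; the caller owns the
- * identity array and the common FSM header is populated by the usual
- * CsrWifiFsm send helpers (not shown). A count of 0 (with a NULL list)
- * clears the preferred profile list and disables auto-connect, as
- * described above.
- */
-static inline void csr_wifi_nme_order_set_sketch(CsrWifiNmeProfileOrderSetReq *req,
-                                                 u16 interfaceTag,
-                                                 CsrWifiNmeProfileIdentity *ids,
-                                                 u8 count)
-{
-    req->interfaceTag = interfaceTag;
-    req->profileIdentitysCount = count;
-    req->profileIdentitys = count ? ids : NULL;
-}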
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeProfileConnectReq
-
- DESCRIPTION
- Requests the NME to attempt to connect to the specified profile.
- Overrides any current connection attempt.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- profileIdentity - Identity (BSSID, SSID) of profile to be connected to.
- It must match an existing profile in the NME.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiNmeProfileIdentity profileIdentity;
-} CsrWifiNmeProfileConnectReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeWpsReq
-
- DESCRIPTION
- Requests the NME to look for WPS enabled APs and attempt to perform WPS
- to determine the appropriate security credentials to connect to the AP.
- If the PIN == '00000000' then 'push button mode' is indicated; otherwise
- the PIN has to match that of the AP. A 4-digit PIN is passed by placing
- the PIN digits in pin[0]..pin[3] and filling the rest of the array with
- '-'.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- pin - PIN value.
- ssid - Service Set identifier
- bssid - ID of Basic Service Set for which a WPS connection attempt is
- being made.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- u8 pin[8];
- CsrWifiSsid ssid;
- CsrWifiMacAddress bssid;
-} CsrWifiNmeWpsReq;
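-
-/*
- * Sketch of the two PIN encodings described above: all '0' characters for
- * push-button mode, or the four PIN digits in pin[0]..pin[3] with the
- * remaining bytes set to '-'. memset()/memcpy() come from <linux/string.h>.
- */
-static inline void csr_wifi_nme_wps_pin_sketch(u8 pin[8], const char *fourDigitPin)
-{
-    if (!fourDigitPin) {
-        memset(pin, '0', 8);            /* "00000000": push-button mode */
-    } else {
-        memcpy(pin, fourDigitPin, 4);   /* e.g. "1234" */
-        memset(pin + 4, '-', 4);
-    }
-}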
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeWpsCancelReq
-
- DESCRIPTION
- Requests the NME to cancel any WPS procedure that it is currently
- performing. This includes WPS registrar activities started because of
- CSR_WIFI_NME_AP_REGISTER.request
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
-} CsrWifiNmeWpsCancelReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeConnectionStatusGetReq
-
- DESCRIPTION
- Requests the current connection status of the NME.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
-} CsrWifiNmeConnectionStatusGetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeSimImsiGetRes
-
- DESCRIPTION
- Response from the application that received the NME SIM IMSI GET IND.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Indicates the outcome of the requested operation: STATUS_SUCCESS
- or STATUS_ERROR.
- imsi - The value of the IMSI obtained from the UICC.
- cardType - The UICC type (GSM only (SIM), UMTS only (USIM), Both).
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
- char *imsi;
- CsrWifiNmeSimCardType cardType;
-} CsrWifiNmeSimImsiGetRes;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeSimGsmAuthRes
-
- DESCRIPTION
- Response from the application that received the NME SIM GSM AUTH IND. For
- each GSM authentication round a GSM Ciphering key (Kc) and a signed
- response (SRES) are produced. Since 2 or 3 GSM authentication rounds are
- used, the resulting 2 or 3 Kc values are concatenated into one buffer
- and, similarly, the 2 or 3 SRES values are concatenated into another
- buffer. The order of the Kc values (and of the SRES values) in their buffers
- is the same as that of their corresponding RAND values in the incoming
- indication.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Indicates the outcome of the requested operation:
- STATUS_SUCCESS or STATUS_ERROR
- kcsLength - Length in Bytes of Kc buffer. Legal values are: 16 or 24.
- kcs - Kc buffer holding 2 or 3 Kc values.
- sresLength - Length in Bytes of SRES buffer. Legal values are: 8 or 12.
- sres - SRES buffer holding 2 or 3 SRES values.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
- u8 kcsLength;
- u8 *kcs;
- u8 sresLength;
- u8 *sres;
-} CsrWifiNmeSimGsmAuthRes;
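-
-/*
- * Sketch of packing the per-round GSM results as described above: each Kc
- * is 8 bytes and each SRES is 4 bytes, concatenated in the same order as
- * the RAND values from the indication. The caller provides buffers of at
- * least rounds * 8 and rounds * 4 bytes (rounds is 2 or 3).
- */
-static inline void csr_wifi_nme_pack_gsm_auth_sketch(CsrWifiNmeSimGsmAuthRes *res,
-                                                     const u8 (*kc)[8],
-                                                     const u8 (*sres)[4],
-                                                     u8 rounds,
-                                                     u8 *kcsBuf, u8 *sresBuf)
-{
-    u8 i;
-
-    for (i = 0; i < rounds; i++) {
-        memcpy(kcsBuf + i * 8, kc[i], 8);
-        memcpy(sresBuf + i * 4, sres[i], 4);
-    }
-    res->kcsLength = rounds * 8;    /* 16 or 24 */
-    res->kcs = kcsBuf;
-    res->sresLength = rounds * 4;   /* 8 or 12 */
-    res->sres = sresBuf;
-}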
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeSimUmtsAuthRes
-
- DESCRIPTION
- Response from the application that received the NME SIM UMTS AUTH IND.
- The values of umtsCipherKey, umtsIntegrityKey, resParameterLength and
- resParameter are only meaningful when result = UMTS_AUTH_RESULT_SUCCESS.
- The value of auts is only meaningful when
- result=UMTS_AUTH_RESULT_SYNC_FAIL.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Indicates the outcome of the requested operation:
- STATUS_SUCCESS or STATUS_ERROR.
- result - The result of UMTS authentication as performed by the
- UICC which could be: Success, Authentication Reject or
- Synchronisation Failure. For all these 3 outcomes the
- value of status is success.
- umtsCipherKey - The UMTS Cipher Key as calculated and returned by the
- UICC.
- umtsIntegrityKey - The UMTS Integrity Key as calculated and returned by
- the UICC.
- resParameterLength - The length (in bytes) of the RES parameter (min=4; max
- = 16).
- resParameter - The RES parameter as calculated and returned by the
- UICC.
- auts - The AUTS parameter as calculated and returned by the
- UICC.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
- CsrWifiNmeUmtsAuthResult result;
- u8 umtsCipherKey[16];
- u8 umtsIntegrityKey[16];
- u8 resParameterLength;
- u8 *resParameter;
- u8 auts[14];
-} CsrWifiNmeSimUmtsAuthRes;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeWpsConfigSetReq
-
- DESCRIPTION
- This primitive passes the WPS information for the device to the NME. It
- may be accepted only if no interface is active.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- wpsConfig - WPS config.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiSmeWpsConfig wpsConfig;
-} CsrWifiNmeWpsConfigSetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeEventMaskSetReq
-
- DESCRIPTION
- The wireless manager application may register with the NME to receive
- notification of interesting events. Indications will be sent only if the
- wireless manager explicitly registers to be notified of that event.
- indMask is a bit mask of values defined in CsrWifiNmeIndicationsMask.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- indMask - Set mask with values from CsrWifiNmeIndications
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiNmeIndicationsMask indMask;
-} CsrWifiNmeEventMaskSetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeProfileSetCfm
-
- DESCRIPTION
- Reports the status of the NME PROFILE SET REQ; the request will only fail
- if the details specified in the profile contain an invalid combination of
- parameters, for example specifying the profile as cloaked but not
- specifying the SSID. The NME does not limit the number of profiles that
- may be created; it assumes that the entity configuring it is aware of the
- appropriate limits.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Indicates the success or otherwise of the requested operation.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
-} CsrWifiNmeProfileSetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeProfileDeleteCfm
-
- DESCRIPTION
- Reports the status of the CSR_WIFI_NME_PROFILE_DELETE_REQ.
- Returns CSR_WIFI_NME_STATUS_NOT_FOUND if there is no matching profile.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Indicates the success or otherwise of the requested operation.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
-} CsrWifiNmeProfileDeleteCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeProfileDeleteAllCfm
-
- DESCRIPTION
- Reports the status of the CSR_WIFI_NME_PROFILE_DELETE_ALL_REQ.
- Always returns CSR_WIFI_NME_STATUS_SUCCESS.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Indicates the success or otherwise of the requested operation, but
- in this case it is always set to success.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
-} CsrWifiNmeProfileDeleteAllCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeProfileOrderSetCfm
-
- DESCRIPTION
- Confirmation to the CSR_WIFI_NME_PROFILE_ORDER_SET_REQ request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Indicates the success or otherwise of the requested
- operation.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
-} CsrWifiNmeProfileOrderSetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeProfileConnectCfm
-
- DESCRIPTION
- Reports the status of the NME PROFILE CONNECT REQ. If unsuccessful, the
- connectAttempts parameters contain details of the APs that the NME
- attempted to connect to before reporting the failure of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an
- interface
- status - Indicates the success or otherwise of the requested
- operation.
- connectAttemptsCount - This parameter is relevant only if
- status!=CSR_WIFI_NME_STATUS_SUCCESS.
- Number of connection attempt elements provided with
- this primitive
- connectAttempts - This parameter is relevant only if
- status!=CSR_WIFI_NME_STATUS_SUCCESS.
- Points to the list of connection attempt elements
- provided with this primitive
- Each element of the list provides information about
- an AP on which the connection attempt was made and
- the error that occurred during the attempt.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
- u8 connectAttemptsCount;
- CsrWifiNmeConnectAttempt *connectAttempts;
-} CsrWifiNmeProfileConnectCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeWpsCfm
-
- DESCRIPTION
- Reports the status of the NME WPS REQ.
- If CSR_WIFI_NME_STATUS_SUCCESS, the profile parameter contains the
- identity and credentials of the AP.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Indicates the success or otherwise of the requested
- operation.
- profile - This parameter is relevant only if
- status==CSR_WIFI_NME_STATUS_SUCCESS.
- The identity and credentials of the network.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
- CsrWifiNmeProfile profile;
-} CsrWifiNmeWpsCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeWpsCancelCfm
-
- DESCRIPTION
- Reports the status of the NME WPS CANCEL REQ; the request is always successful.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Only returns CSR_WIFI_NME_STATUS_SUCCESS
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
-} CsrWifiNmeWpsCancelCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeConnectionStatusGetCfm
-
- DESCRIPTION
- Reports the connection status of the NME.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Indicates the success or otherwise of the requested
- operation.
- connectionStatus - NME current connection status
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
- CsrWifiNmeConnectionStatus connectionStatus;
-} CsrWifiNmeConnectionStatusGetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeProfileUpdateInd
-
- DESCRIPTION
- Indication generated from the NME (if an application subscribes to
- receive it) that informs that application that the contained profile has
- changed.
- For example, either the credentials EAP-FAST PAC file or the session data
- within the profile has changed.
- It is up to the application whether it stores this updated profile or
- not.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- profile - The identity and credentials of the network.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiNmeProfile profile;
-} CsrWifiNmeProfileUpdateInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeProfileDisconnectInd
-
- DESCRIPTION
- Indication generated from the NME (if an application subscribes to
- receive it) that informs the application that the current profile
- connection has disconnected. The indication will contain information
- about the APs via which it attempted to maintain the connection, i.e. in
- the case of failed roaming.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an
- interface
- connectAttemptsCount - Number of connection attempt elements provided with
- this primitive
- connectAttempts - Points to the list of connection attempt elements
- provided with this primitive
- Each element of the list provides information about
- an AP on which the connection attempt was made and
- the error that occurred during the attempt.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- u8 connectAttemptsCount;
- CsrWifiNmeConnectAttempt *connectAttempts;
-} CsrWifiNmeProfileDisconnectInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeSimImsiGetInd
-
- DESCRIPTION
- Indication generated from the NME (if an application subscribes to
- receive it) that requests the IMSI and UICC type from the UICC Manager.
- This indication is generated when the NME is attempting to connect to a
- profile configured for EAP-SIM/AKA. An application MUST register to
- receive this indication for the NME to support the EAP-SIM/AKA credential
- types. Otherwise the NME has no route to obtain the information from the
- UICC.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
-} CsrWifiNmeSimImsiGetInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeSimGsmAuthInd
-
- DESCRIPTION
- Indication generated from the NME (if an application subscribes to
- receive it) that requests the UICC Manager to perform a GSM
- authentication on behalf of the NME. This indication is generated when
- the NME is attempting to connect to a profile configured for EAP-SIM. An
- application MUST register to receive this indication for the NME to
- support the EAP-SIM credential types. Otherwise the NME has no route to
- obtain the information from the UICC. EAP-SIM authentication requires 2
- or 3 GSM authentication rounds and therefore 2 or 3 RANDs (GSM Random
- Challenges) are included.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- randsLength - GSM RAND is 16 bytes long, hence valid values are 32 (2 RANDs)
- or 48 (3 RANDs).
- rands - 2 or 3 RANDs values.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u8 randsLength;
- u8 *rands;
-} CsrWifiNmeSimGsmAuthInd;
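-
-/*
- * Sketch of walking the packed RAND buffer described above: each GSM RAND
- * is 16 bytes, so randsLength / 16 gives the number of authentication
- * rounds (2 or 3).
- */
-static inline void csr_wifi_nme_for_each_rand_sketch(const CsrWifiNmeSimGsmAuthInd *ind)
-{
-    u8 i, rounds = ind->randsLength / 16;
-
-    for (i = 0; i < rounds; i++) {
-        const u8 *rand = ind->rands + i * 16;
-        /* pass this 16-byte RAND to the UICC for round i */
-        (void)rand;
-    }
-}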
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeSimUmtsAuthInd
-
- DESCRIPTION
- Indication generated from the NME (if an application subscribes to
- receive it) that requests the UICC Manager to perform a UMTS
- authentication on behalf of the NME. This indication is generated when
- the NME is attempting to connect to a profile configured for EAP-AKA. An
- application MUST register to receive this indication for the NME to
- support the EAP-AKA credential types. Otherwise the NME has no route to
- obtain the information from the USIM. EAP-AKA requires one UMTS
- authentication round and therefore only one RAND value and one AUTN value
- are included.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- rand - UMTS RAND value.
- autn - UMTS AUTN value.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u8 rand[16];
- u8 autn[16];
-} CsrWifiNmeSimUmtsAuthInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeWpsConfigSetCfm
-
- DESCRIPTION
- Confirmation for the CSR_WIFI_NME_WPS_CONFIG_SET_REQ request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Status of the request.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
-} CsrWifiNmeWpsConfigSetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiNmeEventMaskSetCfm
-
- DESCRIPTION
- The NME sends this primitive to report the result of the corresponding
- CSR_WIFI_NME_EVENT_MASK_SET_REQ request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
-} CsrWifiNmeEventMaskSetCfm;
-
-#endif /* CSR_WIFI_NME_PRIM_H__ */
-
diff --git a/drivers/staging/csr/csr_wifi_nme_serialize.h b/drivers/staging/csr/csr_wifi_nme_serialize.h
deleted file mode 100644
index ebac484..0000000
--- a/drivers/staging/csr/csr_wifi_nme_serialize.h
+++ /dev/null
@@ -1,166 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#ifndef CSR_WIFI_NME_SERIALIZE_H__
-#define CSR_WIFI_NME_SERIALIZE_H__
-
-#include "csr_wifi_msgconv.h"
-#include "csr_wifi_nme_prim.h"
-
-#ifndef CSR_WIFI_NME_ENABLE
-#error CSR_WIFI_NME_ENABLE MUST be defined in order to use csr_wifi_nme_serialize.h
-#endif
-
-extern void CsrWifiNmePfree(void *ptr);
-
-extern u8* CsrWifiNmeProfileSetReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeProfileSetReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeProfileSetReqSizeof(void *msg);
-extern void CsrWifiNmeProfileSetReqSerFree(void *msg);
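-
-/*
- * Sketch of a serialise/deserialise round trip with the quartet declared
- * above. The exact in/out semantics of the len argument are an assumption
- * here (Sizeof giving the required buffer size, Ser reporting the bytes
- * written); kmalloc()/kfree() come from <linux/slab.h>.
- */
-static inline void *csr_wifi_nme_profile_set_roundtrip_sketch(CsrWifiNmeProfileSetReq *req)
-{
-    size_t len = CsrWifiNmeProfileSetReqSizeof(req);
-    u8 *buf = kmalloc(len, GFP_KERNEL);
-    void *copy = NULL;
-
-    if (buf) {
-        CsrWifiNmeProfileSetReqSer(buf, &len, req);
-        copy = CsrWifiNmeProfileSetReqDes(buf, len);
-        kfree(buf);
-    }
-    return copy; /* presumably released with CsrWifiNmeProfileSetReqSerFree() */
-}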
-
-extern u8* CsrWifiNmeProfileDeleteReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeProfileDeleteReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeProfileDeleteReqSizeof(void *msg);
-#define CsrWifiNmeProfileDeleteReqSerFree CsrWifiNmePfree
-
-#define CsrWifiNmeProfileDeleteAllReqSer CsrWifiEventSer
-#define CsrWifiNmeProfileDeleteAllReqDes CsrWifiEventDes
-#define CsrWifiNmeProfileDeleteAllReqSizeof CsrWifiEventSizeof
-#define CsrWifiNmeProfileDeleteAllReqSerFree CsrWifiNmePfree
-
-extern u8* CsrWifiNmeProfileOrderSetReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeProfileOrderSetReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeProfileOrderSetReqSizeof(void *msg);
-extern void CsrWifiNmeProfileOrderSetReqSerFree(void *msg);
-
-extern u8* CsrWifiNmeProfileConnectReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeProfileConnectReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeProfileConnectReqSizeof(void *msg);
-#define CsrWifiNmeProfileConnectReqSerFree CsrWifiNmePfree
-
-extern u8* CsrWifiNmeWpsReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeWpsReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeWpsReqSizeof(void *msg);
-#define CsrWifiNmeWpsReqSerFree CsrWifiNmePfree
-
-#define CsrWifiNmeWpsCancelReqSer CsrWifiEventCsrUint16Ser
-#define CsrWifiNmeWpsCancelReqDes CsrWifiEventCsrUint16Des
-#define CsrWifiNmeWpsCancelReqSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiNmeWpsCancelReqSerFree CsrWifiNmePfree
-
-#define CsrWifiNmeConnectionStatusGetReqSer CsrWifiEventCsrUint16Ser
-#define CsrWifiNmeConnectionStatusGetReqDes CsrWifiEventCsrUint16Des
-#define CsrWifiNmeConnectionStatusGetReqSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiNmeConnectionStatusGetReqSerFree CsrWifiNmePfree
-
-extern u8* CsrWifiNmeSimImsiGetResSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeSimImsiGetResDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeSimImsiGetResSizeof(void *msg);
-extern void CsrWifiNmeSimImsiGetResSerFree(void *msg);
-
-extern u8* CsrWifiNmeSimGsmAuthResSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeSimGsmAuthResDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeSimGsmAuthResSizeof(void *msg);
-extern void CsrWifiNmeSimGsmAuthResSerFree(void *msg);
-
-extern u8* CsrWifiNmeSimUmtsAuthResSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeSimUmtsAuthResDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeSimUmtsAuthResSizeof(void *msg);
-extern void CsrWifiNmeSimUmtsAuthResSerFree(void *msg);
-
-extern u8* CsrWifiNmeWpsConfigSetReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeWpsConfigSetReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeWpsConfigSetReqSizeof(void *msg);
-extern void CsrWifiNmeWpsConfigSetReqSerFree(void *msg);
-
-#define CsrWifiNmeEventMaskSetReqSer CsrWifiEventCsrUint32Ser
-#define CsrWifiNmeEventMaskSetReqDes CsrWifiEventCsrUint32Des
-#define CsrWifiNmeEventMaskSetReqSizeof CsrWifiEventCsrUint32Sizeof
-#define CsrWifiNmeEventMaskSetReqSerFree CsrWifiNmePfree
-
-#define CsrWifiNmeProfileSetCfmSer CsrWifiEventCsrUint16Ser
-#define CsrWifiNmeProfileSetCfmDes CsrWifiEventCsrUint16Des
-#define CsrWifiNmeProfileSetCfmSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiNmeProfileSetCfmSerFree CsrWifiNmePfree
-
-#define CsrWifiNmeProfileDeleteCfmSer CsrWifiEventCsrUint16Ser
-#define CsrWifiNmeProfileDeleteCfmDes CsrWifiEventCsrUint16Des
-#define CsrWifiNmeProfileDeleteCfmSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiNmeProfileDeleteCfmSerFree CsrWifiNmePfree
-
-#define CsrWifiNmeProfileDeleteAllCfmSer CsrWifiEventCsrUint16Ser
-#define CsrWifiNmeProfileDeleteAllCfmDes CsrWifiEventCsrUint16Des
-#define CsrWifiNmeProfileDeleteAllCfmSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiNmeProfileDeleteAllCfmSerFree CsrWifiNmePfree
-
-extern u8* CsrWifiNmeProfileOrderSetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeProfileOrderSetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeProfileOrderSetCfmSizeof(void *msg);
-#define CsrWifiNmeProfileOrderSetCfmSerFree CsrWifiNmePfree
-
-extern u8* CsrWifiNmeProfileConnectCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeProfileConnectCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeProfileConnectCfmSizeof(void *msg);
-extern void CsrWifiNmeProfileConnectCfmSerFree(void *msg);
-
-extern u8* CsrWifiNmeWpsCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeWpsCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeWpsCfmSizeof(void *msg);
-extern void CsrWifiNmeWpsCfmSerFree(void *msg);
-
-extern u8* CsrWifiNmeWpsCancelCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeWpsCancelCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeWpsCancelCfmSizeof(void *msg);
-#define CsrWifiNmeWpsCancelCfmSerFree CsrWifiNmePfree
-
-extern u8* CsrWifiNmeConnectionStatusGetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeConnectionStatusGetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeConnectionStatusGetCfmSizeof(void *msg);
-#define CsrWifiNmeConnectionStatusGetCfmSerFree CsrWifiNmePfree
-
-extern u8* CsrWifiNmeProfileUpdateIndSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeProfileUpdateIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeProfileUpdateIndSizeof(void *msg);
-extern void CsrWifiNmeProfileUpdateIndSerFree(void *msg);
-
-extern u8* CsrWifiNmeProfileDisconnectIndSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeProfileDisconnectIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeProfileDisconnectIndSizeof(void *msg);
-extern void CsrWifiNmeProfileDisconnectIndSerFree(void *msg);
-
-#define CsrWifiNmeSimImsiGetIndSer CsrWifiEventSer
-#define CsrWifiNmeSimImsiGetIndDes CsrWifiEventDes
-#define CsrWifiNmeSimImsiGetIndSizeof CsrWifiEventSizeof
-#define CsrWifiNmeSimImsiGetIndSerFree CsrWifiNmePfree
-
-extern u8* CsrWifiNmeSimGsmAuthIndSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeSimGsmAuthIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeSimGsmAuthIndSizeof(void *msg);
-extern void CsrWifiNmeSimGsmAuthIndSerFree(void *msg);
-
-extern u8* CsrWifiNmeSimUmtsAuthIndSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiNmeSimUmtsAuthIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiNmeSimUmtsAuthIndSizeof(void *msg);
-#define CsrWifiNmeSimUmtsAuthIndSerFree CsrWifiNmePfree
-
-#define CsrWifiNmeWpsConfigSetCfmSer CsrWifiEventCsrUint16Ser
-#define CsrWifiNmeWpsConfigSetCfmDes CsrWifiEventCsrUint16Des
-#define CsrWifiNmeWpsConfigSetCfmSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiNmeWpsConfigSetCfmSerFree CsrWifiNmePfree
-
-#define CsrWifiNmeEventMaskSetCfmSer CsrWifiEventCsrUint16Ser
-#define CsrWifiNmeEventMaskSetCfmDes CsrWifiEventCsrUint16Des
-#define CsrWifiNmeEventMaskSetCfmSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiNmeEventMaskSetCfmSerFree CsrWifiNmePfree
-
-#endif /* CSR_WIFI_NME_SERIALIZE_H__ */
-
diff --git a/drivers/staging/csr/csr_wifi_nme_task.h b/drivers/staging/csr/csr_wifi_nme_task.h
deleted file mode 100644
index 84e973a..0000000
--- a/drivers/staging/csr/csr_wifi_nme_task.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#ifndef CSR_WIFI_NME_TASK_H__
-#define CSR_WIFI_NME_TASK_H__
-
-#include <linux/types.h>
-#include "csr_sched.h"
-
-#ifndef CSR_WIFI_NME_ENABLE
-#error CSR_WIFI_NME_ENABLE MUST be defined in order to use csr_wifi_nme_task.h
-#endif
-
-#define CSR_WIFI_NME_LOG_ID 0x1203FFFF
-extern CsrSchedQid CSR_WIFI_NME_IFACEQUEUE;
-
-#endif /* CSR_WIFI_NME_TASK_H__ */
-
diff --git a/drivers/staging/csr/csr_wifi_private_common.h b/drivers/staging/csr/csr_wifi_private_common.h
deleted file mode 100644
index ee3bd51..0000000
--- a/drivers/staging/csr/csr_wifi_private_common.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-#ifndef CSR_WIFI_PRIVATE_COMMON_H__
-#define CSR_WIFI_PRIVATE_COMMON_H__
-
-/**
- * @brief Maximum number of STAs allowed to be connected
- *
- * @par Description
- * Maximum number of stations that may be associated at any one time.
- */
-#define CSR_WIFI_AP_MAX_ASSOC_STA 8
-
-/** Number of b-only rates */
-#define CSR_WIFI_SME_AP_MAX_ONLY_B_RATES 4
-
-
-/** Number of mandatory b rates */
-#define CSR_WIFI_SME_AP_MAX_MANDATORY_B_RATES 2
-
-
-/** Number of mandatory bg rates */
-#define CSR_WIFI_SME_AP_MAX_MANDATORY_BG_RATES 4
-
-
-/** Number of bg rates */
-#define CSR_WIFI_SME_AP_MAX_BG_RATES 12
-
-
-/** Number of g-only (no b) rates */
-#define CSR_WIFI_SME_AP_MAX_NO_B_ONLY_G_RATES 8
-
-
-/** Number of mandatory g rates */
-#define CSR_WIFI_SME_AP_MAX_MANDATORY_G_RATES 7
-
-
-/* Number of g mandatory rates */
-#define CSR_WIFI_SME_AP_G_MANDATORY_RATES_NUM 7
-
-
-/* Number of b mandatory rates */
-#define CSR_WIFI_SME_AP_B_MANDATORY_RATES_NUM 2
-
-
-/* Number of b/g mandatory rates */
-#define CSR_WIFI_SME_AP_BG_MANDATORY_RATES_NUM 4
-
-
-/* The maximum allowed length of SSID */
-#define CSR_WIFI_SME_AP_SSID_MAX_LENGTH 32
-
-/* Refer to 8.4.2.27 RSN element - we support TKIP, WPA2, WAPI and PSK only, no pmkid, group cipher suite */
-#define CSR_WIFI_SME_RSN_PACKED_SIZE (1 + 1 + 2 + 4 + 2 + 4 * 2 + 2 + 4 * 1 + 2 + 24)
-
-/* Refer to 7.3.2.9 (ISO/IEC 8802-11:2006) WAPI element - we support WAPI PSK only, no bkid, group cipher suite */
-#define CSR_WIFI_SME_WAPI_PACKED_SIZE (1 + 1 + 2 + 2 + 4 * 1 + 2 + 4 * 1 + 4 + 2 + 24)
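-
-/* For reference, the two packed-size expressions above evaluate to 50 and
- * 46 bytes respectively. */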
-
-
-/* Common structure for NME and SME to maintain Interface mode */
-typedef u8 CsrWifiInterfaceMode;
-#define CSR_WIFI_MODE_NONE ((CsrWifiInterfaceMode) 0xFF)
-#define CSR_WIFI_MODE_STA ((CsrWifiInterfaceMode) 0x00)
-#define CSR_WIFI_MODE_AP ((CsrWifiInterfaceMode) 0x01)
-#define CSR_WIFI_MODE_P2P_DEVICE ((CsrWifiInterfaceMode) 0x02)
-#define CSR_WIFI_MODE_P2P_CLI ((CsrWifiInterfaceMode) 0x03)
-#define CSR_WIFI_MODE_P2P_GO ((CsrWifiInterfaceMode) 0x04)
-#define CSR_WIFI_MODE_AMP ((CsrWifiInterfaceMode) 0x05)
-#define CSR_WIFI_MODE_WPS_ENROLLEE ((CsrWifiInterfaceMode) 0x06)
-#define CSR_WIFI_MODE_IBSS ((CsrWifiInterfaceMode) 0x07)
-
-#endif
-
diff --git a/drivers/staging/csr/csr_wifi_result.h b/drivers/staging/csr/csr_wifi_result.h
deleted file mode 100644
index 3c394c7..0000000
--- a/drivers/staging/csr/csr_wifi_result.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-#ifndef CSR_WIFI_RESULT_H__
-#define CSR_WIFI_RESULT_H__
-
-#include "csr_result.h"
-
-/* THIS FILE SHOULD CONTAIN ONLY RESULT CODES */
-
-/* Result Codes */
-#define CSR_WIFI_HIP_RESULT_INVALID_VALUE ((CsrResult) 1) /* Invalid argument value */
-#define CSR_WIFI_HIP_RESULT_NO_DEVICE ((CsrResult) 2) /* The specified device is no longer present */
-#define CSR_WIFI_HIP_RESULT_NO_SPACE ((CsrResult) 3) /* A queue or buffer is full */
-#define CSR_WIFI_HIP_RESULT_NO_MEMORY ((CsrResult) 4) /* Fatal error, no memory */
-#define CSR_WIFI_HIP_RESULT_RANGE ((CsrResult) 5) /* Request exceeds the range of a file or a buffer */
-#define CSR_WIFI_HIP_RESULT_NOT_FOUND ((CsrResult) 6) /* A file (typically a f/w patch) is not found */
-
-#endif /* CSR_WIFI_RESULT_H__ */
-
diff --git a/drivers/staging/csr/csr_wifi_router_converter_init.c b/drivers/staging/csr/csr_wifi_router_converter_init.c
deleted file mode 100644
index 775c013..0000000
--- a/drivers/staging/csr/csr_wifi_router_converter_init.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#include "csr_msgconv.h"
-#include "csr_macro.h"
-
-
-#ifdef CSR_LOG_ENABLE
-#include "csr_log.h"
-#endif
-
-#ifndef EXCLUDE_CSR_WIFI_ROUTER_MODULE
-#include "csr_wifi_router_serialize.h"
-#include "csr_wifi_router_prim.h"
-
-static CsrMsgConvMsgEntry csrwifirouter_conv_lut[] = {
- { CSR_WIFI_ROUTER_MA_PACKET_SUBSCRIBE_REQ, CsrWifiRouterMaPacketSubscribeReqSizeof, CsrWifiRouterMaPacketSubscribeReqSer, CsrWifiRouterMaPacketSubscribeReqDes, CsrWifiRouterMaPacketSubscribeReqSerFree },
- { CSR_WIFI_ROUTER_MA_PACKET_UNSUBSCRIBE_REQ, CsrWifiRouterMaPacketUnsubscribeReqSizeof, CsrWifiRouterMaPacketUnsubscribeReqSer, CsrWifiRouterMaPacketUnsubscribeReqDes, CsrWifiRouterMaPacketUnsubscribeReqSerFree },
- { CSR_WIFI_ROUTER_MA_PACKET_REQ, CsrWifiRouterMaPacketReqSizeof, CsrWifiRouterMaPacketReqSer, CsrWifiRouterMaPacketReqDes, CsrWifiRouterMaPacketReqSerFree },
- { CSR_WIFI_ROUTER_MA_PACKET_RES, CsrWifiRouterMaPacketResSizeof, CsrWifiRouterMaPacketResSer, CsrWifiRouterMaPacketResDes, CsrWifiRouterMaPacketResSerFree },
- { CSR_WIFI_ROUTER_MA_PACKET_CANCEL_REQ, CsrWifiRouterMaPacketCancelReqSizeof, CsrWifiRouterMaPacketCancelReqSer, CsrWifiRouterMaPacketCancelReqDes, CsrWifiRouterMaPacketCancelReqSerFree },
- { CSR_WIFI_ROUTER_MA_PACKET_SUBSCRIBE_CFM, CsrWifiRouterMaPacketSubscribeCfmSizeof, CsrWifiRouterMaPacketSubscribeCfmSer, CsrWifiRouterMaPacketSubscribeCfmDes, CsrWifiRouterMaPacketSubscribeCfmSerFree },
- { CSR_WIFI_ROUTER_MA_PACKET_UNSUBSCRIBE_CFM, CsrWifiRouterMaPacketUnsubscribeCfmSizeof, CsrWifiRouterMaPacketUnsubscribeCfmSer, CsrWifiRouterMaPacketUnsubscribeCfmDes, CsrWifiRouterMaPacketUnsubscribeCfmSerFree },
- { CSR_WIFI_ROUTER_MA_PACKET_CFM, CsrWifiRouterMaPacketCfmSizeof, CsrWifiRouterMaPacketCfmSer, CsrWifiRouterMaPacketCfmDes, CsrWifiRouterMaPacketCfmSerFree },
- { CSR_WIFI_ROUTER_MA_PACKET_IND, CsrWifiRouterMaPacketIndSizeof, CsrWifiRouterMaPacketIndSer, CsrWifiRouterMaPacketIndDes, CsrWifiRouterMaPacketIndSerFree },
-
- { 0, NULL, NULL, NULL, NULL },
-};
-
-CsrMsgConvMsgEntry* CsrWifiRouterConverterLookup(CsrMsgConvMsgEntry *ce, u16 msgType)
-{
- if (msgType & CSR_PRIM_UPSTREAM)
- {
- u16 idx = (msgType & ~CSR_PRIM_UPSTREAM) + CSR_WIFI_ROUTER_PRIM_DOWNSTREAM_COUNT;
- if (idx < (CSR_WIFI_ROUTER_PRIM_UPSTREAM_COUNT + CSR_WIFI_ROUTER_PRIM_DOWNSTREAM_COUNT) &&
- csrwifirouter_conv_lut[idx].msgType == msgType)
- {
- return &csrwifirouter_conv_lut[idx];
- }
- }
- else
- {
- if (msgType < CSR_WIFI_ROUTER_PRIM_DOWNSTREAM_COUNT &&
- csrwifirouter_conv_lut[msgType].msgType == msgType)
- {
- return &csrwifirouter_conv_lut[msgType];
- }
- }
- return NULL;
-}
-
-
-void CsrWifiRouterConverterInit(void)
-{
- CsrMsgConvInsert(CSR_WIFI_ROUTER_PRIM, csrwifirouter_conv_lut);
- CsrMsgConvCustomLookupRegister(CSR_WIFI_ROUTER_PRIM, CsrWifiRouterConverterLookup);
-}
-
-
-#ifdef CSR_LOG_ENABLE
-static const CsrLogPrimitiveInformation csrwifirouter_conv_info = {
- CSR_WIFI_ROUTER_PRIM,
- (char *)"CSR_WIFI_ROUTER_PRIM",
- csrwifirouter_conv_lut
-};
-const CsrLogPrimitiveInformation* CsrWifiRouterTechInfoGet(void)
-{
- return &csrwifirouter_conv_info;
-}
-
-
-#endif /* CSR_LOG_ENABLE */
-#endif /* EXCLUDE_CSR_WIFI_ROUTER_MODULE */
diff --git a/drivers/staging/csr/csr_wifi_router_converter_init.h b/drivers/staging/csr/csr_wifi_router_converter_init.h
deleted file mode 100644
index 478327b7..0000000
--- a/drivers/staging/csr/csr_wifi_router_converter_init.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#ifndef CSR_WIFI_ROUTER_CONVERTER_INIT_H__
-#define CSR_WIFI_ROUTER_CONVERTER_INIT_H__
-
-#ifndef EXCLUDE_CSR_WIFI_ROUTER_MODULE
-
-#include "csr_msgconv.h"
-
-#ifdef CSR_LOG_ENABLE
-#include "csr_log.h"
-
-extern const CsrLogPrimitiveInformation* CsrWifiRouterTechInfoGet(void);
-#endif /* CSR_LOG_ENABLE */
-
-extern void CsrWifiRouterConverterInit(void);
-
-#else /* EXCLUDE_CSR_WIFI_ROUTER_MODULE */
-
-#define CsrWifiRouterConverterInit()
-
-#endif /* EXCLUDE_CSR_WIFI_ROUTER_MODULE */
-
-#endif /* CSR_WIFI_ROUTER_CONVERTER_INIT_H__ */
diff --git a/drivers/staging/csr/csr_wifi_router_ctrl_converter_init.c b/drivers/staging/csr/csr_wifi_router_ctrl_converter_init.c
deleted file mode 100644
index a02e307..0000000
--- a/drivers/staging/csr/csr_wifi_router_ctrl_converter_init.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#include "csr_msgconv.h"
-#include "csr_macro.h"
-
-#ifdef CSR_LOG_ENABLE
-#include "csr_log.h"
-#endif
-
-#ifndef EXCLUDE_CSR_WIFI_ROUTER_CTRL_MODULE
-#include "csr_wifi_router_ctrl_serialize.h"
-#include "csr_wifi_router_ctrl_prim.h"
-
-static CsrMsgConvMsgEntry csrwifirouterctrl_conv_lut[] = {
- { CSR_WIFI_ROUTER_CTRL_CONFIGURE_POWER_MODE_REQ, CsrWifiRouterCtrlConfigurePowerModeReqSizeof, CsrWifiRouterCtrlConfigurePowerModeReqSer, CsrWifiRouterCtrlConfigurePowerModeReqDes, CsrWifiRouterCtrlConfigurePowerModeReqSerFree },
- { CSR_WIFI_ROUTER_CTRL_HIP_REQ, CsrWifiRouterCtrlHipReqSizeof, CsrWifiRouterCtrlHipReqSer, CsrWifiRouterCtrlHipReqDes, CsrWifiRouterCtrlHipReqSerFree },
- { CSR_WIFI_ROUTER_CTRL_MEDIA_STATUS_REQ, CsrWifiRouterCtrlMediaStatusReqSizeof, CsrWifiRouterCtrlMediaStatusReqSer, CsrWifiRouterCtrlMediaStatusReqDes, CsrWifiRouterCtrlMediaStatusReqSerFree },
- { CSR_WIFI_ROUTER_CTRL_MULTICAST_ADDRESS_RES, CsrWifiRouterCtrlMulticastAddressResSizeof, CsrWifiRouterCtrlMulticastAddressResSer, CsrWifiRouterCtrlMulticastAddressResDes, CsrWifiRouterCtrlMulticastAddressResSerFree },
- { CSR_WIFI_ROUTER_CTRL_PORT_CONFIGURE_REQ, CsrWifiRouterCtrlPortConfigureReqSizeof, CsrWifiRouterCtrlPortConfigureReqSer, CsrWifiRouterCtrlPortConfigureReqDes, CsrWifiRouterCtrlPortConfigureReqSerFree },
- { CSR_WIFI_ROUTER_CTRL_QOS_CONTROL_REQ, CsrWifiRouterCtrlQosControlReqSizeof, CsrWifiRouterCtrlQosControlReqSer, CsrWifiRouterCtrlQosControlReqDes, CsrWifiRouterCtrlQosControlReqSerFree },
- { CSR_WIFI_ROUTER_CTRL_SUSPEND_RES, CsrWifiRouterCtrlSuspendResSizeof, CsrWifiRouterCtrlSuspendResSer, CsrWifiRouterCtrlSuspendResDes, CsrWifiRouterCtrlSuspendResSerFree },
- { CSR_WIFI_ROUTER_CTRL_TCLAS_ADD_REQ, CsrWifiRouterCtrlTclasAddReqSizeof, CsrWifiRouterCtrlTclasAddReqSer, CsrWifiRouterCtrlTclasAddReqDes, CsrWifiRouterCtrlTclasAddReqSerFree },
- { CSR_WIFI_ROUTER_CTRL_RESUME_RES, CsrWifiRouterCtrlResumeResSizeof, CsrWifiRouterCtrlResumeResSer, CsrWifiRouterCtrlResumeResDes, CsrWifiRouterCtrlResumeResSerFree },
- { CSR_WIFI_ROUTER_CTRL_RAW_SDIO_DEINITIALISE_REQ, CsrWifiRouterCtrlRawSdioDeinitialiseReqSizeof, CsrWifiRouterCtrlRawSdioDeinitialiseReqSer, CsrWifiRouterCtrlRawSdioDeinitialiseReqDes, CsrWifiRouterCtrlRawSdioDeinitialiseReqSerFree },
- { CSR_WIFI_ROUTER_CTRL_RAW_SDIO_INITIALISE_REQ, CsrWifiRouterCtrlRawSdioInitialiseReqSizeof, CsrWifiRouterCtrlRawSdioInitialiseReqSer, CsrWifiRouterCtrlRawSdioInitialiseReqDes, CsrWifiRouterCtrlRawSdioInitialiseReqSerFree },
- { CSR_WIFI_ROUTER_CTRL_TCLAS_DEL_REQ, CsrWifiRouterCtrlTclasDelReqSizeof, CsrWifiRouterCtrlTclasDelReqSer, CsrWifiRouterCtrlTclasDelReqDes, CsrWifiRouterCtrlTclasDelReqSerFree },
- { CSR_WIFI_ROUTER_CTRL_TRAFFIC_CLASSIFICATION_REQ, CsrWifiRouterCtrlTrafficClassificationReqSizeof, CsrWifiRouterCtrlTrafficClassificationReqSer, CsrWifiRouterCtrlTrafficClassificationReqDes, CsrWifiRouterCtrlTrafficClassificationReqSerFree },
- { CSR_WIFI_ROUTER_CTRL_TRAFFIC_CONFIG_REQ, CsrWifiRouterCtrlTrafficConfigReqSizeof, CsrWifiRouterCtrlTrafficConfigReqSer, CsrWifiRouterCtrlTrafficConfigReqDes, CsrWifiRouterCtrlTrafficConfigReqSerFree },
- { CSR_WIFI_ROUTER_CTRL_WIFI_OFF_REQ, CsrWifiRouterCtrlWifiOffReqSizeof, CsrWifiRouterCtrlWifiOffReqSer, CsrWifiRouterCtrlWifiOffReqDes, CsrWifiRouterCtrlWifiOffReqSerFree },
- { CSR_WIFI_ROUTER_CTRL_WIFI_OFF_RES, CsrWifiRouterCtrlWifiOffResSizeof, CsrWifiRouterCtrlWifiOffResSer, CsrWifiRouterCtrlWifiOffResDes, CsrWifiRouterCtrlWifiOffResSerFree },
- { CSR_WIFI_ROUTER_CTRL_WIFI_ON_REQ, CsrWifiRouterCtrlWifiOnReqSizeof, CsrWifiRouterCtrlWifiOnReqSer, CsrWifiRouterCtrlWifiOnReqDes, CsrWifiRouterCtrlWifiOnReqSerFree },
- { CSR_WIFI_ROUTER_CTRL_WIFI_ON_RES, CsrWifiRouterCtrlWifiOnResSizeof, CsrWifiRouterCtrlWifiOnResSer, CsrWifiRouterCtrlWifiOnResDes, CsrWifiRouterCtrlWifiOnResSerFree },
- { CSR_WIFI_ROUTER_CTRL_M4_TRANSMIT_REQ, CsrWifiRouterCtrlM4TransmitReqSizeof, CsrWifiRouterCtrlM4TransmitReqSer, CsrWifiRouterCtrlM4TransmitReqDes, CsrWifiRouterCtrlM4TransmitReqSerFree },
- { CSR_WIFI_ROUTER_CTRL_MODE_SET_REQ, CsrWifiRouterCtrlModeSetReqSizeof, CsrWifiRouterCtrlModeSetReqSer, CsrWifiRouterCtrlModeSetReqDes, CsrWifiRouterCtrlModeSetReqSerFree },
- { CSR_WIFI_ROUTER_CTRL_PEER_ADD_REQ, CsrWifiRouterCtrlPeerAddReqSizeof, CsrWifiRouterCtrlPeerAddReqSer, CsrWifiRouterCtrlPeerAddReqDes, CsrWifiRouterCtrlPeerAddReqSerFree },
- { CSR_WIFI_ROUTER_CTRL_PEER_DEL_REQ, CsrWifiRouterCtrlPeerDelReqSizeof, CsrWifiRouterCtrlPeerDelReqSer, CsrWifiRouterCtrlPeerDelReqDes, CsrWifiRouterCtrlPeerDelReqSerFree },
- { CSR_WIFI_ROUTER_CTRL_PEER_UPDATE_REQ, CsrWifiRouterCtrlPeerUpdateReqSizeof, CsrWifiRouterCtrlPeerUpdateReqSer, CsrWifiRouterCtrlPeerUpdateReqDes, CsrWifiRouterCtrlPeerUpdateReqSerFree },
- { CSR_WIFI_ROUTER_CTRL_CAPABILITIES_REQ, CsrWifiRouterCtrlCapabilitiesReqSizeof, CsrWifiRouterCtrlCapabilitiesReqSer, CsrWifiRouterCtrlCapabilitiesReqDes, CsrWifiRouterCtrlCapabilitiesReqSerFree },
- { CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_ENABLE_REQ, CsrWifiRouterCtrlBlockAckEnableReqSizeof, CsrWifiRouterCtrlBlockAckEnableReqSer, CsrWifiRouterCtrlBlockAckEnableReqDes, CsrWifiRouterCtrlBlockAckEnableReqSerFree },
- { CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_DISABLE_REQ, CsrWifiRouterCtrlBlockAckDisableReqSizeof, CsrWifiRouterCtrlBlockAckDisableReqSer, CsrWifiRouterCtrlBlockAckDisableReqDes, CsrWifiRouterCtrlBlockAckDisableReqSerFree },
- { CSR_WIFI_ROUTER_CTRL_WAPI_RX_PKT_REQ, CsrWifiRouterCtrlWapiRxPktReqSizeof, CsrWifiRouterCtrlWapiRxPktReqSer, CsrWifiRouterCtrlWapiRxPktReqDes, CsrWifiRouterCtrlWapiRxPktReqSerFree },
- { CSR_WIFI_ROUTER_CTRL_WAPI_MULTICAST_FILTER_REQ, CsrWifiRouterCtrlWapiMulticastFilterReqSizeof, CsrWifiRouterCtrlWapiMulticastFilterReqSer, CsrWifiRouterCtrlWapiMulticastFilterReqDes, CsrWifiRouterCtrlWapiMulticastFilterReqSerFree },
- { CSR_WIFI_ROUTER_CTRL_WAPI_UNICAST_FILTER_REQ, CsrWifiRouterCtrlWapiUnicastFilterReqSizeof, CsrWifiRouterCtrlWapiUnicastFilterReqSer, CsrWifiRouterCtrlWapiUnicastFilterReqDes, CsrWifiRouterCtrlWapiUnicastFilterReqSerFree },
- { CSR_WIFI_ROUTER_CTRL_WAPI_UNICAST_TX_PKT_REQ, CsrWifiRouterCtrlWapiUnicastTxPktReqSizeof, CsrWifiRouterCtrlWapiUnicastTxPktReqSer, CsrWifiRouterCtrlWapiUnicastTxPktReqDes, CsrWifiRouterCtrlWapiUnicastTxPktReqSerFree },
- { CSR_WIFI_ROUTER_CTRL_WAPI_FILTER_REQ, CsrWifiRouterCtrlWapiFilterReqSizeof, CsrWifiRouterCtrlWapiFilterReqSer, CsrWifiRouterCtrlWapiFilterReqDes, CsrWifiRouterCtrlWapiFilterReqSerFree },
- { CSR_WIFI_ROUTER_CTRL_HIP_IND, CsrWifiRouterCtrlHipIndSizeof, CsrWifiRouterCtrlHipIndSer, CsrWifiRouterCtrlHipIndDes, CsrWifiRouterCtrlHipIndSerFree },
- { CSR_WIFI_ROUTER_CTRL_MULTICAST_ADDRESS_IND, CsrWifiRouterCtrlMulticastAddressIndSizeof, CsrWifiRouterCtrlMulticastAddressIndSer, CsrWifiRouterCtrlMulticastAddressIndDes, CsrWifiRouterCtrlMulticastAddressIndSerFree },
- { CSR_WIFI_ROUTER_CTRL_PORT_CONFIGURE_CFM, CsrWifiRouterCtrlPortConfigureCfmSizeof, CsrWifiRouterCtrlPortConfigureCfmSer, CsrWifiRouterCtrlPortConfigureCfmDes, CsrWifiRouterCtrlPortConfigureCfmSerFree },
- { CSR_WIFI_ROUTER_CTRL_RESUME_IND, CsrWifiRouterCtrlResumeIndSizeof, CsrWifiRouterCtrlResumeIndSer, CsrWifiRouterCtrlResumeIndDes, CsrWifiRouterCtrlResumeIndSerFree },
- { CSR_WIFI_ROUTER_CTRL_SUSPEND_IND, CsrWifiRouterCtrlSuspendIndSizeof, CsrWifiRouterCtrlSuspendIndSer, CsrWifiRouterCtrlSuspendIndDes, CsrWifiRouterCtrlSuspendIndSerFree },
- { CSR_WIFI_ROUTER_CTRL_TCLAS_ADD_CFM, CsrWifiRouterCtrlTclasAddCfmSizeof, CsrWifiRouterCtrlTclasAddCfmSer, CsrWifiRouterCtrlTclasAddCfmDes, CsrWifiRouterCtrlTclasAddCfmSerFree },
- { CSR_WIFI_ROUTER_CTRL_RAW_SDIO_DEINITIALISE_CFM, CsrWifiRouterCtrlRawSdioDeinitialiseCfmSizeof, CsrWifiRouterCtrlRawSdioDeinitialiseCfmSer, CsrWifiRouterCtrlRawSdioDeinitialiseCfmDes, CsrWifiRouterCtrlRawSdioDeinitialiseCfmSerFree },
- { CSR_WIFI_ROUTER_CTRL_RAW_SDIO_INITIALISE_CFM, CsrWifiRouterCtrlRawSdioInitialiseCfmSizeof, CsrWifiRouterCtrlRawSdioInitialiseCfmSer, CsrWifiRouterCtrlRawSdioInitialiseCfmDes, CsrWifiRouterCtrlRawSdioInitialiseCfmSerFree },
- { CSR_WIFI_ROUTER_CTRL_TCLAS_DEL_CFM, CsrWifiRouterCtrlTclasDelCfmSizeof, CsrWifiRouterCtrlTclasDelCfmSer, CsrWifiRouterCtrlTclasDelCfmDes, CsrWifiRouterCtrlTclasDelCfmSerFree },
- { CSR_WIFI_ROUTER_CTRL_TRAFFIC_PROTOCOL_IND, CsrWifiRouterCtrlTrafficProtocolIndSizeof, CsrWifiRouterCtrlTrafficProtocolIndSer, CsrWifiRouterCtrlTrafficProtocolIndDes, CsrWifiRouterCtrlTrafficProtocolIndSerFree },
- { CSR_WIFI_ROUTER_CTRL_TRAFFIC_SAMPLE_IND, CsrWifiRouterCtrlTrafficSampleIndSizeof, CsrWifiRouterCtrlTrafficSampleIndSer, CsrWifiRouterCtrlTrafficSampleIndDes, CsrWifiRouterCtrlTrafficSampleIndSerFree },
- { CSR_WIFI_ROUTER_CTRL_WIFI_OFF_IND, CsrWifiRouterCtrlWifiOffIndSizeof, CsrWifiRouterCtrlWifiOffIndSer, CsrWifiRouterCtrlWifiOffIndDes, CsrWifiRouterCtrlWifiOffIndSerFree },
- { CSR_WIFI_ROUTER_CTRL_WIFI_OFF_CFM, CsrWifiRouterCtrlWifiOffCfmSizeof, CsrWifiRouterCtrlWifiOffCfmSer, CsrWifiRouterCtrlWifiOffCfmDes, CsrWifiRouterCtrlWifiOffCfmSerFree },
- { CSR_WIFI_ROUTER_CTRL_WIFI_ON_IND, CsrWifiRouterCtrlWifiOnIndSizeof, CsrWifiRouterCtrlWifiOnIndSer, CsrWifiRouterCtrlWifiOnIndDes, CsrWifiRouterCtrlWifiOnIndSerFree },
- { CSR_WIFI_ROUTER_CTRL_WIFI_ON_CFM, CsrWifiRouterCtrlWifiOnCfmSizeof, CsrWifiRouterCtrlWifiOnCfmSer, CsrWifiRouterCtrlWifiOnCfmDes, CsrWifiRouterCtrlWifiOnCfmSerFree },
- { CSR_WIFI_ROUTER_CTRL_M4_READY_TO_SEND_IND, CsrWifiRouterCtrlM4ReadyToSendIndSizeof, CsrWifiRouterCtrlM4ReadyToSendIndSer, CsrWifiRouterCtrlM4ReadyToSendIndDes, CsrWifiRouterCtrlM4ReadyToSendIndSerFree },
- { CSR_WIFI_ROUTER_CTRL_M4_TRANSMITTED_IND, CsrWifiRouterCtrlM4TransmittedIndSizeof, CsrWifiRouterCtrlM4TransmittedIndSer, CsrWifiRouterCtrlM4TransmittedIndDes, CsrWifiRouterCtrlM4TransmittedIndSerFree },
- { CSR_WIFI_ROUTER_CTRL_MIC_FAILURE_IND, CsrWifiRouterCtrlMicFailureIndSizeof, CsrWifiRouterCtrlMicFailureIndSer, CsrWifiRouterCtrlMicFailureIndDes, CsrWifiRouterCtrlMicFailureIndSerFree },
- { CSR_WIFI_ROUTER_CTRL_CONNECTED_IND, CsrWifiRouterCtrlConnectedIndSizeof, CsrWifiRouterCtrlConnectedIndSer, CsrWifiRouterCtrlConnectedIndDes, CsrWifiRouterCtrlConnectedIndSerFree },
- { CSR_WIFI_ROUTER_CTRL_PEER_ADD_CFM, CsrWifiRouterCtrlPeerAddCfmSizeof, CsrWifiRouterCtrlPeerAddCfmSer, CsrWifiRouterCtrlPeerAddCfmDes, CsrWifiRouterCtrlPeerAddCfmSerFree },
- { CSR_WIFI_ROUTER_CTRL_PEER_DEL_CFM, CsrWifiRouterCtrlPeerDelCfmSizeof, CsrWifiRouterCtrlPeerDelCfmSer, CsrWifiRouterCtrlPeerDelCfmDes, CsrWifiRouterCtrlPeerDelCfmSerFree },
- { CSR_WIFI_ROUTER_CTRL_UNEXPECTED_FRAME_IND, CsrWifiRouterCtrlUnexpectedFrameIndSizeof, CsrWifiRouterCtrlUnexpectedFrameIndSer, CsrWifiRouterCtrlUnexpectedFrameIndDes, CsrWifiRouterCtrlUnexpectedFrameIndSerFree },
- { CSR_WIFI_ROUTER_CTRL_PEER_UPDATE_CFM, CsrWifiRouterCtrlPeerUpdateCfmSizeof, CsrWifiRouterCtrlPeerUpdateCfmSer, CsrWifiRouterCtrlPeerUpdateCfmDes, CsrWifiRouterCtrlPeerUpdateCfmSerFree },
- { CSR_WIFI_ROUTER_CTRL_CAPABILITIES_CFM, CsrWifiRouterCtrlCapabilitiesCfmSizeof, CsrWifiRouterCtrlCapabilitiesCfmSer, CsrWifiRouterCtrlCapabilitiesCfmDes, CsrWifiRouterCtrlCapabilitiesCfmSerFree },
- { CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_ENABLE_CFM, CsrWifiRouterCtrlBlockAckEnableCfmSizeof, CsrWifiRouterCtrlBlockAckEnableCfmSer, CsrWifiRouterCtrlBlockAckEnableCfmDes, CsrWifiRouterCtrlBlockAckEnableCfmSerFree },
- { CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_DISABLE_CFM, CsrWifiRouterCtrlBlockAckDisableCfmSizeof, CsrWifiRouterCtrlBlockAckDisableCfmSer, CsrWifiRouterCtrlBlockAckDisableCfmDes, CsrWifiRouterCtrlBlockAckDisableCfmSerFree },
- { CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_ERROR_IND, CsrWifiRouterCtrlBlockAckErrorIndSizeof, CsrWifiRouterCtrlBlockAckErrorIndSer, CsrWifiRouterCtrlBlockAckErrorIndDes, CsrWifiRouterCtrlBlockAckErrorIndSerFree },
- { CSR_WIFI_ROUTER_CTRL_STA_INACTIVE_IND, CsrWifiRouterCtrlStaInactiveIndSizeof, CsrWifiRouterCtrlStaInactiveIndSer, CsrWifiRouterCtrlStaInactiveIndDes, CsrWifiRouterCtrlStaInactiveIndSerFree },
- { CSR_WIFI_ROUTER_CTRL_WAPI_RX_MIC_CHECK_IND, CsrWifiRouterCtrlWapiRxMicCheckIndSizeof, CsrWifiRouterCtrlWapiRxMicCheckIndSer, CsrWifiRouterCtrlWapiRxMicCheckIndDes, CsrWifiRouterCtrlWapiRxMicCheckIndSerFree },
- { CSR_WIFI_ROUTER_CTRL_MODE_SET_CFM, CsrWifiRouterCtrlModeSetCfmSizeof, CsrWifiRouterCtrlModeSetCfmSer, CsrWifiRouterCtrlModeSetCfmDes, CsrWifiRouterCtrlModeSetCfmSerFree },
- { CSR_WIFI_ROUTER_CTRL_WAPI_UNICAST_TX_ENCRYPT_IND, CsrWifiRouterCtrlWapiUnicastTxEncryptIndSizeof, CsrWifiRouterCtrlWapiUnicastTxEncryptIndSer, CsrWifiRouterCtrlWapiUnicastTxEncryptIndDes, CsrWifiRouterCtrlWapiUnicastTxEncryptIndSerFree },
-
- { 0, NULL, NULL, NULL, NULL },
-};
-
-CsrMsgConvMsgEntry* CsrWifiRouterCtrlConverterLookup(CsrMsgConvMsgEntry *ce, u16 msgType)
-{
- if (msgType & CSR_PRIM_UPSTREAM)
- {
- u16 idx = (msgType & ~CSR_PRIM_UPSTREAM) + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_COUNT;
- if (idx < (CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_COUNT + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_COUNT) &&
- csrwifirouterctrl_conv_lut[idx].msgType == msgType)
- {
- return &csrwifirouterctrl_conv_lut[idx];
- }
- }
- else
- {
- if (msgType < CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_COUNT &&
- csrwifirouterctrl_conv_lut[msgType].msgType == msgType)
- {
- return &csrwifirouterctrl_conv_lut[msgType];
- }
- }
- return NULL;
-}
-
-
-void CsrWifiRouterCtrlConverterInit(void)
-{
- CsrMsgConvInsert(CSR_WIFI_ROUTER_CTRL_PRIM, csrwifirouterctrl_conv_lut);
- CsrMsgConvCustomLookupRegister(CSR_WIFI_ROUTER_CTRL_PRIM, CsrWifiRouterCtrlConverterLookup);
-}
-
-
-#ifdef CSR_LOG_ENABLE
-static const CsrLogPrimitiveInformation csrwifirouterctrl_conv_info = {
- CSR_WIFI_ROUTER_CTRL_PRIM,
- (char *)"CSR_WIFI_ROUTER_CTRL_PRIM",
- csrwifirouterctrl_conv_lut
-};
-const CsrLogPrimitiveInformation* CsrWifiRouterCtrlTechInfoGet(void)
-{
- return &csrwifirouterctrl_conv_info;
-}
-
-
-#endif /* CSR_LOG_ENABLE */
-#endif /* EXCLUDE_CSR_WIFI_ROUTER_CTRL_MODULE */
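For reference, a minimal standalone sketch of the table-indexing scheme that CsrWifiRouterCtrlConverterLookup() above relies on: downstream entries sit at indices 0..CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_COUNT-1 and upstream entries follow them, so an upstream type is located by masking off CSR_PRIM_UPSTREAM and adding the downstream count. The counts, flag value and entry names below are invented for illustration only; this is not the CSR code.

#include <stdio.h>

#define PRIM_UPSTREAM    0x8000u   /* stand-in for CSR_PRIM_UPSTREAM        */
#define DOWNSTREAM_COUNT 3u        /* hypothetical primitive counts         */
#define UPSTREAM_COUNT   2u

struct conv_entry {
	unsigned short msgType;
	const char *name;
};

/* Downstream entries first (index == msgType), upstream entries appended. */
static const struct conv_entry lut[] = {
	{ 0x0000, "REQ_A" },
	{ 0x0001, "REQ_B" },
	{ 0x0002, "REQ_C" },
	{ PRIM_UPSTREAM | 0x0000, "CFM_A" },
	{ PRIM_UPSTREAM | 0x0001, "IND_B" },
};

static const struct conv_entry *lookup(unsigned short msgType)
{
	if (msgType & PRIM_UPSTREAM) {
		unsigned short idx = (msgType & ~PRIM_UPSTREAM) + DOWNSTREAM_COUNT;
		if (idx < DOWNSTREAM_COUNT + UPSTREAM_COUNT && lut[idx].msgType == msgType)
			return &lut[idx];
	} else if (msgType < DOWNSTREAM_COUNT && lut[msgType].msgType == msgType) {
		return &lut[msgType];
	}
	return NULL;
}

int main(void)
{
	const struct conv_entry *e = lookup(PRIM_UPSTREAM | 0x0001);

	printf("%s\n", e ? e->name : "(no converter)");   /* prints "IND_B" */
	return 0;
}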
diff --git a/drivers/staging/csr/csr_wifi_router_ctrl_converter_init.h b/drivers/staging/csr/csr_wifi_router_ctrl_converter_init.h
deleted file mode 100644
index c984589..0000000
--- a/drivers/staging/csr/csr_wifi_router_ctrl_converter_init.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#ifndef CSR_WIFI_ROUTER_CTRL_CONVERTER_INIT_H__
-#define CSR_WIFI_ROUTER_CTRL_CONVERTER_INIT_H__
-
-#ifndef EXCLUDE_CSR_WIFI_ROUTER_CTRL_MODULE
-
-#include "csr_msgconv.h"
-
-#ifdef CSR_LOG_ENABLE
-#include "csr_log.h"
-
-extern const CsrLogPrimitiveInformation* CsrWifiRouterCtrlTechInfoGet(void);
-#endif /* CSR_LOG_ENABLE */
-
-extern void CsrWifiRouterCtrlConverterInit(void);
-
-#else /* EXCLUDE_CSR_WIFI_ROUTER_CTRL_MODULE */
-
-#define CsrWifiRouterCtrlConverterInit()
-
-#endif /* EXCLUDE_CSR_WIFI_ROUTER_CTRL_MODULE */
-
-#endif /* CSR_WIFI_ROUTER_CTRL_CONVERTER_INIT_H__ */
diff --git a/drivers/staging/csr/csr_wifi_router_ctrl_free_downstream_contents.c b/drivers/staging/csr/csr_wifi_router_ctrl_free_downstream_contents.c
deleted file mode 100644
index 7fa85fb..0000000
--- a/drivers/staging/csr/csr_wifi_router_ctrl_free_downstream_contents.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-#include <linux/slab.h>
-#include "csr_wifi_router_ctrl_prim.h"
-#include "csr_wifi_router_ctrl_lib.h"
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrWifiRouterCtrlFreeDownstreamMessageContents
- *
- * DESCRIPTION
- * Free the allocated memory in a CSR_WIFI_ROUTER_CTRL downstream message.
- * Does not free the message itself.
- *
- * PARAMETERS
- * eventClass: only the value CSR_WIFI_ROUTER_CTRL_PRIM will be handled
- * message: the message to free
- *----------------------------------------------------------------------------*/
-void CsrWifiRouterCtrlFreeDownstreamMessageContents(u16 eventClass, void *message)
-{
- if (eventClass != CSR_WIFI_ROUTER_CTRL_PRIM)
- {
- return;
- }
- if (NULL == message)
- {
- return;
- }
-
- switch (*((CsrWifiRouterCtrlPrim *) message))
- {
- case CSR_WIFI_ROUTER_CTRL_HIP_REQ:
- {
- CsrWifiRouterCtrlHipReq *p = (CsrWifiRouterCtrlHipReq *)message;
- kfree(p->mlmeCommand);
- p->mlmeCommand = NULL;
- kfree(p->dataRef1);
- p->dataRef1 = NULL;
- kfree(p->dataRef2);
- p->dataRef2 = NULL;
- break;
- }
- case CSR_WIFI_ROUTER_CTRL_MULTICAST_ADDRESS_RES:
- {
- CsrWifiRouterCtrlMulticastAddressRes *p = (CsrWifiRouterCtrlMulticastAddressRes *)message;
- kfree(p->getAddresses);
- p->getAddresses = NULL;
- break;
- }
- case CSR_WIFI_ROUTER_CTRL_TCLAS_ADD_REQ:
- {
- CsrWifiRouterCtrlTclasAddReq *p = (CsrWifiRouterCtrlTclasAddReq *)message;
- kfree(p->tclas);
- p->tclas = NULL;
- break;
- }
- case CSR_WIFI_ROUTER_CTRL_TCLAS_DEL_REQ:
- {
- CsrWifiRouterCtrlTclasDelReq *p = (CsrWifiRouterCtrlTclasDelReq *)message;
- kfree(p->tclas);
- p->tclas = NULL;
- break;
- }
- case CSR_WIFI_ROUTER_CTRL_WIFI_ON_REQ:
- {
- CsrWifiRouterCtrlWifiOnReq *p = (CsrWifiRouterCtrlWifiOnReq *)message;
- kfree(p->data);
- p->data = NULL;
- break;
- }
- case CSR_WIFI_ROUTER_CTRL_WIFI_ON_RES:
- {
- CsrWifiRouterCtrlWifiOnRes *p = (CsrWifiRouterCtrlWifiOnRes *)message;
- kfree(p->smeVersions.smeBuild);
- p->smeVersions.smeBuild = NULL;
- break;
- }
- case CSR_WIFI_ROUTER_CTRL_WAPI_RX_PKT_REQ:
- {
- CsrWifiRouterCtrlWapiRxPktReq *p = (CsrWifiRouterCtrlWapiRxPktReq *)message;
- kfree(p->signal);
- p->signal = NULL;
- kfree(p->data);
- p->data = NULL;
- break;
- }
- case CSR_WIFI_ROUTER_CTRL_WAPI_UNICAST_TX_PKT_REQ:
- {
- CsrWifiRouterCtrlWapiUnicastTxPktReq *p = (CsrWifiRouterCtrlWapiUnicastTxPktReq *)message;
- kfree(p->data);
- p->data = NULL;
- break;
- }
-
- default:
- break;
- }
-}
-
-
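As the matching declarations in csr_wifi_router_ctrl_lib.h note, this helper frees only the dynamically allocated members of a downstream message, not the message itself, so the caller presumably releases the envelope separately. A minimal standalone sketch of that two-step pattern, using invented message types and plain malloc/free rather than the CSR structures:

#include <stdlib.h>

enum prim { TCLAS_ADD_REQ, WIFI_ON_REQ };   /* invented primitive ids */

struct tclas_add_req {                      /* every message starts with its prim id */
	enum prim type;
	unsigned char *tclas;
};

/* Frees the pointer members only, mirroring the dispatch pattern above. */
static void free_contents(void *message)
{
	if (!message)
		return;

	switch (*(enum prim *)message) {
	case TCLAS_ADD_REQ: {
		struct tclas_add_req *p = message;
		free(p->tclas);
		p->tclas = NULL;
		break;
	}
	default:
		break;
	}
}

int main(void)
{
	struct tclas_add_req *req = calloc(1, sizeof(*req));

	if (!req)
		return 1;
	req->type = TCLAS_ADD_REQ;
	req->tclas = malloc(16);

	free_contents(req);   /* step 1: release the owned buffers       */
	free(req);            /* step 2: release the message envelope    */
	return 0;
}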
diff --git a/drivers/staging/csr/csr_wifi_router_ctrl_free_upstream_contents.c b/drivers/staging/csr/csr_wifi_router_ctrl_free_upstream_contents.c
deleted file mode 100644
index 954b3de..0000000
--- a/drivers/staging/csr/csr_wifi_router_ctrl_free_upstream_contents.c
+++ /dev/null
@@ -1,87 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-#include <linux/slab.h>
-#include "csr_wifi_router_ctrl_prim.h"
-#include "csr_wifi_router_ctrl_lib.h"
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrWifiRouterCtrlFreeUpstreamMessageContents
- *
- * DESCRIPTION
- * Free the allocated memory in a CSR_WIFI_ROUTER_CTRL upstream message.
- * Does not free the message itself.
- *
- * PARAMETERS
- * eventClass: only the value CSR_WIFI_ROUTER_CTRL_PRIM will be handled
- * message: the message to free
- *----------------------------------------------------------------------------*/
-void CsrWifiRouterCtrlFreeUpstreamMessageContents(u16 eventClass, void *message)
-{
- if (eventClass != CSR_WIFI_ROUTER_CTRL_PRIM)
- {
- return;
- }
- if (NULL == message)
- {
- return;
- }
-
- switch (*((CsrWifiRouterCtrlPrim *) message))
- {
- case CSR_WIFI_ROUTER_CTRL_HIP_IND:
- {
- CsrWifiRouterCtrlHipInd *p = (CsrWifiRouterCtrlHipInd *)message;
- kfree(p->mlmeCommand);
- p->mlmeCommand = NULL;
- kfree(p->dataRef1);
- p->dataRef1 = NULL;
- kfree(p->dataRef2);
- p->dataRef2 = NULL;
- break;
- }
- case CSR_WIFI_ROUTER_CTRL_MULTICAST_ADDRESS_IND:
- {
- CsrWifiRouterCtrlMulticastAddressInd *p = (CsrWifiRouterCtrlMulticastAddressInd *)message;
- kfree(p->setAddresses);
- p->setAddresses = NULL;
- break;
- }
- case CSR_WIFI_ROUTER_CTRL_WIFI_ON_IND:
- {
- CsrWifiRouterCtrlWifiOnInd *p = (CsrWifiRouterCtrlWifiOnInd *)message;
- kfree(p->versions.routerBuild);
- p->versions.routerBuild = NULL;
- break;
- }
- case CSR_WIFI_ROUTER_CTRL_WAPI_RX_MIC_CHECK_IND:
- {
- CsrWifiRouterCtrlWapiRxMicCheckInd *p = (CsrWifiRouterCtrlWapiRxMicCheckInd *)message;
- kfree(p->signal);
- p->signal = NULL;
- kfree(p->data);
- p->data = NULL;
- break;
- }
- case CSR_WIFI_ROUTER_CTRL_WAPI_UNICAST_TX_ENCRYPT_IND:
- {
- CsrWifiRouterCtrlWapiUnicastTxEncryptInd *p = (CsrWifiRouterCtrlWapiUnicastTxEncryptInd *)message;
- kfree(p->data);
- p->data = NULL;
- break;
- }
-
- default:
- break;
- }
-}
-
-
diff --git a/drivers/staging/csr/csr_wifi_router_ctrl_lib.h b/drivers/staging/csr/csr_wifi_router_ctrl_lib.h
deleted file mode 100644
index f235153..0000000
--- a/drivers/staging/csr/csr_wifi_router_ctrl_lib.h
+++ /dev/null
@@ -1,2082 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#ifndef CSR_WIFI_ROUTER_CTRL_LIB_H__
-#define CSR_WIFI_ROUTER_CTRL_LIB_H__
-
-#include "csr_sched.h"
-#include "csr_macro.h"
-#include "csr_msg_transport.h"
-
-#include "csr_wifi_lib.h"
-
-#include "csr_wifi_router_ctrl_prim.h"
-#include "csr_wifi_router_task.h"
-
-/*----------------------------------------------------------------------------*
- * CsrWifiRouterCtrlFreeUpstreamMessageContents
- *
- * DESCRIPTION
- * Free the allocated memory in a CSR_WIFI_ROUTER_CTRL upstream message. Does not
- * free the message itself, and can only be used for upstream messages.
- *
- * PARAMETERS
- * eventClass: only the value CSR_WIFI_ROUTER_CTRL_PRIM will be handled
- * message: the upstream message whose contents will be freed
- *----------------------------------------------------------------------------*/
-void CsrWifiRouterCtrlFreeUpstreamMessageContents(u16 eventClass, void *message);
-
-/*----------------------------------------------------------------------------*
- * CsrWifiRouterCtrlFreeDownstreamMessageContents
- *
- * DESCRIPTION
- * Free the allocated memory in a CSR_WIFI_ROUTER_CTRL downstream message. Does not
- * free the message itself, and can only be used for downstream messages.
- *
- * PARAMETERS
- * eventClass: only the value CSR_WIFI_ROUTER_CTRL_PRIM will be handled
- * message: the downstream message whose contents will be freed
- *----------------------------------------------------------------------------*/
-void CsrWifiRouterCtrlFreeDownstreamMessageContents(u16 eventClass, void *message);
-
-/*----------------------------------------------------------------------------*
- * Enum to string functions
- *----------------------------------------------------------------------------*/
-const char* CsrWifiRouterCtrlBlockAckRoleToString(CsrWifiRouterCtrlBlockAckRole value);
-const char* CsrWifiRouterCtrlControlIndicationToString(CsrWifiRouterCtrlControlIndication value);
-const char* CsrWifiRouterCtrlListActionToString(CsrWifiRouterCtrlListAction value);
-const char* CsrWifiRouterCtrlLowPowerModeToString(CsrWifiRouterCtrlLowPowerMode value);
-const char* CsrWifiRouterCtrlMediaStatusToString(CsrWifiRouterCtrlMediaStatus value);
-const char* CsrWifiRouterCtrlModeToString(CsrWifiRouterCtrlMode value);
-const char* CsrWifiRouterCtrlPeerStatusToString(CsrWifiRouterCtrlPeerStatus value);
-const char* CsrWifiRouterCtrlPortActionToString(CsrWifiRouterCtrlPortAction value);
-const char* CsrWifiRouterCtrlPowersaveTypeToString(CsrWifiRouterCtrlPowersaveType value);
-const char* CsrWifiRouterCtrlProtocolDirectionToString(CsrWifiRouterCtrlProtocolDirection value);
-const char* CsrWifiRouterCtrlQoSControlToString(CsrWifiRouterCtrlQoSControl value);
-const char* CsrWifiRouterCtrlQueueConfigToString(CsrWifiRouterCtrlQueueConfig value);
-const char* CsrWifiRouterCtrlTrafficConfigTypeToString(CsrWifiRouterCtrlTrafficConfigType value);
-const char* CsrWifiRouterCtrlTrafficPacketTypeToString(CsrWifiRouterCtrlTrafficPacketType value);
-const char* CsrWifiRouterCtrlTrafficTypeToString(CsrWifiRouterCtrlTrafficType value);
-
-
-/*----------------------------------------------------------------------------*
- * CsrPrim Type toString function.
- * Converts a message type to the String name of the Message
- *----------------------------------------------------------------------------*/
-const char* CsrWifiRouterCtrlPrimTypeToString(CsrPrim msgType);
-
-/*----------------------------------------------------------------------------*
- * Lookup arrays for PrimType name Strings
- *----------------------------------------------------------------------------*/
-extern const char *CsrWifiRouterCtrlUpstreamPrimNames[CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_COUNT];
-extern const char *CsrWifiRouterCtrlDownstreamPrimNames[CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_COUNT];
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlBlockAckDisableReqSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
- clientData -
- macAddress -
- trafficStreamID -
- role -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlBlockAckDisableReqCreate(msg__, dst__, src__, interfaceTag__, clientData__, macAddress__, trafficStreamID__, role__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlBlockAckDisableReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_DISABLE_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->clientData = (clientData__); \
- msg__->macAddress = (macAddress__); \
- msg__->trafficStreamID = (trafficStreamID__); \
- msg__->role = (role__);
-
-#define CsrWifiRouterCtrlBlockAckDisableReqSendTo(dst__, src__, interfaceTag__, clientData__, macAddress__, trafficStreamID__, role__) \
- { \
- CsrWifiRouterCtrlBlockAckDisableReq *msg__; \
- CsrWifiRouterCtrlBlockAckDisableReqCreate(msg__, dst__, src__, interfaceTag__, clientData__, macAddress__, trafficStreamID__, role__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlBlockAckDisableReqSend(src__, interfaceTag__, clientData__, macAddress__, trafficStreamID__, role__) \
- CsrWifiRouterCtrlBlockAckDisableReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, interfaceTag__, clientData__, macAddress__, trafficStreamID__, role__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlBlockAckDisableCfmSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- interfaceTag -
- status -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlBlockAckDisableCfmCreate(msg__, dst__, src__, clientData__, interfaceTag__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlBlockAckDisableCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_DISABLE_CFM, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__);
-
-#define CsrWifiRouterCtrlBlockAckDisableCfmSendTo(dst__, src__, clientData__, interfaceTag__, status__) \
- { \
- CsrWifiRouterCtrlBlockAckDisableCfm *msg__; \
- CsrWifiRouterCtrlBlockAckDisableCfmCreate(msg__, dst__, src__, clientData__, interfaceTag__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlBlockAckDisableCfmSend(dst__, clientData__, interfaceTag__, status__) \
- CsrWifiRouterCtrlBlockAckDisableCfmSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, interfaceTag__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlBlockAckEnableReqSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
- clientData -
- macAddress -
- trafficStreamID -
- role -
- bufferSize -
- timeout -
- ssn -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlBlockAckEnableReqCreate(msg__, dst__, src__, interfaceTag__, clientData__, macAddress__, trafficStreamID__, role__, bufferSize__, timeout__, ssn__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlBlockAckEnableReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_ENABLE_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->clientData = (clientData__); \
- msg__->macAddress = (macAddress__); \
- msg__->trafficStreamID = (trafficStreamID__); \
- msg__->role = (role__); \
- msg__->bufferSize = (bufferSize__); \
- msg__->timeout = (timeout__); \
- msg__->ssn = (ssn__);
-
-#define CsrWifiRouterCtrlBlockAckEnableReqSendTo(dst__, src__, interfaceTag__, clientData__, macAddress__, trafficStreamID__, role__, bufferSize__, timeout__, ssn__) \
- { \
- CsrWifiRouterCtrlBlockAckEnableReq *msg__; \
- CsrWifiRouterCtrlBlockAckEnableReqCreate(msg__, dst__, src__, interfaceTag__, clientData__, macAddress__, trafficStreamID__, role__, bufferSize__, timeout__, ssn__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlBlockAckEnableReqSend(src__, interfaceTag__, clientData__, macAddress__, trafficStreamID__, role__, bufferSize__, timeout__, ssn__) \
- CsrWifiRouterCtrlBlockAckEnableReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, interfaceTag__, clientData__, macAddress__, trafficStreamID__, role__, bufferSize__, timeout__, ssn__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlBlockAckEnableCfmSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- interfaceTag -
- status -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlBlockAckEnableCfmCreate(msg__, dst__, src__, clientData__, interfaceTag__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlBlockAckEnableCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_ENABLE_CFM, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__);
-
-#define CsrWifiRouterCtrlBlockAckEnableCfmSendTo(dst__, src__, clientData__, interfaceTag__, status__) \
- { \
- CsrWifiRouterCtrlBlockAckEnableCfm *msg__; \
- CsrWifiRouterCtrlBlockAckEnableCfmCreate(msg__, dst__, src__, clientData__, interfaceTag__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlBlockAckEnableCfmSend(dst__, clientData__, interfaceTag__, status__) \
- CsrWifiRouterCtrlBlockAckEnableCfmSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, interfaceTag__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlBlockAckErrorIndSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- interfaceTag -
- trafficStreamID -
- peerMacAddress -
- status -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlBlockAckErrorIndCreate(msg__, dst__, src__, clientData__, interfaceTag__, trafficStreamID__, peerMacAddress__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlBlockAckErrorInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_ERROR_IND, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->trafficStreamID = (trafficStreamID__); \
- msg__->peerMacAddress = (peerMacAddress__); \
- msg__->status = (status__);
-
-#define CsrWifiRouterCtrlBlockAckErrorIndSendTo(dst__, src__, clientData__, interfaceTag__, trafficStreamID__, peerMacAddress__, status__) \
- { \
- CsrWifiRouterCtrlBlockAckErrorInd *msg__; \
- CsrWifiRouterCtrlBlockAckErrorIndCreate(msg__, dst__, src__, clientData__, interfaceTag__, trafficStreamID__, peerMacAddress__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlBlockAckErrorIndSend(dst__, clientData__, interfaceTag__, trafficStreamID__, peerMacAddress__, status__) \
- CsrWifiRouterCtrlBlockAckErrorIndSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, interfaceTag__, trafficStreamID__, peerMacAddress__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlCapabilitiesReqSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- clientData -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlCapabilitiesReqCreate(msg__, dst__, src__, clientData__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlCapabilitiesReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_CAPABILITIES_REQ, dst__, src__); \
- msg__->clientData = (clientData__);
-
-#define CsrWifiRouterCtrlCapabilitiesReqSendTo(dst__, src__, clientData__) \
- { \
- CsrWifiRouterCtrlCapabilitiesReq *msg__; \
- CsrWifiRouterCtrlCapabilitiesReqCreate(msg__, dst__, src__, clientData__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlCapabilitiesReqSend(src__, clientData__) \
- CsrWifiRouterCtrlCapabilitiesReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, clientData__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlCapabilitiesCfmSend
-
- DESCRIPTION
- The router sends this primitive to confirm the size of the queues of the
- HIP.
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- commandQueueSize - Size of command queue
- trafficQueueSize - Size of traffic queue (per AC)
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlCapabilitiesCfmCreate(msg__, dst__, src__, clientData__, commandQueueSize__, trafficQueueSize__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlCapabilitiesCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_CAPABILITIES_CFM, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->commandQueueSize = (commandQueueSize__); \
- msg__->trafficQueueSize = (trafficQueueSize__);
-
-#define CsrWifiRouterCtrlCapabilitiesCfmSendTo(dst__, src__, clientData__, commandQueueSize__, trafficQueueSize__) \
- { \
- CsrWifiRouterCtrlCapabilitiesCfm *msg__; \
- CsrWifiRouterCtrlCapabilitiesCfmCreate(msg__, dst__, src__, clientData__, commandQueueSize__, trafficQueueSize__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlCapabilitiesCfmSend(dst__, clientData__, commandQueueSize__, trafficQueueSize__) \
- CsrWifiRouterCtrlCapabilitiesCfmSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, commandQueueSize__, trafficQueueSize__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlConfigurePowerModeReqSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- clientData -
- mode -
- wakeHost -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlConfigurePowerModeReqCreate(msg__, dst__, src__, clientData__, mode__, wakeHost__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlConfigurePowerModeReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_CONFIGURE_POWER_MODE_REQ, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->mode = (mode__); \
- msg__->wakeHost = (wakeHost__);
-
-#define CsrWifiRouterCtrlConfigurePowerModeReqSendTo(dst__, src__, clientData__, mode__, wakeHost__) \
- { \
- CsrWifiRouterCtrlConfigurePowerModeReq *msg__; \
- CsrWifiRouterCtrlConfigurePowerModeReqCreate(msg__, dst__, src__, clientData__, mode__, wakeHost__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlConfigurePowerModeReqSend(src__, clientData__, mode__, wakeHost__) \
- CsrWifiRouterCtrlConfigurePowerModeReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, clientData__, mode__, wakeHost__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlConnectedIndSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- interfaceTag -
- peerMacAddress -
- peerStatus -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlConnectedIndCreate(msg__, dst__, src__, clientData__, interfaceTag__, peerMacAddress__, peerStatus__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlConnectedInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_CONNECTED_IND, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->peerMacAddress = (peerMacAddress__); \
- msg__->peerStatus = (peerStatus__);
-
-#define CsrWifiRouterCtrlConnectedIndSendTo(dst__, src__, clientData__, interfaceTag__, peerMacAddress__, peerStatus__) \
- { \
- CsrWifiRouterCtrlConnectedInd *msg__; \
- CsrWifiRouterCtrlConnectedIndCreate(msg__, dst__, src__, clientData__, interfaceTag__, peerMacAddress__, peerStatus__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlConnectedIndSend(dst__, clientData__, interfaceTag__, peerMacAddress__, peerStatus__) \
- CsrWifiRouterCtrlConnectedIndSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, interfaceTag__, peerMacAddress__, peerStatus__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlHipReqSend
-
- DESCRIPTION
- This primitive is used for transferring MLME messages to the HIP.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- mlmeCommandLength - Length of the MLME signal
- mlmeCommand - Pointer to the MLME signal
- dataRef1Length - Length of the dataRef1 bulk data
- dataRef1 - Pointer to the bulk data 1
- dataRef2Length - Length of the dataRef2 bulk data
- dataRef2 - Pointer to the bulk data 2
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlHipReqCreate(msg__, dst__, src__, mlmeCommandLength__, mlmeCommand__, dataRef1Length__, dataRef1__, dataRef2Length__, dataRef2__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlHipReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_HIP_REQ, dst__, src__); \
- msg__->mlmeCommandLength = (mlmeCommandLength__); \
- msg__->mlmeCommand = (mlmeCommand__); \
- msg__->dataRef1Length = (dataRef1Length__); \
- msg__->dataRef1 = (dataRef1__); \
- msg__->dataRef2Length = (dataRef2Length__); \
- msg__->dataRef2 = (dataRef2__);
-
-#define CsrWifiRouterCtrlHipReqSendTo(dst__, src__, mlmeCommandLength__, mlmeCommand__, dataRef1Length__, dataRef1__, dataRef2Length__, dataRef2__) \
- { \
- CsrWifiRouterCtrlHipReq *msg__; \
- CsrWifiRouterCtrlHipReqCreate(msg__, dst__, src__, mlmeCommandLength__, mlmeCommand__, dataRef1Length__, dataRef1__, dataRef2Length__, dataRef2__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlHipReqSend(src__, mlmeCommandLength__, mlmeCommand__, dataRef1Length__, dataRef1__, dataRef2Length__, dataRef2__) \
- CsrWifiRouterCtrlHipReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, mlmeCommandLength__, mlmeCommand__, dataRef1Length__, dataRef1__, dataRef2Length__, dataRef2__)
-
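Each primitive in this header follows the same three-layer pattern seen in the HIP_REQ group above: a Create macro that allocates and populates the message, a SendTo macro that wraps Create plus the CsrMsgTransport()/CsrSchedMessagePut() call, and a Send macro that fixes the destination queue (CSR_WIFI_ROUTER_IFACEQUEUE for requests). A standalone sketch of that layering, with invented types and a stub transport rather than the CSR API:

#include <stdio.h>
#include <stdlib.h>

typedef unsigned short queue_t;
#define ROUTER_IFACEQUEUE ((queue_t)1)   /* stand-in for CSR_WIFI_ROUTER_IFACEQUEUE */

struct hip_req {
	queue_t dst, src;
	unsigned short cmdLen;
	void *cmd;
};

/* Stub for the transport: here it just reports and releases the message. */
static void msg_transport(queue_t dst, struct hip_req *msg)
{
	printf("queued HIP_REQ, %u byte command, queue %u\n",
	       (unsigned)msg->cmdLen, (unsigned)dst);
	free(msg->cmd);
	free(msg);
}

/* Create: allocate and populate (no NULL check, mirroring the original macros). */
#define HipReqCreate(msg__, dst__, src__, cmdLen__, cmd__) \
	msg__ = malloc(sizeof(struct hip_req)); \
	msg__->dst = (dst__); \
	msg__->src = (src__); \
	msg__->cmdLen = (cmdLen__); \
	msg__->cmd = (cmd__);

/* SendTo: Create followed by handing the message to the transport. */
#define HipReqSendTo(dst__, src__, cmdLen__, cmd__) \
	{ \
		struct hip_req *msg__; \
		HipReqCreate(msg__, dst__, src__, cmdLen__, cmd__); \
		msg_transport(dst__, msg__); \
	}

/* Send: SendTo with the destination fixed to the router queue. */
#define HipReqSend(src__, cmdLen__, cmd__) \
	HipReqSendTo(ROUTER_IFACEQUEUE, src__, cmdLen__, cmd__)

int main(void)
{
	void *cmd = malloc(32);   /* ownership passes to the message */

	HipReqSend((queue_t)2, 32, cmd);
	return 0;
}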
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlHipIndSend
-
- DESCRIPTION
- This primitive is used for transferring MLME messages from the HIP.
-
- PARAMETERS
- queue - Destination Task Queue
- mlmeCommandLength - Length of the MLME signal
- mlmeCommand - Pointer to the MLME signal
- dataRef1Length - Length of the dataRef1 bulk data
- dataRef1 - Pointer to the bulk data 1
- dataRef2Length - Length of the dataRef2 bulk data
- dataRef2 - Pointer to the bulk data 2
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlHipIndCreate(msg__, dst__, src__, mlmeCommandLength__, mlmeCommand__, dataRef1Length__, dataRef1__, dataRef2Length__, dataRef2__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlHipInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_HIP_IND, dst__, src__); \
- msg__->mlmeCommandLength = (mlmeCommandLength__); \
- msg__->mlmeCommand = (mlmeCommand__); \
- msg__->dataRef1Length = (dataRef1Length__); \
- msg__->dataRef1 = (dataRef1__); \
- msg__->dataRef2Length = (dataRef2Length__); \
- msg__->dataRef2 = (dataRef2__);
-
-#define CsrWifiRouterCtrlHipIndSendTo(dst__, src__, mlmeCommandLength__, mlmeCommand__, dataRef1Length__, dataRef1__, dataRef2Length__, dataRef2__) \
- { \
- CsrWifiRouterCtrlHipInd *msg__; \
- CsrWifiRouterCtrlHipIndCreate(msg__, dst__, src__, mlmeCommandLength__, mlmeCommand__, dataRef1Length__, dataRef1__, dataRef2Length__, dataRef2__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlHipIndSend(dst__, mlmeCommandLength__, mlmeCommand__, dataRef1Length__, dataRef1__, dataRef2Length__, dataRef2__) \
- CsrWifiRouterCtrlHipIndSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, mlmeCommandLength__, mlmeCommand__, dataRef1Length__, dataRef1__, dataRef2Length__, dataRef2__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlM4ReadyToSendIndSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- interfaceTag -
- peerMacAddress -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlM4ReadyToSendIndCreate(msg__, dst__, src__, clientData__, interfaceTag__, peerMacAddress__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlM4ReadyToSendInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_M4_READY_TO_SEND_IND, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->peerMacAddress = (peerMacAddress__);
-
-#define CsrWifiRouterCtrlM4ReadyToSendIndSendTo(dst__, src__, clientData__, interfaceTag__, peerMacAddress__) \
- { \
- CsrWifiRouterCtrlM4ReadyToSendInd *msg__; \
- CsrWifiRouterCtrlM4ReadyToSendIndCreate(msg__, dst__, src__, clientData__, interfaceTag__, peerMacAddress__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlM4ReadyToSendIndSend(dst__, clientData__, interfaceTag__, peerMacAddress__) \
- CsrWifiRouterCtrlM4ReadyToSendIndSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, interfaceTag__, peerMacAddress__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlM4TransmitReqSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
- clientData -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlM4TransmitReqCreate(msg__, dst__, src__, interfaceTag__, clientData__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlM4TransmitReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_M4_TRANSMIT_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->clientData = (clientData__);
-
-#define CsrWifiRouterCtrlM4TransmitReqSendTo(dst__, src__, interfaceTag__, clientData__) \
- { \
- CsrWifiRouterCtrlM4TransmitReq *msg__; \
- CsrWifiRouterCtrlM4TransmitReqCreate(msg__, dst__, src__, interfaceTag__, clientData__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlM4TransmitReqSend(src__, interfaceTag__, clientData__) \
- CsrWifiRouterCtrlM4TransmitReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, interfaceTag__, clientData__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlM4TransmittedIndSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- interfaceTag -
- peerMacAddress -
- status -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlM4TransmittedIndCreate(msg__, dst__, src__, clientData__, interfaceTag__, peerMacAddress__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlM4TransmittedInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_M4_TRANSMITTED_IND, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->peerMacAddress = (peerMacAddress__); \
- msg__->status = (status__);
-
-#define CsrWifiRouterCtrlM4TransmittedIndSendTo(dst__, src__, clientData__, interfaceTag__, peerMacAddress__, status__) \
- { \
- CsrWifiRouterCtrlM4TransmittedInd *msg__; \
- CsrWifiRouterCtrlM4TransmittedIndCreate(msg__, dst__, src__, clientData__, interfaceTag__, peerMacAddress__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlM4TransmittedIndSend(dst__, clientData__, interfaceTag__, peerMacAddress__, status__) \
- CsrWifiRouterCtrlM4TransmittedIndSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, interfaceTag__, peerMacAddress__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlMediaStatusReqSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
- clientData -
- mediaStatus -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlMediaStatusReqCreate(msg__, dst__, src__, interfaceTag__, clientData__, mediaStatus__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlMediaStatusReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_MEDIA_STATUS_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->clientData = (clientData__); \
- msg__->mediaStatus = (mediaStatus__);
-
-#define CsrWifiRouterCtrlMediaStatusReqSendTo(dst__, src__, interfaceTag__, clientData__, mediaStatus__) \
- { \
- CsrWifiRouterCtrlMediaStatusReq *msg__; \
- CsrWifiRouterCtrlMediaStatusReqCreate(msg__, dst__, src__, interfaceTag__, clientData__, mediaStatus__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlMediaStatusReqSend(src__, interfaceTag__, clientData__, mediaStatus__) \
- CsrWifiRouterCtrlMediaStatusReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, interfaceTag__, clientData__, mediaStatus__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlMicFailureIndSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- interfaceTag -
- peerMacAddress -
- unicastPdu -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlMicFailureIndCreate(msg__, dst__, src__, clientData__, interfaceTag__, peerMacAddress__, unicastPdu__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlMicFailureInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_MIC_FAILURE_IND, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->peerMacAddress = (peerMacAddress__); \
- msg__->unicastPdu = (unicastPdu__);
-
-#define CsrWifiRouterCtrlMicFailureIndSendTo(dst__, src__, clientData__, interfaceTag__, peerMacAddress__, unicastPdu__) \
- { \
- CsrWifiRouterCtrlMicFailureInd *msg__; \
- CsrWifiRouterCtrlMicFailureIndCreate(msg__, dst__, src__, clientData__, interfaceTag__, peerMacAddress__, unicastPdu__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlMicFailureIndSend(dst__, clientData__, interfaceTag__, peerMacAddress__, unicastPdu__) \
- CsrWifiRouterCtrlMicFailureIndSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, interfaceTag__, peerMacAddress__, unicastPdu__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlModeSetReqSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
- clientData -
- mode -
- bssid - BSSID of the network the device is going to be a part
- of
- protection - Set to TRUE if encryption is enabled for the
- connection/broadcast frames
- intraBssDistEnabled - If set to TRUE, intra BSS distribution will be
- enabled. If set to FALSE, any unicast PDU which does
- not have the RA as the local MAC address shall be
- ignored. This field is interpreted by the receiver if
- mode is set to CSR_WIFI_ROUTER_CTRL_MODE_P2PGO
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlModeSetReqCreate(msg__, dst__, src__, interfaceTag__, clientData__, mode__, bssid__, protection__, intraBssDistEnabled__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlModeSetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_MODE_SET_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->clientData = (clientData__); \
- msg__->mode = (mode__); \
- msg__->bssid = (bssid__); \
- msg__->protection = (protection__); \
- msg__->intraBssDistEnabled = (intraBssDistEnabled__);
-
-#define CsrWifiRouterCtrlModeSetReqSendTo(dst__, src__, interfaceTag__, clientData__, mode__, bssid__, protection__, intraBssDistEnabled__) \
- { \
- CsrWifiRouterCtrlModeSetReq *msg__; \
- CsrWifiRouterCtrlModeSetReqCreate(msg__, dst__, src__, interfaceTag__, clientData__, mode__, bssid__, protection__, intraBssDistEnabled__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlModeSetReqSend(src__, interfaceTag__, clientData__, mode__, bssid__, protection__, intraBssDistEnabled__) \
- CsrWifiRouterCtrlModeSetReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, interfaceTag__, clientData__, mode__, bssid__, protection__, intraBssDistEnabled__)
-
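One plausible reading of the intraBssDistEnabled note above, sketched as standalone code with invented types and MAC values: when intra BSS distribution is disabled, a received unicast frame is only kept if its receiver address (RA) matches the local MAC address.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct mac_addr { unsigned char a[6]; };

/* Returns true when the frame should be processed by this interface. */
static bool accept_unicast(bool intraBssDistEnabled,
			   const struct mac_addr *ra, const struct mac_addr *local)
{
	if (intraBssDistEnabled)
		return true;                            /* GO may redistribute within the BSS */
	return memcmp(ra->a, local->a, 6) == 0;         /* otherwise only frames addressed to us */
}

int main(void)
{
	struct mac_addr local = { { 0x00, 0x02, 0x5b, 0x00, 0x00, 0x01 } };
	struct mac_addr other = { { 0x00, 0x02, 0x5b, 0x00, 0x00, 0x02 } };

	printf("%d %d\n",
	       accept_unicast(false, &local, &local),   /* 1: RA is the local MAC */
	       accept_unicast(false, &other, &local));  /* 0: frame is dropped    */
	return 0;
}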
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlModeSetCfmSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- interfaceTag -
- mode -
- status -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlModeSetCfmCreate(msg__, dst__, src__, clientData__, interfaceTag__, mode__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlModeSetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_MODE_SET_CFM, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->mode = (mode__); \
- msg__->status = (status__);
-
-#define CsrWifiRouterCtrlModeSetCfmSendTo(dst__, src__, clientData__, interfaceTag__, mode__, status__) \
- { \
- CsrWifiRouterCtrlModeSetCfm *msg__; \
- CsrWifiRouterCtrlModeSetCfmCreate(msg__, dst__, src__, clientData__, interfaceTag__, mode__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlModeSetCfmSend(dst__, clientData__, interfaceTag__, mode__, status__) \
- CsrWifiRouterCtrlModeSetCfmSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, interfaceTag__, mode__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlMulticastAddressIndSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- interfaceTag -
- action -
- setAddressesCount -
- setAddresses -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlMulticastAddressIndCreate(msg__, dst__, src__, clientData__, interfaceTag__, action__, setAddressesCount__, setAddresses__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlMulticastAddressInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_MULTICAST_ADDRESS_IND, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->action = (action__); \
- msg__->setAddressesCount = (setAddressesCount__); \
- msg__->setAddresses = (setAddresses__);
-
-#define CsrWifiRouterCtrlMulticastAddressIndSendTo(dst__, src__, clientData__, interfaceTag__, action__, setAddressesCount__, setAddresses__) \
- { \
- CsrWifiRouterCtrlMulticastAddressInd *msg__; \
- CsrWifiRouterCtrlMulticastAddressIndCreate(msg__, dst__, src__, clientData__, interfaceTag__, action__, setAddressesCount__, setAddresses__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlMulticastAddressIndSend(dst__, clientData__, interfaceTag__, action__, setAddressesCount__, setAddresses__) \
- CsrWifiRouterCtrlMulticastAddressIndSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, interfaceTag__, action__, setAddressesCount__, setAddresses__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlMulticastAddressResSend
-
- DESCRIPTION
-
- PARAMETERS
- interfaceTag -
- clientData -
- status -
- action -
- getAddressesCount -
- getAddresses -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlMulticastAddressResCreate(msg__, dst__, src__, interfaceTag__, clientData__, status__, action__, getAddressesCount__, getAddresses__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlMulticastAddressRes), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_MULTICAST_ADDRESS_RES, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->clientData = (clientData__); \
- msg__->status = (status__); \
- msg__->action = (action__); \
- msg__->getAddressesCount = (getAddressesCount__); \
- msg__->getAddresses = (getAddresses__);
-
-#define CsrWifiRouterCtrlMulticastAddressResSendTo(dst__, src__, interfaceTag__, clientData__, status__, action__, getAddressesCount__, getAddresses__) \
- { \
- CsrWifiRouterCtrlMulticastAddressRes *msg__; \
- CsrWifiRouterCtrlMulticastAddressResCreate(msg__, dst__, src__, interfaceTag__, clientData__, status__, action__, getAddressesCount__, getAddresses__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlMulticastAddressResSend(src__, interfaceTag__, clientData__, status__, action__, getAddressesCount__, getAddresses__) \
- CsrWifiRouterCtrlMulticastAddressResSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, interfaceTag__, clientData__, status__, action__, getAddressesCount__, getAddresses__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlPeerAddReqSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
- clientData -
- peerMacAddress -
- associationId -
- staInfo -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlPeerAddReqCreate(msg__, dst__, src__, interfaceTag__, clientData__, peerMacAddress__, associationId__, staInfo__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlPeerAddReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_PEER_ADD_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->clientData = (clientData__); \
- msg__->peerMacAddress = (peerMacAddress__); \
- msg__->associationId = (associationId__); \
- msg__->staInfo = (staInfo__);
-
-#define CsrWifiRouterCtrlPeerAddReqSendTo(dst__, src__, interfaceTag__, clientData__, peerMacAddress__, associationId__, staInfo__) \
- { \
- CsrWifiRouterCtrlPeerAddReq *msg__; \
- CsrWifiRouterCtrlPeerAddReqCreate(msg__, dst__, src__, interfaceTag__, clientData__, peerMacAddress__, associationId__, staInfo__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlPeerAddReqSend(src__, interfaceTag__, clientData__, peerMacAddress__, associationId__, staInfo__) \
- CsrWifiRouterCtrlPeerAddReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, interfaceTag__, clientData__, peerMacAddress__, associationId__, staInfo__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlPeerAddCfmSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- interfaceTag -
- peerMacAddress -
- peerRecordHandle -
- status -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlPeerAddCfmCreate(msg__, dst__, src__, clientData__, interfaceTag__, peerMacAddress__, peerRecordHandle__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlPeerAddCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_PEER_ADD_CFM, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->peerMacAddress = (peerMacAddress__); \
- msg__->peerRecordHandle = (peerRecordHandle__); \
- msg__->status = (status__);
-
-#define CsrWifiRouterCtrlPeerAddCfmSendTo(dst__, src__, clientData__, interfaceTag__, peerMacAddress__, peerRecordHandle__, status__) \
- { \
- CsrWifiRouterCtrlPeerAddCfm *msg__; \
- CsrWifiRouterCtrlPeerAddCfmCreate(msg__, dst__, src__, clientData__, interfaceTag__, peerMacAddress__, peerRecordHandle__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlPeerAddCfmSend(dst__, clientData__, interfaceTag__, peerMacAddress__, peerRecordHandle__, status__) \
- CsrWifiRouterCtrlPeerAddCfmSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, interfaceTag__, peerMacAddress__, peerRecordHandle__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlPeerDelReqSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
- clientData -
- peerRecordHandle -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlPeerDelReqCreate(msg__, dst__, src__, interfaceTag__, clientData__, peerRecordHandle__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlPeerDelReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_PEER_DEL_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->clientData = (clientData__); \
- msg__->peerRecordHandle = (peerRecordHandle__);
-
-#define CsrWifiRouterCtrlPeerDelReqSendTo(dst__, src__, interfaceTag__, clientData__, peerRecordHandle__) \
- { \
- CsrWifiRouterCtrlPeerDelReq *msg__; \
- CsrWifiRouterCtrlPeerDelReqCreate(msg__, dst__, src__, interfaceTag__, clientData__, peerRecordHandle__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlPeerDelReqSend(src__, interfaceTag__, clientData__, peerRecordHandle__) \
- CsrWifiRouterCtrlPeerDelReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, interfaceTag__, clientData__, peerRecordHandle__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlPeerDelCfmSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- interfaceTag -
- status -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlPeerDelCfmCreate(msg__, dst__, src__, clientData__, interfaceTag__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlPeerDelCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_PEER_DEL_CFM, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__);
-
-#define CsrWifiRouterCtrlPeerDelCfmSendTo(dst__, src__, clientData__, interfaceTag__, status__) \
- { \
- CsrWifiRouterCtrlPeerDelCfm *msg__; \
- CsrWifiRouterCtrlPeerDelCfmCreate(msg__, dst__, src__, clientData__, interfaceTag__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlPeerDelCfmSend(dst__, clientData__, interfaceTag__, status__) \
- CsrWifiRouterCtrlPeerDelCfmSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, interfaceTag__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlPeerUpdateReqSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
- clientData -
- peerRecordHandle -
- powersaveMode -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlPeerUpdateReqCreate(msg__, dst__, src__, interfaceTag__, clientData__, peerRecordHandle__, powersaveMode__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlPeerUpdateReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_PEER_UPDATE_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->clientData = (clientData__); \
- msg__->peerRecordHandle = (peerRecordHandle__); \
- msg__->powersaveMode = (powersaveMode__);
-
-#define CsrWifiRouterCtrlPeerUpdateReqSendTo(dst__, src__, interfaceTag__, clientData__, peerRecordHandle__, powersaveMode__) \
- { \
- CsrWifiRouterCtrlPeerUpdateReq *msg__; \
- CsrWifiRouterCtrlPeerUpdateReqCreate(msg__, dst__, src__, interfaceTag__, clientData__, peerRecordHandle__, powersaveMode__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlPeerUpdateReqSend(src__, interfaceTag__, clientData__, peerRecordHandle__, powersaveMode__) \
- CsrWifiRouterCtrlPeerUpdateReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, interfaceTag__, clientData__, peerRecordHandle__, powersaveMode__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlPeerUpdateCfmSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- interfaceTag -
- status -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlPeerUpdateCfmCreate(msg__, dst__, src__, clientData__, interfaceTag__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlPeerUpdateCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_PEER_UPDATE_CFM, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__);
-
-#define CsrWifiRouterCtrlPeerUpdateCfmSendTo(dst__, src__, clientData__, interfaceTag__, status__) \
- { \
- CsrWifiRouterCtrlPeerUpdateCfm *msg__; \
- CsrWifiRouterCtrlPeerUpdateCfmCreate(msg__, dst__, src__, clientData__, interfaceTag__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlPeerUpdateCfmSend(dst__, clientData__, interfaceTag__, status__) \
- CsrWifiRouterCtrlPeerUpdateCfmSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, interfaceTag__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlPortConfigureReqSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
- clientData -
- uncontrolledPortAction -
- controlledPortAction -
- macAddress -
- setProtection -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlPortConfigureReqCreate(msg__, dst__, src__, interfaceTag__, clientData__, uncontrolledPortAction__, controlledPortAction__, macAddress__, setProtection__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlPortConfigureReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_PORT_CONFIGURE_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->clientData = (clientData__); \
- msg__->uncontrolledPortAction = (uncontrolledPortAction__); \
- msg__->controlledPortAction = (controlledPortAction__); \
- msg__->macAddress = (macAddress__); \
- msg__->setProtection = (setProtection__);
-
-#define CsrWifiRouterCtrlPortConfigureReqSendTo(dst__, src__, interfaceTag__, clientData__, uncontrolledPortAction__, controlledPortAction__, macAddress__, setProtection__) \
- { \
- CsrWifiRouterCtrlPortConfigureReq *msg__; \
- CsrWifiRouterCtrlPortConfigureReqCreate(msg__, dst__, src__, interfaceTag__, clientData__, uncontrolledPortAction__, controlledPortAction__, macAddress__, setProtection__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlPortConfigureReqSend(src__, interfaceTag__, clientData__, uncontrolledPortAction__, controlledPortAction__, macAddress__, setProtection__) \
- CsrWifiRouterCtrlPortConfigureReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, interfaceTag__, clientData__, uncontrolledPortAction__, controlledPortAction__, macAddress__, setProtection__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlPortConfigureCfmSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- interfaceTag -
- status -
- macAddress -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlPortConfigureCfmCreate(msg__, dst__, src__, clientData__, interfaceTag__, status__, macAddress__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlPortConfigureCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_PORT_CONFIGURE_CFM, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__); \
- msg__->macAddress = (macAddress__);
-
-#define CsrWifiRouterCtrlPortConfigureCfmSendTo(dst__, src__, clientData__, interfaceTag__, status__, macAddress__) \
- { \
- CsrWifiRouterCtrlPortConfigureCfm *msg__; \
- CsrWifiRouterCtrlPortConfigureCfmCreate(msg__, dst__, src__, clientData__, interfaceTag__, status__, macAddress__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlPortConfigureCfmSend(dst__, clientData__, interfaceTag__, status__, macAddress__) \
- CsrWifiRouterCtrlPortConfigureCfmSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, interfaceTag__, status__, macAddress__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlQosControlReqSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
- clientData -
- control -
- queueConfig -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlQosControlReqCreate(msg__, dst__, src__, interfaceTag__, clientData__, control__, queueConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlQosControlReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_QOS_CONTROL_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->clientData = (clientData__); \
- msg__->control = (control__); \
- msg__->queueConfig = (queueConfig__);
-
-#define CsrWifiRouterCtrlQosControlReqSendTo(dst__, src__, interfaceTag__, clientData__, control__, queueConfig__) \
- { \
- CsrWifiRouterCtrlQosControlReq *msg__; \
- CsrWifiRouterCtrlQosControlReqCreate(msg__, dst__, src__, interfaceTag__, clientData__, control__, queueConfig__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlQosControlReqSend(src__, interfaceTag__, clientData__, control__, queueConfig__) \
- CsrWifiRouterCtrlQosControlReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, interfaceTag__, clientData__, control__, queueConfig__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlRawSdioDeinitialiseReqSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- clientData -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlRawSdioDeinitialiseReqCreate(msg__, dst__, src__, clientData__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlRawSdioDeinitialiseReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_RAW_SDIO_DEINITIALISE_REQ, dst__, src__); \
- msg__->clientData = (clientData__);
-
-#define CsrWifiRouterCtrlRawSdioDeinitialiseReqSendTo(dst__, src__, clientData__) \
- { \
- CsrWifiRouterCtrlRawSdioDeinitialiseReq *msg__; \
- CsrWifiRouterCtrlRawSdioDeinitialiseReqCreate(msg__, dst__, src__, clientData__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlRawSdioDeinitialiseReqSend(src__, clientData__) \
- CsrWifiRouterCtrlRawSdioDeinitialiseReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, clientData__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlRawSdioDeinitialiseCfmSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- result -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlRawSdioDeinitialiseCfmCreate(msg__, dst__, src__, clientData__, result__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlRawSdioDeinitialiseCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_RAW_SDIO_DEINITIALISE_CFM, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->result = (result__);
-
-#define CsrWifiRouterCtrlRawSdioDeinitialiseCfmSendTo(dst__, src__, clientData__, result__) \
- { \
- CsrWifiRouterCtrlRawSdioDeinitialiseCfm *msg__; \
- CsrWifiRouterCtrlRawSdioDeinitialiseCfmCreate(msg__, dst__, src__, clientData__, result__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlRawSdioDeinitialiseCfmSend(dst__, clientData__, result__) \
- CsrWifiRouterCtrlRawSdioDeinitialiseCfmSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, result__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlRawSdioInitialiseReqSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- clientData -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlRawSdioInitialiseReqCreate(msg__, dst__, src__, clientData__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlRawSdioInitialiseReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_RAW_SDIO_INITIALISE_REQ, dst__, src__); \
- msg__->clientData = (clientData__);
-
-#define CsrWifiRouterCtrlRawSdioInitialiseReqSendTo(dst__, src__, clientData__) \
- { \
- CsrWifiRouterCtrlRawSdioInitialiseReq *msg__; \
- CsrWifiRouterCtrlRawSdioInitialiseReqCreate(msg__, dst__, src__, clientData__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlRawSdioInitialiseReqSend(src__, clientData__) \
- CsrWifiRouterCtrlRawSdioInitialiseReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, clientData__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlRawSdioInitialiseCfmSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- result -
- byteRead -
- byteWrite -
- firmwareDownload -
- reset -
- coreDumpPrepare -
- byteBlockRead -
- gpRead16 -
- gpWrite16 -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlRawSdioInitialiseCfmCreate(msg__, dst__, src__, clientData__, result__, byteRead__, byteWrite__, firmwareDownload__, reset__, coreDumpPrepare__, byteBlockRead__, gpRead16__, gpWrite16__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlRawSdioInitialiseCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_RAW_SDIO_INITIALISE_CFM, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->result = (result__); \
- msg__->byteRead = (byteRead__); \
- msg__->byteWrite = (byteWrite__); \
- msg__->firmwareDownload = (firmwareDownload__); \
- msg__->reset = (reset__); \
- msg__->coreDumpPrepare = (coreDumpPrepare__); \
- msg__->byteBlockRead = (byteBlockRead__); \
- msg__->gpRead16 = (gpRead16__); \
- msg__->gpWrite16 = (gpWrite16__);
-
-#define CsrWifiRouterCtrlRawSdioInitialiseCfmSendTo(dst__, src__, clientData__, result__, byteRead__, byteWrite__, firmwareDownload__, reset__, coreDumpPrepare__, byteBlockRead__, gpRead16__, gpWrite16__) \
- { \
- CsrWifiRouterCtrlRawSdioInitialiseCfm *msg__; \
- CsrWifiRouterCtrlRawSdioInitialiseCfmCreate(msg__, dst__, src__, clientData__, result__, byteRead__, byteWrite__, firmwareDownload__, reset__, coreDumpPrepare__, byteBlockRead__, gpRead16__, gpWrite16__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlRawSdioInitialiseCfmSend(dst__, clientData__, result__, byteRead__, byteWrite__, firmwareDownload__, reset__, coreDumpPrepare__, byteBlockRead__, gpRead16__, gpWrite16__) \
- CsrWifiRouterCtrlRawSdioInitialiseCfmSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, result__, byteRead__, byteWrite__, firmwareDownload__, reset__, coreDumpPrepare__, byteBlockRead__, gpRead16__, gpWrite16__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlResumeIndSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- powerMaintained -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlResumeIndCreate(msg__, dst__, src__, clientData__, powerMaintained__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlResumeInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_RESUME_IND, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->powerMaintained = (powerMaintained__);
-
-#define CsrWifiRouterCtrlResumeIndSendTo(dst__, src__, clientData__, powerMaintained__) \
- { \
- CsrWifiRouterCtrlResumeInd *msg__; \
- CsrWifiRouterCtrlResumeIndCreate(msg__, dst__, src__, clientData__, powerMaintained__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlResumeIndSend(dst__, clientData__, powerMaintained__) \
- CsrWifiRouterCtrlResumeIndSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, powerMaintained__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlResumeResSend
-
- DESCRIPTION
-
- PARAMETERS
- clientData -
- status -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlResumeResCreate(msg__, dst__, src__, clientData__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlResumeRes), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_RESUME_RES, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->status = (status__);
-
-#define CsrWifiRouterCtrlResumeResSendTo(dst__, src__, clientData__, status__) \
- { \
- CsrWifiRouterCtrlResumeRes *msg__; \
- CsrWifiRouterCtrlResumeResCreate(msg__, dst__, src__, clientData__, status__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlResumeResSend(src__, clientData__, status__) \
- CsrWifiRouterCtrlResumeResSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, clientData__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlStaInactiveIndSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- interfaceTag -
- staAddress -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlStaInactiveIndCreate(msg__, dst__, src__, clientData__, interfaceTag__, staAddress__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlStaInactiveInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_STA_INACTIVE_IND, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->staAddress = (staAddress__);
-
-#define CsrWifiRouterCtrlStaInactiveIndSendTo(dst__, src__, clientData__, interfaceTag__, staAddress__) \
- { \
- CsrWifiRouterCtrlStaInactiveInd *msg__; \
- CsrWifiRouterCtrlStaInactiveIndCreate(msg__, dst__, src__, clientData__, interfaceTag__, staAddress__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlStaInactiveIndSend(dst__, clientData__, interfaceTag__, staAddress__) \
- CsrWifiRouterCtrlStaInactiveIndSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, interfaceTag__, staAddress__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlSuspendIndSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- hardSuspend -
- d3Suspend -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlSuspendIndCreate(msg__, dst__, src__, clientData__, hardSuspend__, d3Suspend__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlSuspendInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_SUSPEND_IND, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->hardSuspend = (hardSuspend__); \
- msg__->d3Suspend = (d3Suspend__);
-
-#define CsrWifiRouterCtrlSuspendIndSendTo(dst__, src__, clientData__, hardSuspend__, d3Suspend__) \
- { \
- CsrWifiRouterCtrlSuspendInd *msg__; \
- CsrWifiRouterCtrlSuspendIndCreate(msg__, dst__, src__, clientData__, hardSuspend__, d3Suspend__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlSuspendIndSend(dst__, clientData__, hardSuspend__, d3Suspend__) \
- CsrWifiRouterCtrlSuspendIndSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, hardSuspend__, d3Suspend__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlSuspendResSend
-
- DESCRIPTION
-
- PARAMETERS
- clientData -
- status -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlSuspendResCreate(msg__, dst__, src__, clientData__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlSuspendRes), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_SUSPEND_RES, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->status = (status__);
-
-#define CsrWifiRouterCtrlSuspendResSendTo(dst__, src__, clientData__, status__) \
- { \
- CsrWifiRouterCtrlSuspendRes *msg__; \
- CsrWifiRouterCtrlSuspendResCreate(msg__, dst__, src__, clientData__, status__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlSuspendResSend(src__, clientData__, status__) \
- CsrWifiRouterCtrlSuspendResSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, clientData__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlTclasAddReqSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
- clientData -
- tclasLength -
- tclas -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlTclasAddReqCreate(msg__, dst__, src__, interfaceTag__, clientData__, tclasLength__, tclas__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlTclasAddReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_TCLAS_ADD_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->clientData = (clientData__); \
- msg__->tclasLength = (tclasLength__); \
- msg__->tclas = (tclas__);
-
-#define CsrWifiRouterCtrlTclasAddReqSendTo(dst__, src__, interfaceTag__, clientData__, tclasLength__, tclas__) \
- { \
- CsrWifiRouterCtrlTclasAddReq *msg__; \
- CsrWifiRouterCtrlTclasAddReqCreate(msg__, dst__, src__, interfaceTag__, clientData__, tclasLength__, tclas__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlTclasAddReqSend(src__, interfaceTag__, clientData__, tclasLength__, tclas__) \
- CsrWifiRouterCtrlTclasAddReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, interfaceTag__, clientData__, tclasLength__, tclas__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlTclasAddCfmSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- interfaceTag -
- status -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlTclasAddCfmCreate(msg__, dst__, src__, clientData__, interfaceTag__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlTclasAddCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_TCLAS_ADD_CFM, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__);
-
-#define CsrWifiRouterCtrlTclasAddCfmSendTo(dst__, src__, clientData__, interfaceTag__, status__) \
- { \
- CsrWifiRouterCtrlTclasAddCfm *msg__; \
- CsrWifiRouterCtrlTclasAddCfmCreate(msg__, dst__, src__, clientData__, interfaceTag__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlTclasAddCfmSend(dst__, clientData__, interfaceTag__, status__) \
- CsrWifiRouterCtrlTclasAddCfmSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, interfaceTag__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlTclasDelReqSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
- clientData -
- tclasLength -
- tclas -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlTclasDelReqCreate(msg__, dst__, src__, interfaceTag__, clientData__, tclasLength__, tclas__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlTclasDelReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_TCLAS_DEL_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->clientData = (clientData__); \
- msg__->tclasLength = (tclasLength__); \
- msg__->tclas = (tclas__);
-
-#define CsrWifiRouterCtrlTclasDelReqSendTo(dst__, src__, interfaceTag__, clientData__, tclasLength__, tclas__) \
- { \
- CsrWifiRouterCtrlTclasDelReq *msg__; \
- CsrWifiRouterCtrlTclasDelReqCreate(msg__, dst__, src__, interfaceTag__, clientData__, tclasLength__, tclas__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlTclasDelReqSend(src__, interfaceTag__, clientData__, tclasLength__, tclas__) \
- CsrWifiRouterCtrlTclasDelReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, interfaceTag__, clientData__, tclasLength__, tclas__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlTclasDelCfmSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- interfaceTag -
- status -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlTclasDelCfmCreate(msg__, dst__, src__, clientData__, interfaceTag__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlTclasDelCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_TCLAS_DEL_CFM, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__);
-
-#define CsrWifiRouterCtrlTclasDelCfmSendTo(dst__, src__, clientData__, interfaceTag__, status__) \
- { \
- CsrWifiRouterCtrlTclasDelCfm *msg__; \
- CsrWifiRouterCtrlTclasDelCfmCreate(msg__, dst__, src__, clientData__, interfaceTag__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlTclasDelCfmSend(dst__, clientData__, interfaceTag__, status__) \
- CsrWifiRouterCtrlTclasDelCfmSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, interfaceTag__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlTrafficClassificationReqSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
- clientData -
- trafficType -
- period -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlTrafficClassificationReqCreate(msg__, dst__, src__, interfaceTag__, clientData__, trafficType__, period__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlTrafficClassificationReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_TRAFFIC_CLASSIFICATION_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->clientData = (clientData__); \
- msg__->trafficType = (trafficType__); \
- msg__->period = (period__);
-
-#define CsrWifiRouterCtrlTrafficClassificationReqSendTo(dst__, src__, interfaceTag__, clientData__, trafficType__, period__) \
- { \
- CsrWifiRouterCtrlTrafficClassificationReq *msg__; \
- CsrWifiRouterCtrlTrafficClassificationReqCreate(msg__, dst__, src__, interfaceTag__, clientData__, trafficType__, period__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlTrafficClassificationReqSend(src__, interfaceTag__, clientData__, trafficType__, period__) \
- CsrWifiRouterCtrlTrafficClassificationReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, interfaceTag__, clientData__, trafficType__, period__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlTrafficConfigReqSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
- clientData -
- trafficConfigType -
- config -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlTrafficConfigReqCreate(msg__, dst__, src__, interfaceTag__, clientData__, trafficConfigType__, config__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlTrafficConfigReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_TRAFFIC_CONFIG_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->clientData = (clientData__); \
- msg__->trafficConfigType = (trafficConfigType__); \
- msg__->config = (config__);
-
-#define CsrWifiRouterCtrlTrafficConfigReqSendTo(dst__, src__, interfaceTag__, clientData__, trafficConfigType__, config__) \
- { \
- CsrWifiRouterCtrlTrafficConfigReq *msg__; \
- CsrWifiRouterCtrlTrafficConfigReqCreate(msg__, dst__, src__, interfaceTag__, clientData__, trafficConfigType__, config__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlTrafficConfigReqSend(src__, interfaceTag__, clientData__, trafficConfigType__, config__) \
- CsrWifiRouterCtrlTrafficConfigReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, interfaceTag__, clientData__, trafficConfigType__, config__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlTrafficProtocolIndSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- interfaceTag -
- packetType -
- direction -
- srcAddress -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlTrafficProtocolIndCreate(msg__, dst__, src__, clientData__, interfaceTag__, packetType__, direction__, srcAddress__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlTrafficProtocolInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_TRAFFIC_PROTOCOL_IND, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->packetType = (packetType__); \
- msg__->direction = (direction__); \
- msg__->srcAddress = (srcAddress__);
-
-#define CsrWifiRouterCtrlTrafficProtocolIndSendTo(dst__, src__, clientData__, interfaceTag__, packetType__, direction__, srcAddress__) \
- { \
- CsrWifiRouterCtrlTrafficProtocolInd *msg__; \
- CsrWifiRouterCtrlTrafficProtocolIndCreate(msg__, dst__, src__, clientData__, interfaceTag__, packetType__, direction__, srcAddress__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlTrafficProtocolIndSend(dst__, clientData__, interfaceTag__, packetType__, direction__, srcAddress__) \
- CsrWifiRouterCtrlTrafficProtocolIndSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, interfaceTag__, packetType__, direction__, srcAddress__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlTrafficSampleIndSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- interfaceTag -
- stats -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlTrafficSampleIndCreate(msg__, dst__, src__, clientData__, interfaceTag__, stats__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlTrafficSampleInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_TRAFFIC_SAMPLE_IND, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->stats = (stats__);
-
-#define CsrWifiRouterCtrlTrafficSampleIndSendTo(dst__, src__, clientData__, interfaceTag__, stats__) \
- { \
- CsrWifiRouterCtrlTrafficSampleInd *msg__; \
- CsrWifiRouterCtrlTrafficSampleIndCreate(msg__, dst__, src__, clientData__, interfaceTag__, stats__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlTrafficSampleIndSend(dst__, clientData__, interfaceTag__, stats__) \
- CsrWifiRouterCtrlTrafficSampleIndSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, interfaceTag__, stats__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlUnexpectedFrameIndSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- interfaceTag -
- peerMacAddress -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlUnexpectedFrameIndCreate(msg__, dst__, src__, clientData__, interfaceTag__, peerMacAddress__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlUnexpectedFrameInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_UNEXPECTED_FRAME_IND, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->peerMacAddress = (peerMacAddress__);
-
-#define CsrWifiRouterCtrlUnexpectedFrameIndSendTo(dst__, src__, clientData__, interfaceTag__, peerMacAddress__) \
- { \
- CsrWifiRouterCtrlUnexpectedFrameInd *msg__; \
- CsrWifiRouterCtrlUnexpectedFrameIndCreate(msg__, dst__, src__, clientData__, interfaceTag__, peerMacAddress__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlUnexpectedFrameIndSend(dst__, clientData__, interfaceTag__, peerMacAddress__) \
- CsrWifiRouterCtrlUnexpectedFrameIndSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, interfaceTag__, peerMacAddress__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWapiFilterReqSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
- isWapiConnected -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlWapiFilterReqCreate(msg__, dst__, src__, interfaceTag__, isWapiConnected__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlWapiFilterReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_WAPI_FILTER_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->isWapiConnected = (isWapiConnected__);
-
-#define CsrWifiRouterCtrlWapiFilterReqSendTo(dst__, src__, interfaceTag__, isWapiConnected__) \
- { \
- CsrWifiRouterCtrlWapiFilterReq *msg__; \
- CsrWifiRouterCtrlWapiFilterReqCreate(msg__, dst__, src__, interfaceTag__, isWapiConnected__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlWapiFilterReqSend(src__, interfaceTag__, isWapiConnected__) \
- CsrWifiRouterCtrlWapiFilterReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, interfaceTag__, isWapiConnected__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWapiMulticastFilterReqSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
- status -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlWapiMulticastFilterReqCreate(msg__, dst__, src__, interfaceTag__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlWapiMulticastFilterReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_WAPI_MULTICAST_FILTER_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__);
-
-#define CsrWifiRouterCtrlWapiMulticastFilterReqSendTo(dst__, src__, interfaceTag__, status__) \
- { \
- CsrWifiRouterCtrlWapiMulticastFilterReq *msg__; \
- CsrWifiRouterCtrlWapiMulticastFilterReqCreate(msg__, dst__, src__, interfaceTag__, status__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlWapiMulticastFilterReqSend(src__, interfaceTag__, status__) \
- CsrWifiRouterCtrlWapiMulticastFilterReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, interfaceTag__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWapiRxMicCheckIndSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- interfaceTag -
- signalLength -
- signal -
- dataLength -
- data -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlWapiRxMicCheckIndCreate(msg__, dst__, src__, clientData__, interfaceTag__, signalLength__, signal__, dataLength__, data__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlWapiRxMicCheckInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_WAPI_RX_MIC_CHECK_IND, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->signalLength = (signalLength__); \
- msg__->signal = (signal__); \
- msg__->dataLength = (dataLength__); \
- msg__->data = (data__);
-
-#define CsrWifiRouterCtrlWapiRxMicCheckIndSendTo(dst__, src__, clientData__, interfaceTag__, signalLength__, signal__, dataLength__, data__) \
- { \
- CsrWifiRouterCtrlWapiRxMicCheckInd *msg__; \
- CsrWifiRouterCtrlWapiRxMicCheckIndCreate(msg__, dst__, src__, clientData__, interfaceTag__, signalLength__, signal__, dataLength__, data__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlWapiRxMicCheckIndSend(dst__, clientData__, interfaceTag__, signalLength__, signal__, dataLength__, data__) \
- CsrWifiRouterCtrlWapiRxMicCheckIndSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, interfaceTag__, signalLength__, signal__, dataLength__, data__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWapiRxPktReqSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
- signalLength -
- signal -
- dataLength -
- data -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlWapiRxPktReqCreate(msg__, dst__, src__, interfaceTag__, signalLength__, signal__, dataLength__, data__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlWapiRxPktReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_WAPI_RX_PKT_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->signalLength = (signalLength__); \
- msg__->signal = (signal__); \
- msg__->dataLength = (dataLength__); \
- msg__->data = (data__);
-
-#define CsrWifiRouterCtrlWapiRxPktReqSendTo(dst__, src__, interfaceTag__, signalLength__, signal__, dataLength__, data__) \
- { \
- CsrWifiRouterCtrlWapiRxPktReq *msg__; \
- CsrWifiRouterCtrlWapiRxPktReqCreate(msg__, dst__, src__, interfaceTag__, signalLength__, signal__, dataLength__, data__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlWapiRxPktReqSend(src__, interfaceTag__, signalLength__, signal__, dataLength__, data__) \
- CsrWifiRouterCtrlWapiRxPktReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, interfaceTag__, signalLength__, signal__, dataLength__, data__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWapiUnicastFilterReqSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
- status -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlWapiUnicastFilterReqCreate(msg__, dst__, src__, interfaceTag__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlWapiUnicastFilterReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_WAPI_UNICAST_FILTER_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__);
-
-#define CsrWifiRouterCtrlWapiUnicastFilterReqSendTo(dst__, src__, interfaceTag__, status__) \
- { \
- CsrWifiRouterCtrlWapiUnicastFilterReq *msg__; \
- CsrWifiRouterCtrlWapiUnicastFilterReqCreate(msg__, dst__, src__, interfaceTag__, status__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlWapiUnicastFilterReqSend(src__, interfaceTag__, status__) \
- CsrWifiRouterCtrlWapiUnicastFilterReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, interfaceTag__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWapiUnicastTxEncryptIndSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- interfaceTag -
- dataLength -
- data -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlWapiUnicastTxEncryptIndCreate(msg__, dst__, src__, clientData__, interfaceTag__, dataLength__, data__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlWapiUnicastTxEncryptInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_WAPI_UNICAST_TX_ENCRYPT_IND, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->dataLength = (dataLength__); \
- msg__->data = (data__);
-
-#define CsrWifiRouterCtrlWapiUnicastTxEncryptIndSendTo(dst__, src__, clientData__, interfaceTag__, dataLength__, data__) \
- { \
- CsrWifiRouterCtrlWapiUnicastTxEncryptInd *msg__; \
- CsrWifiRouterCtrlWapiUnicastTxEncryptIndCreate(msg__, dst__, src__, clientData__, interfaceTag__, dataLength__, data__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlWapiUnicastTxEncryptIndSend(dst__, clientData__, interfaceTag__, dataLength__, data__) \
- CsrWifiRouterCtrlWapiUnicastTxEncryptIndSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, interfaceTag__, dataLength__, data__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWapiUnicastTxPktReqSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
- dataLength -
- data -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlWapiUnicastTxPktReqCreate(msg__, dst__, src__, interfaceTag__, dataLength__, data__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlWapiUnicastTxPktReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_WAPI_UNICAST_TX_PKT_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->dataLength = (dataLength__); \
- msg__->data = (data__);
-
-#define CsrWifiRouterCtrlWapiUnicastTxPktReqSendTo(dst__, src__, interfaceTag__, dataLength__, data__) \
- { \
- CsrWifiRouterCtrlWapiUnicastTxPktReq *msg__; \
- CsrWifiRouterCtrlWapiUnicastTxPktReqCreate(msg__, dst__, src__, interfaceTag__, dataLength__, data__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlWapiUnicastTxPktReqSend(src__, interfaceTag__, dataLength__, data__) \
- CsrWifiRouterCtrlWapiUnicastTxPktReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, interfaceTag__, dataLength__, data__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWifiOffReqSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- clientData -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlWifiOffReqCreate(msg__, dst__, src__, clientData__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlWifiOffReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_WIFI_OFF_REQ, dst__, src__); \
- msg__->clientData = (clientData__);
-
-#define CsrWifiRouterCtrlWifiOffReqSendTo(dst__, src__, clientData__) \
- { \
- CsrWifiRouterCtrlWifiOffReq *msg__; \
- CsrWifiRouterCtrlWifiOffReqCreate(msg__, dst__, src__, clientData__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlWifiOffReqSend(src__, clientData__) \
- CsrWifiRouterCtrlWifiOffReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, clientData__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWifiOffIndSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- controlIndication -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlWifiOffIndCreate(msg__, dst__, src__, clientData__, controlIndication__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlWifiOffInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_WIFI_OFF_IND, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->controlIndication = (controlIndication__);
-
-#define CsrWifiRouterCtrlWifiOffIndSendTo(dst__, src__, clientData__, controlIndication__) \
- { \
- CsrWifiRouterCtrlWifiOffInd *msg__; \
- CsrWifiRouterCtrlWifiOffIndCreate(msg__, dst__, src__, clientData__, controlIndication__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlWifiOffIndSend(dst__, clientData__, controlIndication__) \
- CsrWifiRouterCtrlWifiOffIndSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, controlIndication__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWifiOffResSend
-
- DESCRIPTION
-
- PARAMETERS
- clientData -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlWifiOffResCreate(msg__, dst__, src__, clientData__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlWifiOffRes), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_WIFI_OFF_RES, dst__, src__); \
- msg__->clientData = (clientData__);
-
-#define CsrWifiRouterCtrlWifiOffResSendTo(dst__, src__, clientData__) \
- { \
- CsrWifiRouterCtrlWifiOffRes *msg__; \
- CsrWifiRouterCtrlWifiOffResCreate(msg__, dst__, src__, clientData__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlWifiOffResSend(src__, clientData__) \
- CsrWifiRouterCtrlWifiOffResSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, clientData__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWifiOffCfmSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlWifiOffCfmCreate(msg__, dst__, src__, clientData__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlWifiOffCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_WIFI_OFF_CFM, dst__, src__); \
- msg__->clientData = (clientData__);
-
-#define CsrWifiRouterCtrlWifiOffCfmSendTo(dst__, src__, clientData__) \
- { \
- CsrWifiRouterCtrlWifiOffCfm *msg__; \
- CsrWifiRouterCtrlWifiOffCfmCreate(msg__, dst__, src__, clientData__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlWifiOffCfmSend(dst__, clientData__) \
- CsrWifiRouterCtrlWifiOffCfmSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWifiOnReqSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- clientData -
- dataLength - Number of bytes in the buffer pointed to by 'data'
- data - Pointer to the buffer containing 'dataLength' bytes
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlWifiOnReqCreate(msg__, dst__, src__, clientData__, dataLength__, data__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlWifiOnReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_WIFI_ON_REQ, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->dataLength = (dataLength__); \
- msg__->data = (data__);
-
-#define CsrWifiRouterCtrlWifiOnReqSendTo(dst__, src__, clientData__, dataLength__, data__) \
- { \
- CsrWifiRouterCtrlWifiOnReq *msg__; \
- CsrWifiRouterCtrlWifiOnReqCreate(msg__, dst__, src__, clientData__, dataLength__, data__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlWifiOnReqSend(src__, clientData__, dataLength__, data__) \
- CsrWifiRouterCtrlWifiOnReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, clientData__, dataLength__, data__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWifiOnIndSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- status -
- versions -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlWifiOnIndCreate(msg__, dst__, src__, clientData__, status__, versions__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlWifiOnInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_WIFI_ON_IND, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->status = (status__); \
- msg__->versions = (versions__);
-
-#define CsrWifiRouterCtrlWifiOnIndSendTo(dst__, src__, clientData__, status__, versions__) \
- { \
- CsrWifiRouterCtrlWifiOnInd *msg__; \
- CsrWifiRouterCtrlWifiOnIndCreate(msg__, dst__, src__, clientData__, status__, versions__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlWifiOnIndSend(dst__, clientData__, status__, versions__) \
- CsrWifiRouterCtrlWifiOnIndSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, status__, versions__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWifiOnResSend
-
- DESCRIPTION
-
- PARAMETERS
- clientData -
- status -
- numInterfaceAddress -
- stationMacAddress - array size 1 MUST match CSR_WIFI_NUM_INTERFACES
- smeVersions -
- scheduledInterrupt -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlWifiOnResCreate(msg__, dst__, src__, clientData__, status__, numInterfaceAddress__, stationMacAddress__, smeVersions__, scheduledInterrupt__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlWifiOnRes), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_WIFI_ON_RES, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->status = (status__); \
- msg__->numInterfaceAddress = (numInterfaceAddress__); \
- memcpy(msg__->stationMacAddress, (stationMacAddress__), sizeof(CsrWifiMacAddress) * 2); \
- msg__->smeVersions = (smeVersions__); \
- msg__->scheduledInterrupt = (scheduledInterrupt__);
-
-#define CsrWifiRouterCtrlWifiOnResSendTo(dst__, src__, clientData__, status__, numInterfaceAddress__, stationMacAddress__, smeVersions__, scheduledInterrupt__) \
- { \
- CsrWifiRouterCtrlWifiOnRes *msg__; \
- CsrWifiRouterCtrlWifiOnResCreate(msg__, dst__, src__, clientData__, status__, numInterfaceAddress__, stationMacAddress__, smeVersions__, scheduledInterrupt__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlWifiOnResSend(src__, clientData__, status__, numInterfaceAddress__, stationMacAddress__, smeVersions__, scheduledInterrupt__) \
- CsrWifiRouterCtrlWifiOnResSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, clientData__, status__, numInterfaceAddress__, stationMacAddress__, smeVersions__, scheduledInterrupt__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWifiOnCfmSend
-
- DESCRIPTION
-
- PARAMETERS
- queue - Destination Task Queue
- clientData -
- status -
-
-*******************************************************************************/
-#define CsrWifiRouterCtrlWifiOnCfmCreate(msg__, dst__, src__, clientData__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterCtrlWifiOnCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_CTRL_PRIM, CSR_WIFI_ROUTER_CTRL_WIFI_ON_CFM, dst__, src__); \
- msg__->clientData = (clientData__); \
- msg__->status = (status__);
-
-#define CsrWifiRouterCtrlWifiOnCfmSendTo(dst__, src__, clientData__, status__) \
- { \
- CsrWifiRouterCtrlWifiOnCfm *msg__; \
- CsrWifiRouterCtrlWifiOnCfmCreate(msg__, dst__, src__, clientData__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_CTRL_PRIM, msg__); \
- }
-
-#define CsrWifiRouterCtrlWifiOnCfmSend(dst__, clientData__, status__) \
- CsrWifiRouterCtrlWifiOnCfmSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, clientData__, status__)
-
-#endif /* CSR_WIFI_ROUTER_CTRL_LIB_H__ */
diff --git a/drivers/staging/csr/csr_wifi_router_ctrl_prim.h b/drivers/staging/csr/csr_wifi_router_ctrl_prim.h
deleted file mode 100644
index 1312a33..0000000
--- a/drivers/staging/csr/csr_wifi_router_ctrl_prim.h
+++ /dev/null
@@ -1,2113 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#ifndef CSR_WIFI_ROUTER_CTRL_PRIM_H__
-#define CSR_WIFI_ROUTER_CTRL_PRIM_H__
-
-#include <linux/types.h>
-#include "csr_prim_defs.h"
-#include "csr_sched.h"
-#include "csr_wifi_common.h"
-#include "csr_result.h"
-#include "csr_wifi_fsm_event.h"
-
-#define CSR_WIFI_ROUTER_CTRL_PRIM (0x0401)
-
-typedef CsrPrim CsrWifiRouterCtrlPrim;
-
-typedef CsrResult (*CsrWifiRouterCtrlRawSdioByteWrite)(u8 func, u32 address, u8 data);
-typedef CsrResult (*CsrWifiRouterCtrlRawSdioByteRead)(u8 func, u32 address, u8 *pdata);
-typedef CsrResult (*CsrWifiRouterCtrlRawSdioFirmwareDownload)(u32 length, const u8 *pdata);
-typedef CsrResult (*CsrWifiRouterCtrlRawSdioReset)(void);
-typedef CsrResult (*CsrWifiRouterCtrlRawSdioCoreDumpPrepare)(u8 suspendSme);
-typedef CsrResult (*CsrWifiRouterCtrlRawSdioByteBlockRead)(u8 func, u32 address, u8 *pdata, u32 length);
-typedef CsrResult (*CsrWifiRouterCtrlRawSdioGpRead16)(u8 func, u32 address, u16 *pdata);
-typedef CsrResult (*CsrWifiRouterCtrlRawSdioGpWrite16)(u8 func, u32 address, u16 data);
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlBlockAckRole
-
- DESCRIPTION
-
- VALUES
- CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_ORIGINATOR
- -
- CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_RECIPIENT
- -
-
-*******************************************************************************/
-typedef u8 CsrWifiRouterCtrlBlockAckRole;
-#define CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_ORIGINATOR ((CsrWifiRouterCtrlBlockAckRole) 0x00)
-#define CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_RECIPIENT ((CsrWifiRouterCtrlBlockAckRole) 0x01)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlControlIndication
-
- DESCRIPTION
-
- VALUES
- CSR_WIFI_ROUTER_CTRL_CONTROL_INDICATION_ERROR
- -
- CSR_WIFI_ROUTER_CTRL_CONTROL_INDICATION_EXIT
- -
- CSR_WIFI_ROUTER_CTRL_CONTROL_INDICATION_USER_REQUESTED
- -
-
-*******************************************************************************/
-typedef u8 CsrWifiRouterCtrlControlIndication;
-#define CSR_WIFI_ROUTER_CTRL_CONTROL_INDICATION_ERROR ((CsrWifiRouterCtrlControlIndication) 0x01)
-#define CSR_WIFI_ROUTER_CTRL_CONTROL_INDICATION_EXIT ((CsrWifiRouterCtrlControlIndication) 0x02)
-#define CSR_WIFI_ROUTER_CTRL_CONTROL_INDICATION_USER_REQUESTED ((CsrWifiRouterCtrlControlIndication) 0x03)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlListAction
-
- DESCRIPTION
-
- VALUES
- CSR_WIFI_ROUTER_CTRL_LIST_ACTION_GET
- -
- CSR_WIFI_ROUTER_CTRL_LIST_ACTION_ADD
- -
- CSR_WIFI_ROUTER_CTRL_LIST_ACTION_REMOVE
- -
- CSR_WIFI_ROUTER_CTRL_LIST_ACTION_FLUSH
- -
-
-*******************************************************************************/
-typedef u8 CsrWifiRouterCtrlListAction;
-#define CSR_WIFI_ROUTER_CTRL_LIST_ACTION_GET ((CsrWifiRouterCtrlListAction) 0x00)
-#define CSR_WIFI_ROUTER_CTRL_LIST_ACTION_ADD ((CsrWifiRouterCtrlListAction) 0x01)
-#define CSR_WIFI_ROUTER_CTRL_LIST_ACTION_REMOVE ((CsrWifiRouterCtrlListAction) 0x02)
-#define CSR_WIFI_ROUTER_CTRL_LIST_ACTION_FLUSH ((CsrWifiRouterCtrlListAction) 0x03)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlLowPowerMode
-
- DESCRIPTION
-
- VALUES
- CSR_WIFI_ROUTER_CTRL_LOW_POWER_MODE_DISABLED
- -
- CSR_WIFI_ROUTER_CTRL_LOW_POWER_MODE_ENABLED
- -
-
-*******************************************************************************/
-typedef u16 CsrWifiRouterCtrlLowPowerMode;
-#define CSR_WIFI_ROUTER_CTRL_LOW_POWER_MODE_DISABLED ((CsrWifiRouterCtrlLowPowerMode) 0x0000)
-#define CSR_WIFI_ROUTER_CTRL_LOW_POWER_MODE_ENABLED ((CsrWifiRouterCtrlLowPowerMode) 0x0001)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlMediaStatus
-
- DESCRIPTION
-
- VALUES
- CSR_WIFI_ROUTER_CTRL_MEDIA_STATUS_CONNECTED
- -
- CSR_WIFI_ROUTER_CTRL_MEDIA_STATUS_DISCONNECTED
- -
-
-*******************************************************************************/
-typedef u8 CsrWifiRouterCtrlMediaStatus;
-#define CSR_WIFI_ROUTER_CTRL_MEDIA_STATUS_CONNECTED ((CsrWifiRouterCtrlMediaStatus) 0x00)
-#define CSR_WIFI_ROUTER_CTRL_MEDIA_STATUS_DISCONNECTED ((CsrWifiRouterCtrlMediaStatus) 0x01)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlMode
-
- DESCRIPTION
-
- VALUES
- CSR_WIFI_ROUTER_CTRL_MODE_NONE -
- CSR_WIFI_ROUTER_CTRL_MODE_IBSS -
- CSR_WIFI_ROUTER_CTRL_MODE_STA -
- CSR_WIFI_ROUTER_CTRL_MODE_AP -
- CSR_WIFI_ROUTER_CTRL_MODE_MONITOR -
- CSR_WIFI_ROUTER_CTRL_MODE_AMP -
- CSR_WIFI_ROUTER_CTRL_MODE_P2P -
- CSR_WIFI_ROUTER_CTRL_MODE_P2PGO -
- CSR_WIFI_ROUTER_CTRL_MODE_P2PCLI -
-
-*******************************************************************************/
-typedef u8 CsrWifiRouterCtrlMode;
-#define CSR_WIFI_ROUTER_CTRL_MODE_NONE ((CsrWifiRouterCtrlMode) 0x00)
-#define CSR_WIFI_ROUTER_CTRL_MODE_IBSS ((CsrWifiRouterCtrlMode) 0x01)
-#define CSR_WIFI_ROUTER_CTRL_MODE_STA ((CsrWifiRouterCtrlMode) 0x02)
-#define CSR_WIFI_ROUTER_CTRL_MODE_AP ((CsrWifiRouterCtrlMode) 0x03)
-#define CSR_WIFI_ROUTER_CTRL_MODE_MONITOR ((CsrWifiRouterCtrlMode) 0x04)
-#define CSR_WIFI_ROUTER_CTRL_MODE_AMP ((CsrWifiRouterCtrlMode) 0x05)
-#define CSR_WIFI_ROUTER_CTRL_MODE_P2P ((CsrWifiRouterCtrlMode) 0x06)
-#define CSR_WIFI_ROUTER_CTRL_MODE_P2PGO ((CsrWifiRouterCtrlMode) 0x07)
-#define CSR_WIFI_ROUTER_CTRL_MODE_P2PCLI ((CsrWifiRouterCtrlMode) 0x08)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlPeerStatus
-
- DESCRIPTION
-
- VALUES
- CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_ACTIVE
- -
- CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_POWER_SAVE
- -
- CSR_WIFI_ROUTER_CTRL_PEER_DISCONNECTED
- -
-
-*******************************************************************************/
-typedef u8 CsrWifiRouterCtrlPeerStatus;
-#define CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_ACTIVE ((CsrWifiRouterCtrlPeerStatus) 0x00)
-#define CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_POWER_SAVE ((CsrWifiRouterCtrlPeerStatus) 0x01)
-#define CSR_WIFI_ROUTER_CTRL_PEER_DISCONNECTED ((CsrWifiRouterCtrlPeerStatus) 0x02)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlPortAction
-
- DESCRIPTION
-
- VALUES
- CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_OPEN
- -
- CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_CLOSED_DISCARD
- -
- CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_CLOSED_BLOCK
- -
-
-*******************************************************************************/
-typedef u16 CsrWifiRouterCtrlPortAction;
-#define CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_OPEN ((CsrWifiRouterCtrlPortAction) 0x0000)
-#define CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_CLOSED_DISCARD ((CsrWifiRouterCtrlPortAction) 0x0001)
-#define CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_CLOSED_BLOCK ((CsrWifiRouterCtrlPortAction) 0x0002)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlPowersaveType
-
- DESCRIPTION
-
- VALUES
- CSR_WIFI_ROUTER_CTRL_AC_BK_PS_INFO_PRESENT
- - If set, AC BK PS info is present in b4 and b5
- CSR_WIFI_ROUTER_CTRL_AC_BE_PS_INFO_PRESENT
- - If set, AC BE PS info is present in b6 and b7
- CSR_WIFI_ROUTER_CTRL_AC_VI_PS_INFO_PRESENT
- - If set, AC VI PS info is present in b8 and b9
- CSR_WIFI_ROUTER_CTRL_AC_VO_PS_INFO_PRESENT
- - If set, AC VO PS info is present in b10 and b11
- CSR_WIFI_ROUTER_CTRL_AC_BK_TRIGGER_ENABLED
- -
- CSR_WIFI_ROUTER_CTRL_AC_BK_DELIVERY_ENABLED
- -
- CSR_WIFI_ROUTER_CTRL_AC_BE_TRIGGER_ENABLED
- -
- CSR_WIFI_ROUTER_CTRL_AC_BE_DELIVERY_ENABLED
- -
- CSR_WIFI_ROUTER_CTRL_AC_VI_TRIGGER_ENABLED
- -
- CSR_WIFI_ROUTER_CTRL_AC_VI_DELIVERY_ENABLED
- -
- CSR_WIFI_ROUTER_CTRL_AC_VO_TRIGGER_ENABLED
- -
- CSR_WIFI_ROUTER_CTRL_AC_VO_DELIVERY_ENABLED
- -
-
-*******************************************************************************/
-typedef u16 CsrWifiRouterCtrlPowersaveType;
-#define CSR_WIFI_ROUTER_CTRL_AC_BK_PS_INFO_PRESENT ((CsrWifiRouterCtrlPowersaveType) 0x0001)
-#define CSR_WIFI_ROUTER_CTRL_AC_BE_PS_INFO_PRESENT ((CsrWifiRouterCtrlPowersaveType) 0x0002)
-#define CSR_WIFI_ROUTER_CTRL_AC_VI_PS_INFO_PRESENT ((CsrWifiRouterCtrlPowersaveType) 0x0004)
-#define CSR_WIFI_ROUTER_CTRL_AC_VO_PS_INFO_PRESENT ((CsrWifiRouterCtrlPowersaveType) 0x0008)
-#define CSR_WIFI_ROUTER_CTRL_AC_BK_TRIGGER_ENABLED ((CsrWifiRouterCtrlPowersaveType) 0x0010)
-#define CSR_WIFI_ROUTER_CTRL_AC_BK_DELIVERY_ENABLED ((CsrWifiRouterCtrlPowersaveType) 0x0020)
-#define CSR_WIFI_ROUTER_CTRL_AC_BE_TRIGGER_ENABLED ((CsrWifiRouterCtrlPowersaveType) 0x0040)
-#define CSR_WIFI_ROUTER_CTRL_AC_BE_DELIVERY_ENABLED ((CsrWifiRouterCtrlPowersaveType) 0x0080)
-#define CSR_WIFI_ROUTER_CTRL_AC_VI_TRIGGER_ENABLED ((CsrWifiRouterCtrlPowersaveType) 0x0100)
-#define CSR_WIFI_ROUTER_CTRL_AC_VI_DELIVERY_ENABLED ((CsrWifiRouterCtrlPowersaveType) 0x0200)
-#define CSR_WIFI_ROUTER_CTRL_AC_VO_TRIGGER_ENABLED ((CsrWifiRouterCtrlPowersaveType) 0x0400)
-#define CSR_WIFI_ROUTER_CTRL_AC_VO_DELIVERY_ENABLED ((CsrWifiRouterCtrlPowersaveType) 0x0800)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlProtocolDirection
-
- DESCRIPTION
-
- VALUES
- CSR_WIFI_ROUTER_CTRL_PROTOCOL_DIRECTION_RX
- -
- CSR_WIFI_ROUTER_CTRL_PROTOCOL_DIRECTION_TX
- -
-
-*******************************************************************************/
-typedef u16 CsrWifiRouterCtrlProtocolDirection;
-#define CSR_WIFI_ROUTER_CTRL_PROTOCOL_DIRECTION_RX ((CsrWifiRouterCtrlProtocolDirection) 0x0000)
-#define CSR_WIFI_ROUTER_CTRL_PROTOCOL_DIRECTION_TX ((CsrWifiRouterCtrlProtocolDirection) 0x0001)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlQoSControl
-
- DESCRIPTION
-
- VALUES
- CSR_WIFI_ROUTER_CTRL_QOS_CONTROL_OFF
- -
- CSR_WIFI_ROUTER_CTRL_QOS_CONTROL_WMM_ON
- -
- CSR_WIFI_ROUTER_CTRL_QOS_CONTROL_80211_ON
- -
-
-*******************************************************************************/
-typedef u16 CsrWifiRouterCtrlQoSControl;
-#define CSR_WIFI_ROUTER_CTRL_QOS_CONTROL_OFF ((CsrWifiRouterCtrlQoSControl) 0x0000)
-#define CSR_WIFI_ROUTER_CTRL_QOS_CONTROL_WMM_ON ((CsrWifiRouterCtrlQoSControl) 0x0001)
-#define CSR_WIFI_ROUTER_CTRL_QOS_CONTROL_80211_ON ((CsrWifiRouterCtrlQoSControl) 0x0002)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlQueueConfig
-
- DESCRIPTION
- Defines which Queues are enabled for use.
-
- VALUES
- CSR_WIFI_ROUTER_CTRL_QUEUE_BE_ENABLE
- -
- CSR_WIFI_ROUTER_CTRL_QUEUE_BK_ENABLE
- -
- CSR_WIFI_ROUTER_CTRL_QUEUE_VI_ENABLE
- -
- CSR_WIFI_ROUTER_CTRL_QUEUE_VO_ENABLE
- -
-
-*******************************************************************************/
-typedef u8 CsrWifiRouterCtrlQueueConfig;
-#define CSR_WIFI_ROUTER_CTRL_QUEUE_BE_ENABLE ((CsrWifiRouterCtrlQueueConfig) 0x01)
-#define CSR_WIFI_ROUTER_CTRL_QUEUE_BK_ENABLE ((CsrWifiRouterCtrlQueueConfig) 0x02)
-#define CSR_WIFI_ROUTER_CTRL_QUEUE_VI_ENABLE ((CsrWifiRouterCtrlQueueConfig) 0x04)
-#define CSR_WIFI_ROUTER_CTRL_QUEUE_VO_ENABLE ((CsrWifiRouterCtrlQueueConfig) 0x08)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlTrafficConfigType
-
- DESCRIPTION
-
- VALUES
- CSR_WIFI_ROUTER_CTRL_TRAFFIC_CONFIG_TYPE_RESET
- -
- CSR_WIFI_ROUTER_CTRL_TRAFFIC_CONFIG_TYPE_FILTER
- -
- CSR_WIFI_ROUTER_CTRL_TRAFFIC_CONFIG_TYPE_CLS
- -
-
-*******************************************************************************/
-typedef u16 CsrWifiRouterCtrlTrafficConfigType;
-#define CSR_WIFI_ROUTER_CTRL_TRAFFIC_CONFIG_TYPE_RESET ((CsrWifiRouterCtrlTrafficConfigType) 0x0000)
-#define CSR_WIFI_ROUTER_CTRL_TRAFFIC_CONFIG_TYPE_FILTER ((CsrWifiRouterCtrlTrafficConfigType) 0x0001)
-#define CSR_WIFI_ROUTER_CTRL_TRAFFIC_CONFIG_TYPE_CLS ((CsrWifiRouterCtrlTrafficConfigType) 0x0002)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlTrafficPacketType
-
- DESCRIPTION
-
- VALUES
- CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_NONE
- -
- CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_EAPOL
- -
- CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_DHCP
- -
- CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_DHCP_ACK
- -
- CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_ARP
- -
- CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_AIRONET
- -
- CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_CUSTOM
- -
- CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_ALL
- -
-
-*******************************************************************************/
-typedef u16 CsrWifiRouterCtrlTrafficPacketType;
-#define CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_NONE ((CsrWifiRouterCtrlTrafficPacketType) 0x0000)
-#define CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_EAPOL ((CsrWifiRouterCtrlTrafficPacketType) 0x0001)
-#define CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_DHCP ((CsrWifiRouterCtrlTrafficPacketType) 0x0002)
-#define CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_DHCP_ACK ((CsrWifiRouterCtrlTrafficPacketType) 0x0004)
-#define CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_ARP ((CsrWifiRouterCtrlTrafficPacketType) 0x0008)
-#define CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_AIRONET ((CsrWifiRouterCtrlTrafficPacketType) 0x0010)
-#define CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_CUSTOM ((CsrWifiRouterCtrlTrafficPacketType) 0x0020)
-#define CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_ALL ((CsrWifiRouterCtrlTrafficPacketType) 0x00FF)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlTrafficType
-
- DESCRIPTION
-
- VALUES
- CSR_WIFI_ROUTER_CTRL_TRAFFIC_TYPE_OCCASIONAL
- -
- CSR_WIFI_ROUTER_CTRL_TRAFFIC_TYPE_BURSTY
- -
- CSR_WIFI_ROUTER_CTRL_TRAFFIC_TYPE_PERIODIC
- -
- CSR_WIFI_ROUTER_CTRL_TRAFFIC_TYPE_CONTINUOUS
- -
-
-*******************************************************************************/
-typedef u8 CsrWifiRouterCtrlTrafficType;
-#define CSR_WIFI_ROUTER_CTRL_TRAFFIC_TYPE_OCCASIONAL ((CsrWifiRouterCtrlTrafficType) 0x00)
-#define CSR_WIFI_ROUTER_CTRL_TRAFFIC_TYPE_BURSTY ((CsrWifiRouterCtrlTrafficType) 0x01)
-#define CSR_WIFI_ROUTER_CTRL_TRAFFIC_TYPE_PERIODIC ((CsrWifiRouterCtrlTrafficType) 0x02)
-#define CSR_WIFI_ROUTER_CTRL_TRAFFIC_TYPE_CONTINUOUS ((CsrWifiRouterCtrlTrafficType) 0x03)
-
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlPeerRecordHandle
-
- DESCRIPTION
-
-*******************************************************************************/
-typedef u32 CsrWifiRouterCtrlPeerRecordHandle;
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlPowersaveTypeMask
-
- DESCRIPTION
- Mask type for use with the values defined by
- CsrWifiRouterCtrlPowersaveType
-
-*******************************************************************************/
-typedef u16 CsrWifiRouterCtrlPowersaveTypeMask;
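The CsrWifiRouterCtrlPowersaveTypeMask above is a plain u16 bit mask built from the CSR_WIFI_ROUTER_CTRL_AC_* flags defined earlier in this header. A minimal sketch of how a caller might build and test such a mask, assuming only the declarations in csr_wifi_router_ctrl_prim.h (the helper names are illustrative and not part of the deleted driver):

#include "csr_wifi_router_ctrl_prim.h"

/* Illustrative only: U-APSD style mask for the VO access category. */
static CsrWifiRouterCtrlPowersaveTypeMask vo_uapsd_mask(void)
{
    return CSR_WIFI_ROUTER_CTRL_AC_VO_PS_INFO_PRESENT |
           CSR_WIFI_ROUTER_CTRL_AC_VO_TRIGGER_ENABLED |
           CSR_WIFI_ROUTER_CTRL_AC_VO_DELIVERY_ENABLED;
}

/* Illustrative only: test a single flag in a stored mask. */
static int vo_delivery_enabled(CsrWifiRouterCtrlPowersaveTypeMask mask)
{
    return (mask & CSR_WIFI_ROUTER_CTRL_AC_VO_DELIVERY_ENABLED) != 0;
}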
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlQueueConfigMask
-
- DESCRIPTION
- Mask type for use with the values defined by CsrWifiRouterCtrlQueueConfig
-
-*******************************************************************************/
-typedef u8 CsrWifiRouterCtrlQueueConfigMask;
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlRequestorInfo
-
- DESCRIPTION
-
-*******************************************************************************/
-typedef u16 CsrWifiRouterCtrlRequestorInfo;
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlTrafficStreamId
-
- DESCRIPTION
-
-*******************************************************************************/
-typedef u8 CsrWifiRouterCtrlTrafficStreamId;
-
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlSmeVersions
-
- DESCRIPTION
-
- MEMBERS
- firmwarePatch -
- smeBuild -
- smeHip -
-
-*******************************************************************************/
-typedef struct
-{
- u32 firmwarePatch;
- char *smeBuild;
- u32 smeHip;
-} CsrWifiRouterCtrlSmeVersions;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlStaInfo
-
- DESCRIPTION
-
- MEMBERS
- wmmOrQosEnabled -
- powersaveMode -
- maxSpLength -
- listenIntervalInTus -
-
-*******************************************************************************/
-typedef struct
-{
- u8 wmmOrQosEnabled;
- CsrWifiRouterCtrlPowersaveTypeMask powersaveMode;
- u8 maxSpLength;
- u16 listenIntervalInTus;
-} CsrWifiRouterCtrlStaInfo;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlTrafficFilter
-
- DESCRIPTION
-
- MEMBERS
- etherType -
- ipType -
- udpSourcePort -
- udpDestPort -
-
-*******************************************************************************/
-typedef struct
-{
- u32 etherType;
- u8 ipType;
- u32 udpSourcePort;
- u32 udpDestPort;
-} CsrWifiRouterCtrlTrafficFilter;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlTrafficStats
-
- DESCRIPTION
-
- MEMBERS
- rxMeanRate - Mean rx data rate over the interval
- rxFramesNum - Number of Rx frames per second (used for CYCLE_3)
- txFramesNum - Number of Tx frames per second (used for CYCLE_3)
- rxBytesCount - Calculated Rx throughput per second (used for CYCLE_2)
- txBytesCount - Calculated Tx throughput per second (used for CYCLE_2)
- intervals - array size 11 MUST match TA_INTERVALS_NUM
-
-*******************************************************************************/
-typedef struct
-{
- u32 rxMeanRate;
- u32 rxFramesNum;
- u32 txFramesNum;
- u32 rxBytesCount;
- u32 txBytesCount;
- u8 intervals[11];
-} CsrWifiRouterCtrlTrafficStats;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlVersions
-
- DESCRIPTION
-
- MEMBERS
- chipId -
- chipVersion -
- firmwareBuild -
- firmwareHip -
- routerBuild -
- routerHip -
-
-*******************************************************************************/
-typedef struct
-{
- u32 chipId;
- u32 chipVersion;
- u32 firmwareBuild;
- u32 firmwareHip;
- char *routerBuild;
- u32 routerHip;
-} CsrWifiRouterCtrlVersions;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlTrafficConfig
-
- DESCRIPTION
-
- MEMBERS
- packetFilter -
- customFilter -
-
-*******************************************************************************/
-typedef struct
-{
- u16 packetFilter;
- CsrWifiRouterCtrlTrafficFilter customFilter;
-} CsrWifiRouterCtrlTrafficConfig;
-
-
-/* Downstream */
-#define CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST (0x0000)
-
-#define CSR_WIFI_ROUTER_CTRL_CONFIGURE_POWER_MODE_REQ ((CsrWifiRouterCtrlPrim) (0x0000 + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_HIP_REQ ((CsrWifiRouterCtrlPrim) (0x0001 + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_MEDIA_STATUS_REQ ((CsrWifiRouterCtrlPrim) (0x0002 + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_MULTICAST_ADDRESS_RES ((CsrWifiRouterCtrlPrim) (0x0003 + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_PORT_CONFIGURE_REQ ((CsrWifiRouterCtrlPrim) (0x0004 + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_QOS_CONTROL_REQ ((CsrWifiRouterCtrlPrim) (0x0005 + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_SUSPEND_RES ((CsrWifiRouterCtrlPrim) (0x0006 + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_TCLAS_ADD_REQ ((CsrWifiRouterCtrlPrim) (0x0007 + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_RESUME_RES ((CsrWifiRouterCtrlPrim) (0x0008 + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_RAW_SDIO_DEINITIALISE_REQ ((CsrWifiRouterCtrlPrim) (0x0009 + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_RAW_SDIO_INITIALISE_REQ ((CsrWifiRouterCtrlPrim) (0x000A + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_TCLAS_DEL_REQ ((CsrWifiRouterCtrlPrim) (0x000B + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_TRAFFIC_CLASSIFICATION_REQ ((CsrWifiRouterCtrlPrim) (0x000C + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_TRAFFIC_CONFIG_REQ ((CsrWifiRouterCtrlPrim) (0x000D + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_WIFI_OFF_REQ ((CsrWifiRouterCtrlPrim) (0x000E + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_WIFI_OFF_RES ((CsrWifiRouterCtrlPrim) (0x000F + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_WIFI_ON_REQ ((CsrWifiRouterCtrlPrim) (0x0010 + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_WIFI_ON_RES ((CsrWifiRouterCtrlPrim) (0x0011 + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_M4_TRANSMIT_REQ ((CsrWifiRouterCtrlPrim) (0x0012 + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_MODE_SET_REQ ((CsrWifiRouterCtrlPrim) (0x0013 + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_PEER_ADD_REQ ((CsrWifiRouterCtrlPrim) (0x0014 + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_PEER_DEL_REQ ((CsrWifiRouterCtrlPrim) (0x0015 + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_PEER_UPDATE_REQ ((CsrWifiRouterCtrlPrim) (0x0016 + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_CAPABILITIES_REQ ((CsrWifiRouterCtrlPrim) (0x0017 + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_ENABLE_REQ ((CsrWifiRouterCtrlPrim) (0x0018 + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_DISABLE_REQ ((CsrWifiRouterCtrlPrim) (0x0019 + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_WAPI_RX_PKT_REQ ((CsrWifiRouterCtrlPrim) (0x001A + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_WAPI_MULTICAST_FILTER_REQ ((CsrWifiRouterCtrlPrim) (0x001B + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_WAPI_UNICAST_FILTER_REQ ((CsrWifiRouterCtrlPrim) (0x001C + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_WAPI_UNICAST_TX_PKT_REQ ((CsrWifiRouterCtrlPrim) (0x001D + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_WAPI_FILTER_REQ ((CsrWifiRouterCtrlPrim) (0x001E + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST))
-
-
-#define CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_HIGHEST (0x001E + CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST)
-
-/* Upstream */
-#define CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST (0x0000 + CSR_PRIM_UPSTREAM)
-
-#define CSR_WIFI_ROUTER_CTRL_HIP_IND ((CsrWifiRouterCtrlPrim)(0x0000 + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_MULTICAST_ADDRESS_IND ((CsrWifiRouterCtrlPrim)(0x0001 + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_PORT_CONFIGURE_CFM ((CsrWifiRouterCtrlPrim)(0x0002 + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_RESUME_IND ((CsrWifiRouterCtrlPrim)(0x0003 + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_SUSPEND_IND ((CsrWifiRouterCtrlPrim)(0x0004 + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_TCLAS_ADD_CFM ((CsrWifiRouterCtrlPrim)(0x0005 + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_RAW_SDIO_DEINITIALISE_CFM ((CsrWifiRouterCtrlPrim)(0x0006 + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_RAW_SDIO_INITIALISE_CFM ((CsrWifiRouterCtrlPrim)(0x0007 + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_TCLAS_DEL_CFM ((CsrWifiRouterCtrlPrim)(0x0008 + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_TRAFFIC_PROTOCOL_IND ((CsrWifiRouterCtrlPrim)(0x0009 + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_TRAFFIC_SAMPLE_IND ((CsrWifiRouterCtrlPrim)(0x000A + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_WIFI_OFF_IND ((CsrWifiRouterCtrlPrim)(0x000B + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_WIFI_OFF_CFM ((CsrWifiRouterCtrlPrim)(0x000C + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_WIFI_ON_IND ((CsrWifiRouterCtrlPrim)(0x000D + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_WIFI_ON_CFM ((CsrWifiRouterCtrlPrim)(0x000E + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_M4_READY_TO_SEND_IND ((CsrWifiRouterCtrlPrim)(0x000F + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_M4_TRANSMITTED_IND ((CsrWifiRouterCtrlPrim)(0x0010 + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_MIC_FAILURE_IND ((CsrWifiRouterCtrlPrim)(0x0011 + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_CONNECTED_IND ((CsrWifiRouterCtrlPrim)(0x0012 + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_PEER_ADD_CFM ((CsrWifiRouterCtrlPrim)(0x0013 + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_PEER_DEL_CFM ((CsrWifiRouterCtrlPrim)(0x0014 + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_UNEXPECTED_FRAME_IND ((CsrWifiRouterCtrlPrim)(0x0015 + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_PEER_UPDATE_CFM ((CsrWifiRouterCtrlPrim)(0x0016 + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_CAPABILITIES_CFM ((CsrWifiRouterCtrlPrim)(0x0017 + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_ENABLE_CFM ((CsrWifiRouterCtrlPrim)(0x0018 + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_DISABLE_CFM ((CsrWifiRouterCtrlPrim)(0x0019 + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_ERROR_IND ((CsrWifiRouterCtrlPrim)(0x001A + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_STA_INACTIVE_IND ((CsrWifiRouterCtrlPrim)(0x001B + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_WAPI_RX_MIC_CHECK_IND ((CsrWifiRouterCtrlPrim)(0x001C + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_MODE_SET_CFM ((CsrWifiRouterCtrlPrim)(0x001D + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_CTRL_WAPI_UNICAST_TX_ENCRYPT_IND ((CsrWifiRouterCtrlPrim)(0x001E + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST))
-
-#define CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_HIGHEST (0x001E + CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST)
-
-#define CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_COUNT (CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_HIGHEST + 1 - CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST)
-#define CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_COUNT (CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_HIGHEST + 1 - CSR_WIFI_ROUTER_CTRL_PRIM_UPSTREAM_LOWEST)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlConfigurePowerModeReq
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- mode -
- wakeHost -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- CsrWifiRouterCtrlLowPowerMode mode;
- u8 wakeHost;
-} CsrWifiRouterCtrlConfigurePowerModeReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlHipReq
-
- DESCRIPTION
- This primitive is used for transferring MLME messages to the HIP.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- mlmeCommandLength - Length of the MLME signal
- mlmeCommand - Pointer to the MLME signal
- dataRef1Length - Length of the dataRef1 bulk data
- dataRef1 - Pointer to the bulk data 1
- dataRef2Length - Length of the dataRef2 bulk data
- dataRef2 - Pointer to the bulk data 2
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 mlmeCommandLength;
- u8 *mlmeCommand;
- u16 dataRef1Length;
- u8 *dataRef1;
- u16 dataRef2Length;
- u8 *dataRef2;
-} CsrWifiRouterCtrlHipReq;
-
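A hypothetical helper showing how a CsrWifiRouterCtrlHipReq could be filled in to carry one MLME signal plus a single bulk-data reference. Only the struct layout and the CSR_WIFI_ROUTER_CTRL_HIP_REQ type value come from this header; the helper itself, the buffer parameters, and their ownership rules are assumptions for illustration:

#include <linux/string.h>
#include "csr_wifi_router_ctrl_prim.h"

static void fill_hip_req(CsrWifiRouterCtrlHipReq *req,
                         u8 *mlme, u16 mlme_len,
                         u8 *bulk1, u16 bulk1_len)
{
    memset(req, 0, sizeof(*req));
    /* The primitive id lives in the common FSM header, as the serializers
     * for this primitive read it. */
    req->common.type = CSR_WIFI_ROUTER_CTRL_HIP_REQ;
    req->mlmeCommandLength = mlme_len;
    req->mlmeCommand = mlme;
    req->dataRef1Length = bulk1_len;
    req->dataRef1 = bulk1;
    /* No second bulk-data reference in this example. */
    req->dataRef2Length = 0;
    req->dataRef2 = NULL;
}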
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlMediaStatusReq
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- clientData -
- mediaStatus -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiRouterCtrlRequestorInfo clientData;
- CsrWifiRouterCtrlMediaStatus mediaStatus;
-} CsrWifiRouterCtrlMediaStatusReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlMulticastAddressRes
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- clientData -
- status -
- action -
- getAddressesCount -
- getAddresses -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiRouterCtrlRequestorInfo clientData;
- CsrResult status;
- CsrWifiRouterCtrlListAction action;
- u8 getAddressesCount;
- CsrWifiMacAddress *getAddresses;
-} CsrWifiRouterCtrlMulticastAddressRes;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlPortConfigureReq
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- clientData -
- uncontrolledPortAction -
- controlledPortAction -
- macAddress -
- setProtection -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiRouterCtrlRequestorInfo clientData;
- CsrWifiRouterCtrlPortAction uncontrolledPortAction;
- CsrWifiRouterCtrlPortAction controlledPortAction;
- CsrWifiMacAddress macAddress;
- u8 setProtection;
-} CsrWifiRouterCtrlPortConfigureReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlQosControlReq
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- clientData -
- control -
- queueConfig -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiRouterCtrlRequestorInfo clientData;
- CsrWifiRouterCtrlQoSControl control;
- CsrWifiRouterCtrlQueueConfigMask queueConfig;
-} CsrWifiRouterCtrlQosControlReq;
-
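A hypothetical example of filling this request to switch WMM on with all four access-category queues enabled, using only the CsrWifiRouterCtrlQoSControl and CsrWifiRouterCtrlQueueConfig values defined earlier in this header (the helper is illustrative, not code from the driver; other fields of the common FSM header are left to the caller):

#include "csr_wifi_router_ctrl_prim.h"

static void fill_qos_control_req(CsrWifiRouterCtrlQosControlReq *req,
                                 u16 interface_tag,
                                 CsrWifiRouterCtrlRequestorInfo client_data)
{
    req->common.type = CSR_WIFI_ROUTER_CTRL_QOS_CONTROL_REQ;
    req->interfaceTag = interface_tag;
    req->clientData = client_data;
    req->control = CSR_WIFI_ROUTER_CTRL_QOS_CONTROL_WMM_ON;
    /* queueConfig is a CsrWifiRouterCtrlQueueConfigMask of the enable bits. */
    req->queueConfig = CSR_WIFI_ROUTER_CTRL_QUEUE_BE_ENABLE |
                       CSR_WIFI_ROUTER_CTRL_QUEUE_BK_ENABLE |
                       CSR_WIFI_ROUTER_CTRL_QUEUE_VI_ENABLE |
                       CSR_WIFI_ROUTER_CTRL_QUEUE_VO_ENABLE;
}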
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlSuspendRes
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- status -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- CsrResult status;
-} CsrWifiRouterCtrlSuspendRes;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlTclasAddReq
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- clientData -
- tclasLength -
- tclas -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u16 tclasLength;
- u8 *tclas;
-} CsrWifiRouterCtrlTclasAddReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlResumeRes
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- status -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- CsrResult status;
-} CsrWifiRouterCtrlResumeRes;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlRawSdioDeinitialiseReq
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
-} CsrWifiRouterCtrlRawSdioDeinitialiseReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlRawSdioInitialiseReq
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
-} CsrWifiRouterCtrlRawSdioInitialiseReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlTclasDelReq
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- clientData -
- tclasLength -
- tclas -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u16 tclasLength;
- u8 *tclas;
-} CsrWifiRouterCtrlTclasDelReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlTrafficClassificationReq
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- clientData -
- trafficType -
- period -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiRouterCtrlRequestorInfo clientData;
- CsrWifiRouterCtrlTrafficType trafficType;
- u16 period;
-} CsrWifiRouterCtrlTrafficClassificationReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlTrafficConfigReq
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- clientData -
- trafficConfigType -
- config -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiRouterCtrlRequestorInfo clientData;
- CsrWifiRouterCtrlTrafficConfigType trafficConfigType;
- CsrWifiRouterCtrlTrafficConfig config;
-} CsrWifiRouterCtrlTrafficConfigReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWifiOffReq
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
-} CsrWifiRouterCtrlWifiOffReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWifiOffRes
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
-} CsrWifiRouterCtrlWifiOffRes;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWifiOnReq
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- dataLength - Number of bytes in the buffer pointed to by 'data'
- data - Pointer to the buffer containing 'dataLength' bytes
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u32 dataLength;
- u8 *data;
-} CsrWifiRouterCtrlWifiOnReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWifiOnRes
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- status -
- numInterfaceAddress -
- stationMacAddress - array size 2 MUST match CSR_WIFI_NUM_INTERFACES
- smeVersions -
- scheduledInterrupt -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- CsrResult status;
- u16 numInterfaceAddress;
- CsrWifiMacAddress stationMacAddress[2];
- CsrWifiRouterCtrlSmeVersions smeVersions;
- u8 scheduledInterrupt;
-} CsrWifiRouterCtrlWifiOnRes;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlM4TransmitReq
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- clientData -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiRouterCtrlRequestorInfo clientData;
-} CsrWifiRouterCtrlM4TransmitReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlModeSetReq
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- clientData -
- mode -
- bssid - BSSID of the network the device is going to be a part
- of
- protection - Set to TRUE if encryption is enabled for the
- connection/broadcast frames
- intraBssDistEnabled - If set to TRUE, intra BSS distribution will be
- enabled. If set to FALSE, any unicast PDU which does
- not have the RA set to the local MAC address shall be
- ignored. This field is interpreted by the receiver if
- mode is set to CSR_WIFI_ROUTER_CTRL_MODE_P2PGO
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiRouterCtrlRequestorInfo clientData;
- CsrWifiRouterCtrlMode mode;
- CsrWifiMacAddress bssid;
- u8 protection;
- u8 intraBssDistEnabled;
-} CsrWifiRouterCtrlModeSetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlPeerAddReq
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- clientData -
- peerMacAddress -
- associationId -
- staInfo -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiRouterCtrlRequestorInfo clientData;
- CsrWifiMacAddress peerMacAddress;
- u16 associationId;
- CsrWifiRouterCtrlStaInfo staInfo;
-} CsrWifiRouterCtrlPeerAddReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlPeerDelReq
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- clientData -
- peerRecordHandle -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiRouterCtrlRequestorInfo clientData;
- CsrWifiRouterCtrlPeerRecordHandle peerRecordHandle;
-} CsrWifiRouterCtrlPeerDelReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlPeerUpdateReq
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- clientData -
- peerRecordHandle -
- powersaveMode -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiRouterCtrlRequestorInfo clientData;
- CsrWifiRouterCtrlPeerRecordHandle peerRecordHandle;
- CsrWifiRouterCtrlPowersaveTypeMask powersaveMode;
-} CsrWifiRouterCtrlPeerUpdateReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlCapabilitiesReq
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
-} CsrWifiRouterCtrlCapabilitiesReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlBlockAckEnableReq
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- clientData -
- macAddress -
- trafficStreamID -
- role -
- bufferSize -
- timeout -
- ssn -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiRouterCtrlRequestorInfo clientData;
- CsrWifiMacAddress macAddress;
- CsrWifiRouterCtrlTrafficStreamId trafficStreamID;
- CsrWifiRouterCtrlBlockAckRole role;
- u16 bufferSize;
- u16 timeout;
- u16 ssn;
-} CsrWifiRouterCtrlBlockAckEnableReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlBlockAckDisableReq
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- clientData -
- macAddress -
- trafficStreamID -
- role -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiRouterCtrlRequestorInfo clientData;
- CsrWifiMacAddress macAddress;
- CsrWifiRouterCtrlTrafficStreamId trafficStreamID;
- CsrWifiRouterCtrlBlockAckRole role;
-} CsrWifiRouterCtrlBlockAckDisableReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWapiRxPktReq
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- signalLength -
- signal -
- dataLength -
- data -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- u16 signalLength;
- u8 *signal;
- u16 dataLength;
- u8 *data;
-} CsrWifiRouterCtrlWapiRxPktReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWapiMulticastFilterReq
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- status -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- u8 status;
-} CsrWifiRouterCtrlWapiMulticastFilterReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWapiUnicastFilterReq
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- status -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- u8 status;
-} CsrWifiRouterCtrlWapiUnicastFilterReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWapiUnicastTxPktReq
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- dataLength -
- data -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- u16 dataLength;
- u8 *data;
-} CsrWifiRouterCtrlWapiUnicastTxPktReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWapiFilterReq
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- isWapiConnected -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- u8 isWapiConnected;
-} CsrWifiRouterCtrlWapiFilterReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlHipInd
-
- DESCRIPTION
- This primitive is used for transferring MLME messages from the HIP.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- mlmeCommandLength - Length of the MLME signal
- mlmeCommand - Pointer to the MLME signal
- dataRef1Length - Length of the dataRef1 bulk data
- dataRef1 - Pointer to the bulk data 1
- dataRef2Length - Length of the dataRef2 bulk data
- dataRef2 - Pointer to the bulk data 2
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 mlmeCommandLength;
- u8 *mlmeCommand;
- u16 dataRef1Length;
- u8 *dataRef1;
- u16 dataRef2Length;
- u8 *dataRef2;
-} CsrWifiRouterCtrlHipInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlMulticastAddressInd
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- interfaceTag -
- action -
- setAddressesCount -
- setAddresses -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u16 interfaceTag;
- CsrWifiRouterCtrlListAction action;
- u8 setAddressesCount;
- CsrWifiMacAddress *setAddresses;
-} CsrWifiRouterCtrlMulticastAddressInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlPortConfigureCfm
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- interfaceTag -
- status -
- macAddress -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u16 interfaceTag;
- CsrResult status;
- CsrWifiMacAddress macAddress;
-} CsrWifiRouterCtrlPortConfigureCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlResumeInd
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- powerMaintained -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u8 powerMaintained;
-} CsrWifiRouterCtrlResumeInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlSuspendInd
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- hardSuspend -
- d3Suspend -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u8 hardSuspend;
- u8 d3Suspend;
-} CsrWifiRouterCtrlSuspendInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlTclasAddCfm
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- interfaceTag -
- status -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u16 interfaceTag;
- CsrResult status;
-} CsrWifiRouterCtrlTclasAddCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlRawSdioDeinitialiseCfm
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- result -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- CsrResult result;
-} CsrWifiRouterCtrlRawSdioDeinitialiseCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlRawSdioInitialiseCfm
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- result -
- byteRead -
- byteWrite -
- firmwareDownload -
- reset -
- coreDumpPrepare -
- byteBlockRead -
- gpRead16 -
- gpWrite16 -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- CsrResult result;
- CsrWifiRouterCtrlRawSdioByteRead byteRead;
- CsrWifiRouterCtrlRawSdioByteWrite byteWrite;
- CsrWifiRouterCtrlRawSdioFirmwareDownload firmwareDownload;
- CsrWifiRouterCtrlRawSdioReset reset;
- CsrWifiRouterCtrlRawSdioCoreDumpPrepare coreDumpPrepare;
- CsrWifiRouterCtrlRawSdioByteBlockRead byteBlockRead;
- CsrWifiRouterCtrlRawSdioGpRead16 gpRead16;
- CsrWifiRouterCtrlRawSdioGpWrite16 gpWrite16;
-} CsrWifiRouterCtrlRawSdioInitialiseCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlTclasDelCfm
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- interfaceTag -
- status -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u16 interfaceTag;
- CsrResult status;
-} CsrWifiRouterCtrlTclasDelCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlTrafficProtocolInd
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- interfaceTag -
- packetType -
- direction -
- srcAddress -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u16 interfaceTag;
- CsrWifiRouterCtrlTrafficPacketType packetType;
- CsrWifiRouterCtrlProtocolDirection direction;
- CsrWifiMacAddress srcAddress;
-} CsrWifiRouterCtrlTrafficProtocolInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlTrafficSampleInd
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- interfaceTag -
- stats -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u16 interfaceTag;
- CsrWifiRouterCtrlTrafficStats stats;
-} CsrWifiRouterCtrlTrafficSampleInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWifiOffInd
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- controlIndication -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- CsrWifiRouterCtrlControlIndication controlIndication;
-} CsrWifiRouterCtrlWifiOffInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWifiOffCfm
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
-} CsrWifiRouterCtrlWifiOffCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWifiOnInd
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- status -
- versions -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- CsrResult status;
- CsrWifiRouterCtrlVersions versions;
-} CsrWifiRouterCtrlWifiOnInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWifiOnCfm
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- status -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- CsrResult status;
-} CsrWifiRouterCtrlWifiOnCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlM4ReadyToSendInd
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- interfaceTag -
- peerMacAddress -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u16 interfaceTag;
- CsrWifiMacAddress peerMacAddress;
-} CsrWifiRouterCtrlM4ReadyToSendInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlM4TransmittedInd
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- interfaceTag -
- peerMacAddress -
- status -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u16 interfaceTag;
- CsrWifiMacAddress peerMacAddress;
- CsrResult status;
-} CsrWifiRouterCtrlM4TransmittedInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlMicFailureInd
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- interfaceTag -
- peerMacAddress -
- unicastPdu -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u16 interfaceTag;
- CsrWifiMacAddress peerMacAddress;
- u8 unicastPdu;
-} CsrWifiRouterCtrlMicFailureInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlConnectedInd
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- interfaceTag -
- peerMacAddress -
- peerStatus -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u16 interfaceTag;
- CsrWifiMacAddress peerMacAddress;
- CsrWifiRouterCtrlPeerStatus peerStatus;
-} CsrWifiRouterCtrlConnectedInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlPeerAddCfm
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- interfaceTag -
- peerMacAddress -
- peerRecordHandle -
- status -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u16 interfaceTag;
- CsrWifiMacAddress peerMacAddress;
- CsrWifiRouterCtrlPeerRecordHandle peerRecordHandle;
- CsrResult status;
-} CsrWifiRouterCtrlPeerAddCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlPeerDelCfm
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- interfaceTag -
- status -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u16 interfaceTag;
- CsrResult status;
-} CsrWifiRouterCtrlPeerDelCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlUnexpectedFrameInd
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- interfaceTag -
- peerMacAddress -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u16 interfaceTag;
- CsrWifiMacAddress peerMacAddress;
-} CsrWifiRouterCtrlUnexpectedFrameInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlPeerUpdateCfm
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- interfaceTag -
- status -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u16 interfaceTag;
- CsrResult status;
-} CsrWifiRouterCtrlPeerUpdateCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlCapabilitiesCfm
-
- DESCRIPTION
- The router sends this primitive to confirm the sizes of the HIP command
- and traffic queues.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- commandQueueSize - Size of command queue
- trafficQueueSize - Size of traffic queue (per AC)
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u16 commandQueueSize;
- u16 trafficQueueSize;
-} CsrWifiRouterCtrlCapabilitiesCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlBlockAckEnableCfm
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- interfaceTag -
- status -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u16 interfaceTag;
- CsrResult status;
-} CsrWifiRouterCtrlBlockAckEnableCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlBlockAckDisableCfm
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- interfaceTag -
- status -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u16 interfaceTag;
- CsrResult status;
-} CsrWifiRouterCtrlBlockAckDisableCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlBlockAckErrorInd
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- interfaceTag -
- trafficStreamID -
- peerMacAddress -
- status -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u16 interfaceTag;
- CsrWifiRouterCtrlTrafficStreamId trafficStreamID;
- CsrWifiMacAddress peerMacAddress;
- CsrResult status;
-} CsrWifiRouterCtrlBlockAckErrorInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlStaInactiveInd
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- interfaceTag -
- staAddress -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u16 interfaceTag;
- CsrWifiMacAddress staAddress;
-} CsrWifiRouterCtrlStaInactiveInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWapiRxMicCheckInd
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- interfaceTag -
- signalLength -
- signal -
- dataLength -
- data -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u16 interfaceTag;
- u16 signalLength;
- u8 *signal;
- u16 dataLength;
- u8 *data;
-} CsrWifiRouterCtrlWapiRxMicCheckInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlModeSetCfm
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- interfaceTag -
- mode -
- status -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u16 interfaceTag;
- CsrWifiRouterCtrlMode mode;
- CsrResult status;
-} CsrWifiRouterCtrlModeSetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterCtrlWapiUnicastTxEncryptInd
-
- DESCRIPTION
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- clientData -
- interfaceTag -
- dataLength -
- data -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiRouterCtrlRequestorInfo clientData;
- u16 interfaceTag;
- u16 dataLength;
- u8 *data;
-} CsrWifiRouterCtrlWapiUnicastTxEncryptInd;
-
-#endif /* CSR_WIFI_ROUTER_CTRL_PRIM_H__ */
-
diff --git a/drivers/staging/csr/csr_wifi_router_ctrl_sef.c b/drivers/staging/csr/csr_wifi_router_ctrl_sef.c
deleted file mode 100644
index 99cf930..0000000
--- a/drivers/staging/csr/csr_wifi_router_ctrl_sef.c
+++ /dev/null
@@ -1,46 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2010
- Confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
- *****************************************************************************/
-#include "csr_wifi_router_ctrl_sef.h"
-
-const CsrWifiRouterCtrlStateHandlerType
- CsrWifiRouterCtrlDownstreamStateHandlers
- [CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_COUNT] = {
- /* 0x0000 */ CsrWifiRouterCtrlConfigurePowerModeReqHandler,
- /* 0x0001 */ CsrWifiRouterCtrlHipReqHandler,
- /* 0x0002 */ CsrWifiRouterCtrlMediaStatusReqHandler,
- /* 0x0003 */ CsrWifiRouterCtrlMulticastAddressResHandler,
- /* 0x0004 */ CsrWifiRouterCtrlPortConfigureReqHandler,
- /* 0x0005 */ CsrWifiRouterCtrlQosControlReqHandler,
- /* 0x0006 */ CsrWifiRouterCtrlSuspendResHandler,
- /* 0x0007 */ CsrWifiRouterCtrlTclasAddReqHandler,
- /* 0x0008 */ CsrWifiRouterCtrlResumeResHandler,
- /* 0x0009 */ CsrWifiRouterCtrlRawSdioDeinitialiseReqHandler,
- /* 0x000A */ CsrWifiRouterCtrlRawSdioInitialiseReqHandler,
- /* 0x000B */ CsrWifiRouterCtrlTclasDelReqHandler,
- /* 0x000C */ CsrWifiRouterCtrlTrafficClassificationReqHandler,
- /* 0x000D */ CsrWifiRouterCtrlTrafficConfigReqHandler,
- /* 0x000E */ CsrWifiRouterCtrlWifiOffReqHandler,
- /* 0x000F */ CsrWifiRouterCtrlWifiOffResHandler,
- /* 0x0010 */ CsrWifiRouterCtrlWifiOnReqHandler,
- /* 0x0011 */ CsrWifiRouterCtrlWifiOnResHandler,
- /* 0x0012 */ CsrWifiRouterCtrlM4TransmitReqHandler,
- /* 0x0013 */ CsrWifiRouterCtrlModeSetReqHandler,
- /* 0x0014 */ CsrWifiRouterCtrlPeerAddReqHandler,
- /* 0x0015 */ CsrWifiRouterCtrlPeerDelReqHandler,
- /* 0x0016 */ CsrWifiRouterCtrlPeerUpdateReqHandler,
- /* 0x0017 */ CsrWifiRouterCtrlCapabilitiesReqHandler,
- /* 0x0018 */ CsrWifiRouterCtrlBlockAckEnableReqHandler,
- /* 0x0019 */ CsrWifiRouterCtrlBlockAckDisableReqHandler,
- /* 0x001A */ CsrWifiRouterCtrlWapiRxPktReqHandler,
- /* 0x001B */ CsrWifiRouterCtrlWapiMulticastFilterReqHandler,
- /* 0x001C */ CsrWifiRouterCtrlWapiUnicastFilterReqHandler,
- /* 0x001D */ CsrWifiRouterCtrlWapiUnicastTxPktReqHandler,
- /* 0x001E */ CsrWifiRouterCtrlWapiFilterReqHandler,
-};
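The table above is indexed by each downstream primitive's offset from CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST, one entry per value up to CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_HIGHEST. A hypothetical dispatch helper, not part of the deleted file, assuming the primitive id is carried in the CsrWifiFsmEvent type field as the serializers read it:

#include "csr_wifi_router_ctrl_sef.h"

static void csr_wifi_router_ctrl_dispatch(void *drvpriv, CsrWifiFsmEvent *msg)
{
    u16 index = msg->type - CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST;

    /* Ignore anything outside the downstream range covered by the table. */
    if (index < CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_COUNT)
        CsrWifiRouterCtrlDownstreamStateHandlers[index](drvpriv, msg);
}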
diff --git a/drivers/staging/csr/csr_wifi_router_ctrl_sef.h b/drivers/staging/csr/csr_wifi_router_ctrl_sef.h
deleted file mode 100644
index 2fb4937..0000000
--- a/drivers/staging/csr/csr_wifi_router_ctrl_sef.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2010
- Confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
- *****************************************************************************/
-#ifndef CSR_WIFI_ROUTER_SEF_CSR_WIFI_ROUTER_CTRL_H__
-#define CSR_WIFI_ROUTER_SEF_CSR_WIFI_ROUTER_CTRL_H__
-
-#include "csr_wifi_router_ctrl_prim.h"
-
- typedef void (*CsrWifiRouterCtrlStateHandlerType)(void* drvpriv, CsrWifiFsmEvent* msg);
-
- extern const CsrWifiRouterCtrlStateHandlerType CsrWifiRouterCtrlDownstreamStateHandlers[CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_COUNT];
-
- extern void CsrWifiRouterCtrlConfigurePowerModeReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlHipReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlMediaStatusReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlMulticastAddressResHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlPortConfigureReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlQosControlReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlSuspendResHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlTclasAddReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlResumeResHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlRawSdioDeinitialiseReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlRawSdioInitialiseReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlTclasDelReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlTrafficClassificationReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlTrafficConfigReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlWifiOffReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlWifiOffResHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlWifiOnReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlWifiOnResHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlM4TransmitReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlModeSetReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlPeerAddReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlPeerDelReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlPeerUpdateReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlCapabilitiesReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlBlockAckEnableReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlBlockAckDisableReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlWapiMulticastFilterReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlWapiRxPktReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlWapiUnicastTxPktReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlWapiUnicastFilterReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterCtrlWapiFilterReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
-
-#endif /* CSR_WIFI_ROUTER_SEF_CSR_WIFI_ROUTER_CTRL_H__ */
diff --git a/drivers/staging/csr/csr_wifi_router_ctrl_serialize.c b/drivers/staging/csr/csr_wifi_router_ctrl_serialize.c
deleted file mode 100644
index 3eda1b6..0000000
--- a/drivers/staging/csr/csr_wifi_router_ctrl_serialize.c
+++ /dev/null
@@ -1,2591 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-#include <linux/string.h>
-#include <linux/slab.h>
-#include "csr_msgconv.h"
-#include "csr_wifi_router_ctrl_prim.h"
-#include "csr_wifi_router_ctrl_serialize.h"
-
-void CsrWifiRouterCtrlPfree(void *ptr)
-{
- kfree(ptr);
-}
-
-
-size_t CsrWifiRouterCtrlConfigurePowerModeReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 8) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* CsrWifiRouterCtrlLowPowerMode primitive->mode */
- bufferSize += 1; /* u8 primitive->wakeHost */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlConfigurePowerModeReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlConfigurePowerModeReq *primitive = (CsrWifiRouterCtrlConfigurePowerModeReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->mode);
- CsrUint8Ser(ptr, len, (u8) primitive->wakeHost);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlConfigurePowerModeReqDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlConfigurePowerModeReq *primitive = kmalloc(sizeof(CsrWifiRouterCtrlConfigurePowerModeReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->mode, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->wakeHost, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlHipReqSizeof(void *msg)
-{
- CsrWifiRouterCtrlHipReq *primitive = (CsrWifiRouterCtrlHipReq *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 12) */
- bufferSize += 2; /* u16 primitive->mlmeCommandLength */
- bufferSize += primitive->mlmeCommandLength; /* u8 primitive->mlmeCommand */
- bufferSize += 2; /* u16 primitive->dataRef1Length */
- bufferSize += primitive->dataRef1Length; /* u8 primitive->dataRef1 */
- bufferSize += 2; /* u16 primitive->dataRef2Length */
- bufferSize += primitive->dataRef2Length; /* u8 primitive->dataRef2 */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlHipReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlHipReq *primitive = (CsrWifiRouterCtrlHipReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->mlmeCommandLength);
- if (primitive->mlmeCommandLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->mlmeCommand, ((u16) (primitive->mlmeCommandLength)));
- }
- CsrUint16Ser(ptr, len, (u16) primitive->dataRef1Length);
- if (primitive->dataRef1Length)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->dataRef1, ((u16) (primitive->dataRef1Length)));
- }
- CsrUint16Ser(ptr, len, (u16) primitive->dataRef2Length);
- if (primitive->dataRef2Length)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->dataRef2, ((u16) (primitive->dataRef2Length)));
- }
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlHipReqDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlHipReq *primitive = kmalloc(sizeof(CsrWifiRouterCtrlHipReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->mlmeCommandLength, buffer, &offset);
- if (primitive->mlmeCommandLength)
- {
- primitive->mlmeCommand = kmalloc(primitive->mlmeCommandLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->mlmeCommand, buffer, &offset, ((u16) (primitive->mlmeCommandLength)));
- }
- else
- {
- primitive->mlmeCommand = NULL;
- }
- CsrUint16Des((u16 *) &primitive->dataRef1Length, buffer, &offset);
- if (primitive->dataRef1Length)
- {
- primitive->dataRef1 = kmalloc(primitive->dataRef1Length, GFP_KERNEL);
- CsrMemCpyDes(primitive->dataRef1, buffer, &offset, ((u16) (primitive->dataRef1Length)));
- }
- else
- {
- primitive->dataRef1 = NULL;
- }
- CsrUint16Des((u16 *) &primitive->dataRef2Length, buffer, &offset);
- if (primitive->dataRef2Length)
- {
- primitive->dataRef2 = kmalloc(primitive->dataRef2Length, GFP_KERNEL);
- CsrMemCpyDes(primitive->dataRef2, buffer, &offset, ((u16) (primitive->dataRef2Length)));
- }
- else
- {
- primitive->dataRef2 = NULL;
- }
-
- return primitive;
-}
-
-
-void CsrWifiRouterCtrlHipReqSerFree(void *voidPrimitivePointer)
-{
- CsrWifiRouterCtrlHipReq *primitive = (CsrWifiRouterCtrlHipReq *) voidPrimitivePointer;
- kfree(primitive->mlmeCommand);
- kfree(primitive->dataRef1);
- kfree(primitive->dataRef2);
- kfree(primitive);
-}
-
-
-size_t CsrWifiRouterCtrlMediaStatusReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 8) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 1; /* CsrWifiRouterCtrlMediaStatus primitive->mediaStatus */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlMediaStatusReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlMediaStatusReq *primitive = (CsrWifiRouterCtrlMediaStatusReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint8Ser(ptr, len, (u8) primitive->mediaStatus);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlMediaStatusReqDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlMediaStatusReq *primitive = kmalloc(sizeof(CsrWifiRouterCtrlMediaStatusReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->mediaStatus, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlMulticastAddressResSizeof(void *msg)
-{
- CsrWifiRouterCtrlMulticastAddressRes *primitive = (CsrWifiRouterCtrlMulticastAddressRes *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 17) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 1; /* CsrWifiRouterCtrlListAction primitive->action */
- bufferSize += 1; /* u8 primitive->getAddressesCount */
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->getAddressesCount; i1++)
- {
- bufferSize += 6; /* u8 primitive->getAddresses[i1].a[6] */
- }
- }
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlMulticastAddressResSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlMulticastAddressRes *primitive = (CsrWifiRouterCtrlMulticastAddressRes *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint8Ser(ptr, len, (u8) primitive->action);
- CsrUint8Ser(ptr, len, (u8) primitive->getAddressesCount);
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->getAddressesCount; i1++)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->getAddresses[i1].a, ((u16) (6)));
- }
- }
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlMulticastAddressResDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlMulticastAddressRes *primitive = kmalloc(sizeof(CsrWifiRouterCtrlMulticastAddressRes), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->action, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->getAddressesCount, buffer, &offset);
- primitive->getAddresses = NULL;
- if (primitive->getAddressesCount)
- {
- primitive->getAddresses = kmalloc(sizeof(CsrWifiMacAddress) * primitive->getAddressesCount, GFP_KERNEL);
- }
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->getAddressesCount; i1++)
- {
- CsrMemCpyDes(primitive->getAddresses[i1].a, buffer, &offset, ((u16) (6)));
- }
- }
-
- return primitive;
-}
-
-
-void CsrWifiRouterCtrlMulticastAddressResSerFree(void *voidPrimitivePointer)
-{
- CsrWifiRouterCtrlMulticastAddressRes *primitive = (CsrWifiRouterCtrlMulticastAddressRes *) voidPrimitivePointer;
- kfree(primitive->getAddresses);
- kfree(primitive);
-}
-
-
-size_t CsrWifiRouterCtrlPortConfigureReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 18) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* CsrWifiRouterCtrlPortAction primitive->uncontrolledPortAction */
- bufferSize += 2; /* CsrWifiRouterCtrlPortAction primitive->controlledPortAction */
- bufferSize += 6; /* u8 primitive->macAddress.a[6] */
- bufferSize += 1; /* u8 primitive->setProtection */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlPortConfigureReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlPortConfigureReq *primitive = (CsrWifiRouterCtrlPortConfigureReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->uncontrolledPortAction);
- CsrUint16Ser(ptr, len, (u16) primitive->controlledPortAction);
- CsrMemCpySer(ptr, len, (const void *) primitive->macAddress.a, ((u16) (6)));
- CsrUint8Ser(ptr, len, (u8) primitive->setProtection);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlPortConfigureReqDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlPortConfigureReq *primitive = kmalloc(sizeof(CsrWifiRouterCtrlPortConfigureReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->uncontrolledPortAction, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->controlledPortAction, buffer, &offset);
- CsrMemCpyDes(primitive->macAddress.a, buffer, &offset, ((u16) (6)));
- CsrUint8Des((u8 *) &primitive->setProtection, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlQosControlReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 10) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* CsrWifiRouterCtrlQoSControl primitive->control */
- bufferSize += 1; /* CsrWifiRouterCtrlQueueConfigMask primitive->queueConfig */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlQosControlReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlQosControlReq *primitive = (CsrWifiRouterCtrlQosControlReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->control);
- CsrUint8Ser(ptr, len, (u8) primitive->queueConfig);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlQosControlReqDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlQosControlReq *primitive = kmalloc(sizeof(CsrWifiRouterCtrlQosControlReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->control, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->queueConfig, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlSuspendResSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 7) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* CsrResult primitive->status */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlSuspendResSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlSuspendRes *primitive = (CsrWifiRouterCtrlSuspendRes *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlSuspendResDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlSuspendRes *primitive = kmalloc(sizeof(CsrWifiRouterCtrlSuspendRes), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlTclasAddReqSizeof(void *msg)
-{
- CsrWifiRouterCtrlTclasAddReq *primitive = (CsrWifiRouterCtrlTclasAddReq *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 10) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* u16 primitive->tclasLength */
- bufferSize += primitive->tclasLength; /* u8 primitive->tclas */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlTclasAddReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlTclasAddReq *primitive = (CsrWifiRouterCtrlTclasAddReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->tclasLength);
- if (primitive->tclasLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->tclas, ((u16) (primitive->tclasLength)));
- }
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlTclasAddReqDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlTclasAddReq *primitive = kmalloc(sizeof(CsrWifiRouterCtrlTclasAddReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->tclasLength, buffer, &offset);
- if (primitive->tclasLength)
- {
- primitive->tclas = kmalloc(primitive->tclasLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->tclas, buffer, &offset, ((u16) (primitive->tclasLength)));
- }
- else
- {
- primitive->tclas = NULL;
- }
-
- return primitive;
-}
-
-
-void CsrWifiRouterCtrlTclasAddReqSerFree(void *voidPrimitivePointer)
-{
- CsrWifiRouterCtrlTclasAddReq *primitive = (CsrWifiRouterCtrlTclasAddReq *) voidPrimitivePointer;
- kfree(primitive->tclas);
- kfree(primitive);
-}
-
-
-size_t CsrWifiRouterCtrlResumeResSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 7) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* CsrResult primitive->status */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlResumeResSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlResumeRes *primitive = (CsrWifiRouterCtrlResumeRes *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlResumeResDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlResumeRes *primitive = kmalloc(sizeof(CsrWifiRouterCtrlResumeRes), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlTclasDelReqSizeof(void *msg)
-{
- CsrWifiRouterCtrlTclasDelReq *primitive = (CsrWifiRouterCtrlTclasDelReq *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 10) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* u16 primitive->tclasLength */
- bufferSize += primitive->tclasLength; /* u8 primitive->tclas */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlTclasDelReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlTclasDelReq *primitive = (CsrWifiRouterCtrlTclasDelReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->tclasLength);
- if (primitive->tclasLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->tclas, ((u16) (primitive->tclasLength)));
- }
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlTclasDelReqDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlTclasDelReq *primitive = kmalloc(sizeof(CsrWifiRouterCtrlTclasDelReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->tclasLength, buffer, &offset);
- if (primitive->tclasLength)
- {
- primitive->tclas = kmalloc(primitive->tclasLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->tclas, buffer, &offset, ((u16) (primitive->tclasLength)));
- }
- else
- {
- primitive->tclas = NULL;
- }
-
- return primitive;
-}
-
-
-void CsrWifiRouterCtrlTclasDelReqSerFree(void *voidPrimitivePointer)
-{
- CsrWifiRouterCtrlTclasDelReq *primitive = (CsrWifiRouterCtrlTclasDelReq *) voidPrimitivePointer;
- kfree(primitive->tclas);
- kfree(primitive);
-}
-
-
-size_t CsrWifiRouterCtrlTrafficClassificationReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 10) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 1; /* CsrWifiRouterCtrlTrafficType primitive->trafficType */
- bufferSize += 2; /* u16 primitive->period */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlTrafficClassificationReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlTrafficClassificationReq *primitive = (CsrWifiRouterCtrlTrafficClassificationReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint8Ser(ptr, len, (u8) primitive->trafficType);
- CsrUint16Ser(ptr, len, (u16) primitive->period);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlTrafficClassificationReqDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlTrafficClassificationReq *primitive = kmalloc(sizeof(CsrWifiRouterCtrlTrafficClassificationReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->trafficType, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->period, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlTrafficConfigReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 24) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* CsrWifiRouterCtrlTrafficConfigType primitive->trafficConfigType */
- bufferSize += 2; /* u16 primitive->config.packetFilter */
- bufferSize += 4; /* u32 primitive->config.customFilter.etherType */
- bufferSize += 1; /* u8 primitive->config.customFilter.ipType */
- bufferSize += 4; /* u32 primitive->config.customFilter.udpSourcePort */
- bufferSize += 4; /* u32 primitive->config.customFilter.udpDestPort */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlTrafficConfigReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlTrafficConfigReq *primitive = (CsrWifiRouterCtrlTrafficConfigReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->trafficConfigType);
- CsrUint16Ser(ptr, len, (u16) primitive->config.packetFilter);
- CsrUint32Ser(ptr, len, (u32) primitive->config.customFilter.etherType);
- CsrUint8Ser(ptr, len, (u8) primitive->config.customFilter.ipType);
- CsrUint32Ser(ptr, len, (u32) primitive->config.customFilter.udpSourcePort);
- CsrUint32Ser(ptr, len, (u32) primitive->config.customFilter.udpDestPort);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlTrafficConfigReqDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlTrafficConfigReq *primitive = kmalloc(sizeof(CsrWifiRouterCtrlTrafficConfigReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->trafficConfigType, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->config.packetFilter, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->config.customFilter.etherType, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->config.customFilter.ipType, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->config.customFilter.udpSourcePort, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->config.customFilter.udpDestPort, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlWifiOnReqSizeof(void *msg)
-{
- CsrWifiRouterCtrlWifiOnReq *primitive = (CsrWifiRouterCtrlWifiOnReq *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 10) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 4; /* u32 primitive->dataLength */
- bufferSize += primitive->dataLength; /* u8 primitive->data */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlWifiOnReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlWifiOnReq *primitive = (CsrWifiRouterCtrlWifiOnReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint32Ser(ptr, len, (u32) primitive->dataLength);
- if (primitive->dataLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->data, ((u16) (primitive->dataLength)));
- }
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlWifiOnReqDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlWifiOnReq *primitive = kmalloc(sizeof(CsrWifiRouterCtrlWifiOnReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->dataLength, buffer, &offset);
- if (primitive->dataLength)
- {
- primitive->data = kmalloc(primitive->dataLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->data, buffer, &offset, ((u16) (primitive->dataLength)));
- }
- else
- {
- primitive->data = NULL;
- }
-
- return primitive;
-}
-
-
-void CsrWifiRouterCtrlWifiOnReqSerFree(void *voidPrimitivePointer)
-{
- CsrWifiRouterCtrlWifiOnReq *primitive = (CsrWifiRouterCtrlWifiOnReq *) voidPrimitivePointer;
- kfree(primitive->data);
- kfree(primitive);
-}
-
-
-size_t CsrWifiRouterCtrlWifiOnResSizeof(void *msg)
-{
- CsrWifiRouterCtrlWifiOnRes *primitive = (CsrWifiRouterCtrlWifiOnRes *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 30) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 2; /* u16 primitive->numInterfaceAddress */
- {
- u16 i1;
- for (i1 = 0; i1 < 2; i1++)
- {
- bufferSize += 6; /* u8 primitive->stationMacAddress[i1].a[6] */
- }
- }
- bufferSize += 4; /* u32 primitive->smeVersions.firmwarePatch */
- bufferSize += (primitive->smeVersions.smeBuild ? strlen(primitive->smeVersions.smeBuild) : 0) + 1; /* char* primitive->smeVersions.smeBuild (0 byte len + 1 for NULL Term) */
- bufferSize += 4; /* u32 primitive->smeVersions.smeHip */
- bufferSize += 1; /* u8 primitive->scheduledInterrupt */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlWifiOnResSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlWifiOnRes *primitive = (CsrWifiRouterCtrlWifiOnRes *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint16Ser(ptr, len, (u16) primitive->numInterfaceAddress);
- {
- u16 i1;
- for (i1 = 0; i1 < 2; i1++)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->stationMacAddress[i1].a, ((u16) (6)));
- }
- }
- CsrUint32Ser(ptr, len, (u32) primitive->smeVersions.firmwarePatch);
- CsrCharStringSer(ptr, len, primitive->smeVersions.smeBuild);
- CsrUint32Ser(ptr, len, (u32) primitive->smeVersions.smeHip);
- CsrUint8Ser(ptr, len, (u8) primitive->scheduledInterrupt);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlWifiOnResDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlWifiOnRes *primitive = kmalloc(sizeof(CsrWifiRouterCtrlWifiOnRes), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->numInterfaceAddress, buffer, &offset);
- {
- u16 i1;
- for (i1 = 0; i1 < 2; i1++)
- {
- CsrMemCpyDes(primitive->stationMacAddress[i1].a, buffer, &offset, ((u16) (6)));
- }
- }
- CsrUint32Des((u32 *) &primitive->smeVersions.firmwarePatch, buffer, &offset);
- CsrCharStringDes(&primitive->smeVersions.smeBuild, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->smeVersions.smeHip, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->scheduledInterrupt, buffer, &offset);
-
- return primitive;
-}
-
-
-void CsrWifiRouterCtrlWifiOnResSerFree(void *voidPrimitivePointer)
-{
- CsrWifiRouterCtrlWifiOnRes *primitive = (CsrWifiRouterCtrlWifiOnRes *) voidPrimitivePointer;
- kfree(primitive->smeVersions.smeBuild);
- kfree(primitive);
-}
-
-
-size_t CsrWifiRouterCtrlM4TransmitReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 7) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlM4TransmitReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlM4TransmitReq *primitive = (CsrWifiRouterCtrlM4TransmitReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlM4TransmitReqDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlM4TransmitReq *primitive = kmalloc(sizeof(CsrWifiRouterCtrlM4TransmitReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlModeSetReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 16) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 1; /* CsrWifiRouterCtrlMode primitive->mode */
- bufferSize += 6; /* u8 primitive->bssid.a[6] */
- bufferSize += 1; /* u8 primitive->protection */
- bufferSize += 1; /* u8 primitive->intraBssDistEnabled */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlModeSetReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlModeSetReq *primitive = (CsrWifiRouterCtrlModeSetReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint8Ser(ptr, len, (u8) primitive->mode);
- CsrMemCpySer(ptr, len, (const void *) primitive->bssid.a, ((u16) (6)));
- CsrUint8Ser(ptr, len, (u8) primitive->protection);
- CsrUint8Ser(ptr, len, (u8) primitive->intraBssDistEnabled);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlModeSetReqDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlModeSetReq *primitive = kmalloc(sizeof(CsrWifiRouterCtrlModeSetReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->mode, buffer, &offset);
- CsrMemCpyDes(primitive->bssid.a, buffer, &offset, ((u16) (6)));
- CsrUint8Des((u8 *) &primitive->protection, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->intraBssDistEnabled, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlPeerAddReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 21) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 6; /* u8 primitive->peerMacAddress.a[6] */
- bufferSize += 2; /* u16 primitive->associationId */
- bufferSize += 1; /* u8 primitive->staInfo.wmmOrQosEnabled */
- bufferSize += 2; /* CsrWifiRouterCtrlPowersaveTypeMask primitive->staInfo.powersaveMode */
- bufferSize += 1; /* u8 primitive->staInfo.maxSpLength */
- bufferSize += 2; /* u16 primitive->staInfo.listenIntervalInTus */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlPeerAddReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlPeerAddReq *primitive = (CsrWifiRouterCtrlPeerAddReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrMemCpySer(ptr, len, (const void *) primitive->peerMacAddress.a, ((u16) (6)));
- CsrUint16Ser(ptr, len, (u16) primitive->associationId);
- CsrUint8Ser(ptr, len, (u8) primitive->staInfo.wmmOrQosEnabled);
- CsrUint16Ser(ptr, len, (u16) primitive->staInfo.powersaveMode);
- CsrUint8Ser(ptr, len, (u8) primitive->staInfo.maxSpLength);
- CsrUint16Ser(ptr, len, (u16) primitive->staInfo.listenIntervalInTus);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlPeerAddReqDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlPeerAddReq *primitive = kmalloc(sizeof(CsrWifiRouterCtrlPeerAddReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrMemCpyDes(primitive->peerMacAddress.a, buffer, &offset, ((u16) (6)));
- CsrUint16Des((u16 *) &primitive->associationId, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->staInfo.wmmOrQosEnabled, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->staInfo.powersaveMode, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->staInfo.maxSpLength, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->staInfo.listenIntervalInTus, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlPeerDelReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 11) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 4; /* CsrWifiRouterCtrlPeerRecordHandle primitive->peerRecordHandle */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlPeerDelReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlPeerDelReq *primitive = (CsrWifiRouterCtrlPeerDelReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint32Ser(ptr, len, (u32) primitive->peerRecordHandle);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlPeerDelReqDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlPeerDelReq *primitive = kmalloc(sizeof(CsrWifiRouterCtrlPeerDelReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->peerRecordHandle, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlPeerUpdateReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 13) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 4; /* CsrWifiRouterCtrlPeerRecordHandle primitive->peerRecordHandle */
- bufferSize += 2; /* CsrWifiRouterCtrlPowersaveTypeMask primitive->powersaveMode */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlPeerUpdateReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlPeerUpdateReq *primitive = (CsrWifiRouterCtrlPeerUpdateReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint32Ser(ptr, len, (u32) primitive->peerRecordHandle);
- CsrUint16Ser(ptr, len, (u16) primitive->powersaveMode);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlPeerUpdateReqDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlPeerUpdateReq *primitive = kmalloc(sizeof(CsrWifiRouterCtrlPeerUpdateReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->peerRecordHandle, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->powersaveMode, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlBlockAckEnableReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 21) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 6; /* u8 primitive->macAddress.a[6] */
- bufferSize += 1; /* CsrWifiRouterCtrlTrafficStreamId primitive->trafficStreamID */
- bufferSize += 1; /* CsrWifiRouterCtrlBlockAckRole primitive->role */
- bufferSize += 2; /* u16 primitive->bufferSize */
- bufferSize += 2; /* u16 primitive->timeout */
- bufferSize += 2; /* u16 primitive->ssn */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlBlockAckEnableReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlBlockAckEnableReq *primitive = (CsrWifiRouterCtrlBlockAckEnableReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrMemCpySer(ptr, len, (const void *) primitive->macAddress.a, ((u16) (6)));
- CsrUint8Ser(ptr, len, (u8) primitive->trafficStreamID);
- CsrUint8Ser(ptr, len, (u8) primitive->role);
- CsrUint16Ser(ptr, len, (u16) primitive->bufferSize);
- CsrUint16Ser(ptr, len, (u16) primitive->timeout);
- CsrUint16Ser(ptr, len, (u16) primitive->ssn);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlBlockAckEnableReqDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlBlockAckEnableReq *primitive = kmalloc(sizeof(CsrWifiRouterCtrlBlockAckEnableReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrMemCpyDes(primitive->macAddress.a, buffer, &offset, ((u16) (6)));
- CsrUint8Des((u8 *) &primitive->trafficStreamID, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->role, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->bufferSize, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->timeout, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->ssn, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlBlockAckDisableReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 15) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 6; /* u8 primitive->macAddress.a[6] */
- bufferSize += 1; /* CsrWifiRouterCtrlTrafficStreamId primitive->trafficStreamID */
- bufferSize += 1; /* CsrWifiRouterCtrlBlockAckRole primitive->role */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlBlockAckDisableReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlBlockAckDisableReq *primitive = (CsrWifiRouterCtrlBlockAckDisableReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrMemCpySer(ptr, len, (const void *) primitive->macAddress.a, ((u16) (6)));
- CsrUint8Ser(ptr, len, (u8) primitive->trafficStreamID);
- CsrUint8Ser(ptr, len, (u8) primitive->role);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlBlockAckDisableReqDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlBlockAckDisableReq *primitive = kmalloc(sizeof(CsrWifiRouterCtrlBlockAckDisableReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrMemCpyDes(primitive->macAddress.a, buffer, &offset, ((u16) (6)));
- CsrUint8Des((u8 *) &primitive->trafficStreamID, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->role, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlWapiRxPktReqSizeof(void *msg)
-{
- CsrWifiRouterCtrlWapiRxPktReq *primitive = (CsrWifiRouterCtrlWapiRxPktReq *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 11) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* u16 primitive->signalLength */
- bufferSize += primitive->signalLength; /* u8 primitive->signal */
- bufferSize += 2; /* u16 primitive->dataLength */
- bufferSize += primitive->dataLength; /* u8 primitive->data */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlWapiRxPktReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlWapiRxPktReq *primitive = (CsrWifiRouterCtrlWapiRxPktReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->signalLength);
- if (primitive->signalLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->signal, ((u16) (primitive->signalLength)));
- }
- CsrUint16Ser(ptr, len, (u16) primitive->dataLength);
- if (primitive->dataLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->data, ((u16) (primitive->dataLength)));
- }
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlWapiRxPktReqDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlWapiRxPktReq *primitive = kmalloc(sizeof(CsrWifiRouterCtrlWapiRxPktReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->signalLength, buffer, &offset);
- if (primitive->signalLength)
- {
- primitive->signal = kmalloc(primitive->signalLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->signal, buffer, &offset, ((u16) (primitive->signalLength)));
- }
- else
- {
- primitive->signal = NULL;
- }
- CsrUint16Des((u16 *) &primitive->dataLength, buffer, &offset);
- if (primitive->dataLength)
- {
- primitive->data = kmalloc(primitive->dataLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->data, buffer, &offset, ((u16) (primitive->dataLength)));
- }
- else
- {
- primitive->data = NULL;
- }
-
- return primitive;
-}
-
-
-void CsrWifiRouterCtrlWapiRxPktReqSerFree(void *voidPrimitivePointer)
-{
- CsrWifiRouterCtrlWapiRxPktReq *primitive = (CsrWifiRouterCtrlWapiRxPktReq *) voidPrimitivePointer;
- kfree(primitive->signal);
- kfree(primitive->data);
- kfree(primitive);
-}
-
-
-size_t CsrWifiRouterCtrlWapiUnicastTxPktReqSizeof(void *msg)
-{
- CsrWifiRouterCtrlWapiUnicastTxPktReq *primitive = (CsrWifiRouterCtrlWapiUnicastTxPktReq *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 8) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* u16 primitive->dataLength */
- bufferSize += primitive->dataLength; /* u8 primitive->data */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlWapiUnicastTxPktReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlWapiUnicastTxPktReq *primitive = (CsrWifiRouterCtrlWapiUnicastTxPktReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->dataLength);
- if (primitive->dataLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->data, ((u16) (primitive->dataLength)));
- }
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlWapiUnicastTxPktReqDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlWapiUnicastTxPktReq *primitive = kmalloc(sizeof(CsrWifiRouterCtrlWapiUnicastTxPktReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->dataLength, buffer, &offset);
- if (primitive->dataLength)
- {
- primitive->data = kmalloc(primitive->dataLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->data, buffer, &offset, ((u16) (primitive->dataLength)));
- }
- else
- {
- primitive->data = NULL;
- }
-
- return primitive;
-}
-
-
-void CsrWifiRouterCtrlWapiUnicastTxPktReqSerFree(void *voidPrimitivePointer)
-{
- CsrWifiRouterCtrlWapiUnicastTxPktReq *primitive = (CsrWifiRouterCtrlWapiUnicastTxPktReq *) voidPrimitivePointer;
- kfree(primitive->data);
- kfree(primitive);
-}
-
-
-size_t CsrWifiRouterCtrlHipIndSizeof(void *msg)
-{
- CsrWifiRouterCtrlHipInd *primitive = (CsrWifiRouterCtrlHipInd *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 12) */
- bufferSize += 2; /* u16 primitive->mlmeCommandLength */
- bufferSize += primitive->mlmeCommandLength; /* u8 primitive->mlmeCommand */
- bufferSize += 2; /* u16 primitive->dataRef1Length */
- bufferSize += primitive->dataRef1Length; /* u8 primitive->dataRef1 */
- bufferSize += 2; /* u16 primitive->dataRef2Length */
- bufferSize += primitive->dataRef2Length; /* u8 primitive->dataRef2 */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlHipIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlHipInd *primitive = (CsrWifiRouterCtrlHipInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->mlmeCommandLength);
- if (primitive->mlmeCommandLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->mlmeCommand, ((u16) (primitive->mlmeCommandLength)));
- }
- CsrUint16Ser(ptr, len, (u16) primitive->dataRef1Length);
- if (primitive->dataRef1Length)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->dataRef1, ((u16) (primitive->dataRef1Length)));
- }
- CsrUint16Ser(ptr, len, (u16) primitive->dataRef2Length);
- if (primitive->dataRef2Length)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->dataRef2, ((u16) (primitive->dataRef2Length)));
- }
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlHipIndDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlHipInd *primitive = kmalloc(sizeof(CsrWifiRouterCtrlHipInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->mlmeCommandLength, buffer, &offset);
- if (primitive->mlmeCommandLength)
- {
- primitive->mlmeCommand = kmalloc(primitive->mlmeCommandLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->mlmeCommand, buffer, &offset, ((u16) (primitive->mlmeCommandLength)));
- }
- else
- {
- primitive->mlmeCommand = NULL;
- }
- CsrUint16Des((u16 *) &primitive->dataRef1Length, buffer, &offset);
- if (primitive->dataRef1Length)
- {
- primitive->dataRef1 = kmalloc(primitive->dataRef1Length, GFP_KERNEL);
- CsrMemCpyDes(primitive->dataRef1, buffer, &offset, ((u16) (primitive->dataRef1Length)));
- }
- else
- {
- primitive->dataRef1 = NULL;
- }
- CsrUint16Des((u16 *) &primitive->dataRef2Length, buffer, &offset);
- if (primitive->dataRef2Length)
- {
- primitive->dataRef2 = kmalloc(primitive->dataRef2Length, GFP_KERNEL);
- CsrMemCpyDes(primitive->dataRef2, buffer, &offset, ((u16) (primitive->dataRef2Length)));
- }
- else
- {
- primitive->dataRef2 = NULL;
- }
-
- return primitive;
-}
-
-
-void CsrWifiRouterCtrlHipIndSerFree(void *voidPrimitivePointer)
-{
- CsrWifiRouterCtrlHipInd *primitive = (CsrWifiRouterCtrlHipInd *) voidPrimitivePointer;
- kfree(primitive->mlmeCommand);
- kfree(primitive->dataRef1);
- kfree(primitive->dataRef2);
- kfree(primitive);
-}
-
-
-size_t CsrWifiRouterCtrlMulticastAddressIndSizeof(void *msg)
-{
- CsrWifiRouterCtrlMulticastAddressInd *primitive = (CsrWifiRouterCtrlMulticastAddressInd *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 15) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 1; /* CsrWifiRouterCtrlListAction primitive->action */
- bufferSize += 1; /* u8 primitive->setAddressesCount */
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->setAddressesCount; i1++)
- {
- bufferSize += 6; /* u8 primitive->setAddresses[i1].a[6] */
- }
- }
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlMulticastAddressIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlMulticastAddressInd *primitive = (CsrWifiRouterCtrlMulticastAddressInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint8Ser(ptr, len, (u8) primitive->action);
- CsrUint8Ser(ptr, len, (u8) primitive->setAddressesCount);
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->setAddressesCount; i1++)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->setAddresses[i1].a, ((u16) (6)));
- }
- }
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlMulticastAddressIndDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlMulticastAddressInd *primitive = kmalloc(sizeof(CsrWifiRouterCtrlMulticastAddressInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->action, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->setAddressesCount, buffer, &offset);
- primitive->setAddresses = NULL;
- if (primitive->setAddressesCount)
- {
- primitive->setAddresses = kmalloc(sizeof(CsrWifiMacAddress) * primitive->setAddressesCount, GFP_KERNEL);
- }
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->setAddressesCount; i1++)
- {
- CsrMemCpyDes(primitive->setAddresses[i1].a, buffer, &offset, ((u16) (6)));
- }
- }
-
- return primitive;
-}
-
-
-void CsrWifiRouterCtrlMulticastAddressIndSerFree(void *voidPrimitivePointer)
-{
- CsrWifiRouterCtrlMulticastAddressInd *primitive = (CsrWifiRouterCtrlMulticastAddressInd *) voidPrimitivePointer;
- kfree(primitive->setAddresses);
- kfree(primitive);
-}
-
-
-size_t CsrWifiRouterCtrlPortConfigureCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 15) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 6; /* u8 primitive->macAddress.a[6] */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlPortConfigureCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlPortConfigureCfm *primitive = (CsrWifiRouterCtrlPortConfigureCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrMemCpySer(ptr, len, (const void *) primitive->macAddress.a, ((u16) (6)));
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlPortConfigureCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlPortConfigureCfm *primitive = kmalloc(sizeof(CsrWifiRouterCtrlPortConfigureCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrMemCpyDes(primitive->macAddress.a, buffer, &offset, ((u16) (6)));
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlSuspendIndSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 7) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 1; /* u8 primitive->hardSuspend */
- bufferSize += 1; /* u8 primitive->d3Suspend */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlSuspendIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlSuspendInd *primitive = (CsrWifiRouterCtrlSuspendInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint8Ser(ptr, len, (u8) primitive->hardSuspend);
- CsrUint8Ser(ptr, len, (u8) primitive->d3Suspend);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlSuspendIndDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlSuspendInd *primitive = kmalloc(sizeof(CsrWifiRouterCtrlSuspendInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->hardSuspend, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->d3Suspend, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlTclasAddCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 9) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlTclasAddCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlTclasAddCfm *primitive = (CsrWifiRouterCtrlTclasAddCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlTclasAddCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlTclasAddCfm *primitive = kmalloc(sizeof(CsrWifiRouterCtrlTclasAddCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlRawSdioDeinitialiseCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 7) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* CsrResult primitive->result */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlRawSdioDeinitialiseCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlRawSdioDeinitialiseCfm *primitive = (CsrWifiRouterCtrlRawSdioDeinitialiseCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->result);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlRawSdioDeinitialiseCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlRawSdioDeinitialiseCfm *primitive = kmalloc(sizeof(CsrWifiRouterCtrlRawSdioDeinitialiseCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->result, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlRawSdioInitialiseCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 39) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* CsrResult primitive->result */
- bufferSize += 4; /* CsrWifiRouterCtrlRawSdioByteRead primitive->byteRead */
- bufferSize += 4; /* CsrWifiRouterCtrlRawSdioByteWrite primitive->byteWrite */
- bufferSize += 4; /* CsrWifiRouterCtrlRawSdioFirmwareDownload primitive->firmwareDownload */
- bufferSize += 4; /* CsrWifiRouterCtrlRawSdioReset primitive->reset */
- bufferSize += 4; /* CsrWifiRouterCtrlRawSdioCoreDumpPrepare primitive->coreDumpPrepare */
- bufferSize += 4; /* CsrWifiRouterCtrlRawSdioByteBlockRead primitive->byteBlockRead */
- bufferSize += 4; /* CsrWifiRouterCtrlRawSdioGpRead16 primitive->gpRead16 */
- bufferSize += 4; /* CsrWifiRouterCtrlRawSdioGpWrite16 primitive->gpWrite16 */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlRawSdioInitialiseCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlRawSdioInitialiseCfm *primitive = (CsrWifiRouterCtrlRawSdioInitialiseCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->result);
- CsrUint32Ser(ptr, len, 0); /* Special for Function Pointers... primitive->byteRead */
- CsrUint32Ser(ptr, len, 0); /* Special for Function Pointers... primitive->byteWrite */
- CsrUint32Ser(ptr, len, 0); /* Special for Function Pointers... primitive->firmwareDownload */
- CsrUint32Ser(ptr, len, 0); /* Special for Function Pointers... primitive->reset */
- CsrUint32Ser(ptr, len, 0); /* Special for Function Pointers... primitive->coreDumpPrepare */
- CsrUint32Ser(ptr, len, 0); /* Special for Function Pointers... primitive->byteBlockRead */
- CsrUint32Ser(ptr, len, 0); /* Special for Function Pointers... primitive->gpRead16 */
- CsrUint32Ser(ptr, len, 0); /* Special for Function Pointers... primitive->gpWrite16 */
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlRawSdioInitialiseCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlRawSdioInitialiseCfm *primitive = kmalloc(sizeof(CsrWifiRouterCtrlRawSdioInitialiseCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->result, buffer, &offset);
- primitive->byteRead = NULL; /* Special for Function Pointers... */
- offset += 4;
- primitive->byteWrite = NULL; /* Special for Function Pointers... */
- offset += 4;
- primitive->firmwareDownload = NULL; /* Special for Function Pointers... */
- offset += 4;
- primitive->reset = NULL; /* Special for Function Pointers... */
- offset += 4;
- primitive->coreDumpPrepare = NULL; /* Special for Function Pointers... */
- offset += 4;
- primitive->byteBlockRead = NULL; /* Special for Function Pointers... */
- offset += 4;
- primitive->gpRead16 = NULL; /* Special for Function Pointers... */
- offset += 4;
- primitive->gpWrite16 = NULL; /* Special for Function Pointers... */
- offset += 4;
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlTclasDelCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 9) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlTclasDelCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlTclasDelCfm *primitive = (CsrWifiRouterCtrlTclasDelCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlTclasDelCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlTclasDelCfm *primitive = kmalloc(sizeof(CsrWifiRouterCtrlTclasDelCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlTrafficProtocolIndSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 17) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrWifiRouterCtrlTrafficPacketType primitive->packetType */
- bufferSize += 2; /* CsrWifiRouterCtrlProtocolDirection primitive->direction */
- bufferSize += 6; /* u8 primitive->srcAddress.a[6] */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlTrafficProtocolIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlTrafficProtocolInd *primitive = (CsrWifiRouterCtrlTrafficProtocolInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->packetType);
- CsrUint16Ser(ptr, len, (u16) primitive->direction);
- CsrMemCpySer(ptr, len, (const void *) primitive->srcAddress.a, ((u16) (6)));
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlTrafficProtocolIndDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlTrafficProtocolInd *primitive = kmalloc(sizeof(CsrWifiRouterCtrlTrafficProtocolInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->packetType, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->direction, buffer, &offset);
- CsrMemCpyDes(primitive->srcAddress.a, buffer, &offset, ((u16) (6)));
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlTrafficSampleIndSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 38) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 4; /* u32 primitive->stats.rxMeanRate */
- bufferSize += 4; /* u32 primitive->stats.rxFramesNum */
- bufferSize += 4; /* u32 primitive->stats.txFramesNum */
- bufferSize += 4; /* u32 primitive->stats.rxBytesCount */
- bufferSize += 4; /* u32 primitive->stats.txBytesCount */
- bufferSize += 11; /* u8 primitive->stats.intervals[11] */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlTrafficSampleIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlTrafficSampleInd *primitive = (CsrWifiRouterCtrlTrafficSampleInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint32Ser(ptr, len, (u32) primitive->stats.rxMeanRate);
- CsrUint32Ser(ptr, len, (u32) primitive->stats.rxFramesNum);
- CsrUint32Ser(ptr, len, (u32) primitive->stats.txFramesNum);
- CsrUint32Ser(ptr, len, (u32) primitive->stats.rxBytesCount);
- CsrUint32Ser(ptr, len, (u32) primitive->stats.txBytesCount);
- CsrMemCpySer(ptr, len, (const void *) primitive->stats.intervals, ((u16) (11)));
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlTrafficSampleIndDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlTrafficSampleInd *primitive = kmalloc(sizeof(CsrWifiRouterCtrlTrafficSampleInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->stats.rxMeanRate, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->stats.rxFramesNum, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->stats.txFramesNum, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->stats.rxBytesCount, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->stats.txBytesCount, buffer, &offset);
- CsrMemCpyDes(primitive->stats.intervals, buffer, &offset, ((u16) (11)));
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlWifiOnIndSizeof(void *msg)
-{
- CsrWifiRouterCtrlWifiOnInd *primitive = (CsrWifiRouterCtrlWifiOnInd *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 27) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 4; /* u32 primitive->versions.chipId */
- bufferSize += 4; /* u32 primitive->versions.chipVersion */
- bufferSize += 4; /* u32 primitive->versions.firmwareBuild */
- bufferSize += 4; /* u32 primitive->versions.firmwareHip */
- bufferSize += (primitive->versions.routerBuild ? strlen(primitive->versions.routerBuild) : 0) + 1; /* char* primitive->versions.routerBuild (0 byte len + 1 for NULL Term) */
- bufferSize += 4; /* u32 primitive->versions.routerHip */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlWifiOnIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlWifiOnInd *primitive = (CsrWifiRouterCtrlWifiOnInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint32Ser(ptr, len, (u32) primitive->versions.chipId);
- CsrUint32Ser(ptr, len, (u32) primitive->versions.chipVersion);
- CsrUint32Ser(ptr, len, (u32) primitive->versions.firmwareBuild);
- CsrUint32Ser(ptr, len, (u32) primitive->versions.firmwareHip);
- CsrCharStringSer(ptr, len, primitive->versions.routerBuild);
- CsrUint32Ser(ptr, len, (u32) primitive->versions.routerHip);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlWifiOnIndDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlWifiOnInd *primitive = kmalloc(sizeof(CsrWifiRouterCtrlWifiOnInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->versions.chipId, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->versions.chipVersion, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->versions.firmwareBuild, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->versions.firmwareHip, buffer, &offset);
- CsrCharStringDes(&primitive->versions.routerBuild, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->versions.routerHip, buffer, &offset);
-
- return primitive;
-}
-
-
-void CsrWifiRouterCtrlWifiOnIndSerFree(void *voidPrimitivePointer)
-{
- CsrWifiRouterCtrlWifiOnInd *primitive = (CsrWifiRouterCtrlWifiOnInd *) voidPrimitivePointer;
- kfree(primitive->versions.routerBuild);
- kfree(primitive);
-}
-
-
-size_t CsrWifiRouterCtrlWifiOnCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 7) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* CsrResult primitive->status */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlWifiOnCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlWifiOnCfm *primitive = (CsrWifiRouterCtrlWifiOnCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlWifiOnCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlWifiOnCfm *primitive = kmalloc(sizeof(CsrWifiRouterCtrlWifiOnCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlM4ReadyToSendIndSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 13) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 6; /* u8 primitive->peerMacAddress.a[6] */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlM4ReadyToSendIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlM4ReadyToSendInd *primitive = (CsrWifiRouterCtrlM4ReadyToSendInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrMemCpySer(ptr, len, (const void *) primitive->peerMacAddress.a, ((u16) (6)));
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlM4ReadyToSendIndDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlM4ReadyToSendInd *primitive = kmalloc(sizeof(CsrWifiRouterCtrlM4ReadyToSendInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrMemCpyDes(primitive->peerMacAddress.a, buffer, &offset, ((u16) (6)));
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlM4TransmittedIndSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 15) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 6; /* u8 primitive->peerMacAddress.a[6] */
- bufferSize += 2; /* CsrResult primitive->status */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlM4TransmittedIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlM4TransmittedInd *primitive = (CsrWifiRouterCtrlM4TransmittedInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrMemCpySer(ptr, len, (const void *) primitive->peerMacAddress.a, ((u16) (6)));
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlM4TransmittedIndDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlM4TransmittedInd *primitive = kmalloc(sizeof(CsrWifiRouterCtrlM4TransmittedInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrMemCpyDes(primitive->peerMacAddress.a, buffer, &offset, ((u16) (6)));
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlMicFailureIndSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 14) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 6; /* u8 primitive->peerMacAddress.a[6] */
- bufferSize += 1; /* u8 primitive->unicastPdu */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlMicFailureIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlMicFailureInd *primitive = (CsrWifiRouterCtrlMicFailureInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrMemCpySer(ptr, len, (const void *) primitive->peerMacAddress.a, ((u16) (6)));
- CsrUint8Ser(ptr, len, (u8) primitive->unicastPdu);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlMicFailureIndDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlMicFailureInd *primitive = kmalloc(sizeof(CsrWifiRouterCtrlMicFailureInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrMemCpyDes(primitive->peerMacAddress.a, buffer, &offset, ((u16) (6)));
- CsrUint8Des((u8 *) &primitive->unicastPdu, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlConnectedIndSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 14) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 6; /* u8 primitive->peerMacAddress.a[6] */
- bufferSize += 1; /* CsrWifiRouterCtrlPeerStatus primitive->peerStatus */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlConnectedIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlConnectedInd *primitive = (CsrWifiRouterCtrlConnectedInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrMemCpySer(ptr, len, (const void *) primitive->peerMacAddress.a, ((u16) (6)));
- CsrUint8Ser(ptr, len, (u8) primitive->peerStatus);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlConnectedIndDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlConnectedInd *primitive = kmalloc(sizeof(CsrWifiRouterCtrlConnectedInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrMemCpyDes(primitive->peerMacAddress.a, buffer, &offset, ((u16) (6)));
- CsrUint8Des((u8 *) &primitive->peerStatus, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlPeerAddCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 19) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 6; /* u8 primitive->peerMacAddress.a[6] */
- bufferSize += 4; /* CsrWifiRouterCtrlPeerRecordHandle primitive->peerRecordHandle */
- bufferSize += 2; /* CsrResult primitive->status */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlPeerAddCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlPeerAddCfm *primitive = (CsrWifiRouterCtrlPeerAddCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrMemCpySer(ptr, len, (const void *) primitive->peerMacAddress.a, ((u16) (6)));
- CsrUint32Ser(ptr, len, (u32) primitive->peerRecordHandle);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlPeerAddCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlPeerAddCfm *primitive = kmalloc(sizeof(CsrWifiRouterCtrlPeerAddCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrMemCpyDes(primitive->peerMacAddress.a, buffer, &offset, ((u16) (6)));
- CsrUint32Des((u32 *) &primitive->peerRecordHandle, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlPeerDelCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 9) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlPeerDelCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlPeerDelCfm *primitive = (CsrWifiRouterCtrlPeerDelCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlPeerDelCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlPeerDelCfm *primitive = kmalloc(sizeof(CsrWifiRouterCtrlPeerDelCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlUnexpectedFrameIndSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 13) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 6; /* u8 primitive->peerMacAddress.a[6] */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlUnexpectedFrameIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlUnexpectedFrameInd *primitive = (CsrWifiRouterCtrlUnexpectedFrameInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrMemCpySer(ptr, len, (const void *) primitive->peerMacAddress.a, ((u16) (6)));
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlUnexpectedFrameIndDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlUnexpectedFrameInd *primitive = kmalloc(sizeof(CsrWifiRouterCtrlUnexpectedFrameInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrMemCpyDes(primitive->peerMacAddress.a, buffer, &offset, ((u16) (6)));
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlPeerUpdateCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 9) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlPeerUpdateCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlPeerUpdateCfm *primitive = (CsrWifiRouterCtrlPeerUpdateCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlPeerUpdateCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlPeerUpdateCfm *primitive = kmalloc(sizeof(CsrWifiRouterCtrlPeerUpdateCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlCapabilitiesCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 9) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* u16 primitive->commandQueueSize */
- bufferSize += 2; /* u16 primitive->trafficQueueSize */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlCapabilitiesCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlCapabilitiesCfm *primitive = (CsrWifiRouterCtrlCapabilitiesCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->commandQueueSize);
- CsrUint16Ser(ptr, len, (u16) primitive->trafficQueueSize);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlCapabilitiesCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlCapabilitiesCfm *primitive = kmalloc(sizeof(CsrWifiRouterCtrlCapabilitiesCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->commandQueueSize, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->trafficQueueSize, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlBlockAckEnableCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 9) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlBlockAckEnableCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlBlockAckEnableCfm *primitive = (CsrWifiRouterCtrlBlockAckEnableCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlBlockAckEnableCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlBlockAckEnableCfm *primitive = kmalloc(sizeof(CsrWifiRouterCtrlBlockAckEnableCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlBlockAckDisableCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 9) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlBlockAckDisableCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlBlockAckDisableCfm *primitive = (CsrWifiRouterCtrlBlockAckDisableCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlBlockAckDisableCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlBlockAckDisableCfm *primitive = kmalloc(sizeof(CsrWifiRouterCtrlBlockAckDisableCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlBlockAckErrorIndSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 16) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 1; /* CsrWifiRouterCtrlTrafficStreamId primitive->trafficStreamID */
- bufferSize += 6; /* u8 primitive->peerMacAddress.a[6] */
- bufferSize += 2; /* CsrResult primitive->status */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlBlockAckErrorIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlBlockAckErrorInd *primitive = (CsrWifiRouterCtrlBlockAckErrorInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint8Ser(ptr, len, (u8) primitive->trafficStreamID);
- CsrMemCpySer(ptr, len, (const void *) primitive->peerMacAddress.a, ((u16) (6)));
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlBlockAckErrorIndDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlBlockAckErrorInd *primitive = kmalloc(sizeof(CsrWifiRouterCtrlBlockAckErrorInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->trafficStreamID, buffer, &offset);
- CsrMemCpyDes(primitive->peerMacAddress.a, buffer, &offset, ((u16) (6)));
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlStaInactiveIndSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 13) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 6; /* u8 primitive->staAddress.a[6] */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlStaInactiveIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlStaInactiveInd *primitive = (CsrWifiRouterCtrlStaInactiveInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrMemCpySer(ptr, len, (const void *) primitive->staAddress.a, ((u16) (6)));
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlStaInactiveIndDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlStaInactiveInd *primitive = kmalloc(sizeof(CsrWifiRouterCtrlStaInactiveInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrMemCpyDes(primitive->staAddress.a, buffer, &offset, ((u16) (6)));
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlWapiRxMicCheckIndSizeof(void *msg)
-{
- CsrWifiRouterCtrlWapiRxMicCheckInd *primitive = (CsrWifiRouterCtrlWapiRxMicCheckInd *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 13) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* u16 primitive->signalLength */
- bufferSize += primitive->signalLength; /* u8 primitive->signal */
- bufferSize += 2; /* u16 primitive->dataLength */
- bufferSize += primitive->dataLength; /* u8 primitive->data */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlWapiRxMicCheckIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlWapiRxMicCheckInd *primitive = (CsrWifiRouterCtrlWapiRxMicCheckInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->signalLength);
- if (primitive->signalLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->signal, ((u16) (primitive->signalLength)));
- }
- CsrUint16Ser(ptr, len, (u16) primitive->dataLength);
- if (primitive->dataLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->data, ((u16) (primitive->dataLength)));
- }
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlWapiRxMicCheckIndDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlWapiRxMicCheckInd *primitive = kmalloc(sizeof(CsrWifiRouterCtrlWapiRxMicCheckInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->signalLength, buffer, &offset);
- if (primitive->signalLength)
- {
- primitive->signal = kmalloc(primitive->signalLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->signal, buffer, &offset, ((u16) (primitive->signalLength)));
- }
- else
- {
- primitive->signal = NULL;
- }
- CsrUint16Des((u16 *) &primitive->dataLength, buffer, &offset);
- if (primitive->dataLength)
- {
- primitive->data = kmalloc(primitive->dataLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->data, buffer, &offset, ((u16) (primitive->dataLength)));
- }
- else
- {
- primitive->data = NULL;
- }
-
- return primitive;
-}
-
-
-void CsrWifiRouterCtrlWapiRxMicCheckIndSerFree(void *voidPrimitivePointer)
-{
- CsrWifiRouterCtrlWapiRxMicCheckInd *primitive = (CsrWifiRouterCtrlWapiRxMicCheckInd *) voidPrimitivePointer;
- kfree(primitive->signal);
- kfree(primitive->data);
- kfree(primitive);
-}
-
-
-size_t CsrWifiRouterCtrlModeSetCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 10) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 1; /* CsrWifiRouterCtrlMode primitive->mode */
- bufferSize += 2; /* CsrResult primitive->status */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlModeSetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlModeSetCfm *primitive = (CsrWifiRouterCtrlModeSetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint8Ser(ptr, len, (u8) primitive->mode);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlModeSetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlModeSetCfm *primitive = kmalloc(sizeof(CsrWifiRouterCtrlModeSetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->mode, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterCtrlWapiUnicastTxEncryptIndSizeof(void *msg)
-{
- CsrWifiRouterCtrlWapiUnicastTxEncryptInd *primitive = (CsrWifiRouterCtrlWapiUnicastTxEncryptInd *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 10) */
- bufferSize += 2; /* CsrWifiRouterCtrlRequestorInfo primitive->clientData */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* u16 primitive->dataLength */
- bufferSize += primitive->dataLength; /* u8 primitive->data */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterCtrlWapiUnicastTxEncryptIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterCtrlWapiUnicastTxEncryptInd *primitive = (CsrWifiRouterCtrlWapiUnicastTxEncryptInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->clientData);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->dataLength);
- if (primitive->dataLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->data, ((u16) (primitive->dataLength)));
- }
- return(ptr);
-}
-
-
-void* CsrWifiRouterCtrlWapiUnicastTxEncryptIndDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterCtrlWapiUnicastTxEncryptInd *primitive = kmalloc(sizeof(CsrWifiRouterCtrlWapiUnicastTxEncryptInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->clientData, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->dataLength, buffer, &offset);
- if (primitive->dataLength)
- {
- primitive->data = kmalloc(primitive->dataLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->data, buffer, &offset, ((u16) (primitive->dataLength)));
- }
- else
- {
- primitive->data = NULL;
- }
-
- return primitive;
-}
-
-
-void CsrWifiRouterCtrlWapiUnicastTxEncryptIndSerFree(void *voidPrimitivePointer)
-{
- CsrWifiRouterCtrlWapiUnicastTxEncryptInd *primitive = (CsrWifiRouterCtrlWapiUnicastTxEncryptInd *) voidPrimitivePointer;
- kfree(primitive->data);
- kfree(primitive);
-}
-
-
diff --git a/drivers/staging/csr/csr_wifi_router_ctrl_serialize.h b/drivers/staging/csr/csr_wifi_router_ctrl_serialize.h
deleted file mode 100644
index c904838..0000000
--- a/drivers/staging/csr/csr_wifi_router_ctrl_serialize.h
+++ /dev/null
@@ -1,333 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#ifndef CSR_WIFI_ROUTER_CTRL_SERIALIZE_H__
-#define CSR_WIFI_ROUTER_CTRL_SERIALIZE_H__
-
-#include "csr_wifi_msgconv.h"
-
-#include "csr_wifi_router_ctrl_prim.h"
-
-extern void CsrWifiRouterCtrlPfree(void *ptr);
-
-extern u8* CsrWifiRouterCtrlConfigurePowerModeReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlConfigurePowerModeReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlConfigurePowerModeReqSizeof(void *msg);
-#define CsrWifiRouterCtrlConfigurePowerModeReqSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlHipReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlHipReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlHipReqSizeof(void *msg);
-extern void CsrWifiRouterCtrlHipReqSerFree(void *msg);
-
-extern u8* CsrWifiRouterCtrlMediaStatusReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlMediaStatusReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlMediaStatusReqSizeof(void *msg);
-#define CsrWifiRouterCtrlMediaStatusReqSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlMulticastAddressResSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlMulticastAddressResDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlMulticastAddressResSizeof(void *msg);
-extern void CsrWifiRouterCtrlMulticastAddressResSerFree(void *msg);
-
-extern u8* CsrWifiRouterCtrlPortConfigureReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlPortConfigureReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlPortConfigureReqSizeof(void *msg);
-#define CsrWifiRouterCtrlPortConfigureReqSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlQosControlReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlQosControlReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlQosControlReqSizeof(void *msg);
-#define CsrWifiRouterCtrlQosControlReqSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlSuspendResSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlSuspendResDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlSuspendResSizeof(void *msg);
-#define CsrWifiRouterCtrlSuspendResSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlTclasAddReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlTclasAddReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlTclasAddReqSizeof(void *msg);
-extern void CsrWifiRouterCtrlTclasAddReqSerFree(void *msg);
-
-extern u8* CsrWifiRouterCtrlResumeResSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlResumeResDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlResumeResSizeof(void *msg);
-#define CsrWifiRouterCtrlResumeResSerFree CsrWifiRouterCtrlPfree
-
-#define CsrWifiRouterCtrlRawSdioDeinitialiseReqSer CsrWifiEventCsrUint16Ser
-#define CsrWifiRouterCtrlRawSdioDeinitialiseReqDes CsrWifiEventCsrUint16Des
-#define CsrWifiRouterCtrlRawSdioDeinitialiseReqSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiRouterCtrlRawSdioDeinitialiseReqSerFree CsrWifiRouterCtrlPfree
-
-#define CsrWifiRouterCtrlRawSdioInitialiseReqSer CsrWifiEventCsrUint16Ser
-#define CsrWifiRouterCtrlRawSdioInitialiseReqDes CsrWifiEventCsrUint16Des
-#define CsrWifiRouterCtrlRawSdioInitialiseReqSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiRouterCtrlRawSdioInitialiseReqSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlTclasDelReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlTclasDelReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlTclasDelReqSizeof(void *msg);
-extern void CsrWifiRouterCtrlTclasDelReqSerFree(void *msg);
-
-extern u8* CsrWifiRouterCtrlTrafficClassificationReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlTrafficClassificationReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlTrafficClassificationReqSizeof(void *msg);
-#define CsrWifiRouterCtrlTrafficClassificationReqSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlTrafficConfigReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlTrafficConfigReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlTrafficConfigReqSizeof(void *msg);
-#define CsrWifiRouterCtrlTrafficConfigReqSerFree CsrWifiRouterCtrlPfree
-
-#define CsrWifiRouterCtrlWifiOffReqSer CsrWifiEventCsrUint16Ser
-#define CsrWifiRouterCtrlWifiOffReqDes CsrWifiEventCsrUint16Des
-#define CsrWifiRouterCtrlWifiOffReqSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiRouterCtrlWifiOffReqSerFree CsrWifiRouterCtrlPfree
-
-#define CsrWifiRouterCtrlWifiOffResSer CsrWifiEventCsrUint16Ser
-#define CsrWifiRouterCtrlWifiOffResDes CsrWifiEventCsrUint16Des
-#define CsrWifiRouterCtrlWifiOffResSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiRouterCtrlWifiOffResSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlWifiOnReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlWifiOnReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlWifiOnReqSizeof(void *msg);
-extern void CsrWifiRouterCtrlWifiOnReqSerFree(void *msg);
-
-extern u8* CsrWifiRouterCtrlWifiOnResSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlWifiOnResDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlWifiOnResSizeof(void *msg);
-extern void CsrWifiRouterCtrlWifiOnResSerFree(void *msg);
-
-extern u8* CsrWifiRouterCtrlM4TransmitReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlM4TransmitReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlM4TransmitReqSizeof(void *msg);
-#define CsrWifiRouterCtrlM4TransmitReqSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlModeSetReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlModeSetReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlModeSetReqSizeof(void *msg);
-#define CsrWifiRouterCtrlModeSetReqSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlPeerAddReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlPeerAddReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlPeerAddReqSizeof(void *msg);
-#define CsrWifiRouterCtrlPeerAddReqSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlPeerDelReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlPeerDelReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlPeerDelReqSizeof(void *msg);
-#define CsrWifiRouterCtrlPeerDelReqSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlPeerUpdateReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlPeerUpdateReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlPeerUpdateReqSizeof(void *msg);
-#define CsrWifiRouterCtrlPeerUpdateReqSerFree CsrWifiRouterCtrlPfree
-
-#define CsrWifiRouterCtrlCapabilitiesReqSer CsrWifiEventCsrUint16Ser
-#define CsrWifiRouterCtrlCapabilitiesReqDes CsrWifiEventCsrUint16Des
-#define CsrWifiRouterCtrlCapabilitiesReqSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiRouterCtrlCapabilitiesReqSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlBlockAckEnableReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlBlockAckEnableReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlBlockAckEnableReqSizeof(void *msg);
-#define CsrWifiRouterCtrlBlockAckEnableReqSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlBlockAckDisableReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlBlockAckDisableReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlBlockAckDisableReqSizeof(void *msg);
-#define CsrWifiRouterCtrlBlockAckDisableReqSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlWapiRxPktReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlWapiRxPktReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlWapiRxPktReqSizeof(void *msg);
-extern void CsrWifiRouterCtrlWapiRxPktReqSerFree(void *msg);
-
-#define CsrWifiRouterCtrlWapiMulticastFilterReqSer CsrWifiEventCsrUint16CsrUint8Ser
-#define CsrWifiRouterCtrlWapiMulticastFilterReqDes CsrWifiEventCsrUint16CsrUint8Des
-#define CsrWifiRouterCtrlWapiMulticastFilterReqSizeof CsrWifiEventCsrUint16CsrUint8Sizeof
-#define CsrWifiRouterCtrlWapiMulticastFilterReqSerFree CsrWifiRouterCtrlPfree
-
-#define CsrWifiRouterCtrlWapiUnicastFilterReqSer CsrWifiEventCsrUint16CsrUint8Ser
-#define CsrWifiRouterCtrlWapiUnicastFilterReqDes CsrWifiEventCsrUint16CsrUint8Des
-#define CsrWifiRouterCtrlWapiUnicastFilterReqSizeof CsrWifiEventCsrUint16CsrUint8Sizeof
-#define CsrWifiRouterCtrlWapiUnicastFilterReqSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlWapiUnicastTxPktReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlWapiUnicastTxPktReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlWapiUnicastTxPktReqSizeof(void *msg);
-extern void CsrWifiRouterCtrlWapiUnicastTxPktReqSerFree(void *msg);
-
-#define CsrWifiRouterCtrlWapiFilterReqSer CsrWifiEventCsrUint16CsrUint8Ser
-#define CsrWifiRouterCtrlWapiFilterReqDes CsrWifiEventCsrUint16CsrUint8Des
-#define CsrWifiRouterCtrlWapiFilterReqSizeof CsrWifiEventCsrUint16CsrUint8Sizeof
-#define CsrWifiRouterCtrlWapiFilterReqSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlHipIndSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlHipIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlHipIndSizeof(void *msg);
-extern void CsrWifiRouterCtrlHipIndSerFree(void *msg);
-
-extern u8* CsrWifiRouterCtrlMulticastAddressIndSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlMulticastAddressIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlMulticastAddressIndSizeof(void *msg);
-extern void CsrWifiRouterCtrlMulticastAddressIndSerFree(void *msg);
-
-extern u8* CsrWifiRouterCtrlPortConfigureCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlPortConfigureCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlPortConfigureCfmSizeof(void *msg);
-#define CsrWifiRouterCtrlPortConfigureCfmSerFree CsrWifiRouterCtrlPfree
-
-#define CsrWifiRouterCtrlResumeIndSer CsrWifiEventCsrUint16CsrUint8Ser
-#define CsrWifiRouterCtrlResumeIndDes CsrWifiEventCsrUint16CsrUint8Des
-#define CsrWifiRouterCtrlResumeIndSizeof CsrWifiEventCsrUint16CsrUint8Sizeof
-#define CsrWifiRouterCtrlResumeIndSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlSuspendIndSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlSuspendIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlSuspendIndSizeof(void *msg);
-#define CsrWifiRouterCtrlSuspendIndSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlTclasAddCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlTclasAddCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlTclasAddCfmSizeof(void *msg);
-#define CsrWifiRouterCtrlTclasAddCfmSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlRawSdioDeinitialiseCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlRawSdioDeinitialiseCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlRawSdioDeinitialiseCfmSizeof(void *msg);
-#define CsrWifiRouterCtrlRawSdioDeinitialiseCfmSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlRawSdioInitialiseCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlRawSdioInitialiseCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlRawSdioInitialiseCfmSizeof(void *msg);
-#define CsrWifiRouterCtrlRawSdioInitialiseCfmSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlTclasDelCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlTclasDelCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlTclasDelCfmSizeof(void *msg);
-#define CsrWifiRouterCtrlTclasDelCfmSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlTrafficProtocolIndSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlTrafficProtocolIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlTrafficProtocolIndSizeof(void *msg);
-#define CsrWifiRouterCtrlTrafficProtocolIndSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlTrafficSampleIndSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlTrafficSampleIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlTrafficSampleIndSizeof(void *msg);
-#define CsrWifiRouterCtrlTrafficSampleIndSerFree CsrWifiRouterCtrlPfree
-
-#define CsrWifiRouterCtrlWifiOffIndSer CsrWifiEventCsrUint16CsrUint8Ser
-#define CsrWifiRouterCtrlWifiOffIndDes CsrWifiEventCsrUint16CsrUint8Des
-#define CsrWifiRouterCtrlWifiOffIndSizeof CsrWifiEventCsrUint16CsrUint8Sizeof
-#define CsrWifiRouterCtrlWifiOffIndSerFree CsrWifiRouterCtrlPfree
-
-#define CsrWifiRouterCtrlWifiOffCfmSer CsrWifiEventCsrUint16Ser
-#define CsrWifiRouterCtrlWifiOffCfmDes CsrWifiEventCsrUint16Des
-#define CsrWifiRouterCtrlWifiOffCfmSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiRouterCtrlWifiOffCfmSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlWifiOnIndSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlWifiOnIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlWifiOnIndSizeof(void *msg);
-extern void CsrWifiRouterCtrlWifiOnIndSerFree(void *msg);
-
-extern u8* CsrWifiRouterCtrlWifiOnCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlWifiOnCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlWifiOnCfmSizeof(void *msg);
-#define CsrWifiRouterCtrlWifiOnCfmSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlM4ReadyToSendIndSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlM4ReadyToSendIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlM4ReadyToSendIndSizeof(void *msg);
-#define CsrWifiRouterCtrlM4ReadyToSendIndSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlM4TransmittedIndSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlM4TransmittedIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlM4TransmittedIndSizeof(void *msg);
-#define CsrWifiRouterCtrlM4TransmittedIndSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlMicFailureIndSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlMicFailureIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlMicFailureIndSizeof(void *msg);
-#define CsrWifiRouterCtrlMicFailureIndSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlConnectedIndSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlConnectedIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlConnectedIndSizeof(void *msg);
-#define CsrWifiRouterCtrlConnectedIndSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlPeerAddCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlPeerAddCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlPeerAddCfmSizeof(void *msg);
-#define CsrWifiRouterCtrlPeerAddCfmSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlPeerDelCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlPeerDelCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlPeerDelCfmSizeof(void *msg);
-#define CsrWifiRouterCtrlPeerDelCfmSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlUnexpectedFrameIndSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlUnexpectedFrameIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlUnexpectedFrameIndSizeof(void *msg);
-#define CsrWifiRouterCtrlUnexpectedFrameIndSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlPeerUpdateCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlPeerUpdateCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlPeerUpdateCfmSizeof(void *msg);
-#define CsrWifiRouterCtrlPeerUpdateCfmSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlCapabilitiesCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlCapabilitiesCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlCapabilitiesCfmSizeof(void *msg);
-#define CsrWifiRouterCtrlCapabilitiesCfmSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlBlockAckEnableCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlBlockAckEnableCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlBlockAckEnableCfmSizeof(void *msg);
-#define CsrWifiRouterCtrlBlockAckEnableCfmSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlBlockAckDisableCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlBlockAckDisableCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlBlockAckDisableCfmSizeof(void *msg);
-#define CsrWifiRouterCtrlBlockAckDisableCfmSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlBlockAckErrorIndSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlBlockAckErrorIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlBlockAckErrorIndSizeof(void *msg);
-#define CsrWifiRouterCtrlBlockAckErrorIndSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlStaInactiveIndSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlStaInactiveIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlStaInactiveIndSizeof(void *msg);
-#define CsrWifiRouterCtrlStaInactiveIndSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlWapiRxMicCheckIndSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlWapiRxMicCheckIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlWapiRxMicCheckIndSizeof(void *msg);
-extern void CsrWifiRouterCtrlWapiRxMicCheckIndSerFree(void *msg);
-
-extern u8* CsrWifiRouterCtrlModeSetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlModeSetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlModeSetCfmSizeof(void *msg);
-#define CsrWifiRouterCtrlModeSetCfmSerFree CsrWifiRouterCtrlPfree
-
-extern u8* CsrWifiRouterCtrlWapiUnicastTxEncryptIndSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterCtrlWapiUnicastTxEncryptIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterCtrlWapiUnicastTxEncryptIndSizeof(void *msg);
-extern void CsrWifiRouterCtrlWapiUnicastTxEncryptIndSerFree(void *msg);
-
-#endif /* CSR_WIFI_ROUTER_CTRL_SERIALIZE_H__ */
-
diff --git a/drivers/staging/csr/csr_wifi_router_free_downstream_contents.c b/drivers/staging/csr/csr_wifi_router_free_downstream_contents.c
deleted file mode 100644
index c4badc5..0000000
--- a/drivers/staging/csr/csr_wifi_router_free_downstream_contents.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-#include <linux/slab.h>
-#include "csr_wifi_router_prim.h"
-#include "csr_wifi_router_lib.h"
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrWifiRouterFreeDownstreamMessageContents
- *
- * DESCRIPTION
- *
- *
- * PARAMETERS
- * eventClass: only the value CSR_WIFI_ROUTER_PRIM will be handled
- * message: the message to free
- *----------------------------------------------------------------------------*/
-void CsrWifiRouterFreeDownstreamMessageContents(u16 eventClass, void *message)
-{
- if (eventClass != CSR_WIFI_ROUTER_PRIM)
- {
- return;
- }
- if (NULL == message)
- {
- return;
- }
-
- switch (*((CsrWifiRouterPrim *) message))
- {
- case CSR_WIFI_ROUTER_MA_PACKET_REQ:
- {
- CsrWifiRouterMaPacketReq *p = (CsrWifiRouterMaPacketReq *)message;
- kfree(p->frame);
- p->frame = NULL;
- break;
- }
-
- default:
- break;
- }
-}
-
-
diff --git a/drivers/staging/csr/csr_wifi_router_free_upstream_contents.c b/drivers/staging/csr/csr_wifi_router_free_upstream_contents.c
deleted file mode 100644
index 4cd1263..0000000
--- a/drivers/staging/csr/csr_wifi_router_free_upstream_contents.c
+++ /dev/null
@@ -1,47 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-#include <linux/slab.h>
-#include "csr_wifi_router_prim.h"
-#include "csr_wifi_router_lib.h"
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrWifiRouterFreeUpstreamMessageContents
- *
- * DESCRIPTION
- *
- *
- * PARAMETERS
- * eventClass: only the value CSR_WIFI_ROUTER_PRIM will be handled
- * message: the message to free
- *----------------------------------------------------------------------------*/
-void CsrWifiRouterFreeUpstreamMessageContents(u16 eventClass, void *message)
-{
- if (eventClass != CSR_WIFI_ROUTER_PRIM)
- return;
- if (NULL == message)
- return;
- switch (*((CsrWifiRouterPrim *) message)) {
- case CSR_WIFI_ROUTER_MA_PACKET_IND:
- {
- CsrWifiRouterMaPacketInd *p =
- (CsrWifiRouterMaPacketInd *) message;
- kfree(p->frame);
- p->frame = NULL;
- break;
- }
- default:
- break;
- }
-}
-
-
diff --git a/drivers/staging/csr/csr_wifi_router_lib.h b/drivers/staging/csr/csr_wifi_router_lib.h
deleted file mode 100644
index b0477c4..0000000
--- a/drivers/staging/csr/csr_wifi_router_lib.h
+++ /dev/null
@@ -1,417 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#ifndef CSR_WIFI_ROUTER_LIB_H__
-#define CSR_WIFI_ROUTER_LIB_H__
-
-#include "csr_sched.h"
-#include "csr_macro.h"
-#include "csr_msg_transport.h"
-
-#include "csr_wifi_lib.h"
-
-#include "csr_wifi_router_prim.h"
-#include "csr_wifi_router_task.h"
-
-/*----------------------------------------------------------------------------*
- * CsrWifiRouterFreeUpstreamMessageContents
- *
- * DESCRIPTION
- * Free the allocated memory in a CSR_WIFI_ROUTER upstream message. Does not
- * free the message itself, and can only be used for upstream messages.
- *
- * PARAMETERS
- * Deallocates the resources in a CSR_WIFI_ROUTER upstream message
- *----------------------------------------------------------------------------*/
-void CsrWifiRouterFreeUpstreamMessageContents(u16 eventClass, void *message);
-
-/*----------------------------------------------------------------------------*
- * CsrWifiRouterFreeDownstreamMessageContents
- *
- * DESCRIPTION
- * Free the allocated memory in a CSR_WIFI_ROUTER downstream message. Does not
- * free the message itself, and can only be used for downstream messages.
- *
- * PARAMETERS
- * Deallocates the resources in a CSR_WIFI_ROUTER downstream message
- *----------------------------------------------------------------------------*/
-void CsrWifiRouterFreeDownstreamMessageContents(u16 eventClass, void *message);
-
-/*----------------------------------------------------------------------------*
- * Enum to string functions
- *----------------------------------------------------------------------------*/
-const char* CsrWifiRouterAppTypeToString(CsrWifiRouterAppType value);
-const char* CsrWifiRouterEncapsulationToString(CsrWifiRouterEncapsulation value);
-const char* CsrWifiRouterOuiToString(CsrWifiRouterOui value);
-const char* CsrWifiRouterPriorityToString(CsrWifiRouterPriority value);
-
-
-/*----------------------------------------------------------------------------*
- * CsrPrim Type toString function.
- * Converts a message type to the String name of the Message
- *----------------------------------------------------------------------------*/
-const char* CsrWifiRouterPrimTypeToString(CsrPrim msgType);
-
-/*----------------------------------------------------------------------------*
- * Lookup arrays for PrimType name Strings
- *----------------------------------------------------------------------------*/
-extern const char *CsrWifiRouterUpstreamPrimNames[CSR_WIFI_ROUTER_PRIM_UPSTREAM_COUNT];
-extern const char *CsrWifiRouterDownstreamPrimNames[CSR_WIFI_ROUTER_PRIM_DOWNSTREAM_COUNT];
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterMaPacketCancelReqSend
-
- DESCRIPTION
- This primitive is used to request cancellation of a previously sent
- CsrWifiRouterMaPacketReq.
- The frame may already have been transmitted, so there is no guarantee
- that the CsrWifiRouterMaPacketCancelReq actually cancels the transmission
- of the frame in question.
- If the cancellation fails, the Router will send, if required,
- CsrWifiRouterMaPacketCfm.
- If the cancellation succeeds, the Router will not send
- CsrWifiRouterMaPacketCfm.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
- hostTag - The hostTag for the frame, which should be cancelled.
- priority - Priority of the frame, which should be cancelled
- peerMacAddress - Destination MAC address of the frame, which should be
- cancelled
-
-*******************************************************************************/
-#define CsrWifiRouterMaPacketCancelReqCreate(msg__, dst__, src__, interfaceTag__, hostTag__, priority__, peerMacAddress__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterMaPacketCancelReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_PRIM, CSR_WIFI_ROUTER_MA_PACKET_CANCEL_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->hostTag = (hostTag__); \
- msg__->priority = (priority__); \
- msg__->peerMacAddress = (peerMacAddress__);
-
-#define CsrWifiRouterMaPacketCancelReqSendTo(dst__, src__, interfaceTag__, hostTag__, priority__, peerMacAddress__) \
- { \
- CsrWifiRouterMaPacketCancelReq *msg__; \
- CsrWifiRouterMaPacketCancelReqCreate(msg__, dst__, src__, interfaceTag__, hostTag__, priority__, peerMacAddress__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_PRIM, msg__); \
- }
-
-#define CsrWifiRouterMaPacketCancelReqSend(src__, interfaceTag__, hostTag__, priority__, peerMacAddress__) \
- CsrWifiRouterMaPacketCancelReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, interfaceTag__, hostTag__, priority__, peerMacAddress__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterMaPacketReqSend
-
- DESCRIPTION
- A task sends this primitive to transmit a frame.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
- subscriptionHandle - The handle of the subscription
- frameLength - Length of the frame to be sent in bytes
- frame - Pointer to the frame to be sent
- freeFunction - Pointer to function to be used to free the frame
- priority - Priority of the frame, which should be sent
- hostTag - An application shall set the bits b31..b28 using one of
- the CSR_WIFI_ROUTER_APP_TYPE_* masks. Bits b0..b27 can
- be used by the requestor without any restrictions, but
- the hostTag shall be unique so the hostTag for
- CSR_WIFI_ROUTER_APP_TYPE_OTHER should be constructed
- in the following way [ CSR_WIFI_ROUTER_APP_TYPE_OTHER
- (4 bits) | SubscriptionHandle (8 bits) | Sequence no.
- (20 bits) ]. If the hostTag is not unique, the
- behaviour of the system is unpredictable with respect
- to data/management frame transfer.
- cfmRequested - Indicates if the requestor needs a confirm for packet
- requests sent under this subscription. If set to TRUE,
- the router will send a confirm, else it will not send
- any confirm
-
-*******************************************************************************/
-#define CsrWifiRouterMaPacketReqCreate(msg__, dst__, src__, interfaceTag__, subscriptionHandle__, frameLength__, frame__, freeFunction__, priority__, hostTag__, cfmRequested__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterMaPacketReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_PRIM, CSR_WIFI_ROUTER_MA_PACKET_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->subscriptionHandle = (subscriptionHandle__); \
- msg__->frameLength = (frameLength__); \
- msg__->frame = (frame__); \
- msg__->freeFunction = (freeFunction__); \
- msg__->priority = (priority__); \
- msg__->hostTag = (hostTag__); \
- msg__->cfmRequested = (cfmRequested__);
-
-#define CsrWifiRouterMaPacketReqSendTo(dst__, src__, interfaceTag__, subscriptionHandle__, frameLength__, frame__, freeFunction__, priority__, hostTag__, cfmRequested__) \
- { \
- CsrWifiRouterMaPacketReq *msg__; \
- CsrWifiRouterMaPacketReqCreate(msg__, dst__, src__, interfaceTag__, subscriptionHandle__, frameLength__, frame__, freeFunction__, priority__, hostTag__, cfmRequested__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_PRIM, msg__); \
- }
-
-#define CsrWifiRouterMaPacketReqSend(src__, interfaceTag__, subscriptionHandle__, frameLength__, frame__, freeFunction__, priority__, hostTag__, cfmRequested__) \
- CsrWifiRouterMaPacketReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, interfaceTag__, subscriptionHandle__, frameLength__, frame__, freeFunction__, priority__, hostTag__, cfmRequested__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterMaPacketIndSend
-
- DESCRIPTION
- The router sends the primitive to a subscribed task when it receives a
- frame matching the subscription.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- subscriptionHandle - The handle of the subscription
- result - Status of the operation
- frameLength - Length of the received frame in bytes
- frame - Pointer to the received frame
- freeFunction - Pointer to function to be used to free the frame
- rssi - Received signal strength indication in dBm
- snr - Signal to Noise Ratio
- rate - Transmission/Reception rate
-
-*******************************************************************************/
-#define CsrWifiRouterMaPacketIndCreate(msg__, dst__, src__, interfaceTag__, subscriptionHandle__, result__, frameLength__, frame__, freeFunction__, rssi__, snr__, rate__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterMaPacketInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_PRIM, CSR_WIFI_ROUTER_MA_PACKET_IND, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->subscriptionHandle = (subscriptionHandle__); \
- msg__->result = (result__); \
- msg__->frameLength = (frameLength__); \
- msg__->frame = (frame__); \
- msg__->freeFunction = (freeFunction__); \
- msg__->rssi = (rssi__); \
- msg__->snr = (snr__); \
- msg__->rate = (rate__);
-
-#define CsrWifiRouterMaPacketIndSendTo(dst__, src__, interfaceTag__, subscriptionHandle__, result__, frameLength__, frame__, freeFunction__, rssi__, snr__, rate__) \
- { \
- CsrWifiRouterMaPacketInd *msg__; \
- CsrWifiRouterMaPacketIndCreate(msg__, dst__, src__, interfaceTag__, subscriptionHandle__, result__, frameLength__, frame__, freeFunction__, rssi__, snr__, rate__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_PRIM, msg__); \
- }
-
-#define CsrWifiRouterMaPacketIndSend(dst__, interfaceTag__, subscriptionHandle__, result__, frameLength__, frame__, freeFunction__, rssi__, snr__, rate__) \
- CsrWifiRouterMaPacketIndSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, interfaceTag__, subscriptionHandle__, result__, frameLength__, frame__, freeFunction__, rssi__, snr__, rate__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterMaPacketResSend
-
- DESCRIPTION
- A task sends this primitive to confirm the reception of the received
- frame.
-
- PARAMETERS
- interfaceTag - Interface Identifier; unique identifier of an interface
- subscriptionHandle - The handle of the subscription
- result - Status of the operation
-
-*******************************************************************************/
-#define CsrWifiRouterMaPacketResCreate(msg__, dst__, src__, interfaceTag__, subscriptionHandle__, result__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterMaPacketRes), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_PRIM, CSR_WIFI_ROUTER_MA_PACKET_RES, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->subscriptionHandle = (subscriptionHandle__); \
- msg__->result = (result__);
-
-#define CsrWifiRouterMaPacketResSendTo(dst__, src__, interfaceTag__, subscriptionHandle__, result__) \
- { \
- CsrWifiRouterMaPacketRes *msg__; \
- CsrWifiRouterMaPacketResCreate(msg__, dst__, src__, interfaceTag__, subscriptionHandle__, result__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_PRIM, msg__); \
- }
-
-#define CsrWifiRouterMaPacketResSend(src__, interfaceTag__, subscriptionHandle__, result__) \
- CsrWifiRouterMaPacketResSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, interfaceTag__, subscriptionHandle__, result__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterMaPacketCfmSend
-
- DESCRIPTION
- The router sends the primitive to confirm the result of the transmission
- of the packet of the corresponding CSR_WIFI_ROUTER_MA_PACKET_REQ request.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- result - Status of the operation
- hostTag - The hostTag will match the hostTag sent in the request.
- rate - Transmission/Reception rate
-
-*******************************************************************************/
-#define CsrWifiRouterMaPacketCfmCreate(msg__, dst__, src__, interfaceTag__, result__, hostTag__, rate__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterMaPacketCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_PRIM, CSR_WIFI_ROUTER_MA_PACKET_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->result = (result__); \
- msg__->hostTag = (hostTag__); \
- msg__->rate = (rate__);
-
-#define CsrWifiRouterMaPacketCfmSendTo(dst__, src__, interfaceTag__, result__, hostTag__, rate__) \
- { \
- CsrWifiRouterMaPacketCfm *msg__; \
- CsrWifiRouterMaPacketCfmCreate(msg__, dst__, src__, interfaceTag__, result__, hostTag__, rate__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_PRIM, msg__); \
- }
-
-#define CsrWifiRouterMaPacketCfmSend(dst__, interfaceTag__, result__, hostTag__, rate__) \
- CsrWifiRouterMaPacketCfmSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, interfaceTag__, result__, hostTag__, rate__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterMaPacketSubscribeReqSend
-
- DESCRIPTION
- A task can use this primitive to subscribe for a particular OUI/protocol
- and transmit and receive frames matching the subscription.
- NOTE: Multiple subscriptions for a given protocol and OUI will result in
- the first subscription receiving the data and not the subsequent
- subscriptions.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
- encapsulation - Specifies the encapsulation type, which will be used for the
- subscription
- protocol - Together with the OUI, specifies the protocol, which a task
- wants to subscribe to
- oui - Specifies the OUI for the protocol, which a task wants to
- subscribe to
-
-*******************************************************************************/
-#define CsrWifiRouterMaPacketSubscribeReqCreate(msg__, dst__, src__, interfaceTag__, encapsulation__, protocol__, oui__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterMaPacketSubscribeReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_PRIM, CSR_WIFI_ROUTER_MA_PACKET_SUBSCRIBE_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->encapsulation = (encapsulation__); \
- msg__->protocol = (protocol__); \
- msg__->oui = (oui__);
-
-#define CsrWifiRouterMaPacketSubscribeReqSendTo(dst__, src__, interfaceTag__, encapsulation__, protocol__, oui__) \
- { \
- CsrWifiRouterMaPacketSubscribeReq *msg__; \
- CsrWifiRouterMaPacketSubscribeReqCreate(msg__, dst__, src__, interfaceTag__, encapsulation__, protocol__, oui__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_PRIM, msg__); \
- }
-
-#define CsrWifiRouterMaPacketSubscribeReqSend(src__, interfaceTag__, encapsulation__, protocol__, oui__) \
- CsrWifiRouterMaPacketSubscribeReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, interfaceTag__, encapsulation__, protocol__, oui__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterMaPacketSubscribeCfmSend
-
- DESCRIPTION
- The router sends this primitive to confirm the result of the
- subscription.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- subscriptionHandle - Handle to the subscription
- This handle must be used in all subsequent requests
- status - Status of the operation
- allocOffset - Size of the offset for the frames of the subscription
-
-*******************************************************************************/
-#define CsrWifiRouterMaPacketSubscribeCfmCreate(msg__, dst__, src__, interfaceTag__, subscriptionHandle__, status__, allocOffset__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterMaPacketSubscribeCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_PRIM, CSR_WIFI_ROUTER_MA_PACKET_SUBSCRIBE_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->subscriptionHandle = (subscriptionHandle__); \
- msg__->status = (status__); \
- msg__->allocOffset = (allocOffset__);
-
-#define CsrWifiRouterMaPacketSubscribeCfmSendTo(dst__, src__, interfaceTag__, subscriptionHandle__, status__, allocOffset__) \
- { \
- CsrWifiRouterMaPacketSubscribeCfm *msg__; \
- CsrWifiRouterMaPacketSubscribeCfmCreate(msg__, dst__, src__, interfaceTag__, subscriptionHandle__, status__, allocOffset__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_PRIM, msg__); \
- }
-
-#define CsrWifiRouterMaPacketSubscribeCfmSend(dst__, interfaceTag__, subscriptionHandle__, status__, allocOffset__) \
- CsrWifiRouterMaPacketSubscribeCfmSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, interfaceTag__, subscriptionHandle__, status__, allocOffset__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterMaPacketUnsubscribeReqSend
-
- DESCRIPTION
- A task sends this primitive to unsubscribe a subscription
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
- subscriptionHandle - The handle of the subscription
-
-*******************************************************************************/
-#define CsrWifiRouterMaPacketUnsubscribeReqCreate(msg__, dst__, src__, interfaceTag__, subscriptionHandle__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterMaPacketUnsubscribeReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_PRIM, CSR_WIFI_ROUTER_MA_PACKET_UNSUBSCRIBE_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->subscriptionHandle = (subscriptionHandle__);
-
-#define CsrWifiRouterMaPacketUnsubscribeReqSendTo(dst__, src__, interfaceTag__, subscriptionHandle__) \
- { \
- CsrWifiRouterMaPacketUnsubscribeReq *msg__; \
- CsrWifiRouterMaPacketUnsubscribeReqCreate(msg__, dst__, src__, interfaceTag__, subscriptionHandle__); \
- CsrMsgTransport(dst__, CSR_WIFI_ROUTER_PRIM, msg__); \
- }
-
-#define CsrWifiRouterMaPacketUnsubscribeReqSend(src__, interfaceTag__, subscriptionHandle__) \
- CsrWifiRouterMaPacketUnsubscribeReqSendTo(CSR_WIFI_ROUTER_IFACEQUEUE, src__, interfaceTag__, subscriptionHandle__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterMaPacketUnsubscribeCfmSend
-
- DESCRIPTION
- The router sends this primitive to confirm the result of the
- unsubscription.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Status of the operation
-
-*******************************************************************************/
-#define CsrWifiRouterMaPacketUnsubscribeCfmCreate(msg__, dst__, src__, interfaceTag__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiRouterMaPacketUnsubscribeCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_ROUTER_PRIM, CSR_WIFI_ROUTER_MA_PACKET_UNSUBSCRIBE_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__);
-
-#define CsrWifiRouterMaPacketUnsubscribeCfmSendTo(dst__, src__, interfaceTag__, status__) \
- { \
- CsrWifiRouterMaPacketUnsubscribeCfm *msg__; \
- CsrWifiRouterMaPacketUnsubscribeCfmCreate(msg__, dst__, src__, interfaceTag__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_ROUTER_PRIM, msg__); \
- }
-
-#define CsrWifiRouterMaPacketUnsubscribeCfmSend(dst__, interfaceTag__, status__) \
- CsrWifiRouterMaPacketUnsubscribeCfmSendTo(dst__, CSR_WIFI_ROUTER_IFACEQUEUE, interfaceTag__, status__)
-
-#endif /* CSR_WIFI_ROUTER_LIB_H__ */
diff --git a/drivers/staging/csr/csr_wifi_router_prim.h b/drivers/staging/csr/csr_wifi_router_prim.h
deleted file mode 100644
index c52344b..0000000
--- a/drivers/staging/csr/csr_wifi_router_prim.h
+++ /dev/null
@@ -1,421 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#ifndef CSR_WIFI_ROUTER_PRIM_H__
-#define CSR_WIFI_ROUTER_PRIM_H__
-
-#include <linux/types.h>
-#include "csr_prim_defs.h"
-#include "csr_sched.h"
-#include "csr_wifi_common.h"
-#include "csr_result.h"
-#include "csr_wifi_fsm_event.h"
-
-#define CSR_WIFI_ROUTER_PRIM (0x0400)
-
-typedef CsrPrim CsrWifiRouterPrim;
-
-typedef void (*CsrWifiRouterFrameFreeFunction)(void *frame);
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterAppType
-
- DESCRIPTION
-
- VALUES
- CSR_WIFI_ROUTER_APP_TYPE_SME -
- CSR_WIFI_ROUTER_APP_TYPE_PAL -
- CSR_WIFI_ROUTER_APP_TYPE_NME -
- CSR_WIFI_ROUTER_APP_TYPE_OTHER -
-
-*******************************************************************************/
-typedef u8 CsrWifiRouterAppType;
-#define CSR_WIFI_ROUTER_APP_TYPE_SME ((CsrWifiRouterAppType) 0x0)
-#define CSR_WIFI_ROUTER_APP_TYPE_PAL ((CsrWifiRouterAppType) 0x1)
-#define CSR_WIFI_ROUTER_APP_TYPE_NME ((CsrWifiRouterAppType) 0x2)
-#define CSR_WIFI_ROUTER_APP_TYPE_OTHER ((CsrWifiRouterAppType) 0x3)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterEncapsulation
-
- DESCRIPTION
- Indicates the type of encapsulation used for the subscription
-
- VALUES
- CSR_WIFI_ROUTER_ENCAPSULATION_ETHERNET
- - Ethernet encapsulation
- CSR_WIFI_ROUTER_ENCAPSULATION_LLC_SNAP
- - LLC/SNAP encapsulation
-
-*******************************************************************************/
-typedef u8 CsrWifiRouterEncapsulation;
-#define CSR_WIFI_ROUTER_ENCAPSULATION_ETHERNET ((CsrWifiRouterEncapsulation) 0x00)
-#define CSR_WIFI_ROUTER_ENCAPSULATION_LLC_SNAP ((CsrWifiRouterEncapsulation) 0x01)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterOui
-
- DESCRIPTION
-
- VALUES
- CSR_WIFI_ROUTER_OUI_RFC_1042 -
- CSR_WIFI_ROUTER_OUI_BT -
-
-*******************************************************************************/
-typedef u32 CsrWifiRouterOui;
-#define CSR_WIFI_ROUTER_OUI_RFC_1042 ((CsrWifiRouterOui) 0x000000)
-#define CSR_WIFI_ROUTER_OUI_BT ((CsrWifiRouterOui) 0x001958)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterPriority
-
- DESCRIPTION
- As defined in the IEEE 802.11 standards
-
- VALUES
- CSR_WIFI_ROUTER_PRIORITY_QOS_UP0
- - See IEEE 802.11 Standard
- CSR_WIFI_ROUTER_PRIORITY_QOS_UP1
- - See IEEE 802.11 Standard
- CSR_WIFI_ROUTER_PRIORITY_QOS_UP2
- - See IEEE 802.11 Standard
- CSR_WIFI_ROUTER_PRIORITY_QOS_UP3
- - See IEEE 802.11 Standard
- CSR_WIFI_ROUTER_PRIORITY_QOS_UP4
- - See IEEE 802.11 Standard
- CSR_WIFI_ROUTER_PRIORITY_QOS_UP5
- - See IEEE 802.11 Standard
- CSR_WIFI_ROUTER_PRIORITY_QOS_UP6
- - See IEEE 802.11 Standard
- CSR_WIFI_ROUTER_PRIORITY_QOS_UP7
- - See IEEE 802.11 Standard
- CSR_WIFI_ROUTER_PRIORITY_CONTENTION
- - See IEEE 802.11 Standard
- CSR_WIFI_ROUTER_PRIORITY_MANAGEMENT
- - See IEEE 802.11 Standard
-
-*******************************************************************************/
-typedef u16 CsrWifiRouterPriority;
-#define CSR_WIFI_ROUTER_PRIORITY_QOS_UP0 ((CsrWifiRouterPriority) 0x0000)
-#define CSR_WIFI_ROUTER_PRIORITY_QOS_UP1 ((CsrWifiRouterPriority) 0x0001)
-#define CSR_WIFI_ROUTER_PRIORITY_QOS_UP2 ((CsrWifiRouterPriority) 0x0002)
-#define CSR_WIFI_ROUTER_PRIORITY_QOS_UP3 ((CsrWifiRouterPriority) 0x0003)
-#define CSR_WIFI_ROUTER_PRIORITY_QOS_UP4 ((CsrWifiRouterPriority) 0x0004)
-#define CSR_WIFI_ROUTER_PRIORITY_QOS_UP5 ((CsrWifiRouterPriority) 0x0005)
-#define CSR_WIFI_ROUTER_PRIORITY_QOS_UP6 ((CsrWifiRouterPriority) 0x0006)
-#define CSR_WIFI_ROUTER_PRIORITY_QOS_UP7 ((CsrWifiRouterPriority) 0x0007)
-#define CSR_WIFI_ROUTER_PRIORITY_CONTENTION ((CsrWifiRouterPriority) 0x8000)
-#define CSR_WIFI_ROUTER_PRIORITY_MANAGEMENT ((CsrWifiRouterPriority) 0x8010)
-
-
-/* Downstream */
-#define CSR_WIFI_ROUTER_PRIM_DOWNSTREAM_LOWEST (0x0000)
-
-#define CSR_WIFI_ROUTER_MA_PACKET_SUBSCRIBE_REQ ((CsrWifiRouterPrim) (0x0000 + CSR_WIFI_ROUTER_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_MA_PACKET_UNSUBSCRIBE_REQ ((CsrWifiRouterPrim) (0x0001 + CSR_WIFI_ROUTER_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_MA_PACKET_REQ ((CsrWifiRouterPrim) (0x0002 + CSR_WIFI_ROUTER_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_MA_PACKET_RES ((CsrWifiRouterPrim) (0x0003 + CSR_WIFI_ROUTER_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_MA_PACKET_CANCEL_REQ ((CsrWifiRouterPrim) (0x0004 + CSR_WIFI_ROUTER_PRIM_DOWNSTREAM_LOWEST))
-
-
-#define CSR_WIFI_ROUTER_PRIM_DOWNSTREAM_HIGHEST (0x0004 + CSR_WIFI_ROUTER_PRIM_DOWNSTREAM_LOWEST)
-
-/* Upstream */
-#define CSR_WIFI_ROUTER_PRIM_UPSTREAM_LOWEST (0x0000 + CSR_PRIM_UPSTREAM)
-
-#define CSR_WIFI_ROUTER_MA_PACKET_SUBSCRIBE_CFM ((CsrWifiRouterPrim)(0x0000 + CSR_WIFI_ROUTER_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_MA_PACKET_UNSUBSCRIBE_CFM ((CsrWifiRouterPrim)(0x0001 + CSR_WIFI_ROUTER_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_MA_PACKET_CFM ((CsrWifiRouterPrim)(0x0002 + CSR_WIFI_ROUTER_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_ROUTER_MA_PACKET_IND ((CsrWifiRouterPrim)(0x0003 + CSR_WIFI_ROUTER_PRIM_UPSTREAM_LOWEST))
-
-#define CSR_WIFI_ROUTER_PRIM_UPSTREAM_HIGHEST (0x0003 + CSR_WIFI_ROUTER_PRIM_UPSTREAM_LOWEST)
-
-#define CSR_WIFI_ROUTER_PRIM_DOWNSTREAM_COUNT (CSR_WIFI_ROUTER_PRIM_DOWNSTREAM_HIGHEST + 1 - CSR_WIFI_ROUTER_PRIM_DOWNSTREAM_LOWEST)
-#define CSR_WIFI_ROUTER_PRIM_UPSTREAM_COUNT (CSR_WIFI_ROUTER_PRIM_UPSTREAM_HIGHEST + 1 - CSR_WIFI_ROUTER_PRIM_UPSTREAM_LOWEST)
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterMaPacketSubscribeReq
-
- DESCRIPTION
- A task can use this primitive to subscribe for a particular OUI/protocol
- and transmit and receive frames matching the subscription.
- NOTE: Multiple subscriptions for a given protocol and OUI will result in
- the first subscription receiving the data and not the subsequent
- subscriptions.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- encapsulation - Specifies the encapsulation type, which will be used for the
- subscription
- protocol - Together with the OUI, specifies the protocol, which a task
- wants to subscribe to
- oui - Specifies the OUI for the protocol, which a task wants to
- subscribe to
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiRouterEncapsulation encapsulation;
- u16 protocol;
- u32 oui;
-} CsrWifiRouterMaPacketSubscribeReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterMaPacketUnsubscribeReq
-
- DESCRIPTION
- A task sends this primitive to unsubscribe a subscription
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- subscriptionHandle - The handle of the subscription
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- u8 subscriptionHandle;
-} CsrWifiRouterMaPacketUnsubscribeReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterMaPacketReq
-
- DESCRIPTION
- A task sends this primitive to transmit a frame.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- subscriptionHandle - The handle of the subscription
- frameLength - Length of the frame to be sent in bytes
- frame - Pointer to the frame to be sent
- freeFunction - Pointer to function to be used to free the frame
- priority - Priority of the frame, which should be sent
- hostTag - An application shall set the bits b31..b28 using one of
- the CSR_WIFI_ROUTER_APP_TYPE_* masks. Bits b0..b27 can
- be used by the requestor without any restrictions, but
- the hostTag shall be unique so the hostTag for
- CSR_WIFI_ROUTER_APP_TYPE_OTHER should be constructed
- in the following way [ CSR_WIFI_ROUTER_APP_TYPE_OTHER
- (4 bits) | SubscriptionHandle (8 bits) | Sequence no.
- (20 bits) ]. If the hostTag is not unique, the
- behaviour of the system is unpredictable with respect
- to data/management frame transfer.
- cfmRequested - Indicates if the requestor needs a confirm for packet
- requests sent under this subscription. If set to TRUE,
- the router will send a confirm, else it will not send
- any confirm
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- u8 subscriptionHandle;
- u16 frameLength;
- u8 *frame;
- CsrWifiRouterFrameFreeFunction freeFunction;
- CsrWifiRouterPriority priority;
- u32 hostTag;
- u8 cfmRequested;
-} CsrWifiRouterMaPacketReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterMaPacketRes
-
- DESCRIPTION
- A task sends this primitive to confirm the reception of the received
- frame.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- subscriptionHandle - The handle of the subscription
- result - Status of the operation
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- u8 subscriptionHandle;
- CsrResult result;
-} CsrWifiRouterMaPacketRes;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterMaPacketCancelReq
-
- DESCRIPTION
- This primitive is used to request cancellation of a previously sent
- CsrWifiRouterMaPacketReq.
- The frame may already have been transmitted, so there is no guarantee
- that the CsrWifiRouterMaPacketCancelReq actually cancels the transmission
- of the frame in question.
- If the cancellation fails, the Router will send, if required,
- CsrWifiRouterMaPacketCfm.
- If the cancellation succeeds, the Router will not send
- CsrWifiRouterMaPacketCfm.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- hostTag - The hostTag for the frame, which should be cancelled.
- priority - Priority of the frame, which should be cancelled
- peerMacAddress - Destination MAC address of the frame, which should be
- cancelled
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- u32 hostTag;
- CsrWifiRouterPriority priority;
- CsrWifiMacAddress peerMacAddress;
-} CsrWifiRouterMaPacketCancelReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterMaPacketSubscribeCfm
-
- DESCRIPTION
- The router sends this primitive to confirm the result of the
- subscription.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- subscriptionHandle - Handle to the subscription
- This handle must be used in all subsequent requests
- status - Status of the operation
- allocOffset - Size of the offset for the frames of the subscription
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- u8 subscriptionHandle;
- CsrResult status;
- u16 allocOffset;
-} CsrWifiRouterMaPacketSubscribeCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterMaPacketUnsubscribeCfm
-
- DESCRIPTION
- The router sends this primitive to confirm the result of the
- unsubscription.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Status of the operation
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
-} CsrWifiRouterMaPacketUnsubscribeCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterMaPacketCfm
-
- DESCRIPTION
- The router sends the primitive to confirm the result of the transmission
- of the packet of the corresponding CSR_WIFI_ROUTER_MA_PACKET_REQ request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- result - Status of the operation
- hostTag - The hostTag will match the hostTag sent in the request.
- rate - Transmission/Reception rate
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult result;
- u32 hostTag;
- u16 rate;
-} CsrWifiRouterMaPacketCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiRouterMaPacketInd
-
- DESCRIPTION
- The router sends the primitive to a subscribed task when it receives a
- frame matching the subscription.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- subscriptionHandle - The handle of the subscription
- result - Status of the operation
- frameLength - Length of the received frame in bytes
- frame - Pointer to the received frame
- freeFunction - Pointer to function to be used to free the frame
- rssi - Received signal strength indication in dBm
- snr - Signal to Noise Ratio
- rate - Transmission/Reception rate
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- u8 subscriptionHandle;
- CsrResult result;
- u16 frameLength;
- u8 *frame;
- CsrWifiRouterFrameFreeFunction freeFunction;
- s16 rssi;
- s16 snr;
- u16 rate;
-} CsrWifiRouterMaPacketInd;
-
-#endif /* CSR_WIFI_ROUTER_PRIM_H__ */
-
diff --git a/drivers/staging/csr/csr_wifi_router_sef.c b/drivers/staging/csr/csr_wifi_router_sef.c
deleted file mode 100644
index 45a10fb..0000000
--- a/drivers/staging/csr/csr_wifi_router_sef.c
+++ /dev/null
@@ -1,19 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2010
- Confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
- *****************************************************************************/
-#include "csr_wifi_router_sef.h"
-
-const CsrWifiRouterStateHandlerType CsrWifiRouterDownstreamStateHandlers[CSR_WIFI_ROUTER_PRIM_DOWNSTREAM_COUNT] =
-{
- /* 0x0000 */ CsrWifiRouterMaPacketSubscribeReqHandler,
- /* 0x0001 */ CsrWifiRouterMaPacketUnsubscribeReqHandler,
- /* 0x0002 */ CsrWifiRouterMaPacketReqHandler,
- /* 0x0003 */ CsrWifiRouterMaPacketResHandler,
- /* 0x0004 */ CsrWifiRouterMaPacketCancelReqHandler,
-};
diff --git a/drivers/staging/csr/csr_wifi_router_sef.h b/drivers/staging/csr/csr_wifi_router_sef.h
deleted file mode 100644
index 86692c7..0000000
--- a/drivers/staging/csr/csr_wifi_router_sef.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2010
- Confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
- *****************************************************************************/
-#ifndef CSR_WIFI_ROUTER_SEF_CSR_WIFI_ROUTER_H__
-#define CSR_WIFI_ROUTER_SEF_CSR_WIFI_ROUTER_H__
-
-#include "csr_wifi_router_prim.h"
-
- typedef void (*CsrWifiRouterStateHandlerType)(void* drvpriv, CsrWifiFsmEvent* msg);
-
- extern const CsrWifiRouterStateHandlerType CsrWifiRouterDownstreamStateHandlers[CSR_WIFI_ROUTER_PRIM_DOWNSTREAM_COUNT];
-
- extern void CsrWifiRouterMaPacketSubscribeReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterMaPacketUnsubscribeReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterMaPacketReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterMaPacketResHandler(void* drvpriv, CsrWifiFsmEvent* msg);
- extern void CsrWifiRouterMaPacketCancelReqHandler(void* drvpriv, CsrWifiFsmEvent* msg);
-
-#endif /* CSR_WIFI_ROUTER_SEF_CSR_WIFI_ROUTER_H__ */
diff --git a/drivers/staging/csr/csr_wifi_router_serialize.c b/drivers/staging/csr/csr_wifi_router_serialize.c
deleted file mode 100644
index 4eccf5d..0000000
--- a/drivers/staging/csr/csr_wifi_router_serialize.c
+++ /dev/null
@@ -1,418 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-#include <linux/slab.h>
-#include "csr_msgconv.h"
-#include "csr_wifi_router_prim.h"
-#include "csr_wifi_router_serialize.h"
-
-void CsrWifiRouterPfree(void *ptr)
-{
- kfree(ptr);
-}
-
-
-size_t CsrWifiRouterMaPacketSubscribeReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 12) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 1; /* CsrWifiRouterEncapsulation primitive->encapsulation */
- bufferSize += 2; /* u16 primitive->protocol */
- bufferSize += 4; /* u32 primitive->oui */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterMaPacketSubscribeReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterMaPacketSubscribeReq *primitive = (CsrWifiRouterMaPacketSubscribeReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint8Ser(ptr, len, (u8) primitive->encapsulation);
- CsrUint16Ser(ptr, len, (u16) primitive->protocol);
- CsrUint32Ser(ptr, len, (u32) primitive->oui);
- return(ptr);
-}
-
-
-void* CsrWifiRouterMaPacketSubscribeReqDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterMaPacketSubscribeReq *primitive = kmalloc(sizeof(CsrWifiRouterMaPacketSubscribeReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->encapsulation, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->protocol, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->oui, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterMaPacketReqSizeof(void *msg)
-{
- CsrWifiRouterMaPacketReq *primitive = (CsrWifiRouterMaPacketReq *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 20) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 1; /* u8 primitive->subscriptionHandle */
- bufferSize += 2; /* u16 primitive->frameLength */
- bufferSize += primitive->frameLength; /* u8 primitive->frame */
- bufferSize += 4; /* CsrWifiRouterFrameFreeFunction primitive->freeFunction */
- bufferSize += 2; /* CsrWifiRouterPriority primitive->priority */
- bufferSize += 4; /* u32 primitive->hostTag */
- bufferSize += 1; /* u8 primitive->cfmRequested */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterMaPacketReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterMaPacketReq *primitive = (CsrWifiRouterMaPacketReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint8Ser(ptr, len, (u8) primitive->subscriptionHandle);
- CsrUint16Ser(ptr, len, (u16) primitive->frameLength);
- if (primitive->frameLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->frame, ((u16) (primitive->frameLength)));
- }
- CsrUint32Ser(ptr, len, 0); /* Special for Function Pointers... primitive->freeFunction */
- CsrUint16Ser(ptr, len, (u16) primitive->priority);
- CsrUint32Ser(ptr, len, (u32) primitive->hostTag);
- CsrUint8Ser(ptr, len, (u8) primitive->cfmRequested);
- return(ptr);
-}
-
-
-void* CsrWifiRouterMaPacketReqDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterMaPacketReq *primitive = kmalloc(sizeof(CsrWifiRouterMaPacketReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->subscriptionHandle, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->frameLength, buffer, &offset);
- if (primitive->frameLength)
- {
- primitive->frame = kmalloc(primitive->frameLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->frame, buffer, &offset, ((u16) (primitive->frameLength)));
- }
- else
- {
- primitive->frame = NULL;
- }
- primitive->freeFunction = NULL; /* Special for Function Pointers... */
- offset += 4;
- CsrUint16Des((u16 *) &primitive->priority, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->hostTag, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->cfmRequested, buffer, &offset);
-
- return primitive;
-}
-
-
-void CsrWifiRouterMaPacketReqSerFree(void *voidPrimitivePointer)
-{
- CsrWifiRouterMaPacketReq *primitive = (CsrWifiRouterMaPacketReq *) voidPrimitivePointer;
- kfree(primitive->frame);
- kfree(primitive);
-}
-
-
-size_t CsrWifiRouterMaPacketResSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 8) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 1; /* u8 primitive->subscriptionHandle */
- bufferSize += 2; /* CsrResult primitive->result */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterMaPacketResSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterMaPacketRes *primitive = (CsrWifiRouterMaPacketRes *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint8Ser(ptr, len, (u8) primitive->subscriptionHandle);
- CsrUint16Ser(ptr, len, (u16) primitive->result);
- return(ptr);
-}
-
-
-void* CsrWifiRouterMaPacketResDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterMaPacketRes *primitive = kmalloc(sizeof(CsrWifiRouterMaPacketRes), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->subscriptionHandle, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->result, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterMaPacketCancelReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 17) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 4; /* u32 primitive->hostTag */
- bufferSize += 2; /* CsrWifiRouterPriority primitive->priority */
- bufferSize += 6; /* u8 primitive->peerMacAddress.a[6] */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterMaPacketCancelReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterMaPacketCancelReq *primitive = (CsrWifiRouterMaPacketCancelReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint32Ser(ptr, len, (u32) primitive->hostTag);
- CsrUint16Ser(ptr, len, (u16) primitive->priority);
- CsrMemCpySer(ptr, len, (const void *) primitive->peerMacAddress.a, ((u16) (6)));
- return(ptr);
-}
-
-
-void* CsrWifiRouterMaPacketCancelReqDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterMaPacketCancelReq *primitive = kmalloc(sizeof(CsrWifiRouterMaPacketCancelReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->hostTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->priority, buffer, &offset);
- CsrMemCpyDes(primitive->peerMacAddress.a, buffer, &offset, ((u16) (6)));
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterMaPacketSubscribeCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 10) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 1; /* u8 primitive->subscriptionHandle */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 2; /* u16 primitive->allocOffset */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterMaPacketSubscribeCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterMaPacketSubscribeCfm *primitive = (CsrWifiRouterMaPacketSubscribeCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint8Ser(ptr, len, (u8) primitive->subscriptionHandle);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint16Ser(ptr, len, (u16) primitive->allocOffset);
- return(ptr);
-}
-
-
-void* CsrWifiRouterMaPacketSubscribeCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterMaPacketSubscribeCfm *primitive = kmalloc(sizeof(CsrWifiRouterMaPacketSubscribeCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->subscriptionHandle, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->allocOffset, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterMaPacketUnsubscribeCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 7) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterMaPacketUnsubscribeCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterMaPacketUnsubscribeCfm *primitive = (CsrWifiRouterMaPacketUnsubscribeCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- return(ptr);
-}
-
-
-void* CsrWifiRouterMaPacketUnsubscribeCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterMaPacketUnsubscribeCfm *primitive = kmalloc(sizeof(CsrWifiRouterMaPacketUnsubscribeCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterMaPacketCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 13) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->result */
- bufferSize += 4; /* u32 primitive->hostTag */
- bufferSize += 2; /* u16 primitive->rate */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterMaPacketCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterMaPacketCfm *primitive = (CsrWifiRouterMaPacketCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->result);
- CsrUint32Ser(ptr, len, (u32) primitive->hostTag);
- CsrUint16Ser(ptr, len, (u16) primitive->rate);
- return(ptr);
-}
-
-
-void* CsrWifiRouterMaPacketCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterMaPacketCfm *primitive = kmalloc(sizeof(CsrWifiRouterMaPacketCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->result, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->hostTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->rate, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiRouterMaPacketIndSizeof(void *msg)
-{
- CsrWifiRouterMaPacketInd *primitive = (CsrWifiRouterMaPacketInd *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 21) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 1; /* u8 primitive->subscriptionHandle */
- bufferSize += 2; /* CsrResult primitive->result */
- bufferSize += 2; /* u16 primitive->frameLength */
- bufferSize += primitive->frameLength; /* u8 primitive->frame */
- bufferSize += 4; /* CsrWifiRouterFrameFreeFunction primitive->freeFunction */
- bufferSize += 2; /* s16 primitive->rssi */
- bufferSize += 2; /* s16 primitive->snr */
- bufferSize += 2; /* u16 primitive->rate */
- return bufferSize;
-}
-
-
-u8* CsrWifiRouterMaPacketIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiRouterMaPacketInd *primitive = (CsrWifiRouterMaPacketInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint8Ser(ptr, len, (u8) primitive->subscriptionHandle);
- CsrUint16Ser(ptr, len, (u16) primitive->result);
- CsrUint16Ser(ptr, len, (u16) primitive->frameLength);
- if (primitive->frameLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->frame, ((u16) (primitive->frameLength)));
- }
- CsrUint32Ser(ptr, len, 0); /* Special for Function Pointers... primitive->freeFunction */
- CsrUint16Ser(ptr, len, (u16) primitive->rssi);
- CsrUint16Ser(ptr, len, (u16) primitive->snr);
- CsrUint16Ser(ptr, len, (u16) primitive->rate);
- return(ptr);
-}
-
-
-void* CsrWifiRouterMaPacketIndDes(u8 *buffer, size_t length)
-{
- CsrWifiRouterMaPacketInd *primitive = kmalloc(sizeof(CsrWifiRouterMaPacketInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->subscriptionHandle, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->result, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->frameLength, buffer, &offset);
- if (primitive->frameLength)
- {
- primitive->frame = kmalloc(primitive->frameLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->frame, buffer, &offset, ((u16) (primitive->frameLength)));
- }
- else
- {
- primitive->frame = NULL;
- }
- primitive->freeFunction = NULL; /* Special for Function Pointers... */
- offset += 4;
- CsrUint16Des((u16 *) &primitive->rssi, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->snr, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->rate, buffer, &offset);
-
- return primitive;
-}
-
-
-void CsrWifiRouterMaPacketIndSerFree(void *voidPrimitivePointer)
-{
- CsrWifiRouterMaPacketInd *primitive = (CsrWifiRouterMaPacketInd *) voidPrimitivePointer;
- kfree(primitive->frame);
- kfree(primitive);
-}
-
-
diff --git a/drivers/staging/csr/csr_wifi_router_serialize.h b/drivers/staging/csr/csr_wifi_router_serialize.h
deleted file mode 100644
index 94ccdac..0000000
--- a/drivers/staging/csr/csr_wifi_router_serialize.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#ifndef CSR_WIFI_ROUTER_SERIALIZE_H__
-#define CSR_WIFI_ROUTER_SERIALIZE_H__
-
-#include "csr_wifi_msgconv.h"
-#include "csr_wifi_router_prim.h"
-
-extern void CsrWifiRouterPfree(void *ptr);
-
-extern u8* CsrWifiRouterMaPacketSubscribeReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterMaPacketSubscribeReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterMaPacketSubscribeReqSizeof(void *msg);
-#define CsrWifiRouterMaPacketSubscribeReqSerFree CsrWifiRouterPfree
-
-#define CsrWifiRouterMaPacketUnsubscribeReqSer CsrWifiEventCsrUint16CsrUint8Ser
-#define CsrWifiRouterMaPacketUnsubscribeReqDes CsrWifiEventCsrUint16CsrUint8Des
-#define CsrWifiRouterMaPacketUnsubscribeReqSizeof CsrWifiEventCsrUint16CsrUint8Sizeof
-#define CsrWifiRouterMaPacketUnsubscribeReqSerFree CsrWifiRouterPfree
-
-extern u8* CsrWifiRouterMaPacketReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterMaPacketReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterMaPacketReqSizeof(void *msg);
-extern void CsrWifiRouterMaPacketReqSerFree(void *msg);
-
-extern u8* CsrWifiRouterMaPacketResSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterMaPacketResDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterMaPacketResSizeof(void *msg);
-#define CsrWifiRouterMaPacketResSerFree CsrWifiRouterPfree
-
-extern u8* CsrWifiRouterMaPacketCancelReqSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterMaPacketCancelReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterMaPacketCancelReqSizeof(void *msg);
-#define CsrWifiRouterMaPacketCancelReqSerFree CsrWifiRouterPfree
-
-extern u8* CsrWifiRouterMaPacketSubscribeCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterMaPacketSubscribeCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterMaPacketSubscribeCfmSizeof(void *msg);
-#define CsrWifiRouterMaPacketSubscribeCfmSerFree CsrWifiRouterPfree
-
-extern u8* CsrWifiRouterMaPacketUnsubscribeCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterMaPacketUnsubscribeCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterMaPacketUnsubscribeCfmSizeof(void *msg);
-#define CsrWifiRouterMaPacketUnsubscribeCfmSerFree CsrWifiRouterPfree
-
-extern u8* CsrWifiRouterMaPacketCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterMaPacketCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterMaPacketCfmSizeof(void *msg);
-#define CsrWifiRouterMaPacketCfmSerFree CsrWifiRouterPfree
-
-extern u8* CsrWifiRouterMaPacketIndSer(u8 *ptr, size_t *len, void *msg);
-extern void* CsrWifiRouterMaPacketIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiRouterMaPacketIndSizeof(void *msg);
-extern void CsrWifiRouterMaPacketIndSerFree(void *msg);
-
-#endif /* CSR_WIFI_ROUTER_SERIALIZE_H__ */
-
diff --git a/drivers/staging/csr/csr_wifi_router_task.h b/drivers/staging/csr/csr_wifi_router_task.h
deleted file mode 100644
index 9ba892f..0000000
--- a/drivers/staging/csr/csr_wifi_router_task.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#ifndef CSR_WIFI_ROUTER_TASK_H__
-#define CSR_WIFI_ROUTER_TASK_H__
-
-#include "csr_sched.h"
-
-#define CSR_WIFI_ROUTER_LOG_ID 0x1201FFFF
-extern CsrSchedQid CSR_WIFI_ROUTER_IFACEQUEUE;
-void CsrWifiRouterInit(void **gash);
-void CsrWifiRouterDeinit(void **gash);
-void CsrWifiRouterHandler(void **gash);
-
-#endif /* CSR_WIFI_ROUTER_TASK_H__ */
-
diff --git a/drivers/staging/csr/csr_wifi_router_transport.c b/drivers/staging/csr/csr_wifi_router_transport.c
deleted file mode 100644
index e905ead..0000000
--- a/drivers/staging/csr/csr_wifi_router_transport.c
+++ /dev/null
@@ -1,199 +0,0 @@
-/** @file csr_wifi_router_transport.c
- *
- *
- * Copyright (C) Cambridge Silicon Radio Ltd 2006-2010. All rights reserved.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- ****************************************************************************/
-
-#include "unifi_priv.h"
-
-#include "csr_sched.h"
-#include "csr_msgconv.h"
-
-#include "sme_userspace.h"
-
-#include "csr_wifi_hostio_prim.h"
-#include "csr_wifi_router_lib.h"
-#include "csr_wifi_router_sef.h"
-#include "csr_wifi_router_converter_init.h"
-#include "csr_wifi_router_ctrl_lib.h"
-#include "csr_wifi_router_ctrl_sef.h"
-#include "csr_wifi_router_ctrl_converter_init.h"
-#include "csr_wifi_sme_prim.h"
-#include "csr_wifi_sme_sef.h"
-#include "csr_wifi_sme_converter_init.h"
-#ifdef CSR_SUPPORT_WEXT
-#ifdef CSR_SUPPORT_WEXT_AP
-#include "csr_wifi_nme_ap_prim.h"
-#include "csr_wifi_nme_ap_sef.h"
-#include "csr_wifi_nme_ap_converter_init.h"
-#endif
-#endif
-
-static unifi_priv_t *drvpriv = NULL;
-void CsrWifiRouterTransportInit(unifi_priv_t *priv)
-{
- unifi_trace(priv, UDBG1, "CsrWifiRouterTransportInit: \n");
-
- drvpriv = priv;
- (void)CsrMsgConvInit();
- CsrWifiRouterConverterInit();
- CsrWifiRouterCtrlConverterInit();
- CsrWifiSmeConverterInit();
-#ifdef CSR_SUPPORT_WEXT
-#ifdef CSR_SUPPORT_WEXT_AP
- CsrWifiNmeApConverterInit();
-#endif
-#endif
-}
-
-void CsrWifiRouterTransportRecv(unifi_priv_t *priv, u8* buffer, size_t bufferLength)
-{
- CsrMsgConvMsgEntry* msgEntry;
- u16 primType;
- CsrSchedQid src;
- CsrSchedQid dest;
- u16 msgType;
- size_t offset = 0;
- CsrWifiFsmEvent* msg;
-
- /* Decode the prim and message type */
- CsrUint16Des(&primType, buffer, &offset);
- CsrUint16Des(&src, buffer, &offset);
- CsrUint16Des(&dest, buffer, &offset);
- CsrUint16Des(&msgType, buffer, &offset);
- offset -= 2; /* Adjust as the Deserialise Function will read this as well */
-
- unifi_trace(priv, UDBG4, "CsrWifiRouterTransportRecv: primType=0x%.4X, msgType=0x%.4X, bufferLength=%d\n",
- primType, msgType, bufferLength);
-
- /* Special handling for HOSTIO messages.... */
- if (primType == CSR_WIFI_HOSTIO_PRIM)
- {
- CsrWifiRouterCtrlHipReq req = {{CSR_WIFI_ROUTER_CTRL_HIP_REQ, CSR_WIFI_ROUTER_CTRL_PRIM, dest, src, NULL}, 0, NULL, 0, NULL, 0, NULL};
-
- req.mlmeCommandLength = bufferLength;
- req.mlmeCommand = buffer;
-
- offset += 8;/* Skip the id, src, dest and slot number */
- CsrUint16Des(&req.dataRef1Length, buffer, &offset);
- offset += 2; /* Skip the slot number */
- CsrUint16Des(&req.dataRef2Length, buffer, &offset);
-
- if (req.dataRef1Length)
- {
- u16 dr1Offset = (bufferLength - req.dataRef2Length) - req.dataRef1Length;
- req.dataRef1 = &buffer[dr1Offset];
- }
-
- if (req.dataRef2Length)
- {
- u16 dr2Offset = bufferLength - req.dataRef2Length;
- req.dataRef2 = &buffer[dr2Offset];
- }
-
- /* Copy the hip data but strip off the prim type */
- req.mlmeCommandLength -= (req.dataRef1Length + req.dataRef2Length + 6);
- req.mlmeCommand = &buffer[6];
-
- CsrWifiRouterCtrlHipReqHandler(priv, &req.common);
- return;
- }
-
- msgEntry = CsrMsgConvFindEntry(primType, msgType);
- if (!msgEntry)
- {
- unifi_error(priv, "CsrWifiRouterTransportRecv cannot process the message. primType=0x%.4X, msgType=0x%.4X\n",
- primType, msgType);
- dump(buffer, bufferLength);
- return;
- }
-
- msg = (CsrWifiFsmEvent*)(msgEntry->deserFunc)(&buffer[offset], bufferLength - offset);
-
- msg->primtype = primType;
- msg->type = msgType;
- msg->source = src;
- msg->destination = dest;
-
- switch(primType)
- {
- case CSR_WIFI_ROUTER_CTRL_PRIM:
- CsrWifiRouterCtrlDownstreamStateHandlers[msg->type - CSR_WIFI_ROUTER_CTRL_PRIM_DOWNSTREAM_LOWEST](priv, msg);
- CsrWifiRouterCtrlFreeDownstreamMessageContents(CSR_WIFI_ROUTER_CTRL_PRIM, msg);
- break;
- case CSR_WIFI_ROUTER_PRIM:
- CsrWifiRouterDownstreamStateHandlers[msg->type - CSR_WIFI_ROUTER_PRIM_DOWNSTREAM_LOWEST](priv, msg);
- CsrWifiRouterFreeDownstreamMessageContents(CSR_WIFI_ROUTER_PRIM, msg);
- break;
- case CSR_WIFI_SME_PRIM:
- CsrWifiSmeUpstreamStateHandlers[msg->type - CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST](priv, msg);
- CsrWifiSmeFreeUpstreamMessageContents(CSR_WIFI_SME_PRIM, msg);
- break;
-#ifdef CSR_SUPPORT_WEXT
-#ifdef CSR_SUPPORT_WEXT_AP
- case CSR_WIFI_NME_AP_PRIM:
- CsrWifiNmeApUpstreamStateHandlers(priv, msg);
- CsrWifiNmeApFreeUpstreamMessageContents(CSR_WIFI_NME_AP_PRIM, msg);
- break;
-#endif
-#endif
- default:
- unifi_error(priv, "CsrWifiRouterTransportRecv unhandled prim type 0x%.4X\n", primType);
- break;
- }
- kfree(msg);
-}
-
-static void CsrWifiRouterTransportSerialiseAndSend(u16 primType, void* msg)
-{
- CsrWifiFsmEvent* evt = (CsrWifiFsmEvent*)msg;
- CsrMsgConvMsgEntry* msgEntry;
- size_t msgSize;
- size_t encodeBufferLen = 0;
- size_t offset = 0;
- u8* encodeBuffer;
-
- unifi_trace(drvpriv, UDBG4, "CsrWifiRouterTransportSerialiseAndSend: primType=0x%.4X, msgType=0x%.4X\n",
- primType, evt->type);
-
- msgEntry = CsrMsgConvFindEntry(primType, evt->type);
- if (!msgEntry)
- {
- unifi_error(drvpriv, "CsrWifiRouterTransportSerialiseAndSend cannot process the message. primType=0x%.4X, msgType=0x%.4X\n",
- primType, evt->type);
- return;
- }
-
- msgSize = 6 + (msgEntry->sizeofFunc)((void*)msg);
-
- encodeBuffer = kmalloc(msgSize, GFP_KERNEL);
-
- /* Encode PrimType */
- CsrUint16Ser(encodeBuffer, &encodeBufferLen, primType);
- CsrUint16Ser(encodeBuffer, &encodeBufferLen, evt->source);
- CsrUint16Ser(encodeBuffer, &encodeBufferLen, evt->destination);
-
- (void)(msgEntry->serFunc)(&encodeBuffer[encodeBufferLen], &offset, msg);
- encodeBufferLen += offset;
-
- uf_sme_queue_message(drvpriv, encodeBuffer, encodeBufferLen);
-
- /* Do not use msgEntry->freeFunc because the memory is owned by the driver */
- kfree(msg);
-}
-
-#if defined(CSR_LOG_ENABLE) && defined(CSR_LOG_INCLUDE_FILE_NAME_AND_LINE_NUMBER)
-void CsrSchedMessagePutStringLog(CsrSchedQid q, u16 mi, void *mv, u32 line, char *file)
-#else
-void CsrSchedMessagePut(CsrSchedQid q, u16 mi, void *mv)
-#endif
-{
- CsrWifiFsmEvent* evt = (CsrWifiFsmEvent*)mv;
- evt->destination = q;
- CsrWifiRouterTransportSerialiseAndSend(mi, mv);
-}
-
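The two functions above pin down the wire format exchanged with the SME: three 16-bit header fields (primitive type, source queue, destination queue) followed by the serialised event, which itself begins with the 16-bit event type. That is why CsrWifiRouterTransportRecv rewinds the offset by two bytes before calling the deserialise function. A minimal sketch of the header encoding, assuming a caller-supplied primType and event evt and reusing the CsrUint16Ser helper:

    /* Minimal sketch (not part of the original file): the 6-byte header
     * written ahead of the serialised event body. */
    u8 header[6];
    size_t len = 0;

    CsrUint16Ser(header, &len, primType);          /* primitive type         */
    CsrUint16Ser(header, &len, evt->source);       /* source task queue      */
    CsrUint16Ser(header, &len, evt->destination);  /* destination task queue */
    /* len == 6 here; the event body, starting with evt->type, follows. */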
diff --git a/drivers/staging/csr/csr_wifi_serialize_primitive_types.c b/drivers/staging/csr/csr_wifi_serialize_primitive_types.c
deleted file mode 100644
index dd93d00..0000000
--- a/drivers/staging/csr/csr_wifi_serialize_primitive_types.c
+++ /dev/null
@@ -1,256 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include "csr_macro.h"
-#include "csr_msgconv.h"
-#include "csr_wifi_msgconv.h"
-#include "csr_wifi_lib.h"
-
-void CsrUint24Des(u32 *v, u8 *buffer, size_t *offset)
-{
- u32 val;
-
- val = ((buffer[(*offset) + 2] << 16) |
- (buffer[(*offset) + 1] << 8) |
- (buffer[(*offset)]));
-
- *offset += 3;
- *v = val;
-}
-
-
-/* Big endian: e.g. WSC, TCLAS */
-void CsrUint16DesBigEndian(u16 *v, u8 *buffer, size_t *offset)
-{
- u16 val;
-
- val = (buffer[(*offset)] << 8) | (buffer[(*offset) + 1]);
- *offset += 2;
-
- *v = val;
-}
-
-
-void CsrUint24DesBigEndian(u32 *v, u8 *buffer, size_t *offset)
-{
- u32 val;
-
- val = ((buffer[(*offset)] << 16) |
- (buffer[(*offset) + 1] << 8) |
- (buffer[(*offset) + 2]));
-
- *offset += 3;
- *v = val;
-}
-
-
-void CsrUint32DesBigEndian(u32 *v, u8 *buffer, size_t *offset)
-{
- u32 val;
-
- val = ((buffer[(*offset)] << 24) |
- (buffer[(*offset) + 1] << 16) |
- (buffer[(*offset) + 2] << 8) |
- (buffer[(*offset) + 3]));
-
- *offset += 4;
- *v = val;
-}
-
-
-void CsrUint24Ser(u8 *ptr, size_t *len, u32 v)
-{
- ptr[(*len) + 2] = (u8)((v & 0x00ff0000) >> 16);
- ptr[(*len) + 1] = (u8)((v & 0x0000ff00) >> 8);
- ptr[(*len)] = (u8)((v & 0x000000ff));
-
- *len += 3;
-}
-
-
-/* Big endian: e.g. WSC, TCLAS */
-void CsrUint16SerBigEndian(u8 *ptr, size_t *len, u16 v)
-{
- ptr[(*len)] = (u8)((v & 0xff00) >> 8);
- ptr[(*len) + 1] = (u8)((v & 0x00ff));
-
- *len += 2;
-}
-
-
-void CsrUint32SerBigEndian(u8 *ptr, size_t *len, u32 v)
-{
- ptr[(*len)] = (u8)((v & 0xff000000) >> 24);
- ptr[(*len) + 1] = (u8)((v & 0x00ff0000) >> 16);
- ptr[(*len) + 2] = (u8)((v & 0x0000ff00) >> 8);
- ptr[(*len) + 3] = (u8)((v & 0x000000ff));
-
- *len += 4;
-}
-
-
-void CsrUint24SerBigEndian(u8 *ptr, size_t *len, u32 v)
-{
- ptr[(*len)] = (u8)((v & 0x00ff0000) >> 16);
- ptr[(*len) + 1] = (u8)((v & 0x0000ff00) >> 8);
- ptr[(*len) + 2] = (u8)((v & 0x000000ff));
-
- *len += 3;
-}
-
-
-size_t CsrWifiEventSizeof(void *msg)
-{
- return 2;
-}
-EXPORT_SYMBOL_GPL(CsrWifiEventSizeof);
-
-u8* CsrWifiEventSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiFsmEvent *primitive = (CsrWifiFsmEvent *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->type);
- return(ptr);
-}
-EXPORT_SYMBOL_GPL(CsrWifiEventSer);
-
-void* CsrWifiEventDes(u8 *buffer, size_t length)
-{
- CsrWifiFsmEvent *primitive = kmalloc(sizeof(CsrWifiFsmEvent), GFP_KERNEL);
- size_t offset = 0;
- CsrUint16Des(&primitive->type, buffer, &offset);
-
- return primitive;
-}
-EXPORT_SYMBOL_GPL(CsrWifiEventDes);
-
-size_t CsrWifiEventCsrUint8Sizeof(void *msg)
-{
- return 3;
-}
-EXPORT_SYMBOL_GPL(CsrWifiEventCsrUint8Sizeof);
-
-u8* CsrWifiEventCsrUint8Ser(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiEventCsrUint8 *primitive = (CsrWifiEventCsrUint8 *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint8Ser(ptr, len, primitive->value);
- return(ptr);
-}
-EXPORT_SYMBOL_GPL(CsrWifiEventCsrUint8Ser);
-
-
-void* CsrWifiEventCsrUint8Des(u8 *buffer, size_t length)
-{
- CsrWifiEventCsrUint8 *primitive = kmalloc(sizeof(CsrWifiEventCsrUint8), GFP_KERNEL);
-
- size_t offset = 0;
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint8Des(&primitive->value, buffer, &offset);
-
- return primitive;
-}
-EXPORT_SYMBOL_GPL(CsrWifiEventCsrUint8Des);
-
-
-size_t CsrWifiEventCsrUint16Sizeof(void *msg)
-{
- return 4;
-}
-EXPORT_SYMBOL_GPL(CsrWifiEventCsrUint16Sizeof);
-
-
-u8* CsrWifiEventCsrUint16Ser(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiEventCsrUint16 *primitive = (CsrWifiEventCsrUint16 *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, primitive->value);
- return(ptr);
-}
-EXPORT_SYMBOL_GPL(CsrWifiEventCsrUint16Ser);
-
-void* CsrWifiEventCsrUint16Des(u8 *buffer, size_t length)
-{
- CsrWifiEventCsrUint16 *primitive = kmalloc(sizeof(CsrWifiEventCsrUint16), GFP_KERNEL);
-
- size_t offset = 0;
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des(&primitive->value, buffer, &offset);
-
- return primitive;
-}
-EXPORT_SYMBOL_GPL(CsrWifiEventCsrUint16Des);
-
-
-size_t CsrWifiEventCsrUint32Sizeof(void *msg)
-{
- return 6;
-}
-EXPORT_SYMBOL_GPL(CsrWifiEventCsrUint32Sizeof);
-
-u8* CsrWifiEventCsrUint32Ser(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiEventCsrUint32 *primitive = (CsrWifiEventCsrUint32 *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint32Ser(ptr, len, primitive->value);
- return(ptr);
-}
-EXPORT_SYMBOL_GPL(CsrWifiEventCsrUint32Ser);
-
-
-void* CsrWifiEventCsrUint32Des(u8 *buffer, size_t length)
-{
- CsrWifiEventCsrUint32 *primitive = kmalloc(sizeof(CsrWifiEventCsrUint32), GFP_KERNEL);
-
- size_t offset = 0;
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint32Des(&primitive->value, buffer, &offset);
-
- return primitive;
-}
-EXPORT_SYMBOL_GPL(CsrWifiEventCsrUint32Des);
-
-size_t CsrWifiEventCsrUint16CsrUint8Sizeof(void *msg)
-{
- return 5;
-}
-EXPORT_SYMBOL_GPL(CsrWifiEventCsrUint16CsrUint8Sizeof);
-
-u8* CsrWifiEventCsrUint16CsrUint8Ser(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiEventCsrUint16CsrUint8 *primitive = (CsrWifiEventCsrUint16CsrUint8 *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, primitive->value16);
- CsrUint8Ser(ptr, len, primitive->value8);
- return(ptr);
-}
-EXPORT_SYMBOL_GPL(CsrWifiEventCsrUint16CsrUint8Ser);
-
-
-void* CsrWifiEventCsrUint16CsrUint8Des(u8 *buffer, size_t length)
-{
- CsrWifiEventCsrUint16CsrUint8 *primitive = kmalloc(sizeof(CsrWifiEventCsrUint16CsrUint8), GFP_KERNEL);
-
- size_t offset = 0;
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des(&primitive->value16, buffer, &offset);
- CsrUint8Des(&primitive->value8, buffer, &offset);
-
- return primitive;
-}
-EXPORT_SYMBOL_GPL(CsrWifiEventCsrUint16CsrUint8Des);
-
-
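The big-endian variants above cover fields that are defined most significant byte first on air, such as WSC and TCLAS elements, while the unsuffixed helpers (see CsrUint24Ser/CsrUint24Des above) pack the least significant byte first. A short round-trip sketch with illustrative values only:

    /* Illustrative round trip using the helpers defined above. */
    u8 buf[3];
    size_t len = 0, offset = 0;
    u16 v16 = 0;
    u32 v24 = 0;

    CsrUint16SerBigEndian(buf, &len, 0x1234);     /* buf = { 0x12, 0x34 }       */
    CsrUint16DesBigEndian(&v16, buf, &offset);    /* v16 == 0x1234              */

    len = 0;
    offset = 0;
    CsrUint24SerBigEndian(buf, &len, 0xABCDEF);   /* buf = { 0xAB, 0xCD, 0xEF } */
    CsrUint24DesBigEndian(&v24, buf, &offset);    /* v24 == 0xABCDEF            */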
diff --git a/drivers/staging/csr/csr_wifi_sme_ap_lib.h b/drivers/staging/csr/csr_wifi_sme_ap_lib.h
deleted file mode 100644
index 48ea914..0000000
--- a/drivers/staging/csr/csr_wifi_sme_ap_lib.h
+++ /dev/null
@@ -1,774 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#ifndef CSR_WIFI_SME_AP_LIB_H__
-#define CSR_WIFI_SME_AP_LIB_H__
-
-#include "csr_sched.h"
-#include "csr_macro.h"
-#include "csr_msg_transport.h"
-
-#include "csr_wifi_lib.h"
-
-#include "csr_wifi_sme_ap_prim.h"
-#include "csr_wifi_sme_task.h"
-
-#ifndef CSR_WIFI_AP_ENABLE
-#error CSR_WIFI_AP_ENABLE MUST be defined in order to use csr_wifi_sme_ap_lib.h
-#endif
-
-/*----------------------------------------------------------------------------*
- * CsrWifiSmeApFreeUpstreamMessageContents
- *
- * DESCRIPTION
- * Free the allocated memory in a CSR_WIFI_SME_AP upstream message. Does not
- * free the message itself, and can only be used for upstream messages.
- *
- * PARAMETERS
- *      eventClass - Event class of the message
- *      message    - Upstream message whose contents are to be freed
- *----------------------------------------------------------------------------*/
-void CsrWifiSmeApFreeUpstreamMessageContents(u16 eventClass, void *message);
-
-/*----------------------------------------------------------------------------*
- * CsrWifiSmeApFreeDownstreamMessageContents
- *
- * DESCRIPTION
- * Free the allocated memory in a CSR_WIFI_SME_AP downstream message. Does not
- * free the message itself, and can only be used for downstream messages.
- *
- * PARAMETERS
- *      eventClass - Event class of the message
- *      message    - Downstream message whose contents are to be freed
- *----------------------------------------------------------------------------*/
-void CsrWifiSmeApFreeDownstreamMessageContents(u16 eventClass, void *message);
-
-/*----------------------------------------------------------------------------*
- * Enum to string functions
- *----------------------------------------------------------------------------*/
-const char* CsrWifiSmeApAccessTypeToString(CsrWifiSmeApAccessType value);
-const char* CsrWifiSmeApAuthSupportToString(CsrWifiSmeApAuthSupport value);
-const char* CsrWifiSmeApAuthTypeToString(CsrWifiSmeApAuthType value);
-const char* CsrWifiSmeApDirectionToString(CsrWifiSmeApDirection value);
-const char* CsrWifiSmeApPhySupportToString(CsrWifiSmeApPhySupport value);
-const char* CsrWifiSmeApTypeToString(CsrWifiSmeApType value);
-
-
-/*----------------------------------------------------------------------------*
- * CsrPrim Type toString function.
- * Converts a message type to the String name of the Message
- *----------------------------------------------------------------------------*/
-const char* CsrWifiSmeApPrimTypeToString(CsrPrim msgType);
-
-/*----------------------------------------------------------------------------*
- * Lookup arrays for PrimType name Strings
- *----------------------------------------------------------------------------*/
-extern const char *CsrWifiSmeApUpstreamPrimNames[CSR_WIFI_SME_AP_PRIM_UPSTREAM_COUNT];
-extern const char *CsrWifiSmeApDownstreamPrimNames[CSR_WIFI_SME_AP_PRIM_DOWNSTREAM_COUNT];
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApActiveBaGetReqSend
-
- DESCRIPTION
-    This primitive is used to retrieve information related to the active
-    block ack sessions
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
-
-*******************************************************************************/
-#define CsrWifiSmeApActiveBaGetReqCreate(msg__, dst__, src__, interfaceTag__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeApActiveBaGetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_AP_PRIM, CSR_WIFI_SME_AP_ACTIVE_BA_GET_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__);
-
-#define CsrWifiSmeApActiveBaGetReqSendTo(dst__, src__, interfaceTag__) \
- { \
- CsrWifiSmeApActiveBaGetReq *msg__; \
- CsrWifiSmeApActiveBaGetReqCreate(msg__, dst__, src__, interfaceTag__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiSmeApActiveBaGetReqSend(src__, interfaceTag__) \
- CsrWifiSmeApActiveBaGetReqSendTo(CSR_WIFI_SME_IFACEQUEUE, src__, interfaceTag__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApActiveBaGetCfmSend
-
- DESCRIPTION
-    This primitive carries the information related to the active BA sessions
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag -
- status - Reports the result of the request
-    activeBaCount    - Number of active block ack sessions
- activeBaSessions - Points to a buffer containing an array of
- CsrWifiSmeApBaSession structures.
-
-*******************************************************************************/
-#define CsrWifiSmeApActiveBaGetCfmCreate(msg__, dst__, src__, interfaceTag__, status__, activeBaCount__, activeBaSessions__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeApActiveBaGetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_AP_PRIM, CSR_WIFI_SME_AP_ACTIVE_BA_GET_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__); \
- msg__->activeBaCount = (activeBaCount__); \
- msg__->activeBaSessions = (activeBaSessions__);
-
-#define CsrWifiSmeApActiveBaGetCfmSendTo(dst__, src__, interfaceTag__, status__, activeBaCount__, activeBaSessions__) \
- { \
- CsrWifiSmeApActiveBaGetCfm *msg__; \
- CsrWifiSmeApActiveBaGetCfmCreate(msg__, dst__, src__, interfaceTag__, status__, activeBaCount__, activeBaSessions__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiSmeApActiveBaGetCfmSend(dst__, interfaceTag__, status__, activeBaCount__, activeBaSessions__) \
- CsrWifiSmeApActiveBaGetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__, activeBaCount__, activeBaSessions__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApBaDeleteReqSend
-
- DESCRIPTION
- This primitive is used to delete an active block ack session
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
- reason -
- baSession - BA session to be deleted
-
-*******************************************************************************/
-#define CsrWifiSmeApBaDeleteReqCreate(msg__, dst__, src__, interfaceTag__, reason__, baSession__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeApBaDeleteReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_AP_PRIM, CSR_WIFI_SME_AP_BA_DELETE_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->reason = (reason__); \
- msg__->baSession = (baSession__);
-
-#define CsrWifiSmeApBaDeleteReqSendTo(dst__, src__, interfaceTag__, reason__, baSession__) \
- { \
- CsrWifiSmeApBaDeleteReq *msg__; \
- CsrWifiSmeApBaDeleteReqCreate(msg__, dst__, src__, interfaceTag__, reason__, baSession__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiSmeApBaDeleteReqSend(src__, interfaceTag__, reason__, baSession__) \
- CsrWifiSmeApBaDeleteReqSendTo(CSR_WIFI_SME_IFACEQUEUE, src__, interfaceTag__, reason__, baSession__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApBaDeleteCfmSend
-
- DESCRIPTION
- This primitive confirms the BA is deleted
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag -
- status - Reports the result of the request
- baSession - deleted BA session
-
-*******************************************************************************/
-#define CsrWifiSmeApBaDeleteCfmCreate(msg__, dst__, src__, interfaceTag__, status__, baSession__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeApBaDeleteCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_AP_PRIM, CSR_WIFI_SME_AP_BA_DELETE_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__); \
- msg__->baSession = (baSession__);
-
-#define CsrWifiSmeApBaDeleteCfmSendTo(dst__, src__, interfaceTag__, status__, baSession__) \
- { \
- CsrWifiSmeApBaDeleteCfm *msg__; \
- CsrWifiSmeApBaDeleteCfmCreate(msg__, dst__, src__, interfaceTag__, status__, baSession__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiSmeApBaDeleteCfmSend(dst__, interfaceTag__, status__, baSession__) \
- CsrWifiSmeApBaDeleteCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__, baSession__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApBeaconingStartReqSend
-
- DESCRIPTION
- This primitive requests the SME to start AP or GO functionality
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
-    initialPresence - Set to 0 if not in a group formation phase; set to 1
-                      during the group formation phase
-    apType          - AP type: Legacy AP or P2P GO
- cloakSsid - cloakSsid flag.
- ssid - ssid.
- ifIndex - Radio Interface
- channel - channel.
- maxConnections - Maximum Stations + P2PClients allowed
-    apCredentials   - AP security credentials used to advertise in beacon/probe
- response
- smeApConfig - AP configuration
-    p2pGoParam      - P2P specific GO parameters. Ignored if it is a legacy AP
-
-*******************************************************************************/
-#define CsrWifiSmeApBeaconingStartReqCreate(msg__, dst__, src__, interfaceTag__, initialPresence__, apType__, cloakSsid__, ssid__, ifIndex__, channel__, maxConnections__, apCredentials__, smeApConfig__, p2pGoParam__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeApBeaconingStartReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_AP_PRIM, CSR_WIFI_SME_AP_BEACONING_START_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->initialPresence = (initialPresence__); \
- msg__->apType = (apType__); \
- msg__->cloakSsid = (cloakSsid__); \
- msg__->ssid = (ssid__); \
- msg__->ifIndex = (ifIndex__); \
- msg__->channel = (channel__); \
- msg__->maxConnections = (maxConnections__); \
- msg__->apCredentials = (apCredentials__); \
- msg__->smeApConfig = (smeApConfig__); \
- msg__->p2pGoParam = (p2pGoParam__);
-
-#define CsrWifiSmeApBeaconingStartReqSendTo(dst__, src__, interfaceTag__, initialPresence__, apType__, cloakSsid__, ssid__, ifIndex__, channel__, maxConnections__, apCredentials__, smeApConfig__, p2pGoParam__) \
- { \
- CsrWifiSmeApBeaconingStartReq *msg__; \
- CsrWifiSmeApBeaconingStartReqCreate(msg__, dst__, src__, interfaceTag__, initialPresence__, apType__, cloakSsid__, ssid__, ifIndex__, channel__, maxConnections__, apCredentials__, smeApConfig__, p2pGoParam__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiSmeApBeaconingStartReqSend(src__, interfaceTag__, initialPresence__, apType__, cloakSsid__, ssid__, ifIndex__, channel__, maxConnections__, apCredentials__, smeApConfig__, p2pGoParam__) \
- CsrWifiSmeApBeaconingStartReqSendTo(CSR_WIFI_SME_IFACEQUEUE, src__, interfaceTag__, initialPresence__, apType__, cloakSsid__, ssid__, ifIndex__, channel__, maxConnections__, apCredentials__, smeApConfig__, p2pGoParam__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApBeaconingStartCfmSend
-
- DESCRIPTION
- This primitive confirms the completion of the request along with the
- status
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag -
- status -
- secIeLength -
- secIe -
-
-*******************************************************************************/
-#define CsrWifiSmeApBeaconingStartCfmCreate(msg__, dst__, src__, interfaceTag__, status__, secIeLength__, secIe__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeApBeaconingStartCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_AP_PRIM, CSR_WIFI_SME_AP_BEACONING_START_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__); \
- msg__->secIeLength = (secIeLength__); \
- msg__->secIe = (secIe__);
-
-#define CsrWifiSmeApBeaconingStartCfmSendTo(dst__, src__, interfaceTag__, status__, secIeLength__, secIe__) \
- { \
- CsrWifiSmeApBeaconingStartCfm *msg__; \
- CsrWifiSmeApBeaconingStartCfmCreate(msg__, dst__, src__, interfaceTag__, status__, secIeLength__, secIe__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiSmeApBeaconingStartCfmSend(dst__, interfaceTag__, status__, secIeLength__, secIe__) \
- CsrWifiSmeApBeaconingStartCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__, secIeLength__, secIe__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApBeaconingStopReqSend
-
- DESCRIPTION
-    This primitive requests the SME to stop AP or P2P GO operation
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
-
-*******************************************************************************/
-#define CsrWifiSmeApBeaconingStopReqCreate(msg__, dst__, src__, interfaceTag__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeApBeaconingStopReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_AP_PRIM, CSR_WIFI_SME_AP_BEACONING_STOP_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__);
-
-#define CsrWifiSmeApBeaconingStopReqSendTo(dst__, src__, interfaceTag__) \
- { \
- CsrWifiSmeApBeaconingStopReq *msg__; \
- CsrWifiSmeApBeaconingStopReqCreate(msg__, dst__, src__, interfaceTag__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiSmeApBeaconingStopReqSend(src__, interfaceTag__) \
- CsrWifiSmeApBeaconingStopReqSendTo(CSR_WIFI_SME_IFACEQUEUE, src__, interfaceTag__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApBeaconingStopCfmSend
-
- DESCRIPTION
-    This primitive confirms that the AP or P2P GO operation is terminated
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag -
- status -
-
-*******************************************************************************/
-#define CsrWifiSmeApBeaconingStopCfmCreate(msg__, dst__, src__, interfaceTag__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeApBeaconingStopCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_AP_PRIM, CSR_WIFI_SME_AP_BEACONING_STOP_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeApBeaconingStopCfmSendTo(dst__, src__, interfaceTag__, status__) \
- { \
- CsrWifiSmeApBeaconingStopCfm *msg__; \
- CsrWifiSmeApBeaconingStopCfmCreate(msg__, dst__, src__, interfaceTag__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiSmeApBeaconingStopCfmSend(dst__, interfaceTag__, status__) \
- CsrWifiSmeApBeaconingStopCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApErrorIndSend
-
- DESCRIPTION
-    This primitive is sent by the SME to indicate an error in AP operation
-    after AP operations were started successfully; continuing the AP
- operation may lead to undesired behaviour. It is the responsibility of
- the upper layers to stop AP operation if needed
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Range 0-1
- apType -
- status - Contains the error status
-
-*******************************************************************************/
-#define CsrWifiSmeApErrorIndCreate(msg__, dst__, src__, interfaceTag__, apType__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeApErrorInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_AP_PRIM, CSR_WIFI_SME_AP_ERROR_IND, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->apType = (apType__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeApErrorIndSendTo(dst__, src__, interfaceTag__, apType__, status__) \
- { \
- CsrWifiSmeApErrorInd *msg__; \
- CsrWifiSmeApErrorIndCreate(msg__, dst__, src__, interfaceTag__, apType__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiSmeApErrorIndSend(dst__, interfaceTag__, apType__, status__) \
- CsrWifiSmeApErrorIndSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, apType__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApStaConnectStartIndSend
-
- DESCRIPTION
-    This primitive indicates that a station's request to join the group/BSS is
- accepted
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag -
- peerMacAddress -
-
-*******************************************************************************/
-#define CsrWifiSmeApStaConnectStartIndCreate(msg__, dst__, src__, interfaceTag__, peerMacAddress__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeApStaConnectStartInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_AP_PRIM, CSR_WIFI_SME_AP_STA_CONNECT_START_IND, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->peerMacAddress = (peerMacAddress__);
-
-#define CsrWifiSmeApStaConnectStartIndSendTo(dst__, src__, interfaceTag__, peerMacAddress__) \
- { \
- CsrWifiSmeApStaConnectStartInd *msg__; \
- CsrWifiSmeApStaConnectStartIndCreate(msg__, dst__, src__, interfaceTag__, peerMacAddress__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiSmeApStaConnectStartIndSend(dst__, interfaceTag__, peerMacAddress__) \
- CsrWifiSmeApStaConnectStartIndSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, peerMacAddress__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApStaDisconnectReqSend
-
- DESCRIPTION
-    This primitive tells the SME to deauthenticate or disassociate a
-    particular station within the BSS
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
- deauthReason -
- disassocReason -
- peerMacaddress -
- keepBlocking - If TRUE, the station is blocked. If FALSE and the station
- is connected, disconnect the station. If FALSE and the
- station is not connected, no action is taken.
-
-*******************************************************************************/
-#define CsrWifiSmeApStaDisconnectReqCreate(msg__, dst__, src__, interfaceTag__, deauthReason__, disassocReason__, peerMacaddress__, keepBlocking__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeApStaDisconnectReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_AP_PRIM, CSR_WIFI_SME_AP_STA_DISCONNECT_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->deauthReason = (deauthReason__); \
- msg__->disassocReason = (disassocReason__); \
- msg__->peerMacaddress = (peerMacaddress__); \
- msg__->keepBlocking = (keepBlocking__);
-
-#define CsrWifiSmeApStaDisconnectReqSendTo(dst__, src__, interfaceTag__, deauthReason__, disassocReason__, peerMacaddress__, keepBlocking__) \
- { \
- CsrWifiSmeApStaDisconnectReq *msg__; \
- CsrWifiSmeApStaDisconnectReqCreate(msg__, dst__, src__, interfaceTag__, deauthReason__, disassocReason__, peerMacaddress__, keepBlocking__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiSmeApStaDisconnectReqSend(src__, interfaceTag__, deauthReason__, disassocReason__, peerMacaddress__, keepBlocking__) \
- CsrWifiSmeApStaDisconnectReqSendTo(CSR_WIFI_SME_IFACEQUEUE, src__, interfaceTag__, deauthReason__, disassocReason__, peerMacaddress__, keepBlocking__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApStaDisconnectCfmSend
-
- DESCRIPTION
- This primitive confirms the station is disconnected
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag -
- status -
- peerMacaddress -
-
-*******************************************************************************/
-#define CsrWifiSmeApStaDisconnectCfmCreate(msg__, dst__, src__, interfaceTag__, status__, peerMacaddress__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeApStaDisconnectCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_AP_PRIM, CSR_WIFI_SME_AP_STA_DISCONNECT_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__); \
- msg__->peerMacaddress = (peerMacaddress__);
-
-#define CsrWifiSmeApStaDisconnectCfmSendTo(dst__, src__, interfaceTag__, status__, peerMacaddress__) \
- { \
- CsrWifiSmeApStaDisconnectCfm *msg__; \
- CsrWifiSmeApStaDisconnectCfmCreate(msg__, dst__, src__, interfaceTag__, status__, peerMacaddress__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiSmeApStaDisconnectCfmSend(dst__, interfaceTag__, status__, peerMacaddress__) \
- CsrWifiSmeApStaDisconnectCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__, peerMacaddress__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApStaNotifyIndSend
-
- DESCRIPTION
- This primitive indicates that a station has joined or a previously joined
- station has left the BSS/group
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag -
- mediaStatus -
- peerMacAddress -
- peerDeviceAddress -
- disassocReason -
- deauthReason -
- WpsRegistration -
- secIeLength -
- secIe -
- groupKeyId -
- seqNumber -
-
-*******************************************************************************/
-#define CsrWifiSmeApStaNotifyIndCreate(msg__, dst__, src__, interfaceTag__, mediaStatus__, peerMacAddress__, peerDeviceAddress__, disassocReason__, deauthReason__, WpsRegistration__, secIeLength__, secIe__, groupKeyId__, seqNumber__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeApStaNotifyInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_AP_PRIM, CSR_WIFI_SME_AP_STA_NOTIFY_IND, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->mediaStatus = (mediaStatus__); \
- msg__->peerMacAddress = (peerMacAddress__); \
- msg__->peerDeviceAddress = (peerDeviceAddress__); \
- msg__->disassocReason = (disassocReason__); \
- msg__->deauthReason = (deauthReason__); \
- msg__->WpsRegistration = (WpsRegistration__); \
- msg__->secIeLength = (secIeLength__); \
- msg__->secIe = (secIe__); \
- msg__->groupKeyId = (groupKeyId__); \
- memcpy(msg__->seqNumber, (seqNumber__), sizeof(u16) * 8);
-
-#define CsrWifiSmeApStaNotifyIndSendTo(dst__, src__, interfaceTag__, mediaStatus__, peerMacAddress__, peerDeviceAddress__, disassocReason__, deauthReason__, WpsRegistration__, secIeLength__, secIe__, groupKeyId__, seqNumber__) \
- { \
- CsrWifiSmeApStaNotifyInd *msg__; \
- CsrWifiSmeApStaNotifyIndCreate(msg__, dst__, src__, interfaceTag__, mediaStatus__, peerMacAddress__, peerDeviceAddress__, disassocReason__, deauthReason__, WpsRegistration__, secIeLength__, secIe__, groupKeyId__, seqNumber__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiSmeApStaNotifyIndSend(dst__, interfaceTag__, mediaStatus__, peerMacAddress__, peerDeviceAddress__, disassocReason__, deauthReason__, WpsRegistration__, secIeLength__, secIe__, groupKeyId__, seqNumber__) \
- CsrWifiSmeApStaNotifyIndSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, mediaStatus__, peerMacAddress__, peerDeviceAddress__, disassocReason__, deauthReason__, WpsRegistration__, secIeLength__, secIe__, groupKeyId__, seqNumber__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApWmmParamUpdateReqSend
-
- DESCRIPTION
-    The application uses this primitive to update the WMM parameters on the fly
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
- wmmApParams - WMM parameters to be used for local firmware queue
- configuration
- wmmApBcParams - WMM parameters to be advertised in beacon/probe response
-
-*******************************************************************************/
-#define CsrWifiSmeApWmmParamUpdateReqCreate(msg__, dst__, src__, interfaceTag__, wmmApParams__, wmmApBcParams__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeApWmmParamUpdateReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_AP_PRIM, CSR_WIFI_SME_AP_WMM_PARAM_UPDATE_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- memcpy(msg__->wmmApParams, (wmmApParams__), sizeof(CsrWifiSmeWmmAcParams) * 4); \
- memcpy(msg__->wmmApBcParams, (wmmApBcParams__), sizeof(CsrWifiSmeWmmAcParams) * 4);
-
-#define CsrWifiSmeApWmmParamUpdateReqSendTo(dst__, src__, interfaceTag__, wmmApParams__, wmmApBcParams__) \
- { \
- CsrWifiSmeApWmmParamUpdateReq *msg__; \
- CsrWifiSmeApWmmParamUpdateReqCreate(msg__, dst__, src__, interfaceTag__, wmmApParams__, wmmApBcParams__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiSmeApWmmParamUpdateReqSend(src__, interfaceTag__, wmmApParams__, wmmApBcParams__) \
- CsrWifiSmeApWmmParamUpdateReqSendTo(CSR_WIFI_SME_IFACEQUEUE, src__, interfaceTag__, wmmApParams__, wmmApBcParams__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApWmmParamUpdateCfmSend
-
- DESCRIPTION
- A confirm for CSR_WIFI_SME_AP_WMM_PARAM_UPDATE.request
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag -
- status -
-
-*******************************************************************************/
-#define CsrWifiSmeApWmmParamUpdateCfmCreate(msg__, dst__, src__, interfaceTag__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeApWmmParamUpdateCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_AP_PRIM, CSR_WIFI_SME_AP_WMM_PARAM_UPDATE_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeApWmmParamUpdateCfmSendTo(dst__, src__, interfaceTag__, status__) \
- { \
- CsrWifiSmeApWmmParamUpdateCfm *msg__; \
- CsrWifiSmeApWmmParamUpdateCfmCreate(msg__, dst__, src__, interfaceTag__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiSmeApWmmParamUpdateCfmSend(dst__, interfaceTag__, status__) \
- CsrWifiSmeApWmmParamUpdateCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApWpsConfigurationReqSend
-
- DESCRIPTION
- This primitive passes the WPS information for the device to SME. This may
- be accepted only if no interface is active.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- wpsConfig - WPS config.
-
-*******************************************************************************/
-#define CsrWifiSmeApWpsConfigurationReqCreate(msg__, dst__, src__, wpsConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeApWpsConfigurationReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_AP_PRIM, CSR_WIFI_SME_AP_WPS_CONFIGURATION_REQ, dst__, src__); \
- msg__->wpsConfig = (wpsConfig__);
-
-#define CsrWifiSmeApWpsConfigurationReqSendTo(dst__, src__, wpsConfig__) \
- { \
- CsrWifiSmeApWpsConfigurationReq *msg__; \
- CsrWifiSmeApWpsConfigurationReqCreate(msg__, dst__, src__, wpsConfig__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiSmeApWpsConfigurationReqSend(src__, wpsConfig__) \
- CsrWifiSmeApWpsConfigurationReqSendTo(CSR_WIFI_SME_IFACEQUEUE, src__, wpsConfig__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApWpsConfigurationCfmSend
-
- DESCRIPTION
- Confirm.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Status of the request.
-
-*******************************************************************************/
-#define CsrWifiSmeApWpsConfigurationCfmCreate(msg__, dst__, src__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeApWpsConfigurationCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_AP_PRIM, CSR_WIFI_SME_AP_WPS_CONFIGURATION_CFM, dst__, src__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeApWpsConfigurationCfmSendTo(dst__, src__, status__) \
- { \
- CsrWifiSmeApWpsConfigurationCfm *msg__; \
- CsrWifiSmeApWpsConfigurationCfmCreate(msg__, dst__, src__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiSmeApWpsConfigurationCfmSend(dst__, status__) \
- CsrWifiSmeApWpsConfigurationCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApWpsRegistrationFinishedReqSend
-
- DESCRIPTION
-    This primitive tells the SME that the WPS registration procedure has finished
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
-
-*******************************************************************************/
-#define CsrWifiSmeApWpsRegistrationFinishedReqCreate(msg__, dst__, src__, interfaceTag__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeApWpsRegistrationFinishedReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_AP_PRIM, CSR_WIFI_SME_AP_WPS_REGISTRATION_FINISHED_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__);
-
-#define CsrWifiSmeApWpsRegistrationFinishedReqSendTo(dst__, src__, interfaceTag__) \
- { \
- CsrWifiSmeApWpsRegistrationFinishedReq *msg__; \
- CsrWifiSmeApWpsRegistrationFinishedReqCreate(msg__, dst__, src__, interfaceTag__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiSmeApWpsRegistrationFinishedReqSend(src__, interfaceTag__) \
- CsrWifiSmeApWpsRegistrationFinishedReqSendTo(CSR_WIFI_SME_IFACEQUEUE, src__, interfaceTag__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApWpsRegistrationFinishedCfmSend
-
- DESCRIPTION
- A confirm for UNIFI_MGT_AP_WPS_REGISTRATION_FINISHED.request
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag -
- status -
-
-*******************************************************************************/
-#define CsrWifiSmeApWpsRegistrationFinishedCfmCreate(msg__, dst__, src__, interfaceTag__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeApWpsRegistrationFinishedCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_AP_PRIM, CSR_WIFI_SME_AP_WPS_REGISTRATION_FINISHED_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeApWpsRegistrationFinishedCfmSendTo(dst__, src__, interfaceTag__, status__) \
- { \
- CsrWifiSmeApWpsRegistrationFinishedCfm *msg__; \
- CsrWifiSmeApWpsRegistrationFinishedCfmCreate(msg__, dst__, src__, interfaceTag__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiSmeApWpsRegistrationFinishedCfmSend(dst__, interfaceTag__, status__) \
- CsrWifiSmeApWpsRegistrationFinishedCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApWpsRegistrationStartedReqSend
-
- DESCRIPTION
-    This primitive tells the SME that the WPS registration procedure has started
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag -
- SelectedDevicePasswordId -
- SelectedconfigMethod -
-
-*******************************************************************************/
-#define CsrWifiSmeApWpsRegistrationStartedReqCreate(msg__, dst__, src__, interfaceTag__, SelectedDevicePasswordId__, SelectedconfigMethod__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeApWpsRegistrationStartedReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_AP_PRIM, CSR_WIFI_SME_AP_WPS_REGISTRATION_STARTED_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->SelectedDevicePasswordId = (SelectedDevicePasswordId__); \
- msg__->SelectedconfigMethod = (SelectedconfigMethod__);
-
-#define CsrWifiSmeApWpsRegistrationStartedReqSendTo(dst__, src__, interfaceTag__, SelectedDevicePasswordId__, SelectedconfigMethod__) \
- { \
- CsrWifiSmeApWpsRegistrationStartedReq *msg__; \
- CsrWifiSmeApWpsRegistrationStartedReqCreate(msg__, dst__, src__, interfaceTag__, SelectedDevicePasswordId__, SelectedconfigMethod__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiSmeApWpsRegistrationStartedReqSend(src__, interfaceTag__, SelectedDevicePasswordId__, SelectedconfigMethod__) \
- CsrWifiSmeApWpsRegistrationStartedReqSendTo(CSR_WIFI_SME_IFACEQUEUE, src__, interfaceTag__, SelectedDevicePasswordId__, SelectedconfigMethod__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApWpsRegistrationStartedCfmSend
-
- DESCRIPTION
- A confirm for UNIFI_MGT_AP_WPS_REGISTRATION_STARTED.request
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag -
- status -
-
-*******************************************************************************/
-#define CsrWifiSmeApWpsRegistrationStartedCfmCreate(msg__, dst__, src__, interfaceTag__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeApWpsRegistrationStartedCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_AP_PRIM, CSR_WIFI_SME_AP_WPS_REGISTRATION_STARTED_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeApWpsRegistrationStartedCfmSendTo(dst__, src__, interfaceTag__, status__) \
- { \
- CsrWifiSmeApWpsRegistrationStartedCfm *msg__; \
- CsrWifiSmeApWpsRegistrationStartedCfmCreate(msg__, dst__, src__, interfaceTag__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_AP_PRIM, msg__); \
- }
-
-#define CsrWifiSmeApWpsRegistrationStartedCfmSend(dst__, interfaceTag__, status__) \
- CsrWifiSmeApWpsRegistrationStartedCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__)
-
-
-#endif /* CSR_WIFI_SME_AP_LIB_H__ */
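Every primitive in the header above comes as the same three macros: XxxCreate allocates the event with kmalloc and initialises it with CsrWifiFsmEventInit, XxxSendTo wraps Create plus a queue put, and XxxSend fills in the default queue (CSR_WIFI_SME_IFACEQUEUE for requests sent via CsrMsgTransport, the caller's queue for confirms and indications sent via CsrSchedMessagePut). A hedged caller sketch; the queue id and interface tag are illustrative, and CSR_RESULT_SUCCESS is assumed from csr_result.h:

    /* Illustrative only: stop beaconing on interface 0, then the confirm
     * the SME side would send back to the requester. */
    CsrSchedQid appQueue = 0;  /* example queue id of the calling task */

    /* Request: delivered to CSR_WIFI_SME_IFACEQUEUE via CsrMsgTransport */
    CsrWifiSmeApBeaconingStopReqSend(appQueue, 0 /* interfaceTag */);

    /* Confirm: delivered back to appQueue via CsrSchedMessagePut */
    CsrWifiSmeApBeaconingStopCfmSend(appQueue, 0 /* interfaceTag */, CSR_RESULT_SUCCESS);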
diff --git a/drivers/staging/csr/csr_wifi_sme_ap_prim.h b/drivers/staging/csr/csr_wifi_sme_ap_prim.h
deleted file mode 100644
index 3c4bcbc..0000000
--- a/drivers/staging/csr/csr_wifi_sme_ap_prim.h
+++ /dev/null
@@ -1,1030 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#ifndef CSR_WIFI_SME_AP_PRIM_H__
-#define CSR_WIFI_SME_AP_PRIM_H__
-
-#include "csr_prim_defs.h"
-#include "csr_sched.h"
-#include "csr_wifi_common.h"
-#include "csr_result.h"
-#include "csr_wifi_fsm_event.h"
-#include "csr_wifi_sme_prim.h"
-
-#ifndef CSR_WIFI_AP_ENABLE
-#error CSR_WIFI_AP_ENABLE MUST be defined in order to use csr_wifi_sme_ap_prim.h
-#endif
-
-#define CSR_WIFI_SME_AP_PRIM (0x0407)
-
-typedef CsrPrim CsrWifiSmeApPrim;
-
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApAccessType
-
- DESCRIPTION
- Allow or deny STAs based on MAC address
-
- VALUES
- CSR_WIFI_AP_ACCESS_TYPE_NONE - None
-    CSR_WIFI_AP_ACCESS_TYPE_ALLOW - Allow only if the MAC address is in the list
-    CSR_WIFI_AP_ACCESS_TYPE_DENY  - Deny if the MAC address is in the list
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeApAccessType;
-#define CSR_WIFI_AP_ACCESS_TYPE_NONE ((CsrWifiSmeApAccessType) 0x00)
-#define CSR_WIFI_AP_ACCESS_TYPE_ALLOW ((CsrWifiSmeApAccessType) 0x01)
-#define CSR_WIFI_AP_ACCESS_TYPE_DENY ((CsrWifiSmeApAccessType) 0x02)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApAuthSupport
-
- DESCRIPTION
- Define bits for AP authentication support
-
- VALUES
- CSR_WIFI_SME_RSN_AUTH_WPAPSK - RSN WPA-PSK Support
- CSR_WIFI_SME_RSN_AUTH_WPA2PSK - RSN WPA2-PSK Support
- CSR_WIFI_SME_AUTH_WAPIPSK - WAPI-PSK Support
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeApAuthSupport;
-#define CSR_WIFI_SME_RSN_AUTH_WPAPSK ((CsrWifiSmeApAuthSupport) 0x01)
-#define CSR_WIFI_SME_RSN_AUTH_WPA2PSK ((CsrWifiSmeApAuthSupport) 0x02)
-#define CSR_WIFI_SME_AUTH_WAPIPSK ((CsrWifiSmeApAuthSupport) 0x04)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApAuthType
-
- DESCRIPTION
- Definition of the SME AP Authentication Options
-
- VALUES
- CSR_WIFI_SME_AP_AUTH_TYPE_OPEN_SYSTEM
- - Open authentication
- CSR_WIFI_SME_AP_AUTH_TYPE_PERSONAL
- - Personal authentication using a passphrase or a pre-shared
- key.
- CSR_WIFI_SME_AP_AUTH_TYPE_WEP
- - WEP authentication. This can be either open or shared key
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeApAuthType;
-#define CSR_WIFI_SME_AP_AUTH_TYPE_OPEN_SYSTEM ((CsrWifiSmeApAuthType) 0x00)
-#define CSR_WIFI_SME_AP_AUTH_TYPE_PERSONAL ((CsrWifiSmeApAuthType) 0x01)
-#define CSR_WIFI_SME_AP_AUTH_TYPE_WEP ((CsrWifiSmeApAuthType) 0x02)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApDirection
-
- DESCRIPTION
- Definition of Direction
-
- VALUES
-    CSR_WIFI_AP_DIRECTION_RECEIPIENT - Recipient
- CSR_WIFI_AP_DIRECTION_ORIGINATOR - Originator
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeApDirection;
-#define CSR_WIFI_AP_DIRECTION_RECEIPIENT ((CsrWifiSmeApDirection) 0x00)
-#define CSR_WIFI_AP_DIRECTION_ORIGINATOR ((CsrWifiSmeApDirection) 0x01)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApPhySupport
-
- DESCRIPTION
- Define bits for CsrWifiSmeApPhySupportMask
-
- VALUES
- CSR_WIFI_SME_AP_PHY_SUPPORT_A - 802.11a. It is not supported in the current
- release.
- CSR_WIFI_SME_AP_PHY_SUPPORT_B - 802.11b
- CSR_WIFI_SME_AP_PHY_SUPPORT_G - 802.11g
- CSR_WIFI_SME_AP_PHY_SUPPORT_N - 802.11n
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeApPhySupport;
-#define CSR_WIFI_SME_AP_PHY_SUPPORT_A ((CsrWifiSmeApPhySupport) 0x01)
-#define CSR_WIFI_SME_AP_PHY_SUPPORT_B ((CsrWifiSmeApPhySupport) 0x02)
-#define CSR_WIFI_SME_AP_PHY_SUPPORT_G ((CsrWifiSmeApPhySupport) 0x04)
-#define CSR_WIFI_SME_AP_PHY_SUPPORT_N ((CsrWifiSmeApPhySupport) 0x08)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApType
-
- DESCRIPTION
- Definition of AP types
-
- VALUES
- CSR_WIFI_AP_TYPE_LEGACY - Legacy AP
- CSR_WIFI_AP_TYPE_P2P - P2P Group Owner(GO)
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeApType;
-#define CSR_WIFI_AP_TYPE_LEGACY ((CsrWifiSmeApType) 0x00)
-#define CSR_WIFI_AP_TYPE_P2P ((CsrWifiSmeApType) 0x01)
-
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApAuthSupportMask
-
- DESCRIPTION
- See CsrWifiSmeApAuthSupport for bit definitions
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeApAuthSupportMask;
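The CsrWifiSmeApAuthSupport values defined earlier in this header are bit flags, so a CsrWifiSmeApAuthSupportMask is built by OR-ing them together. For example (illustrative only):

    /* Illustrative only: advertise both WPA-PSK and WPA2-PSK support. */
    CsrWifiSmeApAuthSupportMask authSupport =
        CSR_WIFI_SME_RSN_AUTH_WPAPSK | CSR_WIFI_SME_RSN_AUTH_WPA2PSK;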
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApPhySupportMask
-
- DESCRIPTION
- Mask type for use with the values defined by CsrWifiSmeApPhySupport
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeApPhySupportMask;
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApRsnCapabilities
-
- DESCRIPTION
- Set to 0 for the current release
-
-*******************************************************************************/
-typedef u16 CsrWifiSmeApRsnCapabilities;
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApRsnCapabilitiesMask
-
- DESCRIPTION
- Mask type for use with the values defined by CsrWifiSmeApRsnCapabilities
-
-*******************************************************************************/
-typedef u16 CsrWifiSmeApRsnCapabilitiesMask;
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApWapiCapabilities
-
- DESCRIPTION
- Ignored by the stack as WAPI is not supported for AP operations in the
- current release
-
-*******************************************************************************/
-typedef u16 CsrWifiSmeApWapiCapabilities;
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApWapiCapabilitiesMask
-
- DESCRIPTION
- Mask type for use with the values defined by CsrWifiSmeApWapiCapabilities
-
-*******************************************************************************/
-typedef u16 CsrWifiSmeApWapiCapabilitiesMask;
-
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApHtParams
-
- DESCRIPTION
- Structure holding HT parameters
-
- MEMBERS
- greenfieldSupported - Indicates if the AP supports Htgreenfield operation
- subject to the chip capability. If the chip does not
- support Htgreenfield operation, this parameter will be
- ignored.
- NOTE: if shortGi20MHz is set to TRUE and the chip
-                          supports short GI operation for 20MHz, this field will
-                          be ignored and the AP will not support Htgreenfield
- operation.
- NOTE: This field is ignored by the Wi-Fi stack for the
- current release. It implies that AP does not support
- greenfield operation.
-    shortGi20MHz        - Indicates if the AP supports short GI operation for
-                          20MHz subject to the chip capability. If the chip does
- not support short GI for 20MHz, this parameter is
- ignored
-    rxStbc              - Support for STBC on receive. 0 => no STBC support,
-                          1 => use STBC for Rx
- rifsModeAllowed - RIFS Mode is allowed to protect overlapping non-HT BSS
- htProtection - Deprecated
- dualCtsProtection - Dual CTS Protection enabled
-
-*******************************************************************************/
-typedef struct
-{
- u8 greenfieldSupported;
- u8 shortGi20MHz;
- u8 rxStbc;
- u8 rifsModeAllowed;
- u8 htProtection;
- u8 dualCtsProtection;
-} CsrWifiSmeApHtParams;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApP2pOperatingChanEntry
-
- DESCRIPTION
-
- MEMBERS
- operatingClass - Channel operating class
- operatingChannelCount - Number of channels in this entry
- operatingChannel - List of channels
-
-*******************************************************************************/
-typedef struct
-{
- u8 operatingClass;
- u8 operatingChannelCount;
- u8 *operatingChannel;
-} CsrWifiSmeApP2pOperatingChanEntry;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApP2pOperatingChanList
-
- DESCRIPTION
- This structure contains the lists of P2P operating channels
-
- MEMBERS
- country - Country
- channelEntryListCount - Number of entries
- channelEntryList - List of entries
-
-*******************************************************************************/
-typedef struct
-{
- u8 country[3];
- u8 channelEntryListCount;
- CsrWifiSmeApP2pOperatingChanEntry *channelEntryList;
-} CsrWifiSmeApP2pOperatingChanList;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApAuthPers
-
- DESCRIPTION
-
- MEMBERS
- authSupport -
- encryptionModeMask -
- rsnCapabilities -
- wapiCapabilities -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiSmeApAuthSupportMask authSupport;
- CsrWifiSmeEncryptionMask encryptionModeMask;
- CsrWifiSmeApRsnCapabilitiesMask rsnCapabilities;
- CsrWifiSmeApWapiCapabilitiesMask wapiCapabilities;
-} CsrWifiSmeApAuthPers;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApBaSession
-
- DESCRIPTION
-
- MEMBERS
- peerMacAddress - Indicates MAC address of the peer station
- tid - Specifies the TID of the MSDUs for which this Block Ack has
- been set up. Range: 0-15
- direction - Specifies if the AP is the originator or the recipient of
- the data stream that uses the Block Ack.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiMacAddress peerMacAddress;
- u8 tid;
- CsrWifiSmeApDirection direction;
-} CsrWifiSmeApBaSession;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApMacConfig
-
- DESCRIPTION
- Structure holding AP MAC configuration.
-
- MEMBERS
- phySupportedBitmap - Indicates supported physical layers
- beaconInterval - Beacon interval in terms of TUs
- dtimPeriod - DTIM period in terms of number of beacon intervals
- maxListenInterval - Maximum allowed listen interval as number of beacon
- intervals
- supportedRatesCount - Number of supported rates. Range : 0 to 20
-     supportedRates       - List of supported rates. A rate is specified in
-                            units of 500 kbps. An entry for a basic rate shall
-                            have the MSB set to 1.
- preamble - Preamble to be advertised in beacons and probe
- responses
- shortSlotTimeEnabled - TRUE indicates the AP shall use short slot time if
- all the stations use short slot operation.
- ctsProtectionType - CTS protection to be used
-     wmmEnabled           - Indicates whether WMM is enabled. If set to
-                            FALSE, the WMM parameters shall be ignored by the
-                            receiver.
- wmmApParams - WMM parameters to be used for local firmware queue
- configuration. Array index corresponds to the ACI.
- wmmApBcParams - WMM parameters to be advertised in beacon/probe
- response. Array index corresponds to the ACI
- accessType - Specifies whether the MAC addresses from the list
- should be allowed or denied
- macAddressListCount - Number of MAC addresses
- macAddressList - List of MAC addresses
- apHtParams - AP HT parameters. The stack shall use these
- parameters only if phySupportedBitmap indicates
- support for IEEE 802.11n
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiSmeApPhySupportMask phySupportedBitmap;
- u16 beaconInterval;
- u8 dtimPeriod;
- u16 maxListenInterval;
- u8 supportedRatesCount;
- u8 supportedRates[20];
- CsrWifiSmePreambleType preamble;
- u8 shortSlotTimeEnabled;
- CsrWifiSmeCtsProtectionType ctsProtectionType;
- u8 wmmEnabled;
- CsrWifiSmeWmmAcParams wmmApParams[4];
- CsrWifiSmeWmmAcParams wmmApBcParams[4];
- CsrWifiSmeApAccessType accessType;
- u8 macAddressListCount;
- CsrWifiMacAddress *macAddressList;
- CsrWifiSmeApHtParams apHtParams;
-} CsrWifiSmeApMacConfig;
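To make the supportedRates encoding above concrete (units of 500 kbps, MSB set for basic rates), a caller might fill the rate set as sketched below. The helper name and the chosen 802.11b/g rate values are illustrative, not part of the driver.

#include <linux/kernel.h>
#include <linux/string.h>
#include "csr_wifi_sme_ap_prim.h"    /* assumed header name */

/* Illustrative only: an 802.11b/g rate set in units of 500 kbps.
 * Basic rates carry bit 7 (0x80), as described in the comment above. */
static void example_fill_rates(CsrWifiSmeApMacConfig *cfg)
{
    static const u8 rates[] = {
        0x82,   /*  1   Mbps, basic (0x80 | 2)  */
        0x84,   /*  2   Mbps, basic (0x80 | 4)  */
        0x8b,   /*  5.5 Mbps, basic (0x80 | 11) */
        0x96,   /* 11   Mbps, basic (0x80 | 22) */
        0x24,   /* 18   Mbps (36)               */
        0x30,   /* 24   Mbps (48)               */
        0x48,   /* 36   Mbps (72)               */
        0x6c,   /* 54   Mbps (108)              */
    };

    memcpy(cfg->supportedRates, rates, sizeof(rates));
    cfg->supportedRatesCount = ARRAY_SIZE(rates);
}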
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApP2pGoConfig
-
- DESCRIPTION
-
- MEMBERS
- groupCapability - Indicates the P2P group capabilities
-     operatingChanList         - List of operating channels in order of
-                                 decreasing priority. It may contain channel
-                                 entries not supported by the wifi stack;
-                                 these shall be filtered out by the wifi stack.
- opPsEnabled - Indicates whether opportunistic power save can
- be used.
- Note: This parameter is ignored by the WiFi
- stack for the current release
-     ctWindow                  - Defines the Client Traffic window to be used,
-                                 in TUs. Range: 0 to 127.
- Note: This parameter is ignored by the WiFi
- stack for the current release.
- noaConfigMethod - Notice of Absence configuration method.
- Note: This parameter is ignored by the WiFi
- stack for the current release.
-     allowNoaWithNonP2pDevices - Indicates if NOA should be allowed if non-P2P
-                                 devices are connected. If allowed, the non-P2P
-                                 devices may suffer reduced throughput.
- Note: This parameter is ignored by the WiFi
- stack for the current release.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiSmeP2pGroupCapabilityMask groupCapability;
- CsrWifiSmeApP2pOperatingChanList operatingChanList;
- u8 opPsEnabled;
- u8 ctWindow;
- CsrWifiSmeP2pNoaConfigMethod noaConfigMethod;
- u8 allowNoaWithNonP2pDevices;
-} CsrWifiSmeApP2pGoConfig;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApCredentials
-
- DESCRIPTION
-
- MEMBERS
- authType -
- smeAuthType -
- smeAuthTypeopenSystemEmpty -
- smeAuthTypeauthwep -
- smeAuthTypeauthPers -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiSmeApAuthType authType;
- union {
- CsrWifiSmeEmpty openSystemEmpty;
- CsrWifiSmeWepAuth authwep;
- CsrWifiSmeApAuthPers authPers;
- } smeAuthType;
-} CsrWifiSmeApCredentials;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApSecConfig
-
- DESCRIPTION
-
- MEMBERS
- apCredentials -
- wpsEnabled -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiSmeApCredentials apCredentials;
- u8 wpsEnabled;
-} CsrWifiSmeApSecConfig;
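The credentials structure is effectively a tagged union: authType selects which member of smeAuthType is meaningful. A minimal sketch of selecting personal (PSK) authentication follows; the enumerator name used for authType is an assumption made purely for illustration.

#include "csr_wifi_sme_ap_prim.h"    /* assumed header name */

/* Illustrative only: authType acts as the discriminator for smeAuthType. */
static void example_fill_sec_config(CsrWifiSmeApSecConfig *sec,
                                    const CsrWifiSmeApAuthPers *pers)
{
    sec->wpsEnabled = 0;                           /* WPS registrar disabled */
    /* The enumerator name below is assumed for illustration; the real
     * value is defined elsewhere in the SME headers. */
    sec->apCredentials.authType = CSR_WIFI_SME_AP_AUTH_TYPE_PERSONAL;
    sec->apCredentials.smeAuthType.authPers = *pers;
}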
-
-
-/* Downstream */
-#define CSR_WIFI_SME_AP_PRIM_DOWNSTREAM_LOWEST (0x0000)
-
-#define CSR_WIFI_SME_AP_BEACONING_START_REQ ((CsrWifiSmeApPrim) (0x0000 + CSR_WIFI_SME_AP_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_AP_BEACONING_STOP_REQ ((CsrWifiSmeApPrim) (0x0001 + CSR_WIFI_SME_AP_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_AP_WPS_REGISTRATION_STARTED_REQ ((CsrWifiSmeApPrim) (0x0002 + CSR_WIFI_SME_AP_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_AP_WPS_REGISTRATION_FINISHED_REQ ((CsrWifiSmeApPrim) (0x0003 + CSR_WIFI_SME_AP_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_AP_WMM_PARAM_UPDATE_REQ ((CsrWifiSmeApPrim) (0x0004 + CSR_WIFI_SME_AP_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_AP_STA_DISCONNECT_REQ ((CsrWifiSmeApPrim) (0x0005 + CSR_WIFI_SME_AP_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_AP_WPS_CONFIGURATION_REQ ((CsrWifiSmeApPrim) (0x0006 + CSR_WIFI_SME_AP_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_AP_ACTIVE_BA_GET_REQ ((CsrWifiSmeApPrim) (0x0007 + CSR_WIFI_SME_AP_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_AP_BA_DELETE_REQ ((CsrWifiSmeApPrim) (0x0008 + CSR_WIFI_SME_AP_PRIM_DOWNSTREAM_LOWEST))
-
-
-#define CSR_WIFI_SME_AP_PRIM_DOWNSTREAM_HIGHEST (0x0008 + CSR_WIFI_SME_AP_PRIM_DOWNSTREAM_LOWEST)
-
-/* Upstream */
-#define CSR_WIFI_SME_AP_PRIM_UPSTREAM_LOWEST (0x0000 + CSR_PRIM_UPSTREAM)
-
-#define CSR_WIFI_SME_AP_BEACONING_START_CFM ((CsrWifiSmeApPrim)(0x0000 + CSR_WIFI_SME_AP_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_AP_BEACONING_STOP_CFM ((CsrWifiSmeApPrim)(0x0001 + CSR_WIFI_SME_AP_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_AP_STA_NOTIFY_IND ((CsrWifiSmeApPrim)(0x0002 + CSR_WIFI_SME_AP_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_AP_STA_CONNECT_START_IND ((CsrWifiSmeApPrim)(0x0003 + CSR_WIFI_SME_AP_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_AP_WPS_REGISTRATION_STARTED_CFM ((CsrWifiSmeApPrim)(0x0004 + CSR_WIFI_SME_AP_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_AP_WPS_REGISTRATION_FINISHED_CFM ((CsrWifiSmeApPrim)(0x0005 + CSR_WIFI_SME_AP_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_AP_WMM_PARAM_UPDATE_CFM ((CsrWifiSmeApPrim)(0x0006 + CSR_WIFI_SME_AP_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_AP_STA_DISCONNECT_CFM ((CsrWifiSmeApPrim)(0x0007 + CSR_WIFI_SME_AP_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_AP_WPS_CONFIGURATION_CFM ((CsrWifiSmeApPrim)(0x0008 + CSR_WIFI_SME_AP_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_AP_ERROR_IND ((CsrWifiSmeApPrim)(0x0009 + CSR_WIFI_SME_AP_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_AP_ACTIVE_BA_GET_CFM ((CsrWifiSmeApPrim)(0x000A + CSR_WIFI_SME_AP_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_AP_BA_DELETE_CFM ((CsrWifiSmeApPrim)(0x000B + CSR_WIFI_SME_AP_PRIM_UPSTREAM_LOWEST))
-
-#define CSR_WIFI_SME_AP_PRIM_UPSTREAM_HIGHEST (0x000B + CSR_WIFI_SME_AP_PRIM_UPSTREAM_LOWEST)
-
-#define CSR_WIFI_SME_AP_PRIM_DOWNSTREAM_COUNT (CSR_WIFI_SME_AP_PRIM_DOWNSTREAM_HIGHEST + 1 - CSR_WIFI_SME_AP_PRIM_DOWNSTREAM_LOWEST)
-#define CSR_WIFI_SME_AP_PRIM_UPSTREAM_COUNT (CSR_WIFI_SME_AP_PRIM_UPSTREAM_HIGHEST + 1 - CSR_WIFI_SME_AP_PRIM_UPSTREAM_LOWEST)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApBeaconingStartReq
-
- DESCRIPTION
- This primitive requests the SME to start AP or GO functionality
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
-     initialPresence - Set to 0 if not in a group formation phase, set to 1
-                       during the group formation phase
-     apType          - Legacy AP or P2P GO
- cloakSsid - cloakSsid flag.
- ssid - ssid.
- ifIndex - Radio Interface
- channel - channel.
- maxConnections - Maximum Stations + P2PClients allowed
-     apCredentials   - AP security credentials to be advertised in beacon/probe
-                       responses
-     smeApConfig     - AP configuration
-     p2pGoParam      - P2P specific GO parameters. Ignored if it is a legacy AP
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- u8 initialPresence;
- CsrWifiSmeApType apType;
- u8 cloakSsid;
- CsrWifiSsid ssid;
- CsrWifiSmeRadioIF ifIndex;
- u8 channel;
- u8 maxConnections;
- CsrWifiSmeApSecConfig apCredentials;
- CsrWifiSmeApMacConfig smeApConfig;
- CsrWifiSmeApP2pGoConfig p2pGoParam;
-} CsrWifiSmeApBeaconingStartReq;
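For completeness, a small sketch of filling the scalar members of this request; the numeric values are examples only, and the SSID, apType, credentials and MAC configuration are assumed to be filled elsewhere by the caller.

#include "csr_wifi_sme_ap_prim.h"    /* assumed header name */

/* Illustrative only: example values for the scalar members. */
static void example_fill_beaconing_start(CsrWifiSmeApBeaconingStartReq *req)
{
    req->interfaceTag    = 0;    /* first virtual interface          */
    req->initialPresence = 0;    /* not in a group formation phase   */
    req->cloakSsid       = 0;    /* SSID is broadcast in beacons     */
    req->channel         = 6;    /* example 2.4 GHz channel          */
    req->maxConnections  = 8;    /* example station/client limit     */
}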
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApBeaconingStopReq
-
- DESCRIPTION
- This primitive requests the SME to STOP AP or P2PGO operation
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
-} CsrWifiSmeApBeaconingStopReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApWpsRegistrationStartedReq
-
- DESCRIPTION
- This primitive tells SME that WPS registration procedure has started
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- SelectedDevicePasswordId -
- SelectedconfigMethod -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiSmeWpsDpid SelectedDevicePasswordId;
- CsrWifiSmeWpsConfigType SelectedconfigMethod;
-} CsrWifiSmeApWpsRegistrationStartedReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApWpsRegistrationFinishedReq
-
- DESCRIPTION
- This primitive tells SME that WPS registration procedure has finished
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
-} CsrWifiSmeApWpsRegistrationFinishedReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApWmmParamUpdateReq
-
- DESCRIPTION
- Application uses this primitive to update the WMM parameters on the fly
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- wmmApParams - WMM parameters to be used for local firmware queue
- configuration
- wmmApBcParams - WMM parameters to be advertised in beacon/probe response
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiSmeWmmAcParams wmmApParams[4];
- CsrWifiSmeWmmAcParams wmmApBcParams[4];
-} CsrWifiSmeApWmmParamUpdateReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApStaDisconnectReq
-
- DESCRIPTION
-    This primitive tells the SME to deauthenticate or disassociate a particular
-    station within the BSS
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- deauthReason -
- disassocReason -
- peerMacaddress -
- keepBlocking - If TRUE, the station is blocked. If FALSE and the station
- is connected, disconnect the station. If FALSE and the
- station is not connected, no action is taken.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiSmeIEEE80211Reason deauthReason;
- CsrWifiSmeIEEE80211Reason disassocReason;
- CsrWifiMacAddress peerMacaddress;
- u8 keepBlocking;
-} CsrWifiSmeApStaDisconnectReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApWpsConfigurationReq
-
- DESCRIPTION
- This primitive passes the WPS information for the device to SME. This may
- be accepted only if no interface is active.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- wpsConfig - WPS config.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiSmeWpsConfig wpsConfig;
-} CsrWifiSmeApWpsConfigurationReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApActiveBaGetReq
-
- DESCRIPTION
-    This primitive is used to retrieve information related to the active block
-    ack sessions
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
-} CsrWifiSmeApActiveBaGetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApBaDeleteReq
-
- DESCRIPTION
- This primitive is used to delete an active block ack session
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- reason -
- baSession - BA session to be deleted
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiSmeIEEE80211Reason reason;
- CsrWifiSmeApBaSession baSession;
-} CsrWifiSmeApBaDeleteReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApBeaconingStartCfm
-
- DESCRIPTION
- This primitive confirms the completion of the request along with the
- status
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- status -
- secIeLength -
- secIe -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
- u16 secIeLength;
- u8 *secIe;
-} CsrWifiSmeApBeaconingStartCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApBeaconingStopCfm
-
- DESCRIPTION
-    This primitive confirms that AP or P2P GO operation has been terminated
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- status -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
-} CsrWifiSmeApBeaconingStopCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApStaNotifyInd
-
- DESCRIPTION
- This primitive indicates that a station has joined or a previously joined
- station has left the BSS/group
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- mediaStatus -
- peerMacAddress -
- peerDeviceAddress -
- disassocReason -
- deauthReason -
- WpsRegistration -
- secIeLength -
- secIe -
- groupKeyId -
- seqNumber -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiSmeMediaStatus mediaStatus;
- CsrWifiMacAddress peerMacAddress;
- CsrWifiMacAddress peerDeviceAddress;
- CsrWifiSmeIEEE80211Reason disassocReason;
- CsrWifiSmeIEEE80211Reason deauthReason;
- CsrWifiSmeWpsRegistration WpsRegistration;
- u8 secIeLength;
- u8 *secIe;
- u8 groupKeyId;
- u16 seqNumber[8];
-} CsrWifiSmeApStaNotifyInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApStaConnectStartInd
-
- DESCRIPTION
-    This primitive indicates that a station's request to join the group/BSS has
-    been accepted
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- peerMacAddress -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiMacAddress peerMacAddress;
-} CsrWifiSmeApStaConnectStartInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApWpsRegistrationStartedCfm
-
- DESCRIPTION
- A confirm for UNIFI_MGT_AP_WPS_REGISTRATION_STARTED.request
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- status -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
-} CsrWifiSmeApWpsRegistrationStartedCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApWpsRegistrationFinishedCfm
-
- DESCRIPTION
- A confirm for UNIFI_MGT_AP_WPS_REGISTRATION_FINISHED.request
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- status -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
-} CsrWifiSmeApWpsRegistrationFinishedCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApWmmParamUpdateCfm
-
- DESCRIPTION
- A confirm for CSR_WIFI_SME_AP_WMM_PARAM_UPDATE.request
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- status -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
-} CsrWifiSmeApWmmParamUpdateCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApStaDisconnectCfm
-
- DESCRIPTION
- This primitive confirms the station is disconnected
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- status -
- peerMacaddress -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
- CsrWifiMacAddress peerMacaddress;
-} CsrWifiSmeApStaDisconnectCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApWpsConfigurationCfm
-
- DESCRIPTION
- Confirm.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Status of the request.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
-} CsrWifiSmeApWpsConfigurationCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApErrorInd
-
- DESCRIPTION
-    This primitive is sent by the SME to indicate an error in AP operation
-    after AP operation was started successfully; continuing the AP operation
-    may lead to undesired behaviour. It is the responsibility of the upper
-    layers to stop AP operation if needed
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Range 0-1
- apType -
- status - Contains the error status
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiSmeApType apType;
- CsrResult status;
-} CsrWifiSmeApErrorInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApActiveBaGetCfm
-
- DESCRIPTION
-    This primitive carries the information related to the active BA sessions
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- status - Reports the result of the request
-     activeBaCount    - Number of active block ack sessions
- activeBaSessions - Points to a buffer containing an array of
- CsrWifiSmeApBaSession structures.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
- u16 activeBaCount;
- CsrWifiSmeApBaSession *activeBaSessions;
-} CsrWifiSmeApActiveBaGetCfm;
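A consumer of this confirm might walk the returned array and, if needed, tear sessions down with CSR_WIFI_SME_AP_BA_DELETE_REQ. The sketch below merely logs each session; it assumes, as elsewhere in this driver, that CsrWifiMacAddress exposes a 6-byte array member named a.

#include <linux/printk.h>
#include "csr_wifi_sme_ap_prim.h"    /* assumed header name */

/* Illustrative only: dump the active BA sessions reported in the confirm. */
static void example_dump_active_ba(const CsrWifiSmeApActiveBaGetCfm *cfm)
{
    u16 i;

    for (i = 0; i < cfm->activeBaCount; i++) {
        const CsrWifiSmeApBaSession *ba = &cfm->activeBaSessions[i];

        pr_info("BA session %u: peer %pM, tid %u\n",
                i, ba->peerMacAddress.a, ba->tid);   /* 'a' member assumed */
    }
}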
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeApBaDeleteCfm
-
- DESCRIPTION
- This primitive confirms the BA is deleted
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag -
- status - Reports the result of the request
- baSession - deleted BA session
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
- CsrWifiSmeApBaSession baSession;
-} CsrWifiSmeApBaDeleteCfm;
-
-
-#endif /* CSR_WIFI_SME_AP_PRIM_H__ */
-
diff --git a/drivers/staging/csr/csr_wifi_sme_converter_init.c b/drivers/staging/csr/csr_wifi_sme_converter_init.c
deleted file mode 100644
index 31835f0..0000000
--- a/drivers/staging/csr/csr_wifi_sme_converter_init.c
+++ /dev/null
@@ -1,201 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#include "csr_msgconv.h"
-#include "csr_macro.h"
-
-
-#ifdef CSR_LOG_ENABLE
-#include "csr_log.h"
-#endif
-
-#ifndef EXCLUDE_CSR_WIFI_SME_MODULE
-#include "csr_wifi_sme_serialize.h"
-#include "csr_wifi_sme_prim.h"
-
-static CsrMsgConvMsgEntry csrwifisme_conv_lut[] = {
- { CSR_WIFI_SME_ACTIVATE_REQ, CsrWifiSmeActivateReqSizeof, CsrWifiSmeActivateReqSer, CsrWifiSmeActivateReqDes, CsrWifiSmeActivateReqSerFree },
- { CSR_WIFI_SME_ADHOC_CONFIG_GET_REQ, CsrWifiSmeAdhocConfigGetReqSizeof, CsrWifiSmeAdhocConfigGetReqSer, CsrWifiSmeAdhocConfigGetReqDes, CsrWifiSmeAdhocConfigGetReqSerFree },
- { CSR_WIFI_SME_ADHOC_CONFIG_SET_REQ, CsrWifiSmeAdhocConfigSetReqSizeof, CsrWifiSmeAdhocConfigSetReqSer, CsrWifiSmeAdhocConfigSetReqDes, CsrWifiSmeAdhocConfigSetReqSerFree },
- { CSR_WIFI_SME_BLACKLIST_REQ, CsrWifiSmeBlacklistReqSizeof, CsrWifiSmeBlacklistReqSer, CsrWifiSmeBlacklistReqDes, CsrWifiSmeBlacklistReqSerFree },
- { CSR_WIFI_SME_CALIBRATION_DATA_GET_REQ, CsrWifiSmeCalibrationDataGetReqSizeof, CsrWifiSmeCalibrationDataGetReqSer, CsrWifiSmeCalibrationDataGetReqDes, CsrWifiSmeCalibrationDataGetReqSerFree },
- { CSR_WIFI_SME_CALIBRATION_DATA_SET_REQ, CsrWifiSmeCalibrationDataSetReqSizeof, CsrWifiSmeCalibrationDataSetReqSer, CsrWifiSmeCalibrationDataSetReqDes, CsrWifiSmeCalibrationDataSetReqSerFree },
- { CSR_WIFI_SME_CCX_CONFIG_GET_REQ, CsrWifiSmeCcxConfigGetReqSizeof, CsrWifiSmeCcxConfigGetReqSer, CsrWifiSmeCcxConfigGetReqDes, CsrWifiSmeCcxConfigGetReqSerFree },
- { CSR_WIFI_SME_CCX_CONFIG_SET_REQ, CsrWifiSmeCcxConfigSetReqSizeof, CsrWifiSmeCcxConfigSetReqSer, CsrWifiSmeCcxConfigSetReqDes, CsrWifiSmeCcxConfigSetReqSerFree },
- { CSR_WIFI_SME_COEX_CONFIG_GET_REQ, CsrWifiSmeCoexConfigGetReqSizeof, CsrWifiSmeCoexConfigGetReqSer, CsrWifiSmeCoexConfigGetReqDes, CsrWifiSmeCoexConfigGetReqSerFree },
- { CSR_WIFI_SME_COEX_CONFIG_SET_REQ, CsrWifiSmeCoexConfigSetReqSizeof, CsrWifiSmeCoexConfigSetReqSer, CsrWifiSmeCoexConfigSetReqDes, CsrWifiSmeCoexConfigSetReqSerFree },
- { CSR_WIFI_SME_COEX_INFO_GET_REQ, CsrWifiSmeCoexInfoGetReqSizeof, CsrWifiSmeCoexInfoGetReqSer, CsrWifiSmeCoexInfoGetReqDes, CsrWifiSmeCoexInfoGetReqSerFree },
- { CSR_WIFI_SME_CONNECT_REQ, CsrWifiSmeConnectReqSizeof, CsrWifiSmeConnectReqSer, CsrWifiSmeConnectReqDes, CsrWifiSmeConnectReqSerFree },
- { CSR_WIFI_SME_CONNECTION_CONFIG_GET_REQ, CsrWifiSmeConnectionConfigGetReqSizeof, CsrWifiSmeConnectionConfigGetReqSer, CsrWifiSmeConnectionConfigGetReqDes, CsrWifiSmeConnectionConfigGetReqSerFree },
- { CSR_WIFI_SME_CONNECTION_INFO_GET_REQ, CsrWifiSmeConnectionInfoGetReqSizeof, CsrWifiSmeConnectionInfoGetReqSer, CsrWifiSmeConnectionInfoGetReqDes, CsrWifiSmeConnectionInfoGetReqSerFree },
- { CSR_WIFI_SME_CONNECTION_STATS_GET_REQ, CsrWifiSmeConnectionStatsGetReqSizeof, CsrWifiSmeConnectionStatsGetReqSer, CsrWifiSmeConnectionStatsGetReqDes, CsrWifiSmeConnectionStatsGetReqSerFree },
- { CSR_WIFI_SME_DEACTIVATE_REQ, CsrWifiSmeDeactivateReqSizeof, CsrWifiSmeDeactivateReqSer, CsrWifiSmeDeactivateReqDes, CsrWifiSmeDeactivateReqSerFree },
- { CSR_WIFI_SME_DISCONNECT_REQ, CsrWifiSmeDisconnectReqSizeof, CsrWifiSmeDisconnectReqSer, CsrWifiSmeDisconnectReqDes, CsrWifiSmeDisconnectReqSerFree },
- { CSR_WIFI_SME_EVENT_MASK_SET_REQ, CsrWifiSmeEventMaskSetReqSizeof, CsrWifiSmeEventMaskSetReqSer, CsrWifiSmeEventMaskSetReqDes, CsrWifiSmeEventMaskSetReqSerFree },
- { CSR_WIFI_SME_HOST_CONFIG_GET_REQ, CsrWifiSmeHostConfigGetReqSizeof, CsrWifiSmeHostConfigGetReqSer, CsrWifiSmeHostConfigGetReqDes, CsrWifiSmeHostConfigGetReqSerFree },
- { CSR_WIFI_SME_HOST_CONFIG_SET_REQ, CsrWifiSmeHostConfigSetReqSizeof, CsrWifiSmeHostConfigSetReqSer, CsrWifiSmeHostConfigSetReqDes, CsrWifiSmeHostConfigSetReqSerFree },
- { CSR_WIFI_SME_KEY_REQ, CsrWifiSmeKeyReqSizeof, CsrWifiSmeKeyReqSer, CsrWifiSmeKeyReqDes, CsrWifiSmeKeyReqSerFree },
- { CSR_WIFI_SME_LINK_QUALITY_GET_REQ, CsrWifiSmeLinkQualityGetReqSizeof, CsrWifiSmeLinkQualityGetReqSer, CsrWifiSmeLinkQualityGetReqDes, CsrWifiSmeLinkQualityGetReqSerFree },
- { CSR_WIFI_SME_MIB_CONFIG_GET_REQ, CsrWifiSmeMibConfigGetReqSizeof, CsrWifiSmeMibConfigGetReqSer, CsrWifiSmeMibConfigGetReqDes, CsrWifiSmeMibConfigGetReqSerFree },
- { CSR_WIFI_SME_MIB_CONFIG_SET_REQ, CsrWifiSmeMibConfigSetReqSizeof, CsrWifiSmeMibConfigSetReqSer, CsrWifiSmeMibConfigSetReqDes, CsrWifiSmeMibConfigSetReqSerFree },
- { CSR_WIFI_SME_MIB_GET_NEXT_REQ, CsrWifiSmeMibGetNextReqSizeof, CsrWifiSmeMibGetNextReqSer, CsrWifiSmeMibGetNextReqDes, CsrWifiSmeMibGetNextReqSerFree },
- { CSR_WIFI_SME_MIB_GET_REQ, CsrWifiSmeMibGetReqSizeof, CsrWifiSmeMibGetReqSer, CsrWifiSmeMibGetReqDes, CsrWifiSmeMibGetReqSerFree },
- { CSR_WIFI_SME_MIB_SET_REQ, CsrWifiSmeMibSetReqSizeof, CsrWifiSmeMibSetReqSer, CsrWifiSmeMibSetReqDes, CsrWifiSmeMibSetReqSerFree },
- { CSR_WIFI_SME_MULTICAST_ADDRESS_REQ, CsrWifiSmeMulticastAddressReqSizeof, CsrWifiSmeMulticastAddressReqSer, CsrWifiSmeMulticastAddressReqDes, CsrWifiSmeMulticastAddressReqSerFree },
- { CSR_WIFI_SME_PACKET_FILTER_SET_REQ, CsrWifiSmePacketFilterSetReqSizeof, CsrWifiSmePacketFilterSetReqSer, CsrWifiSmePacketFilterSetReqDes, CsrWifiSmePacketFilterSetReqSerFree },
- { CSR_WIFI_SME_PERMANENT_MAC_ADDRESS_GET_REQ, CsrWifiSmePermanentMacAddressGetReqSizeof, CsrWifiSmePermanentMacAddressGetReqSer, CsrWifiSmePermanentMacAddressGetReqDes, CsrWifiSmePermanentMacAddressGetReqSerFree },
- { CSR_WIFI_SME_PMKID_REQ, CsrWifiSmePmkidReqSizeof, CsrWifiSmePmkidReqSer, CsrWifiSmePmkidReqDes, CsrWifiSmePmkidReqSerFree },
- { CSR_WIFI_SME_POWER_CONFIG_GET_REQ, CsrWifiSmePowerConfigGetReqSizeof, CsrWifiSmePowerConfigGetReqSer, CsrWifiSmePowerConfigGetReqDes, CsrWifiSmePowerConfigGetReqSerFree },
- { CSR_WIFI_SME_POWER_CONFIG_SET_REQ, CsrWifiSmePowerConfigSetReqSizeof, CsrWifiSmePowerConfigSetReqSer, CsrWifiSmePowerConfigSetReqDes, CsrWifiSmePowerConfigSetReqSerFree },
- { CSR_WIFI_SME_REGULATORY_DOMAIN_INFO_GET_REQ, CsrWifiSmeRegulatoryDomainInfoGetReqSizeof, CsrWifiSmeRegulatoryDomainInfoGetReqSer, CsrWifiSmeRegulatoryDomainInfoGetReqDes, CsrWifiSmeRegulatoryDomainInfoGetReqSerFree },
- { CSR_WIFI_SME_ROAMING_CONFIG_GET_REQ, CsrWifiSmeRoamingConfigGetReqSizeof, CsrWifiSmeRoamingConfigGetReqSer, CsrWifiSmeRoamingConfigGetReqDes, CsrWifiSmeRoamingConfigGetReqSerFree },
- { CSR_WIFI_SME_ROAMING_CONFIG_SET_REQ, CsrWifiSmeRoamingConfigSetReqSizeof, CsrWifiSmeRoamingConfigSetReqSer, CsrWifiSmeRoamingConfigSetReqDes, CsrWifiSmeRoamingConfigSetReqSerFree },
- { CSR_WIFI_SME_SCAN_CONFIG_GET_REQ, CsrWifiSmeScanConfigGetReqSizeof, CsrWifiSmeScanConfigGetReqSer, CsrWifiSmeScanConfigGetReqDes, CsrWifiSmeScanConfigGetReqSerFree },
- { CSR_WIFI_SME_SCAN_CONFIG_SET_REQ, CsrWifiSmeScanConfigSetReqSizeof, CsrWifiSmeScanConfigSetReqSer, CsrWifiSmeScanConfigSetReqDes, CsrWifiSmeScanConfigSetReqSerFree },
- { CSR_WIFI_SME_SCAN_FULL_REQ, CsrWifiSmeScanFullReqSizeof, CsrWifiSmeScanFullReqSer, CsrWifiSmeScanFullReqDes, CsrWifiSmeScanFullReqSerFree },
- { CSR_WIFI_SME_SCAN_RESULTS_FLUSH_REQ, CsrWifiSmeScanResultsFlushReqSizeof, CsrWifiSmeScanResultsFlushReqSer, CsrWifiSmeScanResultsFlushReqDes, CsrWifiSmeScanResultsFlushReqSerFree },
- { CSR_WIFI_SME_SCAN_RESULTS_GET_REQ, CsrWifiSmeScanResultsGetReqSizeof, CsrWifiSmeScanResultsGetReqSer, CsrWifiSmeScanResultsGetReqDes, CsrWifiSmeScanResultsGetReqSerFree },
- { CSR_WIFI_SME_SME_STA_CONFIG_GET_REQ, CsrWifiSmeSmeStaConfigGetReqSizeof, CsrWifiSmeSmeStaConfigGetReqSer, CsrWifiSmeSmeStaConfigGetReqDes, CsrWifiSmeSmeStaConfigGetReqSerFree },
- { CSR_WIFI_SME_SME_STA_CONFIG_SET_REQ, CsrWifiSmeSmeStaConfigSetReqSizeof, CsrWifiSmeSmeStaConfigSetReqSer, CsrWifiSmeSmeStaConfigSetReqDes, CsrWifiSmeSmeStaConfigSetReqSerFree },
- { CSR_WIFI_SME_STATION_MAC_ADDRESS_GET_REQ, CsrWifiSmeStationMacAddressGetReqSizeof, CsrWifiSmeStationMacAddressGetReqSer, CsrWifiSmeStationMacAddressGetReqDes, CsrWifiSmeStationMacAddressGetReqSerFree },
- { CSR_WIFI_SME_TSPEC_REQ, CsrWifiSmeTspecReqSizeof, CsrWifiSmeTspecReqSer, CsrWifiSmeTspecReqDes, CsrWifiSmeTspecReqSerFree },
- { CSR_WIFI_SME_VERSIONS_GET_REQ, CsrWifiSmeVersionsGetReqSizeof, CsrWifiSmeVersionsGetReqSer, CsrWifiSmeVersionsGetReqDes, CsrWifiSmeVersionsGetReqSerFree },
- { CSR_WIFI_SME_WIFI_FLIGHTMODE_REQ, CsrWifiSmeWifiFlightmodeReqSizeof, CsrWifiSmeWifiFlightmodeReqSer, CsrWifiSmeWifiFlightmodeReqDes, CsrWifiSmeWifiFlightmodeReqSerFree },
- { CSR_WIFI_SME_WIFI_OFF_REQ, CsrWifiSmeWifiOffReqSizeof, CsrWifiSmeWifiOffReqSer, CsrWifiSmeWifiOffReqDes, CsrWifiSmeWifiOffReqSerFree },
- { CSR_WIFI_SME_WIFI_ON_REQ, CsrWifiSmeWifiOnReqSizeof, CsrWifiSmeWifiOnReqSer, CsrWifiSmeWifiOnReqDes, CsrWifiSmeWifiOnReqSerFree },
- { CSR_WIFI_SME_CLOAKED_SSIDS_SET_REQ, CsrWifiSmeCloakedSsidsSetReqSizeof, CsrWifiSmeCloakedSsidsSetReqSer, CsrWifiSmeCloakedSsidsSetReqDes, CsrWifiSmeCloakedSsidsSetReqSerFree },
- { CSR_WIFI_SME_CLOAKED_SSIDS_GET_REQ, CsrWifiSmeCloakedSsidsGetReqSizeof, CsrWifiSmeCloakedSsidsGetReqSer, CsrWifiSmeCloakedSsidsGetReqDes, CsrWifiSmeCloakedSsidsGetReqSerFree },
- { CSR_WIFI_SME_SME_COMMON_CONFIG_GET_REQ, CsrWifiSmeSmeCommonConfigGetReqSizeof, CsrWifiSmeSmeCommonConfigGetReqSer, CsrWifiSmeSmeCommonConfigGetReqDes, CsrWifiSmeSmeCommonConfigGetReqSerFree },
- { CSR_WIFI_SME_SME_COMMON_CONFIG_SET_REQ, CsrWifiSmeSmeCommonConfigSetReqSizeof, CsrWifiSmeSmeCommonConfigSetReqSer, CsrWifiSmeSmeCommonConfigSetReqDes, CsrWifiSmeSmeCommonConfigSetReqSerFree },
- { CSR_WIFI_SME_INTERFACE_CAPABILITY_GET_REQ, CsrWifiSmeInterfaceCapabilityGetReqSizeof, CsrWifiSmeInterfaceCapabilityGetReqSer, CsrWifiSmeInterfaceCapabilityGetReqDes, CsrWifiSmeInterfaceCapabilityGetReqSerFree },
- { CSR_WIFI_SME_WPS_CONFIGURATION_REQ, CsrWifiSmeWpsConfigurationReqSizeof, CsrWifiSmeWpsConfigurationReqSer, CsrWifiSmeWpsConfigurationReqDes, CsrWifiSmeWpsConfigurationReqSerFree },
- { CSR_WIFI_SME_SET_REQ, CsrWifiSmeSetReqSizeof, CsrWifiSmeSetReqSer, CsrWifiSmeSetReqDes, CsrWifiSmeSetReqSerFree },
- { CSR_WIFI_SME_ACTIVATE_CFM, CsrWifiSmeActivateCfmSizeof, CsrWifiSmeActivateCfmSer, CsrWifiSmeActivateCfmDes, CsrWifiSmeActivateCfmSerFree },
- { CSR_WIFI_SME_ADHOC_CONFIG_GET_CFM, CsrWifiSmeAdhocConfigGetCfmSizeof, CsrWifiSmeAdhocConfigGetCfmSer, CsrWifiSmeAdhocConfigGetCfmDes, CsrWifiSmeAdhocConfigGetCfmSerFree },
- { CSR_WIFI_SME_ADHOC_CONFIG_SET_CFM, CsrWifiSmeAdhocConfigSetCfmSizeof, CsrWifiSmeAdhocConfigSetCfmSer, CsrWifiSmeAdhocConfigSetCfmDes, CsrWifiSmeAdhocConfigSetCfmSerFree },
- { CSR_WIFI_SME_ASSOCIATION_COMPLETE_IND, CsrWifiSmeAssociationCompleteIndSizeof, CsrWifiSmeAssociationCompleteIndSer, CsrWifiSmeAssociationCompleteIndDes, CsrWifiSmeAssociationCompleteIndSerFree },
- { CSR_WIFI_SME_ASSOCIATION_START_IND, CsrWifiSmeAssociationStartIndSizeof, CsrWifiSmeAssociationStartIndSer, CsrWifiSmeAssociationStartIndDes, CsrWifiSmeAssociationStartIndSerFree },
- { CSR_WIFI_SME_BLACKLIST_CFM, CsrWifiSmeBlacklistCfmSizeof, CsrWifiSmeBlacklistCfmSer, CsrWifiSmeBlacklistCfmDes, CsrWifiSmeBlacklistCfmSerFree },
- { CSR_WIFI_SME_CALIBRATION_DATA_GET_CFM, CsrWifiSmeCalibrationDataGetCfmSizeof, CsrWifiSmeCalibrationDataGetCfmSer, CsrWifiSmeCalibrationDataGetCfmDes, CsrWifiSmeCalibrationDataGetCfmSerFree },
- { CSR_WIFI_SME_CALIBRATION_DATA_SET_CFM, CsrWifiSmeCalibrationDataSetCfmSizeof, CsrWifiSmeCalibrationDataSetCfmSer, CsrWifiSmeCalibrationDataSetCfmDes, CsrWifiSmeCalibrationDataSetCfmSerFree },
- { CSR_WIFI_SME_CCX_CONFIG_GET_CFM, CsrWifiSmeCcxConfigGetCfmSizeof, CsrWifiSmeCcxConfigGetCfmSer, CsrWifiSmeCcxConfigGetCfmDes, CsrWifiSmeCcxConfigGetCfmSerFree },
- { CSR_WIFI_SME_CCX_CONFIG_SET_CFM, CsrWifiSmeCcxConfigSetCfmSizeof, CsrWifiSmeCcxConfigSetCfmSer, CsrWifiSmeCcxConfigSetCfmDes, CsrWifiSmeCcxConfigSetCfmSerFree },
- { CSR_WIFI_SME_COEX_CONFIG_GET_CFM, CsrWifiSmeCoexConfigGetCfmSizeof, CsrWifiSmeCoexConfigGetCfmSer, CsrWifiSmeCoexConfigGetCfmDes, CsrWifiSmeCoexConfigGetCfmSerFree },
- { CSR_WIFI_SME_COEX_CONFIG_SET_CFM, CsrWifiSmeCoexConfigSetCfmSizeof, CsrWifiSmeCoexConfigSetCfmSer, CsrWifiSmeCoexConfigSetCfmDes, CsrWifiSmeCoexConfigSetCfmSerFree },
- { CSR_WIFI_SME_COEX_INFO_GET_CFM, CsrWifiSmeCoexInfoGetCfmSizeof, CsrWifiSmeCoexInfoGetCfmSer, CsrWifiSmeCoexInfoGetCfmDes, CsrWifiSmeCoexInfoGetCfmSerFree },
- { CSR_WIFI_SME_CONNECT_CFM, CsrWifiSmeConnectCfmSizeof, CsrWifiSmeConnectCfmSer, CsrWifiSmeConnectCfmDes, CsrWifiSmeConnectCfmSerFree },
- { CSR_WIFI_SME_CONNECTION_CONFIG_GET_CFM, CsrWifiSmeConnectionConfigGetCfmSizeof, CsrWifiSmeConnectionConfigGetCfmSer, CsrWifiSmeConnectionConfigGetCfmDes, CsrWifiSmeConnectionConfigGetCfmSerFree },
- { CSR_WIFI_SME_CONNECTION_INFO_GET_CFM, CsrWifiSmeConnectionInfoGetCfmSizeof, CsrWifiSmeConnectionInfoGetCfmSer, CsrWifiSmeConnectionInfoGetCfmDes, CsrWifiSmeConnectionInfoGetCfmSerFree },
- { CSR_WIFI_SME_CONNECTION_QUALITY_IND, CsrWifiSmeConnectionQualityIndSizeof, CsrWifiSmeConnectionQualityIndSer, CsrWifiSmeConnectionQualityIndDes, CsrWifiSmeConnectionQualityIndSerFree },
- { CSR_WIFI_SME_CONNECTION_STATS_GET_CFM, CsrWifiSmeConnectionStatsGetCfmSizeof, CsrWifiSmeConnectionStatsGetCfmSer, CsrWifiSmeConnectionStatsGetCfmDes, CsrWifiSmeConnectionStatsGetCfmSerFree },
- { CSR_WIFI_SME_DEACTIVATE_CFM, CsrWifiSmeDeactivateCfmSizeof, CsrWifiSmeDeactivateCfmSer, CsrWifiSmeDeactivateCfmDes, CsrWifiSmeDeactivateCfmSerFree },
- { CSR_WIFI_SME_DISCONNECT_CFM, CsrWifiSmeDisconnectCfmSizeof, CsrWifiSmeDisconnectCfmSer, CsrWifiSmeDisconnectCfmDes, CsrWifiSmeDisconnectCfmSerFree },
- { CSR_WIFI_SME_EVENT_MASK_SET_CFM, CsrWifiSmeEventMaskSetCfmSizeof, CsrWifiSmeEventMaskSetCfmSer, CsrWifiSmeEventMaskSetCfmDes, CsrWifiSmeEventMaskSetCfmSerFree },
- { CSR_WIFI_SME_HOST_CONFIG_GET_CFM, CsrWifiSmeHostConfigGetCfmSizeof, CsrWifiSmeHostConfigGetCfmSer, CsrWifiSmeHostConfigGetCfmDes, CsrWifiSmeHostConfigGetCfmSerFree },
- { CSR_WIFI_SME_HOST_CONFIG_SET_CFM, CsrWifiSmeHostConfigSetCfmSizeof, CsrWifiSmeHostConfigSetCfmSer, CsrWifiSmeHostConfigSetCfmDes, CsrWifiSmeHostConfigSetCfmSerFree },
- { CSR_WIFI_SME_IBSS_STATION_IND, CsrWifiSmeIbssStationIndSizeof, CsrWifiSmeIbssStationIndSer, CsrWifiSmeIbssStationIndDes, CsrWifiSmeIbssStationIndSerFree },
- { CSR_WIFI_SME_KEY_CFM, CsrWifiSmeKeyCfmSizeof, CsrWifiSmeKeyCfmSer, CsrWifiSmeKeyCfmDes, CsrWifiSmeKeyCfmSerFree },
- { CSR_WIFI_SME_LINK_QUALITY_GET_CFM, CsrWifiSmeLinkQualityGetCfmSizeof, CsrWifiSmeLinkQualityGetCfmSer, CsrWifiSmeLinkQualityGetCfmDes, CsrWifiSmeLinkQualityGetCfmSerFree },
- { CSR_WIFI_SME_MEDIA_STATUS_IND, CsrWifiSmeMediaStatusIndSizeof, CsrWifiSmeMediaStatusIndSer, CsrWifiSmeMediaStatusIndDes, CsrWifiSmeMediaStatusIndSerFree },
- { CSR_WIFI_SME_MIB_CONFIG_GET_CFM, CsrWifiSmeMibConfigGetCfmSizeof, CsrWifiSmeMibConfigGetCfmSer, CsrWifiSmeMibConfigGetCfmDes, CsrWifiSmeMibConfigGetCfmSerFree },
- { CSR_WIFI_SME_MIB_CONFIG_SET_CFM, CsrWifiSmeMibConfigSetCfmSizeof, CsrWifiSmeMibConfigSetCfmSer, CsrWifiSmeMibConfigSetCfmDes, CsrWifiSmeMibConfigSetCfmSerFree },
- { CSR_WIFI_SME_MIB_GET_CFM, CsrWifiSmeMibGetCfmSizeof, CsrWifiSmeMibGetCfmSer, CsrWifiSmeMibGetCfmDes, CsrWifiSmeMibGetCfmSerFree },
- { CSR_WIFI_SME_MIB_GET_NEXT_CFM, CsrWifiSmeMibGetNextCfmSizeof, CsrWifiSmeMibGetNextCfmSer, CsrWifiSmeMibGetNextCfmDes, CsrWifiSmeMibGetNextCfmSerFree },
- { CSR_WIFI_SME_MIB_SET_CFM, CsrWifiSmeMibSetCfmSizeof, CsrWifiSmeMibSetCfmSer, CsrWifiSmeMibSetCfmDes, CsrWifiSmeMibSetCfmSerFree },
- { CSR_WIFI_SME_MIC_FAILURE_IND, CsrWifiSmeMicFailureIndSizeof, CsrWifiSmeMicFailureIndSer, CsrWifiSmeMicFailureIndDes, CsrWifiSmeMicFailureIndSerFree },
- { CSR_WIFI_SME_MULTICAST_ADDRESS_CFM, CsrWifiSmeMulticastAddressCfmSizeof, CsrWifiSmeMulticastAddressCfmSer, CsrWifiSmeMulticastAddressCfmDes, CsrWifiSmeMulticastAddressCfmSerFree },
- { CSR_WIFI_SME_PACKET_FILTER_SET_CFM, CsrWifiSmePacketFilterSetCfmSizeof, CsrWifiSmePacketFilterSetCfmSer, CsrWifiSmePacketFilterSetCfmDes, CsrWifiSmePacketFilterSetCfmSerFree },
- { CSR_WIFI_SME_PERMANENT_MAC_ADDRESS_GET_CFM, CsrWifiSmePermanentMacAddressGetCfmSizeof, CsrWifiSmePermanentMacAddressGetCfmSer, CsrWifiSmePermanentMacAddressGetCfmDes, CsrWifiSmePermanentMacAddressGetCfmSerFree },
- { CSR_WIFI_SME_PMKID_CANDIDATE_LIST_IND, CsrWifiSmePmkidCandidateListIndSizeof, CsrWifiSmePmkidCandidateListIndSer, CsrWifiSmePmkidCandidateListIndDes, CsrWifiSmePmkidCandidateListIndSerFree },
- { CSR_WIFI_SME_PMKID_CFM, CsrWifiSmePmkidCfmSizeof, CsrWifiSmePmkidCfmSer, CsrWifiSmePmkidCfmDes, CsrWifiSmePmkidCfmSerFree },
- { CSR_WIFI_SME_POWER_CONFIG_GET_CFM, CsrWifiSmePowerConfigGetCfmSizeof, CsrWifiSmePowerConfigGetCfmSer, CsrWifiSmePowerConfigGetCfmDes, CsrWifiSmePowerConfigGetCfmSerFree },
- { CSR_WIFI_SME_POWER_CONFIG_SET_CFM, CsrWifiSmePowerConfigSetCfmSizeof, CsrWifiSmePowerConfigSetCfmSer, CsrWifiSmePowerConfigSetCfmDes, CsrWifiSmePowerConfigSetCfmSerFree },
- { CSR_WIFI_SME_REGULATORY_DOMAIN_INFO_GET_CFM, CsrWifiSmeRegulatoryDomainInfoGetCfmSizeof, CsrWifiSmeRegulatoryDomainInfoGetCfmSer, CsrWifiSmeRegulatoryDomainInfoGetCfmDes, CsrWifiSmeRegulatoryDomainInfoGetCfmSerFree },
- { CSR_WIFI_SME_ROAM_COMPLETE_IND, CsrWifiSmeRoamCompleteIndSizeof, CsrWifiSmeRoamCompleteIndSer, CsrWifiSmeRoamCompleteIndDes, CsrWifiSmeRoamCompleteIndSerFree },
- { CSR_WIFI_SME_ROAM_START_IND, CsrWifiSmeRoamStartIndSizeof, CsrWifiSmeRoamStartIndSer, CsrWifiSmeRoamStartIndDes, CsrWifiSmeRoamStartIndSerFree },
- { CSR_WIFI_SME_ROAMING_CONFIG_GET_CFM, CsrWifiSmeRoamingConfigGetCfmSizeof, CsrWifiSmeRoamingConfigGetCfmSer, CsrWifiSmeRoamingConfigGetCfmDes, CsrWifiSmeRoamingConfigGetCfmSerFree },
- { CSR_WIFI_SME_ROAMING_CONFIG_SET_CFM, CsrWifiSmeRoamingConfigSetCfmSizeof, CsrWifiSmeRoamingConfigSetCfmSer, CsrWifiSmeRoamingConfigSetCfmDes, CsrWifiSmeRoamingConfigSetCfmSerFree },
- { CSR_WIFI_SME_SCAN_CONFIG_GET_CFM, CsrWifiSmeScanConfigGetCfmSizeof, CsrWifiSmeScanConfigGetCfmSer, CsrWifiSmeScanConfigGetCfmDes, CsrWifiSmeScanConfigGetCfmSerFree },
- { CSR_WIFI_SME_SCAN_CONFIG_SET_CFM, CsrWifiSmeScanConfigSetCfmSizeof, CsrWifiSmeScanConfigSetCfmSer, CsrWifiSmeScanConfigSetCfmDes, CsrWifiSmeScanConfigSetCfmSerFree },
- { CSR_WIFI_SME_SCAN_FULL_CFM, CsrWifiSmeScanFullCfmSizeof, CsrWifiSmeScanFullCfmSer, CsrWifiSmeScanFullCfmDes, CsrWifiSmeScanFullCfmSerFree },
- { CSR_WIFI_SME_SCAN_RESULT_IND, CsrWifiSmeScanResultIndSizeof, CsrWifiSmeScanResultIndSer, CsrWifiSmeScanResultIndDes, CsrWifiSmeScanResultIndSerFree },
- { CSR_WIFI_SME_SCAN_RESULTS_FLUSH_CFM, CsrWifiSmeScanResultsFlushCfmSizeof, CsrWifiSmeScanResultsFlushCfmSer, CsrWifiSmeScanResultsFlushCfmDes, CsrWifiSmeScanResultsFlushCfmSerFree },
- { CSR_WIFI_SME_SCAN_RESULTS_GET_CFM, CsrWifiSmeScanResultsGetCfmSizeof, CsrWifiSmeScanResultsGetCfmSer, CsrWifiSmeScanResultsGetCfmDes, CsrWifiSmeScanResultsGetCfmSerFree },
- { CSR_WIFI_SME_SME_STA_CONFIG_GET_CFM, CsrWifiSmeSmeStaConfigGetCfmSizeof, CsrWifiSmeSmeStaConfigGetCfmSer, CsrWifiSmeSmeStaConfigGetCfmDes, CsrWifiSmeSmeStaConfigGetCfmSerFree },
- { CSR_WIFI_SME_SME_STA_CONFIG_SET_CFM, CsrWifiSmeSmeStaConfigSetCfmSizeof, CsrWifiSmeSmeStaConfigSetCfmSer, CsrWifiSmeSmeStaConfigSetCfmDes, CsrWifiSmeSmeStaConfigSetCfmSerFree },
- { CSR_WIFI_SME_STATION_MAC_ADDRESS_GET_CFM, CsrWifiSmeStationMacAddressGetCfmSizeof, CsrWifiSmeStationMacAddressGetCfmSer, CsrWifiSmeStationMacAddressGetCfmDes, CsrWifiSmeStationMacAddressGetCfmSerFree },
- { CSR_WIFI_SME_TSPEC_IND, CsrWifiSmeTspecIndSizeof, CsrWifiSmeTspecIndSer, CsrWifiSmeTspecIndDes, CsrWifiSmeTspecIndSerFree },
- { CSR_WIFI_SME_TSPEC_CFM, CsrWifiSmeTspecCfmSizeof, CsrWifiSmeTspecCfmSer, CsrWifiSmeTspecCfmDes, CsrWifiSmeTspecCfmSerFree },
- { CSR_WIFI_SME_VERSIONS_GET_CFM, CsrWifiSmeVersionsGetCfmSizeof, CsrWifiSmeVersionsGetCfmSer, CsrWifiSmeVersionsGetCfmDes, CsrWifiSmeVersionsGetCfmSerFree },
- { CSR_WIFI_SME_WIFI_FLIGHTMODE_CFM, CsrWifiSmeWifiFlightmodeCfmSizeof, CsrWifiSmeWifiFlightmodeCfmSer, CsrWifiSmeWifiFlightmodeCfmDes, CsrWifiSmeWifiFlightmodeCfmSerFree },
- { CSR_WIFI_SME_WIFI_OFF_IND, CsrWifiSmeWifiOffIndSizeof, CsrWifiSmeWifiOffIndSer, CsrWifiSmeWifiOffIndDes, CsrWifiSmeWifiOffIndSerFree },
- { CSR_WIFI_SME_WIFI_OFF_CFM, CsrWifiSmeWifiOffCfmSizeof, CsrWifiSmeWifiOffCfmSer, CsrWifiSmeWifiOffCfmDes, CsrWifiSmeWifiOffCfmSerFree },
- { CSR_WIFI_SME_WIFI_ON_CFM, CsrWifiSmeWifiOnCfmSizeof, CsrWifiSmeWifiOnCfmSer, CsrWifiSmeWifiOnCfmDes, CsrWifiSmeWifiOnCfmSerFree },
- { CSR_WIFI_SME_CLOAKED_SSIDS_SET_CFM, CsrWifiSmeCloakedSsidsSetCfmSizeof, CsrWifiSmeCloakedSsidsSetCfmSer, CsrWifiSmeCloakedSsidsSetCfmDes, CsrWifiSmeCloakedSsidsSetCfmSerFree },
- { CSR_WIFI_SME_CLOAKED_SSIDS_GET_CFM, CsrWifiSmeCloakedSsidsGetCfmSizeof, CsrWifiSmeCloakedSsidsGetCfmSer, CsrWifiSmeCloakedSsidsGetCfmDes, CsrWifiSmeCloakedSsidsGetCfmSerFree },
- { CSR_WIFI_SME_WIFI_ON_IND, CsrWifiSmeWifiOnIndSizeof, CsrWifiSmeWifiOnIndSer, CsrWifiSmeWifiOnIndDes, CsrWifiSmeWifiOnIndSerFree },
- { CSR_WIFI_SME_SME_COMMON_CONFIG_GET_CFM, CsrWifiSmeSmeCommonConfigGetCfmSizeof, CsrWifiSmeSmeCommonConfigGetCfmSer, CsrWifiSmeSmeCommonConfigGetCfmDes, CsrWifiSmeSmeCommonConfigGetCfmSerFree },
- { CSR_WIFI_SME_SME_COMMON_CONFIG_SET_CFM, CsrWifiSmeSmeCommonConfigSetCfmSizeof, CsrWifiSmeSmeCommonConfigSetCfmSer, CsrWifiSmeSmeCommonConfigSetCfmDes, CsrWifiSmeSmeCommonConfigSetCfmSerFree },
- { CSR_WIFI_SME_INTERFACE_CAPABILITY_GET_CFM, CsrWifiSmeInterfaceCapabilityGetCfmSizeof, CsrWifiSmeInterfaceCapabilityGetCfmSer, CsrWifiSmeInterfaceCapabilityGetCfmDes, CsrWifiSmeInterfaceCapabilityGetCfmSerFree },
- { CSR_WIFI_SME_ERROR_IND, CsrWifiSmeErrorIndSizeof, CsrWifiSmeErrorIndSer, CsrWifiSmeErrorIndDes, CsrWifiSmeErrorIndSerFree },
- { CSR_WIFI_SME_INFO_IND, CsrWifiSmeInfoIndSizeof, CsrWifiSmeInfoIndSer, CsrWifiSmeInfoIndDes, CsrWifiSmeInfoIndSerFree },
- { CSR_WIFI_SME_CORE_DUMP_IND, CsrWifiSmeCoreDumpIndSizeof, CsrWifiSmeCoreDumpIndSer, CsrWifiSmeCoreDumpIndDes, CsrWifiSmeCoreDumpIndSerFree },
- { CSR_WIFI_SME_AMP_STATUS_CHANGE_IND, CsrWifiSmeAmpStatusChangeIndSizeof, CsrWifiSmeAmpStatusChangeIndSer, CsrWifiSmeAmpStatusChangeIndDes, CsrWifiSmeAmpStatusChangeIndSerFree },
- { CSR_WIFI_SME_WPS_CONFIGURATION_CFM, CsrWifiSmeWpsConfigurationCfmSizeof, CsrWifiSmeWpsConfigurationCfmSer, CsrWifiSmeWpsConfigurationCfmDes, CsrWifiSmeWpsConfigurationCfmSerFree },
-
- { 0, NULL, NULL, NULL, NULL },
-};
-
-CsrMsgConvMsgEntry* CsrWifiSmeConverterLookup(CsrMsgConvMsgEntry *ce, u16 msgType)
-{
- if (msgType & CSR_PRIM_UPSTREAM)
- {
- u16 idx = (msgType & ~CSR_PRIM_UPSTREAM) + CSR_WIFI_SME_PRIM_DOWNSTREAM_COUNT;
- if (idx < (CSR_WIFI_SME_PRIM_UPSTREAM_COUNT + CSR_WIFI_SME_PRIM_DOWNSTREAM_COUNT) &&
- csrwifisme_conv_lut[idx].msgType == msgType)
- {
- return &csrwifisme_conv_lut[idx];
- }
- }
- else
- {
- if (msgType < CSR_WIFI_SME_PRIM_DOWNSTREAM_COUNT &&
- csrwifisme_conv_lut[msgType].msgType == msgType)
- {
- return &csrwifisme_conv_lut[msgType];
- }
- }
- return NULL;
-}
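The lookup relies on the table layout: downstream entries come first and upstream entries immediately after, so an upstream type indexes the table at its low bits plus the downstream count. The fragment below restates that index rule purely for illustration.

#include "csr_wifi_sme_prim.h"

/* Illustrative only: the index rule used by CsrWifiSmeConverterLookup(). */
static inline u16 example_conv_lut_index(u16 msgType)
{
    if (msgType & CSR_PRIM_UPSTREAM)
        return (u16)((msgType & ~CSR_PRIM_UPSTREAM) +
                     CSR_WIFI_SME_PRIM_DOWNSTREAM_COUNT);
    return msgType;    /* downstream types index the table directly */
}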
-
-
-void CsrWifiSmeConverterInit(void)
-{
- CsrMsgConvInsert(CSR_WIFI_SME_PRIM, csrwifisme_conv_lut);
- CsrMsgConvCustomLookupRegister(CSR_WIFI_SME_PRIM, CsrWifiSmeConverterLookup);
-}
-
-
-#ifdef CSR_LOG_ENABLE
-static const CsrLogPrimitiveInformation csrwifisme_conv_info = {
- CSR_WIFI_SME_PRIM,
- (char *)"CSR_WIFI_SME_PRIM",
- csrwifisme_conv_lut
-};
-const CsrLogPrimitiveInformation* CsrWifiSmeTechInfoGet(void)
-{
- return &csrwifisme_conv_info;
-}
-
-
-#endif /* CSR_LOG_ENABLE */
-#endif /* EXCLUDE_CSR_WIFI_SME_MODULE */
diff --git a/drivers/staging/csr/csr_wifi_sme_converter_init.h b/drivers/staging/csr/csr_wifi_sme_converter_init.h
deleted file mode 100644
index ba5e4b4..0000000
--- a/drivers/staging/csr/csr_wifi_sme_converter_init.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#ifndef CSR_WIFI_SME_CONVERTER_INIT_H__
-#define CSR_WIFI_SME_CONVERTER_INIT_H__
-
-#ifndef EXCLUDE_CSR_WIFI_SME_MODULE
-
-#include "csr_msgconv.h"
-
-#ifdef CSR_LOG_ENABLE
-#include "csr_log.h"
-
-extern const CsrLogPrimitiveInformation* CsrWifiSmeTechInfoGet(void);
-#endif /* CSR_LOG_ENABLE */
-
-extern void CsrWifiSmeConverterInit(void);
-
-#else /* EXCLUDE_CSR_WIFI_SME_MODULE */
-
-#define CsrWifiSmeConverterInit()
-
-#endif /* EXCLUDE_CSR_WIFI_SME_MODULE */
-
-#endif /* CSR_WIFI_SME_CONVERTER_INIT_H__ */
diff --git a/drivers/staging/csr/csr_wifi_sme_free_downstream_contents.c b/drivers/staging/csr/csr_wifi_sme_free_downstream_contents.c
deleted file mode 100644
index 03b5ddb..0000000
--- a/drivers/staging/csr/csr_wifi_sme_free_downstream_contents.c
+++ /dev/null
@@ -1,187 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-#include <linux/slab.h>
-#include "csr_wifi_sme_prim.h"
-#include "csr_wifi_sme_lib.h"
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrWifiSmeFreeDownstreamMessageContents
- *
- * DESCRIPTION
- *
- *
- * PARAMETERS
- * eventClass: only the value CSR_WIFI_SME_PRIM will be handled
- * message: the message to free
- *----------------------------------------------------------------------------*/
-void CsrWifiSmeFreeDownstreamMessageContents(u16 eventClass, void *message)
-{
- if (eventClass != CSR_WIFI_SME_PRIM)
- {
- return;
- }
- if (NULL == message)
- {
- return;
- }
-
- switch (*((CsrWifiSmePrim *) message))
- {
- case CSR_WIFI_SME_BLACKLIST_REQ:
- {
- CsrWifiSmeBlacklistReq *p = (CsrWifiSmeBlacklistReq *)message;
- kfree(p->setAddresses);
- p->setAddresses = NULL;
- break;
- }
- case CSR_WIFI_SME_CALIBRATION_DATA_SET_REQ:
- {
- CsrWifiSmeCalibrationDataSetReq *p = (CsrWifiSmeCalibrationDataSetReq *)message;
- kfree(p->calibrationData);
- p->calibrationData = NULL;
- break;
- }
- case CSR_WIFI_SME_CONNECT_REQ:
- {
- CsrWifiSmeConnectReq *p = (CsrWifiSmeConnectReq *)message;
- kfree(p->connectionConfig.mlmeAssociateReqInformationElements);
- p->connectionConfig.mlmeAssociateReqInformationElements = NULL;
- break;
- }
- case CSR_WIFI_SME_MIB_GET_NEXT_REQ:
- {
- CsrWifiSmeMibGetNextReq *p = (CsrWifiSmeMibGetNextReq *)message;
- kfree(p->mibAttribute);
- p->mibAttribute = NULL;
- break;
- }
- case CSR_WIFI_SME_MIB_GET_REQ:
- {
- CsrWifiSmeMibGetReq *p = (CsrWifiSmeMibGetReq *)message;
- kfree(p->mibAttribute);
- p->mibAttribute = NULL;
- break;
- }
- case CSR_WIFI_SME_MIB_SET_REQ:
- {
- CsrWifiSmeMibSetReq *p = (CsrWifiSmeMibSetReq *)message;
- kfree(p->mibAttribute);
- p->mibAttribute = NULL;
- break;
- }
- case CSR_WIFI_SME_MULTICAST_ADDRESS_REQ:
- {
- CsrWifiSmeMulticastAddressReq *p = (CsrWifiSmeMulticastAddressReq *)message;
- kfree(p->setAddresses);
- p->setAddresses = NULL;
- break;
- }
- case CSR_WIFI_SME_PACKET_FILTER_SET_REQ:
- {
- CsrWifiSmePacketFilterSetReq *p = (CsrWifiSmePacketFilterSetReq *)message;
- kfree(p->filter);
- p->filter = NULL;
- break;
- }
- case CSR_WIFI_SME_PMKID_REQ:
- {
- CsrWifiSmePmkidReq *p = (CsrWifiSmePmkidReq *)message;
- kfree(p->setPmkids);
- p->setPmkids = NULL;
- break;
- }
- case CSR_WIFI_SME_SCAN_CONFIG_SET_REQ:
- {
- CsrWifiSmeScanConfigSetReq *p = (CsrWifiSmeScanConfigSetReq *)message;
- kfree(p->scanConfig.passiveChannelList);
- p->scanConfig.passiveChannelList = NULL;
- break;
- }
- case CSR_WIFI_SME_SCAN_FULL_REQ:
- {
- CsrWifiSmeScanFullReq *p = (CsrWifiSmeScanFullReq *)message;
- kfree(p->ssid);
- p->ssid = NULL;
- kfree(p->channelList);
- p->channelList = NULL;
- kfree(p->probeIe);
- p->probeIe = NULL;
- break;
- }
- case CSR_WIFI_SME_TSPEC_REQ:
- {
- CsrWifiSmeTspecReq *p = (CsrWifiSmeTspecReq *)message;
- kfree(p->tspec);
- p->tspec = NULL;
- kfree(p->tclas);
- p->tclas = NULL;
- break;
- }
- case CSR_WIFI_SME_WIFI_FLIGHTMODE_REQ:
- {
- CsrWifiSmeWifiFlightmodeReq *p = (CsrWifiSmeWifiFlightmodeReq *)message;
- {
- u16 i1;
- for (i1 = 0; i1 < p->mibFilesCount; i1++)
- {
- kfree(p->mibFiles[i1].data);
- p->mibFiles[i1].data = NULL;
- }
- }
- kfree(p->mibFiles);
- p->mibFiles = NULL;
- break;
- }
- case CSR_WIFI_SME_WIFI_ON_REQ:
- {
- CsrWifiSmeWifiOnReq *p = (CsrWifiSmeWifiOnReq *)message;
- {
- u16 i1;
- for (i1 = 0; i1 < p->mibFilesCount; i1++)
- {
- kfree(p->mibFiles[i1].data);
- p->mibFiles[i1].data = NULL;
- }
- }
- kfree(p->mibFiles);
- p->mibFiles = NULL;
- break;
- }
- case CSR_WIFI_SME_CLOAKED_SSIDS_SET_REQ:
- {
- CsrWifiSmeCloakedSsidsSetReq *p = (CsrWifiSmeCloakedSsidsSetReq *)message;
- kfree(p->cloakedSsids.cloakedSsids);
- p->cloakedSsids.cloakedSsids = NULL;
- break;
- }
- case CSR_WIFI_SME_WPS_CONFIGURATION_REQ:
- {
- CsrWifiSmeWpsConfigurationReq *p = (CsrWifiSmeWpsConfigurationReq *)message;
- kfree(p->wpsConfig.secondaryDeviceType);
- p->wpsConfig.secondaryDeviceType = NULL;
- break;
- }
- case CSR_WIFI_SME_SET_REQ:
- {
- CsrWifiSmeSetReq *p = (CsrWifiSmeSetReq *)message;
- kfree(p->data);
- p->data = NULL;
- break;
- }
-
- default:
- break;
- }
-}
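Because the generated code above sets every pointer to NULL after kfree(), a repeated free of the same message is harmless. A hypothetical caller discarding a request might look like this; the helper name and the assumption that the envelope was kmalloc'ed are illustrative only.

#include <linux/slab.h>
#include "csr_wifi_sme_prim.h"
#include "csr_wifi_sme_lib.h"

/* Illustrative only: free variable-length contents, then the envelope. */
static void example_discard_connect_req(CsrWifiSmeConnectReq *req)
{
    CsrWifiSmeFreeDownstreamMessageContents(CSR_WIFI_SME_PRIM, req);
    kfree(req);    /* assumes the message envelope itself was kmalloc'ed */
}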
-
-
diff --git a/drivers/staging/csr/csr_wifi_sme_free_upstream_contents.c b/drivers/staging/csr/csr_wifi_sme_free_upstream_contents.c
deleted file mode 100644
index c04767b..0000000
--- a/drivers/staging/csr/csr_wifi_sme_free_upstream_contents.c
+++ /dev/null
@@ -1,275 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-#include <linux/slab.h>
-#include "csr_wifi_sme_prim.h"
-#include "csr_wifi_sme_lib.h"
-
-/*----------------------------------------------------------------------------*
- * NAME
- * CsrWifiSmeFreeUpstreamMessageContents
- *
- * DESCRIPTION
- *
- *
- * PARAMETERS
- * eventClass: only the value CSR_WIFI_SME_PRIM will be handled
- * message: the message to free
- *----------------------------------------------------------------------------*/
-void CsrWifiSmeFreeUpstreamMessageContents(u16 eventClass, void *message)
-{
- if (eventClass != CSR_WIFI_SME_PRIM)
- {
- return;
- }
- if (NULL == message)
- {
- return;
- }
-
- switch (*((CsrWifiSmePrim *) message))
- {
- case CSR_WIFI_SME_ASSOCIATION_COMPLETE_IND:
- {
- CsrWifiSmeAssociationCompleteInd *p = (CsrWifiSmeAssociationCompleteInd *)message;
- kfree(p->connectionInfo.beaconFrame);
- p->connectionInfo.beaconFrame = NULL;
- kfree(p->connectionInfo.associationReqFrame);
- p->connectionInfo.associationReqFrame = NULL;
- kfree(p->connectionInfo.associationRspFrame);
- p->connectionInfo.associationRspFrame = NULL;
- kfree(p->connectionInfo.assocScanInfoElements);
- p->connectionInfo.assocScanInfoElements = NULL;
- kfree(p->connectionInfo.assocReqInfoElements);
- p->connectionInfo.assocReqInfoElements = NULL;
- kfree(p->connectionInfo.assocRspInfoElements);
- p->connectionInfo.assocRspInfoElements = NULL;
- break;
- }
- case CSR_WIFI_SME_BLACKLIST_CFM:
- {
- CsrWifiSmeBlacklistCfm *p = (CsrWifiSmeBlacklistCfm *)message;
- kfree(p->getAddresses);
- p->getAddresses = NULL;
- break;
- }
- case CSR_WIFI_SME_CALIBRATION_DATA_GET_CFM:
- {
- CsrWifiSmeCalibrationDataGetCfm *p = (CsrWifiSmeCalibrationDataGetCfm *)message;
- kfree(p->calibrationData);
- p->calibrationData = NULL;
- break;
- }
- case CSR_WIFI_SME_CONNECTION_CONFIG_GET_CFM:
- {
- CsrWifiSmeConnectionConfigGetCfm *p = (CsrWifiSmeConnectionConfigGetCfm *)message;
- kfree(p->connectionConfig.mlmeAssociateReqInformationElements);
- p->connectionConfig.mlmeAssociateReqInformationElements = NULL;
- break;
- }
- case CSR_WIFI_SME_CONNECTION_INFO_GET_CFM:
- {
- CsrWifiSmeConnectionInfoGetCfm *p = (CsrWifiSmeConnectionInfoGetCfm *)message;
- kfree(p->connectionInfo.beaconFrame);
- p->connectionInfo.beaconFrame = NULL;
- kfree(p->connectionInfo.associationReqFrame);
- p->connectionInfo.associationReqFrame = NULL;
- kfree(p->connectionInfo.associationRspFrame);
- p->connectionInfo.associationRspFrame = NULL;
- kfree(p->connectionInfo.assocScanInfoElements);
- p->connectionInfo.assocScanInfoElements = NULL;
- kfree(p->connectionInfo.assocReqInfoElements);
- p->connectionInfo.assocReqInfoElements = NULL;
- kfree(p->connectionInfo.assocRspInfoElements);
- p->connectionInfo.assocRspInfoElements = NULL;
- break;
- }
- case CSR_WIFI_SME_MEDIA_STATUS_IND:
- {
- CsrWifiSmeMediaStatusInd *p = (CsrWifiSmeMediaStatusInd *)message;
- kfree(p->connectionInfo.beaconFrame);
- p->connectionInfo.beaconFrame = NULL;
- kfree(p->connectionInfo.associationReqFrame);
- p->connectionInfo.associationReqFrame = NULL;
- kfree(p->connectionInfo.associationRspFrame);
- p->connectionInfo.associationRspFrame = NULL;
- kfree(p->connectionInfo.assocScanInfoElements);
- p->connectionInfo.assocScanInfoElements = NULL;
- kfree(p->connectionInfo.assocReqInfoElements);
- p->connectionInfo.assocReqInfoElements = NULL;
- kfree(p->connectionInfo.assocRspInfoElements);
- p->connectionInfo.assocRspInfoElements = NULL;
- break;
- }
- case CSR_WIFI_SME_MIB_GET_CFM:
- {
- CsrWifiSmeMibGetCfm *p = (CsrWifiSmeMibGetCfm *)message;
- kfree(p->mibAttribute);
- p->mibAttribute = NULL;
- break;
- }
- case CSR_WIFI_SME_MIB_GET_NEXT_CFM:
- {
- CsrWifiSmeMibGetNextCfm *p = (CsrWifiSmeMibGetNextCfm *)message;
- kfree(p->mibAttribute);
- p->mibAttribute = NULL;
- break;
- }
- case CSR_WIFI_SME_MULTICAST_ADDRESS_CFM:
- {
- CsrWifiSmeMulticastAddressCfm *p = (CsrWifiSmeMulticastAddressCfm *)message;
- kfree(p->getAddresses);
- p->getAddresses = NULL;
- break;
- }
- case CSR_WIFI_SME_PMKID_CANDIDATE_LIST_IND:
- {
- CsrWifiSmePmkidCandidateListInd *p = (CsrWifiSmePmkidCandidateListInd *)message;
- kfree(p->pmkidCandidates);
- p->pmkidCandidates = NULL;
- break;
- }
- case CSR_WIFI_SME_PMKID_CFM:
- {
- CsrWifiSmePmkidCfm *p = (CsrWifiSmePmkidCfm *)message;
- kfree(p->getPmkids);
- p->getPmkids = NULL;
- break;
- }
- case CSR_WIFI_SME_SCAN_CONFIG_GET_CFM:
- {
- CsrWifiSmeScanConfigGetCfm *p = (CsrWifiSmeScanConfigGetCfm *)message;
- kfree(p->scanConfig.passiveChannelList);
- p->scanConfig.passiveChannelList = NULL;
- break;
- }
- case CSR_WIFI_SME_SCAN_RESULT_IND:
- {
- CsrWifiSmeScanResultInd *p = (CsrWifiSmeScanResultInd *)message;
- kfree(p->result.informationElements);
- p->result.informationElements = NULL;
- switch (p->result.p2pDeviceRole)
- {
- case CSR_WIFI_SME_P2P_ROLE_GO:
- {
- u16 i4;
- for (i4 = 0; i4 < p->result.deviceInfo.groupInfo.p2pClientInfoCount; i4++)
- {
- kfree(p->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType);
- p->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType = NULL;
- }
- }
- kfree(p->result.deviceInfo.groupInfo.p2PClientInfo);
- p->result.deviceInfo.groupInfo.p2PClientInfo = NULL;
- break;
- case CSR_WIFI_SME_P2P_ROLE_STANDALONE:
- kfree(p->result.deviceInfo.standalonedevInfo.secDeviceType);
- p->result.deviceInfo.standalonedevInfo.secDeviceType = NULL;
- break;
- default:
- break;
- }
- break;
- }
- case CSR_WIFI_SME_SCAN_RESULTS_GET_CFM:
- {
- CsrWifiSmeScanResultsGetCfm *p = (CsrWifiSmeScanResultsGetCfm *)message;
- {
- u16 i1;
- for (i1 = 0; i1 < p->scanResultsCount; i1++)
- {
- kfree(p->scanResults[i1].informationElements);
- p->scanResults[i1].informationElements = NULL;
- switch (p->scanResults[i1].p2pDeviceRole)
- {
- case CSR_WIFI_SME_P2P_ROLE_GO:
- {
- u16 i4;
- for (i4 = 0; i4 < p->scanResults[i1].deviceInfo.groupInfo.p2pClientInfoCount; i4++)
- {
- kfree(p->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType);
- p->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType = NULL;
- }
- }
- kfree(p->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo);
- p->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo = NULL;
- break;
- case CSR_WIFI_SME_P2P_ROLE_STANDALONE:
- kfree(p->scanResults[i1].deviceInfo.standalonedevInfo.secDeviceType);
- p->scanResults[i1].deviceInfo.standalonedevInfo.secDeviceType = NULL;
- break;
- default:
- break;
- }
- }
- }
- kfree(p->scanResults);
- p->scanResults = NULL;
- break;
- }
- case CSR_WIFI_SME_TSPEC_IND:
- {
- CsrWifiSmeTspecInd *p = (CsrWifiSmeTspecInd *)message;
- kfree(p->tspec);
- p->tspec = NULL;
- break;
- }
- case CSR_WIFI_SME_TSPEC_CFM:
- {
- CsrWifiSmeTspecCfm *p = (CsrWifiSmeTspecCfm *)message;
- kfree(p->tspec);
- p->tspec = NULL;
- break;
- }
- case CSR_WIFI_SME_VERSIONS_GET_CFM:
- {
- CsrWifiSmeVersionsGetCfm *p = (CsrWifiSmeVersionsGetCfm *)message;
- kfree(p->versions.routerBuild);
- p->versions.routerBuild = NULL;
- kfree(p->versions.smeBuild);
- p->versions.smeBuild = NULL;
- break;
- }
- case CSR_WIFI_SME_CLOAKED_SSIDS_GET_CFM:
- {
- CsrWifiSmeCloakedSsidsGetCfm *p = (CsrWifiSmeCloakedSsidsGetCfm *)message;
- kfree(p->cloakedSsids.cloakedSsids);
- p->cloakedSsids.cloakedSsids = NULL;
- break;
- }
- case CSR_WIFI_SME_ERROR_IND:
- {
- CsrWifiSmeErrorInd *p = (CsrWifiSmeErrorInd *)message;
- kfree(p->errorMessage);
- p->errorMessage = NULL;
- break;
- }
- case CSR_WIFI_SME_INFO_IND:
- {
- CsrWifiSmeInfoInd *p = (CsrWifiSmeInfoInd *)message;
- kfree(p->infoMessage);
- p->infoMessage = NULL;
- break;
- }
- case CSR_WIFI_SME_CORE_DUMP_IND:
- {
- CsrWifiSmeCoreDumpInd *p = (CsrWifiSmeCoreDumpInd *)message;
- kfree(p->data);
- p->data = NULL;
- break;
- }
-
- default:
- break;
- }
-}
-
-
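The deleted switch above applies one pattern to every upstream message type: each heap-allocated member is released with kfree() and then set to NULL, so a repeated call on the same message cannot double free. A minimal sketch of that pattern, using a hypothetical message type invented only for illustration:

#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical upstream message with a single heap-allocated member, used only
 * to illustrate the kfree-then-NULL pattern of the switch above. */
struct example_upstream_msg {
    u8  *payload;        /* owned buffer, may already be NULL */
    u16  payloadLength;
};

static void example_free_contents(struct example_upstream_msg *p)
{
    kfree(p->payload);   /* kfree(NULL) is a no-op, so no NULL check is needed */
    p->payload = NULL;   /* clearing the pointer makes a second call harmless */
}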
diff --git a/drivers/staging/csr/csr_wifi_sme_lib.h b/drivers/staging/csr/csr_wifi_sme_lib.h
deleted file mode 100644
index 53cf126..0000000
--- a/drivers/staging/csr/csr_wifi_sme_lib.h
+++ /dev/null
@@ -1,4303 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#ifndef CSR_WIFI_SME_LIB_H__
-#define CSR_WIFI_SME_LIB_H__
-
-#include "csr_sched.h"
-#include "csr_macro.h"
-#include "csr_msg_transport.h"
-
-#include "csr_wifi_lib.h"
-
-#include "csr_wifi_sme_prim.h"
-#include "csr_wifi_sme_task.h"
-
-
-#ifndef CSR_WIFI_SME_LIB_DESTINATION_QUEUE
-# ifdef CSR_WIFI_NME_ENABLE
-# include "csr_wifi_nme_task.h"
-# define CSR_WIFI_SME_LIB_DESTINATION_QUEUE CSR_WIFI_NME_IFACEQUEUE
-# else
-# define CSR_WIFI_SME_LIB_DESTINATION_QUEUE CSR_WIFI_SME_IFACEQUEUE
-# endif
-#endif
-
-/*----------------------------------------------------------------------------*
- * CsrWifiSmeFreeUpstreamMessageContents
- *
- * DESCRIPTION
- * Free the allocated memory in a CSR_WIFI_SME upstream message. Does not
- * free the message itself, and can only be used for upstream messages.
- *
- * PARAMETERS
- *      eventClass - Event class of the message
- *      message    - Pointer to the upstream message whose contents are to be freed
- *----------------------------------------------------------------------------*/
-void CsrWifiSmeFreeUpstreamMessageContents(u16 eventClass, void *message);
-
-/*----------------------------------------------------------------------------*
- * CsrWifiSmeFreeDownstreamMessageContents
- *
- * DESCRIPTION
- * Free the allocated memory in a CSR_WIFI_SME downstream message. Does not
- * free the message itself, and can only be used for downstream messages.
- *
- * PARAMETERS
- *      eventClass - Event class of the message
- *      message    - Pointer to the downstream message whose contents are to be freed
- *----------------------------------------------------------------------------*/
-void CsrWifiSmeFreeDownstreamMessageContents(u16 eventClass, void *message);
-
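As a rough usage sketch (not from the original source), a task that has finished handling an SME upstream message frees the nested contents first and then the message envelope; releasing the envelope with kfree() is an assumption based on the kmalloc() allocations in the macros below.

/* Sketch only: how a receiving task might use the helper after handling a message. */
static void example_handle_upstream(u16 eventClass, void *msg)
{
    /* ... act on the message contents ... */

    /* Release nested allocations; this does NOT free 'msg' itself. */
    CsrWifiSmeFreeUpstreamMessageContents(eventClass, msg);

    kfree(msg);   /* assumed: the envelope is released separately by its owner */
}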
-/*----------------------------------------------------------------------------*
- * Enum to string functions
- *----------------------------------------------------------------------------*/
-const char* CsrWifiSme80211NetworkTypeToString(CsrWifiSme80211NetworkType value);
-const char* CsrWifiSme80211PrivacyModeToString(CsrWifiSme80211PrivacyMode value);
-const char* CsrWifiSme80211dTrustLevelToString(CsrWifiSme80211dTrustLevel value);
-const char* CsrWifiSmeAmpStatusToString(CsrWifiSmeAmpStatus value);
-const char* CsrWifiSmeAuthModeToString(CsrWifiSmeAuthMode value);
-const char* CsrWifiSmeBasicUsabilityToString(CsrWifiSmeBasicUsability value);
-const char* CsrWifiSmeBssTypeToString(CsrWifiSmeBssType value);
-const char* CsrWifiSmeCoexSchemeToString(CsrWifiSmeCoexScheme value);
-const char* CsrWifiSmeControlIndicationToString(CsrWifiSmeControlIndication value);
-const char* CsrWifiSmeCtsProtectionTypeToString(CsrWifiSmeCtsProtectionType value);
-const char* CsrWifiSmeD3AutoScanModeToString(CsrWifiSmeD3AutoScanMode value);
-const char* CsrWifiSmeEncryptionToString(CsrWifiSmeEncryption value);
-const char* CsrWifiSmeFirmwareDriverInterfaceToString(CsrWifiSmeFirmwareDriverInterface value);
-const char* CsrWifiSmeHostPowerModeToString(CsrWifiSmeHostPowerMode value);
-const char* CsrWifiSmeIEEE80211ReasonToString(CsrWifiSmeIEEE80211Reason value);
-const char* CsrWifiSmeIEEE80211ResultToString(CsrWifiSmeIEEE80211Result value);
-const char* CsrWifiSmeIndicationsToString(CsrWifiSmeIndications value);
-const char* CsrWifiSmeKeyTypeToString(CsrWifiSmeKeyType value);
-const char* CsrWifiSmeListActionToString(CsrWifiSmeListAction value);
-const char* CsrWifiSmeMediaStatusToString(CsrWifiSmeMediaStatus value);
-const char* CsrWifiSmeP2pCapabilityToString(CsrWifiSmeP2pCapability value);
-const char* CsrWifiSmeP2pGroupCapabilityToString(CsrWifiSmeP2pGroupCapability value);
-const char* CsrWifiSmeP2pNoaConfigMethodToString(CsrWifiSmeP2pNoaConfigMethod value);
-const char* CsrWifiSmeP2pRoleToString(CsrWifiSmeP2pRole value);
-const char* CsrWifiSmeP2pStatusToString(CsrWifiSmeP2pStatus value);
-const char* CsrWifiSmePacketFilterModeToString(CsrWifiSmePacketFilterMode value);
-const char* CsrWifiSmePowerSaveLevelToString(CsrWifiSmePowerSaveLevel value);
-const char* CsrWifiSmePreambleTypeToString(CsrWifiSmePreambleType value);
-const char* CsrWifiSmeRadioIFToString(CsrWifiSmeRadioIF value);
-const char* CsrWifiSmeRegulatoryDomainToString(CsrWifiSmeRegulatoryDomain value);
-const char* CsrWifiSmeRoamReasonToString(CsrWifiSmeRoamReason value);
-const char* CsrWifiSmeScanTypeToString(CsrWifiSmeScanType value);
-const char* CsrWifiSmeTrafficTypeToString(CsrWifiSmeTrafficType value);
-const char* CsrWifiSmeTspecCtrlToString(CsrWifiSmeTspecCtrl value);
-const char* CsrWifiSmeTspecResultCodeToString(CsrWifiSmeTspecResultCode value);
-const char* CsrWifiSmeWepAuthModeToString(CsrWifiSmeWepAuthMode value);
-const char* CsrWifiSmeWepCredentialTypeToString(CsrWifiSmeWepCredentialType value);
-const char* CsrWifiSmeWmmModeToString(CsrWifiSmeWmmMode value);
-const char* CsrWifiSmeWmmQosInfoToString(CsrWifiSmeWmmQosInfo value);
-const char* CsrWifiSmeWpsConfigTypeToString(CsrWifiSmeWpsConfigType value);
-const char* CsrWifiSmeWpsDeviceCategoryToString(CsrWifiSmeWpsDeviceCategory value);
-const char* CsrWifiSmeWpsDeviceSubCategoryToString(CsrWifiSmeWpsDeviceSubCategory value);
-const char* CsrWifiSmeWpsDpidToString(CsrWifiSmeWpsDpid value);
-const char* CsrWifiSmeWpsRegistrationToString(CsrWifiSmeWpsRegistration value);
-
-
-/*----------------------------------------------------------------------------*
- * CsrPrim Type toString function.
- * Converts a message type to the String name of the Message
- *----------------------------------------------------------------------------*/
-const char* CsrWifiSmePrimTypeToString(CsrPrim msgType);
-
-/*----------------------------------------------------------------------------*
- * Lookup arrays for PrimType name Strings
- *----------------------------------------------------------------------------*/
-extern const char *CsrWifiSmeUpstreamPrimNames[CSR_WIFI_SME_PRIM_UPSTREAM_COUNT];
-extern const char *CsrWifiSmeDownstreamPrimNames[CSR_WIFI_SME_PRIM_DOWNSTREAM_COUNT];
-
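The string helpers above are mainly useful for tracing. A hedged sketch of how a driver might log an incoming primitive, assuming the usual kernel printk facilities are available:

/* Sketch: log the symbolic name of an SME primitive before dispatching it. */
static void example_trace_prim(CsrPrim msgType)
{
    pr_debug("csr_wifi_sme: handling %s\n", CsrWifiSmePrimTypeToString(msgType));
}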
-/*******************************************************************************
-
- NAME
- CsrWifiSmeActivateReqSend
-
- DESCRIPTION
- The WMA sends this primitive to activate the SME.
- The WMA must activate the SME before it can send any other primitive.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
-
-*******************************************************************************/
-#define CsrWifiSmeActivateReqCreate(msg__, dst__, src__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeActivateReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_ACTIVATE_REQ, dst__, src__);
-
-#define CsrWifiSmeActivateReqSendTo(dst__, src__) \
- { \
- CsrWifiSmeActivateReq *msg__; \
- CsrWifiSmeActivateReqCreate(msg__, dst__, src__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeActivateReqSend(src__) \
- CsrWifiSmeActivateReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__)
-
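Every primitive below follows the same three layers: the *Create macro kmalloc()s and initialises the message, *SendTo posts it to an explicit destination queue, and *Send substitutes the default destination (the NME queue when CSR_WIFI_NME_ENABLE is defined, otherwise the SME queue). A minimal, hypothetical caller, assuming the caller's queue handle is a CsrSchedQid from csr_sched.h:

/* Sketch: activate the SME from a wireless-manager task.
 * 'my_task_queue' stands in for the caller's scheduler queue identifier. */
static void example_activate_sme(CsrSchedQid my_task_queue)
{
    /* CSR_WIFI_SME_ACTIVATE_CFM will be delivered back to my_task_queue. */
    CsrWifiSmeActivateReqSend(my_task_queue);
}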
-/*******************************************************************************
-
- NAME
- CsrWifiSmeActivateCfmSend
-
- DESCRIPTION
- The SME sends this primitive when the activation is complete.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
-
-*******************************************************************************/
-#define CsrWifiSmeActivateCfmCreate(msg__, dst__, src__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeActivateCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_ACTIVATE_CFM, dst__, src__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeActivateCfmSendTo(dst__, src__, status__) \
- { \
- CsrWifiSmeActivateCfm *msg__; \
- CsrWifiSmeActivateCfmCreate(msg__, dst__, src__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeActivateCfmSend(dst__, status__) \
- CsrWifiSmeActivateCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeAdhocConfigGetReqSend
-
- DESCRIPTION
- This primitive gets the value of the adHocConfig parameter.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
-
-*******************************************************************************/
-#define CsrWifiSmeAdhocConfigGetReqCreate(msg__, dst__, src__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeAdhocConfigGetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_ADHOC_CONFIG_GET_REQ, dst__, src__);
-
-#define CsrWifiSmeAdhocConfigGetReqSendTo(dst__, src__) \
- { \
- CsrWifiSmeAdhocConfigGetReq *msg__; \
- CsrWifiSmeAdhocConfigGetReqCreate(msg__, dst__, src__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeAdhocConfigGetReqSend(src__) \
- CsrWifiSmeAdhocConfigGetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeAdhocConfigGetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
- adHocConfig - Contains the values used when starting an Ad-hoc (IBSS)
- connection.
-
-*******************************************************************************/
-#define CsrWifiSmeAdhocConfigGetCfmCreate(msg__, dst__, src__, status__, adHocConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeAdhocConfigGetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_ADHOC_CONFIG_GET_CFM, dst__, src__); \
- msg__->status = (status__); \
- msg__->adHocConfig = (adHocConfig__);
-
-#define CsrWifiSmeAdhocConfigGetCfmSendTo(dst__, src__, status__, adHocConfig__) \
- { \
- CsrWifiSmeAdhocConfigGetCfm *msg__; \
- CsrWifiSmeAdhocConfigGetCfmCreate(msg__, dst__, src__, status__, adHocConfig__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeAdhocConfigGetCfmSend(dst__, status__, adHocConfig__) \
- CsrWifiSmeAdhocConfigGetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__, adHocConfig__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeAdhocConfigSetReqSend
-
- DESCRIPTION
- This primitive sets the value of the adHocConfig parameter.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- adHocConfig - Sets the values to use when starting an ad hoc network.
-
-*******************************************************************************/
-#define CsrWifiSmeAdhocConfigSetReqCreate(msg__, dst__, src__, adHocConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeAdhocConfigSetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_ADHOC_CONFIG_SET_REQ, dst__, src__); \
- msg__->adHocConfig = (adHocConfig__);
-
-#define CsrWifiSmeAdhocConfigSetReqSendTo(dst__, src__, adHocConfig__) \
- { \
- CsrWifiSmeAdhocConfigSetReq *msg__; \
- CsrWifiSmeAdhocConfigSetReqCreate(msg__, dst__, src__, adHocConfig__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeAdhocConfigSetReqSend(src__, adHocConfig__) \
- CsrWifiSmeAdhocConfigSetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, adHocConfig__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeAdhocConfigSetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
-
-*******************************************************************************/
-#define CsrWifiSmeAdhocConfigSetCfmCreate(msg__, dst__, src__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeAdhocConfigSetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_ADHOC_CONFIG_SET_CFM, dst__, src__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeAdhocConfigSetCfmSendTo(dst__, src__, status__) \
- { \
- CsrWifiSmeAdhocConfigSetCfm *msg__; \
- CsrWifiSmeAdhocConfigSetCfmCreate(msg__, dst__, src__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeAdhocConfigSetCfmSend(dst__, status__) \
- CsrWifiSmeAdhocConfigSetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeAmpStatusChangeIndSend
-
- DESCRIPTION
- Indication of change to AMP activity.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface on which the AMP activity changed.
- ampStatus - The new status of AMP activity.Range: {AMP_ACTIVE,
- AMP_INACTIVE}.
-
-*******************************************************************************/
-#define CsrWifiSmeAmpStatusChangeIndCreate(msg__, dst__, src__, interfaceTag__, ampStatus__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeAmpStatusChangeInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_AMP_STATUS_CHANGE_IND, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->ampStatus = (ampStatus__);
-
-#define CsrWifiSmeAmpStatusChangeIndSendTo(dst__, src__, interfaceTag__, ampStatus__) \
- { \
- CsrWifiSmeAmpStatusChangeInd *msg__; \
- CsrWifiSmeAmpStatusChangeIndCreate(msg__, dst__, src__, interfaceTag__, ampStatus__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeAmpStatusChangeIndSend(dst__, interfaceTag__, ampStatus__) \
- CsrWifiSmeAmpStatusChangeIndSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, ampStatus__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeAssociationCompleteIndSend
-
- DESCRIPTION
- The SME will send this primitive to all the tasks that have registered to
- receive it whenever it completes an attempt to associate with an AP. If
- the association was successful, status will be set to
- CSR_WIFI_SME_STATUS_SUCCESS, otherwise status and deauthReason shall be
- set to appropriate error codes.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the association procedure
- connectionInfo - This parameter is relevant only if result is
- CSR_WIFI_SME_STATUS_SUCCESS:
- it points to the connection information for the new network
- deauthReason - This parameter is relevant only if result is not
- CSR_WIFI_SME_STATUS_SUCCESS:
- if the AP deauthorised the station, it gives the reason of
- the deauthorization
-
-*******************************************************************************/
-#define CsrWifiSmeAssociationCompleteIndCreate(msg__, dst__, src__, interfaceTag__, status__, connectionInfo__, deauthReason__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeAssociationCompleteInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_ASSOCIATION_COMPLETE_IND, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__); \
- msg__->connectionInfo = (connectionInfo__); \
- msg__->deauthReason = (deauthReason__);
-
-#define CsrWifiSmeAssociationCompleteIndSendTo(dst__, src__, interfaceTag__, status__, connectionInfo__, deauthReason__) \
- { \
- CsrWifiSmeAssociationCompleteInd *msg__; \
- CsrWifiSmeAssociationCompleteIndCreate(msg__, dst__, src__, interfaceTag__, status__, connectionInfo__, deauthReason__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeAssociationCompleteIndSend(dst__, interfaceTag__, status__, connectionInfo__, deauthReason__) \
- CsrWifiSmeAssociationCompleteIndSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__, connectionInfo__, deauthReason__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeAssociationStartIndSend
-
- DESCRIPTION
- The SME will send this primitive to all the tasks that have registered to
- receive it whenever it begins an attempt to associate with an AP.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- address - BSSID of the associating network
- ssid - Service Set identifier of the associating network
-
-*******************************************************************************/
-#define CsrWifiSmeAssociationStartIndCreate(msg__, dst__, src__, interfaceTag__, address__, ssid__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeAssociationStartInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_ASSOCIATION_START_IND, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->address = (address__); \
- msg__->ssid = (ssid__);
-
-#define CsrWifiSmeAssociationStartIndSendTo(dst__, src__, interfaceTag__, address__, ssid__) \
- { \
- CsrWifiSmeAssociationStartInd *msg__; \
- CsrWifiSmeAssociationStartIndCreate(msg__, dst__, src__, interfaceTag__, address__, ssid__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeAssociationStartIndSend(dst__, interfaceTag__, address__, ssid__) \
- CsrWifiSmeAssociationStartIndSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, address__, ssid__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeBlacklistReqSend
-
- DESCRIPTION
- The wireless manager application should call this primitive to notify the
- driver of any networks that should not be connected to. The interface
- allows the wireless manager application to query, add, remove, and flush
- the BSSIDs that the driver may not connect or roam to.
- When this primitive adds to the black list the BSSID to which the SME is
- currently connected, the SME will try to roam, if applicable, to another
- BSSID in the same ESS; if the roaming procedure fails, the SME will
- disconnect.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
- action - The value of the CsrWifiSmeListAction parameter instructs
- the driver to modify or provide the list of blacklisted
- networks.
- setAddressCount - Number of BSSIDs sent with this primitive
- setAddresses - Pointer to the list of BBSIDs sent with the primitive, set
- to NULL if none is sent.
-
-*******************************************************************************/
-#define CsrWifiSmeBlacklistReqCreate(msg__, dst__, src__, interfaceTag__, action__, setAddressCount__, setAddresses__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeBlacklistReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_BLACKLIST_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->action = (action__); \
- msg__->setAddressCount = (setAddressCount__); \
- msg__->setAddresses = (setAddresses__);
-
-#define CsrWifiSmeBlacklistReqSendTo(dst__, src__, interfaceTag__, action__, setAddressCount__, setAddresses__) \
- { \
- CsrWifiSmeBlacklistReq *msg__; \
- CsrWifiSmeBlacklistReqCreate(msg__, dst__, src__, interfaceTag__, action__, setAddressCount__, setAddresses__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeBlacklistReqSend(src__, interfaceTag__, action__, setAddressCount__, setAddresses__) \
- CsrWifiSmeBlacklistReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, interfaceTag__, action__, setAddressCount__, setAddresses__)
-
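As a hedged illustration of passing a heap-allocated list with a request, the sketch below adds a single BSSID to the blacklist. The CsrWifiMacAddress element type, its 'a' member, the CSR_WIFI_SME_LIST_ACTION_ADD enumerator, and the assumption that the message takes ownership of the array are inferred for illustration rather than taken from this header.

/* Sketch only: element type, member name, action value and ownership are
 * assumptions, as noted above. */
static void example_blacklist_one_bssid(CsrSchedQid my_task_queue,
                                        u16 interfaceTag, const u8 bssid[6])
{
    CsrWifiMacAddress *addr = kmalloc(sizeof(*addr), GFP_KERNEL);

    if (!addr)
        return;
    memcpy(addr->a, bssid, 6);              /* 'a' is the assumed address field */

    /* One address; the array is assumed to be owned (and later freed) by the
     * message machinery rather than by this caller. */
    CsrWifiSmeBlacklistReqSend(my_task_queue, interfaceTag,
                               CSR_WIFI_SME_LIST_ACTION_ADD, 1, addr);
}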
-/*******************************************************************************
-
- NAME
- CsrWifiSmeBlacklistCfmSend
-
- DESCRIPTION
- The SME will call this primitive when the action on the blacklist has
- completed. For a GET action, this primitive also reports the list of
- BBSIDs in the blacklist.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
- action - Action in the request
- getAddressCount - This parameter is only relevant if action is
- CSR_WIFI_SME_LIST_ACTION_GET:
- number of BSSIDs sent with this primitive
- getAddresses - Pointer to the list of BBSIDs sent with the primitive, set
- to NULL if none is sent.
-
-*******************************************************************************/
-#define CsrWifiSmeBlacklistCfmCreate(msg__, dst__, src__, interfaceTag__, status__, action__, getAddressCount__, getAddresses__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeBlacklistCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_BLACKLIST_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__); \
- msg__->action = (action__); \
- msg__->getAddressCount = (getAddressCount__); \
- msg__->getAddresses = (getAddresses__);
-
-#define CsrWifiSmeBlacklistCfmSendTo(dst__, src__, interfaceTag__, status__, action__, getAddressCount__, getAddresses__) \
- { \
- CsrWifiSmeBlacklistCfm *msg__; \
- CsrWifiSmeBlacklistCfmCreate(msg__, dst__, src__, interfaceTag__, status__, action__, getAddressCount__, getAddresses__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeBlacklistCfmSend(dst__, interfaceTag__, status__, action__, getAddressCount__, getAddresses__) \
- CsrWifiSmeBlacklistCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__, action__, getAddressCount__, getAddresses__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCalibrationDataGetReqSend
-
- DESCRIPTION
- This primitive retrieves the Wi-Fi radio calibration data.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
-
-*******************************************************************************/
-#define CsrWifiSmeCalibrationDataGetReqCreate(msg__, dst__, src__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeCalibrationDataGetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_CALIBRATION_DATA_GET_REQ, dst__, src__);
-
-#define CsrWifiSmeCalibrationDataGetReqSendTo(dst__, src__) \
- { \
- CsrWifiSmeCalibrationDataGetReq *msg__; \
- CsrWifiSmeCalibrationDataGetReqCreate(msg__, dst__, src__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeCalibrationDataGetReqSend(src__) \
- CsrWifiSmeCalibrationDataGetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCalibrationDataGetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
- calibrationDataLength - Number of bytes in the buffer pointed by
- calibrationData
- calibrationData - Pointer to a buffer of length calibrationDataLength
- containing the calibration data
-
-*******************************************************************************/
-#define CsrWifiSmeCalibrationDataGetCfmCreate(msg__, dst__, src__, status__, calibrationDataLength__, calibrationData__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeCalibrationDataGetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_CALIBRATION_DATA_GET_CFM, dst__, src__); \
- msg__->status = (status__); \
- msg__->calibrationDataLength = (calibrationDataLength__); \
- msg__->calibrationData = (calibrationData__);
-
-#define CsrWifiSmeCalibrationDataGetCfmSendTo(dst__, src__, status__, calibrationDataLength__, calibrationData__) \
- { \
- CsrWifiSmeCalibrationDataGetCfm *msg__; \
- CsrWifiSmeCalibrationDataGetCfmCreate(msg__, dst__, src__, status__, calibrationDataLength__, calibrationData__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeCalibrationDataGetCfmSend(dst__, status__, calibrationDataLength__, calibrationData__) \
- CsrWifiSmeCalibrationDataGetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__, calibrationDataLength__, calibrationData__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCalibrationDataSetReqSend
-
- DESCRIPTION
- This primitive sets the Wi-Fi radio calibration data.
- The usage of the primitive with proper calibration data will avoid
- time-consuming configuration after power-up.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- calibrationDataLength - Number of bytes in the buffer pointed by
- calibrationData
- calibrationData - Pointer to a buffer of length calibrationDataLength
- containing the calibration data
-
-*******************************************************************************/
-#define CsrWifiSmeCalibrationDataSetReqCreate(msg__, dst__, src__, calibrationDataLength__, calibrationData__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeCalibrationDataSetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_CALIBRATION_DATA_SET_REQ, dst__, src__); \
- msg__->calibrationDataLength = (calibrationDataLength__); \
- msg__->calibrationData = (calibrationData__);
-
-#define CsrWifiSmeCalibrationDataSetReqSendTo(dst__, src__, calibrationDataLength__, calibrationData__) \
- { \
- CsrWifiSmeCalibrationDataSetReq *msg__; \
- CsrWifiSmeCalibrationDataSetReqCreate(msg__, dst__, src__, calibrationDataLength__, calibrationData__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeCalibrationDataSetReqSend(src__, calibrationDataLength__, calibrationData__) \
- CsrWifiSmeCalibrationDataSetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, calibrationDataLength__, calibrationData__)
-
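The calibration set/get pair passes a raw length-plus-pointer buffer. A hedged sketch of restoring previously saved calibration data at start-up, assuming the buffer's ownership passes with the message once it has been sent:

/* Sketch only: ownership of 'copy' after sending is an assumption. */
static void example_restore_calibration(CsrSchedQid my_task_queue,
                                        const u8 *blob, u16 len)
{
    u8 *copy = kmemdup(blob, len, GFP_KERNEL);

    if (!copy)
        return;
    CsrWifiSmeCalibrationDataSetReqSend(my_task_queue, len, copy);
}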
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCalibrationDataSetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
-
-*******************************************************************************/
-#define CsrWifiSmeCalibrationDataSetCfmCreate(msg__, dst__, src__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeCalibrationDataSetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_CALIBRATION_DATA_SET_CFM, dst__, src__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeCalibrationDataSetCfmSendTo(dst__, src__, status__) \
- { \
- CsrWifiSmeCalibrationDataSetCfm *msg__; \
- CsrWifiSmeCalibrationDataSetCfmCreate(msg__, dst__, src__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeCalibrationDataSetCfmSend(dst__, status__) \
- CsrWifiSmeCalibrationDataSetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCcxConfigGetReqSend
-
- DESCRIPTION
- This primitive gets the value of the CcxConfig parameter.
- CURRENTLY NOT SUPPORTED.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
-
-*******************************************************************************/
-#define CsrWifiSmeCcxConfigGetReqCreate(msg__, dst__, src__, interfaceTag__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeCcxConfigGetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_CCX_CONFIG_GET_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__);
-
-#define CsrWifiSmeCcxConfigGetReqSendTo(dst__, src__, interfaceTag__) \
- { \
- CsrWifiSmeCcxConfigGetReq *msg__; \
- CsrWifiSmeCcxConfigGetReqCreate(msg__, dst__, src__, interfaceTag__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeCcxConfigGetReqSend(src__, interfaceTag__) \
- CsrWifiSmeCcxConfigGetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, interfaceTag__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCcxConfigGetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
- ccxConfig - Currently not supported
-
-*******************************************************************************/
-#define CsrWifiSmeCcxConfigGetCfmCreate(msg__, dst__, src__, interfaceTag__, status__, ccxConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeCcxConfigGetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_CCX_CONFIG_GET_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__); \
- msg__->ccxConfig = (ccxConfig__);
-
-#define CsrWifiSmeCcxConfigGetCfmSendTo(dst__, src__, interfaceTag__, status__, ccxConfig__) \
- { \
- CsrWifiSmeCcxConfigGetCfm *msg__; \
- CsrWifiSmeCcxConfigGetCfmCreate(msg__, dst__, src__, interfaceTag__, status__, ccxConfig__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeCcxConfigGetCfmSend(dst__, interfaceTag__, status__, ccxConfig__) \
- CsrWifiSmeCcxConfigGetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__, ccxConfig__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCcxConfigSetReqSend
-
- DESCRIPTION
- This primitive sets the value of the CcxConfig parameter.
- CURRENTLY NOT SUPPORTED.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
- ccxConfig - Currently not supported
-
-*******************************************************************************/
-#define CsrWifiSmeCcxConfigSetReqCreate(msg__, dst__, src__, interfaceTag__, ccxConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeCcxConfigSetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_CCX_CONFIG_SET_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->ccxConfig = (ccxConfig__);
-
-#define CsrWifiSmeCcxConfigSetReqSendTo(dst__, src__, interfaceTag__, ccxConfig__) \
- { \
- CsrWifiSmeCcxConfigSetReq *msg__; \
- CsrWifiSmeCcxConfigSetReqCreate(msg__, dst__, src__, interfaceTag__, ccxConfig__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeCcxConfigSetReqSend(src__, interfaceTag__, ccxConfig__) \
- CsrWifiSmeCcxConfigSetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, interfaceTag__, ccxConfig__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCcxConfigSetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
-
-*******************************************************************************/
-#define CsrWifiSmeCcxConfigSetCfmCreate(msg__, dst__, src__, interfaceTag__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeCcxConfigSetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_CCX_CONFIG_SET_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeCcxConfigSetCfmSendTo(dst__, src__, interfaceTag__, status__) \
- { \
- CsrWifiSmeCcxConfigSetCfm *msg__; \
- CsrWifiSmeCcxConfigSetCfmCreate(msg__, dst__, src__, interfaceTag__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeCcxConfigSetCfmSend(dst__, interfaceTag__, status__) \
- CsrWifiSmeCcxConfigSetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCloakedSsidsGetReqSend
-
- DESCRIPTION
- This primitive gets the value of the CloakedSsids parameter.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
-
-*******************************************************************************/
-#define CsrWifiSmeCloakedSsidsGetReqCreate(msg__, dst__, src__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeCloakedSsidsGetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_CLOAKED_SSIDS_GET_REQ, dst__, src__);
-
-#define CsrWifiSmeCloakedSsidsGetReqSendTo(dst__, src__) \
- { \
- CsrWifiSmeCloakedSsidsGetReq *msg__; \
- CsrWifiSmeCloakedSsidsGetReqCreate(msg__, dst__, src__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeCloakedSsidsGetReqSend(src__) \
- CsrWifiSmeCloakedSsidsGetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCloakedSsidsGetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
- cloakedSsids - Reports list of cloaked SSIDs that are explicitly scanned for
- by the driver
-
-*******************************************************************************/
-#define CsrWifiSmeCloakedSsidsGetCfmCreate(msg__, dst__, src__, status__, cloakedSsids__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeCloakedSsidsGetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_CLOAKED_SSIDS_GET_CFM, dst__, src__); \
- msg__->status = (status__); \
- msg__->cloakedSsids = (cloakedSsids__);
-
-#define CsrWifiSmeCloakedSsidsGetCfmSendTo(dst__, src__, status__, cloakedSsids__) \
- { \
- CsrWifiSmeCloakedSsidsGetCfm *msg__; \
- CsrWifiSmeCloakedSsidsGetCfmCreate(msg__, dst__, src__, status__, cloakedSsids__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeCloakedSsidsGetCfmSend(dst__, status__, cloakedSsids__) \
- CsrWifiSmeCloakedSsidsGetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__, cloakedSsids__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCloakedSsidsSetReqSend
-
- DESCRIPTION
- This primitive sets the list of cloaked SSIDs for which the WMA possesses
- profiles.
- When the driver detects a cloaked AP, the SME will explicitly scan for it
- using the list of cloaked SSIDs provided it, and, if the scan succeeds,
- it will report the AP to the WMA either via CSR_WIFI_SME_SCAN_RESULT_IND
- (if registered) or via CSR_WIFI_SCAN_RESULT_GET_CFM.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- cloakedSsids - Sets the list of cloaked SSIDs
-
-*******************************************************************************/
-#define CsrWifiSmeCloakedSsidsSetReqCreate(msg__, dst__, src__, cloakedSsids__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeCloakedSsidsSetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_CLOAKED_SSIDS_SET_REQ, dst__, src__); \
- msg__->cloakedSsids = (cloakedSsids__);
-
-#define CsrWifiSmeCloakedSsidsSetReqSendTo(dst__, src__, cloakedSsids__) \
- { \
- CsrWifiSmeCloakedSsidsSetReq *msg__; \
- CsrWifiSmeCloakedSsidsSetReqCreate(msg__, dst__, src__, cloakedSsids__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeCloakedSsidsSetReqSend(src__, cloakedSsids__) \
- CsrWifiSmeCloakedSsidsSetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, cloakedSsids__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCloakedSsidsSetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
-
-*******************************************************************************/
-#define CsrWifiSmeCloakedSsidsSetCfmCreate(msg__, dst__, src__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeCloakedSsidsSetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_CLOAKED_SSIDS_SET_CFM, dst__, src__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeCloakedSsidsSetCfmSendTo(dst__, src__, status__) \
- { \
- CsrWifiSmeCloakedSsidsSetCfm *msg__; \
- CsrWifiSmeCloakedSsidsSetCfmCreate(msg__, dst__, src__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeCloakedSsidsSetCfmSend(dst__, status__) \
- CsrWifiSmeCloakedSsidsSetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCoexConfigGetReqSend
-
- DESCRIPTION
- This primitive gets the value of the CoexConfig parameter.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
-
-*******************************************************************************/
-#define CsrWifiSmeCoexConfigGetReqCreate(msg__, dst__, src__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeCoexConfigGetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_COEX_CONFIG_GET_REQ, dst__, src__);
-
-#define CsrWifiSmeCoexConfigGetReqSendTo(dst__, src__) \
- { \
- CsrWifiSmeCoexConfigGetReq *msg__; \
- CsrWifiSmeCoexConfigGetReqCreate(msg__, dst__, src__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeCoexConfigGetReqSend(src__) \
- CsrWifiSmeCoexConfigGetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCoexConfigGetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
- coexConfig - Reports the parameters used to configure the coexistence
- behaviour
-
-*******************************************************************************/
-#define CsrWifiSmeCoexConfigGetCfmCreate(msg__, dst__, src__, status__, coexConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeCoexConfigGetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_COEX_CONFIG_GET_CFM, dst__, src__); \
- msg__->status = (status__); \
- msg__->coexConfig = (coexConfig__);
-
-#define CsrWifiSmeCoexConfigGetCfmSendTo(dst__, src__, status__, coexConfig__) \
- { \
- CsrWifiSmeCoexConfigGetCfm *msg__; \
- CsrWifiSmeCoexConfigGetCfmCreate(msg__, dst__, src__, status__, coexConfig__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeCoexConfigGetCfmSend(dst__, status__, coexConfig__) \
- CsrWifiSmeCoexConfigGetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__, coexConfig__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCoexConfigSetReqSend
-
- DESCRIPTION
- This primitive sets the value of the CoexConfig parameter.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- coexConfig - Configures the coexistence behaviour
-
-*******************************************************************************/
-#define CsrWifiSmeCoexConfigSetReqCreate(msg__, dst__, src__, coexConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeCoexConfigSetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_COEX_CONFIG_SET_REQ, dst__, src__); \
- msg__->coexConfig = (coexConfig__);
-
-#define CsrWifiSmeCoexConfigSetReqSendTo(dst__, src__, coexConfig__) \
- { \
- CsrWifiSmeCoexConfigSetReq *msg__; \
- CsrWifiSmeCoexConfigSetReqCreate(msg__, dst__, src__, coexConfig__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeCoexConfigSetReqSend(src__, coexConfig__) \
- CsrWifiSmeCoexConfigSetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, coexConfig__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCoexConfigSetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
-
-*******************************************************************************/
-#define CsrWifiSmeCoexConfigSetCfmCreate(msg__, dst__, src__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeCoexConfigSetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_COEX_CONFIG_SET_CFM, dst__, src__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeCoexConfigSetCfmSendTo(dst__, src__, status__) \
- { \
- CsrWifiSmeCoexConfigSetCfm *msg__; \
- CsrWifiSmeCoexConfigSetCfmCreate(msg__, dst__, src__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeCoexConfigSetCfmSend(dst__, status__) \
- CsrWifiSmeCoexConfigSetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCoexInfoGetReqSend
-
- DESCRIPTION
- This primitive gets the value of the CoexInfo parameter.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
-
-*******************************************************************************/
-#define CsrWifiSmeCoexInfoGetReqCreate(msg__, dst__, src__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeCoexInfoGetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_COEX_INFO_GET_REQ, dst__, src__);
-
-#define CsrWifiSmeCoexInfoGetReqSendTo(dst__, src__) \
- { \
- CsrWifiSmeCoexInfoGetReq *msg__; \
- CsrWifiSmeCoexInfoGetReqCreate(msg__, dst__, src__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeCoexInfoGetReqSend(src__) \
- CsrWifiSmeCoexInfoGetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCoexInfoGetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
- coexInfo - Reports information and state related to coexistence.
-
-*******************************************************************************/
-#define CsrWifiSmeCoexInfoGetCfmCreate(msg__, dst__, src__, status__, coexInfo__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeCoexInfoGetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_COEX_INFO_GET_CFM, dst__, src__); \
- msg__->status = (status__); \
- msg__->coexInfo = (coexInfo__);
-
-#define CsrWifiSmeCoexInfoGetCfmSendTo(dst__, src__, status__, coexInfo__) \
- { \
- CsrWifiSmeCoexInfoGetCfm *msg__; \
- CsrWifiSmeCoexInfoGetCfmCreate(msg__, dst__, src__, status__, coexInfo__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeCoexInfoGetCfmSend(dst__, status__, coexInfo__) \
- CsrWifiSmeCoexInfoGetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__, coexInfo__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeConnectReqSend
-
- DESCRIPTION
- The wireless manager application calls this primitive to start the
- process of joining an 802.11 wireless network or to start an ad hoc
- network.
- The structure pointed by connectionConfig contains parameters describing
- the network to join or, in case of an ad hoc network, to host or join.
- The SME will select a network, perform the IEEE 802.11 Join, Authenticate
- and Associate exchanges.
- The SME selects the networks from the current scan list that match both
- the SSID and BSSID, however either or both of these may be the wildcard
- value. Using this rule, the following operations are possible:
- * To connect to a network by name, specify the SSID and set the BSSID to
- 0xFF 0xFF 0xFF 0xFF 0xFF 0xFF. If there are two or more networks visible,
- the SME will select the one with the strongest signal.
- * To connect to a specific network, specify the BSSID. The SSID is
- optional, but if given it must match the SSID of the network. An empty
- SSID may be specified by setting the SSID length to zero. Please note
- that if the BSSID is specified (i.e. not equal to 0xFF 0xFF 0xFF 0xFF
- 0xFF 0xFF), the SME will not attempt to roam if signal conditions become
- poor, even if there is an alternative AP with an SSID that matches the
- current network SSID.
- * To connect to any network matching the other parameters (i.e. security,
- etc), set the SSID length to zero and set the BSSID to 0xFF 0xFF 0xFF
- 0xFF 0xFF 0xFF. In this case, the SME will order all available networks
- by their signal strengths and will iterate through this list until it
- successfully connects.
- NOTE: Specifying the BSSID will restrict the selection to one specific
- network. If SSID and BSSID are given, they must both match the network
- for it to be selected. To select a network based on the SSID only, the
- wireless manager application must set the BSSID to 0xFF 0xFF 0xFF 0xFF
- 0xFF 0xFF.
- The SME will try to connect to each network that matches the provided
- parameters, one by one, until it succeeds or has tried unsuccessfully
- with all the matching networks.
- If there is no network that matches the parameters and the request allows
- to host an ad hoc network, the SME will advertise a new ad hoc network
- instead.
- If the SME cannot connect, it will notify the failure in the confirm.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
- connectionConfig - Describes the candidate network to join or to host.
-
-*******************************************************************************/
-#define CsrWifiSmeConnectReqCreate(msg__, dst__, src__, interfaceTag__, connectionConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeConnectReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_CONNECT_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->connectionConfig = (connectionConfig__);
-
-#define CsrWifiSmeConnectReqSendTo(dst__, src__, interfaceTag__, connectionConfig__) \
- { \
- CsrWifiSmeConnectReq *msg__; \
- CsrWifiSmeConnectReqCreate(msg__, dst__, src__, interfaceTag__, connectionConfig__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeConnectReqSend(src__, interfaceTag__, connectionConfig__) \
- CsrWifiSmeConnectReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, interfaceTag__, connectionConfig__)
-
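To make the wildcard rules above concrete, the sketch below asks for a connection selected by SSID alone by setting the BSSID to the broadcast value. The CsrWifiSmeConnectionConfig field names used here (bssid, ssid) are assumptions for illustration; only the macro signature comes from this header.

/* Sketch only: the 'bssid' and 'ssid' field names are assumed. */
static void example_connect_by_ssid(CsrSchedQid my_task_queue, u16 interfaceTag)
{
    CsrWifiSmeConnectionConfig cfg = {0};

    memset(&cfg.bssid, 0xFF, sizeof(cfg.bssid)); /* wildcard: select by SSID only */
    /* ... fill cfg.ssid with the target network name plus security settings ... */

    CsrWifiSmeConnectReqSend(my_task_queue, interfaceTag, cfg);
}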
-/*******************************************************************************
-
- NAME
- CsrWifiSmeConnectCfmSend
-
- DESCRIPTION
- The SME calls this primitive when the connection exchange is complete or
- all connection attempts fail.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request.
- CSR_WIFI_SME_STATUS_NOT_FOUND: all attempts by the SME to
- locate the requested AP failed
-
-*******************************************************************************/
-#define CsrWifiSmeConnectCfmCreate(msg__, dst__, src__, interfaceTag__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeConnectCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_CONNECT_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeConnectCfmSendTo(dst__, src__, interfaceTag__, status__) \
- { \
- CsrWifiSmeConnectCfm *msg__; \
- CsrWifiSmeConnectCfmCreate(msg__, dst__, src__, interfaceTag__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeConnectCfmSend(dst__, interfaceTag__, status__) \
- CsrWifiSmeConnectCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeConnectionConfigGetReqSend
-
- DESCRIPTION
- This primitive gets the value of the ConnectionConfig parameter.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
-
-*******************************************************************************/
-#define CsrWifiSmeConnectionConfigGetReqCreate(msg__, dst__, src__, interfaceTag__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeConnectionConfigGetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_CONNECTION_CONFIG_GET_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__);
-
-#define CsrWifiSmeConnectionConfigGetReqSendTo(dst__, src__, interfaceTag__) \
- { \
- CsrWifiSmeConnectionConfigGetReq *msg__; \
- CsrWifiSmeConnectionConfigGetReqCreate(msg__, dst__, src__, interfaceTag__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeConnectionConfigGetReqSend(src__, interfaceTag__) \
- CsrWifiSmeConnectionConfigGetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, interfaceTag__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeConnectionConfigGetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
- connectionConfig - Parameters used by the SME for selecting a network
-
-*******************************************************************************/
-#define CsrWifiSmeConnectionConfigGetCfmCreate(msg__, dst__, src__, interfaceTag__, status__, connectionConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeConnectionConfigGetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_CONNECTION_CONFIG_GET_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__); \
- msg__->connectionConfig = (connectionConfig__);
-
-#define CsrWifiSmeConnectionConfigGetCfmSendTo(dst__, src__, interfaceTag__, status__, connectionConfig__) \
- { \
- CsrWifiSmeConnectionConfigGetCfm *msg__; \
- CsrWifiSmeConnectionConfigGetCfmCreate(msg__, dst__, src__, interfaceTag__, status__, connectionConfig__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeConnectionConfigGetCfmSend(dst__, interfaceTag__, status__, connectionConfig__) \
- CsrWifiSmeConnectionConfigGetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__, connectionConfig__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeConnectionInfoGetReqSend
-
- DESCRIPTION
- This primitive gets the value of the ConnectionInfo parameter.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
-
-*******************************************************************************/
-#define CsrWifiSmeConnectionInfoGetReqCreate(msg__, dst__, src__, interfaceTag__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeConnectionInfoGetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_CONNECTION_INFO_GET_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__);
-
-#define CsrWifiSmeConnectionInfoGetReqSendTo(dst__, src__, interfaceTag__) \
- { \
- CsrWifiSmeConnectionInfoGetReq *msg__; \
- CsrWifiSmeConnectionInfoGetReqCreate(msg__, dst__, src__, interfaceTag__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeConnectionInfoGetReqSend(src__, interfaceTag__) \
- CsrWifiSmeConnectionInfoGetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, interfaceTag__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeConnectionInfoGetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
- connectionInfo - Information about the current connection
-
-*******************************************************************************/
-#define CsrWifiSmeConnectionInfoGetCfmCreate(msg__, dst__, src__, interfaceTag__, status__, connectionInfo__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeConnectionInfoGetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_CONNECTION_INFO_GET_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__); \
- msg__->connectionInfo = (connectionInfo__);
-
-#define CsrWifiSmeConnectionInfoGetCfmSendTo(dst__, src__, interfaceTag__, status__, connectionInfo__) \
- { \
- CsrWifiSmeConnectionInfoGetCfm *msg__; \
- CsrWifiSmeConnectionInfoGetCfmCreate(msg__, dst__, src__, interfaceTag__, status__, connectionInfo__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeConnectionInfoGetCfmSend(dst__, interfaceTag__, status__, connectionInfo__) \
- CsrWifiSmeConnectionInfoGetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__, connectionInfo__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeConnectionQualityIndSend
-
- DESCRIPTION
- The SME sends this primitive to all the tasks that have registered to
- receive it whenever the value of the current connection quality
- parameters change by more than a certain configurable amount.
- The wireless manager application may configure the trigger thresholds for
- this indication using the fields in the smeConfig parameter of
- CSR_WIFI_SME_SME_CONFIG_SET_REQ.
- Connection quality messages can be suppressed by setting both thresholds
- to zero.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- linkQuality - Indicates the quality of the link
-
-*******************************************************************************/
-#define CsrWifiSmeConnectionQualityIndCreate(msg__, dst__, src__, interfaceTag__, linkQuality__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeConnectionQualityInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_CONNECTION_QUALITY_IND, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->linkQuality = (linkQuality__);
-
-#define CsrWifiSmeConnectionQualityIndSendTo(dst__, src__, interfaceTag__, linkQuality__) \
- { \
- CsrWifiSmeConnectionQualityInd *msg__; \
- CsrWifiSmeConnectionQualityIndCreate(msg__, dst__, src__, interfaceTag__, linkQuality__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeConnectionQualityIndSend(dst__, interfaceTag__, linkQuality__) \
- CsrWifiSmeConnectionQualityIndSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, linkQuality__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeConnectionStatsGetReqSend
-
- DESCRIPTION
- This primitive gets the value of the ConnectionStats parameter.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
-
-*******************************************************************************/
-#define CsrWifiSmeConnectionStatsGetReqCreate(msg__, dst__, src__, interfaceTag__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeConnectionStatsGetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_CONNECTION_STATS_GET_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__);
-
-#define CsrWifiSmeConnectionStatsGetReqSendTo(dst__, src__, interfaceTag__) \
- { \
- CsrWifiSmeConnectionStatsGetReq *msg__; \
- CsrWifiSmeConnectionStatsGetReqCreate(msg__, dst__, src__, interfaceTag__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeConnectionStatsGetReqSend(src__, interfaceTag__) \
- CsrWifiSmeConnectionStatsGetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, interfaceTag__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeConnectionStatsGetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
- connectionStats - Statistics for current connection.
-
-*******************************************************************************/
-#define CsrWifiSmeConnectionStatsGetCfmCreate(msg__, dst__, src__, interfaceTag__, status__, connectionStats__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeConnectionStatsGetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_CONNECTION_STATS_GET_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__); \
- msg__->connectionStats = (connectionStats__);
-
-#define CsrWifiSmeConnectionStatsGetCfmSendTo(dst__, src__, interfaceTag__, status__, connectionStats__) \
- { \
- CsrWifiSmeConnectionStatsGetCfm *msg__; \
- CsrWifiSmeConnectionStatsGetCfmCreate(msg__, dst__, src__, interfaceTag__, status__, connectionStats__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeConnectionStatsGetCfmSend(dst__, interfaceTag__, status__, connectionStats__) \
- CsrWifiSmeConnectionStatsGetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__, connectionStats__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCoreDumpIndSend
-
- DESCRIPTION
- The SME will send this primitive to all the tasks that have registered to
- receive Wi-Fi Chip core dump data.
- The core dump data may be fragmented and sent using more than one
- indication.
- To indicate that all the data has been sent, the last indication contains
- a 'dataLength' of 0 and 'data' of NULL.
-
- PARAMETERS
- queue - Destination Task Queue
- dataLength - Number of bytes in the buffer pointed to by 'data'
- data - Pointer to the buffer containing 'dataLength' bytes of core
- dump data
-
-*******************************************************************************/
-#define CsrWifiSmeCoreDumpIndCreate(msg__, dst__, src__, dataLength__, data__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeCoreDumpInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_CORE_DUMP_IND, dst__, src__); \
- msg__->dataLength = (dataLength__); \
- msg__->data = (data__);
-
-#define CsrWifiSmeCoreDumpIndSendTo(dst__, src__, dataLength__, data__) \
- { \
- CsrWifiSmeCoreDumpInd *msg__; \
- CsrWifiSmeCoreDumpIndCreate(msg__, dst__, src__, dataLength__, data__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeCoreDumpIndSend(dst__, dataLength__, data__) \
- CsrWifiSmeCoreDumpIndSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, dataLength__, data__)
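For reference, a task registered for core dump data might detect the final fragment exactly as described above (a minimal sketch, assuming the CsrWifiSmeCoreDumpInd type declared elsewhere in this header set; store_dump_fragment() and finalise_dump() are hypothetical host-side helpers):

    /* Sketch: accumulate fragments delivered via CSR_WIFI_SME_CORE_DUMP_IND. */
    static void handle_core_dump_ind(const CsrWifiSmeCoreDumpInd *ind)
    {
            if (ind->dataLength == 0 && ind->data == NULL) {
                    /* Last indication: the whole core dump has now been delivered. */
                    finalise_dump();                                 /* hypothetical */
                    return;
            }
            /* Intermediate fragment: keep 'dataLength' bytes from 'data'. */
            store_dump_fragment(ind->data, ind->dataLength);         /* hypothetical */
    }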
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeDeactivateReqSend
-
- DESCRIPTION
- The WMA sends this primitive to deactivate the SME.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
-
-*******************************************************************************/
-#define CsrWifiSmeDeactivateReqCreate(msg__, dst__, src__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeDeactivateReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_DEACTIVATE_REQ, dst__, src__);
-
-#define CsrWifiSmeDeactivateReqSendTo(dst__, src__) \
- { \
- CsrWifiSmeDeactivateReq *msg__; \
- CsrWifiSmeDeactivateReqCreate(msg__, dst__, src__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeDeactivateReqSend(src__) \
- CsrWifiSmeDeactivateReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeDeactivateCfmSend
-
- DESCRIPTION
- The SME sends this primitive when the deactivation is complete.
- The WMA cannot send any more primitives until it activates the SME again
- by sending another CSR_WIFI_SME_ACTIVATE_REQ.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
-
-*******************************************************************************/
-#define CsrWifiSmeDeactivateCfmCreate(msg__, dst__, src__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeDeactivateCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_DEACTIVATE_CFM, dst__, src__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeDeactivateCfmSendTo(dst__, src__, status__) \
- { \
- CsrWifiSmeDeactivateCfm *msg__; \
- CsrWifiSmeDeactivateCfmCreate(msg__, dst__, src__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeDeactivateCfmSend(dst__, status__) \
- CsrWifiSmeDeactivateCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeDisconnectReqSend
-
- DESCRIPTION
- The wireless manager application may disconnect from the current network
- by calling this primitive
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
-
-*******************************************************************************/
-#define CsrWifiSmeDisconnectReqCreate(msg__, dst__, src__, interfaceTag__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeDisconnectReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_DISCONNECT_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__);
-
-#define CsrWifiSmeDisconnectReqSendTo(dst__, src__, interfaceTag__) \
- { \
- CsrWifiSmeDisconnectReq *msg__; \
- CsrWifiSmeDisconnectReqCreate(msg__, dst__, src__, interfaceTag__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeDisconnectReqSend(src__, interfaceTag__) \
- CsrWifiSmeDisconnectReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, interfaceTag__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeDisconnectCfmSend
-
- DESCRIPTION
- On reception of CSR_WIFI_SME_DISCONNECT_REQ the SME will perform a
- disconnect operation, sending a CsrWifiSmeMediaStatusInd with
- CSR_WIFI_SME_MEDIA_STATUS_DISCONNECTED and then call this primitive when
- disconnection is complete.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
-
-*******************************************************************************/
-#define CsrWifiSmeDisconnectCfmCreate(msg__, dst__, src__, interfaceTag__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeDisconnectCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_DISCONNECT_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeDisconnectCfmSendTo(dst__, src__, interfaceTag__, status__) \
- { \
- CsrWifiSmeDisconnectCfm *msg__; \
- CsrWifiSmeDisconnectCfmCreate(msg__, dst__, src__, interfaceTag__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeDisconnectCfmSend(dst__, interfaceTag__, status__) \
- CsrWifiSmeDisconnectCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeErrorIndSend
-
- DESCRIPTION
- Error message indicating an error of some importance.
-
- PARAMETERS
- queue - Destination Task Queue
- errorMessage - Contains the error message.
-
-*******************************************************************************/
-#define CsrWifiSmeErrorIndCreate(msg__, dst__, src__, errorMessage__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeErrorInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_ERROR_IND, dst__, src__); \
- msg__->errorMessage = (errorMessage__);
-
-#define CsrWifiSmeErrorIndSendTo(dst__, src__, errorMessage__) \
- { \
- CsrWifiSmeErrorInd *msg__; \
- CsrWifiSmeErrorIndCreate(msg__, dst__, src__, errorMessage__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeErrorIndSend(dst__, errorMessage__) \
- CsrWifiSmeErrorIndSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, errorMessage__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeEventMaskSetReqSend
-
- DESCRIPTION
- The wireless manager application may register with the SME to receive
- notification of interesting events. Indications will be sent only if the
- wireless manager explicitly registers to be notified of that event.
- indMask is a bit mask of values defined in CsrWifiSmeIndicationsMask.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- indMask - Set mask with values from CsrWifiSmeIndications
-
-*******************************************************************************/
-#define CsrWifiSmeEventMaskSetReqCreate(msg__, dst__, src__, indMask__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeEventMaskSetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_EVENT_MASK_SET_REQ, dst__, src__); \
- msg__->indMask = (indMask__);
-
-#define CsrWifiSmeEventMaskSetReqSendTo(dst__, src__, indMask__) \
- { \
- CsrWifiSmeEventMaskSetReq *msg__; \
- CsrWifiSmeEventMaskSetReqCreate(msg__, dst__, src__, indMask__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeEventMaskSetReqSend(src__, indMask__) \
- CsrWifiSmeEventMaskSetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, indMask__)
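As a usage illustration, a wireless manager task could register for a couple of indications by OR-ing bits into indMask (a minimal sketch; CSR_WIFI_SME_INDICATIONS_MEDIASTATUS and CSR_WIFI_SME_INDICATIONS_MICFAILURE are only placeholder names for values from CsrWifiSmeIndications, and wm_queue stands for the caller's task queue):

    /* Sketch: ask the SME to deliver media status and MIC failure indications. */
    CsrWifiSmeEventMaskSetReqSend(wm_queue,
                                  CSR_WIFI_SME_INDICATIONS_MEDIASTATUS |   /* placeholder */
                                  CSR_WIFI_SME_INDICATIONS_MICFAILURE);    /* placeholder */
    /* The SME answers with CSR_WIFI_SME_EVENT_MASK_SET_CFM carrying the status. */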
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeEventMaskSetCfmSend
-
- DESCRIPTION
- The SME calls the primitive to report the result of the request
- primitive.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
-
-*******************************************************************************/
-#define CsrWifiSmeEventMaskSetCfmCreate(msg__, dst__, src__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeEventMaskSetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_EVENT_MASK_SET_CFM, dst__, src__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeEventMaskSetCfmSendTo(dst__, src__, status__) \
- { \
- CsrWifiSmeEventMaskSetCfm *msg__; \
- CsrWifiSmeEventMaskSetCfmCreate(msg__, dst__, src__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeEventMaskSetCfmSend(dst__, status__) \
- CsrWifiSmeEventMaskSetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeHostConfigGetReqSend
-
- DESCRIPTION
- This primitive gets the value of the hostConfig parameter.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
-
-*******************************************************************************/
-#define CsrWifiSmeHostConfigGetReqCreate(msg__, dst__, src__, interfaceTag__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeHostConfigGetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_HOST_CONFIG_GET_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__);
-
-#define CsrWifiSmeHostConfigGetReqSendTo(dst__, src__, interfaceTag__) \
- { \
- CsrWifiSmeHostConfigGetReq *msg__; \
- CsrWifiSmeHostConfigGetReqCreate(msg__, dst__, src__, interfaceTag__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeHostConfigGetReqSend(src__, interfaceTag__) \
- CsrWifiSmeHostConfigGetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, interfaceTag__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeHostConfigGetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
- hostConfig - Current host power state.
-
-*******************************************************************************/
-#define CsrWifiSmeHostConfigGetCfmCreate(msg__, dst__, src__, interfaceTag__, status__, hostConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeHostConfigGetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_HOST_CONFIG_GET_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__); \
- msg__->hostConfig = (hostConfig__);
-
-#define CsrWifiSmeHostConfigGetCfmSendTo(dst__, src__, interfaceTag__, status__, hostConfig__) \
- { \
- CsrWifiSmeHostConfigGetCfm *msg__; \
- CsrWifiSmeHostConfigGetCfmCreate(msg__, dst__, src__, interfaceTag__, status__, hostConfig__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeHostConfigGetCfmSend(dst__, interfaceTag__, status__, hostConfig__) \
- CsrWifiSmeHostConfigGetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__, hostConfig__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeHostConfigSetReqSend
-
- DESCRIPTION
- This primitive sets the value of the hostConfig parameter.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
- hostConfig - Communicates a change of host power state (for example, on
- mains power, on battery power etc) and of the periodicity of
- traffic data
-
-*******************************************************************************/
-#define CsrWifiSmeHostConfigSetReqCreate(msg__, dst__, src__, interfaceTag__, hostConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeHostConfigSetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_HOST_CONFIG_SET_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->hostConfig = (hostConfig__);
-
-#define CsrWifiSmeHostConfigSetReqSendTo(dst__, src__, interfaceTag__, hostConfig__) \
- { \
- CsrWifiSmeHostConfigSetReq *msg__; \
- CsrWifiSmeHostConfigSetReqCreate(msg__, dst__, src__, interfaceTag__, hostConfig__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeHostConfigSetReqSend(src__, interfaceTag__, hostConfig__) \
- CsrWifiSmeHostConfigSetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, interfaceTag__, hostConfig__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeHostConfigSetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
-
-*******************************************************************************/
-#define CsrWifiSmeHostConfigSetCfmCreate(msg__, dst__, src__, interfaceTag__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeHostConfigSetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_HOST_CONFIG_SET_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeHostConfigSetCfmSendTo(dst__, src__, interfaceTag__, status__) \
- { \
- CsrWifiSmeHostConfigSetCfm *msg__; \
- CsrWifiSmeHostConfigSetCfmCreate(msg__, dst__, src__, interfaceTag__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeHostConfigSetCfmSend(dst__, interfaceTag__, status__) \
- CsrWifiSmeHostConfigSetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeIbssStationIndSend
-
- DESCRIPTION
- The SME will send this primitive to indicate that a station has joined or
- left the ad-hoc network.
-
- PARAMETERS
- queue - Destination Task Queue
- address - MAC address of the station that has joined or left
- isconnected - TRUE if the station joined, FALSE if the station left
-
-*******************************************************************************/
-#define CsrWifiSmeIbssStationIndCreate(msg__, dst__, src__, address__, isconnected__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeIbssStationInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_IBSS_STATION_IND, dst__, src__); \
- msg__->address = (address__); \
- msg__->isconnected = (isconnected__);
-
-#define CsrWifiSmeIbssStationIndSendTo(dst__, src__, address__, isconnected__) \
- { \
- CsrWifiSmeIbssStationInd *msg__; \
- CsrWifiSmeIbssStationIndCreate(msg__, dst__, src__, address__, isconnected__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeIbssStationIndSend(dst__, address__, isconnected__) \
- CsrWifiSmeIbssStationIndSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, address__, isconnected__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeInfoIndSend
-
- DESCRIPTION
- Message giving some information about current activity. Mostly of interest
- in testing, but may be useful in the field.
-
- PARAMETERS
- queue - Destination Task Queue
- infoMessage - Contains the message.
-
-*******************************************************************************/
-#define CsrWifiSmeInfoIndCreate(msg__, dst__, src__, infoMessage__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeInfoInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_INFO_IND, dst__, src__); \
- msg__->infoMessage = (infoMessage__);
-
-#define CsrWifiSmeInfoIndSendTo(dst__, src__, infoMessage__) \
- { \
- CsrWifiSmeInfoInd *msg__; \
- CsrWifiSmeInfoIndCreate(msg__, dst__, src__, infoMessage__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeInfoIndSend(dst__, infoMessage__) \
- CsrWifiSmeInfoIndSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, infoMessage__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeInterfaceCapabilityGetReqSend
-
- DESCRIPTION
- The Wireless Manager calls this primitive to ask the SME for the
- capabilities of the supported interfaces
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
-
-*******************************************************************************/
-#define CsrWifiSmeInterfaceCapabilityGetReqCreate(msg__, dst__, src__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeInterfaceCapabilityGetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_INTERFACE_CAPABILITY_GET_REQ, dst__, src__);
-
-#define CsrWifiSmeInterfaceCapabilityGetReqSendTo(dst__, src__) \
- { \
- CsrWifiSmeInterfaceCapabilityGetReq *msg__; \
- CsrWifiSmeInterfaceCapabilityGetReqCreate(msg__, dst__, src__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeInterfaceCapabilityGetReqSend(src__) \
- CsrWifiSmeInterfaceCapabilityGetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeInterfaceCapabilityGetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Result of the request
- numInterfaces - Number of supported interfaces
- capBitmap - Points to the list of capabilities bitmaps provided for each
- interface.
- The bits represent the following capabilities:
- -bits 7 to 4-Reserved
- -bit 3-AMP
- -bit 2-P2P
- -bit 1-AP
- -bit 0-STA
-
-*******************************************************************************/
-#define CsrWifiSmeInterfaceCapabilityGetCfmCreate(msg__, dst__, src__, status__, numInterfaces__, capBitmap__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeInterfaceCapabilityGetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_INTERFACE_CAPABILITY_GET_CFM, dst__, src__); \
- msg__->status = (status__); \
- msg__->numInterfaces = (numInterfaces__); \
- memcpy(msg__->capBitmap, (capBitmap__), sizeof(u8) * 2);
-
-#define CsrWifiSmeInterfaceCapabilityGetCfmSendTo(dst__, src__, status__, numInterfaces__, capBitmap__) \
- { \
- CsrWifiSmeInterfaceCapabilityGetCfm *msg__; \
- CsrWifiSmeInterfaceCapabilityGetCfmCreate(msg__, dst__, src__, status__, numInterfaces__, capBitmap__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeInterfaceCapabilityGetCfmSend(dst__, status__, numInterfaces__, capBitmap__) \
- CsrWifiSmeInterfaceCapabilityGetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__, numInterfaces__, capBitmap__)
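The bit layout documented above can be decoded per interface roughly as follows (a minimal sketch, assuming the CsrWifiSmeInterfaceCapabilityGetCfm type declared elsewhere in this header set; printk is used purely for illustration):

    /* Sketch: report the capability bits carried in a capability-get confirm. */
    static void report_interface_caps(const CsrWifiSmeInterfaceCapabilityGetCfm *cfm)
    {
            unsigned int i;

            for (i = 0; i < cfm->numInterfaces && i < 2; i++) {
                    u8 caps = cfm->capBitmap[i];

                    printk(KERN_INFO "iface %u: STA=%d AP=%d P2P=%d AMP=%d\n", i,
                           !!(caps & 0x01),   /* bit 0 - STA */
                           !!(caps & 0x02),   /* bit 1 - AP  */
                           !!(caps & 0x04),   /* bit 2 - P2P */
                           !!(caps & 0x08));  /* bit 3 - AMP */
            }
    }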
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeKeyReqSend
-
- DESCRIPTION
- The wireless manager application calls this primitive to add or remove
- keys that the chip should use for encryption of data.
- The interface allows the wireless manager application to add and remove
- keys according to the specified action.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
- action - The value of the CsrWifiSmeListAction parameter instructs the
- driver to modify or provide the list of keys.
- CSR_WIFI_SME_LIST_ACTION_GET is not supported here.
- key - Key to be added or removed
-
-*******************************************************************************/
-#define CsrWifiSmeKeyReqCreate(msg__, dst__, src__, interfaceTag__, action__, key__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeKeyReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_KEY_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->action = (action__); \
- msg__->key = (key__);
-
-#define CsrWifiSmeKeyReqSendTo(dst__, src__, interfaceTag__, action__, key__) \
- { \
- CsrWifiSmeKeyReq *msg__; \
- CsrWifiSmeKeyReqCreate(msg__, dst__, src__, interfaceTag__, action__, key__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeKeyReqSend(src__, interfaceTag__, action__, key__) \
- CsrWifiSmeKeyReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, interfaceTag__, action__, key__)
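Together with the confirm documented next, a typical add-key exchange on the wireless manager side might look like this (a minimal sketch; CSR_WIFI_SME_LIST_ACTION_ADD is only a placeholder for the relevant CsrWifiSmeListAction value, wm_queue and interfaceTag are caller-supplied, and 'key' is assumed to be an already populated key structure of the type carried by CsrWifiSmeKeyReq):

    /* Sketch: ask the chip to install one encryption key. */
    CsrWifiSmeKeyReqSend(wm_queue, interfaceTag,
                         CSR_WIFI_SME_LIST_ACTION_ADD,      /* placeholder name */
                         key);
    /*
     * The SME later answers with CSR_WIFI_SME_KEY_CFM, echoing the action, key
     * type and peer MAC address together with the status of the operation.
     */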
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeKeyCfmSend
-
- DESCRIPTION
- The SME calls the primitive to report the result of the request
- primitive.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
- action - Action in the request
- keyType - Type of the key added/deleted
- peerMacAddress - Peer MAC Address of the key added/deleted
-
-*******************************************************************************/
-#define CsrWifiSmeKeyCfmCreate(msg__, dst__, src__, interfaceTag__, status__, action__, keyType__, peerMacAddress__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeKeyCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_KEY_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__); \
- msg__->action = (action__); \
- msg__->keyType = (keyType__); \
- msg__->peerMacAddress = (peerMacAddress__);
-
-#define CsrWifiSmeKeyCfmSendTo(dst__, src__, interfaceTag__, status__, action__, keyType__, peerMacAddress__) \
- { \
- CsrWifiSmeKeyCfm *msg__; \
- CsrWifiSmeKeyCfmCreate(msg__, dst__, src__, interfaceTag__, status__, action__, keyType__, peerMacAddress__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeKeyCfmSend(dst__, interfaceTag__, status__, action__, keyType__, peerMacAddress__) \
- CsrWifiSmeKeyCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__, action__, keyType__, peerMacAddress__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeLinkQualityGetReqSend
-
- DESCRIPTION
- This primitive gets the value of the LinkQuality parameter.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
-
-*******************************************************************************/
-#define CsrWifiSmeLinkQualityGetReqCreate(msg__, dst__, src__, interfaceTag__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeLinkQualityGetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_LINK_QUALITY_GET_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__);
-
-#define CsrWifiSmeLinkQualityGetReqSendTo(dst__, src__, interfaceTag__) \
- { \
- CsrWifiSmeLinkQualityGetReq *msg__; \
- CsrWifiSmeLinkQualityGetReqCreate(msg__, dst__, src__, interfaceTag__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeLinkQualityGetReqSend(src__, interfaceTag__) \
- CsrWifiSmeLinkQualityGetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, interfaceTag__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeLinkQualityGetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
- linkQuality - Indicates the quality of the link
-
-*******************************************************************************/
-#define CsrWifiSmeLinkQualityGetCfmCreate(msg__, dst__, src__, interfaceTag__, status__, linkQuality__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeLinkQualityGetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_LINK_QUALITY_GET_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__); \
- msg__->linkQuality = (linkQuality__);
-
-#define CsrWifiSmeLinkQualityGetCfmSendTo(dst__, src__, interfaceTag__, status__, linkQuality__) \
- { \
- CsrWifiSmeLinkQualityGetCfm *msg__; \
- CsrWifiSmeLinkQualityGetCfmCreate(msg__, dst__, src__, interfaceTag__, status__, linkQuality__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeLinkQualityGetCfmSend(dst__, interfaceTag__, status__, linkQuality__) \
- CsrWifiSmeLinkQualityGetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__, linkQuality__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMediaStatusIndSend
-
- DESCRIPTION
- The SME sends this primitive to all the tasks that have registered to
- receive it when a network connection is established, lost or has moved to
- another AP.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- mediaStatus - Indicates the media status
- connectionInfo - This parameter is relevant only if the mediaStatus is
- CSR_WIFI_SME_MEDIA_STATUS_CONNECTED:
- it points to the connection information for the new network
- disassocReason - This parameter is relevant only if the mediaStatus is
- CSR_WIFI_SME_MEDIA_STATUS_DISCONNECTED:
- if a disassociation has occurred it gives the reason of the
- disassociation
- deauthReason - This parameter is relevant only if the mediaStatus is
- CSR_WIFI_SME_MEDIA_STATUS_DISCONNECTED:
- if a deauthentication has occurred it gives the reason of
- the deauthentication
-
-*******************************************************************************/
-#define CsrWifiSmeMediaStatusIndCreate(msg__, dst__, src__, interfaceTag__, mediaStatus__, connectionInfo__, disassocReason__, deauthReason__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeMediaStatusInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_MEDIA_STATUS_IND, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->mediaStatus = (mediaStatus__); \
- msg__->connectionInfo = (connectionInfo__); \
- msg__->disassocReason = (disassocReason__); \
- msg__->deauthReason = (deauthReason__);
-
-#define CsrWifiSmeMediaStatusIndSendTo(dst__, src__, interfaceTag__, mediaStatus__, connectionInfo__, disassocReason__, deauthReason__) \
- { \
- CsrWifiSmeMediaStatusInd *msg__; \
- CsrWifiSmeMediaStatusIndCreate(msg__, dst__, src__, interfaceTag__, mediaStatus__, connectionInfo__, disassocReason__, deauthReason__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeMediaStatusIndSend(dst__, interfaceTag__, mediaStatus__, connectionInfo__, disassocReason__, deauthReason__) \
- CsrWifiSmeMediaStatusIndSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, mediaStatus__, connectionInfo__, disassocReason__, deauthReason__)
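A receiving task would typically branch on mediaStatus and consult only the fields relevant for that state, as documented above (a minimal sketch, assuming the CsrWifiSmeMediaStatusInd type declared elsewhere in this header set; on_connected() and on_disconnected() are hypothetical wireless manager helpers):

    /* Sketch: handle a media status indication. */
    static void handle_media_status_ind(const CsrWifiSmeMediaStatusInd *ind)
    {
            switch (ind->mediaStatus) {
            case CSR_WIFI_SME_MEDIA_STATUS_CONNECTED:
                    /* connectionInfo describes the new network. */
                    on_connected(ind);                       /* hypothetical helper */
                    break;
            case CSR_WIFI_SME_MEDIA_STATUS_DISCONNECTED:
                    /* disassocReason/deauthReason explain why the link was lost. */
                    on_disconnected(ind);                    /* hypothetical helper */
                    break;
            default:
                    break;
            }
    }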
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMibConfigGetReqSend
-
- DESCRIPTION
- This primitive gets the value of the MibConfig parameter.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
-
-*******************************************************************************/
-#define CsrWifiSmeMibConfigGetReqCreate(msg__, dst__, src__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeMibConfigGetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_MIB_CONFIG_GET_REQ, dst__, src__);
-
-#define CsrWifiSmeMibConfigGetReqSendTo(dst__, src__) \
- { \
- CsrWifiSmeMibConfigGetReq *msg__; \
- CsrWifiSmeMibConfigGetReqCreate(msg__, dst__, src__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeMibConfigGetReqSend(src__) \
- CsrWifiSmeMibConfigGetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMibConfigGetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
- mibConfig - Reports various IEEE 802.11 attributes as currently configured
-
-*******************************************************************************/
-#define CsrWifiSmeMibConfigGetCfmCreate(msg__, dst__, src__, status__, mibConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeMibConfigGetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_MIB_CONFIG_GET_CFM, dst__, src__); \
- msg__->status = (status__); \
- msg__->mibConfig = (mibConfig__);
-
-#define CsrWifiSmeMibConfigGetCfmSendTo(dst__, src__, status__, mibConfig__) \
- { \
- CsrWifiSmeMibConfigGetCfm *msg__; \
- CsrWifiSmeMibConfigGetCfmCreate(msg__, dst__, src__, status__, mibConfig__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeMibConfigGetCfmSend(dst__, status__, mibConfig__) \
- CsrWifiSmeMibConfigGetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__, mibConfig__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMibConfigSetReqSend
-
- DESCRIPTION
- This primitive sets the value of the MibConfig parameter.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- mibConfig - Conveys the desired values of various IEEE 802.11 attributes to be
- configured
-
-*******************************************************************************/
-#define CsrWifiSmeMibConfigSetReqCreate(msg__, dst__, src__, mibConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeMibConfigSetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_MIB_CONFIG_SET_REQ, dst__, src__); \
- msg__->mibConfig = (mibConfig__);
-
-#define CsrWifiSmeMibConfigSetReqSendTo(dst__, src__, mibConfig__) \
- { \
- CsrWifiSmeMibConfigSetReq *msg__; \
- CsrWifiSmeMibConfigSetReqCreate(msg__, dst__, src__, mibConfig__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeMibConfigSetReqSend(src__, mibConfig__) \
- CsrWifiSmeMibConfigSetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, mibConfig__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMibConfigSetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
-
-*******************************************************************************/
-#define CsrWifiSmeMibConfigSetCfmCreate(msg__, dst__, src__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeMibConfigSetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_MIB_CONFIG_SET_CFM, dst__, src__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeMibConfigSetCfmSendTo(dst__, src__, status__) \
- { \
- CsrWifiSmeMibConfigSetCfm *msg__; \
- CsrWifiSmeMibConfigSetCfmCreate(msg__, dst__, src__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeMibConfigSetCfmSend(dst__, status__) \
- CsrWifiSmeMibConfigSetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMibGetCfmSend
-
- DESCRIPTION
- The SME calls this primitive to return the requested MIB variable values.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
- mibAttributeLength - Length of mibAttribute
- mibAttribute - Points to the VarBind or VarBindList containing the
- names and values of the MIB variables requested
-
-*******************************************************************************/
-#define CsrWifiSmeMibGetCfmCreate(msg__, dst__, src__, status__, mibAttributeLength__, mibAttribute__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeMibGetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_MIB_GET_CFM, dst__, src__); \
- msg__->status = (status__); \
- msg__->mibAttributeLength = (mibAttributeLength__); \
- msg__->mibAttribute = (mibAttribute__);
-
-#define CsrWifiSmeMibGetCfmSendTo(dst__, src__, status__, mibAttributeLength__, mibAttribute__) \
- { \
- CsrWifiSmeMibGetCfm *msg__; \
- CsrWifiSmeMibGetCfmCreate(msg__, dst__, src__, status__, mibAttributeLength__, mibAttribute__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeMibGetCfmSend(dst__, status__, mibAttributeLength__, mibAttribute__) \
- CsrWifiSmeMibGetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__, mibAttributeLength__, mibAttribute__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMibGetNextReqSend
-
- DESCRIPTION
- To read a sequence of MIB parameters, for example a table, call this
- primitive to find the name of the next MIB variable
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- mibAttributeLength - Length of mibAttribute
- mibAttribute - Points to a VarBind or VarBindList containing the
- name(s) of the MIB variable(s) to search from.
-
-*******************************************************************************/
-#define CsrWifiSmeMibGetNextReqCreate(msg__, dst__, src__, mibAttributeLength__, mibAttribute__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeMibGetNextReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_MIB_GET_NEXT_REQ, dst__, src__); \
- msg__->mibAttributeLength = (mibAttributeLength__); \
- msg__->mibAttribute = (mibAttribute__);
-
-#define CsrWifiSmeMibGetNextReqSendTo(dst__, src__, mibAttributeLength__, mibAttribute__) \
- { \
- CsrWifiSmeMibGetNextReq *msg__; \
- CsrWifiSmeMibGetNextReqCreate(msg__, dst__, src__, mibAttributeLength__, mibAttribute__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeMibGetNextReqSend(src__, mibAttributeLength__, mibAttribute__) \
- CsrWifiSmeMibGetNextReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, mibAttributeLength__, mibAttribute__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMibGetNextCfmSend
-
- DESCRIPTION
- The SME calls this primitive to return the requested MIB name(s).
- The wireless manager application can then read the value of the MIB
- variable using CSR_WIFI_SME_MIB_GET_REQ, using the names provided.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
- mibAttributeLength - Length of mibAttribute
- mibAttribute - Points to a VarBind or VarBindList containing the
- name(s) of the MIB variable(s) lexicographically
- following the name(s) given in the request
-
-*******************************************************************************/
-#define CsrWifiSmeMibGetNextCfmCreate(msg__, dst__, src__, status__, mibAttributeLength__, mibAttribute__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeMibGetNextCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_MIB_GET_NEXT_CFM, dst__, src__); \
- msg__->status = (status__); \
- msg__->mibAttributeLength = (mibAttributeLength__); \
- msg__->mibAttribute = (mibAttribute__);
-
-#define CsrWifiSmeMibGetNextCfmSendTo(dst__, src__, status__, mibAttributeLength__, mibAttribute__) \
- { \
- CsrWifiSmeMibGetNextCfm *msg__; \
- CsrWifiSmeMibGetNextCfmCreate(msg__, dst__, src__, status__, mibAttributeLength__, mibAttribute__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeMibGetNextCfmSend(dst__, status__, mibAttributeLength__, mibAttribute__) \
- CsrWifiSmeMibGetNextCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__, mibAttributeLength__, mibAttribute__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMibGetReqSend
-
- DESCRIPTION
- The wireless manager application calls this primitive to retrieve one or
- more MIB variables.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- mibAttributeLength - Length of mibAttribute
- mibAttribute - Points to the VarBind or VarBindList containing the
- names of the MIB variables to be retrieved
-
-*******************************************************************************/
-#define CsrWifiSmeMibGetReqCreate(msg__, dst__, src__, mibAttributeLength__, mibAttribute__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeMibGetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_MIB_GET_REQ, dst__, src__); \
- msg__->mibAttributeLength = (mibAttributeLength__); \
- msg__->mibAttribute = (mibAttribute__);
-
-#define CsrWifiSmeMibGetReqSendTo(dst__, src__, mibAttributeLength__, mibAttribute__) \
- { \
- CsrWifiSmeMibGetReq *msg__; \
- CsrWifiSmeMibGetReqCreate(msg__, dst__, src__, mibAttributeLength__, mibAttribute__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeMibGetReqSend(src__, mibAttributeLength__, mibAttribute__) \
- CsrWifiSmeMibGetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, mibAttributeLength__, mibAttribute__)
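The MIB get primitives above are normally used together: CSR_WIFI_SME_MIB_GET_NEXT_REQ finds the name of the next variable and CSR_WIFI_SME_MIB_GET_REQ then reads its value. A minimal sketch of that pattern (it assumes 'varbind'/'varbindLen' already hold a BER-encoded VarBind as described in the referenced specifications, and 'nextVarbind'/'nextVarbindLen' stand for the name(s) returned in the GET_NEXT confirm):

    /* Sketch: walk a MIB table one entry at a time. */
    CsrWifiSmeMibGetNextReqSend(wm_queue, varbindLen, varbind);
    /*
     * CSR_WIFI_SME_MIB_GET_NEXT_CFM returns the lexicographically following
     * name(s); feed them back into a get request to read the actual values:
     */
    CsrWifiSmeMibGetReqSend(wm_queue, nextVarbindLen, nextVarbind);
    /* CSR_WIFI_SME_MIB_GET_CFM then carries the requested values. */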
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMibSetReqSend
-
- DESCRIPTION
- The SME provides raw access to the MIB on the chip, which may be used by
- some configuration or diagnostic utilities, but is not normally needed by
- the wireless manager application.
- The MIB access functions use BER encoded names (OID) of the MIB
- parameters and BER encoded values, as described in the chip Host
- Interface Protocol Specification.
- The MIB parameters are described in 'Wi-Fi 5.0.0 Management Information
- Base Reference Guide'.
- The wireless manager application calls this primitive to set one or more
- MIB variables
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- mibAttributeLength - Length of mibAttribute
- mibAttribute - Points to the VarBind or VarBindList containing the
- names and values of the MIB variables to set
-
-*******************************************************************************/
-#define CsrWifiSmeMibSetReqCreate(msg__, dst__, src__, mibAttributeLength__, mibAttribute__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeMibSetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_MIB_SET_REQ, dst__, src__); \
- msg__->mibAttributeLength = (mibAttributeLength__); \
- msg__->mibAttribute = (mibAttribute__);
-
-#define CsrWifiSmeMibSetReqSendTo(dst__, src__, mibAttributeLength__, mibAttribute__) \
- { \
- CsrWifiSmeMibSetReq *msg__; \
- CsrWifiSmeMibSetReqCreate(msg__, dst__, src__, mibAttributeLength__, mibAttribute__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeMibSetReqSend(src__, mibAttributeLength__, mibAttribute__) \
- CsrWifiSmeMibSetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, mibAttributeLength__, mibAttribute__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMibSetCfmSend
-
- DESCRIPTION
- The SME calls the primitive to report the result of the set primitive.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
-
-*******************************************************************************/
-#define CsrWifiSmeMibSetCfmCreate(msg__, dst__, src__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeMibSetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_MIB_SET_CFM, dst__, src__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeMibSetCfmSendTo(dst__, src__, status__) \
- { \
- CsrWifiSmeMibSetCfm *msg__; \
- CsrWifiSmeMibSetCfmCreate(msg__, dst__, src__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeMibSetCfmSend(dst__, status__) \
- CsrWifiSmeMibSetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMicFailureIndSend
-
- DESCRIPTION
- The SME sends this primitive to all the tasks that have registered to
- receive it whenever the chip firmware reports a MIC failure.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- secondFailure - TRUE if this indication is for a second failure in 60
- seconds
- count - The number of MIC failure events since the connection was
- established
- address - MAC address of the transmitter that caused the MIC failure
- keyType - Type of key for which the failure occurred
-
-*******************************************************************************/
-#define CsrWifiSmeMicFailureIndCreate(msg__, dst__, src__, interfaceTag__, secondFailure__, count__, address__, keyType__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeMicFailureInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_MIC_FAILURE_IND, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->secondFailure = (secondFailure__); \
- msg__->count = (count__); \
- msg__->address = (address__); \
- msg__->keyType = (keyType__);
-
-#define CsrWifiSmeMicFailureIndSendTo(dst__, src__, interfaceTag__, secondFailure__, count__, address__, keyType__) \
- { \
- CsrWifiSmeMicFailureInd *msg__; \
- CsrWifiSmeMicFailureIndCreate(msg__, dst__, src__, interfaceTag__, secondFailure__, count__, address__, keyType__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeMicFailureIndSend(dst__, interfaceTag__, secondFailure__, count__, address__, keyType__) \
- CsrWifiSmeMicFailureIndSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, secondFailure__, count__, address__, keyType__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMulticastAddressReqSend
-
- DESCRIPTION
- The wireless manager application calls this primitive to specify the
- multicast addresses which the chip should recognise. The interface allows
- the wireless manager application to query, add, remove and flush the
- multicast addresses for the network interface according to the specified
- action.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
- action - The value of the CsrWifiSmeListAction parameter
- instructs the driver to modify or provide the list of
- MAC addresses.
- setAddressesCount - Number of MAC addresses sent with the primitive
- setAddresses - Pointer to the list of MAC Addresses sent with the
- primitive, set to NULL if none is sent.
-
-*******************************************************************************/
-#define CsrWifiSmeMulticastAddressReqCreate(msg__, dst__, src__, interfaceTag__, action__, setAddressesCount__, setAddresses__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeMulticastAddressReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_MULTICAST_ADDRESS_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->action = (action__); \
- msg__->setAddressesCount = (setAddressesCount__); \
- msg__->setAddresses = (setAddresses__);
-
-#define CsrWifiSmeMulticastAddressReqSendTo(dst__, src__, interfaceTag__, action__, setAddressesCount__, setAddresses__) \
- { \
- CsrWifiSmeMulticastAddressReq *msg__; \
- CsrWifiSmeMulticastAddressReqCreate(msg__, dst__, src__, interfaceTag__, action__, setAddressesCount__, setAddresses__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeMulticastAddressReqSend(src__, interfaceTag__, action__, setAddressesCount__, setAddresses__) \
- CsrWifiSmeMulticastAddressReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, interfaceTag__, action__, setAddressesCount__, setAddresses__)
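As an illustration, flushing and then re-programming the multicast list could be requested as below (a minimal sketch; CSR_WIFI_SME_LIST_ACTION_FLUSH and CSR_WIFI_SME_LIST_ACTION_ADD are placeholder names for CsrWifiSmeListAction values, and 'addrs' is assumed to be an array of 'addrCount' MAC address structures of the type carried by CsrWifiSmeMulticastAddressReq):

    /* Sketch: clear the current multicast list, then install a new one. */
    CsrWifiSmeMulticastAddressReqSend(wm_queue, interfaceTag,
                                      CSR_WIFI_SME_LIST_ACTION_FLUSH,  /* placeholder */
                                      0, NULL);
    CsrWifiSmeMulticastAddressReqSend(wm_queue, interfaceTag,
                                      CSR_WIFI_SME_LIST_ACTION_ADD,    /* placeholder */
                                      addrCount, addrs);
    /* Each request is answered with CSR_WIFI_SME_MULTICAST_ADDRESS_CFM. */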
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMulticastAddressCfmSend
-
- DESCRIPTION
- The SME will call this primitive when the operation is complete. For a
- GET action, this primitive reports the current list of MAC addresses.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
- action - Action in the request
- getAddressesCount - This parameter is only relevant if action is
- CSR_WIFI_SME_LIST_ACTION_GET:
- number of MAC addresses sent with the primitive
- getAddresses - Pointer to the list of MAC Addresses sent with the
- primitive, set to NULL if none is sent.
-
-*******************************************************************************/
-#define CsrWifiSmeMulticastAddressCfmCreate(msg__, dst__, src__, interfaceTag__, status__, action__, getAddressesCount__, getAddresses__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeMulticastAddressCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_MULTICAST_ADDRESS_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__); \
- msg__->action = (action__); \
- msg__->getAddressesCount = (getAddressesCount__); \
- msg__->getAddresses = (getAddresses__);
-
-#define CsrWifiSmeMulticastAddressCfmSendTo(dst__, src__, interfaceTag__, status__, action__, getAddressesCount__, getAddresses__) \
- { \
- CsrWifiSmeMulticastAddressCfm *msg__; \
- CsrWifiSmeMulticastAddressCfmCreate(msg__, dst__, src__, interfaceTag__, status__, action__, getAddressesCount__, getAddresses__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeMulticastAddressCfmSend(dst__, interfaceTag__, status__, action__, getAddressesCount__, getAddresses__) \
- CsrWifiSmeMulticastAddressCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__, action__, getAddressesCount__, getAddresses__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePacketFilterSetReqSend
-
- DESCRIPTION
- The wireless manager application should call this primitive to enable or
- disable filtering of broadcast packets: uninteresting broadcast packets
- will be dropped by the Wi-Fi chip, instead of passing them up to the
- host.
- This has the advantage of saving power in the host application processor
- as it removes the need to process unwanted packets.
- All broadcast packets are filtered according to the filter and the filter
- mode provided, except ARP packets, which are filtered using
- arpFilterAddress.
- Filters are not cumulative: only the parameters specified in the most
- recent successful request are significant.
- For more information, see 'UniFi Firmware API Specification'.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
- filterLength - Length of the filter in bytes.
- filterLength=0 disables the filter previously set
- filter - Points to the first byte of the filter provided, if any.
- This shall include zero or more instances of the
- information elements of one of these types:
- * Traffic Classification (TCLAS) elements
- * WMM-SA TCLAS elements
- mode - Specifies whether the filter selects or excludes packets
- matching the filter
- arpFilterAddress - IPv4 address to be used for filtering the ARP packets.
- * If the specified address is the IPv4 broadcast address
- (255.255.255.255), all ARP packets are reported to the
- host,
- * If the specified address is NOT the IPv4 broadcast
- address, only ARP packets with the specified address in
- the Source or Target Protocol Address fields are reported
- to the host
-
-*******************************************************************************/
-#define CsrWifiSmePacketFilterSetReqCreate(msg__, dst__, src__, interfaceTag__, filterLength__, filter__, mode__, arpFilterAddress__) \
- msg__ = kmalloc(sizeof(CsrWifiSmePacketFilterSetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_PACKET_FILTER_SET_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->filterLength = (filterLength__); \
- msg__->filter = (filter__); \
- msg__->mode = (mode__); \
- msg__->arpFilterAddress = (arpFilterAddress__);
-
-#define CsrWifiSmePacketFilterSetReqSendTo(dst__, src__, interfaceTag__, filterLength__, filter__, mode__, arpFilterAddress__) \
- { \
- CsrWifiSmePacketFilterSetReq *msg__; \
- CsrWifiSmePacketFilterSetReqCreate(msg__, dst__, src__, interfaceTag__, filterLength__, filter__, mode__, arpFilterAddress__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmePacketFilterSetReqSend(src__, interfaceTag__, filterLength__, filter__, mode__, arpFilterAddress__) \
- CsrWifiSmePacketFilterSetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, interfaceTag__, filterLength__, filter__, mode__, arpFilterAddress__)
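For example, broadcast filtering can be switched off again by sending a zero-length filter, exactly as described above (a minimal sketch; 'mode' stands for whichever filter mode value the caller uses, and 'arpFilterAddress' is assumed to already hold the IPv4 broadcast address so that all ARP packets keep reaching the host):

    /* Sketch: disable the previously set broadcast packet filter. */
    CsrWifiSmePacketFilterSetReqSend(wm_queue, interfaceTag,
                                     0,       /* filterLength = 0 disables the filter */
                                     NULL,    /* no filter bytes */
                                     mode,
                                     arpFilterAddress /* 255.255.255.255: report all ARP */);
    /* The SME reports the outcome in CSR_WIFI_SME_PACKET_FILTER_SET_CFM. */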
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePacketFilterSetCfmSend
-
- DESCRIPTION
- The SME calls the primitive to report the result of the set primitive.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
-
-*******************************************************************************/
-#define CsrWifiSmePacketFilterSetCfmCreate(msg__, dst__, src__, interfaceTag__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmePacketFilterSetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_PACKET_FILTER_SET_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__);
-
-#define CsrWifiSmePacketFilterSetCfmSendTo(dst__, src__, interfaceTag__, status__) \
- { \
- CsrWifiSmePacketFilterSetCfm *msg__; \
- CsrWifiSmePacketFilterSetCfmCreate(msg__, dst__, src__, interfaceTag__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmePacketFilterSetCfmSend(dst__, interfaceTag__, status__) \
- CsrWifiSmePacketFilterSetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePermanentMacAddressGetReqSend
-
- DESCRIPTION
- This primitive retrieves the MAC address stored in EEPROM
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
-
-*******************************************************************************/
-#define CsrWifiSmePermanentMacAddressGetReqCreate(msg__, dst__, src__) \
- msg__ = kmalloc(sizeof(CsrWifiSmePermanentMacAddressGetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_PERMANENT_MAC_ADDRESS_GET_REQ, dst__, src__);
-
-#define CsrWifiSmePermanentMacAddressGetReqSendTo(dst__, src__) \
- { \
- CsrWifiSmePermanentMacAddressGetReq *msg__; \
- CsrWifiSmePermanentMacAddressGetReqCreate(msg__, dst__, src__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmePermanentMacAddressGetReqSend(src__) \
- CsrWifiSmePermanentMacAddressGetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePermanentMacAddressGetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
- permanentMacAddress - MAC address stored in the EEPROM
-
-*******************************************************************************/
-#define CsrWifiSmePermanentMacAddressGetCfmCreate(msg__, dst__, src__, status__, permanentMacAddress__) \
- msg__ = kmalloc(sizeof(CsrWifiSmePermanentMacAddressGetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_PERMANENT_MAC_ADDRESS_GET_CFM, dst__, src__); \
- msg__->status = (status__); \
- msg__->permanentMacAddress = (permanentMacAddress__);
-
-#define CsrWifiSmePermanentMacAddressGetCfmSendTo(dst__, src__, status__, permanentMacAddress__) \
- { \
- CsrWifiSmePermanentMacAddressGetCfm *msg__; \
- CsrWifiSmePermanentMacAddressGetCfmCreate(msg__, dst__, src__, status__, permanentMacAddress__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmePermanentMacAddressGetCfmSend(dst__, status__, permanentMacAddress__) \
- CsrWifiSmePermanentMacAddressGetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__, permanentMacAddress__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePmkidCandidateListIndSend
-
- DESCRIPTION
- The SME will send this primitive to all the tasks that have registered to
- receive it when a new network supporting preauthentication and/or PMK
- caching is seen.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an
- interface
- pmkidCandidatesCount - Number of PMKID candidates provided
- pmkidCandidates - Points to the first PMKID candidate
-
-*******************************************************************************/
-#define CsrWifiSmePmkidCandidateListIndCreate(msg__, dst__, src__, interfaceTag__, pmkidCandidatesCount__, pmkidCandidates__) \
- msg__ = kmalloc(sizeof(CsrWifiSmePmkidCandidateListInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_PMKID_CANDIDATE_LIST_IND, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->pmkidCandidatesCount = (pmkidCandidatesCount__); \
- msg__->pmkidCandidates = (pmkidCandidates__);
-
-#define CsrWifiSmePmkidCandidateListIndSendTo(dst__, src__, interfaceTag__, pmkidCandidatesCount__, pmkidCandidates__) \
- { \
- CsrWifiSmePmkidCandidateListInd *msg__; \
- CsrWifiSmePmkidCandidateListIndCreate(msg__, dst__, src__, interfaceTag__, pmkidCandidatesCount__, pmkidCandidates__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmePmkidCandidateListIndSend(dst__, interfaceTag__, pmkidCandidatesCount__, pmkidCandidates__) \
- CsrWifiSmePmkidCandidateListIndSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, pmkidCandidatesCount__, pmkidCandidates__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePmkidReqSend
-
- DESCRIPTION
- The wireless manager application calls this primitive to request an
- operation on the SME PMKID list.
- The action argument specifies the operation to perform.
- When the connection is complete, the wireless manager application may
- then send and receive EAPOL packets to complete WPA or WPA2
- authentication if appropriate.
- The wireless manager application can then add the resulting PMKIDs to
- the SME list using this primitive.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
- action - The value of the CsrWifiSmeListAction parameter instructs
- the driver to modify or provide the list of PMKIDs.
- setPmkidsCount - Number of PMKIDs sent with the primitive
- setPmkids - Pointer to the list of PMKIDs sent with the primitive, set
- to NULL if none is sent.
-
-*******************************************************************************/
-#define CsrWifiSmePmkidReqCreate(msg__, dst__, src__, interfaceTag__, action__, setPmkidsCount__, setPmkids__) \
- msg__ = kmalloc(sizeof(CsrWifiSmePmkidReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_PMKID_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->action = (action__); \
- msg__->setPmkidsCount = (setPmkidsCount__); \
- msg__->setPmkids = (setPmkids__);
-
-#define CsrWifiSmePmkidReqSendTo(dst__, src__, interfaceTag__, action__, setPmkidsCount__, setPmkids__) \
- { \
- CsrWifiSmePmkidReq *msg__; \
- CsrWifiSmePmkidReqCreate(msg__, dst__, src__, interfaceTag__, action__, setPmkidsCount__, setPmkids__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmePmkidReqSend(src__, interfaceTag__, action__, setPmkidsCount__, setPmkids__) \
- CsrWifiSmePmkidReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, interfaceTag__, action__, setPmkidsCount__, setPmkids__)
-
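As a minimal usage sketch (not part of the original header), a wireless manager task could read back the current PMKID list with a GET action. myQueue stands for the caller's task queue and the zero interfaceTag is an assumption; CSR_WIFI_SME_LIST_ACTION_GET is the action value named in the confirm description below, and the result arrives in a CSR_WIFI_SME_PMKID_CFM.

    /* Hedged sketch: ask the SME for its current PMKID list (GET action).
     * myQueue is a placeholder for the caller's task queue; no PMKIDs are
     * supplied because nothing is being added or removed. */
    CsrWifiSmePmkidReqSend(myQueue,                       /* src__: the Cfm is sent here   */
                           0,                             /* interfaceTag (assumed 0)      */
                           CSR_WIFI_SME_LIST_ACTION_GET,  /* retrieve the list             */
                           0,                             /* setPmkidsCount: none supplied */
                           NULL);                         /* setPmkids: none supplied      */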
-/*******************************************************************************
-
- NAME
- CsrWifiSmePmkidCfmSend
-
- DESCRIPTION
- The SME will call this primitive when the operation is complete. For a
- GET action, this primitive reports the current list of PMKIDs
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
- action - Action in the request
- getPmkidsCount - This parameter is only relevant if action is
- CSR_WIFI_SME_LIST_ACTION_GET:
- number of PMKIDs sent with the primitive
- getPmkids - Pointer to the list of PMKIDs sent with the primitive, set
- to NULL if none is sent.
-
-*******************************************************************************/
-#define CsrWifiSmePmkidCfmCreate(msg__, dst__, src__, interfaceTag__, status__, action__, getPmkidsCount__, getPmkids__) \
- msg__ = kmalloc(sizeof(CsrWifiSmePmkidCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_PMKID_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__); \
- msg__->action = (action__); \
- msg__->getPmkidsCount = (getPmkidsCount__); \
- msg__->getPmkids = (getPmkids__);
-
-#define CsrWifiSmePmkidCfmSendTo(dst__, src__, interfaceTag__, status__, action__, getPmkidsCount__, getPmkids__) \
- { \
- CsrWifiSmePmkidCfm *msg__; \
- CsrWifiSmePmkidCfmCreate(msg__, dst__, src__, interfaceTag__, status__, action__, getPmkidsCount__, getPmkids__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmePmkidCfmSend(dst__, interfaceTag__, status__, action__, getPmkidsCount__, getPmkids__) \
- CsrWifiSmePmkidCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__, action__, getPmkidsCount__, getPmkids__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePowerConfigGetReqSend
-
- DESCRIPTION
- This primitive gets the value of the PowerConfig parameter.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
-
-*******************************************************************************/
-#define CsrWifiSmePowerConfigGetReqCreate(msg__, dst__, src__) \
- msg__ = kmalloc(sizeof(CsrWifiSmePowerConfigGetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_POWER_CONFIG_GET_REQ, dst__, src__);
-
-#define CsrWifiSmePowerConfigGetReqSendTo(dst__, src__) \
- { \
- CsrWifiSmePowerConfigGetReq *msg__; \
- CsrWifiSmePowerConfigGetReqCreate(msg__, dst__, src__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmePowerConfigGetReqSend(src__) \
- CsrWifiSmePowerConfigGetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePowerConfigGetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
- powerConfig - Returns the current parameters for the power configuration of
- the firmware
-
-*******************************************************************************/
-#define CsrWifiSmePowerConfigGetCfmCreate(msg__, dst__, src__, status__, powerConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiSmePowerConfigGetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_POWER_CONFIG_GET_CFM, dst__, src__); \
- msg__->status = (status__); \
- msg__->powerConfig = (powerConfig__);
-
-#define CsrWifiSmePowerConfigGetCfmSendTo(dst__, src__, status__, powerConfig__) \
- { \
- CsrWifiSmePowerConfigGetCfm *msg__; \
- CsrWifiSmePowerConfigGetCfmCreate(msg__, dst__, src__, status__, powerConfig__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmePowerConfigGetCfmSend(dst__, status__, powerConfig__) \
- CsrWifiSmePowerConfigGetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__, powerConfig__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePowerConfigSetReqSend
-
- DESCRIPTION
- This primitive sets the value of the PowerConfig parameter.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- powerConfig - Power saving configuration
-
-*******************************************************************************/
-#define CsrWifiSmePowerConfigSetReqCreate(msg__, dst__, src__, powerConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiSmePowerConfigSetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_POWER_CONFIG_SET_REQ, dst__, src__); \
- msg__->powerConfig = (powerConfig__);
-
-#define CsrWifiSmePowerConfigSetReqSendTo(dst__, src__, powerConfig__) \
- { \
- CsrWifiSmePowerConfigSetReq *msg__; \
- CsrWifiSmePowerConfigSetReqCreate(msg__, dst__, src__, powerConfig__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmePowerConfigSetReqSend(src__, powerConfig__) \
- CsrWifiSmePowerConfigSetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, powerConfig__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePowerConfigSetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
-
-*******************************************************************************/
-#define CsrWifiSmePowerConfigSetCfmCreate(msg__, dst__, src__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmePowerConfigSetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_POWER_CONFIG_SET_CFM, dst__, src__); \
- msg__->status = (status__);
-
-#define CsrWifiSmePowerConfigSetCfmSendTo(dst__, src__, status__) \
- { \
- CsrWifiSmePowerConfigSetCfm *msg__; \
- CsrWifiSmePowerConfigSetCfmCreate(msg__, dst__, src__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmePowerConfigSetCfmSend(dst__, status__) \
- CsrWifiSmePowerConfigSetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeRegulatoryDomainInfoGetReqSend
-
- DESCRIPTION
- This primitive gets the value of the RegulatoryDomainInfo parameter.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
-
-*******************************************************************************/
-#define CsrWifiSmeRegulatoryDomainInfoGetReqCreate(msg__, dst__, src__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeRegulatoryDomainInfoGetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_REGULATORY_DOMAIN_INFO_GET_REQ, dst__, src__);
-
-#define CsrWifiSmeRegulatoryDomainInfoGetReqSendTo(dst__, src__) \
- { \
- CsrWifiSmeRegulatoryDomainInfoGetReq *msg__; \
- CsrWifiSmeRegulatoryDomainInfoGetReqCreate(msg__, dst__, src__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeRegulatoryDomainInfoGetReqSend(src__) \
- CsrWifiSmeRegulatoryDomainInfoGetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeRegulatoryDomainInfoGetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
- regDomInfo - Reports information and state related to regulatory domain
- operation.
-
-*******************************************************************************/
-#define CsrWifiSmeRegulatoryDomainInfoGetCfmCreate(msg__, dst__, src__, status__, regDomInfo__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeRegulatoryDomainInfoGetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_REGULATORY_DOMAIN_INFO_GET_CFM, dst__, src__); \
- msg__->status = (status__); \
- msg__->regDomInfo = (regDomInfo__);
-
-#define CsrWifiSmeRegulatoryDomainInfoGetCfmSendTo(dst__, src__, status__, regDomInfo__) \
- { \
- CsrWifiSmeRegulatoryDomainInfoGetCfm *msg__; \
- CsrWifiSmeRegulatoryDomainInfoGetCfmCreate(msg__, dst__, src__, status__, regDomInfo__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeRegulatoryDomainInfoGetCfmSend(dst__, status__, regDomInfo__) \
- CsrWifiSmeRegulatoryDomainInfoGetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__, regDomInfo__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeRoamCompleteIndSend
-
- DESCRIPTION
- The SME will send this primitive to all the tasks that have registered to
- receive it whenever it completes an attempt to roam to an AP. If the roam
- attempt was successful, status will be set to CSR_WIFI_SME_SUCCESS,
- otherwise it shall be set to the appropriate error code.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the roaming procedure
-
-*******************************************************************************/
-#define CsrWifiSmeRoamCompleteIndCreate(msg__, dst__, src__, interfaceTag__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeRoamCompleteInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_ROAM_COMPLETE_IND, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeRoamCompleteIndSendTo(dst__, src__, interfaceTag__, status__) \
- { \
- CsrWifiSmeRoamCompleteInd *msg__; \
- CsrWifiSmeRoamCompleteIndCreate(msg__, dst__, src__, interfaceTag__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeRoamCompleteIndSend(dst__, interfaceTag__, status__) \
- CsrWifiSmeRoamCompleteIndSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__)
-
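For illustration only, a receiving task's handler for this indication might look like the hedged fragment below; the function name is hypothetical, and CSR_WIFI_SME_SUCCESS is the success value mentioned in the description above.

    /* Hypothetical handler fragment for CSR_WIFI_SME_ROAM_COMPLETE_IND. */
    static void handle_roam_complete(const CsrWifiSmeRoamCompleteInd *ind)
    {
            if (ind->status == CSR_WIFI_SME_SUCCESS) {
                    /* roamed to the candidate AP; a Media Connect indication
                     * describing the new association should follow */
            } else {
                    /* the roam attempt failed; ind->status holds the error code */
            }
    }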
-/*******************************************************************************
-
- NAME
- CsrWifiSmeRoamStartIndSend
-
- DESCRIPTION
- The SME will send this primitive to all the tasks that have registered to
- receive it whenever it begins an attempt to roam to an AP.
- If the wireless manager application connect request specified the SSID
- and the BSSID was set to the broadcast address (0xFF 0xFF 0xFF 0xFF 0xFF
- 0xFF), the SME monitors the signal quality and maintains a list of
- candidates to roam to. When the signal quality of the current connection
- falls below a threshold, and there is a candidate with better quality,
- the SME will attempt to roam to the candidate AP.
- If the roaming procedure succeeds, the SME will also issue a Media
- Connect indication to inform the wireless manager application of the
- change.
- NOTE: to prevent the SME from initiating roaming the WMA must specify the
- BSSID in the connection request; this forces the SME to connect only to
- that AP.
- The wireless manager application can obtain statistics for roaming
- purposes using CSR_WIFI_SME_CONNECTION_QUALITY_IND and
- CSR_WIFI_SME_CONNECTION_STATS_GET_REQ.
- When the wireless manager application wishes to roam to another AP, it
- must issue a connection request specifying the BSSID of the desired AP.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- roamReason - Indicates the reason for starting the roaming procedure
- reason80211 - Indicates the reason for deauthentication or disassociation
-
-*******************************************************************************/
-#define CsrWifiSmeRoamStartIndCreate(msg__, dst__, src__, interfaceTag__, roamReason__, reason80211__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeRoamStartInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_ROAM_START_IND, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->roamReason = (roamReason__); \
- msg__->reason80211 = (reason80211__);
-
-#define CsrWifiSmeRoamStartIndSendTo(dst__, src__, interfaceTag__, roamReason__, reason80211__) \
- { \
- CsrWifiSmeRoamStartInd *msg__; \
- CsrWifiSmeRoamStartIndCreate(msg__, dst__, src__, interfaceTag__, roamReason__, reason80211__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeRoamStartIndSend(dst__, interfaceTag__, roamReason__, reason80211__) \
- CsrWifiSmeRoamStartIndSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, roamReason__, reason80211__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeRoamingConfigGetReqSend
-
- DESCRIPTION
- This primitive gets the value of the RoamingConfig parameter.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
-
-*******************************************************************************/
-#define CsrWifiSmeRoamingConfigGetReqCreate(msg__, dst__, src__, interfaceTag__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeRoamingConfigGetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_ROAMING_CONFIG_GET_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__);
-
-#define CsrWifiSmeRoamingConfigGetReqSendTo(dst__, src__, interfaceTag__) \
- { \
- CsrWifiSmeRoamingConfigGetReq *msg__; \
- CsrWifiSmeRoamingConfigGetReqCreate(msg__, dst__, src__, interfaceTag__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeRoamingConfigGetReqSend(src__, interfaceTag__) \
- CsrWifiSmeRoamingConfigGetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, interfaceTag__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeRoamingConfigGetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
- roamingConfig - Reports the roaming behaviour of the driver and firmware
-
-*******************************************************************************/
-#define CsrWifiSmeRoamingConfigGetCfmCreate(msg__, dst__, src__, interfaceTag__, status__, roamingConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeRoamingConfigGetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_ROAMING_CONFIG_GET_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__); \
- msg__->roamingConfig = (roamingConfig__);
-
-#define CsrWifiSmeRoamingConfigGetCfmSendTo(dst__, src__, interfaceTag__, status__, roamingConfig__) \
- { \
- CsrWifiSmeRoamingConfigGetCfm *msg__; \
- CsrWifiSmeRoamingConfigGetCfmCreate(msg__, dst__, src__, interfaceTag__, status__, roamingConfig__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeRoamingConfigGetCfmSend(dst__, interfaceTag__, status__, roamingConfig__) \
- CsrWifiSmeRoamingConfigGetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__, roamingConfig__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeRoamingConfigSetReqSend
-
- DESCRIPTION
- This primitive sets the value of the RoamingConfig parameter.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
- roamingConfig - Desired roaming behaviour values
-
-*******************************************************************************/
-#define CsrWifiSmeRoamingConfigSetReqCreate(msg__, dst__, src__, interfaceTag__, roamingConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeRoamingConfigSetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_ROAMING_CONFIG_SET_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->roamingConfig = (roamingConfig__);
-
-#define CsrWifiSmeRoamingConfigSetReqSendTo(dst__, src__, interfaceTag__, roamingConfig__) \
- { \
- CsrWifiSmeRoamingConfigSetReq *msg__; \
- CsrWifiSmeRoamingConfigSetReqCreate(msg__, dst__, src__, interfaceTag__, roamingConfig__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeRoamingConfigSetReqSend(src__, interfaceTag__, roamingConfig__) \
- CsrWifiSmeRoamingConfigSetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, interfaceTag__, roamingConfig__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeRoamingConfigSetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
-
-*******************************************************************************/
-#define CsrWifiSmeRoamingConfigSetCfmCreate(msg__, dst__, src__, interfaceTag__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeRoamingConfigSetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_ROAMING_CONFIG_SET_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeRoamingConfigSetCfmSendTo(dst__, src__, interfaceTag__, status__) \
- { \
- CsrWifiSmeRoamingConfigSetCfm *msg__; \
- CsrWifiSmeRoamingConfigSetCfmCreate(msg__, dst__, src__, interfaceTag__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeRoamingConfigSetCfmSend(dst__, interfaceTag__, status__) \
- CsrWifiSmeRoamingConfigSetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeScanConfigGetReqSend
-
- DESCRIPTION
- This primitive gets the value of the ScanConfig parameter.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
-
-*******************************************************************************/
-#define CsrWifiSmeScanConfigGetReqCreate(msg__, dst__, src__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeScanConfigGetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_SCAN_CONFIG_GET_REQ, dst__, src__);
-
-#define CsrWifiSmeScanConfigGetReqSendTo(dst__, src__) \
- { \
- CsrWifiSmeScanConfigGetReq *msg__; \
- CsrWifiSmeScanConfigGetReqCreate(msg__, dst__, src__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeScanConfigGetReqSend(src__) \
- CsrWifiSmeScanConfigGetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeScanConfigGetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
- scanConfig - Returns the current parameters for the autonomous scanning
- behaviour of the firmware
-
-*******************************************************************************/
-#define CsrWifiSmeScanConfigGetCfmCreate(msg__, dst__, src__, status__, scanConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeScanConfigGetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_SCAN_CONFIG_GET_CFM, dst__, src__); \
- msg__->status = (status__); \
- msg__->scanConfig = (scanConfig__);
-
-#define CsrWifiSmeScanConfigGetCfmSendTo(dst__, src__, status__, scanConfig__) \
- { \
- CsrWifiSmeScanConfigGetCfm *msg__; \
- CsrWifiSmeScanConfigGetCfmCreate(msg__, dst__, src__, status__, scanConfig__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeScanConfigGetCfmSend(dst__, status__, scanConfig__) \
- CsrWifiSmeScanConfigGetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__, scanConfig__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeScanConfigSetReqSend
-
- DESCRIPTION
- This primitive sets the value of the ScanConfig parameter.
- The SME normally configures the firmware to perform autonomous scanning
- without involving the host.
- The firmware passes beacon / probe response frames, or indicates loss
- of beacon, on certain changes of state, for example:
- * A new AP is seen for the first time
- * An AP is no longer visible
- * The signal strength of an AP changes by more than a certain amount, as
- configured by the thresholds in the scanConfig parameter
- In addition to the autonomous scan, the wireless manager application may
- request a scan at any time using CSR_WIFI_SME_SCAN_FULL_REQ.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- scanConfig - Reports the configuration for the autonomous scanning behaviour
- of the firmware
-
-*******************************************************************************/
-#define CsrWifiSmeScanConfigSetReqCreate(msg__, dst__, src__, scanConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeScanConfigSetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_SCAN_CONFIG_SET_REQ, dst__, src__); \
- msg__->scanConfig = (scanConfig__);
-
-#define CsrWifiSmeScanConfigSetReqSendTo(dst__, src__, scanConfig__) \
- { \
- CsrWifiSmeScanConfigSetReq *msg__; \
- CsrWifiSmeScanConfigSetReqCreate(msg__, dst__, src__, scanConfig__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeScanConfigSetReqSend(src__, scanConfig__) \
- CsrWifiSmeScanConfigSetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, scanConfig__)
-
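A hedged read-modify-write sketch, since callers normally adjust only a few thresholds: fetch the current configuration, tweak it in the GET confirm handler, then write it back. The CsrWifiSmeScanConfig fields are defined elsewhere in the SME headers, so the modification step is only indicated by a comment; myQueue and updatedScanConfig are placeholders.

    /* Hypothetical get/modify/set sequence for the ScanConfig parameter. */
    CsrWifiSmeScanConfigGetReqSend(myQueue);
    /* ...in the CSR_WIFI_SME_SCAN_CONFIG_GET_CFM handler, copy cfm->scanConfig,
     * adjust the threshold fields of interest, then write the result back: */
    CsrWifiSmeScanConfigSetReqSend(myQueue, updatedScanConfig);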
-/*******************************************************************************
-
- NAME
- CsrWifiSmeScanConfigSetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
-
-*******************************************************************************/
-#define CsrWifiSmeScanConfigSetCfmCreate(msg__, dst__, src__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeScanConfigSetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_SCAN_CONFIG_SET_CFM, dst__, src__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeScanConfigSetCfmSendTo(dst__, src__, status__) \
- { \
- CsrWifiSmeScanConfigSetCfm *msg__; \
- CsrWifiSmeScanConfigSetCfmCreate(msg__, dst__, src__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeScanConfigSetCfmSend(dst__, status__) \
- CsrWifiSmeScanConfigSetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeScanFullReqSend
-
- DESCRIPTION
- The wireless manager application should call this primitive to request a
- full scan.
- Channels are scanned actively or passively according to the requirement
- set by regulatory domain.
- If the SME receives this primitive while a full scan is going on, the new
- request is buffered and it will be served after the current full scan is
- completed.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- ssidCount - Number of SSIDs provided.
- If it is 0, the SME will attempt to detect any network
- ssid - Points to the first SSID provided, if any.
- bssid - BSS identifier.
- If it is equal to FF-FF-FF-FF-FF-FF, the SME will listen
- for messages from any BSS.
- If it is different from FF-FF-FF-FF-FF-FF and any SSID is
- provided, one SSID must match the network of the BSS.
- forceScan - Forces the scan even if the SME is in a state which would
- normally prevent it (e.g. autonomous scan is running).
- bssType - Type of BSS to scan for
- scanType - Type of scan to perform
- channelListCount - Number of channels provided.
- If it is 0, the SME will initiate a scan of all the
- supported channels that are permitted by the current
- regulatory domain.
- channelList - Points to the first channel, or NULL if channelListCount
- is zero.
- probeIeLength - Length of the information element in bytes to be sent
- with the probe message.
- probeIe - Points to the first byte of the information element to be
- sent with the probe message.
-
-*******************************************************************************/
-#define CsrWifiSmeScanFullReqCreate(msg__, dst__, src__, ssidCount__, ssid__, bssid__, forceScan__, bssType__, scanType__, channelListCount__, channelList__, probeIeLength__, probeIe__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeScanFullReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_SCAN_FULL_REQ, dst__, src__); \
- msg__->ssidCount = (ssidCount__); \
- msg__->ssid = (ssid__); \
- msg__->bssid = (bssid__); \
- msg__->forceScan = (forceScan__); \
- msg__->bssType = (bssType__); \
- msg__->scanType = (scanType__); \
- msg__->channelListCount = (channelListCount__); \
- msg__->channelList = (channelList__); \
- msg__->probeIeLength = (probeIeLength__); \
- msg__->probeIe = (probeIe__);
-
-#define CsrWifiSmeScanFullReqSendTo(dst__, src__, ssidCount__, ssid__, bssid__, forceScan__, bssType__, scanType__, channelListCount__, channelList__, probeIeLength__, probeIe__) \
- { \
- CsrWifiSmeScanFullReq *msg__; \
- CsrWifiSmeScanFullReqCreate(msg__, dst__, src__, ssidCount__, ssid__, bssid__, forceScan__, bssType__, scanType__, channelListCount__, channelList__, probeIeLength__, probeIe__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeScanFullReqSend(src__, ssidCount__, ssid__, bssid__, forceScan__, bssType__, scanType__, channelListCount__, channelList__, probeIeLength__, probeIe__) \
- CsrWifiSmeScanFullReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, ssidCount__, ssid__, bssid__, forceScan__, bssType__, scanType__, channelListCount__, channelList__, probeIeLength__, probeIe__)
-
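As a hedged illustration of these parameters (not part of the original header), the call below requests a full scan for any SSID, any BSS and every permitted channel, with no probe information element. myQueue, bssType and scanType are placeholders: the queue id is whatever the calling task uses, and the type values come from the CsrWifiSmeBssType and CsrWifiSmeScanType enumerations defined elsewhere in the SME headers; the CsrWifiMacAddress layout is also an assumption.

    /* Hypothetical sketch: full scan, any SSID, any BSS, all permitted channels. */
    CsrWifiMacAddress anyBssid = {{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}}; /* assuming the type
                                                                            wraps a 6-byte array */

    CsrWifiSmeScanFullReqSend(myQueue,  /* src__: Cfm and scan results come back here   */
                              0, NULL,  /* ssidCount = 0: detect any network            */
                              anyBssid, /* broadcast BSSID: listen for any BSS          */
                              0,        /* forceScan = FALSE                            */
                              bssType,  /* placeholder CsrWifiSmeBssType value          */
                              scanType, /* placeholder CsrWifiSmeScanType value         */
                              0, NULL,  /* channelListCount = 0: all permitted channels */
                              0, NULL); /* no probe information element                 */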
-/*******************************************************************************
-
- NAME
- CsrWifiSmeScanFullCfmSend
-
- DESCRIPTION
- The SME calls this primitive when the results from the scan are
- available.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
-
-*******************************************************************************/
-#define CsrWifiSmeScanFullCfmCreate(msg__, dst__, src__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeScanFullCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_SCAN_FULL_CFM, dst__, src__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeScanFullCfmSendTo(dst__, src__, status__) \
- { \
- CsrWifiSmeScanFullCfm *msg__; \
- CsrWifiSmeScanFullCfmCreate(msg__, dst__, src__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeScanFullCfmSend(dst__, status__) \
- CsrWifiSmeScanFullCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeScanResultIndSend
-
- DESCRIPTION
- The SME sends this primitive to all the tasks that have registered to
- receive it whenever a scan indication is received from the firmware.
-
- PARAMETERS
- queue - Destination Task Queue
- result - Points to a buffer containing a scan result.
-
-*******************************************************************************/
-#define CsrWifiSmeScanResultIndCreate(msg__, dst__, src__, result__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeScanResultInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_SCAN_RESULT_IND, dst__, src__); \
- msg__->result = (result__);
-
-#define CsrWifiSmeScanResultIndSendTo(dst__, src__, result__) \
- { \
- CsrWifiSmeScanResultInd *msg__; \
- CsrWifiSmeScanResultIndCreate(msg__, dst__, src__, result__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeScanResultIndSend(dst__, result__) \
- CsrWifiSmeScanResultIndSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, result__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeScanResultsFlushReqSend
-
- DESCRIPTION
- The Wireless Manager calls this primitive to ask the SME to delete all
- scan results from its cache, except for the scan result of any currently
- connected network.
- As scan results are received by the SME from the firmware, they are
- cached in the SME memory.
- Any time the Wireless Manager requests scan results, they are returned
- from the SME internal cache.
- For some applications it may be desirable to clear this cache prior to
- requesting that a scan be performed; this will ensure that the cache then
- only contains the networks detected in the most recent scan.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
-
-*******************************************************************************/
-#define CsrWifiSmeScanResultsFlushReqCreate(msg__, dst__, src__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeScanResultsFlushReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_SCAN_RESULTS_FLUSH_REQ, dst__, src__);
-
-#define CsrWifiSmeScanResultsFlushReqSendTo(dst__, src__) \
- { \
- CsrWifiSmeScanResultsFlushReq *msg__; \
- CsrWifiSmeScanResultsFlushReqCreate(msg__, dst__, src__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeScanResultsFlushReqSend(src__) \
- CsrWifiSmeScanResultsFlushReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__)
-
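To make the cache behaviour concrete, a hedged ordering sketch follows. The calls are shown back to back for brevity, but each request must wait for its confirm before the next is issued; myQueue, anyBssid, bssType and scanType are the same placeholders used in the full-scan sketch above.

    /* Hypothetical sequence: flush the cache, rescan, then read back results. */
    CsrWifiSmeScanResultsFlushReqSend(myQueue);      /* 1. empty the SME scan cache  */
    /* ...wait for CSR_WIFI_SME_SCAN_RESULTS_FLUSH_CFM... */
    CsrWifiSmeScanFullReqSend(myQueue, 0, NULL, anyBssid, 0, bssType, scanType,
                              0, NULL, 0, NULL);     /* 2. perform a fresh full scan */
    /* ...wait for CSR_WIFI_SME_SCAN_FULL_CFM, then fetch the results with
     * CsrWifiSmeScanResultsGetReqSend() so the cache holds only this scan. */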
-/*******************************************************************************
-
- NAME
- CsrWifiSmeScanResultsFlushCfmSend
-
- DESCRIPTION
- The SME will call this primitive when the cache has been cleared.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
-
-*******************************************************************************/
-#define CsrWifiSmeScanResultsFlushCfmCreate(msg__, dst__, src__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeScanResultsFlushCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_SCAN_RESULTS_FLUSH_CFM, dst__, src__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeScanResultsFlushCfmSendTo(dst__, src__, status__) \
- { \
- CsrWifiSmeScanResultsFlushCfm *msg__; \
- CsrWifiSmeScanResultsFlushCfmCreate(msg__, dst__, src__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeScanResultsFlushCfmSend(dst__, status__) \
- CsrWifiSmeScanResultsFlushCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeScanResultsGetReqSend
-
- DESCRIPTION
- The wireless manager application calls this primitive to retrieve the
- current set of scan results, either after receiving a successful
- CSR_WIFI_SME_SCAN_FULL_CFM, or to get autonomous scan results.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
-
-*******************************************************************************/
-#define CsrWifiSmeScanResultsGetReqCreate(msg__, dst__, src__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeScanResultsGetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_SCAN_RESULTS_GET_REQ, dst__, src__);
-
-#define CsrWifiSmeScanResultsGetReqSendTo(dst__, src__) \
- { \
- CsrWifiSmeScanResultsGetReq *msg__; \
- CsrWifiSmeScanResultsGetReqCreate(msg__, dst__, src__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeScanResultsGetReqSend(src__) \
- CsrWifiSmeScanResultsGetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeScanResultsGetCfmSend
-
- DESCRIPTION
- The SME sends this primitive to provide the current set of scan results.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
- scanResultsCount - Number of scan results
- scanResults - Points to a buffer containing an array of
- CsrWifiSmeScanResult structures.
-
-*******************************************************************************/
-#define CsrWifiSmeScanResultsGetCfmCreate(msg__, dst__, src__, status__, scanResultsCount__, scanResults__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeScanResultsGetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_SCAN_RESULTS_GET_CFM, dst__, src__); \
- msg__->status = (status__); \
- msg__->scanResultsCount = (scanResultsCount__); \
- msg__->scanResults = (scanResults__);
-
-#define CsrWifiSmeScanResultsGetCfmSendTo(dst__, src__, status__, scanResultsCount__, scanResults__) \
- { \
- CsrWifiSmeScanResultsGetCfm *msg__; \
- CsrWifiSmeScanResultsGetCfmCreate(msg__, dst__, src__, status__, scanResultsCount__, scanResults__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeScanResultsGetCfmSend(dst__, status__, scanResultsCount__, scanResults__) \
- CsrWifiSmeScanResultsGetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__, scanResultsCount__, scanResults__)
-
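A hedged sketch of consuming this confirm: the handler name is hypothetical, and the individual CsrWifiSmeScanResult fields are defined elsewhere in the SME headers, so they are not dereferenced here.

    /* Hypothetical handler fragment for CSR_WIFI_SME_SCAN_RESULTS_GET_CFM. */
    static void handle_scan_results(const CsrWifiSmeScanResultsGetCfm *cfm)
    {
            u16 i;

            for (i = 0; i < cfm->scanResultsCount; i++) {
                    const CsrWifiSmeScanResult *entry = &cfm->scanResults[i];
                    /* inspect the scan result fields (SSID, BSSID, ...) here */
                    (void)entry;
            }
    }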
-/*******************************************************************************
-
- NAME
- CsrWifiSmeSetReqSend
-
- DESCRIPTION
- Used to pass custom data to the SME. The format is the same as for
- 802.11 Information Elements: | Id | Length | Data |
- 1) Cmanr Test Mode: Id:0, Length:1, Data: 0x00 = OFF, 0x01 = ON
- (element bytes: 0x00 0x01 followed by 0x00 or 0x01)
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- dataLength - Number of bytes in the buffer pointed to by 'data'
- data - Pointer to the buffer containing 'dataLength' bytes
-
-*******************************************************************************/
-#define CsrWifiSmeSetReqCreate(msg__, dst__, src__, dataLength__, data__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeSetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_SET_REQ, dst__, src__); \
- msg__->dataLength = (dataLength__); \
- msg__->data = (data__);
-
-#define CsrWifiSmeSetReqSendTo(dst__, src__, dataLength__, data__) \
- { \
- CsrWifiSmeSetReq *msg__; \
- CsrWifiSmeSetReqCreate(msg__, dst__, src__, dataLength__, data__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeSetReqSend(src__, dataLength__, data__) \
- CsrWifiSmeSetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, dataLength__, data__)
-
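A hedged encoding sketch of the Id/Length/Data layout above, requesting Cmanr Test Mode ON. myQueue is a placeholder, and the buffer-ownership rule (who frees the data once the SME has consumed it) is not stated in this header, so the allocation below is illustrative only.

    /* Hypothetical sketch: build the 3-byte element Id=0, Length=1, Data=0x01. */
    u8 *ie = kmalloc(3, GFP_KERNEL);

    if (ie) {
            ie[0] = 0x00;                        /* Id: Cmanr Test Mode   */
            ie[1] = 0x01;                        /* Length: one data byte */
            ie[2] = 0x01;                        /* Data: 0x01 = ON       */
            CsrWifiSmeSetReqSend(myQueue, 3, ie);
    }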
-/*******************************************************************************
-
- NAME
- CsrWifiSmeSmeCommonConfigGetReqSend
-
- DESCRIPTION
- This primitive gets the value of the Sme common parameter.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
-
-*******************************************************************************/
-#define CsrWifiSmeSmeCommonConfigGetReqCreate(msg__, dst__, src__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeSmeCommonConfigGetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_SME_COMMON_CONFIG_GET_REQ, dst__, src__);
-
-#define CsrWifiSmeSmeCommonConfigGetReqSendTo(dst__, src__) \
- { \
- CsrWifiSmeSmeCommonConfigGetReq *msg__; \
- CsrWifiSmeSmeCommonConfigGetReqCreate(msg__, dst__, src__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeSmeCommonConfigGetReqSend(src__) \
- CsrWifiSmeSmeCommonConfigGetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeSmeCommonConfigGetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
- deviceConfig - Configuration options in the SME
-
-*******************************************************************************/
-#define CsrWifiSmeSmeCommonConfigGetCfmCreate(msg__, dst__, src__, status__, deviceConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeSmeCommonConfigGetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_SME_COMMON_CONFIG_GET_CFM, dst__, src__); \
- msg__->status = (status__); \
- msg__->deviceConfig = (deviceConfig__);
-
-#define CsrWifiSmeSmeCommonConfigGetCfmSendTo(dst__, src__, status__, deviceConfig__) \
- { \
- CsrWifiSmeSmeCommonConfigGetCfm *msg__; \
- CsrWifiSmeSmeCommonConfigGetCfmCreate(msg__, dst__, src__, status__, deviceConfig__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeSmeCommonConfigGetCfmSend(dst__, status__, deviceConfig__) \
- CsrWifiSmeSmeCommonConfigGetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__, deviceConfig__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeSmeCommonConfigSetReqSend
-
- DESCRIPTION
- This primitive sets the value of the Sme common parameter.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- deviceConfig - Configuration options in the SME
-
-*******************************************************************************/
-#define CsrWifiSmeSmeCommonConfigSetReqCreate(msg__, dst__, src__, deviceConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeSmeCommonConfigSetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_SME_COMMON_CONFIG_SET_REQ, dst__, src__); \
- msg__->deviceConfig = (deviceConfig__);
-
-#define CsrWifiSmeSmeCommonConfigSetReqSendTo(dst__, src__, deviceConfig__) \
- { \
- CsrWifiSmeSmeCommonConfigSetReq *msg__; \
- CsrWifiSmeSmeCommonConfigSetReqCreate(msg__, dst__, src__, deviceConfig__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeSmeCommonConfigSetReqSend(src__, deviceConfig__) \
- CsrWifiSmeSmeCommonConfigSetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, deviceConfig__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeSmeCommonConfigSetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
-
-*******************************************************************************/
-#define CsrWifiSmeSmeCommonConfigSetCfmCreate(msg__, dst__, src__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeSmeCommonConfigSetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_SME_COMMON_CONFIG_SET_CFM, dst__, src__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeSmeCommonConfigSetCfmSendTo(dst__, src__, status__) \
- { \
- CsrWifiSmeSmeCommonConfigSetCfm *msg__; \
- CsrWifiSmeSmeCommonConfigSetCfmCreate(msg__, dst__, src__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeSmeCommonConfigSetCfmSend(dst__, status__) \
- CsrWifiSmeSmeCommonConfigSetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeSmeStaConfigGetReqSend
-
- DESCRIPTION
- This primitive gets the value of the SmeStaConfig parameter.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
-
-*******************************************************************************/
-#define CsrWifiSmeSmeStaConfigGetReqCreate(msg__, dst__, src__, interfaceTag__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeSmeStaConfigGetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_SME_STA_CONFIG_GET_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__);
-
-#define CsrWifiSmeSmeStaConfigGetReqSendTo(dst__, src__, interfaceTag__) \
- { \
- CsrWifiSmeSmeStaConfigGetReq *msg__; \
- CsrWifiSmeSmeStaConfigGetReqCreate(msg__, dst__, src__, interfaceTag__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeSmeStaConfigGetReqSend(src__, interfaceTag__) \
- CsrWifiSmeSmeStaConfigGetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, interfaceTag__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeSmeStaConfigGetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
- smeConfig - Current SME Station Parameters
-
-*******************************************************************************/
-#define CsrWifiSmeSmeStaConfigGetCfmCreate(msg__, dst__, src__, interfaceTag__, status__, smeConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeSmeStaConfigGetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_SME_STA_CONFIG_GET_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__); \
- msg__->smeConfig = (smeConfig__);
-
-#define CsrWifiSmeSmeStaConfigGetCfmSendTo(dst__, src__, interfaceTag__, status__, smeConfig__) \
- { \
- CsrWifiSmeSmeStaConfigGetCfm *msg__; \
- CsrWifiSmeSmeStaConfigGetCfmCreate(msg__, dst__, src__, interfaceTag__, status__, smeConfig__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeSmeStaConfigGetCfmSend(dst__, interfaceTag__, status__, smeConfig__) \
- CsrWifiSmeSmeStaConfigGetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__, smeConfig__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeSmeStaConfigSetReqSend
-
- DESCRIPTION
- This primitive sets the value of the SmeConfig parameter.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
- smeConfig - SME Station Parameters to be set
-
-*******************************************************************************/
-#define CsrWifiSmeSmeStaConfigSetReqCreate(msg__, dst__, src__, interfaceTag__, smeConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeSmeStaConfigSetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_SME_STA_CONFIG_SET_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->smeConfig = (smeConfig__);
-
-#define CsrWifiSmeSmeStaConfigSetReqSendTo(dst__, src__, interfaceTag__, smeConfig__) \
- { \
- CsrWifiSmeSmeStaConfigSetReq *msg__; \
- CsrWifiSmeSmeStaConfigSetReqCreate(msg__, dst__, src__, interfaceTag__, smeConfig__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeSmeStaConfigSetReqSend(src__, interfaceTag__, smeConfig__) \
- CsrWifiSmeSmeStaConfigSetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, interfaceTag__, smeConfig__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeSmeStaConfigSetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
-
-*******************************************************************************/
-#define CsrWifiSmeSmeStaConfigSetCfmCreate(msg__, dst__, src__, interfaceTag__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeSmeStaConfigSetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_SME_STA_CONFIG_SET_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeSmeStaConfigSetCfmSendTo(dst__, src__, interfaceTag__, status__) \
- { \
- CsrWifiSmeSmeStaConfigSetCfm *msg__; \
- CsrWifiSmeSmeStaConfigSetCfmCreate(msg__, dst__, src__, interfaceTag__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeSmeStaConfigSetCfmSend(dst__, interfaceTag__, status__) \
- CsrWifiSmeSmeStaConfigSetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeStationMacAddressGetReqSend
-
- DESCRIPTION
- This primitive is used to retrieve the current MAC address used by the
- station.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
-
-*******************************************************************************/
-#define CsrWifiSmeStationMacAddressGetReqCreate(msg__, dst__, src__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeStationMacAddressGetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_STATION_MAC_ADDRESS_GET_REQ, dst__, src__);
-
-#define CsrWifiSmeStationMacAddressGetReqSendTo(dst__, src__) \
- { \
- CsrWifiSmeStationMacAddressGetReq *msg__; \
- CsrWifiSmeStationMacAddressGetReqCreate(msg__, dst__, src__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeStationMacAddressGetReqSend(src__) \
- CsrWifiSmeStationMacAddressGetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeStationMacAddressGetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
- stationMacAddress - Current MAC address of the station.
-
-*******************************************************************************/
-#define CsrWifiSmeStationMacAddressGetCfmCreate(msg__, dst__, src__, status__, stationMacAddress__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeStationMacAddressGetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_STATION_MAC_ADDRESS_GET_CFM, dst__, src__); \
- msg__->status = (status__); \
- memcpy(msg__->stationMacAddress, (stationMacAddress__), sizeof(CsrWifiMacAddress) * 2);
-
-#define CsrWifiSmeStationMacAddressGetCfmSendTo(dst__, src__, status__, stationMacAddress__) \
- { \
- CsrWifiSmeStationMacAddressGetCfm *msg__; \
- CsrWifiSmeStationMacAddressGetCfmCreate(msg__, dst__, src__, status__, stationMacAddress__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeStationMacAddressGetCfmSend(dst__, status__, stationMacAddress__) \
- CsrWifiSmeStationMacAddressGetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__, stationMacAddress__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeTspecReqSend
-
- DESCRIPTION
- The wireless manager application should call this primitive to use the
- TSPEC feature.
- The chip supports the use of TSPEC and TCLAS elements for the IEEE
- 802.11/WMM Quality of Service features.
- The API allows the wireless manager application to supply a correctly
- formatted TSPEC and TCLAS pair to the driver.
- After performing basic validation, the driver negotiates the installation
- of the TSPEC with the AP as defined by the 802.11 specification.
- The driver retains all TSPEC and TCLAS pairs until they are specifically
- removed.
- It is not compulsory for a TSPEC to have a TCLAS (NULL is used to
- indicate that no TCLAS is supplied), while a TCLAS always requires a
- TSPEC.
- The format of the TSPEC element is specified in 'WMM (including WMM Power
- Save) Specification - Version 1.1' and 'ANSI/IEEE Std 802.11-REVmb/D3.0'.
- For more information, see 'UniFi Configuring WMM and WMM-PS'.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- interfaceTag - Interface Identifier; unique identifier of an interface
- action - Specifies the action to be carried out on the list of TSPECs.
- CSR_WIFI_SME_LIST_ACTION_FLUSH is not applicable here.
- transactionId - Unique Transaction ID for the TSPEC, as assigned by the
- driver
- strict - If it is set to false, allows the SME to perform automatic
- TSPEC negotiation
- ctrlMask - Additional TSPEC configuration for CCX.
- Set mask with values from CsrWifiSmeTspecCtrl.
- CURRENTLY NOT SUPPORTED
- tspecLength - Length of the TSPEC.
- tspec - Points to the first byte of the TSPEC
- tclasLength - Length of the TCLAS.
- If it is equal to 0, no TCLAS is provided for the TSPEC
- tclas - Points to the first byte of the TCLAS, if any.
-
-*******************************************************************************/
-#define CsrWifiSmeTspecReqCreate(msg__, dst__, src__, interfaceTag__, action__, transactionId__, strict__, ctrlMask__, tspecLength__, tspec__, tclasLength__, tclas__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeTspecReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_TSPEC_REQ, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->action = (action__); \
- msg__->transactionId = (transactionId__); \
- msg__->strict = (strict__); \
- msg__->ctrlMask = (ctrlMask__); \
- msg__->tspecLength = (tspecLength__); \
- msg__->tspec = (tspec__); \
- msg__->tclasLength = (tclasLength__); \
- msg__->tclas = (tclas__);
-
-#define CsrWifiSmeTspecReqSendTo(dst__, src__, interfaceTag__, action__, transactionId__, strict__, ctrlMask__, tspecLength__, tspec__, tclasLength__, tclas__) \
- { \
- CsrWifiSmeTspecReq *msg__; \
- CsrWifiSmeTspecReqCreate(msg__, dst__, src__, interfaceTag__, action__, transactionId__, strict__, ctrlMask__, tspecLength__, tspec__, tclasLength__, tclas__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeTspecReqSend(src__, interfaceTag__, action__, transactionId__, strict__, ctrlMask__, tspecLength__, tspec__, tclasLength__, tclas__) \
- CsrWifiSmeTspecReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, interfaceTag__, action__, transactionId__, strict__, ctrlMask__, tspecLength__, tspec__, tclasLength__, tclas__)
-
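A hedged sketch of installing a single TSPEC with no TCLAS, following the NULL/zero convention described above. tspecBuf/tspecLen stand for a pre-built WMM TSPEC element, myQueue for the caller's queue, and CSR_WIFI_SME_LIST_ACTION_ADD is assumed to be the "add" value of CsrWifiSmeListAction (only the GET and FLUSH values are named in this header).

    /* Hypothetical sketch: add one TSPEC, no TCLAS, automatic negotiation allowed. */
    CsrWifiSmeTspecReqSend(myQueue,
                           0,                            /* interfaceTag (assumed 0)      */
                           CSR_WIFI_SME_LIST_ACTION_ADD, /* assumed "add" action value    */
                           1,                            /* transactionId for this TSPEC  */
                           0,                            /* strict = FALSE: allow the SME
                                                            to negotiate automatically    */
                           0,                            /* ctrlMask: no CCX extras       */
                           tspecLen, tspecBuf,           /* the TSPEC element             */
                           0, NULL);                     /* no TCLAS supplied             */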
-/*******************************************************************************
-
- NAME
- CsrWifiSmeTspecIndSend
-
- DESCRIPTION
- The SME will send this primitive to all the tasks that have registered to
- receive it when a status change in the TSPEC occurs.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- transactionId - Unique Transaction ID for the TSPEC, as assigned by the
- driver
- tspecResultCode - Specifies the TSPEC operation requested by the peer
- station
- tspecLength - Length of the TSPEC.
- tspec - Points to the first byte of the TSPEC
-
-*******************************************************************************/
-#define CsrWifiSmeTspecIndCreate(msg__, dst__, src__, interfaceTag__, transactionId__, tspecResultCode__, tspecLength__, tspec__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeTspecInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_TSPEC_IND, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->transactionId = (transactionId__); \
- msg__->tspecResultCode = (tspecResultCode__); \
- msg__->tspecLength = (tspecLength__); \
- msg__->tspec = (tspec__);
-
-#define CsrWifiSmeTspecIndSendTo(dst__, src__, interfaceTag__, transactionId__, tspecResultCode__, tspecLength__, tspec__) \
- { \
- CsrWifiSmeTspecInd *msg__; \
- CsrWifiSmeTspecIndCreate(msg__, dst__, src__, interfaceTag__, transactionId__, tspecResultCode__, tspecLength__, tspec__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeTspecIndSend(dst__, interfaceTag__, transactionId__, tspecResultCode__, tspecLength__, tspec__) \
- CsrWifiSmeTspecIndSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, transactionId__, tspecResultCode__, tspecLength__, tspec__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeTspecCfmSend
-
- DESCRIPTION
- The SME calls this primitive to report the result of the TSPEC
- request.
-
- PARAMETERS
- queue - Destination Task Queue
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
- transactionId - Unique Transaction ID for the TSPEC, as assigned by the
- driver
- tspecResultCode - Specifies the result of the negotiated TSPEC operation
- tspecLength - Length of the TSPEC.
- tspec - Points to the first byte of the TSPEC
-
-*******************************************************************************/
-#define CsrWifiSmeTspecCfmCreate(msg__, dst__, src__, interfaceTag__, status__, transactionId__, tspecResultCode__, tspecLength__, tspec__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeTspecCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_TSPEC_CFM, dst__, src__); \
- msg__->interfaceTag = (interfaceTag__); \
- msg__->status = (status__); \
- msg__->transactionId = (transactionId__); \
- msg__->tspecResultCode = (tspecResultCode__); \
- msg__->tspecLength = (tspecLength__); \
- msg__->tspec = (tspec__);
-
-#define CsrWifiSmeTspecCfmSendTo(dst__, src__, interfaceTag__, status__, transactionId__, tspecResultCode__, tspecLength__, tspec__) \
- { \
- CsrWifiSmeTspecCfm *msg__; \
- CsrWifiSmeTspecCfmCreate(msg__, dst__, src__, interfaceTag__, status__, transactionId__, tspecResultCode__, tspecLength__, tspec__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeTspecCfmSend(dst__, interfaceTag__, status__, transactionId__, tspecResultCode__, tspecLength__, tspec__) \
- CsrWifiSmeTspecCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, interfaceTag__, status__, transactionId__, tspecResultCode__, tspecLength__, tspec__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeVersionsGetReqSend
-
- DESCRIPTION
- This primitive gets the value of the Versions parameter.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
-
-*******************************************************************************/
-#define CsrWifiSmeVersionsGetReqCreate(msg__, dst__, src__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeVersionsGetReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_VERSIONS_GET_REQ, dst__, src__);
-
-#define CsrWifiSmeVersionsGetReqSendTo(dst__, src__) \
- { \
- CsrWifiSmeVersionsGetReq *msg__; \
- CsrWifiSmeVersionsGetReqCreate(msg__, dst__, src__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeVersionsGetReqSend(src__) \
- CsrWifiSmeVersionsGetReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeVersionsGetCfmSend
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
- versions - Version IDs of the product
-
-*******************************************************************************/
-#define CsrWifiSmeVersionsGetCfmCreate(msg__, dst__, src__, status__, versions__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeVersionsGetCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_VERSIONS_GET_CFM, dst__, src__); \
- msg__->status = (status__); \
- msg__->versions = (versions__);
-
-#define CsrWifiSmeVersionsGetCfmSendTo(dst__, src__, status__, versions__) \
- { \
- CsrWifiSmeVersionsGetCfm *msg__; \
- CsrWifiSmeVersionsGetCfmCreate(msg__, dst__, src__, status__, versions__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeVersionsGetCfmSend(dst__, status__, versions__) \
- CsrWifiSmeVersionsGetCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__, versions__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWifiFlightmodeReqSend
-
- DESCRIPTION
- The wireless manager application may call this primitive on boot-up of
- the platform to ensure that the chip is placed in a mode that prevents
- any emission of RF energy.
- This primitive is an alternative to CSR_WIFI_SME_WIFI_ON_REQ.
- As in CSR_WIFI_SME_WIFI_ON_REQ, it causes the download of the patch file
- (if any) and the programming of the initial MIB settings (if supplied by
- the WMA), but it also ensures that the chip is left in its lowest
- possible power-mode with the radio subsystems disabled.
- This feature is useful on platforms where power cannot be removed from
- the chip: leaving the chip uninitialised would cause it to consume more
- power, so calling this primitive ensures that the chip is initialised
- into a low-power mode without entering a state where it could emit any
- RF energy.
- NOTE: this primitive does not cause the Wi-Fi to change state: Wi-Fi
- stays conceptually off. Configuration primitives can be sent after
- CSR_WIFI_SME_WIFI_FLIGHTMODE_REQ and the configuration will be maintained.
- Requests that require the state of the Wi-Fi to be ON will return
- CSR_WIFI_SME_STATUS_WIFI_OFF in their confirms.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- address - Optionally specifies a station MAC address.
- In normal use, the manager should set the address to 0xFF
- 0xFF 0xFF 0xFF 0xFF 0xFF, which will cause the chip to use
- the MAC address in the MIB.
- mibFilesCount - Number of provided data blocks with initial MIB values
- mibFiles - Points to the first data block with initial MIB values.
- These data blocks are typically the contents of the provided
- files ufmib.dat and localmib.dat, available from the host
- file system, if they exist.
- These files typically contain radio tuning and calibration
- values.
- More values can be created using the Host Tools.
-
-*******************************************************************************/
-#define CsrWifiSmeWifiFlightmodeReqCreate(msg__, dst__, src__, address__, mibFilesCount__, mibFiles__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeWifiFlightmodeReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_WIFI_FLIGHTMODE_REQ, dst__, src__); \
- msg__->address = (address__); \
- msg__->mibFilesCount = (mibFilesCount__); \
- msg__->mibFiles = (mibFiles__);
-
-#define CsrWifiSmeWifiFlightmodeReqSendTo(dst__, src__, address__, mibFilesCount__, mibFiles__) \
- { \
- CsrWifiSmeWifiFlightmodeReq *msg__; \
- CsrWifiSmeWifiFlightmodeReqCreate(msg__, dst__, src__, address__, mibFilesCount__, mibFiles__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeWifiFlightmodeReqSend(src__, address__, mibFilesCount__, mibFiles__) \
- CsrWifiSmeWifiFlightmodeReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, address__, mibFilesCount__, mibFiles__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWifiFlightmodeCfmSend
-
- DESCRIPTION
- The SME calls this primitive when the chip has been initialised into low
- power mode with the radio subsystem disabled. To leave flight mode, and
- enable Wi-Fi, the wireless manager application should call
- CSR_WIFI_SME_WIFI_ON_REQ.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
-
-*******************************************************************************/
-#define CsrWifiSmeWifiFlightmodeCfmCreate(msg__, dst__, src__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeWifiFlightmodeCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_WIFI_FLIGHTMODE_CFM, dst__, src__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeWifiFlightmodeCfmSendTo(dst__, src__, status__) \
- { \
- CsrWifiSmeWifiFlightmodeCfm *msg__; \
- CsrWifiSmeWifiFlightmodeCfmCreate(msg__, dst__, src__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeWifiFlightmodeCfmSend(dst__, status__) \
- CsrWifiSmeWifiFlightmodeCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWifiOffReqSend
-
- DESCRIPTION
- The wireless manager application calls this primitive to turn off the
- chip, thus saving power when Wi-Fi is not in use.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
-
-*******************************************************************************/
-#define CsrWifiSmeWifiOffReqCreate(msg__, dst__, src__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeWifiOffReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_WIFI_OFF_REQ, dst__, src__);
-
-#define CsrWifiSmeWifiOffReqSendTo(dst__, src__) \
- { \
- CsrWifiSmeWifiOffReq *msg__; \
- CsrWifiSmeWifiOffReqCreate(msg__, dst__, src__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeWifiOffReqSend(src__) \
- CsrWifiSmeWifiOffReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWifiOffIndSend
-
- DESCRIPTION
- The SME sends this primitive to all the tasks that have registered to
- receive it to report that the chip has been turned off.
-
- PARAMETERS
- queue - Destination Task Queue
- reason - Indicates the reason why the Wi-Fi has been switched off.
-
-*******************************************************************************/
-#define CsrWifiSmeWifiOffIndCreate(msg__, dst__, src__, reason__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeWifiOffInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_WIFI_OFF_IND, dst__, src__); \
- msg__->reason = (reason__);
-
-#define CsrWifiSmeWifiOffIndSendTo(dst__, src__, reason__) \
- { \
- CsrWifiSmeWifiOffInd *msg__; \
- CsrWifiSmeWifiOffIndCreate(msg__, dst__, src__, reason__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeWifiOffIndSend(dst__, reason__) \
- CsrWifiSmeWifiOffIndSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, reason__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWifiOffCfmSend
-
- DESCRIPTION
- After receiving CSR_WIFI_SME_WIFI_OFF_REQ, if the chip is connected to a
- network, the SME will perform a disconnect operation, will send a
- CSR_WIFI_SME_MEDIA_STATUS_IND with
- CSR_WIFI_SME_MEDIA_STATUS_DISCONNECTED, and then will call
- CSR_WIFI_SME_WIFI_OFF_CFM when the chip is off.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
-
-*******************************************************************************/
-#define CsrWifiSmeWifiOffCfmCreate(msg__, dst__, src__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeWifiOffCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_WIFI_OFF_CFM, dst__, src__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeWifiOffCfmSendTo(dst__, src__, status__) \
- { \
- CsrWifiSmeWifiOffCfm *msg__; \
- CsrWifiSmeWifiOffCfmCreate(msg__, dst__, src__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeWifiOffCfmSend(dst__, status__) \
- CsrWifiSmeWifiOffCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWifiOnReqSend
-
- DESCRIPTION
- The wireless manager application calls this primitive to turn on the
- Wi-Fi chip.
- If the Wi-Fi chip is currently off, the SME turns the Wi-Fi chip on,
- downloads the patch file (if any), and programs the initial MIB settings
- (if supplied by the WMA).
- The patch file is not provided with the SME API; its downloading is
- automatic and handled internally by the system.
- The MIB settings, when provided, override the default values that the
- firmware loads from EEPROM.
- If the Wi-Fi chip is already on, the SME takes no action and returns a
- successful status in the confirm.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- address - Optionally specifies a station MAC address.
- In normal use, the manager should set the address to 0xFF
- 0xFF 0xFF 0xFF 0xFF 0xFF, which will cause the chip to use
- the MAC address in the MIB
- mibFilesCount - Number of provided data blocks with initial MIB values
- mibFiles - Points to the first data block with initial MIB values.
- These data blocks are typically the contents of the provided
- files ufmib.dat and localmib.dat, available from the host
- file system, if they exist.
- These files typically contain radio tuning and calibration
- values.
- More values can be created using the Host Tools.
-
-*******************************************************************************/
-#define CsrWifiSmeWifiOnReqCreate(msg__, dst__, src__, address__, mibFilesCount__, mibFiles__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeWifiOnReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_WIFI_ON_REQ, dst__, src__); \
- msg__->address = (address__); \
- msg__->mibFilesCount = (mibFilesCount__); \
- msg__->mibFiles = (mibFiles__);
-
-#define CsrWifiSmeWifiOnReqSendTo(dst__, src__, address__, mibFilesCount__, mibFiles__) \
- { \
- CsrWifiSmeWifiOnReq *msg__; \
- CsrWifiSmeWifiOnReqCreate(msg__, dst__, src__, address__, mibFilesCount__, mibFiles__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeWifiOnReqSend(src__, address__, mibFilesCount__, mibFiles__) \
- CsrWifiSmeWifiOnReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, address__, mibFilesCount__, mibFiles__)
-
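The wildcard address convention above maps onto a short sketch: bring Wi-Fi up using the MAC address stored in the MIB and without any host-supplied MIB files. The CsrWifiMacAddress field layout and the CsrSchedQid queue type are assumptions about csr_wifi_common.h and csr_sched.h, not shown in this excerpt.

/* Illustrative only: the all-0xFF address asks the chip to use the MAC
 * address from the MIB, per the PARAMETERS description above. */
static void example_wifi_on(CsrSchedQid my_queue)
{
    CsrWifiMacAddress addr;

    memset(addr.a, 0xFF, sizeof(addr.a));   /* 'a' assumed to be the 6-byte field */
    CsrWifiSmeWifiOnReqSend(my_queue,       /* Cfm is delivered to this queue */
                            addr,           /* wildcard station address */
                            0, NULL);       /* no ufmib.dat/localmib.dat blocks */
}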
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWifiOnIndSend
-
- DESCRIPTION
- The SME sends this primitive to all tasks that have registered to receive
- it once the chip becomes available and ready to use.
-
- PARAMETERS
- queue - Destination Task Queue
- address - Current MAC address
-
-*******************************************************************************/
-#define CsrWifiSmeWifiOnIndCreate(msg__, dst__, src__, address__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeWifiOnInd), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_WIFI_ON_IND, dst__, src__); \
- msg__->address = (address__);
-
-#define CsrWifiSmeWifiOnIndSendTo(dst__, src__, address__) \
- { \
- CsrWifiSmeWifiOnInd *msg__; \
- CsrWifiSmeWifiOnIndCreate(msg__, dst__, src__, address__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeWifiOnIndSend(dst__, address__) \
- CsrWifiSmeWifiOnIndSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, address__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWifiOnCfmSend
-
- DESCRIPTION
- The SME sends this primitive to the task that has sent the request once
- the chip has been initialised and is available for use.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Reports the result of the request
-
-*******************************************************************************/
-#define CsrWifiSmeWifiOnCfmCreate(msg__, dst__, src__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeWifiOnCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_WIFI_ON_CFM, dst__, src__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeWifiOnCfmSendTo(dst__, src__, status__) \
- { \
- CsrWifiSmeWifiOnCfm *msg__; \
- CsrWifiSmeWifiOnCfmCreate(msg__, dst__, src__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeWifiOnCfmSend(dst__, status__) \
- CsrWifiSmeWifiOnCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWpsConfigurationReqSend
-
- DESCRIPTION
- This primitive passes the WPS information for the device to the SME. It
- may be accepted only if no interface is active.
-
- PARAMETERS
- queue - Message Source Task Queue (Cfm's will be sent to this Queue)
- wpsConfig - WPS config.
-
-*******************************************************************************/
-#define CsrWifiSmeWpsConfigurationReqCreate(msg__, dst__, src__, wpsConfig__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeWpsConfigurationReq), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_WPS_CONFIGURATION_REQ, dst__, src__); \
- msg__->wpsConfig = (wpsConfig__);
-
-#define CsrWifiSmeWpsConfigurationReqSendTo(dst__, src__, wpsConfig__) \
- { \
- CsrWifiSmeWpsConfigurationReq *msg__; \
- CsrWifiSmeWpsConfigurationReqCreate(msg__, dst__, src__, wpsConfig__); \
- CsrMsgTransport(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeWpsConfigurationReqSend(src__, wpsConfig__) \
- CsrWifiSmeWpsConfigurationReqSendTo(CSR_WIFI_SME_LIB_DESTINATION_QUEUE, src__, wpsConfig__)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWpsConfigurationCfmSend
-
- DESCRIPTION
- Confirm.
-
- PARAMETERS
- queue - Destination Task Queue
- status - Status of the request.
-
-*******************************************************************************/
-#define CsrWifiSmeWpsConfigurationCfmCreate(msg__, dst__, src__, status__) \
- msg__ = kmalloc(sizeof(CsrWifiSmeWpsConfigurationCfm), GFP_KERNEL); \
- CsrWifiFsmEventInit(&msg__->common, CSR_WIFI_SME_PRIM, CSR_WIFI_SME_WPS_CONFIGURATION_CFM, dst__, src__); \
- msg__->status = (status__);
-
-#define CsrWifiSmeWpsConfigurationCfmSendTo(dst__, src__, status__) \
- { \
- CsrWifiSmeWpsConfigurationCfm *msg__; \
- CsrWifiSmeWpsConfigurationCfmCreate(msg__, dst__, src__, status__); \
- CsrSchedMessagePut(dst__, CSR_WIFI_SME_PRIM, msg__); \
- }
-
-#define CsrWifiSmeWpsConfigurationCfmSend(dst__, status__) \
- CsrWifiSmeWpsConfigurationCfmSendTo(dst__, CSR_WIFI_SME_IFACEQUEUE, status__)
-
-#endif /* CSR_WIFI_SME_LIB_H__ */
diff --git a/drivers/staging/csr/csr_wifi_sme_prim.h b/drivers/staging/csr/csr_wifi_sme_prim.h
deleted file mode 100644
index 17ec79c..0000000
--- a/drivers/staging/csr/csr_wifi_sme_prim.h
+++ /dev/null
@@ -1,6510 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#ifndef CSR_WIFI_SME_PRIM_H__
-#define CSR_WIFI_SME_PRIM_H__
-
-#include <linux/types.h>
-#include "csr_prim_defs.h"
-#include "csr_sched.h"
-#include "csr_wifi_common.h"
-#include "csr_result.h"
-#include "csr_wifi_fsm_event.h"
-
-#define CSR_WIFI_SME_PRIM (0x0404)
-
-typedef CsrPrim CsrWifiSmePrim;
-
-
-/*******************************************************************************
-
- NAME
- CsrWifiSme80211NetworkType
-
- DESCRIPTION
- Indicates the physical layer of the network
-
- VALUES
- CSR_WIFI_SME_80211_NETWORK_TYPE_DS
- - Direct-sequence spread spectrum
- CSR_WIFI_SME_80211_NETWORK_TYPE_OFDM24
- - Orthogonal Frequency Division Multiplexing at 2.4 GHz
- CSR_WIFI_SME_80211_NETWORK_TYPE_OFDM5
- - Orthogonal Frequency Division Multiplexing at 5 GHz
- CSR_WIFI_SME_80211_NETWORK_TYPE_AUTO
- - Automatic
-
-*******************************************************************************/
-typedef u8 CsrWifiSme80211NetworkType;
-#define CSR_WIFI_SME_80211_NETWORK_TYPE_DS ((CsrWifiSme80211NetworkType) 0x00)
-#define CSR_WIFI_SME_80211_NETWORK_TYPE_OFDM24 ((CsrWifiSme80211NetworkType) 0x01)
-#define CSR_WIFI_SME_80211_NETWORK_TYPE_OFDM5 ((CsrWifiSme80211NetworkType) 0x02)
-#define CSR_WIFI_SME_80211_NETWORK_TYPE_AUTO ((CsrWifiSme80211NetworkType) 0x03)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSme80211PrivacyMode
-
- DESCRIPTION
- Bits to enable or disable the privacy mode
-
- VALUES
- CSR_WIFI_SME_80211_PRIVACY_MODE_DISABLED
- - Privacy mode is disabled
- CSR_WIFI_SME_80211_PRIVACY_MODE_ENABLED
- - Privacy mode is enabled: use of WEP for confidentiality is
- required.
-
-*******************************************************************************/
-typedef u8 CsrWifiSme80211PrivacyMode;
-#define CSR_WIFI_SME_80211_PRIVACY_MODE_DISABLED ((CsrWifiSme80211PrivacyMode) 0x00)
-#define CSR_WIFI_SME_80211_PRIVACY_MODE_ENABLED ((CsrWifiSme80211PrivacyMode) 0x01)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSme80211dTrustLevel
-
- DESCRIPTION
- Level of trust for the information coming from the network
-
- VALUES
- CSR_WIFI_SME_80211D_TRUST_LEVEL_STRICT
- - Start with passive scanning and only accept country IE for
- updating channel lists
- CSR_WIFI_SME_80211D_TRUST_LEVEL_ADJUNCT
- - As above plus accept adjunct technology location
- information
- CSR_WIFI_SME_80211D_TRUST_LEVEL_BSS
- - As above, plus accept channel information received from
- infrastructure networks
- CSR_WIFI_SME_80211D_TRUST_LEVEL_IBSS
- - As above, plus accept channel information received from ad hoc
- networks
- CSR_WIFI_SME_80211D_TRUST_LEVEL_MIB
- - Start with active scanning with list of active channels
- from the MIB and accept as above
- CSR_WIFI_SME_80211D_TRUST_LEVEL_DISABLED
- - Start with active scanning with list of active channels
- from the MIB and ignore any channel information from the
- network
-
-*******************************************************************************/
-typedef u8 CsrWifiSme80211dTrustLevel;
-#define CSR_WIFI_SME_80211D_TRUST_LEVEL_STRICT ((CsrWifiSme80211dTrustLevel) 0x01)
-#define CSR_WIFI_SME_80211D_TRUST_LEVEL_ADJUNCT ((CsrWifiSme80211dTrustLevel) 0x02)
-#define CSR_WIFI_SME_80211D_TRUST_LEVEL_BSS ((CsrWifiSme80211dTrustLevel) 0x03)
-#define CSR_WIFI_SME_80211D_TRUST_LEVEL_IBSS ((CsrWifiSme80211dTrustLevel) 0x04)
-#define CSR_WIFI_SME_80211D_TRUST_LEVEL_MIB ((CsrWifiSme80211dTrustLevel) 0x05)
-#define CSR_WIFI_SME_80211D_TRUST_LEVEL_DISABLED ((CsrWifiSme80211dTrustLevel) 0x06)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeAmpStatus
-
- DESCRIPTION
- AMP Current Status
-
- VALUES
- CSR_WIFI_SME_AMP_ACTIVE - AMP ACTIVE.
- CSR_WIFI_SME_AMP_INACTIVE - AMP INACTIVE
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeAmpStatus;
-#define CSR_WIFI_SME_AMP_ACTIVE ((CsrWifiSmeAmpStatus) 0x00)
-#define CSR_WIFI_SME_AMP_INACTIVE ((CsrWifiSmeAmpStatus) 0x01)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeAuthMode
-
- DESCRIPTION
- Defines bits for CsrWifiSmeAuthMode
-
- VALUES
- CSR_WIFI_SME_AUTH_MODE_80211_OPEN
- - Connects to an open system network (i.e. no authentication,
- no encryption) or to a WEP enabled network.
- CSR_WIFI_SME_AUTH_MODE_80211_SHARED
- - Connects to a WEP enabled network.
- CSR_WIFI_SME_AUTH_MODE_8021X_WPA
- - Connects to a WPA Enterprise enabled network.
- CSR_WIFI_SME_AUTH_MODE_8021X_WPAPSK
- - Connects to a WPA with Pre-Shared Key enabled network.
- CSR_WIFI_SME_AUTH_MODE_8021X_WPA2
- - Connects to a WPA2 Enterprise enabled network.
- CSR_WIFI_SME_AUTH_MODE_8021X_WPA2PSK
- - Connects to a WPA2 with Pre-Shared Key enabled network.
- CSR_WIFI_SME_AUTH_MODE_8021X_CCKM
- - Connects to a CCKM enabled network.
- CSR_WIFI_SME_AUTH_MODE_WAPI_WAI
- - Connects to a WAPI Enterprise enabled network.
- CSR_WIFI_SME_AUTH_MODE_WAPI_WAIPSK
- - Connects to a WAPI with Pre-Shared Key enabled network.
- CSR_WIFI_SME_AUTH_MODE_8021X_OTHER1X
- - For future use.
-
-*******************************************************************************/
-typedef u16 CsrWifiSmeAuthMode;
-#define CSR_WIFI_SME_AUTH_MODE_80211_OPEN ((CsrWifiSmeAuthMode) 0x0001)
-#define CSR_WIFI_SME_AUTH_MODE_80211_SHARED ((CsrWifiSmeAuthMode) 0x0002)
-#define CSR_WIFI_SME_AUTH_MODE_8021X_WPA ((CsrWifiSmeAuthMode) 0x0004)
-#define CSR_WIFI_SME_AUTH_MODE_8021X_WPAPSK ((CsrWifiSmeAuthMode) 0x0008)
-#define CSR_WIFI_SME_AUTH_MODE_8021X_WPA2 ((CsrWifiSmeAuthMode) 0x0010)
-#define CSR_WIFI_SME_AUTH_MODE_8021X_WPA2PSK ((CsrWifiSmeAuthMode) 0x0020)
-#define CSR_WIFI_SME_AUTH_MODE_8021X_CCKM ((CsrWifiSmeAuthMode) 0x0040)
-#define CSR_WIFI_SME_AUTH_MODE_WAPI_WAI ((CsrWifiSmeAuthMode) 0x0080)
-#define CSR_WIFI_SME_AUTH_MODE_WAPI_WAIPSK ((CsrWifiSmeAuthMode) 0x0100)
-#define CSR_WIFI_SME_AUTH_MODE_8021X_OTHER1X ((CsrWifiSmeAuthMode) 0x0200)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeBasicUsability
-
- DESCRIPTION
- Indicates the usability level of a channel
-
- VALUES
- CSR_WIFI_SME_BASIC_USABILITY_UNUSABLE
- - Not usable; connection not recommended
- CSR_WIFI_SME_BASIC_USABILITY_POOR
- - Poor quality; connect only if nothing better is available
- CSR_WIFI_SME_BASIC_USABILITY_SATISFACTORY
- - Quality is satisfactory
- CSR_WIFI_SME_BASIC_USABILITY_NOT_CONNECTED
- - Not connected
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeBasicUsability;
-#define CSR_WIFI_SME_BASIC_USABILITY_UNUSABLE ((CsrWifiSmeBasicUsability) 0x00)
-#define CSR_WIFI_SME_BASIC_USABILITY_POOR ((CsrWifiSmeBasicUsability) 0x01)
-#define CSR_WIFI_SME_BASIC_USABILITY_SATISFACTORY ((CsrWifiSmeBasicUsability) 0x02)
-#define CSR_WIFI_SME_BASIC_USABILITY_NOT_CONNECTED ((CsrWifiSmeBasicUsability) 0x03)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeBssType
-
- DESCRIPTION
- Indicates the BSS type
-
- VALUES
- CSR_WIFI_SME_BSS_TYPE_INFRASTRUCTURE
- - Infrastructure BSS.
- CSR_WIFI_SME_BSS_TYPE_ADHOC
- - Ad hoc or Independent BSS.
- CSR_WIFI_SME_BSS_TYPE_ANY_BSS
- - Specifies any type of BSS
- CSR_WIFI_SME_BSS_TYPE_P2P
- - Specifies P2P
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeBssType;
-#define CSR_WIFI_SME_BSS_TYPE_INFRASTRUCTURE ((CsrWifiSmeBssType) 0x00)
-#define CSR_WIFI_SME_BSS_TYPE_ADHOC ((CsrWifiSmeBssType) 0x01)
-#define CSR_WIFI_SME_BSS_TYPE_ANY_BSS ((CsrWifiSmeBssType) 0x02)
-#define CSR_WIFI_SME_BSS_TYPE_P2P ((CsrWifiSmeBssType) 0x03)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCoexScheme
-
- DESCRIPTION
- Options for the coexistence signalling.
- The values are the same as the corresponding MIB values.
-
- VALUES
- CSR_WIFI_SME_COEX_SCHEME_DISABLED
- - The coexistence signalling is disabled
- CSR_WIFI_SME_COEX_SCHEME_CSR
- - Basic CSR coexistence signalling
- CSR_WIFI_SME_COEX_SCHEME_CSR_CHANNEL
- - Full CSR coexistence signalling
- CSR_WIFI_SME_COEX_SCHEME_PTA
- - Packet Traffic Arbitrator coexistence signalling
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeCoexScheme;
-#define CSR_WIFI_SME_COEX_SCHEME_DISABLED ((CsrWifiSmeCoexScheme) 0x00)
-#define CSR_WIFI_SME_COEX_SCHEME_CSR ((CsrWifiSmeCoexScheme) 0x01)
-#define CSR_WIFI_SME_COEX_SCHEME_CSR_CHANNEL ((CsrWifiSmeCoexScheme) 0x02)
-#define CSR_WIFI_SME_COEX_SCHEME_PTA ((CsrWifiSmeCoexScheme) 0x03)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeControlIndication
-
- DESCRIPTION
- Indicates the reason why the Wi-Fi has been switched off.
- The values of this type are used across the NME/SME/Router APIs and they
- must be kept consistent with the corresponding types in the .xml of the
- other interfaces.
-
- VALUES
- CSR_WIFI_SME_CONTROL_INDICATION_ERROR
- - An unrecoverable error (for example, an unrecoverable SDIO
- error) has occurred.
- The wireless manager application should reinitialise the
- chip by calling CSR_WIFI_SME_WIFI_ON_REQ.
- CSR_WIFI_SME_CONTROL_INDICATION_EXIT
- - The chip became unavailable due to an external action, for
- example, when a plug-in card is ejected or the driver is
- unloaded.
- CSR_WIFI_SME_CONTROL_INDICATION_USER_REQUESTED
- - The Wi-Fi has been switched off as the wireless manager
- application has sent CSR_WIFI_SME_WIFI_OFF_REQ
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeControlIndication;
-#define CSR_WIFI_SME_CONTROL_INDICATION_ERROR ((CsrWifiSmeControlIndication) 0x01)
-#define CSR_WIFI_SME_CONTROL_INDICATION_EXIT ((CsrWifiSmeControlIndication) 0x02)
-#define CSR_WIFI_SME_CONTROL_INDICATION_USER_REQUESTED ((CsrWifiSmeControlIndication) 0x03)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCtsProtectionType
-
- DESCRIPTION
- SME CTS Protection Types
-
- VALUES
- CSR_WIFI_SME_CTS_PROTECTION_AUTOMATIC
- - AP CTS protection is selected automatically, based on the
- presence of a non-ERP station in its own BSS or, via OLBC, in a
- neighbouring BSS on the same channel.
- This requires monitoring of beacons from other APs.
- CSR_WIFI_SME_CTS_PROTECTION_FORCE_ENABLED
- - AP CTS Protection Force enabled
- CSR_WIFI_SME_CTS_PROTECTION_FORCE_DISABLED
- - AP CTS Protection Force disabled.
- CSR_WIFI_SME_CTS_PROTECTION_AUTOMATIC_NO_OLBC
- - AP CTS protection is selected automatically without considering
- OLBC, but considering non-ERP stations in its own BSS. Valid only
- if the AP is configured to work in 802.11bg or 802.11g mode;
- otherwise this option specifies the same behaviour as AUTOMATIC
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeCtsProtectionType;
-#define CSR_WIFI_SME_CTS_PROTECTION_AUTOMATIC ((CsrWifiSmeCtsProtectionType) 0x00)
-#define CSR_WIFI_SME_CTS_PROTECTION_FORCE_ENABLED ((CsrWifiSmeCtsProtectionType) 0x01)
-#define CSR_WIFI_SME_CTS_PROTECTION_FORCE_DISABLED ((CsrWifiSmeCtsProtectionType) 0x02)
-#define CSR_WIFI_SME_CTS_PROTECTION_AUTOMATIC_NO_OLBC ((CsrWifiSmeCtsProtectionType) 0x03)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeD3AutoScanMode
-
- DESCRIPTION
- Autonomous scan status while in D3 suspended period
-
- VALUES
- CSR_WIFI_SME_D3AUTO_SCAN_MODE_PSON
- - Autonomous scan stays on
- CSR_WIFI_SME_D3AUTO_SCAN_MODE_PSOFF
- - Autonomous scan is switched off
- CSR_WIFI_SME_D3AUTO_SCAN_MODE_PSAUTO
- - Automatically select autoscanning behaviour.
- CURRENTLY NOT SUPPORTED
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeD3AutoScanMode;
-#define CSR_WIFI_SME_D3AUTO_SCAN_MODE_PSON ((CsrWifiSmeD3AutoScanMode) 0x00)
-#define CSR_WIFI_SME_D3AUTO_SCAN_MODE_PSOFF ((CsrWifiSmeD3AutoScanMode) 0x01)
-#define CSR_WIFI_SME_D3AUTO_SCAN_MODE_PSAUTO ((CsrWifiSmeD3AutoScanMode) 0x02)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeEncryption
-
- DESCRIPTION
- Defines bits for CsrWifiSmeEncryption
- For a WEP enabled network, the caller must specify the correct
- combination of flags in the encryptionModeMask.
-
- VALUES
- CSR_WIFI_SME_ENCRYPTION_CIPHER_NONE
- - No encryption set
- CSR_WIFI_SME_ENCRYPTION_CIPHER_PAIRWISE_WEP40
- - Selects 40-bit key WEP for unicast communication
- CSR_WIFI_SME_ENCRYPTION_CIPHER_PAIRWISE_WEP104
- - Selects 104-bit key WEP for unicast communication
- CSR_WIFI_SME_ENCRYPTION_CIPHER_PAIRWISE_TKIP
- - Selects TKIP for unicast communication
- CSR_WIFI_SME_ENCRYPTION_CIPHER_PAIRWISE_CCMP
- - Selects CCMP for unicast communication
- CSR_WIFI_SME_ENCRYPTION_CIPHER_PAIRWISE_SMS4
- - Selects SMS4 for unicast communication
- CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_WEP40
- - Selects 40-bit key WEP for broadcast messages
- CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_WEP104
- - Selects 104-bit key WEP for broadcast messages
- CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_TKIP
- - Selects TKIP for broadcast messages
- CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_CCMP
- - Selects CCMP for broadcast messages
- CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_SMS4
- - Selects SMS4 for broadcast messages
-
-*******************************************************************************/
-typedef u16 CsrWifiSmeEncryption;
-#define CSR_WIFI_SME_ENCRYPTION_CIPHER_NONE ((CsrWifiSmeEncryption) 0x0000)
-#define CSR_WIFI_SME_ENCRYPTION_CIPHER_PAIRWISE_WEP40 ((CsrWifiSmeEncryption) 0x0001)
-#define CSR_WIFI_SME_ENCRYPTION_CIPHER_PAIRWISE_WEP104 ((CsrWifiSmeEncryption) 0x0002)
-#define CSR_WIFI_SME_ENCRYPTION_CIPHER_PAIRWISE_TKIP ((CsrWifiSmeEncryption) 0x0004)
-#define CSR_WIFI_SME_ENCRYPTION_CIPHER_PAIRWISE_CCMP ((CsrWifiSmeEncryption) 0x0008)
-#define CSR_WIFI_SME_ENCRYPTION_CIPHER_PAIRWISE_SMS4 ((CsrWifiSmeEncryption) 0x0010)
-#define CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_WEP40 ((CsrWifiSmeEncryption) 0x0020)
-#define CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_WEP104 ((CsrWifiSmeEncryption) 0x0040)
-#define CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_TKIP ((CsrWifiSmeEncryption) 0x0080)
-#define CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_CCMP ((CsrWifiSmeEncryption) 0x0100)
-#define CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_SMS4 ((CsrWifiSmeEncryption) 0x0200)
-
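Since the description above requires the caller to combine these flags into an encryptionModeMask, a small sketch of a typical mixed-mode WPA/WPA2 mask may help; the structure that carries the mask is not shown in this excerpt and the variable name is illustrative.

/* Illustrative only: CCMP pairwise cipher with a TKIP group cipher, a common
 * mixed-mode WPA/WPA2 configuration. */
CsrWifiSmeEncryption encryption_mode_mask =
        CSR_WIFI_SME_ENCRYPTION_CIPHER_PAIRWISE_CCMP |
        CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_TKIP;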
-/*******************************************************************************
-
- NAME
- CsrWifiSmeFirmwareDriverInterface
-
- DESCRIPTION
- Type of communication between Host and Firmware
-
- VALUES
- CSR_WIFI_SME_FIRMWARE_DRIVER_INTERFACE_UNIT_DATA_INTERFACE
- - No preformatted header. NOT SUPPORTED in the current release
- CSR_WIFI_SME_FIRMWARE_DRIVER_INTERFACE_PACKET_INTERFACE
- - Preformatted IEEE 802.11 header for the user plane
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeFirmwareDriverInterface;
-#define CSR_WIFI_SME_FIRMWARE_DRIVER_INTERFACE_UNIT_DATA_INTERFACE ((CsrWifiSmeFirmwareDriverInterface) 0x00)
-#define CSR_WIFI_SME_FIRMWARE_DRIVER_INTERFACE_PACKET_INTERFACE ((CsrWifiSmeFirmwareDriverInterface) 0x01)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeHostPowerMode
-
- DESCRIPTION
- Defines the power mode
-
- VALUES
- CSR_WIFI_SME_HOST_POWER_MODE_ACTIVE
- - Host device is running on external power.
- CSR_WIFI_SME_HOST_POWER_MODE_POWER_SAVE
- - Host device is running on (internal) battery power.
- CSR_WIFI_SME_HOST_POWER_MODE_FULL_POWER_SAVE
- - For future use.
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeHostPowerMode;
-#define CSR_WIFI_SME_HOST_POWER_MODE_ACTIVE ((CsrWifiSmeHostPowerMode) 0x00)
-#define CSR_WIFI_SME_HOST_POWER_MODE_POWER_SAVE ((CsrWifiSmeHostPowerMode) 0x01)
-#define CSR_WIFI_SME_HOST_POWER_MODE_FULL_POWER_SAVE ((CsrWifiSmeHostPowerMode) 0x02)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeIEEE80211Reason
-
- DESCRIPTION
- As defined in the IEEE 802.11 standards
-
- VALUES
- CSR_WIFI_SME_IEEE80211_REASON_SUCCESS
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_UNSPECIFIED_REASON
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_AUTHENTICATION_NOT_VALID
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_DEAUTHENTICATED_LEAVE_BSS
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_DISASSOCIATED_INACTIVITY
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_AP_OVERLOAD
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_CLASS_2FRAME_ERROR
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_CLASS_3FRAME_ERROR
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_DISASSOCIATED_LEAVE_BSS
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_ASSOCIATION_NOT_AUTHENTICATED
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_DISASSOCIATED_POWER_CAPABILITY
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_DISASSOCIATED_SUPPORTED_CHANNELS
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_INVALID_INFORMATION_ELEMENT
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_MICHAEL_MIC_FAILURE
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_FOURWAY_HANDSHAKE_TIMEOUT
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_GROUP_KEY_UPDATE_TIMEOUT
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_HANDSHAKE_ELEMENT_DIFFERENT
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_INVALID_GROUP_CIPHER
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_INVALID_PAIRWISE_CIPHER
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_INVALID_AKMP
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_UNSUPPORTED_RSN_IEVERSION
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_INVALID_RSN_IECAPABILITIES
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_DOT1X_AUTH_FAILED
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_CIPHER_REJECTED_BY_POLICY
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_SERVICE_CHANGE_PRECLUDES_TS
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_QOS_UNSPECIFIED_REASON
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_QOS_INSUFFICIENT_BANDWIDTH
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_QOS_EXCESSIVE_NOT_ACK
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_QOS_TXOPLIMIT_EXCEEDED
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_QSTA_LEAVING
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_END_TS
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_END_DLS
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_END_BA
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_UNKNOWN_TS
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_UNKNOWN_BA
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_UNKNOWN_DLS
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_TIMEOUT
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_STAKEY_MISMATCH
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_UNICAST_KEY_NEGOTIATION_TIMEOUT
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_MULTICAST_KEY_ANNOUNCEMENT_TIMEOUT
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_INCOMPATIBLE_UNICAST_KEY_NEGOTIATION_IE
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_INVALID_MULTICAST_CIPHER
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_INVALID_UNICAST_CIPHER
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_UNSUPPORTED_WAPI_IE_VERSION
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_INVALID_WAPI_CAPABILITY_IE
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_REASON_WAI_CERTIFICATE_AUTHENTICATION_FAILED
- - See IEEE 802.11 Standard
-
-*******************************************************************************/
-typedef u16 CsrWifiSmeIEEE80211Reason;
-#define CSR_WIFI_SME_IEEE80211_REASON_SUCCESS ((CsrWifiSmeIEEE80211Reason) 0x0000)
-#define CSR_WIFI_SME_IEEE80211_REASON_UNSPECIFIED_REASON ((CsrWifiSmeIEEE80211Reason) 0x0001)
-#define CSR_WIFI_SME_IEEE80211_REASON_AUTHENTICATION_NOT_VALID ((CsrWifiSmeIEEE80211Reason) 0x0002)
-#define CSR_WIFI_SME_IEEE80211_REASON_DEAUTHENTICATED_LEAVE_BSS ((CsrWifiSmeIEEE80211Reason) 0x0003)
-#define CSR_WIFI_SME_IEEE80211_REASON_DISASSOCIATED_INACTIVITY ((CsrWifiSmeIEEE80211Reason) 0x0004)
-#define CSR_WIFI_SME_IEEE80211_REASON_AP_OVERLOAD ((CsrWifiSmeIEEE80211Reason) 0x0005)
-#define CSR_WIFI_SME_IEEE80211_REASON_CLASS_2FRAME_ERROR ((CsrWifiSmeIEEE80211Reason) 0x0006)
-#define CSR_WIFI_SME_IEEE80211_REASON_CLASS_3FRAME_ERROR ((CsrWifiSmeIEEE80211Reason) 0x0007)
-#define CSR_WIFI_SME_IEEE80211_REASON_DISASSOCIATED_LEAVE_BSS ((CsrWifiSmeIEEE80211Reason) 0x0008)
-#define CSR_WIFI_SME_IEEE80211_REASON_ASSOCIATION_NOT_AUTHENTICATED ((CsrWifiSmeIEEE80211Reason) 0x0009)
-#define CSR_WIFI_SME_IEEE80211_REASON_DISASSOCIATED_POWER_CAPABILITY ((CsrWifiSmeIEEE80211Reason) 0x000a)
-#define CSR_WIFI_SME_IEEE80211_REASON_DISASSOCIATED_SUPPORTED_CHANNELS ((CsrWifiSmeIEEE80211Reason) 0x000b)
-#define CSR_WIFI_SME_IEEE80211_REASON_INVALID_INFORMATION_ELEMENT ((CsrWifiSmeIEEE80211Reason) 0x000d)
-#define CSR_WIFI_SME_IEEE80211_REASON_MICHAEL_MIC_FAILURE ((CsrWifiSmeIEEE80211Reason) 0x000e)
-#define CSR_WIFI_SME_IEEE80211_REASON_FOURWAY_HANDSHAKE_TIMEOUT ((CsrWifiSmeIEEE80211Reason) 0x000f)
-#define CSR_WIFI_SME_IEEE80211_REASON_GROUP_KEY_UPDATE_TIMEOUT ((CsrWifiSmeIEEE80211Reason) 0x0010)
-#define CSR_WIFI_SME_IEEE80211_REASON_HANDSHAKE_ELEMENT_DIFFERENT ((CsrWifiSmeIEEE80211Reason) 0x0011)
-#define CSR_WIFI_SME_IEEE80211_REASON_INVALID_GROUP_CIPHER ((CsrWifiSmeIEEE80211Reason) 0x0012)
-#define CSR_WIFI_SME_IEEE80211_REASON_INVALID_PAIRWISE_CIPHER ((CsrWifiSmeIEEE80211Reason) 0x0013)
-#define CSR_WIFI_SME_IEEE80211_REASON_INVALID_AKMP ((CsrWifiSmeIEEE80211Reason) 0x0014)
-#define CSR_WIFI_SME_IEEE80211_REASON_UNSUPPORTED_RSN_IEVERSION ((CsrWifiSmeIEEE80211Reason) 0x0015)
-#define CSR_WIFI_SME_IEEE80211_REASON_INVALID_RSN_IECAPABILITIES ((CsrWifiSmeIEEE80211Reason) 0x0016)
-#define CSR_WIFI_SME_IEEE80211_REASON_DOT1X_AUTH_FAILED ((CsrWifiSmeIEEE80211Reason) 0x0017)
-#define CSR_WIFI_SME_IEEE80211_REASON_CIPHER_REJECTED_BY_POLICY ((CsrWifiSmeIEEE80211Reason) 0x0018)
-#define CSR_WIFI_SME_IEEE80211_REASON_SERVICE_CHANGE_PRECLUDES_TS ((CsrWifiSmeIEEE80211Reason) 0x001F)
-#define CSR_WIFI_SME_IEEE80211_REASON_QOS_UNSPECIFIED_REASON ((CsrWifiSmeIEEE80211Reason) 0x0020)
-#define CSR_WIFI_SME_IEEE80211_REASON_QOS_INSUFFICIENT_BANDWIDTH ((CsrWifiSmeIEEE80211Reason) 0x0021)
-#define CSR_WIFI_SME_IEEE80211_REASON_QOS_EXCESSIVE_NOT_ACK ((CsrWifiSmeIEEE80211Reason) 0x0022)
-#define CSR_WIFI_SME_IEEE80211_REASON_QOS_TXOPLIMIT_EXCEEDED ((CsrWifiSmeIEEE80211Reason) 0x0023)
-#define CSR_WIFI_SME_IEEE80211_REASON_QSTA_LEAVING ((CsrWifiSmeIEEE80211Reason) 0x0024)
-#define CSR_WIFI_SME_IEEE80211_REASON_END_TS ((CsrWifiSmeIEEE80211Reason) 0x0025)
-#define CSR_WIFI_SME_IEEE80211_REASON_END_DLS ((CsrWifiSmeIEEE80211Reason) 0x0025)
-#define CSR_WIFI_SME_IEEE80211_REASON_END_BA ((CsrWifiSmeIEEE80211Reason) 0x0025)
-#define CSR_WIFI_SME_IEEE80211_REASON_UNKNOWN_TS ((CsrWifiSmeIEEE80211Reason) 0x0026)
-#define CSR_WIFI_SME_IEEE80211_REASON_UNKNOWN_BA ((CsrWifiSmeIEEE80211Reason) 0x0026)
-#define CSR_WIFI_SME_IEEE80211_REASON_UNKNOWN_DLS ((CsrWifiSmeIEEE80211Reason) 0x0026)
-#define CSR_WIFI_SME_IEEE80211_REASON_TIMEOUT ((CsrWifiSmeIEEE80211Reason) 0x0027)
-#define CSR_WIFI_SME_IEEE80211_REASON_STAKEY_MISMATCH ((CsrWifiSmeIEEE80211Reason) 0x002d)
-#define CSR_WIFI_SME_IEEE80211_REASON_UNICAST_KEY_NEGOTIATION_TIMEOUT ((CsrWifiSmeIEEE80211Reason) 0xf019)
-#define CSR_WIFI_SME_IEEE80211_REASON_MULTICAST_KEY_ANNOUNCEMENT_TIMEOUT ((CsrWifiSmeIEEE80211Reason) 0xf01a)
-#define CSR_WIFI_SME_IEEE80211_REASON_INCOMPATIBLE_UNICAST_KEY_NEGOTIATION_IE ((CsrWifiSmeIEEE80211Reason) 0xf01b)
-#define CSR_WIFI_SME_IEEE80211_REASON_INVALID_MULTICAST_CIPHER ((CsrWifiSmeIEEE80211Reason) 0xf01c)
-#define CSR_WIFI_SME_IEEE80211_REASON_INVALID_UNICAST_CIPHER ((CsrWifiSmeIEEE80211Reason) 0xf01d)
-#define CSR_WIFI_SME_IEEE80211_REASON_UNSUPPORTED_WAPI_IE_VERSION ((CsrWifiSmeIEEE80211Reason) 0xf01e)
-#define CSR_WIFI_SME_IEEE80211_REASON_INVALID_WAPI_CAPABILITY_IE ((CsrWifiSmeIEEE80211Reason) 0xf01f)
-#define CSR_WIFI_SME_IEEE80211_REASON_WAI_CERTIFICATE_AUTHENTICATION_FAILED ((CsrWifiSmeIEEE80211Reason) 0xf020)
-
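As a hedged illustration of how a caller might react to the reason codes above, for example when a disconnect indication carries one of them, the helper below groups a few security-related reasons; the helper name and the policy it encodes are assumptions, not part of this header.

/* Illustrative helper only: treat a handful of the reason codes above as
 * security failures; which reasons matter to a real caller is policy. */
static bool example_reason_is_security_failure(CsrWifiSmeIEEE80211Reason reason)
{
    switch (reason) {
    case CSR_WIFI_SME_IEEE80211_REASON_MICHAEL_MIC_FAILURE:
    case CSR_WIFI_SME_IEEE80211_REASON_FOURWAY_HANDSHAKE_TIMEOUT:
    case CSR_WIFI_SME_IEEE80211_REASON_GROUP_KEY_UPDATE_TIMEOUT:
    case CSR_WIFI_SME_IEEE80211_REASON_DOT1X_AUTH_FAILED:
        return true;
    default:
        return false;
    }
}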
-/*******************************************************************************
-
- NAME
- CsrWifiSmeIEEE80211Result
-
- DESCRIPTION
- As defined in the IEEE 802.11 standards
-
- VALUES
- CSR_WIFI_SME_IEEE80211_RESULT_SUCCESS
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_UNSPECIFIED_FAILURE
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_CAPABILITIES_MISMATCH
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REASSOCIATION_DENIED_NO_ASSOCIATION
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_EXTERNAL_REASON
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_AUTHENTICATION_MISMATCH
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_INVALID_AUTHENTICATION_SEQUENCE_NUMBER
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_CHALLENGE_FAILURE
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_AUTHENTICATION_TIMEOUT
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_AP_OUT_OF_MEMORY
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_BASIC_RATES_MISMATCH
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_SHORT_PREAMBLE_REQUIRED
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_PBCC_MODULATION_REQUIRED
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_CHANNEL_AGILITY_REQUIRED
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_SPECTRUM_MANAGEMENT_REQUIRED
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_POWER_CAPABILITY_UNACCEPTABLE
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_SUPPORTED_CHANNELS_UNACCEPTABLE
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_SHORT_SLOT_REQUIRED
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_DSSS_OFDMREQUIRED
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_NO_HT_SUPPORT
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_R0KH_UNREACHABLE
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_PCO_TRANSITION_SUPPORT
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_ASSOCIATION_REQUEST_REJECTED_TEMPORARILY
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_ROBUST_MANAGEMENT_FRAME_POLICY_VIOLATION
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_FAILURE
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_AP_BANDWIDTH_INSUFFICIENT
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_POOR_OPERATING_CHANNEL
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_QOS_REQUIRED
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_REASON_UNSPECIFIED
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REFUSED
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_INVALID_PARAMETERS
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REJECTED_WITH_SUGGESTED_TSPEC_CHANGES
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REJECTED_INVALID_IE
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REJECTED_INVALID_GROUP_CIPHER
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REJECTED_INVALID_PAIRWISE_CIPHER
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REJECTED_INVALID_AKMP
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REJECTED_UNSUPPORTED_RSN_VERSION
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REJECTED_INVALID_RSN_CAPABILITY
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REJECTED_SECURITY_POLICY
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REJECTED_FOR_DELAY_PERIOD
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_NOT_ALLOWED
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_NOT_PRESENT
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_NOT_QSTA
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REJECTED_LISTEN_INTERVAL_TOO_LARGE
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_INVALID_FT_ACTION_FRAME_COUNT
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_INVALID_PMKID
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_INVALID_MDIE
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_INVALID_FTIE
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_UNSPECIFIED_QOS_FAILURE
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_WRONG_POLICY
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_INSUFFICIENT_BANDWIDTH
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_INVALID_TSPEC_PARAMETERS
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_TIMEOUT
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_TOO_MANY_SIMULTANEOUS_REQUESTS
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_BSS_ALREADY_STARTED_OR_JOINED
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_NOT_SUPPORTED
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_TRANSMISSION_FAILURE
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_NOT_AUTHENTICATED
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_RESET_REQUIRED_BEFORE_START
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_LM_INFO_UNAVAILABLE
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_INVALID_UNICAST_CIPHER
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_INVALID_MULTICAST_CIPHER
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_UNSUPPORTED_WAPI_IE_VERSION
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_IEEE80211_RESULT_INVALID_WAPI_CAPABILITY_IE
- - See IEEE 802.11 Standard
-
-*******************************************************************************/
-typedef u16 CsrWifiSmeIEEE80211Result;
-#define CSR_WIFI_SME_IEEE80211_RESULT_SUCCESS ((CsrWifiSmeIEEE80211Result) 0x0000)
-#define CSR_WIFI_SME_IEEE80211_RESULT_UNSPECIFIED_FAILURE ((CsrWifiSmeIEEE80211Result) 0x0001)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_CAPABILITIES_MISMATCH ((CsrWifiSmeIEEE80211Result) 0x000a)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REASSOCIATION_DENIED_NO_ASSOCIATION ((CsrWifiSmeIEEE80211Result) 0x000b)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_EXTERNAL_REASON ((CsrWifiSmeIEEE80211Result) 0x000c)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_AUTHENTICATION_MISMATCH ((CsrWifiSmeIEEE80211Result) 0x000d)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_INVALID_AUTHENTICATION_SEQUENCE_NUMBER ((CsrWifiSmeIEEE80211Result) 0x000e)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_CHALLENGE_FAILURE ((CsrWifiSmeIEEE80211Result) 0x000f)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_AUTHENTICATION_TIMEOUT ((CsrWifiSmeIEEE80211Result) 0x0010)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_AP_OUT_OF_MEMORY ((CsrWifiSmeIEEE80211Result) 0x0011)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_BASIC_RATES_MISMATCH ((CsrWifiSmeIEEE80211Result) 0x0012)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_SHORT_PREAMBLE_REQUIRED ((CsrWifiSmeIEEE80211Result) 0x0013)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_PBCC_MODULATION_REQUIRED ((CsrWifiSmeIEEE80211Result) 0x0014)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_CHANNEL_AGILITY_REQUIRED ((CsrWifiSmeIEEE80211Result) 0x0015)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_SPECTRUM_MANAGEMENT_REQUIRED ((CsrWifiSmeIEEE80211Result) 0x0016)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_POWER_CAPABILITY_UNACCEPTABLE ((CsrWifiSmeIEEE80211Result) 0x0017)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_SUPPORTED_CHANNELS_UNACCEPTABLE ((CsrWifiSmeIEEE80211Result) 0x0018)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_SHORT_SLOT_REQUIRED ((CsrWifiSmeIEEE80211Result) 0x0019)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_DSSS_OFDMREQUIRED ((CsrWifiSmeIEEE80211Result) 0x001a)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_NO_HT_SUPPORT ((CsrWifiSmeIEEE80211Result) 0x001b)
-#define CSR_WIFI_SME_IEEE80211_RESULT_R0KH_UNREACHABLE ((CsrWifiSmeIEEE80211Result) 0x001c)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_PCO_TRANSITION_SUPPORT ((CsrWifiSmeIEEE80211Result) 0x001d)
-#define CSR_WIFI_SME_IEEE80211_RESULT_ASSOCIATION_REQUEST_REJECTED_TEMPORARILY ((CsrWifiSmeIEEE80211Result) 0x001e)
-#define CSR_WIFI_SME_IEEE80211_RESULT_ROBUST_MANAGEMENT_FRAME_POLICY_VIOLATION ((CsrWifiSmeIEEE80211Result) 0x001f)
-#define CSR_WIFI_SME_IEEE80211_RESULT_FAILURE ((CsrWifiSmeIEEE80211Result) 0x0020)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_AP_BANDWIDTH_INSUFFICIENT ((CsrWifiSmeIEEE80211Result) 0x0021)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_POOR_OPERATING_CHANNEL ((CsrWifiSmeIEEE80211Result) 0x0022)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_QOS_REQUIRED ((CsrWifiSmeIEEE80211Result) 0x0023)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_REASON_UNSPECIFIED ((CsrWifiSmeIEEE80211Result) 0x0025)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REFUSED ((CsrWifiSmeIEEE80211Result) 0x0025)
-#define CSR_WIFI_SME_IEEE80211_RESULT_INVALID_PARAMETERS ((CsrWifiSmeIEEE80211Result) 0x0026)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REJECTED_WITH_SUGGESTED_TSPEC_CHANGES ((CsrWifiSmeIEEE80211Result) 0x0027)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REJECTED_INVALID_IE ((CsrWifiSmeIEEE80211Result) 0x0028)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REJECTED_INVALID_GROUP_CIPHER ((CsrWifiSmeIEEE80211Result) 0x0029)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REJECTED_INVALID_PAIRWISE_CIPHER ((CsrWifiSmeIEEE80211Result) 0x002a)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REJECTED_INVALID_AKMP ((CsrWifiSmeIEEE80211Result) 0x002b)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REJECTED_UNSUPPORTED_RSN_VERSION ((CsrWifiSmeIEEE80211Result) 0x002c)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REJECTED_INVALID_RSN_CAPABILITY ((CsrWifiSmeIEEE80211Result) 0x002d)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REJECTED_SECURITY_POLICY ((CsrWifiSmeIEEE80211Result) 0x002e)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REJECTED_FOR_DELAY_PERIOD ((CsrWifiSmeIEEE80211Result) 0x002f)
-#define CSR_WIFI_SME_IEEE80211_RESULT_NOT_ALLOWED ((CsrWifiSmeIEEE80211Result) 0x0030)
-#define CSR_WIFI_SME_IEEE80211_RESULT_NOT_PRESENT ((CsrWifiSmeIEEE80211Result) 0x0031)
-#define CSR_WIFI_SME_IEEE80211_RESULT_NOT_QSTA ((CsrWifiSmeIEEE80211Result) 0x0032)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REJECTED_LISTEN_INTERVAL_TOO_LARGE ((CsrWifiSmeIEEE80211Result) 0x0033)
-#define CSR_WIFI_SME_IEEE80211_RESULT_INVALID_FT_ACTION_FRAME_COUNT ((CsrWifiSmeIEEE80211Result) 0x0034)
-#define CSR_WIFI_SME_IEEE80211_RESULT_INVALID_PMKID ((CsrWifiSmeIEEE80211Result) 0x0035)
-#define CSR_WIFI_SME_IEEE80211_RESULT_INVALID_MDIE ((CsrWifiSmeIEEE80211Result) 0x0036)
-#define CSR_WIFI_SME_IEEE80211_RESULT_INVALID_FTIE ((CsrWifiSmeIEEE80211Result) 0x0037)
-#define CSR_WIFI_SME_IEEE80211_RESULT_UNSPECIFIED_QOS_FAILURE ((CsrWifiSmeIEEE80211Result) 0x00c8)
-#define CSR_WIFI_SME_IEEE80211_RESULT_WRONG_POLICY ((CsrWifiSmeIEEE80211Result) 0x00c9)
-#define CSR_WIFI_SME_IEEE80211_RESULT_INSUFFICIENT_BANDWIDTH ((CsrWifiSmeIEEE80211Result) 0x00ca)
-#define CSR_WIFI_SME_IEEE80211_RESULT_INVALID_TSPEC_PARAMETERS ((CsrWifiSmeIEEE80211Result) 0x00cb)
-#define CSR_WIFI_SME_IEEE80211_RESULT_TIMEOUT ((CsrWifiSmeIEEE80211Result) 0x8000)
-#define CSR_WIFI_SME_IEEE80211_RESULT_TOO_MANY_SIMULTANEOUS_REQUESTS ((CsrWifiSmeIEEE80211Result) 0x8001)
-#define CSR_WIFI_SME_IEEE80211_RESULT_BSS_ALREADY_STARTED_OR_JOINED ((CsrWifiSmeIEEE80211Result) 0x8002)
-#define CSR_WIFI_SME_IEEE80211_RESULT_NOT_SUPPORTED ((CsrWifiSmeIEEE80211Result) 0x8003)
-#define CSR_WIFI_SME_IEEE80211_RESULT_TRANSMISSION_FAILURE ((CsrWifiSmeIEEE80211Result) 0x8004)
-#define CSR_WIFI_SME_IEEE80211_RESULT_REFUSED_NOT_AUTHENTICATED ((CsrWifiSmeIEEE80211Result) 0x8005)
-#define CSR_WIFI_SME_IEEE80211_RESULT_RESET_REQUIRED_BEFORE_START ((CsrWifiSmeIEEE80211Result) 0x8006)
-#define CSR_WIFI_SME_IEEE80211_RESULT_LM_INFO_UNAVAILABLE ((CsrWifiSmeIEEE80211Result) 0x8007)
-#define CSR_WIFI_SME_IEEE80211_RESULT_INVALID_UNICAST_CIPHER ((CsrWifiSmeIEEE80211Result) 0xf02f)
-#define CSR_WIFI_SME_IEEE80211_RESULT_INVALID_MULTICAST_CIPHER ((CsrWifiSmeIEEE80211Result) 0xf030)
-#define CSR_WIFI_SME_IEEE80211_RESULT_UNSUPPORTED_WAPI_IE_VERSION ((CsrWifiSmeIEEE80211Result) 0xf031)
-#define CSR_WIFI_SME_IEEE80211_RESULT_INVALID_WAPI_CAPABILITY_IE ((CsrWifiSmeIEEE80211Result) 0xf032)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeIndications
-
- DESCRIPTION
- Defines bits for CsrWifiSmeIndicationsMask
-
- VALUES
- CSR_WIFI_SME_INDICATIONS_NONE
- - Used to cancel the registrations for receiving indications
- CSR_WIFI_SME_INDICATIONS_WIFIOFF
- - Used to register for CSR_WIFI_SME_WIFI_OFF_IND events
- CSR_WIFI_SME_INDICATIONS_SCANRESULT
- - Used to register for CSR_WIFI_SME_SCAN_RESULT_IND events
- CSR_WIFI_SME_INDICATIONS_CONNECTIONQUALITY
- - Used to register for CSR_WIFI_SME_CONNECTION_QUALITY_IND
- events
- CSR_WIFI_SME_INDICATIONS_MEDIASTATUS
- - Used to register for CSR_WIFI_SME_MEDIA_STATUS_IND events
- CSR_WIFI_SME_INDICATIONS_MICFAILURE
- - Used to register for CSR_WIFI_SME_MICFAILURE_IND events
- CSR_WIFI_SME_INDICATIONS_PMKIDCANDIDATELIST
- - Used to register for CSR_WIFI_SME_PMKIDCANDIDATE_LIST_IND
- events
- CSR_WIFI_SME_INDICATIONS_TSPEC
- - Used to register for CSR_WIFI_SME_TSPEC_IND events
- CSR_WIFI_SME_INDICATIONS_ROAMSTART
- - Used to register for CSR_WIFI_SME_ROAM_START_IND events
- CSR_WIFI_SME_INDICATIONS_ROAMCOMPLETE
- - Used to register for CSR_WIFI_SME_ROAM_COMPLETE_IND events
- CSR_WIFI_SME_INDICATIONS_ASSOCIATIONSTART
- - Used to register for CSR_WIFI_SME_ASSOCIATION_START_IND
- events
- CSR_WIFI_SME_INDICATIONS_ASSOCIATIONCOMPLETE
- - Used to register for CSR_WIFI_SME_ASSOCIATION_COMPLETE_IND
- events
- CSR_WIFI_SME_INDICATIONS_IBSSSTATION
- - Used to register for CSR_WIFI_SME_IBSS_STATION_IND events
- CSR_WIFI_SME_INDICATIONS_WIFION
- - Used to register for CSR_WIFI_SME_WIFI_ON_IND events
- CSR_WIFI_SME_INDICATIONS_ERROR
- - Used to register for CSR_WIFI_SME_ERROR_IND events
- CSR_WIFI_SME_INDICATIONS_INFO
- - Used to register for CSR_WIFI_SME_INFO_IND events
- CSR_WIFI_SME_INDICATIONS_COREDUMP
- - Used to register for CSR_WIFI_SME_CORE_DUMP_IND events
- CSR_WIFI_SME_INDICATIONS_ALL
- - Used to register for all available indications
-
-*******************************************************************************/
-typedef u32 CsrWifiSmeIndications;
-#define CSR_WIFI_SME_INDICATIONS_NONE ((CsrWifiSmeIndications) 0x00000000)
-#define CSR_WIFI_SME_INDICATIONS_WIFIOFF ((CsrWifiSmeIndications) 0x00000001)
-#define CSR_WIFI_SME_INDICATIONS_SCANRESULT ((CsrWifiSmeIndications) 0x00000002)
-#define CSR_WIFI_SME_INDICATIONS_CONNECTIONQUALITY ((CsrWifiSmeIndications) 0x00000004)
-#define CSR_WIFI_SME_INDICATIONS_MEDIASTATUS ((CsrWifiSmeIndications) 0x00000008)
-#define CSR_WIFI_SME_INDICATIONS_MICFAILURE ((CsrWifiSmeIndications) 0x00000010)
-#define CSR_WIFI_SME_INDICATIONS_PMKIDCANDIDATELIST ((CsrWifiSmeIndications) 0x00000020)
-#define CSR_WIFI_SME_INDICATIONS_TSPEC ((CsrWifiSmeIndications) 0x00000040)
-#define CSR_WIFI_SME_INDICATIONS_ROAMSTART ((CsrWifiSmeIndications) 0x00000080)
-#define CSR_WIFI_SME_INDICATIONS_ROAMCOMPLETE ((CsrWifiSmeIndications) 0x00000100)
-#define CSR_WIFI_SME_INDICATIONS_ASSOCIATIONSTART ((CsrWifiSmeIndications) 0x00000200)
-#define CSR_WIFI_SME_INDICATIONS_ASSOCIATIONCOMPLETE ((CsrWifiSmeIndications) 0x00000400)
-#define CSR_WIFI_SME_INDICATIONS_IBSSSTATION ((CsrWifiSmeIndications) 0x00000800)
-#define CSR_WIFI_SME_INDICATIONS_WIFION ((CsrWifiSmeIndications) 0x00001000)
-#define CSR_WIFI_SME_INDICATIONS_ERROR ((CsrWifiSmeIndications) 0x00002000)
-#define CSR_WIFI_SME_INDICATIONS_INFO ((CsrWifiSmeIndications) 0x00004000)
-#define CSR_WIFI_SME_INDICATIONS_COREDUMP ((CsrWifiSmeIndications) 0x00008000)
-#define CSR_WIFI_SME_INDICATIONS_ALL ((CsrWifiSmeIndications) 0xFFFFFFFF)
-
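Each of these values is a single-bit flag, so a registration mask is normally built by OR-ing the wanted CSR_WIFI_SME_INDICATIONS_* values together. A minimal sketch, assuming the definitions above and the CsrWifiSmeIndicationsMask typedef declared later in this header are in scope; the helper name is illustrative, not part of the SME API:

/* Illustrative only: build a mask that registers for connection-related
 * indications.  The function name is hypothetical. */
static CsrWifiSmeIndicationsMask example_connection_indications(void)
{
    CsrWifiSmeIndicationsMask mask = CSR_WIFI_SME_INDICATIONS_NONE;

    mask |= CSR_WIFI_SME_INDICATIONS_MEDIASTATUS;       /* connect/disconnect   */
    mask |= CSR_WIFI_SME_INDICATIONS_CONNECTIONQUALITY; /* link quality reports */
    mask |= CSR_WIFI_SME_INDICATIONS_ROAMSTART |
            CSR_WIFI_SME_INDICATIONS_ROAMCOMPLETE;      /* roaming progress     */

    return mask;
}
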
-/*******************************************************************************
-
- NAME
- CsrWifiSmeKeyType
-
- DESCRIPTION
- Indicates the type of the key
-
- VALUES
- CSR_WIFI_SME_KEY_TYPE_GROUP - Key for broadcast communication
- CSR_WIFI_SME_KEY_TYPE_PAIRWISE - Key for unicast communication
- CSR_WIFI_SME_KEY_TYPE_STAKEY - Key for direct link communication to
- another station in infrastructure networks
- CSR_WIFI_SME_KEY_TYPE_IGTK - Integrity Group Temporal Key
- CSR_WIFI_SME_KEY_TYPE_CCKM - Key for Cisco Centralized Key Management
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeKeyType;
-#define CSR_WIFI_SME_KEY_TYPE_GROUP ((CsrWifiSmeKeyType) 0x00)
-#define CSR_WIFI_SME_KEY_TYPE_PAIRWISE ((CsrWifiSmeKeyType) 0x01)
-#define CSR_WIFI_SME_KEY_TYPE_STAKEY ((CsrWifiSmeKeyType) 0x02)
-#define CSR_WIFI_SME_KEY_TYPE_IGTK ((CsrWifiSmeKeyType) 0x03)
-#define CSR_WIFI_SME_KEY_TYPE_CCKM ((CsrWifiSmeKeyType) 0x04)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeListAction
-
- DESCRIPTION
- Identifies the type of action to be performed on a list of items
-    The values of this type are used across the NME/SME/Router APIs and must
-    be kept consistent with the corresponding types in the .xml of the other
-    interfaces.
-
- VALUES
- CSR_WIFI_SME_LIST_ACTION_GET - Retrieve the current list of items
- CSR_WIFI_SME_LIST_ACTION_ADD - Add one or more items
- CSR_WIFI_SME_LIST_ACTION_REMOVE - Remove one or more items
- CSR_WIFI_SME_LIST_ACTION_FLUSH - Remove all items
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeListAction;
-#define CSR_WIFI_SME_LIST_ACTION_GET ((CsrWifiSmeListAction) 0x00)
-#define CSR_WIFI_SME_LIST_ACTION_ADD ((CsrWifiSmeListAction) 0x01)
-#define CSR_WIFI_SME_LIST_ACTION_REMOVE ((CsrWifiSmeListAction) 0x02)
-#define CSR_WIFI_SME_LIST_ACTION_FLUSH ((CsrWifiSmeListAction) 0x03)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMediaStatus
-
- DESCRIPTION
- Indicates the connection status
-    The values of this type are used across the NME/SME/Router APIs and must
-    be kept consistent with the corresponding types in the .xml of the other
-    interfaces.
-
- VALUES
- CSR_WIFI_SME_MEDIA_STATUS_CONNECTED
- - Value CSR_WIFI_SME_MEDIA_STATUS_CONNECTED can happen in two
- situations:
- * A network connection is established. Specifically, this is
- when the MLME_ASSOCIATION completes or the first peer
- relationship is established in an IBSS. In a WPA/WPA2
- network, this indicates that the stack is ready to perform
-                           the 4-way handshake or 802.1x authentication if the
-                           CSR NME security library is not used. If the CSR NME
-                           security library is used, this indicates completion
-                           of the 4-way handshake or 802.1x authentication.
- * The SME roams to another AP on the same ESS
-                           During AP operation, it indicates that the peer station
- is connected to the AP and is ready for data transfer.
- CSR_WIFI_SME_MEDIA_STATUS_DISCONNECTED
- - Value CSR_WIFI_SME_MEDIA_STATUS_DISCONNECTED can happen in
- two situations:
- * when the connection to a network is lost and there is no
- alternative on the same ESS to roam to
- * when a CSR_WIFI_SME_DISCONNECT_REQ request is issued
- During AP or P2PGO operation, it indicates that the peer
- station has disconnected from the AP
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeMediaStatus;
-#define CSR_WIFI_SME_MEDIA_STATUS_CONNECTED ((CsrWifiSmeMediaStatus) 0x00)
-#define CSR_WIFI_SME_MEDIA_STATUS_DISCONNECTED ((CsrWifiSmeMediaStatus) 0x01)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeP2pCapability
-
- DESCRIPTION
- Defines P2P Device Capabilities
-
- VALUES
- CSR_WIFI_SME_P2P_SERVICE_DISCOVERY_CAPABILITY
- - This field is set to 1 if the P2P Device supports Service
- Discovery, and to 0 otherwise
- CSR_WIFI_SME_P2P_CLIENT_DISCOVERABILITY_CAPABILITY
- - This field is set to 1 when the P2P Device supports P2P
- Client Discoverability, and to 0 otherwise.
- CSR_WIFI_SME_P2P_CONCURRENT_OPERATION_CAPABILITY
- - This field is set to 1 when the P2P Device supports
- Concurrent Operation with WLAN, and to 0 otherwise.
- CSR_WIFI_SME_P2P_MANAGED_DEVICE_CAPABILITY
- - This field is set to 1 when the P2P interface of the P2P
- Device is capable of being managed by the WLAN
- (infrastructure network) based on P2P coexistence
- parameters, and to 0 otherwise
- CSR_WIFI_SME_P2P_INVITAION_CAPABILITY
- - This field is set to 1 if the P2P Device is capable of
- processing P2P Invitation Procedure signaling, and to 0
- otherwise.
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeP2pCapability;
-#define CSR_WIFI_SME_P2P_SERVICE_DISCOVERY_CAPABILITY ((CsrWifiSmeP2pCapability) 0x01)
-#define CSR_WIFI_SME_P2P_CLIENT_DISCOVERABILITY_CAPABILITY ((CsrWifiSmeP2pCapability) 0x02)
-#define CSR_WIFI_SME_P2P_CONCURRENT_OPERATION_CAPABILITY ((CsrWifiSmeP2pCapability) 0x04)
-#define CSR_WIFI_SME_P2P_MANAGED_DEVICE_CAPABILITY ((CsrWifiSmeP2pCapability) 0x08)
-#define CSR_WIFI_SME_P2P_INVITAION_CAPABILITY ((CsrWifiSmeP2pCapability) 0x20)
-
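Because each capability is a single bit, the capability byte advertised by a peer can be tested with a bitwise AND against the values above. A minimal sketch, assuming the definitions above and the CsrWifiSmeP2pCapabilityMask typedef declared later in this header; the function name is illustrative only:

/* Illustrative only: check whether a peer advertises both P2P Service
 * Discovery and Concurrent Operation.  Returns non-zero if it does. */
static int example_peer_supports_sd_and_concurrent(CsrWifiSmeP2pCapabilityMask caps)
{
    const CsrWifiSmeP2pCapabilityMask wanted =
        CSR_WIFI_SME_P2P_SERVICE_DISCOVERY_CAPABILITY |
        CSR_WIFI_SME_P2P_CONCURRENT_OPERATION_CAPABILITY;

    return (caps & wanted) == wanted;
}
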
-/*******************************************************************************
-
- NAME
- CsrWifiSmeP2pGroupCapability
-
- DESCRIPTION
- Define bits for P2P Group Capability
-
- VALUES
- CSR_WIFI_P2P_GRP_CAP_GO
- - Indicates if the local device has become a GO after GO
- negotiation
- CSR_WIFI_P2P_GRP_CAP_PERSISTENT
- - Persistent group
- CSR_WIFI_P2P_GRP_CAP_INTRABSS_DIST
- - Intra-BSS data distribution support
- CSR_WIFI_P2P_GRP_CAP_CROSS_CONN
- - Support of cross connection
- CSR_WIFI_P2P_GRP_CAP_PERSISTENT_RECONNECT
- - Support of persistent reconnect
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeP2pGroupCapability;
-#define CSR_WIFI_P2P_GRP_CAP_GO ((CsrWifiSmeP2pGroupCapability) 0x01)
-#define CSR_WIFI_P2P_GRP_CAP_PERSISTENT ((CsrWifiSmeP2pGroupCapability) 0x02)
-#define CSR_WIFI_P2P_GRP_CAP_INTRABSS_DIST ((CsrWifiSmeP2pGroupCapability) 0x08)
-#define CSR_WIFI_P2P_GRP_CAP_CROSS_CONN ((CsrWifiSmeP2pGroupCapability) 0x10)
-#define CSR_WIFI_P2P_GRP_CAP_PERSISTENT_RECONNECT ((CsrWifiSmeP2pGroupCapability) 0x20)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeP2pNoaConfigMethod
-
- DESCRIPTION
-    Notice of Absence Configuration
-
- VALUES
- CSR_WIFI_P2P_NOA_NONE - Do not use NOA
- CSR_WIFI_P2P_NOA_AUTONOMOUS - NOA based on the traffic analysis
- CSR_WIFI_P2P_NOA_USER_DEFINED - NOA as specified by the user
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeP2pNoaConfigMethod;
-#define CSR_WIFI_P2P_NOA_NONE ((CsrWifiSmeP2pNoaConfigMethod) 0x00)
-#define CSR_WIFI_P2P_NOA_AUTONOMOUS ((CsrWifiSmeP2pNoaConfigMethod) 0x01)
-#define CSR_WIFI_P2P_NOA_USER_DEFINED ((CsrWifiSmeP2pNoaConfigMethod) 0x02)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeP2pRole
-
- DESCRIPTION
- Definition of roles for a P2P Device
-
- VALUES
-    CSR_WIFI_SME_P2P_ROLE_NONE       - A non-P2P device
- CSR_WIFI_SME_P2P_ROLE_STANDALONE - A Standalone P2P device
- CSR_WIFI_SME_P2P_ROLE_GO - Role Assumed is that of a Group Owner
- within a P2P Group
- CSR_WIFI_SME_P2P_ROLE_CLI - Role Assumed is that of a P2P Client
- within a P2P Group
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeP2pRole;
-#define CSR_WIFI_SME_P2P_ROLE_NONE ((CsrWifiSmeP2pRole) 0x00)
-#define CSR_WIFI_SME_P2P_ROLE_STANDALONE ((CsrWifiSmeP2pRole) 0x01)
-#define CSR_WIFI_SME_P2P_ROLE_GO ((CsrWifiSmeP2pRole) 0x02)
-#define CSR_WIFI_SME_P2P_ROLE_CLI ((CsrWifiSmeP2pRole) 0x03)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeP2pStatus
-
- DESCRIPTION
-    This data type enumerates the outcome of a P2P procedure
-
- VALUES
- CSR_WIFI_SME_P2P_STATUS_SUCCESS
- - Success
- CSR_WIFI_SME_P2P_STATUS_FAIL_INFO_UNAVAILABLE
- - Fail; information is currently unavailable
- CSR_WIFI_SME_P2P_STATUS_FAIL_INCOMPATIBLE_PARAM
- - Fail; incompatible parameters
- CSR_WIFI_SME_P2P_STATUS_FAIL_LIMIT_REACHED
- - Fail; limit reached
- CSR_WIFI_SME_P2P_STATUS_FAIL_INVALID_PARAM
- - Fail; invalid parameters
- CSR_WIFI_SME_P2P_STATUS_FAIL_ACCOMODATE
- - Fail; unable to accommodate request
- CSR_WIFI_SME_P2P_STATUS_FAIL_PREV_ERROR
- - Fail; previous protocol error, or disruptive behavior
- CSR_WIFI_SME_P2P_STATUS_FAIL_COMMON_CHANNELS
- - Fail; no common channels
- CSR_WIFI_SME_P2P_STATUS_FAIL_UNKNOWN_GROUP
- - Fail; unknown P2P Group
- CSR_WIFI_SME_P2P_STATUS_FAIL_GO_INTENT
- - Fail: both P2P Devices indicated an Intent of 15 in Group
- Owner Negotiation
- CSR_WIFI_SME_P2P_STATUS_FAIL_PROVISION_METHOD_INCOMPATIBLE
- - Fail; incompatible provisioning method
- CSR_WIFI_SME_P2P_STATUS_FAIL_REJECT
- - Fail: rejected by user
- CSR_WIFI_SME_P2P_STATUS_FAIL_RESERVED
- - Fail; Status Reserved
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeP2pStatus;
-#define CSR_WIFI_SME_P2P_STATUS_SUCCESS ((CsrWifiSmeP2pStatus) 0x00)
-#define CSR_WIFI_SME_P2P_STATUS_FAIL_INFO_UNAVAILABLE ((CsrWifiSmeP2pStatus) 0x01)
-#define CSR_WIFI_SME_P2P_STATUS_FAIL_INCOMPATIBLE_PARAM ((CsrWifiSmeP2pStatus) 0x02)
-#define CSR_WIFI_SME_P2P_STATUS_FAIL_LIMIT_REACHED ((CsrWifiSmeP2pStatus) 0x03)
-#define CSR_WIFI_SME_P2P_STATUS_FAIL_INVALID_PARAM ((CsrWifiSmeP2pStatus) 0x04)
-#define CSR_WIFI_SME_P2P_STATUS_FAIL_ACCOMODATE ((CsrWifiSmeP2pStatus) 0x05)
-#define CSR_WIFI_SME_P2P_STATUS_FAIL_PREV_ERROR ((CsrWifiSmeP2pStatus) 0x06)
-#define CSR_WIFI_SME_P2P_STATUS_FAIL_COMMON_CHANNELS ((CsrWifiSmeP2pStatus) 0x07)
-#define CSR_WIFI_SME_P2P_STATUS_FAIL_UNKNOWN_GROUP ((CsrWifiSmeP2pStatus) 0x08)
-#define CSR_WIFI_SME_P2P_STATUS_FAIL_GO_INTENT ((CsrWifiSmeP2pStatus) 0x09)
-#define CSR_WIFI_SME_P2P_STATUS_FAIL_PROVISION_METHOD_INCOMPATIBLE ((CsrWifiSmeP2pStatus) 0x0A)
-#define CSR_WIFI_SME_P2P_STATUS_FAIL_REJECT ((CsrWifiSmeP2pStatus) 0x0B)
-#define CSR_WIFI_SME_P2P_STATUS_FAIL_RESERVED ((CsrWifiSmeP2pStatus) 0xFF)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePacketFilterMode
-
- DESCRIPTION
- Options for the filter mode parameter.
-    The values here match the HIP interface
-
- VALUES
- CSR_WIFI_SME_PACKET_FILTER_MODE_OPT_OUT
- - Broadcast packets are always reported to the host unless
- they match at least one of the specified TCLAS IEs.
- CSR_WIFI_SME_PACKET_FILTER_MODE_OPT_IN
- - Broadcast packets are reported to the host only if they
- match at least one of the specified TCLAS IEs.
-
-*******************************************************************************/
-typedef u8 CsrWifiSmePacketFilterMode;
-#define CSR_WIFI_SME_PACKET_FILTER_MODE_OPT_OUT ((CsrWifiSmePacketFilterMode) 0x00)
-#define CSR_WIFI_SME_PACKET_FILTER_MODE_OPT_IN ((CsrWifiSmePacketFilterMode) 0x03)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePowerSaveLevel
-
- DESCRIPTION
- Power Save Level options as defined in the IEEE 802.11 standards
-    The first three values are set to match the MLME PowerManagementMode
-
- VALUES
- CSR_WIFI_SME_POWER_SAVE_LEVEL_LOW - No power save: the driver will remain
- active at all times
- CSR_WIFI_SME_POWER_SAVE_LEVEL_HIGH - Enter power save after all packets in
- the queues are transmitted and received
- CSR_WIFI_SME_POWER_SAVE_LEVEL_MED - Enter power save after all packets in
- the queues are transmitted and received
- and a further configurable delay
- (default 1s) has elapsed
- CSR_WIFI_SME_POWER_SAVE_LEVEL_AUTO - The SME will decide when to enter power
- save mode according to the traffic
- analysis
-
-*******************************************************************************/
-typedef u8 CsrWifiSmePowerSaveLevel;
-#define CSR_WIFI_SME_POWER_SAVE_LEVEL_LOW ((CsrWifiSmePowerSaveLevel) 0x00)
-#define CSR_WIFI_SME_POWER_SAVE_LEVEL_HIGH ((CsrWifiSmePowerSaveLevel) 0x01)
-#define CSR_WIFI_SME_POWER_SAVE_LEVEL_MED ((CsrWifiSmePowerSaveLevel) 0x02)
-#define CSR_WIFI_SME_POWER_SAVE_LEVEL_AUTO ((CsrWifiSmePowerSaveLevel) 0x03)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePreambleType
-
- DESCRIPTION
- SME Preamble Types
-
- VALUES
- CSR_WIFI_SME_USE_LONG_PREAMBLE - Use legacy (long) preamble
- CSR_WIFI_SME_USE_SHORT_PREAMBLE - Use short PPDU format
-
-*******************************************************************************/
-typedef u8 CsrWifiSmePreambleType;
-#define CSR_WIFI_SME_USE_LONG_PREAMBLE ((CsrWifiSmePreambleType) 0x00)
-#define CSR_WIFI_SME_USE_SHORT_PREAMBLE ((CsrWifiSmePreambleType) 0x01)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeRadioIF
-
- DESCRIPTION
-    Indicates the radio frequency band
-
- VALUES
-    CSR_WIFI_SME_RADIO_IF_GHZ_2_4 - Indicates the 2.4 GHz frequency
- CSR_WIFI_SME_RADIO_IF_GHZ_5_0 - Future use: currently not supported
- CSR_WIFI_SME_RADIO_IF_BOTH - Future use: currently not supported
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeRadioIF;
-#define CSR_WIFI_SME_RADIO_IF_GHZ_2_4 ((CsrWifiSmeRadioIF) 0x01)
-#define CSR_WIFI_SME_RADIO_IF_GHZ_5_0 ((CsrWifiSmeRadioIF) 0x02)
-#define CSR_WIFI_SME_RADIO_IF_BOTH ((CsrWifiSmeRadioIF) 0x03)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeRegulatoryDomain
-
- DESCRIPTION
- Indicates the regulatory domain as defined in IEEE 802.11 standards
-
- VALUES
- CSR_WIFI_SME_REGULATORY_DOMAIN_OTHER
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_REGULATORY_DOMAIN_FCC
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_REGULATORY_DOMAIN_IC
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_REGULATORY_DOMAIN_ETSI
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_REGULATORY_DOMAIN_SPAIN
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_REGULATORY_DOMAIN_FRANCE
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_REGULATORY_DOMAIN_JAPAN
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_REGULATORY_DOMAIN_JAPANBIS
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_REGULATORY_DOMAIN_CHINA
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_REGULATORY_DOMAIN_CHINABIS
- - See IEEE 802.11 Standard
- CSR_WIFI_SME_REGULATORY_DOMAIN_NONE
- - See IEEE 802.11 Standard
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeRegulatoryDomain;
-#define CSR_WIFI_SME_REGULATORY_DOMAIN_OTHER ((CsrWifiSmeRegulatoryDomain) 0x00)
-#define CSR_WIFI_SME_REGULATORY_DOMAIN_FCC ((CsrWifiSmeRegulatoryDomain) 0x10)
-#define CSR_WIFI_SME_REGULATORY_DOMAIN_IC ((CsrWifiSmeRegulatoryDomain) 0x20)
-#define CSR_WIFI_SME_REGULATORY_DOMAIN_ETSI ((CsrWifiSmeRegulatoryDomain) 0x30)
-#define CSR_WIFI_SME_REGULATORY_DOMAIN_SPAIN ((CsrWifiSmeRegulatoryDomain) 0x31)
-#define CSR_WIFI_SME_REGULATORY_DOMAIN_FRANCE ((CsrWifiSmeRegulatoryDomain) 0x32)
-#define CSR_WIFI_SME_REGULATORY_DOMAIN_JAPAN ((CsrWifiSmeRegulatoryDomain) 0x40)
-#define CSR_WIFI_SME_REGULATORY_DOMAIN_JAPANBIS ((CsrWifiSmeRegulatoryDomain) 0x41)
-#define CSR_WIFI_SME_REGULATORY_DOMAIN_CHINA ((CsrWifiSmeRegulatoryDomain) 0x50)
-#define CSR_WIFI_SME_REGULATORY_DOMAIN_CHINABIS ((CsrWifiSmeRegulatoryDomain) 0x51)
-#define CSR_WIFI_SME_REGULATORY_DOMAIN_NONE ((CsrWifiSmeRegulatoryDomain) 0xFF)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeRoamReason
-
- DESCRIPTION
- Indicates the reason for roaming
-
- VALUES
- CSR_WIFI_SME_ROAM_REASON_BEACONLOST
- - The station cannot receive the beacon signal any more
- CSR_WIFI_SME_ROAM_REASON_DISASSOCIATED
- - The station has been disassociated
- CSR_WIFI_SME_ROAM_REASON_DEAUTHENTICATED
- - The station has been deauthenticated
- CSR_WIFI_SME_ROAM_REASON_BETTERAPFOUND
- - A better AP has been found
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeRoamReason;
-#define CSR_WIFI_SME_ROAM_REASON_BEACONLOST ((CsrWifiSmeRoamReason) 0x00)
-#define CSR_WIFI_SME_ROAM_REASON_DISASSOCIATED ((CsrWifiSmeRoamReason) 0x01)
-#define CSR_WIFI_SME_ROAM_REASON_DEAUTHENTICATED ((CsrWifiSmeRoamReason) 0x02)
-#define CSR_WIFI_SME_ROAM_REASON_BETTERAPFOUND ((CsrWifiSmeRoamReason) 0x03)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeScanType
-
- DESCRIPTION
- Identifies the type of scan to be performed
-
- VALUES
- CSR_WIFI_SME_SCAN_TYPE_ALL - Scan actively or passively according to the
- regulatory domain restrictions
- CSR_WIFI_SME_SCAN_TYPE_ACTIVE - Scan actively only: send probes and listen
- for answers
- CSR_WIFI_SME_SCAN_TYPE_PASSIVE - Scan passively only: listen for beacon
- messages
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeScanType;
-#define CSR_WIFI_SME_SCAN_TYPE_ALL ((CsrWifiSmeScanType) 0x00)
-#define CSR_WIFI_SME_SCAN_TYPE_ACTIVE ((CsrWifiSmeScanType) 0x01)
-#define CSR_WIFI_SME_SCAN_TYPE_PASSIVE ((CsrWifiSmeScanType) 0x02)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeTrafficType
-
- DESCRIPTION
-    Identifies the type of traffic on the connection.
-    The values of this type are used across the NME/SME/Router APIs and must
-    be kept consistent with the corresponding types in the .xml of the other
-    interfaces.
-
- VALUES
- CSR_WIFI_SME_TRAFFIC_TYPE_OCCASIONAL
-                           - During the last 30 seconds there were fewer than 20 packets
-                             per second in each second in both directions
- CSR_WIFI_SME_TRAFFIC_TYPE_BURSTY
- - During the last 30 seconds there was at least one second
- during which more than 20 packets were received in either
- direction
- CSR_WIFI_SME_TRAFFIC_TYPE_PERIODIC
- - During the last 5 seconds there were at least 10 packets
- received each second and a defined period for the traffic
- can be recognized
- CSR_WIFI_SME_TRAFFIC_TYPE_CONTINUOUS
- - During the last 5 seconds there were at least 20 packets
- received each second in either direction
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeTrafficType;
-#define CSR_WIFI_SME_TRAFFIC_TYPE_OCCASIONAL ((CsrWifiSmeTrafficType) 0x00)
-#define CSR_WIFI_SME_TRAFFIC_TYPE_BURSTY ((CsrWifiSmeTrafficType) 0x01)
-#define CSR_WIFI_SME_TRAFFIC_TYPE_PERIODIC ((CsrWifiSmeTrafficType) 0x02)
-#define CSR_WIFI_SME_TRAFFIC_TYPE_CONTINUOUS ((CsrWifiSmeTrafficType) 0x03)
-
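The thresholds above lend themselves to a simple per-second classification. The sketch below is illustrative only and is not the SME's actual traffic analysis: it assumes the u32 kernel typedef is in scope, takes an array of per-second packet counts (both directions, most recent last), and omits CSR_WIFI_SME_TRAFFIC_TYPE_PERIODIC, which additionally requires recognising a traffic period.

/* Illustrative only: a simplified classifier following the documented
 * thresholds.  'pps' holds per-second packet counts, most recent last. */
static CsrWifiSmeTrafficType example_classify_traffic(const u32 *pps, int seconds)
{
    int i;
    int continuous = (seconds >= 5);   /* all of the last 5 seconds >= 20 pkts */
    int bursty = 0;                    /* any of the last 30 seconds > 20 pkts */

    for (i = 0; i < seconds && i < 5; i++)
        if (pps[seconds - 1 - i] < 20)
            continuous = 0;
    for (i = 0; i < seconds && i < 30; i++)
        if (pps[seconds - 1 - i] > 20)
            bursty = 1;

    if (continuous)
        return CSR_WIFI_SME_TRAFFIC_TYPE_CONTINUOUS;
    if (bursty)
        return CSR_WIFI_SME_TRAFFIC_TYPE_BURSTY;
    return CSR_WIFI_SME_TRAFFIC_TYPE_OCCASIONAL;
}
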
-/*******************************************************************************
-
- NAME
- CsrWifiSmeTspecCtrl
-
- DESCRIPTION
- Defines bits for CsrWifiSmeTspecCtrlMask for additional CCX configuration.
- CURRENTLY NOT SUPPORTED.
-
- VALUES
- CSR_WIFI_SME_TSPEC_CTRL_STRICT
- - No automatic negotiation
- CSR_WIFI_SME_TSPEC_CTRL_CCX_SIGNALLING
- - Signalling TSPEC
- CSR_WIFI_SME_TSPEC_CTRL_CCX_VOICE
- - Voice traffic TSPEC
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeTspecCtrl;
-#define CSR_WIFI_SME_TSPEC_CTRL_STRICT ((CsrWifiSmeTspecCtrl) 0x01)
-#define CSR_WIFI_SME_TSPEC_CTRL_CCX_SIGNALLING ((CsrWifiSmeTspecCtrl) 0x02)
-#define CSR_WIFI_SME_TSPEC_CTRL_CCX_VOICE ((CsrWifiSmeTspecCtrl) 0x04)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeTspecResultCode
-
- DESCRIPTION
- Defines the result of the TSPEC exchanges with the AP
-
- VALUES
- CSR_WIFI_SME_TSPEC_RESULT_SUCCESS
- - TSPEC command has been processed correctly
- CSR_WIFI_SME_TSPEC_RESULT_UNSPECIFIED_QOS_FAILURE
- - The Access Point reported a failure
- CSR_WIFI_SME_TSPEC_RESULT_FAILURE
- - Internal failure in the SME
- CSR_WIFI_SME_TSPEC_RESULT_INVALID_TSPEC_PARAMETERS
- - The TSPEC parameters are invalid
- CSR_WIFI_SME_TSPEC_RESULT_INVALID_TCLAS_PARAMETERS
- - The TCLASS parameters are invalid
- CSR_WIFI_SME_TSPEC_RESULT_INSUFFICIENT_BANDWIDTH
- - As specified by the WMM Spec
- CSR_WIFI_SME_TSPEC_RESULT_WRONG_POLICY
- - As specified by the WMM Spec
- CSR_WIFI_SME_TSPEC_RESULT_REJECTED_WITH_SUGGESTED_CHANGES
- - As specified by the WMM Spec
- CSR_WIFI_SME_TSPEC_RESULT_TIMEOUT
- - The TSPEC negotiation timed out
- CSR_WIFI_SME_TSPEC_RESULT_NOT_SUPPORTED
- - The Access Point does not support the TSPEC
- CSR_WIFI_SME_TSPEC_RESULT_IE_LENGTH_INCORRECT
- - The length of the TSPEC is not correct
- CSR_WIFI_SME_TSPEC_RESULT_INVALID_TRANSACTION_ID
- - The TSPEC transaction id is not in the list
- CSR_WIFI_SME_TSPEC_RESULT_INSTALLED
- - The TSPEC has been installed and it is now active.
- CSR_WIFI_SME_TSPEC_RESULT_TID_ALREADY_INSTALLED
- - The Traffic ID has already been installed
- CSR_WIFI_SME_TSPEC_RESULT_TSPEC_REMOTELY_DELETED
- - The AP has deleted the TSPEC
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeTspecResultCode;
-#define CSR_WIFI_SME_TSPEC_RESULT_SUCCESS ((CsrWifiSmeTspecResultCode) 0x00)
-#define CSR_WIFI_SME_TSPEC_RESULT_UNSPECIFIED_QOS_FAILURE ((CsrWifiSmeTspecResultCode) 0x01)
-#define CSR_WIFI_SME_TSPEC_RESULT_FAILURE ((CsrWifiSmeTspecResultCode) 0x02)
-#define CSR_WIFI_SME_TSPEC_RESULT_INVALID_TSPEC_PARAMETERS ((CsrWifiSmeTspecResultCode) 0x05)
-#define CSR_WIFI_SME_TSPEC_RESULT_INVALID_TCLAS_PARAMETERS ((CsrWifiSmeTspecResultCode) 0x06)
-#define CSR_WIFI_SME_TSPEC_RESULT_INSUFFICIENT_BANDWIDTH ((CsrWifiSmeTspecResultCode) 0x07)
-#define CSR_WIFI_SME_TSPEC_RESULT_WRONG_POLICY ((CsrWifiSmeTspecResultCode) 0x08)
-#define CSR_WIFI_SME_TSPEC_RESULT_REJECTED_WITH_SUGGESTED_CHANGES ((CsrWifiSmeTspecResultCode) 0x09)
-#define CSR_WIFI_SME_TSPEC_RESULT_TIMEOUT ((CsrWifiSmeTspecResultCode) 0x0D)
-#define CSR_WIFI_SME_TSPEC_RESULT_NOT_SUPPORTED ((CsrWifiSmeTspecResultCode) 0x0E)
-#define CSR_WIFI_SME_TSPEC_RESULT_IE_LENGTH_INCORRECT ((CsrWifiSmeTspecResultCode) 0x10)
-#define CSR_WIFI_SME_TSPEC_RESULT_INVALID_TRANSACTION_ID ((CsrWifiSmeTspecResultCode) 0x11)
-#define CSR_WIFI_SME_TSPEC_RESULT_INSTALLED ((CsrWifiSmeTspecResultCode) 0x12)
-#define CSR_WIFI_SME_TSPEC_RESULT_TID_ALREADY_INSTALLED ((CsrWifiSmeTspecResultCode) 0x13)
-#define CSR_WIFI_SME_TSPEC_RESULT_TSPEC_REMOTELY_DELETED ((CsrWifiSmeTspecResultCode) 0x14)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWepAuthMode
-
- DESCRIPTION
- Define bits for CsrWifiSmeWepAuthMode
-
- VALUES
- CSR_WIFI_SME_WEP_AUTH_MODE_OPEN - Open-WEP enabled network
- CSR_WIFI_SME_WEP_AUTH_MODE_SHARED - Shared-key WEP enabled network.
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeWepAuthMode;
-#define CSR_WIFI_SME_WEP_AUTH_MODE_OPEN ((CsrWifiSmeWepAuthMode) 0x00)
-#define CSR_WIFI_SME_WEP_AUTH_MODE_SHARED ((CsrWifiSmeWepAuthMode) 0x01)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWepCredentialType
-
- DESCRIPTION
- Definition of types of WEP Credentials
-
- VALUES
- CSR_WIFI_SME_CREDENTIAL_TYPE_WEP64
- - WEP 64 credential
- CSR_WIFI_SME_CREDENTIAL_TYPE_WEP128
- - WEP 128 credential
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeWepCredentialType;
-#define CSR_WIFI_SME_CREDENTIAL_TYPE_WEP64 ((CsrWifiSmeWepCredentialType) 0x00)
-#define CSR_WIFI_SME_CREDENTIAL_TYPE_WEP128 ((CsrWifiSmeWepCredentialType) 0x01)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWmmMode
-
- DESCRIPTION
- Defines bits for CsrWifiSmeWmmModeMask: enable/disable WMM features.
-
- VALUES
- CSR_WIFI_SME_WMM_MODE_DISABLED - Disables the WMM features.
- CSR_WIFI_SME_WMM_MODE_AC_ENABLED - Enables support for WMM-AC.
- CSR_WIFI_SME_WMM_MODE_PS_ENABLED - Enables support for WMM Power Save.
- CSR_WIFI_SME_WMM_MODE_SA_ENABLED - Currently not supported
- CSR_WIFI_SME_WMM_MODE_ENABLED - Enables support for all currently
- available WMM features.
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeWmmMode;
-#define CSR_WIFI_SME_WMM_MODE_DISABLED ((CsrWifiSmeWmmMode) 0x00)
-#define CSR_WIFI_SME_WMM_MODE_AC_ENABLED ((CsrWifiSmeWmmMode) 0x01)
-#define CSR_WIFI_SME_WMM_MODE_PS_ENABLED ((CsrWifiSmeWmmMode) 0x02)
-#define CSR_WIFI_SME_WMM_MODE_SA_ENABLED ((CsrWifiSmeWmmMode) 0x04)
-#define CSR_WIFI_SME_WMM_MODE_ENABLED ((CsrWifiSmeWmmMode) 0xFF)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWmmQosInfo
-
- DESCRIPTION
-    Defines bits for the QoS Info octet as defined in the WMM specification.
-    The first four values define one bit each that can be set or cleared.
-    Each of the last four values defines all of the remaining four bits, and
-    only one of them at a time shall be used.
- For more information, see 'WMM (including WMM Power Save) Specification -
- Version 1.1' and 'UniFi Configuring WMM and WMM-PS, Application Note'.
-
- VALUES
- CSR_WIFI_SME_WMM_QOS_INFO_AC_MAX_SP_ALL
- - WMM AP may deliver all buffered frames
- CSR_WIFI_SME_WMM_QOS_INFO_AC_VO
- - Enable UAPSD(both trigger and delivery) for Voice Access
- Category
- CSR_WIFI_SME_WMM_QOS_INFO_AC_VI
- - Enable UAPSD(both trigger and delivery) for Video Access
- Category
- CSR_WIFI_SME_WMM_QOS_INFO_AC_BK
- - Enable UAPSD(both trigger and delivery) for Background
- Access Category
- CSR_WIFI_SME_WMM_QOS_INFO_AC_BE
- - Enable UAPSD(both trigger and delivery) for Best Effort
- Access Category
- CSR_WIFI_SME_WMM_QOS_INFO_AC_MAX_SP_TWO
- - WMM AP may deliver a maximum of 2 buffered frames (MSDUs
- and MMPDUs) per USP
- CSR_WIFI_SME_WMM_QOS_INFO_AC_MAX_SP_FOUR
- - WMM AP may deliver a maximum of 4 buffered frames (MSDUs
- and MMPDUs) per USP
- CSR_WIFI_SME_WMM_QOS_INFO_AC_MAX_SP_SIX
- - WMM AP may deliver a maximum of 6 buffered frames (MSDUs
- and MMPDUs) per USP
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeWmmQosInfo;
-#define CSR_WIFI_SME_WMM_QOS_INFO_AC_MAX_SP_ALL ((CsrWifiSmeWmmQosInfo) 0x00)
-#define CSR_WIFI_SME_WMM_QOS_INFO_AC_VO ((CsrWifiSmeWmmQosInfo) 0x01)
-#define CSR_WIFI_SME_WMM_QOS_INFO_AC_VI ((CsrWifiSmeWmmQosInfo) 0x02)
-#define CSR_WIFI_SME_WMM_QOS_INFO_AC_BK ((CsrWifiSmeWmmQosInfo) 0x04)
-#define CSR_WIFI_SME_WMM_QOS_INFO_AC_BE ((CsrWifiSmeWmmQosInfo) 0x08)
-#define CSR_WIFI_SME_WMM_QOS_INFO_AC_MAX_SP_TWO ((CsrWifiSmeWmmQosInfo) 0x20)
-#define CSR_WIFI_SME_WMM_QOS_INFO_AC_MAX_SP_FOUR ((CsrWifiSmeWmmQosInfo) 0x40)
-#define CSR_WIFI_SME_WMM_QOS_INFO_AC_MAX_SP_SIX ((CsrWifiSmeWmmQosInfo) 0x60)
-
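The per-AC flags and the AC_MAX_SP_* values occupy separate bit positions, so a QoS Info octet is built by combining any set of AC flags with exactly one of the Max SP values. A minimal sketch, assuming the definitions above and the CsrWifiSmeWmmQosInfoMask typedef declared later in this header; the helper is illustrative only:

/* Illustrative only: enable U-APSD for the voice and video access categories
 * and let the AP deliver at most two buffered frames per service period. */
static CsrWifiSmeWmmQosInfoMask example_qos_info_voice_video(void)
{
    return (CsrWifiSmeWmmQosInfoMask)
           (CSR_WIFI_SME_WMM_QOS_INFO_AC_VO |
            CSR_WIFI_SME_WMM_QOS_INFO_AC_VI |
            CSR_WIFI_SME_WMM_QOS_INFO_AC_MAX_SP_TWO);
}
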
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWpsConfigType
-
- DESCRIPTION
- WPS config methods supported/used by a device
-
- VALUES
- CSR_WIFI_WPS_CONFIG_LABEL
- - Label
- CSR_WIFI_WPS_CONFIG_DISPLAY
- - Display
- CSR_WIFI_WPS_CONFIG_EXT_NFC
- - External NFC : Not supported in this release
- CSR_WIFI_WPS_CONFIG_INT_NFC
- - Internal NFC : Not supported in this release
- CSR_WIFI_WPS_CONFIG_NFC_IFACE
- - NFC interface : Not supported in this release
- CSR_WIFI_WPS_CONFIG_PBC
- - PBC
- CSR_WIFI_WPS_CONFIG_KEYPAD
- - KeyPad
- CSR_WIFI_WPS_CONFIG_VIRTUAL_PBC
- - PBC through software user interface
- CSR_WIFI_WPS_CONFIG_PHYSICAL_PBC
- - Physical PBC
- CSR_WIFI_WPS_CONFIG_VIRTUAL_DISPLAY
- - Virtual Display : via html config page etc
- CSR_WIFI_WPS_CONFIG_PHYSICAL_DISPLAY
- - Physical Display : Attached to the device
-
-*******************************************************************************/
-typedef u16 CsrWifiSmeWpsConfigType;
-#define CSR_WIFI_WPS_CONFIG_LABEL ((CsrWifiSmeWpsConfigType) 0x0004)
-#define CSR_WIFI_WPS_CONFIG_DISPLAY ((CsrWifiSmeWpsConfigType) 0x0008)
-#define CSR_WIFI_WPS_CONFIG_EXT_NFC ((CsrWifiSmeWpsConfigType) 0x0010)
-#define CSR_WIFI_WPS_CONFIG_INT_NFC ((CsrWifiSmeWpsConfigType) 0x0020)
-#define CSR_WIFI_WPS_CONFIG_NFC_IFACE ((CsrWifiSmeWpsConfigType) 0x0040)
-#define CSR_WIFI_WPS_CONFIG_PBC ((CsrWifiSmeWpsConfigType) 0x0080)
-#define CSR_WIFI_WPS_CONFIG_KEYPAD ((CsrWifiSmeWpsConfigType) 0x0100)
-#define CSR_WIFI_WPS_CONFIG_VIRTUAL_PBC ((CsrWifiSmeWpsConfigType) 0x0280)
-#define CSR_WIFI_WPS_CONFIG_PHYSICAL_PBC ((CsrWifiSmeWpsConfigType) 0x0480)
-#define CSR_WIFI_WPS_CONFIG_VIRTUAL_DISPLAY ((CsrWifiSmeWpsConfigType) 0x2008)
-#define CSR_WIFI_WPS_CONFIG_PHYSICAL_DISPLAY ((CsrWifiSmeWpsConfigType) 0x4008)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWpsDeviceCategory
-
- DESCRIPTION
-    WPS Primary Device Types
-
- VALUES
- CSR_WIFI_SME_WPS_CATEGORY_UNSPECIFIED
- - Unspecified.
- CSR_WIFI_SME_WPS_CATEGORY_COMPUTER
- - Computer.
- CSR_WIFI_SME_WPS_CATEGORY_INPUT_DEV
- - Input device
- CSR_WIFI_SME_WPS_CATEGORY_PRT_SCAN_FX_CP
- - Printer Scanner Fax Copier etc
- CSR_WIFI_SME_WPS_CATEGORY_CAMERA
- - Camera
- CSR_WIFI_SME_WPS_CATEGORY_STORAGE
- - Storage
- CSR_WIFI_SME_WPS_CATEGORY_NET_INFRA
- - Net Infra
- CSR_WIFI_SME_WPS_CATEGORY_DISPLAY
- - Display
- CSR_WIFI_SME_WPS_CATEGORY_MULTIMEDIA
- - Multimedia
- CSR_WIFI_SME_WPS_CATEGORY_GAMING
- - Gaming.
- CSR_WIFI_SME_WPS_CATEGORY_TELEPHONE
- - Telephone.
- CSR_WIFI_SME_WPS_CATEGORY_AUDIO
- - Audio
- CSR_WIFI_SME_WPS_CATEOARY_OTHERS
- - Others.
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeWpsDeviceCategory;
-#define CSR_WIFI_SME_WPS_CATEGORY_UNSPECIFIED ((CsrWifiSmeWpsDeviceCategory) 0x00)
-#define CSR_WIFI_SME_WPS_CATEGORY_COMPUTER ((CsrWifiSmeWpsDeviceCategory) 0x01)
-#define CSR_WIFI_SME_WPS_CATEGORY_INPUT_DEV ((CsrWifiSmeWpsDeviceCategory) 0x02)
-#define CSR_WIFI_SME_WPS_CATEGORY_PRT_SCAN_FX_CP ((CsrWifiSmeWpsDeviceCategory) 0x03)
-#define CSR_WIFI_SME_WPS_CATEGORY_CAMERA ((CsrWifiSmeWpsDeviceCategory) 0x04)
-#define CSR_WIFI_SME_WPS_CATEGORY_STORAGE ((CsrWifiSmeWpsDeviceCategory) 0x05)
-#define CSR_WIFI_SME_WPS_CATEGORY_NET_INFRA ((CsrWifiSmeWpsDeviceCategory) 0x06)
-#define CSR_WIFI_SME_WPS_CATEGORY_DISPLAY ((CsrWifiSmeWpsDeviceCategory) 0x07)
-#define CSR_WIFI_SME_WPS_CATEGORY_MULTIMEDIA ((CsrWifiSmeWpsDeviceCategory) 0x08)
-#define CSR_WIFI_SME_WPS_CATEGORY_GAMING ((CsrWifiSmeWpsDeviceCategory) 0x09)
-#define CSR_WIFI_SME_WPS_CATEGORY_TELEPHONE ((CsrWifiSmeWpsDeviceCategory) 0x0A)
-#define CSR_WIFI_SME_WPS_CATEGORY_AUDIO ((CsrWifiSmeWpsDeviceCategory) 0x0B)
-#define CSR_WIFI_SME_WPS_CATEOARY_OTHERS ((CsrWifiSmeWpsDeviceCategory) 0xFF)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWpsDeviceSubCategory
-
- DESCRIPTION
-    WPS Secondary Device Types
-
- VALUES
- CSR_WIFI_SME_WPS_SUB_CATEGORY_UNSPECIFIED
-                         - Unspecified
- CSR_WIFI_SME_WPS_STORAGE_SUB_CATEGORY_NAS
-                         - Network Attached Storage
- CSR_WIFI_SME_WPS_PSFC_SUB_CATEGORY_PRNTR
- - Printer or print server
- CSR_WIFI_SME_WPS_TELEPHONE_SUB_CATEGORY_WM
- - Windows mobile
- CSR_WIFI_SME_WPS_AUDIO_SUB_CATEGORY_TUNER
- - Audio tuner/receiver
- CSR_WIFI_SME_WPS_CAMERA_SUB_CATEGORY_DIG_STL
- - Digital still camera
- CSR_WIFI_SME_WPS_NET_INFRA_SUB_CATEGORY_AP
- - Access Point
- CSR_WIFI_SME_WPS_DISPLAY_SUB_CATEGORY_TV
- - TV.
- CSR_WIFI_SME_WPS_MM_SUB_CATEGORY_DAR
- - DAR.
- CSR_WIFI_SME_WPS_INPUT_DEV_SUB_CATEGORY_KEYBD
- - Keyboard.
- CSR_WIFI_SME_WPS_COMPUTER_SUB_CATEGORY_PC
- - PC.
- CSR_WIFI_SME_WPS_GAMING_SUB_CATEGORY_XBOX
- - Xbox.
- CSR_WIFI_SME_WPS_PSFC_SUB_CATEGORY_SCNR
- - Scanner
- CSR_WIFI_SME_WPS_COMPUTER_SUB_CATEGORY_SERVER
- - Server
- CSR_WIFI_SME_WPS_NET_INFRA_SUB_CATEGORY_ROUTER
- - Router
- CSR_WIFI_SME_WPS_MM_SUB_CATEGORY_PVR
- - PVR
- CSR_WIFI_SME_WPS_AUDIO_SUB_CATEGORY_SPEAKER
- - Speaker
- CSR_WIFI_SME_WPS_TELEPHONE_SUB_CATEGORY_FP_SM
- - Feature phone - Single mode
- CSR_WIFI_SME_WPS_CAMERA_SUB_CATEGORY_VIDEO
- - Video camera
- CSR_WIFI_SME_WPS_DISPLAY_SUB_CATEGORY_PIC_FRM
- - Picture frame
- CSR_WIFI_SME_WPS_GAMING_SUB_CATEGORY_XBOX_360
- - Xbox360
- CSR_WIFI_SME_WPS_INPUT_DEV_SUB_CATEGORY_MOUSE
- - Mouse
- CSR_WIFI_SME_WPS_NET_INFRA_SUB_CATEGORY_SWITCH
- - Switch
- CSR_WIFI_SME_WPS_AUDIO_SUB_CATEGORY_PMP
- - Portable music player
- CSR_WIFI_SME_WPS_INPUT_DEV_SUB_CATEGORY_JOYSTK
- - Joy stick
- CSR_WIFI_SME_WPS_GAMING_SUB_CATEGORY_PLAY_STN
- - Play-station
- CSR_WIFI_SME_WPS_COMPUTER_SUB_CATEGORY_MED_CENT
- - Media Center
- CSR_WIFI_SME_WPS_MM_SUB_CATEGORY_MCX
- - MCX
- CSR_WIFI_SME_WPS_TELEPHONE_SUB_CATEGORY_FP_DM
- - Feature phone - Dual mode
- CSR_WIFI_SME_WPS_CAMERA_SUB_CATEGORY_WEB
- - Web camera
- CSR_WIFI_SME_WPS_PSFC_SUB_CATEGORY_FAX
- - Fax
- CSR_WIFI_SME_WPS_DISPLAY_SUB_CATEGORY_PROJECTOR
- - Projector
- CSR_WIFI_SME_WPS_INPUT_DEV_SUB_CATEGORY_TRKBL
- - Track Ball
- CSR_WIFI_SME_WPS_MM_SUB_CATEGORY_ST_BOX
- - Set-Top-Box
- CSR_WIFI_SME_WPS_NET_INFRA_SUB_CATEGORY_GATEWAY
- - GateWay.
- CSR_WIFI_SME_WPS_CAMERA_SUB_CATEGORY_SECURITY
- - Security camera
- CSR_WIFI_SME_WPS_COMPUTER_SUB_CATEGORY_ULTRA_MOB_PC
- - Ultra mobile PC.
- CSR_WIFI_SME_WPS_GAMING_SUB_CATEGORY_CONSOLE
- - Game console/Game console adapter
- CSR_WIFI_SME_WPS_PSFC_SUB_CATEGORY_CPR
- - Copier
- CSR_WIFI_SME_WPS_AUDIO_SUB_CATEGORY_HEADSET
- - Headset(headphones + microphone)
- CSR_WIFI_SME_WPS_TELEPHONE_SUB_CATEGORY_SP_SM
- - Smart phone - Single mode
- CSR_WIFI_SME_WPS_DISPLAY_SUB_CATEGORY_MONITOR
- - Monitor.
- CSR_WIFI_SME_WPS_INPUT_DEV_SUB_CATEGORY_GAME_CTRL
- - Game control.
- CSR_WIFI_SME_WPS_PSFC_SUB_CATEGORY_ALL
- - All-in-One
- CSR_WIFI_SME_WPS_MM_SUB_CATEGORY_MEDIA
- - Media Server/Media Adapter/Media Extender
- CSR_WIFI_SME_WPS_TELEPHONE_SUB_CATEGORY_SP_DM
- - Smart phone - Dual mode
- CSR_WIFI_SME_WPS_GAMING_SUB_CATEGORY_PORT_DEV
- - Portable gaming device
- CSR_WIFI_SME_WPS_AUDIO_SUB_CATEGORY_HEADPHONE
- - Headphone.
- CSR_WIFI_SME_WPS_COMPUTER_SUB_CATEGORY_NOTEBOOK
- - Notebook.
- CSR_WIFI_SME_WPS_INPUT_DEV_SUB_CATEGORY_REMOTE
- - Remote control
- CSR_WIFI_SME_WPS_AUDIO_SUB_CATEGORY_MIC
- - Microphone
- CSR_WIFI_SME_WPS_COMPUTER_SUB_CATEGORY_DESKTOP
- - Desktop.
- CSR_WIFI_SME_WPS_MM_SUB_CATEGORY_VP
- - Portable video player
- CSR_WIFI_SME_WPS_COMPUTER_SUB_CATEGORY_MID
- - Mobile internet device
- CSR_WIFI_SME_WPS_INPUT_DEV_SUB_CATEGORY_TOUCH_SCRN
- - Touch screen
- CSR_WIFI_SME_WPS_INPUT_DEV_SUB_CATEGORY_BIOMET_RDR
- - Biometric reader
- CSR_WIFI_SME_WPS_COMPUTER_SUB_CATEGORY_NETBOOK
- - Netbook
- CSR_WIFI_SME_WPS_INPUT_DEV_SUB_CATEGORY_BRCD_RDR
- - Bar code reader.
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeWpsDeviceSubCategory;
-#define CSR_WIFI_SME_WPS_SUB_CATEGORY_UNSPECIFIED ((CsrWifiSmeWpsDeviceSubCategory) 0x00)
-#define CSR_WIFI_SME_WPS_STORAGE_SUB_CATEGORY_NAS ((CsrWifiSmeWpsDeviceSubCategory) 0x01)
-#define CSR_WIFI_SME_WPS_PSFC_SUB_CATEGORY_PRNTR ((CsrWifiSmeWpsDeviceSubCategory) 0x01)
-#define CSR_WIFI_SME_WPS_TELEPHONE_SUB_CATEGORY_WM ((CsrWifiSmeWpsDeviceSubCategory) 0x01)
-#define CSR_WIFI_SME_WPS_AUDIO_SUB_CATEGORY_TUNER ((CsrWifiSmeWpsDeviceSubCategory) 0x01)
-#define CSR_WIFI_SME_WPS_CAMERA_SUB_CATEGORY_DIG_STL ((CsrWifiSmeWpsDeviceSubCategory) 0x01)
-#define CSR_WIFI_SME_WPS_NET_INFRA_SUB_CATEGORY_AP ((CsrWifiSmeWpsDeviceSubCategory) 0x01)
-#define CSR_WIFI_SME_WPS_DISPLAY_SUB_CATEGORY_TV ((CsrWifiSmeWpsDeviceSubCategory) 0x01)
-#define CSR_WIFI_SME_WPS_MM_SUB_CATEGORY_DAR ((CsrWifiSmeWpsDeviceSubCategory) 0x01)
-#define CSR_WIFI_SME_WPS_INPUT_DEV_SUB_CATEGORY_KEYBD ((CsrWifiSmeWpsDeviceSubCategory) 0x01)
-#define CSR_WIFI_SME_WPS_COMPUTER_SUB_CATEGORY_PC ((CsrWifiSmeWpsDeviceSubCategory) 0x01)
-#define CSR_WIFI_SME_WPS_GAMING_SUB_CATEGORY_XBOX ((CsrWifiSmeWpsDeviceSubCategory) 0x01)
-#define CSR_WIFI_SME_WPS_PSFC_SUB_CATEGORY_SCNR ((CsrWifiSmeWpsDeviceSubCategory) 0x02)
-#define CSR_WIFI_SME_WPS_COMPUTER_SUB_CATEGORY_SERVER ((CsrWifiSmeWpsDeviceSubCategory) 0x02)
-#define CSR_WIFI_SME_WPS_NET_INFRA_SUB_CATEGORY_ROUTER ((CsrWifiSmeWpsDeviceSubCategory) 0x02)
-#define CSR_WIFI_SME_WPS_MM_SUB_CATEGORY_PVR ((CsrWifiSmeWpsDeviceSubCategory) 0x02)
-#define CSR_WIFI_SME_WPS_AUDIO_SUB_CATEGORY_SPEAKER ((CsrWifiSmeWpsDeviceSubCategory) 0x02)
-#define CSR_WIFI_SME_WPS_TELEPHONE_SUB_CATEGORY_FP_SM ((CsrWifiSmeWpsDeviceSubCategory) 0x02)
-#define CSR_WIFI_SME_WPS_CAMERA_SUB_CATEGORY_VIDEO ((CsrWifiSmeWpsDeviceSubCategory) 0x02)
-#define CSR_WIFI_SME_WPS_DISPLAY_SUB_CATEGORY_PIC_FRM ((CsrWifiSmeWpsDeviceSubCategory) 0x02)
-#define CSR_WIFI_SME_WPS_GAMING_SUB_CATEGORY_XBOX_360 ((CsrWifiSmeWpsDeviceSubCategory) 0x02)
-#define CSR_WIFI_SME_WPS_INPUT_DEV_SUB_CATEGORY_MOUSE ((CsrWifiSmeWpsDeviceSubCategory) 0x02)
-#define CSR_WIFI_SME_WPS_NET_INFRA_SUB_CATEGORY_SWITCH ((CsrWifiSmeWpsDeviceSubCategory) 0x03)
-#define CSR_WIFI_SME_WPS_AUDIO_SUB_CATEGORY_PMP ((CsrWifiSmeWpsDeviceSubCategory) 0x03)
-#define CSR_WIFI_SME_WPS_INPUT_DEV_SUB_CATEGORY_JOYSTK ((CsrWifiSmeWpsDeviceSubCategory) 0x03)
-#define CSR_WIFI_SME_WPS_GAMING_SUB_CATEGORY_PLAY_STN ((CsrWifiSmeWpsDeviceSubCategory) 0x03)
-#define CSR_WIFI_SME_WPS_COMPUTER_SUB_CATEGORY_MED_CENT ((CsrWifiSmeWpsDeviceSubCategory) 0x03)
-#define CSR_WIFI_SME_WPS_MM_SUB_CATEGORY_MCX ((CsrWifiSmeWpsDeviceSubCategory) 0x03)
-#define CSR_WIFI_SME_WPS_TELEPHONE_SUB_CATEGORY_FP_DM ((CsrWifiSmeWpsDeviceSubCategory) 0x03)
-#define CSR_WIFI_SME_WPS_CAMERA_SUB_CATEGORY_WEB ((CsrWifiSmeWpsDeviceSubCategory) 0x03)
-#define CSR_WIFI_SME_WPS_PSFC_SUB_CATEGORY_FAX ((CsrWifiSmeWpsDeviceSubCategory) 0x03)
-#define CSR_WIFI_SME_WPS_DISPLAY_SUB_CATEGORY_PROJECTOR ((CsrWifiSmeWpsDeviceSubCategory) 0x03)
-#define CSR_WIFI_SME_WPS_INPUT_DEV_SUB_CATEGORY_TRKBL ((CsrWifiSmeWpsDeviceSubCategory) 0x04)
-#define CSR_WIFI_SME_WPS_MM_SUB_CATEGORY_ST_BOX ((CsrWifiSmeWpsDeviceSubCategory) 0x04)
-#define CSR_WIFI_SME_WPS_NET_INFRA_SUB_CATEGORY_GATEWAY ((CsrWifiSmeWpsDeviceSubCategory) 0x04)
-#define CSR_WIFI_SME_WPS_CAMERA_SUB_CATEGORY_SECURITY ((CsrWifiSmeWpsDeviceSubCategory) 0x04)
-#define CSR_WIFI_SME_WPS_COMPUTER_SUB_CATEGORY_ULTRA_MOB_PC ((CsrWifiSmeWpsDeviceSubCategory) 0x04)
-#define CSR_WIFI_SME_WPS_GAMING_SUB_CATEGORY_CONSOLE ((CsrWifiSmeWpsDeviceSubCategory) 0x04)
-#define CSR_WIFI_SME_WPS_PSFC_SUB_CATEGORY_CPR ((CsrWifiSmeWpsDeviceSubCategory) 0x04)
-#define CSR_WIFI_SME_WPS_AUDIO_SUB_CATEGORY_HEADSET ((CsrWifiSmeWpsDeviceSubCategory) 0x04)
-#define CSR_WIFI_SME_WPS_TELEPHONE_SUB_CATEGORY_SP_SM ((CsrWifiSmeWpsDeviceSubCategory) 0x04)
-#define CSR_WIFI_SME_WPS_DISPLAY_SUB_CATEGORY_MONITOR ((CsrWifiSmeWpsDeviceSubCategory) 0x04)
-#define CSR_WIFI_SME_WPS_INPUT_DEV_SUB_CATEGORY_GAME_CTRL ((CsrWifiSmeWpsDeviceSubCategory) 0x05)
-#define CSR_WIFI_SME_WPS_PSFC_SUB_CATEGORY_ALL ((CsrWifiSmeWpsDeviceSubCategory) 0x05)
-#define CSR_WIFI_SME_WPS_MM_SUB_CATEGORY_MEDIA ((CsrWifiSmeWpsDeviceSubCategory) 0x05)
-#define CSR_WIFI_SME_WPS_TELEPHONE_SUB_CATEGORY_SP_DM ((CsrWifiSmeWpsDeviceSubCategory) 0x05)
-#define CSR_WIFI_SME_WPS_GAMING_SUB_CATEGORY_PORT_DEV ((CsrWifiSmeWpsDeviceSubCategory) 0x05)
-#define CSR_WIFI_SME_WPS_AUDIO_SUB_CATEGORY_HEADPHONE ((CsrWifiSmeWpsDeviceSubCategory) 0x05)
-#define CSR_WIFI_SME_WPS_COMPUTER_SUB_CATEGORY_NOTEBOOK ((CsrWifiSmeWpsDeviceSubCategory) 0x05)
-#define CSR_WIFI_SME_WPS_INPUT_DEV_SUB_CATEGORY_REMOTE ((CsrWifiSmeWpsDeviceSubCategory) 0x06)
-#define CSR_WIFI_SME_WPS_AUDIO_SUB_CATEGORY_MIC ((CsrWifiSmeWpsDeviceSubCategory) 0x06)
-#define CSR_WIFI_SME_WPS_COMPUTER_SUB_CATEGORY_DESKTOP ((CsrWifiSmeWpsDeviceSubCategory) 0x06)
-#define CSR_WIFI_SME_WPS_MM_SUB_CATEGORY_VP ((CsrWifiSmeWpsDeviceSubCategory) 0x06)
-#define CSR_WIFI_SME_WPS_COMPUTER_SUB_CATEGORY_MID ((CsrWifiSmeWpsDeviceSubCategory) 0x07)
-#define CSR_WIFI_SME_WPS_INPUT_DEV_SUB_CATEGORY_TOUCH_SCRN ((CsrWifiSmeWpsDeviceSubCategory) 0x07)
-#define CSR_WIFI_SME_WPS_INPUT_DEV_SUB_CATEGORY_BIOMET_RDR ((CsrWifiSmeWpsDeviceSubCategory) 0x08)
-#define CSR_WIFI_SME_WPS_COMPUTER_SUB_CATEGORY_NETBOOK ((CsrWifiSmeWpsDeviceSubCategory) 0x08)
-#define CSR_WIFI_SME_WPS_INPUT_DEV_SUB_CATEGORY_BRCD_RDR ((CsrWifiSmeWpsDeviceSubCategory) 0x09)
-
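The sub-category codes above are only meaningful in combination with a primary category: 0x01, for example, means PC under the COMPUTER category but Access Point under NET_INFRA. A minimal sketch of keeping the two values together, assuming the definitions above; the struct and variable are purely illustrative and not part of this API:

/* Illustrative only: a hypothetical primary/secondary device type pair. */
struct example_wps_device_type {
    CsrWifiSmeWpsDeviceCategory    category;
    CsrWifiSmeWpsDeviceSubCategory subCategory;
};

/* A notebook computer: COMPUTER (0x01) paired with NOTEBOOK (0x05). */
static const struct example_wps_device_type example_notebook = {
    CSR_WIFI_SME_WPS_CATEGORY_COMPUTER,
    CSR_WIFI_SME_WPS_COMPUTER_SUB_CATEGORY_NOTEBOOK
};
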
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWpsDpid
-
- DESCRIPTION
- Device Password ID for the chosen config method
-
- VALUES
- CSR_WIFI_SME_WPS_DPID_PIN - PIN
- CSR_WIFI_SME_WPS_DPID_USER - User specified : Used only during P2P GO
- negotiation procedure
-    CSR_WIFI_SME_WPS_DPID_MACHINE   - Machine specified : Not used in this
- release
- CSR_WIFI_SME_WPS_DPID_REKEY - Rekey : Not used in this release
- CSR_WIFI_SME_WPS_DPID_PBC - PBC
-    CSR_WIFI_SME_WPS_DPID_REGISTRAR - Registrar specified : Used only in P2P GO
- negotiation procedure
-
-*******************************************************************************/
-typedef u16 CsrWifiSmeWpsDpid;
-#define CSR_WIFI_SME_WPS_DPID_PIN ((CsrWifiSmeWpsDpid) 0x0000)
-#define CSR_WIFI_SME_WPS_DPID_USER ((CsrWifiSmeWpsDpid) 0x0001)
-#define CSR_WIFI_SME_WPS_DPID_MACHINE ((CsrWifiSmeWpsDpid) 0x0002)
-#define CSR_WIFI_SME_WPS_DPID_REKEY ((CsrWifiSmeWpsDpid) 0x0003)
-#define CSR_WIFI_SME_WPS_DPID_PBC ((CsrWifiSmeWpsDpid) 0x0004)
-#define CSR_WIFI_SME_WPS_DPID_REGISTRAR ((CsrWifiSmeWpsDpid) 0x0005)
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWpsRegistration
-
- DESCRIPTION
-    Indicates whether WPS registration is required
-
-  VALUES
-    CSR_WIFI_SME_WPS_REG_NOT_REQUIRED - WPS registration is not required
-    CSR_WIFI_SME_WPS_REG_REQUIRED     - WPS registration is required
-    CSR_WIFI_SME_WPS_REG_UNKNOWN      - WPS registration requirement is unknown
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeWpsRegistration;
-#define CSR_WIFI_SME_WPS_REG_NOT_REQUIRED ((CsrWifiSmeWpsRegistration) 0x00)
-#define CSR_WIFI_SME_WPS_REG_REQUIRED ((CsrWifiSmeWpsRegistration) 0x01)
-#define CSR_WIFI_SME_WPS_REG_UNKNOWN ((CsrWifiSmeWpsRegistration) 0x02)
-
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeAuthModeMask
-
- DESCRIPTION
- Mask type for use with the values defined by CsrWifiSmeAuthMode
-
-*******************************************************************************/
-typedef u16 CsrWifiSmeAuthModeMask;
-/*******************************************************************************
-
- NAME
- CsrWifiSmeEncryptionMask
-
- DESCRIPTION
- Mask type for use with the values defined by CsrWifiSmeEncryption
-
-*******************************************************************************/
-typedef u16 CsrWifiSmeEncryptionMask;
-/*******************************************************************************
-
- NAME
- CsrWifiSmeIndicationsMask
-
- DESCRIPTION
- Mask type for use with the values defined by CsrWifiSmeIndications
-
-*******************************************************************************/
-typedef u32 CsrWifiSmeIndicationsMask;
-/*******************************************************************************
-
- NAME
- CsrWifiSmeP2pCapabilityMask
-
- DESCRIPTION
- Mask type for use with the values defined by CsrWifiSmeP2pCapability
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeP2pCapabilityMask;
-/*******************************************************************************
-
- NAME
- CsrWifiSmeP2pGroupCapabilityMask
-
- DESCRIPTION
- Mask type for use with the values defined by CsrWifiSmeP2pGroupCapability
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeP2pGroupCapabilityMask;
-/*******************************************************************************
-
- NAME
- CsrWifiSmeTspecCtrlMask
-
- DESCRIPTION
- Mask type for use with the values defined by CsrWifiSmeTspecCtrl
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeTspecCtrlMask;
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWmmModeMask
-
- DESCRIPTION
- Mask type for use with the values defined by CsrWifiSmeWmmMode
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeWmmModeMask;
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWmmQosInfoMask
-
- DESCRIPTION
- Mask type for use with the values defined by CsrWifiSmeWmmQosInfo
-
-*******************************************************************************/
-typedef u8 CsrWifiSmeWmmQosInfoMask;
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWpsConfigTypeMask
-
- DESCRIPTION
- Mask type for use with the values defined by CsrWifiSmeWpsConfigType
-
-*******************************************************************************/
-typedef u16 CsrWifiSmeWpsConfigTypeMask;
-
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeAdHocConfig
-
- DESCRIPTION
- Defines values to use when starting an Ad-hoc (IBSS) network.
-
- MEMBERS
- atimWindowTu - ATIM window specified for IBSS
- beaconPeriodTu - Interval between beacon packets
- joinOnlyAttempts - Maximum number of attempts to join an ad-hoc network.
- The default value is 1.
- Set to 0 for infinite attempts.
- joinAttemptIntervalMs - Time between scans for joining the requested IBSS.
-
-*******************************************************************************/
-typedef struct
-{
- u16 atimWindowTu;
- u16 beaconPeriodTu;
- u16 joinOnlyAttempts;
- u16 joinAttemptIntervalMs;
-} CsrWifiSmeAdHocConfig;
-
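A minimal sketch of filling in this structure; apart from joinOnlyAttempts, whose documented default is 1, the numbers below are arbitrary examples rather than recommended settings:

/* Illustrative only: example IBSS parameters. */
static void example_fill_adhoc_config(CsrWifiSmeAdHocConfig *cfg)
{
    cfg->atimWindowTu          = 0;    /* no ATIM window                    */
    cfg->beaconPeriodTu        = 100;  /* beacon every 100 TU               */
    cfg->joinOnlyAttempts      = 1;    /* documented default; 0 = infinite  */
    cfg->joinAttemptIntervalMs = 1000; /* scan for the IBSS once per second */
}
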
-/*******************************************************************************
-
- NAME
- CsrWifiSmeAvailabilityConfig
-
- DESCRIPTION
-
- MEMBERS
- listenChannel -
- availabilityDuration -
- avalabilityPeriod -
-
-*******************************************************************************/
-typedef struct
-{
- u8 listenChannel;
- u16 availabilityDuration;
- u16 avalabilityPeriod;
-} CsrWifiSmeAvailabilityConfig;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCcxConfig
-
- DESCRIPTION
- This type is reserved for future use and should not be used.
-
- MEMBERS
- keepAliveTimeMs - NOT USED
- apRoamingEnabled - NOT USED
- measurementsMask - NOT USED
- ccxRadioMgtEnabled - NOT USED
-
-*******************************************************************************/
-typedef struct
-{
- u8 keepAliveTimeMs;
- u8 apRoamingEnabled;
- u8 measurementsMask;
- u8 ccxRadioMgtEnabled;
-} CsrWifiSmeCcxConfig;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCoexConfig
-
- DESCRIPTION
- Parameters for the coexistence behaviour.
-
- MEMBERS
- coexEnableSchemeManagement - Enables the Coexistence Management Scheme
- coexPeriodicWakeHost - If TRUE the firmware wakes up the host
- periodically according to the traffic
- period and latency parameters; the host
- will then send the data to transmit only
- when woken up.
- If FALSE, the firmware does not wake up the
- host and the host will send the data to
- transmit to the firmware whenever there is
- data to transmit
- coexTrafficBurstyLatencyMs - Period of awakening for the firmware used
- when bursty traffic is detected
- coexTrafficContinuousLatencyMs - Period of awakening for the firmware used
- when continuous traffic is detected
-    coexObexBlackoutDurationMs     - Blackout Duration when an OBEX Connection is
-                                     used
-    coexObexBlackoutPeriodMs       - Blackout Period when an OBEX Connection is
-                                     used
-    coexA2dpBrBlackoutDurationMs   - Blackout Duration when a Basic Rate A2DP
-                                     Connection is streaming data
-    coexA2dpBrBlackoutPeriodMs     - Blackout Period when a Basic Rate A2DP
-                                     Connection is streaming data
-    coexA2dpEdrBlackoutDurationMs  - Blackout Duration when an Enhanced Data
-                                     Rate A2DP Connection is streaming data
-    coexA2dpEdrBlackoutPeriodMs    - Blackout Period when an Enhanced Data Rate
-                                     A2DP Connection is streaming data
- coexPagingBlackoutDurationMs - Blackout Duration when a BT page is active
- coexPagingBlackoutPeriodMs - Blackout Period when a BT page is active
- coexInquiryBlackoutDurationMs - Blackout Duration when a BT inquiry is
- active
- coexInquiryBlackoutPeriodMs - Blackout Period when a BT inquiry is active
-
-*******************************************************************************/
-typedef struct
-{
- u8 coexEnableSchemeManagement;
- u8 coexPeriodicWakeHost;
- u16 coexTrafficBurstyLatencyMs;
- u16 coexTrafficContinuousLatencyMs;
- u16 coexObexBlackoutDurationMs;
- u16 coexObexBlackoutPeriodMs;
- u16 coexA2dpBrBlackoutDurationMs;
- u16 coexA2dpBrBlackoutPeriodMs;
- u16 coexA2dpEdrBlackoutDurationMs;
- u16 coexA2dpEdrBlackoutPeriodMs;
- u16 coexPagingBlackoutDurationMs;
- u16 coexPagingBlackoutPeriodMs;
- u16 coexInquiryBlackoutDurationMs;
- u16 coexInquiryBlackoutPeriodMs;
-} CsrWifiSmeCoexConfig;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeConnectionStats
-
- DESCRIPTION
- Indicates the statistics of the connection.
-    The dot11 fields are defined in Annex D of the IEEE 802.11 standard.
-
- MEMBERS
- unifiTxDataRate
- - The bit rate currently in use for transmissions of unicast
- data frames; a data rate in units of 500kbit/s.
- On an infrastructure BSS, this is the data rate used in
- communicating with the associated access point; if there is
- none, an error is returned.
- On an IBSS, this is the data rate used for the last
- transmission of a unicast data frame to any station in the
- IBSS. If no such transmission has been made, an error is
- returned.
- unifiRxDataRate
- - As above for receiving data
- dot11RetryCount
- - See IEEE 802.11 Standard
- dot11MultipleRetryCount
- - See IEEE 802.11 Standard
- dot11AckFailureCount
- - See IEEE 802.11 Standard
- dot11FrameDuplicateCount
- - See IEEE 802.11 Standard
- dot11FcsErrorCount
- - See IEEE 802.11 Standard
- dot11RtsSuccessCount
- - See IEEE 802.11 Standard
- dot11RtsFailureCount
- - See IEEE 802.11 Standard
- dot11FailedCount
- - See IEEE 802.11 Standard
- dot11TransmittedFragmentCount
- - See IEEE 802.11 Standard
- dot11TransmittedFrameCount
- - See IEEE 802.11 Standard
- dot11WepExcludedCount
- - See IEEE 802.11 Standard
- dot11WepIcvErrorCount
- - See IEEE 802.11 Standard
- dot11WepUndecryptableCount
- - See IEEE 802.11 Standard
- dot11MulticastReceivedFrameCount
- - See IEEE 802.11 Standard
- dot11MulticastTransmittedFrameCount
- - See IEEE 802.11 Standard
- dot11ReceivedFragmentCount
- - See IEEE 802.11 Standard
- dot11Rsna4WayHandshakeFailures
- - See IEEE 802.11 Standard
- dot11RsnaTkipCounterMeasuresInvoked
- - See IEEE 802.11 Standard
- dot11RsnaStatsTkipLocalMicFailures
- - See IEEE 802.11 Standard
- dot11RsnaStatsTkipReplays
- - See IEEE 802.11 Standard
- dot11RsnaStatsTkipIcvErrors
- - See IEEE 802.11 Standard
- dot11RsnaStatsCcmpReplays
- - See IEEE 802.11 Standard
- dot11RsnaStatsCcmpDecryptErrors
- - See IEEE 802.11 Standard
-
-*******************************************************************************/
-typedef struct
-{
- u8 unifiTxDataRate;
- u8 unifiRxDataRate;
- u32 dot11RetryCount;
- u32 dot11MultipleRetryCount;
- u32 dot11AckFailureCount;
- u32 dot11FrameDuplicateCount;
- u32 dot11FcsErrorCount;
- u32 dot11RtsSuccessCount;
- u32 dot11RtsFailureCount;
- u32 dot11FailedCount;
- u32 dot11TransmittedFragmentCount;
- u32 dot11TransmittedFrameCount;
- u32 dot11WepExcludedCount;
- u32 dot11WepIcvErrorCount;
- u32 dot11WepUndecryptableCount;
- u32 dot11MulticastReceivedFrameCount;
- u32 dot11MulticastTransmittedFrameCount;
- u32 dot11ReceivedFragmentCount;
- u32 dot11Rsna4WayHandshakeFailures;
- u32 dot11RsnaTkipCounterMeasuresInvoked;
- u32 dot11RsnaStatsTkipLocalMicFailures;
- u32 dot11RsnaStatsTkipReplays;
- u32 dot11RsnaStatsTkipIcvErrors;
- u32 dot11RsnaStatsCcmpReplays;
- u32 dot11RsnaStatsCcmpDecryptErrors;
-} CsrWifiSmeConnectionStats;
-
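Since unifiTxDataRate and unifiRxDataRate are reported in units of 500 kbit/s, converting to kbit/s is a single multiplication; a reported value of 108, for instance, corresponds to 54 Mbit/s. A minimal sketch, assuming the struct above is in scope; the helper name is illustrative:

/* Illustrative only: convert the reported unicast TX rate (units of
 * 500 kbit/s) to kbit/s, e.g. 108 -> 54000 kbit/s (54 Mbit/s). */
static u32 example_tx_rate_kbps(const CsrWifiSmeConnectionStats *stats)
{
    return (u32)stats->unifiTxDataRate * 500;
}
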
-/*******************************************************************************
-
- NAME
- CsrWifiSmeDataBlock
-
- DESCRIPTION
- Holds a generic data block to be passed through the interface
-
- MEMBERS
- length - Length of the data block
- data - Points to the first byte of the data block
-
-*******************************************************************************/
-typedef struct
-{
- u16 length;
- u8 *data;
-} CsrWifiSmeDataBlock;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeEmpty
-
- DESCRIPTION
- Empty Structure to indicate that no parameters are available.
-
- MEMBERS
- empty - Only element of the empty structure (always set to 0).
-
-*******************************************************************************/
-typedef struct
-{
- u8 empty;
-} CsrWifiSmeEmpty;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeLinkQuality
-
- DESCRIPTION
- Indicates the quality of the link
-
- MEMBERS
- unifiRssi - Indicates the received signal strength indication of the link in
- dBm
- unifiSnr - Indicates the signal to noise ratio of the link in dB
-
-*******************************************************************************/
-typedef struct
-{
- s16 unifiRssi;
- s16 unifiSnr;
-} CsrWifiSmeLinkQuality;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMibConfig
-
- DESCRIPTION
- Allows low level configuration in the chip.
-
- MEMBERS
- unifiFixMaxTxDataRate - This attribute is used in combination with
- unifiFixTxDataRate. If it is FALSE, then
- unifiFixTxDataRate specifies a specific data
- rate to use. If it is TRUE, unifiFixTxDataRate
- instead specifies a maximum data rate.
- unifiFixTxDataRate - Transmit rate for unicast data.
- See MIB description for more details
- dot11RtsThreshold - See IEEE 802.11 Standard
- dot11FragmentationThreshold - See IEEE 802.11 Standard
- dot11CurrentTxPowerLevel - See IEEE 802.11 Standard
-
-*******************************************************************************/
-typedef struct
-{
- u8 unifiFixMaxTxDataRate;
- u8 unifiFixTxDataRate;
- u16 dot11RtsThreshold;
- u16 dot11FragmentationThreshold;
- u16 dot11CurrentTxPowerLevel;
-} CsrWifiSmeMibConfig;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeP2pProfileIdentity
-
- DESCRIPTION
- Details to be filled in
-
- MEMBERS
- listenChannel -
- availabilityDuration -
- avalabilityPeriod -
-
-*******************************************************************************/
-typedef struct
-{
- u8 listenChannel;
- u16 availabilityDuration;
- u16 avalabilityPeriod;
-} CsrWifiSmeP2pProfileIdentity;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePmkid
-
- DESCRIPTION
- Defines a PMKID association with BSS
-
- MEMBERS
- bssid - BSS identifier
- pmkid - PMKID
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiMacAddress bssid;
- u8 pmkid[16];
-} CsrWifiSmePmkid;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePmkidCandidate
-
- DESCRIPTION
- Information for a PMKID candidate
-
- MEMBERS
- bssid - BSS identifier
- preAuthAllowed - Indicates whether preauthentication is allowed
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiMacAddress bssid;
- u8 preAuthAllowed;
-} CsrWifiSmePmkidCandidate;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePmkidList
-
- DESCRIPTION
- NOT USED
- Used in the Sync access API
-
- MEMBERS
- pmkidsCount - Number of PMKIDs in the list
- pmkids - Points to the first PMKID in the list
-
-*******************************************************************************/
-typedef struct
-{
- u8 pmkidsCount;
- CsrWifiSmePmkid *pmkids;
-} CsrWifiSmePmkidList;
-
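A hedged sketch of describing a single cached PMKID with CsrWifiSmePmkidList (illustration only: the helper name is invented, <linux/string.h> is assumed for memcpy, and CsrWifiMacAddress is assumed to be a plain 6-byte address structure). The list only points at the entry, so the caller must keep it alive for as long as the list is in use.

#include <linux/string.h>	/* memcpy */

static void csr_wifi_sme_build_pmkid_list(CsrWifiSmePmkidList *list,
					  CsrWifiSmePmkid *entry,
					  const u8 bssid[6],
					  const u8 pmkid[16])
{
	memcpy(&entry->bssid, bssid, sizeof(entry->bssid));	/* BSS identifier */
	memcpy(entry->pmkid, pmkid, sizeof(entry->pmkid));	/* 16-byte PMKID */

	list->pmkidsCount = 1;		/* number of PMKIDs in the list */
	list->pmkids = entry;		/* points to the first PMKID */
}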
-/*******************************************************************************
-
- NAME
- CsrWifiSmeRegulatoryDomainInfo
-
- DESCRIPTION
- Regulatory domain options.
-
- MEMBERS
- dot11MultiDomainCapabilityImplemented
- - TRUE if the multi domain capability is implemented
- dot11MultiDomainCapabilityEnabled
- - TRUE if the multi domain capability is enabled
- currentRegulatoryDomain
- - Current regulatory domain
- currentCountryCode
- - Current country code as defined by the IEEE 802.11
- standards
-
-*******************************************************************************/
-typedef struct
-{
- u8 dot11MultiDomainCapabilityImplemented;
- u8 dot11MultiDomainCapabilityEnabled;
- CsrWifiSmeRegulatoryDomain currentRegulatoryDomain;
- u8 currentCountryCode[2];
-} CsrWifiSmeRegulatoryDomainInfo;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeRoamingBandData
-
- DESCRIPTION
- Thresholds to define one usability level category for the received signal
-
- MEMBERS
- rssiHighThreshold - Received Signal Strength Indication upper bound in dBm
- for the usability level
- rssiLowThreshold - Received Signal Strength Indication lower bound in dBm
- for the usability level
- snrHighThreshold - Signal to Noise Ratio upper bound in dB for the
- usability level
- snrLowThreshold - Signal to Noise Ratio lower bound in dB for the
- usability level
-
-*******************************************************************************/
-typedef struct
-{
- s16 rssiHighThreshold;
- s16 rssiLowThreshold;
- s16 snrHighThreshold;
- s16 snrLowThreshold;
-} CsrWifiSmeRoamingBandData;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeScanConfigData
-
- DESCRIPTION
- Configures the scanning behaviour of the driver and firmware
-
- MEMBERS
- intervalSeconds - All the channels will be scanned once in this time
- interval.
- If connected, the channel scans are spread across
- the interval.
- If disconnected, all the channels will be scanned
- together
- validitySeconds - How long the scan results are cached
- minActiveChannelTimeTu - Minimum time of listening on a channel being
- actively scanned before leaving if no probe
- responses or beacon frames have been received
- maxActiveChannelTimeTu - Maximum time of listening on a channel being
- actively scanned
- minPassiveChannelTimeTu - Minimum time of listening on a channel being
- passively scanned before leaving if no beacon frames
- have been received
- maxPassiveChannelTimeTu - Maximum time of listening on a channel being
- passively scanned
-
-*******************************************************************************/
-typedef struct
-{
- u16 intervalSeconds;
- u16 validitySeconds;
- u16 minActiveChannelTimeTu;
- u16 maxActiveChannelTimeTu;
- u16 minPassiveChannelTimeTu;
- u16 maxPassiveChannelTimeTu;
-} CsrWifiSmeScanConfigData;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeTsfTime
-
- DESCRIPTION
- Time stamp representation
-
- MEMBERS
- data - TSF Bytes
-
-*******************************************************************************/
-typedef struct
-{
- u8 data[8];
-} CsrWifiSmeTsfTime;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeVersions
-
- DESCRIPTION
- Reports version information for the chip, the firmware and the driver and
- the SME.
-
- MEMBERS
- chipId - Chip ID
- chipVersion - Chip version ID
- firmwareBuild - Firmware Rom build number
- firmwarePatch - Firmware Patch build number (if applicable)
- firmwareHip - Firmware HIP protocol version number
- routerBuild - Router build number
- routerHip - Router HIP protocol version number
- smeBuild - SME build number
- smeHip - SME HIP protocol version number
-
-*******************************************************************************/
-typedef struct
-{
- u32 chipId;
- u32 chipVersion;
- u32 firmwareBuild;
- u32 firmwarePatch;
- u32 firmwareHip;
- char *routerBuild;
- u32 routerHip;
- char *smeBuild;
- u32 smeHip;
-} CsrWifiSmeVersions;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWmmAcParams
-
- DESCRIPTION
- Structure holding WMM AC params data.
-
- MEMBERS
- cwMin - Exponent for the calculation of CWmin. Range: 0
- to 15
- cwMax - Exponent for the calculation of CWmax. Range: 0
- to 15
- aifs - Arbitration Inter Frame Spacing in terms of
- number of timeslots. Range 2 to 15
- txopLimit - TXOP Limit in the units of 32 microseconds
- admissionControlMandatory - Indicates whether the admission control is
- mandatory or not. Current release does not
- support admission control, hence it shall be set
- to FALSE.
-
-*******************************************************************************/
-typedef struct
-{
- u8 cwMin;
- u8 cwMax;
- u8 aifs;
- u16 txopLimit;
- u8 admissionControlMandatory;
-} CsrWifiSmeWmmAcParams;
-
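Since cwMin and cwMax above carry exponents rather than window sizes, a short sketch of the usual IEEE 802.11 conversion, CW = 2^exponent - 1 (illustration only; the helper name is invented):

static void csr_wifi_sme_wmm_cw(const CsrWifiSmeWmmAcParams *ac,
				u16 *cw_min, u16 *cw_max)
{
	/* e.g. cwMin exponent 4  -> CWmin = 15 slots,
	 *      cwMax exponent 10 -> CWmax = 1023 slots */
	*cw_min = (u16)((1u << ac->cwMin) - 1);
	*cw_max = (u16)((1u << ac->cwMax) - 1);
}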
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWpsDeviceType
-
- DESCRIPTION
- Structure holding AP WPS device type data.
-
- MEMBERS
- deviceDetails - category, sub-category, etc.
-
-*******************************************************************************/
-typedef struct
-{
- u8 deviceDetails[8];
-} CsrWifiSmeWpsDeviceType;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWpsDeviceTypeCommon
-
- DESCRIPTION
- Structure holding common WPS device type data.
-
- MEMBERS
- spportWps - Indicates whether WPS is supported
- deviceType - WPS device type
-
-*******************************************************************************/
-typedef struct
-{
- u8 spportWps;
- u8 deviceType;
-} CsrWifiSmeWpsDeviceTypeCommon;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWpsInfo
-
- DESCRIPTION
- Structure holding WPS information.
-
- MEMBERS
- version - WPS version
- configMethods - Supported WPS configuration methods
- devicePassworId - WPS device password ID
-
-*******************************************************************************/
-typedef struct
-{
- u16 version;
- u16 configMethods;
- u16 devicePassworId;
-} CsrWifiSmeWpsInfo;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCloakedSsidConfig
-
- DESCRIPTION
- List of cloaked SSIDs.
-
- MEMBERS
- cloakedSsidsCount - Number of cloaked SSIDs
- cloakedSsids - Points to the first byte of the first SSID provided
-
-*******************************************************************************/
-typedef struct
-{
- u8 cloakedSsidsCount;
- CsrWifiSsid *cloakedSsids;
-} CsrWifiSmeCloakedSsidConfig;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCoexInfo
-
- DESCRIPTION
- Information and state related to coexistence.
-
- MEMBERS
- hasTrafficData - TRUE if any Wi-Fi traffic is detected
- currentTrafficType - Current type of traffic
- currentPeriodMs - Period of the traffic as detected by the traffic
- analysis.
- If the traffic is not periodic, it is set to 0.
- currentPowerSave - Current power save level
- currentCoexPeriodMs - Period of awakening for the firmware used when
- periodic traffic is detected.
- If the traffic is not periodic, it is set to 0.
- currentCoexLatencyMs - Period of awakening for the firmware used when
- non-periodic traffic is detected
- hasBtDevice - TRUE if there is a Bluetooth device connected
- currentBlackoutDurationUs - Current blackout duration for protecting
- Bluetooth
- currentBlackoutPeriodUs - Current blackout period
- currentCoexScheme - Defines the scheme for the coexistence
- signalling
-
-*******************************************************************************/
-typedef struct
-{
- u8 hasTrafficData;
- CsrWifiSmeTrafficType currentTrafficType;
- u16 currentPeriodMs;
- CsrWifiSmePowerSaveLevel currentPowerSave;
- u16 currentCoexPeriodMs;
- u16 currentCoexLatencyMs;
- u8 hasBtDevice;
- u32 currentBlackoutDurationUs;
- u32 currentBlackoutPeriodUs;
- CsrWifiSmeCoexScheme currentCoexScheme;
-} CsrWifiSmeCoexInfo;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeConnectionConfig
-
- DESCRIPTION
- Specifies the parameters that the SME should use in selecting a network.
-
- MEMBERS
- ssid
- - Service Set identifier
- bssid
- - BSS identifier
- bssType
- - Indicates the type of BSS
- ifIndex
- - Indicates the radio interface
- privacyMode
- - Specifies whether the privacy mode is enabled or disabled.
- authModeMask
- - Sets the authentication options that the SME can use while
- associating to the AP
- Set mask with values from CsrWifiSmeAuthMode
- encryptionModeMask
- - Sets the encryption options that the SME can use while
- associating to the AP
- Set mask with values from CsrWifiSmeEncryption
- mlmeAssociateReqInformationElementsLength
- - Length in bytes of information elements to be sent in the
- Association Request.
- mlmeAssociateReqInformationElements
- - Points to the first byte of the information elements, if
- any.
- wmmQosInfo
- - This parameter allows the driver's WMM behaviour to be
- configured.
- To enable support for WMM, use
- CSR_WIFI_SME_SME_CONFIG_SET_REQ with the
- CSR_WIFI_SME_WMM_MODE_AC_ENABLED bit set in wmmModeMask
- field in smeConfig parameter.
- Set mask with values from CsrWifiSmeWmmQosInfo
- adhocJoinOnly
- - This parameter is relevant only if bssType is NOT set to
- CSR_WIFI_SME_BSS_TYPE_INFRASTRUCTURE:
- if TRUE the SME will only try to join an ad-hoc network if
- there is one already established;
- if FALSE the SME will try to join an ad-hoc network if
- there is one already established or it will try to
- establish a new one
- adhocChannel
- - This parameter is relevant only if bssType is NOT set to
- CSR_WIFI_SME_BSS_TYPE_INFRASTRUCTURE:
- it indicates the channel to use when joining an ad hoc network.
- Setting this to 0 causes the SME to select a channel from
- those permitted in the regulatory domain.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiSsid ssid;
- CsrWifiMacAddress bssid;
- CsrWifiSmeBssType bssType;
- CsrWifiSmeRadioIF ifIndex;
- CsrWifiSme80211PrivacyMode privacyMode;
- CsrWifiSmeAuthModeMask authModeMask;
- CsrWifiSmeEncryptionMask encryptionModeMask;
- u16 mlmeAssociateReqInformationElementsLength;
- u8 *mlmeAssociateReqInformationElements;
- CsrWifiSmeWmmQosInfoMask wmmQosInfo;
- u8 adhocJoinOnly;
- u8 adhocChannel;
-} CsrWifiSmeConnectionConfig;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeConnectionInfo
-
- DESCRIPTION
- Parameters that the SME should use in selecting a network
-
- MEMBERS
- ssid - Service set identifier
- bssid - BSS identifier
- networkType80211 - Physical layer used for the connection
- channelNumber - Channel number
- channelFrequency - Channel frequency
- authMode - Authentication mode used for the connection
- pairwiseCipher - Encryption type for peer to peer communication
- groupCipher - Encryption type for broadcast and multicast
- communication
- ifIndex - Indicates the radio interface
- atimWindowTu - ATIM window specified for IBSS
- beaconPeriodTu - Interval between beacon packets
- reassociation - Indicates whether a reassociation occurred
- beaconFrameLength - Indicates the number of bytes of the beacon
- frame
- beaconFrame - Points at the first byte of the beacon frame
- associationReqFrameLength - Indicates the number of bytes of the
- association request frame
- associationReqFrame - Points at the first byte of the association
- request frame
- associationRspFrameLength - Indicates the number of bytes of the
- association response frame
- associationRspFrame - Points at the first byte of the association
- response frame
- assocScanInfoElementsLength - Indicates the number of bytes in the buffer
- pointed to by assocScanInfoElements
- assocScanInfoElements - Pointer to the buffer containing the
- information elements of the probe response
- received after the probe requests sent before
- attempting to authenticate to the network
- assocReqCapabilities - Reports the content of the Capability
- information element as specified in the
- association request.
- assocReqListenIntervalTu - Listen Interval specified in the association
- request
- assocReqApAddress - AP address to which the association requests
- has been sent
- assocReqInfoElementsLength - Indicates the number of bytes of the
- association request information elements
- assocReqInfoElements - Points at the first byte of the association
- request information elements
- assocRspResult - Result reported in the association response
- assocRspCapabilityInfo - Reports the content of the Capability
- information element as received in the
- association response.
- assocRspAssociationId - Reports the association ID received in the
- association response.
- assocRspInfoElementsLength - Indicates the number of bytes of the
- association response information elements
- assocRspInfoElements - Points at the first byte of the association
- response information elements
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiSsid ssid;
- CsrWifiMacAddress bssid;
- CsrWifiSme80211NetworkType networkType80211;
- u8 channelNumber;
- u16 channelFrequency;
- CsrWifiSmeAuthMode authMode;
- CsrWifiSmeEncryption pairwiseCipher;
- CsrWifiSmeEncryption groupCipher;
- CsrWifiSmeRadioIF ifIndex;
- u16 atimWindowTu;
- u16 beaconPeriodTu;
- u8 reassociation;
- u16 beaconFrameLength;
- u8 *beaconFrame;
- u16 associationReqFrameLength;
- u8 *associationReqFrame;
- u16 associationRspFrameLength;
- u8 *associationRspFrame;
- u16 assocScanInfoElementsLength;
- u8 *assocScanInfoElements;
- u16 assocReqCapabilities;
- u16 assocReqListenIntervalTu;
- CsrWifiMacAddress assocReqApAddress;
- u16 assocReqInfoElementsLength;
- u8 *assocReqInfoElements;
- CsrWifiSmeIEEE80211Result assocRspResult;
- u16 assocRspCapabilityInfo;
- u16 assocRspAssociationId;
- u16 assocRspInfoElementsLength;
- u8 *assocRspInfoElements;
-} CsrWifiSmeConnectionInfo;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeDeviceConfig
-
- DESCRIPTION
- General configuration options in the SME
-
- MEMBERS
- trustLevel - Level of trust of the information coming from the
- network
- countryCode - Country code as specified by IEEE 802.11 standard
- firmwareDriverInterface - Specifies the type of communication between Host
- and Firmware
- enableStrictDraftN - If TRUE TKIP is disallowed when connecting to
- 802.11n enabled access points
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiSme80211dTrustLevel trustLevel;
- u8 countryCode[2];
- CsrWifiSmeFirmwareDriverInterface firmwareDriverInterface;
- u8 enableStrictDraftN;
-} CsrWifiSmeDeviceConfig;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeDeviceInfo
-
- DESCRIPTION
- P2P Information for a P2P Device
-
- MEMBERS
- deviceAddress - Device Address of the P2P device
- configMethods - Supported WPS configuration methods.
- p2PDeviceCap - P2P device capabilities
- primDeviceType - Primary WPS device type
- secondaryDeviceTypeCount - Number of secondary device types
- secDeviceType - list of secondary WPS device types
- deviceName - Device name of up to 32 characters, without a trailing '\0'.
- deviceNameLength - Number of characters of the device name
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiMacAddress deviceAddress;
- CsrWifiSmeWpsConfigTypeMask configMethods;
- CsrWifiSmeP2pCapabilityMask p2PDeviceCap;
- CsrWifiSmeWpsDeviceType primDeviceType;
- u8 secondaryDeviceTypeCount;
- CsrWifiSmeWpsDeviceType *secDeviceType;
- u8 deviceName[32];
- u8 deviceNameLength;
-} CsrWifiSmeDeviceInfo;
-
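Because deviceName above is a fixed 32-byte field with no '\0' terminator and a separate length, a plain strcpy would be wrong; a hedged sketch of the intended handling (helper name invented, assumes <linux/string.h>):

#include <linux/string.h>	/* strlen, memcpy */

static void csr_wifi_sme_set_device_name(CsrWifiSmeDeviceInfo *info,
					 const char *name)
{
	size_t len = strlen(name);

	if (len > sizeof(info->deviceName))
		len = sizeof(info->deviceName);	/* truncate to 32 characters */

	memcpy(info->deviceName, name, len);	/* no trailing '\0' is stored */
	info->deviceNameLength = (u8)len;	/* length carried separately */
}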
-/*******************************************************************************
-
- NAME
- CsrWifiSmeDeviceInfoCommon
-
- DESCRIPTION
- Structure holding device information.
-
- MEMBERS
- p2pDeviceAddress -
- primaryDeviceType -
- secondaryDeviceTypesCount -
- secondaryDeviceTypes -
- deviceNameLength -
- deviceName -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiMacAddress p2pDeviceAddress;
- CsrWifiSmeWpsDeviceTypeCommon primaryDeviceType;
- u8 secondaryDeviceTypesCount;
- u8 secondaryDeviceTypes[10];
- u8 deviceNameLength;
- u8 deviceName[32];
-} CsrWifiSmeDeviceInfoCommon;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeHostConfig
-
- DESCRIPTION
- Defines the host power state (for example, on mains power, on battery
- power etc) and the periodicity of the traffic data.
-
- MEMBERS
- powerMode - The wireless manager application should use the
- powerMode parameter to inform the SME of the host
- power state.
- applicationDataPeriodMs - The applicationDataPeriodMs parameter allows a
- wireless manager application to inform the SME
- that an application is running that generates
- periodic network traffic and the period of the
- traffic.
- An example of such an application is a VoIP client.
- The wireless manager application should set
- applicationDataPeriodMs to the period in
- milliseconds between data packets, or to zero
- if no periodic application is running (0 = no
- periodic data).
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiSmeHostPowerMode powerMode;
- u16 applicationDataPeriodMs;
-} CsrWifiSmeHostConfig;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeKey
-
- DESCRIPTION
- Information for a key to be used for encryption
-
- MEMBERS
- keyType - Specifies whether the key is a pairwise or group key; it
- should be set to CSR_WIFI_SME_GROUP_KEY or
- CSR_WIFI_SME_PAIRWISE_KEY, as required.
- keyIndex - Specifies which WEP key (0-3) to set; it should be set to 0
- for a WPA/WPA2 pairwise key and non-zero for a WPA/WPA2
- group key.
- wepTxKey - If wepTxKey is TRUE, and the key is a WEP key, the key will
- be selected for encrypting transmitted packets.
- To select a previously defined key as the transmit
- encryption key, set keyIndex to the required key, wepTxKey
- to TRUE and the keyLength to 0.
- keyRsc - Key Receive Sequence Counter
- authenticator - If TRUE the WMA will act as authenticator.
- CURRENTLY NOT SUPPORTED
- address - BSS identifier of the AP
- keyLength - Length of the key in bytes
- key - Points to the first byte of the key
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiSmeKeyType keyType;
- u8 keyIndex;
- u8 wepTxKey;
- u16 keyRsc[8];
- u8 authenticator;
- CsrWifiMacAddress address;
- u8 keyLength;
- u8 key[32];
-} CsrWifiSmeKey;
-
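A sketch of the transmit-key selection rule documented above (keyIndex set to the required key, wepTxKey TRUE, keyLength 0). Illustration only: the helper name is invented and the choice of CSR_WIFI_SME_PAIRWISE_KEY for keyType is an assumption of this example, not something the header mandates.

#include <linux/string.h>	/* memset */

static void csr_wifi_sme_select_wep_tx_key(CsrWifiSmeKey *key, u8 index)
{
	memset(key, 0, sizeof(*key));

	key->keyType = CSR_WIFI_SME_PAIRWISE_KEY; /* assumed for this sketch */
	key->keyIndex = index;	/* which WEP key (0-3) to transmit with */
	key->wepTxKey = 1;	/* TRUE: use this key for transmitted packets */
	key->keyLength = 0;	/* 0: reuse previously defined key material */
}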
-/*******************************************************************************
-
- NAME
- CsrWifiSmeP2pClientInfoType
-
- DESCRIPTION
- P2P Information for a P2P Client
-
- MEMBERS
- p2PClientInterfaceAddress - MAC address of the P2P Client
- clientDeviceInfo - Device Information
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiMacAddress p2PClientInterfaceAddress;
- CsrWifiSmeDeviceInfo clientDeviceInfo;
-} CsrWifiSmeP2pClientInfoType;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeP2pGroupInfo
-
- DESCRIPTION
- P2P Information for a P2P Group
-
- MEMBERS
- groupCapability - P2P group capabilities
- p2pDeviceAddress - Device Address of the GO
- p2pClientInfoCount - Number of P2P Clients that belong to the group.
- p2PClientInfo - Pointer to the list containing client information for
- each client in the group
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiSmeP2pGroupCapabilityMask groupCapability;
- CsrWifiMacAddress p2pDeviceAddress;
- u8 p2pClientInfoCount;
- CsrWifiSmeP2pClientInfoType *p2PClientInfo;
-} CsrWifiSmeP2pGroupInfo;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePowerConfig
-
- DESCRIPTION
- Configures the power-save behaviour of the driver and firmware.
-
- MEMBERS
- powerSaveLevel - Power Save Level option
- listenIntervalTu - Interval for waking to receive beacon frames
- rxDtims - If TRUE, wake for DTIM every beacon period, to
- allow the reception of broadcast packets
- d3AutoScanMode - Defines whether the autonomous scanning will be
- turned off or will stay on during a D3 suspended
- period
- clientTrafficWindow - Deprecated
- opportunisticPowerSave - Deprecated
- noticeOfAbsence - Deprecated
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiSmePowerSaveLevel powerSaveLevel;
- u16 listenIntervalTu;
- u8 rxDtims;
- CsrWifiSmeD3AutoScanMode d3AutoScanMode;
- u8 clientTrafficWindow;
- u8 opportunisticPowerSave;
- u8 noticeOfAbsence;
-} CsrWifiSmePowerConfig;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeRoamingConfig
-
- DESCRIPTION
- Configures the roaming behaviour of the driver and firmware
-
- MEMBERS
- roamingBands - Defines the thresholds to determine the usability
- level of the current connection.
- roamingBands is indexed by the first 3 entries of
- the CsrWifiSmeBasicUsability enum
- disableSmoothRoaming - Disable the RSSI/SNR triggers from the Firmware
- that the SME uses to detect the quality of the
- connection.
- This implicitly disables disableRoamScans
- disableRoamScans - Disables the scanning for the roaming operation
- reconnectLimit - Maximum number of times SME may reconnect in the
- given interval
- reconnectLimitIntervalMs - Interval for maximum number of times SME may
- reconnect to the same Access Point
- roamScanCfg - Scanning behaviour specifically aimed at
- improving roaming performance.
- roamScanCfg is indexed by the first 3 entries of
- the CsrWifiSmeBasicUsability enum
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiSmeRoamingBandData roamingBands[3];
- u8 disableSmoothRoaming;
- u8 disableRoamScans;
- u8 reconnectLimit;
- u16 reconnectLimitIntervalMs;
- CsrWifiSmeScanConfigData roamScanCfg[3];
-} CsrWifiSmeRoamingConfig;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeScanConfig
-
- DESCRIPTION
- Parameters for the autonomous scanning behaviour of the system
-
- MEMBERS
- scanCfg - Scan configuration data.
- Indexed by the CsrWifiSmeBasicUsability enum
- disableAutonomousScans - Enables or disables the autonomous scan
- maxResults - Maximum number of results to be cached in the SME
- highRssiThreshold - High received signal strength indication threshold
- in dBm for an AP above which the system will
- report scan indications
- lowRssiThreshold - Low received signal strength indication threshold
- in dBm for an AP below which the system will
- report scan indications
- deltaRssiThreshold - Minimum difference for received signal strength
- indication in dBm for an AP which triggers a scan
- indication to be sent.
- highSnrThreshold - High Signal to Noise Ratio threshold in dB for an
- AP above which the system will report scan
- indications
- lowSnrThreshold - Low Signal to Noise Ratio threshold in dB for an
- AP below which the system will report scan
- indications
- deltaSnrThreshold - Minimum difference for Signal to Noise Ratio in dB
- for an AP which triggers a scan indication to be
- sent.
- passiveChannelListCount - Number of channels to be scanned passively.
- passiveChannelList - Points to the first channel to be scanned
- passively, if any.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiSmeScanConfigData scanCfg[4];
- u8 disableAutonomousScans;
- u16 maxResults;
- s8 highRssiThreshold;
- s8 lowRssiThreshold;
- s8 deltaRssiThreshold;
- s8 highSnrThreshold;
- s8 lowSnrThreshold;
- s8 deltaSnrThreshold;
- u16 passiveChannelListCount;
- u8 *passiveChannelList;
-} CsrWifiSmeScanConfig;
-
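A small sketch of supplying a passive channel list through CsrWifiSmeScanConfig (illustration only: the channel numbers and helper name are example values, and the array must outlive the configuration):

static u8 example_passive_channels[] = { 1, 6, 11 };	/* example 2.4 GHz channels */

static void csr_wifi_sme_set_passive_channels(CsrWifiSmeScanConfig *cfg)
{
	/* count of u8 channel entries, then a pointer to the first channel */
	cfg->passiveChannelListCount = sizeof(example_passive_channels);
	cfg->passiveChannelList = example_passive_channels;
}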
-/*******************************************************************************
-
- NAME
- CsrWifiSmeScanResult
-
- DESCRIPTION
- This structure defines the scan result for each BSS found
-
- MEMBERS
- ssid - Service set identifier
- bssid - BSS identifier
- rssi - Received signal strength indication in dBm
- snr - Signal to noise ratio in dB
- ifIndex - Indicates the radio interface
- beaconPeriodTu - Interval between beacon frames
- timeStamp - Timestamp in the BSS
- localTime - Timestamp in the Access Point
- channelFrequency - Channel frequency
- capabilityInformation - Capabilities of the BSS.
- channelNumber - Channel number
- usability - Indicates the usability level.
- bssType - Type of BSS.
- informationElementsLength - Number of bytes of the information elements
- received as part of the beacon or probe
- response.
- informationElements - Points to the first byte of the IEs received
- as part of the beacon or probe response.
- The format of the IEs is as specified in the
- IEEE 802.11 specification.
- p2pDeviceRole - Role of the P2P device.
- Relevant only if bssType is
- CSR_WIFI_SME_BSS_TYPE_P2P
- deviceInfo - Union containing P2P device info which
- depends on p2pDeviceRole parameter.
- deviceInforeservedCli -
- deviceInfogroupInfo -
- deviceInforeservedNone -
- deviceInfostandalonedevInfo -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiSsid ssid;
- CsrWifiMacAddress bssid;
- s16 rssi;
- s16 snr;
- CsrWifiSmeRadioIF ifIndex;
- u16 beaconPeriodTu;
- CsrWifiSmeTsfTime timeStamp;
- CsrWifiSmeTsfTime localTime;
- u16 channelFrequency;
- u16 capabilityInformation;
- u8 channelNumber;
- CsrWifiSmeBasicUsability usability;
- CsrWifiSmeBssType bssType;
- u16 informationElementsLength;
- u8 *informationElements;
- CsrWifiSmeP2pRole p2pDeviceRole;
- union {
- CsrWifiSmeEmpty reservedCli;
- CsrWifiSmeP2pGroupInfo groupInfo;
- CsrWifiSmeEmpty reservedNone;
- CsrWifiSmeDeviceInfo standalonedevInfo;
- } deviceInfo;
-} CsrWifiSmeScanResult;
-
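A sketch of reading the plain fields of one scan result (illustration only; helper name invented). Note that only one member of the deviceInfo union is meaningful, selected by p2pDeviceRole, and only when bssType is CSR_WIFI_SME_BSS_TYPE_P2P.

static void csr_wifi_sme_scan_result_summary(const CsrWifiSmeScanResult *r,
					     s16 *rssi_dbm, s16 *snr_db,
					     u8 *channel)
{
	*rssi_dbm = r->rssi;		/* received signal strength, dBm */
	*snr_db = r->snr;		/* signal to noise ratio, dB */
	*channel = r->channelNumber;	/* channel the BSS was seen on */
}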
-/*******************************************************************************
-
- NAME
- CsrWifiSmeStaConfig
-
- DESCRIPTION
- Station configuration options in the SME
-
- MEMBERS
- connectionQualityRssiChangeTrigger - Sets the difference of RSSI
- measurements which triggers reports
- from the Firmware
- connectionQualitySnrChangeTrigger - Sets the difference of SNR measurements
- which triggers reports from the
- Firmware
- wmmModeMask - Mask containing one or more values from
- CsrWifiSmeWmmMode
- ifIndex - Indicates the band of frequencies used
- allowUnicastUseGroupCipher - If TRUE, allows groupwise keys to be
- used if no pairwise key is specified
- enableOpportunisticKeyCaching - If TRUE, enables the Opportunistic Key
- Caching feature
-
-*******************************************************************************/
-typedef struct
-{
- u8 connectionQualityRssiChangeTrigger;
- u8 connectionQualitySnrChangeTrigger;
- CsrWifiSmeWmmModeMask wmmModeMask;
- CsrWifiSmeRadioIF ifIndex;
- u8 allowUnicastUseGroupCipher;
- u8 enableOpportunisticKeyCaching;
-} CsrWifiSmeStaConfig;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWep128Keys
-
- DESCRIPTION
- Structure holding WEP Authentication Type and WEP keys that can be used
- when using WEP128.
-
- MEMBERS
- wepAuthType - Mask to select the WEP authentication type (Open or Shared)
- selectedWepKey - Index to one of the four keys below indicating the
- currently used WEP key. Mapping From SME/User -> firmware.
- Key 1 -> Index 0. Key 2 -> Index 1. Key 3 -> Index 2.
- Key 4 -> Index 3.
- key1 - Value for key number 1.
- key2 - Value for key number 2.
- key3 - Value for key number 3.
- key4 - Value for key number 4.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiSmeWepAuthMode wepAuthType;
- u8 selectedWepKey;
- u8 key1[13];
- u8 key2[13];
- u8 key3[13];
- u8 key4[13];
-} CsrWifiSmeWep128Keys;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWep64Keys
-
- DESCRIPTION
- Structure holding WEP Authentication Type and WEP keys that can be used
- when using WEP64.
-
- MEMBERS
- wepAuthType - Mask to select the WEP authentication type (Open or Shared)
- selectedWepKey - Index to one of the four keys below indicating the
- currently used WEP key. Mapping From SME/User -> firmware.
- Key 1 -> Index 0. Key 2 -> Index 1. Key 3 -> Index 2.
- Key 4 -> Index 3.
- key1 - Value for key number 1.
- key2 - Value for key number 2.
- key3 - Value for key number 3.
- key4 - Value for key number 4.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiSmeWepAuthMode wepAuthType;
- u8 selectedWepKey;
- u8 key1[5];
- u8 key2[5];
- u8 key3[5];
- u8 key4[5];
-} CsrWifiSmeWep64Keys;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWepAuth
-
- DESCRIPTION
- WEP authentication parameter structure
-
- MEMBERS
- wepKeyType - WEP key type (128-bit or 64-bit)
- wepCredentials - Union containing credentials which depends on
- wepKeyType parameter.
- wepCredentialswep128Key -
- wepCredentialswep64Key -
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiSmeWepCredentialType wepKeyType;
- union {
- CsrWifiSmeWep128Keys wep128Key;
- CsrWifiSmeWep64Keys wep64Key;
- } wepCredentials;
-} CsrWifiSmeWepAuth;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWpsConfig
-
- DESCRIPTION
- Structure holding AP WPS Config data.
-
- MEMBERS
- wpsVersion - wpsVersion should be 0x10 for WPS1.0h or 0x20 for
- WSC2.0
- uuid - uuid.
- deviceName - Device name of up to 32 characters, without a trailing '\0'.
- deviceNameLength - deviceNameLen.
- manufacturer - manufacturer: CSR
- manufacturerLength - manufacturerLen.
- modelName - modelName Unifi
- modelNameLength - modelNameLen.
- modelNumber - modelNumber
- modelNumberLength - modelNumberLen.
- serialNumber - serialNumber
- primDeviceType - Primary WPS device type
- secondaryDeviceTypeCount - Number of secondary device types
- secondaryDeviceType - list of secondary WPS device types
- configMethods - Supported WPS config methods
- rfBands - RfBands.
- osVersion - Os version on which the device is running
-
-*******************************************************************************/
-typedef struct
-{
- u8 wpsVersion;
- u8 uuid[16];
- u8 deviceName[32];
- u8 deviceNameLength;
- u8 manufacturer[64];
- u8 manufacturerLength;
- u8 modelName[32];
- u8 modelNameLength;
- u8 modelNumber[32];
- u8 modelNumberLength;
- u8 serialNumber[32];
- CsrWifiSmeWpsDeviceType primDeviceType;
- u8 secondaryDeviceTypeCount;
- CsrWifiSmeWpsDeviceType *secondaryDeviceType;
- CsrWifiSmeWpsConfigTypeMask configMethods;
- u8 rfBands;
- u8 osVersion[4];
-} CsrWifiSmeWpsConfig;
-
-
-/* Downstream */
-#define CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST (0x0000)
-
-#define CSR_WIFI_SME_ACTIVATE_REQ ((CsrWifiSmePrim) (0x0000 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_ADHOC_CONFIG_GET_REQ ((CsrWifiSmePrim) (0x0001 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_ADHOC_CONFIG_SET_REQ ((CsrWifiSmePrim) (0x0002 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_BLACKLIST_REQ ((CsrWifiSmePrim) (0x0003 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_CALIBRATION_DATA_GET_REQ ((CsrWifiSmePrim) (0x0004 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_CALIBRATION_DATA_SET_REQ ((CsrWifiSmePrim) (0x0005 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_CCX_CONFIG_GET_REQ ((CsrWifiSmePrim) (0x0006 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_CCX_CONFIG_SET_REQ ((CsrWifiSmePrim) (0x0007 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_COEX_CONFIG_GET_REQ ((CsrWifiSmePrim) (0x0008 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_COEX_CONFIG_SET_REQ ((CsrWifiSmePrim) (0x0009 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_COEX_INFO_GET_REQ ((CsrWifiSmePrim) (0x000A + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_CONNECT_REQ ((CsrWifiSmePrim) (0x000B + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_CONNECTION_CONFIG_GET_REQ ((CsrWifiSmePrim) (0x000C + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_CONNECTION_INFO_GET_REQ ((CsrWifiSmePrim) (0x000D + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_CONNECTION_STATS_GET_REQ ((CsrWifiSmePrim) (0x000E + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_DEACTIVATE_REQ ((CsrWifiSmePrim) (0x000F + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_DISCONNECT_REQ ((CsrWifiSmePrim) (0x0010 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_EVENT_MASK_SET_REQ ((CsrWifiSmePrim) (0x0011 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_HOST_CONFIG_GET_REQ ((CsrWifiSmePrim) (0x0012 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_HOST_CONFIG_SET_REQ ((CsrWifiSmePrim) (0x0013 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_KEY_REQ ((CsrWifiSmePrim) (0x0014 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_LINK_QUALITY_GET_REQ ((CsrWifiSmePrim) (0x0015 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_MIB_CONFIG_GET_REQ ((CsrWifiSmePrim) (0x0016 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_MIB_CONFIG_SET_REQ ((CsrWifiSmePrim) (0x0017 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_MIB_GET_NEXT_REQ ((CsrWifiSmePrim) (0x0018 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_MIB_GET_REQ ((CsrWifiSmePrim) (0x0019 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_MIB_SET_REQ ((CsrWifiSmePrim) (0x001A + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_MULTICAST_ADDRESS_REQ ((CsrWifiSmePrim) (0x001B + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_PACKET_FILTER_SET_REQ ((CsrWifiSmePrim) (0x001C + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_PERMANENT_MAC_ADDRESS_GET_REQ ((CsrWifiSmePrim) (0x001D + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_PMKID_REQ ((CsrWifiSmePrim) (0x001E + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_POWER_CONFIG_GET_REQ ((CsrWifiSmePrim) (0x001F + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_POWER_CONFIG_SET_REQ ((CsrWifiSmePrim) (0x0020 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_REGULATORY_DOMAIN_INFO_GET_REQ ((CsrWifiSmePrim) (0x0021 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_ROAMING_CONFIG_GET_REQ ((CsrWifiSmePrim) (0x0022 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_ROAMING_CONFIG_SET_REQ ((CsrWifiSmePrim) (0x0023 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_SCAN_CONFIG_GET_REQ ((CsrWifiSmePrim) (0x0024 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_SCAN_CONFIG_SET_REQ ((CsrWifiSmePrim) (0x0025 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_SCAN_FULL_REQ ((CsrWifiSmePrim) (0x0026 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_SCAN_RESULTS_FLUSH_REQ ((CsrWifiSmePrim) (0x0027 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_SCAN_RESULTS_GET_REQ ((CsrWifiSmePrim) (0x0028 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_SME_STA_CONFIG_GET_REQ ((CsrWifiSmePrim) (0x0029 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_SME_STA_CONFIG_SET_REQ ((CsrWifiSmePrim) (0x002A + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_STATION_MAC_ADDRESS_GET_REQ ((CsrWifiSmePrim) (0x002B + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_TSPEC_REQ ((CsrWifiSmePrim) (0x002C + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_VERSIONS_GET_REQ ((CsrWifiSmePrim) (0x002D + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_WIFI_FLIGHTMODE_REQ ((CsrWifiSmePrim) (0x002E + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_WIFI_OFF_REQ ((CsrWifiSmePrim) (0x002F + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_WIFI_ON_REQ ((CsrWifiSmePrim) (0x0030 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_CLOAKED_SSIDS_SET_REQ ((CsrWifiSmePrim) (0x0031 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_CLOAKED_SSIDS_GET_REQ ((CsrWifiSmePrim) (0x0032 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_SME_COMMON_CONFIG_GET_REQ ((CsrWifiSmePrim) (0x0033 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_SME_COMMON_CONFIG_SET_REQ ((CsrWifiSmePrim) (0x0034 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_INTERFACE_CAPABILITY_GET_REQ ((CsrWifiSmePrim) (0x0035 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_WPS_CONFIGURATION_REQ ((CsrWifiSmePrim) (0x0036 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-#define CSR_WIFI_SME_SET_REQ ((CsrWifiSmePrim) (0x0037 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST))
-
-
-#define CSR_WIFI_SME_PRIM_DOWNSTREAM_HIGHEST (0x0037 + CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST)
-
-/* Upstream */
-#define CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST (0x0000 + CSR_PRIM_UPSTREAM)
-
-#define CSR_WIFI_SME_ACTIVATE_CFM ((CsrWifiSmePrim)(0x0000 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_ADHOC_CONFIG_GET_CFM ((CsrWifiSmePrim)(0x0001 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_ADHOC_CONFIG_SET_CFM ((CsrWifiSmePrim)(0x0002 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_ASSOCIATION_COMPLETE_IND ((CsrWifiSmePrim)(0x0003 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_ASSOCIATION_START_IND ((CsrWifiSmePrim)(0x0004 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_BLACKLIST_CFM ((CsrWifiSmePrim)(0x0005 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_CALIBRATION_DATA_GET_CFM ((CsrWifiSmePrim)(0x0006 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_CALIBRATION_DATA_SET_CFM ((CsrWifiSmePrim)(0x0007 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_CCX_CONFIG_GET_CFM ((CsrWifiSmePrim)(0x0008 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_CCX_CONFIG_SET_CFM ((CsrWifiSmePrim)(0x0009 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_COEX_CONFIG_GET_CFM ((CsrWifiSmePrim)(0x000A + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_COEX_CONFIG_SET_CFM ((CsrWifiSmePrim)(0x000B + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_COEX_INFO_GET_CFM ((CsrWifiSmePrim)(0x000C + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_CONNECT_CFM ((CsrWifiSmePrim)(0x000D + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_CONNECTION_CONFIG_GET_CFM ((CsrWifiSmePrim)(0x000E + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_CONNECTION_INFO_GET_CFM ((CsrWifiSmePrim)(0x000F + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_CONNECTION_QUALITY_IND ((CsrWifiSmePrim)(0x0010 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_CONNECTION_STATS_GET_CFM ((CsrWifiSmePrim)(0x0011 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_DEACTIVATE_CFM ((CsrWifiSmePrim)(0x0012 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_DISCONNECT_CFM ((CsrWifiSmePrim)(0x0013 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_EVENT_MASK_SET_CFM ((CsrWifiSmePrim)(0x0014 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_HOST_CONFIG_GET_CFM ((CsrWifiSmePrim)(0x0015 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_HOST_CONFIG_SET_CFM ((CsrWifiSmePrim)(0x0016 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_IBSS_STATION_IND ((CsrWifiSmePrim)(0x0017 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_KEY_CFM ((CsrWifiSmePrim)(0x0018 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_LINK_QUALITY_GET_CFM ((CsrWifiSmePrim)(0x0019 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_MEDIA_STATUS_IND ((CsrWifiSmePrim)(0x001A + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_MIB_CONFIG_GET_CFM ((CsrWifiSmePrim)(0x001B + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_MIB_CONFIG_SET_CFM ((CsrWifiSmePrim)(0x001C + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_MIB_GET_CFM ((CsrWifiSmePrim)(0x001D + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_MIB_GET_NEXT_CFM ((CsrWifiSmePrim)(0x001E + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_MIB_SET_CFM ((CsrWifiSmePrim)(0x001F + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_MIC_FAILURE_IND ((CsrWifiSmePrim)(0x0020 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_MULTICAST_ADDRESS_CFM ((CsrWifiSmePrim)(0x0021 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_PACKET_FILTER_SET_CFM ((CsrWifiSmePrim)(0x0022 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_PERMANENT_MAC_ADDRESS_GET_CFM ((CsrWifiSmePrim)(0x0023 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_PMKID_CANDIDATE_LIST_IND ((CsrWifiSmePrim)(0x0024 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_PMKID_CFM ((CsrWifiSmePrim)(0x0025 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_POWER_CONFIG_GET_CFM ((CsrWifiSmePrim)(0x0026 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_POWER_CONFIG_SET_CFM ((CsrWifiSmePrim)(0x0027 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_REGULATORY_DOMAIN_INFO_GET_CFM ((CsrWifiSmePrim)(0x0028 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_ROAM_COMPLETE_IND ((CsrWifiSmePrim)(0x0029 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_ROAM_START_IND ((CsrWifiSmePrim)(0x002A + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_ROAMING_CONFIG_GET_CFM ((CsrWifiSmePrim)(0x002B + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_ROAMING_CONFIG_SET_CFM ((CsrWifiSmePrim)(0x002C + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_SCAN_CONFIG_GET_CFM ((CsrWifiSmePrim)(0x002D + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_SCAN_CONFIG_SET_CFM ((CsrWifiSmePrim)(0x002E + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_SCAN_FULL_CFM ((CsrWifiSmePrim)(0x002F + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_SCAN_RESULT_IND ((CsrWifiSmePrim)(0x0030 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_SCAN_RESULTS_FLUSH_CFM ((CsrWifiSmePrim)(0x0031 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_SCAN_RESULTS_GET_CFM ((CsrWifiSmePrim)(0x0032 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_SME_STA_CONFIG_GET_CFM ((CsrWifiSmePrim)(0x0033 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_SME_STA_CONFIG_SET_CFM ((CsrWifiSmePrim)(0x0034 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_STATION_MAC_ADDRESS_GET_CFM ((CsrWifiSmePrim)(0x0035 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_TSPEC_IND ((CsrWifiSmePrim)(0x0036 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_TSPEC_CFM ((CsrWifiSmePrim)(0x0037 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_VERSIONS_GET_CFM ((CsrWifiSmePrim)(0x0038 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_WIFI_FLIGHTMODE_CFM ((CsrWifiSmePrim)(0x0039 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_WIFI_OFF_IND ((CsrWifiSmePrim)(0x003A + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_WIFI_OFF_CFM ((CsrWifiSmePrim)(0x003B + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_WIFI_ON_CFM ((CsrWifiSmePrim)(0x003C + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_CLOAKED_SSIDS_SET_CFM ((CsrWifiSmePrim)(0x003D + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_CLOAKED_SSIDS_GET_CFM ((CsrWifiSmePrim)(0x003E + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_WIFI_ON_IND ((CsrWifiSmePrim)(0x003F + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_SME_COMMON_CONFIG_GET_CFM ((CsrWifiSmePrim)(0x0040 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_SME_COMMON_CONFIG_SET_CFM ((CsrWifiSmePrim)(0x0041 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_INTERFACE_CAPABILITY_GET_CFM ((CsrWifiSmePrim)(0x0042 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_ERROR_IND ((CsrWifiSmePrim)(0x0043 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_INFO_IND ((CsrWifiSmePrim)(0x0044 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_CORE_DUMP_IND ((CsrWifiSmePrim)(0x0045 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_AMP_STATUS_CHANGE_IND ((CsrWifiSmePrim)(0x0046 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-#define CSR_WIFI_SME_WPS_CONFIGURATION_CFM ((CsrWifiSmePrim)(0x0047 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST))
-
-#define CSR_WIFI_SME_PRIM_UPSTREAM_HIGHEST (0x0047 + CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST)
-
-#define CSR_WIFI_SME_PRIM_DOWNSTREAM_COUNT (CSR_WIFI_SME_PRIM_DOWNSTREAM_HIGHEST + 1 - CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST)
-#define CSR_WIFI_SME_PRIM_UPSTREAM_COUNT (CSR_WIFI_SME_PRIM_UPSTREAM_HIGHEST + 1 - CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST)
-
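A sketch of classifying a primitive value with the range macros above (illustration only; helper names invented):

static int csr_wifi_sme_prim_is_downstream(CsrWifiSmePrim prim)
{
	return prim >= CSR_WIFI_SME_PRIM_DOWNSTREAM_LOWEST &&
	       prim <= CSR_WIFI_SME_PRIM_DOWNSTREAM_HIGHEST;
}

static int csr_wifi_sme_prim_is_upstream(CsrWifiSmePrim prim)
{
	return prim >= CSR_WIFI_SME_PRIM_UPSTREAM_LOWEST &&
	       prim <= CSR_WIFI_SME_PRIM_UPSTREAM_HIGHEST;
}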
-/*******************************************************************************
-
- NAME
- CsrWifiSmeActivateReq
-
- DESCRIPTION
- The WMA sends this primitive to activate the SME.
- The WMA must activate the SME before it can send any other primitive.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
-} CsrWifiSmeActivateReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeAdhocConfigGetReq
-
- DESCRIPTION
- This primitive gets the value of the adHocConfig parameter.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
-} CsrWifiSmeAdhocConfigGetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeAdhocConfigSetReq
-
- DESCRIPTION
- This primitive sets the value of the adHocConfig parameter.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- adHocConfig - Sets the values to use when starting an ad hoc network.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiSmeAdHocConfig adHocConfig;
-} CsrWifiSmeAdhocConfigSetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeBlacklistReq
-
- DESCRIPTION
- The wireless manager application should call this primitive to notify the
- driver of any networks that should not be connected to. The interface
- allows the wireless manager application to query, add, remove, and flush
- the BSSIDs that the driver may not connect or roam to.
- When this primitive adds to the blacklist the BSSID to which the SME is
- currently connected, the SME will try to roam, if applicable, to another
- BSSID in the same ESS; if the roaming procedure fails, the SME will
- disconnect.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- action - The value of the CsrWifiSmeListAction parameter instructs
- the driver to modify or provide the list of blacklisted
- networks.
- setAddressCount - Number of BSSIDs sent with this primitive
- setAddresses - Pointer to the list of BSSIDs sent with the primitive, set
- to NULL if none is sent.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiSmeListAction action;
- u8 setAddressCount;
- CsrWifiMacAddress *setAddresses;
-} CsrWifiSmeBlacklistReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCalibrationDataGetReq
-
- DESCRIPTION
- This primitive retrieves the Wi-Fi radio calibration data.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
-} CsrWifiSmeCalibrationDataGetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCalibrationDataSetReq
-
- DESCRIPTION
- This primitive sets the Wi-Fi radio calibration data.
- Using this primitive with proper calibration data avoids
- time-consuming configuration after power-up.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- calibrationDataLength - Number of bytes in the buffer pointed to by
- calibrationData
- calibrationData - Pointer to a buffer of length calibrationDataLength
- containing the calibration data
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 calibrationDataLength;
- u8 *calibrationData;
-} CsrWifiSmeCalibrationDataSetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCcxConfigGetReq
-
- DESCRIPTION
- This primitive gets the value of the CcxConfig parameter.
- CURRENTLY NOT SUPPORTED.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
-} CsrWifiSmeCcxConfigGetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCcxConfigSetReq
-
- DESCRIPTION
- This primitive sets the value of the CcxConfig parameter.
- CURRENTLY NOT SUPPORTED.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- ccxConfig - Currently not supported
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiSmeCcxConfig ccxConfig;
-} CsrWifiSmeCcxConfigSetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCoexConfigGetReq
-
- DESCRIPTION
- This primitive gets the value of the CoexConfig parameter.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
-} CsrWifiSmeCoexConfigGetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCoexConfigSetReq
-
- DESCRIPTION
- This primitive sets the value of the CoexConfig parameter.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- coexConfig - Configures the coexistence behaviour
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiSmeCoexConfig coexConfig;
-} CsrWifiSmeCoexConfigSetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCoexInfoGetReq
-
- DESCRIPTION
- This primitive gets the value of the CoexInfo parameter.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
-} CsrWifiSmeCoexInfoGetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeConnectReq
-
- DESCRIPTION
- The wireless manager application calls this primitive to start the
- process of joining an 802.11 wireless network or to start an ad hoc
- network.
- The structure pointed by connectionConfig contains parameters describing
- the network to join or, in case of an ad hoc network, to host or join.
- The SME will select a network, perform the IEEE 802.11 Join, Authenticate
- and Associate exchanges.
- The SME selects the networks from the current scan list that match both
- the SSID and BSSID, however either or both of these may be the wildcard
- value. Using this rule, the following operations are possible:
- * To connect to a network by name, specify the SSID and set the BSSID to
- 0xFF 0xFF 0xFF 0xFF 0xFF 0xFF. If there are two or more networks visible,
- the SME will select the one with the strongest signal.
- * To connect to a specific network, specify the BSSID. The SSID is
- optional, but if given it must match the SSID of the network. An empty
- SSID may be specified by setting the SSID length to zero. Please note
- that if the BSSID is specified (i.e. not equal to 0xFF 0xFF 0xFF 0xFF
- 0xFF 0xFF), the SME will not attempt to roam if signal conditions become
- poor, even if there is an alternative AP with an SSID that matches the
- current network SSID.
- * To connect to any network matching the other parameters (i.e. security,
- etc), set the SSID length to zero and set the BSSID to 0xFF 0xFF 0xFF
- 0xFF 0xFF 0xFF. In this case, the SME will order all available networks
- by their signal strengths and will iterate through this list until it
- successfully connects.
- NOTE: Specifying the BSSID will restrict the selection to one specific
- network. If SSID and BSSID are given, they must both match the network
- for it to be selected. To select a network based on the SSID only, the
- wireless manager application must set the BSSID to 0xFF 0xFF 0xFF 0xFF
- 0xFF 0xFF.
- The SME will try to connect to each network that matches the provided
- parameters, one by one, until it succeeds or has tried unsuccessfully
- with all the matching networks.
- If there is no network that matches the parameters and the request allows
- hosting an ad hoc network, the SME will advertise a new ad hoc network
- instead.
- If the SME cannot connect, it will notify the failure in the confirm.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- connectionConfig - Describes the candidate network to join or to host.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiSmeConnectionConfig connectionConfig;
-} CsrWifiSmeConnectReq;
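/* Illustrative sketch (not part of the original header): how a wireless
 * manager application might fill in a CsrWifiSmeConnectReq to join a
 * network by name, using the wildcard-BSSID rule described above.  The
 * CsrWifiSmeConnectionConfig member names (ssid, bssid) and the
 * CsrWifiSsid/CsrWifiMacAddress layouts are assumed from the MEMBERS
 * descriptions elsewhere in this header; how the request is queued to
 * the SME is deliberately left out.  Assumes <string.h>/<stddef.h>.
 */
static void example_fill_connect_req(CsrWifiSmeConnectReq *req,
                                     const char *name, size_t name_len)
{
    memset(req, 0, sizeof(*req));
    req->interfaceTag = 0;                        /* first (only) interface */

    /* SSID given, BSSID wildcarded: the SME picks the strongest match.
     * Caller ensures name_len fits the assumed CsrWifiSsid buffer. */
    memcpy(req->connectionConfig.ssid.ssid, name, name_len);
    req->connectionConfig.ssid.length = (u8)name_len;
    memset(req->connectionConfig.bssid.a, 0xFF,
           sizeof(req->connectionConfig.bssid.a));
}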
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeConnectionConfigGetReq
-
- DESCRIPTION
- This primitive gets the value of the ConnectionConfig parameter.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
-} CsrWifiSmeConnectionConfigGetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeConnectionInfoGetReq
-
- DESCRIPTION
- This primitive gets the value of the ConnectionInfo parameter.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
-} CsrWifiSmeConnectionInfoGetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeConnectionStatsGetReq
-
- DESCRIPTION
- This primitive gets the value of the ConnectionStats parameter.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
-} CsrWifiSmeConnectionStatsGetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeDeactivateReq
-
- DESCRIPTION
- The WMA sends this primitive to deactivate the SME.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
-} CsrWifiSmeDeactivateReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeDisconnectReq
-
- DESCRIPTION
- The wireless manager application may disconnect from the current network
- by calling this primitive
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
-} CsrWifiSmeDisconnectReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeEventMaskSetReq
-
- DESCRIPTION
- The wireless manager application may register with the SME to receive
- notification of interesting events. Indications will be sent only if the
- wireless manager explicitly registers to be notified of that event.
- indMask is a bit mask of values defined in CsrWifiSmeIndicationsMask.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- indMask - Set mask with values from CsrWifiSmeIndications
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiSmeIndicationsMask indMask;
-} CsrWifiSmeEventMaskSetReq;
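/* Illustrative sketch (not part of the original header): registering for a
 * subset of SME indications.  The individual bit names below
 * (CSR_WIFI_SME_INDICATIONS_MEDIASTATUS, ..._CONNECTIONQUALITY) are assumed
 * values of CsrWifiSmeIndications and may differ; the point is only that
 * indMask is a bitwise OR of such flags, and indications not selected here
 * will not be delivered.
 */
static void example_fill_event_mask_req(CsrWifiSmeEventMaskSetReq *req)
{
    req->indMask = CSR_WIFI_SME_INDICATIONS_MEDIASTATUS |        /* assumed */
                   CSR_WIFI_SME_INDICATIONS_CONNECTIONQUALITY;   /* assumed */
}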
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeHostConfigGetReq
-
- DESCRIPTION
- This primitive gets the value of the hostConfig parameter.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
-} CsrWifiSmeHostConfigGetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeHostConfigSetReq
-
- DESCRIPTION
- This primitive sets the value of the hostConfig parameter.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- hostConfig - Communicates a change of host power state (for example, on
- mains power, on battery power etc) and of the periodicity of
- traffic data
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiSmeHostConfig hostConfig;
-} CsrWifiSmeHostConfigSetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeKeyReq
-
- DESCRIPTION
- The wireless manager application calls this primitive to add or remove
- keys that the chip should use for encryption of data.
- The interface allows the wireless manager application to add and remove
- keys according to the specified action.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- action - The value of the CsrWifiSmeListAction parameter instructs the
- driver to modify or provide the list of keys.
- CSR_WIFI_SME_LIST_ACTION_GET is not supported here.
- key - Key to be added or removed
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiSmeListAction action;
- CsrWifiSmeKey key;
-} CsrWifiSmeKeyReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeLinkQualityGetReq
-
- DESCRIPTION
- This primitive gets the value of the LinkQuality parameter.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
-} CsrWifiSmeLinkQualityGetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMibConfigGetReq
-
- DESCRIPTION
- This primitive gets the value of the MibConfig parameter.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
-} CsrWifiSmeMibConfigGetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMibConfigSetReq
-
- DESCRIPTION
- This primitive sets the value of the MibConfig parameter.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- mibConfig - Conveys the desired values of the various IEEE 802.11
- attributes to be configured
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiSmeMibConfig mibConfig;
-} CsrWifiSmeMibConfigSetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMibGetNextReq
-
- DESCRIPTION
- To read a sequence of MIB parameters, for example a table, call this
- primitive to find the name of the next MIB variable
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- mibAttributeLength - Length of mibAttribute
- mibAttribute - Points to a VarBind or VarBindList containing the
- name(s) of the MIB variable(s) to search from.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 mibAttributeLength;
- u8 *mibAttribute;
-} CsrWifiSmeMibGetNextReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMibGetReq
-
- DESCRIPTION
- The wireless manager application calls this primitive to retrieve one or
- more MIB variables.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- mibAttributeLength - Length of mibAttribute
- mibAttribute - Points to the VarBind or VarBindList containing the
- names of the MIB variables to be retrieved
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 mibAttributeLength;
- u8 *mibAttribute;
-} CsrWifiSmeMibGetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMibSetReq
-
- DESCRIPTION
- The SME provides raw access to the MIB on the chip, which may be used by
- some configuration or diagnostic utilities, but is not normally needed by
- the wireless manager application.
- The MIB access functions use BER encoded names (OID) of the MIB
- parameters and BER encoded values, as described in the chip Host
- Interface Protocol Specification.
- The MIB parameters are described in 'Wi-Fi 5.0.0 Management Information
- Base Reference Guide'.
- The wireless manager application calls this primitive to set one or more
- MIB variables
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- mibAttributeLength - Length of mibAttribute
- mibAttribute - Points to the VarBind or VarBindList containing the
- names and values of the MIB variables to set
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 mibAttributeLength;
- u8 *mibAttribute;
-} CsrWifiSmeMibSetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMulticastAddressReq
-
- DESCRIPTION
- The wireless manager application calls this primitive to specify the
- multicast addresses which the chip should recognise. The interface allows
- the wireless manager application to query, add, remove and flush the
- multicast addresses for the network interface according to the specified
- action.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- action - The value of the CsrWifiSmeListAction parameter
- instructs the driver to modify or provide the list of
- MAC addresses.
- setAddressesCount - Number of MAC addresses sent with the primitive
- setAddresses - Pointer to the list of MAC Addresses sent with the
- primitive, set to NULL if none is sent.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiSmeListAction action;
- u8 setAddressesCount;
- CsrWifiMacAddress *setAddresses;
-} CsrWifiSmeMulticastAddressReq;
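/* Illustrative sketch (not part of the original header): asking the chip to
 * recognise two additional multicast MAC addresses.  The list-action value
 * CSR_WIFI_SME_LIST_ACTION_ADD is an assumed enumerator name based on the
 * descriptions in this file; the setAddresses array must remain valid until
 * the matching confirm arrives.  Only the list-related members are shown.
 */
static void example_fill_multicast_req(CsrWifiSmeMulticastAddressReq *req,
                                       CsrWifiMacAddress addrs[2])
{
    req->interfaceTag      = 0;
    req->action            = CSR_WIFI_SME_LIST_ACTION_ADD;   /* assumed name */
    req->setAddressesCount = 2;
    req->setAddresses      = addrs;     /* NULL with count 0 means "none" */
}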
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePacketFilterSetReq
-
- DESCRIPTION
- The wireless manager application should call this primitive to enable or
- disable filtering of broadcast packets: uninteresting broadcast packets
- will be dropped by the Wi-Fi chip instead of being passed up to the
- host.
- This has the advantage of saving power in the host application processor
- as it removes the need to process unwanted packets.
- All broadcast packets are filtered according to the filter and the filter
- mode provided, except ARP packets, which are filtered using
- arpFilterAddress.
- Filters are not cumulative: only the parameters specified in the most
- recent successful request are significant.
- For more information, see 'UniFi Firmware API Specification'.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- filterLength - Length of the filter in bytes.
- filterLength=0 disables the filter previously set
- filter - Points to the first byte of the filter provided, if any.
- This shall include zero or more instances of the
- information elements of one of these types
- * Traffic Classification (TCLAS) elements
- * WMM-SA TCLAS elements
- mode - Specifies whether the filter selects or excludes packets
- matching the filter
- arpFilterAddress - IPv4 address to be used for filtering the ARP packets.
- * If the specified address is the IPv4 broadcast address
- (255.255.255.255), all ARP packets are reported to the
- host,
- * If the specified address is NOT the IPv4 broadcast
- address, only ARP packets with the specified address in
- the Source or Target Protocol Address fields are reported
- to the host
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- u16 filterLength;
- u8 *filter;
- CsrWifiSmePacketFilterMode mode;
- CsrWifiIp4Address arpFilterAddress;
-} CsrWifiSmePacketFilterSetReq;
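/* Illustrative sketch (not part of the original header): clearing any
 * broadcast filter (filterLength = 0) while still restricting ARP reporting
 * to a single IPv4 address, per the arpFilterAddress rules above.  The
 * CsrWifiIp4Address member name 'a' is an assumption, and 'mode' is left at
 * zero since it only matters when a TCLAS filter is supplied.  A real
 * filter would instead point 'filter' at TCLAS information elements.
 * Assumes <string.h>.
 */
static void example_fill_packet_filter_req(CsrWifiSmePacketFilterSetReq *req,
                                           const u8 ipv4[4])
{
    memset(req, 0, sizeof(*req));
    req->interfaceTag = 0;
    req->filterLength = 0;        /* 0 => previously set filter is disabled */
    req->filter       = NULL;
    /* Only ARP frames naming this address are passed up to the host. */
    memcpy(req->arpFilterAddress.a, ipv4, 4);        /* member name assumed */
}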
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePermanentMacAddressGetReq
-
- DESCRIPTION
- This primitive retrieves the MAC address stored in EEPROM
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
-} CsrWifiSmePermanentMacAddressGetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePmkidReq
-
- DESCRIPTION
- The wireless manager application calls this primitive to request an
- operation on the SME PMKID list.
- The action argument specifies the operation to perform.
- When the connection is complete, the wireless manager application may
- then send and receive EAPOL packets to complete WPA or WPA2
- authentication if appropriate.
- The wireless manager application can then pass the resulting encryption
- keys using this primitive.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- action - The value of the CsrWifiSmeListAction parameter instructs
- the driver to modify or provide the list of PMKIDs.
- setPmkidsCount - Number of PMKIDs sent with the primitive
- setPmkids - Pointer to the list of PMKIDs sent with the primitive, set
- to NULL if none is sent.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiSmeListAction action;
- u8 setPmkidsCount;
- CsrWifiSmePmkid *setPmkids;
-} CsrWifiSmePmkidReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePowerConfigGetReq
-
- DESCRIPTION
- This primitive gets the value of the PowerConfig parameter.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
-} CsrWifiSmePowerConfigGetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePowerConfigSetReq
-
- DESCRIPTION
- This primitive sets the value of the PowerConfig parameter.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- powerConfig - Power saving configuration
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiSmePowerConfig powerConfig;
-} CsrWifiSmePowerConfigSetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeRegulatoryDomainInfoGetReq
-
- DESCRIPTION
- This primitive gets the value of the RegulatoryDomainInfo parameter.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
-} CsrWifiSmeRegulatoryDomainInfoGetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeRoamingConfigGetReq
-
- DESCRIPTION
- This primitive gets the value of the RoamingConfig parameter.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
-} CsrWifiSmeRoamingConfigGetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeRoamingConfigSetReq
-
- DESCRIPTION
- This primitive sets the value of the RoamingConfig parameter.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- roamingConfig - Desired roaming behaviour values
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiSmeRoamingConfig roamingConfig;
-} CsrWifiSmeRoamingConfigSetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeScanConfigGetReq
-
- DESCRIPTION
- This primitive gets the value of the ScanConfig parameter.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
-} CsrWifiSmeScanConfigGetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeScanConfigSetReq
-
- DESCRIPTION
- This primitive sets the value of the ScanConfig parameter.
- The SME normally configures the firmware to perform autonomous scanning
- without involving the host.
- The firmware passes beacon / probe response or indicates loss of beacon
- on certain changes of state, for example:
- * A new AP is seen for the first time
- * An AP is no longer visible
- * The signal strength of an AP changes by more than a certain amount, as
- configured by the thresholds in the scanConfig parameter
- In addition to the autonomous scan, the wireless manager application may
- request a scan at any time using CSR_WIFI_SME_SCAN_FULL_REQ.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- scanConfig - Reports the configuration for the autonomous scanning behaviour
- of the firmware
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiSmeScanConfig scanConfig;
-} CsrWifiSmeScanConfigSetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeScanFullReq
-
- DESCRIPTION
- The wireless manager application should call this primitive to request a
- full scan.
- Channels are scanned actively or passively according to the requirement
- set by the regulatory domain.
- If the SME receives this primitive while a full scan is going on, the new
- request is buffered and it will be served after the current full scan is
- completed.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- ssidCount - Number of SSIDs provided.
- If it is 0, the SME will attempt to detect any network
- ssid - Points to the first SSID provided, if any.
- bssid - BSS identifier.
- If it is equal to FF-FF-FF-FF-FF-FF, the SME will listen for
- messages from any BSS.
- If it is different from FF-FF-FF-FF-FF-FF and any SSID is
- provided, one SSID must match the network of the BSS.
- forceScan - Forces the scan even if the SME is in a state which would
- normally prevent it (e.g. autonomous scan is running).
- bssType - Type of BSS to scan for
- scanType - Type of scan to perform
- channelListCount - Number of channels provided.
- If it is 0, the SME will initiate a scan of all the
- supported channels that are permitted by the current
- regulatory domain.
- channelList - Points to the first channel, or NULL if channelListCount
- is zero.
- probeIeLength - Length of the information element in bytes to be sent
- with the probe message.
- probeIe - Points to the first byte of the information element to be
- sent with the probe message.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u8 ssidCount;
- CsrWifiSsid *ssid;
- CsrWifiMacAddress bssid;
- u8 forceScan;
- CsrWifiSmeBssType bssType;
- CsrWifiSmeScanType scanType;
- u16 channelListCount;
- u8 *channelList;
- u16 probeIeLength;
- u8 *probeIe;
-} CsrWifiSmeScanFullReq;
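/* Illustrative sketch (not part of the original header): a "scan for
 * everything" CsrWifiSmeScanFullReq using the wildcard rules described
 * above (no SSIDs, broadcast BSSID, no channel list, no probe IE).  The
 * CsrWifiMacAddress member name 'a' and the CSR_WIFI_SME_BSS_TYPE_ANY_BSS /
 * CSR_WIFI_SME_SCAN_TYPE_ALL enumerator names are assumptions.
 * Assumes <string.h>.
 */
static void example_fill_full_scan_req(CsrWifiSmeScanFullReq *req)
{
    memset(req, 0, sizeof(*req));               /* counts 0, pointers NULL */
    memset(req->bssid.a, 0xFF, sizeof(req->bssid.a));   /* listen to any BSS */
    req->forceScan = 1;                 /* scan even if autonomous scan runs */
    req->bssType   = CSR_WIFI_SME_BSS_TYPE_ANY_BSS;        /* assumed name */
    req->scanType  = CSR_WIFI_SME_SCAN_TYPE_ALL;           /* assumed name */
}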
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeScanResultsFlushReq
-
- DESCRIPTION
- The Wireless Manager calls this primitive to ask the SME to delete all
- scan results from its cache, except for the scan result of any currently
- connected network.
- As scan results are received by the SME from the firmware, they are
- cached in the SME memory.
- Any time the Wireless Manager requests scan results, they are returned
- from the SME internal cache.
- For some applications it may be desirable to clear this cache prior to
- requesting that a scan be performed; this will ensure that the cache then
- only contains the networks detected in the most recent scan.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
-} CsrWifiSmeScanResultsFlushReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeScanResultsGetReq
-
- DESCRIPTION
- The wireless manager application calls this primitive to retrieve the
- current set of scan results, either after receiving a successful
- CSR_WIFI_SME_SCAN_FULL_CFM, or to get autonomous scan results.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
-} CsrWifiSmeScanResultsGetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeSmeStaConfigGetReq
-
- DESCRIPTION
- This primitive gets the value of the SmeStaConfig parameter.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
-} CsrWifiSmeSmeStaConfigGetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeSmeStaConfigSetReq
-
- DESCRIPTION
- This primitive sets the value of the SmeConfig parameter.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- smeConfig - SME Station Parameters to be set
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiSmeStaConfig smeConfig;
-} CsrWifiSmeSmeStaConfigSetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeStationMacAddressGetReq
-
- DESCRIPTION
- This primitive is used to retrieve the current MAC address used by the
- station.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
-} CsrWifiSmeStationMacAddressGetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeTspecReq
-
- DESCRIPTION
- The wireless manager application should call this primitive to use the
- TSPEC feature.
- The chip supports the use of TSPECs and TCLAS for the use of IEEE
- 802.11/WMM Quality of Service features.
- The API allows the wireless manager application to supply a correctly
- formatted TSPEC and TCLAS pair to the driver.
- After performing basic validation, the driver negotiates the installation
- of the TSPEC with the AP as defined by the 802.11 specification.
- The driver retains all TSPEC and TCLAS pairs until they are specifically
- removed.
- It is not compulsory for a TSPEC to have a TCLAS (NULL is used to
- indicate that no TCLAS is supplied), while a TCLAS always requires a
- TSPEC.
- The format of the TSPEC element is specified in 'WMM (including WMM Power
- Save) Specification - Version 1.1' and 'ANSI/IEEE Std 802.11-REVmb/D3.0'.
- For more information, see 'UniFi Configuring WMM and WMM-PS'.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- action - Specifies the action to be carried out on the list of TSPECs.
- CSR_WIFI_SME_LIST_ACTION_FLUSH is not applicable here.
- transactionId - Unique Transaction ID for the TSPEC, as assigned by the
- driver
- strict - If it is set to false, allows the SME to perform automatic
- TSPEC negotiation
- ctrlMask - Additional TSPEC configuration for CCX.
- Set mask with values from CsrWifiSmeTspecCtrl.
- CURRENTLY NOT SUPPORTED
- tspecLength - Length of the TSPEC.
- tspec - Points to the first byte of the TSPEC
- tclasLength - Length of the TCLAS.
- If it is equal to 0, no TCLAS is provided for the TSPEC
- tclas - Points to the first byte of the TCLAS, if any.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiSmeListAction action;
- u32 transactionId;
- u8 strict;
- CsrWifiSmeTspecCtrlMask ctrlMask;
- u16 tspecLength;
- u8 *tspec;
- u16 tclasLength;
- u8 *tclas;
-} CsrWifiSmeTspecReq;
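/* Illustrative sketch (not part of the original header): installing one
 * TSPEC with no TCLAS, which the text above allows (tclasLength = 0,
 * tclas = NULL).  CSR_WIFI_SME_LIST_ACTION_ADD is an assumed enumerator
 * name; the TSPEC buffer itself must be a WMM/802.11-formatted element
 * supplied by the caller and is not shown here.
 */
static void example_fill_tspec_req(CsrWifiSmeTspecReq *req,
                                   u8 *tspec, u16 tspec_len, u32 id)
{
    req->interfaceTag  = 0;
    req->action        = CSR_WIFI_SME_LIST_ACTION_ADD;    /* assumed name */
    req->transactionId = id;
    req->strict        = 0;        /* allow automatic TSPEC negotiation */
    req->ctrlMask      = 0;        /* CCX control: currently not supported */
    req->tspecLength   = tspec_len;
    req->tspec         = tspec;
    req->tclasLength   = 0;        /* no TCLAS accompanies this TSPEC */
    req->tclas         = NULL;
}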
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeVersionsGetReq
-
- DESCRIPTION
- This primitive gets the value of the Versions parameter.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
-} CsrWifiSmeVersionsGetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWifiFlightmodeReq
-
- DESCRIPTION
- The wireless manager application may call this primitive on boot-up of
- the platform to ensure that the chip is placed in a mode that prevents
- any emission of RF energy.
- This primitive is an alternative to CSR_WIFI_SME_WIFI_ON_REQ.
- As in CSR_WIFI_SME_WIFI_ON_REQ, it causes the download of the patch file
- (if any) and the programming of the initial MIB settings (if supplied by
- the WMA), but it also ensures that the chip is left in its lowest
- possible power-mode with the radio subsystems disabled.
- This feature is useful on platforms where power cannot be removed from
- the chip: leaving the chip uninitialised would cause it to consume more
- power, so calling this function ensures that the chip is initialised into
- a low-power mode without entering a state where it could emit any RF
- energy.
- NOTE: this primitive does not cause the Wi-Fi to change state: Wi-Fi
- stays conceptually off. Configuration primitives can be sent after
- CSR_WIFI_SME_WIFI_FLIGHTMODE_REQ and the configuration will be maintained.
- Requests that require the state of the Wi-Fi to be ON will return
- CSR_WIFI_SME_STATUS_WIFI_OFF in their confirms.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- address - Optionally specifies a station MAC address.
- In normal use, the manager should set the address to 0xFF
- 0xFF 0xFF 0xFF 0xFF 0xFF, which will cause the chip to use
- the MAC address in the MIB.
- mibFilesCount - Number of provided data blocks with initial MIB values
- mibFiles - Points to the first data block with initial MIB values.
- These data blocks are typically the contents of the provided
- files ufmib.dat and localmib.dat, available from the host
- file system, if they exist.
- These files typically contain radio tuning and calibration
- values.
- More values can be created using the Host Tools.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiMacAddress address;
- u16 mibFilesCount;
- CsrWifiSmeDataBlock *mibFiles;
-} CsrWifiSmeWifiFlightmodeReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWifiOffReq
-
- DESCRIPTION
- The wireless manager application calls this primitive to turn off the
- chip, thus saving power when Wi-Fi is not in use.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
-} CsrWifiSmeWifiOffReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWifiOnReq
-
- DESCRIPTION
- The wireless manager application calls this primitive to turn on the
- Wi-Fi chip.
- If the Wi-Fi chip is currently off, the SME turns the Wi-Fi chip on,
- downloads the patch file (if any), and programs the initial MIB settings
- (if supplied by the WMA).
- The patch file is not provided with the SME API; its downloading is
- automatic and handled internally by the system.
- The MIB settings, when provided, override the default values that the
- firmware loads from EEPROM.
- If the Wi-Fi chip is already on, the SME takes no action and returns a
- successful status in the confirm.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- address - Optionally specifies a station MAC address.
- In normal use, the manager should set the address to 0xFF
- 0xFF 0xFF 0xFF 0xFF 0xFF, which will cause the chip to use
- the MAC address in the MIB
- mibFilesCount - Number of provided data blocks with initial MIB values
- mibFiles - Points to the first data block with initial MIB values.
- These data blocks are typically the contents of the provided
- files ufmib.dat and localmib.dat, available from the host
- file system, if they exist.
- These files typically contain radio tuning and calibration
- values.
- More values can be created using the Host Tools.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiMacAddress address;
- u16 mibFilesCount;
- CsrWifiSmeDataBlock *mibFiles;
-} CsrWifiSmeWifiOnReq;
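/* Illustrative sketch (not part of the original header): the common
 * CsrWifiSmeWifiOnReq case described above - let the chip use the MAC
 * address from the MIB (all-0xFF address) and supply no MIB data blocks.
 * The CsrWifiMacAddress member name 'a' is an assumption; how the request
 * (or its CSR_WIFI_SME_WIFI_FLIGHTMODE_REQ alternative) is actually sent to
 * the SME is outside the scope of this header.  Assumes <string.h>.
 */
static void example_fill_wifi_on_req(CsrWifiSmeWifiOnReq *req)
{
    memset(req, 0, sizeof(*req));
    memset(req->address.a, 0xFF, sizeof(req->address.a));  /* use MIB MAC */
    req->mibFilesCount = 0;        /* no ufmib.dat/localmib.dat blocks */
    req->mibFiles      = NULL;     /* firmware defaults from EEPROM apply */
}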
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCloakedSsidsSetReq
-
- DESCRIPTION
- This primitive sets the list of cloaked SSIDs for which the WMA possesses
- profiles.
- When the driver detects a cloaked AP, the SME will explicitly scan for it
- using the list of cloaked SSIDs provided to it and, if the scan succeeds,
- it will report the AP to the WMA either via CSR_WIFI_SME_SCAN_RESULT_IND
- (if registered) or via CSR_WIFI_SCAN_RESULT_GET_CFM.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- cloakedSsids - Sets the list of cloaked SSIDs
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiSmeCloakedSsidConfig cloakedSsids;
-} CsrWifiSmeCloakedSsidsSetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCloakedSsidsGetReq
-
- DESCRIPTION
- This primitive gets the value of the CloakedSsids parameter.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
-} CsrWifiSmeCloakedSsidsGetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeSmeCommonConfigGetReq
-
- DESCRIPTION
- This primitive gets the value of the SME common configuration parameter.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
-} CsrWifiSmeSmeCommonConfigGetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeSmeCommonConfigSetReq
-
- DESCRIPTION
- This primitive sets the value of the SME common configuration parameter.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- deviceConfig - Configuration options in the SME
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiSmeDeviceConfig deviceConfig;
-} CsrWifiSmeSmeCommonConfigSetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeInterfaceCapabilityGetReq
-
- DESCRIPTION
- The Wireless Manager calls this primitive to ask the SME for the
- capabilities of the supported interfaces
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
-} CsrWifiSmeInterfaceCapabilityGetReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWpsConfigurationReq
-
- DESCRIPTION
- This primitive passes the WPS information for the device to the SME. This
- be accepted only if no interface is active.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- wpsConfig - WPS config.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiSmeWpsConfig wpsConfig;
-} CsrWifiSmeWpsConfigurationReq;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeSetReq
-
- DESCRIPTION
- Used to pass custom data to the SME. The format is the same as for
- 802.11 Information Elements: | Id | Length | Data |
- 1) Cmanr Test Mode: Id:0, Length:1, Data: 0x00 = OFF, 0x01 = ON,
- i.e. the bytes 0x00 0x01 (0x00|0x01)
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- dataLength - Number of bytes in the buffer pointed to by 'data'
- data - Pointer to the buffer containing 'dataLength' bytes
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u32 dataLength;
- u8 *data;
-} CsrWifiSmeSetReq;
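/* Illustrative sketch (not part of the original header): passing the
 * "Cmanr Test Mode" element described above through CsrWifiSmeSetReq.
 * Per the Id | Length | Data format, Id 0 with a one-byte payload of 0x01
 * means ON.  Buffer lifetime and how the request is delivered to the SME
 * are left to the caller.
 */
static void example_fill_set_req(CsrWifiSmeSetReq *req)
{
    static u8 cmanr_test_mode_on[] = { 0x00, 0x01, 0x01 };  /* Id, Len, ON */

    req->dataLength = sizeof(cmanr_test_mode_on);
    req->data       = cmanr_test_mode_on;
}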
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeActivateCfm
-
- DESCRIPTION
- The SME sends this primitive when the activation is complete.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
-} CsrWifiSmeActivateCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeAdhocConfigGetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
- adHocConfig - Contains the values used when starting an Ad-hoc (IBSS)
- connection.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
- CsrWifiSmeAdHocConfig adHocConfig;
-} CsrWifiSmeAdhocConfigGetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeAdhocConfigSetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
-} CsrWifiSmeAdhocConfigSetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeAssociationCompleteInd
-
- DESCRIPTION
- The SME will send this primitive to all the tasks that have registered to
- receive it whenever it completes an attempt to associate with an AP. If
- the association was successful, status will be set to
- CSR_WIFI_SME_STATUS_SUCCESS; otherwise, status and deauthReason shall be
- set to appropriate error codes.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the association procedure
- connectionInfo - This parameter is relevant only if result is
- CSR_WIFI_SME_STATUS_SUCCESS:
- it points to the connection information for the new network
- deauthReason - This parameter is relevant only if result is not
- CSR_WIFI_SME_STATUS_SUCCESS:
- if the AP deauthorised the station, it gives the reason of
- the deauthorization
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
- CsrWifiSmeConnectionInfo connectionInfo;
- CsrWifiSmeIEEE80211Reason deauthReason;
-} CsrWifiSmeAssociationCompleteInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeAssociationStartInd
-
- DESCRIPTION
- The SME will send this primitive to all the tasks that have registered to
- receive it whenever it begins an attempt to associate with an AP.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- address - BSSID of the associating network
- ssid - Service Set identifier of the associating network
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiMacAddress address;
- CsrWifiSsid ssid;
-} CsrWifiSmeAssociationStartInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeBlacklistCfm
-
- DESCRIPTION
- The SME will call this primitive when the action on the blacklist has
- completed. For a GET action, this primitive also reports the list of
- BSSIDs in the blacklist.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
- action - Action in the request
- getAddressCount - This parameter is only relevant if action is
- CSR_WIFI_SME_LIST_ACTION_GET:
- number of BSSIDs sent with this primitive
- getAddresses - Pointer to the list of BSSIDs sent with the primitive, set
- to NULL if none is sent.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
- CsrWifiSmeListAction action;
- u8 getAddressCount;
- CsrWifiMacAddress *getAddresses;
-} CsrWifiSmeBlacklistCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCalibrationDataGetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
- calibrationDataLength - Number of bytes in the buffer pointed to by
- calibrationData
- calibrationData - Pointer to a buffer of length calibrationDataLength
- containing the calibration data
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
- u16 calibrationDataLength;
- u8 *calibrationData;
-} CsrWifiSmeCalibrationDataGetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCalibrationDataSetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
-} CsrWifiSmeCalibrationDataSetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCcxConfigGetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
- ccxConfig - Currently not supported
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
- CsrWifiSmeCcxConfig ccxConfig;
-} CsrWifiSmeCcxConfigGetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCcxConfigSetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
-} CsrWifiSmeCcxConfigSetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCoexConfigGetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
- coexConfig - Reports the parameters used to configure the coexistence
- behaviour
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
- CsrWifiSmeCoexConfig coexConfig;
-} CsrWifiSmeCoexConfigGetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCoexConfigSetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
-} CsrWifiSmeCoexConfigSetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCoexInfoGetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
- coexInfo - Reports information and state related to coexistence.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
- CsrWifiSmeCoexInfo coexInfo;
-} CsrWifiSmeCoexInfoGetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeConnectCfm
-
- DESCRIPTION
- The SME calls this primitive when the connection exchange is complete or
- all connection attempts fail.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request.
- CSR_WIFI_SME_STATUS_NOT_FOUND: all attempts by the SME to
- locate the requested AP failed
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
-} CsrWifiSmeConnectCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeConnectionConfigGetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
- connectionConfig - Parameters used by the SME for selecting a network
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
- CsrWifiSmeConnectionConfig connectionConfig;
-} CsrWifiSmeConnectionConfigGetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeConnectionInfoGetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
- connectionInfo - Information about the current connection
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
- CsrWifiSmeConnectionInfo connectionInfo;
-} CsrWifiSmeConnectionInfoGetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeConnectionQualityInd
-
- DESCRIPTION
- The SME sends this primitive to all the tasks that have registered to
- receive it whenever the value of the current connection quality
- parameters change by more than a certain configurable amount.
- The wireless manager application may configure the trigger thresholds for
- this indication using the field in smeConfig parameter of
- CSR_WIFI_SME_SME_CONFIG_SET_REQ.
- Connection quality messages can be suppressed by setting both thresholds
- to zero.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- linkQuality - Indicates the quality of the link
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiSmeLinkQuality linkQuality;
-} CsrWifiSmeConnectionQualityInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeConnectionStatsGetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
- connectionStats - Statistics for current connection.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
- CsrWifiSmeConnectionStats connectionStats;
-} CsrWifiSmeConnectionStatsGetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeDeactivateCfm
-
- DESCRIPTION
- The SME sends this primitive when the deactivation is complete.
- The WMA cannot send any more primitives until it activates the SME again
- by sending another CSR_WIFI_SME_ACTIVATE_REQ.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
-} CsrWifiSmeDeactivateCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeDisconnectCfm
-
- DESCRIPTION
- On reception of CSR_WIFI_SME_DISCONNECT_REQ the SME will perform a
- disconnect operation, sending a CsrWifiSmeMediaStatusInd with
- CSR_WIFI_SME_MEDIA_STATUS_DISCONNECTED and then call this primitive when
- disconnection is complete.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
-} CsrWifiSmeDisconnectCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeEventMaskSetCfm
-
- DESCRIPTION
- The SME calls the primitive to report the result of the request
- primitive.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
-} CsrWifiSmeEventMaskSetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeHostConfigGetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
- hostConfig - Current host power state.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
- CsrWifiSmeHostConfig hostConfig;
-} CsrWifiSmeHostConfigGetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeHostConfigSetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
-} CsrWifiSmeHostConfigSetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeIbssStationInd
-
- DESCRIPTION
- The SME will send this primitive to indicate that a station has joined or
- left the ad-hoc network.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- address - MAC address of the station that has joined or left
- isconnected - TRUE if the station joined, FALSE if the station left
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiMacAddress address;
- u8 isconnected;
-} CsrWifiSmeIbssStationInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeKeyCfm
-
- DESCRIPTION
- The SME calls the primitive to report the result of the request
- primitive.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
- action - Action in the request
- keyType - Type of the key added/deleted
- peerMacAddress - Peer MAC Address of the key added/deleted
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
- CsrWifiSmeListAction action;
- CsrWifiSmeKeyType keyType;
- CsrWifiMacAddress peerMacAddress;
-} CsrWifiSmeKeyCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeLinkQualityGetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
- linkQuality - Indicates the quality of the link
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
- CsrWifiSmeLinkQuality linkQuality;
-} CsrWifiSmeLinkQualityGetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMediaStatusInd
-
- DESCRIPTION
- The SME sends this primitive to all the tasks that have registered to
- receive it when a network connection is established, lost or has moved to
- another AP.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- mediaStatus - Indicates the media status
- connectionInfo - This parameter is relevant only if the mediaStatus is
- CSR_WIFI_SME_MEDIA_STATUS_CONNECTED:
- it points to the connection information for the new network
- disassocReason - This parameter is relevant only if the mediaStatus is
- CSR_WIFI_SME_MEDIA_STATUS_DISCONNECTED:
- if a disassociation has occurred it gives the reason of the
- disassociation
- deauthReason - This parameter is relevant only if the mediaStatus is
- CSR_WIFI_SME_MEDIA_STATUS_DISCONNECTED:
- if a deauthentication has occurred it gives the reason of
- the deauthentication
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiSmeMediaStatus mediaStatus;
- CsrWifiSmeConnectionInfo connectionInfo;
- CsrWifiSmeIEEE80211Reason disassocReason;
- CsrWifiSmeIEEE80211Reason deauthReason;
-} CsrWifiSmeMediaStatusInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMibConfigGetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
- mibConfig - Reports various IEEE 802.11 attributes as currently configured
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
- CsrWifiSmeMibConfig mibConfig;
-} CsrWifiSmeMibConfigGetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMibConfigSetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
-} CsrWifiSmeMibConfigSetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMibGetCfm
-
- DESCRIPTION
- The SME calls this primitive to return the requested MIB variable values.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
- mibAttributeLength - Length of mibAttribute
- mibAttribute - Points to the VarBind or VarBindList containing the
- names and values of the MIB variables requested
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
- u16 mibAttributeLength;
- u8 *mibAttribute;
-} CsrWifiSmeMibGetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMibGetNextCfm
-
- DESCRIPTION
- The SME calls this primitive to return the requested MIB name(s).
- The wireless manager application can then read the value of the MIB
- variable using CSR_WIFI_SME_MIB_GET_REQ, using the names provided.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
- mibAttributeLength - Length of mibAttribute
- mibAttribute - Points to a VarBind or VarBindList containing the
- name(s) of the MIB variable(s) lexicographically
- following the name(s) given in the request
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
- u16 mibAttributeLength;
- u8 *mibAttribute;
-} CsrWifiSmeMibGetNextCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMibSetCfm
-
- DESCRIPTION
- The SME calls the primitive to report the result of the set primitive.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
-} CsrWifiSmeMibSetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMicFailureInd
-
- DESCRIPTION
- The SME sends this primitive to all the tasks that have registered to
- receive it whenever the chip firmware reports a MIC failure.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- secondFailure - TRUE if this indication is for a second failure in 60
- seconds
- count - The number of MIC failure events since the connection was
- established
- address - MAC address of the transmitter that caused the MIC failure
- keyType - Type of key for which the failure occurred
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- u8 secondFailure;
- u16 count;
- CsrWifiMacAddress address;
- CsrWifiSmeKeyType keyType;
-} CsrWifiSmeMicFailureInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeMulticastAddressCfm
-
- DESCRIPTION
- The SME will call this primitive when the operation is complete. For a
- GET action, this primitive reports the current list of MAC addresses.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
- action - Action in the request
- getAddressesCount - This parameter is only relevant if action is
- CSR_WIFI_SME_LIST_ACTION_GET:
- number of MAC addresses sent with the primitive
- getAddresses - Pointer to the list of MAC Addresses sent with the
- primitive, set to NULL if none is sent.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
- CsrWifiSmeListAction action;
- u8 getAddressesCount;
- CsrWifiMacAddress *getAddresses;
-} CsrWifiSmeMulticastAddressCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePacketFilterSetCfm
-
- DESCRIPTION
- The SME calls the primitive to report the result of the set primitive.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
-} CsrWifiSmePacketFilterSetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePermanentMacAddressGetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
- permanentMacAddress - MAC address stored in the EEPROM
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
- CsrWifiMacAddress permanentMacAddress;
-} CsrWifiSmePermanentMacAddressGetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePmkidCandidateListInd
-
- DESCRIPTION
- The SME will send this primitive to all the tasks that have registered to
- receive it when a new network supporting preauthentication and/or PMK
- caching is seen.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an
- interface
- pmkidCandidatesCount - Number of PMKID candidates provided
- pmkidCandidates - Points to the first PMKID candidate
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- u8 pmkidCandidatesCount;
- CsrWifiSmePmkidCandidate *pmkidCandidates;
-} CsrWifiSmePmkidCandidateListInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePmkidCfm
-
- DESCRIPTION
- The SME will call this primitive when the operation is complete. For a
- GET action, this primitive reports the current list of PMKIDs.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
- action - Action in the request
- getPmkidsCount - This parameter is only relevant if action is
- CSR_WIFI_SME_LIST_ACTION_GET:
- number of PMKIDs sent with the primitive
- getPmkids - Pointer to the list of PMKIDs sent with the primitive, set
- to NULL if none is sent.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
- CsrWifiSmeListAction action;
- u8 getPmkidsCount;
- CsrWifiSmePmkid *getPmkids;
-} CsrWifiSmePmkidCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePowerConfigGetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
- powerConfig - Returns the current parameters for the power configuration of
- the firmware
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
- CsrWifiSmePowerConfig powerConfig;
-} CsrWifiSmePowerConfigGetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmePowerConfigSetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
-} CsrWifiSmePowerConfigSetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeRegulatoryDomainInfoGetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
- regDomInfo - Reports information and state related to regulatory domain
- operation.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
- CsrWifiSmeRegulatoryDomainInfo regDomInfo;
-} CsrWifiSmeRegulatoryDomainInfoGetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeRoamCompleteInd
-
- DESCRIPTION
- The SME will send this primitive to all the tasks that have registered to
- receive it whenever it completes an attempt to roam to an AP. If the roam
- attempt was successful, status will be set to CSR_WIFI_SME_SUCCESS,
- otherwise it shall be set to the appropriate error code.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the roaming procedure
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
-} CsrWifiSmeRoamCompleteInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeRoamStartInd
-
- DESCRIPTION
- The SME will send this primitive to all the tasks that have registered to
- receive it whenever it begins an attempt to roam to an AP.
- If the wireless manager application connect request specified the SSID
- and the BSSID was set to the broadcast address (0xFF 0xFF 0xFF 0xFF 0xFF
- 0xFF), the SME monitors the signal quality and maintains a list of
- candidates to roam to. When the signal quality of the current connection
- falls below a threshold, and there is a candidate with better quality,
- the SME will attempt to roam to the candidate AP.
- If the roaming procedure succeeds, the SME will also issue a Media
- Connect indication to inform the wireless manager application of the
- change.
- NOTE: to prevent the SME from initiating roaming the WMA must specify the
- BSSID in the connection request; this forces the SME to connect only to
- that AP.
- The wireless manager application can obtain statistics for roaming
- purposes using CSR_WIFI_SME_CONNECTION_QUALITY_IND and
- CSR_WIFI_SME_CONNECTION_STATS_GET_REQ.
- When the wireless manager application wishes to roam to another AP, it
- must issue a connection request specifying the BSSID of the desired AP.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- roamReason - Indicates the reason for starting the roaming procedure
- reason80211 - Indicates the reason for deauthentication or disassociation
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiSmeRoamReason roamReason;
- CsrWifiSmeIEEE80211Reason reason80211;
-} CsrWifiSmeRoamStartInd;
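A minimal sketch of the "pin to one AP" technique from the note above: giving a unicast BSSID in the connect request stops the SME from roaming on its own, whereas the broadcast BSSID leaves roaming to the SME. Field names follow the CsrWifiSmeConnectReq layout serialised later in this patch; memcpy()/memset() come from <linux/string.h>, the caller that actually submits the request is assumed, and ssid_len is expected to be at most 32.

    /* Sketch only -- not part of the original driver sources. */
    static void fill_pinned_connect(CsrWifiSmeConnectReq *req,
                                    const u8 bssid[6],
                                    const u8 *ssid, u8 ssid_len)
    {
        /* A specific BSSID forces the SME to connect only to that AP. */
        memcpy(req->connectionConfig.bssid.a, bssid, 6);

        memset(req->connectionConfig.ssid.ssid, 0,
               sizeof(req->connectionConfig.ssid.ssid));
        memcpy(req->connectionConfig.ssid.ssid, ssid, ssid_len);
        req->connectionConfig.ssid.length = ssid_len;
    }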
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeRoamingConfigGetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
- roamingConfig - Reports the roaming behaviour of the driver and firmware
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
- CsrWifiSmeRoamingConfig roamingConfig;
-} CsrWifiSmeRoamingConfigGetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeRoamingConfigSetCfm
-
- DESCRIPTION
- This primitive reports the result of the RoamingConfig set request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
-} CsrWifiSmeRoamingConfigSetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeScanConfigGetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
- scanConfig - Returns the current parameters for the autonomous scanning
- behaviour of the firmware
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
- CsrWifiSmeScanConfig scanConfig;
-} CsrWifiSmeScanConfigGetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeScanConfigSetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
-} CsrWifiSmeScanConfigSetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeScanFullCfm
-
- DESCRIPTION
- The SME calls this primitive when the results from the scan are
- available.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
-} CsrWifiSmeScanFullCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeScanResultInd
-
- DESCRIPTION
- The SME sends this primitive to all the tasks that have registered to
- receive it whenever a scan indication is received from the firmware.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- result - Points to a buffer containing a scan result.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiSmeScanResult result;
-} CsrWifiSmeScanResultInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeScanResultsFlushCfm
-
- DESCRIPTION
- The SME will call this primitive when the cache has been cleared.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
-} CsrWifiSmeScanResultsFlushCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeScanResultsGetCfm
-
- DESCRIPTION
- The SME sends this primitive to provide the current set of scan results.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
- scanResultsCount - Number of scan results
- scanResults - Points to a buffer containing an array of
- CsrWifiSmeScanResult structures.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
- u16 scanResultsCount;
- CsrWifiSmeScanResult *scanResults;
-} CsrWifiSmeScanResultsGetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeSmeStaConfigGetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
- smeConfig - Current SME Station Parameters
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
- CsrWifiSmeStaConfig smeConfig;
-} CsrWifiSmeSmeStaConfigGetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeSmeStaConfigSetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
-} CsrWifiSmeSmeStaConfigSetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeStationMacAddressGetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
- stationMacAddress - Current MAC address of the station.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
- CsrWifiMacAddress stationMacAddress[2];
-} CsrWifiSmeStationMacAddressGetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeTspecInd
-
- DESCRIPTION
- The SME will send this primitive to all the tasks that have registered to
- receive it when a status change in the TSPEC occurs.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- transactionId - Unique Transaction ID for the TSPEC, as assigned by the
- driver
- tspecResultCode - Specifies the TSPEC operation requested by the peer
- station
- tspecLength - Length of the TSPEC.
- tspec - Points to the first byte of the TSPEC
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- u32 transactionId;
- CsrWifiSmeTspecResultCode tspecResultCode;
- u16 tspecLength;
- u8 *tspec;
-} CsrWifiSmeTspecInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeTspecCfm
-
- DESCRIPTION
- The SME calls the primitive to report the result of the TSpec primitive
- request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface Identifier; unique identifier of an interface
- status - Reports the result of the request
- transactionId - Unique Transaction ID for the TSPEC, as assigned by the
- driver
- tspecResultCode - Specifies the result of the negotiated TSPEC operation
- tspecLength - Length of the TSPEC.
- tspec - Points to the first byte of the TSPEC
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrResult status;
- u32 transactionId;
- CsrWifiSmeTspecResultCode tspecResultCode;
- u16 tspecLength;
- u8 *tspec;
-} CsrWifiSmeTspecCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeVersionsGetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
- versions - Version IDs of the product
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
- CsrWifiSmeVersions versions;
-} CsrWifiSmeVersionsGetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWifiFlightmodeCfm
-
- DESCRIPTION
- The SME calls this primitive when the chip has been initialised for low
- power mode with the radio subsystem disabled. To leave flight mode and
- enable Wi-Fi, the wireless manager application should call
- CSR_WIFI_SME_WIFI_ON_REQ.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
-} CsrWifiSmeWifiFlightmodeCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWifiOffInd
-
- DESCRIPTION
- The SME sends this primitive to all the tasks that have registered to
- receive it to report that the chip has been turned off.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- reason - Indicates the reason why the Wi-Fi has been switched off.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiSmeControlIndication reason;
-} CsrWifiSmeWifiOffInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWifiOffCfm
-
- DESCRIPTION
- After receiving CSR_WIFI_SME_WIFI_OFF_REQ, if the chip is connected to a
- network, the SME will perform a disconnect operation, will send a
- CSR_WIFI_SME_MEDIA_STATUS_IND with
- CSR_WIFI_SME_MEDIA_STATUS_DISCONNECTED, and then will call
- CSR_WIFI_SME_WIFI_OFF_CFM when the chip is off.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
-} CsrWifiSmeWifiOffCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWifiOnCfm
-
- DESCRIPTION
- The SME sends this primitive to the task that has sent the request once
- the chip has been initialised and is available for use.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
-} CsrWifiSmeWifiOnCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCloakedSsidsSetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
-} CsrWifiSmeCloakedSsidsSetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCloakedSsidsGetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
- cloakedSsids - Reports list of cloaked SSIDs that are explicitly scanned for
- by the driver
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
- CsrWifiSmeCloakedSsidConfig cloakedSsids;
-} CsrWifiSmeCloakedSsidsGetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWifiOnInd
-
- DESCRIPTION
- The SME sends this primitive to all tasks that have registered to receive
- it once the chip becomes available and ready to use.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- address - Current MAC address
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrWifiMacAddress address;
-} CsrWifiSmeWifiOnInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeSmeCommonConfigGetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
- deviceConfig - Configuration options in the SME
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
- CsrWifiSmeDeviceConfig deviceConfig;
-} CsrWifiSmeSmeCommonConfigGetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeSmeCommonConfigSetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Reports the result of the request
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
-} CsrWifiSmeSmeCommonConfigSetCfm;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeInterfaceCapabilityGetCfm
-
- DESCRIPTION
- This primitive reports the result of the request.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Result of the request
- numInterfaces - Number of interfaces supported
- capBitmap - Points to the list of capabilities bitmaps provided for each
- interface.
- The bits represent the following capabilities:
- -bits 7 to 4-Reserved
- -bit 3-AMP
- -bit 2-P2P
- -bit 1-AP
- -bit 0-STA
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
- u16 numInterfaces;
- u8 capBitmap[2];
-} CsrWifiSmeInterfaceCapabilityGetCfm;
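A minimal sketch of decoding the per-interface capability bitmap laid out above. The CSR_CAP_* masks are illustrative values derived from the bit positions in the comment, not constants defined by this header.

    /* Sketch only -- not part of the original driver sources. */
    #define CSR_CAP_STA 0x01    /* bit 0 */
    #define CSR_CAP_AP  0x02    /* bit 1 */
    #define CSR_CAP_P2P 0x04    /* bit 2 */
    #define CSR_CAP_AMP 0x08    /* bit 3 */

    static int iface_has_capability(const CsrWifiSmeInterfaceCapabilityGetCfm *cfm,
                                    u16 iface, u8 cap_mask)
    {
        if (iface >= cfm->numInterfaces || iface >= 2)
            return 0;           /* capBitmap holds at most two entries */
        return (cfm->capBitmap[iface] & cap_mask) != 0;
    }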
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeErrorInd
-
- DESCRIPTION
- Error message indicating an error of some importance
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- errorMessage - Contains the error message.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- char *errorMessage;
-} CsrWifiSmeErrorInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeInfoInd
-
- DESCRIPTION
- Message reporting some information about the current activity. Mostly of
- interest in testing, but it may be useful in the field.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- infoMessage - Contains the message.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- char *infoMessage;
-} CsrWifiSmeInfoInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeCoreDumpInd
-
- DESCRIPTION
- The SME will send this primitive to all the tasks that have registered to
- receive Wi-Fi Chip core dump data.
- The core dump data may be fragmented and sent using more than one
- indication.
- To indicate that all the data has been sent, the last indication contains
- a 'length' of 0 and 'data' of NULL.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- dataLength - Number of bytes in the buffer pointed to by 'data'
- data - Pointer to the buffer containing 'dataLength' bytes of core
- dump data
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u32 dataLength;
- u8 *data;
-} CsrWifiSmeCoreDumpInd;
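A minimal sketch of reassembling the fragmented core dump described above, treating a zero 'dataLength' together with a NULL 'data' pointer as the end-of-dump marker. store_dump_fragment() and dump_complete() are hypothetical helpers for whatever storage the receiving task uses.

    /* Sketch only -- not part of the original driver sources. */
    static void core_dump_ind_handler(void *drvpriv, CsrWifiFsmEvent *msg)
    {
        CsrWifiSmeCoreDumpInd *ind = (CsrWifiSmeCoreDumpInd *)msg;

        if (ind->dataLength == 0 && ind->data == NULL) {
            dump_complete(drvpriv);             /* all fragments received */
            return;
        }
        store_dump_fragment(drvpriv, ind->data, ind->dataLength);
    }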
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeAmpStatusChangeInd
-
- DESCRIPTION
- Indication of change to AMP activity.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- interfaceTag - Interface on which the AMP activity changed.
- ampStatus - The new status of AMP activity. Range: {AMP_ACTIVE,
- AMP_INACTIVE}.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- u16 interfaceTag;
- CsrWifiSmeAmpStatus ampStatus;
-} CsrWifiSmeAmpStatusChangeInd;
-
-/*******************************************************************************
-
- NAME
- CsrWifiSmeWpsConfigurationCfm
-
- DESCRIPTION
- Confirm.
-
- MEMBERS
- common - Common header for use with the CsrWifiFsm Module
- status - Status of the request.
-
-*******************************************************************************/
-typedef struct
-{
- CsrWifiFsmEvent common;
- CsrResult status;
-} CsrWifiSmeWpsConfigurationCfm;
-
-#endif /* CSR_WIFI_SME_PRIM_H__ */
-
diff --git a/drivers/staging/csr/csr_wifi_sme_sef.c b/drivers/staging/csr/csr_wifi_sme_sef.c
deleted file mode 100644
index cf32254..0000000
--- a/drivers/staging/csr/csr_wifi_sme_sef.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2010
- Confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
- *****************************************************************************/
-#include "csr_wifi_sme_sef.h"
-
-const CsrWifiSmeStateHandlerType CsrWifiSmeUpstreamStateHandlers[CSR_WIFI_SME_PRIM_UPSTREAM_COUNT] =
-{
- /* 0x8000 */ CsrWifiSmeActivateCfmHandler,
- /* 0x8001 */ CsrWifiSmeAdhocConfigGetCfmHandler,
- /* 0x8002 */ CsrWifiSmeAdhocConfigSetCfmHandler,
- /* 0x8003 */ CsrWifiSmeAssociationCompleteIndHandler,
- /* 0x8004 */ CsrWifiSmeAssociationStartIndHandler,
- /* 0x8005 */ CsrWifiSmeBlacklistCfmHandler,
- /* 0x8006 */ CsrWifiSmeCalibrationDataGetCfmHandler,
- /* 0x8007 */ CsrWifiSmeCalibrationDataSetCfmHandler,
- /* 0x8008 */ CsrWifiSmeCcxConfigGetCfmHandler,
- /* 0x8009 */ CsrWifiSmeCcxConfigSetCfmHandler,
- /* 0x800A */ CsrWifiSmeCoexConfigGetCfmHandler,
- /* 0x800B */ CsrWifiSmeCoexConfigSetCfmHandler,
- /* 0x800C */ CsrWifiSmeCoexInfoGetCfmHandler,
- /* 0x800D */ CsrWifiSmeConnectCfmHandler,
- /* 0x800E */ CsrWifiSmeConnectionConfigGetCfmHandler,
- /* 0x800F */ CsrWifiSmeConnectionInfoGetCfmHandler,
- /* 0x8010 */ CsrWifiSmeConnectionQualityIndHandler,
- /* 0x8011 */ CsrWifiSmeConnectionStatsGetCfmHandler,
- /* 0x8012 */ CsrWifiSmeDeactivateCfmHandler,
- /* 0x8013 */ CsrWifiSmeDisconnectCfmHandler,
- /* 0x8014 */ CsrWifiSmeEventMaskSetCfmHandler,
- /* 0x8015 */ CsrWifiSmeHostConfigGetCfmHandler,
- /* 0x8016 */ CsrWifiSmeHostConfigSetCfmHandler,
- /* 0x8017 */ CsrWifiSmeIbssStationIndHandler,
- /* 0x8018 */ CsrWifiSmeKeyCfmHandler,
- /* 0x8019 */ CsrWifiSmeLinkQualityGetCfmHandler,
- /* 0x801A */ CsrWifiSmeMediaStatusIndHandler,
- /* 0x801B */ CsrWifiSmeMibConfigGetCfmHandler,
- /* 0x801C */ CsrWifiSmeMibConfigSetCfmHandler,
- /* 0x801D */ CsrWifiSmeMibGetCfmHandler,
- /* 0x801E */ CsrWifiSmeMibGetNextCfmHandler,
- /* 0x801F */ CsrWifiSmeMibSetCfmHandler,
- /* 0x8020 */ CsrWifiSmeMicFailureIndHandler,
- /* 0x8021 */ CsrWifiSmeMulticastAddressCfmHandler,
- /* 0x8022 */ CsrWifiSmePacketFilterSetCfmHandler,
- /* 0x8023 */ CsrWifiSmePermanentMacAddressGetCfmHandler,
- /* 0x8024 */ CsrWifiSmePmkidCandidateListIndHandler,
- /* 0x8025 */ CsrWifiSmePmkidCfmHandler,
- /* 0x8026 */ CsrWifiSmePowerConfigGetCfmHandler,
- /* 0x8027 */ CsrWifiSmePowerConfigSetCfmHandler,
- /* 0x8028 */ CsrWifiSmeRegulatoryDomainInfoGetCfmHandler,
- /* 0x8029 */ CsrWifiSmeRoamCompleteIndHandler,
- /* 0x802A */ CsrWifiSmeRoamStartIndHandler,
- /* 0x802B */ CsrWifiSmeRoamingConfigGetCfmHandler,
- /* 0x802C */ CsrWifiSmeRoamingConfigSetCfmHandler,
- /* 0x802D */ CsrWifiSmeScanConfigGetCfmHandler,
- /* 0x802E */ CsrWifiSmeScanConfigSetCfmHandler,
- /* 0x802F */ CsrWifiSmeScanFullCfmHandler,
- /* 0x8030 */ CsrWifiSmeScanResultIndHandler,
- /* 0x8031 */ CsrWifiSmeScanResultsFlushCfmHandler,
- /* 0x8032 */ CsrWifiSmeScanResultsGetCfmHandler,
- /* 0x8033 */ CsrWifiSmeSmeStaConfigGetCfmHandler,
- /* 0x8034 */ CsrWifiSmeSmeStaConfigSetCfmHandler,
- /* 0x8035 */ CsrWifiSmeStationMacAddressGetCfmHandler,
- /* 0x8036 */ CsrWifiSmeTspecIndHandler,
- /* 0x8037 */ CsrWifiSmeTspecCfmHandler,
- /* 0x8038 */ CsrWifiSmeVersionsGetCfmHandler,
- /* 0x8039 */ CsrWifiSmeWifiFlightmodeCfmHandler,
- /* 0x803A */ CsrWifiSmeWifiOffIndHandler,
- /* 0x803B */ CsrWifiSmeWifiOffCfmHandler,
- /* 0x803C */ CsrWifiSmeWifiOnCfmHandler,
- /* 0x803D */ CsrWifiSmeCloakedSsidsSetCfmHandler,
- /* 0x803E */ CsrWifiSmeCloakedSsidsGetCfmHandler,
- /* 0x803F */ CsrWifiSmeWifiOnIndHandler,
- /* 0x8040 */ CsrWifiSmeSmeCommonConfigGetCfmHandler,
- /* 0x8041 */ CsrWifiSmeSmeCommonConfigSetCfmHandler,
- /* 0x8042 */ CsrWifiSmeGetInterfaceCapabilityCfmHandler,
- /* 0x8043 */ CsrWifiSmeErrorIndHandler,
- /* 0x8044 */ CsrWifiSmeInfoIndHandler,
- /* 0x8045 */ CsrWifiSmeCoreDumpIndHandler,
- /* 0x8046 */ CsrWifiSmeAmpStatusChangeIndHandler,
-};
diff --git a/drivers/staging/csr/csr_wifi_sme_sef.h b/drivers/staging/csr/csr_wifi_sme_sef.h
deleted file mode 100644
index 78b88c0..0000000
--- a/drivers/staging/csr/csr_wifi_sme_sef.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2010
- Confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-#ifndef CSR_WIFI_ROUTER_SEF_CSR_WIFI_SME_H__
-#define CSR_WIFI_ROUTER_SEF_CSR_WIFI_SME_H__
-
-#include "csr_wifi_sme_prim.h"
-
-typedef void (*CsrWifiSmeStateHandlerType)(void *drvpriv, CsrWifiFsmEvent *msg);
-
-extern const CsrWifiSmeStateHandlerType
- CsrWifiSmeUpstreamStateHandlers[CSR_WIFI_SME_PRIM_UPSTREAM_COUNT];
-
-
-extern void CsrWifiSmeActivateCfmHandler(void *drvpriv, CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeAdhocConfigGetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeAdhocConfigSetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeAssociationCompleteIndHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeAssociationStartIndHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeBlacklistCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeCalibrationDataGetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeCalibrationDataSetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeCcxConfigGetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeCcxConfigSetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeCoexConfigGetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeCoexConfigSetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeCoexInfoGetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeConnectCfmHandler(void *drvpriv, CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeConnectionConfigGetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeConnectionInfoGetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeConnectionQualityIndHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeConnectionStatsGetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeDeactivateCfmHandler(void *drvpriv, CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeDisconnectCfmHandler(void *drvpriv, CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeEventMaskSetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeHostConfigGetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeHostConfigSetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeIbssStationIndHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeKeyCfmHandler(void *drvpriv, CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeLinkQualityGetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeMediaStatusIndHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeMibConfigGetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeMibConfigSetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeMibGetCfmHandler(void *drvpriv, CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeMibGetNextCfmHandler(void *drvpriv, CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeMibSetCfmHandler(void *drvpriv, CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeMicFailureIndHandler(void *drvpriv, CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeMulticastAddressCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmePacketFilterSetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmePermanentMacAddressGetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmePmkidCandidateListIndHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmePmkidCfmHandler(void *drvpriv, CsrWifiFsmEvent *msg);
-extern void CsrWifiSmePowerConfigGetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmePowerConfigSetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeRegulatoryDomainInfoGetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeRoamCompleteIndHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeRoamStartIndHandler(void *drvpriv, CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeRoamingConfigGetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeRoamingConfigSetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeScanConfigGetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeScanConfigSetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeScanFullCfmHandler(void *drvpriv, CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeScanResultIndHandler(void *drvpriv, CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeScanResultsFlushCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeScanResultsGetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeSmeStaConfigGetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeSmeStaConfigSetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeStationMacAddressGetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeTspecIndHandler(void *drvpriv, CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeTspecCfmHandler(void *drvpriv, CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeVersionsGetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeWifiFlightmodeCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeWifiOffIndHandler(void *drvpriv, CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeWifiOffCfmHandler(void *drvpriv, CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeWifiOnCfmHandler(void *drvpriv, CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeCloakedSsidsSetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeCloakedSsidsGetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeWifiOnIndHandler(void *drvpriv, CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeSmeCommonConfigGetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeSmeCommonConfigSetCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeGetInterfaceCapabilityCfmHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeErrorIndHandler(void *drvpriv, CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeInfoIndHandler(void *drvpriv, CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeCoreDumpIndHandler(void *drvpriv, CsrWifiFsmEvent *msg);
-extern void CsrWifiSmeAmpStatusChangeIndHandler(void *drvpriv,
- CsrWifiFsmEvent *msg);
-
-#endif /* CSR_WIFI_ROUTER_SEF_CSR_WIFI_SME_H__ */
diff --git a/drivers/staging/csr/csr_wifi_sme_serialize.c b/drivers/staging/csr/csr_wifi_sme_serialize.c
deleted file mode 100644
index 7d7e1d8..0000000
--- a/drivers/staging/csr/csr_wifi_sme_serialize.c
+++ /dev/null
@@ -1,5809 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-#include <linux/string.h>
-#include <linux/slab.h>
-#include "csr_msgconv.h"
-#include "csr_wifi_sme_prim.h"
-#include "csr_wifi_sme_serialize.h"
-
-void CsrWifiSmePfree(void *ptr)
-{
- kfree(ptr);
-}
-
-
-size_t CsrWifiSmeAdhocConfigSetReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 11) */
- bufferSize += 2; /* u16 primitive->adHocConfig.atimWindowTu */
- bufferSize += 2; /* u16 primitive->adHocConfig.beaconPeriodTu */
- bufferSize += 2; /* u16 primitive->adHocConfig.joinOnlyAttempts */
- bufferSize += 2; /* u16 primitive->adHocConfig.joinAttemptIntervalMs */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeAdhocConfigSetReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeAdhocConfigSetReq *primitive = (CsrWifiSmeAdhocConfigSetReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->adHocConfig.atimWindowTu);
- CsrUint16Ser(ptr, len, (u16) primitive->adHocConfig.beaconPeriodTu);
- CsrUint16Ser(ptr, len, (u16) primitive->adHocConfig.joinOnlyAttempts);
- CsrUint16Ser(ptr, len, (u16) primitive->adHocConfig.joinAttemptIntervalMs);
- return(ptr);
-}
-
-
-void* CsrWifiSmeAdhocConfigSetReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeAdhocConfigSetReq *primitive = kmalloc(sizeof(CsrWifiSmeAdhocConfigSetReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->adHocConfig.atimWindowTu, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->adHocConfig.beaconPeriodTu, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->adHocConfig.joinOnlyAttempts, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->adHocConfig.joinAttemptIntervalMs, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeBlacklistReqSizeof(void *msg)
-{
- CsrWifiSmeBlacklistReq *primitive = (CsrWifiSmeBlacklistReq *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 13) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 1; /* CsrWifiSmeListAction primitive->action */
- bufferSize += 1; /* u8 primitive->setAddressCount */
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->setAddressCount; i1++)
- {
- bufferSize += 6; /* u8 primitive->setAddresses[i1].a[6] */
- }
- }
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeBlacklistReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeBlacklistReq *primitive = (CsrWifiSmeBlacklistReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint8Ser(ptr, len, (u8) primitive->action);
- CsrUint8Ser(ptr, len, (u8) primitive->setAddressCount);
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->setAddressCount; i1++)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->setAddresses[i1].a, ((u16) (6)));
- }
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeBlacklistReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeBlacklistReq *primitive = kmalloc(sizeof(CsrWifiSmeBlacklistReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->action, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->setAddressCount, buffer, &offset);
- primitive->setAddresses = NULL;
- if (primitive->setAddressCount)
- {
- primitive->setAddresses = kmalloc(sizeof(CsrWifiMacAddress) * primitive->setAddressCount, GFP_KERNEL);
- }
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->setAddressCount; i1++)
- {
- CsrMemCpyDes(primitive->setAddresses[i1].a, buffer, &offset, ((u16) (6)));
- }
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmeBlacklistReqSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeBlacklistReq *primitive = (CsrWifiSmeBlacklistReq *) voidPrimitivePointer;
- kfree(primitive->setAddresses);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeCalibrationDataSetReqSizeof(void *msg)
-{
- CsrWifiSmeCalibrationDataSetReq *primitive = (CsrWifiSmeCalibrationDataSetReq *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 6) */
- bufferSize += 2; /* u16 primitive->calibrationDataLength */
- bufferSize += primitive->calibrationDataLength; /* u8 primitive->calibrationData */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeCalibrationDataSetReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeCalibrationDataSetReq *primitive = (CsrWifiSmeCalibrationDataSetReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->calibrationDataLength);
- if (primitive->calibrationDataLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->calibrationData, ((u16) (primitive->calibrationDataLength)));
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeCalibrationDataSetReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeCalibrationDataSetReq *primitive = kmalloc(sizeof(CsrWifiSmeCalibrationDataSetReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->calibrationDataLength, buffer, &offset);
- if (primitive->calibrationDataLength)
- {
- primitive->calibrationData = kmalloc(primitive->calibrationDataLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->calibrationData, buffer, &offset, ((u16) (primitive->calibrationDataLength)));
- }
- else
- {
- primitive->calibrationData = NULL;
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmeCalibrationDataSetReqSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeCalibrationDataSetReq *primitive = (CsrWifiSmeCalibrationDataSetReq *) voidPrimitivePointer;
- kfree(primitive->calibrationData);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeCcxConfigSetReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 9) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 1; /* u8 primitive->ccxConfig.keepAliveTimeMs */
- bufferSize += 1; /* u8 primitive->ccxConfig.apRoamingEnabled */
- bufferSize += 1; /* u8 primitive->ccxConfig.measurementsMask */
- bufferSize += 1; /* u8 primitive->ccxConfig.ccxRadioMgtEnabled */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeCcxConfigSetReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeCcxConfigSetReq *primitive = (CsrWifiSmeCcxConfigSetReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint8Ser(ptr, len, (u8) primitive->ccxConfig.keepAliveTimeMs);
- CsrUint8Ser(ptr, len, (u8) primitive->ccxConfig.apRoamingEnabled);
- CsrUint8Ser(ptr, len, (u8) primitive->ccxConfig.measurementsMask);
- CsrUint8Ser(ptr, len, (u8) primitive->ccxConfig.ccxRadioMgtEnabled);
- return(ptr);
-}
-
-
-void* CsrWifiSmeCcxConfigSetReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeCcxConfigSetReq *primitive = kmalloc(sizeof(CsrWifiSmeCcxConfigSetReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->ccxConfig.keepAliveTimeMs, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->ccxConfig.apRoamingEnabled, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->ccxConfig.measurementsMask, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->ccxConfig.ccxRadioMgtEnabled, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeCoexConfigSetReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 29) */
- bufferSize += 1; /* u8 primitive->coexConfig.coexEnableSchemeManagement */
- bufferSize += 1; /* u8 primitive->coexConfig.coexPeriodicWakeHost */
- bufferSize += 2; /* u16 primitive->coexConfig.coexTrafficBurstyLatencyMs */
- bufferSize += 2; /* u16 primitive->coexConfig.coexTrafficContinuousLatencyMs */
- bufferSize += 2; /* u16 primitive->coexConfig.coexObexBlackoutDurationMs */
- bufferSize += 2; /* u16 primitive->coexConfig.coexObexBlackoutPeriodMs */
- bufferSize += 2; /* u16 primitive->coexConfig.coexA2dpBrBlackoutDurationMs */
- bufferSize += 2; /* u16 primitive->coexConfig.coexA2dpBrBlackoutPeriodMs */
- bufferSize += 2; /* u16 primitive->coexConfig.coexA2dpEdrBlackoutDurationMs */
- bufferSize += 2; /* u16 primitive->coexConfig.coexA2dpEdrBlackoutPeriodMs */
- bufferSize += 2; /* u16 primitive->coexConfig.coexPagingBlackoutDurationMs */
- bufferSize += 2; /* u16 primitive->coexConfig.coexPagingBlackoutPeriodMs */
- bufferSize += 2; /* u16 primitive->coexConfig.coexInquiryBlackoutDurationMs */
- bufferSize += 2; /* u16 primitive->coexConfig.coexInquiryBlackoutPeriodMs */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeCoexConfigSetReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeCoexConfigSetReq *primitive = (CsrWifiSmeCoexConfigSetReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint8Ser(ptr, len, (u8) primitive->coexConfig.coexEnableSchemeManagement);
- CsrUint8Ser(ptr, len, (u8) primitive->coexConfig.coexPeriodicWakeHost);
- CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexTrafficBurstyLatencyMs);
- CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexTrafficContinuousLatencyMs);
- CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexObexBlackoutDurationMs);
- CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexObexBlackoutPeriodMs);
- CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexA2dpBrBlackoutDurationMs);
- CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexA2dpBrBlackoutPeriodMs);
- CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexA2dpEdrBlackoutDurationMs);
- CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexA2dpEdrBlackoutPeriodMs);
- CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexPagingBlackoutDurationMs);
- CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexPagingBlackoutPeriodMs);
- CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexInquiryBlackoutDurationMs);
- CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexInquiryBlackoutPeriodMs);
- return(ptr);
-}
-
-
-void* CsrWifiSmeCoexConfigSetReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeCoexConfigSetReq *primitive = kmalloc(sizeof(CsrWifiSmeCoexConfigSetReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->coexConfig.coexEnableSchemeManagement, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->coexConfig.coexPeriodicWakeHost, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexConfig.coexTrafficBurstyLatencyMs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexConfig.coexTrafficContinuousLatencyMs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexConfig.coexObexBlackoutDurationMs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexConfig.coexObexBlackoutPeriodMs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexConfig.coexA2dpBrBlackoutDurationMs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexConfig.coexA2dpBrBlackoutPeriodMs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexConfig.coexA2dpEdrBlackoutDurationMs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexConfig.coexA2dpEdrBlackoutPeriodMs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexConfig.coexPagingBlackoutDurationMs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexConfig.coexPagingBlackoutPeriodMs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexConfig.coexInquiryBlackoutDurationMs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexConfig.coexInquiryBlackoutPeriodMs, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeConnectReqSizeof(void *msg)
-{
- CsrWifiSmeConnectReq *primitive = (CsrWifiSmeConnectReq *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 57) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 32; /* u8 primitive->connectionConfig.ssid.ssid[32] */
- bufferSize += 1; /* u8 primitive->connectionConfig.ssid.length */
- bufferSize += 6; /* u8 primitive->connectionConfig.bssid.a[6] */
- bufferSize += 1; /* CsrWifiSmeBssType primitive->connectionConfig.bssType */
- bufferSize += 1; /* CsrWifiSmeRadioIF primitive->connectionConfig.ifIndex */
- bufferSize += 1; /* CsrWifiSme80211PrivacyMode primitive->connectionConfig.privacyMode */
- bufferSize += 2; /* CsrWifiSmeAuthModeMask primitive->connectionConfig.authModeMask */
- bufferSize += 2; /* CsrWifiSmeEncryptionMask primitive->connectionConfig.encryptionModeMask */
- bufferSize += 2; /* u16 primitive->connectionConfig.mlmeAssociateReqInformationElementsLength */
- bufferSize += primitive->connectionConfig.mlmeAssociateReqInformationElementsLength; /* u8 primitive->connectionConfig.mlmeAssociateReqInformationElements */
- bufferSize += 1; /* CsrWifiSmeWmmQosInfoMask primitive->connectionConfig.wmmQosInfo */
- bufferSize += 1; /* u8 primitive->connectionConfig.adhocJoinOnly */
- bufferSize += 1; /* u8 primitive->connectionConfig.adhocChannel */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeConnectReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeConnectReq *primitive = (CsrWifiSmeConnectReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionConfig.ssid.ssid, ((u16) (32)));
- CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.ssid.length);
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionConfig.bssid.a, ((u16) (6)));
- CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.bssType);
- CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.ifIndex);
- CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.privacyMode);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionConfig.authModeMask);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionConfig.encryptionModeMask);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionConfig.mlmeAssociateReqInformationElementsLength);
- if (primitive->connectionConfig.mlmeAssociateReqInformationElementsLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionConfig.mlmeAssociateReqInformationElements, ((u16) (primitive->connectionConfig.mlmeAssociateReqInformationElementsLength)));
- }
- CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.wmmQosInfo);
- CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.adhocJoinOnly);
- CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.adhocChannel);
- return(ptr);
-}
-
-
-void* CsrWifiSmeConnectReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeConnectReq *primitive = kmalloc(sizeof(CsrWifiSmeConnectReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrMemCpyDes(primitive->connectionConfig.ssid.ssid, buffer, &offset, ((u16) (32)));
- CsrUint8Des((u8 *) &primitive->connectionConfig.ssid.length, buffer, &offset);
- CsrMemCpyDes(primitive->connectionConfig.bssid.a, buffer, &offset, ((u16) (6)));
- CsrUint8Des((u8 *) &primitive->connectionConfig.bssType, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->connectionConfig.ifIndex, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->connectionConfig.privacyMode, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionConfig.authModeMask, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionConfig.encryptionModeMask, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionConfig.mlmeAssociateReqInformationElementsLength, buffer, &offset);
- if (primitive->connectionConfig.mlmeAssociateReqInformationElementsLength)
- {
- primitive->connectionConfig.mlmeAssociateReqInformationElements = kmalloc(primitive->connectionConfig.mlmeAssociateReqInformationElementsLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->connectionConfig.mlmeAssociateReqInformationElements, buffer, &offset, ((u16) (primitive->connectionConfig.mlmeAssociateReqInformationElementsLength)));
- }
- else
- {
- primitive->connectionConfig.mlmeAssociateReqInformationElements = NULL;
- }
- CsrUint8Des((u8 *) &primitive->connectionConfig.wmmQosInfo, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->connectionConfig.adhocJoinOnly, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->connectionConfig.adhocChannel, buffer, &offset);
-
- return primitive;
-}
-
-
-void CsrWifiSmeConnectReqSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeConnectReq *primitive = (CsrWifiSmeConnectReq *) voidPrimitivePointer;
- kfree(primitive->connectionConfig.mlmeAssociateReqInformationElements);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeHostConfigSetReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 8) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 1; /* CsrWifiSmeHostPowerMode primitive->hostConfig.powerMode */
- bufferSize += 2; /* u16 primitive->hostConfig.applicationDataPeriodMs */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeHostConfigSetReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeHostConfigSetReq *primitive = (CsrWifiSmeHostConfigSetReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint8Ser(ptr, len, (u8) primitive->hostConfig.powerMode);
- CsrUint16Ser(ptr, len, (u16) primitive->hostConfig.applicationDataPeriodMs);
- return(ptr);
-}
-
-
-void* CsrWifiSmeHostConfigSetReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeHostConfigSetReq *primitive = kmalloc(sizeof(CsrWifiSmeHostConfigSetReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->hostConfig.powerMode, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->hostConfig.applicationDataPeriodMs, buffer, &offset);
-
- return primitive;
-}
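For reference, a minimal sketch of how a caller might round-trip one of the fixed-size primitives through the generated helpers above (Sizeof to size the buffer, Ser to pack it, Des to rebuild the structure). Only the Csr*Ser/Des helpers and kmalloc/kfree come from the code in this file; the wrapper function name is hypothetical, and kmalloc failure handling is omitted here just as it is in the generated code.

    static void example_host_config_roundtrip(CsrWifiSmeHostConfigSetReq *req)
    {
        size_t size = CsrWifiSmeHostConfigSetReqSizeof(req);
        size_t written = 0;
        u8 *buffer = kmalloc(size, GFP_KERNEL);
        CsrWifiSmeHostConfigSetReq *copy;

        /* Ser resets *written to 0 itself and advances it as each field is
         * packed, so written == size on return. */
        CsrWifiSmeHostConfigSetReqSer(buffer, &written, req);
        copy = CsrWifiSmeHostConfigSetReqDes(buffer, written);

        /* ... use copy->hostConfig.powerMode etc. ... */

        kfree(copy);   /* no SerFree variant exists: nothing is nested */
        kfree(buffer);
    }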
-
-
-size_t CsrWifiSmeKeyReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 65) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 1; /* CsrWifiSmeListAction primitive->action */
- bufferSize += 1; /* CsrWifiSmeKeyType primitive->key.keyType */
- bufferSize += 1; /* u8 primitive->key.keyIndex */
- bufferSize += 1; /* u8 primitive->key.wepTxKey */
- {
- u16 i2;
- for (i2 = 0; i2 < 8; i2++)
- {
- bufferSize += 2; /* u16 primitive->key.keyRsc[8] */
- }
- }
- bufferSize += 1; /* u8 primitive->key.authenticator */
- bufferSize += 6; /* u8 primitive->key.address.a[6] */
- bufferSize += 1; /* u8 primitive->key.keyLength */
- bufferSize += 32; /* u8 primitive->key.key[32] */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeKeyReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeKeyReq *primitive = (CsrWifiSmeKeyReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint8Ser(ptr, len, (u8) primitive->action);
- CsrUint8Ser(ptr, len, (u8) primitive->key.keyType);
- CsrUint8Ser(ptr, len, (u8) primitive->key.keyIndex);
- CsrUint8Ser(ptr, len, (u8) primitive->key.wepTxKey);
- {
- u16 i2;
- for (i2 = 0; i2 < 8; i2++)
- {
- CsrUint16Ser(ptr, len, (u16) primitive->key.keyRsc[i2]);
- }
- }
- CsrUint8Ser(ptr, len, (u8) primitive->key.authenticator);
- CsrMemCpySer(ptr, len, (const void *) primitive->key.address.a, ((u16) (6)));
- CsrUint8Ser(ptr, len, (u8) primitive->key.keyLength);
- CsrMemCpySer(ptr, len, (const void *) primitive->key.key, ((u16) (32)));
- return(ptr);
-}
-
-
-void* CsrWifiSmeKeyReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeKeyReq *primitive = kmalloc(sizeof(CsrWifiSmeKeyReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->action, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->key.keyType, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->key.keyIndex, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->key.wepTxKey, buffer, &offset);
- {
- u16 i2;
- for (i2 = 0; i2 < 8; i2++)
- {
- CsrUint16Des((u16 *) &primitive->key.keyRsc[i2], buffer, &offset);
- }
- }
- CsrUint8Des((u8 *) &primitive->key.authenticator, buffer, &offset);
- CsrMemCpyDes(primitive->key.address.a, buffer, &offset, ((u16) (6)));
- CsrUint8Des((u8 *) &primitive->key.keyLength, buffer, &offset);
- CsrMemCpyDes(primitive->key.key, buffer, &offset, ((u16) (32)));
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeMibConfigSetReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 11) */
- bufferSize += 1; /* u8 primitive->mibConfig.unifiFixMaxTxDataRate */
- bufferSize += 1; /* u8 primitive->mibConfig.unifiFixTxDataRate */
- bufferSize += 2; /* u16 primitive->mibConfig.dot11RtsThreshold */
- bufferSize += 2; /* u16 primitive->mibConfig.dot11FragmentationThreshold */
- bufferSize += 2; /* u16 primitive->mibConfig.dot11CurrentTxPowerLevel */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeMibConfigSetReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeMibConfigSetReq *primitive = (CsrWifiSmeMibConfigSetReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint8Ser(ptr, len, (u8) primitive->mibConfig.unifiFixMaxTxDataRate);
- CsrUint8Ser(ptr, len, (u8) primitive->mibConfig.unifiFixTxDataRate);
- CsrUint16Ser(ptr, len, (u16) primitive->mibConfig.dot11RtsThreshold);
- CsrUint16Ser(ptr, len, (u16) primitive->mibConfig.dot11FragmentationThreshold);
- CsrUint16Ser(ptr, len, (u16) primitive->mibConfig.dot11CurrentTxPowerLevel);
- return(ptr);
-}
-
-
-void* CsrWifiSmeMibConfigSetReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeMibConfigSetReq *primitive = kmalloc(sizeof(CsrWifiSmeMibConfigSetReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->mibConfig.unifiFixMaxTxDataRate, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->mibConfig.unifiFixTxDataRate, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->mibConfig.dot11RtsThreshold, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->mibConfig.dot11FragmentationThreshold, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->mibConfig.dot11CurrentTxPowerLevel, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeMibGetNextReqSizeof(void *msg)
-{
- CsrWifiSmeMibGetNextReq *primitive = (CsrWifiSmeMibGetNextReq *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 6) */
- bufferSize += 2; /* u16 primitive->mibAttributeLength */
- bufferSize += primitive->mibAttributeLength; /* u8 primitive->mibAttribute */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeMibGetNextReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeMibGetNextReq *primitive = (CsrWifiSmeMibGetNextReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->mibAttributeLength);
- if (primitive->mibAttributeLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->mibAttribute, ((u16) (primitive->mibAttributeLength)));
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeMibGetNextReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeMibGetNextReq *primitive = kmalloc(sizeof(CsrWifiSmeMibGetNextReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->mibAttributeLength, buffer, &offset);
- if (primitive->mibAttributeLength)
- {
- primitive->mibAttribute = kmalloc(primitive->mibAttributeLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->mibAttribute, buffer, &offset, ((u16) (primitive->mibAttributeLength)));
- }
- else
- {
- primitive->mibAttribute = NULL;
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmeMibGetNextReqSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeMibGetNextReq *primitive = (CsrWifiSmeMibGetNextReq *) voidPrimitivePointer;
- kfree(primitive->mibAttribute);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeMibGetReqSizeof(void *msg)
-{
- CsrWifiSmeMibGetReq *primitive = (CsrWifiSmeMibGetReq *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 6) */
- bufferSize += 2; /* u16 primitive->mibAttributeLength */
- bufferSize += primitive->mibAttributeLength; /* u8 primitive->mibAttribute */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeMibGetReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeMibGetReq *primitive = (CsrWifiSmeMibGetReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->mibAttributeLength);
- if (primitive->mibAttributeLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->mibAttribute, ((u16) (primitive->mibAttributeLength)));
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeMibGetReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeMibGetReq *primitive = kmalloc(sizeof(CsrWifiSmeMibGetReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->mibAttributeLength, buffer, &offset);
- if (primitive->mibAttributeLength)
- {
- primitive->mibAttribute = kmalloc(primitive->mibAttributeLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->mibAttribute, buffer, &offset, ((u16) (primitive->mibAttributeLength)));
- }
- else
- {
- primitive->mibAttribute = NULL;
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmeMibGetReqSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeMibGetReq *primitive = (CsrWifiSmeMibGetReq *) voidPrimitivePointer;
- kfree(primitive->mibAttribute);
- kfree(primitive);
-}
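The MIB get/set primitives in this file all use the same variable-length layout: a u16 byte count on the wire, followed by the attribute bytes only when the count is non-zero, with the deserialiser kmalloc'ing a separate copy for mibAttribute. A sketch of the matching consumer side, assuming the same includes as this file; the wrapper and process_mib_attribute() are hypothetical:

    /* hypothetical consumer, assumed to be defined elsewhere */
    extern void process_mib_attribute(const u8 *attr, u16 len);

    static void example_consume_mib_get(u8 *buffer, size_t length)
    {
        CsrWifiSmeMibGetReq *req = CsrWifiSmeMibGetReqDes(buffer, length);

        if (req->mibAttributeLength)
            process_mib_attribute(req->mibAttribute,
                                  req->mibAttributeLength);

        /* SerFree rather than plain kfree(req): it releases the kmalloc'd
         * mibAttribute first and then the primitive itself. */
        CsrWifiSmeMibGetReqSerFree(req);
    }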
-
-
-size_t CsrWifiSmeMibSetReqSizeof(void *msg)
-{
- CsrWifiSmeMibSetReq *primitive = (CsrWifiSmeMibSetReq *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 6) */
- bufferSize += 2; /* u16 primitive->mibAttributeLength */
- bufferSize += primitive->mibAttributeLength; /* u8 primitive->mibAttribute */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeMibSetReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeMibSetReq *primitive = (CsrWifiSmeMibSetReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->mibAttributeLength);
- if (primitive->mibAttributeLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->mibAttribute, ((u16) (primitive->mibAttributeLength)));
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeMibSetReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeMibSetReq *primitive = kmalloc(sizeof(CsrWifiSmeMibSetReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->mibAttributeLength, buffer, &offset);
- if (primitive->mibAttributeLength)
- {
- primitive->mibAttribute = kmalloc(primitive->mibAttributeLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->mibAttribute, buffer, &offset, ((u16) (primitive->mibAttributeLength)));
- }
- else
- {
- primitive->mibAttribute = NULL;
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmeMibSetReqSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeMibSetReq *primitive = (CsrWifiSmeMibSetReq *) voidPrimitivePointer;
- kfree(primitive->mibAttribute);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeMulticastAddressReqSizeof(void *msg)
-{
- CsrWifiSmeMulticastAddressReq *primitive = (CsrWifiSmeMulticastAddressReq *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 13) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 1; /* CsrWifiSmeListAction primitive->action */
- bufferSize += 1; /* u8 primitive->setAddressesCount */
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->setAddressesCount; i1++)
- {
- bufferSize += 6; /* u8 primitive->setAddresses[i1].a[6] */
- }
- }
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeMulticastAddressReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeMulticastAddressReq *primitive = (CsrWifiSmeMulticastAddressReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint8Ser(ptr, len, (u8) primitive->action);
- CsrUint8Ser(ptr, len, (u8) primitive->setAddressesCount);
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->setAddressesCount; i1++)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->setAddresses[i1].a, ((u16) (6)));
- }
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeMulticastAddressReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeMulticastAddressReq *primitive = kmalloc(sizeof(CsrWifiSmeMulticastAddressReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->action, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->setAddressesCount, buffer, &offset);
- primitive->setAddresses = NULL;
- if (primitive->setAddressesCount)
- {
- primitive->setAddresses = kmalloc(sizeof(CsrWifiMacAddress) * primitive->setAddressesCount, GFP_KERNEL);
- }
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->setAddressesCount; i1++)
- {
- CsrMemCpyDes(primitive->setAddresses[i1].a, buffer, &offset, ((u16) (6)));
- }
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmeMulticastAddressReqSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeMulticastAddressReq *primitive = (CsrWifiSmeMulticastAddressReq *) voidPrimitivePointer;
- kfree(primitive->setAddresses);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmePacketFilterSetReqSizeof(void *msg)
-{
- CsrWifiSmePacketFilterSetReq *primitive = (CsrWifiSmePacketFilterSetReq *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 13) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* u16 primitive->filterLength */
- bufferSize += primitive->filterLength; /* u8 primitive->filter */
- bufferSize += 1; /* CsrWifiSmePacketFilterMode primitive->mode */
- bufferSize += 4; /* u8 primitive->arpFilterAddress.a[4] */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmePacketFilterSetReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmePacketFilterSetReq *primitive = (CsrWifiSmePacketFilterSetReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->filterLength);
- if (primitive->filterLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->filter, ((u16) (primitive->filterLength)));
- }
- CsrUint8Ser(ptr, len, (u8) primitive->mode);
- CsrMemCpySer(ptr, len, (const void *) primitive->arpFilterAddress.a, ((u16) (4)));
- return(ptr);
-}
-
-
-void* CsrWifiSmePacketFilterSetReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmePacketFilterSetReq *primitive = kmalloc(sizeof(CsrWifiSmePacketFilterSetReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->filterLength, buffer, &offset);
- if (primitive->filterLength)
- {
- primitive->filter = kmalloc(primitive->filterLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->filter, buffer, &offset, ((u16) (primitive->filterLength)));
- }
- else
- {
- primitive->filter = NULL;
- }
- CsrUint8Des((u8 *) &primitive->mode, buffer, &offset);
- CsrMemCpyDes(primitive->arpFilterAddress.a, buffer, &offset, ((u16) (4)));
-
- return primitive;
-}
-
-
-void CsrWifiSmePacketFilterSetReqSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmePacketFilterSetReq *primitive = (CsrWifiSmePacketFilterSetReq *) voidPrimitivePointer;
- kfree(primitive->filter);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmePmkidReqSizeof(void *msg)
-{
- CsrWifiSmePmkidReq *primitive = (CsrWifiSmePmkidReq *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 29) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 1; /* CsrWifiSmeListAction primitive->action */
- bufferSize += 1; /* u8 primitive->setPmkidsCount */
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->setPmkidsCount; i1++)
- {
- bufferSize += 6; /* u8 primitive->setPmkids[i1].bssid.a[6] */
- bufferSize += 16; /* u8 primitive->setPmkids[i1].pmkid[16] */
- }
- }
- return bufferSize;
-}
-
-
-u8* CsrWifiSmePmkidReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmePmkidReq *primitive = (CsrWifiSmePmkidReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint8Ser(ptr, len, (u8) primitive->action);
- CsrUint8Ser(ptr, len, (u8) primitive->setPmkidsCount);
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->setPmkidsCount; i1++)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->setPmkids[i1].bssid.a, ((u16) (6)));
- CsrMemCpySer(ptr, len, (const void *) primitive->setPmkids[i1].pmkid, ((u16) (16)));
- }
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmePmkidReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmePmkidReq *primitive = kmalloc(sizeof(CsrWifiSmePmkidReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->action, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->setPmkidsCount, buffer, &offset);
- primitive->setPmkids = NULL;
- if (primitive->setPmkidsCount)
- {
- primitive->setPmkids = kmalloc(sizeof(CsrWifiSmePmkid) * primitive->setPmkidsCount, GFP_KERNEL);
- }
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->setPmkidsCount; i1++)
- {
- CsrMemCpyDes(primitive->setPmkids[i1].bssid.a, buffer, &offset, ((u16) (6)));
- CsrMemCpyDes(primitive->setPmkids[i1].pmkid, buffer, &offset, ((u16) (16)));
- }
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmePmkidReqSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmePmkidReq *primitive = (CsrWifiSmePmkidReq *) voidPrimitivePointer;
- kfree(primitive->setPmkids);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmePowerConfigSetReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 11) */
- bufferSize += 1; /* CsrWifiSmePowerSaveLevel primitive->powerConfig.powerSaveLevel */
- bufferSize += 2; /* u16 primitive->powerConfig.listenIntervalTu */
- bufferSize += 1; /* u8 primitive->powerConfig.rxDtims */
- bufferSize += 1; /* CsrWifiSmeD3AutoScanMode primitive->powerConfig.d3AutoScanMode */
- bufferSize += 1; /* u8 primitive->powerConfig.clientTrafficWindow */
- bufferSize += 1; /* u8 primitive->powerConfig.opportunisticPowerSave */
- bufferSize += 1; /* u8 primitive->powerConfig.noticeOfAbsence */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmePowerConfigSetReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmePowerConfigSetReq *primitive = (CsrWifiSmePowerConfigSetReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint8Ser(ptr, len, (u8) primitive->powerConfig.powerSaveLevel);
- CsrUint16Ser(ptr, len, (u16) primitive->powerConfig.listenIntervalTu);
- CsrUint8Ser(ptr, len, (u8) primitive->powerConfig.rxDtims);
- CsrUint8Ser(ptr, len, (u8) primitive->powerConfig.d3AutoScanMode);
- CsrUint8Ser(ptr, len, (u8) primitive->powerConfig.clientTrafficWindow);
- CsrUint8Ser(ptr, len, (u8) primitive->powerConfig.opportunisticPowerSave);
- CsrUint8Ser(ptr, len, (u8) primitive->powerConfig.noticeOfAbsence);
- return(ptr);
-}
-
-
-void* CsrWifiSmePowerConfigSetReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmePowerConfigSetReq *primitive = kmalloc(sizeof(CsrWifiSmePowerConfigSetReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->powerConfig.powerSaveLevel, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->powerConfig.listenIntervalTu, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->powerConfig.rxDtims, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->powerConfig.d3AutoScanMode, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->powerConfig.clientTrafficWindow, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->powerConfig.opportunisticPowerSave, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->powerConfig.noticeOfAbsence, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeRoamingConfigSetReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 70) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- {
- u16 i2;
- for (i2 = 0; i2 < 3; i2++)
- {
- bufferSize += 2; /* s16 primitive->roamingConfig.roamingBands[i2].rssiHighThreshold */
- bufferSize += 2; /* s16 primitive->roamingConfig.roamingBands[i2].rssiLowThreshold */
- bufferSize += 2; /* s16 primitive->roamingConfig.roamingBands[i2].snrHighThreshold */
- bufferSize += 2; /* s16 primitive->roamingConfig.roamingBands[i2].snrLowThreshold */
- }
- }
- bufferSize += 1; /* u8 primitive->roamingConfig.disableSmoothRoaming */
- bufferSize += 1; /* u8 primitive->roamingConfig.disableRoamScans */
- bufferSize += 1; /* u8 primitive->roamingConfig.reconnectLimit */
- bufferSize += 2; /* u16 primitive->roamingConfig.reconnectLimitIntervalMs */
- {
- u16 i2;
- for (i2 = 0; i2 < 3; i2++)
- {
- bufferSize += 2; /* u16 primitive->roamingConfig.roamScanCfg[i2].intervalSeconds */
- bufferSize += 2; /* u16 primitive->roamingConfig.roamScanCfg[i2].validitySeconds */
- bufferSize += 2; /* u16 primitive->roamingConfig.roamScanCfg[i2].minActiveChannelTimeTu */
- bufferSize += 2; /* u16 primitive->roamingConfig.roamScanCfg[i2].maxActiveChannelTimeTu */
- bufferSize += 2; /* u16 primitive->roamingConfig.roamScanCfg[i2].minPassiveChannelTimeTu */
- bufferSize += 2; /* u16 primitive->roamingConfig.roamScanCfg[i2].maxPassiveChannelTimeTu */
- }
- }
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeRoamingConfigSetReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeRoamingConfigSetReq *primitive = (CsrWifiSmeRoamingConfigSetReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- {
- u16 i2;
- for (i2 = 0; i2 < 3; i2++)
- {
- CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamingBands[i2].rssiHighThreshold);
- CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamingBands[i2].rssiLowThreshold);
- CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamingBands[i2].snrHighThreshold);
- CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamingBands[i2].snrLowThreshold);
- }
- }
- CsrUint8Ser(ptr, len, (u8) primitive->roamingConfig.disableSmoothRoaming);
- CsrUint8Ser(ptr, len, (u8) primitive->roamingConfig.disableRoamScans);
- CsrUint8Ser(ptr, len, (u8) primitive->roamingConfig.reconnectLimit);
- CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.reconnectLimitIntervalMs);
- {
- u16 i2;
- for (i2 = 0; i2 < 3; i2++)
- {
- CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamScanCfg[i2].intervalSeconds);
- CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamScanCfg[i2].validitySeconds);
- CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamScanCfg[i2].minActiveChannelTimeTu);
- CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamScanCfg[i2].maxActiveChannelTimeTu);
- CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamScanCfg[i2].minPassiveChannelTimeTu);
- CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamScanCfg[i2].maxPassiveChannelTimeTu);
- }
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeRoamingConfigSetReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeRoamingConfigSetReq *primitive = kmalloc(sizeof(CsrWifiSmeRoamingConfigSetReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- {
- u16 i2;
- for (i2 = 0; i2 < 3; i2++)
- {
- CsrUint16Des((u16 *) &primitive->roamingConfig.roamingBands[i2].rssiHighThreshold, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->roamingConfig.roamingBands[i2].rssiLowThreshold, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->roamingConfig.roamingBands[i2].snrHighThreshold, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->roamingConfig.roamingBands[i2].snrLowThreshold, buffer, &offset);
- }
- }
- CsrUint8Des((u8 *) &primitive->roamingConfig.disableSmoothRoaming, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->roamingConfig.disableRoamScans, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->roamingConfig.reconnectLimit, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->roamingConfig.reconnectLimitIntervalMs, buffer, &offset);
- {
- u16 i2;
- for (i2 = 0; i2 < 3; i2++)
- {
- CsrUint16Des((u16 *) &primitive->roamingConfig.roamScanCfg[i2].intervalSeconds, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->roamingConfig.roamScanCfg[i2].validitySeconds, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->roamingConfig.roamScanCfg[i2].minActiveChannelTimeTu, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->roamingConfig.roamScanCfg[i2].maxActiveChannelTimeTu, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->roamingConfig.roamScanCfg[i2].minPassiveChannelTimeTu, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->roamingConfig.roamScanCfg[i2].maxPassiveChannelTimeTu, buffer, &offset);
- }
- }
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeScanConfigSetReqSizeof(void *msg)
-{
- CsrWifiSmeScanConfigSetReq *primitive = (CsrWifiSmeScanConfigSetReq *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 63) */
- {
- u16 i2;
- for (i2 = 0; i2 < 4; i2++)
- {
- bufferSize += 2; /* u16 primitive->scanConfig.scanCfg[i2].intervalSeconds */
- bufferSize += 2; /* u16 primitive->scanConfig.scanCfg[i2].validitySeconds */
- bufferSize += 2; /* u16 primitive->scanConfig.scanCfg[i2].minActiveChannelTimeTu */
- bufferSize += 2; /* u16 primitive->scanConfig.scanCfg[i2].maxActiveChannelTimeTu */
- bufferSize += 2; /* u16 primitive->scanConfig.scanCfg[i2].minPassiveChannelTimeTu */
- bufferSize += 2; /* u16 primitive->scanConfig.scanCfg[i2].maxPassiveChannelTimeTu */
- }
- }
- bufferSize += 1; /* u8 primitive->scanConfig.disableAutonomousScans */
- bufferSize += 2; /* u16 primitive->scanConfig.maxResults */
- bufferSize += 1; /* s8 primitive->scanConfig.highRssiThreshold */
- bufferSize += 1; /* s8 primitive->scanConfig.lowRssiThreshold */
- bufferSize += 1; /* s8 primitive->scanConfig.deltaRssiThreshold */
- bufferSize += 1; /* s8 primitive->scanConfig.highSnrThreshold */
- bufferSize += 1; /* s8 primitive->scanConfig.lowSnrThreshold */
- bufferSize += 1; /* s8 primitive->scanConfig.deltaSnrThreshold */
- bufferSize += 2; /* u16 primitive->scanConfig.passiveChannelListCount */
- bufferSize += primitive->scanConfig.passiveChannelListCount; /* u8 primitive->scanConfig.passiveChannelList */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeScanConfigSetReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeScanConfigSetReq *primitive = (CsrWifiSmeScanConfigSetReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- {
- u16 i2;
- for (i2 = 0; i2 < 4; i2++)
- {
- CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.scanCfg[i2].intervalSeconds);
- CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.scanCfg[i2].validitySeconds);
- CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.scanCfg[i2].minActiveChannelTimeTu);
- CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.scanCfg[i2].maxActiveChannelTimeTu);
- CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.scanCfg[i2].minPassiveChannelTimeTu);
- CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.scanCfg[i2].maxPassiveChannelTimeTu);
- }
- }
- CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.disableAutonomousScans);
- CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.maxResults);
- CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.highRssiThreshold);
- CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.lowRssiThreshold);
- CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.deltaRssiThreshold);
- CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.highSnrThreshold);
- CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.lowSnrThreshold);
- CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.deltaSnrThreshold);
- CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.passiveChannelListCount);
- if (primitive->scanConfig.passiveChannelListCount)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->scanConfig.passiveChannelList, ((u16) (primitive->scanConfig.passiveChannelListCount)));
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeScanConfigSetReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeScanConfigSetReq *primitive = kmalloc(sizeof(CsrWifiSmeScanConfigSetReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- {
- u16 i2;
- for (i2 = 0; i2 < 4; i2++)
- {
- CsrUint16Des((u16 *) &primitive->scanConfig.scanCfg[i2].intervalSeconds, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->scanConfig.scanCfg[i2].validitySeconds, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->scanConfig.scanCfg[i2].minActiveChannelTimeTu, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->scanConfig.scanCfg[i2].maxActiveChannelTimeTu, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->scanConfig.scanCfg[i2].minPassiveChannelTimeTu, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->scanConfig.scanCfg[i2].maxPassiveChannelTimeTu, buffer, &offset);
- }
- }
- CsrUint8Des((u8 *) &primitive->scanConfig.disableAutonomousScans, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->scanConfig.maxResults, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->scanConfig.highRssiThreshold, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->scanConfig.lowRssiThreshold, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->scanConfig.deltaRssiThreshold, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->scanConfig.highSnrThreshold, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->scanConfig.lowSnrThreshold, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->scanConfig.deltaSnrThreshold, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->scanConfig.passiveChannelListCount, buffer, &offset);
- if (primitive->scanConfig.passiveChannelListCount)
- {
- primitive->scanConfig.passiveChannelList = kmalloc(primitive->scanConfig.passiveChannelListCount, GFP_KERNEL);
- CsrMemCpyDes(primitive->scanConfig.passiveChannelList, buffer, &offset, ((u16) (primitive->scanConfig.passiveChannelListCount)));
- }
- else
- {
- primitive->scanConfig.passiveChannelList = NULL;
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmeScanConfigSetReqSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeScanConfigSetReq *primitive = (CsrWifiSmeScanConfigSetReq *) voidPrimitivePointer;
- kfree(primitive->scanConfig.passiveChannelList);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeScanFullReqSizeof(void *msg)
-{
- CsrWifiSmeScanFullReq *primitive = (CsrWifiSmeScanFullReq *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 52) */
- bufferSize += 1; /* u8 primitive->ssidCount */
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->ssidCount; i1++)
- {
- bufferSize += 32; /* u8 primitive->ssid[i1].ssid[32] */
- bufferSize += 1; /* u8 primitive->ssid[i1].length */
- }
- }
- bufferSize += 6; /* u8 primitive->bssid.a[6] */
- bufferSize += 1; /* u8 primitive->forceScan */
- bufferSize += 1; /* CsrWifiSmeBssType primitive->bssType */
- bufferSize += 1; /* CsrWifiSmeScanType primitive->scanType */
- bufferSize += 2; /* u16 primitive->channelListCount */
- bufferSize += primitive->channelListCount; /* u8 primitive->channelList */
- bufferSize += 2; /* u16 primitive->probeIeLength */
- bufferSize += primitive->probeIeLength; /* u8 primitive->probeIe */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeScanFullReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeScanFullReq *primitive = (CsrWifiSmeScanFullReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint8Ser(ptr, len, (u8) primitive->ssidCount);
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->ssidCount; i1++)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->ssid[i1].ssid, ((u16) (32)));
- CsrUint8Ser(ptr, len, (u8) primitive->ssid[i1].length);
- }
- }
- CsrMemCpySer(ptr, len, (const void *) primitive->bssid.a, ((u16) (6)));
- CsrUint8Ser(ptr, len, (u8) primitive->forceScan);
- CsrUint8Ser(ptr, len, (u8) primitive->bssType);
- CsrUint8Ser(ptr, len, (u8) primitive->scanType);
- CsrUint16Ser(ptr, len, (u16) primitive->channelListCount);
- if (primitive->channelListCount)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->channelList, ((u16) (primitive->channelListCount)));
- }
- CsrUint16Ser(ptr, len, (u16) primitive->probeIeLength);
- if (primitive->probeIeLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->probeIe, ((u16) (primitive->probeIeLength)));
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeScanFullReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeScanFullReq *primitive = kmalloc(sizeof(CsrWifiSmeScanFullReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->ssidCount, buffer, &offset);
- primitive->ssid = NULL;
- if (primitive->ssidCount)
- {
- primitive->ssid = kmalloc(sizeof(CsrWifiSsid) * primitive->ssidCount, GFP_KERNEL);
- }
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->ssidCount; i1++)
- {
- CsrMemCpyDes(primitive->ssid[i1].ssid, buffer, &offset, ((u16) (32)));
- CsrUint8Des((u8 *) &primitive->ssid[i1].length, buffer, &offset);
- }
- }
- CsrMemCpyDes(primitive->bssid.a, buffer, &offset, ((u16) (6)));
- CsrUint8Des((u8 *) &primitive->forceScan, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->bssType, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->scanType, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->channelListCount, buffer, &offset);
- if (primitive->channelListCount)
- {
- primitive->channelList = kmalloc(primitive->channelListCount, GFP_KERNEL);
- CsrMemCpyDes(primitive->channelList, buffer, &offset, ((u16) (primitive->channelListCount)));
- }
- else
- {
- primitive->channelList = NULL;
- }
- CsrUint16Des((u16 *) &primitive->probeIeLength, buffer, &offset);
- if (primitive->probeIeLength)
- {
- primitive->probeIe = kmalloc(primitive->probeIeLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->probeIe, buffer, &offset, ((u16) (primitive->probeIeLength)));
- }
- else
- {
- primitive->probeIe = NULL;
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmeScanFullReqSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeScanFullReq *primitive = (CsrWifiSmeScanFullReq *) voidPrimitivePointer;
- kfree(primitive->ssid);
- kfree(primitive->channelList);
- kfree(primitive->probeIe);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeSmeStaConfigSetReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 11) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 1; /* u8 primitive->smeConfig.connectionQualityRssiChangeTrigger */
- bufferSize += 1; /* u8 primitive->smeConfig.connectionQualitySnrChangeTrigger */
- bufferSize += 1; /* CsrWifiSmeWmmModeMask primitive->smeConfig.wmmModeMask */
- bufferSize += 1; /* CsrWifiSmeRadioIF primitive->smeConfig.ifIndex */
- bufferSize += 1; /* u8 primitive->smeConfig.allowUnicastUseGroupCipher */
- bufferSize += 1; /* u8 primitive->smeConfig.enableOpportunisticKeyCaching */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeSmeStaConfigSetReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeSmeStaConfigSetReq *primitive = (CsrWifiSmeSmeStaConfigSetReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint8Ser(ptr, len, (u8) primitive->smeConfig.connectionQualityRssiChangeTrigger);
- CsrUint8Ser(ptr, len, (u8) primitive->smeConfig.connectionQualitySnrChangeTrigger);
- CsrUint8Ser(ptr, len, (u8) primitive->smeConfig.wmmModeMask);
- CsrUint8Ser(ptr, len, (u8) primitive->smeConfig.ifIndex);
- CsrUint8Ser(ptr, len, (u8) primitive->smeConfig.allowUnicastUseGroupCipher);
- CsrUint8Ser(ptr, len, (u8) primitive->smeConfig.enableOpportunisticKeyCaching);
- return(ptr);
-}
-
-
-void* CsrWifiSmeSmeStaConfigSetReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeSmeStaConfigSetReq *primitive = kmalloc(sizeof(CsrWifiSmeSmeStaConfigSetReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->smeConfig.connectionQualityRssiChangeTrigger, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->smeConfig.connectionQualitySnrChangeTrigger, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->smeConfig.wmmModeMask, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->smeConfig.ifIndex, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->smeConfig.allowUnicastUseGroupCipher, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->smeConfig.enableOpportunisticKeyCaching, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeTspecReqSizeof(void *msg)
-{
- CsrWifiSmeTspecReq *primitive = (CsrWifiSmeTspecReq *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 18) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 1; /* CsrWifiSmeListAction primitive->action */
- bufferSize += 4; /* u32 primitive->transactionId */
- bufferSize += 1; /* u8 primitive->strict */
- bufferSize += 1; /* CsrWifiSmeTspecCtrlMask primitive->ctrlMask */
- bufferSize += 2; /* u16 primitive->tspecLength */
- bufferSize += primitive->tspecLength; /* u8 primitive->tspec */
- bufferSize += 2; /* u16 primitive->tclasLength */
- bufferSize += primitive->tclasLength; /* u8 primitive->tclas */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeTspecReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeTspecReq *primitive = (CsrWifiSmeTspecReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint8Ser(ptr, len, (u8) primitive->action);
- CsrUint32Ser(ptr, len, (u32) primitive->transactionId);
- CsrUint8Ser(ptr, len, (u8) primitive->strict);
- CsrUint8Ser(ptr, len, (u8) primitive->ctrlMask);
- CsrUint16Ser(ptr, len, (u16) primitive->tspecLength);
- if (primitive->tspecLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->tspec, ((u16) (primitive->tspecLength)));
- }
- CsrUint16Ser(ptr, len, (u16) primitive->tclasLength);
- if (primitive->tclasLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->tclas, ((u16) (primitive->tclasLength)));
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeTspecReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeTspecReq *primitive = kmalloc(sizeof(CsrWifiSmeTspecReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->action, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->transactionId, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->strict, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->ctrlMask, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->tspecLength, buffer, &offset);
- if (primitive->tspecLength)
- {
- primitive->tspec = kmalloc(primitive->tspecLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->tspec, buffer, &offset, ((u16) (primitive->tspecLength)));
- }
- else
- {
- primitive->tspec = NULL;
- }
- CsrUint16Des((u16 *) &primitive->tclasLength, buffer, &offset);
- if (primitive->tclasLength)
- {
- primitive->tclas = kmalloc(primitive->tclasLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->tclas, buffer, &offset, ((u16) (primitive->tclasLength)));
- }
- else
- {
- primitive->tclas = NULL;
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmeTspecReqSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeTspecReq *primitive = (CsrWifiSmeTspecReq *) voidPrimitivePointer;
- kfree(primitive->tspec);
- kfree(primitive->tclas);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeWifiFlightmodeReqSizeof(void *msg)
-{
- CsrWifiSmeWifiFlightmodeReq *primitive = (CsrWifiSmeWifiFlightmodeReq *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 14) */
- bufferSize += 6; /* u8 primitive->address.a[6] */
- bufferSize += 2; /* u16 primitive->mibFilesCount */
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->mibFilesCount; i1++)
- {
- bufferSize += 2; /* u16 primitive->mibFiles[i1].length */
- bufferSize += primitive->mibFiles[i1].length; /* u8 primitive->mibFiles[i1].data */
- }
- }
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeWifiFlightmodeReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeWifiFlightmodeReq *primitive = (CsrWifiSmeWifiFlightmodeReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrMemCpySer(ptr, len, (const void *) primitive->address.a, ((u16) (6)));
- CsrUint16Ser(ptr, len, (u16) primitive->mibFilesCount);
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->mibFilesCount; i1++)
- {
- CsrUint16Ser(ptr, len, (u16) primitive->mibFiles[i1].length);
- if (primitive->mibFiles[i1].length)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->mibFiles[i1].data, ((u16) (primitive->mibFiles[i1].length)));
- }
- }
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeWifiFlightmodeReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeWifiFlightmodeReq *primitive = kmalloc(sizeof(CsrWifiSmeWifiFlightmodeReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrMemCpyDes(primitive->address.a, buffer, &offset, ((u16) (6)));
- CsrUint16Des((u16 *) &primitive->mibFilesCount, buffer, &offset);
- primitive->mibFiles = NULL;
- if (primitive->mibFilesCount)
- {
- primitive->mibFiles = kmalloc(sizeof(CsrWifiSmeDataBlock) * primitive->mibFilesCount, GFP_KERNEL);
- }
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->mibFilesCount; i1++)
- {
- CsrUint16Des((u16 *) &primitive->mibFiles[i1].length, buffer, &offset);
- if (primitive->mibFiles[i1].length)
- {
- primitive->mibFiles[i1].data = kmalloc(primitive->mibFiles[i1].length, GFP_KERNEL);
- CsrMemCpyDes(primitive->mibFiles[i1].data, buffer, &offset, ((u16) (primitive->mibFiles[i1].length)));
- }
- else
- {
- primitive->mibFiles[i1].data = NULL;
- }
- }
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmeWifiFlightmodeReqSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeWifiFlightmodeReq *primitive = (CsrWifiSmeWifiFlightmodeReq *) voidPrimitivePointer;
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->mibFilesCount; i1++)
- {
- kfree(primitive->mibFiles[i1].data);
- }
- }
- kfree(primitive->mibFiles);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeWifiOnReqSizeof(void *msg)
-{
- CsrWifiSmeWifiOnReq *primitive = (CsrWifiSmeWifiOnReq *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 14) */
- bufferSize += 6; /* u8 primitive->address.a[6] */
- bufferSize += 2; /* u16 primitive->mibFilesCount */
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->mibFilesCount; i1++)
- {
- bufferSize += 2; /* u16 primitive->mibFiles[i1].length */
- bufferSize += primitive->mibFiles[i1].length; /* u8 primitive->mibFiles[i1].data */
- }
- }
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeWifiOnReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeWifiOnReq *primitive = (CsrWifiSmeWifiOnReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrMemCpySer(ptr, len, (const void *) primitive->address.a, ((u16) (6)));
- CsrUint16Ser(ptr, len, (u16) primitive->mibFilesCount);
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->mibFilesCount; i1++)
- {
- CsrUint16Ser(ptr, len, (u16) primitive->mibFiles[i1].length);
- if (primitive->mibFiles[i1].length)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->mibFiles[i1].data, ((u16) (primitive->mibFiles[i1].length)));
- }
- }
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeWifiOnReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeWifiOnReq *primitive = kmalloc(sizeof(CsrWifiSmeWifiOnReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrMemCpyDes(primitive->address.a, buffer, &offset, ((u16) (6)));
- CsrUint16Des((u16 *) &primitive->mibFilesCount, buffer, &offset);
- primitive->mibFiles = NULL;
- if (primitive->mibFilesCount)
- {
- primitive->mibFiles = kmalloc(sizeof(CsrWifiSmeDataBlock) * primitive->mibFilesCount, GFP_KERNEL);
- }
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->mibFilesCount; i1++)
- {
- CsrUint16Des((u16 *) &primitive->mibFiles[i1].length, buffer, &offset);
- if (primitive->mibFiles[i1].length)
- {
- primitive->mibFiles[i1].data = kmalloc(primitive->mibFiles[i1].length, GFP_KERNEL);
- CsrMemCpyDes(primitive->mibFiles[i1].data, buffer, &offset, ((u16) (primitive->mibFiles[i1].length)));
- }
- else
- {
- primitive->mibFiles[i1].data = NULL;
- }
- }
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmeWifiOnReqSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeWifiOnReq *primitive = (CsrWifiSmeWifiOnReq *) voidPrimitivePointer;
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->mibFilesCount; i1++)
- {
- kfree(primitive->mibFiles[i1].data);
- }
- }
- kfree(primitive->mibFiles);
- kfree(primitive);
-}
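The flight-mode and wifi-on requests carry a counted array of MIB files, each with its own kmalloc'd data block, which is why their SerFree routines above walk the array before freeing it. A sketch of how a sender might build and pack such a request, assuming the same includes as this file; the wrapper name and the CSR_WIFI_SME_WIFI_ON_REQ message id are assumptions, and kmalloc failure handling is again omitted:

    static u8 *example_pack_wifi_on(const u8 mac[6], const u8 *mib, u16 mib_len,
                                    size_t *out_len)
    {
        CsrWifiSmeWifiOnReq *req = kmalloc(sizeof(*req), GFP_KERNEL);
        u8 *buffer;

        req->common.type = CSR_WIFI_SME_WIFI_ON_REQ;   /* assumed message id */
        memcpy(req->address.a, mac, 6);
        req->mibFilesCount = 1;
        req->mibFiles = kmalloc(sizeof(CsrWifiSmeDataBlock), GFP_KERNEL);
        req->mibFiles[0].length = mib_len;
        req->mibFiles[0].data = kmalloc(mib_len, GFP_KERNEL);
        memcpy(req->mibFiles[0].data, mib, mib_len);

        buffer = kmalloc(CsrWifiSmeWifiOnReqSizeof(req), GFP_KERNEL);
        CsrWifiSmeWifiOnReqSer(buffer, out_len, req);

        /* Frees each mibFiles[i].data, then the array, then req. */
        CsrWifiSmeWifiOnReqSerFree(req);
        return buffer;   /* caller owns the serialised buffer */
    }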
-
-
-size_t CsrWifiSmeCloakedSsidsSetReqSizeof(void *msg)
-{
- CsrWifiSmeCloakedSsidsSetReq *primitive = (CsrWifiSmeCloakedSsidsSetReq *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 37) */
- bufferSize += 1; /* u8 primitive->cloakedSsids.cloakedSsidsCount */
- {
- u16 i2;
- for (i2 = 0; i2 < primitive->cloakedSsids.cloakedSsidsCount; i2++)
- {
- bufferSize += 32; /* u8 primitive->cloakedSsids.cloakedSsids[i2].ssid[32] */
- bufferSize += 1; /* u8 primitive->cloakedSsids.cloakedSsids[i2].length */
- }
- }
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeCloakedSsidsSetReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeCloakedSsidsSetReq *primitive = (CsrWifiSmeCloakedSsidsSetReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint8Ser(ptr, len, (u8) primitive->cloakedSsids.cloakedSsidsCount);
- {
- u16 i2;
- for (i2 = 0; i2 < primitive->cloakedSsids.cloakedSsidsCount; i2++)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->cloakedSsids.cloakedSsids[i2].ssid, ((u16) (32)));
- CsrUint8Ser(ptr, len, (u8) primitive->cloakedSsids.cloakedSsids[i2].length);
- }
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeCloakedSsidsSetReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeCloakedSsidsSetReq *primitive = kmalloc(sizeof(CsrWifiSmeCloakedSsidsSetReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->cloakedSsids.cloakedSsidsCount, buffer, &offset);
- primitive->cloakedSsids.cloakedSsids = NULL;
- if (primitive->cloakedSsids.cloakedSsidsCount)
- {
- primitive->cloakedSsids.cloakedSsids = kmalloc(sizeof(CsrWifiSsid) * primitive->cloakedSsids.cloakedSsidsCount, GFP_KERNEL);
- }
- {
- u16 i2;
- for (i2 = 0; i2 < primitive->cloakedSsids.cloakedSsidsCount; i2++)
- {
- CsrMemCpyDes(primitive->cloakedSsids.cloakedSsids[i2].ssid, buffer, &offset, ((u16) (32)));
- CsrUint8Des((u8 *) &primitive->cloakedSsids.cloakedSsids[i2].length, buffer, &offset);
- }
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmeCloakedSsidsSetReqSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeCloakedSsidsSetReq *primitive = (CsrWifiSmeCloakedSsidsSetReq *) voidPrimitivePointer;
- kfree(primitive->cloakedSsids.cloakedSsids);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeSmeCommonConfigSetReqSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 8) */
- bufferSize += 1; /* CsrWifiSme80211dTrustLevel primitive->deviceConfig.trustLevel */
- bufferSize += 2; /* u8 primitive->deviceConfig.countryCode[2] */
- bufferSize += 1; /* CsrWifiSmeFirmwareDriverInterface primitive->deviceConfig.firmwareDriverInterface */
- bufferSize += 1; /* u8 primitive->deviceConfig.enableStrictDraftN */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeSmeCommonConfigSetReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeSmeCommonConfigSetReq *primitive = (CsrWifiSmeSmeCommonConfigSetReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint8Ser(ptr, len, (u8) primitive->deviceConfig.trustLevel);
- CsrMemCpySer(ptr, len, (const void *) primitive->deviceConfig.countryCode, ((u16) (2)));
- CsrUint8Ser(ptr, len, (u8) primitive->deviceConfig.firmwareDriverInterface);
- CsrUint8Ser(ptr, len, (u8) primitive->deviceConfig.enableStrictDraftN);
- return(ptr);
-}
-
-
-void* CsrWifiSmeSmeCommonConfigSetReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeSmeCommonConfigSetReq *primitive = kmalloc(sizeof(CsrWifiSmeSmeCommonConfigSetReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->deviceConfig.trustLevel, buffer, &offset);
- CsrMemCpyDes(primitive->deviceConfig.countryCode, buffer, &offset, ((u16) (2)));
- CsrUint8Des((u8 *) &primitive->deviceConfig.firmwareDriverInterface, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->deviceConfig.enableStrictDraftN, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeWpsConfigurationReqSizeof(void *msg)
-{
- CsrWifiSmeWpsConfigurationReq *primitive = (CsrWifiSmeWpsConfigurationReq *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 240) */
- bufferSize += 1; /* u8 primitive->wpsConfig.wpsVersion */
- bufferSize += 16; /* u8 primitive->wpsConfig.uuid[16] */
- bufferSize += 32; /* u8 primitive->wpsConfig.deviceName[32] */
- bufferSize += 1; /* u8 primitive->wpsConfig.deviceNameLength */
- bufferSize += 64; /* u8 primitive->wpsConfig.manufacturer[64] */
- bufferSize += 1; /* u8 primitive->wpsConfig.manufacturerLength */
- bufferSize += 32; /* u8 primitive->wpsConfig.modelName[32] */
- bufferSize += 1; /* u8 primitive->wpsConfig.modelNameLength */
- bufferSize += 32; /* u8 primitive->wpsConfig.modelNumber[32] */
- bufferSize += 1; /* u8 primitive->wpsConfig.modelNumberLength */
- bufferSize += 32; /* u8 primitive->wpsConfig.serialNumber[32] */
- bufferSize += 8; /* u8 primitive->wpsConfig.primDeviceType.deviceDetails[8] */
- bufferSize += 1; /* u8 primitive->wpsConfig.secondaryDeviceTypeCount */
- {
- u16 i2;
- for (i2 = 0; i2 < primitive->wpsConfig.secondaryDeviceTypeCount; i2++)
- {
- bufferSize += 8; /* u8 primitive->wpsConfig.secondaryDeviceType[i2].deviceDetails[8] */
- }
- }
- bufferSize += 2; /* CsrWifiSmeWpsConfigTypeMask primitive->wpsConfig.configMethods */
- bufferSize += 1; /* u8 primitive->wpsConfig.rfBands */
- bufferSize += 4; /* u8 primitive->wpsConfig.osVersion[4] */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeWpsConfigurationReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeWpsConfigurationReq *primitive = (CsrWifiSmeWpsConfigurationReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint8Ser(ptr, len, (u8) primitive->wpsConfig.wpsVersion);
- CsrMemCpySer(ptr, len, (const void *) primitive->wpsConfig.uuid, ((u16) (16)));
- CsrMemCpySer(ptr, len, (const void *) primitive->wpsConfig.deviceName, ((u16) (32)));
- CsrUint8Ser(ptr, len, (u8) primitive->wpsConfig.deviceNameLength);
- CsrMemCpySer(ptr, len, (const void *) primitive->wpsConfig.manufacturer, ((u16) (64)));
- CsrUint8Ser(ptr, len, (u8) primitive->wpsConfig.manufacturerLength);
- CsrMemCpySer(ptr, len, (const void *) primitive->wpsConfig.modelName, ((u16) (32)));
- CsrUint8Ser(ptr, len, (u8) primitive->wpsConfig.modelNameLength);
- CsrMemCpySer(ptr, len, (const void *) primitive->wpsConfig.modelNumber, ((u16) (32)));
- CsrUint8Ser(ptr, len, (u8) primitive->wpsConfig.modelNumberLength);
- CsrMemCpySer(ptr, len, (const void *) primitive->wpsConfig.serialNumber, ((u16) (32)));
- CsrMemCpySer(ptr, len, (const void *) primitive->wpsConfig.primDeviceType.deviceDetails, ((u16) (8)));
- CsrUint8Ser(ptr, len, (u8) primitive->wpsConfig.secondaryDeviceTypeCount);
- {
- u16 i2;
- for (i2 = 0; i2 < primitive->wpsConfig.secondaryDeviceTypeCount; i2++)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->wpsConfig.secondaryDeviceType[i2].deviceDetails, ((u16) (8)));
- }
- }
- CsrUint16Ser(ptr, len, (u16) primitive->wpsConfig.configMethods);
- CsrUint8Ser(ptr, len, (u8) primitive->wpsConfig.rfBands);
- CsrMemCpySer(ptr, len, (const void *) primitive->wpsConfig.osVersion, ((u16) (4)));
- return(ptr);
-}
-
-
-void* CsrWifiSmeWpsConfigurationReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeWpsConfigurationReq *primitive = kmalloc(sizeof(CsrWifiSmeWpsConfigurationReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->wpsConfig.wpsVersion, buffer, &offset);
- CsrMemCpyDes(primitive->wpsConfig.uuid, buffer, &offset, ((u16) (16)));
- CsrMemCpyDes(primitive->wpsConfig.deviceName, buffer, &offset, ((u16) (32)));
- CsrUint8Des((u8 *) &primitive->wpsConfig.deviceNameLength, buffer, &offset);
- CsrMemCpyDes(primitive->wpsConfig.manufacturer, buffer, &offset, ((u16) (64)));
- CsrUint8Des((u8 *) &primitive->wpsConfig.manufacturerLength, buffer, &offset);
- CsrMemCpyDes(primitive->wpsConfig.modelName, buffer, &offset, ((u16) (32)));
- CsrUint8Des((u8 *) &primitive->wpsConfig.modelNameLength, buffer, &offset);
- CsrMemCpyDes(primitive->wpsConfig.modelNumber, buffer, &offset, ((u16) (32)));
- CsrUint8Des((u8 *) &primitive->wpsConfig.modelNumberLength, buffer, &offset);
- CsrMemCpyDes(primitive->wpsConfig.serialNumber, buffer, &offset, ((u16) (32)));
- CsrMemCpyDes(primitive->wpsConfig.primDeviceType.deviceDetails, buffer, &offset, ((u16) (8)));
- CsrUint8Des((u8 *) &primitive->wpsConfig.secondaryDeviceTypeCount, buffer, &offset);
- primitive->wpsConfig.secondaryDeviceType = NULL;
- if (primitive->wpsConfig.secondaryDeviceTypeCount)
- {
- primitive->wpsConfig.secondaryDeviceType = kmalloc(sizeof(CsrWifiSmeWpsDeviceType) * primitive->wpsConfig.secondaryDeviceTypeCount, GFP_KERNEL);
- }
- {
- u16 i2;
- for (i2 = 0; i2 < primitive->wpsConfig.secondaryDeviceTypeCount; i2++)
- {
- CsrMemCpyDes(primitive->wpsConfig.secondaryDeviceType[i2].deviceDetails, buffer, &offset, ((u16) (8)));
- }
- }
- CsrUint16Des((u16 *) &primitive->wpsConfig.configMethods, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->wpsConfig.rfBands, buffer, &offset);
- CsrMemCpyDes(primitive->wpsConfig.osVersion, buffer, &offset, ((u16) (4)));
-
- return primitive;
-}
-
-
-void CsrWifiSmeWpsConfigurationReqSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeWpsConfigurationReq *primitive = (CsrWifiSmeWpsConfigurationReq *) voidPrimitivePointer;
- kfree(primitive->wpsConfig.secondaryDeviceType);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeSetReqSizeof(void *msg)
-{
- CsrWifiSmeSetReq *primitive = (CsrWifiSmeSetReq *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 8) */
- bufferSize += 4; /* u32 primitive->dataLength */
- bufferSize += primitive->dataLength; /* u8 primitive->data */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeSetReqSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeSetReq *primitive = (CsrWifiSmeSetReq *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint32Ser(ptr, len, (u32) primitive->dataLength);
- if (primitive->dataLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->data, ((u16) (primitive->dataLength)));
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeSetReqDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeSetReq *primitive = kmalloc(sizeof(CsrWifiSmeSetReq), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->dataLength, buffer, &offset);
- if (primitive->dataLength)
- {
- primitive->data = kmalloc(primitive->dataLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->data, buffer, &offset, ((u16) (primitive->dataLength)));
- }
- else
- {
- primitive->data = NULL;
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmeSetReqSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeSetReq *primitive = (CsrWifiSmeSetReq *) voidPrimitivePointer;
- kfree(primitive->data);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeAdhocConfigGetCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 13) */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 2; /* u16 primitive->adHocConfig.atimWindowTu */
- bufferSize += 2; /* u16 primitive->adHocConfig.beaconPeriodTu */
- bufferSize += 2; /* u16 primitive->adHocConfig.joinOnlyAttempts */
- bufferSize += 2; /* u16 primitive->adHocConfig.joinAttemptIntervalMs */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeAdhocConfigGetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeAdhocConfigGetCfm *primitive = (CsrWifiSmeAdhocConfigGetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint16Ser(ptr, len, (u16) primitive->adHocConfig.atimWindowTu);
- CsrUint16Ser(ptr, len, (u16) primitive->adHocConfig.beaconPeriodTu);
- CsrUint16Ser(ptr, len, (u16) primitive->adHocConfig.joinOnlyAttempts);
- CsrUint16Ser(ptr, len, (u16) primitive->adHocConfig.joinAttemptIntervalMs);
- return(ptr);
-}
-
-
-void* CsrWifiSmeAdhocConfigGetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeAdhocConfigGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeAdhocConfigGetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->adHocConfig.atimWindowTu, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->adHocConfig.beaconPeriodTu, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->adHocConfig.joinOnlyAttempts, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->adHocConfig.joinAttemptIntervalMs, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeAssociationCompleteIndSizeof(void *msg)
-{
- CsrWifiSmeAssociationCompleteInd *primitive = (CsrWifiSmeAssociationCompleteInd *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 98) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 32; /* u8 primitive->connectionInfo.ssid.ssid[32] */
- bufferSize += 1; /* u8 primitive->connectionInfo.ssid.length */
- bufferSize += 6; /* u8 primitive->connectionInfo.bssid.a[6] */
- bufferSize += 1; /* CsrWifiSme80211NetworkType primitive->connectionInfo.networkType80211 */
- bufferSize += 1; /* u8 primitive->connectionInfo.channelNumber */
- bufferSize += 2; /* u16 primitive->connectionInfo.channelFrequency */
- bufferSize += 2; /* CsrWifiSmeAuthMode primitive->connectionInfo.authMode */
- bufferSize += 2; /* CsrWifiSmeEncryption primitive->connectionInfo.pairwiseCipher */
- bufferSize += 2; /* CsrWifiSmeEncryption primitive->connectionInfo.groupCipher */
- bufferSize += 1; /* CsrWifiSmeRadioIF primitive->connectionInfo.ifIndex */
- bufferSize += 2; /* u16 primitive->connectionInfo.atimWindowTu */
- bufferSize += 2; /* u16 primitive->connectionInfo.beaconPeriodTu */
- bufferSize += 1; /* u8 primitive->connectionInfo.reassociation */
- bufferSize += 2; /* u16 primitive->connectionInfo.beaconFrameLength */
- bufferSize += primitive->connectionInfo.beaconFrameLength; /* u8 primitive->connectionInfo.beaconFrame */
- bufferSize += 2; /* u16 primitive->connectionInfo.associationReqFrameLength */
- bufferSize += primitive->connectionInfo.associationReqFrameLength; /* u8 primitive->connectionInfo.associationReqFrame */
- bufferSize += 2; /* u16 primitive->connectionInfo.associationRspFrameLength */
- bufferSize += primitive->connectionInfo.associationRspFrameLength; /* u8 primitive->connectionInfo.associationRspFrame */
- bufferSize += 2; /* u16 primitive->connectionInfo.assocScanInfoElementsLength */
- bufferSize += primitive->connectionInfo.assocScanInfoElementsLength; /* u8 primitive->connectionInfo.assocScanInfoElements */
- bufferSize += 2; /* u16 primitive->connectionInfo.assocReqCapabilities */
- bufferSize += 2; /* u16 primitive->connectionInfo.assocReqListenIntervalTu */
- bufferSize += 6; /* u8 primitive->connectionInfo.assocReqApAddress.a[6] */
- bufferSize += 2; /* u16 primitive->connectionInfo.assocReqInfoElementsLength */
- bufferSize += primitive->connectionInfo.assocReqInfoElementsLength; /* u8 primitive->connectionInfo.assocReqInfoElements */
- bufferSize += 2; /* CsrWifiSmeIEEE80211Result primitive->connectionInfo.assocRspResult */
- bufferSize += 2; /* u16 primitive->connectionInfo.assocRspCapabilityInfo */
- bufferSize += 2; /* u16 primitive->connectionInfo.assocRspAssociationId */
- bufferSize += 2; /* u16 primitive->connectionInfo.assocRspInfoElementsLength */
- bufferSize += primitive->connectionInfo.assocRspInfoElementsLength; /* u8 primitive->connectionInfo.assocRspInfoElements */
- bufferSize += 2; /* CsrWifiSmeIEEE80211Reason primitive->deauthReason */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeAssociationCompleteIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeAssociationCompleteInd *primitive = (CsrWifiSmeAssociationCompleteInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.ssid.ssid, ((u16) (32)));
- CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.ssid.length);
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.bssid.a, ((u16) (6)));
- CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.networkType80211);
- CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.channelNumber);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.channelFrequency);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.authMode);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.pairwiseCipher);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.groupCipher);
- CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.ifIndex);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.atimWindowTu);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.beaconPeriodTu);
- CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.reassociation);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.beaconFrameLength);
- if (primitive->connectionInfo.beaconFrameLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.beaconFrame, ((u16) (primitive->connectionInfo.beaconFrameLength)));
- }
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.associationReqFrameLength);
- if (primitive->connectionInfo.associationReqFrameLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.associationReqFrame, ((u16) (primitive->connectionInfo.associationReqFrameLength)));
- }
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.associationRspFrameLength);
- if (primitive->connectionInfo.associationRspFrameLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.associationRspFrame, ((u16) (primitive->connectionInfo.associationRspFrameLength)));
- }
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocScanInfoElementsLength);
- if (primitive->connectionInfo.assocScanInfoElementsLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.assocScanInfoElements, ((u16) (primitive->connectionInfo.assocScanInfoElementsLength)));
- }
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocReqCapabilities);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocReqListenIntervalTu);
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.assocReqApAddress.a, ((u16) (6)));
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocReqInfoElementsLength);
- if (primitive->connectionInfo.assocReqInfoElementsLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.assocReqInfoElements, ((u16) (primitive->connectionInfo.assocReqInfoElementsLength)));
- }
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocRspResult);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocRspCapabilityInfo);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocRspAssociationId);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocRspInfoElementsLength);
- if (primitive->connectionInfo.assocRspInfoElementsLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.assocRspInfoElements, ((u16) (primitive->connectionInfo.assocRspInfoElementsLength)));
- }
- CsrUint16Ser(ptr, len, (u16) primitive->deauthReason);
- return(ptr);
-}
-
-
-void* CsrWifiSmeAssociationCompleteIndDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeAssociationCompleteInd *primitive = kmalloc(sizeof(CsrWifiSmeAssociationCompleteInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrMemCpyDes(primitive->connectionInfo.ssid.ssid, buffer, &offset, ((u16) (32)));
- CsrUint8Des((u8 *) &primitive->connectionInfo.ssid.length, buffer, &offset);
- CsrMemCpyDes(primitive->connectionInfo.bssid.a, buffer, &offset, ((u16) (6)));
- CsrUint8Des((u8 *) &primitive->connectionInfo.networkType80211, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->connectionInfo.channelNumber, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.channelFrequency, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.authMode, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.pairwiseCipher, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.groupCipher, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->connectionInfo.ifIndex, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.atimWindowTu, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.beaconPeriodTu, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->connectionInfo.reassociation, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.beaconFrameLength, buffer, &offset);
- if (primitive->connectionInfo.beaconFrameLength)
- {
- primitive->connectionInfo.beaconFrame = kmalloc(primitive->connectionInfo.beaconFrameLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->connectionInfo.beaconFrame, buffer, &offset, ((u16) (primitive->connectionInfo.beaconFrameLength)));
- }
- else
- {
- primitive->connectionInfo.beaconFrame = NULL;
- }
- CsrUint16Des((u16 *) &primitive->connectionInfo.associationReqFrameLength, buffer, &offset);
- if (primitive->connectionInfo.associationReqFrameLength)
- {
- primitive->connectionInfo.associationReqFrame = kmalloc(primitive->connectionInfo.associationReqFrameLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->connectionInfo.associationReqFrame, buffer, &offset, ((u16) (primitive->connectionInfo.associationReqFrameLength)));
- }
- else
- {
- primitive->connectionInfo.associationReqFrame = NULL;
- }
- CsrUint16Des((u16 *) &primitive->connectionInfo.associationRspFrameLength, buffer, &offset);
- if (primitive->connectionInfo.associationRspFrameLength)
- {
- primitive->connectionInfo.associationRspFrame = kmalloc(primitive->connectionInfo.associationRspFrameLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->connectionInfo.associationRspFrame, buffer, &offset, ((u16) (primitive->connectionInfo.associationRspFrameLength)));
- }
- else
- {
- primitive->connectionInfo.associationRspFrame = NULL;
- }
- CsrUint16Des((u16 *) &primitive->connectionInfo.assocScanInfoElementsLength, buffer, &offset);
- if (primitive->connectionInfo.assocScanInfoElementsLength)
- {
- primitive->connectionInfo.assocScanInfoElements = kmalloc(primitive->connectionInfo.assocScanInfoElementsLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->connectionInfo.assocScanInfoElements, buffer, &offset, ((u16) (primitive->connectionInfo.assocScanInfoElementsLength)));
- }
- else
- {
- primitive->connectionInfo.assocScanInfoElements = NULL;
- }
- CsrUint16Des((u16 *) &primitive->connectionInfo.assocReqCapabilities, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.assocReqListenIntervalTu, buffer, &offset);
- CsrMemCpyDes(primitive->connectionInfo.assocReqApAddress.a, buffer, &offset, ((u16) (6)));
- CsrUint16Des((u16 *) &primitive->connectionInfo.assocReqInfoElementsLength, buffer, &offset);
- if (primitive->connectionInfo.assocReqInfoElementsLength)
- {
- primitive->connectionInfo.assocReqInfoElements = kmalloc(primitive->connectionInfo.assocReqInfoElementsLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->connectionInfo.assocReqInfoElements, buffer, &offset, ((u16) (primitive->connectionInfo.assocReqInfoElementsLength)));
- }
- else
- {
- primitive->connectionInfo.assocReqInfoElements = NULL;
- }
- CsrUint16Des((u16 *) &primitive->connectionInfo.assocRspResult, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.assocRspCapabilityInfo, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.assocRspAssociationId, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.assocRspInfoElementsLength, buffer, &offset);
- if (primitive->connectionInfo.assocRspInfoElementsLength)
- {
- primitive->connectionInfo.assocRspInfoElements = kmalloc(primitive->connectionInfo.assocRspInfoElementsLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->connectionInfo.assocRspInfoElements, buffer, &offset, ((u16) (primitive->connectionInfo.assocRspInfoElementsLength)));
- }
- else
- {
- primitive->connectionInfo.assocRspInfoElements = NULL;
- }
- CsrUint16Des((u16 *) &primitive->deauthReason, buffer, &offset);
-
- return primitive;
-}
-
-
-void CsrWifiSmeAssociationCompleteIndSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeAssociationCompleteInd *primitive = (CsrWifiSmeAssociationCompleteInd *) voidPrimitivePointer;
- kfree(primitive->connectionInfo.beaconFrame);
- kfree(primitive->connectionInfo.associationReqFrame);
- kfree(primitive->connectionInfo.associationRspFrame);
- kfree(primitive->connectionInfo.assocScanInfoElements);
- kfree(primitive->connectionInfo.assocReqInfoElements);
- kfree(primitive->connectionInfo.assocRspInfoElements);
- kfree(primitive);
-}
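-
An illustrative consumer of the deserialised AssociationCompleteInd (again not part of the patch): as the deserialiser above shows, every variable-length member is left NULL when its length field is zero, so callers guard each access with the corresponding length and hand the whole primitive back to the SerFree helper. The name example_handle_assoc_complete is hypothetical.

    /* Illustrative only: consume a wire-format AssociationCompleteInd. */
    static void example_handle_assoc_complete(u8 *buffer, size_t length)
    {
        CsrWifiSmeAssociationCompleteInd *ind =
            CsrWifiSmeAssociationCompleteIndDes(buffer, length);

        if (!ind)
            return;

        if (ind->connectionInfo.beaconFrameLength) {
            /* ind->connectionInfo.beaconFrame points at a kmalloc'd copy here */
        }

        /* Frees all six optional buffers and the primitive itself. */
        CsrWifiSmeAssociationCompleteIndSerFree(ind);
    }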
-
-
-size_t CsrWifiSmeAssociationStartIndSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 44) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 6; /* u8 primitive->address.a[6] */
- bufferSize += 32; /* u8 primitive->ssid.ssid[32] */
- bufferSize += 1; /* u8 primitive->ssid.length */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeAssociationStartIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeAssociationStartInd *primitive = (CsrWifiSmeAssociationStartInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrMemCpySer(ptr, len, (const void *) primitive->address.a, ((u16) (6)));
- CsrMemCpySer(ptr, len, (const void *) primitive->ssid.ssid, ((u16) (32)));
- CsrUint8Ser(ptr, len, (u8) primitive->ssid.length);
- return(ptr);
-}
-
-
-void* CsrWifiSmeAssociationStartIndDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeAssociationStartInd *primitive = kmalloc(sizeof(CsrWifiSmeAssociationStartInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrMemCpyDes(primitive->address.a, buffer, &offset, ((u16) (6)));
- CsrMemCpyDes(primitive->ssid.ssid, buffer, &offset, ((u16) (32)));
- CsrUint8Des((u8 *) &primitive->ssid.length, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeBlacklistCfmSizeof(void *msg)
-{
- CsrWifiSmeBlacklistCfm *primitive = (CsrWifiSmeBlacklistCfm *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 15) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 1; /* CsrWifiSmeListAction primitive->action */
- bufferSize += 1; /* u8 primitive->getAddressCount */
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->getAddressCount; i1++)
- {
- bufferSize += 6; /* u8 primitive->getAddresses[i1].a[6] */
- }
- }
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeBlacklistCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeBlacklistCfm *primitive = (CsrWifiSmeBlacklistCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint8Ser(ptr, len, (u8) primitive->action);
- CsrUint8Ser(ptr, len, (u8) primitive->getAddressCount);
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->getAddressCount; i1++)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->getAddresses[i1].a, ((u16) (6)));
- }
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeBlacklistCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeBlacklistCfm *primitive = kmalloc(sizeof(CsrWifiSmeBlacklistCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->action, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->getAddressCount, buffer, &offset);
- primitive->getAddresses = NULL;
- if (primitive->getAddressCount)
- {
- primitive->getAddresses = kmalloc(sizeof(CsrWifiMacAddress) * primitive->getAddressCount, GFP_KERNEL);
- }
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->getAddressCount; i1++)
- {
- CsrMemCpyDes(primitive->getAddresses[i1].a, buffer, &offset, ((u16) (6)));
- }
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmeBlacklistCfmSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeBlacklistCfm *primitive = (CsrWifiSmeBlacklistCfm *) voidPrimitivePointer;
- kfree(primitive->getAddresses);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeCalibrationDataGetCfmSizeof(void *msg)
-{
- CsrWifiSmeCalibrationDataGetCfm *primitive = (CsrWifiSmeCalibrationDataGetCfm *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 8) */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 2; /* u16 primitive->calibrationDataLength */
- bufferSize += primitive->calibrationDataLength; /* u8 primitive->calibrationData */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeCalibrationDataGetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeCalibrationDataGetCfm *primitive = (CsrWifiSmeCalibrationDataGetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint16Ser(ptr, len, (u16) primitive->calibrationDataLength);
- if (primitive->calibrationDataLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->calibrationData, ((u16) (primitive->calibrationDataLength)));
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeCalibrationDataGetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeCalibrationDataGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeCalibrationDataGetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->calibrationDataLength, buffer, &offset);
- if (primitive->calibrationDataLength)
- {
- primitive->calibrationData = kmalloc(primitive->calibrationDataLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->calibrationData, buffer, &offset, ((u16) (primitive->calibrationDataLength)));
- }
- else
- {
- primitive->calibrationData = NULL;
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmeCalibrationDataGetCfmSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeCalibrationDataGetCfm *primitive = (CsrWifiSmeCalibrationDataGetCfm *) voidPrimitivePointer;
- kfree(primitive->calibrationData);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeCcxConfigGetCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 11) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 1; /* u8 primitive->ccxConfig.keepAliveTimeMs */
- bufferSize += 1; /* u8 primitive->ccxConfig.apRoamingEnabled */
- bufferSize += 1; /* u8 primitive->ccxConfig.measurementsMask */
- bufferSize += 1; /* u8 primitive->ccxConfig.ccxRadioMgtEnabled */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeCcxConfigGetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeCcxConfigGetCfm *primitive = (CsrWifiSmeCcxConfigGetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint8Ser(ptr, len, (u8) primitive->ccxConfig.keepAliveTimeMs);
- CsrUint8Ser(ptr, len, (u8) primitive->ccxConfig.apRoamingEnabled);
- CsrUint8Ser(ptr, len, (u8) primitive->ccxConfig.measurementsMask);
- CsrUint8Ser(ptr, len, (u8) primitive->ccxConfig.ccxRadioMgtEnabled);
- return(ptr);
-}
-
-
-void* CsrWifiSmeCcxConfigGetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeCcxConfigGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeCcxConfigGetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->ccxConfig.keepAliveTimeMs, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->ccxConfig.apRoamingEnabled, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->ccxConfig.measurementsMask, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->ccxConfig.ccxRadioMgtEnabled, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeCcxConfigSetCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 7) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeCcxConfigSetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeCcxConfigSetCfm *primitive = (CsrWifiSmeCcxConfigSetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- return(ptr);
-}
-
-
-void* CsrWifiSmeCcxConfigSetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeCcxConfigSetCfm *primitive = kmalloc(sizeof(CsrWifiSmeCcxConfigSetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeCoexConfigGetCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 31) */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 1; /* u8 primitive->coexConfig.coexEnableSchemeManagement */
- bufferSize += 1; /* u8 primitive->coexConfig.coexPeriodicWakeHost */
- bufferSize += 2; /* u16 primitive->coexConfig.coexTrafficBurstyLatencyMs */
- bufferSize += 2; /* u16 primitive->coexConfig.coexTrafficContinuousLatencyMs */
- bufferSize += 2; /* u16 primitive->coexConfig.coexObexBlackoutDurationMs */
- bufferSize += 2; /* u16 primitive->coexConfig.coexObexBlackoutPeriodMs */
- bufferSize += 2; /* u16 primitive->coexConfig.coexA2dpBrBlackoutDurationMs */
- bufferSize += 2; /* u16 primitive->coexConfig.coexA2dpBrBlackoutPeriodMs */
- bufferSize += 2; /* u16 primitive->coexConfig.coexA2dpEdrBlackoutDurationMs */
- bufferSize += 2; /* u16 primitive->coexConfig.coexA2dpEdrBlackoutPeriodMs */
- bufferSize += 2; /* u16 primitive->coexConfig.coexPagingBlackoutDurationMs */
- bufferSize += 2; /* u16 primitive->coexConfig.coexPagingBlackoutPeriodMs */
- bufferSize += 2; /* u16 primitive->coexConfig.coexInquiryBlackoutDurationMs */
- bufferSize += 2; /* u16 primitive->coexConfig.coexInquiryBlackoutPeriodMs */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeCoexConfigGetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeCoexConfigGetCfm *primitive = (CsrWifiSmeCoexConfigGetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint8Ser(ptr, len, (u8) primitive->coexConfig.coexEnableSchemeManagement);
- CsrUint8Ser(ptr, len, (u8) primitive->coexConfig.coexPeriodicWakeHost);
- CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexTrafficBurstyLatencyMs);
- CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexTrafficContinuousLatencyMs);
- CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexObexBlackoutDurationMs);
- CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexObexBlackoutPeriodMs);
- CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexA2dpBrBlackoutDurationMs);
- CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexA2dpBrBlackoutPeriodMs);
- CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexA2dpEdrBlackoutDurationMs);
- CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexA2dpEdrBlackoutPeriodMs);
- CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexPagingBlackoutDurationMs);
- CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexPagingBlackoutPeriodMs);
- CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexInquiryBlackoutDurationMs);
- CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexInquiryBlackoutPeriodMs);
- return(ptr);
-}
-
-
-void* CsrWifiSmeCoexConfigGetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeCoexConfigGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeCoexConfigGetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->coexConfig.coexEnableSchemeManagement, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->coexConfig.coexPeriodicWakeHost, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexConfig.coexTrafficBurstyLatencyMs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexConfig.coexTrafficContinuousLatencyMs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexConfig.coexObexBlackoutDurationMs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexConfig.coexObexBlackoutPeriodMs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexConfig.coexA2dpBrBlackoutDurationMs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexConfig.coexA2dpBrBlackoutPeriodMs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexConfig.coexA2dpEdrBlackoutDurationMs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexConfig.coexA2dpEdrBlackoutPeriodMs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexConfig.coexPagingBlackoutDurationMs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexConfig.coexPagingBlackoutPeriodMs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexConfig.coexInquiryBlackoutDurationMs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexConfig.coexInquiryBlackoutPeriodMs, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeCoexInfoGetCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 24) */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 1; /* u8 primitive->coexInfo.hasTrafficData */
- bufferSize += 1; /* CsrWifiSmeTrafficType primitive->coexInfo.currentTrafficType */
- bufferSize += 2; /* u16 primitive->coexInfo.currentPeriodMs */
- bufferSize += 1; /* CsrWifiSmePowerSaveLevel primitive->coexInfo.currentPowerSave */
- bufferSize += 2; /* u16 primitive->coexInfo.currentCoexPeriodMs */
- bufferSize += 2; /* u16 primitive->coexInfo.currentCoexLatencyMs */
- bufferSize += 1; /* u8 primitive->coexInfo.hasBtDevice */
- bufferSize += 4; /* u32 primitive->coexInfo.currentBlackoutDurationUs */
- bufferSize += 4; /* u32 primitive->coexInfo.currentBlackoutPeriodUs */
- bufferSize += 1; /* CsrWifiSmeCoexScheme primitive->coexInfo.currentCoexScheme */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeCoexInfoGetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeCoexInfoGetCfm *primitive = (CsrWifiSmeCoexInfoGetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint8Ser(ptr, len, (u8) primitive->coexInfo.hasTrafficData);
- CsrUint8Ser(ptr, len, (u8) primitive->coexInfo.currentTrafficType);
- CsrUint16Ser(ptr, len, (u16) primitive->coexInfo.currentPeriodMs);
- CsrUint8Ser(ptr, len, (u8) primitive->coexInfo.currentPowerSave);
- CsrUint16Ser(ptr, len, (u16) primitive->coexInfo.currentCoexPeriodMs);
- CsrUint16Ser(ptr, len, (u16) primitive->coexInfo.currentCoexLatencyMs);
- CsrUint8Ser(ptr, len, (u8) primitive->coexInfo.hasBtDevice);
- CsrUint32Ser(ptr, len, (u32) primitive->coexInfo.currentBlackoutDurationUs);
- CsrUint32Ser(ptr, len, (u32) primitive->coexInfo.currentBlackoutPeriodUs);
- CsrUint8Ser(ptr, len, (u8) primitive->coexInfo.currentCoexScheme);
- return(ptr);
-}
-
-
-void* CsrWifiSmeCoexInfoGetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeCoexInfoGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeCoexInfoGetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->coexInfo.hasTrafficData, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->coexInfo.currentTrafficType, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexInfo.currentPeriodMs, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->coexInfo.currentPowerSave, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexInfo.currentCoexPeriodMs, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->coexInfo.currentCoexLatencyMs, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->coexInfo.hasBtDevice, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->coexInfo.currentBlackoutDurationUs, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->coexInfo.currentBlackoutPeriodUs, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->coexInfo.currentCoexScheme, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeConnectCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 7) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeConnectCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeConnectCfm *primitive = (CsrWifiSmeConnectCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- return(ptr);
-}
-
-
-void* CsrWifiSmeConnectCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeConnectCfm *primitive = kmalloc(sizeof(CsrWifiSmeConnectCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeConnectionConfigGetCfmSizeof(void *msg)
-{
- CsrWifiSmeConnectionConfigGetCfm *primitive = (CsrWifiSmeConnectionConfigGetCfm *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 59) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 32; /* u8 primitive->connectionConfig.ssid.ssid[32] */
- bufferSize += 1; /* u8 primitive->connectionConfig.ssid.length */
- bufferSize += 6; /* u8 primitive->connectionConfig.bssid.a[6] */
- bufferSize += 1; /* CsrWifiSmeBssType primitive->connectionConfig.bssType */
- bufferSize += 1; /* CsrWifiSmeRadioIF primitive->connectionConfig.ifIndex */
- bufferSize += 1; /* CsrWifiSme80211PrivacyMode primitive->connectionConfig.privacyMode */
- bufferSize += 2; /* CsrWifiSmeAuthModeMask primitive->connectionConfig.authModeMask */
- bufferSize += 2; /* CsrWifiSmeEncryptionMask primitive->connectionConfig.encryptionModeMask */
- bufferSize += 2; /* u16 primitive->connectionConfig.mlmeAssociateReqInformationElementsLength */
- bufferSize += primitive->connectionConfig.mlmeAssociateReqInformationElementsLength; /* u8 primitive->connectionConfig.mlmeAssociateReqInformationElements */
- bufferSize += 1; /* CsrWifiSmeWmmQosInfoMask primitive->connectionConfig.wmmQosInfo */
- bufferSize += 1; /* u8 primitive->connectionConfig.adhocJoinOnly */
- bufferSize += 1; /* u8 primitive->connectionConfig.adhocChannel */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeConnectionConfigGetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeConnectionConfigGetCfm *primitive = (CsrWifiSmeConnectionConfigGetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionConfig.ssid.ssid, ((u16) (32)));
- CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.ssid.length);
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionConfig.bssid.a, ((u16) (6)));
- CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.bssType);
- CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.ifIndex);
- CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.privacyMode);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionConfig.authModeMask);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionConfig.encryptionModeMask);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionConfig.mlmeAssociateReqInformationElementsLength);
- if (primitive->connectionConfig.mlmeAssociateReqInformationElementsLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionConfig.mlmeAssociateReqInformationElements, ((u16) (primitive->connectionConfig.mlmeAssociateReqInformationElementsLength)));
- }
- CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.wmmQosInfo);
- CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.adhocJoinOnly);
- CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.adhocChannel);
- return(ptr);
-}
-
-
-void* CsrWifiSmeConnectionConfigGetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeConnectionConfigGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeConnectionConfigGetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrMemCpyDes(primitive->connectionConfig.ssid.ssid, buffer, &offset, ((u16) (32)));
- CsrUint8Des((u8 *) &primitive->connectionConfig.ssid.length, buffer, &offset);
- CsrMemCpyDes(primitive->connectionConfig.bssid.a, buffer, &offset, ((u16) (6)));
- CsrUint8Des((u8 *) &primitive->connectionConfig.bssType, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->connectionConfig.ifIndex, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->connectionConfig.privacyMode, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionConfig.authModeMask, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionConfig.encryptionModeMask, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionConfig.mlmeAssociateReqInformationElementsLength, buffer, &offset);
- if (primitive->connectionConfig.mlmeAssociateReqInformationElementsLength)
- {
- primitive->connectionConfig.mlmeAssociateReqInformationElements = kmalloc(primitive->connectionConfig.mlmeAssociateReqInformationElementsLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->connectionConfig.mlmeAssociateReqInformationElements, buffer, &offset, ((u16) (primitive->connectionConfig.mlmeAssociateReqInformationElementsLength)));
- }
- else
- {
- primitive->connectionConfig.mlmeAssociateReqInformationElements = NULL;
- }
- CsrUint8Des((u8 *) &primitive->connectionConfig.wmmQosInfo, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->connectionConfig.adhocJoinOnly, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->connectionConfig.adhocChannel, buffer, &offset);
-
- return primitive;
-}
-
-
-void CsrWifiSmeConnectionConfigGetCfmSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeConnectionConfigGetCfm *primitive = (CsrWifiSmeConnectionConfigGetCfm *) voidPrimitivePointer;
- kfree(primitive->connectionConfig.mlmeAssociateReqInformationElements);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeConnectionInfoGetCfmSizeof(void *msg)
-{
- CsrWifiSmeConnectionInfoGetCfm *primitive = (CsrWifiSmeConnectionInfoGetCfm *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 96) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 32; /* u8 primitive->connectionInfo.ssid.ssid[32] */
- bufferSize += 1; /* u8 primitive->connectionInfo.ssid.length */
- bufferSize += 6; /* u8 primitive->connectionInfo.bssid.a[6] */
- bufferSize += 1; /* CsrWifiSme80211NetworkType primitive->connectionInfo.networkType80211 */
- bufferSize += 1; /* u8 primitive->connectionInfo.channelNumber */
- bufferSize += 2; /* u16 primitive->connectionInfo.channelFrequency */
- bufferSize += 2; /* CsrWifiSmeAuthMode primitive->connectionInfo.authMode */
- bufferSize += 2; /* CsrWifiSmeEncryption primitive->connectionInfo.pairwiseCipher */
- bufferSize += 2; /* CsrWifiSmeEncryption primitive->connectionInfo.groupCipher */
- bufferSize += 1; /* CsrWifiSmeRadioIF primitive->connectionInfo.ifIndex */
- bufferSize += 2; /* u16 primitive->connectionInfo.atimWindowTu */
- bufferSize += 2; /* u16 primitive->connectionInfo.beaconPeriodTu */
- bufferSize += 1; /* u8 primitive->connectionInfo.reassociation */
- bufferSize += 2; /* u16 primitive->connectionInfo.beaconFrameLength */
- bufferSize += primitive->connectionInfo.beaconFrameLength; /* u8 primitive->connectionInfo.beaconFrame */
- bufferSize += 2; /* u16 primitive->connectionInfo.associationReqFrameLength */
- bufferSize += primitive->connectionInfo.associationReqFrameLength; /* u8 primitive->connectionInfo.associationReqFrame */
- bufferSize += 2; /* u16 primitive->connectionInfo.associationRspFrameLength */
- bufferSize += primitive->connectionInfo.associationRspFrameLength; /* u8 primitive->connectionInfo.associationRspFrame */
- bufferSize += 2; /* u16 primitive->connectionInfo.assocScanInfoElementsLength */
- bufferSize += primitive->connectionInfo.assocScanInfoElementsLength; /* u8 primitive->connectionInfo.assocScanInfoElements */
- bufferSize += 2; /* u16 primitive->connectionInfo.assocReqCapabilities */
- bufferSize += 2; /* u16 primitive->connectionInfo.assocReqListenIntervalTu */
- bufferSize += 6; /* u8 primitive->connectionInfo.assocReqApAddress.a[6] */
- bufferSize += 2; /* u16 primitive->connectionInfo.assocReqInfoElementsLength */
- bufferSize += primitive->connectionInfo.assocReqInfoElementsLength; /* u8 primitive->connectionInfo.assocReqInfoElements */
- bufferSize += 2; /* CsrWifiSmeIEEE80211Result primitive->connectionInfo.assocRspResult */
- bufferSize += 2; /* u16 primitive->connectionInfo.assocRspCapabilityInfo */
- bufferSize += 2; /* u16 primitive->connectionInfo.assocRspAssociationId */
- bufferSize += 2; /* u16 primitive->connectionInfo.assocRspInfoElementsLength */
- bufferSize += primitive->connectionInfo.assocRspInfoElementsLength; /* u8 primitive->connectionInfo.assocRspInfoElements */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeConnectionInfoGetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeConnectionInfoGetCfm *primitive = (CsrWifiSmeConnectionInfoGetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.ssid.ssid, ((u16) (32)));
- CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.ssid.length);
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.bssid.a, ((u16) (6)));
- CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.networkType80211);
- CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.channelNumber);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.channelFrequency);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.authMode);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.pairwiseCipher);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.groupCipher);
- CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.ifIndex);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.atimWindowTu);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.beaconPeriodTu);
- CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.reassociation);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.beaconFrameLength);
- if (primitive->connectionInfo.beaconFrameLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.beaconFrame, ((u16) (primitive->connectionInfo.beaconFrameLength)));
- }
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.associationReqFrameLength);
- if (primitive->connectionInfo.associationReqFrameLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.associationReqFrame, ((u16) (primitive->connectionInfo.associationReqFrameLength)));
- }
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.associationRspFrameLength);
- if (primitive->connectionInfo.associationRspFrameLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.associationRspFrame, ((u16) (primitive->connectionInfo.associationRspFrameLength)));
- }
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocScanInfoElementsLength);
- if (primitive->connectionInfo.assocScanInfoElementsLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.assocScanInfoElements, ((u16) (primitive->connectionInfo.assocScanInfoElementsLength)));
- }
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocReqCapabilities);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocReqListenIntervalTu);
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.assocReqApAddress.a, ((u16) (6)));
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocReqInfoElementsLength);
- if (primitive->connectionInfo.assocReqInfoElementsLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.assocReqInfoElements, ((u16) (primitive->connectionInfo.assocReqInfoElementsLength)));
- }
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocRspResult);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocRspCapabilityInfo);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocRspAssociationId);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocRspInfoElementsLength);
- if (primitive->connectionInfo.assocRspInfoElementsLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.assocRspInfoElements, ((u16) (primitive->connectionInfo.assocRspInfoElementsLength)));
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeConnectionInfoGetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeConnectionInfoGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeConnectionInfoGetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrMemCpyDes(primitive->connectionInfo.ssid.ssid, buffer, &offset, ((u16) (32)));
- CsrUint8Des((u8 *) &primitive->connectionInfo.ssid.length, buffer, &offset);
- CsrMemCpyDes(primitive->connectionInfo.bssid.a, buffer, &offset, ((u16) (6)));
- CsrUint8Des((u8 *) &primitive->connectionInfo.networkType80211, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->connectionInfo.channelNumber, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.channelFrequency, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.authMode, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.pairwiseCipher, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.groupCipher, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->connectionInfo.ifIndex, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.atimWindowTu, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.beaconPeriodTu, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->connectionInfo.reassociation, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.beaconFrameLength, buffer, &offset);
- if (primitive->connectionInfo.beaconFrameLength)
- {
- primitive->connectionInfo.beaconFrame = kmalloc(primitive->connectionInfo.beaconFrameLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->connectionInfo.beaconFrame, buffer, &offset, ((u16) (primitive->connectionInfo.beaconFrameLength)));
- }
- else
- {
- primitive->connectionInfo.beaconFrame = NULL;
- }
- CsrUint16Des((u16 *) &primitive->connectionInfo.associationReqFrameLength, buffer, &offset);
- if (primitive->connectionInfo.associationReqFrameLength)
- {
- primitive->connectionInfo.associationReqFrame = kmalloc(primitive->connectionInfo.associationReqFrameLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->connectionInfo.associationReqFrame, buffer, &offset, ((u16) (primitive->connectionInfo.associationReqFrameLength)));
- }
- else
- {
- primitive->connectionInfo.associationReqFrame = NULL;
- }
- CsrUint16Des((u16 *) &primitive->connectionInfo.associationRspFrameLength, buffer, &offset);
- if (primitive->connectionInfo.associationRspFrameLength)
- {
- primitive->connectionInfo.associationRspFrame = kmalloc(primitive->connectionInfo.associationRspFrameLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->connectionInfo.associationRspFrame, buffer, &offset, ((u16) (primitive->connectionInfo.associationRspFrameLength)));
- }
- else
- {
- primitive->connectionInfo.associationRspFrame = NULL;
- }
- CsrUint16Des((u16 *) &primitive->connectionInfo.assocScanInfoElementsLength, buffer, &offset);
- if (primitive->connectionInfo.assocScanInfoElementsLength)
- {
- primitive->connectionInfo.assocScanInfoElements = kmalloc(primitive->connectionInfo.assocScanInfoElementsLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->connectionInfo.assocScanInfoElements, buffer, &offset, ((u16) (primitive->connectionInfo.assocScanInfoElementsLength)));
- }
- else
- {
- primitive->connectionInfo.assocScanInfoElements = NULL;
- }
- CsrUint16Des((u16 *) &primitive->connectionInfo.assocReqCapabilities, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.assocReqListenIntervalTu, buffer, &offset);
- CsrMemCpyDes(primitive->connectionInfo.assocReqApAddress.a, buffer, &offset, ((u16) (6)));
- CsrUint16Des((u16 *) &primitive->connectionInfo.assocReqInfoElementsLength, buffer, &offset);
- if (primitive->connectionInfo.assocReqInfoElementsLength)
- {
- primitive->connectionInfo.assocReqInfoElements = kmalloc(primitive->connectionInfo.assocReqInfoElementsLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->connectionInfo.assocReqInfoElements, buffer, &offset, ((u16) (primitive->connectionInfo.assocReqInfoElementsLength)));
- }
- else
- {
- primitive->connectionInfo.assocReqInfoElements = NULL;
- }
- CsrUint16Des((u16 *) &primitive->connectionInfo.assocRspResult, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.assocRspCapabilityInfo, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.assocRspAssociationId, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.assocRspInfoElementsLength, buffer, &offset);
- if (primitive->connectionInfo.assocRspInfoElementsLength)
- {
- primitive->connectionInfo.assocRspInfoElements = kmalloc(primitive->connectionInfo.assocRspInfoElementsLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->connectionInfo.assocRspInfoElements, buffer, &offset, ((u16) (primitive->connectionInfo.assocRspInfoElementsLength)));
- }
- else
- {
- primitive->connectionInfo.assocRspInfoElements = NULL;
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmeConnectionInfoGetCfmSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeConnectionInfoGetCfm *primitive = (CsrWifiSmeConnectionInfoGetCfm *) voidPrimitivePointer;
- kfree(primitive->connectionInfo.beaconFrame);
- kfree(primitive->connectionInfo.associationReqFrame);
- kfree(primitive->connectionInfo.associationRspFrame);
- kfree(primitive->connectionInfo.assocScanInfoElements);
- kfree(primitive->connectionInfo.assocReqInfoElements);
- kfree(primitive->connectionInfo.assocRspInfoElements);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeConnectionQualityIndSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 9) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* s16 primitive->linkQuality.unifiRssi */
- bufferSize += 2; /* s16 primitive->linkQuality.unifiSnr */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeConnectionQualityIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeConnectionQualityInd *primitive = (CsrWifiSmeConnectionQualityInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->linkQuality.unifiRssi);
- CsrUint16Ser(ptr, len, (u16) primitive->linkQuality.unifiSnr);
- return(ptr);
-}
-
-
-void* CsrWifiSmeConnectionQualityIndDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeConnectionQualityInd *primitive = kmalloc(sizeof(CsrWifiSmeConnectionQualityInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->linkQuality.unifiRssi, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->linkQuality.unifiSnr, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeConnectionStatsGetCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 101) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 1; /* u8 primitive->connectionStats.unifiTxDataRate */
- bufferSize += 1; /* u8 primitive->connectionStats.unifiRxDataRate */
- bufferSize += 4; /* u32 primitive->connectionStats.dot11RetryCount */
- bufferSize += 4; /* u32 primitive->connectionStats.dot11MultipleRetryCount */
- bufferSize += 4; /* u32 primitive->connectionStats.dot11AckFailureCount */
- bufferSize += 4; /* u32 primitive->connectionStats.dot11FrameDuplicateCount */
- bufferSize += 4; /* u32 primitive->connectionStats.dot11FcsErrorCount */
- bufferSize += 4; /* u32 primitive->connectionStats.dot11RtsSuccessCount */
- bufferSize += 4; /* u32 primitive->connectionStats.dot11RtsFailureCount */
- bufferSize += 4; /* u32 primitive->connectionStats.dot11FailedCount */
- bufferSize += 4; /* u32 primitive->connectionStats.dot11TransmittedFragmentCount */
- bufferSize += 4; /* u32 primitive->connectionStats.dot11TransmittedFrameCount */
- bufferSize += 4; /* u32 primitive->connectionStats.dot11WepExcludedCount */
- bufferSize += 4; /* u32 primitive->connectionStats.dot11WepIcvErrorCount */
- bufferSize += 4; /* u32 primitive->connectionStats.dot11WepUndecryptableCount */
- bufferSize += 4; /* u32 primitive->connectionStats.dot11MulticastReceivedFrameCount */
- bufferSize += 4; /* u32 primitive->connectionStats.dot11MulticastTransmittedFrameCount */
- bufferSize += 4; /* u32 primitive->connectionStats.dot11ReceivedFragmentCount */
- bufferSize += 4; /* u32 primitive->connectionStats.dot11Rsna4WayHandshakeFailures */
- bufferSize += 4; /* u32 primitive->connectionStats.dot11RsnaTkipCounterMeasuresInvoked */
- bufferSize += 4; /* u32 primitive->connectionStats.dot11RsnaStatsTkipLocalMicFailures */
- bufferSize += 4; /* u32 primitive->connectionStats.dot11RsnaStatsTkipReplays */
- bufferSize += 4; /* u32 primitive->connectionStats.dot11RsnaStatsTkipIcvErrors */
- bufferSize += 4; /* u32 primitive->connectionStats.dot11RsnaStatsCcmpReplays */
- bufferSize += 4; /* u32 primitive->connectionStats.dot11RsnaStatsCcmpDecryptErrors */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeConnectionStatsGetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeConnectionStatsGetCfm *primitive = (CsrWifiSmeConnectionStatsGetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint8Ser(ptr, len, (u8) primitive->connectionStats.unifiTxDataRate);
- CsrUint8Ser(ptr, len, (u8) primitive->connectionStats.unifiRxDataRate);
- CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11RetryCount);
- CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11MultipleRetryCount);
- CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11AckFailureCount);
- CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11FrameDuplicateCount);
- CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11FcsErrorCount);
- CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11RtsSuccessCount);
- CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11RtsFailureCount);
- CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11FailedCount);
- CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11TransmittedFragmentCount);
- CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11TransmittedFrameCount);
- CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11WepExcludedCount);
- CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11WepIcvErrorCount);
- CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11WepUndecryptableCount);
- CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11MulticastReceivedFrameCount);
- CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11MulticastTransmittedFrameCount);
- CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11ReceivedFragmentCount);
- CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11Rsna4WayHandshakeFailures);
- CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11RsnaTkipCounterMeasuresInvoked);
- CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11RsnaStatsTkipLocalMicFailures);
- CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11RsnaStatsTkipReplays);
- CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11RsnaStatsTkipIcvErrors);
- CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11RsnaStatsCcmpReplays);
- CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11RsnaStatsCcmpDecryptErrors);
- return(ptr);
-}
-
-
-void* CsrWifiSmeConnectionStatsGetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeConnectionStatsGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeConnectionStatsGetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->connectionStats.unifiTxDataRate, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->connectionStats.unifiRxDataRate, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->connectionStats.dot11RetryCount, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->connectionStats.dot11MultipleRetryCount, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->connectionStats.dot11AckFailureCount, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->connectionStats.dot11FrameDuplicateCount, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->connectionStats.dot11FcsErrorCount, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->connectionStats.dot11RtsSuccessCount, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->connectionStats.dot11RtsFailureCount, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->connectionStats.dot11FailedCount, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->connectionStats.dot11TransmittedFragmentCount, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->connectionStats.dot11TransmittedFrameCount, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->connectionStats.dot11WepExcludedCount, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->connectionStats.dot11WepIcvErrorCount, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->connectionStats.dot11WepUndecryptableCount, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->connectionStats.dot11MulticastReceivedFrameCount, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->connectionStats.dot11MulticastTransmittedFrameCount, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->connectionStats.dot11ReceivedFragmentCount, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->connectionStats.dot11Rsna4WayHandshakeFailures, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->connectionStats.dot11RsnaTkipCounterMeasuresInvoked, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->connectionStats.dot11RsnaStatsTkipLocalMicFailures, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->connectionStats.dot11RsnaStatsTkipReplays, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->connectionStats.dot11RsnaStatsTkipIcvErrors, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->connectionStats.dot11RsnaStatsCcmpReplays, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->connectionStats.dot11RsnaStatsCcmpDecryptErrors, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeDisconnectCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 7) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeDisconnectCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeDisconnectCfm *primitive = (CsrWifiSmeDisconnectCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- return(ptr);
-}
-
-
-void* CsrWifiSmeDisconnectCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeDisconnectCfm *primitive = kmalloc(sizeof(CsrWifiSmeDisconnectCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeHostConfigGetCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 10) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 1; /* CsrWifiSmeHostPowerMode primitive->hostConfig.powerMode */
- bufferSize += 2; /* u16 primitive->hostConfig.applicationDataPeriodMs */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeHostConfigGetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeHostConfigGetCfm *primitive = (CsrWifiSmeHostConfigGetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint8Ser(ptr, len, (u8) primitive->hostConfig.powerMode);
- CsrUint16Ser(ptr, len, (u16) primitive->hostConfig.applicationDataPeriodMs);
- return(ptr);
-}
-
-
-void* CsrWifiSmeHostConfigGetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeHostConfigGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeHostConfigGetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->hostConfig.powerMode, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->hostConfig.applicationDataPeriodMs, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeHostConfigSetCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 7) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeHostConfigSetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeHostConfigSetCfm *primitive = (CsrWifiSmeHostConfigSetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- return(ptr);
-}
-
-
-void* CsrWifiSmeHostConfigSetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeHostConfigSetCfm *primitive = kmalloc(sizeof(CsrWifiSmeHostConfigSetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeIbssStationIndSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 10) */
- bufferSize += 6; /* u8 primitive->address.a[6] */
- bufferSize += 1; /* u8 primitive->isconnected */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeIbssStationIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeIbssStationInd *primitive = (CsrWifiSmeIbssStationInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrMemCpySer(ptr, len, (const void *) primitive->address.a, ((u16) (6)));
- CsrUint8Ser(ptr, len, (u8) primitive->isconnected);
- return(ptr);
-}
-
-
-void* CsrWifiSmeIbssStationIndDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeIbssStationInd *primitive = kmalloc(sizeof(CsrWifiSmeIbssStationInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrMemCpyDes(primitive->address.a, buffer, &offset, ((u16) (6)));
- CsrUint8Des((u8 *) &primitive->isconnected, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeKeyCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 15) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 1; /* CsrWifiSmeListAction primitive->action */
- bufferSize += 1; /* CsrWifiSmeKeyType primitive->keyType */
- bufferSize += 6; /* u8 primitive->peerMacAddress.a[6] */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeKeyCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeKeyCfm *primitive = (CsrWifiSmeKeyCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint8Ser(ptr, len, (u8) primitive->action);
- CsrUint8Ser(ptr, len, (u8) primitive->keyType);
- CsrMemCpySer(ptr, len, (const void *) primitive->peerMacAddress.a, ((u16) (6)));
- return(ptr);
-}
-
-
-void* CsrWifiSmeKeyCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeKeyCfm *primitive = kmalloc(sizeof(CsrWifiSmeKeyCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->action, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->keyType, buffer, &offset);
- CsrMemCpyDes(primitive->peerMacAddress.a, buffer, &offset, ((u16) (6)));
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeLinkQualityGetCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 11) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 2; /* s16 primitive->linkQuality.unifiRssi */
- bufferSize += 2; /* s16 primitive->linkQuality.unifiSnr */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeLinkQualityGetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeLinkQualityGetCfm *primitive = (CsrWifiSmeLinkQualityGetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint16Ser(ptr, len, (u16) primitive->linkQuality.unifiRssi);
- CsrUint16Ser(ptr, len, (u16) primitive->linkQuality.unifiSnr);
- return(ptr);
-}
-
-
-void* CsrWifiSmeLinkQualityGetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeLinkQualityGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeLinkQualityGetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->linkQuality.unifiRssi, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->linkQuality.unifiSnr, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeMediaStatusIndSizeof(void *msg)
-{
- CsrWifiSmeMediaStatusInd *primitive = (CsrWifiSmeMediaStatusInd *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 99) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 1; /* CsrWifiSmeMediaStatus primitive->mediaStatus */
- bufferSize += 32; /* u8 primitive->connectionInfo.ssid.ssid[32] */
- bufferSize += 1; /* u8 primitive->connectionInfo.ssid.length */
- bufferSize += 6; /* u8 primitive->connectionInfo.bssid.a[6] */
- bufferSize += 1; /* CsrWifiSme80211NetworkType primitive->connectionInfo.networkType80211 */
- bufferSize += 1; /* u8 primitive->connectionInfo.channelNumber */
- bufferSize += 2; /* u16 primitive->connectionInfo.channelFrequency */
- bufferSize += 2; /* CsrWifiSmeAuthMode primitive->connectionInfo.authMode */
- bufferSize += 2; /* CsrWifiSmeEncryption primitive->connectionInfo.pairwiseCipher */
- bufferSize += 2; /* CsrWifiSmeEncryption primitive->connectionInfo.groupCipher */
- bufferSize += 1; /* CsrWifiSmeRadioIF primitive->connectionInfo.ifIndex */
- bufferSize += 2; /* u16 primitive->connectionInfo.atimWindowTu */
- bufferSize += 2; /* u16 primitive->connectionInfo.beaconPeriodTu */
- bufferSize += 1; /* u8 primitive->connectionInfo.reassociation */
- bufferSize += 2; /* u16 primitive->connectionInfo.beaconFrameLength */
- bufferSize += primitive->connectionInfo.beaconFrameLength; /* u8 primitive->connectionInfo.beaconFrame */
- bufferSize += 2; /* u16 primitive->connectionInfo.associationReqFrameLength */
- bufferSize += primitive->connectionInfo.associationReqFrameLength; /* u8 primitive->connectionInfo.associationReqFrame */
- bufferSize += 2; /* u16 primitive->connectionInfo.associationRspFrameLength */
- bufferSize += primitive->connectionInfo.associationRspFrameLength; /* u8 primitive->connectionInfo.associationRspFrame */
- bufferSize += 2; /* u16 primitive->connectionInfo.assocScanInfoElementsLength */
- bufferSize += primitive->connectionInfo.assocScanInfoElementsLength; /* u8 primitive->connectionInfo.assocScanInfoElements */
- bufferSize += 2; /* u16 primitive->connectionInfo.assocReqCapabilities */
- bufferSize += 2; /* u16 primitive->connectionInfo.assocReqListenIntervalTu */
- bufferSize += 6; /* u8 primitive->connectionInfo.assocReqApAddress.a[6] */
- bufferSize += 2; /* u16 primitive->connectionInfo.assocReqInfoElementsLength */
- bufferSize += primitive->connectionInfo.assocReqInfoElementsLength; /* u8 primitive->connectionInfo.assocReqInfoElements */
- bufferSize += 2; /* CsrWifiSmeIEEE80211Result primitive->connectionInfo.assocRspResult */
- bufferSize += 2; /* u16 primitive->connectionInfo.assocRspCapabilityInfo */
- bufferSize += 2; /* u16 primitive->connectionInfo.assocRspAssociationId */
- bufferSize += 2; /* u16 primitive->connectionInfo.assocRspInfoElementsLength */
- bufferSize += primitive->connectionInfo.assocRspInfoElementsLength; /* u8 primitive->connectionInfo.assocRspInfoElements */
- bufferSize += 2; /* CsrWifiSmeIEEE80211Reason primitive->disassocReason */
- bufferSize += 2; /* CsrWifiSmeIEEE80211Reason primitive->deauthReason */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeMediaStatusIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeMediaStatusInd *primitive = (CsrWifiSmeMediaStatusInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint8Ser(ptr, len, (u8) primitive->mediaStatus);
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.ssid.ssid, ((u16) (32)));
- CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.ssid.length);
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.bssid.a, ((u16) (6)));
- CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.networkType80211);
- CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.channelNumber);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.channelFrequency);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.authMode);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.pairwiseCipher);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.groupCipher);
- CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.ifIndex);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.atimWindowTu);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.beaconPeriodTu);
- CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.reassociation);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.beaconFrameLength);
- if (primitive->connectionInfo.beaconFrameLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.beaconFrame, ((u16) (primitive->connectionInfo.beaconFrameLength)));
- }
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.associationReqFrameLength);
- if (primitive->connectionInfo.associationReqFrameLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.associationReqFrame, ((u16) (primitive->connectionInfo.associationReqFrameLength)));
- }
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.associationRspFrameLength);
- if (primitive->connectionInfo.associationRspFrameLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.associationRspFrame, ((u16) (primitive->connectionInfo.associationRspFrameLength)));
- }
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocScanInfoElementsLength);
- if (primitive->connectionInfo.assocScanInfoElementsLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.assocScanInfoElements, ((u16) (primitive->connectionInfo.assocScanInfoElementsLength)));
- }
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocReqCapabilities);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocReqListenIntervalTu);
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.assocReqApAddress.a, ((u16) (6)));
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocReqInfoElementsLength);
- if (primitive->connectionInfo.assocReqInfoElementsLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.assocReqInfoElements, ((u16) (primitive->connectionInfo.assocReqInfoElementsLength)));
- }
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocRspResult);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocRspCapabilityInfo);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocRspAssociationId);
- CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocRspInfoElementsLength);
- if (primitive->connectionInfo.assocRspInfoElementsLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.assocRspInfoElements, ((u16) (primitive->connectionInfo.assocRspInfoElementsLength)));
- }
- CsrUint16Ser(ptr, len, (u16) primitive->disassocReason);
- CsrUint16Ser(ptr, len, (u16) primitive->deauthReason);
- return(ptr);
-}
-
-
-void* CsrWifiSmeMediaStatusIndDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeMediaStatusInd *primitive = kmalloc(sizeof(CsrWifiSmeMediaStatusInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->mediaStatus, buffer, &offset);
- CsrMemCpyDes(primitive->connectionInfo.ssid.ssid, buffer, &offset, ((u16) (32)));
- CsrUint8Des((u8 *) &primitive->connectionInfo.ssid.length, buffer, &offset);
- CsrMemCpyDes(primitive->connectionInfo.bssid.a, buffer, &offset, ((u16) (6)));
- CsrUint8Des((u8 *) &primitive->connectionInfo.networkType80211, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->connectionInfo.channelNumber, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.channelFrequency, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.authMode, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.pairwiseCipher, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.groupCipher, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->connectionInfo.ifIndex, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.atimWindowTu, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.beaconPeriodTu, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->connectionInfo.reassociation, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.beaconFrameLength, buffer, &offset);
- if (primitive->connectionInfo.beaconFrameLength)
- {
- primitive->connectionInfo.beaconFrame = kmalloc(primitive->connectionInfo.beaconFrameLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->connectionInfo.beaconFrame, buffer, &offset, ((u16) (primitive->connectionInfo.beaconFrameLength)));
- }
- else
- {
- primitive->connectionInfo.beaconFrame = NULL;
- }
- CsrUint16Des((u16 *) &primitive->connectionInfo.associationReqFrameLength, buffer, &offset);
- if (primitive->connectionInfo.associationReqFrameLength)
- {
- primitive->connectionInfo.associationReqFrame = kmalloc(primitive->connectionInfo.associationReqFrameLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->connectionInfo.associationReqFrame, buffer, &offset, ((u16) (primitive->connectionInfo.associationReqFrameLength)));
- }
- else
- {
- primitive->connectionInfo.associationReqFrame = NULL;
- }
- CsrUint16Des((u16 *) &primitive->connectionInfo.associationRspFrameLength, buffer, &offset);
- if (primitive->connectionInfo.associationRspFrameLength)
- {
- primitive->connectionInfo.associationRspFrame = kmalloc(primitive->connectionInfo.associationRspFrameLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->connectionInfo.associationRspFrame, buffer, &offset, ((u16) (primitive->connectionInfo.associationRspFrameLength)));
- }
- else
- {
- primitive->connectionInfo.associationRspFrame = NULL;
- }
- CsrUint16Des((u16 *) &primitive->connectionInfo.assocScanInfoElementsLength, buffer, &offset);
- if (primitive->connectionInfo.assocScanInfoElementsLength)
- {
- primitive->connectionInfo.assocScanInfoElements = kmalloc(primitive->connectionInfo.assocScanInfoElementsLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->connectionInfo.assocScanInfoElements, buffer, &offset, ((u16) (primitive->connectionInfo.assocScanInfoElementsLength)));
- }
- else
- {
- primitive->connectionInfo.assocScanInfoElements = NULL;
- }
- CsrUint16Des((u16 *) &primitive->connectionInfo.assocReqCapabilities, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.assocReqListenIntervalTu, buffer, &offset);
- CsrMemCpyDes(primitive->connectionInfo.assocReqApAddress.a, buffer, &offset, ((u16) (6)));
- CsrUint16Des((u16 *) &primitive->connectionInfo.assocReqInfoElementsLength, buffer, &offset);
- if (primitive->connectionInfo.assocReqInfoElementsLength)
- {
- primitive->connectionInfo.assocReqInfoElements = kmalloc(primitive->connectionInfo.assocReqInfoElementsLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->connectionInfo.assocReqInfoElements, buffer, &offset, ((u16) (primitive->connectionInfo.assocReqInfoElementsLength)));
- }
- else
- {
- primitive->connectionInfo.assocReqInfoElements = NULL;
- }
- CsrUint16Des((u16 *) &primitive->connectionInfo.assocRspResult, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.assocRspCapabilityInfo, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.assocRspAssociationId, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->connectionInfo.assocRspInfoElementsLength, buffer, &offset);
- if (primitive->connectionInfo.assocRspInfoElementsLength)
- {
- primitive->connectionInfo.assocRspInfoElements = kmalloc(primitive->connectionInfo.assocRspInfoElementsLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->connectionInfo.assocRspInfoElements, buffer, &offset, ((u16) (primitive->connectionInfo.assocRspInfoElementsLength)));
- }
- else
- {
- primitive->connectionInfo.assocRspInfoElements = NULL;
- }
- CsrUint16Des((u16 *) &primitive->disassocReason, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->deauthReason, buffer, &offset);
-
- return primitive;
-}
-
-
-void CsrWifiSmeMediaStatusIndSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeMediaStatusInd *primitive = (CsrWifiSmeMediaStatusInd *) voidPrimitivePointer;
- kfree(primitive->connectionInfo.beaconFrame);
- kfree(primitive->connectionInfo.associationReqFrame);
- kfree(primitive->connectionInfo.associationRspFrame);
- kfree(primitive->connectionInfo.assocScanInfoElements);
- kfree(primitive->connectionInfo.assocReqInfoElements);
- kfree(primitive->connectionInfo.assocRspInfoElements);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeMibConfigGetCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 13) */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 1; /* u8 primitive->mibConfig.unifiFixMaxTxDataRate */
- bufferSize += 1; /* u8 primitive->mibConfig.unifiFixTxDataRate */
- bufferSize += 2; /* u16 primitive->mibConfig.dot11RtsThreshold */
- bufferSize += 2; /* u16 primitive->mibConfig.dot11FragmentationThreshold */
- bufferSize += 2; /* u16 primitive->mibConfig.dot11CurrentTxPowerLevel */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeMibConfigGetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeMibConfigGetCfm *primitive = (CsrWifiSmeMibConfigGetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint8Ser(ptr, len, (u8) primitive->mibConfig.unifiFixMaxTxDataRate);
- CsrUint8Ser(ptr, len, (u8) primitive->mibConfig.unifiFixTxDataRate);
- CsrUint16Ser(ptr, len, (u16) primitive->mibConfig.dot11RtsThreshold);
- CsrUint16Ser(ptr, len, (u16) primitive->mibConfig.dot11FragmentationThreshold);
- CsrUint16Ser(ptr, len, (u16) primitive->mibConfig.dot11CurrentTxPowerLevel);
- return(ptr);
-}
-
-
-void* CsrWifiSmeMibConfigGetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeMibConfigGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeMibConfigGetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->mibConfig.unifiFixMaxTxDataRate, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->mibConfig.unifiFixTxDataRate, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->mibConfig.dot11RtsThreshold, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->mibConfig.dot11FragmentationThreshold, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->mibConfig.dot11CurrentTxPowerLevel, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeMibGetCfmSizeof(void *msg)
-{
- CsrWifiSmeMibGetCfm *primitive = (CsrWifiSmeMibGetCfm *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 8) */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 2; /* u16 primitive->mibAttributeLength */
- bufferSize += primitive->mibAttributeLength; /* u8 primitive->mibAttribute */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeMibGetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeMibGetCfm *primitive = (CsrWifiSmeMibGetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint16Ser(ptr, len, (u16) primitive->mibAttributeLength);
- if (primitive->mibAttributeLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->mibAttribute, ((u16) (primitive->mibAttributeLength)));
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeMibGetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeMibGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeMibGetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->mibAttributeLength, buffer, &offset);
- if (primitive->mibAttributeLength)
- {
- primitive->mibAttribute = kmalloc(primitive->mibAttributeLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->mibAttribute, buffer, &offset, ((u16) (primitive->mibAttributeLength)));
- }
- else
- {
- primitive->mibAttribute = NULL;
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmeMibGetCfmSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeMibGetCfm *primitive = (CsrWifiSmeMibGetCfm *) voidPrimitivePointer;
- kfree(primitive->mibAttribute);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeMibGetNextCfmSizeof(void *msg)
-{
- CsrWifiSmeMibGetNextCfm *primitive = (CsrWifiSmeMibGetNextCfm *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 8) */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 2; /* u16 primitive->mibAttributeLength */
- bufferSize += primitive->mibAttributeLength; /* u8 primitive->mibAttribute */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeMibGetNextCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeMibGetNextCfm *primitive = (CsrWifiSmeMibGetNextCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint16Ser(ptr, len, (u16) primitive->mibAttributeLength);
- if (primitive->mibAttributeLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->mibAttribute, ((u16) (primitive->mibAttributeLength)));
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeMibGetNextCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeMibGetNextCfm *primitive = kmalloc(sizeof(CsrWifiSmeMibGetNextCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->mibAttributeLength, buffer, &offset);
- if (primitive->mibAttributeLength)
- {
- primitive->mibAttribute = kmalloc(primitive->mibAttributeLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->mibAttribute, buffer, &offset, ((u16) (primitive->mibAttributeLength)));
- }
- else
- {
- primitive->mibAttribute = NULL;
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmeMibGetNextCfmSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeMibGetNextCfm *primitive = (CsrWifiSmeMibGetNextCfm *) voidPrimitivePointer;
- kfree(primitive->mibAttribute);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeMicFailureIndSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 15) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 1; /* u8 primitive->secondFailure */
- bufferSize += 2; /* u16 primitive->count */
- bufferSize += 6; /* u8 primitive->address.a[6] */
- bufferSize += 1; /* CsrWifiSmeKeyType primitive->keyType */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeMicFailureIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeMicFailureInd *primitive = (CsrWifiSmeMicFailureInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint8Ser(ptr, len, (u8) primitive->secondFailure);
- CsrUint16Ser(ptr, len, (u16) primitive->count);
- CsrMemCpySer(ptr, len, (const void *) primitive->address.a, ((u16) (6)));
- CsrUint8Ser(ptr, len, (u8) primitive->keyType);
- return(ptr);
-}
-
-
-void* CsrWifiSmeMicFailureIndDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeMicFailureInd *primitive = kmalloc(sizeof(CsrWifiSmeMicFailureInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->secondFailure, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->count, buffer, &offset);
- CsrMemCpyDes(primitive->address.a, buffer, &offset, ((u16) (6)));
- CsrUint8Des((u8 *) &primitive->keyType, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeMulticastAddressCfmSizeof(void *msg)
-{
- CsrWifiSmeMulticastAddressCfm *primitive = (CsrWifiSmeMulticastAddressCfm *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 15) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 1; /* CsrWifiSmeListAction primitive->action */
- bufferSize += 1; /* u8 primitive->getAddressesCount */
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->getAddressesCount; i1++)
- {
- bufferSize += 6; /* u8 primitive->getAddresses[i1].a[6] */
- }
- }
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeMulticastAddressCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeMulticastAddressCfm *primitive = (CsrWifiSmeMulticastAddressCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint8Ser(ptr, len, (u8) primitive->action);
- CsrUint8Ser(ptr, len, (u8) primitive->getAddressesCount);
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->getAddressesCount; i1++)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->getAddresses[i1].a, ((u16) (6)));
- }
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeMulticastAddressCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeMulticastAddressCfm *primitive = kmalloc(sizeof(CsrWifiSmeMulticastAddressCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->action, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->getAddressesCount, buffer, &offset);
- primitive->getAddresses = NULL;
- if (primitive->getAddressesCount)
- {
- primitive->getAddresses = kmalloc(sizeof(CsrWifiMacAddress) * primitive->getAddressesCount, GFP_KERNEL);
- }
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->getAddressesCount; i1++)
- {
- CsrMemCpyDes(primitive->getAddresses[i1].a, buffer, &offset, ((u16) (6)));
- }
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmeMulticastAddressCfmSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeMulticastAddressCfm *primitive = (CsrWifiSmeMulticastAddressCfm *) voidPrimitivePointer;
- kfree(primitive->getAddresses);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmePacketFilterSetCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 7) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmePacketFilterSetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmePacketFilterSetCfm *primitive = (CsrWifiSmePacketFilterSetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- return(ptr);
-}
-
-
-void* CsrWifiSmePacketFilterSetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmePacketFilterSetCfm *primitive = kmalloc(sizeof(CsrWifiSmePacketFilterSetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmePermanentMacAddressGetCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 11) */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 6; /* u8 primitive->permanentMacAddress.a[6] */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmePermanentMacAddressGetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmePermanentMacAddressGetCfm *primitive = (CsrWifiSmePermanentMacAddressGetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrMemCpySer(ptr, len, (const void *) primitive->permanentMacAddress.a, ((u16) (6)));
- return(ptr);
-}
-
-
-void* CsrWifiSmePermanentMacAddressGetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmePermanentMacAddressGetCfm *primitive = kmalloc(sizeof(CsrWifiSmePermanentMacAddressGetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrMemCpyDes(primitive->permanentMacAddress.a, buffer, &offset, ((u16) (6)));
-
- return primitive;
-}
-
-
-size_t CsrWifiSmePmkidCandidateListIndSizeof(void *msg)
-{
- CsrWifiSmePmkidCandidateListInd *primitive = (CsrWifiSmePmkidCandidateListInd *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 13) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 1; /* u8 primitive->pmkidCandidatesCount */
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->pmkidCandidatesCount; i1++)
- {
- bufferSize += 6; /* u8 primitive->pmkidCandidates[i1].bssid.a[6] */
- bufferSize += 1; /* u8 primitive->pmkidCandidates[i1].preAuthAllowed */
- }
- }
- return bufferSize;
-}
-
-
-u8* CsrWifiSmePmkidCandidateListIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmePmkidCandidateListInd *primitive = (CsrWifiSmePmkidCandidateListInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint8Ser(ptr, len, (u8) primitive->pmkidCandidatesCount);
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->pmkidCandidatesCount; i1++)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->pmkidCandidates[i1].bssid.a, ((u16) (6)));
- CsrUint8Ser(ptr, len, (u8) primitive->pmkidCandidates[i1].preAuthAllowed);
- }
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmePmkidCandidateListIndDes(u8 *buffer, size_t length)
-{
- CsrWifiSmePmkidCandidateListInd *primitive = kmalloc(sizeof(CsrWifiSmePmkidCandidateListInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->pmkidCandidatesCount, buffer, &offset);
- primitive->pmkidCandidates = NULL;
- if (primitive->pmkidCandidatesCount)
- {
- primitive->pmkidCandidates = kmalloc(sizeof(CsrWifiSmePmkidCandidate) * primitive->pmkidCandidatesCount, GFP_KERNEL);
- }
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->pmkidCandidatesCount; i1++)
- {
- CsrMemCpyDes(primitive->pmkidCandidates[i1].bssid.a, buffer, &offset, ((u16) (6)));
- CsrUint8Des((u8 *) &primitive->pmkidCandidates[i1].preAuthAllowed, buffer, &offset);
- }
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmePmkidCandidateListIndSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmePmkidCandidateListInd *primitive = (CsrWifiSmePmkidCandidateListInd *) voidPrimitivePointer;
- kfree(primitive->pmkidCandidates);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmePmkidCfmSizeof(void *msg)
-{
- CsrWifiSmePmkidCfm *primitive = (CsrWifiSmePmkidCfm *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 31) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 1; /* CsrWifiSmeListAction primitive->action */
- bufferSize += 1; /* u8 primitive->getPmkidsCount */
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->getPmkidsCount; i1++)
- {
- bufferSize += 6; /* u8 primitive->getPmkids[i1].bssid.a[6] */
- bufferSize += 16; /* u8 primitive->getPmkids[i1].pmkid[16] */
- }
- }
- return bufferSize;
-}
-
-
-u8* CsrWifiSmePmkidCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmePmkidCfm *primitive = (CsrWifiSmePmkidCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint8Ser(ptr, len, (u8) primitive->action);
- CsrUint8Ser(ptr, len, (u8) primitive->getPmkidsCount);
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->getPmkidsCount; i1++)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->getPmkids[i1].bssid.a, ((u16) (6)));
- CsrMemCpySer(ptr, len, (const void *) primitive->getPmkids[i1].pmkid, ((u16) (16)));
- }
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmePmkidCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmePmkidCfm *primitive = kmalloc(sizeof(CsrWifiSmePmkidCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->action, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->getPmkidsCount, buffer, &offset);
- primitive->getPmkids = NULL;
- if (primitive->getPmkidsCount)
- {
- primitive->getPmkids = kmalloc(sizeof(CsrWifiSmePmkid) * primitive->getPmkidsCount, GFP_KERNEL);
- }
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->getPmkidsCount; i1++)
- {
- CsrMemCpyDes(primitive->getPmkids[i1].bssid.a, buffer, &offset, ((u16) (6)));
- CsrMemCpyDes(primitive->getPmkids[i1].pmkid, buffer, &offset, ((u16) (16)));
- }
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmePmkidCfmSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmePmkidCfm *primitive = (CsrWifiSmePmkidCfm *) voidPrimitivePointer;
- kfree(primitive->getPmkids);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmePowerConfigGetCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 13) */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 1; /* CsrWifiSmePowerSaveLevel primitive->powerConfig.powerSaveLevel */
- bufferSize += 2; /* u16 primitive->powerConfig.listenIntervalTu */
- bufferSize += 1; /* u8 primitive->powerConfig.rxDtims */
- bufferSize += 1; /* CsrWifiSmeD3AutoScanMode primitive->powerConfig.d3AutoScanMode */
- bufferSize += 1; /* u8 primitive->powerConfig.clientTrafficWindow */
- bufferSize += 1; /* u8 primitive->powerConfig.opportunisticPowerSave */
- bufferSize += 1; /* u8 primitive->powerConfig.noticeOfAbsence */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmePowerConfigGetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmePowerConfigGetCfm *primitive = (CsrWifiSmePowerConfigGetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint8Ser(ptr, len, (u8) primitive->powerConfig.powerSaveLevel);
- CsrUint16Ser(ptr, len, (u16) primitive->powerConfig.listenIntervalTu);
- CsrUint8Ser(ptr, len, (u8) primitive->powerConfig.rxDtims);
- CsrUint8Ser(ptr, len, (u8) primitive->powerConfig.d3AutoScanMode);
- CsrUint8Ser(ptr, len, (u8) primitive->powerConfig.clientTrafficWindow);
- CsrUint8Ser(ptr, len, (u8) primitive->powerConfig.opportunisticPowerSave);
- CsrUint8Ser(ptr, len, (u8) primitive->powerConfig.noticeOfAbsence);
- return(ptr);
-}
-
-
-void* CsrWifiSmePowerConfigGetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmePowerConfigGetCfm *primitive = kmalloc(sizeof(CsrWifiSmePowerConfigGetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->powerConfig.powerSaveLevel, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->powerConfig.listenIntervalTu, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->powerConfig.rxDtims, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->powerConfig.d3AutoScanMode, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->powerConfig.clientTrafficWindow, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->powerConfig.opportunisticPowerSave, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->powerConfig.noticeOfAbsence, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeRegulatoryDomainInfoGetCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 10) */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 1; /* u8 primitive->regDomInfo.dot11MultiDomainCapabilityImplemented */
- bufferSize += 1; /* u8 primitive->regDomInfo.dot11MultiDomainCapabilityEnabled */
- bufferSize += 1; /* CsrWifiSmeRegulatoryDomain primitive->regDomInfo.currentRegulatoryDomain */
- bufferSize += 2; /* u8 primitive->regDomInfo.currentCountryCode[2] */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeRegulatoryDomainInfoGetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeRegulatoryDomainInfoGetCfm *primitive = (CsrWifiSmeRegulatoryDomainInfoGetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint8Ser(ptr, len, (u8) primitive->regDomInfo.dot11MultiDomainCapabilityImplemented);
- CsrUint8Ser(ptr, len, (u8) primitive->regDomInfo.dot11MultiDomainCapabilityEnabled);
- CsrUint8Ser(ptr, len, (u8) primitive->regDomInfo.currentRegulatoryDomain);
- CsrMemCpySer(ptr, len, (const void *) primitive->regDomInfo.currentCountryCode, ((u16) (2)));
- return(ptr);
-}
-
-
-void* CsrWifiSmeRegulatoryDomainInfoGetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeRegulatoryDomainInfoGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeRegulatoryDomainInfoGetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->regDomInfo.dot11MultiDomainCapabilityImplemented, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->regDomInfo.dot11MultiDomainCapabilityEnabled, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->regDomInfo.currentRegulatoryDomain, buffer, &offset);
- CsrMemCpyDes(primitive->regDomInfo.currentCountryCode, buffer, &offset, ((u16) (2)));
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeRoamCompleteIndSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 7) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeRoamCompleteIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeRoamCompleteInd *primitive = (CsrWifiSmeRoamCompleteInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- return(ptr);
-}
-
-
-void* CsrWifiSmeRoamCompleteIndDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeRoamCompleteInd *primitive = kmalloc(sizeof(CsrWifiSmeRoamCompleteInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeRoamStartIndSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 8) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 1; /* CsrWifiSmeRoamReason primitive->roamReason */
- bufferSize += 2; /* CsrWifiSmeIEEE80211Reason primitive->reason80211 */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeRoamStartIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeRoamStartInd *primitive = (CsrWifiSmeRoamStartInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint8Ser(ptr, len, (u8) primitive->roamReason);
- CsrUint16Ser(ptr, len, (u16) primitive->reason80211);
- return(ptr);
-}
-
-
-void* CsrWifiSmeRoamStartIndDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeRoamStartInd *primitive = kmalloc(sizeof(CsrWifiSmeRoamStartInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->roamReason, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->reason80211, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeRoamingConfigGetCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 72) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- {
- u16 i2;
- for (i2 = 0; i2 < 3; i2++)
- {
- bufferSize += 2; /* s16 primitive->roamingConfig.roamingBands[i2].rssiHighThreshold */
- bufferSize += 2; /* s16 primitive->roamingConfig.roamingBands[i2].rssiLowThreshold */
- bufferSize += 2; /* s16 primitive->roamingConfig.roamingBands[i2].snrHighThreshold */
- bufferSize += 2; /* s16 primitive->roamingConfig.roamingBands[i2].snrLowThreshold */
- }
- }
- bufferSize += 1; /* u8 primitive->roamingConfig.disableSmoothRoaming */
- bufferSize += 1; /* u8 primitive->roamingConfig.disableRoamScans */
- bufferSize += 1; /* u8 primitive->roamingConfig.reconnectLimit */
- bufferSize += 2; /* u16 primitive->roamingConfig.reconnectLimitIntervalMs */
- {
- u16 i2;
- for (i2 = 0; i2 < 3; i2++)
- {
- bufferSize += 2; /* u16 primitive->roamingConfig.roamScanCfg[i2].intervalSeconds */
- bufferSize += 2; /* u16 primitive->roamingConfig.roamScanCfg[i2].validitySeconds */
- bufferSize += 2; /* u16 primitive->roamingConfig.roamScanCfg[i2].minActiveChannelTimeTu */
- bufferSize += 2; /* u16 primitive->roamingConfig.roamScanCfg[i2].maxActiveChannelTimeTu */
- bufferSize += 2; /* u16 primitive->roamingConfig.roamScanCfg[i2].minPassiveChannelTimeTu */
- bufferSize += 2; /* u16 primitive->roamingConfig.roamScanCfg[i2].maxPassiveChannelTimeTu */
- }
- }
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeRoamingConfigGetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeRoamingConfigGetCfm *primitive = (CsrWifiSmeRoamingConfigGetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- {
- u16 i2;
- for (i2 = 0; i2 < 3; i2++)
- {
- CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamingBands[i2].rssiHighThreshold);
- CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamingBands[i2].rssiLowThreshold);
- CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamingBands[i2].snrHighThreshold);
- CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamingBands[i2].snrLowThreshold);
- }
- }
- CsrUint8Ser(ptr, len, (u8) primitive->roamingConfig.disableSmoothRoaming);
- CsrUint8Ser(ptr, len, (u8) primitive->roamingConfig.disableRoamScans);
- CsrUint8Ser(ptr, len, (u8) primitive->roamingConfig.reconnectLimit);
- CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.reconnectLimitIntervalMs);
- {
- u16 i2;
- for (i2 = 0; i2 < 3; i2++)
- {
- CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamScanCfg[i2].intervalSeconds);
- CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamScanCfg[i2].validitySeconds);
- CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamScanCfg[i2].minActiveChannelTimeTu);
- CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamScanCfg[i2].maxActiveChannelTimeTu);
- CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamScanCfg[i2].minPassiveChannelTimeTu);
- CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamScanCfg[i2].maxPassiveChannelTimeTu);
- }
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeRoamingConfigGetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeRoamingConfigGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeRoamingConfigGetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- {
- u16 i2;
- for (i2 = 0; i2 < 3; i2++)
- {
- CsrUint16Des((u16 *) &primitive->roamingConfig.roamingBands[i2].rssiHighThreshold, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->roamingConfig.roamingBands[i2].rssiLowThreshold, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->roamingConfig.roamingBands[i2].snrHighThreshold, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->roamingConfig.roamingBands[i2].snrLowThreshold, buffer, &offset);
- }
- }
- CsrUint8Des((u8 *) &primitive->roamingConfig.disableSmoothRoaming, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->roamingConfig.disableRoamScans, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->roamingConfig.reconnectLimit, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->roamingConfig.reconnectLimitIntervalMs, buffer, &offset);
- {
- u16 i2;
- for (i2 = 0; i2 < 3; i2++)
- {
- CsrUint16Des((u16 *) &primitive->roamingConfig.roamScanCfg[i2].intervalSeconds, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->roamingConfig.roamScanCfg[i2].validitySeconds, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->roamingConfig.roamScanCfg[i2].minActiveChannelTimeTu, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->roamingConfig.roamScanCfg[i2].maxActiveChannelTimeTu, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->roamingConfig.roamScanCfg[i2].minPassiveChannelTimeTu, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->roamingConfig.roamScanCfg[i2].maxPassiveChannelTimeTu, buffer, &offset);
- }
- }
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeRoamingConfigSetCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 7) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeRoamingConfigSetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeRoamingConfigSetCfm *primitive = (CsrWifiSmeRoamingConfigSetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- return(ptr);
-}
-
-
-void* CsrWifiSmeRoamingConfigSetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeRoamingConfigSetCfm *primitive = kmalloc(sizeof(CsrWifiSmeRoamingConfigSetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeScanConfigGetCfmSizeof(void *msg)
-{
- CsrWifiSmeScanConfigGetCfm *primitive = (CsrWifiSmeScanConfigGetCfm *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 65) */
- bufferSize += 2; /* CsrResult primitive->status */
- {
- u16 i2;
- for (i2 = 0; i2 < 4; i2++)
- {
- bufferSize += 2; /* u16 primitive->scanConfig.scanCfg[i2].intervalSeconds */
- bufferSize += 2; /* u16 primitive->scanConfig.scanCfg[i2].validitySeconds */
- bufferSize += 2; /* u16 primitive->scanConfig.scanCfg[i2].minActiveChannelTimeTu */
- bufferSize += 2; /* u16 primitive->scanConfig.scanCfg[i2].maxActiveChannelTimeTu */
- bufferSize += 2; /* u16 primitive->scanConfig.scanCfg[i2].minPassiveChannelTimeTu */
- bufferSize += 2; /* u16 primitive->scanConfig.scanCfg[i2].maxPassiveChannelTimeTu */
- }
- }
- bufferSize += 1; /* u8 primitive->scanConfig.disableAutonomousScans */
- bufferSize += 2; /* u16 primitive->scanConfig.maxResults */
- bufferSize += 1; /* s8 primitive->scanConfig.highRssiThreshold */
- bufferSize += 1; /* s8 primitive->scanConfig.lowRssiThreshold */
- bufferSize += 1; /* s8 primitive->scanConfig.deltaRssiThreshold */
- bufferSize += 1; /* s8 primitive->scanConfig.highSnrThreshold */
- bufferSize += 1; /* s8 primitive->scanConfig.lowSnrThreshold */
- bufferSize += 1; /* s8 primitive->scanConfig.deltaSnrThreshold */
- bufferSize += 2; /* u16 primitive->scanConfig.passiveChannelListCount */
- bufferSize += primitive->scanConfig.passiveChannelListCount; /* u8 primitive->scanConfig.passiveChannelList */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeScanConfigGetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeScanConfigGetCfm *primitive = (CsrWifiSmeScanConfigGetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- {
- u16 i2;
- for (i2 = 0; i2 < 4; i2++)
- {
- CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.scanCfg[i2].intervalSeconds);
- CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.scanCfg[i2].validitySeconds);
- CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.scanCfg[i2].minActiveChannelTimeTu);
- CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.scanCfg[i2].maxActiveChannelTimeTu);
- CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.scanCfg[i2].minPassiveChannelTimeTu);
- CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.scanCfg[i2].maxPassiveChannelTimeTu);
- }
- }
- CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.disableAutonomousScans);
- CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.maxResults);
- CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.highRssiThreshold);
- CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.lowRssiThreshold);
- CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.deltaRssiThreshold);
- CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.highSnrThreshold);
- CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.lowSnrThreshold);
- CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.deltaSnrThreshold);
- CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.passiveChannelListCount);
- if (primitive->scanConfig.passiveChannelListCount)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->scanConfig.passiveChannelList, ((u16) (primitive->scanConfig.passiveChannelListCount)));
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeScanConfigGetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeScanConfigGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeScanConfigGetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- {
- u16 i2;
- for (i2 = 0; i2 < 4; i2++)
- {
- CsrUint16Des((u16 *) &primitive->scanConfig.scanCfg[i2].intervalSeconds, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->scanConfig.scanCfg[i2].validitySeconds, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->scanConfig.scanCfg[i2].minActiveChannelTimeTu, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->scanConfig.scanCfg[i2].maxActiveChannelTimeTu, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->scanConfig.scanCfg[i2].minPassiveChannelTimeTu, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->scanConfig.scanCfg[i2].maxPassiveChannelTimeTu, buffer, &offset);
- }
- }
- CsrUint8Des((u8 *) &primitive->scanConfig.disableAutonomousScans, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->scanConfig.maxResults, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->scanConfig.highRssiThreshold, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->scanConfig.lowRssiThreshold, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->scanConfig.deltaRssiThreshold, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->scanConfig.highSnrThreshold, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->scanConfig.lowSnrThreshold, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->scanConfig.deltaSnrThreshold, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->scanConfig.passiveChannelListCount, buffer, &offset);
- if (primitive->scanConfig.passiveChannelListCount)
- {
- primitive->scanConfig.passiveChannelList = kmalloc(primitive->scanConfig.passiveChannelListCount, GFP_KERNEL);
- CsrMemCpyDes(primitive->scanConfig.passiveChannelList, buffer, &offset, ((u16) (primitive->scanConfig.passiveChannelListCount)));
- }
- else
- {
- primitive->scanConfig.passiveChannelList = NULL;
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmeScanConfigGetCfmSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeScanConfigGetCfm *primitive = (CsrWifiSmeScanConfigGetCfm *) voidPrimitivePointer;
- kfree(primitive->scanConfig.passiveChannelList);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeScanResultIndSizeof(void *msg)
-{
- CsrWifiSmeScanResultInd *primitive = (CsrWifiSmeScanResultInd *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 149) */
- bufferSize += 32; /* u8 primitive->result.ssid.ssid[32] */
- bufferSize += 1; /* u8 primitive->result.ssid.length */
- bufferSize += 6; /* u8 primitive->result.bssid.a[6] */
- bufferSize += 2; /* s16 primitive->result.rssi */
- bufferSize += 2; /* s16 primitive->result.snr */
- bufferSize += 1; /* CsrWifiSmeRadioIF primitive->result.ifIndex */
- bufferSize += 2; /* u16 primitive->result.beaconPeriodTu */
- bufferSize += 8; /* u8 primitive->result.timeStamp.data[8] */
- bufferSize += 8; /* u8 primitive->result.localTime.data[8] */
- bufferSize += 2; /* u16 primitive->result.channelFrequency */
- bufferSize += 2; /* u16 primitive->result.capabilityInformation */
- bufferSize += 1; /* u8 primitive->result.channelNumber */
- bufferSize += 1; /* CsrWifiSmeBasicUsability primitive->result.usability */
- bufferSize += 1; /* CsrWifiSmeBssType primitive->result.bssType */
- bufferSize += 2; /* u16 primitive->result.informationElementsLength */
- bufferSize += primitive->result.informationElementsLength; /* u8 primitive->result.informationElements */
- bufferSize += 1; /* CsrWifiSmeP2pRole primitive->result.p2pDeviceRole */
- switch (primitive->result.p2pDeviceRole)
- {
- case CSR_WIFI_SME_P2P_ROLE_CLI:
- bufferSize += 1; /* u8 primitive->result.deviceInfo.reservedCli.empty */
- break;
- case CSR_WIFI_SME_P2P_ROLE_GO:
- bufferSize += 1; /* CsrWifiSmeP2pGroupCapabilityMask primitive->result.deviceInfo.groupInfo.groupCapability */
- bufferSize += 6; /* u8 primitive->result.deviceInfo.groupInfo.p2pDeviceAddress.a[6] */
- bufferSize += 1; /* u8 primitive->result.deviceInfo.groupInfo.p2pClientInfoCount */
- {
- u16 i4;
- for (i4 = 0; i4 < primitive->result.deviceInfo.groupInfo.p2pClientInfoCount; i4++)
- {
- bufferSize += 6; /* u8 primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].p2PClientInterfaceAddress.a[6] */
- bufferSize += 6; /* u8 primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceAddress.a[6] */
- bufferSize += 2; /* CsrWifiSmeWpsConfigTypeMask primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.configMethods */
- bufferSize += 1; /* CsrWifiSmeP2pCapabilityMask primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.p2PDeviceCap */
- bufferSize += 8; /* u8 primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.primDeviceType.deviceDetails[8] */
- bufferSize += 1; /* u8 primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount */
- {
- u16 i6;
- for (i6 = 0; i6 < primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount; i6++)
- {
- bufferSize += 8; /* u8 primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType[i6].deviceDetails[8] */
- }
- }
- bufferSize += 32; /* u8 primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceName[32] */
- bufferSize += 1; /* u8 primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceNameLength */
- }
- }
- break;
- case CSR_WIFI_SME_P2P_ROLE_NONE:
- bufferSize += 1; /* u8 primitive->result.deviceInfo.reservedNone.empty */
- break;
- case CSR_WIFI_SME_P2P_ROLE_STANDALONE:
- bufferSize += 6; /* u8 primitive->result.deviceInfo.standalonedevInfo.deviceAddress.a[6] */
- bufferSize += 2; /* CsrWifiSmeWpsConfigTypeMask primitive->result.deviceInfo.standalonedevInfo.configMethods */
- bufferSize += 1; /* CsrWifiSmeP2pCapabilityMask primitive->result.deviceInfo.standalonedevInfo.p2PDeviceCap */
- bufferSize += 8; /* u8 primitive->result.deviceInfo.standalonedevInfo.primDeviceType.deviceDetails[8] */
- bufferSize += 1; /* u8 primitive->result.deviceInfo.standalonedevInfo.secondaryDeviceTypeCount */
- {
- u16 i4;
- for (i4 = 0; i4 < primitive->result.deviceInfo.standalonedevInfo.secondaryDeviceTypeCount; i4++)
- {
- bufferSize += 8; /* u8 primitive->result.deviceInfo.standalonedevInfo.secDeviceType[i4].deviceDetails[8] */
- }
- }
- bufferSize += 32; /* u8 primitive->result.deviceInfo.standalonedevInfo.deviceName[32] */
- bufferSize += 1; /* u8 primitive->result.deviceInfo.standalonedevInfo.deviceNameLength */
- break;
- default:
- break;
- }
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeScanResultIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeScanResultInd *primitive = (CsrWifiSmeScanResultInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrMemCpySer(ptr, len, (const void *) primitive->result.ssid.ssid, ((u16) (32)));
- CsrUint8Ser(ptr, len, (u8) primitive->result.ssid.length);
- CsrMemCpySer(ptr, len, (const void *) primitive->result.bssid.a, ((u16) (6)));
- CsrUint16Ser(ptr, len, (u16) primitive->result.rssi);
- CsrUint16Ser(ptr, len, (u16) primitive->result.snr);
- CsrUint8Ser(ptr, len, (u8) primitive->result.ifIndex);
- CsrUint16Ser(ptr, len, (u16) primitive->result.beaconPeriodTu);
- CsrMemCpySer(ptr, len, (const void *) primitive->result.timeStamp.data, ((u16) (8)));
- CsrMemCpySer(ptr, len, (const void *) primitive->result.localTime.data, ((u16) (8)));
- CsrUint16Ser(ptr, len, (u16) primitive->result.channelFrequency);
- CsrUint16Ser(ptr, len, (u16) primitive->result.capabilityInformation);
- CsrUint8Ser(ptr, len, (u8) primitive->result.channelNumber);
- CsrUint8Ser(ptr, len, (u8) primitive->result.usability);
- CsrUint8Ser(ptr, len, (u8) primitive->result.bssType);
- CsrUint16Ser(ptr, len, (u16) primitive->result.informationElementsLength);
- if (primitive->result.informationElementsLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->result.informationElements, ((u16) (primitive->result.informationElementsLength)));
- }
- CsrUint8Ser(ptr, len, (u8) primitive->result.p2pDeviceRole);
- switch (primitive->result.p2pDeviceRole)
- {
- case CSR_WIFI_SME_P2P_ROLE_CLI:
- CsrUint8Ser(ptr, len, (u8) primitive->result.deviceInfo.reservedCli.empty);
- break;
- case CSR_WIFI_SME_P2P_ROLE_GO:
- CsrUint8Ser(ptr, len, (u8) primitive->result.deviceInfo.groupInfo.groupCapability);
- CsrMemCpySer(ptr, len, (const void *) primitive->result.deviceInfo.groupInfo.p2pDeviceAddress.a, ((u16) (6)));
- CsrUint8Ser(ptr, len, (u8) primitive->result.deviceInfo.groupInfo.p2pClientInfoCount);
- {
- u16 i4;
- for (i4 = 0; i4 < primitive->result.deviceInfo.groupInfo.p2pClientInfoCount; i4++)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].p2PClientInterfaceAddress.a, ((u16) (6)));
- CsrMemCpySer(ptr, len, (const void *) primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceAddress.a, ((u16) (6)));
- CsrUint16Ser(ptr, len, (u16) primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.configMethods);
- CsrUint8Ser(ptr, len, (u8) primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.p2PDeviceCap);
- CsrMemCpySer(ptr, len, (const void *) primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.primDeviceType.deviceDetails, ((u16) (8)));
- CsrUint8Ser(ptr, len, (u8) primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount);
- {
- u16 i6;
- for (i6 = 0; i6 < primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount; i6++)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType[i6].deviceDetails, ((u16) (8)));
- }
- }
- CsrMemCpySer(ptr, len, (const void *) primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceName, ((u16) (32)));
- CsrUint8Ser(ptr, len, (u8) primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceNameLength);
- }
- }
- break;
- case CSR_WIFI_SME_P2P_ROLE_NONE:
- CsrUint8Ser(ptr, len, (u8) primitive->result.deviceInfo.reservedNone.empty);
- break;
- case CSR_WIFI_SME_P2P_ROLE_STANDALONE:
- CsrMemCpySer(ptr, len, (const void *) primitive->result.deviceInfo.standalonedevInfo.deviceAddress.a, ((u16) (6)));
- CsrUint16Ser(ptr, len, (u16) primitive->result.deviceInfo.standalonedevInfo.configMethods);
- CsrUint8Ser(ptr, len, (u8) primitive->result.deviceInfo.standalonedevInfo.p2PDeviceCap);
- CsrMemCpySer(ptr, len, (const void *) primitive->result.deviceInfo.standalonedevInfo.primDeviceType.deviceDetails, ((u16) (8)));
- CsrUint8Ser(ptr, len, (u8) primitive->result.deviceInfo.standalonedevInfo.secondaryDeviceTypeCount);
- {
- u16 i4;
- for (i4 = 0; i4 < primitive->result.deviceInfo.standalonedevInfo.secondaryDeviceTypeCount; i4++)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->result.deviceInfo.standalonedevInfo.secDeviceType[i4].deviceDetails, ((u16) (8)));
- }
- }
- CsrMemCpySer(ptr, len, (const void *) primitive->result.deviceInfo.standalonedevInfo.deviceName, ((u16) (32)));
- CsrUint8Ser(ptr, len, (u8) primitive->result.deviceInfo.standalonedevInfo.deviceNameLength);
- break;
- default:
- break;
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeScanResultIndDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeScanResultInd *primitive = kmalloc(sizeof(CsrWifiSmeScanResultInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrMemCpyDes(primitive->result.ssid.ssid, buffer, &offset, ((u16) (32)));
- CsrUint8Des((u8 *) &primitive->result.ssid.length, buffer, &offset);
- CsrMemCpyDes(primitive->result.bssid.a, buffer, &offset, ((u16) (6)));
- CsrUint16Des((u16 *) &primitive->result.rssi, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->result.snr, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->result.ifIndex, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->result.beaconPeriodTu, buffer, &offset);
- CsrMemCpyDes(primitive->result.timeStamp.data, buffer, &offset, ((u16) (8)));
- CsrMemCpyDes(primitive->result.localTime.data, buffer, &offset, ((u16) (8)));
- CsrUint16Des((u16 *) &primitive->result.channelFrequency, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->result.capabilityInformation, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->result.channelNumber, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->result.usability, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->result.bssType, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->result.informationElementsLength, buffer, &offset);
- if (primitive->result.informationElementsLength)
- {
- primitive->result.informationElements = kmalloc(primitive->result.informationElementsLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->result.informationElements, buffer, &offset, ((u16) (primitive->result.informationElementsLength)));
- }
- else
- {
- primitive->result.informationElements = NULL;
- }
- CsrUint8Des((u8 *) &primitive->result.p2pDeviceRole, buffer, &offset);
- switch (primitive->result.p2pDeviceRole)
- {
- case CSR_WIFI_SME_P2P_ROLE_CLI:
- CsrUint8Des((u8 *) &primitive->result.deviceInfo.reservedCli.empty, buffer, &offset);
- break;
- case CSR_WIFI_SME_P2P_ROLE_GO:
- CsrUint8Des((u8 *) &primitive->result.deviceInfo.groupInfo.groupCapability, buffer, &offset);
- CsrMemCpyDes(primitive->result.deviceInfo.groupInfo.p2pDeviceAddress.a, buffer, &offset, ((u16) (6)));
- CsrUint8Des((u8 *) &primitive->result.deviceInfo.groupInfo.p2pClientInfoCount, buffer, &offset);
- primitive->result.deviceInfo.groupInfo.p2PClientInfo = NULL;
- if (primitive->result.deviceInfo.groupInfo.p2pClientInfoCount)
- {
- primitive->result.deviceInfo.groupInfo.p2PClientInfo = kmalloc(sizeof(CsrWifiSmeP2pClientInfoType) * primitive->result.deviceInfo.groupInfo.p2pClientInfoCount, GFP_KERNEL);
- }
- {
- u16 i4;
- for (i4 = 0; i4 < primitive->result.deviceInfo.groupInfo.p2pClientInfoCount; i4++)
- {
- CsrMemCpyDes(primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].p2PClientInterfaceAddress.a, buffer, &offset, ((u16) (6)));
- CsrMemCpyDes(primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceAddress.a, buffer, &offset, ((u16) (6)));
- CsrUint16Des((u16 *) &primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.configMethods, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.p2PDeviceCap, buffer, &offset);
- CsrMemCpyDes(primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.primDeviceType.deviceDetails, buffer, &offset, ((u16) (8)));
- CsrUint8Des((u8 *) &primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount, buffer, &offset);
- primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType = NULL;
- if (primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount)
- {
- primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType = kmalloc(sizeof(CsrWifiSmeWpsDeviceType) * primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount, GFP_KERNEL);
- }
- {
- u16 i6;
- for (i6 = 0; i6 < primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount; i6++)
- {
- CsrMemCpyDes(primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType[i6].deviceDetails, buffer, &offset, ((u16) (8)));
- }
- }
- CsrMemCpyDes(primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceName, buffer, &offset, ((u16) (32)));
- CsrUint8Des((u8 *) &primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceNameLength, buffer, &offset);
- }
- }
- break;
- case CSR_WIFI_SME_P2P_ROLE_NONE:
- CsrUint8Des((u8 *) &primitive->result.deviceInfo.reservedNone.empty, buffer, &offset);
- break;
- case CSR_WIFI_SME_P2P_ROLE_STANDALONE:
- CsrMemCpyDes(primitive->result.deviceInfo.standalonedevInfo.deviceAddress.a, buffer, &offset, ((u16) (6)));
- CsrUint16Des((u16 *) &primitive->result.deviceInfo.standalonedevInfo.configMethods, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->result.deviceInfo.standalonedevInfo.p2PDeviceCap, buffer, &offset);
- CsrMemCpyDes(primitive->result.deviceInfo.standalonedevInfo.primDeviceType.deviceDetails, buffer, &offset, ((u16) (8)));
- CsrUint8Des((u8 *) &primitive->result.deviceInfo.standalonedevInfo.secondaryDeviceTypeCount, buffer, &offset);
- primitive->result.deviceInfo.standalonedevInfo.secDeviceType = NULL;
- if (primitive->result.deviceInfo.standalonedevInfo.secondaryDeviceTypeCount)
- {
- primitive->result.deviceInfo.standalonedevInfo.secDeviceType = kmalloc(sizeof(CsrWifiSmeWpsDeviceType) * primitive->result.deviceInfo.standalonedevInfo.secondaryDeviceTypeCount, GFP_KERNEL);
- }
- {
- u16 i4;
- for (i4 = 0; i4 < primitive->result.deviceInfo.standalonedevInfo.secondaryDeviceTypeCount; i4++)
- {
- CsrMemCpyDes(primitive->result.deviceInfo.standalonedevInfo.secDeviceType[i4].deviceDetails, buffer, &offset, ((u16) (8)));
- }
- }
- CsrMemCpyDes(primitive->result.deviceInfo.standalonedevInfo.deviceName, buffer, &offset, ((u16) (32)));
- CsrUint8Des((u8 *) &primitive->result.deviceInfo.standalonedevInfo.deviceNameLength, buffer, &offset);
- break;
- default:
- break;
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmeScanResultIndSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeScanResultInd *primitive = (CsrWifiSmeScanResultInd *) voidPrimitivePointer;
- kfree(primitive->result.informationElements);
- switch (primitive->result.p2pDeviceRole)
- {
- case CSR_WIFI_SME_P2P_ROLE_GO:
- {
- u16 i4;
- for (i4 = 0; i4 < primitive->result.deviceInfo.groupInfo.p2pClientInfoCount; i4++)
- {
- kfree(primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType);
- }
- }
- kfree(primitive->result.deviceInfo.groupInfo.p2PClientInfo);
- break;
- case CSR_WIFI_SME_P2P_ROLE_STANDALONE:
- kfree(primitive->result.deviceInfo.standalonedevInfo.secDeviceType);
- break;
- default:
- break;
- }
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeScanResultsGetCfmSizeof(void *msg)
-{
- CsrWifiSmeScanResultsGetCfm *primitive = (CsrWifiSmeScanResultsGetCfm *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 153) */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 2; /* u16 primitive->scanResultsCount */
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->scanResultsCount; i1++)
- {
- bufferSize += 32; /* u8 primitive->scanResults[i1].ssid.ssid[32] */
- bufferSize += 1; /* u8 primitive->scanResults[i1].ssid.length */
- bufferSize += 6; /* u8 primitive->scanResults[i1].bssid.a[6] */
- bufferSize += 2; /* s16 primitive->scanResults[i1].rssi */
- bufferSize += 2; /* s16 primitive->scanResults[i1].snr */
- bufferSize += 1; /* CsrWifiSmeRadioIF primitive->scanResults[i1].ifIndex */
- bufferSize += 2; /* u16 primitive->scanResults[i1].beaconPeriodTu */
- bufferSize += 8; /* u8 primitive->scanResults[i1].timeStamp.data[8] */
- bufferSize += 8; /* u8 primitive->scanResults[i1].localTime.data[8] */
- bufferSize += 2; /* u16 primitive->scanResults[i1].channelFrequency */
- bufferSize += 2; /* u16 primitive->scanResults[i1].capabilityInformation */
- bufferSize += 1; /* u8 primitive->scanResults[i1].channelNumber */
- bufferSize += 1; /* CsrWifiSmeBasicUsability primitive->scanResults[i1].usability */
- bufferSize += 1; /* CsrWifiSmeBssType primitive->scanResults[i1].bssType */
- bufferSize += 2; /* u16 primitive->scanResults[i1].informationElementsLength */
- bufferSize += primitive->scanResults[i1].informationElementsLength; /* u8 primitive->scanResults[i1].informationElements */
- bufferSize += 1; /* CsrWifiSmeP2pRole primitive->scanResults[i1].p2pDeviceRole */
- switch (primitive->scanResults[i1].p2pDeviceRole)
- {
- case CSR_WIFI_SME_P2P_ROLE_CLI:
- bufferSize += 1; /* u8 primitive->scanResults[i1].deviceInfo.reservedCli.empty */
- break;
- case CSR_WIFI_SME_P2P_ROLE_GO:
- bufferSize += 1; /* CsrWifiSmeP2pGroupCapabilityMask primitive->scanResults[i1].deviceInfo.groupInfo.groupCapability */
- bufferSize += 6; /* u8 primitive->scanResults[i1].deviceInfo.groupInfo.p2pDeviceAddress.a[6] */
- bufferSize += 1; /* u8 primitive->scanResults[i1].deviceInfo.groupInfo.p2pClientInfoCount */
- {
- u16 i4;
- for (i4 = 0; i4 < primitive->scanResults[i1].deviceInfo.groupInfo.p2pClientInfoCount; i4++)
- {
- bufferSize += 6; /* u8 primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].p2PClientInterfaceAddress.a[6] */
- bufferSize += 6; /* u8 primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceAddress.a[6] */
- bufferSize += 2; /* CsrWifiSmeWpsConfigTypeMask primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.configMethods */
- bufferSize += 1; /* CsrWifiSmeP2pCapabilityMask primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.p2PDeviceCap */
- bufferSize += 8; /* u8 primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.primDeviceType.deviceDetails[8] */
- bufferSize += 1; /* u8 primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount */
- {
- u16 i6;
- for (i6 = 0; i6 < primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount; i6++)
- {
- bufferSize += 8; /* u8 primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType[i6].deviceDetails[8] */
- }
- }
- bufferSize += 32; /* u8 primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceName[32] */
- bufferSize += 1; /* u8 primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceNameLength */
- }
- }
- break;
- case CSR_WIFI_SME_P2P_ROLE_NONE:
- bufferSize += 1; /* u8 primitive->scanResults[i1].deviceInfo.reservedNone.empty */
- break;
- case CSR_WIFI_SME_P2P_ROLE_STANDALONE:
- bufferSize += 6; /* u8 primitive->scanResults[i1].deviceInfo.standalonedevInfo.deviceAddress.a[6] */
- bufferSize += 2; /* CsrWifiSmeWpsConfigTypeMask primitive->scanResults[i1].deviceInfo.standalonedevInfo.configMethods */
- bufferSize += 1; /* CsrWifiSmeP2pCapabilityMask primitive->scanResults[i1].deviceInfo.standalonedevInfo.p2PDeviceCap */
- bufferSize += 8; /* u8 primitive->scanResults[i1].deviceInfo.standalonedevInfo.primDeviceType.deviceDetails[8] */
- bufferSize += 1; /* u8 primitive->scanResults[i1].deviceInfo.standalonedevInfo.secondaryDeviceTypeCount */
- {
- u16 i4;
- for (i4 = 0; i4 < primitive->scanResults[i1].deviceInfo.standalonedevInfo.secondaryDeviceTypeCount; i4++)
- {
- bufferSize += 8; /* u8 primitive->scanResults[i1].deviceInfo.standalonedevInfo.secDeviceType[i4].deviceDetails[8] */
- }
- }
- bufferSize += 32; /* u8 primitive->scanResults[i1].deviceInfo.standalonedevInfo.deviceName[32] */
- bufferSize += 1; /* u8 primitive->scanResults[i1].deviceInfo.standalonedevInfo.deviceNameLength */
- break;
- default:
- break;
- }
- }
- }
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeScanResultsGetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeScanResultsGetCfm *primitive = (CsrWifiSmeScanResultsGetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint16Ser(ptr, len, (u16) primitive->scanResultsCount);
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->scanResultsCount; i1++)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].ssid.ssid, ((u16) (32)));
- CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].ssid.length);
- CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].bssid.a, ((u16) (6)));
- CsrUint16Ser(ptr, len, (u16) primitive->scanResults[i1].rssi);
- CsrUint16Ser(ptr, len, (u16) primitive->scanResults[i1].snr);
- CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].ifIndex);
- CsrUint16Ser(ptr, len, (u16) primitive->scanResults[i1].beaconPeriodTu);
- CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].timeStamp.data, ((u16) (8)));
- CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].localTime.data, ((u16) (8)));
- CsrUint16Ser(ptr, len, (u16) primitive->scanResults[i1].channelFrequency);
- CsrUint16Ser(ptr, len, (u16) primitive->scanResults[i1].capabilityInformation);
- CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].channelNumber);
- CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].usability);
- CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].bssType);
- CsrUint16Ser(ptr, len, (u16) primitive->scanResults[i1].informationElementsLength);
- if (primitive->scanResults[i1].informationElementsLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].informationElements, ((u16) (primitive->scanResults[i1].informationElementsLength)));
- }
- CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].p2pDeviceRole);
- switch (primitive->scanResults[i1].p2pDeviceRole)
- {
- case CSR_WIFI_SME_P2P_ROLE_CLI:
- CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].deviceInfo.reservedCli.empty);
- break;
- case CSR_WIFI_SME_P2P_ROLE_GO:
- CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].deviceInfo.groupInfo.groupCapability);
- CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].deviceInfo.groupInfo.p2pDeviceAddress.a, ((u16) (6)));
- CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].deviceInfo.groupInfo.p2pClientInfoCount);
- {
- u16 i4;
- for (i4 = 0; i4 < primitive->scanResults[i1].deviceInfo.groupInfo.p2pClientInfoCount; i4++)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].p2PClientInterfaceAddress.a, ((u16) (6)));
- CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceAddress.a, ((u16) (6)));
- CsrUint16Ser(ptr, len, (u16) primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.configMethods);
- CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.p2PDeviceCap);
- CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.primDeviceType.deviceDetails, ((u16) (8)));
- CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount);
- {
- u16 i6;
- for (i6 = 0; i6 < primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount; i6++)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType[i6].deviceDetails, ((u16) (8)));
- }
- }
- CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceName, ((u16) (32)));
- CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceNameLength);
- }
- }
- break;
- case CSR_WIFI_SME_P2P_ROLE_NONE:
- CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].deviceInfo.reservedNone.empty);
- break;
- case CSR_WIFI_SME_P2P_ROLE_STANDALONE:
- CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].deviceInfo.standalonedevInfo.deviceAddress.a, ((u16) (6)));
- CsrUint16Ser(ptr, len, (u16) primitive->scanResults[i1].deviceInfo.standalonedevInfo.configMethods);
- CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].deviceInfo.standalonedevInfo.p2PDeviceCap);
- CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].deviceInfo.standalonedevInfo.primDeviceType.deviceDetails, ((u16) (8)));
- CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].deviceInfo.standalonedevInfo.secondaryDeviceTypeCount);
- {
- u16 i4;
- for (i4 = 0; i4 < primitive->scanResults[i1].deviceInfo.standalonedevInfo.secondaryDeviceTypeCount; i4++)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].deviceInfo.standalonedevInfo.secDeviceType[i4].deviceDetails, ((u16) (8)));
- }
- }
- CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].deviceInfo.standalonedevInfo.deviceName, ((u16) (32)));
- CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].deviceInfo.standalonedevInfo.deviceNameLength);
- break;
- default:
- break;
- }
- }
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeScanResultsGetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeScanResultsGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeScanResultsGetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->scanResultsCount, buffer, &offset);
- primitive->scanResults = NULL;
- if (primitive->scanResultsCount)
- {
- primitive->scanResults = kmalloc(sizeof(CsrWifiSmeScanResult) * primitive->scanResultsCount, GFP_KERNEL);
- }
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->scanResultsCount; i1++)
- {
- CsrMemCpyDes(primitive->scanResults[i1].ssid.ssid, buffer, &offset, ((u16) (32)));
- CsrUint8Des((u8 *) &primitive->scanResults[i1].ssid.length, buffer, &offset);
- CsrMemCpyDes(primitive->scanResults[i1].bssid.a, buffer, &offset, ((u16) (6)));
- CsrUint16Des((u16 *) &primitive->scanResults[i1].rssi, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->scanResults[i1].snr, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->scanResults[i1].ifIndex, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->scanResults[i1].beaconPeriodTu, buffer, &offset);
- CsrMemCpyDes(primitive->scanResults[i1].timeStamp.data, buffer, &offset, ((u16) (8)));
- CsrMemCpyDes(primitive->scanResults[i1].localTime.data, buffer, &offset, ((u16) (8)));
- CsrUint16Des((u16 *) &primitive->scanResults[i1].channelFrequency, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->scanResults[i1].capabilityInformation, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->scanResults[i1].channelNumber, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->scanResults[i1].usability, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->scanResults[i1].bssType, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->scanResults[i1].informationElementsLength, buffer, &offset);
- if (primitive->scanResults[i1].informationElementsLength)
- {
- primitive->scanResults[i1].informationElements = kmalloc(primitive->scanResults[i1].informationElementsLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->scanResults[i1].informationElements, buffer, &offset, ((u16) (primitive->scanResults[i1].informationElementsLength)));
- }
- else
- {
- primitive->scanResults[i1].informationElements = NULL;
- }
- CsrUint8Des((u8 *) &primitive->scanResults[i1].p2pDeviceRole, buffer, &offset);
- switch (primitive->scanResults[i1].p2pDeviceRole)
- {
- case CSR_WIFI_SME_P2P_ROLE_CLI:
- CsrUint8Des((u8 *) &primitive->scanResults[i1].deviceInfo.reservedCli.empty, buffer, &offset);
- break;
- case CSR_WIFI_SME_P2P_ROLE_GO:
- CsrUint8Des((u8 *) &primitive->scanResults[i1].deviceInfo.groupInfo.groupCapability, buffer, &offset);
- CsrMemCpyDes(primitive->scanResults[i1].deviceInfo.groupInfo.p2pDeviceAddress.a, buffer, &offset, ((u16) (6)));
- CsrUint8Des((u8 *) &primitive->scanResults[i1].deviceInfo.groupInfo.p2pClientInfoCount, buffer, &offset);
- primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo = NULL;
- if (primitive->scanResults[i1].deviceInfo.groupInfo.p2pClientInfoCount)
- {
- primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo = kmalloc(sizeof(CsrWifiSmeP2pClientInfoType) * primitive->scanResults[i1].deviceInfo.groupInfo.p2pClientInfoCount, GFP_KERNEL);
- }
- {
- u16 i4;
- for (i4 = 0; i4 < primitive->scanResults[i1].deviceInfo.groupInfo.p2pClientInfoCount; i4++)
- {
- CsrMemCpyDes(primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].p2PClientInterfaceAddress.a, buffer, &offset, ((u16) (6)));
- CsrMemCpyDes(primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceAddress.a, buffer, &offset, ((u16) (6)));
- CsrUint16Des((u16 *) &primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.configMethods, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.p2PDeviceCap, buffer, &offset);
- CsrMemCpyDes(primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.primDeviceType.deviceDetails, buffer, &offset, ((u16) (8)));
- CsrUint8Des((u8 *) &primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount, buffer, &offset);
- primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType = NULL;
- if (primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount)
- {
- primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType = kmalloc(sizeof(CsrWifiSmeWpsDeviceType) * primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount, GFP_KERNEL);
- }
- {
- u16 i6;
- for (i6 = 0; i6 < primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount; i6++)
- {
- CsrMemCpyDes(primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType[i6].deviceDetails, buffer, &offset, ((u16) (8)));
- }
- }
- CsrMemCpyDes(primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceName, buffer, &offset, ((u16) (32)));
- CsrUint8Des((u8 *) &primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceNameLength, buffer, &offset);
- }
- }
- break;
- case CSR_WIFI_SME_P2P_ROLE_NONE:
- CsrUint8Des((u8 *) &primitive->scanResults[i1].deviceInfo.reservedNone.empty, buffer, &offset);
- break;
- case CSR_WIFI_SME_P2P_ROLE_STANDALONE:
- CsrMemCpyDes(primitive->scanResults[i1].deviceInfo.standalonedevInfo.deviceAddress.a, buffer, &offset, ((u16) (6)));
- CsrUint16Des((u16 *) &primitive->scanResults[i1].deviceInfo.standalonedevInfo.configMethods, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->scanResults[i1].deviceInfo.standalonedevInfo.p2PDeviceCap, buffer, &offset);
- CsrMemCpyDes(primitive->scanResults[i1].deviceInfo.standalonedevInfo.primDeviceType.deviceDetails, buffer, &offset, ((u16) (8)));
- CsrUint8Des((u8 *) &primitive->scanResults[i1].deviceInfo.standalonedevInfo.secondaryDeviceTypeCount, buffer, &offset);
- primitive->scanResults[i1].deviceInfo.standalonedevInfo.secDeviceType = NULL;
- if (primitive->scanResults[i1].deviceInfo.standalonedevInfo.secondaryDeviceTypeCount)
- {
- primitive->scanResults[i1].deviceInfo.standalonedevInfo.secDeviceType = kmalloc(sizeof(CsrWifiSmeWpsDeviceType) * primitive->scanResults[i1].deviceInfo.standalonedevInfo.secondaryDeviceTypeCount, GFP_KERNEL);
- }
- {
- u16 i4;
- for (i4 = 0; i4 < primitive->scanResults[i1].deviceInfo.standalonedevInfo.secondaryDeviceTypeCount; i4++)
- {
- CsrMemCpyDes(primitive->scanResults[i1].deviceInfo.standalonedevInfo.secDeviceType[i4].deviceDetails, buffer, &offset, ((u16) (8)));
- }
- }
- CsrMemCpyDes(primitive->scanResults[i1].deviceInfo.standalonedevInfo.deviceName, buffer, &offset, ((u16) (32)));
- CsrUint8Des((u8 *) &primitive->scanResults[i1].deviceInfo.standalonedevInfo.deviceNameLength, buffer, &offset);
- break;
- default:
- break;
- }
- }
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmeScanResultsGetCfmSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeScanResultsGetCfm *primitive = (CsrWifiSmeScanResultsGetCfm *) voidPrimitivePointer;
- {
- u16 i1;
- for (i1 = 0; i1 < primitive->scanResultsCount; i1++)
- {
- kfree(primitive->scanResults[i1].informationElements);
- switch (primitive->scanResults[i1].p2pDeviceRole)
- {
- case CSR_WIFI_SME_P2P_ROLE_GO:
- {
- u16 i4;
- for (i4 = 0; i4 < primitive->scanResults[i1].deviceInfo.groupInfo.p2pClientInfoCount; i4++)
- {
- kfree(primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType);
- }
- }
- kfree(primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo);
- break;
- case CSR_WIFI_SME_P2P_ROLE_STANDALONE:
- kfree(primitive->scanResults[i1].deviceInfo.standalonedevInfo.secDeviceType);
- break;
- default:
- break;
- }
- }
- }
- kfree(primitive->scanResults);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeSmeStaConfigGetCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 13) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 1; /* u8 primitive->smeConfig.connectionQualityRssiChangeTrigger */
- bufferSize += 1; /* u8 primitive->smeConfig.connectionQualitySnrChangeTrigger */
- bufferSize += 1; /* CsrWifiSmeWmmModeMask primitive->smeConfig.wmmModeMask */
- bufferSize += 1; /* CsrWifiSmeRadioIF primitive->smeConfig.ifIndex */
- bufferSize += 1; /* u8 primitive->smeConfig.allowUnicastUseGroupCipher */
- bufferSize += 1; /* u8 primitive->smeConfig.enableOpportunisticKeyCaching */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeSmeStaConfigGetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeSmeStaConfigGetCfm *primitive = (CsrWifiSmeSmeStaConfigGetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint8Ser(ptr, len, (u8) primitive->smeConfig.connectionQualityRssiChangeTrigger);
- CsrUint8Ser(ptr, len, (u8) primitive->smeConfig.connectionQualitySnrChangeTrigger);
- CsrUint8Ser(ptr, len, (u8) primitive->smeConfig.wmmModeMask);
- CsrUint8Ser(ptr, len, (u8) primitive->smeConfig.ifIndex);
- CsrUint8Ser(ptr, len, (u8) primitive->smeConfig.allowUnicastUseGroupCipher);
- CsrUint8Ser(ptr, len, (u8) primitive->smeConfig.enableOpportunisticKeyCaching);
- return(ptr);
-}
-
-
-void* CsrWifiSmeSmeStaConfigGetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeSmeStaConfigGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeSmeStaConfigGetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->smeConfig.connectionQualityRssiChangeTrigger, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->smeConfig.connectionQualitySnrChangeTrigger, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->smeConfig.wmmModeMask, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->smeConfig.ifIndex, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->smeConfig.allowUnicastUseGroupCipher, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->smeConfig.enableOpportunisticKeyCaching, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeSmeStaConfigSetCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 7) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeSmeStaConfigSetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeSmeStaConfigSetCfm *primitive = (CsrWifiSmeSmeStaConfigSetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- return(ptr);
-}
-
-
-void* CsrWifiSmeSmeStaConfigSetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeSmeStaConfigSetCfm *primitive = kmalloc(sizeof(CsrWifiSmeSmeStaConfigSetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeStationMacAddressGetCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 17) */
- bufferSize += 2; /* CsrResult primitive->status */
- {
- u16 i1;
- for (i1 = 0; i1 < 2; i1++)
- {
- bufferSize += 6; /* u8 primitive->stationMacAddress[i1].a[6] */
- }
- }
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeStationMacAddressGetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeStationMacAddressGetCfm *primitive = (CsrWifiSmeStationMacAddressGetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- {
- u16 i1;
- for (i1 = 0; i1 < 2; i1++)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->stationMacAddress[i1].a, ((u16) (6)));
- }
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeStationMacAddressGetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeStationMacAddressGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeStationMacAddressGetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- {
- u16 i1;
- for (i1 = 0; i1 < 2; i1++)
- {
- CsrMemCpyDes(primitive->stationMacAddress[i1].a, buffer, &offset, ((u16) (6)));
- }
- }
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeTspecIndSizeof(void *msg)
-{
- CsrWifiSmeTspecInd *primitive = (CsrWifiSmeTspecInd *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 13) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 4; /* u32 primitive->transactionId */
- bufferSize += 1; /* CsrWifiSmeTspecResultCode primitive->tspecResultCode */
- bufferSize += 2; /* u16 primitive->tspecLength */
- bufferSize += primitive->tspecLength; /* u8 primitive->tspec */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeTspecIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeTspecInd *primitive = (CsrWifiSmeTspecInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint32Ser(ptr, len, (u32) primitive->transactionId);
- CsrUint8Ser(ptr, len, (u8) primitive->tspecResultCode);
- CsrUint16Ser(ptr, len, (u16) primitive->tspecLength);
- if (primitive->tspecLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->tspec, ((u16) (primitive->tspecLength)));
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeTspecIndDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeTspecInd *primitive = kmalloc(sizeof(CsrWifiSmeTspecInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->transactionId, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->tspecResultCode, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->tspecLength, buffer, &offset);
- if (primitive->tspecLength)
- {
- primitive->tspec = kmalloc(primitive->tspecLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->tspec, buffer, &offset, ((u16) (primitive->tspecLength)));
- }
- else
- {
- primitive->tspec = NULL;
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmeTspecIndSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeTspecInd *primitive = (CsrWifiSmeTspecInd *) voidPrimitivePointer;
- kfree(primitive->tspec);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeTspecCfmSizeof(void *msg)
-{
- CsrWifiSmeTspecCfm *primitive = (CsrWifiSmeTspecCfm *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 15) */
- bufferSize += 2; /* u16 primitive->interfaceTag */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 4; /* u32 primitive->transactionId */
- bufferSize += 1; /* CsrWifiSmeTspecResultCode primitive->tspecResultCode */
- bufferSize += 2; /* u16 primitive->tspecLength */
- bufferSize += primitive->tspecLength; /* u8 primitive->tspec */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeTspecCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeTspecCfm *primitive = (CsrWifiSmeTspecCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint32Ser(ptr, len, (u32) primitive->transactionId);
- CsrUint8Ser(ptr, len, (u8) primitive->tspecResultCode);
- CsrUint16Ser(ptr, len, (u16) primitive->tspecLength);
- if (primitive->tspecLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->tspec, ((u16) (primitive->tspecLength)));
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeTspecCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeTspecCfm *primitive = kmalloc(sizeof(CsrWifiSmeTspecCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->transactionId, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->tspecResultCode, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->tspecLength, buffer, &offset);
- if (primitive->tspecLength)
- {
- primitive->tspec = kmalloc(primitive->tspecLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->tspec, buffer, &offset, ((u16) (primitive->tspecLength)));
- }
- else
- {
- primitive->tspec = NULL;
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmeTspecCfmSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeTspecCfm *primitive = (CsrWifiSmeTspecCfm *) voidPrimitivePointer;
- kfree(primitive->tspec);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeVersionsGetCfmSizeof(void *msg)
-{
- CsrWifiSmeVersionsGetCfm *primitive = (CsrWifiSmeVersionsGetCfm *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 33) */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 4; /* u32 primitive->versions.chipId */
- bufferSize += 4; /* u32 primitive->versions.chipVersion */
- bufferSize += 4; /* u32 primitive->versions.firmwareBuild */
- bufferSize += 4; /* u32 primitive->versions.firmwarePatch */
- bufferSize += 4; /* u32 primitive->versions.firmwareHip */
- bufferSize += (primitive->versions.routerBuild ? strlen(primitive->versions.routerBuild) : 0) + 1; /* char* primitive->versions.routerBuild (0 byte len + 1 for NULL Term) */
- bufferSize += 4; /* u32 primitive->versions.routerHip */
- bufferSize += (primitive->versions.smeBuild ? strlen(primitive->versions.smeBuild) : 0) + 1; /* char* primitive->versions.smeBuild (0 byte len + 1 for NULL Term) */
- bufferSize += 4; /* u32 primitive->versions.smeHip */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeVersionsGetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeVersionsGetCfm *primitive = (CsrWifiSmeVersionsGetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint32Ser(ptr, len, (u32) primitive->versions.chipId);
- CsrUint32Ser(ptr, len, (u32) primitive->versions.chipVersion);
- CsrUint32Ser(ptr, len, (u32) primitive->versions.firmwareBuild);
- CsrUint32Ser(ptr, len, (u32) primitive->versions.firmwarePatch);
- CsrUint32Ser(ptr, len, (u32) primitive->versions.firmwareHip);
- CsrCharStringSer(ptr, len, primitive->versions.routerBuild);
- CsrUint32Ser(ptr, len, (u32) primitive->versions.routerHip);
- CsrCharStringSer(ptr, len, primitive->versions.smeBuild);
- CsrUint32Ser(ptr, len, (u32) primitive->versions.smeHip);
- return(ptr);
-}
-
-
-void* CsrWifiSmeVersionsGetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeVersionsGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeVersionsGetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->versions.chipId, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->versions.chipVersion, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->versions.firmwareBuild, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->versions.firmwarePatch, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->versions.firmwareHip, buffer, &offset);
- CsrCharStringDes(&primitive->versions.routerBuild, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->versions.routerHip, buffer, &offset);
- CsrCharStringDes(&primitive->versions.smeBuild, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->versions.smeHip, buffer, &offset);
-
- return primitive;
-}
-
-
-void CsrWifiSmeVersionsGetCfmSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeVersionsGetCfm *primitive = (CsrWifiSmeVersionsGetCfm *) voidPrimitivePointer;
- kfree(primitive->versions.routerBuild);
- kfree(primitive->versions.smeBuild);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeCloakedSsidsGetCfmSizeof(void *msg)
-{
- CsrWifiSmeCloakedSsidsGetCfm *primitive = (CsrWifiSmeCloakedSsidsGetCfm *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 39) */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 1; /* u8 primitive->cloakedSsids.cloakedSsidsCount */
- {
- u16 i2;
- for (i2 = 0; i2 < primitive->cloakedSsids.cloakedSsidsCount; i2++)
- {
- bufferSize += 32; /* u8 primitive->cloakedSsids.cloakedSsids[i2].ssid[32] */
- bufferSize += 1; /* u8 primitive->cloakedSsids.cloakedSsids[i2].length */
- }
- }
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeCloakedSsidsGetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeCloakedSsidsGetCfm *primitive = (CsrWifiSmeCloakedSsidsGetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint8Ser(ptr, len, (u8) primitive->cloakedSsids.cloakedSsidsCount);
- {
- u16 i2;
- for (i2 = 0; i2 < primitive->cloakedSsids.cloakedSsidsCount; i2++)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->cloakedSsids.cloakedSsids[i2].ssid, ((u16) (32)));
- CsrUint8Ser(ptr, len, (u8) primitive->cloakedSsids.cloakedSsids[i2].length);
- }
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeCloakedSsidsGetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeCloakedSsidsGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeCloakedSsidsGetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->cloakedSsids.cloakedSsidsCount, buffer, &offset);
- primitive->cloakedSsids.cloakedSsids = NULL;
- if (primitive->cloakedSsids.cloakedSsidsCount)
- {
- primitive->cloakedSsids.cloakedSsids = kmalloc(sizeof(CsrWifiSsid) * primitive->cloakedSsids.cloakedSsidsCount, GFP_KERNEL);
- }
- {
- u16 i2;
- for (i2 = 0; i2 < primitive->cloakedSsids.cloakedSsidsCount; i2++)
- {
- CsrMemCpyDes(primitive->cloakedSsids.cloakedSsids[i2].ssid, buffer, &offset, ((u16) (32)));
- CsrUint8Des((u8 *) &primitive->cloakedSsids.cloakedSsids[i2].length, buffer, &offset);
- }
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmeCloakedSsidsGetCfmSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeCloakedSsidsGetCfm *primitive = (CsrWifiSmeCloakedSsidsGetCfm *) voidPrimitivePointer;
- kfree(primitive->cloakedSsids.cloakedSsids);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeWifiOnIndSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 9) */
- bufferSize += 6; /* u8 primitive->address.a[6] */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeWifiOnIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeWifiOnInd *primitive = (CsrWifiSmeWifiOnInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrMemCpySer(ptr, len, (const void *) primitive->address.a, ((u16) (6)));
- return(ptr);
-}
-
-
-void* CsrWifiSmeWifiOnIndDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeWifiOnInd *primitive = kmalloc(sizeof(CsrWifiSmeWifiOnInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrMemCpyDes(primitive->address.a, buffer, &offset, ((u16) (6)));
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeSmeCommonConfigGetCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 10) */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 1; /* CsrWifiSme80211dTrustLevel primitive->deviceConfig.trustLevel */
- bufferSize += 2; /* u8 primitive->deviceConfig.countryCode[2] */
- bufferSize += 1; /* CsrWifiSmeFirmwareDriverInterface primitive->deviceConfig.firmwareDriverInterface */
- bufferSize += 1; /* u8 primitive->deviceConfig.enableStrictDraftN */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeSmeCommonConfigGetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeSmeCommonConfigGetCfm *primitive = (CsrWifiSmeSmeCommonConfigGetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint8Ser(ptr, len, (u8) primitive->deviceConfig.trustLevel);
- CsrMemCpySer(ptr, len, (const void *) primitive->deviceConfig.countryCode, ((u16) (2)));
- CsrUint8Ser(ptr, len, (u8) primitive->deviceConfig.firmwareDriverInterface);
- CsrUint8Ser(ptr, len, (u8) primitive->deviceConfig.enableStrictDraftN);
- return(ptr);
-}
-
-
-void* CsrWifiSmeSmeCommonConfigGetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeSmeCommonConfigGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeSmeCommonConfigGetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->deviceConfig.trustLevel, buffer, &offset);
- CsrMemCpyDes(primitive->deviceConfig.countryCode, buffer, &offset, ((u16) (2)));
- CsrUint8Des((u8 *) &primitive->deviceConfig.firmwareDriverInterface, buffer, &offset);
- CsrUint8Des((u8 *) &primitive->deviceConfig.enableStrictDraftN, buffer, &offset);
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeInterfaceCapabilityGetCfmSizeof(void *msg)
-{
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 9) */
- bufferSize += 2; /* CsrResult primitive->status */
- bufferSize += 2; /* u16 primitive->numInterfaces */
- bufferSize += 2; /* u8 primitive->capBitmap[2] */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeInterfaceCapabilityGetCfmSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeInterfaceCapabilityGetCfm *primitive = (CsrWifiSmeInterfaceCapabilityGetCfm *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint16Ser(ptr, len, (u16) primitive->status);
- CsrUint16Ser(ptr, len, (u16) primitive->numInterfaces);
- CsrMemCpySer(ptr, len, (const void *) primitive->capBitmap, ((u16) (2)));
- return(ptr);
-}
-
-
-void* CsrWifiSmeInterfaceCapabilityGetCfmDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeInterfaceCapabilityGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeInterfaceCapabilityGetCfm), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
- CsrUint16Des((u16 *) &primitive->numInterfaces, buffer, &offset);
- CsrMemCpyDes(primitive->capBitmap, buffer, &offset, ((u16) (2)));
-
- return primitive;
-}
-
-
-size_t CsrWifiSmeErrorIndSizeof(void *msg)
-{
- CsrWifiSmeErrorInd *primitive = (CsrWifiSmeErrorInd *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 3) */
- bufferSize += (primitive->errorMessage ? strlen(primitive->errorMessage) : 0) + 1; /* char* primitive->errorMessage (0 byte len + 1 for NULL Term) */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeErrorIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeErrorInd *primitive = (CsrWifiSmeErrorInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrCharStringSer(ptr, len, primitive->errorMessage);
- return(ptr);
-}
-
-
-void* CsrWifiSmeErrorIndDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeErrorInd *primitive = kmalloc(sizeof(CsrWifiSmeErrorInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrCharStringDes(&primitive->errorMessage, buffer, &offset);
-
- return primitive;
-}
-
-
-void CsrWifiSmeErrorIndSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeErrorInd *primitive = (CsrWifiSmeErrorInd *) voidPrimitivePointer;
- kfree(primitive->errorMessage);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeInfoIndSizeof(void *msg)
-{
- CsrWifiSmeInfoInd *primitive = (CsrWifiSmeInfoInd *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 3) */
- bufferSize += (primitive->infoMessage ? strlen(primitive->infoMessage) : 0) + 1; /* char* primitive->infoMessage (0 byte len + 1 for NULL Term) */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeInfoIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeInfoInd *primitive = (CsrWifiSmeInfoInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrCharStringSer(ptr, len, primitive->infoMessage);
- return(ptr);
-}
-
-
-void* CsrWifiSmeInfoIndDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeInfoInd *primitive = kmalloc(sizeof(CsrWifiSmeInfoInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrCharStringDes(&primitive->infoMessage, buffer, &offset);
-
- return primitive;
-}
-
-
-void CsrWifiSmeInfoIndSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeInfoInd *primitive = (CsrWifiSmeInfoInd *) voidPrimitivePointer;
- kfree(primitive->infoMessage);
- kfree(primitive);
-}
-
-
-size_t CsrWifiSmeCoreDumpIndSizeof(void *msg)
-{
- CsrWifiSmeCoreDumpInd *primitive = (CsrWifiSmeCoreDumpInd *) msg;
- size_t bufferSize = 2;
-
- /* Calculate the Size of the Serialised Data. Could be more efficient (Try 8) */
- bufferSize += 4; /* u32 primitive->dataLength */
- bufferSize += primitive->dataLength; /* u8 primitive->data */
- return bufferSize;
-}
-
-
-u8* CsrWifiSmeCoreDumpIndSer(u8 *ptr, size_t *len, void *msg)
-{
- CsrWifiSmeCoreDumpInd *primitive = (CsrWifiSmeCoreDumpInd *)msg;
- *len = 0;
- CsrUint16Ser(ptr, len, primitive->common.type);
- CsrUint32Ser(ptr, len, (u32) primitive->dataLength);
- if (primitive->dataLength)
- {
- CsrMemCpySer(ptr, len, (const void *) primitive->data, ((u16) (primitive->dataLength)));
- }
- return(ptr);
-}
-
-
-void* CsrWifiSmeCoreDumpIndDes(u8 *buffer, size_t length)
-{
- CsrWifiSmeCoreDumpInd *primitive = kmalloc(sizeof(CsrWifiSmeCoreDumpInd), GFP_KERNEL);
- size_t offset;
- offset = 0;
-
- CsrUint16Des(&primitive->common.type, buffer, &offset);
- CsrUint32Des((u32 *) &primitive->dataLength, buffer, &offset);
- if (primitive->dataLength)
- {
- primitive->data = kmalloc(primitive->dataLength, GFP_KERNEL);
- CsrMemCpyDes(primitive->data, buffer, &offset, ((u16) (primitive->dataLength)));
- }
- else
- {
- primitive->data = NULL;
- }
-
- return primitive;
-}
-
-
-void CsrWifiSmeCoreDumpIndSerFree(void *voidPrimitivePointer)
-{
- CsrWifiSmeCoreDumpInd *primitive = (CsrWifiSmeCoreDumpInd *) voidPrimitivePointer;
- kfree(primitive->data);
- kfree(primitive);
-}
-
-
diff --git a/drivers/staging/csr/csr_wifi_sme_serialize.h b/drivers/staging/csr/csr_wifi_sme_serialize.h
deleted file mode 100644
index f852626..0000000
--- a/drivers/staging/csr/csr_wifi_sme_serialize.h
+++ /dev/null
@@ -1,666 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2012
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#ifndef CSR_WIFI_SME_SERIALIZE_H__
-#define CSR_WIFI_SME_SERIALIZE_H__
-
-#include "csr_wifi_msgconv.h"
-#include "csr_wifi_sme_prim.h"
-
-extern void CsrWifiSmePfree(void *ptr);
-
-#define CsrWifiSmeActivateReqSer CsrWifiEventSer
-#define CsrWifiSmeActivateReqDes CsrWifiEventDes
-#define CsrWifiSmeActivateReqSizeof CsrWifiEventSizeof
-#define CsrWifiSmeActivateReqSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeAdhocConfigGetReqSer CsrWifiEventSer
-#define CsrWifiSmeAdhocConfigGetReqDes CsrWifiEventDes
-#define CsrWifiSmeAdhocConfigGetReqSizeof CsrWifiEventSizeof
-#define CsrWifiSmeAdhocConfigGetReqSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeAdhocConfigSetReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeAdhocConfigSetReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeAdhocConfigSetReqSizeof(void *msg);
-#define CsrWifiSmeAdhocConfigSetReqSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeBlacklistReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeBlacklistReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeBlacklistReqSizeof(void *msg);
-extern void CsrWifiSmeBlacklistReqSerFree(void *msg);
-
-#define CsrWifiSmeCalibrationDataGetReqSer CsrWifiEventSer
-#define CsrWifiSmeCalibrationDataGetReqDes CsrWifiEventDes
-#define CsrWifiSmeCalibrationDataGetReqSizeof CsrWifiEventSizeof
-#define CsrWifiSmeCalibrationDataGetReqSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeCalibrationDataSetReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeCalibrationDataSetReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeCalibrationDataSetReqSizeof(void *msg);
-extern void CsrWifiSmeCalibrationDataSetReqSerFree(void *msg);
-
-#define CsrWifiSmeCcxConfigGetReqSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmeCcxConfigGetReqDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmeCcxConfigGetReqSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmeCcxConfigGetReqSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeCcxConfigSetReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeCcxConfigSetReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeCcxConfigSetReqSizeof(void *msg);
-#define CsrWifiSmeCcxConfigSetReqSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeCoexConfigGetReqSer CsrWifiEventSer
-#define CsrWifiSmeCoexConfigGetReqDes CsrWifiEventDes
-#define CsrWifiSmeCoexConfigGetReqSizeof CsrWifiEventSizeof
-#define CsrWifiSmeCoexConfigGetReqSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeCoexConfigSetReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeCoexConfigSetReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeCoexConfigSetReqSizeof(void *msg);
-#define CsrWifiSmeCoexConfigSetReqSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeCoexInfoGetReqSer CsrWifiEventSer
-#define CsrWifiSmeCoexInfoGetReqDes CsrWifiEventDes
-#define CsrWifiSmeCoexInfoGetReqSizeof CsrWifiEventSizeof
-#define CsrWifiSmeCoexInfoGetReqSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeConnectReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeConnectReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeConnectReqSizeof(void *msg);
-extern void CsrWifiSmeConnectReqSerFree(void *msg);
-
-#define CsrWifiSmeConnectionConfigGetReqSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmeConnectionConfigGetReqDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmeConnectionConfigGetReqSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmeConnectionConfigGetReqSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeConnectionInfoGetReqSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmeConnectionInfoGetReqDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmeConnectionInfoGetReqSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmeConnectionInfoGetReqSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeConnectionStatsGetReqSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmeConnectionStatsGetReqDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmeConnectionStatsGetReqSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmeConnectionStatsGetReqSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeDeactivateReqSer CsrWifiEventSer
-#define CsrWifiSmeDeactivateReqDes CsrWifiEventDes
-#define CsrWifiSmeDeactivateReqSizeof CsrWifiEventSizeof
-#define CsrWifiSmeDeactivateReqSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeDisconnectReqSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmeDisconnectReqDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmeDisconnectReqSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmeDisconnectReqSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeEventMaskSetReqSer CsrWifiEventCsrUint32Ser
-#define CsrWifiSmeEventMaskSetReqDes CsrWifiEventCsrUint32Des
-#define CsrWifiSmeEventMaskSetReqSizeof CsrWifiEventCsrUint32Sizeof
-#define CsrWifiSmeEventMaskSetReqSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeHostConfigGetReqSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmeHostConfigGetReqDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmeHostConfigGetReqSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmeHostConfigGetReqSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeHostConfigSetReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeHostConfigSetReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeHostConfigSetReqSizeof(void *msg);
-#define CsrWifiSmeHostConfigSetReqSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeKeyReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeKeyReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeKeyReqSizeof(void *msg);
-#define CsrWifiSmeKeyReqSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeLinkQualityGetReqSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmeLinkQualityGetReqDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmeLinkQualityGetReqSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmeLinkQualityGetReqSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeMibConfigGetReqSer CsrWifiEventSer
-#define CsrWifiSmeMibConfigGetReqDes CsrWifiEventDes
-#define CsrWifiSmeMibConfigGetReqSizeof CsrWifiEventSizeof
-#define CsrWifiSmeMibConfigGetReqSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeMibConfigSetReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeMibConfigSetReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeMibConfigSetReqSizeof(void *msg);
-#define CsrWifiSmeMibConfigSetReqSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeMibGetNextReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeMibGetNextReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeMibGetNextReqSizeof(void *msg);
-extern void CsrWifiSmeMibGetNextReqSerFree(void *msg);
-
-extern u8 *CsrWifiSmeMibGetReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeMibGetReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeMibGetReqSizeof(void *msg);
-extern void CsrWifiSmeMibGetReqSerFree(void *msg);
-
-extern u8 *CsrWifiSmeMibSetReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeMibSetReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeMibSetReqSizeof(void *msg);
-extern void CsrWifiSmeMibSetReqSerFree(void *msg);
-
-extern u8 *CsrWifiSmeMulticastAddressReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeMulticastAddressReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeMulticastAddressReqSizeof(void *msg);
-extern void CsrWifiSmeMulticastAddressReqSerFree(void *msg);
-
-extern u8 *CsrWifiSmePacketFilterSetReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmePacketFilterSetReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmePacketFilterSetReqSizeof(void *msg);
-extern void CsrWifiSmePacketFilterSetReqSerFree(void *msg);
-
-#define CsrWifiSmePermanentMacAddressGetReqSer CsrWifiEventSer
-#define CsrWifiSmePermanentMacAddressGetReqDes CsrWifiEventDes
-#define CsrWifiSmePermanentMacAddressGetReqSizeof CsrWifiEventSizeof
-#define CsrWifiSmePermanentMacAddressGetReqSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmePmkidReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmePmkidReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmePmkidReqSizeof(void *msg);
-extern void CsrWifiSmePmkidReqSerFree(void *msg);
-
-#define CsrWifiSmePowerConfigGetReqSer CsrWifiEventSer
-#define CsrWifiSmePowerConfigGetReqDes CsrWifiEventDes
-#define CsrWifiSmePowerConfigGetReqSizeof CsrWifiEventSizeof
-#define CsrWifiSmePowerConfigGetReqSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmePowerConfigSetReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmePowerConfigSetReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmePowerConfigSetReqSizeof(void *msg);
-#define CsrWifiSmePowerConfigSetReqSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeRegulatoryDomainInfoGetReqSer CsrWifiEventSer
-#define CsrWifiSmeRegulatoryDomainInfoGetReqDes CsrWifiEventDes
-#define CsrWifiSmeRegulatoryDomainInfoGetReqSizeof CsrWifiEventSizeof
-#define CsrWifiSmeRegulatoryDomainInfoGetReqSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeRoamingConfigGetReqSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmeRoamingConfigGetReqDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmeRoamingConfigGetReqSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmeRoamingConfigGetReqSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeRoamingConfigSetReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeRoamingConfigSetReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeRoamingConfigSetReqSizeof(void *msg);
-#define CsrWifiSmeRoamingConfigSetReqSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeScanConfigGetReqSer CsrWifiEventSer
-#define CsrWifiSmeScanConfigGetReqDes CsrWifiEventDes
-#define CsrWifiSmeScanConfigGetReqSizeof CsrWifiEventSizeof
-#define CsrWifiSmeScanConfigGetReqSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeScanConfigSetReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeScanConfigSetReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeScanConfigSetReqSizeof(void *msg);
-extern void CsrWifiSmeScanConfigSetReqSerFree(void *msg);
-
-extern u8 *CsrWifiSmeScanFullReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeScanFullReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeScanFullReqSizeof(void *msg);
-extern void CsrWifiSmeScanFullReqSerFree(void *msg);
-
-#define CsrWifiSmeScanResultsFlushReqSer CsrWifiEventSer
-#define CsrWifiSmeScanResultsFlushReqDes CsrWifiEventDes
-#define CsrWifiSmeScanResultsFlushReqSizeof CsrWifiEventSizeof
-#define CsrWifiSmeScanResultsFlushReqSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeScanResultsGetReqSer CsrWifiEventSer
-#define CsrWifiSmeScanResultsGetReqDes CsrWifiEventDes
-#define CsrWifiSmeScanResultsGetReqSizeof CsrWifiEventSizeof
-#define CsrWifiSmeScanResultsGetReqSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeSmeStaConfigGetReqSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmeSmeStaConfigGetReqDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmeSmeStaConfigGetReqSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmeSmeStaConfigGetReqSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeSmeStaConfigSetReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeSmeStaConfigSetReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeSmeStaConfigSetReqSizeof(void *msg);
-#define CsrWifiSmeSmeStaConfigSetReqSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeStationMacAddressGetReqSer CsrWifiEventSer
-#define CsrWifiSmeStationMacAddressGetReqDes CsrWifiEventDes
-#define CsrWifiSmeStationMacAddressGetReqSizeof CsrWifiEventSizeof
-#define CsrWifiSmeStationMacAddressGetReqSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeTspecReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeTspecReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeTspecReqSizeof(void *msg);
-extern void CsrWifiSmeTspecReqSerFree(void *msg);
-
-#define CsrWifiSmeVersionsGetReqSer CsrWifiEventSer
-#define CsrWifiSmeVersionsGetReqDes CsrWifiEventDes
-#define CsrWifiSmeVersionsGetReqSizeof CsrWifiEventSizeof
-#define CsrWifiSmeVersionsGetReqSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeWifiFlightmodeReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeWifiFlightmodeReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeWifiFlightmodeReqSizeof(void *msg);
-extern void CsrWifiSmeWifiFlightmodeReqSerFree(void *msg);
-
-#define CsrWifiSmeWifiOffReqSer CsrWifiEventSer
-#define CsrWifiSmeWifiOffReqDes CsrWifiEventDes
-#define CsrWifiSmeWifiOffReqSizeof CsrWifiEventSizeof
-#define CsrWifiSmeWifiOffReqSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeWifiOnReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeWifiOnReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeWifiOnReqSizeof(void *msg);
-extern void CsrWifiSmeWifiOnReqSerFree(void *msg);
-
-extern u8 *CsrWifiSmeCloakedSsidsSetReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeCloakedSsidsSetReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeCloakedSsidsSetReqSizeof(void *msg);
-extern void CsrWifiSmeCloakedSsidsSetReqSerFree(void *msg);
-
-#define CsrWifiSmeCloakedSsidsGetReqSer CsrWifiEventSer
-#define CsrWifiSmeCloakedSsidsGetReqDes CsrWifiEventDes
-#define CsrWifiSmeCloakedSsidsGetReqSizeof CsrWifiEventSizeof
-#define CsrWifiSmeCloakedSsidsGetReqSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeSmeCommonConfigGetReqSer CsrWifiEventSer
-#define CsrWifiSmeSmeCommonConfigGetReqDes CsrWifiEventDes
-#define CsrWifiSmeSmeCommonConfigGetReqSizeof CsrWifiEventSizeof
-#define CsrWifiSmeSmeCommonConfigGetReqSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeSmeCommonConfigSetReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeSmeCommonConfigSetReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeSmeCommonConfigSetReqSizeof(void *msg);
-#define CsrWifiSmeSmeCommonConfigSetReqSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeInterfaceCapabilityGetReqSer CsrWifiEventSer
-#define CsrWifiSmeInterfaceCapabilityGetReqDes CsrWifiEventDes
-#define CsrWifiSmeInterfaceCapabilityGetReqSizeof CsrWifiEventSizeof
-#define CsrWifiSmeInterfaceCapabilityGetReqSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeWpsConfigurationReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeWpsConfigurationReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeWpsConfigurationReqSizeof(void *msg);
-extern void CsrWifiSmeWpsConfigurationReqSerFree(void *msg);
-
-extern u8 *CsrWifiSmeSetReqSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeSetReqDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeSetReqSizeof(void *msg);
-extern void CsrWifiSmeSetReqSerFree(void *msg);
-
-#define CsrWifiSmeActivateCfmSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmeActivateCfmDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmeActivateCfmSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmeActivateCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeAdhocConfigGetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeAdhocConfigGetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeAdhocConfigGetCfmSizeof(void *msg);
-#define CsrWifiSmeAdhocConfigGetCfmSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeAdhocConfigSetCfmSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmeAdhocConfigSetCfmDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmeAdhocConfigSetCfmSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmeAdhocConfigSetCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeAssociationCompleteIndSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeAssociationCompleteIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeAssociationCompleteIndSizeof(void *msg);
-extern void CsrWifiSmeAssociationCompleteIndSerFree(void *msg);
-
-extern u8 *CsrWifiSmeAssociationStartIndSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeAssociationStartIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeAssociationStartIndSizeof(void *msg);
-#define CsrWifiSmeAssociationStartIndSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeBlacklistCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeBlacklistCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeBlacklistCfmSizeof(void *msg);
-extern void CsrWifiSmeBlacklistCfmSerFree(void *msg);
-
-extern u8 *CsrWifiSmeCalibrationDataGetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeCalibrationDataGetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeCalibrationDataGetCfmSizeof(void *msg);
-extern void CsrWifiSmeCalibrationDataGetCfmSerFree(void *msg);
-
-#define CsrWifiSmeCalibrationDataSetCfmSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmeCalibrationDataSetCfmDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmeCalibrationDataSetCfmSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmeCalibrationDataSetCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeCcxConfigGetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeCcxConfigGetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeCcxConfigGetCfmSizeof(void *msg);
-#define CsrWifiSmeCcxConfigGetCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeCcxConfigSetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeCcxConfigSetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeCcxConfigSetCfmSizeof(void *msg);
-#define CsrWifiSmeCcxConfigSetCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeCoexConfigGetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeCoexConfigGetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeCoexConfigGetCfmSizeof(void *msg);
-#define CsrWifiSmeCoexConfigGetCfmSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeCoexConfigSetCfmSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmeCoexConfigSetCfmDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmeCoexConfigSetCfmSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmeCoexConfigSetCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeCoexInfoGetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeCoexInfoGetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeCoexInfoGetCfmSizeof(void *msg);
-#define CsrWifiSmeCoexInfoGetCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeConnectCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeConnectCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeConnectCfmSizeof(void *msg);
-#define CsrWifiSmeConnectCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeConnectionConfigGetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeConnectionConfigGetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeConnectionConfigGetCfmSizeof(void *msg);
-extern void CsrWifiSmeConnectionConfigGetCfmSerFree(void *msg);
-
-extern u8 *CsrWifiSmeConnectionInfoGetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeConnectionInfoGetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeConnectionInfoGetCfmSizeof(void *msg);
-extern void CsrWifiSmeConnectionInfoGetCfmSerFree(void *msg);
-
-extern u8 *CsrWifiSmeConnectionQualityIndSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeConnectionQualityIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeConnectionQualityIndSizeof(void *msg);
-#define CsrWifiSmeConnectionQualityIndSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeConnectionStatsGetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeConnectionStatsGetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeConnectionStatsGetCfmSizeof(void *msg);
-#define CsrWifiSmeConnectionStatsGetCfmSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeDeactivateCfmSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmeDeactivateCfmDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmeDeactivateCfmSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmeDeactivateCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeDisconnectCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeDisconnectCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeDisconnectCfmSizeof(void *msg);
-#define CsrWifiSmeDisconnectCfmSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeEventMaskSetCfmSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmeEventMaskSetCfmDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmeEventMaskSetCfmSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmeEventMaskSetCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeHostConfigGetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeHostConfigGetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeHostConfigGetCfmSizeof(void *msg);
-#define CsrWifiSmeHostConfigGetCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeHostConfigSetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeHostConfigSetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeHostConfigSetCfmSizeof(void *msg);
-#define CsrWifiSmeHostConfigSetCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeIbssStationIndSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeIbssStationIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeIbssStationIndSizeof(void *msg);
-#define CsrWifiSmeIbssStationIndSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeKeyCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeKeyCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeKeyCfmSizeof(void *msg);
-#define CsrWifiSmeKeyCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeLinkQualityGetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeLinkQualityGetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeLinkQualityGetCfmSizeof(void *msg);
-#define CsrWifiSmeLinkQualityGetCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeMediaStatusIndSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeMediaStatusIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeMediaStatusIndSizeof(void *msg);
-extern void CsrWifiSmeMediaStatusIndSerFree(void *msg);
-
-extern u8 *CsrWifiSmeMibConfigGetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeMibConfigGetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeMibConfigGetCfmSizeof(void *msg);
-#define CsrWifiSmeMibConfigGetCfmSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeMibConfigSetCfmSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmeMibConfigSetCfmDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmeMibConfigSetCfmSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmeMibConfigSetCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeMibGetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeMibGetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeMibGetCfmSizeof(void *msg);
-extern void CsrWifiSmeMibGetCfmSerFree(void *msg);
-
-extern u8 *CsrWifiSmeMibGetNextCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeMibGetNextCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeMibGetNextCfmSizeof(void *msg);
-extern void CsrWifiSmeMibGetNextCfmSerFree(void *msg);
-
-#define CsrWifiSmeMibSetCfmSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmeMibSetCfmDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmeMibSetCfmSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmeMibSetCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeMicFailureIndSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeMicFailureIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeMicFailureIndSizeof(void *msg);
-#define CsrWifiSmeMicFailureIndSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeMulticastAddressCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeMulticastAddressCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeMulticastAddressCfmSizeof(void *msg);
-extern void CsrWifiSmeMulticastAddressCfmSerFree(void *msg);
-
-extern u8 *CsrWifiSmePacketFilterSetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmePacketFilterSetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmePacketFilterSetCfmSizeof(void *msg);
-#define CsrWifiSmePacketFilterSetCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmePermanentMacAddressGetCfmSer(u8 *ptr, size_t *len,
- void *msg);
-extern void *CsrWifiSmePermanentMacAddressGetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmePermanentMacAddressGetCfmSizeof(void *msg);
-#define CsrWifiSmePermanentMacAddressGetCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmePmkidCandidateListIndSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmePmkidCandidateListIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmePmkidCandidateListIndSizeof(void *msg);
-extern void CsrWifiSmePmkidCandidateListIndSerFree(void *msg);
-
-extern u8 *CsrWifiSmePmkidCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmePmkidCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmePmkidCfmSizeof(void *msg);
-extern void CsrWifiSmePmkidCfmSerFree(void *msg);
-
-extern u8 *CsrWifiSmePowerConfigGetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmePowerConfigGetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmePowerConfigGetCfmSizeof(void *msg);
-#define CsrWifiSmePowerConfigGetCfmSerFree CsrWifiSmePfree
-
-#define CsrWifiSmePowerConfigSetCfmSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmePowerConfigSetCfmDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmePowerConfigSetCfmSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmePowerConfigSetCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeRegulatoryDomainInfoGetCfmSer(u8 *ptr, size_t *len,
- void *msg);
-extern void *CsrWifiSmeRegulatoryDomainInfoGetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeRegulatoryDomainInfoGetCfmSizeof(void *msg);
-#define CsrWifiSmeRegulatoryDomainInfoGetCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeRoamCompleteIndSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeRoamCompleteIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeRoamCompleteIndSizeof(void *msg);
-#define CsrWifiSmeRoamCompleteIndSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeRoamStartIndSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeRoamStartIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeRoamStartIndSizeof(void *msg);
-#define CsrWifiSmeRoamStartIndSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeRoamingConfigGetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeRoamingConfigGetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeRoamingConfigGetCfmSizeof(void *msg);
-#define CsrWifiSmeRoamingConfigGetCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeRoamingConfigSetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeRoamingConfigSetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeRoamingConfigSetCfmSizeof(void *msg);
-#define CsrWifiSmeRoamingConfigSetCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeScanConfigGetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeScanConfigGetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeScanConfigGetCfmSizeof(void *msg);
-extern void CsrWifiSmeScanConfigGetCfmSerFree(void *msg);
-
-#define CsrWifiSmeScanConfigSetCfmSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmeScanConfigSetCfmDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmeScanConfigSetCfmSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmeScanConfigSetCfmSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeScanFullCfmSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmeScanFullCfmDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmeScanFullCfmSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmeScanFullCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeScanResultIndSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeScanResultIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeScanResultIndSizeof(void *msg);
-extern void CsrWifiSmeScanResultIndSerFree(void *msg);
-
-#define CsrWifiSmeScanResultsFlushCfmSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmeScanResultsFlushCfmDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmeScanResultsFlushCfmSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmeScanResultsFlushCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeScanResultsGetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeScanResultsGetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeScanResultsGetCfmSizeof(void *msg);
-extern void CsrWifiSmeScanResultsGetCfmSerFree(void *msg);
-
-extern u8 *CsrWifiSmeSmeStaConfigGetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeSmeStaConfigGetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeSmeStaConfigGetCfmSizeof(void *msg);
-#define CsrWifiSmeSmeStaConfigGetCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeSmeStaConfigSetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeSmeStaConfigSetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeSmeStaConfigSetCfmSizeof(void *msg);
-#define CsrWifiSmeSmeStaConfigSetCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeStationMacAddressGetCfmSer(u8 *ptr, size_t *len,
- void *msg);
-extern void *CsrWifiSmeStationMacAddressGetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeStationMacAddressGetCfmSizeof(void *msg);
-#define CsrWifiSmeStationMacAddressGetCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeTspecIndSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeTspecIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeTspecIndSizeof(void *msg);
-extern void CsrWifiSmeTspecIndSerFree(void *msg);
-
-extern u8 *CsrWifiSmeTspecCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeTspecCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeTspecCfmSizeof(void *msg);
-extern void CsrWifiSmeTspecCfmSerFree(void *msg);
-
-extern u8 *CsrWifiSmeVersionsGetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeVersionsGetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeVersionsGetCfmSizeof(void *msg);
-extern void CsrWifiSmeVersionsGetCfmSerFree(void *msg);
-
-#define CsrWifiSmeWifiFlightmodeCfmSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmeWifiFlightmodeCfmDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmeWifiFlightmodeCfmSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmeWifiFlightmodeCfmSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeWifiOffIndSer CsrWifiEventCsrUint8Ser
-#define CsrWifiSmeWifiOffIndDes CsrWifiEventCsrUint8Des
-#define CsrWifiSmeWifiOffIndSizeof CsrWifiEventCsrUint8Sizeof
-#define CsrWifiSmeWifiOffIndSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeWifiOffCfmSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmeWifiOffCfmDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmeWifiOffCfmSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmeWifiOffCfmSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeWifiOnCfmSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmeWifiOnCfmDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmeWifiOnCfmSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmeWifiOnCfmSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeCloakedSsidsSetCfmSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmeCloakedSsidsSetCfmDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmeCloakedSsidsSetCfmSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmeCloakedSsidsSetCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeCloakedSsidsGetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeCloakedSsidsGetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeCloakedSsidsGetCfmSizeof(void *msg);
-extern void CsrWifiSmeCloakedSsidsGetCfmSerFree(void *msg);
-
-extern u8 *CsrWifiSmeWifiOnIndSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeWifiOnIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeWifiOnIndSizeof(void *msg);
-#define CsrWifiSmeWifiOnIndSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeSmeCommonConfigGetCfmSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeSmeCommonConfigGetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeSmeCommonConfigGetCfmSizeof(void *msg);
-#define CsrWifiSmeSmeCommonConfigGetCfmSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeSmeCommonConfigSetCfmSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmeSmeCommonConfigSetCfmDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmeSmeCommonConfigSetCfmSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmeSmeCommonConfigSetCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeInterfaceCapabilityGetCfmSer(u8 *ptr, size_t *len,
- void *msg);
-extern void *CsrWifiSmeInterfaceCapabilityGetCfmDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeInterfaceCapabilityGetCfmSizeof(void *msg);
-#define CsrWifiSmeInterfaceCapabilityGetCfmSerFree CsrWifiSmePfree
-
-extern u8 *CsrWifiSmeErrorIndSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeErrorIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeErrorIndSizeof(void *msg);
-extern void CsrWifiSmeErrorIndSerFree(void *msg);
-
-extern u8 *CsrWifiSmeInfoIndSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeInfoIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeInfoIndSizeof(void *msg);
-extern void CsrWifiSmeInfoIndSerFree(void *msg);
-
-extern u8 *CsrWifiSmeCoreDumpIndSer(u8 *ptr, size_t *len, void *msg);
-extern void *CsrWifiSmeCoreDumpIndDes(u8 *buffer, size_t len);
-extern size_t CsrWifiSmeCoreDumpIndSizeof(void *msg);
-extern void CsrWifiSmeCoreDumpIndSerFree(void *msg);
-
-#define CsrWifiSmeAmpStatusChangeIndSer CsrWifiEventCsrUint16CsrUint8Ser
-#define CsrWifiSmeAmpStatusChangeIndDes CsrWifiEventCsrUint16CsrUint8Des
-#define CsrWifiSmeAmpStatusChangeIndSizeof CsrWifiEventCsrUint16CsrUint8Sizeof
-#define CsrWifiSmeAmpStatusChangeIndSerFree CsrWifiSmePfree
-
-#define CsrWifiSmeWpsConfigurationCfmSer CsrWifiEventCsrUint16Ser
-#define CsrWifiSmeWpsConfigurationCfmDes CsrWifiEventCsrUint16Des
-#define CsrWifiSmeWpsConfigurationCfmSizeof CsrWifiEventCsrUint16Sizeof
-#define CsrWifiSmeWpsConfigurationCfmSerFree CsrWifiSmePfree
-
-#endif /* CSR_WIFI_SME_SERIALIZE_H__ */
-
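Most entries in the header above are not real functions at all: primitives whose payload is nothing more than the common event header (or a single fixed-width field) are simply aliased to the shared CsrWifiEvent* serialisers and freed with CsrWifiSmePfree, while only the variable-length primitives get dedicated Ser/Des/Sizeof/SerFree declarations. A hypothetical payload-free request (the primitive name below is invented) would be wired up the same way:

    /* Hypothetical addition following the convention above; not in the original header. */
    #define CsrWifiSmeExampleNoPayloadReqSer      CsrWifiEventSer
    #define CsrWifiSmeExampleNoPayloadReqDes      CsrWifiEventDes
    #define CsrWifiSmeExampleNoPayloadReqSizeof   CsrWifiEventSizeof
    #define CsrWifiSmeExampleNoPayloadReqSerFree  CsrWifiSmePfree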
diff --git a/drivers/staging/csr/csr_wifi_sme_task.h b/drivers/staging/csr/csr_wifi_sme_task.h
deleted file mode 100644
index 1e938c1..0000000
--- a/drivers/staging/csr/csr_wifi_sme_task.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-/* Note: this is an auto-generated file. */
-
-#ifndef CSR_WIFI_SME_TASK_H__
-#define CSR_WIFI_SME_TASK_H__
-
-#include "csr_sched.h"
-
-#define CSR_WIFI_SME_LOG_ID 0x1202FFFF
-extern CsrSchedQid CSR_WIFI_SME_IFACEQUEUE;
-void CsrWifiSmeInit(void **gash);
-void CsrWifiSmeDeinit(void **gash);
-void CsrWifiSmeHandler(void **gash);
-
-#endif /* CSR_WIFI_SME_TASK_H__ */
-
diff --git a/drivers/staging/csr/csr_wifi_vif_utils.h b/drivers/staging/csr/csr_wifi_vif_utils.h
deleted file mode 100644
index 8ff9788..0000000
--- a/drivers/staging/csr/csr_wifi_vif_utils.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*****************************************************************************
-
- (c) Cambridge Silicon Radio Limited 2011
- All rights reserved and confidential information of CSR
-
- Refer to LICENSE.txt included with this source for details
- on the license terms.
-
-*****************************************************************************/
-
-#ifndef CSR_WIFI_VIF_UTILS_H
-#define CSR_WIFI_VIF_UTILS_H
-
-/* STANDARD INCLUDES ********************************************************/
-
-/* PROJECT INCLUDES *********************************************************/
-/* including this file for CsrWifiInterfaceMode*/
-#include "csr_wifi_private_common.h"
-
-/* MACROS *******************************************************************/
-
-/* Common macros for NME and SME to be used temporarily until SoftMAC changes are made */
-#define CSR_WIFI_NUM_INTERFACES (u8)0x1
-#define CSR_WIFI_INTERFACE_IN_USE (u16)0x0
-
-#endif /* CSR_WIFI_VIF_UTILS_H */
-
diff --git a/drivers/staging/csr/data_tx.c b/drivers/staging/csr/data_tx.c
deleted file mode 100644
index 9e3d8b8..0000000
--- a/drivers/staging/csr/data_tx.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * ---------------------------------------------------------------------------
- * FILE: data_tx.c
- *
- * PURPOSE:
- * This file provides functions to send data requests to the UniFi.
- *
- * Copyright (C) 2007-2009 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ---------------------------------------------------------------------------
- */
-#include "csr_wifi_hip_unifi.h"
-#include "unifi_priv.h"
-
-int
-uf_verify_m4(unifi_priv_t *priv, const unsigned char *packet, unsigned int length)
-{
- const unsigned char *p = packet;
- u16 keyinfo;
-
-
- if (length < (4 + 5 + 8 + 32 + 16 + 8 + 8 + 16 + 1 + 8))
- return 1;
-
- p += 8;
- keyinfo = p[5] << 8 | p[6]; /* big-endian */
- if (
- (p[0] == 1 || p[0] == 2) /* protocol version 802.1X-2001 (WPA) or -2004 (WPA2) */ &&
- p[1] == 3 /* EAPOL-Key */ &&
- /* don't bother checking p[2] p[3] (hh ll, packet body length) */
- (p[4] == 254 || p[4] == 2) /* descriptor type P802.1i-D3.0 (WPA) or 802.11i-2004 (WPA2) */ &&
- ((keyinfo & 0x0007) == 1 || (keyinfo & 0x0007) == 2) /* key descriptor version */ &&
- (keyinfo & ~0x0207U) == 0x0108 && /* key info for 4/4 or 4/2 -- ignore key desc version and sec bit (since varies in WPA 4/4) */
- (p[4 + 5 + 8 + 32 + 16 + 8 + 8 + 16 + 0] == 0 && /* key data length (2 octets) 0 for 4/4 only */
- p[4 + 5 + 8 + 32 + 16 + 8 + 8 + 16 + 1] == 0)
- ) {
- unifi_trace(priv, UDBG1, "uf_verify_m4: M4 detected\n");
- return 0;
- } else {
- return 1;
- }
-}
-
-/*
- * ---------------------------------------------------------------------------
- *
- * Data transport signals.
- *
- * ---------------------------------------------------------------------------
- */
-
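uf_verify_m4() in the deleted file above returns 0 only when the buffer looks like message 4 of the WPA/WPA2 4-way handshake: an EAPOL-Key frame of the right protocol version and descriptor type, with the expected key-info bits and a zero key-data length. The original caller is not shown in this hunk; a hedged sketch of how such a check might be used on the transmit path (names other than uf_verify_m4 and unifi_trace are assumptions) is:

    /* Hypothetical caller, not from the original file: note an outgoing M4 so that
     * key installation can be deferred until the frame has actually been sent. */
    static void example_check_m4(unifi_priv_t *priv,
                                 const unsigned char *frame, unsigned int len)
    {
            if (uf_verify_m4(priv, frame, len) == 0) {
                    /* return value 0 means "this is message 4/4" */
                    unifi_trace(priv, UDBG1, "outgoing EAPOL M4 detected\n");
            }
    }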
diff --git a/drivers/staging/csr/drv.c b/drivers/staging/csr/drv.c
deleted file mode 100644
index bdc2523..0000000
--- a/drivers/staging/csr/drv.c
+++ /dev/null
@@ -1,2193 +0,0 @@
-/*
- * ---------------------------------------------------------------------------
- * FILE: drv.c
- *
- * PURPOSE:
- * Conventional device interface for debugging/monitoring of the
- * driver and h/w using unicli. This interface is also being used
- * by the SME linux implementation and the helper apps.
- *
- * Copyright (C) 2005-2009 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ---------------------------------------------------------------------------
- */
-
-/*
- * Porting Notes:
- * Part of this file contains an example of how to glue the OS layer
- * with the HIP core lib, the SDIO glue layer, and the SME.
- *
- * When the unifi_sdio.ko module loads, the Linux kernel calls unifi_load().
- * unifi_load() calls uf_sdio_load() which is exported by the SDIO glue
- * layer. uf_sdio_load() registers this driver with the underlying SDIO driver.
- * When a card is detected, the SDIO glue layer calls register_unifi_sdio()
- * to pass the SDIO function context and ask the OS layer to initialise
- * the card. register_unifi_sdio() allocates all the private data of the OS
- * layer and calls uf_run_unifihelper() to start the SME. The SME calls
- * unifi_sys_wifi_on_req() which uses the HIP core lib to initialise the card.
- */
-
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <asm/uaccess.h>
-#include <linux/jiffies.h>
-#include <linux/version.h>
-
-#include "csr_wifi_hip_unifiversion.h"
-#include "unifi_priv.h"
-#include "csr_wifi_hip_conversions.h"
-#include "unifi_native.h"
-
-/* Module parameter variables */
-int buswidth = 0; /* 0 means use default, values 1,4 */
-int sdio_clock = 50000; /* kHz */
-int unifi_debug = 0;
-/* fw_init: set to 0 to prevent f/w initialisation on error. */
-int fw_init[MAX_UNIFI_DEVS] = {-1, -1};
-int use_5g = 0;
-int led_mask = 0; /* 0x0c00 for dev-pc-1503c, dev-pc-1528a */
-int disable_hw_reset = 0;
-int disable_power_control = 0;
-int enable_wol = UNIFI_WOL_OFF; /* 0 for none, 1 for SDIO IRQ, 2 for PIO */
-#if (defined CSR_SUPPORT_SME) && (defined CSR_SUPPORT_WEXT)
-int tl_80211d = (int)CSR_WIFI_SME_80211D_TRUST_LEVEL_MIB;
-#endif
-int sdio_block_size = -1; /* Override SDIO block size */
-int sdio_byte_mode = 0; /* 0 for block mode + padding, 1 for byte mode */
-int coredump_max = CSR_WIFI_HIP_NUM_COREDUMP_BUFFERS;
-int run_bh_once = -1; /* Set for scheduled interrupt mode, -1 = default */
-int bh_priority = -1;
-#ifdef CSR_WIFI_HIP_DEBUG_OFFLINE
-#define UNIFI_LOG_HIP_SIGNALS_FILTER_BULKDATA (1 << 1)
-#define UNIFI_LOG_HIP_SIGNALS_FILTER_TIMESTAMP (1 << 2)
-int log_hip_signals = 0;
-#endif
-
-MODULE_DESCRIPTION("CSR UniFi (SDIO)");
-
-module_param(buswidth, int, S_IRUGO|S_IWUSR);
-module_param(sdio_clock, int, S_IRUGO|S_IWUSR);
-module_param(unifi_debug, int, S_IRUGO|S_IWUSR);
-module_param_array(fw_init, int, NULL, S_IRUGO|S_IWUSR);
-module_param(use_5g, int, S_IRUGO|S_IWUSR);
-module_param(led_mask, int, S_IRUGO|S_IWUSR);
-module_param(disable_hw_reset, int, S_IRUGO|S_IWUSR);
-module_param(disable_power_control, int, S_IRUGO|S_IWUSR);
-module_param(enable_wol, int, S_IRUGO|S_IWUSR);
-#if (defined CSR_SUPPORT_SME) && (defined CSR_SUPPORT_WEXT)
-module_param(tl_80211d, int, S_IRUGO|S_IWUSR);
-#endif
-module_param(sdio_block_size, int, S_IRUGO|S_IWUSR);
-module_param(sdio_byte_mode, int, S_IRUGO|S_IWUSR);
-module_param(coredump_max, int, S_IRUGO|S_IWUSR);
-module_param(run_bh_once, int, S_IRUGO|S_IWUSR);
-module_param(bh_priority, int, S_IRUGO|S_IWUSR);
-#ifdef CSR_WIFI_HIP_DEBUG_OFFLINE
-module_param(log_hip_signals, int, S_IRUGO|S_IWUSR);
-#endif
-
-MODULE_PARM_DESC(buswidth, "SDIO bus width (0=default), set 1 for 1-bit or 4 for 4-bit mode");
-MODULE_PARM_DESC(sdio_clock, "SDIO bus frequency in kHz (default = 50 MHz)");
-MODULE_PARM_DESC(unifi_debug, "Diagnostic reporting level");
-MODULE_PARM_DESC(fw_init, "Set to 0 to prevent f/w initialization on error");
-MODULE_PARM_DESC(use_5g, "Use the 5G (802.11a) radio band");
-MODULE_PARM_DESC(led_mask, "LED mask flags");
-MODULE_PARM_DESC(disable_hw_reset, "Set to 1 to disable hardware reset");
-MODULE_PARM_DESC(disable_power_control, "Set to 1 to disable SDIO power control");
-MODULE_PARM_DESC(enable_wol, "Enable wake-on-wlan function 0=off, 1=SDIO, 2=PIO");
-#if (defined CSR_SUPPORT_SME) && (defined CSR_SUPPORT_WEXT)
-MODULE_PARM_DESC(tl_80211d, "802.11d Trust Level (1-6, default = 5)");
-#endif
-MODULE_PARM_DESC(sdio_block_size, "Set to override SDIO block size");
-MODULE_PARM_DESC(sdio_byte_mode, "Set to 1 for byte mode SDIO");
-MODULE_PARM_DESC(coredump_max, "Number of chip mini-coredump buffers to allocate");
-MODULE_PARM_DESC(run_bh_once, "Run BH only when firmware interrupts");
-MODULE_PARM_DESC(bh_priority, "Modify the BH thread priority");
-#ifdef CSR_WIFI_HIP_DEBUG_OFFLINE
-MODULE_PARM_DESC(log_hip_signals, "Set to 1 to enable HIP signal offline logging");
-#endif
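Everything above is exposed as ordinary module parameters, so the defaults can be overridden at load time (and, for the S_IWUSR ones, adjusted later through /sys/module/.../parameters). Purely as an illustration, with arbitrary values:

    /* Illustrative only -- load the driver with a slower SDIO clock and more
     * verbose diagnostics, using the parameter names declared above:
     *
     *     insmod unifi_sdio.ko sdio_clock=25000 unifi_debug=5 sdio_byte_mode=1
     */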
-
-
-/* Callback for event logging to UDI clients */
-static void udi_log_event(ul_client_t *client,
- const u8 *signal, int signal_len,
- const bulk_data_param_t *bulkdata,
- int dir);
-
-static void udi_set_log_filter(ul_client_t *pcli,
- unifiio_filter_t *udi_filter);
-
-
-/* Mutex to protect access to priv->sme_cli */
-DEFINE_SEMAPHORE(udi_mutex);
-
-s32 CsrHipResultToStatus(CsrResult csrResult)
-{
- s32 r = -EIO;
-
- switch (csrResult)
- {
- case CSR_RESULT_SUCCESS:
- r = 0;
- break;
- case CSR_WIFI_HIP_RESULT_RANGE:
- r = -ERANGE;
- break;
- case CSR_WIFI_HIP_RESULT_NO_DEVICE:
- r = -ENODEV;
- break;
- case CSR_WIFI_HIP_RESULT_INVALID_VALUE:
- r = -EINVAL;
- break;
- case CSR_WIFI_HIP_RESULT_NOT_FOUND:
- r = -ENOENT;
- break;
- case CSR_WIFI_HIP_RESULT_NO_SPACE:
- r = -ENOSPC;
- break;
- case CSR_WIFI_HIP_RESULT_NO_MEMORY:
- r = -ENOMEM;
- break;
- case CSR_RESULT_FAILURE:
- r = -EIO;
- break;
- default:
- /*unifi_warning(card->ospriv, "CsrHipResultToStatus: Unrecognised csrResult error code: %d\n", csrResult);*/
- r = -EIO;
- }
- return r;
-}
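CsrHipResultToStatus() above is the one place where HIP-core CsrResult codes are folded into the negative errno values that the Linux entry points return. A typical call site (hypothetical, for illustration) just propagates the translated value:

    /* Hypothetical call site: translate a HIP result into an errno for the caller. */
    static int example_translate(CsrResult csrResult)
    {
            int r = CsrHipResultToStatus(csrResult);   /* 0 on success, -Exxx on failure */

            if (r < 0)
                    return r;                          /* hand the errno straight back */
            return 0;
    }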
-
-
-static const char*
-trace_putest_cmdid(unifi_putest_command_t putest_cmd)
-{
- switch (putest_cmd) {
- case UNIFI_PUTEST_START:
- return "START";
- case UNIFI_PUTEST_STOP:
- return "STOP";
- case UNIFI_PUTEST_SET_SDIO_CLOCK:
- return "SET CLOCK";
- case UNIFI_PUTEST_CMD52_READ:
- return "CMD52R";
- case UNIFI_PUTEST_CMD52_BLOCK_READ:
- return "CMD52BR";
- case UNIFI_PUTEST_CMD52_WRITE:
- return "CMD52W";
- case UNIFI_PUTEST_DL_FW:
- return "D/L FW";
- case UNIFI_PUTEST_DL_FW_BUFF:
- return "D/L FW BUFFER";
- case UNIFI_PUTEST_COREDUMP_PREPARE:
- return "PREPARE COREDUMP";
- case UNIFI_PUTEST_GP_READ16:
- return "GP16R";
- case UNIFI_PUTEST_GP_WRITE16:
- return "GP16W";
- default:
- return "ERROR: unrecognised command";
- }
- }
-
-#ifdef CSR_WIFI_HIP_DEBUG_OFFLINE
-int uf_register_hip_offline_debug(unifi_priv_t *priv)
-{
- ul_client_t *udi_cli;
- int i;
-
- udi_cli = ul_register_client(priv, CLI_USING_WIRE_FORMAT, udi_log_event);
- if (udi_cli == NULL) {
- /* Too many clients already using this device */
- unifi_error(priv, "Too many UDI clients already open\n");
- return -ENOSPC;
- }
- unifi_trace(priv, UDBG1, "Offline HIP client is registered\n");
-
- down(&priv->udi_logging_mutex);
- udi_cli->event_hook = udi_log_event;
- unifi_set_udi_hook(priv->card, logging_handler);
- /* Log all signals by default */
- for (i = 0; i < SIG_FILTER_SIZE; i++) {
- udi_cli->signal_filter[i] = 0xFFFF;
- }
- priv->logging_client = udi_cli;
- up(&priv->udi_logging_mutex);
-
- return 0;
-}
-
-int uf_unregister_hip_offline_debug(unifi_priv_t *priv)
-{
- ul_client_t *udi_cli = priv->logging_client;
- if (udi_cli == NULL)
- {
- unifi_error(priv, "Unknown HIP client unregister request\n");
- return -ERANGE;
- }
-
- unifi_trace(priv, UDBG1, "Offline HIP client is unregistered\n");
-
- down(&priv->udi_logging_mutex);
- priv->logging_client = NULL;
- udi_cli->event_hook = NULL;
- up(&priv->udi_logging_mutex);
-
- ul_deregister_client(udi_cli);
-
- return 0;
-}
-#endif
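When CSR_WIFI_HIP_DEBUG_OFFLINE is built in, the register/unregister pair above turns mirroring of HIP signals to an internal UDI client on and off. The real call sites are outside this excerpt; a hedged sketch of the intended pairing is:

    #ifdef CSR_WIFI_HIP_DEBUG_OFFLINE
    /* Hypothetical usage, not from the original file: log HIP signals while traffic runs. */
    static void example_offline_logging(unifi_priv_t *priv)
    {
            if (uf_register_hip_offline_debug(priv) == 0) {
                    /* ... signals are now mirrored to priv->logging_client ... */
                    uf_unregister_hip_offline_debug(priv);
            }
    }
    #endif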
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_open
- * unifi_release
- *
- * Open and release entry points for the UniFi debug driver.
- *
- * Arguments:
- * Normal linux driver args.
- *
- * Returns:
- * Linux error code.
- * ---------------------------------------------------------------------------
- */
-static int
-unifi_open(struct inode *inode, struct file *file)
-{
- int devno;
- unifi_priv_t *priv;
- ul_client_t *udi_cli;
-
- devno = MINOR(inode->i_rdev) >> 1;
-
- /*
- * Increase the ref_count for the char device clients.
- * Make sure you call uf_put_instance() to decrease it if
- * unifi_open returns an error.
- */
- priv = uf_get_instance(devno);
- if (priv == NULL) {
- unifi_error(NULL, "unifi_open: No device present\n");
- return -ENODEV;
- }
-
- /* Register this instance in the client's list. */
- /* The minor number determines the nature of the client (Unicli or SME). */
- if (MINOR(inode->i_rdev) & 0x1) {
- udi_cli = ul_register_client(priv, CLI_USING_WIRE_FORMAT, udi_log_event);
- if (udi_cli == NULL) {
- /* Too many clients already using this device */
- unifi_error(priv, "Too many clients already open\n");
- uf_put_instance(devno);
- return -ENOSPC;
- }
- unifi_trace(priv, UDBG1, "Client is registered to /dev/unifiudi%d\n", devno);
- } else {
- /*
- * Even-numbered device nodes belong to the control application.
- * This is the userspace helper containing SME or
- * unifi_manager.
- */
-
- down(&udi_mutex);
-
-#ifdef CSR_SME_USERSPACE
- /* Check if a config client is already attached */
- if (priv->sme_cli) {
- up(&udi_mutex);
- uf_put_instance(devno);
-
- unifi_info(priv, "There is already a configuration client using the character device\n");
- return -EBUSY;
- }
-#endif /* CSR_SME_USERSPACE */
-
-#ifdef CSR_SUPPORT_SME
- udi_cli = ul_register_client(priv,
- CLI_USING_WIRE_FORMAT | CLI_SME_USERSPACE,
- sme_log_event);
-#else
- /* Config client for native driver */
- udi_cli = ul_register_client(priv,
- 0,
- sme_native_log_event);
-#endif
- if (udi_cli == NULL) {
- /* Too many clients already using this device */
- up(&udi_mutex);
- uf_put_instance(devno);
-
- unifi_error(priv, "Too many clients already open\n");
- return -ENOSPC;
- }
-
- /*
- * Fill-in the pointer to the configuration client.
- * This is the SME userspace helper or unifi_manager.
- * Not used in the SME embedded version.
- */
- unifi_trace(priv, UDBG1, "SME client (id:%d s:0x%X) is registered\n",
- udi_cli->client_id, udi_cli->sender_id);
- /* Store the SME UniFi Linux Client */
- if (priv->sme_cli == NULL) {
- priv->sme_cli = udi_cli;
- }
-
- up(&udi_mutex);
- }
-
-
- /*
- * Store the pointer to the client.
- * All char driver's entry points will pass this pointer.
- */
- file->private_data = udi_cli;
-
- return 0;
-} /* unifi_open() */
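As the comments in unifi_open() explain, the low bit of the minor number picks the client type: odd minors register a UDI logging client (the /dev/unifiudiN name printed in the trace message), while even minors attach the single configuration client (the userspace SME helper or unifi_manager). A user-space sketch, assuming the device node path matches the name the driver prints, might be:

    /* Hypothetical user-space client opening the odd-minor (UDI) node. */
    #include <fcntl.h>
    #include <unistd.h>

    int example_open_udi(void)
    {
            int fd = open("/dev/unifiudi0", O_RDWR);   /* odd minor -> UDI logging client */

            if (fd < 0)
                    return -1;   /* errno is ENOSPC when too many clients are already open */
            return fd;           /* close(fd) later ends up in unifi_release() */
    }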
-
-
-static int
-unifi_release(struct inode *inode, struct file *filp)
-{
- ul_client_t *udi_cli = (void*)filp->private_data;
- int devno;
- unifi_priv_t *priv;
-
- priv = uf_find_instance(udi_cli->instance);
- if (!priv) {
- unifi_error(priv, "unifi_close: instance for device not found\n");
- return -ENODEV;
- }
-
- devno = MINOR(inode->i_rdev) >> 1;
-
- /* Even device nodes are the config client (i.e. SME or unifi_manager) */
- if ((MINOR(inode->i_rdev) & 0x1) == 0) {
-
- if (priv->sme_cli != udi_cli) {
- unifi_notice(priv, "Surprise closing config device: not the sme client\n");
- }
- unifi_notice(priv, "SME client close (unifi%d)\n", devno);
-
- /*
- * Clear sme_cli before calling unifi_sys_... so it doesn't try to
- * queue a reply to the (now gone) SME.
- */
- down(&udi_mutex);
- priv->sme_cli = NULL;
- up(&udi_mutex);
-
-#ifdef CSR_SME_USERSPACE
- /* Power-down when config client closes */
- {
- CsrWifiRouterCtrlWifiOffReq req = {{CSR_WIFI_ROUTER_CTRL_HIP_REQ, 0, 0, 0, NULL}};
- CsrWifiRouterCtrlWifiOffReqHandler(priv, &req.common);
- }
-
- uf_sme_deinit(priv);
-
- /* It is possible that a blocking SME request was made from another process
- * which did not get read by the SME before the WifiOffReq.
- * So check for a pending request which will go unanswered and cancel
- * the wait for event. As only one blocking request can be in progress at
- * a time, up to one event should be completed.
- */
- uf_sme_cancel_request(priv, 0);
-
-#endif /* CSR_SME_USERSPACE */
- } else {
-
- unifi_trace(priv, UDBG2, "UDI client close (unifiudi%d)\n", devno);
-
- /* If the pointer matches the logging client, stop logging. */
- down(&priv->udi_logging_mutex);
- if (udi_cli == priv->logging_client) {
- priv->logging_client = NULL;
- }
- up(&priv->udi_logging_mutex);
-
- if (udi_cli == priv->amp_client) {
- priv->amp_client = NULL;
- }
- }
-
- /* Deregister this instance from the client's list. */
- ul_deregister_client(udi_cli);
-
- uf_put_instance(devno);
-
- return 0;
-} /* unifi_release() */
-
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_read
- *
- * The read() driver entry point.
- *
- * Arguments:
- * filp The file descriptor returned by unifi_open()
- * p The user space buffer to copy the read data
- * len The size of the p buffer
- * poff
- *
- * Returns:
- * number of bytes read or an error code on failure
- * ---------------------------------------------------------------------------
- */
-static ssize_t
-unifi_read(struct file *filp, char *p, size_t len, loff_t *poff)
-{
- ul_client_t *pcli = (void*)filp->private_data;
- unifi_priv_t *priv;
- udi_log_t *logptr = NULL;
- udi_msg_t *msgptr;
- struct list_head *l;
- int msglen;
-
- priv = uf_find_instance(pcli->instance);
- if (!priv) {
- unifi_error(priv, "invalid priv\n");
- return -ENODEV;
- }
-
- if (!pcli->udi_enabled) {
- unifi_error(priv, "unifi_read: unknown client.");
- return -EINVAL;
- }
-
- if (list_empty(&pcli->udi_log)) {
- if (filp->f_flags & O_NONBLOCK) {
- /* Non-blocking - just return if the udi_log is empty */
- return 0;
- } else {
- /* Blocking - wait on the UDI wait queue */
- if (wait_event_interruptible(pcli->udi_wq,
- !list_empty(&pcli->udi_log)))
- {
- unifi_error(priv, "unifi_read: wait_event_interruptible failed.");
- return -ERESTARTSYS;
- }
- }
- }
-
- /* Read entry from list head and remove it from the list */
- if (down_interruptible(&pcli->udi_sem)) {
- return -ERESTARTSYS;
- }
- l = pcli->udi_log.next;
- list_del(l);
- up(&pcli->udi_sem);
-
- /* Get a pointer to whole struct */
- logptr = list_entry(l, udi_log_t, q);
- if (logptr == NULL) {
- unifi_error(priv, "unifi_read: failed to get event.\n");
- return -EINVAL;
- }
-
- /* Get the real message */
- msgptr = &logptr->msg;
- msglen = msgptr->length;
- if (msglen > len) {
- printk(KERN_WARNING "truncated read to %d actual msg len is %lu\n", msglen, (long unsigned int)len);
- msglen = len;
- }
-
- /* and pass it to the client (SME or Unicli). */
- if (copy_to_user(p, msgptr, msglen))
- {
- printk(KERN_ERR "Failed to copy UDI log to user\n");
- kfree(logptr);
- return -EFAULT;
- }
-
- /* It is our responsibility to free the message buffer. */
- kfree(logptr);
-
- return msglen;
-
-} /* unifi_read() */
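unifi_read() above hands each queued UDI log entry to user space as one complete wire-format message, blocking until something arrives unless the file was opened with O_NONBLOCK; oversized messages are truncated to the caller's buffer. A minimal (assumed) consumer loop in user space could therefore be:

    /* Hypothetical user-space reader: every successful read() returns one logged message. */
    #include <unistd.h>

    static void example_drain_udi_log(int fd)
    {
            unsigned char buf[8192];
            ssize_t n;

            while ((n = read(fd, buf, sizeof(buf))) > 0) {
                    /* buf[0..n-1] holds one udi_msg_t header plus its payload;
                     * anything longer than buf would have been truncated by the driver. */
            }
    }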
-
-
-
-/*
- * ---------------------------------------------------------------------------
- * udi_send_signal_unpacked
- *
- * Sends an unpacked signal to UniFi.
- *
- * Arguments:
- * priv Pointer to private context struct
- * data Pointer to request structure and data to send
- * data_len Length of data in data pointer.
- *
- * Returns:
- * Number of bytes written, error otherwise.
- *
- * Notes:
- * All clients that use this function to send a signal to the unifi
- * must use the host formatted structures.
- * ---------------------------------------------------------------------------
- */
-static int
-udi_send_signal_unpacked(unifi_priv_t *priv, unsigned char* data, uint data_len)
-{
- CSR_SIGNAL *sigptr = (CSR_SIGNAL*)data;
- CSR_DATAREF *datarefptr;
- bulk_data_param_t bulk_data;
- uint signal_size, i;
- uint bulk_data_offset = 0;
- int bytecount, r;
- CsrResult csrResult;
-
- /* Number of bytes in the signal */
- signal_size = SigGetSize(sigptr);
- if (!signal_size || (signal_size > data_len)) {
- unifi_error(priv, "unifi_sme_mlme_req - Invalid signal 0x%x size should be %d bytes\n",
- sigptr->SignalPrimitiveHeader.SignalId,
- signal_size);
- return -EINVAL;
- }
- bytecount = signal_size;
-
- /* Get a pointer to the information of the first data reference */
- datarefptr = (CSR_DATAREF*)&sigptr->u;
-
- /* Initialize the offset in the data buffer, bulk data is right after the signal. */
- bulk_data_offset = signal_size;
-
- /* store the references and the size of the bulk data to the bulkdata structure */
- for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; i++) {
- /* the length of the bulk data is in the signal */
- if ((datarefptr+i)->DataLength) {
- void *dest;
-
- csrResult = unifi_net_data_malloc(priv, &bulk_data.d[i], (datarefptr+i)->DataLength);
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(priv, "udi_send_signal_unpacked: failed to allocate request_data.\n");
- return -EIO;
- }
-
- dest = (void*)bulk_data.d[i].os_data_ptr;
- memcpy(dest, data + bulk_data_offset, bulk_data.d[i].data_length);
- } else {
- bulk_data.d[i].data_length = 0;
- }
-
- bytecount += bulk_data.d[i].data_length;
- /* advance the offset to point to the next bulk data */
- bulk_data_offset += bulk_data.d[i].data_length;
- }
-
-
- unifi_trace(priv, UDBG3, "SME Send: signal 0x%.4X\n", sigptr->SignalPrimitiveHeader.SignalId);
-
- /* Send the signal. */
- r = ul_send_signal_unpacked(priv, sigptr, &bulk_data);
- if (r < 0) {
- unifi_error(priv, "udi_send_signal_unpacked: send failed (%d)\n", r);
- for(i=0;i<UNIFI_MAX_DATA_REFERENCES;i++) {
- if(bulk_data.d[i].data_length != 0) {
- unifi_net_data_free(priv, &bulk_data.d[i]);
- }
- }
- return -EIO;
- }
-
- return bytecount;
-} /* udi_send_signal_unpacked() */
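udi_send_signal_unpacked() above expects the write buffer to hold the host-formatted signal first, immediately followed by the bulk-data payloads in data-reference order, with the DataLength fields inside the signal saying how much appended data to expect. A small sketch (illustrative, reusing only names that appear in the function) of the total size such a request occupies:

    /* Illustrative only: total bytes udi_send_signal_unpacked() consumes for one
     * host-format request -- the signal body plus every non-zero data reference. */
    static uint example_unpacked_request_size(CSR_SIGNAL *sigptr)
    {
            CSR_DATAREF *dataref = (CSR_DATAREF *)&sigptr->u;
            uint total = SigGetSize(sigptr);              /* host-formatted signal body */
            uint i;

            for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; i++)
                    total += (dataref + i)->DataLength;   /* appended bulk data, in order */

            return total;
    }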
-
-
-
-/*
- * ---------------------------------------------------------------------------
- * udi_send_signal_raw
- *
- * Sends a packed signal to UniFi.
- *
- * Arguments:
- * priv Pointer to private context struct
- * buf Pointer to request structure and data to send
- * buflen Length of data in data pointer.
- *
- * Returns:
- * Number of bytes written, error otherwise.
- *
- * Notes:
- * All clients that use this function to send a signal to the unifi
- * must use the wire formatted structures.
- * ---------------------------------------------------------------------------
- */
-static int
-udi_send_signal_raw(unifi_priv_t *priv, unsigned char *buf, int buflen)
-{
- int signal_size;
- int sig_id;
- bulk_data_param_t data_ptrs;
- int i, r;
- unsigned int num_data_refs;
- int bytecount;
- CsrResult csrResult;
-
- /*
- * The signal is the first thing in buf, the signal id is the
- * first 16 bits of the signal.
- */
- /* Number of bytes in the signal */
- sig_id = GET_SIGNAL_ID(buf);
- signal_size = buflen;
- signal_size -= GET_PACKED_DATAREF_LEN(buf, 0);
- signal_size -= GET_PACKED_DATAREF_LEN(buf, 1);
- if ((signal_size <= 0) || (signal_size > buflen)) {
- unifi_error(priv, "udi_send_signal_raw - Couldn't find length of signal 0x%x\n",
- sig_id);
- return -EINVAL;
- }
- unifi_trace(priv, UDBG2, "udi_send_signal_raw: signal 0x%.4X len:%d\n",
- sig_id, signal_size);
- /* Zero the data ref arrays */
- memset(&data_ptrs, 0, sizeof(data_ptrs));
-
- /*
- * Find the number of associated bulk data packets. Scan through
- * the data refs to check that we have enough data and pick out
- * pointers to appended bulk data.
- */
- num_data_refs = 0;
- bytecount = signal_size;
-
- for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; ++i)
- {
- unsigned int len = GET_PACKED_DATAREF_LEN(buf, i);
- unifi_trace(priv, UDBG3, "udi_send_signal_raw: data_ref length = %d\n", len);
-
- if (len != 0) {
- void *dest;
-
- csrResult = unifi_net_data_malloc(priv, &data_ptrs.d[i], len);
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(priv, "udi_send_signal_raw: failed to allocate request_data.\n");
- return -EIO;
- }
-
- dest = (void*)data_ptrs.d[i].os_data_ptr;
- memcpy(dest, buf + bytecount, len);
-
- bytecount += len;
- num_data_refs++;
- }
- data_ptrs.d[i].data_length = len;
- }
-
- unifi_trace(priv, UDBG3, "Queueing signal 0x%.4X from UDI with %u data refs\n",
- sig_id,
- num_data_refs);
-
- if (bytecount > buflen) {
- unifi_error(priv, "udi_send_signal_raw: Not enough data (%d instead of %d)\n", buflen, bytecount);
- return -EINVAL;
- }
-
- /* Send the signal calling the function that uses the wire-formatted signals. */
- r = ul_send_signal_raw(priv, buf, signal_size, &data_ptrs);
- if (r < 0) {
- unifi_error(priv, "udi_send_signal_raw: send failed (%d)\n", r);
- return -EIO;
- }
-
-#ifdef CSR_NATIVE_LINUX
- if (sig_id == CSR_MLME_POWERMGT_REQUEST_ID) {
- int power_mode = CSR_GET_UINT16_FROM_LITTLE_ENDIAN((buf +
- SIZEOF_SIGNAL_HEADER + (UNIFI_MAX_DATA_REFERENCES*SIZEOF_DATAREF)));
-#ifdef CSR_SUPPORT_WEXT
-        /* Override the wext power mode with the new value */
- priv->wext_conf.power_mode = power_mode;
-#endif
- /* Configure deep sleep signaling */
- if (power_mode || (priv->interfacePriv[0]->connected == UnifiNotConnected)) {
- csrResult = unifi_configure_low_power_mode(priv->card,
- UNIFI_LOW_POWER_ENABLED,
- UNIFI_PERIODIC_WAKE_HOST_DISABLED);
- } else {
- csrResult = unifi_configure_low_power_mode(priv->card,
- UNIFI_LOW_POWER_DISABLED,
- UNIFI_PERIODIC_WAKE_HOST_DISABLED);
- }
- }
-#endif
-
- return bytecount;
-} /* udi_send_signal_raw */
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_write
- *
- * The write() driver entry point.
- * A UniFi Debug Interface client such as unicli can write a signal
- * plus bulk data to the driver for sending to the UniFi chip.
- *
- * Only one signal may be sent per write operation.
- *
- * Arguments:
- * filp The file descriptor returned by unifi_open()
- * p The user space buffer to get the data from
- * len The size of the p buffer
- *      poff            File position (not used)
- *
- * Returns:
- * number of bytes written or an error code on failure
- * ---------------------------------------------------------------------------
- */
-static ssize_t
-unifi_write(struct file *filp, const char *p, size_t len, loff_t *poff)
-{
- ul_client_t *pcli = (ul_client_t*)filp->private_data;
- unifi_priv_t *priv;
- unsigned char *buf;
- unsigned char *bufptr;
- int remaining;
- int bytes_written;
- int r;
- bulk_data_param_t bulkdata;
- CsrResult csrResult;
-
- priv = uf_find_instance(pcli->instance);
- if (!priv) {
- unifi_error(priv, "invalid priv\n");
- return -ENODEV;
- }
-
- unifi_trace(priv, UDBG5, "unifi_write: len = %d\n", len);
-
- if (!pcli->udi_enabled) {
- unifi_error(priv, "udi disabled\n");
- return -EINVAL;
- }
-
- /*
- * AMP client sends only one signal at a time, so we can use
- * unifi_net_data_malloc to save the extra copy.
- */
- if (pcli == priv->amp_client) {
- int signal_size;
- int sig_id;
- unsigned char *signal_buf;
- char *user_data_buf;
-
- csrResult = unifi_net_data_malloc(priv, &bulkdata.d[0], len);
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(priv, "unifi_write: failed to allocate request_data.\n");
- return -ENOMEM;
- }
-
- user_data_buf = (char*)bulkdata.d[0].os_data_ptr;
-
- /* Get the data from the AMP client. */
- if (copy_from_user((void*)user_data_buf, p, len)) {
- unifi_error(priv, "unifi_write: copy from user failed\n");
- unifi_net_data_free(priv, &bulkdata.d[0]);
- return -EFAULT;
- }
-
- bulkdata.d[1].os_data_ptr = NULL;
- bulkdata.d[1].data_length = 0;
-
- /* Number of bytes in the signal */
- sig_id = GET_SIGNAL_ID(bulkdata.d[0].os_data_ptr);
- signal_size = len;
- signal_size -= GET_PACKED_DATAREF_LEN(bulkdata.d[0].os_data_ptr, 0);
- signal_size -= GET_PACKED_DATAREF_LEN(bulkdata.d[0].os_data_ptr, 1);
- if ((signal_size <= 0) || (signal_size > len)) {
- unifi_error(priv, "unifi_write - Couldn't find length of signal 0x%x\n",
- sig_id);
- unifi_net_data_free(priv, &bulkdata.d[0]);
- return -EINVAL;
- }
-
- unifi_trace(priv, UDBG2, "unifi_write: signal 0x%.4X len:%d\n",
- sig_id, signal_size);
-
- /* Allocate a buffer for the signal */
- signal_buf = kmemdup(bulkdata.d[0].os_data_ptr, signal_size,
- GFP_KERNEL);
- if (!signal_buf) {
- unifi_net_data_free(priv, &bulkdata.d[0]);
- return -ENOMEM;
- }
-
-        /* Set the MSB of the SenderProcessId in the copied signal to the client id */
- signal_buf[5] = (pcli->sender_id >> 8) & 0xff;
-
- if (signal_size < len) {
- /* Remove the signal from the os_data_ptr */
- bulkdata.d[0].data_length -= signal_size;
- bulkdata.d[0].os_data_ptr += signal_size;
- } else {
- bulkdata.d[0].data_length = 0;
- bulkdata.d[0].os_data_ptr = NULL;
- }
-
- /* Send the signal calling the function that uses the wire-formatted signals. */
- r = ul_send_signal_raw(priv, signal_buf, signal_size, &bulkdata);
- if (r < 0) {
- unifi_error(priv, "unifi_write: send failed (%d)\n", r);
- if (bulkdata.d[0].os_data_ptr != NULL) {
- unifi_net_data_free(priv, &bulkdata.d[0]);
- }
- }
-
- /* Free the signal buffer and return */
- kfree(signal_buf);
- return len;
- }
-
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf) {
- return -ENOMEM;
- }
-
- /* Get the data from the client (SME or Unicli). */
- if (copy_from_user((void*)buf, p, len)) {
- unifi_error(priv, "copy from user failed\n");
- kfree(buf);
- return -EFAULT;
- }
-
- /*
-     * In the SME userspace build, the buffer written by the SME contains a SYS or MGT message.
-     * Note that even though the SME sends one signal at a time, we cannot
- * use unifi_net_data_malloc because in the early stages, before having
- * initialised the core, it will fail since the I/O block size is unknown.
- */
-#ifdef CSR_SME_USERSPACE
- if (pcli->configuration & CLI_SME_USERSPACE) {
- CsrWifiRouterTransportRecv(priv, buf, len);
- kfree(buf);
- return len;
- }
-#endif
-
- /* ul_send_signal_raw will do a sanity check of len against signal content */
-
- /*
- * udi_send_signal_raw() and udi_send_signal_unpacked() return the number of bytes consumed.
-     * A write call can pass multiple signals concatenated together.
- */
- bytes_written = 0;
- remaining = len;
- bufptr = buf;
- while (remaining > 0)
- {
- int r;
-
- /*
- * Set the SenderProcessId.
- * The SignalPrimitiveHeader is the first 3 16-bit words of the signal,
- * the SenderProcessId is bytes 4,5.
- * The MSB of the sender ID needs to be set to the client ID.
- * The LSB is controlled by the SME.
- */
- bufptr[5] = (pcli->sender_id >> 8) & 0xff;
-
-        /* Use the appropriate interface, depending on the client's configuration */
- if (pcli->configuration & CLI_USING_WIRE_FORMAT) {
- unifi_trace(priv, UDBG1, "unifi_write: call udi_send_signal().\n");
- r = udi_send_signal_raw(priv, bufptr, remaining);
- } else {
- r = udi_send_signal_unpacked(priv, bufptr, remaining);
- }
- if (r < 0) {
- /* Set the return value to the error code */
- unifi_error(priv, "unifi_write: (udi or sme)_send_signal() returns %d\n", r);
- bytes_written = r;
- break;
- }
- bufptr += r;
- remaining -= r;
- bytes_written += r;
- }
-
- kfree(buf);
-
- return bytes_written;
-} /* unifi_write() */
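/*
 * Minimal companion sketch (an illustration, not code from the original
 * file): the SenderProcessId patch applied inside the unifi_write() loop
 * above, written out on its own. Byte 5 of the 6-byte SignalPrimitiveHeader
 * is the MSB of the SenderProcessId and is set to the client id; the LSB
 * (byte 4) is left for the SME to manage.
 */
static inline void udi_patch_sender_id(unsigned char *sig, u16 sender_id)
{
    sig[5] = (sender_id >> 8) & 0xff;   /* MSB <- client id */
}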
-
-
-static const char* build_type_to_string(unsigned char build_type)
-{
- switch (build_type)
- {
- case UNIFI_BUILD_NME: return "NME";
- case UNIFI_BUILD_WEXT: return "WEXT";
- case UNIFI_BUILD_AP: return "AP";
- }
- return "unknown";
-}
-
-
-/*
- * ----------------------------------------------------------------
- * unifi_ioctl
- *
- * Ioctl handler for unifi driver.
- *
- * Arguments:
- * inodep Pointer to inode structure.
- * filp Pointer to file structure.
- * cmd Ioctl cmd passed by user.
- * arg Ioctl arg passed by user.
- *
- * Returns:
- * 0 on success, -ve error code on error.
- * ----------------------------------------------------------------
- */
-static long
-unifi_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- ul_client_t *pcli = (ul_client_t*)filp->private_data;
- unifi_priv_t *priv;
- struct net_device *dev;
- int r = 0;
- int int_param, i;
- u8* buf;
- CsrResult csrResult;
-#if (defined CSR_SUPPORT_SME)
- unifi_cfg_command_t cfg_cmd;
-#if (defined CSR_SUPPORT_WEXT)
- CsrWifiSmeCoexConfig coex_config;
- unsigned char uchar_param;
- unsigned char varbind[MAX_VARBIND_LENGTH];
- int vblen;
-#endif
-#endif
- unifi_putest_command_t putest_cmd;
-
- priv = uf_find_instance(pcli->instance);
- if (!priv) {
- unifi_error(priv, "ioctl error: unknown instance=%d\n", pcli->instance);
- r = -ENODEV;
- goto out;
- }
- unifi_trace(priv, UDBG5, "unifi_ioctl: cmd=0x%X, arg=0x%lX\n", cmd, arg);
-
- switch (cmd) {
-
- case UNIFI_GET_UDI_ENABLE:
- unifi_trace(priv, UDBG4, "UniFi Get UDI Enable\n");
-
- down(&priv->udi_logging_mutex);
- int_param = (priv->logging_client == NULL) ? 0 : 1;
- up(&priv->udi_logging_mutex);
-
- if (put_user(int_param, (int*)arg))
- {
- unifi_error(priv, "UNIFI_GET_UDI_ENABLE: Failed to copy to user\n");
- r = -EFAULT;
- goto out;
- }
- break;
-
- case UNIFI_SET_UDI_ENABLE:
- unifi_trace(priv, UDBG4, "UniFi Set UDI Enable\n");
- if (get_user(int_param, (int*)arg))
- {
- unifi_error(priv, "UNIFI_SET_UDI_ENABLE: Failed to copy from user\n");
- r = -EFAULT;
- goto out;
- }
-
-#ifdef CSR_WIFI_HIP_DEBUG_OFFLINE
- if (log_hip_signals) {
- unifi_error(priv, "omnicli cannot be used when log_hip_signals is used\n");
- r = -EFAULT;
- goto out;
- }
-#endif
-
- down(&priv->udi_logging_mutex);
- if (int_param) {
- pcli->event_hook = udi_log_event;
- unifi_set_udi_hook(priv->card, logging_handler);
- /* Log all signals by default */
- for (i = 0; i < SIG_FILTER_SIZE; i++) {
- pcli->signal_filter[i] = 0xFFFF;
- }
- priv->logging_client = pcli;
-
- } else {
- priv->logging_client = NULL;
- pcli->event_hook = NULL;
- }
- up(&priv->udi_logging_mutex);
-
- break;
-
- case UNIFI_SET_MIB:
- unifi_trace(priv, UDBG4, "UniFi Set MIB\n");
-#if (defined CSR_SUPPORT_SME) && (defined CSR_SUPPORT_WEXT)
- /* Read first 2 bytes and check length */
- if (copy_from_user((void*)varbind, (void*)arg, 2)) {
- unifi_error(priv,
- "UNIFI_SET_MIB: Failed to copy in varbind header\n");
- r = -EFAULT;
- goto out;
- }
- vblen = varbind[1];
- if ((vblen + 2) > MAX_VARBIND_LENGTH) {
- unifi_error(priv,
- "UNIFI_SET_MIB: Varbind too long (%d, limit %d)\n",
- (vblen+2), MAX_VARBIND_LENGTH);
- r = -EINVAL;
- goto out;
- }
- /* Read rest of varbind */
- if (copy_from_user((void*)(varbind+2), (void*)(arg+2), vblen)) {
- unifi_error(priv, "UNIFI_SET_MIB: Failed to copy in varbind\n");
- r = -EFAULT;
- goto out;
- }
-
- /* send to SME */
- vblen += 2;
- r = sme_mgt_mib_set(priv, varbind, vblen);
- if (r) {
- goto out;
- }
-#else
- unifi_notice(priv, "UNIFI_SET_MIB: Unsupported.\n");
-#endif /* CSR_SUPPORT_WEXT */
- break;
-
- case UNIFI_GET_MIB:
- unifi_trace(priv, UDBG4, "UniFi Get MIB\n");
-#if (defined CSR_SUPPORT_SME) && (defined CSR_SUPPORT_WEXT)
- /* Read first 2 bytes and check length */
- if (copy_from_user((void*)varbind, (void*)arg, 2)) {
- unifi_error(priv, "UNIFI_GET_MIB: Failed to copy in varbind header\n");
- r = -EFAULT;
- goto out;
- }
- vblen = varbind[1];
- if ((vblen+2) > MAX_VARBIND_LENGTH) {
- unifi_error(priv, "UNIFI_GET_MIB: Varbind too long (%d, limit %d)\n",
- (vblen+2), MAX_VARBIND_LENGTH);
- r = -EINVAL;
- goto out;
- }
- /* Read rest of varbind */
- if (copy_from_user((void*)(varbind+2), (void*)(arg+2), vblen)) {
- unifi_error(priv, "UNIFI_GET_MIB: Failed to copy in varbind\n");
- r = -EFAULT;
- goto out;
- }
-
- vblen += 2;
- r = sme_mgt_mib_get(priv, varbind, &vblen);
- if (r) {
- goto out;
- }
- /* copy out varbind */
- if (vblen > MAX_VARBIND_LENGTH) {
- unifi_error(priv,
- "UNIFI_GET_MIB: Varbind result too long (%d, limit %d)\n",
- vblen, MAX_VARBIND_LENGTH);
- r = -EINVAL;
- goto out;
- }
- if (copy_to_user((void*)arg, varbind, vblen)) {
- r = -EFAULT;
- goto out;
- }
-#else
- unifi_notice(priv, "UNIFI_GET_MIB: Unsupported.\n");
-#endif /* CSR_SUPPORT_WEXT */
- break;
-
- case UNIFI_CFG:
-#if (defined CSR_SUPPORT_SME)
- if (get_user(cfg_cmd, (unifi_cfg_command_t*)arg))
- {
- unifi_error(priv, "UNIFI_CFG: Failed to get the command\n");
- r = -EFAULT;
- goto out;
- }
-
- unifi_trace(priv, UDBG1, "UNIFI_CFG: Command is %d (t=%u) sz=%d\n",
- cfg_cmd, jiffies_to_msecs(jiffies), sizeof(unifi_cfg_command_t));
- switch (cfg_cmd) {
- case UNIFI_CFG_POWER:
- r = unifi_cfg_power(priv, (unsigned char*)arg);
- break;
- case UNIFI_CFG_POWERSAVE:
- r = unifi_cfg_power_save(priv, (unsigned char*)arg);
- break;
- case UNIFI_CFG_POWERSUPPLY:
- r = unifi_cfg_power_supply(priv, (unsigned char*)arg);
- break;
- case UNIFI_CFG_FILTER:
- r = unifi_cfg_packet_filters(priv, (unsigned char*)arg);
- break;
- case UNIFI_CFG_GET:
- r = unifi_cfg_get_info(priv, (unsigned char*)arg);
- break;
- case UNIFI_CFG_WMM_QOSINFO:
- r = unifi_cfg_wmm_qos_info(priv, (unsigned char*)arg);
- break;
- case UNIFI_CFG_WMM_ADDTS:
- r = unifi_cfg_wmm_addts(priv, (unsigned char*)arg);
- break;
- case UNIFI_CFG_WMM_DELTS:
- r = unifi_cfg_wmm_delts(priv, (unsigned char*)arg);
- break;
- case UNIFI_CFG_STRICT_DRAFT_N:
- r = unifi_cfg_strict_draft_n(priv, (unsigned char*)arg);
- break;
- case UNIFI_CFG_ENABLE_OKC:
- r = unifi_cfg_enable_okc(priv, (unsigned char*)arg);
- break;
-#ifdef CSR_SUPPORT_SME
- case UNIFI_CFG_CORE_DUMP:
- CsrWifiRouterCtrlWifiOffIndSend(priv->CSR_WIFI_SME_IFACEQUEUE,0,CSR_WIFI_SME_CONTROL_INDICATION_ERROR);
- unifi_trace(priv, UDBG2, "UNIFI_CFG_CORE_DUMP: sent wifi off indication\n");
- break;
-#endif
-#ifdef CSR_SUPPORT_WEXT_AP
- case UNIFI_CFG_SET_AP_CONFIG:
- r= unifi_cfg_set_ap_config(priv,(unsigned char*)arg);
- break;
-#endif
- default:
- unifi_error(priv, "UNIFI_CFG: Unknown Command (%d)\n", cfg_cmd);
- r = -EINVAL;
- goto out;
- }
-#endif
-
- break;
-
- case UNIFI_PUTEST:
- if (get_user(putest_cmd, (unifi_putest_command_t*)arg))
- {
- unifi_error(priv, "UNIFI_PUTEST: Failed to get the command\n");
- r = -EFAULT;
- goto out;
- }
-
- unifi_trace(priv, UDBG1, "UNIFI_PUTEST: Command is %s\n",
- trace_putest_cmdid(putest_cmd));
- switch (putest_cmd) {
- case UNIFI_PUTEST_START:
- r = unifi_putest_start(priv, (unsigned char*)arg);
- break;
- case UNIFI_PUTEST_STOP:
- r = unifi_putest_stop(priv, (unsigned char*)arg);
- break;
- case UNIFI_PUTEST_SET_SDIO_CLOCK:
- r = unifi_putest_set_sdio_clock(priv, (unsigned char*)arg);
- break;
- case UNIFI_PUTEST_CMD52_READ:
- r = unifi_putest_cmd52_read(priv, (unsigned char*)arg);
- break;
- case UNIFI_PUTEST_CMD52_BLOCK_READ:
- r = unifi_putest_cmd52_block_read(priv, (unsigned char*)arg);
- break;
- case UNIFI_PUTEST_CMD52_WRITE:
- r = unifi_putest_cmd52_write(priv, (unsigned char*)arg);
- break;
- case UNIFI_PUTEST_DL_FW:
- r = unifi_putest_dl_fw(priv, (unsigned char*)arg);
- break;
- case UNIFI_PUTEST_DL_FW_BUFF:
- r = unifi_putest_dl_fw_buff(priv, (unsigned char*)arg);
- break;
- case UNIFI_PUTEST_COREDUMP_PREPARE:
- r = unifi_putest_coredump_prepare(priv, (unsigned char*)arg);
- break;
- case UNIFI_PUTEST_GP_READ16:
- r = unifi_putest_gp_read16(priv, (unsigned char*)arg);
- break;
- case UNIFI_PUTEST_GP_WRITE16:
- r = unifi_putest_gp_write16(priv, (unsigned char*)arg);
- break;
- default:
- unifi_error(priv, "UNIFI_PUTEST: Unknown Command (%d)\n", putest_cmd);
- r = -EINVAL;
- goto out;
- }
-
- break;
- case UNIFI_BUILD_TYPE:
- unifi_trace(priv, UDBG2, "UNIFI_BUILD_TYPE userspace=%s\n", build_type_to_string(*(unsigned char*)arg));
-#ifndef CSR_SUPPORT_WEXT_AP
- if (UNIFI_BUILD_AP == *(unsigned char*)arg)
- {
- unifi_error(priv, "Userspace has AP support, which is incompatible\n");
- }
-#endif
-
-#ifndef CSR_SUPPORT_WEXT
- if (UNIFI_BUILD_WEXT == *(unsigned char*)arg)
- {
- unifi_error(priv, "Userspace has WEXT support, which is incompatible\n");
- }
-#endif
- break;
- case UNIFI_INIT_HW:
- unifi_trace(priv, UDBG2, "UNIFI_INIT_HW.\n");
- priv->init_progress = UNIFI_INIT_NONE;
-
-#if defined(CSR_SUPPORT_WEXT) || defined (CSR_NATIVE_LINUX)
- /* At this point we are ready to start the SME. */
- r = sme_mgt_wifi_on(priv);
- if (r) {
- goto out;
- }
-#endif
-
- break;
-
- case UNIFI_INIT_NETDEV:
- {
- /* get the proper interfaceTagId */
- u16 interfaceTag=0;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
-
- dev = priv->netdev[interfaceTag];
- unifi_trace(priv, UDBG2, "UNIFI_INIT_NETDEV.\n");
-
- if (copy_from_user((void*)dev->dev_addr, (void*)arg, 6)) {
- r = -EFAULT;
- goto out;
- }
-
- /* Attach the network device to the stack */
- if (!interfacePriv->netdev_registered)
- {
- r = uf_register_netdev(priv,interfaceTag);
- if (r) {
- unifi_error(priv, "Failed to register the network device.\n");
- goto out;
- }
- }
-
- /* Apply scheduled interrupt mode, if requested by module param */
- if (run_bh_once != -1) {
- unifi_set_interrupt_mode(priv->card, (u32)run_bh_once);
- }
-
- priv->init_progress = UNIFI_INIT_COMPLETED;
-
- /* Firmware initialisation is complete, so let the SDIO bus
-             * clock be raised when convenient to the core.
- */
- unifi_request_max_sdio_clock(priv->card);
-
-#ifdef CSR_SUPPORT_WEXT
- /* Notify the Android wpa_supplicant that we are ready */
- wext_send_started_event(priv);
-#endif
-
- unifi_info(priv, "UniFi ready\n");
-
-#ifdef ANDROID_BUILD
- /* Release the wakelock */
- unifi_trace(priv, UDBG1, "netdev_init: release wake lock\n");
- wake_unlock(&unifi_sdio_wake_lock);
-#endif
-#ifdef CSR_NATIVE_SOFTMAC /* For softmac dev, force-enable the network interface rather than wait for a connected-ind */
- {
- struct net_device *dev = priv->netdev[interfaceTag];
-#ifdef CSR_SUPPORT_WEXT
- interfacePriv->wait_netdev_change = TRUE;
-#endif
- netif_carrier_on(dev);
- }
-#endif
- }
- break;
- case UNIFI_GET_INIT_STATUS:
- unifi_trace(priv, UDBG2, "UNIFI_GET_INIT_STATUS.\n");
- if (put_user(priv->init_progress, (int*)arg))
- {
- printk(KERN_ERR "UNIFI_GET_INIT_STATUS: Failed to copy to user\n");
- r = -EFAULT;
- goto out;
- }
- break;
-
- case UNIFI_KICK:
- unifi_trace(priv, UDBG4, "Kick UniFi\n");
- unifi_sdio_interrupt_handler(priv->card);
- break;
-
- case UNIFI_SET_DEBUG:
- unifi_debug = arg;
- unifi_trace(priv, UDBG4, "unifi_debug set to %d\n", unifi_debug);
- break;
-
- case UNIFI_SET_TRACE:
- /* no longer supported */
- r = -EINVAL;
- break;
-
-
- case UNIFI_SET_UDI_LOG_MASK:
- {
- unifiio_filter_t udi_filter;
- uint16_t *sig_ids_addr;
-#define UF_MAX_SIG_IDS 128 /* Impose a sensible limit */
-
- if (copy_from_user((void*)(&udi_filter), (void*)arg, sizeof(udi_filter))) {
- r = -EFAULT;
- goto out;
- }
- if ((udi_filter.action < UfSigFil_AllOn) ||
- (udi_filter.action > UfSigFil_SelectOff))
- {
- printk(KERN_WARNING
- "UNIFI_SET_UDI_LOG_MASK: Bad action value: %d\n",
- udi_filter.action);
- r = -EINVAL;
- goto out;
- }
- /* No signal list for "All" actions */
- if ((udi_filter.action == UfSigFil_AllOn) ||
- (udi_filter.action == UfSigFil_AllOff))
- {
- udi_filter.num_sig_ids = 0;
- }
-
- if (udi_filter.num_sig_ids > UF_MAX_SIG_IDS) {
- printk(KERN_WARNING
- "UNIFI_SET_UDI_LOG_MASK: too many signal ids (%d, max %d)\n",
- udi_filter.num_sig_ids, UF_MAX_SIG_IDS);
- r = -EINVAL;
- goto out;
- }
-
- /* Copy in signal id list if given */
- if (udi_filter.num_sig_ids > 0) {
- /* Preserve userspace address of sig_ids array */
- sig_ids_addr = udi_filter.sig_ids;
- /* Allocate kernel memory for sig_ids and copy to it */
- udi_filter.sig_ids =
- kmalloc(udi_filter.num_sig_ids * sizeof(uint16_t), GFP_KERNEL);
- if (!udi_filter.sig_ids) {
- r = -ENOMEM;
- goto out;
- }
- if (copy_from_user((void*)udi_filter.sig_ids,
- (void*)sig_ids_addr,
- udi_filter.num_sig_ids * sizeof(uint16_t)))
- {
- kfree(udi_filter.sig_ids);
- r = -EFAULT;
- goto out;
- }
- }
-
- udi_set_log_filter(pcli, &udi_filter);
-
- if (udi_filter.num_sig_ids > 0) {
- kfree(udi_filter.sig_ids);
- }
- }
- break;
-
- case UNIFI_SET_AMP_ENABLE:
- unifi_trace(priv, UDBG4, "UniFi Set AMP Enable\n");
- if (get_user(int_param, (int*)arg))
- {
- unifi_error(priv, "UNIFI_SET_AMP_ENABLE: Failed to copy from user\n");
- r = -EFAULT;
- goto out;
- }
-
- if (int_param) {
- priv->amp_client = pcli;
- } else {
- priv->amp_client = NULL;
- }
-
- int_param = 0;
- buf = (u8*)&int_param;
- buf[0] = UNIFI_SOFT_COMMAND_Q_LENGTH - 1;
- buf[1] = UNIFI_SOFT_TRAFFIC_Q_LENGTH - 1;
- if (copy_to_user((void*)arg, &int_param, sizeof(int))) {
- r = -EFAULT;
- goto out;
- }
- break;
-
- case UNIFI_SET_UDI_SNAP_MASK:
- {
- unifiio_snap_filter_t snap_filter;
-
- if (copy_from_user((void*)(&snap_filter), (void*)arg, sizeof(snap_filter))) {
- r = -EFAULT;
- goto out;
- }
-
- if (pcli->snap_filter.count) {
- pcli->snap_filter.count = 0;
- kfree(pcli->snap_filter.protocols);
- }
-
- if (snap_filter.count == 0) {
- break;
- }
-
- pcli->snap_filter.protocols = kmalloc(snap_filter.count * sizeof(u16), GFP_KERNEL);
- if (!pcli->snap_filter.protocols) {
- r = -ENOMEM;
- goto out;
- }
- if (copy_from_user((void*)pcli->snap_filter.protocols,
- (void*)snap_filter.protocols,
- snap_filter.count * sizeof(u16)))
- {
- kfree(pcli->snap_filter.protocols);
- r = -EFAULT;
- goto out;
- }
-
- pcli->snap_filter.count = snap_filter.count;
-
- }
- break;
-
- case UNIFI_SME_PRESENT:
- {
- u8 ind;
- unifi_trace(priv, UDBG4, "UniFi SME Present IOCTL.\n");
- if (copy_from_user((void*)(&int_param), (void*)arg, sizeof(int)))
- {
- printk(KERN_ERR "UNIFI_SME_PRESENT: Failed to copy from user\n");
- r = -EFAULT;
- goto out;
- }
-
- priv->sme_is_present = int_param;
- if (priv->sme_is_present == 1) {
- ind = CONFIG_SME_PRESENT;
- } else {
- ind = CONFIG_SME_NOT_PRESENT;
- }
- /* Send an indication to the helper app. */
- ul_log_config_ind(priv, &ind, sizeof(u8));
- }
- break;
-
- case UNIFI_CFG_PERIOD_TRAFFIC:
- {
-#if (defined CSR_SUPPORT_SME) && (defined CSR_SUPPORT_WEXT)
- CsrWifiSmeCoexConfig coexConfig;
-#endif /* CSR_SUPPORT_SME && CSR_SUPPORT_WEXT */
- unifi_trace(priv, UDBG4, "UniFi Configure Periodic Traffic.\n");
-#if (defined CSR_SUPPORT_SME) && (defined CSR_SUPPORT_WEXT)
- if (copy_from_user((void*)(&uchar_param), (void*)arg, sizeof(unsigned char))) {
- unifi_error(priv, "UNIFI_CFG_PERIOD_TRAFFIC: Failed to copy from user\n");
- r = -EFAULT;
- goto out;
- }
-
- if (uchar_param == 0) {
- r = sme_mgt_coex_config_get(priv, &coexConfig);
- if (r) {
- unifi_error(priv, "UNIFI_CFG_PERIOD_TRAFFIC: Get unifi_CoexInfoValue failed.\n");
- goto out;
- }
- if (copy_to_user((void*)(arg + 1),
- (void*)&coexConfig,
- sizeof(CsrWifiSmeCoexConfig))) {
- r = -EFAULT;
- goto out;
- }
- goto out;
- }
-
- if (copy_from_user((void*)(&coex_config), (void*)(arg + 1), sizeof(CsrWifiSmeCoexConfig)))
- {
- unifi_error(priv, "UNIFI_CFG_PERIOD_TRAFFIC: Failed to copy from user\n");
- r = -EFAULT;
- goto out;
- }
-
- coexConfig = coex_config;
- r = sme_mgt_coex_config_set(priv, &coexConfig);
- if (r) {
- unifi_error(priv, "UNIFI_CFG_PERIOD_TRAFFIC: Set unifi_CoexInfoValue failed.\n");
- goto out;
- }
-
-#endif /* CSR_SUPPORT_SME && CSR_SUPPORT_WEXT */
- break;
- }
- case UNIFI_CFG_UAPSD_TRAFFIC:
- unifi_trace(priv, UDBG4, "UniFi Configure U-APSD Mask.\n");
-#if (defined CSR_SUPPORT_SME) && (defined CSR_SUPPORT_WEXT)
- if (copy_from_user((void*)(&uchar_param), (void*)arg, sizeof(unsigned char))) {
- unifi_error(priv, "UNIFI_CFG_UAPSD_TRAFFIC: Failed to copy from user\n");
- r = -EFAULT;
- goto out;
- }
- unifi_trace(priv, UDBG4, "New U-APSD Mask: 0x%x\n", uchar_param);
-#endif /* CSR_SUPPORT_SME && CSR_SUPPORT_WEXT */
- break;
-
-#ifndef UNIFI_DISABLE_COREDUMP
- case UNIFI_COREDUMP_GET_REG:
- unifi_trace(priv, UDBG4, "Mini-coredump data request\n");
- {
- unifiio_coredump_req_t dump_req; /* Public OS layer structure */
- unifi_coredump_req_t priv_req; /* Private HIP structure */
-
- if (copy_from_user((void*)(&dump_req), (void*)arg, sizeof(dump_req))) {
- r = -EFAULT;
- goto out;
- }
- memset(&priv_req, 0, sizeof(priv_req));
- priv_req.index = dump_req.index;
- priv_req.offset = dump_req.offset;
-
- /* Convert OS-layer's XAP memory space ID to HIP's ID in case they differ */
- switch (dump_req.space) {
- case UNIFIIO_COREDUMP_MAC_REG: priv_req.space = UNIFI_COREDUMP_MAC_REG; break;
- case UNIFIIO_COREDUMP_PHY_REG: priv_req.space = UNIFI_COREDUMP_PHY_REG; break;
- case UNIFIIO_COREDUMP_SH_DMEM: priv_req.space = UNIFI_COREDUMP_SH_DMEM; break;
- case UNIFIIO_COREDUMP_MAC_DMEM: priv_req.space = UNIFI_COREDUMP_MAC_DMEM; break;
- case UNIFIIO_COREDUMP_PHY_DMEM: priv_req.space = UNIFI_COREDUMP_PHY_DMEM; break;
- case UNIFIIO_COREDUMP_TRIGGER_MAGIC: priv_req.space = UNIFI_COREDUMP_TRIGGER_MAGIC; break;
- default:
- r = -EINVAL;
- goto out;
- }
-
- if (priv_req.space == UNIFI_COREDUMP_TRIGGER_MAGIC) {
- /* Force a coredump grab now */
- unifi_trace(priv, UDBG2, "UNIFI_COREDUMP_GET_REG: Force capture\n");
- csrResult = unifi_coredump_capture(priv->card, &priv_req);
- r = CsrHipResultToStatus(csrResult);
- unifi_trace(priv, UDBG5, "UNIFI_COREDUMP_GET_REG: status %d\n", r);
- } else {
- /* Retrieve the appropriate register entry */
- csrResult = unifi_coredump_get_value(priv->card, &priv_req);
- r = CsrHipResultToStatus(csrResult);
- if (r) {
- unifi_trace(priv, UDBG5, "UNIFI_COREDUMP_GET_REG: Status %d\n", r);
- goto out;
- }
- /* Update the OS-layer structure with values returned in the private */
- dump_req.value = priv_req.value;
- dump_req.timestamp = priv_req.timestamp;
- dump_req.requestor = priv_req.requestor;
- dump_req.serial = priv_req.serial;
- dump_req.chip_ver = priv_req.chip_ver;
- dump_req.fw_ver = priv_req.fw_ver;
- dump_req.drv_build = 0;
-
- unifi_trace(priv, UDBG6,
- "Dump: %d (seq %d): V:0x%04x (%d) @0x%02x:%04x = 0x%04x\n",
- dump_req.index, dump_req.serial,
- dump_req.chip_ver, dump_req.drv_build,
- dump_req.space, dump_req.offset, dump_req.value);
- }
- if (copy_to_user((void*)arg, (void*)&dump_req, sizeof(dump_req))) {
- r = -EFAULT;
- goto out;
- }
- }
- break;
-#endif
- default:
- r = -EINVAL;
- }
-
-out:
- return (long)r;
-} /* unifi_ioctl() */
-
-
-
-static unsigned int
-unifi_poll(struct file *filp, poll_table *wait)
-{
- ul_client_t *pcli = (ul_client_t*)filp->private_data;
- unsigned int mask = 0;
- int ready;
-
- ready = !list_empty(&pcli->udi_log);
-
- poll_wait(filp, &pcli->udi_wq, wait);
-
- if (ready) {
- mask |= POLLIN | POLLRDNORM; /* readable */
- }
-
- return mask;
-} /* unifi_poll() */
-
-
-
-/*
- * ---------------------------------------------------------------------------
- * udi_set_log_filter
- *
- * Configure the bit mask that determines which signal primitives are
- * passed to the logging process.
- *
- * Arguments:
- * pcli Pointer to the client to configure.
- * udi_filter Pointer to a unifiio_filter_t containing instructions.
- *
- * Returns:
- * None.
- *
- * Notes:
- * SigGetFilterPos() returns a 32-bit value that contains an index and a
- * mask for accessing a signal_filter array. The top 16 bits specify an
- * index into a signal_filter, the bottom 16 bits specify a mask to
- * apply.
- * ---------------------------------------------------------------------------
- */
-static void
-udi_set_log_filter(ul_client_t *pcli, unifiio_filter_t *udi_filter)
-{
- u32 filter_pos;
- int i;
-
- if (udi_filter->action == UfSigFil_AllOn)
- {
- for (i = 0; i < SIG_FILTER_SIZE; i++) {
- pcli->signal_filter[i] = 0xFFFF;
- }
- }
- else if (udi_filter->action == UfSigFil_AllOff)
- {
- for (i = 0; i < SIG_FILTER_SIZE; i++) {
- pcli->signal_filter[i] = 0;
- }
- }
- else if (udi_filter->action == UfSigFil_SelectOn)
- {
- for (i = 0; i < udi_filter->num_sig_ids; i++) {
- filter_pos = SigGetFilterPos(udi_filter->sig_ids[i]);
- if (filter_pos == 0xFFFFFFFF)
- {
- printk(KERN_WARNING
- "Unrecognised signal id (0x%X) specifed in logging filter\n",
- udi_filter->sig_ids[i]);
- } else {
- pcli->signal_filter[filter_pos >> 16] |= (filter_pos & 0xFFFF);
- }
- }
- }
- else if (udi_filter->action == UfSigFil_SelectOff)
- {
- for (i = 0; i < udi_filter->num_sig_ids; i++) {
- filter_pos = SigGetFilterPos(udi_filter->sig_ids[i]);
- if (filter_pos == 0xFFFFFFFF)
- {
- printk(KERN_WARNING
- "Unrecognised signal id (0x%X) specifed in logging filter\n",
- udi_filter->sig_ids[i]);
- } else {
- pcli->signal_filter[filter_pos >> 16] &= ~(filter_pos & 0xFFFF);
- }
- }
- }
-
-} /* udi_set_log_filter() */
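/*
 * Illustrative companion (a sketch, not present in the original source):
 * testing a signal id against the filter that udi_set_log_filter() above
 * maintains. As described in the Notes above, SigGetFilterPos() packs an
 * array index in the top 16 bits and a bit mask in the bottom 16 bits, and
 * 0xFFFFFFFF marks an unknown signal id.
 */
static inline int udi_signal_enabled(const ul_client_t *pcli, u16 sig_id)
{
    u32 pos = SigGetFilterPos(sig_id);

    if (pos == 0xFFFFFFFF)
        return 0;   /* unknown signal id: never logged */
    return (pcli->signal_filter[pos >> 16] & (pos & 0xFFFF)) != 0;
}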
-
-
-/*
- * ---------------------------------------------------------------------------
- * udi_log_event
- *
- * Callback function to be registered as the UDI hook callback.
- * Copies the signal content into a new udi_log_t struct and adds
- * it to the read queue for this UDI client.
- *
- * Arguments:
- * pcli A pointer to the client instance.
- * signal Pointer to the received signal.
- * signal_len Size of the signal structure in bytes.
- * bulkdata Pointers to any associated bulk data.
- * dir Direction of the signal. Zero means from host,
- * non-zero means to host.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-void
-udi_log_event(ul_client_t *pcli,
- const u8 *signal, int signal_len,
- const bulk_data_param_t *bulkdata,
- int dir)
-{
- udi_log_t *logptr;
- u8 *p;
- int i;
- int total_len;
- udi_msg_t *msgptr;
- u32 filter_pos;
-#ifdef OMNICLI_LINUX_EXTRA_LOG
- static volatile unsigned int printk_cpu = UINT_MAX;
- unsigned long long t;
- unsigned long nanosec_rem;
- unsigned long n_1000;
-#endif
-
- /* Just a sanity check */
- if ((signal == NULL) || (signal_len <= 0)) {
- return;
- }
-
-#ifdef CSR_WIFI_HIP_DEBUG_OFFLINE
- /* When HIP offline signal logging is enabled, omnicli cannot run */
- if (log_hip_signals)
- {
- /* Add timestamp */
- if (log_hip_signals & UNIFI_LOG_HIP_SIGNALS_FILTER_TIMESTAMP)
- {
- int timestamp = jiffies_to_msecs(jiffies);
- unifi_debug_log_to_buf("T:");
- unifi_debug_log_to_buf("%04X%04X ", *(((u16*)&timestamp) + 1),
- *(u16*)&timestamp);
- }
-
- /* Add signal */
- unifi_debug_log_to_buf("S%s:%04X R:%04X D:%04X ",
- dir ? "T" : "F",
- *(u16*)signal,
- *(u16*)(signal + 2),
- *(u16*)(signal + 4));
- unifi_debug_hex_to_buf(signal + 6, signal_len - 6);
-
- /* Add bulk data (assume 1 bulk data per signal) */
- if ((log_hip_signals & UNIFI_LOG_HIP_SIGNALS_FILTER_BULKDATA) &&
- (bulkdata->d[0].data_length > 0))
- {
- unifi_debug_log_to_buf("\nD:");
- unifi_debug_hex_to_buf(bulkdata->d[0].os_data_ptr, bulkdata->d[0].data_length);
- }
- unifi_debug_log_to_buf("\n");
-
- return;
- }
-#endif
-
-#ifdef CSR_NATIVE_LINUX
- uf_native_process_udi_signal(pcli, signal, signal_len, bulkdata, dir);
-#endif
-
- /*
- * Apply the logging filter - only report signals that have their
- * bit set in the filter mask.
- */
- filter_pos = SigGetFilterPos(GET_SIGNAL_ID(signal));
-
- if ((filter_pos != 0xFFFFFFFF) &&
- ((pcli->signal_filter[filter_pos >> 16] & (filter_pos & 0xFFFF)) == 0))
- {
- /* Signal is not wanted by client */
- return;
- }
-
-
- /* Calculate the buffer we need to store signal plus bulk data */
- total_len = signal_len;
- for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; i++) {
- total_len += bulkdata->d[i].data_length;
- }
-
- /* Allocate log structure plus actual signal. */
- logptr = kmalloc(sizeof(udi_log_t) + total_len, GFP_KERNEL);
-
- if (logptr == NULL) {
- printk(KERN_ERR
- "Failed to allocate %lu bytes for a UDI log record\n",
- (long unsigned int)(sizeof(udi_log_t) + total_len));
- return;
- }
-
- /* Fill in udi_log struct */
- INIT_LIST_HEAD(&logptr->q);
- msgptr = &logptr->msg;
- msgptr->length = sizeof(udi_msg_t) + total_len;
-#ifdef OMNICLI_LINUX_EXTRA_LOG
- t = cpu_clock(printk_cpu);
- nanosec_rem = do_div(t, 1000000000);
- n_1000 = nanosec_rem/1000;
-    msgptr->timestamp = (t << 10) | ((unsigned long)(n_1000 >> 10) & 0x3ff);
-#else
- msgptr->timestamp = jiffies_to_msecs(jiffies);
-#endif
- msgptr->direction = dir;
- msgptr->signal_length = signal_len;
-
- /* Copy signal and bulk data to the log */
- p = (u8 *)(msgptr + 1);
- memcpy(p, signal, signal_len);
- p += signal_len;
-
- /* Append any bulk data */
- for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; i++) {
- int len = bulkdata->d[i].data_length;
-
- /*
- * Len here might not be the same as the length in the bulk data slot.
- * The slot length will always be even, but len could be odd.
- */
- if (len > 0) {
- if (bulkdata->d[i].os_data_ptr) {
- memcpy(p, bulkdata->d[i].os_data_ptr, len);
- } else {
- memset(p, 0, len);
- }
- p += len;
- }
- }
-
- /* Add to tail of log queue */
- if (down_interruptible(&pcli->udi_sem)) {
- printk(KERN_WARNING "udi_log_event_q: Failed to get udi sem\n");
- kfree(logptr);
- return;
- }
- list_add_tail(&logptr->q, &pcli->udi_log);
- up(&pcli->udi_sem);
-
- /* Wake any waiting user process */
- wake_up_interruptible(&pcli->udi_wq);
-
-} /* udi_log_event() */
-
-#ifdef CSR_SME_USERSPACE
-int
-uf_sme_queue_message(unifi_priv_t *priv, u8 *buffer, int length)
-{
- udi_log_t *logptr;
- udi_msg_t *msgptr;
- u8 *p;
-
- /* Just a sanity check */
- if ((buffer == NULL) || (length <= 0)) {
- return -EINVAL;
- }
-
- /* Allocate log structure plus actual signal. */
- logptr = kmalloc(sizeof(udi_log_t) + length, GFP_ATOMIC);
- if (logptr == NULL) {
- unifi_error(priv, "Failed to allocate %d bytes for an SME message\n",
- sizeof(udi_log_t) + length);
- kfree(buffer);
- return -ENOMEM;
- }
-
- /* Fill in udi_log struct */
- INIT_LIST_HEAD(&logptr->q);
- msgptr = &logptr->msg;
- msgptr->length = sizeof(udi_msg_t) + length;
- msgptr->signal_length = length;
-
- /* Copy signal and bulk data to the log */
- p = (u8 *)(msgptr + 1);
- memcpy(p, buffer, length);
-
- /* Add to tail of log queue */
- down(&udi_mutex);
- if (priv->sme_cli == NULL) {
- kfree(logptr);
- kfree(buffer);
- up(&udi_mutex);
- unifi_info(priv, "Message for the SME dropped, SME has gone away\n");
- return 0;
- }
-
- down(&priv->sme_cli->udi_sem);
- list_add_tail(&logptr->q, &priv->sme_cli->udi_log);
- up(&priv->sme_cli->udi_sem);
-
- /* Wake any waiting user process */
- wake_up_interruptible(&priv->sme_cli->udi_wq);
- up(&udi_mutex);
-
- /* It is our responsibility to free the buffer allocated in build_packed_*() */
- kfree(buffer);
-
- return 0;
-
-} /* uf_sme_queue_message() */
-#endif
-
-/*
- ****************************************************************************
- *
- * Driver instantiation
- *
- ****************************************************************************
- */
-static const struct file_operations unifi_fops = {
- .owner = THIS_MODULE,
- .open = unifi_open,
- .release = unifi_release,
- .read = unifi_read,
- .write = unifi_write,
- .unlocked_ioctl = unifi_ioctl,
- .poll = unifi_poll,
-};
-
-static dev_t unifi_first_devno;
-static struct class *unifi_class;
-
-
-int uf_create_device_nodes(unifi_priv_t *priv, int bus_id)
-{
- dev_t devno;
- int r;
-
- cdev_init(&priv->unifi_cdev, &unifi_fops);
-
- /* cdev_init() should set the cdev owner, but it does not */
- priv->unifi_cdev.owner = THIS_MODULE;
-
- devno = MKDEV(MAJOR(unifi_first_devno),
- MINOR(unifi_first_devno) + (bus_id * 2));
- r = cdev_add(&priv->unifi_cdev, devno, 1);
- if (r) {
- return r;
- }
-
-#ifdef SDIO_EXPORTS_STRUCT_DEVICE
- if (!device_create(unifi_class, priv->unifi_device,
- devno, priv, "unifi%d", bus_id)) {
-#else
- priv->unifi_device = device_create(unifi_class, NULL,
- devno, priv, "unifi%d", bus_id);
- if (priv->unifi_device == NULL) {
-#endif /* SDIO_EXPORTS_STRUCT_DEVICE */
-
- cdev_del(&priv->unifi_cdev);
- return -EINVAL;
- }
-
- cdev_init(&priv->unifiudi_cdev, &unifi_fops);
-
- /* cdev_init() should set the cdev owner, but it does not */
- priv->unifiudi_cdev.owner = THIS_MODULE;
-
- devno = MKDEV(MAJOR(unifi_first_devno),
- MINOR(unifi_first_devno) + (bus_id * 2) + 1);
- r = cdev_add(&priv->unifiudi_cdev, devno, 1);
- if (r) {
- device_destroy(unifi_class, priv->unifi_cdev.dev);
- cdev_del(&priv->unifi_cdev);
- return r;
- }
-
- if (!device_create(unifi_class,
-#ifdef SDIO_EXPORTS_STRUCT_DEVICE
- priv->unifi_device,
-#else
- NULL,
-#endif /* SDIO_EXPORTS_STRUCT_DEVICE */
- devno, priv, "unifiudi%d", bus_id)) {
- device_destroy(unifi_class, priv->unifi_cdev.dev);
- cdev_del(&priv->unifiudi_cdev);
- cdev_del(&priv->unifi_cdev);
- return -EINVAL;
- }
-
- return 0;
-}
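/*
 * Illustrative result (derived from the MKDEV arithmetic and device names
 * used above; the /dev paths assume a standard udev setup): each UniFi
 * instance gets two character devices, e.g. bus_id 0 creates /dev/unifi0 at
 * minor base + 0 and /dev/unifiudi0 at minor base + 1, while bus_id 1 uses
 * minors base + 2 and base + 3.
 */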
-
-
-void uf_destroy_device_nodes(unifi_priv_t *priv)
-{
- device_destroy(unifi_class, priv->unifiudi_cdev.dev);
- device_destroy(unifi_class, priv->unifi_cdev.dev);
- cdev_del(&priv->unifiudi_cdev);
- cdev_del(&priv->unifi_cdev);
-}
-
-
-
-/*
- * ----------------------------------------------------------------
- * uf_create_debug_device
- *
- * Allocates device numbers for unifi character device nodes
- * and creates a unifi class in sysfs
- *
- * Arguments:
- * fops Pointer to the char device operations structure.
- *
- * Returns:
- * 0 on success, -ve error code on error.
- * ----------------------------------------------------------------
- */
-static int
-uf_create_debug_device(const struct file_operations *fops)
-{
- int ret;
-
- /* Allocate two device numbers for each device. */
- ret = alloc_chrdev_region(&unifi_first_devno, 0, MAX_UNIFI_DEVS*2, UNIFI_NAME);
- if (ret) {
- unifi_error(NULL, "Failed to add alloc dev numbers: %d\n", ret);
- return ret;
- }
-
- /* Create a UniFi class */
- unifi_class = class_create(THIS_MODULE, UNIFI_NAME);
- if (IS_ERR(unifi_class)) {
- unifi_error(NULL, "Failed to create UniFi class\n");
-
- /* Release device numbers */
- unregister_chrdev_region(unifi_first_devno, MAX_UNIFI_DEVS*2);
- unifi_first_devno = 0;
- return -EINVAL;
- }
-
- return 0;
-} /* uf_create_debug_device() */
-
-
-/*
- * ----------------------------------------------------------------
- * uf_remove_debug_device
- *
- * Destroys the unifi class and releases the allocated
- * device numbers for unifi character device nodes.
- *
- * Arguments:
- *
- * Returns:
- * ----------------------------------------------------------------
- */
-static void
-uf_remove_debug_device(void)
-{
- /* Destroy the UniFi class */
- class_destroy(unifi_class);
-
- /* Release device numbers */
- unregister_chrdev_region(unifi_first_devno, MAX_UNIFI_DEVS*2);
- unifi_first_devno = 0;
-
-} /* uf_remove_debug_device() */
-
-
-/*
- * ---------------------------------------------------------------------------
- *
- * Module loading.
- *
- * ---------------------------------------------------------------------------
- */
-int __init
-unifi_load(void)
-{
- int r;
-
- printk("UniFi SDIO Driver: %s %s %s\n",
- CSR_WIFI_VERSION,
- __DATE__, __TIME__);
-
-#ifdef CSR_SME_USERSPACE
-#ifdef CSR_SUPPORT_WEXT
- printk("CSR SME with WEXT support\n");
-#else
- printk("CSR SME no WEXT support\n");
-#endif /* CSR_SUPPORT_WEXT */
-#endif /* CSR_SME_USERSPACE */
-
-#ifdef CSR_NATIVE_LINUX
-#ifdef CSR_SUPPORT_WEXT
-#error WEXT unsupported in the native driver
-#endif
- printk("CSR native no WEXT support\n");
-#endif
-#ifdef CSR_WIFI_SPLIT_PATCH
- printk("Split patch support\n");
-#endif
- printk("Kernel %d.%d.%d\n",
- ((LINUX_VERSION_CODE) >> 16) & 0xff,
- ((LINUX_VERSION_CODE) >> 8) & 0xff,
- (LINUX_VERSION_CODE) & 0xff);
- /*
- * Instantiate the /dev/unifi* device nodes.
- * We must do this before registering with the SDIO driver because it
- * will immediately call the "insert" callback if the card is
- * already present.
- */
- r = uf_create_debug_device(&unifi_fops);
- if (r) {
- return r;
- }
-
- /* Now register with the SDIO driver */
- r = uf_sdio_load();
- if (r) {
- uf_remove_debug_device();
- return r;
- }
-
- if (sdio_block_size > -1) {
- unifi_info(NULL, "sdio_block_size %d\n", sdio_block_size);
- }
-
- if (sdio_byte_mode) {
- unifi_info(NULL, "sdio_byte_mode\n");
- }
-
- if (disable_power_control) {
- unifi_info(NULL, "disable_power_control\n");
- }
-
- if (disable_hw_reset) {
- unifi_info(NULL, "disable_hw_reset\n");
- }
-
- if (enable_wol) {
- unifi_info(NULL, "enable_wol %d\n", enable_wol);
- }
-
- if (run_bh_once != -1) {
- unifi_info(NULL, "run_bh_once %d\n", run_bh_once);
- }
-
- return 0;
-} /* unifi_load() */
-
-
-void __exit
-unifi_unload(void)
-{
- /* The SDIO remove hook will call unifi_disconnect(). */
- uf_sdio_unload();
-
- uf_remove_debug_device();
-
-} /* unifi_unload() */
-
-module_init(unifi_load);
-module_exit(unifi_unload);
-
-MODULE_DESCRIPTION("UniFi Device driver");
-MODULE_AUTHOR("Cambridge Silicon Radio Ltd.");
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/staging/csr/firmware.c b/drivers/staging/csr/firmware.c
deleted file mode 100644
index b42a4d6..0000000
--- a/drivers/staging/csr/firmware.c
+++ /dev/null
@@ -1,396 +0,0 @@
-/*
- * ---------------------------------------------------------------------------
- * FILE: firmware.c
- *
- * PURPOSE:
- * Implements the f/w related HIP core lib API.
- * It is part of the porting exercise in Linux.
- *
- * Also, it contains example code for reading the loader and f/w files
- *      from userspace and starting the SME in Linux.
- *
- * Copyright (C) 2005-2009 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ---------------------------------------------------------------------------
- */
-#include <linux/kmod.h>
-#include <linux/vmalloc.h>
-#include <linux/firmware.h>
-#include <asm/uaccess.h>
-#include "csr_wifi_hip_unifi.h"
-#include "csr_wifi_hip_unifi_udi.h"
-#include "unifiio.h"
-#include "unifi_priv.h"
-
-/*
- * ---------------------------------------------------------------------------
- *
- * F/W download. Part of the HIP core API
- *
- * ---------------------------------------------------------------------------
- */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_fw_read_start
- *
- * Returns a structure to be passed in unifi_fw_read().
- * This structure is an OS specific description of the f/w file.
- *      In the Linux implementation it is a buffer with the f/w and its length.
- *      The HIP driver calls this function to request the loader or
- *      the firmware file.
- * The structure pointer can be freed when unifi_fw_read_stop() is called.
- *
- * Arguments:
- * ospriv Pointer to driver context.
- * is_fw Type of firmware to retrieve
- * info Versions information. Can be used to determine
- * the appropriate f/w file to load.
- *
- * Returns:
- *      Pointer to an OS-specific f/w description on success, NULL otherwise.
- *
- * ---------------------------------------------------------------------------
- */
-void*
-unifi_fw_read_start(void *ospriv, s8 is_fw, const card_info_t *info)
-{
- unifi_priv_t *priv = (unifi_priv_t*)ospriv;
- CSR_UNUSED(info);
-
- if (is_fw == UNIFI_FW_STA) {
- /* F/w may have been released after a previous successful download. */
- if (priv->fw_sta.dl_data == NULL) {
- unifi_trace(priv, UDBG2, "Attempt reload of sta f/w\n");
- uf_request_firmware_files(priv, UNIFI_FW_STA);
- }
- /* Set up callback struct for readfunc() */
- if (priv->fw_sta.dl_data != NULL) {
- return &priv->fw_sta;
- }
-
- } else {
- unifi_error(priv, "downloading firmware... unknown request: %d\n", is_fw);
- }
-
- return NULL;
-} /* unifi_fw_read_start() */
-
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_fw_read_stop
- *
- * Called when the HIP driver has finished using the loader or
- * the firmware file.
- * The firmware buffer may be released now.
- *
- * Arguments:
- * ospriv Pointer to driver context.
- * dlpriv The pointer returned by unifi_fw_read_start()
- *
- * ---------------------------------------------------------------------------
- */
-void
-unifi_fw_read_stop(void *ospriv, void *dlpriv)
-{
- unifi_priv_t *priv = (unifi_priv_t*)ospriv;
- struct dlpriv *dl_struct = (struct dlpriv *)dlpriv;
-
- if (dl_struct != NULL) {
- if (dl_struct->dl_data != NULL) {
- unifi_trace(priv, UDBG2, "Release f/w buffer %p, %d bytes\n",
- dl_struct->dl_data, dl_struct->dl_len);
- }
- uf_release_firmware(priv, dl_struct);
- }
-
-} /* unifi_fw_read_stop() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_fw_open_buffer
- *
- * Returns a handle for a buffer dynamically allocated by the driver,
- *  e.g. into which a firmware file may have been converted from another format,
- *  as is the case with some production test images.
- *
- * The handle may then be used by unifi_fw_read() to access the contents of
- * the buffer.
- *
- * Arguments:
- * ospriv Pointer to driver context.
- * fwbuf Buffer containing firmware image
- * len Length of buffer in bytes
- *
- * Returns
- * Handle for buffer, or NULL on error
- * ---------------------------------------------------------------------------
- */
-void *
-unifi_fw_open_buffer(void *ospriv, void *fwbuf, u32 len)
-{
- unifi_priv_t *priv = (unifi_priv_t*)ospriv;
-
- if (fwbuf == NULL) {
- return NULL;
- }
- priv->fw_conv.dl_data = fwbuf;
- priv->fw_conv.dl_len = len;
- priv->fw_conv.fw_desc = NULL; /* No OS f/w resource is associated */
-
- return &priv->fw_conv;
-}
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_fw_close_buffer
- *
- * Releases any handle for a buffer dynamically allocated by the driver,
- *  e.g. into which a firmware file may have been converted from another format,
- *  as is the case with some production test images.
- *
- *
- * Arguments:
- * ospriv Pointer to driver context.
- * fwbuf Buffer containing firmware image
- *
- * Returns
- *      None.
- * ---------------------------------------------------------------------------
- */
-void unifi_fw_close_buffer(void *ospriv, void *fwbuf)
-{
-}
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_fw_read
- *
- * The HIP driver calls this function to ask for a part of the loader or
- * the firmware file.
- *
- * Arguments:
- * ospriv Pointer to driver context.
- * arg The pointer returned by unifi_fw_read_start().
- * offset The offset in the file to return from.
- * buf A buffer to store the requested data.
- * len The size of the buf and the size of the requested data.
- *
- * Returns
- * The number of bytes read from the firmware image, or -ve on error
- * ---------------------------------------------------------------------------
- */
-s32
-unifi_fw_read(void *ospriv, void *arg, u32 offset, void *buf, u32 len)
-{
- const struct dlpriv *dlpriv = arg;
-
- if (offset >= dlpriv->dl_len) {
- /* at end of file */
- return 0;
- }
-
- if ((offset + len) > dlpriv->dl_len) {
- /* attempt to read past end of file */
- return -1;
- }
-
- memcpy(buf, dlpriv->dl_data+offset, len);
-
- return len;
-
-} /* unifi_fw_read() */
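/*
 * Usage sketch (an assumption for illustration only, not code from this
 * file): reading a fixed-size record header through unifi_fw_read(). Since
 * the function returns -1 when a request would run past the end of the
 * image, a caller should only ask for bytes it knows are present.
 */
static inline int example_read_record_header(void *ospriv, void *dl_handle,
                                             u32 offset, u8 hdr[8])
{
    return (unifi_fw_read(ospriv, dl_handle, offset, hdr, 8) == 8) ? 0 : -1;
}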
-
-
-
-
-#define UNIFIHELPER_INIT_MODE_SMEUSER 2
-#define UNIFIHELPER_INIT_MODE_NATIVE 1
-
-/*
- * ---------------------------------------------------------------------------
- * uf_run_unifihelper
- *
- * Ask userspace to send us firmware for download by running
- * '/usr/sbin/unififw'.
- * The same script starts the SME userspace application.
- * Derived from net_run_sbin_hotplug().
- *
- * Arguments:
- * priv Pointer to OS private struct.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-int
-uf_run_unifihelper(unifi_priv_t *priv)
-{
-#ifdef ANDROID_BUILD
- char *prog = "/system/bin/unififw";
-#else
- char *prog = "/usr/sbin/unififw";
-#endif /* ANDROID_BUILD */
-
- char *argv[6], *envp[4];
- char inst_str[8];
- char init_mode[8];
- int i, r;
-
-#if (defined CSR_SME_USERSPACE) && (!defined CSR_SUPPORT_WEXT)
- unifi_trace(priv, UDBG1, "SME userspace build: run unifi_helper manually\n");
- return 0;
-#endif
-
- unifi_trace(priv, UDBG1, "starting %s\n", prog);
-
- snprintf(inst_str, 8, "%d", priv->instance);
-#if (defined CSR_SME_USERSPACE)
- snprintf(init_mode, 8, "%d", UNIFIHELPER_INIT_MODE_SMEUSER);
-#else
- snprintf(init_mode, 8, "%d", UNIFIHELPER_INIT_MODE_NATIVE);
-#endif /* CSR_SME_USERSPACE */
-
- i = 0;
- argv[i++] = prog;
- argv[i++] = inst_str;
- argv[i++] = init_mode;
- argv[i++] = 0;
- argv[i] = 0;
- /* Don't add more args without making argv bigger */
-
- /* minimal command environment */
- i = 0;
- envp[i++] = "HOME=/";
- envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
- envp[i] = 0;
- /* Don't add more without making envp bigger */
-
- unifi_trace(priv, UDBG2, "running %s %s %s\n", argv[0], argv[1], argv[2]);
-
- r = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
-
- return r;
-} /* uf_run_unifihelper() */
-
-#ifdef CSR_WIFI_SPLIT_PATCH
-static u8 is_ap_mode(unifi_priv_t *priv)
-{
- if (priv == NULL || priv->interfacePriv[0] == NULL)
- {
- return FALSE;
- }
-
- /* Test for mode requiring AP patch */
- return(CSR_WIFI_HIP_IS_AP_FW(priv->interfacePriv[0]->interfaceMode));
-}
-#endif
-
-/*
- * ---------------------------------------------------------------------------
- * uf_request_firmware_files
- *
- * Get the firmware files from userspace.
- *
- * Arguments:
- * priv Pointer to OS private struct.
- * is_fw type of firmware to load (UNIFI_FW_STA/LOADER)
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-int uf_request_firmware_files(unifi_priv_t *priv, int is_fw)
-{
- /* uses the default method to get the firmware */
- const struct firmware *fw_entry;
- int postfix;
-#define UNIFI_MAX_FW_PATH_LEN 32
- char fw_name[UNIFI_MAX_FW_PATH_LEN];
- int r;
-
-#if (defined CSR_SUPPORT_SME) && (defined CSR_SUPPORT_WEXT)
- if (priv->mib_data.length) {
- vfree(priv->mib_data.data);
- priv->mib_data.data = NULL;
- priv->mib_data.length = 0;
- }
-#endif /* CSR_SUPPORT_SME && CSR_SUPPORT_WEXT*/
-
- postfix = priv->instance;
-
- if (is_fw == UNIFI_FW_STA) {
- /* Free kernel buffer and reload */
- uf_release_firmware(priv, &priv->fw_sta);
-#ifdef CSR_WIFI_SPLIT_PATCH
- scnprintf(fw_name, UNIFI_MAX_FW_PATH_LEN, "unifi-sdio-%d/%s",
- postfix, (is_ap_mode(priv) ? "ap.xbv" : "staonly.xbv") );
-#else
- scnprintf(fw_name, UNIFI_MAX_FW_PATH_LEN, "unifi-sdio-%d/%s",
- postfix, "sta.xbv" );
-#endif
- r = request_firmware(&fw_entry, fw_name, priv->unifi_device);
- if (r == 0) {
- priv->fw_sta.dl_data = fw_entry->data;
- priv->fw_sta.dl_len = fw_entry->size;
- priv->fw_sta.fw_desc = (void *)fw_entry;
- } else {
- unifi_trace(priv, UDBG2, "Firmware file not available\n");
- }
- }
-
- return 0;
-
-} /* uf_request_firmware_files() */
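/*
 * Example of the resulting lookup (derived from the format strings above;
 * the absolute path is only the usual request_firmware() search location,
 * not something this file dictates): instance 0 of a non-split-patch build
 * requests "unifi-sdio-0/sta.xbv", typically resolved as
 * /lib/firmware/unifi-sdio-0/sta.xbv.
 */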
-
-/*
- * ---------------------------------------------------------------------------
- * uf_release_firmware_files
- *
- * Release all buffers used to store firmware files
- *
- * Arguments:
- * priv Pointer to OS private struct.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-int uf_release_firmware_files(unifi_priv_t *priv)
-{
- uf_release_firmware(priv, &priv->fw_sta);
-
- return 0;
-}
-
-/*
- * ---------------------------------------------------------------------------
- * uf_release_firmware
- *
- * Release specific buffer used to store firmware
- *
- * Arguments:
- * priv Pointer to OS private struct.
- * to_free Pointer to specific buffer to release
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-int uf_release_firmware(unifi_priv_t *priv, struct dlpriv *to_free)
-{
- if (to_free != NULL) {
- release_firmware((const struct firmware *)to_free->fw_desc);
- to_free->fw_desc = NULL;
- to_free->dl_data = NULL;
- to_free->dl_len = 0;
- }
- return 0;
-}
diff --git a/drivers/staging/csr/inet.c b/drivers/staging/csr/inet.c
deleted file mode 100644
index b3ef818..0000000
--- a/drivers/staging/csr/inet.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * ---------------------------------------------------------------------------
- * FILE: inet.c
- *
- * PURPOSE:
- * Routines related to IP address changes.
- * Optional part of the porting exercise. It uses system network
- * handlers to obtain the UniFi IP address and pass it to the SME
- * using the unifi_sys_ip_configured_ind().
- *
- * Copyright (C) 2008-2009 Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ---------------------------------------------------------------------------
- */
-#include <linux/inetdevice.h>
-#include <linux/notifier.h>
-
-#include "unifi_priv.h"
-#include "csr_wifi_hip_conversions.h"
-
-/*
- * The inet notifier is global and not per-netdev. To avoid having a
- * notifier registered when there are no unifi devices present, it's
- * registered after the first unifi network device is registered, and
- * unregistered when the last unifi network device is unregistered.
- */
-
-static atomic_t inet_notif_refs = ATOMIC_INIT(0);
-
-static int uf_inetaddr_event(struct notifier_block *notif, unsigned long event, void *ifa)
-{
- struct net_device *ndev;
- unifi_priv_t *priv;
- struct in_ifaddr *if_addr;
- netInterface_priv_t *InterfacePriv = (netInterface_priv_t *)NULL;
-
- if (!ifa || !((struct in_ifaddr *)ifa)->ifa_dev) {
- unifi_trace(NULL, UDBG1, "uf_inetaddr_event (%lu) ifa=%p\n", event, ifa);
- return NOTIFY_DONE;
- }
-
- ndev = ((struct in_ifaddr *)ifa)->ifa_dev->dev;
- InterfacePriv = (netInterface_priv_t*) netdev_priv(ndev);
-
- /* As the notifier is global, the call may be for a non-UniFi netdev.
- * Therefore check the netdev_priv to make sure it's a known UniFi one.
- */
- if (uf_find_netdev_priv(InterfacePriv) == -1) {
- unifi_trace(NULL, UDBG1, "uf_inetaddr_event (%lu) ndev=%p, other netdev_priv=%p\n",
- event, ndev, InterfacePriv);
- return NOTIFY_DONE;
- }
-
- if (!InterfacePriv->privPtr) {
- unifi_error(NULL, "uf_inetaddr_event null priv (%lu) ndev=%p, InterfacePriv=%p\n",
- event, ndev, InterfacePriv);
- return NOTIFY_DONE;
- }
-
- priv = InterfacePriv->privPtr;
- if_addr = (struct in_ifaddr *)ifa;
-
- /* If this event is for a UniFi device, notify the SME that an IP
- * address has been added or removed. */
- if (uf_find_priv(priv) != -1) {
- switch (event) {
- case NETDEV_UP:
- unifi_info(priv, "IP address assigned for %s\n", priv->netdev[InterfacePriv->InterfaceTag]->name);
- priv->sta_ip_address = if_addr->ifa_address;
-#ifdef CSR_SUPPORT_WEXT
- sme_mgt_packet_filter_set(priv);
-#endif
- break;
- case NETDEV_DOWN:
- unifi_info(priv, "IP address removed for %s\n", priv->netdev[InterfacePriv->InterfaceTag]->name);
- priv->sta_ip_address = 0xFFFFFFFF;
-#ifdef CSR_SUPPORT_WEXT
- sme_mgt_packet_filter_set(priv);
-#endif
- break;
- }
- }
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block uf_inetaddr_notifier = {
- .notifier_call = uf_inetaddr_event,
-};
-
-void uf_register_inet_notifier(void)
-{
- if (atomic_inc_return(&inet_notif_refs) == 1)
- register_inetaddr_notifier(&uf_inetaddr_notifier);
-}
-
-void uf_unregister_inet_notifier(void)
-{
- if (atomic_dec_return(&inet_notif_refs) == 0)
- unregister_inetaddr_notifier(&uf_inetaddr_notifier);
-}
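/*
 * Pairing sketch (an assumption about intended use, based on the comment at
 * the top of this block): the helpers above are meant to be called once per
 * UniFi net device, so the global inetaddr notifier is registered while at
 * least one UniFi interface exists and removed when the last one goes away.
 *
 *   uf_register_inet_notifier();     when a UniFi netdev is registered
 *   ...
 *   uf_unregister_inet_notifier();   when that netdev is unregistered
 */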
diff --git a/drivers/staging/csr/init_hw.c b/drivers/staging/csr/init_hw.c
deleted file mode 100644
index 3b8a4ba..0000000
--- a/drivers/staging/csr/init_hw.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * ---------------------------------------------------------------------------
- * FILE: init_hw.c
- *
- * PURPOSE:
- * Use the HIP core lib to initialise the UniFi chip.
- * It is part of the porting exercise in Linux.
- *
- * Copyright (C) 2009 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ---------------------------------------------------------------------------
- */
-#include "csr_wifi_hip_unifi.h"
-#include "unifi_priv.h"
-
-
-#define MAX_INIT_ATTEMPTS 4
-
-extern int led_mask;
-
-
-/*
- * ---------------------------------------------------------------------------
- * uf_init_hw
- *
- * Resets hardware, downloads and initialises f/w.
- * This function demonstrates how to use the HIP core lib API
- * to implement the SME unifi_sys_wifi_on_req() part of the SYS API.
- *
- * In a simple implementation, all this function needs to do is call
- * unifi_init_card() and then unifi_card_info().
- * In the Linux implementation, it will retry to initialise UniFi or
- * try to debug the reasons if unifi_init_card() returns an error.
- *
- * Arguments:
- * ospriv Pointer to OS driver structure for the device.
- *
- * Returns:
- * O on success, non-zero otherwise.
- *
- * ---------------------------------------------------------------------------
- */
-int
-uf_init_hw(unifi_priv_t *priv)
-{
- int attempts = 0;
- int priv_instance;
- CsrResult csrResult = CSR_RESULT_FAILURE;
-
- priv_instance = uf_find_priv(priv);
- if (priv_instance == -1) {
- unifi_warning(priv, "uf_init_hw: Unknown priv instance, will use fw_init[0]\n");
- priv_instance = 0;
- }
-
- while (1) {
- if (attempts > MAX_INIT_ATTEMPTS) {
- unifi_error(priv, "Failed to initialise UniFi after %d attempts, "
- "giving up.\n",
- attempts);
- break;
- }
- attempts++;
-
- unifi_info(priv, "Initialising UniFi, attempt %d\n", attempts);
-
- if (fw_init[priv_instance] > 0) {
- unifi_notice(priv, "f/w init prevented by module parameter\n");
- break;
- } else if (fw_init[priv_instance] == 0) {
- fw_init[priv_instance] ++;
- }
-
- /*
- * Initialise driver core. This will perform a reset of UniFi
- * internals, but not the SDIO CCCR.
- */
- CsrSdioClaim(priv->sdio);
- csrResult = unifi_init_card(priv->card, led_mask);
- CsrSdioRelease(priv->sdio);
-
- if (csrResult == CSR_WIFI_HIP_RESULT_NO_DEVICE) {
- return CsrHipResultToStatus(csrResult);
- }
- if (csrResult == CSR_WIFI_HIP_RESULT_NOT_FOUND) {
- unifi_error(priv, "Firmware file required, but not found.\n");
- return CsrHipResultToStatus(csrResult);
- }
- if (csrResult != CSR_RESULT_SUCCESS) {
- /* failed. Reset h/w and try again */
- unifi_error(priv, "Failed to initialise UniFi chip.\n");
- continue;
- }
-
- /* Get the version information from the lib_hip */
- unifi_card_info(priv->card, &priv->card_info);
-
- return CsrHipResultToStatus(csrResult);
- }
-
- return CsrHipResultToStatus(csrResult);
-
-} /* uf_init_hw */
-
-
diff --git a/drivers/staging/csr/io.c b/drivers/staging/csr/io.c
deleted file mode 100644
index fe4a7ba..0000000
--- a/drivers/staging/csr/io.c
+++ /dev/null
@@ -1,1098 +0,0 @@
-/*
- * ---------------------------------------------------------------------------
- * FILE: io.c
- *
- * PURPOSE:
- * This file contains routines that the SDIO driver can call when a
- * UniFi card is first inserted (or detected) and removed.
- *
- * When used with sdioemb, the udev scripts (at least on Ubuntu) don't
- * recognise a UniFi being added to the system. This is because sdioemb
- * does not register itself as a device_driver; it uses its own code
- * to handle insert and remove.
- * To have Ubuntu recognise UniFi, edit /etc/udev/rules.d/85-ifupdown.rules
- * to change this line:
- * SUBSYSTEM=="net", DRIVERS=="?*", GOTO="net_start"
- * to these:
- * #SUBSYSTEM=="net", DRIVERS=="?*", GOTO="net_start"
- * SUBSYSTEM=="net", GOTO="net_start"
- *
- * Then you can add a stanza to /etc/network/interfaces like this:
- * auto eth1
- * iface eth1 inet dhcp
- * wpa-conf /etc/wpa_supplicant.conf
- * This will then automatically associate when a card is inserted. (A
- * minimal wpa_supplicant.conf sketch follows this header comment.)
- *
- * Copyright (C) 2006-2009 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ---------------------------------------------------------------------------
- */
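For reference, a minimal /etc/wpa_supplicant.conf for a WPA-PSK network might look like the following; the SSID and passphrase are placeholders, and this snippet is not part of the driver source:

    ctrl_interface=/var/run/wpa_supplicant
    network={
        ssid="ExampleNetwork"
        psk="example-passphrase"
    }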
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-
-#include "csr_wifi_hip_unifi.h"
-#include "csr_wifi_hip_unifiversion.h"
-#include "csr_wifi_hip_unifi_udi.h" /* for unifi_print_status() */
-#include "unifiio.h"
-#include "unifi_priv.h"
-
-/*
- * Array of pointers to context structs for unifi devices that are present.
- * The index in the array corresponds to the wlan interface number
- * (if "wlan*" is used). If "eth*" is used, the eth* numbers are allocated
- * after any Ethernet cards.
- *
- * The Arasan PCI-SDIO controller card supported by this driver has 2 slots,
- * hence a max of 2 devices.
- */
-static unifi_priv_t *Unifi_instances[MAX_UNIFI_DEVS];
-
-/* Array of pointers to netdev objects used by the UniFi driver, as there
- * are now many per instance. This is used to determine which netdev events
- * are for UniFi as opposed to other net interfaces.
- */
-static netInterface_priv_t *Unifi_netdev_instances[MAX_UNIFI_DEVS * CSR_WIFI_NUM_INTERFACES];
-
-/*
- * Array to hold the status of each unifi device in each slot.
- * We only process an insert event when In_use[] for the slot is
- * UNIFI_DEV_NOT_IN_USE. Otherwise, it means that the slot is in use or
- * we are in the middle of a cleanup (the action on unplug).
- */
-#define UNIFI_DEV_NOT_IN_USE 0
-#define UNIFI_DEV_IN_USE 1
-#define UNIFI_DEV_CLEANUP 2
-static int In_use[MAX_UNIFI_DEVS];
-/*
- * Mutex to prevent UDI clients from opening the character device before the
- * priv is created and initialised.
- */
-DEFINE_SEMAPHORE(Unifi_instance_mutex);
-/*
- * When the device is removed, unregister waits on Unifi_cleanup_wq
- * until all the UDI clients release the character device.
- */
-DECLARE_WAIT_QUEUE_HEAD(Unifi_cleanup_wq);
-
-#ifdef CONFIG_PROC_FS
-/*
- * seq_file wrappers for procfile show routines.
- */
-static int uf_proc_show(struct seq_file *m, void *v);
-
-#define UNIFI_DEBUG_TXT_BUFFER (8 * 1024)
-
-static int uf_proc_open(struct inode *inode, struct file *file)
-{
- return single_open_size(file, uf_proc_show, PDE_DATA(inode),
- UNIFI_DEBUG_TXT_BUFFER);
-}
-
-static const struct file_operations uf_proc_fops = {
- .open = uf_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-#endif /* CONFIG_PROC_FS */
-
-#ifdef CSR_WIFI_RX_PATH_SPLIT
-
-static CsrResult signal_buffer_init(unifi_priv_t * priv, int size)
-{
- int i;
-
- priv->rxSignalBuffer.writePointer =
- priv->rxSignalBuffer.readPointer = 0;
- priv->rxSignalBuffer.size = size;
- /* Allocating Memory for Signal primitive pointer */
- for(i=0; i<size; i++)
- {
- priv->rxSignalBuffer.rx_buff[i].sig_len=0;
- priv->rxSignalBuffer.rx_buff[i].bufptr = kmalloc(UNIFI_PACKED_SIGBUF_SIZE, GFP_KERNEL);
- if (priv->rxSignalBuffer.rx_buff[i].bufptr == NULL)
- {
- int j;
- unifi_error(priv,"signal_buffer_init:Failed to Allocate shared memory for T-H signals \n");
- for(j=0;j<i;j++)
- {
- priv->rxSignalBuffer.rx_buff[j].sig_len=0;
- kfree(priv->rxSignalBuffer.rx_buff[j].bufptr);
- priv->rxSignalBuffer.rx_buff[j].bufptr = NULL;
- }
- return -1;
- }
- }
- return 0;
-}
-
-
-static void signal_buffer_free(unifi_priv_t * priv, int size)
-{
- int i;
-
- for(i=0; i<size; i++)
- {
- priv->rxSignalBuffer.rx_buff[i].sig_len=0;
- kfree(priv->rxSignalBuffer.rx_buff[i].bufptr);
- priv->rxSignalBuffer.rx_buff[i].bufptr = NULL;
- }
-}
-#endif
-/*
- * ---------------------------------------------------------------------------
- * uf_register_netdev
- *
- * Registers the network interface, installs the qdisc,
- * and registers the inet handler.
- * In the porting exercise, register the driver with the network
- * stack if necessary.
- *
- * Arguments:
- * priv Pointer to driver context.
- *
- * Returns:
- * 0 on success, non-zero otherwise.
- *
- * Notes:
- * We will only unregister when the card is ejected, so we must
- * only do it once.
- * ---------------------------------------------------------------------------
- */
-int
-uf_register_netdev(unifi_priv_t *priv, int interfaceTag)
-{
- int r;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
-
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "uf_register_netdev bad interfaceTag\n");
- return -EINVAL;
- }
-
- /*
- * Allocates a device number and registers device with the network
- * stack.
- */
- unifi_trace(priv, UDBG5, "uf_register_netdev: netdev %d - 0x%p\n",
- interfaceTag, priv->netdev[interfaceTag]);
- r = register_netdev(priv->netdev[interfaceTag]);
- if (r) {
- unifi_error(priv, "Failed to register net device\n");
- return -EINVAL;
- }
-
- /* The device is registered */
- interfacePriv->netdev_registered = 1;
-
-#ifdef CSR_SUPPORT_SME
- /*
- * Register the inet handler; it notifies us for changes in the IP address.
- */
- uf_register_inet_notifier();
-#endif /* CSR_SUPPORT_SME */
-
- unifi_notice(priv, "unifi%d is %s\n",
- priv->instance, priv->netdev[interfaceTag]->name);
-
- return 0;
-} /* uf_register_netdev */
-
-
-/*
- * ---------------------------------------------------------------------------
- * uf_unregister_netdev
- *
- * Unregisters the network interface and the inet handler.
- *
- * Arguments:
- * priv Pointer to driver context.
- *
- * Returns:
- * None.
- *
- * ---------------------------------------------------------------------------
- */
-void
-uf_unregister_netdev(unifi_priv_t *priv)
-{
- int i=0;
-
-#ifdef CSR_SUPPORT_SME
- /* Unregister the inet handler... */
- uf_unregister_inet_notifier();
-#endif /* CSR_SUPPORT_SME */
-
- for (i=0; i<CSR_WIFI_NUM_INTERFACES; i++) {
- netInterface_priv_t *interfacePriv = priv->interfacePriv[i];
- if (interfacePriv->netdev_registered) {
- unifi_trace(priv, UDBG5,
- "uf_unregister_netdev: netdev %d - 0x%p\n",
- i, priv->netdev[i]);
-
- /* ... and the netdev */
- unregister_netdev(priv->netdev[i]);
- interfacePriv->netdev_registered = 0;
- }
-
- interfacePriv->interfaceMode = 0;
-
- /* Enable all queues by default */
- interfacePriv->queueEnabled[0] = 1;
- interfacePriv->queueEnabled[1] = 1;
- interfacePriv->queueEnabled[2] = 1;
- interfacePriv->queueEnabled[3] = 1;
- }
-
- priv->totalInterfaceCount = 0;
-} /* uf_unregister_netdev() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * register_unifi_sdio
- *
- * This function is called from the Probe (or equivalent) method of
- * the SDIO driver when a UniFi card is detected.
- * We allocate the Linux net_device struct, initialise the HIP core
- * lib, create the char device nodes and start the userspace helper
- * to initialise the device.
- *
- * Arguments:
- * sdio_dev Pointer to SDIO context handle to use for all
- * SDIO ops.
- * bus_id A small number indicating the SDIO card position on the
- * bus. Typically this is the slot number, e.g. 0, 1 etc.
- * Valid values are 0 to MAX_UNIFI_DEVS-1.
- * dev Pointer to kernel device manager struct.
- *
- * Returns:
- * Pointer to the unifi instance, or NULL on error.
- * ---------------------------------------------------------------------------
- */
-static unifi_priv_t *
-register_unifi_sdio(CsrSdioFunction *sdio_dev, int bus_id, struct device *dev)
-{
- unifi_priv_t *priv = NULL;
- int r = -1;
- CsrResult csrResult;
-
- if ((bus_id < 0) || (bus_id >= MAX_UNIFI_DEVS)) {
- unifi_error(priv, "register_unifi_sdio: invalid device %d\n",
- bus_id);
- return NULL;
- }
-
- down(&Unifi_instance_mutex);
-
- if (In_use[bus_id] != UNIFI_DEV_NOT_IN_USE) {
- unifi_error(priv, "register_unifi_sdio: device %d is already in use\n",
- bus_id);
- goto failed0;
- }
-
-
- /* Allocate device private and net_device structs */
- priv = uf_alloc_netdevice(sdio_dev, bus_id);
- if (priv == NULL) {
- unifi_error(priv, "Failed to allocate driver private\n");
- goto failed0;
- }
-
- priv->unifi_device = dev;
-
- SET_NETDEV_DEV(priv->netdev[0], dev);
-
- /* We are not ready to send data yet. */
- netif_carrier_off(priv->netdev[0]);
-
- /* Allocate driver context. */
- priv->card = unifi_alloc_card(priv->sdio, priv);
- if (priv->card == NULL) {
- unifi_error(priv, "Failed to allocate UniFi driver card struct.\n");
- goto failed1;
- }
-
- if (Unifi_instances[bus_id]) {
- unifi_error(priv, "Internal error: instance for slot %d is already taken\n",
- bus_id);
- }
- Unifi_instances[bus_id] = priv;
- In_use[bus_id] = UNIFI_DEV_IN_USE;
-
- /* Save the netdev_priv for use by the netdev event callback mechanism */
- Unifi_netdev_instances[bus_id * CSR_WIFI_NUM_INTERFACES] = netdev_priv(priv->netdev[0]);
-
- /* Initialise the mini-coredump capture buffers */
- csrResult = unifi_coredump_init(priv->card, (u16)coredump_max);
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(priv, "Couldn't allocate mini-coredump buffers\n");
- }
-
- /* Create the character device nodes */
- r = uf_create_device_nodes(priv, bus_id);
- if (r) {
- goto failed1;
- }
-
- /*
- * We use the slot number as unifi device index.
- */
- scnprintf(priv->proc_entry_name, 64, "driver/unifi%d", priv->instance);
- /*
- * The following complex casting is in place in order to eliminate 64-bit compilation warning
- * "cast to/from pointer from/to integer of different size"
- */
- if (!proc_create_data(priv->proc_entry_name, 0, NULL,
- &uf_proc_fops, (void *)(long)priv->instance))
- {
- unifi_error(priv, "unifi: can't create /proc/driver/unifi\n");
- }
-
- /* Allocate the net_device for interfaces other than 0. */
- {
- int i;
- priv->totalInterfaceCount =0;
-
- for(i=1;i<CSR_WIFI_NUM_INTERFACES;i++)
- {
- if( !uf_alloc_netdevice_for_other_interfaces(priv,i) )
- {
- /* An error occurred while allocating the net_device for interface[i].
- * net_devices were allocated only for interfaces with id < i; they will
- * all be released when control reaches the failed0 label.
- */
- unifi_error(priv, "Failed to allocate driver private for interface[%d]\n",i);
- goto failed0;
- }
- else
- {
- SET_NETDEV_DEV(priv->netdev[i], dev);
-
- /* We are not ready to send data yet. */
- netif_carrier_off(priv->netdev[i]);
-
- /* Save the netdev_priv for use by the netdev event callback mechanism */
- Unifi_netdev_instances[bus_id * CSR_WIFI_NUM_INTERFACES + i] = netdev_priv(priv->netdev[i]);
- }
- }
-
- for(i=0;i<CSR_WIFI_NUM_INTERFACES;i++)
- {
- netInterface_priv_t *interfacePriv = priv->interfacePriv[i];
- interfacePriv->netdev_registered=0;
- }
- }
-
-#ifdef CSR_WIFI_RX_PATH_SPLIT
- if (signal_buffer_init(priv, CSR_WIFI_RX_SIGNAL_BUFFER_SIZE))
- {
- unifi_error(priv,"Failed to allocate shared memory for T-H signals\n");
- goto failed2;
- }
- priv->rx_workqueue = create_singlethread_workqueue("rx_workq");
- if (priv->rx_workqueue == NULL) {
- unifi_error(priv,"create_singlethread_workqueue failed \n");
- goto failed3;
- }
- INIT_WORK(&priv->rx_work_struct, rx_wq_handler);
-#endif
-
-#ifdef CSR_WIFI_HIP_DEBUG_OFFLINE
- if (log_hip_signals)
- {
- uf_register_hip_offline_debug(priv);
- }
-#endif
-
- /* Initialise the SME related threads and parameters */
- r = uf_sme_init(priv);
- if (r) {
- unifi_error(priv, "SME initialisation failed.\n");
- goto failed4;
- }
-
- /*
- * Run the userspace helper program (unififw) to perform
- * the device initialisation.
- */
- unifi_trace(priv, UDBG1, "run UniFi helper app...\n");
- r = uf_run_unifihelper(priv);
- if (r) {
- unifi_notice(priv, "unable to run UniFi helper app\n");
- /* Not a fatal error. */
- }
-
- up(&Unifi_instance_mutex);
-
- return priv;
-
-failed4:
-#ifdef CSR_WIFI_HIP_DEBUG_OFFLINE
-if (log_hip_signals)
-{
- uf_unregister_hip_offline_debug(priv);
-}
-#endif
-#ifdef CSR_WIFI_RX_PATH_SPLIT
- flush_workqueue(priv->rx_workqueue);
- destroy_workqueue(priv->rx_workqueue);
-failed3:
- signal_buffer_free(priv,CSR_WIFI_RX_SIGNAL_BUFFER_SIZE);
-failed2:
-#endif
- /* Remove the device nodes */
- uf_destroy_device_nodes(priv);
-failed1:
- /* Deregister priv->netdev_client */
- ul_deregister_client(priv->netdev_client);
-
-failed0:
- if (priv && priv->card) {
- unifi_coredump_free(priv->card);
- unifi_free_card(priv->card);
- }
- if (priv) {
- uf_free_netdevice(priv);
- }
-
- up(&Unifi_instance_mutex);
-
- return NULL;
-} /* register_unifi_sdio() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * ask_unifi_sdio_cleanup
- *
- * We cannot free our private context until all the char device
- * clients have closed their file handles. unregister_unifi_sdio(), which
- * is called when a card is removed, waits on Unifi_cleanup_wq until
- * the reference count becomes zero. It is time to wake it up now.
- *
- * Arguments:
- * priv Pointer to driver context.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-static void
-ask_unifi_sdio_cleanup(unifi_priv_t *priv)
-{
-
- /*
- * Now clear the flag that says the old instance is in use.
- * This is used to prevent a new instance being started before the old
- * one has finished closing down, for example if bounce makes the card
- * appear to be ejected and re-inserted quickly.
- */
- In_use[priv->instance] = UNIFI_DEV_CLEANUP;
-
- unifi_trace(NULL, UDBG5, "ask_unifi_sdio_cleanup: wake up cleanup workqueue.\n");
- wake_up(&Unifi_cleanup_wq);
-
-} /* ask_unifi_sdio_cleanup() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * cleanup_unifi_sdio
- *
- * Release any resources owned by a unifi instance.
- *
- * Arguments:
- * priv Pointer to the instance to free.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-static void
-cleanup_unifi_sdio(unifi_priv_t *priv)
-{
- int priv_instance;
- int i;
- static const CsrWifiMacAddress broadcast_address = {{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}};
-
- /* Remove the device nodes */
- uf_destroy_device_nodes(priv);
-
- /* Mark this device as gone away by NULLing the entry in Unifi_instances */
- Unifi_instances[priv->instance] = NULL;
-
- unifi_trace(priv, UDBG5, "cleanup_unifi_sdio: remove_proc_entry\n");
- /*
- * Free the children of priv before unifi_free_netdevice() frees
- * the priv struct
- */
- remove_proc_entry(priv->proc_entry_name, 0);
-
-
- /* Unregister netdev as a client. */
- if (priv->netdev_client) {
- unifi_trace(priv, UDBG2, "Netdev client (id:%d s:0x%X) is unregistered\n",
- priv->netdev_client->client_id, priv->netdev_client->sender_id);
- ul_deregister_client(priv->netdev_client);
- }
-
- /* Destroy the SME related threads and parameters */
- uf_sme_deinit(priv);
-
-#ifdef CSR_SME_USERSPACE
- priv->smepriv = NULL;
-#endif
-
-#ifdef CSR_WIFI_HIP_DEBUG_OFFLINE
- if (log_hip_signals)
- {
- uf_unregister_hip_offline_debug(priv);
- }
-#endif
-
- /* Free any packets left in the Rx queues */
- for(i=0;i<CSR_WIFI_NUM_INTERFACES;i++)
- {
- uf_free_pending_rx_packets(priv, UF_UNCONTROLLED_PORT_Q, broadcast_address,i);
- uf_free_pending_rx_packets(priv, UF_CONTROLLED_PORT_Q, broadcast_address,i);
- }
- /*
- * We need to free the resources held by the core, which include tx skbs,
- * otherwise we can not call unregister_netdev().
- */
- if (priv->card) {
- unifi_trace(priv, UDBG5, "cleanup_unifi_sdio: free card\n");
- unifi_coredump_free(priv->card);
- unifi_free_card(priv->card);
- priv->card = NULL;
- }
-
- /*
- * Unregister the network device.
- * We can not unregister the netdev before we release
- * all pending packets in the core.
- */
- uf_unregister_netdev(priv);
- priv->totalInterfaceCount = 0;
-
- /* Clear the table of registered netdev_priv's */
- for (i = 0; i < CSR_WIFI_NUM_INTERFACES; i++) {
- Unifi_netdev_instances[priv->instance * CSR_WIFI_NUM_INTERFACES + i] = NULL;
- }
-
- unifi_trace(priv, UDBG5, "cleanup_unifi_sdio: uf_free_netdevice\n");
- /*
- * When uf_free_netdevice() returns, the priv is invalid
- * so we need to remember the instance to clear the global flag later.
- */
- priv_instance = priv->instance;
-
-#ifdef CSR_WIFI_RX_PATH_SPLIT
- flush_workqueue(priv->rx_workqueue);
- destroy_workqueue(priv->rx_workqueue);
- signal_buffer_free(priv,CSR_WIFI_RX_SIGNAL_BUFFER_SIZE);
-#endif
-
- /* Priv is freed as part of the net_device */
- uf_free_netdevice(priv);
-
- /*
- * Now clear the flag that says the old instance is in use.
- * This is used to prevent a new instance being started before the old
- * one has finished closing down, for example if bounce makes the card
- * appear to be ejected and re-inserted quickly.
- */
- In_use[priv_instance] = UNIFI_DEV_NOT_IN_USE;
-
- unifi_trace(NULL, UDBG5, "cleanup_unifi_sdio: DONE.\n");
-
-} /* cleanup_unifi_sdio() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unregister_unifi_sdio
- *
- * Called from the SDIO driver when it detects that UniFi has been removed.
- *
- * Arguments:
- * bus_id Number of the card that was ejected.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-static void
-unregister_unifi_sdio(int bus_id)
-{
- unifi_priv_t *priv;
- int interfaceTag=0;
- u8 reason = CONFIG_IND_EXIT;
-
- if ((bus_id < 0) || (bus_id >= MAX_UNIFI_DEVS)) {
- unifi_error(NULL, "unregister_unifi_sdio: invalid device %d\n",
- bus_id);
- return;
- }
-
- priv = Unifi_instances[bus_id];
- if (priv == NULL) {
- unifi_error(priv, "unregister_unifi_sdio: device %d is not registered\n",
- bus_id);
- return;
- }
-
- /* Stop the network traffic before freeing the core. */
- for(interfaceTag=0;interfaceTag<priv->totalInterfaceCount;interfaceTag++)
- {
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
- if(interfacePriv->netdev_registered)
- {
- netif_carrier_off(priv->netdev[interfaceTag]);
- netif_tx_stop_all_queues(priv->netdev[interfaceTag]);
- }
- }
-
-#ifdef CSR_NATIVE_LINUX
- /*
- * If the unifi thread was started, signal it to stop. This
- * should cause any userspace processes with open unifi device to
- * close them.
- */
- uf_stop_thread(priv, &priv->bh_thread);
-
- /* Unregister the interrupt handler */
- if (csr_sdio_linux_remove_irq(priv->sdio)) {
- unifi_notice(priv,
- "csr_sdio_linux_remove_irq failed to talk to card.\n");
- }
-
- /* Ensure no MLME functions are waiting on the mlme_event semaphore. */
- uf_abort_mlme(priv);
-#endif /* CSR_NATIVE_LINUX */
-
- ul_log_config_ind(priv, &reason, sizeof(u8));
-
- /* Deregister the UDI hook from the core. */
- unifi_remove_udi_hook(priv->card, logging_handler);
-
- uf_put_instance(bus_id);
-
- /*
- * Wait until the device is cleaned up, i.e. until all userspace
- * processes have closed any open unifi devices.
- */
- wait_event(Unifi_cleanup_wq, In_use[bus_id] == UNIFI_DEV_CLEANUP);
- unifi_trace(NULL, UDBG5, "Received clean up event\n");
-
- /* Now we can free the private context and the char device nodes */
- cleanup_unifi_sdio(priv);
-
-} /* unregister_unifi_sdio() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * uf_find_instance
- *
- * Find the context structure for a given UniFi device instance.
- *
- * Arguments:
- * inst The instance number to look for.
- *
- * Returns:
- * Pointer to the context structure, or NULL if the instance does not exist.
- * ---------------------------------------------------------------------------
- */
-unifi_priv_t *
-uf_find_instance(int inst)
-{
- if ((inst < 0) || (inst >= MAX_UNIFI_DEVS)) {
- return NULL;
- }
- return Unifi_instances[inst];
-} /* uf_find_instance() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * uf_find_priv
- *
- * Find the device instance for a given context structure.
- *
- * Arguments:
- * priv The context structure pointer to look for.
- *
- * Returns:
- * index of instance, -1 otherwise.
- * ---------------------------------------------------------------------------
- */
-int
-uf_find_priv(unifi_priv_t *priv)
-{
- int inst;
-
- if (!priv) {
- return -1;
- }
-
- for (inst = 0; inst < MAX_UNIFI_DEVS; inst++) {
- if (Unifi_instances[inst] == priv) {
- return inst;
- }
- }
-
- return -1;
-} /* uf_find_priv() */
-
-/*
- * ---------------------------------------------------------------------------
- * uf_find_netdev_priv
- *
- * Find the device instance for a given netdev context structure.
- *
- * Arguments:
- * priv The context structure pointer to look for.
- *
- * Returns:
- * index of instance, -1 otherwise.
- * ---------------------------------------------------------------------------
- */
-int
-uf_find_netdev_priv(netInterface_priv_t *priv)
-{
- int inst;
-
- if (!priv) {
- return -1;
- }
-
- for (inst = 0; inst < MAX_UNIFI_DEVS * CSR_WIFI_NUM_INTERFACES; inst++) {
- if (Unifi_netdev_instances[inst] == priv) {
- return inst;
- }
- }
-
- return -1;
-} /* uf_find_netdev_priv() */
-
-/*
- * ---------------------------------------------------------------------------
- * uf_get_instance
- *
- * Find the context structure for a given UniFi device instance
- * and increment the reference count.
- *
- * Arguments:
- * inst The instance number to look for.
- *
- * Returns:
- * Pointer to the instance or NULL if no instance exists.
- * ---------------------------------------------------------------------------
- */
-unifi_priv_t *
-uf_get_instance(int inst)
-{
- unifi_priv_t *priv;
-
- down(&Unifi_instance_mutex);
-
- priv = uf_find_instance(inst);
- if (priv) {
- priv->ref_count++;
- }
-
- up(&Unifi_instance_mutex);
-
- return priv;
-}
-
-/*
- * ---------------------------------------------------------------------------
- * uf_put_instance
- *
- * Decrement the context reference count, freeing resources and
- * shutting down the driver when the count reaches zero.
- *
- * Arguments:
- * inst The instance number to look for.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-void
-uf_put_instance(int inst)
-{
- unifi_priv_t *priv;
-
- down(&Unifi_instance_mutex);
-
- priv = uf_find_instance(inst);
- if (priv) {
- priv->ref_count--;
- if (priv->ref_count == 0) {
- ask_unifi_sdio_cleanup(priv);
- }
- }
-
- up(&Unifi_instance_mutex);
-}
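As a usage sketch only (the caller name is hypothetical; it assumes unifi_priv.h is in scope, as elsewhere in this file), the reference-counting pair above would typically be used like this:

    #include "unifi_priv.h"

    static void example_with_instance(int bus_id)
    {
        /* Take a reference; NULL means no device is present in this slot. */
        unifi_priv_t *priv = uf_get_instance(bus_id);

        if (priv == NULL)
            return;

        /* ... use priv while the reference is held ... */

        /* Drop the reference; the last put triggers ask_unifi_sdio_cleanup(). */
        uf_put_instance(bus_id);
    }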
-
-
-/*
- * ---------------------------------------------------------------------------
- * uf_proc_show
- *
- * seq_file show method for the driver node in /proc/driver/unifi<n>
- *
- * Arguments:
- * m           seq_file to write into
- * v           Unused
- *
- * Returns:
- * 0 on success.
- * ---------------------------------------------------------------------------
- */
-#ifdef CONFIG_PROC_FS
-static int uf_proc_show(struct seq_file *m, void *v)
-{
- unifi_priv_t *priv;
- int i;
-
- /*
- * The following complex casting is in place in order to eliminate
- * 64-bit compilation warning "cast to/from pointer from/to integer of
- * different size"
- */
- priv = uf_find_instance((long)m->private);
- if (!priv)
- return 0;
-
- seq_printf(m, "UniFi SDIO Driver: %s %s %s\n",
- CSR_WIFI_VERSION, __DATE__, __TIME__);
-#ifdef CSR_SME_USERSPACE
- seq_puts(m, "SME: CSR userspace ");
-#ifdef CSR_SUPPORT_WEXT
- seq_puts(m, "with WEXT support\n");
-#else
- seq_putc(m, '\n');
-#endif /* CSR_SUPPORT_WEXT */
-#endif /* CSR_SME_USERSPACE */
-#ifdef CSR_NATIVE_LINUX
- seq_puts(m, "SME: native\n");
-#endif
-
-#ifdef CSR_SUPPORT_SME
- seq_printf(m, "Firmware (ROM) build:%u, Patch:%u\n",
- priv->card_info.fw_build,
- priv->sme_versions.firmwarePatch);
-#endif
-
- unifi_print_status(priv->card, m);
-
- seq_printf(m, "Last dbg str: %s\n", priv->last_debug_string);
-
- seq_puts(m, "Last dbg16:");
- for (i = 0; i < 8; i++)
- seq_printf(m, " %04X", priv->last_debug_word16[i]);
- seq_putc(m, '\n');
- seq_puts(m, " ");
- for (; i < 16; i++)
- seq_printf(m, " %04X", priv->last_debug_word16[i]);
- seq_putc(m, '\n');
- return 0;
-}
-#endif
-
-
-
-
-static void
-uf_lx_suspend(CsrSdioFunction *sdio_ctx)
-{
- unifi_priv_t *priv = sdio_ctx->driverData;
- unifi_suspend(priv);
-
- CsrSdioSuspendAcknowledge(sdio_ctx, CSR_RESULT_SUCCESS);
-}
-
-static void
-uf_lx_resume(CsrSdioFunction *sdio_ctx)
-{
- unifi_priv_t *priv = sdio_ctx->driverData;
- unifi_resume(priv);
-
- CsrSdioResumeAcknowledge(sdio_ctx, CSR_RESULT_SUCCESS);
-}
-
-static int active_slot = MAX_UNIFI_DEVS;
-static struct device *os_devices[MAX_UNIFI_DEVS];
-
-void
-uf_add_os_device(int bus_id, struct device *os_device)
-{
- if ((bus_id < 0) || (bus_id >= MAX_UNIFI_DEVS)) {
- unifi_error(NULL, "uf_add_os_device: invalid device %d\n",
- bus_id);
- return;
- }
-
- active_slot = bus_id;
- os_devices[bus_id] = os_device;
-} /* uf_add_os_device() */
-
-void
-uf_remove_os_device(int bus_id)
-{
- if ((bus_id < 0) || (bus_id >= MAX_UNIFI_DEVS)) {
- unifi_error(NULL, "uf_remove_os_device: invalid device %d\n",
- bus_id);
- return;
- }
-
- active_slot = bus_id;
- os_devices[bus_id] = NULL;
-} /* uf_remove_os_device() */
-
-static void
-uf_sdio_inserted(CsrSdioFunction *sdio_ctx)
-{
- unifi_priv_t *priv;
-
- unifi_trace(NULL, UDBG5, "uf_sdio_inserted(0x%p), slot_id=%d, dev=%p\n",
- sdio_ctx, active_slot, os_devices[active_slot]);
-
- priv = register_unifi_sdio(sdio_ctx, active_slot, os_devices[active_slot]);
- if (priv == NULL) {
- CsrSdioInsertedAcknowledge(sdio_ctx, CSR_RESULT_FAILURE);
- return;
- }
-
- sdio_ctx->driverData = priv;
-
- CsrSdioInsertedAcknowledge(sdio_ctx, CSR_RESULT_SUCCESS);
-} /* uf_sdio_inserted() */
-
-
-static void
-uf_sdio_removed(CsrSdioFunction *sdio_ctx)
-{
- unregister_unifi_sdio(active_slot);
- CsrSdioRemovedAcknowledge(sdio_ctx);
-} /* uf_sdio_removed() */
-
-
-static void
-uf_sdio_dsr_handler(CsrSdioFunction *sdio_ctx)
-{
- unifi_priv_t *priv = sdio_ctx->driverData;
-
- unifi_sdio_interrupt_handler(priv->card);
-} /* uf_sdio_dsr_handler() */
-
-/*
- * ---------------------------------------------------------------------------
- * uf_sdio_int_handler
- *
- * Interrupt callback function for SDIO interrupts.
- * This is called in kernel context (i.e. not interrupt context).
- * We retrieve the unifi context pointer and call the main UniFi
- * interrupt handler.
- *
- * Arguments:
- * sdio_ctx    SDIO context pointer
- *
- * Returns:
- * Pointer to the DSR callback (uf_sdio_dsr_handler).
- * ---------------------------------------------------------------------------
- */
-static CsrSdioInterruptDsrCallback
-uf_sdio_int_handler(CsrSdioFunction *sdio_ctx)
-{
- return uf_sdio_dsr_handler;
-} /* uf_sdio_int_handler() */
-
-
-
-
-static CsrSdioFunctionId unifi_ids[] =
-{
- {
- .manfId = SDIO_MANF_ID_CSR,
- .cardId = SDIO_CARD_ID_UNIFI_3,
- .sdioFunction = SDIO_WLAN_FUNC_ID_UNIFI_3,
- .sdioInterface = CSR_SDIO_ANY_SDIO_INTERFACE,
- },
- {
- .manfId = SDIO_MANF_ID_CSR,
- .cardId = SDIO_CARD_ID_UNIFI_4,
- .sdioFunction = SDIO_WLAN_FUNC_ID_UNIFI_4,
- .sdioInterface = CSR_SDIO_ANY_SDIO_INTERFACE,
- }
-};
-
-
-/*
- * Structure to register with the glue layer.
- */
-static CsrSdioFunctionDriver unifi_sdioFunction_drv =
-{
- .inserted = uf_sdio_inserted,
- .removed = uf_sdio_removed,
- .intr = uf_sdio_int_handler,
- .suspend = uf_lx_suspend,
- .resume = uf_lx_resume,
-
- .ids = unifi_ids,
- .idsCount = sizeof(unifi_ids) / sizeof(unifi_ids[0])
-};
-
-
-/*
- * ---------------------------------------------------------------------------
- * uf_sdio_load
- * uf_sdio_unload
- *
- * These functions are called from the main module load and unload
- * functions. They perform the appropriate operations for the monolithic
- * driver.
- *
- * Arguments:
- * None.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-int __init
-uf_sdio_load(void)
-{
- CsrResult csrResult;
-
- csrResult = CsrSdioFunctionDriverRegister(&unifi_sdioFunction_drv);
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(NULL, "Failed to register UniFi SDIO driver: csrResult=%d\n", csrResult);
- return -EIO;
- }
-
- return 0;
-} /* uf_sdio_load() */
-
-
-
-void __exit
-uf_sdio_unload(void)
-{
- CsrSdioFunctionDriverUnregister(&unifi_sdioFunction_drv);
-} /* uf_sdio_unload() */
-
diff --git a/drivers/staging/csr/mlme.c b/drivers/staging/csr/mlme.c
deleted file mode 100644
index 861d6b7..0000000
--- a/drivers/staging/csr/mlme.c
+++ /dev/null
@@ -1,433 +0,0 @@
-/*
- * ---------------------------------------------------------------------------
- * FILE: mlme.c
- *
- * PURPOSE:
- * This file provides functions to send MLME requests to the UniFi.
- *
- * Copyright (C) 2007-2008 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ---------------------------------------------------------------------------
- */
-#include "csr_wifi_hip_unifi.h"
-#include "unifi_priv.h"
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_mlme_wait_for_reply
- *
- * Wait for a reply after sending a signal.
- *
- * Arguments:
- * priv Pointer to device private context struct
- * ul_client Pointer to linux client
- * sig_reply_id ID of the expected reply (defined in sigs.h).
- * timeout timeout in ms
- *
- * Returns:
- * 0 on success, -ve POSIX code on error.
- *
- * Notes:
- * This function waits for a specific (sig_reply_id) signal from UniFi.
- * It also matches the sequence number of the received (cfm) signal against
- * the latest sequence number of the (req) signal we have sent.
- * These two numbers must be equal.
- * It should only be used to wait for xxx.cfm signals, and only after
- * we have sent the matching xxx.req signal to UniFi.
- * If no response is received within the expected time (timeout), we assume
- * that the UniFi is busy and return an error.
- * If the wait is aborted by a kernel signal arriving, we stop waiting.
- * If a response from UniFi is not what we expected, we discard it and
- * wait again. This could be a response from an aborted request. If we
- * see several bad responses we assume we have lost synchronisation with
- * UniFi.
- * ---------------------------------------------------------------------------
- */
-static int
-unifi_mlme_wait_for_reply(unifi_priv_t *priv, ul_client_t *pcli, int sig_reply_id, int timeout)
-{
- int retries = 0;
- long r;
- long t = timeout;
- unsigned int sent_seq_no;
-
- /* Convert t in ms to jiffies */
- t = msecs_to_jiffies(t);
-
- do {
- /* Wait for the confirm or timeout. */
- r = wait_event_interruptible_timeout(pcli->udi_wq,
- (pcli->wake_up_wq_id) || (priv->io_aborted == 1),
- t);
- /* Check for general i/o error */
- if (priv->io_aborted) {
- unifi_error(priv, "MLME operation aborted\n");
- return -EIO;
- }
-
- /*
- * If r=0 the request has timed-out.
- * If r>0 the request has completed successfully.
- * If r=-ERESTARTSYS an event (kill signal) has interrupted the wait_event.
- */
- if ((r == 0) && (pcli->wake_up_wq_id == 0)) {
- unifi_error(priv, "mlme_wait: timed-out waiting for 0x%.4X, after %lu msec.\n",
- sig_reply_id, jiffies_to_msecs(t));
- pcli->wake_up_wq_id = 0;
- return -ETIMEDOUT;
- } else if (r == -ERESTARTSYS) {
- unifi_error(priv, "mlme_wait: waiting for 0x%.4X was aborted.\n", sig_reply_id);
- pcli->wake_up_wq_id = 0;
- return -EINTR;
- } else {
- /* Get the sequence number of the signal that we previously set. */
- if (pcli->seq_no != 0) {
- sent_seq_no = pcli->seq_no - 1;
- } else {
- sent_seq_no = 0x0F;
- }
-
- unifi_trace(priv, UDBG5, "Received 0x%.4X, seq: (r:%d, s:%d)\n",
- pcli->wake_up_wq_id,
- pcli->wake_seq_no, sent_seq_no);
-
- /* The two sequence ids must match. */
- if (pcli->wake_seq_no == sent_seq_no) {
- /* and the signal ids must match. */
- if (sig_reply_id == pcli->wake_up_wq_id) {
- /* Found the expected signal */
- break;
- } else {
- /* This should never happen ... */
- unifi_error(priv, "mlme_wait: mismatching signal id (0x%.4X - exp 0x%.4X) (seq %d)\n",
- pcli->wake_up_wq_id,
- sig_reply_id,
- pcli->wake_seq_no);
- pcli->wake_up_wq_id = 0;
- return -EIO;
- }
- }
- /* Wait for the next signal. */
- pcli->wake_up_wq_id = 0;
-
- retries ++;
- if (retries >= 3) {
- unifi_error(priv, "mlme_wait: confirm wait retries exhausted (0x%.4X - exp 0x%.4X)\n",
- pcli->wake_up_wq_id,
- sig_reply_id);
- pcli->wake_up_wq_id = 0;
- return -EIO;
- }
- }
- } while (1);
-
- pcli->wake_up_wq_id = 0;
-
- return 0;
-} /* unifi_mlme_wait_for_reply() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_mlme_blocking_request
- *
- * Send a MLME request signal to UniFi.
- *
- * Arguments:
- * priv Pointer to device private context struct
- * pcli Pointer to context of calling process
- * sig Pointer to the signal to send
- * data_ptrs Pointer to the bulk data of the signal
- * timeout The request's timeout.
- *
- * Returns:
- * 0 on success, a negative POSIX error code otherwise.
- * ---------------------------------------------------------------------------
- */
-int
-unifi_mlme_blocking_request(unifi_priv_t *priv, ul_client_t *pcli,
- CSR_SIGNAL *sig, bulk_data_param_t *data_ptrs,
- int timeout)
-{
- int r;
-
- if (sig->SignalPrimitiveHeader.SignalId == 0) {
- unifi_error(priv, "unifi_mlme_blocking_request: Invalid Signal Id (0x%x)\n",
- sig->SignalPrimitiveHeader.SignalId);
- return -EINVAL;
- }
-
- down(&priv->mlme_blocking_mutex);
-
- sig->SignalPrimitiveHeader.ReceiverProcessId = 0;
- sig->SignalPrimitiveHeader.SenderProcessId = pcli->sender_id | pcli->seq_no;
-
- unifi_trace(priv, UDBG2, "Send client=%d, S:0x%04X, sig 0x%.4X\n",
- pcli->client_id,
- sig->SignalPrimitiveHeader.SenderProcessId,
- sig->SignalPrimitiveHeader.SignalId);
- /* Send the signal to UniFi */
- r = ul_send_signal_unpacked(priv, sig, data_ptrs);
- if (r) {
- up(&priv->mlme_blocking_mutex);
- unifi_error(priv, "Error queueing MLME REQUEST signal\n");
- return r;
- }
-
- unifi_trace(priv, UDBG5, "Send 0x%.4X, seq = %d\n",
- sig->SignalPrimitiveHeader.SignalId, pcli->seq_no);
-
- /*
- * Advance the sequence number of the last sent signal, only
- * if the signal has been successfully sent.
- */
- pcli->seq_no++;
- if (pcli->seq_no > 0x0F) {
- pcli->seq_no = 0;
- }
-
- r = unifi_mlme_wait_for_reply(priv, pcli, (sig->SignalPrimitiveHeader.SignalId + 1), timeout);
- up(&priv->mlme_blocking_mutex);
-
- if (r) {
- unifi_error(priv, "Error waiting for MLME CONFIRM signal\n");
- return r;
- }
-
- return 0;
-} /* unifi_mlme_blocking_request() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_mlme_copy_reply_and_wakeup_client
- *
- * Copy the reply signal from UniFi to the client's structure
- * and wake up the waiting client.
- *
- * Arguments:
- * None.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-void
-unifi_mlme_copy_reply_and_wakeup_client(ul_client_t *pcli,
- CSR_SIGNAL *signal, int signal_len,
- const bulk_data_param_t *bulkdata)
-{
- int i;
-
- /* Copy the signal to the reply */
- memcpy(pcli->reply_signal, signal, signal_len);
-
- /* Get the sequence number of the signal that woke us up. */
- pcli->wake_seq_no = pcli->reply_signal->SignalPrimitiveHeader.ReceiverProcessId & 0x0F;
-
- /* Append any bulk data */
- for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; i++) {
- if (bulkdata->d[i].data_length > 0) {
- if (bulkdata->d[i].os_data_ptr) {
- memcpy(pcli->reply_bulkdata[i]->ptr, bulkdata->d[i].os_data_ptr, bulkdata->d[i].data_length);
- pcli->reply_bulkdata[i]->length = bulkdata->d[i].data_length;
- } else {
- pcli->reply_bulkdata[i]->length = 0;
- }
- }
- }
-
- /* Wake the requesting MLME function. */
- pcli->wake_up_wq_id = pcli->reply_signal->SignalPrimitiveHeader.SignalId;
- wake_up_interruptible(&pcli->udi_wq);
-
-} /* unifi_mlme_copy_reply_and_wakeup_client() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * uf_abort_mlme
- *
- * Abort any MLME operation in progress.
- * This is used in the error recovery mechanism.
- *
- * Arguments:
- * priv Pointer to driver context.
- *
- * Returns:
- * 0 on success.
- * ---------------------------------------------------------------------------
- */
-int
-uf_abort_mlme(unifi_priv_t *priv)
-{
- ul_client_t *ul_cli;
-
- /* Ensure no MLME functions are waiting on a the mlme_event semaphore. */
- priv->io_aborted = 1;
-
- ul_cli = priv->netdev_client;
- if (ul_cli) {
- wake_up_interruptible(&ul_cli->udi_wq);
- }
-
- ul_cli = priv->wext_client;
- if (ul_cli) {
- wake_up_interruptible(&ul_cli->udi_wq);
- }
-
- return 0;
-} /* uf_abort_mlme() */
-
-
-
-/*
- * ---------------------------------------------------------------------------
- *
- * Human-readable decoding of Reason and Result codes.
- *
- * ---------------------------------------------------------------------------
- */
-
-struct mlme_code {
- const char *name;
- int id;
-};
-
-static const struct mlme_code Result_codes[] = {
- { "Success", 0x0000 },
- { "Unspecified Failure", 0x0001 },
- /* (Reserved) 0x0002 - 0x0009 */
- { "Refused Capabilities Mismatch", 0x000A },
- /* (Reserved) 0x000B */
- { "Refused External Reason", 0x000C },
- /* (Reserved) 0x000D - 0x0010 */
- { "Refused AP Out Of Memory", 0x0011 },
- { "Refused Basic Rates Mismatch", 0x0012 },
- /* (Reserved) 0x0013 - 0x001F */
- { "Failure", 0x0020 },
- /* (Reserved) 0x0021 - 0x0024 */
- { "Refused Reason Unspecified", 0x0025 },
- { "Invalid Parameters", 0x0026 },
- { "Rejected With Suggested Changes", 0x0027 },
- /* (Reserved) 0x0028 - 0x002E */
- { "Rejected For Delay Period", 0x002F },
- { "Not Allowed", 0x0030 },
- { "Not Present", 0x0031 },
- { "Not QSTA", 0x0032 },
- /* (Reserved) 0x0033 - 0x7FFF */
- { "Timeout", 0x8000 },
- { "Too Many Simultaneous Requests", 0x8001 },
- { "BSS Already Started Or Joined", 0x8002 },
- { "Not Supported", 0x8003 },
- { "Transmission Failure", 0x8004 },
- { "Refused Not Authenticated", 0x8005 },
- { "Reset Required Before Start", 0x8006 },
- { "LM Info Unavailable", 0x8007 },
- { NULL, -1 }
-};
-
-static const struct mlme_code Reason_codes[] = {
- /* (Reserved) 0x0000 */
- { "Unspecified Reason", 0x0001 },
- { "Authentication Not Valid", 0x0002 },
- { "Deauthenticated Leave BSS", 0x0003 },
- { "Disassociated Inactivity", 0x0004 },
- { "AP Overload", 0x0005 },
- { "Class2 Frame Error", 0x0006 },
- { "Class3 Frame Error", 0x0007 },
- { "Disassociated Leave BSS", 0x0008 },
- { "Association Not Authenticated", 0x0009 },
- { "Disassociated Power Capability", 0x000A },
- { "Disassociated Supported Channels", 0x000B },
- /* (Reserved) 0x000C */
- { "Invalid Information Element", 0x000D },
- { "Michael MIC Failure", 0x000E },
- { "Fourway Handshake Timeout", 0x000F },
- { "Group Key Update Timeout", 0x0010 },
- { "Handshake Element Different", 0x0011 },
- { "Invalid Group Cipher", 0x0012 },
- { "Invalid Pairwise Cipher", 0x0013 },
- { "Invalid AKMP", 0x0014 },
- { "Unsupported RSN IE Version", 0x0015 },
- { "Invalid RSN IE Capabilities", 0x0016 },
- { "Dot1X Auth Failed", 0x0017 },
- { "Cipher Rejected By Policy", 0x0018 },
- /* (Reserved) 0x0019 - 0x001F */
- { "QoS Unspecified Reason", 0x0020 },
- { "QoS Insufficient Bandwidth", 0x0021 },
- { "QoS Excessive Not Ack", 0x0022 },
- { "QoS TXOP Limit Exceeded", 0x0023 },
- { "QSTA Leaving", 0x0024 },
- { "End TS, End DLS, End BA", 0x0025 },
- { "Unknown TS, Unknown DLS, Unknown BA", 0x0026 },
- { "Timeout", 0x0027 },
- /* (Reserved) 0x0028 - 0x002C */
- { "STAKey Mismatch", 0x002D },
- { NULL, -1 }
-};
-
-
-static const char *
-lookup_something(const struct mlme_code *n, int id)
-{
- for (; n->name; n++) {
- if (n->id == id) {
- return n->name;
- }
- }
-
- /* not found */
- return NULL;
-} /* lookup_something() */
-
-
-const char *
-lookup_result_code(int result)
-{
- static char fallback[16];
- const char *str;
-
- str = lookup_something(Result_codes, result);
-
- if (str == NULL) {
- snprintf(fallback, 16, "%d", result);
- str = fallback;
- }
-
- return str;
-} /* lookup_result_code() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * lookup_reason
- *
- * Return a description string for a WiFi MLME ReasonCode.
- *
- * Arguments:
- * reason The ReasonCode to interpret.
- *
- * Returns:
- * Pointer to description string.
- * ---------------------------------------------------------------------------
- */
-const char *
-lookup_reason_code(int reason)
-{
- static char fallback[16];
- const char *str;
-
- str = lookup_something(Reason_codes, reason);
-
- if (str == NULL) {
- snprintf(fallback, 16, "%d", reason);
- str = fallback;
- }
-
- return str;
-} /* lookup_reason_code() */
-
diff --git a/drivers/staging/csr/monitor.c b/drivers/staging/csr/monitor.c
deleted file mode 100644
index c8e20e4..0000000
--- a/drivers/staging/csr/monitor.c
+++ /dev/null
@@ -1,384 +0,0 @@
-/*
- * ---------------------------------------------------------------------------
- * FILE: monitor.c
- *
- * Copyright (C) 2006-2008 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ---------------------------------------------------------------------------
- */
-
-#include "unifi_priv.h"
-
-#ifdef UNIFI_SNIFF_ARPHRD
-
-
-#if (UNIFI_SNIFF_ARPHRD == ARPHRD_IEEE80211_RADIOTAP)
-#include <net/ieee80211_radiotap.h>
-#endif
-
-#ifndef ETH_P_80211_RAW
-#define ETH_P_80211_RAW ETH_P_ALL
-#endif
-
-/*
- * ---------------------------------------------------------------------------
- * uf_start_sniff
- *
- * Start UniFi capture in SNIFF mode, i.e. capture everything it hears.
- *
- * Arguments:
- * priv Pointer to device private context struct
- *
- * Returns:
- * 0 on success or kernel error code
- * ---------------------------------------------------------------------------
- */
-int
-uf_start_sniff(unifi_priv_t *priv)
-{
- ul_client_t *pcli = priv->wext_client;
- CSR_SIGNAL signal;
- CSR_MLME_SNIFFJOIN_REQUEST *req = &signal.u.MlmeSniffjoinRequest;
- int timeout = 1000;
- int r;
-
- req->Ifindex = priv->if_index;
- req->Channel = priv->wext_conf.channel;
- req->ChannelStartingFactor = 0;
-
- signal.SignalPrimitiveHeader.SignalId = CSR_MLME_SNIFFJOIN_REQUEST_ID;
-
- r = unifi_mlme_blocking_request(priv, pcli, &signal, NULL, timeout);
- if (r < 0) {
- unifi_error(priv, "failed to send SNIFFJOIN request, error %d\n", r);
- return r;
- }
-
- r = pcli->reply_signal->u.MlmeSniffjoinConfirm.Resultcode;
- if (r) {
- unifi_notice(priv, "SNIFFJOIN request was rejected with result 0x%X (%s)\n",
- r, lookup_result_code(r));
- return -EIO;
- }
-
- return 0;
-} /* uf_start_sniff() */
-
-
-
-/*
- * ---------------------------------------------------------------------------
- * netrx_radiotap
- *
- * Reformat a UniFi SNIFFDATA signal into a radiotap packet.
- *
- * Arguments:
- * priv OS private context pointer.
- * ind Pointer to the MA_SNIFFDATA indication structure.
- *
- * Notes:
- * Radiotap header values are all little-endian; UniFi signals will have
- * been converted to host-endian.
- * ---------------------------------------------------------------------------
- */
-#if (UNIFI_SNIFF_ARPHRD == ARPHRD_IEEE80211_RADIOTAP)
-static void
-netrx_radiotap(unifi_priv_t *priv,
- const CSR_MA_SNIFFDATA_INDICATION *ind,
- struct sk_buff *skb_orig)
-{
- struct net_device *dev = priv->netdev;
- struct sk_buff *skb = NULL;
- unsigned char *ptr;
- unsigned char *base;
- int ind_data_len = skb_orig->len - 2 - ETH_HLEN;
- struct unifi_rx_radiotap_header {
- struct ieee80211_radiotap_header rt_hdr;
- /* IEEE80211_RADIOTAP_TSFT */
- u64 rt_tsft;
- /* IEEE80211_RADIOTAP_FLAGS */
- u8 rt_flags;
- /* IEEE80211_RADIOTAP_RATE */
- u8 rt_rate;
- /* IEEE80211_RADIOTAP_CHANNEL */
- u16 rt_chan;
- u16 rt_chan_flags;
- /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
- u8 rt_dbm_antsignal;
- /* IEEE80211_RADIOTAP_DBM_ANTNOISE */
- u8 rt_dbm_antnoise;
- /* IEEE80211_RADIOTAP_ANTENNA */
- u8 rt_antenna;
-
- /* pad to 4-byte boundary */
- u8 pad[3];
- } __attribute__((__packed__));
-
- struct unifi_rx_radiotap_header *unifi_rt;
- int signal, noise, snr;
-
- if (ind_data_len <= 0) {
- unifi_error(priv, "Invalid length in CSR_MA_SNIFFDATA_INDICATION.\n");
- return;
- }
-
- /*
- * Allocate an skb for the received data packet, including the radiotap
- * header.
- */
- skb = dev_alloc_skb(ind_data_len + sizeof(struct unifi_rx_radiotap_header) + 4);
- if (! skb) {
- unifi_error(priv, "alloc_skb failed.\n");
- priv->stats.rx_errors++;
- return;
- }
-
- base = skb->data;
-
- /* Reserve the radiotap header at the front of skb */
- unifi_rt = (struct unifi_rx_radiotap_header *)
- skb_put(skb, sizeof(struct unifi_rx_radiotap_header));
-
- /* Copy in the 802.11 frame */
- ptr = skb_put(skb, ind_data_len);
- memcpy(ptr, skb_orig->data, ind_data_len);
-
- unifi_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
- unifi_rt->rt_hdr.it_pad = 0; /* always good to zero */
- unifi_rt->rt_hdr.it_len = sizeof(struct unifi_rx_radiotap_header);
-
- /* Big bitfield of all the fields we provide in radiotap */
- unifi_rt->rt_hdr.it_present = 0
- | (1 << IEEE80211_RADIOTAP_TSFT)
- | (1 << IEEE80211_RADIOTAP_FLAGS)
- | (1 << IEEE80211_RADIOTAP_RATE)
- | (1 << IEEE80211_RADIOTAP_CHANNEL)
- | (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL)
- | (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE)
- | (1 << IEEE80211_RADIOTAP_ANTENNA)
- ;
-
-
- /* No flags to set */
- unifi_rt->rt_tsft = (((u64)ind->Timestamp.x[7]) | (((u64)ind->Timestamp.x[6]) << 8) |
- (((u64)ind->Timestamp.x[5]) << 16) | (((u64)ind->Timestamp.x[4]) << 24) |
- (((u64)ind->Timestamp.x[3]) << 32) | (((u64)ind->Timestamp.x[2]) << 40) |
- (((u64)ind->Timestamp.x[1]) << 48) | (((u64)ind->Timestamp.x[0]) << 56));
-
- unifi_rt->rt_flags = 0;
-
- unifi_rt->rt_rate = ind->Rate;
-
- unifi_rt->rt_chan = cpu_to_le16(ieee80211chan2mhz(priv->wext_conf.channel));
- unifi_rt->rt_chan_flags = 0;
-
- /* Convert signal to dBm */
- signal = (s16)unifi2host_16(ind->Rssi); /* in dBm */
- snr = (s16)unifi2host_16(ind->Snr); /* in dB */
- noise = signal - snr;
-
- unifi_rt->rt_dbm_antsignal = signal;
- unifi_rt->rt_dbm_antnoise = noise;
-
- unifi_rt->rt_antenna = ind->AntennaId;
-
-
- skb->dev = dev;
- skb->mac_header = skb->data;
- skb->pkt_type = PACKET_OTHERHOST;
- skb->protocol = __constant_htons(ETH_P_80211_RAW);
- memset(skb->cb, 0, sizeof(skb->cb));
-
- /* Pass up to Linux network stack */
- netif_rx_ni(skb);
-
- dev->last_rx = jiffies;
-
- /* Bump the rx stats */
- priv->stats.rx_packets++;
- priv->stats.rx_bytes += ind_data_len;
-
-} /* netrx_radiotap() */
-#endif /* RADIOTAP */
-
-
-/*
- * ---------------------------------------------------------------------------
- * netrx_prism
- *
- * Reformat a UniFi SNIFFDATA signal into a Prism format sniff packet.
- *
- * Arguments:
- * priv OS private context pointer.
- * ind Pointer to the MA_SNIFFDATA indication structure.
- *
- * Notes:
- * AVS (Prism) header values are big-endian; UniFi signals will have
- * been converted to host-endian.
- * ---------------------------------------------------------------------------
- */
-#if (UNIFI_SNIFF_ARPHRD == ARPHRD_IEEE80211_PRISM)
-static void
-netrx_prism(unifi_priv_t *priv,
- const CSR_MA_SNIFFDATA_INDICATION *ind,
- struct sk_buff *skb_orig)
-{
- struct net_device *dev = priv->netdev;
- struct sk_buff *skb = NULL;
- unsigned char *ptr;
- unsigned char *base;
- int ind_data_len = skb_orig->len - 2 - ETH_HLEN;
-#define WLANCAP_MAGIC_COOKIE_V1 0x80211001
- struct avs_header_v1 {
- uint32 version;
- uint32 length;
- uint64 mactime;
- uint64 hosttime;
- uint32 phytype;
- uint32 channel;
- uint32 datarate;
- uint32 antenna;
- uint32 priority;
- uint32 ssi_type;
- int32 ssi_signal;
- int32 ssi_noise;
- uint32 preamble;
- uint32 encoding;
- } *avs;
- int signal, noise, snr;
-
- if (ind_data_len <= 0) {
- unifi_error(priv, "Invalid length in CSR_MA_SNIFFDATA_INDICATION.\n");
- return;
- }
-
- /*
- * Allocate an skb for the received data packet, including the AVS
- * (Prism) header.
- */
- skb = dev_alloc_skb(ind_data_len + sizeof(struct avs_header_v1) + 4);
- if (! skb) {
- unifi_error(priv, "alloc_skb failed.\n");
- priv->stats.rx_errors++;
- return;
- }
-
- base = skb->data;
-
- /* Reserve the AVS header at the front of the skb */
- avs = (struct avs_header_v1 *)skb_put(skb, sizeof(struct avs_header_v1));
-
- /* Copy in the 802.11 frame */
- ptr = skb_put(skb, ind_data_len);
- memcpy(ptr, skb_orig->data, ind_data_len);
-
- /* Convert signal to dBm */
- signal = 0x10000 - ((s16)unifi2host_16(ind->Rssi)); /* in dBm */
- snr = (s16)unifi2host_16(ind->Snr); /* in dB */
- noise = signal - snr;
-
- avs->version = htonl(WLANCAP_MAGIC_COOKIE_V1);
- avs->length = htonl(sizeof(struct avs_header_v1));
- avs->mactime = __cpu_to_be64(ind->Timestamp);
- avs->hosttime = __cpu_to_be64(jiffies);
- avs->phytype = htonl(9); /* dss_ofdm_dot11_g */
- avs->channel = htonl(priv->wext_conf.channel);
- avs->datarate = htonl(ind->Rate * 5);
- avs->antenna = htonl(ind->Antenna);
- avs->priority = htonl(0); /* unknown */
- avs->ssi_type = htonl(2); /* dBm */
- avs->ssi_signal = htonl(signal);
- avs->ssi_noise = htonl(noise);
- avs->preamble = htonl(0); /* unknown */
- avs->encoding = htonl(0); /* unknown */
-
-
- skb->dev = dev;
- skb->mac.raw = skb->data;
- skb->pkt_type = PACKET_OTHERHOST;
- skb->protocol = __constant_htons(ETH_P_80211_RAW);
- memset(skb->cb, 0, sizeof(skb->cb));
-
- /* Pass up to Linux network stack */
- netif_rx_ni(skb);
-
- dev->last_rx = jiffies;
-
- /* Bump the rx stats */
- priv->stats.rx_packets++;
- priv->stats.rx_bytes += ind_data_len;
-
-} /* netrx_prism() */
-#endif /* PRISM */
-
-
-/*
- * ---------------------------------------------------------------------------
- * ma_sniffdata_ind
- *
- * Reformat a UniFi SNIFFDATA signal into a network packet.
- *
- * Arguments:
- * ospriv OS private context pointer.
- * ind Pointer to the MA_SNIFFDATA indication structure.
- * bulkdata Pointer to a bulk data structure, describing
- * the data received.
- *
- * Notes:
- * Radiotap header values are all little-endian, UniFi signals will have
- * been converted to host-endian.
- * ---------------------------------------------------------------------------
- */
-void
-ma_sniffdata_ind(void *ospriv,
- const CSR_MA_SNIFFDATA_INDICATION *ind,
- const bulk_data_param_t *bulkdata)
-{
- unifi_priv_t *priv = ospriv;
- struct net_device *dev = priv->netdev;
- struct sk_buff *skb = (struct sk_buff*)bulkdata->d[0].os_net_buf_ptr;
-
- if (bulkdata->d[0].data_length == 0) {
- unifi_warning(priv, "rx: MA-SNIFFDATA indication with zero bulk data\n");
- return;
- }
-
- skb->len = bulkdata->d[0].data_length;
-
- /* We only process data packets if the interface is open */
- if (unlikely(!netif_running(dev))) {
- priv->stats.rx_dropped++;
- priv->wext_conf.wireless_stats.discard.misc++;
- dev_kfree_skb(skb);
- return;
- }
-
- if (ind->ReceptionStatus) {
- priv->stats.rx_dropped++;
- priv->wext_conf.wireless_stats.discard.misc++;
- printk(KERN_INFO "unifi: Dropping corrupt sniff packet\n");
- dev_kfree_skb(skb);
- return;
- }
-
-#if (UNIFI_SNIFF_ARPHRD == ARPHRD_IEEE80211_PRISM)
- netrx_prism(priv, ind, skb);
-#endif /* PRISM */
-
-#if (UNIFI_SNIFF_ARPHRD == ARPHRD_IEEE80211_RADIOTAP)
- netrx_radiotap(priv, ind, skb);
-#endif /* RADIOTAP */
-
- dev_kfree_skb(skb);
-
-} /* ma_sniffdata_ind() */
-
-
-#endif /* UNIFI_SNIFF_ARPHRD */
-
diff --git a/drivers/staging/csr/netdev.c b/drivers/staging/csr/netdev.c
deleted file mode 100644
index a0177d9..0000000
--- a/drivers/staging/csr/netdev.c
+++ /dev/null
@@ -1,3307 +0,0 @@
-/*
- * ---------------------------------------------------------------------------
- * FILE: netdev.c
- *
- * PURPOSE:
- * This file provides the upper edge interface to the linux netdevice
- * and wireless extensions.
- * It is part of the porting exercise.
- *
- * Copyright (C) 2005-2010 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ---------------------------------------------------------------------------
- */
-
-/*
- * Porting Notes:
- * This file implements the data plane of the UniFi linux driver.
- *
- * All the Tx packets are passed to the HIP core lib, using the
- * unifi_send_signal() API. For EAPOL packets use the MLME-EAPOL.req
- * signal, for all other use the MLME-UNITDATA.req. The unifi_send_signal()
- * expects the wire-formatted (packed) signal. For convenience, in the OS
- * layer we only use the native (unpacked) signal structures. The HIP core lib
- * provides the write_pack() helper function to convert to the packed signal.
- * The packet is stored in the bulk data of the signal. We do not need to
- * allocate new memory to store the packet, because unifi_net_data_malloc()
- * is implemented to return a skb, which is the format of packet in Linux.
- * The HIP core lib frees the bulk data buffers, so we do not need to do
- * this in the OS layer.
- *
- * All the Rx packets are MLME-UNITDATA.ind signals, passed by the HIP core lib
- * in unifi_receive_event(). We do not need to allocate an skb and copy the
- * received packet because the HIP core lib has stored it in memory allocated by
- * unifi_net_data_malloc(). Also, we can perform the 802.11 to Ethernet
- * translation in-place because the extra memory needed is allocated by
- * unifi_net_data_malloc().
- *
- * If possible, the porting exercise should appropriately implement
- * unifi_net_data_malloc() and unifi_net_data_free() to save copies between
- * network and driver buffers.
- */
-
-#include <linux/types.h>
-#include <linux/etherdevice.h>
-#include <linux/mutex.h>
-#include <linux/semaphore.h>
-#include <linux/vmalloc.h>
-#include "csr_wifi_hip_unifi.h"
-#include "csr_wifi_hip_conversions.h"
-#include "unifi_priv.h"
-#include <net/pkt_sched.h>
-
-
-/* Wext handler is supported only if CSR_SUPPORT_WEXT is defined */
-#ifdef CSR_SUPPORT_WEXT
-extern struct iw_handler_def unifi_iw_handler_def;
-#endif /* CSR_SUPPORT_WEXT */
-static void check_ba_frame_age_timeout( unifi_priv_t *priv,
- netInterface_priv_t *interfacePriv,
- ba_session_rx_struct *ba_session);
-static void process_ba_frame(unifi_priv_t *priv,
- netInterface_priv_t *interfacePriv,
- ba_session_rx_struct *ba_session,
- frame_desc_struct *frame_desc);
-static void process_ba_complete(unifi_priv_t *priv, netInterface_priv_t *interfacePriv);
-static void process_ma_packet_error_ind(unifi_priv_t *priv, CSR_SIGNAL *signal, bulk_data_param_t *bulkdata);
-static void process_amsdu(unifi_priv_t *priv, CSR_SIGNAL *signal, bulk_data_param_t *bulkdata);
-static int uf_net_open(struct net_device *dev);
-static int uf_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static int uf_net_stop(struct net_device *dev);
-static struct net_device_stats *uf_net_get_stats(struct net_device *dev);
-static u16 uf_net_select_queue(struct net_device *dev, struct sk_buff *skb);
-static netdev_tx_t uf_net_xmit(struct sk_buff *skb, struct net_device *dev);
-static void uf_set_multicast_list(struct net_device *dev);
-
-
-typedef int (*tx_signal_handler)(unifi_priv_t *priv, struct sk_buff *skb, const struct ethhdr *ehdr, CSR_PRIORITY priority);
-
-#ifdef CONFIG_NET_SCHED
-/*
- * Queueing Discipline Interface
- * Only used if kernel is configured with CONFIG_NET_SCHED
- */
-
-/*
- * The driver uses the qdisc interface to buffer and control all
- * outgoing traffic. We create a root qdisc, register our qdisc operations
- * and later we create two subsidiary pfifo queues for the uncontrolled
- * and controlled ports.
- *
- * The network stack delivers all outgoing packets in our enqueue handler.
- * There, we classify the packet and decide whether to store it or drop it
- * (if the controlled port state is set to "discard").
- * If the packet is enqueued, the network stack calls our dequeue handler.
- * There, we decide whether we can send the packet, delay it or drop it
- * (the controlled port configuration might have changed meanwhile).
- * If a packet is dequeued, then the network stack calls our hard_start_xmit
- * handler where finally we send the packet.
- *
- * If the hard_start_xmit handler fails to send the packet, we return
- * NETDEV_TX_BUSY and the network stack calls our requeue handler, where
- * we put the packet back in the same queue it came from.
- *
- */
-
-struct uf_sched_data
-{
- /* Traffic Classifier TBD */
- struct tcf_proto *filter_list;
- /* Our two queues */
- struct Qdisc *queues[UNIFI_TRAFFIC_Q_MAX];
-};
-
-struct uf_tx_packet_data {
- /* Queue the packet is stored in */
- unifi_TrafficQueue queue;
- /* QoS Priority determined when enqueuing the packet */
- CSR_PRIORITY priority;
- /* Debug */
- unsigned long host_tag;
-};
-
-#endif /* CONFIG_NET_SCHED */
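-
-/*
- * Sketch of the enqueue-time classification described above (illustrative
- * only; whether the driver keeps this state in skb->cb is an assumption):
- *
- *     struct uf_tx_packet_data *pkt = (struct uf_tx_packet_data *)skb->cb;
- *
- *     pkt->priority = get_packet_priority(priv, skb, ehdr, interfacePriv);
- *     pkt->queue    = unifi_frame_priority_to_queue(pkt->priority);
- *     pkt->host_tag = 0;
- *
- * The dequeue handler re-checks the controlled port state before the packet
- * finally reaches the hard_start_xmit handler, uf_net_xmit().
- */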
-
-static const struct net_device_ops uf_netdev_ops =
-{
- .ndo_open = uf_net_open,
- .ndo_stop = uf_net_stop,
- .ndo_start_xmit = uf_net_xmit,
- .ndo_do_ioctl = uf_net_ioctl,
- .ndo_get_stats = uf_net_get_stats, /* called by /proc/net/dev */
- .ndo_set_rx_mode = uf_set_multicast_list,
- .ndo_select_queue = uf_net_select_queue,
-};
-
-static u8 oui_rfc1042[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
-static u8 oui_8021h[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
-
-
-/* Callback for event logging to blocking clients */
-static void netdev_mlme_event_handler(ul_client_t *client,
- const u8 *sig_packed, int sig_len,
- const bulk_data_param_t *bulkdata,
- int dir);
-
-#ifdef CSR_SUPPORT_WEXT
-/* Declare netdev_notifier block which will contain the state change
- * handler callback function
- */
-static struct notifier_block uf_netdev_notifier;
-#endif
-
-/*
- * ---------------------------------------------------------------------------
- * uf_alloc_netdevice
- *
- * Allocate memory for the net_device and device private structs
- * for this interface.
- * Fill in the fields, but don't register the interface yet.
- * We need to configure the UniFi first.
- *
- * Arguments:
- * sdio_dev Pointer to SDIO context handle to use for all
- * SDIO ops.
- * bus_id A small number indicating the SDIO card position on the
- * bus. Typically this is the slot number, e.g. 0, 1 etc.
- * Valid values are 0 to MAX_UNIFI_DEVS-1.
- *
- * Returns:
- * Pointer to device private struct.
- *
- * Notes:
- * The net_device and device private structs are allocated together
- * and should be freed by freeing the net_device pointer.
- * ---------------------------------------------------------------------------
- */
-unifi_priv_t *
-uf_alloc_netdevice(CsrSdioFunction *sdio_dev, int bus_id)
-{
- struct net_device *dev;
- unifi_priv_t *priv;
- netInterface_priv_t *interfacePriv;
-#ifdef CSR_SUPPORT_WEXT
- int rc;
-#endif
- unsigned char i; /* loop index */
-
- /*
- * Allocate netdevice struct, assign name template and
- * setup as an ethernet device.
- * The net_device and private structs are zeroed. Ether_setup() then
- * sets up ethernet handlers and values.
- * The RedHat 9 redhat-config-network tool doesn't recognise wlan* devices,
- * so use "eth*" (like other wireless extns drivers).
- */
- dev = alloc_etherdev_mq(sizeof(unifi_priv_t) + sizeof(netInterface_priv_t), UNIFI_TRAFFIC_Q_MAX);
-
- if (dev == NULL) {
- return NULL;
- }
-
- /* Set up back pointer from priv to netdev */
- interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- priv = (unifi_priv_t *)(interfacePriv + 1);
- interfacePriv->privPtr = priv;
- interfacePriv->InterfaceTag = 0;
-
-
- /* Initialize all supported netdev interface to be NULL */
- for(i=0; i<CSR_WIFI_NUM_INTERFACES; i++) {
- priv->netdev[i] = NULL;
- priv->interfacePriv[i] = NULL;
- }
- priv->netdev[0] = dev;
- priv->interfacePriv[0] = interfacePriv;
-
- /* Setup / override net_device fields */
- dev->netdev_ops = &uf_netdev_ops;
-
-#ifdef CSR_SUPPORT_WEXT
- dev->wireless_handlers = &unifi_iw_handler_def;
-#if IW_HANDLER_VERSION < 6
- dev->get_wireless_stats = unifi_get_wireless_stats;
-#endif /* IW_HANDLER_VERSION */
-#endif /* CSR_SUPPORT_WEXT */
-
- /* This gives us enough headroom to add the 802.11 header */
- dev->needed_headroom = 32;
-
- /* Use bus_id as instance number */
- priv->instance = bus_id;
- /* Store SDIO pointer to pass in the core */
- priv->sdio = sdio_dev;
-
- sdio_dev->driverData = (void*)priv;
- /* Consider UniFi to be uninitialised */
- priv->init_progress = UNIFI_INIT_NONE;
-
- priv->prev_queue = 0;
-
- /*
- * Initialise the clients structure array.
- * We do not need protection around ul_init_clients() because the
- * character device cannot be used until uf_alloc_netdevice() returns
- * and Unifi_instances[bus_id]=priv is set; until then unifi_open()
- * will simply return -ENODEV.
- */
- ul_init_clients(priv);
-
- /*
- * Register a new ul client to send the multicast list signals.
- * Note: priv->instance must be set before calling this.
- */
- priv->netdev_client = ul_register_client(priv,
- 0,
- netdev_mlme_event_handler);
- if (priv->netdev_client == NULL) {
- unifi_error(priv,
- "Failed to register a unifi client for background netdev processing\n");
- free_netdev(priv->netdev[0]);
- return NULL;
- }
- unifi_trace(priv, UDBG2, "Netdev %p client (id:%d s:0x%X) is registered\n",
- dev, priv->netdev_client->client_id, priv->netdev_client->sender_id);
-
- priv->sta_wmm_capabilities = 0;
-
-#if (defined(CSR_WIFI_SECURITY_WAPI_ENABLE) && defined(CSR_SUPPORT_SME))
- priv->wapi_multicast_filter = 0;
- priv->wapi_unicast_filter = 0;
- priv->wapi_unicast_queued_pkt_filter = 0;
-#ifdef CSR_WIFI_SECURITY_WAPI_QOSCTRL_MIC_WORKAROUND
- priv->isWapiConnection = FALSE;
-#endif
-#endif
-
- /* Enable all queues by default */
- interfacePriv->queueEnabled[0] = 1;
- interfacePriv->queueEnabled[1] = 1;
- interfacePriv->queueEnabled[2] = 1;
- interfacePriv->queueEnabled[3] = 1;
-
-#ifdef CSR_SUPPORT_SME
- priv->allPeerDozing = 0;
-#endif
- /*
- * Initialise the OS private struct.
- */
- /*
- * Instead of deciding in advance to use 11bg or 11a, we could do a more
- * clever scan on both radios.
- */
- if (use_5g) {
- priv->if_index = CSR_INDEX_5G;
- unifi_info(priv, "Using the 802.11a radio\n");
- } else {
- priv->if_index = CSR_INDEX_2G4;
- }
-
- /* Initialise bh thread structure */
- priv->bh_thread.thread_task = NULL;
- priv->bh_thread.block_thread = 1;
- init_waitqueue_head(&priv->bh_thread.wakeup_q);
- priv->bh_thread.wakeup_flag = 0;
- sprintf(priv->bh_thread.name, "uf_bh_thread");
-
- /* reset the connected state for the interface */
- interfacePriv->connected = UnifiConnectedUnknown; /* -1 unknown, 0 no, 1 yes */
-
-#ifdef USE_DRIVER_LOCK
- sema_init(&priv->lock, 1);
-#endif /* USE_DRIVER_LOCK */
-
- spin_lock_init(&priv->send_signal_lock);
-
- spin_lock_init(&priv->m4_lock);
- sema_init(&priv->ba_mutex, 1);
-
-#if (defined(CSR_WIFI_SECURITY_WAPI_ENABLE) && defined(CSR_WIFI_SECURITY_WAPI_SW_ENCRYPTION))
- spin_lock_init(&priv->wapi_lock);
-#endif
-
-#ifdef CSR_SUPPORT_SME
- spin_lock_init(&priv->staRecord_lock);
- spin_lock_init(&priv->tx_q_lock);
-#endif
-
- /* Create the Traffic Analysis workqueue */
- priv->unifi_workqueue = create_singlethread_workqueue("unifi_workq");
- if (priv->unifi_workqueue == NULL) {
- /* Deregister priv->netdev_client */
- ul_deregister_client(priv->netdev_client);
- free_netdev(priv->netdev[0]);
- return NULL;
- }
-
-#ifdef CSR_SUPPORT_SME
- /* Create the Multicast Addresses list work structure */
- INIT_WORK(&priv->multicast_list_task, uf_multicast_list_wq);
-
- /* Create m4 buffering work structure */
- INIT_WORK(&interfacePriv->send_m4_ready_task, uf_send_m4_ready_wq);
-
-#if (defined(CSR_WIFI_SECURITY_WAPI_ENABLE) && defined(CSR_WIFI_SECURITY_WAPI_SW_ENCRYPTION))
- /* Create work structure to buffer the WAPI data packets to be sent to SME for encryption */
- INIT_WORK(&interfacePriv->send_pkt_to_encrypt, uf_send_pkt_to_encrypt);
-#endif
-#endif
-
- priv->ref_count = 1;
-
- priv->amp_client = NULL;
- priv->coredump_mode = 0;
- priv->ptest_mode = 0;
- priv->wol_suspend = FALSE;
- INIT_LIST_HEAD(&interfacePriv->rx_uncontrolled_list);
- INIT_LIST_HEAD(&interfacePriv->rx_controlled_list);
- sema_init(&priv->rx_q_sem, 1);
-
-#ifdef CSR_SUPPORT_WEXT
- interfacePriv->netdev_callback_registered = FALSE;
- interfacePriv->wait_netdev_change = FALSE;
- /* Register callback for netdevice state changes */
- if ((rc = register_netdevice_notifier(&uf_netdev_notifier)) == 0) {
- interfacePriv->netdev_callback_registered = TRUE;
- }
- else {
- unifi_warning(priv, "Failed to register netdevice notifier : %d %p\n", rc, dev);
- }
-#endif /* CSR_SUPPORT_WEXT */
-
-#ifdef CSR_WIFI_SPLIT_PATCH
- /* set it to some invalid value */
- priv->pending_mode_set.common.destination = 0xaaaa;
-#endif
-
- return priv;
-} /* uf_alloc_netdevice() */
-
-/*
- *---------------------------------------------------------------------------
- * uf_alloc_netdevice_for_other_interfaces
- *
- * Allocate memory for the net_device and device private structs
- * for this interface.
- * Fill in the fields, but don't register the interface yet.
- * We need to configure the UniFi first.
- *
- * Arguments:
- * interfaceTag Interface number.
- * sdio_dev Pointer to SDIO context handle to use for all
- * SDIO ops.
- * bus_id A small number indicating the SDIO card position on the
- * bus. Typically this is the slot number, e.g. 0, 1 etc.
- * Valid values are 0 to MAX_UNIFI_DEVS-1.
- *
- * Returns:
- * TRUE on success, FALSE on failure.
- *
- * Notes:
- * The device private structure contains the interfaceTag and a pointer
- * to the unifi_priv structure allocated with the net_device of interface 0.
- * The net_device and device private structs are allocated together
- * and should be freed by freeing the net_device pointer.
- * ---------------------------------------------------------------------------
- */
-u8
-uf_alloc_netdevice_for_other_interfaces(unifi_priv_t *priv, u16 interfaceTag)
-{
- struct net_device *dev;
- netInterface_priv_t *interfacePriv;
-
- /*
- * Allocate netdevice struct, assign name template and
- * setup as an ethernet device.
- * The net_device and private structs are zeroed. Ether_setup() then
- * sets up ethernet handlers and values.
- * The RedHat 9 redhat-config-network tool doesn't recognise wlan* devices,
- * so use "eth*" (like other wireless extns drivers).
- */
- dev = alloc_etherdev_mq(sizeof(netInterface_priv_t), 1);
- if (dev == NULL) {
- return FALSE;
- }
-
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "uf_alloc_netdevice_for_other_interfaces bad interfaceTag\n");
- return FALSE;
- }
-
- /* Set up back pointer from priv to netdev */
- interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- interfacePriv->privPtr = priv;
- interfacePriv->InterfaceTag = interfaceTag;
- priv->netdev[interfaceTag] = dev;
- priv->interfacePriv[interfacePriv->InterfaceTag] = interfacePriv;
-
- /* reset the connected state for the interface */
- interfacePriv->connected = UnifiConnectedUnknown; /* -1 unknown, 0 no, 1 yes */
- INIT_LIST_HEAD(&interfacePriv->rx_uncontrolled_list);
- INIT_LIST_HEAD(&interfacePriv->rx_controlled_list);
-
- /* Setup / override net_device fields */
- dev->netdev_ops = &uf_netdev_ops;
-
-#ifdef CSR_SUPPORT_WEXT
- dev->wireless_handlers = &unifi_iw_handler_def;
-#if IW_HANDLER_VERSION < 6
- dev->get_wireless_stats = unifi_get_wireless_stats;
-#endif /* IW_HANDLER_VERSION */
-#endif /* CSR_SUPPORT_WEXT */
- return TRUE;
-} /* uf_alloc_netdevice_for_other_interfaces() */
-
-
-
-/*
- * ---------------------------------------------------------------------------
- * uf_free_netdevice
- *
- * Unregister the network device and free the memory allocated for it.
- * NB This includes the memory for the priv struct.
- *
- * Arguments:
- * priv Device private pointer.
- *
- * Returns:
- * 0 on success, -EINVAL if priv is NULL.
- * ---------------------------------------------------------------------------
- */
-int
-uf_free_netdevice(unifi_priv_t *priv)
-{
- int i;
- unsigned long flags;
-
- unifi_trace(priv, UDBG1, "uf_free_netdevice\n");
-
- if (!priv) {
- return -EINVAL;
- }
-
- /*
- * Free any buffers used for holding firmware
- */
- uf_release_firmware_files(priv);
-
-#if (defined CSR_SUPPORT_SME) && (defined CSR_SUPPORT_WEXT)
- if (priv->connection_config.mlmeAssociateReqInformationElements) {
- kfree(priv->connection_config.mlmeAssociateReqInformationElements);
- }
- priv->connection_config.mlmeAssociateReqInformationElements = NULL;
- priv->connection_config.mlmeAssociateReqInformationElementsLength = 0;
-
- if (priv->mib_data.length) {
- vfree(priv->mib_data.data);
- }
- priv->mib_data.data = NULL;
- priv->mib_data.length = 0;
-
-#endif /* CSR_SUPPORT_SME && CSR_SUPPORT_WEXT*/
-
- /* Free any bulkdata buffers allocated for M4 caching */
- spin_lock_irqsave(&priv->m4_lock, flags);
- for (i = 0; i < CSR_WIFI_NUM_INTERFACES; i++) {
- netInterface_priv_t *interfacePriv = priv->interfacePriv[i];
- if (interfacePriv->m4_bulk_data.data_length > 0) {
- unifi_trace(priv, UDBG5, "uf_free_netdevice: free M4 bulkdata %d\n", i);
- unifi_net_data_free(priv, &interfacePriv->m4_bulk_data);
- }
- }
- spin_unlock_irqrestore(&priv->m4_lock, flags);
-
-#if (defined(CSR_WIFI_SECURITY_WAPI_ENABLE) && defined(CSR_WIFI_SECURITY_WAPI_SW_ENCRYPTION))
- /* Free any bulkdata buffers allocated for WAPI packet caching */
- spin_lock_irqsave(&priv->wapi_lock, flags);
- for (i = 0; i < CSR_WIFI_NUM_INTERFACES; i++) {
- netInterface_priv_t *interfacePriv = priv->interfacePriv[i];
- if (interfacePriv->wapi_unicast_bulk_data.data_length > 0) {
- unifi_trace(priv, UDBG5, "uf_free_netdevice: free WAPI PKT bulk data %d\n", i);
- unifi_net_data_free(priv, &interfacePriv->wapi_unicast_bulk_data);
- }
- }
- spin_unlock_irqrestore(&priv->wapi_lock, flags);
-#endif
-
-#ifdef CSR_SUPPORT_WEXT
- /* Unregister callback for netdevice state changes */
- unregister_netdevice_notifier(&uf_netdev_notifier);
-#endif /* CSR_SUPPORT_WEXT */
-
-#ifdef CSR_SUPPORT_SME
- /* Cancel work items and destroy the workqueue */
- cancel_work_sync(&priv->multicast_list_task);
-#endif
- /* Flush and destroy the workqueue. */
- flush_workqueue(priv->unifi_workqueue);
- destroy_workqueue(priv->unifi_workqueue);
-
- /* Free up netdev in reverse order: priv is allocated with netdev[0].
- * So, netdev[0] should be freed after all other netdevs are freed up
- */
- for (i=CSR_WIFI_NUM_INTERFACES-1; i>=0; i--) {
- /*Free the netdev struct and priv, which are all one lump*/
- if (priv->netdev[i]) {
- unifi_error(priv, "uf_free_netdevice: netdev %d %p\n", i, priv->netdev[i]);
- free_netdev(priv->netdev[i]);
- }
- }
-
- return 0;
-} /* uf_free_netdevice() */
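-
-/*
- * Typical pairing of uf_alloc_netdevice() and uf_free_netdevice() as seen
- * from the SDIO glue (a sketch only; error handling, interface registration
- * and the intervening lifetime are omitted):
- *
- *     unifi_priv_t *priv = uf_alloc_netdevice(sdio_dev, bus_id);
- *     if (priv == NULL)
- *         return -ENOMEM;
- *     ...
- *     uf_free_netdevice(priv);    frees all netdevs and priv in one go
- */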
-
-
-/*
- * ---------------------------------------------------------------------------
- * uf_net_open
- *
- * Called when userland does "ifconfig wlan0 up".
- *
- * Arguments:
- * dev Device pointer.
- *
- * Returns:
- * 0 on success, negative error code on failure.
- * ---------------------------------------------------------------------------
- */
-static int
-uf_net_open(struct net_device *dev)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
-
- /* If we haven't finished UniFi initialisation, we can't start */
- if (priv->init_progress != UNIFI_INIT_COMPLETED) {
- unifi_warning(priv, "%s: unifi not ready, failing net_open\n", __FUNCTION__);
- return -EINVAL;
- }
-
-#if (defined CSR_NATIVE_LINUX) && (defined UNIFI_SNIFF_ARPHRD) && defined(CSR_SUPPORT_WEXT)
- /*
- * To sniff, the user must do "iwconfig mode monitor", which sets
- * priv->wext_conf.mode to IW_MODE_MONITOR.
- * Then he/she must do "ifconfig ethn up", which calls this fn.
- * There is no point in starting the sniff with SNIFFJOIN until
- * this point.
- */
- if (priv->wext_conf.mode == IW_MODE_MONITOR) {
- int err;
- err = uf_start_sniff(priv);
- if (err) {
- return err;
- }
- netif_carrier_on(dev);
- }
-#endif
-
-#ifdef CSR_SUPPORT_WEXT
- if (interfacePriv->wait_netdev_change) {
- unifi_trace(priv, UDBG1, "%s: Waiting for NETDEV_CHANGE, assume connected\n",
- __FUNCTION__);
- interfacePriv->connected = UnifiConnected;
- interfacePriv->wait_netdev_change = FALSE;
- }
-#endif
-
- netif_tx_start_all_queues(dev);
-
- return 0;
-} /* uf_net_open() */
-
-
-static int
-uf_net_stop(struct net_device *dev)
-{
-#if defined(CSR_NATIVE_LINUX) && defined(UNIFI_SNIFF_ARPHRD) && defined(CSR_SUPPORT_WEXT)
- netInterface_priv_t *interfacePriv = (netInterface_priv_t*)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
-
- /* Stop sniffing if in Monitor mode */
- if (priv->wext_conf.mode == IW_MODE_MONITOR) {
- if (priv->card) {
- int err;
- err = unifi_reset_state(priv, dev->dev_addr, 1);
- if (err) {
- return err;
- }
- }
- }
-#endif
-
- netif_tx_stop_all_queues(dev);
-
- return 0;
-} /* uf_net_stop() */
-
-
-/* This is called after the WE handlers */
-static int
-uf_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- int rc;
-
- rc = -EOPNOTSUPP;
-
- return rc;
-} /* uf_net_ioctl() */
-
-
-
-static struct net_device_stats *
-uf_net_get_stats(struct net_device *dev)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
-
- return &interfacePriv->stats;
-} /* uf_net_get_stats() */
-
-static CSR_PRIORITY uf_get_packet_priority(unifi_priv_t *priv, netInterface_priv_t *interfacePriv, struct sk_buff *skb, const int proto)
-{
- CSR_PRIORITY priority = CSR_CONTENTION;
-
- priority = (CSR_PRIORITY) (skb->priority >> 5);
-
- if (priority == CSR_QOS_UP0) { /* 0 */
-
- unifi_trace(priv, UDBG5, "uf_get_packet_priority: proto = 0x%.4X\n", proto);
-
- switch (proto) {
- case 0x0800: /* IPv4 */
- case 0x814C: /* SNMP */
- case 0x880C: /* GSMP */
- priority = (CSR_PRIORITY) (skb->data[1 + ETH_HLEN] >> 5);
- break;
-
- case 0x8100: /* VLAN */
- priority = (CSR_PRIORITY) (skb->data[0 + ETH_HLEN] >> 5);
- break;
-
- case 0x86DD: /* IPv6 */
- priority = (CSR_PRIORITY) ((skb->data[0 + ETH_HLEN] & 0x0E) >> 1);
- break;
-
- default:
- priority = CSR_QOS_UP0;
- break;
- }
- }
-
- /* Check if we are allowed to transmit on this AC. Because of ACM we may have to downgrade to a lower
- * priority */
- if (interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_STA ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PCLI) {
- unifi_TrafficQueue queue;
-
- /* Keep trying lower priorities until we find a queue
- * Priority to queue mapping is 1,2 - BK, 0,3 - BE, 4,5 - VI, 6,7 - VO */
- queue = unifi_frame_priority_to_queue(priority);
-
- while (queue > UNIFI_TRAFFIC_Q_BK && !interfacePriv->queueEnabled[queue]) {
- queue--;
- priority = unifi_get_default_downgrade_priority(queue);
- }
- }
-
- unifi_trace(priv, UDBG5, "Packet priority = %d\n", priority);
-
- return priority;
-}
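-
-/*
- * Worked example for the IPv4 branch above: skb->data[ETH_HLEN + 1] is the
- * TOS/DSCP byte of the IP header, so a frame marked DSCP EF (0xB8) yields a
- * user priority of 0xB8 >> 5 = 5, which unifi_frame_priority_to_queue()
- * places on the video (VI) queue per the 4,5 - VI mapping noted above.
- */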
-
-/*
- * ---------------------------------------------------------------------------
- * get_packet_priority
- *
- * Arguments:
- * priv private data area of functional driver
- * skb socket buffer
- * ehdr ethernet header to fetch protocol
- * interfacePriv For accessing station record database
- *
- *
- * Returns:
- * CSR_PRIORITY.
- * ---------------------------------------------------------------------------
- */
-CSR_PRIORITY
-get_packet_priority(unifi_priv_t *priv, struct sk_buff *skb, const struct ethhdr *ehdr, netInterface_priv_t *interfacePriv)
-{
- CSR_PRIORITY priority = CSR_CONTENTION;
- const int proto = ntohs(ehdr->h_proto);
-
- u8 interfaceMode = interfacePriv->interfaceMode;
-
- /* Priority Mapping for all the Modes */
- switch(interfaceMode)
- {
- case CSR_WIFI_ROUTER_CTRL_MODE_STA:
- case CSR_WIFI_ROUTER_CTRL_MODE_P2PCLI:
- unifi_trace(priv, UDBG4, "mode is STA \n");
- if ((priv->sta_wmm_capabilities & QOS_CAPABILITY_WMM_ENABLED) == 1) {
- priority = uf_get_packet_priority(priv, interfacePriv, skb, proto);
- } else {
- priority = CSR_CONTENTION;
- }
- break;
-#ifdef CSR_SUPPORT_SME
- case CSR_WIFI_ROUTER_CTRL_MODE_AP:
- case CSR_WIFI_ROUTER_CTRL_MODE_P2PGO:
- case CSR_WIFI_ROUTER_CTRL_MODE_IBSS:
- {
- CsrWifiRouterCtrlStaInfo_t * dstStaInfo =
- CsrWifiRouterCtrlGetStationRecordFromPeerMacAddress(priv,ehdr->h_dest, interfacePriv->InterfaceTag);
- unifi_trace(priv, UDBG4, "mode is AP \n");
- if (!(ehdr->h_dest[0] & 0x01) && dstStaInfo && dstStaInfo->wmmOrQosEnabled) {
- /* If packet is not Broadcast/multicast */
- priority = uf_get_packet_priority(priv, interfacePriv, skb, proto);
- } else {
- /* Since packet destination is not QSTA, set priority to CSR_CONTENTION */
- unifi_trace(priv, UDBG4, "Destination is not QSTA or BroadCast/Multicast\n");
- priority = CSR_CONTENTION;
- }
- }
- break;
-#endif
- default:
- unifi_trace(priv, UDBG3, " mode unknown in %s func, mode=%x\n", __FUNCTION__, interfaceMode);
- }
- unifi_trace(priv, UDBG5, "priority = %x\n", priority);
-
- return priority;
-}
-
-/*
- * ---------------------------------------------------------------------------
- * uf_net_select_queue
- *
- * Called by the kernel to select which queue to put the packet in
- *
- * Arguments:
- * dev Device pointer
- * skb Packet
- *
- * Returns:
- * Queue index
- * ---------------------------------------------------------------------------
- */
-static u16
-uf_net_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = (unifi_priv_t *)interfacePriv->privPtr;
- struct ethhdr ehdr;
- unifi_TrafficQueue queue;
- int proto;
- CSR_PRIORITY priority;
-
- memcpy(&ehdr, skb->data, ETH_HLEN);
- proto = ntohs(ehdr.h_proto);
-
- /* 802.1x - apply controlled/uncontrolled port rules */
- if ((proto != ETH_P_PAE)
-#ifdef CSR_WIFI_SECURITY_WAPI_ENABLE
- && (proto != ETH_P_WAI)
-#endif
- ) {
- /* queues 0 - 3 */
- priority = get_packet_priority(priv, skb, &ehdr, interfacePriv);
- queue = unifi_frame_priority_to_queue(priority);
- } else {
- /* queue 4 */
- queue = UNIFI_TRAFFIC_Q_EAPOL;
- }
-
-
- return (u16)queue;
-} /* uf_net_select_queue() */
-
-int
-skb_add_llc_snap(struct net_device *dev, struct sk_buff *skb, int proto)
-{
- llc_snap_hdr_t *snap;
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- int headroom;
-
- /* get the headroom available in skb */
- headroom = skb_headroom(skb);
- /* step 1: classify ether frame, DIX or 802.3? */
-
- if (proto < 0x600) {
- /* codes <= 1500 reserved for 802.3 lengths */
- /* it's 802.3, pass ether payload unchanged, */
- unifi_trace(priv, UDBG3, "802.3 len: %d\n", skb->len);
-
- /* leave off any PAD octets. */
- skb_trim(skb, proto);
- } else if (proto == ETH_P_8021Q) {
-
- /* Store the VLAN SNAP (should be 87-65). */
- u16 vlan_snap = *(u16*)skb->data;
- /* check for headroom availability before skb_push 14 = (4 + 10) */
- if (headroom < 14) {
- unifi_trace(priv, UDBG3, "cant append vlan snap: debug\n");
- return -1;
- }
- /* Add AA-AA-03-00-00-00 */
- snap = (llc_snap_hdr_t *)skb_push(skb, 4);
- snap->dsap = snap->ssap = 0xAA;
- snap->ctrl = 0x03;
- memcpy(snap->oui, oui_rfc1042, P80211_OUI_LEN);
-
- /* Add AA-AA-03-00-00-00 */
- snap = (llc_snap_hdr_t *)skb_push(skb, 10);
- snap->dsap = snap->ssap = 0xAA;
- snap->ctrl = 0x03;
- memcpy(snap->oui, oui_rfc1042, P80211_OUI_LEN);
-
- /* Add the VLAN specific information */
- snap->protocol = htons(proto);
- *(u16*)(snap + 1) = vlan_snap;
-
- } else
- {
- /* it's DIXII, time for some conversion */
- unifi_trace(priv, UDBG3, "DIXII len: %d\n", skb->len);
-
- /* check for headroom availability before skb_push */
- if (headroom < sizeof(llc_snap_hdr_t)) {
- unifi_trace(priv, UDBG3, "cant append snap: debug\n");
- return -1;
- }
- /* tack on SNAP */
- snap = (llc_snap_hdr_t *)skb_push(skb, sizeof(llc_snap_hdr_t));
- snap->dsap = snap->ssap = 0xAA;
- snap->ctrl = 0x03;
- /* Use the appropriate OUI. */
- if ((proto == ETH_P_AARP) || (proto == ETH_P_IPX)) {
- memcpy(snap->oui, oui_8021h, P80211_OUI_LEN);
- } else {
- memcpy(snap->oui, oui_rfc1042, P80211_OUI_LEN);
- }
- snap->protocol = htons(proto);
- }
-
- return 0;
-} /* skb_add_llc_snap() */
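-
-/*
- * For example, a DIX IPv4 frame (h_proto 0x0800) gains the 8-byte RFC 1042
- * header AA-AA-03-00-00-00-08-00 in front of the IP payload, while AARP and
- * IPX frames get the Bridge-Tunnel (802.1H) OUI 00-00-F8 instead, as
- * selected by the OUI test above.
- */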
-
-#ifdef CSR_SUPPORT_SME
-static int
-_identify_sme_ma_pkt_ind(unifi_priv_t *priv,
- const s8 *oui, u16 protocol,
- const CSR_SIGNAL *signal,
- bulk_data_param_t *bulkdata,
- const unsigned char *daddr,
- const unsigned char *saddr)
-{
- CSR_MA_PACKET_INDICATION *pkt_ind = (CSR_MA_PACKET_INDICATION*)&signal->u.MaPacketIndication;
- int r;
- u8 i;
-
- unifi_trace(priv, UDBG5,
- "_identify_sme_ma_pkt_ind -->\n");
- for (i = 0; i < MAX_MA_UNIDATA_IND_FILTERS; i++) {
- if (priv->sme_unidata_ind_filters[i].in_use) {
- if (!memcmp(oui, priv->sme_unidata_ind_filters[i].oui, 3) &&
- (protocol == priv->sme_unidata_ind_filters[i].protocol)) {
-
- /* Send to client */
- if (priv->sme_cli) {
- /*
- * Pass the packet to the SME, using unifi_sys_ma_unitdata_ind().
- * The frame needs to be converted according to the encapsulation.
- */
- unifi_trace(priv, UDBG1,
- "_identify_sme_ma_pkt_ind: handle=%d, encap=%d, proto=%x\n",
- i, priv->sme_unidata_ind_filters[i].encapsulation,
- priv->sme_unidata_ind_filters[i].protocol);
- if (priv->sme_unidata_ind_filters[i].encapsulation == CSR_WIFI_ROUTER_ENCAPSULATION_ETHERNET) {
- struct sk_buff *skb;
- /* The translation is performed on skb... */
- skb = (struct sk_buff*)bulkdata->d[0].os_net_buf_ptr;
- skb->len = bulkdata->d[0].data_length;
-
- unifi_trace(priv, UDBG1,
- "_identify_sme_ma_pkt_ind: skb_80211_to_ether -->\n");
- r = skb_80211_to_ether(priv, skb, daddr, saddr,
- signal, bulkdata);
- unifi_trace(priv, UDBG1,
- "_identify_sme_ma_pkt_ind: skb_80211_to_ether <--\n");
- if (r) {
- return -EINVAL;
- }
-
- /* ... but we indicate buffer and length */
- bulkdata->d[0].os_data_ptr = skb->data;
- bulkdata->d[0].data_length = skb->len;
- } else {
- /* Add the MAC addresses before the SNAP */
- bulkdata->d[0].os_data_ptr -= 2*ETH_ALEN;
- bulkdata->d[0].data_length += 2*ETH_ALEN;
- memcpy((void*)bulkdata->d[0].os_data_ptr, daddr, ETH_ALEN);
- memcpy((void*)bulkdata->d[0].os_data_ptr + ETH_ALEN, saddr, ETH_ALEN);
- }
-
- unifi_trace(priv, UDBG1,
- "_identify_sme_ma_pkt_ind: unifi_sys_ma_pkt_ind -->\n");
- CsrWifiRouterMaPacketIndSend(priv->sme_unidata_ind_filters[i].appHandle,
- (pkt_ind->VirtualInterfaceIdentifier & 0xff),
- i,
- pkt_ind->ReceptionStatus,
- bulkdata->d[0].data_length,
- (u8*)bulkdata->d[0].os_data_ptr,
- NULL,
- pkt_ind->Rssi,
- pkt_ind->Snr,
- pkt_ind->ReceivedRate);
-
-
- unifi_trace(priv, UDBG1,
- "_identify_sme_ma_pkt_ind: unifi_sys_ma_pkt_ind <--\n");
- }
-
- return 1;
- }
- }
- }
-
- return -1;
-}
-#endif /* CSR_SUPPORT_SME */
-
-/*
- * ---------------------------------------------------------------------------
- * skb_80211_to_ether
- *
- * Make sure the received frame is in Ethernet (802.3) form.
- * De-encapsulates SNAP if necessary and adds an Ethernet header.
- * The source buffer should not contain an 802.11 MAC header.
- *
- * Arguments:
- * priv Pointer to device private context struct.
- * skb Socket buffer holding the received packet data.
- * daddr Destination MAC address.
- * saddr Source MAC address.
- * signal Received MA-PACKET.ind signal.
- * bulkdata Bulk data descriptor for the packet buffer.
- *
- * Returns:
- * 0 on success, -1 if the packet is bad and should be dropped,
- * 1 if the packet was forwarded to the SME or AMP client.
- * ---------------------------------------------------------------------------
- */
-int
-skb_80211_to_ether(unifi_priv_t *priv, struct sk_buff *skb,
- const unsigned char *daddr, const unsigned char *saddr,
- const CSR_SIGNAL *signal,
- bulk_data_param_t *bulkdata)
-{
- unsigned char *payload;
- int payload_length;
- struct ethhdr *eth;
- llc_snap_hdr_t *snap;
- int headroom;
-#define UF_VLAN_LLC_HEADER_SIZE 18
- static const u8 vlan_inner_snap[] = { 0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00 };
-#if defined(CSR_NATIVE_SOFTMAC) && defined(CSR_SUPPORT_SME)
- const CSR_MA_PACKET_INDICATION *pkt_ind = &signal->u.MaPacketIndication;
-#endif
-
- if(skb== NULL || daddr == NULL || saddr == NULL){
- unifi_error(priv,"skb_80211_to_ether: PBC fail\n");
- return 1;
- }
-
- payload = skb->data;
- payload_length = skb->len;
-
- snap = (llc_snap_hdr_t *)payload;
- eth = (struct ethhdr *)payload;
-
- /* get the skb headroom size */
- headroom = skb_headroom(skb);
-
- /*
- * Test for the various encodings
- */
- if ((payload_length >= sizeof(llc_snap_hdr_t)) &&
- (snap->dsap == 0xAA) &&
- (snap->ssap == 0xAA) &&
- (snap->ctrl == 0x03) &&
- (snap->oui[0] == 0) &&
- (snap->oui[1] == 0) &&
- ((snap->oui[2] == 0) || (snap->oui[2] == 0xF8)))
- {
- /* AppleTalk AARP (2) or IPX SNAP */
- if ((snap->oui[2] == 0) &&
- ((ntohs(snap->protocol) == ETH_P_AARP) || (ntohs(snap->protocol) == ETH_P_IPX)))
- {
- u16 len;
-
- unifi_trace(priv, UDBG3, "%s len: %d\n",
- (ntohs(snap->protocol) == ETH_P_AARP) ? "ETH_P_AARP" : "ETH_P_IPX",
- payload_length);
-
- /* check for headroom availability before skb_push */
- if (headroom < (2 * ETH_ALEN + 2)) {
- unifi_warning(priv, "headroom not available to skb_push ether header\n");
- return -1;
- }
-
- /* Add 802.3 header and leave full payload */
- len = htons(skb->len);
- memcpy(skb_push(skb, 2), &len, 2);
- memcpy(skb_push(skb, ETH_ALEN), saddr, ETH_ALEN);
- memcpy(skb_push(skb, ETH_ALEN), daddr, ETH_ALEN);
-
- return 0;
- }
- /* VLAN-tagged IP */
- if ((snap->oui[2] == 0) && (ntohs(snap->protocol) == ETH_P_8021Q))
- {
- /*
- * The translation doesn't change the packet length, so is done in-place.
- *
- * Example header (from Std 802.11-2007 Annex M):
- * AA-AA-03-00-00-00-81-00-87-65-AA-AA-03-00-00-00-08-06
- * -------SNAP-------p1-p1-ll-ll-------SNAP--------p2-p2
- * dd-dd-dd-dd-dd-dd-aa-aa-aa-aa-aa-aa-p1-p1-ll-ll-p2-p2
- * dd-dd-dd-dd-dd-dd-aa-aa-aa-aa-aa-aa-81-00-87-65-08-06
- */
- u16 vlan_snap;
-
- if (payload_length < UF_VLAN_LLC_HEADER_SIZE) {
- unifi_warning(priv, "VLAN SNAP header too short: %d bytes\n", payload_length);
- return -1;
- }
-
- if (memcmp(payload + 10, vlan_inner_snap, 6)) {
- unifi_warning(priv, "VLAN malformatted SNAP header.\n");
- return -1;
- }
-
- unifi_trace(priv, UDBG3, "VLAN SNAP: %02x-%02x\n", payload[8], payload[9]);
- unifi_trace(priv, UDBG3, "VLAN len: %d\n", payload_length);
-
- /* Create the 802.3 header */
-
- vlan_snap = *((u16*)(payload + 8));
-
- /* Create LLC header without byte-swapping */
- eth->h_proto = snap->protocol;
-
- memcpy(eth->h_dest, daddr, ETH_ALEN);
- memcpy(eth->h_source, saddr, ETH_ALEN);
- *(u16*)(eth + 1) = vlan_snap;
- return 0;
- }
-
- /* it's a SNAP + RFC1042 frame */
- unifi_trace(priv, UDBG3, "SNAP+RFC1042 len: %d\n", payload_length);
-
- /* chop SNAP+llc header from skb. */
- skb_pull(skb, sizeof(llc_snap_hdr_t));
-
- /* Since skb_pull called above to chop snap+llc, no need to check for headroom
- * availability before skb_push
- */
- /* create 802.3 header at beginning of skb. */
- eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
- memcpy(eth->h_dest, daddr, ETH_ALEN);
- memcpy(eth->h_source, saddr, ETH_ALEN);
- /* Copy protocol field without byte-swapping */
- eth->h_proto = snap->protocol;
- } else {
- u16 len;
-
- /* check for headroom availability before skb_push */
- if (headroom < (2 * ETH_ALEN + 2)) {
- unifi_warning(priv, "headroom not available to skb_push ether header\n");
- return -1;
- }
- /* Add 802.3 header and leave full payload */
- len = htons(skb->len);
- memcpy(skb_push(skb, 2), &len, 2);
- memcpy(skb_push(skb, ETH_ALEN), saddr, ETH_ALEN);
- memcpy(skb_push(skb, ETH_ALEN), daddr, ETH_ALEN);
-
- return 1;
- }
-
- return 0;
-} /* skb_80211_to_ether() */
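-
-/*
- * Worked example of the common SNAP + RFC 1042 branch above: a received ARP
- * payload AA-AA-03-00-00-00-08-06-<ARP body> loses its 8-byte LLC/SNAP
- * header and gains a 14-byte Ethernet header, leaving
- * <DA(6)>-<SA(6)>-08-06-<ARP body> for the network stack.
- */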
-
-
-static CsrWifiRouterCtrlPortAction verify_port(unifi_priv_t *priv, unsigned char *address, int queue, u16 interfaceTag)
-{
-#ifdef CSR_NATIVE_LINUX
-#ifdef CSR_SUPPORT_WEXT
- if (queue == UF_CONTROLLED_PORT_Q) {
- return priv->wext_conf.block_controlled_port;
- } else {
- return CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_OPEN;
- }
-#else
- return CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_OPEN; /* default to open for softmac dev */
-#endif
-#else
- return uf_sme_port_state(priv, address, queue, interfaceTag);
-#endif
-}
-
-/*
- * ---------------------------------------------------------------------------
- * prepare_and_add_macheader
- *
- *
- * This function adds a MAC header to a packet from the netdev
- * before passing it to UniFi for transmission.
- * EAP protocol packets also have a MAC header appended and are
- * sent using send_ma_pkt_request().
- *
- * Arguments:
- * priv Pointer to device private context struct
- * skb Socket buffer containing data packet to transmit
- * newSkb Socket buffer containing the data packet plus MAC header, used when skb has insufficient headroom
- * serviceClass to append QOS control header in Mac header
- * bulkdata if newSkb allocated then bulkdata updated to send to unifi
- * interfaceTag the interfaceID on which activity going on
- * daddr destination address
- * saddr source address
- * protection protection bit to set in the frame control field of the MAC header
- *
- * Returns:
- * Zero on success or error code.
- * ---------------------------------------------------------------------------
- */
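-
-/*
- * Address fields built by the switch statements below, per direction
- * (following the IEEE 802.11 To DS/From DS rules):
- *
- *     direction 0 (IBSS):        A1 = DA,    A2 = SA,    A3 = BSSID
- *     direction 1 (AP/P2P GO):   A1 = DA,    A2 = BSSID, A3 = SA
- *     direction 2 (STA/P2P CLI): A1 = BSSID, A2 = SA,    A3 = DA
- *     direction 3 (AMP, WDS):    A1 = BSSID, A2 = SA,    A3 = DA, A4 = SA
- */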
-
-int prepare_and_add_macheader(unifi_priv_t *priv, struct sk_buff *skb, struct sk_buff *newSkb,
- CSR_PRIORITY priority,
- bulk_data_param_t *bulkdata,
- u16 interfaceTag,
- const u8 *daddr,
- const u8 *saddr,
- u8 protection)
-{
- u16 fc = 0;
- u8 qc = 0;
- u8 macHeaderLengthInBytes = MAC_HEADER_SIZE, *bufPtr = NULL;
- bulk_data_param_t data_ptrs;
- CsrResult csrResult;
- int headroom =0;
- u8 direction = 0;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
- u8 *addressOne;
- u8 bQosNull = false;
-
- if (skb == NULL) {
- unifi_error(priv,"prepare_and_add_macheader: Invalid SKB reference\n");
- return -1;
- }
-
- /* add a MAC header refer: 7.1.3.1 Frame Control field in P802.11REVmb.book */
- if (priority != CSR_CONTENTION) {
- /* EAPOL packets don't go as QOS_DATA */
- if (priority == CSR_MANAGEMENT) {
- fc |= cpu_to_le16(IEEE802_11_FC_TYPE_DATA);
- } else {
- /* Qos Control Field */
- macHeaderLengthInBytes += QOS_CONTROL_HEADER_SIZE;
-
- if (skb->len) {
-
- fc |= cpu_to_le16(IEEE802_11_FC_TYPE_QOS_DATA);
- } else {
- fc |= cpu_to_le16(IEEE802_11_FC_TYPE_QOS_NULL);
- bQosNull = true;
- }
- }
- } else {
- if(skb->len == 0) {
- fc |= cpu_to_le16(IEEE802_11_FC_TYPE_NULL);
- } else {
- fc |= cpu_to_le16(IEEE802_11_FC_TYPE_DATA);
- }
- }
-
- switch (interfacePriv->interfaceMode)
- {
- case CSR_WIFI_ROUTER_CTRL_MODE_STA:
- case CSR_WIFI_ROUTER_CTRL_MODE_P2PCLI:
- direction = 2;
- fc |= cpu_to_le16(IEEE802_11_FC_TO_DS_MASK);
- break;
- case CSR_WIFI_ROUTER_CTRL_MODE_IBSS:
- direction = 0;
- break;
- case CSR_WIFI_ROUTER_CTRL_MODE_AP:
- case CSR_WIFI_ROUTER_CTRL_MODE_P2PGO:
- direction = 1;
- fc |= cpu_to_le16(IEEE802_11_FC_FROM_DS_MASK);
- break;
- case CSR_WIFI_ROUTER_CTRL_MODE_AMP:
- if (priority == CSR_MANAGEMENT ) {
-
- direction = 2;
- fc |= cpu_to_le16(IEEE802_11_FC_TO_DS_MASK);
- } else {
- /* Data frames have to use WDS 4 address frames */
- direction = 3;
- fc |= cpu_to_le16(IEEE802_11_FC_TO_DS_MASK | IEEE802_11_FC_FROM_DS_MASK);
- macHeaderLengthInBytes += 6;
- }
- break;
- default:
- unifi_warning(priv, "prepare_and_add_macheader: Unknown mode %d\n",
- interfacePriv->interfaceMode);
- }
-
-
- /* If Sta is QOS & HTC is supported then need to set 'order' bit */
- /* We don't support HT Control for now */
-
- if(protection) {
- fc |= cpu_to_le16(IEEE802_11_FC_PROTECTED_MASK);
- }
-
- /* check the skb headroom before pushing mac header */
- headroom = skb_headroom(skb);
-
- if (headroom < macHeaderLengthInBytes) {
- unifi_trace(priv, UDBG5,
- "prepare_and_add_macheader: Allocate headroom extra %d bytes\n",
- macHeaderLengthInBytes);
-
- csrResult = unifi_net_data_malloc(priv, &data_ptrs.d[0], skb->len + macHeaderLengthInBytes);
-
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(priv, " failed to allocate request_data. in %s func\n", __FUNCTION__);
- return -1;
- }
- newSkb = (struct sk_buff *)(data_ptrs.d[0].os_net_buf_ptr);
- newSkb->len = skb->len + macHeaderLengthInBytes;
-
- memcpy((void*)data_ptrs.d[0].os_data_ptr + macHeaderLengthInBytes,
- skb->data, skb->len);
-
- bulkdata->d[0].os_data_ptr = newSkb->data;
- bulkdata->d[0].os_net_buf_ptr = (unsigned char*)newSkb;
- bulkdata->d[0].data_length = newSkb->len;
-
- bufPtr = (u8*)data_ptrs.d[0].os_data_ptr;
-
- /* The old skb will not be used again */
- kfree_skb(skb);
- } else {
-
- /* headroom has sufficient size, so will get proper pointer */
- bufPtr = (u8*)skb_push(skb, macHeaderLengthInBytes);
- bulkdata->d[0].os_data_ptr = skb->data;
- bulkdata->d[0].os_net_buf_ptr = (unsigned char*)skb;
- bulkdata->d[0].data_length = skb->len;
- }
-
- /* Frame the actual MAC header */
-
- memset(bufPtr, 0, macHeaderLengthInBytes);
-
- /* copy frameControl field */
- memcpy(bufPtr, &fc, sizeof(fc));
- bufPtr += sizeof(fc);
- macHeaderLengthInBytes -= sizeof(fc);
-
- /* Duration/ID field which is 2 bytes */
- bufPtr += 2;
- macHeaderLengthInBytes -= 2;
-
- switch(direction)
- {
- case 0:
- /* It's an Ad-Hoc network, so there is no need to route it through an AP */
- /* Address1: MAC address of the destination from eth header */
- memcpy(bufPtr, daddr, ETH_ALEN);
- bufPtr += ETH_ALEN;
- macHeaderLengthInBytes -= ETH_ALEN;
-
- /* Address2: MAC address of the source */
- memcpy(bufPtr, saddr, ETH_ALEN);
- bufPtr += ETH_ALEN;
- macHeaderLengthInBytes -= ETH_ALEN;
-
- /* Address3: the BSSID (locally generated in AdHoc (creators Bssid)) */
- memcpy(bufPtr, &interfacePriv->bssid, ETH_ALEN);
- bufPtr += ETH_ALEN;
- macHeaderLengthInBytes -= ETH_ALEN;
- break;
- case 1:
- /* Address1: MAC address of the actual destination */
- memcpy(bufPtr, daddr, ETH_ALEN);
- bufPtr += ETH_ALEN;
- macHeaderLengthInBytes -= ETH_ALEN;
- /* Address2: The MAC address of the AP */
- memcpy(bufPtr, &interfacePriv->bssid, ETH_ALEN);
- bufPtr += ETH_ALEN;
- macHeaderLengthInBytes -= ETH_ALEN;
-
- /* Address3: MAC address of the source from eth header */
- memcpy(bufPtr, saddr, ETH_ALEN);
- bufPtr += ETH_ALEN;
- macHeaderLengthInBytes -= ETH_ALEN;
- break;
- case 2:
- /* Address1: To AP is the MAC address of the AP to which its associated */
- memcpy(bufPtr, &interfacePriv->bssid, ETH_ALEN);
- bufPtr += ETH_ALEN;
- macHeaderLengthInBytes -= ETH_ALEN;
-
- /* Address2: MAC address of the source from eth header */
- memcpy(bufPtr, saddr, ETH_ALEN);
- bufPtr += ETH_ALEN;
- macHeaderLengthInBytes -= ETH_ALEN;
-
- /* Address3: MAC address of the actual destination on the distribution system */
- memcpy(bufPtr, daddr, ETH_ALEN);
- bufPtr += ETH_ALEN;
- macHeaderLengthInBytes -= ETH_ALEN;
- break;
- case 3:
- memcpy(bufPtr, &interfacePriv->bssid, ETH_ALEN);
- bufPtr += ETH_ALEN;
- macHeaderLengthInBytes -= ETH_ALEN;
-
- /* Address2: MAC address of the source from eth header */
- memcpy(bufPtr, saddr, ETH_ALEN);
- bufPtr += ETH_ALEN;
- macHeaderLengthInBytes -= ETH_ALEN;
-
- /* Address3: MAC address of the actual destination on the distribution system */
- memcpy(bufPtr, daddr, ETH_ALEN);
- bufPtr += ETH_ALEN;
- macHeaderLengthInBytes -= ETH_ALEN;
- break;
- default:
- unifi_error(priv,"Unknown direction =%d : Not handled now\n",direction);
- return -1;
- }
- /* 2 bytes of sequence control field, appended by firmware */
- bufPtr += 2;
- macHeaderLengthInBytes -= 2;
-
- if (3 == direction) {
- /* Address4: MAC address of the source */
- memcpy(bufPtr, saddr, ETH_ALEN);
- bufPtr += ETH_ALEN;
- macHeaderLengthInBytes -= ETH_ALEN;
- }
-
- /* IF Qos Data or Qos Null Data then set QosControl field */
- if ((priority != CSR_CONTENTION) && (macHeaderLengthInBytes >= QOS_CONTROL_HEADER_SIZE)) {
-
- if (priority > 7) {
- unifi_trace(priv, UDBG1, "data packets priority is more than 7, priority = %x\n", priority);
- qc |= 7;
- } else {
- qc |= priority;
- }
- /* Fetch Address1: its offset is taken backwards from bufPtr
- * (which currently points at the QoS control field).
- * Address4 does not exist in this case.
- */
-
- addressOne = bufPtr- ADDRESS_ONE_OFFSET;
-
- if (addressOne[0] & 0x1) {
- /* multicast/broadcast frames, no acknowledgement needed */
- qc |= 1 << 5;
- }
- /* non-AP mode only for now */
- if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_STA ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_IBSS ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PCLI) {
- /* For STA and IBSS, the EOSP and TXOP limit fields are left as 0. */
- } else {
- if(bQosNull) {
- qc |= 1 << 4;
- }
- }
-
- /* append Qos control field to mac header */
- bufPtr[0] = qc;
- /* txop limit is 0 */
- bufPtr[1] = 0;
- macHeaderLengthInBytes -= QOS_CONTROL_HEADER_SIZE;
- }
- if (macHeaderLengthInBytes) {
- unifi_warning(priv, " Mac header not appended properly\n");
- return -1;
- }
- return 0;
-}
-
-/*
- * ---------------------------------------------------------------------------
- * send_ma_pkt_request
- *
- * This function sends a data packet to UniFi for transmission.
- * EAP protocol packets are also sent via send_ma_pkt_request().
- *
- * Arguments:
- * priv Pointer to device private context struct
- * skb Socket buffer containing data packet to transmit
- * ehdr Pointer to Ethernet header within skb.
- *
- * Returns:
- * Zero on success or error code.
- * ---------------------------------------------------------------------------
- */
-
-static int
-send_ma_pkt_request(unifi_priv_t *priv, struct sk_buff *skb, const struct ethhdr *ehdr, CSR_PRIORITY priority)
-{
- int r;
- u16 i;
- u8 eapolStore = FALSE;
- struct sk_buff *newSkb = NULL;
- bulk_data_param_t bulkdata;
- const int proto = ntohs(ehdr->h_proto);
- u16 interfaceTag;
- CsrWifiMacAddress peerAddress;
- CSR_TRANSMISSION_CONTROL transmissionControl = CSR_NO_CONFIRM_REQUIRED;
- s8 protection;
- netInterface_priv_t *interfacePriv = NULL;
- CSR_RATE TransmitRate = (CSR_RATE)0;
-
- unifi_trace(priv, UDBG5, "entering send_ma_pkt_request\n");
-
- /* Get the interface Tag by means of source Mac address */
- for (i = 0; i < CSR_WIFI_NUM_INTERFACES; i++) {
- if (!memcmp(priv->netdev[i]->dev_addr, ehdr->h_source, ETH_ALEN)) {
- interfaceTag = i;
- interfacePriv = priv->interfacePriv[interfaceTag];
- break;
- }
- }
-
- if (interfacePriv == NULL) {
- /* No match found - error */
- interfaceTag = 0;
- interfacePriv = priv->interfacePriv[interfaceTag];
- unifi_warning(priv, "Mac address not matching ... debugging needed\n");
- interfacePriv->stats.tx_dropped++;
- kfree_skb(skb);
- return -1;
- }
-
- /* Add a SNAP header if necessary */
- if (skb_add_llc_snap(priv->netdev[interfaceTag], skb, proto) != 0) {
- /* convert failed */
- unifi_error(priv, "skb_add_llc_snap failed.\n");
- kfree_skb(skb);
- return -1;
- }
-
- bulkdata.d[0].os_data_ptr = skb->data;
- bulkdata.d[0].os_net_buf_ptr = (unsigned char*)skb;
- bulkdata.d[0].net_buf_length = bulkdata.d[0].data_length = skb->len;
- bulkdata.d[1].os_data_ptr = NULL;
- bulkdata.d[1].os_net_buf_ptr = NULL;
- bulkdata.d[1].net_buf_length = bulkdata.d[1].data_length = 0;
-
-#ifdef CSR_SUPPORT_SME
- /* Notify the TA module for the Tx frame for non AP/P2PGO mode*/
- if ((interfacePriv->interfaceMode != CSR_WIFI_ROUTER_CTRL_MODE_AP) &&
- (interfacePriv->interfaceMode != CSR_WIFI_ROUTER_CTRL_MODE_P2PGO)) {
- unifi_ta_sample(priv->card, CSR_WIFI_ROUTER_CTRL_PROTOCOL_DIRECTION_TX,
- &bulkdata.d[0], ehdr->h_source,
- priv->netdev[interfaceTag]->dev_addr,
- jiffies_to_msecs(jiffies),
- 0); /* rate is unknown on tx */
- }
-#endif /* CSR_SUPPORT_SME */
-
- if ((proto == ETH_P_PAE)
-#ifdef CSR_WIFI_SECURITY_WAPI_ENABLE
- || (proto == ETH_P_WAI)
-#endif
- )
- {
- /* check for m4 detection */
- if (0 == uf_verify_m4(priv, bulkdata.d[0].os_data_ptr, bulkdata.d[0].data_length)) {
- eapolStore = TRUE;
- }
- }
-
-#ifdef CSR_WIFI_SECURITY_WAPI_ENABLE
- if (proto == ETH_P_WAI)
- {
- protection = 0; /*WAI packets always sent unencrypted*/
- }
- else
- {
-#endif
-#ifdef CSR_SUPPORT_SME
- if ((protection = uf_get_protection_bit_from_interfacemode(priv, interfaceTag, ehdr->h_dest)) < 0) {
- unifi_warning(priv, "unicast address, but destination not in station record database\n");
- unifi_net_data_free(priv, &bulkdata.d[0]);
- return -1;
- }
-#else
- protection = 0;
-#endif
-#ifdef CSR_WIFI_SECURITY_WAPI_ENABLE
- }
-#endif
-
- /* append Mac header for Eapol as well as data packet */
- if (prepare_and_add_macheader(priv, skb, newSkb, priority, &bulkdata, interfaceTag, ehdr->h_dest, ehdr->h_source, protection)) {
- unifi_error(priv, "failed to create MAC header\n");
- unifi_net_data_free(priv, &bulkdata.d[0]);
- return -1;
- }
-
- /* The RA must contain the immediate destination MAC address, i.e. the
- * Address 1 field of the 802.11 MAC header. The offset of 4 below skips
- * sizeof(frame control) + sizeof(duration/ID) to reach Address 1.
- */
- memcpy(peerAddress.a, ((u8 *) bulkdata.d[0].os_data_ptr) + 4, ETH_ALEN);
-
- unifi_trace(priv, UDBG5, "RA[0]=%x, RA[1]=%x, RA[2]=%x, RA[3]=%x, RA[4]=%x, RA[5]=%x\n",
- peerAddress.a[0],peerAddress.a[1], peerAddress.a[2], peerAddress.a[3],
- peerAddress.a[4],peerAddress.a[5]);
-
-
- if ((proto == ETH_P_PAE)
-#ifdef CSR_WIFI_SECURITY_WAPI_ENABLE
- || (proto == ETH_P_WAI)
-#endif
- )
- {
- CSR_SIGNAL signal;
- CSR_MA_PACKET_REQUEST *req = &signal.u.MaPacketRequest;
-
- /* initialize signal to zero */
- memset(&signal, 0, sizeof(CSR_SIGNAL));
-
- /* Frame MA_PACKET request */
- signal.SignalPrimitiveHeader.SignalId = CSR_MA_PACKET_REQUEST_ID;
- signal.SignalPrimitiveHeader.ReceiverProcessId = 0;
- signal.SignalPrimitiveHeader.SenderProcessId = priv->netdev_client->sender_id;
-
- transmissionControl = req->TransmissionControl = 0;
-#ifdef CSR_SUPPORT_SME
- if (eapolStore)
- {
- netInterface_priv_t *netpriv = (netInterface_priv_t *)netdev_priv(priv->netdev[interfaceTag]);
-
- /* Fill the MA-PACKET.req */
-
- req->Priority = priority;
- unifi_trace(priv, UDBG3, "Tx Frame with Priority: %x\n", req->Priority);
-
- /* rate selected by firmware */
- req->TransmitRate = 0;
- req->HostTag = CSR_WIFI_EAPOL_M4_HOST_TAG;
- /* RA address matching with address 1 of Mac header */
- memcpy(req->Ra.x, ((u8 *) bulkdata.d[0].os_data_ptr) + 4, ETH_ALEN);
-
- spin_lock(&priv->m4_lock);
- /* Store the M4-PACKET.req for later */
- interfacePriv->m4_signal = signal;
- interfacePriv->m4_bulk_data.net_buf_length = bulkdata.d[0].net_buf_length;
- interfacePriv->m4_bulk_data.data_length = bulkdata.d[0].data_length;
- interfacePriv->m4_bulk_data.os_data_ptr = bulkdata.d[0].os_data_ptr;
- interfacePriv->m4_bulk_data.os_net_buf_ptr = bulkdata.d[0].os_net_buf_ptr;
- spin_unlock(&priv->m4_lock);
-
- /* Signal the workqueue to call CsrWifiRouterCtrlM4ReadyToSendIndSend().
- * It cannot be called directly from the tx path because it
- * does a non-atomic kmalloc via the framework's CsrPmemAlloc().
- */
- queue_work(priv->unifi_workqueue, &netpriv->send_m4_ready_task);
-
- return 0;
- }
-#endif
- }/*EAPOL or WAI packet*/
-
-#if (defined(CSR_WIFI_SECURITY_WAPI_ENABLE) && defined(CSR_WIFI_SECURITY_WAPI_SW_ENCRYPTION))
- if ((CSR_WIFI_ROUTER_CTRL_MODE_STA == interfacePriv->interfaceMode) && \
- (priv->wapi_unicast_filter) && \
- (proto != ETH_P_PAE) && \
- (proto != ETH_P_WAI) && \
- (skb->len > 0))
- {
- CSR_SIGNAL signal;
- CSR_MA_PACKET_REQUEST *req = &signal.u.MaPacketRequest;
- netInterface_priv_t *netpriv = (netInterface_priv_t *)netdev_priv(priv->netdev[interfaceTag]);
-
- unifi_trace(priv, UDBG4, "send_ma_pkt_request() - WAPI unicast data packet when USKID = 1 \n");
-
- /* initialize signal to zero */
- memset(&signal, 0, sizeof(CSR_SIGNAL));
- /* Frame MA_PACKET request */
- signal.SignalPrimitiveHeader.SignalId = CSR_MA_PACKET_REQUEST_ID;
- signal.SignalPrimitiveHeader.ReceiverProcessId = 0;
- signal.SignalPrimitiveHeader.SenderProcessId = priv->netdev_client->sender_id;
-
- /* Fill the MA-PACKET.req */
- req->TransmissionControl = 0;
- req->Priority = priority;
- unifi_trace(priv, UDBG3, "Tx Frame with Priority: %x\n", req->Priority);
- req->TransmitRate = (CSR_RATE) 0; /* rate selected by firmware */
- req->HostTag = 0xffffffff; /* Ask for a new HostTag */
- /* RA address matching with address 1 of Mac header */
- memcpy(req->Ra.x, ((u8 *) bulkdata.d[0].os_data_ptr) + 4, ETH_ALEN);
-
- /* Store the M4-PACKET.req for later */
- spin_lock(&priv->wapi_lock);
- interfacePriv->wapi_unicast_ma_pkt_sig = signal;
- interfacePriv->wapi_unicast_bulk_data.net_buf_length = bulkdata.d[0].net_buf_length;
- interfacePriv->wapi_unicast_bulk_data.data_length = bulkdata.d[0].data_length;
- interfacePriv->wapi_unicast_bulk_data.os_data_ptr = bulkdata.d[0].os_data_ptr;
- interfacePriv->wapi_unicast_bulk_data.os_net_buf_ptr = bulkdata.d[0].os_net_buf_ptr;
- spin_unlock(&priv->wapi_lock);
-
- /* Signal the workqueue to call CsrWifiRouterCtrlWapiUnicastTxEncryptIndSend().
- * It cannot be called directly from the tx path because it
- * does a non-atomic kmalloc via the framework's CsrPmemAlloc().
- */
- queue_work(priv->unifi_workqueue, &netpriv->send_pkt_to_encrypt);
-
- return 0;
- }
-#endif
-
- if(priv->cmanrTestMode)
- {
- TransmitRate = priv->cmanrTestModeTransmitRate;
- unifi_trace(priv, UDBG2, "send_ma_pkt_request: cmanrTestModeTransmitRate = %d TransmitRate=%d\n",
- priv->cmanrTestModeTransmitRate,
- TransmitRate
- );
- }
-
- /* Send UniFi msg */
- /* The hostTag is passed as 0xffffffff here; the real value is assigned while framing the MA-PACKET request in pdu_processing.c */
- r = uf_process_ma_packet_req(priv,
- peerAddress.a,
- 0xffffffff, /* Ask for a new HostTag */
- interfaceTag,
- transmissionControl,
- TransmitRate,
- priority,
- priv->netdev_client->sender_id,
- &bulkdata);
-
- if (r) {
- unifi_trace(priv, UDBG1, "(HIP validation failure) r = %x\n", r);
- unifi_net_data_free(priv, &bulkdata.d[0]);
- return -1;
- }
-
- unifi_trace(priv, UDBG3, "leaving send_ma_pkt_request, UNITDATA result code = %d\n", r);
-
- return r;
-} /* send_ma_pkt_request() */
-
-/*
- * ---------------------------------------------------------------------------
- * uf_net_xmit
- *
- * This function is called by the higher level stack to transmit an
- * ethernet packet.
- *
- * Arguments:
- * skb Ethernet packet to send.
- * dev Pointer to the linux net device.
- *
- * Returns:
- * NETDEV_TX_OK when the packet has been consumed (not necessarily
- * transmitted); failed packets are counted in tx_dropped, freed, and
- * NETDEV_TX_OK is returned for them as well.
- *
- *
- * Notes:
- * The controlled port is handled in the qdisc dequeue handler.
- * ---------------------------------------------------------------------------
- */
-static netdev_tx_t
-uf_net_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- struct ethhdr ehdr;
- int proto, port;
- int result;
- static tx_signal_handler tx_handler;
- CSR_PRIORITY priority;
- CsrWifiRouterCtrlPortAction port_action;
-
- unifi_trace(priv, UDBG5, "unifi_net_xmit: skb = %x\n", skb);
-
- memcpy(&ehdr, skb->data, ETH_HLEN);
- proto = ntohs(ehdr.h_proto);
- priority = get_packet_priority(priv, skb, &ehdr, interfacePriv);
-
- /* All frames are sent as MA-PACKET.req (EAPOL also) */
- tx_handler = send_ma_pkt_request;
-
- /* 802.1x - apply controlled/uncontrolled port rules */
- if ((proto != ETH_P_PAE)
-#ifdef CSR_WIFI_SECURITY_WAPI_ENABLE
- && (proto != ETH_P_WAI)
-#endif
- ) {
- port = UF_CONTROLLED_PORT_Q;
- } else {
- /* queue 4 */
- port = UF_UNCONTROLLED_PORT_Q;
- }
-
- /* Uncontrolled port rules apply */
- port_action = verify_port(priv
- , (((CSR_WIFI_ROUTER_CTRL_MODE_STA == interfacePriv->interfaceMode)||(CSR_WIFI_ROUTER_CTRL_MODE_P2PCLI== interfacePriv->interfaceMode))? interfacePriv->bssid.a: ehdr.h_dest)
- , port
- , interfacePriv->InterfaceTag);
-
- if (port_action == CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_OPEN) {
- unifi_trace(priv, UDBG5,
- "uf_net_xmit: %s controlled port open\n",
- port ? "" : "un");
- /* Remove the ethernet header */
- skb_pull(skb, ETH_HLEN);
- result = tx_handler(priv, skb, &ehdr, priority);
- } else {
-
- /* Discard the packet if necessary */
- unifi_trace(priv, UDBG2,
- "uf_net_xmit: %s controlled port %s\n",
- port ? "" : "un", port_action==CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_CLOSED_BLOCK ? "blocked" : "closed");
- interfacePriv->stats.tx_dropped++;
- kfree_skb(skb);
-
- return NETDEV_TX_OK;
- }
-
- if (result == NETDEV_TX_OK) {
-#if (defined(CSR_WIFI_SECURITY_WAPI_ENABLE) && defined(CSR_WIFI_SECURITY_WAPI_SW_ENCRYPTION))
- /* Don't update the tx stats when the pkt is to be sent for sw encryption*/
- if (!((CSR_WIFI_ROUTER_CTRL_MODE_STA == interfacePriv->interfaceMode) &&
- (priv->wapi_unicast_filter == 1)))
- {
- dev->trans_start = jiffies;
- /* Should really count tx stats in the UNITDATA.status signal but
- * that doesn't have the length.
- */
- interfacePriv->stats.tx_packets++;
- /* count only the packet payload */
- interfacePriv->stats.tx_bytes += skb->len;
-
- }
-#else
- dev->trans_start = jiffies;
-
- /*
- * Should really count tx stats in the UNITDATA.status signal but
- * that doesn't have the length.
- */
- interfacePriv->stats.tx_packets++;
- /* count only the packet payload */
- interfacePriv->stats.tx_bytes += skb->len;
-#endif
- } else if (result < 0) {
-
- /* Failed to send: fh queue was full, and the skb was discarded.
- * Return OK to indicate that the buffer was consumed, to stop the
- * kernel re-transmitting the freed buffer.
- */
- interfacePriv->stats.tx_dropped++;
- unifi_trace(priv, UDBG1, "unifi_net_xmit: (Packet Drop), dropped count = %x\n", interfacePriv->stats.tx_dropped);
- result = NETDEV_TX_OK;
- }
-
- /* The skb will have been freed by send_XXX_request() */
-
- return result;
-} /* uf_net_xmit() */
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_pause_xmit
- * unifi_restart_xmit
- *
- * These functions are called from the UniFi core to control the flow
- * of packets from the upper layers.
- * unifi_pause_xmit() is called when the internal queue is full and
- * should take action to stop unifi_ma_unitdata() being called.
- * When the queue has drained, unifi_restart_xmit() will be called to
- * re-enable the flow of packets for transmission.
- *
- * Arguments:
- * ospriv OS private context pointer.
- *
- * Returns:
- * None.
- *
- * Notes:
- * unifi_pause_xmit() may be called from interrupt context.
- * ---------------------------------------------------------------------------
- */
-void
-unifi_pause_xmit(void *ospriv, unifi_TrafficQueue queue)
-{
- unifi_priv_t *priv = ospriv;
- int i; /* used as a loop counter */
-
- unifi_trace(priv, UDBG2, "Stopping queue %d\n", queue);
-
- for(i=0;i<CSR_WIFI_NUM_INTERFACES;i++)
- {
- if (netif_running(priv->netdev[i]))
- {
- netif_stop_subqueue(priv->netdev[i], (u16)queue);
- }
- }
-
-#ifdef CSR_SUPPORT_SME
- if(queue<=3) {
- routerStartBuffering(priv,queue);
- unifi_trace(priv,UDBG2,"Start buffering %d\n", queue);
- } else {
- routerStartBuffering(priv,0);
- unifi_error(priv, "Start buffering %d defaulting to 0\n", queue);
- }
-#endif
-
-} /* unifi_pause_xmit() */
-
-void
-unifi_restart_xmit(void *ospriv, unifi_TrafficQueue queue)
-{
- unifi_priv_t *priv = ospriv;
- int i=0; /* used as a loop counter */
-
- unifi_trace(priv, UDBG2, "Waking queue %d\n", queue);
-
- for(i=0;i<CSR_WIFI_NUM_INTERFACES;i++)
- {
- if (netif_running(priv->netdev[i]))
- {
- netif_wake_subqueue(priv->netdev[i], (u16)queue);
- }
- }
-
-#ifdef CSR_SUPPORT_SME
- if(queue <=3) {
- routerStopBuffering(priv,queue);
- uf_send_buffered_frames(priv,queue);
- } else {
- routerStopBuffering(priv,0);
- uf_send_buffered_frames(priv,0);
- }
-#endif
-} /* unifi_restart_xmit() */
-
-
-static void
-indicate_rx_skb(unifi_priv_t *priv, u16 ifTag, u8* dst_a, u8* src_a, struct sk_buff *skb, CSR_SIGNAL *signal,
- bulk_data_param_t *bulkdata)
-{
- int r, sr = 0;
- struct net_device *dev;
-
-#ifdef CSR_SUPPORT_SME
- llc_snap_hdr_t *snap;
-
- snap = (llc_snap_hdr_t *)skb->data;
-
- sr = _identify_sme_ma_pkt_ind(priv,
- snap->oui, ntohs(snap->protocol),
- signal,
- bulkdata,
- dst_a, src_a );
-#endif
-
- /*
- * Decapsulate any SNAP header and
- * prepend an ethernet header so that the skb manipulation and ARP
- * stuff works.
- */
- r = skb_80211_to_ether(priv, skb, dst_a, src_a,
- signal, bulkdata);
- if (r == -1) {
- /* Drop the packet and return */
- priv->interfacePriv[ifTag]->stats.rx_errors++;
- priv->interfacePriv[ifTag]->stats.rx_frame_errors++;
- unifi_net_data_free(priv, &bulkdata->d[0]);
- unifi_notice(priv, "indicate_rx_skb: Discard unknown frame.\n");
- return;
- }
-
-    /* Handle the case where the packet is sent up through the subscription
-     * API but should not be given to the network stack (AMP PAL case):
-     * the LLC header differs from WiFi and the packet has been subscribed for
-     */
- if (r == 1 && sr == 1) {
- unifi_net_data_free(priv, &bulkdata->d[0]);
-        unifi_trace(priv, UDBG5, "indicate_rx_skb: Data given to subscription "
-                "API, not being given to kernel\n");
- return;
- }
-
- dev = priv->netdev[ifTag];
- /* Now we look like a regular ethernet frame */
- /* Fill in SKB meta data */
- skb->dev = dev;
- skb->protocol = eth_type_trans(skb, dev);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- /* Test for an overlength frame */
- if (skb->len > (dev->mtu + ETH_HLEN)) {
-        /* A bogus-length ethernet frame has been encapsulated. */
-        /* Is someone attempting an overflow attack? */
- unifi_error(priv, "%s: oversize frame (%d > %d)\n",
- dev->name,
- skb->len, dev->mtu + ETH_HLEN);
-
- /* Drop the packet and return */
- priv->interfacePriv[ifTag]->stats.rx_errors++;
- priv->interfacePriv[ifTag]->stats.rx_length_errors++;
- unifi_net_data_free(priv, &bulkdata->d[0]);
- return;
- }
-
-
- if(priv->cmanrTestMode)
- {
- const CSR_MA_PACKET_INDICATION *pkt_ind = &signal->u.MaPacketIndication;
- priv->cmanrTestModeTransmitRate = pkt_ind->ReceivedRate;
- unifi_trace(priv, UDBG2, "indicate_rx_skb: cmanrTestModeTransmitRate=%d\n", priv->cmanrTestModeTransmitRate);
- }
-
- /* Pass SKB up the stack */
-#ifdef CSR_WIFI_USE_NETIF_RX
- netif_rx(skb);
-#else
- netif_rx_ni(skb);
-#endif
-
- if (dev != NULL) {
- dev->last_rx = jiffies;
- }
-
- /* Bump rx stats */
- priv->interfacePriv[ifTag]->stats.rx_packets++;
- priv->interfacePriv[ifTag]->stats.rx_bytes += bulkdata->d[0].data_length;
-
- return;
-}
-
-void
-uf_process_rx_pending_queue(unifi_priv_t *priv, int queue,
- CsrWifiMacAddress source_address,
- int indicate, u16 interfaceTag)
-{
- rx_buffered_packets_t *rx_q_item;
- struct list_head *rx_list;
- struct list_head *n;
- struct list_head *l_h;
- static const CsrWifiMacAddress broadcast_address = {{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}};
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
-
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "uf_process_rx_pending_queue bad interfaceTag\n");
- return;
- }
-
- if (queue == UF_CONTROLLED_PORT_Q) {
- rx_list = &interfacePriv->rx_controlled_list;
- } else {
- rx_list = &interfacePriv->rx_uncontrolled_list;
- }
-
- down(&priv->rx_q_sem);
- list_for_each_safe(l_h, n, rx_list) {
- rx_q_item = list_entry(l_h, rx_buffered_packets_t, q);
-
- /* Validate against the source address */
- if (memcmp(broadcast_address.a, source_address.a, ETH_ALEN) &&
- memcmp(rx_q_item->sa.a, source_address.a, ETH_ALEN)) {
-
- unifi_trace(priv, UDBG2,
- "uf_process_rx_pending_queue: Skipping sa=%02X%02X%02X%02X%02X%02X skb=%p, bulkdata=%p\n",
- rx_q_item->sa.a[0], rx_q_item->sa.a[1],
- rx_q_item->sa.a[2], rx_q_item->sa.a[3],
- rx_q_item->sa.a[4], rx_q_item->sa.a[5],
- rx_q_item->skb, &rx_q_item->bulkdata.d[0]);
- continue;
- }
-
- list_del(l_h);
-
-
- unifi_trace(priv, UDBG2,
- "uf_process_rx_pending_queue: Was Blocked skb=%p, bulkdata=%p\n",
- rx_q_item->skb, &rx_q_item->bulkdata);
-
- if (indicate) {
- indicate_rx_skb(priv, interfaceTag, rx_q_item->da.a, rx_q_item->sa.a, rx_q_item->skb, &rx_q_item->signal, &rx_q_item->bulkdata);
- } else {
- interfacePriv->stats.rx_dropped++;
- unifi_net_data_free(priv, &rx_q_item->bulkdata.d[0]);
- }
-
-        /* It is our responsibility to free the Rx structure object. */
- kfree(rx_q_item);
- }
- up(&priv->rx_q_sem);
-}
-
-/*
- * ---------------------------------------------------------------------------
- * uf_resume_data_plane
- *
- *      Called when the (un)controlled port is set to open, to notify the
- *      network stack to schedule for transmission any packets queued in the
- *      qdisc while the port was closed, and to indicate to the stack any
- *      packets buffered in the Rx queues.
- *
- * Arguments:
- * priv Pointer to device private struct
- *
- * Returns:
- * ---------------------------------------------------------------------------
- */
-void
-uf_resume_data_plane(unifi_priv_t *priv, int queue,
- CsrWifiMacAddress peer_address,
- u16 interfaceTag)
-{
-#ifdef CSR_SUPPORT_WEXT
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
-#endif
-
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "uf_resume_data_plane bad interfaceTag\n");
- return;
- }
-
- unifi_trace(priv, UDBG2, "Resuming netif\n");
-
- /*
- * If we are waiting for the net device to enter the up state, don't
- * process the rx queue yet as it will be done by the callback when
- * the device is ready.
- */
-#ifdef CSR_SUPPORT_WEXT
- if (!interfacePriv->wait_netdev_change)
-#endif
- {
-#ifdef CONFIG_NET_SCHED
- if (netif_running(priv->netdev[interfaceTag])) {
- netif_tx_schedule_all(priv->netdev[interfaceTag]);
- }
-#endif
- uf_process_rx_pending_queue(priv, queue, peer_address, 1,interfaceTag);
- }
-} /* uf_resume_data_plane() */
-
-
-void uf_free_pending_rx_packets(unifi_priv_t *priv, int queue, CsrWifiMacAddress peer_address,u16 interfaceTag)
-{
- uf_process_rx_pending_queue(priv, queue, peer_address, 0,interfaceTag);
-
-} /* uf_free_pending_rx_packets() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_rx
- *
- * Reformat a UniFi data received packet into a p80211 packet and
- * pass it up the protocol stack.
- *
- *  Arguments:
- *      priv            Pointer to device private context struct.
- *      signal          Pointer to the received signal.
- *      bulkdata        Pointer to structure containing the associated bulk data.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-static void
-unifi_rx(unifi_priv_t *priv, CSR_SIGNAL *signal, bulk_data_param_t *bulkdata)
-{
- u16 interfaceTag;
- bulk_data_desc_t *pData;
- const CSR_MA_PACKET_INDICATION *pkt_ind = &signal->u.MaPacketIndication;
- struct sk_buff *skb;
- CsrWifiRouterCtrlPortAction port_action;
- u8 dataFrameType;
- int proto;
- int queue;
-
- u8 da[ETH_ALEN], sa[ETH_ALEN];
- u8 toDs, fromDs, frameType, macHeaderLengthInBytes = MAC_HEADER_SIZE;
- u16 frameControl;
- netInterface_priv_t *interfacePriv;
- struct ethhdr ehdr;
-
- interfaceTag = (pkt_ind->VirtualInterfaceIdentifier & 0xff);
- interfacePriv = priv->interfacePriv[interfaceTag];
-
- /* Sanity check that the VIF refers to a sensible interface */
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES)
- {
- unifi_error(priv, "%s: MA-PACKET indication with bad interfaceTag %d\n", __FUNCTION__, interfaceTag);
- unifi_net_data_free(priv,&bulkdata->d[0]);
- return;
- }
-
- /* Sanity check that the VIF refers to an allocated netdev */
- if (!interfacePriv->netdev_registered)
- {
- unifi_error(priv, "%s: MA-PACKET indication with unallocated interfaceTag %d\n", __FUNCTION__, interfaceTag);
- unifi_net_data_free(priv, &bulkdata->d[0]);
- return;
- }
-
- if (bulkdata->d[0].data_length == 0) {
- unifi_warning(priv, "%s: MA-PACKET indication with zero bulk data\n", __FUNCTION__);
- unifi_net_data_free(priv,&bulkdata->d[0]);
- return;
- }
-
-
- skb = (struct sk_buff*)bulkdata->d[0].os_net_buf_ptr;
- skb->len = bulkdata->d[0].data_length;
-
- /* Point to the addresses */
- toDs = (skb->data[1] & 0x01) ? 1 : 0;
- fromDs = (skb->data[1] & 0x02) ? 1 : 0;
-
- memcpy(da,(skb->data+4+toDs*12),ETH_ALEN);/* Address1 or 3 */
- memcpy(sa,(skb->data+10+fromDs*(6+toDs*8)),ETH_ALEN); /* Address2, 3 or 4 */
-
-
- pData = &bulkdata->d[0];
- frameControl = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(pData->os_data_ptr);
- frameType = ((frameControl & 0x000C) >> 2);
-
- dataFrameType =((frameControl & 0x00f0) >> 4);
- unifi_trace(priv, UDBG6,
- "%s: Receive Data Frame Type %d \n", __FUNCTION__,dataFrameType);
-
- switch(dataFrameType)
- {
- case QOS_DATA:
- case QOS_DATA_NULL:
- /* If both are set then the Address4 exists (only for AP) */
- if (fromDs && toDs)
- {
- /* 6 is the size of Address4 field */
- macHeaderLengthInBytes += (QOS_CONTROL_HEADER_SIZE + 6);
- }
- else
- {
- macHeaderLengthInBytes += QOS_CONTROL_HEADER_SIZE;
- }
-
-            /* If the order bit is set then the HT control field is part of the MAC header */
- if (frameControl & FRAME_CONTROL_ORDER_BIT)
- macHeaderLengthInBytes += HT_CONTROL_HEADER_SIZE;
- break;
- default:
- if (fromDs && toDs)
- macHeaderLengthInBytes += 6;
- }
-
- /* Prepare the ethernet header from snap header of skb data */
- switch(dataFrameType)
- {
- case DATA_NULL:
- case QOS_DATA_NULL:
-            /* This is only for fetching queue info; EAPOL won't arrive as
-             * null data, so the proto is initialised to zero
-             */
- proto = 0x0;
- break;
- default:
- {
- llc_snap_hdr_t *snap;
-            /* Fetch the SNAP header to find the protocol (for IPv4/IPv6
-             * packets the SNAP header is at the same offset)
-             */
- snap = (llc_snap_hdr_t *) (skb->data + macHeaderLengthInBytes);
-
- /* prepare the ethernet header from the snap header & addresses */
- ehdr.h_proto = snap->protocol;
- memcpy(ehdr.h_dest, da, ETH_ALEN);
- memcpy(ehdr.h_source, sa, ETH_ALEN);
- }
- proto = ntohs(ehdr.h_proto);
- }
- unifi_trace(priv, UDBG3, "in unifi_rx protocol from snap header = 0x%x\n", proto);
-
- if ((proto != ETH_P_PAE)
-#ifdef CSR_WIFI_SECURITY_WAPI_ENABLE
- && (proto != ETH_P_WAI)
-#endif
- ) {
- queue = UF_CONTROLLED_PORT_Q;
- } else {
- queue = UF_UNCONTROLLED_PORT_Q;
- }
-
- port_action = verify_port(priv, (unsigned char*)sa, queue, interfaceTag);
- unifi_trace(priv, UDBG3, "in unifi_rx port action is = 0x%x & queue = %x\n", port_action, queue);
-
-#ifdef CSR_SUPPORT_SME
- /* Notify the TA module for the Rx frame for non P2PGO and AP cases*/
- if((interfacePriv->interfaceMode != CSR_WIFI_ROUTER_CTRL_MODE_AP) &&
- (interfacePriv->interfaceMode != CSR_WIFI_ROUTER_CTRL_MODE_P2PGO))
- {
- /* Remove MAC header of length(macHeaderLengthInBytes) before sampling */
- skb_pull(skb, macHeaderLengthInBytes);
- pData->os_data_ptr = skb->data;
- pData->data_length -= macHeaderLengthInBytes;
-
- if (pData->data_length) {
- unifi_ta_sample(priv->card, CSR_WIFI_ROUTER_CTRL_PROTOCOL_DIRECTION_RX,
- &bulkdata->d[0],
- sa, priv->netdev[interfaceTag]->dev_addr,
- jiffies_to_msecs(jiffies),
- pkt_ind->ReceivedRate);
- }
- } else {
-
- /* AP/P2PGO specific handling here */
- CsrWifiRouterCtrlStaInfo_t * srcStaInfo =
- CsrWifiRouterCtrlGetStationRecordFromPeerMacAddress(priv,sa,interfaceTag);
-
- /* Defensive check only; Source address is already checked in
- process_ma_packet_ind and we should have a valid source address here */
-
- if(srcStaInfo == NULL) {
- CsrWifiMacAddress peerMacAddress;
- /* Unknown data PDU */
- memcpy(peerMacAddress.a,sa,ETH_ALEN);
- unifi_trace(priv, UDBG1, "%s: Unexpected frame from peer = %x:%x:%x:%x:%x:%x\n", __FUNCTION__,
- sa[0], sa[1],sa[2], sa[3], sa[4],sa[5]);
- CsrWifiRouterCtrlUnexpectedFrameIndSend(priv->CSR_WIFI_SME_IFACEQUEUE,0,interfaceTag,peerMacAddress);
- unifi_net_data_free(priv, &bulkdata->d[0]);
- return;
- }
-
- /* For AP GO mode, don't store the PDUs */
- if (port_action != CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_OPEN) {
- /* Drop the packet and return */
- CsrWifiMacAddress peerMacAddress;
- memcpy(peerMacAddress.a,sa,ETH_ALEN);
- unifi_trace(priv, UDBG3, "%s: Port is not open: unexpected frame from peer = %x:%x:%x:%x:%x:%x\n",
- __FUNCTION__, sa[0], sa[1],sa[2], sa[3], sa[4],sa[5]);
-
- CsrWifiRouterCtrlUnexpectedFrameIndSend(priv->CSR_WIFI_SME_IFACEQUEUE,0,interfaceTag,peerMacAddress);
- interfacePriv->stats.rx_dropped++;
- unifi_net_data_free(priv, &bulkdata->d[0]);
- unifi_notice(priv, "%s: Dropping packet, proto=0x%04x, %s port\n", __FUNCTION__,
- proto, queue ? "Controlled" : "Un-controlled");
- return;
- }
-
- /* Qos NULL/Data NULL are freed here and not processed further */
- if((dataFrameType == QOS_DATA_NULL) || (dataFrameType == DATA_NULL)){
- unifi_trace(priv, UDBG5, "%s: Null Frame Received and Freed\n", __FUNCTION__);
- unifi_net_data_free(priv, &bulkdata->d[0]);
- return;
- }
-
-        /* We are now done with the MAC header, so proceed with the real data part */
-        /* This function takes care of the appropriate routing for the AP/P2PGO case */
-        /* The function handles the following:
-           1. Routing the PDU to the appropriate destination
-           2. Error case handling
-           */
- if(!(uf_ap_process_data_pdu(priv, skb, &ehdr, srcStaInfo,
- signal,
- bulkdata,
- macHeaderLengthInBytes)))
- {
- return;
- }
- unifi_trace(priv, UDBG5, "unifi_rx: no specific AP handling process as normal frame, MAC Header len %d\n",macHeaderLengthInBytes);
- /* Remove the MAC header for subsequent conversion */
- skb_pull(skb, macHeaderLengthInBytes);
- pData->os_data_ptr = skb->data;
- pData->data_length -= macHeaderLengthInBytes;
- pData->os_net_buf_ptr = (unsigned char*)skb;
- pData->net_buf_length = skb->len;
- }
-#endif /* CSR_SUPPORT_SME */
-
-
- /* Now that the MAC header is removed, null-data frames have zero length
- * and can be dropped
- */
- if (pData->data_length == 0) {
- if (((frameControl & 0x00f0) >> 4) != QOS_DATA_NULL &&
- ((frameControl & 0x00f0) >> 4) != DATA_NULL) {
- unifi_trace(priv, UDBG1, "Zero length frame, but not null-data %04x\n", frameControl);
- }
- unifi_net_data_free(priv, &bulkdata->d[0]);
- return;
- }
-
- if (port_action == CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_CLOSED_DISCARD) {
- /* Drop the packet and return */
- interfacePriv->stats.rx_dropped++;
- unifi_net_data_free(priv, &bulkdata->d[0]);
- unifi_notice(priv, "%s: Dropping packet, proto=0x%04x, %s port\n",
- __FUNCTION__, proto, queue ? "controlled" : "uncontrolled");
- return;
- } else if ( (port_action == CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_CLOSED_BLOCK) ||
- (interfacePriv->connected != UnifiConnected) ) {
-
- /* Buffer the packet into the Rx queues */
- rx_buffered_packets_t *rx_q_item;
- struct list_head *rx_list;
-
- rx_q_item = kmalloc(sizeof(rx_buffered_packets_t),
- GFP_KERNEL);
- if (rx_q_item == NULL) {
- unifi_error(priv, "%s: Failed to allocate %d bytes for rx packet record\n",
- __FUNCTION__, sizeof(rx_buffered_packets_t));
- interfacePriv->stats.rx_dropped++;
- unifi_net_data_free(priv, &bulkdata->d[0]);
- return;
- }
-
- INIT_LIST_HEAD(&rx_q_item->q);
- rx_q_item->bulkdata = *bulkdata;
- rx_q_item->skb = skb;
- rx_q_item->signal = *signal;
- memcpy(rx_q_item->sa.a, sa, ETH_ALEN);
- memcpy(rx_q_item->da.a, da, ETH_ALEN);
- unifi_trace(priv, UDBG2, "%s: Blocked skb=%p, bulkdata=%p\n",
- __FUNCTION__, rx_q_item->skb, &rx_q_item->bulkdata);
-
- if (queue == UF_CONTROLLED_PORT_Q) {
- rx_list = &interfacePriv->rx_controlled_list;
- } else {
- rx_list = &interfacePriv->rx_uncontrolled_list;
- }
-
- /* Add to tail of packets queue */
- down(&priv->rx_q_sem);
- list_add_tail(&rx_q_item->q, rx_list);
- up(&priv->rx_q_sem);
-
- return;
-
- }
-
- indicate_rx_skb(priv, interfaceTag, da, sa, skb, signal, bulkdata);
-
-} /* unifi_rx() */
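The DA/SA extraction near the top of unifi_rx() (and again in process_ma_packet_ind()) encodes the 802.11 address layout as offset arithmetic on the ToDS/FromDS bits: Address1 starts at byte 4, Address2 at byte 10, Address3 at byte 16 and Address4 (4-address frames) at byte 24. The following is a minimal userspace sketch, not taken from the driver, that reproduces the da/sa offsets for all four ToDS/FromDS combinations:

#include <stdio.h>

int main(void)
{
    int toDs, fromDs;

    for (toDs = 0; toDs <= 1; toDs++) {
        for (fromDs = 0; fromDs <= 1; fromDs++) {
            int da_off = 4 + toDs * 12;                /* Address1 or Address3 */
            int sa_off = 10 + fromDs * (6 + toDs * 8); /* Address2, 3 or 4 */
            printf("ToDS=%d FromDS=%d: DA at offset %d, SA at offset %d\n",
                   toDs, fromDs, da_off, sa_off);
        }
    }
    return 0;
}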
-
-static void process_ma_packet_cfm(unifi_priv_t *priv, CSR_SIGNAL *signal, bulk_data_param_t *bulkdata)
-{
- u16 interfaceTag;
- const CSR_MA_PACKET_CONFIRM *pkt_cfm = &signal->u.MaPacketConfirm;
- netInterface_priv_t *interfacePriv;
-
- interfaceTag = (pkt_cfm->VirtualInterfaceIdentifier & 0xff);
- interfacePriv = priv->interfacePriv[interfaceTag];
-
- /* Sanity check that the VIF refers to a sensible interface */
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES)
- {
- unifi_error(priv, "%s: MA-PACKET confirm with bad interfaceTag %d\n", __FUNCTION__, interfaceTag);
- return;
- }
-#ifdef CSR_SUPPORT_SME
- if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) {
-
- uf_process_ma_pkt_cfm_for_ap(priv,interfaceTag,pkt_cfm);
- } else if (interfacePriv->m4_sent && (pkt_cfm->HostTag == interfacePriv->m4_hostTag)) {
-        /* Check if this is a confirm for an EAPOL M4 frame and we need to send a transmitted ind */
- CsrResult result = pkt_cfm->TransmissionStatus == CSR_TX_SUCCESSFUL?CSR_RESULT_SUCCESS:CSR_RESULT_FAILURE;
- CsrWifiMacAddress peerMacAddress;
- memcpy(peerMacAddress.a, interfacePriv->m4_signal.u.MaPacketRequest.Ra.x, ETH_ALEN);
-
- unifi_trace(priv, UDBG1, "%s: Sending M4 Transmit CFM\n", __FUNCTION__);
- CsrWifiRouterCtrlM4TransmittedIndSend(priv->CSR_WIFI_SME_IFACEQUEUE, 0,
- interfaceTag,
- peerMacAddress,
- result);
- interfacePriv->m4_sent = FALSE;
- interfacePriv->m4_hostTag = 0xffffffff;
- }
-#endif
- return;
-}
-
-
-/*
- * ---------------------------------------------------------------------------
- *  process_ma_packet_ind
- *
- *      Process a received MA-PACKET indication: validate it, handle the
- *      control-frame, AP/P2PGO power-save and block-ack reordering cases,
- *      and pass the frame on to process_amsdu()/unifi_rx().
- *
- *  Arguments:
- *      priv            Pointer to device private context struct.
- *      signal          Pointer to the received signal.
- *      bulkdata        Pointer to structure containing the associated bulk data.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-static void process_ma_packet_ind(unifi_priv_t *priv, CSR_SIGNAL *signal, bulk_data_param_t *bulkdata)
-{
- u16 interfaceTag;
- bulk_data_desc_t *pData;
- CSR_MA_PACKET_INDICATION *pkt_ind = (CSR_MA_PACKET_INDICATION*)&signal->u.MaPacketIndication;
- struct sk_buff *skb;
- u16 frameControl;
- netInterface_priv_t *interfacePriv;
- u8 da[ETH_ALEN], sa[ETH_ALEN];
- u8 *bssid = NULL, *ba_addr = NULL;
- u8 toDs, fromDs, frameType;
- u8 i =0;
-
-#ifdef CSR_SUPPORT_SME
- u8 dataFrameType = 0;
- u8 powerSaveChanged = FALSE;
- u8 pmBit = 0;
- CsrWifiRouterCtrlStaInfo_t *srcStaInfo = NULL;
- u16 qosControl;
-
-#endif
-
- interfaceTag = (pkt_ind->VirtualInterfaceIdentifier & 0xff);
- interfacePriv = priv->interfacePriv[interfaceTag];
-
-
- /* Sanity check that the VIF refers to a sensible interface */
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES)
- {
- unifi_error(priv, "%s: MA-PACKET indication with bad interfaceTag %d\n", __FUNCTION__, interfaceTag);
- unifi_net_data_free(priv,&bulkdata->d[0]);
- return;
- }
-
- /* Sanity check that the VIF refers to an allocated netdev */
- if (!interfacePriv->netdev_registered)
- {
- unifi_error(priv, "%s: MA-PACKET indication with unallocated interfaceTag %d\n", __FUNCTION__, interfaceTag);
- unifi_net_data_free(priv, &bulkdata->d[0]);
- return;
- }
-
- if (bulkdata->d[0].data_length == 0) {
- unifi_warning(priv, "%s: MA-PACKET indication with zero bulk data\n", __FUNCTION__);
- unifi_net_data_free(priv,&bulkdata->d[0]);
- return;
- }
-    /* For monitor mode we need to pass this indication to the registered
-       application; handle that separately */
-    /* MIC failures are already taken care of, so there is no need to pass up
-       PDUs that were not received successfully in non-monitor mode */
- if(pkt_ind->ReceptionStatus != CSR_RX_SUCCESS)
- {
- unifi_warning(priv, "%s: MA-PACKET indication with status = %d\n",__FUNCTION__, pkt_ind->ReceptionStatus);
- unifi_net_data_free(priv,&bulkdata->d[0]);
- return;
- }
-
-
- skb = (struct sk_buff*)bulkdata->d[0].os_net_buf_ptr;
- skb->len = bulkdata->d[0].data_length;
-
- /* Point to the addresses */
- toDs = (skb->data[1] & 0x01) ? 1 : 0;
- fromDs = (skb->data[1] & 0x02) ? 1 : 0;
-
- memcpy(da,(skb->data+4+toDs*12),ETH_ALEN);/* Address1 or 3 */
- memcpy(sa,(skb->data+10+fromDs*(6+toDs*8)),ETH_ALEN); /* Address2, 3 or 4 */
-
- /* Find the BSSID, which will be used to match the BA session */
- if (toDs && fromDs)
- {
- unifi_trace(priv, UDBG6, "4 address frame - don't try to find BSSID\n");
- bssid = NULL;
- }
- else
- {
- bssid = (u8 *) (skb->data + 4 + 12 - (fromDs * 6) - (toDs * 12));
- }
-
- pData = &bulkdata->d[0];
- frameControl = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(pData->os_data_ptr);
- frameType = ((frameControl & 0x000C) >> 2);
-
- unifi_trace(priv, UDBG3, "Rx Frame Type: %d sn: %d\n",frameType,
- (le16_to_cpu(*((u16*)(bulkdata->d[0].os_data_ptr + IEEE802_11_SEQUENCE_CONTROL_OFFSET))) >> 4) & 0xfff);
- if(frameType == IEEE802_11_FRAMETYPE_CONTROL){
-#ifdef CSR_SUPPORT_SME
- unifi_trace(priv, UDBG6, "%s: Received Control Frame\n", __FUNCTION__);
-
- if((frameControl & 0x00f0) == 0x00A0){
- /* This is a PS-POLL request */
- u8 pmBit = (frameControl & 0x1000)?0x01:0x00;
- unifi_trace(priv, UDBG6, "%s: Received PS-POLL Frame\n", __FUNCTION__);
-
- uf_process_ps_poll(priv,sa,da,pmBit,interfaceTag);
- }
- else {
- unifi_warning(priv, "%s: Non PS-POLL control frame is received\n", __FUNCTION__);
- }
-#endif
- unifi_net_data_free(priv,&bulkdata->d[0]);
- return;
- }
- if(frameType != IEEE802_11_FRAMETYPE_DATA) {
- unifi_warning(priv, "%s: Non control Non Data frame is received\n",__FUNCTION__);
- unifi_net_data_free(priv,&bulkdata->d[0]);
- return;
- }
-
-#ifdef CSR_SUPPORT_SME
- if((interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP) ||
- (interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO)){
-
- srcStaInfo = CsrWifiRouterCtrlGetStationRecordFromPeerMacAddress(priv,sa,interfaceTag);
-
- if(srcStaInfo == NULL) {
- CsrWifiMacAddress peerMacAddress;
- /* Unknown data PDU */
- memcpy(peerMacAddress.a,sa,ETH_ALEN);
- unifi_trace(priv, UDBG1, "%s: Unexpected frame from peer = %x:%x:%x:%x:%x:%x\n", __FUNCTION__,
- sa[0], sa[1],sa[2], sa[3], sa[4],sa[5]);
- CsrWifiRouterCtrlUnexpectedFrameIndSend(priv->CSR_WIFI_SME_IFACEQUEUE,0,interfaceTag,peerMacAddress);
- unifi_net_data_free(priv, &bulkdata->d[0]);
- return;
- }
-
-        /*
-        Verify the power management bit here to ensure the host and UniFi are
-        always in sync with the peer's power management status.
-
-        If we did it later, the frame might already be stored in the BA
-        re-ordering buffer, leaving the host and UniFi out of sync on the
-        peer's power management status.
-        */
-
- pmBit = (frameControl & 0x1000)?0x01:0x00;
- powerSaveChanged = uf_process_pm_bit_for_peer(priv,srcStaInfo,pmBit,interfaceTag);
-
- /* Update station last activity time */
- srcStaInfo->activity_flag = TRUE;
-
-        /* For a QoS frame, if the PM bit is toggled to indicate a change in power save
-        state then it shall not be considered a trigger frame. Enter only if the peer is
-        a WMM STA and is in power save */
-
- dataFrameType = ((frameControl & 0x00f0) >> 4);
-
- if((powerSaveChanged == FALSE)&&(srcStaInfo->wmmOrQosEnabled == TRUE)&&
- (srcStaInfo->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_POWER_SAVE)){
-
- if((dataFrameType == QOS_DATA) || (dataFrameType == QOS_DATA_NULL)){
-
- /*
- * QoS control field is offset from frame control by 2 (frame control)
- * + 2 (duration/ID) + 2 (sequence control) + 3*ETH_ALEN or 4*ETH_ALEN
- */
- if((frameControl & IEEE802_11_FC_TO_DS_MASK) && (frameControl & IEEE802_11_FC_FROM_DS_MASK)){
- qosControl= CSR_GET_UINT16_FROM_LITTLE_ENDIAN(pData->os_data_ptr + 30);
- }
- else{
- qosControl = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(pData->os_data_ptr + 24);
- }
- unifi_trace(priv, UDBG5, "%s: Check if U-APSD operations are triggered for qosControl: 0x%x\n",__FUNCTION__,qosControl);
- uf_process_wmm_deliver_ac_uapsd(priv,srcStaInfo,qosControl,interfaceTag);
- }
- }
- }
-
-#endif
-
- if( ((frameControl & 0x00f0) >> 4) == QOS_DATA) {
- u8 *qos_control_ptr = (u8*)bulkdata->d[0].os_data_ptr + (((frameControl & IEEE802_11_FC_TO_DS_MASK) && (frameControl & IEEE802_11_FC_FROM_DS_MASK))?30: 24);
- int tID = *qos_control_ptr & IEEE802_11_QC_TID_MASK; /* using ls octet of qos control */
- ba_session_rx_struct *ba_session;
- u8 ba_session_idx = 0;
- /* Get the BA originator address */
- if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO){
- ba_addr = sa;
- }else{
- ba_addr = bssid;
- }
-
- down(&priv->ba_mutex);
- for (ba_session_idx=0; ba_session_idx < MAX_SUPPORTED_BA_SESSIONS_RX; ba_session_idx++){
- ba_session = interfacePriv->ba_session_rx[ba_session_idx];
- if (ba_session){
-                unifi_trace(priv, UDBG6, "found ba_session=%p ba_session_idx=%d", ba_session, ba_session_idx);
- if ((!memcmp(ba_session->macAddress.a, ba_addr, ETH_ALEN)) && (ba_session->tID == tID)){
- frame_desc_struct frame_desc;
- frame_desc.bulkdata = *bulkdata;
- frame_desc.signal = *signal;
- frame_desc.sn = (le16_to_cpu(*((u16*)(bulkdata->d[0].os_data_ptr + IEEE802_11_SEQUENCE_CONTROL_OFFSET))) >> 4) & 0xfff;
- frame_desc.active = TRUE;
- unifi_trace(priv, UDBG6, "%s: calling process_ba_frame (session=%d)\n", __FUNCTION__, ba_session_idx);
- process_ba_frame(priv, interfacePriv, ba_session, &frame_desc);
- up(&priv->ba_mutex);
- process_ba_complete(priv, interfacePriv);
- break;
- }
- }
- }
- if (ba_session_idx == MAX_SUPPORTED_BA_SESSIONS_RX){
- up(&priv->ba_mutex);
- unifi_trace(priv, UDBG6, "%s: calling process_amsdu()", __FUNCTION__);
- process_amsdu(priv, signal, bulkdata);
- }
- } else {
- unifi_trace(priv, UDBG6, "calling unifi_rx()");
- unifi_rx(priv, signal, bulkdata);
- }
-
-    /* Check whether the frames in the reorder buffer have aged. The check
-     * is done after receive processing so that, if the missing frame has
-     * arrived in this receive pass, it is handled cleanly.
-     *
-     * Doing the check here also ensures that the timeout check is made for
-     * every receive indication
-     */
- down(&priv->ba_mutex);
- for (i=0; i < MAX_SUPPORTED_BA_SESSIONS_RX; i++){
- ba_session_rx_struct *ba_session;
- ba_session = interfacePriv->ba_session_rx[i];
- if (ba_session){
- check_ba_frame_age_timeout(priv, interfacePriv, ba_session);
- }
- }
- up(&priv->ba_mutex);
- process_ba_complete(priv, interfacePriv);
-
-}
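The MAC-header bookkeeping in process_ma_packet_ind() and unifi_rx() reduces to simple offset arithmetic: a 3-address data header is 24 bytes, a 4-address (WDS) header is 30, the QoS Control field adds 2 and HT Control adds 4 when the Order bit is set, and the TID is the low nibble of QoS Control. A short illustrative sketch follows; it is not driver code, and the 0x0F mask merely stands in for IEEE802_11_QC_TID_MASK:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical helper mirroring the QoS Control offsets used above. */
static int qos_control_offset(int toDs, int fromDs)
{
    /* FC(2) + Dur(2) + A1(6) + A2(6) + A3(6) + Seq(2) = 24, plus A4(6) for WDS */
    return (toDs && fromDs) ? 30 : 24;
}

int main(void)
{
    uint8_t qos_control = 0x25;    /* example low octet of the QoS Control field */
    int tid = qos_control & 0x0F;  /* TID lives in the low 4 bits */

    printf("QoS Control offset (3-address) = %d\n", qos_control_offset(0, 0));
    printf("QoS Control offset (4-address) = %d\n", qos_control_offset(1, 1));
    printf("TID for QoS Control 0x%02x = %d\n", qos_control, tid);
    return 0;
}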
-/*
- * ---------------------------------------------------------------------------
- * uf_set_multicast_list
- *
- * This function is called by the higher level stack to set
- * a list of multicast rx addresses.
- *
- * Arguments:
- * dev Network Device pointer.
- *
- * Returns:
- * None.
- *
- * Notes:
- * ---------------------------------------------------------------------------
- */
-
-static void
-uf_set_multicast_list(struct net_device *dev)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
-
-#ifdef CSR_NATIVE_LINUX
- unifi_trace(priv, UDBG3, "uf_set_multicast_list unsupported\n");
- return;
-#else
-
- u8 *mc_list = interfacePriv->mc_list;
- struct netdev_hw_addr *mc_addr;
- int mc_addr_count;
-
- if (priv->init_progress != UNIFI_INIT_COMPLETED) {
- return;
- }
-
- mc_addr_count = netdev_mc_count(dev);
-
- unifi_trace(priv, UDBG3,
- "uf_set_multicast_list (count=%d)\n", mc_addr_count);
-
-
- /* Not enough space? */
- if (mc_addr_count > UNIFI_MAX_MULTICAST_ADDRESSES) {
- return;
- }
-
- /* Store the list to be processed by the work item. */
- interfacePriv->mc_list_count = mc_addr_count;
- netdev_hw_addr_list_for_each(mc_addr, &dev->mc) {
- memcpy(mc_list, mc_addr->addr, ETH_ALEN);
- mc_list += ETH_ALEN;
- }
-
- /* Send a message to the workqueue */
- queue_work(priv->unifi_workqueue, &priv->multicast_list_task);
-#endif
-
-} /* uf_set_multicast_list() */
-
-/*
- * ---------------------------------------------------------------------------
- * netdev_mlme_event_handler
- *
- * Callback function to be used as the udi_event_callback when registering
- * as a netdev client.
- * To use it, a client specifies this function as the udi_event_callback
- * to ul_register_client(). The signal dispatcher in
- * unifi_receive_event() will call this function to deliver a signal.
- *
- * Arguments:
- * pcli Pointer to the client instance.
- * signal Pointer to the received signal.
- * signal_len Size of the signal structure in bytes.
- * bulkdata Pointer to structure containing any associated bulk data.
- * dir Direction of the signal. Zero means from host,
- * non-zero means to host.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-static void
-netdev_mlme_event_handler(ul_client_t *pcli, const u8 *sig_packed, int sig_len,
- const bulk_data_param_t *bulkdata_o, int dir)
-{
- CSR_SIGNAL signal;
- unifi_priv_t *priv = uf_find_instance(pcli->instance);
- int id, r;
- bulk_data_param_t bulkdata;
-
- /* Just a sanity check */
- if (sig_packed == NULL) {
- return;
- }
-
- /*
- * This copy is to silence a compiler warning about discarding the
- * const qualifier.
- */
- bulkdata = *bulkdata_o;
-
- /* Get the unpacked signal */
- r = read_unpack_signal(sig_packed, &signal);
- if (r) {
- /*
- * The CSR_MLME_CONNECTED_INDICATION_ID has a receiverID=0 so will
- * fall through this case. It is safe to ignore this signal.
- */
- unifi_trace(priv, UDBG1,
- "Netdev - Received unknown signal 0x%.4X.\n",
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN(sig_packed));
- return;
- }
-
- id = signal.SignalPrimitiveHeader.SignalId;
- unifi_trace(priv, UDBG3, "Netdev - Process signal 0x%.4X\n", id);
-
- /*
- * Take the appropriate action for the signal.
- */
- switch (id) {
- case CSR_MA_PACKET_ERROR_INDICATION_ID:
- process_ma_packet_error_ind(priv, &signal, &bulkdata);
- break;
- case CSR_MA_PACKET_INDICATION_ID:
- process_ma_packet_ind(priv, &signal, &bulkdata);
- break;
- case CSR_MA_PACKET_CONFIRM_ID:
- process_ma_packet_cfm(priv, &signal, &bulkdata);
- break;
-#ifdef CSR_SUPPORT_SME
- case CSR_MLME_SET_TIM_CONFIRM_ID:
-        /* Handle TIM confirms from the firmware and set the station record's TIM state
-         * appropriately. In case of failure, retry up to the max_retransmit limit
-         */
- uf_handle_tim_cfm(priv, &signal.u.MlmeSetTimConfirm, signal.SignalPrimitiveHeader.ReceiverProcessId);
- break;
-#endif
- case CSR_DEBUG_STRING_INDICATION_ID:
- debug_string_indication(priv, bulkdata.d[0].os_data_ptr, bulkdata.d[0].data_length);
- break;
-
- case CSR_DEBUG_WORD16_INDICATION_ID:
- debug_word16_indication(priv, &signal);
- break;
-
- case CSR_DEBUG_GENERIC_CONFIRM_ID:
- case CSR_DEBUG_GENERIC_INDICATION_ID:
- debug_generic_indication(priv, &signal);
- break;
- default:
- break;
- }
-
-} /* netdev_mlme_event_handler() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * uf_net_get_name
- *
- * Retrieve the name (e.g. eth1) associated with this network device
- *
- * Arguments:
- * dev Pointer to the network device.
- * name Buffer to write name
- * len Size of buffer in bytes
- *
- * Returns:
- * None
- *
- * Notes:
- * ---------------------------------------------------------------------------
- */
-void uf_net_get_name(struct net_device *dev, char *name, int len)
-{
- *name = '\0';
- if (dev) {
- strlcpy(name, dev->name, (len > IFNAMSIZ) ? IFNAMSIZ : len);
- }
-
-} /* uf_net_get_name */
-
-#ifdef CSR_SUPPORT_WEXT
-
-/*
- * ---------------------------------------------------------------------------
- * uf_netdev_event
- *
- * Callback function to handle netdev state changes
- *
- * Arguments:
- * notif Pointer to a notifier_block.
- * event Event prompting notification
- * ptr net_device pointer
- *
- * Returns:
- * None
- *
- * Notes:
- * The event handler is global, and may occur on non-UniFi netdevs.
- * ---------------------------------------------------------------------------
- */
-static int
-uf_netdev_event(struct notifier_block *notif, unsigned long event, void* ptr) {
- struct net_device *netdev = ptr;
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(netdev);
- unifi_priv_t *priv = NULL;
- static const CsrWifiMacAddress broadcast_address = {{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}};
-
- /* Check that the event is for a UniFi netdev. If it's not, the netdev_priv
- * structure is not safe to use.
- */
- if (uf_find_netdev_priv(interfacePriv) == -1) {
- unifi_trace(NULL, UDBG1, "uf_netdev_event: ignore e=%d, ptr=%p, priv=%p %s\n",
- event, ptr, interfacePriv, netdev->name);
- return 0;
- }
-
- switch(event) {
- case NETDEV_CHANGE:
- priv = interfacePriv->privPtr;
- unifi_trace(priv, UDBG1, "NETDEV_CHANGE: %p %s %s waiting for it\n",
- ptr,
- netdev->name,
- interfacePriv->wait_netdev_change ? "" : "not");
-
- if (interfacePriv->wait_netdev_change) {
- netif_tx_wake_all_queues(priv->netdev[interfacePriv->InterfaceTag]);
- interfacePriv->connected = UnifiConnected;
- interfacePriv->wait_netdev_change = FALSE;
- /* Note: passing the broadcast address here will allow anyone to attempt to join our adhoc network */
- uf_process_rx_pending_queue(priv, UF_UNCONTROLLED_PORT_Q, broadcast_address, 1,interfacePriv->InterfaceTag);
- uf_process_rx_pending_queue(priv, UF_CONTROLLED_PORT_Q, broadcast_address, 1,interfacePriv->InterfaceTag);
- }
- break;
-
- default:
- break;
- }
- return 0;
-}
-
-static struct notifier_block uf_netdev_notifier = {
- .notifier_call = uf_netdev_event,
-};
-#endif /* CSR_SUPPORT_WEXT */
-
-
-static void
- process_amsdu(unifi_priv_t *priv, CSR_SIGNAL *signal, bulk_data_param_t *bulkdata)
-{
- u32 offset;
- u32 length = bulkdata->d[0].data_length;
- u32 subframe_length, subframe_body_length, dot11_hdr_size;
- u8 *ptr;
- bulk_data_param_t subframe_bulkdata;
- u8 *dot11_hdr_ptr = (u8*)bulkdata->d[0].os_data_ptr;
- CsrResult csrResult;
- u16 frameControl;
- u8 *qos_control_ptr;
-
- frameControl = le16_to_cpu(*((u16*)dot11_hdr_ptr));
- qos_control_ptr = dot11_hdr_ptr + (((frameControl & IEEE802_11_FC_TO_DS_MASK) && (frameControl & IEEE802_11_FC_FROM_DS_MASK))?30: 24);
- if(!(*qos_control_ptr & IEEE802_11_QC_A_MSDU_PRESENT)) {
- unifi_trace(priv, UDBG6, "%s: calling unifi_rx()", __FUNCTION__);
- unifi_rx(priv, signal, bulkdata);
- return;
- }
- *qos_control_ptr &= ~(IEEE802_11_QC_A_MSDU_PRESENT);
-
- ptr = qos_control_ptr + 2;
- offset = dot11_hdr_size = ptr - dot11_hdr_ptr;
-
- while(length > (offset + sizeof(struct ethhdr) + sizeof(llc_snap_hdr_t))) {
- subframe_body_length = ntohs(((struct ethhdr*)ptr)->h_proto);
- if(subframe_body_length > IEEE802_11_MAX_DATA_LEN) {
- unifi_error(priv, "%s: bad subframe_body_length = %d\n", __FUNCTION__, subframe_body_length);
- break;
- }
- subframe_length = sizeof(struct ethhdr) + subframe_body_length;
- memset(&subframe_bulkdata, 0, sizeof(bulk_data_param_t));
-
- csrResult = unifi_net_data_malloc(priv, &subframe_bulkdata.d[0], dot11_hdr_size + subframe_body_length);
-
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(priv, "%s: unifi_net_data_malloc failed\n", __FUNCTION__);
- break;
- }
-
- memcpy((u8*)subframe_bulkdata.d[0].os_data_ptr, dot11_hdr_ptr, dot11_hdr_size);
-
-
-        /* When ToDS=0 and FromDS=0, Address 3 already holds the BSSID, so there is no need to re-program it */
- if ((frameControl & IEEE802_11_FC_TO_DS_MASK) && !(frameControl & IEEE802_11_FC_FROM_DS_MASK)){
- memcpy((u8*)subframe_bulkdata.d[0].os_data_ptr + IEEE802_11_ADDR3_OFFSET, ((struct ethhdr*)ptr)->h_dest, ETH_ALEN);
- }
- else if (!(frameControl & IEEE802_11_FC_TO_DS_MASK) && (frameControl & IEEE802_11_FC_FROM_DS_MASK)){
- memcpy((u8*)subframe_bulkdata.d[0].os_data_ptr + IEEE802_11_ADDR3_OFFSET,
- ((struct ethhdr*)ptr)->h_source,
- ETH_ALEN);
- }
-
- memcpy((u8*)subframe_bulkdata.d[0].os_data_ptr + dot11_hdr_size,
- ptr + sizeof(struct ethhdr),
- subframe_body_length);
- unifi_trace(priv, UDBG6, "%s: calling unifi_rx. length = %d subframe_length = %d\n", __FUNCTION__, length, subframe_length);
- unifi_rx(priv, signal, &subframe_bulkdata);
-
- subframe_length = (subframe_length + 3)&(~0x3);
- ptr += subframe_length;
- offset += subframe_length;
- }
- unifi_net_data_free(priv, &bulkdata->d[0]);
-}
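Each A-MSDU subframe parsed by process_amsdu() starts with a 14-byte DA/SA/length header (the same layout as struct ethhdr) followed by the body, and the next subframe begins on a 4-byte boundary, which is what the (subframe_length + 3) & ~0x3 step implements. A small stand-alone sketch of that stepping, with invented body lengths and an assumed 26-byte 802.11 QoS header:

#include <stdio.h>

int main(void)
{
    const int eth_hdr_len = 14;                 /* DA(6) + SA(6) + length(2) */
    int bodies[] = { 60, 1, 33 };               /* example subframe body lengths */
    unsigned offset = 26;                       /* assumed 802.11 QoS header size */
    int i;

    for (i = 0; i < 3; i++) {
        int subframe_len = eth_hdr_len + bodies[i];
        int padded = (subframe_len + 3) & ~0x3; /* next subframe is 4-byte aligned */
        printf("subframe %d: len=%d, padded=%d, next offset=%u\n",
               i, subframe_len, padded, offset + padded);
        offset += padded;
    }
    return 0;
}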
-
-
-#define SN_TO_INDEX(__ba_session, __sn) (((__sn - __ba_session->start_sn) & 0xFFF) % __ba_session->wind_size)
-
-
-#define ADVANCE_EXPECTED_SN(__ba_session) \
-{ \
- __ba_session->expected_sn++; \
- __ba_session->expected_sn &= 0xFFF; \
-}
-
-#define FREE_BUFFER_SLOT(__ba_session, __index) \
-{ \
- __ba_session->occupied_slots--; \
- __ba_session->buffer[__index].active = FALSE; \
- ADVANCE_EXPECTED_SN(__ba_session); \
-}
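These macros implement a reorder window over the 12-bit 802.11 sequence-number space: SN_TO_INDEX() takes the distance from the window start modulo 4096 and folds it into the buffer with a modulo of the window size, and ADVANCE_EXPECTED_SN() wraps the expected sequence number at 0xFFF. A stand-alone model of the same arithmetic, with struct and field names invented for the example:

#include <stdio.h>
#include <stdint.h>

struct ba_model {
    uint16_t start_sn;
    uint16_t expected_sn;
    uint16_t wind_size;
};

static int sn_to_index(const struct ba_model *ba, uint16_t sn)
{
    return ((sn - ba->start_sn) & 0xFFF) % ba->wind_size;
}

int main(void)
{
    struct ba_model ba = { .start_sn = 4090, .expected_sn = 4090, .wind_size = 16 };

    /* the 12-bit sequence space wraps from 4095 back to 0 */
    printf("index(4095) = %d\n", sn_to_index(&ba, 4095)); /* 5 */
    printf("index(2)    = %d\n", sn_to_index(&ba, 2));    /* 8 */

    ba.expected_sn = (ba.expected_sn + 1) & 0xFFF;        /* ADVANCE_EXPECTED_SN */
    printf("expected_sn after advance = %u\n", (unsigned)ba.expected_sn);
    return 0;
}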
-
-static void add_frame_to_ba_complete(unifi_priv_t *priv,
- netInterface_priv_t *interfacePriv,
- frame_desc_struct *frame_desc)
-{
- interfacePriv->ba_complete[interfacePriv->ba_complete_index] = *frame_desc;
- interfacePriv->ba_complete_index++;
-}
-
-
-static void update_expected_sn(unifi_priv_t *priv,
- netInterface_priv_t *interfacePriv,
- ba_session_rx_struct *ba_session,
- u16 sn)
-{
- int i, j;
- u16 gap;
-
- gap = (sn - ba_session->expected_sn) & 0xFFF;
- unifi_trace(priv, UDBG6, "%s: process the frames up to new_expected_sn = %d gap = %d\n", __FUNCTION__, sn, gap);
- for(j = 0; j < gap && j < ba_session->wind_size; j++) {
- i = SN_TO_INDEX(ba_session, ba_session->expected_sn);
- unifi_trace(priv, UDBG6, "%s: process the slot index = %d\n", __FUNCTION__, i);
- if(ba_session->buffer[i].active) {
- add_frame_to_ba_complete(priv, interfacePriv, &ba_session->buffer[i]);
- unifi_trace(priv, UDBG6, "%s: process the frame at index = %d expected_sn = %d\n", __FUNCTION__, i, ba_session->expected_sn);
- FREE_BUFFER_SLOT(ba_session, i);
- } else {
- unifi_trace(priv, UDBG6, "%s: empty slot at index = %d\n", __FUNCTION__, i);
- ADVANCE_EXPECTED_SN(ba_session);
- }
- }
- ba_session->expected_sn = sn;
-}
-
-
-static void complete_ready_sequence(unifi_priv_t *priv,
- netInterface_priv_t *interfacePriv,
- ba_session_rx_struct *ba_session)
-{
- int i;
-
- i = SN_TO_INDEX(ba_session, ba_session->expected_sn);
- while (ba_session->buffer[i].active) {
- add_frame_to_ba_complete(priv, interfacePriv, &ba_session->buffer[i]);
- unifi_trace(priv, UDBG6, "%s: completed stored frame(expected_sn=%d) at i = %d\n", __FUNCTION__, ba_session->expected_sn, i);
- FREE_BUFFER_SLOT(ba_session, i);
- i = SN_TO_INDEX(ba_session, ba_session->expected_sn);
- }
-}
-
-
-void scroll_ba_window(unifi_priv_t *priv,
- netInterface_priv_t *interfacePriv,
- ba_session_rx_struct *ba_session,
- u16 sn)
-{
- if(((sn - ba_session->expected_sn) & 0xFFF) <= 2048) {
- update_expected_sn(priv, interfacePriv, ba_session, sn);
- complete_ready_sequence(priv, interfacePriv, ba_session);
- }
-}
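Both scroll_ba_window() and consume_frame_or_get_buffer_index() use the test ((sn - expected_sn) & 0xFFF) <= 2048 to decide whether a sequence number is at or ahead of the expected one within half of the 12-bit sequence space, or whether it is an old number the window has already passed. A few worked cases as a runnable sketch, illustrative only:

#include <stdio.h>
#include <stdint.h>

static int is_at_or_ahead(uint16_t sn, uint16_t expected_sn)
{
    /* <= 2048: sn is expected_sn, or ahead of it by at most half the space */
    return ((sn - expected_sn) & 0xFFF) <= 2048;
}

int main(void)
{
    printf("%d\n", is_at_or_ahead(105, 100));  /* 1: slightly ahead          */
    printf("%d\n", is_at_or_ahead(95, 100));   /* 0: older than expected     */
    printf("%d\n", is_at_or_ahead(3, 4090));   /* 1: ahead across the wrap   */
    printf("%d\n", is_at_or_ahead(4090, 3));   /* 0: behind across the wrap  */
    return 0;
}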
-
-
-static int consume_frame_or_get_buffer_index(unifi_priv_t *priv,
- netInterface_priv_t *interfacePriv,
- ba_session_rx_struct *ba_session,
- u16 sn,
- frame_desc_struct *frame_desc) {
- int i;
- u16 sn_temp;
-
- if(((sn - ba_session->expected_sn) & 0xFFF) <= 2048) {
-
- /* once we are in BA window, set the flag for BA trigger */
- if(!ba_session->trigger_ba_after_ssn){
- ba_session->trigger_ba_after_ssn = TRUE;
- }
-
- sn_temp = ba_session->expected_sn + ba_session->wind_size;
- unifi_trace(priv, UDBG6, "%s: new frame: sn=%d\n", __FUNCTION__, sn);
- if(!(((sn - sn_temp) & 0xFFF) > 2048)) {
- u16 new_expected_sn;
- unifi_trace(priv, UDBG6, "%s: frame is out of window\n", __FUNCTION__);
- sn_temp = (sn - ba_session->wind_size) & 0xFFF;
- new_expected_sn = (sn_temp + 1) & 0xFFF;
- update_expected_sn(priv, interfacePriv, ba_session, new_expected_sn);
- }
- i = -1;
- if (sn == ba_session->expected_sn) {
- unifi_trace(priv, UDBG6, "%s: sn = ba_session->expected_sn = %d\n", __FUNCTION__, sn);
- ADVANCE_EXPECTED_SN(ba_session);
- add_frame_to_ba_complete(priv, interfacePriv, frame_desc);
- } else {
- i = SN_TO_INDEX(ba_session, sn);
- unifi_trace(priv, UDBG6, "%s: sn(%d) != ba_session->expected_sn(%d), i = %d\n", __FUNCTION__, sn, ba_session->expected_sn, i);
- if (ba_session->buffer[i].active) {
- unifi_trace(priv, UDBG6, "%s: free frame at i = %d\n", __FUNCTION__, i);
- i = -1;
- unifi_net_data_free(priv, &frame_desc->bulkdata.d[0]);
- }
- }
- } else {
- i = -1;
- if(!ba_session->trigger_ba_after_ssn){
- unifi_trace(priv, UDBG6, "%s: frame before ssn, pass it up: sn=%d\n", __FUNCTION__, sn);
- add_frame_to_ba_complete(priv, interfacePriv, frame_desc);
- }else{
- unifi_trace(priv, UDBG6, "%s: old frame, drop: sn=%d, expected_sn=%d\n", __FUNCTION__, sn, ba_session->expected_sn);
- unifi_net_data_free(priv, &frame_desc->bulkdata.d[0]);
- }
- }
- return i;
-}
-
-
-
-static void process_ba_frame(unifi_priv_t *priv,
- netInterface_priv_t *interfacePriv,
- ba_session_rx_struct *ba_session,
- frame_desc_struct *frame_desc)
-{
- int i;
- u16 sn = frame_desc->sn;
-
- if (ba_session->timeout) {
- mod_timer(&ba_session->timer, (jiffies + usecs_to_jiffies((ba_session->timeout) * 1024)));
- }
- unifi_trace(priv, UDBG6, "%s: got frame(sn=%d)\n", __FUNCTION__, sn);
-
- i = consume_frame_or_get_buffer_index(priv, interfacePriv, ba_session, sn, frame_desc);
- if(i >= 0) {
- unifi_trace(priv, UDBG6, "%s: store frame(sn=%d) at i = %d\n", __FUNCTION__, sn, i);
- ba_session->buffer[i] = *frame_desc;
- ba_session->buffer[i].recv_time = CsrTimeGet(NULL);
- ba_session->occupied_slots++;
- } else {
- unifi_trace(priv, UDBG6, "%s: frame consumed - sn = %d\n", __FUNCTION__, sn);
- }
- complete_ready_sequence(priv, interfacePriv, ba_session);
-}
-
-
-static void process_ba_complete(unifi_priv_t *priv, netInterface_priv_t *interfacePriv)
-{
- frame_desc_struct *frame_desc;
- u8 i;
-
- for(i = 0; i < interfacePriv->ba_complete_index; i++) {
- frame_desc = &interfacePriv->ba_complete[i];
- unifi_trace(priv, UDBG6, "%s: calling process_amsdu()\n", __FUNCTION__);
- process_amsdu(priv, &frame_desc->signal, &frame_desc->bulkdata);
- }
- interfacePriv->ba_complete_index = 0;
-
-}
-
-
-/* Check whether the frames in the BA reorder buffer have aged and,
- * if so, release them to the upper layers and move
- * the window
- */
-static void check_ba_frame_age_timeout( unifi_priv_t *priv,
- netInterface_priv_t *interfacePriv,
- ba_session_rx_struct *ba_session)
-{
- u32 now;
- u32 age;
- u8 i, j;
- u16 sn_temp;
-
-    /* gap starts at 1 because we have buffered frames and
-     * hence a minimum gap of 1 exists
-     */
- u8 gap=1;
-
- now = CsrTimeGet(NULL);
-
- if (ba_session->occupied_slots)
- {
-        /* The expected sequence number has not arrived, so search from the next
-         * sequence number until a frame is available and determine the gap.
-         * If the available frame has timed out, advance the
-         * expected sequence number and release the frames
- */
- sn_temp = (ba_session->expected_sn + 1) & 0xFFF;
-
- for(j = 0; j < ba_session->wind_size; j++)
- {
- i = SN_TO_INDEX(ba_session, sn_temp);
-
- if(ba_session->buffer[i].active)
- {
- unifi_trace(priv, UDBG6, "check age at slot index = %d sn = %d recv_time = %u now = %u\n",
- i,
- ba_session->buffer[i].sn,
- ba_session->buffer[i].recv_time,
- now);
-
- if (ba_session->buffer[i].recv_time > now)
- {
- /* timer wrap */
- age = CsrTimeAdd((u32)CsrTimeSub(CSR_SCHED_TIME_MAX, ba_session->buffer[i].recv_time), now);
- }
- else
- {
- age = (u32)CsrTimeSub(now, ba_session->buffer[i].recv_time);
- }
-
- if (age >= CSR_WIFI_BA_MPDU_FRAME_AGE_TIMEOUT)
- {
- unifi_trace(priv, UDBG2, "release the frame at index = %d gap = %d expected_sn = %d sn = %d\n",
- i,
- gap,
- ba_session->expected_sn,
- ba_session->buffer[i].sn);
-
-                    /* if it has timed out, don't wait for missing frames; move the window */
- while (gap--)
- {
- ADVANCE_EXPECTED_SN(ba_session);
- }
- add_frame_to_ba_complete(priv, interfacePriv, &ba_session->buffer[i]);
- FREE_BUFFER_SLOT(ba_session, i);
- complete_ready_sequence(priv, interfacePriv, ba_session);
- }
- break;
-
- }
- else
- {
- /* advance temp sequence number and frame gap */
- sn_temp = (sn_temp + 1) & 0xFFF;
- gap++;
- }
- }
- }
-}
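The age calculation in check_ba_frame_age_timeout() guards against the timestamp counter wrapping between reception and the check: when recv_time is greater than now, the age is the distance from recv_time to the counter maximum plus the time elapsed since the wrap. A minimal sketch of that computation, where TIME_MAX merely stands in for CSR_SCHED_TIME_MAX:

#include <stdio.h>

#define TIME_MAX 0xFFFFFFFFu   /* stands in for CSR_SCHED_TIME_MAX */

static unsigned frame_age(unsigned now, unsigned recv_time)
{
    if (recv_time > now)                        /* the counter wrapped since rx */
        return (TIME_MAX - recv_time) + now;
    return now - recv_time;
}

int main(void)
{
    printf("%u\n", frame_age(5000u, 1000u));        /* 4000                     */
    printf("%u\n", frame_age(500u, 0xFFFFFF00u));   /* 755: age across the wrap */
    return 0;
}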
-
-
-static void process_ma_packet_error_ind(unifi_priv_t *priv, CSR_SIGNAL *signal, bulk_data_param_t *bulkdata)
-{
- u16 interfaceTag;
- const CSR_MA_PACKET_ERROR_INDICATION *pkt_err_ind = &signal->u.MaPacketErrorIndication;
- netInterface_priv_t *interfacePriv;
- ba_session_rx_struct *ba_session;
- u8 ba_session_idx = 0;
- CSR_PRIORITY UserPriority;
- CSR_SEQUENCE_NUMBER sn;
-
- interfaceTag = (pkt_err_ind->VirtualInterfaceIdentifier & 0xff);
-
-
- /* Sanity check that the VIF refers to a sensible interface */
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES)
- {
- unifi_error(priv, "%s: MaPacketErrorIndication indication with bad interfaceTag %d\n", __FUNCTION__, interfaceTag);
- return;
- }
-
- interfacePriv = priv->interfacePriv[interfaceTag];
- UserPriority = pkt_err_ind->UserPriority;
- if(UserPriority > 15) {
- unifi_error(priv, "%s: MaPacketErrorIndication indication with bad UserPriority=%d\n", __FUNCTION__, UserPriority);
- }
- sn = pkt_err_ind->SequenceNumber;
-
- down(&priv->ba_mutex);
- /* To find the right ba_session loop through the BA sessions, compare MAC address and tID */
- for (ba_session_idx=0; ba_session_idx < MAX_SUPPORTED_BA_SESSIONS_RX; ba_session_idx++){
- ba_session = interfacePriv->ba_session_rx[ba_session_idx];
- if (ba_session){
- if ((!memcmp(ba_session->macAddress.a, pkt_err_ind->PeerQstaAddress.x, ETH_ALEN)) && (ba_session->tID == UserPriority)){
- if (ba_session->timeout) {
- mod_timer(&ba_session->timer, (jiffies + usecs_to_jiffies((ba_session->timeout) * 1024)));
- }
- scroll_ba_window(priv, interfacePriv, ba_session, sn);
- break;
- }
- }
- }
-
- up(&priv->ba_mutex);
- process_ba_complete(priv, interfacePriv);
-}
-
-
diff --git a/drivers/staging/csr/os.c b/drivers/staging/csr/os.c
deleted file mode 100644
index 37ec59d..0000000
--- a/drivers/staging/csr/os.c
+++ /dev/null
@@ -1,477 +0,0 @@
-/*
- * ---------------------------------------------------------------------------
- * FILE: os.c
- *
- * PURPOSE:
- * Routines to fulfil the OS-abstraction for the HIP lib.
- * It is part of the porting exercise.
- *
- * Copyright (C) 2005-2009 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ---------------------------------------------------------------------------
- */
-
-/**
- * The HIP lib OS abstraction consists of the implementation
- * of the functions in this file. It is part of the porting exercise.
- */
-
-#include "unifi_priv.h"
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_net_data_malloc
- *
- * Allocate an OS specific net data buffer of "size" bytes.
- *      The bulk_data_slot.os_data_ptr must be initialised to point
- *      to the allocated buffer. The bulk_data_slot.length must be
- *      initialised to the requested size on success, or zero otherwise.
- * The bulk_data_slot.os_net_buf_ptr can be initialised to
- * an OS specific pointer to be used in the unifi_net_data_free().
- *
- *
- * Arguments:
- * ospriv Pointer to device private context struct.
- * bulk_data_slot Pointer to the bulk data structure to initialise.
- * size Size of the buffer to be allocated.
- *
- * Returns:
- * CSR_RESULT_SUCCESS on success, CSR_RESULT_FAILURE otherwise.
- * ---------------------------------------------------------------------------
- */
-CsrResult
-unifi_net_data_malloc(void *ospriv, bulk_data_desc_t *bulk_data_slot, unsigned int size)
-{
- struct sk_buff *skb;
- unifi_priv_t *priv = (unifi_priv_t*)ospriv;
- int rounded_length;
-
- if (priv->card_info.sdio_block_size == 0) {
- unifi_error(priv, "unifi_net_data_malloc: Invalid SDIO block size\n");
- return CSR_RESULT_FAILURE;
- }
-
- rounded_length = (size + priv->card_info.sdio_block_size - 1) & ~(priv->card_info.sdio_block_size - 1);
-
- /*
- * (ETH_HLEN + 2) bytes tailroom for header manipulation
- * CSR_WIFI_ALIGN_BYTES bytes headroom for alignment manipulation
- */
- skb = dev_alloc_skb(rounded_length + 2 + ETH_HLEN + CSR_WIFI_ALIGN_BYTES);
- if (! skb) {
- unifi_error(ospriv, "alloc_skb failed.\n");
- bulk_data_slot->os_net_buf_ptr = NULL;
- bulk_data_slot->net_buf_length = 0;
- bulk_data_slot->os_data_ptr = NULL;
- bulk_data_slot->data_length = 0;
- return CSR_RESULT_FAILURE;
- }
-
- bulk_data_slot->os_net_buf_ptr = (const unsigned char*)skb;
- bulk_data_slot->net_buf_length = rounded_length + 2 + ETH_HLEN + CSR_WIFI_ALIGN_BYTES;
- bulk_data_slot->os_data_ptr = (const void*)skb->data;
- bulk_data_slot->data_length = size;
-
- return CSR_RESULT_SUCCESS;
-} /* unifi_net_data_malloc() */
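The rounding above, (size + block - 1) & ~(block - 1), bumps the requested length up to the next multiple of the SDIO block size; the mask form is only valid when the block size is a power of two, which the driver implicitly relies on. A tiny runnable illustration, not driver code:

#include <stdio.h>

/* Round size up to a multiple of block; valid only for power-of-two blocks. */
static unsigned round_up_block(unsigned size, unsigned block)
{
    return (size + block - 1) & ~(block - 1);
}

int main(void)
{
    printf("%u\n", round_up_block(1500, 512)); /* 1536 */
    printf("%u\n", round_up_block(512, 512));  /* 512  */
    printf("%u\n", round_up_block(1, 64));     /* 64   */
    return 0;
}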
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_net_data_free
- *
- * Free an OS specific net data buffer.
- * The bulk_data_slot.length must be initialised to 0.
- *
- *
- * Arguments:
- * ospriv Pointer to device private context struct.
- * bulk_data_slot Pointer to the bulk data structure that
- * holds the data to be freed.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-void
-unifi_net_data_free(void *ospriv, bulk_data_desc_t *bulk_data_slot)
-{
- struct sk_buff *skb;
- CSR_UNUSED(ospriv);
-
- skb = (struct sk_buff *)bulk_data_slot->os_net_buf_ptr;
- dev_kfree_skb(skb);
-
- bulk_data_slot->net_buf_length = 0;
- bulk_data_slot->data_length = 0;
- bulk_data_slot->os_data_ptr = bulk_data_slot->os_net_buf_ptr = NULL;
-
-} /* unifi_net_data_free() */
-
-
-/*
-* ---------------------------------------------------------------------------
-* unifi_net_dma_align
-*
-* DMA align an OS specific net data buffer.
-* The buffer must be empty.
-*
-*
-* Arguments:
-* ospriv Pointer to device private context struct.
-* bulk_data_slot Pointer to the bulk data structure that
-* holds the data to be aligned.
-*
-* Returns:
-* None.
-* ---------------------------------------------------------------------------
-*/
-CsrResult
-unifi_net_dma_align(void *ospriv, bulk_data_desc_t *bulk_data_slot)
-{
- struct sk_buff *skb;
- unsigned long buf_address;
- int offset;
- unifi_priv_t *priv = (unifi_priv_t*)ospriv;
-
- if ((bulk_data_slot == NULL) || (CSR_WIFI_ALIGN_BYTES == 0)) {
- return CSR_RESULT_SUCCESS;
- }
-
- if ((bulk_data_slot->os_data_ptr == NULL) || (bulk_data_slot->data_length == 0)) {
- return CSR_RESULT_SUCCESS;
- }
-
- buf_address = (unsigned long)(bulk_data_slot->os_data_ptr) & (CSR_WIFI_ALIGN_BYTES - 1);
-
-    unifi_trace(priv, UDBG5,
-                "unifi_net_dma_align: Align buffer (0x%p) by %lu bytes\n",
-                bulk_data_slot->os_data_ptr, buf_address);
-
- offset = CSR_WIFI_ALIGN_BYTES - buf_address;
- if (offset < 0) {
- unifi_error(priv, "unifi_net_dma_align: Failed (offset=%d)\n", offset);
- return CSR_RESULT_FAILURE;
- }
-
- skb = (struct sk_buff*)(bulk_data_slot->os_net_buf_ptr);
- skb_reserve(skb, offset);
- bulk_data_slot->os_net_buf_ptr = (const unsigned char*)skb;
- bulk_data_slot->os_data_ptr = (const void*)(skb->data);
-
- return CSR_RESULT_SUCCESS;
-
-} /* unifi_net_dma_align() */
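The alignment logic above measures how far os_data_ptr is from a CSR_WIFI_ALIGN_BYTES boundary and reserves that much headroom so the payload starts aligned. The sketch below, which is illustrative only and not driver code, reproduces the arithmetic for a few example addresses and also shows the common (align - misalign) & (align - 1) form, which reserves nothing when the pointer is already aligned; the plain align - misalign form used above reserves a full alignment unit in that case, which still leaves the pointer aligned.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    unsigned align = 4;             /* stands in for CSR_WIFI_ALIGN_BYTES */
    uintptr_t addrs[] = { 0x1000, 0x1001, 0x1003 };
    int i;

    for (i = 0; i < 3; i++) {
        unsigned misalign = (unsigned)(addrs[i] & (align - 1));
        unsigned offset_driver = align - misalign;                 /* as in the code above */
        unsigned offset_common = (align - misalign) & (align - 1); /* 0 when aligned */
        printf("addr=0x%lx misalign=%u driver-offset=%u common-offset=%u\n",
               (unsigned long)addrs[i], misalign, offset_driver, offset_common);
    }
    return 0;
}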
-
-#ifdef ANDROID_TIMESTAMP
-static volatile unsigned int printk_cpu = UINT_MAX;
-char tbuf[30];
-
-char* print_time(void )
-{
- unsigned long long t;
- unsigned long nanosec_rem;
-
- t = cpu_clock(printk_cpu);
- nanosec_rem = do_div(t, 1000000000);
- sprintf(tbuf, "[%5lu.%06lu] ",
- (unsigned long) t,
- nanosec_rem / 1000);
-
- return tbuf;
-}
-#endif
-
-
-/* Module parameters */
-extern int unifi_debug;
-
-#ifdef UNIFI_DEBUG
-#define DEBUG_BUFFER_SIZE 120
-
-#define FORMAT_TRACE(_s, _len, _args, _fmt) \
- do { \
- va_start(_args, _fmt); \
- _len += vsnprintf(&(_s)[_len], \
- (DEBUG_BUFFER_SIZE - _len), \
- _fmt, _args); \
- va_end(_args); \
- if (_len >= DEBUG_BUFFER_SIZE) { \
- (_s)[DEBUG_BUFFER_SIZE - 2] = '\n'; \
- (_s)[DEBUG_BUFFER_SIZE - 1] = 0; \
- } \
- } while (0)
-
-void
-unifi_error(void* ospriv, const char *fmt, ...)
-{
- unifi_priv_t *priv = (unifi_priv_t*) ospriv;
- char s[DEBUG_BUFFER_SIZE];
- va_list args;
- unsigned int len;
-#ifdef ANDROID_TIMESTAMP
- if (priv != NULL) {
- len = snprintf(s, DEBUG_BUFFER_SIZE, KERN_ERR "%s unifi%d: ", print_time(), priv->instance);
- } else {
- len = snprintf(s, DEBUG_BUFFER_SIZE, KERN_ERR "%s unifi: ", print_time());
- }
-#else
- if (priv != NULL) {
- len = snprintf(s, DEBUG_BUFFER_SIZE, KERN_ERR "unifi%d: ", priv->instance);
- } else {
- len = snprintf(s, DEBUG_BUFFER_SIZE, KERN_ERR "unifi: ");
- }
-#endif /* ANDROID_TIMESTAMP */
- FORMAT_TRACE(s, len, args, fmt);
-
- printk("%s", s);
-}
-
-void
-unifi_warning(void* ospriv, const char *fmt, ...)
-{
- unifi_priv_t *priv = (unifi_priv_t*) ospriv;
- char s[DEBUG_BUFFER_SIZE];
- va_list args;
- unsigned int len;
-
-#ifdef ANDROID_TIMESTAMP
- if (priv != NULL) {
- len = snprintf(s, DEBUG_BUFFER_SIZE, KERN_WARNING "%s unifi%d: ", print_time(), priv->instance);
- } else {
- len = snprintf(s, DEBUG_BUFFER_SIZE, KERN_WARNING "%s unifi: ", print_time());
- }
-#else
- if (priv != NULL) {
- len = snprintf(s, DEBUG_BUFFER_SIZE, KERN_WARNING "unifi%d: ", priv->instance);
- } else {
- len = snprintf(s, DEBUG_BUFFER_SIZE, KERN_WARNING "unifi: ");
- }
-#endif /* ANDROID_TIMESTAMP */
-
- FORMAT_TRACE(s, len, args, fmt);
-
- printk("%s", s);
-}
-
-
-void
-unifi_notice(void* ospriv, const char *fmt, ...)
-{
- unifi_priv_t *priv = (unifi_priv_t*) ospriv;
- char s[DEBUG_BUFFER_SIZE];
- va_list args;
- unsigned int len;
-
-#ifdef ANDROID_TIMESTAMP
- if (priv != NULL) {
- len = snprintf(s, DEBUG_BUFFER_SIZE, KERN_NOTICE "%s unifi%d: ", print_time(), priv->instance);
- } else {
- len = snprintf(s, DEBUG_BUFFER_SIZE, KERN_NOTICE "%s unifi: ", print_time());
- }
-#else
- if (priv != NULL) {
- len = snprintf(s, DEBUG_BUFFER_SIZE, KERN_NOTICE "unifi%d: ", priv->instance);
- } else {
- len = snprintf(s, DEBUG_BUFFER_SIZE, KERN_NOTICE "unifi: ");
- }
-#endif /* ANDROID_TIMESTAMP */
-
- FORMAT_TRACE(s, len, args, fmt);
-
- printk("%s", s);
-}
-
-
-void
-unifi_info(void* ospriv, const char *fmt, ...)
-{
- unifi_priv_t *priv = (unifi_priv_t*) ospriv;
- char s[DEBUG_BUFFER_SIZE];
- va_list args;
- unsigned int len;
-
-#ifdef ANDROID_TIMESTAMP
- if (priv != NULL) {
- len = snprintf(s, DEBUG_BUFFER_SIZE, KERN_INFO "%s unifi%d: ", print_time(), priv->instance);
- } else {
- len = snprintf(s, DEBUG_BUFFER_SIZE, KERN_INFO "%s unifi: ", print_time());
- }
-#else
- if (priv != NULL) {
- len = snprintf(s, DEBUG_BUFFER_SIZE, KERN_INFO "unifi%d: ", priv->instance);
- } else {
- len = snprintf(s, DEBUG_BUFFER_SIZE, KERN_INFO "unifi: ");
- }
-#endif /* ANDROID_TIMESTAMP */
-
- FORMAT_TRACE(s, len, args, fmt);
-
- printk("%s", s);
-}
-
-/* debugging */
-void
-unifi_trace(void* ospriv, int level, const char *fmt, ...)
-{
- unifi_priv_t *priv = (unifi_priv_t*) ospriv;
- char s[DEBUG_BUFFER_SIZE];
- va_list args;
- unsigned int len;
-
- if (unifi_debug >= level) {
-#ifdef ANDROID_TIMESTAMP
- if (priv != NULL) {
- len = snprintf(s, DEBUG_BUFFER_SIZE, KERN_ERR "%s unifi%d: ", print_time(), priv->instance);
- } else {
- len = snprintf(s, DEBUG_BUFFER_SIZE, KERN_ERR "%s unifi: ", print_time());
- }
-#else
- if (priv != NULL) {
- len = snprintf(s, DEBUG_BUFFER_SIZE, KERN_ERR "unifi%d: ", priv->instance);
- } else {
- len = snprintf(s, DEBUG_BUFFER_SIZE, KERN_ERR "unifi: ");
- }
-#endif /* ANDROID_TIMESTAMP */
-
- FORMAT_TRACE(s, len, args, fmt);
-
- printk("%s", s);
- }
-}
-
-#else
-
-void
-unifi_error_nop(void* ospriv, const char *fmt, ...)
-{
-}
-
-void
-unifi_trace_nop(void* ospriv, int level, const char *fmt, ...)
-{
-}
-
-#endif /* UNIFI_DEBUG */
-
-
-/*
- * ---------------------------------------------------------------------------
- *
- * Debugging support.
- *
- * ---------------------------------------------------------------------------
- */
-
-#ifdef UNIFI_DEBUG
-
-/* Memory dump with level filter controlled by unifi_debug */
-void
-unifi_dump(void *ospriv, int level, const char *msg, void *mem, u16 len)
-{
- unifi_priv_t *priv = (unifi_priv_t*) ospriv;
-
- if (unifi_debug >= level) {
-#ifdef ANDROID_TIMESTAMP
- if (priv != NULL) {
- printk(KERN_ERR "%s unifi%d: --- dump: %s ---\n", print_time(), priv->instance, msg ? msg : "");
- } else {
- printk(KERN_ERR "%s unifi: --- dump: %s ---\n", print_time(), msg ? msg : "");
- }
-#else
- if (priv != NULL) {
- printk(KERN_ERR "unifi%d: --- dump: %s ---\n", priv->instance, msg ? msg : "");
- } else {
- printk(KERN_ERR "unifi: --- dump: %s ---\n", msg ? msg : "");
- }
-#endif /* ANDROID_TIMESTAMP */
- dump(mem, len);
-
- if (priv != NULL) {
- printk(KERN_ERR "unifi%d: --- end of dump ---\n", priv->instance);
- } else {
- printk(KERN_ERR "unifi: --- end of dump ---\n");
- }
- }
-}
-
-/* Memory dump that appears all the time, use sparingly */
-void
-dump(void *mem, u16 len)
-{
- int i, col = 0;
- unsigned char *pdata = (unsigned char *)mem;
-#ifdef ANDROID_TIMESTAMP
- printk("timestamp %s \n", print_time());
-#endif /* ANDROID_TIMESTAMP */
- if (mem == NULL) {
- printk("(null dump)\n");
- return;
- }
- for (i = 0; i < len; i++) {
- if (col == 0)
- printk("0x%02X: ", i);
-
- printk(" %02X", pdata[i]);
-
- if (++col == 16) {
- printk("\n");
- col = 0;
- }
- }
- if (col)
- printk("\n");
-} /* dump() */
-
-
-void
-dump16(void *mem, u16 len)
-{
- int i, col=0;
- unsigned short *p = (unsigned short *)mem;
-#ifdef ANDROID_TIMESTAMP
- printk("timestamp %s \n", print_time());
-#endif /* ANDROID_TIMESTAMP */
- for (i = 0; i < len; i+=2) {
- if (col == 0)
- printk("0x%02X: ", i);
-
- printk(" %04X", *p++);
-
- if (++col == 8) {
- printk("\n");
- col = 0;
- }
- }
- if (col)
- printk("\n");
-}
-
-
-#ifdef CSR_WIFI_HIP_DEBUG_OFFLINE
-void
-dump_str(void *mem, u16 len)
-{
- int i;
- unsigned char *pdata = (unsigned char *)mem;
-#ifdef ANDROID_TIMESTAMP
- printk("timestamp %s \n", print_time());
-#endif /* ANDROID_TIMESTAMP */
- for (i = 0; i < len; i++) {
- printk("%c", pdata[i]);
- }
- printk("\n");
-
-} /* dump_str() */
-#endif /* CSR_WIFI_HIP_DEBUG_OFFLINE */
-
-
-#endif /* UNIFI_DEBUG */
-
-
-/* ---------------------------------------------------------------------------
- * - End -
- * ------------------------------------------------------------------------- */
diff --git a/drivers/staging/csr/putest.c b/drivers/staging/csr/putest.c
deleted file mode 100644
index 5613cf0..0000000
--- a/drivers/staging/csr/putest.c
+++ /dev/null
@@ -1,685 +0,0 @@
-/*
- * ***************************************************************************
- * FILE: putest.c
- *
- * PURPOSE: putest related functions.
- *
- * Copyright (C) 2008-2009 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ***************************************************************************
- */
-
-#include <linux/vmalloc.h>
-#include <linux/firmware.h>
-
-#include "unifi_priv.h"
-#include "csr_wifi_hip_chiphelper.h"
-
-#define UNIFI_PROC_BOTH 3
-
-
-int unifi_putest_cmd52_read(unifi_priv_t *priv, unsigned char *arg)
-{
- struct unifi_putest_cmd52 cmd52_params;
- u8 *arg_pos;
- unsigned int cmd_param_size;
- int r;
- CsrResult csrResult;
- unsigned char ret_buffer[32];
- u8 *ret_buffer_pos;
- u8 retries;
-
- arg_pos = (u8*)(((unifi_putest_command_t*)arg) + 1);
- if (get_user(cmd_param_size, (int*)arg_pos)) {
- unifi_error(priv,
- "unifi_putest_cmd52_read: Failed to get the argument\n");
- return -EFAULT;
- }
-
- if (cmd_param_size != sizeof(struct unifi_putest_cmd52)) {
- unifi_error(priv,
- "unifi_putest_cmd52_read: cmd52 struct mismatch\n");
- return -EINVAL;
- }
-
- arg_pos += sizeof(unsigned int);
- if (copy_from_user(&cmd52_params,
- (void*)arg_pos,
- sizeof(struct unifi_putest_cmd52))) {
- unifi_error(priv,
- "unifi_putest_cmd52_read: Failed to get the cmd52 params\n");
- return -EFAULT;
- }
-
- unifi_trace(priv, UDBG2, "cmd52r: func=%d addr=0x%x ",
- cmd52_params.funcnum, cmd52_params.addr);
-
- retries = 3;
- CsrSdioClaim(priv->sdio);
- do {
- if (cmd52_params.funcnum == 0) {
- csrResult = CsrSdioF0Read8(priv->sdio, cmd52_params.addr, &cmd52_params.data);
- } else {
- csrResult = CsrSdioRead8(priv->sdio, cmd52_params.addr, &cmd52_params.data);
- }
- } while (--retries && ((csrResult == CSR_SDIO_RESULT_CRC_ERROR) || (csrResult == CSR_SDIO_RESULT_TIMEOUT)));
- CsrSdioRelease(priv->sdio);
-
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(priv,
- "\nunifi_putest_cmd52_read: Read8() failed (csrResult=0x%x)\n", csrResult);
- return -EFAULT;
- }
- unifi_trace(priv, UDBG2, "data=%d\n", cmd52_params.data);
-
- /* Copy the info to the out buffer */
- *(unifi_putest_command_t*)ret_buffer = UNIFI_PUTEST_CMD52_READ;
- ret_buffer_pos = (u8*)(((unifi_putest_command_t*)ret_buffer) + 1);
- *(unsigned int*)ret_buffer_pos = sizeof(struct unifi_putest_cmd52);
- ret_buffer_pos += sizeof(unsigned int);
- memcpy(ret_buffer_pos, &cmd52_params, sizeof(struct unifi_putest_cmd52));
- ret_buffer_pos += sizeof(struct unifi_putest_cmd52);
-
- r = copy_to_user((void*)arg,
- ret_buffer,
- ret_buffer_pos - ret_buffer);
- if (r) {
- unifi_error(priv,
- "unifi_putest_cmd52_read: Failed to return the data\n");
- return -EFAULT;
- }
-
- return 0;
-}
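-
-/* For reference (not part of the original file): every putest ioctl handler
- * in this file parses its user argument as the same packed layout --
- *
- *   unifi_putest_command_t   command;      e.g. UNIFI_PUTEST_CMD52_READ
- *   unsigned int             param_size;   sizeof() of the params struct
- *   <params struct>          params;       e.g. struct unifi_putest_cmd52
- *
- * Read-style commands (cmd52 read, gp_read16) copy the same layout back to
- * userspace with the data member filled in, as done just above via
- * copy_to_user().
- */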
-
-
-int unifi_putest_cmd52_write(unifi_priv_t *priv, unsigned char *arg)
-{
- struct unifi_putest_cmd52 cmd52_params;
- u8 *arg_pos;
- unsigned int cmd_param_size;
- CsrResult csrResult;
- u8 retries;
-
- arg_pos = (u8*)(((unifi_putest_command_t*)arg) + 1);
- if (get_user(cmd_param_size, (int*)arg_pos)) {
- unifi_error(priv,
- "unifi_putest_cmd52_write: Failed to get the argument\n");
- return -EFAULT;
- }
-
- if (cmd_param_size != sizeof(struct unifi_putest_cmd52)) {
- unifi_error(priv,
- "unifi_putest_cmd52_write: cmd52 struct mismatch\n");
- return -EINVAL;
- }
-
- arg_pos += sizeof(unsigned int);
- if (copy_from_user(&cmd52_params,
- (void*)(arg_pos),
- sizeof(struct unifi_putest_cmd52))) {
- unifi_error(priv,
- "unifi_putest_cmd52_write: Failed to get the cmd52 params\n");
- return -EFAULT;
- }
-
- unifi_trace(priv, UDBG2, "cmd52w: func=%d addr=0x%x data=%d\n",
- cmd52_params.funcnum, cmd52_params.addr, cmd52_params.data);
-
- retries = 3;
- CsrSdioClaim(priv->sdio);
- do {
- if (cmd52_params.funcnum == 0) {
- csrResult = CsrSdioF0Write8(priv->sdio, cmd52_params.addr, cmd52_params.data);
- } else {
- csrResult = CsrSdioWrite8(priv->sdio, cmd52_params.addr, cmd52_params.data);
- }
- } while (--retries && ((csrResult == CSR_SDIO_RESULT_CRC_ERROR) || (csrResult == CSR_SDIO_RESULT_TIMEOUT)));
- CsrSdioRelease(priv->sdio);
-
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(priv,
- "unifi_putest_cmd52_write: Write8() failed (csrResult=0x%x)\n", csrResult);
- return -EFAULT;
- }
-
- return 0;
-}
-
-int unifi_putest_gp_read16(unifi_priv_t *priv, unsigned char *arg)
-{
- struct unifi_putest_gp_rw16 gp_r16_params;
- u8 *arg_pos;
- unsigned int cmd_param_size;
- int r;
- CsrResult csrResult;
- unsigned char ret_buffer[32];
- u8 *ret_buffer_pos;
-
- arg_pos = (u8*)(((unifi_putest_command_t*)arg) + 1);
- if (get_user(cmd_param_size, (int*)arg_pos)) {
- unifi_error(priv,
- "unifi_putest_gp_read16: Failed to get the argument\n");
- return -EFAULT;
- }
-
- if (cmd_param_size != sizeof(struct unifi_putest_gp_rw16)) {
- unifi_error(priv,
- "unifi_putest_gp_read16: struct mismatch\n");
- return -EINVAL;
- }
-
- arg_pos += sizeof(unsigned int);
- if (copy_from_user(&gp_r16_params,
- (void*)arg_pos,
- sizeof(struct unifi_putest_gp_rw16))) {
- unifi_error(priv,
- "unifi_putest_gp_read16: Failed to get the params\n");
- return -EFAULT;
- }
- CsrSdioClaim(priv->sdio);
- csrResult = unifi_card_read16(priv->card, gp_r16_params.addr, &gp_r16_params.data);
- CsrSdioRelease(priv->sdio);
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(priv,
- "unifi_putest_gp_read16: unifi_card_read16() GP=0x%x failed (csrResult=0x%x)\n", gp_r16_params.addr, csrResult);
- return -EFAULT;
- }
-
- unifi_trace(priv, UDBG2, "gp_r16: GP=0x%08x, data=0x%04x\n", gp_r16_params.addr, gp_r16_params.data);
-
- /* Copy the info to the out buffer */
- *(unifi_putest_command_t*)ret_buffer = UNIFI_PUTEST_GP_READ16;
- ret_buffer_pos = (u8*)(((unifi_putest_command_t*)ret_buffer) + 1);
- *(unsigned int*)ret_buffer_pos = sizeof(struct unifi_putest_gp_rw16);
- ret_buffer_pos += sizeof(unsigned int);
- memcpy(ret_buffer_pos, &gp_r16_params, sizeof(struct unifi_putest_gp_rw16));
- ret_buffer_pos += sizeof(struct unifi_putest_gp_rw16);
-
- r = copy_to_user((void*)arg,
- ret_buffer,
- ret_buffer_pos - ret_buffer);
- if (r) {
- unifi_error(priv,
- "unifi_putest_gp_read16: Failed to return the data\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-int unifi_putest_gp_write16(unifi_priv_t *priv, unsigned char *arg)
-{
- struct unifi_putest_gp_rw16 gp_w16_params;
- u8 *arg_pos;
- unsigned int cmd_param_size;
- CsrResult csrResult;
-
- arg_pos = (u8*)(((unifi_putest_command_t*)arg) + 1);
- if (get_user(cmd_param_size, (int*)arg_pos)) {
- unifi_error(priv,
- "unifi_putest_gp_write16: Failed to get the argument\n");
- return -EFAULT;
- }
-
- if (cmd_param_size != sizeof(struct unifi_putest_gp_rw16)) {
- unifi_error(priv,
- "unifi_putest_gp_write16: struct mismatch\n");
- return -EINVAL;
- }
-
- arg_pos += sizeof(unsigned int);
- if (copy_from_user(&gp_w16_params,
- (void*)(arg_pos),
- sizeof(struct unifi_putest_gp_rw16))) {
- unifi_error(priv,
- "unifi_putest_gp_write16: Failed to get the params\n");
- return -EFAULT;
- }
-
- unifi_trace(priv, UDBG2, "gp_w16: GP=0x%08x, data=0x%04x\n", gp_w16_params.addr, gp_w16_params.data);
- CsrSdioClaim(priv->sdio);
- csrResult = unifi_card_write16(priv->card, gp_w16_params.addr, gp_w16_params.data);
- CsrSdioRelease(priv->sdio);
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(priv,
- "unifi_putest_gp_write16: unifi_card_write16() GP=%x failed (csrResult=0x%x)\n", gp_w16_params.addr, csrResult);
- return -EFAULT;
- }
-
- return 0;
-}
-
-int unifi_putest_set_sdio_clock(unifi_priv_t *priv, unsigned char *arg)
-{
- int sdio_clock_speed;
- CsrResult csrResult;
-
- if (get_user(sdio_clock_speed, (int*)(((unifi_putest_command_t*)arg) + 1))) {
- unifi_error(priv,
- "unifi_putest_set_sdio_clock: Failed to get the argument\n");
- return -EFAULT;
- }
-
- unifi_trace(priv, UDBG2, "set sdio clock: %d KHz\n", sdio_clock_speed);
-
- CsrSdioClaim(priv->sdio);
- csrResult = CsrSdioMaxBusClockFrequencySet(priv->sdio, sdio_clock_speed * 1000);
- CsrSdioRelease(priv->sdio);
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(priv,
- "unifi_putest_set_sdio_clock: Set clock failed (csrResult=0x%x)\n", csrResult);
- return -EFAULT;
- }
-
- return 0;
-}
-
-
-int unifi_putest_start(unifi_priv_t *priv, unsigned char *arg)
-{
- int r;
- CsrResult csrResult;
- int already_in_test = priv->ptest_mode;
-
- /* Ensure that sme_sys_suspend() doesn't power down the chip because:
- * 1) Power is needed anyway for ptest.
- * 2) The app code uses the START ioctl as a reset, so it gets called
- * multiple times. If the app stops the XAPs, but the power_down/up
- * sequence doesn't actually power down the chip, there can be problems
- * resetting, because part of the power_up sequence disables function 1
- */
- priv->ptest_mode = 1;
-
- /* Suspend the SME and UniFi */
- if (priv->sme_cli) {
- r = sme_sys_suspend(priv);
- if (r) {
- unifi_error(priv,
- "unifi_putest_start: failed to suspend UniFi\n");
- return r;
- }
- }
-
- /* Application may have stopped the XAPs, but they are needed for reset */
- if (already_in_test) {
- CsrSdioClaim(priv->sdio);
- csrResult = unifi_start_processors(priv->card);
- CsrSdioRelease(priv->sdio);
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(priv, "Failed to start XAPs. Hard reset required.\n");
- }
- } else {
- /* Ensure chip is powered for the case where there's no unifi_helper */
- CsrSdioClaim(priv->sdio);
- csrResult = CsrSdioPowerOn(priv->sdio);
- CsrSdioRelease(priv->sdio);
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(priv, "CsrSdioPowerOn csrResult = %d\n", csrResult);
- }
- }
- CsrSdioClaim(priv->sdio);
- csrResult = unifi_init(priv->card);
- CsrSdioRelease(priv->sdio);
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(priv,
- "unifi_putest_start: failed to init UniFi\n");
- return CsrHipResultToStatus(csrResult);
- }
-
- return 0;
-}
-
-
-int unifi_putest_stop(unifi_priv_t *priv, unsigned char *arg)
-{
- int r = 0;
- CsrResult csrResult;
-
- /* Application may have stopped the XAPs, but they are needed for reset */
- CsrSdioClaim(priv->sdio);
- csrResult = unifi_start_processors(priv->card);
- CsrSdioRelease(priv->sdio);
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(priv, "Failed to start XAPs. Hard reset required.\n");
- }
-
- /* PUTEST_STOP is also used to resume the XAPs after SME coredump.
- * Don't power off the chip, leave that to the normal wifi-off which is
- * about to carry on. No need to resume the SME either, as it wasn't suspended.
- */
- if (priv->coredump_mode) {
- priv->coredump_mode = 0;
- return 0;
- }
-
- /* At this point function 1 is enabled and the XAPs are running, so it is
- * safe to let the card power down. Power is restored later, asynchronously,
- * during the wifi_on requested by the SME.
- */
- CsrSdioClaim(priv->sdio);
- CsrSdioPowerOff(priv->sdio);
- CsrSdioRelease(priv->sdio);
-
- /* Resume the SME and UniFi */
- if (priv->sme_cli) {
- r = sme_sys_resume(priv);
- if (r) {
- unifi_error(priv,
- "unifi_putest_stop: failed to resume SME\n");
- }
- }
- priv->ptest_mode = 0;
-
- return r;
-}
-
-
-int unifi_putest_dl_fw(unifi_priv_t *priv, unsigned char *arg)
-{
-#define UF_PUTEST_MAX_FW_FILE_NAME 16
-#define UNIFI_MAX_FW_PATH_LEN 32
- unsigned int fw_name_length;
- unsigned char fw_name[UF_PUTEST_MAX_FW_FILE_NAME+1];
- unsigned char *name_buffer;
- int postfix;
- char fw_path[UNIFI_MAX_FW_PATH_LEN];
- const struct firmware *fw_entry;
- struct dlpriv temp_fw_sta;
- int r;
- CsrResult csrResult;
-
- /* Get the f/w file name length */
- if (get_user(fw_name_length, (unsigned int*)(((unifi_putest_command_t*)arg) + 1))) {
- unifi_error(priv,
- "unifi_putest_dl_fw: Failed to get the length argument\n");
- return -EFAULT;
- }
-
- unifi_trace(priv, UDBG2, "unifi_putest_dl_fw: file name size = %d\n", fw_name_length);
-
- /* Sanity check for the f/w file name length */
- if (fw_name_length > UF_PUTEST_MAX_FW_FILE_NAME) {
- unifi_error(priv,
- "unifi_putest_dl_fw: F/W file name is too long\n");
- return -EINVAL;
- }
-
- /* Get the f/w file name */
- name_buffer = ((unsigned char*)arg) + sizeof(unifi_putest_command_t) + sizeof(unsigned int);
- if (copy_from_user(fw_name, (void*)name_buffer, fw_name_length)) {
- unifi_error(priv, "unifi_putest_dl_fw: Failed to get the file name\n");
- return -EFAULT;
- }
- fw_name[fw_name_length] = '\0';
- unifi_trace(priv, UDBG2, "unifi_putest_dl_fw: file = %s\n", fw_name);
-
- /* Keep the existing f/w to a temp, we need to restore it later */
- temp_fw_sta = priv->fw_sta;
-
- /* Get the putest f/w */
- postfix = priv->instance;
- scnprintf(fw_path, UNIFI_MAX_FW_PATH_LEN, "unifi-sdio-%d/%s",
- postfix, fw_name);
- r = request_firmware(&fw_entry, fw_path, priv->unifi_device);
- if (r == 0) {
- priv->fw_sta.fw_desc = (void *)fw_entry;
- priv->fw_sta.dl_data = fw_entry->data;
- priv->fw_sta.dl_len = fw_entry->size;
- } else {
- unifi_error(priv, "Firmware file not available\n");
- return -EINVAL;
- }
-
- /* Application may have stopped the XAPs, but they are needed for reset */
- CsrSdioClaim(priv->sdio);
- csrResult = unifi_start_processors(priv->card);
- CsrSdioRelease(priv->sdio);
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(priv, "Failed to start XAPs. Hard reset required.\n");
- }
-
- /* Download the f/w. On UF6xxx this causes the f/w file to be converted
- * into patch format and downloaded via the ROM boot loader
- */
- CsrSdioClaim(priv->sdio);
- csrResult = unifi_download(priv->card, 0x0c00);
- CsrSdioRelease(priv->sdio);
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(priv,
- "unifi_putest_dl_fw: failed to download the f/w\n");
- goto free_fw;
- }
-
- /* Free the putest f/w... */
-free_fw:
- uf_release_firmware(priv, &priv->fw_sta);
- /* ... and restore the original f/w */
- priv->fw_sta = temp_fw_sta;
-
- return CsrHipResultToStatus(csrResult);
-}
-
-
-int unifi_putest_dl_fw_buff(unifi_priv_t *priv, unsigned char *arg)
-{
- unsigned int fw_length;
- unsigned char *fw_buf = NULL;
- unsigned char *fw_user_ptr;
- struct dlpriv temp_fw_sta;
- CsrResult csrResult;
-
- /* Get the f/w buffer length */
- if (get_user(fw_length, (unsigned int*)(((unifi_putest_command_t*)arg) + 1))) {
- unifi_error(priv,
- "unifi_putest_dl_fw_buff: Failed to get the length arg\n");
- return -EFAULT;
- }
-
- unifi_trace(priv, UDBG2, "unifi_putest_dl_fw_buff: size = %d\n", fw_length);
-
- /* Sanity check for the buffer length */
- if (fw_length == 0 || fw_length > 0xfffffff) {
- unifi_error(priv,
- "unifi_putest_dl_fw_buff: buffer length bad %u\n", fw_length);
- return -EINVAL;
- }
-
- /* Buffer for kernel copy of the f/w image */
- fw_buf = kmalloc(fw_length, GFP_KERNEL);
- if (!fw_buf) {
- unifi_error(priv, "unifi_putest_dl_fw_buff: malloc fail\n");
- return -ENOMEM;
- }
-
- /* Get the f/w image */
- fw_user_ptr = ((unsigned char*)arg) + sizeof(unifi_putest_command_t) + sizeof(unsigned int);
- if (copy_from_user(fw_buf, (void*)fw_user_ptr, fw_length)) {
- unifi_error(priv, "unifi_putest_dl_fw_buff: Failed to get the buffer\n");
- kfree(fw_buf);
- return -EFAULT;
- }
-
- /* Save the existing f/w to a temp, we need to restore it later */
- temp_fw_sta = priv->fw_sta;
-
- /* Setting fw_desc NULL indicates to the core that no f/w file was loaded
- * via the kernel request_firmware() mechanism, so it must not call
- * release_firmware() after the download is done.
- */
- priv->fw_sta.fw_desc = NULL; /* No OS f/w resource */
- priv->fw_sta.dl_data = fw_buf;
- priv->fw_sta.dl_len = fw_length;
-
- /* Application may have stopped the XAPs, but they are needed for reset */
- CsrSdioClaim(priv->sdio);
- csrResult = unifi_start_processors(priv->card);
- CsrSdioRelease(priv->sdio);
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(priv, "Failed to start XAPs. Hard reset required.\n");
- }
-
- /* Download the f/w. On UF6xxx this causes the f/w file to be converted
- * into patch format and downloaded via the ROM boot loader
- */
- CsrSdioClaim(priv->sdio);
- csrResult = unifi_download(priv->card, 0x0c00);
- CsrSdioRelease(priv->sdio);
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(priv,
- "unifi_putest_dl_fw_buff: failed to download the f/w\n");
- goto free_fw;
- }
-
-free_fw:
- /* Finished with the putest f/w, so restore the station f/w */
- priv->fw_sta = temp_fw_sta;
- kfree(fw_buf);
-
- return CsrHipResultToStatus(csrResult);
-}
-
-
-int unifi_putest_coredump_prepare(unifi_priv_t *priv, unsigned char *arg)
-{
- u16 data_u16;
- s32 i;
- CsrResult r;
-
- unifi_info(priv, "Preparing for SDIO coredump\n");
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE)
- unifi_debug_buf_dump();
-#endif
-
- /* Sanity check that userspace hasn't called a PUTEST_START, because that
- * would have reset UniFi, potentially power cycling it and losing context
- */
- if (priv->ptest_mode) {
- unifi_error(priv, "PUTEST_START shouldn't be used before a coredump\n");
- }
-
- /* Flag that the userspace has requested coredump. Even if this preparation
- * fails, the SME will call PUTEST_STOP to tidy up.
- */
- priv->coredump_mode = 1;
-
- for (i = 0; i < 3; i++) {
- CsrSdioClaim(priv->sdio);
- r = CsrSdioRead16(priv->sdio, CHIP_HELPER_UNIFI_GBL_CHIP_VERSION*2, &data_u16);
- CsrSdioRelease(priv->sdio);
- if (r != CSR_RESULT_SUCCESS) {
- unifi_info(priv, "Failed to read chip version! Try %d\n", i);
-
- /* First try, re-enable function which may have been disabled by f/w panic */
- if (i == 0) {
- unifi_info(priv, "Try function enable\n");
- CsrSdioClaim(priv->sdio);
- r = CsrSdioFunctionEnable(priv->sdio);
- CsrSdioRelease(priv->sdio);
- if (r != CSR_RESULT_SUCCESS) {
- unifi_error(priv, "CsrSdioFunctionEnable failed %d\n", r);
- }
- continue;
- }
-
- /* Subsequent tries, reset */
-
- /* Set clock speed low */
- CsrSdioClaim(priv->sdio);
- r = CsrSdioMaxBusClockFrequencySet(priv->sdio, UNIFI_SDIO_CLOCK_SAFE_HZ);
- CsrSdioRelease(priv->sdio);
- if (r != CSR_RESULT_SUCCESS) {
- unifi_error(priv, "CsrSdioMaxBusClockFrequencySet() failed %d\n", r);
- }
-
- /* Card software reset */
- CsrSdioClaim(priv->sdio);
- r = unifi_card_hard_reset(priv->card);
- CsrSdioRelease(priv->sdio);
- if (r != CSR_RESULT_SUCCESS) {
- unifi_error(priv, "unifi_card_hard_reset() failed %d\n", r);
- }
- } else {
- unifi_info(priv, "Read chip version of 0x%04x\n", data_u16);
- break;
- }
- }
-
- if (r != CSR_RESULT_SUCCESS) {
- unifi_error(priv, "Failed to prepare chip\n");
- return -EIO;
- }
-
- /* Stop the XAPs for coredump. The PUTEST_STOP must be called, e.g. at
- * Raw SDIO deinit, to resume them.
- */
- CsrSdioClaim(priv->sdio);
- r = unifi_card_stop_processor(priv->card, UNIFI_PROC_BOTH);
- CsrSdioRelease(priv->sdio);
- if (r != CSR_RESULT_SUCCESS) {
- unifi_error(priv, "Failed to stop processors\n");
- }
-
- return 0;
-}
-
-int unifi_putest_cmd52_block_read(unifi_priv_t *priv, unsigned char *arg)
-{
- struct unifi_putest_block_cmd52_r block_cmd52;
- u8 *arg_pos;
- unsigned int cmd_param_size;
- CsrResult r;
- u8 *block_local_buffer;
-
- arg_pos = (u8*)(((unifi_putest_command_t*)arg) + 1);
- if (get_user(cmd_param_size, (int*)arg_pos)) {
- unifi_error(priv,
- "cmd52r_block: Failed to get the argument\n");
- return -EFAULT;
- }
-
- if (cmd_param_size != sizeof(struct unifi_putest_block_cmd52_r)) {
- unifi_error(priv,
- "cmd52r_block: cmd52 struct mismatch\n");
- return -EINVAL;
- }
-
- arg_pos += sizeof(unsigned int);
- if (copy_from_user(&block_cmd52,
- (void*)arg_pos,
- sizeof(struct unifi_putest_block_cmd52_r))) {
- unifi_error(priv,
- "cmd52r_block: Failed to get the cmd52 params\n");
- return -EFAULT;
- }
-
- unifi_trace(priv, UDBG2, "cmd52r_block: func=%d addr=0x%x len=0x%x ",
- block_cmd52.funcnum, block_cmd52.addr, block_cmd52.length);
-
- block_local_buffer = vmalloc(block_cmd52.length);
- if (block_local_buffer == NULL) {
- unifi_error(priv, "cmd52r_block: Failed to allocate buffer\n");
- return -ENOMEM;
- }
-
- CsrSdioClaim(priv->sdio);
- r = unifi_card_readn(priv->card, block_cmd52.addr, block_local_buffer, block_cmd52.length);
- CsrSdioRelease(priv->sdio);
- if (r != CSR_RESULT_SUCCESS) {
- unifi_error(priv, "cmd52r_block: unifi_readn failed\n");
- return -EIO;
- }
-
- if (copy_to_user((void*)block_cmd52.data,
- block_local_buffer,
- block_cmd52.length)) {
- unifi_error(priv,
- "cmd52r_block: Failed to return the data\n");
- return -EFAULT;
- }
-
- return 0;
-}
diff --git a/drivers/staging/csr/sdio_events.c b/drivers/staging/csr/sdio_events.c
deleted file mode 100644
index 2a80b9e..0000000
--- a/drivers/staging/csr/sdio_events.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * ---------------------------------------------------------------------------
- * FILE: sdio_events.c
- *
- * PURPOSE:
- * Process the events received by the SDIO glue layer.
- * Optional part of the porting exercise.
- *
- * Copyright (C) 2009 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ---------------------------------------------------------------------------
- */
-#include "unifi_priv.h"
-
-
-/*
- * Porting Notes:
- * There are two ways to support the suspend/resume system events in a driver.
- * In some operating systems these events are delivered to the OS driver
- * directly from the system. In this case, the OS driver needs to pass these
- * events to the API described in the CSR SDIO Abstraction API document.
- * In Linux, and other embedded operating systems, the suspend/resume events
- * come from the SDIO driver. In this case, simply get these events in the
- * SDIO glue layer and notify the OS layer.
- *
- * In either case, typically, the events are processed by the SME.
- * Use the unifi_sys_suspend_ind() and unifi_sys_resume_ind() to pass
- * the events to the SME.
- */
-
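-/* Illustrative sketch (not part of the original file): an SDIO glue layer
- * that receives suspend/resume callbacks from its SDIO stack would simply
- * forward them here with the OS driver context, e.g.
- *
- *   static int my_glue_suspend(void *ospriv)   <-- hypothetical glue hook
- *   {
- *       unifi_suspend(ospriv);
- *       return 0;
- *   }
- *
- * In this driver, the Linux-side events are caught by the PM notifier in
- * sdio_mmc.c, which hands them to the registered function driver so that
- * the OS layer is notified as described above.
- */
-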
-/*
- * ---------------------------------------------------------------------------
- * unifi_suspend
- *
- * Handles a suspend request from the SDIO driver.
- *
- * Arguments:
- * ospriv Pointer to OS driver context.
- *
- * ---------------------------------------------------------------------------
- */
-void unifi_suspend(void *ospriv)
-{
- unifi_priv_t *priv = ospriv;
- int interfaceTag=0;
-
- /* For powered suspend, tell the resume's wifi_on() not to reinit UniFi */
- priv->wol_suspend = (enable_wol == UNIFI_WOL_OFF) ? FALSE : TRUE;
-
- unifi_trace(priv, UDBG1, "unifi_suspend: wol_suspend %d, enable_wol %d",
- priv->wol_suspend, enable_wol );
-
- /* Stop network traffic. */
- /* need to stop all the netdevices*/
- for( interfaceTag=0;interfaceTag<CSR_WIFI_NUM_INTERFACES;interfaceTag++)
- {
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
- if (interfacePriv->netdev_registered == 1)
- {
- if( priv->wol_suspend ) {
- unifi_trace(priv, UDBG1, "unifi_suspend: Don't netif_carrier_off");
- } else {
- unifi_trace(priv, UDBG1, "unifi_suspend: netif_carrier_off");
- netif_carrier_off(priv->netdev[interfaceTag]);
- }
- netif_tx_stop_all_queues(priv->netdev[interfaceTag]);
- }
- }
-
- unifi_trace(priv, UDBG1, "unifi_suspend: suspend SME");
-
- sme_sys_suspend(priv);
-
-} /* unifi_suspend() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_resume
- *
- * Handles a resume request from the SDIO driver.
- *
- * Arguments:
- * ospriv Pointer to OS driver context.
- *
- * ---------------------------------------------------------------------------
- */
-void unifi_resume(void *ospriv)
-{
- unifi_priv_t *priv = ospriv;
- int interfaceTag=0;
- int r;
- int wol = priv->wol_suspend;
-
- unifi_trace(priv, UDBG1, "unifi_resume: resume SME, enable_wol=%d", enable_wol);
-
- /* The resume causes wifi-on which will re-enable the BH and reinstall the ISR */
- r = sme_sys_resume(priv);
- if (r) {
- unifi_error(priv, "Failed to resume UniFi\n");
- }
-
- /* Resume the network interfaces. For the cold resume case, this will
- * happen upon reconnection.
- */
- if (wol) {
- unifi_trace(priv, UDBG1, "unifi_resume: try to enable carrier");
-
- /* need to start all the netdevices*/
- for( interfaceTag=0;interfaceTag<CSR_WIFI_NUM_INTERFACES;interfaceTag++) {
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
-
- unifi_trace(priv, UDBG1, "unifi_resume: interfaceTag %d netdev_registered %d mode %d\n",
- interfaceTag, interfacePriv->netdev_registered, interfacePriv->interfaceMode);
-
- if (interfacePriv->netdev_registered == 1)
- {
- netif_carrier_on(priv->netdev[interfaceTag]);
- netif_tx_start_all_queues(priv->netdev[interfaceTag]);
- }
- }
-
- /* Kick the BH thread (with reason=host) to poll for data that may have
- * arrived during a powered suspend. This caters for the case where the SME
- * doesn't interact with the chip (e.g. by installing autonomous scans) during resume.
- */
- unifi_send_signal(priv->card, NULL, 0, NULL);
- }
-
-} /* unifi_resume() */
-
diff --git a/drivers/staging/csr/sdio_mmc.c b/drivers/staging/csr/sdio_mmc.c
deleted file mode 100644
index 30271d3..0000000
--- a/drivers/staging/csr/sdio_mmc.c
+++ /dev/null
@@ -1,1288 +0,0 @@
-/*
- * ---------------------------------------------------------------------------
- *
- * FILE: sdio_mmc.c
- *
- * PURPOSE: SDIO driver interface for generic MMC stack.
- *
- * Copyright (C) 2008-2009 by Cambridge Silicon Radio Ltd.
- *
- * ---------------------------------------------------------------------------
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/mutex.h>
-#include <linux/gfp.h>
-#include <linux/mmc/core.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/mmc/sdio.h>
-#include <linux/suspend.h>
-
-#include "unifi_priv.h"
-
-#ifdef ANDROID_BUILD
-struct wake_lock unifi_sdio_wake_lock; /* wakelock to prevent suspend while resuming */
-#endif
-
-static CsrSdioFunctionDriver *sdio_func_drv;
-
-#ifdef CONFIG_PM
-static int uf_sdio_mmc_power_event(struct notifier_block *this, unsigned long event, void *ptr);
-#endif
-
-/*
- * We need to keep track of the power on/off because we can not call
- * mmc_power_restore_host() when the card is already powered.
- * Even then, we need to patch the MMC driver to add a power_restore handler
- * in the mmc_sdio_ops structure. If the MMC driver before 2.6.37 is not patched,
- * mmc_power_save_host() and mmc_power_restore_host() are no-ops in the kernel,
- * returning immediately (at least on x86).
- */
-static int card_is_powered = 1;
-
-/* MMC uses ENOMEDIUM to indicate card gone away */
-
-static CsrResult
-ConvertSdioToCsrSdioResult(int r)
-{
- CsrResult csrResult = CSR_RESULT_FAILURE;
-
- switch (r) {
- case 0:
- csrResult = CSR_RESULT_SUCCESS;
- break;
- case -EIO:
- case -EILSEQ:
- csrResult = CSR_SDIO_RESULT_CRC_ERROR;
- break;
- /* Timeout errors */
- case -ETIMEDOUT:
- case -EBUSY:
- csrResult = CSR_SDIO_RESULT_TIMEOUT;
- break;
- case -ENODEV:
- case -ENOMEDIUM:
- csrResult = CSR_SDIO_RESULT_NO_DEVICE;
- break;
- case -EINVAL:
- csrResult = CSR_SDIO_RESULT_INVALID_VALUE;
- break;
- case -ENOMEM:
- case -ENOSYS:
- case -ERANGE:
- case -ENXIO:
- csrResult = CSR_RESULT_FAILURE;
- break;
- default:
- unifi_warning(NULL, "Unrecognised SDIO error code: %d\n", r);
- break;
- }
-
- return csrResult;
-}
-
-
-static int
-csr_io_rw_direct(struct mmc_card *card, int write, uint8_t fn,
- uint32_t addr, uint8_t in, uint8_t* out)
-{
- struct mmc_command cmd;
- int err;
-
- BUG_ON(!card);
- BUG_ON(fn > 7);
-
- memset(&cmd, 0, sizeof(struct mmc_command));
-
- cmd.opcode = SD_IO_RW_DIRECT;
- cmd.arg = write ? 0x80000000 : 0x00000000;
- cmd.arg |= fn << 28;
- cmd.arg |= (write && out) ? 0x08000000 : 0x00000000;
- cmd.arg |= addr << 9;
- cmd.arg |= in;
- cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
-
- err = mmc_wait_for_cmd(card->host, &cmd, 0);
- if (err)
- return err;
-
- /* mmc_host_is_spi() is not exported, so we can't call it here;
- * for now, hard-code the native (non-SPI) SDIO path */
- if (0) {
- /* old arg (mmc_host_is_spi(card->host)) { */
- /* host driver already reported errors */
- } else {
- if (cmd.resp[0] & R5_ERROR) {
- printk(KERN_ERR "%s: r5 error 0x%02x\n",
- __FUNCTION__, cmd.resp[0]);
- return -EIO;
- }
- if (cmd.resp[0] & R5_FUNCTION_NUMBER)
- return -EINVAL;
- if (cmd.resp[0] & R5_OUT_OF_RANGE)
- return -ERANGE;
- }
-
- if (out) {
- if (0) { /* old argument (mmc_host_is_spi(card->host)) */
- *out = (cmd.resp[0] >> 8) & 0xFF;
- }
- else {
- *out = cmd.resp[0] & 0xFF;
- }
- }
-
- return CSR_RESULT_SUCCESS;
-}
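-
-/* Illustrative sketch (not part of the original file): a typical use of the
- * CMD52 helper above, mirroring csr_sdio_enable_hs() further down. write=0
- * and fn=0 select a read from the function-0 (CCCR) register space and the
- * response byte is returned through the out pointer:
- *
- *   u8 speed;
- *   int ret = csr_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
- *   if (ret)
- *       return ret;
- */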
-
-
-CsrResult
-CsrSdioRead8(CsrSdioFunction *function, u32 address, u8 *data)
-{
- struct sdio_func *func = (struct sdio_func *)function->priv;
- int err = 0;
-
- _sdio_claim_host(func);
- *data = sdio_readb(func, address, &err);
- _sdio_release_host(func);
-
- if (err) {
- return ConvertSdioToCsrSdioResult(err);
- }
-
- return CSR_RESULT_SUCCESS;
-} /* CsrSdioRead8() */
-
-CsrResult
-CsrSdioWrite8(CsrSdioFunction *function, u32 address, u8 data)
-{
- struct sdio_func *func = (struct sdio_func *)function->priv;
- int err = 0;
-
- _sdio_claim_host(func);
- sdio_writeb(func, data, address, &err);
- _sdio_release_host(func);
-
- if (err) {
- return ConvertSdioToCsrSdioResult(err);
- }
-
- return CSR_RESULT_SUCCESS;
-} /* CsrSdioWrite8() */
-
-CsrResult
-CsrSdioRead16(CsrSdioFunction *function, u32 address, u16 *data)
-{
- struct sdio_func *func = (struct sdio_func *)function->priv;
- int err;
- uint8_t b0, b1;
-
- _sdio_claim_host(func);
- b0 = sdio_readb(func, address, &err);
- if (err) {
- _sdio_release_host(func);
- return ConvertSdioToCsrSdioResult(err);
- }
-
- b1 = sdio_readb(func, address+1, &err);
- if (err) {
- _sdio_release_host(func);
- return ConvertSdioToCsrSdioResult(err);
- }
- _sdio_release_host(func);
-
- *data = ((uint16_t)b1 << 8) | b0;
-
- return CSR_RESULT_SUCCESS;
-} /* CsrSdioRead16() */
-
-
-CsrResult
-CsrSdioWrite16(CsrSdioFunction *function, u32 address, u16 data)
-{
- struct sdio_func *func = (struct sdio_func *)function->priv;
- int err;
- uint8_t b0, b1;
-
- _sdio_claim_host(func);
- b1 = (data >> 8) & 0xFF;
- sdio_writeb(func, b1, address+1, &err);
- if (err) {
- _sdio_release_host(func);
- return ConvertSdioToCsrSdioResult(err);
- }
-
- b0 = data & 0xFF;
- sdio_writeb(func, b0, address, &err);
- if (err) {
- _sdio_release_host(func);
- return ConvertSdioToCsrSdioResult(err);
- }
-
- _sdio_release_host(func);
- return CSR_RESULT_SUCCESS;
-} /* CsrSdioWrite16() */
-
-
-CsrResult
-CsrSdioF0Read8(CsrSdioFunction *function, u32 address, u8 *data)
-{
- struct sdio_func *func = (struct sdio_func *)function->priv;
- int err = 0;
-
- _sdio_claim_host(func);
-#ifdef MMC_QUIRK_LENIENT_FN0
- *data = sdio_f0_readb(func, address, &err);
-#else
- err = csr_io_rw_direct(func->card, 0, 0, address, 0, data);
-#endif
- _sdio_release_host(func);
-
- if (err) {
- return ConvertSdioToCsrSdioResult(err);
- }
-
- return CSR_RESULT_SUCCESS;
-} /* CsrSdioF0Read8() */
-
-CsrResult
-CsrSdioF0Write8(CsrSdioFunction *function, u32 address, u8 data)
-{
- struct sdio_func *func = (struct sdio_func *)function->priv;
- int err = 0;
-
- _sdio_claim_host(func);
-#ifdef MMC_QUIRK_LENIENT_FN0
- sdio_f0_writeb(func, data, address, &err);
-#else
- err = csr_io_rw_direct(func->card, 1, 0, address, data, NULL);
-#endif
- _sdio_release_host(func);
-
- if (err) {
- return ConvertSdioToCsrSdioResult(err);
- }
-
- return CSR_RESULT_SUCCESS;
-} /* CsrSdioF0Write8() */
-
-
-CsrResult
-CsrSdioRead(CsrSdioFunction *function, u32 address, void *data, u32 length)
-{
- struct sdio_func *func = (struct sdio_func *)function->priv;
- int err;
-
- _sdio_claim_host(func);
- err = sdio_readsb(func, data, address, length);
- _sdio_release_host(func);
-
- if (err) {
- return ConvertSdioToCsrSdioResult(err);
- }
-
- return CSR_RESULT_SUCCESS;
-} /* CsrSdioRead() */
-
-CsrResult
-CsrSdioWrite(CsrSdioFunction *function, u32 address, const void *data, u32 length)
-{
- struct sdio_func *func = (struct sdio_func *)function->priv;
- int err;
-
- _sdio_claim_host(func);
- err = sdio_writesb(func, address, (void*)data, length);
- _sdio_release_host(func);
-
- if (err) {
- return ConvertSdioToCsrSdioResult(err);
- }
-
- return CSR_RESULT_SUCCESS;
-} /* CsrSdioWrite() */
-
-
-static int
-csr_sdio_enable_hs(struct mmc_card *card)
-{
- int ret;
- u8 speed;
-
- if (!(card->host->caps & MMC_CAP_SD_HIGHSPEED)) {
- /* We've asked for HS clock rates, but the controller doesn't
- * claim to support them, so the clock should be limited to
- * 25 MHz via the module parameter.
- */
- printk(KERN_INFO "unifi: request HS but not MMC_CAP_SD_HIGHSPEED");
- return 0;
- }
-
- if (!card->cccr.high_speed)
- return 0;
-
-#if 1
- ret = csr_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
- if (ret)
- return ret;
-
- speed |= SDIO_SPEED_EHS;
-#else
- /* Optimisation: Eliminate read by always assuming SHS and that reserved bits can be zero */
- speed = SDIO_SPEED_EHS | SDIO_SPEED_SHS;
-#endif
-
- ret = csr_io_rw_direct(card, 1, 0, SDIO_CCCR_SPEED, speed, NULL);
- if (ret)
- return ret;
-
- mmc_card_set_highspeed(card);
- card->host->ios.timing = MMC_TIMING_SD_HS;
- card->host->ops->set_ios(card->host, &card->host->ios);
-
- return 0;
-}
-
-static int
-csr_sdio_disable_hs(struct mmc_card *card)
-{
- int ret;
- u8 speed;
-
- if (!(card->host->caps & MMC_CAP_SD_HIGHSPEED))
- return 0;
-
- if (!card->cccr.high_speed)
- return 0;
-#if 1
- ret = csr_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
- if (ret)
- return ret;
-
- speed &= ~SDIO_SPEED_EHS;
-#else
- /* Optimisation: Eliminate read by always assuming SHS and that reserved bits can be zero */
- speed = SDIO_SPEED_SHS; /* clear SDIO_SPEED_EHS */
-#endif
-
- ret = csr_io_rw_direct(card, 1, 0, SDIO_CCCR_SPEED, speed, NULL);
- if (ret)
- return ret;
-
- card->state &= ~MMC_STATE_HIGHSPEED;
- card->host->ios.timing = MMC_TIMING_LEGACY;
- card->host->ops->set_ios(card->host, &card->host->ios);
-
- return 0;
-}
-
-
-/*
- * ---------------------------------------------------------------------------
- * CsrSdioMaxBusClockFrequencySet
- *
- * Set the maximum SDIO bus clock speed to use.
- *
- * Arguments:
- * sdio SDIO context pointer
- * maxFrequency maximum clock speed in Hz
- *
- * Returns:
- * an error code.
- * ---------------------------------------------------------------------------
- */
-CsrResult
-CsrSdioMaxBusClockFrequencySet(CsrSdioFunction *function, u32 maxFrequency)
-{
- struct sdio_func *func = (struct sdio_func *)function->priv;
- struct mmc_host *host = func->card->host;
- struct mmc_ios *ios = &host->ios;
- unsigned int max_hz;
- int err;
- u32 max_khz = maxFrequency/1000;
-
- if (!max_khz || max_khz > sdio_clock) {
- max_khz = sdio_clock;
- }
-
- _sdio_claim_host(func);
- max_hz = 1000 * max_khz;
- if (max_hz > host->f_max) {
- max_hz = host->f_max;
- }
-
- if (max_hz > 25000000) {
- err = csr_sdio_enable_hs(func->card);
- } else {
- err = csr_sdio_disable_hs(func->card);
- }
- if (err) {
- printk(KERN_ERR "SDIO warning: Failed to configure SDIO clock mode\n");
- _sdio_release_host(func);
- return CSR_RESULT_SUCCESS;
- }
-
- ios->clock = max_hz;
- host->ops->set_ios(host, ios);
-
- _sdio_release_host(func);
-
- return CSR_RESULT_SUCCESS;
-} /* CsrSdioMaxBusClockFrequencySet() */
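-
-/* Illustrative sketch (not part of the original file): callers wrap the
- * clock change in a claim/release pair, as unifi_putest_set_sdio_clock()
- * does in putest.c ("priv" is assumed to be a unifi_priv_t with a valid
- * sdio handle):
- *
- *   CsrSdioClaim(priv->sdio);
- *   csrResult = CsrSdioMaxBusClockFrequencySet(priv->sdio, khz * 1000);
- *   CsrSdioRelease(priv->sdio);
- */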
-
-
-/*
- * ---------------------------------------------------------------------------
- * CsrSdioInterruptEnable
- * CsrSdioInterruptDisable
- *
- * Enable or disable the SDIO interrupt.
- * The driver disables the SDIO interrupt until the i/o thread can
- * process it.
- * The SDIO interrupt can be disabled by modifying the SDIO_INT_ENABLE
- * register in the Card Common Control Register block, but this requires
- * two CMD52 operations. A better solution is to mask the interrupt at
- * the host controller.
- *
- * Arguments:
- * sdio SDIO context pointer
- *
- * Returns:
- * Zero on success or a UniFi driver error code.
- *
- * ---------------------------------------------------------------------------
- */
-CsrResult
-CsrSdioInterruptEnable(CsrSdioFunction *function)
-{
- struct sdio_func *func = (struct sdio_func *)function->priv;
- int err = 0;
-
-#ifdef CSR_CONFIG_MMC_INT_BYPASS_KSOFTIRQD
- sdio_unblock_card_irq(func);
-#else
- _sdio_claim_host(func);
- /* Write the Int Enable in CCCR block */
-#ifdef MMC_QUIRK_LENIENT_FN0
- sdio_f0_writeb(func, 0x3, SDIO_CCCR_IENx, &err);
-#else
- err = csr_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, 0x03, NULL);
-#endif
- _sdio_release_host(func);
-
- if (err) {
- printk(KERN_ERR "unifi: %s: error %d writing IENx\n", __FUNCTION__, err);
- return ConvertSdioToCsrSdioResult(err);
- }
-#endif
- return CSR_RESULT_SUCCESS;
-} /* CsrSdioInterruptEnable() */
-
-CsrResult
-CsrSdioInterruptDisable(CsrSdioFunction *function)
-{
- struct sdio_func *func = (struct sdio_func *)function->priv;
- int err = 0;
-
-#ifdef CSR_CONFIG_MMC_INT_BYPASS_KSOFTIRQD
- sdio_block_card_irq(func);
-#else
- _sdio_claim_host(func);
- /* Write the Int Enable in CCCR block */
-#ifdef MMC_QUIRK_LENIENT_FN0
- sdio_f0_writeb(func, 0, SDIO_CCCR_IENx, &err);
-#else
- err = csr_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, 0x00, NULL);
-#endif
- _sdio_release_host(func);
-
- if (err) {
- printk(KERN_ERR "unifi: %s: error %d writing IENx\n", __FUNCTION__, err);
- return ConvertSdioToCsrSdioResult(err);
- }
-#endif
- return CSR_RESULT_SUCCESS;
-} /* CsrSdioInterruptDisable() */
-
-
-void CsrSdioInterruptAcknowledge(CsrSdioFunction *function)
-{
-}
-
-
-/*
- * ---------------------------------------------------------------------------
- * CsrSdioFunctionEnable
- *
- * Enable i/o on function 1.
- *
- * Arguments:
- * sdio SDIO context pointer
- *
- * Returns:
- * UniFi driver error code.
- * ---------------------------------------------------------------------------
- */
-CsrResult
-CsrSdioFunctionEnable(CsrSdioFunction *function)
-{
- struct sdio_func *func = (struct sdio_func *)function->priv;
- int err;
-
- /* Enable UniFi function 1 (the 802.11 part). */
- _sdio_claim_host(func);
- err = sdio_enable_func(func);
- _sdio_release_host(func);
- if (err) {
- unifi_error(NULL, "Failed to enable SDIO function %d\n", func->num);
- }
-
- return ConvertSdioToCsrSdioResult(err);
-} /* CsrSdioFunctionEnable() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * CsrSdioFunctionDisable
- *
- * Disable i/o on function 1.
- *
- * Arguments:
- * sdio SDIO context pointer
- *
- * Returns:
- * UniFi driver error code.
- * ---------------------------------------------------------------------------
- */
-CsrResult
-CsrSdioFunctionDisable(CsrSdioFunction *function)
-{
- struct sdio_func *func = (struct sdio_func *)function->priv;
- int err;
-
- /* Disable UniFi function 1 (the 802.11 part). */
- _sdio_claim_host(func);
- err = sdio_disable_func(func);
- _sdio_release_host(func);
- if (err) {
- unifi_error(NULL, "Failed to disable SDIO function %d\n", func->num);
- }
-
- return ConvertSdioToCsrSdioResult(err);
-} /* CsrSdioFunctionDisable() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * CsrSdioFunctionActive
- *
- * No-op as the bus goes to an active state at the start of every
- * command.
- *
- * Arguments:
- * sdio SDIO context pointer
- * ---------------------------------------------------------------------------
- */
-void
-CsrSdioFunctionActive(CsrSdioFunction *function)
-{
-} /* CsrSdioFunctionActive() */
-
-/*
- * ---------------------------------------------------------------------------
- * CsrSdioFunctionIdle
- *
- * Set the function as idle.
- *
- * Arguments:
- * sdio SDIO context pointer
- * ---------------------------------------------------------------------------
- */
-void
-CsrSdioFunctionIdle(CsrSdioFunction *function)
-{
-} /* CsrSdioFunctionIdle() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * CsrSdioPowerOn
- *
- * Power on UniFi.
- *
- * Arguments:
- * sdio SDIO context pointer
- * ---------------------------------------------------------------------------
- */
-CsrResult
-CsrSdioPowerOn(CsrSdioFunction *function)
-{
- struct sdio_func *func = (struct sdio_func *)function->priv;
- struct mmc_host *host = func->card->host;
-
- _sdio_claim_host(func);
- if (!card_is_powered) {
- mmc_power_restore_host(host);
- card_is_powered = 1;
- } else {
- printk(KERN_INFO "SDIO: Skip power on; card is already powered.\n");
- }
- _sdio_release_host(func);
-
- return CSR_RESULT_SUCCESS;
-} /* CsrSdioPowerOn() */
-
-/*
- * ---------------------------------------------------------------------------
- * CsrSdioPowerOff
- *
- * Power off UniFi.
- *
- * Arguments:
- * sdio SDIO context pointer
- * ---------------------------------------------------------------------------
- */
-void
-CsrSdioPowerOff(CsrSdioFunction *function)
-{
- struct sdio_func *func = (struct sdio_func *)function->priv;
- struct mmc_host *host = func->card->host;
-
- _sdio_claim_host(func);
- if (card_is_powered) {
- mmc_power_save_host(host);
- card_is_powered = 0;
- } else {
- printk(KERN_INFO "SDIO: Skip power off; card is already powered off.\n");
- }
- _sdio_release_host(func);
-} /* CsrSdioPowerOff() */
-
-
-static int
-sdio_set_block_size_ignore_first_error(struct sdio_func *func, unsigned blksz)
-{
- int ret;
-
- if (blksz > func->card->host->max_blk_size)
- return -EINVAL;
-
- if (blksz == 0) {
- blksz = min(func->max_blksize, func->card->host->max_blk_size);
- blksz = min(blksz, 512u);
- }
-
- /*
- * Ignore -ERANGE (OUT_OF_RANGE in R5) on the first byte as
- * the block size may be invalid until both bytes are written.
- */
- ret = csr_io_rw_direct(func->card, 1, 0,
- SDIO_FBR_BASE(func->num) + SDIO_FBR_BLKSIZE,
- blksz & 0xff, NULL);
- if (ret && ret != -ERANGE)
- return ret;
- ret = csr_io_rw_direct(func->card, 1, 0,
- SDIO_FBR_BASE(func->num) + SDIO_FBR_BLKSIZE + 1,
- (blksz >> 8) & 0xff, NULL);
- if (ret)
- return ret;
- func->cur_blksize = blksz;
-
- return 0;
-}
-
-CsrResult
-CsrSdioBlockSizeSet(CsrSdioFunction *function, u16 blockSize)
-{
- struct sdio_func *func = (struct sdio_func *)function->priv;
- int r = 0;
-
- /* Module parameter overrides */
- if (sdio_block_size > -1) {
- blockSize = sdio_block_size;
- }
-
- unifi_trace(NULL, UDBG1, "Set SDIO function block size to %d\n",
- blockSize);
-
- _sdio_claim_host(func);
- r = sdio_set_block_size(func, blockSize);
- _sdio_release_host(func);
-
- /*
- * The MMC driver for kernels prior to 2.6.32 may fail this request
- * with -ERANGE. In this case use our workaround.
- */
- if (r == -ERANGE) {
- _sdio_claim_host(func);
- r = sdio_set_block_size_ignore_first_error(func, blockSize);
- _sdio_release_host(func);
- }
- if (r) {
- unifi_error(NULL, "Error %d setting block size\n", r);
- }
-
- /* Determine the achieved block size to pass to the core */
- function->blockSize = func->cur_blksize;
-
- return ConvertSdioToCsrSdioResult(r);
-} /* CsrSdioBlockSizeSet() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * CsrSdioHardReset
- *
- * Hard resets UniFi, if possible.
- *
- * Arguments:
- * sdio SDIO context pointer
- * ---------------------------------------------------------------------------
- */
-CsrResult
-CsrSdioHardReset(CsrSdioFunction *function)
-{
- return CSR_RESULT_FAILURE;
-} /* CsrSdioHardReset() */
-
-
-
-/*
- * ---------------------------------------------------------------------------
- * uf_glue_sdio_int_handler
- *
- * Interrupt callback function for SDIO interrupts.
- * This is called in kernel context (i.e. not interrupt context).
- *
- * Arguments:
- * func SDIO context pointer
- *
- * Returns:
- * None.
- *
- * Note: Called with host already claimed.
- * ---------------------------------------------------------------------------
- */
-static void
-uf_glue_sdio_int_handler(struct sdio_func *func)
-{
- CsrSdioFunction *sdio_ctx;
- CsrSdioInterruptDsrCallback func_dsr_callback;
- int r;
-
- sdio_ctx = sdio_get_drvdata(func);
- if (!sdio_ctx) {
- return;
- }
-
-#ifndef CSR_CONFIG_MMC_INT_BYPASS_KSOFTIRQD
- /*
- * Normally, we are not allowed to do any SDIO commands here.
- * However, this is called in a thread context and with the SDIO lock
- * so we disable the interrupts here instead of trying to do complicated
- * things with the SDIO lock.
- */
-#ifdef MMC_QUIRK_LENIENT_FN0
- sdio_f0_writeb(func, 0, SDIO_CCCR_IENx, &r);
-#else
- r = csr_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, 0x00, NULL);
-#endif
- if (r) {
- printk(KERN_ERR "UniFi MMC Int handler: Failed to disable interrupts %d\n", r);
- }
-#endif
-
- /* If the function driver has registered a handler, call it */
- if (sdio_func_drv && sdio_func_drv->intr) {
-
- func_dsr_callback = sdio_func_drv->intr(sdio_ctx);
-
- /* If interrupt handle returns a DSR handle, call it */
- if (func_dsr_callback) {
- func_dsr_callback(sdio_ctx);
- }
- }
-
-} /* uf_glue_sdio_int_handler() */
-
-
-
-/*
- * ---------------------------------------------------------------------------
- * csr_sdio_linux_remove_irq
- *
- * Unregister the interrupt handler.
- * This means that the linux layer can not process interrupts any more.
- *
- * Arguments:
- * sdio SDIO context pointer
- *
- * Returns:
- * Status of the removal.
- * ---------------------------------------------------------------------------
- */
-int csr_sdio_linux_remove_irq(CsrSdioFunction *function)
-{
- struct sdio_func *func = (struct sdio_func *)function->priv;
- int r;
-
- unifi_trace(NULL, UDBG1, "csr_sdio_linux_remove_irq\n");
-
- sdio_claim_host(func);
- r = sdio_release_irq(func);
- sdio_release_host(func);
-
- return r;
-
-} /* csr_sdio_linux_remove_irq() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * csr_sdio_linux_install_irq
- *
- * Register the interrupt handler.
- * This means that the linux layer can process interrupts.
- *
- * Arguments:
- * sdio SDIO context pointer
- *
- * Returns:
- * Status of the installation.
- * ---------------------------------------------------------------------------
- */
-int csr_sdio_linux_install_irq(CsrSdioFunction *function)
-{
- struct sdio_func *func = (struct sdio_func *)function->priv;
- int r;
-
- unifi_trace(NULL, UDBG1, "csr_sdio_linux_install_irq\n");
-
- /* Register our interrupt handle */
- sdio_claim_host(func);
- r = sdio_claim_irq(func, uf_glue_sdio_int_handler);
- sdio_release_host(func);
-
- /* If the interrupt was installed earlier, that's fine */
- if (r == -EBUSY)
- r = 0;
-
- return r;
-} /* csr_sdio_linux_install_irq() */
-
-#ifdef CONFIG_PM
-
-/*
- * Power Management notifier
- */
-struct uf_sdio_mmc_pm_notifier
-{
- struct list_head list;
-
- CsrSdioFunction *sdio_ctx;
- struct notifier_block pm_notifier;
-};
-
-/* PM notifier list head */
-static struct uf_sdio_mmc_pm_notifier uf_sdio_mmc_pm_notifiers = {
- .sdio_ctx = NULL,
-};
-
-/*
- * ---------------------------------------------------------------------------
- * uf_sdio_mmc_register_pm_notifier
- * uf_sdio_mmc_unregister_pm_notifier
- *
- * Register/unregister for power management events. A list is used to
- * allow multiple card instances to be supported.
- *
- * Arguments:
- * sdio_ctx - CSR SDIO context to associate PM notifier to
- *
- * Returns:
- * Register function returns NULL on error
- * ---------------------------------------------------------------------------
- */
-static struct uf_sdio_mmc_pm_notifier *
-uf_sdio_mmc_register_pm_notifier(CsrSdioFunction *sdio_ctx)
-{
- /* Allocate notifier context for this card instance */
- struct uf_sdio_mmc_pm_notifier *notifier_ctx = kmalloc(sizeof(struct uf_sdio_mmc_pm_notifier), GFP_KERNEL);
-
- if (notifier_ctx)
- {
- notifier_ctx->sdio_ctx = sdio_ctx;
- notifier_ctx->pm_notifier.notifier_call = uf_sdio_mmc_power_event;
-
- list_add(&notifier_ctx->list, &uf_sdio_mmc_pm_notifiers.list);
-
- if (register_pm_notifier(&notifier_ctx->pm_notifier)) {
- printk(KERN_ERR "unifi: register_pm_notifier failed\n");
- }
- }
-
- return notifier_ctx;
-}
-
-static void
-uf_sdio_mmc_unregister_pm_notifier(CsrSdioFunction *sdio_ctx)
-{
- struct uf_sdio_mmc_pm_notifier *notifier_ctx;
- struct list_head *node, *q;
-
- list_for_each_safe(node, q, &uf_sdio_mmc_pm_notifiers.list) {
- notifier_ctx = list_entry(node, struct uf_sdio_mmc_pm_notifier, list);
-
- /* If it matches, unregister and free the notifier context */
- if (notifier_ctx && notifier_ctx->sdio_ctx == sdio_ctx)
- {
- if (unregister_pm_notifier(&notifier_ctx->pm_notifier)) {
- printk(KERN_ERR "unifi: unregister_pm_notifier failed\n");
- }
-
- /* Remove from list */
- notifier_ctx->sdio_ctx = NULL;
- list_del(node);
- kfree(notifier_ctx);
- }
- }
-}
-
-/*
- * ---------------------------------------------------------------------------
- * uf_sdio_mmc_power_event
- *
- * Handler for power management events.
- *
- * We need to handle suspend/resume events while the userspace is unsuspended
- * to allow the SME to run its suspend/resume state machines.
- *
- * Arguments:
- * event event ID
- *
- * Returns:
- * Status of the event handling
- * ---------------------------------------------------------------------------
- */
-static int
-uf_sdio_mmc_power_event(struct notifier_block *this, unsigned long event, void *ptr)
-{
- struct uf_sdio_mmc_pm_notifier *notifier_ctx = container_of(this,
- struct uf_sdio_mmc_pm_notifier,
- pm_notifier);
-
- /* Call the CSR SDIO function driver's suspend/resume method
- * while the userspace is unsuspended.
- */
- switch (event) {
- case PM_POST_HIBERNATION:
- case PM_POST_SUSPEND:
- printk(KERN_INFO "%s:%d resume\n", __FUNCTION__, __LINE__ );
- if (sdio_func_drv && sdio_func_drv->resume) {
- sdio_func_drv->resume(notifier_ctx->sdio_ctx);
- }
- break;
-
- case PM_HIBERNATION_PREPARE:
- case PM_SUSPEND_PREPARE:
- printk(KERN_INFO "%s:%d suspend\n", __FUNCTION__, __LINE__ );
- if (sdio_func_drv && sdio_func_drv->suspend) {
- sdio_func_drv->suspend(notifier_ctx->sdio_ctx);
- }
- break;
- }
- return NOTIFY_DONE;
-}
-
-#endif /* CONFIG_PM */
-
-/*
- * ---------------------------------------------------------------------------
- * uf_glue_sdio_probe
- *
- * Card insert callback.
- *
- * Arguments:
- * func Our (glue layer) context pointer.
- *
- * Returns:
- * UniFi driver error code.
- * ---------------------------------------------------------------------------
- */
-static int
-uf_glue_sdio_probe(struct sdio_func *func,
- const struct sdio_device_id *id)
-{
- int instance;
- CsrSdioFunction *sdio_ctx;
-
- /* First of all claim the SDIO driver */
- sdio_claim_host(func);
-
- /* Assume that the card is already powered */
- card_is_powered = 1;
-
- /* Assumes one card per host, which is true for SDIO */
- instance = func->card->host->index;
- printk("sdio bus_id: %16s - UniFi card 0x%X inserted\n",
- sdio_func_id(func), instance);
-
- /* Allocate context */
- sdio_ctx = kmalloc(sizeof(CsrSdioFunction), GFP_KERNEL);
- if (sdio_ctx == NULL) {
- sdio_release_host(func);
- return -ENOMEM;
- }
-
- /* Initialise the context */
- sdio_ctx->sdioId.manfId = func->vendor;
- sdio_ctx->sdioId.cardId = func->device;
- sdio_ctx->sdioId.sdioFunction = func->num;
- sdio_ctx->sdioId.sdioInterface = func->class;
- sdio_ctx->blockSize = func->cur_blksize;
- sdio_ctx->priv = (void *)func;
- sdio_ctx->features = 0;
-
- /* Module parameter enables byte mode */
- if (sdio_byte_mode) {
- sdio_ctx->features |= CSR_SDIO_FEATURE_BYTE_MODE;
- }
-
- if (func->card->host->caps & MMC_CAP_SD_HIGHSPEED) {
- unifi_trace(NULL, UDBG1, "MMC_CAP_SD_HIGHSPEED is available\n");
- }
-
-#ifdef MMC_QUIRK_LENIENT_FN0
- func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
-#endif
-
- /* Pass context to the SDIO driver */
- sdio_set_drvdata(func, sdio_ctx);
-
-#ifdef CONFIG_PM
- /* Register to get PM events */
- if (uf_sdio_mmc_register_pm_notifier(sdio_ctx) == NULL) {
- unifi_error(NULL, "%s: Failed to register for PM events\n", __FUNCTION__);
- }
-#endif
-
- /* Register this device with the SDIO function driver */
- /* Call the main UniFi driver inserted handler */
- if (sdio_func_drv && sdio_func_drv->inserted) {
- uf_add_os_device(instance, &func->dev);
- sdio_func_drv->inserted(sdio_ctx);
- }
-
- /* We have finished, so release the SDIO driver */
- sdio_release_host(func);
-
-#ifdef ANDROID_BUILD
- /* Take the wakelock */
- unifi_trace(NULL, UDBG1, "probe: take wake lock\n");
- wake_lock(&unifi_sdio_wake_lock);
-#endif
-
- return 0;
-} /* uf_glue_sdio_probe() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * uf_glue_sdio_remove
- *
- * Card removal callback.
- *
- * Arguments:
- * func Our (glue layer) context pointer.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-static void
-uf_glue_sdio_remove(struct sdio_func *func)
-{
- CsrSdioFunction *sdio_ctx;
-
- sdio_ctx = sdio_get_drvdata(func);
- if (!sdio_ctx) {
- return;
- }
-
- unifi_info(NULL, "UniFi card removed\n");
-
- /* Clean up the SDIO function driver */
- if (sdio_func_drv && sdio_func_drv->removed) {
- uf_remove_os_device(func->card->host->index);
- sdio_func_drv->removed(sdio_ctx);
- }
-
-#ifdef CONFIG_PM
- /* Unregister for PM events */
- uf_sdio_mmc_unregister_pm_notifier(sdio_ctx);
-#endif
-
- kfree(sdio_ctx);
-
-} /* uf_glue_sdio_remove */
-
-
-/*
- * SDIO ids *must* be statically declared, so we can't take
- * them from the list passed in csr_sdio_register_driver().
- */
-static const struct sdio_device_id unifi_ids[] = {
- { SDIO_DEVICE(SDIO_MANF_ID_CSR,SDIO_CARD_ID_UNIFI_3) },
- { SDIO_DEVICE(SDIO_MANF_ID_CSR,SDIO_CARD_ID_UNIFI_4) },
- { /* end: all zeroes */ },
-};
-
-MODULE_DEVICE_TABLE(sdio, unifi_ids);
-
-#ifdef CONFIG_PM
-
-/*
- * ---------------------------------------------------------------------------
- * uf_glue_sdio_suspend
- *
- * Card suspend callback. The userspace will already be suspended.
- *
- * Arguments:
- * dev The struct device owned by the MMC driver
- *
- * Returns:
- * Zero.
- * ---------------------------------------------------------------------------
- */
-static int
-uf_glue_sdio_suspend(struct device *dev)
-{
- unifi_trace(NULL, UDBG1, "uf_glue_sdio_suspend");
-
- return 0;
-} /* uf_glue_sdio_suspend */
-
-
-/*
- * ---------------------------------------------------------------------------
- * uf_glue_sdio_resume
- *
- * Card resume callback. The userspace will still be suspended.
- *
- * Arguments:
- * dev The struct device owned by the MMC driver
- *
- * Returns:
- * Zero.
- * ---------------------------------------------------------------------------
- */
-static int
-uf_glue_sdio_resume(struct device *dev)
-{
- unifi_trace(NULL, UDBG1, "uf_glue_sdio_resume");
-
-#ifdef ANDROID_BUILD
- unifi_trace(NULL, UDBG1, "resume: take wakelock\n");
- wake_lock(&unifi_sdio_wake_lock);
-#endif
-
- return 0;
-
-} /* uf_glue_sdio_resume */
-
-static struct dev_pm_ops unifi_pm_ops = {
- .suspend = uf_glue_sdio_suspend,
- .resume = uf_glue_sdio_resume,
-};
-
-#define UNIFI_PM_OPS (&unifi_pm_ops)
-
-#else
-
-#define UNIFI_PM_OPS NULL
-
-#endif /* CONFIG_PM */
-
-static struct sdio_driver unifi_driver = {
- .probe = uf_glue_sdio_probe,
- .remove = uf_glue_sdio_remove,
- .name = "unifi",
- .id_table = unifi_ids,
- .drv.pm = UNIFI_PM_OPS,
-};
-
-
-/*
- * ---------------------------------------------------------------------------
- * CsrSdioFunctionDriverRegister
- * CsrSdioFunctionDriverUnregister
- *
- * These functions are called from the main module load and unload
- * functions. They perform the appropriate operations for the
- * linux MMC/SDIO driver.
- *
- * Arguments:
- * sdio_drv Pointer to the function driver's SDIO structure.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-CsrResult
-CsrSdioFunctionDriverRegister(CsrSdioFunctionDriver *sdio_drv)
-{
- int r;
-
- printk("UniFi: Using native Linux MMC driver for SDIO.\n");
-
- if (sdio_func_drv) {
- unifi_error(NULL, "sdio_mmc: UniFi driver already registered\n");
- return CSR_SDIO_RESULT_INVALID_VALUE;
- }
-
-#ifdef ANDROID_BUILD
- wake_lock_init(&unifi_sdio_wake_lock, WAKE_LOCK_SUSPEND, "unifi_sdio_work");
-#endif
-
- /* Save the registered driver description */
- /*
- * FIXME:
- * Need a table here to handle a call to register for just one function.
- * mmc only allows us to register for the whole device
- */
- sdio_func_drv = sdio_drv;
-
-#ifdef CONFIG_PM
- /* Initialise PM notifier list */
- INIT_LIST_HEAD(&uf_sdio_mmc_pm_notifiers.list);
-#endif
-
- /* Register ourself with mmc_core */
- r = sdio_register_driver(&unifi_driver);
- if (r) {
- printk(KERN_ERR "unifi_sdio: Failed to register UniFi SDIO driver: %d\n", r);
- return ConvertSdioToCsrSdioResult(r);
- }
-
- return CSR_RESULT_SUCCESS;
-} /* CsrSdioFunctionDriverRegister() */
-
-
-
-void
-CsrSdioFunctionDriverUnregister(CsrSdioFunctionDriver *sdio_drv)
-{
- printk(KERN_INFO "UniFi: unregister from MMC sdio\n");
-
-#ifdef ANDROID_BUILD
- wake_lock_destroy(&unifi_sdio_wake_lock);
-#endif
- sdio_unregister_driver(&unifi_driver);
-
- sdio_func_drv = NULL;
-
-} /* CsrSdioFunctionDriverUnregister() */
-
diff --git a/drivers/staging/csr/sdio_stubs.c b/drivers/staging/csr/sdio_stubs.c
deleted file mode 100644
index 839fae0..0000000
--- a/drivers/staging/csr/sdio_stubs.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Stubs for some of the bottom edge functions.
- *
- * These stubs are optional functions in the bottom edge (SDIO driver
- * interface) API that not all platforms or SDIO drivers may support.
- *
- * They're declared as weak symbols so they can be overridden by
- * simply providing a non-weak declaration.
- *
- * Copyright (C) 2007-2008 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- */
-#include "csr_wifi_hip_unifi.h"
-
-void __attribute__((weak)) CsrSdioFunctionIdle(CsrSdioFunction *function)
-{
-}
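/*
 * Illustrative sketch only (not part of the deleted file): because the stubs
 * in sdio_stubs.c are weak symbols, a platform port can override any of them
 * simply by providing a normal (strong) definition elsewhere in the build.
 * The board-specific behaviour suggested in the comment below is hypothetical.
 */
#include "csr_wifi_hip_unifi.h"

void CsrSdioFunctionIdle(CsrSdioFunction *function)
{
    /* Strong definition: overrides the weak stub at link time.
     * A real port might gate an SDIO bus clock or a regulator here.
     */
}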
-
-void __attribute__((weak)) CsrSdioFunctionActive(CsrSdioFunction *function)
-{
-}
-
-CsrResult __attribute__((weak)) CsrSdioPowerOn(CsrSdioFunction *function)
-{
- return CSR_RESULT_SUCCESS;
-}
-
-void __attribute__((weak)) CsrSdioPowerOff(CsrSdioFunction *function)
-{
-}
-
-CsrResult __attribute__((weak)) CsrSdioHardReset(CsrSdioFunction *function)
-{
- return CSR_SDIO_RESULT_NOT_RESET;
-}
-
-CsrResult __attribute__((weak)) CsrSdioBlockSizeSet(CsrSdioFunction *function,
- u16 blockSize)
-{
- return CSR_RESULT_SUCCESS;
-}
-
-CsrResult __attribute__((weak)) CsrSdioSuspend(CsrSdioFunction *function)
-{
- return CSR_RESULT_SUCCESS;
-}
-
-CsrResult __attribute__((weak)) CsrSdioResume(CsrSdioFunction *function)
-{
- return CSR_RESULT_SUCCESS;
-}
-
-int __attribute__((weak)) csr_sdio_linux_install_irq(CsrSdioFunction *function)
-{
- return 0;
-}
-
-int __attribute__((weak)) csr_sdio_linux_remove_irq(CsrSdioFunction *function)
-{
- return 0;
-}
-
-void __attribute__((weak)) CsrSdioInsertedAcknowledge(CsrSdioFunction *function, CsrResult result)
-{
-}
-
-void __attribute__((weak)) CsrSdioRemovedAcknowledge(CsrSdioFunction *function)
-{
-}
-
-void __attribute__((weak)) CsrSdioSuspendAcknowledge(CsrSdioFunction *function, CsrResult result)
-{
-}
-
-void __attribute__((weak)) CsrSdioResumeAcknowledge(CsrSdioFunction *function, CsrResult result)
-{
-}
-
-
diff --git a/drivers/staging/csr/sme_blocking.c b/drivers/staging/csr/sme_blocking.c
deleted file mode 100644
index d88ccd5..0000000
--- a/drivers/staging/csr/sme_blocking.c
+++ /dev/null
@@ -1,1466 +0,0 @@
-/*
- * ---------------------------------------------------------------------------
- * FILE: sme_mgt_blocking.c
- *
- * PURPOSE:
- * This file contains the driver specific implementation of
- * the WEXT <==> SME MGT interface for all SME builds that support WEXT.
- *
- * Copyright (C) 2009 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ---------------------------------------------------------------------------
- */
-
-#include "unifi_priv.h"
-
-
-/*
- * This file also contains the implementation of the asynchronous
- * requests to the SME.
- *
- * Before calling an asynchronous SME function, we call sme_init_request()
- * which gets hold of the SME semaphore and updates the request status.
- * The semaphore makes sure that there is only one pending request to
- * the SME at a time.
- *
- * Now we are ready to call the SME function, but only if
- * sme_init_request() has returned 0.
- *
- * When the SME function returns, we need to wait
- * for the reply. This is done in sme_wait_for_reply().
- * If the request times-out, the request status is set to SME_REQUEST_TIMEDOUT
- * and the sme_wait_for_reply() returns.
- *
- * If the SME replies in time, we call sme_complete_request().
- * There we change the request status to SME_REQUEST_RECEIVED. This will
- * wake up the process waiting on sme_wait_for_reply().
- * It is important that we copy the reply data in priv->sme_reply
- * before calling sme_complete_request().
- *
- * When handling the wext requests, we need to block
- * until the SME sends the response to our request.
- * We use sme_init_request() and sme_wait_for_reply()
- * to implement this behavior in the following functions:
- * sme_mgt_wifi_on()
- * sme_mgt_wifi_off()
- * sme_mgt_scan_full()
- * sme_mgt_scan_results_get_async()
- * sme_mgt_connect()
- * unifi_mgt_media_status_ind()
- * sme_mgt_disconnect()
- * sme_mgt_pmkid()
- * sme_mgt_key()
- * sme_mgt_mib_get()
- * sme_mgt_mib_set()
- * sme_mgt_versions_get()
- * sme_mgt_set_value()
- * sme_mgt_get_value()
- * sme_mgt_set_value_async()
- * sme_mgt_get_value_async()
- * sme_mgt_packet_filter_set()
- * sme_mgt_tspec()
- */
-
-
-/*
- * When handling the suspend and resume system events, we need to block
- * until the SME sends the response to our indication.
- * We use sme_init_request() and sme_wait_for_reply()
- * to implement this behavior in the following functions:
- * sme_sys_suspend()
- * sme_sys_resume()
- */
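/*
 * Illustrative sketch only (not part of the deleted file): the shape that
 * every blocking call in this file follows.  sme_init_request() takes the
 * SME semaphore and marks the request pending, the request is sent, and
 * sme_wait_for_reply() blocks until the matching confirm handler calls
 * sme_complete_request() (or the wait times out).  The request name
 * "CsrWifiSmeSomethingReqSend" is a hypothetical stand-in.
 */
static int sme_mgt_something(unifi_priv_t *priv)
{
    int r;

    r = sme_init_request(priv);          /* take priv->sme_sem, mark PENDING */
    if (r)
        return -EIO;

    CsrWifiSmeSomethingReqSend(0);       /* hypothetical asynchronous request */

    r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
    if (r)
        return r;                        /* -ETIMEDOUT, -EIO or -ERESTARTSYS */

    /* The confirm handler copied its data into priv->sme_reply first */
    return convert_sme_error(priv->sme_reply.reply_status);
}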
-
-#define UNIFI_SME_MGT_SHORT_TIMEOUT 10000
-#define UNIFI_SME_MGT_LONG_TIMEOUT 19000
-#define UNIFI_SME_SYS_LONG_TIMEOUT 10000
-
-#ifdef UNIFI_DEBUG
-# define sme_wait_for_reply(priv, t) _sme_wait_for_reply(priv, t, __func__)
-#else
-# define sme_wait_for_reply(priv, t) _sme_wait_for_reply(priv, t, NULL)
-#endif
-
-static int
-sme_init_request(unifi_priv_t *priv)
-{
- if (priv == NULL) {
- unifi_error(priv, "sme_init_request: Invalid priv\n");
- return -EIO;
- }
-
- unifi_trace(priv, UDBG5, "sme_init_request: wait sem\n");
-
- /* Grab the SME semaphore until the reply comes, or timeout */
- if (down_interruptible(&priv->sme_sem)) {
- unifi_error(priv, "sme_init_request: Failed to get SME semaphore\n");
- return -EIO;
- }
- unifi_trace(priv, UDBG5, "sme_init_request: got sem: pending\n");
-
- priv->sme_reply.request_status = SME_REQUEST_PENDING;
-
- return 0;
-
-} /* sme_init_request() */
-
-
-void
-uf_sme_complete_request(unifi_priv_t *priv, CsrResult reply_status, const char *func)
-{
- if (priv == NULL) {
- unifi_error(priv, "sme_complete_request: Invalid priv\n");
- return;
- }
-
- if (priv->sme_reply.request_status != SME_REQUEST_PENDING) {
- unifi_notice(priv,
- "sme_complete_request: request not pending %s (s:%d)\n",
- (func ? func : ""), priv->sme_reply.request_status);
- return;
- }
- unifi_trace(priv, UDBG5,
- "sme_complete_request: completed %s (s:%d)\n",
- (func ? func : ""), priv->sme_reply.request_status);
-
- priv->sme_reply.request_status = SME_REQUEST_RECEIVED;
- priv->sme_reply.reply_status = reply_status;
-
- wake_up_interruptible(&priv->sme_request_wq);
-
- return;
-}
-
-
-void
-uf_sme_cancel_request(unifi_priv_t *priv, CsrResult reply_status)
-{
- /* Check for a blocking SME request in progress, and cancel the wait.
- * This should be used when the character device is closed.
- */
-
- if (priv == NULL) {
- unifi_error(priv, "sme_cancel_request: Invalid priv\n");
- return;
- }
-
- /* If no request is pending, nothing to wake up */
- if (priv->sme_reply.request_status != SME_REQUEST_PENDING) {
- unifi_trace(priv, UDBG5,
- "sme_cancel_request: no request was pending (s:%d)\n",
- priv->sme_reply.request_status);
- /* Nothing to do */
- return;
- }
- unifi_trace(priv, UDBG5,
- "sme_cancel_request: request cancelled (s:%d)\n",
- priv->sme_reply.request_status);
-
- /* Wake up the wait with an error status */
- priv->sme_reply.request_status = SME_REQUEST_CANCELLED;
- priv->sme_reply.reply_status = reply_status; /* unimportant since the CANCELLED state will fail the ioctl */
-
- wake_up_interruptible(&priv->sme_request_wq);
-
- return;
-}
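/*
 * Illustrative sketch only (not part of the deleted file): how a character
 * device release path might use uf_sme_cancel_request() so that a process
 * blocked in sme_wait_for_reply() is not left sleeping for a confirm that
 * will never be consumed.  The function name and the use of
 * CSR_RESULT_FAILURE here are illustrative assumptions.
 */
static int example_unifi_release(unifi_priv_t *priv)
{
    /* Wake any waiter with a CANCELLED status; the ioctl then fails
     * cleanly instead of sleeping until the timeout expires.
     */
    uf_sme_cancel_request(priv, CSR_RESULT_FAILURE);
    return 0;
}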
-
-
-static int
-_sme_wait_for_reply(unifi_priv_t *priv,
- unsigned long timeout, const char *func)
-{
- long r;
-
- unifi_trace(priv, UDBG5, "sme_wait_for_reply: %s sleep\n", func ? func : "");
- r = wait_event_interruptible_timeout(priv->sme_request_wq,
- (priv->sme_reply.request_status != SME_REQUEST_PENDING),
- msecs_to_jiffies(timeout));
-    unifi_trace(priv, UDBG5, "sme_wait_for_reply: %s awake (%ld)\n", func ? func : "", r);
-
- if (r == -ERESTARTSYS) {
- /* The thread was killed */
- unifi_info(priv, "ERESTARTSYS in _sme_wait_for_reply\n");
- up(&priv->sme_sem);
- return r;
- }
- if (priv->sme_reply.request_status == SME_REQUEST_CANCELLED) {
-        unifi_trace(priv, UDBG5, "Cancelled waiting for SME to reply (%s s:%d, t:%lu, r:%ld)\n",
- (func ? func : ""), priv->sme_reply.request_status, timeout, r);
-
- /* Release the SME semaphore that was downed in sme_init_request() */
- up(&priv->sme_sem);
- return -EIO; /* fail the ioctl */
- }
- if ((r == 0) && (priv->sme_reply.request_status != SME_REQUEST_RECEIVED)) {
-        unifi_notice(priv, "Timeout waiting for SME to reply (%s s:%d, t:%lu)\n",
- (func ? func : ""), priv->sme_reply.request_status, timeout);
-
- priv->sme_reply.request_status = SME_REQUEST_TIMEDOUT;
-
- /* Release the SME semaphore that was downed in sme_init_request() */
- up(&priv->sme_sem);
-
- return -ETIMEDOUT;
- }
-
-    unifi_trace(priv, UDBG5, "sme_wait_for_reply: %s received (%ld)\n",
- func ? func : "", r);
-
- /* Release the SME semaphore that was downed in sme_init_request() */
- up(&priv->sme_sem);
-
- return 0;
-} /* sme_wait_for_reply() */
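/*
 * Note on the return values handled above: wait_event_interruptible_timeout()
 * returns 0 if the timeout elapsed with the condition still false, a positive
 * number of remaining jiffies if the condition became true in time, and
 * -ERESTARTSYS if the sleeping task was interrupted by a signal -- hence the
 * three branches in _sme_wait_for_reply().
 */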
-
-
-
-
-#ifdef CSR_SUPPORT_WEXT
-int sme_mgt_wifi_on(unifi_priv_t *priv)
-{
- u16 numElements;
- CsrWifiSmeDataBlock* dataList;
-#ifdef CSR_SUPPORT_WEXT_AP
- int r;
-#endif
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_wifi_on: invalid smepriv\n");
- return -EIO;
- }
-
- if (priv->mib_data.length) {
- numElements = 1;
- dataList = &priv->mib_data;
- } else {
- numElements = 0;
- dataList = NULL;
- }
- /* Start the SME */
-#ifdef CSR_SUPPORT_WEXT_AP
- r = sme_init_request(priv);
- if (r) {
- return -EIO;
- }
-#endif
- CsrWifiSmeWifiOnReqSend(0, priv->sta_mac_address, numElements, dataList);
-#ifdef CSR_SUPPORT_WEXT_AP
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_LONG_TIMEOUT);
- unifi_trace(priv, UDBG4,
-                "sme_mgt_wifi_on: unifi_mgt_wifi_on_req <-- (r=%d, status=%d)\n",
- r, priv->sme_reply.reply_status);
- return convert_sme_error(priv->sme_reply.reply_status);
-#else
- return 0;
-#endif
-} /* sme_mgt_wifi_on() */
-
-
-int sme_mgt_wifi_off(unifi_priv_t *priv)
-{
- int r;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_wifi_off: invalid smepriv\n");
- return -EIO;
- }
-
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- /* Stop the SME */
- CsrWifiSmeWifiOffReqSend(0);
-
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_LONG_TIMEOUT);
- if (r)
- return r;
-
- unifi_trace(priv, UDBG4,
- "sme_mgt_wifi_off: unifi_mgt_wifi_off_req <-- (r=%d, status=%d)\n",
- r, priv->sme_reply.reply_status);
- return convert_sme_error(priv->sme_reply.reply_status);
-
-} /* sme_mgt_wifi_off */
-
-int sme_mgt_key(unifi_priv_t *priv, CsrWifiSmeKey *sme_key,
- CsrWifiSmeListAction action)
-{
- int r;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_key: invalid smepriv\n");
- return -EIO;
- }
-
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiSmeKeyReqSend(0, CSR_WIFI_INTERFACE_IN_USE, action, *sme_key);
-
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r)
- return r;
-
- return convert_sme_error(priv->sme_reply.reply_status);
-}
-
-
-int sme_mgt_scan_full(unifi_priv_t *priv,
- CsrWifiSsid *specific_ssid,
- int num_channels,
- unsigned char *channel_list)
-{
- CsrWifiMacAddress bcastAddress = {{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }};
- u8 is_active = (num_channels > 0) ? TRUE : FALSE;
- int r;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_scan_full: invalid smepriv\n");
- return -EIO;
- }
-
- unifi_trace(priv, UDBG4, "sme_mgt_scan_full: -->\n");
-
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- /* If a channel list is provided, do an active scan */
- if (is_active) {
- unifi_trace(priv, UDBG1,
- "channel list - num_channels: %d, active scan\n",
- num_channels);
- }
-
- CsrWifiSmeScanFullReqSend(0,
- specific_ssid->length?1:0, /* 0 or 1 SSIDS */
- specific_ssid,
- bcastAddress,
- is_active,
- CSR_WIFI_SME_BSS_TYPE_ANY_BSS,
- CSR_WIFI_SME_SCAN_TYPE_ALL,
- (u16)num_channels, channel_list,
- 0, NULL);
-
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_LONG_TIMEOUT);
- if (r)
- return r;
-
- unifi_trace(priv, UDBG4, "sme_mgt_scan_full: <-- (status=%d)\n", priv->sme_reply.reply_status);
- if (priv->sme_reply.reply_status == CSR_WIFI_RESULT_UNAVAILABLE)
- return 0; /* initial scan already underway */
- else
- return convert_sme_error(priv->sme_reply.reply_status);
-}
-
-
-int sme_mgt_scan_results_get_async(unifi_priv_t *priv,
- struct iw_request_info *info,
- char *scan_results,
- long scan_results_len)
-{
- u16 scan_result_list_count;
- CsrWifiSmeScanResult *scan_result_list;
- CsrWifiSmeScanResult *scan_result;
- int r;
- int i;
- char *current_ev = scan_results;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_scan_results_get_async: invalid smepriv\n");
- return -EIO;
- }
-
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiSmeScanResultsGetReqSend(0);
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_LONG_TIMEOUT);
- if (r)
- return r;
-
- scan_result_list_count = priv->sme_reply.reply_scan_results_count;
- scan_result_list = priv->sme_reply.reply_scan_results;
- unifi_trace(priv, UDBG2,
- "scan_results: Scan returned %d, numElements=%d\n",
- r, scan_result_list_count);
-
- /* OK, now we have the scan results */
- for (i = 0; i < scan_result_list_count; ++i) {
- scan_result = &scan_result_list[i];
-
- unifi_trace(priv, UDBG2, "Scan Result: %.*s\n",
- scan_result->ssid.length,
- scan_result->ssid.ssid);
-
- r = unifi_translate_scan(priv->netdev[0], info,
- current_ev,
- scan_results + scan_results_len,
- scan_result, i+1);
-
- if (r < 0) {
- kfree(scan_result_list);
- priv->sme_reply.reply_scan_results_count = 0;
- priv->sme_reply.reply_scan_results = NULL;
- return r;
- }
-
- current_ev += r;
- }
-
- /*
- * Free the scan results allocated in unifi_mgt_scan_results_get_cfm()
- * and invalidate the reply_scan_results to avoid re-using
- * the freed pointers.
- */
- kfree(scan_result_list);
- priv->sme_reply.reply_scan_results_count = 0;
- priv->sme_reply.reply_scan_results = NULL;
-
- unifi_trace(priv, UDBG2,
- "scan_results: Scan translated to %d bytes\n",
- current_ev - scan_results);
- return (current_ev - scan_results);
-}
-
-
-int sme_mgt_connect(unifi_priv_t *priv)
-{
- int r;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_connect: invalid smepriv\n");
- return -EIO;
- }
-
- unifi_trace(priv, UDBG2, "sme_mgt_connect: %.*s\n",
- priv->connection_config.ssid.length,
- priv->connection_config.ssid.ssid);
-
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiSmeConnectReqSend(0, CSR_WIFI_INTERFACE_IN_USE, priv->connection_config);
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r)
- return r;
-
- if (priv->sme_reply.reply_status)
- unifi_trace(priv, UDBG1, "sme_mgt_connect: failed with SME status %d\n",
- priv->sme_reply.reply_status);
-
- return convert_sme_error(priv->sme_reply.reply_status);
-}
-
-
-int sme_mgt_disconnect(unifi_priv_t *priv)
-{
- int r;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_disconnect: invalid smepriv\n");
- return -EIO;
- }
-
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiSmeDisconnectReqSend(0, CSR_WIFI_INTERFACE_IN_USE);
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r)
- return r;
-
- unifi_trace(priv, UDBG4, "sme_mgt_disconnect: <-- (status=%d)\n", priv->sme_reply.reply_status);
- return convert_sme_error(priv->sme_reply.reply_status);
-}
-
-
-int sme_mgt_pmkid(unifi_priv_t *priv,
- CsrWifiSmeListAction action,
- CsrWifiSmePmkidList *pmkid_list)
-{
- int r;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_pmkid: invalid smepriv\n");
- return -EIO;
- }
-
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiSmePmkidReqSend(0, CSR_WIFI_INTERFACE_IN_USE, action,
- pmkid_list->pmkidsCount, pmkid_list->pmkids);
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r)
- return r;
-
- unifi_trace(priv, UDBG4, "sme_mgt_pmkid: <-- (status=%d)\n", priv->sme_reply.reply_status);
- return convert_sme_error(priv->sme_reply.reply_status);
-}
-
-
-int sme_mgt_mib_get(unifi_priv_t *priv,
- unsigned char *varbind, int *length)
-{
- int r;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_mib_get: invalid smepriv\n");
- return -EIO;
- }
-
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- priv->mib_cfm_buffer = varbind;
- priv->mib_cfm_buffer_length = MAX_VARBIND_LENGTH;
-
- CsrWifiSmeMibGetReqSend(0, *length, varbind);
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r) {
- priv->mib_cfm_buffer_length = 0;
- priv->mib_cfm_buffer = NULL;
- return r;
- }
-
- *length = priv->mib_cfm_buffer_length;
-
- priv->mib_cfm_buffer_length = 0;
- priv->mib_cfm_buffer = NULL;
- unifi_trace(priv, UDBG4, "sme_mgt_mib_get: <-- (status=%d)\n", priv->sme_reply.reply_status);
- return convert_sme_error(priv->sme_reply.reply_status);
-}
-
-int sme_mgt_mib_set(unifi_priv_t *priv,
- unsigned char *varbind, int length)
-{
- int r;
-
- if (priv->smepriv == NULL) {
-        unifi_error(priv, "sme_mgt_mib_set: invalid smepriv\n");
- return -EIO;
- }
-
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiSmeMibSetReqSend(0, length, varbind);
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r)
- return r;
-
- unifi_trace(priv, UDBG4, "sme_mgt_mib_set: <-- (status=%d)\n", priv->sme_reply.reply_status);
- return convert_sme_error(priv->sme_reply.reply_status);
-}
-
-#endif /* CSR_SUPPORT_WEXT */
-
-int sme_mgt_power_config_set(unifi_priv_t *priv, CsrWifiSmePowerConfig *powerConfig)
-{
-#ifdef CSR_SME_USERSPACE
- int r;
-
- if (priv->smepriv == NULL) {
-        unifi_error(priv, "sme_mgt_power_config_set: invalid smepriv\n");
- return -EIO;
- }
-
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiSmePowerConfigSetReqSend(0, *powerConfig);
-
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r)
- return r;
-
- unifi_trace(priv, UDBG4,
-                "sme_mgt_power_config_set: CsrWifiSmePowerConfigSetReq <-- (r=%d status=%d)\n",
- r, priv->sme_reply.reply_status);
- return convert_sme_error(priv->sme_reply.reply_status);
-#else
- CsrResult status;
- if (priv->smepriv == NULL) {
-        unifi_error(priv, "sme_mgt_power_config_set: invalid smepriv\n");
- return -EIO;
- }
- CsrWifiSmeMgtClaimSyncAccess(priv->smepriv);
- status = CsrWifiSmeMgtPowerConfigSetReq(priv->smepriv, *powerConfig);
- CsrWifiSmeMgtReleaseSyncAccess(priv->smepriv);
- return convert_sme_error(status);
-#endif
-}
-
-int sme_mgt_sme_config_set(unifi_priv_t *priv, CsrWifiSmeStaConfig *staConfig, CsrWifiSmeDeviceConfig *deviceConfig)
-{
-#ifdef CSR_SME_USERSPACE
- int r;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_sme_config_set: invalid smepriv\n");
- return -EIO;
- }
-
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiSmeSmeStaConfigSetReqSend(0, CSR_WIFI_INTERFACE_IN_USE, *staConfig);
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r)
- return r;
-
- unifi_trace(priv, UDBG4,
- "sme_mgt_sme_config_set: CsrWifiSmeSmeStaConfigSetReq <-- (r=%d status=%d)\n",
- r, priv->sme_reply.reply_status);
-
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiSmeSmeCommonConfigSetReqSend(0, *deviceConfig);
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r)
- return r;
-
- unifi_trace(priv, UDBG4,
- "sme_mgt_sme_config_set: CsrWifiSmeSmeCommonConfigSetReq <-- (r=%d status=%d)\n",
- r, priv->sme_reply.reply_status);
-
- return convert_sme_error(priv->sme_reply.reply_status);
-#else
- CsrResult status;
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_sme_config_set: invalid smepriv\n");
- return -EIO;
- }
- CsrWifiSmeMgtClaimSyncAccess(priv->smepriv);
- status = CsrWifiSmeMgtSmeConfigSetReq(priv->smepriv, *staConfig);
- status = CsrWifiSmeMgtDeviceConfigSetReq(priv->smepriv, *deviceConfig);
- CsrWifiSmeMgtReleaseSyncAccess(priv->smepriv);
- return convert_sme_error(status);
-#endif
-}
-
-#ifdef CSR_SUPPORT_WEXT
-
-int sme_mgt_mib_config_set(unifi_priv_t *priv, CsrWifiSmeMibConfig *mibConfig)
-{
-#ifdef CSR_SME_USERSPACE
- int r;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_mib_config_set: invalid smepriv\n");
- return -EIO;
- }
-
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiSmeMibConfigSetReqSend(0, *mibConfig);
-
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r)
- return r;
-
- unifi_trace(priv, UDBG4,
- "sme_mgt_mib_config_set: unifi_mgt_set_mib_config_req <-- (r=%d status=%d)\n",
- r, priv->sme_reply.reply_status);
- return convert_sme_error(priv->sme_reply.reply_status);
-#else
- CsrResult status;
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_mib_config_set: invalid smepriv\n");
- return -EIO;
- }
- CsrWifiSmeMgtClaimSyncAccess(priv->smepriv);
- status = CsrWifiSmeMgtMibConfigSetReq(priv->smepriv, *mibConfig);
- CsrWifiSmeMgtReleaseSyncAccess(priv->smepriv);
- return convert_sme_error(status);
-#endif
-}
-
-int sme_mgt_coex_config_set(unifi_priv_t *priv, CsrWifiSmeCoexConfig *coexConfig)
-{
-#ifdef CSR_SME_USERSPACE
- int r;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_coex_config_set: invalid smepriv\n");
- return -EIO;
- }
-
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiSmeCoexConfigSetReqSend(0, *coexConfig);
-
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r)
- return r;
-
- unifi_trace(priv, UDBG4,
-                "sme_mgt_coex_config_set: CsrWifiSmeCoexConfigSetReq <-- (r=%d status=%d)\n",
- r, priv->sme_reply.reply_status);
- return convert_sme_error(priv->sme_reply.reply_status);
-#else
- CsrResult status;
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_coex_config_set: invalid smepriv\n");
- return -EIO;
- }
- CsrWifiSmeMgtClaimSyncAccess(priv->smepriv);
- status = CsrWifiSmeMgtCoexConfigSetReq(priv->smepriv, *coexConfig);
- CsrWifiSmeMgtReleaseSyncAccess(priv->smepriv);
- return convert_sme_error(status);
-#endif
-}
-
-#endif /* CSR_SUPPORT_WEXT */
-
-int sme_mgt_host_config_set(unifi_priv_t *priv, CsrWifiSmeHostConfig *hostConfig)
-{
-#ifdef CSR_SME_USERSPACE
- int r;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_host_config_set: invalid smepriv\n");
- return -EIO;
- }
-
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiSmeHostConfigSetReqSend(0, CSR_WIFI_INTERFACE_IN_USE, *hostConfig);
-
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r)
- return r;
-
- unifi_trace(priv, UDBG4,
- "sme_mgt_host_config_set: unifi_mgt_set_host_config_req <-- (r=%d status=%d)\n",
- r, priv->sme_reply.reply_status);
- return convert_sme_error(priv->sme_reply.reply_status);
-#else
- CsrResult status;
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_host_config_set: invalid smepriv\n");
- return -EIO;
- }
- CsrWifiSmeMgtClaimSyncAccess(priv->smepriv);
- status = CsrWifiSmeMgtHostConfigSetReq(priv->smepriv, *hostConfig);
- CsrWifiSmeMgtReleaseSyncAccess(priv->smepriv);
- return convert_sme_error(status);
-#endif
-}
-
-#ifdef CSR_SUPPORT_WEXT
-
-int sme_mgt_versions_get(unifi_priv_t *priv, CsrWifiSmeVersions *versions)
-{
-#ifdef CSR_SME_USERSPACE
- int r;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_versions_get: invalid smepriv\n");
- return -EIO;
- }
-
- unifi_trace(priv, UDBG4, "sme_mgt_versions_get: unifi_mgt_versions_get_req -->\n");
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiSmeVersionsGetReqSend(0);
-
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r)
- return r;
-
- /* store the reply */
- if (versions != NULL) {
- memcpy((unsigned char*)versions,
- (unsigned char*)&priv->sme_reply.versions,
- sizeof(CsrWifiSmeVersions));
- }
-
- unifi_trace(priv, UDBG4,
- "sme_mgt_versions_get: unifi_mgt_versions_get_req <-- (r=%d status=%d)\n",
- r, priv->sme_reply.reply_status);
-
- return convert_sme_error(priv->sme_reply.reply_status);
-#else
- CsrResult status;
- CsrWifiSmeMgtClaimSyncAccess(priv->smepriv);
- status = CsrWifiSmeMgtVersionsGetReq(priv->smepriv, versions);
- CsrWifiSmeMgtReleaseSyncAccess(priv->smepriv);
- return convert_sme_error(status);
-#endif
-}
-
-#endif /* CSR_SUPPORT_WEXT */
-
-int sme_mgt_power_config_get(unifi_priv_t *priv, CsrWifiSmePowerConfig *powerConfig)
-{
-#ifdef CSR_SME_USERSPACE
- int r;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_power_config_get: invalid smepriv\n");
- return -EIO;
- }
-
- unifi_trace(priv, UDBG4, "sme_mgt_power_config_get: unifi_mgt_power_config_req -->\n");
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiSmePowerConfigGetReqSend(0);
-
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r)
- return r;
-
- /* store the reply */
- if (powerConfig != NULL) {
- memcpy((unsigned char*)powerConfig,
- (unsigned char*)&priv->sme_reply.powerConfig,
- sizeof(CsrWifiSmePowerConfig));
- }
-
- unifi_trace(priv, UDBG4,
-                "sme_mgt_power_config_get: unifi_mgt_power_config_req <-- (r=%d status=%d)\n",
- r, priv->sme_reply.reply_status);
-
- return convert_sme_error(priv->sme_reply.reply_status);
-#else
- CsrResult status;
- CsrWifiSmeMgtClaimSyncAccess(priv->smepriv);
- status = CsrWifiSmeMgtPowerConfigGetReq(priv->smepriv, powerConfig);
- CsrWifiSmeMgtReleaseSyncAccess(priv->smepriv);
- return convert_sme_error(status);
-#endif
-}
-
-int sme_mgt_host_config_get(unifi_priv_t *priv, CsrWifiSmeHostConfig *hostConfig)
-{
-#ifdef CSR_SME_USERSPACE
- int r;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_host_config_get: invalid smepriv\n");
- return -EIO;
- }
-
- unifi_trace(priv, UDBG4, "sme_mgt_host_config_get: unifi_mgt_host_config_get_req -->\n");
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiSmeHostConfigGetReqSend(0, CSR_WIFI_INTERFACE_IN_USE);
-
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r)
- return r;
-
- /* store the reply */
- if (hostConfig != NULL)
- memcpy((unsigned char*)hostConfig,
- (unsigned char*)&priv->sme_reply.hostConfig,
- sizeof(CsrWifiSmeHostConfig));
-
- unifi_trace(priv, UDBG4,
- "sme_mgt_host_config_get: unifi_mgt_host_config_get_req <-- (r=%d status=%d)\n",
- r, priv->sme_reply.reply_status);
-
- return convert_sme_error(priv->sme_reply.reply_status);
-#else
- CsrResult status;
- CsrWifiSmeMgtClaimSyncAccess(priv->smepriv);
- status = CsrWifiSmeMgtHostConfigGetReq(priv->smepriv, hostConfig);
- CsrWifiSmeMgtReleaseSyncAccess(priv->smepriv);
- return convert_sme_error(status);
-#endif
-}
-
-int sme_mgt_sme_config_get(unifi_priv_t *priv, CsrWifiSmeStaConfig *staConfig, CsrWifiSmeDeviceConfig *deviceConfig)
-{
-#ifdef CSR_SME_USERSPACE
- int r;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_sme_config_get: invalid smepriv\n");
- return -EIO;
- }
-
- unifi_trace(priv, UDBG4, "sme_mgt_sme_config_get: unifi_mgt_sme_config_get_req -->\n");
-
- /* Common device config */
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiSmeSmeCommonConfigGetReqSend(0);
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r)
- return r;
-
- /* store the reply */
- if (deviceConfig != NULL)
- memcpy((unsigned char*)deviceConfig,
- (unsigned char*)&priv->sme_reply.deviceConfig,
- sizeof(CsrWifiSmeDeviceConfig));
-
- /* STA config */
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiSmeSmeStaConfigGetReqSend(0, CSR_WIFI_INTERFACE_IN_USE);
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r)
- return r;
-
- /* store the reply */
- if (staConfig != NULL)
- memcpy((unsigned char*)staConfig,
- (unsigned char*)&priv->sme_reply.staConfig,
- sizeof(CsrWifiSmeStaConfig));
-
- unifi_trace(priv, UDBG4,
- "sme_mgt_sme_config_get: unifi_mgt_sme_config_get_req <-- (r=%d status=%d)\n",
- r, priv->sme_reply.reply_status);
-
- return convert_sme_error(priv->sme_reply.reply_status);
-#else
- CsrResult status;
- CsrWifiSmeMgtClaimSyncAccess(priv->smepriv);
- status = CsrWifiSmeMgtSmeConfigGetReq(priv->smepriv, staConfig);
- status = CsrWifiSmeMgtDeviceConfigGetReq(priv->smepriv, deviceConfig);
- CsrWifiSmeMgtReleaseSyncAccess(priv->smepriv);
- return convert_sme_error(status);
-#endif
-}
-
-int sme_mgt_coex_info_get(unifi_priv_t *priv, CsrWifiSmeCoexInfo *coexInfo)
-{
-#ifdef CSR_SME_USERSPACE
- int r;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_coex_info_get: invalid smepriv\n");
- return -EIO;
- }
-
- unifi_trace(priv, UDBG4, "sme_mgt_coex_info_get: unifi_mgt_coex_info_get_req -->\n");
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiSmeCoexInfoGetReqSend(0);
-
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r)
- return r;
-
- /* store the reply */
- if (coexInfo != NULL)
- memcpy((unsigned char*)coexInfo,
- (unsigned char*)&priv->sme_reply.coexInfo,
- sizeof(CsrWifiSmeCoexInfo));
-
- unifi_trace(priv, UDBG4,
- "sme_mgt_coex_info_get: unifi_mgt_coex_info_get_req <-- (r=%d status=%d)\n",
- r, priv->sme_reply.reply_status);
-
- return convert_sme_error(priv->sme_reply.reply_status);
-#else
- CsrResult status;
- CsrWifiSmeMgtClaimSyncAccess(priv->smepriv);
- status = CsrWifiSmeMgtCoexInfoGetReq(priv->smepriv, coexInfo);
- CsrWifiSmeMgtReleaseSyncAccess(priv->smepriv);
- return convert_sme_error(status);
-#endif
-}
-
-#ifdef CSR_SUPPORT_WEXT
-
-int sme_mgt_coex_config_get(unifi_priv_t *priv, CsrWifiSmeCoexConfig *coexConfig)
-{
-#ifdef CSR_SME_USERSPACE
- int r;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_coex_config_get: invalid smepriv\n");
- return -EIO;
- }
-
- unifi_trace(priv, UDBG4, "sme_mgt_coex_config_get: unifi_mgt_coex_config_get_req -->\n");
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiSmeCoexConfigGetReqSend(0);
-
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r)
- return r;
-
- /* store the reply */
- if (coexConfig != NULL)
- memcpy((unsigned char*)coexConfig,
- (unsigned char*)&priv->sme_reply.coexConfig,
- sizeof(CsrWifiSmeCoexConfig));
-
- unifi_trace(priv, UDBG4,
- "sme_mgt_coex_config_get: unifi_mgt_coex_config_get_req <-- (r=%d status=%d)\n",
- r, priv->sme_reply.reply_status);
-
- return convert_sme_error(priv->sme_reply.reply_status);
-#else
- CsrResult status;
- CsrWifiSmeMgtClaimSyncAccess(priv->smepriv);
- status = CsrWifiSmeMgtCoexConfigGetReq(priv->smepriv, coexConfig);
- CsrWifiSmeMgtReleaseSyncAccess(priv->smepriv);
- return convert_sme_error(status);
-#endif
-}
-
-int sme_mgt_mib_config_get(unifi_priv_t *priv, CsrWifiSmeMibConfig *mibConfig)
-{
-#ifdef CSR_SME_USERSPACE
- int r;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_mib_config_get: invalid smepriv\n");
- return -EIO;
- }
-
- unifi_trace(priv, UDBG4, "sme_mgt_mib_config_get: unifi_mgt_mib_config_get_req -->\n");
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiSmeMibConfigGetReqSend(0);
-
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r)
- return r;
-
- /* store the reply */
- if (mibConfig != NULL)
- memcpy((unsigned char*)mibConfig,
- (unsigned char*)&priv->sme_reply.mibConfig,
- sizeof(CsrWifiSmeMibConfig));
-
- unifi_trace(priv, UDBG4,
- "sme_mgt_mib_config_get: unifi_mgt_mib_config_get_req <-- (r=%d status=%d)\n",
- r, priv->sme_reply.reply_status);
-
- return convert_sme_error(priv->sme_reply.reply_status);
-#else
- CsrResult status;
- CsrWifiSmeMgtClaimSyncAccess(priv->smepriv);
- status = CsrWifiSmeMgtMibConfigGetReq(priv->smepriv, mibConfig);
- CsrWifiSmeMgtReleaseSyncAccess(priv->smepriv);
- return convert_sme_error(status);
-#endif
-}
-
-int sme_mgt_connection_info_get(unifi_priv_t *priv, CsrWifiSmeConnectionInfo *connectionInfo)
-{
-#ifdef CSR_SME_USERSPACE
- int r;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_connection_info_get: invalid smepriv\n");
- return -EIO;
- }
-
- unifi_trace(priv, UDBG4, "sme_mgt_connection_info_get: unifi_mgt_connection_info_get_req -->\n");
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiSmeConnectionInfoGetReqSend(0, CSR_WIFI_INTERFACE_IN_USE);
-
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r)
- return r;
-
- /* store the reply */
- if (connectionInfo != NULL)
- memcpy((unsigned char*)connectionInfo,
- (unsigned char*)&priv->sme_reply.connectionInfo,
- sizeof(CsrWifiSmeConnectionInfo));
-
- unifi_trace(priv, UDBG4,
- "sme_mgt_connection_info_get: unifi_mgt_connection_info_get_req <-- (r=%d status=%d)\n",
- r, priv->sme_reply.reply_status);
-
- return convert_sme_error(priv->sme_reply.reply_status);
-#else
- CsrResult status;
- CsrWifiSmeMgtClaimSyncAccess(priv->smepriv);
- status = CsrWifiSmeMgtConnectionInfoGetReq(priv->smepriv, connectionInfo);
- CsrWifiSmeMgtReleaseSyncAccess(priv->smepriv);
- return convert_sme_error(status);
-#endif
-}
-
-int sme_mgt_connection_config_get(unifi_priv_t *priv, CsrWifiSmeConnectionConfig *connectionConfig)
-{
-#ifdef CSR_SME_USERSPACE
- int r;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_connection_config_get: invalid smepriv\n");
- return -EIO;
- }
-
- unifi_trace(priv, UDBG4, "sme_mgt_connection_config_get: unifi_mgt_connection_config_get_req -->\n");
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiSmeConnectionConfigGetReqSend(0, CSR_WIFI_INTERFACE_IN_USE);
-
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r)
- return r;
-
- /* store the reply */
- if (connectionConfig != NULL)
- memcpy((unsigned char*)connectionConfig,
- (unsigned char*)&priv->sme_reply.connectionConfig,
- sizeof(CsrWifiSmeConnectionConfig));
-
- unifi_trace(priv, UDBG4,
- "sme_mgt_connection_config_get: unifi_mgt_connection_config_get_req <-- (r=%d status=%d)\n",
- r, priv->sme_reply.reply_status);
-
- return convert_sme_error(priv->sme_reply.reply_status);
-#else
- CsrResult status;
- CsrWifiSmeMgtClaimSyncAccess(priv->smepriv);
- status = CsrWifiSmeMgtConnectionConfigGetReq(priv->smepriv, connectionConfig);
- CsrWifiSmeMgtReleaseSyncAccess(priv->smepriv);
- return convert_sme_error(status);
-#endif
-}
-
-int sme_mgt_connection_stats_get(unifi_priv_t *priv, CsrWifiSmeConnectionStats *connectionStats)
-{
-#ifdef CSR_SME_USERSPACE
- int r;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_connection_stats_get: invalid smepriv\n");
- return -EIO;
- }
-
- unifi_trace(priv, UDBG4, "sme_mgt_connection_stats_get: unifi_mgt_connection_stats_get_req -->\n");
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiSmeConnectionStatsGetReqSend(0, CSR_WIFI_INTERFACE_IN_USE);
-
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r)
- return r;
-
- /* store the reply */
- if (connectionStats != NULL)
- memcpy((unsigned char*)connectionStats,
- (unsigned char*)&priv->sme_reply.connectionStats,
- sizeof(CsrWifiSmeConnectionStats));
-
- unifi_trace(priv, UDBG4,
- "sme_mgt_connection_stats_get: unifi_mgt_connection_stats_get_req <-- (r=%d status=%d)\n",
- r, priv->sme_reply.reply_status);
-
- return convert_sme_error(priv->sme_reply.reply_status);
-#else
- CsrResult status;
- CsrWifiSmeMgtClaimSyncAccess(priv->smepriv);
- status = CsrWifiSmeMgtConnectionStatsGetReq(priv->smepriv, connectionStats);
- CsrWifiSmeMgtReleaseSyncAccess(priv->smepriv);
- return convert_sme_error(status);
-#endif
-}
-
-#endif /* CSR_SUPPORT_WEXT */
-
-int sme_mgt_packet_filter_set(unifi_priv_t *priv)
-{
- CsrWifiIp4Address ipAddress = {{0xFF, 0xFF, 0xFF, 0xFF }};
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_packet_filter_set: invalid smepriv\n");
- return -EIO;
- }
- if (priv->packet_filters.arp_filter) {
- ipAddress.a[0] = (priv->sta_ip_address ) & 0xFF;
- ipAddress.a[1] = (priv->sta_ip_address >> 8) & 0xFF;
- ipAddress.a[2] = (priv->sta_ip_address >> 16) & 0xFF;
- ipAddress.a[3] = (priv->sta_ip_address >> 24) & 0xFF;
- }
-
- unifi_trace(priv, UDBG5,
- "sme_mgt_packet_filter_set: IP address %d.%d.%d.%d\n",
- ipAddress.a[0], ipAddress.a[1],
- ipAddress.a[2], ipAddress.a[3]);
-
- /* Doesn't block for a confirm */
- CsrWifiSmePacketFilterSetReqSend(0, CSR_WIFI_INTERFACE_IN_USE,
- priv->packet_filters.tclas_ies_length,
- priv->filter_tclas_ies,
- priv->packet_filters.filter_mode,
- ipAddress);
- return 0;
-}
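/*
 * Illustrative sketch only (not part of the deleted file): how the shift/mask
 * extraction in sme_mgt_packet_filter_set() unpacks a 32-bit address into
 * four octets.  The concrete value below assumes the first dotted octet sits
 * in the least significant byte, as the indexing above implies; the variable
 * names are hypothetical.
 */
#include <stdio.h>

int main(void)
{
    unsigned int sta_ip_address = 0x0100A8C0; /* 192.168.0.1 under that assumption */
    unsigned char a[4];

    a[0] = sta_ip_address & 0xFF;         /* 192 */
    a[1] = (sta_ip_address >> 8) & 0xFF;  /* 168 */
    a[2] = (sta_ip_address >> 16) & 0xFF; /* 0   */
    a[3] = (sta_ip_address >> 24) & 0xFF; /* 1   */

    printf("%d.%d.%d.%d\n", a[0], a[1], a[2], a[3]);
    return 0;
}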
-
-int sme_mgt_tspec(unifi_priv_t *priv, CsrWifiSmeListAction action,
- u32 tid, CsrWifiSmeDataBlock *tspec, CsrWifiSmeDataBlock *tclas)
-{
- int r;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_mgt_tspec: invalid smepriv\n");
- return -EIO;
- }
-
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiSmeTspecReqSend(0, CSR_WIFI_INTERFACE_IN_USE,
- action, tid, TRUE, 0,
- tspec->length, tspec->data,
- tclas->length, tclas->data);
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r)
- return r;
-
- unifi_trace(priv, UDBG4, "sme_mgt_tspec: <-- (status=%d)\n", priv->sme_reply.reply_status);
- return convert_sme_error(priv->sme_reply.reply_status);
-}
-
-
-
-int sme_sys_suspend(unifi_priv_t *priv)
-{
- int r;
- CsrResult csrResult;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_sys_suspend: invalid smepriv\n");
- return -EIO;
- }
-
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- /* Suspend the SME, which MAY cause it to power down UniFi */
- CsrWifiRouterCtrlSuspendIndSend(priv->CSR_WIFI_SME_IFACEQUEUE,0, 0, priv->wol_suspend);
- r = sme_wait_for_reply(priv, UNIFI_SME_SYS_LONG_TIMEOUT);
- if (r) {
- /* No reply - forcibly power down in case the request wasn't processed */
- unifi_notice(priv,
-                     "suspend: SME did not reply, %s\n",
-                     (priv->ptest_mode || priv->wol_suspend) ? "leave powered" : "power off UniFi anyway");
-
- /* Leave power on for production test, though */
- if (!priv->ptest_mode) {
- /* Put UniFi to deep sleep, in case we can not power it off */
- CsrSdioClaim(priv->sdio);
- unifi_trace(priv, UDBG1, "Force deep sleep");
- csrResult = unifi_force_low_power_mode(priv->card);
-
- /* For WOL, the UniFi must stay powered */
- if (!priv->wol_suspend) {
- unifi_trace(priv, UDBG1, "Power off\n");
- CsrSdioPowerOff(priv->sdio);
- }
- CsrSdioRelease(priv->sdio);
- }
- }
-
- if (priv->wol_suspend) {
- unifi_trace(priv, UDBG1, "UniFi left powered for WOL\n");
-
- /* Remove the IRQ, which also disables the card SDIO interrupt.
- * Disabling the card SDIO interrupt enables the PIO WOL source.
-         * Removal of the handler ensures that in both SDIO and PIO cases
- * the card interrupt only wakes the host. The card will be polled
- * after resume to handle any pending data.
- */
- if (csr_sdio_linux_remove_irq(priv->sdio)) {
- unifi_notice(priv, "WOL csr_sdio_linux_remove_irq failed\n");
- }
-
- if (enable_wol == UNIFI_WOL_SDIO) {
- /* Because csr_sdio_linux_remove_irq() disabled the card SDIO interrupt,
- * it must be left enabled to wake-on-SDIO.
- */
- unifi_trace(priv, UDBG1, "Enable card SDIO interrupt for SDIO WOL\n");
-
- CsrSdioClaim(priv->sdio);
- csrResult = CsrSdioInterruptEnable(priv->sdio);
- CsrSdioRelease(priv->sdio);
-
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(priv, "WOL CsrSdioInterruptEnable failed %d\n", csrResult);
- }
- } else {
- unifi_trace(priv, UDBG1, "Disabled card SDIO interrupt for PIO WOL\n");
- }
-
- /* Prevent the BH thread from running during the suspend.
- * Upon resume, sme_sys_resume() will trigger a wifi-on, this will cause
- * the BH thread to be re-enabled and reinstall the ISR.
- */
- priv->bh_thread.block_thread = 1;
-
- unifi_trace(priv, UDBG1, "unifi_suspend: suspended BH");
- }
-
- /* Consider UniFi to be uninitialised */
- priv->init_progress = UNIFI_INIT_NONE;
-
- unifi_trace(priv, UDBG1, "sme_sys_suspend: <-- (r=%d status=%d)\n", r, priv->sme_reply.reply_status);
- return convert_sme_error(priv->sme_reply.reply_status);
-}
-
-
-int sme_sys_resume(unifi_priv_t *priv)
-{
- int r;
-
- unifi_trace(priv, UDBG1, "sme_sys_resume %s\n", priv->wol_suspend ? "warm" : "");
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_sys_resume: invalid smepriv\n");
- return -EIO;
- }
-
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiRouterCtrlResumeIndSend(priv->CSR_WIFI_SME_IFACEQUEUE,0, priv->wol_suspend);
-
- r = sme_wait_for_reply(priv, UNIFI_SME_SYS_LONG_TIMEOUT);
- if (r)
- unifi_notice(priv,
- "resume: SME did not reply, return success anyway\n");
-
- return 0;
-}
-
-#ifdef CSR_SUPPORT_WEXT_AP
-int sme_ap_stop(unifi_priv_t *priv,u16 interface_tag)
-{
- int r;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_ap_stop: invalid smepriv\n");
- return -EIO;
- }
-
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiNmeApStopReqSend(0,interface_tag);
-
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r)
- return r;
-
- unifi_trace(priv, UDBG4,
- "sme_ap_stop <-- (r=%d status=%d)\n",
- r, priv->sme_reply.reply_status);
- return convert_sme_error(priv->sme_reply.reply_status);
-
-}
-
-int sme_ap_start(unifi_priv_t *priv,u16 interface_tag,
- CsrWifiSmeApConfig_t * ap_config)
-{
- int r;
- CsrWifiSmeApP2pGoConfig p2p_go_param;
- memset(&p2p_go_param,0,sizeof(CsrWifiSmeApP2pGoConfig));
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_ap_start: invalid smepriv\n");
- return -EIO;
- }
-
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiNmeApStartReqSend(0,interface_tag,CSR_WIFI_AP_TYPE_LEGACY,FALSE,
- ap_config->ssid,1,ap_config->channel,
- ap_config->credentials,ap_config->max_connections,
- p2p_go_param,FALSE);
-
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r)
- return r;
-
- unifi_trace(priv, UDBG4,
- "sme_ap_start <-- (r=%d status=%d)\n",
- r, priv->sme_reply.reply_status);
- return convert_sme_error(priv->sme_reply.reply_status);
-}
-
-int sme_ap_config(unifi_priv_t *priv,
- CsrWifiSmeApMacConfig *ap_mac_config,
- CsrWifiNmeApConfig *group_security_config)
-{
- int r;
- CsrWifiSmeApP2pGoConfig p2p_go_param;
- memset(&p2p_go_param,0,sizeof(CsrWifiSmeApP2pGoConfig));
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_ap_config: invalid smepriv\n");
- return -EIO;
- }
-
- r = sme_init_request(priv);
- if (r)
- return -EIO;
-
- CsrWifiNmeApConfigSetReqSend(0,*group_security_config,
- *ap_mac_config);
-
- r = sme_wait_for_reply(priv, UNIFI_SME_MGT_SHORT_TIMEOUT);
- if (r)
- return r;
-
- unifi_trace(priv, UDBG4,
- "sme_ap_config <-- (r=%d status=%d)\n",
- r, priv->sme_reply.reply_status);
- return convert_sme_error(priv->sme_reply.reply_status);
-}
-#endif
diff --git a/drivers/staging/csr/sme_mgt.c b/drivers/staging/csr/sme_mgt.c
deleted file mode 100644
index 58d1b3b..0000000
--- a/drivers/staging/csr/sme_mgt.c
+++ /dev/null
@@ -1,1012 +0,0 @@
-/*
- * ---------------------------------------------------------------------------
- * FILE: sme_mgt.c
- *
- * PURPOSE:
- * This file contains the driver specific implementation of
- * the SME MGT SAP.
- * It is part of the porting exercise.
- *
- * Copyright (C) 2008-2009 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ---------------------------------------------------------------------------
- */
-
-#include "csr_wifi_hip_unifiversion.h"
-#include "unifi_priv.h"
-#include "csr_wifi_hip_conversions.h"
-/*
- * This file implements the SME MGT API. It contains the following functions:
- * CsrWifiSmeWifiFlightmodeCfmSend()
- * CsrWifiSmeWifiOnCfmSend()
- * CsrWifiSmeWifiOffCfmSend()
- * CsrWifiSmeWifiOffIndSend()
- * CsrWifiSmeScanFullCfmSend()
- * CsrWifiSmeScanResultsGetCfmSend()
- * CsrWifiSmeScanResultIndSend()
- * CsrWifiSmeScanResultsFlushCfmSend()
- * CsrWifiSmeConnectCfmSend()
- * CsrWifiSmeMediaStatusIndSend()
- * CsrWifiSmeDisconnectCfmSend()
- * CsrWifiSmeKeyCfmSend()
- * CsrWifiSmeMulticastAddressCfmSend()
- * CsrWifiSmeSetValueCfmSend()
- * CsrWifiSmeGetValueCfmSend()
- * CsrWifiSmeMicFailureIndSend()
- * CsrWifiSmePmkidCfmSend()
- * CsrWifiSmePmkidCandidateListIndSend()
- * CsrWifiSmeMibSetCfmSend()
- * CsrWifiSmeMibGetCfmSend()
- * CsrWifiSmeMibGetNextCfmSend()
- * CsrWifiSmeConnectionQualityIndSend()
- * CsrWifiSmePacketFilterSetCfmSend()
- * CsrWifiSmeTspecCfmSend()
- * CsrWifiSmeTspecIndSend()
- * CsrWifiSmeBlacklistCfmSend()
- * CsrWifiSmeEventMaskSetCfmSend()
- * CsrWifiSmeRoamStartIndSend()
- * CsrWifiSmeRoamCompleteIndSend()
- * CsrWifiSmeAssociationStartIndSend()
- * CsrWifiSmeAssociationCompleteIndSend()
- * CsrWifiSmeIbssStationIndSend()
- */
-
-
-void CsrWifiSmeMicFailureIndHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeMicFailureInd* ind = (CsrWifiSmeMicFailureInd*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeMicFailureIndSend: invalid priv\n");
- return;
- }
-
- unifi_trace(priv, UDBG1,
- "CsrWifiSmeMicFailureIndSend: count=%d, KeyType=%d\n",
- ind->count, ind->keyType);
-
- wext_send_michaelmicfailure_event(priv, ind->count, ind->address, ind->keyType, ind->interfaceTag);
-#endif
-}
-
-
-void CsrWifiSmePmkidCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmePmkidCfm* cfm = (CsrWifiSmePmkidCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmePmkidCfmSend: Invalid ospriv.\n");
- return;
- }
-
- /*
-     * WEXT never does a GET operation on the PMKIDs, so we don't need
-     * to handle the data returned in pmkids.
- */
-
- sme_complete_request(priv, cfm->status);
-#endif
-}
-
-
-void CsrWifiSmePmkidCandidateListIndHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmePmkidCandidateListInd* ind = (CsrWifiSmePmkidCandidateListInd*)msg;
- int i;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "CsrWifiSmePmkidCandidateListIndSend: invalid smepriv\n");
- return;
- }
-
- for (i = 0; i < ind->pmkidCandidatesCount; i++)
- {
- wext_send_pmkid_candidate_event(priv, ind->pmkidCandidates[i].bssid, ind->pmkidCandidates[i].preAuthAllowed, ind->interfaceTag);
- }
-#endif
-}
-
-void CsrWifiSmeScanResultsFlushCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-void CsrWifiSmeScanResultsGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeScanResultsGetCfm* cfm = (CsrWifiSmeScanResultsGetCfm*)msg;
- int bytesRequired = cfm->scanResultsCount * sizeof(CsrWifiSmeScanResult);
- int i;
- u8* current_buff;
- CsrWifiSmeScanResult* scanCopy;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeScanResultsGetCfmSend: Invalid ospriv.\n");
- return;
- }
-
-    /* Calculate the size of the buffer required */
- for (i = 0; i < cfm->scanResultsCount; ++i) {
- const CsrWifiSmeScanResult *scan_result = &cfm->scanResults[i];
- bytesRequired += scan_result->informationElementsLength;
- }
-
-    /* Take a copy of the scan results */
-    scanCopy = kmalloc(bytesRequired, GFP_KERNEL);
-    if (scanCopy == NULL) {
-        unifi_error(priv, "CsrWifiSmeScanResultsGetCfmSend: failed to allocate scan results copy\n");
-        sme_complete_request(priv, cfm->status);
-        return;
-    }
-    memcpy(scanCopy, cfm->scanResults, sizeof(CsrWifiSmeScanResult) * cfm->scanResultsCount);
-
- /* Take a Copy of the Info Elements AND update the scan result pointers */
- current_buff = (u8*)&scanCopy[cfm->scanResultsCount];
- for (i = 0; i < cfm->scanResultsCount; ++i)
- {
- CsrWifiSmeScanResult *scan_result = &scanCopy[i];
- memcpy(current_buff, scan_result->informationElements, scan_result->informationElementsLength);
- scan_result->informationElements = current_buff;
- current_buff += scan_result->informationElementsLength;
- }
-
- priv->sme_reply.reply_scan_results_count = cfm->scanResultsCount;
- priv->sme_reply.reply_scan_results = scanCopy;
-
- sme_complete_request(priv, cfm->status);
-#endif
-}
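/*
 * Illustrative sketch only (not driver code): the single-allocation pattern
 * used above -- a fixed-size array of records followed by their
 * variable-length payloads in one buffer, with each record's pointer fixed
 * up to point into the trailing blob, so one kfree()/free() releases
 * everything.  Types and names here are simplified stand-ins.
 */
#include <stdlib.h>
#include <string.h>

struct record {
    unsigned char *data;
    size_t len;
};

struct record *flatten(const struct record *src, size_t count)
{
    size_t bytes = count * sizeof(struct record);
    size_t i;
    struct record *copy;
    unsigned char *blob;

    for (i = 0; i < count; i++)
        bytes += src[i].len;

    copy = malloc(bytes);
    if (copy == NULL)
        return NULL;

    /* Copy the record headers, then the payloads, fixing up pointers */
    memcpy(copy, src, count * sizeof(struct record));
    blob = (unsigned char *)&copy[count];
    for (i = 0; i < count; i++) {
        memcpy(blob, src[i].data, src[i].len);
        copy[i].data = blob;
        blob += src[i].len;
    }
    return copy; /* free() releases records and payloads together */
}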
-
-
-void CsrWifiSmeScanFullCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeScanFullCfm* cfm = (CsrWifiSmeScanFullCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeScanFullCfmSend: Invalid ospriv.\n");
- return;
- }
-
- sme_complete_request(priv, cfm->status);
-#endif
-}
-
-
-void CsrWifiSmeScanResultIndHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-
-}
-
-
-void CsrWifiSmeConnectCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeConnectCfm* cfm = (CsrWifiSmeConnectCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeConnectCfmSend: Invalid ospriv.\n");
- return;
- }
-
- sme_complete_request(priv, cfm->status);
-#endif
-}
-
-
-void CsrWifiSmeDisconnectCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeDisconnectCfm* cfm = (CsrWifiSmeDisconnectCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeDisconnectCfmSend: Invalid ospriv.\n");
- return;
- }
-
- sme_complete_request(priv, cfm->status);
-#endif
-}
-
-
-void CsrWifiSmeKeyCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeKeyCfm* cfm = (CsrWifiSmeKeyCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeKeyCfmSend: Invalid ospriv.\n");
- return;
- }
-
- sme_complete_request(priv, cfm->status);
-#endif
-}
-
-
-void CsrWifiSmeMulticastAddressCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeMulticastAddressCfm* cfm = (CsrWifiSmeMulticastAddressCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeMulticastAddressCfmSend: Invalid ospriv.\n");
- return;
- }
-
- sme_complete_request(priv, cfm->status);
-#endif
-}
-
-void CsrWifiSmeWifiFlightmodeCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeWifiFlightmodeCfm* cfm = (CsrWifiSmeWifiFlightmodeCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeWifiFlightmodeCfmSend: Invalid ospriv.\n");
- return;
- }
-
- sme_complete_request(priv, cfm->status);
-#endif
-}
-
-void CsrWifiSmeWifiOnCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeWifiOnCfm* cfm = (CsrWifiSmeWifiOnCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeWifiOnCfmSend: Invalid ospriv.\n");
- return;
- }
-
- unifi_trace(priv, UDBG4,
- "CsrWifiSmeWifiOnCfmSend: wake up status %d\n", cfm->status);
-#ifdef CSR_SUPPORT_WEXT_AP
- sme_complete_request(priv, cfm->status);
-#endif
-
-#endif
-}
-
-void CsrWifiSmeWifiOffCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeWifiOffCfm* cfm = (CsrWifiSmeWifiOffCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeWifiOffCfmSend: Invalid ospriv.\n");
- return;
- }
-
- sme_complete_request(priv, cfm->status);
-#endif
-}
-
-
-void CsrWifiSmeWifiOffIndHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeWifiOffInd* ind = (CsrWifiSmeWifiOffInd*)msg;
-
- if (priv == NULL) {
-        unifi_error(NULL, "CsrWifiSmeWifiOffIndSend: Invalid ospriv.\n");
- return;
- }
-
- if (priv->smepriv == NULL) {
-        unifi_error(priv, "CsrWifiSmeWifiOffIndSend: invalid smepriv\n");
- return;
- }
-
- /*
- * If the status indicates an error, the SME is in a stopped state.
- * We need to start it again in order to reinitialise UniFi.
- */
- switch (ind->reason) {
- case CSR_WIFI_SME_CONTROL_INDICATION_ERROR:
- unifi_trace(priv, UDBG1,
-                    "CsrWifiSmeWifiOffIndSend: Restarting SME (ind:%d)\n",
- ind->reason);
-
- /* On error, restart the SME */
- sme_mgt_wifi_on(priv);
- break;
- case CSR_WIFI_SME_CONTROL_INDICATION_EXIT:
-#ifdef CSR_SUPPORT_WEXT_AP
- sme_complete_request(priv, 0);
-#endif
- break;
- default:
- break;
- }
-
-#endif
-}
-
-void CsrWifiSmeVersionsGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeVersionsGetCfm* cfm = (CsrWifiSmeVersionsGetCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeVersionsGetCfmSend: Invalid ospriv.\n");
- return;
- }
-
- priv->sme_reply.versions = cfm->versions;
- sme_complete_request(priv, cfm->status);
-#endif
-}
-
-void CsrWifiSmePowerConfigGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmePowerConfigGetCfm* cfm = (CsrWifiSmePowerConfigGetCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmePowerConfigGetCfmSend: Invalid ospriv.\n");
- return;
- }
-
- priv->sme_reply.powerConfig = cfm->powerConfig;
- sme_complete_request(priv, cfm->status);
-}
-
-void CsrWifiSmeHostConfigGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeHostConfigGetCfm* cfm = (CsrWifiSmeHostConfigGetCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeHostConfigGetCfmSend: Invalid ospriv.\n");
- return;
- }
-
- priv->sme_reply.hostConfig = cfm->hostConfig;
- sme_complete_request(priv, cfm->status);
-}
-
-void CsrWifiSmeCoexInfoGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeCoexInfoGetCfm* cfm = (CsrWifiSmeCoexInfoGetCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeCoexInfoGetCfmSend: Invalid ospriv.\n");
- return;
- }
-
- priv->sme_reply.coexInfo = cfm->coexInfo;
- sme_complete_request(priv, cfm->status);
-}
-
-void CsrWifiSmeCoexConfigGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeCoexConfigGetCfm* cfm = (CsrWifiSmeCoexConfigGetCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeCoexConfigGetCfmSend: Invalid ospriv.\n");
- return;
- }
-
- priv->sme_reply.coexConfig = cfm->coexConfig;
- sme_complete_request(priv, cfm->status);
-#endif
-}
-
-void CsrWifiSmeMibConfigGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeMibConfigGetCfm* cfm = (CsrWifiSmeMibConfigGetCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeMibConfigGetCfmSend: Invalid ospriv.\n");
- return;
- }
-
- priv->sme_reply.mibConfig = cfm->mibConfig;
- sme_complete_request(priv, cfm->status);
-#endif
-}
-
-void CsrWifiSmeConnectionInfoGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeConnectionInfoGetCfm* cfm = (CsrWifiSmeConnectionInfoGetCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeConnectionInfoGetCfmSend: Invalid ospriv.\n");
- return;
- }
-
- priv->sme_reply.connectionInfo = cfm->connectionInfo;
- sme_complete_request(priv, cfm->status);
-#endif
-}
-
-void CsrWifiSmeConnectionConfigGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeConnectionConfigGetCfm* cfm = (CsrWifiSmeConnectionConfigGetCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeConnectionConfigGetCfmSend: Invalid ospriv.\n");
- return;
- }
-
- priv->sme_reply.connectionConfig = cfm->connectionConfig;
- sme_complete_request(priv, cfm->status);
-#endif
-}
-
-void CsrWifiSmeConnectionStatsGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeConnectionStatsGetCfm* cfm = (CsrWifiSmeConnectionStatsGetCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeConnectionStatsGetCfmSend: Invalid ospriv.\n");
- return;
- }
-
- priv->sme_reply.connectionStats = cfm->connectionStats;
- sme_complete_request(priv, cfm->status);
-#endif
-}
-
-void CsrWifiSmeMibSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeMibSetCfm* cfm = (CsrWifiSmeMibSetCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeMibSetCfmSend: Invalid ospriv.\n");
- return;
- }
-
- sme_complete_request(priv, cfm->status);
-#endif
-}
-
-void CsrWifiSmeMibGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeMibGetCfm* cfm = (CsrWifiSmeMibGetCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeMibGetCfmSend: Invalid ospriv.\n");
- return;
- }
-
- if (cfm->mibAttribute == NULL) {
- unifi_error(priv, "CsrWifiSmeMibGetCfmSend: Empty reply.\n");
- sme_complete_request(priv, cfm->status);
- return;
- }
-
- if ((priv->mib_cfm_buffer != NULL) &&
- (priv->mib_cfm_buffer_length >= cfm->mibAttributeLength)) {
- memcpy(priv->mib_cfm_buffer, cfm->mibAttribute, cfm->mibAttributeLength);
- priv->mib_cfm_buffer_length = cfm->mibAttributeLength;
- } else {
- unifi_error(priv,
- "CsrWifiSmeMibGetCfmSend: No room to store MIB data (have=%d need=%d).\n",
- priv->mib_cfm_buffer_length, cfm->mibAttributeLength);
- }
-
- sme_complete_request(priv, cfm->status);
-#endif
-}
-
-void CsrWifiSmeMibGetNextCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeMibGetNextCfm* cfm = (CsrWifiSmeMibGetNextCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeMibGetNextCfmSend: Invalid ospriv.\n");
- return;
- }
-
- /* Need to copy MIB data */
- sme_complete_request(priv, cfm->status);
-#endif
-}
-
-void CsrWifiSmeConnectionQualityIndHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeConnectionQualityInd* ind = (CsrWifiSmeConnectionQualityInd*)msg;
- int signal, noise, snr;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeConnectionQualityIndSend: Invalid ospriv.\n");
- return;
- }
-
- /*
- * level and noise below are clipped to the range [-192, 63] and then
- * mapped into an unsigned 8 bit number. The mapping simply adds 0x100
- * to the value if it is negative.
- */
- signal = ind->linkQuality.unifiRssi;
- /* Clip range of snr */
- snr = (ind->linkQuality.unifiSnr > 0) ? ind->linkQuality.unifiSnr : 0; /* In dB relative, from 0 - 255 */
- snr = (snr < 255) ? snr : 255;
- noise = signal - snr;
-
- /* Clip range of signal */
- signal = (signal < 63) ? signal : 63;
- signal = (signal > -192) ? signal : -192;
-
- /* Clip range of noise */
- noise = (noise < 63) ? noise : 63;
- noise = (noise > -192) ? noise : -192;
-
- /* Make u8 */
- signal = ( signal < 0 ) ? signal + 0x100 : signal;
- noise = ( noise < 0 ) ? noise + 0x100 : noise;
-
- priv->wext_wireless_stats.qual.level = (u8)signal; /* -192 : 63 */
- priv->wext_wireless_stats.qual.noise = (u8)noise; /* -192 : 63 */
- priv->wext_wireless_stats.qual.qual = snr; /* 0 : 255 */
- priv->wext_wireless_stats.qual.updated = 0;
-
-#if WIRELESS_EXT > 16
- priv->wext_wireless_stats.qual.updated |= IW_QUAL_LEVEL_UPDATED |
- IW_QUAL_NOISE_UPDATED |
- IW_QUAL_QUAL_UPDATED;
-#if WIRELESS_EXT > 18
- priv->wext_wireless_stats.qual.updated |= IW_QUAL_DBM;
-#endif
-#endif
-#endif
-}
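The clip-and-wrap arithmetic in the handler above is compact but easy to misread, so here is the same mapping pulled out into a tiny standalone helper. This is an illustrative sketch only; the helper name is not part of the driver, which performs the operations inline as shown above.

/* Illustrative sketch of the signed-dBm to u8 mapping used above. */
static u8 wext_qual_to_u8(int value)
{
    /* Clip to [-192, 63] ... */
    if (value > 63)
        value = 63;
    if (value < -192)
        value = -192;

    /* ... then wrap negative values into the unsigned 8-bit space. */
    if (value < 0)
        value += 0x100;

    return (u8)value;
}

/* Example: an RSSI of -70 dBm becomes 0x100 - 70 = 186 (0xBA). */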
-
-void CsrWifiSmePacketFilterSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmePacketFilterSetCfmSend: Invalid ospriv.\n");
- return;
- }
-
- /* The packet filter set request does not block for a reply */
-}
-
-void CsrWifiSmeTspecCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeTspecCfm* cfm = (CsrWifiSmeTspecCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeTspecCfmSend: Invalid ospriv.\n");
- return;
- }
-
- sme_complete_request(priv, cfm->status);
-}
-
-void CsrWifiSmeTspecIndHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-void CsrWifiSmeBlacklistCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-void CsrWifiSmeEventMaskSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-
-void CsrWifiSmeRoamStartIndHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-void CsrWifiSmeRoamCompleteIndHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- /* This is called when the association completes, before any 802.1x authentication */
-}
-
-void CsrWifiSmeAssociationStartIndHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-void CsrWifiSmeAssociationCompleteIndHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-void CsrWifiSmeIbssStationIndHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-void CsrWifiSmeWifiOnIndHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-void CsrWifiSmeRestrictedAccessEnableCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-void CsrWifiSmeRestrictedAccessDisableCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-
-void CsrWifiSmeAdhocConfigGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-void CsrWifiSmeAdhocConfigSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeAdhocConfigSetCfm* cfm = (CsrWifiSmeAdhocConfigSetCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeSmeConfigSetCfmSend: Invalid ospriv.\n");
- return;
- }
-
- sme_complete_request(priv, cfm->status);
-#endif
-}
-
-void CsrWifiSmeCalibrationDataGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-void CsrWifiSmeCalibrationDataSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeCalibrationDataSetCfm* cfm = (CsrWifiSmeCalibrationDataSetCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeSmeConfigSetCfmSend: Invalid ospriv.\n");
- return;
- }
-
- sme_complete_request(priv, cfm->status);
-#endif
-}
-
-void CsrWifiSmeCcxConfigGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-void CsrWifiSmeCcxConfigSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeCcxConfigSetCfm* cfm = (CsrWifiSmeCcxConfigSetCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeSmeConfigSetCfmSend: Invalid ospriv.\n");
- return;
- }
-
- sme_complete_request(priv, cfm->status);
-#endif
-}
-
-void CsrWifiSmeCloakedSsidsGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-void CsrWifiSmeCloakedSsidsSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeCloakedSsidsSetCfm* cfm = (CsrWifiSmeCloakedSsidsSetCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeSmeConfigSetCfmSend: Invalid ospriv.\n");
- return;
- }
-
- sme_complete_request(priv, cfm->status);
-#endif
-}
-
-
-void CsrWifiSmeCoexConfigSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeCoexConfigSetCfm* cfm = (CsrWifiSmeCoexConfigSetCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeSmeConfigSetCfmSend: Invalid ospriv.\n");
- return;
- }
-
- sme_complete_request(priv, cfm->status);
-#endif
-}
-
-void CsrWifiSmeHostConfigSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeHostConfigSetCfm* cfm = (CsrWifiSmeHostConfigSetCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeSmeConfigSetCfmSend: Invalid ospriv.\n");
- return;
- }
-
- sme_complete_request(priv, cfm->status);
-}
-
-void CsrWifiSmeLinkQualityGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-
-void CsrWifiSmeMibConfigSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeMibConfigSetCfm* cfm = (CsrWifiSmeMibConfigSetCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeSmeConfigSetCfmSend: Invalid ospriv.\n");
- return;
- }
-
- sme_complete_request(priv, cfm->status);
-#endif
-}
-
-void CsrWifiSmePermanentMacAddressGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-void CsrWifiSmePowerConfigSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmePowerConfigSetCfm* cfm = (CsrWifiSmePowerConfigSetCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeSmeConfigSetCfmSend: Invalid ospriv.\n");
- return;
- }
-
- sme_complete_request(priv, cfm->status);
-}
-
-void CsrWifiSmeRegulatoryDomainInfoGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-void CsrWifiSmeRoamingConfigGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-void CsrWifiSmeMediaStatusIndHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeMediaStatusInd* ind = (CsrWifiSmeMediaStatusInd*)msg;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "CsrWifiSmeMediaStatusIndSend: invalid smepriv\n");
- return;
- }
-
- if (ind->mediaStatus == CSR_WIFI_SME_MEDIA_STATUS_CONNECTED) {
- /*
- * Send wireless-extension event up to userland to announce
- * connection.
- */
- wext_send_assoc_event(priv,
- (unsigned char *)ind->connectionInfo.bssid.a,
- (unsigned char *)ind->connectionInfo.assocReqInfoElements,
- ind->connectionInfo.assocReqInfoElementsLength,
- (unsigned char *)ind->connectionInfo.assocRspInfoElements,
- ind->connectionInfo.assocRspInfoElementsLength,
- (unsigned char *)ind->connectionInfo.assocScanInfoElements,
- ind->connectionInfo.assocScanInfoElementsLength);
-
- unifi_trace(priv, UDBG2, "CsrWifiSmeMediaStatusIndSend: IBSS=%pM\n",
- ind->connectionInfo.bssid.a);
-
- sme_mgt_packet_filter_set(priv);
-
- } else {
- /*
- * Send wireless-extension event up to userland to announce
- * connection lost to a BSS.
- */
- wext_send_disassoc_event(priv);
- }
-#endif
-}
-
-void CsrWifiSmeRoamingConfigSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeRoamingConfigSetCfm* cfm = (CsrWifiSmeRoamingConfigSetCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeRoamingConfigSetCfmSend: Invalid ospriv.\n");
- return;
- }
-
- sme_complete_request(priv, cfm->status);
-}
-
-void CsrWifiSmeScanConfigGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-void CsrWifiSmeScanConfigSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_SUPPORT_WEXT
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeScanConfigSetCfm* cfm = (CsrWifiSmeScanConfigSetCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeSmeConfigSetCfmSend: Invalid ospriv.\n");
- return;
- }
-
- sme_complete_request(priv, cfm->status);
-#endif
-}
-
-void CsrWifiSmeStationMacAddressGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-void CsrWifiSmeSmeCommonConfigGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeSmeCommonConfigGetCfm* cfm = (CsrWifiSmeSmeCommonConfigGetCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeSmeCommonConfigGetCfmSend: Invalid ospriv.\n");
- return;
- }
-
- priv->sme_reply.deviceConfig = cfm->deviceConfig;
- sme_complete_request(priv, cfm->status);
-}
-
-void CsrWifiSmeSmeStaConfigGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeSmeStaConfigGetCfm* cfm = (CsrWifiSmeSmeStaConfigGetCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeSmeStaConfigGetCfmSend: Invalid ospriv.\n");
- return;
- }
-
- priv->sme_reply.staConfig = cfm->smeConfig;
- sme_complete_request(priv, cfm->status);
-}
-
-void CsrWifiSmeSmeCommonConfigSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeSmeCommonConfigSetCfm* cfm = (CsrWifiSmeSmeCommonConfigSetCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeSmeCommonConfigGetCfmSend: Invalid ospriv.\n");
- return;
- }
-
- sme_complete_request(priv, cfm->status);
-}
-
-void CsrWifiSmeSmeStaConfigSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiSmeSmeStaConfigSetCfm* cfm = (CsrWifiSmeSmeStaConfigSetCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiSmeSmeStaConfigGetCfmSend: Invalid ospriv.\n");
- return;
- }
-
- sme_complete_request(priv, cfm->status);
-}
-
-void CsrWifiSmeGetInterfaceCapabilityCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-void CsrWifiSmeErrorIndHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-void CsrWifiSmeInfoIndHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-void CsrWifiSmeCoreDumpIndHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-void CsrWifiSmeAmpStatusChangeIndHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-void CsrWifiSmeActivateCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-void CsrWifiSmeDeactivateCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-#ifdef CSR_SUPPORT_WEXT
-#ifdef CSR_SUPPORT_WEXT_AP
-void CsrWifiNmeApStartCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiNmeApStartCfm* cfm = (CsrWifiNmeApStartCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiNmeApStartCfmSend: Invalid ospriv.\n");
- return;
- }
-
- sme_complete_request(priv, cfm->status);
-}
-
-void CsrWifiNmeApStopCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiNmeApStopCfm* cfm = (CsrWifiNmeApStopCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiNmeApStopCfmSend: Invalid ospriv.\n");
- return;
- }
-
- sme_complete_request(priv, cfm->status);
-}
-
-void CsrWifiNmeApConfigSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiNmeApConfigSetCfm* cfm = (CsrWifiNmeApConfigSetCfm*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiNmeApConfigSetCfmSend: Invalid ospriv.\n");
- return;
- }
- sme_complete_request(priv, cfm->status);
-}
-#endif
-#endif
diff --git a/drivers/staging/csr/sme_native.c b/drivers/staging/csr/sme_native.c
deleted file mode 100644
index ca55249..0000000
--- a/drivers/staging/csr/sme_native.c
+++ /dev/null
@@ -1,566 +0,0 @@
-/*
- * ***************************************************************************
- *
- * FILE: sme_native.c
- *
- * Copyright (C) 2005-2009 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ***************************************************************************
- */
-
-#include <linux/netdevice.h>
-#include "unifi_priv.h"
-#include "csr_wifi_hip_unifi.h"
-#include "csr_wifi_hip_conversions.h"
-
-static const unsigned char wildcard_address[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
-
-int
-uf_sme_init(unifi_priv_t *priv)
-{
- sema_init(&priv->mlme_blocking_mutex, 1);
-
-#ifdef CSR_SUPPORT_WEXT
- {
- int r = uf_init_wext_interface(priv);
- if (r != 0) {
- return r;
- }
- }
-#endif
-
- return 0;
-} /* uf_sme_init() */
-
-
-void
-uf_sme_deinit(unifi_priv_t *priv)
-{
-
- /* Free memory allocated for the scan table */
-/* unifi_clear_scan_table(priv); */
-
- /* Cancel any pending workqueue tasks */
- flush_workqueue(priv->unifi_workqueue);
-
-#ifdef CSR_SUPPORT_WEXT
- uf_deinit_wext_interface(priv);
-#endif
-
-} /* uf_sme_deinit() */
-
-
-int sme_mgt_wifi_on(unifi_priv_t *priv)
-{
- int r,i;
- s32 csrResult;
-
- if (priv == NULL) {
- return -EINVAL;
- }
- /* Initialize the interface mode to None */
- for (i=0; i<CSR_WIFI_NUM_INTERFACES; i++) {
- priv->interfacePriv[i]->interfaceMode = 0;
- }
-
- /* Set up interface mode so that get_packet_priority() can
- * select the right QOS priority when WMM is enabled.
- */
- priv->interfacePriv[0]->interfaceMode = CSR_WIFI_ROUTER_CTRL_MODE_STA;
-
- r = uf_request_firmware_files(priv, UNIFI_FW_STA);
- if (r) {
- unifi_error(priv, "sme_mgt_wifi_on: Failed to get f/w\n");
- return r;
- }
-
- /*
- * The request to initialise UniFi might come while UniFi is running.
- * We need to block all I/O activity until the reset completes, otherwise
- * an SDIO error might occur, resulting in an indication to the SME which
- * makes it think that the initialisation has failed.
- */
- priv->bh_thread.block_thread = 1;
-
- /* Power on UniFi */
- CsrSdioClaim(priv->sdio);
- csrResult = CsrSdioPowerOn(priv->sdio);
- CsrSdioRelease(priv->sdio);
- if(csrResult != CSR_RESULT_SUCCESS && csrResult != CSR_SDIO_RESULT_NOT_RESET) {
- return -EIO;
- }
-
- if (csrResult == CSR_RESULT_SUCCESS) {
- /* Initialise UniFi hardware */
- r = uf_init_hw(priv);
- if (r) {
- return r;
- }
- }
-
- /* Re-enable the I/O thread */
- priv->bh_thread.block_thread = 0;
-
- /* Disable deep sleep signalling during the firmware initialisation, to
- * prevent the wakeup mechanism raising the SDIO clock beyond INIT before
- * the first MLME-RESET.ind. It gets re-enabled at the CONNECTED.ind,
- * immediately after the MLME-RESET.ind
- */
- csrResult = unifi_configure_low_power_mode(priv->card,
- UNIFI_LOW_POWER_DISABLED,
- UNIFI_PERIODIC_WAKE_HOST_DISABLED);
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_warning(priv,
- "sme_mgt_wifi_on: unifi_configure_low_power_mode() returned an error\n");
- }
-
-
- /* Start the I/O thread */
- CsrSdioClaim(priv->sdio);
- r = uf_init_bh(priv);
- if (r) {
- CsrSdioPowerOff(priv->sdio);
- CsrSdioRelease(priv->sdio);
- return r;
- }
- CsrSdioRelease(priv->sdio);
-
- priv->init_progress = UNIFI_INIT_FW_DOWNLOADED;
-
- return 0;
-}
-
-int
-sme_sys_suspend(unifi_priv_t *priv)
-{
- const int interfaceNum = 0; /* FIXME */
- CsrResult csrResult;
-
- /* Abort any pending requests. */
- uf_abort_mlme(priv);
-
- /* Allow our mlme request to go through. */
- priv->io_aborted = 0;
-
- /* Send MLME-RESET.req to UniFi. */
- unifi_reset_state(priv, priv->netdev[interfaceNum]->dev_addr, 0);
-
- /* Stop the network traffic */
- netif_carrier_off(priv->netdev[interfaceNum]);
-
- /* Put UniFi to deep sleep */
- CsrSdioClaim(priv->sdio);
- csrResult = unifi_force_low_power_mode(priv->card);
- CsrSdioRelease(priv->sdio);
-
- return 0;
-} /* sme_sys_suspend() */
-
-
-int
-sme_sys_resume(unifi_priv_t *priv)
-{
-#ifdef CSR_SUPPORT_WEXT
- /* Send disconnect event so clients will re-initialise connection. */
- memset(priv->wext_conf.current_ssid, 0, UNIFI_MAX_SSID_LEN);
- memset((void*)priv->wext_conf.current_bssid, 0, ETH_ALEN);
- priv->wext_conf.capability = 0;
- wext_send_disassoc_event(priv);
-#endif
- return 0;
-} /* sme_sys_resume() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * sme_native_log_event
- *
- * Callback function to be registered as the SME event callback.
- * Copies the signal content into a new udi_log_t struct and adds
- * it to the read queue for the SME client.
- *
- * Arguments:
- * pcli The value given to unifi_add_udi_hook, in
- * this case a pointer to the client instance.
- * sig_packed Pointer to the received packed signal.
- * sig_len Size of the signal structure in bytes.
- * bulkdata Pointers to any associated bulk data.
- * dir Direction of the signal. Zero means from host,
- * non-zero means to host.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-void
-sme_native_log_event(ul_client_t *pcli,
- const u8 *sig_packed, int sig_len,
- const bulk_data_param_t *bulkdata,
- int dir)
-{
- unifi_priv_t *priv;
- udi_log_t *logptr;
- u8 *p;
- int i, r;
- int signal_len;
- int total_len;
- udi_msg_t *msgptr;
- CSR_SIGNAL signal;
- ul_client_t *client = pcli;
-
- if (client == NULL) {
- unifi_error(NULL, "sme_native_log_event: client has exited\n");
- return;
- }
-
- priv = uf_find_instance(client->instance);
- if (!priv) {
- unifi_error(priv, "invalid priv\n");
- return;
- }
-
- /* Just a sanity check */
- if ((sig_packed == NULL) || (sig_len <= 0)) {
- return;
- }
-
- /* Get the unpacked signal */
- r = read_unpack_signal(sig_packed, &signal);
- if (r == 0) {
- signal_len = SigGetSize(&signal);
- } else {
- u16 receiver_id = CSR_GET_UINT16_FROM_LITTLE_ENDIAN((sig_packed) + sizeof(u16)) & 0xFF00;
-
- /* The control indications are 1 byte, pass them to client. */
- if (sig_len == 1) {
- unifi_trace(priv, UDBG5,
- "Control indication (0x%x) for native SME.\n",
- *sig_packed);
-
- *(u8*)&signal = *sig_packed;
- signal_len = sig_len;
- } else if (receiver_id == 0) {
- /*
- * Also "unknown" signals with a ReceiverId of 0 are passed to the client
- * without unpacking. (This is a code size optimisation to allow signals
- * that the driver is not interested in to be dropped from the unpack code).
- */
- unifi_trace(priv, UDBG5,
- "Signal 0x%.4X with ReceiverId 0 for native SME.\n",
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN(sig_packed));
-
- *(u8*)&signal = *sig_packed;
- signal_len = sig_len;
- } else {
- unifi_error(priv,
- "sme_native_log_event - Received unknown signal 0x%.4X.\n",
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN(sig_packed));
- return;
- }
- }
-
- unifi_trace(priv, UDBG3, "sme_native_log_event: signal 0x%.4X for %d\n",
- signal.SignalPrimitiveHeader.SignalId,
- client->client_id);
-
- total_len = signal_len;
- /* Calculate the buffer we need to store signal plus bulk data */
- for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; i++) {
- total_len += bulkdata->d[i].data_length;
- }
-
- /* Allocate log structure plus actual signal. */
- logptr = kmalloc(sizeof(udi_log_t) + total_len, GFP_KERNEL);
-
- if (logptr == NULL) {
- unifi_error(priv,
- "Failed to allocate %d bytes for a UDI log record\n",
- sizeof(udi_log_t) + total_len);
- return;
- }
-
- /* Fill in udi_log struct */
- INIT_LIST_HEAD(&logptr->q);
- msgptr = &logptr->msg;
- msgptr->length = sizeof(udi_msg_t) + total_len;
- msgptr->timestamp = jiffies_to_msecs(jiffies);
- msgptr->direction = dir;
- msgptr->signal_length = signal_len;
-
- /* Copy signal and bulk data to the log */
- p = (u8 *)(msgptr + 1);
- memcpy(p, &signal, signal_len);
- p += signal_len;
-
- /* Append any bulk data */
- for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; i++) {
- int len = bulkdata->d[i].data_length;
-
- /*
- * Len here might not be the same as the length in the bulk data slot.
- * The slot length will always be even, but len could be odd.
- */
- if (len > 0) {
- if (bulkdata->d[i].os_data_ptr) {
- memcpy(p, bulkdata->d[i].os_data_ptr, len);
- } else {
- memset(p, 0, len);
- }
- p += len;
- }
- }
-
- /* Add to tail of log queue */
- down(&client->udi_sem);
- list_add_tail(&logptr->q, &client->udi_log);
- up(&client->udi_sem);
-
- /* Wake any waiting user process */
- wake_up_interruptible(&client->udi_wq);
-
-} /* sme_native_log_event() */
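The record built above is a single contiguous allocation: the udi_log_t header, the unpacked signal, then each non-empty bulk-data slot. Below is a sketch of how a reader of the UDI queue could walk one record, using only the udi_msg_t fields that the function above fills in; the helper name is illustrative and not part of the driver.

/* Illustrative walk over a record queued by sme_native_log_event(). */
static void udi_log_walk_example(const udi_msg_t *msgptr)
{
    /* The unpacked signal starts immediately after the message header. */
    const u8 *signal = (const u8 *)(msgptr + 1);

    /* Any bulk data follows the signal; its total size is whatever is
     * left of msgptr->length after the header and the signal. */
    const u8 *bulk = signal + msgptr->signal_length;
    unsigned int bulk_len = msgptr->length - sizeof(udi_msg_t) - msgptr->signal_length;

    (void)bulk;
    (void)bulk_len;
}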
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_ta_indicate_protocol
- *
- * Report that a packet of a particular type has been seen
- *
- * Arguments:
- * drv_priv The device context pointer passed to ta_init.
- * protocol The protocol type enum value.
- * direction Whether the packet was a tx or rx.
- * src_addr The source MAC address from the data packet.
- *
- * Returns:
- * None.
- *
- * Notes:
- * We defer the actual sending to a background workqueue,
- * see uf_ta_ind_wq().
- * ---------------------------------------------------------------------------
- */
-void
-unifi_ta_indicate_protocol(void *ospriv,
- CsrWifiRouterCtrlTrafficPacketType packet_type,
- CsrWifiRouterCtrlProtocolDirection direction,
- const CsrWifiMacAddress *src_addr)
-{
-
-} /* unifi_ta_indicate_protocol */
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_ta_indicate_sampling
- *
- * Send the TA sampling information to the SME.
- *
- * Arguments:
- * drv_priv The device context pointer passed to ta_init.
- * stats The TA sampling data to send.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-void
-unifi_ta_indicate_sampling(void *ospriv, CsrWifiRouterCtrlTrafficStats *stats)
-{
-
-} /* unifi_ta_indicate_sampling() */
-
-
-void
-unifi_ta_indicate_l4stats(void *ospriv,
- u32 rxTcpThroughput,
- u32 txTcpThroughput,
- u32 rxUdpThroughput,
- u32 txUdpThroughput)
-{
-
-} /* unifi_ta_indicate_l4stats() */
-
-/*
- * ---------------------------------------------------------------------------
- * uf_native_process_udi_signal
- *
- * Process interesting signals from the UDI interface.
- *
- * Arguments:
- * pcli A pointer to the client instance.
- * signal Pointer to the received signal.
- * signal_len Size of the signal structure in bytes.
- * bulkdata Pointers to any associated bulk data.
- * dir Direction of the signal. Zero means from host,
- * non-zero means to host.
- *
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-void
-uf_native_process_udi_signal(ul_client_t *pcli,
- const u8 *packed_signal, int packed_signal_len,
- const bulk_data_param_t *bulkdata, int dir)
-{
-
-} /* uf_native_process_udi_signal() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * sme_native_mlme_event_handler
- *
- * Callback function to be used as the udi_event_callback when registering
- * as a client.
- * This function implements a blocking request-reply interface for WEXT.
- * To use it, a client specifies this function as the udi_event_callback
- * to ul_register_client(). The signal dispatcher in
- * unifi_receive_event() will call this function to deliver a signal.
- *
- * Arguments:
- * pcli Pointer to the client instance.
- * sig_packed Pointer to the received packed signal.
- * sig_len Size of the signal structure in bytes.
- * bulkdata Pointer to structure containing any associated bulk data.
- * dir Direction of the signal. Zero means from host,
- * non-zero means to host.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-void
-sme_native_mlme_event_handler(ul_client_t *pcli,
- const u8 *sig_packed, int sig_len,
- const bulk_data_param_t *bulkdata,
- int dir)
-{
- CSR_SIGNAL signal;
- int signal_len;
- unifi_priv_t *priv = uf_find_instance(pcli->instance);
- int id, r;
-
- /* Just a sanity check */
- if ((sig_packed == NULL) || (sig_len <= 0)) {
- return;
- }
-
- /* Get the unpacked signal */
- r = read_unpack_signal(sig_packed, &signal);
- if (r == 0) {
- signal_len = SigGetSize(&signal);
- } else {
- unifi_error(priv,
- "sme_native_mlme_event_handler - Received unknown signal 0x%.4X.\n",
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN(sig_packed));
- return;
- }
-
- id = signal.SignalPrimitiveHeader.SignalId;
- unifi_trace(priv, UDBG4, "wext - Process signal 0x%.4X\n", id);
-
- /*
- * Take the appropriate action for the signal.
- */
- switch (id) {
- /*
- * Confirm replies from UniFi.
- * These all have zero or one CSR_DATAREF member. (FIXME: check this is still true for softmac)
- */
- case CSR_MA_PACKET_CONFIRM_ID:
- case CSR_MLME_RESET_CONFIRM_ID:
- case CSR_MLME_GET_CONFIRM_ID:
- case CSR_MLME_SET_CONFIRM_ID:
- case CSR_MLME_GET_NEXT_CONFIRM_ID:
- case CSR_MLME_POWERMGT_CONFIRM_ID:
- case CSR_MLME_SCAN_CONFIRM_ID:
- case CSR_MLME_HL_SYNC_CONFIRM_ID:
- case CSR_MLME_MEASURE_CONFIRM_ID:
- case CSR_MLME_SETKEYS_CONFIRM_ID:
- case CSR_MLME_DELETEKEYS_CONFIRM_ID:
- case CSR_MLME_HL_SYNC_CANCEL_CONFIRM_ID:
- case CSR_MLME_ADD_PERIODIC_CONFIRM_ID:
- case CSR_MLME_DEL_PERIODIC_CONFIRM_ID:
- case CSR_MLME_ADD_AUTONOMOUS_SCAN_CONFIRM_ID:
- case CSR_MLME_DEL_AUTONOMOUS_SCAN_CONFIRM_ID:
- case CSR_MLME_SET_PACKET_FILTER_CONFIRM_ID:
- case CSR_MLME_STOP_MEASURE_CONFIRM_ID:
- case CSR_MLME_PAUSE_AUTONOMOUS_SCAN_CONFIRM_ID:
- case CSR_MLME_ADD_TRIGGERED_GET_CONFIRM_ID:
- case CSR_MLME_DEL_TRIGGERED_GET_CONFIRM_ID:
- case CSR_MLME_ADD_BLACKOUT_CONFIRM_ID:
- case CSR_MLME_DEL_BLACKOUT_CONFIRM_ID:
- case CSR_MLME_ADD_RX_TRIGGER_CONFIRM_ID:
- case CSR_MLME_DEL_RX_TRIGGER_CONFIRM_ID:
- case CSR_MLME_CONNECT_STATUS_CONFIRM_ID:
- case CSR_MLME_MODIFY_BSS_PARAMETER_CONFIRM_ID:
- case CSR_MLME_ADD_TEMPLATE_CONFIRM_ID:
- case CSR_MLME_CONFIG_QUEUE_CONFIRM_ID:
- case CSR_MLME_ADD_TSPEC_CONFIRM_ID:
- case CSR_MLME_DEL_TSPEC_CONFIRM_ID:
- case CSR_MLME_START_AGGREGATION_CONFIRM_ID:
- case CSR_MLME_STOP_AGGREGATION_CONFIRM_ID:
- case CSR_MLME_SM_START_CONFIRM_ID:
- case CSR_MLME_LEAVE_CONFIRM_ID:
- case CSR_MLME_SET_TIM_CONFIRM_ID:
- case CSR_MLME_GET_KEY_SEQUENCE_CONFIRM_ID:
- case CSR_MLME_SET_CHANNEL_CONFIRM_ID:
- case CSR_MLME_ADD_MULTICAST_ADDRESS_CONFIRM_ID:
- case CSR_DEBUG_GENERIC_CONFIRM_ID:
- unifi_mlme_copy_reply_and_wakeup_client(pcli, &signal, signal_len, bulkdata);
- break;
-
- case CSR_MLME_CONNECTED_INDICATION_ID:
- /* We currently ignore the connected-ind for softmac f/w development */
- unifi_info(priv, "CSR_MLME_CONNECTED_INDICATION_ID ignored\n");
- break;
-
- default:
- break;
- }
-
-} /* sme_native_mlme_event_handler() */
-
-
-
-/*
- * -------------------------------------------------------------------------
- * unifi_reset_state
- *
- * Ensure that a MAC address has been set.
- * Send the MLME-RESET signal.
- * This must be called at least once before starting to do any
- * network activities (e.g. scan, join etc).
- *
- * Arguments:
- * priv Pointer to device private context struct
- * macaddr Pointer to chip MAC address.
- * If this is FF:FF:FF:FF:FF:FF it will be replaced
- * with the MAC address from the chip.
- * set_default_mib 1 if the f/w must reset the MIB to the default values
- * 0 otherwise
- *
- * Returns:
- * 0 on success, an error code otherwise.
- * -------------------------------------------------------------------------
- */
-int
-unifi_reset_state(unifi_priv_t *priv, unsigned char *macaddr,
- unsigned char set_default_mib)
-{
- int r = 0;
-
-#ifdef CSR_SUPPORT_WEXT
- /* The reset clears any 802.11 association. */
- priv->wext_conf.flag_associated = 0;
-#endif
-
- return r;
-} /* unifi_reset_state() */
-
diff --git a/drivers/staging/csr/sme_sys.c b/drivers/staging/csr/sme_sys.c
deleted file mode 100644
index b1151a2..0000000
--- a/drivers/staging/csr/sme_sys.c
+++ /dev/null
@@ -1,3260 +0,0 @@
-/*
- * ---------------------------------------------------------------------------
- * FILE: sme_sys.c
- *
- * PURPOSE:
- * Driver specific implementation of the SME SYS SAP.
- * It is part of the porting exercise.
- *
- * Copyright (C) 2008-2011 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ---------------------------------------------------------------------------
- */
-
-#include "csr_wifi_hip_unifiversion.h"
-#include "unifi_priv.h"
-#include "csr_wifi_hip_conversions.h"
-#ifdef CSR_SUPPORT_WEXT_AP
-#include "csr_wifi_sme_sef.h"
-#endif
-
-/*
- * This file implements the SME SYS API and contains the following functions:
- * CsrWifiRouterCtrlMediaStatusReqHandler()
- * CsrWifiRouterCtrlHipReqHandler()
- * CsrWifiRouterCtrlPortConfigureReqHandler()
- * CsrWifiRouterCtrlWifiOnReqHandler()
- * CsrWifiRouterCtrlWifiOffReqHandler()
- * CsrWifiRouterCtrlSuspendResHandler()
- * CsrWifiRouterCtrlResumeResHandler()
- * CsrWifiRouterCtrlQosControlReqHandler()
- * CsrWifiRouterCtrlConfigurePowerModeReqHandler()
- * CsrWifiRouterCtrlWifiOnResHandler()
- * CsrWifiRouterCtrlWifiOffRspHandler()
- * CsrWifiRouterCtrlMulticastAddressResHandler()
- * CsrWifiRouterCtrlTrafficConfigReqHandler()
- * CsrWifiRouterCtrlTrafficClassificationReqHandler()
- * CsrWifiRouterCtrlTclasAddReqHandler()
- * CsrWifiRouterCtrlTclasDelReqHandler()
- * CsrWifiRouterCtrlSetModeReqHandler()
- * CsrWifiRouterCtrlWapiMulticastFilterReqHandler()
- * CsrWifiRouterCtrlWapiUnicastFilterReqHandler()
- * CsrWifiRouterCtrlWapiUnicastTxPktReqHandler()
- * CsrWifiRouterCtrlWapiRxPktReqHandler()
- * CsrWifiRouterCtrlWapiFilterReqHandler()
- */
-
-#ifdef CSR_SUPPORT_SME
-static void check_inactivity_timer_expire_func(unsigned long data);
-void uf_send_disconnected_ind_wq(struct work_struct *work);
-#endif
-
-void send_auto_ma_packet_confirm(unifi_priv_t *priv,
- netInterface_priv_t *interfacePriv,
- struct list_head *buffered_frames_list)
-{
- tx_buffered_packets_t *buffered_frame_item = NULL;
- struct list_head *listHead;
- struct list_head *placeHolder;
- int client_id;
-
- CSR_SIGNAL unpacked_signal;
- u8 sigbuf[UNIFI_PACKED_SIGBUF_SIZE];
- u16 packed_siglen;
-
-
- list_for_each_safe(listHead, placeHolder, buffered_frames_list)
- {
- buffered_frame_item = list_entry(listHead, tx_buffered_packets_t, q);
-
- if(!buffered_frame_item) {
- unifi_error(priv, "Entry should exist, otherwise it is a (BUG)\n");
- continue;
- }
-
- if ((interfacePriv->interfaceMode != CSR_WIFI_ROUTER_CTRL_MODE_NONE) &&
- (priv->wifi_on_state == wifi_on_done))
- {
-
- unifi_warning(priv, "Send MA_PACKET_CONFIRM to SenderProcessId = %x for (HostTag = %x TransmissionControl = %x)\n",
- (buffered_frame_item->leSenderProcessId),
- buffered_frame_item->hostTag,
- buffered_frame_item->transmissionControl);
-
- client_id = buffered_frame_item->leSenderProcessId & 0xFF00;
-
- if (client_id == priv->sme_cli->sender_id)
- {
- /* construct a MA-PACKET.confirm message for SME */
- memset(&unpacked_signal, 0, sizeof(unpacked_signal));
- unpacked_signal.SignalPrimitiveHeader.SignalId = CSR_MA_PACKET_CONFIRM_ID;
- unpacked_signal.SignalPrimitiveHeader.ReceiverProcessId = buffered_frame_item->leSenderProcessId;
- unpacked_signal.SignalPrimitiveHeader.SenderProcessId = CSR_WIFI_ROUTER_IFACEQUEUE;
-
- unpacked_signal.u.MaPacketConfirm.VirtualInterfaceIdentifier = uf_get_vif_identifier(interfacePriv->interfaceMode,
- interfacePriv->InterfaceTag);
- unpacked_signal.u.MaPacketConfirm.TransmissionStatus = CSR_RESULT_FAILURE;
- unpacked_signal.u.MaPacketConfirm.RetryCount = 0;
- unpacked_signal.u.MaPacketConfirm.Rate = buffered_frame_item->rate;
- unpacked_signal.u.MaPacketConfirm.HostTag = buffered_frame_item->hostTag;
-
- write_pack(&unpacked_signal, sigbuf, &packed_siglen);
- unifi_warning(priv, "MA_PACKET_CONFIRM for SME (0x%x, 0x%x, 0x%x, 0x%x)\n",
- unpacked_signal.SignalPrimitiveHeader.ReceiverProcessId,
- unpacked_signal.SignalPrimitiveHeader.SenderProcessId,
- unpacked_signal.u.MaPacketConfirm.VirtualInterfaceIdentifier,
- unpacked_signal.u.MaPacketConfirm.HostTag);
-
- CsrWifiRouterCtrlHipIndSend(priv->CSR_WIFI_SME_IFACEQUEUE,
- packed_siglen,
- (u8 *)sigbuf,
- 0, NULL,
- 0, NULL);
- }
- else if((buffered_frame_item->hostTag & 0x80000000))
- {
- /* construct a MA-PACKET.confirm message for NME */
- unifi_warning(priv, "MA_PACKET_CONFIRM for NME (0x%x, 0x%x, 0x%x, 0x%x)\n",
- buffered_frame_item->leSenderProcessId,
- buffered_frame_item->interfaceTag,
- buffered_frame_item->transmissionControl,
- (buffered_frame_item->hostTag & 0x3FFFFFFF));
-
- CsrWifiRouterMaPacketCfmSend((buffered_frame_item->leSenderProcessId & 0xFF),
- buffered_frame_item->interfaceTag,
- CSR_RESULT_FAILURE,
- (buffered_frame_item->hostTag & 0x3FFFFFFF),
- buffered_frame_item->rate);
-
- }
- else
- {
- unifi_warning(priv, "Buffered packet dropped without sending a confirm\n");
- }
-
- }
-
- list_del(listHead);
- kfree(buffered_frame_item);
- buffered_frame_item = NULL;
- }
-}
-
-void CsrWifiRouterCtrlMediaStatusReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiRouterCtrlMediaStatusReq* req = (CsrWifiRouterCtrlMediaStatusReq*)msg;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[req->interfaceTag];
- unsigned long flags;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "CsrWifiRouterCtrlMediaStatusReqHandler: invalid smepriv\n");
- return;
- }
- if (req->interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "CsrWifiRouterCtrlMediaStatusReqHandler: invalid interfaceTag\n");
- return;
- }
- unifi_trace(priv, UDBG3, "CsrWifiRouterCtrlMediaStatusReqHandler: Mode = %d req->mediaStatus = %d\n",interfacePriv->interfaceMode,req->mediaStatus);
- if (interfacePriv->interfaceMode != CSR_WIFI_ROUTER_CTRL_MODE_AMP) {
- bulk_data_desc_t bulk_data;
-
- bulk_data.data_length = 0;
-
- spin_lock_irqsave(&priv->m4_lock, flags);
- if (interfacePriv->m4_bulk_data.data_length > 0) {
- bulk_data = interfacePriv->m4_bulk_data;
- interfacePriv->m4_bulk_data.net_buf_length = 0;
- interfacePriv->m4_bulk_data.data_length = 0;
- interfacePriv->m4_bulk_data.os_data_ptr = interfacePriv->m4_bulk_data.os_net_buf_ptr = NULL;
- }
- spin_unlock_irqrestore(&priv->m4_lock, flags);
-
- if (bulk_data.data_length != 0) {
- unifi_trace(priv, UDBG5, "CsrWifiRouterCtrlMediaStatusReqHandler: free M4\n");
- unifi_net_data_free(priv, &bulk_data);
- }
-
- if ((req->mediaStatus == CSR_WIFI_SME_MEDIA_STATUS_CONNECTED) &&
- (interfacePriv->connected != UnifiConnected)) {
-
- switch(interfacePriv->interfaceMode){
- case CSR_WIFI_ROUTER_CTRL_MODE_AP:
- case CSR_WIFI_ROUTER_CTRL_MODE_P2PGO:
- interfacePriv->connected = UnifiConnected;
- netif_carrier_on(priv->netdev[req->interfaceTag]);
-#ifdef CSR_SUPPORT_WEXT
- wext_send_started_event(priv);
-#endif
- unifi_trace(priv, UDBG1,
- "CsrWifiRouterCtrlMediaStatusReqHandler: AP/P2PGO setting netif_carrier_on\n");
- netif_tx_wake_all_queues(priv->netdev[req->interfaceTag]);
- break;
-
- default:
-#ifdef CSR_SUPPORT_WEXT
- /* In the WEXT builds (sme and native), the userspace is not ready
- * to process any EAPOL or WAPI packets, until it has been informed
- * of the NETDEV_CHANGE.
- */
- if (interfacePriv->netdev_callback_registered && (interfacePriv->interfaceMode != CSR_WIFI_ROUTER_CTRL_MODE_P2PCLI)) {
- interfacePriv->wait_netdev_change = TRUE;
- unifi_trace(priv, UDBG1,
- "CsrWifiRouterCtrlMediaStatusReqHandler: waiting for NETDEV_CHANGE\n");
- /*
- * The carrier may only be turned on after wait_netdev_change is set to TRUE.
- * Otherwise there can be a race in uf_netdev_event().
- */
- netif_carrier_on(priv->netdev[req->interfaceTag]);
- unifi_trace(priv, UDBG1,
- "CsrWifiRouterCtrlMediaStatusReqHandler: STA/P2PCLI setting netif_carrier_on\n");
- }
- else
-#endif
- {
- /* In the NME build, the userspace does not wait for the NETDEV_CHANGE
- * so it is ready to process all the EAPOL or WAPI packets.
- * At this point, we enable all the Tx queues, and we indicate any packets
- * that are queued (and the respective port is opened).
- */
- static const CsrWifiMacAddress broadcast_address = {{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}};
- interfacePriv->connected = UnifiConnected;
- unifi_trace(priv, UDBG1,
- "CsrWifiRouterMediaStatusReqHandler: UnifiConnected && netif_carrier_on\n");
- netif_carrier_on(priv->netdev[req->interfaceTag]);
- netif_tx_wake_all_queues(priv->netdev[req->interfaceTag]);
- uf_process_rx_pending_queue(priv, UF_UNCONTROLLED_PORT_Q, broadcast_address, 1, interfacePriv->InterfaceTag);
- uf_process_rx_pending_queue(priv, UF_CONTROLLED_PORT_Q, broadcast_address, 1, interfacePriv->InterfaceTag);
- }
- break;
- }
- }
-
- if (req->mediaStatus == CSR_WIFI_SME_MEDIA_STATUS_DISCONNECTED) {
-#ifdef CSR_SUPPORT_WEXT
- unifi_trace(priv, UDBG1,
- "CsrWifiRouterMediaStatusReqHandler: cancel waiting for NETDEV_CHANGE\n");
- interfacePriv->wait_netdev_change = FALSE;
-#endif
- unifi_trace(priv, UDBG1,
- "CsrWifiRouterMediaStatusReqHandler: setting netif_carrier_off\n");
- netif_carrier_off(priv->netdev[req->interfaceTag]);
-#ifdef CSR_SUPPORT_WEXT
- switch(interfacePriv->interfaceMode){
- case CSR_WIFI_ROUTER_CTRL_MODE_AP:
- case CSR_WIFI_ROUTER_CTRL_MODE_P2PGO:
- wext_send_started_event(priv);
- break;
- default:
- break;
- }
-#endif
- interfacePriv->connected = UnifiNotConnected;
- }
- } else {
- /* For AMP, just update the L2 connected flag */
- if (req->mediaStatus == CSR_WIFI_SME_MEDIA_STATUS_CONNECTED) {
- unifi_trace(priv, UDBG1, "CsrWifiRouterCtrlMediaStatusReqHandler: AMP connected\n");
- interfacePriv->connected = UnifiConnected;
- } else {
- unifi_trace(priv, UDBG1, "CsrWifiRouterCtrlMediaStatusReqHandler: AMP disconnected\n");
- interfacePriv->connected = UnifiNotConnected;
- }
- }
-}
-
-
-void CsrWifiRouterCtrlHipReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiRouterCtrlHipReq* hipreq = (CsrWifiRouterCtrlHipReq*)msg;
- bulk_data_param_t bulkdata;
- u8 *signal_ptr;
- int signal_length;
- int r=0;
- void *dest;
- CsrResult csrResult;
- CSR_SIGNAL *signal;
- u16 interfaceTag = 0;
- CSR_MA_PACKET_REQUEST *req;
- netInterface_priv_t *interfacePriv;
-
- if (priv == NULL) {
- return;
- }
- if (priv->smepriv == NULL) {
- unifi_error(priv, "CsrWifiRouterCtrlHipReqHandler: invalid smepriv\n");
- return;
- }
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "CsrWifiRouterCtrlHipReqHandler: invalid interfaceTag\n");
- return;
- }
-
- interfacePriv = priv->interfacePriv[interfaceTag];
-
- /* Initialize bulkdata to avoid os_net_buf being garbage */
- memset(&bulkdata, 0, sizeof(bulk_data_param_t));
-
- signal = (CSR_SIGNAL *)hipreq->mlmeCommand;
-
- unifi_trace(priv, UDBG4, "CsrWifiRouterCtrlHipReqHandler: 0x04%X ---->\n",
- *((u16*)hipreq->mlmeCommand));
-
- /* Construct the signal. */
- signal_ptr = (u8*)hipreq->mlmeCommand;
- signal_length = hipreq->mlmeCommandLength;
-
- /*
- * The MSB of the sender ID needs to be set to the client ID.
- * The LSB is controlled by the SME.
- */
- signal_ptr[5] = (priv->sme_cli->sender_id >> 8) & 0xff;
-
- /* Allocate buffers for the bulk data. */
- if (hipreq->dataRef1Length) {
- csrResult = unifi_net_data_malloc(priv, &bulkdata.d[0], hipreq->dataRef1Length);
- if (csrResult == CSR_RESULT_SUCCESS) {
- dest = (void*)bulkdata.d[0].os_data_ptr;
- memcpy(dest, hipreq->dataRef1, hipreq->dataRef1Length);
- bulkdata.d[0].data_length = hipreq->dataRef1Length;
- } else {
- unifi_warning(priv, "signal not sent down, allocation failed in CsrWifiRouterCtrlHipReqHandler\n");
- return;
- }
- } else {
- bulkdata.d[0].os_data_ptr = NULL;
- bulkdata.d[0].data_length = 0;
- }
- if (hipreq->dataRef2Length) {
- csrResult = unifi_net_data_malloc(priv, &bulkdata.d[1], hipreq->dataRef2Length);
- if (csrResult == CSR_RESULT_SUCCESS) {
- dest = (void*)bulkdata.d[1].os_data_ptr;
- memcpy(dest, hipreq->dataRef2, hipreq->dataRef2Length);
- bulkdata.d[1].data_length = hipreq->dataRef2Length;
- } else {
- if (bulkdata.d[0].data_length)
- {
- unifi_net_data_free(priv, &bulkdata.d[0]);
- }
- unifi_warning(priv, "signal not sent down, allocation failed in CsrWifiRouterCtrlHipReqHandler\n");
- return;
- }
- } else {
- bulkdata.d[1].os_data_ptr = NULL;
- bulkdata.d[1].data_length = 0;
- }
-
- unifi_trace(priv, UDBG3, "SME SEND: Signal 0x%.4X \n",
- *((u16*)signal_ptr));
- if (signal->SignalPrimitiveHeader.SignalId == CSR_MA_PACKET_REQUEST_ID)
- {
- CSR_SIGNAL unpacked_signal;
- read_unpack_signal((u8 *) signal, &unpacked_signal);
- req = &unpacked_signal.u.MaPacketRequest;
- interfaceTag = req->VirtualInterfaceIdentifier & 0xff;
- switch(interfacePriv->interfaceMode)
- {
- case CSR_WIFI_ROUTER_CTRL_MODE_NONE:
- unifi_error(priv, "CsrWifiRouterCtrlHipReqHandler: invalid mode: NONE \n");
- break;
- default:
- unifi_trace(priv, UDBG5, "mode is %x\n", interfacePriv->interfaceMode);
- }
- /* While sending, ensure that the top two bits (b31 and b30) are 00; they are used for local routing. */
- r = uf_process_ma_packet_req(priv, req->Ra.x, (req->HostTag & 0x3FFFFFFF), interfaceTag,
- req->TransmissionControl, req->TransmitRate,
- req->Priority, signal->SignalPrimitiveHeader.SenderProcessId,
- &bulkdata);
- if (r)
- {
- if (bulkdata.d[0].data_length)
- {
- unifi_net_data_free(priv, &bulkdata.d[0]);
- }
- if (bulkdata.d[1].data_length)
- {
- unifi_net_data_free(priv, &bulkdata.d[1]);
- }
- }
- } else {
- /* ul_send_signal_raw frees the bulk data if it fails */
- r = ul_send_signal_raw(priv, signal_ptr, signal_length, &bulkdata);
- }
-
- if (r) {
- unifi_error(priv,
- "CsrWifiRouterCtrlHipReqHandler: Failed to send signal (0x%.4X - %u)\n",
- *((u16*)signal_ptr), r);
- CsrWifiRouterCtrlWifiOffIndSend(priv->CSR_WIFI_SME_IFACEQUEUE,0,CSR_WIFI_SME_CONTROL_INDICATION_ERROR);
- }
-
- unifi_trace(priv, UDBG4, "CsrWifiRouterCtrlHipReqHandler: <----\n");
-}
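Two bit-level conventions in the handler above are worth spelling out: in the packed signal handled here, byte 5 carries the most significant byte of the 16-bit sender process ID (the client ID, with the LSB controlled by the SME), and the top two bits of the 32-bit host tag are kept clear on the transmit path because they are used for local routing. A minimal sketch, with names that are illustrative rather than taken from the driver:

/* Illustrative helpers for the bit conventions used above. */
#define HOST_TAG_LOCAL_ROUTING_BITS 0xC0000000u /* b31 and b30 */

static void set_packed_signal_client_id(u8 *signal_ptr, u16 sender_id)
{
    /* Byte 5 of the packed signal holds the MSB of the sender process ID. */
    signal_ptr[5] = (sender_id >> 8) & 0xff;
}

static u32 host_tag_for_transmit(u32 host_tag)
{
    /* Equivalent to the (HostTag & 0x3FFFFFFF) masking used above. */
    return host_tag & ~HOST_TAG_LOCAL_ROUTING_BITS;
}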
-
-#ifdef CSR_WIFI_SEND_GRATUITOUS_ARP
-static void
-uf_send_gratuitous_arp(unifi_priv_t *priv, u16 interfaceTag)
-{
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
- CSR_PRIORITY priority;
- CSR_SIGNAL signal;
- bulk_data_param_t bulkdata;
- CsrResult csrResult;
- struct sk_buff *skb, *newSkb = NULL;
- s8 protection;
- int r;
- static const u8 arp_req[36] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00,
- 0x08, 0x06, 0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x01,
- 0x00, 0x02, 0x5f, 0x20, 0x2f, 0x02,
- 0xc0, 0xa8, 0x00, 0x02,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xc0, 0xa8, 0x00, 0x02};
-
- csrResult = unifi_net_data_malloc(priv, &bulkdata.d[0], sizeof(arp_req));
- if (csrResult != CSR_RESULT_SUCCESS)
- {
- unifi_error(priv, "Failed to allocate bulk data in CsrWifiSmeRoamCompleteIndHandler()\n");
- return;
- }
- skb = (struct sk_buff *)(bulkdata.d[0].os_net_buf_ptr);
- skb->len = bulkdata.d[0].data_length;
-
- memcpy(skb->data, arp_req, sizeof(arp_req));
- /* add MAC and IP address */
- memcpy(skb->data + 16, priv->netdev[interfaceTag]->dev_addr, ETH_ALEN);
- skb->data[22] = (priv->sta_ip_address ) & 0xFF;
- skb->data[23] = (priv->sta_ip_address >> 8) & 0xFF;
- skb->data[24] = (priv->sta_ip_address >> 16) & 0xFF;
- skb->data[25] = (priv->sta_ip_address >> 24) & 0xFF;
- skb->data[32] = (priv->sta_ip_address ) & 0xFF;
- skb->data[33] = (priv->sta_ip_address >> 8) & 0xFF;
- skb->data[34] = (priv->sta_ip_address >> 16) & 0xFF;
- skb->data[35] = (priv->sta_ip_address >> 24) & 0xFF;
-
- bulkdata.d[1].os_data_ptr = NULL;
- bulkdata.d[1].os_net_buf_ptr = NULL;
- bulkdata.d[1].net_buf_length = bulkdata.d[1].data_length = 0;
-
- if ((protection = uf_get_protection_bit_from_interfacemode(priv, interfaceTag, &arp_req[26])) < 0)
- {
- unifi_error(priv, "CsrWifiSmeRoamCompleteIndHandler: Failed to determine protection mode\n");
- unifi_net_data_free(priv, &bulkdata.d[0]);
- return;
- }
-
- if ((priv->sta_wmm_capabilities & QOS_CAPABILITY_WMM_ENABLED) == 1)
- {
- priority = CSR_QOS_UP0;
- }
- else
- {
- priority = CSR_CONTENTION;
- }
-
- if (prepare_and_add_macheader(priv, skb, newSkb, priority, &bulkdata,
- interfaceTag, &arp_req[26],
- priv->netdev[interfaceTag]->dev_addr, protection))
- {
- unifi_error(priv, "CsrWifiSmeRoamCompleteIndHandler: failed to create MAC header\n");
- unifi_net_data_free(priv, &bulkdata.d[0]);
- return;
- }
- bulkdata.d[0].os_data_ptr = skb->data;
- bulkdata.d[0].os_net_buf_ptr = skb;
- bulkdata.d[0].data_length = skb->len;
-
- unifi_frame_ma_packet_req(priv, priority, 0, 0xffffffff, interfaceTag,
- CSR_NO_CONFIRM_REQUIRED, priv->netdev_client->sender_id,
- interfacePriv->bssid.a, &signal);
-
- r = ul_send_signal_unpacked(priv, &signal, &bulkdata);
- if (r)
- {
- unifi_error(priv, "CsrWifiSmeRoamCompleteIndHandler: failed to send QOS data null packet result: %d\n",r);
- unifi_net_data_free(priv, &bulkdata.d[0]);
- return;
- }
-
-}
-#endif /* CSR_WIFI_SEND_GRATUITOUS_ARP */
-
-/*
- * ---------------------------------------------------------------------------
- * configure_data_port
- *
- * Store the new controlled port configuration.
- *
- * Arguments:
- * priv Pointer to device private context struct
- * port_cfg Pointer to the port configuration
- *
- * Returns:
- * An unifi_ControlledPortAction value.
- * ---------------------------------------------------------------------------
- */
-static int
-configure_data_port(unifi_priv_t *priv,
- CsrWifiRouterCtrlPortAction port_action,
- const CsrWifiMacAddress *macAddress,
- const int queue,
- u16 interfaceTag)
-{
- const u8 broadcast_mac_address[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
- unifi_port_config_t *port;
- netInterface_priv_t *interfacePriv;
- int i;
- const char* controlled_string; /* cosmetic "controlled"/"uncontrolled" for trace */
-
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "configure_data_port: bad interfaceTag\n");
- return -EFAULT;
- }
-
- interfacePriv = priv->interfacePriv[interfaceTag];
-
- if (queue == UF_CONTROLLED_PORT_Q) {
- port = &interfacePriv->controlled_data_port;
- controlled_string = "controlled";
- } else {
- port = &interfacePriv->uncontrolled_data_port;
- controlled_string = "uncontrolled";
- }
-
- unifi_trace(priv, UDBG2,
- "port config request %pM %s with port_action %d.\n",
- macAddress->a, controlled_string, port_action);
-
- /* If the new configuration has the broadcast MAC address, or if we are in infrastructure mode, then clear the list first and set port override mode */
- if ((CSR_WIFI_ROUTER_CTRL_MODE_STA == interfacePriv->interfaceMode ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PCLI) ||
- !memcmp(macAddress->a, broadcast_mac_address, ETH_ALEN)) {
-
- port->port_cfg[0].port_action = port_action;
- port->port_cfg[0].mac_address = *macAddress;
- port->port_cfg[0].in_use = TRUE;
- port->entries_in_use = 1;
- port->overide_action = UF_DATA_PORT_OVERIDE;
-
- unifi_trace(priv, UDBG2, "%s port override on\n",
- (queue == UF_CONTROLLED_PORT_Q) ? "Controlled" : "Uncontrolled");
-
- /* Discard the remaining entries in the port config table */
- for (i = 1; i < UNIFI_MAX_CONNECTIONS; i++) {
- port->port_cfg[i].in_use = FALSE;
- }
-
- if (port_action == CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_OPEN) {
- unifi_trace(priv, UDBG1, "%s port broadcast set to open.\n",
- (queue == UF_CONTROLLED_PORT_Q) ? "Controlled" : "Uncontrolled");
-
- /*
- * Ask stack to schedule for transmission any packets queued
- * while controlled port was not open.
- * Use netif_schedule() instead of netif_wake_queue() because
- * transmission should be already enabled at this point. If it
- * is not, probably the interface is down and should remain as is.
- */
- uf_resume_data_plane(priv, queue, *macAddress, interfaceTag);
-
-#ifdef CSR_WIFI_SEND_GRATUITOUS_ARP
- if ((CSR_WIFI_ROUTER_CTRL_MODE_STA == interfacePriv->interfaceMode) &&
- (queue == UF_CONTROLLED_PORT_Q) && (priv->sta_ip_address != 0xFFFFFFFF))
- {
- uf_send_gratuitous_arp(priv, interfaceTag);
- }
-#endif
- } else {
- unifi_trace(priv, UDBG1, "%s port broadcast set to %s.\n",
- (queue == UF_CONTROLLED_PORT_Q) ? "Controlled" : "Uncontrolled",
- (port_action == CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_CLOSED_DISCARD) ? "discard": "closed");
-
- /* If port is closed, discard all the pending Rx packets */
- if (port_action == CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_CLOSED_DISCARD) {
- uf_free_pending_rx_packets(priv, queue, *macAddress,interfaceTag);
- }
- }
- } else {
- /* store the new configuration, either in the entry with the matching mac address (if already present)
- * or in a new entry
- */
-
- int found_entry_flag;
- int first_free_slot = -1;
-
- /* If leaving override mode, free the port entry used for override */
- if (port->overide_action == UF_DATA_PORT_OVERIDE) {
- port->port_cfg[0].in_use = FALSE;
- port->entries_in_use = 0;
- port->overide_action = UF_DATA_PORT_NOT_OVERIDE;
-
- unifi_trace(priv, UDBG2, "%s port override off\n",
- (queue == UF_CONTROLLED_PORT_Q) ? "Controlled" : "Uncontrolled");
- }
-
- found_entry_flag = 0;
- for (i = 0; i < UNIFI_MAX_CONNECTIONS; i++) {
- if (port->port_cfg[i].in_use) {
- if (!memcmp(&port->port_cfg[i].mac_address.a, macAddress->a, ETH_ALEN)) {
- /* We've seen this address before, reconfigure it */
- port->port_cfg[i].port_action = port_action;
- found_entry_flag = 1;
- break;
- }
- } else if (first_free_slot == -1) {
- /* Remember the first free slot on the way past so it can be claimed
- * if this turns out to be a new MAC address (to save walking the list again).
- */
- first_free_slot = i;
- }
- }
-
- /* At this point we found an existing entry and have updated it, or need to
- * add a new entry. If all slots are allocated, give up and return an error.
- */
- if (!found_entry_flag) {
- if (first_free_slot == -1) {
- unifi_error(priv, "no free slot found in port config array (%d used)\n", port->entries_in_use);
- return -EFAULT;
- } else {
- port->entries_in_use++;
- }
-
- unifi_trace(priv, UDBG3, "port config index assigned in config_data_port = %d\n", first_free_slot);
- port->port_cfg[first_free_slot].in_use = TRUE;
- port->port_cfg[first_free_slot].port_action = port_action;
- port->port_cfg[first_free_slot].mac_address = *macAddress;
- }
-
- if (port_action == CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_OPEN) {
- /*
- * Ask stack to schedule for transmission any packets queued
- * while controlled port was not open.
- * Use netif_schedule() instead of netif_wake_queue() because
- * transmission should be already enabled at this point. If it
- * is not, probably the interface is down and should remain as is.
- */
- uf_resume_data_plane(priv, queue, *macAddress, interfaceTag);
- }
-
- /*
- * If port is closed, discard all the pending Rx packets
- * coming from the peer station.
- */
- if (port_action == CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_CLOSED_DISCARD) {
- uf_free_pending_rx_packets(priv, queue, *macAddress,interfaceTag);
- }
-
- unifi_trace(priv, UDBG2,
- "port config %pM with port_action %d.\n",
- macAddress->a, port_action);
- }
- return 0;
-} /* configure_data_port() */
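configure_data_port() keeps the table in one of two states: an override state, where entry 0 applies to every peer (broadcast address or infrastructure mode), and a per-peer state, where each in-use entry is keyed by MAC address. The sketch below shows the kind of lookup such a table supports; the helper and its closed-by-default fallback are illustrative, not the driver's own code.

/* Illustrative lookup over the port table maintained above. */
static CsrWifiRouterCtrlPortAction
port_action_for_peer(const unifi_port_config_t *port, const CsrWifiMacAddress *peer)
{
    int i;

    /* In override mode, entry 0 applies to every peer. */
    if (port->overide_action == UF_DATA_PORT_OVERIDE)
        return port->port_cfg[0].port_action;

    for (i = 0; i < UNIFI_MAX_CONNECTIONS; i++) {
        if (port->port_cfg[i].in_use &&
            !memcmp(port->port_cfg[i].mac_address.a, peer->a, ETH_ALEN))
            return port->port_cfg[i].port_action;
    }

    /* Unknown peer: assume the port is closed (illustrative default). */
    return CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_CLOSED_DISCARD;
}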
-
-
-void CsrWifiRouterCtrlPortConfigureReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiRouterCtrlPortConfigureReq* req = (CsrWifiRouterCtrlPortConfigureReq*)msg;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[req->interfaceTag];
-
- unifi_trace(priv, UDBG3, "entering CsrWifiRouterCtrlPortConfigureReqHandler\n");
- if (priv->smepriv == NULL) {
- unifi_error(priv, "CsrWifiRouterCtrlPortConfigureReqHandler: invalid smepriv\n");
- return;
- }
-
- /* To update the protection status of the peer/station */
- switch(interfacePriv->interfaceMode)
- {
- case CSR_WIFI_ROUTER_CTRL_MODE_STA:
- case CSR_WIFI_ROUTER_CTRL_MODE_AMP:
- case CSR_WIFI_ROUTER_CTRL_MODE_IBSS:
- case CSR_WIFI_ROUTER_CTRL_MODE_P2PCLI:
- /* Since for UniFi as a station the station record is not maintained, the interfaceID is
- * only needed to update the peer protection status
- */
- interfacePriv->protect = req->setProtection;
- break;
- case CSR_WIFI_ROUTER_CTRL_MODE_AP:
- case CSR_WIFI_ROUTER_CTRL_MODE_P2PGO:
- {
- u8 i;
- CsrWifiRouterCtrlStaInfo_t *staRecord;
- /* If the controlled port is open, the peer has already been added to the station record,
- * so that the protection corresponding to the peer is valid in this req
- */
- if (req->controlledPortAction == CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_OPEN) {
- for(i =0; i < UNIFI_MAX_CONNECTIONS; i++) {
- staRecord = (CsrWifiRouterCtrlStaInfo_t *) (interfacePriv->staInfo[i]);
- if (staRecord) {
- /* Find the matching station record & set the protection type */
- if (!memcmp(req->macAddress.a, staRecord->peerMacAddress.a, ETH_ALEN)) {
- staRecord->protection = req->setProtection;
- break;
- }
- }
- }
- }
- }
- break;
- default:
- unifi_trace(priv, UDBG1, "CsrWifiRouterCtrlPortConfigureReqHandler(0x%.4X) Uncaught mode %d\n",
- msg->source, interfacePriv->interfaceMode);
- }
-
- configure_data_port(priv, req->uncontrolledPortAction, (const CsrWifiMacAddress *)&req->macAddress,
- UF_UNCONTROLLED_PORT_Q, req->interfaceTag);
- configure_data_port(priv, req->controlledPortAction, (const CsrWifiMacAddress *)&req->macAddress,
- UF_CONTROLLED_PORT_Q, req->interfaceTag);
-
- CsrWifiRouterCtrlPortConfigureCfmSend(msg->source,req->clientData,req->interfaceTag,
- CSR_RESULT_SUCCESS, req->macAddress);
- unifi_trace(priv, UDBG3, "leaving CsrWifiRouterCtrlPortConfigureReqHandler\n");
-}
-
-
-void CsrWifiRouterCtrlWifiOnReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiRouterCtrlVersions versions;
- CsrWifiRouterCtrlWifiOnReq* req = (CsrWifiRouterCtrlWifiOnReq*)msg;
- int r,i;
- CsrResult csrResult;
-
- if (priv == NULL) {
- return;
- }
- if( priv->wol_suspend ) {
- unifi_trace(priv, UDBG1, "CsrWifiRouterCtrlWifiOnReqHandler: Don't reset mode\n");
- } else {
-#ifdef ANDROID_BUILD
- /* Take the wakelock while Wi-Fi On is in progress */
- unifi_trace(priv, UDBG1, "CsrWifiRouterCtrlWifiOnReqHandler: take wake lock\n");
- wake_lock(&unifi_sdio_wake_lock);
-#endif
- for (i=0; i<CSR_WIFI_NUM_INTERFACES; i++) {
- unifi_trace(priv, UDBG1, "CsrWifiRouterCtrlWifiOnReqHandler: Setting interface %d to NONE\n", i );
-
- priv->interfacePriv[i]->interfaceMode = 0;
- }
- }
- unifi_trace(priv, UDBG1, "CsrWifiRouterCtrlWifiOnReqHandler(0x%.4X) req->dataLength=%d req->data=0x%x\n", msg->source, req->dataLength, req->data);
-
- if(req->dataLength==3 && req->data && req->data[0]==0 && req->data[1]==1 && req->data[2]==1)
- {
- priv->cmanrTestMode = TRUE;
- unifi_trace(priv, UDBG1, "CsrWifiRouterCtrlWifiOnReqHandler: cmanrTestMode=%d\n", priv->cmanrTestMode);
- }
- else
- {
- priv->cmanrTestMode = FALSE;
- }
-
- /*
- * The request to initialise UniFi might come while UniFi is running.
- * We need to block all I/O activity until the reset completes, otherwise
- * an SDIO error might occur, resulting in an indication to the SME which
- * makes it think that the initialisation has failed.
- */
- priv->bh_thread.block_thread = 1;
-
- /* Update the wifi_on state */
- priv->wifi_on_state = wifi_on_in_progress;
-
- /* If UniFi was unpowered, acquire the firmware for download to chip */
- if (!priv->wol_suspend) {
- r = uf_request_firmware_files(priv, UNIFI_FW_STA);
- if (r) {
- unifi_error(priv, "CsrWifiRouterCtrlWifiOnReqHandler: Failed to get f/w\n");
- CsrWifiRouterCtrlWifiOnCfmSend(msg->source, req->clientData, CSR_RESULT_FAILURE);
- return;
- }
- } else {
- unifi_trace(priv, UDBG1, "Don't need firmware\n");
- }
-
- /* Power on UniFi (which may not necessarily have been off) */
- CsrSdioClaim(priv->sdio);
- csrResult = CsrSdioPowerOn(priv->sdio);
- CsrSdioRelease(priv->sdio);
- if (csrResult != CSR_RESULT_SUCCESS && csrResult != CSR_SDIO_RESULT_NOT_RESET) {
- unifi_error(priv, "CsrWifiRouterCtrlWifiOnReqHandler: Failed to power on UniFi\n");
- CsrWifiRouterCtrlWifiOnCfmSend(msg->source, req->clientData, CSR_RESULT_FAILURE);
- return;
- }
-
- /* If CsrSdioPowerOn() returns CSR_RESULT_SUCCESS, it means that we need to initialise UniFi */
- if (csrResult == CSR_RESULT_SUCCESS && !priv->wol_suspend) {
- /* Initialise UniFi hardware */
- r = uf_init_hw(priv);
- if (r) {
- unifi_error(priv, "CsrWifiRouterCtrlWifiOnReqHandler: Failed to initialise h/w, error %d\n", r);
- CsrWifiRouterCtrlWifiOnCfmSend(msg->source, req->clientData, CSR_RESULT_FAILURE);
- return;
- }
- } else {
- unifi_trace(priv, UDBG1, "UniFi already initialised\n");
- }
-
- /* Completed handling of wake up from suspend with UniFi powered */
- priv->wol_suspend = FALSE;
-
- /* Re-enable the I/O thread */
- priv->bh_thread.block_thread = 0;
-
- /*
- * Start the I/O thread. The thread might already be running.
- * This is fine; just carry on with the request.
- */
- r = uf_init_bh(priv);
- if (r) {
- CsrSdioClaim(priv->sdio);
- CsrSdioPowerOff(priv->sdio);
- CsrSdioRelease(priv->sdio);
- CsrWifiRouterCtrlWifiOnCfmSend(msg->source, req->clientData, CSR_RESULT_FAILURE);
- return;
- }
-
- /* Get the version information from the core */
- unifi_card_info(priv->card, &priv->card_info);
-
- /* Set the sme queue id */
- priv->CSR_WIFI_SME_IFACEQUEUE = msg->source;
- CSR_WIFI_SME_IFACEQUEUE = msg->source;
-
-
- /* Copy to the unifiio_card_info structure. */
- versions.chipId = priv->card_info.chip_id;
- versions.chipVersion = priv->card_info.chip_version;
- versions.firmwareBuild = priv->card_info.fw_build;
- versions.firmwareHip = priv->card_info.fw_hip_version;
- versions.routerBuild = (char*)CSR_WIFI_VERSION;
- versions.routerHip = (UNIFI_HIP_MAJOR_VERSION << 8) | UNIFI_HIP_MINOR_VERSION;
-
- CsrWifiRouterCtrlWifiOnIndSend(msg->source, 0, CSR_RESULT_SUCCESS, versions);
-
- /* Update the wifi_on state */
- priv->wifi_on_state = wifi_on_done;
-}
-
-
-/*
- * wifi_off:
- * Common code for CsrWifiRouterCtrlWifiOffReqHandler() and
- * CsrWifiRouterCtrlWifiOffRspHandler().
- */
-static void
-wifi_off(unifi_priv_t *priv)
-{
- int power_off;
- int priv_instance;
- int i;
- CsrResult csrResult;
-
-
- /* Already off? */
- if (priv->wifi_on_state == wifi_on_unspecified) {
- unifi_trace(priv, UDBG1, "wifi_off already\n");
- return;
- }
-
- unifi_trace(priv, UDBG1, "wifi_off\n");
-
- /* Destroy the Traffic Analysis Module */
- cancel_work_sync(&priv->ta_ind_work.task);
- cancel_work_sync(&priv->ta_sample_ind_work.task);
-#ifdef CSR_SUPPORT_WEXT
- cancel_work_sync(&priv->sme_config_task);
- wext_send_disassoc_event(priv);
-#endif
-
- /* Cancel pending M4 stuff */
- for (i = 0; i < CSR_WIFI_NUM_INTERFACES; i++) {
- if (priv->netdev[i]) {
- netInterface_priv_t *netpriv = (netInterface_priv_t *) netdev_priv(priv->netdev[i]);
- cancel_work_sync(&netpriv->send_m4_ready_task);
- }
- }
- flush_workqueue(priv->unifi_workqueue);
-
- /* The fw_init module parameter can prevent powering off UniFi, for debugging */
- priv_instance = uf_find_priv(priv);
- if (priv_instance == -1) {
- unifi_warning(priv,
- "CsrWifiRouterCtrlStopReqHandler: Unknown priv instance, will power off card.\n");
- power_off = 1;
- } else {
- power_off = (fw_init[priv_instance] > 0) ? 0 : 1;
- }
-
- /* Production test mode requires power to the chip, too */
- if (priv->ptest_mode) {
- power_off = 0;
- }
-
- /* Stop the bh_thread */
- uf_stop_thread(priv, &priv->bh_thread);
-
- /* Read the f/w panic codes, if any. Protect against a second wifi_off() call,
- * which may happen if the SME requests a wifi_off and then closes the char device */
- if (priv->init_progress != UNIFI_INIT_NONE) {
- CsrSdioClaim(priv->sdio);
- unifi_capture_panic(priv->card);
- CsrSdioRelease(priv->sdio);
- }
-
- /* Unregister the interrupt handler */
- if (csr_sdio_linux_remove_irq(priv->sdio)) {
- unifi_notice(priv,
- "csr_sdio_linux_remove_irq failed to talk to card.\n");
- }
-
- if (power_off) {
- unifi_trace(priv, UDBG2,
- "Force low power and try to power off\n");
- /* Put UniFi into deep sleep, in case we cannot power it off */
- CsrSdioClaim(priv->sdio);
- csrResult = unifi_force_low_power_mode(priv->card);
- CsrSdioRelease(priv->sdio);
-
- CsrSdioPowerOff(priv->sdio);
- }
-
- /* Consider UniFi to be uninitialised */
- priv->init_progress = UNIFI_INIT_NONE;
- priv->wifi_on_state = wifi_on_unspecified;
-
-
-} /* wifi_off() */
-
-
-void CsrWifiRouterCtrlWifiOffReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiRouterCtrlWifiOffReq* req = (CsrWifiRouterCtrlWifiOffReq*)msg;
- int i = 0;
-
- if (priv == NULL) {
- return;
- }
-
- unifi_trace(priv, UDBG1, "CsrWifiRouterCtrlWifiOffReqHandler(0x%.4X)\n", msg->source);
-
- /* Stop the network traffic on all interfaces before freeing the core. */
- for (i=0; i<CSR_WIFI_NUM_INTERFACES; i++) {
- netInterface_priv_t *interfacePriv = priv->interfacePriv[i];
- if (interfacePriv->netdev_registered == 1) {
- netif_carrier_off(priv->netdev[i]);
- netif_tx_stop_all_queues(priv->netdev[i]);
- interfacePriv->connected = UnifiConnectedUnknown;
- }
- interfacePriv->interfaceMode = 0;
-
- /* Enable all queues by default */
- interfacePriv->queueEnabled[0] = 1;
- interfacePriv->queueEnabled[1] = 1;
- interfacePriv->queueEnabled[2] = 1;
- interfacePriv->queueEnabled[3] = 1;
- }
- wifi_off(priv);
-
- CsrWifiRouterCtrlWifiOffCfmSend(msg->source,req->clientData);
-
- /* If this is called in response to closing the character device, the
- * caller must use uf_sme_cancel_request() to terminate any pending SME
- * blocking request or there will be a delay while the operation times out.
- */
-}
-
-
-void CsrWifiRouterCtrlQosControlReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiRouterCtrlQosControlReq* req = (CsrWifiRouterCtrlQosControlReq*)msg;
- netInterface_priv_t *interfacePriv;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "CsrWifiRouterCtrlQosControlReqHandler: invalid smepriv\n");
- return;
- }
-
- unifi_trace(priv, UDBG4, "CsrWifiRouterCtrlQosControlReqHandler: control = %d", req->control);
-
- if (req->interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "CsrWifiRouterCtrlQosControlReqHandler: interfaceID >= CSR_WIFI_NUM_INTERFACES.\n");
- return;
- }
- interfacePriv = priv->interfacePriv[req->interfaceTag];
-
- if (req->control == CSR_WIFI_ROUTER_CTRL_QOS_CONTROL_WMM_ON) {
- priv->sta_wmm_capabilities |= QOS_CAPABILITY_WMM_ENABLED;
- unifi_trace(priv, UDBG1, "WMM enabled\n");
-
- unifi_trace(priv, UDBG1, "Queue Config %x\n", req->queueConfig);
-
- interfacePriv->queueEnabled[UNIFI_TRAFFIC_Q_BK] = (req->queueConfig & CSR_WIFI_ROUTER_CTRL_QUEUE_BK_ENABLE)?1:0;
- interfacePriv->queueEnabled[UNIFI_TRAFFIC_Q_BE] = (req->queueConfig & CSR_WIFI_ROUTER_CTRL_QUEUE_BE_ENABLE)?1:0;
- interfacePriv->queueEnabled[UNIFI_TRAFFIC_Q_VI] = (req->queueConfig & CSR_WIFI_ROUTER_CTRL_QUEUE_VI_ENABLE)?1:0;
- interfacePriv->queueEnabled[UNIFI_TRAFFIC_Q_VO] = (req->queueConfig & CSR_WIFI_ROUTER_CTRL_QUEUE_VO_ENABLE)?1:0;
-
- } else {
- priv->sta_wmm_capabilities = 0;
- unifi_trace(priv, UDBG1, "WMM disabled\n");
- }
-}
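The handler above simply latches per-AC enable flags from the queueConfig bitmask; the queues themselves correspond to the standard WMM access categories, which are selected from the 802.1D user priority of each outgoing frame. A minimal, self-contained sketch of that standard priority-to-AC mapping (the helper name and enum are hypothetical, not the driver's own):

#include <stdint.h>

enum wmm_ac { WMM_AC_BK, WMM_AC_BE, WMM_AC_VI, WMM_AC_VO };

/* Standard 802.1D user priority (0..7) to WMM access category mapping. */
static enum wmm_ac up_to_ac(uint8_t user_priority)
{
        switch (user_priority & 0x7) {
        case 1:
        case 2:
                return WMM_AC_BK;       /* background  */
        case 0:
        case 3:
                return WMM_AC_BE;       /* best effort */
        case 4:
        case 5:
                return WMM_AC_VI;       /* video       */
        default:
                return WMM_AC_VO;       /* voice (6,7) */
        }
}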
-
-
-void CsrWifiRouterCtrlTclasAddReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiRouterCtrlTclasAddReq* req = (CsrWifiRouterCtrlTclasAddReq*)msg;
-
- if (priv == NULL) {
- unifi_error(priv, "CsrWifiRouterCtrlTclasAddReqHandler: invalid smepriv\n");
- return;
- }
-
- CsrWifiRouterCtrlTclasAddCfmSend(msg->source, req->clientData, req->interfaceTag , CSR_RESULT_SUCCESS);
-}
-
-void CsrWifiRouterCtrlTclasDelReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiRouterCtrlTclasDelReq* req = (CsrWifiRouterCtrlTclasDelReq*)msg;
-
- if (priv == NULL) {
- unifi_error(priv, "CsrWifiRouterCtrlTclasDelReqHandler: invalid smepriv\n");
- return;
- }
-
- CsrWifiRouterCtrlTclasDelCfmSend(msg->source, req->clientData, req->interfaceTag, CSR_RESULT_SUCCESS);
-}
-
-
-void CsrWifiRouterCtrlConfigurePowerModeReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiRouterCtrlConfigurePowerModeReq* req = (CsrWifiRouterCtrlConfigurePowerModeReq*)msg;
- enum unifi_low_power_mode pm;
- CsrResult csrResult;
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "CsrWifiRouterCtrlConfigurePowerModeReqHandler: invalid smepriv\n");
- return;
- }
-
- if (req->mode == CSR_WIFI_ROUTER_CTRL_LOW_POWER_MODE_DISABLED) {
- pm = UNIFI_LOW_POWER_DISABLED;
- } else {
- pm = UNIFI_LOW_POWER_ENABLED;
- }
-
- unifi_trace(priv, UDBG2,
- "CsrWifiRouterCtrlConfigurePowerModeReqHandler (mode=%d, wake=%d)\n",
- req->mode, req->wakeHost);
- csrResult = unifi_configure_low_power_mode(priv->card, pm,
- (req->wakeHost ? UNIFI_PERIODIC_WAKE_HOST_ENABLED : UNIFI_PERIODIC_WAKE_HOST_DISABLED));
-}
-
-
-void CsrWifiRouterCtrlWifiOnResHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiRouterCtrlWifiOnRes* res = (CsrWifiRouterCtrlWifiOnRes*)msg;
-
- if (priv == NULL) {
- unifi_error(NULL, "CsrWifiRouterCtrlWifiOnResHandler: Invalid ospriv.\n");
- return;
- }
-
- unifi_trace(priv, UDBG1,
- "CsrWifiRouterCtrlWifiOnResHandler: status %d (patch %u)\n", res->status, res->smeVersions.firmwarePatch);
-
- if (res->smeVersions.firmwarePatch != 0) {
- unifi_info(priv, "Firmware patch %d\n", res->smeVersions.firmwarePatch);
- }
-
- if (res->numInterfaceAddress > CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "WifiOnResHandler bad numInterfaceAddress %d\n", res->numInterfaceAddress);
- return;
- }
-
- /* UniFi is now initialised, complete the init. */
- if (res->status == CSR_RESULT_SUCCESS)
- {
- int i; /* used as a loop counter */
- u32 intmode = CSR_WIFI_INTMODE_DEFAULT;
-#ifdef CSR_WIFI_SPLIT_PATCH
- u8 switching_ap_fw = FALSE;
-#endif
- /* Register the UniFi device with the OS network manager */
- unifi_trace(priv, UDBG3, "Card Init Completed Successfully\n");
-
- /* Store the MAC address in the netdev */
- for(i=0;i<res->numInterfaceAddress;i++)
- {
- memcpy(priv->netdev[i]->dev_addr, res->stationMacAddress[i].a, ETH_ALEN);
- }
-
- /* Copy version structure into the private versions field */
- priv->sme_versions = res->smeVersions;
-
- unifi_trace(priv, UDBG2, "network interfaces count = %d\n",
- res->numInterfaceAddress);
-
- /* Register the netdevs for each interface. */
- for(i=0;i<res->numInterfaceAddress;i++)
- {
- netInterface_priv_t *interfacePriv = priv->interfacePriv[i];
- if(!interfacePriv->netdev_registered)
- {
- int r;
- unifi_trace(priv, UDBG3, "registering net device %d\n", i);
- r = uf_register_netdev(priv, i);
- if (r)
- {
- /* Unregister the net_devices that were registered in previous iterations */
- uf_unregister_netdev(priv);
- unifi_error(priv, "Failed to register the network device.\n");
- CsrWifiRouterCtrlWifiOnCfmSend(msg->source, res->clientData, CSR_RESULT_FAILURE);
- return;
- }
- }
-#ifdef CSR_WIFI_SPLIT_PATCH
- else
- {
- /* If a netdev is already registered, we have received this WifiOnRes
- * in response to switching AP/STA firmware in a ModeSetReq.
- * Remember this in order to send a ModeSetCfm once the switch completes.
- */
- switching_ap_fw = TRUE;
- }
-#endif
- }
- priv->totalInterfaceCount = res->numInterfaceAddress;
-
- /* If the MIB has selected f/w scheduled interrupt mode, apply it now
- * but let the module parameter override it.
- */
- if (run_bh_once != -1) {
- intmode = (u32)run_bh_once;
- } else if (res->scheduledInterrupt) {
- intmode = CSR_WIFI_INTMODE_RUN_BH_ONCE;
- }
- unifi_set_interrupt_mode(priv->card, intmode);
-
- priv->init_progress = UNIFI_INIT_COMPLETED;
-
- /* Acknowledge the CsrWifiRouterCtrlWifiOnReq now */
- CsrWifiRouterCtrlWifiOnCfmSend(msg->source, res->clientData, CSR_RESULT_SUCCESS);
-
-#ifdef CSR_WIFI_SPLIT_PATCH
- if (switching_ap_fw && (priv->pending_mode_set.common.destination != 0xaaaa)) {
- unifi_info(priv, "Completed firmware reload with %s patch\n",
- CSR_WIFI_HIP_IS_AP_FW(priv->interfacePriv[0]->interfaceMode) ? "AP" : "STA");
-
- /* Confirm the ModeSetReq that requested the AP/STA patch switch */
- CsrWifiRouterCtrlModeSetCfmSend(priv->pending_mode_set.common.source,
- priv->pending_mode_set.clientData,
- priv->pending_mode_set.interfaceTag,
- priv->pending_mode_set.mode,
- CSR_RESULT_SUCCESS);
- priv->pending_mode_set.common.destination = 0xaaaa;
- }
-#endif
- unifi_info(priv, "UniFi ready\n");
-
-#ifdef ANDROID_BUILD
- /* Release the wakelock */
- unifi_trace(priv, UDBG1, "ready: release wake lock\n");
- wake_unlock(&unifi_sdio_wake_lock);
-#endif
- /* Firmware initialisation is complete, so let the SDIO bus
- * clock be raised when convenient to the core.
- */
- unifi_request_max_sdio_clock(priv->card);
-
-#ifdef CSR_SUPPORT_WEXT
- /* Notify the Android wpa_supplicant that we are ready */
- wext_send_started_event(priv);
-
- queue_work(priv->unifi_workqueue, &priv->sme_config_task);
-#endif
-
- } else {
- /* Acknowledge the CsrWifiRouterCtrlWifiOnReq now */
- CsrWifiRouterCtrlWifiOnCfmSend(msg->source, res->clientData, CSR_RESULT_FAILURE);
- }
-}
-
-
-void CsrWifiRouterCtrlWifiOffResHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-
-void CsrWifiRouterCtrlMulticastAddressResHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-
-void CsrWifiRouterMaPacketSubscribeReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiRouterMaPacketSubscribeReq* req = (CsrWifiRouterMaPacketSubscribeReq*)msg;
- u8 i;
- CsrResult result;
-
- if (priv == NULL) {
- unifi_error(priv, "CsrWifiRouterMaPacketSubscribeReqHandler: invalid priv\n");
- return;
- }
-
- /* Look for an unused filter */
-
- result = CSR_WIFI_RESULT_NO_ROOM;
- for (i = 0; i < MAX_MA_UNIDATA_IND_FILTERS; i++) {
-
- if (!priv->sme_unidata_ind_filters[i].in_use) {
-
- priv->sme_unidata_ind_filters[i].in_use = 1;
- priv->sme_unidata_ind_filters[i].appHandle = msg->source;
- priv->sme_unidata_ind_filters[i].encapsulation = req->encapsulation;
- priv->sme_unidata_ind_filters[i].protocol = req->protocol;
-
- priv->sme_unidata_ind_filters[i].oui[2] = (u8) (req->oui & 0xFF);
- priv->sme_unidata_ind_filters[i].oui[1] = (u8) ((req->oui >> 8) & 0xFF);
- priv->sme_unidata_ind_filters[i].oui[0] = (u8) ((req->oui >> 16) & 0xFF);
-
- result = CSR_RESULT_SUCCESS;
- break;
- }
- }
-
- unifi_trace(priv, UDBG1,
- "subscribe_req: encap=%d, handle=%d, result=%d\n",
- req->encapsulation, i, result);
- CsrWifiRouterMaPacketSubscribeCfmSend(msg->source,req->interfaceTag, i, result, 0);
-}
-
-
-void CsrWifiRouterMaPacketUnsubscribeReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiRouterMaPacketUnsubscribeReq* req = (CsrWifiRouterMaPacketUnsubscribeReq*)msg;
- CsrResult result;
-
- if (priv == NULL) {
- unifi_error(priv, "CsrWifiRouterMaPacketUnsubscribeReqHandler: invalid priv\n");
- return;
- }
-
- result = CSR_WIFI_RESULT_NOT_FOUND;
-
- if (req->subscriptionHandle < MAX_MA_UNIDATA_IND_FILTERS) {
- if (priv->sme_unidata_ind_filters[req->subscriptionHandle].in_use) {
- priv->sme_unidata_ind_filters[req->subscriptionHandle].in_use = 0;
- result = CSR_RESULT_SUCCESS;
- } else {
- result = CSR_WIFI_RESULT_NOT_FOUND;
- }
- }
-
- unifi_trace(priv, UDBG1,
- "unsubscribe_req: handle=%d, result=%d\n",
- req->subscriptionHandle, result);
- CsrWifiRouterMaPacketUnsubscribeCfmSend(msg->source,req->interfaceTag, result);
-}
-
-
-void CsrWifiRouterCtrlCapabilitiesReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiRouterCtrlCapabilitiesReq* req = (CsrWifiRouterCtrlCapabilitiesReq*)msg;
-
- if (priv == NULL) {
- unifi_error(priv, "CsrWifiRouterCtrlCapabilitiesReqHandler: invalid priv\n");
- return;
- }
-
- CsrWifiRouterCtrlCapabilitiesCfmSend(msg->source,req->clientData,
- UNIFI_SOFT_COMMAND_Q_LENGTH - 1,
- UNIFI_SOFT_TRAFFIC_Q_LENGTH - 1);
-}
-
-
-void CsrWifiRouterCtrlSuspendResHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiRouterCtrlSuspendRes* res = (CsrWifiRouterCtrlSuspendRes*)msg;
-
- if (priv == NULL) {
- unifi_error(priv, "CsrWifiRouterCtrlSuspendResHandler: invalid priv\n");
- return;
- }
-
- sme_complete_request(priv, res->status);
-}
-
-
-void CsrWifiRouterCtrlResumeResHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiRouterCtrlResumeRes* res = (CsrWifiRouterCtrlResumeRes*)msg;
-
- if (priv == NULL) {
- unifi_error(priv, "CsrWifiRouterCtrlResumeResHandler: invalid priv\n");
- return;
- }
-
- sme_complete_request(priv, res->status);
-}
-
-
-void CsrWifiRouterCtrlTrafficConfigReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiRouterCtrlTrafficConfigReq* req = (CsrWifiRouterCtrlTrafficConfigReq*)msg;
- CsrResult csrResult;
-
- if (priv == NULL) {
- unifi_error(priv, "CsrWifiRouterCtrlTrafficConfigReqHandler: invalid smepriv\n");
- return;
- }
- if (req->trafficConfigType == CSR_WIFI_ROUTER_CTRL_TRAFFIC_CONFIG_TYPE_FILTER)
- {
- req->config.packetFilter |= CSR_WIFI_ROUTER_CTRL_TRAFFIC_PACKET_TYPE_CUSTOM;
- }
- csrResult = unifi_ta_configure(priv->card, req->trafficConfigType, (const CsrWifiRouterCtrlTrafficConfig *)&req->config);
-}
-
-void CsrWifiRouterCtrlTrafficClassificationReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiRouterCtrlTrafficClassificationReq* req = (CsrWifiRouterCtrlTrafficClassificationReq*)msg;
-
- if (priv == NULL) {
- unifi_error(priv, "CsrWifiRouterCtrlTrafficClassificationReqHandler: invalid smepriv\n");
- return;
- }
-
- unifi_ta_classification(priv->card, req->trafficType, req->period);
-}
-
-static int
-_sys_packet_req(unifi_priv_t *priv, const CSR_SIGNAL *signal,
- u8 subscriptionHandle,
- u16 frameLength, u8 *frame,
- int proto)
-{
- int r;
- const sme_ma_unidata_ind_filter_t *subs;
- bulk_data_param_t bulkdata;
- CSR_MA_PACKET_REQUEST req = signal->u.MaPacketRequest;
- struct sk_buff *skb, *newSkb = NULL;
- CsrWifiMacAddress peerMacAddress;
- CsrResult csrResult;
- u16 interfaceTag = req.VirtualInterfaceIdentifier & 0xff;
- u8 eapolStore = FALSE;
- s8 protection = 0;
- netInterface_priv_t *interfacePriv;
- unsigned long flags;
-
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "_sys_packet_req: interfaceID >= CSR_WIFI_NUM_INTERFACES.\n");
- return -EINVAL;
- }
- interfacePriv = priv->interfacePriv[interfaceTag];
- if (!priv->sme_unidata_ind_filters[subscriptionHandle].in_use) {
- unifi_error(priv, "_sys_packet_req: unknown subscription.\n");
- return -EINVAL;
- }
-
- subs = &priv->sme_unidata_ind_filters[subscriptionHandle];
- unifi_trace(priv, UDBG1,
- "_sys_packet_req: handle=%d, subs=%p, encap=%d\n",
- subscriptionHandle, subs, subs->encapsulation);
-
- csrResult = unifi_net_data_malloc(priv, &bulkdata.d[0], frameLength);
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(priv, "_sys_packet_req: failed to allocate bulkdata.\n");
- return (int)CsrHipResultToStatus(csrResult);
- }
-
- /* get the peer Mac address */
- memcpy(&peerMacAddress, frame, ETH_ALEN);
-
- /* Determine if we need to add an encapsulation header */
- if (subs->encapsulation == CSR_WIFI_ROUTER_ENCAPSULATION_ETHERNET) {
- memcpy((void*)bulkdata.d[0].os_data_ptr, frame, frameLength);
-
- /* The translation is performed on the skb */
- skb = (struct sk_buff*)bulkdata.d[0].os_net_buf_ptr;
-
- unifi_trace(priv, UDBG1,
- "_sys_packet_req: skb_add_llc_snap -->\n");
- r = skb_add_llc_snap(priv->netdev[interfaceTag], skb, proto);
- unifi_trace(priv, UDBG1,
- "_sys_packet_req: skb_add_llc_snap <--\n");
- if (r) {
- unifi_error(priv,
- "_sys_packet_req: failed to translate eth frame.\n");
- unifi_net_data_free(priv,&bulkdata.d[0]);
- return r;
- }
-
- bulkdata.d[0].data_length = skb->len;
- } else {
- /* Crop the MAC addresses from the packet */
- memcpy((void*)bulkdata.d[0].os_data_ptr, frame + 2*ETH_ALEN, frameLength - 2*ETH_ALEN);
- bulkdata.d[0].data_length = frameLength - 2*ETH_ALEN;
- skb = (struct sk_buff*)bulkdata.d[0].os_net_buf_ptr;
- skb->len = bulkdata.d[0].data_length;
-
- }
-
- bulkdata.d[1].os_data_ptr = NULL;
- bulkdata.d[1].os_net_buf_ptr = NULL;
- bulkdata.d[1].data_length = 0;
-
- /* Check whether this frame is the EAPOL M4 */
- if (0 == uf_verify_m4(priv, bulkdata.d[0].os_data_ptr, bulkdata.d[0].data_length)) {
- eapolStore = TRUE;
- }
-
-#ifdef CSR_WIFI_SECURITY_WAPI_ENABLE
- if (proto == ETH_P_WAI)
- {
- protection = 0; /*WAI packets always sent unencrypted*/
- }
- else
- {
-#endif
-
-#ifdef CSR_SUPPORT_SME
- if ((protection = uf_get_protection_bit_from_interfacemode(priv, interfaceTag, peerMacAddress.a)) < 0) {
- unifi_error(priv, "unicast address, but destination not in station record database\n");
- unifi_net_data_free(priv,&bulkdata.d[0]);
- return -1;
- }
-#else
- protection = 0;
-#endif
-
-#ifdef CSR_WIFI_SECURITY_WAPI_ENABLE
- }
-#endif
-
- /* add Mac header */
- if (prepare_and_add_macheader(priv, skb, newSkb, req.Priority, &bulkdata, interfaceTag, frame, frame + ETH_ALEN, protection)) {
- unifi_error(priv, "failed to create MAC header\n");
- unifi_net_data_free(priv,&bulkdata.d[0]);
- return -1;
- }
-
- if (eapolStore) {
- spin_lock_irqsave(&priv->m4_lock, flags);
- /* Store the EAPOL M4 packet for later */
- interfacePriv->m4_signal = *signal;
- interfacePriv->m4_bulk_data.net_buf_length = bulkdata.d[0].net_buf_length;
- interfacePriv->m4_bulk_data.data_length = bulkdata.d[0].data_length;
- interfacePriv->m4_bulk_data.os_data_ptr = bulkdata.d[0].os_data_ptr;
- interfacePriv->m4_bulk_data.os_net_buf_ptr = bulkdata.d[0].os_net_buf_ptr;
- spin_unlock_irqrestore(&priv->m4_lock, flags);
- /* Send a signal to SME */
- unifi_trace(priv, UDBG1, "_sys_packet_req: Sending CsrWifiRouterCtrlM4ReadyToSendInd\n");
- CsrWifiRouterCtrlM4ReadyToSendIndSend(priv->CSR_WIFI_SME_IFACEQUEUE, 0, interfaceTag, peerMacAddress);
- return 0;
- }
-
- /* Send the signal to UniFi */
- /* Set bit 31 of the host tag to 1 for local routing */
- r = uf_process_ma_packet_req(priv, peerMacAddress.a, (req.HostTag | 0x80000000), interfaceTag, 0,
- (CSR_RATE)0, req.Priority, signal->SignalPrimitiveHeader.SenderProcessId, &bulkdata);
- if (r) {
- unifi_error(priv,
- "_sys_packet_req: failed to send signal.\n");
- unifi_net_data_free(priv,&bulkdata.d[0]);
- return r;
- }
- /* The final CsrWifiRouterMaPacketCfmSend() will be called when the actual MA-PACKET.cfm is received from the chip */
-
- return 0;
-}
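When the subscription is not Ethernet-encapsulated, _sys_packet_req() above strips the two MAC addresses and sends the payload as-is; in the Ethernet case it relies on skb_add_llc_snap() to translate the Ethernet II framing into an 802.2 LLC/SNAP encapsulation of the EtherType. A self-contained sketch of that standard RFC 1042 header layout, for readers unfamiliar with it (generic reference code, not the driver's implementation of skb_add_llc_snap()):

#include <stdint.h>
#include <string.h>

/* Standard 802.2 LLC/SNAP (RFC 1042) header used to carry an EtherType. */
struct llc_snap {
        uint8_t dsap;           /* 0xAA */
        uint8_t ssap;           /* 0xAA */
        uint8_t ctrl;           /* 0x03, unnumbered information */
        uint8_t oui[3];         /* 00:00:00 for encapsulated Ethernet */
        uint8_t protocol[2];    /* EtherType, most significant byte first */
};

static void fill_llc_snap(struct llc_snap *hdr, uint16_t ethertype)
{
        hdr->dsap = 0xAA;
        hdr->ssap = 0xAA;
        hdr->ctrl = 0x03;
        memset(hdr->oui, 0, sizeof(hdr->oui));
        hdr->protocol[0] = (uint8_t)(ethertype >> 8);
        hdr->protocol[1] = (uint8_t)(ethertype & 0xFF);
}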
-
-void CsrWifiRouterMaPacketReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- int r;
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiRouterMaPacketReq* mareq = (CsrWifiRouterMaPacketReq*)msg;
- llc_snap_hdr_t *snap;
- u16 snap_protocol;
- CSR_SIGNAL signal;
- CSR_MA_PACKET_REQUEST *req = &signal.u.MaPacketRequest;
- CsrWifiRouterCtrlPortAction controlPortaction;
- u8 *daddr, *saddr;
- u16 interfaceTag = mareq->interfaceTag & 0x00ff;
- int queue;
- netInterface_priv_t *interfacePriv;
-
- if (!mareq->frame || !priv || !priv->smepriv)
- {
- unifi_error(priv, "CsrWifiRouterMaPacketReqHandler: invalid frame/priv/priv->smepriv\n");
- return;
- }
-
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "CsrWifiRouterMaPacketReqHandler: interfaceID >= CSR_WIFI_NUM_INTERFACES.\n");
- return;
- }
-
- interfacePriv = priv->interfacePriv[interfaceTag];
- /* Get pointers to the destination & source MAC addresses */
- daddr = mareq->frame;
- saddr = (mareq->frame + ETH_ALEN);
- /* Point past the MAC header to the LLC/SNAP header of the frame */
- snap = (llc_snap_hdr_t *) (mareq->frame + 2 * ETH_ALEN);
- snap_protocol = ntohs(snap->protocol);
- if((snap_protocol == ETH_P_PAE)
-#ifdef CSR_WIFI_SECURITY_WAPI_ENABLE
- || (snap_protocol == ETH_P_WAI)
-#endif
- )
- {
- queue = UF_UNCONTROLLED_PORT_Q;
- }
- else
- {
- queue = UF_CONTROLLED_PORT_Q;
- }
-
- /* Controlled port restrictions apply to the packets */
- controlPortaction = uf_sme_port_state(priv, daddr, queue, interfaceTag);
- if (controlPortaction != CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_OPEN)
- {
- unifi_warning(priv, "CsrWifiRouterMaPacketReqHandler: (%s)controlled port is closed.\n", (queue == UF_CONTROLLED_PORT_Q)?"":"un");
- if(mareq->cfmRequested)
- {
- CsrWifiRouterMaPacketCfmSend(msg->source,
- interfaceTag,
- CSR_RESULT_FAILURE,
- mareq->hostTag, 0);
- }
- return;
- }
-
- signal.SignalPrimitiveHeader.SignalId = CSR_MA_PACKET_REQUEST_ID;
- /* Store the appHandle in the LSB of the SenderId. */
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(((priv->sme_cli->sender_id & 0xff00) | (unsigned int)msg->source),
- (u8*)&signal.SignalPrimitiveHeader.SenderProcessId);
- signal.SignalPrimitiveHeader.ReceiverProcessId = 0;
-
- /* Fill in the MA-PACKET.req signal */
- memcpy(req->Ra.x, daddr, ETH_ALEN);
- req->Priority = mareq->priority;
- req->TransmitRate = 0; /* Let firmware select the rate*/
- req->VirtualInterfaceIdentifier = uf_get_vif_identifier(interfacePriv->interfaceMode,interfaceTag);
- req->HostTag = mareq->hostTag;
-
- if(mareq->cfmRequested)
- req->TransmissionControl = 0;
- else
- req->TransmissionControl = CSR_NO_CONFIRM_REQUIRED;
-
- r = _sys_packet_req(priv, &signal, mareq->subscriptionHandle,
- mareq->frameLength, mareq->frame, snap_protocol);
-
- if (r && mareq->cfmRequested)
- {
- CsrWifiRouterMaPacketCfmSend(msg->source,interfaceTag,
- CSR_RESULT_FAILURE,
- mareq->hostTag, 0);
- }
- return;
-}
-
-void CsrWifiRouterMaPacketCancelReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
-void CsrWifiRouterCtrlM4TransmitReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiRouterCtrlM4TransmitReq* req = (CsrWifiRouterCtrlM4TransmitReq*)msg;
- int r;
- bulk_data_param_t bulkdata;
- netInterface_priv_t *interfacePriv;
- CSR_SIGNAL m4_signal;
- unsigned long flags;
-
- if (priv == NULL) {
- unifi_error(priv, "CsrWifiRouterCtrlM4TransmitReqHandler: invalid smepriv\n");
- return;
- }
- if (req->interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "M4TransmitReqHandler: interfaceTag >= CSR_WIFI_NUM_INTERFACES\n");
- return;
- }
-
- interfacePriv = priv->interfacePriv[req->interfaceTag];
- spin_lock_irqsave(&priv->m4_lock, flags);
- if (interfacePriv->m4_bulk_data.data_length == 0) {
- spin_unlock_irqrestore(&priv->m4_lock, flags);
- unifi_error(priv, "CsrWifiRouterCtrlM4TransmitReqHandler: invalid buffer\n");
- return;
- }
-
- memcpy(&bulkdata.d[0], &interfacePriv->m4_bulk_data, sizeof(bulk_data_desc_t));
-
- interfacePriv->m4_bulk_data.net_buf_length = 0;
- interfacePriv->m4_bulk_data.data_length = 0;
- interfacePriv->m4_bulk_data.os_data_ptr = interfacePriv->m4_bulk_data.os_net_buf_ptr = NULL;
- m4_signal = interfacePriv->m4_signal;
- spin_unlock_irqrestore(&priv->m4_lock, flags);
-
- bulkdata.d[1].os_data_ptr = NULL;
- bulkdata.d[1].data_length = 0;
-
- interfacePriv->m4_sent = TRUE;
- m4_signal.u.MaPacketRequest.HostTag |= 0x80000000;
- /* Store the hostTag for later verification */
- interfacePriv->m4_hostTag = m4_signal.u.MaPacketRequest.HostTag;
- r = ul_send_signal_unpacked(priv, &m4_signal, &bulkdata);
- unifi_trace(priv, UDBG1,
- "CsrWifiRouterCtrlM4TransmitReqHandler: sent\n");
- if (r) {
- unifi_error(priv,
- "CsrWifiRouterCtrlM4TransmitReqHandler: failed to send signal.\n");
- unifi_net_data_free(priv, &bulkdata.d[0]);
- }
-}
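Taken together, _sys_packet_req() and the handler above implement a deferred hand-off: the EAPOL M4 is stashed under m4_lock, the SME is told it is ready, and the frame is only transmitted when the M4TransmitReq arrives. A generic sketch of that stash-and-hand-off pattern (userspace pthreads and hypothetical names standing in for the driver's spinlock and bulk-data structures):

#include <pthread.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical deferred-frame slot, analogous to interfacePriv->m4_bulk_data. */
struct deferred_frame {
        pthread_mutex_t lock;
        const uint8_t *data;    /* NULL while the slot is empty */
        size_t length;
};

/* Producer: stash the frame, then tell the control plane it is ready. */
static void stash_frame(struct deferred_frame *slot, const uint8_t *data,
                        size_t length, void (*notify_ready)(void))
{
        pthread_mutex_lock(&slot->lock);
        slot->data = data;
        slot->length = length;
        pthread_mutex_unlock(&slot->lock);
        notify_ready();         /* e.g. the M4ReadyToSend indication */
}

/* Consumer: take ownership of the frame, emptying the slot atomically. */
static int take_frame(struct deferred_frame *slot, const uint8_t **data,
                      size_t *length)
{
        int found = 0;

        pthread_mutex_lock(&slot->lock);
        if (slot->data) {
                *data = slot->data;
                *length = slot->length;
                slot->data = NULL;
                slot->length = 0;
                found = 1;
        }
        pthread_mutex_unlock(&slot->lock);
        return found;
}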
-
- /* Reset the station records when the mode is set to CSR_WIFI_ROUTER_CTRL_MODE_NONE */
-static void CsrWifiRouterCtrlResetStationRecordList(unifi_priv_t *priv, u16 interfaceTag)
-{
- u8 i,j;
- CsrWifiRouterCtrlStaInfo_t *staInfo=NULL;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
- unsigned long lock_flags;
-
- /* create a list for sending confirms of un-delivered packets */
- struct list_head send_cfm_list;
-
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "CsrWifiRouterCtrlResetStationRecordList: bad interfaceTag\n");
- return;
- }
-
- INIT_LIST_HEAD(&send_cfm_list);
-
- /* Reset the station record to NULL if mode is NONE */
- for(i = 0; i < UNIFI_MAX_CONNECTIONS; i++) {
- if ((staInfo=interfacePriv->staInfo[i]) != NULL) {
- uf_prepare_send_cfm_list_for_queued_pkts(priv,
- &send_cfm_list,
- &(staInfo->mgtFrames));
- uf_flush_list(priv,&(staInfo->mgtFrames));
- for(j=0;j<MAX_ACCESS_CATOGORY;j++){
- uf_prepare_send_cfm_list_for_queued_pkts(priv,
- &send_cfm_list,
- &(staInfo->dataPdu[j]));
- uf_flush_list(priv,&(staInfo->dataPdu[j]));
- }
-
- spin_lock_irqsave(&priv->staRecord_lock,lock_flags);
- /* Removing station record information from port config array */
- memset(staInfo->peerControlledPort, 0, sizeof(unifi_port_cfg_t));
- staInfo->peerControlledPort->port_action = CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_CLOSED_DISCARD;
- staInfo->peerControlledPort->in_use = FALSE;
- interfacePriv->controlled_data_port.entries_in_use--;
-
- memset(staInfo->peerUnControlledPort, 0, sizeof(unifi_port_cfg_t));
- staInfo->peerUnControlledPort->port_action = CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_CLOSED_DISCARD;
- staInfo->peerUnControlledPort->in_use = FALSE;
- interfacePriv->uncontrolled_data_port.entries_in_use--;
-
- kfree(interfacePriv->staInfo[i]);
- interfacePriv->staInfo[i] = NULL;
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
- }
- }
- /* After the critical region, process the list of frames that requested a cfm
- * and send a cfm to each requestor in turn
- */
- send_auto_ma_packet_confirm(priv, interfacePriv, &send_cfm_list);
-
-#ifdef CSR_SUPPORT_SME
- /* Interface independent: when the mode is NONE or AP, reset the count of packets queued in the driver to 0 */
- switch(interfacePriv->interfaceMode)
- {
- case CSR_WIFI_ROUTER_CTRL_MODE_AP:
- case CSR_WIFI_ROUTER_CTRL_MODE_P2PGO:
- case CSR_WIFI_ROUTER_CTRL_MODE_NONE:
- if (priv->noOfPktQueuedInDriver) {
- unifi_warning(priv, "After reset the noOfPktQueuedInDriver = %x\n", priv->noOfPktQueuedInDriver);
- spin_lock_irqsave(&priv->tx_q_lock,lock_flags);
- priv->noOfPktQueuedInDriver = 0;
- spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags);
- }
- break;
- case CSR_WIFI_ROUTER_CTRL_MODE_IBSS:
- break;
- default:
- unifi_error(priv, "interfacemode is not correct in CsrWifiRouterCtrlResetStationRecordList: debug\n");
- }
-#endif
-
- if (((interfacePriv->controlled_data_port.entries_in_use != 0) || (interfacePriv->uncontrolled_data_port.entries_in_use != 0))
- && (interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_NONE)) {
- /* Log if the number of entries is non-zero (whether negative or positive);
- * we expect the entries to be zero here when the mode is set to NONE
- */
- unifi_trace(priv, UDBG3, "In %s controlled port entries = %d, uncontrolled port entries = %d\n",
- __FUNCTION__, interfacePriv->controlled_data_port.entries_in_use,
- interfacePriv->uncontrolled_data_port.entries_in_use);
- }
-}
-
-void CsrWifiRouterCtrlInterfaceReset(unifi_priv_t *priv, u16 interfaceTag)
-{
- netInterface_priv_t *interfacePriv;
-
- /* create a list for sending confirms of un-delivered packets */
- struct list_head send_cfm_list;
-
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "CsrWifiRouterCtrlInterfaceReset: bad interfaceTag\n");
- return;
- }
-
- interfacePriv = priv->interfacePriv[interfaceTag];
-
- INIT_LIST_HEAD(&send_cfm_list);
-
- /* Enable all queues by default */
- interfacePriv->queueEnabled[0] = 1;
- interfacePriv->queueEnabled[1] = 1;
- interfacePriv->queueEnabled[2] = 1;
- interfacePriv->queueEnabled[3] = 1;
-
- uf_prepare_send_cfm_list_for_queued_pkts(priv,
- &send_cfm_list,
- &(interfacePriv->genericMgtFrames));
- uf_flush_list(priv,&(interfacePriv->genericMgtFrames));
-
- uf_prepare_send_cfm_list_for_queued_pkts(priv,
- &send_cfm_list,
- &(interfacePriv->genericMulticastOrBroadCastMgtFrames));
- uf_flush_list(priv,&(interfacePriv->genericMulticastOrBroadCastMgtFrames));
-
- uf_prepare_send_cfm_list_for_queued_pkts(priv,
- &send_cfm_list,
- &(interfacePriv->genericMulticastOrBroadCastFrames));
-
- uf_flush_list(priv,&(interfacePriv->genericMulticastOrBroadCastFrames));
-
- /* Process the list of frames that requested a cfm
- and send a cfm to each requestor in turn */
- send_auto_ma_packet_confirm(priv, interfacePriv, &send_cfm_list);
-
- /* Reset the station records when the mode is being set to NONE */
- switch(interfacePriv->interfaceMode)
- {
- case CSR_WIFI_ROUTER_CTRL_MODE_STA:
- case CSR_WIFI_ROUTER_CTRL_MODE_P2PCLI:
- case CSR_WIFI_ROUTER_CTRL_MODE_MONITOR:
- case CSR_WIFI_ROUTER_CTRL_MODE_AMP:
- /* station records not available in these modes */
- break;
- default:
- CsrWifiRouterCtrlResetStationRecordList(priv,interfaceTag);
- }
-
- interfacePriv->num_stations_joined = 0;
- interfacePriv->sta_activity_check_enabled = FALSE;
-}
-
-
-void CsrWifiRouterCtrlModeSetReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiRouterCtrlModeSetReq* req = (CsrWifiRouterCtrlModeSetReq*)msg;
-
- if (priv == NULL)
- {
- unifi_error(priv, "CsrWifiRouterCtrlModeSetReqHandler: invalid smepriv\n");
- return;
- }
-
- if (req->interfaceTag < CSR_WIFI_NUM_INTERFACES)
- {
- netInterface_priv_t *interfacePriv = priv->interfacePriv[req->interfaceTag];
-#ifdef CSR_WIFI_SPLIT_PATCH
- u8 old_mode = interfacePriv->interfaceMode;
-#endif
- unifi_trace(priv, UDBG1, "CsrWifiRouterCtrlModeSetReqHandler: interfacePriv->interfaceMode = %d\n",
- interfacePriv->interfaceMode);
-
- interfacePriv->interfaceMode = req->mode;
-
-#ifdef CSR_WIFI_SPLIT_PATCH
- /* Detect a change in mode that requires a switch to/from the AP firmware patch.
- * This should only happen when transitioning in/out of AP modes.
- */
- if (CSR_WIFI_HIP_IS_AP_FW(req->mode) != CSR_WIFI_HIP_IS_AP_FW(old_mode))
- {
- CsrWifiRouterCtrlVersions versions;
- int r;
-
-#ifdef ANDROID_BUILD
- /* Take the wakelock while switching patch */
- unifi_trace(priv, UDBG1, "patch switch: take wake lock\n");
- wake_lock(&unifi_sdio_wake_lock);
-#endif
- unifi_info(priv, "Resetting UniFi with %s patch\n", CSR_WIFI_HIP_IS_AP_FW(req->mode) ? "AP" : "STA");
-
- r = uf_request_firmware_files(priv, UNIFI_FW_STA);
- if (r) {
- unifi_error(priv, "CsrWifiRouterCtrlModeSetReqHandler: Failed to get f/w\n");
- CsrWifiRouterCtrlModeSetCfmSend(msg->source, req->clientData, req->interfaceTag,
- req->mode, CSR_RESULT_FAILURE);
- return;
- }
-
- /* Block the I/O thread */
- priv->bh_thread.block_thread = 1;
-
- /* Reset and download the new patch */
- r = uf_init_hw(priv);
- if (r) {
- unifi_error(priv, "CsrWifiRouterCtrlWifiOnReqHandler: Failed to initialise h/w, error %d\n", r);
- CsrWifiRouterCtrlModeSetCfmSend(msg->source, req->clientData, req->interfaceTag,
- req->mode, CSR_RESULT_FAILURE);
- return;
- }
-
- /* Re-enable the I/O thread */
- priv->bh_thread.block_thread = 0;
-
- /* Get the version information from the core */
- unifi_card_info(priv->card, &priv->card_info);
-
- /* Copy to the unifiio_card_info structure. */
- versions.chipId = priv->card_info.chip_id;
- versions.chipVersion = priv->card_info.chip_version;
- versions.firmwareBuild = priv->card_info.fw_build;
- versions.firmwareHip = priv->card_info.fw_hip_version;
- versions.routerBuild = (char*)CSR_WIFI_VERSION;
- versions.routerHip = (UNIFI_HIP_MAJOR_VERSION << 8) | UNIFI_HIP_MINOR_VERSION;
-
- /* Now that new firmware is running, send a WifiOnInd to the NME. This will
- * cause it to retransfer the MIB.
- */
- CsrWifiRouterCtrlWifiOnIndSend(msg->source, 0, CSR_RESULT_SUCCESS, versions);
-
- /* Store the request so we know where to send the ModeSetCfm */
- priv->pending_mode_set = *req;
- }
- else
-#endif
- {
- /* No patch switch, confirm straightaway */
- CsrWifiRouterCtrlModeSetCfmSend(msg->source, req->clientData, req->interfaceTag,
- req->mode, CSR_RESULT_SUCCESS);
- }
-
- interfacePriv->bssid = req->bssid;
- /* For modes other than AP/P2PGO, the member below is set to FALSE */
- interfacePriv->intraBssEnabled = FALSE;
- /* Initialise the variable bcTimSet with a value
- * other than CSR_WIFI_TIM_SET or CSR_WIFI_TIM_RESET
- */
- interfacePriv->bcTimSet = 0xFF;
- interfacePriv->bcTimSetReqPendingFlag = FALSE;
- /* Initialise the variable bcTimSetReqQueued with a value
- * other than CSR_WIFI_TIM_SET or CSR_WIFI_TIM_RESET
- */
- interfacePriv->bcTimSetReqQueued = 0xFF;
- CsrWifiRouterCtrlInterfaceReset(priv,req->interfaceTag);
-
- if(req->mode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- req->mode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) {
- interfacePriv->protect = req->protection;
- interfacePriv->dtimActive=FALSE;
- interfacePriv->multicastPduHostTag = 0xffffffff;
- /* For AP/P2PGO modes the SME supplies intraBssDistEnabled:
- * for AP it is always TRUE, for P2PGO it is TRUE or FALSE
- * depending on the requirement
- */
- interfacePriv->intraBssEnabled = req->intraBssDistEnabled;
- unifi_trace(priv, UDBG3, "CsrWifiRouterCtrlModeSetReqHandler: IntraBssDisEnabled = %d\n",
- req->intraBssDistEnabled);
- } else if (req->mode == CSR_WIFI_ROUTER_CTRL_MODE_NONE) {
- netif_carrier_off(priv->netdev[req->interfaceTag]);
- interfacePriv->connected = UnifiConnectedUnknown;
- }
- }
- else {
- unifi_error(priv, "CsrWifiRouterCtrlModeSetReqHandler: invalid interfaceTag :%d\n",req->interfaceTag);
- }
-}
-
-void CsrWifiRouterMaPacketResHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-}
-
- /* Delete the station record from the station record database */
-static int peer_delete_record(unifi_priv_t *priv, CsrWifiRouterCtrlPeerDelReq *req)
-{
- u8 j;
- CsrWifiRouterCtrlStaInfo_t *staInfo = NULL;
- unifi_port_config_t *controlledPort;
- unifi_port_config_t *unControlledPort;
- netInterface_priv_t *interfacePriv;
-
- u8 ba_session_idx = 0;
- ba_session_rx_struct *ba_session_rx = NULL;
- ba_session_tx_struct *ba_session_tx = NULL;
-
- /* create a list for sending confirms of un-delivered packets */
- struct list_head send_cfm_list;
-
- unsigned long lock_flags;
-
- if ((req->peerRecordHandle >= UNIFI_MAX_CONNECTIONS) || (req->interfaceTag >= CSR_WIFI_NUM_INTERFACES)) {
- unifi_error(priv, "handle/interfaceTag is not proper, handle = %d, interfaceTag = %d\n", req->peerRecordHandle, req->interfaceTag);
- return CSR_RESULT_FAILURE;
- }
-
- INIT_LIST_HEAD(&send_cfm_list);
-
- interfacePriv = priv->interfacePriv[req->interfaceTag];
- /* remove the station record & make it NULL */
- if ((staInfo=interfacePriv->staInfo[req->peerRecordHandle])!=NULL) {
-
- uf_prepare_send_cfm_list_for_queued_pkts(priv,
- &send_cfm_list,
- &(staInfo->mgtFrames));
-
- uf_flush_list(priv,&(staInfo->mgtFrames));
- for(j=0;j<MAX_ACCESS_CATOGORY;j++){
- uf_prepare_send_cfm_list_for_queued_pkts(priv,
- &send_cfm_list,
- &(staInfo->dataPdu[j]));
- uf_flush_list(priv,&(staInfo->dataPdu[j]));
- }
-
- spin_lock_irqsave(&priv->staRecord_lock,lock_flags);
- /* clear the port configure array info, for the corresponding peer entry */
- controlledPort = &interfacePriv->controlled_data_port;
- unControlledPort = &interfacePriv->uncontrolled_data_port;
-
- unifi_trace(priv, UDBG1, "peer_delete_record: Peer found handle = %d, port in use: cont(%d), unCont(%d)\n",
- req->peerRecordHandle, controlledPort->entries_in_use, unControlledPort->entries_in_use);
-
- memset(staInfo->peerControlledPort, 0, sizeof(unifi_port_cfg_t));
- staInfo->peerControlledPort->port_action = CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_CLOSED_DISCARD;
- staInfo->peerControlledPort->in_use = FALSE;
- if (controlledPort->entries_in_use) {
- controlledPort->entries_in_use--;
- } else {
- unifi_warning(priv, "number of controlled port entries is zero, trying to decrement: debug\n");
- }
-
- memset(staInfo->peerUnControlledPort, 0, sizeof(unifi_port_cfg_t));
- staInfo->peerUnControlledPort->port_action = CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_CLOSED_DISCARD;
- staInfo->peerUnControlledPort->in_use = FALSE;
- if (unControlledPort->entries_in_use) {
- unControlledPort->entries_in_use--;
- } else {
- unifi_warning(priv, "number of uncontrolled port entries is zero, trying to decrement: debug\n");
- }
-
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
- /* update the TIM with zero */
- if (interfacePriv->interfaceMode != CSR_WIFI_ROUTER_CTRL_MODE_IBSS &&
- staInfo->timSet == CSR_WIFI_TIM_SET) {
- unifi_trace(priv, UDBG3, "peer is deleted so TIM updated to 0, in firmware\n");
- update_tim(priv,staInfo->aid,0,req->interfaceTag, req->peerRecordHandle);
- }
-
-
- /* Stop any active BA sessions: for this peer address all BA sessions
- (per TID, per role) are closed */
-
- down(&priv->ba_mutex);
- for(ba_session_idx=0; ba_session_idx < MAX_SUPPORTED_BA_SESSIONS_RX; ba_session_idx++){
- ba_session_rx = priv->interfacePriv[req->interfaceTag]->ba_session_rx[ba_session_idx];
- if(ba_session_rx) {
- if(!memcmp(ba_session_rx->macAddress.a, staInfo->peerMacAddress.a, ETH_ALEN)){
- blockack_session_stop(priv,
- req->interfaceTag,
- CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_RECIPIENT,
- ba_session_rx->tID,
- ba_session_rx->macAddress);
- }
- }
- }
-
- for(ba_session_idx=0; ba_session_idx < MAX_SUPPORTED_BA_SESSIONS_TX; ba_session_idx++){
- ba_session_tx = priv->interfacePriv[req->interfaceTag]->ba_session_tx[ba_session_idx];
- if(ba_session_tx) {
- if(!memcmp(ba_session_tx->macAddress.a, staInfo->peerMacAddress.a, ETH_ALEN)){
- blockack_session_stop(priv,
- req->interfaceTag,
- CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_ORIGINATOR,
- ba_session_tx->tID,
- ba_session_tx->macAddress);
- }
- }
- }
-
- up(&priv->ba_mutex);
-
-#ifdef CSR_SUPPORT_SME
- unifi_trace(priv, UDBG1, "Canceling work queue for STA with AID: %d\n", staInfo->aid);
- cancel_work_sync(&staInfo->send_disconnected_ind_task);
-#endif
-
- spin_lock_irqsave(&priv->staRecord_lock,lock_flags);
-#ifdef CSR_SUPPORT_SME
- interfacePriv->num_stations_joined--;
-
- staInfo->nullDataHostTag = INVALID_HOST_TAG;
-
- if ((interfacePriv->sta_activity_check_enabled) &&
- (interfacePriv->num_stations_joined < STA_INACTIVE_DETECTION_TRIGGER_THRESHOLD))
- {
- unifi_trace(priv, UDBG1, "STOPPING the Inactivity Timer (num of stations = %d)\n", interfacePriv->num_stations_joined);
- interfacePriv->sta_activity_check_enabled = FALSE;
- del_timer_sync(&interfacePriv->sta_activity_check_timer);
- }
-#endif
-
- /* Free the station record for corresponding peer */
- kfree(interfacePriv->staInfo[req->peerRecordHandle]);
- interfacePriv->staInfo[req->peerRecordHandle] = NULL;
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
-
- /* After the critical region, process the list of frames that requested a cfm
- and send a cfm to each requestor in turn */
- send_auto_ma_packet_confirm(priv, interfacePriv, &send_cfm_list);
-
-
- }
- else
- {
- unifi_trace(priv, UDBG3, " peer not found: Delete request Peer handle[%d]\n", req->peerRecordHandle);
- }
-
- return CSR_RESULT_SUCCESS;
-}
-
-void CsrWifiRouterCtrlPeerDelReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- CsrWifiRouterCtrlPeerDelReq* req = (CsrWifiRouterCtrlPeerDelReq*)msg;
- CsrResult status = CSR_RESULT_SUCCESS;
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- netInterface_priv_t *interfacePriv;
-
- unifi_trace(priv, UDBG2, "entering CsrWifiRouterCtrlPeerDelReqHandler\n");
- if (priv == NULL)
- {
- unifi_error(priv, "CsrWifiRouterCtrlPeerDelReqHandler: invalid smepriv\n");
- return;
- }
-
- if (req->interfaceTag >= CSR_WIFI_NUM_INTERFACES)
- {
- unifi_error(priv, "CsrWifiRouterCtrlPeerDelReqHandler: bad interfaceTag\n");
- return;
- }
-
- interfacePriv = priv->interfacePriv[req->interfaceTag];
-
- switch(interfacePriv->interfaceMode)
- {
- case CSR_WIFI_ROUTER_CTRL_MODE_AP:
- case CSR_WIFI_ROUTER_CTRL_MODE_IBSS:
- case CSR_WIFI_ROUTER_CTRL_MODE_P2PGO:
- /* Remove the station from the station record database */
- status = peer_delete_record(priv, req);
- break;
- case CSR_WIFI_ROUTER_CTRL_MODE_STA:
- case CSR_WIFI_ROUTER_CTRL_MODE_P2PCLI:
- default:
- /* No station record to maintain in these modes */
- break;
- }
-
- CsrWifiRouterCtrlPeerDelCfmSend(msg->source,req->clientData,req->interfaceTag,status);
- unifi_trace(priv, UDBG2, "leaving CsrWifiRouterCtrlPeerDelReqHandler \n");
-}
-
- /* Add the new station to the station record database */
-static int peer_add_new_record(unifi_priv_t *priv,CsrWifiRouterCtrlPeerAddReq *req,u32 *handle)
-{
- u8 i, powerModeTemp = 0;
- u8 freeSlotFound = FALSE;
- CsrWifiRouterCtrlStaInfo_t *newRecord = NULL;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[req->interfaceTag];
- u32 currentTime, currentTimeHi;
- unsigned long lock_flags;
-
- if (req->interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "peer_add_new_record: bad interfaceTag\n");
- return CSR_RESULT_FAILURE;
- }
-
- currentTime = CsrTimeGet(&currentTimeHi);
-
- for(i = 0; i < UNIFI_MAX_CONNECTIONS; i++) {
- if(interfacePriv->staInfo[i] == NULL) {
- /* Slot is empty, so it can be used for the station record */
- freeSlotFound = TRUE;
- *handle = i;
-
- /* Allocate the new station record. To avoid the race that could occur between ADD_PEER &
- * DEL_PEER, the allocation is made with GFP_ATOMIC rather than a sleeping allocation
- */
- newRecord = kmalloc(sizeof(CsrWifiRouterCtrlStaInfo_t), GFP_ATOMIC);
- if (!newRecord) {
- unifi_error(priv, "failed to allocate the %d bytes of mem for station record\n",
- sizeof(CsrWifiRouterCtrlStaInfo_t));
- return CSR_RESULT_FAILURE;
- }
-
- unifi_trace(priv, UDBG1, "peer_add_new_record: handle = %d AID = %d addr = %x:%x:%x:%x:%x:%x LI=%u\n",
- *handle, req->associationId, req->peerMacAddress.a[0], req->peerMacAddress.a[1], req->peerMacAddress.a[2],
- req->peerMacAddress.a[3], req->peerMacAddress.a[4], req->peerMacAddress.a[5],
- req->staInfo.listenIntervalInTus);
-
- /* Hold the lock until the station record is updated */
- spin_lock_irqsave(&priv->staRecord_lock,lock_flags);
-
- interfacePriv->staInfo[i] = newRecord;
- /* Initialize the record*/
- memset(newRecord,0,sizeof(CsrWifiRouterCtrlStaInfo_t));
- /* update the station record */
- memcpy(newRecord->peerMacAddress.a, req->peerMacAddress.a, ETH_ALEN);
- newRecord->wmmOrQosEnabled = req->staInfo.wmmOrQosEnabled;
-
- /* maxSpLength is a bitmap in the qosInfo field, so convert accordingly */
- newRecord->maxSpLength = req->staInfo.maxSpLength * 2;
-
- /* A Max SP of 0 means any number of packets. Since we buffer only 512
- packets, it is hard coded to 512 for the moment */
-
- if(newRecord->maxSpLength == 0)
- newRecord->maxSpLength=512;
-
- newRecord->assignedHandle = i;
-
- /* Copy the power-save mode of each access category (trigger/delivery enabled or disabled) */
- powerModeTemp = (u8) ((req->staInfo.powersaveMode >> 4) & 0xff);
-
- if(!(req->staInfo.powersaveMode & 0x0001))
- newRecord->powersaveMode[UNIFI_TRAFFIC_Q_BK]= CSR_WIFI_AC_LEGACY_POWER_SAVE;
- else
- newRecord->powersaveMode[UNIFI_TRAFFIC_Q_BK]= powerModeTemp & 0x03;
-
- if(!(req->staInfo.powersaveMode & 0x0002))
- newRecord->powersaveMode[UNIFI_TRAFFIC_Q_BE]= CSR_WIFI_AC_LEGACY_POWER_SAVE;
- else
- newRecord->powersaveMode[UNIFI_TRAFFIC_Q_BE]= ((powerModeTemp & 0x0C)>> 2);
-
- if(!(req->staInfo.powersaveMode & 0x0004))
- newRecord->powersaveMode[UNIFI_TRAFFIC_Q_VI]= CSR_WIFI_AC_LEGACY_POWER_SAVE;
- else
- newRecord->powersaveMode[UNIFI_TRAFFIC_Q_VI]= ((powerModeTemp & 0x30)>> 4);
-
- if(!(req->staInfo.powersaveMode & 0x0008))
- newRecord->powersaveMode[UNIFI_TRAFFIC_Q_VO]= CSR_WIFI_AC_LEGACY_POWER_SAVE;
- else
- newRecord->powersaveMode[UNIFI_TRAFFIC_Q_VO]= ((powerModeTemp & 0xC0)>> 6);
-
- {
- u8 k;
- for(k=0; k< MAX_ACCESS_CATOGORY ;k++)
- unifi_trace(priv, UDBG2, "peer_add_new_record: WMM : %d ,AC %d, powersaveMode %x \n",
- req->staInfo.wmmOrQosEnabled,k,newRecord->powersaveMode[k]);
- }
-
- unifi_trace(priv, UDBG3, "newRecord->wmmOrQosEnabled : %d , MAX SP : %d\n",
- newRecord->wmmOrQosEnabled,newRecord->maxSpLength);
-
- /* Initialize the mgtFrames & data Pdu list */
- {
- u8 j;
- INIT_LIST_HEAD(&newRecord->mgtFrames);
- for(j = 0; j < MAX_ACCESS_CATOGORY; j++) {
- INIT_LIST_HEAD(&newRecord->dataPdu[j]);
- }
- }
-
- newRecord->lastActivity = currentTime;
- newRecord->activity_flag = TRUE;
-
- /* Release the lock now that the station record is updated */
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
-
- /* Initial port actions for the peer are set with the information below */
- configure_data_port(priv, CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_OPEN, &newRecord->peerMacAddress,
- UF_UNCONTROLLED_PORT_Q, req->interfaceTag);
-
- if (interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_IBSS) {
- configure_data_port(priv, CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_OPEN, &newRecord->peerMacAddress,
- UF_CONTROLLED_PORT_Q, req->interfaceTag);
- } else {
- configure_data_port(priv, CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_CLOSED_DISCARD, &newRecord->peerMacAddress,
- UF_CONTROLLED_PORT_Q, req->interfaceTag);
- }
-
-
- spin_lock_irqsave(&priv->staRecord_lock,lock_flags);
- /* The port status must already be set before the Add Peer request is handled */
- newRecord->peerControlledPort = uf_sme_port_config_handle(priv, newRecord->peerMacAddress.a,
- UF_CONTROLLED_PORT_Q, req->interfaceTag);
- newRecord->peerUnControlledPort = uf_sme_port_config_handle(priv, newRecord->peerMacAddress.a,
- UF_UNCONTROLLED_PORT_Q, req->interfaceTag);
-
- if (!newRecord->peerControlledPort || !newRecord->peerUnControlledPort) {
- /* Release the lock as the station record failed to update */
- unifi_warning(priv, "Un/ControlledPort record not found in port configuration array index = %d\n", i);
- kfree(interfacePriv->staInfo[i]);
- interfacePriv->staInfo[i] = NULL;
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
- return CSR_RESULT_FAILURE;
- }
-
- newRecord->currentPeerState = CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_ACTIVE;
-
- /* changes done during block ack handling */
- newRecord->txSuspend = FALSE;
-
- /*U-APSD related data structure*/
- newRecord->timRequestPendingFlag = FALSE;
-
- /* Initialise the variable updateTimReqQueued with a value
- * other than CSR_WIFI_TIM_SET or CSR_WIFI_TIM_RESET
- */
- newRecord->updateTimReqQueued = 0xFF;
- newRecord->timSet = CSR_WIFI_TIM_RESET;
- newRecord->uapsdActive = FALSE;
- newRecord->noOfSpFramesSent = 0;
- newRecord->triggerFramePriority = CSR_QOS_UP0;
-
- /* The protection bit is updated once the port opens for the corresponding peer in
- * the routerPortConfigure request */
-
- /* update the association ID */
- newRecord->aid = req->associationId;
-
-#ifdef CSR_SUPPORT_SME
- interfacePriv->num_stations_joined++;
- newRecord->interfacePriv = interfacePriv;
- newRecord->listenIntervalInTus = req->staInfo.listenIntervalInTus;
- newRecord->nullDataHostTag = INVALID_HOST_TAG;
-
- INIT_WORK(&newRecord->send_disconnected_ind_task, uf_send_disconnected_ind_wq);
-
- if(!(interfacePriv->sta_activity_check_enabled) &&
- (interfacePriv->num_stations_joined >= STA_INACTIVE_DETECTION_TRIGGER_THRESHOLD)){
- unifi_trace(priv, UDBG1,
- "peer_add_new_record: STARTING the Inactivity Timer (num of stations = %d)",
- interfacePriv->num_stations_joined);
-
- interfacePriv->sta_activity_check_enabled = TRUE;
- interfacePriv->sta_activity_check_timer.function = check_inactivity_timer_expire_func;
- interfacePriv->sta_activity_check_timer.data = (unsigned long)interfacePriv;
-
- init_timer(&interfacePriv->sta_activity_check_timer);
- mod_timer(&interfacePriv->sta_activity_check_timer,
- (jiffies + usecs_to_jiffies(STA_INACTIVE_DETECTION_TIMER_INTERVAL * 1000 * 1000)));
-
- }
-#endif
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
- break;
- }
- }
-
- if(!freeSlotFound) {
- unifi_error(priv, "Limited connectivity, Free slot not found for station record addition\n");
- return CSR_RESULT_FAILURE;
- }
- return CSR_RESULT_SUCCESS;
-}
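Most of peer_add_new_record() above is bookkeeping, but the powersaveMode decode is dense: the low four bits select, per access category (BK, BE, VI, VO), whether the AC keeps the legacy power-save scheme, and bits 4..11 then supply a two-bit mode per AC (trigger/delivery enabled or disabled, per the original comment). A compact sketch of the same decode, generalised into a loop (names and the AC ordering are hypothetical; the logic mirrors the code above):

#include <stdint.h>

enum ac { AC_BK, AC_BE, AC_VI, AC_VO, AC_COUNT };

#define LEGACY_POWER_SAVE 0xFFu /* stand-in for CSR_WIFI_AC_LEGACY_POWER_SAVE */

static void decode_powersave_mode(uint16_t powersave_mode, uint8_t mode[AC_COUNT])
{
        /* Bits 4..11 carry a 2-bit mode for each of the four ACs. */
        uint8_t per_ac_bits = (uint8_t)((powersave_mode >> 4) & 0xFF);
        int ac;

        for (ac = AC_BK; ac < AC_COUNT; ac++) {
                if (!(powersave_mode & (1u << ac)))
                        mode[ac] = LEGACY_POWER_SAVE;   /* AC stays on legacy PS */
                else
                        mode[ac] = (per_ac_bits >> (2 * ac)) & 0x03;
        }
}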
-
-#ifdef CSR_SUPPORT_SME
-static void check_inactivity_timer_expire_func(unsigned long data)
-{
- struct unifi_priv *priv;
- CsrWifiRouterCtrlStaInfo_t *sta_record = NULL;
- u8 i = 0;
- u32 now;
- u32 inactive_time;
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *) data;
-
- if (!interfacePriv)
- {
- return;
- }
-
- priv = interfacePriv->privPtr;
-
- if (interfacePriv->InterfaceTag >= CSR_WIFI_NUM_INTERFACES)
- {
- unifi_error(priv, "check_inactivity_timer_expire_func: Invalid interfaceTag\n");
- return;
- }
-
- /* Run the inactivity check for each connected station */
- now = CsrTimeGet(NULL);
-
- for(i = 0; i < UNIFI_MAX_CONNECTIONS; i++) {
- if(interfacePriv->staInfo[i] != NULL) {
- sta_record = interfacePriv->staInfo[i];
-
- if (sta_record->activity_flag == TRUE){
- sta_record->activity_flag = FALSE;
- sta_record->lastActivity = now;
- continue;
- }
-
- if (sta_record->lastActivity > now)
- {
- /* simple timer wrap (for 1 wrap) */
- inactive_time = CsrTimeAdd((u32)CsrTimeSub(CSR_SCHED_TIME_MAX, sta_record->lastActivity), now);
- }
- else
- {
- inactive_time = (u32)CsrTimeSub(now, sta_record->lastActivity);
- }
-
- if (inactive_time >= STA_INACTIVE_TIMEOUT_VAL)
- {
- unifi_trace(priv, UDBG1, "STA is Inactive - AID = %d inactive_time = %d\n",
- sta_record->aid,
- inactive_time);
-
- /* The station is inactive. If it is in active mode, send a NULL frame;
- * the station should acknowledge it, and if no acknowledgement is
- * received the station is thrown out.
- * If the station is in power save, set its TIM bit so that it wakes
- * up and registers some activity through a PS-Poll or trigger frame.
- */
- if (sta_record->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_ACTIVE)
- {
- unifi_trace(priv, UDBG1, "STA power save state - Active, send a NULL frame to check if it is ALIVE\n");
- uf_send_nulldata ( priv,
- sta_record->interfacePriv->InterfaceTag,
- sta_record->peerMacAddress.a,
- CSR_CONTENTION,
- sta_record);
- }
- else if (sta_record->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_POWER_SAVE)
- {
- if((sta_record->timSet == CSR_WIFI_TIM_SET) ||
- (sta_record->timSet == CSR_WIFI_TIM_SETTING))
- {
- unifi_trace(priv, UDBG1, "STA power save state - PS, TIM is already SET\n");
-
- /* If TIM is set and we do not have any activity for
- * more than 3 listen intervals then send a disconnected
- * indication to SME, to delete the station from station
- * record list.
- * The inactivity is already more than STA_INACTIVE_TIMEOUT_VAL
- * and this check also covers the case where the listen interval is
- * larger than STA_INACTIVE_TIMEOUT_VAL.
- */
- if (inactive_time > (3 * (sta_record->listenIntervalInTus * 1024)))
- {
- unifi_trace(priv, UDBG1, "STA is inactive for more than 3 listen intervals\n");
- queue_work( priv->unifi_workqueue,
- &sta_record->send_disconnected_ind_task);
- }
-
- }
- else
- {
- unifi_trace(priv, UDBG1, "STA power save state - PS, update TIM to see if it is ALIVE\n");
- update_tim(priv,
- sta_record->aid,
- CSR_WIFI_TIM_SET,
- interfacePriv->InterfaceTag,
- sta_record->assignedHandle);
- }
- }
- }
- }
- }
-
- /* Re-arm the inactivity check timer */
- mod_timer(&interfacePriv->sta_activity_check_timer,
- (jiffies + usecs_to_jiffies(STA_INACTIVE_DETECTION_TIMER_INTERVAL * 1000 * 1000)));
-
-}
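The inactivity check above works on 32-bit timestamps and therefore handles a single counter wrap explicitly, and the three-listen-interval comparison converts the interval from TUs (1 TU = 1024 microseconds) into the timestamp unit. A small sketch of the same arithmetic; it assumes, as the x1024 conversion above suggests, that the timestamps are in microseconds, and shows that plain unsigned subtraction already copes with one wrap:

#include <stdint.h>

#define TU_TO_US(tu)    ((uint32_t)(tu) * 1024u)        /* 1 TU = 1024 microseconds */

/* Elapsed time on a free-running 32-bit counter; modular arithmetic
 * handles at most one wrap between 'last' and 'now'. */
static uint32_t elapsed_us(uint32_t now, uint32_t last)
{
        return now - last;
}

/* Non-zero if the peer has been idle for more than three listen intervals. */
static int idle_too_long(uint32_t now, uint32_t last_activity,
                         uint16_t listen_interval_tus)
{
        return elapsed_us(now, last_activity) > 3u * TU_TO_US(listen_interval_tus);
}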
-
-
-void uf_send_disconnected_ind_wq(struct work_struct *work)
-{
-
- CsrWifiRouterCtrlStaInfo_t *staInfo = container_of(work, CsrWifiRouterCtrlStaInfo_t, send_disconnected_ind_task);
- unifi_priv_t *priv;
- u16 interfaceTag;
- struct list_head send_cfm_list;
- u8 j;
-
- if(!staInfo) {
- return;
- }
-
- if(!staInfo->interfacePriv) {
- return;
- }
-
- priv = staInfo->interfacePriv->privPtr;
- interfaceTag = staInfo->interfacePriv->InterfaceTag;
-
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "uf_send_disconnected_ind_wq: invalid interfaceTag\n");
- return;
- }
-
- /* The SME/NME may be waiting for confirmations for frames requested to this station.
- * So loop through the buffered frames for this station and, if confirmation is
- * requested, send an auto confirmation with failure status. Also flush the frames so
- * that they are not processed again in the PEER_DEL_REQ handler.
- */
- INIT_LIST_HEAD(&send_cfm_list);
-
- uf_prepare_send_cfm_list_for_queued_pkts(priv,
- &send_cfm_list,
- &(staInfo->mgtFrames));
-
- uf_flush_list(priv, &(staInfo->mgtFrames));
-
- for(j = 0; j < MAX_ACCESS_CATOGORY; j++){
- uf_prepare_send_cfm_list_for_queued_pkts(priv,
- &send_cfm_list,
- &(staInfo->dataPdu[j]));
-
- uf_flush_list(priv,&(staInfo->dataPdu[j]));
- }
-
- send_auto_ma_packet_confirm(priv, staInfo->interfacePriv, &send_cfm_list);
-
- unifi_warning(priv, "uf_send_disconnected_ind_wq: Router Disconnected IND Peer (%x-%x-%x-%x-%x-%x)\n",
- staInfo->peerMacAddress.a[0],
- staInfo->peerMacAddress.a[1],
- staInfo->peerMacAddress.a[2],
- staInfo->peerMacAddress.a[3],
- staInfo->peerMacAddress.a[4],
- staInfo->peerMacAddress.a[5]);
-
- CsrWifiRouterCtrlConnectedIndSend(priv->CSR_WIFI_SME_IFACEQUEUE,
- 0,
- staInfo->interfacePriv->InterfaceTag,
- staInfo->peerMacAddress,
- CSR_WIFI_ROUTER_CTRL_PEER_DISCONNECTED);
-
-
- return;
-}
-
-
-#endif
-void CsrWifiRouterCtrlPeerAddReqHandler(void* drvpriv,CsrWifiFsmEvent* msg)
-{
- CsrWifiRouterCtrlPeerAddReq* req = (CsrWifiRouterCtrlPeerAddReq*)msg;
- CsrResult status = CSR_RESULT_SUCCESS;
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- u32 handle = 0;
- netInterface_priv_t *interfacePriv;
-
- unifi_trace(priv, UDBG2, "entering CsrWifiRouterCtrlPeerAddReqHandler \n");
- if (priv == NULL)
- {
- unifi_error(priv, "CsrWifiRouterCtrlPeerAddReqHandler: invalid smepriv\n");
- return;
- }
-
- if (req->interfaceTag >= CSR_WIFI_NUM_INTERFACES)
- {
- unifi_error(priv, "CsrWifiRouterCtrlPeerAddReqHandler: bad interfaceTag\n");
- return;
- }
-
- interfacePriv = priv->interfacePriv[req->interfaceTag];
-
- switch(interfacePriv->interfaceMode)
- {
- case CSR_WIFI_ROUTER_CTRL_MODE_AP:
- case CSR_WIFI_ROUTER_CTRL_MODE_IBSS:
- case CSR_WIFI_ROUTER_CTRL_MODE_P2PGO:
- /* Add station record */
- status = peer_add_new_record(priv,req,&handle);
- break;
- case CSR_WIFI_ROUTER_CTRL_MODE_STA:
- case CSR_WIFI_ROUTER_CTRL_MODE_P2PCLI:
- default:
- /* No station record to maintain in these modes */
- break;
- }
-
- CsrWifiRouterCtrlPeerAddCfmSend(msg->source,req->clientData,req->interfaceTag,req->peerMacAddress,handle,status);
- unifi_trace(priv, UDBG2, "leaving CsrWifiRouterCtrlPeerAddReqHandler \n");
-}
-
-void CsrWifiRouterCtrlPeerUpdateReqHandler(void* drvpriv,CsrWifiFsmEvent* msg)
-{
- CsrWifiRouterCtrlPeerUpdateReq* req = (CsrWifiRouterCtrlPeerUpdateReq*)msg;
- CsrResult status = CSR_RESULT_SUCCESS;
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
-
- unifi_trace(priv, UDBG2, "entering CsrWifiRouterCtrlPeerUpdateReqHandler \n");
- if (priv == NULL)
- {
- unifi_error(priv, "CsrWifiRouterCtrlPeerUpdateReqHandler: invalid smepriv\n");
- return;
- }
-
- CsrWifiRouterCtrlPeerUpdateCfmSend(msg->source,req->clientData,req->interfaceTag,status);
- unifi_trace(priv, UDBG2, "leaving CsrWifiRouterCtrlPeerUpdateReqHandler \n");
-}
-
-
- void CsrWifiRouterCtrlRawSdioDeinitialiseReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-    /* This will never be called as it is intercepted in userspace */
-}
-
-void CsrWifiRouterCtrlRawSdioInitialiseReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-    /* This will never be called as it is intercepted in userspace */
-}
-
-void
-uf_send_ba_err_wq(struct work_struct *work)
-{
- ba_session_rx_struct *ba_session = container_of(work, ba_session_rx_struct, send_ba_err_task);
- unifi_priv_t *priv;
-
- if(!ba_session) {
- return;
- }
-
- if(!ba_session->interfacePriv) {
- return;
- }
-
- priv = ba_session->interfacePriv->privPtr;
-
- if (ba_session->interfacePriv->InterfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "%s: invalid interfaceTag\n", __FUNCTION__);
- return;
- }
-
- unifi_warning(priv, "%s: Calling CsrWifiRouterCtrlBlockAckErrorIndSend(%d, %d, %d, %d, %x:%x:%x:%x:%x:%x, %d)\n",
- __FUNCTION__,
- priv->CSR_WIFI_SME_IFACEQUEUE,
- 0,
- ba_session->interfacePriv->InterfaceTag,
- ba_session->tID,
- ba_session->macAddress.a[0],
- ba_session->macAddress.a[1],
- ba_session->macAddress.a[2],
- ba_session->macAddress.a[3],
- ba_session->macAddress.a[4],
- ba_session->macAddress.a[5],
- CSR_RESULT_SUCCESS
- );
- CsrWifiRouterCtrlBlockAckErrorIndSend(priv->CSR_WIFI_SME_IFACEQUEUE,
- 0,
- ba_session->interfacePriv->InterfaceTag,
- ba_session->tID,
- ba_session->macAddress,
- CSR_RESULT_SUCCESS);
-}
-
-
-static void ba_session_terminate_timer_func(unsigned long data)
-{
- ba_session_rx_struct *ba_session = (ba_session_rx_struct*)data;
- struct unifi_priv *priv;
-
- if(!ba_session) {
- return;
- }
-
- if(!ba_session->interfacePriv) {
- return;
- }
-
- priv = ba_session->interfacePriv->privPtr;
-
- if (ba_session->interfacePriv->InterfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "%s: invalid interfaceTag\n", __FUNCTION__);
- return;
- }
-
- queue_work(priv->unifi_workqueue, &ba_session->send_ba_err_task);
-}
-
-
-u8 blockack_session_stop(unifi_priv_t *priv,
- u16 interfaceTag,
- CsrWifiRouterCtrlBlockAckRole role,
- u16 tID,
- CsrWifiMacAddress macAddress)
-{
- netInterface_priv_t *interfacePriv;
- ba_session_rx_struct *ba_session_rx = NULL;
- ba_session_tx_struct *ba_session_tx = NULL;
- u8 ba_session_idx = 0;
- int i;
-
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "%s: bad interfaceTag = %d\n", __FUNCTION__, interfaceTag);
- return FALSE;
- }
-
- interfacePriv = priv->interfacePriv[interfaceTag];
-
- if(!interfacePriv) {
- unifi_error(priv, "%s: bad interfacePriv\n", __FUNCTION__);
- return FALSE;
- }
-
- if(tID > 15) {
- unifi_error(priv, "%s: bad tID = %d\n", __FUNCTION__, tID);
- return FALSE;
- }
-
- if((role != CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_ORIGINATOR) &&
- (role != CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_RECIPIENT)) {
- unifi_error(priv, "%s: bad role = %d\n", __FUNCTION__, role);
- return FALSE;
- }
-
- unifi_warning(priv,
- "%s: stopping ba_session for peer = %pM role = %d tID = %d\n",
- __func__, macAddress.a, role, tID);
-
-    /* find the BA session (per station, per TID, per role) for which the stop is requested */
- if (role == CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_RECIPIENT){
- for (ba_session_idx =0; ba_session_idx < MAX_SUPPORTED_BA_SESSIONS_RX; ba_session_idx++){
-
- ba_session_rx = interfacePriv->ba_session_rx[ba_session_idx];
-
- if(ba_session_rx){
- if ((!memcmp(ba_session_rx->macAddress.a, macAddress.a, ETH_ALEN)) && (ba_session_rx->tID == tID)){
- break;
- }
- }
- }
-
- if (!ba_session_rx || (ba_session_idx == MAX_SUPPORTED_BA_SESSIONS_RX)) {
- unifi_error(priv, "%s: bad ba_session for Rx [tID=%d]\n", __FUNCTION__, tID);
- return FALSE;
- }
-
-
- if(ba_session_rx->timeout) {
- del_timer_sync(&ba_session_rx->timer);
- }
- cancel_work_sync(&ba_session_rx->send_ba_err_task);
- for (i = 0; i < ba_session_rx->wind_size; i++) {
- if(ba_session_rx->buffer[i].active) {
- frame_desc_struct *frame_desc = &ba_session_rx->buffer[i];
- unifi_net_data_free(priv, &frame_desc->bulkdata.d[0]);
- }
- }
- kfree(ba_session_rx->buffer);
-
- interfacePriv->ba_session_rx[ba_session_idx] = NULL;
- kfree(ba_session_rx);
- }else if (role == CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_ORIGINATOR){
- for (ba_session_idx =0; ba_session_idx < MAX_SUPPORTED_BA_SESSIONS_TX; ba_session_idx++){
- ba_session_tx = interfacePriv->ba_session_tx[ba_session_idx];
- if(ba_session_tx){
- if ((!memcmp(ba_session_tx->macAddress.a, macAddress.a, ETH_ALEN)) && (ba_session_tx->tID == tID)){
- break;
- }
- }
- }
-
- if (!ba_session_tx || (ba_session_idx == MAX_SUPPORTED_BA_SESSIONS_TX)) {
- unifi_error(priv, "%s: bad ba_session for Tx [tID=%d]\n", __FUNCTION__, tID);
- return FALSE;
- }
- interfacePriv->ba_session_tx[ba_session_idx] = NULL;
- kfree(ba_session_tx);
-
- }
-
- return TRUE;
-}
-
-
-void CsrWifiRouterCtrlBlockAckDisableReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- CsrWifiRouterCtrlBlockAckDisableReq* req = (CsrWifiRouterCtrlBlockAckDisableReq*)msg;
- u8 r;
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
-
- unifi_trace(priv, UDBG6, "%s: in ok\n", __FUNCTION__);
-
- down(&priv->ba_mutex);
- r = blockack_session_stop(priv,
- req->interfaceTag,
- req->role,
- req->trafficStreamID,
- req->macAddress);
- up(&priv->ba_mutex);
-
- CsrWifiRouterCtrlBlockAckDisableCfmSend(msg->source,
- req->clientData,
- req->interfaceTag,
- r?CSR_RESULT_SUCCESS:CSR_RESULT_FAILURE);
-
- unifi_trace(priv, UDBG6, "%s: out ok\n", __FUNCTION__);
-}
-
-
-u8 blockack_session_start(unifi_priv_t *priv,
- u16 interfaceTag,
- u16 tID,
- u16 timeout,
- CsrWifiRouterCtrlBlockAckRole role,
- u16 wind_size,
- u16 start_sn,
- CsrWifiMacAddress macAddress
- )
-{
- netInterface_priv_t *interfacePriv;
- ba_session_rx_struct *ba_session_rx = NULL;
- ba_session_tx_struct *ba_session_tx = NULL;
- u8 ba_session_idx = 0;
-
-
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "%s: bad interfaceTag = %d\n", __FUNCTION__, interfaceTag);
- return FALSE;
- }
-
- interfacePriv = priv->interfacePriv[interfaceTag];
-
- if(!interfacePriv) {
- unifi_error(priv, "%s: bad interfacePriv\n", __FUNCTION__);
- return FALSE;
- }
-
- if(tID > 15)
- {
- unifi_error(priv, "%s: bad tID=%d\n", __FUNCTION__, tID);
- return FALSE;
- }
-
- if(wind_size > MAX_BA_WIND_SIZE) {
- unifi_error(priv, "%s: bad wind_size = %d\n", __FUNCTION__, wind_size);
- return FALSE;
- }
-
- if(role != CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_ORIGINATOR &&
- role != CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_RECIPIENT) {
- unifi_error(priv, "%s: bad role = %d\n", __FUNCTION__, role);
- return FALSE;
- }
-
- unifi_warning(priv,
- "%s: ba session with peer= (%pM)\n", __func__,
- macAddress.a);
-
- unifi_warning(priv, "%s: ba session for tID=%d timeout=%d role=%d wind_size=%d start_sn=%d\n", __FUNCTION__,
- tID,
- timeout,
- role,
- wind_size,
- start_sn);
-
-    /* Check whether a BA session already exists for this station, TID and role.
-       If it exists, update its parameters; otherwise create a new BA session. */
- if (role == CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_ORIGINATOR){
- for (ba_session_idx =0; ba_session_idx < MAX_SUPPORTED_BA_SESSIONS_TX; ba_session_idx++){
- ba_session_tx = interfacePriv->ba_session_tx[ba_session_idx];
- if (ba_session_tx) {
- if ((!memcmp(ba_session_tx->macAddress.a, macAddress.a, ETH_ALEN)) && (ba_session_tx->tID == tID)){
- unifi_warning(priv, "%s: ba_session for Tx already exists\n", __FUNCTION__);
- return TRUE;
- }
- }
- }
-
-        /* we have to create a new ba_session_tx struct */
- ba_session_tx = NULL;
-
-        /* loop until an empty BA session slot is found and save the session there */
- for (ba_session_idx=0; ba_session_idx < MAX_SUPPORTED_BA_SESSIONS_TX ; ba_session_idx++){
- if (!(interfacePriv->ba_session_tx[ba_session_idx])){
- break;
- }
- }
- if (ba_session_idx == MAX_SUPPORTED_BA_SESSIONS_TX){
- unifi_error(priv, "%s: All ba_session used for Tx, NO free session available\n", __FUNCTION__);
- return FALSE;
- }
-
- /* create and populate the new BA session structure */
- ba_session_tx = kzalloc(sizeof(ba_session_tx_struct), GFP_KERNEL);
- if (!ba_session_tx) {
- unifi_error(priv, "%s: kmalloc failed for ba_session_tx\n", __FUNCTION__);
- return FALSE;
- }
-
- ba_session_tx->interfacePriv = interfacePriv;
- ba_session_tx->tID = tID;
- ba_session_tx->macAddress = macAddress;
-
- interfacePriv->ba_session_tx[ba_session_idx] = ba_session_tx;
-
- } else if (role == CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_RECIPIENT){
-
- for (ba_session_idx =0; ba_session_idx < MAX_SUPPORTED_BA_SESSIONS_RX; ba_session_idx++){
- ba_session_rx = interfacePriv->ba_session_rx[ba_session_idx];
- if (ba_session_rx) {
- if ((!memcmp(ba_session_rx->macAddress.a, macAddress.a, ETH_ALEN)) && (ba_session_rx->tID == tID)){
- unifi_warning(priv, "%s: ba_session for Rx[tID = %d] already exists\n", __FUNCTION__, tID);
-
- if(ba_session_rx->wind_size == wind_size &&
- ba_session_rx->timeout == timeout &&
- ba_session_rx->expected_sn == start_sn) {
- return TRUE;
- }
-
- if(ba_session_rx->timeout) {
- del_timer_sync(&ba_session_rx->timer);
- ba_session_rx->timeout = 0;
- }
-
- if(ba_session_rx->wind_size != wind_size) {
- blockack_session_stop(priv, interfaceTag, role, tID, macAddress);
- } else {
- if (timeout) {
- ba_session_rx->timeout = timeout;
- ba_session_rx->timer.function = ba_session_terminate_timer_func;
- ba_session_rx->timer.data = (unsigned long)ba_session_rx;
- init_timer(&ba_session_rx->timer);
- mod_timer(&ba_session_rx->timer, (jiffies + usecs_to_jiffies((ba_session_rx->timeout) * 1024)));
- }
-                        /*
-                         * The starting sequence number remains the same if the BA
-                         * enable request is issued only to update BA parameters. If
-                         * it is not the same, we scroll our window to the new starting
-                         * sequence number. This can happen if the DELBA frame from the
-                         * originator is lost and we then receive an ADDBA frame with a
-                         * new SSN (see the conceptual sketch after this function).
-                         */
- if(ba_session_rx->start_sn != start_sn) {
- scroll_ba_window(priv, interfacePriv, ba_session_rx, start_sn);
- }
- return TRUE;
- }
- }
- }
- }
-
-        /* We may have a valid BA session pointer here or an uninitialized
-           one, but in either case a new session has to be created,
-           so re-initialize the ba_session pointer. */
- ba_session_rx = NULL;
-
-        /* loop until an empty BA session slot is found and save the session there */
- for (ba_session_idx=0; ba_session_idx < MAX_SUPPORTED_BA_SESSIONS_RX ; ba_session_idx++){
- if (!(interfacePriv->ba_session_rx[ba_session_idx])){
- break;
- }
- }
- if (ba_session_idx == MAX_SUPPORTED_BA_SESSIONS_RX){
- unifi_error(priv, "%s: All ba_session used for Rx, NO free session available\n", __FUNCTION__);
- return FALSE;
- }
-
-        /* With some devices a race has been observed between the EAPOL
-         * exchange and BA session establishment. This can leave EAPOL
-         * authentication packets stuck in the BA reorder buffer, so the
-         * connection cannot be established. To avoid this, check here
-         * that the EAPOL authentication is complete and only then allow
-         * the BA session to be established.
-         *
-         * It has been verified that peers normally re-establish
-         * the BA session after the initial rejection.
-         */
- if (CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_OPEN != uf_sme_port_state(priv, macAddress.a, UF_CONTROLLED_PORT_Q, interfacePriv->InterfaceTag))
- {
- unifi_warning(priv, "blockack_session_start: Controlled port not opened, Reject BA request\n");
- return FALSE;
- }
-
- ba_session_rx = kzalloc(sizeof(ba_session_rx_struct), GFP_KERNEL);
- if (!ba_session_rx) {
- unifi_error(priv, "%s: kmalloc failed for ba_session_rx\n", __FUNCTION__);
- return FALSE;
- }
-
- ba_session_rx->wind_size = wind_size;
- ba_session_rx->start_sn = ba_session_rx->expected_sn = start_sn;
- ba_session_rx->trigger_ba_after_ssn = FALSE;
-
- ba_session_rx->buffer = kzalloc(ba_session_rx->wind_size*sizeof(frame_desc_struct), GFP_KERNEL);
- if (!ba_session_rx->buffer) {
- kfree(ba_session_rx);
- unifi_error(priv, "%s: kmalloc failed for buffer\n", __FUNCTION__);
- return FALSE;
- }
-
- INIT_WORK(&ba_session_rx->send_ba_err_task, uf_send_ba_err_wq);
- if (timeout) {
- ba_session_rx->timeout = timeout;
- ba_session_rx->timer.function = ba_session_terminate_timer_func;
- ba_session_rx->timer.data = (unsigned long)ba_session_rx;
- init_timer(&ba_session_rx->timer);
- mod_timer(&ba_session_rx->timer, (jiffies + usecs_to_jiffies((ba_session_rx->timeout) * 1024)));
- }
-
- ba_session_rx->interfacePriv = interfacePriv;
- ba_session_rx->tID = tID;
- ba_session_rx->macAddress = macAddress;
-
- interfacePriv->ba_session_rx[ba_session_idx] = ba_session_rx;
- }
- return TRUE;
-}
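
The sketch below is a minimal, conceptual illustration of the window scrolling mentioned in the ADDBA/SSN comment above: 802.11 sequence numbers are 12 bits wide, so the distance between the old expected sequence number and the new SSN is taken modulo 4096 and the reorder window start jumps forward to the new SSN. It is only a model of the idea; the driver's actual scroll_ba_window() must also deal with any frames buffered in the slots that are skipped, which this sketch ignores.

#include <stdio.h>

#define SN_MODULUS 4096    /* 802.11 sequence numbers are 12 bits wide */

/* Move the expected sequence number of a reorder window forward to a new SSN. */
static unsigned int scroll_window_sketch(unsigned int expected_sn, unsigned int new_ssn)
{
    unsigned int distance = (new_ssn - expected_sn) & (SN_MODULUS - 1);

    printf("scrolling window forward by %u slots\n", distance);
    return new_ssn;    /* the new SSN becomes the new window start */
}

int main(void)
{
    /* e.g. a lost DELBA followed by an ADDBA carrying SSN 4000 */
    unsigned int expected_sn = scroll_window_sketch(3900, 4000);

    printf("expected_sn is now %u\n", expected_sn);
    return 0;
}
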
-
-void CsrWifiRouterCtrlBlockAckEnableReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
- CsrWifiRouterCtrlBlockAckEnableReq* req = (CsrWifiRouterCtrlBlockAckEnableReq*)msg;
- u8 r;
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
-
- unifi_trace(priv, UDBG6, ">>%s\n", __FUNCTION__);
- down(&priv->ba_mutex);
- r = blockack_session_start(priv,
- req->interfaceTag,
- req->trafficStreamID,
- req->timeout,
- req->role,
- req->bufferSize,
- req->ssn,
- req->macAddress
- );
- up(&priv->ba_mutex);
-
- CsrWifiRouterCtrlBlockAckEnableCfmSend(msg->source,
- req->clientData,
- req->interfaceTag,
- r?CSR_RESULT_SUCCESS:CSR_RESULT_FAILURE);
- unifi_trace(priv, UDBG6, "<<%s: r=%d\n", __FUNCTION__, r);
-
-}
-
-void CsrWifiRouterCtrlWapiMulticastFilterReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_WIFI_SECURITY_WAPI_ENABLE
-
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiRouterCtrlWapiMulticastFilterReq* req = (CsrWifiRouterCtrlWapiMulticastFilterReq*)msg;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[req->interfaceTag];
-
- if (CSR_WIFI_ROUTER_CTRL_MODE_STA == interfacePriv->interfaceMode) {
-
- unifi_trace(priv, UDBG6, ">>%s\n", __FUNCTION__);
-
- unifi_trace(priv, UDBG1, "CsrWifiRouterCtrlWapiMulticastFilterReq: req->status = %d\n", req->status);
-
- /* status 1 - Filter on
- * status 0 - Filter off */
- priv->wapi_multicast_filter = req->status;
-
- unifi_trace(priv, UDBG6, "<<%s\n", __FUNCTION__);
- } else {
-
- unifi_warning(priv, "%s is NOT applicable for interface mode - %d\n", __FUNCTION__,interfacePriv->interfaceMode);
-
- }
-#elif defined(UNIFI_DEBUG)
- /*WAPI Disabled*/
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- unifi_error(priv,"CsrWifiRouterCtrlWapiMulticastFilterReqHandler: called when WAPI isn't enabled\n");
-#endif
-}
-
-void CsrWifiRouterCtrlWapiUnicastFilterReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_WIFI_SECURITY_WAPI_ENABLE
-
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiRouterCtrlWapiUnicastFilterReq* req = (CsrWifiRouterCtrlWapiUnicastFilterReq*)msg;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[req->interfaceTag];
-
- if (CSR_WIFI_ROUTER_CTRL_MODE_STA == interfacePriv->interfaceMode) {
-
- unifi_trace(priv, UDBG6, ">>%s\n", __FUNCTION__);
-
- unifi_trace(priv, UDBG1, "CsrWifiRouterCtrlWapiUnicastFilterReq: req->status= %d\n", req->status);
-
- if ((priv->wapi_unicast_filter == 1) && (req->status == 0)) {
- /* When we have successfully re-associated and obtained a new unicast key with keyid = 0 */
- priv->wapi_unicast_queued_pkt_filter = 1;
- }
-
- /* status 1 - Filter ON
- * status 0 - Filter OFF */
- priv->wapi_unicast_filter = req->status;
-
- unifi_trace(priv, UDBG6, "<<%s\n", __FUNCTION__);
- } else {
-
- unifi_warning(priv, "%s is NOT applicable for interface mode - %d\n", __FUNCTION__,interfacePriv->interfaceMode);
-
- }
-#elif defined(UNIFI_DEBUG)
- /*WAPI Disabled*/
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- unifi_error(priv,"CsrWifiRouterCtrlWapiUnicastFilterReqHandler: called when WAPI isn't enabled\n");
-#endif
-}
-
-void CsrWifiRouterCtrlWapiRxPktReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_WIFI_SECURITY_WAPI_ENABLE
-
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiRouterCtrlWapiRxPktReq* req = (CsrWifiRouterCtrlWapiRxPktReq*)msg;
- int client_id, receiver_id;
- bulk_data_param_t bulkdata;
- CsrResult res;
- ul_client_t *client;
- CSR_SIGNAL signal;
- CSR_MA_PACKET_INDICATION *pkt_ind;
- netInterface_priv_t *interfacePriv;
-
- if (priv == NULL) {
- unifi_error(priv, "CsrWifiRouterCtrlWapiRxPktReq : invalid priv\n", __func__);
- return;
- }
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "CsrWifiRouterCtrlWapiRxPktReq : invalid sme priv\n", __func__);
- return;
- }
-
- interfacePriv = priv->interfacePriv[req->interfaceTag];
-
- if (CSR_WIFI_ROUTER_CTRL_MODE_STA == interfacePriv->interfaceMode) {
-
- unifi_trace(priv, UDBG6, ">>%s\n", __FUNCTION__);
-
-
- if (req->dataLength == 0 || req->data == NULL) {
- unifi_error(priv, "CsrWifiRouterCtrlWapiRxPktReq: invalid request\n",__FUNCTION__);
- return;
- }
-
- res = unifi_net_data_malloc(priv, &bulkdata.d[0], req->dataLength);
- if (res != CSR_RESULT_SUCCESS) {
- unifi_error(priv, "CsrWifiRouterCtrlWapiRxPktReq: Could not allocate net data\n",__FUNCTION__);
- return;
- }
-
-        /* This function is expected to be called only when the MIC has been verified by the SME
-         * to be correct, so reset the reception status to rx_success. */
- res = read_unpack_signal(req->signal, &signal);
- if (res) {
- unifi_error(priv,"CsrWifiRouterCtrlWapiRxPktReqHandler: Received unknown or corrupted signal.\n");
- return;
- }
- pkt_ind = (CSR_MA_PACKET_INDICATION*) (&((&signal)->u).MaPacketIndication);
- if (pkt_ind->ReceptionStatus != CSR_MICHAEL_MIC_ERROR) {
- unifi_error(priv,"CsrWifiRouterCtrlWapiRxPktReqHandler: Unknown signal with reception status = %d\n",pkt_ind->ReceptionStatus);
- return;
- } else {
- unifi_trace(priv, UDBG4,"CsrWifiRouterCtrlWapiRxPktReqHandler: MIC verified , RX_SUCCESS \n",__FUNCTION__);
- pkt_ind->ReceptionStatus = CSR_RX_SUCCESS;
- write_pack(&signal, req->signal, &(req->signalLength));
- }
-
- memcpy((void*)bulkdata.d[0].os_data_ptr, req->data, req->dataLength);
-
- receiver_id = CSR_GET_UINT16_FROM_LITTLE_ENDIAN((req->signal) + sizeof(s16)) & 0xFFF0;
- client_id = (receiver_id & 0x0F00) >> UDI_SENDER_ID_SHIFT;
-
- client = &priv->ul_clients[client_id];
-
- if (client && client->event_hook) {
- unifi_trace(priv, UDBG3,
- "CsrWifiRouterCtrlWapiRxPktReq: "
- "Sending signal to client %d, (s:0x%X, r:0x%X) - Signal 0x%X \n",
- client->client_id, client->sender_id, receiver_id,
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN(req->signal));
-
- client->event_hook(client, req->signal, req->signalLength, &bulkdata, UDI_TO_HOST);
- } else {
- unifi_trace(priv, UDBG4, "No client to give the packet to\n");
- unifi_net_data_free(priv, &bulkdata.d[0]);
- }
-
- unifi_trace(priv, UDBG6, "<<%s\n", __FUNCTION__);
- } else {
- unifi_warning(priv, "%s is NOT applicable for interface mode - %d\n", __FUNCTION__,interfacePriv->interfaceMode);
- }
-#elif defined(UNIFI_DEBUG)
- /*WAPI Disabled*/
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- unifi_error(priv,"CsrWifiRouterCtrlWapiRxPktReqHandler: called when WAPI isn't enabled\n");
-#endif
-}
-
-void CsrWifiRouterCtrlWapiUnicastTxPktReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#if (defined(CSR_WIFI_SECURITY_WAPI_ENABLE) && defined(CSR_WIFI_SECURITY_WAPI_SW_ENCRYPTION))
-
- unifi_priv_t *priv = (unifi_priv_t*) drvpriv;
- CsrWifiRouterCtrlWapiUnicastTxPktReq *req = (CsrWifiRouterCtrlWapiUnicastTxPktReq*) msg;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[req->interfaceTag];
- bulk_data_param_t bulkdata;
- u8 macHeaderLengthInBytes = MAC_HEADER_SIZE;
-    /* KeyID, Reserved, PN, MIC */
- u8 appendedCryptoFields = 1 + 1 + 16 + 16;
- CsrResult result;
- /* Retrieve the MA PACKET REQ fields from the Signal retained from send_ma_pkt_request() */
- CSR_MA_PACKET_REQUEST *storedSignalMAPktReq = &interfacePriv->wapi_unicast_ma_pkt_sig.u.MaPacketRequest;
-
- if (CSR_WIFI_ROUTER_CTRL_MODE_STA == interfacePriv->interfaceMode) {
-
- unifi_trace(priv, UDBG6, ">>%s\n", __FUNCTION__);
-
- if (priv == NULL) {
- unifi_error(priv, "CsrWifiRouterCtrlWapiUnicastTxPktReqHandler : invalid priv\n",__FUNCTION__);
- return;
- }
- if (priv->smepriv == NULL) {
- unifi_error(priv, "CsrWifiRouterCtrlWapiUnicastTxPktReqHandler : invalid sme priv\n",__FUNCTION__);
- return;
- }
- if (req->data == NULL) {
- unifi_error(priv, "CsrWifiRouterCtrlWapiUnicastTxPktReqHandler: invalid request\n",__FUNCTION__);
- return;
- } else {
-            /* If it is QoS data (type = data, subtype = QoS), the frame header contains a QoS control field */
- if ((req->data[0] & 0x88) == 0x88) {
- macHeaderLengthInBytes = macHeaderLengthInBytes + QOS_CONTROL_HEADER_SIZE;
- }
- }
- if ( !(req->dataLength>(macHeaderLengthInBytes+appendedCryptoFields)) ) {
- unifi_error(priv, "CsrWifiRouterCtrlWapiUnicastTxPktReqHandler: invalid dataLength\n",__FUNCTION__);
- return;
- }
-
- /* Encrypted DATA Packet contained in (req->data)
- * -------------------------------------------------------------------
- * |MAC Header| KeyId | Reserved | PN | xxDataxx | xxMICxxx |
- * -------------------------------------------------------------------
- * (<-----Encrypted----->)
- * -------------------------------------------------------------------
- * |24/26(QoS)| 1 | 1 | 16 | x | 16 |
- * -------------------------------------------------------------------
- */
- result = unifi_net_data_malloc(priv, &bulkdata.d[0], req->dataLength);
- if (result != CSR_RESULT_SUCCESS) {
- unifi_error(priv, "CsrWifiRouterCtrlWapiUnicastTxPktReqHandler: Could not allocate net data\n",__FUNCTION__);
- return;
- }
- memcpy((void*)bulkdata.d[0].os_data_ptr, req->data, req->dataLength);
- bulkdata.d[0].data_length = req->dataLength;
- bulkdata.d[1].os_data_ptr = NULL;
- bulkdata.d[1].data_length = 0;
-
- /* Send UniFi msg */
-        /* Here hostTag is sent as 0xffffffff; it is filled in properly when the MA-PACKET request is framed in pdu_processing.c */
- result = uf_process_ma_packet_req(priv,
- storedSignalMAPktReq->Ra.x,
- storedSignalMAPktReq->HostTag,/* Ask for a new HostTag */
- req->interfaceTag,
- storedSignalMAPktReq->TransmissionControl,
- storedSignalMAPktReq->TransmitRate,
- storedSignalMAPktReq->Priority, /* Retained value */
- interfacePriv->wapi_unicast_ma_pkt_sig.SignalPrimitiveHeader.SenderProcessId, /*FIXME AP: VALIDATE ???*/
- &bulkdata);
-
- if (result == NETDEV_TX_OK) {
- (priv->netdev[req->interfaceTag])->trans_start = jiffies;
- /* Should really count tx stats in the UNITDATA.status signal but
- * that doesn't have the length.
- */
- interfacePriv->stats.tx_packets++;
-
- /* count only the packet payload */
- interfacePriv->stats.tx_bytes += req->dataLength - macHeaderLengthInBytes - appendedCryptoFields;
- unifi_trace(priv, UDBG1, "CsrWifiRouterCtrlWapiUnicastTxPktReqHandler: (Packet Sent), sent count = %x\n", interfacePriv->stats.tx_packets);
- } else {
- /* Failed to send: fh queue was full, and the skb was discarded*/
- unifi_trace(priv, UDBG1, "(HIP validation failure) Result = %d\n", result);
- unifi_net_data_free(priv, &bulkdata.d[0]);
-
- interfacePriv->stats.tx_dropped++;
- unifi_trace(priv, UDBG1, "CsrWifiRouterCtrlWapiUnicastTxPktReqHandler: (Packet Drop), dropped count = %x\n", interfacePriv->stats.tx_dropped);
- }
-
- unifi_trace(priv, UDBG6, "<<%s\n", __FUNCTION__);
-
- } else {
-
- unifi_warning(priv, "%s is NOT applicable for interface mode - %d\n", __FUNCTION__,interfacePriv->interfaceMode);
-
- }
-#elif defined(UNIFI_DEBUG)
- /*WAPI Disabled*/
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- unifi_error(priv,"CsrWifiRouterCtrlWapiUnicastTxPktReqHandler: called when WAPI SW ENCRYPTION isn't enabled\n");
-#endif
-}
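
To make the dataLength check above concrete: the frame must be longer than a 24-byte MAC header (26 bytes when the QoS control field is present) plus the 34 bytes of appended WAPI fields (KeyId 1, Reserved 1, PN 16, MIC 16) shown in the layout comment, otherwise there is no room for a payload. The stand-alone sketch below just reproduces that arithmetic and is not driver code.

#include <stdio.h>

int main(void)
{
    unsigned int mac_header = 24;                  /* 26 with the QoS control field */
    unsigned int crypto_fields = 1 + 1 + 16 + 16;  /* KeyId + Reserved + PN + MIC = 34 */

    /* req->dataLength must be strictly greater than these to leave room for the payload */
    printf("minimum non-QoS frame length = %u bytes\n", mac_header + crypto_fields);
    printf("minimum QoS frame length     = %u bytes\n", mac_header + 2 + crypto_fields);
    return 0;
}
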
-
-void CsrWifiRouterCtrlWapiFilterReqHandler(void* drvpriv, CsrWifiFsmEvent* msg)
-{
-#ifdef CSR_WIFI_SECURITY_WAPI_ENABLE
-
-#ifdef CSR_WIFI_SECURITY_WAPI_QOSCTRL_MIC_WORKAROUND
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- CsrWifiRouterCtrlWapiFilterReq* req = (CsrWifiRouterCtrlWapiFilterReq*)msg;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[req->interfaceTag];
-
- if (CSR_WIFI_ROUTER_CTRL_MODE_STA == interfacePriv->interfaceMode) {
-
- unifi_trace(priv, UDBG6, ">>%s\n", __FUNCTION__);
-
- unifi_trace(priv, UDBG1, "CsrWifiRouterCtrlWapiFilterReq: req->isWapiConnected [0/1] = %d \n",req->isWapiConnected);
-
- priv->isWapiConnection = req->isWapiConnected;
-
- unifi_trace(priv, UDBG6, "<<%s\n", __FUNCTION__);
- } else {
-
- unifi_warning(priv, "%s is NOT applicable for interface mode - %d\n", __FUNCTION__,interfacePriv->interfaceMode);
-
- }
-#endif
-
-#elif defined(UNIFI_DEBUG)
- /*WAPI Disabled*/
- unifi_priv_t *priv = (unifi_priv_t*)drvpriv;
- unifi_error(priv,"CsrWifiRouterCtrlWapiFilterReq: called when WAPI isn't enabled\n");
-#endif
-}
diff --git a/drivers/staging/csr/sme_userspace.c b/drivers/staging/csr/sme_userspace.c
deleted file mode 100644
index abcb446..0000000
--- a/drivers/staging/csr/sme_userspace.c
+++ /dev/null
@@ -1,315 +0,0 @@
-/*
- *****************************************************************************
- *
- * FILE : sme_userspace.c
- *
- * PURPOSE : Support functions for userspace SME helper application.
- *
- *
- * Copyright (C) 2008-2011 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- *****************************************************************************
- */
-
-#include "unifi_priv.h"
-
-/*
- * FIXME: these need to be the correct values,
- * supplied dynamically from user space.
- */
-CsrSchedQid CSR_WIFI_ROUTER_IFACEQUEUE = 0xFFFF;
-CsrSchedQid CSR_WIFI_SME_IFACEQUEUE = 0xFFFF;
-#ifdef CSR_SUPPORT_WEXT_AP
-CsrSchedQid CSR_WIFI_NME_IFACEQUEUE = 0xFFFF;
-#endif
-int
-uf_sme_init(unifi_priv_t *priv)
-{
- int i, j;
-
- CsrWifiRouterTransportInit(priv);
-
- priv->smepriv = priv;
-
- init_waitqueue_head(&priv->sme_request_wq);
-
- priv->filter_tclas_ies = NULL;
- memset(&priv->packet_filters, 0, sizeof(uf_cfg_bcast_packet_filter_t));
-
-#ifdef CSR_SUPPORT_WEXT
- priv->ignore_bssid_join = FALSE;
- priv->mib_data.length = 0;
-
- uf_sme_wext_set_defaults(priv);
-#endif /* CSR_SUPPORT_WEXT*/
-
- priv->sta_ip_address = 0xFFFFFFFF;
-
- priv->wifi_on_state = wifi_on_unspecified;
-
- sema_init(&priv->sme_sem, 1);
- memset(&priv->sme_reply, 0, sizeof(sme_reply_t));
-
- priv->ta_ind_work.in_use = 0;
- priv->ta_sample_ind_work.in_use = 0;
-
- priv->CSR_WIFI_SME_IFACEQUEUE = 0xFFFF;
-
- for (i = 0; i < MAX_MA_UNIDATA_IND_FILTERS; i++) {
- priv->sme_unidata_ind_filters[i].in_use = 0;
- }
-
- /* Create a work queue item for Traffic Analysis indications to SME */
- INIT_WORK(&priv->ta_ind_work.task, uf_ta_ind_wq);
- INIT_WORK(&priv->ta_sample_ind_work.task, uf_ta_sample_ind_wq);
-#ifdef CSR_SUPPORT_WEXT
- INIT_WORK(&priv->sme_config_task, uf_sme_config_wq);
-#endif
-
- for (i = 0; i < CSR_WIFI_NUM_INTERFACES; i++) {
- netInterface_priv_t *interfacePriv = priv->interfacePriv[i];
- interfacePriv->m4_sent = FALSE;
- interfacePriv->m4_bulk_data.net_buf_length = 0;
- interfacePriv->m4_bulk_data.data_length = 0;
- interfacePriv->m4_bulk_data.os_data_ptr = interfacePriv->m4_bulk_data.os_net_buf_ptr = NULL;
-
- memset(&interfacePriv->controlled_data_port, 0, sizeof(unifi_port_config_t));
- interfacePriv->controlled_data_port.entries_in_use = 1;
- interfacePriv->controlled_data_port.port_cfg[0].in_use = TRUE;
- interfacePriv->controlled_data_port.port_cfg[0].port_action = CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_CLOSED_DISCARD;
- interfacePriv->controlled_data_port.overide_action = UF_DATA_PORT_OVERIDE;
-
- memset(&interfacePriv->uncontrolled_data_port, 0, sizeof(unifi_port_config_t));
- interfacePriv->uncontrolled_data_port.entries_in_use = 1;
- interfacePriv->uncontrolled_data_port.port_cfg[0].in_use = TRUE;
- interfacePriv->uncontrolled_data_port.port_cfg[0].port_action = CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_CLOSED_DISCARD;
- interfacePriv->uncontrolled_data_port.overide_action = UF_DATA_PORT_OVERIDE;
-
- /* Mark the remainder of the port config table as unallocated */
- for(j = 1; j < UNIFI_MAX_CONNECTIONS; j++) {
- interfacePriv->controlled_data_port.port_cfg[j].in_use = FALSE;
- interfacePriv->controlled_data_port.port_cfg[j].port_action = CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_CLOSED_DISCARD;
-
- interfacePriv->uncontrolled_data_port.port_cfg[j].in_use = FALSE;
- interfacePriv->uncontrolled_data_port.port_cfg[j].port_action = CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_CLOSED_DISCARD;
- }
-
-        /* initializing the lists */
- INIT_LIST_HEAD(&interfacePriv->genericMgtFrames);
- INIT_LIST_HEAD(&interfacePriv->genericMulticastOrBroadCastMgtFrames);
- INIT_LIST_HEAD(&interfacePriv->genericMulticastOrBroadCastFrames);
-
- for(j = 0; j < UNIFI_MAX_CONNECTIONS; j++) {
- interfacePriv->staInfo[j] = NULL;
- }
-
- interfacePriv->num_stations_joined = 0;
- interfacePriv->sta_activity_check_enabled = FALSE;
- }
-
-
- return 0;
-} /* uf_sme_init() */
-
-
-void
-uf_sme_deinit(unifi_priv_t *priv)
-{
- int i,j;
- u8 ba_session_idx;
- ba_session_rx_struct *ba_session_rx = NULL;
- ba_session_tx_struct *ba_session_tx = NULL;
- CsrWifiRouterCtrlStaInfo_t *staInfo = NULL;
- netInterface_priv_t *interfacePriv = NULL;
-
- /* Free any TCLASs previously allocated */
- if (priv->packet_filters.tclas_ies_length) {
- priv->packet_filters.tclas_ies_length = 0;
- kfree(priv->filter_tclas_ies);
- priv->filter_tclas_ies = NULL;
- }
-
- for (i = 0; i < MAX_MA_UNIDATA_IND_FILTERS; i++) {
- priv->sme_unidata_ind_filters[i].in_use = 0;
- }
-
-    /* Remove all peer records before going down */
- for (i = 0; i < CSR_WIFI_NUM_INTERFACES; i++) {
- down(&priv->ba_mutex);
- for(ba_session_idx=0; ba_session_idx < MAX_SUPPORTED_BA_SESSIONS_RX; ba_session_idx++){
- ba_session_rx = priv->interfacePriv[i]->ba_session_rx[ba_session_idx];
- if(ba_session_rx) {
- blockack_session_stop(priv,
- i,
- CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_RECIPIENT,
- ba_session_rx->tID,
- ba_session_rx->macAddress);
- }
- }
- for(ba_session_idx=0; ba_session_idx < MAX_SUPPORTED_BA_SESSIONS_TX; ba_session_idx++){
- ba_session_tx = priv->interfacePriv[i]->ba_session_tx[ba_session_idx];
- if(ba_session_tx) {
- blockack_session_stop(priv,
- i,
- CSR_WIFI_ROUTER_CTRL_BLOCK_ACK_ORIGINATOR,
- ba_session_tx->tID,
- ba_session_tx->macAddress);
- }
- }
-
- up(&priv->ba_mutex);
- interfacePriv = priv->interfacePriv[i];
- if(interfacePriv){
- for(j = 0; j < UNIFI_MAX_CONNECTIONS; j++) {
- if ((staInfo=interfacePriv->staInfo[j]) != NULL) {
-                    /* Clear the STA activity parameters before freeing the station record */
- unifi_trace(priv, UDBG1, "uf_sme_deinit: Canceling work queue for STA with AID: %d\n", staInfo->aid);
- cancel_work_sync(&staInfo->send_disconnected_ind_task);
- staInfo->nullDataHostTag = INVALID_HOST_TAG;
- }
- }
- if (interfacePriv->sta_activity_check_enabled){
- interfacePriv->sta_activity_check_enabled = FALSE;
- del_timer_sync(&interfacePriv->sta_activity_check_timer);
- }
- }
- CsrWifiRouterCtrlInterfaceReset(priv, i);
- priv->interfacePriv[i]->interfaceMode = CSR_WIFI_ROUTER_CTRL_MODE_NONE;
- }
-
-
-} /* uf_sme_deinit() */
-
-
-
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_ta_indicate_protocol
- *
- * Report that a packet of a particular type has been seen
- *
- * Arguments:
- * drv_priv The device context pointer passed to ta_init.
- * protocol The protocol type enum value.
- * direction Whether the packet was a tx or rx.
- * src_addr The source MAC address from the data packet.
- *
- * Returns:
- * None.
- *
- * Notes:
- * We defer the actual sending to a background workqueue,
- * see uf_ta_ind_wq().
- * ---------------------------------------------------------------------------
- */
-void
-unifi_ta_indicate_protocol(void *ospriv,
- CsrWifiRouterCtrlTrafficPacketType packet_type,
- CsrWifiRouterCtrlProtocolDirection direction,
- const CsrWifiMacAddress *src_addr)
-{
- unifi_priv_t *priv = (unifi_priv_t*)ospriv;
-
- if (priv->ta_ind_work.in_use) {
- unifi_warning(priv,
- "unifi_ta_indicate_protocol: workqueue item still in use, not sending\n");
- return;
- }
-
- if (CSR_WIFI_ROUTER_CTRL_PROTOCOL_DIRECTION_RX == direction)
- {
- u16 interfaceTag = 0;
- CsrWifiRouterCtrlTrafficProtocolIndSend(priv->CSR_WIFI_SME_IFACEQUEUE,0,
- interfaceTag,
- packet_type,
- direction,
- *src_addr);
- }
- else
- {
- priv->ta_ind_work.packet_type = packet_type;
- priv->ta_ind_work.direction = direction;
- priv->ta_ind_work.src_addr = *src_addr;
-
- queue_work(priv->unifi_workqueue, &priv->ta_ind_work.task);
- }
-
-} /* unifi_ta_indicate_protocol() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_ta_indicate_sampling
- *
- * Send the TA sampling information to the SME.
- *
- * Arguments:
- * drv_priv The device context pointer passed to ta_init.
- * stats The TA sampling data to send.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-void
-unifi_ta_indicate_sampling(void *ospriv, CsrWifiRouterCtrlTrafficStats *stats)
-{
- unifi_priv_t *priv = (unifi_priv_t*)ospriv;
-
- if (!priv) {
- return;
- }
-
- if (priv->ta_sample_ind_work.in_use) {
- unifi_warning(priv,
- "unifi_ta_indicate_sampling: workqueue item still in use, not sending\n");
- return;
- }
-
- priv->ta_sample_ind_work.stats = *stats;
-
- queue_work(priv->unifi_workqueue, &priv->ta_sample_ind_work.task);
-
-} /* unifi_ta_indicate_sampling() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_ta_indicate_l4stats
- *
- * Send the TA TCP/UDP throughput information to the driver.
- *
- * Arguments:
- * drv_priv The device context pointer passed to ta_init.
- * rxTcpThroughput TCP RX throughput in KiloBytes
- * txTcpThroughput TCP TX throughput in KiloBytes
- * rxUdpThroughput UDP RX throughput in KiloBytes
- * txUdpThroughput UDP TX throughput in KiloBytes
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-void
-unifi_ta_indicate_l4stats(void *ospriv,
- u32 rxTcpThroughput,
- u32 txTcpThroughput,
- u32 rxUdpThroughput,
- u32 txUdpThroughput)
-{
- unifi_priv_t *priv = (unifi_priv_t*)ospriv;
-
- if (!priv) {
- return;
- }
- /* Save the info. The actual action will be taken in unifi_ta_indicate_sampling() */
- priv->rxTcpThroughput = rxTcpThroughput;
- priv->txTcpThroughput = txTcpThroughput;
- priv->rxUdpThroughput = rxUdpThroughput;
- priv->txUdpThroughput = txUdpThroughput;
-} /* unifi_ta_indicate_l4stats() */
diff --git a/drivers/staging/csr/sme_userspace.h b/drivers/staging/csr/sme_userspace.h
deleted file mode 100644
index ebe371c..0000000
--- a/drivers/staging/csr/sme_userspace.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * ***************************************************************************
- * FILE: sme_userspace.h
- *
- * PURPOSE: SME related definitions.
- *
- * Copyright (C) 2007-2008 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ***************************************************************************
- */
-#ifndef __LINUX_SME_USERSPACE_H__
-#define __LINUX_SME_USERSPACE_H__ 1
-
-#include <linux/kernel.h>
-
-int uf_sme_init(unifi_priv_t *priv);
-void uf_sme_deinit(unifi_priv_t *priv);
-int uf_sme_queue_message(unifi_priv_t *priv, u8 *buffer, int length);
-
-
-#include "csr_wifi_router_lib.h"
-#include "csr_wifi_router_sef.h"
-#include "csr_wifi_router_ctrl_lib.h"
-#include "csr_wifi_router_ctrl_sef.h"
-#include "csr_wifi_sme_task.h"
-#ifdef CSR_SUPPORT_WEXT_AP
-#include "csr_wifi_nme_ap_lib.h"
-#endif
-#include "csr_wifi_sme_lib.h"
-
-void CsrWifiRouterTransportInit(unifi_priv_t *priv);
-void CsrWifiRouterTransportRecv(unifi_priv_t *priv, u8 *buffer, size_t bufferLength);
-void CsrWifiRouterTransportDeInit(unifi_priv_t *priv);
-
-#endif /* __LINUX_SME_USERSPACE_H__ */
diff --git a/drivers/staging/csr/sme_wext.c b/drivers/staging/csr/sme_wext.c
deleted file mode 100644
index 4129a643..0000000
--- a/drivers/staging/csr/sme_wext.c
+++ /dev/null
@@ -1,3327 +0,0 @@
-/*
- * ---------------------------------------------------------------------------
- * FILE: sme_wext.c
- *
- * PURPOSE:
- * Handlers for ioctls from iwconfig.
- * These provide the control plane operations.
- *
- * Copyright (C) 2007-2009 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ---------------------------------------------------------------------------
- */
-#include <linux/types.h>
-#include <linux/etherdevice.h>
-#include <linux/if_arp.h>
-#include <asm/uaccess.h>
-#include <linux/ctype.h>
-#include "unifi_priv.h"
-#include <linux/rtnetlink.h>
-
-#define CHECK_INITED(_priv) \
- do { \
- if (_priv->init_progress != UNIFI_INIT_COMPLETED) { \
- unifi_trace(_priv, UDBG2, "%s unifi not ready, failing wext call\n", __FUNCTION__); \
- return -ENODEV; \
- } \
- } while (0)
-
-/* Workaround for the wpa_supplicant hanging issue - disabled on Android */
-#ifndef ANDROID_BUILD
-#define CSR_WIFI_WEXT_HANG_WORKAROUND
-#endif
-
-#ifdef CSR_WIFI_WEXT_HANG_WORKAROUND
-# define UF_RTNL_LOCK() rtnl_lock()
-# define UF_RTNL_UNLOCK() rtnl_unlock()
-#else
-# define UF_RTNL_LOCK()
-# define UF_RTNL_UNLOCK()
-#endif
-
-
-/*
- * ---------------------------------------------------------------------------
- * Helper functions
- * ---------------------------------------------------------------------------
- */
-
-/*
- * ---------------------------------------------------------------------------
- * wext_freq_to_channel
- * channel_to_mhz
- *
- * These functions convert between channel number and frequency.
- *
- * Arguments:
- * ch Channel number, as defined in 802.11 specs
- * m, e Mantissa and exponent as provided by wireless extension.
- *
- * Returns:
- * channel or frequency (in MHz) value
- * ---------------------------------------------------------------------------
- */
-static int
-wext_freq_to_channel(int m, int e)
-{
- int mhz;
-
- mhz = m;
- while (e < 6) {
- mhz /= 10;
- e++;
- }
- while (e > 6) {
- mhz *= 10;
- e--;
- }
-
- if (mhz >= 5000) {
- return ((mhz - 5000) / 5);
- }
-
- if (mhz == 2482) {
- return 14;
- }
-
- if (mhz >= 2407) {
- return ((mhz - 2407) / 5);
- }
-
- return 0;
-} /* wext_freq_to_channel() */
-
-static int
-channel_to_mhz(int ch, int dot11a)
-{
-
- if (ch == 0) return 0;
- if (ch > 200) return 0;
-
- /* 5G */
- if (dot11a) {
- return (5000 + (5 * ch));
- }
-
- /* 2.4G */
- if (ch == 14) {
- return 2484;
- }
-
- if ((ch < 14) && (ch > 0)) {
- return (2407 + (5 * ch));
- }
-
- return 0;
-}
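
A couple of worked conversions help when reading the two helpers above: wireless extensions pass the frequency as a mantissa/exponent pair, which is first normalised to MHz and then mapped onto a channel number from the 2.4 GHz or 5 GHz base frequency. The stand-alone sketch below repeats the same arithmetic; the input values are arbitrary examples.

#include <stdio.h>

/* Mirrors the arithmetic of wext_freq_to_channel(): normalise (m, e) to MHz, then map to a channel. */
static int freq_to_channel_sketch(int m, int e)
{
    int mhz = m;

    while (e < 6) { mhz /= 10; e++; }
    while (e > 6) { mhz *= 10; e--; }

    if (mhz >= 5000)
        return (mhz - 5000) / 5;    /* 5 GHz band: e.g. 5180 MHz -> channel 36 */
    if (mhz == 2482)
        return 14;                  /* mapped to channel 14 */
    if (mhz >= 2407)
        return (mhz - 2407) / 5;    /* 2.4 GHz band: e.g. 2412 MHz -> channel 1 */
    return 0;
}

int main(void)
{
    printf("2412 MHz -> channel %d\n", freq_to_channel_sketch(2412, 6));    /* 1 */
    printf("5180 MHz -> channel %d\n", freq_to_channel_sketch(518, 7));     /* 36 */
    return 0;
}
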
-#ifdef CSR_SUPPORT_WEXT_AP
-void uf_sme_wext_ap_set_defaults(unifi_priv_t *priv)
-{
- memcpy(priv->ap_config.ssid.ssid,"defaultssid",sizeof("defaultssid"));
-
- priv->ap_config.ssid.length = 8;
- priv->ap_config.channel = 6;
- priv->ap_config.if_index = 1;
- priv->ap_config.credentials.authType = 0;
- priv->ap_config.max_connections=8;
-
- priv->group_sec_config.apGroupkeyTimeout = 0;
- priv->group_sec_config.apStrictGtkRekey = 0;
- priv->group_sec_config.apGmkTimeout = 0;
- priv->group_sec_config.apResponseTimeout = 100; /* Default*/
- priv->group_sec_config.apRetransLimit = 3; /* Default*/
- /* Set default params even if they may not be used*/
- /* Until Here*/
-
- priv->ap_mac_config.preamble = CSR_WIFI_SME_USE_LONG_PREAMBLE;
- priv->ap_mac_config.shortSlotTimeEnabled = FALSE;
- priv->ap_mac_config.ctsProtectionType=CSR_WIFI_SME_CTS_PROTECTION_AUTOMATIC;
-
- priv->ap_mac_config.wmmEnabled = TRUE;
- priv->ap_mac_config.wmmApParams[0].cwMin=4;
- priv->ap_mac_config.wmmApParams[0].cwMax=10;
- priv->ap_mac_config.wmmApParams[0].aifs=3;
- priv->ap_mac_config.wmmApParams[0].txopLimit=0;
- priv->ap_mac_config.wmmApParams[0].admissionControlMandatory=FALSE;
- priv->ap_mac_config.wmmApParams[1].cwMin=4;
- priv->ap_mac_config.wmmApParams[1].cwMax=10;
- priv->ap_mac_config.wmmApParams[1].aifs=7;
- priv->ap_mac_config.wmmApParams[1].txopLimit=0;
- priv->ap_mac_config.wmmApParams[1].admissionControlMandatory=FALSE;
- priv->ap_mac_config.wmmApParams[2].cwMin=3;
- priv->ap_mac_config.wmmApParams[2].cwMax=4;
- priv->ap_mac_config.wmmApParams[2].aifs=1;
- priv->ap_mac_config.wmmApParams[2].txopLimit=94;
- priv->ap_mac_config.wmmApParams[2].admissionControlMandatory=FALSE;
- priv->ap_mac_config.wmmApParams[3].cwMin=2;
- priv->ap_mac_config.wmmApParams[3].cwMax=3;
- priv->ap_mac_config.wmmApParams[3].aifs=1;
- priv->ap_mac_config.wmmApParams[3].txopLimit=47;
- priv->ap_mac_config.wmmApParams[3].admissionControlMandatory=FALSE;
-
- priv->ap_mac_config.wmmApBcParams[0].cwMin=4;
- priv->ap_mac_config.wmmApBcParams[0].cwMax=10;
- priv->ap_mac_config.wmmApBcParams[0].aifs=3;
- priv->ap_mac_config.wmmApBcParams[0].txopLimit=0;
- priv->ap_mac_config.wmmApBcParams[0].admissionControlMandatory=FALSE;
- priv->ap_mac_config.wmmApBcParams[1].cwMin=4;
- priv->ap_mac_config.wmmApBcParams[1].cwMax=10;
- priv->ap_mac_config.wmmApBcParams[1].aifs=7;
- priv->ap_mac_config.wmmApBcParams[1].txopLimit=0;
- priv->ap_mac_config.wmmApBcParams[1].admissionControlMandatory=FALSE;
- priv->ap_mac_config.wmmApBcParams[2].cwMin=3;
- priv->ap_mac_config.wmmApBcParams[2].cwMax=4;
- priv->ap_mac_config.wmmApBcParams[2].aifs=2;
- priv->ap_mac_config.wmmApBcParams[2].txopLimit=94;
- priv->ap_mac_config.wmmApBcParams[2].admissionControlMandatory=FALSE;
- priv->ap_mac_config.wmmApBcParams[3].cwMin=2;
- priv->ap_mac_config.wmmApBcParams[3].cwMax=3;
- priv->ap_mac_config.wmmApBcParams[3].aifs=2;
- priv->ap_mac_config.wmmApBcParams[3].txopLimit=47;
- priv->ap_mac_config.wmmApBcParams[3].admissionControlMandatory=FALSE;
-
- priv->ap_mac_config.accessType=CSR_WIFI_AP_ACCESS_TYPE_NONE;
- priv->ap_mac_config.macAddressListCount=0;
- priv->ap_mac_config.macAddressList=NULL;
-
- priv->ap_mac_config.apHtParams.rxStbc=1;
- priv->ap_mac_config.apHtParams.rifsModeAllowed=TRUE;
- priv->ap_mac_config.apHtParams.greenfieldSupported=FALSE;
- priv->ap_mac_config.apHtParams.shortGi20MHz=TRUE;
- priv->ap_mac_config.apHtParams.htProtection=0;
- priv->ap_mac_config.apHtParams.dualCtsProtection=FALSE;
-
- priv->ap_mac_config.phySupportedBitmap =
- (CSR_WIFI_SME_AP_PHY_SUPPORT_B|CSR_WIFI_SME_AP_PHY_SUPPORT_G|CSR_WIFI_SME_AP_PHY_SUPPORT_N);
- priv->ap_mac_config.beaconInterval= 100;
- priv->ap_mac_config.dtimPeriod=3;
- priv->ap_mac_config.maxListenInterval=0x00ff;/* Set it to a large value
- to enable different types of
- devices to join us */
- priv->ap_mac_config.supportedRatesCount =
- uf_configure_supported_rates(priv->ap_mac_config.supportedRates,priv->ap_mac_config.phySupportedBitmap);
-}
-#endif
-/*
- * ---------------------------------------------------------------------------
- * uf_sme_wext_set_defaults
- *
- * Set up power-on defaults for driver config.
- *
- * Note: The SME Management API *cannot* be used in this function.
- *
- * Arguments:
- * priv Pointer to device private context struct
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-void
-uf_sme_wext_set_defaults(unifi_priv_t *priv)
-{
- memset(&priv->connection_config, 0, sizeof(CsrWifiSmeConnectionConfig));
-
- priv->connection_config.bssType = CSR_WIFI_SME_BSS_TYPE_INFRASTRUCTURE;
- priv->connection_config.authModeMask = CSR_WIFI_SME_AUTH_MODE_80211_OPEN;
- priv->connection_config.encryptionModeMask = CSR_WIFI_SME_ENCRYPTION_CIPHER_NONE;
- priv->connection_config.privacyMode = CSR_WIFI_SME_80211_PRIVACY_MODE_DISABLED;
- priv->connection_config.wmmQosInfo = 0xFF;
- priv->connection_config.ifIndex = CSR_WIFI_SME_RADIO_IF_BOTH;
- priv->connection_config.adhocJoinOnly = FALSE;
- priv->connection_config.adhocChannel = 6;
-
- priv->wep_tx_key_index = 0;
-
- priv->wext_wireless_stats.qual.qual = 0;
- priv->wext_wireless_stats.qual.level = 0;
- priv->wext_wireless_stats.qual.noise = 0;
- priv->wext_wireless_stats.qual.updated = 0x70;
-#ifdef CSR_SUPPORT_WEXT_AP
- /* Initialize the default configuration for AP */
- uf_sme_wext_ap_set_defaults(priv);
-#endif
-
-
-} /* uf_sme_wext_set_defaults() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * WEXT methods
- * ---------------------------------------------------------------------------
- */
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_giwname - handler for SIOCGIWNAME
- * unifi_siwfreq - handler for SIOCSIWFREQ
- * unifi_giwfreq - handler for SIOCGIWFREQ
- * unifi_siwmode - handler for SIOCSIWMODE
- * unifi_giwmode - handler for SIOCGIWMODE
- * unifi_giwrange - handler for SIOCGIWRANGE
- * unifi_siwap - handler for SIOCSIWAP
- * unifi_giwap - handler for SIOCGIWAP
- * unifi_siwscan - handler for SIOCSIWSCAN
- * unifi_giwscan - handler for SIOCGIWSCAN
- * unifi_siwessid - handler for SIOCSIWESSID
- * unifi_giwessid - handler for SIOCGIWESSID
- * unifi_siwencode - handler for SIOCSIWENCODE
- * unifi_giwencode - handler for SIOCGIWENCODE
- *
- * Handler functions for IW extensions.
- * These are registered via the unifi_iw_handler_def struct below
- * and called by the generic IW driver support code.
- * See include/net/iw_handler.h.
- *
- * Arguments:
- * None.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-static int
-iwprivsdefs(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int r;
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- CsrWifiSmeMibConfig mibConfig;
- CsrWifiSmePowerConfig powerConfig;
-
- unifi_trace(priv, UDBG1, "iwprivs80211defaults: reload defaults\n");
-
- uf_sme_wext_set_defaults(priv);
-
- /* Get, modify and set the MIB data */
- r = sme_mgt_mib_config_get(priv, &mibConfig);
- if (r) {
- unifi_error(priv, "iwprivs80211defaults: Get CsrWifiSmeMibConfigValue failed.\n");
- return r;
- }
- mibConfig.dot11RtsThreshold = 2347;
- mibConfig.dot11FragmentationThreshold = 2346;
- r = sme_mgt_mib_config_set(priv, &mibConfig);
- if (r) {
- unifi_error(priv, "iwprivs80211defaults: Set CsrWifiSmeMibConfigValue failed.\n");
- return r;
- }
-
- powerConfig.powerSaveLevel = CSR_WIFI_SME_POWER_SAVE_LEVEL_LOW;
- powerConfig.listenIntervalTu = 100;
- powerConfig.rxDtims = 1;
-
- r = sme_mgt_power_config_set(priv, &powerConfig);
- if (r) {
- unifi_error(priv, "iwprivs80211defaults: Set unifi_PowerConfigValue failed.\n");
- return r;
- }
-
- return 0;
-} /* iwprivsdefs() */
-
-static int
-iwprivs80211ps(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int r = 0;
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
-
- int ps_mode = (int)(*extra);
- CsrWifiSmePowerConfig powerConfig;
-
- unifi_trace(priv, UDBG1, "iwprivs80211ps: power save mode = %d\n", ps_mode);
-
- r = sme_mgt_power_config_get(priv, &powerConfig);
- if (r) {
- unifi_error(priv, "iwprivs80211ps: Get unifi_PowerConfigValue failed.\n");
- return r;
- }
-
- switch (ps_mode) {
- case CSR_PMM_ACTIVE_MODE:
- powerConfig.powerSaveLevel = CSR_WIFI_SME_POWER_SAVE_LEVEL_LOW;
- break;
- case CSR_PMM_POWER_SAVE:
- powerConfig.powerSaveLevel = CSR_WIFI_SME_POWER_SAVE_LEVEL_HIGH;
- break;
- case CSR_PMM_FAST_POWER_SAVE:
- powerConfig.powerSaveLevel = CSR_WIFI_SME_POWER_SAVE_LEVEL_MED;
- break;
- default:
- powerConfig.powerSaveLevel = CSR_WIFI_SME_POWER_SAVE_LEVEL_AUTO;
- break;
- }
-
- r = sme_mgt_power_config_set(priv, &powerConfig);
- if (r) {
- unifi_error(priv, "iwprivs80211ps: Set unifi_PowerConfigValue failed.\n");
- }
-
- return r;
-}
-
-static int
-iwprivg80211ps(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
-
- CsrWifiSmePowerConfig powerConfig;
- int r;
-
- r = sme_mgt_power_config_get(priv, &powerConfig);
- if (r) {
- unifi_error(priv, "iwprivg80211ps: Get 802.11 power mode failed.\n");
- return r;
- }
-
- switch (powerConfig.powerSaveLevel) {
- case CSR_WIFI_SME_POWER_SAVE_LEVEL_LOW:
- snprintf(extra, IWPRIV_POWER_SAVE_MAX_STRING,
- "Power save mode: %d (Active)",
- powerConfig.powerSaveLevel);
- break;
- case CSR_WIFI_SME_POWER_SAVE_LEVEL_MED:
- snprintf(extra, IWPRIV_POWER_SAVE_MAX_STRING,
- "Power save mode: %d (Fast)",
- powerConfig.powerSaveLevel);
- break;
- case CSR_WIFI_SME_POWER_SAVE_LEVEL_HIGH:
- snprintf(extra, IWPRIV_POWER_SAVE_MAX_STRING,
- "Power save mode: %d (Full)",
- powerConfig.powerSaveLevel);
- break;
- case CSR_WIFI_SME_POWER_SAVE_LEVEL_AUTO:
- snprintf(extra, IWPRIV_POWER_SAVE_MAX_STRING,
- "Power save mode: %d (Auto)",
- powerConfig.powerSaveLevel);
- break;
- default:
- snprintf(extra, IWPRIV_POWER_SAVE_MAX_STRING,
- "Power save mode: %d (Unknown)",
- powerConfig.powerSaveLevel);
- break;
- }
-
- wrqu->data.length = strlen(extra) + 1;
-
- return 0;
-}
-
-static int
-iwprivssmedebug(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- /* No longer supported on the API */
-#if defined (CSR_WIFI_HIP_DEBUG_OFFLINE)
- unifi_debug_buf_dump();
-#endif
-
- return 0;
-}
-
-#ifdef CSR_SUPPORT_WEXT_AP
-#define PARAM_TYPE_INT 0
-#define PARAM_TYPE_STRING 1
-#define CSR_WIFI_MAX_SSID_LEN 32
-#define CSR_WIFI_MAX_SEC_LEN 16
-#define CSR_WIFI_MAX_KEY_LEN 65
-
-static int hex_look_up(char x)
-{
- if(x>='0' && x<='9')
- return (x-48);
- if(x>= 'a' && x <= 'f')
- return (x-87);
- return -1;
-}
-
-static int power (int a, int b)
-{
- int i;
- int num =1;
- for(i=0;i<b;i++)
- num *=a;
- return num;
-}
-
-static int decode_parameter_from_string(unifi_priv_t* priv, char **str_ptr,
- const char *token, int param_type,
- void *dst, int param_max_len)
-{
- u8 int_str[7] = "0";
- u32 param_str_len;
- u8 *param_str_begin,*param_str_end;
- u8 *orig_str = *str_ptr;
-
- if (!strncmp(*str_ptr, token, strlen(token))) {
- strsep(str_ptr, "=,");
- param_str_begin = *str_ptr;
- strsep(str_ptr, "=,");
- if (*str_ptr == NULL) {
- param_str_len = strlen(param_str_begin);
- } else {
- param_str_end = *str_ptr-1;
- param_str_len = param_str_end - param_str_begin;
- }
- unifi_trace(priv,UDBG2,"'token:%s', len:%d, ", token, param_str_len);
- if (param_str_len > param_max_len) {
- unifi_notice(priv,"extracted param len:%d is > MAX:%d\n",param_str_len, param_max_len);
- param_str_len = param_max_len;
- }
- switch (param_type) {
- case PARAM_TYPE_INT:
- {
- u32 *pdst_int = dst,num =0;
- int i,j=0;
- if (param_str_len > sizeof(int_str)) {
- param_str_len = sizeof(int_str);
- }
- memcpy(int_str, param_str_begin, param_str_len);
- for(i = param_str_len; i>0;i--) {
- if(int_str[i-1] >= '0' && int_str[i-1] <='9') {
- num += ((int_str[i-1]-'0')*power(10,j));
- j++;
- } else {
- unifi_error(priv,"decode_parameter_from_string:not a number %c\n",(int_str[i-1]));
- return -1;
- }
- }
- *pdst_int = num;
- unifi_trace(priv,UDBG2,"decode_parameter_from_string:decoded int = %d\n",*pdst_int);
- }
- break;
- default:
- memcpy(dst, param_str_begin, param_str_len);
- *((char *)dst + param_str_len) = 0;
- unifi_trace(priv,UDBG2,"decode_parameter_from_string:decoded string = %s\n",(char *)dst);
- break;
- }
- } else {
- unifi_error(priv,"decode_parameter_from_string: Token:%s not found in %s \n",token,orig_str);
- return -1;
- }
- return 0;
-}
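
For readers unfamiliar with the configuration string consumed above: the AP parameters arrive as comma-separated TOKEN=value pairs terminated by END, and decode_parameter_from_string() peels them off one token at a time, converting integer parameters as it goes. The stand-alone sketch below extracts a single value from such a string; the input string and token are hypothetical examples and the parsing style is simplified compared with the driver's strsep()-based code above.

#include <stdio.h>
#include <string.h>

/* Copy the value following "TOKEN=" (up to the next ',' or the end of the string) into out. */
static int get_value_sketch(const char *cfg, const char *token, char *out, size_t out_len)
{
    const char *start = strstr(cfg, token);
    size_t len;

    if (!start)
        return -1;
    start += strlen(token);
    len = strcspn(start, ",");
    if (len >= out_len)
        len = out_len - 1;
    memcpy(out, start, len);
    out[len] = '\0';
    return 0;
}

int main(void)
{
    const char *cfg = "ASCII_CMD=AP_CFG,SSID=myap,SEC=open,KEY=,CHANNEL=6,END";
    char ssid[33];

    if (get_value_sketch(cfg, "SSID=", ssid, sizeof(ssid)) == 0)
        printf("SSID = %s\n", ssid);    /* prints "myap" */
    return 0;
}
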
-static int store_ap_advanced_config_from_string(unifi_priv_t *priv, char *param_str)
-{
- char * str_ptr=param_str;
- int ret = 0,tmp_var;
- char phy_mode[6];
- CsrWifiSmeApMacConfig * ap_mac_config = &priv->ap_mac_config;
-
- /* Check for BI */
- ret = decode_parameter_from_string(priv, &str_ptr, "BI=",
- PARAM_TYPE_INT, &tmp_var, 5);
- if(ret) {
- unifi_error(priv,"store_ap_advanced_config_from_string: BI not found\n");
- return -1;
- }
- ap_mac_config->beaconInterval = tmp_var;
- ret = decode_parameter_from_string(priv, &str_ptr, "DTIM_PER=",
- PARAM_TYPE_INT, &tmp_var, 5);
- if(ret) {
- unifi_error(priv,"store_ap_advanced_config_from_string: DTIM_PER not found\n");
- return -1;
- }
- ap_mac_config->dtimPeriod = tmp_var;
- ret = decode_parameter_from_string(priv, &str_ptr, "WMM=",
- PARAM_TYPE_INT, &tmp_var, 5);
- if(ret) {
- unifi_error(priv,"store_ap_advanced_config_from_string: WMM not found\n");
- return -1;
- }
- ap_mac_config->wmmEnabled = tmp_var;
- ret = decode_parameter_from_string(priv, &str_ptr, "PHY=",
- PARAM_TYPE_STRING, phy_mode, 5);
- if(ret) {
- unifi_error(priv,"store_ap_advanced_config_from_string: PHY not found\n");
- } else {
- if(strstr(phy_mode,"b")){
- ap_mac_config->phySupportedBitmap = CSR_WIFI_SME_AP_PHY_SUPPORT_B;
- }
- if(strstr(phy_mode,"g")) {
- ap_mac_config->phySupportedBitmap |= CSR_WIFI_SME_AP_PHY_SUPPORT_G;
- }
- if(strstr(phy_mode,"n")) {
- ap_mac_config->phySupportedBitmap |= CSR_WIFI_SME_AP_PHY_SUPPORT_N;
- }
- ap_mac_config->supportedRatesCount =
- uf_configure_supported_rates(ap_mac_config->supportedRates, ap_mac_config->phySupportedBitmap);
- }
- return ret;
-}
-
-static int store_ap_config_from_string( unifi_priv_t * priv,char *param_str)
-
-{
- char *str_ptr = param_str;
- char sub_cmd[16];
- char sec[CSR_WIFI_MAX_SEC_LEN];
- char key[CSR_WIFI_MAX_KEY_LEN];
- int ret = 0,tmp_var;
- CsrWifiSmeApConfig_t *ap_config = &priv->ap_config;
- CsrWifiSmeApMacConfig * ap_mac_config = &priv->ap_mac_config;
- memset(sub_cmd, 0, sizeof(sub_cmd));
- if(!strstr(param_str,"END")) {
- unifi_error(priv,"store_ap_config_from_string:Invalid config string:%s\n",param_str);
- return -1;
- }
- if (decode_parameter_from_string(priv,&str_ptr, "ASCII_CMD=",
- PARAM_TYPE_STRING, sub_cmd, 6) != 0) {
- return -1;
- }
- if (strncmp(sub_cmd, "AP_CFG", 6)) {
-
- if(!strncmp(sub_cmd ,"ADVCFG", 6)) {
- return store_ap_advanced_config_from_string(priv, str_ptr);
- }
- unifi_error(priv,"store_ap_config_from_string: sub_cmd:%s != 'AP_CFG or ADVCFG'!\n", sub_cmd);
- return -1;
- }
- memset(ap_config, 0, sizeof(CsrWifiSmeApConfig_t));
- ret = decode_parameter_from_string(priv,&str_ptr, "SSID=",
- PARAM_TYPE_STRING, ap_config->ssid.ssid,
- CSR_WIFI_MAX_SSID_LEN);
- if(ret) {
- unifi_error(priv,"store_ap_config_from_string: SSID not found\n");
- return -1;
- }
- ap_config->ssid.length = strlen(ap_config->ssid.ssid);
-
- ret = decode_parameter_from_string(priv, &str_ptr, "SEC=",
- PARAM_TYPE_STRING, sec, CSR_WIFI_MAX_SEC_LEN);
- if(ret) {
- unifi_error(priv,"store_ap_config_from_string: SEC not found\n");
- return -1;
- }
- ret = decode_parameter_from_string(priv,&str_ptr, "KEY=",
- PARAM_TYPE_STRING, key, CSR_WIFI_MAX_KEY_LEN);
- if(!strcasecmp(sec,"open")) {
- unifi_trace(priv,UDBG2,"store_ap_config_from_string: security open");
- ap_config->credentials.authType = CSR_WIFI_SME_AP_AUTH_TYPE_OPEN_SYSTEM;
- if(ret) {
- unifi_notice(priv,"store_ap_config_from_string: KEY not found:fine with Open\n");
- }
- }
- else if(!strcasecmp(sec,"wpa2-psk")) {
- int i,j=0;
- CsrWifiNmeApAuthPers *pers =
- ((CsrWifiNmeApAuthPers *)&(ap_config->credentials.nmeAuthType.authTypePersonal));
- u8 *psk = pers->authPers_credentials.psk.psk;
-
- unifi_trace(priv,UDBG2,"store_ap_config_from_string: security WPA2");
- if(ret) {
- unifi_error(priv,"store_ap_config_from_string: KEY not found for WPA2\n");
- return -1;
- }
- ap_config->credentials.authType = CSR_WIFI_SME_AP_AUTH_TYPE_PERSONAL;
- pers->authSupport = CSR_WIFI_SME_RSN_AUTH_WPA2PSK;
- pers->rsnCapabilities =0;
- pers->wapiCapabilities =0;
- pers->pskOrPassphrase=CSR_WIFI_NME_AP_CREDENTIAL_TYPE_PSK;
- pers->authPers_credentials.psk.encryptionMode =
- (CSR_WIFI_NME_ENCRYPTION_CIPHER_PAIRWISE_CCMP |CSR_WIFI_NME_ENCRYPTION_CIPHER_GROUP_CCMP) ;
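- /* Convert the 64-character hex string supplied via KEY= into the 32-byte PSK */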
- for(i=0;i<32;i++){
- psk[i] = (16*hex_look_up(key[j]))+hex_look_up(key[j+1]);
- j+=2;
- }
-
- } else {
- unifi_notice(priv,"store_ap_config_from_string: Unknown security: Assuming Open");
- ap_config->credentials.authType = CSR_WIFI_SME_AP_AUTH_TYPE_OPEN_SYSTEM;
- return -1;
- }
- /* Decode into a temporary int first so that neighbouring struct fields
- of other types are not overwritten */
- ret = decode_parameter_from_string(priv,&str_ptr, "CHANNEL=", PARAM_TYPE_INT, &tmp_var, 5);
- if(ret)
- return -1;
- ap_config->channel = tmp_var;
- ret = decode_parameter_from_string(priv,&str_ptr, "PREAMBLE=", PARAM_TYPE_INT, &tmp_var, 5);
- if(ret)
- return -1;
- ap_mac_config->preamble = tmp_var;
- ret = decode_parameter_from_string(priv,&str_ptr, "MAX_SCB=", PARAM_TYPE_INT, &tmp_var, 5);
- if(ret)
-        return -1;
- ap_config->max_connections = tmp_var;
- return ret;
-}
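-
-/*
- * Illustrative only (not taken from this driver): the configuration string
- * handed to iwprivsapconfig() is expected to carry the tags parsed above,
- * in this order:
- *
- *   ASCII_CMD=AP_CFG SSID=<ssid> SEC=open|wpa2-psk KEY=<64 hex chars>
- *   CHANNEL=<n> PREAMBLE=<n> MAX_SCB=<n> END
- *
- * How the tags are delimited is decided by decode_parameter_from_string(),
- * which is not shown here.
- */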
-
-static int
-iwprivsapstart(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- int r;
-
- unifi_trace(priv, UDBG1, "iwprivsapstart\n" );
- r = sme_ap_start(priv,interfacePriv->InterfaceTag,&priv->ap_config);
- if(r) {
- unifi_error(priv,"iwprivsapstart AP START failed : %d\n",-r);
- }
- return r;
-}
-
-static int
-iwprivsapconfig(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- char *cfg_str = NULL;
- int r;
-
- unifi_trace(priv, UDBG1, "iwprivsapconfig\n" );
- if (wrqu->data.length != 0) {
- char *str;
- if (!(cfg_str = kmalloc(wrqu->data.length+1, GFP_KERNEL)))
- {
- return -ENOMEM;
- }
- if (copy_from_user(cfg_str, wrqu->data.pointer, wrqu->data.length)) {
- kfree(cfg_str);
- return -EFAULT;
- }
- cfg_str[wrqu->data.length] = 0;
- unifi_trace(priv,UDBG2,"length:%d\n",wrqu->data.length);
- unifi_trace(priv,UDBG2,"AP configuration string:%s\n",cfg_str);
- str = cfg_str;
- if ((r = store_ap_config_from_string(priv,str))) {
- unifi_error(priv, "iwprivsapconfig:Failed to decode the string %d\n",r);
- kfree(cfg_str);
- return -EIO;
-
- }
- } else {
- unifi_error(priv,"iwprivsapconfig argument length = 0 \n");
- return -EIO;
- }
- r = sme_ap_config(priv, &priv->ap_mac_config, &priv->group_sec_config);
- if(r) {
- unifi_error(priv,"iwprivsapstop AP Config failed : %d\n",-r);
- } else if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) {
- unifi_trace(priv, UDBG1, "iwprivsapconfig: Starting the AP");
- r = sme_ap_start(priv,interfacePriv->InterfaceTag,&priv->ap_config);
- if(r) {
- unifi_error(priv,"iwprivsapstart AP START failed : %d\n",-r);
- }
- }
- kfree(cfg_str);
- return r;
-}
-
-static int
-iwprivsapstop(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- int r;
- u16 interface_tag = interfacePriv->InterfaceTag;
-
- unifi_trace(priv, UDBG1, "iwprivsapstop\n" );
- r = sme_ap_stop(priv,interface_tag);
- if(r) {
- unifi_error(priv,"iwprivsapstop AP STOP failed : %d\n",-r);
- }
- return r;
-}
-
-#ifdef ANDROID_BUILD
-static int
-iwprivsapfwreload(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
-
- unifi_trace(priv, UDBG1, "iwprivsapfwreload\n" );
- return 0;
-}
-
-static int
-iwprivsstackstart(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- unifi_trace(priv, UDBG1, "iwprivsstackstart\n" );
- return 0;
-}
-
-static int
-iwprivsstackstop(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- int r = 0;
- u16 interface_tag = interfacePriv->InterfaceTag;
-
- unifi_trace(priv, UDBG1, "iwprivsstackstop\n" );
-
- switch(interfacePriv->interfaceMode) {
- case CSR_WIFI_ROUTER_CTRL_MODE_STA:
- case CSR_WIFI_ROUTER_CTRL_MODE_P2PCLI:
- case CSR_WIFI_ROUTER_CTRL_MODE_IBSS:
- r = sme_mgt_disconnect(priv);
- break;
- case CSR_WIFI_ROUTER_CTRL_MODE_AP:
- case CSR_WIFI_ROUTER_CTRL_MODE_P2PGO:
- r = sme_ap_stop(priv,interface_tag);
- break;
- default :
- break;
- }
-
- if(r) {
- unifi_error(priv,"iwprivsstackstop Stack stop failed : %d\n",-r);
- }
- return 0;
-}
-#endif /* ANDROID_BUILD */
-#endif /* CSR_SUPPORT_WEXT_AP */
-
-#ifdef CSR_WIFI_SECURITY_WAPI_ENABLE
-static int
-iwprivsconfwapi(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- u8 enable;
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
-
- unifi_trace(priv, UDBG1, "iwprivsconfwapi\n" );
-
- if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) {
- unifi_error(priv, "iwprivsconfwapi: not permitted in Mode %d\n",
- interfacePriv->interfaceMode);
- return -EPERM;
- }
-
- enable = *(u8*)(extra);
-
- if (enable) {
- priv->connection_config.authModeMask = CSR_WIFI_SME_AUTH_MODE_80211_OPEN;
- priv->connection_config.authModeMask |= (CSR_WIFI_SME_AUTH_MODE_WAPI_WAIPSK | CSR_WIFI_SME_AUTH_MODE_WAPI_WAI);
- priv->connection_config.encryptionModeMask |=
- CSR_WIFI_SME_ENCRYPTION_CIPHER_PAIRWISE_SMS4 | CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_SMS4;
- } else {
- priv->connection_config.authModeMask &= ~(CSR_WIFI_SME_AUTH_MODE_WAPI_WAIPSK | CSR_WIFI_SME_AUTH_MODE_WAPI_WAI);
- priv->connection_config.encryptionModeMask &=
- ~(CSR_WIFI_SME_ENCRYPTION_CIPHER_PAIRWISE_SMS4 | CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_SMS4);
- }
-
- return 0;
-}
-
-static int
-iwprivswpikey(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int r = 0, i;
- CsrWifiSmeKey key;
- unifiio_wapi_key_t inKey;
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
-
- unifi_trace(priv, UDBG1, "iwprivswpikey\n" );
-
- if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) {
- unifi_error(priv, "iwprivswpikey: not permitted in Mode %d\n",
- interfacePriv->interfaceMode);
- return -EPERM;
- }
-
- inKey = *(unifiio_wapi_key_t*)(extra);
-
- if (inKey.unicastKey) {
- key.keyType = CSR_WIFI_SME_KEY_TYPE_PAIRWISE;
- } else {
- key.keyType = CSR_WIFI_SME_KEY_TYPE_GROUP;
- }
-
- key.keyIndex = inKey.keyIndex;
-
- /* memcpy(key.keyRsc, inKey.keyRsc, 16); */
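- /* Pack the 16-byte receive sequence counter into eight 16-bit words, low byte first */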
- for (i = 0; i < 16; i+= 2)
- {
- key.keyRsc[i/2] = inKey.keyRsc[i+1] << 8 | inKey.keyRsc[i];
- }
-
- memcpy(key.address.a, inKey.address, 6);
- key.keyLength = 32;
- memcpy(key.key, inKey.key, 32);
- key.authenticator = 0;
- key.wepTxKey = 0;
-
- unifi_trace(priv, UDBG1, "keyType = %d, keyIndex = %d, wepTxKey = %d, keyRsc = %x:%x, auth = %d, address = %x:%x, "
- "keylength = %d, key = %x:%x\n", key.keyType, key.keyIndex, key.wepTxKey,
- key.keyRsc[0], key.keyRsc[7], key.authenticator,
- key.address.a[0], key.address.a[5], key.keyLength, key.key[0],
- key.key[15]);
-
- r = sme_mgt_key(priv, &key, CSR_WIFI_SME_LIST_ACTION_ADD);
- if (r) {
- unifi_error(priv, "SETKEYS request was rejected with result %d\n", r);
- return convert_sme_error(r);
- }
-
- return r;
-}
-#endif
-
-
-static int
-unifi_giwname(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- char *name = wrqu->name;
- unifi_trace(priv, UDBG2, "unifi_giwname\n");
-
- if (priv->if_index == CSR_INDEX_5G) {
- strcpy(name, "IEEE 802.11-a");
- } else {
- strcpy(name, "IEEE 802.11-bgn");
- }
- return 0;
-} /* unifi_giwname() */
-
-
-static int
-unifi_siwfreq(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- struct iw_freq *freq = (struct iw_freq *)wrqu;
-
- unifi_trace(priv, UDBG2, "unifi_siwfreq\n");
-
- if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) {
- unifi_error(priv, "unifi_siwfreq: not permitted in Mode %d\n",
- interfacePriv->interfaceMode);
- return -EPERM;
- }
-
-
- /*
- * Channel is stored in the connection configuration,
- * and set later when a connection is requested.
- */
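- /* A value with e == 0 and m <= 1000 is already a channel number; anything else is a frequency that needs converting */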
- if ((freq->e == 0) && (freq->m <= 1000)) {
- priv->connection_config.adhocChannel = freq->m;
- } else {
- priv->connection_config.adhocChannel = wext_freq_to_channel(freq->m, freq->e);
- }
-
- return 0;
-} /* unifi_siwfreq() */
-
-
-static int
-unifi_giwfreq(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- struct iw_freq *freq = (struct iw_freq *)wrqu;
- int err = 0;
- CsrWifiSmeConnectionInfo connectionInfo;
-
- unifi_trace(priv, UDBG2, "unifi_giwfreq\n");
- CHECK_INITED(priv);
-
- if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) {
- unifi_error(priv, "unifi_giwfreq: not permitted in Mode %d\n",
- interfacePriv->interfaceMode);
- return -EPERM;
- }
-
-
- UF_RTNL_UNLOCK();
- err = sme_mgt_connection_info_get(priv, &connectionInfo);
- UF_RTNL_LOCK();
-
- freq->m = channel_to_mhz(connectionInfo.channelNumber,
- (connectionInfo.networkType80211 == CSR_WIFI_SME_RADIO_IF_GHZ_5_0));
- freq->e = 6;
-
- return convert_sme_error(err);
-} /* unifi_giwfreq() */
-
-
-static int
-unifi_siwmode(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
-
- unifi_trace(priv, UDBG2, "unifi_siwmode\n");
-
- if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) {
- unifi_error(priv, "unifi_siwmode: not permitted in Mode %d\n",
- interfacePriv->interfaceMode);
- return -EPERM;
- }
-
-
- switch(wrqu->mode) {
- case IW_MODE_ADHOC:
- priv->connection_config.bssType = CSR_WIFI_SME_BSS_TYPE_ADHOC;
- break;
- case IW_MODE_INFRA:
- priv->connection_config.bssType = CSR_WIFI_SME_BSS_TYPE_INFRASTRUCTURE;
- break;
- case IW_MODE_AUTO:
- priv->connection_config.bssType = CSR_WIFI_SME_BSS_TYPE_ANY_BSS;
- break;
- default:
- unifi_notice(priv, "Unknown IW MODE value.\n");
- }
-
- /* Clear the SSID and BSSID configuration */
- priv->connection_config.ssid.length = 0;
- memset(priv->connection_config.bssid.a, 0xFF, ETH_ALEN);
-
- return 0;
-} /* unifi_siwmode() */
-
-
-
-static int
-unifi_giwmode(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int r = 0;
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- CsrWifiSmeConnectionConfig connectionConfig;
-
- unifi_trace(priv, UDBG2, "unifi_giwmode\n");
- CHECK_INITED(priv);
-
- unifi_trace(priv, UDBG2, "unifi_giwmode: Exisitng mode = 0x%x\n",
- interfacePriv->interfaceMode);
- switch(interfacePriv->interfaceMode) {
- case CSR_WIFI_ROUTER_CTRL_MODE_STA:
- case CSR_WIFI_ROUTER_CTRL_MODE_P2PCLI:
- wrqu->mode = IW_MODE_INFRA;
- break;
- case CSR_WIFI_ROUTER_CTRL_MODE_AP:
- case CSR_WIFI_ROUTER_CTRL_MODE_P2PGO:
- wrqu->mode = IW_MODE_MASTER;
- break;
- case CSR_WIFI_ROUTER_CTRL_MODE_IBSS:
- wrqu->mode = IW_MODE_ADHOC;
- break;
- case CSR_WIFI_ROUTER_CTRL_MODE_P2P:
- case CSR_WIFI_ROUTER_CTRL_MODE_NONE:
- UF_RTNL_UNLOCK();
- r = sme_mgt_connection_config_get(priv, &connectionConfig);
- UF_RTNL_LOCK();
- if (r == 0) {
- switch(connectionConfig.bssType) {
- case CSR_WIFI_SME_BSS_TYPE_ADHOC:
- wrqu->mode = IW_MODE_ADHOC;
- break;
- case CSR_WIFI_SME_BSS_TYPE_INFRASTRUCTURE:
- wrqu->mode = IW_MODE_INFRA;
- break;
- default:
- wrqu->mode = IW_MODE_AUTO;
- unifi_notice(priv, "Unknown IW MODE value.\n");
- }
- }
- break;
- default:
- wrqu->mode = IW_MODE_AUTO;
- unifi_notice(priv, "Unknown IW MODE value.\n");
-
- }
- unifi_trace(priv, UDBG4, "unifi_giwmode: mode = 0x%x\n", wrqu->mode);
- return r;
-} /* unifi_giwmode() */
-
-
-
-static int
-unifi_giwrange(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_point *dwrq = &wrqu->data;
- struct iw_range *range = (struct iw_range *) extra;
- int i;
-
- unifi_trace(NULL, UDBG2, "unifi_giwrange\n");
-
- dwrq->length = sizeof(struct iw_range);
- memset(range, 0, sizeof(*range));
- range->min_nwid = 0x0000;
- range->max_nwid = 0x0000;
-
- /*
- * Don't report the frequency/channel table; that way the channel
- * number returned elsewhere will be printed as a channel number.
- */
-
- /* Ranges of values reported in quality structs */
- range->max_qual.qual = 40; /* Max expected qual value */
- range->max_qual.level = -120; /* Noise floor in dBm */
- range->max_qual.noise = -120; /* Noise floor in dBm */
-
-
- /* space for IW_MAX_BITRATES (8 up to WE15, 32 later) */
- i = 0;
-#if WIRELESS_EXT > 15
- range->bitrate[i++] = 2 * 500000;
- range->bitrate[i++] = 4 * 500000;
- range->bitrate[i++] = 11 * 500000;
- range->bitrate[i++] = 22 * 500000;
- range->bitrate[i++] = 12 * 500000;
- range->bitrate[i++] = 18 * 500000;
- range->bitrate[i++] = 24 * 500000;
- range->bitrate[i++] = 36 * 500000;
- range->bitrate[i++] = 48 * 500000;
- range->bitrate[i++] = 72 * 500000;
- range->bitrate[i++] = 96 * 500000;
- range->bitrate[i++] = 108 * 500000;
-#else
- range->bitrate[i++] = 2 * 500000;
- range->bitrate[i++] = 4 * 500000;
- range->bitrate[i++] = 11 * 500000;
- range->bitrate[i++] = 22 * 500000;
- range->bitrate[i++] = 24 * 500000;
- range->bitrate[i++] = 48 * 500000;
- range->bitrate[i++] = 96 * 500000;
- range->bitrate[i++] = 108 * 500000;
-#endif /* WIRELESS_EXT < 16 */
- range->num_bitrates = i;
-
- range->max_encoding_tokens = NUM_WEPKEYS;
- range->num_encoding_sizes = 2;
- range->encoding_size[0] = 5;
- range->encoding_size[1] = 13;
-
- range->we_version_source = 20;
- range->we_version_compiled = WIRELESS_EXT;
-
- /* Number of channels available in h/w */
- range->num_channels = 14;
- /* Number of entries in freq[] array */
- range->num_frequency = 14;
- for (i = 0; (i < range->num_frequency) && (i < IW_MAX_FREQUENCIES); i++) {
- int chan = i + 1;
- range->freq[i].i = chan;
- range->freq[i].m = channel_to_mhz(chan, 0);
- range->freq[i].e = 6;
- }
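- /* Append a few representative 5 GHz channels (36, 40, 44 and 48) if there is room in the table */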
- if ((i+3) < IW_MAX_FREQUENCIES) {
- range->freq[i].i = 36;
- range->freq[i].m = channel_to_mhz(36, 1);
- range->freq[i].e = 6;
- range->freq[i+1].i = 40;
- range->freq[i+1].m = channel_to_mhz(40, 1);
- range->freq[i+1].e = 6;
- range->freq[i+2].i = 44;
- range->freq[i+2].m = channel_to_mhz(44, 1);
- range->freq[i+2].e = 6;
- range->freq[i+3].i = 48;
- range->freq[i+3].m = channel_to_mhz(48, 1);
- range->freq[i+3].e = 6;
- range->num_frequency = i + 4;
- }
-
-#if WIRELESS_EXT > 16
- /* Event capability (kernel + driver) */
- range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
- IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
- IW_EVENT_CAPA_MASK(SIOCGIWAP) |
- IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
- range->event_capa[1] = IW_EVENT_CAPA_K_1;
- range->event_capa[4] = (IW_EVENT_CAPA_MASK(IWEVTXDROP) |
- IW_EVENT_CAPA_MASK(IWEVCUSTOM) |
- IW_EVENT_CAPA_MASK(IWEVREGISTERED) |
- IW_EVENT_CAPA_MASK(IWEVEXPIRED));
-#endif /* WIRELESS_EXT > 16 */
-
-#if WIRELESS_EXT > 17
- range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
- IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
-#endif /* WIRELESS_EXT > 17 */
-
-
- return 0;
-} /* unifi_giwrange() */
-
-
-static int
-unifi_siwap(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- int err = 0;
-
- CHECK_INITED(priv);
-
- if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) {
- unifi_error(priv, "unifi_siwap: not permitted in Mode %d\n",
- interfacePriv->interfaceMode);
- return -EPERM;
- }
-
-
- if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) {
- return -EINVAL;
- }
-
- unifi_trace(priv, UDBG1, "unifi_siwap: asked for %pM\n",
- wrqu->ap_addr.sa_data);
-
- if (is_zero_ether_addr(wrqu->ap_addr.sa_data)) {
- priv->ignore_bssid_join = FALSE;
- err = sme_mgt_disconnect(priv);
- if (err) {
- unifi_trace(priv, UDBG4, "unifi_siwap: Disconnect failed, status %d\n", err);
- }
- return 0;
- }
-
- if (priv->ignore_bssid_join) {
- unifi_trace(priv, UDBG4, "unifi_siwap: ignoring second join\n");
- priv->ignore_bssid_join = FALSE;
- } else {
- memcpy(priv->connection_config.bssid.a, wrqu->ap_addr.sa_data, ETH_ALEN);
- unifi_trace(priv, UDBG1, "unifi_siwap: Joining %X:%X:%X:%X:%X:%X\n",
- priv->connection_config.bssid.a[0],
- priv->connection_config.bssid.a[1],
- priv->connection_config.bssid.a[2],
- priv->connection_config.bssid.a[3],
- priv->connection_config.bssid.a[4],
- priv->connection_config.bssid.a[5]);
- err = sme_mgt_connect(priv);
- if (err) {
- unifi_error(priv, "unifi_siwap: Join failed, status %d\n", err);
- return convert_sme_error(err);
- }
- }
-
- return 0;
-} /* unifi_siwap() */
-
-
-static int
-unifi_giwap(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- CsrWifiSmeConnectionInfo connectionInfo;
- int r = 0;
- u8 *bssid;
-
- CHECK_INITED(priv);
- unifi_trace(priv, UDBG2, "unifi_giwap\n");
-
- if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) {
- unifi_error(priv, "iwprivswpikey: not permitted in Mode %d\n",
- interfacePriv->interfaceMode);
- return -EPERM;
- }
-
- UF_RTNL_UNLOCK();
- r = sme_mgt_connection_info_get(priv, &connectionInfo);
- UF_RTNL_LOCK();
-
- if (r == 0) {
- bssid = connectionInfo.bssid.a;
- wrqu->ap_addr.sa_family = ARPHRD_ETHER;
- unifi_trace(priv, UDBG4, "unifi_giwap: BSSID = %pM\n", bssid);
-
- memcpy(wrqu->ap_addr.sa_data, bssid, ETH_ALEN);
- } else {
- memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
- }
-
- return 0;
-} /* unifi_giwap() */
-
-
-static int
-unifi_siwscan(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- int r;
- CsrWifiSsid scan_ssid;
- unsigned char *channel_list = NULL;
- int chans_good = 0;
-#if WIRELESS_EXT > 17
- struct iw_point *data = &wrqu->data;
- struct iw_scan_req *req = (struct iw_scan_req *) extra;
-#endif
-
- CHECK_INITED(priv);
-
- if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) {
- unifi_error(priv, "unifi_siwscan: not permitted in Mode %d\n",
- interfacePriv->interfaceMode);
- return -EPERM;
- }
-
-
-#if WIRELESS_EXT > 17
- /* Providing a valid channel list will force an active scan */
- if (req) {
- if ((req->num_channels > 0) && (req->num_channels < IW_MAX_FREQUENCIES)) {
- channel_list = kmalloc(req->num_channels, GFP_KERNEL);
- if (channel_list) {
- int i;
- for (i = 0; i < req->num_channels; i++) {
- /* Convert frequency to channel number */
- int ch = wext_freq_to_channel(req->channel_list[i].m,
- req->channel_list[i].e);
- if (ch) {
- channel_list[chans_good++] = ch;
- }
- }
- unifi_trace(priv, UDBG1,
- "SIWSCAN: Scanning %d channels\n", chans_good);
- } else {
- /* Fall back to scanning all */
- unifi_error(priv, "SIWSCAN: Can't alloc channel_list (%d)\n",
- req->num_channels);
- }
- }
- }
-
- if (req && (data->flags & IW_SCAN_THIS_ESSID)) {
- memcpy(scan_ssid.ssid, req->essid, req->essid_len);
- scan_ssid.length = req->essid_len;
- unifi_trace(priv, UDBG1,
- "SIWSCAN: Scanning for %.*s\n",
- scan_ssid.length, scan_ssid.ssid);
- } else
-#endif
- {
- unifi_trace(priv, UDBG1, "SIWSCAN: Scanning for all APs\n");
- scan_ssid.length = 0;
- }
-
- r = sme_mgt_scan_full(priv, &scan_ssid, chans_good, channel_list);
- if (r) {
- unifi_error(priv, "SIWSCAN: Scan returned error %d\n", r);
- } else {
- unifi_trace(priv, UDBG1, "SIWSCAN: Scan done\n");
- wext_send_scan_results_event(priv);
- }
-
- if (channel_list) {
- kfree(channel_list);
- }
-
- return r;
-
-} /* unifi_siwscan() */
-
-
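-/*
- * Walk a buffer of 802.11 information elements (id, length, data) and
- * return a pointer to the first element whose id matches, or NULL if
- * no such element is present.
- */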
-static const unsigned char *
-unifi_find_info_element(int id, const unsigned char *info, int len)
-{
- const unsigned char *ie = info;
-
- while (len > 1)
- {
- int e_id, e_len;
- e_id = ie[0];
- e_len = ie[1];
-
- /* Return if we find a match */
- if (e_id == id)
- {
- return ie;
- }
-
- len -= (e_len + 2);
- ie += (e_len + 2);
- }
-
- return NULL;
-} /* unifi_find_info_element() */
-
-
-/*
- * Translate scan data returned from the card to a card independent
- * format that the Wireless Tools will understand - Jean II
- */
-int
-unifi_translate_scan(struct net_device *dev,
- struct iw_request_info *info,
- char *current_ev, char *end_buf,
- CsrWifiSmeScanResult *scan_data,
- int scan_index)
-{
- struct iw_event iwe; /* Temporary buffer */
- unsigned char *info_elems;
- int info_elem_len;
- const unsigned char *elem;
- u16 capabilities;
- int signal, noise, snr;
- char *start_buf = current_ev;
- char *current_val; /* For rates */
- int i, r;
-
- info_elems = scan_data->informationElements;
- info_elem_len = scan_data->informationElementsLength;
-
- if (!scan_data->informationElementsLength || !scan_data->informationElements) {
- unifi_error(NULL, "*** NULL SCAN IEs ***\n");
- return -EIO;
- }
-
- /* get capinfo bits */
- capabilities = scan_data->capabilityInformation;
-
- unifi_trace(NULL, UDBG5, "Capabilities: 0x%x\n", capabilities);
-
- /* First entry *MUST* be the AP MAC address */
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWAP;
- iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(iwe.u.ap_addr.sa_data, scan_data->bssid.a, ETH_ALEN);
- iwe.len = IW_EV_ADDR_LEN;
- r = uf_iwe_stream_add_event(info, start_buf, end_buf, &iwe, IW_EV_ADDR_LEN);
- if (r < 0) {
- return r;
- }
- start_buf += r;
-
- /* Other entries will be displayed in the order we give them */
-
- /* Add the ESSID */
- /* find SSID in Info Elems */
- elem = unifi_find_info_element(IE_SSID_ID, info_elems, info_elem_len);
- if (elem) {
- int e_len = elem[1];
- const unsigned char *e_ptr = elem + 2;
- unsigned char buf[33];
-
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWESSID;
- iwe.u.essid.length = e_len;
- if (iwe.u.essid.length > 32) {
- iwe.u.essid.length = 32;
- }
- iwe.u.essid.flags = scan_index;
- memcpy(buf, e_ptr, iwe.u.essid.length);
- buf[iwe.u.essid.length] = '\0';
- r = uf_iwe_stream_add_point(info, start_buf, end_buf, &iwe, buf);
- if (r < 0) {
- return r;
- }
- start_buf += r;
-
- }
-
- /* Add mode */
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWMODE;
- if (scan_data->bssType == CSR_WIFI_SME_BSS_TYPE_INFRASTRUCTURE) {
- iwe.u.mode = IW_MODE_INFRA;
- } else {
- iwe.u.mode = IW_MODE_ADHOC;
- }
- iwe.len = IW_EV_UINT_LEN;
- r = uf_iwe_stream_add_event(info, start_buf, end_buf, &iwe, IW_EV_UINT_LEN);
- if (r < 0) {
- return r;
- }
- start_buf += r;
-
- /* Add frequency. iwlist will convert to channel using table given in giwrange */
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = SIOCGIWFREQ;
- iwe.u.freq.m = scan_data->channelFrequency;
- iwe.u.freq.e = 6;
- r = uf_iwe_stream_add_event(info, start_buf, end_buf, &iwe, IW_EV_FREQ_LEN);
- if (r < 0) {
- return r;
- }
- start_buf += r;
-
-
- /* Add quality statistics */
- iwe.cmd = IWEVQUAL;
- /*
- * level and noise below are signed dBm values clipped to the range
- * [-192, 63] and then mapped into an unsigned 8-bit number by adding
- * 0x100 to any negative value.
- */
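- /* e.g. an RSSI of -90 dBm lies within [-192, 63] and is therefore reported as -90 + 0x100 = 166 */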
- signal = scan_data->rssi; /* This value is in dBm */
- /* Clip range of snr */
- snr = (scan_data->snr > 0) ? scan_data->snr : 0; /* In dB relative, from 0 - 255 */
- snr = (snr < 255) ? snr : 255;
- noise = signal - snr;
-
- /* Clip range of signal */
- signal = (signal < 63) ? signal : 63;
- signal = (signal > -192) ? signal : -192;
-
- /* Clip range of noise */
- noise = (noise < 63) ? noise : 63;
- noise = (noise > -192) ? noise : -192;
-
- /* Make u8 */
- signal = ( signal < 0 ) ? signal + 0x100 : signal;
- noise = ( noise < 0 ) ? noise + 0x100 : noise;
-
- iwe.u.qual.level = (u8)signal; /* -192 : 63 */
- iwe.u.qual.noise = (u8)noise; /* -192 : 63 */
- iwe.u.qual.qual = snr; /* 0 : 255 */
- iwe.u.qual.updated = 0;
-#if WIRELESS_EXT > 16
- iwe.u.qual.updated |= IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_UPDATED |
- IW_QUAL_QUAL_UPDATED;
-#if WIRELESS_EXT > 18
- iwe.u.qual.updated |= IW_QUAL_DBM;
-#endif
-#endif
- r = uf_iwe_stream_add_event(info, start_buf, end_buf, &iwe, IW_EV_QUAL_LEN);
- if (r < 0) {
- return r;
- }
- start_buf += r;
-
- /* Add encryption capability */
- iwe.cmd = SIOCGIWENCODE;
- if (capabilities & SIG_CAP_PRIVACY) {
- iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
- } else {
- iwe.u.data.flags = IW_ENCODE_DISABLED;
- }
- iwe.u.data.length = 0;
- iwe.len = IW_EV_POINT_LEN + iwe.u.data.length;
- r = uf_iwe_stream_add_point(info, start_buf, end_buf, &iwe, "");
- if (r < 0) {
- return r;
- }
- start_buf += r;
-
-
- /*
- * Rate : stuffing multiple values in a single event requires a bit
- * more magic - Jean II
- */
- current_val = start_buf + IW_EV_LCP_LEN;
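- /* Values are appended after the event header; the event is only kept if at least one rate was added (see the length check below) */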
-
- iwe.cmd = SIOCGIWRATE;
- /* Those two flags are ignored... */
- iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
-
- elem = unifi_find_info_element(IE_SUPPORTED_RATES_ID,
- info_elems, info_elem_len);
- if (elem) {
- int e_len = elem[1];
- const unsigned char *e_ptr = elem + 2;
-
- /*
- * Count how many rates we have.
- * Zero marks the end of the list, if the list is not truncated.
- */
- /* Max 8 values */
- for (i = 0; i < e_len; i++) {
- if (e_ptr[i] == 0) {
- break;
- }
- /* Bit rate given in 500 kb/s units (+ 0x80) */
- iwe.u.bitrate.value = ((e_ptr[i] & 0x7f) * 500000);
- /* Add new value to event */
- r = uf_iwe_stream_add_value(info, start_buf, current_val, end_buf, &iwe, IW_EV_PARAM_LEN);
- if (r < 0) {
- return r;
- }
- current_val +=r;
-
- }
- }
- elem = unifi_find_info_element(IE_EXTENDED_SUPPORTED_RATES_ID,
- info_elems, info_elem_len);
- if (elem) {
- int e_len = elem[1];
- const unsigned char *e_ptr = elem + 2;
-
- /*
- * Count how many rates we have.
- * Zero marks the end of the list, if the list is not truncated.
- */
- /* Max 8 values */
- for (i = 0; i < e_len; i++) {
- if (e_ptr[i] == 0) {
- break;
- }
- /* Bit rate given in 500 kb/s units (+ 0x80) */
- iwe.u.bitrate.value = ((e_ptr[i] & 0x7f) * 500000);
- /* Add new value to event */
- r = uf_iwe_stream_add_value(info, start_buf, current_val, end_buf, &iwe, IW_EV_PARAM_LEN);
- if (r < 0) {
- return r;
- }
- current_val +=r;
- }
- }
- /* Check if we added any rates event */
- if ((current_val - start_buf) > IW_EV_LCP_LEN) {
- start_buf = current_val;
- }
-
-
-#if WIRELESS_EXT > 17
- memset(&iwe, 0, sizeof(iwe));
- iwe.cmd = IWEVGENIE;
- iwe.u.data.length = info_elem_len;
-
- r = uf_iwe_stream_add_point(info, start_buf, end_buf, &iwe, info_elems);
- if (r < 0) {
- return r;
- }
-
- start_buf += r;
-#endif /* WE > 17 */
-
- return (start_buf - current_ev);
-} /* unifi_translate_scan() */
-
-
-
-static int
-unifi_giwscan(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- struct iw_point *dwrq = &wrqu->data;
- int r;
-
- CHECK_INITED(priv);
-
- if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) {
- unifi_error(priv, "unifi_giwscan: not permitted in Mode %d\n",
- interfacePriv->interfaceMode);
- return -EPERM;
- }
-
-
- unifi_trace(priv, UDBG1,
- "unifi_giwscan: buffer (%d bytes) \n",
- dwrq->length);
- UF_RTNL_UNLOCK();
- r = sme_mgt_scan_results_get_async(priv, info, extra, dwrq->length);
- UF_RTNL_LOCK();
- if (r < 0) {
- unifi_trace(priv, UDBG1,
- "unifi_giwscan: buffer (%d bytes) not big enough.\n",
- dwrq->length);
- return r;
- }
-
- dwrq->length = r;
- dwrq->flags = 0;
-
- return 0;
-} /* unifi_giwscan() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_siwessid
- *
- * Request to join a network or start an ad hoc network.
- *
- * Arguments:
- * dev Pointer to network device struct.
- * info Pointer to broken-out ioctl request.
- * data Pointer to argument data.
- * essid Pointer to string giving name of network to join
- * or start
- *
- * Returns:
- * 0 on success and everything complete
- * -EINPROGRESS to have the higher level call the commit method.
- * ---------------------------------------------------------------------------
- */
-static int
-unifi_siwessid(struct net_device *dev, struct iw_request_info *info,
- struct iw_point *data, char *essid)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- int len;
- int err = 0;
-
- CHECK_INITED(priv);
-
- if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) {
- unifi_error(priv, "unifi_siwessid: not permitted in Mode %d\n",
- interfacePriv->interfaceMode);
- return -EPERM;
- }
-
-
- len = 0;
- if (data->flags & 1) {
- /* Limit length */
- len = data->length;
- if (len > UNIFI_MAX_SSID_LEN) {
- len = UNIFI_MAX_SSID_LEN;
- }
- }
-
-#ifdef UNIFI_DEBUG
- {
- char essid_str[UNIFI_MAX_SSID_LEN+1];
- int i;
-
- for (i = 0; i < len; i++) {
- essid_str[i] = (isprint(essid[i]) ? essid[i] : '?');
- }
- essid_str[i] = '\0';
-
- unifi_trace(priv, UDBG1, "unifi_siwessid: asked for '%*s' (%d)\n", len, essid_str, len);
- unifi_trace(priv, UDBG2, " with authModeMask = %d", priv->connection_config.authModeMask);
- }
-#endif
-
- memset(priv->connection_config.bssid.a, 0xFF, ETH_ALEN);
- if (len) {
- if (essid[len - 1] == 0) {
- len --;
- }
-
- memcpy(priv->connection_config.ssid.ssid, essid, len);
- priv->connection_config.ssid.length = len;
-
- } else {
- priv->connection_config.ssid.length = 0;
- }
-
- UF_RTNL_UNLOCK();
- err = sme_mgt_connect(priv);
- UF_RTNL_LOCK();
- if (err) {
- unifi_error(priv, "unifi_siwessid: Join failed, status %d\n", err);
- return convert_sme_error(err);
- }
-
- return 0;
-} /* unifi_siwessid() */
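-
-/*
- * A minimal user-space sketch of how this handler is typically reached
- * (illustrative only, not part of the driver; the interface name "wlan0"
- * is an assumption):
- *
- *   struct iwreq wrq;
- *   int fd = socket(AF_INET, SOCK_DGRAM, 0);
- *   char ssid[] = "ExampleNet";
- *
- *   memset(&wrq, 0, sizeof(wrq));
- *   strncpy(wrq.ifr_name, "wlan0", IFNAMSIZ);
- *   wrq.u.essid.pointer = ssid;
- *   wrq.u.essid.length = strlen(ssid);
- *   wrq.u.essid.flags = 1;       -- a set flag means "use this ESSID"
- *   ioctl(fd, SIOCSIWESSID, &wrq);
- */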
-
-
-static int
-unifi_giwessid(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *essid)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- struct iw_point *data = &wrqu->essid;
- CsrWifiSmeConnectionInfo connectionInfo;
- int r = 0;
-
- unifi_trace(priv, UDBG2, "unifi_giwessid\n");
- CHECK_INITED(priv);
-
- if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) {
- unifi_error(priv, "unifi_giwessid: not permitted in Mode %d\n",
- interfacePriv->interfaceMode);
- return -EPERM;
- }
-
- UF_RTNL_UNLOCK();
- r = sme_mgt_connection_info_get(priv, &connectionInfo);
- UF_RTNL_LOCK();
-
- if (r == 0) {
- data->length = connectionInfo.ssid.length;
- strncpy(essid,
- connectionInfo.ssid.ssid,
- data->length);
- data->flags = 1; /* active */
-
- unifi_trace(priv, UDBG2, "unifi_giwessid: %.*s\n",
- data->length, essid);
- }
-
-
- return 0;
-} /* unifi_giwessid() */
-
-
-static int
-unifi_siwrate(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- struct iw_param *args = &wrqu->bitrate;
- CsrWifiSmeMibConfig mibConfig;
- int r;
-
- CHECK_INITED(priv);
- unifi_trace(priv, UDBG2, "unifi_siwrate\n");
-
- /*
- * If args->fixed == 0, value is max rate or -1 for best
- * If args->fixed == 1, value is rate to set or -1 for best
- * args->disabled and args->flags are not used in SIOCSIWRATE
- */
-
- /* Get, modify and set the MIB data */
- UF_RTNL_UNLOCK();
- r = sme_mgt_mib_config_get(priv, &mibConfig);
- UF_RTNL_LOCK();
- if (r) {
- unifi_error(priv, "unifi_siwrate: Get CsrWifiSmeMibConfigValue failed.\n");
- return r;
- }
-
- /* Default to auto rate algorithm */
- /* in 500Kbit/s, 0 means auto */
- mibConfig.unifiFixTxDataRate = 0;
-
- if (args->value != -1) {
- mibConfig.unifiFixTxDataRate = args->value / 500000;
- }
-
- /* unifiFixMaxTxDataRate: 1 means the rate is a maximum, 0 means it is a fixed value */
- if (args->fixed == 1) {
- mibConfig.unifiFixMaxTxDataRate = 0;
- } else {
- mibConfig.unifiFixMaxTxDataRate = 1;
- }
- UF_RTNL_UNLOCK();
- r = sme_mgt_mib_config_set(priv, &mibConfig);
- UF_RTNL_LOCK();
- if (r) {
- unifi_error(priv, "unifi_siwrate: Set CsrWifiSmeMibConfigValue failed.\n");
- return r;
- }
-
-
- return 0;
-} /* unifi_siwrate() */
-
-
-
-static int
-unifi_giwrate(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- struct iw_param *args = &wrqu->bitrate;
- int r;
- int bitrate, flag;
- CsrWifiSmeMibConfig mibConfig;
- CsrWifiSmeConnectionStats connectionStats;
-
- unifi_trace(priv, UDBG2, "unifi_giwrate\n");
- CHECK_INITED(priv);
-
- flag = 0;
- bitrate = 0;
- UF_RTNL_UNLOCK();
- r = sme_mgt_mib_config_get(priv, &mibConfig);
- UF_RTNL_LOCK();
- if (r) {
- unifi_error(priv, "unifi_giwrate: Get CsrWifiSmeMibConfigValue failed.\n");
- return r;
- }
-
- bitrate = mibConfig.unifiFixTxDataRate;
- flag = mibConfig.unifiFixMaxTxDataRate;
-
- /* Use the value returned by the SME if the MIB returns 0 */
- if (bitrate == 0) {
- UF_RTNL_UNLOCK();
- r = sme_mgt_connection_stats_get(priv, &connectionStats);
- UF_RTNL_LOCK();
- /* Ignore errors, we may be disconnected */
- if (r == 0) {
- bitrate = connectionStats.unifiTxDataRate;
- }
- }
-
- args->value = bitrate * 500000;
- args->fixed = !flag;
-
- return 0;
-} /* unifi_giwrate() */
-
-
-static int
-unifi_siwrts(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- int val = wrqu->rts.value;
- int r = 0;
- CsrWifiSmeMibConfig mibConfig;
-
- unifi_trace(priv, UDBG2, "unifi_siwrts\n");
- CHECK_INITED(priv);
-
- if (wrqu->rts.disabled) {
- val = 2347;
- }
-
- if ( (val < 0) || (val > 2347) )
- {
- return -EINVAL;
- }
-
- /* Get, modify and set the MIB data */
- UF_RTNL_UNLOCK();
- r = sme_mgt_mib_config_get(priv, &mibConfig);
- UF_RTNL_LOCK();
- if (r) {
- unifi_error(priv, "unifi_siwrts: Get CsrWifiSmeMibConfigValue failed.\n");
- return r;
- }
- mibConfig.dot11RtsThreshold = val;
- UF_RTNL_UNLOCK();
- r = sme_mgt_mib_config_set(priv, &mibConfig);
- UF_RTNL_LOCK();
- if (r) {
- unifi_error(priv, "unifi_siwrts: Set CsrWifiSmeMibConfigValue failed.\n");
- return r;
- }
-
- return 0;
-}
-
-
-static int
-unifi_giwrts(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- int r;
- int rts_thresh;
- CsrWifiSmeMibConfig mibConfig;
-
- unifi_trace(priv, UDBG2, "unifi_giwrts\n");
- CHECK_INITED(priv);
-
- UF_RTNL_UNLOCK();
- r = sme_mgt_mib_config_get(priv, &mibConfig);
- UF_RTNL_LOCK();
- if (r) {
- unifi_error(priv, "unifi_giwrts: Get CsrWifiSmeMibConfigValue failed.\n");
- return r;
- }
-
- rts_thresh = mibConfig.dot11RtsThreshold;
- if (rts_thresh > 2347) {
- rts_thresh = 2347;
- }
-
- wrqu->rts.value = rts_thresh;
- wrqu->rts.disabled = (rts_thresh == 2347);
- wrqu->rts.fixed = 1;
-
- return 0;
-}
-
-
-static int
-unifi_siwfrag(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- int val = wrqu->frag.value;
- int r = 0;
- CsrWifiSmeMibConfig mibConfig;
-
- unifi_trace(priv, UDBG2, "unifi_siwfrag\n");
- CHECK_INITED(priv);
-
- if (wrqu->frag.disabled)
- val = 2346;
-
- if ( (val < 256) || (val > 2347) )
- return -EINVAL;
-
- /* Get, modify and set the MIB data */
- UF_RTNL_UNLOCK();
- r = sme_mgt_mib_config_get(priv, &mibConfig);
- UF_RTNL_LOCK();
- if (r) {
- unifi_error(priv, "unifi_siwfrag: Get CsrWifiSmeMibConfigValue failed.\n");
- return r;
- }
- /* Fragmentation threshold must be even */
- mibConfig.dot11FragmentationThreshold = (val & ~0x1);
- UF_RTNL_UNLOCK();
- r = sme_mgt_mib_config_set(priv, &mibConfig);
- UF_RTNL_LOCK();
- if (r) {
- unifi_error(priv, "unifi_siwfrag: Set CsrWifiSmeMibConfigValue failed.\n");
- return r;
- }
-
- return 0;
-}
-
-
-static int
-unifi_giwfrag(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- int r;
- int frag_thresh;
- CsrWifiSmeMibConfig mibConfig;
-
- unifi_trace(priv, UDBG2, "unifi_giwfrag\n");
- CHECK_INITED(priv);
-
- UF_RTNL_UNLOCK();
- r = sme_mgt_mib_config_get(priv, &mibConfig);
- UF_RTNL_LOCK();
- if (r) {
- unifi_error(priv, "unifi_giwfrag: Get CsrWifiSmeMibConfigValue failed.\n");
- return r;
- }
-
- frag_thresh = mibConfig.dot11FragmentationThreshold;
-
- /* Build the return structure */
- wrqu->frag.value = frag_thresh;
- wrqu->frag.disabled = (frag_thresh >= 2346);
- wrqu->frag.fixed = 1;
-
- return 0;
-}
-
-
-static int
-unifi_siwencode(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- struct iw_point *erq = &wrqu->encoding;
- int index;
- int rc = 0;
- int privacy = -1;
- CsrWifiSmeKey sme_key;
-
- unifi_trace(priv, UDBG2, "unifi_siwencode\n");
-
- CHECK_INITED(priv);
-
- if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) {
- unifi_error(priv, "unifi_siwencode: not permitted in Mode %d\n",
- interfacePriv->interfaceMode);
- return -EPERM;
- }
-
-
- /*
- * Key index is encoded in the flags.
- * 0 - use the current default key
- * 1-4 - if key data is supplied, set that key;
- * otherwise make that key the current (default) one
- */
- index = (erq->flags & IW_ENCODE_INDEX); /* key number, 1-4 */
- if ((index < 0) || (index > 4)) {
- unifi_error(priv, "unifi_siwencode: Request to set an invalid key (index:%d)", index);
- return -EINVAL;
- }
-
- /*
- * Basic checking: do we have a key to set ?
- * The IW_ENCODE_NOKEY flag is set when no key is present (only change flags),
- * but older versions rely on sending a key id 1-4.
- */
- if (erq->length > 0) {
-
- /* Check the size of the key */
- if ((erq->length > LARGE_KEY_SIZE) || (erq->length < SMALL_KEY_SIZE)) {
- unifi_error(priv, "unifi_siwencode: Request to set an invalid key (length:%d)",
- erq->length);
- return -EINVAL;
- }
-
- /* Check the index (none (i.e. 0) means use current) */
- if ((index < 1) || (index > 4)) {
- /* If we do not have a previous key, use 1 as default */
- if (!priv->wep_tx_key_index) {
- priv->wep_tx_key_index = 1;
- }
- index = priv->wep_tx_key_index;
- }
-
- /* If we didn't have a key and a valid index is set, we want to remember it */
- if (!priv->wep_tx_key_index) {
- priv->wep_tx_key_index = index;
- }
-
- unifi_trace(priv, UDBG1, "Tx key Index is %d\n", priv->wep_tx_key_index);
-
- privacy = 1;
-
- /* Check if the key is not marked as invalid */
- if ((erq->flags & IW_ENCODE_NOKEY) == 0) {
-
- unifi_trace(priv, UDBG1, "New %s key (len=%d, index=%d)\n",
- (priv->wep_tx_key_index == index) ? "tx" : "",
- erq->length, index);
-
- sme_key.wepTxKey = (priv->wep_tx_key_index == index);
- if (priv->wep_tx_key_index == index) {
- sme_key.keyType = CSR_WIFI_SME_KEY_TYPE_PAIRWISE;
- } else {
- sme_key.keyType = CSR_WIFI_SME_KEY_TYPE_GROUP;
- }
- /* Key index is zero based in SME but 1 based in wext */
- sme_key.keyIndex = (index - 1);
- sme_key.keyLength = erq->length;
- sme_key.authenticator = 0;
- memset(sme_key.address.a, 0xFF, ETH_ALEN);
- memcpy(sme_key.key, extra, erq->length);
-
- UF_RTNL_UNLOCK();
- rc = sme_mgt_key(priv, &sme_key, CSR_WIFI_SME_LIST_ACTION_ADD);
- UF_RTNL_LOCK();
- if (rc) {
- unifi_error(priv, "unifi_siwencode: Set key failed (%d)", rc);
- return convert_sme_error(rc);
- }
-
- /* Store the key to be reported by the SIOCGIWENCODE handler */
- priv->wep_keys[index - 1].len = erq->length;
- memcpy(priv->wep_keys[index - 1].key, extra, erq->length);
- }
- } else {
- /*
- * No additional key data, so it must be a request to change the
- * active key.
- */
- if (index != 0) {
- unifi_trace(priv, UDBG1, "Tx key Index is %d\n", index - 1);
-
- /* Store the index to be reported by the SIOCGIWENCODE handler */
- priv->wep_tx_key_index = index;
-
- sme_key.wepTxKey = 1;
- sme_key.keyType = CSR_WIFI_SME_KEY_TYPE_PAIRWISE;
-
- /* Key index is zero based in SME but 1 based in wext */
- sme_key.keyIndex = (index - 1);
- sme_key.keyLength = 0;
- sme_key.authenticator = 0;
- UF_RTNL_UNLOCK();
- rc = sme_mgt_key(priv, &sme_key, CSR_WIFI_SME_LIST_ACTION_ADD);
- UF_RTNL_LOCK();
- if (rc) {
- unifi_error(priv, "unifi_siwencode: Set key failed (%d)", rc);
- return convert_sme_error(rc);
- }
-
- /* Turn on encryption */
- privacy = 1;
- }
- }
-
- /* Read the flags */
- if (erq->flags & IW_ENCODE_DISABLED) {
- /* disable encryption */
- unifi_trace(priv, UDBG1, "disable WEP encryption\n");
- privacy = 0;
-
- priv->wep_tx_key_index = 0;
-
- unifi_trace(priv, UDBG1, "IW_ENCODE_DISABLED: CSR_WIFI_SME_AUTH_MODE_80211_OPEN\n");
- priv->connection_config.authModeMask = CSR_WIFI_SME_AUTH_MODE_80211_OPEN;
- }
-
- if (erq->flags & IW_ENCODE_RESTRICTED) {
- /* Use shared key auth */
- unifi_trace(priv, UDBG1, "IW_ENCODE_RESTRICTED: CSR_WIFI_SME_AUTH_MODE_80211_SHARED\n");
- priv->connection_config.authModeMask = CSR_WIFI_SME_AUTH_MODE_80211_SHARED;
-
- /* Turn on encryption */
- privacy = 1;
- }
- if (erq->flags & IW_ENCODE_OPEN) {
- unifi_trace(priv, UDBG1, "IW_ENCODE_OPEN: CSR_WIFI_SME_AUTH_MODE_80211_OPEN\n");
- priv->connection_config.authModeMask = CSR_WIFI_SME_AUTH_MODE_80211_OPEN;
- }
-
- /* Commit the changes to flags if needed */
- if (privacy != -1) {
- priv->connection_config.privacyMode = privacy ? CSR_WIFI_SME_80211_PRIVACY_MODE_ENABLED : CSR_WIFI_SME_80211_PRIVACY_MODE_DISABLED;
- priv->connection_config.encryptionModeMask = privacy ? (CSR_WIFI_SME_ENCRYPTION_CIPHER_PAIRWISE_WEP40 |
- CSR_WIFI_SME_ENCRYPTION_CIPHER_PAIRWISE_WEP104 |
- CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_WEP40 |
- CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_WEP104) :
- CSR_WIFI_SME_ENCRYPTION_CIPHER_NONE;
- }
-
- return convert_sme_error(rc);
-
-} /* unifi_siwencode() */
-
-
-
-static int
-unifi_giwencode(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- struct iw_point *erq = &wrqu->encoding;
-
- unifi_trace(priv, UDBG2, "unifi_giwencode\n");
-
- CHECK_INITED(priv);
-
- if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) {
- unifi_error(priv, "unifi_giwencode: not permitted in Mode %d\n",
- interfacePriv->interfaceMode);
- return -EPERM;
- }
-
-
- if (priv->connection_config.authModeMask == CSR_WIFI_SME_AUTH_MODE_80211_SHARED) {
- erq->flags = IW_ENCODE_RESTRICTED;
- }
- else {
- if (priv->connection_config.privacyMode == CSR_WIFI_SME_80211_PRIVACY_MODE_DISABLED) {
- erq->flags = IW_ENCODE_DISABLED;
- } else {
- erq->flags = IW_ENCODE_OPEN;
- }
- }
-
- erq->length = 0;
-
- if (erq->flags != IW_ENCODE_DISABLED) {
- int index = priv->wep_tx_key_index;
-
- if ((index > 0) && (index <= NUM_WEPKEYS)) {
- erq->flags |= (index & IW_ENCODE_INDEX);
- erq->length = priv->wep_keys[index - 1].len;
- memcpy(extra, priv->wep_keys[index - 1].key, erq->length);
- } else {
- unifi_notice(priv, "unifi_giwencode: Surprise, do not have a valid key index (%d)\n",
- index);
- }
- }
-
- return 0;
-} /* unifi_giwencode() */
-
-
-static int
-unifi_siwpower(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *args = &wrqu->power;
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- int listen_interval, wake_for_dtim;
- int r = 0;
- CsrWifiSmePowerConfig powerConfig;
-
- unifi_trace(priv, UDBG2, "unifi_siwpower\n");
-
- CHECK_INITED(priv);
-
- if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) {
- unifi_error(priv, "unifi_siwpower: not permitted in Mode %d\n",
- interfacePriv->interfaceMode);
- return -EPERM;
- }
-
- UF_RTNL_UNLOCK();
- r = sme_mgt_power_config_get(priv, &powerConfig);
- UF_RTNL_LOCK();
- if (r) {
- unifi_error(priv, "unifi_siwpower: Get unifi_PowerConfigValue failed.\n");
- return r;
- }
-
- listen_interval = -1;
- wake_for_dtim = -1;
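- /* -1 means leave the corresponding setting unchanged */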
- if (args->disabled) {
- powerConfig.powerSaveLevel = CSR_WIFI_SME_POWER_SAVE_LEVEL_LOW;
- }
- else
- {
- powerConfig.powerSaveLevel = CSR_WIFI_SME_POWER_SAVE_LEVEL_HIGH;
-
- switch (args->flags & IW_POWER_TYPE) {
- case 0:
- /* not specified */
- break;
- case IW_POWER_PERIOD:
- listen_interval = args->value / 1000;
- break;
- default:
- return -EINVAL;
- }
-
- switch (args->flags & IW_POWER_MODE) {
- case 0:
- /* not specified */
- break;
- case IW_POWER_UNICAST_R:
- /* not interested in broadcast packets */
- wake_for_dtim = 0;
- break;
- case IW_POWER_ALL_R:
- /* yes, we are interested in broadcast packets */
- wake_for_dtim = 1;
- break;
- default:
- return -EINVAL;
- }
- }
-
- if (listen_interval > 0) {
- powerConfig.listenIntervalTu = listen_interval;
- unifi_trace(priv, UDBG4, "unifi_siwpower: new Listen Interval = %d.\n",
- powerConfig.listenIntervalTu);
- }
-
- if (wake_for_dtim >= 0) {
- powerConfig.rxDtims = wake_for_dtim;
- }
- UF_RTNL_UNLOCK();
- r = sme_mgt_power_config_set(priv, &powerConfig);
- UF_RTNL_LOCK();
- if (r) {
- unifi_error(priv, "unifi_siwpower: Set unifi_PowerConfigValue failed.\n");
- return r;
- }
-
- return 0;
-} /* unifi_siwpower() */
-
-
-static int
-unifi_giwpower(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct iw_param *args = &wrqu->power;
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- CsrWifiSmePowerConfig powerConfig;
- int r;
-
- unifi_trace(priv, UDBG2, "unifi_giwpower\n");
-
- CHECK_INITED(priv);
-
- if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) {
- unifi_error(priv, "unifi_giwpower: not permitted in Mode %d\n",
- interfacePriv->interfaceMode);
- return -EPERM;
- }
-
-
- args->flags = 0;
- UF_RTNL_UNLOCK();
- r = sme_mgt_power_config_get(priv, &powerConfig);
- UF_RTNL_LOCK();
- if (r) {
- unifi_error(priv, "unifi_giwpower: Get unifi_PowerConfigValue failed.\n");
- return r;
- }
-
- unifi_trace(priv, UDBG4, "unifi_giwpower: mode=%d\n",
- powerConfig.powerSaveLevel);
-
- args->disabled = (powerConfig.powerSaveLevel == CSR_WIFI_SME_POWER_SAVE_LEVEL_LOW);
- if (args->disabled) {
- args->flags = 0;
- return 0;
- }
-
- args->value = powerConfig.listenIntervalTu * 1000;
- args->flags |= IW_POWER_PERIOD;
-
- if (powerConfig.rxDtims) {
- args->flags |= IW_POWER_ALL_R;
- } else {
- args->flags |= IW_POWER_UNICAST_R;
- }
-
- return 0;
-} /* unifi_giwpower() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_siwcommit - handler for SIOCSIWCOMMIT
- *
- * Apply all the parameters that have been set.
- * In practice this means:
- * - do a scan
- * - join a network or start an ad hoc network
- * - authenticate and associate.
- *
- * Arguments:
- * None.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-static int
-unifi_siwcommit(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- return 0;
-} /* unifi_siwcommit() */
-
-
-
-static int
-unifi_siwmlme(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- struct iw_mlme *mlme = (struct iw_mlme *)extra;
-
- unifi_trace(priv, UDBG2, "unifi_siwmlme\n");
- CHECK_INITED(priv);
-
- if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) {
- unifi_error(priv, "unifi_siwmlme: not permitted in Mode %d\n",
- interfacePriv->interfaceMode);
- return -EPERM;
- }
-
-
- switch (mlme->cmd) {
- case IW_MLME_DEAUTH:
- case IW_MLME_DISASSOC:
- UF_RTNL_UNLOCK();
- sme_mgt_disconnect(priv);
- UF_RTNL_LOCK();
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
-} /* unifi_siwmlme() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_siwgenie
- * unifi_giwgenie
- *
- * WPA : Generic IEEE 802.11 information element (e.g., for WPA/RSN/WMM).
- * Handlers for SIOCSIWGENIE, SIOCGIWGENIE - set/get generic IE
- *
- * The host program (e.g. wpa_supplicant) uses this call to set the
- * additional IEs to accompany the next (Associate?) request.
- *
- * Arguments:
- * None.
- *
- * Returns:
- * None.
- * Notes:
- * From wireless.h:
- * This ioctl uses struct iw_point and data buffer that includes IE id
- * and len fields. More than one IE may be included in the
- * request. Setting the generic IE to empty buffer (len=0) removes the
- * generic IE from the driver.
- * ---------------------------------------------------------------------------
- */
-static int
-unifi_siwgenie(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- int len;
-
- unifi_trace(priv, UDBG2, "unifi_siwgenie\n");
-
- if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) {
- unifi_error(priv, "unifi_siwgenie: not permitted in Mode %d\n",
- interfacePriv->interfaceMode);
- return -EPERM;
- }
-
-
- if ( priv->connection_config.mlmeAssociateReqInformationElements) {
- kfree( priv->connection_config.mlmeAssociateReqInformationElements);
- }
- priv->connection_config.mlmeAssociateReqInformationElementsLength = 0;
- priv->connection_config.mlmeAssociateReqInformationElements = NULL;
-
- len = wrqu->data.length;
- if (len == 0) {
- return 0;
- }
-
- priv->connection_config.mlmeAssociateReqInformationElements = kmalloc(len, GFP_KERNEL);
- if (priv->connection_config.mlmeAssociateReqInformationElements == NULL) {
- return -ENOMEM;
- }
-
- priv->connection_config.mlmeAssociateReqInformationElementsLength = len;
- memcpy( priv->connection_config.mlmeAssociateReqInformationElements, extra, len);
-
- return 0;
-} /* unifi_siwgenie() */
-
-
-static int
-unifi_giwgenie(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- int len;
-
- unifi_trace(priv, UDBG2, "unifi_giwgenie\n");
-
- if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) {
- unifi_error(priv, "unifi_giwgenie: not permitted in Mode %d\n",
- interfacePriv->interfaceMode);
- return -EPERM;
- }
-
-
- len = priv->connection_config.mlmeAssociateReqInformationElementsLength;
-
- if (len == 0) {
- wrqu->data.length = 0;
- return 0;
- }
-
- if (wrqu->data.length < len) {
- return -E2BIG;
- }
-
- wrqu->data.length = len;
- memcpy(extra, priv->connection_config.mlmeAssociateReqInformationElements, len);
-
- return 0;
-} /* unifi_giwgenie() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_siwauth
- * unifi_giwauth
- *
- * Handlers for SIOCSIWAUTH, SIOCGIWAUTH
- * Set/get various authentication parameters.
- *
- * Arguments:
- *
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-static int
-_unifi_siwauth(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- CsrWifiSmeAuthModeMask new_auth;
-
- unifi_trace(priv, UDBG2, "unifi_siwauth\n");
-
- if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) {
- unifi_error(priv, "unifi_siwauth: not permitted in Mode %d\n",
- interfacePriv->interfaceMode);
- return -EPERM;
- }
-
-
- /*
- * This ioctl is safe to call even when UniFi is powered off.
- * wpa_supplicant calls it to test whether we support WPA.
- */
-
- switch (wrqu->param.flags & IW_AUTH_INDEX) {
-
- case IW_AUTH_WPA_ENABLED:
- unifi_trace(priv, UDBG1, "IW_AUTH_WPA_ENABLED: %d\n", wrqu->param.value);
-
- if (wrqu->param.value == 0) {
- unifi_trace(priv, UDBG5, "IW_AUTH_WPA_ENABLED: CSR_WIFI_SME_AUTH_MODE_80211_OPEN\n");
- priv->connection_config.authModeMask = CSR_WIFI_SME_AUTH_MODE_80211_OPEN;
- }
- break;
-
- case IW_AUTH_PRIVACY_INVOKED:
- unifi_trace(priv, UDBG1, "IW_AUTH_PRIVACY_INVOKED: %d\n", wrqu->param.value);
-
- priv->connection_config.privacyMode = wrqu->param.value ? CSR_WIFI_SME_80211_PRIVACY_MODE_ENABLED : CSR_WIFI_SME_80211_PRIVACY_MODE_DISABLED;
- if (wrqu->param.value == CSR_WIFI_SME_80211_PRIVACY_MODE_DISABLED)
- {
- priv->connection_config.encryptionModeMask = CSR_WIFI_SME_ENCRYPTION_CIPHER_NONE;
- }
- break;
-
- case IW_AUTH_80211_AUTH_ALG:
- /*
- IW_AUTH_ALG_OPEN_SYSTEM 0x00000001
- IW_AUTH_ALG_SHARED_KEY 0x00000002
- IW_AUTH_ALG_LEAP 0x00000004
- */
- new_auth = 0;
- if (wrqu->param.value & IW_AUTH_ALG_OPEN_SYSTEM) {
- unifi_trace(priv, UDBG1, "IW_AUTH_80211_AUTH_ALG: %d (IW_AUTH_ALG_OPEN_SYSTEM)\n", wrqu->param.value);
- new_auth |= CSR_WIFI_SME_AUTH_MODE_80211_OPEN;
- }
- if (wrqu->param.value & IW_AUTH_ALG_SHARED_KEY) {
- unifi_trace(priv, UDBG1, "IW_AUTH_80211_AUTH_ALG: %d (IW_AUTH_ALG_SHARED_KEY)\n", wrqu->param.value);
- new_auth |= CSR_WIFI_SME_AUTH_MODE_80211_SHARED;
- }
- if (wrqu->param.value & IW_AUTH_ALG_LEAP) {
- /* Initial exchanges using open-system to set EAP */
- unifi_trace(priv, UDBG1, "IW_AUTH_80211_AUTH_ALG: %d (IW_AUTH_ALG_LEAP)\n", wrqu->param.value);
- new_auth |= CSR_WIFI_SME_AUTH_MODE_8021X_OTHER1X;
- }
- if (new_auth == 0) {
- unifi_trace(priv, UDBG1, "IW_AUTH_80211_AUTH_ALG: invalid value %d\n",
- wrqu->param.value);
- return -EINVAL;
- } else {
- priv->connection_config.authModeMask = new_auth;
- }
- break;
-
- case IW_AUTH_WPA_VERSION:
- unifi_trace(priv, UDBG1, "IW_AUTH_WPA_VERSION: %d\n", wrqu->param.value);
- priv->ignore_bssid_join = TRUE;
- /*
- IW_AUTH_WPA_VERSION_DISABLED 0x00000001
- IW_AUTH_WPA_VERSION_WPA 0x00000002
- IW_AUTH_WPA_VERSION_WPA2 0x00000004
- */
-
- if (!(wrqu->param.value & IW_AUTH_WPA_VERSION_DISABLED)) {
-
- priv->connection_config.authModeMask = CSR_WIFI_SME_AUTH_MODE_80211_OPEN;
-
- if (wrqu->param.value & IW_AUTH_WPA_VERSION_WPA) {
- unifi_trace(priv, UDBG4, "IW_AUTH_WPA_VERSION: WPA, WPA-PSK\n");
- priv->connection_config.authModeMask |= (CSR_WIFI_SME_AUTH_MODE_8021X_WPA | CSR_WIFI_SME_AUTH_MODE_8021X_WPAPSK);
- }
- if (wrqu->param.value & IW_AUTH_WPA_VERSION_WPA2) {
- unifi_trace(priv, UDBG4, "IW_AUTH_WPA_VERSION: WPA2, WPA2-PSK\n");
- priv->connection_config.authModeMask |= (CSR_WIFI_SME_AUTH_MODE_8021X_WPA2 | CSR_WIFI_SME_AUTH_MODE_8021X_WPA2PSK);
- }
- }
- break;
-
- case IW_AUTH_CIPHER_PAIRWISE:
- unifi_trace(priv, UDBG1, "IW_AUTH_CIPHER_PAIRWISE: %d\n", wrqu->param.value);
- /*
- * one of:
- IW_AUTH_CIPHER_NONE 0x00000001
- IW_AUTH_CIPHER_WEP40 0x00000002
- IW_AUTH_CIPHER_TKIP 0x00000004
- IW_AUTH_CIPHER_CCMP 0x00000008
- IW_AUTH_CIPHER_WEP104 0x00000010
- */
-
- priv->connection_config.encryptionModeMask = CSR_WIFI_SME_ENCRYPTION_CIPHER_NONE;
-
- if (wrqu->param.value & IW_AUTH_CIPHER_WEP40) {
- priv->connection_config.encryptionModeMask |=
- CSR_WIFI_SME_ENCRYPTION_CIPHER_PAIRWISE_WEP40 | CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_WEP40;
- }
- if (wrqu->param.value & IW_AUTH_CIPHER_WEP104) {
- priv->connection_config.encryptionModeMask |=
- CSR_WIFI_SME_ENCRYPTION_CIPHER_PAIRWISE_WEP104 | CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_WEP104;
- }
- if (wrqu->param.value & IW_AUTH_CIPHER_TKIP) {
- priv->connection_config.encryptionModeMask |=
- CSR_WIFI_SME_ENCRYPTION_CIPHER_PAIRWISE_TKIP | CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_TKIP;
- }
- if (wrqu->param.value & IW_AUTH_CIPHER_CCMP) {
- priv->connection_config.encryptionModeMask |=
- CSR_WIFI_SME_ENCRYPTION_CIPHER_PAIRWISE_CCMP | CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_CCMP;
- }
-
- break;
-
- case IW_AUTH_CIPHER_GROUP:
- unifi_trace(priv, UDBG1, "IW_AUTH_CIPHER_GROUP: %d\n", wrqu->param.value);
- /*
- * Use the WPA version and the group cipher suite to set the permitted
- * group key in the MIB. f/w uses this value to validate WPA and RSN IEs
- * in the probe responses from the desired BSS(ID)
- */
-
- priv->connection_config.encryptionModeMask &= ~(CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_WEP40 |
- CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_WEP104 |
- CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_TKIP |
- CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_CCMP);
- if (wrqu->param.value & IW_AUTH_CIPHER_WEP40) {
- priv->connection_config.encryptionModeMask |= CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_WEP40;
- }
- if (wrqu->param.value & IW_AUTH_CIPHER_WEP104) {
- priv->connection_config.encryptionModeMask |= CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_WEP104;
- }
- if (wrqu->param.value & IW_AUTH_CIPHER_TKIP) {
- priv->connection_config.encryptionModeMask |= CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_TKIP;
- }
- if (wrqu->param.value & IW_AUTH_CIPHER_CCMP) {
- priv->connection_config.encryptionModeMask |= CSR_WIFI_SME_ENCRYPTION_CIPHER_GROUP_CCMP;
- }
-
- break;
-
- case IW_AUTH_KEY_MGMT:
- unifi_trace(priv, UDBG1, "IW_AUTH_KEY_MGMT: %d\n", wrqu->param.value);
- /*
- IW_AUTH_KEY_MGMT_802_1X 1
- IW_AUTH_KEY_MGMT_PSK 2
- */
- if (priv->connection_config.authModeMask & (CSR_WIFI_SME_AUTH_MODE_8021X_WPA | CSR_WIFI_SME_AUTH_MODE_8021X_WPAPSK)) {
- /* Check for explicitly set mode. */
- if (wrqu->param.value == IW_AUTH_KEY_MGMT_802_1X) {
- priv->connection_config.authModeMask &= ~CSR_WIFI_SME_AUTH_MODE_8021X_WPAPSK;
- }
- if (wrqu->param.value == IW_AUTH_KEY_MGMT_PSK) {
- priv->connection_config.authModeMask &= ~CSR_WIFI_SME_AUTH_MODE_8021X_WPA;
- }
- unifi_trace(priv, UDBG5, "IW_AUTH_KEY_MGMT: WPA: %d\n",
- priv->connection_config.authModeMask);
- }
- if (priv->connection_config.authModeMask & (CSR_WIFI_SME_AUTH_MODE_8021X_WPA2 | CSR_WIFI_SME_AUTH_MODE_8021X_WPA2PSK)) {
- /* Check for explicitly set mode. */
- if (wrqu->param.value == IW_AUTH_KEY_MGMT_802_1X) {
- priv->connection_config.authModeMask &= ~CSR_WIFI_SME_AUTH_MODE_8021X_WPA2PSK;
- }
- if (wrqu->param.value == IW_AUTH_KEY_MGMT_PSK) {
- priv->connection_config.authModeMask &= ~CSR_WIFI_SME_AUTH_MODE_8021X_WPA2;
- }
- unifi_trace(priv, UDBG5, "IW_AUTH_KEY_MGMT: WPA2: %d\n",
- priv->connection_config.authModeMask);
- }
-
- break;
- case IW_AUTH_TKIP_COUNTERMEASURES:
- /*
-         * Set to true at the start of the 60 second back-off period
-         * following 2 Michael MIC failures within 60s.
- */
- unifi_trace(priv, UDBG1, "IW_AUTH_TKIP_COUNTERMEASURES: %d\n", wrqu->param.value);
- break;
-
- case IW_AUTH_DROP_UNENCRYPTED:
- /*
- * Set to true on init.
- * Set to false just before associate if encryption will not be
- * required.
- *
- * Note this is not the same as the 802.1X controlled port
- */
- unifi_trace(priv, UDBG1, "IW_AUTH_DROP_UNENCRYPTED: %d\n", wrqu->param.value);
- break;
-
- case IW_AUTH_RX_UNENCRYPTED_EAPOL:
- /*
- * This is set by wpa_supplicant to allow unencrypted EAPOL messages
- * even if pairwise keys are set when not using WPA. IEEE 802.1X
- * specifies that these frames are not encrypted, but WPA encrypts
- * them when pairwise keys are in use.
- * I think the UniFi f/w handles this decision for us.
- */
- unifi_trace(priv, UDBG1, "IW_AUTH_RX_UNENCRYPTED_EAPOL: %d\n", wrqu->param.value);
- break;
-
- case IW_AUTH_ROAMING_CONTROL:
- unifi_trace(priv, UDBG1, "IW_AUTH_ROAMING_CONTROL: %d\n", wrqu->param.value);
- break;
-
- default:
- unifi_trace(priv, UDBG1, "Unsupported auth param %d to 0x%X\n",
- wrqu->param.flags & IW_AUTH_INDEX,
- wrqu->param.value);
- return -EOPNOTSUPP;
- }
-
- unifi_trace(priv, UDBG2, "authModeMask = %d", priv->connection_config.authModeMask);
-
- return 0;
-} /* _unifi_siwauth() */
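/*
 * Illustrative userspace sketch (an assumption, not part of the driver
 * source): roughly how a supplicant-style tool would exercise the
 * SIOCSIWAUTH path served by _unifi_siwauth() above.  The socket is any
 * ordinary AF_INET datagram socket; the interface name is hypothetical.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/wireless.h>

static int example_set_wpa_version(int sock, const char *ifname, int version)
{
    struct iwreq wrq;

    memset(&wrq, 0, sizeof(wrq));
    strncpy(wrq.ifr_name, ifname, IFNAMSIZ - 1);

    /* flags selects the parameter, value carries the setting */
    wrq.u.param.flags = IW_AUTH_WPA_VERSION & IW_AUTH_INDEX;
    wrq.u.param.value = version;    /* e.g. IW_AUTH_WPA_VERSION_WPA2 */

    return ioctl(sock, SIOCSIWAUTH, &wrq);
}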
-
-
-static int
-unifi_siwauth(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int err = 0;
-
- UF_RTNL_UNLOCK();
- err = _unifi_siwauth(dev, info, wrqu, extra);
- UF_RTNL_LOCK();
-
- return err;
-} /* unifi_siwauth() */
-
-
-static int
-unifi_giwauth(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- unifi_trace(NULL, UDBG2, "unifi_giwauth\n");
- return -EOPNOTSUPP;
-} /* unifi_giwauth() */
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_siwencodeext
- * unifi_giwencodeext
- *
- * Handlers for SIOCSIWENCODEEXT, SIOCGIWENCODEEXT - set/get
- * encoding token & mode
- *
- *  Arguments:
- *      Standard wireless extension handler arguments.
- *
- *  Returns:
- *      0 on success, negative error code otherwise.
- *
- * Notes:
- * For WPA/WPA2 we don't take note of the IW_ENCODE_EXT_SET_TX_KEY flag.
- * This flag means "use this key to encode transmissions"; we just
- * assume only one key will be set and that is the one to use.
- * ---------------------------------------------------------------------------
- */
-static int
-_unifi_siwencodeext(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- int r = 0;
- unsigned char *keydata;
- unsigned char tkip_key[32];
- int keyid;
- unsigned char *a = (unsigned char *)ext->addr.sa_data;
- CsrWifiSmeKey sme_key;
- CsrWifiSmeKeyType key_type;
-
- CHECK_INITED(priv);
-
- if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) {
- unifi_error(priv, "unifi_siwencodeext: not permitted in Mode %d\n",
- interfacePriv->interfaceMode);
- return -EPERM;
- }
-
-
- unifi_trace(priv, UDBG1, "siwencodeext: flags=0x%X, alg=%d, ext_flags=0x%X, len=%d, index=%d,\n",
- wrqu->encoding.flags, ext->alg, ext->ext_flags,
- ext->key_len, (wrqu->encoding.flags & IW_ENCODE_INDEX));
- unifi_trace(priv, UDBG3, " addr=%pM\n", a);
-
- if ((ext->key_len == 0) && (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)) {
- /* This means use a different key (given by key_idx) for Tx. */
- /* NYI */
- unifi_trace(priv, UDBG1, KERN_ERR "unifi_siwencodeext: NYI should change tx key id here!!\n");
- return -ENOTSUPP;
- }
-
- memset(&sme_key, 0, sizeof(sme_key));
-
- keydata = (unsigned char *)(ext + 1);
- keyid = (wrqu->encoding.flags & IW_ENCODE_INDEX);
-
- /*
- * Check for request to delete keys for an address.
- */
- /* Pick out request for no privacy. */
- if (ext->alg == IW_ENCODE_ALG_NONE) {
-
- unifi_trace(priv, UDBG1, "Deleting %s key %d\n",
- (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) ? "GROUP" : "PAIRWISE",
- keyid);
-
- if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
- sme_key.keyType = CSR_WIFI_SME_KEY_TYPE_GROUP;
- } else {
- sme_key.keyType = CSR_WIFI_SME_KEY_TYPE_PAIRWISE;
- }
- sme_key.keyIndex = (keyid - 1);
- sme_key.keyLength = 0;
- sme_key.authenticator = 0;
- memcpy(sme_key.address.a, a, ETH_ALEN);
- UF_RTNL_UNLOCK();
- r = sme_mgt_key(priv, &sme_key, CSR_WIFI_SME_LIST_ACTION_REMOVE);
- UF_RTNL_LOCK();
- if (r) {
- unifi_error(priv, "Delete key request was rejected with result %d\n", r);
- return convert_sme_error(r);
- }
-
- return 0;
- }
-
- /*
- * Request is to set a key, not delete
- */
-
- /* Pick out WEP and use set_wep_key(). */
- if (ext->alg == IW_ENCODE_ALG_WEP) {
- /* WEP-40, WEP-104 */
-
- /* Check for valid key length */
- if (!((ext->key_len == 5) || (ext->key_len == 13))) {
- unifi_trace(priv, UDBG1, KERN_ERR "Invalid length for WEP key: %d\n", ext->key_len);
- return -EINVAL;
- }
-
- unifi_trace(priv, UDBG1, "Setting WEP key %d tx:%d\n",
- keyid, ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY);
-
- if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
- sme_key.wepTxKey = TRUE;
- sme_key.keyType = CSR_WIFI_SME_KEY_TYPE_PAIRWISE;
- } else {
- sme_key.wepTxKey = FALSE;
- sme_key.keyType = CSR_WIFI_SME_KEY_TYPE_GROUP;
- }
- sme_key.keyIndex = (keyid - 1);
- sme_key.keyLength = ext->key_len;
- sme_key.authenticator = 0;
- memset(sme_key.address.a, 0xFF, ETH_ALEN);
- memcpy(sme_key.key, keydata, ext->key_len);
- UF_RTNL_UNLOCK();
- r = sme_mgt_key(priv, &sme_key, CSR_WIFI_SME_LIST_ACTION_ADD);
- UF_RTNL_LOCK();
- if (r) {
- unifi_error(priv, "siwencodeext: Set key failed (%d)", r);
- return convert_sme_error(r);
- }
-
- return 0;
- }
-
- /*
- *
- * If we reach here, we are dealing with a WPA/WPA2 key
- *
- */
- if (ext->key_len > 32) {
- return -EINVAL;
- }
-
- /*
- * TKIP keys from wpa_supplicant need swapping.
- * What about other supplicants (when they come along)?
- */
- if ((ext->alg == IW_ENCODE_ALG_TKIP) && (ext->key_len == 32)) {
- memcpy(tkip_key, keydata, 16);
- memcpy(tkip_key + 16, keydata + 24, 8);
- memcpy(tkip_key + 24, keydata + 16, 8);
- keydata = tkip_key;
- }
-
- key_type = (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) ?
- CSR_WIFI_SME_KEY_TYPE_GROUP : /* Group Key */
- CSR_WIFI_SME_KEY_TYPE_PAIRWISE; /* Pairwise Key */
-
- sme_key.keyType = key_type;
- sme_key.keyIndex = (keyid - 1);
- sme_key.keyLength = ext->key_len;
- sme_key.authenticator = 0;
- memcpy(sme_key.address.a, ext->addr.sa_data, ETH_ALEN);
- if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
-
- unifi_trace(priv, UDBG5, "RSC first 6 bytes = %*phC\n",
- 6, ext->rx_seq);
-
- /* memcpy((u8*)(&sme_key.keyRsc), ext->rx_seq, 8); */
- sme_key.keyRsc[0] = ext->rx_seq[1] << 8 | ext->rx_seq[0];
- sme_key.keyRsc[1] = ext->rx_seq[3] << 8 | ext->rx_seq[2];
- sme_key.keyRsc[2] = ext->rx_seq[5] << 8 | ext->rx_seq[4];
- sme_key.keyRsc[3] = ext->rx_seq[7] << 8 | ext->rx_seq[6];
-
- }
-
- memcpy(sme_key.key, keydata, ext->key_len);
- UF_RTNL_UNLOCK();
- r = sme_mgt_key(priv, &sme_key, CSR_WIFI_SME_LIST_ACTION_ADD);
- UF_RTNL_LOCK();
- if (r) {
- unifi_error(priv, "SETKEYS request was rejected with result %d\n", r);
- return convert_sme_error(r);
- }
-
- return r;
-} /* _unifi_siwencodeext() */
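/*
 * Illustrative userspace sketch (an assumption, not taken from the driver):
 * installing a pairwise CCMP key through SIOCSIWENCODEEXT, the ioctl served
 * by _unifi_siwencodeext() above.  Note that the key bytes are appended
 * directly after struct iw_encode_ext, which is why the handler reads the
 * key data from (ext + 1).
 */
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if_arp.h>
#include <linux/wireless.h>

static int example_set_ccmp_key(int sock, const char *ifname,
                                const unsigned char *bssid,
                                const unsigned char *key, unsigned int key_len,
                                int key_idx)
{
    struct iwreq wrq;
    struct iw_encode_ext *ext;
    int ret;

    ext = calloc(1, sizeof(*ext) + key_len);
    if (ext == NULL)
        return -1;

    ext->alg = IW_ENCODE_ALG_CCMP;
    ext->key_len = key_len;
    ext->ext_flags = IW_ENCODE_EXT_SET_TX_KEY;
    ext->addr.sa_family = ARPHRD_ETHER;
    memcpy(ext->addr.sa_data, bssid, 6);
    memcpy(ext + 1, key, key_len);              /* key follows the struct */

    memset(&wrq, 0, sizeof(wrq));
    strncpy(wrq.ifr_name, ifname, IFNAMSIZ - 1);
    wrq.u.encoding.flags = key_idx + 1;         /* 1-based key index */
    wrq.u.encoding.pointer = ext;
    wrq.u.encoding.length = sizeof(*ext) + key_len;

    ret = ioctl(sock, SIOCSIWENCODEEXT, &wrq);
    free(ext);
    return ret;
}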
-
-
-static int
-unifi_siwencodeext(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int err = 0;
-
- err = _unifi_siwencodeext(dev, info, wrqu, extra);
-
- return err;
-} /* unifi_siwencodeext() */
-
-
-static int
-unifi_giwencodeext(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- return -EOPNOTSUPP;
-} /* unifi_giwencodeext() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_siwpmksa
- *
- * SIOCSIWPMKSA - PMKSA cache operation
- * The caller passes a pmksa structure:
- * - cmd one of ADD, REMOVE, FLUSH
- * - bssid MAC address
- * - pmkid ID string (16 bytes)
- *
- *  Arguments:
- *      Standard wireless extension handler arguments.
- *
- *  Returns:
- *      0 on success, negative error code otherwise.
- *
- * Notes:
- * This is not needed since we provide a siwgenie method.
- * ---------------------------------------------------------------------------
- */
-#define UNIFI_PMKID_KEY_SIZE 16
-static int
-unifi_siwpmksa(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
- struct iw_pmksa *pmksa = (struct iw_pmksa *)extra;
- CsrResult r = 0;
- CsrWifiSmePmkidList pmkid_list;
- CsrWifiSmePmkid pmkid;
- CsrWifiSmeListAction action;
-
- CHECK_INITED(priv);
-
- if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) {
- unifi_error(priv, "unifi_siwpmksa: not permitted in Mode %d\n",
- interfacePriv->interfaceMode);
- return -EPERM;
- }
-
-
- unifi_trace(priv, UDBG1, "SIWPMKSA: cmd %d, %pM\n", pmksa->cmd,
- pmksa->bssid.sa_data);
-
- pmkid_list.pmkids = NULL;
- switch (pmksa->cmd) {
- case IW_PMKSA_ADD:
- pmkid_list.pmkids = &pmkid;
- action = CSR_WIFI_SME_LIST_ACTION_ADD;
- pmkid_list.pmkidsCount = 1;
- memcpy(pmkid.bssid.a, pmksa->bssid.sa_data, ETH_ALEN);
- memcpy(pmkid.pmkid, pmksa->pmkid, UNIFI_PMKID_KEY_SIZE);
- break;
- case IW_PMKSA_REMOVE:
- pmkid_list.pmkids = &pmkid;
- action = CSR_WIFI_SME_LIST_ACTION_REMOVE;
- pmkid_list.pmkidsCount = 1;
- memcpy(pmkid.bssid.a, pmksa->bssid.sa_data, ETH_ALEN);
- memcpy(pmkid.pmkid, pmksa->pmkid, UNIFI_PMKID_KEY_SIZE);
- break;
- case IW_PMKSA_FLUSH:
- /* Replace current PMKID's with an empty list */
- pmkid_list.pmkidsCount = 0;
- action = CSR_WIFI_SME_LIST_ACTION_FLUSH;
- break;
- default:
- unifi_notice(priv, "SIWPMKSA: Unknown command (0x%x)\n", pmksa->cmd);
- return -EINVAL;
- }
-
-    /* At this point the PMKID list holds one entry to add, one to remove, or is empty for a flush */
- UF_RTNL_UNLOCK();
- r = sme_mgt_pmkid(priv, action, &pmkid_list);
- UF_RTNL_LOCK();
- if (r) {
- unifi_error(priv, "SIWPMKSA: Set PMKID's Failed.\n");
- }
-
- return r;
-
-} /* unifi_siwpmksa() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_get_wireless_stats
- *
- * get_wireless_stats method for Linux wireless extensions.
- *
- * Arguments:
- * dev Pointer to associated netdevice.
- *
- * Returns:
- * Pointer to iw_statistics struct.
- * ---------------------------------------------------------------------------
- */
-struct iw_statistics *
-unifi_get_wireless_stats(struct net_device *dev)
-{
- netInterface_priv_t *interfacePriv = (netInterface_priv_t *)netdev_priv(dev);
- unifi_priv_t *priv = interfacePriv->privPtr;
-
- if (priv->init_progress != UNIFI_INIT_COMPLETED) {
- return NULL;
- }
-
- return &priv->wext_wireless_stats;
-} /* unifi_get_wireless_stats() */
-
-
-/*
- * Structures to export the Wireless Handlers
- */
-
-static const struct iw_priv_args unifi_private_args[] = {
- /*{ cmd, set_args, get_args, name } */
- { SIOCIWS80211POWERSAVEPRIV, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_NONE, "iwprivs80211ps" },
- { SIOCIWG80211POWERSAVEPRIV, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IWPRIV_POWER_SAVE_MAX_STRING, "iwprivg80211ps" },
- { SIOCIWS80211RELOADDEFAULTSPRIV, IW_PRIV_TYPE_NONE,
- IW_PRIV_TYPE_NONE, "iwprivsdefs" },
- { SIOCIWSSMEDEBUGPRIV, IW_PRIV_TYPE_CHAR | IWPRIV_SME_DEBUG_MAX_STRING, IW_PRIV_TYPE_NONE, "iwprivssmedebug" },
-#ifdef CSR_WIFI_SECURITY_WAPI_ENABLE
- { SIOCIWSCONFWAPIPRIV, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1,
- IW_PRIV_TYPE_NONE, "iwprivsconfwapi" },
- { SIOCIWSWAPIKEYPRIV, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | sizeof(unifiio_wapi_key_t),
- IW_PRIV_TYPE_NONE, "iwprivswpikey" },
-#endif
-#ifdef CSR_SUPPORT_WEXT_AP
- { SIOCIWSAPCFGPRIV, IW_PRIV_TYPE_CHAR | 256, IW_PRIV_TYPE_NONE, "AP_SET_CFG" },
- { SIOCIWSAPSTARTPRIV, 0,IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED|IWPRIV_SME_MAX_STRING,"AP_BSS_START" },
- { SIOCIWSAPSTOPPRIV, IW_PRIV_TYPE_CHAR |IW_PRIV_SIZE_FIXED|0,
- IW_PRIV_TYPE_CHAR |IW_PRIV_SIZE_FIXED|0, "AP_BSS_STOP" },
-#ifdef ANDROID_BUILD
- { SIOCIWSFWRELOADPRIV, IW_PRIV_TYPE_CHAR |256,
- IW_PRIV_TYPE_CHAR |IW_PRIV_SIZE_FIXED|0, "WL_FW_RELOAD" },
- { SIOCIWSSTACKSTART, 0,
- IW_PRIV_TYPE_CHAR |IW_PRIV_SIZE_FIXED|IWPRIV_SME_MAX_STRING, "START" },
- { SIOCIWSSTACKSTOP, 0,
- IW_PRIV_TYPE_CHAR |IW_PRIV_SIZE_FIXED|IWPRIV_SME_MAX_STRING, "STOP" },
-#endif /* ANDROID_BUILD */
-#endif /* CSR_SUPPORT_WEXT_AP */
-};
-
-static const iw_handler unifi_handler[] =
-{
- (iw_handler) unifi_siwcommit, /* SIOCSIWCOMMIT */
- (iw_handler) unifi_giwname, /* SIOCGIWNAME */
- (iw_handler) NULL, /* SIOCSIWNWID */
- (iw_handler) NULL, /* SIOCGIWNWID */
- (iw_handler) unifi_siwfreq, /* SIOCSIWFREQ */
- (iw_handler) unifi_giwfreq, /* SIOCGIWFREQ */
- (iw_handler) unifi_siwmode, /* SIOCSIWMODE */
- (iw_handler) unifi_giwmode, /* SIOCGIWMODE */
- (iw_handler) NULL, /* SIOCSIWSENS */
- (iw_handler) NULL, /* SIOCGIWSENS */
- (iw_handler) NULL, /* SIOCSIWRANGE */
- (iw_handler) unifi_giwrange, /* SIOCGIWRANGE */
- (iw_handler) NULL, /* SIOCSIWPRIV */
- (iw_handler) NULL, /* SIOCGIWPRIV */
- (iw_handler) NULL, /* SIOCSIWSTATS */
- (iw_handler) NULL, /* SIOCGIWSTATS */
- (iw_handler) NULL, /* SIOCSIWSPY */
- (iw_handler) NULL, /* SIOCGIWSPY */
- (iw_handler) NULL, /* SIOCSIWTHRSPY */
- (iw_handler) NULL, /* SIOCGIWTHRSPY */
- (iw_handler) unifi_siwap, /* SIOCSIWAP */
- (iw_handler) unifi_giwap, /* SIOCGIWAP */
-#if WIRELESS_EXT > 17
- /* WPA : IEEE 802.11 MLME requests */
- unifi_siwmlme, /* SIOCSIWMLME, request MLME operation */
-#else
- (iw_handler) NULL, /* -- hole -- */
-#endif
- (iw_handler) NULL, /* SIOCGIWAPLIST */
- (iw_handler) unifi_siwscan, /* SIOCSIWSCAN */
- (iw_handler) unifi_giwscan, /* SIOCGIWSCAN */
- (iw_handler) unifi_siwessid, /* SIOCSIWESSID */
- (iw_handler) unifi_giwessid, /* SIOCGIWESSID */
- (iw_handler) NULL, /* SIOCSIWNICKN */
- (iw_handler) NULL, /* SIOCGIWNICKN */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
- unifi_siwrate, /* SIOCSIWRATE */
- unifi_giwrate, /* SIOCGIWRATE */
- unifi_siwrts, /* SIOCSIWRTS */
- unifi_giwrts, /* SIOCGIWRTS */
- unifi_siwfrag, /* SIOCSIWFRAG */
- unifi_giwfrag, /* SIOCGIWFRAG */
- (iw_handler) NULL, /* SIOCSIWTXPOW */
- (iw_handler) NULL, /* SIOCGIWTXPOW */
- (iw_handler) NULL, /* SIOCSIWRETRY */
- (iw_handler) NULL, /* SIOCGIWRETRY */
- unifi_siwencode, /* SIOCSIWENCODE */
- unifi_giwencode, /* SIOCGIWENCODE */
- unifi_siwpower, /* SIOCSIWPOWER */
- unifi_giwpower, /* SIOCGIWPOWER */
-#if WIRELESS_EXT > 17
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
-
-    /* WPA : Generic IEEE 802.11 information element (e.g., for WPA/RSN/WMM). */
- unifi_siwgenie, /* SIOCSIWGENIE */ /* set generic IE */
- unifi_giwgenie, /* SIOCGIWGENIE */ /* get generic IE */
-
- /* WPA : Authentication mode parameters */
- unifi_siwauth, /* SIOCSIWAUTH */ /* set authentication mode params */
- unifi_giwauth, /* SIOCGIWAUTH */ /* get authentication mode params */
-
- /* WPA : Extended version of encoding configuration */
- unifi_siwencodeext, /* SIOCSIWENCODEEXT */ /* set encoding token & mode */
- unifi_giwencodeext, /* SIOCGIWENCODEEXT */ /* get encoding token & mode */
-
- /* WPA2 : PMKSA cache management */
- unifi_siwpmksa, /* SIOCSIWPMKSA */ /* PMKSA cache operation */
- (iw_handler) NULL, /* -- hole -- */
-#endif /* WIRELESS_EXT > 17 */
-};
-
-
-static const iw_handler unifi_private_handler[] =
-{
- iwprivs80211ps, /* SIOCIWFIRSTPRIV */
- iwprivg80211ps, /* SIOCIWFIRSTPRIV + 1 */
- iwprivsdefs, /* SIOCIWFIRSTPRIV + 2 */
- (iw_handler) NULL,
-#ifdef CSR_WIFI_SECURITY_WAPI_ENABLE
- iwprivsconfwapi, /* SIOCIWFIRSTPRIV + 4 */
- (iw_handler) NULL, /* SIOCIWFIRSTPRIV + 5 */
- iwprivswpikey, /* SIOCIWFIRSTPRIV + 6 */
-#else
- (iw_handler) NULL,
- (iw_handler) NULL,
- (iw_handler) NULL,
-#endif
- (iw_handler) NULL,
- iwprivssmedebug, /* SIOCIWFIRSTPRIV + 8 */
-#ifdef CSR_SUPPORT_WEXT_AP
- (iw_handler) NULL,
- iwprivsapconfig,
- (iw_handler) NULL,
- iwprivsapstart,
- (iw_handler) NULL,
- iwprivsapstop,
- (iw_handler) NULL,
-#ifdef ANDROID_BUILD
- iwprivsapfwreload,
- (iw_handler) NULL,
- iwprivsstackstart,
- (iw_handler) NULL,
- iwprivsstackstop,
-#else
- (iw_handler) NULL,
- (iw_handler) NULL,
- (iw_handler) NULL,
- (iw_handler) NULL,
- (iw_handler) NULL,
-#endif /* ANDROID_BUILD */
-#else
- (iw_handler) NULL,
- (iw_handler) NULL,
- (iw_handler) NULL,
- (iw_handler) NULL,
- (iw_handler) NULL,
- (iw_handler) NULL,
- (iw_handler) NULL,
- (iw_handler) NULL,
- (iw_handler) NULL,
- (iw_handler) NULL,
- (iw_handler) NULL,
- (iw_handler) NULL,
-#endif /* CSR_SUPPORT_WEXT_AP */
-};
-
-struct iw_handler_def unifi_iw_handler_def =
-{
- .num_standard = sizeof(unifi_handler) / sizeof(iw_handler),
- .num_private = sizeof(unifi_private_handler) / sizeof(iw_handler),
- .num_private_args = sizeof(unifi_private_args) / sizeof(struct iw_priv_args),
- .standard = (iw_handler *) unifi_handler,
- .private = (iw_handler *) unifi_private_handler,
- .private_args = (struct iw_priv_args *) unifi_private_args,
-#if IW_HANDLER_VERSION >= 6
- .get_wireless_stats = unifi_get_wireless_stats,
-#endif
-};
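/*
 * Hedged sketch (an assumption about where this happens, not a quote from
 * the driver): a wireless-extensions driver of this vintage publishes the
 * table above by pointing its net_device at it before register_netdev().
 */
#include <linux/netdevice.h>

static void example_attach_wext_handlers(struct net_device *dev)
{
        /* After this, the WEXT core dispatches the SIOCxIW ioctls for
         * this device through unifi_iw_handler_def. */
        dev->wireless_handlers = &unifi_iw_handler_def;
}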
-
-
diff --git a/drivers/staging/csr/ul_int.c b/drivers/staging/csr/ul_int.c
deleted file mode 100644
index 0fae6f4..0000000
--- a/drivers/staging/csr/ul_int.c
+++ /dev/null
@@ -1,528 +0,0 @@
-/*
- * ***************************************************************************
- * FILE: ul_int.c
- *
- * PURPOSE:
- * Manage list of client applications using UniFi.
- *
- * Copyright (C) 2006-2009 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ***************************************************************************
- */
-#include "csr_wifi_hip_unifi.h"
-#include "csr_wifi_hip_conversions.h"
-#include "unifi_priv.h"
-#include "unifiio.h"
-#include "unifi_os.h"
-
-static void free_bulkdata_buffers(unifi_priv_t *priv, bulk_data_param_t *bulkdata);
-static void reset_driver_status(unifi_priv_t *priv);
-
-/*
- * ---------------------------------------------------------------------------
- * ul_init_clients
- *
- * Initialise the clients array to empty.
- *
- * Arguments:
- * priv Pointer to device private context struct
- *
- * Returns:
- * None.
- *
- * Notes:
- * This function needs to be called before priv is stored in
- * Unifi_instances[].
- * ---------------------------------------------------------------------------
- */
-void
-ul_init_clients(unifi_priv_t *priv)
-{
- int id;
- ul_client_t *ul_clients;
-
- sema_init(&priv->udi_logging_mutex, 1);
- priv->logging_client = NULL;
-
- ul_clients = priv->ul_clients;
-
- for (id = 0; id < MAX_UDI_CLIENTS; id++) {
- memset(&ul_clients[id], 0, sizeof(ul_client_t));
-
- ul_clients[id].client_id = id;
- ul_clients[id].sender_id = UDI_SENDER_ID_BASE + (id << UDI_SENDER_ID_SHIFT);
- ul_clients[id].instance = -1;
- ul_clients[id].event_hook = NULL;
-
- INIT_LIST_HEAD(&ul_clients[id].udi_log);
- init_waitqueue_head(&ul_clients[id].udi_wq);
- sema_init(&ul_clients[id].udi_sem, 1);
-
- ul_clients[id].wake_up_wq_id = 0;
- ul_clients[id].seq_no = 0;
- ul_clients[id].wake_seq_no = 0;
- ul_clients[id].snap_filter.count = 0;
- }
-} /* ul_init_clients() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * ul_register_client
- *
- * This function registers a new ul client.
- *
- * Arguments:
- * priv Pointer to device private context struct
- * configuration Special configuration for the client.
- * udi_event_clbk Callback for receiving event from unifi.
- *
- * Returns:
- *      Pointer to the new client on success, NULL otherwise.
- * ---------------------------------------------------------------------------
- */
-ul_client_t *
-ul_register_client(unifi_priv_t *priv, unsigned int configuration,
- udi_event_t udi_event_clbk)
-{
- unsigned char id, ref;
- ul_client_t *ul_clients;
-
- ul_clients = priv->ul_clients;
-
- /* check for an unused entry */
- for (id = 0; id < MAX_UDI_CLIENTS; id++) {
- if (ul_clients[id].udi_enabled == 0) {
- ul_clients[id].instance = priv->instance;
- ul_clients[id].udi_enabled = 1;
- ul_clients[id].configuration = configuration;
-
- /* Allocate memory for the reply signal.. */
- ul_clients[id].reply_signal = kmalloc(sizeof(CSR_SIGNAL), GFP_KERNEL);
- if (ul_clients[id].reply_signal == NULL) {
- unifi_error(priv, "Failed to allocate reply signal for client.\n");
- return NULL;
- }
- /* .. and the bulk data of the reply signal. */
- for (ref = 0; ref < UNIFI_MAX_DATA_REFERENCES; ref ++) {
- ul_clients[id].reply_bulkdata[ref] = kmalloc(sizeof(bulk_data_t), GFP_KERNEL);
- /* If allocation fails, free allocated memory. */
- if (ul_clients[id].reply_bulkdata[ref] == NULL) {
- for (; ref > 0; ref --) {
- kfree(ul_clients[id].reply_bulkdata[ref - 1]);
- }
- kfree(ul_clients[id].reply_signal);
- unifi_error(priv, "Failed to allocate bulk data buffers for client.\n");
- return NULL;
- }
- }
-
- /* Set the event callback. */
- ul_clients[id].event_hook = udi_event_clbk;
-
- unifi_trace(priv, UDBG2, "UDI %d (0x%x) registered. configuration = 0x%x\n",
- id, &ul_clients[id], configuration);
- return &ul_clients[id];
- }
- }
- return NULL;
-} /* ul_register_client() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * ul_deregister_client
- *
- * This function deregisters a blocking UDI client.
- *
- * Arguments:
- * client Pointer to the client we deregister.
- *
- * Returns:
- *      0 when the client has been deregistered.
- * ---------------------------------------------------------------------------
- */
-int
-ul_deregister_client(ul_client_t *ul_client)
-{
- struct list_head *pos, *n;
- udi_log_t *logptr;
- unifi_priv_t *priv = uf_find_instance(ul_client->instance);
- int ref;
-
- ul_client->instance = -1;
- ul_client->event_hook = NULL;
- ul_client->udi_enabled = 0;
- unifi_trace(priv, UDBG5, "UDI (0x%x) deregistered.\n", ul_client);
-
- /* Free memory allocated for the reply signal and its bulk data. */
- kfree(ul_client->reply_signal);
- for (ref = 0; ref < UNIFI_MAX_DATA_REFERENCES; ref ++) {
- kfree(ul_client->reply_bulkdata[ref]);
- }
-
- if (ul_client->snap_filter.count) {
- ul_client->snap_filter.count = 0;
- kfree(ul_client->snap_filter.protocols);
- }
-
- /* Free anything pending on the udi_log list */
- down(&ul_client->udi_sem);
- list_for_each_safe(pos, n, &ul_client->udi_log)
- {
- logptr = list_entry(pos, udi_log_t, q);
- list_del(pos);
- kfree(logptr);
- }
- up(&ul_client->udi_sem);
-
- return 0;
-} /* ul_deregister_client() */
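/*
 * Minimal usage sketch (an assumption, not part of the original file, and
 * relying on the driver's own headers such as unifi_priv.h): a client
 * registers an event hook matching the udi_event_t signature declared in
 * unifi_clients.h, then deregisters when it is done.
 */
static void example_event_hook(ul_client_t *client,
                               const u8 *sigdata, int signal_len,
                               const bulk_data_param_t *bulkdata, int dir)
{
        /* Inspect, copy or log the packed signal here. */
}

static int example_attach_client(unifi_priv_t *priv)
{
        ul_client_t *client;

        client = ul_register_client(priv, 0 /* no special configuration */,
                                    example_event_hook);
        if (client == NULL)
                return -ENOMEM;

        /* ... use the client, e.g. wait for events ... */

        ul_deregister_client(client);
        return 0;
}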
-
-
-
-/*
- * ---------------------------------------------------------------------------
- * logging_handler
- *
- * This function is registered with the driver core.
- *  It is called every time a UniFi HIP signal is sent or received. It
- *  delivers the event to the client currently registered for logging,
- *  if any.
- *
- * Arguments:
- * ospriv Pointer to driver's private data.
- * sigdata Pointer to the packed signal buffer.
- * signal_len Length of the packed signal.
- * bulkdata Pointer to the signal's bulk data.
- * dir Direction of the signal
- * 0 = from-host
- * 1 = to-host
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-void
-logging_handler(void *ospriv,
- u8 *sigdata, u32 signal_len,
- const bulk_data_param_t *bulkdata,
- enum udi_log_direction direction)
-{
- unifi_priv_t *priv = (unifi_priv_t*)ospriv;
- ul_client_t *client;
- int dir;
-
- dir = (direction == UDI_LOG_FROM_HOST) ? UDI_FROM_HOST : UDI_TO_HOST;
-
- down(&priv->udi_logging_mutex);
- client = priv->logging_client;
- if (client != NULL) {
- client->event_hook(client, sigdata, signal_len,
- bulkdata, dir);
- }
- up(&priv->udi_logging_mutex);
-
-} /* logging_handler() */
-
-
-
-/*
- * ---------------------------------------------------------------------------
- * ul_log_config_ind
- *
- *  This function uses the client's registered callback
- *  to indicate configuration information, e.g. core errors.
- *
- * Arguments:
- * priv Pointer to driver's private data.
- * conf_param Pointer to the configuration data.
- * len Length of the configuration data.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-void
-ul_log_config_ind(unifi_priv_t *priv, u8 *conf_param, int len)
-{
-#ifdef CSR_SUPPORT_SME
- if (priv->smepriv == NULL)
- {
- return;
- }
- if ((CONFIG_IND_ERROR == (*conf_param)) && (priv->wifi_on_state == wifi_on_in_progress)) {
- unifi_notice(priv, "ul_log_config_ind: wifi on in progress, suppress error\n");
- } else {
- /* wifi_off_ind (error or exit) */
- CsrWifiRouterCtrlWifiOffIndSend(priv->CSR_WIFI_SME_IFACEQUEUE,0, (CsrWifiRouterCtrlControlIndication)(*conf_param));
- }
-#ifdef CSR_WIFI_HIP_DEBUG_OFFLINE
- unifi_debug_buf_dump();
-#endif
-#else
- bulk_data_param_t bulkdata;
-
- /*
- * If someone killed unifi_managed before the driver was unloaded
- * the g_drvpriv pointer is going to be NULL. In this case it is
- * safe to assume that there is no client to get the indication.
- */
- if (!priv) {
- unifi_notice(NULL, "uf_sme_event_ind: NULL priv\n");
- return;
- }
-
- /* Create a null bulkdata structure. */
- bulkdata.d[0].data_length = 0;
- bulkdata.d[1].data_length = 0;
-
- sme_native_log_event(priv->sme_cli, conf_param, sizeof(u8),
- &bulkdata, UDI_CONFIG_IND);
-
-#endif /* CSR_SUPPORT_SME */
-
-} /* ul_log_config_ind */
-
-
-/*
- * ---------------------------------------------------------------------------
- * free_bulkdata_buffers
- *
- * Free the bulkdata buffers e.g. after a failed unifi_send_signal().
- *
- * Arguments:
- * priv Pointer to device private struct
- * bulkdata Pointer to bulkdata parameter table
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-static void
-free_bulkdata_buffers(unifi_priv_t *priv, bulk_data_param_t *bulkdata)
-{
- int i;
-
- if (bulkdata) {
- for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; ++i) {
- if (bulkdata->d[i].data_length != 0) {
- unifi_net_data_free(priv, (bulk_data_desc_t *)(&bulkdata->d[i]));
- /* data_length is now 0 */
- }
- }
- }
-
-} /* free_bulkdata_buffers */
-
-static int
-_align_bulk_data_buffers(unifi_priv_t *priv, u8 *signal,
- bulk_data_param_t *bulkdata)
-{
- unsigned int i;
-
- if ((bulkdata == NULL) || (CSR_WIFI_ALIGN_BYTES == 0)) {
- return 0;
- }
-
- for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; i++)
- {
- struct sk_buff *skb;
- /*
- * The following complex casting is in place in order to eliminate 64-bit compilation warning
- * "cast to/from pointer from/to integer of different size"
- */
- u32 align_offset = (u32)(long)(bulkdata->d[i].os_data_ptr) & (CSR_WIFI_ALIGN_BYTES-1);
- if (align_offset)
- {
- skb = (struct sk_buff*)bulkdata->d[i].os_net_buf_ptr;
- if (skb == NULL) {
- unifi_warning(priv,
- "_align_bulk_data_buffers: Align offset found (%d) but skb is NULL!\n",
- align_offset);
- return -EINVAL;
- }
- if (bulkdata->d[i].data_length == 0) {
- unifi_warning(priv,
- "_align_bulk_data_buffers: Align offset found (%d) but length is zero\n",
- align_offset);
- return CSR_RESULT_SUCCESS;
- }
- unifi_trace(priv, UDBG5,
- "Align f-h buffer (0x%p) by %d bytes (skb->data: 0x%p)\n",
- bulkdata->d[i].os_data_ptr, align_offset, skb->data);
-
-
- /* Check if there is enough headroom... */
- if (unlikely(skb_headroom(skb) < align_offset))
- {
- struct sk_buff *tmp = skb;
-
- unifi_trace(priv, UDBG5, "Headroom not enough - realloc it\n");
- skb = skb_realloc_headroom(skb, align_offset);
- if (skb == NULL) {
- unifi_error(priv,
- "_align_bulk_data_buffers: skb_realloc_headroom failed - signal is dropped\n");
- return -EFAULT;
- }
- /* Free the old bulk data only if allocation succeeds */
- kfree_skb(tmp);
- /* Bulkdata needs to point to the new skb */
- bulkdata->d[i].os_net_buf_ptr = (const unsigned char*)skb;
- bulkdata->d[i].os_data_ptr = (const void*)skb->data;
- }
- /* ... before pushing the data to the right alignment offset */
- skb_push(skb, align_offset);
-
- }
- /* The direction bit is zero for the from-host */
- signal[SIZEOF_SIGNAL_HEADER + (i * SIZEOF_DATAREF) + 1] = align_offset;
-
- }
- return 0;
-} /* _align_bulk_data_buffers() */
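/*
 * Worked example of the alignment step above (illustrative numbers, an
 * assumption rather than a trace from real hardware): with
 * CSR_WIFI_ALIGN_BYTES == 4 and a payload starting at address 0x1002,
 *
 *      align_offset = 0x1002 & (4 - 1) = 2
 *
 * skb_push(skb, 2) moves skb->data back to 0x1000, so the buffer handed to
 * the HIP layer starts on a 4-byte boundary, and the value 2 is recorded in
 * the signal's data reference so the receiver can skip the padding bytes.
 */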
-
-
-/*
- * ---------------------------------------------------------------------------
- * ul_send_signal_unpacked
- *
- * This function sends a host formatted signal to unifi.
- *
- * Arguments:
- * priv Pointer to driver's private data.
- * sigptr Pointer to the signal.
- * bulkdata Pointer to the signal's bulk data.
- *
- * Returns:
- *      0 on success, error code otherwise.
- *
- * Notes:
- * The signals have to be sent in the format described in the host interface
- *  specification, i.e. wire formatted. Certain clients use the host formatted
- *  structures. write_pack() transforms the host formatted signal into the
- *  wire formatted signal. The code lives in the core, since the signals are
- *  defined by, and therefore bound to, the host interface specification.
- * ---------------------------------------------------------------------------
- */
-int
-ul_send_signal_unpacked(unifi_priv_t *priv, CSR_SIGNAL *sigptr,
- bulk_data_param_t *bulkdata)
-{
- u8 sigbuf[UNIFI_PACKED_SIGBUF_SIZE];
- u16 packed_siglen;
- CsrResult csrResult;
- unsigned long lock_flags;
- int r;
-
-
- csrResult = write_pack(sigptr, sigbuf, &packed_siglen);
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(priv, "Malformed HIP signal in ul_send_signal_unpacked()\n");
- return CsrHipResultToStatus(csrResult);
- }
- r = _align_bulk_data_buffers(priv, sigbuf, (bulk_data_param_t*)bulkdata);
- if (r) {
- return r;
- }
-
- spin_lock_irqsave(&priv->send_signal_lock, lock_flags);
- csrResult = unifi_send_signal(priv->card, sigbuf, packed_siglen, bulkdata);
- if (csrResult != CSR_RESULT_SUCCESS) {
- /* free_bulkdata_buffers(priv, (bulk_data_param_t *)bulkdata); */
- spin_unlock_irqrestore(&priv->send_signal_lock, lock_flags);
- return CsrHipResultToStatus(csrResult);
- }
- spin_unlock_irqrestore(&priv->send_signal_lock, lock_flags);
-
- return 0;
-} /* ul_send_signal_unpacked() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * reset_driver_status
- *
- * This function is called from ul_send_signal_raw() when it detects
- * that the SME has sent a MLME-RESET request.
- *
- * Arguments:
- * priv Pointer to device private struct
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-static void
-reset_driver_status(unifi_priv_t *priv)
-{
- priv->sta_wmm_capabilities = 0;
-#ifdef CSR_NATIVE_LINUX
-#ifdef CSR_SUPPORT_WEXT
- priv->wext_conf.flag_associated = 0;
- priv->wext_conf.block_controlled_port = CSR_WIFI_ROUTER_PORT_ACTION_8021X_PORT_OPEN;
- priv->wext_conf.bss_wmm_capabilities = 0;
- priv->wext_conf.disable_join_on_ssid_set = 0;
-#endif
-#endif
-} /* reset_driver_status() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * ul_send_signal_raw
- *
- * This function sends a wire formatted data signal to unifi.
- *
- * Arguments:
- * priv Pointer to driver's private data.
- * sigptr Pointer to the signal.
- * siglen Length of the signal.
- * bulkdata Pointer to the signal's bulk data.
- *
- * Returns:
- *      0 on success, error code otherwise.
- * ---------------------------------------------------------------------------
- */
-int
-ul_send_signal_raw(unifi_priv_t *priv, unsigned char *sigptr, int siglen,
- bulk_data_param_t *bulkdata)
-{
- CsrResult csrResult;
- unsigned long lock_flags;
- int r;
-
- /*
- * Make sure that the signal is updated with the bulk data
- * alignment for DMA.
- */
- r = _align_bulk_data_buffers(priv, (u8*)sigptr, bulkdata);
- if (r) {
- return r;
- }
-
- spin_lock_irqsave(&priv->send_signal_lock, lock_flags);
- csrResult = unifi_send_signal(priv->card, sigptr, siglen, bulkdata);
- if (csrResult != CSR_RESULT_SUCCESS) {
- free_bulkdata_buffers(priv, bulkdata);
- spin_unlock_irqrestore(&priv->send_signal_lock, lock_flags);
- return CsrHipResultToStatus(csrResult);
- }
- spin_unlock_irqrestore(&priv->send_signal_lock, lock_flags);
-
- /*
-     * Since this is used by unicli, if we get an MLME reset request
-     * we need to reset a few status parameters
-     * that the driver uses to make decisions.
- */
- if (GET_SIGNAL_ID(sigptr) == CSR_MLME_RESET_REQUEST_ID) {
- reset_driver_status(priv);
- }
-
- return 0;
-} /* ul_send_signal_raw() */
-
-
diff --git a/drivers/staging/csr/unifi_clients.h b/drivers/staging/csr/unifi_clients.h
deleted file mode 100644
index df853e1..0000000
--- a/drivers/staging/csr/unifi_clients.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- *****************************************************************************
- *
- * FILE : unifi_clients.h
- *
- * PURPOSE : Private header file for unifi clients.
- *
- * UDI = UniFi Debug Interface
- *
- * Copyright (C) 2005-2008 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- *****************************************************************************
- */
-#ifndef __LINUX_UNIFI_CLIENTS_H__
-#define __LINUX_UNIFI_CLIENTS_H__ 1
-
-#include <linux/kernel.h>
-
-#define MAX_UDI_CLIENTS 8
-
-/* The start of the range of process ids allocated for ul clients */
-#define UDI_SENDER_ID_BASE 0xC000
-#define UDI_SENDER_ID_SHIFT 8
-
-
-/* Structure to hold a UDI logged signal */
-typedef struct {
-
- /* List link structure */
- struct list_head q;
-
- /* The message that will be passed to the user app */
- udi_msg_t msg;
-
- /* Signal body and data follow */
-
-} udi_log_t;
-
-
-
-typedef struct ul_client ul_client_t;
-
-typedef void (*udi_event_t)(ul_client_t *client,
- const u8 *sigdata, int signal_len,
- const bulk_data_param_t *bulkdata,
- int dir);
-
-void logging_handler(void *ospriv,
- u8 *sigdata, u32 signal_len,
- const bulk_data_param_t *bulkdata,
- enum udi_log_direction direction);
-
-
-/*
- * Structure describing a bulk data slot.
- * The length field is used to indicate empty/occupied state.
- */
-typedef struct _bulk_data
-{
- unsigned char ptr[2000];
- unsigned int length;
-} bulk_data_t;
-
-
-struct ul_client {
- /* Index of this client in the ul_clients array. */
- int client_id;
-
- /* Index of UniFi device to which this client is attached. */
- int instance;
-
- /* Flag to say whether this client has been enabled. */
- int udi_enabled;
-
- /* Value to use in signal->SenderProcessId */
- int sender_id;
-
- /* Configuration flags, e.g blocking, logging, etc. */
- unsigned int configuration;
-
- udi_event_t event_hook;
-
- /* A list to hold signals received from UniFi for reading by read() */
- struct list_head udi_log;
-
- /* Semaphore to protect the udi_log list */
- struct semaphore udi_sem;
-
- /*
- * Linux waitqueue to support blocking read and poll.
-     * Logging clients should wait on udi_log, while
- * blocking clients should wait on wake_up_wq.
- */
- wait_queue_head_t udi_wq;
- CSR_SIGNAL* reply_signal;
- bulk_data_t* reply_bulkdata[UNIFI_MAX_DATA_REFERENCES];
-
- u16 signal_filter[SIG_FILTER_SIZE];
-
-
- /* ------------------------------------------------------------------- */
- /* Code below here is used by the sme_native configuration only */
-
- /* Flag to wake up blocking clients waiting on udi_wq. */
- int wake_up_wq_id;
-
- /*
- * A 0x00 - 0x0F mask to apply in signal->SenderProcessId.
- * Every time we do a blocking mlme request we increase this value.
- * The mlme_wait_for_reply() will wait for this sequence number.
- * Only the MLME blocking functions update this field.
- */
- unsigned char seq_no;
-
- /*
- * A 0x00 - 0x0F counter, containing the sequence number of
- * the signal that this client has last received.
- * Only the MLME blocking functions update this field.
- */
- unsigned char wake_seq_no;
-
- unifiio_snap_filter_t snap_filter;
-}; /* struct ul_client */
-
-
-#endif /* __LINUX_UNIFI_CLIENTS_H__ */
diff --git a/drivers/staging/csr/unifi_config.h b/drivers/staging/csr/unifi_config.h
deleted file mode 100644
index fe11970..0000000
--- a/drivers/staging/csr/unifi_config.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * ---------------------------------------------------------------------------
- *
- * FILE: unifi_config.h
- *
- * PURPOSE:
- * This header file provides parameters that configure the operation
- * of the driver.
- *
- * Copyright (C) 2006-2008 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ---------------------------------------------------------------------------
- */
-#ifndef __UNIFI_CONFIG_H__
-#define __UNIFI_CONFIG_H__ 1
-
-/*
- * Override for the SDIO function block size on this host. When byte mode CMD53s
- * are not used/supported by the SD host controller, transfers are padded up to
- * the next block boundary. The 512-byte default on UF6xxx wastes too much space
- * on the chip, so the block size is reduced to support this configuration.
- */
-#define CSR_WIFI_HIP_SDIO_BLOCK_SIZE 64
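/*
 * Illustrative sketch (an assumption, not part of this header): how block
 * mode padding behaves for the two block sizes discussed above.  A 96-byte
 * transfer rounds up to 128 bytes with a 64-byte block, but to 512 bytes
 * with the 512-byte default.
 */
static inline unsigned int example_sdio_padded_len(unsigned int len,
                                                   unsigned int blksz)
{
        return ((len + blksz - 1) / blksz) * blksz;    /* round up to a block */
}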
-
-/* Define the number of mini-coredump buffers to allocate at startup. These are
- * used to record chip status for the last n unexpected resets.
- */
-#define CSR_WIFI_HIP_NUM_COREDUMP_BUFFERS 5
-
-
-#endif /* __UNIFI_CONFIG_H__ */
diff --git a/drivers/staging/csr/unifi_dbg.c b/drivers/staging/csr/unifi_dbg.c
deleted file mode 100644
index 38d5708..0000000
--- a/drivers/staging/csr/unifi_dbg.c
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * ***************************************************************************
- * FILE: unifi_dbg.c
- *
- * PURPOSE:
- * Handle debug signals received from UniFi.
- *
- * Copyright (C) 2007-2008 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ***************************************************************************
- */
-#include "unifi_priv.h"
-
-/*
- * ---------------------------------------------------------------------------
- * debug_string_indication
- * debug_word16_indication
- *
- * Handlers for debug indications.
- *
- * Arguments:
- * priv Pointer to private context structure.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-void
-debug_string_indication(unifi_priv_t *priv, const unsigned char *extra, unsigned int extralen)
-{
- const unsigned int maxlen = sizeof(priv->last_debug_string) - 1;
-
- if (extralen > maxlen) {
- extralen = maxlen;
- }
-
- strncpy(priv->last_debug_string, extra, extralen);
-
- /* Make sure the string is terminated */
- priv->last_debug_string[extralen] = '\0';
-
- unifi_info(priv, "unifi debug: %s\n", priv->last_debug_string);
-
-} /* debug_string_indication() */
-
-
-
-void
-debug_word16_indication(unifi_priv_t *priv, const CSR_SIGNAL *sigptr)
-{
- int i;
-
- if (priv == NULL) {
- unifi_info(priv, "Priv is NULL\n");
- return;
- }
-
- for (i = 0; i < 16; i++) {
- priv->last_debug_word16[i] =
- sigptr->u.DebugWord16Indication.DebugWords[i];
- }
-
- if (priv->last_debug_word16[0] == 0xFA11) {
- unsigned long ts;
- ts = (priv->last_debug_word16[6] << 16) | priv->last_debug_word16[5];
- unifi_info(priv, " %10lu: %s fault %04x, arg %04x (x%d)\n",
- ts,
- priv->last_debug_word16[3] == 0x8000 ? "MAC" :
- priv->last_debug_word16[3] == 0x4000 ? "PHY" :
- "???",
- priv->last_debug_word16[1],
- priv->last_debug_word16[2],
- priv->last_debug_word16[4]);
- }
- else if (priv->last_debug_word16[0] != 0xDBAC)
- /* suppress SDL Trace output (note: still available to unicli). */
- {
- unifi_info(priv, "unifi debug: %04X %04X %04X %04X %04X %04X %04X %04X\n",
- priv->last_debug_word16[0], priv->last_debug_word16[1],
- priv->last_debug_word16[2], priv->last_debug_word16[3],
- priv->last_debug_word16[4], priv->last_debug_word16[5],
- priv->last_debug_word16[6], priv->last_debug_word16[7]);
- unifi_info(priv, " %04X %04X %04X %04X %04X %04X %04X %04X\n",
- priv->last_debug_word16[8], priv->last_debug_word16[9],
- priv->last_debug_word16[10], priv->last_debug_word16[11],
- priv->last_debug_word16[12], priv->last_debug_word16[13],
- priv->last_debug_word16[14], priv->last_debug_word16[15]);
- }
-
-} /* debug_word16_indication() */
-
-
-void
-debug_generic_indication(unifi_priv_t *priv, const CSR_SIGNAL *sigptr)
-{
- unifi_info(priv, "debug: %04X %04X %04X %04X %04X %04X %04X %04X\n",
- sigptr->u.DebugGenericIndication.DebugWords[0],
- sigptr->u.DebugGenericIndication.DebugWords[1],
- sigptr->u.DebugGenericIndication.DebugWords[2],
- sigptr->u.DebugGenericIndication.DebugWords[3],
- sigptr->u.DebugGenericIndication.DebugWords[4],
- sigptr->u.DebugGenericIndication.DebugWords[5],
- sigptr->u.DebugGenericIndication.DebugWords[6],
- sigptr->u.DebugGenericIndication.DebugWords[7]);
-
-} /* debug_generic_indication() */
-
diff --git a/drivers/staging/csr/unifi_event.c b/drivers/staging/csr/unifi_event.c
deleted file mode 100644
index e81a998..0000000
--- a/drivers/staging/csr/unifi_event.c
+++ /dev/null
@@ -1,692 +0,0 @@
-/*
- * ***************************************************************************
- * FILE: unifi_event.c
- *
- * PURPOSE:
- * Process the signals received by UniFi.
- * It is part of the porting exercise.
- *
- * Copyright (C) 2009 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ***************************************************************************
- */
-
-
-/*
- * Porting notes:
- * The implementation of unifi_receive_event() in Linux is fairly complicated.
- * The Linux driver supports multiple userspace applications and several
- * build configurations, so the received signals are processed by different
- * processes, and possibly more than once.
- * In a simple implementation, this function needs to deliver:
- * - The MLME-UNITDATA.ind signals to the Rx data plane and to the Traffic
- * Analysis using unifi_ta_sample().
- * - The MLME-UNITDATA-STATUS.ind signals to the Tx data plane.
- * - All the other signals to the SME using unifi_sys_hip_ind().
- */
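/*
 * Minimal routing sketch of the porting note above (an assumption, not the
 * driver's actual dispatch code, and relying on the driver's own headers):
 * unifi_ta_sample() and unifi_sys_hip_ind() are the helpers named in the
 * note, but the signal-id tests below are placeholders for whatever
 * identifiers a port defines.
 */
static void example_simple_receive_event(void *ospriv, u8 *sigdata,
                                         u32 siglen,
                                         const bulk_data_param_t *bulkdata)
{
        u16 sig_id = GET_SIGNAL_ID(sigdata);

        if (sig_id == EXAMPLE_MA_UNITDATA_IND_ID) {             /* placeholder */
                /* 1. Rx data plane, plus traffic analysis (unifi_ta_sample()) */
        } else if (sig_id == EXAMPLE_MA_UNITDATA_STATUS_IND_ID) { /* placeholder */
                /* 2. Tx data plane */
        } else {
                /* 3. Everything else goes to the SME (unifi_sys_hip_ind()) */
        }
}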
-
-#include "csr_wifi_hip_unifi.h"
-#include "csr_wifi_hip_conversions.h"
-#include "unifi_priv.h"
-
-
-/*
- * ---------------------------------------------------------------------------
- * send_to_client
- *
- * Helper for unifi_receive_event.
- *
- * This function forwards a signal to one client.
- *
- * Arguments:
- * priv Pointer to driver's private data.
- * client Pointer to the client structure.
- *      receiver_id     The receiver id of the signal.
- * sigdata Pointer to the packed signal buffer.
- * siglen Length of the packed signal.
- * bulkdata Pointer to the signal's bulk data.
- *
- * Returns:
- * None.
- *
- * ---------------------------------------------------------------------------
- */
-static void send_to_client(unifi_priv_t *priv, ul_client_t *client,
- int receiver_id,
- unsigned char *sigdata, int siglen,
- const bulk_data_param_t *bulkdata)
-{
- if (client && client->event_hook) {
- /*unifi_trace(priv, UDBG3,
- "Receive: client %d, (s:0x%X, r:0x%X) - Signal 0x%.4X \n",
- client->client_id, client->sender_id, receiver_id,
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN(sigdata));*/
-
- client->event_hook(client, sigdata, siglen, bulkdata, UDI_TO_HOST);
- }
-}
-
-/*
- * ---------------------------------------------------------------------------
- * process_pkt_data_ind
- *  check_routing_pkt_data_ind
- *
- *  Routing decision for received MA-PACKET.ind signals.
- *
- *  This function inspects a received data signal and decides whether it
- *  should be routed to the SME or handled by the driver/network stack.
- * Arguments:
- * priv Context
- * sigdata Pointer to the packed signal buffer(Its in form of MA-PACKET.ind).
- * bulkdata Pointer to signal's bulkdata
- * freeBulkData Pointer to a flag which gets set if the bulkdata needs to
- * be freed after calling the logging handlers. If it is not
- * set the bulkdata must be freed by the MLME handler or
- * passed to the network stack.
- * Returns:
- * TRUE if the packet should be routed to the SME etc.
- * FALSE if the packet is for the driver or network stack
- * ---------------------------------------------------------------------------
- */
-static u8 check_routing_pkt_data_ind(unifi_priv_t *priv,
- u8 *sigdata,
- const bulk_data_param_t* bulkdata,
- u8 *freeBulkData,
- netInterface_priv_t *interfacePriv)
-{
- u16 frmCtrl, receptionStatus, frmCtrlSubType;
- u8 *macHdrLocation;
- u8 interfaceTag;
- u8 isDataFrame;
- u8 isProtocolVerInvalid = FALSE;
- u8 isDataFrameSubTypeNoData = FALSE;
-
-#ifdef CSR_WIFI_SECURITY_WAPI_ENABLE
- static const u8 wapiProtocolIdSNAPHeader[] = {0x88,0xb4};
- static const u8 wapiProtocolIdSNAPHeaderOffset = 6;
- u8 *destAddr;
- u8 *srcAddr;
- u8 isWapiUnicastPkt = FALSE;
-
-#ifdef CSR_WIFI_SECURITY_WAPI_QOSCTRL_MIC_WORKAROUND
- u16 qosControl;
-#endif
-
- u8 llcSnapHeaderOffset = 0;
-
- destAddr = (u8 *) bulkdata->d[0].os_data_ptr + MAC_HEADER_ADDR1_OFFSET;
- srcAddr = (u8 *) bulkdata->d[0].os_data_ptr + MAC_HEADER_ADDR2_OFFSET;
-
- /*Individual/Group bit - Bit 0 of first byte*/
- isWapiUnicastPkt = (!(destAddr[0] & 0x01)) ? TRUE : FALSE;
-#endif
-
-#define CSR_WIFI_MA_PKT_IND_RECEPTION_STATUS_OFFSET sizeof(CSR_SIGNAL_PRIMITIVE_HEADER) + 22
-
- *freeBulkData = FALSE;
-
- /* Fetch the MAC header location from MA_PKT_IND packet */
- macHdrLocation = (u8 *) bulkdata->d[0].os_data_ptr;
- /* Fetch the Frame Control value from MAC header */
- frmCtrl = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(macHdrLocation);
-
- /* Pull out interface tag from virtual interface identifier */
- interfaceTag = (CSR_GET_UINT16_FROM_LITTLE_ENDIAN(sigdata + 14)) & 0xff;
-
- /* check for MIC failure before processing the signal */
- receptionStatus = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(sigdata + CSR_WIFI_MA_PKT_IND_RECEPTION_STATUS_OFFSET);
-
- /* To discard any spurious MIC failures that could be reported by the firmware */
- isDataFrame = ((frmCtrl & IEEE80211_FC_TYPE_MASK) == (IEEE802_11_FC_TYPE_DATA & IEEE80211_FC_TYPE_MASK)) ? TRUE : FALSE;
- /* 0x00 is the only valid protocol version*/
- isProtocolVerInvalid = (frmCtrl & IEEE80211_FC_PROTO_VERSION_MASK) ? TRUE : FALSE;
- frmCtrlSubType = (frmCtrl & IEEE80211_FC_SUBTYPE_MASK) >> FRAME_CONTROL_SUBTYPE_FIELD_OFFSET;
- /*Exclude the no data & reserved sub-types from MIC failure processing*/
- isDataFrameSubTypeNoData = (((frmCtrlSubType>0x03)&&(frmCtrlSubType<0x08)) || (frmCtrlSubType>0x0B)) ? TRUE : FALSE;
- if ((receptionStatus == CSR_MICHAEL_MIC_ERROR) &&
- ((!isDataFrame) || isProtocolVerInvalid || (isDataFrame && isDataFrameSubTypeNoData))) {
- /* Currently MIC errors are discarded for frames other than data frames. This might need changing when we start
- * supporting 802.11w (Protected Management frames)
- */
- *freeBulkData = TRUE;
- unifi_trace(priv, UDBG4, "Discarding this frame and ignoring the MIC failure as this is a garbage/non-data/no data frame\n");
- return FALSE;
- }
-
-#ifdef CSR_WIFI_SECURITY_WAPI_ENABLE
-
- if (receptionStatus == CSR_MICHAEL_MIC_ERROR) {
-
- if (interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_STA) {
-
-#ifdef CSR_WIFI_SECURITY_WAPI_QOSCTRL_MIC_WORKAROUND
- if ((isDataFrame) &&
- ((IEEE802_11_FC_TYPE_QOS_DATA & IEEE80211_FC_SUBTYPE_MASK) == (frmCtrl & IEEE80211_FC_SUBTYPE_MASK)) &&
- (priv->isWapiConnection))
- {
- qosControl = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(macHdrLocation + (((frmCtrl & IEEE802_11_FC_TO_DS_MASK) && (frmCtrl & IEEE802_11_FC_FROM_DS_MASK)) ? 30 : 24) );
-
- unifi_trace(priv, UDBG4, "check_routing_pkt_data_ind() :: Value of the QoS control field - 0x%04x \n", qosControl);
-
- if (qosControl & IEEE802_11_QC_NON_TID_BITS_MASK)
- {
- unifi_trace(priv, UDBG4, "Ignore the MIC failure and pass the MPDU to the stack when any of bits [4-15] is set in the QoS control field\n");
-
- /*Exclude the MIC [16] and the PN [16] that are appended by the firmware*/
- ((bulk_data_param_t*)bulkdata)->d[0].data_length = bulkdata->d[0].data_length - 32;
-
- /*Clear the reception status of the signal (CSR_RX_SUCCESS)*/
- *(sigdata + CSR_WIFI_MA_PKT_IND_RECEPTION_STATUS_OFFSET) = 0x00;
- *(sigdata + CSR_WIFI_MA_PKT_IND_RECEPTION_STATUS_OFFSET+1) = 0x00;
-
- *freeBulkData = FALSE;
-
- return FALSE;
- }
- }
-#endif
- /* If this MIC ERROR reported by the firmware is either for
- * [1] a WAPI Multicast MPDU and the Multicast filter has NOT been set (It is set only when group key index (MSKID) = 1 in Group Rekeying) OR
- * [2] a WAPI Unicast MPDU and either the CONTROL PORT is open or the WAPI Unicast filter or filter(s) is NOT set
- * then report a MIC FAILURE indication to the SME.
- */
-#ifndef CSR_WIFI_SECURITY_WAPI_SW_ENCRYPTION
- if ((priv->wapi_multicast_filter == 0) || isWapiUnicastPkt) {
-#else
- /*When SW encryption is enabled and USKID=1 (wapi_unicast_filter = 1), we are expected
- *to receive MIC failure INDs for unicast MPDUs*/
- if ( ((priv->wapi_multicast_filter == 0) && !isWapiUnicastPkt) ||
- ((priv->wapi_unicast_filter == 0) && isWapiUnicastPkt) ) {
-#endif
- /*Discard the frame*/
- *freeBulkData = TRUE;
- unifi_trace(priv, UDBG4, "Discarding the contents of the frame with MIC failure \n");
-
- if (isWapiUnicastPkt &&
- ((uf_sme_port_state(priv,srcAddr,UF_CONTROLLED_PORT_Q,interfaceTag) != CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_OPEN)||
-#ifndef CSR_WIFI_SECURITY_WAPI_SW_ENCRYPTION
- (priv->wapi_unicast_filter) ||
-#endif
- (priv->wapi_unicast_queued_pkt_filter))) {
-
- /* Workaround to handle MIC failures reported by the firmware for encrypted packets from the AP
- * while we are in the process of re-association induced by unsupported WAPI Unicast key index
- * - Discard the packets with MIC failures "until" we have
- * a. negotiated a key,
- * b. opened the CONTROL PORT and
- * c. the AP has started using the new key
- */
- unifi_trace(priv, UDBG4, "Ignoring the MIC failure as either a. CONTROL PORT isn't OPEN or b. Unicast filter is set or c. WAPI AP using old key for buffered pkts\n");
-
- /*Ignore this MIC failure*/
- return FALSE;
-
- }/*WAPI re-key specific workaround*/
-
- unifi_trace(priv, UDBG6, "check_routing_pkt_data_ind - MIC FAILURE : interfaceTag %x Src Addr %x:%x:%x:%x:%x:%x\n",
- interfaceTag, srcAddr[0], srcAddr[1], srcAddr[2], srcAddr[3], srcAddr[4], srcAddr[5]);
- unifi_trace(priv, UDBG6, "check_routing_pkt_data_ind - MIC FAILURE : Dest Addr %x:%x:%x:%x:%x:%x\n",
- destAddr[0], destAddr[1], destAddr[2], destAddr[3], destAddr[4], destAddr[5]);
- unifi_trace(priv, UDBG6, "check_routing_pkt_data_ind - MIC FAILURE : Control Port State - 0x%.4X \n",
- uf_sme_port_state(priv,srcAddr,UF_CONTROLLED_PORT_Q,interfaceTag));
-
- unifi_error(priv, "MIC failure in %s\n", __FUNCTION__);
-
- /*Report the MIC failure to the SME*/
- return TRUE;
- }
- }/* STA mode */
- else {
- /* Its AP Mode . Just Return */
- *freeBulkData = TRUE;
- unifi_error(priv, "MIC failure in %s\n", __FUNCTION__);
- return TRUE;
- } /* AP mode */
- }/* MIC error */
-#else
- if (receptionStatus == CSR_MICHAEL_MIC_ERROR) {
- *freeBulkData = TRUE;
- unifi_error(priv, "MIC failure in %s\n", __FUNCTION__);
- return TRUE;
- }
-#endif /*CSR_WIFI_SECURITY_WAPI_ENABLE*/
-
- unifi_trace(priv, UDBG4, "frmCtrl = 0x%04x %s\n",
- frmCtrl,
- (((frmCtrl & 0x000c)>>FRAME_CONTROL_TYPE_FIELD_OFFSET) == IEEE802_11_FRAMETYPE_MANAGEMENT) ?
- "Mgt" : "Ctrl/Data");
-
-#ifdef CSR_WIFI_SECURITY_WAPI_ENABLE
- /* To ignore MIC failures reported due to the WAPI AP using the old key for queued packets before
- * starting to use the new key negotiated as part of unicast re-keying
- */
- if ((interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_STA)&&
- isWapiUnicastPkt &&
- (receptionStatus == CSR_RX_SUCCESS) &&
- (priv->wapi_unicast_queued_pkt_filter==1)) {
-
- unifi_trace(priv, UDBG6, "check_routing_pkt_data_ind(): WAPI unicast pkt received when the (wapi_unicast_queued_pkt_filter) is set\n");
-
- if (isDataFrame) {
- switch(frmCtrl & IEEE80211_FC_SUBTYPE_MASK) {
- case IEEE802_11_FC_TYPE_QOS_DATA & IEEE80211_FC_SUBTYPE_MASK:
- llcSnapHeaderOffset = MAC_HEADER_SIZE + 2;
- break;
- case IEEE802_11_FC_TYPE_QOS_NULL & IEEE80211_FC_SUBTYPE_MASK:
- case IEEE802_11_FC_TYPE_NULL & IEEE80211_FC_SUBTYPE_MASK:
- break;
- default:
- llcSnapHeaderOffset = MAC_HEADER_SIZE;
- }
- }
-
- if (llcSnapHeaderOffset > 0) {
- /* QoS data or Data */
- unifi_trace(priv, UDBG6, "check_routing_pkt_data_ind(): SNAP header found & its offset %d\n",llcSnapHeaderOffset);
- if (memcmp((u8 *)(bulkdata->d[0].os_data_ptr+llcSnapHeaderOffset+wapiProtocolIdSNAPHeaderOffset),
- wapiProtocolIdSNAPHeader,sizeof(wapiProtocolIdSNAPHeader))) {
-
- unifi_trace(priv, UDBG6, "check_routing_pkt_data_ind(): This is a data & NOT a WAI protocol packet\n");
- /* On the first unicast data pkt that is decrypted successfully after re-keying, reset the filter */
- priv->wapi_unicast_queued_pkt_filter = 0;
- unifi_trace(priv, UDBG4, "check_routing_pkt_data_ind(): WAPI AP has started using the new unicast key, no more MIC failures expected (reset filter)\n");
- }
- else {
- unifi_trace(priv, UDBG6, "check_routing_pkt_data_ind(): WAPI - This is a WAI protocol packet\n");
- }
- }
- }
-#endif
-
-
- switch ((frmCtrl & 0x000c)>>FRAME_CONTROL_TYPE_FIELD_OFFSET) {
- case IEEE802_11_FRAMETYPE_MANAGEMENT:
- *freeBulkData = TRUE; /* Free (after SME handler copies it) */
-
- /* In P2P device mode, filter the legacy AP beacons here */
- if((interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2P)&&\
- ((CSR_WIFI_80211_GET_FRAME_SUBTYPE(macHdrLocation)) == CSR_WIFI_80211_FRAME_SUBTYPE_BEACON)){
-
- u8 *pSsid, *pSsidLen;
- static u8 P2PWildCardSsid[CSR_WIFI_P2P_WILDCARD_SSID_LENGTH] = {'D', 'I', 'R', 'E', 'C', 'T', '-'};
-
- pSsidLen = macHdrLocation + MAC_HEADER_SIZE + CSR_WIFI_BEACON_FIXED_LENGTH;
- pSsid = pSsidLen + 2;
-
- if(*(pSsidLen + 1) >= CSR_WIFI_P2P_WILDCARD_SSID_LENGTH){
- if(memcmp(pSsid, P2PWildCardSsid, CSR_WIFI_P2P_WILDCARD_SSID_LENGTH) == 0){
- unifi_trace(priv, UDBG6, "Received a P2P Beacon, pass it to SME\n");
- return TRUE;
- }
- }
- unifi_trace(priv, UDBG6, "Received a Legacy AP beacon in P2P mode, drop it\n");
- return FALSE;
- }
- return TRUE; /* Route to SME */
- case IEEE802_11_FRAMETYPE_DATA:
- case IEEE802_11_FRAMETYPE_CONTROL:
- *freeBulkData = FALSE; /* Network stack or MLME handler frees */
- return FALSE;
- default:
- unifi_error(priv, "Unhandled frame type %04x\n", frmCtrl);
- *freeBulkData = TRUE; /* Not interested, but must free it */
- return FALSE;
- }
-}
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_process_receive_event
- *
- * Dispatcher for received signals.
- *
- * This function receives the 'to host' signals and forwards
- * them to the unifi linux clients.
- *
- * Arguments:
- * ospriv Pointer to driver's private data.
- * sigdata Pointer to the packed signal buffer.
- * siglen Length of the packed signal.
- * bulkdata Pointer to the signal's bulk data.
- *
- * Returns:
- * None.
- *
- * Notes:
- *  The signals are received in the format described in the host interface
- *  specification, i.e. wire formatted. Certain clients interpret them in that
- *  same format, while other clients use the host formatted structures.
- *  Each client has to call read_unpack_signal() to transform the wire
- *  formatted signal into the host formatted signal, if necessary.
- *  The code lives in the core, since the signals are defined by, and therefore
- *  bound to, the host interface specification.
- * ---------------------------------------------------------------------------
- */
-static void
-unifi_process_receive_event(void *ospriv,
- u8 *sigdata, u32 siglen,
- const bulk_data_param_t *bulkdata)
-{
- unifi_priv_t *priv = (unifi_priv_t*)ospriv;
- int i, receiver_id;
- int client_id;
- s16 signal_id;
- u8 pktIndToSme = FALSE, freeBulkData = FALSE;
-
- unifi_trace(priv, UDBG5, "unifi_process_receive_event: "
- "%04x %04x %04x %04x %04x %04x %04x %04x (%d)\n",
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN((sigdata) + sizeof(s16)*0) & 0xFFFF,
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN((sigdata) + sizeof(s16)*1) & 0xFFFF,
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN((sigdata) + sizeof(s16)*2) & 0xFFFF,
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN((sigdata) + sizeof(s16)*3) & 0xFFFF,
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN((sigdata) + sizeof(s16)*4) & 0xFFFF,
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN((sigdata) + sizeof(s16)*5) & 0xFFFF,
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN((sigdata) + sizeof(s16)*6) & 0xFFFF,
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN((sigdata) + sizeof(s16)*7) & 0xFFFF,
- siglen);
-
- receiver_id = CSR_GET_UINT16_FROM_LITTLE_ENDIAN((sigdata) + sizeof(s16)) & 0xFF00;
- client_id = (receiver_id & 0x0F00) >> UDI_SENDER_ID_SHIFT;
- signal_id = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(sigdata);
-
-
-
- /* check for the type of frame received (checks for 802.11 management frames) */
- if (signal_id == CSR_MA_PACKET_INDICATION_ID)
- {
-#define CSR_MA_PACKET_INDICATION_INTERFACETAG_OFFSET 14
- u8 interfaceTag;
- netInterface_priv_t *interfacePriv;
-
- /* Pull out interface tag from virtual interface identifier */
- interfaceTag = (CSR_GET_UINT16_FROM_LITTLE_ENDIAN(sigdata + CSR_MA_PACKET_INDICATION_INTERFACETAG_OFFSET)) & 0xff;
- interfacePriv = priv->interfacePriv[interfaceTag];
-
- /* Update activity for this station in case of IBSS */
-#ifdef CSR_SUPPORT_SME
- if (interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_IBSS)
- {
- u8 *saddr;
- /* Fetch the source address from mac header */
- saddr = (u8 *) bulkdata->d[0].os_data_ptr + MAC_HEADER_ADDR2_OFFSET;
- unifi_trace(priv, UDBG5,
- "Updating sta activity in IBSS interfaceTag %x Src Addr %x:%x:%x:%x:%x:%x\n",
- interfaceTag, saddr[0], saddr[1], saddr[2], saddr[3], saddr[4], saddr[5]);
-
- uf_update_sta_activity(priv, interfaceTag, saddr);
- }
-#endif
-
- pktIndToSme = check_routing_pkt_data_ind(priv, sigdata, bulkdata, &freeBulkData, interfacePriv);
-
- unifi_trace(priv, UDBG6, "RX: packet entry point to driver from HIP,pkt to SME ?(%s) \n", (pktIndToSme)? "YES":"NO");
-
- }
-
- if (pktIndToSme)
- {
- /* Management MA_PACKET_IND for SME */
- if(sigdata != NULL && bulkdata != NULL){
- send_to_client(priv, priv->sme_cli, receiver_id, sigdata, siglen, bulkdata);
- }
- else{
- unifi_error(priv, "unifi_receive_event2: sigdata or Bulkdata is NULL \n");
- }
-#ifdef CSR_NATIVE_LINUX
- send_to_client(priv, priv->wext_client,
- receiver_id,
- sigdata, siglen, bulkdata);
-#endif
- }
- else
- {
- /* Signals with ReceiverId==0 are also reported to SME / WEXT,
- * unless they are data/control MA_PACKET_INDs or VIF_AVAILABILITY_INDs
- */
- if (!receiver_id) {
- if(signal_id == CSR_MA_VIF_AVAILABILITY_INDICATION_ID) {
- uf_process_ma_vif_availibility_ind(priv, sigdata, siglen);
- }
- else if (signal_id != CSR_MA_PACKET_INDICATION_ID) {
- send_to_client(priv, priv->sme_cli, receiver_id, sigdata, siglen, bulkdata);
-#ifdef CSR_NATIVE_LINUX
- send_to_client(priv, priv->wext_client,
- receiver_id,
- sigdata, siglen, bulkdata);
-#endif
- }
- else
- {
-
-#if (defined(CSR_SUPPORT_SME) && defined(CSR_WIFI_SECURITY_WAPI_ENABLE))
- #define CSR_MA_PACKET_INDICATION_RECEPTION_STATUS_OFFSET sizeof(CSR_SIGNAL_PRIMITIVE_HEADER) + 22
- netInterface_priv_t *interfacePriv;
- u8 interfaceTag;
- u16 receptionStatus = CSR_RX_SUCCESS;
-
- /* Pull out interface tag from virtual interface identifier */
- interfaceTag = (CSR_GET_UINT16_FROM_LITTLE_ENDIAN(sigdata + CSR_MA_PACKET_INDICATION_INTERFACETAG_OFFSET)) & 0xff;
- interfacePriv = priv->interfacePriv[interfaceTag];
-
- /* check for MIC failure */
- receptionStatus = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(sigdata + CSR_MA_PACKET_INDICATION_RECEPTION_STATUS_OFFSET);
-
-                /* Send a WAPI MPDU to the SME to re-check the MIC if the respective filter has been set */
- if ((!freeBulkData) &&
- (interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_STA) &&
- (receptionStatus == CSR_MICHAEL_MIC_ERROR) &&
- ((priv->wapi_multicast_filter == 1)
-#ifdef CSR_WIFI_SECURITY_WAPI_SW_ENCRYPTION
- || (priv->wapi_unicast_filter == 1)
-#endif
- ))
- {
- CSR_SIGNAL signal;
- u8 *destAddr;
- CsrResult res;
- u16 interfaceTag = 0;
- u8 isMcastPkt = TRUE;
-
- unifi_trace(priv, UDBG6, "Received a WAPI data packet when the Unicast/Multicast filter is set\n");
- res = read_unpack_signal(sigdata, &signal);
- if (res) {
- unifi_error(priv, "Received unknown or corrupted signal (0x%x).\n",
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN(sigdata));
- return;
- }
-
-                    /* Check the type of MPDU and the respective filter status */
- destAddr = (u8 *) bulkdata->d[0].os_data_ptr + MAC_HEADER_ADDR1_OFFSET;
- isMcastPkt = (destAddr[0] & 0x01) ? TRUE : FALSE;
- unifi_trace(priv, UDBG6,
- "1.MPDU type: (%s), 2.Multicast filter: (%s), 3. Unicast filter: (%s)\n",
-                                ((isMcastPkt) ? "Multicast":"Unicast"),
- ((priv->wapi_multicast_filter) ? "Enabled":"Disabled"),
- ((priv->wapi_unicast_filter) ? "Enabled":"Disabled"));
-
- if (((isMcastPkt) && (priv->wapi_multicast_filter == 1))
-#ifdef CSR_WIFI_SECURITY_WAPI_SW_ENCRYPTION
- || ((!isMcastPkt) && (priv->wapi_unicast_filter == 1))
-#endif
- )
- {
- unifi_trace(priv, UDBG4, "Sending the WAPI MPDU for MIC check\n");
- CsrWifiRouterCtrlWapiRxMicCheckIndSend(priv->CSR_WIFI_SME_IFACEQUEUE, 0, interfaceTag, siglen, sigdata, bulkdata->d[0].data_length, (u8*)bulkdata->d[0].os_data_ptr);
-
- for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; i++) {
- if (bulkdata->d[i].data_length != 0) {
- unifi_net_data_free(priv, (void *)&bulkdata->d[i]);
- }
- }
- return;
- }
- } /* CSR_MA_PACKET_INDICATION_ID */
-#endif /*CSR_SUPPORT_SME && CSR_WIFI_SECURITY_WAPI_ENABLE*/
- }
- }
-
-        /* Call the registered client's handler callback function.
-         * netdev_mlme_event_handler is one of the registered handlers, used to route
-         * data packets to the network stack and AMP/EAPOL related data to the SME.
-         *
-         * The freeBulkData check ensures that a management frame which must be freed
-         * here is not passed on to the netdev handler.
-         */
- if(!freeBulkData){
- if ((client_id < MAX_UDI_CLIENTS) &&
- (&priv->ul_clients[client_id] != priv->logging_client)) {
- unifi_trace(priv, UDBG6, "Call the registered clients handler callback func\n");
- send_to_client(priv, &priv->ul_clients[client_id],
- receiver_id,
- sigdata, siglen, bulkdata);
- }
- }
- }
-
- /*
- * Free bulk data buffers here unless it is a CSR_MA_PACKET_INDICATION
- */
- switch (signal_id)
- {
-#ifdef UNIFI_SNIFF_ARPHRD
- case CSR_MA_SNIFFDATA_INDICATION_ID:
-#endif
- break;
-
- case CSR_MA_PACKET_INDICATION_ID:
- if (!freeBulkData)
- {
- break;
- }
- /* FALLS THROUGH... */
- default:
- for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; i++) {
- if (bulkdata->d[i].data_length != 0) {
- unifi_net_data_free(priv, (void *)&bulkdata->d[i]);
- }
- }
- }
-
-} /* unifi_process_receive_event() */
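For reference, a minimal standalone sketch of how the dispatcher above pulls the signal id and the receiver/client ids out of the packed little-endian signal header. get_le16() and the shift of 8 are stand-ins for the driver's CSR_GET_UINT16_FROM_LITTLE_ENDIAN and UDI_SENDER_ID_SHIFT, so the exact shift value is an assumption here, not taken from the driver headers.

    #include <stdint.h>
    #include <stdio.h>

    /* get_le16() stands in for CSR_GET_UINT16_FROM_LITTLE_ENDIAN. */
    static uint16_t get_le16(const uint8_t *p)
    {
        return (uint16_t)(p[0] | (p[1] << 8));
    }

    int main(void)
    {
        /* Hypothetical packed header: signal id 0x1234, receiver process id 0x0A00. */
        uint8_t sigdata[4] = { 0x34, 0x12, 0x00, 0x0A };

        uint16_t signal_id   = get_le16(sigdata);
        uint16_t receiver_id = get_le16(sigdata + 2) & 0xFF00;
        uint16_t client_id   = (receiver_id & 0x0F00) >> 8;  /* assumes a sender-id shift of 8 */

        printf("signal_id=0x%04x receiver_id=0x%04x client_id=%u\n",
               (unsigned int)signal_id, (unsigned int)receiver_id, (unsigned int)client_id);
        return 0;
    }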
-
-
-#ifdef CSR_WIFI_RX_PATH_SPLIT
-static u8 signal_buffer_is_full(unifi_priv_t* priv)
-{
- return (((priv->rxSignalBuffer.writePointer + 1)% priv->rxSignalBuffer.size) == (priv->rxSignalBuffer.readPointer));
-}
-
-void unifi_rx_queue_flush(void *ospriv)
-{
- unifi_priv_t *priv = (unifi_priv_t*)ospriv;
-
- unifi_trace(priv, UDBG4, "rx_wq_handler: RdPtr = %d WritePtr = %d\n",
- priv->rxSignalBuffer.readPointer,priv->rxSignalBuffer.writePointer);
- if(priv != NULL) {
- u8 readPointer = priv->rxSignalBuffer.readPointer;
- while (readPointer != priv->rxSignalBuffer.writePointer)
- {
- rx_buff_struct_t *buf = &priv->rxSignalBuffer.rx_buff[readPointer];
- unifi_trace(priv, UDBG6, "rx_wq_handler: RdPtr = %d WritePtr = %d\n",
- readPointer,priv->rxSignalBuffer.writePointer);
- unifi_process_receive_event(priv, buf->bufptr, buf->sig_len, &buf->data_ptrs);
- readPointer ++;
- if(readPointer >= priv->rxSignalBuffer.size) {
- readPointer = 0;
- }
- }
- priv->rxSignalBuffer.readPointer = readPointer;
- }
-}
-
-void rx_wq_handler(struct work_struct *work)
-{
- unifi_priv_t *priv = container_of(work, unifi_priv_t, rx_work_struct);
- unifi_rx_queue_flush(priv);
-}
-#endif
-
-
-
-/*
- * ---------------------------------------------------------------------------
- * unifi_receive_event
- *
- * Dispatcher for received signals.
- *
- * This function receives the 'to host' signals and forwards
- * them to the unifi linux clients.
- *
- * Arguments:
- * ospriv Pointer to driver's private data.
- * sigdata Pointer to the packed signal buffer.
- * siglen Length of the packed signal.
- * bulkdata Pointer to the signal's bulk data.
- *
- * Returns:
- * None.
- *
- * Notes:
- *  The signals are received in the format described in the host interface
- *  specification, i.e. wire formatted. Certain clients interpret them in that
- *  same format, while other clients use the host formatted structures.
- *  Each client has to call read_unpack_signal() to transform the wire
- *  formatted signal into the host formatted signal, if necessary.
- *  The code lives in the core, since the signals are defined by, and therefore
- *  bound to, the host interface specification.
- * ---------------------------------------------------------------------------
- */
-void
-unifi_receive_event(void *ospriv,
- u8 *sigdata, u32 siglen,
- const bulk_data_param_t *bulkdata)
-{
-#ifdef CSR_WIFI_RX_PATH_SPLIT
- unifi_priv_t *priv = (unifi_priv_t*)ospriv;
- u8 writePointer;
- int i;
- rx_buff_struct_t * rx_buff;
-
- unifi_trace(priv, UDBG5, "unifi_receive_event: "
- "%04x %04x %04x %04x %04x %04x %04x %04x (%d)\n",
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN((sigdata) + sizeof(s16)*0) & 0xFFFF,
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN((sigdata) + sizeof(s16)*1) & 0xFFFF,
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN((sigdata) + sizeof(s16)*2) & 0xFFFF,
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN((sigdata) + sizeof(s16)*3) & 0xFFFF,
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN((sigdata) + sizeof(s16)*4) & 0xFFFF,
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN((sigdata) + sizeof(s16)*5) & 0xFFFF,
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN((sigdata) + sizeof(s16)*6) & 0xFFFF,
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN((sigdata) + sizeof(s16)*7) & 0xFFFF, siglen);
- if(signal_buffer_is_full(priv)) {
- unifi_error(priv,"TO HOST signal queue FULL dropping the PDU\n");
- for (i = 0; i < UNIFI_MAX_DATA_REFERENCES; i++) {
- if (bulkdata->d[i].data_length != 0) {
- unifi_net_data_free(priv, (void *)&bulkdata->d[i]);
- }
- }
- return;
- }
- writePointer = priv->rxSignalBuffer.writePointer;
- rx_buff = &priv->rxSignalBuffer.rx_buff[writePointer];
- memcpy(rx_buff->bufptr,sigdata,siglen);
- rx_buff->sig_len = siglen;
- rx_buff->data_ptrs = *bulkdata;
- writePointer++;
- if(writePointer >= priv->rxSignalBuffer.size) {
- writePointer =0;
- }
- unifi_trace(priv, UDBG4, "unifi_receive_event:writePtr = %d\n",priv->rxSignalBuffer.writePointer);
- priv->rxSignalBuffer.writePointer = writePointer;
-
-#ifndef CSR_WIFI_RX_PATH_SPLIT_DONT_USE_WQ
- queue_work(priv->rx_workqueue, &priv->rx_work_struct);
-#endif
-
-#else
- unifi_process_receive_event(ospriv, sigdata, siglen, bulkdata);
-#endif
-} /* unifi_receive_event() */
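The RX-path-split queue used above is a single-producer/single-consumer ring buffer: unifi_receive_event() fills the slot at writePointer and the work item drains from readPointer, and the buffer counts as full when advancing the write index would collide with the read index, which leaves one slot permanently empty so "full" and "empty" can be told apart. A minimal sketch of that same index arithmetic, with a placeholder size and element type rather than the driver's rx_buff_struct_t:

    #include <stdio.h>

    #define RING_SIZE 4                 /* placeholder; the driver sizes its buffer elsewhere */

    struct ring {
        unsigned int read, write;       /* consumer / producer indices */
        int buf[RING_SIZE];
    };

    /* Full when advancing the write index would land on the read index. */
    static int ring_full(const struct ring *r)
    {
        return ((r->write + 1) % RING_SIZE) == r->read;
    }

    static int ring_empty(const struct ring *r)
    {
        return r->write == r->read;
    }

    static int ring_put(struct ring *r, int v)
    {
        if (ring_full(r))
            return -1;                  /* producer drops the item, as the driver does */
        r->buf[r->write] = v;
        r->write = (r->write + 1) % RING_SIZE;
        return 0;
    }

    static int ring_get(struct ring *r, int *v)
    {
        if (ring_empty(r))
            return -1;
        *v = r->buf[r->read];
        r->read = (r->read + 1) % RING_SIZE;
        return 0;
    }

    int main(void)
    {
        struct ring r = { 0, 0, { 0 } };
        int v;

        while (ring_put(&r, 42) == 0)   /* fills RING_SIZE - 1 slots, then reports full */
            ;
        while (ring_get(&r, &v) == 0)
            printf("%d\n", v);
        return 0;
    }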
-
diff --git a/drivers/staging/csr/unifi_native.h b/drivers/staging/csr/unifi_native.h
deleted file mode 100644
index a73b38e..0000000
--- a/drivers/staging/csr/unifi_native.h
+++ /dev/null
@@ -1,257 +0,0 @@
-/*
- *****************************************************************************
- *
- * FILE : unifi_native.h
- *
- * PURPOSE : Private header file for unifi driver support to wireless extensions.
- *
- * UDI = UniFi Debug Interface
- *
- * Copyright (C) 2005-2008 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- *****************************************************************************
- */
-#ifndef __LINUX_UNIFI_NATIVE_H__
-#define __LINUX_UNIFI_NATIVE_H__ 1
-
-#include <linux/kernel.h>
-#include <linux/if_arp.h>
-
-
-/*
- * scan.c wext.c autojoin.c
- */
-/* Structure to hold results of a scan */
-typedef struct scan_info {
-
-/* CSR_MLME_SCAN_INDICATION msi; */
-
- unsigned char *info_elems;
- int info_elem_length;
-
-} scan_info_t;
-
-
-#define IE_VECTOR_MAXLEN 1024
-
-#ifdef CSR_SUPPORT_WEXT
-/*
- * Structure to hold the wireless network configuration info.
- */
-struct wext_config {
-
- /* Requested channel when setting up an adhoc network */
- int channel;
-
- /* wireless extns mode: IW_MODE_AUTO, ADHOC, INFRA, MASTER ... MONITOR */
- int mode;
-
- /* The capabilities of the currently joined network */
- int capability;
-
- /* The interval between beacons if we create an IBSS */
- int beacon_period;
-
- /*
- * Power-save parameters
- */
- /* The listen interval to ask for in Associate req. */
- int assoc_listen_interval;
- /* Power-mode to put UniFi into */
-
- unsigned char desired_ssid[UNIFI_MAX_SSID_LEN]; /* the last ESSID set by SIOCSIWESSID */
- int power_mode;
- /* Whether to wake for broadcast packets (using DTIM interval) */
- int wakeup_for_dtims;
-
- /* Currently selected WEP Key ID (0..3) */
- int wep_key_id;
-
- wep_key_t wep_keys[NUM_WEPKEYS];
-
-/* CSR_AUTHENTICATION_TYPE auth_type; */
- int privacy;
-
- u32 join_failure_timeout;
- u32 auth_failure_timeout;
- u32 assoc_failure_timeout;
-
- unsigned char generic_ie[IE_VECTOR_MAXLEN];
- int generic_ie_len;
-
- struct iw_statistics wireless_stats;
-
-
- /* the ESSID we are currently associated to */
- unsigned char current_ssid[UNIFI_MAX_SSID_LEN];
- /* the BSSID we are currently associated to */
- unsigned char current_bssid[6];
-
- /*
- * IW_AUTH_WPA_VERSION_DISABLED 0x00000001
- * IW_AUTH_WPA_VERSION_WPA 0x00000002
- * IW_AUTH_WPA_VERSION_WPA2 0x00000004
- */
- unsigned char wpa_version;
-
- /*
- * cipher selection:
- * IW_AUTH_CIPHER_NONE 0x00000001
- * IW_AUTH_CIPHER_WEP40 0x00000002
- * IW_AUTH_CIPHER_TKIP 0x00000004
- * IW_AUTH_CIPHER_CCMP 0x00000008
- * IW_AUTH_CIPHER_WEP104 0x00000010
- */
- unsigned char pairwise_cipher_used;
- unsigned char group_cipher_used;
-
- unsigned int frag_thresh;
- unsigned int rts_thresh;
-
- /* U-APSD value, send with Association Request to WMM Enabled APs */
- unsigned char wmm_bss_uapsd_mask;
- /* The WMM capabilities of the selected BSS */
- unsigned int bss_wmm_capabilities;
-
- /* Flag to prevent a join when the ssid is set */
- int disable_join_on_ssid_set;
-
- /* Scan info */
-#define UNIFI_MAX_SCANS 32
- scan_info_t scan_list[UNIFI_MAX_SCANS];
- int num_scan_info;
-
- /* Flag on whether non-802.1x packets are allowed out */
-/* CsrWifiRouterPortAction block_controlled_port;*/
-
- /* Flag on whether we have completed an authenticate/associate process */
- unsigned int flag_associated : 1;
-}; /* struct wext_config */
-
-#endif /* CSR_SUPPORT_WEXT */
-
-
-/*
- * wext.c
- */
-/*int mlme_set_protection(unifi_priv_t *priv, unsigned char *addr,
- CSR_PROTECT_TYPE prot, CSR_KEY_TYPE key_type);
-*/
-
-/*
- * scan.c
- */
-/*
-void unifi_scan_indication_handler(unifi_priv_t *priv,
- const CSR_MLME_SCAN_INDICATION *msg,
- const unsigned char *extra,
- unsigned int len);
-*/
-void unifi_clear_scan_table(unifi_priv_t *priv);
-scan_info_t *unifi_get_scan_report(unifi_priv_t *priv, int index);
-
-
-/*
- * Utility functions
- */
-const unsigned char *unifi_find_info_element(int id,
- const unsigned char *info,
- int len);
-int unifi_add_info_element(unsigned char *info,
- int ie_id,
- const unsigned char *ie_data,
- int ie_len);
-
-/*
- * autojoin.c
- */
-/* Higher level fns */
-int unifi_autojoin(unifi_priv_t *priv, const char *ssid);
-/*
-int unifi_do_scan(unifi_priv_t *priv, int scantype, CSR_BSS_TYPE bsstype,
- const char *ssid, int ssid_len);
-*/
-int unifi_set_powermode(unifi_priv_t *priv);
-int unifi_join_ap(unifi_priv_t *priv, scan_info_t *si);
-int unifi_join_bss(unifi_priv_t *priv, unsigned char *macaddr);
-int unifi_leave(unifi_priv_t *priv);
-unsigned int unifi_get_wmm_bss_capabilities(unifi_priv_t *priv,
- unsigned char *ie_vector,
- int ie_len, int *ap_capabilities);
-
-/*
- * Status and management.
- */
-int uf_init_wext_interface(unifi_priv_t *priv);
-void uf_deinit_wext_interface(unifi_priv_t *priv);
-
-/*
- * Function to reset UniFi's 802.11 state by sending MLME-RESET.req
- */
-int unifi_reset_state(unifi_priv_t *priv, unsigned char *macaddr, unsigned char set_default_mib);
-
-
-/*
- * mlme.c
- */
-/* Abort an MLME operation - useful in error recovery */
-int uf_abort_mlme(unifi_priv_t *priv);
-
-int unifi_mlme_blocking_request(unifi_priv_t *priv, ul_client_t *pcli,
- CSR_SIGNAL *sig, bulk_data_param_t *data_ptrs,
- int timeout);
-void unifi_mlme_copy_reply_and_wakeup_client(ul_client_t *pcli,
- CSR_SIGNAL *signal, int signal_len,
- const bulk_data_param_t *bulkdata);
-
-/*
- * Utility functions
- */
-const char *lookup_reason_code(int reason);
-const char *lookup_result_code(int result);
-
-
-/*
- * sme_native.c
- */
-int uf_sme_init(unifi_priv_t *priv);
-void uf_sme_deinit(unifi_priv_t *priv);
-int sme_sys_suspend(unifi_priv_t *priv);
-int sme_sys_resume(unifi_priv_t *priv);
-int sme_mgt_wifi_on(unifi_priv_t *priv);
-
-/* Callback for event logging to SME clients (unifi_manager) */
-void sme_native_log_event(ul_client_t *client,
- const u8 *sig_packed, int sig_len,
- const bulk_data_param_t *bulkdata,
- int dir);
-
-void sme_native_mlme_event_handler(ul_client_t *pcli,
- const u8 *sig_packed, int sig_len,
- const bulk_data_param_t *bulkdata,
- int dir);
-
-/* Task to query statistics from the MIB */
-#define UF_SME_STATS_WQ_TIMEOUT 2000 /* in msecs */
-void uf_sme_stats_wq(struct work_struct *work);
-
-void uf_native_process_udi_signal(ul_client_t *pcli,
- const u8 *packed_signal,
- int packed_signal_len,
- const bulk_data_param_t *bulkdata, int dir);
-#ifdef UNIFI_SNIFF_ARPHRD
-/*
- * monitor.c
- */
-int uf_start_sniff(unifi_priv_t *priv);
-/*
-void ma_sniffdata_ind(void *ospriv,
- const CSR_MA_SNIFFDATA_INDICATION *ind,
- const bulk_data_param_t *bulkdata);
-*/
-#endif /* ARPHRD_IEEE80211_PRISM */
-
-#endif /* __LINUX_UNIFI_NATIVE_H__ */
diff --git a/drivers/staging/csr/unifi_os.h b/drivers/staging/csr/unifi_os.h
deleted file mode 100644
index 56a2698..0000000
--- a/drivers/staging/csr/unifi_os.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * ---------------------------------------------------------------------------
- *
- * FILE: os_linux/unifi_os.h
- *
- * PURPOSE:
- *      This header file provides the OS-dependent facilities for a Linux
- *      environment.
- *
- * Copyright (C) 2005-2008 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ---------------------------------------------------------------------------
- */
-#ifndef __UNIFI_OS_LINUX_H__
-#define __UNIFI_OS_LINUX_H__ 1
-
-#include <linux/kernel.h>
-#include <linux/time.h>
-#include <linux/list.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-
-/*
- * Needed for core/signals.c
- */
-#include <stddef.h>
-
-
-/* Define INLINE directive*/
-#define INLINE inline
-
-/* Malloc and free */
-CsrResult unifi_net_data_malloc(void *ospriv, bulk_data_desc_t *bulk_data_slot, unsigned int size);
-void unifi_net_data_free(void *ospriv, bulk_data_desc_t *bulk_data_slot);
-#define CSR_WIFI_ALIGN_BYTES 4
-CsrResult unifi_net_dma_align(void *ospriv, bulk_data_desc_t *bulk_data_slot);
-
-/*
- * Byte Order
- * Note that __le*_to_cpu and __cpu_to_le* return an unsigned value!
- */
-#ifdef __KERNEL__
-#define unifi2host_16(n) (__le16_to_cpu((n)))
-#define unifi2host_32(n) (__le32_to_cpu((n)))
-#define host2unifi_16(n) (__cpu_to_le16((n)))
-#define host2unifi_32(n) (__cpu_to_le32((n)))
-#endif
-
-/* Module parameters */
-extern int unifi_debug;
-
-/* debugging */
-#ifdef UNIFI_DEBUG
-/*
- * unifi_debug is a verbosity level for debug messages
- * UDBG0 msgs are always printed if UNIFI_DEBUG is defined
- * UDBG1 msgs are printed if UNIFI_DEBUG is defined and unifi_debug > 0
- * etc.
- */
-
-#define ASSERT(cond) \
- do { \
- if (!(cond)) { \
- printk("Assertion failed in %s at %s:%d: %s\n", \
- __FUNCTION__, __FILE__, __LINE__, #cond); \
- } \
- } while (0)
-
-
-void unifi_dump(void *ospriv, int lvl, const char *msg, void *mem, u16 len);
-void dump(void *mem, u16 len);
-void dump16(void *mem, u16 len);
-#ifdef CSR_WIFI_HIP_DEBUG_OFFLINE
-void dump_str(void *mem, u16 len);
-#endif /* CSR_WIFI_HIP_DEBUG_OFFLINE */
-
-void unifi_error(void* ospriv, const char *fmt, ...);
-void unifi_warning(void* ospriv, const char *fmt, ...);
-void unifi_notice(void* ospriv, const char *fmt, ...);
-void unifi_info(void* ospriv, const char *fmt, ...);
-
-void unifi_trace(void* ospriv, int level, const char *fmt, ...);
-
-#else
-
-/* Stubs */
-
-#define ASSERT(cond)
-
-static inline void unifi_dump(void *ospriv, int lvl, const char *msg, void *mem, u16 len) {}
-static inline void dump(void *mem, u16 len) {}
-static inline void dump16(void *mem, u16 len) {}
-#ifdef CSR_WIFI_HIP_DEBUG_OFFLINE
-static inline void dump_str(void *mem, u16 len) {}
-#endif /* CSR_WIFI_HIP_DEBUG_OFFLINE */
-
-void unifi_error_nop(void* ospriv, const char *fmt, ...);
-void unifi_trace_nop(void* ospriv, int level, const char *fmt, ...);
-#define unifi_error if(1);else unifi_error_nop
-#define unifi_warning if(1);else unifi_error_nop
-#define unifi_notice if(1);else unifi_error_nop
-#define unifi_info if(1);else unifi_error_nop
-#define unifi_trace if(1);else unifi_trace_nop
-
-#endif /* UNIFI_DEBUG */
-
-
-/* Different levels of diagnostic detail... */
-#define UDBG0 0 /* always prints in debug build */
-#define UDBG1 1
-#define UDBG2 2
-#define UDBG3 3
-#define UDBG4 4
-#define UDBG5 5
-#define UDBG6 6
-#define UDBG7 7
-
-
-#endif /* __UNIFI_OS_LINUX_H__ */
diff --git a/drivers/staging/csr/unifi_pdu_processing.c b/drivers/staging/csr/unifi_pdu_processing.c
deleted file mode 100644
index f9b421b..0000000
--- a/drivers/staging/csr/unifi_pdu_processing.c
+++ /dev/null
@@ -1,3729 +0,0 @@
-/*
- * ---------------------------------------------------------------------------
- * FILE: unifi_pdu_processing.c
- *
- * PURPOSE:
- *      This file provides the PDU handling functionality before a PDU is sent to UniFi
- *      and after a PDU is received from UniFi.
- *
- * Copyright (C) 2010 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ---------------------------------------------------------------------------
- */
-
-#include <linux/types.h>
-#include <linux/etherdevice.h>
-#include <linux/vmalloc.h>
-
-#include "csr_wifi_hip_unifi.h"
-#include "csr_wifi_hip_conversions.h"
-#include "csr_time.h"
-#include "unifi_priv.h"
-#include <net/pkt_sched.h>
-
-#ifdef CSR_SUPPORT_SME
-static void _update_buffered_pkt_params_after_alignment(unifi_priv_t *priv, bulk_data_param_t *bulkdata,
- tx_buffered_packets_t* buffered_pkt)
-{
- struct sk_buff *skb ;
- u32 align_offset;
-
- if (priv == NULL || bulkdata == NULL || buffered_pkt == NULL){
- return;
- }
-
- skb = (struct sk_buff*)bulkdata->d[0].os_net_buf_ptr;
- align_offset = (u32)(long)(bulkdata->d[0].os_data_ptr) & (CSR_WIFI_ALIGN_BYTES-1);
- if(align_offset){
- skb_pull(skb,align_offset);
- }
-
- buffered_pkt->bulkdata.os_data_ptr = bulkdata->d[0].os_data_ptr;
- buffered_pkt->bulkdata.data_length = bulkdata->d[0].data_length;
- buffered_pkt->bulkdata.os_net_buf_ptr = bulkdata->d[0].os_net_buf_ptr;
- buffered_pkt->bulkdata.net_buf_length = bulkdata->d[0].net_buf_length;
-}
-#endif
-
-void
-unifi_frame_ma_packet_req(unifi_priv_t *priv, CSR_PRIORITY priority,
- CSR_RATE TransmitRate, CSR_CLIENT_TAG hostTag,
- u16 interfaceTag, CSR_TRANSMISSION_CONTROL transmissionControl,
- CSR_PROCESS_ID leSenderProcessId, u8 *peerMacAddress,
- CSR_SIGNAL *signal)
-{
-
- CSR_MA_PACKET_REQUEST *req = &signal->u.MaPacketRequest;
- netInterface_priv_t *interfacePriv;
- u8 ba_session_idx = 0;
- ba_session_tx_struct *ba_session = NULL;
- u8 *ba_addr = NULL;
-
- interfacePriv = priv->interfacePriv[interfaceTag];
-
- unifi_trace(priv, UDBG5,
- "In unifi_frame_ma_packet_req, Frame for Peer: %pMF\n",
- peerMacAddress);
- signal->SignalPrimitiveHeader.SignalId = CSR_MA_PACKET_REQUEST_ID;
- signal->SignalPrimitiveHeader.ReceiverProcessId = 0;
- signal->SignalPrimitiveHeader.SenderProcessId = leSenderProcessId;
-
- /* Fill the MA-PACKET.req */
- req->Priority = priority;
- unifi_trace(priv, UDBG3, "Tx Frame with Priority: 0x%x\n", req->Priority);
-
-    /* A value of 0 selects the rate automatically. For the P2P GO case,
-     * however, the rate for action frames is governed by the SME, so the
-     * value passed in here is used instead of 0.
-     */
- req->TransmitRate = TransmitRate;
-
-    /* Packets from the netdev need no confirm, but EAPOL data frames
-     * from the NME/SME require a confirmation.
-     */
- req->TransmissionControl = transmissionControl;
- req->VirtualInterfaceIdentifier =
- uf_get_vif_identifier(interfacePriv->interfaceMode,interfaceTag);
- memcpy(req->Ra.x, peerMacAddress, ETH_ALEN);
-
- if (hostTag == 0xffffffff) {
- req->HostTag = interfacePriv->tag++;
- req->HostTag |= 0x40000000;
- unifi_trace(priv, UDBG3, "new host tag assigned = 0x%x\n", req->HostTag);
- interfacePriv->tag &= 0x0fffffff;
- } else {
- req->HostTag = hostTag;
- unifi_trace(priv, UDBG3, "host tag got from SME = 0x%x\n", req->HostTag);
- }
- /* check if BA session exists for the peer MAC address on same tID */
- if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO){
- ba_addr = peerMacAddress;
- }else{
- ba_addr = interfacePriv->bssid.a;
- }
- for (ba_session_idx=0; ba_session_idx < MAX_SUPPORTED_BA_SESSIONS_TX; ba_session_idx++){
- ba_session = interfacePriv->ba_session_tx[ba_session_idx];
- if (ba_session){
- if ((!memcmp(ba_session->macAddress.a, ba_addr, ETH_ALEN)) && (ba_session->tID == priority)){
- req->TransmissionControl |= CSR_ALLOW_BA;
- break;
- }
- }
- }
-
- unifi_trace(priv, UDBG5, "leaving unifi_frame_ma_packet_req\n");
-}
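When the caller passes hostTag == 0xffffffff, the function above generates its own tag: it takes the running per-interface counter, marks the result with 0x40000000 and wraps the counter at 28 bits, presumably so driver-generated tags can be told apart from SME-supplied ones. A tiny standalone illustration of that scheme; next_host_tag() is a hypothetical helper, not driver code:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the tagging above: OR in the 0x40000000 marker and keep the
     * running counter within 28 bits. */
    static uint32_t next_host_tag(uint32_t *counter)
    {
        uint32_t tag = (*counter)++ | 0x40000000;

        *counter &= 0x0fffffff;         /* wrap the counter at 28 bits */
        return tag;
    }

    int main(void)
    {
        uint32_t counter = 0x0ffffffe;  /* start near the wrap point for the example */

        printf("0x%08x\n", (unsigned int)next_host_tag(&counter));  /* 0x4ffffffe */
        printf("0x%08x\n", (unsigned int)next_host_tag(&counter));  /* 0x4fffffff */
        printf("0x%08x\n", (unsigned int)next_host_tag(&counter));  /* 0x40000000 (counter wrapped) */
        return 0;
    }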
-
-#ifdef CSR_SUPPORT_SME
-
-#define TRANSMISSION_CONTROL_TRIGGER_MASK 0x0001
-#define TRANSMISSION_CONTROL_EOSP_MASK 0x0002
-
-static
-int frame_and_send_queued_pdu(unifi_priv_t* priv,tx_buffered_packets_t* buffered_pkt,
- CsrWifiRouterCtrlStaInfo_t *staRecord,u8 moreData , u8 eosp)
-{
-
- CSR_SIGNAL signal;
- bulk_data_param_t bulkdata;
- int result;
- u8 toDs, fromDs, macHeaderLengthInBytes = MAC_HEADER_SIZE;
- u8 *qc;
- u16 *fc = (u16*)(buffered_pkt->bulkdata.os_data_ptr);
- unsigned long lock_flags;
- unifi_trace(priv, UDBG3, "frame_and_send_queued_pdu with moreData: %d , EOSP: %d\n",moreData,eosp);
- unifi_frame_ma_packet_req(priv, buffered_pkt->priority, buffered_pkt->rate, buffered_pkt->hostTag,
- buffered_pkt->interfaceTag, buffered_pkt->transmissionControl,
- buffered_pkt->leSenderProcessId, buffered_pkt->peerMacAddress.a, &signal);
- bulkdata.d[0].os_data_ptr = buffered_pkt->bulkdata.os_data_ptr;
- bulkdata.d[0].data_length = buffered_pkt->bulkdata.data_length;
- bulkdata.d[0].os_net_buf_ptr = buffered_pkt->bulkdata.os_net_buf_ptr;
- bulkdata.d[0].net_buf_length = buffered_pkt->bulkdata.net_buf_length;
- bulkdata.d[1].os_data_ptr = NULL;
- bulkdata.d[1].data_length = 0;
- bulkdata.d[1].os_net_buf_ptr =0;
- bulkdata.d[1].net_buf_length =0;
-
- if(moreData) {
- *fc |= cpu_to_le16(IEEE802_11_FC_MOREDATA_MASK);
- } else {
- *fc &= cpu_to_le16(~IEEE802_11_FC_MOREDATA_MASK);
- }
-
- if((staRecord != NULL)&& (staRecord->wmmOrQosEnabled == TRUE))
- {
- unifi_trace(priv, UDBG3, "frame_and_send_queued_pdu WMM Enabled: %d \n",staRecord->wmmOrQosEnabled);
-
- toDs = (*fc & cpu_to_le16(IEEE802_11_FC_TO_DS_MASK))?1 : 0;
- fromDs = (*fc & cpu_to_le16(IEEE802_11_FC_FROM_DS_MASK))? 1: 0;
-
- switch(le16_to_cpu(*fc) & IEEE80211_FC_SUBTYPE_MASK)
- {
- case IEEE802_11_FC_TYPE_QOS_DATA & IEEE80211_FC_SUBTYPE_MASK:
- case IEEE802_11_FC_TYPE_QOS_NULL & IEEE80211_FC_SUBTYPE_MASK:
- /* If both are set then the Address4 exists (only for AP) */
- if (fromDs && toDs) {
- /* 6 is the size of Address4 field */
- macHeaderLengthInBytes += (QOS_CONTROL_HEADER_SIZE + 6);
- } else {
- macHeaderLengthInBytes += QOS_CONTROL_HEADER_SIZE;
- }
-
- /* If order bit set then HT control field is the part of MAC header */
- if (*fc & cpu_to_le16(IEEE80211_FC_ORDER_MASK)) {
- macHeaderLengthInBytes += HT_CONTROL_HEADER_SIZE;
- qc = (u8*)(buffered_pkt->bulkdata.os_data_ptr + (macHeaderLengthInBytes-6));
- } else {
- qc = (u8*)(buffered_pkt->bulkdata.os_data_ptr + (macHeaderLengthInBytes-2));
- }
- *qc = eosp ? *qc | (1 << 4) : *qc & (~(1 << 4));
- break;
- default:
- if (fromDs && toDs)
- macHeaderLengthInBytes += 6;
- }
-
- }
- result = ul_send_signal_unpacked(priv, &signal, &bulkdata);
- if(result){
- _update_buffered_pkt_params_after_alignment(priv, &bulkdata,buffered_pkt);
- }
-
- /* Decrement the packet counts queued in driver */
- if (result != -ENOSPC) {
-        /* protect the entire counter update by disabling preemption */
- if (!priv->noOfPktQueuedInDriver) {
- unifi_error(priv, "packets queued in driver 0 still decrementing\n");
- } else {
- spin_lock_irqsave(&priv->tx_q_lock,lock_flags);
- priv->noOfPktQueuedInDriver--;
- spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags);
- }
-        /* The STA record is available for all unicast frames (except generic Mgmt frames); otherwise it is NULL */
- if (staRecord) {
- spin_lock_irqsave(&priv->staRecord_lock,lock_flags);
- if (!staRecord->noOfPktQueued) {
- unifi_error(priv, "packets queued in driver per station is 0 still decrementing\n");
- } else {
- staRecord->noOfPktQueued--;
- }
- /* if the STA alive probe frame has failed then reset the saved host tag */
- if (result){
- if (staRecord->nullDataHostTag == buffered_pkt->hostTag){
- staRecord->nullDataHostTag = INVALID_HOST_TAG;
- }
- }
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
- }
-
- }
- return result;
-}
-#ifdef CSR_SUPPORT_SME
-static
-void set_eosp_transmit_ctrl(unifi_priv_t *priv, struct list_head *txList)
-{
- /* dequeue the tx data packets from the appropriate queue */
- tx_buffered_packets_t *tx_q_item = NULL;
- struct list_head *listHead;
- struct list_head *placeHolder;
- unsigned long lock_flags;
-
-
- unifi_trace(priv, UDBG5, "entering set_eosp_transmit_ctrl\n");
- /* check for list empty */
- if (list_empty(txList)) {
- unifi_warning(priv, "In set_eosp_transmit_ctrl, the list is empty\n");
- return;
- }
-
-    /* Return the last node and modify it. */
-
- spin_lock_irqsave(&priv->tx_q_lock,lock_flags);
- list_for_each_prev_safe(listHead, placeHolder, txList) {
- tx_q_item = list_entry(listHead, tx_buffered_packets_t, q);
- tx_q_item->transmissionControl |= TRANSMISSION_CONTROL_EOSP_MASK;
- tx_q_item->transmissionControl = (tx_q_item->transmissionControl & ~(CSR_NO_CONFIRM_REQUIRED));
- unifi_trace(priv, UDBG1,
- "set_eosp_transmit_ctrl Transmission Control = 0x%x hostTag = 0x%x \n",tx_q_item->transmissionControl,tx_q_item->hostTag);
- unifi_trace(priv,UDBG3,"in set_eosp_transmit_ctrl no.of buffered frames %d\n",priv->noOfPktQueuedInDriver);
- break;
- }
- spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags);
- unifi_trace(priv, UDBG1,"List Empty %d\n",list_empty(txList));
- unifi_trace(priv, UDBG5, "leaving set_eosp_transmit_ctrl\n");
- return;
-}
-
-static
-void send_vif_availibility_rsp(unifi_priv_t *priv,CSR_VIF_IDENTIFIER vif,CSR_RESULT_CODE resultCode)
-{
- CSR_SIGNAL signal;
- CSR_MA_VIF_AVAILABILITY_RESPONSE *rsp;
- bulk_data_param_t *bulkdata = NULL;
- int r;
-
- unifi_trace(priv, UDBG3, "send_vif_availibility_rsp : invoked with resultCode = %d \n", resultCode);
-
- memset(&signal,0,sizeof(CSR_SIGNAL));
- rsp = &signal.u.MaVifAvailabilityResponse;
- rsp->VirtualInterfaceIdentifier = vif;
- rsp->ResultCode = resultCode;
- signal.SignalPrimitiveHeader.SignalId = CSR_MA_VIF_AVAILABILITY_RESPONSE_ID;
- signal.SignalPrimitiveHeader.ReceiverProcessId = 0;
- signal.SignalPrimitiveHeader.SenderProcessId = priv->netdev_client->sender_id;
-
- /* Send the signal to UniFi */
- r = ul_send_signal_unpacked(priv, &signal, bulkdata);
- if(r) {
- unifi_error(priv,"Availibility response sending failed %x status %d\n",vif,r);
- }
- else {
- unifi_trace(priv, UDBG3, "send_vif_availibility_rsp : status = %d \n", r);
- }
-}
-#endif
-
-static
-void verify_and_accomodate_tx_packet(unifi_priv_t *priv)
-{
- tx_buffered_packets_t *tx_q_item;
- unsigned long lock_flags;
- struct list_head *listHead, *list;
- struct list_head *placeHolder;
- u8 i, j,eospFramedeleted=0;
- u8 thresholdExcedeDueToBroadcast = TRUE;
-    /* This will be made interface-specific in the future when multiple interfaces
-       are supported; for now interface 0 is used */
- netInterface_priv_t *interfacePriv = priv->interfacePriv[0];
- CsrWifiRouterCtrlStaInfo_t *staInfo = NULL;
-
- unifi_trace(priv, UDBG3, "entering verify_and_accomodate_tx_packet\n");
-
- for(i = 0; i < UNIFI_MAX_CONNECTIONS; i++) {
- staInfo = interfacePriv->staInfo[i];
- if (staInfo && (staInfo->noOfPktQueued >= CSR_WIFI_DRIVER_MAX_PKT_QUEUING_THRESHOLD_PER_PEER)) {
-            /* Remove the first (oldest) packet from each access category, since the data
-             * packets for this station record crossed the threshold limit (64 for an AP
-             * supporting 8 peers)
-             */
- unifi_trace(priv,UDBG3,"number of station pkts queued= %d for sta id = %d\n", staInfo->noOfPktQueued, staInfo->aid);
- for(j = 0; j < MAX_ACCESS_CATOGORY; j++) {
- list = &staInfo->dataPdu[j];
- spin_lock_irqsave(&priv->tx_q_lock,lock_flags);
- list_for_each_safe(listHead, placeHolder, list) {
- tx_q_item = list_entry(listHead, tx_buffered_packets_t, q);
- list_del(listHead);
- thresholdExcedeDueToBroadcast = FALSE;
- unifi_net_data_free(priv, &tx_q_item->bulkdata);
- kfree(tx_q_item);
- tx_q_item = NULL;
- if (!priv->noOfPktQueuedInDriver) {
- unifi_error(priv, "packets queued in driver 0 still decrementing in %s\n", __FUNCTION__);
- } else {
- /* protection provided by spinlock */
- priv->noOfPktQueuedInDriver--;
-
- }
-                    /* The STA record is available for all unicast frames (except generic Mgmt frames); otherwise it is NULL */
- if (!staInfo->noOfPktQueued) {
- unifi_error(priv, "packets queued in driver per station is 0 still decrementing in %s\n", __FUNCTION__);
- } else {
- spin_lock(&priv->staRecord_lock);
- staInfo->noOfPktQueued--;
- spin_unlock(&priv->staRecord_lock);
- }
- break;
- }
- spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags);
- }
- }
- }
- if (thresholdExcedeDueToBroadcast && interfacePriv->noOfbroadcastPktQueued > CSR_WIFI_DRIVER_MINIMUM_BROADCAST_PKT_THRESHOLD ) {
- /* Remove the packets from genericMulticastOrBroadCastFrames queue
- * (the max packets in driver is reached due to broadcast/multicast frames)
- */
- spin_lock_irqsave(&priv->tx_q_lock,lock_flags);
- list_for_each_safe(listHead, placeHolder, &interfacePriv->genericMulticastOrBroadCastFrames) {
- tx_q_item = list_entry(listHead, tx_buffered_packets_t, q);
- if(eospFramedeleted){
- tx_q_item->transmissionControl |= TRANSMISSION_CONTROL_EOSP_MASK;
- tx_q_item->transmissionControl = (tx_q_item->transmissionControl & ~(CSR_NO_CONFIRM_REQUIRED));
- unifi_trace(priv, UDBG1,"updating eosp for next packet hostTag:= 0x%x ",tx_q_item->hostTag);
- eospFramedeleted =0;
- break;
- }
-
- if(tx_q_item->transmissionControl & TRANSMISSION_CONTROL_EOSP_MASK ){
- eospFramedeleted = 1;
- }
- unifi_trace(priv,UDBG1, "freeing of multicast packets ToC = 0x%x hostTag = 0x%x \n",tx_q_item->transmissionControl,tx_q_item->hostTag);
- list_del(listHead);
- unifi_net_data_free(priv, &tx_q_item->bulkdata);
- kfree(tx_q_item);
- priv->noOfPktQueuedInDriver--;
- spin_lock(&priv->staRecord_lock);
- interfacePriv->noOfbroadcastPktQueued--;
- spin_unlock(&priv->staRecord_lock);
- if(!eospFramedeleted){
- break;
- }
- }
- spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags);
- }
- unifi_trace(priv, UDBG3, "leaving verify_and_accomodate_tx_packet\n");
-}
-
-static
-CsrResult enque_tx_data_pdu(unifi_priv_t *priv, bulk_data_param_t *bulkdata,
- struct list_head *list, CSR_SIGNAL *signal,
- u8 requeueOnSamePos)
-{
-
- /* queue the tx data packets on to appropriate queue */
- CSR_MA_PACKET_REQUEST *req = &signal->u.MaPacketRequest;
- tx_buffered_packets_t *tx_q_item;
- unsigned long lock_flags;
-
- unifi_trace(priv, UDBG5, "entering enque_tx_data_pdu\n");
- if(!list) {
- unifi_error(priv,"List is not specified\n");
- return CSR_RESULT_FAILURE;
- }
-
- /* Removes aged packets & adds the incoming packet */
- if (priv->noOfPktQueuedInDriver >= CSR_WIFI_DRIVER_SUPPORT_FOR_MAX_PKT_QUEUEING) {
- unifi_trace(priv,UDBG3,"number of pkts queued= %d \n", priv->noOfPktQueuedInDriver);
- verify_and_accomodate_tx_packet(priv);
- }
-
-
-
- tx_q_item = kmalloc(sizeof(tx_buffered_packets_t), GFP_ATOMIC);
- if (tx_q_item == NULL) {
- unifi_error(priv,
- "Failed to allocate %d bytes for tx packet record\n",
- sizeof(tx_buffered_packets_t));
- return CSR_RESULT_FAILURE;
- }
-
- /* disable the preemption */
- spin_lock_irqsave(&priv->tx_q_lock,lock_flags);
- INIT_LIST_HEAD(&tx_q_item->q);
- /* fill the tx_q structure members */
- tx_q_item->bulkdata.os_data_ptr = bulkdata->d[0].os_data_ptr;
- tx_q_item->bulkdata.data_length = bulkdata->d[0].data_length;
- tx_q_item->bulkdata.os_net_buf_ptr = bulkdata->d[0].os_net_buf_ptr;
- tx_q_item->bulkdata.net_buf_length = bulkdata->d[0].net_buf_length;
- tx_q_item->interfaceTag = req->VirtualInterfaceIdentifier & 0xff;
- tx_q_item->hostTag = req->HostTag;
- tx_q_item->leSenderProcessId = signal->SignalPrimitiveHeader.SenderProcessId;
- tx_q_item->transmissionControl = req->TransmissionControl;
- tx_q_item->priority = req->Priority;
- tx_q_item->rate = req->TransmitRate;
- memcpy(tx_q_item->peerMacAddress.a, req->Ra.x, ETH_ALEN);
-
-
-
- if (requeueOnSamePos) {
- list_add(&tx_q_item->q, list);
- } else {
- list_add_tail(&tx_q_item->q, list);
- }
-
- /* Count of packet queued in driver */
- priv->noOfPktQueuedInDriver++;
- spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags);
- unifi_trace(priv, UDBG5, "leaving enque_tx_data_pdu\n");
- return CSR_RESULT_SUCCESS;
-}
-
-#ifdef CSR_WIFI_REQUEUE_PACKET_TO_HAL
-CsrResult unifi_reque_ma_packet_request (void *ospriv, u32 host_tag,
- u16 txStatus, bulk_data_desc_t *bulkDataDesc)
-{
- CsrResult status = CSR_RESULT_SUCCESS;
- unifi_priv_t *priv = (unifi_priv_t*)ospriv;
- netInterface_priv_t *interfacePriv;
- struct list_head *list = NULL;
- CsrWifiRouterCtrlStaInfo_t *staRecord = NULL;
- bulk_data_param_t bulkData;
- CSR_SIGNAL signal;
- CSR_PRIORITY priority = 0;
- u16 interfaceTag = 0;
- unifi_TrafficQueue priority_q;
- u16 frameControl = 0, frameType = 0;
- unsigned long lock_flags;
-
- interfacePriv = priv->interfacePriv[interfaceTag];
-
- /* If the current mode is not AP or P2PGO then just return failure
- * to clear the hip slot
- */
- if(!((interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP) ||
- (interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO))) {
- return CSR_RESULT_FAILURE;
- }
-
- unifi_trace(priv, UDBG6, "unifi_reque_ma_packet_request: host_tag = 0x%x\n", host_tag);
-
- staRecord = CsrWifiRouterCtrlGetStationRecordFromPeerMacAddress(priv,
- (((u8 *) bulkDataDesc->os_data_ptr) + 4),
- interfaceTag);
- if (NULL == staRecord) {
- unifi_trace(priv, UDBG5, "unifi_reque_ma_packet_request: Invalid STA record \n");
- return CSR_RESULT_FAILURE;
- }
-
- /* Update TIM if MA-PACKET.cfm fails with status as Tx-retry-limit or No-BSS and then just return failure
- * to clear the hip slot associated with the Packet
- */
- if (CSR_TX_RETRY_LIMIT == txStatus || CSR_TX_NO_BSS == txStatus) {
- if (staRecord->timSet == CSR_WIFI_TIM_RESET || staRecord->timSet == CSR_WIFI_TIM_RESETTING)
- {
- unifi_trace(priv, UDBG2, "unifi_reque_ma_packet_request: CFM failed with Retry Limit or No BSS-->update TIM\n");
- if (!staRecord->timRequestPendingFlag) {
- update_tim(priv, staRecord->aid, 1, interfaceTag, staRecord->assignedHandle);
- }
- else {
-                /* Cache the TimSet value so that it will be processed immediately after
-                 * the current setTim request completes
-                 */
- staRecord->updateTimReqQueued = 1;
- unifi_trace(priv, UDBG6, "unifi_reque_ma_packet_request: One more UpdateTim Request(:%d)Queued for AID %x\n",
- staRecord->updateTimReqQueued, staRecord->aid);
- }
- }
- return CSR_RESULT_FAILURE;
- }
- else if ((CSR_TX_LIFETIME == txStatus) || (CSR_TX_BLOCK_ACK_TIMEOUT == txStatus) ||
- (CSR_TX_FAIL_TRANSMISSION_VIF_INTERRUPTED == txStatus) ||
- (CSR_TX_REJECTED_PEER_STATION_SLEEPING == txStatus) ||
- (CSR_TX_REJECTED_DTIM_STARTED == txStatus)) {
- /* Extract the Frame control and the frame type */
- frameControl = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(bulkDataDesc->os_data_ptr);
- frameType = ((frameControl & IEEE80211_FC_TYPE_MASK) >> FRAME_CONTROL_TYPE_FIELD_OFFSET);
-
- /* Mgmt frames will not be re-queued for Tx
- * so just return failure to clear the hip slot
- */
- if (IEEE802_11_FRAMETYPE_MANAGEMENT == frameType) {
- return CSR_RESULT_FAILURE;
- }
- else if (IEEE802_11_FRAMETYPE_DATA == frameType) {
- /* QOS NULL and DATA NULL frames will not be re-queued for Tx
- * so just return failure to clear the hip slot
- */
- if ((((frameControl & IEEE80211_FC_SUBTYPE_MASK) >> FRAME_CONTROL_SUBTYPE_FIELD_OFFSET) == QOS_DATA_NULL) ||
- (((frameControl & IEEE80211_FC_SUBTYPE_MASK) >> FRAME_CONTROL_SUBTYPE_FIELD_OFFSET)== DATA_NULL )) {
- return CSR_RESULT_FAILURE;
- }
- }
-
- /* Extract the Packet priority */
- if (TRUE == staRecord->wmmOrQosEnabled) {
- u16 qosControl = 0;
- u8 dataFrameType = 0;
-
- dataFrameType =((frameControl & IEEE80211_FC_SUBTYPE_MASK) >> 4);
-
- if (dataFrameType == QOS_DATA) {
- /* QoS control field is offset from frame control by 2 (frame control)
- * + 2 (duration/ID) + 2 (sequence control) + 3*ETH_ALEN or 4*ETH_ALEN
- */
- if((frameControl & IEEE802_11_FC_TO_DS_MASK) && (frameControl & IEEE802_11_FC_FROM_DS_MASK)) {
- qosControl= CSR_GET_UINT16_FROM_LITTLE_ENDIAN(bulkDataDesc->os_data_ptr + 30);
- }
- else {
- qosControl = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(bulkDataDesc->os_data_ptr + 24);
- }
- }
-
- priority = (CSR_PRIORITY)(qosControl & IEEE802_11_QC_TID_MASK);
-
- if (priority < CSR_QOS_UP0 || priority > CSR_QOS_UP7) {
- unifi_trace(priv, UDBG5, "unifi_reque_ma_packet_request: Invalid priority:%x \n", priority);
- return CSR_RESULT_FAILURE;
- }
- }
- else {
- priority = CSR_CONTENTION;
- }
-
- /* Frame Bulk data to requeue it back to HAL Queues */
- bulkData.d[0].os_data_ptr = bulkDataDesc->os_data_ptr;
- bulkData.d[0].data_length = bulkDataDesc->data_length;
- bulkData.d[0].os_net_buf_ptr = bulkDataDesc->os_net_buf_ptr;
- bulkData.d[0].net_buf_length = bulkDataDesc->net_buf_length;
-
- bulkData.d[1].os_data_ptr = NULL;
- bulkData.d[1].os_net_buf_ptr = NULL;
- bulkData.d[1].data_length = bulkData.d[1].net_buf_length = 0;
-
- /* Initialize signal to zero */
- memset(&signal, 0, sizeof(CSR_SIGNAL));
-
- /* Frame MA Packet Req */
- unifi_frame_ma_packet_req(priv, priority, 0, host_tag,
- interfaceTag, CSR_NO_CONFIRM_REQUIRED,
- priv->netdev_client->sender_id,
- staRecord->peerMacAddress.a, &signal);
-
- /* Find the Q-Priority */
- priority_q = unifi_frame_priority_to_queue(priority);
- list = &staRecord->dataPdu[priority_q];
-
- /* Place the Packet on to HAL Queue */
- status = enque_tx_data_pdu(priv, &bulkData, list, &signal, TRUE);
-
- /* Update the Per-station queued packet counter */
- if (!status) {
- spin_lock_irqsave(&priv->staRecord_lock, lock_flags);
- staRecord->noOfPktQueued++;
- spin_unlock_irqrestore(&priv->staRecord_lock, lock_flags);
- }
- }
- else {
- /* Packet will not be re-queued for any of the other MA Packet Tx failure
- * reasons so just return failure to clear the hip slot
- */
- return CSR_RESULT_FAILURE;
- }
-
- return status;
-}
-#endif
-
-static void is_all_ac_deliver_enabled_and_moredata(CsrWifiRouterCtrlStaInfo_t *staRecord, u8 *allDeliveryEnabled, u8 *dataAvailable)
-{
- u8 i;
- *allDeliveryEnabled = TRUE;
- for (i = 0 ;i < MAX_ACCESS_CATOGORY; i++) {
- if (!IS_DELIVERY_ENABLED(staRecord->powersaveMode[i])) {
- /* One is is not Delivery Enabled */
- *allDeliveryEnabled = FALSE;
- break;
- }
- }
- if (*allDeliveryEnabled) {
- *dataAvailable = (!list_empty(&staRecord->dataPdu[0]) || !list_empty(&staRecord->dataPdu[1])
- ||!list_empty(&staRecord->dataPdu[2]) ||!list_empty(&staRecord->dataPdu[3])
- ||!list_empty(&staRecord->mgtFrames));
- }
-}
-
-/*
- * ---------------------------------------------------------------------------
- * uf_handle_tim_cfm
- *
- *
- *  This function updates the TIM status in the host depending on the confirm status from the firmware
- *
- * Arguments:
- * priv Pointer to device private context struct
- * cfm CSR_MLME_SET_TIM_CONFIRM
- * receiverProcessId SenderProcessID to fetch handle & timSet status
- *
- * ---------------------------------------------------------------------------
- */
-void uf_handle_tim_cfm(unifi_priv_t *priv, CSR_MLME_SET_TIM_CONFIRM *cfm, u16 receiverProcessId)
-{
- u8 handle = CSR_WIFI_GET_STATION_HANDLE_FROM_RECEIVER_ID(receiverProcessId);
- u8 timSetStatus = CSR_WIFI_GET_TIMSET_STATE_FROM_RECEIVER_ID(receiverProcessId);
- u16 interfaceTag = (cfm->VirtualInterfaceIdentifier & 0xff);
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
- CsrWifiRouterCtrlStaInfo_t *staRecord = NULL;
- /* This variable holds what TIM value we wanted to set in firmware */
- u16 timSetValue = 0;
-    /* The count is maintained irrespective of the interface */
- static u8 retryCount = 0;
- unsigned long lock_flags;
- unifi_trace(priv, UDBG3, "entering %s, handle = %x, timSetStatus = %x\n", __FUNCTION__, handle, timSetStatus);
-
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_warning(priv, "bad interfaceTag = %x\n", interfaceTag);
- return;
- }
-
- if ((handle != CSR_WIFI_BROADCAST_OR_MULTICAST_HANDLE) && (handle >= UNIFI_MAX_CONNECTIONS)) {
- unifi_warning(priv, "bad station Handle = %x\n", handle);
- return;
- }
-
- if (handle != CSR_WIFI_BROADCAST_OR_MULTICAST_HANDLE) {
- spin_lock_irqsave(&priv->staRecord_lock,lock_flags);
- if ((staRecord = ((CsrWifiRouterCtrlStaInfo_t *) (interfacePriv->staInfo[handle]))) == NULL) {
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
- unifi_warning(priv, "uf_handle_tim_cfm: station record is NULL handle = %x\n", handle);
- return;
- }
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
- }
- switch(timSetStatus)
- {
- case CSR_WIFI_TIM_SETTING:
- timSetValue = CSR_WIFI_TIM_SET;
- break;
- case CSR_WIFI_TIM_RESETTING:
- timSetValue = CSR_WIFI_TIM_RESET;
- break;
- default:
- unifi_warning(priv, "timSet state is %x: Debug\n", timSetStatus);
- return;
- }
-
- /* check TIM confirm for success/failures */
- switch(cfm->ResultCode)
- {
- case CSR_RC_SUCCESS:
- if (handle != CSR_WIFI_BROADCAST_OR_MULTICAST_HANDLE) {
- /* Unicast frame & station record available */
- if (timSetStatus == staRecord->timSet) {
- staRecord->timSet = timSetValue;
-                /* fh_cmd_q can also be full at some point in time!
-                 * Reset the count, as the queue has been cleaned by the firmware at this point
-                 */
- retryCount = 0;
- unifi_trace(priv, UDBG2, "tim (%s) successfully in firmware\n", (timSetValue)?"SET":"RESET");
- } else {
- unifi_trace(priv, UDBG3, "receiver processID = %x, success: request & confirm states are not matching in TIM cfm: Debug status = %x, staRecord->timSet = %x, handle = %x\n",
- receiverProcessId, timSetStatus, staRecord->timSet, handle);
- }
-
- /* Reset TIM pending flag to send next TIM request */
- staRecord->timRequestPendingFlag = FALSE;
-
-            /* Check whether another UpdateTim request is queued; if so, its value
-             * should be CSR_WIFI_TIM_SET or CSR_WIFI_TIM_RESET
-             */
- if (0xFF != staRecord->updateTimReqQueued)
- {
- /* Process the UpdateTim Request which is queued while previous UpdateTim was in progress */
- if (staRecord->timSet != staRecord->updateTimReqQueued)
- {
- unifi_trace(priv, UDBG2, "uf_handle_tim_cfm : Processing Queued UpdateTimReq \n");
-
- update_tim(priv, staRecord->aid, staRecord->updateTimReqQueued, interfaceTag, handle);
-
- staRecord->updateTimReqQueued = 0xFF;
- }
- }
- } else {
-
- interfacePriv->bcTimSet = timSetValue;
-            /* fh_cmd_q can also be full at some point in time!
-             * Reset the count, as the queue has been cleaned by the firmware at this point
-             */
- retryCount = 0;
- unifi_trace(priv, UDBG3, "tim (%s) successfully for broadcast frame in firmware\n", (timSetValue)?"SET":"RESET");
-
- /* Reset DTIM pending flag to send next DTIM request */
- interfacePriv->bcTimSetReqPendingFlag = FALSE;
-
-            /* Check whether another UpdateDTim request is queued; if so, its value
-             * should be CSR_WIFI_TIM_SET or CSR_WIFI_TIM_RESET
-             */
- if (0xFF != interfacePriv->bcTimSetReqQueued)
- {
- /* Process the UpdateTim Request which is queued while previous UpdateTim was in progress */
- if (interfacePriv->bcTimSet != interfacePriv->bcTimSetReqQueued)
- {
- unifi_trace(priv, UDBG2, "uf_handle_tim_cfm : Processing Queued UpdateDTimReq \n");
-
- update_tim(priv, 0, interfacePriv->bcTimSetReqQueued, interfaceTag, 0xFFFFFFFF);
-
- interfacePriv->bcTimSetReqQueued = 0xFF;
- }
- }
-
- }
- break;
- case CSR_RC_INVALID_PARAMETERS:
- case CSR_RC_INSUFFICIENT_RESOURCE:
-            /* Check for the max retry limit and send again.
-             * MAX_RETRY_LIMIT is not maintained per set of transactions - it is generic.
-             * If the failures cross this limit, a decision has to be taken on how to fix it.
-             */
- if (retryCount > UNIFI_MAX_RETRY_LIMIT) {
- u8 moreData = FALSE;
- retryCount = 0;
-            /* Because of continuous traffic in fh_cmd_q the TIM set request keeps failing (exceeding the retry limit),
-             * but if we do not synchronise our timSet variable with the firmware state the following issues can arise:
-             * cond 1. We want to SET the TIM in firmware, but it fails and the max retry limit is reached
-             *     -> If the host sets timSet to 1, we will not try to send another update TIM (max retries reached), but
-             *        the firmware has not been told the queue (TIM) status, so it will not set the TIM in the beacon;
-             *        the host keeps piling up data and never retries the TIM set (this can degrade performance)
-             * cond 2. We want to RESET the TIM in firmware, but it fails and the max retry limit is reached
-             *     -> If the host sets timSet to zero, it will not issue a TIM request until there are no packets left
-             *        to be queued, so the beacon needlessly advertises the TIM
-             */
-
- if(staRecord) {
- if(!staRecord->wmmOrQosEnabled) {
- moreData = (!list_empty(&staRecord->dataPdu[UNIFI_TRAFFIC_Q_CONTENTION]) ||
- !list_empty(&staRecord->dataPdu[UNIFI_TRAFFIC_Q_VO]) ||
- !list_empty(&staRecord->mgtFrames));
- } else {
- /* Peer is QSTA */
- u8 allDeliveryEnabled = 0, dataAvailable = 0;
- /* Check if all AC's are Delivery Enabled */
- is_all_ac_deliver_enabled_and_moredata(staRecord, &allDeliveryEnabled, &dataAvailable);
- /*check for more data in non-delivery enabled queues*/
- moreData = (uf_is_more_data_for_non_delivery_ac(staRecord) || (allDeliveryEnabled && dataAvailable));
-
- }
-                /* To avoid conditions 1 and 2, check the internal queue status: if there is more data,
-                 * RESET timSet (0) so that we stay in sync with the firmware and the next packets queued
-                 * at least try to set the TIM in firmware; otherwise SET timSet (1)
-                 */
- if (moreData) {
- staRecord->timSet = CSR_WIFI_TIM_RESET;
- } else {
- staRecord->timSet = CSR_WIFI_TIM_SET;
- }
- } else {
-                /* These are broadcast frames */
- moreData = (!list_empty(&interfacePriv->genericMulticastOrBroadCastMgtFrames) ||
- !list_empty(&interfacePriv->genericMulticastOrBroadCastFrames));
- if (moreData) {
- update_tim(priv, 0, CSR_WIFI_TIM_SET, interfaceTag, 0xFFFFFFFF);
- } else {
- update_tim(priv, 0, CSR_WIFI_TIM_RESET, interfaceTag, 0xFFFFFFFF);
- }
- }
-
-            unifi_error(priv, "number of errors for TIM setting crossed the limit: verify\n");
- return;
- }
- retryCount++;
-
- if (handle != CSR_WIFI_BROADCAST_OR_MULTICAST_HANDLE) {
- if (timSetStatus == staRecord->timSet) {
- unifi_warning(priv, "tim request failed, retry for AID = %x\n", staRecord->aid);
- update_tim(priv, staRecord->aid, timSetValue, interfaceTag, handle);
- } else {
- unifi_trace(priv, UDBG1, "failure: request & confirm states are not matching in TIM cfm: Debug status = %x, staRecord->timSet = %x\n",
- timSetStatus, staRecord->timSet);
- }
- } else {
- unifi_warning(priv, "tim request failed, retry for broadcast frames\n");
- update_tim(priv, 0, timSetValue, interfaceTag, 0xFFFFFFFF);
- }
- break;
- default:
- unifi_warning(priv, "tim update request failed resultcode = %x\n", cfm->ResultCode);
- }
-
- unifi_trace(priv, UDBG2, "leaving %s\n", __FUNCTION__);
-}
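The station handle and the pending TIM state travel in the low byte of the signal's sender process ID, packed as described in the update_tim() comment below (6 bits of handle, 2 bits of state) and unpacked again in uf_handle_tim_cfm() above. A small sketch of that packing, with hypothetical macros in place of the driver's CSR_WIFI_PACK_SENDER_ID_LSB_FOR_TIM_REQ and the matching GET macros:

    #include <stdio.h>

    /* Hypothetical pack/unpack helpers for the low sender-ID byte described below:
     * the upper 6 bits carry the station handle, the lower 2 bits the timSet state.
     * These names are illustrative, not the driver's CSR_WIFI_* macros. */
    #define TIM_PACK_LSB(handle, state)  ((unsigned char)(((handle) << 2) | ((state) & 0x3)))
    #define TIM_LSB_HANDLE(lsb)          (((lsb) >> 2) & 0x3f)
    #define TIM_LSB_STATE(lsb)           ((lsb) & 0x3)

    int main(void)
    {
        unsigned char lsb = TIM_PACK_LSB(5u, 3u);   /* station handle 5, state 3 (example values) */

        printf("lsb=0x%02x handle=%u state=%u\n",
               (unsigned int)lsb,
               (unsigned int)TIM_LSB_HANDLE(lsb),
               (unsigned int)TIM_LSB_STATE(lsb));
        return 0;
    }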
-
-/*
- * ---------------------------------------------------------------------------
- * update_tim
- *
- *
- * This function updates tim status in firmware for AID[1 to UNIFI_MAX_CONNECTIONS] or
- * AID[0] for broadcast/multicast packets.
- *
- * NOTE: The LSB (least significant byte) of the senderId used when sending this MLME primitive
- * is utilised as described below.
- *
- * The SenderID in the signal's SignalPrimitiveHeader is 2 bytes; the bitmap of its low byte is:
- *
- * station handle(6 bits) timSet Status (2 bits)
- * --------------------- ----------------------
- * 0 0 0 0 0 0 | 0 0
- *
- * timSet Status can be one of below:
- *
- * CSR_WIFI_TIM_RESET
- * CSR_WIFI_TIM_RESETTING
- * CSR_WIFI_TIM_SET
- * CSR_WIFI_TIM_SETTING
- *
- * Arguments:
- * priv Pointer to device private context struct
- * aid can be 1 to UNIFI_MAX_CONNECTIONS; 0 means multicast/broadcast
- * setTim value SET(1) / RESET(0)
- * interfaceTag the interface ID on which the activity is going on
- * handle from (0 <= handle < UNIFI_MAX_CONNECTIONS)
- *
- * ---------------------------------------------------------------------------
- */
-void update_tim(unifi_priv_t * priv, u16 aid, u8 setTim, u16 interfaceTag, u32 handle)
-{
- CSR_SIGNAL signal;
- s32 r;
- CSR_MLME_SET_TIM_REQUEST *req = &signal.u.MlmeSetTimRequest;
- bulk_data_param_t *bulkdata = NULL;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
- u8 senderIdLsb = 0;
- CsrWifiRouterCtrlStaInfo_t *staRecord = NULL;
- u32 oldTimSetStatus = 0, timSetStatus = 0;
-
- unifi_trace(priv, UDBG5, "entering the update_tim routine\n");
-
-
- if (handle == 0xFFFFFFFF) {
- handle &= CSR_WIFI_BROADCAST_OR_MULTICAST_HANDLE;
- if (setTim == interfacePriv->bcTimSet)
- {
- unifi_trace(priv, UDBG3, "update_tim, Drop:Hdl=%x, timval=%d, globalTim=%d\n", handle, setTim, interfacePriv->bcTimSet);
- return;
- }
- } else if ((handle != 0xFFFFFFFF) && (handle >= UNIFI_MAX_CONNECTIONS)) {
- unifi_warning(priv, "bad station Handle = %x\n", handle);
- return;
- }
-
- if (setTim) {
- timSetStatus = CSR_WIFI_TIM_SETTING;
- } else {
- timSetStatus = CSR_WIFI_TIM_RESETTING;
- }
-
- if (handle != CSR_WIFI_BROADCAST_OR_MULTICAST_HANDLE) {
- if ((staRecord = ((CsrWifiRouterCtrlStaInfo_t *) (interfacePriv->staInfo[handle]))) == NULL) {
- unifi_warning(priv, "station record is NULL in update_tim: handle = %x :debug\n", handle);
- return;
- }
- /* In case of signal sending failed, revert back to old state */
- oldTimSetStatus = staRecord->timSet;
- staRecord->timSet = timSetStatus;
- }
-
- /* pack senderID LSB */
- senderIdLsb = CSR_WIFI_PACK_SENDER_ID_LSB_FOR_TIM_REQ(handle, timSetStatus);
-
- /* initialize signal to zero */
- memset(&signal, 0, sizeof(CSR_SIGNAL));
-
- /* Frame the MLME-SET-TIM request */
- signal.SignalPrimitiveHeader.SignalId = CSR_MLME_SET_TIM_REQUEST_ID;
- signal.SignalPrimitiveHeader.ReceiverProcessId = 0;
- CSR_COPY_UINT16_TO_LITTLE_ENDIAN(((priv->netdev_client->sender_id & 0xff00) | senderIdLsb),
- (u8*)&signal.SignalPrimitiveHeader.SenderProcessId);
-
- /* set The virtual interfaceIdentifier, aid, tim value */
- req->VirtualInterfaceIdentifier = uf_get_vif_identifier(interfacePriv->interfaceMode,interfaceTag);
- req->AssociationId = aid;
- req->TimValue = setTim;
-
-
- unifi_trace(priv, UDBG2, "update_tim:AID %x,senderIdLsb = 0x%x, handle = 0x%x, timSetStatus = %x, sender proceesID = %x \n",
- aid,senderIdLsb, handle, timSetStatus, signal.SignalPrimitiveHeader.SenderProcessId);
-
- /* Send the signal to UniFi */
- r = ul_send_signal_unpacked(priv, &signal, bulkdata);
- if (r) {
- /* No need to free bulk data, as the TIM request doesn't carry any data */
- unifi_error(priv, "Error queueing CSR_MLME_SET_TIM_REQUEST signal\n");
- if (staRecord) {
- staRecord->timSet = oldTimSetStatus ;
- }
- else
- {
- /* MLME_SET_TIM.req sending failed here for AID0, so revert back our bcTimSet status */
- interfacePriv->bcTimSet = !setTim;
- }
- }
- else {
- /* Update tim request pending flag and ensure no more TIM set requests are sent
- for the same station until TIM confirm is received */
- if (staRecord) {
- staRecord->timRequestPendingFlag = TRUE;
- }
- else
- {
- /* Update tim request (for AID 0) pending flag and ensure no more DTIM set requests are sent
- * for the same station until TIM confirm is received
- */
- interfacePriv->bcTimSetReqPendingFlag = TRUE;
- }
- }
- unifi_trace(priv, UDBG5, "leaving the update_tim routine\n");
-}
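-
-/*
- * Illustrative sketch (an assumption, not the original driver definition): given
- * the senderId LSB bitmap documented above update_tim() -- station handle in the
- * upper six bits, timSet status in the lower two bits -- the packing performed by
- * CSR_WIFI_PACK_SENDER_ID_LSB_FOR_TIM_REQ could look roughly like:
- *
- *     lsb = (u8)(((handle & 0x3f) << 2) | (timSetStatus & 0x3));
- *
- * so a TIM confirm handler can recover the two values with (lsb >> 2) and
- * (lsb & 0x3). The real macro may differ in detail.
- */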
-
-static
-void process_peer_active_transition(unifi_priv_t * priv,
- CsrWifiRouterCtrlStaInfo_t *staRecord,
- u16 interfaceTag)
-{
- int r,i;
- u8 spaceAvail[4] = {TRUE,TRUE,TRUE,TRUE};
- tx_buffered_packets_t * buffered_pkt = NULL;
- unsigned long lock_flags;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
-
- unifi_trace(priv, UDBG5, "entering process_peer_active_transition\n");
-
- if(IS_DTIM_ACTIVE(interfacePriv->dtimActive,interfacePriv->multicastPduHostTag)) {
- /* giving more priority to multicast packets, so delaying unicast packets */
- unifi_trace(priv,UDBG2, "Multicast transmission is going on so resume unicast transmission after DTIM over\n");
-
- /* As station is active now, even though AP is not able to send frames to it
- * because of DTIM, it needs to reset the TIM here
- */
- if (!staRecord->timRequestPendingFlag){
- if((staRecord->timSet == CSR_WIFI_TIM_SET) || (staRecord->timSet == CSR_WIFI_TIM_SETTING)){
- update_tim(priv, staRecord->aid, 0, interfaceTag, staRecord->assignedHandle);
- }
- }
- else
- {
- /* Cache the TimSet value so that it will be processed immediately after
- * completing the current setTim Request
- */
- staRecord->updateTimReqQueued = 0;
- unifi_trace(priv, UDBG6, "update_tim : One more UpdateTim Request (Tim value:%d) Queued for AID %x\n", staRecord->updateTimReqQueued,
- staRecord->aid);
- }
- return;
- }
- while((buffered_pkt=dequeue_tx_data_pdu(priv, &staRecord->mgtFrames))) {
- buffered_pkt->transmissionControl &=
- ~(TRANSMISSION_CONTROL_TRIGGER_MASK|TRANSMISSION_CONTROL_EOSP_MASK);
- if((r=frame_and_send_queued_pdu(priv,buffered_pkt,staRecord,0,FALSE)) == -ENOSPC) {
- unifi_trace(priv, UDBG2, "p_p_a_t:(ENOSPC) Mgt Frame queueing \n");
- /* Enqueue at the head of the queue */
- spin_lock_irqsave(&priv->tx_q_lock,lock_flags);
- list_add(&buffered_pkt->q, &staRecord->mgtFrames);
- spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags);
- priv->pausedStaHandle[3]=(u8)(staRecord->assignedHandle);
- spaceAvail[3] = FALSE;
- break;
- } else {
- if(r){
- unifi_trace (priv, UDBG1, " HIP validation failure : PDU sending failed \n");
- /* the PDU failed and we can't do anything, so free the storage */
- unifi_net_data_free(priv, &buffered_pkt->bulkdata);
- }
- kfree(buffered_pkt);
- }
- }
- if (!staRecord->timRequestPendingFlag) {
- if (staRecord->txSuspend) {
- if(staRecord->timSet == CSR_WIFI_TIM_SET) {
- update_tim(priv,staRecord->aid,0,interfaceTag, staRecord->assignedHandle);
- }
- return;
- }
- }
- else
- {
- /* Cache the TimSet value so that it will be processed immediately after
- * completing the current setTim Request
- */
- staRecord->updateTimReqQueued = 0;
- unifi_trace(priv, UDBG6, "update_tim : One more UpdateTim Request (Tim value:%d) Queued for AID %x\n", staRecord->updateTimReqQueued,
- staRecord->aid);
- }
- for(i=3;i>=0;i--) {
- if(!spaceAvail[i])
- continue;
- unifi_trace(priv, UDBG6, "p_p_a_t:data pkt sending for AC %d \n",i);
- while((buffered_pkt=dequeue_tx_data_pdu(priv, &staRecord->dataPdu[i]))) {
- buffered_pkt->transmissionControl &=
- ~(TRANSMISSION_CONTROL_TRIGGER_MASK|TRANSMISSION_CONTROL_EOSP_MASK);
- if((r=frame_and_send_queued_pdu(priv,buffered_pkt,staRecord,0,FALSE)) == -ENOSPC) {
- /* Clear the trigger bit transmission control*/
- /* Enqueue at the head of the queue */
- spin_lock_irqsave(&priv->tx_q_lock,lock_flags);
- list_add(&buffered_pkt->q, &staRecord->dataPdu[i]);
- spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags);
- priv->pausedStaHandle[i]=(u8)(staRecord->assignedHandle);
- break;
- } else {
- if(r){
- unifi_trace (priv, UDBG1, " HIP validation failure : PDU sending failed \n");
- /* the PDU failed and we can't do anything, so free the storage */
- unifi_net_data_free(priv, &buffered_pkt->bulkdata);
- }
- kfree(buffered_pkt);
- }
- }
- }
- if (!staRecord->timRequestPendingFlag){
- if((staRecord->timSet == CSR_WIFI_TIM_SET) || (staRecord->timSet == CSR_WIFI_TIM_SETTING)) {
- unifi_trace(priv, UDBG3, "p_p_a_t:resetting tim .....\n");
- update_tim(priv,staRecord->aid,0,interfaceTag, staRecord->assignedHandle);
- }
- }
- else
- {
- /* Cache the TimSet value so that it will be processed immediately after
- * completing the current setTim Request
- */
- staRecord->updateTimReqQueued = 0;
- unifi_trace(priv, UDBG6, "update_tim : One more UpdateTim Request (Tim value:%d) Queued for AID %x\n", staRecord->updateTimReqQueued,
- staRecord->aid);
- }
- unifi_trace(priv, UDBG5, "leaving process_peer_active_transition\n");
-}
-
-
-
-void uf_process_ma_pkt_cfm_for_ap(unifi_priv_t *priv,u16 interfaceTag, const CSR_MA_PACKET_CONFIRM *pkt_cfm)
-{
- netInterface_priv_t *interfacePriv;
- u8 i;
- CsrWifiRouterCtrlStaInfo_t *staRecord = NULL;
- interfacePriv = priv->interfacePriv[interfaceTag];
-
-
- if(pkt_cfm->HostTag == interfacePriv->multicastPduHostTag) {
- unifi_trace(priv,UDBG2,"CFM for marked Multicast Tag = %x\n",interfacePriv->multicastPduHostTag);
- interfacePriv->multicastPduHostTag = 0xffffffff;
- resume_suspended_uapsd(priv,interfaceTag);
- resume_unicast_buffered_frames(priv,interfaceTag);
- if(list_empty(&interfacePriv->genericMulticastOrBroadCastMgtFrames) &&
- list_empty(&interfacePriv->genericMulticastOrBroadCastFrames)) {
- unifi_trace(priv,UDBG1,"Resetting multicastTIM");
- if (!interfacePriv->bcTimSetReqPendingFlag)
- {
- update_tim(priv,0,CSR_WIFI_TIM_RESET,interfaceTag, 0xFFFFFFFF);
- }
- else
- {
- /* Cache the DTimSet value so that it will be processed immediately after
- * completing the current setDTim Request
- */
- interfacePriv->bcTimSetReqQueued = CSR_WIFI_TIM_RESET;
- unifi_trace(priv, UDBG2, "uf_process_ma_pkt_cfm_for_ap : One more UpdateDTim Request(%d) Queued \n",
- interfacePriv->bcTimSetReqQueued);
- }
-
- }
- return;
- }
-
- /* Check if it is a Confirm for null data frame used
- * for probing station activity
- */
- for(i =0; i < UNIFI_MAX_CONNECTIONS; i++) {
- staRecord = (CsrWifiRouterCtrlStaInfo_t *) (interfacePriv->staInfo[i]);
- if (staRecord && (staRecord->nullDataHostTag == pkt_cfm->HostTag)) {
-
- unifi_trace(priv, UDBG1, "CFM for Inactive probe Null frame (tag = %x, status = %d)\n",
- pkt_cfm->HostTag,
- pkt_cfm->TransmissionStatus
- );
- staRecord->nullDataHostTag = INVALID_HOST_TAG;
-
- if(pkt_cfm->TransmissionStatus == CSR_TX_RETRY_LIMIT){
- u32 now;
- u32 inactive_time;
-
- unifi_trace(priv, UDBG1, "Nulldata to probe STA ALIVE Failed with retry limit\n");
- /* Recheck if there is some activity after null data is sent.
- *
- * If there is still no activity, then send a disconnected indication
- * to the SME to delete the station record.
- */
- if (staRecord->activity_flag){
- return;
- }
- now = CsrTimeGet(NULL);
-
- if (staRecord->lastActivity > now)
- {
- /* simple timer wrap (for 1 wrap) */
- inactive_time = CsrTimeAdd((u32)CsrTimeSub(CSR_SCHED_TIME_MAX, staRecord->lastActivity),
- now);
- }
- else
- {
- inactive_time = (u32)CsrTimeSub(now, staRecord->lastActivity);
- }
-
- if (inactive_time >= STA_INACTIVE_TIMEOUT_VAL)
- {
- struct list_head send_cfm_list;
- u8 j;
-
- /* The SME/NME may be waiting for confirmation of frames requested for this station.
- * Though this is --VERY UNLIKELY-- for a station in active mode, as a defensive
- * check it loops through the buffered frames for this station and, if confirmation
- * is requested, sends an auto-confirmation with failure status. Also flush the frames so
- * that they are not processed again in the PEER_DEL_REQ handler.
- */
- INIT_LIST_HEAD(&send_cfm_list);
-
- uf_prepare_send_cfm_list_for_queued_pkts(priv,
- &send_cfm_list,
- &(staRecord->mgtFrames));
-
- uf_flush_list(priv, &(staRecord->mgtFrames));
-
- for(j = 0; j < MAX_ACCESS_CATOGORY; j++){
- uf_prepare_send_cfm_list_for_queued_pkts(priv,
- &send_cfm_list,
- &(staRecord->dataPdu[j]));
-
- uf_flush_list(priv,&(staRecord->dataPdu[j]));
- }
-
- send_auto_ma_packet_confirm(priv, staRecord->interfacePriv, &send_cfm_list);
-
-
-
- unifi_warning(priv, "uf_process_ma_pkt_cfm_for_ap: Router Disconnected IND Peer (%x-%x-%x-%x-%x-%x)\n",
- staRecord->peerMacAddress.a[0],
- staRecord->peerMacAddress.a[1],
- staRecord->peerMacAddress.a[2],
- staRecord->peerMacAddress.a[3],
- staRecord->peerMacAddress.a[4],
- staRecord->peerMacAddress.a[5]);
-
- CsrWifiRouterCtrlConnectedIndSend(priv->CSR_WIFI_SME_IFACEQUEUE,
- 0,
- staRecord->interfacePriv->InterfaceTag,
- staRecord->peerMacAddress,
- CSR_WIFI_ROUTER_CTRL_PEER_DISCONNECTED);
- }
-
- }
- else if (pkt_cfm->TransmissionStatus == CSR_TX_SUCCESSFUL)
- {
- staRecord->activity_flag = TRUE;
- }
- }
- }
-}
-
-#endif
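-
-/*
- * uf_get_vif_identifier() below encodes the interface mode as a type code in
- * the upper byte (0x01 IBSS, 0x02 STA/P2P CLI, 0x03 AP/P2P GO, 0x04 monitor,
- * 0x05 AMP) and keeps the interface tag in the lower byte; callers such as
- * uf_ap_process_data_pdu() and uf_process_ma_vif_availibility_ind() recover
- * the tag again by masking the identifier with 0xff.
- */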
-u16 uf_get_vif_identifier (CsrWifiRouterCtrlMode mode, u16 tag)
-{
- switch(mode)
- {
- case CSR_WIFI_ROUTER_CTRL_MODE_STA:
- case CSR_WIFI_ROUTER_CTRL_MODE_P2PCLI:
- return (0x02<<8|tag);
-
- case CSR_WIFI_ROUTER_CTRL_MODE_AP:
- case CSR_WIFI_ROUTER_CTRL_MODE_P2PGO:
- return (0x03<<8|tag);
-
- case CSR_WIFI_ROUTER_CTRL_MODE_IBSS:
- return (0x01<<8|tag);
-
- case CSR_WIFI_ROUTER_CTRL_MODE_MONITOR:
- return (0x04<<8|tag);
- case CSR_WIFI_ROUTER_CTRL_MODE_AMP:
- return (0x05<<8|tag);
- default:
- return tag;
- }
-}
-
-#ifdef CSR_SUPPORT_SME
-
-/*
- * ---------------------------------------------------------------------------
- * update_macheader
- *
- *
- * This function updates the MAC header for intra-BSS packet
- * routing.
- * NOTE: This function must always be called in the RX context, which
- * runs in the bottom-half thread, since GFP_KERNEL is used. It must not
- * be called in soft IRQ / interrupt context.
- *
- * Arguments:
- * priv Pointer to device private context struct
- * skb Socket buffer containing data packet to transmit
- * newSkb Socket buffer containing data packet + MAC header if the skb has insufficient headroom
- * priority priority used for the QoS control header in the MAC header
- * bulkdata if newSkb is allocated then bulkdata is updated to send to unifi
- * interfaceTag the interface ID on which the activity is going on
- * macHeaderLengthInBytes no. of bytes of mac header in received frame
- * qosDestination used to append Qos control field
- *
- * Returns:
- * Zero on success or -1 on error.
- * ---------------------------------------------------------------------------
- */
-
-static int update_macheader(unifi_priv_t *priv, struct sk_buff *skb,
- struct sk_buff *newSkb, CSR_PRIORITY *priority,
- bulk_data_param_t *bulkdata, u16 interfaceTag,
- u8 macHeaderLengthInBytes,
- u8 qosDestination)
-{
-
- u16 *fc = NULL;
- u8 direction = 0, toDs, fromDs;
- u8 *bufPtr = NULL;
- u8 sa[ETH_ALEN], da[ETH_ALEN];
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
- int headroom;
- u8 macHeaderBuf[IEEE802_11_DATA_FRAME_MAC_HEADER_SIZE] = {0};
-
- unifi_trace(priv, UDBG5, "entering the update_macheader function\n");
-
- /* temporary buffer for the Mac header storage */
- memcpy(macHeaderBuf, skb->data, macHeaderLengthInBytes);
-
- /* remove the Macheader from the skb */
- skb_pull(skb, macHeaderLengthInBytes);
-
- /* get the skb headroom for skb_push check */
- headroom = skb_headroom(skb);
-
- /* pointer to frame control field */
- fc = (u16*) macHeaderBuf;
-
- toDs = (*fc & cpu_to_le16(IEEE802_11_FC_TO_DS_MASK))?1 : 0;
- fromDs = (*fc & cpu_to_le16(IEEE802_11_FC_FROM_DS_MASK))? 1: 0;
- unifi_trace(priv, UDBG5, "In update_macheader function, fromDs = %x, toDs = %x\n", fromDs, toDs);
- direction = ((fromDs | (toDs << 1)) & 0x3);
-
- /* Address1 or 3 from the macheader */
- memcpy(da, macHeaderBuf+4+toDs*12, ETH_ALEN);
- /* Address2, 3 or 4 from the mac header */
- memcpy(sa, macHeaderBuf+10+fromDs*(6+toDs*8), ETH_ALEN);
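- /* The offsets above follow the IEEE 802.11 data frame address layout
- * (Addr1 at byte 4, Addr2 at byte 10, Addr3 at byte 16, Addr4 at byte 24):
- * DA is Addr1 (toDs=0) or Addr3 (toDs=1), hence 4 + toDs*12, and SA is
- * Addr2 (fromDs=0), Addr3 (fromDs=1, toDs=0) or Addr4 (fromDs=1, toDs=1),
- * hence 10 + fromDs*(6 + toDs*8).
- */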
-
- unifi_trace(priv, UDBG3, "update_macheader:direction = %x\n", direction);
- /* update the toDs, fromDs & address fields in Mac header */
- switch(direction)
- {
- case 2:
- /* toDs = 1 & fromDs = 0 (to-AP) when frames are received from the peer;
- * when sending this packet on to its destination the MAC header is changed
- * to fromDs = 1 & toDs = 0 (from-AP)
- */
- *fc &= cpu_to_le16(~IEEE802_11_FC_TO_DS_MASK);
- *fc |= cpu_to_le16(IEEE802_11_FC_FROM_DS_MASK);
- /* Address1: MAC address of the actual destination (4 = 2+2) */
- memcpy(macHeaderBuf + 4, da, ETH_ALEN);
- /* Address2: The MAC address of the AP (10 = 2+2+6) */
- memcpy(macHeaderBuf + 10, &interfacePriv->bssid, ETH_ALEN);
- /* Address3: MAC address of the actual source from mac header (16 = 2+2+6+6) */
- memcpy(macHeaderBuf + 16, sa, ETH_ALEN);
- break;
- case 3:
- unifi_trace(priv, UDBG3, "when both the toDs & fromDS set, NOT SUPPORTED\n");
- break;
- default:
- unifi_trace(priv, UDBG3, "problem in decoding packet in update_macheader \n");
- return -1;
- }
-
- /* frameType is Data always, Validation is done before calling this function */
-
- /* check for the source station type */
- switch(le16_to_cpu(*fc) & IEEE80211_FC_SUBTYPE_MASK)
- {
- case IEEE802_11_FC_TYPE_QOS_DATA & IEEE80211_FC_SUBTYPE_MASK:
- /* No need to modify the qos control field */
- if (!qosDestination) {
-
- /* If the source STA is QoS enabled and this bit is set, then HTC is supported by
- * the peer station and the HTC field is present in the MAC header
- */
- if (*fc & cpu_to_le16(IEEE80211_FC_ORDER_MASK)) {
- /* HT control field present in Mac header
- * 6 = sizeof(qosControl) + sizeof(htc)
- */
- macHeaderLengthInBytes -= 6;
- } else {
- macHeaderLengthInBytes -= 2;
- }
- /* Destination STA is non qos so change subtype to DATA */
- *fc &= cpu_to_le16(~IEEE80211_FC_SUBTYPE_MASK);
- *fc |= cpu_to_le16(IEEE802_11_FC_TYPE_DATA);
- /* remove the qos control field & HTC (if present); the new macHeaderLengthInBytes is less than the old
- * macHeaderLengthInBytes so there is no need to verify the skb headroom
- */
- if (headroom < macHeaderLengthInBytes) {
- unifi_trace(priv, UDBG1, " sufficient headroom not there to push updated mac header \n");
- return -1;
- }
- bufPtr = (u8 *) skb_push(skb, macHeaderLengthInBytes);
-
- /* update bulk data os_data_ptr */
- bulkdata->d[0].os_data_ptr = skb->data;
- bulkdata->d[0].os_net_buf_ptr = (unsigned char*)skb;
- bulkdata->d[0].data_length = skb->len;
-
- } else {
- /* pointing to QOS control field */
- u8 qc;
- if (*fc & cpu_to_le16(IEEE80211_FC_ORDER_MASK)) {
- qc = *((u8*)(macHeaderBuf + (macHeaderLengthInBytes - 4 - 2)));
- } else {
- qc = *((u8*)(macHeaderBuf + (macHeaderLengthInBytes - 2)));
- }
-
- if ((qc & IEEE802_11_QC_TID_MASK) > 7) {
- *priority = 7;
- } else {
- *priority = qc & IEEE802_11_QC_TID_MASK;
- }
-
- unifi_trace(priv, UDBG1, "Incoming packet priority from QSTA is %x\n", *priority);
-
- if (headroom < macHeaderLengthInBytes) {
- unifi_trace(priv, UDBG3, " sufficient headroom not there to push updated mac header \n");
- return -1;
- }
- bufPtr = (u8 *) skb_push(skb, macHeaderLengthInBytes);
- }
- break;
- default:
- {
- bulk_data_param_t data_ptrs;
- CsrResult csrResult;
- unifi_trace(priv, UDBG5, "normal Data packet, NO QOS \n");
-
- if (qosDestination) {
- u8 qc = 0;
- unifi_trace(priv, UDBG3, "destination is QOS station \n");
-
- /* Set Ma-Packet.req UP to UP0 */
- *priority = CSR_QOS_UP0;
-
- /* prepare the qos control field */
- qc |= CSR_QOS_UP0;
- /* no Amsdu is in ap buffer so eosp is left 0 */
- if (da[0] & 0x1) {
- /* multicast/broadcast frames, no acknowledgement needed */
- qc |= 1 << 5;
- }
-
- /* update new Mac header Length with 2 = sizeof(qos control) */
- macHeaderLengthInBytes += 2;
-
- /* received DATA frame but destination is a QoS station, so update subtype to QoS */
- *fc &= cpu_to_le16(~IEEE80211_FC_SUBTYPE_MASK);
- *fc |= cpu_to_le16(IEEE802_11_FC_TYPE_QOS_DATA);
-
- /* appendQosControlOffset = macHeaderLengthInBytes - 2, since source sta is not QOS */
- macHeaderBuf[macHeaderLengthInBytes - 2] = qc;
- /* txopLimit is 0 */
- macHeaderBuf[macHeaderLengthInBytes - 1] = 0;
- if (headroom < macHeaderLengthInBytes) {
- csrResult = unifi_net_data_malloc(priv, &data_ptrs.d[0], skb->len + macHeaderLengthInBytes);
-
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(priv, " failed to allocate request_data. in update_macheader func\n");
- return -1;
- }
- newSkb = (struct sk_buff *)(data_ptrs.d[0].os_net_buf_ptr);
- newSkb->len = skb->len + macHeaderLengthInBytes;
-
- memcpy((void*)data_ptrs.d[0].os_data_ptr + macHeaderLengthInBytes,
- skb->data, skb->len);
-
- bulkdata->d[0].os_data_ptr = newSkb->data;
- bulkdata->d[0].os_net_buf_ptr = (unsigned char*)newSkb;
- bulkdata->d[0].data_length = newSkb->len;
-
- bufPtr = (u8*)data_ptrs.d[0].os_data_ptr;
-
- /* The old skb will not be used again */
- kfree_skb(skb);
- } else {
- /* skb headroom is sufficient to append Macheader */
- bufPtr = (u8*)skb_push(skb, macHeaderLengthInBytes);
- bulkdata->d[0].os_data_ptr = skb->data;
- bulkdata->d[0].os_net_buf_ptr = (unsigned char*)skb;
- bulkdata->d[0].data_length = skb->len;
- }
- } else {
- unifi_trace(priv, UDBG3, "destination is not a QSTA\n");
- if (headroom < macHeaderLengthInBytes) {
- csrResult = unifi_net_data_malloc(priv, &data_ptrs.d[0], skb->len + macHeaderLengthInBytes);
-
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(priv, " failed to allocate request_data. in update_macheader func\n");
- return -1;
- }
- newSkb = (struct sk_buff *)(data_ptrs.d[0].os_net_buf_ptr);
- newSkb->len = skb->len + macHeaderLengthInBytes;
-
- memcpy((void*)data_ptrs.d[0].os_data_ptr + macHeaderLengthInBytes,
- skb->data, skb->len);
-
- bulkdata->d[0].os_data_ptr = newSkb->data;
- bulkdata->d[0].os_net_buf_ptr = (unsigned char*)newSkb;
- bulkdata->d[0].data_length = newSkb->len;
-
- bufPtr = (u8*)data_ptrs.d[0].os_data_ptr;
-
- /* The old skb will not be used again */
- kfree_skb(skb);
- } else {
- /* skb headroom is sufficient to append Macheader */
- bufPtr = (u8*)skb_push(skb, macHeaderLengthInBytes);
- bulkdata->d[0].os_data_ptr = skb->data;
- bulkdata->d[0].os_net_buf_ptr = (unsigned char*)skb;
- bulkdata->d[0].data_length = skb->len;
- }
- }
- }
- }
-
- /* prepare the complete skb, by pushing the MAC header to the beginning of the skb->data */
- unifi_trace(priv, UDBG5, "updated Mac Header: %d \n",macHeaderLengthInBytes);
- memcpy(bufPtr, macHeaderBuf, macHeaderLengthInBytes);
-
- unifi_trace(priv, UDBG5, "leaving the update_macheader function\n");
- return 0;
-}
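-
-/*
- * Note on the QoS Control handling in update_macheader() above: in the 802.11
- * QoS Control field the TID occupies bits 0-3 (hence the IEEE802_11_QC_TID_MASK
- * extraction and the clamp of the priority to 7), and the ack-policy subfield
- * occupies bits 5-6, so "qc |= 1 << 5" marks group-addressed frames with the
- * "no acknowledgement" policy.
- */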
-/*
- * ---------------------------------------------------------------------------
- * uf_ap_process_data_pdu
- *
- *
- * Takes care of intra BSS admission control & routing packets within BSS
- *
- * Arguments:
- * priv Pointer to device private context struct
- * skb Socket buffer containing data packet to transmit
- * ehdr ethernet header to fetch priority of packet
- * srcStaInfo source stations record for connection verification
- * packed_signal
- * signal_len
- * signal MA-PACKET.indication signal
- * bulkdata if newSkb allocated then bulkdata updated to send to unifi
- * macHeaderLengthInBytes no. of bytes of mac header in received frame
- *
- * Returns:
- * Zero on success(ap processing complete) or -1 if packet also have to be sent to NETDEV.
- * ---------------------------------------------------------------------------
- */
-int
-uf_ap_process_data_pdu(unifi_priv_t *priv, struct sk_buff *skb,
- struct ethhdr *ehdr, CsrWifiRouterCtrlStaInfo_t * srcStaInfo,
- const CSR_SIGNAL *signal,
- bulk_data_param_t *bulkdata,
- u8 macHeaderLengthInBytes)
-{
- const CSR_MA_PACKET_INDICATION *ind = &(signal->u.MaPacketIndication);
- u16 interfaceTag = (ind->VirtualInterfaceIdentifier & 0x00ff);
- struct sk_buff *newSkb = NULL;
- /* pointer to skb or private skb created using skb_copy() */
- struct sk_buff *skbPtr = skb;
- u8 sendToNetdev = FALSE;
- u8 qosDestination = FALSE;
- CSR_PRIORITY priority = CSR_CONTENTION;
- CsrWifiRouterCtrlStaInfo_t *dstStaInfo = NULL;
- netInterface_priv_t *interfacePriv;
-
- unifi_trace(priv, UDBG5, "entering uf_ap_process_data_pdu %d\n",macHeaderLengthInBytes);
- /* InterfaceTag validation from MA_PACKET.indication */
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_trace(priv, UDBG1, "Interface Tag is Invalid in uf_ap_process_data_pdu\n");
- unifi_net_data_free(priv, &bulkdata->d[0]);
- return 0;
- }
- interfacePriv = priv->interfacePriv[interfaceTag];
-
- if((interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) &&
- (interfacePriv->intraBssEnabled == FALSE)) {
- unifi_trace(priv, UDBG2, "uf_ap_process_data_pdu:P2P GO intrabssEnabled?= %d\n", interfacePriv->intraBssEnabled);
-
- /* In the P2P GO case, if intra-BSS distribution is disabled then don't do intra-BSS routing */
- /* If destination in our BSS then drop otherwise give packet to netdev */
- dstStaInfo = CsrWifiRouterCtrlGetStationRecordFromPeerMacAddress(priv, ehdr->h_dest, interfaceTag);
- if (dstStaInfo) {
- unifi_net_data_free(priv, &bulkdata->d[0]);
- return 0;
- }
- /* Maybe an associated P2P CLI is trying to send the packets on the backbone (netdev) */
- return -1;
- }
-
- if(!memcmp(ehdr->h_dest, interfacePriv->bssid.a, ETH_ALEN)) {
- /* This packet will be given to the TCP/IP stack since this packet is for us(AP)
- * No routing needed */
- unifi_trace(priv, UDBG4, "destination address is csr_ap\n");
- return -1;
- }
-
- /* fetch the destination record from station record database */
- dstStaInfo = CsrWifiRouterCtrlGetStationRecordFromPeerMacAddress(priv, ehdr->h_dest, interfaceTag);
-
- /* AP mode processing, & if packet is unicast */
- if(!dstStaInfo) {
- if (!(ehdr->h_dest[0] & 0x1)) {
- /* destination not in station record & it's a unicast packet, so pass the packet to the network stack */
- unifi_trace(priv, UDBG3, "unicast frame & destination record not exist, send to netdev proto = %x\n", htons(skb->protocol));
- return -1;
- } else {
- /* packet is multicast/broadcast */
- /* copy the skb to skbPtr, send skb to netdev & skbPtr to the multicast/broadcast list */
- unifi_trace(priv, UDBG5, "skb_copy, in uf_ap_process_data_pdu, protocol = %x\n", htons(skb->protocol));
- skbPtr = skb_copy(skb, GFP_KERNEL);
- if(skbPtr == NULL) {
- /* We don't have memory, so don't send the frame in the BSS */
- unifi_notice(priv, "broadcast/multicast frame can't be sent in BSS, no memory: proto = %x\n", htons(skb->protocol));
- return -1;
- }
- sendToNetdev = TRUE;
- }
- } else {
-
- /* validate the Peer & Destination Station record */
- if (uf_process_station_records_for_sending_data(priv, interfaceTag, srcStaInfo, dstStaInfo)) {
- unifi_notice(priv, "uf_ap_process_data_pdu: station record validation failed \n");
- interfacePriv->stats.rx_errors++;
- unifi_net_data_free(priv, &bulkdata->d[0]);
- return 0;
- }
- }
-
- /* A broadcast packet is always sent as a non-QoS packet, since the WMM spec
- * does not mandate that broadcast/multicast be sent as QoS data even if all
- * peers are QSTAs
- */
- if(sendToNetdev) {
- /* Broadcast packet: send it as a non-QoS packet */
- qosDestination = FALSE;
- } else if(dstStaInfo && (dstStaInfo->wmmOrQosEnabled == TRUE)) {
- qosDestination = TRUE;
- }
-
- unifi_trace(priv, UDBG3, "uf_ap_process_data_pdu QoS destination = %s\n", (qosDestination)? "TRUE": "FALSE");
-
- /* packet is allowed to be sent to unifi, update the MAC header */
- if (update_macheader(priv, skbPtr, newSkb, &priority, bulkdata, interfaceTag, macHeaderLengthInBytes, qosDestination)) {
- interfacePriv->stats.rx_errors++;
- unifi_notice(priv, "(Packet Drop) failed to update the Mac header in uf_ap_process_data_pdu\n");
- if (sendToNetdev) {
- /* Free the skb_copy(skbPtr) data since packet processing failed */
- bulkdata->d[0].os_data_ptr = skbPtr->data;
- bulkdata->d[0].os_net_buf_ptr = (unsigned char*)skbPtr;
- bulkdata->d[0].data_length = skbPtr->len;
- unifi_net_data_free(priv, &bulkdata->d[0]);
- }
- return -1;
- }
-
- unifi_trace(priv, UDBG3, "Mac Header updated...calling uf_process_ma_packet_req \n");
-
- /* Packet is ready to send to unifi, transmissionControl = 0x0004; confirmation is not needed for data packets */
- if (uf_process_ma_packet_req(priv, ehdr->h_dest, 0xffffffff, interfaceTag, CSR_NO_CONFIRM_REQUIRED, (CSR_RATE)0,priority, priv->netdev_client->sender_id, bulkdata)) {
- if (sendToNetdev) {
- unifi_trace(priv, UDBG1, "In uf_ap_process_data_pdu, (Packet Drop) uf_process_ma_packet_req failed. freeing skb_copy data (original data sent to Netdev)\n");
- /* Free the skb_copy(skbPtr) data since packet processing failed */
- bulkdata->d[0].os_data_ptr = skbPtr->data;
- bulkdata->d[0].os_net_buf_ptr = (unsigned char*)skbPtr;
- bulkdata->d[0].data_length = skbPtr->len;
- unifi_net_data_free(priv, &bulkdata->d[0]);
- } else {
- /* This frees the skb data */
- unifi_trace(priv, UDBG1, "In uf_ap_process_data_pdu, (Packet Drop). Unicast data so freeing original skb \n");
- unifi_net_data_free(priv, &bulkdata->d[0]);
- }
- }
- unifi_trace(priv, UDBG5, "leaving uf_ap_process_data_pdu\n");
-
- if (sendToNetdev) {
- /* The packet is multicast/broadcast, so after AP processing packet has to
- * be sent to netdev, if peer port state is open
- */
- unifi_trace(priv, UDBG4, "Packet will be routed to NetDev\n");
- return -1;
- }
- /* AP handled the packet & it's a unicast packet, no need to send to netdev */
- return 0;
-}
-
-#endif
-
-CsrResult uf_process_ma_packet_req(unifi_priv_t *priv,
- u8 *peerMacAddress,
- CSR_CLIENT_TAG hostTag,
- u16 interfaceTag,
- CSR_TRANSMISSION_CONTROL transmissionControl,
- CSR_RATE TransmitRate,
- CSR_PRIORITY priority,
- CSR_PROCESS_ID leSenderProcessId,
- bulk_data_param_t *bulkdata)
-{
- CsrResult status = CSR_RESULT_SUCCESS;
- CSR_SIGNAL signal;
- int result;
-#ifdef CSR_SUPPORT_SME
- CsrWifiRouterCtrlStaInfo_t *staRecord = NULL;
- const u8 *macHdrLocation = bulkdata->d[0].os_data_ptr;
- CsrWifiPacketType pktType;
- int frameType = 0;
- u8 queuePacketDozing = FALSE;
- u32 priority_q;
- u16 frmCtrl;
- struct list_head * list = NULL; /* List to which buffered PDUs are to be enqueued*/
- u8 setBcTim=FALSE;
- netInterface_priv_t *interfacePriv;
- u8 requeueOnSamePos = FALSE;
- u32 handle = 0xFFFFFFFF;
- unsigned long lock_flags;
-
- unifi_trace(priv, UDBG5,
- "entering uf_process_ma_packet_req, peer: %pMF\n",
- peerMacAddress);
-
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "interfaceTag >= CSR_WIFI_NUM_INTERFACES, interfacetag = %d\n", interfaceTag);
- return CSR_RESULT_FAILURE;
- }
- interfacePriv = priv->interfacePriv[interfaceTag];
-
-
- /* fetch the station record for corresponding peer mac address */
- if ((staRecord = CsrWifiRouterCtrlGetStationRecordFromPeerMacAddress(priv, peerMacAddress, interfaceTag))) {
- handle = staRecord->assignedHandle;
- }
-
- /* Frame ma-packet.req, this is saved/transmitted depend on queue state */
- unifi_frame_ma_packet_req(priv, priority, TransmitRate, hostTag,
- interfaceTag, transmissionControl, leSenderProcessId,
- peerMacAddress, &signal);
-
- /* Since this is a common path between STA & AP modes: in the STA case the packet
- * need not be queued, but in the AP case we have to queue PDUs in
- * several scenarios
- */
- switch(interfacePriv->interfaceMode)
- {
- case CSR_WIFI_ROUTER_CTRL_MODE_AP:
- case CSR_WIFI_ROUTER_CTRL_MODE_P2PGO:
- /* For this mode processing done below */
- break;
- default:
- /* In case of STA/IBSS/P2PCLI/AMP, no checks needed; send the packet down & return */
- unifi_trace(priv, UDBG5, "In %s, interface mode is %x \n", __FUNCTION__, interfacePriv->interfaceMode);
- if (interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_NONE) {
- unifi_warning(priv, "In %s, interface mode NONE \n", __FUNCTION__);
- }
- if ((result = ul_send_signal_unpacked(priv, &signal, bulkdata))) {
- status = CSR_RESULT_FAILURE;
- }
- return status;
- }
-
- /* -----Only AP/P2pGO mode handling falls below----- */
-
- /* convert priority to queue */
- priority_q = unifi_frame_priority_to_queue((CSR_PRIORITY) priority);
-
- /* check the powersave status of the peer */
- if (staRecord && (staRecord->currentPeerState ==
- CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_POWER_SAVE)) {
- /* Peer is dozing & the packet has to be delivered, so buffer the packet &
- * update the TIM
- */
- queuePacketDozing = TRUE;
- }
-
- /* find the type of frame: unicast or multicast/broadcast */
- if (*peerMacAddress & 0x1) {
- /* Multicast/broadCast data are always triggered by vif_availability.ind
- * at the DTIM
- */
- pktType = CSR_WIFI_MULTICAST_PDU;
- } else {
- pktType = CSR_WIFI_UNICAST_PDU;
- }
-
- /* Fetch the frame control field from mac header & check for frame type */
- frmCtrl = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(macHdrLocation);
-
- /* Processing done according to Frame/Packet type */
- frameType = ((frmCtrl & 0x000c) >> FRAME_CONTROL_TYPE_FIELD_OFFSET);
- switch(frameType)
- {
- case IEEE802_11_FRAMETYPE_MANAGEMENT:
-
- switch(pktType)
- {
- case CSR_WIFI_UNICAST_PDU:
- unifi_trace(priv, UDBG5, "management unicast PDU in uf_process_ma_packet_req \n");
- /* push the packet in to the queue with appropriate mgt list */
- if (!staRecord) {
- /* push the packet to the unifi if the list is empty (if the packet is lost, how do we re-enqueue?) */
- if (list_empty(&interfacePriv->genericMgtFrames)) {
-#ifdef CSR_SUPPORT_SME
- if(!(IS_DTIM_ACTIVE(interfacePriv->dtimActive,interfacePriv->multicastPduHostTag))) {
-#endif
-
- unifi_trace(priv, UDBG3, "genericMgtFrames list is empty uf_process_ma_packet_req \n");
- result = ul_send_signal_unpacked(priv, &signal, bulkdata);
- /* requeue only on ENOSPC */
- if(result == -ENOSPC) {
- /* requeue the failed packet to genericMgtFrame with same position */
- unifi_trace(priv, UDBG1, "(ENOSPC) Sending genericMgtFrames Failed so buffering\n");
- list = &interfacePriv->genericMgtFrames;
- requeueOnSamePos = TRUE;
- }
-#ifdef CSR_SUPPORT_SME
- }else{
- list = &interfacePriv->genericMgtFrames;
- unifi_trace(priv, UDBG3, "genericMgtFrames queue empty and dtim started\n hosttag is 0x%x,\n",signal.u.MaPacketRequest.HostTag);
- update_eosp_to_head_of_broadcast_list_head(priv,interfaceTag);
- }
-#endif
- } else {
- /* Queue the packet to genericMgtFrame of unifi_priv_t data structure */
- list = &interfacePriv->genericMgtFrames;
- unifi_trace(priv, UDBG2, "genericMgtFrames queue not empty\n");
- }
- } else {
- /* check peer power state */
- if (queuePacketDozing || !list_empty(&staRecord->mgtFrames) || IS_DTIM_ACTIVE(interfacePriv->dtimActive,interfacePriv->multicastPduHostTag)) {
- /* peer is in dozing mode, so queue packet in mgt frame list of station record */
- /*if multicast traffic is going on, buffer the unicast packets*/
- list = &staRecord->mgtFrames;
-
- unifi_trace(priv, UDBG1, "staRecord->MgtFrames list empty? = %s, handle = %d, queuePacketDozing = %d\n",
- (list_empty(&staRecord->mgtFrames))? "YES": "NO", staRecord->assignedHandle, queuePacketDozing);
- if(IS_DTIM_ACTIVE(interfacePriv->dtimActive,interfacePriv->multicastPduHostTag)){
- update_eosp_to_head_of_broadcast_list_head(priv,interfaceTag);
- }
-
- } else {
- unifi_trace(priv, UDBG5, "staRecord->mgtFrames list is empty uf_process_ma_packet_req \n");
- result = ul_send_signal_unpacked(priv, &signal, bulkdata);
- if(result == -ENOSPC) {
- /* requeue the failed packet to staRecord->mgtFrames with same position */
- list = &staRecord->mgtFrames;
- requeueOnSamePos = TRUE;
- unifi_trace(priv, UDBG1, "(ENOSPC) Sending MgtFrames Failed handle = %d so buffering\n",staRecord->assignedHandle);
- priv->pausedStaHandle[0]=(u8)(staRecord->assignedHandle);
- } else if (result) {
- status = CSR_RESULT_FAILURE;
- }
- }
- }
- break;
- case CSR_WIFI_MULTICAST_PDU:
- unifi_trace(priv, UDBG5, "management multicast/broadcast PDU in uf_process_ma_packet_req 'QUEUE it' \n");
- /* Queue the packet to genericMulticastOrBroadCastMgtFrames of the unifi_priv_t data structure;
- * it will be sent when we receive VIF AVAILABILITY from the firmware as part of the DTIM
- */
-
- list = &interfacePriv->genericMulticastOrBroadCastMgtFrames;
- if((interfacePriv->interfaceMode != CSR_WIFI_ROUTER_CTRL_MODE_IBSS) &&
- (list_empty(&interfacePriv->genericMulticastOrBroadCastMgtFrames))) {
- setBcTim=TRUE;
- }
- break;
- default:
- unifi_error(priv, "condition never meets: packet type unrecognized\n");
- }
- break;
- case IEEE802_11_FRAMETYPE_DATA:
- switch(pktType)
- {
- case CSR_WIFI_UNICAST_PDU:
- unifi_trace(priv, UDBG5, "data unicast PDU in uf_process_ma_packet_req \n");
- /* check peer power state, list status & peer port status */
- if(!staRecord) {
- unifi_error(priv, "In %s unicast but staRecord = NULL\n", __FUNCTION__);
- return CSR_RESULT_FAILURE;
- } else if (queuePacketDozing || isRouterBufferEnabled(priv,priority_q)|| !list_empty(&staRecord->dataPdu[priority_q]) || IS_DTIM_ACTIVE(interfacePriv->dtimActive,interfacePriv->multicastPduHostTag)) {
- /* peer is in dozing mode, so queue the packet in the data PDU list of the station record */
- /* if multicast traffic is going on, buffer the unicast packets */
- unifi_trace(priv, UDBG2, "Enqueued to staRecord->dataPdu[%d] queuePacketDozing=%d,\
- Buffering enabled = %d \n", priority_q,queuePacketDozing,isRouterBufferEnabled(priv,priority_q));
- list = &staRecord->dataPdu[priority_q];
- } else {
- unifi_trace(priv, UDBG5, "staRecord->dataPdu[%d] list is empty uf_process_ma_packet_req \n", priority_q);
- /* Pdu allowed to send to unifi */
- result = ul_send_signal_unpacked(priv, &signal, bulkdata);
- if(result == -ENOSPC) {
- /* requeue the failed packet to staRecord->dataPdu[priority_q] with same position */
- unifi_trace(priv, UDBG1, "(ENOSPC) Sending Unicast DataPDU to queue %d Failed so buffering\n",priority_q);
- requeueOnSamePos = TRUE;
- list = &staRecord->dataPdu[priority_q];
- priv->pausedStaHandle[priority_q]=(u8)(staRecord->assignedHandle);
- if(!isRouterBufferEnabled(priv,priority_q)) {
- unifi_error(priv,"Buffering Not enabled for queue %d \n",priority_q);
- }
- } else if (result) {
- status = CSR_RESULT_FAILURE;
- }
- }
- break;
- case CSR_WIFI_MULTICAST_PDU:
- unifi_trace(priv, UDBG5, "data multicast/broadcast PDU in uf_process_ma_packet_req \n");
- /* Queue the packet to the genericMulticastOrBroadCastFrames list of the unifi_priv_t data structure;
- * it will be sent when we receive VIF AVAILABILITY from the firmware as part of the DTIM
- */
- list = &interfacePriv->genericMulticastOrBroadCastFrames;
- if(list_empty(&interfacePriv->genericMulticastOrBroadCastFrames)) {
- setBcTim = TRUE;
- }
- break;
- default:
- unifi_error(priv, "condition never meets: packet type un recognized\n");
- }
- break;
- default:
- unifi_error(priv, "unrecognized frame type\n");
- }
- if(list) {
- status = enque_tx_data_pdu(priv, bulkdata,list, &signal,requeueOnSamePos);
- /* Record no. of packet queued for each peer */
- if (staRecord && (pktType == CSR_WIFI_UNICAST_PDU) && (!status)) {
- spin_lock_irqsave(&priv->staRecord_lock,lock_flags);
- staRecord->noOfPktQueued++;
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
- }
- else if ((pktType == CSR_WIFI_MULTICAST_PDU) && (!status))
- {
- /* Record no. of broadcast/multicast packets queued */
- spin_lock_irqsave(&priv->staRecord_lock,lock_flags);
- interfacePriv->noOfbroadcastPktQueued++;
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
- }
- }
- /* Only update the TIM if the broadcast TIM needs setting and queuing was successful */
- if(setBcTim && !status) {
- unifi_trace(priv, UDBG3, "tim set due to broadcast pkt\n");
- if (!interfacePriv->bcTimSetReqPendingFlag)
- {
- update_tim(priv,0,CSR_WIFI_TIM_SET,interfaceTag, handle);
- }
- else
- {
- /* Cache the TimSet value so that it will be processed immediately after
- * completing the current setTim Request
- */
- interfacePriv->bcTimSetReqQueued = CSR_WIFI_TIM_SET;
- unifi_trace(priv, UDBG2, "uf_process_ma_packet_req : One more UpdateDTim Request(:%d) Queued \n",
- interfacePriv->bcTimSetReqQueued);
- }
- } else if(staRecord && staRecord->currentPeerState ==
- CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_POWER_SAVE) {
- if(staRecord->timSet == CSR_WIFI_TIM_RESET || staRecord->timSet == CSR_WIFI_TIM_RESETTING) {
- if(!staRecord->wmmOrQosEnabled) {
- if(!list_empty(&staRecord->mgtFrames) ||
- !list_empty(&staRecord->dataPdu[3]) ||
- !list_empty(&staRecord->dataPdu[UNIFI_TRAFFIC_Q_CONTENTION])) {
- unifi_trace(priv, UDBG3, "tim set due to unicast pkt & peer in powersave\n");
- if (!staRecord->timRequestPendingFlag){
- update_tim(priv,staRecord->aid,1,interfaceTag, handle);
- }
- else
- {
- /* Cache the TimSet value so that it will be processed immediately after
- * completing the current setTim Request
- */
- staRecord->updateTimReqQueued = 1;
- unifi_trace(priv, UDBG6, "update_tim : One more UpdateTim Request (Tim value:%d) Queued for AID %x\n", staRecord->updateTimReqQueued,
- staRecord->aid);
- }
- }
- } else {
- /* Check the non-delivery-enabled (i.e. trigger-enabled), all-delivery-enabled and legacy ACs for a TIM update in firmware */
- u8 allDeliveryEnabled = 0, dataAvailable = 0;
- /* Check if all AC's are Delivery Enabled */
- is_all_ac_deliver_enabled_and_moredata(staRecord, &allDeliveryEnabled, &dataAvailable);
- if (uf_is_more_data_for_non_delivery_ac(staRecord) || (allDeliveryEnabled && dataAvailable)
- || (!list_empty(&staRecord->mgtFrames))) {
- if (!staRecord->timRequestPendingFlag) {
- update_tim(priv,staRecord->aid,1,interfaceTag, handle);
- }
- else
- {
- /* Cache the TimSet value so that it will be processed immediately after
- * completing the current setTim Request
- */
- staRecord->updateTimReqQueued = 1;
- unifi_trace(priv, UDBG6, "update_tim : One more UpdateTim Request (Tim value:%d) Queued for AID %x\n", staRecord->updateTimReqQueued,
- staRecord->aid);
- }
- }
- }
- }
- }
-
- if((list) && (pktType == CSR_WIFI_UNICAST_PDU && !queuePacketDozing) && !(isRouterBufferEnabled(priv,priority_q)) && !(IS_DTIM_ACTIVE(interfacePriv->dtimActive,interfacePriv->multicastPduHostTag))) {
- unifi_trace(priv, UDBG2, "buffering cleared for queue = %d So resending buffered frames\n",priority_q);
- uf_send_buffered_frames(priv, priority_q);
- }
- unifi_trace(priv, UDBG5, "leaving uf_process_ma_packet_req \n");
- return status;
-#else
-#ifdef CSR_NATIVE_LINUX
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "interfaceTag >= CSR_WIFI_NUM_INTERFACES, interfacetag = %d\n", interfaceTag);
- return CSR_RESULT_FAILURE;
- }
- /* Frame ma-packet.req, this is saved/transmitted depend on queue state */
- unifi_frame_ma_packet_req(priv, priority, TransmitRate, hostTag, interfaceTag,
- transmissionControl, leSenderProcessId,
- peerMacAddress, &signal);
- result = ul_send_signal_unpacked(priv, &signal, bulkdata);
- if (result) {
- return CSR_RESULT_FAILURE;
- }
-#endif
- return status;
-#endif
-}
-
-#ifdef CSR_SUPPORT_SME
-s8 uf_get_protection_bit_from_interfacemode(unifi_priv_t *priv, u16 interfaceTag, const u8 *daddr)
-{
- s8 protection = 0;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
-
- switch(interfacePriv->interfaceMode)
- {
- case CSR_WIFI_ROUTER_CTRL_MODE_STA:
- case CSR_WIFI_ROUTER_CTRL_MODE_P2PCLI:
- case CSR_WIFI_ROUTER_CTRL_MODE_AMP:
- case CSR_WIFI_ROUTER_CTRL_MODE_IBSS:
- protection = interfacePriv->protect;
- break;
- case CSR_WIFI_ROUTER_CTRL_MODE_AP:
- case CSR_WIFI_ROUTER_CTRL_MODE_P2PGO:
- {
- CsrWifiRouterCtrlStaInfo_t *dstStaInfo = NULL;
- if (daddr[0] & 0x1) {
- unifi_trace(priv, UDBG3, "broadcast/multicast packet in send_ma_pkt_request\n");
- /* In this mode, the protect member of the priv structure holds information about how
- * the AP/P2P GO was started; the member is updated in the set-mode request for AP/P2P GO
- */
- protection = interfacePriv->protect;
- } else {
- /* fetch the destination record from station record database */
- dstStaInfo = CsrWifiRouterCtrlGetStationRecordFromPeerMacAddress(priv, daddr, interfaceTag);
- if (!dstStaInfo) {
- unifi_trace(priv, UDBG3, "peer not found in station record in send_ma_pkt_request\n");
- return -1;
- }
- protection = dstStaInfo->protection;
- }
- }
- break;
- default:
- unifi_trace(priv, UDBG2, "mode unknown in send_ma_pkt_request\n");
- }
- return protection;
-}
-#endif
-#ifdef CSR_SUPPORT_SME
-u8 send_multicast_frames(unifi_priv_t *priv, u16 interfaceTag)
-{
- int r;
- tx_buffered_packets_t * buffered_pkt = NULL;
- u8 moreData = FALSE;
- u8 pduSent =0;
- unsigned long lock_flags;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
- u32 hostTag = 0xffffffff;
-
- if(!isRouterBufferEnabled(priv,UNIFI_TRAFFIC_Q_VO)) {
- while((interfacePriv->dtimActive)&& (buffered_pkt=dequeue_tx_data_pdu(priv,&interfacePriv->genericMulticastOrBroadCastMgtFrames))) {
- buffered_pkt->transmissionControl |= (TRANSMISSION_CONTROL_TRIGGER_MASK);
- moreData = (buffered_pkt->transmissionControl & TRANSMISSION_CONTROL_EOSP_MASK)?FALSE:TRUE;
-
-
- unifi_trace(priv,UDBG2,"DTIM Occurred for interface:sending Mgt packet %d\n",interfaceTag);
-
- if((r=frame_and_send_queued_pdu(priv,buffered_pkt,NULL,moreData,FALSE)) == -ENOSPC) {
- unifi_trace(priv,UDBG1,"frame_and_send_queued_pdu failed with ENOSPC for host tag = %x\n", buffered_pkt->hostTag);
- /* Enqueue at the head of the queue */
- spin_lock_irqsave(&priv->tx_q_lock,lock_flags);
- list_add(&buffered_pkt->q, &interfacePriv->genericMulticastOrBroadCastMgtFrames);
- spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags);
- break;
- } else {
- unifi_trace(priv,UDBG1,"send_multicast_frames: Send genericMulticastOrBroadCastMgtFrames (%x, %x)\n",
- buffered_pkt->hostTag,
- r);
- if(r) {
- unifi_net_data_free(priv, &buffered_pkt->bulkdata);
- }
- if(!moreData) {
-
- interfacePriv->dtimActive = FALSE;
- if(!r) {
- hostTag = buffered_pkt->hostTag;
- pduSent++;
- } else {
- send_vif_availibility_rsp(priv,uf_get_vif_identifier(interfacePriv->interfaceMode,interfaceTag),CSR_RC_UNSPECIFIED_FAILURE);
- }
- }
- /* Buffered frame sent successfully */
- spin_lock_irqsave(&priv->staRecord_lock,lock_flags);
- interfacePriv->noOfbroadcastPktQueued--;
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
- kfree(buffered_pkt);
- }
-
- }
- }
- if(!isRouterBufferEnabled(priv,UNIFI_TRAFFIC_Q_CONTENTION)) {
- while((interfacePriv->dtimActive)&& (buffered_pkt=dequeue_tx_data_pdu(priv,&interfacePriv->genericMulticastOrBroadCastFrames))) {
- buffered_pkt->transmissionControl |= TRANSMISSION_CONTROL_TRIGGER_MASK;
- moreData = (buffered_pkt->transmissionControl & TRANSMISSION_CONTROL_EOSP_MASK)?FALSE:TRUE;
-
-
- if((r=frame_and_send_queued_pdu(priv,buffered_pkt,NULL,moreData,FALSE)) == -ENOSPC) {
- /* Clear the trigger bit transmission control*/
- buffered_pkt->transmissionControl &= ~(TRANSMISSION_CONTROL_TRIGGER_MASK);
- /* Enqueue at the head of the queue */
- spin_lock_irqsave(&priv->tx_q_lock,lock_flags);
- list_add(&buffered_pkt->q, &interfacePriv->genericMulticastOrBroadCastFrames);
- spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags);
- break;
- } else {
- if(r) {
- unifi_trace(priv,UDBG1,"send_multicast_frames: Send genericMulticastOrBroadCastFrame failed (%x, %x)\n",
- buffered_pkt->hostTag,
- r);
- unifi_net_data_free(priv, &buffered_pkt->bulkdata);
- }
- if(!moreData) {
- interfacePriv->dtimActive = FALSE;
- if(!r) {
- pduSent ++;
- hostTag = buffered_pkt->hostTag;
- } else {
- send_vif_availibility_rsp(priv,uf_get_vif_identifier(interfacePriv->interfaceMode,interfaceTag),CSR_RC_UNSPECIFIED_FAILURE);
- }
- }
- /* Buffered frame sent successfully */
- spin_lock_irqsave(&priv->staRecord_lock,lock_flags);
- interfacePriv->noOfbroadcastPktQueued--;
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
- kfree(buffered_pkt);
- }
- }
- }
- if((interfacePriv->dtimActive == FALSE)) {
- /* Record the host Tag*/
- unifi_trace(priv,UDBG2,"send_multicast_frames: Recorded hostTag of EOSP packet: = 0x%x\n",hostTag);
- interfacePriv->multicastPduHostTag = hostTag;
- }
- return pduSent;
-}
-#endif
-void uf_process_ma_vif_availibility_ind(unifi_priv_t *priv,u8 *sigdata,
- u32 siglen)
-{
-#ifdef CSR_SUPPORT_SME
- CSR_SIGNAL signal;
- CSR_MA_VIF_AVAILABILITY_INDICATION *ind;
- int r;
- u16 interfaceTag;
- u8 pduSent =0;
- CSR_RESULT_CODE resultCode = CSR_RC_SUCCESS;
- netInterface_priv_t *interfacePriv;
-
- unifi_trace(priv, UDBG3,
- "uf_process_ma_vif_availibility_ind: Process signal 0x%.4X\n",
- *((u16*)sigdata));
-
- r = read_unpack_signal(sigdata, &signal);
- if (r) {
- unifi_error(priv,
- "uf_process_ma_vif_availibility_ind: Received unknown signal 0x%.4X.\n",
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN(sigdata));
- return;
- }
- ind = &signal.u.MaVifAvailabilityIndication;
- interfaceTag=ind->VirtualInterfaceIdentifier & 0xff;
-
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "in vif_availability_ind interfaceTag is wrong\n");
- return;
- }
-
- interfacePriv = priv->interfacePriv[interfaceTag];
-
- if(ind->Multicast) {
- if(list_empty(&interfacePriv->genericMulticastOrBroadCastFrames) &&
- list_empty(&interfacePriv->genericMulticastOrBroadCastMgtFrames)) {
- /* This condition can occur because of a potential race where the
- TIM is not yet reset (the host is still waiting for the confirm, which the
- firmware has already sent) when the DTIM occurs */
- unifi_notice(priv,"ma_vif_availibility_ind recevied for multicast but queues are empty%d\n",interfaceTag);
- send_vif_availibility_rsp(priv,ind->VirtualInterfaceIdentifier,CSR_RC_NO_BUFFERED_BROADCAST_MULTICAST_FRAMES);
- interfacePriv->dtimActive = FALSE;
- if(interfacePriv->multicastPduHostTag == 0xffffffff) {
- unifi_notice(priv,"ma_vif_availibility_ind recevied for multicast but queues are empty%d\n",interfaceTag);
- /* This may be an extra request in very rare race conditions, but it is fine as it would at least remove the potential lock-up */
- if (!interfacePriv->bcTimSetReqPendingFlag)
- {
- update_tim(priv,0,CSR_WIFI_TIM_RESET,interfaceTag, 0xFFFFFFFF);
- }
- else
- {
- /* Cache the TimSet value so that it will be processed immediately after
- * completing the current setTim Request
- */
- interfacePriv->bcTimSetReqQueued = CSR_WIFI_TIM_RESET;
- unifi_trace(priv, UDBG2, "uf_process_ma_vif_availibility_ind : One more UpdateDTim Request(%d) Queued \n",
- interfacePriv->bcTimSetReqQueued);
- }
- }
- return;
- }
- if(interfacePriv->dtimActive) {
- unifi_trace(priv,UDBG2,"DTIM Occurred for already active DTIM interface %d\n",interfaceTag);
- return;
- } else {
- unifi_trace(priv,UDBG2,"DTIM Occurred for interface %d\n",interfaceTag);
- if(list_empty(&interfacePriv->genericMulticastOrBroadCastFrames)) {
- set_eosp_transmit_ctrl(priv,&interfacePriv->genericMulticastOrBroadCastMgtFrames);
- } else {
- set_eosp_transmit_ctrl(priv,&interfacePriv->genericMulticastOrBroadCastFrames);
- }
- }
- interfacePriv->dtimActive = TRUE;
- pduSent = send_multicast_frames(priv,interfaceTag);
- }
- else {
- unifi_error(priv,"Interface switching is not supported %d\n",interfaceTag);
- resultCode = CSR_RC_NOT_SUPPORTED;
- send_vif_availibility_rsp(priv,ind->VirtualInterfaceIdentifier,CSR_RC_NOT_SUPPORTED);
- }
-#endif
-}
-#ifdef CSR_SUPPORT_SME
-
-#define GET_ACTIVE_INTERFACE_TAG(priv) 0
-
-static u8 uf_is_more_data_for_delivery_ac(unifi_priv_t *priv, CsrWifiRouterCtrlStaInfo_t *staRecord)
-{
- s8 i;
-
- for(i=UNIFI_TRAFFIC_Q_VO; i >= UNIFI_TRAFFIC_Q_BK; i--)
- {
- if(((staRecord->powersaveMode[i]==CSR_WIFI_AC_DELIVERY_ONLY_ENABLE)
- ||(staRecord->powersaveMode[i]==CSR_WIFI_AC_TRIGGER_AND_DELIVERY_ENABLED))
- &&(!list_empty(&staRecord->dataPdu[i]))) {
- unifi_trace(priv,UDBG2,"uf_is_more_data_for_delivery_ac: Data Available AC = %d\n", i);
- return TRUE;
- }
- }
-
- unifi_trace(priv,UDBG2,"uf_is_more_data_for_delivery_ac: Data NOT Available \n");
- return FALSE;
-}
-
-static u8 uf_is_more_data_for_usp_delivery(unifi_priv_t *priv, CsrWifiRouterCtrlStaInfo_t *staRecord, unifi_TrafficQueue queue)
-{
- s8 i;
-
- for(i = queue; i >= UNIFI_TRAFFIC_Q_BK; i--)
- {
- if(((staRecord->powersaveMode[i]==CSR_WIFI_AC_DELIVERY_ONLY_ENABLE)
- ||(staRecord->powersaveMode[i]==CSR_WIFI_AC_TRIGGER_AND_DELIVERY_ENABLED))
- &&(!list_empty(&staRecord->dataPdu[i]))) {
- unifi_trace(priv,UDBG2,"uf_is_more_data_for_usp_delivery: Data Available AC = %d\n", i);
- return TRUE;
- }
- }
-
- unifi_trace(priv,UDBG2,"uf_is_more_data_for_usp_delivery: Data NOT Available \n");
- return FALSE;
-}
-
-/*
- * ---------------------------------------------------------------------------
- * uf_send_buffered_data_from_delivery_ac
- *
- * This function takes care of
- * -> Parsing the delivery-enabled queues & sending frames down to the HIP
- * -> Setting EOSP=1 when the USP is to be terminated
- * -> Servicing the USP according to the MAX SP length
- *
- * NOTE: This function is always called from uf_handle_uspframes_delivery(). Don't
- * call this function from any other location in the code.
- *
- * Arguments:
- * priv Pointer to device private context struct
- * vif interface specific HIP vif instance
- * staInfo peer for which U-APSD is to be scheduled
- * queue AC from which data is to be sent in the USP
- * txList list for the access category being processed
- * ---------------------------------------------------------------------------
- */
-void uf_send_buffered_data_from_delivery_ac(unifi_priv_t *priv,
- CsrWifiRouterCtrlStaInfo_t * staInfo,
- u8 queue,
- struct list_head *txList)
-{
-
- u16 interfaceTag = GET_ACTIVE_INTERFACE_TAG(priv);
- tx_buffered_packets_t * buffered_pkt = NULL;
- unsigned long lock_flags;
- u8 eosp=FALSE;
- s8 r =0;
- u8 moreData = FALSE;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
-
- unifi_trace(priv, UDBG2, "++uf_send_buffered_data_from_delivery_ac, active=%x\n", staInfo->uapsdActive);
-
- if (queue > UNIFI_TRAFFIC_Q_VO)
- {
- return;
- }
- while((buffered_pkt=dequeue_tx_data_pdu(priv, txList))) {
- if((IS_DTIM_ACTIVE(interfacePriv->dtimActive,interfacePriv->multicastPduHostTag))) {
- unifi_trace(priv, UDBG2, "uf_send_buffered_data_from_delivery_ac: DTIM Active, suspend UAPSD, staId: 0x%x\n",
- staInfo->aid);
-
- /* Once resume called, the U-APSD delivery operation will resume */
- spin_lock_irqsave(&priv->staRecord_lock,lock_flags);
- staInfo->uspSuspend = TRUE;
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
- /* re-queueing the packet as DTIM started */
- spin_lock_irqsave(&priv->tx_q_lock,lock_flags);
- list_add(&buffered_pkt->q,txList);
- spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags);
- break;
- }
-
- buffered_pkt->transmissionControl &=
- ~(TRANSMISSION_CONTROL_TRIGGER_MASK | TRANSMISSION_CONTROL_EOSP_MASK);
-
-
- if((staInfo->wmmOrQosEnabled == TRUE)&&(staInfo->uapsdActive == TRUE)) {
-
- buffered_pkt->transmissionControl = TRANSMISSION_CONTROL_TRIGGER_MASK;
-
- /* Check all delivery-enabled ACs for more data, because the caller of this
- * function is not aware of the last packet
- * (First check in moreData fetching helps in draining out Mgt frames Q)
- */
- moreData = (!list_empty(txList) || uf_is_more_data_for_usp_delivery(priv, staInfo, queue));
-
- if(staInfo->noOfSpFramesSent == (staInfo->maxSpLength - 1)) {
- moreData = FALSE;
- }
-
- if(moreData == FALSE) {
- eosp = TRUE;
- buffered_pkt->transmissionControl =
- (TRANSMISSION_CONTROL_TRIGGER_MASK | TRANSMISSION_CONTROL_EOSP_MASK);
- }
- } else {
- /* Non QoS and non U-APSD */
- unifi_warning(priv, "uf_send_buffered_data_from_delivery_ac: non U-APSD !!! \n");
- }
-
- unifi_trace(priv,UDBG2,"uf_send_buffered_data_from_delivery_ac : MoreData:%d, EOSP:%d\n",moreData,eosp);
-
- if((r=frame_and_send_queued_pdu(priv,buffered_pkt,staInfo,moreData,eosp)) == -ENOSPC) {
-
- unifi_trace(priv, UDBG2, "uf_send_buffered_data_from_delivery_ac: UASPD suspended, ENOSPC in hipQ=%x\n", queue);
-
- /* Once resume called, the U-APSD delivery operation will resume */
- spin_lock_irqsave(&priv->staRecord_lock,lock_flags);
- staInfo->uspSuspend = TRUE;
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
-
- spin_lock_irqsave(&priv->tx_q_lock,lock_flags);
- list_add(&buffered_pkt->q,txList);
- spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags);
- priv->pausedStaHandle[queue]=(u8)(staInfo->assignedHandle);
- break;
- } else {
- if(r){
- /* the PDU failed and we can't do anything, so free the storage */
- unifi_net_data_free(priv, &buffered_pkt->bulkdata);
- }
- kfree(buffered_pkt);
- spin_lock_irqsave(&priv->staRecord_lock,lock_flags);
- staInfo->noOfSpFramesSent++;
- if((!moreData) || (staInfo->noOfSpFramesSent == staInfo->maxSpLength)) {
- unifi_trace(priv, UDBG2, "uf_send_buffered_data_from_delivery_ac: Terminating USP\n");
- staInfo->uapsdActive = FALSE;
- staInfo->uspSuspend = FALSE;
- staInfo->noOfSpFramesSent = 0;
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
- break;
- }
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
- }
- }
- unifi_trace(priv, UDBG2, "--uf_send_buffered_data_from_delivery_ac, active=%x\n", staInfo->uapsdActive);
-}
-
-void uf_send_buffered_data_from_ac(unifi_priv_t *priv,
- CsrWifiRouterCtrlStaInfo_t * staInfo,
- u8 queue,
- struct list_head *txList)
-{
- tx_buffered_packets_t * buffered_pkt = NULL;
- unsigned long lock_flags;
- u8 eosp=FALSE;
- u8 moreData = FALSE;
- s8 r =0;
-
- unifi_trace(priv,UDBG2,"uf_send_buffered_data_from_ac :\n");
-
- while(!isRouterBufferEnabled(priv,queue) &&
- ((buffered_pkt=dequeue_tx_data_pdu(priv, txList))!=NULL)){
-
- buffered_pkt->transmissionControl &=
- ~(TRANSMISSION_CONTROL_TRIGGER_MASK|TRANSMISSION_CONTROL_EOSP_MASK);
-
- unifi_trace(priv,UDBG3,"uf_send_buffered_data_from_ac : MoreData:%d, EOSP:%d\n",moreData,eosp);
-
- if((r=frame_and_send_queued_pdu(priv,buffered_pkt,staInfo,moreData,eosp)) == -ENOSPC) {
- /* Enqueue at the head of the queue */
- spin_lock_irqsave(&priv->tx_q_lock,lock_flags);
- list_add(&buffered_pkt->q,txList);
- spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags);
- if(staInfo != NULL){
- priv->pausedStaHandle[queue]=(u8)(staInfo->assignedHandle);
- }
- unifi_trace(priv,UDBG3," uf_send_buffered_data_from_ac: PDU sending failed .. no space for queue %d \n",queue);
- } else {
- if(r){
- /* the PDU failed and we can't do anything about it, so free the storage */
- unifi_net_data_free(priv, &buffered_pkt->bulkdata);
- }
- kfree(buffered_pkt);
- }
- }
-
-}
-
-void uf_send_buffered_frames(unifi_priv_t *priv,unifi_TrafficQueue q)
-{
- u16 interfaceTag = GET_ACTIVE_INTERFACE_TAG(priv);
- u32 startIndex=0,endIndex=0;
- CsrWifiRouterCtrlStaInfo_t * staInfo = NULL;
- u8 queue;
- u8 moreData = FALSE;
-
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
-
- if(!((interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP) ||
- (interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO)))
- return;
-
- queue = (q<=3)?q:0;
-
- if(interfacePriv->dtimActive) {
- /* this function updates dtimActive*/
- send_multicast_frames(priv,interfaceTag);
- if(!interfacePriv->dtimActive) {
- moreData = (!list_empty(&interfacePriv->genericMulticastOrBroadCastMgtFrames) ||
- !list_empty(&interfacePriv->genericMulticastOrBroadCastFrames));
- if(!moreData) {
- if (!interfacePriv->bcTimSetReqPendingFlag)
- {
- update_tim(priv,0,CSR_WIFI_TIM_RESET,interfaceTag, 0XFFFFFFFF);
- }
- else
- {
- /* Cache the TimSet value so that it is processed immediately after
- * the current setTim request completes
- */
- interfacePriv->bcTimSetReqQueued = CSR_WIFI_TIM_RESET;
- unifi_trace(priv, UDBG2, "uf_send_buffered_frames : One more UpdateDTim Request(%d) Queued \n",
- interfacePriv->bcTimSetReqQueued);
- }
- }
- } else {
- moreData = (!list_empty(&interfacePriv->genericMulticastOrBroadCastMgtFrames) ||
- !list_empty(&interfacePriv->genericMulticastOrBroadCastFrames));
- if(!moreData) {
- /* This should never happen but if it happens, we need a way out */
- unifi_error(priv,"ERROR: No More Data but DTIM is active sending Response\n");
- send_vif_availibility_rsp(priv,uf_get_vif_identifier(interfacePriv->interfaceMode,interfaceTag),CSR_RC_NO_BUFFERED_BROADCAST_MULTICAST_FRAMES);
- interfacePriv->dtimActive = FALSE;
- }
- }
- return;
- }
- if(priv->pausedStaHandle[queue] > 7) {
- priv->pausedStaHandle[queue] = 0;
- }
-
- if(queue == UNIFI_TRAFFIC_Q_VO) {
-
-
- unifi_trace(priv,UDBG2,"uf_send_buffered_frames : trying mgt from queue=%d\n",queue);
- for(startIndex= 0; startIndex < UNIFI_MAX_CONNECTIONS;startIndex++) {
- staInfo = CsrWifiRouterCtrlGetStationRecordFromHandle(priv,startIndex,interfaceTag);
- if(!staInfo ) {
- continue;
- } else if((staInfo->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_POWER_SAVE)
- &&(staInfo->uapsdActive == FALSE) ) {
- continue;
- }
-
- if((staInfo != NULL)&&(staInfo->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_ACTIVE)
- &&(staInfo->uapsdActive == FALSE)){
- /*Non-UAPSD case push the management frames out*/
- if(!list_empty(&staInfo->mgtFrames)){
- uf_send_buffered_data_from_ac(priv,staInfo, UNIFI_TRAFFIC_Q_VO, &staInfo->mgtFrames);
- }
- }
-
- if(isRouterBufferEnabled(priv,queue)) {
- unifi_notice(priv,"uf_send_buffered_frames : No space Left for queue = %d\n",queue);
- break;
- }
- }
- /*push generic management frames out*/
- if(!list_empty(&interfacePriv->genericMgtFrames)) {
- unifi_trace(priv,UDBG2,"uf_send_buffered_frames : trying generic mgt from queue=%d\n",queue);
- uf_send_buffered_data_from_ac(priv,staInfo, UNIFI_TRAFFIC_Q_VO, &interfacePriv->genericMgtFrames);
- }
- }
-
-
- unifi_trace(priv,UDBG2,"uf_send_buffered_frames : Resume called for Queue=%d\n",queue);
- unifi_trace(priv,UDBG2,"uf_send_buffered_frames : start=%d end=%d\n",startIndex,endIndex);
-
- startIndex = priv->pausedStaHandle[queue];
- endIndex = (startIndex + UNIFI_MAX_CONNECTIONS -1) % UNIFI_MAX_CONNECTIONS;
-
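- /* Walk the station records round-robin, starting from the handle recorded when
- * the queue was paused on ENOSPC, so that the stalled station is serviced first.
- */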
- while(startIndex != endIndex) {
- staInfo = CsrWifiRouterCtrlGetStationRecordFromHandle(priv,startIndex,interfaceTag);
- if(!staInfo) {
- startIndex ++;
- if(startIndex >= UNIFI_MAX_CONNECTIONS) {
- startIndex = 0;
- }
- continue;
- } else if((staInfo->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_POWER_SAVE)
- &&(staInfo->uapsdActive == FALSE)) {
- startIndex ++;
- if(startIndex >= UNIFI_MAX_CONNECTIONS) {
- startIndex = 0;
- }
- continue;
- }
- /* Peer is active or U-APSD is active so send PDUs to the peer */
- unifi_trace(priv,UDBG2,"uf_send_buffered_frames : trying data from queue=%d\n",queue);
-
-
- if((staInfo != NULL)&&(staInfo->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_ACTIVE)
- &&(staInfo->uapsdActive == FALSE)) {
- if(!list_empty(&staInfo->dataPdu[queue])) {
-
- /*Non-UAPSD case push the AC frames out*/
- uf_send_buffered_data_from_ac(priv, staInfo, queue, (&staInfo->dataPdu[queue]));
- }
- }
- startIndex ++;
- if(startIndex >= UNIFI_MAX_CONNECTIONS) {
- startIndex = 0;
- }
- }
- if(isRouterBufferEnabled(priv,queue)) {
- priv->pausedStaHandle[queue] = endIndex;
- } else {
- priv->pausedStaHandle[queue] = 0;
- }
-
- /* U-APSD might have stopped because of ENOSPC in lib_hip (pause activity).
- * So restart it if U-APSD was active with any of the stations
- */
- unifi_trace(priv, UDBG4, "csrWifiHipSendBufferedFrames: UAPSD Resume Q=%x\n", queue);
- resume_suspended_uapsd(priv, interfaceTag);
-}
-
-
-u8 uf_is_more_data_for_non_delivery_ac(CsrWifiRouterCtrlStaInfo_t *staRecord)
-{
- u8 i;
-
- for(i=0;i<=3;i++)
- {
- if(((staRecord->powersaveMode[i]==CSR_WIFI_AC_TRIGGER_ONLY_ENABLED)
- ||(staRecord->powersaveMode[i]==CSR_WIFI_AC_LEGACY_POWER_SAVE))
- &&(!list_empty(&staRecord->dataPdu[i]))){
-
- return TRUE;
- }
- }
-
- if(((staRecord->powersaveMode[UNIFI_TRAFFIC_Q_VO]==CSR_WIFI_AC_TRIGGER_ONLY_ENABLED)
- ||(staRecord->powersaveMode[UNIFI_TRAFFIC_Q_VO]==CSR_WIFI_AC_LEGACY_POWER_SAVE))
- &&(!list_empty(&staRecord->mgtFrames))){
-
- return TRUE;
- }
-
-
-
- return FALSE;
-}
-
-
-int uf_process_station_records_for_sending_data(unifi_priv_t *priv,u16 interfaceTag,
- CsrWifiRouterCtrlStaInfo_t *srcStaInfo,
- CsrWifiRouterCtrlStaInfo_t *dstStaInfo)
-{
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
-
- unifi_trace(priv, UDBG5, "entering uf_process_station_records_for_sending_data\n");
-
- if (srcStaInfo->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_DISCONNECTED) {
- unifi_error(priv, "Peer State not connected AID = %x, handle = %x, control port state = %x\n",
- srcStaInfo->aid, srcStaInfo->assignedHandle, srcStaInfo->peerControlledPort->port_action);
- return -1;
- }
- switch (interfacePriv->interfaceMode)
- {
- case CSR_WIFI_ROUTER_CTRL_MODE_P2PGO:
- case CSR_WIFI_ROUTER_CTRL_MODE_AP:
- unifi_trace(priv, UDBG5, "mode is AP/P2PGO\n");
- break;
- default:
- unifi_warning(priv, "mode is nor AP neither P2PGO, packet cant be xmit\n");
- return -1;
- }
-
- switch(dstStaInfo->peerControlledPort->port_action)
- {
- case CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_CLOSED_DISCARD:
- case CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_CLOSED_BLOCK:
- unifi_trace(priv, UDBG5, "destination port is closed/blocked, discarding the packet\n");
- return -1;
- default:
- unifi_trace(priv, UDBG5, "destination port state is open\n");
- }
-
- /* The port state is open and the destination station record is valid; the
- * power save state is validated in the uf_process_ma_packet_req function
- */
- unifi_trace(priv, UDBG5, "leaving uf_process_station_records_for_sending_data\n");
- return 0;
-}
-
-
-/*
- * ---------------------------------------------------------------------------
- * uf_handle_uspframes_delivery
- *
- * This function handles the USP session for a peer when
- * -> a trigger frame is received from the peer
- * -> a suspended USP needs to be processed (resumed)
- *
- * NOTE: uf_send_buffered_data_from_delivery_ac() is always called from this function.
- * Do not call uf_send_buffered_data_from_delivery_ac() directly from any other
- * part of the code.
- *
- * Arguments:
- * priv Pointer to device private context struct
- * staInfo peer for which UAPSD to be scheduled
- * interfaceTag virtual interface tag
- * ---------------------------------------------------------------------------
- */
-static void uf_handle_uspframes_delivery(unifi_priv_t * priv, CsrWifiRouterCtrlStaInfo_t *staInfo, u16 interfaceTag)
-{
-
- s8 i;
- u8 allDeliveryEnabled = 0, dataAvailable = 0;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
- unsigned long lock_flags;
-
- unifi_trace(priv, UDBG2, " ++ uf_handle_uspframes_delivery, uapsd active=%x, suspended?=%x\n",
- staInfo->uapsdActive, staInfo->uspSuspend);
-
- /* Check for buffered frames in priority order and deliver them:
- * 1. AC_VO is delivery enabled & management frames are available
- * 2. Process the remaining ACs in order from AC_VO to AC_BK
- */
-
- /* USP initiated by a WMM-PS enabled peer, so set the status flag to TRUE */
- if (!staInfo->uspSuspend && staInfo->uapsdActive)
- {
- unifi_notice(priv, "uf_handle_uspframes_delivery: U-APSD already active! STA=%x:%x:%x:%x:%x:%x\n",
- staInfo->peerMacAddress.a[0], staInfo->peerMacAddress.a[1],
- staInfo->peerMacAddress.a[2], staInfo->peerMacAddress.a[3],
- staInfo->peerMacAddress.a[4], staInfo->peerMacAddress.a[5]);
- return;
- }
-
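- /* Begin the service period: mark U-APSD active and clear any previous suspend
- * state under the station record lock.
- */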
- spin_lock_irqsave(&priv->staRecord_lock,lock_flags);
- staInfo->uapsdActive = TRUE;
- staInfo->uspSuspend = FALSE;
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
-
- if(((staInfo->powersaveMode[UNIFI_TRAFFIC_Q_VO]==CSR_WIFI_AC_TRIGGER_AND_DELIVERY_ENABLED)||
- (staInfo->powersaveMode[UNIFI_TRAFFIC_Q_VO]==CSR_WIFI_AC_DELIVERY_ONLY_ENABLE))
- && (!list_empty(&staInfo->mgtFrames))) {
-
- /* Management queue has data && UNIFI_TRAFFIC_Q_VO is delivery enable */
- unifi_trace(priv, UDBG4, "uf_handle_uspframes_delivery: Sending buffered management frames\n");
- uf_send_buffered_data_from_delivery_ac(priv, staInfo, UNIFI_TRAFFIC_Q_VO, &staInfo->mgtFrames);
- }
-
- if (!uf_is_more_data_for_delivery_ac(priv, staInfo)) {
- /* All delivery-enabled ACs are empty, so a QoS Null must be sent to terminate the USP.
- * NOTE: even if a management frame has just been sent, a QoS Null must still follow to terminate the USP.
- */
- if (!staInfo->uspSuspend) {
- spin_lock_irqsave(&priv->staRecord_lock,lock_flags);
- staInfo->uapsdActive = FALSE;
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
-
- unifi_trace(priv, UDBG2, "uf_handle_uspframes_delivery: sending QNull for trigger\n");
- uf_send_qos_null(priv, interfaceTag, staInfo->peerMacAddress.a, (CSR_PRIORITY) staInfo->triggerFramePriority, staInfo);
- staInfo->triggerFramePriority = CSR_QOS_UP0;
- } else {
- unifi_trace(priv, UDBG2, "uf_handle_uspframes_delivery: MgtQ xfer suspended\n");
- }
- } else {
- for(i = UNIFI_TRAFFIC_Q_VO; i >= UNIFI_TRAFFIC_Q_BK; i--) {
- if(((staInfo->powersaveMode[i]==CSR_WIFI_AC_DELIVERY_ONLY_ENABLE)
- ||(staInfo->powersaveMode[i]==CSR_WIFI_AC_TRIGGER_AND_DELIVERY_ENABLED))
- && (!list_empty(&staInfo->dataPdu[i]))) {
- /* Deliver Data according to AC priority (from VO to BK) as part of USP */
- unifi_trace(priv, UDBG4, "uf_handle_uspframes_delivery: Buffered data frames from Queue (%d) for USP\n", i);
- uf_send_buffered_data_from_delivery_ac(priv, staInfo, i, &staInfo->dataPdu[i]);
- }
-
- if ((!staInfo->uapsdActive) ||
- (staInfo->uspSuspend && IS_DTIM_ACTIVE(interfacePriv->dtimActive,interfacePriv->multicastPduHostTag))) {
- /* If DTIM activity is found while serving one AC, the USP is suspended and
- * there is no need to parse the remaining ACs. Break out of the loop.
- */
- unifi_trace(priv, UDBG2, "uf_handle_uspframes_delivery: suspend=%x, DTIM=%x, USP terminated=%s\n",
- staInfo->uspSuspend, IS_DTIM_ACTIVE(interfacePriv->dtimActive,interfacePriv->multicastPduHostTag),
- staInfo->uapsdActive?"NO":"YES");
- break;
- }
- }
- }
-
- /* Depending on the USP status, update the TIM accordingly for delivery enabled AC only
- * (since we are not manipulating any Non-delivery list(AC))
- */
- is_all_ac_deliver_enabled_and_moredata(staInfo, &allDeliveryEnabled, &dataAvailable);
- if ((allDeliveryEnabled && !dataAvailable)) {
- if ((staInfo->timSet != CSR_WIFI_TIM_RESET) && (staInfo->timSet != CSR_WIFI_TIM_RESETTING)) {
- staInfo->updateTimReqQueued = (u8) CSR_WIFI_TIM_RESET;
- unifi_trace(priv, UDBG4, " --uf_handle_uspframes_delivery, UAPSD timset\n");
- if (!staInfo->timRequestPendingFlag) {
- update_tim(priv, staInfo->aid, 0, interfaceTag, staInfo->assignedHandle);
- }
- }
- }
- unifi_trace(priv, UDBG2, " --uf_handle_uspframes_delivery, uapsd active=%x, suspend?=%x\n",
- staInfo->uapsdActive, staInfo->uspSuspend);
-}
-
-void uf_process_wmm_deliver_ac_uapsd(unifi_priv_t * priv,
- CsrWifiRouterCtrlStaInfo_t * srcStaInfo,
- u16 qosControl,
- u16 interfaceTag)
-{
- CSR_PRIORITY priority;
- unifi_TrafficQueue priority_q;
- unsigned long lock_flags;
-
- unifi_trace(priv, UDBG2, "++uf_process_wmm_deliver_ac_uapsd: uapsdactive?=%x\n", srcStaInfo->uapsdActive);
- /* If the received frame is a trigger frame and a delivery-enabled AC has data,
- * then transmit from the highest-priority delivery-enabled AC
- */
- priority = (CSR_PRIORITY)(qosControl & IEEE802_11_QC_TID_MASK);
- priority_q = unifi_frame_priority_to_queue((CSR_PRIORITY) priority);
-
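- /* The TID taken from the QoS Control field maps to the access category whose
- * trigger/delivery configuration decides whether this frame starts a USP.
- */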
- if((srcStaInfo->powersaveMode[priority_q]==CSR_WIFI_AC_TRIGGER_ONLY_ENABLED)
- ||(srcStaInfo->powersaveMode[priority_q]==CSR_WIFI_AC_TRIGGER_AND_DELIVERY_ENABLED)) {
- spin_lock_irqsave(&priv->staRecord_lock,lock_flags);
- srcStaInfo->triggerFramePriority = priority;
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
- unifi_trace(priv, UDBG2, "uf_process_wmm_deliver_ac_uapsd: trigger frame, Begin U-APSD, triggerQ=%x\n", priority_q);
- uf_handle_uspframes_delivery(priv, srcStaInfo, interfaceTag);
- }
- unifi_trace(priv, UDBG2, "--uf_process_wmm_deliver_ac_uapsd: uapsdactive?=%x\n", srcStaInfo->uapsdActive);
-}
-
-
-void uf_send_qos_null(unifi_priv_t * priv,u16 interfaceTag, const u8 *da,CSR_PRIORITY priority,CsrWifiRouterCtrlStaInfo_t * srcStaInfo)
-{
- bulk_data_param_t bulkdata;
- CsrResult csrResult;
- struct sk_buff *skb, *newSkb = NULL;
- CsrWifiMacAddress peerAddress;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
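- /* The QoS Null terminates a service period, so it is always sent with the
- * trigger and EOSP bits set in the transmission control.
- */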
- CSR_TRANSMISSION_CONTROL transmissionControl = (TRANSMISSION_CONTROL_EOSP_MASK | TRANSMISSION_CONTROL_TRIGGER_MASK);
- int r;
- CSR_SIGNAL signal;
- u32 priority_q;
- CSR_RATE transmitRate = 0;
-
-
- /* Send a QoS Null frame to the peer;
- * allocate MAC header (24) plus QoS control field (2) bytes */
- csrResult = unifi_net_data_malloc(priv, &bulkdata.d[0], MAC_HEADER_SIZE + QOS_CONTROL_HEADER_SIZE);
-
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(priv, " failed to allocate request_data. in uf_send_qos_null func\n");
- return ;
- }
- skb = (struct sk_buff *)(bulkdata.d[0].os_net_buf_ptr);
- skb->len = 0;
- bulkdata.d[0].os_data_ptr = skb->data;
- bulkdata.d[0].os_net_buf_ptr = (unsigned char*)skb;
- bulkdata.d[0].net_buf_length = bulkdata.d[0].data_length = skb->len;
- bulkdata.d[1].os_data_ptr = NULL;
- bulkdata.d[1].os_net_buf_ptr = NULL;
- bulkdata.d[1].net_buf_length = bulkdata.d[1].data_length = 0;
-
- /* For null frames the protection bit should not be set in the MAC header, so pass 0 for the protection field below */
-
- if (prepare_and_add_macheader(priv, skb, newSkb, priority, &bulkdata, interfaceTag, da, interfacePriv->bssid.a, 0)) {
- unifi_error(priv, "failed to create MAC header\n");
- unifi_net_data_free(priv, &bulkdata.d[0]);
- return;
- }
- memcpy(peerAddress.a, ((u8 *) bulkdata.d[0].os_data_ptr) + 4, ETH_ALEN);
- /* convert priority to queue */
- priority_q = unifi_frame_priority_to_queue((CSR_PRIORITY) priority);
-
- /* Frame the MA-PACKET.req; it is saved or transmitted depending on the queue state.
- * Send the null frame at a data rate of 1 Mb/s for AP or 6 Mb/s for P2P GO.
- */
- switch (interfacePriv->interfaceMode)
- {
- case CSR_WIFI_ROUTER_CTRL_MODE_AP:
- transmitRate = 2;
- break;
- case CSR_WIFI_ROUTER_CTRL_MODE_P2PGO:
- transmitRate = 12;
- break;
- default:
- transmitRate = 0;
- }
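- /* transmitRate is in units of 500 kbit/s, so 2 = 1 Mbit/s and 12 = 6 Mbit/s */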
- unifi_frame_ma_packet_req(priv, priority, transmitRate, 0xffffffff, interfaceTag,
- transmissionControl, priv->netdev_client->sender_id,
- peerAddress.a, &signal);
-
- r = ul_send_signal_unpacked(priv, &signal, &bulkdata);
- if(r) {
- unifi_error(priv, "failed to send QOS data null packet result: %d\n",r);
- unifi_net_data_free(priv, &bulkdata.d[0]);
- }
-
- return;
-
-}
-void uf_send_nulldata(unifi_priv_t * priv,u16 interfaceTag, const u8 *da,CSR_PRIORITY priority,CsrWifiRouterCtrlStaInfo_t * srcStaInfo)
-{
- bulk_data_param_t bulkdata;
- CsrResult csrResult;
- struct sk_buff *skb, *newSkb = NULL;
- CsrWifiMacAddress peerAddress;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
- CSR_TRANSMISSION_CONTROL transmissionControl = 0;
- int r;
- CSR_SIGNAL signal;
- u32 priority_q;
- CSR_RATE transmitRate = 0;
- CSR_MA_PACKET_REQUEST *req = &signal.u.MaPacketRequest;
- unsigned long lock_flags;
-
- /* Send a Null Frame to Peer, size = 24 for MAC header */
- csrResult = unifi_net_data_malloc(priv, &bulkdata.d[0], MAC_HEADER_SIZE);
-
- if (csrResult != CSR_RESULT_SUCCESS) {
- unifi_error(priv, "uf_send_nulldata: Failed to allocate memory for NULL frame\n");
- return ;
- }
- skb = (struct sk_buff *)(bulkdata.d[0].os_net_buf_ptr);
- skb->len = 0;
- bulkdata.d[0].os_data_ptr = skb->data;
- bulkdata.d[0].os_net_buf_ptr = (unsigned char*)skb;
- bulkdata.d[0].net_buf_length = bulkdata.d[0].data_length = skb->len;
- bulkdata.d[1].os_data_ptr = NULL;
- bulkdata.d[1].os_net_buf_ptr = NULL;
- bulkdata.d[1].net_buf_length = bulkdata.d[1].data_length = 0;
-
- /* For null frames the protection bit should not be set in the MAC header, so pass 0 for the protection field below */
- if (prepare_and_add_macheader(priv, skb, newSkb, priority, &bulkdata, interfaceTag, da, interfacePriv->bssid.a, 0)) {
- unifi_error(priv, "uf_send_nulldata: Failed to create MAC header\n");
- unifi_net_data_free(priv, &bulkdata.d[0]);
- return;
- }
- memcpy(peerAddress.a, ((u8 *) bulkdata.d[0].os_data_ptr) + 4, ETH_ALEN);
- /* convert priority to queue */
- priority_q = unifi_frame_priority_to_queue((CSR_PRIORITY) priority);
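- /* Request a confirmation for this frame so that the host tag saved below can
- * be matched against the MA-PACKET confirm.
- */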
- transmissionControl &= ~(CSR_NO_CONFIRM_REQUIRED);
-
- /* Frame the MA-PACKET.req; it is saved or transmitted depending on the queue state.
- * Send the null frame at a data rate of 1 Mb/s for AP or 6 Mb/s for P2P GO.
- */
- switch (interfacePriv->interfaceMode)
- {
- case CSR_WIFI_ROUTER_CTRL_MODE_AP:
- transmitRate = 2;
- break;
- case CSR_WIFI_ROUTER_CTRL_MODE_P2PGO:
- transmitRate = 12;
- break;
- default:
- transmitRate = 0;
- }
- unifi_frame_ma_packet_req(priv, priority, transmitRate, INVALID_HOST_TAG, interfaceTag,
- transmissionControl, priv->netdev_client->sender_id,
- peerAddress.a, &signal);
-
- /* Save host tag to check the status on reception of MA packet confirm */
- srcStaInfo->nullDataHostTag = req->HostTag;
- unifi_trace(priv, UDBG1, "uf_send_nulldata: STA AID = %d hostTag = %x\n", srcStaInfo->aid, req->HostTag);
-
- r = ul_send_signal_unpacked(priv, &signal, &bulkdata);
-
- if(r == -ENOSPC) {
- unifi_trace(priv, UDBG1, "uf_send_nulldata: ENOSPC Requeue the Null frame\n");
- enque_tx_data_pdu(priv, &bulkdata, &srcStaInfo->dataPdu[priority_q], &signal, 1);
- spin_lock_irqsave(&priv->staRecord_lock,lock_flags);
- srcStaInfo->noOfPktQueued++;
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
-
-
- }
- if(r && r != -ENOSPC){
- unifi_error(priv, "uf_send_nulldata: Failed to send Null frame Error = %d\n",r);
- unifi_net_data_free(priv, &bulkdata.d[0]);
- srcStaInfo->nullDataHostTag = INVALID_HOST_TAG;
- }
-
- return;
-}
-
-u8 uf_check_broadcast_bssid(unifi_priv_t *priv, const bulk_data_param_t *bulkdata)
-{
- u8 *bssid = NULL;
- static const CsrWifiMacAddress broadcast_address = {{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}};
- u8 toDs, fromDs;
-
- toDs = (((bulkdata->d[0].os_data_ptr)[1]) & 0x01) ? 1 : 0;
- fromDs =(((bulkdata->d[0].os_data_ptr)[1]) & 0x02) ? 1 : 0;
-
- if (toDs && fromDs)
- {
- unifi_trace(priv, UDBG6, "Address 4 present, Don't try to find BSSID\n");
- bssid = NULL;
- }
- else if((toDs == 0) && (fromDs ==0))
- {
- /* BSSID is Address 3 */
- bssid = (u8 *) (bulkdata->d[0].os_data_ptr + 4 + (2 * ETH_ALEN));
- }
- else if(toDs)
- {
- /* BSSID is Address 1 */
- bssid = (u8 *) (bulkdata->d[0].os_data_ptr + 4);
- }
- else if(fromDs)
- {
- /* BSSID is Address 2 */
- bssid = (u8 *) (bulkdata->d[0].os_data_ptr + 4 + ETH_ALEN);
- }
-
- if ((bssid != NULL) && (memcmp(broadcast_address.a, bssid, ETH_ALEN) == 0))
- {
- return TRUE;
- }
- else
- {
- return FALSE;
- }
-}
-
-
-u8 uf_process_pm_bit_for_peer(unifi_priv_t * priv, CsrWifiRouterCtrlStaInfo_t * srcStaInfo,
- u8 pmBit,u16 interfaceTag)
-{
- u8 moreData = FALSE;
- u8 powerSaveChanged = FALSE;
- unsigned long lock_flags;
-
- unifi_trace(priv, UDBG3, "entering uf_process_pm_bit_for_peer\n");
- if (pmBit) {
- priv->allPeerDozing |= (0x01 << (srcStaInfo->assignedHandle));
- } else {
- priv->allPeerDozing &= ~(0x01 << (srcStaInfo->assignedHandle));
- }
- if(pmBit) {
- if(srcStaInfo->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_ACTIVE) {
-
- /* disable the preemption */
- spin_lock_irqsave(&priv->staRecord_lock,lock_flags);
- srcStaInfo->currentPeerState =CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_POWER_SAVE;
- powerSaveChanged = TRUE;
- /* enable the preemption */
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
- } else {
- return powerSaveChanged;
- }
- } else {
- if(srcStaInfo->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_POWER_SAVE) {
- /* disable the preemption */
- spin_lock_irqsave(&priv->staRecord_lock,lock_flags);
- srcStaInfo->currentPeerState = CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_ACTIVE;
- powerSaveChanged = TRUE;
- /* enable the preemption */
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
- }else {
- return powerSaveChanged;
- }
- }
-
-
- if(srcStaInfo->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_ACTIVE) {
- unifi_trace(priv,UDBG3, "Peer with AID = %d is active now\n",srcStaInfo->aid);
- process_peer_active_transition(priv,srcStaInfo,interfaceTag);
- } else {
- unifi_trace(priv,UDBG3, "Peer with AID = %d is in PS Now\n",srcStaInfo->aid);
- /* Set TIM if needed */
- if(!srcStaInfo->wmmOrQosEnabled) {
- moreData = (!list_empty(&srcStaInfo->mgtFrames) ||
- !list_empty(&srcStaInfo->dataPdu[UNIFI_TRAFFIC_Q_VO])||
- !list_empty(&srcStaInfo->dataPdu[UNIFI_TRAFFIC_Q_CONTENTION]));
- if(moreData && (srcStaInfo->timSet == CSR_WIFI_TIM_RESET)) {
- unifi_trace(priv, UDBG3, "This condition should not occur\n");
- if (!srcStaInfo->timRequestPendingFlag){
- update_tim(priv,srcStaInfo->aid,1,interfaceTag, srcStaInfo->assignedHandle);
- }
- else
- {
- /* Cache the TimSet value so that it is processed immediately after
- * the current setTim request completes
- */
- srcStaInfo->updateTimReqQueued = 1;
- unifi_trace(priv, UDBG6, "update_tim : One more UpdateTim Request (Tim value:%d) Queued for AID %x\n", srcStaInfo->updateTimReqQueued,
- srcStaInfo->aid);
- }
-
- }
- } else {
- u8 allDeliveryEnabled = 0, dataAvailable = 0;
- unifi_trace(priv, UDBG5, "Qos in AP Mode\n");
- /* Check if all AC's are Delivery Enabled */
- is_all_ac_deliver_enabled_and_moredata(srcStaInfo, &allDeliveryEnabled, &dataAvailable);
- /*check for more data in non-delivery enabled queues*/
- moreData = (uf_is_more_data_for_non_delivery_ac(srcStaInfo) || (allDeliveryEnabled && dataAvailable));
-
- if(moreData && (srcStaInfo->timSet == CSR_WIFI_TIM_RESET)) {
- if (!srcStaInfo->timRequestPendingFlag){
- update_tim(priv,srcStaInfo->aid,1,interfaceTag, srcStaInfo->assignedHandle);
- }
- else
- {
- /* Cache the TimSet value so that it is processed immediately after
- * the current setTim request completes
- */
- srcStaInfo->updateTimReqQueued = 1;
- unifi_trace(priv, UDBG6, "update_tim : One more UpdateTim Request (Tim value:%d) Queued for AID %x\n", srcStaInfo->updateTimReqQueued,
- srcStaInfo->aid);
- }
- }
- }
- }
- unifi_trace(priv, UDBG3, "leaving uf_process_pm_bit_for_peer\n");
- return powerSaveChanged;
-}
-
-
-
-void uf_process_ps_poll(unifi_priv_t *priv,u8* sa,u8* da,u8 pmBit,u16 interfaceTag)
-{
- CsrWifiRouterCtrlStaInfo_t *staRecord =
- CsrWifiRouterCtrlGetStationRecordFromPeerMacAddress(priv, sa, interfaceTag);
- tx_buffered_packets_t * buffered_pkt = NULL;
- CsrWifiMacAddress peerMacAddress;
- unsigned long lock_flags;
- s8 r =0;
- u8 moreData = FALSE;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
-
- unifi_trace(priv, UDBG3, "entering uf_process_ps_poll\n");
- if(!staRecord) {
- memcpy(peerMacAddress.a,sa,ETH_ALEN);
- unifi_trace(priv, UDBG3, "In uf_process_ps_poll, sta record not found:unexpected frame addr = %x:%x:%x:%x:%x:%x\n",
- sa[0], sa[1],sa[2], sa[3], sa[4],sa[5]);
- CsrWifiRouterCtrlUnexpectedFrameIndSend(priv->CSR_WIFI_SME_IFACEQUEUE,0,interfaceTag,peerMacAddress);
- return;
- }
-
- uf_process_pm_bit_for_peer(priv,staRecord,pmBit,interfaceTag);
-
- /* Update station last activity time */
- staRecord->activity_flag = TRUE;
-
- /* This should not change the PM bit as PS-POLL has PM bit always set */
- if(!pmBit) {
- unifi_notice (priv," PM bit reset in PS-POLL\n");
- return;
- }
-
- if(IS_DTIM_ACTIVE(interfacePriv->dtimActive,interfacePriv->multicastPduHostTag)) {
- /* multicast transmission takes priority, so do not act on the PS-POLL */
- unifi_notice (priv," multicast transmission is going on so don't take action on PS-POLL\n");
- return;
- }
-
- if(!staRecord->wmmOrQosEnabled) {
- if((buffered_pkt=dequeue_tx_data_pdu(priv, &staRecord->mgtFrames))) {
- buffered_pkt->transmissionControl |= TRANSMISSION_CONTROL_TRIGGER_MASK;
- moreData = (!list_empty(&staRecord->dataPdu[UNIFI_TRAFFIC_Q_CONTENTION]) ||
- !list_empty(&staRecord->dataPdu[UNIFI_TRAFFIC_Q_VO]) ||
- !list_empty(&staRecord->mgtFrames));
-
- buffered_pkt->transmissionControl |= (TRANSMISSION_CONTROL_TRIGGER_MASK | TRANSMISSION_CONTROL_EOSP_MASK);
- if((r=frame_and_send_queued_pdu(priv,buffered_pkt,staRecord,moreData,FALSE)) == -ENOSPC) {
- /* Clear the trigger bit transmission control*/
- buffered_pkt->transmissionControl &= ~(TRANSMISSION_CONTROL_TRIGGER_MASK | TRANSMISSION_CONTROL_EOSP_MASK);
- /* Enqueue at the head of the queue */
- spin_lock_irqsave(&priv->tx_q_lock,lock_flags);
- list_add(&buffered_pkt->q, &staRecord->mgtFrames);
- spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags);
- unifi_trace(priv, UDBG1, "(ENOSPC) PS-POLL received : PDU sending failed \n");
- priv->pausedStaHandle[3]=(u8)(staRecord->assignedHandle);
- } else {
- if(r){
- unifi_trace (priv, UDBG1, " HIP validation failure : PDU sending failed \n");
- /* the PDU failed and we can't do anything about it, so free the storage */
- unifi_net_data_free(priv, &buffered_pkt->bulkdata);
- }
- kfree(buffered_pkt);
- }
- } else if((buffered_pkt=dequeue_tx_data_pdu(priv, &staRecord->dataPdu[UNIFI_TRAFFIC_Q_VO]))) {
- buffered_pkt->transmissionControl |= TRANSMISSION_CONTROL_TRIGGER_MASK;
- moreData = (!list_empty(&staRecord->dataPdu[UNIFI_TRAFFIC_Q_CONTENTION]) ||
- !list_empty(&staRecord->dataPdu[UNIFI_TRAFFIC_Q_VO]));
-
- buffered_pkt->transmissionControl |= (TRANSMISSION_CONTROL_TRIGGER_MASK | TRANSMISSION_CONTROL_EOSP_MASK);
- if((r=frame_and_send_queued_pdu(priv,buffered_pkt,staRecord,moreData,FALSE)) == -ENOSPC) {
- /* Clear the trigger bit transmission control*/
- buffered_pkt->transmissionControl &= ~(TRANSMISSION_CONTROL_TRIGGER_MASK | TRANSMISSION_CONTROL_EOSP_MASK);
- /* Enqueue at the head of the queue */
- spin_lock_irqsave(&priv->tx_q_lock,lock_flags);
- list_add(&buffered_pkt->q, &staRecord->dataPdu[UNIFI_TRAFFIC_Q_VO]);
- spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags);
- priv->pausedStaHandle[3]=(u8)(staRecord->assignedHandle);
- unifi_trace(priv, UDBG1, "(ENOSPC) PS-POLL received : PDU sending failed \n");
- } else {
- if(r){
- unifi_trace (priv, UDBG1, " HIP validation failure : PDU sending failed \n");
- /* the PDU failed and we can't do anything about it, so free the storage */
- unifi_net_data_free(priv, &buffered_pkt->bulkdata);
- }
- kfree(buffered_pkt);
- }
- } else if((buffered_pkt=dequeue_tx_data_pdu(priv, &staRecord->dataPdu[UNIFI_TRAFFIC_Q_CONTENTION]))) {
- buffered_pkt->transmissionControl |= TRANSMISSION_CONTROL_TRIGGER_MASK;
- moreData = !list_empty(&staRecord->dataPdu[UNIFI_TRAFFIC_Q_CONTENTION]);
-
- buffered_pkt->transmissionControl |= (TRANSMISSION_CONTROL_TRIGGER_MASK | TRANSMISSION_CONTROL_EOSP_MASK);
- if((r=frame_and_send_queued_pdu(priv,buffered_pkt,staRecord,moreData,FALSE)) == -ENOSPC) {
- /* Clear the trigger bit transmission control*/
- buffered_pkt->transmissionControl &= ~(TRANSMISSION_CONTROL_TRIGGER_MASK | TRANSMISSION_CONTROL_EOSP_MASK);
- /* Enqueue at the head of the queue */
- spin_lock_irqsave(&priv->tx_q_lock,lock_flags);
- list_add(&buffered_pkt->q, &staRecord->dataPdu[UNIFI_TRAFFIC_Q_CONTENTION]);
- spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags);
- priv->pausedStaHandle[0]=(u8)(staRecord->assignedHandle);
- unifi_trace(priv, UDBG1, "(ENOSPC) PS-POLL received : PDU sending failed \n");
- } else {
- if(r){
- unifi_trace (priv, UDBG1, " HIP validation failure : PDU sending failed \n");
- /* the PDU failed and we can't do anything about it, so free the storage */
- unifi_net_data_free(priv, &buffered_pkt->bulkdata);
- }
- kfree(buffered_pkt);
- }
- } else {
- /* Since we have already sent an ACK,
- * there is no need to send a NULL frame */
- }
- moreData = (!list_empty(&staRecord->dataPdu[UNIFI_TRAFFIC_Q_VO]) ||
- !list_empty(&staRecord->dataPdu[UNIFI_TRAFFIC_Q_CONTENTION]) ||
- !list_empty(&staRecord->mgtFrames));
- if(!moreData && (staRecord->timSet == CSR_WIFI_TIM_SET)) {
- unifi_trace(priv, UDBG3, "more data = NULL, set tim to 0 in uf_process_ps_poll\n");
- if (!staRecord->timRequestPendingFlag){
- update_tim(priv,staRecord->aid,0,interfaceTag, staRecord->assignedHandle);
- }
- else
- {
- /* Cache the TimSet value so that it is processed immediately after
- * the current setTim request completes
- */
- staRecord->updateTimReqQueued = 0;
- unifi_trace(priv, UDBG6, "update_tim : One more UpdateTim Request (Tim value:%d) Queued for AID %x\n", staRecord->updateTimReqQueued,
- staRecord->aid);
- }
- }
- } else {
-
- u8 allDeliveryEnabled = 0, dataAvailable = 0;
- unifi_trace(priv, UDBG3,"Qos Support station.Processing PS-Poll\n");
-
- /*Send Data From Management Frames*/
- /* Priority order for delivering the buffered packets:
- * 1. Deliver management frames if any are queued
- * 2. Then frames from the non-delivery-enabled access categories, including
- * UNIFI_TRAFFIC_Q_VO, in priority order from VO down to BK
- */
-
- /* Check if all AC's are Delivery Enabled */
- is_all_ac_deliver_enabled_and_moredata(staRecord, &allDeliveryEnabled, &dataAvailable);
-
- if (allDeliveryEnabled) {
- unifi_trace(priv, UDBG3, "uf_process_ps_poll: All ACs are delivery enable so Sending QOS Null in response of Ps-poll\n");
- uf_send_qos_null(priv,interfaceTag,sa,CSR_QOS_UP0,staRecord);
- return;
- }
-
- if (!list_empty(&staRecord->mgtFrames)) {
- if ((buffered_pkt=dequeue_tx_data_pdu(priv, &staRecord->mgtFrames))) {
- /* There are no packets in the non-delivery-enabled UNIFI_TRAFFIC_Q_VO, so look
- * in the management queue of the station record
- */
- moreData = uf_is_more_data_for_non_delivery_ac(staRecord);
- buffered_pkt->transmissionControl |= (TRANSMISSION_CONTROL_TRIGGER_MASK | TRANSMISSION_CONTROL_EOSP_MASK);
-
- /* Last parameter is EOSP & its false always for PS-POLL processing */
- if((r=frame_and_send_queued_pdu(priv,buffered_pkt,staRecord,moreData,FALSE)) == -ENOSPC) {
- /* Clear the trigger bit transmission control*/
- buffered_pkt->transmissionControl &= ~(TRANSMISSION_CONTROL_TRIGGER_MASK | TRANSMISSION_CONTROL_EOSP_MASK);
- /* Enqueue at the head of the queue */
- spin_lock_irqsave(&priv->tx_q_lock,lock_flags);
- list_add(&buffered_pkt->q, &staRecord->mgtFrames);
- spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags);
- priv->pausedStaHandle[0]=(u8)(staRecord->assignedHandle);
- unifi_trace(priv, UDBG1, "(ENOSPC) PS-POLL received : PDU sending failed \n");
- } else {
- if(r){
- unifi_trace (priv, UDBG1, " HIP validation failure : PDU sending failed \n");
- /* the PDU failed and we can't do anything about it, so free the storage */
- unifi_net_data_free(priv, &buffered_pkt->bulkdata);
- }
- kfree(buffered_pkt);
- }
- } else {
- unifi_error(priv, "uf_process_ps_poll: Mgt frame list empty!! \n");
- }
-
- } else {
- s8 i;
- /* No buffered packet in the management frame queue (step 1 failed), so proceed with step 2:
- * UNIFI_TRAFFIC_Q_VO -> VI -> BE -> BK
- */
- for(i= 3; i>=0; i--) {
- if (!IS_DELIVERY_ENABLED(staRecord->powersaveMode[i])) {
- /* Send One packet, if queue is NULL then continue */
- if((buffered_pkt=dequeue_tx_data_pdu(priv, &staRecord->dataPdu[i]))) {
- moreData = uf_is_more_data_for_non_delivery_ac(staRecord);
-
- buffered_pkt->transmissionControl |= (TRANSMISSION_CONTROL_TRIGGER_MASK | TRANSMISSION_CONTROL_EOSP_MASK);
-
- /* Last parameter is EOSP & its false always for PS-POLL processing */
- if((r=frame_and_send_queued_pdu(priv,buffered_pkt,staRecord,moreData,FALSE)) == -ENOSPC) {
- /* Clear the trigger bit transmission control*/
- buffered_pkt->transmissionControl &= ~(TRANSMISSION_CONTROL_TRIGGER_MASK | TRANSMISSION_CONTROL_EOSP_MASK);
- /* Enqueue at the head of the queue */
- spin_lock_irqsave(&priv->tx_q_lock,lock_flags);
- list_add(&buffered_pkt->q, &staRecord->dataPdu[i]);
- spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags);
- priv->pausedStaHandle[0]=(u8)(staRecord->assignedHandle);
- unifi_trace(priv, UDBG1, "(ENOSPC) PS-POLL received : PDU sending failed \n");
- } else {
- if(r) {
- unifi_trace (priv, UDBG1, " HIP validation failure : PDU sending failed \n");
- /* the PDU failed and we can't do anything about it, so free the storage */
- unifi_net_data_free(priv, &buffered_pkt->bulkdata);
- }
- kfree(buffered_pkt);
- }
- break;
- }
- }
- }
- }
- /* Check if all AC's are Delivery Enabled */
- is_all_ac_deliver_enabled_and_moredata(staRecord, &allDeliveryEnabled, &dataAvailable);
- /*check for more data in non-delivery enabled queues*/
- moreData = (uf_is_more_data_for_non_delivery_ac(staRecord) || (allDeliveryEnabled && dataAvailable));
- if(!moreData && (staRecord->timSet == CSR_WIFI_TIM_SET)) {
- unifi_trace(priv, UDBG3, "more data = NULL, set tim to 0 in uf_process_ps_poll\n");
- if (!staRecord->timRequestPendingFlag){
- update_tim(priv,staRecord->aid,0,interfaceTag, staRecord->assignedHandle);
- }
- else
- {
- /* Cache the TimSet value so that it is processed immediately after
- * the current setTim request completes
- */
- staRecord->updateTimReqQueued = 0;
- unifi_trace(priv, UDBG6, "update_tim : One more UpdateTim Request (Tim value:%d) Queued for AID %x\n", staRecord->updateTimReqQueued,
- staRecord->aid);
- }
-
- }
- }
-
- unifi_trace(priv, UDBG3, "leaving uf_process_ps_poll\n");
-}
-
-
-
-void add_to_send_cfm_list(unifi_priv_t * priv,
- tx_buffered_packets_t *tx_q_item,
- struct list_head *frames_need_cfm_list)
-{
- tx_buffered_packets_t *send_cfm_list_item = NULL;
-
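- /* GFP_ATOMIC: this can be called with priv->tx_q_lock held (see
- * uf_prepare_send_cfm_list_for_queued_pkts), so the allocation must not sleep.
- */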
- send_cfm_list_item = kmalloc(sizeof(tx_buffered_packets_t), GFP_ATOMIC);
-
- if(send_cfm_list_item == NULL){
- unifi_warning(priv, "%s: Failed to allocate memory for new list item \n");
- return;
- }
-
- INIT_LIST_HEAD(&send_cfm_list_item->q);
-
- send_cfm_list_item->hostTag = tx_q_item->hostTag;
- send_cfm_list_item->interfaceTag = tx_q_item->interfaceTag;
- send_cfm_list_item->transmissionControl = tx_q_item->transmissionControl;
- send_cfm_list_item->leSenderProcessId = tx_q_item->leSenderProcessId;
- send_cfm_list_item->rate = tx_q_item->rate;
- memcpy(send_cfm_list_item->peerMacAddress.a, tx_q_item->peerMacAddress.a, ETH_ALEN);
- send_cfm_list_item->priority = tx_q_item->priority;
-
- list_add_tail(&send_cfm_list_item->q, frames_need_cfm_list);
-}
-
-void uf_prepare_send_cfm_list_for_queued_pkts(unifi_priv_t * priv,
- struct list_head *frames_need_cfm_list,
- struct list_head * list)
-{
- tx_buffered_packets_t *tx_q_item = NULL;
- struct list_head *listHead;
- struct list_head *placeHolder;
- unsigned long lock_flags;
-
- spin_lock_irqsave(&priv->tx_q_lock,lock_flags);
-
- /* Search through the list and, if confirmation is required for any frame,
- add it to the send_cfm list */
- list_for_each_safe(listHead, placeHolder, list) {
- tx_q_item = list_entry(listHead, tx_buffered_packets_t, q);
-
- if(!tx_q_item) {
- unifi_error(priv, "Entry should exist, otherwise it is a (BUG)\n");
- continue;
- }
-
- /* If a confirmation is requested and the sender is not the netdevice client,
- save the entry in the list of frames that need confirms */
- if (!(tx_q_item->transmissionControl & CSR_NO_CONFIRM_REQUIRED) &&
- (tx_q_item->leSenderProcessId != priv->netdev_client->sender_id)){
- unifi_trace(priv, UDBG1, "%s: SenderProcessID=%x host tag=%x transmission control=%x\n",
- __FUNCTION__,
- tx_q_item->leSenderProcessId,
- tx_q_item->hostTag,
- tx_q_item->transmissionControl);
-
- add_to_send_cfm_list(priv, tx_q_item, frames_need_cfm_list);
- }
- }
-
- spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags);
-
-}
-
-
-
-void uf_flush_list(unifi_priv_t * priv, struct list_head * list)
-{
- tx_buffered_packets_t *tx_q_item;
- struct list_head *listHead;
- struct list_head *placeHolder;
- unsigned long lock_flags;
-
- unifi_trace(priv, UDBG5, "entering the uf_flush_list \n");
-
- spin_lock_irqsave(&priv->tx_q_lock,lock_flags);
- /* go through list, delete & free memory */
- list_for_each_safe(listHead, placeHolder, list) {
- tx_q_item = list_entry(listHead, tx_buffered_packets_t, q);
-
- if(!tx_q_item) {
- unifi_error(priv, "entry should exists, otherwise crashes (bug)\n");
- }
- unifi_trace(priv, UDBG5,
- "proccess_tx: in uf_flush_list peerMacAddress=%02X%02X%02X%02X%02X%02X senderProcessId=%x\n",
- tx_q_item->peerMacAddress.a[0], tx_q_item->peerMacAddress.a[1],
- tx_q_item->peerMacAddress.a[2], tx_q_item->peerMacAddress.a[3],
- tx_q_item->peerMacAddress.a[4], tx_q_item->peerMacAddress.a[5],
- tx_q_item->leSenderProcessId);
-
- list_del(listHead);
- /* free the allocated memory */
- unifi_net_data_free(priv, &tx_q_item->bulkdata);
- kfree(tx_q_item);
- tx_q_item = NULL;
- if (!priv->noOfPktQueuedInDriver) {
- unifi_error(priv, "packets queued in driver 0 still decrementing in %s\n", __FUNCTION__);
- } else {
- priv->noOfPktQueuedInDriver--;
- }
- }
- spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags);
-}
-
-tx_buffered_packets_t *dequeue_tx_data_pdu(unifi_priv_t *priv, struct list_head *txList)
-{
- /* dequeue the tx data packets from the appropriate queue */
- tx_buffered_packets_t *tx_q_item = NULL;
- struct list_head *listHead;
- struct list_head *placeHolder;
- unsigned long lock_flags;
-
- unifi_trace(priv, UDBG5, "entering dequeue_tx_data_pdu\n");
- /* check for list empty */
- if (list_empty(txList)) {
- unifi_trace(priv, UDBG5, "In dequeue_tx_data_pdu, the list is empty\n");
- return NULL;
- }
-
- /* Sanity check: the packet count should not have wrapped below zero */
- if (priv->noOfPktQueuedInDriver == 0xFFFF) {
- unifi_warning(priv, "no packet available in queue: debug");
- return NULL;
- }
-
- /* Return the first node after the list head and delete it from the list (at least one item exists) */
- spin_lock_irqsave(&priv->tx_q_lock,lock_flags);
- list_for_each_safe(listHead, placeHolder, txList) {
- tx_q_item = list_entry(listHead, tx_buffered_packets_t, q);
- list_del(listHead);
- break;
- }
- spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags);
-
- if (tx_q_item) {
- unifi_trace(priv, UDBG5,
- "proccess_tx: In dequeue_tx_data_pdu peerMacAddress=%02X%02X%02X%02X%02X%02X senderProcessId=%x\n",
- tx_q_item->peerMacAddress.a[0], tx_q_item->peerMacAddress.a[1],
- tx_q_item->peerMacAddress.a[2], tx_q_item->peerMacAddress.a[3],
- tx_q_item->peerMacAddress.a[4], tx_q_item->peerMacAddress.a[5],
- tx_q_item->leSenderProcessId);
- }
-
- unifi_trace(priv, UDBG5, "leaving dequeue_tx_data_pdu\n");
- return tx_q_item;
-}
-/* generic function to get the station record handler */
-CsrWifiRouterCtrlStaInfo_t *CsrWifiRouterCtrlGetStationRecordFromPeerMacAddress(unifi_priv_t *priv,
- const u8 *peerMacAddress,
- u16 interfaceTag)
-{
- u8 i;
- netInterface_priv_t *interfacePriv;
- unsigned long lock_flags;
-
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "interfaceTag is not proper, interfaceTag = %d\n", interfaceTag);
- return NULL;
- }
-
- interfacePriv = priv->interfacePriv[interfaceTag];
-
- /* disable the preemption until station record is fetched */
- spin_lock_irqsave(&priv->staRecord_lock,lock_flags);
-
- for (i = 0; i < UNIFI_MAX_CONNECTIONS; i++) {
- if (interfacePriv->staInfo[i]!= NULL) {
- if (!memcmp(((CsrWifiRouterCtrlStaInfo_t *) (interfacePriv->staInfo[i]))->peerMacAddress.a, peerMacAddress, ETH_ALEN)) {
- /* enable the preemption as station record is fetched */
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
- unifi_trace(priv, UDBG5, "peer entry found in station record\n");
- return ((CsrWifiRouterCtrlStaInfo_t *) (interfacePriv->staInfo[i]));
- }
- }
- }
- /* enable the preemption as station record is fetched */
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
- unifi_trace(priv, UDBG5, "peer entry not found in station record\n");
- return NULL;
-}
-/* generic function to get the station record handler from the handle */
-CsrWifiRouterCtrlStaInfo_t * CsrWifiRouterCtrlGetStationRecordFromHandle(unifi_priv_t *priv,
- u32 handle,
- u16 interfaceTag)
-{
- netInterface_priv_t *interfacePriv;
-
- if ((handle >= UNIFI_MAX_CONNECTIONS) || (interfaceTag >= CSR_WIFI_NUM_INTERFACES)) {
- unifi_error(priv, "handle/interfaceTag is not proper, handle = %d, interfaceTag = %d\n", handle, interfaceTag);
- return NULL;
- }
- interfacePriv = priv->interfacePriv[interfaceTag];
- return ((CsrWifiRouterCtrlStaInfo_t *) (interfacePriv->staInfo[handle]));
-}
-
-/* Function to check stations for inactivity */
-void uf_check_inactivity(unifi_priv_t *priv, u16 interfaceTag, u32 currentTime)
-{
- u32 i;
- CsrWifiRouterCtrlStaInfo_t *staInfo;
- u32 elapsedTime; /* Time in microseconds */
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
- CsrWifiMacAddress peerMacAddress;
- unsigned long lock_flags;
-
- if (interfacePriv == NULL) {
- unifi_trace(priv, UDBG3, "uf_check_inactivity: Interface priv is NULL \n");
- return;
- }
-
- spin_lock_irqsave(&priv->staRecord_lock,lock_flags);
- /* Go through the list of stations to check for inactivity */
- for(i = 0; i < UNIFI_MAX_CONNECTIONS; i++) {
- staInfo = CsrWifiRouterCtrlGetStationRecordFromHandle(priv, i, interfaceTag);
- if(!staInfo ) {
- continue;
- }
-
- unifi_trace(priv, UDBG3, "Running Inactivity handler Time %xus station's last activity %xus\n",
- currentTime, staInfo->lastActivity);
-
-
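- /* The timestamps are 32-bit microsecond counters, so allow for a possible
- * wrap-around when computing the elapsed time.
- */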
- elapsedTime = (currentTime >= staInfo->lastActivity)?
- (currentTime - staInfo->lastActivity):
- (~((u32)0) - staInfo->lastActivity + currentTime);
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
-
- if (elapsedTime > MAX_INACTIVITY_INTERVAL) {
- memcpy((u8*)&peerMacAddress, (u8*)&staInfo->peerMacAddress, sizeof(CsrWifiMacAddress));
-
- /* Indicate inactivity for the station */
- unifi_trace(priv, UDBG3, "Station %x:%x:%x:%x:%x:%x inactive since %xus\n sending Inactive Ind\n",
- peerMacAddress.a[0], peerMacAddress.a[1],
- peerMacAddress.a[2], peerMacAddress.a[3],
- peerMacAddress.a[4], peerMacAddress.a[5],
- elapsedTime);
-
- CsrWifiRouterCtrlStaInactiveIndSend(priv->CSR_WIFI_SME_IFACEQUEUE, 0, interfaceTag, peerMacAddress);
- }
-
- /* Re-acquire the record lock (released above) before examining the next station */
- spin_lock_irqsave(&priv->staRecord_lock,lock_flags);
- }
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
-
- interfacePriv->last_inactivity_check = currentTime;
-}
-
-/* Function to update activity of a station */
-void uf_update_sta_activity(unifi_priv_t *priv, u16 interfaceTag, const u8 *peerMacAddress)
-{
- u32 elapsedTime, currentTime; /* Time in microseconds */
- u32 timeHi; /* Not used - Time in microseconds */
- CsrWifiRouterCtrlStaInfo_t *staInfo;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
- unsigned long lock_flags;
-
- if (interfacePriv == NULL) {
- unifi_trace(priv, UDBG3, "uf_check_inactivity: Interface priv is NULL \n");
- return;
- }
-
- currentTime = CsrTimeGet(&timeHi);
-
-
- staInfo = CsrWifiRouterCtrlGetStationRecordFromPeerMacAddress(priv, peerMacAddress, interfaceTag);
-
- if (staInfo == NULL) {
- unifi_trace(priv, UDBG4, "Sta does not exist yet");
- return;
- }
-
- spin_lock_irqsave(&priv->staRecord_lock,lock_flags);
- /* Update activity */
- staInfo->lastActivity = currentTime;
-
- /* See if inactivity handler needs to be run
- * Here it is theoretically possible that the counter may have wrapped around. But
- * since we just want to know when to run the inactivity handler it does not really matter.
- * Especially since this is data path it makes sense in keeping it simple and avoiding
- * 64 bit handling */
- elapsedTime = (currentTime >= interfacePriv->last_inactivity_check)?
- (currentTime - interfacePriv->last_inactivity_check):
- (~((u32)0) - interfacePriv->last_inactivity_check + currentTime);
-
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
-
- /* Check if it is time to run the inactivity handler */
- if (elapsedTime > INACTIVITY_CHECK_INTERVAL) {
- uf_check_inactivity(priv, interfaceTag, currentTime);
- }
-}
-void resume_unicast_buffered_frames(unifi_priv_t *priv, u16 interfaceTag)
-{
-
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
- u8 i;
- int j;
- tx_buffered_packets_t * buffered_pkt = NULL;
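- /* One free-slot flag per HIP traffic queue; a flag is cleared once the HIP
- * reports ENOSPC for that queue so we stop draining it in this pass.
- */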
- u8 hipslotFree[4] = {TRUE,TRUE,TRUE,TRUE};
- int r;
- unsigned long lock_flags;
-
- while(!isRouterBufferEnabled(priv,3) &&
- ((buffered_pkt=dequeue_tx_data_pdu(priv,&interfacePriv->genericMgtFrames))!=NULL)) {
- buffered_pkt->transmissionControl &=
- ~(TRANSMISSION_CONTROL_TRIGGER_MASK|TRANSMISSION_CONTROL_EOSP_MASK);
- if((r=frame_and_send_queued_pdu(priv,buffered_pkt,NULL,0,FALSE)) == -ENOSPC) {
- /* Enqueue at the head of the queue */
- spin_lock_irqsave(&priv->tx_q_lock,lock_flags);
- list_add(&buffered_pkt->q, &interfacePriv->genericMgtFrames);
- spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags);
- hipslotFree[3]=FALSE;
- break;
- }else {
- if(r){
- unifi_trace (priv, UDBG1, " HIP validation failure : PDU sending failed \n");
- /* the PDU failed and we can't do anything about it, so free the storage */
- unifi_net_data_free(priv, &buffered_pkt->bulkdata);
- }
- kfree(buffered_pkt);
- }
- }
- for(i = 0; i < UNIFI_MAX_CONNECTIONS; i++) {
- CsrWifiRouterCtrlStaInfo_t *staInfo = interfacePriv->staInfo[i];
- if(!hipslotFree[0] && !hipslotFree[1] && !hipslotFree[2] && !hipslotFree[3]) {
- unifi_trace(priv, UDBG3, "(ENOSPC) in resume_unicast_buffered_frames:: hip slots are full \n");
- break;
- }
- if (staInfo && (staInfo->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_ACTIVE)) {
- while((( TRUE == hipslotFree[3] ) && (buffered_pkt=dequeue_tx_data_pdu(priv, &staInfo->mgtFrames)))) {
- buffered_pkt->transmissionControl &=
- ~(TRANSMISSION_CONTROL_TRIGGER_MASK|TRANSMISSION_CONTROL_EOSP_MASK);
- if((r=frame_and_send_queued_pdu(priv,buffered_pkt,staInfo,0,FALSE)) == -ENOSPC) {
- unifi_trace(priv, UDBG3, "(ENOSPC) in resume_unicast_buffered_frames:: hip slots are full for voice queue\n");
- /* Enqueue at the head of the queue */
- spin_lock_irqsave(&priv->tx_q_lock,lock_flags);
- list_add(&buffered_pkt->q, &staInfo->mgtFrames);
- spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags);
- priv->pausedStaHandle[3]=(u8)(staInfo->assignedHandle);
- hipslotFree[3] = FALSE;
- break;
- } else {
- if(r){
- unifi_trace (priv, UDBG1, " HIP validation failure : PDU sending failed \n");
- /* the PDU failed and we can't do anything about it, so free the storage */
- unifi_net_data_free(priv, &buffered_pkt->bulkdata);
- }
- kfree(buffered_pkt);
- }
- }
-
- for(j=3;j>=0;j--) {
- if(!hipslotFree[j])
- continue;
-
- while((buffered_pkt=dequeue_tx_data_pdu(priv, &staInfo->dataPdu[j]))) {
- buffered_pkt->transmissionControl &=
- ~(TRANSMISSION_CONTROL_TRIGGER_MASK|TRANSMISSION_CONTROL_EOSP_MASK);
- if((r=frame_and_send_queued_pdu(priv,buffered_pkt,staInfo,0,FALSE)) == -ENOSPC) {
- /* Enqueue at the head of the queue */
- spin_lock_irqsave(&priv->tx_q_lock,lock_flags);
- list_add(&buffered_pkt->q, &staInfo->dataPdu[j]);
- spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags);
- priv->pausedStaHandle[j]=(u8)(staInfo->assignedHandle);
- hipslotFree[j]=FALSE;
- break;
- } else {
- if(r){
- unifi_trace (priv, UDBG1, " HIP validation failure : PDU sending failed \n");
- /* the PDU failed and we can't do anything about it, so free the storage */
- unifi_net_data_free(priv, &buffered_pkt->bulkdata);
- }
- kfree(buffered_pkt);
- }
- }
- }
- }
- }
-}
-void update_eosp_to_head_of_broadcast_list_head(unifi_priv_t *priv,u16 interfaceTag)
-{
-
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
- unsigned long lock_flags;
- struct list_head *listHead;
- struct list_head *placeHolder;
- tx_buffered_packets_t *tx_q_item;
-
- if (interfacePriv->noOfbroadcastPktQueued) {
-
- /* Set EOSP on the frame at the head of the broadcast/multicast list:
- * a management packet has been received, so the frame should not be held
- * for long or the peer may time out.
- */
- spin_lock_irqsave(&priv->tx_q_lock,lock_flags);
- list_for_each_safe(listHead, placeHolder, &interfacePriv->genericMulticastOrBroadCastFrames) {
- tx_q_item = list_entry(listHead, tx_buffered_packets_t, q);
- tx_q_item->transmissionControl |= TRANSMISSION_CONTROL_EOSP_MASK;
- tx_q_item->transmissionControl = (tx_q_item->transmissionControl & ~(CSR_NO_CONFIRM_REQUIRED));
- unifi_trace(priv, UDBG1,"updating eosp for list Head hostTag:= 0x%x ",tx_q_item->hostTag);
- break;
- }
- spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags);
- }
-}
-
-/*
- * ---------------------------------------------------------------------------
- * resume_suspended_uapsd
- *
- * This function takes care of processing packets of an Unscheduled Service Period
- * that was suspended earlier due to DTIM or HIP ENOSPC scenarios
- *
- * Arguments:
- * priv Pointer to device private context struct
- * interfaceTag For which resume should happen
- * ---------------------------------------------------------------------------
- */
-void resume_suspended_uapsd(unifi_priv_t* priv,u16 interfaceTag)
-{
-
- u8 startIndex;
- CsrWifiRouterCtrlStaInfo_t * staInfo = NULL;
- unsigned long lock_flags;
-
- unifi_trace(priv, UDBG2, "++resume_suspended_uapsd: \n");
- for(startIndex= 0; startIndex < UNIFI_MAX_CONNECTIONS;startIndex++) {
- staInfo = CsrWifiRouterCtrlGetStationRecordFromHandle(priv,startIndex,interfaceTag);
-
- if(!staInfo || !staInfo->wmmOrQosEnabled) {
- continue;
- } else if((staInfo->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_POWER_SAVE)
- &&staInfo->uapsdActive && staInfo->uspSuspend) {
- /* U-APSD is still active and was previously suspended, either because of
- * ENOSPC on the FH queues or due to DTIM activity
- */
- uf_handle_uspframes_delivery(priv, staInfo, interfaceTag);
- } else {
- unifi_trace(priv, UDBG2, "resume_suspended_uapsd: PS state=%x, uapsdActive?=%x, suspend?=%x\n",
- staInfo->currentPeerState, staInfo->uapsdActive, staInfo->uspSuspend);
- if (staInfo->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_ACTIVE)
- {
- spin_lock_irqsave(&priv->staRecord_lock,lock_flags);
- staInfo->uapsdActive = FALSE;
- staInfo->uspSuspend = FALSE;
- spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags);
- }
- }
- }
- unifi_trace(priv, UDBG2, "--resume_suspended_uapsd:\n");
-}
-
-#endif
diff --git a/drivers/staging/csr/unifi_priv.h b/drivers/staging/csr/unifi_priv.h
deleted file mode 100644
index d20d74c..0000000
--- a/drivers/staging/csr/unifi_priv.h
+++ /dev/null
@@ -1,1136 +0,0 @@
-/*
- *****************************************************************************
- *
- * FILE : unifi_priv.h
- *
- * PURPOSE : Private header file for unifi driver.
- *
- * UDI = UniFi Debug Interface
- *
- * Copyright (C) 2005-2009 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- *****************************************************************************
- */
-#ifndef __LINUX_UNIFI_PRIV_H__
-#define __LINUX_UNIFI_PRIV_H__ 1
-
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/wait.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/wireless.h>
-#include <linux/cdev.h>
-#include <linux/kthread.h>
-#include <linux/freezer.h>
-
-#ifdef CSR_WIFI_SUPPORT_MMC_DRIVER
-#include <linux/mmc/core.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/mmc/sdio.h>
-#endif /* CSR_WIFI_SUPPORT_MMC_DRIVER */
-
-#include <linux/fs.h>
-
-#ifdef ANDROID_BUILD
-#include <linux/wakelock.h>
-#endif
-
-#include "csr_wifi_hip_unifi.h"
-#include "csr_wifi_hip_unifi_udi.h"
-#include "csr_wifi_router_lib.h"
-#include "unifiio.h"
-#ifndef CSR_WIFI_HIP_TA_DISABLE
-#include "csr_wifi_vif_utils.h"
-#endif
-
-/* Define the unifi_priv_t before include the unifi_native.h */
-struct unifi_priv;
-typedef struct unifi_priv unifi_priv_t;
-#ifdef CSR_SUPPORT_WEXT_AP
-struct CsrWifiSmeApConfig;
-typedef struct CsrWifiSmeApConfig CsrWifiSmeApConfig_t;
-#endif
-#ifdef CSR_SUPPORT_WEXT
-#include "unifi_wext.h"
-#endif
-
-#ifdef ANDROID_BUILD
-extern struct wake_lock unifi_sdio_wake_lock;
-#endif
-
-#include "unifi_clients.h"
-
-#ifdef CSR_NATIVE_LINUX
-#include "sme_native/unifi_native.h"
-#else
-#include "unifi_sme.h"
-#endif
-
-/* The device major number to use when registering the udi driver */
-#define UNIFI_NAME "unifi"
-/*
- * MAX_UNIFI_DEVS defines the maximum number of UniFi devices that can be present.
- * This number should be set to the number of SDIO slots supported by the SDIO
- * host controller on the platform.
- * Note: If MAX_UNIFI_DEVS value changes, fw_init[] needs to be corrected in drv.c
- */
-#define MAX_UNIFI_DEVS 2
-
-/* 802.11 Mac header offsets */
-#define MAC_HEADER_SIZE 24
-#define QOS_CONTROL_HEADER_SIZE 2
-#define HT_CONTROL_HEADER_SIZE 4
-#define QOS_DATA 0x8
-#define QOS_DATA_NULL 0xc
-#define DATA_NULL 0x04
-#define FRAME_CONTROL_ORDER_BIT 0x8000
-#define FRAME_CONTROL_TYPE_FIELD_OFFSET 2
-#define FRAME_CONTROL_SUBTYPE_FIELD_OFFSET 4
-#define IEEE802_11_FRAMETYPE_DATA 0x02
-#define IEEE802_11_FRAMETYPE_CONTROL 0x01
-#define IEEE802_11_FRAMETYPE_MANAGEMENT 0x00
-#define IEEE802_11_FRAMETYPE_RESERVED 0x03
-
-/* octet offset from start of mac header for certain fields */
-#define IEEE802_11_ADDR3_OFFSET 16
-#define IEEE802_11_SEQUENCE_CONTROL_OFFSET 22
-#define IEEE802_11_MAX_DATA_LEN 2304
-
-/* frame control (FC) masks, for frame control as 16 bit integer */
-#define IEEE802_11_FC_TO_DS_MASK 0x100
-#define IEEE802_11_FC_FROM_DS_MASK 0x200
-#define IEEE802_11_FC_MOREDATA_MASK 0x2000
-#define IEEE802_11_FC_PROTECTED_MASK 0x4000
-#define IEEE80211_FC_ORDER_MASK 0x8000
-#define IEEE80211_FC_SUBTYPE_MASK 0x00f0
-#define IEEE80211_FC_TYPE_MASK 0x000c
-#define IEEE80211_FC_PROTO_VERSION_MASK 0x0003
-
-/* selected type and subtype combinations as in 7.1.3.1 table 1
- For frame control as 16 bit integer, or for ls octet
-*/
-#define IEEE802_11_FC_TYPE_DATA 0x08
-#define IEEE802_11_FC_TYPE_NULL 0x48
-#define IEEE802_11_FC_TYPE_QOS_NULL 0xc8
-#define IEEE802_11_FC_TYPE_QOS_DATA 0x88
-
-#define IEEE802_11_FC_TYPE_DATA_SUBTYPE_RESERVED 0x0D
-
-/* qos control (QC) masks for qos control as 16 bit integer, or for ls octet */
-#define IEEE802_11_QC_TID_MASK 0x0f
-#define IEEE802_11_QC_A_MSDU_PRESENT 0x80
-
-#if (defined(CSR_WIFI_SECURITY_WAPI_ENABLE) && defined(CSR_WIFI_SECURITY_WAPI_QOSCTRL_MIC_WORKAROUND))
-#define IEEE802_11_QC_NON_TID_BITS_MASK 0xFFF0
-#endif
-
-#define CSR_WIFI_EAPOL_M4_HOST_TAG 0x50000000
-#define IEEE802_11_DATA_FRAME_MAC_HEADER_SIZE 36
-#define MAX_ACCESS_CATOGORY 4
-
-/* Time in us to check for inactivity of stations 5 mins */
-#define INACTIVITY_CHECK_INTERVAL 300000000
-/* Time in us before a station is flagged as inactive */
-#define MAX_INACTIVITY_INTERVAL 300000000
-
-
-/* Define for maximum BA session */
-#define MAX_SUPPORTED_BA_SESSIONS_TX 1
-#define MAX_SUPPORTED_BA_SESSIONS_RX 4
-
-#define MAX_BA_WIND_SIZE 64
-#define MAC_HEADER_ADDR1_OFFSET 4
-#define MAC_HEADER_ADDR2_OFFSET 10
-
-/* Define for age (in us) value for frames in MPDU reorder buffer */
-#define CSR_WIFI_BA_MPDU_FRAME_AGE_TIMEOUT 30000 /* 30 milliseconds */
-
-/* This macro used in prepare_and_add_macheader*/
-#define ADDRESS_ONE_OFFSET 20
-
-/* Defines for STA inactivity detection */
-#define STA_INACTIVE_DETECTION_TRIGGER_THRESHOLD 1 /* in number of stations */
-#define STA_INACTIVE_DETECTION_TIMER_INTERVAL 30 /* in seconds */
-#define STA_INACTIVE_TIMEOUT_VAL 120*1000*1000 /* 120 seconds */
-
-/* Test for modes requiring AP firmware patch */
-#define CSR_WIFI_HIP_IS_AP_FW(mode) ((((mode) == CSR_WIFI_ROUTER_CTRL_MODE_AP) || \
- ((mode) == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO)) ? TRUE : FALSE)
-
-/* Defines used in beacon filtering in case of P2P */
-#define CSR_WIFI_P2P_WILDCARD_SSID_LENGTH 0x7
-#define CSR_WIFI_80211_FRAME_SUBTYPE_BEACON 0x8
-#define CSR_WIFI_BEACON_FIXED_LENGTH 12
-#define CSR_WIFI_FRAME_SUBTYPE_BIT_OFFSET 4
-#define CSR_WIFI_80211_FRAME_SUBTYPE_BIT_MASK ((u8)(0xF << CSR_WIFI_FRAME_SUBTYPE_BIT_OFFSET))
-
-#define CSR_WIFI_80211_GET_FRAME_SUBTYPE(frameBuffer) \
- ((u8)(((u8 *)frameBuffer)[0] & CSR_WIFI_80211_FRAME_SUBTYPE_BIT_MASK) >> CSR_WIFI_FRAME_SUBTYPE_BIT_OFFSET)
-
-/* For M4 request received via netdev*/
-
-typedef u8 CsrWifiPacketType;
-#define CSR_WIFI_UNICAST_PDU ((CsrWifiPacketType) 0x00)
-#define CSR_WIFI_MULTICAST_PDU ((CsrWifiPacketType) 0x1)
-#define CSR_WIFI_BROADCAST_PDU ((CsrWifiPacketType) 0x2)
-
-#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
-
-/* Module parameter variables */
-extern int buswidth;
-extern int sdio_clock;
-extern int use_5g;
-extern int disable_hw_reset;
-extern int disable_power_control;
-extern int enable_wol;
-extern int sme_debug;
-extern int fw_init[MAX_UNIFI_DEVS];
-extern int tl_80211d;
-extern int sdio_byte_mode;
-extern int sdio_block_size;
-extern int coredump_max;
-extern int run_bh_once;
-extern int bh_priority;
-#ifdef CSR_WIFI_HIP_DEBUG_OFFLINE
-extern int log_hip_signals;
-#endif
-
-struct dlpriv {
- const unsigned char *dl_data;
- int dl_len;
- void *fw_desc;
-};
-
-
-struct uf_thread {
-
- struct task_struct *thread_task;
-
- /* wait_queue for waking the unifi_thread kernel thread */
- wait_queue_head_t wakeup_q;
- unsigned int wakeup_flag;
-
- /*
- * Use it to block the I/O thread when
- * an error occurs or UniFi is reinitialised.
- */
- int block_thread;
-
- char name[16];
- int prio;
-};
-
-/*
- * Linked list to hold the received packets for the period the port
- * remains closed.
- */
-typedef struct rx_buffered_packets {
- /* List link structure */
- struct list_head q;
- /* Packet to indicate when the port reopens */
- struct sk_buff *skb;
- /* Bulk data to free in case the port closes and the packet needs to be discarded */
- bulk_data_param_t bulkdata;
- /* The source address of the packet */
- CsrWifiMacAddress sa;
- /* The destination address of the packet */
- CsrWifiMacAddress da;
- /* Corresponding signal */
- CSR_SIGNAL signal;
-} rx_buffered_packets_t;
-
-
-typedef u8 CsrWifiAcPowersaveMode;
-#define CSR_WIFI_AC_TRIGGER_ONLY_ENABLED 0x00
-#define CSR_WIFI_AC_DELIVERY_ONLY_ENABLE 0X01
-#define CSR_WIFI_AC_TRIGGER_AND_DELIVERY_ENABLED 0X03
-#define CSR_WIFI_AC_LEGACY_POWER_SAVE 0X02
-
-
-#define IS_DELIVERY_ENABLED(mode) (((mode) & CSR_WIFI_AC_DELIVERY_ONLY_ENABLE) ? 1 : 0)
-#define IS_DELIVERY_AND_TRIGGER_ENABLED(mode) ((((mode) & CSR_WIFI_AC_DELIVERY_ONLY_ENABLE) || ((mode) & CSR_WIFI_AC_TRIGGER_AND_DELIVERY_ENABLED)) ? 1 : 0)
-#define IS_DTIM_ACTIVE(flag, hostTag) (((flag) == TRUE) || ((hostTag) != INVALID_HOST_TAG))
-#define INVALID_HOST_TAG 0xFFFFFFFF
-
-/* Queue to be used for contention priority */
-#define UNIFI_TRAFFIC_Q_CONTENTION UNIFI_TRAFFIC_Q_BE
-
-
-
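
A small, hypothetical example (not driver code) of how the per-access-category mode values and the helper macros above combine; the loop and counter are purely illustrative:

/* Illustrative only: count how many of a station's four access categories
 * are delivery-enabled for U-APSD. */
static int example_count_delivery_enabled(const CsrWifiAcPowersaveMode mode[MAX_ACCESS_CATOGORY])
{
    int ac, count = 0;

    for (ac = 0; ac < MAX_ACCESS_CATOGORY; ac++) {
        if (IS_DELIVERY_ENABLED(mode[ac]))
            count++;
    }
    return count;
}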
-/*
- * Linked list to hold the tx packets while the peer is in powersave
- * or while there are no free slots in UniFi
- */
-typedef struct tx_buffered_packets {
- /* List link structure */
- struct list_head q;
- u16 interfaceTag;
- CSR_CLIENT_TAG hostTag;
- CSR_PROCESS_ID leSenderProcessId;
- CSR_TRANSMISSION_CONTROL transmissionControl;
- CSR_RATE rate;
- /* Bulk data to free in case the port closes and the packet needs to be discarded */
- bulk_data_desc_t bulkdata;
- /* The source address of the packet */
- CsrWifiMacAddress peerMacAddress;
- CSR_PRIORITY priority;
-} tx_buffered_packets_t;
-
-/* station record has this data structure */
-typedef struct CsrWifiRouterCtrlStaInfo_t {
-
- /* Sme sends these parameters */
- CsrWifiMacAddress peerMacAddress;
- u32 assignedHandle;
- u8 wmmOrQosEnabled;
- CsrWifiAcPowersaveMode powersaveMode[MAX_ACCESS_CATOGORY];
- u16 maxSpLength;
- u8 uapsdActive;
- u16 noOfSpFramesSent;
-
- /* Router/Driver database */
-#ifdef CSR_SUPPORT_SME
- unifi_port_cfg_t *peerControlledPort;
- unifi_port_cfg_t *peerUnControlledPort;
-
- /* Inactivity feature parameters */
- struct netInterface_priv *interfacePriv;
- struct work_struct send_disconnected_ind_task;
- u8 activity_flag;
- u16 listenIntervalInTus;
- CSR_CLIENT_TAG nullDataHostTag;
-
- /* Activity timestamps for the station */
- u32 lastActivity;
-
- /* Service period (SP) suspended during multicast transmission */
- u8 uspSuspend;
- CSR_PRIORITY triggerFramePriority;
-#endif
- CsrWifiRouterCtrlPeerStatus currentPeerState;
- struct list_head dataPdu[MAX_ACCESS_CATOGORY];
- struct list_head mgtFrames;
- u8 spStatus;
- u8 prevFrmType;
- u8 prevFrmAccessCatogory;
- u8 protection;
- u16 aid;
- u8 txSuspend;
- u8 timSet;
- /* Do not change the values of the SET & RESET macros below */
-#define CSR_WIFI_TIM_RESET 0
-#define CSR_WIFI_TIM_SET 1
-#define CSR_WIFI_TIM_RESETTING 2
-#define CSR_WIFI_TIM_SETTING 3
-
- u8 timRequestPendingFlag;
- u8 updateTimReqQueued;
- u16 noOfPktQueued;
-}CsrWifiRouterCtrlStaInfo_t;
-
-#ifdef CSR_SUPPORT_WEXT_AP
-struct CsrWifiSmeApConfig {
- CsrWifiSsid ssid;
- u16 channel;
- CsrWifiNmeApCredentials credentials;
- u8 max_connections;
- u8 if_index;
-};
-#endif
-
-#ifdef CSR_WIFI_RX_PATH_SPLIT
-/* This is test code and may be removed later */
-#define CSR_WIFI_RX_SIGNAL_BUFFER_SIZE (60+1)
-
-typedef struct
-{
- u8 *bufptr; /* Signal Primitive */
- bulk_data_param_t data_ptrs; /* Bulk Data pointers */
- u16 sig_len;
-}rx_buff_struct_t;
-
-typedef struct
-{
- u8 writePointer; /**< write pointer */
- u8 readPointer; /**< read pointer */
- u8 size; /**< size of circular buffer */
- rx_buff_struct_t rx_buff[CSR_WIFI_RX_SIGNAL_BUFFER_SIZE]; /**< Elements of the circular buffer */
-} rxCircularBuffer_t;
-
-void rx_wq_handler(struct work_struct *work);
-#endif
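
The structure above is a simple single-producer/single-consumer ring. The index handling lives elsewhere in the driver; the sketch below is a generic illustration of the usual pattern (an assumption, not the driver's actual code), where CSR_WIFI_RX_SIGNAL_BUFFER_SIZE being 60+1 suggests one slot is kept empty to distinguish full from empty.

/* Illustrative only: classic ring-buffer index helpers. */
static u8 example_rx_buff_is_empty(const rxCircularBuffer_t *rb)
{
    return (rb->writePointer == rb->readPointer) ? TRUE : FALSE;
}

static u8 example_rx_buff_next_index(u8 index, u8 size)
{
    return (u8)((index + 1) % size);
}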
-
-struct unifi_priv {
-
- card_t *card;
- CsrSdioFunction *sdio;
-
- /* Index into Unifi_instances[] for this device. */
- int instance;
- /* Reference count for this instance */
- int ref_count;
-
- /* Firmware images */
- struct dlpriv fw_sta;
- struct dlpriv fw_conv; /* used for conversion of production test image */
-
- /* Char device related structures */
- struct cdev unifi_cdev;
- struct cdev unifiudi_cdev;
- struct device *unifi_device;
-
- /* Which wireless interface to use (1 - 2.4GHz, 2 - 5GHz) */
- CSR_IFINTERFACE if_index;
-
- /* For multiple interface support */
- struct net_device *netdev[CSR_WIFI_NUM_INTERFACES];
- struct netInterface_priv *interfacePriv[CSR_WIFI_NUM_INTERFACES];
-
- u8 totalInterfaceCount;
-
- int prev_queue;
-
- /* Name of node under /proc */
- char proc_entry_name[64];
-
- /*
- * Flags:
- * drop_unencrypted
- * - Not used?
- * netdev_registered
- * - whether the netdev has been registered.
- */
- unsigned int drop_unencrypted : 1;
-
- /* Our list of unifi linux clients. */
- ul_client_t ul_clients[MAX_UDI_CLIENTS];
-
- /* Mutex to protect using the logging hook after UDI client is gone */
- struct semaphore udi_logging_mutex;
- /* Pointer to the ul_clients[] array */
- ul_client_t *logging_client;
-
- /* A ul_client_t* used to send the netdev related MIB requests. */
- ul_client_t *netdev_client;
-
- /* The SME ul_client_t pointer. */
- ul_client_t *sme_cli;
-
- /* The AMP ul_client_t pointer. */
- ul_client_t *amp_client;
-
- /*
- * Semaphore for locking the top-half to one user process.
- * This is necessary to prevent multiple processes calling driver
- * operations. This can happen because the network driver entry points
- * can be called from multiple processes.
- */
-#ifdef USE_DRIVER_LOCK
- struct semaphore lock;
-#endif /* USE_DRIVER_LOCK */
-
- /* Flag to say that an operation was aborted */
- int io_aborted;
-
- struct uf_thread bh_thread;
-
-#define UNIFI_INIT_NONE 0x00
-#define UNIFI_INIT_IN_PROGRESS 0x01
-#define UNIFI_INIT_FW_DOWNLOADED 0x02
-#define UNIFI_INIT_COMPLETED 0x04
- unsigned char init_progress;
-
- int sme_is_present;
-
- /* The WMM features that UniFi uses in the current BSS */
- unsigned int sta_wmm_capabilities;
-
- /* Debug only */
- char last_debug_string[256];
- unsigned short last_debug_word16[16];
-
-#ifdef CSR_SUPPORT_SME
- /* lock to protect the tx queues list */
- spinlock_t tx_q_lock;
- u8 allPeerDozing;
- u8 pausedStaHandle[MAX_ACCESS_CATOGORY];
- /* Max packet the driver can queue, irrespective of interface number */
- u16 noOfPktQueuedInDriver;
-#define CSR_WIFI_DRIVER_SUPPORT_FOR_MAX_PKT_QUEUEING 512
-#define CSR_WIFI_DRIVER_MAX_PKT_QUEUING_THRESHOLD_PER_PEER 64
-#define CSR_WIFI_DRIVER_MINIMUM_BROADCAST_PKT_THRESHOLD 3
-
- u8 routerBufferEnable[MAX_ACCESS_CATOGORY];
- /* lock to protect stainfo members and priv members*/
- spinlock_t staRecord_lock;
-#endif
-#ifdef CSR_NATIVE_LINUX
-#ifdef CSR_SUPPORT_WEXT
- /* wireless config */
- struct wext_config wext_conf;
-#endif
-
- /* Mutex to protect the MLME blocking requests */
- struct semaphore mlme_blocking_mutex;
-
- /* The ul_client that provides the blocking API for WEXT calls */
- ul_client_t *wext_client;
-
-#endif /* CSR_NATIVE_LINUX */
-
-#ifdef CSR_SUPPORT_SME
- wait_queue_head_t sme_request_wq;
- /* Semaphore to protect the SME blocking requests */
- struct semaphore sme_sem;
- /* Structure to hold the SME blocking requests data*/
- sme_reply_t sme_reply;
-
- /* Structure to hold a traffic protocol indication */
- struct ta_ind {
- struct work_struct task;
- CsrWifiRouterCtrlTrafficPacketType packet_type;
- CsrWifiRouterCtrlProtocolDirection direction;
- CsrWifiMacAddress src_addr;
- int in_use;
- } ta_ind_work;
-
- struct ta_sample_ind {
- struct work_struct task;
- CsrWifiRouterCtrlTrafficStats stats;
- int in_use;
- } ta_sample_ind_work;
-
- __be32 sta_ip_address;
- CsrWifiRouterCtrlSmeVersions sme_versions;
-
- /*
- * Flag to reflect state of unifi_sys_wifi_on_*() progress.
- * This indicates whether we are in a "wifi on" state in which we are
- * allowed to indicate errors with unifi_mgt_wifi_off_ind()
- */
- enum {
- wifi_on_unspecified = -1,
- wifi_on_in_progress = 0,
- wifi_on_done = 1,
- } wifi_on_state;
-
- /* Userspace TaskId for the SME; set when a wifi on req is received */
- CsrSchedQid CSR_WIFI_SME_IFACEQUEUE;
-
- struct work_struct multicast_list_task;
- /*
- * The SME installs filters to ask for specific MA-UNITDATA.req
- * to be passed to different SME components.
- */
-#define MAX_MA_UNIDATA_IND_FILTERS 8
- sme_ma_unidata_ind_filter_t sme_unidata_ind_filters[MAX_MA_UNIDATA_IND_FILTERS];
-
-/* UNIFI_CFG related parameters */
- uf_cfg_bcast_packet_filter_t packet_filters;
- unsigned char *filter_tclas_ies;
- /* The structure that holds all the connection configuration. */
- CsrWifiSmeConnectionConfig connection_config;
-#ifdef CSR_SUPPORT_WEXT
-
- int ignore_bssid_join;
- struct iw_statistics wext_wireless_stats;
-
- /* The MIB and MAC address files contents, read from userspace */
- CsrWifiSmeDataBlock mib_data;
- CsrWifiMacAddress sta_mac_address;
-
- int wep_tx_key_index;
- wep_key_t wep_keys[NUM_WEPKEYS];
-
-
-#ifdef CSR_SUPPORT_WEXT_AP
- CsrWifiSmeApMacConfig ap_mac_config;
- CsrWifiNmeApConfig group_sec_config;
- CsrWifiSmeApConfig_t ap_config;
-#endif
- struct work_struct sme_config_task;
-
-#endif /* CSR_SUPPORT_WEXT */
-
-#endif /* CSR_SUPPORT_SME */
-
-#ifdef CSR_SME_USERSPACE
- void *smepriv;
-#endif /* CSR_SME_USERSPACE */
-
- card_info_t card_info;
-
- /* Mutex to protect unifi_send_signal() */
- spinlock_t send_signal_lock;
-
-
- /*
- * The workqueue to offload the TA run
- * and the multicast addresses list set
- */
- struct workqueue_struct *unifi_workqueue;
-
- unsigned char *mib_cfm_buffer;
- unsigned int mib_cfm_buffer_length;
-
- int ptest_mode; /* Set when in production test mode */
- int coredump_mode; /* Set when SME has requested a coredump */
- u8 wol_suspend; /* Set when suspending with UniFi powered */
-
-#define UF_UNCONTROLLED_PORT_Q 0
-#define UF_CONTROLLED_PORT_Q 1
-
- /* Semaphore to protect the rx queues list */
- struct semaphore rx_q_sem;
-
- /* Spinlock to protect M4 data */
- spinlock_t m4_lock;
- /* Mutex to protect BA RX data */
- struct semaphore ba_mutex;
-
-#if (defined(CSR_WIFI_SECURITY_WAPI_ENABLE) && defined(CSR_WIFI_SECURITY_WAPI_SW_ENCRYPTION))
- /* Spinlock to protect the WAPI data */
- spinlock_t wapi_lock;
-#endif
-
- /* Array to indicate whether a particular Tx queue is paused; this may not
- * be required in a multiqueue implementation since we can stop the kernel
- * queues directly */
- u8 tx_q_paused_flag[UNIFI_TRAFFIC_Q_MAX];
-
-#ifdef CSR_WIFI_RX_PATH_SPLIT
- struct workqueue_struct *rx_workqueue;
- struct work_struct rx_work_struct;
- rxCircularBuffer_t rxSignalBuffer;
-
-#endif
-
- u32 rxTcpThroughput;
- u32 txTcpThroughput;
- u32 rxUdpThroughput;
- u32 txUdpThroughput;
-
-#ifdef CSR_WIFI_SECURITY_WAPI_ENABLE
- /*Set if multicast KeyID = 1*/
- u8 wapi_multicast_filter;
- /*Set if unicast KeyID = 1*/
- u8 wapi_unicast_filter;
- u8 wapi_unicast_queued_pkt_filter;
-#ifdef CSR_WIFI_SECURITY_WAPI_QOSCTRL_MIC_WORKAROUND
- u8 isWapiConnection;
-#endif
-#endif
-
-#ifdef CSR_WIFI_SPLIT_PATCH
- CsrWifiRouterCtrlModeSetReq pending_mode_set;
-#endif
-
- u8 cmanrTestMode;
- CSR_RATE cmanrTestModeTransmitRate;
-
-};
-
-typedef struct {
- u16 queue_length[4];
- u8 os_queue_paused;
-} unifi_OsQosInfo;
-
-
-typedef struct {
- u8 active;
- bulk_data_param_t bulkdata;
- CSR_SIGNAL signal;
- u16 sn;
- u32 recv_time;
-} frame_desc_struct;
-
-typedef struct {
- frame_desc_struct *buffer;
- u16 wind_size;
- u16 occupied_slots;
- struct timer_list timer;
- u16 timeout;
- u16 expected_sn;
- u16 start_sn;
- u8 trigger_ba_after_ssn;
- struct netInterface_priv *interfacePriv;
- u16 tID;
- CsrWifiMacAddress macAddress;
- struct work_struct send_ba_err_task;
-} ba_session_rx_struct;
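
The reorder buffer above holds out-of-order MPDUs in up to MAX_BA_WIND_SIZE slots. A generic sketch of mapping a received sequence number into the window (an assumption about the usual approach, not the driver's actual logic):

/* Illustrative only: 802.11 sequence numbers are 12 bits, so the offset
 * into the window is computed modulo 4096 and then reduced by wind_size. */
static u16 example_ba_slot_for_sn(const ba_session_rx_struct *ba_session, u16 sn)
{
    u16 offset = (u16)((sn - ba_session->start_sn) & 0x0FFF);

    return (u16)(offset % ba_session->wind_size);
}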
-
-
-typedef struct {
- struct netInterface_priv *interfacePriv;
- u16 tID;
- CsrWifiMacAddress macAddress;
-} ba_session_tx_struct;
-
-typedef struct netInterface_priv
-{
- u16 InterfaceTag;
- struct unifi_priv *privPtr;
- ba_session_tx_struct *ba_session_tx[MAX_SUPPORTED_BA_SESSIONS_TX];
- ba_session_rx_struct *ba_session_rx[MAX_SUPPORTED_BA_SESSIONS_RX];
- frame_desc_struct ba_complete[MAX_BA_WIND_SIZE];
- u8 ba_complete_index;
- u8 queueEnabled[UNIFI_NO_OF_TX_QS];
- struct work_struct send_m4_ready_task;
-#ifdef CSR_WIFI_SECURITY_WAPI_ENABLE
- struct work_struct send_pkt_to_encrypt;
-#endif
- struct net_device_stats stats;
- u8 interfaceMode;
- u8 protect;
- CsrWifiMacAddress bssid;
- /*
- * Flag to reflect state of CONNECTED indication signal.
- * This indicates whether we are "joined" an Access Point (i.e. have
- * nominated an AP and are receiving beacons) but give no indication
- * of whether we are authenticated and/or associated.
- */
- enum {
- UnifiConnectedUnknown = -1,
- UnifiNotConnected = 0,
- UnifiConnected = 1,
- } connected;
-#ifdef CSR_SUPPORT_WEXT
- /* Tracks when we are waiting for a netdevice state change callback */
- u8 wait_netdev_change;
- /* True if we have successfully registered for netdev callbacks */
- u8 netdev_callback_registered;
-#endif /* CSR_SUPPORT_WEXT */
- unsigned int netdev_registered;
-#define UNIFI_MAX_MULTICAST_ADDRESSES 10
- /* The multicast addresses list that the thread needs to set. */
- u8 mc_list[UNIFI_MAX_MULTICAST_ADDRESSES*ETH_ALEN];
- /* The multicast addresses count that the thread needs to set. */
- int mc_list_count;
- u32 tag;
-#ifdef CSR_SUPPORT_SME
- /* (un)controlled port configuration */
- unifi_port_config_t controlled_data_port;
- unifi_port_config_t uncontrolled_data_port;
-
- /* station record maintenance related data structures */
- u8 num_stations_joined;
- CsrWifiRouterCtrlStaInfo_t *(staInfo)[UNIFI_MAX_CONNECTIONS];
- struct list_head genericMgtFrames;
- struct list_head genericMulticastOrBroadCastFrames;
- struct list_head genericMulticastOrBroadCastMgtFrames;
-
- /* Timer for detecting station inactivity */
- struct timer_list sta_activity_check_timer;
- u8 sta_activity_check_enabled;
-
- /* Timestamp when the last inactivity check was done */
- u32 last_inactivity_check;
-
- /* Number of multicast or broadcast packets queued */
- u16 noOfbroadcastPktQueued;
-#endif
- /* A list to hold the buffered uncontrolled port packets */
- struct list_head rx_uncontrolled_list;
- /* A list to hold the buffered controlled port packets */
- struct list_head rx_controlled_list;
- /* Buffered M4 signal to take care of WPA race condition */
- CSR_SIGNAL m4_signal;
- bulk_data_desc_t m4_bulk_data;
-
-#if (defined(CSR_WIFI_SECURITY_WAPI_ENABLE) && defined(CSR_WIFI_SECURITY_WAPI_SW_ENCRYPTION))
- /* Buffered WAPI Unicast MA Packet Request for encryption in Sme */
- CSR_SIGNAL wapi_unicast_ma_pkt_sig;
- bulk_data_desc_t wapi_unicast_bulk_data;
-#endif
-
- /* This should be removed and m4_hostTag should be used for checking*/
- u8 m4_sent;
- CSR_CLIENT_TAG m4_hostTag;
- u8 dtimActive;
- u8 intraBssEnabled;
- u32 multicastPduHostTag; /* Used to set the tim after getting
- a confirm for it */
- u8 bcTimSet;
- u8 bcTimSetReqPendingFlag;
- u8 bcTimSetReqQueued;
-} netInterface_priv_t;
-
-#ifdef CSR_SUPPORT_SME
-#define routerStartBuffering(priv, queue) ((priv)->routerBufferEnable[(queue)] = TRUE)
-#define routerStopBuffering(priv, queue) ((priv)->routerBufferEnable[(queue)] = FALSE)
-#define isRouterBufferEnabled(priv, queue) ((priv)->routerBufferEnable[(queue)])
-#endif
-
-#ifdef USE_DRIVER_LOCK
-#define LOCK_DRIVER(_p) down_interruptible(&(_p)->lock)
-#define UNLOCK_DRIVER(_p) up(&(_p)->lock)
-#else
-#define LOCK_DRIVER(_p) (void)(_p); /* as nothing */
-#define UNLOCK_DRIVER(_p) (void)(_p); /* as nothing */
-#endif /* USE_DRIVER_LOCK */
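
For illustration, and assuming USE_DRIVER_LOCK is defined (the empty variants expand to a bare statement and cannot be tested), a driver entry point guarded by these macros would look roughly like this hypothetical sketch; down_interruptible() returns non-zero if the wait was interrupted by a signal:

/* Illustrative only: guard a driver operation against concurrent entry. */
static int example_guarded_op(unifi_priv_t *priv)
{
    if (LOCK_DRIVER(priv))
        return -ERESTARTSYS;    /* interrupted while waiting for the lock */

    /* ... operation that must not run from two processes at once ... */

    UNLOCK_DRIVER(priv);
    return 0;
}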
-
-s32 CsrHipResultToStatus(CsrResult csrResult);
-
-
-/*
- * SDIO related functions and callbacks
- */
-int uf_sdio_load(void);
-void uf_sdio_unload(void);
-unifi_priv_t *uf_find_instance(int inst);
-int uf_find_priv(unifi_priv_t *priv);
-int uf_find_netdev_priv(netInterface_priv_t *priv);
-unifi_priv_t *uf_get_instance(int inst);
-void uf_put_instance(int inst);
-int csr_sdio_linux_install_irq(CsrSdioFunction *sdio);
-int csr_sdio_linux_remove_irq(CsrSdioFunction *sdio);
-
-void uf_add_os_device(int bus_id, struct device *os_device);
-void uf_remove_os_device(int bus_id);
-
-
-
-/*
- * Claim/release SDIO
- *
- * For multifunction cards, we cannot grab the SDIO lock around the unifi_bh()
- * as this prevents other functions using SDIO.
- * Since some of CSR SDIO API is used regardless of trying to lock unifi_bh()
- * we have followed this scheme:
- * 1. If a function needs protection only when CSR_WIFI_SINGLE_FUNCTION is defined
- * then we call CsrSdioClaim/CsrSdioRelease().
- * 2. If a function needs protection only when CSR_WIFI_SINGLE_FUNCTION is not defined
- * then we call _sdio_claim_host()/_sdio_release_host(). Use of this should be restricted
- * to the SDIO glue layer only (e.g. sdio_mmc.c).
- * 3. If a function needs protection, regardless of the CSR_WIFI_SINGLE_FUNCTION
- * then we call directly the sdio_claim_host/sdio_release_host().
- * Use of this must be restricted to the SDIO glue layer only (e.g. sdio_mmc.c).
- *
- * Note: The _func and function pointers are _not_ the same.
- * The former is the (struct sdio_func*) context, which restricts the use to the SDIO glue layer.
- * The latter is the (CsrSdioFunction*) context, which allows calls from all layers.
- */
-
-#ifdef CSR_WIFI_SUPPORT_MMC_DRIVER
-
-#ifdef CSR_WIFI_SINGLE_FUNCTION
-#define CsrSdioClaim(function) sdio_claim_host((function)->priv);
-#define CsrSdioRelease(function) sdio_release_host((function)->priv);
-
-#define _sdio_claim_host(_func)
-#define _sdio_release_host(_func)
-
-#else
-#define CsrSdioClaim(function)
-#define CsrSdioRelease(function)
-
-#define _sdio_claim_host(_func) sdio_claim_host(_func)
-#define _sdio_release_host(_func) sdio_release_host(_func)
-
-#endif /* CSR_WIFI_SINGLE_FUNCTION */
-
-#else
-#define _sdio_claim_host(_func)
-#define _sdio_release_host(_func)
-
-#define CsrSdioClaim(function)
-#define CsrSdioRelease(function)
-
-#endif /* CSR_WIFI_SUPPORT_MMC_DRIVER */
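
To make rule 1 of the comment above concrete, a hypothetical call site outside the SDIO glue layer might look like the sketch below; CsrSdioClaim()/CsrSdioRelease() only expand to sdio_claim_host()/sdio_release_host() when both CSR_WIFI_SUPPORT_MMC_DRIVER and CSR_WIFI_SINGLE_FUNCTION are defined, and compile away otherwise:

/* Illustrative only: rule 1 usage with the CsrSdioFunction context. */
static void example_csr_sdio_access(CsrSdioFunction *sdio)
{
    CsrSdioClaim(sdio);
    /* ... CSR SDIO API calls needing single-function protection ... */
    CsrSdioRelease(sdio);
}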
-
-
-/*
- * Functions to allocate and free an ethernet device.
- */
-unifi_priv_t *uf_alloc_netdevice(CsrSdioFunction *sdio_dev, int bus_id);
-int uf_free_netdevice(unifi_priv_t *priv);
-
-/* Allocating function for other interfaces */
-u8 uf_alloc_netdevice_for_other_interfaces(unifi_priv_t *priv, u16 interfaceTag);
-
-/*
- * Firmware download related functions.
- */
-int uf_run_unifihelper(unifi_priv_t *priv);
-int uf_request_firmware_files(unifi_priv_t *priv, int is_fw);
-int uf_release_firmware_files(unifi_priv_t *priv);
-int uf_release_firmware(unifi_priv_t *priv, struct dlpriv *to_free);
-
-/*
- * Functions to create and delete the device nodes.
- */
-int uf_create_device_nodes(unifi_priv_t *priv, int bus_id);
-void uf_destroy_device_nodes(unifi_priv_t *priv);
-
-/*
- * Upper Edge Initialisation functions
- */
-int uf_init_bh(unifi_priv_t *priv);
-int uf_init_hw(unifi_priv_t *priv);
-
-/* Thread related helper functions */
-int uf_start_thread(unifi_priv_t *priv, struct uf_thread *thread, int (*func)(void *));
-void uf_stop_thread(unifi_priv_t *priv, struct uf_thread *thread);
-void uf_wait_for_thread_to_stop(unifi_priv_t *priv, struct uf_thread *thread);
-
-
-/*
- * Unifi Linux functions
- */
-void ul_init_clients(unifi_priv_t *priv);
-
-/* Configuration flags */
-#define CLI_USING_WIRE_FORMAT 0x0002
-#define CLI_SME_USERSPACE 0x0020
-ul_client_t *ul_register_client(unifi_priv_t *priv,
- unsigned int configuration,
- udi_event_t udi_event_clbk);
-int ul_deregister_client(ul_client_t *pcli);
-
-int ul_send_signal_unpacked(unifi_priv_t *priv,
- CSR_SIGNAL *sigptr,
- bulk_data_param_t *bulkdata);
-int ul_send_signal_raw(unifi_priv_t *priv,
- unsigned char *sigptr, int siglen,
- bulk_data_param_t *bulkdata);
-
-void ul_log_config_ind(unifi_priv_t *priv, u8 *conf_param, int len);
-
-
-/*
- * Data plane operations
- */
-/*
- * data_tx.c
- */
-int uf_verify_m4(unifi_priv_t *priv, const unsigned char *packet,
- unsigned int length);
-
-#ifdef CSR_SUPPORT_SME
-u8 uf_check_broadcast_bssid(unifi_priv_t *priv, const bulk_data_param_t *bulkdata);
-u8 uf_process_pm_bit_for_peer(unifi_priv_t * priv, CsrWifiRouterCtrlStaInfo_t * srcStaInfo,u8 pmBit,u16 interfaceTag);
-void uf_process_ps_poll(unifi_priv_t *priv,u8* sa,u8* da,u8 pmBit,u16 interfaceTag);
-int uf_ap_process_data_pdu(unifi_priv_t *priv, struct sk_buff *skb,
- struct ethhdr *ehdr, CsrWifiRouterCtrlStaInfo_t * srcStaInfo,
- const CSR_SIGNAL *signal,
- bulk_data_param_t *bulkdata,
- u8 macHeaderLengthInBytes);
-u8 uf_is_more_data_for_non_delivery_ac(CsrWifiRouterCtrlStaInfo_t *staRecord);
-void uf_process_wmm_deliver_ac_uapsd ( unifi_priv_t * priv,
- CsrWifiRouterCtrlStaInfo_t * srcStaInfo,
- u16 qosControl,
- u16 interfaceTag);
-
-void uf_send_buffered_data_from_ac(unifi_priv_t *priv, CsrWifiRouterCtrlStaInfo_t * staInfo, u8 queue, struct list_head *txList);
-void uf_send_buffered_data_from_delivery_ac(unifi_priv_t *priv, CsrWifiRouterCtrlStaInfo_t * staInfo, u8 queue, struct list_head *txList);
-
-void uf_continue_uapsd(unifi_priv_t *priv, CsrWifiRouterCtrlStaInfo_t * staInfo);
-void uf_send_qos_null(unifi_priv_t * priv,u16 interfaceTag, const u8 *da,CSR_PRIORITY priority,CsrWifiRouterCtrlStaInfo_t * srcStaInfo);
-void uf_send_nulldata(unifi_priv_t * priv,u16 interfaceTag, const u8 *da,CSR_PRIORITY priority,CsrWifiRouterCtrlStaInfo_t * srcStaInfo);
-
-
-
-#endif
-CsrResult uf_process_ma_packet_req(unifi_priv_t *priv, u8 *peerMacAddress, CSR_CLIENT_TAG hostTag, u16 interfaceTag, CSR_TRANSMISSION_CONTROL transmissionControl, CSR_RATE TransmitRate, CSR_PRIORITY priority, CSR_PROCESS_ID senderId, bulk_data_param_t *bulkdata);
-void uf_process_ma_vif_availibility_ind(unifi_priv_t *priv,u8 *sigdata, u32 siglen);
-#ifdef CSR_SUPPORT_SME
-void uf_send_buffered_frames(unifi_priv_t *priv,unifi_TrafficQueue queue);
-int uf_process_station_records_for_sending_data(unifi_priv_t *priv,u16 interfaceTag,
- CsrWifiRouterCtrlStaInfo_t *srcStaInfo,
- CsrWifiRouterCtrlStaInfo_t *dstStaInfo);
-void uf_prepare_send_cfm_list_for_queued_pkts(unifi_priv_t * priv,
- struct list_head *frames_need_cfm_list,
- struct list_head * list);
-void send_auto_ma_packet_confirm(unifi_priv_t *priv,
- netInterface_priv_t *interfacePriv,
- struct list_head *buffered_frames_list);
-void uf_flush_list(unifi_priv_t * priv, struct list_head * list);
-tx_buffered_packets_t *dequeue_tx_data_pdu(unifi_priv_t *priv, struct list_head *txList);
-void resume_unicast_buffered_frames(unifi_priv_t *priv, u16 interfaceTag);
-void update_eosp_to_head_of_broadcast_list_head(unifi_priv_t *priv,u16 interfaceTag);
-void resume_suspended_uapsd(unifi_priv_t* priv,u16 interfaceTag);
-#endif
-/*
- * netdev.c
- */
-
-#ifndef P80211_OUI_LEN
-#define P80211_OUI_LEN 3
-#endif
-typedef struct {
- u8 dsap; /* always 0xAA */
- u8 ssap; /* always 0xAA */
- u8 ctrl; /* always 0x03 */
- u8 oui[P80211_OUI_LEN]; /* organizational universal id */
- u16 protocol;
-} __attribute__ ((packed)) llc_snap_hdr_t;
-int skb_add_llc_snap(struct net_device *dev, struct sk_buff *skb, int proto);
-int skb_80211_to_ether(unifi_priv_t *priv, struct sk_buff *skb,
- const unsigned char *daddr, const unsigned char *saddr,
- const CSR_SIGNAL *signal,
- bulk_data_param_t *bulkdata);
-
-const char *result_code_str(int result);
-
-
-/* Prepares and appends the MAC header for the payload */
-int prepare_and_add_macheader(unifi_priv_t *priv,
- struct sk_buff *skb,
- struct sk_buff *newSkb,
- CSR_PRIORITY priority,
- bulk_data_param_t *bulkdata,
- u16 interfaceTag,
- const u8 *daddr,
- const u8 *saddr,
- u8 protection);
-CSR_PRIORITY
-get_packet_priority(unifi_priv_t *priv, struct sk_buff *skb, const struct ethhdr *ehdr, netInterface_priv_t *interfacePriv);
-
-void
-unifi_frame_ma_packet_req(unifi_priv_t *priv, CSR_PRIORITY priority,
- CSR_RATE TransmitRate, CSR_CLIENT_TAG hostTag,
- u16 interfaceTag, CSR_TRANSMISSION_CONTROL transmissionControl,
- CSR_PROCESS_ID leSenderProcessId, u8 *peerMacAddress,
- CSR_SIGNAL *signal);
-
-
-/* Pack the LSB to include station handle & status of tim set */
-#define CSR_WIFI_PACK_SENDER_ID_LSB_FOR_TIM_REQ(handle, timState) ((handle << 2) | timState)
-/* get the station record handle from the sender ID */
-#define CSR_WIFI_GET_STATION_HANDLE_FROM_RECEIVER_ID(receiverProcessId) (u8) ((receiverProcessId & 0xff) >> 2)
-/* get the timSet status from the sender ID */
-#define CSR_WIFI_GET_TIMSET_STATE_FROM_RECEIVER_ID(receiverProcessId) (u8) (receiverProcessId & 0x03)
-
-/* handle is 6 bits to accommodate it in the senderId LSB (only 64 stations can be associated) */
-#define CSR_WIFI_BROADCAST_OR_MULTICAST_HANDLE 0x3F
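
A small worked example (hypothetical values, not driver code) of the packing scheme above: a station handle of 5 with TIM state CSR_WIFI_TIM_SETTING packs into the sender-ID LSB and unpacks back from the receiver ID.

/* Illustrative only: round-trip handle and TIM state through the LSB. */
static void example_tim_senderid_roundtrip(void)
{
    u8  handle   = 5;                       /* station record handle */
    u8  timState = CSR_WIFI_TIM_SETTING;    /* 2-bit TIM state (3) */
    u16 lsb      = CSR_WIFI_PACK_SENDER_ID_LSB_FOR_TIM_REQ(handle, timState);

    /* lsb == (5 << 2) | 3 == 0x17; unpacking recovers both fields */
    u8 h = CSR_WIFI_GET_STATION_HANDLE_FROM_RECEIVER_ID(lsb);   /* == 5 */
    u8 s = CSR_WIFI_GET_TIMSET_STATE_FROM_RECEIVER_ID(lsb);     /* == 3 */

    (void)h;
    (void)s;
}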
-
-void update_tim(unifi_priv_t * priv, u16 aid, u8 setTim, u16 interfaceTag, u32 handle);
-void uf_handle_tim_cfm(unifi_priv_t *priv, CSR_MLME_SET_TIM_CONFIRM *cfm, u16 senderProcessId);
-
-/* Clear the Peer station Record, in case of wifioff/unexpected card removal */
-void CsrWifiRouterCtrlInterfaceReset(unifi_priv_t *priv, u16 interfaceTag);
-
-void scroll_ba_window(unifi_priv_t *priv,
- netInterface_priv_t *interfacePriv,
- ba_session_rx_struct *ba_session,
- u16 sn);
-
-u8 blockack_session_stop(unifi_priv_t *priv,
- u16 interfaceTag,
- CsrWifiRouterCtrlBlockAckRole role,
- u16 tID,
- CsrWifiMacAddress macAddress);
-#ifdef CSR_SUPPORT_SME
-/* Fetch the protection information from interface Mode */
-s8 uf_get_protection_bit_from_interfacemode(unifi_priv_t *priv, u16 interfaceTag, const u8 *daddr);
-#endif
-
-/* Fetch the station record from the database for a matching MAC address */
-#ifdef CSR_SUPPORT_SME
-CsrWifiRouterCtrlStaInfo_t *CsrWifiRouterCtrlGetStationRecordFromPeerMacAddress(unifi_priv_t *priv,
- const u8 *peerMacAddress,
- u16 interfaceTag);
-
-/* Fetch the station record from the database for a matching handle */
-CsrWifiRouterCtrlStaInfo_t * CsrWifiRouterCtrlGetStationRecordFromHandle(unifi_priv_t *priv,
- u32 handle,
- u16 interfaceTag);
-
-void uf_update_sta_activity(unifi_priv_t *priv, u16 interfaceTag, const u8 *peerMacAddress);
-void uf_process_ma_pkt_cfm_for_ap(unifi_priv_t *priv,u16 interfaceTag, const CSR_MA_PACKET_CONFIRM *pkt_cfm);
-#endif
-
-void uf_resume_data_plane(unifi_priv_t *priv, int queue,
- CsrWifiMacAddress peer_address,
- u16 interfaceTag);
-void uf_free_pending_rx_packets(unifi_priv_t *priv, int queue,
- CsrWifiMacAddress peer_address,u16 interfaceTag);
-
-int uf_register_netdev(unifi_priv_t *priv, int numOfInterface);
-void uf_unregister_netdev(unifi_priv_t *priv);
-
-void uf_net_get_name(struct net_device *dev, char *name, int len);
-
-void uf_send_queue_info(unifi_priv_t *priv);
-u16 uf_get_vif_identifier(CsrWifiRouterCtrlMode mode, u16 tag);
-
-void uf_process_rx_pending_queue(unifi_priv_t *priv, int queue,
- CsrWifiMacAddress source_address,
- int indicate, u16 interfaceTag);
-
-#ifdef CSR_WIFI_HIP_DEBUG_OFFLINE
-int uf_register_hip_offline_debug(unifi_priv_t *priv);
-int uf_unregister_hip_offline_debug(unifi_priv_t *priv);
-#endif
-
-/*
- * inet.c
- */
-void uf_register_inet_notifier(void);
-void uf_unregister_inet_notifier(void);
-
-
-/*
- * Suspend / Resume handlers
- */
-void unifi_resume(void *ospriv);
-void unifi_suspend(void *ospriv);
-
-
-#define QOS_CAPABILITY_WMM_ENABLED 0x0001
-#define QOS_CAPABILITY_WMM_UAPSD 0x0002
-#define QOS_CAPABILITY_ACM_BE_ENABLED 0x0010
-#define QOS_CAPABILITY_ACM_BK_ENABLED 0x0020
-#define QOS_CAPABILITY_ACM_VI_ENABLED 0x0040
-#define QOS_CAPABILITY_ACM_VO_ENABLED 0x0080
-#define QOS_CAPABILITY_TS_BE_ENABLED 0x0100
-#define QOS_CAPABILITY_TS_BK_ENABLED 0x0200
-#define QOS_CAPABILITY_TS_VI_ENABLED 0x0400
-#define QOS_CAPABILITY_TS_VO_ENABLED 0x0800
-
-
-/* EAPOL PDUS */
-#ifndef ETH_P_PAE
-#define ETH_P_PAE 0x888e
-#endif
-#ifndef ETH_P_WAI
-#define ETH_P_WAI 0x88b4
-#endif
-/*
- * unifi_dbg.c
- */
-void debug_string_indication(unifi_priv_t *priv,
- const unsigned char *extra,
- unsigned int extralen);
-void debug_word16_indication(unifi_priv_t *priv, const CSR_SIGNAL *sigptr);
-void debug_generic_indication(unifi_priv_t *priv, const CSR_SIGNAL *sigptr);
-
-
-/*
- * putest.c
- */
-int unifi_putest_start(unifi_priv_t *priv, unsigned char *arg);
-int unifi_putest_cmd52_block_read(unifi_priv_t *priv, unsigned char *arg);
-int unifi_putest_stop(unifi_priv_t *priv, unsigned char *arg);
-int unifi_putest_set_sdio_clock(unifi_priv_t *priv, unsigned char *arg);
-int unifi_putest_cmd52_read(unifi_priv_t *priv, unsigned char *arg);
-int unifi_putest_coredump_prepare(unifi_priv_t *priv, unsigned char *arg);
-int unifi_putest_cmd52_write(unifi_priv_t *priv, unsigned char *arg);
-int unifi_putest_gp_read16(unifi_priv_t *priv, unsigned char *arg);
-int unifi_putest_gp_write16(unifi_priv_t *priv, unsigned char *arg);
-
-int unifi_putest_dl_fw(unifi_priv_t *priv, unsigned char *arg);
-int unifi_putest_dl_fw_buff(unifi_priv_t *priv, unsigned char *arg);
-
-#endif /* __LINUX_UNIFI_PRIV_H__ */
diff --git a/drivers/staging/csr/unifi_sme.c b/drivers/staging/csr/unifi_sme.c
deleted file mode 100644
index 9029503..0000000
--- a/drivers/staging/csr/unifi_sme.c
+++ /dev/null
@@ -1,1225 +0,0 @@
-/*
- * ***************************************************************************
- * FILE: unifi_sme.c
- *
- * PURPOSE: SME related functions.
- *
- * Copyright (C) 2007-2009 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ***************************************************************************
- */
-
-#include "unifi_priv.h"
-#include "csr_wifi_hip_unifi.h"
-#include "csr_wifi_hip_conversions.h"
-#include <linux/sched/rt.h>
-
-
-
- int
-convert_sme_error(CsrResult error)
-{
- switch (error) {
- case CSR_RESULT_SUCCESS:
- return 0;
- case CSR_RESULT_FAILURE:
- case CSR_WIFI_RESULT_NOT_FOUND:
- case CSR_WIFI_RESULT_TIMED_OUT:
- case CSR_WIFI_RESULT_CANCELLED:
- case CSR_WIFI_RESULT_UNAVAILABLE:
- return -EIO;
- case CSR_WIFI_RESULT_NO_ROOM:
- return -EBUSY;
- case CSR_WIFI_RESULT_INVALID_PARAMETER:
- return -EINVAL;
- case CSR_WIFI_RESULT_UNSUPPORTED:
- return -EOPNOTSUPP;
- default:
- return -EIO;
- }
-}
-
-
-/*
- * ---------------------------------------------------------------------------
- * sme_log_event
- *
- * Callback function to be registered as the SME event callback.
- * Copies the signal content into a new udi_log_t struct and adds
- * it to the read queue for the SME client.
- *
- * Arguments:
- * arg This is the value given to unifi_add_udi_hook, in
- * this case a pointer to the client instance.
- * signal Pointer to the received signal.
- * signal_len Size of the signal structure in bytes.
- * bulkdata Pointers to any associated bulk data.
- * dir Direction of the signal. Zero means from host,
- * non-zero means to host.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
- void
-sme_log_event(ul_client_t *pcli,
- const u8 *signal, int signal_len,
- const bulk_data_param_t *bulkdata,
- int dir)
-{
- unifi_priv_t *priv;
- CSR_SIGNAL unpacked_signal;
- CsrWifiSmeDataBlock mlmeCommand;
- CsrWifiSmeDataBlock dataref1;
- CsrWifiSmeDataBlock dataref2;
- CsrResult result = CSR_RESULT_SUCCESS;
- int r;
-
- /* Just a sanity check */
- if ((signal == NULL) || (signal_len <= 0)) {
- return;
- }
-
- priv = uf_find_instance(pcli->instance);
- if (!priv) {
- unifi_error(priv, "sme_log_event: invalid priv\n");
- return;
- }
-
- if (priv->smepriv == NULL) {
- unifi_error(priv, "sme_log_event: invalid smepriv\n");
- return;
- }
-
- unifi_trace(priv, UDBG3,
- "sme_log_event: Process signal 0x%.4X\n",
- CSR_GET_UINT16_FROM_LITTLE_ENDIAN(signal));
-
-
- /* If the signal is known, do any filtering required; otherwise pass it to the SME. */
- r = read_unpack_signal(signal, &unpacked_signal);
- if (r == CSR_RESULT_SUCCESS) {
- if ((unpacked_signal.SignalPrimitiveHeader.SignalId == CSR_DEBUG_STRING_INDICATION_ID) ||
- (unpacked_signal.SignalPrimitiveHeader.SignalId == CSR_DEBUG_WORD16_INDICATION_ID))
- {
- return;
- }
- if (unpacked_signal.SignalPrimitiveHeader.SignalId == CSR_MA_PACKET_INDICATION_ID)
- {
- u16 frmCtrl;
- u8 unicastPdu = TRUE;
- u8 *macHdrLocation;
- u8 *raddr = NULL, *taddr = NULL;
- CsrWifiMacAddress peerMacAddress;
- /* Check if we need to send CsrWifiRouterCtrlMicFailureInd*/
- CSR_MA_PACKET_INDICATION *ind = &unpacked_signal.u.MaPacketIndication;
-
- macHdrLocation = (u8 *) bulkdata->d[0].os_data_ptr;
- /* Fetch the frame control value from mac header */
- frmCtrl = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(macHdrLocation);
-
- /* Point to the addresses */
- raddr = macHdrLocation + MAC_HEADER_ADDR1_OFFSET;
- taddr = macHdrLocation + MAC_HEADER_ADDR2_OFFSET;
-
- memcpy(peerMacAddress.a, taddr, ETH_ALEN);
-
- if(ind->ReceptionStatus == CSR_MICHAEL_MIC_ERROR)
- {
- if (*raddr & 0x1)
- unicastPdu = FALSE;
-
- CsrWifiRouterCtrlMicFailureIndSend (priv->CSR_WIFI_SME_IFACEQUEUE, 0,
- (ind->VirtualInterfaceIdentifier & 0xff),peerMacAddress,
- unicastPdu);
- return;
- }
- else
- {
- if(ind->ReceptionStatus == CSR_RX_SUCCESS)
- {
- u8 pmBit = (frmCtrl & 0x1000)?0x01:0x00;
- u16 interfaceTag = (ind->VirtualInterfaceIdentifier & 0xff);
- CsrWifiRouterCtrlStaInfo_t *srcStaInfo = CsrWifiRouterCtrlGetStationRecordFromPeerMacAddress(priv,taddr,interfaceTag);
- if((srcStaInfo != NULL) && (uf_check_broadcast_bssid(priv, bulkdata)== FALSE))
- {
- uf_process_pm_bit_for_peer(priv,srcStaInfo,pmBit,interfaceTag);
-
- /* Update station last activity flag */
- srcStaInfo->activity_flag = TRUE;
- }
- }
- }
- }
-
- if (unpacked_signal.SignalPrimitiveHeader.SignalId == CSR_MA_PACKET_CONFIRM_ID)
- {
- CSR_MA_PACKET_CONFIRM *cfm = &unpacked_signal.u.MaPacketConfirm;
- u16 interfaceTag = (cfm->VirtualInterfaceIdentifier & 0xff);
- netInterface_priv_t *interfacePriv;
- CSR_MA_PACKET_REQUEST *req;
- CsrWifiMacAddress peerMacAddress;
-
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES)
- {
- unifi_error(priv, "Bad MA_PACKET_CONFIRM interfaceTag %d\n", interfaceTag);
- return;
- }
-
- unifi_trace(priv,UDBG1,"MA-PACKET Confirm (%x, %x)\n", cfm->HostTag, cfm->TransmissionStatus);
-
- interfacePriv = priv->interfacePriv[interfaceTag];
-#ifdef CSR_SUPPORT_SME
- if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP ||
- interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) {
-
- if(cfm->HostTag == interfacePriv->multicastPduHostTag){
- uf_process_ma_pkt_cfm_for_ap(priv ,interfaceTag, cfm);
- }
- }
-#endif
-
- req = &interfacePriv->m4_signal.u.MaPacketRequest;
-
- if(cfm->HostTag & 0x80000000)
- {
- if (cfm->TransmissionStatus != CSR_TX_SUCCESSFUL)
- {
- result = CSR_RESULT_FAILURE;
- }
-#ifdef CSR_SUPPORT_SME
- memcpy(peerMacAddress.a, req->Ra.x, ETH_ALEN);
- /* Check if this is a confirm for an EAPOL M4 frame and we need to send a transmitted ind */
- if (interfacePriv->m4_sent && (cfm->HostTag == interfacePriv->m4_hostTag))
- {
- unifi_trace(priv, UDBG1, "%s: Sending M4 Transmit CFM\n", __FUNCTION__);
- CsrWifiRouterCtrlM4TransmittedIndSend(priv->CSR_WIFI_SME_IFACEQUEUE, 0,
- interfaceTag,
- peerMacAddress,
- result);
- interfacePriv->m4_sent = FALSE;
- interfacePriv->m4_hostTag = 0xffffffff;
- }
-#endif
- /* If EAPOL was requested via router APIs then send cfm else ignore*/
- if((cfm->HostTag & 0x80000000) != CSR_WIFI_EAPOL_M4_HOST_TAG) {
- CsrWifiRouterMaPacketCfmSend((u16)signal[2],
- cfm->VirtualInterfaceIdentifier,
- result,
- (cfm->HostTag & 0x3fffffff), cfm->Rate);
- } else {
- unifi_trace(priv, UDBG1, "%s: M4 received from netdevice\n", __FUNCTION__);
- }
- return;
- }
- }
- }
-
- mlmeCommand.length = signal_len;
- mlmeCommand.data = (u8*)signal;
-
- dataref1.length = bulkdata->d[0].data_length;
- if (dataref1.length > 0) {
- dataref1.data = (u8 *) bulkdata->d[0].os_data_ptr;
- } else
- {
- dataref1.data = NULL;
- }
-
- dataref2.length = bulkdata->d[1].data_length;
- if (dataref2.length > 0) {
- dataref2.data = (u8 *) bulkdata->d[1].os_data_ptr;
- } else
- {
- dataref2.data = NULL;
- }
-
- CsrWifiRouterCtrlHipIndSend(priv->CSR_WIFI_SME_IFACEQUEUE, mlmeCommand.length, mlmeCommand.data,
- dataref1.length, dataref1.data,
- dataref2.length, dataref2.data);
-
-} /* sme_log_event() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * uf_sme_port_state
- *
- * Return the state of the controlled port.
- *
- * Arguments:
- * priv Pointer to device private context struct
- * address Pointer to the destination for tx or sender for rx address
- * queue Controlled or uncontrolled queue
- *
- * Returns:
- * A CsrWifiRouterCtrlPortAction value.
- * ---------------------------------------------------------------------------
- */
-CsrWifiRouterCtrlPortAction
-uf_sme_port_state(unifi_priv_t *priv, unsigned char *address, int queue, u16 interfaceTag)
-{
- int i;
- unifi_port_config_t *port;
- netInterface_priv_t *interfacePriv;
-
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "uf_sme_port_state: bad interfaceTag\n");
- return CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_CLOSED_DISCARD;
- }
-
- interfacePriv = priv->interfacePriv[interfaceTag];
-
- if (queue == UF_CONTROLLED_PORT_Q) {
- port = &interfacePriv->controlled_data_port;
- } else {
- port = &interfacePriv->uncontrolled_data_port;
- }
-
- if (!port->entries_in_use) {
- unifi_trace(priv, UDBG5, "No port configurations, return Discard.\n");
- return CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_CLOSED_DISCARD;
- }
-
- /* If the port configuration is common for all destinations, return it. */
- if (port->overide_action == UF_DATA_PORT_OVERIDE) {
- unifi_trace(priv, UDBG5, "Single port configuration (%d).\n",
- port->port_cfg[0].port_action);
- return port->port_cfg[0].port_action;
- }
-
- unifi_trace(priv, UDBG5, "Multiple (%d) port configurations.\n", port->entries_in_use);
-
- /* If multiple configurations exist.. */
- for (i = 0; i < UNIFI_MAX_CONNECTIONS; i++) {
- /* .. go through the list and match the destination address. */
- if (port->port_cfg[i].in_use &&
- memcmp(address, port->port_cfg[i].mac_address.a, ETH_ALEN) == 0) {
- /* Return the desired action. */
- return port->port_cfg[i].port_action;
- }
- }
-
- /* Could not find any information, return Open. */
- unifi_trace(priv, UDBG5, "port configuration not found, return Open.\n");
- return CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_OPEN;
-} /* uf_sme_port_state() */
-
-/*
- * ---------------------------------------------------------------------------
- * uf_sme_port_config_handle
- *
- * Return the port config handle of the controlled/uncontrolled port.
- *
- * Arguments:
- * priv Pointer to device private context struct
- * address Pointer to the destination for tx or sender for rx address
- * queue Controlled or uncontrolled queue
- *
- * Returns:
- * A unifi_port_cfg_t pointer, or NULL if no matching entry exists.
- * ---------------------------------------------------------------------------
- */
-unifi_port_cfg_t*
-uf_sme_port_config_handle(unifi_priv_t *priv, unsigned char *address, int queue, u16 interfaceTag)
-{
- int i;
- unifi_port_config_t *port;
- netInterface_priv_t *interfacePriv;
-
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "uf_sme_port_config_handle: bad interfaceTag\n");
- return NULL;
- }
-
- /* Only index interfacePriv[] once interfaceTag has been validated */
- interfacePriv = priv->interfacePriv[interfaceTag];
-
- if (queue == UF_CONTROLLED_PORT_Q) {
- port = &interfacePriv->controlled_data_port;
- } else {
- port = &interfacePriv->uncontrolled_data_port;
- }
-
- if (!port->entries_in_use) {
- unifi_trace(priv, UDBG5, "No port configurations, return Discard.\n");
- return NULL;
- }
-
- /* If the port configuration is common for all destinations, return it. */
- if (port->overide_action == UF_DATA_PORT_OVERIDE) {
- unifi_trace(priv, UDBG5, "Single port configuration (%d).\n",
- port->port_cfg[0].port_action);
- if (address) {
- unifi_trace(priv, UDBG5, "addr[0] = %x, addr[1] = %x, addr[2] = %x, addr[3] = %x\n", address[0], address[1], address[2], address[3]);
- }
- return &port->port_cfg[0];
- }
-
- unifi_trace(priv, UDBG5, "Multiple port configurations.\n");
-
- /* If multiple configurations exist.. */
- for (i = 0; i < UNIFI_MAX_CONNECTIONS; i++) {
- /* .. go through the list and match the destination address. */
- if (port->port_cfg[i].in_use &&
- memcmp(address, port->port_cfg[i].mac_address.a, ETH_ALEN) == 0) {
- /* Return the desired action. */
- return &port->port_cfg[i];
- }
- }
-
- /* Could not find any information, return NULL. */
- unifi_trace(priv, UDBG5, "port configuration not found, returning NULL (debug).\n");
- return NULL;
-} /* uf_sme_port_config_handle */
-
-void
-uf_multicast_list_wq(struct work_struct *work)
-{
- unifi_priv_t *priv = container_of(work, unifi_priv_t,
- multicast_list_task);
- int i;
- u16 interfaceTag = 0;
- CsrWifiMacAddress* multicast_address_list = NULL;
- int mc_count;
- u8 *mc_list;
- netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag];
-
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "uf_multicast_list_wq: bad interfaceTag\n");
- return;
- }
-
- unifi_trace(priv, UDBG5,
- "uf_multicast_list_wq: list count = %d\n",
- interfacePriv->mc_list_count);
-
- /* Flush the current list */
- CsrWifiRouterCtrlMulticastAddressIndSend(priv->CSR_WIFI_SME_IFACEQUEUE,0, interfaceTag, CSR_WIFI_SME_LIST_ACTION_FLUSH, 0, NULL);
-
- mc_count = interfacePriv->mc_list_count;
- mc_list = interfacePriv->mc_list;
- /*
- * Allocate a new list, need to free it later
- * in unifi_mgt_multicast_address_cfm().
- */
- multicast_address_list = kmalloc(mc_count * sizeof(CsrWifiMacAddress), GFP_KERNEL);
-
- if (multicast_address_list == NULL) {
- return;
- }
-
- for (i = 0; i < mc_count; i++) {
- memcpy(multicast_address_list[i].a, mc_list, ETH_ALEN);
- mc_list += ETH_ALEN;
- }
-
- if (priv->smepriv == NULL) {
- kfree(multicast_address_list);
- return;
- }
-
- CsrWifiRouterCtrlMulticastAddressIndSend(priv->CSR_WIFI_SME_IFACEQUEUE,0,
- interfaceTag,
- CSR_WIFI_SME_LIST_ACTION_ADD,
- mc_count, multicast_address_list);
-
- /* The SME will take a copy of the addresses */
- kfree(multicast_address_list);
-}
-
-
-int unifi_cfg_power(unifi_priv_t *priv, unsigned char *arg)
-{
- unifi_cfg_power_t cfg_power;
- int rc;
- int wol;
-
- if (get_user(cfg_power, (unifi_cfg_power_t*)(((unifi_cfg_command_t*)arg) + 1))) {
- unifi_error(priv, "UNIFI_CFG: Failed to get the argument\n");
- return -EFAULT;
- }
-
- switch (cfg_power) {
- case UNIFI_CFG_POWER_OFF:
- priv->wol_suspend = (enable_wol == UNIFI_WOL_OFF) ? FALSE : TRUE;
- rc = sme_sys_suspend(priv);
- if (rc) {
- return rc;
- }
- break;
- case UNIFI_CFG_POWER_ON:
- wol = priv->wol_suspend;
- rc = sme_sys_resume(priv);
- if (rc) {
- return rc;
- }
- if (wol) {
- /* Kick the BH to ensure pending transfers are handled when
- * a suspend happened with card powered.
- */
- unifi_send_signal(priv->card, NULL, 0, NULL);
- }
- break;
- default:
- unifi_error(priv, "WIFI POWER: Unknown value.\n");
- return -EINVAL;
- }
-
- return 0;
-}
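
All of the unifi_cfg_* handlers in this file parse their argument the same way: the userspace buffer begins with a unifi_cfg_command_t and the command-specific payload follows immediately, which is why each handler computes ((unifi_cfg_command_t*)arg) + 1. A hypothetical sketch of that pattern (example_read_cfg_payload is not a real driver function):

/* Illustrative only: locate and read the payload that follows the command
 * word in the user-supplied buffer. */
static int example_read_cfg_payload(unsigned char *arg, unifi_cfg_power_t *out)
{
    unifi_cfg_power_t *payload =
        (unifi_cfg_power_t *)(((unifi_cfg_command_t *)arg) + 1);

    return get_user(*out, payload) ? -EFAULT : 0;
}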
-
-
-int unifi_cfg_power_save(unifi_priv_t *priv, unsigned char *arg)
-{
- unifi_cfg_powersave_t cfg_power_save;
- CsrWifiSmePowerConfig powerConfig;
- int rc;
-
- if (get_user(cfg_power_save, (unifi_cfg_powersave_t*)(((unifi_cfg_command_t*)arg) + 1))) {
- unifi_error(priv, "UNIFI_CFG: Failed to get the argument\n");
- return -EFAULT;
- }
-
- /* Get the coex info from the SME */
- rc = sme_mgt_power_config_get(priv, &powerConfig);
- if (rc) {
- unifi_error(priv, "UNIFI_CFG: Get unifi_PowerConfigValue failed.\n");
- return rc;
- }
-
- switch (cfg_power_save) {
- case UNIFI_CFG_POWERSAVE_NONE:
- powerConfig.powerSaveLevel = CSR_WIFI_SME_POWER_SAVE_LEVEL_LOW;
- break;
- case UNIFI_CFG_POWERSAVE_FAST:
- powerConfig.powerSaveLevel = CSR_WIFI_SME_POWER_SAVE_LEVEL_MED;
- break;
- case UNIFI_CFG_POWERSAVE_FULL:
- powerConfig.powerSaveLevel = CSR_WIFI_SME_POWER_SAVE_LEVEL_HIGH;
- break;
- case UNIFI_CFG_POWERSAVE_AUTO:
- powerConfig.powerSaveLevel = CSR_WIFI_SME_POWER_SAVE_LEVEL_AUTO;
- break;
- default:
- unifi_error(priv, "POWERSAVE: Unknown value.\n");
- return -EINVAL;
- }
-
- rc = sme_mgt_power_config_set(priv, &powerConfig);
-
- if (rc) {
- unifi_error(priv, "UNIFI_CFG: Set unifi_PowerConfigValue failed.\n");
- }
-
- return rc;
-}
-
-
-int unifi_cfg_power_supply(unifi_priv_t *priv, unsigned char *arg)
-{
- unifi_cfg_powersupply_t cfg_power_supply;
- CsrWifiSmeHostConfig hostConfig;
- int rc;
-
- if (get_user(cfg_power_supply, (unifi_cfg_powersupply_t*)(((unifi_cfg_command_t*)arg) + 1))) {
- unifi_error(priv, "UNIFI_CFG: Failed to get the argument\n");
- return -EFAULT;
- }
-
- /* Get the coex info from the SME */
- rc = sme_mgt_host_config_get(priv, &hostConfig);
- if (rc) {
- unifi_error(priv, "UNIFI_CFG: Get unifi_HostConfigValue failed.\n");
- return rc;
- }
-
- switch (cfg_power_supply) {
- case UNIFI_CFG_POWERSUPPLY_MAINS:
- hostConfig.powerMode = CSR_WIFI_SME_HOST_POWER_MODE_ACTIVE;
- break;
- case UNIFI_CFG_POWERSUPPLY_BATTERIES:
- hostConfig.powerMode = CSR_WIFI_SME_HOST_POWER_MODE_POWER_SAVE;
- break;
- default:
- unifi_error(priv, "POWERSUPPLY: Unknown value.\n");
- return -EINVAL;
- }
-
- rc = sme_mgt_host_config_set(priv, &hostConfig);
- if (rc) {
- unifi_error(priv, "UNIFI_CFG: Set unifi_HostConfigValue failed.\n");
- }
-
- return rc;
-}
-
-
-int unifi_cfg_packet_filters(unifi_priv_t *priv, unsigned char *arg)
-{
- unsigned char *tclas_buffer;
- unsigned int tclas_buffer_length;
- tclas_t *dhcp_tclas;
- int rc;
-
- /* Free any TCLASs previously allocated */
- if (priv->packet_filters.tclas_ies_length) {
- kfree(priv->filter_tclas_ies);
- priv->filter_tclas_ies = NULL;
- }
-
- tclas_buffer = ((unsigned char*)arg) + sizeof(unifi_cfg_command_t) + sizeof(unsigned int);
- if (copy_from_user(&priv->packet_filters, (void*)tclas_buffer,
- sizeof(uf_cfg_bcast_packet_filter_t))) {
- unifi_error(priv, "UNIFI_CFG: Failed to get the filter struct\n");
- return -EFAULT;
- }
-
- tclas_buffer_length = priv->packet_filters.tclas_ies_length;
-
- /* Allocate TCLASs if necessary */
- if (priv->packet_filters.dhcp_filter) {
- priv->packet_filters.tclas_ies_length += sizeof(tclas_t);
- }
- if (priv->packet_filters.tclas_ies_length > 0) {
- priv->filter_tclas_ies = kmalloc(priv->packet_filters.tclas_ies_length, GFP_KERNEL);
- if (priv->filter_tclas_ies == NULL) {
- return -ENOMEM;
- }
- if (tclas_buffer_length) {
- tclas_buffer += sizeof(uf_cfg_bcast_packet_filter_t) - sizeof(unsigned char*);
- if (copy_from_user(priv->filter_tclas_ies,
- tclas_buffer,
- tclas_buffer_length)) {
- unifi_error(priv, "UNIFI_CFG: Failed to get the TCLAS buffer\n");
- return -EFAULT;
- }
- }
- }
-
- if(priv->packet_filters.dhcp_filter)
- {
- /* Append the DHCP TCLAS IE (UDP, client/server ports 68/67) */
- dhcp_tclas = (tclas_t*)(priv->filter_tclas_ies + tclas_buffer_length);
- memset(dhcp_tclas, 0, sizeof(tclas_t));
- dhcp_tclas->element_id = 14;
- dhcp_tclas->length = sizeof(tcpip_clsfr_t) + 1;
- dhcp_tclas->user_priority = 0;
- dhcp_tclas->tcp_ip_cls_fr.cls_fr_type = 1;
- dhcp_tclas->tcp_ip_cls_fr.version = 4;
- ((u8*)(&dhcp_tclas->tcp_ip_cls_fr.source_port))[0] = 0x00;
- ((u8*)(&dhcp_tclas->tcp_ip_cls_fr.source_port))[1] = 0x44;
- ((u8*)(&dhcp_tclas->tcp_ip_cls_fr.dest_port))[0] = 0x00;
- ((u8*)(&dhcp_tclas->tcp_ip_cls_fr.dest_port))[1] = 0x43;
- dhcp_tclas->tcp_ip_cls_fr.protocol = 0x11;
- dhcp_tclas->tcp_ip_cls_fr.cls_fr_mask = 0x58; //bits: 3,4,6
- }
-
- rc = sme_mgt_packet_filter_set(priv);
-
- return rc;
-}
-
-
-int unifi_cfg_wmm_qos_info(unifi_priv_t *priv, unsigned char *arg)
-{
- u8 wmm_qos_info;
- int rc = 0;
-
- if (get_user(wmm_qos_info, (u8*)(((unifi_cfg_command_t*)arg) + 1))) {
- unifi_error(priv, "UNIFI_CFG: Failed to get the argument\n");
- return -EFAULT;
- }
-
- /* Store the value in the connection info */
- priv->connection_config.wmmQosInfo = wmm_qos_info;
-
- return rc;
-}
-
-
-int unifi_cfg_wmm_addts(unifi_priv_t *priv, unsigned char *arg)
-{
- u32 addts_tid;
- u8 addts_ie_length;
- u8 *addts_ie;
- u8 *addts_params;
- CsrWifiSmeDataBlock tspec;
- CsrWifiSmeDataBlock tclas;
- int rc;
-
- addts_params = (u8*)(((unifi_cfg_command_t*)arg) + 1);
- if (get_user(addts_tid, (u32*)addts_params)) {
- unifi_error(priv, "unifi_cfg_wmm_addts: Failed to get the argument\n");
- return -EFAULT;
- }
-
- addts_params += sizeof(u32);
- if (get_user(addts_ie_length, (u8*)addts_params)) {
- unifi_error(priv, "unifi_cfg_wmm_addts: Failed to get the argument\n");
- return -EFAULT;
- }
-
- unifi_trace(priv, UDBG4, "addts: tid = 0x%x ie_length = %d\n",
- addts_tid, addts_ie_length);
-
- addts_ie = kmalloc(addts_ie_length, GFP_KERNEL);
- if (addts_ie == NULL) {
- unifi_error(priv,
- "unifi_cfg_wmm_addts: Failed to malloc %d bytes for addts_ie buffer\n",
- addts_ie_length);
- return -ENOMEM;
- }
-
- addts_params += sizeof(u8);
- rc = copy_from_user(addts_ie, addts_params, addts_ie_length);
- if (rc) {
- unifi_error(priv, "unifi_cfg_wmm_addts: Failed to get the addts buffer\n");
- kfree(addts_ie);
- return -EFAULT;
- }
-
- tspec.data = addts_ie;
- tspec.length = addts_ie_length;
- tclas.data = NULL;
- tclas.length = 0;
-
- rc = sme_mgt_tspec(priv, CSR_WIFI_SME_LIST_ACTION_ADD, addts_tid,
- &tspec, &tclas);
-
- kfree(addts_ie);
- return rc;
-}
-
-
-int unifi_cfg_wmm_delts(unifi_priv_t *priv, unsigned char *arg)
-{
- u32 delts_tid;
- u8 *delts_params;
- CsrWifiSmeDataBlock tspec;
- CsrWifiSmeDataBlock tclas;
- int rc;
-
- delts_params = (u8*)(((unifi_cfg_command_t*)arg) + 1);
- if (get_user(delts_tid, (u32*)delts_params)) {
- unifi_error(priv, "unifi_cfg_wmm_delts: Failed to get the argument\n");
- return -EFAULT;
- }
-
- unifi_trace(priv, UDBG4, "delts: tid = 0x%x\n", delts_tid);
-
- tspec.data = tclas.data = NULL;
- tspec.length = tclas.length = 0;
-
- rc = sme_mgt_tspec(priv, CSR_WIFI_SME_LIST_ACTION_REMOVE, delts_tid,
- &tspec, &tclas);
-
- return rc;
-}
-
-int unifi_cfg_strict_draft_n(unifi_priv_t *priv, unsigned char *arg)
-{
- u8 strict_draft_n;
- u8 *strict_draft_n_params;
- int rc;
-
- CsrWifiSmeStaConfig staConfig;
- CsrWifiSmeDeviceConfig deviceConfig;
-
- strict_draft_n_params = (u8*)(((unifi_cfg_command_t*)arg) + 1);
- if (get_user(strict_draft_n, (u8*)strict_draft_n_params)) {
- unifi_error(priv, "unifi_cfg_strict_draft_n: Failed to get the argument\n");
- return -EFAULT;
- }
-
- unifi_trace(priv, UDBG4, "strict_draft_n: = %s\n", ((strict_draft_n) ? "yes":"no"));
-
- rc = sme_mgt_sme_config_get(priv, &staConfig, &deviceConfig);
-
- if (rc) {
- unifi_warning(priv, "unifi_cfg_strict_draft_n: Get unifi_SMEConfigValue failed.\n");
- return -EFAULT;
- }
-
- deviceConfig.enableStrictDraftN = strict_draft_n;
-
- rc = sme_mgt_sme_config_set(priv, &staConfig, &deviceConfig);
- if (rc) {
- unifi_warning(priv, "unifi_cfg_strict_draft_n: Set unifi_SMEConfigValue failed.\n");
- rc = -EFAULT;
- }
-
- return rc;
-}
-
-
-int unifi_cfg_enable_okc(unifi_priv_t *priv, unsigned char *arg)
-{
- u8 enable_okc;
- u8 *enable_okc_params;
- int rc;
-
- CsrWifiSmeStaConfig staConfig;
- CsrWifiSmeDeviceConfig deviceConfig;
-
- enable_okc_params = (u8*)(((unifi_cfg_command_t*)arg) + 1);
- if (get_user(enable_okc, (u8*)enable_okc_params)) {
- unifi_error(priv, "unifi_cfg_enable_okc: Failed to get the argument\n");
- return -EFAULT;
- }
-
- unifi_trace(priv, UDBG4, "enable_okc: = %s\n", ((enable_okc) ? "yes":"no"));
-
- rc = sme_mgt_sme_config_get(priv, &staConfig, &deviceConfig);
- if (rc) {
- unifi_warning(priv, "unifi_cfg_enable_okc: Get unifi_SMEConfigValue failed.\n");
- return -EFAULT;
- }
-
- staConfig.enableOpportunisticKeyCaching = enable_okc;
-
- rc = sme_mgt_sme_config_set(priv, &staConfig, &deviceConfig);
- if (rc) {
- unifi_warning(priv, "unifi_cfg_enable_okc: Set unifi_SMEConfigValue failed.\n");
- rc = -EFAULT;
- }
-
- return rc;
-}
-
-
-int unifi_cfg_get_info(unifi_priv_t *priv, unsigned char *arg)
-{
- unifi_cfg_get_t get_cmd;
- char inst_name[IFNAMSIZ];
- int rc;
-
- if (get_user(get_cmd, (unifi_cfg_get_t*)(((unifi_cfg_command_t*)arg) + 1))) {
- unifi_error(priv, "UNIFI_CFG: Failed to get the argument\n");
- return -EFAULT;
- }
-
- switch (get_cmd) {
- case UNIFI_CFG_GET_COEX:
- {
- CsrWifiSmeCoexInfo coexInfo;
- /* Get the coex info from the SME */
- rc = sme_mgt_coex_info_get(priv, &coexInfo);
- if (rc) {
- unifi_error(priv, "UNIFI_CFG: Get unifi_CoexInfoValue failed.\n");
- return rc;
- }
-
- /* Copy the info to the out buffer */
- if (copy_to_user((void*)arg,
- &coexInfo,
- sizeof(CsrWifiSmeCoexInfo))) {
- unifi_error(priv, "UNIFI_CFG: Failed to copy the coex info\n");
- return -EFAULT;
- }
- break;
- }
- case UNIFI_CFG_GET_POWER_MODE:
- {
- CsrWifiSmePowerConfig powerConfig;
- rc = sme_mgt_power_config_get(priv, &powerConfig);
- if (rc) {
- unifi_error(priv, "UNIFI_CFG: Get unifi_PowerConfigValue failed.\n");
- return rc;
- }
-
- /* Copy the info to the out buffer */
- if (copy_to_user((void*)arg,
- &powerConfig.powerSaveLevel,
- sizeof(CsrWifiSmePowerSaveLevel))) {
- unifi_error(priv, "UNIFI_CFG: Failed to copy the power save info\n");
- return -EFAULT;
- }
- break;
- }
- case UNIFI_CFG_GET_POWER_SUPPLY:
- {
- CsrWifiSmeHostConfig hostConfig;
- rc = sme_mgt_host_config_get(priv, &hostConfig);
- if (rc) {
- unifi_error(priv, "UNIFI_CFG: Get unifi_HostConfigValue failed.\n");
- return rc;
- }
-
- /* Copy the info to the out buffer */
- if (copy_to_user((void*)arg,
- &hostConfig.powerMode,
- sizeof(CsrWifiSmeHostPowerMode))) {
- unifi_error(priv, "UNIFI_CFG: Failed to copy the host power mode\n");
- return -EFAULT;
- }
- break;
- }
- case UNIFI_CFG_GET_VERSIONS:
- break;
- case UNIFI_CFG_GET_INSTANCE:
- {
- u16 InterfaceId=0;
- uf_net_get_name(priv->netdev[InterfaceId], &inst_name[0], sizeof(inst_name));
-
- /* Copy the info to the out buffer */
- if (copy_to_user((void*)arg,
- &inst_name[0],
- sizeof(inst_name))) {
- unifi_error(priv, "UNIFI_CFG: Failed to copy the instance name\n");
- return -EFAULT;
- }
- }
- break;
-
- case UNIFI_CFG_GET_AP_CONFIG:
- {
-#ifdef CSR_SUPPORT_WEXT_AP
- uf_cfg_ap_config_t cfg_ap_config;
-
- memset(&cfg_ap_config, 0, sizeof(cfg_ap_config));
- cfg_ap_config.channel = priv->ap_config.channel;
- cfg_ap_config.beaconInterval = priv->ap_mac_config.beaconInterval;
- cfg_ap_config.wmmEnabled = priv->ap_mac_config.wmmEnabled;
- cfg_ap_config.dtimPeriod = priv->ap_mac_config.dtimPeriod;
- cfg_ap_config.phySupportedBitmap = priv->ap_mac_config.phySupportedBitmap;
- if (copy_to_user((void*)arg,
- &cfg_ap_config,
- sizeof(uf_cfg_ap_config_t))) {
- unifi_error(priv, "UNIFI_CFG: Failed to copy the AP configuration\n");
- return -EFAULT;
- }
-#else
- return -EPERM;
-#endif
- }
- break;
-
-
- default:
- unifi_error(priv, "unifi_cfg_get_info: Unknown value.\n");
- return -EINVAL;
- }
-
- return 0;
-}
-#ifdef CSR_SUPPORT_WEXT_AP
-int
- uf_configure_supported_rates(u8 * supportedRates, u8 phySupportedBitmap)
-{
- int i=0;
- u8 b=FALSE, g = FALSE, n = FALSE;
- b = phySupportedBitmap & CSR_WIFI_SME_AP_PHY_SUPPORT_B;
- n = phySupportedBitmap & CSR_WIFI_SME_AP_PHY_SUPPORT_N;
- g = phySupportedBitmap & CSR_WIFI_SME_AP_PHY_SUPPORT_G;
- if(b || g) {
- supportedRates[i++]=0x82;
- supportedRates[i++]=0x84;
- supportedRates[i++]=0x8b;
- supportedRates[i++]=0x96;
- } else if(n) {
- /* For some strange reason the WiFi stack needs both b and g rates */
- supportedRates[i++]=0x02;
- supportedRates[i++]=0x04;
- supportedRates[i++]=0x0b;
- supportedRates[i++]=0x16;
- supportedRates[i++]=0x0c;
- supportedRates[i++]=0x12;
- supportedRates[i++]=0x18;
- supportedRates[i++]=0x24;
- supportedRates[i++]=0x30;
- supportedRates[i++]=0x48;
- supportedRates[i++]=0x60;
- supportedRates[i++]=0x6c;
- }
- if(g) {
- if(!b) {
- supportedRates[i++]=0x8c;
- supportedRates[i++]=0x98;
- supportedRates[i++]=0xb0;
- } else {
- supportedRates[i++]=0x0c;
- supportedRates[i++]=0x18;
- supportedRates[i++]=0x30;
- }
- supportedRates[i++]=0x48;
- supportedRates[i++]=0x12;
- supportedRates[i++]=0x24;
- supportedRates[i++]=0x60;
- supportedRates[i++]=0x6c;
- }
- return i;
-}
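
The hard-coded bytes above follow the standard 802.11 Supported Rates encoding: each byte carries the rate in 500 kbps units, with bit 7 (0x80) set when the rate is a basic (mandatory) rate, so 0x82 is basic 1 Mbps, 0x96 is basic 11 Mbps and 0x6c is 54 Mbps. A minimal sketch of that encoding, as a hypothetical helper that is not part of the driver:

    /* Hypothetical helper, kernel context assumed (u8 from <linux/types.h>). */
    static u8 encode_supported_rate(unsigned int rate_kbps, int is_basic)
    {
            u8 r = (rate_kbps / 500) & 0x7f;    /* rate in 500 kbps units */

            if (is_basic)
                    r |= 0x80;                  /* bit 7 flags a basic rate */
            return r;
    }
    /* e.g. encode_supported_rate(1000, 1) == 0x82,
     *      encode_supported_rate(11000, 1) == 0x96 */
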
-int unifi_cfg_set_ap_config(unifi_priv_t * priv,unsigned char* arg)
-{
- uf_cfg_ap_config_t cfg_ap_config;
- char *buffer;
-
- buffer = ((unsigned char*)arg) + sizeof(unifi_cfg_command_t) + sizeof(unsigned int);
- if (copy_from_user(&cfg_ap_config, (void*)buffer,
- sizeof(uf_cfg_ap_config_t))) {
- unifi_error(priv, "UNIFI_CFG: Failed to get the ap config struct\n");
- return -EFAULT;
- }
- priv->ap_config.channel = cfg_ap_config.channel;
- priv->ap_mac_config.dtimPeriod = cfg_ap_config.dtimPeriod;
- priv->ap_mac_config.beaconInterval = cfg_ap_config.beaconInterval;
- priv->group_sec_config.apGroupkeyTimeout = cfg_ap_config.groupkeyTimeout;
- priv->group_sec_config.apStrictGtkRekey = cfg_ap_config.strictGtkRekeyEnabled;
- priv->group_sec_config.apGmkTimeout = cfg_ap_config.gmkTimeout;
- priv->group_sec_config.apResponseTimeout = cfg_ap_config.responseTimeout;
- priv->group_sec_config.apRetransLimit = cfg_ap_config.retransLimit;
-
- priv->ap_mac_config.shortSlotTimeEnabled = cfg_ap_config.shortSlotTimeEnabled;
- priv->ap_mac_config.ctsProtectionType=cfg_ap_config.ctsProtectionType;
-
- priv->ap_mac_config.wmmEnabled = cfg_ap_config.wmmEnabled;
-
- priv->ap_mac_config.apHtParams.rxStbc=cfg_ap_config.rxStbc;
- priv->ap_mac_config.apHtParams.rifsModeAllowed=cfg_ap_config.rifsModeAllowed;
-
- priv->ap_mac_config.phySupportedBitmap = cfg_ap_config.phySupportedBitmap;
- priv->ap_mac_config.maxListenInterval=cfg_ap_config.maxListenInterval;
-
- priv->ap_mac_config.supportedRatesCount= uf_configure_supported_rates(priv->ap_mac_config.supportedRates,priv->ap_mac_config.phySupportedBitmap);
-
- return 0;
-}
-
-#endif
-#ifdef CSR_SUPPORT_WEXT
-
- void
-uf_sme_config_wq(struct work_struct *work)
-{
- CsrWifiSmeStaConfig staConfig;
- CsrWifiSmeDeviceConfig deviceConfig;
- unifi_priv_t *priv = container_of(work, unifi_priv_t, sme_config_task);
-
- /* Register to receive indications from the SME */
- CsrWifiSmeEventMaskSetReqSend(0,
- CSR_WIFI_SME_INDICATIONS_WIFIOFF | CSR_WIFI_SME_INDICATIONS_CONNECTIONQUALITY |
- CSR_WIFI_SME_INDICATIONS_MEDIASTATUS | CSR_WIFI_SME_INDICATIONS_MICFAILURE);
-
- if (sme_mgt_sme_config_get(priv, &staConfig, &deviceConfig)) {
- unifi_warning(priv, "uf_sme_config_wq: Get unifi_SMEConfigValue failed.\n");
- return;
- }
-
- if (priv->if_index == CSR_INDEX_5G) {
- staConfig.ifIndex = CSR_WIFI_SME_RADIO_IF_GHZ_5_0;
- } else {
- staConfig.ifIndex = CSR_WIFI_SME_RADIO_IF_GHZ_2_4;
- }
-
- deviceConfig.trustLevel = (CsrWifiSme80211dTrustLevel)tl_80211d;
- if (sme_mgt_sme_config_set(priv, &staConfig, &deviceConfig)) {
- unifi_warning(priv,
- "SME config for 802.11d Trust Level and Radio Band failed.\n");
- return;
- }
-
-} /* uf_sme_config_wq() */
-
-#endif /* CSR_SUPPORT_WEXT */
-
-
-/*
- * ---------------------------------------------------------------------------
- * uf_ta_ind_wq
- *
- *      Deferred work queue function to send Traffic Analysis protocol
- * indications to the SME.
- * These are done in a deferred work queue for two reasons:
- * - the CsrWifiRouterCtrl...Send() functions are not safe for atomic context
- * - we want to load the main driver data path as lightly as possible
- *
- * The TA classifications already come from a workqueue.
- *
- * Arguments:
- * work Pointer to work queue item.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
- void
-uf_ta_ind_wq(struct work_struct *work)
-{
- struct ta_ind *ind = container_of(work, struct ta_ind, task);
- unifi_priv_t *priv = container_of(ind, unifi_priv_t, ta_ind_work);
- u16 interfaceTag = 0;
-
-
- CsrWifiRouterCtrlTrafficProtocolIndSend(priv->CSR_WIFI_SME_IFACEQUEUE,0,
- interfaceTag,
- ind->packet_type,
- ind->direction,
- ind->src_addr);
- ind->in_use = 0;
-
-} /* uf_ta_ind_wq() */
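
For context, these deferred handlers rely on the standard Linux workqueue API: the work item is initialised once with INIT_WORK() and queued from the data path with schedule_work(). A minimal sketch of the queuing side, with field names mirroring struct ta_ind above but the initialisation context assumed:

    #include <linux/workqueue.h>

    /* Illustrative only: defer a TA protocol indication to uf_ta_ind_wq().
     * Assumes INIT_WORK(&priv->ta_ind_work.task, uf_ta_ind_wq) was done
     * at driver initialisation. */
    static void queue_ta_ind(unifi_priv_t *priv)
    {
            if (!priv->ta_ind_work.in_use) {
                    priv->ta_ind_work.in_use = 1;
                    schedule_work(&priv->ta_ind_work.task);
            }
    }
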
-
-
-/*
- * ---------------------------------------------------------------------------
- * uf_ta_sample_ind_wq
- *
- * Deferred work queue function to send Traffic Analysis sample
- * indications to the SME.
- * These are done in a deferred work queue for two reasons:
- * - the CsrWifiRouterCtrl...Send() functions are not safe for atomic context
- * - we want to load the main driver data path as lightly as possible
- *
- * The TA classifications already come from a workqueue.
- *
- * Arguments:
- * work Pointer to work queue item.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
- void
-uf_ta_sample_ind_wq(struct work_struct *work)
-{
- struct ta_sample_ind *ind = container_of(work, struct ta_sample_ind, task);
- unifi_priv_t *priv = container_of(ind, unifi_priv_t, ta_sample_ind_work);
- u16 interfaceTag = 0;
-
- unifi_trace(priv, UDBG5, "rxtcp %d txtcp %d rxudp %d txudp %d prio %d\n",
- priv->rxTcpThroughput,
- priv->txTcpThroughput,
- priv->rxUdpThroughput,
- priv->txUdpThroughput,
- priv->bh_thread.prio);
-
- if(priv->rxTcpThroughput > 1000)
- {
- if (bh_priority == -1 && priv->bh_thread.prio != 1)
- {
- struct sched_param param;
- priv->bh_thread.prio = 1;
- unifi_trace(priv, UDBG1, "%s new thread (RT) priority = %d\n",
- priv->bh_thread.name, priv->bh_thread.prio);
- param.sched_priority = priv->bh_thread.prio;
- sched_setscheduler(priv->bh_thread.thread_task, SCHED_FIFO, &param);
- }
- } else
- {
- if (bh_priority == -1 && priv->bh_thread.prio != DEFAULT_PRIO)
- {
- struct sched_param param;
- param.sched_priority = 0;
- sched_setscheduler(priv->bh_thread.thread_task, SCHED_NORMAL, &param);
- priv->bh_thread.prio = DEFAULT_PRIO;
- unifi_trace(priv, UDBG1, "%s new thread priority = %d\n",
- priv->bh_thread.name, priv->bh_thread.prio);
- set_user_nice(priv->bh_thread.thread_task, PRIO_TO_NICE(priv->bh_thread.prio));
- }
- }
-
- CsrWifiRouterCtrlTrafficSampleIndSend(priv->CSR_WIFI_SME_IFACEQUEUE,0, interfaceTag, ind->stats);
-
- ind->in_use = 0;
-
-} /* uf_ta_sample_ind_wq() */
-
-
-/*
- * ---------------------------------------------------------------------------
- * uf_send_m4_ready_wq
- *
- * Deferred work queue function to send M4 ReadyToSend inds to the SME.
- * These are done in a deferred work queue for two reasons:
- * - the CsrWifiRouterCtrl...Send() functions are not safe for atomic context
- * - we want to load the main driver data path as lightly as possible
- *
- * Arguments:
- * work Pointer to work queue item.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-void
-uf_send_m4_ready_wq(struct work_struct *work)
-{
- netInterface_priv_t *InterfacePriv = container_of(work, netInterface_priv_t, send_m4_ready_task);
- u16 iface = InterfacePriv->InterfaceTag;
- unifi_priv_t *priv = InterfacePriv->privPtr;
- CSR_MA_PACKET_REQUEST *req = &InterfacePriv->m4_signal.u.MaPacketRequest;
- CsrWifiMacAddress peer;
- unsigned long flags;
-
- /* The peer address was stored in the signal */
- spin_lock_irqsave(&priv->m4_lock, flags);
- memcpy(peer.a, req->Ra.x, sizeof(peer.a));
- spin_unlock_irqrestore(&priv->m4_lock, flags);
-
- /* Send a signal to SME */
- CsrWifiRouterCtrlM4ReadyToSendIndSend(priv->CSR_WIFI_SME_IFACEQUEUE, 0, iface, peer);
-
- unifi_trace(priv, UDBG1, "M4ReadyToSendInd sent for peer %pMF\n",
- peer.a);
-
-} /* uf_send_m4_ready_wq() */
-
-#if (defined(CSR_WIFI_SECURITY_WAPI_ENABLE) && defined(CSR_WIFI_SECURITY_WAPI_SW_ENCRYPTION))
-/*
- * ---------------------------------------------------------------------------
- * uf_send_pkt_to_encrypt
- *
- * Deferred work queue function to send the WAPI data pkts to SME when unicast KeyId = 1
- * These are done in a deferred work queue for two reasons:
- * - the CsrWifiRouterCtrl...Send() functions are not safe for atomic context
- * - we want to load the main driver data path as lightly as possible
- *
- * Arguments:
- * work Pointer to work queue item.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-void uf_send_pkt_to_encrypt(struct work_struct *work)
-{
- netInterface_priv_t *interfacePriv = container_of(work, netInterface_priv_t, send_pkt_to_encrypt);
- u16 interfaceTag = interfacePriv->InterfaceTag;
- unifi_priv_t *priv = interfacePriv->privPtr;
-
- u32 pktBulkDataLength;
- u8 *pktBulkData;
- unsigned long flags;
-
- if (interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_STA) {
-
- pktBulkDataLength = interfacePriv->wapi_unicast_bulk_data.data_length;
-
- if (pktBulkDataLength > 0) {
- pktBulkData = kmalloc(pktBulkDataLength, GFP_KERNEL);
- } else {
- unifi_error(priv, "uf_send_pkt_to_encrypt() : invalid buffer\n");
- return;
- }
-
- spin_lock_irqsave(&priv->wapi_lock, flags);
- /* Copy over the MA PKT REQ bulk data */
- memcpy(pktBulkData, (u8*)interfacePriv->wapi_unicast_bulk_data.os_data_ptr, pktBulkDataLength);
- /* Free any bulk data buffers allocated for the WAPI Data pkt */
- unifi_net_data_free(priv, &interfacePriv->wapi_unicast_bulk_data);
- interfacePriv->wapi_unicast_bulk_data.net_buf_length = 0;
- interfacePriv->wapi_unicast_bulk_data.data_length = 0;
- interfacePriv->wapi_unicast_bulk_data.os_data_ptr = interfacePriv->wapi_unicast_bulk_data.os_net_buf_ptr = NULL;
- spin_unlock_irqrestore(&priv->wapi_lock, flags);
-
- CsrWifiRouterCtrlWapiUnicastTxEncryptIndSend(priv->CSR_WIFI_SME_IFACEQUEUE, 0, interfaceTag, pktBulkDataLength, pktBulkData);
- unifi_trace(priv, UDBG1, "WapiUnicastTxEncryptInd sent to SME\n");
-
- kfree(pktBulkData); /* Would have been copied over by the SME Handler */
-
- } else {
- unifi_warning(priv, "uf_send_pkt_to_encrypt() is NOT applicable for interface mode - %d\n",interfacePriv->interfaceMode);
- }
-}/* uf_send_pkt_to_encrypt() */
-#endif
diff --git a/drivers/staging/csr/unifi_sme.h b/drivers/staging/csr/unifi_sme.h
deleted file mode 100644
index b689cfe..0000000
--- a/drivers/staging/csr/unifi_sme.h
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
- * ***************************************************************************
- * FILE: unifi_sme.h
- *
- * PURPOSE: SME related definitions.
- *
- * Copyright (C) 2007-2011 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ***************************************************************************
- */
-#ifndef __LINUX_UNIFI_SME_H__
-#define __LINUX_UNIFI_SME_H__ 1
-
-#include <linux/kernel.h>
-
-#ifdef CSR_SME_USERSPACE
-#include "sme_userspace.h"
-#endif
-
-#include "csr_wifi_sme_lib.h"
-
-typedef int unifi_data_port_action;
-
-typedef struct unifi_port_cfg
-{
- /* TRUE if this port entry is allocated */
- u8 in_use;
- CsrWifiRouterCtrlPortAction port_action;
- CsrWifiMacAddress mac_address;
-} unifi_port_cfg_t;
-
-#define UNIFI_MAX_CONNECTIONS 8
-#define UNIFI_MAX_RETRY_LIMIT 5
-#define UF_DATA_PORT_NOT_OVERIDE 0
-#define UF_DATA_PORT_OVERIDE 1
-
-typedef struct unifi_port_config
-{
- int entries_in_use;
- int overide_action;
- unifi_port_cfg_t port_cfg[UNIFI_MAX_CONNECTIONS];
-} unifi_port_config_t;
-
-
-enum sme_request_status {
- SME_REQUEST_EMPTY,
- SME_REQUEST_PENDING,
- SME_REQUEST_RECEIVED,
- SME_REQUEST_TIMEDOUT,
- SME_REQUEST_CANCELLED,
-};
-
-/* Structure to hold a UDI logged signal */
-typedef struct {
-
- /* The current status of the request */
- enum sme_request_status request_status;
-
- /* The status the SME has passed to us */
- CsrResult reply_status;
-
- /* SME's reply to a get request */
- CsrWifiSmeVersions versions;
- CsrWifiSmePowerConfig powerConfig;
- CsrWifiSmeHostConfig hostConfig;
- CsrWifiSmeStaConfig staConfig;
- CsrWifiSmeDeviceConfig deviceConfig;
- CsrWifiSmeCoexInfo coexInfo;
- CsrWifiSmeCoexConfig coexConfig;
- CsrWifiSmeMibConfig mibConfig;
- CsrWifiSmeConnectionInfo connectionInfo;
- CsrWifiSmeConnectionConfig connectionConfig;
- CsrWifiSmeConnectionStats connectionStats;
-
-
- /* SME's reply to a scan request */
- u16 reply_scan_results_count;
- CsrWifiSmeScanResult* reply_scan_results;
-
-} sme_reply_t;
-
-
-typedef struct {
- u16 appHandle;
- CsrWifiRouterEncapsulation encapsulation;
- u16 protocol;
- u8 oui[3];
- u8 in_use;
-} sme_ma_unidata_ind_filter_t;
-
-
-CsrWifiRouterCtrlPortAction uf_sme_port_state(unifi_priv_t *priv,
- unsigned char *address,
- int queue,
- u16 interfaceTag);
-unifi_port_cfg_t *uf_sme_port_config_handle(unifi_priv_t *priv,
- unsigned char *address,
- int queue,
- u16 interfaceTag);
-
-
-
-/* Callback for event logging to SME clients */
-void sme_log_event(ul_client_t *client, const u8 *signal, int signal_len,
- const bulk_data_param_t *bulkdata, int dir);
-
-/* The workqueue task to the set the multicast addresses list */
-void uf_multicast_list_wq(struct work_struct *work);
-
-/* The workqueue task to execute the TA module */
-void uf_ta_wq(struct work_struct *work);
-
-
-/*
- * SME blocking helper functions
- */
-#ifdef UNIFI_DEBUG
-# define sme_complete_request(priv, status) uf_sme_complete_request(priv, status, __func__)
-#else
-# define sme_complete_request(priv, status) uf_sme_complete_request(priv, status, NULL)
-#endif
-
-void uf_sme_complete_request(unifi_priv_t *priv, CsrResult reply_status, const char *func);
-void uf_sme_cancel_request(unifi_priv_t *priv, CsrResult reply_status);
-
-
-/*
- * Blocking functions using the SME SYS API.
- */
-int sme_sys_suspend(unifi_priv_t *priv);
-int sme_sys_resume(unifi_priv_t *priv);
-
-
-/*
- * Traffic Analysis workqueue jobs
- */
-void uf_ta_ind_wq(struct work_struct *work);
-void uf_ta_sample_ind_wq(struct work_struct *work);
-
-/*
- * SME config workqueue job
- */
-void uf_sme_config_wq(struct work_struct *work);
-
-/*
- * To send the M4 ready-to-send IND
- */
-void uf_send_m4_ready_wq(struct work_struct *work);
-
-#if (defined(CSR_WIFI_SECURITY_WAPI_ENABLE) && defined(CSR_WIFI_SECURITY_WAPI_SW_ENCRYPTION))
-/*
- * To send a data pkt to the SME for encryption
- */
-void uf_send_pkt_to_encrypt(struct work_struct *work);
-#endif
-
-int sme_mgt_power_config_set(unifi_priv_t *priv, CsrWifiSmePowerConfig *powerConfig);
-int sme_mgt_power_config_get(unifi_priv_t *priv, CsrWifiSmePowerConfig *powerConfig);
-int sme_mgt_host_config_set(unifi_priv_t *priv, CsrWifiSmeHostConfig *hostConfig);
-int sme_mgt_host_config_get(unifi_priv_t *priv, CsrWifiSmeHostConfig *hostConfig);
-int sme_mgt_sme_config_set(unifi_priv_t *priv, CsrWifiSmeStaConfig *staConfig, CsrWifiSmeDeviceConfig *deviceConfig);
-int sme_mgt_sme_config_get(unifi_priv_t *priv, CsrWifiSmeStaConfig *staConfig, CsrWifiSmeDeviceConfig *deviceConfig);
-int sme_mgt_coex_info_get(unifi_priv_t *priv, CsrWifiSmeCoexInfo *coexInfo);
-int sme_mgt_packet_filter_set(unifi_priv_t *priv);
-int sme_mgt_tspec(unifi_priv_t *priv, CsrWifiSmeListAction action,
- u32 tid, CsrWifiSmeDataBlock *tspec, CsrWifiSmeDataBlock *tclas);
-
-#ifdef CSR_SUPPORT_WEXT
-/*
- * Blocking functions using the SME MGT API.
- */
-int sme_mgt_wifi_on(unifi_priv_t *priv);
-int sme_mgt_wifi_off(unifi_priv_t *priv);
-/*int sme_mgt_set_value_async(unifi_priv_t *priv, unifi_AppValue *app_value);
-int sme_mgt_get_value_async(unifi_priv_t *priv, unifi_AppValue *app_value);
-int sme_mgt_get_value(unifi_priv_t *priv, unifi_AppValue *app_value);
-int sme_mgt_set_value(unifi_priv_t *priv, unifi_AppValue *app_value);
-*/
-int sme_mgt_coex_config_set(unifi_priv_t *priv, CsrWifiSmeCoexConfig *coexConfig);
-int sme_mgt_coex_config_get(unifi_priv_t *priv, CsrWifiSmeCoexConfig *coexConfig);
-int sme_mgt_mib_config_set(unifi_priv_t *priv, CsrWifiSmeMibConfig *mibConfig);
-int sme_mgt_mib_config_get(unifi_priv_t *priv, CsrWifiSmeMibConfig *mibConfig);
-
-int sme_mgt_connection_info_set(unifi_priv_t *priv, CsrWifiSmeConnectionInfo *connectionInfo);
-int sme_mgt_connection_info_get(unifi_priv_t *priv, CsrWifiSmeConnectionInfo *connectionInfo);
-int sme_mgt_connection_config_set(unifi_priv_t *priv, CsrWifiSmeConnectionConfig *connectionConfig);
-int sme_mgt_connection_config_get(unifi_priv_t *priv, CsrWifiSmeConnectionConfig *connectionConfig);
-int sme_mgt_connection_stats_get(unifi_priv_t *priv, CsrWifiSmeConnectionStats *connectionStats);
-
-int sme_mgt_versions_get(unifi_priv_t *priv, CsrWifiSmeVersions *versions);
-
-
-int sme_mgt_scan_full(unifi_priv_t *priv, CsrWifiSsid *specific_ssid,
- int num_channels, unsigned char *channel_list);
-int sme_mgt_scan_results_get_async(unifi_priv_t *priv,
- struct iw_request_info *info,
- char *scan_results,
- long scan_results_len);
-int sme_mgt_disconnect(unifi_priv_t *priv);
-int sme_mgt_connect(unifi_priv_t *priv);
-int sme_mgt_key(unifi_priv_t *priv, CsrWifiSmeKey *sme_key,
- CsrWifiSmeListAction action);
-int sme_mgt_pmkid(unifi_priv_t *priv, CsrWifiSmeListAction action,
- CsrWifiSmePmkidList *pmkid_list);
-int sme_mgt_mib_get(unifi_priv_t *priv,
- unsigned char *varbind, int *length);
-int sme_mgt_mib_set(unifi_priv_t *priv,
- unsigned char *varbind, int length);
-#ifdef CSR_SUPPORT_WEXT_AP
-int sme_ap_start(unifi_priv_t *priv,u16 interface_tag,CsrWifiSmeApConfig_t *ap_config);
-int sme_ap_stop(unifi_priv_t *priv,u16 interface_tag);
-int sme_ap_config(unifi_priv_t *priv,CsrWifiSmeApMacConfig *ap_mac_config, CsrWifiNmeApConfig *group_security_config);
-int uf_configure_supported_rates(u8 * supportedRates, u8 phySupportedBitmap);
-#endif
-int unifi_translate_scan(struct net_device *dev,
- struct iw_request_info *info,
- char *current_ev, char *end_buf,
- CsrWifiSmeScanResult *scan_data,
- int scan_index);
-
-#endif /* CSR_SUPPORT_WEXT */
-
-int unifi_cfg_power(unifi_priv_t *priv, unsigned char *arg);
-int unifi_cfg_power_save(unifi_priv_t *priv, unsigned char *arg);
-int unifi_cfg_power_supply(unifi_priv_t *priv, unsigned char *arg);
-int unifi_cfg_packet_filters(unifi_priv_t *priv, unsigned char *arg);
-int unifi_cfg_wmm_qos_info(unifi_priv_t *priv, unsigned char *arg);
-int unifi_cfg_wmm_addts(unifi_priv_t *priv, unsigned char *arg);
-int unifi_cfg_wmm_delts(unifi_priv_t *priv, unsigned char *arg);
-int unifi_cfg_get_info(unifi_priv_t *priv, unsigned char *arg);
-int unifi_cfg_strict_draft_n(unifi_priv_t *priv, unsigned char *arg);
-int unifi_cfg_enable_okc(unifi_priv_t *priv, unsigned char *arg);
-#ifdef CSR_SUPPORT_WEXT_AP
-int unifi_cfg_set_ap_config(unifi_priv_t * priv,unsigned char* arg);
-#endif
-
-
-
-int convert_sme_error(CsrResult error);
-
-
-#endif /* __LINUX_UNIFI_SME_H__ */
diff --git a/drivers/staging/csr/unifi_wext.h b/drivers/staging/csr/unifi_wext.h
deleted file mode 100644
index beba089..0000000
--- a/drivers/staging/csr/unifi_wext.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- *****************************************************************************
- *
- * FILE : unifi_wext.h
- *
- * PURPOSE : Private header file for unifi driver support for wireless extensions.
- *
- * Copyright (C) 2005-2008 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
-*****************************************************************************
- */
-#ifndef __LINUX_UNIFI_WEXT_H__
-#define __LINUX_UNIFI_WEXT_H__ 1
-
-#include <linux/kernel.h>
-#include <net/iw_handler.h>
-#include "csr_wifi_sme_prim.h"
-
-/*
- * wext.c
- */
-/* A few details needed for WEP (Wireless Equivalent Privacy) */
-#define UNIFI_MAX_KEY_SIZE 16
-#define NUM_WEPKEYS 4
-#define SMALL_KEY_SIZE 5
-#define LARGE_KEY_SIZE 13
-typedef struct wep_key_t {
- int len;
- unsigned char key[UNIFI_MAX_KEY_SIZE]; /* 40-bit and 104-bit keys */
-} wep_key_t;
-
-#define UNIFI_SCAN_ACTIVE 0
-#define UNIFI_SCAN_PASSIVE 1
-#define UNIFI_MAX_SSID_LEN 32
-
-#define MAX_WPA_IE_LEN 64
-#define MAX_RSN_IE_LEN 255
-
-/*
- * Function to register in the netdev to report wireless stats.
- */
-struct iw_statistics *unifi_get_wireless_stats(struct net_device *dev);
-
-void uf_sme_wext_set_defaults(unifi_priv_t *priv);
-
-
-/*
- * wext_events.c
- */
-/* Functions to generate Wireless Extension events */
-void wext_send_scan_results_event(unifi_priv_t *priv);
-void wext_send_assoc_event(unifi_priv_t *priv, unsigned char *bssid,
- unsigned char *req_ie, int req_ie_len,
- unsigned char *resp_ie, int resp_ie_len,
- unsigned char *scan_ie, unsigned int scan_ie_len);
-void wext_send_disassoc_event(unifi_priv_t *priv);
-void wext_send_michaelmicfailure_event(unifi_priv_t *priv,
- u16 count, CsrWifiMacAddress address,
- CsrWifiSmeKeyType keyType, u16 interfaceTag);
-void wext_send_pmkid_candidate_event(unifi_priv_t *priv, CsrWifiMacAddress bssid, u8 preauth_allowed, u16 interfaceTag);
-void wext_send_started_event(unifi_priv_t *priv);
-
-
-static inline int
-uf_iwe_stream_add_point(struct iw_request_info *info, char *start, char *stop,
- struct iw_event *piwe, char *extra)
-{
- char *new_start;
-
- new_start = iwe_stream_add_point(info, start, stop, piwe, extra);
- if (unlikely(new_start == start))
- return -E2BIG;
-
- return (new_start - start);
-}
-
-
-static inline int
-uf_iwe_stream_add_event(struct iw_request_info *info, char *start, char *stop,
- struct iw_event *piwe, int len)
-{
- char *new_start;
-
- new_start = iwe_stream_add_event(info, start, stop, piwe, len);
- if (unlikely(new_start == start))
- return -E2BIG;
-
- return (new_start - start);
-}
-
-static inline int
-uf_iwe_stream_add_value(struct iw_request_info *info, char *stream, char *start,
- char *stop, struct iw_event *piwe, int len)
-{
- char *new_start;
-
- new_start = iwe_stream_add_value(info, stream, start, stop, piwe, len);
- if (unlikely(new_start == start))
- return -E2BIG;
-
- return (new_start - start);
-}
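
These wrappers turn the "no space left" result of the iwe_stream_add_*() helpers into -E2BIG and otherwise return the number of bytes appended. A hedged sketch of how such a wrapper is typically consumed when emitting one event into the WEXT scan stream (the calling context and the bssid parameter are assumptions, not driver code):

    static char *add_bssid_event(struct iw_request_info *info,
                                 char *current_ev, char *end_buf,
                                 const unsigned char *bssid)
    {
            struct iw_event iwe;
            int r;

            iwe.cmd = SIOCGIWAP;
            iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
            memcpy(iwe.u.ap_addr.sa_data, bssid, ETH_ALEN);

            r = uf_iwe_stream_add_event(info, current_ev, end_buf, &iwe,
                                        IW_EV_ADDR_LEN);
            if (r < 0)
                    return NULL;            /* user buffer full (-E2BIG) */
            return current_ev + r;
    }
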
-
-
-#endif /* __LINUX_UNIFI_WEXT_H__ */
diff --git a/drivers/staging/csr/unifiio.h b/drivers/staging/csr/unifiio.h
deleted file mode 100644
index b9de0cb..0000000
--- a/drivers/staging/csr/unifiio.h
+++ /dev/null
@@ -1,398 +0,0 @@
-/*
- * ---------------------------------------------------------------------------
- *
- * FILE: unifiio.h
- *
- * Public definitions for the UniFi linux driver.
- * This is mostly ioctl command values and structs.
- *
- * Include <sys/ioctl.h> or similar before this file
- *
- * Copyright (C) 2005-2009 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ---------------------------------------------------------------------------
- */
-#ifndef __UNIFIIO_H__
-#define __UNIFIIO_H__
-
-#include <linux/types.h>
-
-#define UNIFI_GET_UDI_ENABLE _IOR('u', 1, int)
-#define UNIFI_SET_UDI_ENABLE _IOW('u', 2, int)
-/* Values for UDI_ENABLE */
-#define UDI_ENABLE_DATA 0x1
-#define UDI_ENABLE_CONTROL 0x2
-
-/* MIB set/get. Arg is a pointer to a varbind */
-#define UNIFI_GET_MIB _IOWR('u', 3, unsigned char *)
-#define UNIFI_SET_MIB _IOW ('u', 4, unsigned char *)
-#define MAX_VARBIND_LENGTH 127
-
-/* Private IOCTLs */
-#define SIOCIWS80211POWERSAVEPRIV SIOCIWFIRSTPRIV
-#define SIOCIWG80211POWERSAVEPRIV SIOCIWFIRSTPRIV + 1
-#define SIOCIWS80211RELOADDEFAULTSPRIV SIOCIWFIRSTPRIV + 2
-#define SIOCIWSCONFWAPIPRIV SIOCIWFIRSTPRIV + 4
-#define SIOCIWSWAPIKEYPRIV SIOCIWFIRSTPRIV + 6
-#define SIOCIWSSMEDEBUGPRIV SIOCIWFIRSTPRIV + 8
-#define SIOCIWSAPCFGPRIV SIOCIWFIRSTPRIV + 10
-#define SIOCIWSAPSTARTPRIV SIOCIWFIRSTPRIV + 12
-#define SIOCIWSAPSTOPPRIV SIOCIWFIRSTPRIV + 14
-#define SIOCIWSFWRELOADPRIV SIOCIWFIRSTPRIV + 16
-#define SIOCIWSSTACKSTART SIOCIWFIRSTPRIV + 18
-#define SIOCIWSSTACKSTOP SIOCIWFIRSTPRIV + 20
-
-
-
-#define IWPRIV_POWER_SAVE_MAX_STRING 32
-#define IWPRIV_SME_DEBUG_MAX_STRING 32
-#define IWPRIV_SME_MAX_STRING 120
-
-
-/* Private configuration commands */
-#define UNIFI_CFG _IOWR('u', 5, unsigned char *)
-/*
- * <------------------ Read/Write Buffer -------------------->
- * _____________________________________________________________
- * | Cmd | Arg | ... Buffer (opt) ... |
- * -------------------------------------------------------------
- * <-- uint --><-- uint --><----- unsigned char buffer ------>
- *
- * Cmd: A unifi_cfg_command_t command.
- * Arg: Out:Length if Cmd==UNIFI_CFG_GET
- * In:PowerOnOff if Cmd==UNIFI_CFG_POWER
- * In:PowerMode if Cmd==UNIFI_CFG_POWERSAVE
- * In:Length if Cmd==UNIFI_CFG_FILTER
- * In:WMM Qos Info if Cmd==UNIFI_CFG_WMM_QOS_INFO
- * Buffer: Out:Data if Cmd==UNIFI_CFG_GET
- * NULL if Cmd==UNIFI_CFG_POWER
- * NULL if Cmd==UNIFI_CFG_POWERSAVE
- * In:Filters if Cmd==UNIFI_CFG_FILTER
- *
- * where Filters is a uf_cfg_bcast_packet_filter_t structure
- * followed by 0 - n tclas_t structures. The length of the tclas_t
- * structures is obtained by uf_cfg_bcast_packet_filter_t::tclas_ies_length.
- */
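
A userspace sketch (not part of the driver) of packing this layout for a UNIFI_CFG_FILTER request; the device node path is an assumption and the filter payload is treated as an opaque, pre-built uf_cfg_bcast_packet_filter_t blob:

    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include "unifiio.h"

    int unifi_set_filter(const unsigned char *filters, unsigned int len)
    {
            unsigned char buf[512];
            unsigned int cmd = UNIFI_CFG_FILTER;
            int fd, ret;

            if (2 * sizeof(unsigned int) + len > sizeof(buf))
                    return -1;
            memcpy(buf, &cmd, sizeof(cmd));                 /* Cmd */
            memcpy(buf + sizeof(cmd), &len, sizeof(len));   /* Arg: In:Length */
            memcpy(buf + 2 * sizeof(cmd), filters, len);    /* Buffer: In:Filters */

            fd = open("/dev/unifi0", O_RDWR);               /* node name assumed */
            if (fd < 0)
                    return -1;
            ret = ioctl(fd, UNIFI_CFG, buf);
            close(fd);
            return ret;
    }
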
-
-
-#define UNIFI_PUTEST _IOWR('u', 6, unsigned char *)
-/*
- * <------------------ Read/Write Buffer -------------------->
- * _____________________________________________________________
- * | Cmd | Arg | ... Buffer (opt) ... |
- * -------------------------------------------------------------
- * <-- uint --><-- uint --><----- unsigned char buffer ------>
- *
- * Cmd: A unifi_putest_command_t command.
- * Arg: N/A if Cmd==UNIFI_PUTEST_START
- * N/A if Cmd==UNIFI_PUTEST_STOP
- * In:int (Clock Speed) if Cmd==UNIFI_PUTEST_SET_SDIO_CLOCK
- * In/Out:sizeof(unifi_putest_cmd52) if Cmd==UNIFI_PUTEST_CMD52_READ
- * In:sizeof(unifi_putest_cmd52) if Cmd==UNIFI_PUTEST_CMD52_WRITE
- * In:uint (f/w file name length) if Cmd==UNIFI_PUTEST_DL_FW
- * Buffer: NULL if Cmd==UNIFI_PUTEST_START
- * NULL if Cmd==UNIFI_PUTEST_STOP
- * NULL if Cmd==UNIFI_PUTEST_SET_SDIO_CLOCK
- * In/Out:unifi_putest_cmd52 if Cmd==UNIFI_PUTEST_CMD52_READ
- * In:unifi_putest_cmd52 if Cmd==UNIFI_PUTEST_CMD52_WRITE
- * In:f/w file name if Cmd==UNIFI_PUTEST_DL_FW
- */
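
A companion userspace sketch for the CMD52 read case above, using the same Cmd/Arg/Buffer layout; the file descriptor, SDIO function number and register address are assumptions:

    int putest_cmd52_read(int fd, int func, unsigned long addr, unsigned char *out)
    {
            unsigned char buf[2 * sizeof(unsigned int) +
                              sizeof(struct unifi_putest_cmd52)];
            unsigned int cmd = UNIFI_PUTEST_CMD52_READ;             /* Cmd */
            unsigned int arg = sizeof(struct unifi_putest_cmd52);   /* Arg */
            struct unifi_putest_cmd52 c52 = { .funcnum = func, .addr = addr };

            memcpy(buf, &cmd, sizeof(cmd));
            memcpy(buf + sizeof(cmd), &arg, sizeof(arg));
            memcpy(buf + 2 * sizeof(cmd), &c52, sizeof(c52));       /* In/Out */

            if (ioctl(fd, UNIFI_PUTEST, buf) != 0)
                    return -1;
            memcpy(&c52, buf + 2 * sizeof(cmd), sizeof(c52));       /* read back */
            *out = c52.data;
            return 0;
    }
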
-
-#define UNIFI_BUILD_TYPE _IOWR('u', 7, unsigned char)
-#define UNIFI_BUILD_NME 1
-#define UNIFI_BUILD_WEXT 2
-#define UNIFI_BUILD_AP 3
-
-/* debugging */
-#define UNIFI_KICK _IO ('u', 0x10)
-#define UNIFI_SET_DEBUG _IO ('u', 0x11)
-#define UNIFI_SET_TRACE _IO ('u', 0x12)
-
-#define UNIFI_GET_INIT_STATUS _IOR ('u', 0x15, int)
-#define UNIFI_SET_UDI_LOG_MASK _IOR('u', 0x18, unifiio_filter_t)
-#define UNIFI_SET_UDI_SNAP_MASK _IOW('u', 0x1a, unifiio_snap_filter_t)
-#define UNIFI_SET_AMP_ENABLE _IOWR('u', 0x1b, int)
-
-#define UNIFI_INIT_HW _IOR ('u', 0x13, unsigned char)
-#define UNIFI_INIT_NETDEV _IOW ('u', 0x14, unsigned char[6])
-#define UNIFI_SME_PRESENT _IOW ('u', 0x19, int)
-
-#define UNIFI_CFG_PERIOD_TRAFFIC _IOW ('u', 0x21, unsigned char *)
-#define UNIFI_CFG_UAPSD_TRAFFIC _IOW ('u', 0x22, unsigned char)
-
-#define UNIFI_COREDUMP_GET_REG _IOWR('u', 0x23, unifiio_coredump_req_t)
-
-
-/*
- * Following reset, f/w may only be downloaded using CMD52.
- * This is slow, so there is a facility to download a secondary
- * loader first which supports CMD53.
- * If loader_len is > 0, then loader_data is assumed to point to
- * a suitable secondary loader that can be used to download the
- * main image.
- *
- * The driver will run the host protocol initialisation sequence
- * after downloading the image.
- *
- * If both lengths are zero, then the f/w is assumed to have been
- * booted from Flash and the host protocol initialisation sequence
- * is run.
- */
-typedef struct {
-
- /* Number of bytes in the image */
- int img_len;
-
- /* Pointer to image data. */
- unsigned char *img_data;
-
-
- /* Number of bytes in the loader image */
- int loader_len;
-
- /* Pointer to loader image data. */
- unsigned char *loader_data;
-
-} unifiio_img_t;
-
-
-/* Structure of data read from the unifi device. */
-typedef struct
-{
- /* Length (in bytes) of entire structure including appended bulk data */
- int length;
-
- /* System time (in milliseconds) that signal was transferred */
- int timestamp;
-
- /* Direction in which signal was transferred. */
- int direction;
-#define UDI_FROM_HOST 0
-#define UDI_TO_HOST 1
-#define UDI_CONFIG_IND 2
-
- /* The length of the signal (in bytes) not including bulk data */
- int signal_length;
-
- /* Signal body follows, then any bulk data */
-
-} udi_msg_t;
-
-
-typedef enum
-{
- UfSigFil_AllOn = 0, /* Log all signal IDs */
- UfSigFil_AllOff = 1, /* Don't log any signal IDs */
- UfSigFil_SelectOn = 2, /* Log these signal IDs */
- UfSigFil_SelectOff = 3 /* Don't log these signal IDs */
-} uf_sigfilter_action_t;
-
-typedef struct {
-
- /* Number of 16-bit ints in the sig_ids array */
- int num_sig_ids;
- /* The action to perform */
- uf_sigfilter_action_t action;
- /* List of signal IDs to pass or block */
- unsigned short *sig_ids;
-
-} unifiio_filter_t;
-
-
-typedef struct {
- /* Number of 16-bit ints in the protocols array */
- u16 count;
- /* List of protocol ids to pass */
- u16 *protocols;
-} unifiio_snap_filter_t;
-
-
-
-typedef u8 unifi_putest_command_t;
-
-#define UNIFI_PUTEST_START 0
-#define UNIFI_PUTEST_STOP 1
-#define UNIFI_PUTEST_SET_SDIO_CLOCK 2
-#define UNIFI_PUTEST_CMD52_READ 3
-#define UNIFI_PUTEST_CMD52_WRITE 4
-#define UNIFI_PUTEST_DL_FW 5
-#define UNIFI_PUTEST_DL_FW_BUFF 6
-#define UNIFI_PUTEST_CMD52_BLOCK_READ 7
-#define UNIFI_PUTEST_COREDUMP_PREPARE 8
-#define UNIFI_PUTEST_GP_READ16 9
-#define UNIFI_PUTEST_GP_WRITE16 10
-
-
-struct unifi_putest_cmd52 {
- int funcnum;
- unsigned long addr;
- unsigned char data;
-};
-
-
-struct unifi_putest_block_cmd52_r {
- int funcnum;
- unsigned long addr;
- unsigned int length;
- unsigned char *data;
-};
-
-struct unifi_putest_gp_rw16 {
- unsigned long addr; /* generic address */
- unsigned short data;
-};
-
-typedef enum unifi_cfg_command {
- UNIFI_CFG_GET,
- UNIFI_CFG_POWER,
- UNIFI_CFG_POWERSAVE,
- UNIFI_CFG_FILTER,
- UNIFI_CFG_POWERSUPPLY,
- UNIFI_CFG_WMM_QOSINFO,
- UNIFI_CFG_WMM_ADDTS,
- UNIFI_CFG_WMM_DELTS,
- UNIFI_CFG_STRICT_DRAFT_N,
- UNIFI_CFG_ENABLE_OKC,
- UNIFI_CFG_SET_AP_CONFIG,
- UNIFI_CFG_CORE_DUMP /* request to take a fw core dump */
-} unifi_cfg_command_t;
-
-typedef enum unifi_cfg_power {
- UNIFI_CFG_POWER_UNSPECIFIED,
- UNIFI_CFG_POWER_OFF,
- UNIFI_CFG_POWER_ON
-} unifi_cfg_power_t;
-
-typedef enum unifi_cfg_powersupply {
- UNIFI_CFG_POWERSUPPLY_UNSPECIFIED,
- UNIFI_CFG_POWERSUPPLY_MAINS,
- UNIFI_CFG_POWERSUPPLY_BATTERIES
-} unifi_cfg_powersupply_t;
-
-typedef enum unifi_cfg_powersave {
- UNIFI_CFG_POWERSAVE_UNSPECIFIED,
- UNIFI_CFG_POWERSAVE_NONE,
- UNIFI_CFG_POWERSAVE_FAST,
- UNIFI_CFG_POWERSAVE_FULL,
- UNIFI_CFG_POWERSAVE_AUTO
-} unifi_cfg_powersave_t;
-
-typedef enum unifi_cfg_get {
- UNIFI_CFG_GET_COEX,
- UNIFI_CFG_GET_POWER_MODE,
- UNIFI_CFG_GET_VERSIONS,
- UNIFI_CFG_GET_POWER_SUPPLY,
- UNIFI_CFG_GET_INSTANCE,
- UNIFI_CFG_GET_AP_CONFIG
-} unifi_cfg_get_t;
-
-#define UNIFI_CFG_FILTER_NONE 0x0000
-#define UNIFI_CFG_FILTER_DHCP 0x0001
-#define UNIFI_CFG_FILTER_ARP 0x0002
-#define UNIFI_CFG_FILTER_NBNS 0x0004
-#define UNIFI_CFG_FILTER_NBDS 0x0008
-#define UNIFI_CFG_FILTER_CUPS 0x0010
-#define UNIFI_CFG_FILTER_ALL 0xFFFF
-
-
-typedef struct uf_cfg_bcast_packet_filter
-{
- unsigned long filter_mode; //as defined by HIP protocol
- unsigned char arp_filter;
- unsigned char dhcp_filter;
- unsigned long tclas_ies_length; // length of tclas_ies in bytes
- unsigned char tclas_ies[1]; // variable length depending on above field
-} uf_cfg_bcast_packet_filter_t;
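
Because tclas_ies[] is declared with a single placeholder byte, callers building the block described under UNIFI_CFG (fixed header followed by 0..n packed tclas_t structures) size it as in this hedged sketch:

    static unsigned long
    bcast_filter_alloc_size(const uf_cfg_bcast_packet_filter_t *f)
    {
            /* Fixed header minus the 1-byte tclas_ies placeholder, plus the
             * packed tclas_t structures that follow it. */
            return sizeof(*f) - sizeof(f->tclas_ies) + f->tclas_ies_length;
    }
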
-
-typedef struct uf_cfg_ap_config
-{
- u8 phySupportedBitmap;
- u8 channel;
- u16 beaconInterval;
- u8 dtimPeriod;
- u8 wmmEnabled;
- u8 shortSlotTimeEnabled;
- u16 groupkeyTimeout;
- u8 strictGtkRekeyEnabled;
- u16 gmkTimeout;
- u16 responseTimeout;
- u8 retransLimit;
- u8 rxStbc;
- u8 rifsModeAllowed;
- u8 dualCtsProtection;
- u8 ctsProtectionType;
- u16 maxListenInterval;
-}uf_cfg_ap_config_t;
-
-typedef struct tcpic_clsfr
-{
- __u8 cls_fr_type;
- __u8 cls_fr_mask;
- __u8 version;
- __u8 source_ip_addr[4];
- __u8 dest_ip_addr[4];
- __u16 source_port;
- __u16 dest_port;
- __u8 dscp;
- __u8 protocol;
- __u8 reserved;
-} __attribute__ ((packed)) tcpip_clsfr_t;
-
-typedef struct tclas {
- __u8 element_id;
- __u8 length;
- __u8 user_priority;
- tcpip_clsfr_t tcp_ip_cls_fr;
-} __attribute__ ((packed)) tclas_t;
-
-
-#define CONFIG_IND_ERROR 0x01
-#define CONFIG_IND_EXIT 0x02
-#define CONFIG_SME_NOT_PRESENT 0x10
-#define CONFIG_SME_PRESENT 0x20
-
-/* WAPI Key */
-typedef struct
-{
- u8 unicastKey;
- /* If non zero, then unicast key otherwise group key */
- u8 keyIndex;
- u8 keyRsc[16];
- u8 authenticator;
- /* If non zero, then authenticator otherwise supplicant */
- u8 address[6];
- u8 key[32];
-} unifiio_wapi_key_t;
-
-/* Values describing XAP memory regions captured by the mini-coredump system */
-typedef enum unifiio_coredump_space {
- UNIFIIO_COREDUMP_MAC_REG,
- UNIFIIO_COREDUMP_PHY_REG,
- UNIFIIO_COREDUMP_SH_DMEM,
- UNIFIIO_COREDUMP_MAC_DMEM,
- UNIFIIO_COREDUMP_PHY_DMEM,
- UNIFIIO_COREDUMP_TRIGGER_MAGIC = 0xFEED
-} unifiio_coredump_space_t;
-
-/* Userspace tool uses this structure to retrieve a register value from a
- * mini-coredump buffer previously saved by the HIP
- */
-typedef struct unifiio_coredump_req {
- /* From user */
- int index; /* 0=newest, -1=oldest */
- unsigned int offset; /* register offset in space */
- unifiio_coredump_space_t space; /* memory space */
- /* Filled by driver */
- unsigned int drv_build; /* driver build id */
- unsigned int chip_ver; /* chip version */
- unsigned int fw_ver; /* firmware version */
- int requestor; /* requestor: 0=auto dump, 1=manual */
- unsigned int timestamp; /* time of capture by driver */
- unsigned int serial; /* capture serial number */
- int value; /* 16 bit register value, -ve for error */
-} unifiio_coredump_req_t; /* Core-dumped register value request */
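
A userspace sketch of driving this request; the character-device path and the register offset are illustrative assumptions:

    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include "unifiio.h"

    static int read_coredump_reg(void)
    {
            unifiio_coredump_req_t req;
            int fd = open("/dev/unifiudi0", O_RDWR);        /* node name assumed */

            if (fd < 0)
                    return -1;
            memset(&req, 0, sizeof(req));
            req.index  = 0;                                 /* 0 = newest capture */
            req.offset = 0x24;                              /* example offset */
            req.space  = UNIFIIO_COREDUMP_MAC_REG;
            if (ioctl(fd, UNIFI_COREDUMP_GET_REG, &req) == 0 && req.value >= 0)
                    printf("reg 0x%x = 0x%04x (fw %u)\n",
                           req.offset, (unsigned)req.value, req.fw_ver);
            close(fd);
            return req.value;
    }
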
-
-#endif /* __UNIFIIO_H__ */
diff --git a/drivers/staging/csr/wext_events.c b/drivers/staging/csr/wext_events.c
deleted file mode 100644
index 9860ea3..0000000
--- a/drivers/staging/csr/wext_events.c
+++ /dev/null
@@ -1,283 +0,0 @@
-/*
- * ---------------------------------------------------------------------------
- * FILE: wext_events.c
- *
- * PURPOSE:
- * Code to generate iwevents.
- *
- * Copyright (C) 2006-2008 by Cambridge Silicon Radio Ltd.
- *
- * Refer to LICENSE.txt included with this source code for details on
- * the license terms.
- *
- * ---------------------------------------------------------------------------
- */
-#include <linux/types.h>
-#include <linux/etherdevice.h>
-#include <linux/if_arp.h>
-#include "csr_wifi_hip_unifi.h"
-#include "unifi_priv.h"
-
-
-
-/*
- * ---------------------------------------------------------------------------
- * wext_send_assoc_event
- *
- * Send wireless-extension events up to userland to announce
- * successful association with an AP.
- *
- * Arguments:
- * priv Pointer to driver context.
- * bssid MAC address of AP we associated with
- * req_ie, req_ie_len IEs in the original request
- * resp_ie, resp_ie_len IEs in the response
- *
- * Returns:
- * None.
- *
- * Notes:
- * This is sent on first successful association, and again if we
- * roam to another AP.
- * ---------------------------------------------------------------------------
- */
-void
-wext_send_assoc_event(unifi_priv_t *priv, unsigned char *bssid,
- unsigned char *req_ie, int req_ie_len,
- unsigned char *resp_ie, int resp_ie_len,
- unsigned char *scan_ie, unsigned int scan_ie_len)
-{
-#if WIRELESS_EXT > 17
- union iwreq_data wrqu;
-
- if (req_ie_len == 0) req_ie = NULL;
- wrqu.data.length = req_ie_len;
- wrqu.data.flags = 0;
- wireless_send_event(priv->netdev[CSR_WIFI_INTERFACE_IN_USE], IWEVASSOCREQIE, &wrqu, req_ie);
-
- if (resp_ie_len == 0) resp_ie = NULL;
- wrqu.data.length = resp_ie_len;
- wrqu.data.flags = 0;
- wireless_send_event(priv->netdev[CSR_WIFI_INTERFACE_IN_USE], IWEVASSOCRESPIE, &wrqu, resp_ie);
-
- if (scan_ie_len > 0) {
- wrqu.data.length = scan_ie_len;
- wrqu.data.flags = 0;
- wireless_send_event(priv->netdev[CSR_WIFI_INTERFACE_IN_USE], IWEVGENIE, &wrqu, scan_ie);
- }
-
- memcpy(&wrqu.ap_addr.sa_data, bssid, ETH_ALEN);
- wireless_send_event(priv->netdev[CSR_WIFI_INTERFACE_IN_USE], SIOCGIWAP, &wrqu, NULL);
-#endif
-} /* wext_send_assoc_event() */
-
-
-
-/*
- * ---------------------------------------------------------------------------
- * wext_send_disassoc_event
- *
- * Send a wireless-extension event up to userland to announce
- * that we disassociated from an AP.
- *
- * Arguments:
- * priv Pointer to driver context.
- *
- * Returns:
- * None.
- *
- * Notes:
- * The semantics of wpa_supplicant (the userland SME application) are
- * that a SIOCGIWAP event with MAC address of all zero means
- * disassociate.
- * ---------------------------------------------------------------------------
- */
-void
-wext_send_disassoc_event(unifi_priv_t *priv)
-{
-#if WIRELESS_EXT > 17
- union iwreq_data wrqu;
-
- memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
- wireless_send_event(priv->netdev[CSR_WIFI_INTERFACE_IN_USE], SIOCGIWAP, &wrqu, NULL);
-#endif
-} /* wext_send_disassoc_event() */
-
-
-
-/*
- * ---------------------------------------------------------------------------
- * wext_send_scan_results_event
- *
- * Send wireless-extension events up to userland to announce
- * completion of a scan.
- *
- * Arguments:
- * priv Pointer to driver context.
- *
- * Returns:
- * None.
- *
- * Notes:
- * This doesn't actually report the results, they are retrieved
- * using the SIOCGIWSCAN ioctl command.
- * ---------------------------------------------------------------------------
- */
-void
-wext_send_scan_results_event(unifi_priv_t *priv)
-{
-#if WIRELESS_EXT > 17
- union iwreq_data wrqu;
-
- wrqu.data.length = 0;
- wrqu.data.flags = 0;
- wireless_send_event(priv->netdev[CSR_WIFI_INTERFACE_IN_USE], SIOCGIWSCAN, &wrqu, NULL);
-
-#endif
-} /* wext_send_scan_results_event() */
-
-
-
-/*
- * ---------------------------------------------------------------------------
- * wext_send_michaelmicfailure_event
- *
- * Send wireless-extension events up to userland to announce
- *      a Michael MIC failure reported by UniFi.
- *
- * Arguments:
- * priv Pointer to driver context.
- * count, macaddr, key_type, key_idx, tsc
- * Parameters from report from UniFi.
- *
- * Returns:
- * None.
- * ---------------------------------------------------------------------------
- */
-#if WIRELESS_EXT >= 18
-static inline void
-_send_michaelmicfailure_event(struct net_device *dev,
- int count, const unsigned char *macaddr,
- int key_type, int key_idx,
- unsigned char *tsc)
-{
- union iwreq_data wrqu;
- struct iw_michaelmicfailure mmf;
-
- memset(&mmf, 0, sizeof(mmf));
-
- mmf.flags = key_idx & IW_MICFAILURE_KEY_ID;
- if (key_type == CSR_GROUP) {
- mmf.flags |= IW_MICFAILURE_GROUP;
- } else {
- mmf.flags |= IW_MICFAILURE_PAIRWISE;
- }
- mmf.flags |= ((count << 5) & IW_MICFAILURE_COUNT);
-
- mmf.src_addr.sa_family = ARPHRD_ETHER;
- memcpy(mmf.src_addr.sa_data, macaddr, ETH_ALEN);
-
- memcpy(mmf.tsc, tsc, IW_ENCODE_SEQ_MAX_SIZE);
-
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = sizeof(mmf);
-
- wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *)&mmf);
-}
-#elif WIRELESS_EXT >= 15
-static inline void
-_send_michaelmicfailure_event(struct net_device *dev,
- int count, const unsigned char *macaddr,
- int key_type, int key_idx,
- unsigned char *tsc)
-{
- union iwreq_data wrqu;
- char buf[128];
-
- sprintf(buf,
- "MLME-MICHAELMICFAILURE.indication(keyid=%d %scast addr=%pM)",
- key_idx, (key_type == CSR_GROUP) ? "broad" : "uni", macaddr);
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = strlen(buf);
- wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf);
-}
-#else /* WIRELESS_EXT >= 15 */
-static inline void
-_send_michaelmicfailure_event(struct net_device *dev,
- int count, const unsigned char *macaddr,
- int key_type, int key_idx,
- unsigned char *tsc)
-{
- /* Not supported before WEXT 15 */
-}
-#endif /* WIRELESS_EXT >= 15 */
-
-
-void
-wext_send_michaelmicfailure_event(unifi_priv_t *priv,
- u16 count,
- CsrWifiMacAddress address,
- CsrWifiSmeKeyType keyType,
- u16 interfaceTag)
-{
- unsigned char tsc[8] = {0};
-
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "wext_send_michaelmicfailure_event bad interfaceTag\n");
- return;
- }
-
- _send_michaelmicfailure_event(priv->netdev[interfaceTag],
- count,
- address.a,
- keyType,
- 0,
- tsc);
-} /* wext_send_michaelmicfailure_event() */
-
-void
-wext_send_pmkid_candidate_event(unifi_priv_t *priv, CsrWifiMacAddress bssid, u8 preauth_allowed, u16 interfaceTag)
-{
-#if WIRELESS_EXT > 17
- union iwreq_data wrqu;
- struct iw_pmkid_cand pmkid_cand;
-
- if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) {
- unifi_error(priv, "wext_send_pmkid_candidate_event bad interfaceTag\n");
- return;
- }
-
- memset(&pmkid_cand, 0, sizeof(pmkid_cand));
-
- if (preauth_allowed) {
- pmkid_cand.flags |= IW_PMKID_CAND_PREAUTH;
- }
- pmkid_cand.bssid.sa_family = ARPHRD_ETHER;
- memcpy(pmkid_cand.bssid.sa_data, bssid.a, ETH_ALEN);
-    /* Used as a priority: the smaller the number, the higher the priority; not really used in our case */
- pmkid_cand.index = 1;
-
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = sizeof(pmkid_cand);
-
- wireless_send_event(priv->netdev[interfaceTag], IWEVPMKIDCAND, &wrqu, (char *)&pmkid_cand);
-#endif
-} /* wext_send_pmkid_candidate_event() */
-
-/*
- * Send a custom WEXT event to say we have completed initialisation
- * and are now ready for WEXT ioctls. Used by Android wpa_supplicant.
- */
-void
-wext_send_started_event(unifi_priv_t *priv)
-{
-#if WIRELESS_EXT > 17
- union iwreq_data wrqu;
- char data[] = "STARTED";
-
- wrqu.data.length = sizeof(data);
- wrqu.data.flags = 0;
- wireless_send_event(priv->netdev[CSR_WIFI_INTERFACE_IN_USE], IWEVCUSTOM, &wrqu, data);
-#endif
-} /* wext_send_started_event() */
-
diff --git a/drivers/staging/cxt1e1/Makefile b/drivers/staging/cxt1e1/Makefile
index b9ccb76..2f217e9 100644
--- a/drivers/staging/cxt1e1/Makefile
+++ b/drivers/staging/cxt1e1/Makefile
@@ -2,7 +2,6 @@ obj-$(CONFIG_CXT1E1) += cxt1e1.o
ccflags-y := -DSBE_PMCC4_ENABLE
ccflags-y += -DSBE_ISR_TASKLET
-ccflags-y += -DSBE_INCLUDE_SYMBOLS
cxt1e1-y := \
ossiRelease.o \
diff --git a/drivers/staging/cxt1e1/comet.c b/drivers/staging/cxt1e1/comet.c
index 52224cd..d71aea5 100644
--- a/drivers/staging/cxt1e1/comet.c
+++ b/drivers/staging/cxt1e1/comet.c
@@ -13,7 +13,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/hdlc.h>
#include "pmcc4_sysdep.h"
#include "sbecom_inline_linux.h"
@@ -22,248 +22,259 @@
#include "comet.h"
#include "comet_tables.h"
-#ifdef SBE_INCLUDE_SYMBOLS
-#define STATIC
-#else
-#define STATIC static
-#endif
-
-
extern int cxt1e1_log_level;
#define COMET_NUM_SAMPLES 24 /* Number of entries in the waveform table */
#define COMET_NUM_UNITS 5 /* Number of points per entry in table */
/* forward references */
-STATIC void SetPwrLevel (comet_t * comet);
-STATIC void WrtRcvEqualizerTbl (ci_t * ci, comet_t * comet, u_int32_t *table);
-STATIC void WrtXmtWaveformTbl (ci_t * ci, comet_t * comet, u_int8_t table[COMET_NUM_SAMPLES][COMET_NUM_UNITS]);
+static void SetPwrLevel(comet_t *comet);
+static void WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table);
+static void WrtXmtWaveformTbl(ci_t *ci, comet_t *comet, u_int8_t table[COMET_NUM_SAMPLES][COMET_NUM_UNITS]);
void *TWV_table[12] = {
- TWVLongHaul0DB, TWVLongHaul7_5DB, TWVLongHaul15DB, TWVLongHaul22_5DB,
- TWVShortHaul0, TWVShortHaul1, TWVShortHaul2, TWVShortHaul3, TWVShortHaul4,
- TWVShortHaul5,
- TWV_E1_75Ohm, /** PORT POINT - 75 Ohm not supported **/
- TWV_E1_120Ohm
+ TWVLongHaul0DB, TWVLongHaul7_5DB, TWVLongHaul15DB, TWVLongHaul22_5DB,
+ TWVShortHaul0, TWVShortHaul1, TWVShortHaul2, TWVShortHaul3,
+ TWVShortHaul4, TWVShortHaul5,
+ /** PORT POINT - 75 Ohm not supported **/
+ TWV_E1_75Ohm,
+ TWV_E1_120Ohm
};
static int
-lbo_tbl_lkup (int t1, int lbo)
-{
- if ((lbo < CFG_LBO_LH0) || (lbo > CFG_LBO_E120)) /* error switches to
- * default */
- {
- if (t1)
- lbo = CFG_LBO_LH0; /* default T1 waveform table */
- else
- lbo = CFG_LBO_E120; /* default E1 waveform table */
- }
- return (lbo - 1); /* make index ZERO relative */
+lbo_tbl_lkup(int t1, int lbo) {
+ /* error switches to default */
+ if ((lbo < CFG_LBO_LH0) || (lbo > CFG_LBO_E120)) {
+ if (t1)
+ /* default T1 waveform table */
+ lbo = CFG_LBO_LH0;
+ else
+ /* default E1 waveform table */
+ lbo = CFG_LBO_E120;
+ }
+ /* make index ZERO relative */
+ return lbo - 1;
}
-
-void
-init_comet (void *ci, comet_t * comet, u_int32_t port_mode, int clockmaster,
- u_int8_t moreParams)
+void init_comet(void *ci, comet_t *comet, u_int32_t port_mode, int clockmaster,
+ u_int8_t moreParams)
{
- u_int8_t isT1mode;
- u_int8_t tix = CFG_LBO_LH0; /* T1 default */
-
- isT1mode = IS_FRAME_ANY_T1 (port_mode);
- /* T1 or E1 */
- if (isT1mode)
- {
- pci_write_32 ((u_int32_t *) &comet->gbl_cfg, 0xa0); /* Select T1 Mode & PIO
- * output enabled */
- tix = lbo_tbl_lkup (isT1mode, CFG_LBO_LH0); /* default T1 waveform
- * table */
- } else
- {
- pci_write_32 ((u_int32_t *) &comet->gbl_cfg, 0x81); /* Select E1 Mode & PIO
- * output enabled */
- tix = lbo_tbl_lkup (isT1mode, CFG_LBO_E120); /* default E1 waveform
- * table */
- }
-
- if (moreParams & CFG_LBO_MASK)
- tix = lbo_tbl_lkup (isT1mode, moreParams & CFG_LBO_MASK); /* dial-in requested
- * waveform table */
-
- /* Tx line Intfc cfg ** Set for analog & no special patterns */
- pci_write_32 ((u_int32_t *) &comet->tx_line_cfg, 0x00); /* Transmit Line
- * Interface Config. */
-
- /* master test ** Ignore Test settings for now */
- pci_write_32 ((u_int32_t *) &comet->mtest, 0x00); /* making sure it's
- * Default value */
-
- /* Turn on Center (CENT) and everything else off */
- pci_write_32 ((u_int32_t *) &comet->rjat_cfg, 0x10); /* RJAT cfg */
- /* Set Jitter Attenuation to recommend T1 values */
- if (isT1mode)
- {
- pci_write_32 ((u_int32_t *) &comet->rjat_n1clk, 0x2F); /* RJAT Divider N1
- * Control */
- pci_write_32 ((u_int32_t *) &comet->rjat_n2clk, 0x2F); /* RJAT Divider N2
- * Control */
- } else
- {
- pci_write_32 ((u_int32_t *) &comet->rjat_n1clk, 0xFF); /* RJAT Divider N1
- * Control */
- pci_write_32 ((u_int32_t *) &comet->rjat_n2clk, 0xFF); /* RJAT Divider N2
- * Control */
- }
-
- /* Turn on Center (CENT) and everything else off */
- pci_write_32 ((u_int32_t *) &comet->tjat_cfg, 0x10); /* TJAT Config. */
-
- /* Do not bypass jitter attenuation and bypass elastic store */
- pci_write_32 ((u_int32_t *) &comet->rx_opt, 0x00); /* rx opts */
-
- /* TJAT ctrl & TJAT divider ctrl */
- /* Set Jitter Attenuation to recommended T1 values */
- if (isT1mode)
- {
- pci_write_32 ((u_int32_t *) &comet->tjat_n1clk, 0x2F); /* TJAT Divider N1
- * Control */
- pci_write_32 ((u_int32_t *) &comet->tjat_n2clk, 0x2F); /* TJAT Divider N2
- * Control */
- } else
- {
- pci_write_32 ((u_int32_t *) &comet->tjat_n1clk, 0xFF); /* TJAT Divider N1
- * Control */
- pci_write_32 ((u_int32_t *) &comet->tjat_n2clk, 0xFF); /* TJAT Divider N2
- * Control */
- }
-
- /* 1c: rx ELST cfg 20: tx ELST cfg 28&38: rx&tx data link ctrl */
- if (isT1mode)
- { /* Select 193-bit frame format */
- pci_write_32 ((u_int32_t *) &comet->rx_elst_cfg, 0x00);
- pci_write_32 ((u_int32_t *) &comet->tx_elst_cfg, 0x00);
- } else
- { /* Select 256-bit frame format */
- pci_write_32 ((u_int32_t *) &comet->rx_elst_cfg, 0x03);
- pci_write_32 ((u_int32_t *) &comet->tx_elst_cfg, 0x03);
- pci_write_32 ((u_int32_t *) &comet->rxce1_ctl, 0x00); /* disable T1 data link
- * receive */
- pci_write_32 ((u_int32_t *) &comet->txci1_ctl, 0x00); /* disable T1 data link
- * transmit */
- }
+ u_int8_t isT1mode;
+ /* T1 default */
+ u_int8_t tix = CFG_LBO_LH0;
+ isT1mode = IS_FRAME_ANY_T1(port_mode);
+ /* T1 or E1 */
+ if (isT1mode) {
+ /* Select T1 Mode & PIO output enabled */
+ pci_write_32((u_int32_t *) &comet->gbl_cfg, 0xa0);
+ /* default T1 waveform table */
+ tix = lbo_tbl_lkup(isT1mode, CFG_LBO_LH0);
+ } else {
+ /* Select E1 Mode & PIO output enabled */
+ pci_write_32((u_int32_t *) &comet->gbl_cfg, 0x81);
+ /* default E1 waveform table */
+ tix = lbo_tbl_lkup(isT1mode, CFG_LBO_E120);
+ }
+
+ if (moreParams & CFG_LBO_MASK)
+ /* dial-in requested waveform table */
+ tix = lbo_tbl_lkup(isT1mode, moreParams & CFG_LBO_MASK);
+ /* Tx line Intfc cfg Set for analog & no special patterns */
+ /* Transmit Line Interface Config. */
+ pci_write_32((u_int32_t *) &comet->tx_line_cfg, 0x00);
+ /* master test Ignore Test settings for now */
+ /* making sure it's Default value */
+ pci_write_32((u_int32_t *) &comet->mtest, 0x00);
+ /* Turn on Center (CENT) and everything else off */
+ /* RJAT cfg */
+ pci_write_32((u_int32_t *) &comet->rjat_cfg, 0x10);
+ /* Set Jitter Attenuation to recommend T1 values */
+ if (isT1mode) {
+ /* RJAT Divider N1 Control */
+ pci_write_32((u_int32_t *) &comet->rjat_n1clk, 0x2F);
+ /* RJAT Divider N2 Control */
+ pci_write_32((u_int32_t *) &comet->rjat_n2clk, 0x2F);
+ } else {
+ /* RJAT Divider N1 Control */
+ pci_write_32((u_int32_t *) &comet->rjat_n1clk, 0xFF);
+ /* RJAT Divider N2 Control */
+ pci_write_32((u_int32_t *) &comet->rjat_n2clk, 0xFF);
+ }
+
+ /* Turn on Center (CENT) and everything else off */
+ /* TJAT Config. */
+ pci_write_32((u_int32_t *) &comet->tjat_cfg, 0x10);
+
+ /* Do not bypass jitter attenuation and bypass elastic store */
+ /* rx opts */
+ pci_write_32((u_int32_t *) &comet->rx_opt, 0x00);
+
+ /* TJAT ctrl & TJAT divider ctrl */
+ /* Set Jitter Attenuation to recommended T1 values */
+ if (isT1mode) {
+ /* TJAT Divider N1 Control */
+ pci_write_32((u_int32_t *) &comet->tjat_n1clk, 0x2F);
+ /* TJAT Divider N2 Control */
+ pci_write_32((u_int32_t *) &comet->tjat_n2clk, 0x2F);
+ } else {
+ /* TJAT Divider N1 Control */
+ pci_write_32((u_int32_t *) &comet->tjat_n1clk, 0xFF);
+ /* TJAT Divider N2 Control */
+ pci_write_32((u_int32_t *) &comet->tjat_n2clk, 0xFF);
+ }
+
+ /* 1c: rx ELST cfg 20: tx ELST cfg 28&38: rx&tx data link ctrl */
+
+ /* Select 193-bit frame format */
+ if (isT1mode) {
+ pci_write_32((u_int32_t *) &comet->rx_elst_cfg, 0x00);
+ pci_write_32((u_int32_t *) &comet->tx_elst_cfg, 0x00);
+ } else {
+ /* Select 256-bit frame format */
+ pci_write_32((u_int32_t *) &comet->rx_elst_cfg, 0x03);
+ pci_write_32((u_int32_t *) &comet->tx_elst_cfg, 0x03);
+ /* disable T1 data link receive */
+ pci_write_32((u_int32_t *) &comet->rxce1_ctl, 0x00);
+ /* disable T1 data link transmit */
+ pci_write_32((u_int32_t *) &comet->txci1_ctl, 0x00);
+ }
/* the following is a default value */
/* Enable 8 out of 10 validation */
- pci_write_32 ((u_int32_t *) &comet->t1_rboc_ena, 0x00); /* t1RBOC
- * enable(BOC:BitOriented
- * Code) */
- if (isT1mode)
- {
-
- /* IBCD cfg: aka Inband Code Detection ** loopback code length set to */
- pci_write_32 ((u_int32_t *) &comet->ibcd_cfg, 0x04); /* 6 bit down, 5 bit up
- * (assert) */
- pci_write_32 ((u_int32_t *) &comet->ibcd_act, 0x08); /* line loopback
- * activate pattern */
- pci_write_32 ((u_int32_t *) &comet->ibcd_deact, 0x24); /* deactivate code
- * pattern (i.e.001) */
- }
+ /* t1RBOC enable(BOC:BitOriented Code) */
+ pci_write_32((u_int32_t *) &comet->t1_rboc_ena, 0x00);
+ if (isT1mode)
+ {
+
+ /* IBCD cfg: aka Inband Code Detection ** loopback code length set to */
+ /* 6 bit down, 5 bit up (assert) */
+ pci_write_32((u_int32_t *) &comet->ibcd_cfg, 0x04);
+ /* line loopback activate pattern */
+ pci_write_32((u_int32_t *) &comet->ibcd_act, 0x08);
+ /* deactivate code pattern (i.e.001) */
+ pci_write_32((u_int32_t *) &comet->ibcd_deact, 0x24);
+ }
/* 10: CDRC cfg 28&38: rx&tx data link 1 ctrl 48: t1 frmr cfg */
/* 50: SIGX cfg, COSS (change of signaling state) 54: XBAS cfg */
/* 60: t1 ALMI cfg */
/* Configure Line Coding */
- switch (port_mode)
- {
- case CFG_FRAME_SF: /* 1 - T1 B8ZS */
- pci_write_32 ((u_int32_t *) &comet->cdrc_cfg, 0);
- pci_write_32 ((u_int32_t *) &comet->t1_frmr_cfg, 0);
- pci_write_32 ((u_int32_t *) &comet->sigx_cfg, 0);
- pci_write_32 ((u_int32_t *) &comet->t1_xbas_cfg, 0x20); /* 5:B8ZS */
- pci_write_32 ((u_int32_t *) &comet->t1_almi_cfg, 0);
- break;
- case CFG_FRAME_ESF: /* 2 - T1 B8ZS */
- pci_write_32 ((u_int32_t *) &comet->cdrc_cfg, 0);
- pci_write_32 ((u_int32_t *) &comet->rxce1_ctl, 0x20); /* Bit 5: T1 DataLink
- * Enable */
- pci_write_32 ((u_int32_t *) &comet->txci1_ctl, 0x20); /* 5: T1 DataLink Enable */
- pci_write_32 ((u_int32_t *) &comet->t1_frmr_cfg, 0x30); /* 4:ESF 5:ESFFA */
- pci_write_32 ((u_int32_t *) &comet->sigx_cfg, 0x04); /* 2:ESF */
- pci_write_32 ((u_int32_t *) &comet->t1_xbas_cfg, 0x30); /* 4:ESF 5:B8ZS */
- pci_write_32 ((u_int32_t *) &comet->t1_almi_cfg, 0x10); /* 4:ESF */
- break;
- case CFG_FRAME_E1PLAIN: /* 3 - HDB3 */
- pci_write_32 ((u_int32_t *) &comet->cdrc_cfg, 0);
- pci_write_32 ((u_int32_t *) &comet->sigx_cfg, 0);
- pci_write_32 ((u_int32_t *) &comet->e1_tran_cfg, 0);
- pci_write_32 ((u_int32_t *) &comet->e1_frmr_aopts, 0x40);
- break;
- case CFG_FRAME_E1CAS: /* 4 - HDB3 */
- pci_write_32 ((u_int32_t *) &comet->cdrc_cfg, 0);
- pci_write_32 ((u_int32_t *) &comet->sigx_cfg, 0);
- pci_write_32 ((u_int32_t *) &comet->e1_tran_cfg, 0x60);
- pci_write_32 ((u_int32_t *) &comet->e1_frmr_aopts, 0);
- break;
- case CFG_FRAME_E1CRC: /* 5 - HDB3 */
- pci_write_32 ((u_int32_t *) &comet->cdrc_cfg, 0);
- pci_write_32 ((u_int32_t *) &comet->sigx_cfg, 0);
- pci_write_32 ((u_int32_t *) &comet->e1_tran_cfg, 0x10);
- pci_write_32 ((u_int32_t *) &comet->e1_frmr_aopts, 0xc2);
- break;
- case CFG_FRAME_E1CRC_CAS: /* 6 - HDB3 */
- pci_write_32 ((u_int32_t *) &comet->cdrc_cfg, 0);
- pci_write_32 ((u_int32_t *) &comet->sigx_cfg, 0);
- pci_write_32 ((u_int32_t *) &comet->e1_tran_cfg, 0x70);
- pci_write_32 ((u_int32_t *) &comet->e1_frmr_aopts, 0x82);
- break;
- case CFG_FRAME_SF_AMI: /* 7 - T1 AMI */
- pci_write_32 ((u_int32_t *) &comet->cdrc_cfg, 0x80); /* Enable AMI Line
- * Decoding */
- pci_write_32 ((u_int32_t *) &comet->t1_frmr_cfg, 0);
- pci_write_32 ((u_int32_t *) &comet->t1_xbas_cfg, 0);
- pci_write_32 ((u_int32_t *) &comet->t1_almi_cfg, 0);
- pci_write_32 ((u_int32_t *) &comet->sigx_cfg, 0);
- break;
- case CFG_FRAME_ESF_AMI: /* 8 - T1 AMI */
- pci_write_32 ((u_int32_t *) &comet->cdrc_cfg, 0x80); /* Enable AMI Line
- * Decoding */
- pci_write_32 ((u_int32_t *) &comet->rxce1_ctl, 0x20); /* 5: T1 DataLink Enable */
- pci_write_32 ((u_int32_t *) &comet->txci1_ctl, 0x20); /* 5: T1 DataLink Enable */
- pci_write_32 ((u_int32_t *) &comet->t1_frmr_cfg, 0x30); /* Bit 4:ESF 5:ESFFA */
- pci_write_32 ((u_int32_t *) &comet->sigx_cfg, 0x04); /* 2:ESF */
- pci_write_32 ((u_int32_t *) &comet->t1_xbas_cfg, 0x10); /* 4:ESF */
- pci_write_32 ((u_int32_t *) &comet->t1_almi_cfg, 0x10); /* 4:ESF */
- break;
- case CFG_FRAME_E1PLAIN_AMI: /* 9 - AMI */
- pci_write_32 ((u_int32_t *) &comet->cdrc_cfg, 0x80); /* Enable AMI Line
- * Decoding */
- pci_write_32 ((u_int32_t *) &comet->sigx_cfg, 0);
- pci_write_32 ((u_int32_t *) &comet->e1_tran_cfg, 0x80);
- pci_write_32 ((u_int32_t *) &comet->e1_frmr_aopts, 0x40);
- break;
- case CFG_FRAME_E1CAS_AMI: /* 10 - AMI */
- pci_write_32 ((u_int32_t *) &comet->cdrc_cfg, 0x80); /* Enable AMI Line
- * Decoding */
- pci_write_32 ((u_int32_t *) &comet->sigx_cfg, 0);
- pci_write_32 ((u_int32_t *) &comet->e1_tran_cfg, 0xe0);
- pci_write_32 ((u_int32_t *) &comet->e1_frmr_aopts, 0);
- break;
- case CFG_FRAME_E1CRC_AMI: /* 11 - AMI */
- pci_write_32 ((u_int32_t *) &comet->cdrc_cfg, 0x80); /* Enable AMI Line
- * Decoding */
- pci_write_32 ((u_int32_t *) &comet->sigx_cfg, 0);
- pci_write_32 ((u_int32_t *) &comet->e1_tran_cfg, 0x90);
- pci_write_32 ((u_int32_t *) &comet->e1_frmr_aopts, 0xc2);
- break;
- case CFG_FRAME_E1CRC_CAS_AMI: /* 12 - AMI */
- pci_write_32 ((u_int32_t *) &comet->cdrc_cfg, 0x80); /* Enable AMI Line
- * Decoding */
- pci_write_32 ((u_int32_t *) &comet->sigx_cfg, 0);
- pci_write_32 ((u_int32_t *) &comet->e1_tran_cfg, 0xf0);
- pci_write_32 ((u_int32_t *) &comet->e1_frmr_aopts, 0x82);
- break;
- } /* end switch */
+ switch (port_mode)
+ {
+ /* 1 - T1 B8ZS */
+ case CFG_FRAME_SF:
+ pci_write_32((u_int32_t *) &comet->cdrc_cfg, 0);
+ pci_write_32((u_int32_t *) &comet->t1_frmr_cfg, 0);
+ pci_write_32((u_int32_t *) &comet->sigx_cfg, 0);
+ /* 5:B8ZS */
+ pci_write_32((u_int32_t *) &comet->t1_xbas_cfg, 0x20);
+ pci_write_32((u_int32_t *) &comet->t1_almi_cfg, 0);
+ break;
+ /* 2 - T1 B8ZS */
+ case CFG_FRAME_ESF:
+ pci_write_32((u_int32_t *) &comet->cdrc_cfg, 0);
+ /* Bit 5: T1 DataLink Enable */
+ pci_write_32((u_int32_t *) &comet->rxce1_ctl, 0x20);
+ /* 5: T1 DataLink Enable */
+ pci_write_32((u_int32_t *) &comet->txci1_ctl, 0x20);
+ /* 4:ESF 5:ESFFA */
+ pci_write_32((u_int32_t *) &comet->t1_frmr_cfg, 0x30);
+ /* 2:ESF */
+ pci_write_32((u_int32_t *) &comet->sigx_cfg, 0x04);
+ /* 4:ESF 5:B8ZS */
+ pci_write_32((u_int32_t *) &comet->t1_xbas_cfg, 0x30);
+ /* 4:ESF */
+ pci_write_32((u_int32_t *) &comet->t1_almi_cfg, 0x10);
+ break;
+ /* 3 - HDB3 */
+ case CFG_FRAME_E1PLAIN:
+ pci_write_32((u_int32_t *) &comet->cdrc_cfg, 0);
+ pci_write_32((u_int32_t *) &comet->sigx_cfg, 0);
+ pci_write_32((u_int32_t *) &comet->e1_tran_cfg, 0);
+ pci_write_32((u_int32_t *) &comet->e1_frmr_aopts, 0x40);
+ break;
+ /* 4 - HDB3 */
+ case CFG_FRAME_E1CAS:
+ pci_write_32((u_int32_t *) &comet->cdrc_cfg, 0);
+ pci_write_32((u_int32_t *) &comet->sigx_cfg, 0);
+ pci_write_32((u_int32_t *) &comet->e1_tran_cfg, 0x60);
+ pci_write_32((u_int32_t *) &comet->e1_frmr_aopts, 0);
+ break;
+ /* 5 - HDB3 */
+ case CFG_FRAME_E1CRC:
+ pci_write_32((u_int32_t *) &comet->cdrc_cfg, 0);
+ pci_write_32((u_int32_t *) &comet->sigx_cfg, 0);
+ pci_write_32((u_int32_t *) &comet->e1_tran_cfg, 0x10);
+ pci_write_32((u_int32_t *) &comet->e1_frmr_aopts, 0xc2);
+ break;
+ /* 6 - HDB3 */
+ case CFG_FRAME_E1CRC_CAS:
+ pci_write_32((u_int32_t *) &comet->cdrc_cfg, 0);
+ pci_write_32((u_int32_t *) &comet->sigx_cfg, 0);
+ pci_write_32((u_int32_t *) &comet->e1_tran_cfg, 0x70);
+ pci_write_32((u_int32_t *) &comet->e1_frmr_aopts, 0x82);
+ break;
+ /* 7 - T1 AMI */
+ case CFG_FRAME_SF_AMI:
+ /* Enable AMI Line Decoding */
+ pci_write_32((u_int32_t *) &comet->cdrc_cfg, 0x80);
+ pci_write_32((u_int32_t *) &comet->t1_frmr_cfg, 0);
+ pci_write_32((u_int32_t *) &comet->t1_xbas_cfg, 0);
+ pci_write_32((u_int32_t *) &comet->t1_almi_cfg, 0);
+ pci_write_32((u_int32_t *) &comet->sigx_cfg, 0);
+ break;
+ /* 8 - T1 AMI */
+ case CFG_FRAME_ESF_AMI:
+ /* Enable AMI Line Decoding */
+ pci_write_32((u_int32_t *) &comet->cdrc_cfg, 0x80);
+ /* 5: T1 DataLink Enable */
+ pci_write_32((u_int32_t *) &comet->rxce1_ctl, 0x20);
+ /* 5: T1 DataLink Enable */
+ pci_write_32((u_int32_t *) &comet->txci1_ctl, 0x20);
+ /* Bit 4:ESF 5:ESFFA */
+ pci_write_32((u_int32_t *) &comet->t1_frmr_cfg, 0x30);
+ /* 2:ESF */
+ pci_write_32((u_int32_t *) &comet->sigx_cfg, 0x04);
+ /* 4:ESF */
+ pci_write_32((u_int32_t *) &comet->t1_xbas_cfg, 0x10);
+ /* 4:ESF */
+ pci_write_32((u_int32_t *) &comet->t1_almi_cfg, 0x10);
+ break;
+ /* 9 - AMI */
+ case CFG_FRAME_E1PLAIN_AMI:
+ /* Enable AMI Line Decoding */
+ pci_write_32((u_int32_t *) &comet->cdrc_cfg, 0x80);
+ pci_write_32((u_int32_t *) &comet->sigx_cfg, 0);
+ pci_write_32((u_int32_t *) &comet->e1_tran_cfg, 0x80);
+ pci_write_32((u_int32_t *) &comet->e1_frmr_aopts, 0x40);
+ break;
+ /* 10 - AMI */
+ case CFG_FRAME_E1CAS_AMI:
+ /* Enable AMI Line Decoding */
+ pci_write_32((u_int32_t *) &comet->cdrc_cfg, 0x80);
+ pci_write_32((u_int32_t *) &comet->sigx_cfg, 0);
+ pci_write_32((u_int32_t *) &comet->e1_tran_cfg, 0xe0);
+ pci_write_32((u_int32_t *) &comet->e1_frmr_aopts, 0);
+ break;
+ /* 11 - AMI */
+ case CFG_FRAME_E1CRC_AMI:
+ /* Enable AMI Line Decoding */
+ pci_write_32((u_int32_t *) &comet->cdrc_cfg, 0x80);
+ pci_write_32((u_int32_t *) &comet->sigx_cfg, 0);
+ pci_write_32((u_int32_t *) &comet->e1_tran_cfg, 0x90);
+ pci_write_32((u_int32_t *) &comet->e1_frmr_aopts, 0xc2);
+ break;
+ /* 12 - AMI */
+ case CFG_FRAME_E1CRC_CAS_AMI:
+ /* Enable AMI Line Decoding */
+ pci_write_32((u_int32_t *) &comet->cdrc_cfg, 0x80);
+ pci_write_32((u_int32_t *) &comet->sigx_cfg, 0);
+ pci_write_32((u_int32_t *) &comet->e1_tran_cfg, 0xf0);
+ pci_write_32((u_int32_t *) &comet->e1_frmr_aopts, 0x82);
+ break;
+ } /* end switch */
/***
* Set Full Frame mode (NXDSO[1] = 0, NXDSO[0] = 0)
@@ -277,101 +288,109 @@ init_comet (void *ci, comet_t * comet, u_int32_t port_mode, int clockmaster,
/* 0x30: "BRIF cfg"; 0x20 is 'CMODE', 0x03 is (bit) rate */
/* note "rate bits can only be set once after reset" */
- if (clockmaster)
- { /* CMODE == clockMode, 0=clock master (so
- * all 3 others should be slave) */
- if (isT1mode) /* rate = 1.544 Mb/s */
- pci_write_32 ((u_int32_t *) &comet->brif_cfg, 0x00); /* Comet 0 Master
- * Mode(CMODE=0) */
- else /* rate = 2.048 Mb/s */
- pci_write_32 ((u_int32_t *) &comet->brif_cfg, 0x01); /* Comet 0 Master
- * Mode(CMODE=0) */
-
- /* 31: BRIF frame pulse cfg 06: tx timing options */
- pci_write_32 ((u_int32_t *) &comet->brif_fpcfg, 0x00); /* Master Mode
- * i.e.FPMODE=0 (@0x20) */
- if ((moreParams & CFG_CLK_PORT_MASK) == CFG_CLK_PORT_INTERNAL)
- {
- if (cxt1e1_log_level >= LOG_SBEBUG12)
- pr_info(">> %s: clockmaster internal clock\n", __func__);
- pci_write_32 ((u_int32_t *) &comet->tx_time, 0x0d); /* internal oscillator */
- } else /* external clock source */
- {
- if (cxt1e1_log_level >= LOG_SBEBUG12)
- pr_info(">> %s: clockmaster external clock\n", __func__);
- pci_write_32 ((u_int32_t *) &comet->tx_time, 0x09); /* loop timing
- * (external) */
- }
-
- } else /* slave */
- {
- if (isT1mode)
- pci_write_32 ((u_int32_t *) &comet->brif_cfg, 0x20); /* Slave Mode(CMODE=1,
- * see above) */
- else
- pci_write_32 ((u_int32_t *) &comet->brif_cfg, 0x21); /* Slave Mode (CMODE=1) */
- pci_write_32 ((u_int32_t *) &comet->brif_fpcfg, 0x20); /* Slave Mode i.e.
- * FPMODE=1 (@0x20) */
- if (cxt1e1_log_level >= LOG_SBEBUG12)
- pr_info(">> %s: clockslave internal clock\n", __func__);
- pci_write_32 ((u_int32_t *) &comet->tx_time, 0x0d); /* oscillator timing */
- }
-
- /* 32: BRIF parity F-bit cfg */
- /* Totem-pole operation */
- pci_write_32 ((u_int32_t *) &comet->brif_pfcfg, 0x01); /* Receive Backplane
- * Parity/F-bit */
+ if (clockmaster)
+ {
+ /* CMODE == clockMode, 0=clock master (so all 3 others should be slave) */
+ /* rate = 1.544 Mb/s */
+ if (isT1mode)
+ /* Comet 0 Master Mode(CMODE=0) */
+ pci_write_32((u_int32_t *) &comet->brif_cfg, 0x00);
+ /* rate = 2.048 Mb/s */
+ else
+ /* Comet 0 Master Mode(CMODE=0) */
+ pci_write_32((u_int32_t *) &comet->brif_cfg, 0x01);
+
+ /* 31: BRIF frame pulse cfg 06: tx timing options */
+
+ /* Master Mode i.e.FPMODE=0 (@0x20) */
+ pci_write_32((u_int32_t *) &comet->brif_fpcfg, 0x00);
+ if ((moreParams & CFG_CLK_PORT_MASK) == CFG_CLK_PORT_INTERNAL)
+ {
+ if (cxt1e1_log_level >= LOG_SBEBUG12)
+ pr_info(">> %s: clockmaster internal clock\n", __func__);
+ /* internal oscillator */
+ pci_write_32((u_int32_t *) &comet->tx_time, 0x0d);
+ } else {
+ /* external clock source */
+ if (cxt1e1_log_level >= LOG_SBEBUG12)
+ pr_info(">> %s: clockmaster external clock\n", __func__);
+ /* loop timing(external) */
+ pci_write_32((u_int32_t *) &comet->tx_time, 0x09);
+ }
+
+ } else {
+ /* slave */
+ if (isT1mode)
+ /* Slave Mode(CMODE=1, see above) */
+ pci_write_32((u_int32_t *) &comet->brif_cfg, 0x20);
+ else
+ /* Slave Mode(CMODE=1) */
+ pci_write_32((u_int32_t *) &comet->brif_cfg, 0x21);
+ /* Slave Mode i.e. FPMODE=1 (@0x20) */
+ pci_write_32((u_int32_t *) &comet->brif_fpcfg, 0x20);
+ if (cxt1e1_log_level >= LOG_SBEBUG12)
+ pr_info(">> %s: clockslave internal clock\n", __func__);
+ /* oscillator timing */
+ pci_write_32((u_int32_t *) &comet->tx_time, 0x0d);
+ }
+
+ /* 32: BRIF parity F-bit cfg */
+ /* Totem-pole operation */
+ /* Receive Backplane Parity/F-bit */
+ pci_write_32((u_int32_t *) &comet->brif_pfcfg, 0x01);
/* dc: RLPS equalizer V ref */
/* Configuration */
- if (isT1mode)
- pci_write_32 ((u_int32_t *) &comet->rlps_eqvr, 0x2c); /* RLPS Equalizer
- * Voltage */
- else
- pci_write_32 ((u_int32_t *) &comet->rlps_eqvr, 0x34); /* RLPS Equalizer
- * Voltage */
+ if (isT1mode)
+ /* RLPS Equalizer Voltage */
+ pci_write_32((u_int32_t *) &comet->rlps_eqvr, 0x2c);
+ else
+ /* RLPS Equalizer Voltage */
+ pci_write_32((u_int32_t *) &comet->rlps_eqvr, 0x34);
/* Reserved bit set and SQUELCH enabled */
/* f8: RLPS cfg & status f9: RLPS ALOS detect/clear threshold */
- pci_write_32 ((u_int32_t *) &comet->rlps_cfgsts, 0x11); /* RLPS Configuration
- * Status */
- if (isT1mode)
- pci_write_32 ((u_int32_t *) &comet->rlps_alos_thresh, 0x55); /* ? */
- else
- pci_write_32 ((u_int32_t *) &comet->rlps_alos_thresh, 0x22); /* ? */
+ /* RLPS Configuration Status */
+ pci_write_32((u_int32_t *) &comet->rlps_cfgsts, 0x11);
+ if (isT1mode)
+ /* ? */
+ pci_write_32((u_int32_t *) &comet->rlps_alos_thresh, 0x55);
+ else
+ /* ? */
+ pci_write_32((u_int32_t *) &comet->rlps_alos_thresh, 0x22);
/* Set Full Frame mode (NXDSO[1] = 0, NXDSO[0] = 0) */
/* CMODE=0: Clock slave mode with BTCLK as an input, DE=1: Use rising */
/* edge of BTCLK for data, FE=1: Use rising edge of BTCLK for frame, */
/* CMS=0: Use backplane freq, RATE[1:0]=0,0: T1 */
-/*** Transmit side is always an Input, Slave Clock*/
- /* 40: BTIF cfg 41: BTIF frame pulse cfg */
- if (isT1mode)
- pci_write_32 ((u_int32_t *) &comet->btif_cfg, 0x38); /* BTIF Configuration
- * Reg. */
- else
- pci_write_32 ((u_int32_t *) &comet->btif_cfg, 0x39); /* BTIF Configuration
- * Reg. */
-
- pci_write_32 ((u_int32_t *) &comet->btif_fpcfg, 0x01); /* BTIF Frame Pulse
- * Config. */
+ /*** Transmit side is always an Input, Slave Clock*/
+ /* 40: BTIF cfg 41: BTIF frame pulse cfg */
+ if (isT1mode)
+ /* BTIF Configuration Reg. */
+ pci_write_32((u_int32_t *) &comet->btif_cfg, 0x38);
+ else
+ /* BTIF Configuration Reg. */
+ pci_write_32((u_int32_t *) &comet->btif_cfg, 0x39);
+ /* BTIF Frame Pulse Config. */
+ pci_write_32((u_int32_t *) &comet->btif_fpcfg, 0x01);
/* 0a: master diag 06: tx timing options */
/* if set Comet to loop back */
/* Comets set to normal */
- pci_write_32 ((u_int32_t *) &comet->mdiag, 0x00);
+ pci_write_32((u_int32_t *) &comet->mdiag, 0x00);
/* BTCLK driven by TCLKI internally (crystal driven) and Xmt Elasted */
/* Store is enabled. */
- WrtXmtWaveformTbl (ci, comet, TWV_table[tix]);
- if (isT1mode)
- WrtRcvEqualizerTbl ((ci_t *) ci, comet, &T1_Equalizer[0]);
- else
- WrtRcvEqualizerTbl ((ci_t *) ci, comet, &E1_Equalizer[0]);
- SetPwrLevel (comet);
+ WrtXmtWaveformTbl(ci, comet, TWV_table[tix]);
+ if (isT1mode)
+ WrtRcvEqualizerTbl((ci_t *) ci, comet, &T1_Equalizer[0]);
+ else
+ WrtRcvEqualizerTbl((ci_t *) ci, comet, &E1_Equalizer[0]);
+ SetPwrLevel(comet);
}
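The switch near the top of init_comet() pairs each CFG_FRAME_* framing mode with a fixed set of COMET register values (cdrc_cfg, t1_xbas_cfg, t1_frmr_cfg, and so on). Below is a minimal, self-contained sketch of the same idea expressed as a lookup table instead of a switch; the enum names, table contents, and the write_reg() stub are invented for illustration, and the real driver writes through pci_write_32() into the memory-mapped comet_t layout shown in the diff.

/* Illustrative sketch only -- not the driver's code. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

enum frame_mode { SKETCH_FRAME_SF, SKETCH_FRAME_ESF, SKETCH_FRAME_E1CRC };

struct frame_cfg {
	enum frame_mode mode;
	uint8_t cdrc_cfg;	/* 0x80 enables AMI line decoding */
	uint8_t t1_xbas_cfg;	/* bit 5 = B8ZS, bit 4 = ESF */
	uint8_t t1_frmr_cfg;	/* bits 4/5 = ESF/ESFFA */
};

static const struct frame_cfg cfg_table[] = {
	{ SKETCH_FRAME_SF,  0x00, 0x20, 0x00 },	/* T1 SF,  B8ZS */
	{ SKETCH_FRAME_ESF, 0x00, 0x30, 0x30 },	/* T1 ESF, B8ZS */
};

/* Stand-in for pci_write_32((u_int32_t *) &comet-><reg>, val). */
static void write_reg(const char *name, uint8_t val)
{
	printf("write %-12s <= 0x%02x\n", name, val);
}

static int apply_frame_mode(enum frame_mode mode)
{
	for (size_t i = 0; i < sizeof(cfg_table) / sizeof(cfg_table[0]); i++) {
		if (cfg_table[i].mode != mode)
			continue;
		write_reg("cdrc_cfg",    cfg_table[i].cdrc_cfg);
		write_reg("t1_xbas_cfg", cfg_table[i].t1_xbas_cfg);
		write_reg("t1_frmr_cfg", cfg_table[i].t1_frmr_cfg);
		return 0;
	}
	return -1;	/* unknown mode */
}

int main(void)
{
	return apply_frame_mode(SKETCH_FRAME_ESF);
}

A table like this keeps the mode-to-register mapping in one place; the driver's switch does the same job, just spelled out write by write.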
/*
@@ -381,16 +400,16 @@ init_comet (void *ci, comet_t * comet, u_int32_t port_mode, int clockmaster,
** Write the data to the Pulse Waveform Storage Data register.
** Returns: Nothing
*/
-STATIC void
-WrtXmtWaveform (ci_t * ci, comet_t * comet, u_int32_t sample, u_int32_t unit, u_int8_t data)
+static void
+WrtXmtWaveform(ci_t *ci, comet_t *comet, u_int32_t sample, u_int32_t unit, u_int8_t data)
{
- u_int8_t WaveformAddr;
+ u_int8_t WaveformAddr;
- WaveformAddr = (sample << 3) + (unit & 7);
- pci_write_32 ((u_int32_t *) &comet->xlpg_pwave_addr, WaveformAddr);
- pci_flush_write (ci); /* for write order preservation when
- * Optimizing driver */
- pci_write_32 ((u_int32_t *) &comet->xlpg_pwave_data, 0x7F & data);
+ WaveformAddr = (sample << 3) + (unit & 7);
+ pci_write_32((u_int32_t *) &comet->xlpg_pwave_addr, WaveformAddr);
+ /* for write order preservation when Optimizing driver */
+ pci_flush_write(ci);
+ pci_write_32((u_int32_t *) &comet->xlpg_pwave_data, 0x7F & data);
}
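WrtXmtWaveform() packs the sample and unit indices into a single indirect address (sample in bits 7..3, unit in bits 2..0) and masks the data to 7 bits before writing it. A small sketch of just that packing, in plain C with no hardware access; the helper names are invented:

#include <stdio.h>
#include <stdint.h>

static uint8_t pwave_addr(uint32_t sample, uint32_t unit)
{
	return (uint8_t)((sample << 3) + (unit & 7));	/* sample: bits 7..3, unit: bits 2..0 */
}

static uint8_t pwave_data(uint8_t data)
{
	return data & 0x7F;				/* waveform data is 7 bits wide */
}

int main(void)
{
	/* e.g. sample 5, unit 2 -> address 0x2a; data 0xff is clamped to 0x7f */
	printf("addr=0x%02x data=0x%02x\n", pwave_addr(5, 2), pwave_data(0xff));
	return 0;
}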
/*
@@ -399,20 +418,20 @@ WrtXmtWaveform (ci_t * ci, comet_t * comet, u_int32_t sample, u_int32_t unit, u_
** for driving the transmitter DAC.
** Returns: Nothing
*/
-STATIC void
-WrtXmtWaveformTbl (ci_t * ci, comet_t * comet,
- u_int8_t table[COMET_NUM_SAMPLES][COMET_NUM_UNITS])
+static void
+WrtXmtWaveformTbl(ci_t *ci, comet_t *comet,
+ u_int8_t table[COMET_NUM_SAMPLES][COMET_NUM_UNITS])
{
- u_int32_t sample, unit;
+ u_int32_t sample, unit;
- for (sample = 0; sample < COMET_NUM_SAMPLES; sample++)
- {
- for (unit = 0; unit < COMET_NUM_UNITS; unit++)
- WrtXmtWaveform (ci, comet, sample, unit, table[sample][unit]);
- }
+ for (sample = 0; sample < COMET_NUM_SAMPLES; sample++)
+ {
+ for (unit = 0; unit < COMET_NUM_UNITS; unit++)
+ WrtXmtWaveform(ci, comet, sample, unit, table[sample][unit]);
+ }
/* Enable transmitter and set output amplitude */
- pci_write_32 ((u_int32_t *) &comet->xlpg_cfg, table[COMET_NUM_SAMPLES][0]);
+ pci_write_32((u_int32_t *) &comet->xlpg_cfg, table[COMET_NUM_SAMPLES][0]);
}
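WrtXmtWaveformTbl() implies a table with one extra row beyond COMET_NUM_SAMPLES: the first byte of that trailing row is written to xlpg_cfg to enable the transmitter and set the output amplitude. A hedged sketch of that layout follows; the dimensions and table contents here are made up for illustration.

#include <stdio.h>
#include <stdint.h>

#define NUM_SAMPLES 4	/* the driver uses COMET_NUM_SAMPLES */
#define NUM_UNITS   5	/* the driver uses COMET_NUM_UNITS   */

static const uint8_t table[NUM_SAMPLES + 1][NUM_UNITS] = {
	{ 0x00, 0x10, 0x20, 0x10, 0x00 },
	{ 0x20, 0x40, 0x60, 0x40, 0x20 },
	{ 0x10, 0x30, 0x50, 0x30, 0x10 },
	{ 0x00, 0x08, 0x10, 0x08, 0x00 },
	{ 0x98, 0x00, 0x00, 0x00, 0x00 },	/* trailing row: xlpg_cfg value in [0] */
};

int main(void)
{
	for (int s = 0; s < NUM_SAMPLES; s++)
		for (int u = 0; u < NUM_UNITS; u++)
			printf("sample %d unit %d -> 0x%02x\n", s, u, table[s][u]);

	printf("xlpg_cfg (enable/amplitude) -> 0x%02x\n", table[NUM_SAMPLES][0]);
	return 0;
}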
@@ -426,61 +445,61 @@ WrtXmtWaveformTbl (ci_t * ci, comet_t * comet,
** is coded with early setup of indirect address.
*/
-STATIC void
-WrtRcvEqualizerTbl (ci_t * ci, comet_t * comet, u_int32_t *table)
+static void
+WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table)
{
- u_int32_t ramaddr;
- volatile u_int32_t value;
-
- for (ramaddr = 0; ramaddr < 256; ramaddr++)
- {
- /*** the following lines are per Errata 7, 2.5 ***/
- {
- pci_write_32 ((u_int32_t *) &comet->rlps_eq_rwsel, 0x80); /* Set up for a read
- * operation */
- pci_flush_write (ci); /* for write order preservation when
- * Optimizing driver */
- pci_write_32 ((u_int32_t *) &comet->rlps_eq_iaddr, (u_int8_t) ramaddr); /* write the addr,
- * initiate a read */
- pci_flush_write (ci); /* for write order preservation when
- * Optimizing driver */
- /*
- * wait 3 line rate clock cycles to ensure address bits are
- * captured by T1/E1 clock
- */
- OS_uwait (4, "wret"); /* 683ns * 3 = 1366 ns, approx 2us (but
- * use 4us) */
- }
-
- value = *table++;
- pci_write_32 ((u_int32_t *) &comet->rlps_idata3, (u_int8_t) (value >> 24));
- pci_write_32 ((u_int32_t *) &comet->rlps_idata2, (u_int8_t) (value >> 16));
- pci_write_32 ((u_int32_t *) &comet->rlps_idata1, (u_int8_t) (value >> 8));
- pci_write_32 ((u_int32_t *) &comet->rlps_idata0, (u_int8_t) value);
- pci_flush_write (ci); /* for write order preservation when
- * Optimizing driver */
-
- /* Storing RAM address, causes RAM to be updated */
-
- pci_write_32 ((u_int32_t *) &comet->rlps_eq_rwsel, 0); /* Set up for a write
- * operation */
- pci_flush_write (ci); /* for write order preservation when
- * Optimizing driver */
- pci_write_32 ((u_int32_t *) &comet->rlps_eq_iaddr, (u_int8_t) ramaddr); /* write the addr,
- * initiate a read */
- pci_flush_write (ci); /* for write order preservation when
- * Optimizing driver */
- /*
- * wait 3 line rate clock cycles to ensure address bits are captured
- * by T1/E1 clock
- */
- OS_uwait (4, "wret"); /* 683ns * 3 = 1366 ns, approx 2us (but
- * use 4us) */
- }
-
- pci_write_32 ((u_int32_t *) &comet->rlps_eq_cfg, 0xCB); /* Enable Equalizer &
- * set it to use 256
- * periods */
+ u_int32_t ramaddr;
+ volatile u_int32_t value;
+
+ for (ramaddr = 0; ramaddr < 256; ramaddr++) {
+ /*** the following lines are per Errata 7, 2.5 ***/
+ {
+ /* Set up for a read operation */
+ pci_write_32((u_int32_t *) &comet->rlps_eq_rwsel, 0x80);
+ /* for write order preservation when Optimizing driver */
+ pci_flush_write(ci);
+ /* write the addr, initiate a read */
+ pci_write_32((u_int32_t *) &comet->rlps_eq_iaddr, (u_int8_t) ramaddr);
+ /* for write order preservation when Optimizing driver */
+ pci_flush_write(ci);
+ /*
+ * wait 3 line rate clock cycles to ensure address bits are
+ * captured by T1/E1 clock
+ */
+
+ /* 683ns * 3 = 1366 ns, approx 2us (but use 4us) */
+ OS_uwait(4, "wret");
+ }
+
+ value = *table++;
+ pci_write_32((u_int32_t *) &comet->rlps_idata3, (u_int8_t) (value >> 24));
+ pci_write_32((u_int32_t *) &comet->rlps_idata2, (u_int8_t) (value >> 16));
+ pci_write_32((u_int32_t *) &comet->rlps_idata1, (u_int8_t) (value >> 8));
+ pci_write_32((u_int32_t *) &comet->rlps_idata0, (u_int8_t) value);
+ /* for write order preservation when Optimizing driver */
+ pci_flush_write(ci);
+
+ /* Storing RAM address, causes RAM to be updated */
+
+ /* Set up for a write operation */
+ pci_write_32((u_int32_t *) &comet->rlps_eq_rwsel, 0);
+ /* for write order preservation when optimizing driver */
+ pci_flush_write(ci);
+ /* write the addr, initiate a read */
+ pci_write_32((u_int32_t *) &comet->rlps_eq_iaddr, (u_int8_t) ramaddr);
+ /* for write order preservation when optimizing driver */
+ pci_flush_write(ci);
+
+ /*
+ * wait 3 line rate clock cycles to ensure address bits are captured
+ * by T1/E1 clock
+ */
+ /* 683ns * 3 = 1366 ns, approx 2us (but use 4us) */
+ OS_uwait(4, "wret");
+ }
+
+ /* Enable Equalizer & set it to use 256 periods */
+ pci_write_32((u_int32_t *) &comet->rlps_eq_cfg, 0xCB);
}
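Each equalizer entry is loaded through an indirect interface: select read, write the RAM address (which starts a dummy read so the address is captured by the line clock), wait a few line-rate clocks, load the four data bytes MSB first, select write, then write the address again to commit. The following is an illustrative, self-contained model of that Errata 7, 2.5 sequence; the reg_write/flush/delay helpers are stand-ins for the driver's pci_write_32(), pci_flush_write(), and OS_uwait().

#include <stdio.h>
#include <stdint.h>

static void reg_write(const char *reg, uint8_t val) { printf("%s <= 0x%02x\n", reg, val); }
static void flush_posted_writes(void) { /* pci_flush_write() in the driver */ }
static void wait_line_clocks(void)    { /* OS_uwait(4, "wret") in the driver */ }

static void load_equalizer_entry(uint8_t ramaddr, uint32_t value)
{
	/* dummy read cycle so the address bits are captured by the T1/E1 clock */
	reg_write("rlps_eq_rwsel", 0x80);	/* set up for a read */
	flush_posted_writes();
	reg_write("rlps_eq_iaddr", ramaddr);	/* write the addr, initiates a read */
	flush_posted_writes();
	wait_line_clocks();

	/* load the 32-bit entry one byte at a time, MSB first */
	reg_write("rlps_idata3", (uint8_t)(value >> 24));
	reg_write("rlps_idata2", (uint8_t)(value >> 16));
	reg_write("rlps_idata1", (uint8_t)(value >> 8));
	reg_write("rlps_idata0", (uint8_t)value);
	flush_posted_writes();

	/* storing the RAM address again commits the data to RAM */
	reg_write("rlps_eq_rwsel", 0x00);	/* set up for a write */
	flush_posted_writes();
	reg_write("rlps_eq_iaddr", ramaddr);
	flush_posted_writes();
	wait_line_clocks();
}

int main(void)
{
	load_equalizer_entry(0, 0xCAFEF00D);	/* example address and value */
	return 0;
}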
@@ -490,10 +509,10 @@ WrtRcvEqualizerTbl (ci_t * ci, comet_t * comet, u_int32_t *table)
** Returns: Nothing
*/
-STATIC void
-SetPwrLevel (comet_t * comet)
+static void
+SetPwrLevel(comet_t *comet)
{
- volatile u_int32_t temp;
+ volatile u_int32_t temp;
/*
** Algorithm to Balance the Power Distribution of Ttip Tring
@@ -507,22 +526,20 @@ SetPwrLevel (comet_t * comet)
** Repeat these steps for register F5
** Write 0x01 to register F6
*/
- pci_write_32 ((u_int32_t *) &comet->xlpg_fdata_sel, 0x00); /* XLPG Fuse Data Select */
-
- pci_write_32 ((u_int32_t *) &comet->xlpg_atest_pctl, 0x01); /* XLPG Analog Test
- * Positive control */
- pci_write_32 ((u_int32_t *) &comet->xlpg_atest_pctl, 0x01);
-
- temp = pci_read_32 ((u_int32_t *) &comet->xlpg_atest_pctl) & 0xfe;
- pci_write_32 ((u_int32_t *) &comet->xlpg_atest_pctl, temp);
-
- pci_write_32 ((u_int32_t *) &comet->xlpg_atest_nctl, 0x01); /* XLPG Analog Test
- * Negative control */
- pci_write_32 ((u_int32_t *) &comet->xlpg_atest_nctl, 0x01);
-
- temp = pci_read_32 ((u_int32_t *) &comet->xlpg_atest_nctl) & 0xfe;
- pci_write_32 ((u_int32_t *) &comet->xlpg_atest_nctl, temp);
- pci_write_32 ((u_int32_t *) &comet->xlpg_fdata_sel, 0x01); /* XLPG */
+ /* XLPG Fuse Data Select */
+ pci_write_32((u_int32_t *) &comet->xlpg_fdata_sel, 0x00);
+ /* XLPG Analog Test Positive control */
+ pci_write_32((u_int32_t *) &comet->xlpg_atest_pctl, 0x01);
+ pci_write_32((u_int32_t *) &comet->xlpg_atest_pctl, 0x01);
+ temp = pci_read_32((u_int32_t *) &comet->xlpg_atest_pctl) & 0xfe;
+ pci_write_32((u_int32_t *) &comet->xlpg_atest_pctl, temp);
+ /* XLPG Analog Test Negative control */
+ pci_write_32((u_int32_t *) &comet->xlpg_atest_nctl, 0x01);
+ pci_write_32((u_int32_t *) &comet->xlpg_atest_nctl, 0x01);
+ temp = pci_read_32((u_int32_t *) &comet->xlpg_atest_nctl) & 0xfe;
+ pci_write_32((u_int32_t *) &comet->xlpg_atest_nctl, temp);
+ /* XLPG */
+ pci_write_32((u_int32_t *) &comet->xlpg_fdata_sel, 0x01);
}
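SetPwrLevel() follows the register sequence described in its comment block: write 0x01 to the analog-test control, read it back, clear bit 0 and write the result, repeat for the second control, then write 0x01 to the fuse-data select. A hedged sketch of that read-modify-write step with a fake register model (the names and register stand-in are invented):

#include <stdio.h>
#include <stdint.h>

static uint8_t fake_reg;			/* stands in for one COMET register */

static void reg_write(uint8_t val) { fake_reg = val; printf("write 0x%02x\n", val); }
static uint8_t reg_read(void)      { return fake_reg; }

static void balance_one_control(void)
{
	reg_write(0x01);			/* step 1: write 0x01 */
	reg_write(0x01);			/* the driver repeats the write */
	reg_write(reg_read() & 0xfe);		/* step 2: read back, clear bit 0, write */
}

int main(void)
{
	balance_one_control();			/* positive control (xlpg_atest_pctl) */
	balance_one_control();			/* negative control (xlpg_atest_nctl) */
	reg_write(0x01);			/* finally, xlpg_fdata_sel = 0x01 */
	return 0;
}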
@@ -534,34 +551,31 @@ SetPwrLevel (comet_t * comet)
** Returns: Nothing
*/
#if 0
-STATIC void
-SetCometOps (comet_t * comet)
+static void
+SetCometOps(comet_t *comet)
{
- volatile u_int8_t rd_value;
-
- if (comet == mConfig.C4Func1Base + (COMET0_OFFSET >> 2))
- {
- rd_value = (u_int8_t) pci_read_32 ((u_int32_t *) &comet->brif_cfg); /* read the BRIF
- * Configuration */
- rd_value &= ~0x20;
- pci_write_32 ((u_int32_t *) &comet->brif_cfg, (u_int32_t) rd_value);
-
- rd_value = (u_int8_t) pci_read_32 ((u_int32_t *) &comet->brif_fpcfg); /* read the BRIF Frame
- * Pulse Configuration */
- rd_value &= ~0x20;
- pci_write_32 ((u_int32_t *) &comet->brif_fpcfg, (u_int8_t) rd_value);
- } else
- {
- rd_value = (u_int8_t) pci_read_32 ((u_int32_t *) &comet->brif_cfg); /* read the BRIF
- * Configuration */
- rd_value |= 0x20;
- pci_write_32 ((u_int32_t *) &comet->brif_cfg, (u_int32_t) rd_value);
-
- rd_value = (u_int8_t) pci_read_32 ((u_int32_t *) &comet->brif_fpcfg); /* read the BRIF Frame
- * Pulse Configuration */
- rd_value |= 0x20;
- pci_write_32 ((u_int32_t *) &comet->brif_fpcfg, (u_int8_t) rd_value);
- }
+ volatile u_int8_t rd_value;
+
+ if (comet == mConfig.C4Func1Base + (COMET0_OFFSET >> 2))
+ {
+ /* read the BRIF Configuration */
+ rd_value = (u_int8_t) pci_read_32((u_int32_t *) &comet->brif_cfg);
+ rd_value &= ~0x20;
+ pci_write_32((u_int32_t *) &comet->brif_cfg, (u_int32_t) rd_value);
+ /* read the BRIF Frame Pulse Configuration */
+ rd_value = (u_int8_t) pci_read_32((u_int32_t *) &comet->brif_fpcfg);
+ rd_value &= ~0x20;
+ pci_write_32((u_int32_t *) &comet->brif_fpcfg, (u_int8_t) rd_value);
+ } else {
+ /* read the BRIF Configuration */
+ rd_value = (u_int8_t) pci_read_32((u_int32_t *) &comet->brif_cfg);
+ rd_value |= 0x20;
+ pci_write_32((u_int32_t *) &comet->brif_cfg, (u_int32_t) rd_value);
+ /* read the BRIF Frame Pulse Configuration */
+ rd_value = (u_int8_t) pci_read_32((u_int32_t *) &comet->brif_fpcfg);
+ rd_value |= 0x20;
+ pci_write_32((u_int32_t *) &comet->brif_fpcfg, (u_int8_t) rd_value);
+ }
}
#endif
diff --git a/drivers/staging/cxt1e1/functions.c b/drivers/staging/cxt1e1/functions.c
index d9a9aa3..d021b31 100644
--- a/drivers/staging/cxt1e1/functions.c
+++ b/drivers/staging/cxt1e1/functions.c
@@ -14,7 +14,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
@@ -24,13 +24,6 @@
#include "libsbew.h"
#include "pmcc4.h"
-
-#ifdef SBE_INCLUDE_SYMBOLS
-#define STATIC
-#else
-#define STATIC static
-#endif
-
#if defined(CONFIG_SBE_HDLC_V7) || defined(CONFIG_SBE_WAN256T3_HDLC_V7) || \
defined(CONFIG_SBE_HDLC_V7_MODULE) || defined(CONFIG_SBE_WAN256T3_HDLC_V7_MODULE)
#define _v7_hdlc_ 1
@@ -97,7 +90,7 @@ pci_write_32 (u_int32_t *p, u_int32_t v)
void
-pci_flush_write (ci_t * ci)
+pci_flush_write (ci_t *ci)
{
volatile u_int32_t v;
@@ -111,7 +104,7 @@ pci_flush_write (ci_t * ci)
}
-STATIC void
+static void
watchdog_func (unsigned long arg)
{
struct watchdog *wd = (void *) arg;
@@ -202,7 +195,7 @@ sd_line_is_ok (void *user)
{
struct net_device *ndev = (struct net_device *) user;
- return (netif_carrier_ok (ndev));
+ return netif_carrier_ok (ndev);
}
void
@@ -246,7 +239,7 @@ sd_queue_stopped (void *user)
{
struct net_device *ndev = (struct net_device *) user;
- return (netif_queue_stopped (ndev));
+ return netif_queue_stopped (ndev);
}
void sd_recv_consume(void *token, size_t len, void *user)
@@ -279,7 +272,7 @@ VMETRO_TRACE (void *x)
void
-VMETRO_TRIGGER (ci_t * ci, int x)
+VMETRO_TRIGGER (ci_t *ci, int x)
{
comet_t *comet;
volatile u_int32_t data;
diff --git a/drivers/staging/cxt1e1/hwprobe.c b/drivers/staging/cxt1e1/hwprobe.c
index de8ac0b..53e9237 100644
--- a/drivers/staging/cxt1e1/hwprobe.c
+++ b/drivers/staging/cxt1e1/hwprobe.c
@@ -31,12 +31,6 @@
#include "sbeproc.h"
#endif
-#ifdef SBE_INCLUDE_SYMBOLS
-#define STATIC
-#else
-#define STATIC static
-#endif
-
extern int cxt1e1_log_level;
extern int error_flag;
extern int drvr_state;
@@ -50,7 +44,7 @@ struct s_hdw_info hdw_info[MAX_BOARDS];
void __init
-show_two (hdw_info_t * hi, int brdno)
+show_two (hdw_info_t *hi, int brdno)
{
ci_t *ci;
struct pci_dev *pdev;
@@ -102,7 +96,7 @@ show_two (hdw_info_t * hi, int brdno)
void __init
-hdw_sn_get (hdw_info_t * hi, int brdno)
+hdw_sn_get (hdw_info_t *hi, int brdno)
{
/* obtain hardware EEPROM information */
long addr;
@@ -221,8 +215,8 @@ cleanup_devs (void)
}
-STATIC int __init
-c4_hdw_init (struct pci_dev * pdev, int found)
+static int __init
+c4_hdw_init (struct pci_dev *pdev, int found)
{
hdw_info_t *hi;
int i;
diff --git a/drivers/staging/cxt1e1/linux.c b/drivers/staging/cxt1e1/linux.c
index a829b62..142691c 100644
--- a/drivers/staging/cxt1e1/linux.c
+++ b/drivers/staging/cxt1e1/linux.c
@@ -52,12 +52,6 @@
/*****************************************************************************************/
-#ifdef SBE_INCLUDE_SYMBOLS
-#define STATIC
-#else
-#define STATIC static
-#endif
-
#define CHANNAME "hdlc"
/*******************************************************************/
@@ -144,7 +138,7 @@ getuserbychan (int channum)
char *
-get_hdlc_name (hdlc_device * hdlc)
+get_hdlc_name (hdlc_device *hdlc)
{
struct c4_priv *priv = hdlc->priv;
struct net_device *dev = getuserbychan (priv->channum);
@@ -185,7 +179,7 @@ mkret (int bsd)
* within a port's group.
*/
void
-c4_wk_chan_restart (mch_t * ch)
+c4_wk_chan_restart (mch_t *ch)
{
mpi_t *pi = ch->up;
@@ -203,7 +197,7 @@ c4_wk_chan_restart (mch_t * ch)
}
status_t
-c4_wk_chan_init (mpi_t * pi, mch_t * ch)
+c4_wk_chan_init (mpi_t *pi, mch_t *ch)
{
/*
* this will be used to restart a stopped channel
@@ -218,7 +212,7 @@ c4_wk_chan_init (mpi_t * pi, mch_t * ch)
}
status_t
-c4_wq_port_init (mpi_t * pi)
+c4_wq_port_init (mpi_t *pi)
{
char name[16], *np; /* NOTE: name of the queue limited by system
@@ -241,7 +235,7 @@ c4_wq_port_init (mpi_t * pi)
}
void
-c4_wq_port_cleanup (mpi_t * pi)
+c4_wq_port_cleanup (mpi_t *pi)
{
/*
* PORT POINT: cannot call this if WQ is statically allocated w/in
@@ -278,15 +272,15 @@ c4_ebus_interrupt (int irq, void *dev_instance)
static int
-void_open (struct net_device * ndev)
+void_open (struct net_device *ndev)
{
pr_info("%s: trying to open master device !\n", ndev->name);
return -1;
}
-STATIC int
-chan_open (struct net_device * ndev)
+static int
+chan_open (struct net_device *ndev)
{
hdlc_device *hdlc = dev_to_hdlc (ndev);
const struct c4_priv *priv = hdlc->priv;
@@ -305,8 +299,8 @@ chan_open (struct net_device * ndev)
}
-STATIC int
-chan_close (struct net_device * ndev)
+static int
+chan_close (struct net_device *ndev)
{
hdlc_device *hdlc = dev_to_hdlc (ndev);
const struct c4_priv *priv = hdlc->priv;
@@ -319,23 +313,23 @@ chan_close (struct net_device * ndev)
}
-STATIC int
-chan_dev_ioctl (struct net_device * dev, struct ifreq * ifr, int cmd)
+static int
+chan_dev_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
{
return hdlc_ioctl (dev, ifr, cmd);
}
-STATIC int
-chan_attach_noop (struct net_device * ndev, unsigned short foo_1, unsigned short foo_2)
+static int
+chan_attach_noop (struct net_device *ndev, unsigned short foo_1, unsigned short foo_2)
{
return 0; /* our driver has nothing to do here, show's
* over, go home */
}
-STATIC struct net_device_stats *
-chan_get_stats (struct net_device * ndev)
+static struct net_device_stats *
+chan_get_stats (struct net_device *ndev)
{
mch_t *ch;
struct net_device_stats *nstats;
@@ -388,14 +382,14 @@ chan_get_stats (struct net_device * ndev)
static ci_t *
-get_ci_by_dev (struct net_device * ndev)
+get_ci_by_dev (struct net_device *ndev)
{
return (ci_t *)(netdev_priv(ndev));
}
-STATIC int
-c4_linux_xmit (struct sk_buff * skb, struct net_device * ndev)
+static int
+c4_linux_xmit (struct sk_buff *skb, struct net_device *ndev)
{
const struct c4_priv *priv;
int rval;
@@ -416,9 +410,9 @@ static const struct net_device_ops chan_ops = {
.ndo_get_stats = chan_get_stats,
};
-STATIC struct net_device *
-create_chan (struct net_device * ndev, ci_t * ci,
- struct sbecom_chan_param * cp)
+static struct net_device *
+create_chan (struct net_device *ndev, ci_t *ci,
+ struct sbecom_chan_param *cp)
{
hdlc_device *hdlc;
struct net_device *dev;
@@ -509,8 +503,8 @@ create_chan (struct net_device * ndev, ci_t * ci,
/* the idea here is to get port information and pass it back (using pointer) */
-STATIC status_t
-do_get_port (struct net_device * ndev, void *data)
+static status_t
+do_get_port (struct net_device *ndev, void *data)
{
int ret;
ci_t *ci; /* ci stands for card information */
@@ -534,8 +528,8 @@ do_get_port (struct net_device * ndev, void *data)
}
/* this function copys the user data and then calls the real action function */
-STATIC status_t
-do_set_port (struct net_device * ndev, void *data)
+static status_t
+do_set_port (struct net_device *ndev, void *data)
{
ci_t *ci; /* ci stands for card information */
struct sbecom_port_param pp;/* copy data to kernel land */
@@ -556,8 +550,8 @@ do_set_port (struct net_device * ndev, void *data)
}
/* work the port loopback mode as per directed */
-STATIC status_t
-do_port_loop (struct net_device * ndev, void *data)
+static status_t
+do_port_loop (struct net_device *ndev, void *data)
{
struct sbecom_port_param pp;
ci_t *ci;
@@ -571,8 +565,8 @@ do_port_loop (struct net_device * ndev, void *data)
}
/* set the specified register with the given value / or just read it */
-STATIC status_t
-do_framer_rw (struct net_device * ndev, void *data)
+static status_t
+do_framer_rw (struct net_device *ndev, void *data)
{
struct sbecom_port_param pp;
ci_t *ci;
@@ -592,8 +586,8 @@ do_framer_rw (struct net_device * ndev, void *data)
}
/* set the specified register with the given value / or just read it */
-STATIC status_t
-do_pld_rw (struct net_device * ndev, void *data)
+static status_t
+do_pld_rw (struct net_device *ndev, void *data)
{
struct sbecom_port_param pp;
ci_t *ci;
@@ -613,8 +607,8 @@ do_pld_rw (struct net_device * ndev, void *data)
}
/* set the specified register with the given value / or just read it */
-STATIC status_t
-do_musycc_rw (struct net_device * ndev, void *data)
+static status_t
+do_musycc_rw (struct net_device *ndev, void *data)
{
struct c4_musycc_param mp;
ci_t *ci;
@@ -633,8 +627,8 @@ do_musycc_rw (struct net_device * ndev, void *data)
return 0;
}
-STATIC status_t
-do_get_chan (struct net_device * ndev, void *data)
+static status_t
+do_get_chan (struct net_device *ndev, void *data)
{
struct sbecom_chan_param cp;
int ret;
@@ -651,8 +645,8 @@ do_get_chan (struct net_device * ndev, void *data)
return 0;
}
-STATIC status_t
-do_set_chan (struct net_device * ndev, void *data)
+static status_t
+do_set_chan (struct net_device *ndev, void *data)
{
struct sbecom_chan_param cp;
int ret;
@@ -672,8 +666,8 @@ do_set_chan (struct net_device * ndev, void *data)
}
}
-STATIC status_t
-do_create_chan (struct net_device * ndev, void *data)
+static status_t
+do_create_chan (struct net_device *ndev, void *data)
{
ci_t *ci;
struct net_device *dev;
@@ -699,8 +693,8 @@ do_create_chan (struct net_device * ndev, void *data)
return ret;
}
-STATIC status_t
-do_get_chan_stats (struct net_device * ndev, void *data)
+static status_t
+do_get_chan_stats (struct net_device *ndev, void *data)
{
struct c4_chan_stats_wrap ccs;
int ret;
@@ -720,8 +714,8 @@ do_get_chan_stats (struct net_device * ndev, void *data)
return -EFAULT;
return 0;
}
-STATIC status_t
-do_set_loglevel (struct net_device * ndev, void *data)
+static status_t
+do_set_loglevel (struct net_device *ndev, void *data)
{
unsigned int cxt1e1_log_level;
@@ -731,8 +725,8 @@ do_set_loglevel (struct net_device * ndev, void *data)
return 0;
}
-STATIC status_t
-do_deluser (struct net_device * ndev, int lockit)
+static status_t
+do_deluser (struct net_device *ndev, int lockit)
{
if (ndev->flags & IFF_UP)
return -EBUSY;
@@ -763,7 +757,7 @@ do_deluser (struct net_device * ndev, int lockit)
}
int
-do_del_chan (struct net_device * musycc_dev, void *data)
+do_del_chan (struct net_device *musycc_dev, void *data)
{
struct sbecom_chan_param cp;
char buf[sizeof (CHANNAME) + 3];
@@ -787,7 +781,7 @@ do_del_chan (struct net_device * musycc_dev, void *data)
int c4_reset_board (void *);
int
-do_reset (struct net_device * musycc_dev, void *data)
+do_reset (struct net_device *musycc_dev, void *data)
{
const struct c4_priv *priv;
int i;
@@ -816,7 +810,7 @@ do_reset (struct net_device * musycc_dev, void *data)
}
int
-do_reset_chan_stats (struct net_device * musycc_dev, void *data)
+do_reset_chan_stats (struct net_device *musycc_dev, void *data)
{
struct sbecom_chan_param cp;
@@ -826,8 +820,8 @@ do_reset_chan_stats (struct net_device * musycc_dev, void *data)
return mkret (c4_del_chan_stats (cp.channum));
}
-STATIC status_t
-c4_ioctl (struct net_device * ndev, struct ifreq * ifr, int cmd)
+static status_t
+c4_ioctl (struct net_device *ndev, struct ifreq *ifr, int cmd)
{
ci_t *ci;
void *data;
@@ -954,7 +948,7 @@ static void c4_setup(struct net_device *dev)
}
struct net_device *__init
-c4_add_dev (hdw_info_t * hi, int brdno, unsigned long f0, unsigned long f1,
+c4_add_dev (hdw_info_t *hi, int brdno, unsigned long f0, unsigned long f1,
int irq0, int irq1)
{
struct net_device *ndev;
@@ -1102,7 +1096,7 @@ c4_add_dev (hdw_info_t * hi, int brdno, unsigned long f0, unsigned long f1,
return ndev;
}
-STATIC int __init
+static int __init
c4_mod_init (void)
{
int rtn;
@@ -1144,7 +1138,7 @@ c4_mod_init (void)
* do_deluser()
*/
-STATIC void __exit
+static void __exit
cleanup_hdlc (void)
{
hdw_info_t *hi;
@@ -1168,7 +1162,7 @@ cleanup_hdlc (void)
}
-STATIC void __exit
+static void __exit
c4_mod_remove (void)
{
cleanup_hdlc(); /* delete any missed channels */
diff --git a/drivers/staging/cxt1e1/musycc.c b/drivers/staging/cxt1e1/musycc.c
index b2cc68a..52b6d7f 100644
--- a/drivers/staging/cxt1e1/musycc.c
+++ b/drivers/staging/cxt1e1/musycc.c
@@ -35,12 +35,6 @@ unsigned int max_bh = 0;
#include "pmcc4.h"
#include "musycc.h"
-#ifdef SBE_INCLUDE_SYMBOLS
-#define STATIC
-#else
-#define STATIC static
-#endif
-
#define sd_find_chan(ci,ch) c4_find_chan(ch)
@@ -65,7 +59,6 @@ void c4_wk_chan_restart(mch_t *);
void musycc_bh_tx_eom(mpi_t *, int);
int musycc_chan_up(ci_t *, int);
status_t __init musycc_init(ci_t *);
-STATIC void __init musycc_init_port(mpi_t *);
void musycc_intr_bh_tasklet(ci_t *);
void musycc_serv_req(mpi_t *, u_int32_t);
void musycc_update_timeslots(mpi_t *);
@@ -73,8 +66,8 @@ void musycc_update_timeslots(mpi_t *);
/*******************************************************************/
#if 1
-STATIC int
-musycc_dump_rxbuffer_ring(mch_t * ch, int lockit)
+static int
+musycc_dump_rxbuffer_ring(mch_t *ch, int lockit)
{
struct mdesc *m;
unsigned long flags = 0;
@@ -139,8 +132,8 @@ musycc_dump_rxbuffer_ring(mch_t * ch, int lockit)
#endif
#if 1
-STATIC int
-musycc_dump_txbuffer_ring(mch_t * ch, int lockit)
+static int
+musycc_dump_txbuffer_ring(mch_t *ch, int lockit)
{
struct mdesc *m;
unsigned long flags = 0;
@@ -205,7 +198,7 @@ musycc_dump_txbuffer_ring(mch_t * ch, int lockit)
*/
status_t
-musycc_dump_ring(ci_t * ci, unsigned int chan)
+musycc_dump_ring(ci_t *ci, unsigned int chan)
{
mch_t *ch;
@@ -248,7 +241,7 @@ musycc_dump_ring(ci_t * ci, unsigned int chan)
status_t
-musycc_dump_rings(ci_t * ci, unsigned int start_chan)
+musycc_dump_rings(ci_t *ci, unsigned int start_chan)
{
unsigned int chan;
@@ -264,7 +257,7 @@ musycc_dump_rings(ci_t * ci, unsigned int start_chan)
*/
void
-musycc_init_mdt(mpi_t * pi)
+musycc_init_mdt(mpi_t *pi)
{
u_int32_t *addr, cfg;
int i;
@@ -288,7 +281,7 @@ musycc_init_mdt(mpi_t * pi)
/* Set TX thp to the next unprocessed md */
void
-musycc_update_tx_thp(mch_t * ch)
+musycc_update_tx_thp(mch_t *ch)
{
struct mdesc *md;
unsigned long flags;
@@ -443,7 +436,7 @@ musycc_wq_chan_restart(void *arg) /* channel private structure */
*/
void
-musycc_chan_restart(mch_t * ch)
+musycc_chan_restart(mch_t *ch)
{
#ifdef RLD_RESTART_DEBUG
pr_info("++ musycc_chan_restart[%d]: txd_irq_srv @ %p = sts %x\n",
@@ -461,7 +454,7 @@ musycc_chan_restart(mch_t * ch)
void
-rld_put_led(mpi_t * pi, u_int32_t ledval)
+rld_put_led(mpi_t *pi, u_int32_t ledval)
{
static u_int32_t led = 0;
@@ -477,7 +470,7 @@ rld_put_led(mpi_t * pi, u_int32_t ledval)
#define MUSYCC_SR_RETRY_CNT 9
void
-musycc_serv_req(mpi_t * pi, u_int32_t req)
+musycc_serv_req(mpi_t *pi, u_int32_t req)
{
volatile u_int32_t r;
int rcnt;
@@ -578,7 +571,7 @@ rewrite:
#ifdef SBE_PMCC4_ENABLE
void
-musycc_update_timeslots(mpi_t * pi)
+musycc_update_timeslots(mpi_t *pi)
{
int i, ch;
char e1mode = IS_FRAME_ANY_E1(pi->p.port_mode);
@@ -640,7 +633,7 @@ musycc_update_timeslots(mpi_t * pi)
#ifdef SBE_WAN256T3_ENABLE
void
-musycc_update_timeslots(mpi_t * pi)
+musycc_update_timeslots(mpi_t *pi)
{
mch_t *ch;
@@ -702,8 +695,8 @@ musycc_chan_proto(int proto)
}
#ifdef SBE_WAN256T3_ENABLE
-STATIC void __init
-musycc_init_port(mpi_t * pi)
+static void __init
+musycc_init_port(mpi_t *pi)
{
pci_write_32((u_int32_t *) &pi->reg->gbp, OS_vtophys(pi->regram));
@@ -737,7 +730,7 @@ musycc_init_port(mpi_t * pi)
status_t __init
-musycc_init(ci_t * ci)
+musycc_init(ci_t *ci)
{
char *regaddr; /* temp for address boundary calculations */
int i, gchan;
@@ -832,7 +825,7 @@ musycc_init(ci_t * ci)
void
-musycc_bh_tx_eom(mpi_t * pi, int gchan)
+musycc_bh_tx_eom(mpi_t *pi, int gchan)
{
mch_t *ch;
struct mdesc *md;
@@ -1009,8 +1002,8 @@ musycc_bh_tx_eom(mpi_t * pi, int gchan)
}
-STATIC void
-musycc_bh_rx_eom(mpi_t * pi, int gchan)
+static void
+musycc_bh_rx_eom(mpi_t *pi, int gchan)
{
mch_t *ch;
void *m, *m2;
@@ -1229,7 +1222,7 @@ unsigned long
#else
void
#endif
-musycc_intr_bh_tasklet(ci_t * ci)
+musycc_intr_bh_tasklet(ci_t *ci)
{
mpi_t *pi;
mch_t *ch;
@@ -1517,7 +1510,7 @@ musycc_intr_bh_tasklet(ci_t * ci)
#if 0
int __init
-musycc_new_chan(ci_t * ci, int channum, void *user)
+musycc_new_chan(ci_t *ci, int channum, void *user)
{
mch_t *ch;
@@ -1546,7 +1539,7 @@ musycc_new_chan(ci_t * ci, int channum, void *user)
#ifdef SBE_PMCC4_ENABLE
status_t
-musycc_chan_down(ci_t * dummy, int channum)
+musycc_chan_down(ci_t *dummy, int channum)
{
mpi_t *pi;
mch_t *ch;
@@ -1597,7 +1590,7 @@ musycc_chan_down(ci_t * dummy, int channum)
int
-musycc_del_chan(ci_t * ci, int channum)
+musycc_del_chan(ci_t *ci, int channum)
{
mch_t *ch;
@@ -1613,7 +1606,7 @@ musycc_del_chan(ci_t * ci, int channum)
int
-musycc_del_chan_stats(ci_t * ci, int channum)
+musycc_del_chan_stats(ci_t *ci, int channum)
{
mch_t *ch;
@@ -1628,7 +1621,7 @@ musycc_del_chan_stats(ci_t * ci, int channum)
int
-musycc_start_xmit(ci_t * ci, int channum, void *mem_token)
+musycc_start_xmit(ci_t *ci, int channum, void *mem_token)
{
mch_t *ch;
struct mdesc *md;
diff --git a/drivers/staging/cxt1e1/pmc93x6_eeprom.c b/drivers/staging/cxt1e1/pmc93x6_eeprom.c
index 62b12fb..137b63c 100644
--- a/drivers/staging/cxt1e1/pmc93x6_eeprom.c
+++ b/drivers/staging/cxt1e1/pmc93x6_eeprom.c
@@ -34,13 +34,6 @@
#define FALSE 0
#endif
-#ifdef SBE_INCLUDE_SYMBOLS
-#define STATIC
-#else
-#define STATIC static
-#endif
-
-
/*------------------------------------------------------------------------
* EEPROM address definitions
*------------------------------------------------------------------------
@@ -120,7 +113,7 @@ short mfg_template[sizeof (FLD_TYPE2)] =
* (the MSB becomes the LSB etc.).
*/
-STATIC void
+static void
BuildByteReverse (void)
{
long half; /* Used to build by powers to 2 */
@@ -141,7 +134,7 @@ BuildByteReverse (void)
*------------------------------------------------------------------------
*/
-STATIC void
+static void
eeprom_delay (void)
{
int timeout;
@@ -224,7 +217,7 @@ eeprom_get_byte (long addr)
* Issue the EEPROM command to disable writes.
*/
-STATIC void
+static void
disable_pmc_eeprom (long addr)
{
eeprom_put_byte (addr, EPROM_EWDS, SIZE_ADDR_OP);
@@ -241,7 +234,7 @@ disable_pmc_eeprom (long addr)
* Issue the EEPROM command to enable writes.
*/
-STATIC void
+static void
enable_pmc_eeprom (long addr)
{
eeprom_put_byte (addr, EPROM_EWEN, SIZE_ADDR_OP);
diff --git a/drivers/staging/cxt1e1/pmcc4.h b/drivers/staging/cxt1e1/pmcc4.h
index b0ed4ad..003eb86 100644
--- a/drivers/staging/cxt1e1/pmcc4.h
+++ b/drivers/staging/cxt1e1/pmcc4.h
@@ -85,15 +85,15 @@ void c4_cleanup (void);
status_t c4_chan_up (ci_t *, int channum);
status_t c4_del_chan_stats (int channum);
status_t c4_del_chan (int channum);
-status_t c4_get_iidinfo (ci_t * ci, struct sbe_iid_info * iip);
+status_t c4_get_iidinfo (ci_t *ci, struct sbe_iid_info *iip);
int c4_is_chan_up (int channum);
void *getuserbychan (int channum);
-void pci_flush_write (ci_t * ci);
+void pci_flush_write (ci_t *ci);
void sbecom_set_loglevel (int debuglevel);
-char *sbeid_get_bdname (ci_t * ci);
-void sbeid_set_bdtype (ci_t * ci);
-void sbeid_set_hdwbid (ci_t * ci);
+char *sbeid_get_bdname (ci_t *ci);
+void sbeid_set_bdtype (ci_t *ci);
+void sbeid_set_hdwbid (ci_t *ci);
u_int32_t sbeCrc (u_int8_t *, u_int32_t, u_int32_t, u_int32_t *);
void VMETRO_TRACE (void *); /* put data into 8 LEDs */
diff --git a/drivers/staging/cxt1e1/pmcc4_drv.c b/drivers/staging/cxt1e1/pmcc4_drv.c
index 8d8a22b..2383c60 100644
--- a/drivers/staging/cxt1e1/pmcc4_drv.c
+++ b/drivers/staging/cxt1e1/pmcc4_drv.c
@@ -28,7 +28,7 @@
#include <linux/sched.h> /* include for timer */
#include <linux/timer.h> /* include for timer */
#include <linux/hdlc.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include "sbecom_inline_linux.h"
#include "libsbew.h"
@@ -39,13 +39,6 @@
#include "comet.h"
#include "sbe_bid.h"
-#ifdef SBE_INCLUDE_SYMBOLS
-#define STATIC
-#else
-#define STATIC static
-#endif
-
-
#define KERN_WARN KERN_WARNING
/* forward references */
@@ -123,7 +116,7 @@ c4_find_chan (int channum)
{
if ((ch->state != UNASSIGNED) &&
(ch->channum == channum))
- return (ch);
+ return ch;
}
}
return 0;
@@ -193,7 +186,7 @@ c4_new (void *hi)
#define COMET_LBCMD_READ 0x80 /* read only (do not set, return read value) */
void
-checkPorts (ci_t * ci)
+checkPorts (ci_t *ci)
{
#ifndef CONFIG_SBE_PMCC4_NCOMM
/*
@@ -458,8 +451,8 @@ checkPorts (ci_t * ci)
}
-STATIC void
-c4_watchdog (ci_t * ci)
+static void
+c4_watchdog (ci_t *ci)
{
if (drvr_state != SBE_DRVR_AVAILABLE)
{
@@ -512,7 +505,7 @@ c4_cleanup (void)
*/
int
-c4_get_portcfg (ci_t * ci)
+c4_get_portcfg (ci_t *ci)
{
comet_t *comet;
int portnum, mask;
@@ -536,7 +529,7 @@ c4_get_portcfg (ci_t * ci)
/* nothing herein should generate interrupts */
status_t __init
-c4_init (ci_t * ci, u_char *func0, u_char *func1)
+c4_init (ci_t *ci, u_char *func0, u_char *func1)
{
mpi_t *pi;
mch_t *ch;
@@ -670,7 +663,7 @@ c4_init (ci_t * ci, u_char *func0, u_char *func1)
/* better be fully setup to handle interrupts when you call this */
status_t __init
-c4_init2 (ci_t * ci)
+c4_init2 (ci_t *ci)
{
status_t ret;
@@ -698,7 +691,7 @@ c4_init2 (ci_t * ci)
/* This function sets the loopback mode (or clears it, as the case may be). */
int
-c4_loop_port (ci_t * ci, int portnum, u_int8_t cmd)
+c4_loop_port (ci_t *ci, int portnum, u_int8_t cmd)
{
comet_t *comet;
volatile u_int32_t loopValue;
@@ -757,7 +750,7 @@ c4_loop_port (ci_t * ci, int portnum, u_int8_t cmd)
*/
status_t
-c4_frame_rw (ci_t * ci, struct sbecom_port_param * pp)
+c4_frame_rw (ci_t *ci, struct sbecom_port_param *pp)
{
comet_t *comet;
volatile u_int32_t data;
@@ -796,7 +789,7 @@ c4_frame_rw (ci_t * ci, struct sbecom_port_param * pp)
*/
status_t
-c4_pld_rw (ci_t * ci, struct sbecom_port_param * pp)
+c4_pld_rw (ci_t *ci, struct sbecom_port_param *pp)
{
volatile u_int32_t *regaddr;
volatile u_int32_t data;
@@ -834,7 +827,7 @@ c4_pld_rw (ci_t * ci, struct sbecom_port_param * pp)
*/
status_t
-c4_musycc_rw (ci_t * ci, struct c4_musycc_param * mcp)
+c4_musycc_rw (ci_t *ci, struct c4_musycc_param *mcp)
{
mpi_t *pi;
volatile u_int32_t *dph; /* hardware implemented register */
@@ -898,7 +891,7 @@ c4_musycc_rw (ci_t * ci, struct c4_musycc_param * mcp)
}
status_t
-c4_get_port (ci_t * ci, int portnum)
+c4_get_port (ci_t *ci, int portnum)
{
if (portnum >= ci->max_port) /* sanity check */
return ENXIO;
@@ -913,7 +906,7 @@ c4_get_port (ci_t * ci, int portnum)
}
status_t
-c4_set_port (ci_t * ci, int portnum)
+c4_set_port (ci_t *ci, int portnum)
{
mpi_t *pi;
struct sbecom_port_param *pp;
@@ -942,7 +935,7 @@ c4_set_port (ci_t * ci, int portnum)
if ((ret = c4_wq_port_init (pi))) /* create/init
* workqueue_struct */
- return (ret);
+ return ret;
}
init_comet (ci, pi->cometbase, pp->port_mode, 1 /* clockmaster == true */ , pp->portP);
@@ -1018,7 +1011,7 @@ c4_set_port (ci_t * ci, int portnum)
unsigned int max_int = 0;
status_t
-c4_new_chan (ci_t * ci, int portnum, int channum, void *user)
+c4_new_chan (ci_t *ci, int portnum, int channum, void *user)
{
mpi_t *pi;
mch_t *ch;
@@ -1111,7 +1104,7 @@ c4_del_chan_stats (int channum)
status_t
-c4_set_chan (int channum, struct sbecom_chan_param * p)
+c4_set_chan (int channum, struct sbecom_chan_param *p)
{
mch_t *ch;
int i, x = 0;
@@ -1162,7 +1155,7 @@ c4_set_chan (int channum, struct sbecom_chan_param * p)
status_t
-c4_get_chan (int channum, struct sbecom_chan_param * p)
+c4_get_chan (int channum, struct sbecom_chan_param *p)
{
mch_t *ch;
@@ -1173,7 +1166,7 @@ c4_get_chan (int channum, struct sbecom_chan_param * p)
}
status_t
-c4_get_chan_stats (int channum, struct sbecom_chan_stats * p)
+c4_get_chan_stats (int channum, struct sbecom_chan_stats *p)
{
mch_t *ch;
@@ -1184,8 +1177,8 @@ c4_get_chan_stats (int channum, struct sbecom_chan_stats * p)
return 0;
}
-STATIC int
-c4_fifo_alloc (mpi_t * pi, int chan, int *len)
+static int
+c4_fifo_alloc (mpi_t *pi, int chan, int *len)
{
int i, l = 0, start = 0, max = 0, maxstart = 0;
@@ -1222,7 +1215,7 @@ c4_fifo_alloc (mpi_t * pi, int chan, int *len)
}
void
-c4_fifo_free (mpi_t * pi, int chan)
+c4_fifo_free (mpi_t *pi, int chan)
{
int i;
@@ -1236,7 +1229,7 @@ c4_fifo_free (mpi_t * pi, int chan)
status_t
-c4_chan_up (ci_t * ci, int channum)
+c4_chan_up (ci_t *ci, int channum)
{
mpi_t *pi;
mch_t *ch;
@@ -1467,7 +1460,7 @@ errfree:
/* stop the hardware from servicing & interrupting */
void
-c4_stopwd (ci_t * ci)
+c4_stopwd (ci_t *ci)
{
OS_stop_watchdog (&ci->wd);
SD_SEM_TAKE (&ci->sem_wdbusy, "_stop_"); /* ensure WD not running */
@@ -1476,7 +1469,7 @@ c4_stopwd (ci_t * ci)
void
-sbecom_get_brdinfo (ci_t * ci, struct sbe_brd_info * bip, u_int8_t *bsn)
+sbecom_get_brdinfo (ci_t *ci, struct sbe_brd_info *bip, u_int8_t *bsn)
{
char *np;
u_int32_t sn = 0;
@@ -1485,7 +1478,7 @@ sbecom_get_brdinfo (ci_t * ci, struct sbe_brd_info * bip, u_int8_t *bsn)
bip->brdno = ci->brdno; /* our board number */
bip->brd_id = ci->brd_id;
bip->brd_hdw_id = ci->hdw_bid;
- bip->brd_chan_cnt = MUSYCC_NCHANS * ci->max_port; /* number of channels
+ bip->brd_chan_cnt = MUSYCC_NCHANS * ci->max_port; /* number of channels
* being used */
bip->brd_port_cnt = ci->max_port; /* number of ports being used */
bip->brd_pci_speed = BINFO_PCI_SPEED_unk; /* PCI speed not yet
@@ -1535,7 +1528,7 @@ sbecom_get_brdinfo (ci_t * ci, struct sbe_brd_info * bip, u_int8_t *bsn)
status_t
-c4_get_iidinfo (ci_t * ci, struct sbe_iid_info * iip)
+c4_get_iidinfo (ci_t *ci, struct sbe_iid_info *iip)
{
struct net_device *dev;
char *np;
@@ -1624,7 +1617,7 @@ wanpmcC4T1E1_getBaseAddress (int cardID, int deviceID)
}
ci = ci->next; /* next board, if any */
}
- return (base);
+ return base;
}
#endif /*** CONFIG_SBE_PMCC4_NCOMM ***/
diff --git a/drivers/staging/cxt1e1/sbecom_inline_linux.h b/drivers/staging/cxt1e1/sbecom_inline_linux.h
index 68ed445..3c6d1c0 100644
--- a/drivers/staging/cxt1e1/sbecom_inline_linux.h
+++ b/drivers/staging/cxt1e1/sbecom_inline_linux.h
@@ -177,7 +177,7 @@ struct watchdog
static inline int
-OS_start_watchdog (struct watchdog * wd)
+OS_start_watchdog (struct watchdog *wd)
{
wd->h.expires = jiffies + wd->ticks;
add_timer (&wd->h);
@@ -186,7 +186,7 @@ OS_start_watchdog (struct watchdog * wd)
static inline int
-OS_stop_watchdog (struct watchdog * wd)
+OS_stop_watchdog (struct watchdog *wd)
{
del_timer_sync (&wd->h);
return 0;
@@ -194,7 +194,7 @@ OS_stop_watchdog (struct watchdog * wd)
static inline int
-OS_free_watchdog (struct watchdog * wd)
+OS_free_watchdog (struct watchdog *wd)
{
OS_stop_watchdog (wd);
OS_kfree (wd);
diff --git a/drivers/staging/cxt1e1/sbeid.c b/drivers/staging/cxt1e1/sbeid.c
index a2243b1..791993f 100644
--- a/drivers/staging/cxt1e1/sbeid.c
+++ b/drivers/staging/cxt1e1/sbeid.c
@@ -19,15 +19,8 @@
#include "pmcc4.h"
#include "sbe_bid.h"
-#ifdef SBE_INCLUDE_SYMBOLS
-#define STATIC
-#else
-#define STATIC static
-#endif
-
-
char *
-sbeid_get_bdname (ci_t * ci)
+sbeid_get_bdname (ci_t *ci)
{
char *np = 0;
@@ -73,7 +66,7 @@ sbeid_get_bdname (ci_t * ci)
/* given the presetting of brd_id, set the corresponding hdw_id */
void
-sbeid_set_hdwbid (ci_t * ci)
+sbeid_set_hdwbid (ci_t *ci)
{
/*
* set SBE's unique hardware identification (for legacy boards might not
@@ -170,7 +163,7 @@ sbeid_set_hdwbid (ci_t * ci)
/* given the presetting of hdw_bid, set the corresponding brd_id */
void
-sbeid_set_bdtype (ci_t * ci)
+sbeid_set_bdtype (ci_t *ci)
{
/* set SBE's unique PCI VENDOR/DEVID */
switch (ci->hdw_bid)
diff --git a/drivers/staging/cxt1e1/sbeproc.h b/drivers/staging/cxt1e1/sbeproc.h
index e5c072c..37285df 100644
--- a/drivers/staging/cxt1e1/sbeproc.h
+++ b/drivers/staging/cxt1e1/sbeproc.h
@@ -28,11 +28,11 @@ int __init sbecom_proc_brd_init (ci_t *);
#else
-static inline void sbecom_proc_brd_cleanup(ci_t * ci)
+static inline void sbecom_proc_brd_cleanup(ci_t *ci)
{
}
-static inline int __init sbecom_proc_brd_init(ci_t * ci)
+static inline int __init sbecom_proc_brd_init(ci_t *ci)
{
return 0;
}
diff --git a/drivers/staging/dgap/Kconfig b/drivers/staging/dgap/Kconfig
new file mode 100644
index 0000000..31f1d75
--- /dev/null
+++ b/drivers/staging/dgap/Kconfig
@@ -0,0 +1,6 @@
+config DGAP
+ tristate "Digi EPCA PCI products"
+ default n
+ depends on TTY
+ ---help---
+ Driver for the Digi International EPCA PCI based product line
diff --git a/drivers/staging/dgap/Makefile b/drivers/staging/dgap/Makefile
new file mode 100644
index 0000000..9f1fce1
--- /dev/null
+++ b/drivers/staging/dgap/Makefile
@@ -0,0 +1,9 @@
+EXTRA_CFLAGS += -DDG_NAME=\"dgap-1.3-16\" -DDG_PART=\"40002347_C\"
+
+obj-$(CONFIG_DGAP) += dgap.o
+
+
+dgap-objs := dgap_driver.o dgap_fep5.o \
+ dgap_parse.o dgap_trace.o \
+ dgap_tty.o dgap_sysfs.o
+
diff --git a/drivers/staging/dgap/dgap_conf.h b/drivers/staging/dgap/dgap_conf.h
new file mode 100644
index 0000000..8809701
--- /dev/null
+++ b/drivers/staging/dgap/dgap_conf.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************
+ *
+ * dgap_conf.h - Header file for installations and parse files.
+ *
+ * $Id: dgap_conf.h,v 1.1 2009/10/23 14:01:57 markh Exp $
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+#ifndef _DGAP_CONF_H
+#define _DGAP_CONF_H
+
+#define NULLNODE 0 /* header node, not used */
+#define BNODE 1 /* Board node */
+#define LNODE 2 /* Line node */
+#define CNODE 3 /* Concentrator node */
+#define MNODE 4 /* EBI Module node */
+#define TNODE 5 /* tty name prefix node */
+#define CUNODE 6 /* cu name prefix (non-SCO) */
+#define PNODE 7 /* trans. print prefix node */
+#define JNODE 8 /* maJor number node */
+#define ANODE 9 /* altpin */
+#define TSNODE 10 /* tty structure size */
+#define CSNODE 11 /* channel structure size */
+#define BSNODE 12 /* board structure size */
+#define USNODE 13 /* unit schedule structure size */
+#define FSNODE 14 /* f2200 structure size */
+#define VSNODE 15 /* size of VPIX structures */
+#define INTRNODE 16 /* enable interrupt */
+
+/* Enumeration of tokens */
+#define BEGIN 1
+#define END 2
+#define BOARD 10
+
+#define EPCFS 11 /* start of EPC family definitions */
+#define ICX 11
+#define MCX 13
+#define PCX 14
+#define IEPC 15
+#define EEPC 16
+#define MEPC 17
+#define IPCM 18
+#define EPCM 19
+#define MPCM 20
+#define PEPC 21
+#define PPCM 22
+#ifdef CP
+#define ICP 23
+#define ECP 24
+#define MCP 25
+#endif
+#define EPCFE 25 /* end of EPC family definitions */
+#define PC2E 26
+#define PC4E 27
+#define PC4E8K 28
+#define PC8E 29
+#define PC8E8K 30
+#define PC16E 31
+#define MC2E8K 34
+#define MC4E8K 35
+#define MC8E8K 36
+
+#define AVANFS 42 /* start of Avanstar family definitions */
+#define A8P 42
+#define A16P 43
+#define AVANFE 43 /* end of Avanstar family definitions */
+
+#define DA2000FS 44 /* start of AccelePort 2000 family definitions */
+#define DA22 44 /* AccelePort 2002 */
+#define DA24 45 /* AccelePort 2004 */
+#define DA28 46 /* AccelePort 2008 */
+#define DA216 47 /* AccelePort 2016 */
+#define DAR4 48 /* AccelePort RAS 4 port */
+#define DAR8 49 /* AccelePort RAS 8 port */
+#define DDR24 50 /* DataFire RAS 24 port */
+#define DDR30 51 /* DataFire RAS 30 port */
+#define DDR48 52 /* DataFire RAS 48 port */
+#define DDR60 53 /* DataFire RAS 60 port */
+#define DA2000FE 53 /* end of AccelePort 2000/RAS family definitions */
+
+#define PCXRFS 106 /* start of PCXR family definitions */
+#define APORT4 106
+#define APORT8 107
+#define PAPORT4 108
+#define PAPORT8 109
+#define APORT4_920I 110
+#define APORT8_920I 111
+#define APORT4_920P 112
+#define APORT8_920P 113
+#define APORT2_920P 114
+#define PCXRFE 117 /* end of PCXR family definitions */
+
+#define LINE 82
+#ifdef T1
+#define T1M 83
+#define E1M 84
+#endif
+#define CONC 64
+#define CX 65
+#define EPC 66
+#define MOD 67
+#define PORTS 68
+#define METHOD 69
+#define CUSTOM 70
+#define BASIC 71
+#define STATUS 72
+#define MODEM 73
+/* The following tokens can appear in multiple places */
+#define SPEED 74
+#define NPORTS 75
+#define ID 76
+#define CABLE 77
+#define CONNECT 78
+#define IO 79
+#define MEM 80
+#define DPSZ 81
+
+#define TTYN 90
+#define CU 91
+#define PRINT 92
+#define XPRINT 93
+#define CMAJOR 94
+#define ALTPIN 95
+#define STARTO 96
+#define USEINTR 97
+#define PCIINFO 98
+
+#define TTSIZ 100
+#define CHSIZ 101
+#define BSSIZ 102
+#define UNTSIZ 103
+#define F2SIZ 104
+#define VPSIZ 105
+
+#define TOTAL_BOARD 2
+#define CURRENT_BRD 4
+#define BOARD_TYPE 6
+#define IO_ADDRESS 8
+#define MEM_ADDRESS 10
+
+#define FIELDS_PER_PAGE 18
+
+#define TB_FIELD 1
+#define CB_FIELD 3
+#define BT_FIELD 5
+#define IO_FIELD 7
+#define ID_FIELD 8
+#define ME_FIELD 9
+#define TTY_FIELD 11
+#define CU_FIELD 13
+#define PR_FIELD 15
+#define MPR_FIELD 17
+
+#define MAX_FIELD 512
+
+#define INIT 0
+#define NITEMS 128
+#define MAX_ITEM 512
+
+#define DSCRINST 1
+#define DSCRNUM 3
+#define ALTPINQ 5
+#define SSAVE 7
+
+#define DSCR "32"
+#define ONETONINE "123456789"
+#define ALL "1234567890"
+
+
+struct cnode {
+ struct cnode *next;
+ int type;
+ int numbrd;
+
+ union {
+ struct {
+ char type; /* Board Type */
+ short port; /* I/O Address */
+ char *portstr; /* I/O Address in string */
+ long addr; /* Memory Address */
+ char *addrstr; /* Memory Address in string */
+ long pcibus; /* PCI BUS */
+ char *pcibusstr; /* PCI BUS in string */
+ long pcislot; /* PCI SLOT */
+ char *pcislotstr; /* PCI SLOT in string */
+ char nport; /* Number of Ports */
+ char *id; /* tty id */
+ int start; /* start of tty counting */
+ char *method; /* Install method */
+ char v_type;
+ char v_port;
+ char v_addr;
+ char v_pcibus;
+ char v_pcislot;
+ char v_nport;
+ char v_id;
+ char v_start;
+ char v_method;
+ char line1;
+ char line2;
+ char conc1; /* total concs in line1 */
+ char conc2; /* total concs in line2 */
+ char module1; /* total modules for line1 */
+ char module2; /* total modules for line2 */
+ char *status; /* config status */
+ char *dimstatus; /* Y/N */
+ int status_index; /* field pointer */
+ } board;
+
+ struct {
+ char *cable;
+ char v_cable;
+ char speed;
+ char v_speed;
+ } line;
+
+ struct {
+ char type;
+ char *connect;
+ char speed;
+ char nport;
+ char *id;
+ char *idstr;
+ int start;
+ char v_type;
+ char v_connect;
+ char v_speed;
+ char v_nport;
+ char v_id;
+ char v_start;
+ } conc;
+
+ struct {
+ char type;
+ char nport;
+ char *id;
+ char *idstr;
+ int start;
+ char v_type;
+ char v_nport;
+ char v_id;
+ char v_start;
+ } module;
+
+ char *ttyname;
+
+ char *cuname;
+
+ char *printname;
+
+ int majornumber;
+
+ int altpin;
+
+ int ttysize;
+
+ int chsize;
+
+ int bssize;
+
+ int unsize;
+
+ int f2size;
+
+ int vpixsize;
+
+ int useintr;
+ } u;
+};
+
+#endif
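dgap_conf.h models the parsed configuration as a singly linked list of cnode entries whose type field selects which member of the union is valid (board, line, concentrator, module, and so on). A minimal sketch of walking such a list is shown below; the struct is deliberately simplified, the node values are made up, and only the BNODE/LNODE/CNODE constants are taken from the header above.

#include <stdio.h>

#define BNODE 1		/* Board node (values match dgap_conf.h) */
#define LNODE 2		/* Line node */
#define CNODE 3		/* Concentrator node */

struct mini_cnode {
	struct mini_cnode *next;
	int type;
	const char *label;
};

static void walk_config(const struct mini_cnode *head)
{
	for (const struct mini_cnode *p = head; p; p = p->next) {
		switch (p->type) {
		case BNODE: printf("board: %s\n", p->label);     break;
		case LNODE: printf("  line: %s\n", p->label);    break;
		case CNODE: printf("    conc: %s\n", p->label);  break;
		default:    printf("  node type %d\n", p->type); break;
		}
	}
}

int main(void)
{
	struct mini_cnode conc  = { NULL,  CNODE, "conc-1" };
	struct mini_cnode line  = { &conc, LNODE, "line-1" };
	struct mini_cnode board = { &line, BNODE, "PCI EPCA" };

	walk_config(&board);
	return 0;
}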
diff --git a/drivers/staging/dgap/dgap_downld.h b/drivers/staging/dgap/dgap_downld.h
new file mode 100644
index 0000000..f79e65c
--- /dev/null
+++ b/drivers/staging/dgap/dgap_downld.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * $Id: dgap_downld.h,v 1.1 2009/10/23 14:01:57 markh Exp $
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ *
+ */
+
+/*
+** downld.h
+** - describes the interface between the user level download process
+** and the concentrator download driver.
+*/
+
+#ifndef _DGAP_DOWNLD_H_
+#define _DGAP_DOWNLD_H_
+
+
+struct fepimg {
+ int type; /* board type */
+ int len; /* length of image */
+ char fepimage[1]; /* beginning of image */
+};
+
+struct downldio {
+ unsigned int req_type; /* FEP or concentrator */
+ unsigned int bdid; /* opaque board identifier */
+ union {
+ struct downld_t dl; /* download structure */
+ struct fepimg fi; /* fep/bios image structure */
+ } image;
+};
+
+#define DIGI_DLREQ_GET (('d'<<8) | 220)
+#define DIGI_DLREQ_SET (('d'<<8) | 221)
+
+#define DIGI_DL_NUKE (('d'<<8) | 222) /* Not really a dl request, but
+ dangerous enuff to not put in
+ digi.h */
+/* Packed bits of intarg for DIGI_DL_NUKE */
+#define DIGI_NUKE_RESET_ALL (1 << 31)
+#define DIGI_NUKE_INHIBIT_POLLER (1 << 30)
+#define DIGI_NUKE_BRD_NUMB 0x0f
+
+
+
+#define DLREQ_BIOS 0
+#define DLREQ_FEP 1
+#define DLREQ_CONC 2
+#define DLREQ_CONFIG 3
+#define DLREQ_DEVCREATE 4
+
+#endif
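The download request numbers above are packed as (('d' << 8) | n): the driver's magic byte 'd' in the high 8 bits and a command index in the low 8. A tiny sketch that just evaluates and unpacks those constants; the decode() helper is an illustration, not driver code.

#include <stdio.h>

#define DIGI_DLREQ_GET  (('d' << 8) | 220)
#define DIGI_DLREQ_SET  (('d' << 8) | 221)
#define DIGI_DL_NUKE    (('d' << 8) | 222)

static void decode(const char *name, int cmd)
{
	printf("%-16s = 0x%04x (magic '%c', index %d)\n",
	       name, cmd, (cmd >> 8) & 0xff, cmd & 0xff);
}

int main(void)
{
	decode("DIGI_DLREQ_GET", DIGI_DLREQ_GET);
	decode("DIGI_DLREQ_SET", DIGI_DLREQ_SET);
	decode("DIGI_DL_NUKE",   DIGI_DL_NUKE);
	return 0;
}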
diff --git a/drivers/staging/dgap/dgap_driver.c b/drivers/staging/dgap/dgap_driver.c
new file mode 100644
index 0000000..40ef785
--- /dev/null
+++ b/drivers/staging/dgap/dgap_driver.c
@@ -0,0 +1,1051 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ *
+ * $Id: dgap_driver.c,v 1.3 2011/06/21 10:35:16 markh Exp $
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/delay.h> /* For udelay */
+#include <linux/slab.h>
+#include <asm/uaccess.h> /* For copy_from_user/copy_to_user */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
+#include <linux/sched.h>
+#endif
+
+#include "dgap_driver.h"
+#include "dgap_pci.h"
+#include "dgap_fep5.h"
+#include "dgap_tty.h"
+#include "dgap_conf.h"
+#include "dgap_parse.h"
+#include "dgap_trace.h"
+#include "dgap_sysfs.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Digi International, http://www.digi.com");
+MODULE_DESCRIPTION("Driver for the Digi International EPCA PCI based product line");
+MODULE_SUPPORTED_DEVICE("dgap");
+
+/*
+ * insmod command line overrideable parameters
+ *
+ * NOTE: we use a set of macros to create the variables, which allows
+ * us to specify the variable type, name, initial value, and description.
+ */
+PARM_INT(debug, 0x00, 0644, "Driver debugging level");
+PARM_INT(rawreadok, 1, 0644, "Bypass flip buffers on input");
+PARM_INT(trcbuf_size, 0x100000, 0644, "Debugging trace buffer size.");
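+
+/*
+ * PARM_INT is provided by one of the headers pulled in above (it is not
+ * defined in this file); it is expected to expand to roughly the
+ * following, which is what makes "debug" an ordinary module parameter:
+ *
+ *	int debug = 0x00;
+ *	module_param(debug, int, 0644);
+ *	MODULE_PARM_DESC(debug, "Driver debugging level");
+ */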
+
+
+/**************************************************************************
+ *
+ * protos for this file
+ *
+ */
+
+static int dgap_start(void);
+static void dgap_init_globals(void);
+static int dgap_found_board(struct pci_dev *pdev, int id);
+static void dgap_cleanup_board(struct board_t *brd);
+static void dgap_poll_handler(ulong dummy);
+static int dgap_init_pci(void);
+static int dgap_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void dgap_remove_one(struct pci_dev *dev);
+static int dgap_probe1(struct pci_dev *pdev, int card_type);
+static void dgap_mbuf(struct board_t *brd, const char *fmt, ...);
+static int dgap_do_remap(struct board_t *brd);
+static irqreturn_t dgap_intr(int irq, void *voidbrd);
+
+/* Driver load/unload functions */
+int dgap_init_module(void);
+void dgap_cleanup_module(void);
+
+module_init(dgap_init_module);
+module_exit(dgap_cleanup_module);
+
+
+/*
+ * File operations permitted on Control/Management major.
+ */
+static struct file_operations DgapBoardFops =
+{
+ .owner = THIS_MODULE,
+};
+
+
+/*
+ * Globals
+ */
+uint dgap_NumBoards;
+struct board_t *dgap_Board[MAXBOARDS];
+DEFINE_SPINLOCK(dgap_global_lock);
+ulong dgap_poll_counter;
+char *dgap_config_buf;
+int dgap_driver_state = DRIVER_INITIALIZED;
+DEFINE_SPINLOCK(dgap_dl_lock);
+wait_queue_head_t dgap_dl_wait;
+int dgap_dl_action;
+int dgap_poll_tick = 20; /* Poll interval - 20 ms */
+
+/*
+ * Static vars.
+ */
+static int dgap_Major_Control_Registered = FALSE;
+static uint dgap_driver_start = FALSE;
+
+static struct class * dgap_class;
+
+/*
+ * Poller stuff
+ */
+static DEFINE_SPINLOCK(dgap_poll_lock); /* Poll scheduling lock */
+static ulong dgap_poll_time; /* Time of next poll */
+static uint dgap_poll_stop; /* Used to tell poller to stop */
+static struct timer_list dgap_poll_timer;
+
+
+static struct pci_device_id dgap_pci_tbl[] = {
+ { DIGI_VID, PCI_DEVICE_XEM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { DIGI_VID, PCI_DEVICE_CX_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ { DIGI_VID, PCI_DEVICE_CX_IBM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
+ { DIGI_VID, PCI_DEVICE_EPCJ_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
+ { DIGI_VID, PCI_DEVICE_920_2_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+ { DIGI_VID, PCI_DEVICE_920_4_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
+ { DIGI_VID, PCI_DEVICE_920_8_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
+ { DIGI_VID, PCI_DEVICE_XR_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
+ { DIGI_VID, PCI_DEVICE_XRJ_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
+ { DIGI_VID, PCI_DEVICE_XR_422_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
+ { DIGI_VID, PCI_DEVICE_XR_IBM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
+ { DIGI_VID, PCI_DEVICE_XR_SAIP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
+ { DIGI_VID, PCI_DEVICE_XR_BULL_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
+ { DIGI_VID, PCI_DEVICE_920_8_HP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 13 },
+ { DIGI_VID, PCI_DEVICE_XEM_HP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 14 },
+ {0,} /* 0 terminated list. */
+};
+MODULE_DEVICE_TABLE(pci, dgap_pci_tbl);
+
+
+/*
+ * Product names and board capabilities, in the same order as dgap_pci_tbl[].
+ */
+struct board_id {
+ uint config_type;
+ uchar *name;
+ uint maxports;
+ uint dpatype;
+};
+
+static struct board_id dgap_Ids[] =
+{
+ { PPCM, PCI_DEVICE_XEM_NAME, 64, (T_PCXM | T_PCLITE | T_PCIBUS) },
+ { PCX, PCI_DEVICE_CX_NAME, 128, (T_CX | T_PCIBUS) },
+ { PCX, PCI_DEVICE_CX_IBM_NAME, 128, (T_CX | T_PCIBUS) },
+ { PEPC, PCI_DEVICE_EPCJ_NAME, 224, (T_EPC | T_PCIBUS) },
+ { APORT2_920P, PCI_DEVICE_920_2_NAME, 2, (T_PCXR | T_PCLITE | T_PCIBUS) },
+ { APORT4_920P, PCI_DEVICE_920_4_NAME, 4, (T_PCXR | T_PCLITE | T_PCIBUS) },
+ { APORT8_920P, PCI_DEVICE_920_8_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
+ { PAPORT8, PCI_DEVICE_XR_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
+ { PAPORT8, PCI_DEVICE_XRJ_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
+ { PAPORT8, PCI_DEVICE_XR_422_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
+ { PAPORT8, PCI_DEVICE_XR_IBM_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
+ { PAPORT8, PCI_DEVICE_XR_SAIP_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
+ { PAPORT8, PCI_DEVICE_XR_BULL_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
+ { APORT8_920P, PCI_DEVICE_920_8_HP_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
+ { PPCM, PCI_DEVICE_XEM_HP_NAME, 64, (T_PCXM | T_PCLITE | T_PCIBUS) },
+ {0,} /* 0 terminated list. */
+};
+
+static struct pci_driver dgap_driver = {
+ .name = "dgap",
+ .probe = dgap_init_one,
+ .id_table = dgap_pci_tbl,
+ .remove = dgap_remove_one,
+};
+
+
+char *dgap_state_text[] = {
+ "Board Failed",
+ "Configuration for board not found.\n\t\t\tRun mpi to configure board.",
+ "Board Found",
+ "Need Reset",
+ "Finished Reset",
+ "Need Config",
+ "Finished Config",
+ "Need Device Creation",
+ "Requested Device Creation",
+ "Finished Device Creation",
+ "Need BIOS Load",
+ "Requested BIOS",
+ "Doing BIOS Load",
+ "Finished BIOS Load",
+ "Need FEP Load",
+ "Requested FEP",
+ "Doing FEP Load",
+ "Finished FEP Load",
+ "Requested PROC creation",
+ "Finished PROC creation",
+ "Board READY",
+};
+
+char *dgap_driver_state_text[] = {
+ "Driver Initialized",
+ "Driver needs configuration load.",
+ "Driver requested configuration from download daemon.",
+ "Driver Ready."
+};
+
+
+
+/************************************************************************
+ *
+ * Driver load/unload functions
+ *
+ ************************************************************************/
+
+/*
+ * init_module()
+ *
+ * Module load. This is where it all starts.
+ */
+int dgap_init_module(void)
+{
+ int rc = 0;
+
+ APR(("%s, Digi International Part Number %s\n", DG_NAME, DG_PART));
+
+ /*
+ * Initialize global stuff
+ */
+ rc = dgap_start();
+
+ if (rc < 0) {
+ return(rc);
+ }
+
+ /*
+ * Find and configure all the cards
+ */
+ rc = dgap_init_pci();
+
+ /*
+ * If something went wrong in the scan, bail out of driver.
+ */
+ if (rc < 0) {
+ /* Only unregister the pci driver if it was actually registered. */
+ if (dgap_NumBoards)
+ pci_unregister_driver(&dgap_driver);
+ else
+ printk("WARNING: dgap driver load failed. No DGAP boards found.\n");
+
+ dgap_cleanup_module();
+ }
+ else {
+ dgap_create_driver_sysfiles(&dgap_driver);
+ }
+
+ DPR_INIT(("Finished init_module. Returning %d\n", rc));
+ return (rc);
+}
+
+
+/*
+ * Start of driver.
+ */
+static int dgap_start(void)
+{
+ int rc = 0;
+ unsigned long flags;
+
+ if (dgap_driver_start == FALSE) {
+
+ dgap_driver_start = TRUE;
+
+ /* make sure that the globals are init'd before we do anything else */
+ dgap_init_globals();
+
+ dgap_NumBoards = 0;
+
+ APR(("For the tools package or updated drivers please visit http://www.digi.com\n"));
+
+ /*
+ * Register our base character device into the kernel.
+ * This allows the download daemon to connect to the downld device
+ * before any of the boards are init'ed.
+ */
+ if (!dgap_Major_Control_Registered) {
+ /*
+ * Register management/dpa devices
+ */
+ rc = register_chrdev(DIGI_DGAP_MAJOR, "dgap", &DgapBoardFops);
+ if (rc < 0) {
+ APR(("Can't register dgap driver device (%d)\n", rc));
+ return (rc);
+ }
+
+ dgap_class = class_create(THIS_MODULE, "dgap_mgmt");
+ device_create(dgap_class, NULL,
+ MKDEV(DIGI_DGAP_MAJOR, 0),
+ NULL, "dgap_mgmt");
+ device_create(dgap_class, NULL,
+ MKDEV(DIGI_DGAP_MAJOR, 1),
+ NULL, "dgap_downld");
+ dgap_Major_Control_Registered = TRUE;
+ }
+
+ /*
+ * Init any global tty stuff.
+ */
+ rc = dgap_tty_preinit();
+
+ if (rc < 0) {
+ APR(("tty preinit - not enough memory (%d)\n", rc));
+ return(rc);
+ }
+
+ /* Start the poller */
+ DGAP_LOCK(dgap_poll_lock, flags);
+ init_timer(&dgap_poll_timer);
+ dgap_poll_timer.function = dgap_poll_handler;
+ dgap_poll_timer.data = 0;
+ dgap_poll_time = jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
+ dgap_poll_timer.expires = dgap_poll_time;
+ DGAP_UNLOCK(dgap_poll_lock, flags);
+
+ add_timer(&dgap_poll_timer);
+
+ dgap_driver_state = DRIVER_NEED_CONFIG_LOAD;
+ }
+
+ return (rc);
+}
+
+
+/*
+ * Register pci driver, and return how many boards we have.
+ */
+static int dgap_init_pci(void)
+{
+ return pci_register_driver(&dgap_driver);
+}
+
+
+/* returns 0 on success, or negative on error */
+static int dgap_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int rc;
+
+ /* wake up and enable device */
+ rc = pci_enable_device(pdev);
+
+ if (rc < 0) {
+ rc = -EIO;
+ } else {
+ rc = dgap_probe1(pdev, ent->driver_data);
+ if (rc == 0) {
+ dgap_NumBoards++;
+ DPR_INIT(("Incrementing numboards to %d\n", dgap_NumBoards));
+ }
+ }
+ return rc;
+}
+
+
+static int dgap_probe1(struct pci_dev *pdev, int card_type)
+{
+ return dgap_found_board(pdev, card_type);
+}
+
+
+static void dgap_remove_one(struct pci_dev *dev)
+{
+ /* Do Nothing */
+}
+
+
+/*
+ * dgap_cleanup_module()
+ *
+ * Module unload. This is where it all ends.
+ */
+void dgap_cleanup_module(void)
+{
+ int i;
+ ulong lock_flags;
+
+ DGAP_LOCK(dgap_poll_lock, lock_flags);
+ dgap_poll_stop = 1;
+ DGAP_UNLOCK(dgap_poll_lock, lock_flags);
+
+ /* Turn off poller right away. */
+ del_timer_sync( &dgap_poll_timer);
+
+ dgap_remove_driver_sysfiles(&dgap_driver);
+
+
+ if (dgap_Major_Control_Registered) {
+ device_destroy(dgap_class, MKDEV(DIGI_DGAP_MAJOR, 0));
+ device_destroy(dgap_class, MKDEV(DIGI_DGAP_MAJOR, 1));
+ class_destroy(dgap_class);
+ unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
+ }
+
+ if (dgap_config_buf)
+ kfree(dgap_config_buf);
+
+ for (i = 0; i < dgap_NumBoards; ++i) {
+ dgap_remove_ports_sysfiles(dgap_Board[i]);
+ dgap_tty_uninit(dgap_Board[i]);
+ dgap_cleanup_board(dgap_Board[i]);
+ }
+
+ dgap_tty_post_uninit();
+
+#if defined(DGAP_TRACER)
+ /* last thing, make sure we release the tracebuffer */
+ dgap_tracer_free();
+#endif
+ if (dgap_NumBoards)
+ pci_unregister_driver(&dgap_driver);
+}
+
+
+/*
+ * dgap_cleanup_board()
+ *
+ * Free all the memory associated with a board
+ */
+static void dgap_cleanup_board(struct board_t *brd)
+{
+ int i = 0;
+
+ if(!brd || brd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ if (brd->intr_used && brd->irq)
+ free_irq(brd->irq, brd);
+
+ tasklet_kill(&brd->helper_tasklet);
+
+ if (brd->re_map_port) {
+ release_mem_region(brd->membase + 0x200000, 0x200000);
+ iounmap(brd->re_map_port);
+ brd->re_map_port = NULL;
+ }
+
+ if (brd->re_map_membase) {
+ release_mem_region(brd->membase, 0x200000);
+ iounmap(brd->re_map_membase);
+ brd->re_map_membase = NULL;
+ }
+
+ if (brd->msgbuf_head) {
+ unsigned long flags;
+
+ DGAP_LOCK(dgap_global_lock, flags);
+ brd->msgbuf = NULL;
+ printk("%s", brd->msgbuf_head);
+ kfree(brd->msgbuf_head);
+ brd->msgbuf_head = NULL;
+ DGAP_UNLOCK(dgap_global_lock, flags);
+ }
+
+ /* Free all allocated channels structs */
+ for (i = 0; i < MAXPORTS ; i++) {
+ if (brd->channels[i]) {
+ kfree(brd->channels[i]);
+ brd->channels[i] = NULL;
+ }
+ }
+
+ if (brd->flipbuf)
+ kfree(brd->flipbuf);
+ if (brd->flipflagbuf)
+ kfree(brd->flipflagbuf);
+
+ dgap_Board[brd->boardnum] = NULL;
+
+ kfree(brd);
+}
+
+
+/*
+ * dgap_found_board()
+ *
+ * A board has been found, init it.
+ */
+static int dgap_found_board(struct pci_dev *pdev, int id)
+{
+ struct board_t *brd;
+ unsigned int pci_irq;
+ int i = 0;
+ unsigned long flags;
+
+ /* get the board structure and prep it */
+ brd = dgap_Board[dgap_NumBoards] =
+ (struct board_t *) dgap_driver_kzmalloc(sizeof(struct board_t), GFP_KERNEL);
+ if (!brd) {
+ APR(("memory allocation for board structure failed\n"));
+ return(-ENOMEM);
+ }
+
+ /* make a temporary message buffer for the boot messages */
+ brd->msgbuf = brd->msgbuf_head =
+ (char *) dgap_driver_kzmalloc(sizeof(char) * 8192, GFP_KERNEL);
+ if(!brd->msgbuf) {
+ kfree(brd);
+ APR(("memory allocation for board msgbuf failed\n"));
+ return(-ENOMEM);
+ }
+
+ /* store the info for the board we've found */
+ brd->magic = DGAP_BOARD_MAGIC;
+ brd->boardnum = dgap_NumBoards;
+ brd->firstminor = 0;
+ brd->vendor = dgap_pci_tbl[id].vendor;
+ brd->device = dgap_pci_tbl[id].device;
+ brd->pdev = pdev;
+ brd->pci_bus = pdev->bus->number;
+ brd->pci_slot = PCI_SLOT(pdev->devfn);
+ brd->name = dgap_Ids[id].name;
+ brd->maxports = dgap_Ids[id].maxports;
+ brd->type = dgap_Ids[id].config_type;
+ brd->dpatype = dgap_Ids[id].dpatype;
+ brd->dpastatus = BD_NOFEP;
+ init_waitqueue_head(&brd->state_wait);
+
+ DGAP_SPINLOCK_INIT(brd->bd_lock);
+
+ brd->state = BOARD_FOUND;
+ brd->runwait = 0;
+ brd->inhibit_poller = FALSE;
+ brd->wait_for_bios = 0;
+ brd->wait_for_fep = 0;
+
+ for (i = 0; i < MAXPORTS; i++) {
+ brd->channels[i] = NULL;
+ }
+
+ /* store which card & revision we have */
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &brd->subvendor);
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &brd->subdevice);
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &brd->rev);
+
+ pci_irq = pdev->irq;
+ brd->irq = pci_irq;
+
+ /* get the PCI Base Address Registers */
+
+ /* Xr Jupiter and EPC use BAR 2 */
+ if (brd->device == PCI_DEVICE_XRJ_DID || brd->device == PCI_DEVICE_EPCJ_DID) {
+ brd->membase = pci_resource_start(pdev, 2);
+ brd->membase_end = pci_resource_end(pdev, 2);
+ }
+ /* Everyone else uses BAR 0 */
+ else {
+ brd->membase = pci_resource_start(pdev, 0);
+ brd->membase_end = pci_resource_end(pdev, 0);
+ }
+
+ if (!brd->membase) {
+ APR(("card has no PCI IO resources, failing board.\n"));
+ return -ENODEV;
+ }
+
+ if (brd->membase & 1)
+ brd->membase &= ~3;
+ else
+ brd->membase &= ~15;
+
+ /*
+	 * On the PCI boards, there is no IO space allocated.
+	 * The I/O registers will be in the first 3 bytes of the
+	 * upper 2MB of the 4MB memory space.  The board memory
+	 * will be mapped into the low 2MB of the 4MB memory space.
+ */
+ brd->port = brd->membase + PCI_IO_OFFSET;
+ brd->port_end = brd->port + PCI_IO_SIZE;
+
+
+ /*
+ * Special initialization for non-PLX boards
+ */
+ if (brd->device != PCI_DEVICE_XRJ_DID && brd->device != PCI_DEVICE_EPCJ_DID) {
+ unsigned short cmd;
+
+ pci_write_config_byte(pdev, 0x40, 0);
+ pci_write_config_byte(pdev, 0x46, 0);
+
+ /* Limit burst length to 2 doubleword transactions */
+ pci_write_config_byte(pdev, 0x42, 1);
+
+ /*
+ * Enable IO and mem if not already done.
+ * This was needed for support on Itanium.
+ */
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ }
+
+ /* init our poll helper tasklet */
+ tasklet_init(&brd->helper_tasklet, dgap_poll_tasklet, (unsigned long) brd);
+
+ /* Log the information about the board */
+ dgap_mbuf(brd, DRVSTR": board %d: %s (rev %d), irq %d\n",
+ dgap_NumBoards, brd->name, brd->rev, brd->irq);
+
+ DPR_INIT(("dgap_scan(%d) - printing out the msgbuf\n", i));
+ DGAP_LOCK(dgap_global_lock, flags);
+ brd->msgbuf = NULL;
+ printk("%s", brd->msgbuf_head);
+ kfree(brd->msgbuf_head);
+ brd->msgbuf_head = NULL;
+ DGAP_UNLOCK(dgap_global_lock, flags);
+
+ i = dgap_do_remap(brd);
+ if (i)
+ brd->state = BOARD_FAILED;
+ else
+ brd->state = NEED_RESET;
+
+ return(0);
+}
+
+
+int dgap_finalize_board_init(struct board_t *brd) {
+
+ int rc;
+
+ DPR_INIT(("dgap_finalize_board_init() - start\n"));
+
+ if (!brd || brd->magic != DGAP_BOARD_MAGIC)
+ return(-ENODEV);
+
+ DPR_INIT(("dgap_finalize_board_init() - start #2\n"));
+
+ brd->use_interrupts = dgap_config_get_useintr(brd);
+
+ /*
+ * Set up our interrupt handler if we are set to do interrupts.
+ */
+ if (brd->use_interrupts && brd->irq) {
+
+ rc = request_irq(brd->irq, dgap_intr, IRQF_SHARED, "DGAP", brd);
+
+ if (rc) {
+ dgap_mbuf(brd, DRVSTR": Failed to hook IRQ %d. Board will work in poll mode.\n",
+ brd->irq);
+ brd->intr_used = 0;
+ }
+ else
+ brd->intr_used = 1;
+ } else {
+ brd->intr_used = 0;
+ }
+
+ return(0);
+}
+
+
+/*
+ * Remap PCI memory.
+ */
+static int dgap_do_remap(struct board_t *brd)
+{
+ if (!brd || brd->magic != DGAP_BOARD_MAGIC)
+ return -ENXIO;
+
+ if (!request_mem_region(brd->membase, 0x200000, "dgap")) {
+ APR(("dgap: mem_region %lx already in use.\n", brd->membase));
+ return -ENOMEM;
+ }
+
+ if (!request_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000, "dgap")) {
+ APR(("dgap: mem_region IO %lx already in use.\n",
+ brd->membase + PCI_IO_OFFSET));
+ release_mem_region(brd->membase, 0x200000);
+ return -ENOMEM;
+ }
+
+ brd->re_map_membase = ioremap(brd->membase, 0x200000);
+ if (!brd->re_map_membase) {
+ APR(("dgap: ioremap mem %lx cannot be mapped.\n", brd->membase));
+ release_mem_region(brd->membase, 0x200000);
+ release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
+ return -ENOMEM;
+ }
+
+ brd->re_map_port = ioremap((brd->membase + PCI_IO_OFFSET), 0x200000);
+ if (!brd->re_map_port) {
+ release_mem_region(brd->membase, 0x200000);
+ release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
+ iounmap(brd->re_map_membase);
+ APR(("dgap: ioremap IO mem %lx cannot be mapped.\n",
+ brd->membase + PCI_IO_OFFSET));
+ return -ENOMEM;
+ }
+
+ DPR_INIT(("remapped io: 0x%p remapped mem: 0x%p\n",
+ brd->re_map_port, brd->re_map_membase));
+ return 0;
+}
+
+
+/*****************************************************************************
+*
+* Function:
+*
+* dgap_poll_handler
+*
+* Author:
+*
+* Scott H Kilau
+*
+* Parameters:
+*
+* dummy -- ignored
+*
+* Return Values:
+*
+* none
+*
+* Description:
+*
+* As each timer expires, it determines (a) whether the "transmit"
+* waiter needs to be woken up, and (b) whether the poller needs to
+* be rescheduled.
+*
+******************************************************************************/
+
+static void dgap_poll_handler(ulong dummy)
+{
+ int i;
+ struct board_t *brd;
+ unsigned long lock_flags;
+ unsigned long lock_flags2;
+ ulong new_time;
+
+ dgap_poll_counter++;
+
+
+ /*
+	 * If the driver still needs the config file,
+ * keep trying to wake up the downloader to
+ * send us the file.
+ */
+ if (dgap_driver_state == DRIVER_NEED_CONFIG_LOAD) {
+ /*
+		 * Signal the downloader; it has some work to do.
+ */
+ DGAP_LOCK(dgap_dl_lock, lock_flags2);
+ if (dgap_dl_action != 1) {
+ dgap_dl_action = 1;
+ wake_up_interruptible(&dgap_dl_wait);
+ }
+ DGAP_UNLOCK(dgap_dl_lock, lock_flags2);
+ goto schedule_poller;
+ }
+ /*
+ * Do not start the board state machine until
+	 * the driver tells us it's up and running, and has
+ * everything it needs.
+ */
+ else if (dgap_driver_state != DRIVER_READY) {
+ goto schedule_poller;
+ }
+
+ /*
+ * If we have just 1 board, or the system is not SMP,
+ * then use the typical old style poller.
+ * Otherwise, use our new tasklet based poller, which should
+ * speed things up for multiple boards.
+ */
+ if ( (dgap_NumBoards == 1) || (num_online_cpus() <= 1) ) {
+ for (i = 0; i < dgap_NumBoards; i++) {
+
+ brd = dgap_Board[i];
+
+ if (brd->state == BOARD_FAILED) {
+ continue;
+ }
+ if (!brd->intr_running) {
+ /* Call the real board poller directly */
+ dgap_poll_tasklet((unsigned long) brd);
+ }
+ }
+ }
+ else {
+		/* Go through each board, kicking off a tasklet for each if needed */
+ for (i = 0; i < dgap_NumBoards; i++) {
+ brd = dgap_Board[i];
+
+ /*
+ * Attempt to grab the board lock.
+ *
+ * If we can't get it, no big deal, the next poll will get it.
+			 * We just don't want to spin in here; we want to kick off
+			 * the tasklets as fast as we can and get out of the poller.
+ */
+ if (!spin_trylock(&brd->bd_lock)) {
+ continue;
+ }
+
+ /* If board is in a failed state, don't bother scheduling a tasklet */
+ if (brd->state == BOARD_FAILED) {
+ spin_unlock(&brd->bd_lock);
+ continue;
+ }
+
+ /* Schedule a poll helper task */
+ if (!brd->intr_running) {
+ tasklet_schedule(&brd->helper_tasklet);
+ }
+
+ /*
+ * Can't do DGAP_UNLOCK here, as we don't have
+ * lock_flags because we did a trylock above.
+ */
+ spin_unlock(&brd->bd_lock);
+ }
+ }
+
+schedule_poller:
+
+ /*
+	 * Schedule ourselves back at the nominal wakeup interval.
+ */
+ DGAP_LOCK(dgap_poll_lock, lock_flags );
+ dgap_poll_time += dgap_jiffies_from_ms(dgap_poll_tick);
+
+ new_time = dgap_poll_time - jiffies;
+
+ if ((ulong) new_time >= 2 * dgap_poll_tick) {
+ dgap_poll_time = jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
+ }
+
+ dgap_poll_timer.function = dgap_poll_handler;
+ dgap_poll_timer.data = 0;
+ dgap_poll_timer.expires = dgap_poll_time;
+ DGAP_UNLOCK(dgap_poll_lock, lock_flags );
+
+ if (!dgap_poll_stop)
+ add_timer(&dgap_poll_timer);
+}
+
+
+
+
+/*
+ * dgap_intr()
+ *
+ * Driver interrupt handler.
+ */
+static irqreturn_t dgap_intr(int irq, void *voidbrd)
+{
+ struct board_t *brd = (struct board_t *) voidbrd;
+
+ if (!brd) {
+ APR(("Received interrupt (%d) with null board associated\n", irq));
+ return IRQ_NONE;
+ }
+
+ /*
+	 * Check to make sure it's for us.
+ */
+ if (brd->magic != DGAP_BOARD_MAGIC) {
+ APR(("Received interrupt (%d) with a board pointer that wasn't ours!\n", irq));
+ return IRQ_NONE;
+ }
+
+ brd->intr_count++;
+
+ /*
+ * Schedule tasklet to run at a better time.
+ */
+ tasklet_schedule(&brd->helper_tasklet);
+ return IRQ_HANDLED;
+}
+
+
+/*
+ * dgap_init_globals()
+ *
+ * This is where we initialize the globals from the static insmod
+ * configuration variables. These are declared near the head of
+ * this file.
+ */
+static void dgap_init_globals(void)
+{
+ int i = 0;
+
+ dgap_rawreadok = rawreadok;
+ dgap_trcbuf_size = trcbuf_size;
+ dgap_debug = debug;
+
+ for (i = 0; i < MAXBOARDS; i++) {
+ dgap_Board[i] = NULL;
+ }
+
+ init_timer( &dgap_poll_timer );
+
+ init_waitqueue_head(&dgap_dl_wait);
+ dgap_dl_action = 0;
+}
+
+
+/************************************************************************
+ *
+ * Utility functions
+ *
+ ************************************************************************/
+
+
+/*
+ * dgap_driver_kzmalloc()
+ *
+ * Allocate and clear (zero) memory.
+ */
+void *dgap_driver_kzmalloc(size_t size, int priority)
+{
+ void *p = kmalloc(size, priority);
+ if(p)
+ memset(p, 0, size);
+ return(p);
+}
+
+
+/*
+ * dgap_mbuf()
+ *
+ * Used to print to the message buffer during board init.
+ */
+static void dgap_mbuf(struct board_t *brd, const char *fmt, ...) {
+ va_list ap;
+ char buf[1024];
+ int i;
+ unsigned long flags;
+ size_t length;
+
+ DGAP_LOCK(dgap_global_lock, flags);
+
+ /* Format buf using fmt and arguments contained in ap. */
+ va_start(ap, fmt);
+ i = vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+
+ DPR((buf));
+
+ if (!brd || !brd->msgbuf) {
+ printk("%s", buf);
+ DGAP_UNLOCK(dgap_global_lock, flags);
+ return;
+ }
+
+ length = strlen(buf) + 1;
+	if (length > 8192 - (brd->msgbuf - brd->msgbuf_head))
+		length = 8192 - (brd->msgbuf - brd->msgbuf_head);
+ memcpy(brd->msgbuf, buf, length);
+ brd->msgbuf += length;
+
+ DGAP_UNLOCK(dgap_global_lock, flags);
+}
+
+
+/*
+ * dgap_ms_sleep()
+ *
+ * Put the driver to sleep for x ms.
+ *
+ * Returns 0 if it timed out, non-zero if it was interrupted by a signal.
+ */
+int dgap_ms_sleep(ulong ms)
+{
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout((ms * HZ) / 1000);
+ return (signal_pending(current));
+}
+
+
+
+/*
+ * dgap_ioctl_name() : Returns a text version of each ioctl value.
+ */
+char *dgap_ioctl_name(int cmd)
+{
+ switch(cmd) {
+
+ case TCGETA: return("TCGETA");
+ case TCGETS: return("TCGETS");
+ case TCSETA: return("TCSETA");
+ case TCSETS: return("TCSETS");
+ case TCSETAW: return("TCSETAW");
+ case TCSETSW: return("TCSETSW");
+ case TCSETAF: return("TCSETAF");
+ case TCSETSF: return("TCSETSF");
+ case TCSBRK: return("TCSBRK");
+ case TCXONC: return("TCXONC");
+ case TCFLSH: return("TCFLSH");
+ case TIOCGSID: return("TIOCGSID");
+
+ case TIOCGETD: return("TIOCGETD");
+ case TIOCSETD: return("TIOCSETD");
+ case TIOCGWINSZ: return("TIOCGWINSZ");
+ case TIOCSWINSZ: return("TIOCSWINSZ");
+
+ case TIOCMGET: return("TIOCMGET");
+ case TIOCMSET: return("TIOCMSET");
+ case TIOCMBIS: return("TIOCMBIS");
+ case TIOCMBIC: return("TIOCMBIC");
+
+ /* from digi.h */
+ case DIGI_SETA: return("DIGI_SETA");
+ case DIGI_SETAW: return("DIGI_SETAW");
+ case DIGI_SETAF: return("DIGI_SETAF");
+ case DIGI_SETFLOW: return("DIGI_SETFLOW");
+ case DIGI_SETAFLOW: return("DIGI_SETAFLOW");
+ case DIGI_GETFLOW: return("DIGI_GETFLOW");
+ case DIGI_GETAFLOW: return("DIGI_GETAFLOW");
+ case DIGI_GETA: return("DIGI_GETA");
+ case DIGI_GEDELAY: return("DIGI_GEDELAY");
+ case DIGI_SEDELAY: return("DIGI_SEDELAY");
+ case DIGI_GETCUSTOMBAUD: return("DIGI_GETCUSTOMBAUD");
+ case DIGI_SETCUSTOMBAUD: return("DIGI_SETCUSTOMBAUD");
+ case TIOCMODG: return("TIOCMODG");
+ case TIOCMODS: return("TIOCMODS");
+ case TIOCSDTR: return("TIOCSDTR");
+ case TIOCCDTR: return("TIOCCDTR");
+
+ default: return("unknown");
+ }
+}
diff --git a/drivers/staging/dgap/dgap_driver.h b/drivers/staging/dgap/dgap_driver.h
new file mode 100644
index 0000000..b1cf489
--- /dev/null
+++ b/drivers/staging/dgap/dgap_driver.h
@@ -0,0 +1,618 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ *
+ *************************************************************************
+ *
+ * Driver includes
+ *
+ *************************************************************************/
+
+#ifndef __DGAP_DRIVER_H
+#define __DGAP_DRIVER_H
+
+#include <linux/version.h> /* To get the current Linux version */
+#include <linux/types.h>	/* To pick up the various Linux types */
+#include <linux/tty.h> /* To pick up the various tty structs/defines */
+#include <linux/interrupt.h> /* For irqreturn_t type */
+
+#include "dgap_types.h" /* Additional types needed by the Digi header files */
+#include "digi.h" /* Digi specific ioctl header */
+#include "dgap_kcompat.h" /* Kernel 2.4/2.6 compat includes */
+#include "dgap_sysfs.h" /* Support for SYSFS */
+
+/*************************************************************************
+ *
+ * Driver defines
+ *
+ *************************************************************************/
+
+/*
+ * Driver identification, error and debugging statements
+ *
+ * In theory, you can change all occurrences of "digi" in the next
+ * three lines, and the driver printk's will all automagically change.
+ *
+ * APR((fmt, args, ...)); Always prints message
+ * DPR((fmt, args, ...)); Only prints if DGAP_TRACER is defined at
+ * compile time and dgap_debug!=0
+ */
+#define PROCSTR "dgap" /* /proc entries */
+#define DEVSTR "/dev/dg/dgap" /* /dev entries */
+#define DRVSTR "dgap" /* Driver name string
+ * displayed by APR */
+#define APR(args) do { PRINTF_TO_KMEM(args); printk(DRVSTR": "); printk args; \
+ } while (0)
+#define RAPR(args) do { PRINTF_TO_KMEM(args); printk args; } while (0)
+
+#define TRC_TO_CONSOLE 1
+
+/*
+ * Debugging levels can be set using the "debug" insmod variable.
+ * They can also be compiled out completely.
+ */
+
+#define DBG_INIT (dgap_debug & 0x01)
+#define DBG_BASIC (dgap_debug & 0x02)
+#define DBG_CORE (dgap_debug & 0x04)
+
+#define DBG_OPEN (dgap_debug & 0x08)
+#define DBG_CLOSE (dgap_debug & 0x10)
+#define DBG_READ (dgap_debug & 0x20)
+#define DBG_WRITE (dgap_debug & 0x40)
+
+#define DBG_IOCTL (dgap_debug & 0x80)
+
+#define DBG_PROC (dgap_debug & 0x100)
+#define DBG_PARAM (dgap_debug & 0x200)
+#define DBG_PSCAN (dgap_debug & 0x400)
+#define DBG_EVENT (dgap_debug & 0x800)
+
+#define DBG_DRAIN (dgap_debug & 0x1000)
+#define DBG_CARR (dgap_debug & 0x2000)
+
+#define DBG_MGMT (dgap_debug & 0x4000)
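+
+/*
+ * Example (illustrative): when the driver is built with DGAP_TRACER,
+ * loading it with
+ *
+ *	modprobe dgap debug=0x09
+ *
+ * sets dgap_debug so that DBG_INIT (0x01) and DBG_OPEN (0x08) are true,
+ * which enables the DPR_INIT() and DPR_OPEN() trace statements below.
+ */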
+
+
+#if defined(DGAP_TRACER)
+
+# if defined(TRC_TO_KMEM)
+/* Choose one: */
+# define TRC_ON_OVERFLOW_WRAP_AROUND
+# undef TRC_ON_OVERFLOW_SHIFT_BUFFER
+# endif //TRC_TO_KMEM
+
+# define TRC_MAXMSG 1024
+# define TRC_OVERFLOW "(OVERFLOW)"
+# define TRC_DTRC "/usr/bin/dtrc"
+
+#if defined TRC_TO_CONSOLE
+#define PRINTF_TO_CONSOLE(args) { printk(DRVSTR": "); printk args; }
+#else //!defined TRC_TO_CONSOLE
+#define PRINTF_TO_CONSOLE(args)
+#endif
+
+#if defined TRC_TO_KMEM
+#define PRINTF_TO_KMEM(args) dgap_tracef args
+#else //!defined TRC_TO_KMEM
+#define PRINTF_TO_KMEM(args)
+#endif
+
+#define TRC(args) { PRINTF_TO_KMEM(args); PRINTF_TO_CONSOLE(args) }
+
+# define DPR_INIT(ARGS) if (DBG_INIT) TRC(ARGS)
+# define DPR_BASIC(ARGS) if (DBG_BASIC) TRC(ARGS)
+# define DPR_CORE(ARGS) if (DBG_CORE) TRC(ARGS)
+# define DPR_OPEN(ARGS) if (DBG_OPEN) TRC(ARGS)
+# define DPR_CLOSE(ARGS) if (DBG_CLOSE) TRC(ARGS)
+# define DPR_READ(ARGS) if (DBG_READ) TRC(ARGS)
+# define DPR_WRITE(ARGS) if (DBG_WRITE) TRC(ARGS)
+# define DPR_IOCTL(ARGS) if (DBG_IOCTL) TRC(ARGS)
+# define DPR_PROC(ARGS) if (DBG_PROC) TRC(ARGS)
+# define DPR_PARAM(ARGS) if (DBG_PARAM) TRC(ARGS)
+# define DPR_PSCAN(ARGS) if (DBG_PSCAN) TRC(ARGS)
+# define DPR_EVENT(ARGS) if (DBG_EVENT) TRC(ARGS)
+# define DPR_DRAIN(ARGS) if (DBG_DRAIN) TRC(ARGS)
+# define DPR_CARR(ARGS) if (DBG_CARR) TRC(ARGS)
+# define DPR_MGMT(ARGS) if (DBG_MGMT) TRC(ARGS)
+
+# define DPR(ARGS) if (dgap_debug) TRC(ARGS)
+# define P(X) dgap_tracef(#X "=%p\n", X)
+# define X(X) dgap_tracef(#X "=%x\n", X)
+
+#else//!defined DGAP_TRACER
+
+#define PRINTF_TO_KMEM(args)
+# define TRC(ARGS)
+# define DPR_INIT(ARGS)
+# define DPR_BASIC(ARGS)
+# define DPR_CORE(ARGS)
+# define DPR_OPEN(ARGS)
+# define DPR_CLOSE(ARGS)
+# define DPR_READ(ARGS)
+# define DPR_WRITE(ARGS)
+# define DPR_IOCTL(ARGS)
+# define DPR_PROC(ARGS)
+# define DPR_PARAM(ARGS)
+# define DPR_PSCAN(ARGS)
+# define DPR_EVENT(ARGS)
+# define DPR_DRAIN(ARGS)
+# define DPR_CARR(ARGS)
+# define DPR_MGMT(ARGS)
+
+# define DPR(args)
+
+#endif//DGAP_TRACER
+
+/* Number of boards we support at once. */
+#define MAXBOARDS 32
+#define MAXPORTS 224
+#define MAXTTYNAMELEN 200
+
+/* Our 3 magic numbers for our board, channel and unit structs */
+#define DGAP_BOARD_MAGIC 0x5c6df104
+#define DGAP_CHANNEL_MAGIC 0x6c6df104
+#define DGAP_UNIT_MAGIC 0x7c6df104
+
+/* Serial port types */
+#define DGAP_SERIAL 0
+#define DGAP_PRINT 1
+
+#define SERIAL_TYPE_NORMAL 1
+
+/* 4 extra for alignment play space */
+#define WRITEBUFLEN ((4096) + 4)
+#define MYFLIPLEN N_TTY_BUF_SIZE
+
+#define SBREAK_TIME 0x25
+#define U2BSIZE 0x400
+
+#define dgap_jiffies_from_ms(a) (((a) * HZ) / 1000)
+
+/*
+ * Our major for the mgmt devices.
+ *
+ * We can use 22, because Digi was allocated 22 and 23 for the epca driver.
+ * 22 has become obsolete now that the "cu" devices have
+ * been removed from 2.6.
+ * Also, this *IS* the epca driver, just PCI only now.
+ */
+#ifndef DIGI_DGAP_MAJOR
+# define DIGI_DGAP_MAJOR 22
+#endif
+
+/*
+ * The parameters we use to define the periods of the moving averages.
+ */
+#define MA_PERIOD (HZ / 10)
+#define SMA_DUR (1 * HZ)
+#define EMA_DUR (1 * HZ)
+#define SMA_NPERIODS (SMA_DUR / MA_PERIOD)
+#define EMA_NPERIODS (EMA_DUR / MA_PERIOD)
+
+/*
+ * Define a local default termios struct. All ports will be created
+ * with this termios initially. This is the same structure that is defined
+ * as the default in tty_io.c with the same settings overridden as in serial.c
+ *
+ * In short, this should match the internal serial ports' defaults.
+ */
+#define DEFAULT_IFLAGS (ICRNL | IXON)
+#define DEFAULT_OFLAGS (OPOST | ONLCR)
+#define DEFAULT_CFLAGS (B9600 | CS8 | CREAD | HUPCL | CLOCAL)
+#define DEFAULT_LFLAGS (ISIG | ICANON | ECHO | ECHOE | ECHOK | \
+ ECHOCTL | ECHOKE | IEXTEN)
+
+#ifndef _POSIX_VDISABLE
+#define _POSIX_VDISABLE '\0'
+#endif
+
+#define SNIFF_MAX 65536 /* Sniff buffer size (2^n) */
+#define SNIFF_MASK (SNIFF_MAX - 1) /* Sniff wrap mask */
+
+#define VPDSIZE (512)
+
+/*
+ * Lock function/defines.
+ * Makes spotting lock/unlock locations easier.
+ */
+# define DGAP_SPINLOCK_INIT(x) spin_lock_init(&(x))
+# define DGAP_LOCK(x,y) spin_lock_irqsave(&(x), y)
+# define DGAP_UNLOCK(x,y) spin_unlock_irqrestore(&(x), y)
+# define DGAP_TRYLOCK(x,y) spin_trylock(&(x))
+
+/*
+ * All the possible states the driver can be in while being loaded.
+ */
+enum {
+ DRIVER_INITIALIZED = 0,
+ DRIVER_NEED_CONFIG_LOAD,
+ DRIVER_REQUESTED_CONFIG,
+ DRIVER_READY
+};
+
+/*
+ * All the possible states the board can be in while booting up.
+ */
+enum {
+ BOARD_FAILED = 0,
+ CONFIG_NOT_FOUND,
+ BOARD_FOUND,
+ NEED_RESET,
+ FINISHED_RESET,
+ NEED_CONFIG,
+ FINISHED_CONFIG,
+ NEED_DEVICE_CREATION,
+ REQUESTED_DEVICE_CREATION,
+ FINISHED_DEVICE_CREATION,
+ NEED_BIOS_LOAD,
+ REQUESTED_BIOS,
+ WAIT_BIOS_LOAD,
+ FINISHED_BIOS_LOAD,
+ NEED_FEP_LOAD,
+ REQUESTED_FEP,
+ WAIT_FEP_LOAD,
+ FINISHED_FEP_LOAD,
+ NEED_PROC_CREATION,
+ FINISHED_PROC_CREATION,
+ BOARD_READY
+};
+
+/*
+ * All the possible states that a requested concentrator image can be in.
+ */
+enum {
+ NO_PENDING_CONCENTRATOR_REQUESTS = 0,
+ NEED_CONCENTRATOR,
+ REQUESTED_CONCENTRATOR
+};
+
+extern char *dgap_state_text[];
+extern char *dgap_driver_state_text[];
+
+
+/*
+ * Modem line constants are defined as macros because DSR and
+ * DCD are swappable using the ditty altpin option.
+ */
+#define D_CD(ch) ch->ch_cd /* Carrier detect */
+#define D_DSR(ch) ch->ch_dsr /* Data set ready */
+#define D_RTS(ch) DM_RTS /* Request to send */
+#define D_CTS(ch) DM_CTS /* Clear to send */
+#define D_RI(ch) DM_RI /* Ring indicator */
+#define D_DTR(ch) DM_DTR /* Data terminal ready */
+
+
+/*************************************************************************
+ *
+ * Structures and closely related defines.
+ *
+ *************************************************************************/
+
+
+/*
+ * A structure to hold a statistics counter. We also
+ * compute moving averages for this counter.
+ */
+struct macounter
+{
+ u32 cnt; /* Total count */
+	ulong		accum;		/* Accumulator per period */
+ ulong sma; /* Simple moving average */
+ ulong ema; /* Exponential moving average */
+};
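+
+/*
+ * Sketch of the intended bookkeeping (the real update code lives
+ * elsewhere in the driver and may differ): roughly once per MA_PERIOD
+ * the accumulator is folded into the totals/averages and cleared, e.g.
+ *
+ *	m->cnt  += m->accum;
+ *	m->ema   = (m->ema * (EMA_NPERIODS - 1) + m->accum) / EMA_NPERIODS;
+ *	m->accum = 0;
+ *
+ * A true simple moving average (sma) additionally needs the last
+ * SMA_NPERIODS accumulator values so the oldest sample can be dropped.
+ */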
+
+
+/************************************************************************
+ * Device flag definitions for bd_flags.
+ ************************************************************************/
+#define BD_FEP5PLUS 0x0001 /* Supports FEP5 Plus commands */
+#define BD_HAS_VPD 0x0002 /* Board has VPD info available */
+
+
+/*
+ * Per-board information
+ */
+struct board_t
+{
+ int magic; /* Board Magic number. */
+	int		boardnum;	/* Board number: 0 to MAXBOARDS-1 */
+ int firstminor; /* First minor, e.g. 0, 30, 60 */
+
+ int type; /* Type of board */
+ char *name; /* Product Name */
+ struct pci_dev *pdev; /* Pointer to the pci_dev struct */
+ u16 vendor; /* PCI vendor ID */
+ u16 device; /* PCI device ID */
+ u16 subvendor; /* PCI subsystem vendor ID */
+ u16 subdevice; /* PCI subsystem device ID */
+ uchar rev; /* PCI revision ID */
+ uint pci_bus; /* PCI bus value */
+ uint pci_slot; /* PCI slot value */
+ u16 maxports; /* MAX ports this board can handle */
+ uchar vpd[VPDSIZE]; /* VPD of board, if found */
+ u32 bd_flags; /* Board flags */
+
+ spinlock_t bd_lock; /* Used to protect board */
+
+ u32 state; /* State of card. */
+ wait_queue_head_t state_wait; /* Place to sleep on for state change */
+
+ struct tasklet_struct helper_tasklet; /* Poll helper tasklet */
+
+ u32 wait_for_bios;
+ u32 wait_for_fep;
+
+ struct cnode * bd_config; /* Config of board */
+
+ u16 nasync; /* Number of ports on card */
+
+ u32 use_interrupts; /* Should we be interrupt driven? */
+ ulong irq; /* Interrupt request number */
+ ulong intr_count; /* Count of interrupts */
+ u32 intr_used; /* Non-zero if using interrupts */
+	u32		intr_running;	/* Non-zero if FEP knows it's doing interrupts */
+
+ ulong port; /* Start of base io port of the card */
+ ulong port_end; /* End of base io port of the card */
+ ulong membase; /* Start of base memory of the card */
+ ulong membase_end; /* End of base memory of the card */
+
+ uchar *re_map_port; /* Remapped io port of the card */
+ uchar *re_map_membase;/* Remapped memory of the card */
+
+ uchar runwait; /* # Processes waiting for FEP */
+ uchar inhibit_poller; /* Tells the poller to leave us alone */
+
+ struct channel_t *channels[MAXPORTS]; /* array of pointers to our channels. */
+
+ struct tty_driver *SerialDriver;
+ char SerialName[200];
+ struct tty_driver *PrintDriver;
+ char PrintName[200];
+
+ u32 dgap_Major_Serial_Registered;
+ u32 dgap_Major_TransparentPrint_Registered;
+
+ u32 dgap_Serial_Major;
+ u32 dgap_TransparentPrint_Major;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ u32 TtyRefCnt;
+#endif
+
+ struct bs_t *bd_bs; /* Base structure pointer */
+
+	char		*flipbuf;	/* Our flip buffer, allocated if board is found */
+	char		*flipflagbuf;	/* Our flip flag buffer, allocated if board is found */
+
+ u16 dpatype; /* The board "type", as defined by DPA */
+ u16 dpastatus; /* The board "status", as defined by DPA */
+ wait_queue_head_t kme_wait; /* Needed for DPA support */
+
+ u32 conc_dl_status; /* Status of any pending conc download */
+ /*
+ * Mgmt data.
+ */
+ char *msgbuf_head;
+ char *msgbuf;
+};
+
+
+
+/************************************************************************
+ * Unit flag definitions for un_flags.
+ ************************************************************************/
+#define UN_ISOPEN 0x0001 /* Device is open */
+#define UN_CLOSING 0x0002 /* Line is being closed */
+#define UN_IMM 0x0004 /* Service immediately */
+#define UN_BUSY 0x0008 /* Some work this channel */
+#define UN_BREAKI 0x0010 /* Input break received */
+#define UN_PWAIT 0x0020 /* Printer waiting for terminal */
+#define UN_TIME 0x0040 /* Waiting on time */
+#define UN_EMPTY 0x0080 /* Waiting output queue empty */
+#define UN_LOW 0x0100 /* Waiting output low water mark*/
+#define UN_EXCL_OPEN 0x0200 /* Open for exclusive use */
+#define UN_WOPEN 0x0400 /* Device waiting for open */
+#define UN_WIOCTL	0x0800		/* Device waiting for ioctl */
+#define UN_HANGUP 0x8000 /* Carrier lost */
+
+struct device;
+
+/************************************************************************
+ * Structure for terminal or printer unit.
+ ************************************************************************/
+struct un_t {
+ int magic; /* Unit Magic Number. */
+ struct channel_t *un_ch;
+ u32 un_time;
+ u32 un_type;
+ u32 un_open_count; /* Counter of opens to port */
+ struct tty_struct *un_tty;/* Pointer to unit tty structure */
+ u32 un_flags; /* Unit flags */
+ wait_queue_head_t un_flags_wait; /* Place to sleep to wait on unit */
+ u32 un_dev; /* Minor device number */
+ tcflag_t un_oflag; /* oflags being done on board */
+ tcflag_t un_lflag; /* lflags being done on board */
+ struct device *un_sysfs;
+};
+
+
+/************************************************************************
+ * Device flag definitions for ch_flags.
+ ************************************************************************/
+#define CH_PRON 0x0001 /* Printer on string */
+#define CH_OUT 0x0002 /* Dial-out device open */
+#define CH_STOP 0x0004 /* Output is stopped */
+#define CH_STOPI 0x0008 /* Input is stopped */
+#define CH_CD 0x0010 /* Carrier is present */
+#define CH_FCAR 0x0020 /* Carrier forced on */
+
+#define CH_RXBLOCK 0x0080 /* Enable rx blocked flag */
+#define CH_WLOW 0x0100 /* Term waiting low event */
+#define CH_WEMPTY 0x0200 /* Term waiting empty event */
+#define CH_RENABLE 0x0400 /* Buffer just emptied */
+#define CH_RACTIVE 0x0800 /* Process active in xxread() */
+#define CH_RWAIT 0x1000 /* Process waiting in xxread() */
+#define CH_BAUD0 0x2000 /* Used for checking B0 transitions */
+#define CH_HANGUP 0x8000 /* Hangup received */
+
+/*
+ * Definitions for ch_sniff_flags
+ */
+#define SNIFF_OPEN 0x1
+#define SNIFF_WAIT_DATA 0x2
+#define SNIFF_WAIT_SPACE 0x4
+
+
+/************************************************************************
+ * Channel information structure.
+ ************************************************************************/
+struct channel_t {
+ int magic; /* Channel Magic Number */
+ struct bs_t *ch_bs; /* Base structure pointer */
+ struct cm_t *ch_cm; /* Command queue pointer */
+ struct board_t *ch_bd; /* Board structure pointer */
+ unsigned char *ch_vaddr; /* FEP memory origin */
+ unsigned char *ch_taddr; /* Write buffer origin */
+ unsigned char *ch_raddr; /* Read buffer origin */
+ struct digi_t ch_digi; /* Transparent Print structure */
+ struct un_t ch_tun; /* Terminal unit info */
+ struct un_t ch_pun; /* Printer unit info */
+
+ spinlock_t ch_lock; /* provide for serialization */
+ wait_queue_head_t ch_flags_wait;
+
+ u32 pscan_state;
+ uchar pscan_savechar;
+
+ u32 ch_portnum; /* Port number, 0 offset. */
+ u32 ch_open_count; /* open count */
+ u32 ch_flags; /* Channel flags */
+
+
+ u32 ch_close_delay; /* How long we should drop RTS/DTR for */
+
+ u32 ch_cpstime; /* Time for CPS calculations */
+
+ tcflag_t ch_c_iflag; /* channel iflags */
+ tcflag_t ch_c_cflag; /* channel cflags */
+ tcflag_t ch_c_oflag; /* channel oflags */
+ tcflag_t ch_c_lflag; /* channel lflags */
+
+ u16 ch_fepiflag; /* FEP tty iflags */
+ u16 ch_fepcflag; /* FEP tty cflags */
+ u16 ch_fepoflag; /* FEP tty oflags */
+ u16 ch_wopen; /* Waiting for open process cnt */
+ u16 ch_tstart; /* Transmit buffer start */
+ u16 ch_tsize; /* Transmit buffer size */
+ u16 ch_rstart; /* Receive buffer start */
+ u16 ch_rsize; /* Receive buffer size */
+ u16 ch_rdelay; /* Receive delay time */
+
+ u16 ch_tlw; /* Our currently set low water mark */
+
+ u16 ch_cook; /* Output character mask */
+
+ uchar ch_card; /* Card channel is on */
+ uchar ch_stopc; /* Stop character */
+ uchar ch_startc; /* Start character */
+
+ uchar ch_mostat; /* FEP output modem status */
+ uchar ch_mistat; /* FEP input modem status */
+ uchar ch_mforce; /* Modem values to be forced */
+ uchar ch_mval; /* Force values */
+ uchar ch_fepstopc; /* FEP stop character */
+ uchar ch_fepstartc; /* FEP start character */
+
+ uchar ch_astopc; /* Auxiliary Stop character */
+ uchar ch_astartc; /* Auxiliary Start character */
+ uchar ch_fepastopc; /* Auxiliary FEP stop char */
+ uchar ch_fepastartc; /* Auxiliary FEP start char */
+
+ uchar ch_hflow; /* FEP hardware handshake */
+ uchar ch_dsr; /* stores real dsr value */
+ uchar ch_cd; /* stores real cd value */
+ uchar ch_tx_win; /* channel tx buffer window */
+ uchar ch_rx_win; /* channel rx buffer window */
+ uint ch_custom_speed; /* Custom baud, if set */
+ uint ch_baud_info; /* Current baud info for /proc output */
+ ulong ch_rxcount; /* total of data received so far */
+ ulong ch_txcount; /* total of data transmitted so far */
+ ulong ch_err_parity; /* Count of parity errors on channel */
+ ulong ch_err_frame; /* Count of framing errors on channel */
+ ulong ch_err_break; /* Count of breaks on channel */
+ ulong ch_err_overrun; /* Count of overruns on channel */
+
+ uint ch_sniff_in;
+ uint ch_sniff_out;
+ char *ch_sniff_buf; /* Sniff buffer for proc */
+ ulong ch_sniff_flags; /* Channel flags */
+ wait_queue_head_t ch_sniff_wait;
+};
+
+
+/*************************************************************************
+ *
+ * Prototypes for non-static functions used in more than one module
+ *
+ *************************************************************************/
+
+extern int dgap_ms_sleep(ulong ms);
+extern void *dgap_driver_kzmalloc(size_t size, int priority);
+extern char *dgap_ioctl_name(int cmd);
+extern void dgap_do_bios_load(struct board_t *brd, uchar __user *ubios, int len);
+extern void dgap_do_fep_load(struct board_t *brd, uchar __user *ufep, int len);
+extern void dgap_do_conc_load(struct board_t *brd, uchar *uaddr, int len);
+extern void dgap_do_config_load(uchar __user *uaddr, int len);
+extern int dgap_after_config_loaded(void);
+extern int dgap_finalize_board_init(struct board_t *brd);
+
+/*
+ * Our Global Variables.
+ */
+extern int dgap_driver_state; /* The state of the driver */
+extern int dgap_debug; /* Debug variable */
+extern int dgap_rawreadok; /* Set if user wants rawreads */
+extern int dgap_poll_tick; /* Poll interval - 20 ms */
+extern spinlock_t dgap_global_lock; /* Driver global spinlock */
+extern uint dgap_NumBoards; /* Total number of boards */
+extern struct board_t *dgap_Board[MAXBOARDS]; /* Array of board structs */
+extern ulong dgap_poll_counter; /* Times the poller has run */
+extern char *dgap_config_buf; /* The config file buffer */
+extern spinlock_t dgap_dl_lock; /* Downloader spinlock */
+extern wait_queue_head_t dgap_dl_wait; /* Wait queue for downloader */
+extern int dgap_dl_action; /* Action flag for downloader */
+extern int dgap_registerttyswithsysfs; /* Should we register the */
+ /* ttys with sysfs or not */
+
+/*
+ * Global functions declared in dgap_fep5.c, but must be hidden from
+ * user space programs.
+ */
+extern void dgap_poll_tasklet(unsigned long data);
+extern void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1, uchar byte2, uint ncmds);
+extern void dgap_cmdw(struct channel_t *ch, uchar cmd, u16 word, uint ncmds);
+extern void dgap_wmove(struct channel_t *ch, char *buf, uint cnt);
+extern int dgap_param(struct tty_struct *tty);
+extern void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf, unsigned char *fbuf, int *len);
+extern uint dgap_get_custom_baud(struct channel_t *ch);
+extern void dgap_firmware_reset_port(struct channel_t *ch);
+
+#endif
diff --git a/drivers/staging/dgap/dgap_fep5.c b/drivers/staging/dgap/dgap_fep5.c
new file mode 100644
index 0000000..4464f02
--- /dev/null
+++ b/drivers/staging/dgap/dgap_fep5.c
@@ -0,0 +1,1953 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ *
+ * $Id: dgap_fep5.c,v 1.2 2011/06/21 10:35:40 markh Exp $
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/delay.h> /* For udelay */
+#include <asm/uaccess.h> /* For copy_from_user/copy_to_user */
+#include <linux/tty.h>
+#include <linux/tty_flip.h> /* For tty_schedule_flip */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
+#include <linux/sched.h>
+#endif
+
+#include "dgap_driver.h"
+#include "dgap_pci.h"
+#include "dgap_fep5.h"
+#include "dgap_tty.h"
+#include "dgap_conf.h"
+#include "dgap_parse.h"
+#include "dgap_trace.h"
+
+/*
+ * Our function prototypes
+ */
+static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds);
+static int dgap_event(struct board_t *bd);
+
+/*
+ * internal variables
+ */
+static uint dgap_count = 500;
+
+
+/*
+ * Loads the dgap.conf config file from the user.
+ */
+void dgap_do_config_load(uchar __user *uaddr, int len)
+{
+ int orig_len = len;
+ char *to_addr;
+ uchar __user *from_addr = uaddr;
+ char buf[U2BSIZE];
+ int n;
+
+ to_addr = dgap_config_buf = dgap_driver_kzmalloc(len + 1, GFP_ATOMIC);
+ if (!dgap_config_buf) {
+ DPR_INIT(("dgap_do_config_load - unable to allocate memory for file\n"));
+ dgap_driver_state = DRIVER_NEED_CONFIG_LOAD;
+ return;
+ }
+
+ n = U2BSIZE;
+ while (len) {
+
+ if (n > len)
+ n = len;
+
+		if (copy_from_user(buf, from_addr, n))
+ return;
+
+ /* Copy data from buffer to kernel memory */
+ memcpy(to_addr, buf, n);
+
+ /* increment counts */
+ len -= n;
+ to_addr += n;
+ from_addr += n;
+ n = U2BSIZE;
+ }
+
+ dgap_config_buf[orig_len] = '\0';
+
+ to_addr = dgap_config_buf;
+ dgap_parsefile(&to_addr, TRUE);
+
+ DPR_INIT(("dgap_config_load() finish\n"));
+
+ return;
+}
+
+
+int dgap_after_config_loaded(void)
+{
+ int i = 0;
+ int rc = 0;
+
+ /*
+ * Register our ttys, now that we have the config loaded.
+ */
+ for (i = 0; i < dgap_NumBoards; ++i) {
+
+ /*
+ * Initialize KME waitqueues...
+ */
+ init_waitqueue_head(&(dgap_Board[i]->kme_wait));
+
+ /*
+ * allocate flip buffer for board.
+ */
+ dgap_Board[i]->flipbuf = dgap_driver_kzmalloc(MYFLIPLEN, GFP_ATOMIC);
+ dgap_Board[i]->flipflagbuf = dgap_driver_kzmalloc(MYFLIPLEN, GFP_ATOMIC);
+ }
+
+ return (rc);
+}
+
+
+
+/*=======================================================================
+ *
+ * usertoboard - copy from user space to board space.
+ *
+ *=======================================================================*/
+static int dgap_usertoboard(struct board_t *brd, char *to_addr, char __user *from_addr, int len)
+{
+ char buf[U2BSIZE];
+ int n = U2BSIZE;
+
+ if (!brd || brd->magic != DGAP_BOARD_MAGIC)
+ return(-EFAULT);
+
+ while (len) {
+ if (n > len)
+ n = len;
+
+		if (copy_from_user(buf, from_addr, n)) {
+ return(-EFAULT);
+ }
+
+ /* Copy data from buffer to card memory */
+ memcpy_toio(to_addr, buf, n);
+
+ /* increment counts */
+ len -= n;
+ to_addr += n;
+ from_addr += n;
+ n = U2BSIZE;
+ }
+ return(0);
+}
+
+
+/*
+ * Copies the BIOS code from the user to the board,
+ * and starts the BIOS running.
+ */
+void dgap_do_bios_load(struct board_t *brd, uchar __user *ubios, int len)
+{
+ uchar *addr;
+ uint offset;
+ int i;
+
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return;
+
+ DPR_INIT(("dgap_do_bios_load() start\n"));
+
+ addr = brd->re_map_membase;
+
+ /*
+ * clear POST area
+ */
+ for (i = 0; i < 16; i++)
+ writeb(0, addr + POSTAREA + i);
+
+ /*
+ * Download bios
+ */
+ offset = 0x1000;
+	if (dgap_usertoboard(brd, addr + offset, ubios, len) < 0) {
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ return;
+ }
+
+ writel(0x0bf00401, addr);
+ writel(0, (addr + 4));
+
+ /* Clear the reset, and change states. */
+ writeb(FEPCLR, brd->re_map_port);
+ brd->state = WAIT_BIOS_LOAD;
+}
+
+
+/*
+ * Checks to see if the BIOS completed running on the card.
+ */
+static void dgap_do_wait_for_bios(struct board_t *brd)
+{
+ uchar *addr;
+ u16 word;
+
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return;
+
+ addr = brd->re_map_membase;
+ word = readw(addr + POSTAREA);
+
+ /* Check to see if BIOS thinks board is good. (GD). */
+ if (word == *(u16 *) "GD") {
+ DPR_INIT(("GOT GD in memory, moving states.\n"));
+ brd->state = FINISHED_BIOS_LOAD;
+ return;
+ }
+
+	/* Give up on the board after waiting too long */
+ if (brd->wait_for_bios++ > 5000) {
+ u16 err1 = readw(addr + SEQUENCE);
+ u16 err2 = readw(addr + ERROR);
+ APR(("***WARNING*** %s failed diagnostics. Error #(%x,%x).\n",
+ brd->name, err1, err2));
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ }
+}
+
+
+/*
+ * Copies the FEP code from the user to the board,
+ * and starts the FEP running.
+ */
+void dgap_do_fep_load(struct board_t *brd, uchar __user *ufep, int len)
+{
+ uchar *addr;
+ uint offset;
+
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return;
+
+ addr = brd->re_map_membase;
+
+ DPR_INIT(("dgap_do_fep_load() for board %s : start\n", brd->name));
+
+ /*
+ * Download FEP
+ */
+ offset = 0x1000;
+	if (dgap_usertoboard(brd, addr + offset, ufep, len) < 0) {
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ return;
+ }
+
+ /*
+ * If board is a concentrator product, we need to give
+ * it its config string describing how the concentrators look.
+ */
+ if ((brd->type == PCX) || (brd->type == PEPC)) {
+ uchar string[100];
+ uchar *config, *xconfig;
+ int i = 0;
+
+ xconfig = dgap_create_config_string(brd, string);
+
+ /* Write string to board memory */
+ config = addr + CONFIG;
+ for (; i < CONFIGSIZE; i++, config++, xconfig++) {
+ writeb(*xconfig, config);
+ if ((*xconfig & 0xff) == 0xff)
+ break;
+ }
+ }
+
+ writel(0xbfc01004, (addr + 0xc34));
+ writel(0x3, (addr + 0xc30));
+
+ /* change states. */
+ brd->state = WAIT_FEP_LOAD;
+
+ DPR_INIT(("dgap_do_fep_load() for board %s : finish\n", brd->name));
+
+}
+
+
+/*
+ * Waits for the FEP to report that it's ready for us to use.
+ */
+static void dgap_do_wait_for_fep(struct board_t *brd)
+{
+ uchar *addr;
+ u16 word;
+
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return;
+
+ addr = brd->re_map_membase;
+
+ DPR_INIT(("dgap_do_wait_for_fep() for board %s : start. addr: %p\n", brd->name, addr));
+
+ word = readw(addr + FEPSTAT);
+
+ /* Check to see if FEP is up and running now. */
+ if (word == *(u16 *) "OS") {
+ DPR_INIT(("GOT OS in memory for board %s, moving states.\n", brd->name));
+ brd->state = FINISHED_FEP_LOAD;
+
+ /*
+ * Check to see if the board can support FEP5+ commands.
+ */
+ word = readw(addr + FEP5_PLUS);
+ if (word == *(u16 *) "5A") {
+ DPR_INIT(("GOT 5A in memory for board %s, board supports extended FEP5 commands.\n", brd->name));
+ brd->bd_flags |= BD_FEP5PLUS;
+ }
+
+ return;
+ }
+
+	/* Give up on the board after waiting too long */
+ if (brd->wait_for_fep++ > 5000) {
+ u16 err1 = readw(addr + SEQUENCE);
+ u16 err2 = readw(addr + ERROR);
+ APR(("***WARNING*** FEPOS for %s not functioning. Error #(%x,%x).\n",
+ brd->name, err1, err2));
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ }
+
+ DPR_INIT(("dgap_do_wait_for_fep() for board %s : finish\n", brd->name));
+}
+
+
+/*
+ * Physically forces the FEP5 card to reset itself.
+ */
+static void dgap_do_reset_board(struct board_t *brd)
+{
+ uchar check;
+ u32 check1;
+ u32 check2;
+ int i = 0;
+
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase || !brd->re_map_port) {
+ DPR_INIT(("dgap_do_reset_board() start. bad values. brd: %p mem: %p io: %p\n",
+ brd, brd ? brd->re_map_membase : 0, brd ? brd->re_map_port : 0));
+ return;
+ }
+
+ DPR_INIT(("dgap_do_reset_board() start. io: %p\n", brd->re_map_port));
+
+ /* FEPRST does not vary among supported boards */
+ writeb(FEPRST, brd->re_map_port);
+
+ for (i = 0; i <= 1000; i++) {
+ check = readb(brd->re_map_port) & 0xe;
+ if (check == FEPRST)
+ break;
+ udelay(10);
+
+ }
+ if (i > 1000) {
+ APR(("*** WARNING *** Board not resetting... Failing board.\n"));
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ goto failed;
+ }
+
+ /*
+ * Make sure there really is memory out there.
+ */
+ writel(0xa55a3cc3, (brd->re_map_membase + LOWMEM));
+ writel(0x5aa5c33c, (brd->re_map_membase + HIGHMEM));
+ check1 = readl(brd->re_map_membase + LOWMEM);
+ check2 = readl(brd->re_map_membase + HIGHMEM);
+
+ if ((check1 != 0xa55a3cc3) || (check2 != 0x5aa5c33c)) {
+ APR(("*** Warning *** No memory at %p for board.\n", brd->re_map_membase));
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ goto failed;
+ }
+
+ if (brd->state != BOARD_FAILED)
+ brd->state = FINISHED_RESET;
+
+failed:
+ DPR_INIT(("dgap_do_reset_board() finish\n"));
+}
+
+
+/*
+ * Sends a concentrator image into the FEP5 board.
+ */
+void dgap_do_conc_load(struct board_t *brd, uchar *uaddr, int len)
+{
+ char *vaddr;
+ u16 offset = 0;
+ struct downld_t *to_dp;
+
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return;
+
+ vaddr = brd->re_map_membase;
+
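+ /*
+ * DOWNREQ holds the offset of the download request buffer
+ * that the FEP wants filled with the concentrator image.
+ */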
+ offset = readw((u16 *) (vaddr + DOWNREQ));
+ to_dp = (struct downld_t *) (vaddr + (int) offset);
+
+ /*
+ * The image was already read into kernel space,
+ * we do NOT need a user space read here
+ */
+ memcpy_toio((char *) to_dp, uaddr, sizeof(struct downld_t));
+
+ /* Tell card we have data for it */
+ writew(0, vaddr + (DOWNREQ));
+
+ brd->conc_dl_status = NO_PENDING_CONCENTRATOR_REQUESTS;
+}
+
+
+#define EXPANSION_ROM_SIZE (64 * 1024)
+#define FEP5_ROM_MAGIC (0xFEFFFFFF)
+
+static void dgap_get_vpd(struct board_t *brd)
+{
+ u32 magic;
+ u32 base_offset;
+ u16 rom_offset;
+ u16 vpd_offset;
+ u16 image_length;
+ u16 i;
+ uchar byte1;
+ uchar byte2;
+
+ /*
+ * Poke the magic number at the PCI Rom Address location.
+ * If VPD is supported, the value read from that address
+ * will be non-zero.
+ */
+ magic = FEP5_ROM_MAGIC;
+ pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
+ pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);
+
+ /* VPD not supported, bail */
+ if (!magic)
+ return;
+
+ /*
+ * To get to the OTPROM memory, we have to write the board's base
+ * address OR'ed with 1 into the PCI Rom Address location.
+ */
+ magic = brd->membase | 0x01;
+ pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
+ pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);
+
+ byte1 = readb(brd->re_map_membase);
+ byte2 = readb(brd->re_map_membase + 1);
+
+ /*
+ * If the board correctly swapped to the OTPROM memory,
+ * the first 2 bytes (header) should be 0x55, 0xAA
+ */
+ if (byte1 == 0x55 && byte2 == 0xAA) {
+
+ base_offset = 0;
+
+ /*
+ * We have to run through all the OTPROM memory looking
+ * for the VPD offset.
+ */
+ while (base_offset <= EXPANSION_ROM_SIZE) {
+
+ /*
+ * Lots of magic numbers here.
+ *
+ * The VPD offset is located inside the ROM Data Structure.
+ * We also have to remember the length of each
+ * ROM Data Structure, so we can "hop" to the next
+ * entry if the VPD isn't in the current
+ * ROM Data Structure.
+ */
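+ /*
+ * These offsets correspond to the standard PCI expansion ROM
+ * layout: offset 0x18 in the ROM header points to the PCI Data
+ * Structure, and within that structure offset 0x10 is the image
+ * length (in 512-byte units) and offset 0x08 is the VPD pointer.
+ */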
+ rom_offset = readw(brd->re_map_membase + base_offset + 0x18);
+ image_length = readw(brd->re_map_membase + rom_offset + 0x10) * 512;
+ vpd_offset = readw(brd->re_map_membase + rom_offset + 0x08);
+
+ /* Found the VPD entry */
+ if (vpd_offset)
+ break;
+
+ /* We didn't find a VPD entry, go to next ROM entry. */
+ base_offset += image_length;
+
+ byte1 = readb(brd->re_map_membase + base_offset);
+ byte2 = readb(brd->re_map_membase + base_offset + 1);
+
+ /*
+ * If the new ROM offset doesn't have 0x55, 0xAA
+ * as its header, we have run out of ROM.
+ */
+ if (byte1 != 0x55 || byte2 != 0xAA)
+ break;
+ }
+
+ /*
+ * If we have a VPD offset, then mark the board
+ * as having a valid VPD, and copy VPDSIZE (512) bytes of
+ * that VPD to the buffer we have in our board structure.
+ */
+ if (vpd_offset) {
+ brd->bd_flags |= BD_HAS_VPD;
+ for (i = 0; i < VPDSIZE; i++)
+ brd->vpd[i] = readb(brd->re_map_membase + vpd_offset + i);
+ }
+ }
+
+ /*
+ * We MUST poke the magic number at the PCI Rom Address location again.
+ * This makes the card report the regular board memory back to us,
+ * rather than the OTPROM memory.
+ */
+ magic = FEP5_ROM_MAGIC;
+ pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
+}
+
+
+/*
+ * Our board poller function.
+ */
+void dgap_poll_tasklet(unsigned long data)
+{
+ struct board_t *bd = (struct board_t *) data;
+ ulong lock_flags;
+ ulong lock_flags2;
+ char *vaddr;
+ u16 head, tail;
+ u16 *chk_addr;
+ u16 check = 0;
+
+ if (!bd || (bd->magic != DGAP_BOARD_MAGIC)) {
+ APR(("dgap_poll_tasklet() - NULL or bad bd.\n"));
+ return;
+ }
+
+ if (bd->inhibit_poller)
+ return;
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+
+ vaddr = bd->re_map_membase;
+
+ /*
+ * If board is ready, parse deeper to see if there is anything to do.
+ */
+ if (bd->state == BOARD_READY) {
+
+ struct ev_t *eaddr = NULL;
+
+ if (!bd->re_map_membase) {
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return;
+ }
+ if (!bd->re_map_port) {
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return;
+ }
+
+ if (!bd->nasync) {
+ goto out;
+ }
+
+ /*
+ * If this is a CX or EPCX, we need to see if the firmware
+ * is requesting a concentrator image from us.
+ */
+ if ((bd->type == PCX) || (bd->type == PEPC)) {
+ chk_addr = (u16 *) (vaddr + DOWNREQ);
+ check = readw(chk_addr);
+ /* Nonzero if FEP is requesting concentrator image. */
+ if (check) {
+ if (bd->conc_dl_status == NO_PENDING_CONCENTRATOR_REQUESTS)
+ bd->conc_dl_status = NEED_CONCENTRATOR;
+ /*
+ * Signal the downloader; it's got some work to do.
+ */
+ DGAP_LOCK(dgap_dl_lock, lock_flags2);
+ if (dgap_dl_action != 1) {
+ dgap_dl_action = 1;
+ wake_up_interruptible(&dgap_dl_wait);
+ }
+ DGAP_UNLOCK(dgap_dl_lock, lock_flags2);
+
+ }
+ }
+
+ eaddr = (struct ev_t *) (vaddr + EVBUF);
+
+ /* Get our head and tail */
+ head = readw(&(eaddr->ev_head));
+ tail = readw(&(eaddr->ev_tail));
+
+ /*
+ * If there is an event pending, go service it.
+ */
+ if (head != tail) {
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ dgap_event(bd);
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ }
+
+out:
+ /*
+ * If board is doing interrupts, ACK the interrupt.
+ */
+ if (bd && bd->intr_running) {
+ readb(bd->re_map_port + 2);
+ }
+
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return;
+ }
+
+ /* Our state machine to get the board up and running */
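+ /*
+ * Each poll advances the board one step through bring-up:
+ * reset, config match, device creation, BIOS load, FEP load,
+ * tty/proc creation, and finally BOARD_READY.  The image
+ * downloads themselves are handled by the downloader, which
+ * is woken via dgap_dl_wait at each step that needs it.
+ */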
+
+ /* Reset board */
+ if (bd->state == NEED_RESET) {
+
+ /* Get VPD info */
+ dgap_get_vpd(bd);
+
+ dgap_do_reset_board(bd);
+ }
+
+ /* Move to next state */
+ if (bd->state == FINISHED_RESET) {
+ bd->state = NEED_CONFIG;
+ }
+
+ if (bd->state == NEED_CONFIG) {
+ /*
+ * Match this board to a config the user created for us.
+ */
+ bd->bd_config = dgap_find_config(bd->type, bd->pci_bus, bd->pci_slot);
+
+ /*
+ * Because the 4 port Xr products share the same PCI ID
+ * as the 8 port Xr products, if we receive a NULL config
+ * back and this is a PAPORT8 board, retry the
+ * lookup as a PAPORT4 board.
+ */
+ if (bd->type == PAPORT8 && !bd->bd_config) {
+ bd->bd_config = dgap_find_config(PAPORT4, bd->pci_bus, bd->pci_slot);
+ }
+
+ /*
+ * Register the ttys (if any) into the kernel.
+ */
+ if (bd->bd_config) {
+ bd->state = FINISHED_CONFIG;
+ }
+ else {
+ bd->state = CONFIG_NOT_FOUND;
+ }
+ }
+
+ /* Move to next state */
+ if (bd->state == FINISHED_CONFIG) {
+ bd->state = NEED_DEVICE_CREATION;
+ }
+
+ /* Move to next state */
+ if (bd->state == NEED_DEVICE_CREATION) {
+ /*
+ * Signal the downloader; it's got some work to do.
+ */
+ DGAP_LOCK(dgap_dl_lock, lock_flags2);
+ if (dgap_dl_action != 1) {
+ dgap_dl_action = 1;
+ wake_up_interruptible(&dgap_dl_wait);
+ }
+ DGAP_UNLOCK(dgap_dl_lock, lock_flags2);
+ }
+
+ /* Move to next state */
+ if (bd->state == FINISHED_DEVICE_CREATION) {
+ bd->state = NEED_BIOS_LOAD;
+ }
+
+ /* Move to next state */
+ if (bd->state == NEED_BIOS_LOAD) {
+ /*
+ * Signal the downloader; it's got some work to do.
+ */
+ DGAP_LOCK(dgap_dl_lock, lock_flags2);
+ if (dgap_dl_action != 1) {
+ dgap_dl_action = 1;
+ wake_up_interruptible(&dgap_dl_wait);
+ }
+ DGAP_UNLOCK(dgap_dl_lock, lock_flags2);
+ }
+
+ /* Wait for BIOS to test board... */
+ if (bd->state == WAIT_BIOS_LOAD) {
+ dgap_do_wait_for_bios(bd);
+ }
+
+ /* Move to next state */
+ if (bd->state == FINISHED_BIOS_LOAD) {
+ bd->state = NEED_FEP_LOAD;
+
+ /*
+ * Signal the downloader; it's got some work to do.
+ */
+ DGAP_LOCK(dgap_dl_lock, lock_flags2);
+ if (dgap_dl_action != 1) {
+ dgap_dl_action = 1;
+ wake_up_interruptible(&dgap_dl_wait);
+ }
+ DGAP_UNLOCK(dgap_dl_lock, lock_flags2);
+ }
+
+ /* Wait for FEP to load on board... */
+ if (bd->state == WAIT_FEP_LOAD) {
+ dgap_do_wait_for_fep(bd);
+ }
+
+
+ /* Move to next state */
+ if (bd->state == FINISHED_FEP_LOAD) {
+
+ /*
+ * Do tty device initialization.
+ */
+ int rc = dgap_tty_init(bd);
+
+ if (rc < 0) {
+ dgap_tty_uninit(bd);
+ APR(("Can't init tty devices (%d)\n", rc));
+ bd->state = BOARD_FAILED;
+ bd->dpastatus = BD_NOFEP;
+ }
+ else {
+ bd->state = NEED_PROC_CREATION;
+
+ /*
+ * Signal the downloader; it's got some work to do.
+ */
+ DGAP_LOCK(dgap_dl_lock, lock_flags2);
+ if (dgap_dl_action != 1) {
+ dgap_dl_action = 1;
+ wake_up_interruptible(&dgap_dl_wait);
+ }
+ DGAP_UNLOCK(dgap_dl_lock, lock_flags2);
+ }
+ }
+
+ /* Move to next state */
+ if (bd->state == FINISHED_PROC_CREATION) {
+
+ bd->state = BOARD_READY;
+ bd->dpastatus = BD_RUNNING;
+
+ /*
+ * If the user requested the board to run in interrupt mode,
+ * go and set it up on the board.
+ */
+ if (bd->intr_used) {
+ writew(1, (bd->re_map_membase + ENABLE_INTR));
+ /*
+ * Tell the board to poll the UARTS as fast as possible.
+ */
+ writew(FEPPOLL_MIN, (bd->re_map_membase + FEPPOLL));
+ bd->intr_running = 1;
+ }
+
+ /* Wake up anyone waiting for board state to change to ready */
+ wake_up_interruptible(&bd->state_wait);
+ }
+
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+}
+
+
+/*=======================================================================
+ *
+ * dgap_cmdb - Sends a 2 byte command to the FEP.
+ *
+ * ch - Pointer to channel structure.
+ * cmd - Command to be sent.
+ * byte1 - Integer containing first byte to be sent.
+ * byte2 - Integer containing second byte to be sent.
+ * ncmds - Wait until ncmds or fewer cmds are left
+ * in the cmd buffer before returning.
+ *
+ *=======================================================================*/
+void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1, uchar byte2, uint ncmds)
+{
+ char *vaddr = NULL;
+ struct cm_t *cm_addr = NULL;
+ uint count;
+ uint n;
+ u16 head;
+ u16 tail;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ /*
+ * Check if board is still alive.
+ */
+ if (ch->ch_bd->state == BOARD_FAILED) {
+ DPR_CORE(("%s:%d board is in failed state.\n", __FILE__, __LINE__));
+ return;
+ }
+
+ /*
+ * Make sure the pointers are in range before
+ * writing to the FEP memory.
+ */
+ vaddr = ch->ch_bd->re_map_membase;
+
+ if (!vaddr)
+ return;
+
+ cm_addr = (struct cm_t *) (vaddr + CMDBUF);
+ head = readw(&(cm_addr->cm_head));
+
+ /*
+ * Forget it if pointers out of range.
+ */
+ if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
+ DPR_CORE(("%s:%d pointers out of range, failing board!\n", __FILE__, __LINE__));
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+
+ /*
+ * Put the data in the circular command buffer.
+ */
+ writeb(cmd, (char *) (vaddr + head + CMDSTART + 0));
+ writeb((uchar) ch->ch_portnum, (char *) (vaddr + head + CMDSTART + 1));
+ writeb(byte1, (char *) (vaddr + head + CMDSTART + 2));
+ writeb(byte2, (char *) (vaddr + head + CMDSTART + 3));
+
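+ /*
+ * Advance head one 4-byte command slot; the mask wraps it
+ * within the circular command buffer and keeps it aligned.
+ */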
+ head = (head + 4) & (CMDMAX - CMDSTART - 4);
+
+ writew(head, &(cm_addr->cm_head));
+
+ /*
+ * Wait if necessary before updating the head
+ * pointer to limit the number of outstanding
+ * commands to the FEP. If the time spent waiting
+ * is outlandish, declare the FEP dead.
+ */
+ for (count = dgap_count ;;) {
+
+ head = readw(&(cm_addr->cm_head));
+ tail = readw(&(cm_addr->cm_tail));
+
+ n = (head - tail) & (CMDMAX - CMDSTART - 4);
+
+ if (n <= ncmds * sizeof(struct cm_t))
+ break;
+
+ if (--count == 0) {
+ DPR_CORE(("%s:%d failing board.\n",__FILE__, __LINE__));
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+ udelay(10);
+ }
+}
+
+
+/*=======================================================================
+ *
+ * dgap_cmdw - Sends a 1 word command to the FEP.
+ *
+ * ch - Pointer to channel structure.
+ * cmd - Command to be sent.
+ * word - Integer containing word to be sent.
+ * ncmds - Wait until ncmds or fewer cmds are left
+ * in the cmd buffer before returning.
+ *
+ *=======================================================================*/
+void dgap_cmdw(struct channel_t *ch, uchar cmd, u16 word, uint ncmds)
+{
+ char *vaddr = NULL;
+ struct cm_t *cm_addr = NULL;
+ uint count;
+ uint n;
+ u16 head;
+ u16 tail;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ /*
+ * Check if board is still alive.
+ */
+ if (ch->ch_bd->state == BOARD_FAILED) {
+ DPR_CORE(("%s:%d board is failed!\n", __FILE__, __LINE__));
+ return;
+ }
+
+ /*
+ * Make sure the pointers are in range before
+ * writing to the FEP memory.
+ */
+ vaddr = ch->ch_bd->re_map_membase;
+ if (!vaddr)
+ return;
+
+ cm_addr = (struct cm_t *) (vaddr + CMDBUF);
+ head = readw(&(cm_addr->cm_head));
+
+ /*
+ * Forget it if pointers out of range.
+ */
+ if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
+ DPR_CORE(("%s:%d Pointers out of range. Failing board.\n",__FILE__, __LINE__));
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+
+ /*
+ * Put the data in the circular command buffer.
+ */
+ writeb(cmd, (char *) (vaddr + head + CMDSTART + 0));
+ writeb((uchar) ch->ch_portnum, (char *) (vaddr + head + CMDSTART + 1));
+ writew((u16) word, (char *) (vaddr + head + CMDSTART + 2));
+
+ head = (head + 4) & (CMDMAX - CMDSTART - 4);
+
+ writew(head, &(cm_addr->cm_head));
+
+ /*
+ * Wait if necessary before updating the head
+ * pointer to limit the number of outstanding
+ * commands to the FEP. If the time spent waiting
+ * is outlandish, declare the FEP dead.
+ */
+ for (count = dgap_count ;;) {
+
+ head = readw(&(cm_addr->cm_head));
+ tail = readw(&(cm_addr->cm_tail));
+
+ n = (head - tail) & (CMDMAX - CMDSTART - 4);
+
+ if (n <= ncmds * sizeof(struct cm_t))
+ break;
+
+ if (--count == 0) {
+ DPR_CORE(("%s:%d Failing board.\n",__FILE__, __LINE__));
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+ udelay(10);
+ }
+}
+
+
+
+/*=======================================================================
+ *
+ * dgap_cmdw_ext - Sends an extended word command to the FEP.
+ *
+ * ch - Pointer to channel structure.
+ * cmd - Command to be sent.
+ * word - Integer containing word to be sent.
+ * ncmds - Wait until ncmds or fewer cmds are left
+ * in the cmd buffer before returning.
+ *
+ *=======================================================================*/
+static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds)
+{
+ char *vaddr = NULL;
+ struct cm_t *cm_addr = NULL;
+ uint count;
+ uint n;
+ u16 head;
+ u16 tail;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ /*
+ * Check if board is still alive.
+ */
+ if (ch->ch_bd->state == BOARD_FAILED) {
+ DPR_CORE(("%s:%d board is failed!\n", __FILE__, __LINE__));
+ return;
+ }
+
+ /*
+ * Make sure the pointers are in range before
+ * writing to the FEP memory.
+ */
+ vaddr = ch->ch_bd->re_map_membase;
+ if (!vaddr)
+ return;
+
+ cm_addr = (struct cm_t *) (vaddr + CMDBUF);
+ head = readw(&(cm_addr->cm_head));
+
+ /*
+ * Forget it if pointers out of range.
+ */
+ if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
+ DPR_CORE(("%s:%d Pointers out of range. Failing board.\n",__FILE__, __LINE__));
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+
+ /*
+ * Put the data in the circular command buffer.
+ */
+
+ /* Write an FF to tell the FEP that we want an extended command */
+ writeb((uchar) 0xff, (char *) (vaddr + head + CMDSTART + 0));
+
+ writeb((uchar) ch->ch_portnum, (uchar *) (vaddr + head + CMDSTART + 1));
+ writew((u16) cmd, (char *) (vaddr + head + CMDSTART + 2));
+
+ /*
+ * If the second part of the command won't fit,
+ * put it at the beginning of the circular buffer.
+ */
+ if (((head + 4) >= ((CMDMAX - CMDSTART)) || (head & 03))) {
+ writew((u16) word, (char *) (vaddr + CMDSTART));
+ } else {
+ writew((u16) word, (char *) (vaddr + head + CMDSTART + 4));
+ }
+
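+ /* An extended command occupies two 4-byte slots, so advance head by 8. */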
+ head = (head + 8) & (CMDMAX - CMDSTART - 4);
+
+ writew(head, &(cm_addr->cm_head));
+
+ /*
+ * Wait if necessary before updating the head
+ * pointer to limit the number of outstanding
+ * commands to the FEP. If the time spent waiting
+ * is outlandish, declare the FEP dead.
+ */
+ for (count = dgap_count ;;) {
+
+ head = readw(&(cm_addr->cm_head));
+ tail = readw(&(cm_addr->cm_tail));
+
+ n = (head - tail) & (CMDMAX - CMDSTART - 4);
+
+ if (n <= ncmds * sizeof(struct cm_t))
+ break;
+
+ if (--count == 0) {
+ DPR_CORE(("%s:%d Failing board.\n",__FILE__, __LINE__));
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+ udelay(10);
+ }
+}
+
+
+/*=======================================================================
+ *
+ * dgap_wmove - Write data to FEP buffer.
+ *
+ * ch - Pointer to channel structure.
+ * buf - Pointer to characters to be moved.
+ * cnt - Number of characters to move.
+ *
+ *=======================================================================*/
+void dgap_wmove(struct channel_t *ch, char *buf, uint cnt)
+{
+ int n;
+ char *taddr;
+ struct bs_t *bs;
+ u16 head;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ /*
+ * Check parameters.
+ */
+ bs = ch->ch_bs;
+ head = readw(&(bs->tx_head));
+
+ /*
+ * If pointers are out of range, just return.
+ */
+ if ((cnt > ch->ch_tsize) || (unsigned)(head - ch->ch_tstart) >= ch->ch_tsize) {
+ DPR_CORE(("%s:%d pointer out of range", __FILE__, __LINE__));
+ return;
+ }
+
+ /*
+ * If the write wraps over the top of the circular buffer,
+ * move the portion up to the wrap point, and reset the
+ * pointers to the bottom.
+ */
+ n = ch->ch_tstart + ch->ch_tsize - head;
+
+ if (cnt >= n) {
+ cnt -= n;
+ taddr = ch->ch_taddr + head;
+ memcpy_toio(taddr, buf, n);
+ head = ch->ch_tstart;
+ buf += n;
+ }
+
+ /*
+ * Move rest of data.
+ */
+ taddr = ch->ch_taddr + head;
+ n = cnt;
+ memcpy_toio(taddr, buf, n);
+ head += cnt;
+
+ writew(head, &(bs->tx_head));
+}
+
+/*
+ * Retrieves the current custom baud rate from FEP memory
+ * and returns it to the caller.
+ * Returns 0 on error.
+ */
+uint dgap_get_custom_baud(struct channel_t *ch)
+{
+ uchar *vaddr;
+ ulong offset = 0;
+ uint value = 0;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) {
+ return (0);
+ }
+
+ if (!ch->ch_bd || ch->ch_bd->magic != DGAP_BOARD_MAGIC) {
+ return (0);
+ }
+
+ if (!(ch->ch_bd->bd_flags & BD_FEP5PLUS))
+ return (0);
+
+ vaddr = ch->ch_bd->re_map_membase;
+
+ if (!vaddr)
+ return (0);
+
+ /*
+ * Read from FEP memory what the FEP
+ * believes the custom baud rate is.
+ */
+ offset = ((((*(unsigned short *)(vaddr + ECS_SEG)) << 4) +
+ (ch->ch_portnum * 0x28) + LINE_SPEED));
+
+ value = readw(vaddr + offset);
+ return (value);
+}
+
+
+/*
+ * Calls the firmware to reset this channel.
+ */
+void dgap_firmware_reset_port(struct channel_t *ch)
+{
+ dgap_cmdb(ch, CHRESET, 0, 0, 0);
+
+ /*
+ * Now that the channel is reset, we need to make sure
+ * all the current settings get reapplied to the port
+ * in the firmware.
+ *
+ * So we will set the driver's cache of firmware
+ * settings all to 0, and then call param.
+ */
+ ch->ch_fepiflag = 0;
+ ch->ch_fepcflag = 0;
+ ch->ch_fepoflag = 0;
+ ch->ch_fepstartc = 0;
+ ch->ch_fepstopc = 0;
+ ch->ch_fepastartc = 0;
+ ch->ch_fepastopc = 0;
+ ch->ch_mostat = 0;
+ ch->ch_hflow = 0;
+}
+
+
+/*=======================================================================
+ *
+ * dgap_param - Set Digi parameters.
+ *
+ * struct tty_struct * - TTY for port.
+ *
+ *=======================================================================*/
+int dgap_param(struct tty_struct *tty)
+{
+ struct ktermios *ts;
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct bs_t *bs;
+ struct un_t *un;
+ u16 head;
+ u16 cflag;
+ u16 iflag;
+ uchar mval;
+ uchar hflow;
+
+ if (!tty || tty->magic != TTY_MAGIC) {
+ return (-ENXIO);
+ }
+
+ un = (struct un_t *) tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC) {
+ return (-ENXIO);
+ }
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) {
+ return (-ENXIO);
+ }
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC) {
+ return (-ENXIO);
+ }
+
+ bs = ch->ch_bs;
+ if (bs == 0) {
+ return (-ENXIO);
+ }
+
+ DPR_PARAM(("param start: tdev: %x cflags: %x oflags: %x iflags: %x\n",
+ ch->ch_tun.un_dev, ch->ch_c_cflag, ch->ch_c_oflag, ch->ch_c_iflag));
+
+ ts = &tty->termios;
+
+ /*
+ * If baud rate is zero, flush queues, and set mval to drop DTR.
+ */
+ if ((ch->ch_c_cflag & (CBAUD)) == 0) {
+
+ /* flush rx */
+ head = readw(&(ch->ch_bs->rx_head));
+ writew(head, &(ch->ch_bs->rx_tail));
+
+ /* flush tx */
+ head = readw(&(ch->ch_bs->tx_head));
+ writew(head, &(ch->ch_bs->tx_tail));
+
+ ch->ch_flags |= (CH_BAUD0);
+
+ /* Drop RTS and DTR */
+ ch->ch_mval &= ~(D_RTS(ch)|D_DTR(ch));
+ mval = D_DTR(ch) | D_RTS(ch);
+ ch->ch_baud_info = 0;
+
+ } else if (ch->ch_custom_speed && (bd->bd_flags & BD_FEP5PLUS)) {
+ /*
+ * Tell the fep to do the command
+ */
+
+ DPR_PARAM(("param: Want %d speed\n", ch->ch_custom_speed));
+
+ dgap_cmdw_ext(ch, 0xff01, ch->ch_custom_speed, 0);
+
+ /*
+ * Now read back from FEP memory what the FEP
+ * believes the custom baud rate is.
+ */
+ ch->ch_baud_info = ch->ch_custom_speed = dgap_get_custom_baud(ch);
+
+ DPR_PARAM(("param: Got %d speed\n", ch->ch_custom_speed));
+
+ /* Handle transition from B0 */
+ if (ch->ch_flags & CH_BAUD0) {
+ ch->ch_flags &= ~(CH_BAUD0);
+ ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
+ }
+ mval = D_DTR(ch) | D_RTS(ch);
+
+ } else {
+ /*
+ * Set baud rate, character size, and parity.
+ */
+
+
+ int iindex = 0;
+ int jindex = 0;
+ int baud = 0;
+
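+ /*
+ * Row is selected by CBAUDEX (+1) and DIGI_FAST (+2);
+ * column is the CBAUD value masked to its low byte.
+ */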
+ ulong bauds[4][16] = {
+ { /* slowbaud */
+ 0, 50, 75, 110,
+ 134, 150, 200, 300,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 },
+ { /* slowbaud & CBAUDEX */
+ 0, 57600, 115200, 230400,
+ 460800, 150, 200, 921600,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 },
+ { /* fastbaud */
+ 0, 57600, 76800, 115200,
+ 14400, 57600, 230400, 76800,
+ 115200, 230400, 28800, 460800,
+ 921600, 9600, 19200, 38400 },
+ { /* fastbaud & CBAUDEX */
+ 0, 57600, 115200, 230400,
+ 460800, 150, 200, 921600,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 }
+ };
+
+ /* Only use the TXPrint baud rate if the terminal unit is NOT open */
+ if (!(ch->ch_tun.un_flags & UN_ISOPEN) && (un->un_type == DGAP_PRINT))
+ baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
+ else
+ baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;
+
+ if (ch->ch_c_cflag & CBAUDEX)
+ iindex = 1;
+
+ if (ch->ch_digi.digi_flags & DIGI_FAST)
+ iindex += 2;
+
+ jindex = baud;
+
+ if ((iindex >= 0) && (iindex < 4) && (jindex >= 0) && (jindex < 16)) {
+ baud = bauds[iindex][jindex];
+ } else {
+ DPR_IOCTL(("baud indices were out of range (%d)(%d)",
+ iindex, jindex));
+ baud = 0;
+ }
+
+ if (baud == 0)
+ baud = 9600;
+
+ ch->ch_baud_info = baud;
+
+
+ /*
+ * CBAUD can have the CBAUDEX bit (0x1000) set these days to
+ * indicate a Linux baud rate remap.
+ * We use a different bit assignment for high speed. Clear this
+ * bit out while grabbing the parts of "cflag" we want.
+ */
+ cflag = ch->ch_c_cflag & ((CBAUD ^ CBAUDEX) | PARODD | PARENB | CSTOPB | CSIZE);
+
+ /*
+ * HUPCL bit is used by FEP to indicate fast baud
+ * table is to be used.
+ */
+ if ((ch->ch_digi.digi_flags & DIGI_FAST) || (ch->ch_c_cflag & CBAUDEX))
+ cflag |= HUPCL;
+
+
+ if ((ch->ch_c_cflag & CBAUDEX) && !(ch->ch_digi.digi_flags & DIGI_FAST)) {
+ /*
+ * The code below tries to guarantee that only baud rates
+ * 115200, 230400, 460800, 921600 are remapped. We use exclusive or
+ * because the various baud rates share common bit positions
+ * and therefore can't be tested for easily.
+ */
+ tcflag_t tcflag = (ch->ch_c_cflag & CBAUD) | CBAUDEX;
+ int baudpart = 0;
+
+ /* Map high speed requests to index into FEP's baud table */
+ switch (tcflag) {
+ case B57600 :
+ baudpart = 1;
+ break;
+#ifdef B76800
+ case B76800 :
+ baudpart = 2;
+ break;
+#endif
+ case B115200 :
+ baudpart = 3;
+ break;
+ case B230400 :
+ baudpart = 9;
+ break;
+ case B460800 :
+ baudpart = 11;
+ break;
+#ifdef B921600
+ case B921600 :
+ baudpart = 12;
+ break;
+#endif
+ default:
+ baudpart = 0;
+ }
+
+ if (baudpart)
+ cflag = (cflag & ~(CBAUD | CBAUDEX)) | baudpart;
+ }
+
+ cflag &= 0xffff;
+
+ if (cflag != ch->ch_fepcflag) {
+ ch->ch_fepcflag = (u16) (cflag & 0xffff);
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdw(ch, SCFLAG, (u16) cflag, 0);
+ }
+
+ /* Handle transition from B0 */
+ if (ch->ch_flags & CH_BAUD0) {
+ ch->ch_flags &= ~(CH_BAUD0);
+ ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
+ }
+ mval = D_DTR(ch) | D_RTS(ch);
+ }
+
+ /*
+ * Get input flags.
+ */
+ iflag = ch->ch_c_iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK | INPCK | ISTRIP | IXON | IXANY | IXOFF);
+
+ if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE)) {
+ iflag &= ~(IXON | IXOFF);
+ ch->ch_c_iflag &= ~(IXON | IXOFF);
+ }
+
+ /*
+ * Only the IBM Xr card can switch between
+ * 232 and 422 modes on the fly
+ */
+ if (bd->device == PCI_DEVICE_XR_IBM_DID) {
+ if (ch->ch_digi.digi_flags & DIGI_422)
+ dgap_cmdb(ch, SCOMMODE, MODE_422, 0, 0);
+ else
+ dgap_cmdb(ch, SCOMMODE, MODE_232, 0, 0);
+ }
+
+ if (ch->ch_digi.digi_flags & DIGI_ALTPIN)
+ iflag |= IALTPIN ;
+
+ if (iflag != ch->ch_fepiflag) {
+ ch->ch_fepiflag = iflag;
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdw(ch, SIFLAG, (u16) ch->ch_fepiflag, 0);
+ }
+
+ /*
+ * Select hardware handshaking.
+ */
+ hflow = 0;
+
+ if (ch->ch_c_cflag & CRTSCTS) {
+ hflow |= (D_RTS(ch) | D_CTS(ch));
+ }
+ if (ch->ch_digi.digi_flags & RTSPACE)
+ hflow |= D_RTS(ch);
+ if (ch->ch_digi.digi_flags & DTRPACE)
+ hflow |= D_DTR(ch);
+ if (ch->ch_digi.digi_flags & CTSPACE)
+ hflow |= D_CTS(ch);
+ if (ch->ch_digi.digi_flags & DSRPACE)
+ hflow |= D_DSR(ch);
+ if (ch->ch_digi.digi_flags & DCDPACE)
+ hflow |= D_CD(ch);
+
+ if (hflow != ch->ch_hflow) {
+ ch->ch_hflow = hflow;
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdb(ch, SHFLOW, (uchar) hflow, 0xff, 0);
+ }
+
+
+ /*
+ * Set RTS and/or DTR Toggle if needed, but only if product is FEP5+ based.
+ */
+ if (bd->bd_flags & BD_FEP5PLUS) {
+ u16 hflow2 = 0;
+ if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
+ hflow2 |= (D_RTS(ch));
+ }
+ if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
+ hflow2 |= (D_DTR(ch));
+ }
+
+ dgap_cmdw_ext(ch, 0xff03, hflow2, 0);
+ }
+
+ /*
+ * Set modem control lines.
+ */
+
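+ /*
+ * For every bit set in ch_mforce, take that modem bit from
+ * ch_mval instead of the value computed above.
+ */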
+ mval ^= ch->ch_mforce & (mval ^ ch->ch_mval);
+
+ DPR_PARAM(("dgap_param: mval: %x ch_mforce: %x ch_mval: %x ch_mostat: %x\n",
+ mval, ch->ch_mforce, ch->ch_mval, ch->ch_mostat));
+
+ if (ch->ch_mostat ^ mval) {
+ ch->ch_mostat = mval;
+
+ /* Okay to have channel and board locks held calling this */
+ DPR_PARAM(("dgap_param: Sending SMODEM mval: %x\n", mval));
+ dgap_cmdb(ch, SMODEM, (uchar) mval, D_RTS(ch)|D_DTR(ch), 0);
+ }
+
+ /*
+ * Read modem signals, and then call carrier function.
+ */
+ ch->ch_mistat = readb(&(bs->m_stat));
+ dgap_carrier(ch);
+
+ /*
+ * Set the start and stop characters.
+ */
+ if (ch->ch_startc != ch->ch_fepstartc || ch->ch_stopc != ch->ch_fepstopc) {
+ ch->ch_fepstartc = ch->ch_startc;
+ ch->ch_fepstopc = ch->ch_stopc;
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdb(ch, SFLOWC, ch->ch_fepstartc, ch->ch_fepstopc, 0);
+ }
+
+ /*
+ * Set the Auxiliary start and stop characters.
+ */
+ if (ch->ch_astartc != ch->ch_fepastartc || ch->ch_astopc != ch->ch_fepastopc) {
+ ch->ch_fepastartc = ch->ch_astartc;
+ ch->ch_fepastopc = ch->ch_astopc;
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdb(ch, SAFLOWC, ch->ch_fepastartc, ch->ch_fepastopc, 0);
+ }
+
+ DPR_PARAM(("param finish\n"));
+
+ return (0);
+}
+
+
+/*
+ * dgap_parity_scan()
+ *
+ * Convert the FEP5 way of reporting parity errors and breaks into
+ * the Linux line discipline way.
+ */
+void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf, unsigned char *fbuf, int *len)
+{
+ int l = *len;
+ int count = 0;
+ unsigned char *in, *cout, *fout;
+ unsigned char c;
+
+ in = cbuf;
+ cout = cbuf;
+ fout = fbuf;
+
+ DPR_PSCAN(("dgap_parity_scan start\n"));
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
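+ /*
+ * The FEP escapes exceptions in the receive stream with 0xff:
+ * a doubled 0xff is a literal 0xff data byte, while the
+ * sequence 0xff 0x00 <c> marks <c> as a break (c == 0) or a
+ * parity error (c != 0).
+ */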
+ while (l--) {
+ c = *in++;
+ switch (ch->pscan_state) {
+ default:
+ /* reset to sanity and fall through */
+ ch->pscan_state = 0;
+
+ case 0:
+ /* No FF seen yet */
+ if (c == (unsigned char) '\377') {
+ /* delete this character from stream */
+ ch->pscan_state = 1;
+ } else {
+ *cout++ = c;
+ *fout++ = TTY_NORMAL;
+ count += 1;
+ }
+ break;
+
+ case 1:
+ /* first FF seen */
+ if (c == (unsigned char) '\377') {
+ /* doubled ff, transform to single ff */
+ *cout++ = c;
+ *fout++ = TTY_NORMAL;
+ count += 1;
+ ch->pscan_state = 0;
+ } else {
+ /* save the value for examination in the next state */
+ ch->pscan_savechar = c;
+ ch->pscan_state = 2;
+ }
+ break;
+
+ case 2:
+ /* third character of ff sequence */
+
+ *cout++ = c;
+
+ if (ch->pscan_savechar == 0x0) {
+
+ if (c == 0x0) {
+ DPR_PSCAN(("dgap_parity_scan in 3rd char of ff seq. c: %x setting break.\n", c));
+ ch->ch_err_break++;
+ *fout++ = TTY_BREAK;
+ }
+ else {
+ DPR_PSCAN(("dgap_parity_scan in 3rd char of ff seq. c: %x setting parity.\n", c));
+ ch->ch_err_parity++;
+ *fout++ = TTY_PARITY;
+ }
+ }
+ else {
+ DPR_PSCAN(("%s:%d Logic Error.\n", __FILE__, __LINE__));
+ }
+
+ count += 1;
+ ch->pscan_state = 0;
+ }
+ }
+ *len = count;
+ DPR_PSCAN(("dgap_parity_scan finish\n"));
+}
+
+
+
+
+/*=======================================================================
+ *
+ * dgap_event - FEP to host event processing routine.
+ *
+ * bd - Board of current event.
+ *
+ *=======================================================================*/
+static int dgap_event(struct board_t *bd)
+{
+ struct channel_t *ch;
+ ulong lock_flags;
+ ulong lock_flags2;
+ struct bs_t *bs;
+ uchar *event;
+ uchar *vaddr = NULL;
+ struct ev_t *eaddr = NULL;
+ uint head;
+ uint tail;
+ int port;
+ int reason;
+ int modem;
+ int b1;
+
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (-ENXIO);
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+
+ vaddr = bd->re_map_membase;
+
+ if (!vaddr) {
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return (-ENXIO);
+ }
+
+ eaddr = (struct ev_t *) (vaddr + EVBUF);
+
+ /* Get our head and tail */
+ head = readw(&(eaddr->ev_head));
+ tail = readw(&(eaddr->ev_tail));
+
+ /*
+ * Forget it if pointers out of range.
+ */
+
+ if (head >= EVMAX - EVSTART || tail >= EVMAX - EVSTART ||
+ (head | tail) & 03) {
+ DPR_EVENT(("should be calling xxfail %d\n", __LINE__));
+ /* Let go of board lock */
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return (-ENXIO);
+ }
+
+ /*
+ * Loop to process all the events in the buffer.
+ */
+ while (tail != head) {
+
+ /*
+ * Get interrupt information.
+ */
+
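+ /*
+ * Each event is a 4-byte record: port number, reason flags,
+ * modem status, and one extra data byte.
+ */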
+ event = bd->re_map_membase + tail + EVSTART;
+
+ port = event[0];
+ reason = event[1];
+ modem = event[2];
+ b1 = event[3];
+
+ DPR_EVENT(("event: jiffies: %ld port: %d reason: %x modem: %x\n",
+ jiffies, port, reason, modem));
+
+ /*
+ * Make sure the interrupt is valid.
+ */
+ if ( port >= bd->nasync) {
+ goto next;
+ }
+
+ if (!(reason & (IFMODEM | IFBREAK | IFTLW | IFTEM | IFDATA))) {
+ goto next;
+ }
+
+ ch = bd->channels[port];
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) {
+ goto next;
+ }
+
+ /*
+ * If we have made it here, the event was valid.
+ * Lock down the channel.
+ */
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ bs = ch->ch_bs;
+
+ if (!bs) {
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ goto next;
+ }
+
+ /*
+ * Process received data.
+ */
+ if (reason & IFDATA) {
+
+ /*
+ * ALL LOCKS *MUST* BE DROPPED BEFORE CALLING INPUT!
+ * input could send some data to ld, which in turn
+ * could do a callback to one of our other functions.
+ */
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ dgap_input(ch);
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ if (ch->ch_flags & CH_RACTIVE)
+ ch->ch_flags |= CH_RENABLE;
+ else
+ writeb(1, &(bs->idata));
+
+ if (ch->ch_flags & CH_RWAIT) {
+ ch->ch_flags &= ~CH_RWAIT;
+
+ wake_up_interruptible(&ch->ch_tun.un_flags_wait);
+ }
+ }
+
+ /*
+ * Process Modem change signals.
+ */
+ if (reason & IFMODEM) {
+ ch->ch_mistat = modem;
+ dgap_carrier(ch);
+ }
+
+ /*
+ * Process break.
+ */
+ if (reason & IFBREAK) {
+
+ DPR_EVENT(("got IFBREAK\n"));
+
+ if (ch->ch_tun.un_tty) {
+ /* A break has been indicated */
+ ch->ch_err_break++;
+ tty_buffer_request_room(ch->ch_tun.un_tty->port, 1);
+ tty_insert_flip_char(ch->ch_tun.un_tty->port, 0, TTY_BREAK);
+ tty_flip_buffer_push(ch->ch_tun.un_tty->port);
+ }
+ }
+
+ /*
+ * Process Transmit low.
+ */
+ if (reason & IFTLW) {
+
+ DPR_EVENT(("event: got low event\n"));
+
+ if (ch->ch_tun.un_flags & UN_LOW) {
+ ch->ch_tun.un_flags &= ~UN_LOW;
+
+ if (ch->ch_tun.un_flags & UN_ISOPEN) {
+ if ((ch->ch_tun.un_tty->flags &
+ (1 << TTY_DO_WRITE_WAKEUP)) &&
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
+ ch->ch_tun.un_tty->ldisc->ops->write_wakeup)
+#else
+ ch->ch_tun.un_tty->ldisc.ops->write_wakeup)
+#endif
+ {
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
+ (ch->ch_tun.un_tty->ldisc->ops->write_wakeup)(ch->ch_tun.un_tty);
+#else
+ (ch->ch_tun.un_tty->ldisc.ops->write_wakeup)(ch->ch_tun.un_tty);
+#endif
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+ }
+ wake_up_interruptible(&ch->ch_tun.un_tty->write_wait);
+ wake_up_interruptible(&ch->ch_tun.un_flags_wait);
+
+ DPR_EVENT(("event: Got low event. jiffies: %lu\n", jiffies));
+ }
+ }
+
+ if (ch->ch_pun.un_flags & UN_LOW) {
+ ch->ch_pun.un_flags &= ~UN_LOW;
+ if (ch->ch_pun.un_flags & UN_ISOPEN) {
+ if ((ch->ch_pun.un_tty->flags &
+ (1 << TTY_DO_WRITE_WAKEUP)) &&
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
+ ch->ch_pun.un_tty->ldisc->ops->write_wakeup)
+#else
+ ch->ch_pun.un_tty->ldisc.ops->write_wakeup)
+#endif
+ {
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
+ (ch->ch_pun.un_tty->ldisc->ops->write_wakeup)(ch->ch_pun.un_tty);
+#else
+ (ch->ch_pun.un_tty->ldisc.ops->write_wakeup)(ch->ch_pun.un_tty);
+#endif
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+ }
+ wake_up_interruptible(&ch->ch_pun.un_tty->write_wait);
+ wake_up_interruptible(&ch->ch_pun.un_flags_wait);
+ }
+ }
+
+ if (ch->ch_flags & CH_WLOW) {
+ ch->ch_flags &= ~CH_WLOW;
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+ }
+
+ /*
+ * Process Transmit empty.
+ */
+ if (reason & IFTEM) {
+ DPR_EVENT(("event: got empty event\n"));
+
+ if (ch->ch_tun.un_flags & UN_EMPTY) {
+ ch->ch_tun.un_flags &= ~UN_EMPTY;
+ if (ch->ch_tun.un_flags & UN_ISOPEN) {
+ if ((ch->ch_tun.un_tty->flags &
+ (1 << TTY_DO_WRITE_WAKEUP)) &&
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
+ ch->ch_tun.un_tty->ldisc->ops->write_wakeup)
+#else
+ ch->ch_tun.un_tty->ldisc.ops->write_wakeup)
+#endif
+ {
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
+ (ch->ch_tun.un_tty->ldisc->ops->write_wakeup)(ch->ch_tun.un_tty);
+#else
+ (ch->ch_tun.un_tty->ldisc.ops->write_wakeup)(ch->ch_tun.un_tty);
+#endif
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+ }
+ wake_up_interruptible(&ch->ch_tun.un_tty->write_wait);
+ wake_up_interruptible(&ch->ch_tun.un_flags_wait);
+ }
+ }
+
+ if (ch->ch_pun.un_flags & UN_EMPTY) {
+ ch->ch_pun.un_flags &= ~UN_EMPTY;
+ if (ch->ch_pun.un_flags & UN_ISOPEN) {
+ if ((ch->ch_pun.un_tty->flags &
+ (1 << TTY_DO_WRITE_WAKEUP)) &&
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
+ ch->ch_pun.un_tty->ldisc->ops->write_wakeup)
+#else
+ ch->ch_pun.un_tty->ldisc.ops->write_wakeup)
+#endif
+ {
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
+ (ch->ch_pun.un_tty->ldisc->ops->write_wakeup)(ch->ch_pun.un_tty);
+#else
+ (ch->ch_pun.un_tty->ldisc.ops->write_wakeup)(ch->ch_pun.un_tty);
+#endif
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+ }
+ wake_up_interruptible(&ch->ch_pun.un_tty->write_wait);
+ wake_up_interruptible(&ch->ch_pun.un_flags_wait);
+ }
+ }
+
+
+ if (ch->ch_flags & CH_WEMPTY) {
+ ch->ch_flags &= ~CH_WEMPTY;
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+ }
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+
+next:
+ tail = (tail + 4) & (EVMAX - EVSTART - 4);
+ }
+
+ writew(tail, &(eaddr->ev_tail));
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ return (0);
+}
diff --git a/drivers/staging/dgap/dgap_fep5.h b/drivers/staging/dgap/dgap_fep5.h
new file mode 100644
index 0000000..3a12ba5
--- /dev/null
+++ b/drivers/staging/dgap/dgap_fep5.h
@@ -0,0 +1,253 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ *
+ ************************************************************************
+ *** FEP Version 5 dependent definitions
+ ************************************************************************/
+
+#ifndef __DGAP_FEP5_H
+#define __DGAP_FEP5_H
+
+/************************************************************************
+ * FEP memory offsets
+ ************************************************************************/
+#define START 0x0004L /* Execution start address */
+
+#define CMDBUF 0x0d10L /* Command (cm_t) structure offset */
+#define CMDSTART 0x0400L /* Start of command buffer */
+#define CMDMAX 0x0800L /* End of command buffer */
+
+#define EVBUF 0x0d18L /* Event (ev_t) structure */
+#define EVSTART 0x0800L /* Start of event buffer */
+#define EVMAX 0x0c00L /* End of event buffer */
+#define FEP5_PLUS 0x0E40 /* ASCII '5' and ASCII 'A' are here */
+#define ECS_SEG 0x0E44 /* Segment of the extended channel structure */
+#define LINE_SPEED 0x10 /* Offset into ECS_SEG for line speed */
+ /* if the fep has extended capabilities */
+
+/* BIOS MAGIC SPOTS */
+#define ERROR 0x0C14L /* BIOS error code */
+#define SEQUENCE 0x0C12L /* BIOS sequence indicator */
+#define POSTAREA 0x0C00L /* POST complete message area */
+
+/* FEP MAGIC SPOTS */
+#define FEPSTAT POSTAREA /* OS here when FEP comes up */
+#define NCHAN 0x0C02L /* number of ports FEP sees */
+#define PANIC 0x0C10L /* PANIC area for FEP */
+#define KMEMEM 0x0C30L /* Memory for KME use */
+#define CONFIG 0x0CD0L /* Concentrator configuration info */
+#define CONFIGSIZE 0x0030 /* configuration info size */
+#define DOWNREQ 0x0D00 /* Download request buffer pointer */
+
+#define CHANBUF 0x1000L /* Async channel (bs_t) structs */
+#define FEPOSSIZE 0x1FFF /* 8K FEPOS */
+
+#define XEMPORTS 0xC02 /*
+ * Offset in board memory where FEP5 stores
+ * how many ports it has detected.
+ * NOTE: FEP5 reports 64 ports when the user
+ * has the cable in EBI OUT instead of EBI IN.
+ */
+
+#define FEPCLR 0x00
+#define FEPMEM 0x02
+#define FEPRST 0x04
+#define FEPINT 0x08
+#define FEPMASK 0x0e
+#define FEPWIN 0x80
+
+#define LOWMEM 0x0100
+#define HIGHMEM 0x7f00
+
+#define FEPTIMEOUT 200000
+
+#define ENABLE_INTR 0x0e04 /* Enable interrupts flag */
+#define FEPPOLL_MIN 1 /* minimum of 1 millisecond */
+#define FEPPOLL_MAX 20 /* maximum of 20 milliseconds */
+#define FEPPOLL 0x0c26 /* Fep event poll interval */
+
+#define IALTPIN 0x0080 /* Input flag to swap DSR <-> DCD */
+
+/************************************************************************
+ * Command structure definition.
+ ************************************************************************/
+struct cm_t {
+ volatile unsigned short cm_head; /* Command buffer head offset */
+ volatile unsigned short cm_tail; /* Command buffer tail offset */
+ volatile unsigned short cm_start; /* start offset of buffer */
+ volatile unsigned short cm_max; /* last offset of buffer */
+};
+
+/************************************************************************
+ * Event structure definition.
+ ************************************************************************/
+struct ev_t {
+ volatile unsigned short ev_head; /* Command buffer head offset */
+ volatile unsigned short ev_tail; /* Command buffer tail offset */
+ volatile unsigned short ev_start; /* start offset of buffer */
+ volatile unsigned short ev_max; /* last offset of buffer */
+};
+
+/************************************************************************
+ * Download buffer structure.
+ ************************************************************************/
+struct downld_t {
+ uchar dl_type; /* Header */
+ uchar dl_seq; /* Download sequence */
+ ushort dl_srev; /* Software revision number */
+ ushort dl_lrev; /* Low revision number */
+ ushort dl_hrev; /* High revision number */
+ ushort dl_seg; /* Start segment address */
+ ushort dl_size; /* Number of bytes to download */
+ uchar dl_data[1024]; /* Download data */
+};
+
+/************************************************************************
+ * Per channel buffer structure
+ ************************************************************************
+ * Base Structure Entries Usage Meanings to Host *
+ * *
+ * W = read write R = read only *
+ * C = changed by commands only *
+ * U = unknown (may be changed w/o notice) *
+ ************************************************************************/
+struct bs_t {
+ volatile unsigned short tp_jmp; /* Transmit poll jump */
+ volatile unsigned short tc_jmp; /* Cooked procedure jump */
+ volatile unsigned short ri_jmp; /* Not currently used */
+ volatile unsigned short rp_jmp; /* Receive poll jump */
+
+ volatile unsigned short tx_seg; /* W Tx segment */
+ volatile unsigned short tx_head; /* W Tx buffer head offset */
+ volatile unsigned short tx_tail; /* R Tx buffer tail offset */
+ volatile unsigned short tx_max; /* W Tx buffer size - 1 */
+
+ volatile unsigned short rx_seg; /* W Rx segment */
+ volatile unsigned short rx_head; /* W Rx buffer head offset */
+ volatile unsigned short rx_tail; /* R Rx buffer tail offset */
+ volatile unsigned short rx_max; /* W Rx buffer size - 1 */
+
+ volatile unsigned short tx_lw; /* W Tx buffer low water mark */
+ volatile unsigned short rx_lw; /* W Rx buffer low water mark */
+ volatile unsigned short rx_hw; /* W Rx buffer high water mark */
+ volatile unsigned short incr; /* W Increment to next channel */
+
+ volatile unsigned short fepdev; /* U SCC device base address */
+ volatile unsigned short edelay; /* W Exception delay */
+ volatile unsigned short blen; /* W Break length */
+ volatile unsigned short btime; /* U Break complete time */
+
+ volatile unsigned short iflag; /* C UNIX input flags */
+ volatile unsigned short oflag; /* C UNIX output flags */
+ volatile unsigned short cflag; /* C UNIX control flags */
+ volatile unsigned short wfill[13]; /* U Reserved for expansion */
+
+ volatile unsigned char num; /* U Channel number */
+ volatile unsigned char ract; /* U Receiver active counter */
+ volatile unsigned char bstat; /* U Break status bits */
+ volatile unsigned char tbusy; /* W Transmit busy */
+ volatile unsigned char iempty; /* W Transmit empty event enable */
+ volatile unsigned char ilow; /* W Transmit low-water event enable */
+ volatile unsigned char idata; /* W Receive data interrupt enable */
+ volatile unsigned char eflag; /* U Host event flags */
+
+ volatile unsigned char tflag; /* U Transmit flags */
+ volatile unsigned char rflag; /* U Receive flags */
+ volatile unsigned char xmask; /* U Transmit ready flags */
+ volatile unsigned char xval; /* U Transmit ready value */
+ volatile unsigned char m_stat; /* RC Modem status bits */
+ volatile unsigned char m_change; /* U Modem bits which changed */
+ volatile unsigned char m_int; /* W Modem interrupt enable bits */
+ volatile unsigned char m_last; /* U Last modem status */
+
+ volatile unsigned char mtran; /* C Unreported modem trans */
+ volatile unsigned char orun; /* C Buffer overrun occurred */
+ volatile unsigned char astartc; /* W Auxiliary Xon char */
+ volatile unsigned char astopc; /* W Auxiliary Xoff char */
+ volatile unsigned char startc; /* W Xon character */
+ volatile unsigned char stopc; /* W Xoff character */
+ volatile unsigned char vnextc; /* W Vnext character */
+ volatile unsigned char hflow; /* C Software flow control */
+
+ volatile unsigned char fillc; /* U Delay Fill character */
+ volatile unsigned char ochar; /* U Saved output character */
+ volatile unsigned char omask; /* U Output character mask */
+
+ volatile unsigned char bfill[13]; /* U Reserved for expansion */
+
+ volatile unsigned char scc[16]; /* U SCC registers */
+};
+
+
+/************************************************************************
+ * FEP supported functions
+ ************************************************************************/
+#define SRLOW 0xe0 /* Set receive low water */
+#define SRHIGH 0xe1 /* Set receive high water */
+#define FLUSHTX 0xe2 /* Flush transmit buffer */
+#define PAUSETX 0xe3 /* Pause data transmission */
+#define RESUMETX 0xe4 /* Resume data transmission */
+#define SMINT 0xe5 /* Set Modem Interrupt */
+#define SAFLOWC 0xe6 /* Set Aux. flow control chars */
+#define SBREAK 0xe8 /* Send break */
+#define SMODEM 0xe9 /* Set 8530 modem control lines */
+#define SIFLAG 0xea /* Set UNIX iflags */
+#define SFLOWC 0xeb /* Set flow control characters */
+#define STLOW 0xec /* Set transmit low water mark */
+#define RPAUSE 0xee /* Pause receive */
+#define RRESUME 0xef /* Resume receive */
+#define CHRESET 0xf0 /* Reset Channel */
+#define BUFSETALL 0xf2 /* Set Tx & Rx buffer size avail*/
+#define SOFLAG 0xf3 /* Set UNIX oflags */
+#define SHFLOW 0xf4 /* Set hardware handshake */
+#define SCFLAG 0xf5 /* Set UNIX cflags */
+#define SVNEXT 0xf6 /* Set VNEXT character */
+#define SPINTFC 0xfc /* Reserved */
+#define SCOMMODE 0xfd /* Set RS232/422 mode */
+
+
+/************************************************************************
+ * Modes for SCOMMODE
+ ************************************************************************/
+#define MODE_232 0x00
+#define MODE_422 0x01
+
+
+/************************************************************************
+ * Event flags.
+ ************************************************************************/
+#define IFBREAK 0x01 /* Break received */
+#define IFTLW 0x02 /* Transmit low water */
+#define IFTEM 0x04 /* Transmitter empty */
+#define IFDATA 0x08 /* Receive data present */
+#define IFMODEM 0x20 /* Modem status change */
+
+/************************************************************************
+ * Modem flags
+ ************************************************************************/
+# define DM_RTS 0x02 /* Request to send */
+# define DM_CD 0x80 /* Carrier detect */
+# define DM_DSR 0x20 /* Data set ready */
+# define DM_CTS 0x10 /* Clear to send */
+# define DM_RI 0x40 /* Ring indicator */
+# define DM_DTR 0x01 /* Data terminal ready */
+
+
+#endif
diff --git a/drivers/staging/dgap/dgap_kcompat.h b/drivers/staging/dgap/dgap_kcompat.h
new file mode 100644
index 0000000..8ebf4b7
--- /dev/null
+++ b/drivers/staging/dgap/dgap_kcompat.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2004 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ *
+ *************************************************************************
+ *
+ * This file is intended to contain all the kernel "differences" between the
+ * various kernels that we support.
+ *
+ *************************************************************************/
+
+#ifndef __DGAP_KCOMPAT_H
+#define __DGAP_KCOMPAT_H
+
+# ifndef KERNEL_VERSION
+# define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
+# endif
+
+
+#if !defined(TTY_FLIPBUF_SIZE)
+# define TTY_FLIPBUF_SIZE 512
+#endif
+
+
+/* Sparse stuff */
+# ifndef __user
+# define __user
+# define __kernel
+# define __safe
+# define __force
+# define __chk_user_ptr(x) (void)0
+# endif
+
+
+# define PARM_STR(VAR, INIT, PERM, DESC) \
+ static char *VAR = INIT; \
+ char *dgap_##VAR; \
+ module_param(VAR, charp, PERM); \
+ MODULE_PARM_DESC(VAR, DESC);
+
+# define PARM_INT(VAR, INIT, PERM, DESC) \
+ static int VAR = INIT; \
+ int dgap_##VAR; \
+ module_param(VAR, int, PERM); \
+ MODULE_PARM_DESC(VAR, DESC);
+
+# define PARM_ULONG(VAR, INIT, PERM, DESC) \
+ static ulong VAR = INIT; \
+ ulong dgap_##VAR; \
+ module_param(VAR, long, PERM); \
+ MODULE_PARM_DESC(VAR, DESC);
+
+
+
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
+
+
+
+
+/* NOTHING YET */
+
+
+
+
+# else
+
+
+
+# error "this driver does not support anything below the 2.6.27 kernel series."
+
+
+
+# endif
+
+#endif /* ! __DGAP_KCOMPAT_H */
diff --git a/drivers/staging/dgap/dgap_parse.c b/drivers/staging/dgap/dgap_parse.c
new file mode 100644
index 0000000..5497e6d
--- /dev/null
+++ b/drivers/staging/dgap/dgap_parse.c
@@ -0,0 +1,1371 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ *
+ *
+ *****************************************************************************
+ *
+ * dgap_parse.c - Parses the configuration information from the input file.
+ *
+ * $Id: dgap_parse.c,v 1.1 2009/10/23 14:01:57 markh Exp $
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+
+#include "dgap_types.h"
+#include "dgap_fep5.h"
+#include "dgap_driver.h"
+#include "dgap_conf.h"
+
+
+/*
+ * Function prototypes.
+ */
+static int dgap_gettok(char **in, struct cnode *p);
+static char *dgap_getword(char **in);
+static char *dgap_savestring(char *s);
+static struct cnode *dgap_newnode(int t);
+static int dgap_checknode(struct cnode *p);
+static void dgap_err(char *s);
+
+/*
+ * Our needed internal static variables...
+ */
+static struct cnode dgap_head;
+#define MAXCWORD 200
+static char dgap_cword[MAXCWORD];
+
+struct toklist {
+ int token;
+ char *string;
+};
+
+static struct toklist dgap_tlist[] = {
+ { BEGIN, "config_begin" },
+ { END, "config_end" },
+ { BOARD, "board" },
+ { PCX, "Digi_AccelePort_C/X_PCI" }, /* C/X_PCI */
+ { PEPC, "Digi_AccelePort_EPC/X_PCI" }, /* EPC/X_PCI */
+ { PPCM, "Digi_AccelePort_Xem_PCI" }, /* PCI/Xem */
+ { APORT2_920P, "Digi_AccelePort_2r_920_PCI" },
+ { APORT4_920P, "Digi_AccelePort_4r_920_PCI" },
+ { APORT8_920P, "Digi_AccelePort_8r_920_PCI" },
+ { PAPORT4, "Digi_AccelePort_4r_PCI(EIA-232/RS-422)" },
+ { PAPORT8, "Digi_AccelePort_8r_PCI(EIA-232/RS-422)" },
+ { IO, "io" },
+ { PCIINFO, "pciinfo" },
+ { LINE, "line" },
+ { CONC, "conc" },
+ { CONC, "concentrator" },
+ { CX, "cx" },
+ { CX, "ccon" },
+ { EPC, "epccon" },
+ { EPC, "epc" },
+ { MOD, "module" },
+ { ID, "id" },
+ { STARTO, "start" },
+ { SPEED, "speed" },
+ { CABLE, "cable" },
+ { CONNECT, "connect" },
+ { METHOD, "method" },
+ { STATUS, "status" },
+ { CUSTOM, "Custom" },
+ { BASIC, "Basic" },
+ { MEM, "mem" },
+ { MEM, "memory" },
+ { PORTS, "ports" },
+ { MODEM, "modem" },
+ { NPORTS, "nports" },
+ { TTYN, "ttyname" },
+ { CU, "cuname" },
+ { PRINT, "prname" },
+ { CMAJOR, "major" },
+ { ALTPIN, "altpin" },
+ { USEINTR, "useintr" },
+ { TTSIZ, "ttysize" },
+ { CHSIZ, "chsize" },
+ { BSSIZ, "boardsize" },
+ { UNTSIZ, "schedsize" },
+ { F2SIZ, "f2200size" },
+ { VPSIZ, "vpixsize" },
+ { 0, NULL }
+};
+
+
+/*
+ * Parse a configuration file read into memory as a string.
+ */
+int dgap_parsefile(char **in, int Remove)
+{
+ struct cnode *p, *brd, *line, *conc;
+ int rc;
+ char *s = NULL, *s2 = NULL;
+ int linecnt = 0;
+
+ p = &dgap_head;
+ brd = line = conc = NULL;
+
+ /* perhaps we are adding to an existing list? */
+ while (p->next != NULL) {
+ p = p->next;
+ }
+
+ /* file must start with a BEGIN */
+ while ( (rc = dgap_gettok(in,p)) != BEGIN ) {
+ if (rc == 0) {
+ dgap_err("unexpected EOF");
+ return(-1);
+ }
+ }
+
+ for (; ; ) {
+ rc = dgap_gettok(in,p);
+ if (rc == 0) {
+ dgap_err("unexpected EOF");
+ return(-1);
+ }
+
+ switch (rc) {
+ case 0:
+ dgap_err("unexpected end of file");
+ return(-1);
+
+ case BEGIN: /* should only be 1 begin */
+ dgap_err("unexpected config_begin\n");
+ return(-1);
+
+ case END:
+ return(0);
+
+ case BOARD: /* board info */
+ if (dgap_checknode(p))
+ return(-1);
+ if ( (p->next = dgap_newnode(BNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+
+ p->u.board.status = dgap_savestring("No");
+ line = conc = NULL;
+ brd = p;
+ linecnt = -1;
+ break;
+
+ case APORT2_920P: /* AccelePort_2 */
+ if (p->type != BNODE) {
+ dgap_err("unexpected Digi_2r_920 string");
+ return(-1);
+ }
+ p->u.board.type = APORT2_920P;
+ p->u.board.v_type = 1;
+ DPR_INIT(("Adding Digi_2r_920 PCI to config...\n"));
+ break;
+
+ case APORT4_920P: /* AccelePort_4 */
+ if (p->type != BNODE) {
+ dgap_err("unexpected Digi_4r_920 string");
+ return(-1);
+ }
+ p->u.board.type = APORT4_920P;
+ p->u.board.v_type = 1;
+ DPR_INIT(("Adding Digi_4r_920 PCI to config...\n"));
+ break;
+
+ case APORT8_920P: /* AccelePort_8 */
+ if (p->type != BNODE) {
+ dgap_err("unexpected Digi_8r_920 string");
+ return(-1);
+ }
+ p->u.board.type = APORT8_920P;
+ p->u.board.v_type = 1;
+ DPR_INIT(("Adding Digi_8r_920 PCI to config...\n"));
+ break;
+
+ case PAPORT4: /* AccelePort_4 PCI */
+ if (p->type != BNODE) {
+ dgap_err("unexpected Digi_4r(PCI) string");
+ return(-1);
+ }
+ p->u.board.type = PAPORT4;
+ p->u.board.v_type = 1;
+ DPR_INIT(("Adding Digi_4r PCI to config...\n"));
+ break;
+
+ case PAPORT8: /* AccelePort_8 PCI */
+ if (p->type != BNODE) {
+ dgap_err("unexpected Digi_8r string");
+ return(-1);
+ }
+ p->u.board.type = PAPORT8;
+ p->u.board.v_type = 1;
+ DPR_INIT(("Adding Digi_8r PCI to config...\n"));
+ break;
+
+ case PCX: /* PCI C/X */
+ if (p->type != BNODE) {
+ dgap_err("unexpected Digi_C/X_(PCI) string");
+ return(-1);
+ }
+ p->u.board.type = PCX;
+ p->u.board.v_type = 1;
+ p->u.board.conc1 = 0;
+ p->u.board.conc2 = 0;
+ p->u.board.module1 = 0;
+ p->u.board.module2 = 0;
+ DPR_INIT(("Adding PCI C/X to config...\n"));
+ break;
+
+ case PEPC: /* PCI EPC/X */
+ if (p->type != BNODE) {
+ dgap_err("unexpected \"Digi_EPC/X_(PCI)\" string");
+ return(-1);
+ }
+ p->u.board.type = PEPC;
+ p->u.board.v_type = 1;
+ p->u.board.conc1 = 0;
+ p->u.board.conc2 = 0;
+ p->u.board.module1 = 0;
+ p->u.board.module2 = 0;
+ DPR_INIT(("Adding PCI EPC/X to config...\n"));
+ break;
+
+ case PPCM: /* PCI/Xem */
+ if (p->type != BNODE) {
+ dgap_err("unexpected PCI/Xem string");
+ return(-1);
+ }
+ p->u.board.type = PPCM;
+ p->u.board.v_type = 1;
+ p->u.board.conc1 = 0;
+ p->u.board.conc2 = 0;
+ DPR_INIT(("Adding PCI XEM to config...\n"));
+ break;
+
+ case IO: /* i/o port */
+ if (p->type != BNODE) {
+ dgap_err("IO port only vaild for boards");
+ return(-1);
+ }
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.board.portstr = dgap_savestring(s);
+ p->u.board.port = (short)simple_strtol(s, &s2, 0);
+ if ((short)strlen(s) > (short)(s2 - s)) {
+ dgap_err("bad number for IO port");
+ return(-1);
+ }
+ p->u.board.v_port = 1;
+ DPR_INIT(("Adding IO (%s) to config...\n", s));
+ break;
+
+ case MEM: /* memory address */
+ if (p->type != BNODE) {
+ dgap_err("memory address only vaild for boards");
+ return(-1);
+ }
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.board.addrstr = dgap_savestring(s);
+ p->u.board.addr = simple_strtoul(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for memory address");
+ return(-1);
+ }
+ p->u.board.v_addr = 1;
+ DPR_INIT(("Adding MEM (%s) to config...\n", s));
+ break;
+
+ case PCIINFO: /* pci information */
+ if (p->type != BNODE) {
+ dgap_err("memory address only vaild for boards");
+ return(-1);
+ }
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.board.pcibusstr = dgap_savestring(s);
+ p->u.board.pcibus = simple_strtoul(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for pci bus");
+ return(-1);
+ }
+ p->u.board.v_pcibus = 1;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.board.pcislotstr = dgap_savestring(s);
+ p->u.board.pcislot = simple_strtoul(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for pci slot");
+ return(-1);
+ }
+ p->u.board.v_pcislot = 1;
+
+ DPR_INIT(("Adding PCIINFO (%s %s) to config...\n", p->u.board.pcibusstr,
+ p->u.board.pcislotstr));
+ break;
+
+ case METHOD:
+ if (p->type != BNODE) {
+ dgap_err("install method only vaild for boards");
+ return(-1);
+ }
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.board.method = dgap_savestring(s);
+ p->u.board.v_method = 1;
+ DPR_INIT(("Adding METHOD (%s) to config...\n", s));
+ break;
+
+ case STATUS:
+ if (p->type != BNODE) {
+ dgap_err("config status only vaild for boards");
+ return(-1);
+ }
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.board.status = dgap_savestring(s);
+ DPR_INIT(("Adding STATUS (%s) to config...\n", s));
+ break;
+
+ case NPORTS: /* number of ports */
+ if (p->type == BNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.board.nport = (char)simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for number of ports");
+ return(-1);
+ }
+ p->u.board.v_nport = 1;
+ } else if (p->type == CNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.conc.nport = (char)simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for number of ports");
+ return(-1);
+ }
+ p->u.conc.v_nport = 1;
+ } else if (p->type == MNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.module.nport = (char)simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for number of ports");
+ return(-1);
+ }
+ p->u.module.v_nport = 1;
+ } else {
+ dgap_err("nports only valid for concentrators or modules");
+ return(-1);
+ }
+ DPR_INIT(("Adding NPORTS (%s) to config...\n", s));
+ break;
+
+ case ID: /* letter ID used in tty name */
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+
+ if (p->type == CNODE) {
+ p->u.conc.id = dgap_savestring(s);
+ p->u.conc.v_id = 1;
+ } else if (p->type == MNODE) {
+ p->u.module.id = dgap_savestring(s);
+ p->u.module.v_id = 1;
+ } else {
+ dgap_err("id only valid for concentrators or modules");
+ return(-1);
+ }
+ DPR_INIT(("Adding ID (%s) to config...\n", s));
+ break;
+
+ case STARTO: /* start offset of ID */
+ if (p->type == BNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.board.start = simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for start of tty count");
+ return(-1);
+ }
+ p->u.board.v_start = 1;
+ } else if (p->type == CNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.conc.start = simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for start of tty count");
+ return(-1);
+ }
+ p->u.conc.v_start = 1;
+ } else if (p->type == MNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.module.start = simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for start of tty count");
+ return(-1);
+ }
+ p->u.module.v_start = 1;
+ } else {
+ dgap_err("start only valid for concentrators or modules");
+ return(-1);
+ }
+ DPR_INIT(("Adding START (%s) to config...\n", s));
+ break;
+
+ case TTYN: /* tty name prefix */
+ if (dgap_checknode(p))
+ return(-1);
+ if ( (p->next = dgap_newnode(TNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ if ( (s = dgap_getword(in)) == NULL ) {
+ dgap_err("unexpeced end of file");
+ return(-1);
+ }
+ if ( (p->u.ttyname = dgap_savestring(s)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ DPR_INIT(("Adding TTY (%s) to config...\n", s));
+ break;
+
+ case CU: /* cu name prefix */
+ if (dgap_checknode(p))
+ return(-1);
+ if ( (p->next = dgap_newnode(CUNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ if ( (s = dgap_getword(in)) == NULL ) {
+ dgap_err("unexpeced end of file");
+ return(-1);
+ }
+ if ( (p->u.cuname = dgap_savestring(s)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ DPR_INIT(("Adding CU (%s) to config...\n", s));
+ break;
+
+ case LINE: /* line information */
+ if (dgap_checknode(p))
+ return(-1);
+ if (brd == NULL) {
+ dgap_err("must specify board before line info");
+ return(-1);
+ }
+ switch (brd->u.board.type) {
+ case PPCM:
+ dgap_err("line not vaild for PC/em");
+ return(-1);
+ }
+ if ( (p->next = dgap_newnode(LNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ conc = NULL;
+ line = p;
+ linecnt++;
+ DPR_INIT(("Adding LINE to config...\n"));
+ break;
+
+ case CONC: /* concentrator information */
+ if (dgap_checknode(p))
+ return(-1);
+ if (line == NULL) {
+ dgap_err("must specify line info before concentrator");
+ return(-1);
+ }
+ if ( (p->next = dgap_newnode(CNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ conc = p;
+ if (linecnt)
+ brd->u.board.conc2++;
+ else
+ brd->u.board.conc1++;
+
+ DPR_INIT(("Adding CONC to config...\n"));
+ break;
+
+ case CX: /* c/x type concentrator */
+ if (p->type != CNODE) {
+ dgap_err("cx only valid for concentrators");
+ return(-1);
+ }
+ p->u.conc.type = CX;
+ p->u.conc.v_type = 1;
+ DPR_INIT(("Adding CX to config...\n"));
+ break;
+
+ case EPC: /* epc type concentrator */
+ if (p->type != CNODE) {
+ dgap_err("cx only valid for concentrators");
+ return(-1);
+ }
+ p->u.conc.type = EPC;
+ p->u.conc.v_type = 1;
+ DPR_INIT(("Adding EPC to config...\n"));
+ break;
+
+ case MOD: /* EBI module */
+ if (dgap_checknode(p))
+ return(-1);
+ if (brd == NULL) {
+ dgap_err("must specify board info before EBI modules");
+ return(-1);
+ }
+ switch (brd->u.board.type) {
+ case PPCM:
+ linecnt = 0;
+ break;
+ default:
+ if (conc == NULL) {
+ dgap_err("must specify concentrator info before EBI module");
+ return(-1);
+ }
+ }
+ if ( (p->next = dgap_newnode(MNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ if (linecnt)
+ brd->u.board.module2++;
+ else
+ brd->u.board.module1++;
+
+ DPR_INIT(("Adding MOD to config...\n"));
+ break;
+
+ case PORTS: /* ports type EBI module */
+ if (p->type != MNODE) {
+ dgap_err("ports only valid for EBI modules");
+ return(-1);
+ }
+ p->u.module.type = PORTS;
+ p->u.module.v_type = 1;
+ DPR_INIT(("Adding PORTS to config...\n"));
+ break;
+
+ case MODEM: /* modem type EBI module */
+ if (p->type != MNODE) {
+ dgap_err("modem only valid for modem modules");
+ return(-1);
+ }
+ p->u.module.type = MODEM;
+ p->u.module.v_type = 1;
+ DPR_INIT(("Adding MODEM to config...\n"));
+ break;
+
+ case CABLE:
+ if (p->type == LNODE) {
+ if ((s = dgap_getword(in)) == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.line.cable = dgap_savestring(s);
+ p->u.line.v_cable = 1;
+ }
+ DPR_INIT(("Adding CABLE (%s) to config...\n", s));
+ break;
+
+ case SPEED: /* sync line speed indication */
+ if (p->type == LNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.line.speed = (char)simple_strtol(s, &s2, 0);
+ if ((short)strlen(s) > (short)(s2 - s)) {
+ dgap_err("bad number for line speed");
+ return(-1);
+ }
+ p->u.line.v_speed = 1;
+ } else if (p->type == CNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.conc.speed = (char)simple_strtol(s, &s2, 0);
+ if ((short)strlen(s) > (short)(s2 - s)) {
+ dgap_err("bad number for line speed");
+ return(-1);
+ }
+ p->u.conc.v_speed = 1;
+ } else {
+ dgap_err("speed valid only for lines or concentrators.");
+ return(-1);
+ }
+ DPR_INIT(("Adding SPEED (%s) to config...\n", s));
+ break;
+
+ case CONNECT:
+ if (p->type == CNODE) {
+ if ((s = dgap_getword(in)) == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.conc.connect = dgap_savestring(s);
+ p->u.conc.v_connect = 1;
+ }
+ DPR_INIT(("Adding CONNECT (%s) to config...\n", s));
+ break;
+ case PRINT: /* transparent print name prefix */
+ if (dgap_checknode(p))
+ return(-1);
+ if ( (p->next = dgap_newnode(PNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ if ( (s = dgap_getword(in)) == NULL ) {
+ dgap_err("unexpeced end of file");
+ return(-1);
+ }
+ if ( (p->u.printname = dgap_savestring(s)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ DPR_INIT(("Adding PRINT (%s) to config...\n", s));
+ break;
+
+ case CMAJOR: /* major number */
+ if (dgap_checknode(p))
+ return(-1);
+ if ( (p->next = dgap_newnode(JNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.majornumber = simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for major number");
+ return(-1);
+ }
+ DPR_INIT(("Adding CMAJOR (%s) to config...\n", s));
+ break;
+
+ case ALTPIN: /* altpin setting */
+ if (dgap_checknode(p))
+ return(-1);
+ if ( (p->next = dgap_newnode(ANODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.altpin = simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for altpin");
+ return(-1);
+ }
+ DPR_INIT(("Adding ALTPIN (%s) to config...\n", s));
+ break;
+
+ case USEINTR: /* enable interrupt setting */
+ if (dgap_checknode(p))
+ return(-1);
+ if ( (p->next = dgap_newnode(INTRNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.useintr = simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for useintr");
+ return(-1);
+ }
+ DPR_INIT(("Adding USEINTR (%s) to config...\n", s));
+ break;
+
+ case TTSIZ: /* size of tty structure */
+ if (dgap_checknode(p))
+ return(-1);
+ if ( (p->next = dgap_newnode(TSNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.ttysize = simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for ttysize");
+ return(-1);
+ }
+ DPR_INIT(("Adding TTSIZ (%s) to config...\n", s));
+ break;
+
+ case CHSIZ: /* channel structure size */
+ if (dgap_checknode(p))
+ return(-1);
+ if ( (p->next = dgap_newnode(CSNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.chsize = simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for chsize");
+ return(-1);
+ }
+ DPR_INIT(("Adding CHSIZE (%s) to config...\n", s));
+ break;
+
+ case BSSIZ: /* board structure size */
+ if (dgap_checknode(p))
+ return(-1);
+ if ( (p->next = dgap_newnode(BSNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.bssize = simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for bssize");
+ return(-1);
+ }
+ DPR_INIT(("Adding BSSIZ (%s) to config...\n", s));
+ break;
+
+ case UNTSIZ: /* sched structure size */
+ if (dgap_checknode(p))
+ return(-1);
+ if ( (p->next = dgap_newnode(USNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.unsize = simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for schedsize");
+ return(-1);
+ }
+ DPR_INIT(("Adding UNTSIZ (%s) to config...\n", s));
+ break;
+
+ case F2SIZ: /* f2200 structure size */
+ if (dgap_checknode(p))
+ return(-1);
+ if ( (p->next = dgap_newnode(FSNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.f2size = simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for f2200size");
+ return(-1);
+ }
+ DPR_INIT(("Adding F2SIZ (%s) to config...\n", s));
+ break;
+
+ case VPSIZ: /* vpix structure size */
+ if (dgap_checknode(p))
+ return(-1);
+ if ( (p->next = dgap_newnode(VSNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.vpixsize = simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for vpixsize");
+ return(-1);
+ }
+ DPR_INIT(("Adding VPSIZ (%s) to config...\n", s));
+ break;
+ }
+ }
+}
+
+
+/*
+ * dgap_sindex: much like index(), but it looks for a match of any character in
+ * the group, and returns that position. If the first character is a ^, then
+ * this will match the first occurrence not in that group.
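+ * For example, dgap_sindex("16 ports", " \t") returns a pointer to the space,
+ * while dgap_sindex("   ports", "^ \t") returns a pointer to the 'p'.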
+ */
+static char *dgap_sindex (char *string, char *group)
+{
+ char *ptr;
+
+ if (!string || !group)
+ return (char *) NULL;
+
+ if (*group == '^') {
+ group++;
+ for (; *string; string++) {
+ for (ptr = group; *ptr; ptr++) {
+ if (*ptr == *string)
+ break;
+ }
+ if (*ptr == '\0')
+ return string;
+ }
+ }
+ else {
+ for (; *string; string++) {
+ for (ptr = group; *ptr; ptr++) {
+ if (*ptr == *string)
+ return string;
+ }
+ }
+ }
+
+ return (char *) NULL;
+}
+
+
+/*
+ * Get a token from the input file; return 0 if end of file is reached
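+ * If the previously read word was "board", the next word must be a known
+ * keyword (the board type); otherwise unknown words are skipped until a
+ * keyword from dgap_tlist is matched.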
+ */
+static int dgap_gettok(char **in, struct cnode *p)
+{
+ char *w;
+ struct toklist *t;
+
+ if (strstr(dgap_cword, "boar")) {
+ w = dgap_getword(in);
+ snprintf(dgap_cword, MAXCWORD, "%s", w);
+ for (t = dgap_tlist; t->token != 0; t++) {
+ if ( !strcmp(w, t->string)) {
+ return(t->token);
+ }
+ }
+ dgap_err("board !!type not specified");
+ return(1);
+ }
+ else {
+ while ( (w = dgap_getword(in)) != NULL ) {
+ snprintf(dgap_cword, MAXCWORD, "%s", w);
+ for (t = dgap_tlist; t->token != 0; t++) {
+ if ( !strcmp(w, t->string) )
+ return(t->token);
+ }
+ }
+ return(0);
+ }
+}
+
+
+/*
+ * Get a word from the input stream.  Words are separated by whitespace.
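+ * The returned word is NUL-terminated in place and *in is advanced past the
+ * word and any following whitespace.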
+ */
+static char *dgap_getword(char **in)
+{
+ char *ret_ptr = *in;
+
+ char *ptr = dgap_sindex(*in, " \t\n");
+
+ /* If no word found, return null */
+ if (!ptr)
+ return NULL;
+
+ /* Mark new location for our buffer */
+ *ptr = '\0';
+ *in = ptr + 1;
+
+ /* Eat any extra spaces/tabs/newlines that might be present */
+ while (*in && **in && ((**in == ' ') || (**in == '\t') || (**in == '\n'))) {
+ **in = '\0';
+ *in = *in + 1;
+ }
+
+ return ret_ptr;
+}
+
+
+/*
+ * print a configuration parse error message.
+ */
+static void dgap_err(char *s)
+{
+ printk("DGAP: parse: %s\n", s);
+}
+
+
+/*
+ * allocate a new configuration node of type t
+ */
+static struct cnode *dgap_newnode(int t)
+{
+ struct cnode *n;
+ if ( (n = (struct cnode *) kmalloc(sizeof(struct cnode ), GFP_ATOMIC) ) != NULL) {
+ memset( (char *)n, 0, sizeof(struct cnode ) );
+ n->type = t;
+ }
+ return(n);
+}
+
+
+/*
+ * dgap_checknode: see if all the necessary info has been supplied for a node
+ * before creating the next node.
+ */
+static int dgap_checknode(struct cnode *p)
+{
+ switch (p->type) {
+ case BNODE:
+ if (p->u.board.v_type == 0) {
+ dgap_err("board type !not specified");
+ return(1);
+ }
+
+ return(0);
+
+ case LNODE:
+ if (p->u.line.v_speed == 0) {
+ dgap_err("line speed not specified");
+ return(1);
+ }
+ return(0);
+
+ case CNODE:
+ if (p->u.conc.v_type == 0) {
+ dgap_err("concentrator type not specified");
+ return(1);
+ }
+ if (p->u.conc.v_speed == 0) {
+ dgap_err("concentrator line speed not specified");
+ return(1);
+ }
+ if (p->u.conc.v_nport == 0) {
+ dgap_err("number of ports on concentrator not specified");
+ return(1);
+ }
+ if (p->u.conc.v_id == 0) {
+ dgap_err("concentrator id letter not specified");
+ return(1);
+ }
+ return(0);
+
+ case MNODE:
+ if (p->u.module.v_type == 0) {
+ dgap_err("EBI module type not specified");
+ return(1);
+ }
+ if (p->u.module.v_nport == 0) {
+ dgap_err("number of ports on EBI module not specified");
+ return(1);
+ }
+ if (p->u.module.v_id == 0) {
+ dgap_err("EBI module id letter not specified");
+ return(1);
+ }
+ return(0);
+ }
+ return(0);
+}
+
+/*
+ * save a string somewhere
+ */
+static char *dgap_savestring(char *s)
+{
+ char *p;
+ if ( (p = kmalloc(strlen(s) + 1, GFP_ATOMIC) ) != NULL) {
+ strcpy(p, s);
+ }
+ return(p);
+}
+
+
+/*
+ * Given a board pointer, returns whether we should use interrupts or not.
+ */
+uint dgap_config_get_useintr(struct board_t *bd)
+{
+ struct cnode *p = NULL;
+
+ if (!bd)
+ return(0);
+
+ for (p = bd->bd_config; p; p = p->next) {
+ switch (p->type) {
+ case INTRNODE:
+ /* Return the user-configured interrupt setting. */
+ return p->u.useintr;
+ default:
+ break;
+ }
+ }
+
+ /* If not found, then don't turn on interrupts. */
+ return 0;
+}
+
+
+/*
+ * Given a board pointer, returns whether we turn on altpin or not.
+ */
+uint dgap_config_get_altpin(struct board_t *bd)
+{
+ struct cnode *p = NULL;
+
+ if (!bd)
+ return(0);
+
+ for (p = bd->bd_config; p; p = p->next) {
+ switch (p->type) {
+ case ANODE:
+ /* Return the user-configured altpin setting. */
+ return p->u.altpin;
+ default:
+ break;
+ }
+ }
+
+ /* If not found, then don't turn on altpin. */
+ return 0;
+}
+
+
+
+/*
+ * Given a specific board type (and optionally PCI bus/slot), finds the first
+ * matching board node, detaches that board's chain of config nodes from the
+ * global list, and returns it.
+ */
+struct cnode *dgap_find_config(int type, int bus, int slot)
+{
+ struct cnode *p, *prev = NULL, *prev2 = NULL, *found = NULL;
+
+ p = &dgap_head;
+
+ while (p->next != NULL) {
+ prev = p;
+ p = p->next;
+
+ if (p->type == BNODE) {
+
+ if (p->u.board.type == type) {
+
+ if (p->u.board.v_pcibus && p->u.board.pcibus != bus) {
+ DPR(("Found matching board, but wrong bus position. System says bus %d, we want bus %ld\n",
+ bus, p->u.board.pcibus));
+ continue;
+ }
+ if (p->u.board.v_pcislot && p->u.board.pcislot != slot) {
+ DPR_INIT(("Found matching board, but wrong slot position. System says slot %d, we want slot %ld\n",
+ slot, p->u.board.pcislot));
+ continue;
+ }
+
+ DPR_INIT(("Matched type in config file\n"));
+
+ found = p;
+ /*
+ * Keep walking thru the list till we find the next board.
+ */
+ while (p->next != NULL) {
+ prev2 = p;
+ p = p->next;
+ if (p->type == BNODE) {
+
+ /*
+ * Mark the end of our 1 board chain of configs.
+ */
+ prev2->next = NULL;
+
+ /*
+ * Link the "next" board to the previous board,
+ * effectively "unlinking" our board from the main config.
+ */
+ prev->next = p;
+
+ return found;
+ }
+ }
+ /*
+ * It must be the last board in the list.
+ */
+ prev->next = NULL;
+ return found;
+ }
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Given a board pointer, walks its config list, counting up all the ports
+ * the user specified for the board.
+ * (This does NOT mean they are all actually present right now, though.)
+ */
+uint dgap_config_get_number_of_ports(struct board_t *bd)
+{
+ int count = 0;
+ struct cnode *p = NULL;
+
+ if (!bd)
+ return(0);
+
+ for (p = bd->bd_config; p; p = p->next) {
+
+ switch (p->type) {
+ case BNODE:
+ /*
+ * check for pcxr types.
+ */
+ if (p->u.board.type > EPCFE)
+ count += p->u.board.nport;
+ break;
+ case CNODE:
+ count += p->u.conc.nport;
+ break;
+ case MNODE:
+ count += p->u.module.nport;
+ break;
+ }
+ }
+ return (count);
+}
+
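+/*
+ * Build the board's config string: each line node contributes a zero byte
+ * plus its speed; each concentrator contributes its port count (0x80 is set
+ * on every count except the last one when EM modules hang off it, each
+ * module adding its own count) followed by the concentrator speed.  The
+ * string is terminated with 0xff.
+ */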
+char *dgap_create_config_string(struct board_t *bd, char *string)
+{
+ char *ptr = string;
+ struct cnode *p = NULL;
+ struct cnode *q = NULL;
+ int speed;
+
+ if (!bd) {
+ *ptr = 0xff;
+ return string;
+ }
+
+ for (p = bd->bd_config; p; p = p->next) {
+
+ switch (p->type) {
+ case LNODE:
+ *ptr = '\0';
+ ptr++;
+ *ptr = p->u.line.speed;
+ ptr++;
+ break;
+ case CNODE:
+ /*
+ * Because the EPC/con concentrators can have EM modules
+ * hanging off of them, we have to walk ahead in the list
+ * and keep adding the number of ports on each EM to the config.
+ * UGH!
+ */
+ speed = p->u.conc.speed;
+ q = p->next;
+ if ((q != NULL) && (q->type == MNODE) ) {
+ *ptr = (p->u.conc.nport + 0x80);
+ ptr++;
+ p = q;
+ while ((q->next != NULL) && (q->next->type) == MNODE) {
+ *ptr = (q->u.module.nport + 0x80);
+ ptr++;
+ p = q;
+ q = q->next;
+ }
+ *ptr = q->u.module.nport;
+ ptr++;
+ } else {
+ *ptr = p->u.conc.nport;
+ ptr++;
+ }
+
+ *ptr = speed;
+ ptr++;
+ break;
+ }
+ }
+
+ *ptr = 0xff;
+ return string;
+}
+
+
+
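+/*
+ * Concatenate the board's tty name prefix (with any leading "tty" stripped)
+ * and each concentrator/module ID letter into 'string', limited to
+ * MAXTTYNAMELEN bytes.
+ */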
+char *dgap_get_config_letters(struct board_t *bd, char *string)
+{
+ int found = FALSE;
+ char *ptr = string;
+ struct cnode *cptr = NULL;
+ int len = 0;
+ int left = MAXTTYNAMELEN;
+
+ if (!bd) {
+ return "<NULL>";
+ }
+
+ for (cptr = bd->bd_config; cptr; cptr = cptr->next) {
+
+ if ((cptr->type == BNODE) &&
+ ((cptr->u.board.type == APORT2_920P) || (cptr->u.board.type == APORT4_920P) ||
+ (cptr->u.board.type == APORT8_920P) || (cptr->u.board.type == PAPORT4) ||
+ (cptr->u.board.type == PAPORT8))) {
+
+ found = TRUE;
+ }
+
+ if (cptr->type == TNODE && found == TRUE) {
+ char *ptr1;
+ if (strstr(cptr->u.ttyname, "tty")) {
+ ptr1 = cptr->u.ttyname;
+ ptr1 += 3;
+ }
+ else {
+ ptr1 = cptr->u.ttyname;
+ }
+ if (ptr1) {
+ len = snprintf(ptr, left, "%s", ptr1);
+ left -= len;
+ ptr += len;
+ if (left <= 0)
+ break;
+ }
+ }
+
+ if (cptr->type == CNODE) {
+ if (cptr->u.conc.id) {
+ len = snprintf(ptr, left, "%s", cptr->u.conc.id);
+ left -= len;
+ ptr += len;
+ if (left <= 0)
+ break;
+ }
+ }
+
+ if (cptr->type == MNODE) {
+ if (cptr->u.module.id) {
+ len = snprintf(ptr, left, "%s", cptr->u.module.id);
+ left -= len;
+ ptr += len;
+ if (left <= 0)
+ break;
+ }
+ }
+ }
+
+ return string;
+}
diff --git a/drivers/staging/dgap/dgap_parse.h b/drivers/staging/dgap/dgap_parse.h
new file mode 100644
index 0000000..8128c47
--- /dev/null
+++ b/drivers/staging/dgap/dgap_parse.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+#ifndef _DGAP_PARSE_H
+#define _DGAP_PARSE_H
+
+#include "dgap_driver.h"
+
+extern int dgap_parsefile(char **in, int Remove);
+extern struct cnode *dgap_find_config(int type, int bus, int slot);
+extern uint dgap_config_get_number_of_ports(struct board_t *bd);
+extern char *dgap_create_config_string(struct board_t *bd, char *string);
+extern char *dgap_get_config_letters(struct board_t *bd, char *string);
+extern uint dgap_config_get_useintr(struct board_t *bd);
+extern uint dgap_config_get_altpin(struct board_t *bd);
+
+#endif
diff --git a/drivers/staging/dgap/dgap_pci.h b/drivers/staging/dgap/dgap_pci.h
new file mode 100644
index 0000000..05ed374
--- /dev/null
+++ b/drivers/staging/dgap/dgap_pci.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+/* $Id: dgap_pci.h,v 1.1 2009/10/23 14:01:57 markh Exp $ */
+
+#ifndef __DGAP_PCI_H
+#define __DGAP_PCI_H
+
+#define PCIMAX 32 /* maximum number of PCI boards */
+
+#define DIGI_VID 0x114F
+
+#define PCI_DEVICE_EPC_DID 0x0002
+#define PCI_DEVICE_XEM_DID 0x0004
+#define PCI_DEVICE_XR_DID 0x0005
+#define PCI_DEVICE_CX_DID 0x0006
+#define PCI_DEVICE_XRJ_DID 0x0009 /* PLX-based Xr adapter */
+#define PCI_DEVICE_XR_IBM_DID 0x0011 /* IBM 8-port Async Adapter */
+#define PCI_DEVICE_XR_BULL_DID 0x0013 /* BULL 8-port Async Adapter */
+#define PCI_DEVICE_XR_SAIP_DID 0x001c /* SAIP card - Xr adapter */
+#define PCI_DEVICE_XR_422_DID 0x0012 /* Xr-422 */
+#define PCI_DEVICE_920_2_DID 0x0034 /* XR-Plus 920 K, 2 port */
+#define PCI_DEVICE_920_4_DID 0x0026 /* XR-Plus 920 K, 4 port */
+#define PCI_DEVICE_920_8_DID 0x0027 /* XR-Plus 920 K, 8 port */
+#define PCI_DEVICE_EPCJ_DID 0x000a /* PLX 9060 chip for PCI */
+#define PCI_DEVICE_CX_IBM_DID 0x001b /* IBM 128-port Async Adapter */
+#define PCI_DEVICE_920_8_HP_DID 0x0058 /* HP XR-Plus 920 K, 8 port */
+#define PCI_DEVICE_XEM_HP_DID 0x0059 /* HP Xem PCI */
+
+#define PCI_DEVICE_XEM_NAME "AccelePort XEM"
+#define PCI_DEVICE_CX_NAME "AccelePort CX"
+#define PCI_DEVICE_XR_NAME "AccelePort Xr"
+#define PCI_DEVICE_XRJ_NAME "AccelePort Xr (PLX)"
+#define PCI_DEVICE_XR_SAIP_NAME "AccelePort Xr (SAIP)"
+#define PCI_DEVICE_920_2_NAME "AccelePort Xr920 2 port"
+#define PCI_DEVICE_920_4_NAME "AccelePort Xr920 4 port"
+#define PCI_DEVICE_920_8_NAME "AccelePort Xr920 8 port"
+#define PCI_DEVICE_XR_422_NAME "AccelePort Xr 422"
+#define PCI_DEVICE_EPCJ_NAME "AccelePort EPC (PLX)"
+#define PCI_DEVICE_XR_BULL_NAME "AccelePort Xr (BULL)"
+#define PCI_DEVICE_XR_IBM_NAME "AccelePort Xr (IBM)"
+#define PCI_DEVICE_CX_IBM_NAME "AccelePort CX (IBM)"
+#define PCI_DEVICE_920_8_HP_NAME "AccelePort Xr920 8 port (HP)"
+#define PCI_DEVICE_XEM_HP_NAME "AccelePort XEM (HP)"
+
+
+/*
+ * On the PCI boards, there is no IO space allocated
+ * The I/O registers will be in the first 3 bytes of the
+ * upper 2MB of the 4MB memory space. The board memory
+ * will be mapped into the low 2MB of the 4MB memory space
+ */
+
+/* Potential location of PCI Bios from E0000 to FFFFF*/
+#define PCI_BIOS_SIZE 0x00020000
+
+/* Size of Memory and I/O for PCI (4MB) */
+#define PCI_RAM_SIZE 0x00400000
+
+/* Size of Memory (2MB) */
+#define PCI_MEM_SIZE 0x00200000
+
+/* Max PCI Window Size (2MB) */
+#define PCI_WIN_SIZE 0x00200000
+
+#define PCI_WIN_SHIFT 21 /* 21 bits max */
+
+/* Offset of I/0 in Memory (2MB) */
+#define PCI_IO_OFFSET 0x00200000
+
+/* Size of IO (2MB) */
+#define PCI_IO_SIZE 0x00200000
+
+#endif
diff --git a/drivers/staging/dgap/dgap_sysfs.c b/drivers/staging/dgap/dgap_sysfs.c
new file mode 100644
index 0000000..94da06f
--- /dev/null
+++ b/drivers/staging/dgap/dgap_sysfs.c
@@ -0,0 +1,793 @@
+/*
+ * Copyright 2004 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ *
+ *
+ *
+ * $Id: dgap_sysfs.c,v 1.1 2009/10/23 14:01:57 markh Exp $
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/serial_reg.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+
+#include "dgap_driver.h"
+#include "dgap_conf.h"
+#include "dgap_parse.h"
+
+
+static ssize_t dgap_driver_version_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", DG_PART);
+}
+static DRIVER_ATTR(version, S_IRUSR, dgap_driver_version_show, NULL);
+
+
+static ssize_t dgap_driver_boards_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", dgap_NumBoards);
+}
+static DRIVER_ATTR(boards, S_IRUSR, dgap_driver_boards_show, NULL);
+
+
+static ssize_t dgap_driver_maxboards_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", MAXBOARDS);
+}
+static DRIVER_ATTR(maxboards, S_IRUSR, dgap_driver_maxboards_show, NULL);
+
+
+static ssize_t dgap_driver_pollcounter_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%ld\n", dgap_poll_counter);
+}
+static DRIVER_ATTR(pollcounter, S_IRUSR, dgap_driver_pollcounter_show, NULL);
+
+
+static ssize_t dgap_driver_state_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", dgap_driver_state_text[dgap_driver_state]);
+}
+static DRIVER_ATTR(state, S_IRUSR, dgap_driver_state_show, NULL);
+
+
+static ssize_t dgap_driver_debug_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", dgap_debug);
+}
+
+static ssize_t dgap_driver_debug_store(struct device_driver *ddp, const char *buf, size_t count)
+{
+ sscanf(buf, "0x%x\n", &dgap_debug);
+ return count;
+}
+static DRIVER_ATTR(debug, (S_IRUSR | S_IWUSR), dgap_driver_debug_show, dgap_driver_debug_store);
+
+
+static ssize_t dgap_driver_rawreadok_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", dgap_rawreadok);
+}
+
+static ssize_t dgap_driver_rawreadok_store(struct device_driver *ddp, const char *buf, size_t count)
+{
+ sscanf(buf, "0x%x\n", &dgap_rawreadok);
+ return count;
+}
+static DRIVER_ATTR(rawreadok, (S_IRUSR | S_IWUSR), dgap_driver_rawreadok_show, dgap_driver_rawreadok_store);
+
+
+static ssize_t dgap_driver_pollrate_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%dms\n", dgap_poll_tick);
+}
+
+static ssize_t dgap_driver_pollrate_store(struct device_driver *ddp, const char *buf, size_t count)
+{
+ sscanf(buf, "%d\n", &dgap_poll_tick);
+ return count;
+}
+static DRIVER_ATTR(pollrate, (S_IRUSR | S_IWUSR), dgap_driver_pollrate_show, dgap_driver_pollrate_store);
+
+
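+/*
+ * The driver attributes defined above are registered below; they typically
+ * show up under the PCI driver's sysfs directory, e.g.
+ * /sys/bus/pci/drivers/<driver name>/version.
+ */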
+void dgap_create_driver_sysfiles(struct pci_driver *dgap_driver)
+{
+ int rc = 0;
+ struct device_driver *driverfs = &dgap_driver->driver;
+
+ rc |= driver_create_file(driverfs, &driver_attr_version);
+ rc |= driver_create_file(driverfs, &driver_attr_boards);
+ rc |= driver_create_file(driverfs, &driver_attr_maxboards);
+ rc |= driver_create_file(driverfs, &driver_attr_debug);
+ rc |= driver_create_file(driverfs, &driver_attr_rawreadok);
+ rc |= driver_create_file(driverfs, &driver_attr_pollrate);
+ rc |= driver_create_file(driverfs, &driver_attr_pollcounter);
+ rc |= driver_create_file(driverfs, &driver_attr_state);
+ if (rc) {
+ printk(KERN_ERR "DGAP: sysfs driver_create_file failed!\n");
+ }
+}
+
+
+void dgap_remove_driver_sysfiles(struct pci_driver *dgap_driver)
+{
+ struct device_driver *driverfs = &dgap_driver->driver;
+ driver_remove_file(driverfs, &driver_attr_version);
+ driver_remove_file(driverfs, &driver_attr_boards);
+ driver_remove_file(driverfs, &driver_attr_maxboards);
+ driver_remove_file(driverfs, &driver_attr_debug);
+ driver_remove_file(driverfs, &driver_attr_rawreadok);
+ driver_remove_file(driverfs, &driver_attr_pollrate);
+ driver_remove_file(driverfs, &driver_attr_pollcounter);
+ driver_remove_file(driverfs, &driver_attr_state);
+}
+
+
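+/*
+ * Validates the device pointer and its board before a sysfs show: bails out
+ * of the enclosing function with 0 (an empty read) unless the board exists
+ * and is in the BOARD_READY state.
+ */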
+#define DGAP_VERIFY_BOARD(p, bd) \
+ if (!p) \
+ return (0); \
+ \
+ bd = dev_get_drvdata(p); \
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC) \
+ return (0); \
+ if (bd->state != BOARD_READY) \
+ return (0); \
+
+
+static ssize_t dgap_ports_state_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGAP_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "%d %s\n", bd->channels[i]->ch_portnum,
+ bd->channels[i]->ch_open_count ? "Open" : "Closed");
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_state, S_IRUSR, dgap_ports_state_show, NULL);
+
+
+static ssize_t dgap_ports_baud_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGAP_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "%d %d\n", bd->channels[i]->ch_portnum, bd->channels[i]->ch_baud_info);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_baud, S_IRUSR, dgap_ports_baud_show, NULL);
+
+
+static ssize_t dgap_ports_msignals_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGAP_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ if (bd->channels[i]->ch_open_count) {
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "%d %s %s %s %s %s %s\n", bd->channels[i]->ch_portnum,
+ (bd->channels[i]->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
+ (bd->channels[i]->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
+ (bd->channels[i]->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
+ (bd->channels[i]->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
+ (bd->channels[i]->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
+ (bd->channels[i]->ch_mistat & UART_MSR_RI) ? "RI" : "");
+ } else {
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "%d\n", bd->channels[i]->ch_portnum);
+ }
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_msignals, S_IRUSR, dgap_ports_msignals_show, NULL);
+
+
+static ssize_t dgap_ports_iflag_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGAP_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_c_iflag);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_iflag, S_IRUSR, dgap_ports_iflag_show, NULL);
+
+
+static ssize_t dgap_ports_cflag_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGAP_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_c_cflag);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_cflag, S_IRUSR, dgap_ports_cflag_show, NULL);
+
+
+static ssize_t dgap_ports_oflag_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGAP_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_c_oflag);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_oflag, S_IRUSR, dgap_ports_oflag_show, NULL);
+
+
+static ssize_t dgap_ports_lflag_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGAP_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_c_lflag);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_lflag, S_IRUSR, dgap_ports_lflag_show, NULL);
+
+
+static ssize_t dgap_ports_digi_flag_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGAP_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_digi.digi_flags);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_digi_flag, S_IRUSR, dgap_ports_digi_flag_show, NULL);
+
+
+static ssize_t dgap_ports_rxcount_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGAP_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_rxcount);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_rxcount, S_IRUSR, dgap_ports_rxcount_show, NULL);
+
+
+static ssize_t dgap_ports_txcount_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGAP_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_txcount);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_txcount, S_IRUSR, dgap_ports_txcount_show, NULL);
+
+
+/* This function creates the sysfs files that export each signal status;
+ * each value is put in a separate file.
+ */
+void dgap_create_ports_sysfiles(struct board_t *bd)
+{
+ int rc = 0;
+
+ dev_set_drvdata(&bd->pdev->dev, bd);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_state);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_baud);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
+ if (rc) {
+ printk(KERN_ERR "DGAP: sysfs device_create_file failed!\n");
+ }
+}
+
+
+/* removes all the sys files created for that port */
+void dgap_remove_ports_sysfiles(struct board_t *bd)
+{
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_state);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_baud);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
+}
+
+
+static ssize_t dgap_tty_state_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%s", un->un_open_count ? "Open" : "Closed");
+}
+static DEVICE_ATTR(state, S_IRUSR, dgap_tty_state_show, NULL);
+
+
+static ssize_t dgap_tty_baud_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ch->ch_baud_info);
+}
+static DEVICE_ATTR(baud, S_IRUSR, dgap_tty_baud_show, NULL);
+
+
+static ssize_t dgap_tty_msignals_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ if (ch->ch_open_count) {
+ return snprintf(buf, PAGE_SIZE, "%s %s %s %s %s %s\n",
+ (ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
+ (ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
+ (ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
+ (ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
+ (ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
+ (ch->ch_mistat & UART_MSR_RI) ? "RI" : "");
+ }
+ return 0;
+}
+static DEVICE_ATTR(msignals, S_IRUSR, dgap_tty_msignals_show, NULL);
+
+
+static ssize_t dgap_tty_iflag_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_iflag);
+}
+static DEVICE_ATTR(iflag, S_IRUSR, dgap_tty_iflag_show, NULL);
+
+
+static ssize_t dgap_tty_cflag_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_cflag);
+}
+static DEVICE_ATTR(cflag, S_IRUSR, dgap_tty_cflag_show, NULL);
+
+
+static ssize_t dgap_tty_oflag_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_oflag);
+}
+static DEVICE_ATTR(oflag, S_IRUSR, dgap_tty_oflag_show, NULL);
+
+
+static ssize_t dgap_tty_lflag_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_lflag);
+}
+static DEVICE_ATTR(lflag, S_IRUSR, dgap_tty_lflag_show, NULL);
+
+
+static ssize_t dgap_tty_digi_flag_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_digi.digi_flags);
+}
+static DEVICE_ATTR(digi_flag, S_IRUSR, dgap_tty_digi_flag_show, NULL);
+
+
+static ssize_t dgap_tty_rxcount_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_rxcount);
+}
+static DEVICE_ATTR(rxcount, S_IRUSR, dgap_tty_rxcount_show, NULL);
+
+
+static ssize_t dgap_tty_txcount_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_txcount);
+}
+static DEVICE_ATTR(txcount, S_IRUSR, dgap_tty_txcount_show, NULL);
+
+
+static ssize_t dgap_tty_name_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int cn;
+ int bn;
+ struct cnode *cptr = NULL;
+ int found = FALSE;
+ int ncount = 0;
+ int starto = 0;
+ int i = 0;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ bn = bd->boardnum;
+ cn = ch->ch_portnum;
+
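+ /*
+ * Walk the board's config nodes: pick up the tty name prefix or the
+ * concentrator/module ID letter covering this port, honoring any
+ * user-specified start offset when building the custom name.
+ */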
+ for (cptr = bd->bd_config; cptr; cptr = cptr->next) {
+
+ if ((cptr->type == BNODE) &&
+ ((cptr->u.board.type == APORT2_920P) || (cptr->u.board.type == APORT4_920P) ||
+ (cptr->u.board.type == APORT8_920P) || (cptr->u.board.type == PAPORT4) ||
+ (cptr->u.board.type == PAPORT8))) {
+
+ found = TRUE;
+ if (cptr->u.board.v_start)
+ starto = cptr->u.board.start;
+ else
+ starto = 1;
+ }
+
+ if (cptr->type == TNODE && found == TRUE) {
+ char *ptr1;
+ if (strstr(cptr->u.ttyname, "tty")) {
+ ptr1 = cptr->u.ttyname;
+ ptr1 += 3;
+ }
+ else {
+ ptr1 = cptr->u.ttyname;
+ }
+
+ for (i = 0; i < dgap_config_get_number_of_ports(bd); i++) {
+ if (cn == i) {
+ return snprintf(buf, PAGE_SIZE, "%s%s%02d\n",
+ (un->un_type == DGAP_PRINT) ? "pr" : "tty",
+ ptr1, i + starto);
+ }
+ }
+ }
+
+ if (cptr->type == CNODE) {
+
+ for (i = 0; i < cptr->u.conc.nport; i++) {
+ if (cn == (i + ncount)) {
+
+ return snprintf(buf, PAGE_SIZE, "%s%s%02d\n",
+ (un->un_type == DGAP_PRINT) ? "pr" : "tty",
+ cptr->u.conc.id,
+ i + (cptr->u.conc.v_start ? cptr->u.conc.start : 1));
+ }
+ }
+
+ ncount += cptr->u.conc.nport;
+ }
+
+ if (cptr->type == MNODE) {
+
+ for (i = 0; i < cptr->u.module.nport; i++) {
+ if (cn == (i + ncount)) {
+ return snprintf(buf, PAGE_SIZE, "%s%s%02d\n",
+ (un->un_type == DGAP_PRINT) ? "pr" : "tty",
+ cptr->u.module.id,
+ i + (cptr->u.module.v_start ? cptr->u.module.start : 1));
+ }
+ }
+
+ ncount += cptr->u.module.nport;
+
+ }
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%s_dgap_%d_%d\n",
+ (un->un_type == DGAP_PRINT) ? "pr" : "tty", bn, cn);
+
+}
+static DEVICE_ATTR(custom_name, S_IRUSR, dgap_tty_name_show, NULL);
+
+
+static struct attribute *dgap_sysfs_tty_entries[] = {
+ &dev_attr_state.attr,
+ &dev_attr_baud.attr,
+ &dev_attr_msignals.attr,
+ &dev_attr_iflag.attr,
+ &dev_attr_cflag.attr,
+ &dev_attr_oflag.attr,
+ &dev_attr_lflag.attr,
+ &dev_attr_digi_flag.attr,
+ &dev_attr_rxcount.attr,
+ &dev_attr_txcount.attr,
+ &dev_attr_custom_name.attr,
+ NULL
+};
+
+
+static struct attribute_group dgap_tty_attribute_group = {
+ .name = NULL,
+ .attrs = dgap_sysfs_tty_entries,
+};
+
+
+
+
+void dgap_create_tty_sysfs(struct un_t *un, struct device *c)
+{
+ int ret;
+
+ ret = sysfs_create_group(&c->kobj, &dgap_tty_attribute_group);
+ if (ret) {
+ printk(KERN_ERR "dgap: failed to create sysfs tty device attributes.\n");
+ sysfs_remove_group(&c->kobj, &dgap_tty_attribute_group);
+ return;
+ }
+
+ dev_set_drvdata(c, un);
+
+}
+
+
+void dgap_remove_tty_sysfs(struct device *c)
+{
+ sysfs_remove_group(&c->kobj, &dgap_tty_attribute_group);
+}
diff --git a/drivers/staging/dgap/dgap_sysfs.h b/drivers/staging/dgap/dgap_sysfs.h
new file mode 100644
index 0000000..dde690e
--- /dev/null
+++ b/drivers/staging/dgap/dgap_sysfs.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+#ifndef __DGAP_SYSFS_H
+#define __DGAP_SYSFS_H
+
+#include "dgap_driver.h"
+
+#include <linux/device.h>
+
+struct board_t;
+struct channel_t;
+struct un_t;
+struct pci_driver;
+struct class_device;
+
+extern void dgap_create_ports_sysfiles(struct board_t *bd);
+extern void dgap_remove_ports_sysfiles(struct board_t *bd);
+
+extern void dgap_create_driver_sysfiles(struct pci_driver *);
+extern void dgap_remove_driver_sysfiles(struct pci_driver *);
+
+extern int dgap_tty_class_init(void);
+extern int dgap_tty_class_destroy(void);
+
+extern void dgap_create_tty_sysfs(struct un_t *un, struct device *c);
+extern void dgap_remove_tty_sysfs(struct device *c);
+
+
+#endif
diff --git a/drivers/staging/dgap/dgap_trace.c b/drivers/staging/dgap/dgap_trace.c
new file mode 100644
index 0000000..0f9a956
--- /dev/null
+++ b/drivers/staging/dgap/dgap_trace.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ *
+ */
+
+/* $Id: dgap_trace.c,v 1.1 2009/10/23 14:01:57 markh Exp $ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h> /* For jiffies, task states */
+#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
+#include <linux/vmalloc.h>
+
+#include "dgap_driver.h"
+
+#define TRC_TO_CONSOLE 1
+
+/* file level globals */
+static char *dgap_trcbuf; /* the ringbuffer */
+
+#if defined(TRC_TO_KMEM)
+static int dgap_trcbufi = 0; /* index of the tilde at the end of the trace buffer */
+#endif
+
+extern int dgap_trcbuf_size; /* size of the ringbuffer */
+
+#if defined(TRC_TO_KMEM)
+static DEFINE_SPINLOCK(dgap_tracef_lock);
+#endif
+
+#if 0
+
+#if !defined(TRC_TO_KMEM) && !defined(TRC_TO_CONSOLE)
+void dgap_tracef(const char *fmt, ...)
+{
+ return;
+}
+
+#else /* !defined(TRC_TO_KMEM) && !defined(TRC_TO_CONSOLE) */
+
+void dgap_tracef(const char *fmt, ...)
+{
+ va_list ap;
+ char buf[TRC_MAXMSG+1];
+ size_t lenbuf;
+ int i;
+ static int failed = FALSE;
+# if defined(TRC_TO_KMEM)
+ unsigned long flags;
+#endif
+
+ if(failed)
+ return;
+# if defined(TRC_TO_KMEM)
+ DGAP_LOCK(dgap_tracef_lock, flags);
+#endif
+
+ /* Format buf using fmt and arguments contained in ap. */
+ va_start(ap, fmt);
+ i = vsprintf(buf, fmt, ap);
+ va_end(ap);
+ lenbuf = strlen(buf);
+
+# if defined(TRC_TO_KMEM)
+ {
+ static int initd=0;
+
+ /*
+ * Now, in addition to (or instead of) printing this stuff out
+ * (which is a buffered operation), also tuck it away into a
+ * corner of memory which can be examined post-crash in kdb.
+ */
+ if (!initd) {
+ dgap_trcbuf = (char *) vmalloc(dgap_trcbuf_size);
+ if(!dgap_trcbuf) {
+ failed = TRUE;
+ printk("dgap: tracing init failed!\n");
+ return;
+ }
+
+ memset(dgap_trcbuf, '\0', dgap_trcbuf_size);
+ dgap_trcbufi = 0;
+ initd++;
+
+ printk("dgap: tracing enabled - " TRC_DTRC
+ " 0x%lx 0x%x\n",
+ (unsigned long)dgap_trcbuf,
+ dgap_trcbuf_size);
+ }
+
+# if defined(TRC_ON_OVERFLOW_WRAP_AROUND)
+ /*
+ * This is the less CPU-intensive way to do things. We simply
+ * wrap around before we fall off the end of the buffer. A
+ * tilde (~) demarcates the current end of the trace.
+ *
+ * This method should be used if you are concerned about race
+ * conditions as it is less likely to affect the timing of
+ * things.
+ */
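+		/*
+		 * Example (illustrative sizes): with an 8192-byte trace
+		 * buffer and dgap_trcbufi at 8180, a 40-byte message no
+		 * longer fits, so the old tilde is overwritten with a NUL,
+		 * the message is written at offset 0, and the new tilde
+		 * lands at offset 40.
+		 */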
+
+ if (dgap_trcbufi + lenbuf >= dgap_trcbuf_size) {
+ /* We are wrapping, so wipe out the last tilde. */
+ dgap_trcbuf[dgap_trcbufi] = '\0';
+ /* put the new string at the beginning of the buffer */
+ dgap_trcbufi = 0;
+ }
+
+ strcpy(&dgap_trcbuf[dgap_trcbufi], buf);
+ dgap_trcbufi += lenbuf;
+ dgap_trcbuf[dgap_trcbufi] = '~';
+
+# elif defined(TRC_ON_OVERFLOW_SHIFT_BUFFER)
+ /*
+ * This is the more CPU-intensive way to do things. If we
+ * venture into the last 1/8 of the buffer, we shift the
+ * last 7/8 of the buffer forward, wiping out the first 1/8.
+ * Advantage: No wrap-around, only truncation from the
+ * beginning.
+ *
+ * This method should not be used if you are concerned about
+ * timing changes affecting the behaviour of the driver (ie,
+ * race conditions).
+ */
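+		/*
+		 * Example (illustrative sizes): with an 8192-byte trace
+		 * buffer, once the index passes 7168 the oldest eighth of
+		 * the trace is dropped by copying everything above offset
+		 * 1024 down to the start, and TRC_OVERFLOW is stamped over
+		 * the beginning.
+		 */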
+ strcpy(&dgap_trcbuf[dgap_trcbufi], buf);
+ dgap_trcbufi += lenbuf;
+ dgap_trcbuf[dgap_trcbufi] = '~';
+ dgap_trcbuf[dgap_trcbufi+1] = '\0';
+
+ /* If we're near the end of the trace buffer... */
+ if (dgap_trcbufi > (dgap_trcbuf_size/8)*7) {
+ /* Wipe out the first eighth to make some more room. */
+ strcpy(dgap_trcbuf, &dgap_trcbuf[dgap_trcbuf_size/8]);
+ dgap_trcbufi = strlen(dgap_trcbuf)-1;
+ /* Plop overflow message at the top of the buffer. */
+ bcopy(TRC_OVERFLOW, dgap_trcbuf, strlen(TRC_OVERFLOW));
+ }
+# else
+# error "TRC_ON_OVERFLOW_WRAP_AROUND or TRC_ON_OVERFLOW_SHIFT_BUFFER?"
+# endif
+ }
+ DGAP_UNLOCK(dgap_tracef_lock, flags);
+
+# endif /* defined(TRC_TO_KMEM) */
+}
+
+#endif /* !defined(TRC_TO_KMEM) && !defined(TRC_TO_CONSOLE) */
+
+#endif
+
+/*
+ * dgap_tracer_free()
+ *
+ * Free the trace ring buffer, if one was ever allocated.
+ */
+void dgap_tracer_free(void)
+{
+ if(dgap_trcbuf)
+ vfree(dgap_trcbuf);
+}
diff --git a/drivers/staging/dgap/dgap_trace.h b/drivers/staging/dgap/dgap_trace.h
new file mode 100644
index 0000000..b21f461
--- /dev/null
+++ b/drivers/staging/dgap/dgap_trace.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ *
+ *****************************************************************************
+ * Header file for dgap_trace.c
+ *
+ * $Id: dgap_trace.h,v 1.1 2009/10/23 14:01:57 markh Exp $
+ */
+
+#ifndef __DGAP_TRACE_H
+#define __DGAP_TRACE_H
+
+#include "dgap_driver.h"
+
+void dgap_tracef(const char *fmt, ...);
+void dgap_tracer_free(void);
+
+#endif
+
diff --git a/drivers/staging/dgap/dgap_tty.c b/drivers/staging/dgap/dgap_tty.c
new file mode 100644
index 0000000..b906db3
--- /dev/null
+++ b/drivers/staging/dgap/dgap_tty.c
@@ -0,0 +1,3597 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ */
+
+/************************************************************************
+ *
+ * This file implements the tty driver functionality for the
+ * FEP5 based product lines.
+ *
+ ************************************************************************
+ *
+ * $Id: dgap_tty.c,v 1.3 2011/06/23 12:11:31 markh Exp $
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/sched.h> /* For jiffies, task states */
+#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_reg.h>
+#include <linux/slab.h>
+#include <linux/delay.h> /* For udelay */
+#include <asm/uaccess.h> /* For copy_from_user/copy_to_user */
+#include <asm/io.h> /* For read[bwl]/write[bwl] */
+#include <linux/pci.h>
+
+#include "dgap_driver.h"
+#include "dgap_tty.h"
+#include "dgap_types.h"
+#include "dgap_fep5.h"
+#include "dgap_parse.h"
+#include "dgap_conf.h"
+#include "dgap_sysfs.h"
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)
+#define init_MUTEX(sem) sema_init(sem, 1)
+#define DECLARE_MUTEX(name) \
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
+#endif
+
+/*
+ * internal variables
+ */
+static struct board_t *dgap_BoardsByMajor[256];
+static uchar *dgap_TmpWriteBuf = NULL;
+static DECLARE_MUTEX(dgap_TmpWriteSem);
+
+/*
+ * Default transparent print information.
+ */
+static struct digi_t dgap_digi_init = {
+ .digi_flags = DIGI_COOK, /* Flags */
+ .digi_maxcps = 100, /* Max CPS */
+ .digi_maxchar = 50, /* Max chars in print queue */
+ .digi_bufsize = 100, /* Printer buffer size */
+ .digi_onlen = 4, /* size of printer on string */
+ .digi_offlen = 4, /* size of printer off string */
+ .digi_onstr = "\033[5i", /* ANSI printer on string ] */
+ .digi_offstr = "\033[4i", /* ANSI printer off string ] */
+ .digi_term = "ansi" /* default terminal type */
+};
+
+
+/*
+ * Define a local default termios struct. All ports will be created
+ * with this termios initially.
+ *
+ * This defines a raw port at 9600 baud, 8 data bits, no parity,
+ * 1 stop bit.
+ */
+
+static struct ktermios DgapDefaultTermios =
+{
+ .c_iflag = (DEFAULT_IFLAGS), /* iflags */
+ .c_oflag = (DEFAULT_OFLAGS), /* oflags */
+ .c_cflag = (DEFAULT_CFLAGS), /* cflags */
+ .c_lflag = (DEFAULT_LFLAGS), /* lflags */
+ .c_cc = INIT_C_CC,
+ .c_line = 0,
+};
+
+/* Our function prototypes */
+static int dgap_tty_open(struct tty_struct *tty, struct file *file);
+static void dgap_tty_close(struct tty_struct *tty, struct file *file);
+static int dgap_block_til_ready(struct tty_struct *tty, struct file *file, struct channel_t *ch);
+static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg);
+static int dgap_tty_digigeta(struct tty_struct *tty, struct digi_t __user *retinfo);
+static int dgap_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_info);
+static int dgap_tty_digigetedelay(struct tty_struct *tty, int __user *retinfo);
+static int dgap_tty_digisetedelay(struct tty_struct *tty, int __user *new_info);
+static int dgap_tty_write_room(struct tty_struct* tty);
+static int dgap_tty_chars_in_buffer(struct tty_struct* tty);
+static void dgap_tty_start(struct tty_struct *tty);
+static void dgap_tty_stop(struct tty_struct *tty);
+static void dgap_tty_throttle(struct tty_struct *tty);
+static void dgap_tty_unthrottle(struct tty_struct *tty);
+static void dgap_tty_flush_chars(struct tty_struct *tty);
+static void dgap_tty_flush_buffer(struct tty_struct *tty);
+static void dgap_tty_hangup(struct tty_struct *tty);
+static int dgap_wait_for_drain(struct tty_struct *tty);
+static int dgap_set_modem_info(struct tty_struct *tty, unsigned int command, unsigned int __user *value);
+static int dgap_get_modem_info(struct channel_t *ch, unsigned int __user *value);
+static int dgap_tty_digisetcustombaud(struct tty_struct *tty, int __user *new_info);
+static int dgap_tty_digigetcustombaud(struct tty_struct *tty, int __user *retinfo);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
+static int dgap_tty_tiocmget(struct tty_struct *tty);
+static int dgap_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear);
+#else
+static int dgap_tty_tiocmget(struct tty_struct *tty, struct file *file);
+static int dgap_tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear);
+#endif
+static int dgap_tty_send_break(struct tty_struct *tty, int msec);
+static void dgap_tty_wait_until_sent(struct tty_struct *tty, int timeout);
+static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf, int count);
+static void dgap_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios);
+static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c);
+static void dgap_tty_send_xchar(struct tty_struct *tty, char ch);
+
+static const struct tty_operations dgap_tty_ops = {
+ .open = dgap_tty_open,
+ .close = dgap_tty_close,
+ .write = dgap_tty_write,
+ .write_room = dgap_tty_write_room,
+ .flush_buffer = dgap_tty_flush_buffer,
+ .chars_in_buffer = dgap_tty_chars_in_buffer,
+ .flush_chars = dgap_tty_flush_chars,
+ .ioctl = dgap_tty_ioctl,
+ .set_termios = dgap_tty_set_termios,
+ .stop = dgap_tty_stop,
+ .start = dgap_tty_start,
+ .throttle = dgap_tty_throttle,
+ .unthrottle = dgap_tty_unthrottle,
+ .hangup = dgap_tty_hangup,
+ .put_char = dgap_tty_put_char,
+ .tiocmget = dgap_tty_tiocmget,
+ .tiocmset = dgap_tty_tiocmset,
+ .break_ctl = dgap_tty_send_break,
+ .wait_until_sent = dgap_tty_wait_until_sent,
+ .send_xchar = dgap_tty_send_xchar
+};
+
+
+
+
+
+/************************************************************************
+ *
+ * TTY Initialization/Cleanup Functions
+ *
+ ************************************************************************/
+
+/*
+ * dgap_tty_preinit()
+ *
+ * Initialize any global tty related data before we download any boards.
+ */
+int dgap_tty_preinit(void)
+{
+ unsigned long flags;
+
+ DGAP_LOCK(dgap_global_lock, flags);
+
+ /*
+ * Allocate a buffer for doing the copy from user space to
+ * kernel space in dgap_input(). We only use one buffer and
+ * control access to it with a semaphore. If we are paging, we
+ * are already in trouble so one buffer won't hurt much anyway.
+ */
+ dgap_TmpWriteBuf = kmalloc(WRITEBUFLEN, GFP_ATOMIC);
+
+ if (!dgap_TmpWriteBuf) {
+ DGAP_UNLOCK(dgap_global_lock, flags);
+ DPR_INIT(("unable to allocate tmp write buf"));
+ return (-ENOMEM);
+ }
+
+ DGAP_UNLOCK(dgap_global_lock, flags);
+ return(0);
+}
+
+
+/*
+ * dgap_tty_register()
+ *
+ * Init the tty subsystem for this board.
+ */
+int dgap_tty_register(struct board_t *brd)
+{
+ int rc = 0;
+
+ DPR_INIT(("tty_register start"));
+
+ brd->SerialDriver = alloc_tty_driver(MAXPORTS);
+
+ snprintf(brd->SerialName, MAXTTYNAMELEN, "tty_dgap_%d_", brd->boardnum);
+ brd->SerialDriver->name = brd->SerialName;
+ brd->SerialDriver->name_base = 0;
+ brd->SerialDriver->major = 0;
+ brd->SerialDriver->minor_start = 0;
+ brd->SerialDriver->type = TTY_DRIVER_TYPE_SERIAL;
+ brd->SerialDriver->subtype = SERIAL_TYPE_NORMAL;
+ brd->SerialDriver->init_termios = DgapDefaultTermios;
+ brd->SerialDriver->driver_name = DRVSTR;
+ brd->SerialDriver->flags = (TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK);
+
+ /* The kernel wants space to store pointers to tty_structs */
+ brd->SerialDriver->ttys = dgap_driver_kzmalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL);
+ if (!brd->SerialDriver->ttys)
+ return(-ENOMEM);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ brd->SerialDriver->refcount = brd->TtyRefCnt;
+#endif
+
+ /*
+ * Entry points for driver. Called by the kernel from
+ * tty_io.c and n_tty.c.
+ */
+ tty_set_operations(brd->SerialDriver, &dgap_tty_ops);
+
+ /*
+ * If we're doing transparent print, we have to do all of the above
+ * again, separately so we don't get the LD confused about what major
+ * we are when we get into the dgap_tty_open() routine.
+ */
+ brd->PrintDriver = alloc_tty_driver(MAXPORTS);
+
+ snprintf(brd->PrintName, MAXTTYNAMELEN, "pr_dgap_%d_", brd->boardnum);
+ brd->PrintDriver->name = brd->PrintName;
+ brd->PrintDriver->name_base = 0;
+ brd->PrintDriver->major = 0;
+ brd->PrintDriver->minor_start = 0;
+ brd->PrintDriver->type = TTY_DRIVER_TYPE_SERIAL;
+ brd->PrintDriver->subtype = SERIAL_TYPE_NORMAL;
+ brd->PrintDriver->init_termios = DgapDefaultTermios;
+ brd->PrintDriver->driver_name = DRVSTR;
+ brd->PrintDriver->flags = (TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK);
+
+ /* The kernel wants space to store pointers to tty_structs */
+ brd->PrintDriver->ttys = dgap_driver_kzmalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL);
+ if (!brd->PrintDriver->ttys)
+ return(-ENOMEM);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ brd->PrintDriver->refcount = brd->TtyRefCnt;
+#endif
+
+ /*
+ * Entry points for driver. Called by the kernel from
+ * tty_io.c and n_tty.c.
+ */
+ tty_set_operations(brd->PrintDriver, &dgap_tty_ops);
+
+ if (!brd->dgap_Major_Serial_Registered) {
+ /* Register tty devices */
+ rc = tty_register_driver(brd->SerialDriver);
+ if (rc < 0) {
+ APR(("Can't register tty device (%d)\n", rc));
+ return(rc);
+ }
+ brd->dgap_Major_Serial_Registered = TRUE;
+ dgap_BoardsByMajor[brd->SerialDriver->major] = brd;
+ brd->dgap_Serial_Major = brd->SerialDriver->major;
+ }
+
+ if (!brd->dgap_Major_TransparentPrint_Registered) {
+ /* Register Transparent Print devices */
+ rc = tty_register_driver(brd->PrintDriver);
+ if (rc < 0) {
+ APR(("Can't register Transparent Print device (%d)\n", rc));
+ return(rc);
+ }
+ brd->dgap_Major_TransparentPrint_Registered = TRUE;
+ dgap_BoardsByMajor[brd->PrintDriver->major] = brd;
+ brd->dgap_TransparentPrint_Major = brd->PrintDriver->major;
+ }
+
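+	/*
+	 * At this point the board owns two character majors: one for the
+	 * normal serial nodes and one for the transparent print nodes.
+	 * dgap_BoardsByMajor[] lets dgap_tty_open() map a major number
+	 * back to its board_t.
+	 */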
+ DPR_INIT(("DGAP REGISTER TTY: MAJORS: %d %d\n", brd->SerialDriver->major,
+ brd->PrintDriver->major));
+
+ return (rc);
+}
+
+
+/*
+ * dgap_tty_init()
+ *
+ * Init the tty subsystem. Called once per board after board has been
+ * downloaded and init'ed.
+ */
+int dgap_tty_init(struct board_t *brd)
+{
+ int i;
+ int tlw;
+ uint true_count = 0;
+ uchar *vaddr;
+ uchar modem = 0;
+ struct channel_t *ch;
+ struct bs_t *bs;
+ struct cm_t *cm;
+
+ if (!brd)
+ return (-ENXIO);
+
+ DPR_INIT(("dgap_tty_init start\n"));
+
+ /*
+ * Initialize board structure elements.
+ */
+
+ vaddr = brd->re_map_membase;
+ true_count = readw((vaddr + NCHAN));
+
+ brd->nasync = dgap_config_get_number_of_ports(brd);
+
+ if (!brd->nasync) {
+ brd->nasync = brd->maxports;
+ }
+
+ if (brd->nasync > brd->maxports) {
+ brd->nasync = brd->maxports;
+ }
+
+ if (true_count != brd->nasync) {
+ if ((brd->type == PPCM) && (true_count == 64)) {
+ APR(("***WARNING**** %s configured for %d ports, has %d ports.\nPlease make SURE the EBI cable running from the card\nto each EM module is plugged into EBI IN!\n",
+ brd->name, brd->nasync, true_count));
+ }
+ else if ((brd->type == PPCM) && (true_count == 0)) {
+ APR(("***WARNING**** %s configured for %d ports, has %d ports.\nPlease make SURE the EBI cable running from the card\nto each EM module is plugged into EBI IN!\n",
+ brd->name, brd->nasync, true_count));
+ }
+ else {
+ APR(("***WARNING**** %s configured for %d ports, has %d ports.\n",
+ brd->name, brd->nasync, true_count));
+ }
+
+ brd->nasync = true_count;
+
+ /* If no ports, don't bother going any further */
+ if (!brd->nasync) {
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ return(-ENXIO);
+ }
+ }
+
+ /*
+ * Allocate channel memory that might not have been allocated
+ * when the driver was first loaded.
+ */
+ for (i = 0; i < brd->nasync; i++) {
+ if (!brd->channels[i]) {
+ brd->channels[i] = dgap_driver_kzmalloc(sizeof(struct channel_t), GFP_ATOMIC);
+ if (!brd->channels[i]) {
+ DPR_CORE(("%s:%d Unable to allocate memory for channel struct\n",
+ __FILE__, __LINE__));
+ }
+ }
+ }
+
+ ch = brd->channels[0];
+ vaddr = brd->re_map_membase;
+
+ bs = (struct bs_t *) ((ulong) vaddr + CHANBUF);
+ cm = (struct cm_t *) ((ulong) vaddr + CMDBUF);
+
+ brd->bd_bs = bs;
+
+ /* Set up channel variables */
+ for (i = 0; i < brd->nasync; i++, ch = brd->channels[i], bs++) {
+
+ if (!brd->channels[i])
+ continue;
+
+ DGAP_SPINLOCK_INIT(ch->ch_lock);
+
+ /* Store all our magic numbers */
+ ch->magic = DGAP_CHANNEL_MAGIC;
+ ch->ch_tun.magic = DGAP_UNIT_MAGIC;
+ ch->ch_tun.un_type = DGAP_SERIAL;
+ ch->ch_tun.un_ch = ch;
+ ch->ch_tun.un_dev = i;
+
+ ch->ch_pun.magic = DGAP_UNIT_MAGIC;
+ ch->ch_pun.un_type = DGAP_PRINT;
+ ch->ch_pun.un_ch = ch;
+ ch->ch_pun.un_dev = i;
+
+ ch->ch_vaddr = vaddr;
+ ch->ch_bs = bs;
+ ch->ch_cm = cm;
+ ch->ch_bd = brd;
+ ch->ch_portnum = i;
+ ch->ch_digi = dgap_digi_init;
+
+ /*
+ * Set up digi dsr and dcd bits based on altpin flag.
+ */
+ if (dgap_config_get_altpin(brd)) {
+ ch->ch_dsr = DM_CD;
+ ch->ch_cd = DM_DSR;
+ ch->ch_digi.digi_flags |= DIGI_ALTPIN;
+ }
+ else {
+ ch->ch_cd = DM_CD;
+ ch->ch_dsr = DM_DSR;
+ }
+
+ ch->ch_taddr = vaddr + ((ch->ch_bs->tx_seg) << 4);
+ ch->ch_raddr = vaddr + ((ch->ch_bs->rx_seg) << 4);
+ ch->ch_tx_win = 0;
+ ch->ch_rx_win = 0;
+ ch->ch_tsize = readw(&(ch->ch_bs->tx_max)) + 1;
+ ch->ch_rsize = readw(&(ch->ch_bs->rx_max)) + 1;
+ ch->ch_tstart = 0;
+ ch->ch_rstart = 0;
+
+ /* .25 second delay */
+ ch->ch_close_delay = 250;
+
+ /*
+ * Set queue water marks, interrupt mask,
+ * and general tty parameters.
+ */
+ ch->ch_tlw = tlw = ch->ch_tsize >= 2000 ? ((ch->ch_tsize * 5) / 8) : ch->ch_tsize / 2;
+
+ dgap_cmdw(ch, STLOW, tlw, 0);
+
+ dgap_cmdw(ch, SRLOW, ch->ch_rsize / 2, 0);
+
+ dgap_cmdw(ch, SRHIGH, 7 * ch->ch_rsize / 8, 0);
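+		/*
+		 * Worked example (illustrative sizes; the real ones come
+		 * from the FEP): a 4096-byte transmit buffer gets a
+		 * low-water mark of (4096 * 5) / 8 = 2560, while a
+		 * 1024-byte receive buffer gets a low-water mark of 512
+		 * and a high-water mark of 896.
+		 */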
+
+ ch->ch_mistat = readb(&(ch->ch_bs->m_stat));
+
+ init_waitqueue_head(&ch->ch_flags_wait);
+ init_waitqueue_head(&ch->ch_tun.un_flags_wait);
+ init_waitqueue_head(&ch->ch_pun.un_flags_wait);
+ init_waitqueue_head(&ch->ch_sniff_wait);
+
+ /* Turn on all modem interrupts for now */
+ modem = (DM_CD | DM_DSR | DM_CTS | DM_RI);
+ writeb(modem, &(ch->ch_bs->m_int));
+
+ /*
+ * Set edelay to 0 if interrupts are turned on,
+ * otherwise set edelay to the usual 100.
+ */
+ if (brd->intr_used)
+ writew(0, &(ch->ch_bs->edelay));
+ else
+ writew(100, &(ch->ch_bs->edelay));
+
+ writeb(1, &(ch->ch_bs->idata));
+ }
+
+
+ DPR_INIT(("dgap_tty_init finish\n"));
+
+ return (0);
+}
+
+
+/*
+ * dgap_tty_post_uninit()
+ *
+ * UnInitialize any global tty related data.
+ */
+void dgap_tty_post_uninit(void)
+{
+ if (dgap_TmpWriteBuf) {
+ kfree(dgap_TmpWriteBuf);
+ dgap_TmpWriteBuf = NULL;
+ }
+}
+
+
+/*
+ * dgap_tty_uninit()
+ *
+ * Uninitialize the TTY portion of this driver. Free all memory and
+ * resources.
+ */
+void dgap_tty_uninit(struct board_t *brd)
+{
+ int i = 0;
+
+ if (brd->dgap_Major_Serial_Registered) {
+ dgap_BoardsByMajor[brd->SerialDriver->major] = NULL;
+ brd->dgap_Serial_Major = 0;
+ for (i = 0; i < brd->nasync; i++) {
+ dgap_remove_tty_sysfs(brd->channels[i]->ch_tun.un_sysfs);
+ tty_unregister_device(brd->SerialDriver, i);
+ }
+ tty_unregister_driver(brd->SerialDriver);
+ if (brd->SerialDriver->ttys) {
+ kfree(brd->SerialDriver->ttys);
+ brd->SerialDriver->ttys = NULL;
+ }
+ put_tty_driver(brd->SerialDriver);
+ brd->dgap_Major_Serial_Registered = FALSE;
+ }
+
+ if (brd->dgap_Major_TransparentPrint_Registered) {
+ dgap_BoardsByMajor[brd->PrintDriver->major] = NULL;
+ brd->dgap_TransparentPrint_Major = 0;
+ for (i = 0; i < brd->nasync; i++) {
+ dgap_remove_tty_sysfs(brd->channels[i]->ch_pun.un_sysfs);
+ tty_unregister_device(brd->PrintDriver, i);
+ }
+ tty_unregister_driver(brd->PrintDriver);
+ if (brd->PrintDriver->ttys) {
+ kfree(brd->PrintDriver->ttys);
+ brd->PrintDriver->ttys = NULL;
+ }
+ put_tty_driver(brd->PrintDriver);
+ brd->dgap_Major_TransparentPrint_Registered = FALSE;
+ }
+}
+
+
+#define TMPBUFLEN (1024)
+
+/*
+ * dgap_sniff_nowait_nolock - Dump data out to the "sniff" buffer if the
+ * proc sniff file is opened...
+ */
+static void dgap_sniff_nowait_nolock(struct channel_t *ch, uchar *text, uchar *buf, int len)
+{
+ struct timeval tv;
+ int n;
+ int r;
+ int nbuf;
+ int i;
+ int tmpbuflen;
+ char tmpbuf[TMPBUFLEN];
+ char *p = tmpbuf;
+ int too_much_data;
+
+ /* Leave if sniff not open */
+ if (!(ch->ch_sniff_flags & SNIFF_OPEN))
+ return;
+
+ do_gettimeofday(&tv);
+
+ /* Create our header for data dump */
+ p += sprintf(p, "<%ld %ld><%s><", tv.tv_sec, tv.tv_usec, text);
+ tmpbuflen = p - tmpbuf;
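+	/*
+	 * Each record pushed into the sniff buffer therefore looks like
+	 * "<sec usec><label><xx xx xx ... >", one hex byte per byte of
+	 * channel data, possibly emitted in several chunks by the loop
+	 * below when the data does not fit in tmpbuf.
+	 */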
+
+ do {
+ too_much_data = 0;
+
+ for (i = 0; i < len && tmpbuflen < (TMPBUFLEN - 4); i++) {
+ p += sprintf(p, "%02x ", *buf);
+ buf++;
+ tmpbuflen = p - tmpbuf;
+ }
+
+ if (tmpbuflen < (TMPBUFLEN - 4)) {
+ if (i > 0)
+ p += sprintf(p - 1, "%s\n", ">");
+ else
+ p += sprintf(p, "%s\n", ">");
+ } else {
+ too_much_data = 1;
+ len -= i;
+ }
+
+ nbuf = strlen(tmpbuf);
+ p = tmpbuf;
+
+ /*
+ * Loop while data remains.
+ */
+ while (nbuf > 0 && ch->ch_sniff_buf != 0) {
+ /*
+ * Determine the amount of available space left in the
+ * buffer. If there's none, wait until some appears.
+ */
+ n = (ch->ch_sniff_out - ch->ch_sniff_in - 1) & SNIFF_MASK;
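+			/*
+			 * Standard ring-buffer arithmetic, assuming SNIFF_MAX
+			 * is a power of two and SNIFF_MASK == SNIFF_MAX - 1:
+			 * e.g. with SNIFF_MAX at 4096, in == 100 and
+			 * out == 50 this works out to (50 - 100 - 1) & 4095,
+			 * i.e. 4045 free bytes.
+			 */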
+
+ /*
+ * If there is no space left to write to in our sniff buffer,
+ * we have no choice but to drop the data.
+ * We *cannot* sleep here waiting for space, because this
+ * function was probably called by the interrupt/timer routines!
+ */
+ if (n == 0) {
+ return;
+ }
+
+ /*
+ * Copy as much data as will fit.
+ */
+
+ if (n > nbuf)
+ n = nbuf;
+
+ r = SNIFF_MAX - ch->ch_sniff_in;
+
+ if (r <= n) {
+ memcpy(ch->ch_sniff_buf + ch->ch_sniff_in, p, r);
+
+ n -= r;
+ ch->ch_sniff_in = 0;
+ p += r;
+ nbuf -= r;
+ }
+
+ memcpy(ch->ch_sniff_buf + ch->ch_sniff_in, p, n);
+
+ ch->ch_sniff_in += n;
+ p += n;
+ nbuf -= n;
+
+ /*
+ * Wakeup any thread waiting for data
+ */
+ if (ch->ch_sniff_flags & SNIFF_WAIT_DATA) {
+ ch->ch_sniff_flags &= ~SNIFF_WAIT_DATA;
+ wake_up_interruptible(&ch->ch_sniff_wait);
+ }
+ }
+
+ /*
+ * If the user sent us too much data to push into our tmpbuf,
+ * we need to keep looping around on all the data.
+ */
+ if (too_much_data) {
+ p = tmpbuf;
+ tmpbuflen = 0;
+ }
+
+ } while (too_much_data);
+}
+
+
+/*=======================================================================
+ *
+ * dgap_input - Process received data.
+ *
+ * ch - Pointer to channel structure.
+ *
+ *=======================================================================*/
+
+void dgap_input(struct channel_t *ch)
+{
+ struct board_t *bd;
+ struct bs_t *bs;
+ struct tty_struct *tp;
+ struct tty_ldisc *ld;
+ uint rmask;
+ uint head;
+ uint tail;
+ int data_len;
+ ulong lock_flags;
+ ulong lock_flags2;
+ int flip_len;
+ int len = 0;
+ int n = 0;
+ uchar *buf;
+ uchar tmpchar;
+ int s = 0;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ tp = ch->ch_tun.un_tty;
+
+ bs = ch->ch_bs;
+ if (!bs) {
+ return;
+ }
+
+ bd = ch->ch_bd;
+ if(!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ DPR_READ(("dgap_input start\n"));
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ /*
+ * Figure the number of characters in the buffer.
+ * Exit immediately if none.
+ */
+
+ rmask = ch->ch_rsize - 1;
+
+ head = readw(&(bs->rx_head));
+ head &= rmask;
+ tail = readw(&(bs->rx_tail));
+ tail &= rmask;
+
+ data_len = (head - tail) & rmask;
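+	/*
+	 * Same ring arithmetic (illustrative sizes): with ch_rsize == 1024
+	 * (rmask == 1023), head == 10 and tail == 1000 gives
+	 * (10 - 1000) & 1023 == 34 bytes pending.
+	 */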
+
+ if (data_len == 0) {
+ writeb(1, &(bs->idata));
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ DPR_READ(("No data on port %d\n", ch->ch_portnum));
+ return;
+ }
+
+ /*
+ * If the device is not open, or CREAD is off, flush
+ * input data and return immediately.
+ */
+ if ((bd->state != BOARD_READY) || !tp || (tp->magic != TTY_MAGIC) ||
+ !(ch->ch_tun.un_flags & UN_ISOPEN) || !(tp->termios.c_cflag & CREAD) ||
+ (ch->ch_tun.un_flags & UN_CLOSING)) {
+
+ DPR_READ(("input. dropping %d bytes on port %d...\n", data_len, ch->ch_portnum));
+ DPR_READ(("input. tp: %p tp->magic: %x MAGIC:%x ch flags: %x\n",
+ tp, tp ? tp->magic : 0, TTY_MAGIC, ch->ch_tun.un_flags));
+ writew(head, &(bs->rx_tail));
+ writeb(1, &(bs->idata));
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return;
+ }
+
+ /*
+ * If we are throttled, simply don't read any data.
+ */
+ if (ch->ch_flags & CH_RXBLOCK) {
+ writeb(1, &(bs->idata));
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ DPR_READ(("Port %d throttled, not reading any data. head: %x tail: %x\n",
+ ch->ch_portnum, head, tail));
+ return;
+ }
+
+ /*
+ * Ignore oruns.
+ */
+ tmpchar = readb(&(bs->orun));
+ if (tmpchar) {
+ ch->ch_err_overrun++;
+ writeb(0, &(bs->orun));
+ }
+
+ DPR_READ(("dgap_input start 2\n"));
+
+ /* Decide how much data we can send into the tty layer */
+ flip_len = TTY_FLIPBUF_SIZE;
+
+ /* Chop down the length, if needed */
+ len = min(data_len, flip_len);
+ len = min(len, (N_TTY_BUF_SIZE - 1));
+
+ ld = tty_ldisc_ref(tp);
+
+#ifdef TTY_DONT_FLIP
+ /*
+ * If the DONT_FLIP flag is on, don't flush our buffer, and act
+ * like the ld doesn't have any space to put the data right now.
+ */
+ if (test_bit(TTY_DONT_FLIP, &tp->flags))
+ len = 0;
+#endif
+
+ /*
+ * If we were unable to get a reference to the ld,
+ * don't flush our buffer, and act like the ld doesn't
+ * have any space to put the data right now.
+ */
+ if (!ld) {
+ len = 0;
+ } else {
+ /*
+ * If ld doesn't have a pointer to a receive_buf function,
+ * flush the data, then act like the ld doesn't have any
+ * space to put the data right now.
+ */
+ if (!ld->ops->receive_buf) {
+ writew(head, &(bs->rx_tail));
+ len = 0;
+ }
+ }
+
+ if (len <= 0) {
+ writeb(1, &(bs->idata));
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ DPR_READ(("dgap_input 1 - finish\n"));
+ if (ld)
+ tty_ldisc_deref(ld);
+ return;
+ }
+
+ buf = ch->ch_bd->flipbuf;
+ n = len;
+
+ /*
+	 * n now contains the maximum amount of data we can copy,
+ * bounded either by our buffer size or the amount
+ * of data the card actually has pending...
+ */
+ while (n) {
+
+ s = ((head >= tail) ? head : ch->ch_rsize) - tail;
+ s = min(s, n);
+
+ if (s <= 0)
+ break;
+
+ memcpy_fromio(buf, (char *) ch->ch_raddr + tail, s);
+ dgap_sniff_nowait_nolock(ch, "USER READ", buf, s);
+
+ tail += s;
+ buf += s;
+
+ n -= s;
+ /* Flip queue if needed */
+ tail &= rmask;
+ }
+
+ writew(tail, &(bs->rx_tail));
+ writeb(1, &(bs->idata));
+ ch->ch_rxcount += len;
+
+ /*
+ * If we are completely raw, we don't need to go through a lot
+ * of the tty layers that exist.
+ * In this case, we take the shortest and fastest route we
+ * can to relay the data to the user.
+ *
+ * On the other hand, if we are not raw, we need to go through
+	 * the tty layer, which has a more well-defined API.
+ */
+ if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
+ dgap_parity_scan(ch, ch->ch_bd->flipbuf, ch->ch_bd->flipflagbuf, &len);
+
+ len = tty_buffer_request_room(tp->port, len);
+ tty_insert_flip_string_flags(tp->port, ch->ch_bd->flipbuf,
+ ch->ch_bd->flipflagbuf, len);
+ }
+ else {
+ len = tty_buffer_request_room(tp->port, len);
+ tty_insert_flip_string(tp->port, ch->ch_bd->flipbuf, len);
+ }
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+	/* Tell the tty layer it's okay to "eat" the data now */
+ tty_flip_buffer_push(tp->port);
+
+ if (ld)
+ tty_ldisc_deref(ld);
+
+ DPR_READ(("dgap_input - finish\n"));
+}
+
+
+/************************************************************************
+ * Determines when CARRIER changes state and takes appropriate
+ * action.
+ ************************************************************************/
+void dgap_carrier(struct channel_t *ch)
+{
+ struct board_t *bd;
+
+ int virt_carrier = 0;
+ int phys_carrier = 0;
+
+ DPR_CARR(("dgap_carrier called...\n"));
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ /* Make sure altpin is always set correctly */
+ if (ch->ch_digi.digi_flags & DIGI_ALTPIN) {
+ ch->ch_dsr = DM_CD;
+ ch->ch_cd = DM_DSR;
+ }
+ else {
+ ch->ch_dsr = DM_DSR;
+ ch->ch_cd = DM_CD;
+ }
+
+ if (ch->ch_mistat & D_CD(ch)) {
+ DPR_CARR(("mistat: %x D_CD: %x\n", ch->ch_mistat, D_CD(ch)));
+ phys_carrier = 1;
+ }
+
+ if (ch->ch_digi.digi_flags & DIGI_FORCEDCD) {
+ virt_carrier = 1;
+ }
+
+ if (ch->ch_c_cflag & CLOCAL) {
+ virt_carrier = 1;
+ }
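+
+	/*
+	 * To summarize: phys_carrier follows the real DCD line (possibly
+	 * remapped by ALTPIN above), while virt_carrier is set whenever
+	 * DIGI_FORCEDCD or CLOCAL asks us to behave as though carrier
+	 * were present regardless of the physical signal.
+	 */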
+
+
+ DPR_CARR(("DCD: physical: %d virt: %d\n", phys_carrier, virt_carrier));
+
+ /*
+ * Test for a VIRTUAL carrier transition to HIGH.
+ */
+ if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {
+
+ /*
+ * When carrier rises, wake any threads waiting
+ * for carrier in the open routine.
+ */
+
+ DPR_CARR(("carrier: virt DCD rose\n"));
+
+ if (waitqueue_active(&(ch->ch_flags_wait)))
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+
+ /*
+ * Test for a PHYSICAL carrier transition to HIGH.
+ */
+ if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {
+
+ /*
+ * When carrier rises, wake any threads waiting
+ * for carrier in the open routine.
+ */
+
+ DPR_CARR(("carrier: physical DCD rose\n"));
+
+ if (waitqueue_active(&(ch->ch_flags_wait)))
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+
+ /*
+ * Test for a PHYSICAL transition to low, so long as we aren't
+ * currently ignoring physical transitions (which is what "virtual
+ * carrier" indicates).
+ *
+ * The transition of the virtual carrier to low really doesn't
+ * matter... it really only means "ignore carrier state", not
+ * "make pretend that carrier is there".
+ */
+ if ((virt_carrier == 0) && ((ch->ch_flags & CH_CD) != 0) &&
+ (phys_carrier == 0))
+ {
+
+ /*
+ * When carrier drops:
+ *
+ * Drop carrier on all open units.
+ *
+ * Flush queues, waking up any task waiting in the
+ * line discipline.
+ *
+ * Send a hangup to the control terminal.
+ *
+ * Enable all select calls.
+ */
+ if (waitqueue_active(&(ch->ch_flags_wait)))
+ wake_up_interruptible(&ch->ch_flags_wait);
+
+ if (ch->ch_tun.un_open_count > 0) {
+ DPR_CARR(("Sending tty hangup\n"));
+ tty_hangup(ch->ch_tun.un_tty);
+ }
+
+ if (ch->ch_pun.un_open_count > 0) {
+ DPR_CARR(("Sending pr hangup\n"));
+ tty_hangup(ch->ch_pun.un_tty);
+ }
+ }
+
+ /*
+ * Make sure that our cached values reflect the current reality.
+ */
+ if (virt_carrier == 1)
+ ch->ch_flags |= CH_FCAR;
+ else
+ ch->ch_flags &= ~CH_FCAR;
+
+ if (phys_carrier == 1)
+ ch->ch_flags |= CH_CD;
+ else
+ ch->ch_flags &= ~CH_CD;
+}
+
+
+/************************************************************************
+ *
+ * TTY Entry points and helper functions
+ *
+ ************************************************************************/
+
+/*
+ * dgap_tty_open()
+ *
+ */
+static int dgap_tty_open(struct tty_struct *tty, struct file *file)
+{
+ struct board_t *brd;
+ struct channel_t *ch;
+ struct un_t *un;
+ struct bs_t *bs;
+ uint major = 0;
+ uint minor = 0;
+ int rc = 0;
+ ulong lock_flags;
+ ulong lock_flags2;
+ u16 head;
+
+ rc = 0;
+
+ major = MAJOR(tty_devnum(tty));
+ minor = MINOR(tty_devnum(tty));
+
+ if (major > 255) {
+ return -ENXIO;
+ }
+
+ /* Get board pointer from our array of majors we have allocated */
+ brd = dgap_BoardsByMajor[major];
+ if (!brd) {
+ return -ENXIO;
+ }
+
+ /*
+ * If board is not yet up to a state of READY, go to
+	 * sleep waiting for that to happen, or until the user cancels the open.
+ */
+ rc = wait_event_interruptible(brd->state_wait,
+ (brd->state & BOARD_READY));
+
+ if (rc) {
+ return rc;
+ }
+
+ DGAP_LOCK(brd->bd_lock, lock_flags);
+
+	/* The wait above should guarantee this cannot happen */
+ if (brd->state != BOARD_READY) {
+ DGAP_UNLOCK(brd->bd_lock, lock_flags);
+ return -ENXIO;
+ }
+
+ /* If opened device is greater than our number of ports, bail. */
+ if (MINOR(tty_devnum(tty)) > brd->nasync) {
+ DGAP_UNLOCK(brd->bd_lock, lock_flags);
+ return -ENXIO;
+ }
+
+ ch = brd->channels[minor];
+ if (!ch) {
+ DGAP_UNLOCK(brd->bd_lock, lock_flags);
+ return -ENXIO;
+ }
+
+ /* Grab channel lock */
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ /* Figure out our type */
+ if (major == brd->dgap_Serial_Major) {
+ un = &brd->channels[minor]->ch_tun;
+ un->un_type = DGAP_SERIAL;
+ }
+ else if (major == brd->dgap_TransparentPrint_Major) {
+ un = &brd->channels[minor]->ch_pun;
+ un->un_type = DGAP_PRINT;
+ }
+ else {
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(brd->bd_lock, lock_flags);
+ DPR_OPEN(("%d Unknown TYPE!\n", __LINE__));
+ return -ENXIO;
+ }
+
+ /* Store our unit into driver_data, so we always have it available. */
+ tty->driver_data = un;
+
+ DPR_OPEN(("Open called. MAJOR: %d MINOR:%d unit: %p NAME: %s\n",
+ MAJOR(tty_devnum(tty)), MINOR(tty_devnum(tty)), un, brd->name));
+
+ /*
+ * Error if channel info pointer is 0.
+ */
+ if ((bs = ch->ch_bs) == 0) {
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(brd->bd_lock, lock_flags);
+ DPR_OPEN(("%d BS is 0!\n", __LINE__));
+ return -ENXIO;
+ }
+
+ DPR_OPEN(("%d: tflag=%x pflag=%x\n", __LINE__, ch->ch_tun.un_flags, ch->ch_pun.un_flags));
+
+ /*
+ * Initialize tty's
+ */
+ if (!(un->un_flags & UN_ISOPEN)) {
+ /* Store important variables. */
+ un->un_tty = tty;
+
+ /* Maybe do something here to the TTY struct as well? */
+ }
+
+ /*
+	 * Initialize if neither terminal nor printer is open.
+ */
+ if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) {
+
+ DPR_OPEN(("dgap_open: initializing channel in open...\n"));
+
+ ch->ch_mforce = 0;
+ ch->ch_mval = 0;
+
+ /*
+ * Flush input queue.
+ */
+ head = readw(&(bs->rx_head));
+ writew(head, &(bs->rx_tail));
+
+ ch->ch_flags = 0;
+ ch->pscan_state = 0;
+ ch->pscan_savechar = 0;
+
+ ch->ch_c_cflag = tty->termios.c_cflag;
+ ch->ch_c_iflag = tty->termios.c_iflag;
+ ch->ch_c_oflag = tty->termios.c_oflag;
+ ch->ch_c_lflag = tty->termios.c_lflag;
+ ch->ch_startc = tty->termios.c_cc[VSTART];
+ ch->ch_stopc = tty->termios.c_cc[VSTOP];
+
+ /* TODO: flush our TTY struct here? */
+ }
+
+ dgap_carrier(ch);
+ /*
+ * Run param in case we changed anything
+ */
+ dgap_param(tty);
+
+ /*
+ * follow protocol for opening port
+ */
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(brd->bd_lock, lock_flags);
+
+ rc = dgap_block_til_ready(tty, file, ch);
+
+ if (!un->un_tty) {
+ return -ENODEV;
+ }
+
+ if (rc) {
+ DPR_OPEN(("dgap_tty_open returning after dgap_block_til_ready "
+ "with %d\n", rc));
+ }
+
+ /* No going back now, increment our unit and channel counters */
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_open_count++;
+ un->un_open_count++;
+ un->un_flags |= (UN_ISOPEN);
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_OPEN(("dgap_tty_open finished\n"));
+ return (rc);
+}
+
+
+/*
+ * dgap_block_til_ready()
+ *
+ * Wait for DCD, if needed.
+ */
+static int dgap_block_til_ready(struct tty_struct *tty, struct file *file, struct channel_t *ch)
+{
+ int retval = 0;
+ struct un_t *un = NULL;
+ ulong lock_flags;
+ uint old_flags = 0;
+ int sleep_on_un_flags = 0;
+
+ if (!tty || tty->magic != TTY_MAGIC || !file || !ch || ch->magic != DGAP_CHANNEL_MAGIC) {
+ return (-ENXIO);
+ }
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC) {
+ return (-ENXIO);
+ }
+
+ DPR_OPEN(("dgap_block_til_ready - before block.\n"));
+
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+
+ ch->ch_wopen++;
+
+ /* Loop forever */
+ while (1) {
+
+ sleep_on_un_flags = 0;
+
+ /*
+ * If board has failed somehow during our sleep, bail with error.
+ */
+ if (ch->ch_bd->state == BOARD_FAILED) {
+ retval = -ENXIO;
+ break;
+ }
+
+ /* If tty was hung up, break out of loop and set error. */
+ if (tty_hung_up_p(file)) {
+ retval = -EAGAIN;
+ break;
+ }
+
+ /*
+ * If either unit is in the middle of the fragile part of close,
+ * we just cannot touch the channel safely.
+ * Go back to sleep, knowing that when the channel can be
+ * touched safely, the close routine will signal the
+ * ch_wait_flags to wake us back up.
+		 * ch_flags_wait to wake us back up.
+ if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_CLOSING)) {
+
+ /*
+ * Our conditions to leave cleanly and happily:
+ * 1) NONBLOCKING on the tty is set.
+ * 2) CLOCAL is set.
+ * 3) DCD (fake or real) is active.
+ */
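+			/*
+			 * (CH_CD below tracks the real carrier, while CH_FCAR
+			 * is the "forced carrier" flag that dgap_carrier()
+			 * sets when CLOCAL or DIGI_FORCEDCD is in effect, so
+			 * either flag satisfies the "fake or real" DCD
+			 * condition.)
+			 */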
+
+ if (file->f_flags & O_NONBLOCK) {
+ break;
+ }
+
+ if (tty->flags & (1 << TTY_IO_ERROR)) {
+ break;
+ }
+
+ if (ch->ch_flags & CH_CD) {
+ DPR_OPEN(("%d: ch_flags: %x\n", __LINE__, ch->ch_flags));
+ break;
+ }
+
+ if (ch->ch_flags & CH_FCAR) {
+ DPR_OPEN(("%d: ch_flags: %x\n", __LINE__, ch->ch_flags));
+ break;
+ }
+ }
+ else {
+ sleep_on_un_flags = 1;
+ }
+
+ /*
+ * If there is a signal pending, the user probably
+ * interrupted (ctrl-c) us.
+ * Leave loop with error set.
+ */
+ if (signal_pending(current)) {
+ DPR_OPEN(("%d: signal pending...\n", __LINE__));
+ retval = -ERESTARTSYS;
+ break;
+ }
+
+ DPR_OPEN(("dgap_block_til_ready - blocking.\n"));
+
+ /*
+ * Store the flags before we let go of channel lock
+ */
+ if (sleep_on_un_flags)
+ old_flags = ch->ch_tun.un_flags | ch->ch_pun.un_flags;
+ else
+ old_flags = ch->ch_flags;
+
+ /*
+ * Let go of channel lock before calling schedule.
+ * Our poller will get any FEP events and wake us up when DCD
+ * eventually goes active.
+ */
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_OPEN(("Going to sleep on %s flags...\n",
+ (sleep_on_un_flags ? "un" : "ch")));
+
+ /*
+ * Wait for something in the flags to change from the current value.
+ */
+ if (sleep_on_un_flags) {
+ retval = wait_event_interruptible(un->un_flags_wait,
+ (old_flags != (ch->ch_tun.un_flags | ch->ch_pun.un_flags)));
+ }
+ else {
+ retval = wait_event_interruptible(ch->ch_flags_wait,
+ (old_flags != ch->ch_flags));
+ }
+
+ DPR_OPEN(("After sleep... retval: %x\n", retval));
+
+ /*
+ * We got woken up for some reason.
+ * Before looping around, grab our channel lock.
+ */
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+ }
+
+ ch->ch_wopen--;
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_OPEN(("dgap_block_til_ready - after blocking.\n"));
+
+ if (retval) {
+ DPR_OPEN(("dgap_block_til_ready - done. error. retval: %x\n", retval));
+ return(retval);
+ }
+
+ DPR_OPEN(("dgap_block_til_ready - done no error. jiffies: %lu\n", jiffies));
+
+ return(0);
+}
+
+
+/*
+ * dgap_tty_hangup()
+ *
+ * Hangup the port. Like a close, but don't wait for output to drain.
+ */
+static void dgap_tty_hangup(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ DPR_CLOSE(("dgap_hangup called. ch->ch_open_count: %d un->un_open_count: %d\n",
+ ch->ch_open_count, un->un_open_count));
+
+ /* flush the transmit queues */
+ dgap_tty_flush_buffer(tty);
+
+ DPR_CLOSE(("dgap_hangup finished. ch->ch_open_count: %d un->un_open_count: %d\n",
+ ch->ch_open_count, un->un_open_count));
+}
+
+
+
+/*
+ * dgap_tty_close()
+ *
+ */
+static void dgap_tty_close(struct tty_struct *tty, struct file *file)
+{
+ struct ktermios *ts;
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ int rc = 0;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ ts = &tty->termios;
+
+ DPR_CLOSE(("Close called\n"));
+
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+
+ /*
+ * Determine if this is the last close or not - and if we agree about
+ * which type of close it is with the Line Discipline
+ */
+ if ((tty->count == 1) && (un->un_open_count != 1)) {
+ /*
+ * Uh, oh. tty->count is 1, which means that the tty
+ * structure will be freed. un_open_count should always
+ * be one in these conditions. If it's greater than
+ * one, we've got real problems, since it means the
+		 * serial port won't be shut down.
+ */
+ APR(("tty->count is 1, un open count is %d\n", un->un_open_count));
+ un->un_open_count = 1;
+ }
+
+ if (--un->un_open_count < 0) {
+ APR(("bad serial port open count of %d\n", un->un_open_count));
+ un->un_open_count = 0;
+ }
+
+ ch->ch_open_count--;
+
+ if (ch->ch_open_count && un->un_open_count) {
+ DPR_CLOSE(("dgap_tty_close: not last close ch: %d un:%d\n",
+ ch->ch_open_count, un->un_open_count));
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+	/* OK, it's the last close on the unit */
+ DPR_CLOSE(("dgap_tty_close - last close on unit procedures\n"));
+
+ un->un_flags |= UN_CLOSING;
+
+ tty->closing = 1;
+
+ /*
+ * Only officially close channel if count is 0 and
+ * DIGI_PRINTER bit is not set.
+ */
+ if ((ch->ch_open_count == 0) && !(ch->ch_digi.digi_flags & DIGI_PRINTER)) {
+
+ ch->ch_flags &= ~(CH_RXBLOCK);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ /* wait for output to drain */
+ /* This will also return if we take an interrupt */
+
+ DPR_CLOSE(("Calling wait_for_drain\n"));
+ rc = dgap_wait_for_drain(tty);
+ DPR_CLOSE(("After calling wait_for_drain\n"));
+
+ if (rc) {
+ DPR_BASIC(("dgap_tty_close - bad return: %d ", rc));
+ }
+
+ dgap_tty_flush_buffer(tty);
+ tty_ldisc_flush(tty);
+
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+
+ tty->closing = 0;
+
+ /*
+ * If we have HUPCL set, lower DTR and RTS
+ */
+ if (ch->ch_c_cflag & HUPCL ) {
+ DPR_CLOSE(("Close. HUPCL set, dropping DTR/RTS\n"));
+ ch->ch_mostat &= ~(D_RTS(ch)|D_DTR(ch));
+ dgap_cmdb( ch, SMODEM, 0, D_DTR(ch)|D_RTS(ch), 0 );
+
+ /*
+ * Go to sleep to ensure RTS/DTR
+ * have been dropped for modems to see it.
+ */
+ if (ch->ch_close_delay) {
+ DPR_CLOSE(("Close. Sleeping for RTS/DTR drop\n"));
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+ dgap_ms_sleep(ch->ch_close_delay);
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+
+ DPR_CLOSE(("Close. After sleeping for RTS/DTR drop\n"));
+ }
+ }
+
+ ch->pscan_state = 0;
+ ch->pscan_savechar = 0;
+ ch->ch_baud_info = 0;
+
+ }
+
+ /*
+ * turn off print device when closing print device.
+ */
+ if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON) ) {
+ dgap_wmove(ch, ch->ch_digi.digi_offstr,
+ (int) ch->ch_digi.digi_offlen);
+ ch->ch_flags &= ~CH_PRON;
+ }
+
+ un->un_tty = NULL;
+ un->un_flags &= ~(UN_ISOPEN | UN_CLOSING);
+ tty->driver_data = NULL;
+
+ DPR_CLOSE(("Close. Doing wakeups\n"));
+ wake_up_interruptible(&ch->ch_flags_wait);
+ wake_up_interruptible(&un->un_flags_wait);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_BASIC(("dgap_tty_close - complete\n"));
+}
+
+
+/*
+ * dgap_tty_chars_in_buffer()
+ *
+ * Return number of characters that have not been transmitted yet.
+ *
+ * This routine is used by the line discipline to determine if there
+ * is data waiting to be transmitted/drained/flushed or not.
+ */
+static int dgap_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ struct board_t *bd = NULL;
+ struct channel_t *ch = NULL;
+ struct un_t *un = NULL;
+ struct bs_t *bs = NULL;
+ uchar tbusy;
+ uint chars = 0;
+ u16 thead, ttail, tmask, chead, ctail;
+ ulong lock_flags = 0;
+ ulong lock_flags2 = 0;
+
+ if (tty == NULL)
+ return(0);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (0);
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (0);
+
+ bs = ch->ch_bs;
+ if (!bs)
+ return (0);
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ tmask = (ch->ch_tsize - 1);
+
+ /* Get Transmit queue pointers */
+ thead = readw(&(bs->tx_head)) & tmask;
+ ttail = readw(&(bs->tx_tail)) & tmask;
+
+ /* Get tbusy flag */
+ tbusy = readb(&(bs->tbusy));
+
+ /* Get Command queue pointers */
+ chead = readw(&(ch->ch_cm->cm_head));
+ ctail = readw(&(ch->ch_cm->cm_tail));
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ /*
+ * The only way we know for sure if there is no pending
+ * data left to be transferred, is if:
+ * 1) Transmit head and tail are equal (empty).
+ * 2) Command queue head and tail are equal (empty).
+ * 3) The "TBUSY" flag is 0. (Transmitter not busy).
+ */
+
+ if ((ttail == thead) && (tbusy == 0) && (chead == ctail)) {
+ chars = 0;
+ }
+ else {
+ if (thead >= ttail)
+ chars = thead - ttail;
+ else
+ chars = thead - ttail + ch->ch_tsize;
+ /*
+ * Fudge factor here.
+ * If chars is zero, we know that the command queue had
+ * something in it or tbusy was set. Because we cannot
+ * be sure if there is still some data to be transmitted,
+		 * let's lie and tell the ld we have 1 byte left.
+ */
+ if (chars == 0) {
+ /*
+ * If TBUSY is still set, and our tx buffers are empty,
+ * force the firmware to send me another wakeup after
+ * TBUSY has been cleared.
+ */
+ if (tbusy != 0) {
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+ un->un_flags |= UN_EMPTY;
+ writeb(1, &(bs->iempty));
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+ }
+ chars = 1;
+ }
+ }
+
+ DPR_WRITE(("dgap_tty_chars_in_buffer. Port: %x - %d (head: %d tail: %d tsize: %d)\n",
+ ch->ch_portnum, chars, thead, ttail, ch->ch_tsize));
+ return(chars);
+}
+
+
+static int dgap_wait_for_drain(struct tty_struct *tty)
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ struct bs_t *bs;
+ int ret = -EIO;
+ uint count = 1;
+ ulong lock_flags = 0;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return ret;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return ret;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return ret;
+
+ bs = ch->ch_bs;
+ if (!bs)
+ return ret;
+
+ ret = 0;
+
+ DPR_DRAIN(("dgap_wait_for_drain start\n"));
+
+ /* Loop until data is drained */
+ while (count != 0) {
+
+ count = dgap_tty_chars_in_buffer(tty);
+
+ if (count == 0)
+ break;
+
+ /* Set flag waiting for drain */
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+ un->un_flags |= UN_EMPTY;
+ writeb(1, &(bs->iempty));
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ /* Go to sleep till we get woken up */
+ ret = wait_event_interruptible(un->un_flags_wait, ((un->un_flags & UN_EMPTY) == 0));
+ /* If ret is non-zero, user ctrl-c'ed us */
+ if (ret) {
+ break;
+ }
+ }
+
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+ un->un_flags &= ~(UN_EMPTY);
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_DRAIN(("dgap_wait_for_drain finish\n"));
+ return (ret);
+}
+
+
+/*
+ * dgap_maxcps_room
+ *
+ * Reduces bytes_available to the max number of characters
+ * that can be sent currently given the maxcps value, and
+ * returns the new bytes_available. This only affects printer
+ * output.
+ */
+static int dgap_maxcps_room(struct tty_struct *tty, int bytes_available)
+{
+ struct channel_t *ch = NULL;
+ struct un_t *un = NULL;
+
+ if (tty == NULL)
+ return (bytes_available);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (bytes_available);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (bytes_available);
+
+ /*
+	 * If it's not the Transparent Print device, return
+ * the full data amount.
+ */
+ if (un->un_type != DGAP_PRINT)
+ return (bytes_available);
+
+ if (ch->ch_digi.digi_maxcps > 0 && ch->ch_digi.digi_bufsize > 0 ) {
+ int cps_limit = 0;
+ unsigned long current_time = jiffies;
+ unsigned long buffer_time = current_time +
+ (HZ * ch->ch_digi.digi_bufsize) / ch->ch_digi.digi_maxcps;
+
+ if (ch->ch_cpstime < current_time) {
+ /* buffer is empty */
+ ch->ch_cpstime = current_time; /* reset ch_cpstime */
+ cps_limit = ch->ch_digi.digi_bufsize;
+ }
+ else if (ch->ch_cpstime < buffer_time) {
+ /* still room in the buffer */
+ cps_limit = ((buffer_time - ch->ch_cpstime) * ch->ch_digi.digi_maxcps) / HZ;
+ }
+ else {
+ /* no room in the buffer */
+ cps_limit = 0;
+ }
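+
+		/*
+		 * With the defaults in dgap_digi_init (digi_maxcps == 100,
+		 * digi_bufsize == 100) this lets a burst of up to 100
+		 * characters through while the virtual printer buffer is
+		 * idle, after which output is throttled to roughly 100
+		 * characters per second as dgap_tty_write() advances
+		 * ch_cpstime.
+		 */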
+
+ bytes_available = min(cps_limit, bytes_available);
+ }
+
+ return (bytes_available);
+}
+
+
+static inline void dgap_set_firmware_event(struct un_t *un, unsigned int event)
+{
+ struct channel_t *ch = NULL;
+ struct bs_t *bs = NULL;
+
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+ bs = ch->ch_bs;
+ if (!bs)
+ return;
+
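+	/*
+	 * Setting bs->ilow / bs->iempty asks the FEP to post an event when
+	 * the transmit queue drains below its low-water mark or goes empty;
+	 * the UN_LOW / UN_EMPTY bits keep us from re-arming a request that
+	 * is already outstanding.
+	 */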
+ if ((event & UN_LOW) != 0) {
+ if ((un->un_flags & UN_LOW) == 0) {
+ un->un_flags |= UN_LOW;
+ writeb(1, &(bs->ilow));
+ }
+ }
+	if ((event & UN_EMPTY) != 0) {
+ if ((un->un_flags & UN_EMPTY) == 0) {
+ un->un_flags |= UN_EMPTY;
+ writeb(1, &(bs->iempty));
+ }
+ }
+}
+
+
+/*
+ * dgap_tty_write_room()
+ *
+ * Return space available in Tx buffer
+ */
+static int dgap_tty_write_room(struct tty_struct *tty)
+{
+ struct channel_t *ch = NULL;
+ struct un_t *un = NULL;
+ struct bs_t *bs = NULL;
+ u16 head, tail, tmask;
+ int ret = 0;
+ ulong lock_flags = 0;
+
+ if (tty == NULL || dgap_TmpWriteBuf == NULL)
+ return(0);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (0);
+
+ bs = ch->ch_bs;
+ if (!bs)
+ return (0);
+
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+
+ tmask = ch->ch_tsize - 1;
+ head = readw(&(bs->tx_head)) & tmask;
+ tail = readw(&(bs->tx_tail)) & tmask;
+
+ if ((ret = tail - head - 1) < 0)
+ ret += ch->ch_tsize;
+
+ /* Limit printer to maxcps */
+ ret = dgap_maxcps_room(tty, ret);
+
+ /*
+	 * If we are the printer device, leave space for
+ * possibly both the on and off strings.
+ */
+ if (un->un_type == DGAP_PRINT) {
+ if (!(ch->ch_flags & CH_PRON))
+ ret -= ch->ch_digi.digi_onlen;
+ ret -= ch->ch_digi.digi_offlen;
+ }
+ else {
+ if (ch->ch_flags & CH_PRON)
+ ret -= ch->ch_digi.digi_offlen;
+ }
+
+ if (ret < 0)
+ ret = 0;
+
+ /*
+ * Schedule FEP to wake us up if needed.
+ *
+ * TODO: This might be overkill...
+ * Do we really need to schedule callbacks from the FEP
+ * in every case? Can we get smarter based on ret?
+ */
+ dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_WRITE(("dgap_tty_write_room - %d tail: %d head: %d\n", ret, tail, head));
+
+ return(ret);
+}
+
+
+/*
+ * dgap_tty_put_char()
+ *
+ * Put a single character out to the board (wraps dgap_tty_write()).
+ *
+ * - used by the line discipline for OPOST processing
+ */
+static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c)
+{
+ /*
+ * Simply call tty_write.
+ */
+ DPR_WRITE(("dgap_tty_put_char called\n"));
+ dgap_tty_write(tty, &c, 1);
+ return 1;
+}
+
+
+/*
+ * dgap_tty_write()
+ *
+ * Take data from the user or kernel and send it out to the FEP.
+ * All of the Transparent Print magic lives in here as well.
+ */
+static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+ struct channel_t *ch = NULL;
+ struct un_t *un = NULL;
+ struct bs_t *bs = NULL;
+ char *vaddr = NULL;
+ u16 head, tail, tmask, remain;
+ int bufcount = 0, n = 0;
+ int orig_count = 0;
+ ulong lock_flags;
+ int from_user = 0;
+
+ if (tty == NULL || dgap_TmpWriteBuf == NULL)
+ return(0);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return(0);
+
+ bs = ch->ch_bs;
+ if (!bs)
+ return(0);
+
+ if (!count)
+ return(0);
+
+ DPR_WRITE(("dgap_tty_write: Port: %x tty=%p user=%d len=%d\n",
+ ch->ch_portnum, tty, from_user, count));
+
+ /*
+ * Store original amount of characters passed in.
+ * This helps to figure out if we should ask the FEP
+ * to send us an event when it has more space available.
+ */
+ orig_count = count;
+
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+
+ /* Get our space available for the channel from the board */
+ tmask = ch->ch_tsize - 1;
+ head = readw(&(bs->tx_head)) & tmask;
+ tail = readw(&(bs->tx_tail)) & tmask;
+
+ if ((bufcount = tail - head - 1) < 0)
+ bufcount += ch->ch_tsize;
+
+ DPR_WRITE(("%d: bufcount: %x count: %x tail: %x head: %x tmask: %x\n",
+ __LINE__, bufcount, count, tail, head, tmask));
+
+ /*
+ * Limit printer output to maxcps overall, with bursts allowed
+ * up to bufsize characters.
+ */
+ bufcount = dgap_maxcps_room(tty, bufcount);
+
+ /*
+ * Take minimum of what the user wants to send, and the
+ * space available in the FEP buffer.
+ */
+ count = min(count, bufcount);
+
+ /*
+ * Bail if no space left.
+ */
+ if (count <= 0) {
+ dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+ return(0);
+ }
+
+ /*
+ * Output the printer ON string, if we are in terminal mode, but
+ * need to be in printer mode.
+ */
+ if ((un->un_type == DGAP_PRINT) && !(ch->ch_flags & CH_PRON)) {
+ dgap_wmove(ch, ch->ch_digi.digi_onstr,
+ (int) ch->ch_digi.digi_onlen);
+ head = readw(&(bs->tx_head)) & tmask;
+ ch->ch_flags |= CH_PRON;
+ }
+
+ /*
+ * On the other hand, output the printer OFF string, if we are
+ * currently in printer mode, but need to output to the terminal.
+ */
+ if ((un->un_type != DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
+ dgap_wmove(ch, ch->ch_digi.digi_offstr,
+ (int) ch->ch_digi.digi_offlen);
+ head = readw(&(bs->tx_head)) & tmask;
+ ch->ch_flags &= ~CH_PRON;
+ }
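+
+	/*
+	 * The default on/off strings in dgap_digi_init are the ANSI
+	 * media-copy sequences ESC [ 5 i and ESC [ 4 i, which switch a
+	 * printer attached to the terminal's AUX port in and out around
+	 * the data.
+	 */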
+
+ /*
+	 * If there is nothing left to copy, or we can't handle any more data, leave.
+ */
+ if (count <= 0) {
+ dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+ return(0);
+ }
+
+ if (from_user) {
+
+ count = min(count, WRITEBUFLEN);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ /*
+ * If data is coming from user space, copy it into a temporary
+ * buffer so we don't get swapped out while doing the copy to
+ * the board.
+ */
+ /* we're allowed to block if it's from_user */
+ if (down_interruptible(&dgap_TmpWriteSem)) {
+ return (-EINTR);
+ }
+
+ if (copy_from_user(dgap_TmpWriteBuf, (const uchar __user *) buf, count)) {
+ up(&dgap_TmpWriteSem);
+ printk("Write: Copy from user failed!\n");
+ return -EFAULT;
+ }
+
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+
+ buf = dgap_TmpWriteBuf;
+ }
+
+ n = count;
+
+ /*
+ * If the write wraps over the top of the circular buffer,
+ * move the portion up to the wrap point, and reset the
+ * pointers to the bottom.
+ */
+ remain = ch->ch_tstart + ch->ch_tsize - head;
+
+ if (n >= remain) {
+ n -= remain;
+ vaddr = ch->ch_taddr + head;
+
+ memcpy_toio(vaddr, (uchar *) buf, remain);
+ dgap_sniff_nowait_nolock(ch, "USER WRITE", (uchar *) buf, remain);
+
+ head = ch->ch_tstart;
+ buf += remain;
+ }
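+
+	/*
+	 * Example (illustrative sizes): with ch_tstart == 0, a 4096-byte
+	 * transmit buffer and head at 4090, a 100-byte write copies six
+	 * bytes at offset 4090 above, resets head to the start, and
+	 * copies the remaining 94 bytes at offset 0 below.
+	 */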
+
+ if (n > 0) {
+
+ /*
+ * Move rest of data.
+ */
+ vaddr = ch->ch_taddr + head;
+ remain = n;
+
+ memcpy_toio(vaddr, (uchar *) buf, remain);
+ dgap_sniff_nowait_nolock(ch, "USER WRITE", (uchar *) buf, remain);
+
+ head += remain;
+
+ }
+
+ if (count) {
+ ch->ch_txcount += count;
+ head &= tmask;
+ writew(head, &(bs->tx_head));
+ }
+
+
+ dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
+
+ /*
+ * If this is the print device, and the
+ * printer is still on, we need to turn it
+ * off before going idle. If the buffer is
+ * non-empty, wait until it goes empty.
+ * Otherwise turn it off right now.
+ */
+ if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
+ tail = readw(&(bs->tx_tail)) & tmask;
+
+ if (tail != head) {
+ un->un_flags |= UN_EMPTY;
+ writeb(1, &(bs->iempty));
+ }
+ else {
+ dgap_wmove(ch, ch->ch_digi.digi_offstr,
+ (int) ch->ch_digi.digi_offlen);
+ head = readw(&(bs->tx_head)) & tmask;
+ ch->ch_flags &= ~CH_PRON;
+ }
+ }
+
+ /* Update printer buffer empty time. */
+ if ((un->un_type == DGAP_PRINT) && (ch->ch_digi.digi_maxcps > 0)
+ && (ch->ch_digi.digi_bufsize > 0)) {
+ ch->ch_cpstime += (HZ * count) / ch->ch_digi.digi_maxcps;
+ }
+
+ if (from_user) {
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+ up(&dgap_TmpWriteSem);
+ }
+ else {
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+ }
+
+ DPR_WRITE(("Write finished - Write %d bytes of %d.\n", count, orig_count));
+
+ return (count);
+}
+
+
+
+/*
+ * Return modem signals to ld.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
+static int dgap_tty_tiocmget(struct tty_struct *tty)
+#else
+static int dgap_tty_tiocmget(struct tty_struct *tty, struct file *file)
+#endif
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ int result = -EIO;
+ uchar mstat = 0;
+ ulong lock_flags;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return result;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return result;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return result;
+
+ DPR_IOCTL(("dgap_tty_tiocmget start\n"));
+
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+
+ mstat = readb(&(ch->ch_bs->m_stat));
+ /* Append any outbound signals that might be pending... */
+ mstat |= ch->ch_mostat;
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ result = 0;
+
+ if (mstat & D_DTR(ch))
+ result |= TIOCM_DTR;
+ if (mstat & D_RTS(ch))
+ result |= TIOCM_RTS;
+ if (mstat & D_CTS(ch))
+ result |= TIOCM_CTS;
+ if (mstat & D_DSR(ch))
+ result |= TIOCM_DSR;
+ if (mstat & D_RI(ch))
+ result |= TIOCM_RI;
+ if (mstat & D_CD(ch))
+ result |= TIOCM_CD;
+
+ DPR_IOCTL(("dgap_tty_tiocmget finish\n"));
+
+ return result;
+}
+
+
+/*
+ * dgap_tty_tiocmset()
+ *
+ * Set modem signals, called by ld.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
+static int dgap_tty_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+#else
+static int dgap_tty_tiocmset(struct tty_struct *tty, struct file *file,
+ unsigned int set, unsigned int clear)
+#endif
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int ret = -EIO;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return ret;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return ret;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return ret;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return ret;
+
+ DPR_IOCTL(("dgap_tty_tiocmset start\n"));
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ if (set & TIOCM_RTS) {
+ ch->ch_mforce |= D_RTS(ch);
+ ch->ch_mval |= D_RTS(ch);
+ }
+
+ if (set & TIOCM_DTR) {
+ ch->ch_mforce |= D_DTR(ch);
+ ch->ch_mval |= D_DTR(ch);
+ }
+
+ if (clear & TIOCM_RTS) {
+ ch->ch_mforce |= D_RTS(ch);
+ ch->ch_mval &= ~(D_RTS(ch));
+ }
+
+ if (clear & TIOCM_DTR) {
+ ch->ch_mforce |= D_DTR(ch);
+ ch->ch_mval &= ~(D_DTR(ch));
+ }
+
+ dgap_param(tty);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_tiocmset finish\n"));
+
+ return (0);
+}
+
+
+
+/*
+ * dgap_tty_send_break()
+ *
+ * Send a Break, called by ld.
+ */
+static int dgap_tty_send_break(struct tty_struct *tty, int msec)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int ret = -EIO;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return ret;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return ret;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return ret;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return ret;
+
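+	/*
+	 * Convert the requested duration into what the firmware expects;
+	 * the SBREAK command appears to take its time in roughly 10 ms
+	 * units, with -1 meaning the longest possible break and 0 rounded
+	 * up to the minimum.
+	 */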
+ switch (msec) {
+ case -1:
+ msec = 0xFFFF;
+ break;
+ case 0:
+ msec = 1;
+ break;
+ default:
+ msec /= 10;
+ break;
+ }
+
+ DPR_IOCTL(("dgap_tty_send_break start 1. %lx\n", jiffies));
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+#if 0
+ dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
+#endif
+ dgap_cmdw(ch, SBREAK, (u16) msec, 0);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_send_break finish\n"));
+
+ return (0);
+}
+
+
+
+
+/*
+ * dgap_tty_wait_until_sent()
+ *
+ * wait until data has been transmitted, called by ld.
+ */
+static void dgap_tty_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ int rc;
+ rc = dgap_wait_for_drain(tty);
+ if (rc) {
+ DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
+ return;
+ }
+ return;
+}
+
+
+
+/*
+ * dgap_tty_send_xchar()
+ *
+ * send a high priority character, called by ld.
+ */
+static void dgap_tty_send_xchar(struct tty_struct *tty, char c)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgap_tty_send_xchar start 1. %lx\n", jiffies));
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ /*
+ * This is technically what we should do.
+ * However, the NIST tests specifically want
+ * to see each XON or XOFF character that it
+	 * sends, so let's just send each character
+ * by hand...
+ */
+#if 0
+ if (c == STOP_CHAR(tty)) {
+ dgap_cmdw(ch, RPAUSE, 0, 0);
+ }
+ else if (c == START_CHAR(tty)) {
+ dgap_cmdw(ch, RRESUME, 0, 0);
+ }
+ else {
+ dgap_wmove(ch, &c, 1);
+ }
+#else
+ dgap_wmove(ch, &c, 1);
+#endif
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_send_xchar finish\n"));
+
+ return;
+}
+
+
+
+
+/*
+ * Return modem signals to ld.
+ */
+static int dgap_get_modem_info(struct channel_t *ch, unsigned int __user *value)
+{
+ int result = 0;
+ uchar mstat = 0;
+ ulong lock_flags;
+ int rc = 0;
+
+ DPR_IOCTL(("dgap_get_modem_info start\n"));
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return(-ENXIO);
+
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+
+ mstat = readb(&(ch->ch_bs->m_stat));
+ /* Append any outbound signals that might be pending... */
+ mstat |= ch->ch_mostat;
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ result = 0;
+
+ if (mstat & D_DTR(ch))
+ result |= TIOCM_DTR;
+ if (mstat & D_RTS(ch))
+ result |= TIOCM_RTS;
+ if (mstat & D_CTS(ch))
+ result |= TIOCM_CTS;
+ if (mstat & D_DSR(ch))
+ result |= TIOCM_DSR;
+ if (mstat & D_RI(ch))
+ result |= TIOCM_RI;
+ if (mstat & D_CD(ch))
+ result |= TIOCM_CD;
+
+ rc = put_user(result, value);
+
+ DPR_IOCTL(("dgap_get_modem_info finish\n"));
+ return(rc);
+}
+
+
+/*
+ * dgap_set_modem_info()
+ *
+ * Set modem signals, called by ld.
+ */
+static int dgap_set_modem_info(struct tty_struct *tty, unsigned int command, unsigned int __user *value)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int ret = -ENXIO;
+ unsigned int arg = 0;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return ret;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return ret;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return ret;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return ret;
+
+ DPR_IOCTL(("dgap_set_modem_info() start\n"));
+
+ ret = get_user(arg, value);
+ if (ret) {
+ DPR_IOCTL(("dgap_set_modem_info %d ret: %x. finished.\n", __LINE__, ret));
+ return(ret);
+ }
+
+ DPR_IOCTL(("dgap_set_modem_info: command: %x arg: %x\n", command, arg));
+
+ switch (command) {
+ case TIOCMBIS:
+ if (arg & TIOCM_RTS) {
+ ch->ch_mforce |= D_RTS(ch);
+ ch->ch_mval |= D_RTS(ch);
+ }
+
+ if (arg & TIOCM_DTR) {
+ ch->ch_mforce |= D_DTR(ch);
+ ch->ch_mval |= D_DTR(ch);
+ }
+
+ break;
+
+ case TIOCMBIC:
+ if (arg & TIOCM_RTS) {
+ ch->ch_mforce |= D_RTS(ch);
+ ch->ch_mval &= ~(D_RTS(ch));
+ }
+
+ if (arg & TIOCM_DTR) {
+ ch->ch_mforce |= D_DTR(ch);
+ ch->ch_mval &= ~(D_DTR(ch));
+ }
+
+ break;
+
+ case TIOCMSET:
+ ch->ch_mforce = D_DTR(ch)|D_RTS(ch);
+
+ if (arg & TIOCM_RTS) {
+ ch->ch_mval |= D_RTS(ch);
+ }
+ else {
+ ch->ch_mval &= ~(D_RTS(ch));
+ }
+
+ if (arg & TIOCM_DTR) {
+ ch->ch_mval |= (D_DTR(ch));
+ }
+ else {
+ ch->ch_mval &= ~(D_DTR(ch));
+ }
+
+ break;
+
+ default:
+ return(-EINVAL);
+ }
+
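+	/*
+	 * Reprogram the channel so the updated ch_mforce/ch_mval values
+	 * actually reach the board.
+	 */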
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ dgap_param(tty);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_set_modem_info finish\n"));
+
+ return (0);
+}
+
+
+/*
+ * dgap_tty_digigeta()
+ *
+ * Ioctl to get the information for ditty.
+ */
+static int dgap_tty_digigeta(struct tty_struct *tty, struct digi_t __user *retinfo)
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ struct digi_t tmp;
+ ulong lock_flags;
+
+ if (!retinfo)
+ return (-EFAULT);
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return (-EFAULT);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (-EFAULT);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (-EFAULT);
+
+ memset(&tmp, 0, sizeof(tmp));
+
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+ memcpy(&tmp, &ch->ch_digi, sizeof(tmp));
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return (-EFAULT);
+
+ return (0);
+}
+
+
+/*
+ * dgap_tty_digiseta()
+ *
+ * Ioctl to set the information for ditty.
+ */
+static int dgap_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_info)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ struct digi_t new_digi;
+ ulong lock_flags = 0;
+ unsigned long lock_flags2;
+
+ DPR_IOCTL(("DIGI_SETA start\n"));
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return (-EFAULT);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (-EFAULT);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (-EFAULT);
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (-EFAULT);
+
+ if (copy_from_user(&new_digi, new_info, sizeof(struct digi_t))) {
+ DPR_IOCTL(("DIGI_SETA failed copy_from_user\n"));
+ return(-EFAULT);
+ }
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ memcpy(&ch->ch_digi, &new_digi, sizeof(struct digi_t));
+
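+	/* Clamp the user-supplied values to sane ranges before use. */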
+ if (ch->ch_digi.digi_maxcps < 1)
+ ch->ch_digi.digi_maxcps = 1;
+
+ if (ch->ch_digi.digi_maxcps > 10000)
+ ch->ch_digi.digi_maxcps = 10000;
+
+ if (ch->ch_digi.digi_bufsize < 10)
+ ch->ch_digi.digi_bufsize = 10;
+
+ if (ch->ch_digi.digi_maxchar < 1)
+ ch->ch_digi.digi_maxchar = 1;
+
+ if (ch->ch_digi.digi_maxchar > ch->ch_digi.digi_bufsize)
+ ch->ch_digi.digi_maxchar = ch->ch_digi.digi_bufsize;
+
+ if (ch->ch_digi.digi_onlen > DIGI_PLEN)
+ ch->ch_digi.digi_onlen = DIGI_PLEN;
+
+ if (ch->ch_digi.digi_offlen > DIGI_PLEN)
+ ch->ch_digi.digi_offlen = DIGI_PLEN;
+
+ dgap_param(tty);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("DIGI_SETA finish\n"));
+
+ return(0);
+}
+
+
+/*
+ * dgap_tty_digigetedelay()
+ *
+ * Ioctl to get the current edelay setting.
+ */
+static int dgap_tty_digigetedelay(struct tty_struct *tty, int __user *retinfo)
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ int tmp;
+ ulong lock_flags;
+
+ if (!retinfo)
+ return (-EFAULT);
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return (-EFAULT);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (-EFAULT);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (-EFAULT);
+
+ memset(&tmp, 0, sizeof(tmp));
+
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+ tmp = readw(&(ch->ch_bs->edelay));
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return (-EFAULT);
+
+ return (0);
+}
+
+
+/*
+ * dgap_tty_digisetedelay()
+ *
+ * Ioctl to set the EDELAY setting
+ *
+ */
+static int dgap_tty_digisetedelay(struct tty_struct *tty, int __user *new_info)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int new_digi;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+	DPR_IOCTL(("DIGI_SETEDELAY start\n"));
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return (-EFAULT);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (-EFAULT);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (-EFAULT);
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (-EFAULT);
+
+ if (copy_from_user(&new_digi, new_info, sizeof(int))) {
+ DPR_IOCTL(("DIGI_SETEDELAY failed copy_from_user\n"));
+ return(-EFAULT);
+ }
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ writew((u16) new_digi, &(ch->ch_bs->edelay));
+
+ dgap_param(tty);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+	DPR_IOCTL(("DIGI_SETEDELAY finish\n"));
+
+ return(0);
+}
+
+
+/*
+ * dgap_tty_digigetcustombaud()
+ *
+ * Ioctl to get the current custom baud rate setting.
+ */
+static int dgap_tty_digigetcustombaud(struct tty_struct *tty, int __user *retinfo)
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ int tmp;
+ ulong lock_flags;
+
+ if (!retinfo)
+ return (-EFAULT);
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return (-EFAULT);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (-EFAULT);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (-EFAULT);
+
+ memset(&tmp, 0, sizeof(tmp));
+
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+ tmp = dgap_get_custom_baud(ch);
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("DIGI_GETCUSTOMBAUD. Returning %d\n", tmp));
+
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return (-EFAULT);
+
+ return (0);
+}
+
+
+/*
+ * dgap_tty_digisetcustombaud()
+ *
+ * Ioctl to set the custom baud rate setting
+ */
+static int dgap_tty_digisetcustombaud(struct tty_struct *tty, int __user *new_info)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ uint new_rate;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ DPR_IOCTL(("DIGI_SETCUSTOMBAUD start\n"));
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return (-EFAULT);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (-EFAULT);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (-EFAULT);
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (-EFAULT);
+
+
+ if (copy_from_user(&new_rate, new_info, sizeof(unsigned int))) {
+ DPR_IOCTL(("DIGI_SETCUSTOMBAUD failed copy_from_user\n"));
+ return(-EFAULT);
+ }
+
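+	/*
+	 * Custom baud rates are only honoured on boards running FEP5+
+	 * firmware; on older firmware the request is silently ignored.
+	 */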
+ if (bd->bd_flags & BD_FEP5PLUS) {
+
+ DPR_IOCTL(("DIGI_SETCUSTOMBAUD. Setting %d\n", new_rate));
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ ch->ch_custom_speed = new_rate;
+
+ dgap_param(tty);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ }
+
+ DPR_IOCTL(("DIGI_SETCUSTOMBAUD finish\n"));
+
+ return(0);
+}
+
+
+/*
+ * dgap_set_termios()
+ */
+static void dgap_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ unsigned long lock_flags;
+ unsigned long lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
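+	/*
+	 * Cache the relevant termios fields in the channel structure so
+	 * the carrier and parameter routines below can program the board
+	 * from them.
+	 */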
+ ch->ch_c_cflag = tty->termios.c_cflag;
+ ch->ch_c_iflag = tty->termios.c_iflag;
+ ch->ch_c_oflag = tty->termios.c_oflag;
+ ch->ch_c_lflag = tty->termios.c_lflag;
+ ch->ch_startc = tty->termios.c_cc[VSTART];
+ ch->ch_stopc = tty->termios.c_cc[VSTOP];
+
+ dgap_carrier(ch);
+ dgap_param(tty);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+}
+
+
+static void dgap_tty_throttle(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgap_tty_throttle start\n"));
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ ch->ch_flags |= (CH_RXBLOCK);
+#if 1
+ dgap_cmdw(ch, RPAUSE, 0, 0);
+#endif
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_throttle finish\n"));
+}
+
+
+static void dgap_tty_unthrottle(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgap_tty_unthrottle start\n"));
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ ch->ch_flags &= ~(CH_RXBLOCK);
+
+#if 1
+ dgap_cmdw(ch, RRESUME, 0, 0);
+#endif
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_unthrottle finish\n"));
+}
+
+
+static void dgap_tty_start(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgap_tty_start start\n"));
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ dgap_cmdw(ch, RESUMETX, 0, 0);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_start finish\n"));
+}
+
+
+static void dgap_tty_stop(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgap_tty_stop start\n"));
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ dgap_cmdw(ch, PAUSETX, 0, 0);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_stop finish\n"));
+}
+
+
+/*
+ * dgap_tty_flush_chars()
+ *
+ * Flush the cook buffer
+ *
+ * Note to self, and any other poor souls who venture here:
+ *
+ * flush in this case DOES NOT mean dispose of the data.
+ * instead, it means "stop buffering and send it if you
+ * haven't already." Just guess how I figured that out... SRW 2-Jun-98
+ *
+ * It is also always called in interrupt context - JAR 8-Sept-99
+ */
+static void dgap_tty_flush_chars(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgap_tty_flush_chars start\n"));
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ /* TODO: Do something here */
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_flush_chars finish\n"));
+}
+
+
+
+/*
+ * dgap_tty_flush_buffer()
+ *
+ * Flush Tx buffer (make in == out)
+ */
+static void dgap_tty_flush_buffer(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+ u16 head = 0;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgap_tty_flush_buffer on port: %d start\n", ch->ch_portnum));
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
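+	/*
+	 * Clear any pending stop, have the FEP flush its transmit buffer
+	 * up to the current head, resume transmission and wake up anyone
+	 * waiting for transmit space.
+	 */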
+ ch->ch_flags &= ~CH_STOP;
+ head = readw(&(ch->ch_bs->tx_head));
+ dgap_cmdw(ch, FLUSHTX, (u16) head, 0);
+ dgap_cmdw(ch, RESUMETX, 0, 0);
+ if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_tun.un_flags_wait);
+ }
+ if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_pun.un_flags_wait);
+ }
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ if (waitqueue_active(&tty->write_wait))
+ wake_up_interruptible(&tty->write_wait);
+ tty_wakeup(tty);
+
+ DPR_IOCTL(("dgap_tty_flush_buffer finish\n"));
+}
+
+
+
+/*****************************************************************************
+ *
+ * The IOCTL function and all of its helpers
+ *
+ *****************************************************************************/
+
+/*
+ * dgap_tty_ioctl()
+ *
+ * The usual assortment of ioctl's
+ */
+static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int rc;
+ u16 head = 0;
+ ulong lock_flags = 0;
+ ulong lock_flags2 = 0;
+ void __user *uarg = (void __user *) arg;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return (-ENODEV);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (-ENODEV);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (-ENODEV);
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (-ENODEV);
+
+ DPR_IOCTL(("dgap_tty_ioctl start on port %d - cmd %s (%x), arg %lx\n",
+ ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ if (un->un_open_count <= 0) {
+ DPR_BASIC(("dgap_tty_ioctl - unit not open.\n"));
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return(-EIO);
+ }
+
+ switch (cmd) {
+
+ /* Here are all the standard ioctl's that we MUST implement */
+
+ case TCSBRK:
+ /*
+ * TCSBRK is SVID version: non-zero arg --> no break
+ * this behaviour is exploited by tcdrain().
+ *
+ * According to POSIX.1 spec (7.2.2.1.2) breaks should be
+ * between 0.25 and 0.5 seconds so we'll ask for something
+ * in the middle: 0.375 seconds.
+ */
+ rc = tty_check_change(tty);
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ if (rc) {
+ return(rc);
+ }
+
+ rc = dgap_wait_for_drain(tty);
+
+ if (rc) {
+ DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
+ return(-EINTR);
+ }
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ if(((cmd == TCSBRK) && (!arg)) || (cmd == TCSBRKP)) {
+ dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
+ }
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
+ ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
+
+ return(0);
+
+
+ case TCSBRKP:
+		/*
+		 * Support for POSIX tcsendbreak().
+		 *
+		 * According to POSIX.1 spec (7.2.2.1.2) breaks should be
+		 * between 0.25 and 0.5 seconds so we'll ask for something
+		 * in the middle: 0.375 seconds.
+		 */
+ rc = tty_check_change(tty);
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ if (rc) {
+ return(rc);
+ }
+
+ rc = dgap_wait_for_drain(tty);
+ if (rc) {
+ DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
+ return(-EINTR);
+ }
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
+ ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
+
+ return(0);
+
+ case TIOCSBRK:
+ /*
+ * FEP5 doesn't support turning on a break unconditionally.
+ * The FEP5 device will stop sending a break automatically
+ * after the specified time value that was sent when turning on
+ * the break.
+ */
+ rc = tty_check_change(tty);
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ if (rc) {
+ return(rc);
+ }
+
+ rc = dgap_wait_for_drain(tty);
+ if (rc) {
+ DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
+ return(-EINTR);
+ }
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
+ ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
+
+ return 0;
+
+ case TIOCCBRK:
+ /*
+ * FEP5 doesn't support turning off a break unconditionally.
+ * The FEP5 device will stop sending a break automatically
+ * after the specified time value that was sent when turning on
+ * the break.
+ */
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return 0;
+
+ case TIOCGSOFTCAR:
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ rc = put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *) arg);
+ return(rc);
+
+ case TIOCSSOFTCAR:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ rc = get_user(arg, (unsigned long __user *) arg);
+ if (rc)
+ return(rc);
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+ tty->termios.c_cflag = ((tty->termios.c_cflag & ~CLOCAL) | (arg ? CLOCAL : 0));
+ dgap_param(tty);
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ return(0);
+
+ case TIOCMGET:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return(dgap_get_modem_info(ch, uarg));
+
+ case TIOCMBIS:
+ case TIOCMBIC:
+ case TIOCMSET:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return(dgap_set_modem_info(tty, cmd, uarg));
+
+ /*
+ * Here are any additional ioctl's that we want to implement
+ */
+
+ case TCFLSH:
+ /*
+ * The linux tty driver doesn't have a flush
+ * input routine for the driver, assuming all backed
+ * up data is in the line disc. buffers. However,
+ * we all know that's not the case. Here, we
+ * act on the ioctl, but then lie and say we didn't
+ * so the line discipline will process the flush
+ * also.
+ */
+ rc = tty_check_change(tty);
+ if (rc) {
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return(rc);
+ }
+
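+		/*
+		 * Input flush: discard pending receive data by advancing
+		 * the tail up to the head in board memory and clearing the
+		 * overrun flag. The printer unit has no input of its own,
+		 * so it is skipped.
+		 */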
+ if ((arg == TCIFLUSH) || (arg == TCIOFLUSH)) {
+ if (!(un->un_type == DGAP_PRINT)) {
+ head = readw(&(ch->ch_bs->rx_head));
+ writew(head, &(ch->ch_bs->rx_tail));
+ writeb(0, &(ch->ch_bs->orun));
+ }
+ }
+
+ if ((arg == TCOFLUSH) || (arg == TCIOFLUSH)) {
+ ch->ch_flags &= ~CH_STOP;
+ head = readw(&(ch->ch_bs->tx_head));
+ dgap_cmdw(ch, FLUSHTX, (u16) head, 0 );
+ dgap_cmdw(ch, RESUMETX, 0, 0);
+ if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_tun.un_flags_wait);
+ }
+ if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_pun.un_flags_wait);
+ }
+ if (waitqueue_active(&tty->write_wait))
+ wake_up_interruptible(&tty->write_wait);
+
+ /* Can't hold any locks when calling tty_wakeup! */
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ tty_wakeup(tty);
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+ }
+
+ /* pretend we didn't recognize this IOCTL */
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_ioctl (LINE:%d) finish on port %d - cmd %s (%x), arg %lx\n",
+ __LINE__, ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
+
+ return(-ENOIOCTLCMD);
+
+ case TCSETSF:
+ case TCSETSW:
+ /*
+ * The linux tty driver doesn't have a flush
+ * input routine for the driver, assuming all backed
+ * up data is in the line disc. buffers. However,
+ * we all know that's not the case. Here, we
+ * act on the ioctl, but then lie and say we didn't
+ * so the line discipline will process the flush
+ * also.
+ */
+ if (cmd == TCSETSF) {
+ /* flush rx */
+ ch->ch_flags &= ~CH_STOP;
+ head = readw(&(ch->ch_bs->rx_head));
+ writew(head, &(ch->ch_bs->rx_tail));
+ }
+
+ /* now wait for all the output to drain */
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ rc = dgap_wait_for_drain(tty);
+ if (rc) {
+ DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
+ return(-EINTR);
+ }
+
+ DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
+ ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
+
+ /* pretend we didn't recognize this */
+ return(-ENOIOCTLCMD);
+
+ case TCSETAW:
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ rc = dgap_wait_for_drain(tty);
+ if (rc) {
+ DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
+ return(-EINTR);
+ }
+
+ /* pretend we didn't recognize this */
+ return(-ENOIOCTLCMD);
+
+ case TCXONC:
+ /*
+ * The Linux Line Discipline (LD) would do this for us if we
+ * let it, but we have the special firmware options to do this
+ * the "right way" regardless of hardware or software flow
+		 * control so we'll do it ourselves instead of letting the LD
+ * do it.
+ */
+ rc = tty_check_change(tty);
+ if (rc) {
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return(rc);
+ }
+
+ DPR_IOCTL(("dgap_ioctl - in TCXONC - %d\n", cmd));
+ switch (arg) {
+
+ case TCOON:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ dgap_tty_start(tty);
+ return(0);
+ case TCOOFF:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ dgap_tty_stop(tty);
+ return(0);
+ case TCION:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ /* Make the ld do it */
+ return(-ENOIOCTLCMD);
+ case TCIOFF:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ /* Make the ld do it */
+ return(-ENOIOCTLCMD);
+ default:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return(-EINVAL);
+ }
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return(-ENOIOCTLCMD);
+
+ case DIGI_GETA:
+ /* get information for ditty */
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return(dgap_tty_digigeta(tty, uarg));
+
+ case DIGI_SETAW:
+ case DIGI_SETAF:
+
+ /* set information for ditty */
+ if (cmd == (DIGI_SETAW)) {
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ rc = dgap_wait_for_drain(tty);
+ if (rc) {
+ DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
+ return(-EINTR);
+ }
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+ }
+ else {
+ tty_ldisc_flush(tty);
+ }
+ /* fall thru */
+
+ case DIGI_SETA:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return(dgap_tty_digiseta(tty, uarg));
+
+ case DIGI_GEDELAY:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return(dgap_tty_digigetedelay(tty, uarg));
+
+ case DIGI_SEDELAY:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return(dgap_tty_digisetedelay(tty, uarg));
+
+ case DIGI_GETCUSTOMBAUD:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return(dgap_tty_digigetcustombaud(tty, uarg));
+
+ case DIGI_SETCUSTOMBAUD:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return(dgap_tty_digisetcustombaud(tty, uarg));
+
+ case DIGI_RESET_PORT:
+ dgap_firmware_reset_port(ch);
+ dgap_param(tty);
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return 0;
+
+ default:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_ioctl - in default\n"));
+ DPR_IOCTL(("dgap_tty_ioctl end - cmd %s (%x), arg %lx\n",
+ dgap_ioctl_name(cmd), cmd, arg));
+
+ return(-ENOIOCTLCMD);
+ }
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_ioctl end - cmd %s (%x), arg %lx\n",
+ dgap_ioctl_name(cmd), cmd, arg));
+
+ return(0);
+}
diff --git a/drivers/staging/dgap/dgap_tty.h b/drivers/staging/dgap/dgap_tty.h
new file mode 100644
index 0000000..464a460
--- /dev/null
+++ b/drivers/staging/dgap/dgap_tty.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+#ifndef __DGAP_TTY_H
+#define __DGAP_TTY_H
+
+#include "dgap_driver.h"
+
+int dgap_tty_register(struct board_t *brd);
+
+int dgap_tty_preinit(void);
+int dgap_tty_init(struct board_t *);
+
+void dgap_tty_post_uninit(void);
+void dgap_tty_uninit(struct board_t *);
+
+void dgap_carrier(struct channel_t *ch);
+void dgap_input(struct channel_t *ch);
+
+
+#endif
diff --git a/drivers/staging/dgap/dgap_types.h b/drivers/staging/dgap/dgap_types.h
new file mode 100644
index 0000000..eca38c7
--- /dev/null
+++ b/drivers/staging/dgap/dgap_types.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+#ifndef __DGAP_TYPES_H
+#define __DGAP_TYPES_H
+
+#ifndef TRUE
+# define TRUE 1
+#endif
+
+#ifndef FALSE
+# define FALSE 0
+#endif
+
+/* Required for our shared headers! */
+typedef unsigned char uchar;
+
+#endif
diff --git a/drivers/staging/dgap/digi.h b/drivers/staging/dgap/digi.h
new file mode 100644
index 0000000..651e2e5
--- /dev/null
+++ b/drivers/staging/dgap/digi.h
@@ -0,0 +1,376 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * $Id: digi.h,v 1.1 2009/10/23 14:01:57 markh Exp $
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+#ifndef __DIGI_H
+#define __DIGI_H
+
+/************************************************************************
+ *** Definitions for Digi ditty(1) command.
+ ************************************************************************/
+
+
+/*
+ * Copyright (c) 1988-96 Digi International Inc., All Rights Reserved.
+ */
+
+/************************************************************************
+ * This module provides application access to special Digi
+ * serial line enhancements which are not standard UNIX(tm) features.
+ ************************************************************************/
+
+#if !defined(TIOCMODG)
+
+#define TIOCMODG ('d'<<8) | 250 /* get modem ctrl state */
+#define TIOCMODS ('d'<<8) | 251 /* set modem ctrl state */
+
+#ifndef TIOCM_LE
+#define TIOCM_LE 0x01 /* line enable */
+#define TIOCM_DTR 0x02 /* data terminal ready */
+#define TIOCM_RTS 0x04 /* request to send */
+#define TIOCM_ST 0x08 /* secondary transmit */
+#define TIOCM_SR 0x10 /* secondary receive */
+#define TIOCM_CTS 0x20 /* clear to send */
+#define TIOCM_CAR 0x40 /* carrier detect */
+#define TIOCM_RNG 0x80 /* ring indicator */
+#define TIOCM_DSR 0x100 /* data set ready */
+#define TIOCM_RI TIOCM_RNG /* ring (alternate) */
+#define TIOCM_CD TIOCM_CAR /* carrier detect (alt) */
+#endif
+
+#endif
+
+#if !defined(TIOCMSET)
+#define TIOCMSET ('d'<<8) | 252 /* set modem ctrl state */
+#define TIOCMGET ('d'<<8) | 253 /* set modem ctrl state */
+#endif
+
+#if !defined(TIOCMBIC)
+#define TIOCMBIC ('d'<<8) | 254 /* set modem ctrl state */
+#define TIOCMBIS ('d'<<8) | 255 /* set modem ctrl state */
+#endif
+
+
+#if !defined(TIOCSDTR)
+#define TIOCSDTR ('e'<<8) | 0 /* set DTR */
+#define TIOCCDTR ('e'<<8) | 1 /* clear DTR */
+#endif
+
+/************************************************************************
+ * Ioctl command arguments for DIGI parameters.
+ ************************************************************************/
+#define DIGI_GETA ('e'<<8) | 94 /* Read params */
+
+#define DIGI_SETA ('e'<<8) | 95 /* Set params */
+#define DIGI_SETAW ('e'<<8) | 96 /* Drain & set params */
+#define DIGI_SETAF ('e'<<8) | 97 /* Drain, flush & set params */
+
+#define DIGI_KME ('e'<<8) | 98 /* Read/Write Host */
+ /* Adapter Memory */
+
+#define DIGI_GETFLOW ('e'<<8) | 99 /* Get startc/stopc flow */
+ /* control characters */
+#define DIGI_SETFLOW ('e'<<8) | 100 /* Set startc/stopc flow */
+ /* control characters */
+#define DIGI_GETAFLOW ('e'<<8) | 101 /* Get Aux. startc/stopc */
+ /* flow control chars */
+#define DIGI_SETAFLOW ('e'<<8) | 102 /* Set Aux. startc/stopc */
+ /* flow control chars */
+
+#define DIGI_GEDELAY ('d'<<8) | 246 /* Get edelay */
+#define DIGI_SEDELAY ('d'<<8) | 247 /* Set edelay */
+
+struct digiflow_t {
+ unsigned char startc; /* flow cntl start char */
+ unsigned char stopc; /* flow cntl stop char */
+};
+
+
+#ifdef FLOW_2200
+#define F2200_GETA ('e'<<8) | 104 /* Get 2x36 flow cntl flags */
+#define F2200_SETAW ('e'<<8) | 105 /* Set 2x36 flow cntl flags */
+#define F2200_MASK 0x03 /* 2200 flow cntl bit mask */
+#define FCNTL_2200 0x01 /* 2x36 terminal flow cntl */
+#define PCNTL_2200 0x02 /* 2x36 printer flow cntl */
+#define F2200_XON 0xf8
+#define P2200_XON 0xf9
+#define F2200_XOFF 0xfa
+#define P2200_XOFF 0xfb
+
+#define FXOFF_MASK 0x03 /* 2200 flow status mask */
+#define RCVD_FXOFF 0x01 /* 2x36 Terminal XOFF rcvd */
+#define RCVD_PXOFF 0x02 /* 2x36 Printer XOFF rcvd */
+#endif
+
+/************************************************************************
+ * Values for digi_flags
+ ************************************************************************/
+#define DIGI_IXON 0x0001 /* Handle IXON in the FEP */
+#define DIGI_FAST 0x0002 /* Fast baud rates */
+#define RTSPACE 0x0004 /* RTS input flow control */
+#define CTSPACE 0x0008 /* CTS output flow control */
+#define DSRPACE 0x0010 /* DSR output flow control */
+#define DCDPACE 0x0020 /* DCD output flow control */
+#define DTRPACE 0x0040 /* DTR input flow control */
+#define DIGI_COOK 0x0080 /* Cooked processing done in FEP */
+#define DIGI_FORCEDCD 0x0100 /* Force carrier */
+#define DIGI_ALTPIN 0x0200 /* Alternate RJ-45 pin config */
+#define DIGI_AIXON 0x0400 /* Aux flow control in fep */
+#define DIGI_PRINTER 0x0800 /* Hold port open for flow cntrl*/
+#define DIGI_PP_INPUT 0x1000 /* Change parallel port to input*/
+#define DIGI_DTR_TOGGLE 0x2000 /* Support DTR Toggle */
+#define DIGI_422 0x4000 /* for 422/232 selectable panel */
+#define DIGI_RTS_TOGGLE 0x8000 /* Support RTS Toggle */
+
+/************************************************************************
+ * These options are not supported on the comxi.
+ ************************************************************************/
+#define DIGI_COMXI (DIGI_FAST|DIGI_COOK|DSRPACE|DCDPACE|DTRPACE)
+
+#define DIGI_PLEN 28 /* String length */
+#define DIGI_TSIZ 10 /* Terminal string len */
+
+/************************************************************************
+ * Structure used with ioctl commands for DIGI parameters.
+ ************************************************************************/
+struct digi_t {
+ unsigned short digi_flags; /* Flags (see above) */
+ unsigned short digi_maxcps; /* Max printer CPS */
+ unsigned short digi_maxchar; /* Max chars in print queue */
+ unsigned short digi_bufsize; /* Buffer size */
+ unsigned char digi_onlen; /* Length of ON string */
+ unsigned char digi_offlen; /* Length of OFF string */
+ char digi_onstr[DIGI_PLEN]; /* Printer on string */
+ char digi_offstr[DIGI_PLEN]; /* Printer off string */
+ char digi_term[DIGI_TSIZ]; /* terminal string */
+};
+
+/************************************************************************
+ * KME definitions and structures.
+ ************************************************************************/
+#define RW_IDLE 0 /* Operation complete */
+#define RW_READ 1 /* Read Concentrator Memory */
+#define RW_WRITE 2 /* Write Concentrator Memory */
+
+struct rw_t {
+ unsigned char rw_req; /* Request type */
+ unsigned char rw_board; /* Host Adapter board number */
+ unsigned char rw_conc; /* Concentrator number */
+ unsigned char rw_reserved; /* Reserved for expansion */
+ unsigned long rw_addr; /* Address in concentrator */
+ unsigned short rw_size; /* Read/write request length */
+ unsigned char rw_data[128]; /* Data to read/write */
+};
+
+/***********************************************************************
+ * Shrink Buffer and Board Information definitions and structures.
+
+ ************************************************************************/
+ /* Board type return codes */
+#define PCXI_TYPE 1 /* Board type at the designated port is a PC/Xi */
+#define PCXM_TYPE 2 /* Board type at the designated port is a PC/Xm */
+#define PCXE_TYPE 3 /* Board type at the designated port is a PC/Xe */
+#define MCXI_TYPE 4 /* Board type at the designated port is a MC/Xi */
+#define COMXI_TYPE 5 /* Board type at the designated port is a COM/Xi */
+
+ /* Non-Zero Result codes. */
+#define RESULT_NOBDFND 1 /* A Digi product at that port is not config installed */
+#define RESULT_NODESCT 2 /* A memory descriptor was not obtainable */
+#define RESULT_NOOSSIG 3 /* FEP/OS signature was not detected on the board */
+#define RESULT_TOOSML 4 /* Too small an area to shrink. */
+#define RESULT_NOCHAN 5 /* Channel structure for the board was not found */
+
+struct shrink_buf_struct {
+ unsigned long shrink_buf_vaddr; /* Virtual address of board */
+ unsigned long shrink_buf_phys; /* Physical address of board */
+ unsigned long shrink_buf_bseg; /* Amount of board memory */
+	unsigned long	shrink_buf_hseg;	/* '186 Beginning of Dual-Port */
+
+	unsigned long	shrink_buf_lseg;	/* '186 Beginning of freed memory */
+	unsigned long	shrink_buf_mseg;	/* Linear address from start of
+						   dual-port where freed memory
+						   begins, host viewpoint. */
+
+ unsigned long shrink_buf_bdparam; /* Parameter for xxmemon and
+ xxmemoff */
+
+ unsigned long shrink_buf_reserva; /* Reserved */
+ unsigned long shrink_buf_reservb; /* Reserved */
+ unsigned long shrink_buf_reservc; /* Reserved */
+ unsigned long shrink_buf_reservd; /* Reserved */
+
+ unsigned char shrink_buf_result; /* Reason for call failing
+ Zero is Good return */
+ unsigned char shrink_buf_init; /* Non-Zero if it caused an
+ xxinit call. */
+
+ unsigned char shrink_buf_anports; /* Number of async ports */
+ unsigned char shrink_buf_snports; /* Number of sync ports */
+ unsigned char shrink_buf_type; /* Board type 1 = PC/Xi,
+ 2 = PC/Xm,
+ 3 = PC/Xe
+ 4 = MC/Xi
+ 5 = COMX/i */
+ unsigned char shrink_buf_card; /* Card number */
+
+};
+
+/************************************************************************
+ * Structure to get driver status information
+ ************************************************************************/
+struct digi_dinfo {
+ unsigned long dinfo_nboards; /* # boards configured */
+ char dinfo_reserved[12]; /* for future expansion */
+ char dinfo_version[16]; /* driver version */
+};
+
+#define DIGI_GETDD ('d'<<8) | 248 /* get driver info */
+
+/************************************************************************
+ * Structure used with ioctl commands for per-board information
+ *
+ * physsize and memsize differ when board has "windowed" memory
+ ************************************************************************/
+struct digi_info {
+ unsigned long info_bdnum; /* Board number (0 based) */
+ unsigned long info_ioport; /* io port address */
+ unsigned long info_physaddr; /* memory address */
+ unsigned long info_physsize; /* Size of host mem window */
+ unsigned long info_memsize; /* Amount of dual-port mem */
+ /* on board */
+ unsigned short info_bdtype; /* Board type */
+ unsigned short info_nports; /* number of ports */
+ char info_bdstate; /* board state */
+ char info_reserved[7]; /* for future expansion */
+};
+
+#define DIGI_GETBD ('d'<<8) | 249 /* get board info */
+
+struct digi_stat {
+ unsigned int info_chan; /* Channel number (0 based) */
+ unsigned int info_brd; /* Board number (0 based) */
+ unsigned long info_cflag; /* cflag for channel */
+ unsigned long info_iflag; /* iflag for channel */
+ unsigned long info_oflag; /* oflag for channel */
+ unsigned long info_mstat; /* mstat for channel */
+ unsigned long info_tx_data; /* tx_data for channel */
+ unsigned long info_rx_data; /* rx_data for channel */
+ unsigned long info_hflow; /* hflow for channel */
+ unsigned long info_reserved[8]; /* for future expansion */
+};
+
+#define DIGI_GETSTAT ('d'<<8) | 244 /* get board info */
+/************************************************************************
+ *
+ * Structure used with ioctl commands for per-channel information
+ *
+ ************************************************************************/
+struct digi_ch {
+ unsigned long info_bdnum; /* Board number (0 based) */
+ unsigned long info_channel; /* Channel index number */
+ unsigned long info_ch_cflag; /* Channel cflag */
+ unsigned long info_ch_iflag; /* Channel iflag */
+ unsigned long info_ch_oflag; /* Channel oflag */
+ unsigned long info_chsize; /* Channel structure size */
+ unsigned long info_sleep_stat; /* sleep status */
+ dev_t info_dev; /* device number */
+ unsigned char info_initstate; /* Channel init state */
+ unsigned char info_running; /* Channel running state */
+ long reserved[8]; /* reserved for future use */
+};
+
+/*
+* This structure is used with the DIGI_FEPCMD ioctl to
+* tell the driver which port to send the command for.
+*/
+struct digi_cmd {
+ int cmd;
+ int word;
+ int ncmds;
+ int chan; /* channel index (zero based) */
+ int bdid; /* board index (zero based) */
+};
+
+/*
+* info_sleep_stat defines
+*/
+#define INFO_RUNWAIT 0x0001
+#define INFO_WOPEN 0x0002
+#define INFO_TTIOW 0x0004
+#define INFO_CH_RWAIT 0x0008
+#define INFO_CH_WEMPTY 0x0010
+#define INFO_CH_WLOW 0x0020
+#define INFO_XXBUF_BUSY 0x0040
+
+#define DIGI_GETCH ('d'<<8) | 245 /* get board info */
+
+/* Board type definitions */
+
+#define SUBTYPE 0007
+#define T_PCXI 0000
+#define T_PCXM 0001
+#define T_PCXE 0002
+#define T_PCXR 0003
+#define T_SP 0004
+#define T_SP_PLUS 0005
+# define T_HERC 0000
+# define T_HOU 0001
+# define T_LON 0002
+# define T_CHA 0003
+#define FAMILY 0070
+#define T_COMXI 0000
+#define T_PCXX 0010
+#define T_CX 0020
+#define T_EPC 0030
+#define T_PCLITE 0040
+#define T_SPXX 0050
+#define T_AVXX 0060
+#define T_DXB 0070
+#define T_A2K_4_8 0070
+#define BUSTYPE 0700
+#define T_ISABUS 0000
+#define T_MCBUS 0100
+#define T_EISABUS 0200
+#define T_PCIBUS 0400
+
+/* Board State Definitions */
+
+#define BD_RUNNING 0x0
+#define BD_REASON 0x7f
+#define BD_NOTFOUND 0x1
+#define BD_NOIOPORT 0x2
+#define BD_NOMEM 0x3
+#define BD_NOBIOS 0x4
+#define BD_NOFEP 0x5
+#define BD_FAILED 0x6
+#define BD_ALLOCATED 0x7
+#define BD_TRIBOOT 0x8
+#define BD_BADKME 0x80
+
+#define DIGI_LOOPBACK ('d'<<8) | 252 /* Enable/disable UART internal loopback */
+#define DIGI_SPOLL ('d'<<8) | 254 /* change poller rate */
+
+#define DIGI_SETCUSTOMBAUD _IOW('e', 106, int) /* Set integer baud rate */
+#define DIGI_GETCUSTOMBAUD _IOR('e', 107, int) /* Get integer baud rate */
+#define DIGI_RESET_PORT ('e'<<8) | 93 /* Reset port */
+
+#endif /* DIGI_H */
diff --git a/drivers/staging/dgap/downld.c b/drivers/staging/dgap/downld.c
new file mode 100644
index 0000000..57dfd6b
--- /dev/null
+++ b/drivers/staging/dgap/downld.c
@@ -0,0 +1,798 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * $Id: downld.c,v 1.6 2009/01/14 14:10:54 markh Exp $
+ */
+
+/*
+** downld.c
+**
+** This is the daemon that sends the fep, bios, and concentrator images
+** from user space to the driver.
+** BUGS:
+** If the file changes in the middle of the download, you probably
+** will get what you deserve.
+**
+*/
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/errno.h>
+
+#include "dgap_types.h"
+#include "digi.h"
+#include "dgap_fep5.h"
+
+#include "dgap_downld.h"
+
+#include <string.h>
+#include <malloc.h>
+#include <stddef.h>
+#include <unistd.h>
+
+char *pgm;
+void myperror();
+
+/*
+** This structure is used to keep track of the different images available
+** to give to the driver. It is arranged so that the things that are
+** constants or that have defaults are first in the structure to simplify
+** the table of initializers.
+*/
+struct image_info {
+ short type; /* bios, fep, conc */
+ short family; /* boards this applies to */
+ short subtype; /* subtype */
+ int len; /* size of image */
+ char *image; /* ioctl struct + image */
+ char *name;
+ char *fname; /* filename of binary (i.e. "asfep.bin") */
+ char *pathname; /* pathname to this binary ("/etc/dgap/xrfep.bin"); */
+ time_t mtime; /* Last modification time */
+};
+
+#define IBIOS 0
+#define IFEP 1
+#define ICONC 2
+#define ICONFIG 3
+#define IBAD 4
+
+#define DEFAULT_LOC "/lib/firmware/dgap/"
+
+struct image_info *image_list;
+int nimages, count;
+
+struct image_info images[] = {
+{IBIOS, T_EPC, SUBTYPE, 0, NULL, "EPC/X", "fxbios.bin", DEFAULT_LOC "fxbios.bin", 0 },
+{IFEP, T_EPC, SUBTYPE, 0, NULL, "EPC/X", "fxfep.bin", DEFAULT_LOC "fxfep.bin", 0 },
+{ICONC, T_EPC, SUBTYPE, 0, NULL, "EPC/X", "fxcon.bin", DEFAULT_LOC "fxcon.bin", 0 },
+
+{IBIOS, T_CX, SUBTYPE, 0, NULL, "C/X", "cxbios.bin", DEFAULT_LOC "cxbios.bin", 0 },
+{IFEP, T_CX, SUBTYPE, 0, NULL, "C/X", "cxhost.bin", DEFAULT_LOC "cxhost.bin", 0 },
+
+{IBIOS, T_CX, T_PCIBUS, 0, NULL, "C/X PCI", "cxpbios.bin", DEFAULT_LOC "cxpbios.bin", 0 },
+{IFEP, T_CX, T_PCIBUS, 0, NULL, "C/X PCI", "cxpfep.bin", DEFAULT_LOC "cxpfep.bin", 0 },
+
+{ICONC, T_CX, SUBTYPE, 0, NULL, "C/X", "cxcon.bin", DEFAULT_LOC "cxcon.bin", 0 },
+{ICONC, T_CX, SUBTYPE, 0, NULL, "C/X", "ibmcxcon.bin", DEFAULT_LOC "ibmcxcon.bin", 0 },
+{ICONC, T_CX, SUBTYPE, 0, NULL, "C/X", "ibmencon.bin", DEFAULT_LOC "ibmencon.bin", 0 },
+
+{IBIOS, FAMILY, T_PCXR, 0, NULL, "PCXR", "xrbios.bin", DEFAULT_LOC "xrbios.bin", 0 },
+{IFEP, FAMILY, T_PCXR, 0, NULL, "PCXR", "xrfep.bin", DEFAULT_LOC "xrfep.bin", 0 },
+
+{IBIOS, T_PCLITE, SUBTYPE, 0, NULL, "X/em", "sxbios.bin", DEFAULT_LOC "sxbios.bin", 0 },
+{IFEP, T_PCLITE, SUBTYPE, 0, NULL, "X/em", "sxfep.bin", DEFAULT_LOC "sxfep.bin", 0 },
+
+{IBIOS, T_EPC, T_PCIBUS, 0, NULL, "PCI", "pcibios.bin", DEFAULT_LOC "pcibios.bin", 0 },
+{IFEP, T_EPC, T_PCIBUS, 0, NULL, "PCI", "pcifep.bin", DEFAULT_LOC "pcifep.bin", 0 },
+{ICONFIG, 0, 0, 0, NULL, NULL, "dgap.conf", "/etc/dgap.conf", 0 },
+
+/* IBAD/NULL entry indicating end-of-table */
+
+{IBAD, 0, 0, 0, NULL, NULL, NULL, NULL, 0 }
+
+} ;
+
+int errorprint = 1;
+int nodldprint = 1;
+int debugflag;
+int fd;
+
+struct downld_t *ip; /* Image pointer in current image */
+struct downld_t *dp; /* conc. download */
+
+
+/*
+ * The same for either the FEP or the BIOS.
+ * Append the downldio header, issue the ioctl, then free
+ * the buffer. Not horribly CPU efficient, but quite RAM efficient.
+ */
+
+void squirt(int req_type, int bdid, struct image_info *ii)
+{
+ struct downldio *dliop;
+ int size_buf;
+ int sfd;
+ struct stat sb;
+
+ /*
+ * If this binary comes from a file, stat it to see how
+ * large it is. Yes, we intentionally do this each
+	 * time, since the binary may change between loads.
+ */
+
+ if (ii->pathname) {
+ sfd = open(ii->pathname, O_RDONLY);
+
+ if (sfd < 0 ) {
+ myperror(ii->pathname);
+ goto squirt_end;
+ }
+
+ if (fstat(sfd, &sb) == -1 ) {
+ myperror(ii->pathname);
+ goto squirt_end;
+ }
+
+ ii->len = sb.st_size ;
+ }
+
+ size_buf = ii->len + sizeof(struct downldio);
+
+ /*
+ * This buffer will be freed at the end of this function. It is
+ * not resilient and should be around only long enough for the d/l
+ * to happen.
+ */
+ dliop = (struct downldio *) malloc(size_buf);
+
+ if (dliop == NULL) {
+ fprintf(stderr,"%s: can't get %d bytes of memory; aborting\n",
+ pgm, size_buf);
+ exit (1);
+ }
+
+ /* Now, stick the image in fepimage. This can come from either
+ * the compiled-in image or from the filesystem.
+ */
+ if (ii->pathname)
+ read(sfd, dliop->image.fi.fepimage, ii->len);
+ else
+ memcpy(dliop ->image.fi.fepimage, ii->image, ii->len);
+
+ dliop->req_type = req_type;
+ dliop->bdid = bdid;
+
+ dliop->image.fi.len = ii->len;
+
+ if (debugflag)
+ printf("sending %d bytes of %s %s from %s\n",
+ ii->len,
+ (ii->type == IFEP) ? "FEP" : (ii->type == IBIOS) ? "BIOS" : "CONFIG",
+ ii->name ? ii->name : "",
+ (ii->pathname) ? ii->pathname : "internal image" );
+
+ if (ioctl(fd, DIGI_DLREQ_SET, (char *) dliop) == -1) {
+ if(errorprint) {
+ fprintf(stderr,
+ "%s: warning - download ioctl failed\n",pgm);
+ errorprint = 0;
+ }
+ sleep(2);
+ }
+
+squirt_end:
+
+ if (ii->pathname) {
+ close(sfd);
+ }
+ free(dliop);
+}
+
+
+/*
+ * See if we need to reload the download image in core
+ *
+ */
+void consider_file_rescan(struct image_info *ii)
+{
+ int sfd ;
+ int len ;
+ struct stat sb;
+
+ /* This operation only makes sense when we're working from a file */
+
+ if (ii->pathname) {
+
+ sfd = open (ii->pathname, O_RDONLY) ;
+ if (sfd < 0 ) {
+ myperror(ii->pathname);
+ exit(1) ;
+ }
+
+ if( fstat(sfd,&sb) == -1 ) {
+ myperror(ii->pathname);
+ exit(1);
+ }
+
+ /* If the file hasn't changed since we last did this,
+ * and we have not done a free() on the image, bail
+ */
+ if (ii->image && (sb.st_mtime == ii->mtime))
+ goto end_rescan;
+
+ ii->len = len = sb.st_size ;
+
+ /* Record the timestamp of the file */
+ ii->mtime = sb.st_mtime;
+
+ /* image should be NULL unless there is an image malloced
+ * in already. Before we malloc again, make sure we don't
+ * have a memory leak.
+ */
+ if ( ii->image ) {
+ free( ii->image );
+ /* ii->image = NULL; */ /* not necessary */
+ }
+
+ /* This image will be kept only long enough for the
+ * download to happen. After sending the last block,
+ * it will be freed
+ */
+ ii->image = malloc(len) ;
+
+ if (ii->image == NULL) {
+ fprintf(stderr,
+ "%s: can't get %d bytes of memory; aborting\n",
+ pgm, len);
+ exit (1);
+ }
+
+ if (read(sfd, ii->image, len) < len) {
+ fprintf(stderr,"%s: read error on %s; aborting\n",
+ pgm, ii->pathname);
+ exit (1);
+ }
+
+end_rescan:
+ close(sfd);
+
+ }
+}
+
+/*
+ * Scan for images to match the driver requests
+ */
+
+struct image_info * find_conc_image()
+{
+ int x ;
+ struct image_info *i = NULL ;
+
+ for ( x = 0; x < nimages; x++ ) {
+ i=&image_list[x];
+
+ if(i->type != ICONC)
+ continue;
+
+ consider_file_rescan(i) ;
+
+ ip = (struct downld_t *) image_list[x].image;
+ if (ip == NULL) continue;
+
+ /*
+ * When I removed Clusterport, I kept only the code that I
+ * was SURE wasn't ClusterPort. We may not need the next two
+ * lines of code.
+ */
+ if ((dp->dl_type != 'P' ) && ( ip->dl_srev == dp->dl_srev ))
+ return i;
+ }
+ return NULL ;
+}
+
+
+int main(int argc, char **argv)
+{
+ struct downldio dlio;
+ int offset, bsize;
+ int x;
+ char *down, *image, *fname;
+ struct image_info *ii;
+
+ pgm = argv[0];
+ dp = &dlio.image.dl; /* conc. download */
+
+ while((argc > 2) && !strcmp(argv[1],"-d")) {
+ debugflag++ ;
+ argc-- ;
+ argv++ ;
+ }
+
+ if(argc < 2) {
+ fprintf(stderr,
+ "usage: %s download-device [image-file] ...\n",
+ pgm);
+ exit(1);
+ }
+
+
+
+ /*
+ * Daemonize, unless debugging is turned on.
+ */
+ if (debugflag == 0) {
+ switch (fork())
+ {
+ case 0:
+ break;
+
+ case -1:
+ return 1;
+
+ default:
+ return 0;
+ }
+
+ setsid();
+
+ /*
+ * The child no longer needs "stdin", "stdout", or "stderr",
+ * and should not block processes waiting for them to close.
+ */
+ fclose(stdin);
+ fclose(stdout);
+ fclose(stderr);
+
+ }
+
+ while (1) {
+ if( (fd = open(argv[1], O_RDWR)) == -1 ) {
+ sleep(1);
+ }
+ else
+ break;
+ }
+
+ /*
+ ** create a list of images to search through when trying to match
+ ** requests from the driver. Put images from the command line in
+ ** the list before built in images so that the command line images
+ ** can override the built in ones.
+ */
+
+ /* allocate space for the list */
+
+ nimages = argc - 2;
+
+ /* count the number of default list entries */
+
+ for (count = 0; images[count].type != IBAD; ++count) ;
+
+ nimages += count;
+
+ /* Really should just remove the variable "image_list".... robertl */
+ image_list = images ;
+
+ /* get the images from the command line */
+ for(x = 2; x < argc; x++) {
+ int xx;
+
+ /*
+ * strip off any leading path information for
+ * determining file type
+ */
+ if( (fname = strrchr(argv[x],'/')) == NULL)
+ fname = argv[x];
+ else
+ fname++; /* skip the slash */
+
+ for (xx = 0; xx < count; xx++) {
+ if (strcmp(fname, images[xx].fname) == 0 ) {
+ images[xx].pathname = argv[x];
+
+ /* image should be NULL until */
+ /* space is malloced */
+ images[xx].image = NULL ;
+ }
+ }
+ }
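+
+	/*
+	 * Editorial note (a sketch, not from the original source): a file
+	 * named on the command line does not add a new table entry; it only
+	 * overrides the pathname of the built-in entry whose fname matches.
+	 */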
+
+ sleep(3);
+
+ /*
+ ** Endless loop: get a request from the fep, and service that request.
+ */
+ for(;;) {
+ /* get the request */
+ if (debugflag)
+ printf("b4 get ioctl...");
+
+ if (ioctl(fd,DIGI_DLREQ_GET, &dlio) == -1 ) {
+ if (errorprint) {
+ fprintf(stderr,
+ "%s: warning - download ioctl failed\n",
+ pgm);
+ errorprint = 0;
+ }
+ sleep(2);
+ } else {
+ if (debugflag)
+ printf("dlio.req_type is %d bd %d\n",
+ dlio.req_type,dlio.bdid);
+
+ switch(dlio.req_type) {
+ case DLREQ_BIOS:
+ /*
+ ** find the bios image for this type
+ */
+ for ( x = 0; x < nimages; x++ ) {
+ if(image_list[x].type != IBIOS)
+ continue;
+
+ if ((dlio.image.fi.type & FAMILY) ==
+ image_list[x].family) {
+
+ if ( image_list[x].family == T_CX ) {
+ if ((dlio.image.fi.type & BUSTYPE)
+ == T_PCIBUS ) {
+ if ( image_list[x].subtype
+ == T_PCIBUS )
+ break;
+ }
+ else {
+ break;
+ }
+ }
+ else if ( image_list[x].family == T_EPC ) {
+ /* If subtype of image is T_PCIBUS, it is */
+ /* a PCI EPC image, so the board must */
+ /* have bus type T_PCIBUS to match */
+ if ((dlio.image.fi.type & BUSTYPE)
+ == T_PCIBUS ) {
+ if ( image_list[x].subtype
+ == T_PCIBUS )
+ break;
+ }
+ else {
+ /* NON PCI EPC doesn't use PCI image */
+ if ( image_list[x].subtype
+ != T_PCIBUS )
+ break;
+ }
+ }
+ else
+ break;
+ }
+ else if ((dlio.image.fi.type & SUBTYPE) == image_list[x].subtype) {
+ /* PCXR board will break out of the loop here */
+ if ( image_list[x].subtype == T_PCXR ) {
+ break;
+ }
+ }
+ }
+
+ if ( x >= nimages) {
+ /*
+ ** no valid images exist
+ */
+ if(nodldprint) {
+ fprintf(stderr,
+ "%s: cannot find correct BIOS image\n",
+ pgm);
+ nodldprint = 0;
+ }
+ dlio.image.fi.type = -1;
+ if (ioctl(fd, DIGI_DLREQ_SET, &dlio) == -1) {
+ if (errorprint) {
+ fprintf(stderr,
+ "%s: warning - download ioctl failed\n",
+ pgm);
+ errorprint = 0;
+ }
+ sleep(2);
+ }
+ break;
+ }
+ squirt(dlio.req_type, dlio.bdid, &image_list[x]);
+ break ;
+
+ case DLREQ_FEP:
+ /*
+ ** find the fep image for this type
+ */
+ for ( x = 0; x < nimages; x++ ) {
+ if(image_list[x].type != IFEP)
+ continue;
+ if( (dlio.image.fi.type & FAMILY) ==
+ image_list[x].family ) {
+ if ( image_list[x].family == T_CX ) {
+ /* C/X PCI board */
+ if ((dlio.image.fi.type & BUSTYPE)
+ == T_PCIBUS ) {
+ if ( image_list[x].subtype
+ == T_PCIBUS )
+ break;
+ }
+ else {
+ /* Regular CX */
+ break;
+ }
+ }
+ else if ( image_list[x].family == T_EPC ) {
+ /* If subtype of image is T_PCIBUS, it is */
+ /* a PCI EPC image, so the board must */
+ /* have bus type T_PCIBUS to match */
+ if ((dlio.image.fi.type & BUSTYPE)
+ == T_PCIBUS ) {
+ if ( image_list[x].subtype
+ == T_PCIBUS )
+ break;
+ }
+ else {
+ /* NON PCI EPC doesn't use PCI image */
+ if ( image_list[x].subtype
+ != T_PCIBUS )
+ break;
+ }
+ }
+ else
+ break;
+ }
+ else if ((dlio.image.fi.type & SUBTYPE) == image_list[x].subtype) {
+ /* PCXR board will break out of the loop here */
+ if ( image_list[x].subtype == T_PCXR ) {
+ break;
+ }
+ }
+ }
+
+ if ( x >= nimages) {
+ /*
+ ** no valid images exist
+ */
+ if(nodldprint) {
+ fprintf(stderr,
+ "%s: cannot find correct FEP image\n",
+ pgm);
+ nodldprint = 0;
+ }
+ dlio.image.fi.type=-1;
+ if( ioctl(fd,DIGI_DLREQ_SET,&dlio) == -1 ) {
+ if(errorprint) {
+ fprintf(stderr,
+ "%s: warning - download ioctl failed\n",
+ pgm);
+ errorprint=0;
+ }
+ sleep(2);
+ }
+ break;
+ }
+ squirt(dlio.req_type, dlio.bdid, &image_list[x]);
+ break;
+
+ case DLREQ_DEVCREATE:
+ {
+ char string[1024];
+#if 0
+ sprintf(string, "%s /proc/dgap/%d/mknod", DEFSHELL, dlio.bdid);
+#endif
+ sprintf(string, "%s /usr/sbin/dgap_updatedevs %d", DEFSHELL, dlio.bdid);
+ system(string);
+
+ if (debugflag)
+ printf("Created Devices.\n");
+ if (ioctl(fd, DIGI_DLREQ_SET, &dlio) == -1 ) {
+ if(errorprint) {
+ fprintf(stderr, "%s: warning - DEVCREATE ioctl failed\n",pgm);
+ errorprint = 0;
+ }
+ sleep(2);
+ }
+ if (debugflag)
+ printf("After ioctl set - Created Device.\n");
+ }
+
+ break;
+
+ case DLREQ_CONFIG:
+ for ( x = 0; x < nimages; x++ ) {
+ if(image_list[x].type != ICONFIG)
+ continue;
+ else
+ break;
+ }
+
+ if ( x >= nimages) {
+ /*
+ ** no valid images exist
+ */
+ if(nodldprint) {
+ fprintf(stderr,
+ "%s: cannot find correct CONFIG image\n",
+ pgm);
+ nodldprint = 0;
+ }
+ dlio.image.fi.type=-1;
+ if (ioctl(fd, DIGI_DLREQ_SET, &dlio) == -1 ) {
+ if(errorprint) {
+ fprintf(stderr,
+ "%s: warning - download ioctl failed\n",
+ pgm);
+ errorprint=0;
+ }
+ sleep(2);
+ }
+ break;
+ }
+
+ squirt(dlio.req_type, dlio.bdid, &image_list[x]);
+ break;
+
+ case DLREQ_CONC:
+ /*
+ ** find the image needed for this download
+ */
+ if ( dp->dl_seq == 0 ) {
+ /*
+ ** find image for hardware rev range
+ */
+ for ( x = 0; x < nimages; x++ ) {
+ ii=&image_list[x];
+
+ if(image_list[x].type != ICONC)
+ continue;
+
+ consider_file_rescan(ii) ;
+
+ ip = (struct downld_t *) image_list[x].image;
+ if (ip == NULL) continue;
+
+ /*
+ * When I removed Clusterport, I kept only the
+ * code that I was SURE wasn't ClusterPort.
+ * We may not need the next four lines of code.
+ */
+
+ if ((dp->dl_type != 'P' ) &&
+ (ip->dl_lrev <= dp->dl_lrev ) &&
+ ( dp->dl_lrev <= ip->dl_hrev))
+ break;
+ }
+
+ if ( x >= nimages ) {
+ /*
+ ** No valid images exist
+ */
+ if(nodldprint) {
+ fprintf(stderr,
+ "%s: cannot find correct download image %d\n",
+ pgm, dp->dl_lrev);
+ nodldprint=0;
+ }
+ continue;
+ }
+
+ } else {
+ /*
+ ** find image version required
+ */
+ if ((ii = find_conc_image()) == NULL ) {
+ /*
+ ** No valid images exist
+ */
+ fprintf(stderr,
+ "%s: can't find rest of download image??\n",
+ pgm);
+ continue;
+ }
+ }
+
+ /*
+ ** download block of image
+ */
+
+ offset = 1024 * dp->dl_seq;
+
+ /*
+ ** test if block requested within image
+ */
+ if ( offset < ii->len ) {
+
+ /*
+ ** if it is, determine block size, set segment,
+ ** set size, set pointers, and copy block
+ */
+ if (( bsize = ii->len - offset ) > 1024 )
+ bsize = 1024;
+
+ /*
+ ** copy image version info to download area
+ */
+ dp->dl_srev = ip->dl_srev;
+ dp->dl_lrev = ip->dl_lrev;
+ dp->dl_hrev = ip->dl_hrev;
+
+ dp->dl_seg = (64 * dp->dl_seq) + ip->dl_seg;
+ dp->dl_size = bsize;
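+
+				/*
+				 * Editorial note (not in the original source):
+				 * each 1024-byte block advances dl_seg by 64,
+				 * which is consistent with 16-byte paragraphs
+				 * (64 * 16 = 1024), so block dl_seq lands
+				 * 1024 * dl_seq bytes past ip->dl_seg.
+				 */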
+
+ down = (char *)&dp->dl_data[0];
+ image = (char *)((char *)ip + offset);
+
+ memcpy(down, image, bsize);
+ }
+ else {
+ /*
+ ** Image has been downloaded, set segment and
+ ** size to indicate no more blocks
+ */
+ dp->dl_seg = ip->dl_seg;
+ dp->dl_size = 0;
+
+ /* Now, we can release the concentrator */
+ /* image from memory if we're running */
+ /* from filesystem images */
+
+ if (ii->pathname)
+ if (ii->image) {
+ free(ii->image);
+ ii->image = NULL ;
+ }
+ }
+
+ if (debugflag)
+ printf(
+ "sending conc dl section %d to %s from %s\n",
+ dp->dl_seq, ii->name,
+ ii->pathname ? ii->pathname : "Internal Image");
+
+ if (ioctl(fd, DIGI_DLREQ_SET, &dlio) == -1 ) {
+ if (errorprint) {
+ fprintf(stderr,
+ "%s: warning - download ioctl failed\n",
+ pgm);
+ errorprint=0;
+ }
+ sleep(2);
+ }
+ break;
+ } /* switch */
+ }
+ if (debugflag > 1) {
+ printf("pausing: "); fflush(stdout);
+ fflush(stdin);
+ while(getchar() != '\n');
+ printf("continuing\n");
+ }
+ }
+}
+
+/*
+** myperror()
+**
+** Same as normal perror(), but places the program name at the beginning
+** of the message.
+*/
+void myperror(char *s)
+{
+ fprintf(stderr,"%s: %s: %s.\n",pgm, s, strerror(errno));
+}
diff --git a/drivers/staging/dgnc/Kconfig b/drivers/staging/dgnc/Kconfig
new file mode 100644
index 0000000..032c2a7
--- /dev/null
+++ b/drivers/staging/dgnc/Kconfig
@@ -0,0 +1,6 @@
+config DGNC
+ tristate "Digi Neo and Classic PCI Products"
+ default n
+ depends on TTY && PCI
+ ---help---
+ Driver for the Digi International Neo and Classic PCI based product line.
diff --git a/drivers/staging/dgnc/Makefile b/drivers/staging/dgnc/Makefile
new file mode 100644
index 0000000..888c433
--- /dev/null
+++ b/drivers/staging/dgnc/Makefile
@@ -0,0 +1,7 @@
+EXTRA_CFLAGS += -DDG_NAME=\"dgnc-1.3-16\" -DDG_PART=\"40002369_F\"
+
+obj-$(CONFIG_DGNC) += dgnc.o
+
+dgnc-objs := dgnc_cls.o dgnc_driver.o\
+ dgnc_mgmt.o dgnc_neo.o\
+ dgnc_trace.o dgnc_tty.o dgnc_sysfs.o
diff --git a/drivers/staging/dgnc/TODO b/drivers/staging/dgnc/TODO
new file mode 100644
index 0000000..1ff2d18
--- /dev/null
+++ b/drivers/staging/dgnc/TODO
@@ -0,0 +1,17 @@
+* remove kzalloc casts
+* checkpatch fixes
+* sparse fixes
+* fix use of sizeof(). Example: replace sizeof(struct board_t)
+  with sizeof(*brd) and remove sizeof(char)
+* change name of board_t to dgnc_board
+* split chained assignments into two assignments on two lines;
+  don't use two equals signs in a single statement
+* remove unnecessary comments
+* remove unnecessary error messages. Example: kzalloc() has its
+  own error message. Adding an extra one is useless.
+* use goto statements for error handling when appropriate
+* there is a lot of unnecessary code in the driver. It was
+  originally a standalone driver. Remove unneeded code.
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
+Cc: Lidza Louina <lidza.louina@gmail.com>
diff --git a/drivers/staging/dgnc/dgnc_cls.c b/drivers/staging/dgnc/dgnc_cls.c
new file mode 100644
index 0000000..117e158
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_cls.c
@@ -0,0 +1,1409 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h> /* For jiffies, task states */
+#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
+#include <linux/delay.h> /* For udelay */
+#include <asm/io.h> /* For read[bwl]/write[bwl] */
+#include <linux/serial.h> /* For struct async_serial */
+#include <linux/serial_reg.h> /* For the various UART offsets */
+#include <linux/pci.h>
+
+#include "dgnc_driver.h" /* Driver main header file */
+#include "dgnc_cls.h"
+#include "dgnc_tty.h"
+#include "dgnc_trace.h"
+
+static inline void cls_parse_isr(struct board_t *brd, uint port);
+static inline void cls_clear_break(struct channel_t *ch, int force);
+static inline void cls_set_cts_flow_control(struct channel_t *ch);
+static inline void cls_set_rts_flow_control(struct channel_t *ch);
+static inline void cls_set_ixon_flow_control(struct channel_t *ch);
+static inline void cls_set_ixoff_flow_control(struct channel_t *ch);
+static inline void cls_set_no_output_flow_control(struct channel_t *ch);
+static inline void cls_set_no_input_flow_control(struct channel_t *ch);
+static void cls_parse_modem(struct channel_t *ch, uchar signals);
+static void cls_tasklet(unsigned long data);
+static void cls_vpd(struct board_t *brd);
+static void cls_uart_init(struct channel_t *ch);
+static void cls_uart_off(struct channel_t *ch);
+static int cls_drain(struct tty_struct *tty, uint seconds);
+static void cls_param(struct tty_struct *tty);
+static void cls_assert_modem_signals(struct channel_t *ch);
+static void cls_flush_uart_write(struct channel_t *ch);
+static void cls_flush_uart_read(struct channel_t *ch);
+static void cls_disable_receiver(struct channel_t *ch);
+static void cls_enable_receiver(struct channel_t *ch);
+static void cls_send_break(struct channel_t *ch, int msecs);
+static void cls_send_start_character(struct channel_t *ch);
+static void cls_send_stop_character(struct channel_t *ch);
+static void cls_copy_data_from_uart_to_queue(struct channel_t *ch);
+static void cls_copy_data_from_queue_to_uart(struct channel_t *ch);
+static uint cls_get_uart_bytes_left(struct channel_t *ch);
+static void cls_send_immediate_char(struct channel_t *ch, unsigned char);
+static irqreturn_t cls_intr(int irq, void *voidbrd);
+
+struct board_ops dgnc_cls_ops = {
+ .tasklet = cls_tasklet,
+ .intr = cls_intr,
+ .uart_init = cls_uart_init,
+ .uart_off = cls_uart_off,
+ .drain = cls_drain,
+ .param = cls_param,
+ .vpd = cls_vpd,
+ .assert_modem_signals = cls_assert_modem_signals,
+ .flush_uart_write = cls_flush_uart_write,
+ .flush_uart_read = cls_flush_uart_read,
+ .disable_receiver = cls_disable_receiver,
+ .enable_receiver = cls_enable_receiver,
+ .send_break = cls_send_break,
+ .send_start_character = cls_send_start_character,
+ .send_stop_character = cls_send_stop_character,
+ .copy_data_from_queue_to_uart = cls_copy_data_from_queue_to_uart,
+ .get_uart_bytes_left = cls_get_uart_bytes_left,
+ .send_immediate_char = cls_send_immediate_char
+};
+
+
+static inline void cls_set_cts_flow_control(struct channel_t *ch)
+{
+ uchar lcrb = readb(&ch->ch_cls_uart->lcr);
+ uchar ier = readb(&ch->ch_cls_uart->ier);
+ uchar isr_fcr = 0;
+
+ DPR_PARAM(("Setting CTSFLOW\n"));
+
+ /*
+ * The Enhanced Register Set may only be accessed when
+ * the Line Control Register is set to 0xBFh.
+ */
+ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
+
+ isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
+
+ /* Turn on CTS flow control, turn off IXON flow control */
+ isr_fcr |= (UART_EXAR654_EFR_ECB | UART_EXAR654_EFR_CTSDSR);
+ isr_fcr &= ~(UART_EXAR654_EFR_IXON);
+
+ writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
+
+ /* Write old LCR value back out, which turns enhanced access off */
+ writeb(lcrb, &ch->ch_cls_uart->lcr);
+
+ /* Enable interrupts for CTS flow, turn off interrupts for received XOFF chars */
+ ier |= (UART_EXAR654_IER_CTSDSR);
+ ier &= ~(UART_EXAR654_IER_XOFF);
+ writeb(ier, &ch->ch_cls_uart->ier);
+
+ /* Set the usual FIFO values */
+ writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr);
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_56 |
+ UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR),
+ &ch->ch_cls_uart->isr_fcr);
+
+ ch->ch_t_tlevel = 16;
+
+}
+
+
+static inline void cls_set_ixon_flow_control(struct channel_t *ch)
+{
+ uchar lcrb = readb(&ch->ch_cls_uart->lcr);
+ uchar ier = readb(&ch->ch_cls_uart->ier);
+ uchar isr_fcr = 0;
+
+ DPR_PARAM(("Setting IXON FLOW\n"));
+
+ /*
+ * The Enhanced Register Set may only be accessed when
+ * the Line Control Register is set to 0xBFh.
+ */
+ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
+
+ isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
+
+ /* Turn on IXON flow control, turn off CTS flow control */
+ isr_fcr |= (UART_EXAR654_EFR_ECB | UART_EXAR654_EFR_IXON);
+ isr_fcr &= ~(UART_EXAR654_EFR_CTSDSR);
+
+ writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
+
+ /* Now set our current start/stop chars while in enhanced mode */
+ writeb(ch->ch_startc, &ch->ch_cls_uart->mcr);
+ writeb(0, &ch->ch_cls_uart->lsr);
+ writeb(ch->ch_stopc, &ch->ch_cls_uart->msr);
+ writeb(0, &ch->ch_cls_uart->spr);
+
+ /* Write old LCR value back out, which turns enhanced access off */
+ writeb(lcrb, &ch->ch_cls_uart->lcr);
+
+ /* Disable interrupts for CTS flow, turn on interrupts for received XOFF chars */
+ ier &= ~(UART_EXAR654_IER_CTSDSR);
+ ier |= (UART_EXAR654_IER_XOFF);
+ writeb(ier, &ch->ch_cls_uart->ier);
+
+ /* Set the usual FIFO values */
+ writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr);
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_16 |
+ UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR),
+ &ch->ch_cls_uart->isr_fcr);
+
+}
+
+
+static inline void cls_set_no_output_flow_control(struct channel_t *ch)
+{
+ uchar lcrb = readb(&ch->ch_cls_uart->lcr);
+ uchar ier = readb(&ch->ch_cls_uart->ier);
+ uchar isr_fcr = 0;
+
+ DPR_PARAM(("Unsetting Output FLOW\n"));
+
+ /*
+ * The Enhanced Register Set may only be accessed when
+ * the Line Control Register is set to 0xBFh.
+ */
+ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
+
+ isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
+
+ /* Turn off IXON flow control, turn off CTS flow control */
+ isr_fcr |= (UART_EXAR654_EFR_ECB);
+ isr_fcr &= ~(UART_EXAR654_EFR_CTSDSR | UART_EXAR654_EFR_IXON);
+
+ writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
+
+ /* Write old LCR value back out, which turns enhanced access off */
+ writeb(lcrb, &ch->ch_cls_uart->lcr);
+
+ /* Disable interrupts for CTS flow, turn off interrupts for received XOFF chars */
+ ier &= ~(UART_EXAR654_IER_CTSDSR);
+ ier &= ~(UART_EXAR654_IER_XOFF);
+ writeb(ier, &ch->ch_cls_uart->ier);
+
+ /* Set the usual FIFO values */
+ writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr);
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_16 |
+ UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR),
+ &ch->ch_cls_uart->isr_fcr);
+
+ ch->ch_r_watermark = 0;
+ ch->ch_t_tlevel = 16;
+ ch->ch_r_tlevel = 16;
+
+}
+
+
+static inline void cls_set_rts_flow_control(struct channel_t *ch)
+{
+ uchar lcrb = readb(&ch->ch_cls_uart->lcr);
+ uchar ier = readb(&ch->ch_cls_uart->ier);
+ uchar isr_fcr = 0;
+
+ DPR_PARAM(("Setting RTSFLOW\n"));
+
+ /*
+ * The Enhanced Register Set may only be accessed when
+ * the Line Control Register is set to 0xBFh.
+ */
+ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
+
+ isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
+
+ /* Turn on RTS flow control, turn off IXOFF flow control */
+ isr_fcr |= (UART_EXAR654_EFR_ECB | UART_EXAR654_EFR_RTSDTR);
+ isr_fcr &= ~(UART_EXAR654_EFR_IXOFF);
+
+ writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
+
+ /* Write old LCR value back out, which turns enhanced access off */
+ writeb(lcrb, &ch->ch_cls_uart->lcr);
+
+ /* Enable interrupts for RTS flow */
+ ier |= (UART_EXAR654_IER_RTSDTR);
+ writeb(ier, &ch->ch_cls_uart->ier);
+
+ /* Set the usual FIFO values */
+ writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr);
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_56 |
+ UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR),
+ &ch->ch_cls_uart->isr_fcr);
+
+
+ ch->ch_r_watermark = 4;
+ ch->ch_r_tlevel = 8;
+
+}
+
+
+static inline void cls_set_ixoff_flow_control(struct channel_t *ch)
+{
+ uchar lcrb = readb(&ch->ch_cls_uart->lcr);
+ uchar ier = readb(&ch->ch_cls_uart->ier);
+ uchar isr_fcr = 0;
+
+ DPR_PARAM(("Setting IXOFF FLOW\n"));
+
+ /*
+ * The Enhanced Register Set may only be accessed when
+ * the Line Control Register is set to 0xBFh.
+ */
+ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
+
+ isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
+
+ /* Turn on IXOFF flow control, turn off RTS flow control */
+ isr_fcr |= (UART_EXAR654_EFR_ECB | UART_EXAR654_EFR_IXOFF);
+ isr_fcr &= ~(UART_EXAR654_EFR_RTSDTR);
+
+ writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
+
+ /* Now set our current start/stop chars while in enhanced mode */
+ writeb(ch->ch_startc, &ch->ch_cls_uart->mcr);
+ writeb(0, &ch->ch_cls_uart->lsr);
+ writeb(ch->ch_stopc, &ch->ch_cls_uart->msr);
+ writeb(0, &ch->ch_cls_uart->spr);
+
+ /* Write old LCR value back out, which turns enhanced access off */
+ writeb(lcrb, &ch->ch_cls_uart->lcr);
+
+ /* Disable interrupts for RTS flow */
+ ier &= ~(UART_EXAR654_IER_RTSDTR);
+ writeb(ier, &ch->ch_cls_uart->ier);
+
+ /* Set the usual FIFO values */
+ writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr);
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_16 |
+ UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR),
+ &ch->ch_cls_uart->isr_fcr);
+
+}
+
+
+static inline void cls_set_no_input_flow_control(struct channel_t *ch)
+{
+ uchar lcrb = readb(&ch->ch_cls_uart->lcr);
+ uchar ier = readb(&ch->ch_cls_uart->ier);
+ uchar isr_fcr = 0;
+
+ DPR_PARAM(("Unsetting Input FLOW\n"));
+
+ /*
+ * The Enhanced Register Set may only be accessed when
+ * the Line Control Register is set to 0xBFh.
+ */
+ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
+
+ isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
+
+ /* Turn off IXOFF flow control, turn off RTS flow control */
+ isr_fcr |= (UART_EXAR654_EFR_ECB);
+ isr_fcr &= ~(UART_EXAR654_EFR_RTSDTR | UART_EXAR654_EFR_IXOFF);
+
+ writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
+
+ /* Write old LCR value back out, which turns enhanced access off */
+ writeb(lcrb, &ch->ch_cls_uart->lcr);
+
+ /* Disable interrupts for RTS flow */
+ ier &= ~(UART_EXAR654_IER_RTSDTR);
+ writeb(ier, &ch->ch_cls_uart->ier);
+
+ /* Set the usual FIFO values */
+ writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr);
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_16 |
+ UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR),
+ &ch->ch_cls_uart->isr_fcr);
+
+ ch->ch_t_tlevel = 16;
+ ch->ch_r_tlevel = 16;
+
+}
+
+
+/*
+ * cls_clear_break.
+ * Determines whether it's time to shut off the break condition.
+ *
+ * No locks are assumed to be held when calling this function.
+ * The channel lock is held and released in this function.
+ */
+static inline void cls_clear_break(struct channel_t *ch, int force)
+{
+ ulong lock_flags;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /* Bail if we aren't currently sending a break. */
+ if (!ch->ch_stop_sending_break) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+ /* Turn break off, and unset some variables */
+ if (ch->ch_flags & CH_BREAK_SENDING) {
+ if ((jiffies >= ch->ch_stop_sending_break) || force) {
+ uchar temp = readb(&ch->ch_cls_uart->lcr);
+ writeb((temp & ~UART_LCR_SBC), &ch->ch_cls_uart->lcr);
+ ch->ch_flags &= ~(CH_BREAK_SENDING);
+ ch->ch_stop_sending_break = 0;
+ DPR_IOCTL(("Finishing UART_LCR_SBC! finished: %lx\n", jiffies));
+ }
+ }
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+}
+
+
+/* Parse the ISR register for the specific port */
+static inline void cls_parse_isr(struct board_t *brd, uint port)
+{
+ struct channel_t *ch;
+ uchar isr = 0;
+ ulong lock_flags;
+
+ /*
+ * No need to verify board pointer, it was already
+ * verified in the interrupt routine.
+ */
+
+	if (port >= brd->nasync)
+ return;
+
+ ch = brd->channels[port];
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ /* Here we try to figure out what caused the interrupt to happen */
+ while (1) {
+
+ isr = readb(&ch->ch_cls_uart->isr_fcr);
+
+ /* Bail if no pending interrupt on port */
+ if (isr & UART_IIR_NO_INT) {
+ break;
+ }
+
+ DPR_INTR(("%s:%d port: %x isr: %x\n", __FILE__, __LINE__, port, isr));
+
+ /* Receive Interrupt pending */
+ if (isr & (UART_IIR_RDI | UART_IIR_RDI_TIMEOUT)) {
+ /* Read data from uart -> queue */
+ brd->intr_rx++;
+ ch->ch_intr_rx++;
+ cls_copy_data_from_uart_to_queue(ch);
+ dgnc_check_queue_flow_control(ch);
+ }
+
+ /* Transmit Hold register empty pending */
+ if (isr & UART_IIR_THRI) {
+ /* Transfer data (if any) from Write Queue -> UART. */
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+ brd->intr_tx++;
+ ch->ch_intr_tx++;
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ cls_copy_data_from_queue_to_uart(ch);
+ }
+
+ /* Received Xoff signal/Special character */
+ if (isr & UART_IIR_XOFF) {
+ /* Empty */
+ }
+
+ /* CTS/RTS change of state */
+ if (isr & UART_IIR_CTSRTS) {
+ brd->intr_modem++;
+ ch->ch_intr_modem++;
+ /*
+ * Don't need to do anything, the cls_parse_modem
+ * below will grab the updated modem signals.
+ */
+ }
+
+ /* Parse any modem signal changes */
+ DPR_INTR(("MOD_STAT: sending to parse_modem_sigs\n"));
+ cls_parse_modem(ch, readb(&ch->ch_cls_uart->msr));
+ }
+}
+
+
+/*
+ * cls_param()
+ * Send any/all changes to the line to the UART.
+ */
+static void cls_param(struct tty_struct *tty)
+{
+ uchar lcr = 0;
+ uchar uart_lcr = 0;
+ uchar ier = 0;
+ uchar uart_ier = 0;
+ uint baud = 9600;
+ int quot = 0;
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!tty || tty->magic != TTY_MAGIC) {
+ return;
+ }
+
+ un = (struct un_t *) tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC) {
+ return;
+ }
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) {
+ return;
+ }
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC) {
+ return;
+ }
+
+ DPR_PARAM(("param start: tdev: %x cflags: %x oflags: %x iflags: %x\n",
+ ch->ch_tun.un_dev, ch->ch_c_cflag, ch->ch_c_oflag, ch->ch_c_iflag));
+
+ /*
+ * If baud rate is zero, flush queues, and set mval to drop DTR.
+ */
+ if ((ch->ch_c_cflag & (CBAUD)) == 0) {
+ ch->ch_r_head = ch->ch_r_tail = 0;
+ ch->ch_e_head = ch->ch_e_tail = 0;
+ ch->ch_w_head = ch->ch_w_tail = 0;
+
+ cls_flush_uart_write(ch);
+ cls_flush_uart_read(ch);
+
+ /* The baudrate is B0 so all modem lines are to be dropped. */
+ ch->ch_flags |= (CH_BAUD0);
+ ch->ch_mostat &= ~(UART_MCR_RTS | UART_MCR_DTR);
+ cls_assert_modem_signals(ch);
+ ch->ch_old_baud = 0;
+ return;
+ } else if (ch->ch_custom_speed) {
+
+ baud = ch->ch_custom_speed;
+ /* Handle transition from B0 */
+ if (ch->ch_flags & CH_BAUD0) {
+ ch->ch_flags &= ~(CH_BAUD0);
+
+ /*
+ * Bring back up RTS and DTR...
+ * Also handle RTS or DTR toggle if set.
+ */
+ if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE))
+ ch->ch_mostat |= (UART_MCR_RTS);
+ if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE))
+ ch->ch_mostat |= (UART_MCR_DTR);
+ }
+
+ } else {
+ int iindex = 0;
+ int jindex = 0;
+
+ ulong bauds[4][16] = {
+ { /* slowbaud */
+ 0, 50, 75, 110,
+ 134, 150, 200, 300,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 },
+ { /* slowbaud & CBAUDEX */
+ 0, 57600, 115200, 230400,
+ 460800, 150, 200, 921600,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 },
+ { /* fastbaud */
+ 0, 57600, 76800, 115200,
+ 131657, 153600, 230400, 460800,
+ 921600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 },
+ { /* fastbaud & CBAUDEX */
+ 0, 57600, 115200, 230400,
+ 460800, 150, 200, 921600,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 }
+ };
+
+ /* Only use the TXPrint baud rate if the terminal unit is NOT open */
+ if (!(ch->ch_tun.un_flags & UN_ISOPEN) && (un->un_type == DGNC_PRINT))
+ baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
+ else
+ baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;
+
+ if (ch->ch_c_cflag & CBAUDEX)
+ iindex = 1;
+
+ if (ch->ch_digi.digi_flags & DIGI_FAST)
+ iindex += 2;
+
+ jindex = baud;
+
+ if ((iindex >= 0) && (iindex < 4) && (jindex >= 0) && (jindex < 16)) {
+ baud = bauds[iindex][jindex];
+ } else {
+ DPR_IOCTL(("baud indices were out of range (%d)(%d)",
+ iindex, jindex));
+ baud = 0;
+ }
+
+ if (baud == 0)
+ baud = 9600;
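+
+		/*
+		 * Worked example (editorial, not from the original source):
+		 * with CBAUDEX clear and DIGI_FAST clear, iindex stays 0; a
+		 * termios speed of B38400 (0017 octal, 15 decimal) makes
+		 * jindex 15, so bauds[0][15] selects 38400 baud.
+		 */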
+
+ /* Handle transition from B0 */
+ if (ch->ch_flags & CH_BAUD0) {
+ ch->ch_flags &= ~(CH_BAUD0);
+
+ /*
+ * Bring back up RTS and DTR...
+ * Also handle RTS or DTR toggle if set.
+ */
+ if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE))
+ ch->ch_mostat |= (UART_MCR_RTS);
+ if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE))
+ ch->ch_mostat |= (UART_MCR_DTR);
+ }
+ }
+
+ if (ch->ch_c_cflag & PARENB) {
+ lcr |= UART_LCR_PARITY;
+ }
+
+ if (!(ch->ch_c_cflag & PARODD)) {
+ lcr |= UART_LCR_EPAR;
+ }
+
+ /*
+ * Not all platforms support mark/space parity,
+ * so this will hide behind an ifdef.
+ */
+#ifdef CMSPAR
+ if (ch->ch_c_cflag & CMSPAR)
+ lcr |= UART_LCR_SPAR;
+#endif
+
+ if (ch->ch_c_cflag & CSTOPB)
+ lcr |= UART_LCR_STOP;
+
+ switch (ch->ch_c_cflag & CSIZE) {
+ case CS5:
+ lcr |= UART_LCR_WLEN5;
+ break;
+ case CS6:
+ lcr |= UART_LCR_WLEN6;
+ break;
+ case CS7:
+ lcr |= UART_LCR_WLEN7;
+ break;
+ case CS8:
+ default:
+ lcr |= UART_LCR_WLEN8;
+ break;
+ }
+
+ ier = uart_ier = readb(&ch->ch_cls_uart->ier);
+ uart_lcr = readb(&ch->ch_cls_uart->lcr);
+
+ if (baud == 0)
+ baud = 9600;
+
+ quot = ch->ch_bd->bd_dividend / baud;
+
+ if (quot != 0 && ch->ch_old_baud != baud) {
+ ch->ch_old_baud = baud;
+ writeb(UART_LCR_DLAB, &ch->ch_cls_uart->lcr);
+ writeb((quot & 0xff), &ch->ch_cls_uart->txrx);
+ writeb((quot >> 8), &ch->ch_cls_uart->ier);
+ writeb(lcr, &ch->ch_cls_uart->lcr);
+ }
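+
+	/*
+	 * Editorial note (not in the original source): while UART_LCR_DLAB is
+	 * set, the txrx and ier offsets act as the 16x5x-style divisor latch
+	 * (DLL/DLM), which is why the baud quotient bytes are written to those
+	 * registers before the real LCR value is restored.
+	 */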
+
+ if (uart_lcr != lcr)
+ writeb(lcr, &ch->ch_cls_uart->lcr);
+
+ if (ch->ch_c_cflag & CREAD) {
+ ier |= (UART_IER_RDI | UART_IER_RLSI);
+ }
+ else {
+ ier &= ~(UART_IER_RDI | UART_IER_RLSI);
+ }
+
+ /*
+ * Have the UART interrupt on modem signal changes ONLY when
+ * we are in hardware flow control mode, or CLOCAL/FORCEDCD is not set.
+ */
+ if ((ch->ch_digi.digi_flags & CTSPACE) || (ch->ch_digi.digi_flags & RTSPACE) ||
+ (ch->ch_c_cflag & CRTSCTS) || !(ch->ch_digi.digi_flags & DIGI_FORCEDCD) ||
+ !(ch->ch_c_cflag & CLOCAL))
+ {
+ ier |= UART_IER_MSI;
+ }
+ else {
+ ier &= ~UART_IER_MSI;
+ }
+
+ ier |= UART_IER_THRI;
+
+ if (ier != uart_ier)
+ writeb(ier, &ch->ch_cls_uart->ier);
+
+ if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS) {
+ cls_set_cts_flow_control(ch);
+ }
+ else if (ch->ch_c_iflag & IXON) {
+ /* If start/stop is set to disable, then we should disable flow control */
+ if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE))
+ cls_set_no_output_flow_control(ch);
+ else
+ cls_set_ixon_flow_control(ch);
+ }
+ else {
+ cls_set_no_output_flow_control(ch);
+ }
+
+ if (ch->ch_digi.digi_flags & RTSPACE || ch->ch_c_cflag & CRTSCTS) {
+ cls_set_rts_flow_control(ch);
+ }
+ else if (ch->ch_c_iflag & IXOFF) {
+ /* If start/stop is set to disable, then we should disable flow control */
+ if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE))
+ cls_set_no_input_flow_control(ch);
+ else
+ cls_set_ixoff_flow_control(ch);
+ }
+ else {
+ cls_set_no_input_flow_control(ch);
+ }
+
+ cls_assert_modem_signals(ch);
+
+ /* Get current status of the modem signals now */
+ cls_parse_modem(ch, readb(&ch->ch_cls_uart->msr));
+}
+
+
+/*
+ * Our board poller function.
+ */
+static void cls_tasklet(unsigned long data)
+{
+ struct board_t *bd = (struct board_t *) data;
+ struct channel_t *ch;
+ ulong lock_flags;
+ int i;
+ int state = 0;
+ int ports = 0;
+
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC) {
+ APR(("poll_tasklet() - NULL or bad bd.\n"));
+ return;
+ }
+
+ /* Cache a couple board values */
+ DGNC_LOCK(bd->bd_lock, lock_flags);
+ state = bd->state;
+ ports = bd->nasync;
+ DGNC_UNLOCK(bd->bd_lock, lock_flags);
+
+ /*
+ * Do NOT allow the interrupt routine to read the intr registers
+	 * until we release this lock.
+ */
+ DGNC_LOCK(bd->bd_intr_lock, lock_flags);
+
+ /*
+ * If board is ready, parse deeper to see if there is anything to do.
+ */
+ if ((state == BOARD_READY) && (ports > 0)) {
+
+ /* Loop on each port */
+ for (i = 0; i < ports; i++) {
+ ch = bd->channels[i];
+ if (!ch)
+ continue;
+
+ /*
+ * NOTE: Remember you CANNOT hold any channel
+ * locks when calling input.
+			 * During input processing, it's possible we
+ * will call ld, which might do callbacks back
+ * into us.
+ */
+ dgnc_input(ch);
+
+ /*
+ * Channel lock is grabbed and then released
+ * inside this routine.
+ */
+ cls_copy_data_from_queue_to_uart(ch);
+ dgnc_wakeup_writes(ch);
+
+ /*
+ * Check carrier function.
+ */
+ dgnc_carrier(ch);
+
+ /*
+ * The timing check of turning off the break is done
+ * inside clear_break()
+ */
+ if (ch->ch_stop_sending_break)
+ cls_clear_break(ch, 0);
+ }
+ }
+
+ DGNC_UNLOCK(bd->bd_intr_lock, lock_flags);
+
+}
+
+
+/*
+ * cls_intr()
+ *
+ * Classic specific interrupt handler.
+ */
+static irqreturn_t cls_intr(int irq, void *voidbrd)
+{
+ struct board_t *brd = (struct board_t *) voidbrd;
+ uint i = 0;
+ uchar poll_reg;
+ unsigned long lock_flags;
+
+ if (!brd) {
+ APR(("Received interrupt (%d) with null board associated\n", irq));
+ return IRQ_NONE;
+ }
+
+ /*
+	 * Check to make sure it's for us.
+ */
+ if (brd->magic != DGNC_BOARD_MAGIC) {
+ APR(("Received interrupt (%d) with a board pointer that wasn't ours!\n", irq));
+ return IRQ_NONE;
+ }
+
+ DGNC_LOCK(brd->bd_intr_lock, lock_flags);
+
+ brd->intr_count++;
+
+ /*
+	 * Check the board's global interrupt offset to see if we
+	 * actually do have an interrupt pending for us.
+ */
+ poll_reg = readb(brd->re_map_membase + UART_CLASSIC_POLL_ADDR_OFFSET);
+
+ /* If 0, no interrupts pending */
+ if (!poll_reg) {
+		DPR_INTR(("Kernel interrupted us, but no interrupts are pending...\n"));
+ DGNC_UNLOCK(brd->bd_intr_lock, lock_flags);
+ return IRQ_NONE;
+ }
+
+ DPR_INTR(("%s:%d poll_reg: %x\n", __FILE__, __LINE__, poll_reg));
+
+ /* Parse each port to find out what caused the interrupt */
+ for (i = 0; i < brd->nasync; i++) {
+ cls_parse_isr(brd, i);
+ }
+
+ /*
+	 * Schedule the tasklet to do more in-depth servicing at a better time.
+ */
+ tasklet_schedule(&brd->helper_tasklet);
+
+ DGNC_UNLOCK(brd->bd_intr_lock, lock_flags);
+
+ DPR_INTR(("dgnc_intr finish.\n"));
+ return IRQ_HANDLED;
+}
+
+
+static void cls_disable_receiver(struct channel_t *ch)
+{
+ uchar tmp = readb(&ch->ch_cls_uart->ier);
+ tmp &= ~(UART_IER_RDI);
+ writeb(tmp, &ch->ch_cls_uart->ier);
+}
+
+
+static void cls_enable_receiver(struct channel_t *ch)
+{
+ uchar tmp = readb(&ch->ch_cls_uart->ier);
+ tmp |= (UART_IER_RDI);
+ writeb(tmp, &ch->ch_cls_uart->ier);
+}
+
+
+static void cls_copy_data_from_uart_to_queue(struct channel_t *ch)
+{
+ int qleft = 0;
+ uchar linestatus = 0;
+ uchar error_mask = 0;
+ ushort head;
+ ushort tail;
+ ulong lock_flags;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /* cache head and tail of queue */
+ head = ch->ch_r_head;
+ tail = ch->ch_r_tail;
+
+ /* Store how much space we have left in the queue */
+ if ((qleft = tail - head - 1) < 0)
+ qleft += RQUEUEMASK + 1;
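+
+	/*
+	 * Editorial note (not in the original source): RQUEUEMASK + 1 is the
+	 * (power-of-two) queue size, so when head == tail the queue is empty
+	 * and qleft works out to RQUEUEMASK; one slot is always left unused
+	 * so that a full queue can be told apart from an empty one.
+	 */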
+
+ /*
+ * Create a mask to determine whether we should
+ * insert the character (if any) into our queue.
+ */
+ if (ch->ch_c_iflag & IGNBRK)
+ error_mask |= UART_LSR_BI;
+
+ while (1) {
+ linestatus = readb(&ch->ch_cls_uart->lsr);
+
+ if (!(linestatus & (UART_LSR_DR)))
+ break;
+
+ /*
+ * Discard character if we are ignoring the error mask.
+ */
+ if (linestatus & error_mask) {
+ uchar discard;
+ linestatus = 0;
+ discard = readb(&ch->ch_cls_uart->txrx);
+ continue;
+ }
+
+ /*
+ * If our queue is full, we have no choice but to drop some data.
+ * The assumption is that HWFLOW or SWFLOW should have stopped
+ * things way way before we got to this point.
+ *
+ * I decided that I wanted to ditch the oldest data first,
+	 * I hope that's okay with everyone? Yes? Good.
+ */
+ while (qleft < 1) {
+ DPR_READ(("Queue full, dropping DATA:%x LSR:%x\n",
+ ch->ch_rqueue[tail], ch->ch_equeue[tail]));
+
+ ch->ch_r_tail = tail = (tail + 1) & RQUEUEMASK;
+ ch->ch_err_overrun++;
+ qleft++;
+ }
+
+ ch->ch_equeue[head] = linestatus & (UART_LSR_BI | UART_LSR_PE | UART_LSR_FE);
+ ch->ch_rqueue[head] = readb(&ch->ch_cls_uart->txrx);
+ dgnc_sniff_nowait_nolock(ch, "UART READ", ch->ch_rqueue + head, 1);
+
+ qleft--;
+
+ DPR_READ(("DATA/LSR pair: %x %x\n", ch->ch_rqueue[head], ch->ch_equeue[head]));
+
+ if (ch->ch_equeue[head] & UART_LSR_PE)
+ ch->ch_err_parity++;
+ if (ch->ch_equeue[head] & UART_LSR_BI)
+ ch->ch_err_break++;
+ if (ch->ch_equeue[head] & UART_LSR_FE)
+ ch->ch_err_frame++;
+
+ /* Add to, and flip head if needed */
+ head = (head + 1) & RQUEUEMASK;
+ ch->ch_rxcount++;
+ }
+
+ /*
+ * Write new final heads to channel structure.
+ */
+ ch->ch_r_head = head & RQUEUEMASK;
+ ch->ch_e_head = head & EQUEUEMASK;
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+}
+
+
+/*
+ * This function basically goes to sleep for the given number of seconds, or until
+ * it gets signalled that the port has fully drained.
+ */
+static int cls_drain(struct tty_struct *tty, uint seconds)
+{
+ ulong lock_flags;
+ struct channel_t *ch;
+ struct un_t *un;
+ int rc = 0;
+
+ if (!tty || tty->magic != TTY_MAGIC) {
+ return (-ENXIO);
+ }
+
+ un = (struct un_t *) tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC) {
+ return (-ENXIO);
+ }
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) {
+ return (-ENXIO);
+ }
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ un->un_flags |= UN_EMPTY;
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ /*
+ * NOTE: Do something with time passed in.
+ */
+ rc = wait_event_interruptible(un->un_flags_wait, ((un->un_flags & UN_EMPTY) == 0));
+
+ /* If ret is non-zero, user ctrl-c'ed us */
+ if (rc)
+ DPR_IOCTL(("%d Drain - User ctrl c'ed\n", __LINE__));
+
+ return (rc);
+}
+
+
+/* Channel lock MUST be held before calling this function! */
+static void cls_flush_uart_write(struct channel_t *ch)
+{
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) {
+ return;
+ }
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT), &ch->ch_cls_uart->isr_fcr);
+ udelay(10);
+
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+}
+
+
+/* Channel lock MUST be held before calling this function! */
+static void cls_flush_uart_read(struct channel_t *ch)
+{
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) {
+ return;
+ }
+
+ /*
+ * For complete POSIX compatibility, we should be purging the
+ * read FIFO in the UART here.
+ *
+	 * However, the statement below also incorrectly flushes
+	 * write data and basically trashes the FIFO.
+ *
+ * I believe this is a BUG in this UART.
+ * So for now, we will leave the code #ifdef'ed out...
+ */
+#if 0
+ writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR), &ch->ch_cls_uart->isr_fcr);
+#endif
+ udelay(10);
+}
+
+
+static void cls_copy_data_from_queue_to_uart(struct channel_t *ch)
+{
+ ushort head;
+ ushort tail;
+ int n;
+ int qlen;
+ uint len_written = 0;
+ ulong lock_flags;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /* No data to write to the UART */
+ if (ch->ch_w_tail == ch->ch_w_head) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+ /* If port is "stopped", don't send any data to the UART */
+ if ((ch->ch_flags & CH_FORCED_STOP) || (ch->ch_flags & CH_BREAK_SENDING)) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+ if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM))) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+ n = 32;
+
+ /* cache head and tail of queue */
+ head = ch->ch_w_head & WQUEUEMASK;
+ tail = ch->ch_w_tail & WQUEUEMASK;
+ qlen = (head - tail) & WQUEUEMASK;
+
+ /* Find minimum of the FIFO space, versus queue length */
+ n = min(n, qlen);
+
+ while (n > 0) {
+
+ /*
+ * If RTS Toggle mode is on, turn on RTS now if not already set,
+ * and make sure we get an event when the data transfer has completed.
+ */
+ if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
+ if (!(ch->ch_mostat & UART_MCR_RTS)) {
+ ch->ch_mostat |= (UART_MCR_RTS);
+ cls_assert_modem_signals(ch);
+ }
+ ch->ch_tun.un_flags |= (UN_EMPTY);
+ }
+
+ /*
+ * If DTR Toggle mode is on, turn on DTR now if not already set,
+ * and make sure we get an event when the data transfer has completed.
+ */
+ if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
+ if (!(ch->ch_mostat & UART_MCR_DTR)) {
+ ch->ch_mostat |= (UART_MCR_DTR);
+ cls_assert_modem_signals(ch);
+ }
+ ch->ch_tun.un_flags |= (UN_EMPTY);
+ }
+ writeb(ch->ch_wqueue[ch->ch_w_tail], &ch->ch_cls_uart->txrx);
+ dgnc_sniff_nowait_nolock(ch, "UART WRITE", ch->ch_wqueue + ch->ch_w_tail, 1);
+ DPR_WRITE(("Tx data: %x\n", ch->ch_wqueue[ch->ch_w_tail]));
+ ch->ch_w_tail++;
+ ch->ch_w_tail &= WQUEUEMASK;
+ ch->ch_txcount++;
+ len_written++;
+ n--;
+ }
+
+ if (len_written > 0)
+ ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ return;
+}
+
+
+static void cls_parse_modem(struct channel_t *ch, uchar signals)
+{
+ volatile uchar msignals = signals;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ DPR_MSIGS(("cls_parse_modem: port: %d signals: %d\n", ch->ch_portnum, msignals));
+
+ /*
+ * Do altpin switching. Altpin switches DCD and DSR.
+	 * This probably breaks DSRPACE, so we should be more clever here.
+ */
+ if (ch->ch_digi.digi_flags & DIGI_ALTPIN) {
+ uchar mswap = signals;
+ if (mswap & UART_MSR_DDCD) {
+ msignals &= ~UART_MSR_DDCD;
+ msignals |= UART_MSR_DDSR;
+ }
+ if (mswap & UART_MSR_DDSR) {
+ msignals &= ~UART_MSR_DDSR;
+ msignals |= UART_MSR_DDCD;
+ }
+ if (mswap & UART_MSR_DCD) {
+ msignals &= ~UART_MSR_DCD;
+ msignals |= UART_MSR_DSR;
+ }
+ if (mswap & UART_MSR_DSR) {
+ msignals &= ~UART_MSR_DSR;
+ msignals |= UART_MSR_DCD;
+ }
+ }
+
+	/* Scrub off lower bits. They signify deltas, which I don't care about */
+ signals &= 0xf0;
+
+ if (msignals & UART_MSR_DCD)
+ ch->ch_mistat |= UART_MSR_DCD;
+ else
+ ch->ch_mistat &= ~UART_MSR_DCD;
+
+ if (msignals & UART_MSR_DSR)
+ ch->ch_mistat |= UART_MSR_DSR;
+ else
+ ch->ch_mistat &= ~UART_MSR_DSR;
+
+ if (msignals & UART_MSR_RI)
+ ch->ch_mistat |= UART_MSR_RI;
+ else
+ ch->ch_mistat &= ~UART_MSR_RI;
+
+ if (msignals & UART_MSR_CTS)
+ ch->ch_mistat |= UART_MSR_CTS;
+ else
+ ch->ch_mistat &= ~UART_MSR_CTS;
+
+
+ DPR_MSIGS(("Port: %d DTR: %d RTS: %d CTS: %d DSR: %d " "RI: %d CD: %d\n",
+ ch->ch_portnum,
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_DTR),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_RTS),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_CTS),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_DSR),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_RI),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_DCD)));
+}
+
+
+/* Make the UART raise any of the output signals we want up */
+static void cls_assert_modem_signals(struct channel_t *ch)
+{
+ uchar out;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ out = ch->ch_mostat;
+
+ if (ch->ch_flags & CH_LOOPBACK)
+ out |= UART_MCR_LOOP;
+
+ writeb(out, &ch->ch_cls_uart->mcr);
+
+ /* Give time for the UART to actually drop the signals */
+ udelay(10);
+}
+
+
+static void cls_send_start_character(struct channel_t *ch)
+{
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ if (ch->ch_startc != _POSIX_VDISABLE) {
+ ch->ch_xon_sends++;
+ writeb(ch->ch_startc, &ch->ch_cls_uart->txrx);
+ }
+}
+
+
+static void cls_send_stop_character(struct channel_t *ch)
+{
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ if (ch->ch_stopc != _POSIX_VDISABLE) {
+ ch->ch_xoff_sends++;
+ writeb(ch->ch_stopc, &ch->ch_cls_uart->txrx);
+ }
+}
+
+
+/* Inits UART */
+static void cls_uart_init(struct channel_t *ch)
+{
+ uchar lcrb = readb(&ch->ch_cls_uart->lcr);
+ uchar isr_fcr = 0;
+
+ writeb(0, &ch->ch_cls_uart->ier);
+
+ /*
+ * The Enhanced Register Set may only be accessed when
+ * the Line Control Register is set to 0xBFh.
+ */
+ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
+
+ isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
+
+ /* Turn on Enhanced/Extended controls */
+ isr_fcr |= (UART_EXAR654_EFR_ECB);
+
+ writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
+
+ /* Write old LCR value back out, which turns enhanced access off */
+ writeb(lcrb, &ch->ch_cls_uart->lcr);
+
+ /* Clear out UART and FIFO */
+ readb(&ch->ch_cls_uart->txrx);
+
+ writeb((UART_FCR_ENABLE_FIFO|UART_FCR_CLEAR_RCVR|UART_FCR_CLEAR_XMIT), &ch->ch_cls_uart->isr_fcr);
+ udelay(10);
+
+ ch->ch_flags |= (CH_FIFO_ENABLED | CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+
+ readb(&ch->ch_cls_uart->lsr);
+ readb(&ch->ch_cls_uart->msr);
+}
+
+
+/*
+ * Turns off UART.
+ */
+static void cls_uart_off(struct channel_t *ch)
+{
+ writeb(0, &ch->ch_cls_uart->ier);
+}
+
+
+/*
+ * cls_get_uart_bytes_left.
+ * Returns 0 if nothing is left in the FIFO, returns 1 otherwise.
+ *
+ * The channel lock MUST be held by the calling function.
+ */
+static uint cls_get_uart_bytes_left(struct channel_t *ch)
+{
+ uchar left = 0;
+ uchar lsr = 0;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return 0;
+
+ lsr = readb(&ch->ch_cls_uart->lsr);
+
+ /* Determine whether the Transmitter is empty or not */
+ if (!(lsr & UART_LSR_TEMT)) {
+ if (ch->ch_flags & CH_TX_FIFO_EMPTY) {
+ tasklet_schedule(&ch->ch_bd->helper_tasklet);
+ }
+ left = 1;
+ }
+ else {
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+ left = 0;
+ }
+
+ return left;
+}
+
+
+/*
+ * cls_send_break.
+ * Starts sending a break through the UART.
+ *
+ * The channel lock MUST be held by the calling function.
+ */
+static void cls_send_break(struct channel_t *ch, int msecs)
+{
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ /*
+ * If we receive a time of 0, this means turn off the break.
+ */
+ if (msecs == 0) {
+ /* Turn break off, and unset some variables */
+ if (ch->ch_flags & CH_BREAK_SENDING) {
+ uchar temp = readb(&ch->ch_cls_uart->lcr);
+ writeb((temp & ~UART_LCR_SBC), &ch->ch_cls_uart->lcr);
+ ch->ch_flags &= ~(CH_BREAK_SENDING);
+ ch->ch_stop_sending_break = 0;
+ DPR_IOCTL(("Finishing UART_LCR_SBC! finished: %lx\n", jiffies));
+ }
+ return;
+ }
+
+ /*
+ * Set the time we should stop sending the break.
+ * If we are already sending a break, toss away the existing
+ * time to stop, and use this new value instead.
+ */
+ ch->ch_stop_sending_break = jiffies + dgnc_jiffies_from_ms(msecs);
+
+ /* Tell the UART to start sending the break */
+ if (!(ch->ch_flags & CH_BREAK_SENDING)) {
+ uchar temp = readb(&ch->ch_cls_uart->lcr);
+ writeb((temp | UART_LCR_SBC), &ch->ch_cls_uart->lcr);
+ ch->ch_flags |= (CH_BREAK_SENDING);
+ DPR_IOCTL(("Port %d. Starting UART_LCR_SBC! start: %lx should end: %lx\n",
+ ch->ch_portnum, jiffies, ch->ch_stop_sending_break));
+ }
+}
+
+
+/*
+ * cls_send_immediate_char.
+ * Sends a specific character as soon as possible to the UART,
+ * jumping over any bytes that might be in the write queue.
+ *
+ * The channel lock MUST be held by the calling function.
+ */
+static void cls_send_immediate_char(struct channel_t *ch, unsigned char c)
+{
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ writeb(c, &ch->ch_cls_uart->txrx);
+}
+
+static void cls_vpd(struct board_t *brd)
+{
+ ulong vpdbase; /* Start of io base of the card */
+ u8 __iomem *re_map_vpdbase;/* Remapped memory of the card */
+ int i = 0;
+
+
+ vpdbase = pci_resource_start(brd->pdev, 3);
+
+ /* No VPD */
+ if (!vpdbase)
+ return;
+
+ re_map_vpdbase = ioremap(vpdbase, 0x400);
+
+ if (!re_map_vpdbase)
+ return;
+
+ /* Store the VPD into our buffer */
+ for (i = 0; i < 0x40; i++) {
+ brd->vpd[i] = readb(re_map_vpdbase + i);
+ printk("%x ", brd->vpd[i]);
+ }
+ printk("\n");
+
+ if (re_map_vpdbase)
+ iounmap(re_map_vpdbase);
+}
+
diff --git a/drivers/staging/dgnc/dgnc_cls.h b/drivers/staging/dgnc/dgnc_cls.h
new file mode 100644
index 0000000..ffe8535
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_cls.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ *
+ */
+
+#ifndef __DGNC_CLS_H
+#define __DGNC_CLS_H
+
+#include "dgnc_types.h"
+
+
+/************************************************************************
+ * Per channel/port Classic UART structure *
+ ************************************************************************
+ * Base Structure Entries Usage Meanings to Host *
+ * *
+ * W = read write R = read only *
+ * U = Unused. *
+ ************************************************************************/
+
+struct cls_uart_struct {
+ u8 txrx; /* WR RHR/THR - Holding Reg */
+ u8 ier; /* WR IER - Interrupt Enable Reg */
+ u8 isr_fcr; /* WR ISR/FCR - Interrupt Status Reg/Fifo Control Reg */
+ u8 lcr; /* WR LCR - Line Control Reg */
+ u8 mcr; /* WR MCR - Modem Control Reg */
+ u8 lsr; /* WR LSR - Line Status Reg */
+ u8 msr; /* WR MSR - Modem Status Reg */
+ u8 spr; /* WR SPR - Scratch Pad Reg */
+};
+
+/* Where to read the interrupt register (8bits) */
+#define UART_CLASSIC_POLL_ADDR_OFFSET 0x40
+
+#define UART_EXAR654_ENHANCED_REGISTER_SET 0xBF
+
+#define UART_16654_FCR_TXTRIGGER_8 0x0
+#define UART_16654_FCR_TXTRIGGER_16 0x10
+#define UART_16654_FCR_TXTRIGGER_32 0x20
+#define UART_16654_FCR_TXTRIGGER_56 0x30
+
+#define UART_16654_FCR_RXTRIGGER_8 0x0
+#define UART_16654_FCR_RXTRIGGER_16 0x40
+#define UART_16654_FCR_RXTRIGGER_56 0x80
+#define UART_16654_FCR_RXTRIGGER_60 0xC0
+
+#define UART_IIR_XOFF 0x10 /* Received Xoff signal/Special character */
+#define UART_IIR_CTSRTS 0x20 /* Received CTS/RTS change of state */
+#define UART_IIR_RDI_TIMEOUT 0x0C /* Receiver data TIMEOUT */
+
+/*
+ * These are the EXTENDED definitions for the Exar 654's Interrupt
+ * Enable Register.
+ */
+#define UART_EXAR654_EFR_ECB 0x10 /* Enhanced control bit */
+#define UART_EXAR654_EFR_IXON 0x2 /* Receiver compares Xon1/Xoff1 */
+#define UART_EXAR654_EFR_IXOFF 0x8 /* Transmit Xon1/Xoff1 */
+#define UART_EXAR654_EFR_RTSDTR 0x40 /* Auto RTS/DTR Flow Control Enable */
+#define UART_EXAR654_EFR_CTSDSR 0x80 /* Auto CTS/DSR Flow Control Enable */
+
+#define UART_EXAR654_XOFF_DETECT 0x1 /* Indicates whether chip saw an incoming XOFF char */
+#define UART_EXAR654_XON_DETECT 0x2 /* Indicates whether chip saw an incoming XON char */
+
+#define UART_EXAR654_IER_XOFF 0x20 /* Xoff Interrupt Enable */
+#define UART_EXAR654_IER_RTSDTR 0x40 /* Output Interrupt Enable */
+#define UART_EXAR654_IER_CTSDSR 0x80 /* Input Interrupt Enable */
+
+/*
+ * Our Global Variables
+ */
+extern struct board_ops dgnc_cls_ops;
+
+#endif
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
new file mode 100644
index 0000000..71d2b83
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_driver.c
@@ -0,0 +1,958 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
+#include <linux/sched.h>
+#endif
+
+#include "dgnc_driver.h"
+#include "dgnc_pci.h"
+#include "dpacompat.h"
+#include "dgnc_mgmt.h"
+#include "dgnc_tty.h"
+#include "dgnc_trace.h"
+#include "dgnc_cls.h"
+#include "dgnc_neo.h"
+#include "dgnc_sysfs.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Digi International, http://www.digi.com");
+MODULE_DESCRIPTION("Driver for the Digi International Neo and Classic PCI based product line");
+MODULE_SUPPORTED_DEVICE("dgnc");
+
+/*
+ * insmod command line overrideable parameters
+ *
+ * NOTE: we use a set of macros to create the variables, which allows
+ * us to specify the variable type, name, initial value, and description.
+ */
+PARM_INT(debug, 0x00, 0644, "Driver debugging level");
+PARM_INT(rawreadok, 1, 0644, "Bypass flip buffers on input");
+PARM_INT(trcbuf_size, 0x100000, 0644, "Debugging trace buffer size.");
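+
+/*
+ * For illustration only (PARM_INT is defined in a header outside this hunk):
+ * the macro presumably expands to something along the lines of
+ *
+ *	int debug = 0x00;
+ *	module_param(debug, int, 0644);
+ *	MODULE_PARM_DESC(debug, "Driver debugging level");
+ */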
+
+/**************************************************************************
+ *
+ * protos for this file
+ *
+ */
+static int dgnc_start(void);
+static int dgnc_finalize_board_init(struct board_t *brd);
+static void dgnc_init_globals(void);
+static int dgnc_found_board(struct pci_dev *pdev, int id);
+static void dgnc_cleanup_board(struct board_t *brd);
+static void dgnc_poll_handler(ulong dummy);
+static int dgnc_init_pci(void);
+static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void dgnc_remove_one(struct pci_dev *dev);
+static int dgnc_probe1(struct pci_dev *pdev, int card_type);
+static void dgnc_do_remap(struct board_t *brd);
+
+/* Driver load/unload functions */
+int dgnc_init_module(void);
+void dgnc_cleanup_module(void);
+
+module_init(dgnc_init_module);
+module_exit(dgnc_cleanup_module);
+
+
+/*
+ * File operations permitted on Control/Management major.
+ */
+static struct file_operations dgnc_BoardFops =
+{
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = dgnc_mgmt_ioctl,
+ .open = dgnc_mgmt_open,
+ .release = dgnc_mgmt_close
+};
+
+
+/*
+ * Globals
+ */
+uint dgnc_NumBoards;
+struct board_t *dgnc_Board[MAXBOARDS];
+DEFINE_SPINLOCK(dgnc_global_lock);
+int dgnc_driver_state = DRIVER_INITIALIZED;
+ulong dgnc_poll_counter;
+uint dgnc_Major;
+int dgnc_poll_tick = 20; /* Poll interval - 20 ms */
+
+/*
+ * Static vars.
+ */
+static uint dgnc_Major_Control_Registered = FALSE;
+static uint dgnc_driver_start = FALSE;
+
+static struct class *dgnc_class;
+
+/*
+ * Poller stuff
+ */
+static DEFINE_SPINLOCK(dgnc_poll_lock); /* Poll scheduling lock */
+static ulong dgnc_poll_time; /* Time of next poll */
+static uint dgnc_poll_stop; /* Used to tell poller to stop */
+static struct timer_list dgnc_poll_timer;
+
+
+static struct pci_device_id dgnc_pci_tbl[] = {
+ { DIGI_VID, PCI_DEVICE_CLASSIC_4_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { DIGI_VID, PCI_DEVICE_CLASSIC_4_422_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ { DIGI_VID, PCI_DEVICE_CLASSIC_8_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
+ { DIGI_VID, PCI_DEVICE_CLASSIC_8_422_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
+ { DIGI_VID, PCI_DEVICE_NEO_4_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+ { DIGI_VID, PCI_DEVICE_NEO_8_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
+ { DIGI_VID, PCI_DEVICE_NEO_2DB9_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
+ { DIGI_VID, PCI_DEVICE_NEO_2DB9PRI_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
+ { DIGI_VID, PCI_DEVICE_NEO_2RJ45_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
+ { DIGI_VID, PCI_DEVICE_NEO_2RJ45PRI_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
+ { DIGI_VID, PCI_DEVICE_NEO_1_422_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
+ { DIGI_VID, PCI_DEVICE_NEO_1_422_485_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
+ { DIGI_VID, PCI_DEVICE_NEO_2_422_485_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
+ { DIGI_VID, PCI_DEVICE_NEO_EXPRESS_8_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 13 },
+ { DIGI_VID, PCI_DEVICE_NEO_EXPRESS_4_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 14 },
+ { DIGI_VID, PCI_DEVICE_NEO_EXPRESS_4RJ45_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 15 },
+ { DIGI_VID, PCI_DEVICE_NEO_EXPRESS_8RJ45_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 16 },
+ {0,} /* 0 terminated list. */
+};
+MODULE_DEVICE_TABLE(pci, dgnc_pci_tbl);
+
+struct board_id {
+ uchar *name;
+ uint maxports;
+ unsigned int is_pci_express;
+};
+
+static struct board_id dgnc_Ids[] =
+{
+ { PCI_DEVICE_CLASSIC_4_PCI_NAME, 4, 0 },
+ { PCI_DEVICE_CLASSIC_4_422_PCI_NAME, 4, 0 },
+ { PCI_DEVICE_CLASSIC_8_PCI_NAME, 8, 0 },
+ { PCI_DEVICE_CLASSIC_8_422_PCI_NAME, 8, 0 },
+ { PCI_DEVICE_NEO_4_PCI_NAME, 4, 0 },
+ { PCI_DEVICE_NEO_8_PCI_NAME, 8, 0 },
+ { PCI_DEVICE_NEO_2DB9_PCI_NAME, 2, 0 },
+ { PCI_DEVICE_NEO_2DB9PRI_PCI_NAME, 2, 0 },
+ { PCI_DEVICE_NEO_2RJ45_PCI_NAME, 2, 0 },
+ { PCI_DEVICE_NEO_2RJ45PRI_PCI_NAME, 2, 0 },
+ { PCI_DEVICE_NEO_1_422_PCI_NAME, 1, 0 },
+ { PCI_DEVICE_NEO_1_422_485_PCI_NAME, 1, 0 },
+ { PCI_DEVICE_NEO_2_422_485_PCI_NAME, 2, 0 },
+ { PCI_DEVICE_NEO_EXPRESS_8_PCI_NAME, 8, 1 },
+ { PCI_DEVICE_NEO_EXPRESS_4_PCI_NAME, 4, 1 },
+ { PCI_DEVICE_NEO_EXPRESS_4RJ45_PCI_NAME, 4, 1 },
+ { PCI_DEVICE_NEO_EXPRESS_8RJ45_PCI_NAME, 8, 1 },
+ { NULL, 0, 0 }
+};
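+
+/*
+ * Editorial note: the last field of each dgnc_pci_tbl entry above is
+ * driver_data, which dgnc_init_one() hands to dgnc_found_board() as "id"
+ * and which is then used as an index into dgnc_Ids[].  The two tables must
+ * therefore stay in the same order; for example, driver_data 13 selects
+ * PCI_DEVICE_NEO_EXPRESS_8_PCI_NAME with maxports = 8 and is_pci_express = 1.
+ */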
+
+static struct pci_driver dgnc_driver = {
+ .name = "dgnc",
+ .probe = dgnc_init_one,
+ .id_table = dgnc_pci_tbl,
+ .remove = dgnc_remove_one,
+};
+
+
+char *dgnc_state_text[] = {
+ "Board Failed",
+ "Board Found",
+ "Board READY",
+};
+
+char *dgnc_driver_state_text[] = {
+ "Driver Initialized",
+ "Driver Ready."
+};
+
+
+
+/************************************************************************
+ *
+ * Driver load/unload functions
+ *
+ ************************************************************************/
+
+
+/*
+ * init_module()
+ *
+ * Module load. This is where it all starts.
+ */
+int dgnc_init_module(void)
+{
+ int rc = 0;
+
+ APR(("%s, Digi International Part Number %s\n", DG_NAME, DG_PART));
+
+ /*
+ * Initialize global stuff
+ */
+ rc = dgnc_start();
+
+ if (rc < 0) {
+ return(rc);
+ }
+
+ /*
+ * Find and configure all the cards
+ */
+ rc = dgnc_init_pci();
+
+ /*
+ * If something went wrong in the scan, bail out of driver.
+ */
+ if (rc < 0) {
+ /* Only unregister the pci driver if it was actually registered. */
+ if (dgnc_NumBoards)
+ pci_unregister_driver(&dgnc_driver);
+ else
+ printk("WARNING: dgnc driver load failed. No Digi Neo or Classic boards found.\n");
+
+ dgnc_cleanup_module();
+ }
+ else {
+ dgnc_create_driver_sysfiles(&dgnc_driver);
+ }
+
+ DPR_INIT(("Finished init_module. Returning %d\n", rc));
+ return (rc);
+}
+
+
+/*
+ * Start of driver.
+ */
+static int dgnc_start(void)
+{
+ int rc = 0;
+ unsigned long flags;
+
+ if (dgnc_driver_start == FALSE) {
+
+ dgnc_driver_start = TRUE;
+
+ /* make sure that the globals are init'd before we do anything else */
+ dgnc_init_globals();
+
+ dgnc_NumBoards = 0;
+
+ APR(("For the tools package or updated drivers please visit http://www.digi.com\n"));
+
+ /*
+ * Register our base character device into the kernel.
+ * This allows the download daemon to connect to the downld device
+ * before any of the boards are init'ed.
+ */
+ if (!dgnc_Major_Control_Registered) {
+ /*
+ * Register management/dpa devices
+ */
+ rc = register_chrdev(0, "dgnc", &dgnc_BoardFops);
+ if (rc <= 0) {
+ APR(("Can't register dgnc driver device (%d)\n", rc));
+ rc = -ENXIO;
+ return(rc);
+ }
+ dgnc_Major = rc;
+
+ dgnc_class = class_create(THIS_MODULE, "dgnc_mgmt");
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ device_create_drvdata(dgnc_class, NULL,
+ MKDEV(dgnc_Major, 0),
+ NULL, "dgnc_mgmt");
+#else
+ device_create(dgnc_class, NULL,
+ MKDEV(dgnc_Major, 0),
+ NULL, "dgnc_mgmt");
+#endif
+
+ dgnc_Major_Control_Registered = TRUE;
+ }
+
+ /*
+ * Init any global tty stuff.
+ */
+ rc = dgnc_tty_preinit();
+
+ if (rc < 0) {
+ APR(("tty preinit - not enough memory (%d)\n", rc));
+ return(rc);
+ }
+
+ /* Start the poller */
+ DGNC_LOCK(dgnc_poll_lock, flags);
+ init_timer(&dgnc_poll_timer);
+ dgnc_poll_timer.function = dgnc_poll_handler;
+ dgnc_poll_timer.data = 0;
+ dgnc_poll_time = jiffies + dgnc_jiffies_from_ms(dgnc_poll_tick);
+ dgnc_poll_timer.expires = dgnc_poll_time;
+ DGNC_UNLOCK(dgnc_poll_lock, flags);
+
+ add_timer(&dgnc_poll_timer);
+
+ dgnc_driver_state = DRIVER_READY;
+ }
+
+ return(rc);
+}
+
+/*
+ * Register the PCI driver.  Boards are counted in dgnc_init_one() as the
+ * PCI core probes each matching device.
+ */
+static int dgnc_init_pci(void)
+{
+ return pci_register_driver(&dgnc_driver);
+}
+
+
+/* returns 0 on success, or negative on error */
+static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int rc;
+
+ /* wake up and enable device */
+ rc = pci_enable_device(pdev);
+
+ if (rc < 0) {
+ rc = -EIO;
+ } else {
+ rc = dgnc_probe1(pdev, ent->driver_data);
+ if (rc == 0) {
+ dgnc_NumBoards++;
+ DPR_INIT(("Incrementing numboards to %d\n", dgnc_NumBoards));
+ }
+ }
+ return rc;
+}
+
+static int dgnc_probe1(struct pci_dev *pdev, int card_type)
+{
+ return dgnc_found_board(pdev, card_type);
+}
+
+
+static void dgnc_remove_one(struct pci_dev *dev)
+{
+ /* Do Nothing */
+}
+
+/*
+ * dgnc_cleanup_module()
+ *
+ * Module unload. This is where it all ends.
+ */
+void dgnc_cleanup_module(void)
+{
+ int i;
+ ulong lock_flags;
+
+ DGNC_LOCK(dgnc_poll_lock, lock_flags);
+ dgnc_poll_stop = 1;
+ DGNC_UNLOCK(dgnc_poll_lock, lock_flags);
+
+ /* Turn off poller right away. */
+ del_timer_sync(&dgnc_poll_timer);
+
+ dgnc_remove_driver_sysfiles(&dgnc_driver);
+
+ if (dgnc_Major_Control_Registered) {
+ device_destroy(dgnc_class, MKDEV(dgnc_Major, 0));
+ class_destroy(dgnc_class);
+ unregister_chrdev(dgnc_Major, "dgnc");
+ }
+
+ for (i = 0; i < dgnc_NumBoards; ++i) {
+ dgnc_remove_ports_sysfiles(dgnc_Board[i]);
+ dgnc_tty_uninit(dgnc_Board[i]);
+ dgnc_cleanup_board(dgnc_Board[i]);
+ }
+
+ dgnc_tty_post_uninit();
+
+#if defined(DGNC_TRACER)
+ /* last thing, make sure we release the tracebuffer */
+ dgnc_tracer_free();
+#endif
+ if (dgnc_NumBoards)
+ pci_unregister_driver(&dgnc_driver);
+}
+
+
+/*
+ * dgnc_cleanup_board()
+ *
+ * Free all the memory associated with a board
+ */
+static void dgnc_cleanup_board(struct board_t *brd)
+{
+ int i = 0;
+
+ if(!brd || brd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ switch (brd->device) {
+ case PCI_DEVICE_CLASSIC_4_DID:
+ case PCI_DEVICE_CLASSIC_8_DID:
+ case PCI_DEVICE_CLASSIC_4_422_DID:
+ case PCI_DEVICE_CLASSIC_8_422_DID:
+
+ /* Tell card not to interrupt anymore. */
+ outb(0, brd->iobase + 0x4c);
+ break;
+
+ default:
+ break;
+ }
+
+ if (brd->irq)
+ free_irq(brd->irq, brd);
+
+ tasklet_kill(&brd->helper_tasklet);
+
+ if (brd->re_map_membase) {
+ iounmap(brd->re_map_membase);
+ brd->re_map_membase = NULL;
+ }
+
+ if (brd->msgbuf_head) {
+ unsigned long flags;
+
+ DGNC_LOCK(dgnc_global_lock, flags);
+ brd->msgbuf = NULL;
+ printk("%s", brd->msgbuf_head);
+ kfree(brd->msgbuf_head);
+ brd->msgbuf_head = NULL;
+ DGNC_UNLOCK(dgnc_global_lock, flags);
+ }
+
+ /* Free all allocated channels structs */
+ for (i = 0; i < MAXPORTS ; i++) {
+ if (brd->channels[i]) {
+ if (brd->channels[i]->ch_rqueue)
+ kfree(brd->channels[i]->ch_rqueue);
+ if (brd->channels[i]->ch_equeue)
+ kfree(brd->channels[i]->ch_equeue);
+ if (brd->channels[i]->ch_wqueue)
+ kfree(brd->channels[i]->ch_wqueue);
+
+ kfree(brd->channels[i]);
+ brd->channels[i] = NULL;
+ }
+ }
+
+ if (brd->flipbuf)
+ kfree(brd->flipbuf);
+
+ dgnc_Board[brd->boardnum] = NULL;
+
+ kfree(brd);
+}
+
+
+/*
+ * dgnc_found_board()
+ *
+ * A board has been found, init it.
+ */
+static int dgnc_found_board(struct pci_dev *pdev, int id)
+{
+ struct board_t *brd;
+ unsigned int pci_irq;
+ int i = 0;
+ int rc = 0;
+ unsigned long flags;
+
+ /* get the board structure and prep it */
+ brd = dgnc_Board[dgnc_NumBoards] =
+ (struct board_t *) kzalloc(sizeof(struct board_t), GFP_KERNEL);
+ if (!brd) {
+ APR(("memory allocation for board structure failed\n"));
+ return(-ENOMEM);
+ }
+
+ /* make a temporary message buffer for the boot messages */
+ brd->msgbuf = brd->msgbuf_head =
+ (char *) kzalloc(sizeof(char) * 8192, GFP_KERNEL);
+ if (!brd->msgbuf) {
+ kfree(brd);
+ APR(("memory allocation for board msgbuf failed\n"));
+ return(-ENOMEM);
+ }
+
+ /* store the info for the board we've found */
+ brd->magic = DGNC_BOARD_MAGIC;
+ brd->boardnum = dgnc_NumBoards;
+ brd->vendor = dgnc_pci_tbl[id].vendor;
+ brd->device = dgnc_pci_tbl[id].device;
+ brd->pdev = pdev;
+ brd->pci_bus = pdev->bus->number;
+ brd->pci_slot = PCI_SLOT(pdev->devfn);
+ brd->name = dgnc_Ids[id].name;
+ brd->maxports = dgnc_Ids[id].maxports;
+	if (dgnc_Ids[id].is_pci_express)
+ brd->bd_flags |= BD_IS_PCI_EXPRESS;
+ brd->dpastatus = BD_NOFEP;
+ init_waitqueue_head(&brd->state_wait);
+
+ DGNC_SPINLOCK_INIT(brd->bd_lock);
+ DGNC_SPINLOCK_INIT(brd->bd_intr_lock);
+
+ brd->state = BOARD_FOUND;
+
+ for (i = 0; i < MAXPORTS; i++) {
+ brd->channels[i] = NULL;
+ }
+
+ /* store which card & revision we have */
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &brd->subvendor);
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &brd->subdevice);
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &brd->rev);
+
+ pci_irq = pdev->irq;
+ brd->irq = pci_irq;
+
+
+ switch(brd->device) {
+
+ case PCI_DEVICE_CLASSIC_4_DID:
+ case PCI_DEVICE_CLASSIC_8_DID:
+ case PCI_DEVICE_CLASSIC_4_422_DID:
+ case PCI_DEVICE_CLASSIC_8_422_DID:
+
+ brd->dpatype = T_CLASSIC | T_PCIBUS;
+
+ DPR_INIT(("dgnc_found_board - Classic.\n"));
+
+ /*
+ * For PCI ClassicBoards
+ * PCI Local Address (i.e. "resource" number) space
+ * 0 PLX Memory Mapped Config
+ * 1 PLX I/O Mapped Config
+ * 2 I/O Mapped UARTs and Status
+ * 3 Memory Mapped VPD
+ * 4 Memory Mapped UARTs and Status
+ */
+
+
+ /* get the PCI Base Address Registers */
+ brd->membase = pci_resource_start(pdev, 4);
+
+ if (!brd->membase) {
+ APR(("card has no PCI IO resources, failing board.\n"));
+ return -ENODEV;
+ }
+
+ brd->membase_end = pci_resource_end(pdev, 4);
+
+ if (brd->membase & 1)
+ brd->membase &= ~3;
+ else
+ brd->membase &= ~15;
+
+ brd->iobase = pci_resource_start(pdev, 1);
+ brd->iobase_end = pci_resource_end(pdev, 1);
+ brd->iobase = ((unsigned int) (brd->iobase)) & 0xFFFE;
+
+ /* Assign the board_ops struct */
+ brd->bd_ops = &dgnc_cls_ops;
+
+ brd->bd_uart_offset = 0x8;
+ brd->bd_dividend = 921600;
+
+ dgnc_do_remap(brd);
+
+ /* Get and store the board VPD, if it exists */
+ brd->bd_ops->vpd(brd);
+
+ /*
+ * Enable Local Interrupt 1 (0x1),
+ * Local Interrupt 1 Polarity Active high (0x2),
+ * Enable PCI interrupt (0x40)
+ */
+ outb(0x43, brd->iobase + 0x4c);
+
+ break;
+
+
+ case PCI_DEVICE_NEO_4_DID:
+ case PCI_DEVICE_NEO_8_DID:
+ case PCI_DEVICE_NEO_2DB9_DID:
+ case PCI_DEVICE_NEO_2DB9PRI_DID:
+ case PCI_DEVICE_NEO_2RJ45_DID:
+ case PCI_DEVICE_NEO_2RJ45PRI_DID:
+ case PCI_DEVICE_NEO_1_422_DID:
+ case PCI_DEVICE_NEO_1_422_485_DID:
+ case PCI_DEVICE_NEO_2_422_485_DID:
+ case PCI_DEVICE_NEO_EXPRESS_8_DID:
+ case PCI_DEVICE_NEO_EXPRESS_4_DID:
+ case PCI_DEVICE_NEO_EXPRESS_4RJ45_DID:
+ case PCI_DEVICE_NEO_EXPRESS_8RJ45_DID:
+
+ /*
+ * This chip is set up 100% when we get to it.
+ * No need to enable global interrupts or anything.
+ */
+ if (brd->bd_flags & BD_IS_PCI_EXPRESS)
+ brd->dpatype = T_NEO_EXPRESS | T_PCIBUS;
+ else
+ brd->dpatype = T_NEO | T_PCIBUS;
+
+ DPR_INIT(("dgnc_found_board - NEO.\n"));
+
+ /* get the PCI Base Address Registers */
+ brd->membase = pci_resource_start(pdev, 0);
+ brd->membase_end = pci_resource_end(pdev, 0);
+
+ if (brd->membase & 1)
+ brd->membase &= ~3;
+ else
+ brd->membase &= ~15;
+
+ /* Assign the board_ops struct */
+ brd->bd_ops = &dgnc_neo_ops;
+
+ brd->bd_uart_offset = 0x200;
+ brd->bd_dividend = 921600;
+
+ dgnc_do_remap(brd);
+
+ if (brd->re_map_membase) {
+
+ /* After remap is complete, we need to read and store the dvid */
+ brd->dvid = readb(brd->re_map_membase + 0x8D);
+
+ /* Get and store the board VPD, if it exists */
+ brd->bd_ops->vpd(brd);
+ }
+ break;
+
+ default:
+ APR(("Did not find any compatible Neo or Classic PCI boards in system.\n"));
+ return (-ENXIO);
+
+ }
+
+ /*
+ * Do tty device initialization.
+ */
+
+ rc = dgnc_tty_register(brd);
+ if (rc < 0) {
+ dgnc_tty_uninit(brd);
+ APR(("Can't register tty devices (%d)\n", rc));
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ goto failed;
+ }
+
+ rc = dgnc_finalize_board_init(brd);
+ if (rc < 0) {
+ APR(("Can't finalize board init (%d)\n", rc));
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+
+ goto failed;
+ }
+
+ rc = dgnc_tty_init(brd);
+ if (rc < 0) {
+ dgnc_tty_uninit(brd);
+ APR(("Can't init tty devices (%d)\n", rc));
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+
+ goto failed;
+ }
+
+ brd->state = BOARD_READY;
+ brd->dpastatus = BD_RUNNING;
+
+ dgnc_create_ports_sysfiles(brd);
+
+ /* init our poll helper tasklet */
+ tasklet_init(&brd->helper_tasklet, brd->bd_ops->tasklet, (unsigned long) brd);
+
+	DPR_INIT(("dgnc_found_board(%d) - printing out the msgbuf\n", i));
+ DGNC_LOCK(dgnc_global_lock, flags);
+ brd->msgbuf = NULL;
+ printk("%s", brd->msgbuf_head);
+ kfree(brd->msgbuf_head);
+ brd->msgbuf_head = NULL;
+ DGNC_UNLOCK(dgnc_global_lock, flags);
+
+ /*
+ * allocate flip buffer for board.
+ *
+ * Okay to malloc with GFP_KERNEL, we are not at interrupt
+ * context, and there are no locks held.
+ */
+ brd->flipbuf = kzalloc(MYFLIPLEN, GFP_KERNEL);
+
+ wake_up_interruptible(&brd->state_wait);
+
+ return(0);
+
+failed:
+
+ return (-ENXIO);
+
+}
+
+
+static int dgnc_finalize_board_init(struct board_t *brd)
+{
+ int rc = 0;
+
+ DPR_INIT(("dgnc_finalize_board_init() - start\n"));
+
+ if (!brd || brd->magic != DGNC_BOARD_MAGIC)
+ return(-ENODEV);
+
+ DPR_INIT(("dgnc_finalize_board_init() - start #2\n"));
+
+ if (brd->irq) {
+ rc = request_irq(brd->irq, brd->bd_ops->intr, IRQF_SHARED, "DGNC", brd);
+
+ if (rc) {
+ printk("Failed to hook IRQ %d\n",brd->irq);
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ rc = -ENODEV;
+ } else {
+ DPR_INIT(("Requested and received usage of IRQ %d\n", brd->irq));
+ }
+ }
+ return(rc);
+}
+
+/*
+ * Remap PCI memory.
+ */
+static void dgnc_do_remap(struct board_t *brd)
+{
+
+ if (!brd || brd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ brd->re_map_membase = ioremap(brd->membase, 0x1000);
+
+ DPR_INIT(("remapped mem: 0x%p\n", brd->re_map_membase));
+}
+
+
+/*****************************************************************************
+*
+* Function:
+*
+* dgnc_poll_handler
+*
+* Author:
+*
+* Scott H Kilau
+*
+* Parameters:
+*
+* dummy -- ignored
+*
+* Return Values:
+*
+* none
+*
+* Description:
+*
+* As each timer expires, it determines (a) whether the "transmit"
+* waiter needs to be woken up, and (b) whether the poller needs to
+* be rescheduled.
+*
+******************************************************************************/
+
+static void dgnc_poll_handler(ulong dummy)
+{
+ struct board_t *brd;
+ unsigned long lock_flags;
+ int i;
+ unsigned long new_time;
+
+ dgnc_poll_counter++;
+
+ /*
+ * Do not start the board state machine until
+	 * the driver tells us it's up and running and has
+ * everything it needs.
+ */
+ if (dgnc_driver_state != DRIVER_READY) {
+ goto schedule_poller;
+ }
+
+ /* Go thru each board, kicking off a tasklet for each if needed */
+ for (i = 0; i < dgnc_NumBoards; i++) {
+ brd = dgnc_Board[i];
+
+ DGNC_LOCK(brd->bd_lock, lock_flags);
+
+ /* If board is in a failed state, don't bother scheduling a tasklet */
+ if (brd->state == BOARD_FAILED) {
+ DGNC_UNLOCK(brd->bd_lock, lock_flags);
+ continue;
+ }
+
+ /* Schedule a poll helper task */
+ tasklet_schedule(&brd->helper_tasklet);
+
+ DGNC_UNLOCK(brd->bd_lock, lock_flags);
+ }
+
+schedule_poller:
+
+ /*
+ * Schedule ourself back at the nominal wakeup interval.
+ */
+ DGNC_LOCK(dgnc_poll_lock, lock_flags);
+ dgnc_poll_time += dgnc_jiffies_from_ms(dgnc_poll_tick);
+
+ new_time = dgnc_poll_time - jiffies;
+
+ if ((ulong) new_time >= 2 * dgnc_poll_tick) {
+ dgnc_poll_time = jiffies + dgnc_jiffies_from_ms(dgnc_poll_tick);
+ }
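+	/*
+	 * Editorial note: dgnc_poll_time and new_time are unsigned, so if the
+	 * timer fired late enough that dgnc_poll_time is already in the past,
+	 * the subtraction above wraps to a huge value and the test resyncs the
+	 * next poll to "now + one tick" instead of scheduling a timer in the
+	 * past.  (The threshold compares jiffies against dgnc_poll_tick, which
+	 * is in milliseconds; the two only line up exactly when HZ == 1000.)
+	 */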
+
+ init_timer(&dgnc_poll_timer);
+ dgnc_poll_timer.function = dgnc_poll_handler;
+ dgnc_poll_timer.data = 0;
+ dgnc_poll_timer.expires = dgnc_poll_time;
+ DGNC_UNLOCK(dgnc_poll_lock, lock_flags);
+
+ if (!dgnc_poll_stop)
+ add_timer(&dgnc_poll_timer);
+}
+
+/*
+ * dgnc_init_globals()
+ *
+ * This is where we initialize the globals from the static insmod
+ * configuration variables. These are declared near the head of
+ * this file.
+ */
+static void dgnc_init_globals(void)
+{
+ int i = 0;
+
+ dgnc_rawreadok = rawreadok;
+ dgnc_trcbuf_size = trcbuf_size;
+ dgnc_debug = debug;
+
+ for (i = 0; i < MAXBOARDS; i++) {
+ dgnc_Board[i] = NULL;
+ }
+
+ init_timer(&dgnc_poll_timer);
+}
+
+
+/************************************************************************
+ *
+ * Utility functions
+ *
+ ************************************************************************/
+
+/*
+ * dgnc_ms_sleep()
+ *
+ * Put the driver to sleep for x ms's
+ *
+ * Returns 0 if timed out, !0 (showing signal) if interrupted by a signal.
+ */
+int dgnc_ms_sleep(ulong ms)
+{
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout((ms * HZ) / 1000);
+ return (signal_pending(current));
+}
+
+
+
+/*
+ * dgnc_ioctl_name() : Returns a text version of each ioctl value.
+ */
+char *dgnc_ioctl_name(int cmd)
+{
+ switch(cmd) {
+
+ case TCGETA: return("TCGETA");
+ case TCGETS: return("TCGETS");
+ case TCSETA: return("TCSETA");
+ case TCSETS: return("TCSETS");
+ case TCSETAW: return("TCSETAW");
+ case TCSETSW: return("TCSETSW");
+ case TCSETAF: return("TCSETAF");
+ case TCSETSF: return("TCSETSF");
+ case TCSBRK: return("TCSBRK");
+ case TCXONC: return("TCXONC");
+ case TCFLSH: return("TCFLSH");
+ case TIOCGSID: return("TIOCGSID");
+
+ case TIOCGETD: return("TIOCGETD");
+ case TIOCSETD: return("TIOCSETD");
+ case TIOCGWINSZ: return("TIOCGWINSZ");
+ case TIOCSWINSZ: return("TIOCSWINSZ");
+
+ case TIOCMGET: return("TIOCMGET");
+ case TIOCMSET: return("TIOCMSET");
+ case TIOCMBIS: return("TIOCMBIS");
+ case TIOCMBIC: return("TIOCMBIC");
+
+ /* from digi.h */
+ case DIGI_SETA: return("DIGI_SETA");
+ case DIGI_SETAW: return("DIGI_SETAW");
+ case DIGI_SETAF: return("DIGI_SETAF");
+ case DIGI_SETFLOW: return("DIGI_SETFLOW");
+ case DIGI_SETAFLOW: return("DIGI_SETAFLOW");
+ case DIGI_GETFLOW: return("DIGI_GETFLOW");
+ case DIGI_GETAFLOW: return("DIGI_GETAFLOW");
+ case DIGI_GETA: return("DIGI_GETA");
+ case DIGI_GEDELAY: return("DIGI_GEDELAY");
+ case DIGI_SEDELAY: return("DIGI_SEDELAY");
+ case DIGI_GETCUSTOMBAUD: return("DIGI_GETCUSTOMBAUD");
+ case DIGI_SETCUSTOMBAUD: return("DIGI_SETCUSTOMBAUD");
+ case TIOCMODG: return("TIOCMODG");
+ case TIOCMODS: return("TIOCMODS");
+ case TIOCSDTR: return("TIOCSDTR");
+ case TIOCCDTR: return("TIOCCDTR");
+
+ default: return("unknown");
+ }
+}
diff --git a/drivers/staging/dgnc/dgnc_driver.h b/drivers/staging/dgnc/dgnc_driver.h
new file mode 100644
index 0000000..218b15d
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_driver.h
@@ -0,0 +1,563 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ *
+ *************************************************************************
+ *
+ * Driver includes
+ *
+ *************************************************************************/
+
+#ifndef __DGNC_DRIVER_H
+#define __DGNC_DRIVER_H
+
+#include <linux/types.h>		/* To pick up the various Linux types */
+#include <linux/tty.h> /* To pick up the various tty structs/defines */
+#include <linux/interrupt.h> /* For irqreturn_t type */
+
+#include "dgnc_types.h" /* Additional types needed by the Digi header files */
+#include "digi.h" /* Digi specific ioctl header */
+#include "dgnc_kcompat.h" /* Kernel 2.4/2.6 compat includes */
+#include "dgnc_sysfs.h" /* Support for SYSFS */
+
+/*************************************************************************
+ *
+ * Driver defines
+ *
+ *************************************************************************/
+
+/*
+ * Driver identification, error, and debugging statements
+ *
+ * In theory, you can change all occurrences of "digi" in the next
+ * three lines, and the driver printk's will all automagically change.
+ *
+ * APR((fmt, args, ...)); Always prints message
+ * DPR((fmt, args, ...)); Only prints if DGNC_TRACER is defined at
+ * compile time and dgnc_debug!=0
+ */
+#define PROCSTR "dgnc" /* /proc entries */
+#define DEVSTR "/dev/dg/dgnc" /* /dev entries */
+#define DRVSTR "dgnc" /* Driver name string
+ * displayed by APR */
+#define APR(args) do { PRINTF_TO_KMEM(args); printk(DRVSTR": "); printk args; \
+ } while (0)
+#define RAPR(args) do { PRINTF_TO_KMEM(args); printk args; } while (0)
+
+#define TRC_TO_CONSOLE 1
+
+/*
+ * Debugging levels can be set using debug insmod variable
+ * They can also be compiled out completely.
+ */
+
+#define DBG_INIT (dgnc_debug & 0x01)
+#define DBG_BASIC (dgnc_debug & 0x02)
+#define DBG_CORE (dgnc_debug & 0x04)
+
+#define DBG_OPEN (dgnc_debug & 0x08)
+#define DBG_CLOSE (dgnc_debug & 0x10)
+#define DBG_READ (dgnc_debug & 0x20)
+#define DBG_WRITE (dgnc_debug & 0x40)
+
+#define DBG_IOCTL (dgnc_debug & 0x80)
+
+#define DBG_PROC (dgnc_debug & 0x100)
+#define DBG_PARAM (dgnc_debug & 0x200)
+#define DBG_PSCAN (dgnc_debug & 0x400)
+#define DBG_EVENT (dgnc_debug & 0x800)
+
+#define DBG_DRAIN (dgnc_debug & 0x1000)
+#define DBG_MSIGS (dgnc_debug & 0x2000)
+
+#define DBG_MGMT (dgnc_debug & 0x4000)
+#define DBG_INTR (dgnc_debug & 0x8000)
+
+#define DBG_CARR (dgnc_debug & 0x10000)
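+
+/*
+ * Example (editorial): loading the driver with debug=0x09 enables both
+ * DBG_INIT (0x01) and DBG_OPEN (0x08) tracing, provided DGNC_TRACER was
+ * defined at compile time.
+ */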
+
+
+#if defined(DGNC_TRACER)
+
+# if defined(TRC_TO_KMEM)
+/* Choose one: */
+# define TRC_ON_OVERFLOW_WRAP_AROUND
+# undef TRC_ON_OVERFLOW_SHIFT_BUFFER
+# endif //TRC_TO_KMEM
+
+# define TRC_MAXMSG 1024
+# define TRC_OVERFLOW "(OVERFLOW)"
+# define TRC_DTRC "/usr/bin/dtrc"
+
+#if defined TRC_TO_CONSOLE
+#define PRINTF_TO_CONSOLE(args) { printk(DRVSTR": "); printk args; }
+#else //!defined TRC_TO_CONSOLE
+#define PRINTF_TO_CONSOLE(args)
+#endif
+
+#if defined TRC_TO_KMEM
+#define PRINTF_TO_KMEM(args) dgnc_tracef args
+#else //!defined TRC_TO_KMEM
+#define PRINTF_TO_KMEM(args)
+#endif
+
+#define TRC(args) { PRINTF_TO_KMEM(args); PRINTF_TO_CONSOLE(args) }
+
+# define DPR_INIT(ARGS) if (DBG_INIT) TRC(ARGS)
+# define DPR_BASIC(ARGS) if (DBG_BASIC) TRC(ARGS)
+# define DPR_CORE(ARGS) if (DBG_CORE) TRC(ARGS)
+# define DPR_OPEN(ARGS) if (DBG_OPEN) TRC(ARGS)
+# define DPR_CLOSE(ARGS) if (DBG_CLOSE) TRC(ARGS)
+# define DPR_READ(ARGS) if (DBG_READ) TRC(ARGS)
+# define DPR_WRITE(ARGS) if (DBG_WRITE) TRC(ARGS)
+# define DPR_IOCTL(ARGS) if (DBG_IOCTL) TRC(ARGS)
+# define DPR_PROC(ARGS) if (DBG_PROC) TRC(ARGS)
+# define DPR_PARAM(ARGS) if (DBG_PARAM) TRC(ARGS)
+# define DPR_PSCAN(ARGS) if (DBG_PSCAN) TRC(ARGS)
+# define DPR_EVENT(ARGS) if (DBG_EVENT) TRC(ARGS)
+# define DPR_DRAIN(ARGS) if (DBG_DRAIN) TRC(ARGS)
+# define DPR_CARR(ARGS) if (DBG_CARR) TRC(ARGS)
+# define DPR_MGMT(ARGS) if (DBG_MGMT) TRC(ARGS)
+# define DPR_INTR(ARGS) if (DBG_INTR) TRC(ARGS)
+# define DPR_MSIGS(ARGS) if (DBG_MSIGS) TRC(ARGS)
+
+# define DPR(ARGS) if (dgnc_debug) TRC(ARGS)
+# define P(X) dgnc_tracef(#X "=%p\n", X)
+# define X(X) dgnc_tracef(#X "=%x\n", X)
+
+#else//!defined DGNC_TRACER
+
+#define PRINTF_TO_KMEM(args)
+# define TRC(ARGS)
+# define DPR_INIT(ARGS)
+# define DPR_BASIC(ARGS)
+# define DPR_CORE(ARGS)
+# define DPR_OPEN(ARGS)
+# define DPR_CLOSE(ARGS)
+# define DPR_READ(ARGS)
+# define DPR_WRITE(ARGS)
+# define DPR_IOCTL(ARGS)
+# define DPR_PROC(ARGS)
+# define DPR_PARAM(ARGS)
+# define DPR_PSCAN(ARGS)
+# define DPR_EVENT(ARGS)
+# define DPR_DRAIN(ARGS)
+# define DPR_CARR(ARGS)
+# define DPR_MGMT(ARGS)
+# define DPR_INTR(ARGS)
+# define DPR_MSIGS(ARGS)
+
+# define DPR(args)
+
+#endif//DGNC_TRACER
+
+/* Number of boards we support at once. */
+#define MAXBOARDS 20
+#define MAXPORTS 8
+#define MAXTTYNAMELEN 200
+
+/* Our 3 magic numbers for our board, channel and unit structs */
+#define DGNC_BOARD_MAGIC 0x5c6df104
+#define DGNC_CHANNEL_MAGIC 0x6c6df104
+#define DGNC_UNIT_MAGIC 0x7c6df104
+
+/* Serial port types */
+#define DGNC_SERIAL 0
+#define DGNC_PRINT 1
+
+#define SERIAL_TYPE_NORMAL 1
+
+#define PORT_NUM(dev) ((dev) & 0x7f)
+#define IS_PRINT(dev) (((dev) & 0xff) >= 0x80)
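+
+/*
+ * Example (editorial): minor number 0x05 is serial port 5
+ * (PORT_NUM(0x05) == 5, IS_PRINT(0x05) == 0), while minor 0x85 is the
+ * transparent print unit of the same port (PORT_NUM(0x85) == 5,
+ * IS_PRINT(0x85) != 0).
+ */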
+
+/* MAX number of stop characters we will send when our read queue is getting full */
+#define MAX_STOPS_SENT 5
+
+/* 4 extra for alignment play space */
+#define WRITEBUFLEN ((4096) + 4)
+#define MYFLIPLEN N_TTY_BUF_SIZE
+
+#define dgnc_jiffies_from_ms(a) (((a) * HZ) / 1000)
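+
+/*
+ * Example (editorial): dgnc_jiffies_from_ms(20) is 20 jiffies with HZ == 1000
+ * but only 5 jiffies with HZ == 250; the result depends on the kernel's
+ * configured HZ.
+ */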
+
+/*
+ * Define a local default termios struct. All ports will be created
+ * with this termios initially. This is the same structure that is defined
+ * as the default in tty_io.c with the same settings overridden as in serial.c
+ *
+ * In short, this should match the internal serial ports' defaults.
+ */
+#define DEFAULT_IFLAGS (ICRNL | IXON)
+#define DEFAULT_OFLAGS (OPOST | ONLCR)
+#define DEFAULT_CFLAGS (B9600 | CS8 | CREAD | HUPCL | CLOCAL)
+#define DEFAULT_LFLAGS (ISIG | ICANON | ECHO | ECHOE | ECHOK | \
+ ECHOCTL | ECHOKE | IEXTEN)
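+
+/*
+ * Illustrative sketch (editorial, not from the original source): a port's
+ * initial termios would typically be seeded from these defaults roughly as
+ *
+ *	tty->termios.c_iflag = DEFAULT_IFLAGS;
+ *	tty->termios.c_oflag = DEFAULT_OFLAGS;
+ *	tty->termios.c_cflag = DEFAULT_CFLAGS;
+ *	tty->termios.c_lflag = DEFAULT_LFLAGS;
+ *
+ * The real initialization is in the tty code (presumably dgnc_tty.c), which
+ * is not part of this header.
+ */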
+
+#ifndef _POSIX_VDISABLE
+#define _POSIX_VDISABLE '\0'
+#endif
+
+#define SNIFF_MAX 65536 /* Sniff buffer size (2^n) */
+#define SNIFF_MASK (SNIFF_MAX - 1) /* Sniff wrap mask */
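+
+/*
+ * Example (editorial): because SNIFF_MAX is a power of two, the sniff buffer
+ * indices can wrap with a mask instead of a modulo, e.g.
+ *
+ *	ch->ch_sniff_in = (ch->ch_sniff_in + n) & SNIFF_MASK;
+ */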
+
+/*
+ * Lock function/defines.
+ * Makes spotting lock/unlock locations easier.
+ */
+# define DGNC_SPINLOCK_INIT(x) spin_lock_init(&(x))
+# define DGNC_LOCK(x,y) spin_lock_irqsave(&(x), y)
+# define DGNC_UNLOCK(x,y) spin_unlock_irqrestore(&(x), y)
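+
+/*
+ * Typical usage, as seen throughout this driver:
+ *
+ *	unsigned long flags;
+ *
+ *	DGNC_LOCK(dgnc_global_lock, flags);
+ *	...critical section...
+ *	DGNC_UNLOCK(dgnc_global_lock, flags);
+ */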
+
+/*
+ * All the possible states the driver can be while being loaded.
+ */
+enum {
+ DRIVER_INITIALIZED = 0,
+ DRIVER_READY
+};
+
+/*
+ * All the possible states the board can be while booting up.
+ */
+enum {
+ BOARD_FAILED = 0,
+ BOARD_FOUND,
+ BOARD_READY
+};
+
+
+/*************************************************************************
+ *
+ * Structures and closely related defines.
+ *
+ *************************************************************************/
+
+struct board_t;
+struct channel_t;
+
+/************************************************************************
+ * Per board operations structure *
+ ************************************************************************/
+struct board_ops {
+ void (*tasklet) (unsigned long data);
+ irqreturn_t (*intr) (int irq, void *voidbrd);
+ void (*uart_init) (struct channel_t *ch);
+ void (*uart_off) (struct channel_t *ch);
+ int (*drain) (struct tty_struct *tty, uint seconds);
+ void (*param) (struct tty_struct *tty);
+ void (*vpd) (struct board_t *brd);
+ void (*assert_modem_signals) (struct channel_t *ch);
+ void (*flush_uart_write) (struct channel_t *ch);
+ void (*flush_uart_read) (struct channel_t *ch);
+ void (*disable_receiver) (struct channel_t *ch);
+ void (*enable_receiver) (struct channel_t *ch);
+ void (*send_break) (struct channel_t *ch, int);
+ void (*send_start_character) (struct channel_t *ch);
+ void (*send_stop_character) (struct channel_t *ch);
+ void (*copy_data_from_queue_to_uart) (struct channel_t *ch);
+ uint (*get_uart_bytes_left) (struct channel_t *ch);
+ void (*send_immediate_char) (struct channel_t *ch, unsigned char);
+};
+
+/************************************************************************
+ * Device flag definitions for bd_flags.
+ ************************************************************************/
+#define BD_IS_PCI_EXPRESS 0x0001 /* Is a PCI Express board */
+
+
+/*
+ * Per-board information
+ */
+struct board_t {
+ int magic; /* Board Magic number. */
+	int		boardnum;	/* Board number: 0 - (MAXBOARDS - 1) */
+
+ int type; /* Type of board */
+ char *name; /* Product Name */
+ struct pci_dev *pdev; /* Pointer to the pci_dev struct */
+ unsigned long bd_flags; /* Board flags */
+ u16 vendor; /* PCI vendor ID */
+ u16 device; /* PCI device ID */
+ u16 subvendor; /* PCI subsystem vendor ID */
+ u16 subdevice; /* PCI subsystem device ID */
+ uchar rev; /* PCI revision ID */
+ uint pci_bus; /* PCI bus value */
+ uint pci_slot; /* PCI slot value */
+ uint maxports; /* MAX ports this board can handle */
+ uchar dvid; /* Board specific device id */
+ uchar vpd[128]; /* VPD of board, if found */
+ uchar serial_num[20]; /* Serial number of board, if found in VPD */
+
+ spinlock_t bd_lock; /* Used to protect board */
+
+ spinlock_t bd_intr_lock; /* Used to protect the poller tasklet and
+ * the interrupt routine from each other.
+ */
+
+ uint state; /* State of card. */
+ wait_queue_head_t state_wait; /* Place to sleep on for state change */
+
+ struct tasklet_struct helper_tasklet; /* Poll helper tasklet */
+
+ uint nasync; /* Number of ports on card */
+
+ uint irq; /* Interrupt request number */
+ ulong intr_count; /* Count of interrupts */
+ ulong intr_modem; /* Count of interrupts */
+ ulong intr_tx; /* Count of interrupts */
+ ulong intr_rx; /* Count of interrupts */
+
+ ulong membase; /* Start of base memory of the card */
+ ulong membase_end; /* End of base memory of the card */
+
+ u8 __iomem *re_map_membase;/* Remapped memory of the card */
+
+ ulong iobase; /* Start of io base of the card */
+ ulong iobase_end; /* End of io base of the card */
+
+ uint bd_uart_offset; /* Space between each UART */
+
+ struct channel_t *channels[MAXPORTS]; /* array of pointers to our channels. */
+
+ struct tty_driver SerialDriver;
+ char SerialName[200];
+ struct tty_driver PrintDriver;
+ char PrintName[200];
+
+ uint dgnc_Major_Serial_Registered;
+ uint dgnc_Major_TransparentPrint_Registered;
+
+ uint dgnc_Serial_Major;
+ uint dgnc_TransparentPrint_Major;
+
+ uint TtyRefCnt;
+
+ char *flipbuf; /* Our flip buffer, alloced if board is found */
+
+ u16 dpatype; /* The board "type", as defined by DPA */
+ u16 dpastatus; /* The board "status", as defined by DPA */
+
+ /*
+ * Mgmt data.
+ */
+ char *msgbuf_head;
+ char *msgbuf;
+
+ uint bd_dividend; /* Board/UARTs specific dividend */
+
+ struct board_ops *bd_ops;
+
+ /* /proc/<board> entries */
+ struct proc_dir_entry *proc_entry_pointer;
+ struct dgnc_proc_entry *dgnc_board_table;
+
+};
+
+
+/************************************************************************
+ * Unit flag definitions for un_flags.
+ ************************************************************************/
+#define UN_ISOPEN 0x0001 /* Device is open */
+#define UN_CLOSING 0x0002 /* Line is being closed */
+#define UN_IMM 0x0004 /* Service immediately */
+#define UN_BUSY 0x0008 /* Some work this channel */
+#define UN_BREAKI 0x0010 /* Input break received */
+#define UN_PWAIT 0x0020 /* Printer waiting for terminal */
+#define UN_TIME 0x0040 /* Waiting on time */
+#define UN_EMPTY 0x0080 /* Waiting output queue empty */
+#define UN_LOW 0x0100 /* Waiting output low water mark*/
+#define UN_EXCL_OPEN 0x0200 /* Open for exclusive use */
+#define UN_WOPEN 0x0400 /* Device waiting for open */
+#define UN_WIOCTL	0x0800		/* Device waiting for ioctl */
+#define UN_HANGUP 0x8000 /* Carrier lost */
+
+struct device;
+
+/************************************************************************
+ * Structure for terminal or printer unit.
+ ************************************************************************/
+struct un_t {
+ int magic; /* Unit Magic Number. */
+ struct channel_t *un_ch;
+ ulong un_time;
+ uint un_type;
+ uint un_open_count; /* Counter of opens to port */
+ struct tty_struct *un_tty;/* Pointer to unit tty structure */
+ uint un_flags; /* Unit flags */
+ wait_queue_head_t un_flags_wait; /* Place to sleep to wait on unit */
+ uint un_dev; /* Minor device number */
+ struct device *un_sysfs;
+};
+
+
+/************************************************************************
+ * Device flag definitions for ch_flags.
+ ************************************************************************/
+#define CH_PRON 0x0001 /* Printer on string */
+#define CH_STOP 0x0002 /* Output is stopped */
+#define CH_STOPI 0x0004 /* Input is stopped */
+#define CH_CD 0x0008 /* Carrier is present */
+#define CH_FCAR 0x0010 /* Carrier forced on */
+#define CH_HANGUP 0x0020 /* Hangup received */
+
+#define CH_RECEIVER_OFF 0x0040 /* Receiver is off */
+#define CH_OPENING 0x0080 /* Port in fragile open state */
+#define CH_CLOSING 0x0100 /* Port in fragile close state */
+#define CH_FIFO_ENABLED 0x0200 /* Port has FIFOs enabled */
+#define CH_TX_FIFO_EMPTY 0x0400 /* TX Fifo is completely empty */
+#define CH_TX_FIFO_LWM 0x0800 /* TX Fifo is below Low Water */
+#define CH_BREAK_SENDING 0x1000 /* Break is being sent */
+#define CH_LOOPBACK	0x2000		/* Channel is in loopback mode */
+#define CH_FLIPBUF_IN_USE 0x4000 /* Channel's flipbuf is in use */
+#define CH_BAUD0 0x08000 /* Used for checking B0 transitions */
+#define CH_FORCED_STOP 0x20000 /* Output is forcibly stopped */
+#define CH_FORCED_STOPI 0x40000 /* Input is forcibly stopped */
+
+/*
+ * Definitions for ch_sniff_flags
+ */
+#define SNIFF_OPEN 0x1
+#define SNIFF_WAIT_DATA 0x2
+#define SNIFF_WAIT_SPACE 0x4
+
+
+/* Our Read/Error/Write queue sizes */
+#define RQUEUEMASK 0x1FFF /* 8 K - 1 */
+#define EQUEUEMASK 0x1FFF /* 8 K - 1 */
+#define WQUEUEMASK 0x0FFF /* 4 K - 1 */
+#define RQUEUESIZE (RQUEUEMASK + 1)
+#define EQUEUESIZE RQUEUESIZE
+#define WQUEUESIZE (WQUEUEMASK + 1)
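+
+/*
+ * Illustrative sketch (editorial): the queue sizes are powers of two, so the
+ * amount of buffered data can be computed with the matching mask, e.g. for
+ * the read queue:
+ *
+ *	used = (ch->ch_r_head - ch->ch_r_tail) & RQUEUEMASK;
+ *	free = RQUEUESIZE - used - 1;
+ *
+ * The real accounting lives in the tty/uart code, which is not part of this
+ * header.
+ */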
+
+
+/************************************************************************
+ * Channel information structure.
+ ************************************************************************/
+struct channel_t {
+ int magic; /* Channel Magic Number */
+ struct board_t *ch_bd; /* Board structure pointer */
+ struct digi_t ch_digi; /* Transparent Print structure */
+ struct un_t ch_tun; /* Terminal unit info */
+ struct un_t ch_pun; /* Printer unit info */
+
+ spinlock_t ch_lock; /* provide for serialization */
+ wait_queue_head_t ch_flags_wait;
+
+ uint ch_portnum; /* Port number, 0 offset. */
+ uint ch_open_count; /* open count */
+ uint ch_flags; /* Channel flags */
+
+ ulong ch_close_delay; /* How long we should drop RTS/DTR for */
+
+ ulong ch_cpstime; /* Time for CPS calculations */
+
+ tcflag_t ch_c_iflag; /* channel iflags */
+ tcflag_t ch_c_cflag; /* channel cflags */
+ tcflag_t ch_c_oflag; /* channel oflags */
+ tcflag_t ch_c_lflag; /* channel lflags */
+ uchar ch_stopc; /* Stop character */
+ uchar ch_startc; /* Start character */
+
+ uint ch_old_baud; /* Cache of the current baud */
+ uint ch_custom_speed;/* Custom baud, if set */
+
+ uint ch_wopen; /* Waiting for open process cnt */
+
+ uchar ch_mostat; /* FEP output modem status */
+ uchar ch_mistat; /* FEP input modem status */
+
+ struct neo_uart_struct __iomem *ch_neo_uart; /* Pointer to the "mapped" UART struct */
+ struct cls_uart_struct __iomem *ch_cls_uart; /* Pointer to the "mapped" UART struct */
+
+ uchar ch_cached_lsr; /* Cached value of the LSR register */
+
+ uchar *ch_rqueue; /* Our read queue buffer - malloc'ed */
+ ushort ch_r_head; /* Head location of the read queue */
+ ushort ch_r_tail; /* Tail location of the read queue */
+
+ uchar *ch_equeue; /* Our error queue buffer - malloc'ed */
+ ushort ch_e_head; /* Head location of the error queue */
+ ushort ch_e_tail; /* Tail location of the error queue */
+
+ uchar *ch_wqueue; /* Our write queue buffer - malloc'ed */
+ ushort ch_w_head; /* Head location of the write queue */
+ ushort ch_w_tail; /* Tail location of the write queue */
+
+ ulong ch_rxcount; /* total of data received so far */
+ ulong ch_txcount; /* total of data transmitted so far */
+
+ uchar ch_r_tlevel; /* Receive Trigger level */
+ uchar ch_t_tlevel; /* Transmit Trigger level */
+
+ uchar ch_r_watermark; /* Receive Watermark */
+
+ ulong ch_stop_sending_break; /* Time we should STOP sending a break */
+
+ uint ch_stops_sent; /* How many times I have sent a stop character
+ * to try to stop the other guy sending.
+ */
+ ulong ch_err_parity; /* Count of parity errors on channel */
+ ulong ch_err_frame; /* Count of framing errors on channel */
+ ulong ch_err_break; /* Count of breaks on channel */
+ ulong ch_err_overrun; /* Count of overruns on channel */
+
+ ulong ch_xon_sends; /* Count of xons transmitted */
+ ulong ch_xoff_sends; /* Count of xoffs transmitted */
+
+ ulong ch_intr_modem; /* Count of interrupts */
+ ulong ch_intr_tx; /* Count of interrupts */
+ ulong ch_intr_rx; /* Count of interrupts */
+
+
+ /* /proc/<board>/<channel> entries */
+ struct proc_dir_entry *proc_entry_pointer;
+ struct dgnc_proc_entry *dgnc_channel_table;
+
+ uint ch_sniff_in;
+ uint ch_sniff_out;
+ char *ch_sniff_buf; /* Sniff buffer for proc */
+ ulong ch_sniff_flags; /* Channel flags */
+ wait_queue_head_t ch_sniff_wait;
+};
+
+
+/*************************************************************************
+ *
+ * Prototypes for non-static functions used in more than one module
+ *
+ *************************************************************************/
+
+extern int dgnc_ms_sleep(ulong ms);
+extern char *dgnc_ioctl_name(int cmd);
+
+/*
+ * Our Global Variables.
+ */
+extern int dgnc_driver_state; /* The state of the driver */
+extern uint dgnc_Major; /* Our driver/mgmt major */
+extern int dgnc_debug; /* Debug variable */
+extern int dgnc_rawreadok; /* Set if user wants rawreads */
+extern int dgnc_poll_tick; /* Poll interval - 20 ms */
+extern int dgnc_trcbuf_size; /* Size of the ringbuffer */
+extern spinlock_t dgnc_global_lock; /* Driver global spinlock */
+extern uint dgnc_NumBoards; /* Total number of boards */
+extern struct board_t *dgnc_Board[MAXBOARDS]; /* Array of board structs */
+extern ulong dgnc_poll_counter; /* Times the poller has run */
+extern char *dgnc_state_text[]; /* Array of state text */
+extern char *dgnc_driver_state_text[];/* Array of driver state text */
+
+#endif
diff --git a/drivers/staging/dgnc/dgnc_kcompat.h b/drivers/staging/dgnc/dgnc_kcompat.h
new file mode 100644
index 0000000..00f589a
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_kcompat.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2004 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ *
+ *************************************************************************
+ *
+ * This file is intended to contain all the kernel "differences" between the
+ * various kernels that we support.
+ *
+ *************************************************************************/
+
+#ifndef __DGNC_KCOMPAT_H
+#define __DGNC_KCOMPAT_H
+
+#include <linux/version.h>
+
+# ifndef KERNEL_VERSION
+# define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
+# endif
+
+
+#if !defined(TTY_FLIPBUF_SIZE)
+# define TTY_FLIPBUF_SIZE 512
+#endif
+
+
+/* Sparse stuff */
+# ifndef __user
+# define __user
+# define __kernel
+# define __safe
+# define __force
+# define __chk_user_ptr(x) (void)0
+# endif
+
+
+# define PARM_STR(VAR, INIT, PERM, DESC) \
+ static char *VAR = INIT; \
+ char *dgnc_##VAR; \
+ module_param(VAR, charp, PERM); \
+ MODULE_PARM_DESC(VAR, DESC);
+
+# define PARM_INT(VAR, INIT, PERM, DESC) \
+ static int VAR = INIT; \
+ int dgnc_##VAR; \
+ module_param(VAR, int, PERM); \
+ MODULE_PARM_DESC(VAR, DESC);
+
+# define PARM_ULONG(VAR, INIT, PERM, DESC) \
+ static ulong VAR = INIT; \
+ ulong dgnc_##VAR; \
+ module_param(VAR, long, PERM); \
+ MODULE_PARM_DESC(VAR, DESC);
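+
+/*
+ * Illustrative expansion (editorial): PARM_INT(debug, 0x00, 0644, "...") in
+ * dgnc_driver.c becomes, roughly,
+ *
+ *	static int debug = 0x00;
+ *	int dgnc_debug;
+ *	module_param(debug, int, 0644);
+ *	MODULE_PARM_DESC(debug, "...");
+ *
+ * dgnc_init_globals() then copies the insmod-time value into the
+ * dgnc_-prefixed global that the rest of the driver reads.
+ */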
+
+
+
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
+
+
+
+/* NOTHING YET */
+
+
+
+# else
+
+
+
+# error "this driver does not support anything below the 2.6.27 kernel series."
+
+
+
+# endif
+
+#endif /* ! __DGNC_KCOMPAT_H */
diff --git a/drivers/staging/dgnc/dgnc_mgmt.c b/drivers/staging/dgnc/dgnc_mgmt.c
new file mode 100644
index 0000000..c4629d7
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_mgmt.c
@@ -0,0 +1,305 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ *
+ */
+
+/************************************************************************
+ *
+ * This file implements the mgmt functionality for the
+ * Neo and ClassicBoard based product lines.
+ *
+ ************************************************************************
+ */
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/sched.h> /* For jiffies, task states */
+#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
+#include <linux/serial_reg.h>
+#include <linux/termios.h>
+#include <asm/uaccess.h> /* For copy_from_user/copy_to_user */
+
+#include "dgnc_driver.h"
+#include "dgnc_pci.h"
+#include "dgnc_kcompat.h" /* Kernel 2.4/2.6 compat includes */
+#include "dgnc_mgmt.h"
+#include "dpacompat.h"
+
+
+/* Our "in use" variables, to enforce 1 open only */
+static int dgnc_mgmt_in_use[MAXMGMTDEVICES];
+
+
+/*
+ * dgnc_mgmt_open()
+ *
+ * Open the mgmt/downld/dpa device
+ */
+int dgnc_mgmt_open(struct inode *inode, struct file *file)
+{
+ unsigned long lock_flags;
+ unsigned int minor = iminor(inode);
+
+ DPR_MGMT(("dgnc_mgmt_open start.\n"));
+
+ DGNC_LOCK(dgnc_global_lock, lock_flags);
+
+ /* mgmt device */
+ if (minor < MAXMGMTDEVICES) {
+ /* Only allow 1 open at a time on mgmt device */
+ if (dgnc_mgmt_in_use[minor]) {
+ DGNC_UNLOCK(dgnc_global_lock, lock_flags);
+ return (-EBUSY);
+ }
+ dgnc_mgmt_in_use[minor]++;
+ }
+ else {
+ DGNC_UNLOCK(dgnc_global_lock, lock_flags);
+ return (-ENXIO);
+ }
+
+ DGNC_UNLOCK(dgnc_global_lock, lock_flags);
+
+ DPR_MGMT(("dgnc_mgmt_open finish.\n"));
+
+ return 0;
+}
+
+
+/*
+ * dgnc_mgmt_close()
+ *
+ * Close the mgmt/dpa device
+ */
+int dgnc_mgmt_close(struct inode *inode, struct file *file)
+{
+ unsigned long lock_flags;
+ unsigned int minor = iminor(inode);
+
+ DPR_MGMT(("dgnc_mgmt_close start.\n"));
+
+ DGNC_LOCK(dgnc_global_lock, lock_flags);
+
+ /* mgmt device */
+ if (minor < MAXMGMTDEVICES) {
+ if (dgnc_mgmt_in_use[minor]) {
+ dgnc_mgmt_in_use[minor] = 0;
+ }
+ }
+ DGNC_UNLOCK(dgnc_global_lock, lock_flags);
+
+ DPR_MGMT(("dgnc_mgmt_close finish.\n"));
+
+ return 0;
+}
+
+
+/*
+ * dgnc_mgmt_ioctl()
+ *
+ * ioctl the mgmt/dpa device
+ */
+
+long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ unsigned long lock_flags;
+ void __user *uarg = (void __user *) arg;
+
+ DPR_MGMT(("dgnc_mgmt_ioctl start.\n"));
+
+ switch (cmd) {
+
+ case DIGI_GETDD:
+ {
+ /*
+ * This returns the total number of boards
+ * in the system, as well as driver version
+ * and has space for a reserved entry
+ */
+ struct digi_dinfo ddi;
+
+ DGNC_LOCK(dgnc_global_lock, lock_flags);
+
+ ddi.dinfo_nboards = dgnc_NumBoards;
+ sprintf(ddi.dinfo_version, "%s", DG_PART);
+
+ DGNC_UNLOCK(dgnc_global_lock, lock_flags);
+
+ DPR_MGMT(("DIGI_GETDD returning numboards: %d version: %s\n",
+ ddi.dinfo_nboards, ddi.dinfo_version));
+
+ if (copy_to_user(uarg, &ddi, sizeof (ddi)))
+ return(-EFAULT);
+
+ break;
+ }
+
+ case DIGI_GETBD:
+ {
+ int brd;
+
+ struct digi_info di;
+
+ if (copy_from_user(&brd, uarg, sizeof(int))) {
+ return(-EFAULT);
+ }
+
+ DPR_MGMT(("DIGI_GETBD asking about board: %d\n", brd));
+
+		if ((brd < 0) || (brd >= dgnc_NumBoards) || (dgnc_NumBoards == 0))
+ return (-ENODEV);
+
+ memset(&di, 0, sizeof(di));
+
+ di.info_bdnum = brd;
+
+ DGNC_LOCK(dgnc_Board[brd]->bd_lock, lock_flags);
+
+ di.info_bdtype = dgnc_Board[brd]->dpatype;
+ di.info_bdstate = dgnc_Board[brd]->dpastatus;
+ di.info_ioport = 0;
+ di.info_physaddr = (ulong) dgnc_Board[brd]->membase;
+		di.info_physsize = (ulong) dgnc_Board[brd]->membase_end - dgnc_Board[brd]->membase;
+ if (dgnc_Board[brd]->state != BOARD_FAILED)
+ di.info_nports = dgnc_Board[brd]->nasync;
+ else
+ di.info_nports = 0;
+
+ DGNC_UNLOCK(dgnc_Board[brd]->bd_lock, lock_flags);
+
+ DPR_MGMT(("DIGI_GETBD returning type: %x state: %x ports: %x size: %x\n",
+ di.info_bdtype, di.info_bdstate, di.info_nports, di.info_physsize));
+
+ if (copy_to_user(uarg, &di, sizeof (di)))
+ return (-EFAULT);
+
+ break;
+ }
+
+ case DIGI_GET_NI_INFO:
+ {
+ struct channel_t *ch;
+ struct ni_info ni;
+ uchar mstat = 0;
+ uint board = 0;
+ uint channel = 0;
+
+ if (copy_from_user(&ni, uarg, sizeof(struct ni_info))) {
+ return(-EFAULT);
+ }
+
+		DPR_MGMT(("DIGI_GET_NI_INFO asking about board: %d channel: %d\n",
+ ni.board, ni.channel));
+
+ board = ni.board;
+ channel = ni.channel;
+
+ /* Verify boundaries on board */
+		if ((board < 0) || (board >= dgnc_NumBoards) || (dgnc_NumBoards == 0))
+ return (-ENODEV);
+
+ /* Verify boundaries on channel */
+		if ((channel < 0) || (channel >= dgnc_Board[board]->nasync))
+ return (-ENODEV);
+
+ ch = dgnc_Board[board]->channels[channel];
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (-ENODEV);
+
+ memset(&ni, 0, sizeof(ni));
+ ni.board = board;
+ ni.channel = channel;
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ mstat = (ch->ch_mostat | ch->ch_mistat);
+
+ if (mstat & UART_MCR_DTR) {
+ ni.mstat |= TIOCM_DTR;
+ ni.dtr = TIOCM_DTR;
+ }
+ if (mstat & UART_MCR_RTS) {
+ ni.mstat |= TIOCM_RTS;
+ ni.rts = TIOCM_RTS;
+ }
+ if (mstat & UART_MSR_CTS) {
+ ni.mstat |= TIOCM_CTS;
+ ni.cts = TIOCM_CTS;
+ }
+ if (mstat & UART_MSR_RI) {
+ ni.mstat |= TIOCM_RI;
+ ni.ri = TIOCM_RI;
+ }
+ if (mstat & UART_MSR_DCD) {
+ ni.mstat |= TIOCM_CD;
+ ni.dcd = TIOCM_CD;
+ }
+ if (mstat & UART_MSR_DSR)
+ ni.mstat |= TIOCM_DSR;
+
+ ni.iflag = ch->ch_c_iflag;
+ ni.oflag = ch->ch_c_oflag;
+ ni.cflag = ch->ch_c_cflag;
+ ni.lflag = ch->ch_c_lflag;
+
+ if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS)
+ ni.hflow = 1;
+ else
+ ni.hflow = 0;
+
+ if ((ch->ch_flags & CH_STOPI) || (ch->ch_flags & CH_FORCED_STOPI))
+ ni.recv_stopped = 1;
+ else
+ ni.recv_stopped = 0;
+
+ if ((ch->ch_flags & CH_STOP) || (ch->ch_flags & CH_FORCED_STOP))
+ ni.xmit_stopped = 1;
+ else
+ ni.xmit_stopped = 0;
+
+ ni.curtx = ch->ch_txcount;
+ ni.currx = ch->ch_rxcount;
+
+ ni.baud = ch->ch_old_baud;
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ if (copy_to_user(uarg, &ni, sizeof(ni)))
+ return (-EFAULT);
+
+ break;
+ }
+
+
+ }
+
+ DPR_MGMT(("dgnc_mgmt_ioctl finish.\n"));
+
+ return 0;
+}
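+
+/*
+ * Illustrative userspace usage (editorial, not part of the driver): a
+ * management tool would open the "dgnc_mgmt" node created in dgnc_start()
+ * and issue these ioctls, roughly:
+ *
+ *	struct digi_dinfo ddi;
+ *	int fd = open("/dev/dgnc_mgmt", O_RDWR);
+ *
+ *	if (fd >= 0 && ioctl(fd, DIGI_GETDD, &ddi) == 0)
+ *		printf("%ld boards, driver %s\n",
+ *		       (long) ddi.dinfo_nboards, ddi.dinfo_version);
+ *
+ * The /dev node name is an assumption about how udev names the "dgnc_mgmt"
+ * class device; struct digi_dinfo comes from the Digi headers included above.
+ */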
diff --git a/drivers/staging/dgnc/dgnc_mgmt.h b/drivers/staging/dgnc/dgnc_mgmt.h
new file mode 100644
index 0000000..567f687
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_mgmt.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+#ifndef __DGNC_MGMT_H
+#define __DGNC_MGMT_H
+
+#define MAXMGMTDEVICES 8
+
+int dgnc_mgmt_open(struct inode *inode, struct file *file);
+int dgnc_mgmt_close(struct inode *inode, struct file *file);
+long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+#endif
+
diff --git a/drivers/staging/dgnc/dgnc_neo.c b/drivers/staging/dgnc/dgnc_neo.c
new file mode 100644
index 0000000..8b9e09a
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_neo.c
@@ -0,0 +1,1974 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/sched.h> /* For jiffies, task states */
+#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
+#include <linux/delay.h> /* For udelay */
+#include <asm/io.h> /* For read[bwl]/write[bwl] */
+#include <linux/serial.h> /* For struct async_serial */
+#include <linux/serial_reg.h> /* For the various UART offsets */
+
+#include "dgnc_driver.h" /* Driver main header file */
+#include "dgnc_neo.h" /* Our header file */
+#include "dgnc_tty.h"
+#include "dgnc_trace.h"
+
+static inline void neo_parse_lsr(struct board_t *brd, uint port);
+static inline void neo_parse_isr(struct board_t *brd, uint port);
+static void neo_copy_data_from_uart_to_queue(struct channel_t *ch);
+static inline void neo_clear_break(struct channel_t *ch, int force);
+static inline void neo_set_cts_flow_control(struct channel_t *ch);
+static inline void neo_set_rts_flow_control(struct channel_t *ch);
+static inline void neo_set_ixon_flow_control(struct channel_t *ch);
+static inline void neo_set_ixoff_flow_control(struct channel_t *ch);
+static inline void neo_set_no_output_flow_control(struct channel_t *ch);
+static inline void neo_set_no_input_flow_control(struct channel_t *ch);
+static inline void neo_set_new_start_stop_chars(struct channel_t *ch);
+static void neo_parse_modem(struct channel_t *ch, uchar signals);
+static void neo_tasklet(unsigned long data);
+static void neo_vpd(struct board_t *brd);
+static void neo_uart_init(struct channel_t *ch);
+static void neo_uart_off(struct channel_t *ch);
+static int neo_drain(struct tty_struct *tty, uint seconds);
+static void neo_param(struct tty_struct *tty);
+static void neo_assert_modem_signals(struct channel_t *ch);
+static void neo_flush_uart_write(struct channel_t *ch);
+static void neo_flush_uart_read(struct channel_t *ch);
+static void neo_disable_receiver(struct channel_t *ch);
+static void neo_enable_receiver(struct channel_t *ch);
+static void neo_send_break(struct channel_t *ch, int msecs);
+static void neo_send_start_character(struct channel_t *ch);
+static void neo_send_stop_character(struct channel_t *ch);
+static void neo_copy_data_from_queue_to_uart(struct channel_t *ch);
+static uint neo_get_uart_bytes_left(struct channel_t *ch);
+static void neo_send_immediate_char(struct channel_t *ch, unsigned char c);
+static irqreturn_t neo_intr(int irq, void *voidbrd);
+
+
+struct board_ops dgnc_neo_ops = {
+ .tasklet = neo_tasklet,
+ .intr = neo_intr,
+ .uart_init = neo_uart_init,
+ .uart_off = neo_uart_off,
+ .drain = neo_drain,
+ .param = neo_param,
+ .vpd = neo_vpd,
+ .assert_modem_signals = neo_assert_modem_signals,
+ .flush_uart_write = neo_flush_uart_write,
+ .flush_uart_read = neo_flush_uart_read,
+ .disable_receiver = neo_disable_receiver,
+ .enable_receiver = neo_enable_receiver,
+ .send_break = neo_send_break,
+ .send_start_character = neo_send_start_character,
+ .send_stop_character = neo_send_stop_character,
+ .copy_data_from_queue_to_uart = neo_copy_data_from_queue_to_uart,
+ .get_uart_bytes_left = neo_get_uart_bytes_left,
+ .send_immediate_char = neo_send_immediate_char
+};
+
+static uint dgnc_offset_table[8] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
+
+
+/*
+ * This function allows calls to ensure that all outstanding
+ * PCI writes have been completed, by doing a PCI read against
+ * a non-destructive, read-only location on the Neo card.
+ *
+ * In this case, we are reading the DVID (Read-only Device Identification)
+ * value of the Neo card.
+ */
+static inline void neo_pci_posting_flush(struct board_t *bd)
+{
+ readb(bd->re_map_membase + 0x8D);
+}
+
+static inline void neo_set_cts_flow_control(struct channel_t *ch)
+{
+ uchar ier = readb(&ch->ch_neo_uart->ier);
+ uchar efr = readb(&ch->ch_neo_uart->efr);
+
+
+ DPR_PARAM(("Setting CTSFLOW\n"));
+
+ /* Turn on auto CTS flow control */
+#if 1
+ ier |= (UART_17158_IER_CTSDSR);
+#else
+ ier &= ~(UART_17158_IER_CTSDSR);
+#endif
+
+ efr |= (UART_17158_EFR_ECB | UART_17158_EFR_CTSDSR);
+
+ /* Turn off auto Xon flow control */
+ efr &= ~(UART_17158_EFR_IXON);
+
+	/* Why? Because Exar's spec says we have to zero it out before setting it */
+ writeb(0, &ch->ch_neo_uart->efr);
+
+ /* Turn on UART enhanced bits */
+ writeb(efr, &ch->ch_neo_uart->efr);
+
+ /* Turn on table D, with 8 char hi/low watermarks */
+ writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY), &ch->ch_neo_uart->fctr);
+
+ /* Feed the UART our trigger levels */
+ writeb(8, &ch->ch_neo_uart->tfifo);
+ ch->ch_t_tlevel = 8;
+
+ writeb(ier, &ch->ch_neo_uart->ier);
+
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+
+static inline void neo_set_rts_flow_control(struct channel_t *ch)
+{
+ uchar ier = readb(&ch->ch_neo_uart->ier);
+ uchar efr = readb(&ch->ch_neo_uart->efr);
+
+ DPR_PARAM(("Setting RTSFLOW\n"));
+
+ /* Turn on auto RTS flow control */
+#if 1
+ ier |= (UART_17158_IER_RTSDTR);
+#else
+ ier &= ~(UART_17158_IER_RTSDTR);
+#endif
+ efr |= (UART_17158_EFR_ECB | UART_17158_EFR_RTSDTR);
+
+ /* Turn off auto Xoff flow control */
+ ier &= ~(UART_17158_IER_XOFF);
+ efr &= ~(UART_17158_EFR_IXOFF);
+
+	/* Why? Because Exar's spec says we have to zero it out before setting it */
+ writeb(0, &ch->ch_neo_uart->efr);
+
+ /* Turn on UART enhanced bits */
+ writeb(efr, &ch->ch_neo_uart->efr);
+
+ writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY), &ch->ch_neo_uart->fctr);
+ ch->ch_r_watermark = 4;
+
+ writeb(32, &ch->ch_neo_uart->rfifo);
+ ch->ch_r_tlevel = 32;
+
+ writeb(ier, &ch->ch_neo_uart->ier);
+
+ /*
+ * From the Neo UART spec sheet:
+ * The auto RTS/DTR function must be started by asserting
+	 * the RTS/DTR# output pin (setting MCR bit-0 or bit-1 to
+	 * logic 1) after it is enabled.
+ */
+ ch->ch_mostat |= (UART_MCR_RTS);
+
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+
+static inline void neo_set_ixon_flow_control(struct channel_t *ch)
+{
+ uchar ier = readb(&ch->ch_neo_uart->ier);
+ uchar efr = readb(&ch->ch_neo_uart->efr);
+
+ DPR_PARAM(("Setting IXON FLOW\n"));
+
+ /* Turn off auto CTS flow control */
+ ier &= ~(UART_17158_IER_CTSDSR);
+ efr &= ~(UART_17158_EFR_CTSDSR);
+
+ /* Turn on auto Xon flow control */
+ efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXON);
+
+	/* Why? Because Exar's spec says we have to zero it out before setting it */
+ writeb(0, &ch->ch_neo_uart->efr);
+
+ /* Turn on UART enhanced bits */
+ writeb(efr, &ch->ch_neo_uart->efr);
+
+ writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
+ ch->ch_r_watermark = 4;
+
+ writeb(32, &ch->ch_neo_uart->rfifo);
+ ch->ch_r_tlevel = 32;
+
+ /* Tell UART what start/stop chars it should be looking for */
+ writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1);
+ writeb(0, &ch->ch_neo_uart->xonchar2);
+
+ writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1);
+ writeb(0, &ch->ch_neo_uart->xoffchar2);
+
+ writeb(ier, &ch->ch_neo_uart->ier);
+
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+
+static inline void neo_set_ixoff_flow_control(struct channel_t *ch)
+{
+ uchar ier = readb(&ch->ch_neo_uart->ier);
+ uchar efr = readb(&ch->ch_neo_uart->efr);
+
+ DPR_PARAM(("Setting IXOFF FLOW\n"));
+
+ /* Turn off auto RTS flow control */
+ ier &= ~(UART_17158_IER_RTSDTR);
+ efr &= ~(UART_17158_EFR_RTSDTR);
+
+ /* Turn on auto Xoff flow control */
+ ier |= (UART_17158_IER_XOFF);
+ efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXOFF);
+
+	/* Why? Because Exar's spec says we have to zero it out before setting it */
+ writeb(0, &ch->ch_neo_uart->efr);
+
+ /* Turn on UART enhanced bits */
+ writeb(efr, &ch->ch_neo_uart->efr);
+
+ /* Turn on table D, with 8 char hi/low watermarks */
+ writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
+
+ writeb(8, &ch->ch_neo_uart->tfifo);
+ ch->ch_t_tlevel = 8;
+
+ /* Tell UART what start/stop chars it should be looking for */
+ writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1);
+ writeb(0, &ch->ch_neo_uart->xonchar2);
+
+ writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1);
+ writeb(0, &ch->ch_neo_uart->xoffchar2);
+
+ writeb(ier, &ch->ch_neo_uart->ier);
+
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+
+static inline void neo_set_no_input_flow_control(struct channel_t *ch)
+{
+ uchar ier = readb(&ch->ch_neo_uart->ier);
+ uchar efr = readb(&ch->ch_neo_uart->efr);
+
+ DPR_PARAM(("Unsetting Input FLOW\n"));
+
+ /* Turn off auto RTS flow control */
+ ier &= ~(UART_17158_IER_RTSDTR);
+ efr &= ~(UART_17158_EFR_RTSDTR);
+
+ /* Turn off auto Xoff flow control */
+ ier &= ~(UART_17158_IER_XOFF);
+ if (ch->ch_c_iflag & IXON)
+ efr &= ~(UART_17158_EFR_IXOFF);
+ else
+ efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXOFF);
+
+
+	/* Why? Because Exar's spec says we have to zero it out before setting it */
+ writeb(0, &ch->ch_neo_uart->efr);
+
+ /* Turn on UART enhanced bits */
+ writeb(efr, &ch->ch_neo_uart->efr);
+
+ /* Turn on table D, with 8 char hi/low watermarks */
+ writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
+
+ ch->ch_r_watermark = 0;
+
+ writeb(16, &ch->ch_neo_uart->tfifo);
+ ch->ch_t_tlevel = 16;
+
+ writeb(16, &ch->ch_neo_uart->rfifo);
+ ch->ch_r_tlevel = 16;
+
+ writeb(ier, &ch->ch_neo_uart->ier);
+
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+
+static inline void neo_set_no_output_flow_control(struct channel_t *ch)
+{
+ uchar ier = readb(&ch->ch_neo_uart->ier);
+ uchar efr = readb(&ch->ch_neo_uart->efr);
+
+ DPR_PARAM(("Unsetting Output FLOW\n"));
+
+ /* Turn off auto CTS flow control */
+ ier &= ~(UART_17158_IER_CTSDSR);
+ efr &= ~(UART_17158_EFR_CTSDSR);
+
+ /* Turn off auto Xon flow control */
+ if (ch->ch_c_iflag & IXOFF)
+ efr &= ~(UART_17158_EFR_IXON);
+ else
+ efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXON);
+
+	/* Why? Because Exar's spec says we have to zero it out before setting it */
+ writeb(0, &ch->ch_neo_uart->efr);
+
+ /* Turn on UART enhanced bits */
+ writeb(efr, &ch->ch_neo_uart->efr);
+
+ /* Turn on table D, with 8 char hi/low watermarks */
+ writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
+
+ ch->ch_r_watermark = 0;
+
+ writeb(16, &ch->ch_neo_uart->tfifo);
+ ch->ch_t_tlevel = 16;
+
+ writeb(16, &ch->ch_neo_uart->rfifo);
+ ch->ch_r_tlevel = 16;
+
+ writeb(ier, &ch->ch_neo_uart->ier);
+
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+
+/* change UARTs start/stop chars */
+static inline void neo_set_new_start_stop_chars(struct channel_t *ch)
+{
+
+ /* if hardware flow control is set, then skip this whole thing */
+ if (ch->ch_digi.digi_flags & (CTSPACE | RTSPACE) || ch->ch_c_cflag & CRTSCTS)
+ return;
+
+ DPR_PARAM(("In new start stop chars\n"));
+
+ /* Tell UART what start/stop chars it should be looking for */
+ writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1);
+ writeb(0, &ch->ch_neo_uart->xonchar2);
+
+ writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1);
+ writeb(0, &ch->ch_neo_uart->xoffchar2);
+
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+
+/*
+ * No locks are assumed to be held when calling this function.
+ */
+static inline void neo_clear_break(struct channel_t *ch, int force)
+{
+ ulong lock_flags;
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /* Bail if we aren't currently sending a break. */
+ if (!ch->ch_stop_sending_break) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+ /* Turn break off, and unset some variables */
+ if (ch->ch_flags & CH_BREAK_SENDING) {
+ if ((jiffies >= ch->ch_stop_sending_break) || force) {
+ uchar temp = readb(&ch->ch_neo_uart->lcr);
+ writeb((temp & ~UART_LCR_SBC), &ch->ch_neo_uart->lcr);
+ neo_pci_posting_flush(ch->ch_bd);
+ ch->ch_flags &= ~(CH_BREAK_SENDING);
+ ch->ch_stop_sending_break = 0;
+ DPR_IOCTL(("Finishing UART_LCR_SBC! finished: %lx\n", jiffies));
+ }
+ }
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+}
+
+
+/*
+ * Parse the ISR register.
+ */
+static inline void neo_parse_isr(struct board_t *brd, uint port)
+{
+ struct channel_t *ch;
+ uchar isr;
+ uchar cause;
+ ulong lock_flags;
+
+ if (!brd || brd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ if (port > brd->maxports)
+ return;
+
+ ch = brd->channels[port];
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ /* Here we try to figure out what caused the interrupt to happen */
+ while (1) {
+
+ isr = readb(&ch->ch_neo_uart->isr_fcr);
+
+ /* Bail if no pending interrupt */
+ if (isr & UART_IIR_NO_INT) {
+ break;
+ }
+
+ /*
+		 * Yank off the upper 2 bits, which just show that the FIFOs are enabled.
+ */
+ isr &= ~(UART_17158_IIR_FIFO_ENABLED);
+
+ DPR_INTR(("%s:%d isr: %x\n", __FILE__, __LINE__, isr));
+
+ if (isr & (UART_17158_IIR_RDI_TIMEOUT | UART_IIR_RDI)) {
+ /* Read data from uart -> queue */
+ brd->intr_rx++;
+ ch->ch_intr_rx++;
+ neo_copy_data_from_uart_to_queue(ch);
+
+ /* Call our tty layer to enforce queue flow control if needed. */
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ dgnc_check_queue_flow_control(ch);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ }
+
+ if (isr & UART_IIR_THRI) {
+ brd->intr_tx++;
+ ch->ch_intr_tx++;
+ /* Transfer data (if any) from Write Queue -> UART. */
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ neo_copy_data_from_queue_to_uart(ch);
+ }
+
+ if (isr & UART_17158_IIR_XONXOFF) {
+ cause = readb(&ch->ch_neo_uart->xoffchar1);
+
+ DPR_INTR(("Port %d. Got ISR_XONXOFF: cause:%x\n", port, cause));
+
+ /*
+ * Since the UART detected either an XON or
+ * XOFF match, we need to figure out which
+ * one it was, so we can suspend or resume data flow.
+ */
+ if (cause == UART_17158_XON_DETECT) {
+				/* If output is stopped right now, resume it */
+ if (brd->channels[port]->ch_flags & CH_STOP) {
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_flags &= ~(CH_STOP);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ }
+ DPR_INTR(("Port %d. XON detected in incoming data\n", port));
+ }
+ else if (cause == UART_17158_XOFF_DETECT) {
+ if (!(brd->channels[port]->ch_flags & CH_STOP)) {
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_flags |= CH_STOP;
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ DPR_INTR(("Setting CH_STOP\n"));
+ }
+ DPR_INTR(("Port: %d. XOFF detected in incoming data\n", port));
+ }
+ }
+
+ if (isr & UART_17158_IIR_HWFLOW_STATE_CHANGE) {
+ /*
+ * If we get here, this means the hardware is doing auto flow control.
+ * Check to see whether RTS/DTR or CTS/DSR caused this interrupt.
+ */
+ brd->intr_modem++;
+ ch->ch_intr_modem++;
+ cause = readb(&ch->ch_neo_uart->mcr);
+ /* Which pin is doing auto flow? RTS or DTR? */
+ if ((cause & 0x4) == 0) {
+ if (cause & UART_MCR_RTS) {
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_mostat |= UART_MCR_RTS;
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ }
+ else {
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_mostat &= ~(UART_MCR_RTS);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ }
+ } else {
+ if (cause & UART_MCR_DTR) {
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_mostat |= UART_MCR_DTR;
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ }
+ else {
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_mostat &= ~(UART_MCR_DTR);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ }
+ }
+ }
+
+ /* Parse any modem signal changes */
+ DPR_INTR(("MOD_STAT: sending to parse_modem_sigs\n"));
+ neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
+ }
+}
+
+
+static inline void neo_parse_lsr(struct board_t *brd, uint port)
+{
+ struct channel_t *ch;
+ int linestatus;
+ ulong lock_flags;
+
+ if (!brd)
+ return;
+
+ if (brd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ if (port > brd->maxports)
+ return;
+
+ ch = brd->channels[port];
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ linestatus = readb(&ch->ch_neo_uart->lsr);
+
+ DPR_INTR(("%s:%d port: %d linestatus: %x\n", __FILE__, __LINE__, port, linestatus));
+
+ ch->ch_cached_lsr |= linestatus;
+
+ if (ch->ch_cached_lsr & UART_LSR_DR) {
+ brd->intr_rx++;
+ ch->ch_intr_rx++;
+ /* Read data from uart -> queue */
+ neo_copy_data_from_uart_to_queue(ch);
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ dgnc_check_queue_flow_control(ch);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ }
+
+ /*
+ * This is a special flag. It indicates that at least 1
+ * RX error (parity, framing, or break) has happened.
+	 * Mark this in our struct, which tells us we have to do
+	 * the special RX+LSR read for this FIFO load.
+ */
+ if (linestatus & UART_17158_RX_FIFO_DATA_ERROR) {
+ DPR_INTR(("%s:%d Port: %d Got an RX error, need to parse LSR\n",
+ __FILE__, __LINE__, port));
+ }
+
+ /*
+ * The next 3 tests should *NOT* happen, as the above test
+	 * should encapsulate all 3... At least, that's what Exar says.
+ */
+
+ if (linestatus & UART_LSR_PE) {
+ ch->ch_err_parity++;
+ DPR_INTR(("%s:%d Port: %d. PAR ERR!\n", __FILE__, __LINE__, port));
+ }
+
+ if (linestatus & UART_LSR_FE) {
+ ch->ch_err_frame++;
+ DPR_INTR(("%s:%d Port: %d. FRM ERR!\n", __FILE__, __LINE__, port));
+ }
+
+ if (linestatus & UART_LSR_BI) {
+ ch->ch_err_break++;
+ DPR_INTR(("%s:%d Port: %d. BRK INTR!\n", __FILE__, __LINE__, port));
+ }
+
+ if (linestatus & UART_LSR_OE) {
+ /*
+		 * Rx overruns. Exar says that an overrun will NOT corrupt
+		 * the FIFO. It will just replace the holding register
+		 * with this new data byte. So basically just ignore this.
+		 * Probably we should eventually have an overrun stat in our driver...
+ */
+ ch->ch_err_overrun++;
+ DPR_INTR(("%s:%d Port: %d. Rx Overrun!\n", __FILE__, __LINE__, port));
+ }
+
+ if (linestatus & UART_LSR_THRE) {
+ brd->intr_tx++;
+ ch->ch_intr_tx++;
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ /* Transfer data (if any) from Write Queue -> UART. */
+ neo_copy_data_from_queue_to_uart(ch);
+ }
+ else if (linestatus & UART_17158_TX_AND_FIFO_CLR) {
+ brd->intr_tx++;
+ ch->ch_intr_tx++;
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ /* Transfer data (if any) from Write Queue -> UART. */
+ neo_copy_data_from_queue_to_uart(ch);
+ }
+}
+
+
+/*
+ * neo_param()
+ * Send any/all changes to the line to the UART.
+ */
+static void neo_param(struct tty_struct *tty)
+{
+ uchar lcr = 0;
+ uchar uart_lcr = 0;
+ uchar ier = 0;
+ uchar uart_ier = 0;
+ uint baud = 9600;
+ int quot = 0;
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!tty || tty->magic != TTY_MAGIC) {
+ return;
+ }
+
+ un = (struct un_t *) tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC) {
+ return;
+ }
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) {
+ return;
+ }
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC) {
+ return;
+ }
+
+ DPR_PARAM(("param start: tdev: %x cflags: %x oflags: %x iflags: %x\n",
+ ch->ch_tun.un_dev, ch->ch_c_cflag, ch->ch_c_oflag, ch->ch_c_iflag));
+
+ /*
+ * If baud rate is zero, flush queues, and set mval to drop DTR.
+ */
+ if ((ch->ch_c_cflag & (CBAUD)) == 0) {
+ ch->ch_r_head = ch->ch_r_tail = 0;
+ ch->ch_e_head = ch->ch_e_tail = 0;
+ ch->ch_w_head = ch->ch_w_tail = 0;
+
+ neo_flush_uart_write(ch);
+ neo_flush_uart_read(ch);
+
+ /* The baudrate is B0 so all modem lines are to be dropped. */
+ ch->ch_flags |= (CH_BAUD0);
+ ch->ch_mostat &= ~(UART_MCR_RTS | UART_MCR_DTR);
+ neo_assert_modem_signals(ch);
+ ch->ch_old_baud = 0;
+ return;
+
+ } else if (ch->ch_custom_speed) {
+
+ baud = ch->ch_custom_speed;
+ /* Handle transition from B0 */
+ if (ch->ch_flags & CH_BAUD0) {
+ ch->ch_flags &= ~(CH_BAUD0);
+
+ /*
+ * Bring back up RTS and DTR...
+ * Also handle RTS or DTR toggle if set.
+ */
+ if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE))
+ ch->ch_mostat |= (UART_MCR_RTS);
+ if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE))
+ ch->ch_mostat |= (UART_MCR_DTR);
+ }
+ } else {
+ int iindex = 0;
+ int jindex = 0;
+
+ ulong bauds[4][16] = {
+ { /* slowbaud */
+ 0, 50, 75, 110,
+ 134, 150, 200, 300,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 },
+ { /* slowbaud & CBAUDEX */
+ 0, 57600, 115200, 230400,
+ 460800, 150, 200, 921600,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 },
+ { /* fastbaud */
+ 0, 57600, 76800, 115200,
+ 131657, 153600, 230400, 460800,
+ 921600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 },
+ { /* fastbaud & CBAUDEX */
+ 0, 57600, 115200, 230400,
+ 460800, 150, 200, 921600,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 }
+ };
+
+ /* Only use the TXPrint baud rate if the terminal unit is NOT open */
+ if (!(ch->ch_tun.un_flags & UN_ISOPEN) && (un->un_type == DGNC_PRINT))
+ baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
+ else
+ baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;
+
+ if (ch->ch_c_cflag & CBAUDEX)
+ iindex = 1;
+
+ if (ch->ch_digi.digi_flags & DIGI_FAST)
+ iindex += 2;
+
+ jindex = baud;
+
+ if ((iindex >= 0) && (iindex < 4) && (jindex >= 0) && (jindex < 16)) {
+ baud = bauds[iindex][jindex];
+ } else {
+ DPR_IOCTL(("baud indices were out of range (%d)(%d)",
+ iindex, jindex));
+ baud = 0;
+ }
+
+ if (baud == 0)
+ baud = 9600;
+
+ /* Handle transition from B0 */
+ if (ch->ch_flags & CH_BAUD0) {
+ ch->ch_flags &= ~(CH_BAUD0);
+
+ /*
+ * Bring back up RTS and DTR...
+ * Also handle RTS or DTR toggle if set.
+ */
+ if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE))
+ ch->ch_mostat |= (UART_MCR_RTS);
+ if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE))
+ ch->ch_mostat |= (UART_MCR_DTR);
+ }
+ }
+
+ if (ch->ch_c_cflag & PARENB) {
+ lcr |= UART_LCR_PARITY;
+ }
+
+ if (!(ch->ch_c_cflag & PARODD)) {
+ lcr |= UART_LCR_EPAR;
+ }
+
+ /*
+ * Not all platforms support mark/space parity,
+ * so this will hide behind an ifdef.
+ */
+#ifdef CMSPAR
+ if (ch->ch_c_cflag & CMSPAR)
+ lcr |= UART_LCR_SPAR;
+#endif
+
+ if (ch->ch_c_cflag & CSTOPB)
+ lcr |= UART_LCR_STOP;
+
+ switch (ch->ch_c_cflag & CSIZE) {
+ case CS5:
+ lcr |= UART_LCR_WLEN5;
+ break;
+ case CS6:
+ lcr |= UART_LCR_WLEN6;
+ break;
+ case CS7:
+ lcr |= UART_LCR_WLEN7;
+ break;
+ case CS8:
+ default:
+ lcr |= UART_LCR_WLEN8;
+ break;
+ }
+
+ ier = uart_ier = readb(&ch->ch_neo_uart->ier);
+ uart_lcr = readb(&ch->ch_neo_uart->lcr);
+
+ if (baud == 0)
+ baud = 9600;
+
+ quot = ch->ch_bd->bd_dividend / baud;
+
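+	/*
+	 * Latch the new divisor: set DLAB so the txrx/ier offsets map to
+	 * DLL/DLM, write the low and high divisor bytes, then restore LCR.
+	 */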
+ if (quot != 0 && ch->ch_old_baud != baud) {
+ ch->ch_old_baud = baud;
+ writeb(UART_LCR_DLAB, &ch->ch_neo_uart->lcr);
+ writeb((quot & 0xff), &ch->ch_neo_uart->txrx);
+ writeb((quot >> 8), &ch->ch_neo_uart->ier);
+ writeb(lcr, &ch->ch_neo_uart->lcr);
+ }
+
+ if (uart_lcr != lcr)
+ writeb(lcr, &ch->ch_neo_uart->lcr);
+
+ if (ch->ch_c_cflag & CREAD) {
+ ier |= (UART_IER_RDI | UART_IER_RLSI);
+ }
+ else {
+ ier &= ~(UART_IER_RDI | UART_IER_RLSI);
+ }
+
+ /*
+ * Have the UART interrupt on modem signal changes ONLY when
+ * we are in hardware flow control mode, or CLOCAL/FORCEDCD is not set.
+ */
+ if ((ch->ch_digi.digi_flags & CTSPACE) || (ch->ch_digi.digi_flags & RTSPACE) ||
+ (ch->ch_c_cflag & CRTSCTS) || !(ch->ch_digi.digi_flags & DIGI_FORCEDCD) ||
+ !(ch->ch_c_cflag & CLOCAL))
+ {
+ ier |= UART_IER_MSI;
+ }
+ else {
+ ier &= ~UART_IER_MSI;
+ }
+
+ ier |= UART_IER_THRI;
+
+ if (ier != uart_ier)
+ writeb(ier, &ch->ch_neo_uart->ier);
+
+ /* Set new start/stop chars */
+ neo_set_new_start_stop_chars(ch);
+
+ if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS) {
+ neo_set_cts_flow_control(ch);
+ }
+ else if (ch->ch_c_iflag & IXON) {
+ /* If start/stop is set to disable, then we should disable flow control */
+ if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE))
+ neo_set_no_output_flow_control(ch);
+ else
+ neo_set_ixon_flow_control(ch);
+ }
+ else {
+ neo_set_no_output_flow_control(ch);
+ }
+
+ if (ch->ch_digi.digi_flags & RTSPACE || ch->ch_c_cflag & CRTSCTS) {
+ neo_set_rts_flow_control(ch);
+ }
+ else if (ch->ch_c_iflag & IXOFF) {
+ /* If start/stop is set to disable, then we should disable flow control */
+ if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE))
+ neo_set_no_input_flow_control(ch);
+ else
+ neo_set_ixoff_flow_control(ch);
+ }
+ else {
+ neo_set_no_input_flow_control(ch);
+ }
+
+ /*
+ * Adjust the RX FIFO Trigger level if baud is less than 9600.
+ * Not exactly elegant, but this is needed because of the Exar chip's
+ * delay on firing off the RX FIFO interrupt on slower baud rates.
+ */
+ if (baud < 9600) {
+ writeb(1, &ch->ch_neo_uart->rfifo);
+ ch->ch_r_tlevel = 1;
+ }
+
+ neo_assert_modem_signals(ch);
+
+ /* Get current status of the modem signals now */
+ neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
+}
+
+
+/*
+ * Our board poller function.
+ */
+static void neo_tasklet(unsigned long data)
+{
+ struct board_t *bd = (struct board_t *) data;
+ struct channel_t *ch;
+ ulong lock_flags;
+ int i;
+ int state = 0;
+ int ports = 0;
+
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC) {
+ APR(("poll_tasklet() - NULL or bad bd.\n"));
+ return;
+ }
+
+ /* Cache a couple board values */
+ DGNC_LOCK(bd->bd_lock, lock_flags);
+ state = bd->state;
+ ports = bd->nasync;
+ DGNC_UNLOCK(bd->bd_lock, lock_flags);
+
+ /*
+ * Do NOT allow the interrupt routine to read the intr registers
+	 * until we release this lock.
+ */
+ DGNC_LOCK(bd->bd_intr_lock, lock_flags);
+
+ /*
+ * If board is ready, parse deeper to see if there is anything to do.
+ */
+ if ((state == BOARD_READY) && (ports > 0)) {
+ /* Loop on each port */
+ for (i = 0; i < ports; i++) {
+ ch = bd->channels[i];
+
+ /* Just being careful... */
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ continue;
+
+ /*
+ * NOTE: Remember you CANNOT hold any channel
+ * locks when calling the input routine.
+ *
+			 * During input processing, it's possible we
+			 * will call the Linux line discipline, which might,
+			 * in turn, do a callback right back into us, resulting
+ * in us trying to grab the channel lock twice!
+ */
+ dgnc_input(ch);
+
+ /*
+ * Channel lock is grabbed and then released
+ * inside both of these routines, but neither
+ * call anything else that could call back into us.
+ */
+ neo_copy_data_from_queue_to_uart(ch);
+ dgnc_wakeup_writes(ch);
+
+ /*
+			 * Call the carrier function, in case something
+ * has changed.
+ */
+ dgnc_carrier(ch);
+
+ /*
+ * Check to see if we need to turn off a sending break.
+ * The timing check is done inside clear_break()
+ */
+ if (ch->ch_stop_sending_break)
+ neo_clear_break(ch, 0);
+ }
+ }
+
+ /* Allow interrupt routine to access the interrupt register again */
+ DGNC_UNLOCK(bd->bd_intr_lock, lock_flags);
+
+}
+
+
+/*
+ * dgnc_neo_intr()
+ *
+ * Neo specific interrupt handler.
+ */
+static irqreturn_t neo_intr(int irq, void *voidbrd)
+{
+ struct board_t *brd = (struct board_t *) voidbrd;
+ struct channel_t *ch;
+ int port = 0;
+ int type = 0;
+ int current_port;
+ u32 tmp;
+ u32 uart_poll;
+ unsigned long lock_flags;
+ unsigned long lock_flags2;
+
+ if (!brd) {
+ APR(("Received interrupt (%d) with null board associated\n", irq));
+ return IRQ_NONE;
+ }
+
+ /*
+	 * Check to make sure it's for us.
+ */
+ if (brd->magic != DGNC_BOARD_MAGIC) {
+ APR(("Received interrupt (%d) with a board pointer that wasn't ours!\n", irq));
+ return IRQ_NONE;
+ }
+
+ brd->intr_count++;
+
+ /* Lock out the slow poller from running on this board. */
+ DGNC_LOCK(brd->bd_intr_lock, lock_flags);
+
+ /*
+ * Read in "extended" IRQ information from the 32bit Neo register.
+ * Bits 0-7: What port triggered the interrupt.
+ * Bits 8-31: Each 3bits indicate what type of interrupt occurred.
+ */
+ uart_poll = readl(brd->re_map_membase + UART_17158_POLL_ADDR_OFFSET);
+
+ DPR_INTR(("%s:%d uart_poll: %x\n", __FILE__, __LINE__, uart_poll));
+
+ /*
+ * If 0, no interrupts pending.
+ * This can happen if the IRQ is shared among a couple Neo/Classic boards.
+ */
+ if (!uart_poll) {
+		DPR_INTR(("Kernel interrupted us, but there are no pending interrupts...\n"));
+ DGNC_UNLOCK(brd->bd_intr_lock, lock_flags);
+ return IRQ_NONE;
+ }
+
+ /* At this point, we have at least SOMETHING to service, dig further... */
+
+ current_port = 0;
+
+ /* Loop on each port */
+ while ((uart_poll & 0xff) != 0) {
+
+ tmp = uart_poll;
+
+ /* Check current port to see if it has interrupt pending */
+ if ((tmp & dgnc_offset_table[current_port]) != 0) {
+ port = current_port;
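+			/* Each port's interrupt type lives in a 3-bit field starting at bit 8 */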
+ type = tmp >> (8 + (port * 3));
+ type &= 0x7;
+ } else {
+ current_port++;
+ continue;
+ }
+
+ DPR_INTR(("%s:%d port: %x type: %x\n", __FILE__, __LINE__, port, type));
+
+ /* Remove this port + type from uart_poll */
+ uart_poll &= ~(dgnc_offset_table[port]);
+
+ if (!type) {
+			/* If no type, just ignore it, and move on to the next port */
+ DPR_INTR(("Interrupt with no type! port: %d\n", port));
+ continue;
+ }
+
+ /* Switch on type of interrupt we have */
+ switch (type) {
+
+ case UART_17158_RXRDY_TIMEOUT:
+ /*
+ * RXRDY Time-out is cleared by reading data in the
+ * RX FIFO until it falls below the trigger level.
+ */
+
+ /* Verify the port is in range. */
+ if (port > brd->nasync)
+ continue;
+
+ ch = brd->channels[port];
+ neo_copy_data_from_uart_to_queue(ch);
+
+ /* Call our tty layer to enforce queue flow control if needed. */
+ DGNC_LOCK(ch->ch_lock, lock_flags2);
+ dgnc_check_queue_flow_control(ch);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags2);
+
+ continue;
+
+ case UART_17158_RX_LINE_STATUS:
+ /*
+ * RXRDY and RX LINE Status (logic OR of LSR[4:1])
+ */
+ neo_parse_lsr(brd, port);
+ continue;
+
+ case UART_17158_TXRDY:
+ /*
+ * TXRDY interrupt clears after reading ISR register for the UART channel.
+ */
+
+ /*
+ * Yes, this is odd...
+			 * Why would I check EVERY possible type of
+			 * interrupt, when we know it's TXRDY?
+			 * Because for some reason, even though we got triggered for TXRDY,
+			 * it is occasionally wrong. Instead of TX, which
+			 * it should be, I was getting things like RXRDY too. Weird.
+ */
+ neo_parse_isr(brd, port);
+ continue;
+
+ case UART_17158_MSR:
+ /*
+ * MSR or flow control was seen.
+ */
+ neo_parse_isr(brd, port);
+ continue;
+
+ default:
+ /*
+ * The UART triggered us with a bogus interrupt type.
+ * It appears the Exar chip, when REALLY bogged down, will throw
+			 * these once in a while.
+			 * It's harmless; just ignore it and move on.
+ */
+ DPR_INTR(("%s:%d Unknown Interrupt type: %x\n", __FILE__, __LINE__, type));
+ continue;
+ }
+ }
+
+ /*
+ * Schedule tasklet to more in-depth servicing at a better time.
+ */
+ tasklet_schedule(&brd->helper_tasklet);
+
+ DGNC_UNLOCK(brd->bd_intr_lock, lock_flags);
+
+ DPR_INTR(("dgnc_intr finish.\n"));
+ return IRQ_HANDLED;
+}
+
+
+/*
+ * Neo specific way of turning off the receiver.
+ * Used as a way to enforce queue flow control when in
+ * hardware flow control mode.
+ */
+static void neo_disable_receiver(struct channel_t *ch)
+{
+ uchar tmp = readb(&ch->ch_neo_uart->ier);
+ tmp &= ~(UART_IER_RDI);
+ writeb(tmp, &ch->ch_neo_uart->ier);
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+
+/*
+ * Neo specific way of turning on the receiver.
+ * Used as a way to stop enforcing queue flow control when in
+ * hardware flow control mode.
+ */
+static void neo_enable_receiver(struct channel_t *ch)
+{
+ uchar tmp = readb(&ch->ch_neo_uart->ier);
+ tmp |= (UART_IER_RDI);
+ writeb(tmp, &ch->ch_neo_uart->ier);
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+
+static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
+{
+ int qleft = 0;
+ uchar linestatus = 0;
+ uchar error_mask = 0;
+ int n = 0;
+ int total = 0;
+ ushort head;
+ ushort tail;
+ ulong lock_flags;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /* cache head and tail of queue */
+ head = ch->ch_r_head & RQUEUEMASK;
+ tail = ch->ch_r_tail & RQUEUEMASK;
+
+ /* Get our cached LSR */
+ linestatus = ch->ch_cached_lsr;
+ ch->ch_cached_lsr = 0;
+
+ /* Store how much space we have left in the queue */
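+	/* (one slot is kept unused so a full queue isn't mistaken for an empty one) */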
+ if ((qleft = tail - head - 1) < 0)
+ qleft += RQUEUEMASK + 1;
+
+ /*
+ * If the UART is not in FIFO mode, force the FIFO copy to
+ * NOT be run, by setting total to 0.
+ *
+ * On the other hand, if the UART IS in FIFO mode, then ask
+ * the UART to give us an approximation of data it has RX'ed.
+ */
+ if (!(ch->ch_flags & CH_FIFO_ENABLED))
+ total = 0;
+ else {
+ total = readb(&ch->ch_neo_uart->rfifo);
+
+ /*
+ * EXAR chip bug - RX FIFO COUNT - Fudge factor.
+ *
+ * This resolves a problem/bug with the Exar chip that sometimes
+ * returns a bogus value in the rfifo register.
+		 * The count can be anywhere from 0-3 bytes "off".
+ * Bizarre, but true.
+ */
+ if ((ch->ch_bd->dvid & 0xf0) >= UART_XR17E158_DVID) {
+ total -= 1;
+ }
+ else {
+ total -= 3;
+ }
+ }
+
+
+ /*
+ * Finally, bound the copy to make sure we don't overflow
+ * our own queue...
+	 * The byte-by-byte copy loop below this one will
+	 * deal with the queue overflow possibility.
+ */
+ total = min(total, qleft);
+
+ while (total > 0) {
+
+ /*
+ * Grab the linestatus register, we need to check
+ * to see if there are any errors in the FIFO.
+ */
+ linestatus = readb(&ch->ch_neo_uart->lsr);
+
+ /*
+ * Break out if there is a FIFO error somewhere.
+ * This will allow us to go byte by byte down below,
+ * finding the exact location of the error.
+ */
+ if (linestatus & UART_17158_RX_FIFO_DATA_ERROR)
+ break;
+
+ /* Make sure we don't go over the end of our queue */
+ n = min(((uint) total), (RQUEUESIZE - (uint) head));
+
+ /*
+ * Cut down n even further if needed, this is to fix
+ * a problem with memcpy_fromio() with the Neo on the
+ * IBM pSeries platform.
+ * 15 bytes max appears to be the magic number.
+ */
+ n = min((uint) n, (uint) 12);
+
+ /*
+ * Since we are grabbing the linestatus register, which
+ * will reset some bits after our read, we need to ensure
+		 * we don't miss our TX FIFO empties.
+ */
+ if (linestatus & (UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR)) {
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+ }
+
+ linestatus = 0;
+
+ /* Copy data from uart to the queue */
+ memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, n);
+ dgnc_sniff_nowait_nolock(ch, "UART READ", ch->ch_rqueue + head, n);
+
+ /*
+		 * Since RX_FIFO_DATA_ERROR was 0, we are guaranteed
+		 * that all the data currently in the FIFO is free of
+		 * breaks and parity/frame/overrun errors.
+ */
+ memset(ch->ch_equeue + head, 0, n);
+
+ /* Add to and flip head if needed */
+ head = (head + n) & RQUEUEMASK;
+ total -= n;
+ qleft -= n;
+ ch->ch_rxcount += n;
+ }
+
+ /*
+ * Create a mask to determine whether we should
+ * insert the character (if any) into our queue.
+ */
+ if (ch->ch_c_iflag & IGNBRK)
+ error_mask |= UART_LSR_BI;
+
+ /*
+ * Now cleanup any leftover bytes still in the UART.
+ * Also deal with any possible queue overflow here as well.
+ */
+ while (1) {
+
+ /*
+		 * It's possible we have a linestatus from the loop above
+ * this, so we "OR" on any extra bits.
+ */
+ linestatus |= readb(&ch->ch_neo_uart->lsr);
+
+ /*
+ * If the chip tells us there is no more data pending to
+ * be read, we can then leave.
+ * But before we do, cache the linestatus, just in case.
+ */
+ if (!(linestatus & UART_LSR_DR)) {
+ ch->ch_cached_lsr = linestatus;
+ break;
+ }
+
+ /* No need to store this bit */
+ linestatus &= ~UART_LSR_DR;
+
+ /*
+ * Since we are grabbing the linestatus register, which
+ * will reset some bits after our read, we need to ensure
+		 * we don't miss our TX FIFO empties.
+ */
+ if (linestatus & (UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR)) {
+ linestatus &= ~(UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR);
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+ }
+
+ /*
+ * Discard character if we are ignoring the error mask.
+ */
+ if (linestatus & error_mask) {
+ uchar discard;
+ linestatus = 0;
+ memcpy_fromio(&discard, &ch->ch_neo_uart->txrxburst, 1);
+ continue;
+ }
+
+ /*
+ * If our queue is full, we have no choice but to drop some data.
+ * The assumption is that HWFLOW or SWFLOW should have stopped
+		 * things well before we got to this point.
+		 *
+		 * I decided that I wanted to ditch the oldest data first;
+		 * I hope that's okay with everyone. Yes? Good.
+ */
+ while (qleft < 1) {
+ DPR_READ(("Queue full, dropping DATA:%x LSR:%x\n",
+ ch->ch_rqueue[tail], ch->ch_equeue[tail]));
+
+ ch->ch_r_tail = tail = (tail + 1) & RQUEUEMASK;
+ ch->ch_err_overrun++;
+ qleft++;
+ }
+
+ memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, 1);
+ ch->ch_equeue[head] = (uchar) linestatus;
+ dgnc_sniff_nowait_nolock(ch, "UART READ", ch->ch_rqueue + head, 1);
+
+ DPR_READ(("DATA/LSR pair: %x %x\n", ch->ch_rqueue[head], ch->ch_equeue[head]));
+
+ /* Ditch any remaining linestatus value. */
+ linestatus = 0;
+
+ /* Add to and flip head if needed */
+ head = (head + 1) & RQUEUEMASK;
+
+ qleft--;
+ ch->ch_rxcount++;
+ }
+
+ /*
+ * Write new final heads to channel structure.
+ */
+ ch->ch_r_head = head & RQUEUEMASK;
+ ch->ch_e_head = head & EQUEUEMASK;
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+}
+
+
+/*
+ * This function basically goes to sleep until it gets signalled
+ * that the port has fully drained (the seconds argument is currently unused).
+ */
+static int neo_drain(struct tty_struct *tty, uint seconds)
+{
+ ulong lock_flags;
+ struct channel_t *ch;
+ struct un_t *un;
+ int rc = 0;
+
+ if (!tty || tty->magic != TTY_MAGIC) {
+ return (-ENXIO);
+ }
+
+ un = (struct un_t *) tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC) {
+ return (-ENXIO);
+ }
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) {
+ return (-ENXIO);
+ }
+
+ DPR_IOCTL(("%d Drain wait started.\n", __LINE__));
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ un->un_flags |= UN_EMPTY;
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ /*
+ * Go to sleep waiting for the tty layer to wake me back up when
+ * the empty flag goes away.
+ *
+ * NOTE: TODO: Do something with time passed in.
+ */
+ rc = wait_event_interruptible(un->un_flags_wait, ((un->un_flags & UN_EMPTY) == 0));
+
+	/* If rc is non-zero, the user ctrl-c'ed us */
+ if (rc) {
+ DPR_IOCTL(("%d Drain - User ctrl c'ed\n", __LINE__));
+ }
+ else {
+ DPR_IOCTL(("%d Drain wait finished.\n", __LINE__));
+ }
+
+ return (rc);
+}
+
+
+/*
+ * Flush the WRITE FIFO on the Neo.
+ *
+ * NOTE: Channel lock MUST be held before calling this function!
+ */
+static void neo_flush_uart_write(struct channel_t *ch)
+{
+ uchar tmp = 0;
+ int i = 0;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) {
+ return;
+ }
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT), &ch->ch_neo_uart->isr_fcr);
+ neo_pci_posting_flush(ch->ch_bd);
+
+ for (i = 0; i < 10; i++) {
+
+ /* Check to see if the UART feels it completely flushed the FIFO. */
+ tmp = readb(&ch->ch_neo_uart->isr_fcr);
+ if (tmp & 4) {
+ DPR_IOCTL(("Still flushing TX UART... i: %d\n", i));
+ udelay(10);
+ }
+ else
+ break;
+ }
+
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+}
+
+
+/*
+ * Flush the READ FIFO on the Neo.
+ *
+ * NOTE: Channel lock MUST be held before calling this function!
+ */
+static void neo_flush_uart_read(struct channel_t *ch)
+{
+ uchar tmp = 0;
+ int i = 0;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) {
+ return;
+ }
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR), &ch->ch_neo_uart->isr_fcr);
+ neo_pci_posting_flush(ch->ch_bd);
+
+ for (i = 0; i < 10; i++) {
+
+ /* Check to see if the UART feels it completely flushed the FIFO. */
+ tmp = readb(&ch->ch_neo_uart->isr_fcr);
+ if (tmp & 2) {
+ DPR_IOCTL(("Still flushing RX UART... i: %d\n", i));
+ udelay(10);
+ }
+ else
+ break;
+ }
+}
+
+
+static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
+{
+ ushort head;
+ ushort tail;
+ int n;
+ int s;
+ int qlen;
+ uint len_written = 0;
+ ulong lock_flags;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /* No data to write to the UART */
+ if (ch->ch_w_tail == ch->ch_w_head) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+ /* If port is "stopped", don't send any data to the UART */
+ if ((ch->ch_flags & CH_FORCED_STOP) || (ch->ch_flags & CH_BREAK_SENDING)) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+ /*
+	 * If FIFOs are disabled, send data directly to the txrx register.
+ */
+ if (!(ch->ch_flags & CH_FIFO_ENABLED)) {
+ uchar lsrbits = readb(&ch->ch_neo_uart->lsr);
+
+ /* Cache the LSR bits for later parsing */
+ ch->ch_cached_lsr |= lsrbits;
+ if (ch->ch_cached_lsr & UART_LSR_THRE) {
+ ch->ch_cached_lsr &= ~(UART_LSR_THRE);
+
+ /*
+ * If RTS Toggle mode is on, turn on RTS now if not already set,
+ * and make sure we get an event when the data transfer has completed.
+ */
+ if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
+ if (!(ch->ch_mostat & UART_MCR_RTS)) {
+ ch->ch_mostat |= (UART_MCR_RTS);
+ neo_assert_modem_signals(ch);
+ }
+ ch->ch_tun.un_flags |= (UN_EMPTY);
+ }
+ /*
+ * If DTR Toggle mode is on, turn on DTR now if not already set,
+ * and make sure we get an event when the data transfer has completed.
+ */
+ if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
+ if (!(ch->ch_mostat & UART_MCR_DTR)) {
+ ch->ch_mostat |= (UART_MCR_DTR);
+ neo_assert_modem_signals(ch);
+ }
+ ch->ch_tun.un_flags |= (UN_EMPTY);
+ }
+
+ writeb(ch->ch_wqueue[ch->ch_w_tail], &ch->ch_neo_uart->txrx);
+			DPR_WRITE(("Tx data: %x\n", ch->ch_wqueue[ch->ch_w_tail]));
+ ch->ch_w_tail++;
+ ch->ch_w_tail &= WQUEUEMASK;
+ ch->ch_txcount++;
+ }
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+ /*
+ * We have to do it this way, because of the EXAR TXFIFO count bug.
+ */
+ if ((ch->ch_bd->dvid & 0xf0) < UART_XR17E158_DVID) {
+ if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM))) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+ len_written = 0;
+
+ n = readb(&ch->ch_neo_uart->tfifo);
+
+ if ((unsigned int) n > ch->ch_t_tlevel) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
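+		/*
+		 * The reported FIFO count can't be trusted here, so assume the
+		 * FIFO may still hold up to a trigger level's worth of data
+		 * and only top it off from there.
+		 */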
+ n = UART_17158_TX_FIFOSIZE - ch->ch_t_tlevel;
+ }
+ else {
+ n = UART_17158_TX_FIFOSIZE - readb(&ch->ch_neo_uart->tfifo);
+ }
+
+ /* cache head and tail of queue */
+ head = ch->ch_w_head & WQUEUEMASK;
+ tail = ch->ch_w_tail & WQUEUEMASK;
+ qlen = (head - tail) & WQUEUEMASK;
+
+ /* Find minimum of the FIFO space, versus queue length */
+ n = min(n, qlen);
+
+ while (n > 0) {
+
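+		/* s = largest contiguous chunk we can copy before the queue wraps */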
+ s = ((head >= tail) ? head : WQUEUESIZE) - tail;
+ s = min(s, n);
+
+ if (s <= 0)
+ break;
+
+ /*
+ * If RTS Toggle mode is on, turn on RTS now if not already set,
+ * and make sure we get an event when the data transfer has completed.
+ */
+ if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
+ if (!(ch->ch_mostat & UART_MCR_RTS)) {
+ ch->ch_mostat |= (UART_MCR_RTS);
+ neo_assert_modem_signals(ch);
+ }
+ ch->ch_tun.un_flags |= (UN_EMPTY);
+ }
+
+ /*
+ * If DTR Toggle mode is on, turn on DTR now if not already set,
+ * and make sure we get an event when the data transfer has completed.
+ */
+ if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
+ if (!(ch->ch_mostat & UART_MCR_DTR)) {
+ ch->ch_mostat |= (UART_MCR_DTR);
+ neo_assert_modem_signals(ch);
+ }
+ ch->ch_tun.un_flags |= (UN_EMPTY);
+ }
+
+ memcpy_toio(&ch->ch_neo_uart->txrxburst, ch->ch_wqueue + tail, s);
+ dgnc_sniff_nowait_nolock(ch, "UART WRITE", ch->ch_wqueue + tail, s);
+
+ /* Add and flip queue if needed */
+ tail = (tail + s) & WQUEUEMASK;
+ n -= s;
+ ch->ch_txcount += s;
+ len_written += s;
+ }
+
+ /* Update the final tail */
+ ch->ch_w_tail = tail & WQUEUEMASK;
+
+ if (len_written > 0) {
+ neo_pci_posting_flush(ch->ch_bd);
+ ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+ }
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+}
+
+
+static void neo_parse_modem(struct channel_t *ch, uchar signals)
+{
+ volatile uchar msignals = signals;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ DPR_MSIGS(("neo_parse_modem: port: %d msignals: %x\n", ch->ch_portnum, msignals));
+
+ /*
+ * Do altpin switching. Altpin switches DCD and DSR.
+	 * This probably breaks DSRPACE, so we should be more clever here.
+ */
+ if (ch->ch_digi.digi_flags & DIGI_ALTPIN) {
+ uchar mswap = msignals;
+
+ if (mswap & UART_MSR_DDCD) {
+ msignals &= ~UART_MSR_DDCD;
+ msignals |= UART_MSR_DDSR;
+ }
+ if (mswap & UART_MSR_DDSR) {
+ msignals &= ~UART_MSR_DDSR;
+ msignals |= UART_MSR_DDCD;
+ }
+ if (mswap & UART_MSR_DCD) {
+ msignals &= ~UART_MSR_DCD;
+ msignals |= UART_MSR_DSR;
+ }
+ if (mswap & UART_MSR_DSR) {
+ msignals &= ~UART_MSR_DSR;
+ msignals |= UART_MSR_DCD;
+ }
+ }
+
+	/* Scrub off lower bits. They signify deltas, which we don't care about. */
+ msignals &= 0xf0;
+
+ if (msignals & UART_MSR_DCD)
+ ch->ch_mistat |= UART_MSR_DCD;
+ else
+ ch->ch_mistat &= ~UART_MSR_DCD;
+
+ if (msignals & UART_MSR_DSR)
+ ch->ch_mistat |= UART_MSR_DSR;
+ else
+ ch->ch_mistat &= ~UART_MSR_DSR;
+
+ if (msignals & UART_MSR_RI)
+ ch->ch_mistat |= UART_MSR_RI;
+ else
+ ch->ch_mistat &= ~UART_MSR_RI;
+
+ if (msignals & UART_MSR_CTS)
+ ch->ch_mistat |= UART_MSR_CTS;
+ else
+ ch->ch_mistat &= ~UART_MSR_CTS;
+
+ DPR_MSIGS(("Port: %d DTR: %d RTS: %d CTS: %d DSR: %d " "RI: %d CD: %d\n",
+ ch->ch_portnum,
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_DTR),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_RTS),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_CTS),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_DSR),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_RI),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_DCD)));
+}
+
+
+/* Make the UART raise any of the output signals we want up */
+static void neo_assert_modem_signals(struct channel_t *ch)
+{
+ uchar out;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ out = ch->ch_mostat;
+
+ if (ch->ch_flags & CH_LOOPBACK)
+ out |= UART_MCR_LOOP;
+
+ writeb(out, &ch->ch_neo_uart->mcr);
+ neo_pci_posting_flush(ch->ch_bd);
+
+ /* Give time for the UART to actually raise/drop the signals */
+ udelay(10);
+}
+
+
+static void neo_send_start_character(struct channel_t *ch)
+{
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ if (ch->ch_startc != _POSIX_VDISABLE) {
+ ch->ch_xon_sends++;
+ writeb(ch->ch_startc, &ch->ch_neo_uart->txrx);
+ neo_pci_posting_flush(ch->ch_bd);
+ udelay(10);
+ }
+}
+
+
+static void neo_send_stop_character(struct channel_t *ch)
+{
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ if (ch->ch_stopc != _POSIX_VDISABLE) {
+ ch->ch_xoff_sends++;
+ writeb(ch->ch_stopc, &ch->ch_neo_uart->txrx);
+ neo_pci_posting_flush(ch->ch_bd);
+ udelay(10);
+ }
+}
+
+
+/*
+ * neo_uart_init
+ */
+static void neo_uart_init(struct channel_t *ch)
+{
+
+ writeb(0, &ch->ch_neo_uart->ier);
+ writeb(0, &ch->ch_neo_uart->efr);
+ writeb(UART_EFR_ECB, &ch->ch_neo_uart->efr);
+
+
+ /* Clear out UART and FIFO */
+ readb(&ch->ch_neo_uart->txrx);
+ writeb((UART_FCR_ENABLE_FIFO|UART_FCR_CLEAR_RCVR|UART_FCR_CLEAR_XMIT), &ch->ch_neo_uart->isr_fcr);
+ readb(&ch->ch_neo_uart->lsr);
+ readb(&ch->ch_neo_uart->msr);
+
+ ch->ch_flags |= CH_FIFO_ENABLED;
+
+ /* Assert any signals we want up */
+ writeb(ch->ch_mostat, &ch->ch_neo_uart->mcr);
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+
+/*
+ * Make the UART completely turn off.
+ */
+static void neo_uart_off(struct channel_t *ch)
+{
+ /* Turn off UART enhanced bits */
+ writeb(0, &ch->ch_neo_uart->efr);
+
+ /* Stop all interrupts from occurring. */
+ writeb(0, &ch->ch_neo_uart->ier);
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+
+static uint neo_get_uart_bytes_left(struct channel_t *ch)
+{
+ uchar left = 0;
+ uchar lsr = readb(&ch->ch_neo_uart->lsr);
+
+ /* We must cache the LSR as some of the bits get reset once read... */
+ ch->ch_cached_lsr |= lsr;
+
+ /* Determine whether the Transmitter is empty or not */
+ if (!(lsr & UART_LSR_TEMT)) {
+ if (ch->ch_flags & CH_TX_FIFO_EMPTY) {
+ tasklet_schedule(&ch->ch_bd->helper_tasklet);
+ }
+ left = 1;
+ } else {
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+ left = 0;
+ }
+
+ return left;
+}
+
+
+/* Channel lock MUST be held by the calling function! */
+static void neo_send_break(struct channel_t *ch, int msecs)
+{
+ /*
+ * If we receive a time of 0, this means turn off the break.
+ */
+ if (msecs == 0) {
+ if (ch->ch_flags & CH_BREAK_SENDING) {
+ uchar temp = readb(&ch->ch_neo_uart->lcr);
+ writeb((temp & ~UART_LCR_SBC), &ch->ch_neo_uart->lcr);
+ neo_pci_posting_flush(ch->ch_bd);
+ ch->ch_flags &= ~(CH_BREAK_SENDING);
+ ch->ch_stop_sending_break = 0;
+ DPR_IOCTL(("Finishing UART_LCR_SBC! finished: %lx\n", jiffies));
+ }
+ return;
+ }
+
+ /*
+ * Set the time we should stop sending the break.
+ * If we are already sending a break, toss away the existing
+ * time to stop, and use this new value instead.
+ */
+ ch->ch_stop_sending_break = jiffies + dgnc_jiffies_from_ms(msecs);
+
+ /* Tell the UART to start sending the break */
+ if (!(ch->ch_flags & CH_BREAK_SENDING)) {
+ uchar temp = readb(&ch->ch_neo_uart->lcr);
+ writeb((temp | UART_LCR_SBC), &ch->ch_neo_uart->lcr);
+ neo_pci_posting_flush(ch->ch_bd);
+ ch->ch_flags |= (CH_BREAK_SENDING);
+ DPR_IOCTL(("Port %d. Starting UART_LCR_SBC! start: %lx should end: %lx\n",
+ ch->ch_portnum, jiffies, ch->ch_stop_sending_break));
+ }
+}
+
+
+/*
+ * neo_send_immediate_char.
+ *
+ * Sends a specific character as soon as possible to the UART,
+ * jumping over any bytes that might be in the write queue.
+ *
+ * The channel lock MUST be held by the calling function.
+ */
+static void neo_send_immediate_char(struct channel_t *ch, unsigned char c)
+{
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ writeb(c, &ch->ch_neo_uart->txrx);
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+
+static unsigned int neo_read_eeprom(unsigned char __iomem *base, unsigned int address)
+{
+ unsigned int enable;
+ unsigned int bits;
+ unsigned int databit;
+ unsigned int val;
+
+ /* enable chip select */
+ writeb(NEO_EECS, base + NEO_EEREG);
+ /* READ */
+ enable = (address | 0x180);
+
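+	/* Clock out the 9-bit read command (start bit, READ opcode, address), MSB first */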
+ for (bits = 9; bits--; ) {
+ databit = (enable & (1 << bits)) ? NEO_EEDI : 0;
+ /* Set read address */
+ writeb(databit | NEO_EECS, base + NEO_EEREG);
+ writeb(databit | NEO_EECS | NEO_EECK, base + NEO_EEREG);
+ }
+
+ val = 0;
+
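+	/*
+	 * Clock in the data word. The extra (17th) clock presumably accounts
+	 * for the leading dummy bit 93Cxx-style EEPROMs emit before the data.
+	 */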
+ for (bits = 17; bits--; ) {
+ /* clock to EEPROM */
+ writeb(NEO_EECS, base + NEO_EEREG);
+ writeb(NEO_EECS | NEO_EECK, base + NEO_EEREG);
+ val <<= 1;
+ /* read EEPROM */
+ if (readb(base + NEO_EEREG) & NEO_EEDO)
+ val |= 1;
+ }
+
+ /* clock falling edge */
+ writeb(NEO_EECS, base + NEO_EEREG);
+
+ /* drop chip select */
+ writeb(0x00, base + NEO_EEREG);
+
+ return val;
+}
+
+
+static void neo_vpd(struct board_t *brd)
+{
+ unsigned int i = 0;
+ unsigned int a;
+
+ if (!brd || brd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ if (!brd->re_map_membase)
+ return;
+
+ /* Store the VPD into our buffer */
+ for (i = 0; i < NEO_VPD_IMAGESIZE; i++) {
+ a = neo_read_eeprom(brd->re_map_membase, i);
+ brd->vpd[i*2] = a & 0xff;
+ brd->vpd[(i*2)+1] = (a >> 8) & 0xff;
+ }
+
+ if (((brd->vpd[0x08] != 0x82) /* long resource name tag */
+ && (brd->vpd[0x10] != 0x82)) /* long resource name tag (PCI-66 files)*/
+ || (brd->vpd[0x7F] != 0x78)) /* small resource end tag */
+ {
+ memset(brd->vpd, '\0', NEO_VPD_IMAGESIZE);
+ }
+ else {
+ /* Search for the serial number */
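+		/* VPD keywords are 2 bytes plus a length byte, so the "SN" data starts at i + 3 */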
+ for (i = 0; i < NEO_VPD_IMAGESIZE * 2; i++) {
+ if (brd->vpd[i] == 'S' && brd->vpd[i + 1] == 'N') {
+ strncpy(brd->serial_num, &(brd->vpd[i + 3]), 9);
+ }
+ }
+ }
+}
diff --git a/drivers/staging/dgnc/dgnc_neo.h b/drivers/staging/dgnc/dgnc_neo.h
new file mode 100644
index 0000000..7ec5710
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_neo.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ *
+ */
+
+#ifndef __DGNC_NEO_H
+#define __DGNC_NEO_H
+
+#include "dgnc_types.h"
+#include "dgnc_driver.h"
+
+/************************************************************************
+ * Per channel/port NEO UART structure *
+ ************************************************************************
+ * Base Structure Entries Usage Meanings to Host *
+ * *
+ * W = read write R = read only *
+ * U = Unused. *
+ ************************************************************************/
+
+struct neo_uart_struct {
+ u8 txrx; /* WR RHR/THR - Holding Reg */
+ u8 ier; /* WR IER - Interrupt Enable Reg */
+ u8 isr_fcr; /* WR ISR/FCR - Interrupt Status Reg/Fifo Control Reg */
+ u8 lcr; /* WR LCR - Line Control Reg */
+ u8 mcr; /* WR MCR - Modem Control Reg */
+ u8 lsr; /* WR LSR - Line Status Reg */
+ u8 msr; /* WR MSR - Modem Status Reg */
+ u8 spr; /* WR SPR - Scratch Pad Reg */
+ u8 fctr; /* WR FCTR - Feature Control Reg */
+ u8 efr; /* WR EFR - Enhanced Function Reg */
+ u8 tfifo; /* WR TXCNT/TXTRG - Transmit FIFO Reg */
+	u8 rfifo;		/* WR  RXCNT/RXTRG - Receive FIFO Reg */
+ u8 xoffchar1; /* WR XOFF 1 - XOff Character 1 Reg */
+ u8 xoffchar2; /* WR XOFF 2 - XOff Character 2 Reg */
+ u8 xonchar1; /* WR XON 1 - Xon Character 1 Reg */
+ u8 xonchar2; /* WR XON 2 - XOn Character 2 Reg */
+
+ u8 reserved1[0x2ff - 0x200]; /* U Reserved by Exar */
+ u8 txrxburst[64]; /* RW 64 bytes of RX/TX FIFO Data */
+ u8 reserved2[0x37f - 0x340]; /* U Reserved by Exar */
+ u8 rxburst_with_errors[64]; /* R 64 bytes of RX FIFO Data + LSR */
+};
+
+/* Where to read the extended interrupt register (32bits instead of 8bits) */
+#define UART_17158_POLL_ADDR_OFFSET 0x80
+
+/* These are the current dvid's of the Neo boards */
+#define UART_XR17C158_DVID 0x20
+#define UART_XR17D158_DVID 0x20
+#define UART_XR17E158_DVID 0x40
+
+#define NEO_EECK 0x10 /* Clock */
+#define NEO_EECS 0x20 /* Chip Select */
+#define NEO_EEDI 0x40 /* Data In is an Output Pin */
+#define NEO_EEDO 0x80 /* Data Out is an Input Pin */
+#define NEO_EEREG 0x8E /* offset to EEPROM control reg */
+
+
+#define NEO_VPD_IMAGESIZE 0x40 /* size of image to read from EEPROM in words */
+#define NEO_VPD_IMAGEBYTES (NEO_VPD_IMAGESIZE * 2)
+
+/*
+ * These are the redefinitions for the FCTR on the XR17C158, since
+ * Exar made them different from their earlier design (XR16C854).
+ */
+
+/* These are only applicable when table D is selected */
+#define UART_17158_FCTR_RTS_NODELAY 0x00
+#define UART_17158_FCTR_RTS_4DELAY 0x01
+#define UART_17158_FCTR_RTS_6DELAY 0x02
+#define UART_17158_FCTR_RTS_8DELAY 0x03
+#define UART_17158_FCTR_RTS_12DELAY 0x12
+#define UART_17158_FCTR_RTS_16DELAY 0x05
+#define UART_17158_FCTR_RTS_20DELAY 0x13
+#define UART_17158_FCTR_RTS_24DELAY 0x06
+#define UART_17158_FCTR_RTS_28DELAY 0x14
+#define UART_17158_FCTR_RTS_32DELAY 0x07
+#define UART_17158_FCTR_RTS_36DELAY 0x16
+#define UART_17158_FCTR_RTS_40DELAY 0x08
+#define UART_17158_FCTR_RTS_44DELAY 0x09
+#define UART_17158_FCTR_RTS_48DELAY 0x10
+#define UART_17158_FCTR_RTS_52DELAY 0x11
+
+#define UART_17158_FCTR_RTS_IRDA 0x10
+#define UART_17158_FCTR_RS485 0x20
+#define UART_17158_FCTR_TRGA 0x00
+#define UART_17158_FCTR_TRGB 0x40
+#define UART_17158_FCTR_TRGC 0x80
+#define UART_17158_FCTR_TRGD 0xC0
+
+/* 17158 trigger table selects.. */
+#define UART_17158_FCTR_BIT6 0x40
+#define UART_17158_FCTR_BIT7 0x80
+
+/* 17158 TX/RX memmapped buffer offsets */
+#define UART_17158_RX_FIFOSIZE 64
+#define UART_17158_TX_FIFOSIZE 64
+
+/* 17158 Extended IIR's */
+#define UART_17158_IIR_RDI_TIMEOUT 0x0C /* Receiver data TIMEOUT */
+#define UART_17158_IIR_XONXOFF 0x10 /* Received an XON/XOFF char */
+#define UART_17158_IIR_HWFLOW_STATE_CHANGE 0x20 /* CTS/DSR or RTS/DTR state change */
+#define UART_17158_IIR_FIFO_ENABLED 0xC0 /* 16550 FIFOs are Enabled */
+
+/*
+ * These are the extended interrupts that get sent
+ * back to us from the UART's 32bit interrupt register
+ */
+#define UART_17158_RX_LINE_STATUS 0x1 /* RX Ready */
+#define UART_17158_RXRDY_TIMEOUT 0x2 /* RX Ready Timeout */
+#define UART_17158_TXRDY 0x3 /* TX Ready */
+#define UART_17158_MSR 0x4 /* Modem State Change */
+#define UART_17158_TX_AND_FIFO_CLR 0x40 /* Transmitter Holding Reg Empty */
+#define UART_17158_RX_FIFO_DATA_ERROR 0x80 /* UART detected an RX FIFO Data error */
+
+/*
+ * These are the EXTENDED definitions for the 17C158's Interrupt
+ * Enable Register.
+ */
+#define UART_17158_EFR_ECB 0x10 /* Enhanced control bit */
+#define UART_17158_EFR_IXON 0x2 /* Receiver compares Xon1/Xoff1 */
+#define UART_17158_EFR_IXOFF 0x8 /* Transmit Xon1/Xoff1 */
+#define UART_17158_EFR_RTSDTR 0x40 /* Auto RTS/DTR Flow Control Enable */
+#define UART_17158_EFR_CTSDSR	0x80	/* Auto CTS/DSR Flow Control Enable */
+
+#define UART_17158_XOFF_DETECT 0x1 /* Indicates whether chip saw an incoming XOFF char */
+#define UART_17158_XON_DETECT 0x2 /* Indicates whether chip saw an incoming XON char */
+
+#define UART_17158_IER_RSVD1 0x10 /* Reserved by Exar */
+#define UART_17158_IER_XOFF 0x20 /* Xoff Interrupt Enable */
+#define UART_17158_IER_RTSDTR 0x40 /* Output Interrupt Enable */
+#define UART_17158_IER_CTSDSR 0x80 /* Input Interrupt Enable */
+
+/*
+ * Our Global Variables
+ */
+extern struct board_ops dgnc_neo_ops;
+
+#endif
diff --git a/drivers/staging/dgnc/dgnc_pci.h b/drivers/staging/dgnc/dgnc_pci.h
new file mode 100644
index 0000000..5b6f76d
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_pci.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+#ifndef __DGNC_PCI_H
+#define __DGNC_PCI_H
+
+#define PCIMAX 32 /* maximum number of PCI boards */
+
+#define DIGI_VID 0x114F
+
+#define PCI_DEVICE_CLASSIC_4_DID 0x0028
+#define PCI_DEVICE_CLASSIC_8_DID 0x0029
+#define PCI_DEVICE_CLASSIC_4_422_DID 0x00D0
+#define PCI_DEVICE_CLASSIC_8_422_DID 0x00D1
+#define PCI_DEVICE_NEO_4_DID 0x00B0
+#define PCI_DEVICE_NEO_8_DID 0x00B1
+#define PCI_DEVICE_NEO_2DB9_DID 0x00C8
+#define PCI_DEVICE_NEO_2DB9PRI_DID 0x00C9
+#define PCI_DEVICE_NEO_2RJ45_DID 0x00CA
+#define PCI_DEVICE_NEO_2RJ45PRI_DID 0x00CB
+#define PCI_DEVICE_NEO_1_422_DID 0x00CC
+#define PCI_DEVICE_NEO_1_422_485_DID 0x00CD
+#define PCI_DEVICE_NEO_2_422_485_DID 0x00CE
+#define PCI_DEVICE_NEO_EXPRESS_8_DID 0x00F0
+#define PCI_DEVICE_NEO_EXPRESS_4_DID 0x00F1
+#define PCI_DEVICE_NEO_EXPRESS_4RJ45_DID 0x00F2
+#define PCI_DEVICE_NEO_EXPRESS_8RJ45_DID 0x00F3
+#define PCI_DEVICE_NEO_EXPRESS_4_IBM_DID 0x00F4
+
+#define PCI_DEVICE_CLASSIC_4_PCI_NAME "ClassicBoard 4 PCI"
+#define PCI_DEVICE_CLASSIC_8_PCI_NAME "ClassicBoard 8 PCI"
+#define PCI_DEVICE_CLASSIC_4_422_PCI_NAME "ClassicBoard 4 422 PCI"
+#define PCI_DEVICE_CLASSIC_8_422_PCI_NAME "ClassicBoard 8 422 PCI"
+#define PCI_DEVICE_NEO_4_PCI_NAME "Neo 4 PCI"
+#define PCI_DEVICE_NEO_8_PCI_NAME "Neo 8 PCI"
+#define PCI_DEVICE_NEO_2DB9_PCI_NAME "Neo 2 - DB9 Universal PCI"
+#define PCI_DEVICE_NEO_2DB9PRI_PCI_NAME "Neo 2 - DB9 Universal PCI - Powered Ring Indicator"
+#define PCI_DEVICE_NEO_2RJ45_PCI_NAME "Neo 2 - RJ45 Universal PCI"
+#define PCI_DEVICE_NEO_2RJ45PRI_PCI_NAME "Neo 2 - RJ45 Universal PCI - Powered Ring Indicator"
+#define PCI_DEVICE_NEO_1_422_PCI_NAME "Neo 1 422 PCI"
+#define PCI_DEVICE_NEO_1_422_485_PCI_NAME "Neo 1 422/485 PCI"
+#define PCI_DEVICE_NEO_2_422_485_PCI_NAME "Neo 2 422/485 PCI"
+
+#define PCI_DEVICE_NEO_EXPRESS_8_PCI_NAME "Neo 8 PCI Express"
+#define PCI_DEVICE_NEO_EXPRESS_4_PCI_NAME "Neo 4 PCI Express"
+#define PCI_DEVICE_NEO_EXPRESS_4RJ45_PCI_NAME "Neo 4 PCI Express RJ45"
+#define PCI_DEVICE_NEO_EXPRESS_8RJ45_PCI_NAME "Neo 8 PCI Express RJ45"
+#define PCI_DEVICE_NEO_EXPRESS_4_IBM_PCI_NAME "Neo 4 PCI Express IBM"
+
+
+/* Size of Memory and I/O for PCI (4 K) */
+#define PCI_RAM_SIZE 0x1000
+
+/* Size of Memory (2MB) */
+#define PCI_MEM_SIZE 0x1000
+
+#endif
diff --git a/drivers/staging/dgnc/dgnc_sysfs.c b/drivers/staging/dgnc/dgnc_sysfs.c
new file mode 100644
index 0000000..0ea6c80
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_sysfs.c
@@ -0,0 +1,756 @@
+/*
+ * Copyright 2004 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/serial_reg.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+
+#include "dgnc_driver.h"
+#include "dgnc_mgmt.h"
+
+
+static ssize_t dgnc_driver_version_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", DG_PART);
+}
+static DRIVER_ATTR(version, S_IRUSR, dgnc_driver_version_show, NULL);
+
+
+static ssize_t dgnc_driver_boards_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", dgnc_NumBoards);
+}
+static DRIVER_ATTR(boards, S_IRUSR, dgnc_driver_boards_show, NULL);
+
+
+static ssize_t dgnc_driver_maxboards_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", MAXBOARDS);
+}
+static DRIVER_ATTR(maxboards, S_IRUSR, dgnc_driver_maxboards_show, NULL);
+
+
+static ssize_t dgnc_driver_pollcounter_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%ld\n", dgnc_poll_counter);
+}
+static DRIVER_ATTR(pollcounter, S_IRUSR, dgnc_driver_pollcounter_show, NULL);
+
+
+static ssize_t dgnc_driver_state_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", dgnc_driver_state_text[dgnc_driver_state]);
+}
+static DRIVER_ATTR(state, S_IRUSR, dgnc_driver_state_show, NULL);
+
+
+static ssize_t dgnc_driver_debug_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", dgnc_debug);
+}
+
+static ssize_t dgnc_driver_debug_store(struct device_driver *ddp, const char *buf, size_t count)
+{
+ sscanf(buf, "0x%x\n", &dgnc_debug);
+ return count;
+}
+static DRIVER_ATTR(debug, (S_IRUSR | S_IWUSR), dgnc_driver_debug_show, dgnc_driver_debug_store);
+
+
+static ssize_t dgnc_driver_rawreadok_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", dgnc_rawreadok);
+}
+
+static ssize_t dgnc_driver_rawreadok_store(struct device_driver *ddp, const char *buf, size_t count)
+{
+ sscanf(buf, "0x%x\n", &dgnc_rawreadok);
+ return count;
+}
+static DRIVER_ATTR(rawreadok, (S_IRUSR | S_IWUSR), dgnc_driver_rawreadok_show, dgnc_driver_rawreadok_store);
+
+
+static ssize_t dgnc_driver_pollrate_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%dms\n", dgnc_poll_tick);
+}
+
+static ssize_t dgnc_driver_pollrate_store(struct device_driver *ddp, const char *buf, size_t count)
+{
+ sscanf(buf, "%d\n", &dgnc_poll_tick);
+ return count;
+}
+static DRIVER_ATTR(pollrate, (S_IRUSR | S_IWUSR), dgnc_driver_pollrate_show, dgnc_driver_pollrate_store);
+
+
+void dgnc_create_driver_sysfiles(struct pci_driver *dgnc_driver)
+{
+ int rc = 0;
+ struct device_driver *driverfs = &dgnc_driver->driver;
+
+ rc |= driver_create_file(driverfs, &driver_attr_version);
+ rc |= driver_create_file(driverfs, &driver_attr_boards);
+ rc |= driver_create_file(driverfs, &driver_attr_maxboards);
+ rc |= driver_create_file(driverfs, &driver_attr_debug);
+ rc |= driver_create_file(driverfs, &driver_attr_rawreadok);
+ rc |= driver_create_file(driverfs, &driver_attr_pollrate);
+ rc |= driver_create_file(driverfs, &driver_attr_pollcounter);
+ rc |= driver_create_file(driverfs, &driver_attr_state);
+ if (rc) {
+ printk(KERN_ERR "DGNC: sysfs driver_create_file failed!\n");
+ }
+}
+
+
+void dgnc_remove_driver_sysfiles(struct pci_driver *dgnc_driver)
+{
+ struct device_driver *driverfs = &dgnc_driver->driver;
+ driver_remove_file(driverfs, &driver_attr_version);
+ driver_remove_file(driverfs, &driver_attr_boards);
+ driver_remove_file(driverfs, &driver_attr_maxboards);
+ driver_remove_file(driverfs, &driver_attr_debug);
+ driver_remove_file(driverfs, &driver_attr_rawreadok);
+ driver_remove_file(driverfs, &driver_attr_pollrate);
+ driver_remove_file(driverfs, &driver_attr_pollcounter);
+ driver_remove_file(driverfs, &driver_attr_state);
+}
+
+
+#define DGNC_VERIFY_BOARD(p, bd) \
+ if (!p) \
+ return (0); \
+ \
+ bd = dev_get_drvdata(p); \
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC) \
+ return (0); \
+ if (bd->state != BOARD_READY) \
+ return (0); \
+
+
+
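The DGNC_VERIFY_BOARD macro above expands to several bare statements, so it only behaves as intended when used as a complete statement at the top of a function body, as the show routines below do. Purely as an illustration, not part of the patch, a slightly safer formulation wraps the same checks in do { } while (0) and parenthesizes its arguments:

    #define DGNC_VERIFY_BOARD(p, bd)                              \
            do {                                                  \
                    if (!(p))                                     \
                            return 0;                             \
                    (bd) = dev_get_drvdata(p);                    \
                    if (!(bd) || (bd)->magic != DGNC_BOARD_MAGIC) \
                            return 0;                             \
                    if ((bd)->state != BOARD_READY)               \
                            return 0;                             \
            } while (0)
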
+static ssize_t dgnc_vpd_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGNC_VERIFY_BOARD(p, bd);
+
+ count += sprintf(buf + count, "\n 0 1 2 3 4 5 6 7 8 9 A B C D E F");
+ for (i = 0; i < 0x40 * 2; i++) {
+ if (!(i % 16))
+ count += sprintf(buf + count, "\n%04X ", i * 2);
+ count += sprintf(buf + count, "%02X ", bd->vpd[i]);
+ }
+ count += sprintf(buf + count, "\n");
+
+ return count;
+}
+static DEVICE_ATTR(vpd, S_IRUSR, dgnc_vpd_show, NULL);
+
+static ssize_t dgnc_serial_number_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+
+ DGNC_VERIFY_BOARD(p, bd);
+
+ if (bd->serial_num[0] == '\0')
+ count += sprintf(buf + count, "<UNKNOWN>\n");
+ else
+ count += sprintf(buf + count, "%s\n", bd->serial_num);
+
+ return count;
+}
+static DEVICE_ATTR(serial_number, S_IRUSR, dgnc_serial_number_show, NULL);
+
+
+static ssize_t dgnc_ports_state_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGNC_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "%d %s\n", bd->channels[i]->ch_portnum,
+ bd->channels[i]->ch_open_count ? "Open" : "Closed");
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_state, S_IRUSR, dgnc_ports_state_show, NULL);
+
+
+static ssize_t dgnc_ports_baud_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGNC_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "%d %d\n", bd->channels[i]->ch_portnum, bd->channels[i]->ch_old_baud);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_baud, S_IRUSR, dgnc_ports_baud_show, NULL);
+
+
+static ssize_t dgnc_ports_msignals_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGNC_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ if (bd->channels[i]->ch_open_count) {
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "%d %s %s %s %s %s %s\n", bd->channels[i]->ch_portnum,
+ (bd->channels[i]->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
+ (bd->channels[i]->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
+ (bd->channels[i]->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
+ (bd->channels[i]->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
+ (bd->channels[i]->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
+ (bd->channels[i]->ch_mistat & UART_MSR_RI) ? "RI" : "");
+ } else {
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "%d\n", bd->channels[i]->ch_portnum);
+ }
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_msignals, S_IRUSR, dgnc_ports_msignals_show, NULL);
+
+
+static ssize_t dgnc_ports_iflag_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGNC_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_c_iflag);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_iflag, S_IRUSR, dgnc_ports_iflag_show, NULL);
+
+
+static ssize_t dgnc_ports_cflag_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGNC_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_c_cflag);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_cflag, S_IRUSR, dgnc_ports_cflag_show, NULL);
+
+
+static ssize_t dgnc_ports_oflag_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGNC_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_c_oflag);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_oflag, S_IRUSR, dgnc_ports_oflag_show, NULL);
+
+
+static ssize_t dgnc_ports_lflag_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGNC_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_c_lflag);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_lflag, S_IRUSR, dgnc_ports_lflag_show, NULL);
+
+
+static ssize_t dgnc_ports_digi_flag_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGNC_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_digi.digi_flags);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_digi_flag, S_IRUSR, dgnc_ports_digi_flag_show, NULL);
+
+
+static ssize_t dgnc_ports_rxcount_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGNC_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_rxcount);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_rxcount, S_IRUSR, dgnc_ports_rxcount_show, NULL);
+
+
+static ssize_t dgnc_ports_txcount_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGNC_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_txcount);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_txcount, S_IRUSR, dgnc_ports_txcount_show, NULL);
+
+
+/*
+ * This function creates the sysfs files that export each signal status;
+ * each value is placed in a separate file.
+ */
+void dgnc_create_ports_sysfiles(struct board_t *bd)
+{
+ int rc = 0;
+
+ dev_set_drvdata(&bd->pdev->dev, bd);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_state);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_baud);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_vpd);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_serial_number);
+ if (rc) {
+ printk(KERN_ERR "DGNC: sysfs device_create_file failed!\n");
+ }
+}
+
+
+/* Removes all the sysfs files created for that board. */
+void dgnc_remove_ports_sysfiles(struct board_t *bd)
+{
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_state);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_baud);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_vpd);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_serial_number);
+}
+
+
+static ssize_t dgnc_tty_state_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%s", un->un_open_count ? "Open" : "Closed");
+}
+static DEVICE_ATTR(state, S_IRUSR, dgnc_tty_state_show, NULL);
+
+
+static ssize_t dgnc_tty_baud_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ch->ch_old_baud);
+}
+static DEVICE_ATTR(baud, S_IRUSR, dgnc_tty_baud_show, NULL);
+
+
+static ssize_t dgnc_tty_msignals_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ if (ch->ch_open_count) {
+ return snprintf(buf, PAGE_SIZE, "%s %s %s %s %s %s\n",
+ (ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
+ (ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
+ (ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
+ (ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
+ (ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
+ (ch->ch_mistat & UART_MSR_RI) ? "RI" : "");
+ }
+ return 0;
+}
+static DEVICE_ATTR(msignals, S_IRUSR, dgnc_tty_msignals_show, NULL);
+
+
+static ssize_t dgnc_tty_iflag_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_iflag);
+}
+static DEVICE_ATTR(iflag, S_IRUSR, dgnc_tty_iflag_show, NULL);
+
+
+static ssize_t dgnc_tty_cflag_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_cflag);
+}
+static DEVICE_ATTR(cflag, S_IRUSR, dgnc_tty_cflag_show, NULL);
+
+
+static ssize_t dgnc_tty_oflag_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_oflag);
+}
+static DEVICE_ATTR(oflag, S_IRUSR, dgnc_tty_oflag_show, NULL);
+
+
+static ssize_t dgnc_tty_lflag_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_lflag);
+}
+static DEVICE_ATTR(lflag, S_IRUSR, dgnc_tty_lflag_show, NULL);
+
+
+static ssize_t dgnc_tty_digi_flag_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_digi.digi_flags);
+}
+static DEVICE_ATTR(digi_flag, S_IRUSR, dgnc_tty_digi_flag_show, NULL);
+
+
+static ssize_t dgnc_tty_rxcount_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_rxcount);
+}
+static DEVICE_ATTR(rxcount, S_IRUSR, dgnc_tty_rxcount_show, NULL);
+
+
+static ssize_t dgnc_tty_txcount_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_txcount);
+}
+static DEVICE_ATTR(txcount, S_IRUSR, dgnc_tty_txcount_show, NULL);
+
+
+static ssize_t dgnc_tty_name_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%sn%d%c\n",
+ (un->un_type == DGNC_PRINT) ? "pr" : "tty",
+ bd->boardnum + 1, 'a' + ch->ch_portnum);
+}
+static DEVICE_ATTR(custom_name, S_IRUSR, dgnc_tty_name_show, NULL);
+
+
+static struct attribute *dgnc_sysfs_tty_entries[] = {
+ &dev_attr_state.attr,
+ &dev_attr_baud.attr,
+ &dev_attr_msignals.attr,
+ &dev_attr_iflag.attr,
+ &dev_attr_cflag.attr,
+ &dev_attr_oflag.attr,
+ &dev_attr_lflag.attr,
+ &dev_attr_digi_flag.attr,
+ &dev_attr_rxcount.attr,
+ &dev_attr_txcount.attr,
+ &dev_attr_custom_name.attr,
+ NULL
+};
+
+
+static struct attribute_group dgnc_tty_attribute_group = {
+ .name = NULL,
+ .attrs = dgnc_sysfs_tty_entries,
+};
+
+
+void dgnc_create_tty_sysfs(struct un_t *un, struct device *c)
+{
+ int ret;
+
+ ret = sysfs_create_group(&c->kobj, &dgnc_tty_attribute_group);
+ if (ret) {
+ printk(KERN_ERR "dgnc: failed to create sysfs tty device attributes.\n");
+ sysfs_remove_group(&c->kobj, &dgnc_tty_attribute_group);
+ return;
+ }
+
+ dev_set_drvdata(c, un);
+
+}
+
+
+void dgnc_remove_tty_sysfs(struct device *c)
+{
+ sysfs_remove_group(&c->kobj, &dgnc_tty_attribute_group);
+}
+
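The per-board files in dgnc_create_ports_sysfiles() above are registered one at a time with device_create_file(), while the per-tty attributes at the end of this file use a single sysfs attribute group. For comparison only, and with illustrative names that are not part of the patch, the board files could be grouped the same way:

    static struct attribute *dgnc_ports_attrs_sketch[] = {
            &dev_attr_ports_state.attr,
            &dev_attr_ports_baud.attr,
            &dev_attr_vpd.attr,
            &dev_attr_serial_number.attr,
            NULL
    };

    static struct attribute_group dgnc_ports_group_sketch = {
            .attrs = dgnc_ports_attrs_sketch,
    };

    /* dgnc_create_ports_sysfiles() would then reduce to a single
     *      sysfs_create_group(&bd->pdev->dev.kobj, &dgnc_ports_group_sketch);
     * and dgnc_remove_ports_sysfiles() to the matching sysfs_remove_group() call.
     */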
diff --git a/drivers/staging/dgnc/dgnc_sysfs.h b/drivers/staging/dgnc/dgnc_sysfs.h
new file mode 100644
index 0000000..4b87ce1
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_sysfs.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+#ifndef __DGNC_SYSFS_H
+#define __DGNC_SYSFS_H
+
+#include "dgnc_driver.h"
+
+#include <linux/device.h>
+
+struct board_t;
+struct channel_t;
+struct un_t;
+struct pci_driver;
+struct class_device;
+
+extern void dgnc_create_ports_sysfiles(struct board_t *bd);
+extern void dgnc_remove_ports_sysfiles(struct board_t *bd);
+
+extern void dgnc_create_driver_sysfiles(struct pci_driver *);
+extern void dgnc_remove_driver_sysfiles(struct pci_driver *);
+
+extern int dgnc_tty_class_init(void);
+extern int dgnc_tty_class_destroy(void);
+
+extern void dgnc_create_tty_sysfs(struct un_t *un, struct device *c);
+extern void dgnc_remove_tty_sysfs(struct device *c);
+
+
+
+#endif
diff --git a/drivers/staging/dgnc/dgnc_trace.c b/drivers/staging/dgnc/dgnc_trace.c
new file mode 100644
index 0000000..a98b7d4
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_trace.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h> /* For jiffies, task states */
+#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
+#include <linux/vmalloc.h>
+
+#include "dgnc_driver.h"
+
+#define TRC_TO_CONSOLE 1
+
+/* file level globals */
+static char *dgnc_trcbuf; /* the ringbuffer */
+
+#if defined(TRC_TO_KMEM)
+static int dgnc_trcbufi = 0;		/* index of the tilde marking the current end of the trace */
+#endif
+
+#if defined(TRC_TO_KMEM)
+static DEFINE_SPINLOCK(dgnc_tracef_lock);
+#endif
+
+
+#if 0
+
+#if !defined(TRC_TO_KMEM) && !defined(TRC_TO_CONSOLE)
+
+void dgnc_tracef(const char *fmt, ...)
+{
+ return;
+}
+
+#else /* !defined(TRC_TO_KMEM) && !defined(TRC_TO_CONSOLE) */
+
+void dgnc_tracef(const char *fmt, ...)
+{
+ va_list ap;
+ char buf[TRC_MAXMSG+1];
+ size_t lenbuf;
+ int i;
+ static int failed = FALSE;
+# if defined(TRC_TO_KMEM)
+ unsigned long flags;
+#endif
+
+ if(failed)
+ return;
+# if defined(TRC_TO_KMEM)
+ DGNC_LOCK(dgnc_tracef_lock, flags);
+#endif
+
+ /* Format buf using fmt and arguments contained in ap. */
+ va_start(ap, fmt);
+ i = vsprintf(buf, fmt, ap);
+ va_end(ap);
+ lenbuf = strlen(buf);
+
+# if defined(TRC_TO_KMEM)
+ {
+ static int initd=0;
+
+ /*
+ * Now, in addition to (or instead of) printing this stuff out
+ * (which is a buffered operation), also tuck it away into a
+ * corner of memory which can be examined post-crash in kdb.
+ */
+ if (!initd) {
+ dgnc_trcbuf = (char *) vmalloc(dgnc_trcbuf_size);
+ if(!dgnc_trcbuf) {
+ failed = TRUE;
+ printk("dgnc: tracing init failed!\n");
+ return;
+ }
+
+ memset(dgnc_trcbuf, '\0', dgnc_trcbuf_size);
+ dgnc_trcbufi = 0;
+ initd++;
+
+ printk("dgnc: tracing enabled - " TRC_DTRC
+ " 0x%lx 0x%x\n",
+ (unsigned long)dgnc_trcbuf,
+ dgnc_trcbuf_size);
+ }
+
+# if defined(TRC_ON_OVERFLOW_WRAP_AROUND)
+ /*
+ * This is the less CPU-intensive way to do things. We simply
+ * wrap around before we fall off the end of the buffer. A
+ * tilde (~) demarcates the current end of the trace.
+ *
+ * This method should be used if you are concerned about race
+ * conditions as it is less likely to affect the timing of
+ * things.
+ */
+
+ if (dgnc_trcbufi + lenbuf >= dgnc_trcbuf_size) {
+ /* We are wrapping, so wipe out the last tilde. */
+ dgnc_trcbuf[dgnc_trcbufi] = '\0';
+ /* put the new string at the beginning of the buffer */
+ dgnc_trcbufi = 0;
+ }
+
+ strcpy(&dgnc_trcbuf[dgnc_trcbufi], buf);
+ dgnc_trcbufi += lenbuf;
+ dgnc_trcbuf[dgnc_trcbufi] = '~';
+
+# elif defined(TRC_ON_OVERFLOW_SHIFT_BUFFER)
+ /*
+ * This is the more CPU-intensive way to do things. If we
+ * venture into the last 1/8 of the buffer, we shift the
+ * last 7/8 of the buffer forward, wiping out the first 1/8.
+ * Advantage: No wrap-around, only truncation from the
+ * beginning.
+ *
+ * This method should not be used if you are concerned about
+ * timing changes affecting the behaviour of the driver (i.e.,
+ * race conditions).
+ */
+ strcpy(&dgnc_trcbuf[dgnc_trcbufi], buf);
+ dgnc_trcbufi += lenbuf;
+ dgnc_trcbuf[dgnc_trcbufi] = '~';
+ dgnc_trcbuf[dgnc_trcbufi+1] = '\0';
+
+ /* If we're near the end of the trace buffer... */
+ if (dgnc_trcbufi > (dgnc_trcbuf_size/8)*7) {
+ /* Wipe out the first eighth to make some more room. */
+ strcpy(dgnc_trcbuf, &dgnc_trcbuf[dgnc_trcbuf_size/8]);
+ dgnc_trcbufi = strlen(dgnc_trcbuf)-1;
+ /* Plop overflow message at the top of the buffer. */
+ bcopy(TRC_OVERFLOW, dgnc_trcbuf, strlen(TRC_OVERFLOW));
+ }
+# else
+# error "TRC_ON_OVERFLOW_WRAP_AROUND or TRC_ON_OVERFLOW_SHIFT_BUFFER?"
+# endif
+ }
+ DGNC_UNLOCK(dgnc_tracef_lock, flags);
+
+# endif /* defined(TRC_TO_KMEM) */
+}
+
+#endif /* !defined(TRC_TO_KMEM) && !defined(TRC_TO_CONSOLE) */
+
+#endif
+
+
+/*
+ * dgnc_tracer_free()
+ *
+ * Free the trace ring buffer, if one was ever allocated.
+ */
+void dgnc_tracer_free(void)
+{
+ if(dgnc_trcbuf)
+ vfree(dgnc_trcbuf);
+}
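
The TRC_ON_OVERFLOW_WRAP_AROUND path above is easiest to see with a very small buffer. A self-contained userspace sketch of the same wrap-and-tilde scheme follows; the 16-byte size and the messages are made up for illustration, and messages are assumed to be shorter than the buffer:

    #include <stdio.h>
    #include <string.h>

    #define TRCBUF_SIZE 16                  /* tiny buffer so the wrap is visible */

    static char trcbuf[TRCBUF_SIZE];
    static int trcbufi;

    static void trace_put(const char *msg)
    {
            size_t len = strlen(msg);

            if (trcbufi + len >= TRCBUF_SIZE) {     /* would fall off the end */
                    trcbuf[trcbufi] = '\0';         /* wipe out the old tilde */
                    trcbufi = 0;                    /* restart at the beginning */
            }
            strcpy(&trcbuf[trcbufi], msg);
            trcbufi += len;
            trcbuf[trcbufi] = '~';                  /* '~' marks the current end */
    }

    int main(void)
    {
            trace_put("open ");
            trace_put("read ");
            trace_put("close ");                    /* forces a wrap */
            /* prints roughly "close ~ad ": the tilde marks the current end,
             * stale data from before the wrap follows it */
            printf("%.*s\n", TRCBUF_SIZE, trcbuf);
            return 0;
    }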
diff --git a/drivers/staging/dgnc/dgnc_trace.h b/drivers/staging/dgnc/dgnc_trace.h
new file mode 100644
index 0000000..efed88a
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_trace.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ *
+ *****************************************************************************
+ * Header file for dgnc_trace.c
+ *
+ */
+
+#ifndef __DGNC_TRACE_H
+#define __DGNC_TRACE_H
+
+#include "dgnc_driver.h"
+
+#if 0
+
+# if !defined(TRC_TO_KMEM) && !defined(TRC_TO_CONSOLE)
+ void dgnc_tracef(const char *fmt, ...);
+# else
+ void dgnc_tracef(const char *fmt, ...);
+# endif
+
+#endif
+
+void dgnc_tracer_free(void);
+
+#endif
+
diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c
new file mode 100644
index 0000000..a7bb6bc
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_tty.c
@@ -0,0 +1,3544 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ */
+
+/************************************************************************
+ *
+ * This file implements the tty driver functionality for the
+ * Neo and ClassicBoard PCI based product lines.
+ *
+ ************************************************************************
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/sched.h> /* For jiffies, task states */
+#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_reg.h>
+#include <linux/slab.h>
+#include <linux/delay.h> /* For udelay */
+#include <asm/uaccess.h> /* For copy_from_user/copy_to_user */
+#include <linux/pci.h>
+
+#include "dgnc_driver.h"
+#include "dgnc_tty.h"
+#include "dgnc_types.h"
+#include "dgnc_trace.h"
+#include "dgnc_neo.h"
+#include "dgnc_cls.h"
+#include "dpacompat.h"
+#include "dgnc_sysfs.h"
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)
+#define init_MUTEX(sem) sema_init(sem, 1)
+#define DECLARE_MUTEX(name) \
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
+#endif
+
+/*
+ * internal variables
+ */
+static struct board_t *dgnc_BoardsByMajor[256];
+static uchar *dgnc_TmpWriteBuf = NULL;
+static DECLARE_MUTEX(dgnc_TmpWriteSem);
+
+/*
+ * Default transparent print information.
+ */
+static struct digi_t dgnc_digi_init = {
+ .digi_flags = DIGI_COOK, /* Flags */
+ .digi_maxcps = 100, /* Max CPS */
+ .digi_maxchar = 50, /* Max chars in print queue */
+ .digi_bufsize = 100, /* Printer buffer size */
+ .digi_onlen = 4, /* size of printer on string */
+ .digi_offlen = 4, /* size of printer off string */
+ .digi_onstr = "\033[5i", /* ANSI printer on string ] */
+ .digi_offstr = "\033[4i", /* ANSI printer off string ] */
+ .digi_term = "ansi" /* default terminal type */
+};
+
+
+/*
+ * Define a local default termios struct. All ports will be created
+ * with this termios initially.
+ *
+ * This defines a raw port at 9600 baud, 8 data bits, no parity,
+ * 1 stop bit.
+ */
+static struct ktermios DgncDefaultTermios =
+{
+ .c_iflag = (DEFAULT_IFLAGS), /* iflags */
+ .c_oflag = (DEFAULT_OFLAGS), /* oflags */
+ .c_cflag = (DEFAULT_CFLAGS), /* cflags */
+ .c_lflag = (DEFAULT_LFLAGS), /* lflags */
+ .c_cc = INIT_C_CC,
+ .c_line = 0,
+};
+
+
+/* Our function prototypes */
+static int dgnc_tty_open(struct tty_struct *tty, struct file *file);
+static void dgnc_tty_close(struct tty_struct *tty, struct file *file);
+static int dgnc_block_til_ready(struct tty_struct *tty, struct file *file, struct channel_t *ch);
+static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg);
+static int dgnc_tty_digigeta(struct tty_struct *tty, struct digi_t __user *retinfo);
+static int dgnc_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_info);
+static int dgnc_tty_write_room(struct tty_struct *tty);
+static int dgnc_tty_put_char(struct tty_struct *tty, unsigned char c);
+static int dgnc_tty_chars_in_buffer(struct tty_struct *tty);
+static void dgnc_tty_start(struct tty_struct *tty);
+static void dgnc_tty_stop(struct tty_struct *tty);
+static void dgnc_tty_throttle(struct tty_struct *tty);
+static void dgnc_tty_unthrottle(struct tty_struct *tty);
+static void dgnc_tty_flush_chars(struct tty_struct *tty);
+static void dgnc_tty_flush_buffer(struct tty_struct *tty);
+static void dgnc_tty_hangup(struct tty_struct *tty);
+static int dgnc_set_modem_info(struct tty_struct *tty, unsigned int command, unsigned int __user *value);
+static int dgnc_get_modem_info(struct channel_t *ch, unsigned int __user *value);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
+static int dgnc_tty_tiocmget(struct tty_struct *tty);
+static int dgnc_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear);
+#else
+static int dgnc_tty_tiocmget(struct tty_struct *tty, struct file *file);
+static int dgnc_tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear);
+#endif
+static int dgnc_tty_send_break(struct tty_struct *tty, int msec);
+static void dgnc_tty_wait_until_sent(struct tty_struct *tty, int timeout);
+static int dgnc_tty_write(struct tty_struct *tty, const unsigned char *buf, int count);
+static void dgnc_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios);
+static void dgnc_tty_send_xchar(struct tty_struct *tty, char ch);
+
+
+static const struct tty_operations dgnc_tty_ops = {
+ .open = dgnc_tty_open,
+ .close = dgnc_tty_close,
+ .write = dgnc_tty_write,
+ .write_room = dgnc_tty_write_room,
+ .flush_buffer = dgnc_tty_flush_buffer,
+ .chars_in_buffer = dgnc_tty_chars_in_buffer,
+ .flush_chars = dgnc_tty_flush_chars,
+ .ioctl = dgnc_tty_ioctl,
+ .set_termios = dgnc_tty_set_termios,
+ .stop = dgnc_tty_stop,
+ .start = dgnc_tty_start,
+ .throttle = dgnc_tty_throttle,
+ .unthrottle = dgnc_tty_unthrottle,
+ .hangup = dgnc_tty_hangup,
+ .put_char = dgnc_tty_put_char,
+ .tiocmget = dgnc_tty_tiocmget,
+ .tiocmset = dgnc_tty_tiocmset,
+ .break_ctl = dgnc_tty_send_break,
+ .wait_until_sent = dgnc_tty_wait_until_sent,
+ .send_xchar = dgnc_tty_send_xchar
+};
+
+/************************************************************************
+ *
+ * TTY Initialization/Cleanup Functions
+ *
+ ************************************************************************/
+
+/*
+ * dgnc_tty_preinit()
+ *
+ * Initialize any global tty related data before we download any boards.
+ */
+int dgnc_tty_preinit(void)
+{
+ /*
+ * Allocate a buffer for doing the copy from user space to
+ * kernel space in dgnc_write(). We only use one buffer and
+ * control access to it with a semaphore. If we are paging, we
+ * are already in trouble so one buffer won't hurt much anyway.
+ *
+ * We are okay to sleep in the malloc, as this routine
+ * is only called during module load, (not in interrupt context),
+ * and with no locks held.
+ */
+ dgnc_TmpWriteBuf = kmalloc(WRITEBUFLEN, GFP_KERNEL);
+
+ if (!dgnc_TmpWriteBuf) {
+ DPR_INIT(("unable to allocate tmp write buf"));
+ return (-ENOMEM);
+ }
+
+ return(0);
+}
+
+
+/*
+ * dgnc_tty_register()
+ *
+ * Init the tty subsystem for this board.
+ */
+int dgnc_tty_register(struct board_t *brd)
+{
+ int rc = 0;
+
+ DPR_INIT(("tty_register start\n"));
+
+ memset(&brd->SerialDriver, 0, sizeof(struct tty_driver));
+ memset(&brd->PrintDriver, 0, sizeof(struct tty_driver));
+
+ brd->SerialDriver.magic = TTY_DRIVER_MAGIC;
+
+ snprintf(brd->SerialName, MAXTTYNAMELEN, "tty_dgnc_%d_", brd->boardnum);
+
+ brd->SerialDriver.name = brd->SerialName;
+ brd->SerialDriver.name_base = 0;
+ brd->SerialDriver.major = 0;
+ brd->SerialDriver.minor_start = 0;
+ brd->SerialDriver.num = brd->maxports;
+ brd->SerialDriver.type = TTY_DRIVER_TYPE_SERIAL;
+ brd->SerialDriver.subtype = SERIAL_TYPE_NORMAL;
+ brd->SerialDriver.init_termios = DgncDefaultTermios;
+ brd->SerialDriver.driver_name = DRVSTR;
+ brd->SerialDriver.flags = (TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK);
+
+ /*
+ * The kernel wants space to store pointers to
+ * tty_struct's and termios's.
+ */
+ brd->SerialDriver.ttys = kzalloc(brd->maxports * sizeof(struct tty_struct *), GFP_KERNEL);
+ if (!brd->SerialDriver.ttys)
+ return(-ENOMEM);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ brd->SerialDriver.refcount = brd->TtyRefCnt;
+#else
+ kref_init(&brd->SerialDriver.kref);
+#endif
+
+ brd->SerialDriver.termios = kzalloc(brd->maxports * sizeof(struct ktermios *), GFP_KERNEL);
+ if (!brd->SerialDriver.termios)
+ return(-ENOMEM);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
+ brd->SerialDriver.termios_locked = kzalloc(brd->maxports * sizeof(struct ktermios *), GFP_KERNEL);
+ if (!brd->SerialDriver.termios_locked)
+ return(-ENOMEM);
+#endif
+ /*
+ * Entry points for driver. Called by the kernel from
+ * tty_io.c and n_tty.c.
+ */
+ tty_set_operations(&brd->SerialDriver, &dgnc_tty_ops);
+
+ if (!brd->dgnc_Major_Serial_Registered) {
+ /* Register tty devices */
+ rc = tty_register_driver(&brd->SerialDriver);
+ if (rc < 0) {
+ APR(("Can't register tty device (%d)\n", rc));
+ return(rc);
+ }
+ brd->dgnc_Major_Serial_Registered = TRUE;
+ }
+
+ /*
+ * If we're doing transparent print, we have to do all of the above
+ * again, separately so we don't get the LD confused about what major
+ * we are when we get into the dgnc_tty_open() routine.
+ */
+ brd->PrintDriver.magic = TTY_DRIVER_MAGIC;
+ snprintf(brd->PrintName, MAXTTYNAMELEN, "pr_dgnc_%d_", brd->boardnum);
+
+ brd->PrintDriver.name = brd->PrintName;
+ brd->PrintDriver.name_base = 0;
+ brd->PrintDriver.major = brd->SerialDriver.major;
+ brd->PrintDriver.minor_start = 0x80;
+ brd->PrintDriver.num = brd->maxports;
+ brd->PrintDriver.type = TTY_DRIVER_TYPE_SERIAL;
+ brd->PrintDriver.subtype = SERIAL_TYPE_NORMAL;
+ brd->PrintDriver.init_termios = DgncDefaultTermios;
+ brd->PrintDriver.driver_name = DRVSTR;
+ brd->PrintDriver.flags = (TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK);
+
+ /*
+ * The kernel wants space to store pointers to
+ * tty_struct's and termios's. Must be separate from
+ * the Serial Driver so we don't get confused
+ */
+ brd->PrintDriver.ttys = kzalloc(brd->maxports * sizeof(struct tty_struct *), GFP_KERNEL);
+ if (!brd->PrintDriver.ttys)
+ return(-ENOMEM);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ brd->PrintDriver.refcount = brd->TtyRefCnt;
+#else
+ kref_init(&brd->PrintDriver.kref);
+#endif
+
+ brd->PrintDriver.termios = kzalloc(brd->maxports * sizeof(struct ktermios *), GFP_KERNEL);
+ if (!brd->PrintDriver.termios)
+ return(-ENOMEM);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
+ brd->PrintDriver.termios_locked = kzalloc(brd->maxports * sizeof(struct ktermios *), GFP_KERNEL);
+ if (!brd->PrintDriver.termios_locked)
+ return(-ENOMEM);
+#endif
+
+ /*
+ * Entry points for driver. Called by the kernel from
+ * tty_io.c and n_tty.c.
+ */
+ tty_set_operations(&brd->PrintDriver, &dgnc_tty_ops);
+
+ if (!brd->dgnc_Major_TransparentPrint_Registered) {
+ /* Register Transparent Print devices */
+ rc = tty_register_driver(&brd->PrintDriver);
+ if (rc < 0) {
+ APR(("Can't register Transparent Print device (%d)\n", rc));
+ return(rc);
+ }
+ brd->dgnc_Major_TransparentPrint_Registered = TRUE;
+ }
+
+ dgnc_BoardsByMajor[brd->SerialDriver.major] = brd;
+ brd->dgnc_Serial_Major = brd->SerialDriver.major;
+ brd->dgnc_TransparentPrint_Major = brd->PrintDriver.major;
+
+ DPR_INIT(("DGNC REGISTER TTY: MAJOR: %d\n", brd->SerialDriver.major));
+
+ return (rc);
+}
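
dgnc_tty_register() above fills in a tty_driver structure embedded in board_t and allocates the ttys/termios arrays by hand. Mainline tty drivers of this vintage more commonly let alloc_tty_driver() do that bookkeeping. A hedged sketch of the equivalent setup, assuming the same dgnc_tty_ops, DgncDefaultTermios, and DRVSTR, and with brd and rc as declared in the function above:

    struct tty_driver *drv = alloc_tty_driver(brd->maxports);

    if (!drv)
            return -ENOMEM;

    drv->name = brd->SerialName;
    drv->driver_name = DRVSTR;
    drv->major = 0;                         /* let the kernel pick a major */
    drv->minor_start = 0;
    drv->type = TTY_DRIVER_TYPE_SERIAL;
    drv->subtype = SERIAL_TYPE_NORMAL;
    drv->init_termios = DgncDefaultTermios;
    drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV |
                 TTY_DRIVER_HARDWARE_BREAK;
    tty_set_operations(drv, &dgnc_tty_ops);

    rc = tty_register_driver(drv);
    if (rc < 0)
            put_tty_driver(drv);            /* drop the reference on failure */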
+
+
+/*
+ * dgnc_tty_init()
+ *
+ * Init the tty subsystem. Called once per board after board has been
+ * downloaded and init'ed.
+ */
+int dgnc_tty_init(struct board_t *brd)
+{
+ int i;
+ void __iomem *vaddr;
+ struct channel_t *ch;
+
+ if (!brd)
+ return (-ENXIO);
+
+ DPR_INIT(("dgnc_tty_init start\n"));
+
+ /*
+ * Initialize board structure elements.
+ */
+
+ vaddr = brd->re_map_membase;
+
+ brd->nasync = brd->maxports;
+
+ /*
+ * Allocate channel memory that might not have been allocated
+ * when the driver was first loaded.
+ */
+ for (i = 0; i < brd->nasync; i++) {
+ if (!brd->channels[i]) {
+
+ /*
+ * Okay to malloc with GFP_KERNEL, we are not at
+ * interrupt context, and there are no locks held.
+ */
+ brd->channels[i] = kzalloc(sizeof(struct channel_t), GFP_KERNEL);
+ if (!brd->channels[i]) {
+ DPR_CORE(("%s:%d Unable to allocate memory for channel struct\n",
+ __FILE__, __LINE__));
+ }
+ }
+ }
+
+ ch = brd->channels[0];
+ vaddr = brd->re_map_membase;
+
+ /* Set up channel variables */
+ for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {
+
+ if (!brd->channels[i])
+ continue;
+
+ DGNC_SPINLOCK_INIT(ch->ch_lock);
+
+ /* Store all our magic numbers */
+ ch->magic = DGNC_CHANNEL_MAGIC;
+ ch->ch_tun.magic = DGNC_UNIT_MAGIC;
+ ch->ch_tun.un_ch = ch;
+ ch->ch_tun.un_type = DGNC_SERIAL;
+ ch->ch_tun.un_dev = i;
+
+ ch->ch_pun.magic = DGNC_UNIT_MAGIC;
+ ch->ch_pun.un_ch = ch;
+ ch->ch_pun.un_type = DGNC_PRINT;
+ ch->ch_pun.un_dev = i + 128;
+
+ if (brd->bd_uart_offset == 0x200)
+ ch->ch_neo_uart = vaddr + (brd->bd_uart_offset * i);
+ else
+ ch->ch_cls_uart = vaddr + (brd->bd_uart_offset * i);
+
+ ch->ch_bd = brd;
+ ch->ch_portnum = i;
+ ch->ch_digi = dgnc_digi_init;
+
+ /* .25 second delay */
+ ch->ch_close_delay = 250;
+
+ init_waitqueue_head(&ch->ch_flags_wait);
+ init_waitqueue_head(&ch->ch_tun.un_flags_wait);
+ init_waitqueue_head(&ch->ch_pun.un_flags_wait);
+ init_waitqueue_head(&ch->ch_sniff_wait);
+
+ {
+ struct device *classp;
+ classp = tty_register_device(&brd->SerialDriver, i,
+ &(ch->ch_bd->pdev->dev));
+ ch->ch_tun.un_sysfs = classp;
+ dgnc_create_tty_sysfs(&ch->ch_tun, classp);
+
+ classp = tty_register_device(&brd->PrintDriver, i,
+ &(ch->ch_bd->pdev->dev));
+ ch->ch_pun.un_sysfs = classp;
+ dgnc_create_tty_sysfs(&ch->ch_pun, classp);
+ }
+
+ }
+
+ DPR_INIT(("dgnc_tty_init finish\n"));
+
+ return (0);
+}
+
+
+/*
+ * dgnc_tty_post_uninit()
+ *
+ * UnInitialize any global tty related data.
+ */
+void dgnc_tty_post_uninit(void)
+{
+ if (dgnc_TmpWriteBuf) {
+ kfree(dgnc_TmpWriteBuf);
+ dgnc_TmpWriteBuf = NULL;
+ }
+}
+
+
+/*
+ * dgnc_tty_uninit()
+ *
+ * Uninitialize the TTY portion of this driver. Free all memory and
+ * resources.
+ */
+void dgnc_tty_uninit(struct board_t *brd)
+{
+ int i = 0;
+
+ if (brd->dgnc_Major_Serial_Registered) {
+ dgnc_BoardsByMajor[brd->SerialDriver.major] = NULL;
+ brd->dgnc_Serial_Major = 0;
+ for (i = 0; i < brd->nasync; i++) {
+ dgnc_remove_tty_sysfs(brd->channels[i]->ch_tun.un_sysfs);
+ tty_unregister_device(&brd->SerialDriver, i);
+ }
+ tty_unregister_driver(&brd->SerialDriver);
+ brd->dgnc_Major_Serial_Registered = FALSE;
+ }
+
+ if (brd->dgnc_Major_TransparentPrint_Registered) {
+ dgnc_BoardsByMajor[brd->PrintDriver.major] = NULL;
+ brd->dgnc_TransparentPrint_Major = 0;
+ for (i = 0; i < brd->nasync; i++) {
+ dgnc_remove_tty_sysfs(brd->channels[i]->ch_pun.un_sysfs);
+ tty_unregister_device(&brd->PrintDriver, i);
+ }
+ tty_unregister_driver(&brd->PrintDriver);
+ brd->dgnc_Major_TransparentPrint_Registered = FALSE;
+ }
+
+ if (brd->SerialDriver.ttys) {
+ kfree(brd->SerialDriver.ttys);
+ brd->SerialDriver.ttys = NULL;
+ }
+ if (brd->PrintDriver.ttys) {
+ kfree(brd->PrintDriver.ttys);
+ brd->PrintDriver.ttys = NULL;
+ }
+}
+
+
+#define TMPBUFLEN (1024)
+
+/*
+ * dgnc_sniff_nowait_nolock - Dump data out to the "sniff" buffer if the
+ * proc sniff file is open.
+ */
+void dgnc_sniff_nowait_nolock(struct channel_t *ch, uchar *text, uchar *buf, int len)
+{
+ struct timeval tv;
+ int n;
+ int r;
+ int nbuf;
+ int i;
+ int tmpbuflen;
+ char tmpbuf[TMPBUFLEN];
+ char *p = tmpbuf;
+ int too_much_data;
+
+ /* Leave if sniff not open */
+ if (!(ch->ch_sniff_flags & SNIFF_OPEN))
+ return;
+
+ do_gettimeofday(&tv);
+
+ /* Create our header for data dump */
+ p += sprintf(p, "<%ld %ld><%s><", tv.tv_sec, tv.tv_usec, text);
+ tmpbuflen = p - tmpbuf;
+
+ do {
+ too_much_data = 0;
+
+ for (i = 0; i < len && tmpbuflen < (TMPBUFLEN - 4); i++) {
+ p += sprintf(p, "%02x ", *buf);
+ buf++;
+ tmpbuflen = p - tmpbuf;
+ }
+
+ if (tmpbuflen < (TMPBUFLEN - 4)) {
+ if (i > 0)
+ p += sprintf(p - 1, "%s\n", ">");
+ else
+ p += sprintf(p, "%s\n", ">");
+ } else {
+ too_much_data = 1;
+ len -= i;
+ }
+
+ nbuf = strlen(tmpbuf);
+ p = tmpbuf;
+
+ /*
+ * Loop while data remains.
+ */
+ while (nbuf > 0 && ch->ch_sniff_buf != 0) {
+ /*
+ * Determine the amount of available space left in the
+ * buffer. If there's none, wait until some appears.
+ */
+ n = (ch->ch_sniff_out - ch->ch_sniff_in - 1) & SNIFF_MASK;
+
+ /*
+ * If there is no space left to write to in our sniff buffer,
+ * we have no choice but to drop the data.
+ * We *cannot* sleep here waiting for space, because this
+ * function was probably called by the interrupt/timer routines!
+ */
+ if (n == 0) {
+ return;
+ }
+
+ /*
+ * Copy as much data as will fit.
+ */
+
+ if (n > nbuf)
+ n = nbuf;
+
+ r = SNIFF_MAX - ch->ch_sniff_in;
+
+ if (r <= n) {
+ memcpy(ch->ch_sniff_buf + ch->ch_sniff_in, p, r);
+
+ n -= r;
+ ch->ch_sniff_in = 0;
+ p += r;
+ nbuf -= r;
+ }
+
+ memcpy(ch->ch_sniff_buf + ch->ch_sniff_in, p, n);
+
+ ch->ch_sniff_in += n;
+ p += n;
+ nbuf -= n;
+
+ /*
+ * Wakeup any thread waiting for data
+ */
+ if (ch->ch_sniff_flags & SNIFF_WAIT_DATA) {
+ ch->ch_sniff_flags &= ~SNIFF_WAIT_DATA;
+ wake_up_interruptible(&ch->ch_sniff_wait);
+ }
+ }
+
+ /*
+ * If the user sent us too much data to push into our tmpbuf,
+ * we need to keep looping around on all the data.
+ */
+ if (too_much_data) {
+ p = tmpbuf;
+ tmpbuflen = 0;
+ }
+
+ } while (too_much_data);
+}
+
+
+/*=======================================================================
+ *
+ * dgnc_wmove - Write data to transmit queue.
+ *
+ * ch - Pointer to channel structure.
+ * buf - Pointer to characters to be moved.
+ * n - Number of characters to move.
+ *
+ *=======================================================================*/
+static void dgnc_wmove(struct channel_t *ch, char *buf, uint n)
+{
+ int remain;
+ uint head;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ head = ch->ch_w_head & WQUEUEMASK;
+
+ /*
+ * If the write wraps over the top of the circular buffer,
+ * move the portion up to the wrap point, and reset the
+ * pointers to the bottom.
+ */
+ remain = WQUEUESIZE - head;
+
+ if (n >= remain) {
+ n -= remain;
+ memcpy(ch->ch_wqueue + head, buf, remain);
+ head = 0;
+ buf += remain;
+ }
+
+ if (n > 0) {
+ /*
+ * Move rest of data.
+ */
+ remain = n;
+ memcpy(ch->ch_wqueue + head, buf, remain);
+ head += remain;
+ }
+
+ head &= WQUEUEMASK;
+ ch->ch_w_head = head;
+}
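
dgnc_wmove() relies on WQUEUESIZE being a power of two, so that WQUEUEMASK (WQUEUESIZE - 1) can wrap the head index with a single AND. A self-contained userspace sketch of the same wrap logic, with a made-up 8-byte queue:

    #include <stdio.h>
    #include <string.h>

    #define QSIZE 8                         /* must be a power of two */
    #define QMASK (QSIZE - 1)

    static char queue[QSIZE];
    static unsigned int q_head;             /* producer index, wraps via QMASK */

    static void wmove(const char *buf, unsigned int n)
    {
            unsigned int head = q_head & QMASK;
            unsigned int remain = QSIZE - head;

            if (n >= remain) {              /* write wraps past the top */
                    memcpy(queue + head, buf, remain);
                    buf += remain;
                    n -= remain;
                    head = 0;
            }
            if (n > 0) {
                    memcpy(queue + head, buf, n);
                    head += n;
            }
            q_head = head & QMASK;
    }

    int main(void)
    {
            wmove("abcdef", 6);             /* head: 0 -> 6 */
            wmove("ghij", 4);               /* wraps: fills 6,7 then 0,1 */
            printf("head=%u queue=%.8s\n", q_head, queue);  /* head=2 queue=ijcdefgh */
            return 0;
    }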
+
+
+
+
+/*=======================================================================
+ *
+ * dgnc_input - Process received data.
+ *
+ * ch - Pointer to channel structure.
+ *
+ *=======================================================================*/
+void dgnc_input(struct channel_t *ch)
+{
+ struct board_t *bd;
+ struct tty_struct *tp;
+ struct tty_ldisc *ld;
+ uint rmask;
+ ushort head;
+ ushort tail;
+ int data_len;
+ ulong lock_flags;
+ int flip_len;
+ int len = 0;
+ int n = 0;
+ int s = 0;
+ int i = 0;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ tp = ch->ch_tun.un_tty;
+
+ bd = ch->ch_bd;
+ if(!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /*
+ * Figure the number of characters in the buffer.
+ * Exit immediately if none.
+ */
+ rmask = RQUEUEMASK;
+ head = ch->ch_r_head & rmask;
+ tail = ch->ch_r_tail & rmask;
+ data_len = (head - tail) & rmask;
+
+ if (data_len == 0) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+ DPR_READ(("dgnc_input start\n"));
+
+ /*
+ * If the device is not open, or CREAD is off,
+ * flush input data and return immediately.
+ */
+ if (!tp || (tp->magic != TTY_MAGIC) || !(ch->ch_tun.un_flags & UN_ISOPEN) ||
+ !(tp->termios.c_cflag & CREAD) || (ch->ch_tun.un_flags & UN_CLOSING)) {
+
+ DPR_READ(("input. dropping %d bytes on port %d...\n", data_len, ch->ch_portnum));
+ DPR_READ(("input. tp: %p tp->magic: %x MAGIC:%x ch flags: %x\n",
+ tp, tp ? tp->magic : 0, TTY_MAGIC, ch->ch_tun.un_flags));
+
+ ch->ch_r_head = tail;
+
+ /* Force queue flow control to be released, if needed */
+ dgnc_check_queue_flow_control(ch);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+ /*
+ * If we are throttled, simply don't read any data.
+ */
+ if (ch->ch_flags & CH_FORCED_STOPI) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ DPR_READ(("Port %d throttled, not reading any data. head: %x tail: %x\n",
+ ch->ch_portnum, head, tail));
+ return;
+ }
+
+ DPR_READ(("dgnc_input start 2\n"));
+
+ flip_len = TTY_FLIPBUF_SIZE;
+
+ /* Chop down the length, if needed */
+ len = min(data_len, flip_len);
+ len = min(len, (N_TTY_BUF_SIZE - 1));
+
+ ld = tty_ldisc_ref(tp);
+
+#ifdef TTY_DONT_FLIP
+ /*
+ * If the DONT_FLIP flag is on, don't flush our buffer, and act
+ * like the ld doesn't have any space to put the data right now.
+ */
+ if (test_bit(TTY_DONT_FLIP, &tp->flags))
+ len = 0;
+#endif
+
+ /*
+ * If we were unable to get a reference to the ld,
+ * don't flush our buffer, and act like the ld doesn't
+ * have any space to put the data right now.
+ */
+ if (!ld) {
+ len = 0;
+ } else {
+ /*
+ * If ld doesn't have a pointer to a receive_buf function,
+ * flush the data, then act like the ld doesn't have any
+ * space to put the data right now.
+ */
+ if (!ld->ops->receive_buf) {
+ ch->ch_r_head = ch->ch_r_tail;
+ len = 0;
+ }
+ }
+
+ if (len <= 0) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ if (ld)
+ tty_ldisc_deref(ld);
+ return;
+ }
+
+ /*
+ * The tty layer in the kernel has changed in 2.6.16+.
+ *
+ * The flip buffers in the tty structure are no longer exposed,
+ * and probably will be going away eventually.
+ *
+ * If we are completely raw, we don't need to go through a lot
+ * of the tty layers that exist.
+ * In this case, we take the shortest and fastest route we
+ * can to relay the data to the user.
+ *
+ * On the other hand, if we are not raw, we need to go through
+ * the new 2.6.16+ tty layer, which has a better-defined API.
+ */
+ len = tty_buffer_request_room(tp->port, len);
+ n = len;
+
+ /*
+ * n now contains the maximum amount of data we can copy,
+ * bounded either by how much the Linux tty layer can handle,
+ * or the amount of data the card actually has pending...
+ */
+ while (n) {
+ s = ((head >= tail) ? head : RQUEUESIZE) - tail;
+ s = min(s, n);
+
+ if (s <= 0)
+ break;
+
+ /*
+ * If conditions are such that ld needs to see all
+ * UART errors, we will have to walk each character
+ * and error byte and send them to the buffer one at
+ * a time.
+ */
+ if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
+ for (i = 0; i < s; i++) {
+ if (*(ch->ch_equeue + tail + i) & UART_LSR_BI)
+ tty_insert_flip_char(tp->port, *(ch->ch_rqueue + tail + i), TTY_BREAK);
+ else if (*(ch->ch_equeue + tail + i) & UART_LSR_PE)
+ tty_insert_flip_char(tp->port, *(ch->ch_rqueue + tail + i), TTY_PARITY);
+ else if (*(ch->ch_equeue + tail + i) & UART_LSR_FE)
+ tty_insert_flip_char(tp->port, *(ch->ch_rqueue + tail + i), TTY_FRAME);
+ else
+ tty_insert_flip_char(tp->port, *(ch->ch_rqueue + tail + i), TTY_NORMAL);
+ }
+ }
+ else {
+ tty_insert_flip_string(tp->port, ch->ch_rqueue + tail, s);
+ }
+
+ dgnc_sniff_nowait_nolock(ch, "USER READ", ch->ch_rqueue + tail, s);
+
+ tail += s;
+ n -= s;
+ /* Flip queue if needed */
+ tail &= rmask;
+ }
+
+ ch->ch_r_tail = tail & rmask;
+ ch->ch_e_tail = tail & rmask;
+ dgnc_check_queue_flow_control(ch);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+	/* Tell the tty layer it's okay to "eat" the data now */
+ tty_flip_buffer_push(tp->port);
+
+ if (ld)
+ tty_ldisc_deref(ld);
+
+ DPR_READ(("dgnc_input - finish\n"));
+}
+
+
+/************************************************************************
+ * Determines when CARRIER changes state and takes appropriate
+ * action.
+ ************************************************************************/
+void dgnc_carrier(struct channel_t *ch)
+{
+ struct board_t *bd;
+
+ int virt_carrier = 0;
+ int phys_carrier = 0;
+
+ DPR_CARR(("dgnc_carrier called...\n"));
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ if (ch->ch_mistat & UART_MSR_DCD) {
+ DPR_CARR(("mistat: %x D_CD: %x\n", ch->ch_mistat, ch->ch_mistat & UART_MSR_DCD));
+ phys_carrier = 1;
+ }
+
+ if (ch->ch_digi.digi_flags & DIGI_FORCEDCD) {
+ virt_carrier = 1;
+ }
+
+ if (ch->ch_c_cflag & CLOCAL) {
+ virt_carrier = 1;
+ }
+
+
+ DPR_CARR(("DCD: physical: %d virt: %d\n", phys_carrier, virt_carrier));
+
+ /*
+ * Test for a VIRTUAL carrier transition to HIGH.
+ */
+ if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {
+
+ /*
+ * When carrier rises, wake any threads waiting
+ * for carrier in the open routine.
+ */
+
+ DPR_CARR(("carrier: virt DCD rose\n"));
+
+ if (waitqueue_active(&(ch->ch_flags_wait)))
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+
+ /*
+ * Test for a PHYSICAL carrier transition to HIGH.
+ */
+ if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {
+
+ /*
+ * When carrier rises, wake any threads waiting
+ * for carrier in the open routine.
+ */
+
+ DPR_CARR(("carrier: physical DCD rose\n"));
+
+ if (waitqueue_active(&(ch->ch_flags_wait)))
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+
+ /*
+ * Test for a PHYSICAL transition to low, so long as we aren't
+ * currently ignoring physical transitions (which is what "virtual
+ * carrier" indicates).
+ *
+ * The transition of the virtual carrier to low really doesn't
+ * matter... it really only means "ignore carrier state", not
+ * "make pretend that carrier is there".
+ */
+ if ((virt_carrier == 0) && ((ch->ch_flags & CH_CD) != 0) &&
+ (phys_carrier == 0))
+ {
+
+ /*
+ * When carrier drops:
+ *
+ * Drop carrier on all open units.
+ *
+ * Flush queues, waking up any task waiting in the
+ * line discipline.
+ *
+ * Send a hangup to the control terminal.
+ *
+ * Enable all select calls.
+ */
+ if (waitqueue_active(&(ch->ch_flags_wait)))
+ wake_up_interruptible(&ch->ch_flags_wait);
+
+ if (ch->ch_tun.un_open_count > 0) {
+ DPR_CARR(("Sending tty hangup\n"));
+ tty_hangup(ch->ch_tun.un_tty);
+ }
+
+ if (ch->ch_pun.un_open_count > 0) {
+ DPR_CARR(("Sending pr hangup\n"));
+ tty_hangup(ch->ch_pun.un_tty);
+ }
+ }
+
+ /*
+ * Make sure that our cached values reflect the current reality.
+ */
+ if (virt_carrier == 1)
+ ch->ch_flags |= CH_FCAR;
+ else
+ ch->ch_flags &= ~CH_FCAR;
+
+ if (phys_carrier == 1)
+ ch->ch_flags |= CH_CD;
+ else
+ ch->ch_flags &= ~CH_CD;
+}
+
+/*
+ * Assign the custom baud rate to the channel structure
+ */
+static void dgnc_set_custom_speed(struct channel_t *ch, uint newrate)
+{
+ int testdiv;
+ int testrate_high;
+ int testrate_low;
+ int deltahigh;
+ int deltalow;
+
+ if (newrate < 0)
+ newrate = 0;
+
+ /*
+ * Since the divisor is stored in a 16-bit integer, we make sure
+ * we don't allow any rates smaller than a 16-bit integer would allow.
+ * And of course, rates above the dividend won't fly.
+ */
+ if (newrate && newrate < ((ch->ch_bd->bd_dividend / 0xFFFF) + 1))
+ newrate = ((ch->ch_bd->bd_dividend / 0xFFFF) + 1);
+
+ if (newrate && newrate > ch->ch_bd->bd_dividend)
+ newrate = ch->ch_bd->bd_dividend;
+
+ while (newrate > 0) {
+ testdiv = ch->ch_bd->bd_dividend / newrate;
+
+ /*
+ * If we try to figure out what rate the board would use
+ * with the test divisor, it will be either equal or higher
+ * than the requested baud rate. If we then determine the
+ * rate with a divisor one higher, we will get the next lower
+ * supported rate below the requested.
+ */
+ testrate_high = ch->ch_bd->bd_dividend / testdiv;
+ testrate_low = ch->ch_bd->bd_dividend / (testdiv + 1);
+
+ /*
+ * If the rate for the requested divisor is correct, just
+ * use it and be done.
+ */
+ if (testrate_high == newrate )
+ break;
+
+ /*
+ * Otherwise, pick the rate that is closer (i.e. whichever rate
+ * has a smaller delta).
+ */
+ deltahigh = testrate_high - newrate;
+ deltalow = newrate - testrate_low;
+
+ if (deltahigh < deltalow) {
+ newrate = testrate_high;
+ } else {
+ newrate = testrate_low;
+ }
+
+ break;
+ }
+
+ ch->ch_custom_speed = newrate;
+
+ return;
+}
+
+
+void dgnc_check_queue_flow_control(struct channel_t *ch)
+{
+ int qleft = 0;
+
+ /* Store how much space we have left in the queue */
+	qleft = ch->ch_r_tail - ch->ch_r_head - 1;
+	if (qleft < 0)
+		qleft += RQUEUEMASK + 1;
+
+ /*
+ * Check to see if we should enforce flow control on our queue because
+	 * the ld (or user) isn't reading data out of our queue fast enough.
+ *
+ * NOTE: This is done based on what the current flow control of the
+ * port is set for.
+ *
+ * 1) HWFLOW (RTS) - Turn off the UART's Receive interrupt.
+ * This will cause the UART's FIFO to back up, and force
+ * the RTS signal to be dropped.
+ * 2) SWFLOW (IXOFF) - Keep trying to send a stop character to
+ * the other side, in hopes it will stop sending data to us.
+ * 3) NONE - Nothing we can do. We will simply drop any extra data
+ * that gets sent into us when the queue fills up.
+ */
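+	/* High-water mark: fewer than 256 bytes of queue space remain. */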
+ if (qleft < 256) {
+ /* HWFLOW */
+ if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS) {
+			if (!(ch->ch_flags & CH_RECEIVER_OFF)) {
+ ch->ch_bd->bd_ops->disable_receiver(ch);
+ ch->ch_flags |= (CH_RECEIVER_OFF);
+ DPR_READ(("Internal queue hit hilevel mark (%d)! Turning off interrupts.\n",
+ qleft));
+ }
+ }
+ /* SWFLOW */
+ else if (ch->ch_c_iflag & IXOFF) {
+ if (ch->ch_stops_sent <= MAX_STOPS_SENT) {
+ ch->ch_bd->bd_ops->send_stop_character(ch);
+ ch->ch_stops_sent++;
+ DPR_READ(("Sending stop char! Times sent: %x\n", ch->ch_stops_sent));
+ }
+ }
+ /* No FLOW */
+ else {
+ /* Empty... Can't do anything about the impending overflow... */
+ }
+ }
+
+ /*
+	 * Check to see if we should release flow control because the
+	 * ld (or user) has finally read enough data out of our queue.
+ *
+ * NOTE: This is done based on what the current flow control of the
+ * port is set for.
+ *
+ * 1) HWFLOW (RTS) - Turn back on the UART's Receive interrupt.
+ * This will cause the UART's FIFO to raise RTS back up,
+ * which will allow the other side to start sending data again.
+ * 2) SWFLOW (IXOFF) - Send a start character to
+ * the other side, so it will start sending data to us again.
+ * 3) NONE - Do nothing. Since we didn't do anything to turn off the
+ * other side, we don't need to do anything now.
+ */
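+	/* Low-water mark: more than half of the queue has been drained. */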
+ if (qleft > (RQUEUESIZE / 2)) {
+ /* HWFLOW */
+ if (ch->ch_digi.digi_flags & RTSPACE || ch->ch_c_cflag & CRTSCTS) {
+ if (ch->ch_flags & CH_RECEIVER_OFF) {
+ ch->ch_bd->bd_ops->enable_receiver(ch);
+ ch->ch_flags &= ~(CH_RECEIVER_OFF);
+ DPR_READ(("Internal queue hit lowlevel mark (%d)! Turning on interrupts.\n",
+ qleft));
+ }
+ }
+ /* SWFLOW */
+ else if (ch->ch_c_iflag & IXOFF && ch->ch_stops_sent) {
+ ch->ch_stops_sent = 0;
+ ch->ch_bd->bd_ops->send_start_character(ch);
+ DPR_READ(("Sending start char!\n"));
+ }
+ /* No FLOW */
+ else {
+ /* Nothing needed. */
+ }
+ }
+}
+
+
+void dgnc_wakeup_writes(struct channel_t *ch)
+{
+ int qlen = 0;
+ ulong lock_flags;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /*
+ * If channel now has space, wake up anyone waiting on the condition.
+ */
+	qlen = ch->ch_w_head - ch->ch_w_tail;
+	if (qlen < 0)
+		qlen += WQUEUESIZE;
+
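+	/* Still fewer than 256 bytes free; nothing useful to wake anyone for. */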
+ if (qlen >= (WQUEUESIZE - 256)) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+ if (ch->ch_tun.un_flags & UN_ISOPEN) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
+ if ((ch->ch_tun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
+ ch->ch_tun.un_tty->ldisc->ops->write_wakeup)
+ {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ (ch->ch_tun.un_tty->ldisc->ops->write_wakeup)(ch->ch_tun.un_tty);
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ }
+#else
+ if ((ch->ch_tun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
+ ch->ch_tun.un_tty->ldisc.ops->write_wakeup)
+ {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ (ch->ch_tun.un_tty->ldisc.ops->write_wakeup)(ch->ch_tun.un_tty);
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ }
+#endif
+
+ wake_up_interruptible(&ch->ch_tun.un_tty->write_wait);
+
+ /*
+ * If unit is set to wait until empty, check to make sure
+ * the queue AND FIFO are both empty.
+ */
+ if (ch->ch_tun.un_flags & UN_EMPTY) {
+ if ((qlen == 0) && (ch->ch_bd->bd_ops->get_uart_bytes_left(ch) == 0)) {
+ ch->ch_tun.un_flags &= ~(UN_EMPTY);
+
+ /*
+ * If RTS Toggle mode is on, whenever
+				 * the queue and UART are empty, keep RTS low.
+ */
+ if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
+ ch->ch_mostat &= ~(UART_MCR_RTS);
+ ch->ch_bd->bd_ops->assert_modem_signals(ch);
+ }
+
+ /*
+ * If DTR Toggle mode is on, whenever
+				 * the queue and UART are empty, keep DTR low.
+ */
+ if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
+ ch->ch_mostat &= ~(UART_MCR_DTR);
+ ch->ch_bd->bd_ops->assert_modem_signals(ch);
+ }
+ }
+ }
+
+ wake_up_interruptible(&ch->ch_tun.un_flags_wait);
+ }
+
+ if (ch->ch_pun.un_flags & UN_ISOPEN) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
+ if ((ch->ch_pun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
+ ch->ch_pun.un_tty->ldisc->ops->write_wakeup)
+ {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ (ch->ch_pun.un_tty->ldisc->ops->write_wakeup)(ch->ch_pun.un_tty);
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ }
+#else
+ if ((ch->ch_pun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
+ ch->ch_pun.un_tty->ldisc.ops->write_wakeup)
+ {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ (ch->ch_pun.un_tty->ldisc.ops->write_wakeup)(ch->ch_pun.un_tty);
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ }
+#endif
+
+ wake_up_interruptible(&ch->ch_pun.un_tty->write_wait);
+
+ /*
+ * If unit is set to wait until empty, check to make sure
+ * the queue AND FIFO are both empty.
+ */
+ if (ch->ch_pun.un_flags & UN_EMPTY) {
+ if ((qlen == 0) && (ch->ch_bd->bd_ops->get_uart_bytes_left(ch) == 0)) {
+ ch->ch_pun.un_flags &= ~(UN_EMPTY);
+ }
+ }
+
+ wake_up_interruptible(&ch->ch_pun.un_flags_wait);
+ }
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+}
+
+
+
+/************************************************************************
+ *
+ * TTY Entry points and helper functions
+ *
+ ************************************************************************/
+
+/*
+ * dgnc_tty_open()
+ *
+ */
+static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
+{
+ struct board_t *brd;
+ struct channel_t *ch;
+ struct un_t *un;
+ uint major = 0;
+ uint minor = 0;
+ int rc = 0;
+ ulong lock_flags;
+
+ rc = 0;
+
+ major = MAJOR(tty_devnum(tty));
+ minor = MINOR(tty_devnum(tty));
+
+ if (major > 255) {
+ return -ENXIO;
+ }
+
+ /* Get board pointer from our array of majors we have allocated */
+ brd = dgnc_BoardsByMajor[major];
+ if (!brd) {
+ return -ENXIO;
+ }
+
+ /*
+	 * If the board is not yet in the READY state, sleep until it gets
+	 * there or until the open is cancelled.
+ */
+ rc = wait_event_interruptible(brd->state_wait,
+ (brd->state & BOARD_READY));
+
+ if (rc) {
+ return rc;
+ }
+
+ DGNC_LOCK(brd->bd_lock, lock_flags);
+
+	/* If the port number of the opened device exceeds our number of ports, bail. */
+ if (PORT_NUM(minor) > brd->nasync) {
+ DGNC_UNLOCK(brd->bd_lock, lock_flags);
+ return -ENXIO;
+ }
+
+ ch = brd->channels[PORT_NUM(minor)];
+ if (!ch) {
+ DGNC_UNLOCK(brd->bd_lock, lock_flags);
+ return -ENXIO;
+ }
+
+ /* Drop board lock */
+ DGNC_UNLOCK(brd->bd_lock, lock_flags);
+
+ /* Grab channel lock */
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /* Figure out our type */
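+	/*
+	 * The minor number tells us whether this node is the normal serial
+	 * unit or the transparent print unit.
+	 */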
+ if (!IS_PRINT(minor)) {
+ un = &brd->channels[PORT_NUM(minor)]->ch_tun;
+ un->un_type = DGNC_SERIAL;
+ }
+ else if (IS_PRINT(minor)) {
+ un = &brd->channels[PORT_NUM(minor)]->ch_pun;
+ un->un_type = DGNC_PRINT;
+ }
+ else {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ DPR_OPEN(("%d Unknown TYPE!\n", __LINE__));
+ return -ENXIO;
+ }
+
+ /*
+ * If the port is still in a previous open, and in a state
+ * where we simply cannot safely keep going, wait until the
+ * state clears.
+ */
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ rc = wait_event_interruptible(ch->ch_flags_wait, ((ch->ch_flags & CH_OPENING) == 0));
+
+	/* If rc is non-zero, the user hit ctrl-c */
+ if (rc) {
+ DPR_OPEN(("%d User ctrl c'ed\n", __LINE__));
+ return -EINTR;
+ }
+
+ /*
+ * If either unit is in the middle of the fragile part of close,
+ * we just cannot touch the channel safely.
+ * Go to sleep, knowing that when the channel can be
+ * touched safely, the close routine will signal the
+ * ch_flags_wait to wake us back up.
+ */
+ rc = wait_event_interruptible(ch->ch_flags_wait,
+ (((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_CLOSING) == 0));
+
+	/* If rc is non-zero, the user hit ctrl-c */
+ if (rc) {
+ DPR_OPEN(("%d User ctrl c'ed\n", __LINE__));
+ return -EINTR;
+ }
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+
+ /* Store our unit into driver_data, so we always have it available. */
+ tty->driver_data = un;
+
+ DPR_OPEN(("Open called. MAJOR: %d MINOR:%d PORT_NUM: %x unit: %p NAME: %s\n",
+ MAJOR(tty_devnum(tty)), MINOR(tty_devnum(tty)), PORT_NUM(minor), un, brd->name));
+
+ DPR_OPEN(("%d: tflag=%x pflag=%x\n", __LINE__, ch->ch_tun.un_flags, ch->ch_pun.un_flags));
+
+ /*
+	 * Initialize the tty
+ */
+ if (!(un->un_flags & UN_ISOPEN)) {
+ /* Store important variables. */
+ un->un_tty = tty;
+
+ /* Maybe do something here to the TTY struct as well? */
+ }
+
+
+ /*
+ * Allocate channel buffers for read/write/error.
+ * Set flag, so we don't get trounced on.
+ */
+ ch->ch_flags |= (CH_OPENING);
+
+ /* Drop locks, as malloc with GFP_KERNEL can sleep */
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ if (!ch->ch_rqueue)
+ ch->ch_rqueue = kzalloc(RQUEUESIZE, GFP_KERNEL);
+ if (!ch->ch_equeue)
+ ch->ch_equeue = kzalloc(EQUEUESIZE, GFP_KERNEL);
+ if (!ch->ch_wqueue)
+ ch->ch_wqueue = kzalloc(WQUEUESIZE, GFP_KERNEL);
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ ch->ch_flags &= ~(CH_OPENING);
+ wake_up_interruptible(&ch->ch_flags_wait);
+
+ /*
+	 * Initialize if neither terminal nor printer is open.
+ */
+ if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) {
+
+ DPR_OPEN(("dgnc_open: initializing channel in open...\n"));
+
+ /*
+ * Flush input queues.
+ */
+ ch->ch_r_head = ch->ch_r_tail = 0;
+ ch->ch_e_head = ch->ch_e_tail = 0;
+ ch->ch_w_head = ch->ch_w_tail = 0;
+
+ brd->bd_ops->flush_uart_write(ch);
+ brd->bd_ops->flush_uart_read(ch);
+
+ ch->ch_flags = 0;
+ ch->ch_cached_lsr = 0;
+ ch->ch_stop_sending_break = 0;
+ ch->ch_stops_sent = 0;
+
+ ch->ch_c_cflag = tty->termios.c_cflag;
+ ch->ch_c_iflag = tty->termios.c_iflag;
+ ch->ch_c_oflag = tty->termios.c_oflag;
+ ch->ch_c_lflag = tty->termios.c_lflag;
+ ch->ch_startc = tty->termios.c_cc[VSTART];
+ ch->ch_stopc = tty->termios.c_cc[VSTOP];
+
+ /*
+ * Bring up RTS and DTR...
+ * Also handle RTS or DTR toggle if set.
+ */
+ if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE))
+ ch->ch_mostat |= (UART_MCR_RTS);
+ if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE))
+ ch->ch_mostat |= (UART_MCR_DTR);
+
+ /* Tell UART to init itself */
+ brd->bd_ops->uart_init(ch);
+ }
+
+ /*
+ * Run param in case we changed anything
+ */
+ brd->bd_ops->param(tty);
+
+ dgnc_carrier(ch);
+
+ /*
+ * follow protocol for opening port
+ */
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ rc = dgnc_block_til_ready(tty, file, ch);
+
+ if (rc) {
+ DPR_OPEN(("dgnc_tty_open returning after dgnc_block_til_ready "
+ "with %d\n", rc));
+ }
+
+ /* No going back now, increment our unit and channel counters */
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_open_count++;
+ un->un_open_count++;
+ un->un_flags |= (UN_ISOPEN);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_OPEN(("dgnc_tty_open finished\n"));
+ return (rc);
+}
+
+
+/*
+ * dgnc_block_til_ready()
+ *
+ * Wait for DCD, if needed.
+ */
+static int dgnc_block_til_ready(struct tty_struct *tty, struct file *file, struct channel_t *ch)
+{
+ int retval = 0;
+ struct un_t *un = NULL;
+ ulong lock_flags;
+ uint old_flags = 0;
+ int sleep_on_un_flags = 0;
+
+ if (!tty || tty->magic != TTY_MAGIC || !file || !ch || ch->magic != DGNC_CHANNEL_MAGIC) {
+ return (-ENXIO);
+ }
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC) {
+ return (-ENXIO);
+ }
+
+ DPR_OPEN(("dgnc_block_til_ready - before block.\n"));
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ ch->ch_wopen++;
+
+ /* Loop forever */
+ while (1) {
+
+ sleep_on_un_flags = 0;
+
+ /*
+ * If board has failed somehow during our sleep, bail with error.
+ */
+ if (ch->ch_bd->state == BOARD_FAILED) {
+ retval = -ENXIO;
+ break;
+ }
+
+ /* If tty was hung up, break out of loop and set error. */
+ if (tty_hung_up_p(file)) {
+ retval = -EAGAIN;
+ break;
+ }
+
+ /*
+ * If either unit is in the middle of the fragile part of close,
+ * we just cannot touch the channel safely.
+ * Go back to sleep, knowing that when the channel can be
+ * touched safely, the close routine will signal the
+		 * ch_flags_wait to wake us back up.
+ */
+ if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_CLOSING)) {
+
+ /*
+ * Our conditions to leave cleanly and happily:
+ * 1) NONBLOCKING on the tty is set.
+ * 2) CLOCAL is set.
+ * 3) DCD (fake or real) is active.
+ */
+
+ if (file->f_flags & O_NONBLOCK) {
+ break;
+ }
+
+ if (tty->flags & (1 << TTY_IO_ERROR)) {
+ retval = -EIO;
+ break;
+ }
+
+ if (ch->ch_flags & CH_CD) {
+ DPR_OPEN(("%d: ch_flags: %x\n", __LINE__, ch->ch_flags));
+ break;
+ }
+
+ if (ch->ch_flags & CH_FCAR) {
+ DPR_OPEN(("%d: ch_flags: %x\n", __LINE__, ch->ch_flags));
+ break;
+ }
+ }
+ else {
+ sleep_on_un_flags = 1;
+ }
+
+ /*
+ * If there is a signal pending, the user probably
+ * interrupted (ctrl-c) us.
+ * Leave loop with error set.
+ */
+ if (signal_pending(current)) {
+ DPR_OPEN(("%d: signal pending...\n", __LINE__));
+ retval = -ERESTARTSYS;
+ break;
+ }
+
+ DPR_OPEN(("dgnc_block_til_ready - blocking.\n"));
+
+ /*
+ * Store the flags before we let go of channel lock
+ */
+ if (sleep_on_un_flags)
+ old_flags = ch->ch_tun.un_flags | ch->ch_pun.un_flags;
+ else
+ old_flags = ch->ch_flags;
+
+ /*
+ * Let go of channel lock before calling schedule.
+ * Our poller will get any FEP events and wake us up when DCD
+ * eventually goes active.
+ */
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_OPEN(("Going to sleep on %s flags...\n",
+ (sleep_on_un_flags ? "un" : "ch")));
+
+ /*
+ * Wait for something in the flags to change from the current value.
+ */
+ if (sleep_on_un_flags) {
+ retval = wait_event_interruptible(un->un_flags_wait,
+ (old_flags != (ch->ch_tun.un_flags | ch->ch_pun.un_flags)));
+ }
+ else {
+ retval = wait_event_interruptible(ch->ch_flags_wait,
+ (old_flags != ch->ch_flags));
+ }
+
+ DPR_OPEN(("After sleep... retval: %x\n", retval));
+
+ /*
+ * We got woken up for some reason.
+ * Before looping around, grab our channel lock.
+ */
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ }
+
+ ch->ch_wopen--;
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_OPEN(("dgnc_block_til_ready - after blocking.\n"));
+
+ if (retval) {
+ DPR_OPEN(("dgnc_block_til_ready - done. error. retval: %x\n", retval));
+ return(retval);
+ }
+
+ DPR_OPEN(("dgnc_block_til_ready - done no error. jiffies: %lu\n", jiffies));
+
+ return(0);
+}
+
+
+/*
+ * dgnc_tty_hangup()
+ *
+ * Hangup the port. Like a close, but don't wait for output to drain.
+ */
+static void dgnc_tty_hangup(struct tty_struct *tty)
+{
+ struct un_t *un;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return;
+
+ DPR_CLOSE(("dgnc_hangup called. ch->ch_open_count: %d un->un_open_count: %d\n",
+ un->un_ch->ch_open_count, un->un_open_count));
+
+ /* flush the transmit queues */
+ dgnc_tty_flush_buffer(tty);
+
+ DPR_CLOSE(("dgnc_hangup finished. ch->ch_open_count: %d un->un_open_count: %d\n",
+ un->un_ch->ch_open_count, un->un_open_count));
+}
+
+
+/*
+ * dgnc_tty_close()
+ *
+ */
+static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
+{
+ struct ktermios *ts;
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ int rc = 0;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ ts = &tty->termios;
+
+ DPR_CLOSE(("Close called\n"));
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /*
+ * Determine if this is the last close or not - and if we agree about
+ * which type of close it is with the Line Discipline
+ */
+ if ((tty->count == 1) && (un->un_open_count != 1)) {
+ /*
+ * Uh, oh. tty->count is 1, which means that the tty
+ * structure will be freed. un_open_count should always
+ * be one in these conditions. If it's greater than
+ * one, we've got real problems, since it means the
+ * serial port won't be shutdown.
+ */
+ APR(("tty->count is 1, un open count is %d\n", un->un_open_count));
+ un->un_open_count = 1;
+ }
+
+ if (--un->un_open_count < 0) {
+ APR(("bad serial port open count of %d\n", un->un_open_count));
+ un->un_open_count = 0;
+ }
+
+ ch->ch_open_count--;
+
+ if (ch->ch_open_count && un->un_open_count) {
+ DPR_CLOSE(("dgnc_tty_close: not last close ch: %d un:%d\n",
+ ch->ch_open_count, un->un_open_count));
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+	/* OK, it's the last close on the unit */
+ DPR_CLOSE(("dgnc_tty_close - last close on unit procedures\n"));
+
+ un->un_flags |= UN_CLOSING;
+
+ tty->closing = 1;
+
+
+ /*
+ * Only officially close channel if count is 0 and
+ * DIGI_PRINTER bit is not set.
+ */
+ if ((ch->ch_open_count == 0) && !(ch->ch_digi.digi_flags & DIGI_PRINTER)) {
+
+ ch->ch_flags &= ~(CH_STOPI | CH_FORCED_STOPI);
+
+ /*
+ * turn off print device when closing print device.
+ */
+		if ((un->un_type == DGNC_PRINT) && (ch->ch_flags & CH_PRON)) {
+ dgnc_wmove(ch, ch->ch_digi.digi_offstr,
+ (int) ch->ch_digi.digi_offlen);
+ ch->ch_flags &= ~CH_PRON;
+ }
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ /* wait for output to drain */
+ /* This will also return if we take an interrupt */
+
+ DPR_CLOSE(("Calling wait_for_drain\n"));
+ rc = bd->bd_ops->drain(tty, 0);
+
+ DPR_CLOSE(("After calling wait_for_drain\n"));
+
+ if (rc) {
+ DPR_BASIC(("dgnc_tty_close - bad return: %d ", rc));
+ }
+
+ dgnc_tty_flush_buffer(tty);
+ tty_ldisc_flush(tty);
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ tty->closing = 0;
+
+ /*
+ * If we have HUPCL set, lower DTR and RTS
+ */
+ if (ch->ch_c_cflag & HUPCL) {
+ DPR_CLOSE(("Close. HUPCL set, dropping DTR/RTS\n"));
+
+ /* Drop RTS/DTR */
+ ch->ch_mostat &= ~(UART_MCR_DTR | UART_MCR_RTS);
+ bd->bd_ops->assert_modem_signals(ch);
+
+ /*
+			 * Sleep so that RTS/DTR stay dropped long enough
+			 * for modems to see the change.
+ */
+ if (ch->ch_close_delay) {
+ DPR_CLOSE(("Close. Sleeping for RTS/DTR drop\n"));
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ dgnc_ms_sleep(ch->ch_close_delay);
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ DPR_CLOSE(("Close. After sleeping for RTS/DTR drop\n"));
+ }
+ }
+
+ ch->ch_old_baud = 0;
+
+ /* Turn off UART interrupts for this port */
+ ch->ch_bd->bd_ops->uart_off(ch);
+ }
+ else {
+ /*
+ * turn off print device when closing print device.
+ */
+		if ((un->un_type == DGNC_PRINT) && (ch->ch_flags & CH_PRON)) {
+ dgnc_wmove(ch, ch->ch_digi.digi_offstr,
+ (int) ch->ch_digi.digi_offlen);
+ ch->ch_flags &= ~CH_PRON;
+ }
+ }
+
+ un->un_tty = NULL;
+ un->un_flags &= ~(UN_ISOPEN | UN_CLOSING);
+
+ DPR_CLOSE(("Close. Doing wakeups\n"));
+ wake_up_interruptible(&ch->ch_flags_wait);
+ wake_up_interruptible(&un->un_flags_wait);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_BASIC(("dgnc_tty_close - complete\n"));
+}
+
+
+/*
+ * dgnc_tty_chars_in_buffer()
+ *
+ * Return number of characters that have not been transmitted yet.
+ *
+ * This routine is used by the line discipline to determine if there
+ * is data waiting to be transmitted/drained/flushed or not.
+ */
+static int dgnc_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ struct channel_t *ch = NULL;
+ struct un_t *un = NULL;
+ ushort thead;
+ ushort ttail;
+ uint tmask;
+ uint chars = 0;
+ ulong lock_flags = 0;
+
+ if (tty == NULL)
+ return(0);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (0);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (0);
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ tmask = WQUEUEMASK;
+ thead = ch->ch_w_head & tmask;
+ ttail = ch->ch_w_tail & tmask;
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
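+	/* Queued bytes: the circular distance from write tail to write head. */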
+ if (ttail == thead) {
+ chars = 0;
+ } else {
+ if (thead >= ttail)
+ chars = thead - ttail;
+ else
+ chars = thead - ttail + WQUEUESIZE;
+ }
+
+ DPR_WRITE(("dgnc_tty_chars_in_buffer. Port: %x - %d (head: %d tail: %d)\n",
+ ch->ch_portnum, chars, thead, ttail));
+
+ return(chars);
+}
+
+
+/*
+ * dgnc_maxcps_room
+ *
+ * Reduces bytes_available to the max number of characters
+ * that can be sent currently given the maxcps value, and
+ * returns the new bytes_available. This only affects printer
+ * output.
+ */
+static int dgnc_maxcps_room(struct tty_struct *tty, int bytes_available)
+{
+ struct channel_t *ch = NULL;
+ struct un_t *un = NULL;
+
+ if (!tty)
+ return (bytes_available);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (bytes_available);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (bytes_available);
+
+ /*
+	 * If it's not the transparent print device, return
+ * the full data amount.
+ */
+ if (un->un_type != DGNC_PRINT)
+ return (bytes_available);
+
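+	/*
+	 * ch_cpstime is the jiffy at which the notional printer buffer will
+	 * have drained at digi_maxcps characters per second.  The room we
+	 * return is however many characters can still be queued without
+	 * pushing that drain time more than digi_bufsize characters ahead
+	 * of "now".
+	 */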
+	if (ch->ch_digi.digi_maxcps > 0 && ch->ch_digi.digi_bufsize > 0) {
+ int cps_limit = 0;
+ unsigned long current_time = jiffies;
+ unsigned long buffer_time = current_time +
+ (HZ * ch->ch_digi.digi_bufsize) / ch->ch_digi.digi_maxcps;
+
+ if (ch->ch_cpstime < current_time) {
+ /* buffer is empty */
+ ch->ch_cpstime = current_time; /* reset ch_cpstime */
+ cps_limit = ch->ch_digi.digi_bufsize;
+ }
+ else if (ch->ch_cpstime < buffer_time) {
+ /* still room in the buffer */
+ cps_limit = ((buffer_time - ch->ch_cpstime) * ch->ch_digi.digi_maxcps) / HZ;
+ }
+ else {
+ /* no room in the buffer */
+ cps_limit = 0;
+ }
+
+ bytes_available = min(cps_limit, bytes_available);
+ }
+
+ return (bytes_available);
+}
+
+
+/*
+ * dgnc_tty_write_room()
+ *
+ * Return space available in Tx buffer
+ */
+static int dgnc_tty_write_room(struct tty_struct *tty)
+{
+ struct channel_t *ch = NULL;
+ struct un_t *un = NULL;
+ ushort head;
+ ushort tail;
+ ushort tmask;
+ int ret = 0;
+ ulong lock_flags = 0;
+
+ if (tty == NULL || dgnc_TmpWriteBuf == NULL)
+ return(0);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (0);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (0);
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ tmask = WQUEUEMASK;
+ head = (ch->ch_w_head) & tmask;
+ tail = (ch->ch_w_tail) & tmask;
+
+	ret = tail - head - 1;
+	if (ret < 0)
+		ret += WQUEUESIZE;
+
+ /* Limit printer to maxcps */
+ ret = dgnc_maxcps_room(tty, ret);
+
+ /*
+ * If we are printer device, leave space for
+ * possibly both the on and off strings.
+ */
+ if (un->un_type == DGNC_PRINT) {
+ if (!(ch->ch_flags & CH_PRON))
+ ret -= ch->ch_digi.digi_onlen;
+ ret -= ch->ch_digi.digi_offlen;
+ }
+ else {
+ if (ch->ch_flags & CH_PRON)
+ ret -= ch->ch_digi.digi_offlen;
+ }
+
+ if (ret < 0)
+ ret = 0;
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_WRITE(("dgnc_tty_write_room - %d tail: %d head: %d\n", ret, tail, head));
+
+ return(ret);
+}
+
+
+/*
+ * dgnc_tty_put_char()
+ *
+ * Put a character into ch->ch_buf
+ *
+ * - used by the line discipline for OPOST processing
+ */
+static int dgnc_tty_put_char(struct tty_struct *tty, unsigned char c)
+{
+ /*
+	 * Simply call dgnc_tty_write().
+ */
+ DPR_WRITE(("dgnc_tty_put_char called\n"));
+ dgnc_tty_write(tty, &c, 1);
+ return 1;
+}
+
+
+/*
+ * dgnc_tty_write()
+ *
+ * Take data from the user or kernel and send it out to the FEP.
+ * In here exists all the Transparent Print magic as well.
+ */
+static int dgnc_tty_write(struct tty_struct *tty,
+ const unsigned char *buf, int count)
+{
+ struct channel_t *ch = NULL;
+ struct un_t *un = NULL;
+ int bufcount = 0, n = 0;
+ int orig_count = 0;
+ ulong lock_flags;
+ ushort head;
+ ushort tail;
+ ushort tmask;
+ uint remain;
+ int from_user = 0;
+
+ if (tty == NULL || dgnc_TmpWriteBuf == NULL)
+ return(0);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return(0);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return(0);
+
+ if (!count)
+ return(0);
+
+ DPR_WRITE(("dgnc_tty_write: Port: %x tty=%p user=%d len=%d\n",
+ ch->ch_portnum, tty, from_user, count));
+
+ /*
+ * Store original amount of characters passed in.
+ * This helps to figure out if we should ask the FEP
+ * to send us an event when it has more space available.
+ */
+ orig_count = count;
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /* Get our space available for the channel from the board */
+ tmask = WQUEUEMASK;
+ head = (ch->ch_w_head) & tmask;
+ tail = (ch->ch_w_tail) & tmask;
+
+	bufcount = tail - head - 1;
+	if (bufcount < 0)
+		bufcount += WQUEUESIZE;
+
+ DPR_WRITE(("%d: bufcount: %x count: %x tail: %x head: %x tmask: %x\n",
+ __LINE__, bufcount, count, tail, head, tmask));
+
+ /*
+ * Limit printer output to maxcps overall, with bursts allowed
+ * up to bufsize characters.
+ */
+ bufcount = dgnc_maxcps_room(tty, bufcount);
+
+ /*
+ * Take minimum of what the user wants to send, and the
+ * space available in the FEP buffer.
+ */
+ count = min(count, bufcount);
+
+ /*
+ * Bail if no space left.
+ */
+ if (count <= 0) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return(0);
+ }
+
+ /*
+ * Output the printer ON string, if we are in terminal mode, but
+ * need to be in printer mode.
+ */
+ if ((un->un_type == DGNC_PRINT) && !(ch->ch_flags & CH_PRON)) {
+ dgnc_wmove(ch, ch->ch_digi.digi_onstr,
+ (int) ch->ch_digi.digi_onlen);
+ head = (ch->ch_w_head) & tmask;
+ ch->ch_flags |= CH_PRON;
+ }
+
+ /*
+ * On the other hand, output the printer OFF string, if we are
+ * currently in printer mode, but need to output to the terminal.
+ */
+ if ((un->un_type != DGNC_PRINT) && (ch->ch_flags & CH_PRON)) {
+ dgnc_wmove(ch, ch->ch_digi.digi_offstr,
+ (int) ch->ch_digi.digi_offlen);
+ head = (ch->ch_w_head) & tmask;
+ ch->ch_flags &= ~CH_PRON;
+ }
+
+ /*
+ * If there is nothing left to copy, or I can't handle any more data, leave.
+ */
+ if (count <= 0) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return(0);
+ }
+
+ if (from_user) {
+
+ count = min(count, WRITEBUFLEN);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ /*
+ * If data is coming from user space, copy it into a temporary
+ * buffer so we don't get swapped out while doing the copy to
+ * the board.
+ */
+ /* we're allowed to block if it's from_user */
+ if (down_interruptible(&dgnc_TmpWriteSem)) {
+ return (-EINTR);
+ }
+
+ /*
+ * copy_from_user() returns the number
+ * of bytes that could *NOT* be copied.
+ */
+ count -= copy_from_user(dgnc_TmpWriteBuf, (const uchar __user *) buf, count);
+
+ if (!count) {
+ up(&dgnc_TmpWriteSem);
+ return(-EFAULT);
+ }
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ buf = dgnc_TmpWriteBuf;
+
+ }
+
+ n = count;
+
+ /*
+ * If the write wraps over the top of the circular buffer,
+ * move the portion up to the wrap point, and reset the
+ * pointers to the bottom.
+ */
+ remain = WQUEUESIZE - head;
+
+ if (n >= remain) {
+ n -= remain;
+ memcpy(ch->ch_wqueue + head, buf, remain);
+ dgnc_sniff_nowait_nolock(ch, "USER WRITE", ch->ch_wqueue + head, remain);
+ head = 0;
+ buf += remain;
+ }
+
+ if (n > 0) {
+ /*
+ * Move rest of data.
+ */
+ remain = n;
+ memcpy(ch->ch_wqueue + head, buf, remain);
+ dgnc_sniff_nowait_nolock(ch, "USER WRITE", ch->ch_wqueue + head, remain);
+ head += remain;
+ }
+
+ if (count) {
+ head &= tmask;
+ ch->ch_w_head = head;
+ }
+
+#if 0
+ /*
+ * If this is the print device, and the
+ * printer is still on, we need to turn it
+ * off before going idle.
+ */
+ if (count == orig_count) {
+ if ((un->un_type == DGNC_PRINT) && (ch->ch_flags & CH_PRON)) {
+ head &= tmask;
+ ch->ch_w_head = head;
+ dgnc_wmove(ch, ch->ch_digi.digi_offstr,
+ (int) ch->ch_digi.digi_offlen);
+ head = (ch->ch_w_head) & tmask;
+ ch->ch_flags &= ~CH_PRON;
+ }
+ }
+#endif
+
+	/*
+	 * Update printer buffer empty time: push ch_cpstime forward by the
+	 * time these characters will take to print at digi_maxcps.
+	 */
+ if ((un->un_type == DGNC_PRINT) && (ch->ch_digi.digi_maxcps > 0)
+ && (ch->ch_digi.digi_bufsize > 0)) {
+ ch->ch_cpstime += (HZ * count) / ch->ch_digi.digi_maxcps;
+ }
+
+ if (from_user) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ up(&dgnc_TmpWriteSem);
+ } else {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ }
+
+ DPR_WRITE(("Write finished - Write %d bytes of %d.\n", count, orig_count));
+
+ if (count) {
+ /*
+ * Channel lock is grabbed and then released
+ * inside this routine.
+ */
+ ch->ch_bd->bd_ops->copy_data_from_queue_to_uart(ch);
+ }
+
+ return (count);
+}
+
+
+/*
+ * Return modem signals to ld.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
+static int dgnc_tty_tiocmget(struct tty_struct *tty)
+#else
+static int dgnc_tty_tiocmget(struct tty_struct *tty, struct file *file)
+#endif
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ int result = -EIO;
+ uchar mstat = 0;
+ ulong lock_flags;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return result;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return result;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return result;
+
+ DPR_IOCTL(("dgnc_tty_tiocmget start\n"));
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ mstat = (ch->ch_mostat | ch->ch_mistat);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ result = 0;
+
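+	/*
+	 * Translate the cached UART MCR/MSR bits into the TIOCM_* flags
+	 * that the line discipline expects.
+	 */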
+ if (mstat & UART_MCR_DTR)
+ result |= TIOCM_DTR;
+ if (mstat & UART_MCR_RTS)
+ result |= TIOCM_RTS;
+ if (mstat & UART_MSR_CTS)
+ result |= TIOCM_CTS;
+ if (mstat & UART_MSR_DSR)
+ result |= TIOCM_DSR;
+ if (mstat & UART_MSR_RI)
+ result |= TIOCM_RI;
+ if (mstat & UART_MSR_DCD)
+ result |= TIOCM_CD;
+
+ DPR_IOCTL(("dgnc_tty_tiocmget finish\n"));
+
+ return result;
+}
+
+
+/*
+ * dgnc_tty_tiocmset()
+ *
+ * Set modem signals, called by ld.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
+static int dgnc_tty_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+#else
+static int dgnc_tty_tiocmset(struct tty_struct *tty, struct file *file,
+ unsigned int set, unsigned int clear)
+#endif
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int ret = -EIO;
+ ulong lock_flags;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return ret;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return ret;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return ret;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return ret;
+
+ DPR_IOCTL(("dgnc_tty_tiocmset start\n"));
+
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ if (set & TIOCM_RTS) {
+ ch->ch_mostat |= UART_MCR_RTS;
+ }
+
+ if (set & TIOCM_DTR) {
+ ch->ch_mostat |= UART_MCR_DTR;
+ }
+
+ if (clear & TIOCM_RTS) {
+ ch->ch_mostat &= ~(UART_MCR_RTS);
+ }
+
+ if (clear & TIOCM_DTR) {
+ ch->ch_mostat &= ~(UART_MCR_DTR);
+ }
+
+ ch->ch_bd->bd_ops->assert_modem_signals(ch);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_tiocmset finish\n"));
+
+ return (0);
+}
+
+
+/*
+ * dgnc_tty_send_break()
+ *
+ * Send a Break, called by ld.
+ */
+static int dgnc_tty_send_break(struct tty_struct *tty, int msec)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int ret = -EIO;
+ ulong lock_flags;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return ret;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return ret;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return ret;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return ret;
+
+ switch (msec) {
+ case -1:
+ msec = 0xFFFF;
+ break;
+ case 0:
+ msec = 0;
+ break;
+ default:
+ break;
+ }
+
+ DPR_IOCTL(("dgnc_tty_send_break start 1. %lx\n", jiffies));
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ ch->ch_bd->bd_ops->send_break(ch, msec);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_send_break finish\n"));
+
+ return (0);
+
+}
+
+
+/*
+ * dgnc_tty_wait_until_sent()
+ *
+ * wait until data has been transmitted, called by ld.
+ */
+static void dgnc_tty_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int rc;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ rc = bd->bd_ops->drain(tty, 0);
+ if (rc) {
+ DPR_IOCTL(("dgnc_tty_ioctl - bad return: %d ", rc));
+ return;
+ }
+ return;
+}
+
+
+/*
+ * dgnc_send_xchar()
+ *
+ * send a high priority character, called by ld.
+ */
+static void dgnc_tty_send_xchar(struct tty_struct *tty, char c)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgnc_tty_send_xchar start\n"));
+ printk("dgnc_tty_send_xchar start\n");
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ bd->bd_ops->send_immediate_char(ch, c);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_send_xchar finish\n"));
+ printk("dgnc_tty_send_xchar finish\n");
+ return;
+}
+
+
+
+
+/*
+ * Return modem signals to ld.
+ */
+static inline int dgnc_get_mstat(struct channel_t *ch)
+{
+ unsigned char mstat;
+ int result = -EIO;
+ ulong lock_flags;
+
+	DPR_IOCTL(("dgnc_get_mstat start\n"));
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return(-ENXIO);
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ mstat = (ch->ch_mostat | ch->ch_mistat);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ result = 0;
+
+ if (mstat & UART_MCR_DTR)
+ result |= TIOCM_DTR;
+ if (mstat & UART_MCR_RTS)
+ result |= TIOCM_RTS;
+ if (mstat & UART_MSR_CTS)
+ result |= TIOCM_CTS;
+ if (mstat & UART_MSR_DSR)
+ result |= TIOCM_DSR;
+ if (mstat & UART_MSR_RI)
+ result |= TIOCM_RI;
+ if (mstat & UART_MSR_DCD)
+ result |= TIOCM_CD;
+
+	DPR_IOCTL(("dgnc_get_mstat finish\n"));
+
+ return(result);
+}
+
+
+
+/*
+ * Return modem signals to ld.
+ */
+static int dgnc_get_modem_info(struct channel_t *ch, unsigned int __user *value)
+{
+ int result;
+ int rc;
+
+ DPR_IOCTL(("dgnc_get_modem_info start\n"));
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return(-ENXIO);
+
+ result = dgnc_get_mstat(ch);
+
+ if (result < 0)
+ return (-ENXIO);
+
+ rc = put_user(result, value);
+
+ DPR_IOCTL(("dgnc_get_modem_info finish\n"));
+ return(rc);
+}
+
+
+/*
+ * dgnc_set_modem_info()
+ *
+ * Set modem signals, called by ld.
+ */
+static int dgnc_set_modem_info(struct tty_struct *tty, unsigned int command, unsigned int __user *value)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int ret = -ENXIO;
+ unsigned int arg = 0;
+ ulong lock_flags;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return ret;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return ret;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return ret;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return ret;
+
+ ret = 0;
+
+ DPR_IOCTL(("dgnc_set_modem_info() start\n"));
+
+ ret = get_user(arg, value);
+ if (ret)
+ return(ret);
+
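+	/*
+	 * TIOCMBIS sets the requested bits, TIOCMBIC clears them, and
+	 * TIOCMSET replaces the RTS/DTR state outright.
+	 */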
+ switch (command) {
+ case TIOCMBIS:
+ if (arg & TIOCM_RTS) {
+ ch->ch_mostat |= UART_MCR_RTS;
+ }
+
+ if (arg & TIOCM_DTR) {
+ ch->ch_mostat |= UART_MCR_DTR;
+ }
+
+ break;
+
+ case TIOCMBIC:
+ if (arg & TIOCM_RTS) {
+ ch->ch_mostat &= ~(UART_MCR_RTS);
+ }
+
+ if (arg & TIOCM_DTR) {
+ ch->ch_mostat &= ~(UART_MCR_DTR);
+ }
+
+ break;
+
+ case TIOCMSET:
+
+ if (arg & TIOCM_RTS) {
+ ch->ch_mostat |= UART_MCR_RTS;
+ }
+ else {
+ ch->ch_mostat &= ~(UART_MCR_RTS);
+ }
+
+ if (arg & TIOCM_DTR) {
+ ch->ch_mostat |= UART_MCR_DTR;
+ }
+ else {
+ ch->ch_mostat &= ~(UART_MCR_DTR);
+ }
+
+ break;
+
+ default:
+ return(-EINVAL);
+ }
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ ch->ch_bd->bd_ops->assert_modem_signals(ch);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_set_modem_info finish\n"));
+
+ return (0);
+}
+
+
+/*
+ * dgnc_tty_digigeta()
+ *
+ * Ioctl to get the information for ditty.
+ */
+static int dgnc_tty_digigeta(struct tty_struct *tty, struct digi_t __user *retinfo)
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ struct digi_t tmp;
+ ulong lock_flags;
+
+ if (!retinfo)
+ return (-EFAULT);
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return (-EFAULT);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (-EFAULT);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (-EFAULT);
+
+ memset(&tmp, 0, sizeof(tmp));
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ memcpy(&tmp, &ch->ch_digi, sizeof(tmp));
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return (-EFAULT);
+
+ return (0);
+}
+
+
+/*
+ * dgnc_tty_digiseta()
+ *
+ * Ioctl to set the information for ditty.
+ */
+static int dgnc_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_info)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ struct digi_t new_digi;
+ ulong lock_flags;
+
+ DPR_IOCTL(("DIGI_SETA start\n"));
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return (-EFAULT);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (-EFAULT);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (-EFAULT);
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return (-EFAULT);
+
+ if (copy_from_user(&new_digi, new_info, sizeof(struct digi_t))) {
+ DPR_IOCTL(("DIGI_SETA failed copy_from_user\n"));
+ return(-EFAULT);
+ }
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /*
+	 * Handle transitions to and from RTS Toggle.
+ */
+ if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) && (new_digi.digi_flags & DIGI_RTS_TOGGLE))
+ ch->ch_mostat &= ~(UART_MCR_RTS);
+ if ((ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) && !(new_digi.digi_flags & DIGI_RTS_TOGGLE))
+ ch->ch_mostat |= (UART_MCR_RTS);
+
+ /*
+	 * Handle transitions to and from DTR Toggle.
+ */
+ if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) && (new_digi.digi_flags & DIGI_DTR_TOGGLE))
+ ch->ch_mostat &= ~(UART_MCR_DTR);
+ if ((ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) && !(new_digi.digi_flags & DIGI_DTR_TOGGLE))
+ ch->ch_mostat |= (UART_MCR_DTR);
+
+ memcpy(&ch->ch_digi, &new_digi, sizeof(struct digi_t));
+
+ if (ch->ch_digi.digi_maxcps < 1)
+ ch->ch_digi.digi_maxcps = 1;
+
+ if (ch->ch_digi.digi_maxcps > 10000)
+ ch->ch_digi.digi_maxcps = 10000;
+
+ if (ch->ch_digi.digi_bufsize < 10)
+ ch->ch_digi.digi_bufsize = 10;
+
+ if (ch->ch_digi.digi_maxchar < 1)
+ ch->ch_digi.digi_maxchar = 1;
+
+ if (ch->ch_digi.digi_maxchar > ch->ch_digi.digi_bufsize)
+ ch->ch_digi.digi_maxchar = ch->ch_digi.digi_bufsize;
+
+ if (ch->ch_digi.digi_onlen > DIGI_PLEN)
+ ch->ch_digi.digi_onlen = DIGI_PLEN;
+
+ if (ch->ch_digi.digi_offlen > DIGI_PLEN)
+ ch->ch_digi.digi_offlen = DIGI_PLEN;
+
+ ch->ch_bd->bd_ops->param(tty);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("DIGI_SETA finish\n"));
+
+ return(0);
+}
+
+
+/*
+ * dgnc_set_termios()
+ */
+static void dgnc_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ unsigned long lock_flags;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ ch->ch_c_cflag = tty->termios.c_cflag;
+ ch->ch_c_iflag = tty->termios.c_iflag;
+ ch->ch_c_oflag = tty->termios.c_oflag;
+ ch->ch_c_lflag = tty->termios.c_lflag;
+ ch->ch_startc = tty->termios.c_cc[VSTART];
+ ch->ch_stopc = tty->termios.c_cc[VSTOP];
+
+ ch->ch_bd->bd_ops->param(tty);
+ dgnc_carrier(ch);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+}
+
+
+static void dgnc_tty_throttle(struct tty_struct *tty)
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags = 0;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgnc_tty_throttle start\n"));
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ ch->ch_flags |= (CH_FORCED_STOPI);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_throttle finish\n"));
+}
+
+
+static void dgnc_tty_unthrottle(struct tty_struct *tty)
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgnc_tty_unthrottle start\n"));
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ ch->ch_flags &= ~(CH_FORCED_STOPI);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_unthrottle finish\n"));
+}
+
+
+static void dgnc_tty_start(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+	DPR_IOCTL(("dgnc_tty_start start\n"));
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ ch->ch_flags &= ~(CH_FORCED_STOP);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_start finish\n"));
+}
+
+
+static void dgnc_tty_stop(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgnc_tty_stop start\n"));
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ ch->ch_flags |= (CH_FORCED_STOP);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_stop finish\n"));
+}
+
+
+/*
+ * dgnc_tty_flush_chars()
+ *
+ * Flush the cooked buffer
+ *
+ * Note to self, and any other poor souls who venture here:
+ *
+ * flush in this case DOES NOT mean dispose of the data.
+ * instead, it means "stop buffering and send it if you
+ * haven't already." Just guess how I figured that out... SRW 2-Jun-98
+ *
+ * It is also always called in interrupt context - JAR 8-Sept-99
+ */
+static void dgnc_tty_flush_chars(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgnc_tty_flush_chars start\n"));
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /* Do something maybe here */
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_flush_chars finish\n"));
+}
+
+
+
+/*
+ * dgnc_tty_flush_buffer()
+ *
+ * Flush Tx buffer (make in == out)
+ */
+static void dgnc_tty_flush_buffer(struct tty_struct *tty)
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgnc_tty_flush_buffer on port: %d start\n", ch->ch_portnum));
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ ch->ch_flags &= ~CH_STOP;
+
+ /* Flush our write queue */
+ ch->ch_w_head = ch->ch_w_tail;
+
+	/* Flush the UART's transmit FIFO */
+ ch->ch_bd->bd_ops->flush_uart_write(ch);
+
+ if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_tun.un_flags_wait);
+ }
+ if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_pun.un_flags_wait);
+ }
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_flush_buffer finish\n"));
+}
+
+
+
+/*****************************************************************************
+ *
+ * The IOCTL function and all of its helpers
+ *
+ *****************************************************************************/
+
+/*
+ * dgnc_tty_ioctl()
+ *
+ * The usual assortment of ioctl's
+ */
+static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int rc;
+ ulong lock_flags;
+ void __user *uarg = (void __user *) arg;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return (-ENODEV);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (-ENODEV);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (-ENODEV);
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return (-ENODEV);
+
+ DPR_IOCTL(("dgnc_tty_ioctl start on port %d - cmd %s (%x), arg %lx\n",
+ ch->ch_portnum, dgnc_ioctl_name(cmd), cmd, arg));
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ if (un->un_open_count <= 0) {
+ DPR_BASIC(("dgnc_tty_ioctl - unit not open.\n"));
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return(-EIO);
+ }
+
+ switch (cmd) {
+
+ /* Here are all the standard ioctl's that we MUST implement */
+
+ case TCSBRK:
+ /*
+		 * TCSBRK is the SVID version: a non-zero arg means no break;
+		 * this behaviour is exploited by tcdrain().
+		 *
+		 * According to the POSIX.1 spec (7.2.2.1.2) breaks should be
+		 * between 0.25 and 0.5 seconds, so we send a 250 ms break.
+ */
+ rc = tty_check_change(tty);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ if (rc) {
+ return(rc);
+ }
+
+ rc = ch->ch_bd->bd_ops->drain(tty, 0);
+
+ if (rc) {
+ DPR_IOCTL(("dgnc_tty_ioctl - bad return: %d ", rc));
+ return(-EINTR);
+ }
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+		if (((cmd == TCSBRK) && (!arg)) || (cmd == TCSBRKP)) {
+ ch->ch_bd->bd_ops->send_break(ch, 250);
+ }
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
+ ch->ch_portnum, dgnc_ioctl_name(cmd), cmd, arg));
+
+ return(0);
+
+
+ case TCSBRKP:
+ /* support for POSIX tcsendbreak()
+		 * According to the POSIX.1 spec (7.2.2.1.2) breaks should be
+		 * between 0.25 and 0.5 seconds, so we send a 250 ms break.
+ */
+ rc = tty_check_change(tty);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ if (rc) {
+ return(rc);
+ }
+
+ rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ if (rc) {
+ DPR_IOCTL(("dgnc_tty_ioctl - bad return: %d ", rc));
+ return(-EINTR);
+ }
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ ch->ch_bd->bd_ops->send_break(ch, 250);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
+ ch->ch_portnum, dgnc_ioctl_name(cmd), cmd, arg));
+
+ return(0);
+
+ case TIOCSBRK:
+ rc = tty_check_change(tty);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ if (rc) {
+ return(rc);
+ }
+
+ rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ if (rc) {
+ DPR_IOCTL(("dgnc_tty_ioctl - bad return: %d ", rc));
+ return(-EINTR);
+ }
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ ch->ch_bd->bd_ops->send_break(ch, 250);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
+ ch->ch_portnum, dgnc_ioctl_name(cmd), cmd, arg));
+
+ return(0);
+
+ case TIOCCBRK:
+ /* Do Nothing */
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return 0;
+
+ case TIOCGSOFTCAR:
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ rc = put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *) arg);
+ return(rc);
+
+ case TIOCSSOFTCAR:
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ rc = get_user(arg, (unsigned long __user *) arg);
+ if (rc)
+ return(rc);
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ tty->termios.c_cflag = ((tty->termios.c_cflag & ~CLOCAL) | (arg ? CLOCAL : 0));
+ ch->ch_bd->bd_ops->param(tty);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ return(0);
+
+ case TIOCMGET:
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return(dgnc_get_modem_info(ch, uarg));
+
+ case TIOCMBIS:
+ case TIOCMBIC:
+ case TIOCMSET:
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return(dgnc_set_modem_info(tty, cmd, uarg));
+
+ /*
+ * Here are any additional ioctl's that we want to implement
+ */
+
+ case TCFLSH:
+ /*
+ * The linux tty driver doesn't have a flush
+ * input routine for the driver, assuming all backed
+ * up data is in the line disc. buffers. However,
+ * we all know that's not the case. Here, we
+ * act on the ioctl, but then lie and say we didn't
+ * so the line discipline will process the flush
+ * also.
+ */
+ rc = tty_check_change(tty);
+ if (rc) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return(rc);
+ }
+
+ if ((arg == TCIFLUSH) || (arg == TCIOFLUSH)) {
+ ch->ch_r_head = ch->ch_r_tail;
+ ch->ch_bd->bd_ops->flush_uart_read(ch);
+ /* Force queue flow control to be released, if needed */
+ dgnc_check_queue_flow_control(ch);
+ }
+
+ if ((arg == TCOFLUSH) || (arg == TCIOFLUSH)) {
+ if (!(un->un_type == DGNC_PRINT)) {
+ ch->ch_w_head = ch->ch_w_tail;
+ ch->ch_bd->bd_ops->flush_uart_write(ch);
+
+ if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_tun.un_flags_wait);
+ }
+
+ if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_pun.un_flags_wait);
+ }
+
+ }
+ }
+
+ /* pretend we didn't recognize this IOCTL */
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return(-ENOIOCTLCMD);
+ case TCSETSF:
+ case TCSETSW:
+ /*
+ * The linux tty driver doesn't have a flush
+ * input routine for the driver, assuming all backed
+ * up data is in the line disc. buffers. However,
+ * we all know that's not the case. Here, we
+ * act on the ioctl, but then lie and say we didn't
+ * so the line discipline will process the flush
+ * also.
+ */
+ if (cmd == TCSETSF) {
+ /* flush rx */
+ ch->ch_flags &= ~CH_STOP;
+ ch->ch_r_head = ch->ch_r_tail;
+ ch->ch_bd->bd_ops->flush_uart_read(ch);
+ /* Force queue flow control to be released, if needed */
+ dgnc_check_queue_flow_control(ch);
+ }
+
+ /* now wait for all the output to drain */
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ if (rc) {
+ DPR_IOCTL(("dgnc_tty_ioctl - bad return: %d\n", rc));
+ return(-EINTR);
+ }
+
+ DPR_IOCTL(("dgnc_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
+ ch->ch_portnum, dgnc_ioctl_name(cmd), cmd, arg));
+
+ /* pretend we didn't recognize this */
+ return(-ENOIOCTLCMD);
+
+ case TCSETAW:
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ if (rc) {
+ DPR_IOCTL(("dgnc_tty_ioctl - bad return: %d ", rc));
+ return(-EINTR);
+ }
+
+ /* pretend we didn't recognize this */
+ return(-ENOIOCTLCMD);
+
+ case TCXONC:
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ /* Make the ld do it */
+ return(-ENOIOCTLCMD);
+
+ case DIGI_GETA:
+ /* get information for ditty */
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return(dgnc_tty_digigeta(tty, uarg));
+
+ case DIGI_SETAW:
+ case DIGI_SETAF:
+
+ /* set information for ditty */
+ if (cmd == (DIGI_SETAW)) {
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ if (rc) {
+ DPR_IOCTL(("dgnc_tty_ioctl - bad return: %d ", rc));
+ return(-EINTR);
+ }
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ }
+ else {
+ tty_ldisc_flush(tty);
+ }
+		/* fall through */
+
+ case DIGI_SETA:
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return(dgnc_tty_digiseta(tty, uarg));
+
+ case DIGI_LOOPBACK:
+ {
+ uint loopback = 0;
+ /* Let go of locks when accessing user space, could sleep */
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ rc = get_user(loopback, (unsigned int __user *) arg);
+ if (rc)
+ return(rc);
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /* Enable/disable internal loopback for this port */
+ if (loopback)
+ ch->ch_flags |= CH_LOOPBACK;
+ else
+ ch->ch_flags &= ~(CH_LOOPBACK);
+
+ ch->ch_bd->bd_ops->param(tty);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return(0);
+ }
+
+ case DIGI_GETCUSTOMBAUD:
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ rc = put_user(ch->ch_custom_speed, (unsigned int __user *) arg);
+ return(rc);
+
+ case DIGI_SETCUSTOMBAUD:
+ {
+ uint new_rate;
+ /* Let go of locks when accessing user space, could sleep */
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ rc = get_user(new_rate, (unsigned int __user *) arg);
+ if (rc)
+ return(rc);
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ dgnc_set_custom_speed(ch, new_rate);
+ ch->ch_bd->bd_ops->param(tty);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return(0);
+ }
+
+ /*
+ * This ioctl allows insertion of a character into the front
+ * of any pending data to be transmitted.
+ *
+	 * This ioctl is to satisfy the "Send Character Immediate"
+ * call that the RealPort protocol spec requires.
+ */
+ case DIGI_REALPORT_SENDIMMEDIATE:
+ {
+ unsigned char c;
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ rc = get_user(c, (unsigned char __user *) arg);
+ if (rc)
+ return(rc);
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_bd->bd_ops->send_immediate_char(ch, c);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return(0);
+ }
+
+ /*
+ * This ioctl returns all the current counts for the port.
+ *
+	 * This ioctl is to satisfy the "Line Error Counters"
+ * call that the RealPort protocol spec requires.
+ */
+ case DIGI_REALPORT_GETCOUNTERS:
+ {
+ struct digi_getcounter buf;
+
+ buf.norun = ch->ch_err_overrun;
+ buf.noflow = 0; /* The driver doesn't keep this stat */
+ buf.nframe = ch->ch_err_frame;
+ buf.nparity = ch->ch_err_parity;
+ buf.nbreak = ch->ch_err_break;
+ buf.rbytes = ch->ch_rxcount;
+ buf.tbytes = ch->ch_txcount;
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ if (copy_to_user(uarg, &buf, sizeof(struct digi_getcounter))) {
+ return (-EFAULT);
+ }
+ return(0);
+ }
+
+ /*
+ * This ioctl returns all current events.
+ *
+	 * This ioctl is to satisfy the "Event Reporting"
+ * call that the RealPort protocol spec requires.
+ */
+ case DIGI_REALPORT_GETEVENTS:
+ {
+ unsigned int events = 0;
+
+ /* NOTE: MORE EVENTS NEEDS TO BE ADDED HERE */
+ if (ch->ch_flags & CH_BREAK_SENDING)
+ events |= EV_TXB;
+ if ((ch->ch_flags & CH_STOP) || (ch->ch_flags & CH_FORCED_STOP)) {
+ events |= (EV_OPU | EV_OPS);
+ }
+ if ((ch->ch_flags & CH_STOPI) || (ch->ch_flags & CH_FORCED_STOPI)) {
+ events |= (EV_IPU | EV_IPS);
+ }
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ rc = put_user(events, (unsigned int __user *) arg);
+ return(rc);
+ }
+
+ /*
+ * This ioctl returns TOUT and TIN counters based
+ * upon the values passed in by the RealPort Server.
+ * It also passes back whether the UART Transmitter is
+ * empty as well.
+ */
+ case DIGI_REALPORT_GETBUFFERS:
+ {
+ struct digi_getbuffer buf;
+ int tdist;
+ int count;
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ /*
+ * Get data from user first.
+ */
+ if (copy_from_user(&buf, uarg, sizeof(struct digi_getbuffer))) {
+ return (-EFAULT);
+ }
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /*
+ * Figure out how much data is in our RX and TX queues.
+ */
+ buf.rxbuf = (ch->ch_r_head - ch->ch_r_tail) & RQUEUEMASK;
+ buf.txbuf = (ch->ch_w_head - ch->ch_w_tail) & WQUEUEMASK;
+
+ /*
+		 * Is the UART empty? Add that value to what's in our TX queue.
+ */
+ count = buf.txbuf + ch->ch_bd->bd_ops->get_uart_bytes_left(ch);
+
+ /*
+ * Figure out how much data the RealPort Server believes should
+ * be in our TX queue.
+ */
+ tdist = (buf.tIn - buf.tOut) & 0xffff;
+
+ /*
+ * If we have more data than the RealPort Server believes we
+ * should have, reduce our count to its amount.
+ *
+ * This count difference CAN happen because the Linux LD can
+ * insert more characters into our queue for OPOST processing
+ * that the RealPort Server doesn't know about.
+ */
+ if (buf.txbuf > tdist) {
+ buf.txbuf = tdist;
+ }
+
+ /*
+ * Report whether our queue and UART TX are completely empty.
+ */
+ if (count) {
+ buf.txdone = 0;
+ } else {
+ buf.txdone = 1;
+ }
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ if (copy_to_user(uarg, &buf, sizeof(struct digi_getbuffer))) {
+ return (-EFAULT);
+ }
+ return(0);
+ }
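The queue arithmetic in this handler assumes power-of-two ring buffers, so occupancy is simply (head - tail) masked to the queue size. A standalone sketch of that idiom, using a stand-in size rather than the driver's real RQUEUEMASK/WQUEUEMASK values:

/* Illustrative only: QUEUESIZE stands in for the driver's real
 * (power-of-two) queue sizes behind RQUEUEMASK and WQUEUEMASK. */
#define QUEUESIZE	4096
#define QUEUEMASK	(QUEUESIZE - 1)

/* Bytes currently queued; correct even after head wraps past tail. */
static unsigned int ring_used(unsigned int head, unsigned int tail)
{
	return (head - tail) & QUEUEMASK;
}

/* Free space, keeping one slot open so "full" and "empty" differ. */
static unsigned int ring_free(unsigned int head, unsigned int tail)
{
	return QUEUEMASK - ring_used(head, tail);
}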
+ default:
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_ioctl - in default\n"));
+ DPR_IOCTL(("dgnc_tty_ioctl end - cmd %s (%x), arg %lx\n",
+ dgnc_ioctl_name(cmd), cmd, arg));
+
+ return(-ENOIOCTLCMD);
+ }
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_ioctl end - cmd %s (%x), arg %lx\n",
+ dgnc_ioctl_name(cmd), cmd, arg));
+
+ return(0);
+}
diff --git a/drivers/staging/dgnc/dgnc_tty.h b/drivers/staging/dgnc/dgnc_tty.h
new file mode 100644
index 0000000..deb388d
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_tty.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+#ifndef __DGNC_TTY_H
+#define __DGNC_TTY_H
+
+#include "dgnc_driver.h"
+
+int dgnc_tty_register(struct board_t *brd);
+
+int dgnc_tty_preinit(void);
+int dgnc_tty_init(struct board_t *);
+
+void dgnc_tty_post_uninit(void);
+void dgnc_tty_uninit(struct board_t *);
+
+void dgnc_input(struct channel_t *ch);
+void dgnc_carrier(struct channel_t *ch);
+void dgnc_wakeup_writes(struct channel_t *ch);
+void dgnc_check_queue_flow_control(struct channel_t *ch);
+
+void dgnc_sniff_nowait_nolock(struct channel_t *ch, uchar *text, uchar *buf, int nbuf);
+
+#endif
diff --git a/drivers/staging/dgnc/dgnc_types.h b/drivers/staging/dgnc/dgnc_types.h
new file mode 100644
index 0000000..4fa3585
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_types.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+#ifndef __DGNC_TYPES_H
+#define __DGNC_TYPES_H
+
+#ifndef TRUE
+# define TRUE 1
+#endif
+
+#ifndef FALSE
+# define FALSE 0
+#endif
+
+/* Required for our shared headers! */
+typedef unsigned char uchar;
+
+#endif
diff --git a/drivers/staging/dgnc/digi.h b/drivers/staging/dgnc/digi.h
new file mode 100644
index 0000000..eb6e371
--- /dev/null
+++ b/drivers/staging/dgnc/digi.h
@@ -0,0 +1,416 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+#ifndef __DIGI_H
+#define __DIGI_H
+
+/************************************************************************
+ *** Definitions for Digi ditty(1) command.
+ ************************************************************************/
+
+
+/*
+ * Copyright (c) 1988-96 Digi International Inc., All Rights Reserved.
+ */
+
+/************************************************************************
+ * This module provides application access to special Digi
+ * serial line enhancements which are not standard UNIX(tm) features.
+ ************************************************************************/
+
+#if !defined(TIOCMODG)
+
+#define TIOCMODG ('d'<<8) | 250 /* get modem ctrl state */
+#define TIOCMODS ('d'<<8) | 251 /* set modem ctrl state */
+
+#ifndef TIOCM_LE
+#define TIOCM_LE 0x01 /* line enable */
+#define TIOCM_DTR 0x02 /* data terminal ready */
+#define TIOCM_RTS 0x04 /* request to send */
+#define TIOCM_ST 0x08 /* secondary transmit */
+#define TIOCM_SR 0x10 /* secondary receive */
+#define TIOCM_CTS 0x20 /* clear to send */
+#define TIOCM_CAR 0x40 /* carrier detect */
+#define TIOCM_RNG 0x80 /* ring indicator */
+#define TIOCM_DSR 0x100 /* data set ready */
+#define TIOCM_RI TIOCM_RNG /* ring (alternate) */
+#define TIOCM_CD TIOCM_CAR /* carrier detect (alt) */
+#endif
+
+#endif
+
+#if !defined(TIOCMSET)
+#define TIOCMSET ('d'<<8) | 252 /* set modem ctrl state */
+#define TIOCMGET	('d'<<8) | 253		/* get modem ctrl state */
+#endif
+
+#if !defined(TIOCMBIC)
+#define TIOCMBIC	('d'<<8) | 254		/* clear modem ctrl bits */
+#define TIOCMBIS	('d'<<8) | 255		/* set modem ctrl bits */
+#endif
+
+
+#if !defined(TIOCSDTR)
+#define TIOCSDTR ('e'<<8) | 0 /* set DTR */
+#define TIOCCDTR ('e'<<8) | 1 /* clear DTR */
+#endif
+
+/************************************************************************
+ * Ioctl command arguments for DIGI parameters.
+ ************************************************************************/
+#define DIGI_GETA ('e'<<8) | 94 /* Read params */
+
+#define DIGI_SETA ('e'<<8) | 95 /* Set params */
+#define DIGI_SETAW ('e'<<8) | 96 /* Drain & set params */
+#define DIGI_SETAF ('e'<<8) | 97 /* Drain, flush & set params */
+
+#define DIGI_KME ('e'<<8) | 98 /* Read/Write Host */
+ /* Adapter Memory */
+
+#define DIGI_GETFLOW ('e'<<8) | 99 /* Get startc/stopc flow */
+ /* control characters */
+#define DIGI_SETFLOW ('e'<<8) | 100 /* Set startc/stopc flow */
+ /* control characters */
+#define DIGI_GETAFLOW ('e'<<8) | 101 /* Get Aux. startc/stopc */
+ /* flow control chars */
+#define DIGI_SETAFLOW ('e'<<8) | 102 /* Set Aux. startc/stopc */
+ /* flow control chars */
+
+#define DIGI_GEDELAY ('d'<<8) | 246 /* Get edelay */
+#define DIGI_SEDELAY ('d'<<8) | 247 /* Set edelay */
+
+struct digiflow_t {
+ unsigned char startc; /* flow cntl start char */
+ unsigned char stopc; /* flow cntl stop char */
+};
+
+
+#ifdef FLOW_2200
+#define F2200_GETA ('e'<<8) | 104 /* Get 2x36 flow cntl flags */
+#define F2200_SETAW ('e'<<8) | 105 /* Set 2x36 flow cntl flags */
+#define F2200_MASK 0x03 /* 2200 flow cntl bit mask */
+#define FCNTL_2200 0x01 /* 2x36 terminal flow cntl */
+#define PCNTL_2200 0x02 /* 2x36 printer flow cntl */
+#define F2200_XON 0xf8
+#define P2200_XON 0xf9
+#define F2200_XOFF 0xfa
+#define P2200_XOFF 0xfb
+
+#define FXOFF_MASK 0x03 /* 2200 flow status mask */
+#define RCVD_FXOFF 0x01 /* 2x36 Terminal XOFF rcvd */
+#define RCVD_PXOFF 0x02 /* 2x36 Printer XOFF rcvd */
+#endif
+
+/************************************************************************
+ * Values for digi_flags
+ ************************************************************************/
+#define DIGI_IXON 0x0001 /* Handle IXON in the FEP */
+#define DIGI_FAST 0x0002 /* Fast baud rates */
+#define RTSPACE 0x0004 /* RTS input flow control */
+#define CTSPACE 0x0008 /* CTS output flow control */
+#define DSRPACE 0x0010 /* DSR output flow control */
+#define DCDPACE 0x0020 /* DCD output flow control */
+#define DTRPACE 0x0040 /* DTR input flow control */
+#define DIGI_COOK 0x0080 /* Cooked processing done in FEP */
+#define DIGI_FORCEDCD 0x0100 /* Force carrier */
+#define DIGI_ALTPIN 0x0200 /* Alternate RJ-45 pin config */
+#define DIGI_AIXON 0x0400 /* Aux flow control in fep */
+#define DIGI_PRINTER 0x0800 /* Hold port open for flow cntrl*/
+#define DIGI_PP_INPUT 0x1000 /* Change parallel port to input*/
+#define DIGI_DTR_TOGGLE 0x2000 /* Support DTR Toggle */
+#define DIGI_422 0x4000 /* for 422/232 selectable panel */
+#define DIGI_RTS_TOGGLE 0x8000 /* Support RTS Toggle */
+
+/************************************************************************
+ * These options are not supported on the comxi.
+ ************************************************************************/
+#define DIGI_COMXI (DIGI_FAST|DIGI_COOK|DSRPACE|DCDPACE|DTRPACE)
+
+#define DIGI_PLEN 28 /* String length */
+#define DIGI_TSIZ 10 /* Terminal string len */
+
+/************************************************************************
+ * Structure used with ioctl commands for DIGI parameters.
+ ************************************************************************/
+struct digi_t {
+ unsigned short digi_flags; /* Flags (see above) */
+ unsigned short digi_maxcps; /* Max printer CPS */
+ unsigned short digi_maxchar; /* Max chars in print queue */
+ unsigned short digi_bufsize; /* Buffer size */
+ unsigned char digi_onlen; /* Length of ON string */
+ unsigned char digi_offlen; /* Length of OFF string */
+ char digi_onstr[DIGI_PLEN]; /* Printer on string */
+ char digi_offstr[DIGI_PLEN]; /* Printer off string */
+ char digi_term[DIGI_TSIZ]; /* terminal string */
+};
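As a usage note, the digi_flags bits above are normally changed read-modify-write through DIGI_GETA and DIGI_SETAW (which drains output before applying the new parameters); a minimal userspace sketch, with fd an already-open port:

#include <sys/ioctl.h>
#include "digi.h"

/* Sketch: enable the alternate RJ-45 pin configuration on an open port. */
static int enable_altpin(int fd)
{
	struct digi_t dp;

	if (ioctl(fd, DIGI_GETA, &dp) < 0)
		return -1;
	dp.digi_flags |= DIGI_ALTPIN;
	return ioctl(fd, DIGI_SETAW, &dp);
}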
+
+/************************************************************************
+ * KME definitions and structures.
+ ************************************************************************/
+#define RW_IDLE 0 /* Operation complete */
+#define RW_READ 1 /* Read Concentrator Memory */
+#define RW_WRITE 2 /* Write Concentrator Memory */
+
+struct rw_t {
+ unsigned char rw_req; /* Request type */
+ unsigned char rw_board; /* Host Adapter board number */
+ unsigned char rw_conc; /* Concentrator number */
+ unsigned char rw_reserved; /* Reserved for expansion */
+ unsigned int rw_addr; /* Address in concentrator */
+ unsigned short rw_size; /* Read/write request length */
+ unsigned char rw_data[128]; /* Data to read/write */
+};
+
+/***********************************************************************
+ * Shrink Buffer and Board Information definitions and structures.
+ *
+ ************************************************************************/
+ /* Board type return codes */
+#define PCXI_TYPE 1 /* Board type at the designated port is a PC/Xi */
+#define PCXM_TYPE 2 /* Board type at the designated port is a PC/Xm */
+#define PCXE_TYPE 3 /* Board type at the designated port is a PC/Xe */
+#define MCXI_TYPE 4 /* Board type at the designated port is a MC/Xi */
+#define COMXI_TYPE 5 /* Board type at the designated port is a COM/Xi */
+
+ /* Non-Zero Result codes. */
+#define RESULT_NOBDFND 1 /* A Digi product at that port is not config installed */
+#define RESULT_NODESCT 2 /* A memory descriptor was not obtainable */
+#define RESULT_NOOSSIG 3 /* FEP/OS signature was not detected on the board */
+#define RESULT_TOOSML 4 /* Too small an area to shrink. */
+#define RESULT_NOCHAN 5 /* Channel structure for the board was not found */
+
+struct shrink_buf_struct {
+ unsigned int shrink_buf_vaddr; /* Virtual address of board */
+ unsigned int shrink_buf_phys; /* Physical address of board */
+ unsigned int shrink_buf_bseg; /* Amount of board memory */
+	unsigned int	shrink_buf_hseg;	/* '186 Beginning of Dual-Port	*/
+
+	unsigned int	shrink_buf_lseg;	/* '186 Beginning of freed memory */
+ unsigned int shrink_buf_mseg; /* Linear address from start of
+						   dual-port where freed memory
+ begins, host viewpoint. */
+
+ unsigned int shrink_buf_bdparam; /* Parameter for xxmemon and
+ xxmemoff */
+
+ unsigned int shrink_buf_reserva; /* Reserved */
+ unsigned int shrink_buf_reservb; /* Reserved */
+ unsigned int shrink_buf_reservc; /* Reserved */
+ unsigned int shrink_buf_reservd; /* Reserved */
+
+ unsigned char shrink_buf_result; /* Reason for call failing
+						    Zero is a good return */
+ unsigned char shrink_buf_init; /* Non-Zero if it caused an
+ xxinit call. */
+
+ unsigned char shrink_buf_anports; /* Number of async ports */
+ unsigned char shrink_buf_snports; /* Number of sync ports */
+ unsigned char shrink_buf_type; /* Board type 1 = PC/Xi,
+ 2 = PC/Xm,
+ 3 = PC/Xe
+ 4 = MC/Xi
+ 5 = COMX/i */
+ unsigned char shrink_buf_card; /* Card number */
+
+};
+
+/************************************************************************
+ * Structure to get driver status information
+ ************************************************************************/
+struct digi_dinfo {
+ unsigned int dinfo_nboards; /* # boards configured */
+ char dinfo_reserved[12]; /* for future expansion */
+ char dinfo_version[16]; /* driver version */
+};
+
+#define DIGI_GETDD ('d'<<8) | 248 /* get driver info */
+
+/************************************************************************
+ * Structure used with ioctl commands for per-board information
+ *
+ * physsize and memsize differ when board has "windowed" memory
+ ************************************************************************/
+struct digi_info {
+ unsigned int info_bdnum; /* Board number (0 based) */
+ unsigned int info_ioport; /* io port address */
+ unsigned int info_physaddr; /* memory address */
+ unsigned int info_physsize; /* Size of host mem window */
+ unsigned int info_memsize; /* Amount of dual-port mem */
+ /* on board */
+ unsigned short info_bdtype; /* Board type */
+ unsigned short info_nports; /* number of ports */
+ char info_bdstate; /* board state */
+ char info_reserved[7]; /* for future expansion */
+};
+
+#define DIGI_GETBD ('d'<<8) | 249 /* get board info */
+
+struct digi_stat {
+ unsigned int info_chan; /* Channel number (0 based) */
+ unsigned int info_brd; /* Board number (0 based) */
+ unsigned int info_cflag; /* cflag for channel */
+ unsigned int info_iflag; /* iflag for channel */
+ unsigned int info_oflag; /* oflag for channel */
+ unsigned int info_mstat; /* mstat for channel */
+ unsigned int info_tx_data; /* tx_data for channel */
+ unsigned int info_rx_data; /* rx_data for channel */
+ unsigned int info_hflow; /* hflow for channel */
+ unsigned int info_reserved[8]; /* for future expansion */
+};
+
+#define DIGI_GETSTAT ('d'<<8) | 244		/* get channel status */
+/************************************************************************
+ *
+ * Structure used with ioctl commands for per-channel information
+ *
+ ************************************************************************/
+struct digi_ch {
+ unsigned int info_bdnum; /* Board number (0 based) */
+ unsigned int info_channel; /* Channel index number */
+ unsigned int info_ch_cflag; /* Channel cflag */
+ unsigned int info_ch_iflag; /* Channel iflag */
+ unsigned int info_ch_oflag; /* Channel oflag */
+ unsigned int info_chsize; /* Channel structure size */
+ unsigned int info_sleep_stat; /* sleep status */
+ dev_t info_dev; /* device number */
+ unsigned char info_initstate; /* Channel init state */
+ unsigned char info_running; /* Channel running state */
+ int reserved[8]; /* reserved for future use */
+};
+
+/*
+* This structure is used with the DIGI_FEPCMD ioctl to
+* tell the driver which port to send the command for.
+*/
+struct digi_cmd {
+ int cmd;
+ int word;
+ int ncmds;
+ int chan; /* channel index (zero based) */
+ int bdid; /* board index (zero based) */
+};
+
+
+struct digi_getbuffer /* Struct for holding buffer use counts */
+{
+ unsigned long tIn;
+ unsigned long tOut;
+ unsigned long rxbuf;
+ unsigned long txbuf;
+ unsigned long txdone;
+};
+
+struct digi_getcounter {
+ unsigned long norun; /* number of UART overrun errors */
+ unsigned long noflow; /* number of buffer overflow errors */
+ unsigned long nframe; /* number of framing errors */
+ unsigned long nparity; /* number of parity errors */
+ unsigned long nbreak; /* number of breaks received */
+ unsigned long rbytes; /* number of received bytes */
+ unsigned long tbytes; /* number of bytes transmitted fully */
+};
+
+/*
+* info_sleep_stat defines
+*/
+#define INFO_RUNWAIT 0x0001
+#define INFO_WOPEN 0x0002
+#define INFO_TTIOW 0x0004
+#define INFO_CH_RWAIT 0x0008
+#define INFO_CH_WEMPTY 0x0010
+#define INFO_CH_WLOW 0x0020
+#define INFO_XXBUF_BUSY 0x0040
+
+#define DIGI_GETCH	('d'<<8) | 245		/* get channel info */
+
+/* Board type definitions */
+
+#define SUBTYPE 0007
+#define T_PCXI 0000
+#define T_PCXM 0001
+#define T_PCXE 0002
+#define T_PCXR 0003
+#define T_SP 0004
+#define T_SP_PLUS 0005
+# define T_HERC 0000
+# define T_HOU 0001
+# define T_LON 0002
+# define T_CHA 0003
+#define FAMILY 0070
+#define T_COMXI 0000
+#define T_PCXX 0010
+#define T_CX 0020
+#define T_EPC 0030
+#define T_PCLITE 0040
+#define T_SPXX 0050
+#define T_AVXX 0060
+#define T_DXB 0070
+#define T_A2K_4_8 0070
+#define BUSTYPE 0700
+#define T_ISABUS 0000
+#define T_MCBUS 0100
+#define T_EISABUS 0200
+#define T_PCIBUS 0400
+
+/* Board State Definitions */
+
+#define BD_RUNNING 0x0
+#define BD_REASON 0x7f
+#define BD_NOTFOUND 0x1
+#define BD_NOIOPORT 0x2
+#define BD_NOMEM 0x3
+#define BD_NOBIOS 0x4
+#define BD_NOFEP 0x5
+#define BD_FAILED 0x6
+#define BD_ALLOCATED 0x7
+#define BD_TRIBOOT 0x8
+#define BD_BADKME 0x80
+
+#define DIGI_SPOLL ('d'<<8) | 254 /* change poller rate */
+
+#define DIGI_SETCUSTOMBAUD _IOW('e', 106, int) /* Set integer baud rate */
+#define DIGI_GETCUSTOMBAUD _IOR('e', 107, int) /* Get integer baud rate */
+
+#define DIGI_REALPORT_GETBUFFERS ('e'<<8 ) | 108
+#define DIGI_REALPORT_SENDIMMEDIATE ('e'<<8 ) | 109
+#define DIGI_REALPORT_GETCOUNTERS ('e'<<8 ) | 110
+#define DIGI_REALPORT_GETEVENTS ('e'<<8 ) | 111
+
+#define EV_OPU 0x0001 //!<Output paused by client
+#define EV_OPS		0x0002		//!<Output paused by regular sw flowctrl
+#define EV_OPX 0x0004 //!<Output paused by extra sw flowctrl
+#define EV_OPH 0x0008 //!<Output paused by hw flowctrl
+#define EV_OPT 0x0800 //!<Output paused for RTS Toggle predelay
+
+#define EV_IPU 0x0010 //!<Input paused unconditionally by user
+#define EV_IPS 0x0020 //!<Input paused by high/low water marks
+//#define EV_IPH 0x0040 //!<Input paused w/ hardware
+#define EV_IPA 0x0400 //!<Input paused by pattern alarm module
+
+#define EV_TXB 0x0040 //!<Transmit break pending
+#define EV_TXI 0x0080 //!<Transmit immediate pending
+#define EV_TXF 0x0100 //!<Transmit flowctrl char pending
+#define EV_RXB 0x0200 //!<Break received
+
+#define EV_OPALL 0x080f //!<Output pause flags
+#define EV_IPALL 0x0430 //!<Input pause flags
+
+#endif /* DIGI_H */
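Tying the header back to the dgnc_tty handler shown earlier, the custom-baud ioctls take a plain int by pointer; a short sketch (the example rate is arbitrary):

#include <sys/ioctl.h>
#include "digi.h"

/* Request an arbitrary integer baud rate, e.g. set_custom_baud(fd, 250000).
 * The dgnc handler stores the rate and reprograms the UART through its
 * board ops param() hook. */
static int set_custom_baud(int fd, int rate)
{
	return ioctl(fd, DIGI_SETCUSTOMBAUD, &rate);
}

static int get_custom_baud(int fd, int *rate)
{
	return ioctl(fd, DIGI_GETCUSTOMBAUD, rate);
}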
diff --git a/drivers/staging/dgnc/dpacompat.h b/drivers/staging/dgnc/dpacompat.h
new file mode 100644
index 0000000..f96963b
--- /dev/null
+++ b/drivers/staging/dgnc/dpacompat.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+
+/*
+ * This structure holds data needed for the intelligent <--> nonintelligent
+ * DPA translation
+ */
+ struct ni_info {
+ int board;
+ int channel;
+ int dtr;
+ int rts;
+ int cts;
+ int dsr;
+ int ri;
+ int dcd;
+ int curtx;
+ int currx;
+ unsigned short iflag;
+ unsigned short oflag;
+ unsigned short cflag;
+ unsigned short lflag;
+
+ unsigned int mstat;
+ unsigned char hflow;
+
+ unsigned char xmit_stopped;
+ unsigned char recv_stopped;
+
+ unsigned int baud;
+};
+
+#define RW_READ 1
+#define RW_WRITE 2
+#define DIGI_KME ('e'<<8) | 98 /* Read/Write Host */
+
+#define SUBTYPE 0007
+#define T_PCXI 0000
+#define T_PCXEM 0001
+#define T_PCXE 0002
+#define T_PCXR 0003
+#define T_SP 0004
+#define T_SP_PLUS 0005
+
+#define T_HERC 0000
+#define T_HOU 0001
+#define T_LON 0002
+#define T_CHA 0003
+
+#define T_NEO 0000
+#define T_NEO_EXPRESS 0001
+#define T_CLASSIC 0002
+
+#define FAMILY 0070
+#define T_COMXI 0000
+#define T_NI 0000
+#define T_PCXX 0010
+#define T_CX 0020
+#define T_EPC 0030
+#define T_PCLITE 0040
+#define T_SPXX 0050
+#define T_AVXX 0060
+#define T_DXB 0070
+#define T_A2K_4_8 0070
+
+#define BUSTYPE 0700
+#define T_ISABUS 0000
+#define T_MCBUS 0100
+#define T_EISABUS 0200
+#define T_PCIBUS 0400
+
+/* Board State Definitions */
+
+#define BD_RUNNING 0x0
+#define BD_REASON 0x7f
+#define BD_NOTFOUND 0x1
+#define BD_NOIOPORT 0x2
+#define BD_NOMEM 0x3
+#define BD_NOBIOS 0x4
+#define BD_NOFEP 0x5
+#define BD_FAILED 0x6
+#define BD_ALLOCATED 0x7
+#define BD_TRIBOOT 0x8
+#define BD_BADKME 0x80
+
+#define DIGI_AIXON 0x0400 /* Aux flow control in fep */
+
+/* Ioctls needed for dpa operation */
+
+#define DIGI_GETDD ('d'<<8) | 248 /* get driver info */
+#define DIGI_GETBD ('d'<<8) | 249 /* get board info */
+#define DIGI_GET_NI_INFO ('d'<<8) | 250 /* nonintelligent state info */
+
+/* Other special ioctls */
+#define DIGI_TIMERIRQ ('d'<<8) | 251 /* Enable/disable RS_TIMER use */
+#define DIGI_LOOPBACK ('d'<<8) | 252 /* Enable/disable UART internal loopback */
diff --git a/drivers/staging/dgrp/dgrp_dpa_ops.c b/drivers/staging/dgrp/dgrp_dpa_ops.c
index 114799c..69bfe30 100644
--- a/drivers/staging/dgrp/dgrp_dpa_ops.c
+++ b/drivers/staging/dgrp/dgrp_dpa_ops.c
@@ -392,7 +392,7 @@ static long dgrp_dpa_ioctl(struct file *file, unsigned int cmd,
getnode.nd_rx_byte = nd->nd_rx_byte;
memset(&getnode.nd_ps_desc, 0, MAX_DESC_LEN);
- strncpy(getnode.nd_ps_desc, nd->nd_ps_desc, MAX_DESC_LEN);
+ strlcpy(getnode.nd_ps_desc, nd->nd_ps_desc, MAX_DESC_LEN);
if (copy_to_user(uarg, &getnode, sizeof(struct digi_node)))
return -EFAULT;
diff --git a/drivers/staging/dgrp/dgrp_driver.c b/drivers/staging/dgrp/dgrp_driver.c
index e456dc6c..08eedf0 100644
--- a/drivers/staging/dgrp/dgrp_driver.c
+++ b/drivers/staging/dgrp/dgrp_driver.c
@@ -52,19 +52,12 @@ MODULE_PARM_DESC(register_prdevices, "Turn on/off registering transparent print
module_param_named(pollrate, dgrp_poll_tick, int, 0644);
MODULE_PARM_DESC(pollrate, "Poll interval in ms");
-/* Driver load/unload functions */
-static int dgrp_init_module(void);
-static void dgrp_cleanup_module(void);
-
-module_init(dgrp_init_module);
-module_exit(dgrp_cleanup_module);
-
/*
* init_module()
*
* Module load. This is where it all starts.
*/
-static int dgrp_init_module(void)
+static int __init dgrp_init_module(void)
{
int ret;
@@ -89,7 +82,7 @@ static int dgrp_init_module(void)
/*
* Module unload. This is where it all ends.
*/
-static void dgrp_cleanup_module(void)
+static void __exit dgrp_cleanup_module(void)
{
struct nd_struct *nd, *next;
@@ -108,3 +101,6 @@ static void dgrp_cleanup_module(void)
kfree(nd);
}
}
+
+module_init(dgrp_init_module);
+module_exit(dgrp_cleanup_module);
diff --git a/drivers/staging/dgrp/dgrp_net_ops.c b/drivers/staging/dgrp/dgrp_net_ops.c
index 5b7833f..33ac7fb 100644
--- a/drivers/staging/dgrp/dgrp_net_ops.c
+++ b/drivers/staging/dgrp/dgrp_net_ops.c
@@ -278,7 +278,7 @@ static void parity_scan(struct ch_struct *ch, unsigned char *cbuf,
switch (ch->ch_pscan_state) {
default:
/* reset to sanity and fall through */
- ch->ch_pscan_state = 0 ;
+ ch->ch_pscan_state = 0;
case 0:
/* No FF seen yet */
@@ -1607,7 +1607,7 @@ static int dgrp_send(struct nd_struct *nd, long tmax)
if ((ch->ch_pun.un_flag & UN_LOW) != 0 ?
(n <= TBUF_LOW) :
(ch->ch_pun.un_flag & UN_TIME) != 0 ?
- ((jiffies - ch->ch_waketime) >= 0) :
+ time_is_before_jiffies(ch->ch_waketime) :
(n == 0 && ch->ch_s_tpos == ch->ch_s_tin) &&
((ch->ch_pun.un_flag & UN_EMPTY) != 0 ||
((ch->ch_tun.un_open_count &&
@@ -3083,7 +3083,7 @@ check_query:
nd->nd_hw_ver = (b[8] << 8) | b[9];
nd->nd_sw_ver = (b[10] << 8) | b[11];
nd->nd_hw_id = b[6];
- desclen = ((plen - 12) > MAX_DESC_LEN) ? MAX_DESC_LEN :
+ desclen = (plen - 12 > MAX_DESC_LEN - 1) ? MAX_DESC_LEN - 1 :
plen - 12;
if (desclen <= 0) {
diff --git a/drivers/staging/dgrp/dgrp_sysfs.c b/drivers/staging/dgrp/dgrp_sysfs.c
index 7d1b36d..8cee9c8 100644
--- a/drivers/staging/dgrp/dgrp_sysfs.c
+++ b/drivers/staging/dgrp/dgrp_sysfs.c
@@ -273,7 +273,7 @@ void dgrp_create_node_class_sysfs_files(struct nd_struct *nd)
sprintf(name, "node%ld", nd->nd_major);
nd->nd_class_dev = device_create(dgrp_class, dgrp_class_nodes_dev,
- MKDEV(0, nd->nd_major), NULL, name);
+ MKDEV(0, nd->nd_major), NULL, "%s", name);
ret = sysfs_create_group(&nd->nd_class_dev->kobj,
&dgrp_node_attribute_group);
diff --git a/drivers/staging/dgrp/dgrp_tty.c b/drivers/staging/dgrp/dgrp_tty.c
index 654f601..0d52de3 100644
--- a/drivers/staging/dgrp/dgrp_tty.c
+++ b/drivers/staging/dgrp/dgrp_tty.c
@@ -1120,7 +1120,9 @@ static void dgrp_tty_close(struct tty_struct *tty, struct file *file)
if (!sent_printer_offstr)
dgrp_tty_flush_buffer(tty);
+ spin_unlock_irqrestore(&nd->nd_lock, lock_flags);
tty_ldisc_flush(tty);
+ spin_lock_irqsave(&nd->nd_lock, lock_flags);
break;
}
diff --git a/drivers/staging/dgrp/drp.h b/drivers/staging/dgrp/drp.h
index 84a1e7b..4024b48 100644
--- a/drivers/staging/dgrp/drp.h
+++ b/drivers/staging/dgrp/drp.h
@@ -674,7 +674,7 @@ struct nd_struct {
ushort nd_hw_ver; /* HW version returned from PS */
ushort nd_sw_ver; /* SW version returned from PS */
uint nd_hw_id; /* HW ID returned from PS */
- u8 nd_ps_desc[MAX_DESC_LEN+1]; /* Description from PS */
+ u8 nd_ps_desc[MAX_DESC_LEN]; /* Description from PS */
uint nd_vpd_len; /* VPD len, if any */
u8 nd_vpd[VPDSIZE]; /* VPD, if any */
diff --git a/drivers/staging/dwc2/Kconfig b/drivers/staging/dwc2/Kconfig
index d15d9d5..be947d6 100644
--- a/drivers/staging/dwc2/Kconfig
+++ b/drivers/staging/dwc2/Kconfig
@@ -1,7 +1,6 @@
config USB_DWC2
tristate "DesignWare USB2 DRD Core Support"
depends on USB
- depends on VIRT_TO_BUS
help
Say Y or M here if your system has a Dual Role HighSpeed
USB controller based on the DesignWare HSOTG IP Core.
diff --git a/drivers/staging/dwc2/core.c b/drivers/staging/dwc2/core.c
index 3177db2..06dae67 100644
--- a/drivers/staging/dwc2/core.c
+++ b/drivers/staging/dwc2/core.c
@@ -90,12 +90,10 @@ static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
*/
static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
{
- u32 hs_phy_type = hsotg->hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK;
- u32 fs_phy_type = hsotg->hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK;
u32 hcfg, val;
- if ((hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
- fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
+ if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
+ hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
hsotg->core_params->ulpi_fs_ls > 0) ||
hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
/* Full speed PHY */
@@ -108,7 +106,7 @@ static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
hcfg = readl(hsotg->regs + HCFG);
hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
- hcfg |= val;
+ hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
writel(hcfg, hsotg->regs + HCFG);
}
@@ -245,7 +243,7 @@ static void dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
static void dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
- u32 usbcfg, hs_phy_type, fs_phy_type;
+ u32 usbcfg;
if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
@@ -256,11 +254,8 @@ static void dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
dwc2_hs_phy_init(hsotg, select_phy);
}
- hs_phy_type = hsotg->hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK;
- fs_phy_type = hsotg->hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK;
-
- if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
- fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
+ if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
+ hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
hsotg->core_params->ulpi_fs_ls > 0) {
dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
usbcfg = readl(hsotg->regs + GUSBCFG);
@@ -277,20 +272,20 @@ static void dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
{
- u32 ahbcfg = 0;
+ u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
- switch (hsotg->hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) {
+ switch (hsotg->hw_params.arch) {
case GHWCFG2_EXT_DMA_ARCH:
dev_err(hsotg->dev, "External DMA Mode not supported\n");
return -EINVAL;
case GHWCFG2_INT_DMA_ARCH:
dev_dbg(hsotg->dev, "Internal DMA Mode\n");
- /*
- * Old value was GAHBCFG_HBSTLEN_INCR - done for
- * Host mode ISOC in issue fix - vahrama
- */
- ahbcfg |= GAHBCFG_HBSTLEN_INCR4;
+ if (hsotg->core_params->ahbcfg != -1) {
+ ahbcfg &= GAHBCFG_CTRL_MASK;
+ ahbcfg |= hsotg->core_params->ahbcfg &
+ ~GAHBCFG_CTRL_MASK;
+ }
break;
case GHWCFG2_SLAVE_ONLY_ARCH:
@@ -313,9 +308,6 @@ static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
hsotg->core_params->dma_desc_enable = 0;
}
- if (hsotg->core_params->ahb_single > 0)
- ahbcfg |= GAHBCFG_AHB_SINGLE;
-
if (hsotg->core_params->dma_enable > 0)
ahbcfg |= GAHBCFG_DMA_EN;
@@ -331,7 +323,7 @@ static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
usbcfg = readl(hsotg->regs + GUSBCFG);
usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
- switch (hsotg->hwcfg2 & GHWCFG2_OP_MODE_MASK) {
+ switch (hsotg->hw_params.op_mode) {
case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
if (hsotg->core_params->otg_cap ==
DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
@@ -392,21 +384,6 @@ int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy, int irq)
/* Reset the Controller */
dwc2_core_reset(hsotg);
- dev_dbg(hsotg->dev, "num_dev_perio_in_ep=%d\n",
- hsotg->hwcfg4 >> GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT &
- GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK >>
- GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT);
-
- hsotg->total_fifo_size = hsotg->hwcfg3 >> GHWCFG3_DFIFO_DEPTH_SHIFT &
- GHWCFG3_DFIFO_DEPTH_MASK >> GHWCFG3_DFIFO_DEPTH_SHIFT;
- hsotg->rx_fifo_size = readl(hsotg->regs + GRXFSIZ);
- hsotg->nperio_tx_fifo_size =
- readl(hsotg->regs + GNPTXFSIZ) >> 16 & 0xffff;
-
- dev_dbg(hsotg->dev, "Total FIFO SZ=%d\n", hsotg->total_fifo_size);
- dev_dbg(hsotg->dev, "RxFIFO SZ=%d\n", hsotg->rx_fifo_size);
- dev_dbg(hsotg->dev, "NP TxFIFO SZ=%d\n", hsotg->nperio_tx_fifo_size);
-
/*
* This needs to happen in FS mode before any other programming occurs
*/
@@ -504,23 +481,18 @@ void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *params = hsotg->core_params;
- u32 rxfsiz, nptxfsiz, ptxfsiz, hptxfsiz, dfifocfg;
+ u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;
- if (!(hsotg->hwcfg2 & GHWCFG2_DYNAMIC_FIFO) ||
- !params->enable_dynamic_fifo)
+ if (!params->enable_dynamic_fifo)
return;
- dev_dbg(hsotg->dev, "Total FIFO Size=%d\n", hsotg->total_fifo_size);
- dev_dbg(hsotg->dev, "Rx FIFO Size=%d\n", params->host_rx_fifo_size);
- dev_dbg(hsotg->dev, "NP Tx FIFO Size=%d\n",
- params->host_nperio_tx_fifo_size);
- dev_dbg(hsotg->dev, "P Tx FIFO Size=%d\n",
- params->host_perio_tx_fifo_size);
-
/* Rx FIFO */
- dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n",
- readl(hsotg->regs + GRXFSIZ));
- writel(params->host_rx_fifo_size, hsotg->regs + GRXFSIZ);
+ grxfsiz = readl(hsotg->regs + GRXFSIZ);
+ dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
+ grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
+ grxfsiz |= params->host_rx_fifo_size <<
+ GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
+ writel(grxfsiz, hsotg->regs + GRXFSIZ);
dev_dbg(hsotg->dev, "new grxfsiz=%08x\n", readl(hsotg->regs + GRXFSIZ));
/* Non-periodic Tx FIFO */
@@ -537,27 +509,26 @@ static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
/* Periodic Tx FIFO */
dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
readl(hsotg->regs + HPTXFSIZ));
- ptxfsiz = params->host_perio_tx_fifo_size <<
- FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
- ptxfsiz |= (params->host_rx_fifo_size +
- params->host_nperio_tx_fifo_size) <<
- FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
- writel(ptxfsiz, hsotg->regs + HPTXFSIZ);
+ hptxfsiz = params->host_perio_tx_fifo_size <<
+ FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
+ hptxfsiz |= (params->host_rx_fifo_size +
+ params->host_nperio_tx_fifo_size) <<
+ FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
+ writel(hptxfsiz, hsotg->regs + HPTXFSIZ);
dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
readl(hsotg->regs + HPTXFSIZ));
if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
- hsotg->snpsid <= DWC2_CORE_REV_2_94a) {
+ hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
/*
* Global DFIFOCFG calculation for Host mode -
* include RxFIFO, NPTXFIFO and HPTXFIFO
*/
dfifocfg = readl(hsotg->regs + GDFIFOCFG);
- rxfsiz = readl(hsotg->regs + GRXFSIZ) & 0x0000ffff;
- nptxfsiz = readl(hsotg->regs + GNPTXFSIZ) >> 16 & 0xffff;
- hptxfsiz = readl(hsotg->regs + HPTXFSIZ) >> 16 & 0xffff;
dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
- dfifocfg |= (rxfsiz + nptxfsiz + hptxfsiz) <<
+ dfifocfg |= (params->host_rx_fifo_size +
+ params->host_nperio_tx_fifo_size +
+ params->host_perio_tx_fifo_size) <<
GDFIFOCFG_EPINFOBASE_SHIFT &
GDFIFOCFG_EPINFOBASE_MASK;
writel(dfifocfg, hsotg->regs + GDFIFOCFG);
@@ -603,10 +574,9 @@ void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
}
if (hsotg->core_params->dma_desc_enable > 0) {
- u32 op_mode = hsotg->hwcfg2 & GHWCFG2_OP_MODE_MASK;
-
- if (hsotg->snpsid < DWC2_CORE_REV_2_90a ||
- !(hsotg->hwcfg4 & GHWCFG4_DESC_DMA) ||
+ u32 op_mode = hsotg->hw_params.op_mode;
+ if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
+ !hsotg->hw_params.dma_desc_enable ||
op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
@@ -884,26 +854,20 @@ void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
hc_num, hcchar);
- dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, hc_num);
+ dev_vdbg(hsotg->dev, "%s: Channel %d\n",
+ __func__, hc_num);
dev_vdbg(hsotg->dev, " Dev Addr: %d\n",
- hcchar >> HCCHAR_DEVADDR_SHIFT &
- HCCHAR_DEVADDR_MASK >> HCCHAR_DEVADDR_SHIFT);
+ chan->dev_addr);
dev_vdbg(hsotg->dev, " Ep Num: %d\n",
- hcchar >> HCCHAR_EPNUM_SHIFT &
- HCCHAR_EPNUM_MASK >> HCCHAR_EPNUM_SHIFT);
+ chan->ep_num);
dev_vdbg(hsotg->dev, " Is In: %d\n",
- !!(hcchar & HCCHAR_EPDIR));
+ chan->ep_is_in);
dev_vdbg(hsotg->dev, " Is Low Speed: %d\n",
- !!(hcchar & HCCHAR_LSPDDEV));
+ chan->speed == USB_SPEED_LOW);
dev_vdbg(hsotg->dev, " Ep Type: %d\n",
- hcchar >> HCCHAR_EPTYPE_SHIFT &
- HCCHAR_EPTYPE_MASK >> HCCHAR_EPTYPE_SHIFT);
+ chan->ep_type);
dev_vdbg(hsotg->dev, " Max Pkt: %d\n",
- hcchar >> HCCHAR_MPS_SHIFT &
- HCCHAR_MPS_MASK >> HCCHAR_MPS_SHIFT);
- dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
- hcchar >> HCCHAR_MULTICNT_SHIFT &
- HCCHAR_MULTICNT_MASK >> HCCHAR_MULTICNT_SHIFT);
+ chan->max_packet);
}
/* Program the HCSPLT register for SPLITs */
@@ -933,8 +897,7 @@ void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
dev_vdbg(hsotg->dev, " is_in %d\n",
chan->ep_is_in);
dev_vdbg(hsotg->dev, " Max Pkt %d\n",
- hcchar >> HCCHAR_MPS_SHIFT &
- HCCHAR_MPS_MASK >> HCCHAR_MPS_SHIFT);
+ chan->max_packet);
dev_vdbg(hsotg->dev, " xferlen %d\n",
chan->xfer_len);
}
@@ -1146,16 +1109,10 @@ void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan, u32 *hcchar)
{
- u32 hfnum, frnum;
-
if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
- hfnum = readl(hsotg->regs + HFNUM);
- frnum = hfnum >> HFNUM_FRNUM_SHIFT &
- HFNUM_FRNUM_MASK >> HFNUM_FRNUM_SHIFT;
-
/* 1 if _next_ frame is odd, 0 if it's even */
- if (frnum & 0x1)
+ if (!(dwc2_hcd_get_frame_number(hsotg) & 0x1))
*hcchar |= HCCHAR_ODDFRM;
}
}
@@ -1389,14 +1346,14 @@ void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
chan->hc_num);
dev_vdbg(hsotg->dev, " Xfer Size: %d\n",
- hctsiz >> TSIZ_XFERSIZE_SHIFT &
- TSIZ_XFERSIZE_MASK >> TSIZ_XFERSIZE_SHIFT);
+ (hctsiz & TSIZ_XFERSIZE_MASK) >>
+ TSIZ_XFERSIZE_SHIFT);
dev_vdbg(hsotg->dev, " Num Pkts: %d\n",
- hctsiz >> TSIZ_PKTCNT_SHIFT &
- TSIZ_PKTCNT_MASK >> TSIZ_PKTCNT_SHIFT);
+ (hctsiz & TSIZ_PKTCNT_MASK) >>
+ TSIZ_PKTCNT_SHIFT);
dev_vdbg(hsotg->dev, " Start PID: %d\n",
- hctsiz >> TSIZ_SC_MC_PID_SHIFT &
- TSIZ_SC_MC_PID_MASK >> TSIZ_SC_MC_PID_SHIFT);
+ (hctsiz & TSIZ_SC_MC_PID_MASK) >>
+ TSIZ_SC_MC_PID_SHIFT);
}
if (hsotg->core_params->dma_enable > 0) {
@@ -1440,8 +1397,8 @@ void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
- hcchar >> HCCHAR_MULTICNT_SHIFT &
- HCCHAR_MULTICNT_MASK >> HCCHAR_MULTICNT_SHIFT);
+ (hcchar & HCCHAR_MULTICNT_MASK) >>
+ HCCHAR_MULTICNT_SHIFT);
writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
if (dbg_hc(chan))
@@ -1529,8 +1486,8 @@ void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
- hcchar >> HCCHAR_MULTICNT_SHIFT &
- HCCHAR_MULTICNT_MASK >> HCCHAR_MULTICNT_SHIFT);
+ (hcchar & HCCHAR_MULTICNT_MASK) >>
+ HCCHAR_MULTICNT_SHIFT);
writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
if (dbg_hc(chan))
@@ -1665,18 +1622,16 @@ void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
{
u32 usbcfg;
- u32 hwcfg2;
u32 hprt0;
int clock = 60; /* default value */
usbcfg = readl(hsotg->regs + GUSBCFG);
- hwcfg2 = readl(hsotg->regs + GHWCFG2);
hprt0 = readl(hsotg->regs + HPRT0);
if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
!(usbcfg & GUSBCFG_PHYIF16))
clock = 60;
- if ((usbcfg & GUSBCFG_PHYSEL) && (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) ==
+ if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
clock = 48;
if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
@@ -1689,14 +1644,13 @@ u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
!(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
clock = 48;
if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
- (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) ==
- GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
+ hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
clock = 48;
- if ((usbcfg & GUSBCFG_PHYSEL) && (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) ==
- GHWCFG2_FS_PHY_TYPE_DEDICATED)
+ if ((usbcfg & GUSBCFG_PHYSEL) &&
+ hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
clock = 48;
- if ((hprt0 & HPRT0_SPD_MASK) == 0)
+ if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
/* High speed case */
return 125 * clock;
else
@@ -1815,8 +1769,6 @@ void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
{
#ifdef DEBUG
u32 __iomem *addr;
- int i, ep_num;
- char *txfsiz;
dev_dbg(hsotg->dev, "Core Global Registers\n");
addr = hsotg->regs + GOTGCTL;
@@ -1892,23 +1844,6 @@ void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "HPTXFSIZ @0x%08lX : 0x%08X\n",
(unsigned long)addr, readl(addr));
- if (hsotg->core_params->en_multiple_tx_fifo <= 0) {
- ep_num = hsotg->hwcfg4 >> GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT &
- GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK >>
- GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
- txfsiz = "DPTXFSIZ";
- } else {
- ep_num = hsotg->hwcfg4 >> GHWCFG4_NUM_IN_EPS_SHIFT &
- GHWCFG4_NUM_IN_EPS_MASK >> GHWCFG4_NUM_IN_EPS_SHIFT;
- txfsiz = "DIENPTXF";
- }
-
- for (i = 0; i < ep_num; i++) {
- addr = hsotg->regs + DPTXFSIZN(i + 1);
- dev_dbg(hsotg->dev, "%s[%d] @0x%08lX : 0x%08X\n", txfsiz, i + 1,
- (unsigned long)addr, readl(addr));
- }
-
addr = hsotg->regs + PCGCTL;
dev_dbg(hsotg->dev, "PCGCTL @0x%08lX : 0x%08X\n",
(unsigned long)addr, readl(addr));
@@ -1984,17 +1919,14 @@ int dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
int retval = 0;
- u32 op_mode;
-
- op_mode = hsotg->hwcfg2 & GHWCFG2_OP_MODE_MASK;
switch (val) {
case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
- if (op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
+ if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
valid = 0;
break;
case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
- switch (op_mode) {
+ switch (hsotg->hw_params.op_mode) {
case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
@@ -2018,7 +1950,7 @@ int dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
dev_err(hsotg->dev,
"%d invalid for otg_cap parameter. Check HW configuration.\n",
val);
- switch (op_mode) {
+ switch (hsotg->hw_params.op_mode) {
case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
break;
@@ -2044,8 +1976,7 @@ int dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
int valid = 1;
int retval = 0;
- if (val > 0 && (hsotg->hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) ==
- GHWCFG2_SLAVE_ONLY_ARCH)
+ if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH)
valid = 0;
if (val < 0)
valid = 0;
@@ -2055,8 +1986,7 @@ int dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
dev_err(hsotg->dev,
"%d invalid for dma_enable parameter. Check HW configuration.\n",
val);
- val = (hsotg->hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) !=
- GHWCFG2_SLAVE_ONLY_ARCH;
+ val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH;
dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
retval = -EINVAL;
}
@@ -2071,7 +2001,7 @@ int dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
int retval = 0;
if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
- !(hsotg->hwcfg4 & GHWCFG4_DESC_DMA)))
+ !hsotg->hw_params.dma_desc_enable))
valid = 0;
if (val < 0)
valid = 0;
@@ -2082,7 +2012,7 @@ int dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
"%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
val);
val = (hsotg->core_params->dma_enable > 0 &&
- (hsotg->hwcfg4 & GHWCFG4_DESC_DMA));
+ hsotg->hw_params.dma_desc_enable);
dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
retval = -EINVAL;
}
@@ -2118,7 +2048,7 @@ int dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
int valid = 1;
int retval = 0;
- if (val > 0 && !(hsotg->hwcfg2 & GHWCFG2_DYNAMIC_FIFO))
+ if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
valid = 0;
if (val < 0)
valid = 0;
@@ -2128,7 +2058,7 @@ int dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
dev_err(hsotg->dev,
"%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
val);
- val = !!(hsotg->hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
+ val = hsotg->hw_params.enable_dynamic_fifo;
dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
retval = -EINVAL;
}
@@ -2142,7 +2072,7 @@ int dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
int valid = 1;
int retval = 0;
- if (val < 16 || val > readl(hsotg->regs + GRXFSIZ))
+ if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size)
valid = 0;
if (!valid) {
@@ -2150,7 +2080,7 @@ int dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
dev_err(hsotg->dev,
"%d invalid for host_rx_fifo_size. Check HW configuration.\n",
val);
- val = readl(hsotg->regs + GRXFSIZ);
+ val = hsotg->hw_params.host_rx_fifo_size;
dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
retval = -EINVAL;
}
@@ -2164,7 +2094,7 @@ int dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
int valid = 1;
int retval = 0;
- if (val < 16 || val > (readl(hsotg->regs + GNPTXFSIZ) >> 16 & 0xffff))
+ if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
valid = 0;
if (!valid) {
@@ -2172,7 +2102,7 @@ int dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
dev_err(hsotg->dev,
"%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
val);
- val = readl(hsotg->regs + GNPTXFSIZ) >> 16 & 0xffff;
+ val = hsotg->hw_params.host_nperio_tx_fifo_size;
dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
val);
retval = -EINVAL;
@@ -2187,7 +2117,7 @@ int dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
int valid = 1;
int retval = 0;
- if (val < 16 || val > (hsotg->hptxfsiz >> 16))
+ if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
valid = 0;
if (!valid) {
@@ -2195,7 +2125,7 @@ int dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
dev_err(hsotg->dev,
"%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
val);
- val = hsotg->hptxfsiz >> 16;
+ val = hsotg->hw_params.host_perio_tx_fifo_size;
dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
val);
retval = -EINVAL;
@@ -2209,11 +2139,8 @@ int dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
int retval = 0;
- int width = hsotg->hwcfg3 >> GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT &
- GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK >>
- GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
- if (val < 2047 || val >= (1 << (width + 11)))
+ if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
valid = 0;
if (!valid) {
@@ -2221,7 +2148,7 @@ int dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
dev_err(hsotg->dev,
"%d invalid for max_transfer_size. Check HW configuration.\n",
val);
- val = (1 << (width + 11)) - 1;
+ val = hsotg->hw_params.max_transfer_size;
dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
retval = -EINVAL;
}
@@ -2234,11 +2161,8 @@ int dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
int retval = 0;
- int width = hsotg->hwcfg3 >> GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT &
- GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK >>
- GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
- if (val < 15 || val > (1 << (width + 4)))
+ if (val < 15 || val > hsotg->hw_params.max_packet_count)
valid = 0;
if (!valid) {
@@ -2246,7 +2170,7 @@ int dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
dev_err(hsotg->dev,
"%d invalid for max_packet_count. Check HW configuration.\n",
val);
- val = (1 << (width + 4)) - 1;
+ val = hsotg->hw_params.max_packet_count;
dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
retval = -EINVAL;
}
@@ -2259,10 +2183,8 @@ int dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
int retval = 0;
- int num_chan = hsotg->hwcfg2 >> GHWCFG2_NUM_HOST_CHAN_SHIFT &
- GHWCFG2_NUM_HOST_CHAN_MASK >> GHWCFG2_NUM_HOST_CHAN_SHIFT;
- if (val < 1 || val > num_chan + 1)
+ if (val < 1 || val > hsotg->hw_params.host_channels)
valid = 0;
if (!valid) {
@@ -2270,7 +2192,7 @@ int dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
dev_err(hsotg->dev,
"%d invalid for host_channels. Check HW configuration.\n",
val);
- val = num_chan + 1;
+ val = hsotg->hw_params.host_channels;
dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
retval = -EINVAL;
}
@@ -2283,8 +2205,7 @@ int dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
{
#ifndef NO_FS_PHY_HW_CHECKS
int valid = 0;
- u32 hs_phy_type;
- u32 fs_phy_type;
+ u32 hs_phy_type, fs_phy_type;
#endif
int retval = 0;
@@ -2298,16 +2219,15 @@ int dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
#ifndef NO_FS_PHY_HW_CHECKS
valid = 0;
#else
- val = 0;
+ val = DWC2_PHY_TYPE_PARAM_FS;
dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
retval = -EINVAL;
#endif
}
#ifndef NO_FS_PHY_HW_CHECKS
- hs_phy_type = hsotg->hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK;
- fs_phy_type = hsotg->hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK;
-
+ hs_phy_type = hsotg->hw_params.hs_phy_type;
+ fs_phy_type = hsotg->hw_params.fs_phy_type;
if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
(hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
@@ -2325,7 +2245,7 @@ int dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
dev_err(hsotg->dev,
"%d invalid for phy_type. Check HW configuration.\n",
val);
- val = 0;
+ val = DWC2_PHY_TYPE_PARAM_FS;
if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
@@ -2360,8 +2280,8 @@ int dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
valid = 0;
}
- if (val == 0 && dwc2_get_param_phy_type(hsotg) ==
- DWC2_PHY_TYPE_PARAM_FS)
+ if (val == DWC2_SPEED_PARAM_HIGH &&
+ dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
valid = 0;
if (!valid) {
@@ -2370,7 +2290,7 @@ int dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
"%d invalid for speed parameter. Check HW configuration.\n",
val);
val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
- 1 : 0;
+ DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
retval = -EINVAL;
}
@@ -2456,14 +2376,29 @@ int dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
int dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
{
+ int valid = 0;
int retval = 0;
- if (DWC2_PARAM_TEST(val, 8, 8) && DWC2_PARAM_TEST(val, 16, 16)) {
+ switch (hsotg->hw_params.utmi_phy_data_width) {
+ case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
+ valid = (val == 8);
+ break;
+ case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
+ valid = (val == 16);
+ break;
+ case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
+ valid = (val == 8 || val == 16);
+ break;
+ }
+
+ if (!valid) {
if (val >= 0) {
- dev_err(hsotg->dev, "Wrong value for phy_utmi_width\n");
- dev_err(hsotg->dev, "phy_utmi_width must be 8 or 16\n");
+ dev_err(hsotg->dev,
+ "%d invalid for phy_utmi_width. Check HW configuration.\n",
+ val);
}
- val = 8;
+ val = (hsotg->hw_params.utmi_phy_data_width ==
+ GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
retval = -EINVAL;
}
@@ -2531,7 +2466,7 @@ int dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
}
#ifndef NO_FS_PHY_HW_CHECKS
- if (val == 1 && !(hsotg->hwcfg3 & GHWCFG3_I2C))
+ if (val == 1 && !(hsotg->hw_params.i2c_enable))
valid = 0;
if (!valid) {
@@ -2539,7 +2474,7 @@ int dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
dev_err(hsotg->dev,
"%d invalid for i2c_enable. Check HW configuration.\n",
val);
- val = !!(hsotg->hwcfg3 & GHWCFG3_I2C);
+ val = hsotg->hw_params.i2c_enable;
dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
retval = -EINVAL;
}
@@ -2564,7 +2499,7 @@ int dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
valid = 0;
}
- if (val == 1 && !(hsotg->hwcfg4 & GHWCFG4_DED_FIFO_EN))
+ if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
valid = 0;
if (!valid) {
@@ -2572,7 +2507,7 @@ int dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
dev_err(hsotg->dev,
"%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
val);
- val = !!(hsotg->hwcfg4 & GHWCFG4_DED_FIFO_EN);
+ val = hsotg->hw_params.en_multiple_tx_fifo;
dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
retval = -EINVAL;
}
@@ -2595,7 +2530,7 @@ int dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
valid = 0;
}
- if (val == 1 && hsotg->snpsid < DWC2_CORE_REV_2_92a)
+ if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
valid = 0;
if (!valid) {
@@ -2603,7 +2538,7 @@ int dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
dev_err(hsotg->dev,
"%d invalid for parameter reload_ctl. Check HW configuration.\n",
val);
- val = hsotg->snpsid >= DWC2_CORE_REV_2_92a;
+ val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
retval = -EINVAL;
}
@@ -2612,35 +2547,14 @@ int dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
return retval;
}
-int dwc2_set_param_ahb_single(struct dwc2_hsotg *hsotg, int val)
+int dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
{
- int valid = 1;
- int retval = 0;
-
- if (DWC2_PARAM_TEST(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "'%d' invalid for parameter ahb_single\n", val);
- dev_err(hsotg->dev, "ahb_single must be 0 or 1\n");
- }
- valid = 0;
- }
-
- if (val > 0 && hsotg->snpsid < DWC2_CORE_REV_2_94a)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for parameter ahb_single. Check HW configuration.\n",
- val);
- val = 0;
- dev_dbg(hsotg->dev, "Setting ahb_single to %d\n", val);
- retval = -EINVAL;
- }
-
- hsotg->core_params->ahb_single = val;
- return retval;
+ if (val != -1)
+ hsotg->core_params->ahbcfg = val;
+ else
+ hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
+ GAHBCFG_HBSTLEN_SHIFT;
+ return 0;
}
int dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
@@ -2663,12 +2577,171 @@ int dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
return retval;
}
+/**
+ * During device initialization, read various hardware configuration
+ * registers and interpret the contents.
+ */
+int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ unsigned width;
+ u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
+ u32 hptxfsiz, grxfsiz, gnptxfsiz;
+ u32 gusbcfg;
+
+ /*
+ * Attempt to ensure this device is really a DWC_otg Controller.
+ * Read and verify the GSNPSID register contents. The value should be
+	 * 0x4f542xxx or 0x4f543xxx, which corresponds to either "OT2" or "OT3",
+ * as in "OTG version 2.xx" or "OTG version 3.xx".
+ */
+ hw->snpsid = readl(hsotg->regs + GSNPSID);
+ if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
+ (hw->snpsid & 0xfffff000) != 0x4f543000) {
+ dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
+ hw->snpsid);
+ return -ENODEV;
+ }
+
+ dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
+ hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
+ hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
+
+ hwcfg1 = readl(hsotg->regs + GHWCFG1);
+ hwcfg2 = readl(hsotg->regs + GHWCFG2);
+ hwcfg3 = readl(hsotg->regs + GHWCFG3);
+ hwcfg4 = readl(hsotg->regs + GHWCFG4);
+ gnptxfsiz = readl(hsotg->regs + GNPTXFSIZ);
+ grxfsiz = readl(hsotg->regs + GRXFSIZ);
+
+ dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
+ dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
+ dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
+ dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
+ dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
+ dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);
+
+ /* Force host mode to get HPTXFSIZ exact power on value */
+ gusbcfg = readl(hsotg->regs + GUSBCFG);
+ gusbcfg |= GUSBCFG_FORCEHOSTMODE;
+ writel(gusbcfg, hsotg->regs + GUSBCFG);
+ usleep_range(100000, 150000);
+
+ hptxfsiz = readl(hsotg->regs + HPTXFSIZ);
+ dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
+ gusbcfg = readl(hsotg->regs + GUSBCFG);
+ gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
+ writel(gusbcfg, hsotg->regs + GUSBCFG);
+ usleep_range(100000, 150000);
+
+ /* hwcfg2 */
+ hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
+ GHWCFG2_OP_MODE_SHIFT;
+ hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
+ GHWCFG2_ARCHITECTURE_SHIFT;
+ hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
+ hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
+ GHWCFG2_NUM_HOST_CHAN_SHIFT);
+ hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
+ GHWCFG2_HS_PHY_TYPE_SHIFT;
+ hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
+ GHWCFG2_FS_PHY_TYPE_SHIFT;
+ hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
+ GHWCFG2_NUM_DEV_EP_SHIFT;
+ hw->nperio_tx_q_depth =
+ (hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
+ GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
+ hw->host_perio_tx_q_depth =
+ (hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
+ GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
+ hw->dev_token_q_depth =
+ (hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
+ GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;
+
+ /* hwcfg3 */
+ width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
+ GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
+ hw->max_transfer_size = (1 << (width + 11)) - 1;
+ width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
+ GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
+ hw->max_packet_count = (1 << (width + 4)) - 1;
+ hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
+ hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
+ GHWCFG3_DFIFO_DEPTH_SHIFT;
+
+ /* hwcfg4 */
+ hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
+ hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
+ GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
+ hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
+ hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
+ hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
+ GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
+
+ /* fifo sizes */
+ hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
+ GRXFSIZ_DEPTH_SHIFT;
+ hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
+ FIFOSIZE_DEPTH_SHIFT;
+ hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
+ FIFOSIZE_DEPTH_SHIFT;
+
+ dev_dbg(hsotg->dev, "Detected values from hardware:\n");
+ dev_dbg(hsotg->dev, " op_mode=%d\n",
+ hw->op_mode);
+ dev_dbg(hsotg->dev, " arch=%d\n",
+ hw->arch);
+ dev_dbg(hsotg->dev, " dma_desc_enable=%d\n",
+ hw->dma_desc_enable);
+ dev_dbg(hsotg->dev, " power_optimized=%d\n",
+ hw->power_optimized);
+ dev_dbg(hsotg->dev, " i2c_enable=%d\n",
+ hw->i2c_enable);
+ dev_dbg(hsotg->dev, " hs_phy_type=%d\n",
+ hw->hs_phy_type);
+ dev_dbg(hsotg->dev, " fs_phy_type=%d\n",
+ hw->fs_phy_type);
+ dev_dbg(hsotg->dev, " utmi_phy_data_wdith=%d\n",
+ hw->utmi_phy_data_width);
+ dev_dbg(hsotg->dev, " num_dev_ep=%d\n",
+ hw->num_dev_ep);
+ dev_dbg(hsotg->dev, " num_dev_perio_in_ep=%d\n",
+ hw->num_dev_perio_in_ep);
+ dev_dbg(hsotg->dev, " host_channels=%d\n",
+ hw->host_channels);
+ dev_dbg(hsotg->dev, " max_transfer_size=%d\n",
+ hw->max_transfer_size);
+ dev_dbg(hsotg->dev, " max_packet_count=%d\n",
+ hw->max_packet_count);
+ dev_dbg(hsotg->dev, " nperio_tx_q_depth=0x%0x\n",
+ hw->nperio_tx_q_depth);
+ dev_dbg(hsotg->dev, " host_perio_tx_q_depth=0x%0x\n",
+ hw->host_perio_tx_q_depth);
+ dev_dbg(hsotg->dev, " dev_token_q_depth=0x%0x\n",
+ hw->dev_token_q_depth);
+ dev_dbg(hsotg->dev, " enable_dynamic_fifo=%d\n",
+ hw->enable_dynamic_fifo);
+ dev_dbg(hsotg->dev, " en_multiple_tx_fifo=%d\n",
+ hw->en_multiple_tx_fifo);
+ dev_dbg(hsotg->dev, " total_fifo_size=%d\n",
+ hw->total_fifo_size);
+ dev_dbg(hsotg->dev, " host_rx_fifo_size=%d\n",
+ hw->host_rx_fifo_size);
+ dev_dbg(hsotg->dev, " host_nperio_tx_fifo_size=%d\n",
+ hw->host_nperio_tx_fifo_size);
+ dev_dbg(hsotg->dev, " host_perio_tx_fifo_size=%d\n",
+ hw->host_perio_tx_fifo_size);
+ dev_dbg(hsotg->dev, "\n");
+
+ return 0;
+}
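Each field above is pulled out with the (reg & MASK) >> SHIFT idiom that the rest of this series converts the driver to; a minimal sketch of that pattern, assuming the existing GHWCFG* mask/shift defines (the helper name is hypothetical, not part of the driver):

/*
 * Illustrative only -- not part of this patch. Shows the field-extraction
 * idiom used throughout dwc2_get_hwparams().
 */
static inline u32 dwc2_get_field(u32 reg, u32 mask, unsigned int shift)
{
	return (reg & mask) >> shift;
}

/* e.g.:
 *	hw->host_channels = 1 + dwc2_get_field(hwcfg2,
 *				GHWCFG2_NUM_HOST_CHAN_MASK,
 *				GHWCFG2_NUM_HOST_CHAN_SHIFT);
 */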
+
/*
* This function is called during module initialization to pass module parameters
* for the DWC_otg core. It returns non-zero if any parameters are invalid.
*/
int dwc2_set_parameters(struct dwc2_hsotg *hsotg,
- struct dwc2_core_params *params)
+ const struct dwc2_core_params *params)
{
int retval = 0;
@@ -2707,7 +2780,7 @@ int dwc2_set_parameters(struct dwc2_hsotg *hsotg,
retval |= dwc2_set_param_en_multiple_tx_fifo(hsotg,
params->en_multiple_tx_fifo);
retval |= dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
- retval |= dwc2_set_param_ahb_single(hsotg, params->ahb_single);
+ retval |= dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
retval |= dwc2_set_param_otg_ver(hsotg, params->otg_ver);
return retval;
diff --git a/drivers/staging/dwc2/core.h b/drivers/staging/dwc2/core.h
index fc075a7..9102f66 100644
--- a/drivers/staging/dwc2/core.h
+++ b/drivers/staging/dwc2/core.h
@@ -68,16 +68,18 @@ enum dwc2_lx_state {
/**
* struct dwc2_core_params - Parameters for configuring the core
*
- * @otg_cap: Specifies the OTG capabilities. The driver will
- * automatically detect the value for this parameter if
- * none is specified.
- * 0 - HNP and SRP capable (default)
+ * @otg_cap: Specifies the OTG capabilities.
+ * 0 - HNP and SRP capable
* 1 - SRP Only capable
- * 2 - No HNP/SRP capable
+ * 2 - No HNP/SRP capable (always available)
+ * Defaults to best available option (0, 1, then 2)
+ * @otg_ver: OTG version supported
+ * 0 - 1.3 (default)
+ * 1 - 2.0
* @dma_enable: Specifies whether to use slave or DMA mode for accessing
* the data FIFOs. The driver will automatically detect the
* value for this parameter if none is specified.
- * 0 - Slave
+ * 0 - Slave (always available)
* 1 - DMA (default, if available)
* @dma_desc_enable: When DMA mode is enabled, specifies whether to use
* address DMA mode or descriptor DMA mode for accessing
@@ -88,39 +90,47 @@ enum dwc2_lx_state {
* @speed: Specifies the maximum speed of operation in host and
* device mode. The actual speed depends on the speed of
* the attached device and the value of phy_type.
- * 0 - High Speed (default)
+ * 0 - High Speed
+ * (default when phy_type is UTMI+ or ULPI)
* 1 - Full Speed
- * @host_support_fs_ls_low_power: Specifies whether low power mode is supported
- * when attached to a Full Speed or Low Speed device in
- * host mode.
- * 0 - Don't support low power mode (default)
- * 1 - Support low power mode
- * @host_ls_low_power_phy_clk: Specifies the PHY clock rate in low power mode
- * when connected to a Low Speed device in host mode. This
- * parameter is applicable only if
- * host_support_fs_ls_low_power is enabled. If phy_type is
- * set to FS then defaults to 6 MHZ otherwise 48 MHZ.
- * 0 - 48 MHz
- * 1 - 6 MHz
+ * (default when phy_type is Full Speed)
* @enable_dynamic_fifo: 0 - Use coreConsultant-specified FIFO size parameters
- * 1 - Allow dynamic FIFO sizing (default)
+ * 1 - Allow dynamic FIFO sizing (default, if available)
+ * @en_multiple_tx_fifo: Specifies whether dedicated per-endpoint transmit FIFOs
+ * are enabled
* @host_rx_fifo_size: Number of 4-byte words in the Rx FIFO in host mode when
* dynamic FIFO sizing is enabled
- * 16 to 32768 (default 1024)
+ * 16 to 32768
+ * Actual maximum value is autodetected and also
+ * the default.
* @host_nperio_tx_fifo_size: Number of 4-byte words in the non-periodic Tx FIFO
* in host mode when dynamic FIFO sizing is enabled
- * 16 to 32768 (default 1024)
+ * 16 to 32768
+ * Actual maximum value is autodetected and also
+ * the default.
* @host_perio_tx_fifo_size: Number of 4-byte words in the periodic Tx FIFO in
* host mode when dynamic FIFO sizing is enabled
- * 16 to 32768 (default 1024)
+ * 16 to 32768
+ * Actual maximum value is autodetected and also
+ * the default.
* @max_transfer_size: The maximum transfer size supported, in bytes
- * 2047 to 65,535 (default 65,535)
+ * 2047 to 65,535
+ * Actual maximum value is autodetected and also
+ * the default.
* @max_packet_count: The maximum number of packets in a transfer
- * 15 to 511 (default 511)
+ * 15 to 511
+ * Actual maximum value is autodetected and also
+ * the default.
* @host_channels: The number of host channel registers to use
- * 1 to 16 (default 12)
+ * 1 to 16
+ * Actual maximum value is autodetected and also
+ * the default.
* @phy_type: Specifies the type of PHY interface to use. By default,
* the driver will automatically detect the phy_type.
+ * 0 - Full Speed Phy
+ * 1 - UTMI+ Phy
+ * 2 - ULPI Phy
+ * Defaults to best available option (2, 1, then 0)
* @phy_utmi_width: Specifies the UTMI+ Data Width (in bits). This parameter
* is applicable for a phy_type of UTMI+ or ULPI. (For a
* ULPI phy_type, this parameter indicates the data width
@@ -129,7 +139,7 @@ enum dwc2_lx_state {
* parameter was set to "8 and 16 bits", meaning that the
* core has been configured to work at either data path
* width.
- * 8 or 16 (default 16)
+ * 8 or 16 (default 16 if available)
* @phy_ulpi_ddr: Specifies whether the ULPI operates at double or single
* data rate. This parameter is only applicable if phy_type
* is ULPI.
@@ -139,27 +149,51 @@ enum dwc2_lx_state {
* data bus
* @phy_ulpi_ext_vbus: For a ULPI phy, specifies whether to use the internal or
* external supply to drive the VBus
+ * 0 - Internal supply (default)
+ * 1 - External supply
* @i2c_enable: Specifies whether to use the I2C interface for a full
* speed PHY. This parameter is only applicable if phy_type
* is FS.
* 0 - No (default)
* 1 - Yes
- * @ulpi_fs_ls: True to make ULPI phy operate in FS/LS mode only
- * @ts_dline: True to enable Term Select Dline pulsing
- * @en_multiple_tx_fifo: Specifies whether dedicated per-endpoint transmit FIFOs
- * are enabled
- * @reload_ctl: True to allow dynamic reloading of HFIR register during
- * runtime
- * @ahb_single: This bit enables SINGLE transfers for remainder data in
- * a transfer for DMA mode of operation.
- * 0 - remainder data will be sent using INCR burst size
- * 1 - remainder data will be sent using SINGLE burst size
- * @otg_ver: OTG version supported
- * 0 - 1.3
- * 1 - 2.0
+ * @ulpi_fs_ls: Make ULPI phy operate in FS/LS mode only
+ * 0 - No (default)
+ * 1 - Yes
+ * @host_support_fs_ls_low_power: Specifies whether low power mode is supported
+ * when attached to a Full Speed or Low Speed device in
+ * host mode.
+ * 0 - Don't support low power mode (default)
+ * 1 - Support low power mode
+ * @host_ls_low_power_phy_clk: Specifies the PHY clock rate in low power mode
+ * when connected to a Low Speed device in host
+ * mode. This parameter is applicable only if
+ * host_support_fs_ls_low_power is enabled.
+ * 0 - 48 MHz
+ * (default when phy_type is UTMI+ or ULPI)
+ * 1 - 6 MHz
+ * (default when phy_type is Full Speed)
+ * @ts_dline: Enable Term Select Dline pulsing
+ * 0 - No (default)
+ * 1 - Yes
+ * @reload_ctl: Allow dynamic reloading of HFIR register during runtime
+ * 0 - No (default for core < 2.92a)
+ * 1 - Yes (default for core >= 2.92a)
+ * @ahbcfg: This field allows the default value of the GAHBCFG
+ * register to be overridden
+ * -1 - GAHBCFG value will be set to 0x06
+ * (INCR4, default)
+ * all others - GAHBCFG value will be overridden with
+ * this value
+ * Not all bits can be controlled like this, the
+ * bits defined by GAHBCFG_CTRL_MASK are controlled
+ * by the driver and are ignored in this
+ * configuration value.
*
* The following parameters may be specified when starting the module. These
- * parameters define how the DWC_otg controller should be configured.
+ * parameters define how the DWC_otg controller should be configured. A
+ * value of -1 (or any other out of range value) for any parameter means
+ * to read the value from hardware (if possible) or use the built-in
+ * default described above.
*/
struct dwc2_core_params {
/*
@@ -189,7 +223,85 @@ struct dwc2_core_params {
int host_ls_low_power_phy_clk;
int ts_dline;
int reload_ctl;
- int ahb_single;
+ int ahbcfg;
+};
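For reference, the intended use of this table under the -1/autodetect convention described above would look roughly like the following platform-glue sketch; the names example_params and example_setup are illustrative only, while dwc2_set_all_params() and dwc2_hcd_init() are the real entry points declared in hcd.h:

/*
 * Hypothetical platform-glue sketch: request autodetected defaults for
 * every field, override one, then hand the table to the HCD.
 */
static struct dwc2_core_params example_params;

static int example_setup(struct dwc2_hsotg *hsotg, int irq)
{
	dwc2_set_all_params(&example_params, -1);	/* -1 = autodetect/default */
	example_params.otg_ver = 1;			/* request OTG 2.0 */

	return dwc2_hcd_init(hsotg, irq, &example_params);
}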
+
+/**
+ * struct dwc2_hw_params - Autodetected parameters.
+ *
+ * These parameters are the various parameters read from hardware
+ * registers during initialization. They typically contain the best
+ * supported or maximum value that can be configured in the
+ * corresponding dwc2_core_params value.
+ *
+ * The values that are not in dwc2_core_params are documented below.
+ *
+ * @op_mode Mode of Operation
+ * 0 - HNP- and SRP-Capable OTG (Host & Device)
+ * 1 - SRP-Capable OTG (Host & Device)
+ * 2 - Non-HNP and Non-SRP Capable OTG (Host & Device)
+ * 3 - SRP-Capable Device
+ * 4 - Non-OTG Device
+ * 5 - SRP-Capable Host
+ * 6 - Non-OTG Host
+ * @arch Architecture
+ * 0 - Slave only
+ * 1 - External DMA
+ * 2 - Internal DMA
+ * @power_optimized Are power optimizations enabled?
+ * @num_dev_ep Number of device endpoints available
+ * @num_dev_perio_in_ep Number of device periodic IN endpoints
+ * available
+ * @dev_token_q_depth Device Mode IN Token Sequence Learning Queue
+ * Depth
+ * 0 to 30
+ * @host_perio_tx_q_depth
+ * Host Mode Periodic Request Queue Depth
+ * 2, 4 or 8
+ * @nperio_tx_q_depth
+ * Non-Periodic Request Queue Depth
+ * 2, 4 or 8
+ * @hs_phy_type High-speed PHY interface type
+ * 0 - High-speed interface not supported
+ * 1 - UTMI+
+ * 2 - ULPI
+ * 3 - UTMI+ and ULPI
+ * @fs_phy_type Full-speed PHY interface type
+ * 0 - Full speed interface not supported
+ * 1 - Dedicated full speed interface
+ * 2 - FS pins shared with UTMI+ pins
+ * 3 - FS pins shared with ULPI pins
+ * @total_fifo_size: Total internal RAM for FIFOs (bytes)
+ * @utmi_phy_data_width UTMI+ PHY data width
+ * 0 - 8 bits
+ * 1 - 16 bits
+ * 2 - 8 or 16 bits
+ * @snpsid: Value from SNPSID register
+ */
+struct dwc2_hw_params {
+ unsigned op_mode:3;
+ unsigned arch:2;
+ unsigned dma_desc_enable:1;
+ unsigned enable_dynamic_fifo:1;
+ unsigned en_multiple_tx_fifo:1;
+ unsigned host_rx_fifo_size:16;
+ unsigned host_nperio_tx_fifo_size:16;
+ unsigned host_perio_tx_fifo_size:16;
+ unsigned nperio_tx_q_depth:3;
+ unsigned host_perio_tx_q_depth:3;
+ unsigned dev_token_q_depth:5;
+ unsigned max_transfer_size:26;
+ unsigned max_packet_count:11;
+ unsigned host_channels:4;
+ unsigned hs_phy_type:2;
+ unsigned fs_phy_type:2;
+ unsigned i2c_enable:1;
+ unsigned num_dev_ep:4;
+ unsigned num_dev_perio_in_ep:4;
+ unsigned total_fifo_size:16;
+ unsigned power_optimized:1;
+ unsigned utmi_phy_data_width:2;
+ u32 snpsid;
};
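The @snpsid value decodes into the core release digits the same way the "Core Release" dev_dbg() in dwc2_get_hwparams() prints them; a small illustrative sketch (the function name is hypothetical):

/*
 * Illustrative only: print hw_params.snpsid as a release number, mirroring
 * the debug output in dwc2_get_hwparams().
 */
static void example_print_release(struct dwc2_hsotg *hsotg)
{
	u32 id = hsotg->hw_params.snpsid;

	dev_info(hsotg->dev, "DWC_otg core release %1x.%1x%1x%1x\n",
		 id >> 12 & 0xf, id >> 8 & 0xf, id >> 4 & 0xf, id & 0xf);
}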
/**
@@ -199,15 +311,8 @@ struct dwc2_core_params {
* @dev: The struct device pointer
* @regs: Pointer to controller regs
* @core_params: Parameters that define how the core should be configured
- * @hwcfg1: Hardware Configuration - stored here for convenience
- * @hwcfg2: Hardware Configuration - stored here for convenience
- * @hwcfg3: Hardware Configuration - stored here for convenience
- * @hwcfg4: Hardware Configuration - stored here for convenience
- * @hptxfsiz: Hardware Configuration - stored here for convenience
- * @snpsid: Value from SNPSID register
- * @total_fifo_size: Total internal RAM for FIFOs (bytes)
- * @rx_fifo_size: Size of Rx FIFO (bytes)
- * @nperio_tx_fifo_size: Size of Non-periodic Tx FIFO (Bytes)
+ * @hw_params: Parameters that were autodetected from the
+ * hardware registers
* @op_state: The operational State, during transitions (a_host=>
* a_peripheral and b_device=>b_host) this may not match
* the core, but allows the software to determine
@@ -295,16 +400,10 @@ struct dwc2_core_params {
struct dwc2_hsotg {
struct device *dev;
void __iomem *regs;
+ /** Params detected from hardware */
+ struct dwc2_hw_params hw_params;
+ /** Params to actually use */
struct dwc2_core_params *core_params;
- u32 hwcfg1;
- u32 hwcfg2;
- u32 hwcfg3;
- u32 hwcfg4;
- u32 hptxfsiz;
- u32 snpsid;
- u16 total_fifo_size;
- u16 rx_fifo_size;
- u16 nperio_tx_fifo_size;
enum usb_otg_state op_state;
unsigned int queuing_high_bandwidth:1;
@@ -643,7 +742,7 @@ extern int dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg,
extern int dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val);
-extern int dwc2_set_param_ahb_single(struct dwc2_hsotg *hsotg, int val);
+extern int dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val);
extern int dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val);
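The setters declared above pair each dwc2_core_params field with a validated default. A conceptual sketch of that pattern, clamping a requested value to the limit autodetected by dwc2_get_hwparams(); this is not the driver's actual implementation, and the real setters also accept -1 to silently request the default:

/* Conceptual sketch only -- hypothetical helper, not driver code. */
static int example_set_host_channels(struct dwc2_hsotg *hsotg, int val)
{
	int retval = 0;

	if (val < 1 || val > hsotg->hw_params.host_channels) {
		dev_dbg(hsotg->dev, "host_channels=%d invalid, using %d\n",
			val, hsotg->hw_params.host_channels);
		val = hsotg->hw_params.host_channels;
		retval = -EINVAL;
	}

	hsotg->core_params->host_channels = val;
	return retval;
}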
diff --git a/drivers/staging/dwc2/core_intr.c b/drivers/staging/dwc2/core_intr.c
index 4c9ad14..07cfa2f 100644
--- a/drivers/staging/dwc2/core_intr.c
+++ b/drivers/staging/dwc2/core_intr.c
@@ -166,7 +166,7 @@ static void dwc2_handle_otg_intr(struct dwc2_hsotg *hsotg)
* WA for 3.00a - HW is not setting cur_mode; even this sometimes
* does not help
*/
- if (hsotg->snpsid >= DWC2_CORE_REV_3_00a)
+ if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_3_00a)
udelay(100);
if (gotgctl & GOTGCTL_HSTNEGSCS) {
if (dwc2_is_host_mode(hsotg)) {
@@ -380,7 +380,7 @@ static void dwc2_handle_usb_suspend_intr(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev,
"DSTS.Suspend Status=%d HWCFG4.Power Optimize=%d\n",
!!(dsts & DSTS_SUSPSTS),
- !!(hsotg->hwcfg4 & GHWCFG4_POWER_OPTIMIZ));
+ hsotg->hw_params.power_optimized);
} else {
if (hsotg->op_state == OTG_STATE_A_PERIPHERAL) {
dev_dbg(hsotg->dev, "a_peripheral->a_host\n");
@@ -403,8 +403,7 @@ static void dwc2_handle_usb_suspend_intr(struct dwc2_hsotg *hsotg)
#define GINTMSK_COMMON (GINTSTS_WKUPINT | GINTSTS_SESSREQINT | \
GINTSTS_CONIDSTSCHNG | GINTSTS_OTGINT | \
GINTSTS_MODEMIS | GINTSTS_DISCONNINT | \
- GINTSTS_USBSUSP | GINTSTS_RESTOREDONE | \
- GINTSTS_PRTINT)
+ GINTSTS_USBSUSP | GINTSTS_PRTINT)
/*
* This function returns the Core Interrupt register
@@ -450,7 +449,7 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
{
struct dwc2_hsotg *hsotg = dev;
u32 gintsts;
- int retval = 0;
+ irqreturn_t retval = IRQ_NONE;
if (dwc2_check_core_status(hsotg) < 0) {
dev_warn(hsotg->dev, "Controller is disconnected\n");
@@ -461,7 +460,7 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
gintsts = dwc2_read_common_intr(hsotg);
if (gintsts & ~GINTSTS_PRTINT)
- retval = 1;
+ retval = IRQ_HANDLED;
if (gintsts & GINTSTS_MODEMIS)
dwc2_handle_mode_mismatch_intr(hsotg);
@@ -478,12 +477,6 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
if (gintsts & GINTSTS_USBSUSP)
dwc2_handle_usb_suspend_intr(hsotg);
- if (gintsts & GINTSTS_RESTOREDONE) {
- gintsts = GINTSTS_RESTOREDONE;
- writel(gintsts, hsotg->regs + GINTSTS);
- dev_dbg(hsotg->dev, " --Restore done interrupt received--\n");
- }
-
if (gintsts & GINTSTS_PRTINT) {
/*
* The port interrupt occurs while in device mode with HPRT0
@@ -500,6 +493,6 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
spin_unlock(&hsotg->lock);
out:
- return IRQ_RETVAL(retval);
+ return retval;
}
EXPORT_SYMBOL_GPL(dwc2_handle_common_intr);
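Since dwc2_handle_common_intr() now returns irqreturn_t directly, it can be registered as an interrupt handler without a wrapper; a hypothetical fragment from platform/bus glue, assuming dev, irq and hsotg are already set up (not part of this patch):

/* Hypothetical registration snippet. */
retval = devm_request_irq(dev, irq, dwc2_handle_common_intr,
			  IRQF_SHARED, dev_name(dev), hsotg);
if (retval)
	return retval;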
diff --git a/drivers/staging/dwc2/hcd.c b/drivers/staging/dwc2/hcd.c
index 8551cce..da0d35c 100644
--- a/drivers/staging/dwc2/hcd.c
+++ b/drivers/staging/dwc2/hcd.c
@@ -134,11 +134,8 @@ static void dwc2_kill_urbs_in_qh_list(struct dwc2_hsotg *hsotg,
list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) {
list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
qtd_list_entry) {
- if (qtd->urb != NULL) {
- dwc2_host_complete(hsotg, qtd->urb->priv,
- qtd->urb, -ETIMEDOUT);
- dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
- }
+ dwc2_host_complete(hsotg, qtd, -ETIMEDOUT);
+ dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
}
}
}
@@ -421,6 +418,8 @@ static int dwc2_hcd_urb_dequeue(struct dwc2_hsotg *hsotg,
return -EINVAL;
}
+ urb->priv = NULL;
+
if (urb_qtd->in_process && qh->channel) {
dwc2_dump_channel_info(hsotg, qh->channel);
@@ -1006,10 +1005,10 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
dev_vdbg(hsotg->dev, "Queue periodic transactions\n");
tx_status = readl(hsotg->regs + HPTXSTS);
- qspcavail = tx_status >> TXSTS_QSPCAVAIL_SHIFT &
- TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT;
- fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
- TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
+ qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
+ TXSTS_QSPCAVAIL_SHIFT;
+ fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
+ TXSTS_FSPCAVAIL_SHIFT;
if (dbg_perio()) {
dev_vdbg(hsotg->dev, " P Tx Req Queue Space Avail (before queue): %d\n",
@@ -1021,7 +1020,9 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
qh_ptr = hsotg->periodic_sched_assigned.next;
while (qh_ptr != &hsotg->periodic_sched_assigned) {
tx_status = readl(hsotg->regs + HPTXSTS);
- if ((tx_status & TXSTS_QSPCAVAIL_MASK) == 0) {
+ qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
+ TXSTS_QSPCAVAIL_SHIFT;
+ if (qspcavail == 0) {
no_queue_space = 1;
break;
}
@@ -1047,8 +1048,8 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
qh->channel->multi_count > 1)
hsotg->queuing_high_bandwidth = 1;
- fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
- TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
+ fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
+ TXSTS_FSPCAVAIL_SHIFT;
status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail);
if (status < 0) {
no_fifo_space = 1;
@@ -1079,10 +1080,10 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
if (hsotg->core_params->dma_enable <= 0) {
tx_status = readl(hsotg->regs + HPTXSTS);
- qspcavail = tx_status >> TXSTS_QSPCAVAIL_SHIFT &
- TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT;
- fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
- TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
+ qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
+ TXSTS_QSPCAVAIL_SHIFT;
+ fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
+ TXSTS_FSPCAVAIL_SHIFT;
if (dbg_perio()) {
dev_vdbg(hsotg->dev,
" P Tx Req Queue Space Avail (after queue): %d\n",
@@ -1144,10 +1145,10 @@ static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg)
dev_vdbg(hsotg->dev, "Queue non-periodic transactions\n");
tx_status = readl(hsotg->regs + GNPTXSTS);
- qspcavail = tx_status >> TXSTS_QSPCAVAIL_SHIFT &
- TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT;
- fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
- TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
+ qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
+ TXSTS_QSPCAVAIL_SHIFT;
+ fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
+ TXSTS_FSPCAVAIL_SHIFT;
dev_vdbg(hsotg->dev, " NP Tx Req Queue Space Avail (before queue): %d\n",
qspcavail);
dev_vdbg(hsotg->dev, " NP Tx FIFO Space Avail (before queue): %d\n",
@@ -1167,8 +1168,8 @@ static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg)
*/
do {
tx_status = readl(hsotg->regs + GNPTXSTS);
- qspcavail = tx_status >> TXSTS_QSPCAVAIL_SHIFT &
- TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT;
+ qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
+ TXSTS_QSPCAVAIL_SHIFT;
if (hsotg->core_params->dma_enable <= 0 && qspcavail == 0) {
no_queue_space = 1;
break;
@@ -1183,8 +1184,8 @@ static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg)
if (qh->tt_buffer_dirty)
goto next;
- fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
- TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
+ fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
+ TXSTS_FSPCAVAIL_SHIFT;
status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail);
if (status > 0) {
@@ -1204,10 +1205,10 @@ next:
if (hsotg->core_params->dma_enable <= 0) {
tx_status = readl(hsotg->regs + GNPTXSTS);
- qspcavail = tx_status >> TXSTS_QSPCAVAIL_SHIFT &
- TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT;
- fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
- TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
+ qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
+ TXSTS_QSPCAVAIL_SHIFT;
+ fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
+ TXSTS_FSPCAVAIL_SHIFT;
dev_vdbg(hsotg->dev,
" NP Tx Req Queue Space Avail (after queue): %d\n",
qspcavail);
@@ -1563,9 +1564,9 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
break;
case GetPortStatus:
- dev_dbg(hsotg->dev,
- "GetPortStatus wIndex=0x%04x flags=0x%08x\n", windex,
- hsotg->flags.d32);
+ dev_vdbg(hsotg->dev,
+ "GetPortStatus wIndex=0x%04x flags=0x%08x\n", windex,
+ hsotg->flags.d32);
if (!windex || windex > 1)
goto error;
@@ -1598,7 +1599,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
}
hprt0 = readl(hsotg->regs + HPRT0);
- dev_dbg(hsotg->dev, " HPRT0: 0x%08x\n", hprt0);
+ dev_vdbg(hsotg->dev, " HPRT0: 0x%08x\n", hprt0);
if (hprt0 & HPRT0_CONNSTS)
port_status |= USB_PORT_STAT_CONNECTION;
@@ -1613,7 +1614,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
if (hprt0 & HPRT0_PWR)
port_status |= USB_PORT_STAT_POWER;
- speed = hprt0 & HPRT0_SPD_MASK;
+ speed = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
if (speed == HPRT0_SPD_HIGH_SPEED)
port_status |= USB_PORT_STAT_HIGH_SPEED;
else if (speed == HPRT0_SPD_LOW_SPEED)
@@ -1623,7 +1624,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
port_status |= USB_PORT_STAT_TEST;
/* USB_PORT_FEAT_INDICATOR unsupported always 0 */
- dev_dbg(hsotg->dev, "port_status=%08x\n", port_status);
+ dev_vdbg(hsotg->dev, "port_status=%08x\n", port_status);
*(__le32 *)buf = cpu_to_le32(port_status);
break;
@@ -1762,11 +1763,9 @@ int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg)
#ifdef DWC2_DEBUG_SOF
dev_vdbg(hsotg->dev, "DWC OTG HCD GET FRAME NUMBER %d\n",
- hfnum >> HFNUM_FRNUM_SHIFT &
- HFNUM_FRNUM_MASK >> HFNUM_FRNUM_SHIFT);
+ (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT);
#endif
- return hfnum >> HFNUM_FRNUM_SHIFT &
- HFNUM_FRNUM_MASK >> HFNUM_FRNUM_SHIFT;
+ return (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT;
}
int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg)
@@ -1917,18 +1916,14 @@ void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, " periodic_usecs: %d\n", hsotg->periodic_usecs);
np_tx_status = readl(hsotg->regs + GNPTXSTS);
dev_dbg(hsotg->dev, " NP Tx Req Queue Space Avail: %d\n",
- np_tx_status >> TXSTS_QSPCAVAIL_SHIFT &
- TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT);
+ (np_tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT);
dev_dbg(hsotg->dev, " NP Tx FIFO Space Avail: %d\n",
- np_tx_status >> TXSTS_FSPCAVAIL_SHIFT &
- TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT);
+ (np_tx_status & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT);
p_tx_status = readl(hsotg->regs + HPTXSTS);
dev_dbg(hsotg->dev, " P Tx Req Queue Space Avail: %d\n",
- p_tx_status >> TXSTS_QSPCAVAIL_SHIFT &
- TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT);
+ (p_tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT);
dev_dbg(hsotg->dev, " P Tx FIFO Space Avail: %d\n",
- p_tx_status >> TXSTS_FSPCAVAIL_SHIFT &
- TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT);
+ (p_tx_status & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT);
dwc2_hcd_dump_frrem(hsotg);
dwc2_dump_global_registers(hsotg);
dwc2_dump_host_registers(hsotg);
@@ -2088,23 +2083,29 @@ static void dwc2_free_bus_bandwidth(struct usb_hcd *hcd, u16 bw,
*
* Must be called with interrupt disabled and spinlock held
*/
-void dwc2_host_complete(struct dwc2_hsotg *hsotg, void *context,
- struct dwc2_hcd_urb *dwc2_urb, int status)
+void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
+ int status)
{
- struct urb *urb = context;
+ struct urb *urb;
int i;
- if (!urb) {
- dev_dbg(hsotg->dev, "## %s: context is NULL ##\n", __func__);
+ if (!qtd) {
+ dev_dbg(hsotg->dev, "## %s: qtd is NULL ##\n", __func__);
return;
}
- if (!dwc2_urb) {
- dev_dbg(hsotg->dev, "## %s: dwc2_urb is NULL ##\n", __func__);
+ if (!qtd->urb) {
+ dev_dbg(hsotg->dev, "## %s: qtd->urb is NULL ##\n", __func__);
return;
}
- urb->actual_length = dwc2_hcd_urb_get_actual_length(dwc2_urb);
+ urb = qtd->urb->priv;
+ if (!urb) {
+ dev_dbg(hsotg->dev, "## %s: urb->priv is NULL ##\n", __func__);
+ return;
+ }
+
+ urb->actual_length = dwc2_hcd_urb_get_actual_length(qtd->urb);
if (dbg_urb(urb))
dev_vdbg(hsotg->dev,
@@ -2121,18 +2122,17 @@ void dwc2_host_complete(struct dwc2_hsotg *hsotg, void *context,
}
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
- urb->error_count = dwc2_hcd_urb_get_error_count(dwc2_urb);
+ urb->error_count = dwc2_hcd_urb_get_error_count(qtd->urb);
for (i = 0; i < urb->number_of_packets; ++i) {
urb->iso_frame_desc[i].actual_length =
dwc2_hcd_urb_get_iso_desc_actual_length(
- dwc2_urb, i);
+ qtd->urb, i);
urb->iso_frame_desc[i].status =
- dwc2_hcd_urb_get_iso_desc_status(dwc2_urb, i);
+ dwc2_hcd_urb_get_iso_desc_status(qtd->urb, i);
}
}
urb->status = status;
- urb->hcpriv = NULL;
if (!status) {
if ((urb->transfer_flags & URB_SHORT_NOT_OK) &&
urb->actual_length < urb->transfer_buffer_length)
@@ -2149,7 +2149,10 @@ void dwc2_host_complete(struct dwc2_hsotg *hsotg, void *context,
urb);
}
- kfree(dwc2_urb);
+ usb_hcd_unlink_urb_from_ep(dwc2_hsotg_to_hcd(hsotg), urb);
+ urb->hcpriv = NULL;
+ kfree(qtd->urb);
+ qtd->urb = NULL;
spin_unlock(&hsotg->lock);
usb_hcd_giveback_urb(dwc2_hsotg_to_hcd(hsotg), urb, status);
@@ -2337,8 +2340,8 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
struct usb_host_endpoint *ep = urb->ep;
struct dwc2_hcd_urb *dwc2_urb;
int i;
+ int retval;
int alloc_bandwidth = 0;
- int retval = 0;
u8 ep_type = 0;
u32 tflags = 0;
void *buf;
@@ -2389,14 +2392,15 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
!(usb_pipein(urb->pipe))));
buf = urb->transfer_buffer;
+
if (hcd->self.uses_dma) {
- /*
- * Calculate virtual address from physical address, because
- * some class driver may not fill transfer_buffer.
- * In Buffer DMA mode virtual address is used, when handling
- * non-DWORD aligned buffers.
- */
- buf = bus_to_virt(urb->transfer_dma);
+ if (!buf && (urb->transfer_dma & 3)) {
+ dev_err(hsotg->dev,
+ "%s: unaligned transfer with no transfer_buffer",
+ __func__);
+ retval = -EINVAL;
+ goto fail1;
+ }
}
if (!(urb->transfer_flags & URB_NO_INTERRUPT))
@@ -2420,21 +2424,36 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
urb->iso_frame_desc[i].length);
urb->hcpriv = dwc2_urb;
- retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, &ep->hcpriv,
- mem_flags);
- if (retval) {
- urb->hcpriv = NULL;
- kfree(dwc2_urb);
- } else {
- if (alloc_bandwidth) {
- spin_lock_irqsave(&hsotg->lock, flags);
- dwc2_allocate_bus_bandwidth(hcd,
- dwc2_hcd_get_ep_bandwidth(hsotg, ep),
- urb);
- spin_unlock_irqrestore(&hsotg->lock, flags);
- }
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ retval = usb_hcd_link_urb_to_ep(hcd, urb);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ if (retval)
+ goto fail1;
+
+ retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, &ep->hcpriv, mem_flags);
+ if (retval)
+ goto fail2;
+
+ if (alloc_bandwidth) {
+ spin_lock_irqsave(&hsotg->lock, flags);
+ dwc2_allocate_bus_bandwidth(hcd,
+ dwc2_hcd_get_ep_bandwidth(hsotg, ep),
+ urb);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
}
+ return 0;
+
+fail2:
+ spin_lock_irqsave(&hsotg->lock, flags);
+ dwc2_urb->priv = NULL;
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+fail1:
+ urb->hcpriv = NULL;
+ kfree(dwc2_urb);
+
return retval;
}
@@ -2445,7 +2464,7 @@ static int _dwc2_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
int status)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
- int rc = 0;
+ int rc;
unsigned long flags;
dev_dbg(hsotg->dev, "DWC OTG HCD URB Dequeue\n");
@@ -2453,6 +2472,10 @@ static int _dwc2_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
spin_lock_irqsave(&hsotg->lock, flags);
+ rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (rc)
+ goto out;
+
if (!urb->hcpriv) {
dev_dbg(hsotg->dev, "## urb->hcpriv is NULL ##\n");
goto out;
@@ -2460,6 +2483,8 @@ static int _dwc2_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
rc = dwc2_hcd_urb_dequeue(hsotg, urb->hcpriv);
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+
kfree(urb->hcpriv);
urb->hcpriv = NULL;
@@ -2533,9 +2558,8 @@ static void _dwc2_hcd_endpoint_reset(struct usb_hcd *hcd,
static irqreturn_t _dwc2_hcd_irq(struct usb_hcd *hcd)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
- int retval = dwc2_hcd_intr(hsotg);
- return IRQ_RETVAL(retval);
+ return dwc2_handle_hcd_intr(hsotg);
}
/*
@@ -2654,7 +2678,7 @@ static void dwc2_hcd_free(struct dwc2_hsotg *hsotg)
writel(ahbcfg, hsotg->regs + GAHBCFG);
writel(0, hsotg->regs + GINTMSK);
- if (hsotg->snpsid >= DWC2_CORE_REV_3_00a) {
+ if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_3_00a) {
dctl = readl(hsotg->regs + DCTL);
dctl |= DCTL_SFTDISCON;
writel(dctl, hsotg->regs + DCTL);
@@ -2691,7 +2715,7 @@ void dwc2_set_all_params(struct dwc2_core_params *params, int value)
int i;
for (i = 0; i < size; i++)
- p[i] = -1;
+ p[i] = value;
}
EXPORT_SYMBOL_GPL(dwc2_set_all_params);
@@ -2702,83 +2726,26 @@ EXPORT_SYMBOL_GPL(dwc2_set_all_params);
* a negative error on failure.
*/
int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq,
- struct dwc2_core_params *params)
+ const struct dwc2_core_params *params)
{
struct usb_hcd *hcd;
struct dwc2_host_chan *channel;
- u32 snpsid, gusbcfg, hcfg;
+ u32 hcfg;
int i, num_channels;
- int retval = -ENOMEM;
+ int retval;
dev_dbg(hsotg->dev, "DWC OTG HCD INIT\n");
- /*
- * Attempt to ensure this device is really a DWC_otg Controller.
- * Read and verify the GSNPSID register contents. The value should be
- * 0x45f42xxx or 0x45f43xxx, which corresponds to either "OT2" or "OT3",
- * as in "OTG version 2.xx" or "OTG version 3.xx".
- */
- snpsid = readl(hsotg->regs + GSNPSID);
- if ((snpsid & 0xfffff000) != 0x4f542000 &&
- (snpsid & 0xfffff000) != 0x4f543000) {
- dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n", snpsid);
- retval = -ENODEV;
- goto error1;
- }
+ /* Detect config values from hardware */
+ retval = dwc2_get_hwparams(hsotg);
- /*
- * Store the contents of the hardware configuration registers here for
- * easy access later
- */
- hsotg->hwcfg1 = readl(hsotg->regs + GHWCFG1);
- hsotg->hwcfg2 = readl(hsotg->regs + GHWCFG2);
- hsotg->hwcfg3 = readl(hsotg->regs + GHWCFG3);
- hsotg->hwcfg4 = readl(hsotg->regs + GHWCFG4);
-
- dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hsotg->hwcfg1);
- dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hsotg->hwcfg2);
- dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hsotg->hwcfg3);
- dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hsotg->hwcfg4);
-
- /* Force host mode to get HPTXFSIZ exact power on value */
- gusbcfg = readl(hsotg->regs + GUSBCFG);
- gusbcfg |= GUSBCFG_FORCEHOSTMODE;
- writel(gusbcfg, hsotg->regs + GUSBCFG);
- usleep_range(100000, 150000);
-
- hsotg->hptxfsiz = readl(hsotg->regs + HPTXFSIZ);
- dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hsotg->hptxfsiz);
- gusbcfg = readl(hsotg->regs + GUSBCFG);
- gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
- writel(gusbcfg, hsotg->regs + GUSBCFG);
- usleep_range(100000, 150000);
+ if (retval)
+ return retval;
+
+ retval = -ENOMEM;
hcfg = readl(hsotg->regs + HCFG);
dev_dbg(hsotg->dev, "hcfg=%08x\n", hcfg);
- dev_dbg(hsotg->dev, "op_mode=%0x\n",
- hsotg->hwcfg2 >> GHWCFG2_OP_MODE_SHIFT &
- GHWCFG2_OP_MODE_MASK >> GHWCFG2_OP_MODE_SHIFT);
- dev_dbg(hsotg->dev, "arch=%0x\n",
- hsotg->hwcfg2 >> GHWCFG2_ARCHITECTURE_SHIFT &
- GHWCFG2_ARCHITECTURE_MASK >> GHWCFG2_ARCHITECTURE_SHIFT);
- dev_dbg(hsotg->dev, "num_dev_ep=%d\n",
- hsotg->hwcfg2 >> GHWCFG2_NUM_DEV_EP_SHIFT &
- GHWCFG2_NUM_DEV_EP_MASK >> GHWCFG2_NUM_DEV_EP_SHIFT);
- dev_dbg(hsotg->dev, "max_host_chan=%d\n",
- hsotg->hwcfg2 >> GHWCFG2_NUM_HOST_CHAN_SHIFT &
- GHWCFG2_NUM_HOST_CHAN_MASK >> GHWCFG2_NUM_HOST_CHAN_SHIFT);
- dev_dbg(hsotg->dev, "nonperio_tx_q_depth=0x%0x\n",
- hsotg->hwcfg2 >> GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT &
- GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK >>
- GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT);
- dev_dbg(hsotg->dev, "host_perio_tx_q_depth=0x%0x\n",
- hsotg->hwcfg2 >> GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT &
- GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK >>
- GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT);
- dev_dbg(hsotg->dev, "dev_token_q_depth=0x%0x\n",
- hsotg->hwcfg2 >> GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT &
- GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK >>
- GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT);
#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
hsotg->frame_num_array = kzalloc(sizeof(*hsotg->frame_num_array) *
@@ -2802,22 +2769,30 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq,
/* Validate parameter values */
dwc2_set_parameters(hsotg, params);
+ /* Check if the bus driver or platform code has set up a dma_mask */
+ if (hsotg->core_params->dma_enable > 0 &&
+ hsotg->dev->dma_mask == NULL) {
+ dev_warn(hsotg->dev,
+ "dma_mask not set, disabling DMA\n");
+ hsotg->core_params->dma_enable = 0;
+ hsotg->core_params->dma_desc_enable = 0;
+ }
+
/* Set device flags indicating whether the HCD supports DMA */
if (hsotg->core_params->dma_enable > 0) {
if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
dev_warn(hsotg->dev, "can't set DMA mask\n");
- if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(31)) < 0)
- dev_warn(hsotg->dev,
- "can't enable workaround for >2GB RAM\n");
- } else {
- dma_set_mask(hsotg->dev, 0);
- dma_set_coherent_mask(hsotg->dev, 0);
+ if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
+ dev_warn(hsotg->dev, "can't set coherent DMA mask\n");
}
hcd = usb_create_hcd(&dwc2_hc_driver, hsotg->dev, dev_name(hsotg->dev));
if (!hcd)
goto error1;
+ if (hsotg->core_params->dma_enable <= 0)
+ hcd->self.uses_dma = 0;
+
hcd->has_tt = 1;
spin_lock_init(&hsotg->lock);
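The dma_mask check earlier in this hunk expects the bus or platform code to have provided a mask before dwc2_hcd_init() runs; a hypothetical platform-probe fragment doing that (not part of this patch, pdev is an assumed struct platform_device pointer):

/*
 * Hypothetical platform-probe fragment: provide a 32-bit DMA mask so the
 * dma_mask check above does not silently disable DMA.
 */
if (!pdev->dev.dma_mask)
	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
if (!pdev->dev.coherent_dma_mask)
	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);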
@@ -2844,11 +2819,6 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq,
}
INIT_WORK(&hsotg->wf_otg, dwc2_conn_id_status_change);
- hsotg->snpsid = readl(hsotg->regs + GSNPSID);
- dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x\n",
- hsotg->snpsid >> 12 & 0xf, hsotg->snpsid >> 8 & 0xf,
- hsotg->snpsid >> 4 & 0xf, hsotg->snpsid & 0xf);
-
setup_timer(&hsotg->wkp_timer, dwc2_wakeup_detected,
(unsigned long)hsotg);
@@ -2919,12 +2889,10 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq,
* allocates the DMA buffer pool, registers the USB bus, requests the
* IRQ line, and calls hcd_start method.
*/
- retval = usb_add_hcd(hcd, irq, IRQF_SHARED | IRQF_DISABLED);
+ retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval < 0)
goto error3;
- dwc2_dump_global_registers(hsotg);
- dwc2_dump_host_registers(hsotg);
dwc2_hcd_dump_state(hsotg);
dwc2_enable_global_interrupts(hsotg);
diff --git a/drivers/staging/dwc2/hcd.h b/drivers/staging/dwc2/hcd.h
index d071f1a..cc0a117 100644
--- a/drivers/staging/dwc2/hcd.h
+++ b/drivers/staging/dwc2/hcd.h
@@ -122,11 +122,11 @@ struct dwc2_host_chan {
unsigned ep_type:2;
unsigned max_packet:11;
unsigned data_pid_start:2;
-#define DWC2_HC_PID_DATA0 (TSIZ_SC_MC_PID_DATA0 >> TSIZ_SC_MC_PID_SHIFT)
-#define DWC2_HC_PID_DATA2 (TSIZ_SC_MC_PID_DATA2 >> TSIZ_SC_MC_PID_SHIFT)
-#define DWC2_HC_PID_DATA1 (TSIZ_SC_MC_PID_DATA1 >> TSIZ_SC_MC_PID_SHIFT)
-#define DWC2_HC_PID_MDATA (TSIZ_SC_MC_PID_MDATA >> TSIZ_SC_MC_PID_SHIFT)
-#define DWC2_HC_PID_SETUP (TSIZ_SC_MC_PID_SETUP >> TSIZ_SC_MC_PID_SHIFT)
+#define DWC2_HC_PID_DATA0 TSIZ_SC_MC_PID_DATA0
+#define DWC2_HC_PID_DATA2 TSIZ_SC_MC_PID_DATA2
+#define DWC2_HC_PID_DATA1 TSIZ_SC_MC_PID_DATA1
+#define DWC2_HC_PID_MDATA TSIZ_SC_MC_PID_MDATA
+#define DWC2_HC_PID_SETUP TSIZ_SC_MC_PID_SETUP
unsigned multi_count:2;
@@ -146,10 +146,10 @@ struct dwc2_host_chan {
u8 hub_addr;
u8 hub_port;
u8 xact_pos;
-#define DWC2_HCSPLT_XACTPOS_MID (HCSPLT_XACTPOS_MID >> HCSPLT_XACTPOS_SHIFT)
-#define DWC2_HCSPLT_XACTPOS_END (HCSPLT_XACTPOS_END >> HCSPLT_XACTPOS_SHIFT)
-#define DWC2_HCSPLT_XACTPOS_BEGIN (HCSPLT_XACTPOS_BEGIN >> HCSPLT_XACTPOS_SHIFT)
-#define DWC2_HCSPLT_XACTPOS_ALL (HCSPLT_XACTPOS_ALL >> HCSPLT_XACTPOS_SHIFT)
+#define DWC2_HCSPLT_XACTPOS_MID HCSPLT_XACTPOS_MID
+#define DWC2_HCSPLT_XACTPOS_END HCSPLT_XACTPOS_END
+#define DWC2_HCSPLT_XACTPOS_BEGIN HCSPLT_XACTPOS_BEGIN
+#define DWC2_HCSPLT_XACTPOS_ALL HCSPLT_XACTPOS_ALL
u8 requests;
u8 schinfo;
@@ -232,16 +232,19 @@ enum dwc2_transaction_type {
* - DWC2_HC_PID_DATA1
* @ping_state: Ping state
* @do_split: Full/low speed endpoint on high-speed hub requires split
- * @qtd_list: List of QTDs for this QH
- * @channel: Host channel currently processing transfers for this QH
+ * @td_first: Index of first activated isochronous transfer descriptor
+ * @td_last: Index of last activated isochronous transfer descriptor
* @usecs: Bandwidth in microseconds per (micro)frame
* @interval: Interval between transfers in (micro)frames
- * @sched_frame: (micro)frame to initialize a periodic transfer.
+ * @sched_frame: (Micro)frame to initialize a periodic transfer.
* The transfer executes in the following (micro)frame.
* @start_split_frame: (Micro)frame at which last start split was initialized
+ * @ntd: Actual number of transfer descriptors in a list
* @dw_align_buf: Used instead of original buffer if its physical address
* is not dword-aligned
* @dw_align_buf_dma: DMA address for align_buf
+ * @qtd_list: List of QTDs for this QH
+ * @channel: Host channel currently processing transfers for this QH
* @qh_list_entry: Entry for QH in either the periodic or non-periodic
* schedule
* @desc_list: List of transfer descriptors
@@ -249,9 +252,6 @@ enum dwc2_transaction_type {
* @n_bytes: Xfer Bytes array. Each element corresponds to a transfer
* descriptor and indicates original XferSize value for the
* descriptor
- * @ntd: Actual number of transfer descriptors in a list
- * @td_first: Index of first activated isochronous transfer descriptor
- * @td_last: Index of last activated isochronous transfer descriptor
* @tt_buffer_dirty True if clear_tt_buffer_complete is pending
*
* A Queue Head (QH) holds the static characteristics of an endpoint and
@@ -266,21 +266,21 @@ struct dwc2_qh {
u8 data_toggle;
u8 ping_state;
u8 do_split;
- struct list_head qtd_list;
- struct dwc2_host_chan *channel;
+ u8 td_first;
+ u8 td_last;
u16 usecs;
u16 interval;
u16 sched_frame;
u16 start_split_frame;
+ u16 ntd;
u8 *dw_align_buf;
dma_addr_t dw_align_buf_dma;
+ struct list_head qtd_list;
+ struct dwc2_host_chan *channel;
struct list_head qh_list_entry;
struct dwc2_hcd_dma_desc *desc_list;
dma_addr_t desc_list_dma;
u32 *n_bytes;
- u16 ntd;
- u8 td_first;
- u8 td_last;
unsigned tt_buffer_dirty:1;
};
@@ -448,11 +448,12 @@ static inline u8 dwc2_hcd_is_pipe_out(struct dwc2_hcd_pipe_info *pipe)
}
extern int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq,
- struct dwc2_core_params *params);
+ const struct dwc2_core_params *params);
extern void dwc2_hcd_remove(struct dwc2_hsotg *hsotg);
extern int dwc2_set_parameters(struct dwc2_hsotg *hsotg,
- struct dwc2_core_params *params);
+ const struct dwc2_core_params *params);
extern void dwc2_set_all_params(struct dwc2_core_params *params, int value);
+extern int dwc2_get_hwparams(struct dwc2_hsotg *hsotg);
/* Transaction Execution Functions */
extern enum dwc2_transaction_type dwc2_hcd_select_transactions(
@@ -646,14 +647,14 @@ extern void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
/* HCD Core API */
/**
- * dwc2_hcd_intr() - Called on every hardware interrupt
+ * dwc2_handle_hcd_intr() - Called on every hardware interrupt
*
* @hsotg: The DWC2 HCD
*
- * Returns non zero if interrupt is handled
- * Return 0 if interrupt is not handled
+ * Returns IRQ_HANDLED if interrupt is handled
+ * Return IRQ_NONE if interrupt is not handled
*/
-extern int dwc2_hcd_intr(struct dwc2_hsotg *hsotg);
+extern irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg);
/**
* dwc2_hcd_stop() - Halts the DWC_otg host mode operation
@@ -716,8 +717,8 @@ extern void dwc2_host_disconnect(struct dwc2_hsotg *hsotg);
extern void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context,
int *hub_addr, int *hub_port);
extern int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context);
-extern void dwc2_host_complete(struct dwc2_hsotg *hsotg, void *context,
- struct dwc2_hcd_urb *dwc2_urb, int status);
+extern void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
+ int status);
#ifdef DEBUG
/*
diff --git a/drivers/staging/dwc2/hcd_ddma.c b/drivers/staging/dwc2/hcd_ddma.c
index 5c0fd27..69070f4 100644
--- a/drivers/staging/dwc2/hcd_ddma.c
+++ b/drivers/staging/dwc2/hcd_ddma.c
@@ -800,11 +800,14 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
u16 remain = 0;
int rc = 0;
+ if (!qtd->urb)
+ return -EINVAL;
+
frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
if (chan->ep_is_in)
- remain = dma_desc->status >> HOST_DMA_ISOC_NBYTES_SHIFT &
- HOST_DMA_ISOC_NBYTES_MASK >> HOST_DMA_ISOC_NBYTES_SHIFT;
+ remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
+ HOST_DMA_ISOC_NBYTES_SHIFT;
if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
/*
@@ -826,7 +829,7 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
* urb->status is not used for isoc transfers here. The
* individual frame_desc status are used instead.
*/
- dwc2_host_complete(hsotg, qtd->urb->priv, qtd->urb, 0);
+ dwc2_host_complete(hsotg, qtd, 0);
dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
/*
@@ -884,13 +887,16 @@ static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
qtd_list_entry) {
- for (idx = 0; idx < qtd->urb->packet_count; idx++) {
- frame_desc = &qtd->urb->iso_descs[idx];
- frame_desc->status = err;
+ if (qtd->urb) {
+ for (idx = 0; idx < qtd->urb->packet_count;
+ idx++) {
+ frame_desc = &qtd->urb->iso_descs[idx];
+ frame_desc->status = err;
+ }
+
+ dwc2_host_complete(hsotg, qtd, err);
}
- dwc2_host_complete(hsotg, qtd->urb->priv, qtd->urb,
- err);
dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
}
@@ -929,8 +935,8 @@ static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
u16 remain = 0;
if (chan->ep_is_in)
- remain = dma_desc->status >> HOST_DMA_NBYTES_SHIFT &
- HOST_DMA_NBYTES_MASK >> HOST_DMA_NBYTES_SHIFT;
+ remain = (dma_desc->status & HOST_DMA_NBYTES_MASK) >>
+ HOST_DMA_NBYTES_SHIFT;
dev_vdbg(hsotg->dev, "remain=%d dwc2_urb=%p\n", remain, urb);
@@ -1015,6 +1021,9 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
dev_vdbg(hsotg->dev, "%s()\n", __func__);
+ if (!urb)
+ return -EINVAL;
+
dma_desc = &qh->desc_list[desc_num];
n_bytes = qh->n_bytes[desc_num];
dev_vdbg(hsotg->dev,
@@ -1024,7 +1033,7 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
halt_status, n_bytes,
xfer_done);
if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
- dwc2_host_complete(hsotg, urb->priv, urb, urb->status);
+ dwc2_host_complete(hsotg, qtd, urb->status);
dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n",
failed, *xfer_done, urb->status);
diff --git a/drivers/staging/dwc2/hcd_intr.c b/drivers/staging/dwc2/hcd_intr.c
index e24062f..e143f69 100644
--- a/drivers/staging/dwc2/hcd_intr.c
+++ b/drivers/staging/dwc2/hcd_intr.c
@@ -89,15 +89,20 @@ static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg,
{
struct urb *usb_urb;
- if (!chan->qh || !qtd->urb)
+ if (!chan->qh)
+ return;
+
+ if (chan->qh->dev_speed == USB_SPEED_HIGH)
+ return;
+
+ if (!qtd->urb)
return;
usb_urb = qtd->urb->priv;
- if (!usb_urb || !usb_urb->dev)
+ if (!usb_urb || !usb_urb->dev || !usb_urb->dev->tt)
return;
- if (chan->qh->dev_speed != USB_SPEED_HIGH &&
- qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) {
+ if (qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) {
chan->qh->tt_buffer_dirty = 1;
if (usb_hub_clear_tt_buffer(usb_urb))
/* Clear failed; let's hope things work anyway */
@@ -115,16 +120,13 @@ static void dwc2_sof_intr(struct dwc2_hsotg *hsotg)
{
struct list_head *qh_entry;
struct dwc2_qh *qh;
- u32 hfnum;
enum dwc2_transaction_type tr_type;
#ifdef DEBUG_SOF
dev_vdbg(hsotg->dev, "--Start of Frame Interrupt--\n");
#endif
- hfnum = readl(hsotg->regs + HFNUM);
- hsotg->frame_number = hfnum >> HFNUM_FRNUM_SHIFT &
- HFNUM_FRNUM_MASK >> HFNUM_FRNUM_SHIFT;
+ hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
dwc2_track_missed_sofs(hsotg);
@@ -163,19 +165,16 @@ static void dwc2_rx_fifo_level_intr(struct dwc2_hsotg *hsotg)
dev_vdbg(hsotg->dev, "--RxFIFO Level Interrupt--\n");
grxsts = readl(hsotg->regs + GRXSTSP);
- chnum = grxsts >> GRXSTS_HCHNUM_SHIFT &
- GRXSTS_HCHNUM_MASK >> GRXSTS_HCHNUM_SHIFT;
+ chnum = (grxsts & GRXSTS_HCHNUM_MASK) >> GRXSTS_HCHNUM_SHIFT;
chan = hsotg->hc_ptr_array[chnum];
if (!chan) {
dev_err(hsotg->dev, "Unable to get corresponding channel\n");
return;
}
- bcnt = grxsts >> GRXSTS_BYTECNT_SHIFT &
- GRXSTS_BYTECNT_MASK >> GRXSTS_BYTECNT_SHIFT;
- dpid = grxsts >> GRXSTS_DPID_SHIFT &
- GRXSTS_DPID_MASK >> GRXSTS_DPID_SHIFT;
- pktsts = grxsts & GRXSTS_PKTSTS_MASK;
+ bcnt = (grxsts & GRXSTS_BYTECNT_MASK) >> GRXSTS_BYTECNT_SHIFT;
+ dpid = (grxsts & GRXSTS_DPID_MASK) >> GRXSTS_DPID_SHIFT;
+ pktsts = (grxsts & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT;
/* Packet Status */
if (dbg_perio()) {
@@ -183,9 +182,7 @@ static void dwc2_rx_fifo_level_intr(struct dwc2_hsotg *hsotg)
dev_vdbg(hsotg->dev, " Count = %d\n", bcnt);
dev_vdbg(hsotg->dev, " DPID = %d, chan.dpid = %d\n", dpid,
chan->data_pid_start);
- dev_vdbg(hsotg->dev, " PStatus = %d\n",
- pktsts >> GRXSTS_PKTSTS_SHIFT &
- GRXSTS_PKTSTS_MASK >> GRXSTS_PKTSTS_SHIFT);
+ dev_vdbg(hsotg->dev, " PStatus = %d\n", pktsts);
}
switch (pktsts) {
@@ -244,6 +241,7 @@ static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
u32 usbcfg;
u32 prtspd;
u32 hcfg;
+ u32 fslspclksel;
u32 hfir;
dev_vdbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
@@ -263,7 +261,7 @@ static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
}
usbcfg = readl(hsotg->regs + GUSBCFG);
- prtspd = hprt0 & HPRT0_SPD_MASK;
+ prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
if (prtspd == HPRT0_SPD_LOW_SPEED || prtspd == HPRT0_SPD_FULL_SPEED) {
/* Low power */
@@ -275,6 +273,8 @@ static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
}
hcfg = readl(hsotg->regs + HCFG);
+ fslspclksel = (hcfg & HCFG_FSLSPCLKSEL_MASK) >>
+ HCFG_FSLSPCLKSEL_SHIFT;
if (prtspd == HPRT0_SPD_LOW_SPEED &&
params->host_ls_low_power_phy_clk ==
@@ -282,10 +282,10 @@ static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
/* 6 MHZ */
dev_vdbg(hsotg->dev,
"FS_PHY programming HCFG to 6 MHz\n");
- if ((hcfg & HCFG_FSLSPCLKSEL_MASK) !=
- HCFG_FSLSPCLKSEL_6_MHZ) {
+ if (fslspclksel != HCFG_FSLSPCLKSEL_6_MHZ) {
+ fslspclksel = HCFG_FSLSPCLKSEL_6_MHZ;
hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
- hcfg |= HCFG_FSLSPCLKSEL_6_MHZ;
+ hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
writel(hcfg, hsotg->regs + HCFG);
do_reset = 1;
}
@@ -293,10 +293,10 @@ static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
/* 48 MHZ */
dev_vdbg(hsotg->dev,
"FS_PHY programming HCFG to 48 MHz\n");
- if ((hcfg & HCFG_FSLSPCLKSEL_MASK) !=
- HCFG_FSLSPCLKSEL_48_MHZ) {
+ if (fslspclksel != HCFG_FSLSPCLKSEL_48_MHZ) {
+ fslspclksel = HCFG_FSLSPCLKSEL_48_MHZ;
hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
- hcfg |= HCFG_FSLSPCLKSEL_48_MHZ;
+ hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
writel(hcfg, hsotg->regs + HCFG);
do_reset = 1;
}
@@ -409,8 +409,8 @@ static u32 dwc2_get_actual_xfer_length(struct dwc2_hsotg *hsotg,
if (halt_status == DWC2_HC_XFER_COMPLETE) {
if (chan->ep_is_in) {
- count = hctsiz >> TSIZ_XFERSIZE_SHIFT &
- TSIZ_XFERSIZE_MASK >> TSIZ_XFERSIZE_SHIFT;
+ count = (hctsiz & TSIZ_XFERSIZE_MASK) >>
+ TSIZ_XFERSIZE_SHIFT;
length = chan->xfer_len - count;
if (short_read != NULL)
*short_read = (count != 0);
@@ -429,8 +429,7 @@ static u32 dwc2_get_actual_xfer_length(struct dwc2_hsotg *hsotg,
* hctsiz.xfersize field because that reflects the number of
* bytes transferred via the AHB, not the USB).
*/
- count = hctsiz >> TSIZ_PKTCNT_SHIFT &
- TSIZ_PKTCNT_MASK >> TSIZ_PKTCNT_SHIFT;
+ count = (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT;
length = (chan->start_pkt_count - count) * chan->max_packet;
}
@@ -465,7 +464,7 @@ static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
/* Non DWORD-aligned buffer case handling */
if (chan->align_buf && xfer_length && chan->ep_is_in) {
- dev_dbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
+ dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
dma_sync_single_for_cpu(hsotg->dev, urb->dma, urb->length,
DMA_FROM_DEVICE);
memcpy(urb->buf + urb->actual_length, chan->qh->dw_align_buf,
@@ -493,8 +492,7 @@ static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
__func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
dev_vdbg(hsotg->dev, " chan->xfer_len %d\n", chan->xfer_len);
dev_vdbg(hsotg->dev, " hctsiz.xfersize %d\n",
- hctsiz >> TSIZ_XFERSIZE_SHIFT &
- TSIZ_XFERSIZE_MASK >> TSIZ_XFERSIZE_SHIFT);
+ (hctsiz & TSIZ_XFERSIZE_MASK) >> TSIZ_XFERSIZE_SHIFT);
dev_vdbg(hsotg->dev, " urb->transfer_buffer_length %d\n", urb->length);
dev_vdbg(hsotg->dev, " urb->actual_length %d\n", urb->actual_length);
dev_vdbg(hsotg->dev, " short_read %d, xfer_done %d\n", short_read,
@@ -513,7 +511,7 @@ void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
struct dwc2_qtd *qtd)
{
u32 hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
- u32 pid = hctsiz & TSIZ_SC_MC_PID_MASK;
+ u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
if (pid == TSIZ_SC_MC_PID_DATA0)
@@ -560,8 +558,8 @@ static enum dwc2_halt_status dwc2_update_isoc_urb_state(
/* Non DWORD-aligned buffer case handling */
if (chan->align_buf && frame_desc->actual_length &&
chan->ep_is_in) {
- dev_dbg(hsotg->dev, "%s(): non-aligned buffer\n",
- __func__);
+ dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
+ __func__);
dma_sync_single_for_cpu(hsotg->dev, urb->dma,
urb->length, DMA_FROM_DEVICE);
memcpy(urb->buf + frame_desc->offset +
@@ -594,8 +592,8 @@ static enum dwc2_halt_status dwc2_update_isoc_urb_state(
/* Non DWORD-aligned buffer case handling */
if (chan->align_buf && frame_desc->actual_length &&
chan->ep_is_in) {
- dev_dbg(hsotg->dev, "%s(): non-aligned buffer\n",
- __func__);
+ dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
+ __func__);
dma_sync_single_for_cpu(hsotg->dev, urb->dma,
urb->length, DMA_FROM_DEVICE);
memcpy(urb->buf + frame_desc->offset +
@@ -626,7 +624,7 @@ static enum dwc2_halt_status dwc2_update_isoc_urb_state(
* urb->status is not used for isoc transfers. The individual
* frame_desc statuses are used instead.
*/
- dwc2_host_complete(hsotg, urb->priv, urb, 0);
+ dwc2_host_complete(hsotg, qtd, 0);
halt_status = DWC2_HC_XFER_URB_COMPLETE;
} else {
halt_status = DWC2_HC_XFER_COMPLETE;
@@ -717,11 +715,7 @@ static void dwc2_release_channel(struct dwc2_hsotg *hsotg,
dev_vdbg(hsotg->dev,
" Complete URB with transaction error\n");
free_qtd = 1;
- if (qtd->urb) {
- qtd->urb->status = -EPROTO;
- dwc2_host_complete(hsotg, qtd->urb->priv,
- qtd->urb, -EPROTO);
- }
+ dwc2_host_complete(hsotg, qtd, -EPROTO);
}
break;
case DWC2_HC_XFER_URB_DEQUEUE:
@@ -734,11 +728,7 @@ static void dwc2_release_channel(struct dwc2_hsotg *hsotg,
case DWC2_HC_XFER_PERIODIC_INCOMPLETE:
dev_vdbg(hsotg->dev, " Complete URB with I/O error\n");
free_qtd = 1;
- if (qtd && qtd->urb) {
- qtd->urb->status = -EIO;
- dwc2_host_complete(hsotg, qtd->urb->priv, qtd->urb,
- -EIO);
- }
+ dwc2_host_complete(hsotg, qtd, -EIO);
break;
case DWC2_HC_XFER_NO_HALT_STATUS:
default:
@@ -941,7 +931,7 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
frame_desc->actual_length += len;
if (chan->align_buf && len) {
- dev_dbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
+ dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
dma_sync_single_for_cpu(hsotg->dev, qtd->urb->dma,
qtd->urb->length, DMA_FROM_DEVICE);
memcpy(qtd->urb->buf + frame_desc->offset +
@@ -960,7 +950,7 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
}
if (qtd->isoc_frame_index == qtd->urb->packet_count) {
- dwc2_host_complete(hsotg, qtd->urb->priv, qtd->urb, 0);
+ dwc2_host_complete(hsotg, qtd, 0);
dwc2_release_channel(hsotg, chan, qtd,
DWC2_HC_XFER_URB_COMPLETE);
} else {
@@ -1043,7 +1033,7 @@ static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
dev_vdbg(hsotg->dev, " Control transfer complete\n");
if (urb->status == -EINPROGRESS)
urb->status = 0;
- dwc2_host_complete(hsotg, urb->priv, urb, urb->status);
+ dwc2_host_complete(hsotg, qtd, urb->status);
halt_status = DWC2_HC_XFER_URB_COMPLETE;
break;
}
@@ -1056,7 +1046,7 @@ static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
qtd);
if (urb_xfer_done) {
- dwc2_host_complete(hsotg, urb->priv, urb, urb->status);
+ dwc2_host_complete(hsotg, qtd, urb->status);
halt_status = DWC2_HC_XFER_URB_COMPLETE;
} else {
halt_status = DWC2_HC_XFER_COMPLETE;
@@ -1076,11 +1066,10 @@ static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
* interrupt
*/
if (urb_xfer_done) {
- dwc2_host_complete(hsotg, urb->priv, urb,
- urb->status);
- halt_status = DWC2_HC_XFER_URB_COMPLETE;
+ dwc2_host_complete(hsotg, qtd, urb->status);
+ halt_status = DWC2_HC_XFER_URB_COMPLETE;
} else {
- halt_status = DWC2_HC_XFER_COMPLETE;
+ halt_status = DWC2_HC_XFER_COMPLETE;
}
dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
@@ -1126,11 +1115,11 @@ static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
goto handle_stall_halt;
if (pipe_type == USB_ENDPOINT_XFER_CONTROL)
- dwc2_host_complete(hsotg, urb->priv, urb, -EPIPE);
+ dwc2_host_complete(hsotg, qtd, -EPIPE);
if (pipe_type == USB_ENDPOINT_XFER_BULK ||
pipe_type == USB_ENDPOINT_XFER_INT) {
- dwc2_host_complete(hsotg, urb->priv, urb, -EPIPE);
+ dwc2_host_complete(hsotg, qtd, -EPIPE);
/*
* USB protocol requires resetting the data toggle for bulk
* and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
@@ -1171,7 +1160,7 @@ static void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
/* Non DWORD-aligned buffer case handling */
if (chan->align_buf && xfer_length && chan->ep_is_in) {
- dev_dbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
+ dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
dma_sync_single_for_cpu(hsotg->dev, urb->dma, urb->length,
DMA_FROM_DEVICE);
memcpy(urb->buf + urb->actual_length, chan->qh->dw_align_buf,
@@ -1188,8 +1177,7 @@ static void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
dev_vdbg(hsotg->dev, " chan->start_pkt_count %d\n",
chan->start_pkt_count);
dev_vdbg(hsotg->dev, " hctsiz.pktcnt %d\n",
- hctsiz >> TSIZ_PKTCNT_SHIFT &
- TSIZ_PKTCNT_MASK >> TSIZ_PKTCNT_SHIFT);
+ (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT);
dev_vdbg(hsotg->dev, " chan->max_packet %d\n", chan->max_packet);
dev_vdbg(hsotg->dev, " bytes_transferred %d\n",
xfer_length);
@@ -1375,10 +1363,10 @@ static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
hsotg->core_params->dma_enable > 0) {
qtd->complete_split = 0;
qtd->isoc_split_offset = 0;
+ qtd->isoc_frame_index++;
if (qtd->urb &&
- ++qtd->isoc_frame_index == qtd->urb->packet_count) {
- dwc2_host_complete(hsotg, qtd->urb->priv,
- qtd->urb, 0);
+ qtd->isoc_frame_index == qtd->urb->packet_count) {
+ dwc2_host_complete(hsotg, qtd, 0);
dwc2_release_channel(hsotg, chan, qtd,
DWC2_HC_XFER_URB_COMPLETE);
} else {
@@ -1448,16 +1436,16 @@ static void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg,
dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Babble Error--\n",
chnum);
+ dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
+
if (hsotg->core_params->dma_desc_enable > 0) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
DWC2_HC_XFER_BABBLE_ERR);
- goto handle_babble_done;
+ goto disable_int;
}
if (chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
- if (qtd->urb)
- dwc2_host_complete(hsotg, qtd->urb->priv, qtd->urb,
- -EOVERFLOW);
+ dwc2_host_complete(hsotg, qtd, -EOVERFLOW);
dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_BABBLE_ERR);
} else {
enum dwc2_halt_status halt_status;
@@ -1467,8 +1455,7 @@ static void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg,
dwc2_halt_channel(hsotg, chan, qtd, halt_status);
}
-handle_babble_done:
- dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
+disable_int:
disable_hc_int(hsotg, chnum, HCINTMSK_BBLERR);
}
@@ -1493,6 +1480,8 @@ static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
if (!urb)
goto handle_ahberr_halt;
+ dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
+
hcchar = readl(hsotg->regs + HCCHAR(chnum));
hcsplt = readl(hsotg->regs + HCSPLT(chnum));
hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
@@ -1560,7 +1549,7 @@ static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
goto handle_ahberr_done;
}
- dwc2_host_complete(hsotg, urb->priv, urb, -EIO);
+ dwc2_host_complete(hsotg, qtd, -EIO);
handle_ahberr_halt:
/*
@@ -1570,7 +1559,6 @@ handle_ahberr_halt:
dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_AHB_ERR);
handle_ahberr_done:
- dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
disable_hc_int(hsotg, chnum, HCINTMSK_AHBERR);
}
@@ -1585,6 +1573,8 @@ static void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg,
dev_dbg(hsotg->dev,
"--Host Channel %d Interrupt: Transaction Error--\n", chnum);
+ dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
+
if (hsotg->core_params->dma_desc_enable > 0) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
DWC2_HC_XFER_XACT_ERR);
@@ -1628,7 +1618,6 @@ static void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg,
}
handle_xacterr_done:
- dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
disable_hc_int(hsotg, chnum, HCINTMSK_XACTERR);
}
@@ -1646,6 +1635,8 @@ static void dwc2_hc_frmovrun_intr(struct dwc2_hsotg *hsotg,
dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Frame Overrun--\n",
chnum);
+ dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
+
switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_BULK:
@@ -1660,7 +1651,6 @@ static void dwc2_hc_frmovrun_intr(struct dwc2_hsotg *hsotg,
break;
}
- dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
disable_hc_int(hsotg, chnum, HCINTMSK_FRMOVRUN);
}
@@ -1769,7 +1759,7 @@ static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
* For core with OUT NAK enhancement, the flow for high-speed
* CONTROL/BULK OUT is handled a little differently
*/
- if (hsotg->snpsid >= DWC2_CORE_REV_2_71a) {
+ if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_71a) {
if (chan->speed == USB_SPEED_HIGH && !chan->ep_is_in &&
(chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
chan->ep_type == USB_ENDPOINT_XFER_BULK)) {
@@ -2060,14 +2050,14 @@ static void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
}
/* This function handles interrupts for the HCD */
-int dwc2_hcd_intr(struct dwc2_hsotg *hsotg)
+irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg)
{
u32 gintsts, dbg_gintsts;
- int retval = 0;
+ irqreturn_t retval = IRQ_NONE;
if (dwc2_check_core_status(hsotg) < 0) {
dev_warn(hsotg->dev, "Controller is disconnected\n");
- return 0;
+ return retval;
}
spin_lock(&hsotg->lock);
@@ -2077,10 +2067,10 @@ int dwc2_hcd_intr(struct dwc2_hsotg *hsotg)
gintsts = dwc2_read_core_intr(hsotg);
if (!gintsts) {
spin_unlock(&hsotg->lock);
- return 0;
+ return retval;
}
- retval = 1;
+ retval = IRQ_HANDLED;
dbg_gintsts = gintsts;
#ifndef DEBUG_SOF
@@ -2102,9 +2092,6 @@ int dwc2_hcd_intr(struct dwc2_hsotg *hsotg)
dwc2_rx_fifo_level_intr(hsotg);
if (gintsts & GINTSTS_NPTXFEMP)
dwc2_np_tx_fifo_empty_intr(hsotg);
- if (gintsts & GINTSTS_I2CINT)
- /* Todo: Implement i2cintr handler */
- writel(GINTSTS_I2CINT, hsotg->regs + GINTSTS);
if (gintsts & GINTSTS_PRTINT)
dwc2_port_intr(hsotg);
if (gintsts & GINTSTS_HCHINT)
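
The hunks above also change the top-level HCD interrupt entry point from an int-returning dwc2_hcd_intr() to dwc2_handle_hcd_intr() returning irqreturn_t, so the bus glue can hand the core's answer straight back to the IRQ layer instead of translating 0/1 itself. A minimal sketch of that usage follows; the wrapper name dwc2_irq, the use of devm_request_irq() and the IRQF_SHARED flag are illustrative assumptions, not lifted from the dwc2 sources.

#include <linux/interrupt.h>

/* Illustrative bus-glue ISR: just forward IRQ_NONE/IRQ_HANDLED from the core */
static irqreturn_t dwc2_irq(int irq, void *dev_id)
{
	struct dwc2_hsotg *hsotg = dev_id;

	return dwc2_handle_hcd_intr(hsotg);
}

/* ...and at probe time (hypothetical wiring): */
retval = devm_request_irq(hsotg->dev, irq, dwc2_irq, IRQF_SHARED,
			  dev_name(hsotg->dev), hsotg);
if (retval)
	return retval;
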
diff --git a/drivers/staging/dwc2/hcd_queue.c b/drivers/staging/dwc2/hcd_queue.c
index b36f783..b1980ef 100644
--- a/drivers/staging/dwc2/hcd_queue.c
+++ b/drivers/staging/dwc2/hcd_queue.c
@@ -116,7 +116,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
qh->interval = 8;
#endif
hprt = readl(hsotg->regs + HPRT0);
- prtspd = hprt & HPRT0_SPD_MASK;
+ prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
if (prtspd == HPRT0_SPD_HIGH_SPEED &&
(dev_speed == USB_SPEED_LOW ||
dev_speed == USB_SPEED_FULL)) {
@@ -197,6 +197,9 @@ static struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
{
struct dwc2_qh *qh;
+ if (!urb->priv)
+ return NULL;
+
/* Allocate memory */
qh = kzalloc(sizeof(*qh), mem_flags);
if (!qh)
@@ -638,7 +641,7 @@ int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
struct dwc2_hcd_urb *urb = qtd->urb;
unsigned long flags;
int allocated = 0;
- int retval = 0;
+ int retval;
/*
* Get the QH which holds the QTD-list to insert to. Create QH if it
@@ -652,8 +655,19 @@ int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
}
spin_lock_irqsave(&hsotg->lock, flags);
+
retval = dwc2_hcd_qh_add(hsotg, *qh);
- if (retval && allocated) {
+ if (retval)
+ goto fail;
+
+ qtd->qh = *qh;
+ list_add_tail(&qtd->qtd_list_entry, &(*qh)->qtd_list);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ return 0;
+
+fail:
+ if (allocated) {
struct dwc2_qtd *qtd2, *qtd2_tmp;
struct dwc2_qh *qh_tmp = *qh;
@@ -668,8 +682,6 @@ int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
spin_unlock_irqrestore(&hsotg->lock, flags);
dwc2_hcd_qh_free(hsotg, qh_tmp);
} else {
- qtd->qh = *qh;
- list_add_tail(&qtd->qtd_list_entry, &(*qh)->qtd_list);
spin_unlock_irqrestore(&hsotg->lock, flags);
}
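
The dwc2_hcd_qtd_add() rework above converts the function to a single-exit error path: the QTD is linked onto the QH's list while the spinlock is still held, the success case returns 0 early, and only the fail label unwinds. A generic sketch of that lock-plus-goto-fail idiom, with hypothetical names (struct item, validate_item) standing in for the dwc2 specifics:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct item {
	struct list_head entry;
	int id;
};

/* Hypothetical check standing in for dwc2_hcd_qh_add() */
static int validate_item(struct item *it)
{
	return it->id >= 0 ? 0 : -EINVAL;
}

static int add_item(struct list_head *list, spinlock_t *lock, struct item *it)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(lock, flags);

	ret = validate_item(it);
	if (ret)
		goto fail;

	/* Success: link while the lock is held, then take the early exit */
	list_add_tail(&it->entry, list);
	spin_unlock_irqrestore(lock, flags);
	return 0;

fail:
	spin_unlock_irqrestore(lock, flags);
	return ret;	/* the dwc2 code additionally frees the QH it allocated here */
}
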
diff --git a/drivers/staging/dwc2/hw.h b/drivers/staging/dwc2/hw.h
index 382a1d7..9c92a3c 100644
--- a/drivers/staging/dwc2/hw.h
+++ b/drivers/staging/dwc2/hw.h
@@ -72,12 +72,16 @@
#define GAHBCFG_DMA_EN (1 << 5)
#define GAHBCFG_HBSTLEN_MASK (0xf << 1)
#define GAHBCFG_HBSTLEN_SHIFT 1
-#define GAHBCFG_HBSTLEN_SINGLE (0 << 1)
-#define GAHBCFG_HBSTLEN_INCR (1 << 1)
-#define GAHBCFG_HBSTLEN_INCR4 (3 << 1)
-#define GAHBCFG_HBSTLEN_INCR8 (5 << 1)
-#define GAHBCFG_HBSTLEN_INCR16 (7 << 1)
+#define GAHBCFG_HBSTLEN_SINGLE 0
+#define GAHBCFG_HBSTLEN_INCR 1
+#define GAHBCFG_HBSTLEN_INCR4 3
+#define GAHBCFG_HBSTLEN_INCR8 5
+#define GAHBCFG_HBSTLEN_INCR16 7
#define GAHBCFG_GLBL_INTR_EN (1 << 0)
+#define GAHBCFG_CTRL_MASK (GAHBCFG_P_TXF_EMP_LVL | \
+ GAHBCFG_NP_TXF_EMP_LVL | \
+ GAHBCFG_DMA_EN | \
+ GAHBCFG_GLBL_INTR_EN)
#define GUSBCFG HSOTG_REG(0x00C)
#define GUSBCFG_FORCEDEVMODE (1 << 30)
@@ -165,15 +169,15 @@
#define GRXSTS_FN_SHIFT 25
#define GRXSTS_PKTSTS_MASK (0xf << 17)
#define GRXSTS_PKTSTS_SHIFT 17
-#define GRXSTS_PKTSTS_GLOBALOUTNAK (1 << 17)
-#define GRXSTS_PKTSTS_OUTRX (2 << 17)
-#define GRXSTS_PKTSTS_HCHIN (2 << 17)
-#define GRXSTS_PKTSTS_OUTDONE (3 << 17)
-#define GRXSTS_PKTSTS_HCHIN_XFER_COMP (3 << 17)
-#define GRXSTS_PKTSTS_SETUPDONE (4 << 17)
-#define GRXSTS_PKTSTS_DATATOGGLEERR (5 << 17)
-#define GRXSTS_PKTSTS_SETUPRX (6 << 17)
-#define GRXSTS_PKTSTS_HCHHALTED (7 << 17)
+#define GRXSTS_PKTSTS_GLOBALOUTNAK 1
+#define GRXSTS_PKTSTS_OUTRX 2
+#define GRXSTS_PKTSTS_HCHIN 2
+#define GRXSTS_PKTSTS_OUTDONE 3
+#define GRXSTS_PKTSTS_HCHIN_XFER_COMP 3
+#define GRXSTS_PKTSTS_SETUPDONE 4
+#define GRXSTS_PKTSTS_DATATOGGLEERR 5
+#define GRXSTS_PKTSTS_SETUPRX 6
+#define GRXSTS_PKTSTS_HCHHALTED 7
#define GRXSTS_HCHNUM_MASK (0xf << 0)
#define GRXSTS_HCHNUM_SHIFT 0
#define GRXSTS_DPID_MASK (0x3 << 15)
@@ -184,16 +188,11 @@
#define GRXSTS_EPNUM_SHIFT 0
#define GRXFSIZ HSOTG_REG(0x024)
+#define GRXFSIZ_DEPTH_MASK (0xffff << 0)
+#define GRXFSIZ_DEPTH_SHIFT 0
#define GNPTXFSIZ HSOTG_REG(0x028)
-#define GNPTXFSIZ_NP_TXF_DEP_MASK (0xffff << 16)
-#define GNPTXFSIZ_NP_TXF_DEP_SHIFT 16
-#define GNPTXFSIZ_NP_TXF_DEP_LIMIT 0xffff
-#define GNPTXFSIZ_NP_TXF_DEP(_x) ((_x) << 16)
-#define GNPTXFSIZ_NP_TXF_ST_ADDR_MASK (0xffff << 0)
-#define GNPTXFSIZ_NP_TXF_ST_ADDR_SHIFT 0
-#define GNPTXFSIZ_NP_TXF_ST_ADDR_LIMIT 0xffff
-#define GNPTXFSIZ_NP_TXF_ST_ADDR(_x) ((_x) << 0)
+/* Use FIFOSIZE_* constants to access this register */
#define GNPTXSTS HSOTG_REG(0x02C)
#define GNPTXSTS_NP_TXQ_TOP_MASK (0x7f << 24)
@@ -244,32 +243,32 @@
#define GHWCFG2_NUM_DEV_EP_SHIFT 10
#define GHWCFG2_FS_PHY_TYPE_MASK (0x3 << 8)
#define GHWCFG2_FS_PHY_TYPE_SHIFT 8
-#define GHWCFG2_FS_PHY_TYPE_NOT_SUPPORTED (0 << 8)
-#define GHWCFG2_FS_PHY_TYPE_DEDICATED (1 << 8)
-#define GHWCFG2_FS_PHY_TYPE_SHARED_UTMI (2 << 8)
-#define GHWCFG2_FS_PHY_TYPE_SHARED_ULPI (3 << 8)
+#define GHWCFG2_FS_PHY_TYPE_NOT_SUPPORTED 0
+#define GHWCFG2_FS_PHY_TYPE_DEDICATED 1
+#define GHWCFG2_FS_PHY_TYPE_SHARED_UTMI 2
+#define GHWCFG2_FS_PHY_TYPE_SHARED_ULPI 3
#define GHWCFG2_HS_PHY_TYPE_MASK (0x3 << 6)
#define GHWCFG2_HS_PHY_TYPE_SHIFT 6
-#define GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED (0 << 6)
-#define GHWCFG2_HS_PHY_TYPE_UTMI (1 << 6)
-#define GHWCFG2_HS_PHY_TYPE_ULPI (2 << 6)
-#define GHWCFG2_HS_PHY_TYPE_UTMI_ULPI (3 << 6)
+#define GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED 0
+#define GHWCFG2_HS_PHY_TYPE_UTMI 1
+#define GHWCFG2_HS_PHY_TYPE_ULPI 2
+#define GHWCFG2_HS_PHY_TYPE_UTMI_ULPI 3
#define GHWCFG2_POINT2POINT (1 << 5)
#define GHWCFG2_ARCHITECTURE_MASK (0x3 << 3)
#define GHWCFG2_ARCHITECTURE_SHIFT 3
-#define GHWCFG2_SLAVE_ONLY_ARCH (0 << 3)
-#define GHWCFG2_EXT_DMA_ARCH (1 << 3)
-#define GHWCFG2_INT_DMA_ARCH (2 << 3)
+#define GHWCFG2_SLAVE_ONLY_ARCH 0
+#define GHWCFG2_EXT_DMA_ARCH 1
+#define GHWCFG2_INT_DMA_ARCH 2
#define GHWCFG2_OP_MODE_MASK (0x7 << 0)
#define GHWCFG2_OP_MODE_SHIFT 0
-#define GHWCFG2_OP_MODE_HNP_SRP_CAPABLE (0 << 0)
-#define GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE (1 << 0)
-#define GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE (2 << 0)
-#define GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE (3 << 0)
-#define GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE (4 << 0)
-#define GHWCFG2_OP_MODE_SRP_CAPABLE_HOST (5 << 0)
-#define GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST (6 << 0)
-#define GHWCFG2_OP_MODE_UNDEFINED (7 << 0)
+#define GHWCFG2_OP_MODE_HNP_SRP_CAPABLE 0
+#define GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE 1
+#define GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE 2
+#define GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE 3
+#define GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE 4
+#define GHWCFG2_OP_MODE_SRP_CAPABLE_HOST 5
+#define GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST 6
+#define GHWCFG2_OP_MODE_UNDEFINED 7
#define GHWCFG3 HSOTG_REG(0x004c)
#define GHWCFG3_DFIFO_DEPTH_MASK (0xffff << 16)
@@ -303,6 +302,9 @@
#define GHWCFG4_NUM_DEV_MODE_CTRL_EP_SHIFT 16
#define GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK (0x3 << 14)
#define GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT 14
+#define GHWCFG4_UTMI_PHY_DATA_WIDTH_8 0
+#define GHWCFG4_UTMI_PHY_DATA_WIDTH_16 1
+#define GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16 2
#define GHWCFG4_XHIBER (1 << 7)
#define GHWCFG4_HIBER (1 << 6)
#define GHWCFG4_MIN_AHB_FREQ (1 << 5)
@@ -391,16 +393,12 @@
#define ADPCTL_PRB_DSCHRG_SHIFT 0
#define HPTXFSIZ HSOTG_REG(0x100)
+/* Use FIFOSIZE_* constants to access this register */
#define DPTXFSIZN(_a) HSOTG_REG(0x104 + (((_a) - 1) * 4))
-#define DPTXFSIZN_DP_TXF_SIZE_MASK (0xffff << 16)
-#define DPTXFSIZN_DP_TXF_SIZE_SHIFT 16
-#define DPTXFSIZN_DP_TXF_SIZE_GET(_v) (((_v) >> 16) & 0xffff)
-#define DPTXFSIZN_DP_TXF_SIZE_LIMIT 0xffff
-#define DPTXFSIZN_DP_TXF_SIZE(_x) ((_x) << 16)
-#define DPTXFSIZN_DP_TXF_ST_ADDR_MASK (0xffff << 0)
-#define DPTXFSIZN_DP_TXF_ST_ADDR_SHIFT 0
+/* Use FIFOSIZE_* constants to access this register */
+/* These apply to the GNPTXFSIZ, HPTXFSIZ and DPTXFSIZN registers */
#define FIFOSIZE_DEPTH_MASK (0xffff << 16)
#define FIFOSIZE_DEPTH_SHIFT 16
#define FIFOSIZE_STARTADDR_MASK (0xffff << 0)
@@ -424,10 +422,10 @@
#define DCFG_NZ_STS_OUT_HSHK (1 << 2)
#define DCFG_DEVSPD_MASK (0x3 << 0)
#define DCFG_DEVSPD_SHIFT 0
-#define DCFG_DEVSPD_HS (0 << 0)
-#define DCFG_DEVSPD_FS (1 << 0)
-#define DCFG_DEVSPD_LS (2 << 0)
-#define DCFG_DEVSPD_FS48 (3 << 0)
+#define DCFG_DEVSPD_HS 0
+#define DCFG_DEVSPD_FS 1
+#define DCFG_DEVSPD_LS 2
+#define DCFG_DEVSPD_FS48 3
#define DCTL HSOTG_REG(0x804)
#define DCTL_PWRONPRGDONE (1 << 11)
@@ -450,10 +448,10 @@
#define DSTS_ERRATICERR (1 << 3)
#define DSTS_ENUMSPD_MASK (0x3 << 1)
#define DSTS_ENUMSPD_SHIFT 1
-#define DSTS_ENUMSPD_HS (0 << 1)
-#define DSTS_ENUMSPD_FS (1 << 1)
-#define DSTS_ENUMSPD_LS (2 << 1)
-#define DSTS_ENUMSPD_FS48 (3 << 1)
+#define DSTS_ENUMSPD_HS 0
+#define DSTS_ENUMSPD_FS 1
+#define DSTS_ENUMSPD_LS 2
+#define DSTS_ENUMSPD_FS48 3
#define DSTS_SUSPSTS (1 << 0)
#define DIEPMSK HSOTG_REG(0x810)
@@ -501,10 +499,10 @@
*/
#define D0EPCTL_MPS_MASK (0x3 << 0)
#define D0EPCTL_MPS_SHIFT 0
-#define D0EPCTL_MPS_64 (0 << 0)
-#define D0EPCTL_MPS_32 (1 << 0)
-#define D0EPCTL_MPS_16 (2 << 0)
-#define D0EPCTL_MPS_8 (3 << 0)
+#define D0EPCTL_MPS_64 0
+#define D0EPCTL_MPS_32 1
+#define D0EPCTL_MPS_16 2
+#define D0EPCTL_MPS_8 3
#define DXEPCTL_EPENA (1 << 31)
#define DXEPCTL_EPDIS (1 << 30)
@@ -522,10 +520,10 @@
#define DXEPCTL_SNP (1 << 20)
#define DXEPCTL_EPTYPE_MASK (0x3 << 18)
#define DXEPCTL_EPTYPE_SHIFT 18
-#define DXEPCTL_EPTYPE_CONTROL (0 << 18)
-#define DXEPCTL_EPTYPE_ISO (1 << 18)
-#define DXEPCTL_EPTYPE_BULK (2 << 18)
-#define DXEPCTL_EPTYPE_INTTERUPT (3 << 18)
+#define DXEPCTL_EPTYPE_CONTROL 0
+#define DXEPCTL_EPTYPE_ISO 1
+#define DXEPCTL_EPTYPE_BULK 2
+#define DXEPCTL_EPTYPE_INTTERUPT 3
#define DXEPCTL_NAKSTS (1 << 17)
#define DXEPCTL_DPID (1 << 16)
#define DXEPCTL_EOFRNUM (1 << 16)
@@ -645,9 +643,9 @@
#define HCFG_FSLSSUPP (1 << 2)
#define HCFG_FSLSPCLKSEL_MASK (0x3 << 0)
#define HCFG_FSLSPCLKSEL_SHIFT 0
-#define HCFG_FSLSPCLKSEL_30_60_MHZ (0 << 0)
-#define HCFG_FSLSPCLKSEL_48_MHZ (1 << 0)
-#define HCFG_FSLSPCLKSEL_6_MHZ (2 << 0)
+#define HCFG_FSLSPCLKSEL_30_60_MHZ 0
+#define HCFG_FSLSPCLKSEL_48_MHZ 1
+#define HCFG_FSLSPCLKSEL_6_MHZ 2
#define HFIR HSOTG_REG(0x0404)
#define HFIR_FRINT_MASK (0xffff << 0)
@@ -680,9 +678,9 @@
#define HPRT0 HSOTG_REG(0x0440)
#define HPRT0_SPD_MASK (0x3 << 17)
#define HPRT0_SPD_SHIFT 17
-#define HPRT0_SPD_HIGH_SPEED (0 << 17)
-#define HPRT0_SPD_FULL_SPEED (1 << 17)
-#define HPRT0_SPD_LOW_SPEED (2 << 17)
+#define HPRT0_SPD_HIGH_SPEED 0
+#define HPRT0_SPD_FULL_SPEED 1
+#define HPRT0_SPD_LOW_SPEED 2
#define HPRT0_TSTCTL_MASK (0xf << 13)
#define HPRT0_TSTCTL_SHIFT 13
#define HPRT0_PWR (1 << 12)
@@ -720,10 +718,10 @@
#define HCSPLT_COMPSPLT (1 << 16)
#define HCSPLT_XACTPOS_MASK (0x3 << 14)
#define HCSPLT_XACTPOS_SHIFT 14
-#define HCSPLT_XACTPOS_MID (0 << 14)
-#define HCSPLT_XACTPOS_END (1 << 14)
-#define HCSPLT_XACTPOS_BEGIN (2 << 14)
-#define HCSPLT_XACTPOS_ALL (3 << 14)
+#define HCSPLT_XACTPOS_MID 0
+#define HCSPLT_XACTPOS_END 1
+#define HCSPLT_XACTPOS_BEGIN 2
+#define HCSPLT_XACTPOS_ALL 3
#define HCSPLT_HUBADDR_MASK (0x7f << 7)
#define HCSPLT_HUBADDR_SHIFT 7
#define HCSPLT_PRTADDR_MASK (0x7f << 0)
@@ -751,11 +749,11 @@
#define TSIZ_DOPNG (1 << 31)
#define TSIZ_SC_MC_PID_MASK (0x3 << 29)
#define TSIZ_SC_MC_PID_SHIFT 29
-#define TSIZ_SC_MC_PID_DATA0 (0 << 29)
-#define TSIZ_SC_MC_PID_DATA2 (1 << 29)
-#define TSIZ_SC_MC_PID_DATA1 (2 << 29)
-#define TSIZ_SC_MC_PID_MDATA (3 << 29)
-#define TSIZ_SC_MC_PID_SETUP (3 << 29)
+#define TSIZ_SC_MC_PID_DATA0 0
+#define TSIZ_SC_MC_PID_DATA2 1
+#define TSIZ_SC_MC_PID_DATA1 2
+#define TSIZ_SC_MC_PID_MDATA 3
+#define TSIZ_SC_MC_PID_SETUP 3
#define TSIZ_PKTCNT_MASK (0x3ff << 19)
#define TSIZ_PKTCNT_SHIFT 19
#define TSIZ_NTD_MASK (0xff << 8)
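
The register-value macros touched above (GRXSTS_PKTSTS_*, GHWCFG2_*, DCFG_DEVSPD_*, HCFG_FSLSPCLKSEL_*, HPRT0_SPD_*, TSIZ_SC_MC_PID_*) are no longer pre-shifted bit patterns but bare field values, so callers first mask and shift the register and then compare against the plain constant. The dwc2_qh_init() hunk earlier in this patch already reads the port speed that way:

	hprt = readl(hsotg->regs + HPRT0);
	prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
	if (prtspd == HPRT0_SPD_HIGH_SPEED &&
	    (dev_speed == USB_SPEED_LOW || dev_speed == USB_SPEED_FULL)) {
		/* low/full-speed device behind a high-speed port:
		 * the QH is set up for split transactions */
	}
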
diff --git a/drivers/staging/dwc2/pci.c b/drivers/staging/dwc2/pci.c
index 69c65eb..9020260 100644
--- a/drivers/staging/dwc2/pci.c
+++ b/drivers/staging/dwc2/pci.c
@@ -59,7 +59,7 @@
static const char dwc2_driver_name[] = "dwc2";
-static struct dwc2_core_params dwc2_module_params = {
+static const struct dwc2_core_params dwc2_module_params = {
.otg_cap = -1,
.otg_ver = -1,
.dma_enable = -1,
@@ -74,7 +74,7 @@ static struct dwc2_core_params dwc2_module_params = {
.max_packet_count = 511,
.host_channels = -1,
.phy_type = -1,
- .phy_utmi_width = 16, /* 16 bits - NOT DETECTABLE */
+ .phy_utmi_width = -1,
.phy_ulpi_ddr = -1,
.phy_ulpi_ext_vbus = -1,
.i2c_enable = -1,
@@ -83,7 +83,7 @@ static struct dwc2_core_params dwc2_module_params = {
.host_ls_low_power_phy_clk = -1,
.ts_dline = -1,
.reload_ctl = -1,
- .ahb_single = -1,
+ .ahbcfg = -1,
};
/**
@@ -101,8 +101,6 @@ static void dwc2_driver_remove(struct pci_dev *dev)
{
struct dwc2_hsotg *hsotg = pci_get_drvdata(dev);
- dev_dbg(&dev->dev, "%s(%p)\n", __func__, dev);
-
dwc2_hcd_remove(hsotg);
pci_disable_device(dev);
}
@@ -125,18 +123,14 @@ static int dwc2_driver_probe(struct pci_dev *dev,
struct dwc2_hsotg *hsotg;
int retval;
- dev_dbg(&dev->dev, "%s(%p)\n", __func__, dev);
-
hsotg = devm_kzalloc(&dev->dev, sizeof(*hsotg), GFP_KERNEL);
if (!hsotg)
return -ENOMEM;
- pci_set_power_state(dev, PCI_D0);
-
hsotg->dev = &dev->dev;
- hsotg->regs = devm_request_and_ioremap(&dev->dev, &dev->resource[0]);
- if (!hsotg->regs)
- return -ENOMEM;
+ hsotg->regs = devm_ioremap_resource(&dev->dev, &dev->resource[0]);
+ if (IS_ERR(hsotg->regs))
+ return PTR_ERR(hsotg->regs);
dev_dbg(&dev->dev, "mapped PA %08lx to VA %p\n",
(unsigned long)pci_resource_start(dev, 0), hsotg->regs);
@@ -153,7 +147,6 @@ static int dwc2_driver_probe(struct pci_dev *dev,
}
pci_set_drvdata(dev, hsotg);
- dev_dbg(&dev->dev, "hsotg=%p\n", hsotg);
return retval;
}
@@ -162,6 +155,10 @@ static DEFINE_PCI_DEVICE_TABLE(dwc2_pci_ids) = {
{
PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, PCI_PRODUCT_ID_HAPS_HSOTG),
},
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_STMICRO,
+ PCI_DEVICE_ID_STMICRO_USB_OTG),
+ },
{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, dwc2_pci_ids);
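
In the probe hunk above, devm_request_and_ioremap() is replaced by devm_ioremap_resource(), which reports failure through an ERR_PTR() (and logs the reason itself) rather than returning NULL, hence the switch from a NULL test returning -ENOMEM to IS_ERR()/PTR_ERR(). The idiom, as used in the hunk:

	hsotg->regs = devm_ioremap_resource(&dev->dev, &dev->resource[0]);
	if (IS_ERR(hsotg->regs))
		return PTR_ERR(hsotg->regs);	/* e.g. -EBUSY or -ENOMEM, already logged */
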
diff --git a/drivers/staging/echo/echo.c b/drivers/staging/echo/echo.c
index 5882139..9597e95 100644
--- a/drivers/staging/echo/echo.c
+++ b/drivers/staging/echo/echo.c
@@ -267,13 +267,13 @@ struct oslec_state *oslec_create(int len, int adaption_mode)
goto error_snap;
ec->cond_met = 0;
- ec->Pstates = 0;
- ec->Ltxacc = ec->Lrxacc = ec->Lcleanacc = ec->Lclean_bgacc = 0;
- ec->Ltx = ec->Lrx = ec->Lclean = ec->Lclean_bg = 0;
+ ec->pstates = 0;
+ ec->ltxacc = ec->lrxacc = ec->lcleanacc = ec->lclean_bgacc = 0;
+ ec->ltx = ec->lrx = ec->lclean = ec->lclean_bg = 0;
ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0;
- ec->Lbgn = ec->Lbgn_acc = 0;
- ec->Lbgn_upper = 200;
- ec->Lbgn_upper_acc = ec->Lbgn_upper << 13;
+ ec->lbgn = ec->lbgn_acc = 0;
+ ec->lbgn_upper = 200;
+ ec->lbgn_upper_acc = ec->lbgn_upper << 13;
return ec;
@@ -314,13 +314,13 @@ void oslec_flush(struct oslec_state *ec)
{
int i;
- ec->Ltxacc = ec->Lrxacc = ec->Lcleanacc = ec->Lclean_bgacc = 0;
- ec->Ltx = ec->Lrx = ec->Lclean = ec->Lclean_bg = 0;
+ ec->ltxacc = ec->lrxacc = ec->lcleanacc = ec->lclean_bgacc = 0;
+ ec->ltx = ec->lrx = ec->lclean = ec->lclean_bg = 0;
ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0;
- ec->Lbgn = ec->Lbgn_acc = 0;
- ec->Lbgn_upper = 200;
- ec->Lbgn_upper_acc = ec->Lbgn_upper << 13;
+ ec->lbgn = ec->lbgn_acc = 0;
+ ec->lbgn_upper = 200;
+ ec->lbgn_upper_acc = ec->lbgn_upper << 13;
ec->nonupdate_dwell = 0;
@@ -332,7 +332,7 @@ void oslec_flush(struct oslec_state *ec)
memset(ec->fir_taps16[i], 0, ec->taps * sizeof(int16_t));
ec->curr_pos = ec->taps - 1;
- ec->Pstates = 0;
+ ec->pstates = 0;
}
EXPORT_SYMBOL_GPL(oslec_flush);
@@ -418,33 +418,33 @@ int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx)
new = (int)tx * (int)tx;
old = (int)ec->fir_state.history[ec->fir_state.curr_pos] *
(int)ec->fir_state.history[ec->fir_state.curr_pos];
- ec->Pstates +=
+ ec->pstates +=
((new - old) + (1 << (ec->log2taps - 1))) >> ec->log2taps;
- if (ec->Pstates < 0)
- ec->Pstates = 0;
+ if (ec->pstates < 0)
+ ec->pstates = 0;
}
/* Calculate short term average levels using simple single pole IIRs */
- ec->Ltxacc += abs(tx) - ec->Ltx;
- ec->Ltx = (ec->Ltxacc + (1 << 4)) >> 5;
- ec->Lrxacc += abs(rx) - ec->Lrx;
- ec->Lrx = (ec->Lrxacc + (1 << 4)) >> 5;
+ ec->ltxacc += abs(tx) - ec->ltx;
+ ec->ltx = (ec->ltxacc + (1 << 4)) >> 5;
+ ec->lrxacc += abs(rx) - ec->lrx;
+ ec->lrx = (ec->lrxacc + (1 << 4)) >> 5;
/* Foreground filter */
ec->fir_state.coeffs = ec->fir_taps16[0];
echo_value = fir16(&ec->fir_state, tx);
ec->clean = rx - echo_value;
- ec->Lcleanacc += abs(ec->clean) - ec->Lclean;
- ec->Lclean = (ec->Lcleanacc + (1 << 4)) >> 5;
+ ec->lcleanacc += abs(ec->clean) - ec->lclean;
+ ec->lclean = (ec->lcleanacc + (1 << 4)) >> 5;
/* Background filter */
echo_value = fir16(&ec->fir_state_bg, tx);
clean_bg = rx - echo_value;
- ec->Lclean_bgacc += abs(clean_bg) - ec->Lclean_bg;
- ec->Lclean_bg = (ec->Lclean_bgacc + (1 << 4)) >> 5;
+ ec->lclean_bgacc += abs(clean_bg) - ec->lclean_bg;
+ ec->lclean_bg = (ec->lclean_bgacc + (1 << 4)) >> 5;
/* Background Filter adaption */
@@ -455,7 +455,7 @@ int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx)
ec->factor = 0;
ec->shift = 0;
if ((ec->nonupdate_dwell == 0)) {
- int P, logP, shift;
+ int p, logp, shift;
/* Determine:
@@ -490,9 +490,9 @@ int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx)
for a divide versus a top_bit() implementation.
*/
- P = MIN_TX_POWER_FOR_ADAPTION + ec->Pstates;
- logP = top_bit(P) + ec->log2taps;
- shift = 30 - 2 - logP;
+ p = MIN_TX_POWER_FOR_ADAPTION + ec->pstates;
+ logp = top_bit(p) + ec->log2taps;
+ shift = 30 - 2 - logp;
ec->shift = shift;
lms_adapt_bg(ec, clean_bg, shift);
@@ -502,7 +502,7 @@ int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx)
near end speech */
ec->adapt = 0;
- if ((ec->Lrx > MIN_RX_POWER_FOR_ADAPTION) && (ec->Lrx > ec->Ltx))
+ if ((ec->lrx > MIN_RX_POWER_FOR_ADAPTION) && (ec->lrx > ec->ltx))
ec->nonupdate_dwell = DTD_HANGOVER;
if (ec->nonupdate_dwell)
ec->nonupdate_dwell--;
@@ -515,9 +515,9 @@ int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx)
if ((ec->adaption_mode & ECHO_CAN_USE_ADAPTION) &&
(ec->nonupdate_dwell == 0) &&
/* (ec->Lclean_bg < 0.875*ec->Lclean) */
- (8 * ec->Lclean_bg < 7 * ec->Lclean) &&
+ (8 * ec->lclean_bg < 7 * ec->lclean) &&
/* (ec->Lclean_bg < 0.125*ec->Ltx) */
- (8 * ec->Lclean_bg < ec->Ltx)) {
+ (8 * ec->lclean_bg < ec->ltx)) {
if (ec->cond_met == 6) {
/*
* BG filter has had better results for 6 consecutive
@@ -541,14 +541,14 @@ int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx)
* non-linearity in the channel.".
*/
- if ((16 * ec->Lclean < ec->Ltx)) {
+ if ((16 * ec->lclean < ec->ltx)) {
/*
* Our e/c has improved echo by at least 24 dB (each
* factor of 2 is 6dB, so 2*2*2*2=16 is the same as
* 6+6+6+6=24dB)
*/
if (ec->adaption_mode & ECHO_CAN_USE_CNG) {
- ec->cng_level = ec->Lbgn;
+ ec->cng_level = ec->lbgn;
/*
* Very elementary comfort noise generation.
@@ -571,10 +571,10 @@ int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx)
} else if (ec->adaption_mode & ECHO_CAN_USE_CLIP) {
/* This sounds much better than CNG */
- if (ec->clean_nlp > ec->Lbgn)
- ec->clean_nlp = ec->Lbgn;
- if (ec->clean_nlp < -ec->Lbgn)
- ec->clean_nlp = -ec->Lbgn;
+ if (ec->clean_nlp > ec->lbgn)
+ ec->clean_nlp = ec->lbgn;
+ if (ec->clean_nlp < -ec->lbgn)
+ ec->clean_nlp = -ec->lbgn;
} else {
/*
* just mute the residual, doesn't sound very
@@ -593,9 +593,9 @@ int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx)
* level signals like near end speech. When combined
* with CNG or especially CLIP seems to work OK.
*/
- if (ec->Lclean < 40) {
- ec->Lbgn_acc += abs(ec->clean) - ec->Lbgn;
- ec->Lbgn = (ec->Lbgn_acc + (1 << 11)) >> 12;
+ if (ec->lclean < 40) {
+ ec->lbgn_acc += abs(ec->clean) - ec->lbgn;
+ ec->lbgn = (ec->lbgn_acc + (1 << 11)) >> 12;
}
}
}
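
Apart from the Lxxx -> lxxx renames, the level-tracking math in the hunks above is unchanged: each short-term level is a single-pole IIR of the rectified signal, ltxacc += |tx| - ltx followed by ltx = (ltxacc + 16) >> 5, i.e. roughly L = L + (|x| - L)/32 with rounding. A small userspace sketch of the same update, handy for checking the fixed-point behaviour outside the kernel (all names here are illustrative):

#include <stdio.h>
#include <stdlib.h>

/* Mirrors the acc/level pairs (ltxacc/ltx, lrxacc/lrx, ...) in oslec_update() */
struct level {
	int acc;	/* running accumulator */
	int lvl;	/* smoothed level */
};

static void level_update(struct level *l, int sample)
{
	l->acc += abs(sample) - l->lvl;
	l->lvl = (l->acc + (1 << 4)) >> 5;	/* divide by 32, rounded */
}

int main(void)
{
	struct level tx = { 0, 0 };
	int i;

	for (i = 0; i < 200; i++)
		level_update(&tx, 1000);	/* constant input: lvl approaches 1000 */

	printf("ltx ~= %d\n", tx.lvl);
	return 0;
}
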
diff --git a/drivers/staging/echo/echo.h b/drivers/staging/echo/echo.h
index 32ca9de..9b08c63 100644
--- a/drivers/staging/echo/echo.h
+++ b/drivers/staging/echo/echo.h
@@ -139,24 +139,24 @@ struct oslec_state {
int adaption_mode;
int cond_met;
- int32_t Pstates;
+ int32_t pstates;
int16_t adapt;
int32_t factor;
int16_t shift;
/* Average levels and averaging filter states */
- int Ltxacc;
- int Lrxacc;
- int Lcleanacc;
- int Lclean_bgacc;
- int Ltx;
- int Lrx;
- int Lclean;
- int Lclean_bg;
- int Lbgn;
- int Lbgn_acc;
- int Lbgn_upper;
- int Lbgn_upper_acc;
+ int ltxacc;
+ int lrxacc;
+ int lcleanacc;
+ int lclean_bgacc;
+ int ltx;
+ int lrx;
+ int lclean;
+ int lclean_bg;
+ int lbgn;
+ int lbgn_acc;
+ int lbgn_upper;
+ int lbgn_upper_acc;
/* foreground and background filter states */
struct fir16_state_t fir_state;
diff --git a/drivers/staging/et131x/README b/drivers/staging/et131x/README
index 05ad085..9272a24 100644
--- a/drivers/staging/et131x/README
+++ b/drivers/staging/et131x/README
@@ -8,7 +8,6 @@ Note, the powermanagement options were removed from the vendor provided
driver as they did not build properly at the time.
TODO:
- - some rx packets have CRC/code/frame errors
- Look at reducing the number of spinlocks
- Simplify code in nic_rx_pkts(), when determining multicast_pkts_rcvd
- Implement NAPI support
diff --git a/drivers/staging/frontier/alphatrack.c b/drivers/staging/frontier/alphatrack.c
index ea9362d..817f837 100644
--- a/drivers/staging/frontier/alphatrack.c
+++ b/drivers/staging/frontier/alphatrack.c
@@ -24,13 +24,14 @@
* raw interrupt reports.
*/
-/* Note: this currently uses a dumb ringbuffer for reads and writes.
+/*
+ * Note: this currently uses a dumb ringbuffer for reads and writes.
* A more optimal driver would cache and kill off outstanding urbs that are
* now invalid, and ignore ones that already were in the queue but valid
* as we only have 30 commands for the alphatrack. In particular this is
* key for getting lights to flash in time as otherwise many commands
* can be buffered up before the light change makes it to the interface.
-*/
+ */
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -100,7 +101,8 @@ static int debug = ALPHATRACK_DEBUG;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug enabled or not");
-/* All interrupt in transfers are collected in a ring buffer to
+/*
+ * All interrupt in transfers are collected in a ring buffer to
* avoid racing conditions and get better performance of the driver.
*/
@@ -109,8 +111,7 @@ static int ring_buffer_size = RING_BUFFER_SIZE;
module_param(ring_buffer_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_buffer_size, "Read ring buffer size");
-/* The write_buffer can one day contain more than one interrupt out transfer.
- */
+/* The write_buffer can one day contain more than one interrupt out transfer.*/
static int write_buffer_size = WRITE_BUFFER_SIZE;
module_param(write_buffer_size, int, S_IRUGO);
@@ -199,9 +200,7 @@ static void usb_alphatrack_abort_transfers(struct usb_alphatrack *dev)
usb_kill_urb(dev->interrupt_out_urb);
}
-/**
- * usb_alphatrack_delete
- */
+/** usb_alphatrack_delete */
static void usb_alphatrack_delete(struct usb_alphatrack *dev)
{
usb_alphatrack_abort_transfers(dev);
@@ -213,9 +212,7 @@ static void usb_alphatrack_delete(struct usb_alphatrack *dev)
kfree(dev); /* fixme oldi_buffer */
}
-/**
- * usb_alphatrack_interrupt_in_callback
- */
+/** usb_alphatrack_interrupt_in_callback */
static void usb_alphatrack_interrupt_in_callback(struct urb *urb)
{
@@ -296,9 +293,7 @@ exit:
wake_up_interruptible(&dev->read_wait);
}
-/**
- * usb_alphatrack_interrupt_out_callback
- */
+/** usb_alphatrack_interrupt_out_callback */
static void usb_alphatrack_interrupt_out_callback(struct urb *urb)
{
struct usb_alphatrack *dev = urb->context;
@@ -315,9 +310,7 @@ static void usb_alphatrack_interrupt_out_callback(struct urb *urb)
wake_up_interruptible(&dev->write_wait);
}
-/**
- * usb_alphatrack_open
- */
+/** usb_alphatrack_open */
static int usb_alphatrack_open(struct inode *inode, struct file *file)
{
struct usb_alphatrack *dev;
@@ -398,9 +391,7 @@ unlock_disconnect_exit:
return retval;
}
-/**
- * usb_alphatrack_release
- */
+/** usb_alphatrack_release */
static int usb_alphatrack_release(struct inode *inode, struct file *file)
{
struct usb_alphatrack *dev;
@@ -447,9 +438,7 @@ exit:
return retval;
}
-/**
- * usb_alphatrack_poll
- */
+/** usb_alphatrack_poll */
static unsigned int usb_alphatrack_poll(struct file *file, poll_table *wait)
{
struct usb_alphatrack *dev;
@@ -468,9 +457,7 @@ static unsigned int usb_alphatrack_poll(struct file *file, poll_table *wait)
return mask;
}
-/**
- * usb_alphatrack_read
- */
+/** usb_alphatrack_read */
static ssize_t usb_alphatrack_read(struct file *file, char __user *buffer,
size_t count, loff_t *ppos)
{
@@ -539,9 +526,7 @@ exit:
return retval;
}
-/**
- * usb_alphatrack_write
- */
+/** usb_alphatrack_write */
static ssize_t usb_alphatrack_write(struct file *file,
const char __user *buffer, size_t count,
loff_t *ppos)
@@ -601,7 +586,7 @@ static ssize_t usb_alphatrack_write(struct file *file,
}
if (dev->interrupt_out_endpoint == NULL) {
- dev_err(&dev->intf->dev, "Endpoint should not be be null!\n");
+ dev_err(&dev->intf->dev, "Endpoint should not be null!\n");
goto unlock_exit;
}
@@ -718,8 +703,10 @@ static int usb_alphatrack_probe(struct usb_interface *intf,
true_size = min(ring_buffer_size, RING_BUFFER_SIZE);
- /* FIXME - there are more usb_alloc routines for dma correctness.
- Needed? */
+ /*
+ * FIXME - there are more usb_alloc routines for dma correctness.
+ * Needed?
+ */
dev->ring_buffer = kmalloc_array(true_size,
sizeof(struct alphatrack_icmd),
GFP_KERNEL);
@@ -840,11 +827,11 @@ static void usb_alphatrack_disconnect(struct usb_interface *intf)
mutex_unlock(&dev->mtx);
usb_alphatrack_delete(dev);
} else {
+ atomic_set(&dev->writes_pending, 0);
dev->intf = NULL;
mutex_unlock(&dev->mtx);
}
- atomic_set(&dev->writes_pending, 0);
mutex_unlock(&disconnect_mutex);
dev_info(&intf->dev, "Alphatrack Surface #%d now disconnected\n",
diff --git a/drivers/staging/frontier/alphatrack.h b/drivers/staging/frontier/alphatrack.h
index 10a7972..418c605 100644
--- a/drivers/staging/frontier/alphatrack.h
+++ b/drivers/staging/frontier/alphatrack.h
@@ -6,7 +6,8 @@ struct alphatrack_ocmd {
unsigned char cmd[8];
};
-/* These are unused by the present driver but provide documentation for the
+/*
+ * These are unused by the present driver but provide documentation for the
* userspace API.
*/
enum LightID {
@@ -58,7 +59,8 @@ enum LightID {
#define BUTTONMASK_PRESS2 0x00008010
#define BUTTONMASK_PRESS3 0x00002020
-/* last 3 bytes are the slider position
+/*
+ * last 3 bytes are the slider position
* 40 is the actual slider moving, the most sig bits, and 3 lsb
*/
diff --git a/drivers/staging/frontier/tranzport.c b/drivers/staging/frontier/tranzport.c
index 04b5e66..074b0e5 100644
--- a/drivers/staging/frontier/tranzport.c
+++ b/drivers/staging/frontier/tranzport.c
@@ -86,7 +86,8 @@ static int debug = TRANZPORT_DEBUG;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug enabled or not");
-/* All interrupt in transfers are collected in a ring buffer to
+/*
+ * All interrupt in transfers are collected in a ring buffer to
* avoid racing conditions and get better performance of the driver.
*/
@@ -95,7 +96,8 @@ static int ring_buffer_size = RING_BUFFER_SIZE;
module_param(ring_buffer_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_buffer_size, "Read ring buffer size in reports");
-/* The write_buffer can one day contain more than one interrupt out transfer.
+/*
+ * The write_buffer can one day contain more than one interrupt out transfer.
*/
static int write_buffer_size = WRITE_BUFFER_SIZE;
module_param(write_buffer_size, int, S_IRUGO);
@@ -175,24 +177,24 @@ static void usb_tranzport_abort_transfers(struct usb_tranzport *dev)
}
#define show_int(value) \
- static ssize_t show_##value(struct device *dev, \
+ static ssize_t value##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct usb_interface *intf = to_usb_interface(dev); \
struct usb_tranzport *t = usb_get_intfdata(intf); \
return sprintf(buf, "%d\n", t->value); \
} \
- static DEVICE_ATTR(value, S_IRUGO, show_##value, NULL);
+ static DEVICE_ATTR_RO(value)
#define show_set_int(value) \
- static ssize_t show_##value(struct device *dev, \
+ static ssize_t value##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct usb_interface *intf = to_usb_interface(dev); \
struct usb_tranzport *t = usb_get_intfdata(intf); \
return sprintf(buf, "%d\n", t->value); \
} \
- static ssize_t set_##value(struct device *dev, \
+ static ssize_t value##_store(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
@@ -204,7 +206,7 @@ static void usb_tranzport_abort_transfers(struct usb_tranzport *dev)
t->value = temp; \
return count; \
} \
- static DEVICE_ATTR(value, S_IWUSR | S_IRUGO, show_##value, set_##value);
+ static DEVICE_ATTR_RW(value)
show_int(enable);
show_int(offline);
@@ -565,9 +567,9 @@ static ssize_t usb_tranzport_read(struct file *file, char __user *buffer,
newwheel = (*dev->ring_buffer)[next_tail].cmd[6];
oldwheel = (*dev->ring_buffer)[dev->ring_tail].cmd[6];
/* if both are wheel events, and
- no buttons have changes (FIXME, do I have to check?),
- and we are the same sign, we can compress +- 7F
- */
+ * no buttons have changes (FIXME, do I have to check?),
+ * and we are the same sign, we can compress +- 7F
+ */
dbg_info(&dev->intf->dev,
"%s: trying to compress: "
"%02x%02x%02x%02x%02x%02x%02x%02x\n",
@@ -729,7 +731,7 @@ static ssize_t usb_tranzport_write(struct file *file,
}
if (dev->interrupt_out_endpoint == NULL) {
- dev_err(&dev->intf->dev, "Endpoint should not be be null!\n");
+ dev_err(&dev->intf->dev, "Endpoint should not be null!\n");
goto unlock_exit;
}
@@ -842,8 +844,10 @@ static int usb_tranzport_probe(struct usb_interface *intf,
ring_buffer_size = RING_BUFFER_SIZE;
true_size = min(ring_buffer_size, RING_BUFFER_SIZE);
- /* FIXME - there are more usb_alloc routines for dma correctness.
- Needed? */
+ /*
+ * FIXME - there are more usb_alloc routines for dma correctness.
+ * Needed?
+ */
dev->ring_buffer =
kmalloc((true_size * sizeof(struct tranzport_cmd)) + 8, GFP_KERNEL);
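
The show_int()/show_set_int() rework above is driven by DEVICE_ATTR_RO() and DEVICE_ATTR_RW(), which derive the callback names from the attribute name (<name>_show, <name>_store); the old show_##value / set_##value spellings would no longer match. Expanded by hand for the driver's "enable" attribute, show_int(enable) now amounts to:

static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct usb_interface *intf = to_usb_interface(dev);
	struct usb_tranzport *t = usb_get_intfdata(intf);

	return sprintf(buf, "%d\n", t->enable);
}
static DEVICE_ATTR_RO(enable);	/* picks up enable_show() by name */
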
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c
index 47cc365..6311b2f 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c
@@ -132,16 +132,16 @@ void card_bootload(struct net_device *dev)
pdata = (u32 *) bootimage;
size = sizeof(bootimage);
- // check for odd word
- if (size & 0x0003) {
+ /* check for odd word */
+ if (size & 0x0003)
size += 4;
- }
- // Provide mutual exclusive access while reading ASIC registers.
+
+ /* Provide mutual exclusive access while reading ASIC registers. */
spin_lock_irqsave(&info->dpram_lock, flags);
- // need to set i/o base address initially and hardware will autoincrement
+ /* need to set i/o base address initially and hardware will autoincrement */
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, FT1000_DPRAM_BASE);
- // write bytes
+ /* write bytes */
for (i = 0; i < (size >> 2); i++) {
templong = *pdata++;
outl(templong, dev->base_addr + FT1000_REG_MAG_DPDATA);
@@ -345,11 +345,10 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
handshake = get_handshake(dev, HANDSHAKE_DSP_BL_READY);
- if (handshake == HANDSHAKE_DSP_BL_READY) {
+ if (handshake == HANDSHAKE_DSP_BL_READY)
put_handshake(dev, HANDSHAKE_DRIVER_READY);
- } else {
+ else
Status = FAILURE;
- }
uiState = STATE_BOOT_DWNLD;
@@ -391,7 +390,7 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
Status = FAILURE;
break;
}
- // Provide mutual exclusive access while reading ASIC registers.
+ /* Provide mutual exclusive access while reading ASIC registers. */
spin_lock_irqsave(&info->dpram_lock,
flags);
/*
@@ -505,15 +504,15 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
break;
case REQUEST_MAILBOX_DATA:
- // Convert length from byte count to word count. Make sure we round up.
+ /* Convert length from byte count to word count. Make sure we round up. */
word_length =
(long)(info->DSPInfoBlklen + 1) / 2;
put_request_value(dev, word_length);
pMailBoxData =
- (struct drv_msg *) & info->DSPInfoBlk[0];
+ (struct drv_msg *) &info->DSPInfoBlk[0];
pUsData =
- (u16 *) & pMailBoxData->data[0];
- // Provide mutual exclusive access while reading ASIC registers.
+ (u16 *) &pMailBoxData->data[0];
+ /* Provide mutual exclusive access while reading ASIC registers. */
spin_lock_irqsave(&info->dpram_lock,
flags);
if (file_version == 5) {
@@ -538,9 +537,9 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
outw(DWNLD_MAG_PS_HDR_LOC,
dev->base_addr +
FT1000_REG_DPRAM_ADDR);
- if (word_length & 0x01) {
+ if (word_length & 0x01)
word_length++;
- }
+
word_length = word_length / 2;
for (; word_length > 0; word_length--) { /* In words */
@@ -565,7 +564,7 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
(u16 *) ((long)pFileStart +
pFileHdr5->
version_data_offset);
- // Provide mutual exclusive access while reading ASIC registers.
+ /* Provide mutual exclusive access while reading ASIC registers. */
spin_lock_irqsave(&info->dpram_lock,
flags);
/*
@@ -692,7 +691,7 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
if (pHdr->portdest == 0x80 /* DspOAM */
&& (pHdr->portsrc == 0x00 /* Driver */
- || pHdr->portsrc == 0x10 /* FMM */ )) {
+ || pHdr->portsrc == 0x10 /* FMM */)) {
uiState = STATE_SECTION_PROV;
} else {
DEBUG(1,
@@ -711,13 +710,13 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
pHdr = (struct pseudo_hdr *) pUcFile;
if (pHdr->checksum == hdr_checksum(pHdr)) {
- if (pHdr->portdest != 0x80 /* Dsp OAM */ ) {
+ if (pHdr->portdest != 0x80 /* Dsp OAM */) {
uiState = STATE_DONE_PROV;
break;
}
usHdrLength = ntohs(pHdr->length); /* Byte length for PROV records */
- // Get buffer for provisioning data
+ /* Get buffer for provisioning data */
pbuffer =
kmalloc((usHdrLength + sizeof(struct pseudo_hdr)),
GFP_ATOMIC);
@@ -725,7 +724,7 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
memcpy(pbuffer, (void *)pUcFile,
(u32) (usHdrLength +
sizeof(struct pseudo_hdr)));
- // link provisioning data
+ /* link provisioning data */
pprov_record =
kmalloc(sizeof(struct prov_record),
GFP_ATOMIC);
@@ -735,7 +734,7 @@ int card_download(struct net_device *dev, const u8 *pFileStart,
list_add_tail(&pprov_record->
list,
&info->prov_list);
- // Move to next entry if available
+ /* Move to next entry if available */
pUcFile =
(u8 *) ((unsigned long) pUcFile +
(unsigned long) ((usHdrLength + 1) & 0xFFFFFFFE) + sizeof(struct pseudo_hdr));
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
index 94e426e..b2330f1 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
@@ -164,7 +164,7 @@ static const struct file_operations ft1000_proc_fops = {
static int ft1000NotifyProc(struct notifier_block *this, unsigned long event,
void *ptr)
{
- struct net_device *dev = ptr;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct ft1000_info *info;
info = netdev_priv(dev);
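
The one-line change above tracks the netdev-notifier API change that passes a struct netdev_notifier_info pointer to the callback, so the net_device must be retrieved with netdev_notifier_info_to_dev() instead of casting ptr directly. A minimal notifier sketch under that convention (the callback name and printout are illustrative):

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int sample_netdev_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_CHANGENAME)
		pr_info("%s: device renamed\n", dev->name);

	return NOTIFY_DONE;
}

static struct notifier_block sample_netdev_nb = {
	.notifier_call = sample_netdev_event,
};

/* registered elsewhere with register_netdevice_notifier(&sample_netdev_nb) */
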
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c b/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
index 3251d2e..68a55ce 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
@@ -1,29 +1,31 @@
-//---------------------------------------------------------------------------
-// FT1000 driver for Flarion Flash OFDM NIC Device
-//
-// Copyright (C) 2006 Flarion Technologies, All rights reserved.
-//
-// This program is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License as published by the Free
-// Software Foundation; either version 2 of the License, or (at your option) any
-// later version. This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-// more details. You should have received a copy of the GNU General Public
-// License along with this program; if not, write to the
-// Free Software Foundation, Inc., 59 Temple Place -
-// Suite 330, Boston, MA 02111-1307, USA.
-//---------------------------------------------------------------------------
-//
-// File: ft1000_chdev.c
-//
-// Description: Custom character device dispatch routines.
-//
-// History:
-// 8/29/02 Whc Ported to Linux.
-// 6/05/06 Whc Porting to Linux 2.6.9
-//
-//---------------------------------------------------------------------------
+/*
+*---------------------------------------------------------------------------
+* FT1000 driver for Flarion Flash OFDM NIC Device
+*
+* Copyright (C) 2006 Flarion Technologies, All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the Free
+* Software Foundation; either version 2 of the License, or (at your option) any
+* later version. This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details. You should have received a copy of the GNU General Public
+* License along with this program; if not, write to the
+* Free Software Foundation, Inc., 59 Temple Place -
+* Suite 330, Boston, MA 02111-1307, USA.
+*---------------------------------------------------------------------------
+*
+* File: ft1000_chdev.c
+*
+* Description: Custom character device dispatch routines.
+*
+* History:
+* 8/29/02 Whc Ported to Linux.
+* 6/05/06 Whc Porting to Linux 2.6.9
+*
+*---------------------------------------------------------------------------
+*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -38,25 +40,24 @@
static int ft1000_flarion_cnt = 0;
-static int ft1000_open (struct inode *inode, struct file *file);
+static int ft1000_open(struct inode *inode, struct file *file);
static unsigned int ft1000_poll_dev(struct file *file, poll_table *wait);
static long ft1000_ioctl(struct file *file, unsigned int command,
unsigned long argument);
-static int ft1000_release (struct inode *inode, struct file *file);
+static int ft1000_release(struct inode *inode, struct file *file);
-// List to free receive command buffer pool
+/* List to free receive command buffer pool */
struct list_head freercvpool;
-// lock to arbitrate free buffer list for receive command data
+/* lock to arbitrate free buffer list for receive command data */
spinlock_t free_buff_lock;
int numofmsgbuf = 0;
-//
-// Table of entry-point routines for char device
-//
-static const struct file_operations ft1000fops =
-{
+/*
+* Table of entry-point routines for char device
+*/
+static const struct file_operations ft1000fops = {
.unlocked_ioctl = ft1000_ioctl,
.poll = ft1000_poll_dev,
.open = ft1000_open,
@@ -64,34 +65,35 @@ static const struct file_operations ft1000fops =
.llseek = no_llseek,
};
-//---------------------------------------------------------------------------
-// Function: ft1000_get_buffer
-//
-// Parameters:
-//
-// Returns:
-//
-// Description:
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/*
+*---------------------------------------------------------------------------
+* Function: ft1000_get_buffer
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+* Notes:
+*
+*---------------------------------------------------------------------------
+*/
struct dpram_blk *ft1000_get_buffer(struct list_head *bufflist)
{
unsigned long flags;
struct dpram_blk *ptr;
spin_lock_irqsave(&free_buff_lock, flags);
- // Check if buffer is available
- if ( list_empty(bufflist) ) {
+ /* Check if buffer is available */
+ if (list_empty(bufflist)) {
DEBUG("ft1000_get_buffer: No more buffer - %d\n", numofmsgbuf);
ptr = NULL;
- }
- else {
+ } else {
numofmsgbuf--;
ptr = list_entry(bufflist->next, struct dpram_blk, list);
list_del(&ptr->list);
- //DEBUG("ft1000_get_buffer: number of free msg buffers = %d\n", numofmsgbuf);
+ /* DEBUG("ft1000_get_buffer: number of free msg buffers = %d\n", numofmsgbuf); */
}
spin_unlock_irqrestore(&free_buff_lock, flags);
@@ -101,42 +103,46 @@ struct dpram_blk *ft1000_get_buffer(struct list_head *bufflist)
-//---------------------------------------------------------------------------
-// Function: ft1000_free_buffer
-//
-// Parameters:
-//
-// Returns:
-//
-// Description:
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/*
+*---------------------------------------------------------------------------
+* Function: ft1000_free_buffer
+*
+* Parameters:
+*
+* Returns:
+*
+* Description:
+*
+* Notes:
+*
+*---------------------------------------------------------------------------
+*/
void ft1000_free_buffer(struct dpram_blk *pdpram_blk, struct list_head *plist)
{
unsigned long flags;
spin_lock_irqsave(&free_buff_lock, flags);
- // Put memory back to list
+ /* Put memory back to list */
list_add_tail(&pdpram_blk->list, plist);
numofmsgbuf++;
- //DEBUG("ft1000_free_buffer: number of free msg buffers = %d\n", numofmsgbuf);
+ /*DEBUG("ft1000_free_buffer: number of free msg buffers = %d\n", numofmsgbuf); */
spin_unlock_irqrestore(&free_buff_lock, flags);
}
-//---------------------------------------------------------------------------
-// Function: ft1000_CreateDevice
-//
-// Parameters: dev - pointer to adapter object
-//
-// Returns: 0 if successful
-//
-// Description: Creates a private char device.
-//
-// Notes: Only called by init_module().
-//
-//---------------------------------------------------------------------------
+/*
+*---------------------------------------------------------------------------
+* Function: ft1000_CreateDevice
+*
+* Parameters: dev - pointer to adapter object
+*
+* Returns: 0 if successful
+*
+* Description: Creates a private char device.
+*
+* Notes: Only called by init_module().
+*
+*---------------------------------------------------------------------------
+*/
int ft1000_create_dev(struct ft1000_usb *dev)
{
int result;
@@ -144,20 +150,19 @@ int ft1000_create_dev(struct ft1000_usb *dev)
struct dentry *dir, *file;
struct ft1000_debug_dirs *tmp;
- // make a new device name
+ /* make a new device name */
sprintf(dev->DeviceName, "%s%d", "FT1000_", dev->CardNumber);
DEBUG("%s: number of instance = %d\n", __func__, ft1000_flarion_cnt);
DEBUG("DeviceCreated = %x\n", dev->DeviceCreated);
- if (dev->DeviceCreated)
- {
+ if (dev->DeviceCreated) {
DEBUG("%s: \"%s\" already registered\n", __func__, dev->DeviceName);
return -EIO;
}
- // register the device
+ /* register the device */
DEBUG("%s: \"%s\" debugfs device registration\n", __func__, dev->DeviceName);
tmp = kmalloc(sizeof(struct ft1000_debug_dirs), GFP_KERNEL);
@@ -186,7 +191,7 @@ int ft1000_create_dev(struct ft1000_usb *dev)
DEBUG("%s: registered debugfs directory \"%s\"\n", __func__, dev->DeviceName);
- // initialize application information
+ /* initialize application information */
dev->appcnt = 0;
for (i=0; i<MAX_NUM_APP; i++) {
dev->app_info[i].nTxMsg = 0;
@@ -198,7 +203,7 @@ int ft1000_create_dev(struct ft1000_usb *dev)
dev->app_info[i].DspBCMsgFlag = 0;
dev->app_info[i].NumOfMsg = 0;
init_waitqueue_head(&dev->app_info[i].wait_dpram_msg);
- INIT_LIST_HEAD (&dev->app_info[i].app_sqlist);
+ INIT_LIST_HEAD(&dev->app_info[i].app_sqlist);
}
dev->DeviceCreated = TRUE;
@@ -214,16 +219,18 @@ fail:
return result;
}
-//---------------------------------------------------------------------------
-// Function: ft1000_DestroyDeviceDEBUG
-//
-// Parameters: dev - pointer to adapter object
-//
-// Description: Destroys a private char device.
-//
-// Notes: Only called by cleanup_module().
-//
-//---------------------------------------------------------------------------
+/*
+*---------------------------------------------------------------------------
+* Function: ft1000_DestroyDeviceDEBUG
+*
+* Parameters: dev - pointer to adapter object
+*
+* Description: Destroys a private char device.
+*
+* Notes: Only called by cleanup_module().
+*
+*---------------------------------------------------------------------------
+*/
void ft1000_destroy_dev(struct net_device *netdev)
{
struct ft1000_info *info = netdev_priv(netdev);
@@ -238,8 +245,7 @@ void ft1000_destroy_dev(struct net_device *netdev)
- if (dev->DeviceCreated)
- {
+ if (dev->DeviceCreated) {
ft1000_flarion_cnt--;
list_for_each_safe(pos, q, &dev->nodes.list) {
dir = list_entry(pos, struct ft1000_debug_dirs, list);
@@ -253,7 +259,7 @@ void ft1000_destroy_dev(struct net_device *netdev)
DEBUG("%s: unregistered device \"%s\"\n", __func__,
dev->DeviceName);
- // Make sure we free any memory reserve for slow Queue
+ /* Make sure we free any memory reserve for slow Queue */
for (i=0; i<MAX_NUM_APP; i++) {
while (list_empty(&dev->app_info[i].app_sqlist) == 0) {
pdpram_blk = list_entry(dev->app_info[i].app_sqlist.next, struct dpram_blk, list);
@@ -264,7 +270,7 @@ void ft1000_destroy_dev(struct net_device *netdev)
wake_up_interruptible(&dev->app_info[i].wait_dpram_msg);
}
- // Remove buffer allocated for receive command data
+ /* Remove buffer allocated for receive command data */
if (ft1000_flarion_cnt == 0) {
while (list_empty(&freercvpool) == 0) {
ptr = list_entry(freercvpool.next, struct dpram_blk, list);
@@ -279,17 +285,19 @@ void ft1000_destroy_dev(struct net_device *netdev)
}
-//---------------------------------------------------------------------------
-// Function: ft1000_open
-//
-// Parameters:
-//
-// Description:
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-static int ft1000_open (struct inode *inode, struct file *file)
+/*
+*---------------------------------------------------------------------------
+* Function: ft1000_open
+*
+* Parameters:
+*
+* Description:
+*
+* Notes:
+*
+*---------------------------------------------------------------------------
+*/
+static int ft1000_open(struct inode *inode, struct file *file)
{
struct ft1000_info *info;
struct ft1000_usb *dev = (struct ft1000_usb *)inode->i_private;
@@ -301,22 +309,22 @@ static int ft1000_open (struct inode *inode, struct file *file)
info = file->private_data = netdev_priv(dev->net);
- DEBUG("f_owner = %p number of application = %d\n", (&file->f_owner), dev->appcnt );
+ DEBUG("f_owner = %p number of application = %d\n", (&file->f_owner), dev->appcnt);
- // Check if maximum number of application exceeded
+ /* Check if maximum number of application exceeded */
if (dev->appcnt > MAX_NUM_APP) {
DEBUG("Maximum number of application exceeded\n");
return -EACCES;
}
- // Search for available application info block
+ /* Search for available application info block */
for (i=0; i<MAX_NUM_APP; i++) {
- if ( (dev->app_info[i].fileobject == NULL) ) {
+ if ((dev->app_info[i].fileobject == NULL)) {
break;
}
}
- // Fail due to lack of application info block
+ /* Fail due to lack of application info block */
if (i == MAX_NUM_APP) {
DEBUG("Could not find an application info block\n");
return -EACCES;
@@ -334,16 +342,18 @@ static int ft1000_open (struct inode *inode, struct file *file)
}
-//---------------------------------------------------------------------------
-// Function: ft1000_poll_dev
-//
-// Parameters:
-//
-// Description:
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
+/*
+*---------------------------------------------------------------------------
+* Function: ft1000_poll_dev
+*
+* Parameters:
+*
+* Description:
+*
+* Notes:
+*
+*---------------------------------------------------------------------------
+*/
static unsigned int ft1000_poll_dev(struct file *file, poll_table *wait)
{
@@ -352,24 +362,24 @@ static unsigned int ft1000_poll_dev(struct file *file, poll_table *wait)
struct ft1000_usb *dev = info->priv;
int i;
- //DEBUG("ft1000_poll_dev called\n");
+ /* DEBUG("ft1000_poll_dev called\n"); */
if (ft1000_flarion_cnt == 0) {
DEBUG("FT1000:ft1000_poll_dev called when ft1000_flarion_cnt is zero\n");
return (-EBADF);
}
- // Search for matching file object
+ /* Search for matching file object */
for (i=0; i<MAX_NUM_APP; i++) {
- if ( dev->app_info[i].fileobject == &file->f_owner) {
- //DEBUG("FT1000:ft1000_ioctl: Message is for AppId = %d\n", dev->app_info[i].app_id);
+ if (dev->app_info[i].fileobject == &file->f_owner) {
+ /* DEBUG("FT1000:ft1000_ioctl: Message is for AppId = %d\n", dev->app_info[i].app_id); */
break;
}
}
- // Could not find application info block
+ /* Could not find application info block */
if (i == MAX_NUM_APP) {
DEBUG("FT1000:ft1000_ioctl:Could not find application info block\n");
- return ( -EACCES );
+ return (-EACCES);
}
if (list_empty(&dev->app_info[i].app_sqlist) == 0) {
@@ -377,23 +387,25 @@ static unsigned int ft1000_poll_dev(struct file *file, poll_table *wait)
return(POLLIN | POLLRDNORM | POLLPRI);
}
- poll_wait (file, &dev->app_info[i].wait_dpram_msg, wait);
- //DEBUG("FT1000:ft1000_poll_dev:Polling for data from DSP\n");
+ poll_wait(file, &dev->app_info[i].wait_dpram_msg, wait);
+ /* DEBUG("FT1000:ft1000_poll_dev:Polling for data from DSP\n"); */
return (0);
}
-//---------------------------------------------------------------------------
-// Function: ft1000_ioctl
-//
-// Parameters:
-//
-// Description:
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-static long ft1000_ioctl (struct file *file, unsigned int command,
+/*
+*---------------------------------------------------------------------------
+* Function: ft1000_ioctl
+*
+* Parameters:
+*
+* Description:
+*
+* Notes:
+*
+*---------------------------------------------------------------------------
+*/
+static long ft1000_ioctl(struct file *file, unsigned int command,
unsigned long argument)
{
void __user *argp = (void __user *)argument;
@@ -417,21 +429,21 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
unsigned short ledStat=0;
unsigned short conStat=0;
- //DEBUG("ft1000_ioctl called\n");
+ /* DEBUG("ft1000_ioctl called\n"); */
if (ft1000_flarion_cnt == 0) {
DEBUG("FT1000:ft1000_ioctl called when ft1000_flarion_cnt is zero\n");
return (-EBADF);
}
- //DEBUG("FT1000:ft1000_ioctl:command = 0x%x argument = 0x%8x\n", command, (u32)argument);
+ /* DEBUG("FT1000:ft1000_ioctl:command = 0x%x argument = 0x%8x\n", command, (u32)argument); */
info = file->private_data;
ft1000dev = info->priv;
cmd = _IOC_NR(command);
- //DEBUG("FT1000:ft1000_ioctl:cmd = 0x%x\n", cmd);
+ /* DEBUG("FT1000:ft1000_ioctl:cmd = 0x%x\n", cmd); */
- // process the command
+ /* process the command */
switch (cmd) {
case IOCTL_REGISTER_CMD:
DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_REGISTER called\n");
@@ -441,7 +453,7 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
break;
}
if (tempword == DSPBCMSGID) {
- // Search for matching file object
+ /* Search for matching file object */
for (i=0; i<MAX_NUM_APP; i++) {
if (ft1000dev->app_info[i].fileobject == &file->f_owner) {
ft1000dev->app_info[i].DspBCMsgFlag = 1;
@@ -457,7 +469,7 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
get_ver_data.drv_ver = FT1000_DRV_VER;
- if (copy_to_user(argp, &get_ver_data, sizeof(get_ver_data)) ) {
+ if (copy_to_user(argp, &get_ver_data, sizeof(get_ver_data))) {
DEBUG("FT1000:ft1000_ioctl: copy fault occurred\n");
result = -EFAULT;
break;
@@ -467,20 +479,20 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
break;
case IOCTL_CONNECT:
- // Connect Message
+ /* Connect Message */
DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_CONNECT\n");
ConnectionMsg[79] = 0xfc;
card_send_command(ft1000dev, (unsigned short *)ConnectionMsg, 0x4c);
break;
case IOCTL_DISCONNECT:
- // Disconnect Message
+ /* Disconnect Message */
DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_DISCONNECT\n");
ConnectionMsg[79] = 0xfd;
card_send_command(ft1000dev, (unsigned short *)ConnectionMsg, 0x4c);
break;
case IOCTL_GET_DSP_STAT_CMD:
- //DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_GET_DSP_STAT called\n");
+ /* DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_GET_DSP_STAT called\n"); */
memset(&get_stat_data, 0, sizeof(get_stat_data));
memcpy(get_stat_data.DspVer, info->DspVer, DSPVERSZ);
memcpy(get_stat_data.HwSerNum, info->HwSerNum, HWSERNUMSZ);
@@ -494,8 +506,7 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_CON_STATE, (u8 *)&conStat, FT1000_MAG_DSP_CON_STATE_INDX);
get_stat_data.ConStat = ntohs(conStat);
DEBUG("FT1000:ft1000_ioctl: ConStat = 0x%x\n", get_stat_data.ConStat);
- }
- else {
+ } else {
get_stat_data.ConStat = 0x0f;
}
@@ -504,10 +515,10 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
get_stat_data.nRxPkts = info->stats.rx_packets;
get_stat_data.nTxBytes = info->stats.tx_bytes;
get_stat_data.nRxBytes = info->stats.rx_bytes;
- do_gettimeofday ( &tv );
+ do_gettimeofday(&tv);
get_stat_data.ConTm = (u32)(tv.tv_sec - info->ConTm);
DEBUG("Connection Time = %d\n", (int)get_stat_data.ConTm);
- if (copy_to_user(argp, &get_stat_data, sizeof(get_stat_data)) ) {
+ if (copy_to_user(argp, &get_stat_data, sizeof(get_stat_data))) {
DEBUG("FT1000:ft1000_ioctl: copy fault occurred\n");
result = -EFAULT;
break;
@@ -517,7 +528,7 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
case IOCTL_SET_DPRAM_CMD:
{
IOCTL_DPRAM_BLK *dpram_data = NULL;
- //IOCTL_DPRAM_COMMAND dpram_command;
+ /* IOCTL_DPRAM_COMMAND dpram_command; */
u16 qtype;
u16 msgsz;
struct pseudo_hdr *ppseudo_hdr;
@@ -526,7 +537,7 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
u16 app_index;
u16 status;
- //DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_SET_DPRAM called\n");
+ /* DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_SET_DPRAM called\n");*/
if (ft1000_flarion_cnt == 0) {
@@ -545,12 +556,12 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
if (info->CardReady) {
- //DEBUG("FT1000:ft1000_ioctl: try to SET_DPRAM \n");
+ /* DEBUG("FT1000:ft1000_ioctl: try to SET_DPRAM \n"); */
- // Get the length field to see how many bytes to copy
+ /* Get the length field to see how many bytes to copy */
result = get_user(msgsz, (__u16 __user *)argp);
- msgsz = ntohs (msgsz);
- //DEBUG("FT1000:ft1000_ioctl: length of message = %d\n", msgsz);
+ msgsz = ntohs(msgsz);
+ /* DEBUG("FT1000:ft1000_ioctl: length of message = %d\n", msgsz); */
if (msgsz > MAX_CMD_SQSIZE) {
DEBUG("FT1000:ft1000_ioctl: bad message length = %d\n", msgsz);
@@ -563,12 +574,11 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
if (!dpram_data)
break;
- if ( copy_from_user(dpram_data, argp, msgsz+2) ) {
+ if (copy_from_user(dpram_data, argp, msgsz+2)) {
DEBUG("FT1000:ft1000_ChIoctl: copy fault occurred\n");
result = -EFAULT;
- }
- else {
- // Check if this message came from a registered application
+ } else {
+ /* Check if this message came from a registered application */
for (i=0; i<MAX_NUM_APP; i++) {
if (ft1000dev->app_info[i].fileobject == &file->f_owner) {
break;
@@ -582,28 +592,27 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
}
app_index = i;
- // Check message qtype type which is the lower byte within qos_class
+ /* Check message qtype type which is the lower byte within qos_class */
qtype = ntohs(dpram_data->pseudohdr.qos_class) & 0xff;
- //DEBUG("FT1000_ft1000_ioctl: qtype = %d\n", qtype);
+ /* DEBUG("FT1000_ft1000_ioctl: qtype = %d\n", qtype); */
if (qtype) {
- }
- else {
- // Put message into Slow Queue
- // Only put a message into the DPRAM if msg doorbell is available
+ } else {
+ /* Put message into Slow Queue */
+ /* Only put a message into the DPRAM if msg doorbell is available */
status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
- //DEBUG("FT1000_ft1000_ioctl: READ REGISTER tempword=%x\n", tempword);
+ /* DEBUG("FT1000_ft1000_ioctl: READ REGISTER tempword=%x\n", tempword); */
if (tempword & FT1000_DB_DPRAM_TX) {
- // Suspend for 2ms and try again due to DSP doorbell busy
+ /* Suspend for 2ms and try again due to DSP doorbell busy */
mdelay(2);
status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
if (tempword & FT1000_DB_DPRAM_TX) {
- // Suspend for 1ms and try again due to DSP doorbell busy
+ /* Suspend for 1ms and try again due to DSP doorbell busy */
mdelay(1);
status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
if (tempword & FT1000_DB_DPRAM_TX) {
status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
if (tempword & FT1000_DB_DPRAM_TX) {
- // Suspend for 3ms and try again due to DSP doorbell busy
+ /* Suspend for 3ms and try again due to DSP doorbell busy */
mdelay(3);
status = ft1000_read_register(ft1000dev, &tempword, FT1000_REG_DOORBELL);
if (tempword & FT1000_DB_DPRAM_TX) {
@@ -617,11 +626,11 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
}
}
- //DEBUG("FT1000_ft1000_ioctl: finished reading register\n");
+ /*DEBUG("FT1000_ft1000_ioctl: finished reading register\n"); */
- // Make sure we are within the limits of the slow queue memory limitation
- if ( (msgsz < MAX_CMD_SQSIZE) && (msgsz > PSEUDOSZ) ) {
- // Need to put sequence number plus new checksum for message
+ /* Make sure we are within the limits of the slow queue memory limitation */
+ if ((msgsz < MAX_CMD_SQSIZE) && (msgsz > PSEUDOSZ)) {
+ /* Need to put sequence number plus new checksum for message */
pmsg = (u16 *)&dpram_data->pseudohdr;
ppseudo_hdr = (struct pseudo_hdr *)pmsg;
total_len = msgsz+2;
@@ -629,15 +638,15 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
total_len++;
}
- // Insert slow queue sequence number
+ /* Insert slow queue sequence number */
ppseudo_hdr->seq_num = info->squeseqnum++;
ppseudo_hdr->portsrc = ft1000dev->app_info[app_index].app_id;
- // Calculate new checksum
+ /* Calculate new checksum */
ppseudo_hdr->checksum = *pmsg++;
- //DEBUG("checksum = 0x%x\n", ppseudo_hdr->checksum);
+ /* DEBUG("checksum = 0x%x\n", ppseudo_hdr->checksum); */
for (i=1; i<7; i++) {
ppseudo_hdr->checksum ^= *pmsg++;
- //DEBUG("checksum = 0x%x\n", ppseudo_hdr->checksum);
+ /* DEBUG("checksum = 0x%x\n", ppseudo_hdr->checksum); */
}
pmsg++;
ppseudo_hdr = (struct pseudo_hdr *)pmsg;
@@ -645,14 +654,12 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
ft1000dev->app_info[app_index].nTxMsg++;
- }
- else {
+ } else {
result = -EINVAL;
}
}
}
- }
- else {
+ } else {
DEBUG("FT1000:ft1000_ioctl: Card not ready take messages\n");
result = -EACCES;
}
@@ -666,21 +673,21 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
IOCTL_DPRAM_BLK __user *pioctl_dpram;
int msglen;
- //DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_GET_DPRAM called\n");
+ /* DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_GET_DPRAM called\n"); */
if (ft1000_flarion_cnt == 0) {
return (-EBADF);
}
- // Search for matching file object
+ /* Search for matching file object */
for (i=0; i<MAX_NUM_APP; i++) {
if (ft1000dev->app_info[i].fileobject == &file->f_owner) {
- //DEBUG("FT1000:ft1000_ioctl: Message is for AppId = %d\n", ft1000dev->app_info[i].app_id);
+ /*DEBUG("FT1000:ft1000_ioctl: Message is for AppId = %d\n", ft1000dev->app_info[i].app_id); */
break;
}
}
- // Could not find application info block
+ /* Could not find application info block */
if (i == MAX_NUM_APP) {
DEBUG("FT1000:ft1000_ioctl:Could not find application info block\n");
result = -EBADF;
@@ -690,30 +697,29 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
result = 0;
pioctl_dpram = argp;
if (list_empty(&ft1000dev->app_info[i].app_sqlist) == 0) {
- //DEBUG("FT1000:ft1000_ioctl:Message detected in slow queue\n");
+ /* DEBUG("FT1000:ft1000_ioctl:Message detected in slow queue\n"); */
spin_lock_irqsave(&free_buff_lock, flags);
pdpram_blk = list_entry(ft1000dev->app_info[i].app_sqlist.next, struct dpram_blk, list);
list_del(&pdpram_blk->list);
ft1000dev->app_info[i].NumOfMsg--;
- //DEBUG("FT1000:ft1000_ioctl:NumOfMsg for app %d = %d\n", i, ft1000dev->app_info[i].NumOfMsg);
+ /* DEBUG("FT1000:ft1000_ioctl:NumOfMsg for app %d = %d\n", i, ft1000dev->app_info[i].NumOfMsg); */
spin_unlock_irqrestore(&free_buff_lock, flags);
msglen = ntohs(*(u16 *)pdpram_blk->pbuffer) + PSEUDOSZ;
result = get_user(msglen, &pioctl_dpram->total_len);
if (result)
break;
msglen = htons(msglen);
- //DEBUG("FT1000:ft1000_ioctl:msg length = %x\n", msglen);
- if(copy_to_user (&pioctl_dpram->pseudohdr, pdpram_blk->pbuffer, msglen))
- {
+ /* DEBUG("FT1000:ft1000_ioctl:msg length = %x\n", msglen); */
+ if (copy_to_user(&pioctl_dpram->pseudohdr, pdpram_blk->pbuffer, msglen)) {
DEBUG("FT1000:ft1000_ioctl: copy fault occurred\n");
- result = -EFAULT;
- break;
+ result = -EFAULT;
+ break;
}
ft1000_free_buffer(pdpram_blk, &freercvpool);
result = msglen;
}
- //DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_GET_DPRAM no message\n");
+ /* DEBUG("FT1000:ft1000_ioctl: IOCTL_FT1000_GET_DPRAM no message\n"); */
}
break;
@@ -726,17 +732,19 @@ static long ft1000_ioctl (struct file *file, unsigned int command,
return result;
}
-//---------------------------------------------------------------------------
-// Function: ft1000_release
-//
-// Parameters:
-//
-// Description:
-//
-// Notes:
-//
-//---------------------------------------------------------------------------
-static int ft1000_release (struct inode *inode, struct file *file)
+/*
+*---------------------------------------------------------------------------
+* Function: ft1000_release
+*
+* Parameters:
+*
+* Description:
+*
+* Notes:
+*
+*---------------------------------------------------------------------------
+*/
+static int ft1000_release(struct inode *inode, struct file *file)
{
struct ft1000_info *info;
struct net_device *dev;
@@ -755,10 +763,10 @@ static int ft1000_release (struct inode *inode, struct file *file)
return (-EBADF);
}
- // Search for matching file object
+ /* Search for matching file object */
for (i=0; i<MAX_NUM_APP; i++) {
- if ( ft1000dev->app_info[i].fileobject == &file->f_owner) {
- //DEBUG("FT1000:ft1000_ioctl: Message is for AppId = %d\n", ft1000dev->app_info[i].app_id);
+ if (ft1000dev->app_info[i].fileobject == &file->f_owner) {
+ /* DEBUG("FT1000:ft1000_ioctl: Message is for AppId = %d\n", ft1000dev->app_info[i].app_id); */
break;
}
}
@@ -773,11 +781,10 @@ static int ft1000_release (struct inode *inode, struct file *file)
ft1000_free_buffer(pdpram_blk, &freercvpool);
}
- // initialize application information
+ /* initialize application information */
ft1000dev->appcnt--;
DEBUG("ft1000_chdev:%s:appcnt = %d\n", __FUNCTION__, ft1000dev->appcnt);
ft1000dev->app_info[i].fileobject = NULL;
return 0;
}
-
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h b/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h
index 3f4207f..24b8d77 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h
@@ -1,91 +1,89 @@
-//---------------------------------------------------------------------------
-// FT1000 driver for Flarion Flash OFDM NIC Device
-//
-// Copyright (C) 2002 Flarion Technologies, All rights reserved.
-//
-// This program is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License as published by the Free
-// Software Foundation; either version 2 of the License, or (at your option) any
-// later version. This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-// more details. You should have received a copy of the GNU General Public
-// License along with this program; if not, write to the
-// Free Software Foundation, Inc., 59 Temple Place -
-// Suite 330, Boston, MA 02111-1307, USA.
-//---------------------------------------------------------------------------
-//
-// File: ft1000_ioctl.h
-//
-// Description: Common structures and defines relating to IOCTL
-//
-// History:
-// 11/5/02 Whc Created.
-//
-//---------------------------------------------------------------------------//---------------------------------------------------------------------------
+/*
+*---------------------------------------------------------------------------
+* FT1000 driver for Flarion Flash OFDM NIC Device
+*
+* Copyright (C) 2002 Flarion Technologies, All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the Free
+* Software Foundation; either version 2 of the License, or (at your option) any
+* later version. This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details. You should have received a copy of the GNU General Public
+* License along with this program; if not, write to the
+* Free Software Foundation, Inc., 59 Temple Place -
+* Suite 330, Boston, MA 02111-1307, USA.
+*---------------------------------------------------------------------------
+*
+* File: ft1000_ioctl.h
+*
+* Description: Common structures and defines relating to IOCTL
+*
+* History:
+* 11/5/02 Whc Created.
+*
+*---------------------------------------------------------------------------
+*/
#ifndef _FT1000IOCTLH_
#define _FT1000IOCTLH_
-typedef struct _IOCTL_GET_VER
-{
+typedef struct _IOCTL_GET_VER {
unsigned long drv_ver;
} __attribute__ ((packed)) IOCTL_GET_VER, *PIOCTL_GET_VER;
-//Data structure for Dsp statistics
-typedef struct _IOCTL_GET_DSP_STAT
-{
- unsigned char DspVer[DSPVERSZ]; // DSP version number
- unsigned char HwSerNum[HWSERNUMSZ]; // Hardware Serial Number
- unsigned char Sku[SKUSZ]; // SKU
- unsigned char eui64[EUISZ]; // EUI64
- unsigned short ConStat; // Connection Status
- // Bits 0-3 = Connection Status Field
- // 0000=Idle (Disconnect)
- // 0001=Searching
- // 0010=Active (Connected)
- // 0011=Waiting for L2 down
- // 0100=Sleep
- unsigned short LedStat; // Led Status
- // Bits 0-3 = Signal Strength Field
- // 0000 = -105dBm to -92dBm
- // 0001 = -92dBm to -85dBm
- // 0011 = -85dBm to -75dBm
- // 0111 = -75dBm to -50dBm
- // 1111 = -50dBm to 0dBm
- // Bits 4-7 = Reserved
- // Bits 8-11 = SNR Field
- // 0000 = <2dB
- // 0001 = 2dB to 8dB
- // 0011 = 8dB to 15dB
- // 0111 = 15dB to 22dB
- // 1111 = >22dB
- // Bits 12-15 = Reserved
- unsigned long nTxPkts; // Number of packets transmitted from host to dsp
- unsigned long nRxPkts; // Number of packets received from dsp to host
- unsigned long nTxBytes; // Number of bytes transmitted from host to dsp
- unsigned long nRxBytes; // Number of bytes received from dsp to host
- unsigned long ConTm; // Current session connection time in seconds
- unsigned char CalVer[CALVERSZ]; // Proprietary Calibration Version
- unsigned char CalDate[CALDATESZ]; // Proprietary Calibration Date
+/* Data structure for Dsp statistics */
+typedef struct _IOCTL_GET_DSP_STAT {
+ unsigned char DspVer[DSPVERSZ]; /* DSP version number */
+ unsigned char HwSerNum[HWSERNUMSZ]; /* Hardware Serial Number */
+ unsigned char Sku[SKUSZ]; /* SKU */
+ unsigned char eui64[EUISZ]; /* EUI64 */
+ unsigned short ConStat; /* Connection Status */
+ /* Bits 0-3 = Connection Status Field */
+ /* 0000=Idle (Disconnect) */
+ /* 0001=Searching */
+ /* 0010=Active (Connected) */
+ /* 0011=Waiting for L2 down */
+ /* 0100=Sleep */
+ unsigned short LedStat; /* Led Status */
+ /* Bits 0-3 = Signal Strength Field */
+ /* 0000 = -105dBm to -92dBm */
+ /* 0001 = -92dBm to -85dBm */
+ /* 0011 = -85dBm to -75dBm */
+ /* 0111 = -75dBm to -50dBm */
+ /* 1111 = -50dBm to 0dBm */
+ /* Bits 4-7 = Reserved */
+ /* Bits 8-11 = SNR Field */
+ /* 0000 = <2dB */
+ /* 0001 = 2dB to 8dB */
+ /* 0011 = 8dB to 15dB */
+ /* 0111 = 15dB to 22dB */
+ /* 1111 = >22dB */
+ /* Bits 12-15 = Reserved */
+ unsigned long nTxPkts; /* Number of packets transmitted from host to dsp */
+ unsigned long nRxPkts; /* Number of packets received from dsp to host */
+ unsigned long nTxBytes; /* Number of bytes transmitted from host to dsp */
+ unsigned long nRxBytes; /* Number of bytes received from dsp to host */
+ unsigned long ConTm; /* Current session connection time in seconds */
+ unsigned char CalVer[CALVERSZ]; /* Proprietary Calibration Version */
+ unsigned char CalDate[CALDATESZ]; /* Proprietary Calibration Date */
} __attribute__ ((packed)) IOCTL_GET_DSP_STAT, *PIOCTL_GET_DSP_STAT;
-//Data structure for Dual Ported RAM messaging between Host and Dsp
-typedef struct _IOCTL_DPRAM_BLK
-{
+/* Data structure for Dual Ported RAM messaging between Host and Dsp */
+typedef struct _IOCTL_DPRAM_BLK {
unsigned short total_len;
struct pseudo_hdr pseudohdr;
unsigned char buffer[1780];
} __attribute__ ((packed)) IOCTL_DPRAM_BLK, *PIOCTL_DPRAM_BLK;
-typedef struct _IOCTL_DPRAM_COMMAND
-{
+typedef struct _IOCTL_DPRAM_COMMAND {
unsigned short extra;
IOCTL_DPRAM_BLK dpram_blk;
} __attribute__ ((packed)) IOCTL_DPRAM_COMMAND, *PIOCTL_DPRAM_COMMAND;
-//
-// Custom IOCTL command codes
-//
+/*
+* Custom IOCTL command codes
+*/
#define FT1000_MAGIC_CODE 'F'
#define IOCTL_REGISTER_CMD 0
@@ -96,12 +94,12 @@ typedef struct _IOCTL_DPRAM_COMMAND
#define IOCTL_CONNECT 10
#define IOCTL_DISCONNECT 11
-#define IOCTL_FT1000_GET_DSP_STAT _IOR (FT1000_MAGIC_CODE, IOCTL_GET_DSP_STAT_CMD, sizeof(IOCTL_GET_DSP_STAT) )
-#define IOCTL_FT1000_GET_VER _IOR (FT1000_MAGIC_CODE, IOCTL_GET_VER_CMD, sizeof(IOCTL_GET_VER) )
-#define IOCTL_FT1000_CONNECT _IOW (FT1000_MAGIC_CODE, IOCTL_CONNECT, 0 )
-#define IOCTL_FT1000_DISCONNECT _IOW (FT1000_MAGIC_CODE, IOCTL_DISCONNECT, 0 )
-#define IOCTL_FT1000_SET_DPRAM _IOW (FT1000_MAGIC_CODE, IOCTL_SET_DPRAM_CMD, sizeof(IOCTL_DPRAM_BLK) )
-#define IOCTL_FT1000_GET_DPRAM _IOR (FT1000_MAGIC_CODE, IOCTL_GET_DPRAM_CMD, sizeof(IOCTL_DPRAM_BLK) )
-#define IOCTL_FT1000_REGISTER _IOW (FT1000_MAGIC_CODE, IOCTL_REGISTER_CMD, sizeof(unsigned short *) )
-#endif // _FT1000IOCTLH_
+#define IOCTL_FT1000_GET_DSP_STAT _IOR(FT1000_MAGIC_CODE, IOCTL_GET_DSP_STAT_CMD, sizeof(IOCTL_GET_DSP_STAT))
+#define IOCTL_FT1000_GET_VER _IOR(FT1000_MAGIC_CODE, IOCTL_GET_VER_CMD, sizeof(IOCTL_GET_VER))
+#define IOCTL_FT1000_CONNECT _IOW(FT1000_MAGIC_CODE, IOCTL_CONNECT, 0)
+#define IOCTL_FT1000_DISCONNECT _IOW(FT1000_MAGIC_CODE, IOCTL_DISCONNECT, 0)
+#define IOCTL_FT1000_SET_DPRAM _IOW(FT1000_MAGIC_CODE, IOCTL_SET_DPRAM_CMD, sizeof(IOCTL_DPRAM_BLK))
+#define IOCTL_FT1000_GET_DPRAM _IOR(FT1000_MAGIC_CODE, IOCTL_GET_DPRAM_CMD, sizeof(IOCTL_DPRAM_BLK))
+#define IOCTL_FT1000_REGISTER _IOW(FT1000_MAGIC_CODE, IOCTL_REGISTER_CMD, sizeof(unsigned short *))
+#endif /* _FT1000IOCTLH_ */
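For illustration, here is a minimal userspace sketch (not part of the patch) of how the custom ioctl codes above could be driven. The device node name "/dev/ft1000" is a hypothetical placeholder, and the defines simply mirror the IOCTL_CONNECT values visible in this header so the sketch stays self-contained instead of pulling in the driver-only headers that ft1000_ioctl.h depends on.

/*
 * Userspace sketch: issues the connect ioctl, which the handler above
 * services under IOCTL_CONNECT by sending the connection message to the DSP.
 * "/dev/ft1000" is an assumed node name used only for illustration.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

#define FT1000_MAGIC_CODE	'F'
#define IOCTL_CONNECT		10	/* mirrors ft1000_ioctl.h above */
#define IOCTL_FT1000_CONNECT	_IOW(FT1000_MAGIC_CODE, IOCTL_CONNECT, 0)

int main(void)
{
	int fd = open("/dev/ft1000", O_RDWR);	/* hypothetical node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, IOCTL_FT1000_CONNECT) < 0)
		perror("IOCTL_FT1000_CONNECT");
	close(fd);
	return 0;
}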
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c b/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c
index eca6f02..5ead942 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_proc.c
@@ -166,7 +166,7 @@ static const struct file_operations ft1000_proc_fops = {
static int
ft1000NotifyProc(struct notifier_block *this, unsigned long event, void *ptr)
{
- struct net_device *dev = ptr;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct ft1000_info *info;
struct proc_dir_entry *ft1000_proc_file;
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
index 614db55..29a7cd2 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
@@ -79,8 +79,12 @@ static int ft1000_probe(struct usb_interface *interface,
ft1000dev->dev = dev;
ft1000dev->status = 0;
ft1000dev->net = NULL;
- ft1000dev->tx_urb = usb_alloc_urb(0, GFP_ATOMIC);
- ft1000dev->rx_urb = usb_alloc_urb(0, GFP_ATOMIC);
+ ft1000dev->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ ft1000dev->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!ft1000dev->tx_urb || !ft1000dev->rx_urb) {
+ ret = -ENOMEM;
+ goto err_fw;
+ }
DEBUG("ft1000_probe is called\n");
numaltsetting = interface->num_altsetting;
@@ -209,6 +213,8 @@ err_thread:
err_load:
kfree(pFileStart);
err_fw:
+ usb_free_urb(ft1000dev->rx_urb);
+ usb_free_urb(ft1000dev->tx_urb);
kfree(ft1000dev);
return ret;
}
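The probe fix above switches the URB allocations to GFP_KERNEL (probe runs in process context) and adds the missing failure handling. A condensed sketch of that allocate-then-unwind pattern, assuming the driver's struct ft1000_usb from ft1000_usb.h, looks like this; usb_free_urb() accepts NULL, so the shared error label is safe even if only one allocation succeeded.

/*
 * Condensed sketch of the pattern the fix follows -- not the full probe.
 * struct ft1000_usb and its tx_urb/rx_urb members come from the driver's
 * ft1000_usb.h; everything else is standard USB core API.
 */
#include <linux/slab.h>
#include <linux/usb.h>
#include "ft1000_usb.h"

static int example_probe(struct usb_interface *interface,
			 const struct usb_device_id *id)
{
	struct ft1000_usb *ft1000dev;
	int ret;

	ft1000dev = kzalloc(sizeof(*ft1000dev), GFP_KERNEL);
	if (!ft1000dev)
		return -ENOMEM;

	/* Process context: GFP_KERNEL is the right allocation flag here */
	ft1000dev->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
	ft1000dev->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!ft1000dev->tx_urb || !ft1000dev->rx_urb) {
		ret = -ENOMEM;
		goto err_urb;
	}

	/* ... firmware load, network device setup, etc. ... */
	return 0;

err_urb:
	usb_free_urb(ft1000dev->rx_urb);	/* NULL-safe */
	usb_free_urb(ft1000dev->tx_urb);
	kfree(ft1000dev);
	return ret;
}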
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index e5818a1..ff92f34 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -18,6 +18,8 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/device.h>
@@ -101,13 +103,16 @@ struct fwtty_transaction {
};
#define to_device(a, b) (a->b)
-#define fwtty_err(p, s, v...) dev_err(to_device(p, device), s, ##v)
-#define fwtty_info(p, s, v...) dev_info(to_device(p, device), s, ##v)
-#define fwtty_notice(p, s, v...) dev_notice(to_device(p, device), s, ##v)
-#define fwtty_dbg(p, s, v...) \
- dev_dbg(to_device(p, device), "%s: " s, __func__, ##v)
-#define fwtty_err_ratelimited(p, s, v...) \
- dev_err_ratelimited(to_device(p, device), s, ##v)
+#define fwtty_err(p, fmt, ...) \
+ dev_err(to_device(p, device), fmt, ##__VA_ARGS__)
+#define fwtty_info(p, fmt, ...) \
+ dev_info(to_device(p, device), fmt, ##__VA_ARGS__)
+#define fwtty_notice(p, fmt, ...) \
+ dev_notice(to_device(p, device), fmt, ##__VA_ARGS__)
+#define fwtty_dbg(p, fmt, ...) \
+ dev_dbg(to_device(p, device), "%s: " fmt, __func__, ##__VA_ARGS__)
+#define fwtty_err_ratelimited(p, fmt, ...) \
+ dev_err_ratelimited(to_device(p, device), fmt, ##__VA_ARGS__)
#ifdef DEBUG
static inline void debug_short_write(struct fwtty_port *port, int c, int n)
@@ -118,7 +123,7 @@ static inline void debug_short_write(struct fwtty_port *port, int c, int n)
spin_lock_bh(&port->lock);
avail = dma_fifo_avail(&port->tx_fifo);
spin_unlock_bh(&port->lock);
- fwtty_dbg(port, "short write: avail:%d req:%d wrote:%d",
+ fwtty_dbg(port, "short write: avail:%d req:%d wrote:%d\n",
avail, c, n);
}
}
@@ -197,22 +202,22 @@ static void fwtty_log_tx_error(struct fwtty_port *port, int rcode)
{
switch (rcode) {
case RCODE_SEND_ERROR:
- fwtty_err_ratelimited(port, "card busy");
+ fwtty_err_ratelimited(port, "card busy\n");
break;
case RCODE_ADDRESS_ERROR:
- fwtty_err_ratelimited(port, "bad unit addr or write length");
+ fwtty_err_ratelimited(port, "bad unit addr or write length\n");
break;
case RCODE_DATA_ERROR:
- fwtty_err_ratelimited(port, "failed rx");
+ fwtty_err_ratelimited(port, "failed rx\n");
break;
case RCODE_NO_ACK:
- fwtty_err_ratelimited(port, "missing ack");
+ fwtty_err_ratelimited(port, "missing ack\n");
break;
case RCODE_BUSY:
- fwtty_err_ratelimited(port, "remote busy");
+ fwtty_err_ratelimited(port, "remote busy\n");
break;
default:
- fwtty_err_ratelimited(port, "failed tx: %d", rcode);
+ fwtty_err_ratelimited(port, "failed tx: %d\n", rcode);
}
}
@@ -287,7 +292,7 @@ static void __fwtty_restart_tx(struct fwtty_port *port)
schedule_delayed_work(&port->drain, 0);
avail = dma_fifo_avail(&port->tx_fifo);
- fwtty_dbg(port, "fifo len: %d avail: %d", len, avail);
+ fwtty_dbg(port, "fifo len: %d avail: %d\n", len, avail);
}
static void fwtty_restart_tx(struct fwtty_port *port)
@@ -323,7 +328,7 @@ static void fwtty_update_port_status(struct fwtty_port *port, unsigned status)
if (delta & TIOCM_CTS)
++port->icount.cts;
- fwtty_dbg(port, "status: %x delta: %x", status, delta);
+ fwtty_dbg(port, "status: %x delta: %x\n", status, delta);
if (delta & TIOCM_CAR) {
tty = tty_port_tty_get(&port->port);
@@ -509,7 +514,7 @@ static void fwtty_emit_breaks(struct work_struct *work)
n = (elapsed * port->cps) / HZ + 1;
port->break_last = now;
- fwtty_dbg(port, "sending %d brks", n);
+ fwtty_dbg(port, "sending %d brks\n", n);
while (n) {
t = min(n, 16);
@@ -570,7 +575,7 @@ static int fwtty_buffer_rx(struct fwtty_port *port, unsigned char *d, size_t n)
size_t size = (n + sizeof(struct buffered_rx) + 0xFF) & ~0xFF;
if (port->buffered + n > HIGH_WATERMARK) {
- fwtty_err_ratelimited(port, "overflowed rx buffer: buffered: %d new: %zu wtrmk: %d",
+ fwtty_err_ratelimited(port, "overflowed rx buffer: buffered: %d new: %zu wtrmk: %d\n",
port->buffered, n, HIGH_WATERMARK);
return 0;
}
@@ -599,7 +604,7 @@ static int fwtty_rx(struct fwtty_port *port, unsigned char *data, size_t len)
unsigned lsr;
int err = 0;
- fwtty_dbg(port, "%d", n);
+ fwtty_dbg(port, "%d\n", n);
profile_size_distrib(port->stats.reads, n);
if (port->write_only) {
@@ -689,7 +694,7 @@ static void fwtty_port_handler(struct fw_card *card,
rcu_read_unlock();
if (!peer || peer != rcu_access_pointer(port->peer)) {
rcode = RCODE_ADDRESS_ERROR;
- fwtty_err_ratelimited(port, "ignoring unauthenticated data");
+ fwtty_err_ratelimited(port, "ignoring unauthenticated data\n");
goto respond;
}
@@ -746,7 +751,7 @@ static void fwtty_tx_complete(struct fw_card *card, int rcode,
struct fwtty_port *port = txn->port;
int len;
- fwtty_dbg(port, "rcode: %d", rcode);
+ fwtty_dbg(port, "rcode: %d\n", rcode);
switch (rcode) {
case RCODE_COMPLETE:
@@ -809,7 +814,7 @@ static int fwtty_tx(struct fwtty_port *port, bool drain)
n = dma_fifo_out_pend(&port->tx_fifo, &txn->dma_pended);
spin_unlock_bh(&port->lock);
- fwtty_dbg(port, "out: %u rem: %d", txn->dma_pended.len, n);
+ fwtty_dbg(port, "out: %u rem: %d\n", txn->dma_pended.len, n);
if (n < 0) {
kmem_cache_free(fwtty_txn_cache, txn);
@@ -819,7 +824,8 @@ static int fwtty_tx(struct fwtty_port *port, bool drain)
profile_size_distrib(port->stats.txns, 0);
else {
++port->stats.fifo_errs;
- fwtty_err_ratelimited(port, "fifo err: %d", n);
+ fwtty_err_ratelimited(port, "fifo err: %d\n",
+ n);
}
break;
}
@@ -877,7 +883,7 @@ static void fwtty_write_xchar(struct fwtty_port *port, char ch)
++port->stats.xchars;
- fwtty_dbg(port, "%02x", ch);
+ fwtty_dbg(port, "%02x\n", ch);
rcu_read_lock();
peer = rcu_dereference(port->peer);
@@ -964,7 +970,7 @@ static void fwtty_port_dtr_rts(struct tty_port *tty_port, int on)
{
struct fwtty_port *port = to_port(tty_port, port);
- fwtty_dbg(port, "on/off: %d", on);
+ fwtty_dbg(port, "on/off: %d\n", on);
spin_lock_bh(&port->lock);
/* Don't change carrier state if this is a console */
@@ -992,7 +998,7 @@ static int fwtty_port_carrier_raised(struct tty_port *tty_port)
rc = (port->mstatus & TIOCM_CAR);
- fwtty_dbg(port, "%d", rc);
+ fwtty_dbg(port, "%d\n", rc);
return rc;
}
@@ -1177,7 +1183,7 @@ static int fwtty_write(struct tty_struct *tty, const unsigned char *buf, int c)
struct fwtty_port *port = tty->driver_data;
int n, len;
- fwtty_dbg(port, "%d", c);
+ fwtty_dbg(port, "%d\n", c);
profile_size_distrib(port->stats.writes, c);
spin_lock_bh(&port->lock);
@@ -1204,7 +1210,7 @@ static int fwtty_write_room(struct tty_struct *tty)
n = dma_fifo_avail(&port->tx_fifo);
spin_unlock_bh(&port->lock);
- fwtty_dbg(port, "%d", n);
+ fwtty_dbg(port, "%d\n", n);
return n;
}
@@ -1218,7 +1224,7 @@ static int fwtty_chars_in_buffer(struct tty_struct *tty)
n = dma_fifo_level(&port->tx_fifo);
spin_unlock_bh(&port->lock);
- fwtty_dbg(port, "%d", n);
+ fwtty_dbg(port, "%d\n", n);
return n;
}
@@ -1227,7 +1233,7 @@ static void fwtty_send_xchar(struct tty_struct *tty, char ch)
{
struct fwtty_port *port = tty->driver_data;
- fwtty_dbg(port, "%02x", ch);
+ fwtty_dbg(port, "%02x\n", ch);
fwtty_write_xchar(port, ch);
}
@@ -1254,7 +1260,7 @@ static void fwtty_unthrottle(struct tty_struct *tty)
{
struct fwtty_port *port = tty->driver_data;
- fwtty_dbg(port, "CRTSCTS: %d", (C_CRTSCTS(tty) != 0));
+ fwtty_dbg(port, "CRTSCTS: %d\n", (C_CRTSCTS(tty) != 0));
profile_fifo_avail(port, port->stats.unthrottle);
@@ -1409,7 +1415,7 @@ static int fwtty_break_ctl(struct tty_struct *tty, int state)
struct fwtty_port *port = tty->driver_data;
long ret;
- fwtty_dbg(port, "%d", state);
+ fwtty_dbg(port, "%d\n", state);
if (state == -1) {
set_bit(STOP_TX, &port->flags);
@@ -1446,7 +1452,7 @@ static int fwtty_tiocmget(struct tty_struct *tty)
tiocm = (port->mctrl & MCTRL_MASK) | (port->mstatus & ~MCTRL_MASK);
spin_unlock_bh(&port->lock);
- fwtty_dbg(port, "%x", tiocm);
+ fwtty_dbg(port, "%x\n", tiocm);
return tiocm;
}
@@ -1455,7 +1461,7 @@ static int fwtty_tiocmset(struct tty_struct *tty, unsigned set, unsigned clear)
{
struct fwtty_port *port = tty->driver_data;
- fwtty_dbg(port, "set: %x clear: %x", set, clear);
+ fwtty_dbg(port, "set: %x clear: %x\n", set, clear);
/* TODO: simulate loopback if TIOCM_LOOP set */
@@ -1775,7 +1781,7 @@ static void fwserial_virt_plug_complete(struct fwtty_peer *peer,
if (port->port.console && port->fwcon_ops->notify != NULL)
(*port->fwcon_ops->notify)(FWCON_NOTIFY_ATTACH, port->con_data);
- fwtty_info(&peer->unit, "peer (guid:%016llx) connected on %s",
+ fwtty_info(&peer->unit, "peer (guid:%016llx) connected on %s\n",
(unsigned long long)peer->guid, dev_name(port->device));
}
@@ -1797,7 +1803,7 @@ static inline int fwserial_send_mgmt_sync(struct fwtty_peer *peer,
pkt, be16_to_cpu(pkt->hdr.len));
if (rcode == RCODE_BUSY || rcode == RCODE_SEND_ERROR ||
rcode == RCODE_GENERATION) {
- fwtty_dbg(&peer->unit, "mgmt write error: %d", rcode);
+ fwtty_dbg(&peer->unit, "mgmt write error: %d\n", rcode);
continue;
} else
break;
@@ -1918,7 +1924,7 @@ static int fwserial_connect_peer(struct fwtty_peer *peer)
port = fwserial_find_port(peer);
if (!port) {
- fwtty_err(&peer->unit, "avail ports in use");
+ fwtty_err(&peer->unit, "avail ports in use\n");
err = -EBUSY;
goto free_pkt;
}
@@ -2056,7 +2062,7 @@ static struct fwtty_peer *__fwserial_peer_by_node_id(struct fw_card *card,
* has created its remote unit device before this driver has
* been probed for any unit devices...
*/
- fwtty_err(card, "unknown card (guid %016llx)",
+ fwtty_err(card, "unknown card (guid %016llx)\n",
(unsigned long long) card->guid);
return NULL;
}
@@ -2084,8 +2090,8 @@ static void __dump_peer_list(struct fw_card *card)
list_for_each_entry_rcu(peer, &serial->peer_list, list) {
int g = peer->generation;
smp_rmb();
- fwtty_dbg(card, "peer(%d:%x) guid: %016llx\n", g,
- peer->node_id, (unsigned long long) peer->guid);
+ fwtty_dbg(card, "peer(%d:%x) guid: %016llx\n",
+ g, peer->node_id, (unsigned long long) peer->guid);
}
}
#else
@@ -2173,7 +2179,7 @@ static int fwserial_add_peer(struct fw_serial *serial, struct fw_unit *unit)
peer->serial = serial;
list_add_rcu(&peer->list, &serial->peer_list);
- fwtty_info(&peer->unit, "peer added (guid:%016llx)",
+ fwtty_info(&peer->unit, "peer added (guid:%016llx)\n",
(unsigned long long)peer->guid);
/* identify the local unit & virt cable to loopback port */
@@ -2236,7 +2242,7 @@ static void fwserial_remove_peer(struct fwtty_peer *peer)
list_del_rcu(&peer->list);
- fwtty_info(&peer->unit, "peer removed (guid:%016llx)",
+ fwtty_info(&peer->unit, "peer removed (guid:%016llx)\n",
(unsigned long long)peer->guid);
spin_unlock_bh(&peer->lock);
@@ -2324,7 +2330,7 @@ static int fwserial_create(struct fw_unit *unit)
err = fwtty_ports_add(serial);
if (err) {
- fwtty_err(&unit, "no space in port table");
+ fwtty_err(&unit, "no space in port table\n");
goto free_ports;
}
@@ -2335,7 +2341,8 @@ static int fwserial_create(struct fw_unit *unit)
card->device);
if (IS_ERR(tty_dev)) {
err = PTR_ERR(tty_dev);
- fwtty_err(&unit, "register tty device error (%d)", err);
+ fwtty_err(&unit, "register tty device error (%d)\n",
+ err);
goto unregister_ttys;
}
@@ -2352,7 +2359,8 @@ static int fwserial_create(struct fw_unit *unit)
card->device);
if (IS_ERR(loop_dev)) {
err = PTR_ERR(loop_dev);
- fwtty_err(&unit, "create loop device failed (%d)", err);
+ fwtty_err(&unit, "create loop device failed (%d)\n",
+ err);
goto unregister_ttys;
}
serial->ports[j]->device = loop_dev;
@@ -2372,14 +2380,14 @@ static int fwserial_create(struct fw_unit *unit)
list_add_rcu(&serial->list, &fwserial_list);
- fwtty_notice(&unit, "TTY over FireWire on device %s (guid %016llx)",
+ fwtty_notice(&unit, "TTY over FireWire on device %s (guid %016llx)\n",
dev_name(card->device), (unsigned long long) card->guid);
err = fwserial_add_peer(serial, unit);
if (!err)
return 0;
- fwtty_err(&unit, "unable to add peer unit device (%d)", err);
+ fwtty_err(&unit, "unable to add peer unit device (%d)\n", err);
/* fall-through to error processing */
debugfs_remove_recursive(serial->debugfs);
@@ -2438,9 +2446,9 @@ free_ports:
* last peer for a given fw_card triggering the destruction of the same
* fw_serial for the same fw_card.
*/
-static int fwserial_probe(struct device *dev)
+static int fwserial_probe(struct fw_unit *unit,
+ const struct ieee1394_device_id *id)
{
- struct fw_unit *unit = fw_unit(dev);
struct fw_serial *serial;
int err;
@@ -2462,9 +2470,9 @@ static int fwserial_probe(struct device *dev)
* specific fw_card). If this is the last peer being removed, then trigger
* the destruction of the underlying TTYs.
*/
-static int fwserial_remove(struct device *dev)
+static void fwserial_remove(struct fw_unit *unit)
{
- struct fwtty_peer *peer = dev_get_drvdata(dev);
+ struct fwtty_peer *peer = dev_get_drvdata(&unit->device);
struct fw_serial *serial = peer->serial;
int i;
@@ -2484,8 +2492,6 @@ static int fwserial_remove(struct device *dev)
kref_put(&serial->kref, fwserial_destroy);
}
mutex_unlock(&fwserial_list_mutex);
-
- return 0;
}
/**
@@ -2530,10 +2536,10 @@ static struct fw_driver fwserial_driver = {
.owner = THIS_MODULE,
.name = KBUILD_MODNAME,
.bus = &fw_bus_type,
- .probe = fwserial_probe,
- .remove = fwserial_remove,
},
+ .probe = fwserial_probe,
.update = fwserial_update,
+ .remove = fwserial_remove,
.id_table = fwserial_id_table,
};
@@ -2621,7 +2627,7 @@ static void fwserial_handle_plug_req(struct work_struct *work)
switch (peer->state) {
case FWPS_NOT_ATTACHED:
if (!port) {
- fwtty_err(&peer->unit, "no more ports avail");
+ fwtty_err(&peer->unit, "no more ports avail\n");
fill_plug_rsp_nack(pkt);
} else {
peer->port = port;
@@ -2663,7 +2669,7 @@ static void fwserial_handle_plug_req(struct work_struct *work)
fwtty_write_port_status(tmp);
spin_lock_bh(&peer->lock);
} else {
- fwtty_err(&peer->unit, "PLUG_RSP error (%d)", rcode);
+ fwtty_err(&peer->unit, "PLUG_RSP error (%d)\n", rcode);
port = peer_revert_state(peer);
}
}
@@ -2715,7 +2721,8 @@ static void fwserial_handle_unplug_req(struct work_struct *work)
spin_lock_bh(&peer->lock);
if (peer->state == FWPS_UNPLUG_RESPONDING) {
if (rcode != RCODE_COMPLETE)
- fwtty_err(&peer->unit, "UNPLUG_RSP error (%d)", rcode);
+ fwtty_err(&peer->unit, "UNPLUG_RSP error (%d)\n",
+ rcode);
port = peer_revert_state(peer);
}
cleanup:
@@ -2750,19 +2757,19 @@ static int fwserial_parse_mgmt_write(struct fwtty_peer *peer,
* already removed from the bus -- and the removal was
* processed before we rec'd this transaction
*/
- fwtty_err(&peer->unit, "peer already removed");
+ fwtty_err(&peer->unit, "peer already removed\n");
spin_unlock_bh(&peer->lock);
return RCODE_ADDRESS_ERROR;
}
rcode = RCODE_COMPLETE;
- fwtty_dbg(&peer->unit, "mgmt: hdr.code: %04hx", pkt->hdr.code);
+ fwtty_dbg(&peer->unit, "mgmt: hdr.code: %04hx\n", pkt->hdr.code);
switch (be16_to_cpu(pkt->hdr.code) & FWSC_CODE_MASK) {
case FWSC_VIRT_CABLE_PLUG:
if (work_pending(&peer->work)) {
- fwtty_err(&peer->unit, "plug req: busy");
+ fwtty_err(&peer->unit, "plug req: busy\n");
rcode = RCODE_CONFLICT_ERROR;
} else {
@@ -2777,7 +2784,7 @@ static int fwserial_parse_mgmt_write(struct fwtty_peer *peer,
rcode = RCODE_CONFLICT_ERROR;
} else if (be16_to_cpu(pkt->hdr.code) & FWSC_RSP_NACK) {
- fwtty_notice(&peer->unit, "NACK plug rsp");
+ fwtty_notice(&peer->unit, "NACK plug rsp\n");
port = peer_revert_state(peer);
} else {
@@ -2793,7 +2800,7 @@ static int fwserial_parse_mgmt_write(struct fwtty_peer *peer,
case FWSC_VIRT_CABLE_UNPLUG:
if (work_pending(&peer->work)) {
- fwtty_err(&peer->unit, "unplug req: busy");
+ fwtty_err(&peer->unit, "unplug req: busy\n");
rcode = RCODE_CONFLICT_ERROR;
} else {
PREPARE_WORK(&peer->work, fwserial_handle_unplug_req);
@@ -2806,14 +2813,14 @@ static int fwserial_parse_mgmt_write(struct fwtty_peer *peer,
rcode = RCODE_CONFLICT_ERROR;
else {
if (be16_to_cpu(pkt->hdr.code) & FWSC_RSP_NACK)
- fwtty_notice(&peer->unit, "NACK unplug?");
+ fwtty_notice(&peer->unit, "NACK unplug?\n");
port = peer_revert_state(peer);
reset = true;
}
break;
default:
- fwtty_err(&peer->unit, "unknown mgmt code %d",
+ fwtty_err(&peer->unit, "unknown mgmt code %d\n",
be16_to_cpu(pkt->hdr.code));
rcode = RCODE_DATA_ERROR;
}
@@ -2847,7 +2854,7 @@ static void fwserial_mgmt_handler(struct fw_card *card,
rcu_read_lock();
peer = __fwserial_peer_by_node_id(card, generation, source);
if (!peer) {
- fwtty_dbg(card, "peer(%d:%x) not found", generation, source);
+ fwtty_dbg(card, "peer(%d:%x) not found\n", generation, source);
__dump_peer_list(card);
rcode = RCODE_CONFLICT_ERROR;
@@ -2897,7 +2904,7 @@ static int __init fwserial_init(void)
err = tty_register_driver(fwtty_driver);
if (err) {
- driver_err("register tty driver failed (%d)", err);
+ pr_err("register tty driver failed (%d)\n", err);
goto put_tty;
}
@@ -2922,7 +2929,7 @@ static int __init fwserial_init(void)
err = tty_register_driver(fwloop_driver);
if (err) {
- driver_err("register loop driver failed (%d)", err);
+ pr_err("register loop driver failed (%d)\n", err);
goto put_loop;
}
}
@@ -2948,7 +2955,7 @@ static int __init fwserial_init(void)
err = fw_core_add_address_handler(&fwserial_mgmt_addr_handler,
&fwserial_mgmt_addr_region);
if (err) {
- driver_err("add management handler failed (%d)", err);
+ pr_err("add management handler failed (%d)\n", err);
goto destroy_cache;
}
@@ -2956,13 +2963,13 @@ static int __init fwserial_init(void)
FW_UNIT_ADDRESS(fwserial_mgmt_addr_handler.offset);
err = fw_core_add_descriptor(&fwserial_unit_directory);
if (err) {
- driver_err("add unit descriptor failed (%d)", err);
+ pr_err("add unit descriptor failed (%d)\n", err);
goto remove_handler;
}
err = driver_register(&fwserial_driver.driver);
if (err) {
- driver_err("register fwserial driver failed (%d)", err);
+ pr_err("register fwserial driver failed (%d)\n", err);
goto remove_descriptor;
}
diff --git a/drivers/staging/fwserial/fwserial.h b/drivers/staging/fwserial/fwserial.h
index 514f571..2463501 100644
--- a/drivers/staging/fwserial/fwserial.h
+++ b/drivers/staging/fwserial/fwserial.h
@@ -356,8 +356,6 @@ static const char loop_dev_name[] = "fwloop";
extern struct tty_driver *fwtty_driver;
-#define driver_err(s, v...) pr_err(KBUILD_MODNAME ": " s, ##v)
-
struct fwtty_port *fwtty_port_get(unsigned index);
void fwtty_port_put(struct fwtty_port *port);
diff --git a/drivers/staging/gdm724x/Kconfig b/drivers/staging/gdm724x/Kconfig
new file mode 100644
index 0000000..0a1f090
--- /dev/null
+++ b/drivers/staging/gdm724x/Kconfig
@@ -0,0 +1,15 @@
+#
+# GCT GDM724x LTE driver configuration
+#
+
+config LTE_GDM724X
+ tristate "GCT GDM724x LTE support"
+ depends on NET && USB && TTY && m
+ help
+ This driver supports GCT GDM724x LTE chip-based USB modem devices.
+ It exposes 4 network devices to be used per PDN and 2 tty devices to be
+ used for AT commands and DM monitoring applications.
+ The modules will be called gdmulte.ko and gdmtty.ko.
+
+ GCT-ATCx can be used for AT Commands
+ GCT-DMx can be used for LTE protocol monitoring
diff --git a/drivers/staging/gdm724x/Makefile b/drivers/staging/gdm724x/Makefile
new file mode 100644
index 0000000..ba7f11a
--- /dev/null
+++ b/drivers/staging/gdm724x/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_LTE_GDM724X) := gdmulte.o
+gdmulte-y += gdm_lte.o netlink_k.o
+gdmulte-y += gdm_usb.o gdm_endian.o
+
+obj-$(CONFIG_LTE_GDM724X) += gdmtty.o
+gdmtty-y := gdm_tty.o gdm_mux.o
+
diff --git a/drivers/staging/gdm724x/TODO b/drivers/staging/gdm724x/TODO
new file mode 100644
index 0000000..b2b571e
--- /dev/null
+++ b/drivers/staging/gdm724x/TODO
@@ -0,0 +1,16 @@
+TODO:
+- Clean up coding style to meet kernel standard. (80 line limit, netdev_err)
+- Remove test for host endian
+- Remove confusing macros (endian, hci_send, sdu_send, rcv_with_cb)
+- Fixes for every instance of a function returning -1
+- Check for skb->len in gdm_lte_emulate_arp()
+- Use ALIGN() macro for dummy_cnt in up_to_host()
+- Error handling in init_usb()
+- Explain reason for multiples of 512 bytes in alloc_tx_struct()
+- Review use of atomic allocation for tx structs
+- No error checking for alloc_tx_struct in do_tx()
+- Fix up static tty port allocation to be dynamic
+
+Patches to:
+ Jonathan Kim <jonathankim@gctsemi.com>
+ Dean ahn <deanahn@gctsemi.com>
diff --git a/drivers/staging/gdm724x/gdm_endian.c b/drivers/staging/gdm724x/gdm_endian.c
new file mode 100644
index 0000000..f6cc90a
--- /dev/null
+++ b/drivers/staging/gdm724x/gdm_endian.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include "gdm_endian.h"
+
+void gdm_set_endian(struct gdm_endian *ed, u8 dev_endian)
+{
+ u8 a[2] = {0x12, 0x34};
+ u8 b[2] = {0, };
+ u16 c = 0x1234;
+
+ if (dev_endian == ENDIANNESS_BIG)
+ ed->dev_ed = ENDIANNESS_BIG;
+ else
+ ed->dev_ed = ENDIANNESS_LITTLE;
+
+ memcpy(b, &c, 2);
+
+ if (a[0] != b[0])
+ ed->host_ed = ENDIANNESS_LITTLE;
+ else
+ ed->host_ed = ENDIANNESS_BIG;
+
+}
+
+u16 gdm_cpu_to_dev16(struct gdm_endian *ed, u16 x)
+{
+ if (ed->dev_ed == ed->host_ed)
+ return x;
+
+ return Endian16_Swap(x);
+}
+
+u16 gdm_dev16_to_cpu(struct gdm_endian *ed, u16 x)
+{
+ if (ed->dev_ed == ed->host_ed)
+ return x;
+
+ return Endian16_Swap(x);
+}
+
+u32 gdm_cpu_to_dev32(struct gdm_endian *ed, u32 x)
+{
+ if (ed->dev_ed == ed->host_ed)
+ return x;
+
+ return Endian32_Swap(x);
+}
+
+u32 gdm_dev32_to_cpu(struct gdm_endian *ed, u32 x)
+{
+ if (ed->dev_ed == ed->host_ed)
+ return x;
+
+ return Endian32_Swap(x);
+}
diff --git a/drivers/staging/gdm724x/gdm_endian.h b/drivers/staging/gdm724x/gdm_endian.h
new file mode 100644
index 0000000..9b2531f
--- /dev/null
+++ b/drivers/staging/gdm724x/gdm_endian.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __GDM_ENDIAN_H__
+#define __GDM_ENDIAN_H__
+
+#include <linux/types.h>
+
+#define Endian16_Swap(value) \
+ ((((u16)((value) & 0x00FF)) << 8) | \
+ (((u16)((value) & 0xFF00)) >> 8))
+
+#define Endian32_Swap(value) \
+ ((((u32)((value) & 0x000000FF)) << 24) | \
+ (((u32)((value) & 0x0000FF00)) << 8) | \
+ (((u32)((value) & 0x00FF0000)) >> 8) | \
+ (((u32)((value) & 0xFF000000)) >> 24))
+
+enum {
+ ENDIANNESS_MIN = 0,
+ ENDIANNESS_UNKNOWN,
+ ENDIANNESS_LITTLE,
+ ENDIANNESS_BIG,
+ ENDIANNESS_MIDDLE,
+ ENDIANNESS_MAX
+};
+
+struct gdm_endian {
+ u8 dev_ed;
+ u8 host_ed;
+};
+
+void gdm_set_endian(struct gdm_endian *ed, u8 dev_endian);
+u16 gdm_cpu_to_dev16(struct gdm_endian *ed, u16 x);
+u16 gdm_dev16_to_cpu(struct gdm_endian *ed, u16 x);
+u32 gdm_cpu_to_dev32(struct gdm_endian *ed, u32 x);
+u32 gdm_dev32_to_cpu(struct gdm_endian *ed, u32 x);
+
+#endif /*__GDM_ENDIAN_H__*/
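The helpers above are plain byte swaps: gdm_set_endian() records the device endianness and probes the host side with a two-byte memcpy, and gdm_cpu_to_dev16()/gdm_dev32_to_cpu() only apply Endian16_Swap/Endian32_Swap when host and device differ. A standalone userspace sketch of the same swap expressions follows; it uses stdint types because the driver header pulls in <linux/types.h> and is not meant to be included from userspace.

/*
 * Standalone sketch mirroring Endian16_Swap/Endian32_Swap with stdint types;
 * illustrative only, no kernel headers involved.
 */
#include <stdio.h>
#include <stdint.h>

#define SWAP16(v) ((uint16_t)((((v) & 0x00FFu) << 8) | (((v) & 0xFF00u) >> 8)))
#define SWAP32(v) ((uint32_t)((((v) & 0x000000FFu) << 24) | \
			      (((v) & 0x0000FF00u) << 8)  | \
			      (((v) & 0x00FF0000u) >> 8)  | \
			      (((v) & 0xFF000000u) >> 24)))

int main(void)
{
	/* 0x1234 swaps to 0x3412; 0x12345678 swaps to 0x78563412 */
	printf("0x%04x\n", (unsigned int)SWAP16(0x1234u));
	printf("0x%08x\n", (unsigned int)SWAP32(0x12345678u));
	return 0;
}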
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
new file mode 100644
index 0000000..bc0d510
--- /dev/null
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -0,0 +1,877 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
+#include <linux/in.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/in6.h>
+#include <linux/tcp.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/uaccess.h>
+#include <net/ndisc.h>
+
+#include "gdm_lte.h"
+#include "netlink_k.h"
+#include "hci.h"
+#include "hci_packet.h"
+#include "gdm_endian.h"
+
+/*
+ * Netlink protocol number
+ */
+#define NETLINK_LTE 30
+
+/*
+ * Default MTU Size
+ */
+#define DEFAULT_MTU_SIZE 1500
+
+#define gdm_dev_endian(n) (\
+ n->phy_dev->get_endian(n->phy_dev->priv_dev))
+
+#define gdm_lte_hci_send(n, d, l) (\
+ n->phy_dev->send_hci_func(n->phy_dev->priv_dev, d, l, NULL, NULL))
+
+#define gdm_lte_sdu_send(n, d, l, c, b, i, t) (\
+ n->phy_dev->send_sdu_func(n->phy_dev->priv_dev, d, l, n->pdn_table.dft_eps_id, 0, c, b, i, t))
+
+#define gdm_lte_rcv_with_cb(n, c, b, e) (\
+ n->rcv_func(n->priv_dev, c, b, e))
+
+#define IP_VERSION_4 4
+#define IP_VERSION_6 6
+
+static struct {
+ int ref_cnt;
+ struct sock *sock;
+} lte_event;
+
+static struct device_type wwan_type = {
+ .name = "wwan",
+};
+
+static int gdm_lte_open(struct net_device *dev)
+{
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int gdm_lte_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ return 0;
+}
+
+static int gdm_lte_set_config(struct net_device *dev, struct ifmap *map)
+{
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+ return 0;
+}
+
+static void tx_complete(void *arg)
+{
+ struct nic *nic = arg;
+
+ if (netif_queue_stopped(nic->netdev))
+ netif_wake_queue(nic->netdev);
+}
+
+static int gdm_lte_rx(struct sk_buff *skb, struct nic *nic, int nic_type)
+{
+ int ret;
+
+ ret = netif_rx_ni(skb);
+ if (ret == NET_RX_DROP) {
+ nic->stats.rx_dropped++;
+ } else {
+ nic->stats.rx_packets++;
+ nic->stats.rx_bytes += skb->len + ETH_HLEN;
+ }
+
+ return 0;
+}
+
+int gdm_lte_emulate_arp(struct sk_buff *skb_in, u32 nic_type)
+{
+ struct nic *nic = netdev_priv(skb_in->dev);
+ struct sk_buff *skb_out;
+ struct ethhdr eth;
+ struct vlan_ethhdr vlan_eth;
+ struct arphdr *arp_in;
+ struct arphdr *arp_out;
+ struct arpdata {
+ u8 ar_sha[ETH_ALEN];
+ u8 ar_sip[4];
+ u8 ar_tha[ETH_ALEN];
+ u8 ar_tip[4];
+ };
+ struct arpdata *arp_data_in;
+ struct arpdata *arp_data_out;
+ u8 arp_temp[60];
+ void *mac_header_data;
+ u32 mac_header_len;
+
+ /* Format the mac header so that it can be put to skb */
+ if (ntohs(((struct ethhdr *)skb_in->data)->h_proto) == ETH_P_8021Q) {
+ memcpy(&vlan_eth, skb_in->data, sizeof(struct vlan_ethhdr));
+ mac_header_data = &vlan_eth;
+ mac_header_len = VLAN_ETH_HLEN;
+ } else {
+ memcpy(&eth, skb_in->data, sizeof(struct ethhdr));
+ mac_header_data = &eth;
+ mac_header_len = ETH_HLEN;
+ }
+
+ /* Get the pointer of the original request */
+ arp_in = (struct arphdr *)(skb_in->data + mac_header_len);
+ arp_data_in = (struct arpdata *)(skb_in->data + mac_header_len + sizeof(struct arphdr));
+
+ /* Get the pointer of the outgoing response */
+ arp_out = (struct arphdr *)arp_temp;
+ arp_data_out = (struct arpdata *)(arp_temp + sizeof(struct arphdr));
+
+ /* Copy the arp header */
+ memcpy(arp_out, arp_in, sizeof(struct arphdr));
+ arp_out->ar_op = htons(ARPOP_REPLY);
+
+ /* Copy the arp payload: based on 2 bytes of mac and fill the IP */
+ arp_data_out->ar_sha[0] = arp_data_in->ar_sha[0];
+ arp_data_out->ar_sha[1] = arp_data_in->ar_sha[1];
+ memcpy(&arp_data_out->ar_sha[2], &arp_data_in->ar_tip[0], 4);
+ memcpy(&arp_data_out->ar_sip[0], &arp_data_in->ar_tip[0], 4);
+ memcpy(&arp_data_out->ar_tha[0], &arp_data_in->ar_sha[0], 6);
+ memcpy(&arp_data_out->ar_tip[0], &arp_data_in->ar_sip[0], 4);
+
+ /* Fill the destination mac with source mac of the received packet */
+ memcpy(mac_header_data, mac_header_data + ETH_ALEN, ETH_ALEN);
+ /* Fill the source mac with nic's source mac */
+ memcpy(mac_header_data + ETH_ALEN, nic->src_mac_addr, ETH_ALEN);
+
+ /* Alloc skb and reserve align */
+ skb_out = dev_alloc_skb(skb_in->len);
+ if (!skb_out)
+ return -ENOMEM;
+ skb_reserve(skb_out, NET_IP_ALIGN);
+
+ memcpy(skb_put(skb_out, mac_header_len), mac_header_data, mac_header_len);
+ memcpy(skb_put(skb_out, sizeof(struct arphdr)), arp_out, sizeof(struct arphdr));
+ memcpy(skb_put(skb_out, sizeof(struct arpdata)), arp_data_out, sizeof(struct arpdata));
+
+ skb_out->protocol = ((struct ethhdr *)mac_header_data)->h_proto;
+ skb_out->dev = skb_in->dev;
+ skb_reset_mac_header(skb_out);
+ skb_pull(skb_out, ETH_HLEN);
+
+ gdm_lte_rx(skb_out, nic, nic_type);
+
+ return 0;
+}
+
+int icmp6_checksum(struct ipv6hdr *ipv6, u16 *ptr, int len)
+{
+ unsigned short *w = ptr;
+ int sum = 0;
+ int i;
+
+ union {
+ struct {
+ u8 ph_src[16];
+ u8 ph_dst[16];
+ u32 ph_len;
+ u8 ph_zero[3];
+ u8 ph_nxt;
+ } ph __packed;
+ u16 pa[20];
+ } pseudo_header;
+
+ memset(&pseudo_header, 0, sizeof(pseudo_header));
+ memcpy(&pseudo_header.ph.ph_src, &ipv6->saddr.in6_u.u6_addr8, 16);
+ memcpy(&pseudo_header.ph.ph_dst, &ipv6->daddr.in6_u.u6_addr8, 16);
+ pseudo_header.ph.ph_len = ipv6->payload_len;
+ pseudo_header.ph.ph_nxt = ipv6->nexthdr;
+
+ w = (u16 *)&pseudo_header;
+ for (i = 0; i < sizeof(pseudo_header.pa) / sizeof(pseudo_header.pa[0]); i++)
+ sum += pseudo_header.pa[i];
+
+ w = ptr;
+ while (len > 1) {
+ sum += *w++;
+ len -= 2;
+ }
+
+ sum = (sum >> 16) + (sum & 0xFFFF);
+ sum += (sum >> 16);
+ sum = ~sum & 0xffff;
+
+ return sum;
+}
+
+int gdm_lte_emulate_ndp(struct sk_buff *skb_in, u32 nic_type)
+{
+ struct nic *nic = netdev_priv(skb_in->dev);
+ struct sk_buff *skb_out;
+ struct ethhdr eth;
+ struct vlan_ethhdr vlan_eth;
+ struct neighbour_advertisement {
+ u8 target_address[16];
+ u8 type;
+ u8 length;
+ u8 link_layer_address[6];
+ };
+ struct neighbour_advertisement na;
+ struct neighbour_solicitation {
+ u8 target_address[16];
+ };
+ struct neighbour_solicitation *ns;
+ struct ipv6hdr *ipv6_in;
+ struct ipv6hdr ipv6_out;
+ struct icmp6hdr *icmp6_in;
+ struct icmp6hdr icmp6_out;
+
+ void *mac_header_data;
+ u32 mac_header_len;
+
+ /* Format the mac header so that it can be put to skb */
+ if (ntohs(((struct ethhdr *)skb_in->data)->h_proto) == ETH_P_8021Q) {
+ memcpy(&vlan_eth, skb_in->data, sizeof(struct vlan_ethhdr));
+ if (ntohs(vlan_eth.h_vlan_encapsulated_proto) != ETH_P_IPV6)
+ return -1;
+ mac_header_data = &vlan_eth;
+ mac_header_len = VLAN_ETH_HLEN;
+ } else {
+ memcpy(&eth, skb_in->data, sizeof(struct ethhdr));
+ if (ntohs(eth.h_proto) != ETH_P_IPV6)
+ return -1;
+ mac_header_data = &eth;
+ mac_header_len = ETH_HLEN;
+ }
+
+ /* Check if this is IPv6 ICMP packet */
+ ipv6_in = (struct ipv6hdr *)(skb_in->data + mac_header_len);
+ if (ipv6_in->version != 6 || ipv6_in->nexthdr != IPPROTO_ICMPV6)
+ return -1;
+
+ /* Check if this is NDP packet */
+ icmp6_in = (struct icmp6hdr *)(skb_in->data + mac_header_len + sizeof(struct ipv6hdr));
+ if (icmp6_in->icmp6_type == NDISC_ROUTER_SOLICITATION) { /* Check RS */
+ return -1;
+ } else if (icmp6_in->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) { /* Check NS */
+ u8 icmp_na[sizeof(struct icmp6hdr) + sizeof(struct neighbour_advertisement)];
+ u8 zero_addr8[16] = {0,};
+
+ if (memcmp(ipv6_in->saddr.in6_u.u6_addr8, zero_addr8, 16) == 0)
+ /* Duplicate Address Detection: Source IP is all zero */
+ return 0;
+
+ icmp6_out.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
+ icmp6_out.icmp6_code = 0;
+ icmp6_out.icmp6_cksum = 0;
+ icmp6_out.icmp6_dataun.un_data32[0] = htonl(0x60000000); /* R=0, S=1, O=1 */
+
+ ns = (struct neighbour_solicitation *)(skb_in->data + mac_header_len + sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr));
+ memcpy(&na.target_address, ns->target_address, 16);
+ na.type = 0x02;
+ na.length = 1;
+ na.link_layer_address[0] = 0x00;
+ na.link_layer_address[1] = 0x0a;
+ na.link_layer_address[2] = 0x3b;
+ na.link_layer_address[3] = 0xaf;
+ na.link_layer_address[4] = 0x63;
+ na.link_layer_address[5] = 0xc7;
+
+ memcpy(&ipv6_out, ipv6_in, sizeof(struct ipv6hdr));
+ memcpy(ipv6_out.saddr.in6_u.u6_addr8, &na.target_address, 16);
+ memcpy(ipv6_out.daddr.in6_u.u6_addr8, ipv6_in->saddr.in6_u.u6_addr8, 16);
+ ipv6_out.payload_len = htons(sizeof(struct icmp6hdr) + sizeof(struct neighbour_advertisement));
+
+ memcpy(icmp_na, &icmp6_out, sizeof(struct icmp6hdr));
+ memcpy(icmp_na + sizeof(struct icmp6hdr), &na, sizeof(struct neighbour_advertisement));
+
+ icmp6_out.icmp6_cksum = icmp6_checksum(&ipv6_out, (u16 *)icmp_na, sizeof(icmp_na));
+ } else {
+ return -1;
+ }
+
+ /* Fill the destination mac with source mac of the received packet */
+ memcpy(mac_header_data, mac_header_data + ETH_ALEN, ETH_ALEN);
+ /* Fill the source mac with nic's source mac */
+ memcpy(mac_header_data + ETH_ALEN, nic->src_mac_addr, ETH_ALEN);
+
+ /* Alloc skb and reserve align */
+ skb_out = dev_alloc_skb(skb_in->len);
+ if (!skb_out)
+ return -ENOMEM;
+ skb_reserve(skb_out, NET_IP_ALIGN);
+
+ memcpy(skb_put(skb_out, mac_header_len), mac_header_data, mac_header_len);
+ memcpy(skb_put(skb_out, sizeof(struct ipv6hdr)), &ipv6_out, sizeof(struct ipv6hdr));
+ memcpy(skb_put(skb_out, sizeof(struct icmp6hdr)), &icmp6_out, sizeof(struct icmp6hdr));
+ memcpy(skb_put(skb_out, sizeof(struct neighbour_advertisement)), &na, sizeof(struct neighbour_advertisement));
+
+ skb_out->protocol = ((struct ethhdr *)mac_header_data)->h_proto;
+ skb_out->dev = skb_in->dev;
+ skb_reset_mac_header(skb_out);
+ skb_pull(skb_out, ETH_HLEN);
+
+ gdm_lte_rx(skb_out, nic, nic_type);
+
+ return 0;
+}
+
+static s32 gdm_lte_tx_nic_type(struct net_device *dev, struct sk_buff *skb)
+{
+ struct nic *nic = netdev_priv(dev);
+ struct ethhdr *eth;
+ struct vlan_ethhdr *vlan_eth;
+ struct iphdr *ip;
+ struct ipv6hdr *ipv6;
+ int mac_proto;
+ void *network_data;
+ u32 nic_type = 0;
+
+ /* NIC TYPE is based on the nic_id of this net_device */
+ nic_type = 0x00000010 | nic->nic_id;
+
+ /* Get ethernet protocol */
+ eth = (struct ethhdr *)skb->data;
+ if (ntohs(eth->h_proto) == ETH_P_8021Q) {
+ vlan_eth = (struct vlan_ethhdr *)skb->data;
+ mac_proto = ntohs(vlan_eth->h_vlan_encapsulated_proto);
+ network_data = skb->data + VLAN_ETH_HLEN;
+ nic_type |= NIC_TYPE_F_VLAN;
+ } else {
+ mac_proto = ntohs(eth->h_proto);
+ network_data = skb->data + ETH_HLEN;
+ }
+
+ /* Process packet for nic type */
+ switch (mac_proto) {
+ case ETH_P_ARP:
+ nic_type |= NIC_TYPE_ARP;
+ break;
+ case ETH_P_IP:
+ nic_type |= NIC_TYPE_F_IPV4;
+ ip = (struct iphdr *)network_data;
+
+ /* Check DHCPv4 */
+ if (ip->protocol == IPPROTO_UDP) {
+ struct udphdr *udp = (struct udphdr *)(network_data + sizeof(struct iphdr));
+ if (ntohs(udp->dest) == 67 || ntohs(udp->dest) == 68)
+ nic_type |= NIC_TYPE_F_DHCP;
+ }
+ break;
+ case ETH_P_IPV6:
+ nic_type |= NIC_TYPE_F_IPV6;
+ ipv6 = (struct ipv6hdr *)network_data;
+
+ if (ipv6->nexthdr == IPPROTO_ICMPV6) /* Check NDP request */ {
+ struct icmp6hdr *icmp6 = (struct icmp6hdr *)(network_data + sizeof(struct ipv6hdr));
+ if (/*icmp6->icmp6_type == NDISC_ROUTER_SOLICITATION || */
+ icmp6->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
+ nic_type |= NIC_TYPE_ICMPV6;
+ } else if (ipv6->nexthdr == IPPROTO_UDP) /* Check DHCPv6 */ {
+ struct udphdr *udp = (struct udphdr *)(network_data + sizeof(struct ipv6hdr));
+ if (ntohs(udp->dest) == 546 || ntohs(udp->dest) == 547)
+ nic_type |= NIC_TYPE_F_DHCP;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return nic_type;
+}
+
+static int gdm_lte_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct nic *nic = netdev_priv(dev);
+ u32 nic_type;
+ void *data_buf;
+ int data_len;
+ int idx;
+ int ret = 0;
+
+ nic_type = gdm_lte_tx_nic_type(dev, skb);
+ if (nic_type == 0) {
+ netdev_err(dev, "tx - invalid nic_type\n");
+ return -1;
+ }
+
+ if (nic_type & NIC_TYPE_ARP) {
+ if (gdm_lte_emulate_arp(skb, nic_type) == 0) {
+ dev_kfree_skb(skb);
+ return 0;
+ }
+ }
+
+ if (nic_type & NIC_TYPE_ICMPV6) {
+ if (gdm_lte_emulate_ndp(skb, nic_type) == 0) {
+ dev_kfree_skb(skb);
+ return 0;
+ }
+ }
+
+ /*
+  * Need to byte shift (that is, remove the VLAN tag) if there is one.
+  * For ARP this breaks the offset, as vlan_ethhdr+4 is treated as ethhdr;
+  * however, that is not a problem because the response starts from arp_hdr
+  * and the ethhdr is created by this driver based on the NIC mac.
+  */
+ if (nic_type & NIC_TYPE_F_VLAN) {
+ struct vlan_ethhdr *vlan_eth = (struct vlan_ethhdr *)skb->data;
+ nic->vlan_id = ntohs(vlan_eth->h_vlan_TCI) & VLAN_VID_MASK;
+ data_buf = skb->data + (VLAN_ETH_HLEN - ETH_HLEN);
+ data_len = skb->len - (VLAN_ETH_HLEN - ETH_HLEN);
+ } else {
+ nic->vlan_id = 0;
+ data_buf = skb->data;
+ data_len = skb->len;
+ }
+
+ /* If it is a ICMPV6 packet, clear all the other bits : for backward compatibility with the firmware */
+ if (nic_type & NIC_TYPE_ICMPV6)
+ nic_type = NIC_TYPE_ICMPV6;
+
+ /* If it is not a dhcp packet, clear all the flag bits : original NIC, otherwise the special flag (IPVX | DHCP) */
+ if (!(nic_type & NIC_TYPE_F_DHCP))
+ nic_type &= NIC_TYPE_MASK;
+
+ sscanf(dev->name, "lte%d", &idx);
+
+ ret = gdm_lte_sdu_send(nic,
+ data_buf,
+ data_len,
+ tx_complete,
+ nic,
+ idx,
+ nic_type);
+
+ if (ret == TX_NO_BUFFER || ret == TX_NO_SPC) {
+ netif_stop_queue(dev);
+ if (ret == TX_NO_BUFFER)
+ ret = 0;
+ else
+ ret = -ENOSPC;
+ } else if (ret == TX_NO_DEV) {
+ ret = -ENODEV;
+ }
+
+ /* Updates tx stats */
+ if (ret) {
+ nic->stats.tx_dropped++;
+ } else {
+ nic->stats.tx_packets++;
+ nic->stats.tx_bytes += data_len;
+ }
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static struct net_device_stats *gdm_lte_stats(struct net_device *dev)
+{
+ struct nic *nic = netdev_priv(dev);
+ return &nic->stats;
+}
+
+static int gdm_lte_event_send(struct net_device *dev, char *buf, int len)
+{
+ struct nic *nic = netdev_priv(dev);
+ struct hci_packet *hci = (struct hci_packet *)buf;
+ int idx;
+
+ sscanf(dev->name, "lte%d", &idx);
+
+ return netlink_send(lte_event.sock, idx, 0, buf,
+ gdm_dev16_to_cpu(gdm_dev_endian(nic), hci->len) + HCI_HEADER_SIZE);
+}
+
+static void gdm_lte_event_rcv(struct net_device *dev, u16 type, void *msg, int len)
+{
+ struct nic *nic = netdev_priv(dev);
+
+ gdm_lte_hci_send(nic, msg, len);
+}
+
+int gdm_lte_event_init(void)
+{
+ if (lte_event.ref_cnt == 0)
+ lte_event.sock = netlink_init(NETLINK_LTE, gdm_lte_event_rcv);
+
+ if (lte_event.sock) {
+ lte_event.ref_cnt++;
+ return 0;
+ }
+
+ pr_err("event init failed\n");
+ return -1;
+}
+
+void gdm_lte_event_exit(void)
+{
+ if (lte_event.sock && --lte_event.ref_cnt == 0) {
+ netlink_exit(lte_event.sock);
+ lte_event.sock = NULL;
+ }
+}
+
+static u8 find_dev_index(u32 nic_type)
+{
+ u8 index;
+
+ index = (u8)(nic_type & 0x0000000f);
+ if (index >= MAX_NIC_TYPE)
+ index = 0;
+
+ return index;
+}
+
+static void gdm_lte_netif_rx(struct net_device *dev, char *buf, int len, int flagged_nic_type)
+{
+ u32 nic_type;
+ struct nic *nic;
+ struct sk_buff *skb;
+ struct ethhdr eth;
+ struct vlan_ethhdr vlan_eth;
+ void *mac_header_data;
+ u32 mac_header_len;
+ char ip_version = 0;
+
+ nic_type = flagged_nic_type & NIC_TYPE_MASK;
+ nic = netdev_priv(dev);
+
+ if (flagged_nic_type & NIC_TYPE_F_DHCP) {
+ /* Replace the destination MAC address with that of the client which requested the IP */
+ if (flagged_nic_type & NIC_TYPE_F_IPV4) {
+ struct dhcp_packet {
+ u8 op; /* BOOTREQUEST or BOOTREPLY */
+ u8 htype; /* hardware address type. 1 = 10mb ethernet */
+ u8 hlen; /* hardware address length */
+ u8 hops; /* used by relay agents only */
+ u32 xid; /* unique id */
+ u16 secs; /* elapsed since client began acquisition/renewal */
+ u16 flags; /* only one flag so far: */
+ #define BROADCAST_FLAG 0x8000 /* "I need broadcast replies" */
+ u32 ciaddr; /* client IP (if client is in BOUND, RENEW or REBINDING state) */
+ u32 yiaddr; /* 'your' (client) IP address */
+ /* IP address of next server to use in bootstrap, returned in DHCPOFFER, DHCPACK by server */
+ u32 siaddr_nip;
+ u32 gateway_nip; /* relay agent IP address */
+ u8 chaddr[16]; /* link-layer client hardware address (MAC) */
+ u8 sname[64]; /* server host name (ASCIZ) */
+ u8 file[128]; /* boot file name (ASCIZ) */
+ u32 cookie; /* fixed first four option bytes (99,130,83,99 dec) */
+ } __packed;
+ void *addr = buf + sizeof(struct iphdr) + sizeof(struct udphdr) + offsetof(struct dhcp_packet, chaddr);
+ memcpy(nic->dest_mac_addr, addr, ETH_ALEN);
+ }
+ }
+
+ if (nic->vlan_id > 0) {
+ mac_header_data = (void *)&vlan_eth;
+ mac_header_len = VLAN_ETH_HLEN;
+ } else {
+ mac_header_data = (void *)&eth;
+ mac_header_len = ETH_HLEN;
+ }
+
+ /* Format the data so that it can be put to skb */
+ memcpy(mac_header_data, nic->dest_mac_addr, ETH_ALEN);
+ memcpy(mac_header_data + ETH_ALEN, nic->src_mac_addr, ETH_ALEN);
+
+ vlan_eth.h_vlan_TCI = htons(nic->vlan_id);
+ vlan_eth.h_vlan_proto = htons(ETH_P_8021Q);
+
+ if (nic_type == NIC_TYPE_ARP) {
+ /* This must be an ARP response; it only arrives in reply to a request from the host */
+ eth.h_proto = htons(ETH_P_ARP);
+ vlan_eth.h_vlan_encapsulated_proto = htons(ETH_P_ARP);
+ } else {
+ ip_version = buf[0] >> 4;
+ if (ip_version == IP_VERSION_4) {
+ eth.h_proto = htons(ETH_P_IP);
+ vlan_eth.h_vlan_encapsulated_proto = htons(ETH_P_IP);
+ } else if (ip_version == IP_VERSION_6) {
+ eth.h_proto = htons(ETH_P_IPV6);
+ vlan_eth.h_vlan_encapsulated_proto = htons(ETH_P_IPV6);
+ } else {
+ netdev_err(dev, "Unknown IP version %d\n", ip_version);
+ return;
+ }
+ }
+
+ /* Alloc skb and reserve align */
+ skb = dev_alloc_skb(len + mac_header_len + NET_IP_ALIGN);
+ if (!skb)
+ return;
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ memcpy(skb_put(skb, mac_header_len), mac_header_data, mac_header_len);
+ memcpy(skb_put(skb, len), buf, len);
+
+ skb->protocol = ((struct ethhdr *)mac_header_data)->h_proto;
+ skb->dev = dev;
+ skb_reset_mac_header(skb);
+ skb_pull(skb, ETH_HLEN);
+
+ gdm_lte_rx(skb, nic, nic_type);
+}
+
+static void gdm_lte_multi_sdu_pkt(struct phy_dev *phy_dev, char *buf, int len)
+{
+ struct net_device *dev;
+ struct multi_sdu *multi_sdu = (struct multi_sdu *)buf;
+ struct sdu *sdu = NULL;
+ u8 *data = (u8 *)multi_sdu->data;
+ u16 i = 0;
+ u16 num_packet;
+ u16 hci_len;
+ u16 cmd_evt;
+ u32 nic_type;
+ u8 index;
+
+ hci_len = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), multi_sdu->len);
+ num_packet = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), multi_sdu->num_packet);
+
+ for (i = 0; i < num_packet; i++) {
+ sdu = (struct sdu *)data;
+
+ cmd_evt = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), sdu->cmd_evt);
+ hci_len = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), sdu->len);
+ nic_type = gdm_dev32_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), sdu->nic_type);
+
+ if (cmd_evt != LTE_RX_SDU) {
+ pr_err("rx sdu wrong hci %04x\n", cmd_evt);
+ return;
+ }
+ if (hci_len < 12) {
+ pr_err("rx sdu invalid len %d\n", hci_len);
+ return;
+ }
+
+ index = find_dev_index(nic_type);
+ if (index < MAX_NIC_TYPE) {
+ dev = phy_dev->dev[index];
+ gdm_lte_netif_rx(dev, (char *)sdu->data, (int)(hci_len-12), nic_type);
+ } else {
+ pr_err("rx sdu invalid nic_type :%x\n", nic_type);
+ }
+
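+ /* SDUs are 4-byte aligned inside the multi-SDU frame; advance past the padded payload plus the HCI header */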
+ data += ((hci_len+3) & 0xfffc) + HCI_HEADER_SIZE;
+ }
+}
+
+static void gdm_lte_pdn_table(struct net_device *dev, char *buf, int len)
+{
+ struct nic *nic = netdev_priv(dev);
+ struct hci_pdn_table_ind *pdn_table = (struct hci_pdn_table_ind *)buf;
+
+ if (pdn_table->activate) {
+ nic->pdn_table.activate = pdn_table->activate;
+ nic->pdn_table.dft_eps_id = gdm_dev32_to_cpu(gdm_dev_endian(nic), pdn_table->dft_eps_id);
+ nic->pdn_table.nic_type = gdm_dev32_to_cpu(gdm_dev_endian(nic), pdn_table->nic_type);
+
+ netdev_info(dev, "pdn activated, nic_type=0x%x\n",
+ nic->pdn_table.nic_type);
+ } else {
+ memset(&nic->pdn_table, 0x00, sizeof(struct pdn_table));
+ netdev_info(dev, "pdn deactivated\n");
+ }
+}
+
+static int gdm_lte_receive_pkt(struct phy_dev *phy_dev, char *buf, int len)
+{
+ struct hci_packet *hci = (struct hci_packet *)buf;
+ struct hci_pdn_table_ind *pdn_table = (struct hci_pdn_table_ind *)buf;
+ struct sdu *sdu;
+ struct net_device *dev;
+ int ret = 0;
+ u16 cmd_evt;
+ u32 nic_type;
+ u8 index;
+
+ if (!len)
+ return ret;
+
+ cmd_evt = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), hci->cmd_evt);
+
+ dev = phy_dev->dev[0];
+ if (dev == NULL)
+ return 0;
+
+ switch (cmd_evt) {
+ case LTE_RX_SDU:
+ sdu = (struct sdu *)hci->data;
+ nic_type = gdm_dev32_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), sdu->nic_type);
+ index = find_dev_index(nic_type);
+ dev = phy_dev->dev[index];
+ gdm_lte_netif_rx(dev, hci->data, len, nic_type);
+ break;
+ case LTE_RX_MULTI_SDU:
+ gdm_lte_multi_sdu_pkt(phy_dev, buf, len);
+ break;
+ case LTE_LINK_ON_OFF_INDICATION:
+ netdev_info(dev, "link %s\n",
+ ((struct hci_connect_ind *)buf)->connect
+ ? "on" : "off");
+ break;
+ case LTE_PDN_TABLE_IND:
+ pdn_table = (struct hci_pdn_table_ind *)buf;
+ nic_type = gdm_dev32_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), pdn_table->nic_type);
+ index = find_dev_index(nic_type);
+ dev = phy_dev->dev[index];
+ gdm_lte_pdn_table(dev, buf, len);
+ /* Fall through */
+ default:
+ ret = gdm_lte_event_send(dev, buf, len);
+ break;
+ }
+
+ return ret;
+}
+
+static int rx_complete(void *arg, void *data, int len, int context)
+{
+ struct phy_dev *phy_dev = (struct phy_dev *)arg;
+
+ return gdm_lte_receive_pkt(phy_dev, (char *)data, len);
+}
+
+void start_rx_proc(struct phy_dev *phy_dev)
+{
+ int i;
+
+ for (i = 0; i < MAX_RX_SUBMIT_COUNT; i++)
+ gdm_lte_rcv_with_cb(phy_dev, rx_complete, phy_dev, USB_COMPLETE);
+}
+
+static struct net_device_ops gdm_netdev_ops = {
+ .ndo_open = gdm_lte_open,
+ .ndo_stop = gdm_lte_close,
+ .ndo_set_config = gdm_lte_set_config,
+ .ndo_start_xmit = gdm_lte_tx,
+ .ndo_get_stats = gdm_lte_stats,
+};
+
+static u8 gdm_lte_macaddr[ETH_ALEN] = {0x00, 0x0a, 0x3b, 0x00, 0x00, 0x00};
+
+static void form_mac_address(u8 *dev_addr, u8 *nic_src, u8 *nic_dest, u8 *mac_address, u8 index)
+{
+ /* Form the dev_addr */
+ if (!mac_address)
+ memcpy(dev_addr, gdm_lte_macaddr, ETH_ALEN);
+ else
+ memcpy(dev_addr, mac_address, ETH_ALEN);
+
+ /* The last byte of the base MAC must be <= 0xFC so that adding the nic index (0..MAX_NIC_TYPE-1) cannot overflow it */
+ dev_addr[ETH_ALEN-1] += index;
+
+ /* Create random nic src and copy the first 3 bytes to be the same as dev_addr */
+ random_ether_addr(nic_src);
+ memcpy(nic_src, dev_addr, 3);
+
+ /* Copy the nic_dest from dev_addr */
+ memcpy(nic_dest, dev_addr, ETH_ALEN);
+}
+
+static void validate_mac_address(u8 *mac_address)
+{
+ /* if zero address or multicast bit set, restore the default value */
+ if (is_zero_ether_addr(mac_address) || (mac_address[0] & 0x01)) {
+ pr_err("MAC invalid, restoring default\n");
+ memcpy(mac_address, gdm_lte_macaddr, ETH_ALEN);
+ }
+}
+
+int register_lte_device(struct phy_dev *phy_dev, struct device *dev, u8 *mac_address)
+{
+ struct nic *nic;
+ struct net_device *net;
+ char pdn_dev_name[16];
+ int ret = 0;
+ u8 index;
+
+ validate_mac_address(mac_address);
+
+ for (index = 0; index < MAX_NIC_TYPE; index++) {
+ /* Create device name lteXpdnX */
+ sprintf(pdn_dev_name, "lte%%dpdn%d", index);
+
+ /* Allocate netdev */
+ net = alloc_netdev(sizeof(struct nic), pdn_dev_name, ether_setup);
+ if (net == NULL) {
+ pr_err("alloc_netdev failed\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ net->netdev_ops = &gdm_netdev_ops;
+ net->flags &= ~IFF_MULTICAST;
+ net->mtu = DEFAULT_MTU_SIZE;
+
+ nic = netdev_priv(net);
+ memset(nic, 0, sizeof(struct nic));
+ nic->netdev = net;
+ nic->phy_dev = phy_dev;
+ nic->nic_id = index;
+
+ form_mac_address(
+ net->dev_addr,
+ nic->src_mac_addr,
+ nic->dest_mac_addr,
+ mac_address,
+ index);
+
+ SET_NETDEV_DEV(net, dev);
+ SET_NETDEV_DEVTYPE(net, &wwan_type);
+
+ ret = register_netdev(net);
+ if (ret)
+ goto err;
+
+ netif_carrier_on(net);
+
+ phy_dev->dev[index] = net;
+ }
+
+ return 0;
+
+err:
+ unregister_lte_device(phy_dev);
+
+ return ret;
+}
+
+void unregister_lte_device(struct phy_dev *phy_dev)
+{
+ struct net_device *net;
+ int index;
+
+ for (index = 0; index < MAX_NIC_TYPE; index++) {
+ net = phy_dev->dev[index];
+ if (net == NULL)
+ continue;
+
+ unregister_netdev(net);
+ free_netdev(net);
+ }
+}
diff --git a/drivers/staging/gdm724x/gdm_lte.h b/drivers/staging/gdm724x/gdm_lte.h
new file mode 100644
index 0000000..9287d31
--- /dev/null
+++ b/drivers/staging/gdm724x/gdm_lte.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _GDM_LTE_H_
+#define _GDM_LTE_H_
+
+#include <linux/netdevice.h>
+#include <linux/types.h>
+
+#include "gdm_endian.h"
+
+#define MAX_NIC_TYPE 4
+#define MAX_RX_SUBMIT_COUNT 3
+#define DRIVER_VERSION "3.7.17.0"
+
+enum TX_ERROR_CODE {
+ TX_NO_ERROR = 0,
+ TX_NO_DEV,
+ TX_NO_SPC,
+ TX_NO_BUFFER,
+};
+
+enum CALLBACK_CONTEXT {
+ KERNEL_THREAD = 0,
+ USB_COMPLETE,
+};
+
+struct pdn_table {
+ u8 activate;
+ u32 dft_eps_id;
+ u32 nic_type;
+} __packed;
+
+struct nic;
+
+struct phy_dev {
+ void *priv_dev;
+ struct net_device *dev[MAX_NIC_TYPE];
+ int (*send_hci_func)(void *priv_dev, void *data, int len,
+ void (*cb)(void *cb_data), void *cb_data);
+ int (*send_sdu_func)(void *priv_dev, void *data, int len,
+ unsigned int dftEpsId, unsigned int epsId,
+ void (*cb)(void *cb_data), void *cb_data,
+ int dev_idx, int nic_type);
+ int (*rcv_func)(void *priv_dev,
+ int (*cb)(void *cb_data, void *data, int len,
+ int context),
+ void *cb_data, int context);
+ struct gdm_endian *(*get_endian)(void *priv_dev);
+};
+
+struct nic {
+ struct net_device *netdev;
+ struct phy_dev *phy_dev;
+ struct net_device_stats stats;
+ struct pdn_table pdn_table;
+ u8 dest_mac_addr[ETH_ALEN];
+ u8 src_mac_addr[ETH_ALEN];
+ u32 nic_id;
+ u16 vlan_id;
+};
+
+int gdm_lte_event_init(void);
+void gdm_lte_event_exit(void);
+
+void start_rx_proc(struct phy_dev *phy_dev);
+int register_lte_device(struct phy_dev *phy_dev, struct device *dev,
+ u8 *mac_address);
+void unregister_lte_device(struct phy_dev *phy_dev);
+
+#endif /* _GDM_LTE_H_ */
diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
new file mode 100644
index 0000000..5b1ef40
--- /dev/null
+++ b/drivers/staging/gdm724x/gdm_mux.c
@@ -0,0 +1,690 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/usb.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/slab.h>
+#include <linux/usb/cdc.h>
+
+#include "gdm_mux.h"
+
+struct workqueue_struct *mux_rx_wq;
+
+static u16 packet_type[TTY_MAX_COUNT] = {0xF011, 0xF010};
+
+#define USB_DEVICE_CDC_DATA(vid, pid) \
+ .match_flags = \
+ USB_DEVICE_ID_MATCH_DEVICE |\
+ USB_DEVICE_ID_MATCH_INT_CLASS |\
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
+ .idVendor = vid,\
+ .idProduct = pid,\
+ .bInterfaceClass = USB_CLASS_COMM,\
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_ACM
+
+static const struct usb_device_id id_table[] = {
+ { USB_DEVICE_CDC_DATA(0x1076, 0x8000) }, /* GCT GDM7240 */
+ { USB_DEVICE_CDC_DATA(0x1076, 0x8f00) }, /* GCT GDM7243 */
+ { USB_DEVICE_CDC_DATA(0x1076, 0x9000) }, /* GCT GDM7243 */
+ { USB_DEVICE_CDC_DATA(0x1d74, 0x2300) }, /* LGIT Phoenix */
+ {}
+};
+
+MODULE_DEVICE_TABLE(usb, id_table);
+
+int packet_type_to_index(u16 packetType)
+{
+ int i;
+
+ for (i = 0; i < TTY_MAX_COUNT; i++) {
+ if (packet_type[i] == packetType)
+ return i;
+ }
+
+ return -1;
+}
+
+static struct mux_tx *alloc_mux_tx(int len)
+{
+ struct mux_tx *t = NULL;
+
+ t = kzalloc(sizeof(struct mux_tx), GFP_ATOMIC);
+ if (!t)
+ return NULL;
+
+ t->urb = usb_alloc_urb(0, GFP_ATOMIC);
+ t->buf = kmalloc(MUX_TX_MAX_SIZE, GFP_ATOMIC);
+ if (!t->urb || !t->buf) {
+ usb_free_urb(t->urb);
+ kfree(t->buf);
+ kfree(t);
+ return NULL;
+ }
+
+ return t;
+}
+
+static void free_mux_tx(struct mux_tx *t)
+{
+ if (t) {
+ usb_free_urb(t->urb);
+ kfree(t->buf);
+ kfree(t);
+ }
+}
+
+static struct mux_rx *alloc_mux_rx(void)
+{
+ struct mux_rx *r = NULL;
+
+ r = kzalloc(sizeof(struct mux_rx), GFP_ATOMIC);
+ if (!r)
+ return NULL;
+
+ r->urb = usb_alloc_urb(0, GFP_ATOMIC);
+ r->buf = kmalloc(MUX_RX_MAX_SIZE, GFP_ATOMIC);
+ if (!r->urb || !r->buf) {
+ usb_free_urb(r->urb);
+ kfree(r->buf);
+ kfree(r);
+ return NULL;
+ }
+
+ return r;
+}
+
+static void free_mux_rx(struct mux_rx *r)
+{
+ if (r) {
+ usb_free_urb(r->urb);
+ kfree(r->buf);
+ kfree(r);
+ }
+}
+
+static struct mux_rx *get_rx_struct(struct rx_cxt *rx)
+{
+ struct mux_rx *r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rx->free_list_lock, flags);
+
+ if (list_empty(&rx->rx_free_list)) {
+ spin_unlock_irqrestore(&rx->free_list_lock, flags);
+ return NULL;
+ }
+
+ r = list_entry(rx->rx_free_list.prev, struct mux_rx, free_list);
+ list_del(&r->free_list);
+
+ spin_unlock_irqrestore(&rx->free_list_lock, flags);
+
+ return r;
+}
+
+static void put_rx_struct(struct rx_cxt *rx, struct mux_rx *r)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rx->free_list_lock, flags);
+ list_add_tail(&r->free_list, &rx->rx_free_list);
+ spin_unlock_irqrestore(&rx->free_list_lock, flags);
+}
+
+static int up_to_host(struct mux_rx *r)
+{
+ struct mux_dev *mux_dev = (struct mux_dev *)r->mux_dev;
+ struct mux_pkt_header *mux_header;
+ unsigned int start_flag;
+ unsigned int payload_size;
+ unsigned short packet_type;
+ int remain;
+ int dummy_cnt;
+ u32 packet_size_sum = r->offset;
+ int index;
+ int ret = TO_HOST_INVALID_PACKET;
+ int len = r->len;
+
+ while (1) {
+ mux_header = (struct mux_pkt_header *)(r->buf + packet_size_sum);
+ start_flag = __le32_to_cpu(mux_header->start_flag);
+ payload_size = __le32_to_cpu(mux_header->payload_size);
+ packet_type = __le16_to_cpu(mux_header->packet_type);
+
+ if (start_flag != START_FLAG) {
+ pr_err("invalid START_FLAG %x\n", start_flag);
+ break;
+ }
+
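+ /* Mux packets are padded to a 4-byte boundary; dummy_cnt is the pad that follows this payload */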
+ remain = (MUX_HEADER_SIZE + payload_size) % 4;
+ dummy_cnt = remain ? (4-remain) : 0;
+
+ if (len - packet_size_sum <
+ MUX_HEADER_SIZE + payload_size + dummy_cnt) {
+ pr_err("invalid payload : %d %d %04x\n",
+ payload_size, len, packet_type);
+ break;
+ }
+
+ index = packet_type_to_index(packet_type);
+ if (index < 0) {
+ pr_err("invalid index %d\n", index);
+ break;
+ }
+
+ ret = r->callback(mux_header->data,
+ payload_size,
+ index,
+ mux_dev->tty_dev,
+ RECV_PACKET_PROCESS_CONTINUE
+ );
+ if (ret == TO_HOST_BUFFER_REQUEST_FAIL) {
+ r->offset += packet_size_sum;
+ break;
+ }
+
+ packet_size_sum += MUX_HEADER_SIZE + payload_size + dummy_cnt;
+ if (len - packet_size_sum <= MUX_HEADER_SIZE + 2) {
+ ret = r->callback(NULL,
+ 0,
+ index,
+ mux_dev->tty_dev,
+ RECV_PACKET_PROCESS_COMPLETE
+ );
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void do_rx(struct work_struct *work)
+{
+ struct mux_dev *mux_dev =
+ container_of(work, struct mux_dev, work_rx.work);
+ struct mux_rx *r;
+ struct rx_cxt *rx = (struct rx_cxt *)&mux_dev->rx;
+ unsigned long flags;
+ int ret = 0;
+
+ while (1) {
+ spin_lock_irqsave(&rx->to_host_lock, flags);
+ if (list_empty(&rx->to_host_list)) {
+ spin_unlock_irqrestore(&rx->to_host_lock, flags);
+ break;
+ }
+ r = list_entry(rx->to_host_list.next, struct mux_rx, to_host_list);
+ list_del(&r->to_host_list);
+ spin_unlock_irqrestore(&rx->to_host_lock, flags);
+
+ ret = up_to_host(r);
+ if (ret == TO_HOST_BUFFER_REQUEST_FAIL)
+ pr_err("failed to send mux data to host\n");
+ else
+ put_rx_struct(rx, r);
+ }
+}
+
+static void remove_rx_submit_list(struct mux_rx *r, struct rx_cxt *rx)
+{
+ unsigned long flags;
+ struct mux_rx *r_remove, *r_remove_next;
+
+ spin_lock_irqsave(&rx->submit_list_lock, flags);
+ list_for_each_entry_safe(r_remove, r_remove_next, &rx->rx_submit_list, rx_submit_list) {
+ if (r == r_remove)
+ list_del(&r->rx_submit_list);
+ }
+ spin_unlock_irqrestore(&rx->submit_list_lock, flags);
+}
+
+static void gdm_mux_rcv_complete(struct urb *urb)
+{
+ struct mux_rx *r = urb->context;
+ struct mux_dev *mux_dev = (struct mux_dev *)r->mux_dev;
+ struct rx_cxt *rx = &mux_dev->rx;
+ unsigned long flags;
+
+ remove_rx_submit_list(r, rx);
+
+ if (urb->status) {
+ if (mux_dev->usb_state == PM_NORMAL)
+ pr_err("%s: urb status error %d\n",
+ __func__, urb->status);
+ put_rx_struct(rx, r);
+ } else {
+ r->len = r->urb->actual_length;
+ spin_lock_irqsave(&rx->to_host_lock, flags);
+ list_add_tail(&r->to_host_list, &rx->to_host_list);
+ queue_work(mux_rx_wq, &mux_dev->work_rx.work);
+ spin_unlock_irqrestore(&rx->to_host_lock, flags);
+ }
+}
+
+static int gdm_mux_recv(void *priv_dev,
+ int (*cb)(void *data, int len, int tty_index, struct tty_dev *tty_dev, int complete)
+ )
+{
+ struct mux_dev *mux_dev = priv_dev;
+ struct usb_device *usbdev = mux_dev->usbdev;
+ struct mux_rx *r;
+ struct rx_cxt *rx = &mux_dev->rx;
+ unsigned long flags;
+ int ret;
+
+ if (!usbdev) {
+ pr_err("device is disconnected\n");
+ return -ENODEV;
+ }
+
+ r = get_rx_struct(rx);
+ if (!r) {
+ pr_err("get_rx_struct fail\n");
+ return -ENOMEM;
+ }
+
+ r->offset = 0;
+ r->mux_dev = (void *)mux_dev;
+ r->callback = cb;
+ mux_dev->rx_cb = cb;
+
+ usb_fill_bulk_urb(r->urb,
+ usbdev,
+ usb_rcvbulkpipe(usbdev, 0x86),
+ r->buf,
+ MUX_RX_MAX_SIZE,
+ gdm_mux_rcv_complete,
+ r);
+
+ spin_lock_irqsave(&rx->submit_list_lock, flags);
+ list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
+ spin_unlock_irqrestore(&rx->submit_list_lock, flags);
+
+ ret = usb_submit_urb(r->urb, GFP_KERNEL);
+
+ if (ret) {
+ spin_lock_irqsave(&rx->submit_list_lock, flags);
+ list_del(&r->rx_submit_list);
+ spin_unlock_irqrestore(&rx->submit_list_lock, flags);
+
+ put_rx_struct(rx, r);
+
+ pr_err("usb_submit_urb ret=%d\n", ret);
+ }
+
+ usb_mark_last_busy(usbdev);
+
+ return ret;
+}
+
+static void gdm_mux_send_complete(struct urb *urb)
+{
+ struct mux_tx *t = urb->context;
+
+ if (urb->status == -ECONNRESET) {
+ pr_info("CONNRESET\n");
+ free_mux_tx(t);
+ return;
+ }
+
+ if (t->callback)
+ t->callback(t->cb_data);
+
+ free_mux_tx(t);
+}
+
+static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
+ void (*cb)(void *data), void *cb_data)
+{
+ struct mux_dev *mux_dev = priv_dev;
+ struct usb_device *usbdev = mux_dev->usbdev;
+ struct mux_pkt_header *mux_header;
+ struct mux_tx *t = NULL;
+ static u32 seq_num = 1;
+ int remain;
+ int dummy_cnt;
+ int total_len;
+ int ret;
+ unsigned long flags;
+
+ if (mux_dev->usb_state == PM_SUSPEND) {
+ ret = usb_autopm_get_interface(mux_dev->intf);
+ if (!ret)
+ usb_autopm_put_interface(mux_dev->intf);
+ }
+
+ spin_lock_irqsave(&mux_dev->write_lock, flags);
+
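+ /* Pad the mux packet to a 4-byte boundary before handing it to the bulk endpoint */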
+ remain = (MUX_HEADER_SIZE + len) % 4;
+ dummy_cnt = remain ? (4 - remain) : 0;
+
+ total_len = len + MUX_HEADER_SIZE + dummy_cnt;
+
+ t = alloc_mux_tx(total_len);
+ if (!t) {
+ pr_err("alloc_mux_tx fail\n");
+ spin_unlock_irqrestore(&mux_dev->write_lock, flags);
+ return -ENOMEM;
+ }
+
+ mux_header = (struct mux_pkt_header *)t->buf;
+ mux_header->start_flag = __cpu_to_le32(START_FLAG);
+ mux_header->seq_num = __cpu_to_le32(seq_num++);
+ mux_header->payload_size = __cpu_to_le32((u32)len);
+ mux_header->packet_type = __cpu_to_le16(packet_type[tty_index]);
+
+ memcpy(t->buf+MUX_HEADER_SIZE, data, len);
+ memset(t->buf+MUX_HEADER_SIZE+len, 0, dummy_cnt);
+
+ t->len = total_len;
+ t->callback = cb;
+ t->cb_data = cb_data;
+
+ usb_fill_bulk_urb(t->urb,
+ usbdev,
+ usb_sndbulkpipe(usbdev, 5),
+ t->buf,
+ total_len,
+ gdm_mux_send_complete,
+ t);
+
+ ret = usb_submit_urb(t->urb, GFP_ATOMIC);
+
+ spin_unlock_irqrestore(&mux_dev->write_lock, flags);
+
+ if (ret)
+ pr_err("usb_submit_urb Error: %d\n", ret);
+
+ usb_mark_last_busy(usbdev);
+
+ return ret;
+}
+
+static int gdm_mux_send_control(void *priv_dev, int request, int value, void *buf, int len)
+{
+ struct mux_dev *mux_dev = priv_dev;
+ struct usb_device *usbdev = mux_dev->usbdev;
+ int ret;
+
+ ret = usb_control_msg(usbdev,
+ usb_sndctrlpipe(usbdev, 0),
+ request,
+ USB_RT_ACM,
+ value,
+ 2,
+ buf,
+ len,
+ 5000
+ );
+
+ if (ret < 0)
+ pr_err("usb_control_msg error: %d\n", ret);
+
+ return ret < 0 ? ret : 0;
+}
+
+static void release_usb(struct mux_dev *mux_dev)
+{
+ struct rx_cxt *rx = &mux_dev->rx;
+ struct mux_rx *r, *r_next;
+ unsigned long flags;
+
+ cancel_delayed_work(&mux_dev->work_rx);
+
+ spin_lock_irqsave(&rx->submit_list_lock, flags);
+ list_for_each_entry_safe(r, r_next, &rx->rx_submit_list, rx_submit_list) {
+ spin_unlock_irqrestore(&rx->submit_list_lock, flags);
+ usb_kill_urb(r->urb);
+ spin_lock_irqsave(&rx->submit_list_lock, flags);
+ }
+ spin_unlock_irqrestore(&rx->submit_list_lock, flags);
+
+ spin_lock_irqsave(&rx->free_list_lock, flags);
+ list_for_each_entry_safe(r, r_next, &rx->rx_free_list, free_list) {
+ list_del(&r->free_list);
+ free_mux_rx(r);
+ }
+ spin_unlock_irqrestore(&rx->free_list_lock, flags);
+
+ spin_lock_irqsave(&rx->to_host_lock, flags);
+ list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
+ if (r->mux_dev == (void *)mux_dev) {
+ list_del(&r->to_host_list);
+ free_mux_rx(r);
+ }
+ }
+ spin_unlock_irqrestore(&rx->to_host_lock, flags);
+}
+
+static int init_usb(struct mux_dev *mux_dev)
+{
+ struct mux_rx *r;
+ struct rx_cxt *rx = &mux_dev->rx;
+ int ret = 0;
+ int i;
+
+ spin_lock_init(&mux_dev->write_lock);
+ INIT_LIST_HEAD(&rx->to_host_list);
+ INIT_LIST_HEAD(&rx->rx_submit_list);
+ INIT_LIST_HEAD(&rx->rx_free_list);
+ spin_lock_init(&rx->to_host_lock);
+ spin_lock_init(&rx->submit_list_lock);
+ spin_lock_init(&rx->free_list_lock);
+
+ for (i = 0; i < MAX_ISSUE_NUM * 2; i++) {
+ r = alloc_mux_rx();
+ if (r == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ list_add(&r->free_list, &rx->rx_free_list);
+ }
+
+ INIT_DELAYED_WORK(&mux_dev->work_rx, do_rx);
+
+ return ret;
+}
+
+static int gdm_mux_probe(struct usb_interface *intf, const struct usb_device_id *id)
+{
+ struct mux_dev *mux_dev;
+ struct tty_dev *tty_dev;
+ u16 idVendor, idProduct;
+ int bInterfaceNumber;
+ int ret;
+ int i;
+ struct usb_device *usbdev = interface_to_usbdev(intf);
+ bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;
+
+ idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
+ idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);
+
+ pr_info("mux vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);
+
+ if (bInterfaceNumber != 2)
+ return -ENODEV;
+
+ mux_dev = kzalloc(sizeof(struct mux_dev), GFP_KERNEL);
+ if (!mux_dev)
+ return -ENOMEM;
+
+ tty_dev = kzalloc(sizeof(struct tty_dev), GFP_KERNEL);
+ if (!tty_dev) {
+ ret = -ENOMEM;
+ goto err_free_mux;
+ }
+
+ mux_dev->usbdev = usbdev;
+ mux_dev->control_intf = intf;
+
+ ret = init_usb(mux_dev);
+ if (ret)
+ goto err_free_tty;
+
+ tty_dev->priv_dev = (void *)mux_dev;
+ tty_dev->send_func = gdm_mux_send;
+ tty_dev->recv_func = gdm_mux_recv;
+ tty_dev->send_control = gdm_mux_send_control;
+
+ ret = register_lte_tty_device(tty_dev, &intf->dev);
+ if (ret)
+ goto err_unregister_tty;
+
+ for (i = 0; i < TTY_MAX_COUNT; i++)
+ mux_dev->tty_dev = tty_dev;
+
+ mux_dev->intf = intf;
+ mux_dev->usb_state = PM_NORMAL;
+
+ usb_get_dev(usbdev);
+ usb_set_intfdata(intf, tty_dev);
+
+ return 0;
+
+err_unregister_tty:
+ unregister_lte_tty_device(tty_dev);
+ release_usb(mux_dev);
+err_free_tty:
+ kfree(tty_dev);
+err_free_mux:
+ kfree(mux_dev);
+
+ return ret;
+}
+
+static void gdm_mux_disconnect(struct usb_interface *intf)
+{
+ struct tty_dev *tty_dev;
+ struct mux_dev *mux_dev;
+ struct usb_device *usbdev = interface_to_usbdev(intf);
+
+ tty_dev = usb_get_intfdata(intf);
+
+ mux_dev = tty_dev->priv_dev;
+
+ release_usb(mux_dev);
+ unregister_lte_tty_device(tty_dev);
+
+ kfree(mux_dev);
+ kfree(tty_dev);
+
+ usb_put_dev(usbdev);
+}
+
+static int gdm_mux_suspend(struct usb_interface *intf, pm_message_t pm_msg)
+{
+ struct tty_dev *tty_dev;
+ struct mux_dev *mux_dev;
+ struct rx_cxt *rx;
+ struct mux_rx *r, *r_next;
+ unsigned long flags;
+
+ tty_dev = usb_get_intfdata(intf);
+ mux_dev = tty_dev->priv_dev;
+ rx = &mux_dev->rx;
+
+ if (mux_dev->usb_state != PM_NORMAL) {
+ pr_err("usb suspend - invalid state\n");
+ return -1;
+ }
+
+ mux_dev->usb_state = PM_SUSPEND;
+
+ spin_lock_irqsave(&rx->submit_list_lock, flags);
+ list_for_each_entry_safe(r, r_next, &rx->rx_submit_list, rx_submit_list) {
+ spin_unlock_irqrestore(&rx->submit_list_lock, flags);
+ usb_kill_urb(r->urb);
+ spin_lock_irqsave(&rx->submit_list_lock, flags);
+ }
+ spin_unlock_irqrestore(&rx->submit_list_lock, flags);
+
+ return 0;
+}
+
+static int gdm_mux_resume(struct usb_interface *intf)
+{
+ struct tty_dev *tty_dev;
+ struct mux_dev *mux_dev;
+ u8 i;
+
+ tty_dev = usb_get_intfdata(intf);
+ mux_dev = tty_dev->priv_dev;
+
+ if (mux_dev->usb_state != PM_SUSPEND) {
+ pr_err("usb resume - invalid state\n");
+ return -1;
+ }
+
+ mux_dev->usb_state = PM_NORMAL;
+
+ for (i = 0; i < MAX_ISSUE_NUM; i++)
+ gdm_mux_recv(mux_dev, mux_dev->rx_cb);
+
+ return 0;
+}
+
+static struct usb_driver gdm_mux_driver = {
+ .name = "gdm_mux",
+ .probe = gdm_mux_probe,
+ .disconnect = gdm_mux_disconnect,
+ .id_table = id_table,
+ .supports_autosuspend = 1,
+ .suspend = gdm_mux_suspend,
+ .resume = gdm_mux_resume,
+ .reset_resume = gdm_mux_resume,
+};
+
+static int __init gdm_usb_mux_init(void)
+{
+ mux_rx_wq = create_workqueue("mux_rx_wq");
+ if (mux_rx_wq == NULL) {
+ pr_err("work queue create fail\n");
+ return -1;
+ }
+
+ register_lte_tty_driver();
+
+ return usb_register(&gdm_mux_driver);
+}
+
+static void __exit gdm_usb_mux_exit(void)
+{
+ unregister_lte_tty_driver();
+
+ if (mux_rx_wq) {
+ flush_workqueue(mux_rx_wq);
+ destroy_workqueue(mux_rx_wq);
+ }
+
+ usb_deregister(&gdm_mux_driver);
+}
+
+module_init(gdm_usb_mux_init);
+module_exit(gdm_usb_mux_exit);
+
+MODULE_DESCRIPTION("GCT LTE TTY Device Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/gdm724x/gdm_mux.h b/drivers/staging/gdm724x/gdm_mux.h
new file mode 100644
index 0000000..0163b24
--- /dev/null
+++ b/drivers/staging/gdm724x/gdm_mux.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _GDM_MUX_H_
+#define _GDM_MUX_H_
+
+#include <linux/types.h>
+#include <linux/usb.h>
+#include <linux/list.h>
+
+#include "gdm_tty.h"
+
+#define PM_NORMAL 0
+#define PM_SUSPEND 1
+
+#define USB_RT_ACM (USB_TYPE_CLASS | USB_RECIP_INTERFACE)
+
+#define START_FLAG 0xA512485A
+#define MUX_HEADER_SIZE 14
+#define MUX_TX_MAX_SIZE (1024*10)
+#define MUX_RX_MAX_SIZE (1024*30)
+#define AT_PKT_TYPE 0xF011
+#define DM_PKT_TYPE 0xF010
+
+#define RETRY_TIMER 30 /* msec */
+
+struct mux_pkt_header {
+ unsigned int start_flag;
+ unsigned int seq_num;
+ unsigned int payload_size;
+ unsigned short packet_type;
+ unsigned char data[0];
+};
+
+struct mux_tx {
+ struct urb *urb;
+ u8 *buf;
+ int len;
+ void (*callback)(void *cb_data);
+ void *cb_data;
+};
+
+struct mux_rx {
+ struct list_head free_list;
+ struct list_head rx_submit_list;
+ struct list_head to_host_list;
+ struct urb *urb;
+ u8 *buf;
+ void *mux_dev;
+ u32 offset;
+ u32 len;
+ int (*callback)(void *data,
+ int len,
+ int tty_index,
+ struct tty_dev *tty_dev,
+ int complete);
+};
+
+struct rx_cxt {
+ struct list_head to_host_list;
+ struct list_head rx_submit_list;
+ struct list_head rx_free_list;
+ spinlock_t to_host_lock;
+ spinlock_t submit_list_lock;
+ spinlock_t free_list_lock;
+};
+
+struct mux_dev {
+ struct usb_device *usbdev;
+ struct usb_interface *control_intf;
+ struct usb_interface *data_intf;
+ struct rx_cxt rx;
+ struct delayed_work work_rx;
+ struct usb_interface *intf;
+ int usb_state;
+ int (*rx_cb)(void *data,
+ int len,
+ int tty_index,
+ struct tty_dev *tty_dev,
+ int complete);
+ spinlock_t write_lock;
+ struct tty_dev *tty_dev;
+};
+
+#endif /* _GDM_MUX_H_ */
diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
new file mode 100644
index 0000000..0247a20
--- /dev/null
+++ b/drivers/staging/gdm724x/gdm_tty.c
@@ -0,0 +1,343 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb/cdc.h>
+#include <linux/serial.h>
+#include "gdm_tty.h"
+
+#define GDM_TTY_MAJOR 0
+#define GDM_TTY_MINOR 32
+
+#define ACM_CTRL_DTR 0x01
+#define ACM_CTRL_RTS 0x02
+#define ACM_CTRL_DSR 0x02
+#define ACM_CTRL_RI 0x08
+#define ACM_CTRL_DCD 0x01
+
+#define WRITE_SIZE 2048
+
+#define MUX_TX_MAX_SIZE 2048
+
+#define gdm_tty_send(n, d, l, i, c, b) (\
+ n->tty_dev->send_func(n->tty_dev->priv_dev, d, l, i, c, b))
+#define gdm_tty_recv(n, c) (\
+ n->tty_dev->recv_func(n->tty_dev->priv_dev, c))
+#define gdm_tty_send_control(n, r, v, d, l) (\
+ n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
+
+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
+
+static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
+static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
+static DEFINE_MUTEX(gdm_table_lock);
+
+static char *DRIVER_STRING[TTY_MAX_COUNT] = {"GCTATC", "GCTDM"};
+static char *DEVICE_STRING[TTY_MAX_COUNT] = {"GCT-ATC", "GCT-DM"};
+
+static void gdm_port_destruct(struct tty_port *port)
+{
+ struct gdm *gdm = container_of(port, struct gdm, port);
+
+ mutex_lock(&gdm_table_lock);
+ gdm_table[gdm->index][gdm->minor] = NULL;
+ mutex_unlock(&gdm_table_lock);
+
+ kfree(gdm);
+}
+
+static struct tty_port_operations gdm_port_ops = {
+ .destruct = gdm_port_destruct,
+};
+
+static int gdm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+ struct gdm *gdm = NULL;
+ int ret;
+ int i;
+ int j;
+
+ j = GDM_TTY_MINOR;
+ for (i = 0; i < TTY_MAX_COUNT; i++) {
+ if (!strcmp(tty->driver->driver_name, DRIVER_STRING[i])) {
+ j = tty->index;
+ break;
+ }
+ }
+
+ if (j == GDM_TTY_MINOR)
+ return -ENODEV;
+
+ mutex_lock(&gdm_table_lock);
+ gdm = gdm_table[i][j];
+ if (gdm == NULL) {
+ mutex_unlock(&gdm_table_lock);
+ return -ENODEV;
+ }
+
+ tty_port_get(&gdm->port);
+
+ ret = tty_standard_install(driver, tty);
+ if (ret) {
+ tty_port_put(&gdm->port);
+ mutex_unlock(&gdm_table_lock);
+ return ret;
+ }
+
+ tty->driver_data = gdm;
+ mutex_unlock(&gdm_table_lock);
+
+ return 0;
+}
+
+static int gdm_tty_open(struct tty_struct *tty, struct file *filp)
+{
+ struct gdm *gdm = tty->driver_data;
+ return tty_port_open(&gdm->port, tty, filp);
+}
+
+static void gdm_tty_cleanup(struct tty_struct *tty)
+{
+ struct gdm *gdm = tty->driver_data;
+ tty_port_put(&gdm->port);
+}
+
+static void gdm_tty_hangup(struct tty_struct *tty)
+{
+ struct gdm *gdm = tty->driver_data;
+ tty_port_hangup(&gdm->port);
+}
+
+static void gdm_tty_close(struct tty_struct *tty, struct file *filp)
+{
+ struct gdm *gdm = tty->driver_data;
+ tty_port_close(&gdm->port, tty, filp);
+}
+
+static int gdm_tty_recv_complete(void *data,
+ int len,
+ int index,
+ struct tty_dev *tty_dev,
+ int complete)
+{
+ struct gdm *gdm = tty_dev->gdm[index];
+ if (!GDM_TTY_READY(gdm)) {
+ if (complete == RECV_PACKET_PROCESS_COMPLETE)
+ gdm_tty_recv(gdm, gdm_tty_recv_complete);
+ return TO_HOST_PORT_CLOSE;
+ }
+
+ if (data && len) {
+ if (tty_buffer_request_room(&gdm->port, len) == len) {
+ tty_insert_flip_string(&gdm->port, data, len);
+ tty_flip_buffer_push(&gdm->port);
+ } else {
+ return TO_HOST_BUFFER_REQUEST_FAIL;
+ }
+ }
+
+ if (complete == RECV_PACKET_PROCESS_COMPLETE)
+ gdm_tty_recv(gdm, gdm_tty_recv_complete);
+
+ return 0;
+}
+
+static void gdm_tty_send_complete(void *arg)
+{
+ struct gdm *gdm = (struct gdm *)arg;
+
+ if (!GDM_TTY_READY(gdm))
+ return;
+
+ tty_port_tty_wakeup(&gdm->port);
+}
+
+static int gdm_tty_write(struct tty_struct *tty, const unsigned char *buf, int len)
+{
+ struct gdm *gdm = tty->driver_data;
+ int remain = len;
+ int sent_len = 0;
+ int sending_len = 0;
+
+ if (!GDM_TTY_READY(gdm))
+ return -ENODEV;
+
+ if (!len)
+ return 0;
+
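+ /* Send the data in MUX_TX_MAX_SIZE chunks; each chunk is queued as its own mux packet */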
+ while (1) {
+ sending_len = remain > MUX_TX_MAX_SIZE ? MUX_TX_MAX_SIZE : remain;
+ gdm_tty_send(gdm,
+ (void *)(buf+sent_len),
+ sending_len,
+ gdm->index,
+ gdm_tty_send_complete,
+ gdm
+ );
+ sent_len += sending_len;
+ remain -= sending_len;
+ if (remain <= 0)
+ break;
+ }
+
+ return len;
+}
+
+static int gdm_tty_write_room(struct tty_struct *tty)
+{
+ struct gdm *gdm = tty->driver_data;
+
+ if (!GDM_TTY_READY(gdm))
+ return -ENODEV;
+
+ return WRITE_SIZE;
+}
+
+int register_lte_tty_device(struct tty_dev *tty_dev, struct device *device)
+{
+ struct gdm *gdm;
+ int i;
+ int j;
+
+ for (i = 0; i < TTY_MAX_COUNT; i++) {
+ gdm = kmalloc(sizeof(struct gdm), GFP_KERNEL);
+ if (!gdm)
+ return -ENOMEM;
+
+ mutex_lock(&gdm_table_lock);
+ for (j = 0; j < GDM_TTY_MINOR; j++) {
+ if (!gdm_table[i][j])
+ break;
+ }
+
+ if (j == GDM_TTY_MINOR) {
+ kfree(gdm);
+ mutex_unlock(&gdm_table_lock);
+ return -EINVAL;
+ }
+
+ gdm_table[i][j] = gdm;
+ mutex_unlock(&gdm_table_lock);
+
+ tty_dev->gdm[i] = gdm;
+ tty_port_init(&gdm->port);
+
+ gdm->port.ops = &gdm_port_ops;
+ gdm->index = i;
+ gdm->minor = j;
+ gdm->tty_dev = tty_dev;
+
+ tty_port_register_device(&gdm->port, gdm_driver[i], gdm->minor, device);
+ }
+
+ for (i = 0; i < MAX_ISSUE_NUM; i++)
+ gdm_tty_recv(gdm, gdm_tty_recv_complete);
+
+ return 0;
+}
+
+void unregister_lte_tty_device(struct tty_dev *tty_dev)
+{
+ struct gdm *gdm;
+ struct tty_struct *tty;
+ int i;
+
+ for (i = 0; i < TTY_MAX_COUNT; i++) {
+ gdm = tty_dev->gdm[i];
+ if (!gdm)
+ continue;
+
+ mutex_lock(&gdm_table_lock);
+ gdm_table[gdm->index][gdm->minor] = NULL;
+ mutex_unlock(&gdm_table_lock);
+
+ tty = tty_port_tty_get(&gdm->port);
+ if (tty) {
+ tty_vhangup(tty);
+ tty_kref_put(tty);
+ }
+
+ tty_unregister_device(gdm_driver[i], gdm->minor);
+ tty_port_put(&gdm->port);
+ }
+}
+
+static const struct tty_operations gdm_tty_ops = {
+ .install = gdm_tty_install,
+ .open = gdm_tty_open,
+ .close = gdm_tty_close,
+ .cleanup = gdm_tty_cleanup,
+ .hangup = gdm_tty_hangup,
+ .write = gdm_tty_write,
+ .write_room = gdm_tty_write_room,
+};
+
+int register_lte_tty_driver(void)
+{
+ struct tty_driver *tty_driver;
+ int i;
+ int ret;
+
+ for (i = 0; i < TTY_MAX_COUNT; i++) {
+ tty_driver = alloc_tty_driver(GDM_TTY_MINOR);
+ if (!tty_driver)
+ return -ENOMEM;
+
+ tty_driver->owner = THIS_MODULE;
+ tty_driver->driver_name = DRIVER_STRING[i];
+ tty_driver->name = DEVICE_STRING[i];
+ tty_driver->major = GDM_TTY_MAJOR;
+ tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ tty_driver->init_termios = tty_std_termios;
+ tty_driver->init_termios.c_cflag = B9600 | CS8 | HUPCL | CLOCAL;
+ tty_driver->init_termios.c_lflag = ISIG | ICANON | IEXTEN;
+ tty_set_operations(tty_driver, &gdm_tty_ops);
+
+ ret = tty_register_driver(tty_driver);
+ if (ret) {
+ put_tty_driver(tty_driver);
+ return ret;
+ }
+
+ gdm_driver[i] = tty_driver;
+ }
+
+ return ret;
+}
+
+void unregister_lte_tty_driver(void)
+{
+ struct tty_driver *tty_driver;
+ int i;
+
+ for (i = 0; i < TTY_MAX_COUNT; i++) {
+ tty_driver = gdm_driver[i];
+ if (tty_driver) {
+ tty_unregister_driver(tty_driver);
+ put_tty_driver(tty_driver);
+ }
+ }
+}
diff --git a/drivers/staging/gdm724x/gdm_tty.h b/drivers/staging/gdm724x/gdm_tty.h
new file mode 100644
index 0000000..297438b
--- /dev/null
+++ b/drivers/staging/gdm724x/gdm_tty.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _GDM_TTY_H_
+#define _GDM_TTY_H_
+
+#include <linux/types.h>
+#include <linux/tty.h>
+
+#define TTY_MAX_COUNT 2
+
+#define MAX_ISSUE_NUM 3
+
+enum TO_HOST_RESULT {
+ TO_HOST_BUFFER_REQUEST_FAIL = 1,
+ TO_HOST_PORT_CLOSE = 2,
+ TO_HOST_INVALID_PACKET = 3,
+};
+
+enum RECV_PACKET_PROCESS {
+ RECV_PACKET_PROCESS_COMPLETE = 0,
+ RECV_PACKET_PROCESS_CONTINUE = 1,
+};
+
+struct gdm {
+ struct tty_dev *tty_dev;
+ struct tty_port port;
+ unsigned int index;
+ unsigned int minor;
+};
+
+struct tty_dev {
+ void *priv_dev;
+ int (*send_func)(void *priv_dev,
+ void *data,
+ int len,
+ int tty_index,
+ void (*cb)(void *cb_data),
+ void *cb_data);
+ int (*recv_func)(void *priv_dev,
+ int (*cb)(void *data,
+ int len,
+ int tty_index,
+ struct tty_dev *tty_dev,
+ int complete));
+ int (*send_control)(void *priv_dev,
+ int request,
+ int value,
+ void *data,
+ int len);
+ struct gdm *gdm[2];
+};
+
+int register_lte_tty_driver(void);
+void unregister_lte_tty_driver(void);
+int register_lte_tty_device(struct tty_dev *tty_dev, struct device *dev);
+void unregister_lte_tty_device(struct tty_dev *tty_dev);
+
+#endif /* _GDM_TTY_H_ */
diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c
new file mode 100644
index 0000000..bdc9637
--- /dev/null
+++ b/drivers/staging/gdm724x/gdm_usb.c
@@ -0,0 +1,1049 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/usb.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <linux/usb/cdc.h>
+#include <linux/wait.h>
+#include <linux/if_ether.h>
+#include <linux/pm_runtime.h>
+
+#include "gdm_usb.h"
+#include "gdm_lte.h"
+#include "hci.h"
+#include "hci_packet.h"
+#include "gdm_endian.h"
+
+#define USB_DEVICE_CDC_DATA(vid, pid) \
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_CLASS | USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
+ .idVendor = vid,\
+ .idProduct = pid,\
+ .bInterfaceClass = USB_CLASS_COMM,\
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET
+
+#define USB_DEVICE_MASS_DATA(vid, pid) \
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,\
+ .idVendor = vid,\
+ .idProduct = pid,\
+ .bInterfaceSubClass = USB_SC_SCSI, \
+ .bInterfaceClass = USB_CLASS_MASS_STORAGE,\
+ .bInterfaceProtocol = USB_PR_BULK
+
+static const struct usb_device_id id_table[] = {
+ { USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7240) }, /* GCT GDM7240 */
+ { USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7243) }, /* GCT GDM7243 */
+ { }
+};
+
+MODULE_DEVICE_TABLE(usb, id_table);
+
+static struct workqueue_struct *usb_tx_wq;
+static struct workqueue_struct *usb_rx_wq;
+
+static void do_tx(struct work_struct *work);
+static void do_rx(struct work_struct *work);
+
+static int gdm_usb_recv(void *priv_dev,
+ int (*cb)(void *cb_data, void *data, int len, int context),
+ void *cb_data,
+ int context);
+
+static int request_mac_address(struct lte_udev *udev)
+{
+ u8 buf[16] = {0,};
+ struct hci_packet *hci = (struct hci_packet *)buf;
+ struct usb_device *usbdev = udev->usbdev;
+ int actual;
+ int ret = -1;
+
+ hci->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_GET_INFORMATION);
+ hci->len = gdm_cpu_to_dev16(&udev->gdm_ed, 1);
+ hci->data[0] = MAC_ADDRESS;
+
+ ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), buf, 5,
+ &actual, 1000);
+
+ udev->request_mac_addr = 1;
+
+ return ret;
+}
+
+static struct usb_tx *alloc_tx_struct(int len)
+{
+ struct usb_tx *t = NULL;
+ int ret = 0;
+
+ t = kmalloc(sizeof(struct usb_tx), GFP_ATOMIC);
+ if (!t) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ memset(t, 0, sizeof(struct usb_tx));
+
+ t->urb = usb_alloc_urb(0, GFP_ATOMIC);
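+ /* Avoid a length that is an exact multiple of the 512-byte bulk packet size (presumably so the device never needs a zero-length packet to see the end of the transfer) */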
+ if (!(len % 512))
+ len++;
+
+ t->buf = kmalloc(len, GFP_ATOMIC);
+ if (!t->urb || !t->buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+out:
+ if (ret < 0) {
+ if (t) {
+ usb_free_urb(t->urb);
+ kfree(t->buf);
+ kfree(t);
+ }
+ return NULL;
+ }
+
+ return t;
+}
+
+static struct usb_tx_sdu *alloc_tx_sdu_struct(void)
+{
+ struct usb_tx_sdu *t_sdu = NULL;
+ int ret = 0;
+
+ t_sdu = kmalloc(sizeof(struct usb_tx_sdu), GFP_ATOMIC);
+ if (!t_sdu) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ memset(t_sdu, 0, sizeof(struct usb_tx_sdu));
+
+ t_sdu->buf = kmalloc(SDU_BUF_SIZE, GFP_ATOMIC);
+ if (!t_sdu->buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+out:
+
+ if (ret < 0) {
+ if (t_sdu) {
+ kfree(t_sdu->buf);
+ kfree(t_sdu);
+ }
+ return NULL;
+ }
+
+ return t_sdu;
+}
+
+static void free_tx_struct(struct usb_tx *t)
+{
+ if (t) {
+ usb_free_urb(t->urb);
+ kfree(t->buf);
+ kfree(t);
+ }
+}
+
+static void free_tx_sdu_struct(struct usb_tx_sdu *t_sdu)
+{
+ if (t_sdu) {
+ kfree(t_sdu->buf);
+ kfree(t_sdu);
+ }
+}
+
+static struct usb_tx_sdu *get_tx_sdu_struct(struct tx_cxt *tx, int *no_spc)
+{
+ struct usb_tx_sdu *t_sdu;
+
+ if (list_empty(&tx->free_list))
+ return NULL;
+
+ t_sdu = list_entry(tx->free_list.next, struct usb_tx_sdu, list);
+ list_del(&t_sdu->list);
+
+ tx->avail_count--;
+
+ *no_spc = list_empty(&tx->free_list) ? 1 : 0;
+
+ return t_sdu;
+}
+
+static void put_tx_struct(struct tx_cxt *tx, struct usb_tx_sdu *t_sdu)
+{
+ list_add_tail(&t_sdu->list, &tx->free_list);
+ tx->avail_count++;
+}
+
+static struct usb_rx *alloc_rx_struct(void)
+{
+ struct usb_rx *r = NULL;
+ int ret = 0;
+
+ r = kmalloc(sizeof(struct usb_rx), GFP_ATOMIC);
+ if (!r) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ r->urb = usb_alloc_urb(0, GFP_ATOMIC);
+ r->buf = kmalloc(RX_BUF_SIZE, GFP_ATOMIC);
+ if (!r->urb || !r->buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+out:
+
+ if (ret < 0) {
+ if (r) {
+ usb_free_urb(r->urb);
+ kfree(r->buf);
+ kfree(r);
+ }
+ return NULL;
+ }
+
+ return r;
+}
+
+static void free_rx_struct(struct usb_rx *r)
+{
+ if (r) {
+ usb_free_urb(r->urb);
+ kfree(r->buf);
+ kfree(r);
+ }
+}
+
+static struct usb_rx *get_rx_struct(struct rx_cxt *rx, int *no_spc)
+{
+ struct usb_rx *r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rx->rx_lock, flags);
+
+ if (list_empty(&rx->free_list)) {
+ spin_unlock_irqrestore(&rx->rx_lock, flags);
+ return NULL;
+ }
+
+ r = list_entry(rx->free_list.next, struct usb_rx, free_list);
+ list_del(&r->free_list);
+
+ rx->avail_count--;
+
+ *no_spc = list_empty(&rx->free_list) ? 1 : 0;
+
+ spin_unlock_irqrestore(&rx->rx_lock, flags);
+
+ return r;
+}
+
+static void put_rx_struct(struct rx_cxt *rx, struct usb_rx *r)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rx->rx_lock, flags);
+
+ list_add_tail(&r->free_list, &rx->free_list);
+ rx->avail_count++;
+
+ spin_unlock_irqrestore(&rx->rx_lock, flags);
+}
+
+static void release_usb(struct lte_udev *udev)
+{
+ struct rx_cxt *rx = &udev->rx;
+ struct tx_cxt *tx = &udev->tx;
+ struct usb_tx *t, *t_next;
+ struct usb_rx *r, *r_next;
+ struct usb_tx_sdu *t_sdu, *t_sdu_next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tx->lock, flags);
+ list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->sdu_list, list)
+ {
+ list_del(&t_sdu->list);
+ free_tx_sdu_struct(t_sdu);
+ }
+
+ list_for_each_entry_safe(t, t_next, &tx->hci_list, list)
+ {
+ list_del(&t->list);
+ free_tx_struct(t);
+ }
+
+ list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->free_list, list)
+ {
+ list_del(&t_sdu->list);
+ free_tx_sdu_struct(t_sdu);
+ }
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ spin_lock_irqsave(&rx->submit_lock, flags);
+ list_for_each_entry_safe(r, r_next, &rx->rx_submit_list, rx_submit_list)
+ {
+ spin_unlock_irqrestore(&rx->submit_lock, flags);
+ usb_kill_urb(r->urb);
+ spin_lock_irqsave(&rx->submit_lock, flags);
+ }
+ spin_unlock_irqrestore(&rx->submit_lock, flags);
+
+ spin_lock_irqsave(&rx->rx_lock, flags);
+ list_for_each_entry_safe(r, r_next, &rx->free_list, free_list)
+ {
+ list_del(&r->free_list);
+ free_rx_struct(r);
+ }
+ spin_unlock_irqrestore(&rx->rx_lock, flags);
+
+ spin_lock_irqsave(&rx->to_host_lock, flags);
+ list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list)
+ {
+ if (r->index == (void *)udev) {
+ list_del(&r->to_host_list);
+ free_rx_struct(r);
+ }
+ }
+ spin_unlock_irqrestore(&rx->to_host_lock, flags);
+}
+
+static int init_usb(struct lte_udev *udev)
+{
+ int ret = 0;
+ int i;
+ struct tx_cxt *tx = &udev->tx;
+ struct rx_cxt *rx = &udev->rx;
+ struct usb_tx_sdu *t_sdu = NULL;
+ struct usb_rx *r = NULL;
+
+ udev->send_complete = 1;
+ udev->tx_stop = 0;
+ udev->request_mac_addr = 0;
+ udev->usb_state = PM_NORMAL;
+
+ INIT_LIST_HEAD(&tx->sdu_list);
+ INIT_LIST_HEAD(&tx->hci_list);
+ INIT_LIST_HEAD(&tx->free_list);
+ INIT_LIST_HEAD(&rx->rx_submit_list);
+ INIT_LIST_HEAD(&rx->free_list);
+ INIT_LIST_HEAD(&rx->to_host_list);
+ spin_lock_init(&tx->lock);
+ spin_lock_init(&rx->rx_lock);
+ spin_lock_init(&rx->submit_lock);
+ spin_lock_init(&rx->to_host_lock);
+
+ tx->avail_count = 0;
+ rx->avail_count = 0;
+
+ udev->rx_cb = NULL;
+
+ for (i = 0; i < MAX_NUM_SDU_BUF; i++) {
+ t_sdu = alloc_tx_sdu_struct();
+ if (t_sdu == NULL) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ list_add(&t_sdu->list, &tx->free_list);
+ tx->avail_count++;
+ }
+
+ for (i = 0; i < MAX_RX_SUBMIT_COUNT*2; i++) {
+ r = alloc_rx_struct();
+ if (r == NULL) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ list_add(&r->free_list, &rx->free_list);
+ rx->avail_count++;
+ }
+ INIT_DELAYED_WORK(&udev->work_tx, do_tx);
+ INIT_DELAYED_WORK(&udev->work_rx, do_rx);
+ return 0;
+fail:
+ return ret;
+}
+
+static int set_mac_address(u8 *data, void *arg)
+{
+ struct phy_dev *phy_dev = (struct phy_dev *)arg;
+ struct lte_udev *udev = phy_dev->priv_dev;
+ struct tlv *tlv = (struct tlv *)data;
+ u8 mac_address[ETH_ALEN] = {0, };
+
+ if (tlv->type == MAC_ADDRESS && udev->request_mac_addr) {
+ memcpy(mac_address, tlv->data, tlv->len);
+
+ if (register_lte_device(phy_dev, &udev->intf->dev, mac_address) < 0)
+ pr_err("register lte device failed\n");
+
+ udev->request_mac_addr = 0;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+static void do_rx(struct work_struct *work)
+{
+ struct lte_udev *udev = container_of(work, struct lte_udev, work_rx.work);
+ struct rx_cxt *rx = &udev->rx;
+ struct usb_rx *r;
+ struct hci_packet *hci;
+ struct phy_dev *phy_dev;
+ u16 cmd_evt;
+ int ret;
+ unsigned long flags;
+
+ while (1) {
+ spin_lock_irqsave(&rx->to_host_lock, flags);
+ if (list_empty(&rx->to_host_list)) {
+ spin_unlock_irqrestore(&rx->to_host_lock, flags);
+ break;
+ }
+ r = list_entry(rx->to_host_list.next, struct usb_rx, to_host_list);
+ list_del(&r->to_host_list);
+ spin_unlock_irqrestore(&rx->to_host_lock, flags);
+
+ phy_dev = (struct phy_dev *)r->cb_data;
+ udev = (struct lte_udev *)phy_dev->priv_dev;
+ hci = (struct hci_packet *)r->buf;
+ cmd_evt = gdm_dev16_to_cpu(&udev->gdm_ed, hci->cmd_evt);
+
+ switch (cmd_evt) {
+ case LTE_GET_INFORMATION_RESULT:
+ if (set_mac_address(hci->data, r->cb_data) == 0) {
+ ret = r->callback(r->cb_data,
+ r->buf,
+ r->urb->actual_length,
+ KERNEL_THREAD);
+ }
+ break;
+
+ default:
+ if (r->callback) {
+ ret = r->callback(r->cb_data,
+ r->buf,
+ r->urb->actual_length,
+ KERNEL_THREAD);
+
+ if (ret == -EAGAIN)
+ pr_err("failed to send received data\n");
+ }
+ break;
+ }
+
+ put_rx_struct(rx, r);
+
+ gdm_usb_recv(udev,
+ r->callback,
+ r->cb_data,
+ USB_COMPLETE);
+ }
+}
+
+static void remove_rx_submit_list(struct usb_rx *r, struct rx_cxt *rx)
+{
+ unsigned long flags;
+ struct usb_rx *r_remove, *r_remove_next;
+
+ spin_lock_irqsave(&rx->submit_lock, flags);
+ list_for_each_entry_safe(r_remove, r_remove_next, &rx->rx_submit_list, rx_submit_list)
+ {
+ if (r == r_remove) {
+ list_del(&r->rx_submit_list);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&rx->submit_lock, flags);
+}
+
+static void gdm_usb_rcv_complete(struct urb *urb)
+{
+ struct usb_rx *r = urb->context;
+ struct rx_cxt *rx = r->rx;
+ unsigned long flags;
+ struct lte_udev *udev = container_of(r->rx, struct lte_udev, rx);
+ struct usb_device *usbdev = udev->usbdev;
+
+ remove_rx_submit_list(r, rx);
+
+ if (!urb->status && r->callback) {
+ spin_lock_irqsave(&rx->to_host_lock, flags);
+ list_add_tail(&r->to_host_list, &rx->to_host_list);
+ queue_work(usb_rx_wq, &udev->work_rx.work);
+ spin_unlock_irqrestore(&rx->to_host_lock, flags);
+ } else {
+ if (urb->status && udev->usb_state == PM_NORMAL)
+ pr_err("%s: urb status error %d\n",
+ __func__, urb->status);
+
+ put_rx_struct(rx, r);
+ }
+
+ usb_mark_last_busy(usbdev);
+}
+
+static int gdm_usb_recv(void *priv_dev,
+ int (*cb)(void *cb_data, void *data, int len, int context),
+ void *cb_data,
+ int context)
+{
+ struct lte_udev *udev = priv_dev;
+ struct usb_device *usbdev = udev->usbdev;
+ struct rx_cxt *rx = &udev->rx;
+ struct usb_rx *r;
+ int no_spc;
+ int ret;
+ unsigned long flags;
+
+ if (!udev->usbdev) {
+ pr_err("invalid device\n");
+ return -ENODEV;
+ }
+
+ r = get_rx_struct(rx, &no_spc);
+ if (!r) {
+ pr_err("Out of Memory\n");
+ return -ENOMEM;
+ }
+
+ udev->rx_cb = cb;
+ r->callback = cb;
+ r->cb_data = cb_data;
+ r->index = (void *)udev;
+ r->rx = rx;
+
+ usb_fill_bulk_urb(r->urb,
+ usbdev,
+ usb_rcvbulkpipe(usbdev, 0x83),
+ r->buf,
+ RX_BUF_SIZE,
+ gdm_usb_rcv_complete,
+ r);
+
+ spin_lock_irqsave(&rx->submit_lock, flags);
+ list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
+ spin_unlock_irqrestore(&rx->submit_lock, flags);
+
+ if (context == KERNEL_THREAD)
+ ret = usb_submit_urb(r->urb, GFP_KERNEL);
+ else
+ ret = usb_submit_urb(r->urb, GFP_ATOMIC);
+
+ if (ret) {
+ spin_lock_irqsave(&rx->submit_lock, flags);
+ list_del(&r->rx_submit_list);
+ spin_unlock_irqrestore(&rx->submit_lock, flags);
+
+ pr_err("usb_submit_urb failed (%p)\n", r);
+ put_rx_struct(rx, r);
+ }
+
+ return ret;
+}
+
+static void gdm_usb_send_complete(struct urb *urb)
+{
+ struct usb_tx *t = urb->context;
+ struct tx_cxt *tx = t->tx;
+ struct lte_udev *udev = container_of(tx, struct lte_udev, tx);
+ unsigned long flags;
+
+ if (urb->status == -ECONNRESET) {
+ pr_info("CONNRESET\n");
+ return;
+ }
+
+ if (t->callback)
+ t->callback(t->cb_data);
+
+ free_tx_struct(t);
+
+ spin_lock_irqsave(&tx->lock, flags);
+ udev->send_complete = 1;
+ queue_work(usb_tx_wq, &udev->work_tx.work);
+ spin_unlock_irqrestore(&tx->lock, flags);
+}
+
+static int send_tx_packet(struct usb_device *usbdev, struct usb_tx *t, u32 len)
+{
+ int ret = 0;
+
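+ /* As in alloc_tx_struct(): never submit a transfer that is an exact multiple of the 512-byte max packet size */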
+ if (!(len % 512))
+ len++;
+
+ usb_fill_bulk_urb(t->urb,
+ usbdev,
+ usb_sndbulkpipe(usbdev, 2),
+ t->buf,
+ len,
+ gdm_usb_send_complete,
+ t);
+
+ ret = usb_submit_urb(t->urb, GFP_ATOMIC);
+
+ if (ret)
+ pr_err("usb_submit_urb failed: %d\n", ret);
+
+ usb_mark_last_busy(usbdev);
+
+ return ret;
+}
+
+static u32 packet_aggregation(struct lte_udev *udev, u8 *send_buf)
+{
+ struct tx_cxt *tx = &udev->tx;
+ struct usb_tx_sdu *t_sdu = NULL;
+ struct multi_sdu *multi_sdu = (struct multi_sdu *)send_buf;
+ u16 send_len = 0;
+ u16 num_packet = 0;
+ unsigned long flags;
+
+ multi_sdu->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_TX_MULTI_SDU);
+
+ while (num_packet < MAX_PACKET_IN_MULTI_SDU) {
+ spin_lock_irqsave(&tx->lock, flags);
+ if (list_empty(&tx->sdu_list)) {
+ spin_unlock_irqrestore(&tx->lock, flags);
+ break;
+ }
+
+ t_sdu = list_entry(tx->sdu_list.next, struct usb_tx_sdu, list);
+ if (send_len + t_sdu->len > MAX_SDU_SIZE) {
+ spin_unlock_irqrestore(&tx->lock, flags);
+ break;
+ }
+
+ list_del(&t_sdu->list);
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ memcpy(multi_sdu->data + send_len, t_sdu->buf, t_sdu->len);
+
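+ /* Keep each SDU 4-byte aligned within the multi-SDU payload */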
+ send_len += (t_sdu->len + 3) & 0xfffc;
+ num_packet++;
+
+ if (tx->avail_count > 10)
+ t_sdu->callback(t_sdu->cb_data);
+
+ spin_lock_irqsave(&tx->lock, flags);
+ put_tx_struct(tx, t_sdu);
+ spin_unlock_irqrestore(&tx->lock, flags);
+ }
+
+ multi_sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
+ multi_sdu->num_packet = gdm_cpu_to_dev16(&udev->gdm_ed, num_packet);
+
+ return send_len + offsetof(struct multi_sdu, data);
+}
+
+static void do_tx(struct work_struct *work)
+{
+ struct lte_udev *udev = container_of(work, struct lte_udev, work_tx.work);
+ struct usb_device *usbdev = udev->usbdev;
+ struct tx_cxt *tx = &udev->tx;
+ struct usb_tx *t = NULL;
+ int is_send = 0;
+ u32 len = 0;
+ unsigned long flags;
+
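+ /* wake the interface if it is autosuspended; the PM reference is released
+ * immediately since only the resume side effect is needed here */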
+ if (!usb_autopm_get_interface(udev->intf))
+ usb_autopm_put_interface(udev->intf);
+
+ if (udev->usb_state == PM_SUSPEND)
+ return;
+
+ spin_lock_irqsave(&tx->lock, flags);
+ if (!udev->send_complete) {
+ spin_unlock_irqrestore(&tx->lock, flags);
+ return;
+ } else {
+ udev->send_complete = 0;
+ }
+
+ if (!list_empty(&tx->hci_list)) {
+ t = list_entry(tx->hci_list.next, struct usb_tx, list);
+ list_del(&t->list);
+ len = t->len;
+ t->is_sdu = 0;
+ is_send = 1;
+ } else if (!list_empty(&tx->sdu_list)) {
+ if (udev->tx_stop) {
+ udev->send_complete = 1;
+ spin_unlock_irqrestore(&tx->lock, flags);
+ return;
+ }
+
+ t = alloc_tx_struct(TX_BUF_SIZE);
+ t->callback = NULL;
+ t->tx = tx;
+ t->is_sdu = 1;
+ is_send = 1;
+ }
+
+ if (!is_send) {
+ udev->send_complete = 1;
+ spin_unlock_irqrestore(&tx->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ if (t->is_sdu)
+ len = packet_aggregation(udev, t->buf);
+
+ if (send_tx_packet(usbdev, t, len)) {
+ pr_err("send_tx_packet failed\n");
+ t->callback = NULL;
+ gdm_usb_send_complete(t->urb);
+ }
+}
+
+#define SDU_PARAM_LEN 12
+static int gdm_usb_sdu_send(void *priv_dev, void *data, int len,
+ unsigned int dftEpsId, unsigned int epsId,
+ void (*cb)(void *data), void *cb_data,
+ int dev_idx, int nic_type)
+{
+ struct lte_udev *udev = priv_dev;
+ struct tx_cxt *tx = &udev->tx;
+ struct usb_tx_sdu *t_sdu;
+ struct sdu *sdu = NULL;
+ unsigned long flags;
+ int no_spc = 0;
+ u16 send_len;
+
+ if (!udev->usbdev) {
+ pr_err("sdu send - invalid device\n");
+ return TX_NO_DEV;
+ }
+
+ spin_lock_irqsave(&tx->lock, flags);
+ t_sdu = get_tx_sdu_struct(tx, &no_spc);
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ if (t_sdu == NULL) {
+ pr_err("sdu send - free list empty\n");
+ return TX_NO_SPC;
+ }
+
+ sdu = (struct sdu *)t_sdu->buf;
+ sdu->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_TX_SDU);
+ if (nic_type == NIC_TYPE_ARP) {
+ send_len = len + SDU_PARAM_LEN;
+ memcpy(sdu->data, data, len);
+ } else {
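+ /* non-ARP traffic: strip the Ethernet header before wrapping the payload in an SDU */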
+ send_len = len - ETH_HLEN;
+ send_len += SDU_PARAM_LEN;
+ memcpy(sdu->data, data+ETH_HLEN, len-ETH_HLEN);
+ }
+
+ sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
+ sdu->dftEpsId = gdm_cpu_to_dev32(&udev->gdm_ed, dftEpsId);
+ sdu->bearer_ID = gdm_cpu_to_dev32(&udev->gdm_ed, epsId);
+ sdu->nic_type = gdm_cpu_to_dev32(&udev->gdm_ed, nic_type);
+
+ t_sdu->len = send_len + HCI_HEADER_SIZE;
+ t_sdu->callback = cb;
+ t_sdu->cb_data = cb_data;
+
+ spin_lock_irqsave(&tx->lock, flags);
+ list_add_tail(&t_sdu->list, &tx->sdu_list);
+ queue_work(usb_tx_wq, &udev->work_tx.work);
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ if (no_spc)
+ return TX_NO_BUFFER;
+
+ return 0;
+}
+
+static int gdm_usb_hci_send(void *priv_dev, void *data, int len,
+ void (*cb)(void *data), void *cb_data)
+{
+ struct lte_udev *udev = priv_dev;
+ struct tx_cxt *tx = &udev->tx;
+ struct usb_tx *t;
+ unsigned long flags;
+
+ if (!udev->usbdev) {
+ pr_err("hci send - invalid device\n");
+ return -ENODEV;
+ }
+
+ t = alloc_tx_struct(len);
+ if (t == NULL) {
+ pr_err("hci_send - out of memory\n");
+ return -ENOMEM;
+ }
+
+ memcpy(t->buf, data, len);
+ t->callback = cb;
+ t->cb_data = cb_data;
+ t->len = len;
+ t->tx = tx;
+ t->is_sdu = 0;
+
+ spin_lock_irqsave(&tx->lock, flags);
+ list_add_tail(&t->list, &tx->hci_list);
+ queue_work(usb_tx_wq, &udev->work_tx.work);
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ return 0;
+}
+
+static struct gdm_endian *gdm_usb_get_endian(void *priv_dev)
+{
+ struct lte_udev *udev = priv_dev;
+
+ return &udev->gdm_ed;
+}
+
+static int gdm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+{
+ int ret = 0;
+ struct phy_dev *phy_dev = NULL;
+ struct lte_udev *udev = NULL;
+ u16 idVendor, idProduct;
+ int bInterfaceNumber;
+ struct usb_device *usbdev = interface_to_usbdev(intf);
+
+ bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;
+ idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
+ idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);
+
+ pr_info("net vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);
+
+ if (bInterfaceNumber > NETWORK_INTERFACE) {
+ pr_info("not a network device\n");
+ return -1;
+ }
+
+ phy_dev = kmalloc(sizeof(struct phy_dev), GFP_ATOMIC);
+ if (!phy_dev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ udev = kmalloc(sizeof(struct lte_udev), GFP_ATOMIC);
+ if (!udev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memset(phy_dev, 0, sizeof(struct phy_dev));
+ memset(udev, 0, sizeof(struct lte_udev));
+
+ phy_dev->priv_dev = (void *)udev;
+ phy_dev->send_hci_func = gdm_usb_hci_send;
+ phy_dev->send_sdu_func = gdm_usb_sdu_send;
+ phy_dev->rcv_func = gdm_usb_recv;
+ phy_dev->get_endian = gdm_usb_get_endian;
+
+ udev->usbdev = usbdev;
+ ret = init_usb(udev);
+ if (ret < 0) {
+ pr_err("init_usb func failed\n");
+ goto out;
+ }
+ udev->intf = intf;
+
+ intf->needs_remote_wakeup = 1;
+ usb_enable_autosuspend(usbdev);
+ pm_runtime_set_autosuspend_delay(&usbdev->dev, AUTO_SUSPEND_TIMER);
+
+ /* Devices listed here use big-endian byte order; all others default to little endian */
+ if (idProduct == PID_GDM7243)
+ gdm_set_endian(&udev->gdm_ed, ENDIANNESS_BIG);
+ else
+ gdm_set_endian(&udev->gdm_ed, ENDIANNESS_LITTLE);
+
+ ret = request_mac_address(udev);
+ if (ret < 0) {
+ pr_err("request Mac address failed\n");
+ goto out;
+ }
+
+ start_rx_proc(phy_dev);
+out:
+
+ if (ret < 0) {
+ kfree(phy_dev);
+ if (udev) {
+ release_usb(udev);
+ kfree(udev);
+ }
+ }
+
+ usb_get_dev(usbdev);
+ usb_set_intfdata(intf, phy_dev);
+
+ return ret;
+}
+
+static void gdm_usb_disconnect(struct usb_interface *intf)
+{
+ struct phy_dev *phy_dev;
+ struct lte_udev *udev;
+ u16 idVendor, idProduct;
+ struct usb_device *usbdev;
+ usbdev = interface_to_usbdev(intf);
+
+ idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
+ idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);
+
+ phy_dev = usb_get_intfdata(intf);
+
+ udev = phy_dev->priv_dev;
+ unregister_lte_device(phy_dev);
+
+ release_usb(udev);
+
+ kfree(udev);
+ udev = NULL;
+
+ kfree(phy_dev);
+ phy_dev = NULL;
+
+ usb_put_dev(usbdev);
+}
+
+static int gdm_usb_suspend(struct usb_interface *intf, pm_message_t pm_msg)
+{
+ struct phy_dev *phy_dev;
+ struct lte_udev *udev;
+ struct rx_cxt *rx;
+ struct usb_rx *r;
+ struct usb_rx *r_next;
+ unsigned long flags;
+
+ phy_dev = usb_get_intfdata(intf);
+ udev = phy_dev->priv_dev;
+ rx = &udev->rx;
+ if (udev->usb_state != PM_NORMAL) {
+ pr_err("usb suspend - invalid state\n");
+ return -1;
+ }
+
+ udev->usb_state = PM_SUSPEND;
+
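+ /* cancel all in-flight rx URBs; the submit lock is dropped around usb_kill_urb() since it may sleep */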
+ spin_lock_irqsave(&rx->submit_lock, flags);
+ list_for_each_entry_safe(r, r_next, &rx->rx_submit_list, rx_submit_list)
+ {
+ spin_unlock_irqrestore(&rx->submit_lock, flags);
+ usb_kill_urb(r->urb);
+ spin_lock_irqsave(&rx->submit_lock, flags);
+ }
+ spin_unlock_irqrestore(&rx->submit_lock, flags);
+
+ return 0;
+}
+
+static int gdm_usb_resume(struct usb_interface *intf)
+{
+ struct phy_dev *phy_dev;
+ struct lte_udev *udev;
+ struct tx_cxt *tx;
+ struct rx_cxt *rx;
+ unsigned long flags;
+ int issue_count;
+ int i;
+
+ phy_dev = usb_get_intfdata(intf);
+ udev = phy_dev->priv_dev;
+ rx = &udev->rx;
+
+ if (udev->usb_state != PM_SUSPEND) {
+ pr_err("usb resume - invalid state\n");
+ return -1;
+ }
+ udev->usb_state = PM_NORMAL;
+
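+ /* re-submit receive URBs to refill the rx pipeline drained during suspend */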
+ spin_lock_irqsave(&rx->rx_lock, flags);
+ issue_count = rx->avail_count - MAX_RX_SUBMIT_COUNT;
+ spin_unlock_irqrestore(&rx->rx_lock, flags);
+
+ if (issue_count >= 0) {
+ for (i = 0; i < issue_count; i++)
+ gdm_usb_recv(phy_dev->priv_dev,
+ udev->rx_cb,
+ phy_dev,
+ USB_COMPLETE);
+ }
+
+ tx = &udev->tx;
+ spin_lock_irqsave(&tx->lock, flags);
+ queue_work(usb_tx_wq, &udev->work_tx.work);
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ return 0;
+}
+
+static struct usb_driver gdm_usb_lte_driver = {
+ .name = "gdm_lte",
+ .probe = gdm_usb_probe,
+ .disconnect = gdm_usb_disconnect,
+ .id_table = id_table,
+ .supports_autosuspend = 1,
+ .suspend = gdm_usb_suspend,
+ .resume = gdm_usb_resume,
+ .reset_resume = gdm_usb_resume,
+};
+
+static int __init gdm_usb_lte_init(void)
+{
+ if (gdm_lte_event_init() < 0) {
+ pr_err("error creating event\n");
+ return -1;
+ }
+
+ usb_tx_wq = create_workqueue("usb_tx_wq");
+ if (usb_tx_wq == NULL)
+ return -1;
+
+ usb_rx_wq = create_workqueue("usb_rx_wq");
+ if (usb_rx_wq == NULL)
+ return -1;
+
+ return usb_register(&gdm_usb_lte_driver);
+}
+
+static void __exit gdm_usb_lte_exit(void)
+{
+ gdm_lte_event_exit();
+
+ usb_deregister(&gdm_usb_lte_driver);
+
+ if (usb_tx_wq) {
+ flush_workqueue(usb_tx_wq);
+ destroy_workqueue(usb_tx_wq);
+ }
+
+ if (usb_rx_wq) {
+ flush_workqueue(usb_rx_wq);
+ destroy_workqueue(usb_rx_wq);
+ }
+}
+
+module_init(gdm_usb_lte_init);
+module_exit(gdm_usb_lte_exit);
+
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_DESCRIPTION("GCT LTE USB Device Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/gdm724x/gdm_usb.h b/drivers/staging/gdm724x/gdm_usb.h
new file mode 100644
index 0000000..e6486e7
--- /dev/null
+++ b/drivers/staging/gdm724x/gdm_usb.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _GDM_USB_H_
+#define _GDM_USB_H_
+
+#include <linux/types.h>
+#include <linux/usb.h>
+#include <linux/list.h>
+#include <linux/time.h>
+
+#include "gdm_endian.h"
+#include "hci_packet.h"
+
+#define PM_NORMAL 0
+#define PM_SUSPEND 1
+#define AUTO_SUSPEND_TIMER 5000 /* ms */
+
+#define RX_BUF_SIZE (1024*32)
+#define TX_BUF_SIZE (1024*32)
+#define SDU_BUF_SIZE 2048
+#define MAX_SDU_SIZE (1024*30)
+#define MAX_PACKET_IN_MULTI_SDU 256
+
+#define VID_GCT 0x1076
+#define PID_GDM7240 0x8000
+#define PID_GDM7243 0x9000
+
+#define NETWORK_INTERFACE 1
+#define USB_SC_SCSI 0x06
+#define USB_PR_BULK 0x50
+
+#define MAX_NUM_SDU_BUF 64
+
+struct usb_tx {
+ struct list_head list;
+ struct urb *urb;
+ u8 *buf;
+ u32 len;
+ void (*callback)(void *cb_data);
+ void *cb_data;
+ struct tx_cxt *tx;
+ u8 is_sdu;
+};
+
+struct usb_tx_sdu {
+ struct list_head list;
+ u8 *buf;
+ u32 len;
+ void (*callback)(void *cb_data);
+ void *cb_data;
+};
+
+struct usb_rx {
+ struct list_head to_host_list;
+ struct list_head free_list;
+ struct list_head rx_submit_list;
+ struct rx_cxt *rx;
+ struct urb *urb;
+ u8 *buf;
+ int (*callback)(void *cb_data, void *data, int len, int context);
+ void *cb_data;
+ void *index;
+};
+
+struct tx_cxt {
+ struct list_head sdu_list;
+ struct list_head hci_list;
+ struct list_head free_list;
+ u32 avail_count;
+ spinlock_t lock;
+};
+
+struct rx_cxt {
+ struct list_head to_host_list;
+ struct list_head rx_submit_list;
+ struct list_head free_list;
+ u32 avail_count;
+ spinlock_t to_host_lock;
+ spinlock_t rx_lock;
+ spinlock_t submit_lock;
+};
+
+struct lte_udev {
+ struct usb_device *usbdev;
+ struct gdm_endian gdm_ed;
+ struct tx_cxt tx;
+ struct rx_cxt rx;
+ struct delayed_work work_tx;
+ struct delayed_work work_rx;
+ u8 send_complete;
+ u8 tx_stop;
+ struct usb_interface *intf;
+ int (*rx_cb)(void *cb_data, void *data, int len, int context);
+ int usb_state;
+ u8 request_mac_addr;
+};
+
+#endif /* _GDM_USB_H_ */
diff --git a/drivers/staging/gdm724x/hci.h b/drivers/staging/gdm724x/hci.h
new file mode 100644
index 0000000..9a591b0
--- /dev/null
+++ b/drivers/staging/gdm724x/hci.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _HCI_H_
+#define _HCI_H_
+
+#define LTE_GET_INFORMATION 0x3002
+#define LTE_GET_INFORMATION_RESULT 0xB003
+ #define MAC_ADDRESS 0xA2
+
+#define LTE_LINK_ON_OFF_INDICATION 0xB133
+#define LTE_PDN_TABLE_IND 0xB143
+
+#define LTE_TX_SDU 0x3200
+#define LTE_RX_SDU 0xB201
+#define LTE_TX_MULTI_SDU 0x3202
+#define LTE_RX_MULTI_SDU 0xB203
+
+#define LTE_DL_SDU_FLOW_CONTROL 0x3305
+#define LTE_UL_SDU_FLOW_CONTROL 0xB306
+
+#define LTE_AT_CMD_TO_DEVICE 0x3307
+#define LTE_AT_CMD_FROM_DEVICE 0xB308
+
+#define LTE_SDIO_DM_SEND_PKT 0x3312
+#define LTE_SDIO_DM_RECV_PKT 0xB313
+
+#define LTE_NV_RESTORE_REQUEST 0xB30C
+#define LTE_NV_RESTORE_RESPONSE 0x330D
+#define LTE_NV_SAVE_REQUEST 0xB30E
+ #define NV_TYPE_LTE_INFO 0x00
+ #define NV_TYPE_BOARD_CONFIG 0x01
+ #define NV_TYPE_RF_CAL 0x02
+ #define NV_TYPE_TEMP 0x03
+ #define NV_TYPE_NET_INFO 0x04
+ #define NV_TYPE_SAFETY_INFO 0x05
+ #define NV_TYPE_CDMA_CAL 0x06
+ #define NV_TYPE_VENDOR 0x07
+ #define NV_TYPE_ALL 0xff
+#define LTE_NV_SAVE_RESPONSE 0x330F
+
+#define LTE_AT_CMD_TO_DEVICE_EXT 0x3323
+#define LTE_AT_CMD_FROM_DEVICE_EXT 0xB324
+
+#endif /* _HCI_H_ */
diff --git a/drivers/staging/gdm724x/hci_packet.h b/drivers/staging/gdm724x/hci_packet.h
new file mode 100644
index 0000000..7fba8a6
--- /dev/null
+++ b/drivers/staging/gdm724x/hci_packet.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _HCI_PACKET_H_
+#define _HCI_PACKET_H_
+
+#define HCI_HEADER_SIZE 4
+
+/*
+ * The NIC type definition:
+ * For backward compatibility, the lower 16 bits are used as before.
+ * Lower 16 bits: NIC_TYPE values
+ * Upper 16 bits: NIC_TYPE flags
+ */
+#define NIC_TYPE_NIC0 0x00000010
+#define NIC_TYPE_NIC1 0x00000011
+#define NIC_TYPE_NIC2 0x00000012
+#define NIC_TYPE_NIC3 0x00000013
+#define NIC_TYPE_ARP 0x00000100
+#define NIC_TYPE_ICMPV6 0x00000200
+#define NIC_TYPE_MASK 0x0000FFFF
+#define NIC_TYPE_F_IPV4 0x00010000
+#define NIC_TYPE_F_IPV6 0x00020000
+#define NIC_TYPE_F_DHCP 0x00040000
+#define NIC_TYPE_F_NDP 0x00080000
+#define NIC_TYPE_F_VLAN 0x00100000
+
+struct hci_packet {
+ u16 cmd_evt;
+ u16 len;
+ u8 data[0];
+} __packed;
+
+struct tlv {
+ u8 type;
+ u8 len;
+ u8 *data[1];
+} __packed;
+
+struct sdu_header {
+ u16 cmd_evt;
+ u16 len;
+ u32 dftEpsId;
+ u32 bearer_ID;
+ u32 nic_type;
+} __packed;
+
+struct sdu {
+ u16 cmd_evt;
+ u16 len;
+ u32 dftEpsId;
+ u32 bearer_ID;
+ u32 nic_type;
+ u8 data[0];
+} __packed;
+
+struct multi_sdu {
+ u16 cmd_evt;
+ u16 len;
+ u16 num_packet;
+ u16 reserved;
+ u8 data[0];
+} __packed;
+
+struct hci_pdn_table_ind {
+ u16 cmd_evt;
+ u16 len;
+ u8 activate;
+ u32 dft_eps_id;
+ u32 nic_type;
+ u8 pdn_type;
+ u8 ipv4_addr[4];
+ u8 ipv6_intf_id[8];
+} __packed;
+
+struct hci_connect_ind {
+ u16 cmd_evt;
+ u16 len;
+ u32 connect;
+} __packed;
+
+
+#endif /* _HCI_PACKET_H_ */
diff --git a/drivers/staging/gdm724x/netlink_k.c b/drivers/staging/gdm724x/netlink_k.c
new file mode 100644
index 0000000..77fc64e
--- /dev/null
+++ b/drivers/staging/gdm724x/netlink_k.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/export.h>
+#include <linux/etherdevice.h>
+#include <linux/netlink.h>
+#include <asm/byteorder.h>
+#include <net/sock.h>
+
+#include "netlink_k.h"
+
+#if defined(DEFINE_MUTEX)
+static DEFINE_MUTEX(netlink_mutex);
+#else
+static struct semaphore netlink_mutex;
+#define mutex_lock(x) down(x)
+#define mutex_unlock(x) up(x)
+#endif
+
+#define ND_MAX_GROUP 30
+#define ND_IFINDEX_LEN sizeof(int)
+#define ND_NLMSG_SPACE(len) (NLMSG_SPACE(len) + ND_IFINDEX_LEN)
+#define ND_NLMSG_DATA(nlh) ((void *)((char *)NLMSG_DATA(nlh) + ND_IFINDEX_LEN))
+#define ND_NLMSG_S_LEN(len) (len+ND_IFINDEX_LEN)
+#define ND_NLMSG_R_LEN(nlh) (nlh->nlmsg_len-ND_IFINDEX_LEN)
+#define ND_NLMSG_IFIDX(nlh) NLMSG_DATA(nlh)
+#define ND_MAX_MSG_LEN (1024 * 32)
+
+static void (*rcv_cb)(struct net_device *dev, u16 type, void *msg, int len);
+
+static void netlink_rcv_cb(struct sk_buff *skb)
+{
+ struct nlmsghdr *nlh;
+ struct net_device *dev;
+ u32 mlen;
+ void *msg;
+ int ifindex;
+
+ if (!rcv_cb) {
+ pr_err("nl cb - unregistered\n");
+ return;
+ }
+
+ if (skb->len < NLMSG_SPACE(0)) {
+ pr_err("nl cb - invalid skb length\n");
+ return;
+ }
+
+ nlh = (struct nlmsghdr *)skb->data;
+
+ if (skb->len < nlh->nlmsg_len || nlh->nlmsg_len > ND_MAX_MSG_LEN) {
+ pr_err("nl cb - invalid length (%d,%d)\n",
+ skb->len, nlh->nlmsg_len);
+ return;
+ }
+
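+ /* the first ND_IFINDEX_LEN bytes of the payload carry the target ifindex; the rest is the message body */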
+ memcpy(&ifindex, ND_NLMSG_IFIDX(nlh), ND_IFINDEX_LEN);
+ msg = ND_NLMSG_DATA(nlh);
+ mlen = ND_NLMSG_R_LEN(nlh);
+
+ dev = dev_get_by_index(&init_net, ifindex);
+ if (dev) {
+ rcv_cb(dev, nlh->nlmsg_type, msg, mlen);
+ dev_put(dev);
+ } else {
+ pr_err("nl cb - dev (%d) not found\n", ifindex);
+ }
+}
+
+static void netlink_rcv(struct sk_buff *skb)
+{
+ mutex_lock(&netlink_mutex);
+ netlink_rcv_cb(skb);
+ mutex_unlock(&netlink_mutex);
+}
+
+struct sock *netlink_init(int unit,
+ void (*cb)(struct net_device *dev, u16 type, void *msg, int len))
+{
+ struct sock *sock;
+ struct netlink_kernel_cfg cfg = {
+ .input = netlink_rcv,
+ };
+
+#if !defined(DEFINE_MUTEX)
+ init_MUTEX(&netlink_mutex);
+#endif
+
+ sock = netlink_kernel_create(&init_net, unit, &cfg);
+
+ if (sock)
+ rcv_cb = cb;
+
+ return sock;
+}
+
+void netlink_exit(struct sock *sock)
+{
+ sock_release(sock->sk_socket);
+}
+
+int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
+{
+ static u32 seq;
+ struct sk_buff *skb = NULL;
+ struct nlmsghdr *nlh;
+ int ret = 0;
+
+ if (group > ND_MAX_GROUP)
+ return -EINVAL;
+
+ if (!netlink_has_listeners(sock, group+1))
+ return -ESRCH;
+
+ skb = alloc_skb(NLMSG_SPACE(len), GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ seq++;
+
+ nlh = nlmsg_put(skb, 0, seq, type, len, 0);
+ memcpy(NLMSG_DATA(nlh), msg, len);
+ NETLINK_CB(skb).portid = 0;
+ NETLINK_CB(skb).dst_group = 0;
+
+ ret = netlink_broadcast(sock, skb, 0, group+1, GFP_ATOMIC);
+ if (!ret)
+ return len;
+
+ if (ret != -ESRCH)
+ pr_err("nl broadcast g=%d, t=%d, l=%d, r=%d\n",
+ group, type, len, ret);
+ else if (netlink_has_listeners(sock, group+1))
+ return -EAGAIN;
+
+ return ret;
+}
diff --git a/drivers/staging/gdm724x/netlink_k.h b/drivers/staging/gdm724x/netlink_k.h
new file mode 100644
index 0000000..589486d
--- /dev/null
+++ b/drivers/staging/gdm724x/netlink_k.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _NETLINK_K_H
+#define _NETLINK_K_H
+
+#include <linux/netdevice.h>
+#include <net/sock.h>
+
+struct sock *netlink_init(int unit,
+ void (*cb)(struct net_device *dev, u16 type, void *msg, int len));
+void netlink_exit(struct sock *sock);
+int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len);
+
+#endif /* _NETLINK_K_H_ */
diff --git a/drivers/staging/gdm72xx/Kconfig b/drivers/staging/gdm72xx/Kconfig
index 6905913..dd8a391 100644
--- a/drivers/staging/gdm72xx/Kconfig
+++ b/drivers/staging/gdm72xx/Kconfig
@@ -4,7 +4,7 @@
menuconfig WIMAX_GDM72XX
tristate "GCT GDM72xx WiMAX support"
- depends on NET
+ depends on NET && (USB || MMC)
help
Support for the GCT GDM72xx WiMAX chip
@@ -19,7 +19,7 @@ config WIMAX_GDM72XX_K_MODE
default n
config WIMAX_GDM72XX_WIMAX2
- bool "Enable WIMAX2 support"
+ bool "Enable WiMAX2 support"
default n
choice
@@ -27,18 +27,18 @@ choice
config WIMAX_GDM72XX_USB
bool "USB interface"
- depends on USB
+ depends on (USB = y || USB = WIMAX_GDM72XX)
config WIMAX_GDM72XX_SDIO
bool "SDIO interface"
- depends on MMC
+ depends on (MMC = y || MMC = WIMAX_GDM72XX)
endchoice
if WIMAX_GDM72XX_USB
config WIMAX_GDM72XX_USB_PM
- bool "Enable power managerment support"
+ bool "Enable power management support"
depends on PM_RUNTIME
endif # WIMAX_GDM72XX_USB
diff --git a/drivers/staging/gdm72xx/gdm_qos.c b/drivers/staging/gdm72xx/gdm_qos.c
index b795353..cc36924 100644
--- a/drivers/staging/gdm72xx/gdm_qos.c
+++ b/drivers/staging/gdm72xx/gdm_qos.c
@@ -250,8 +250,8 @@ static void send_qos_list(struct nic *nic, struct list_head *head)
list_for_each_entry_safe(entry, n, head, list) {
list_del(&entry->list);
- free_qos_entry(entry);
gdm_wimax_send_tx(entry->skb, entry->dev);
+ free_qos_entry(entry);
}
}
diff --git a/drivers/staging/gdm72xx/gdm_wimax.c b/drivers/staging/gdm72xx/gdm_wimax.c
index 41efbee..dd85497 100644
--- a/drivers/staging/gdm72xx/gdm_wimax.c
+++ b/drivers/staging/gdm72xx/gdm_wimax.c
@@ -939,8 +939,7 @@ int register_wimax_device(struct phy_dev *phy_dev, struct device *pdev)
struct net_device *dev;
int ret;
- dev = (struct net_device *)alloc_netdev(sizeof(*nic),
- "wm%d", ether_setup);
+ dev = alloc_netdev(sizeof(*nic), "wm%d", ether_setup);
if (dev == NULL) {
pr_err("alloc_etherdev failed\n");
diff --git a/drivers/staging/goldfish/goldfish_audio.c b/drivers/staging/goldfish/goldfish_audio.c
index d3bed21f..f96dcec 100644
--- a/drivers/staging/goldfish/goldfish_audio.c
+++ b/drivers/staging/goldfish/goldfish_audio.c
@@ -1,4 +1,5 @@
-/* drivers/misc/goldfish_audio.c
+/*
+ * drivers/misc/goldfish_audio.c
*
* Copyright (C) 2007 Google, Inc.
* Copyright (C) 2012 Intel, Inc.
@@ -47,10 +48,11 @@ struct goldfish_audio {
int read_supported; /* true if we have audio input support */
};
-/* We will allocate two read buffers and two write buffers.
- Having two read buffers facilitate stereo -> mono conversion.
- Having two write buffers facilitate interleaved IO.
-*/
+/*
+ * We will allocate two read buffers and two write buffers.
+ * Having two read buffers facilitates stereo -> mono conversion.
+ * Having two write buffers facilitates interleaved IO.
+ */
#define READ_BUFFER_SIZE 16384
#define WRITE_BUFFER_SIZE 16384
#define COMBINED_BUFFER_SIZE ((2 * READ_BUFFER_SIZE) + \
@@ -59,8 +61,10 @@ struct goldfish_audio {
#define AUDIO_READ(data, addr) (readl(data->reg_base + addr))
#define AUDIO_WRITE(data, addr, x) (writel(x, data->reg_base + addr))
-/* temporary variable used between goldfish_audio_probe() and
- goldfish_audio_open() */
+/*
+ * temporary variable used between goldfish_audio_probe() and
+ * goldfish_audio_open()
+ */
static struct goldfish_audio *audio_data;
enum {
@@ -161,8 +165,10 @@ static ssize_t goldfish_audio_write(struct file *fp, const char __user *buf,
}
spin_lock_irqsave(&data->lock, irq_flags);
- /* clear the buffer empty flag, and signal the emulator
- * to start writing the buffer */
+ /*
+ * clear the buffer empty flag, and signal the emulator
+ * to start writing the buffer
+ */
if (kbuf == data->write_buffer1) {
data->buffer_status &= ~AUDIO_INT_WRITE_BUFFER_1_EMPTY;
AUDIO_WRITE(data, AUDIO_WRITE_BUFFER_1, copy);
@@ -225,8 +231,10 @@ static irqreturn_t goldfish_audio_interrupt(int irq, void *dev_id)
/* read buffer status flags */
status = AUDIO_READ(data, AUDIO_INT_STATUS);
status &= AUDIO_INT_MASK;
- /* if buffers are newly empty, wake up blocked
- goldfish_audio_write() call */
+ /*
+ * if buffers are newly empty, wake up blocked
+ * goldfish_audio_write() call
+ */
if (status) {
data->buffer_status = status;
wake_up(&data->wait);
diff --git a/drivers/staging/goldfish/goldfish_nand.c b/drivers/staging/goldfish/goldfish_nand.c
index ab1f019..81e2ad4 100644
--- a/drivers/staging/goldfish/goldfish_nand.c
+++ b/drivers/staging/goldfish/goldfish_nand.c
@@ -326,9 +326,10 @@ static int goldfish_nand_init_device(struct platform_device *pdev,
(mtd->writesize + mtd->oobsize) * mtd->writesize;
do_div(mtd->size, mtd->writesize + mtd->oobsize);
mtd->size *= mtd->writesize;
- dev_dbg(&pdev->dev,
+ dev_dbg(&pdev->dev,
"goldfish nand dev%d: size %llx, page %d, extra %d, erase %d\n",
- id, mtd->size, mtd->writesize, mtd->oobsize, mtd->erasesize);
+ id, mtd->size, mtd->writesize,
+ mtd->oobsize, mtd->erasesize);
spin_unlock_irqrestore(&nand->lock, irq_flags);
mtd->priv = nand;
@@ -340,7 +341,7 @@ static int goldfish_nand_init_device(struct platform_device *pdev,
result = goldfish_nand_cmd(mtd, NAND_CMD_GET_DEV_NAME, 0, name_len,
name);
if (result != name_len) {
- dev_err(&pdev->dev,
+ dev_err(&pdev->dev,
"goldfish_nand_init_device failed to get dev name %d != %d\n",
result, name_len);
return -ENODEV;
@@ -391,7 +392,7 @@ static int goldfish_nand_probe(struct platform_device *pdev)
version = readl(base + NAND_VERSION);
if (version != NAND_VERSION_CURRENT) {
- dev_err(&pdev->dev,
+ dev_err(&pdev->dev,
"goldfish_nand_init: version mismatch, got %d, expected %d\n",
version, NAND_VERSION_CURRENT);
return -ENODEV;
@@ -400,7 +401,7 @@ static int goldfish_nand_probe(struct platform_device *pdev)
if (num_dev == 0)
return -ENODEV;
- nand = devm_kzalloc(&pdev->dev, sizeof(*nand) +
+ nand = devm_kzalloc(&pdev->dev, sizeof(*nand) +
sizeof(struct mtd_info) * num_dev, GFP_KERNEL);
if (nand == NULL)
return -ENOMEM;
diff --git a/drivers/staging/goldfish/goldfish_nand_reg.h b/drivers/staging/goldfish/goldfish_nand_reg.h
index 956c6c3..ddfda71 100644
--- a/drivers/staging/goldfish/goldfish_nand_reg.h
+++ b/drivers/staging/goldfish/goldfish_nand_reg.h
@@ -1,27 +1,30 @@
-/* drivers/mtd/devices/goldfish_nand_reg.h
-**
-** Copyright (C) 2007 Google, Inc.
-**
-** This software is licensed under the terms of the GNU General Public
-** License version 2, as published by the Free Software Foundation, and
-** may be copied, distributed, and modified under those terms.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-*/
+/*
+ * drivers/mtd/devices/goldfish_nand_reg.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
#ifndef GOLDFISH_NAND_REG_H
#define GOLDFISH_NAND_REG_H
enum nand_cmd {
- NAND_CMD_GET_DEV_NAME, /* Write device name for NAND_DEV to NAND_DATA (vaddr) */
+ /* Write device name for NAND_DEV to NAND_DATA (vaddr) */
+ NAND_CMD_GET_DEV_NAME,
NAND_CMD_READ,
NAND_CMD_WRITE,
NAND_CMD_ERASE,
- NAND_CMD_BLOCK_BAD_GET, /* NAND_RESULT is 1 if block is bad, 0 if it is not */
+ /* NAND_RESULT is 1 if block is bad, 0 if it is not */
+ NAND_CMD_BLOCK_BAD_GET,
NAND_CMD_BLOCK_BAD_SET,
NAND_CMD_READ_WITH_PARAMS,
NAND_CMD_WRITE_WITH_PARAMS,
diff --git a/drivers/staging/iio/Documentation/device.txt b/drivers/staging/iio/Documentation/device.txt
index ea08d62..8be32e5 100644
--- a/drivers/staging/iio/Documentation/device.txt
+++ b/drivers/staging/iio/Documentation/device.txt
@@ -56,7 +56,7 @@ Then fill in the following:
- indio_dev->modes:
Specify whether direct access and / or ring buffer access is supported.
-- indio_dev->ring:
+- indio_dev->buffer:
An optional associated buffer.
- indio_dev->pollfunc:
Poll function related elements. This controls what occurs when a trigger
@@ -67,7 +67,7 @@ Then fill in the following:
- indio_dev->num_channels:
How many channels are there?
-Once these are set up, a call to iio_device_register(indio_dev),
+Once these are set up, a call to iio_device_register(indio_dev)
will register the device with the iio core.
Worth noting here is that, if a ring buffer is to be used, it can be
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index db4d6dc..b36feb0 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -37,7 +37,7 @@ config IIO_SIMPLE_DUMMY_EVENTS
config IIO_SIMPLE_DUMMY_BUFFER
boolean "Buffered capture support"
- depends on IIO_KFIFO_BUF
+ select IIO_KFIFO_BUF
help
Add buffered data capture to the simple dummy driver.
diff --git a/drivers/staging/iio/accel/adis16201_core.c b/drivers/staging/iio/accel/adis16201_core.c
index ab8ec7a..2105576 100644
--- a/drivers/staging/iio/accel/adis16201_core.c
+++ b/drivers/staging/iio/accel/adis16201_core.c
@@ -182,11 +182,10 @@ static int adis16201_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
/* setup the industrialio driver allocated elements */
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
st = iio_priv(indio_dev);
/* this is only used for removal purposes */
spi_set_drvdata(spi, indio_dev);
@@ -201,10 +200,10 @@ static int adis16201_probe(struct spi_device *spi)
ret = adis_init(st, indio_dev, spi, &adis16201_data);
if (ret)
- goto error_free_dev;
+ return ret;
ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
if (ret)
- goto error_free_dev;
+ return ret;
/* Get the device into a sane initial state */
ret = adis_initial_startup(st);
@@ -218,9 +217,6 @@ static int adis16201_probe(struct spi_device *spi)
error_cleanup_buffer_trigger:
adis_cleanup_buffer_and_trigger(st, indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
return ret;
}
@@ -231,7 +227,6 @@ static int adis16201_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
adis_cleanup_buffer_and_trigger(st, indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/accel/adis16203_core.c b/drivers/staging/iio/accel/adis16203_core.c
index b08ac8f..409a28e 100644
--- a/drivers/staging/iio/accel/adis16203_core.c
+++ b/drivers/staging/iio/accel/adis16203_core.c
@@ -148,11 +148,9 @@ static int adis16203_probe(struct spi_device *spi)
struct adis *st;
/* setup the industrialio driver allocated elements */
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
st = iio_priv(indio_dev);
/* this is only used for removal purposes */
spi_set_drvdata(spi, indio_dev);
@@ -166,11 +164,11 @@ static int adis16203_probe(struct spi_device *spi)
ret = adis_init(st, indio_dev, spi, &adis16203_data);
if (ret)
- goto error_free_dev;
+ return ret;
ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
if (ret)
- goto error_free_dev;
+ return ret;
/* Get the device into a sane initial state */
ret = adis_initial_startup(st);
@@ -185,9 +183,6 @@ static int adis16203_probe(struct spi_device *spi)
error_cleanup_buffer_trigger:
adis_cleanup_buffer_and_trigger(st, indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
return ret;
}
@@ -198,7 +193,6 @@ static int adis16203_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
adis_cleanup_buffer_and_trigger(st, indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/accel/adis16204_core.c b/drivers/staging/iio/accel/adis16204_core.c
index 792ec25..b8ea768 100644
--- a/drivers/staging/iio/accel/adis16204_core.c
+++ b/drivers/staging/iio/accel/adis16204_core.c
@@ -187,11 +187,9 @@ static int adis16204_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
/* setup the industrialio driver allocated elements */
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
st = iio_priv(indio_dev);
/* this is only used for removal purposes */
spi_set_drvdata(spi, indio_dev);
@@ -205,11 +203,11 @@ static int adis16204_probe(struct spi_device *spi)
ret = adis_init(st, indio_dev, spi, &adis16204_data);
if (ret)
- goto error_free_dev;
+ return ret;
ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
if (ret)
- goto error_free_dev;
+ return ret;
/* Get the device into a sane initial state */
ret = adis_initial_startup(st);
@@ -223,9 +221,6 @@ static int adis16204_probe(struct spi_device *spi)
error_cleanup_buffer_trigger:
adis_cleanup_buffer_and_trigger(st, indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
return ret;
}
@@ -236,7 +231,6 @@ static int adis16204_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
adis_cleanup_buffer_and_trigger(st, indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/accel/adis16209_core.c b/drivers/staging/iio/accel/adis16209_core.c
index 323c169..4492e51 100644
--- a/drivers/staging/iio/accel/adis16209_core.c
+++ b/drivers/staging/iio/accel/adis16209_core.c
@@ -183,11 +183,9 @@ static int adis16209_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
/* setup the industrialio driver allocated elements */
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
st = iio_priv(indio_dev);
/* this is only used for removal purposes */
spi_set_drvdata(spi, indio_dev);
@@ -201,10 +199,10 @@ static int adis16209_probe(struct spi_device *spi)
ret = adis_init(st, indio_dev, spi, &adis16209_data);
if (ret)
- goto error_free_dev;
+ return ret;
ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
if (ret)
- goto error_free_dev;
+ return ret;
/* Get the device into a sane initial state */
ret = adis_initial_startup(st);
@@ -218,9 +216,6 @@ static int adis16209_probe(struct spi_device *spi)
error_cleanup_buffer_trigger:
adis_cleanup_buffer_and_trigger(st, indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
return ret;
}
@@ -231,7 +226,6 @@ static int adis16209_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
adis_cleanup_buffer_and_trigger(st, indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/accel/adis16220_core.c b/drivers/staging/iio/accel/adis16220_core.c
index 0e72f79..5c28961 100644
--- a/drivers/staging/iio/accel/adis16220_core.c
+++ b/drivers/staging/iio/accel/adis16220_core.c
@@ -428,11 +428,9 @@ static int adis16220_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
/* setup the industrialio driver allocated elements */
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
st = iio_priv(indio_dev);
/* this is only used for removal purposes */
@@ -447,7 +445,7 @@ static int adis16220_probe(struct spi_device *spi)
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_dev;
+ return ret;
ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &accel_bin);
if (ret)
@@ -478,9 +476,6 @@ error_rm_accel_bin:
sysfs_remove_bin_file(&indio_dev->dev.kobj, &accel_bin);
error_unregister_dev:
iio_device_unregister(indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
return ret;
}
@@ -492,7 +487,6 @@ static int adis16220_remove(struct spi_device *spi)
sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc1_bin);
sysfs_remove_bin_file(&indio_dev->dev.kobj, &accel_bin);
iio_device_unregister(indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/accel/adis16240_core.c b/drivers/staging/iio/accel/adis16240_core.c
index fd1f0fd..3a303a0 100644
--- a/drivers/staging/iio/accel/adis16240_core.c
+++ b/drivers/staging/iio/accel/adis16240_core.c
@@ -236,11 +236,9 @@ static int adis16240_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
/* setup the industrialio driver allocated elements */
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
st = iio_priv(indio_dev);
/* this is only used for removal purposes */
spi_set_drvdata(spi, indio_dev);
@@ -254,10 +252,10 @@ static int adis16240_probe(struct spi_device *spi)
ret = adis_init(st, indio_dev, spi, &adis16240_data);
if (ret)
- goto error_free_dev;
+ return ret;
ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
if (ret)
- goto error_free_dev;
+ return ret;
/* Get the device into a sane initial state */
ret = adis_initial_startup(st);
@@ -270,9 +268,6 @@ static int adis16240_probe(struct spi_device *spi)
error_cleanup_buffer_trigger:
adis_cleanup_buffer_and_trigger(st, indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
return ret;
}
@@ -283,7 +278,6 @@ static int adis16240_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
adis_cleanup_buffer_and_trigger(st, indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/accel/lis3l02dq_core.c b/drivers/staging/iio/accel/lis3l02dq_core.c
index 1bfe5d8..bb852dc 100644
--- a/drivers/staging/iio/accel/lis3l02dq_core.c
+++ b/drivers/staging/iio/accel/lis3l02dq_core.c
@@ -257,6 +257,8 @@ static int lis3l02dq_read_raw(struct iio_dev *indio_dev,
ret = lis3l02dq_read_reg_s16(indio_dev, reg, val);
}
mutex_unlock(&indio_dev->mlock);
+ if (ret < 0)
+ goto error_ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 0;
@@ -666,11 +668,9 @@ static int lis3l02dq_probe(struct spi_device *spi)
struct lis3l02dq_state *st;
struct iio_dev *indio_dev;
- indio_dev = iio_device_alloc(sizeof *st);
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
st = iio_priv(indio_dev);
/* this is only used for removal purposes */
spi_set_drvdata(spi, indio_dev);
@@ -688,7 +688,7 @@ static int lis3l02dq_probe(struct spi_device *spi)
ret = lis3l02dq_configure_buffer(indio_dev);
if (ret)
- goto error_free_dev;
+ return ret;
ret = iio_buffer_register(indio_dev,
lis3l02dq_channels,
@@ -734,9 +734,6 @@ error_uninitialize_buffer:
iio_buffer_unregister(indio_dev);
error_unreg_buffer_funcs:
lis3l02dq_unconfigure_buffer(indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
return ret;
}
@@ -784,8 +781,6 @@ static int lis3l02dq_remove(struct spi_device *spi)
iio_buffer_unregister(indio_dev);
lis3l02dq_unconfigure_buffer(indio_dev);
- iio_device_free(indio_dev);
-
return 0;
}
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index 32950ad..48a25ba 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -1135,11 +1135,9 @@ static int sca3000_probe(struct spi_device *spi)
struct sca3000_state *st;
struct iio_dev *indio_dev;
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
st = iio_priv(indio_dev);
spi_set_drvdata(spi, indio_dev);
@@ -1162,7 +1160,7 @@ static int sca3000_probe(struct spi_device *spi)
sca3000_configure_ring(indio_dev);
ret = iio_device_register(indio_dev);
if (ret < 0)
- goto error_free_dev;
+ return ret;
ret = iio_buffer_register(indio_dev,
sca3000_channels,
@@ -1198,10 +1196,6 @@ error_unregister_ring:
iio_buffer_unregister(indio_dev);
error_unregister_dev:
iio_device_unregister(indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
-
-error_ret:
return ret;
}
@@ -1235,7 +1229,6 @@ static int sca3000_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
iio_buffer_unregister(indio_dev);
sca3000_unconfigure_ring(indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 5047019..3283e282 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -326,7 +326,7 @@ static ssize_t ad7192_write_frequency(struct device *dev,
unsigned long lval;
int div, ret;
- ret = strict_strtoul(buf, 10, &lval);
+ ret = kstrtoul(buf, 10, &lval);
if (ret)
return ret;
if (lval == 0)
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index 2fd6ee3..c19618b 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -632,7 +632,7 @@ static ssize_t ad7280_write_channel_config(struct device *dev,
long val;
int ret;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtol(buf, 10, &val);
if (ret)
return ret;
diff --git a/drivers/staging/iio/adc/ad7291.c b/drivers/staging/iio/adc/ad7291.c
index d088c66..a2e61c2 100644
--- a/drivers/staging/iio/adc/ad7291.c
+++ b/drivers/staging/iio/adc/ad7291.c
@@ -21,6 +21,8 @@
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
+#include "ad7291.h"
+
/*
* Simplified handling
*
@@ -39,33 +41,9 @@
#define AD7291_VOLTAGE 0x01
#define AD7291_T_SENSE 0x02
#define AD7291_T_AVERAGE 0x03
-#define AD7291_CH0_DATA_HIGH 0x04
-#define AD7291_CH0_DATA_LOW 0x05
-#define AD7291_CH0_HYST 0x06
-#define AD7291_CH1_DATA_HIGH 0x07
-#define AD7291_CH1_DATA_LOW 0x08
-#define AD7291_CH1_HYST 0x09
-#define AD7291_CH2_DATA_HIGH 0x0A
-#define AD7291_CH2_DATA_LOW 0x0B
-#define AD7291_CH2_HYST 0x0C
-#define AD7291_CH3_DATA_HIGH 0x0D
-#define AD7291_CH3_DATA_LOW 0x0E
-#define AD7291_CH3_HYST 0x0F
-#define AD7291_CH4_DATA_HIGH 0x10
-#define AD7291_CH4_DATA_LOW 0x11
-#define AD7291_CH4_HYST 0x12
-#define AD7291_CH5_DATA_HIGH 0x13
-#define AD7291_CH5_DATA_LOW 0x14
-#define AD7291_CH5_HYST 0x15
-#define AD7291_CH6_DATA_HIGH 0x16
-#define AD7291_CH6_DATA_LOW 0x17
-#define AD7291_CH6_HYST 0x18
-#define AD7291_CH7_DATA_HIGH 0x19
-#define AD7291_CH7_DATA_LOW 0x1A
-#define AD7291_CH7_HYST 0x2B
-#define AD7291_T_SENSE_HIGH 0x1C
-#define AD7291_T_SENSE_LOW 0x1D
-#define AD7291_T_SENSE_HYST 0x1E
+#define AD7291_DATA_HIGH(x) ((x) * 3 + 0x4)
+#define AD7291_DATA_LOW(x) ((x) * 3 + 0x5)
+#define AD7291_HYST(x) ((x) * 3 + 0x6)
#define AD7291_VOLTAGE_ALERT_STATUS 0x1F
#define AD7291_T_ALERT_STATUS 0x20
@@ -100,7 +78,6 @@
struct ad7291_chip_info {
struct i2c_client *client;
struct regulator *reg;
- u16 int_vref_mv;
u16 command;
u16 c_mask; /* Active voltage channels for events */
struct mutex state_lock;
@@ -111,45 +88,22 @@ static int ad7291_i2c_read(struct ad7291_chip_info *chip, u8 reg, u16 *data)
struct i2c_client *client = chip->client;
int ret = 0;
- ret = i2c_smbus_read_word_data(client, reg);
+ ret = i2c_smbus_read_word_swapped(client, reg);
if (ret < 0) {
dev_err(&client->dev, "I2C read error\n");
return ret;
}
- *data = swab16((u16)ret);
+ *data = ret;
return 0;
}
static int ad7291_i2c_write(struct ad7291_chip_info *chip, u8 reg, u16 data)
{
- return i2c_smbus_write_word_data(chip->client, reg, swab16(data));
-}
-
-static ssize_t ad7291_store_reset(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct ad7291_chip_info *chip = iio_priv(indio_dev);
-
- return ad7291_i2c_write(chip, AD7291_COMMAND,
- chip->command | AD7291_RESET);
+ return i2c_smbus_write_word_swapped(chip->client, reg, data);
}
-static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, ad7291_store_reset, 0);
-
-static struct attribute *ad7291_attributes[] = {
- &iio_dev_attr_reset.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ad7291_attribute_group = {
- .attrs = ad7291_attributes,
-};
-
static irqreturn_t ad7291_event_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
@@ -255,31 +209,31 @@ static inline ssize_t ad7291_set_hyst(struct device *dev,
static IIO_DEVICE_ATTR(in_temp0_thresh_both_hyst_raw,
S_IRUGO | S_IWUSR,
ad7291_show_hyst, ad7291_set_hyst,
- AD7291_T_SENSE_HYST);
+ AD7291_HYST(8));
static IIO_DEVICE_ATTR(in_voltage0_thresh_both_hyst_raw,
S_IRUGO | S_IWUSR,
- ad7291_show_hyst, ad7291_set_hyst, AD7291_CH0_HYST);
+ ad7291_show_hyst, ad7291_set_hyst, AD7291_HYST(0));
static IIO_DEVICE_ATTR(in_voltage1_thresh_both_hyst_raw,
S_IRUGO | S_IWUSR,
- ad7291_show_hyst, ad7291_set_hyst, AD7291_CH1_HYST);
+ ad7291_show_hyst, ad7291_set_hyst, AD7291_HYST(1));
static IIO_DEVICE_ATTR(in_voltage2_thresh_both_hyst_raw,
S_IRUGO | S_IWUSR,
- ad7291_show_hyst, ad7291_set_hyst, AD7291_CH2_HYST);
+ ad7291_show_hyst, ad7291_set_hyst, AD7291_HYST(2));
static IIO_DEVICE_ATTR(in_voltage3_thresh_both_hyst_raw,
S_IRUGO | S_IWUSR,
- ad7291_show_hyst, ad7291_set_hyst, AD7291_CH3_HYST);
+ ad7291_show_hyst, ad7291_set_hyst, AD7291_HYST(3));
static IIO_DEVICE_ATTR(in_voltage4_thresh_both_hyst_raw,
S_IRUGO | S_IWUSR,
- ad7291_show_hyst, ad7291_set_hyst, AD7291_CH4_HYST);
+ ad7291_show_hyst, ad7291_set_hyst, AD7291_HYST(4));
static IIO_DEVICE_ATTR(in_voltage5_thresh_both_hyst_raw,
S_IRUGO | S_IWUSR,
- ad7291_show_hyst, ad7291_set_hyst, AD7291_CH5_HYST);
+ ad7291_show_hyst, ad7291_set_hyst, AD7291_HYST(5));
static IIO_DEVICE_ATTR(in_voltage6_thresh_both_hyst_raw,
S_IRUGO | S_IWUSR,
- ad7291_show_hyst, ad7291_set_hyst, AD7291_CH6_HYST);
+ ad7291_show_hyst, ad7291_set_hyst, AD7291_HYST(6));
static IIO_DEVICE_ATTR(in_voltage7_thresh_both_hyst_raw,
S_IRUGO | S_IWUSR,
- ad7291_show_hyst, ad7291_set_hyst, AD7291_CH7_HYST);
+ ad7291_show_hyst, ad7291_set_hyst, AD7291_HYST(7));
static struct attribute *ad7291_event_attributes[] = {
&iio_dev_attr_in_temp0_thresh_both_hyst_raw.dev_attr.attr,
@@ -294,53 +248,45 @@ static struct attribute *ad7291_event_attributes[] = {
NULL,
};
-/* high / low */
-static u8 ad7291_limit_regs[9][2] = {
- { AD7291_CH0_DATA_HIGH, AD7291_CH0_DATA_LOW },
- { AD7291_CH1_DATA_HIGH, AD7291_CH1_DATA_LOW },
- { AD7291_CH2_DATA_HIGH, AD7291_CH2_DATA_LOW },
- { AD7291_CH3_DATA_HIGH, AD7291_CH3_DATA_LOW }, /* FIXME: ? */
- { AD7291_CH4_DATA_HIGH, AD7291_CH4_DATA_LOW },
- { AD7291_CH5_DATA_HIGH, AD7291_CH5_DATA_LOW },
- { AD7291_CH6_DATA_HIGH, AD7291_CH6_DATA_LOW },
- { AD7291_CH7_DATA_HIGH, AD7291_CH7_DATA_LOW },
- /* temp */
- { AD7291_T_SENSE_HIGH, AD7291_T_SENSE_LOW },
-};
+static unsigned int ad7291_threshold_reg(u64 event_code)
+{
+ unsigned int offset;
+
+ switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
+ case IIO_VOLTAGE:
+ offset = IIO_EVENT_CODE_EXTRACT_CHAN(event_code);
+ break;
+ case IIO_TEMP:
+ offset = 8;
+ break;
+ default:
+ return 0;
+ }
+
+ if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) == IIO_EV_DIR_FALLING)
+ return AD7291_DATA_LOW(offset);
+ else
+ return AD7291_DATA_HIGH(offset);
+}
static int ad7291_read_event_value(struct iio_dev *indio_dev,
u64 event_code,
int *val)
{
struct ad7291_chip_info *chip = iio_priv(indio_dev);
-
int ret;
- u8 reg;
u16 uval;
- s16 signval;
+
+ ret = ad7291_i2c_read(chip, ad7291_threshold_reg(event_code), &uval);
+ if (ret < 0)
+ return ret;
switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
case IIO_VOLTAGE:
- reg = ad7291_limit_regs[IIO_EVENT_CODE_EXTRACT_CHAN(event_code)]
- [!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
- IIO_EV_DIR_RISING)];
-
- ret = ad7291_i2c_read(chip, reg, &uval);
- if (ret < 0)
- return ret;
*val = uval & AD7291_VALUE_MASK;
return 0;
-
case IIO_TEMP:
- reg = ad7291_limit_regs[8]
- [!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
- IIO_EV_DIR_RISING)];
-
- ret = ad7291_i2c_read(chip, reg, &signval);
- if (ret < 0)
- return ret;
- signval = (s16)((signval & AD7291_VALUE_MASK) << 4) >> 4;
- *val = signval;
+ *val = sign_extend32(uval, 11);
return 0;
default:
return -EINVAL;
@@ -352,28 +298,21 @@ static int ad7291_write_event_value(struct iio_dev *indio_dev,
int val)
{
struct ad7291_chip_info *chip = iio_priv(indio_dev);
- u8 reg;
- s16 signval;
switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
case IIO_VOLTAGE:
if (val > AD7291_VALUE_MASK || val < 0)
return -EINVAL;
- reg = ad7291_limit_regs[IIO_EVENT_CODE_EXTRACT_CHAN(event_code)]
- [!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
- IIO_EV_DIR_RISING)];
- return ad7291_i2c_write(chip, reg, val);
+ break;
case IIO_TEMP:
if (val > 2047 || val < -2048)
return -EINVAL;
- reg = ad7291_limit_regs[8]
- [!(IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
- IIO_EV_DIR_RISING)];
- signval = val;
- return ad7291_i2c_write(chip, reg, *(u16 *)&signval);
+ break;
default:
return -EINVAL;
- };
+ }
+
+ return ad7291_i2c_write(chip, ad7291_threshold_reg(event_code), val);
}
static int ad7291_read_event_config(struct iio_dev *indio_dev,
@@ -456,9 +395,7 @@ static int ad7291_read_raw(struct iio_dev *indio_dev,
{
int ret;
struct ad7291_chip_info *chip = iio_priv(indio_dev);
- unsigned int scale_uv;
u16 regval;
- s16 signval;
switch (mask) {
case IIO_CHAN_INFO_RAW:
@@ -479,44 +416,47 @@ static int ad7291_read_raw(struct iio_dev *indio_dev,
return ret;
}
/* Read voltage */
- ret = i2c_smbus_read_word_data(chip->client,
+ ret = i2c_smbus_read_word_swapped(chip->client,
AD7291_VOLTAGE);
if (ret < 0) {
mutex_unlock(&chip->state_lock);
return ret;
}
- *val = swab16((u16)ret) & AD7291_VALUE_MASK;
+ *val = ret & AD7291_VALUE_MASK;
mutex_unlock(&chip->state_lock);
return IIO_VAL_INT;
case IIO_TEMP:
/* Assumes tsense bit of command register always set */
- ret = i2c_smbus_read_word_data(chip->client,
+ ret = i2c_smbus_read_word_swapped(chip->client,
AD7291_T_SENSE);
if (ret < 0)
return ret;
- signval = (s16)((swab16((u16)ret) &
- AD7291_VALUE_MASK) << 4) >> 4;
- *val = signval;
+ *val = sign_extend32(ret, 11);
return IIO_VAL_INT;
default:
return -EINVAL;
}
case IIO_CHAN_INFO_AVERAGE_RAW:
- ret = i2c_smbus_read_word_data(chip->client,
+ ret = i2c_smbus_read_word_swapped(chip->client,
AD7291_T_AVERAGE);
if (ret < 0)
return ret;
- signval = (s16)((swab16((u16)ret) &
- AD7291_VALUE_MASK) << 4) >> 4;
- *val = signval;
+ *val = sign_extend32(ret, 11);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
- scale_uv = (chip->int_vref_mv * 1000) >> AD7291_BITS;
- *val = scale_uv / 1000;
- *val2 = (scale_uv % 1000) * 1000;
- return IIO_VAL_INT_PLUS_MICRO;
+ if (chip->reg) {
+ int vref;
+ vref = regulator_get_voltage(chip->reg);
+ if (vref < 0)
+ return vref;
+ *val = vref / 1000;
+ } else {
+ *val = 2500;
+ }
+ *val2 = AD7291_BITS;
+ return IIO_VAL_FRACTIONAL_LOG2;
case IIO_TEMP:
/*
* One LSB of the ADC corresponds to 0.25 deg C.
@@ -571,21 +511,22 @@ static struct attribute_group ad7291_event_attribute_group = {
};
static const struct iio_info ad7291_info = {
- .attrs = &ad7291_attribute_group,
.read_raw = &ad7291_read_raw,
.read_event_config = &ad7291_read_event_config,
.write_event_config = &ad7291_write_event_config,
.read_event_value = &ad7291_read_event_value,
.write_event_value = &ad7291_write_event_value,
.event_attrs = &ad7291_event_attribute_group,
+ .driver_module = THIS_MODULE,
};
static int ad7291_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct ad7291_platform_data *pdata = client->dev.platform_data;
struct ad7291_chip_info *chip;
struct iio_dev *indio_dev;
- int ret = 0, voltage_uv = 0;
+ int ret = 0;
indio_dev = iio_device_alloc(sizeof(*chip));
if (indio_dev == NULL) {
@@ -594,12 +535,14 @@ static int ad7291_probe(struct i2c_client *client,
}
chip = iio_priv(indio_dev);
- chip->reg = regulator_get(&client->dev, "vcc");
- if (!IS_ERR(chip->reg)) {
+ if (pdata && pdata->use_external_ref) {
+ chip->reg = regulator_get(&client->dev, "vref");
+ if (IS_ERR(chip->reg))
+ goto error_free;
+
ret = regulator_enable(chip->reg);
if (ret)
goto error_put_reg;
- voltage_uv = regulator_get_voltage(chip->reg);
}
mutex_init(&chip->state_lock);
@@ -612,12 +555,8 @@ static int ad7291_probe(struct i2c_client *client,
AD7291_T_SENSE_MASK | /* Tsense always enabled */
AD7291_ALERT_POLARITY; /* set irq polarity low level */
- if (voltage_uv) {
- chip->int_vref_mv = voltage_uv / 1000;
+ if (pdata && pdata->use_external_ref)
chip->command |= AD7291_EXT_REF;
- } else {
- chip->int_vref_mv = 2500; /* Build-in ref */
- }
indio_dev->name = id->name;
indio_dev->channels = ad7291_channels;
@@ -654,21 +593,18 @@ static int ad7291_probe(struct i2c_client *client,
if (ret)
goto error_unreg_irq;
- dev_info(&client->dev, "%s ADC registered.\n",
- id->name);
-
return 0;
error_unreg_irq:
if (client->irq)
free_irq(client->irq, indio_dev);
error_disable_reg:
- if (!IS_ERR(chip->reg))
+ if (chip->reg)
regulator_disable(chip->reg);
error_put_reg:
- if (!IS_ERR(chip->reg))
+ if (chip->reg)
regulator_put(chip->reg);
-
+error_free:
iio_device_free(indio_dev);
error_ret:
return ret;
@@ -684,7 +620,7 @@ static int ad7291_remove(struct i2c_client *client)
if (client->irq)
free_irq(client->irq, indio_dev);
- if (!IS_ERR(chip->reg)) {
+ if (chip->reg) {
regulator_disable(chip->reg);
regulator_put(chip->reg);
}
diff --git a/drivers/staging/iio/adc/ad7291.h b/drivers/staging/iio/adc/ad7291.h
new file mode 100644
index 0000000..bbd89fa
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7291.h
@@ -0,0 +1,12 @@
+#ifndef __IIO_AD7291_H__
+#define __IIO_AD7291_H__
+
+/**
+ * struct ad7291_platform_data - AD7291 platform data
+ * @use_external_ref: Whether to use an external or internal reference voltage
+ */
+struct ad7291_platform_data {
+ bool use_external_ref;
+};
+
+#endif
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
index d104b43..72868ce 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606_core.c
@@ -125,9 +125,12 @@ static ssize_t ad7606_store_range(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7606_state *st = iio_priv(indio_dev);
unsigned long lval;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &lval);
+ if (ret)
+ return ret;
- if (strict_strtoul(buf, 10, &lval))
- return -EINVAL;
if (!(lval == 5000 || lval == 10000)) {
dev_err(dev, "range is not supported\n");
return -EINVAL;
@@ -173,8 +176,9 @@ static ssize_t ad7606_store_oversampling_ratio(struct device *dev,
unsigned long lval;
int ret;
- if (strict_strtoul(buf, 10, &lval))
- return -EINVAL;
+ ret = kstrtoul(buf, 10, &lval);
+ if (ret)
+ return ret;
ret = ad7606_oversampling_get_index(lval);
if (ret < 0) {
diff --git a/drivers/staging/iio/adc/ad7606_par.c b/drivers/staging/iio/adc/ad7606_par.c
index 58cfdde..8a48d18 100644
--- a/drivers/staging/iio/adc/ad7606_par.c
+++ b/drivers/staging/iio/adc/ad7606_par.c
@@ -112,8 +112,6 @@ static int ad7606_par_remove(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index 9284771..8470036 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -175,9 +175,9 @@ static ssize_t ad7816_store_channel(struct device *dev,
unsigned long data;
int ret;
- ret = strict_strtoul(buf, 10, &data);
+ ret = kstrtoul(buf, 10, &data);
if (ret)
- return -EINVAL;
+ return ret;
if (data > AD7816_CS_MAX && data != AD7816_CS_MASK) {
dev_err(&chip->spi_dev->dev, "Invalid channel id %lu for %s.\n",
@@ -290,7 +290,9 @@ static inline ssize_t ad7816_set_oti(struct device *dev,
u8 data;
int ret;
- ret = strict_strtol(buf, 10, &value);
+ ret = kstrtol(buf, 10, &value);
+ if (ret)
+ return ret;
if (chip->channel_id > AD7816_CS_MAX) {
dev_err(dev, "Invalid oti channel id %d.\n", chip->channel_id);
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
index 8dc97b3..2b2049c 100644
--- a/drivers/staging/iio/adc/ad799x_core.c
+++ b/drivers/staging/iio/adc/ad799x_core.c
@@ -226,7 +226,7 @@ static ssize_t ad799x_write_frequency(struct device *dev,
int ret, i;
u8 t;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtol(buf, 10, &val);
if (ret)
return ret;
@@ -337,7 +337,7 @@ static ssize_t ad799x_write_channel_config(struct device *dev,
long val;
int ret;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtol(buf, 10, &val);
if (ret)
return ret;
diff --git a/drivers/staging/iio/adc/lpc32xx_adc.c b/drivers/staging/iio/adc/lpc32xx_adc.c
index 2f2f7fd..9a4bb09 100644
--- a/drivers/staging/iio/adc/lpc32xx_adc.c
+++ b/drivers/staging/iio/adc/lpc32xx_adc.c
@@ -215,7 +215,6 @@ static int lpc32xx_adc_remove(struct platform_device *pdev)
iio_device_unregister(iodev);
free_irq(irq, info);
- platform_set_drvdata(pdev, NULL);
clk_put(info->clk);
iounmap(info->adc_base);
iio_device_free(iodev);
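Dropping platform_set_drvdata(pdev, NULL) from remove() is safe because the driver core has cleared driver data on unbind for several releases, so the call was redundant. A minimal sketch of the remaining remove() shape (names are placeholders):

#include <linux/iio/iio.h>
#include <linux/platform_device.h>

/* Hypothetical remove(): no need to clear drvdata by hand. */
static int example_remove(struct platform_device *pdev)
{
	struct iio_dev *iodev = platform_get_drvdata(pdev);

	iio_device_unregister(iodev);
	return 0;
}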
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index 163c638..a08c173 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -225,6 +225,9 @@ struct mxs_lradc {
#define LRADC_CTRL4_LRADCSELECT_MASK(n) (0xf << ((n) * 4))
#define LRADC_CTRL4_LRADCSELECT_OFFSET(n) ((n) * 4)
+#define LRADC_RESOLUTION 12
+#define LRADC_SINGLE_SAMPLE_MASK ((1 << LRADC_RESOLUTION) - 1)
+
/*
* Raw I/O operations
*/
@@ -234,7 +237,6 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
{
struct mxs_lradc *lradc = iio_priv(iio_dev);
int ret;
- unsigned long mask;
if (m != IIO_CHAN_INFO_RAW)
return -EINVAL;
@@ -243,12 +245,6 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
if (chan->channel > LRADC_MAX_TOTAL_CHANS)
return -EINVAL;
- /* Validate the channel if it doesn't intersect with reserved chans. */
- bitmap_set(&mask, chan->channel, 1);
- ret = iio_validate_scan_mask_onehot(iio_dev, &mask);
- if (ret)
- return -EINVAL;
-
/*
* See if there is no buffered operation in progress. If there is, simply
* bail out. This can be improved to support both buffered and raw IO at
@@ -547,9 +543,10 @@ static int mxs_lradc_ts_register(struct mxs_lradc *lradc)
__set_bit(EV_ABS, input->evbit);
__set_bit(EV_KEY, input->evbit);
__set_bit(BTN_TOUCH, input->keybit);
- input_set_abs_params(input, ABS_X, 0, LRADC_CH_VALUE_MASK, 0, 0);
- input_set_abs_params(input, ABS_Y, 0, LRADC_CH_VALUE_MASK, 0, 0);
- input_set_abs_params(input, ABS_PRESSURE, 0, LRADC_CH_VALUE_MASK, 0, 0);
+ input_set_abs_params(input, ABS_X, 0, LRADC_SINGLE_SAMPLE_MASK, 0, 0);
+ input_set_abs_params(input, ABS_Y, 0, LRADC_SINGLE_SAMPLE_MASK, 0, 0);
+ input_set_abs_params(input, ABS_PRESSURE, 0, LRADC_SINGLE_SAMPLE_MASK,
+ 0, 0);
lradc->ts_input = input;
input_set_drvdata(input, lradc);
@@ -620,7 +617,7 @@ static irqreturn_t mxs_lradc_trigger_handler(int irq, void *p)
((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET);
unsigned int i, j = 0;
- for_each_set_bit(i, iio->active_scan_mask, iio->masklength) {
+ for_each_set_bit(i, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) {
lradc->buffer[j] = readl(lradc->base + LRADC_CH(j));
writel(chan_value, lradc->base + LRADC_CH(j));
lradc->buffer[j] &= LRADC_CH_VALUE_MASK;
@@ -661,12 +658,13 @@ static int mxs_lradc_trigger_init(struct iio_dev *iio)
{
int ret;
struct iio_trigger *trig;
+ struct mxs_lradc *lradc = iio_priv(iio);
trig = iio_trigger_alloc("%s-dev%i", iio->name, iio->id);
if (trig == NULL)
return -ENOMEM;
- trig->dev.parent = iio->dev.parent;
+ trig->dev.parent = lradc->dev;
iio_trigger_set_drvdata(trig, iio);
trig->ops = &mxs_lradc_trigger_ops;
@@ -676,15 +674,17 @@ static int mxs_lradc_trigger_init(struct iio_dev *iio)
return ret;
}
- iio->trig = trig;
+ lradc->trig = trig;
return 0;
}
static void mxs_lradc_trigger_remove(struct iio_dev *iio)
{
- iio_trigger_unregister(iio->trig);
- iio_trigger_free(iio->trig);
+ struct mxs_lradc *lradc = iio_priv(iio);
+
+ iio_trigger_unregister(lradc->trig);
+ iio_trigger_free(lradc->trig);
}
static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
@@ -774,8 +774,7 @@ static bool mxs_lradc_validate_scan_mask(struct iio_dev *iio,
const unsigned long *mask)
{
struct mxs_lradc *lradc = iio_priv(iio);
- const int len = iio->masklength;
- const int map_chans = bitmap_weight(mask, len);
+ const int map_chans = bitmap_weight(mask, LRADC_MAX_TOTAL_CHANS);
int rsvd_chans = 0;
unsigned long rsvd_mask = 0;
@@ -792,7 +791,7 @@ static bool mxs_lradc_validate_scan_mask(struct iio_dev *iio,
rsvd_chans++;
/* Test for attempts to map channels with special mode of operation. */
- if (bitmap_intersects(mask, &rsvd_mask, len))
+ if (bitmap_intersects(mask, &rsvd_mask, LRADC_MAX_TOTAL_CHANS))
return false;
/* Test for attempts to map more channels than available slots. */
@@ -822,7 +821,7 @@ static const struct iio_buffer_setup_ops mxs_lradc_buffer_ops = {
.channel = (idx), \
.scan_type = { \
.sign = 'u', \
- .realbits = 18, \
+ .realbits = LRADC_RESOLUTION, \
.storagebits = 32, \
}, \
}
@@ -846,14 +845,16 @@ static const struct iio_chan_spec mxs_lradc_chan_spec[] = {
MXS_ADC_CHAN(15, IIO_VOLTAGE), /* VDD5V */
};
-static void mxs_lradc_hw_init(struct mxs_lradc *lradc)
+static int mxs_lradc_hw_init(struct mxs_lradc *lradc)
{
/* The ADC always uses DELAY CHANNEL 0. */
const uint32_t adc_cfg =
(1 << (LRADC_DELAY_TRIGGER_DELAYS_OFFSET + 0)) |
(LRADC_DELAY_TIMER_PER << LRADC_DELAY_DELAY_OFFSET);
- stmp_reset_block(lradc->base);
+ int ret = stmp_reset_block(lradc->base);
+ if (ret)
+ return ret;
/* Configure DELAY CHANNEL 0 for generic ADC sampling. */
writel(adc_cfg, lradc->base + LRADC_DELAY(0));
@@ -874,6 +875,8 @@ static void mxs_lradc_hw_init(struct mxs_lradc *lradc)
/* Start internal temperature sensing. */
writel(0, lradc->base + LRADC_CTRL2);
+
+ return 0;
}
static void mxs_lradc_hw_stop(struct mxs_lradc *lradc)
@@ -910,7 +913,7 @@ static int mxs_lradc_probe(struct platform_device *pdev)
int i;
/* Allocate the IIO device. */
- iio = iio_device_alloc(sizeof(*lradc));
+ iio = devm_iio_device_alloc(dev, sizeof(*lradc));
if (!iio) {
dev_err(dev, "Failed to allocate IIO device\n");
return -ENOMEM;
@@ -922,10 +925,8 @@ static int mxs_lradc_probe(struct platform_device *pdev)
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
lradc->dev = &pdev->dev;
lradc->base = devm_ioremap_resource(dev, iores);
- if (IS_ERR(lradc->base)) {
- ret = PTR_ERR(lradc->base);
- goto err_addr;
- }
+ if (IS_ERR(lradc->base))
+ return PTR_ERR(lradc->base);
INIT_WORK(&lradc->ts_work, mxs_lradc_ts_work);
@@ -945,16 +946,14 @@ static int mxs_lradc_probe(struct platform_device *pdev)
/* Grab all IRQ sources */
for (i = 0; i < of_cfg->irq_count; i++) {
lradc->irq[i] = platform_get_irq(pdev, i);
- if (lradc->irq[i] < 0) {
- ret = -EINVAL;
- goto err_addr;
- }
+ if (lradc->irq[i] < 0)
+ return -EINVAL;
ret = devm_request_irq(dev, lradc->irq[i],
mxs_lradc_handle_irq, 0,
of_cfg->irq_name[i], iio);
if (ret)
- goto err_addr;
+ return ret;
}
platform_set_drvdata(pdev, iio);
@@ -968,19 +967,22 @@ static int mxs_lradc_probe(struct platform_device *pdev)
iio->modes = INDIO_DIRECT_MODE;
iio->channels = mxs_lradc_chan_spec;
iio->num_channels = ARRAY_SIZE(mxs_lradc_chan_spec);
+ iio->masklength = LRADC_MAX_TOTAL_CHANS;
ret = iio_triggered_buffer_setup(iio, &iio_pollfunc_store_time,
&mxs_lradc_trigger_handler,
&mxs_lradc_buffer_ops);
if (ret)
- goto err_addr;
+ return ret;
ret = mxs_lradc_trigger_init(iio);
if (ret)
goto err_trig;
/* Configure the hardware. */
- mxs_lradc_hw_init(lradc);
+ ret = mxs_lradc_hw_init(lradc);
+ if (ret)
+ goto err_dev;
/* Register the touchscreen input device. */
ret = mxs_lradc_ts_register(lradc);
@@ -1002,8 +1004,6 @@ err_dev:
mxs_lradc_trigger_remove(iio);
err_trig:
iio_triggered_buffer_cleanup(iio);
-err_addr:
- iio_device_free(iio);
return ret;
}
@@ -1019,7 +1019,6 @@ static int mxs_lradc_remove(struct platform_device *pdev)
iio_device_unregister(iio);
iio_triggered_buffer_cleanup(iio);
mxs_lradc_trigger_remove(iio);
- iio_device_free(iio);
return 0;
}
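The mxs-lradc conversion, like several below, switches to device-managed allocation: devm_iio_device_alloc(), devm_ioremap_resource() and devm_request_irq() are all released automatically when probe fails or the device is unbound, which is why the err_addr: label and the iio_device_free() calls disappear. A condensed sketch of the probe shape after such a conversion; the names are placeholders, not the mxs-lradc code:

#include <linux/iio/iio.h>
#include <linux/module.h>
#include <linux/platform_device.h>

struct example_state {
	int dummy;			/* stand-in for the driver's private data */
};

/* Hypothetical probe skeleton: devm resources need no manual unwinding. */
static int example_probe(struct platform_device *pdev)
{
	struct iio_dev *iio;
	int ret;

	iio = devm_iio_device_alloc(&pdev->dev, sizeof(struct example_state));
	if (!iio)
		return -ENOMEM;		/* nothing allocated, nothing to free */

	platform_set_drvdata(pdev, iio);

	ret = iio_device_register(iio);
	if (ret)
		return ret;		/* devm cleanup releases the allocation */

	return 0;
}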
diff --git a/drivers/staging/iio/adc/spear_adc.c b/drivers/staging/iio/adc/spear_adc.c
index f45da42..20f2d55 100644
--- a/drivers/staging/iio/adc/spear_adc.c
+++ b/drivers/staging/iio/adc/spear_adc.c
@@ -300,11 +300,10 @@ static int spear_adc_probe(struct platform_device *pdev)
int ret = -ENODEV;
int irq;
- iodev = iio_device_alloc(sizeof(struct spear_adc_info));
+ iodev = devm_iio_device_alloc(dev, sizeof(struct spear_adc_info));
if (!iodev) {
dev_err(dev, "failed allocating iio device\n");
- ret = -ENOMEM;
- goto errout1;
+ return -ENOMEM;
}
info = iio_priv(iodev);
@@ -318,8 +317,7 @@ static int spear_adc_probe(struct platform_device *pdev)
info->adc_base_spear6xx = of_iomap(np, 0);
if (!info->adc_base_spear6xx) {
dev_err(dev, "failed mapping memory\n");
- ret = -ENOMEM;
- goto errout2;
+ return -ENOMEM;
}
info->adc_base_spear3xx =
(struct adc_regs_spear3xx *)info->adc_base_spear6xx;
@@ -327,33 +325,33 @@ static int spear_adc_probe(struct platform_device *pdev)
info->clk = clk_get(dev, NULL);
if (IS_ERR(info->clk)) {
dev_err(dev, "failed getting clock\n");
- goto errout3;
+ goto errout1;
}
ret = clk_prepare_enable(info->clk);
if (ret) {
dev_err(dev, "failed enabling clock\n");
- goto errout4;
+ goto errout2;
}
irq = platform_get_irq(pdev, 0);
if ((irq < 0) || (irq >= NR_IRQS)) {
dev_err(dev, "failed getting interrupt resource\n");
ret = -EINVAL;
- goto errout5;
+ goto errout3;
}
ret = devm_request_irq(dev, irq, spear_adc_isr, 0, MOD_NAME, info);
if (ret < 0) {
dev_err(dev, "failed requesting interrupt\n");
- goto errout5;
+ goto errout3;
}
if (of_property_read_u32(np, "sampling-frequency",
&info->sampling_freq)) {
dev_err(dev, "sampling-frequency missing in DT\n");
ret = -EINVAL;
- goto errout5;
+ goto errout3;
}
/*
@@ -383,21 +381,18 @@ static int spear_adc_probe(struct platform_device *pdev)
ret = iio_device_register(iodev);
if (ret)
- goto errout5;
+ goto errout3;
dev_info(dev, "SPEAR ADC driver loaded, IRQ %d\n", irq);
return 0;
-errout5:
- clk_disable_unprepare(info->clk);
-errout4:
- clk_put(info->clk);
errout3:
- iounmap(info->adc_base_spear6xx);
+ clk_disable_unprepare(info->clk);
errout2:
- iio_device_free(iodev);
+ clk_put(info->clk);
errout1:
+ iounmap(info->adc_base_spear6xx);
return ret;
}
@@ -407,20 +402,20 @@ static int spear_adc_remove(struct platform_device *pdev)
struct spear_adc_info *info = iio_priv(iodev);
iio_device_unregister(iodev);
- platform_set_drvdata(pdev, NULL);
clk_disable_unprepare(info->clk);
clk_put(info->clk);
iounmap(info->adc_base_spear6xx);
- iio_device_free(iodev);
return 0;
}
+#ifdef CONFIG_OF
static const struct of_device_id spear_adc_dt_ids[] = {
{ .compatible = "st,spear600-adc", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, spear_adc_dt_ids);
+#endif
static struct platform_driver spear_adc_driver = {
.probe = spear_adc_probe,
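Guarding the OF match table with #ifdef CONFIG_OF keeps the table out of non-devicetree builds; the usual companion is of_match_ptr(), which evaluates to NULL when CONFIG_OF is disabled. A generic sketch of that pairing; the structure below is illustrative, not the spear_adc driver:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#ifdef CONFIG_OF
static const struct of_device_id example_dt_ids[] = {
	{ .compatible = "vendor,example-adc", },	/* compatible string assumed */
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_dt_ids);
#endif

static struct platform_driver example_driver = {
	.driver = {
		.name = "example-adc",
		.of_match_table = of_match_ptr(example_dt_ids),
	},
};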
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index 506b5a7..1e13568 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -551,31 +551,6 @@ static IIO_DEVICE_ATTR(enable_smbus_timeout, S_IRUGO | S_IWUSR,
adt7316_store_enable_smbus_timeout,
0);
-
-static ssize_t adt7316_store_reset(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_to_iio_dev(dev);
- struct adt7316_chip_info *chip = iio_priv(dev_info);
- u8 config2;
- int ret;
-
- config2 = chip->config2 | ADT7316_RESET;
-
- ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG2, config2);
- if (ret)
- return -EIO;
-
- return len;
-}
-
-static IIO_DEVICE_ATTR(reset, S_IWUSR,
- NULL,
- adt7316_store_reset,
- 0);
-
static ssize_t adt7316_show_powerdown(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1675,7 +1650,6 @@ static IIO_DEVICE_ATTR(bus_type, S_IRUGO, adt7316_show_bus_type, NULL, 0);
static struct attribute *adt7316_attributes[] = {
&iio_dev_attr_all_modes.dev_attr.attr,
&iio_dev_attr_mode.dev_attr.attr,
- &iio_dev_attr_reset.dev_attr.attr,
&iio_dev_attr_enabled.dev_attr.attr,
&iio_dev_attr_ad_channel.dev_attr.attr,
&iio_dev_attr_all_ad_channels.dev_attr.attr,
@@ -1719,7 +1693,6 @@ static struct attribute *adt7516_attributes[] = {
&iio_dev_attr_all_modes.dev_attr.attr,
&iio_dev_attr_mode.dev_attr.attr,
&iio_dev_attr_select_ex_temp.dev_attr.attr,
- &iio_dev_attr_reset.dev_attr.attr,
&iio_dev_attr_enabled.dev_attr.attr,
&iio_dev_attr_ad_channel.dev_attr.attr,
&iio_dev_attr_all_ad_channels.dev_attr.attr,
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index 687dd2c..f4a0341 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -558,11 +558,9 @@ static int ad7150_probe(struct i2c_client *client,
struct ad7150_chip_info *chip;
struct iio_dev *indio_dev;
- indio_dev = iio_device_alloc(sizeof(*chip));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
chip = iio_priv(indio_dev);
mutex_init(&chip->state_lock);
/* this is only used for device removal purposes */
@@ -581,7 +579,7 @@ static int ad7150_probe(struct i2c_client *client,
indio_dev->modes = INDIO_DIRECT_MODE;
if (client->irq) {
- ret = request_threaded_irq(client->irq,
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
NULL,
&ad7150_event_handler,
IRQF_TRIGGER_RISING |
@@ -590,11 +588,11 @@ static int ad7150_probe(struct i2c_client *client,
"ad7150_irq1",
indio_dev);
if (ret)
- goto error_free_dev;
+ return ret;
}
if (client->dev.platform_data) {
- ret = request_threaded_irq(*(unsigned int *)
+ ret = devm_request_threaded_irq(&client->dev, *(unsigned int *)
client->dev.platform_data,
NULL,
&ad7150_event_handler,
@@ -604,28 +602,17 @@ static int ad7150_probe(struct i2c_client *client,
"ad7150_irq2",
indio_dev);
if (ret)
- goto error_free_irq;
+ return ret;
}
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_irq2;
+ return ret;
dev_info(&client->dev, "%s capacitive sensor registered,irq: %d\n",
id->name, client->irq);
return 0;
-error_free_irq2:
- if (client->dev.platform_data)
- free_irq(*(unsigned int *)client->dev.platform_data,
- indio_dev);
-error_free_irq:
- if (client->irq)
- free_irq(client->irq, indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
- return ret;
}
static int ad7150_remove(struct i2c_client *client)
@@ -633,13 +620,6 @@ static int ad7150_remove(struct i2c_client *client)
struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
- if (client->irq)
- free_irq(client->irq, indio_dev);
-
- if (client->dev.platform_data)
- free_irq(*(unsigned int *)client->dev.platform_data, indio_dev);
-
- iio_device_free(indio_dev);
return 0;
}
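With both interrupt lines requested through devm_request_threaded_irq(), the ad7150 remove() no longer needs any free_irq() calls; the IRQs are released when the client is unbound. A self-contained sketch of the devm-managed threaded request (handler, flags and name are placeholders, not the ad7150 code):

#include <linux/i2c.h>
#include <linux/interrupt.h>

static irqreturn_t example_event_handler(int irq, void *private)
{
	return IRQ_HANDLED;		/* a real handler would read event status */
}

/* Hypothetical helper: the IRQ is freed automatically on driver unbind. */
static int example_request_irq(struct i2c_client *client, void *data)
{
	return devm_request_threaded_irq(&client->dev, client->irq,
					 NULL, example_event_handler,
					 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					 "example_irq", data);
}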
diff --git a/drivers/staging/iio/cdc/ad7152.c b/drivers/staging/iio/cdc/ad7152.c
index 1d7c528..f2c309d 100644
--- a/drivers/staging/iio/cdc/ad7152.c
+++ b/drivers/staging/iio/cdc/ad7152.c
@@ -481,11 +481,9 @@ static int ad7152_probe(struct i2c_client *client,
struct ad7152_chip_info *chip;
struct iio_dev *indio_dev;
- indio_dev = iio_device_alloc(sizeof(*chip));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
chip = iio_priv(indio_dev);
/* this is only used for device removal purposes */
i2c_set_clientdata(client, indio_dev);
@@ -506,16 +504,11 @@ static int ad7152_probe(struct i2c_client *client,
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_dev;
+ return ret;
dev_err(&client->dev, "%s capacitive sensor registered\n", id->name);
return 0;
-
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
- return ret;
}
static int ad7152_remove(struct i2c_client *client)
@@ -523,7 +516,6 @@ static int ad7152_remove(struct i2c_client *client)
struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index 94f9ca7..75a533b 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -699,11 +699,9 @@ static int ad7746_probe(struct i2c_client *client,
int ret = 0;
unsigned char regval = 0;
- indio_dev = iio_device_alloc(sizeof(*chip));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
chip = iio_priv(indio_dev);
/* this is only used for device removal purposes */
i2c_set_clientdata(client, indio_dev);
@@ -748,20 +746,15 @@ static int ad7746_probe(struct i2c_client *client,
ret = i2c_smbus_write_byte_data(chip->client,
AD7746_REG_EXC_SETUP, regval);
if (ret < 0)
- goto error_free_dev;
+ return ret;
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_dev;
+ return ret;
dev_info(&client->dev, "%s capacitive sensor registered\n", id->name);
return 0;
-
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
- return ret;
}
static int ad7746_remove(struct i2c_client *client)
@@ -769,7 +762,6 @@ static int ad7746_remove(struct i2c_client *client)
struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/gyro/Kconfig b/drivers/staging/iio/gyro/Kconfig
index 8360662..88b199b 100644
--- a/drivers/staging/iio/gyro/Kconfig
+++ b/drivers/staging/iio/gyro/Kconfig
@@ -10,23 +10,4 @@ config ADIS16060
Say yes here to build support for Analog Devices adis16060 wide bandwidth
yaw rate gyroscope with SPI.
-config ADIS16130
- tristate "Analog Devices ADIS16130 High Precision Angular Rate Sensor driver"
- depends on SPI
- help
- Say yes here to build support for Analog Devices ADIS16130 High Precision
- Angular Rate Sensor driver.
-
-config ADIS16260
- tristate "Analog Devices ADIS16260 Digital Gyroscope Sensor SPI driver"
- depends on SPI
- select IIO_ADIS_LIB
- select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
- help
- Say yes here to build support for Analog Devices ADIS16260 ADIS16265
- ADIS16250 ADIS16255 and ADIS16251 programmable digital gyroscope sensors.
-
- This driver can also be built as a module. If so, the module
- will be called adis16260.
-
endmenu
diff --git a/drivers/staging/iio/gyro/Makefile b/drivers/staging/iio/gyro/Makefile
index 98e6500..cf22d6d 100644
--- a/drivers/staging/iio/gyro/Makefile
+++ b/drivers/staging/iio/gyro/Makefile
@@ -4,9 +4,3 @@
adis16060-y := adis16060_core.o
obj-$(CONFIG_ADIS16060) += adis16060.o
-
-adis16130-y := adis16130_core.o
-obj-$(CONFIG_ADIS16130) += adis16130.o
-
-adis16260-y := adis16260_core.o
-obj-$(CONFIG_ADIS16260) += adis16260.o
diff --git a/drivers/staging/iio/gyro/adis16060_core.c b/drivers/staging/iio/gyro/adis16060_core.c
index c67d3a8..6d3d771 100644
--- a/drivers/staging/iio/gyro/adis16060_core.c
+++ b/drivers/staging/iio/gyro/adis16060_core.c
@@ -151,11 +151,9 @@ static int adis16060_r_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
/* setup the industrialio driver allocated elements */
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
/* this is only used for removal purposes */
spi_set_drvdata(spi, indio_dev);
st = iio_priv(indio_dev);
@@ -171,23 +169,16 @@ static int adis16060_r_probe(struct spi_device *spi)
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_dev;
+ return ret;
adis16060_iio_dev = indio_dev;
return 0;
-
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
- return ret;
}
/* fixme, confirm ordering in this function */
static int adis16060_r_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
- iio_device_free(spi_get_drvdata(spi));
-
return 0;
}
diff --git a/drivers/staging/iio/gyro/adis16130_core.c b/drivers/staging/iio/gyro/adis16130_core.c
deleted file mode 100644
index 531b803..0000000
--- a/drivers/staging/iio/gyro/adis16130_core.c
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * ADIS16130 Digital Output, High Precision Angular Rate Sensor driver
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-
-#define ADIS16130_CON 0x0
-#define ADIS16130_CON_RD (1 << 6)
-#define ADIS16130_IOP 0x1
-
-/* 1 = data-ready signal low when unread data on all channels; */
-#define ADIS16130_IOP_ALL_RDY (1 << 3)
-#define ADIS16130_IOP_SYNC (1 << 0) /* 1 = synchronization enabled */
-#define ADIS16130_RATEDATA 0x8 /* Gyroscope output, rate of rotation */
-#define ADIS16130_TEMPDATA 0xA /* Temperature output */
-#define ADIS16130_RATECS 0x28 /* Gyroscope channel setup */
-#define ADIS16130_RATECS_EN (1 << 3) /* 1 = channel enable; */
-#define ADIS16130_TEMPCS 0x2A /* Temperature channel setup */
-#define ADIS16130_TEMPCS_EN (1 << 3)
-#define ADIS16130_RATECONV 0x30
-#define ADIS16130_TEMPCONV 0x32
-#define ADIS16130_MODE 0x38
-#define ADIS16130_MODE_24BIT (1 << 1) /* 1 = 24-bit resolution; */
-
-/**
- * struct adis16130_state - device instance specific data
- * @us: actual spi_device to write data
- * @buf_lock: mutex to protect tx and rx
- * @buf: unified tx/rx buffer
- **/
-struct adis16130_state {
- struct spi_device *us;
- struct mutex buf_lock;
- u8 buf[4] ____cacheline_aligned;
-};
-
-static int adis16130_spi_read(struct iio_dev *indio_dev, u8 reg_addr, u32 *val)
-{
- int ret;
- struct adis16130_state *st = iio_priv(indio_dev);
- struct spi_message msg;
- struct spi_transfer xfer = {
- .tx_buf = st->buf,
- .rx_buf = st->buf,
- .len = 4,
- };
-
- mutex_lock(&st->buf_lock);
-
- st->buf[0] = ADIS16130_CON_RD | reg_addr;
- st->buf[1] = st->buf[2] = st->buf[3] = 0;
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->us, &msg);
- ret = spi_read(st->us, st->buf, 4);
-
- if (ret == 0)
- *val = (st->buf[1] << 16) | (st->buf[2] << 8) | st->buf[3];
- mutex_unlock(&st->buf_lock);
-
- return ret;
-}
-
-static int adis16130_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- int ret;
- u32 temp;
-
- /* Take the iio_dev status lock */
- mutex_lock(&indio_dev->mlock);
- ret = adis16130_spi_read(indio_dev, chan->address, &temp);
- mutex_unlock(&indio_dev->mlock);
- if (ret)
- return ret;
- *val = temp;
- return IIO_VAL_INT;
-}
-
-static const struct iio_chan_spec adis16130_channels[] = {
- {
- .type = IIO_ANGL_VEL,
- .modified = 1,
- .channel2 = IIO_MOD_Z,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .address = ADIS16130_RATEDATA,
- }, {
- .type = IIO_TEMP,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .address = ADIS16130_TEMPDATA,
- }
-};
-
-static const struct iio_info adis16130_info = {
- .read_raw = &adis16130_read_raw,
- .driver_module = THIS_MODULE,
-};
-
-static int adis16130_probe(struct spi_device *spi)
-{
- int ret;
- struct adis16130_state *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
- st->us = spi;
- mutex_init(&st->buf_lock);
- indio_dev->name = spi->dev.driver->name;
- indio_dev->channels = adis16130_channels;
- indio_dev->num_channels = ARRAY_SIZE(adis16130_channels);
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &adis16130_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_free_dev;
-
- return 0;
-
-error_free_dev:
- iio_device_free(indio_dev);
-
-error_ret:
- return ret;
-}
-
-/* fixme, confirm ordering in this function */
-static int adis16130_remove(struct spi_device *spi)
-{
- iio_device_unregister(spi_get_drvdata(spi));
- iio_device_free(spi_get_drvdata(spi));
-
- return 0;
-}
-
-static struct spi_driver adis16130_driver = {
- .driver = {
- .name = "adis16130",
- .owner = THIS_MODULE,
- },
- .probe = adis16130_probe,
- .remove = adis16130_remove,
-};
-module_spi_driver(adis16130_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ADIS16130 High Precision Angular Rate");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:adis16130");
diff --git a/drivers/staging/iio/gyro/adis16260.h b/drivers/staging/iio/gyro/adis16260.h
deleted file mode 100644
index df3c0b7..0000000
--- a/drivers/staging/iio/gyro/adis16260.h
+++ /dev/null
@@ -1,98 +0,0 @@
-#ifndef SPI_ADIS16260_H_
-#define SPI_ADIS16260_H_
-
-#include "adis16260_platform_data.h"
-#include <linux/iio/imu/adis.h>
-
-#define ADIS16260_STARTUP_DELAY 220 /* ms */
-
-#define ADIS16260_FLASH_CNT 0x00 /* Flash memory write count */
-#define ADIS16260_SUPPLY_OUT 0x02 /* Power supply measurement */
-#define ADIS16260_GYRO_OUT 0x04 /* X-axis gyroscope output */
-#define ADIS16260_AUX_ADC 0x0A /* analog input channel measurement */
-#define ADIS16260_TEMP_OUT 0x0C /* internal temperature measurement */
-#define ADIS16260_ANGL_OUT 0x0E /* angle displacement */
-#define ADIS16260_GYRO_OFF 0x14 /* Calibration, offset/bias adjustment */
-#define ADIS16260_GYRO_SCALE 0x16 /* Calibration, scale adjustment */
-#define ADIS16260_ALM_MAG1 0x20 /* Alarm 1 magnitude/polarity setting */
-#define ADIS16260_ALM_MAG2 0x22 /* Alarm 2 magnitude/polarity setting */
-#define ADIS16260_ALM_SMPL1 0x24 /* Alarm 1 dynamic rate of change setting */
-#define ADIS16260_ALM_SMPL2 0x26 /* Alarm 2 dynamic rate of change setting */
-#define ADIS16260_ALM_CTRL 0x28 /* Alarm control */
-#define ADIS16260_AUX_DAC 0x30 /* Auxiliary DAC data */
-#define ADIS16260_GPIO_CTRL 0x32 /* Control, digital I/O line */
-#define ADIS16260_MSC_CTRL 0x34 /* Control, data ready, self-test settings */
-#define ADIS16260_SMPL_PRD 0x36 /* Control, internal sample rate */
-#define ADIS16260_SENS_AVG 0x38 /* Control, dynamic range, filtering */
-#define ADIS16260_SLP_CNT 0x3A /* Control, sleep mode initiation */
-#define ADIS16260_DIAG_STAT 0x3C /* Diagnostic, error flags */
-#define ADIS16260_GLOB_CMD 0x3E /* Control, global commands */
-#define ADIS16260_LOT_ID1 0x52 /* Lot Identification Code 1 */
-#define ADIS16260_LOT_ID2 0x54 /* Lot Identification Code 2 */
-#define ADIS16260_PROD_ID 0x56 /* Product identifier;
- * convert to decimal = 16,265/16,260 */
-#define ADIS16260_SERIAL_NUM 0x58 /* Serial number */
-
-#define ADIS16260_ERROR_ACTIVE (1<<14)
-#define ADIS16260_NEW_DATA (1<<15)
-
-/* MSC_CTRL */
-#define ADIS16260_MSC_CTRL_MEM_TEST (1<<11)
-/* Internal self-test enable */
-#define ADIS16260_MSC_CTRL_INT_SELF_TEST (1<<10)
-#define ADIS16260_MSC_CTRL_NEG_SELF_TEST (1<<9)
-#define ADIS16260_MSC_CTRL_POS_SELF_TEST (1<<8)
-#define ADIS16260_MSC_CTRL_DATA_RDY_EN (1<<2)
-#define ADIS16260_MSC_CTRL_DATA_RDY_POL_HIGH (1<<1)
-#define ADIS16260_MSC_CTRL_DATA_RDY_DIO2 (1<<0)
-
-/* SMPL_PRD */
-/* Time base (tB): 0 = 1.953 ms, 1 = 60.54 ms */
-#define ADIS16260_SMPL_PRD_TIME_BASE (1<<7)
-#define ADIS16260_SMPL_PRD_DIV_MASK 0x7F
-
-/* SLP_CNT */
-#define ADIS16260_SLP_CNT_POWER_OFF 0x80
-
-/* DIAG_STAT */
-#define ADIS16260_DIAG_STAT_ALARM2 (1<<9)
-#define ADIS16260_DIAG_STAT_ALARM1 (1<<8)
-#define ADIS16260_DIAG_STAT_FLASH_CHK_BIT 6
-#define ADIS16260_DIAG_STAT_SELF_TEST_BIT 5
-#define ADIS16260_DIAG_STAT_OVERFLOW_BIT 4
-#define ADIS16260_DIAG_STAT_SPI_FAIL_BIT 3
-#define ADIS16260_DIAG_STAT_FLASH_UPT_BIT 2
-#define ADIS16260_DIAG_STAT_POWER_HIGH_BIT 1
-#define ADIS16260_DIAG_STAT_POWER_LOW_BIT 0
-
-/* GLOB_CMD */
-#define ADIS16260_GLOB_CMD_SW_RESET (1<<7)
-#define ADIS16260_GLOB_CMD_FLASH_UPD (1<<3)
-#define ADIS16260_GLOB_CMD_DAC_LATCH (1<<2)
-#define ADIS16260_GLOB_CMD_FAC_CALIB (1<<1)
-#define ADIS16260_GLOB_CMD_AUTO_NULL (1<<0)
-
-#define ADIS16260_SPI_SLOW (u32)(300 * 1000)
-#define ADIS16260_SPI_BURST (u32)(1000 * 1000)
-#define ADIS16260_SPI_FAST (u32)(2000 * 1000)
-
-/**
- * struct adis16260_state - device instance specific data
- * @negate: negate the scale parameter
- **/
-struct adis16260_state {
- unsigned negate:1;
- struct adis adis;
-};
-
-/* At the moment triggers are only used for ring buffer
- * filling. This may change!
- */
-
-#define ADIS16260_SCAN_GYRO 0
-#define ADIS16260_SCAN_SUPPLY 1
-#define ADIS16260_SCAN_AUX_ADC 2
-#define ADIS16260_SCAN_TEMP 3
-#define ADIS16260_SCAN_ANGL 4
-
-#endif /* SPI_ADIS16260_H_ */
diff --git a/drivers/staging/iio/gyro/adis16260_core.c b/drivers/staging/iio/gyro/adis16260_core.c
deleted file mode 100644
index 620d63f..0000000
--- a/drivers/staging/iio/gyro/adis16260_core.c
+++ /dev/null
@@ -1,427 +0,0 @@
-/*
- * ADIS16260/ADIS16265 Programmable Digital Gyroscope Sensor Driver
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
-
-#include "adis16260.h"
-
-static ssize_t adis16260_read_frequency_available(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct adis16260_state *st = iio_priv(indio_dev);
- if (spi_get_device_id(st->adis.spi)->driver_data)
- return sprintf(buf, "%s\n", "0.129 ~ 256");
- else
- return sprintf(buf, "%s\n", "256 2048");
-}
-
-static ssize_t adis16260_read_frequency(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct adis16260_state *st = iio_priv(indio_dev);
- int ret, len = 0;
- u16 t;
- int sps;
- ret = adis_read_reg_16(&st->adis, ADIS16260_SMPL_PRD, &t);
- if (ret)
- return ret;
-
- if (spi_get_device_id(st->adis.spi)->driver_data) /* If an adis16251 */
- sps = (t & ADIS16260_SMPL_PRD_TIME_BASE) ? 8 : 256;
- else
- sps = (t & ADIS16260_SMPL_PRD_TIME_BASE) ? 66 : 2048;
- sps /= (t & ADIS16260_SMPL_PRD_DIV_MASK) + 1;
- len = sprintf(buf, "%d SPS\n", sps);
- return len;
-}
-
-static ssize_t adis16260_write_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct adis16260_state *st = iio_priv(indio_dev);
- long val;
- int ret;
- u8 t;
-
- ret = strict_strtol(buf, 10, &val);
- if (ret)
- return ret;
- if (val == 0)
- return -EINVAL;
-
- mutex_lock(&indio_dev->mlock);
- if (spi_get_device_id(st->adis.spi)->driver_data) {
- t = (256 / val);
- if (t > 0)
- t--;
- t &= ADIS16260_SMPL_PRD_DIV_MASK;
- } else {
- t = (2048 / val);
- if (t > 0)
- t--;
- t &= ADIS16260_SMPL_PRD_DIV_MASK;
- }
- if ((t & ADIS16260_SMPL_PRD_DIV_MASK) >= 0x0A)
- st->adis.spi->max_speed_hz = ADIS16260_SPI_SLOW;
- else
- st->adis.spi->max_speed_hz = ADIS16260_SPI_FAST;
- ret = adis_write_reg_8(&st->adis,
- ADIS16260_SMPL_PRD,
- t);
-
- mutex_unlock(&indio_dev->mlock);
-
- return ret ? ret : len;
-}
-
-/* Power down the device */
-static int adis16260_stop_device(struct iio_dev *indio_dev)
-{
- struct adis16260_state *st = iio_priv(indio_dev);
- int ret;
- u16 val = ADIS16260_SLP_CNT_POWER_OFF;
-
- ret = adis_write_reg_16(&st->adis, ADIS16260_SLP_CNT, val);
- if (ret)
- dev_err(&indio_dev->dev, "problem with turning device off: SLP_CNT");
-
- return ret;
-}
-
-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
- adis16260_read_frequency,
- adis16260_write_frequency);
-
-static IIO_DEVICE_ATTR(sampling_frequency_available,
- S_IRUGO, adis16260_read_frequency_available, NULL, 0);
-
-#define ADIS16260_GYRO_CHANNEL_SET(axis, mod) \
-struct iio_chan_spec adis16260_channels_##axis[] = { \
- ADIS_GYRO_CHAN(mod, ADIS16260_GYRO_OUT, ADIS16260_SCAN_GYRO, \
- BIT(IIO_CHAN_INFO_CALIBBIAS) | \
- BIT(IIO_CHAN_INFO_CALIBSCALE), 14), \
- ADIS_INCLI_CHAN(mod, ADIS16260_ANGL_OUT, ADIS16260_SCAN_ANGL, 0, 14), \
- ADIS_TEMP_CHAN(ADIS16260_TEMP_OUT, ADIS16260_SCAN_TEMP, 12), \
- ADIS_SUPPLY_CHAN(ADIS16260_SUPPLY_OUT, ADIS16260_SCAN_SUPPLY, 12), \
- ADIS_AUX_ADC_CHAN(ADIS16260_AUX_ADC, ADIS16260_SCAN_AUX_ADC, 12), \
- IIO_CHAN_SOFT_TIMESTAMP(5), \
-}
-
-static const ADIS16260_GYRO_CHANNEL_SET(x, X);
-static const ADIS16260_GYRO_CHANNEL_SET(y, Y);
-static const ADIS16260_GYRO_CHANNEL_SET(z, Z);
-
-static const u8 adis16260_addresses[][2] = {
- [ADIS16260_SCAN_GYRO] = { ADIS16260_GYRO_OFF, ADIS16260_GYRO_SCALE },
-};
-
-static int adis16260_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- struct adis16260_state *st = iio_priv(indio_dev);
- int ret;
- int bits;
- u8 addr;
- s16 val16;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- return adis_single_conversion(indio_dev, chan,
- ADIS16260_ERROR_ACTIVE, val);
- case IIO_CHAN_INFO_SCALE:
- switch (chan->type) {
- case IIO_ANGL_VEL:
- *val = 0;
- if (spi_get_device_id(st->adis.spi)->driver_data) {
- /* 0.01832 degree / sec */
- *val2 = IIO_DEGREE_TO_RAD(18320);
- } else {
- /* 0.07326 degree / sec */
- *val2 = IIO_DEGREE_TO_RAD(73260);
- }
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_VOLTAGE:
- if (chan->channel == 0) {
- *val = 1;
- *val2 = 831500; /* 1.8315 mV */
- } else {
- *val = 0;
- *val2 = 610500; /* 610.5 uV */
- }
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_TEMP:
- *val = 145;
- *val2 = 300000; /* 0.1453 C */
- return IIO_VAL_INT_PLUS_MICRO;
- default:
- return -EINVAL;
- }
- break;
- case IIO_CHAN_INFO_OFFSET:
- *val = 250000 / 1453; /* 25 C = 0x00 */
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_CALIBBIAS:
- switch (chan->type) {
- case IIO_ANGL_VEL:
- bits = 12;
- break;
- default:
- return -EINVAL;
- }
- mutex_lock(&indio_dev->mlock);
- addr = adis16260_addresses[chan->scan_index][0];
- ret = adis_read_reg_16(&st->adis, addr, &val16);
- if (ret) {
- mutex_unlock(&indio_dev->mlock);
- return ret;
- }
- val16 &= (1 << bits) - 1;
- val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
- *val = val16;
- mutex_unlock(&indio_dev->mlock);
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_CALIBSCALE:
- switch (chan->type) {
- case IIO_ANGL_VEL:
- bits = 12;
- break;
- default:
- return -EINVAL;
- }
- mutex_lock(&indio_dev->mlock);
- addr = adis16260_addresses[chan->scan_index][1];
- ret = adis_read_reg_16(&st->adis, addr, &val16);
- if (ret) {
- mutex_unlock(&indio_dev->mlock);
- return ret;
- }
- *val = (1 << bits) - 1;
- mutex_unlock(&indio_dev->mlock);
- return IIO_VAL_INT;
- }
- return -EINVAL;
-}
-
-static int adis16260_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- struct adis16260_state *st = iio_priv(indio_dev);
- int bits = 12;
- s16 val16;
- u8 addr;
- switch (mask) {
- case IIO_CHAN_INFO_CALIBBIAS:
- val16 = val & ((1 << bits) - 1);
- addr = adis16260_addresses[chan->scan_index][0];
- return adis_write_reg_16(&st->adis, addr, val16);
- case IIO_CHAN_INFO_CALIBSCALE:
- val16 = val & ((1 << bits) - 1);
- addr = adis16260_addresses[chan->scan_index][1];
- return adis_write_reg_16(&st->adis, addr, val16);
- }
- return -EINVAL;
-}
-
-static struct attribute *adis16260_attributes[] = {
- &iio_dev_attr_sampling_frequency.dev_attr.attr,
- &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group adis16260_attribute_group = {
- .attrs = adis16260_attributes,
-};
-
-static const struct iio_info adis16260_info = {
- .attrs = &adis16260_attribute_group,
- .read_raw = &adis16260_read_raw,
- .write_raw = &adis16260_write_raw,
- .update_scan_mode = adis_update_scan_mode,
- .driver_module = THIS_MODULE,
-};
-
-static const char * const adis1620_status_error_msgs[] = {
- [ADIS16260_DIAG_STAT_FLASH_CHK_BIT] = "Flash checksum error",
- [ADIS16260_DIAG_STAT_SELF_TEST_BIT] = "Self test error",
- [ADIS16260_DIAG_STAT_OVERFLOW_BIT] = "Sensor overrange",
- [ADIS16260_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
- [ADIS16260_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
- [ADIS16260_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 5.25",
- [ADIS16260_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 4.75",
-};
-
-static const struct adis_data adis16260_data = {
- .write_delay = 30,
- .read_delay = 30,
- .msc_ctrl_reg = ADIS16260_MSC_CTRL,
- .glob_cmd_reg = ADIS16260_GLOB_CMD,
- .diag_stat_reg = ADIS16260_DIAG_STAT,
-
- .self_test_mask = ADIS16260_MSC_CTRL_MEM_TEST,
- .startup_delay = ADIS16260_STARTUP_DELAY,
-
- .status_error_msgs = adis1620_status_error_msgs,
- .status_error_mask = BIT(ADIS16260_DIAG_STAT_FLASH_CHK_BIT) |
- BIT(ADIS16260_DIAG_STAT_SELF_TEST_BIT) |
- BIT(ADIS16260_DIAG_STAT_OVERFLOW_BIT) |
- BIT(ADIS16260_DIAG_STAT_SPI_FAIL_BIT) |
- BIT(ADIS16260_DIAG_STAT_FLASH_UPT_BIT) |
- BIT(ADIS16260_DIAG_STAT_POWER_HIGH_BIT) |
- BIT(ADIS16260_DIAG_STAT_POWER_LOW_BIT),
-};
-
-static int adis16260_probe(struct spi_device *spi)
-{
- int ret;
- struct adis16260_platform_data *pd = spi->dev.platform_data;
- struct adis16260_state *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- st = iio_priv(indio_dev);
- if (pd)
- st->negate = pd->negate;
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- indio_dev->name = spi_get_device_id(spi)->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &adis16260_info;
- indio_dev->num_channels
- = ARRAY_SIZE(adis16260_channels_x);
- if (pd && pd->direction)
- switch (pd->direction) {
- case 'x':
- indio_dev->channels = adis16260_channels_x;
- break;
- case 'y':
- indio_dev->channels = adis16260_channels_y;
- break;
- case 'z':
- indio_dev->channels = adis16260_channels_z;
- break;
- default:
- return -EINVAL;
- }
- else
- indio_dev->channels = adis16260_channels_x;
- indio_dev->num_channels = ARRAY_SIZE(adis16260_channels_x);
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = adis_init(&st->adis, indio_dev, spi, &adis16260_data);
- if (ret)
- goto error_free_dev;
-
- ret = adis_setup_buffer_and_trigger(&st->adis, indio_dev, NULL);
- if (ret)
- goto error_free_dev;
-
- if (indio_dev->buffer) {
- /* Set default scan mode */
- iio_scan_mask_set(indio_dev, indio_dev->buffer,
- ADIS16260_SCAN_SUPPLY);
- iio_scan_mask_set(indio_dev, indio_dev->buffer,
- ADIS16260_SCAN_GYRO);
- iio_scan_mask_set(indio_dev, indio_dev->buffer,
- ADIS16260_SCAN_AUX_ADC);
- iio_scan_mask_set(indio_dev, indio_dev->buffer,
- ADIS16260_SCAN_TEMP);
- iio_scan_mask_set(indio_dev, indio_dev->buffer,
- ADIS16260_SCAN_ANGL);
- }
-
- /* Get the device into a sane initial state */
- ret = adis_initial_startup(&st->adis);
- if (ret)
- goto error_cleanup_buffer_trigger;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_cleanup_buffer_trigger;
-
- return 0;
-
-error_cleanup_buffer_trigger:
- adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
- return ret;
-}
-
-static int adis16260_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adis16260_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- adis16260_stop_device(indio_dev);
- adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
- iio_device_free(indio_dev);
-
- return 0;
-}
-
-/*
- * These parts do not need to be differentiated until someone adds
- * support for the on chip filtering.
- */
-static const struct spi_device_id adis16260_id[] = {
- {"adis16260", 0},
- {"adis16265", 0},
- {"adis16250", 0},
- {"adis16255", 0},
- {"adis16251", 1},
- {}
-};
-MODULE_DEVICE_TABLE(spi, adis16260_id);
-
-static struct spi_driver adis16260_driver = {
- .driver = {
- .name = "adis16260",
- .owner = THIS_MODULE,
- },
- .probe = adis16260_probe,
- .remove = adis16260_remove,
- .id_table = adis16260_id,
-};
-module_spi_driver(adis16260_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ADIS16260/5 Digital Gyroscope Sensor");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/gyro/adis16260_platform_data.h b/drivers/staging/iio/gyro/adis16260_platform_data.h
deleted file mode 100644
index 12802e9..0000000
--- a/drivers/staging/iio/gyro/adis16260_platform_data.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * ADIS16260 Programmable Digital Gyroscope Sensor Driver Platform Data
- *
- * Based on adis16255.h Matthia Brugger <m_brugger&web.de>
- *
- * Copyright (C) 2010 Fraunhofer Institute for Integrated Circuits
- *
- * Licensed under the GPL-2 or later.
- */
-
-/**
- * struct adis16260_platform_data - instance specific data
- * @direction: x y or z
- * @negate: flag to indicate value should be inverted.
- **/
-struct adis16260_platform_data {
- char direction;
- unsigned negate:1;
-};
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c
index 82478a5..e4998e4 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/staging/iio/light/isl29018.c
@@ -550,11 +550,10 @@ static int isl29018_probe(struct i2c_client *client,
struct iio_dev *indio_dev;
int err;
- indio_dev = iio_device_alloc(sizeof(*chip));
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
if (indio_dev == NULL) {
dev_err(&client->dev, "iio allocation fails\n");
- err = -ENOMEM;
- goto exit;
+ return -ENOMEM;
}
chip = iio_priv(indio_dev);
@@ -564,6 +563,7 @@ static int isl29018_probe(struct i2c_client *client,
mutex_init(&chip->lock);
chip->lux_scale = 1;
+ chip->lux_uscale = 0;
chip->range = 1000;
chip->adc_bit = 16;
chip->suspended = false;
@@ -572,12 +572,12 @@ static int isl29018_probe(struct i2c_client *client,
if (IS_ERR(chip->regmap)) {
err = PTR_ERR(chip->regmap);
dev_err(chip->dev, "regmap initialization failed: %d\n", err);
- goto exit;
+ return err;
}
err = isl29018_chip_init(chip);
if (err)
- goto exit_iio_free;
+ return err;
indio_dev->info = &isl29108_info;
indio_dev->channels = isl29018_channels;
@@ -588,14 +588,10 @@ static int isl29018_probe(struct i2c_client *client,
err = iio_device_register(indio_dev);
if (err) {
dev_err(&client->dev, "iio registration fails\n");
- goto exit_iio_free;
+ return err;
}
return 0;
-exit_iio_free:
- iio_device_free(indio_dev);
-exit:
- return err;
}
static int isl29018_remove(struct i2c_client *client)
@@ -604,7 +600,6 @@ static int isl29018_remove(struct i2c_client *client)
dev_dbg(&client->dev, "%s()\n", __func__);
iio_device_unregister(indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/light/isl29028.c b/drivers/staging/iio/light/isl29028.c
index 8bb0d03..6014625 100644
--- a/drivers/staging/iio/light/isl29028.c
+++ b/drivers/staging/iio/light/isl29028.c
@@ -482,7 +482,7 @@ static int isl29028_probe(struct i2c_client *client,
struct iio_dev *indio_dev;
int ret;
- indio_dev = iio_device_alloc(sizeof(*chip));
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
if (!indio_dev) {
dev_err(&client->dev, "iio allocation fails\n");
return -ENOMEM;
@@ -498,13 +498,13 @@ static int isl29028_probe(struct i2c_client *client,
if (IS_ERR(chip->regmap)) {
ret = PTR_ERR(chip->regmap);
dev_err(chip->dev, "regmap initialization failed: %d\n", ret);
- goto exit_iio_free;
+ return ret;
}
ret = isl29028_chip_init(chip);
if (ret < 0) {
dev_err(chip->dev, "chip initialization failed: %d\n", ret);
- goto exit_iio_free;
+ return ret;
}
indio_dev->info = &isl29028_info;
@@ -517,13 +517,9 @@ static int isl29028_probe(struct i2c_client *client,
if (ret < 0) {
dev_err(chip->dev, "iio registration fails with error %d\n",
ret);
- goto exit_iio_free;
+ return ret;
}
return 0;
-
-exit_iio_free:
- iio_device_free(indio_dev);
- return ret;
}
static int isl29028_remove(struct i2c_client *client)
@@ -531,7 +527,6 @@ static int isl29028_remove(struct i2c_client *client)
struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
index 86c6bf9..c3f3f53 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.c
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -20,12 +20,10 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/i2c.h>
-#include <linux/slab.h>
-#include <linux/types.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/delay.h>
#define HMC5843_CONFIG_REG_A 0x00
#define HMC5843_CONFIG_REG_B 0x01
@@ -42,9 +40,6 @@
#define HMC5883_DATA_OUT_Y_MSB_REG 0x07
#define HMC5883_DATA_OUT_Y_LSB_REG 0x08
#define HMC5843_STATUS_REG 0x09
-#define HMC5843_ID_REG_A 0x0A
-#define HMC5843_ID_REG_B 0x0B
-#define HMC5843_ID_REG_C 0x0C
enum hmc5843_ids {
HMC5843_ID,
@@ -53,14 +48,6 @@ enum hmc5843_ids {
};
/*
- * Beware: identification of the HMC5883 is still "H43";
- * I2C address is also unchanged
- */
-#define HMC5843_ID_REG_LENGTH 0x03
-#define HMC5843_ID_STRING "H43"
-#define HMC5843_I2C_ADDRESS 0x1E
-
-/*
* Range gain settings in (+-)Ga
* Beware: HMC5843 and HMC5883 have different recommended sensor field
* ranges; default corresponds to +-1.0 Ga and +-1.3 Ga, respectively
@@ -185,14 +172,9 @@ static const char * const hmc5883_regval_to_sample_freq[] = {
"0.75", "1.5", "3", "7.5", "15", "30", "75",
};
-/* Addresses to scan: 0x1E */
-static const unsigned short normal_i2c[] = { HMC5843_I2C_ADDRESS,
- I2C_CLIENT_END };
-
/* Describe chip variants */
struct hmc5843_chip_info {
const struct iio_chan_spec *channels;
- int num_channels;
const char * const *regval_to_sample_freq;
const int *regval_to_input_field_mga;
const int *regval_to_nanoscale;
@@ -225,18 +207,29 @@ static int hmc5843_read_measurement(struct iio_dev *indio_dev,
struct i2c_client *client = to_i2c_client(indio_dev->dev.parent);
struct hmc5843_data *data = iio_priv(indio_dev);
s32 result;
+ int tries = 150;
mutex_lock(&data->lock);
- result = i2c_smbus_read_byte_data(client, HMC5843_STATUS_REG);
- while (!(result & HMC5843_DATA_READY))
- result = i2c_smbus_read_byte_data(client, HMC5843_STATUS_REG);
+ while (tries-- > 0) {
+ result = i2c_smbus_read_byte_data(client,
+ HMC5843_STATUS_REG);
+ if (result & HMC5843_DATA_READY)
+ break;
+ msleep(20);
+ }
+
+ if (tries < 0) {
+ dev_err(&client->dev, "data not ready\n");
+ mutex_unlock(&data->lock);
+ return -EIO;
+ }
- result = i2c_smbus_read_word_data(client, address);
+ result = i2c_smbus_read_word_swapped(client, address);
mutex_unlock(&data->lock);
if (result < 0)
return -EINVAL;
- *val = (s16)swab16((u16)result);
+ *val = sign_extend32(result, 15);
return IIO_VAL_INT;
}
@@ -559,14 +552,14 @@ static int hmc5843_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
-#define HMC5843_CHANNEL(axis, add) \
+#define HMC5843_CHANNEL(axis, addr) \
{ \
.type = IIO_MAGN, \
.modified = 1, \
.channel2 = IIO_MOD_##axis, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .address = add \
+ .address = addr \
}
static const struct iio_chan_spec hmc5843_channels[] = {
@@ -597,7 +590,6 @@ static const struct attribute_group hmc5843_group = {
static const struct hmc5843_chip_info hmc5843_chip_info_tbl[] = {
[HMC5843_ID] = {
.channels = hmc5843_channels,
- .num_channels = ARRAY_SIZE(hmc5843_channels),
.regval_to_sample_freq = hmc5843_regval_to_sample_freq,
.regval_to_input_field_mga =
hmc5843_regval_to_input_field_mga,
@@ -605,7 +597,6 @@ static const struct hmc5843_chip_info hmc5843_chip_info_tbl[] = {
},
[HMC5883_ID] = {
.channels = hmc5883_channels,
- .num_channels = ARRAY_SIZE(hmc5883_channels),
.regval_to_sample_freq = hmc5883_regval_to_sample_freq,
.regval_to_input_field_mga =
hmc5883_regval_to_input_field_mga,
@@ -613,7 +604,6 @@ static const struct hmc5843_chip_info hmc5843_chip_info_tbl[] = {
},
[HMC5883L_ID] = {
.channels = hmc5883_channels,
- .num_channels = ARRAY_SIZE(hmc5883_channels),
.regval_to_sample_freq = hmc5883_regval_to_sample_freq,
.regval_to_input_field_mga =
hmc5883l_regval_to_input_field_mga,
@@ -621,25 +611,6 @@ static const struct hmc5843_chip_info hmc5843_chip_info_tbl[] = {
},
};
-static int hmc5843_detect(struct i2c_client *client,
- struct i2c_board_info *info)
-{
- unsigned char id_str[HMC5843_ID_REG_LENGTH];
-
- if (client->addr != HMC5843_I2C_ADDRESS)
- return -ENODEV;
-
- if (i2c_smbus_read_i2c_block_data(client, HMC5843_ID_REG_A,
- HMC5843_ID_REG_LENGTH, id_str)
- != HMC5843_ID_REG_LENGTH)
- return -ENODEV;
-
- if (0 != strncmp(id_str, HMC5843_ID_STRING, HMC5843_ID_REG_LENGTH))
- return -ENODEV;
-
- return 0;
-}
-
/* Called when we have found a new HMC58X3 */
static void hmc5843_init_client(struct i2c_client *client,
const struct i2c_device_id *id)
@@ -649,7 +620,7 @@ static void hmc5843_init_client(struct i2c_client *client,
data->variant = &hmc5843_chip_info_tbl[id->driver_data];
indio_dev->channels = data->variant->channels;
- indio_dev->num_channels = data->variant->num_channels;
+ indio_dev->num_channels = 3;
hmc5843_set_meas_conf(client, data->meas_conf);
hmc5843_set_rate(client, data->rate);
hmc5843_configure(client, data->operating_mode);
@@ -756,8 +727,6 @@ static struct i2c_driver hmc5843_driver = {
.id_table = hmc5843_id,
.probe = hmc5843_probe,
.remove = hmc5843_remove,
- .detect = hmc5843_detect,
- .address_list = normal_i2c,
};
module_i2c_driver(hmc5843_driver);
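Two hmc5843 behaviour changes are worth spelling out: the old code could spin forever on the status register if DATA_READY never came up, while the new loop bounds the wait and sleeps between polls; and the raw sample is now fetched with i2c_smbus_read_word_swapped() and sign-extended with sign_extend32(result, 15) instead of the swab16() cast. A stand-alone sketch of the bounded-poll idiom; the register and bit definitions are assumptions, not the driver's headers:

#include <linux/delay.h>
#include <linux/i2c.h>

#define EXAMPLE_STATUS_REG	0x09
#define EXAMPLE_DATA_READY	0x01		/* bit position assumed */

/* Hypothetical helper: wait for data-ready with a bounded retry loop. */
static int example_wait_data_ready(struct i2c_client *client)
{
	int tries = 150;
	s32 status;

	while (tries-- > 0) {
		status = i2c_smbus_read_byte_data(client, EXAMPLE_STATUS_REG);
		if (status < 0)
			return status;		/* bus error, give up */
		if (status & EXAMPLE_DATA_READY)
			return 0;
		msleep(20);			/* roughly three seconds in total */
	}

	return -EIO;				/* device never became ready */
}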
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
index e5943e2..74025fb 100644
--- a/drivers/staging/iio/meter/ade7753.c
+++ b/drivers/staging/iio/meter/ade7753.c
@@ -225,21 +225,6 @@ static int ade7753_reset(struct device *dev)
return ade7753_spi_write_reg_16(dev, ADE7753_MODE, val);
}
-static ssize_t ade7753_write_reset(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- if (len < 1)
- return -1;
- switch (buf[0]) {
- case '1':
- case 'y':
- case 'Y':
- return ade7753_reset(dev);
- }
- return -1;
-}
-
static IIO_DEV_ATTR_AENERGY(ade7753_read_24bit, ADE7753_AENERGY);
static IIO_DEV_ATTR_LAENERGY(ade7753_read_24bit, ADE7753_LAENERGY);
static IIO_DEV_ATTR_VAENERGY(ade7753_read_24bit, ADE7753_VAENERGY);
@@ -458,8 +443,6 @@ static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
ade7753_read_frequency,
ade7753_write_frequency);
-static IIO_DEV_ATTR_RESET(ade7753_write_reset);
-
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("27900 14000 7000 3500");
static struct attribute *ade7753_attributes[] = {
@@ -468,7 +451,6 @@ static struct attribute *ade7753_attributes[] = {
&iio_const_attr_in_temp_scale.dev_attr.attr,
&iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
- &iio_dev_attr_reset.dev_attr.attr,
&iio_dev_attr_phcal.dev_attr.attr,
&iio_dev_attr_cfden.dev_attr.attr,
&iio_dev_attr_aenergy.dev_attr.attr,
diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c
index 7b6503b..f649ebe 100644
--- a/drivers/staging/iio/meter/ade7754.c
+++ b/drivers/staging/iio/meter/ade7754.c
@@ -224,22 +224,6 @@ static int ade7754_reset(struct device *dev)
return ade7754_spi_write_reg_8(dev, ADE7754_OPMODE, val);
}
-
-static ssize_t ade7754_write_reset(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- if (len < 1)
- return -1;
- switch (buf[0]) {
- case '1':
- case 'y':
- case 'Y':
- return ade7754_reset(dev);
- }
- return -1;
-}
-
static IIO_DEV_ATTR_AENERGY(ade7754_read_24bit, ADE7754_AENERGY);
static IIO_DEV_ATTR_LAENERGY(ade7754_read_24bit, ADE7754_LAENERGY);
static IIO_DEV_ATTR_VAENERGY(ade7754_read_24bit, ADE7754_VAENERGY);
@@ -478,8 +462,6 @@ static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
ade7754_read_frequency,
ade7754_write_frequency);
-static IIO_DEV_ATTR_RESET(ade7754_write_reset);
-
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("26000 13000 65000 33000");
static struct attribute *ade7754_attributes[] = {
@@ -488,7 +470,6 @@ static struct attribute *ade7754_attributes[] = {
&iio_const_attr_in_temp_scale.dev_attr.attr,
&iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
- &iio_dev_attr_reset.dev_attr.attr,
&iio_dev_attr_aenergy.dev_attr.attr,
&iio_dev_attr_laenergy.dev_attr.attr,
&iio_dev_attr_vaenergy.dev_attr.attr,
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
index 8f5bcfa..6005d4a 100644
--- a/drivers/staging/iio/meter/ade7758_core.c
+++ b/drivers/staging/iio/meter/ade7758_core.c
@@ -313,21 +313,6 @@ static int ade7758_reset(struct device *dev)
return ret;
}
-static ssize_t ade7758_write_reset(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- if (len < 1)
- return -1;
- switch (buf[0]) {
- case '1':
- case 'y':
- case 'Y':
- return ade7758_reset(dev);
- }
- return len;
-}
-
static IIO_DEV_ATTR_VPEAK(S_IWUSR | S_IRUGO,
ade7758_read_8bit,
ade7758_write_8bit,
@@ -591,8 +576,6 @@ static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
ade7758_read_frequency,
ade7758_write_frequency);
-static IIO_DEV_ATTR_RESET(ade7758_write_reset);
-
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("26040 13020 6510 3255");
static struct attribute *ade7758_attributes[] = {
@@ -601,7 +584,6 @@ static struct attribute *ade7758_attributes[] = {
&iio_const_attr_in_temp_scale.dev_attr.attr,
&iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
- &iio_dev_attr_reset.dev_attr.attr,
&iio_dev_attr_awatthr.dev_attr.attr,
&iio_dev_attr_bwatthr.dev_attr.attr,
&iio_dev_attr_cwatthr.dev_attr.attr,
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
index b29e2d5..7d5db71 100644
--- a/drivers/staging/iio/meter/ade7758_ring.c
+++ b/drivers/staging/iio/meter/ade7758_ring.c
@@ -54,7 +54,7 @@ out:
return ret;
}
-/* Whilst this makes a lot of calls to iio_sw_ring functions - it is to device
+/* Whilst this makes a lot of calls to iio_sw_ring functions - it is too device
* specific to be rolled into the core.
*/
static irqreturn_t ade7758_trigger_handler(int irq, void *p)
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
index 17dc373..d214ac4 100644
--- a/drivers/staging/iio/meter/ade7759.c
+++ b/drivers/staging/iio/meter/ade7759.c
@@ -229,21 +229,6 @@ static int ade7759_reset(struct device *dev)
return ret;
}
-static ssize_t ade7759_write_reset(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- if (len < 1)
- return -1;
- switch (buf[0]) {
- case '1':
- case 'y':
- case 'Y':
- return ade7759_reset(dev);
- }
- return -1;
-}
-
static IIO_DEV_ATTR_AENERGY(ade7759_read_40bit, ADE7759_AENERGY);
static IIO_DEV_ATTR_CFDEN(S_IWUSR | S_IRUGO,
ade7759_read_16bit,
@@ -418,8 +403,6 @@ static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
ade7759_read_frequency,
ade7759_write_frequency);
-static IIO_DEV_ATTR_RESET(ade7759_write_reset);
-
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("27900 14000 7000 3500");
static struct attribute *ade7759_attributes[] = {
@@ -428,7 +411,6 @@ static struct attribute *ade7759_attributes[] = {
&iio_const_attr_in_temp_scale.dev_attr.attr,
&iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
- &iio_dev_attr_reset.dev_attr.attr,
&iio_dev_attr_phcal.dev_attr.attr,
&iio_dev_attr_cfden.dev_attr.attr,
&iio_dev_attr_aenergy.dev_attr.attr,
diff --git a/drivers/staging/iio/meter/ade7854-spi.c b/drivers/staging/iio/meter/ade7854-spi.c
index a802cf2..4c6d204 100644
--- a/drivers/staging/iio/meter/ade7854-spi.c
+++ b/drivers/staging/iio/meter/ade7854-spi.c
@@ -299,7 +299,7 @@ static int ade7854_spi_probe(struct spi_device *spi)
if (ret)
iio_device_free(indio_dev);
- return 0;
+ return ret;
}
static int ade7854_spi_remove(struct spi_device *spi)
diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c
index c642da8..e8379c0 100644
--- a/drivers/staging/iio/meter/ade7854.c
+++ b/drivers/staging/iio/meter/ade7854.c
@@ -186,22 +186,6 @@ static int ade7854_reset(struct device *dev)
return st->write_reg_16(dev, ADE7854_CONFIG, val);
}
-
-static ssize_t ade7854_write_reset(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- if (len < 1)
- return -1;
- switch (buf[0]) {
- case '1':
- case 'y':
- case 'Y':
- return ade7854_reset(dev);
- }
- return -1;
-}
-
static IIO_DEV_ATTR_AIGAIN(S_IWUSR | S_IRUGO,
ade7854_read_24bit,
ade7854_write_24bit,
@@ -468,8 +452,6 @@ err_ret:
return ret;
}
-static IIO_DEV_ATTR_RESET(ade7854_write_reset);
-
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("8000");
static IIO_CONST_ATTR(name, "ade7854");
@@ -515,7 +497,6 @@ static struct attribute *ade7854_attributes[] = {
&iio_dev_attr_bvahr.dev_attr.attr,
&iio_dev_attr_cvahr.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
- &iio_dev_attr_reset.dev_attr.attr,
&iio_const_attr_name.dev_attr.attr,
&iio_dev_attr_vpeak.dev_attr.attr,
&iio_dev_attr_ipeak.dev_attr.attr,
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index 0d3356d..dcdadbb 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -192,21 +192,6 @@ static inline int ad2s1210_soft_reset(struct ad2s1210_state *st)
return ad2s1210_config_write(st, 0x0);
}
-static ssize_t ad2s1210_store_softreset(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
- int ret;
-
- mutex_lock(&st->lock);
- ret = ad2s1210_soft_reset(st);
- mutex_unlock(&st->lock);
-
- return ret < 0 ? ret : len;
-}
-
static ssize_t ad2s1210_show_fclkin(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -536,8 +521,6 @@ error_ret:
return ret;
}
-static IIO_DEVICE_ATTR(reset, S_IWUSR,
- NULL, ad2s1210_store_softreset, 0);
static IIO_DEVICE_ATTR(fclkin, S_IRUGO | S_IWUSR,
ad2s1210_show_fclkin, ad2s1210_store_fclkin, 0);
static IIO_DEVICE_ATTR(fexcit, S_IRUGO | S_IWUSR,
@@ -587,7 +570,6 @@ static const struct iio_chan_spec ad2s1210_channels[] = {
};
static struct attribute *ad2s1210_attributes[] = {
- &iio_dev_attr_reset.dev_attr.attr,
&iio_dev_attr_fclkin.dev_attr.attr,
&iio_dev_attr_fexcit.dev_attr.attr,
&iio_dev_attr_control.dev_attr.attr,
diff --git a/drivers/staging/iio/trigger/Kconfig b/drivers/staging/iio/trigger/Kconfig
index 1a051da..2fd18c6 100644
--- a/drivers/staging/iio/trigger/Kconfig
+++ b/drivers/staging/iio/trigger/Kconfig
@@ -12,23 +12,6 @@ config IIO_PERIODIC_RTC_TRIGGER
Provides support for using periodic capable real time
clocks as IIO triggers.
-config IIO_GPIO_TRIGGER
- tristate "GPIO trigger"
- depends on GPIOLIB
- help
- Provides support for using GPIO pins as IIO triggers.
-
-config IIO_SYSFS_TRIGGER
- tristate "SYSFS trigger"
- depends on SYSFS
- select IRQ_WORK
- help
- Provides support for using SYSFS entry as IIO triggers.
- If unsure, say N (but it's safe to say "Y").
-
- To compile this driver as a module, choose M here: the
- module will be called iio-trig-sysfs.
-
config IIO_BFIN_TMR_TRIGGER
tristate "Blackfin TIMER trigger"
depends on BLACKFIN
diff --git a/drivers/staging/iio/trigger/Makefile b/drivers/staging/iio/trigger/Makefile
index b088b57..238481b 100644
--- a/drivers/staging/iio/trigger/Makefile
+++ b/drivers/staging/iio/trigger/Makefile
@@ -3,6 +3,4 @@
#
obj-$(CONFIG_IIO_PERIODIC_RTC_TRIGGER) += iio-trig-periodic-rtc.o
-obj-$(CONFIG_IIO_GPIO_TRIGGER) += iio-trig-gpio.o
-obj-$(CONFIG_IIO_SYSFS_TRIGGER) += iio-trig-sysfs.o
obj-$(CONFIG_IIO_BFIN_TMR_TRIGGER) += iio-trig-bfin-timer.o
diff --git a/drivers/staging/iio/trigger/iio-trig-gpio.c b/drivers/staging/iio/trigger/iio-trig-gpio.c
deleted file mode 100644
index 7c593d1..0000000
--- a/drivers/staging/iio/trigger/iio-trig-gpio.c
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Industrial I/O - gpio based trigger support
- *
- * Copyright (c) 2008 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * Currently this is more of a functioning proof of concept than a full
- * fledged trigger driver.
- *
- * TODO:
- *
- * Add board config elements to allow specification of startup settings.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/gpio.h>
-#include <linux/slab.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/trigger.h>
-
-static LIST_HEAD(iio_gpio_trigger_list);
-static DEFINE_MUTEX(iio_gpio_trigger_list_lock);
-
-struct iio_gpio_trigger_info {
- struct mutex in_use;
- unsigned int irq;
-};
-/*
- * Need to reference count these triggers and only enable gpio interrupts
- * as appropriate.
- */
-
-/* So what functionality do we want in here?... */
-/* set high / low as interrupt type? */
-
-static irqreturn_t iio_gpio_trigger_poll(int irq, void *private)
-{
- /* Timestamp not currently provided */
- iio_trigger_poll(private, 0);
- return IRQ_HANDLED;
-}
-
-static const struct iio_trigger_ops iio_gpio_trigger_ops = {
- .owner = THIS_MODULE,
-};
-
-static int iio_gpio_trigger_probe(struct platform_device *pdev)
-{
- struct iio_gpio_trigger_info *trig_info;
- struct iio_trigger *trig, *trig2;
- unsigned long irqflags;
- struct resource *irq_res;
- int irq, ret = 0, irq_res_cnt = 0;
-
- do {
- irq_res = platform_get_resource(pdev,
- IORESOURCE_IRQ, irq_res_cnt);
-
- if (irq_res == NULL) {
- if (irq_res_cnt == 0)
- dev_err(&pdev->dev, "No GPIO IRQs specified");
- break;
- }
- irqflags = (irq_res->flags & IRQF_TRIGGER_MASK) | IRQF_SHARED;
-
- for (irq = irq_res->start; irq <= irq_res->end; irq++) {
-
- trig = iio_trigger_alloc("irqtrig%d", irq);
- if (!trig) {
- ret = -ENOMEM;
- goto error_free_completed_registrations;
- }
-
- trig_info = kzalloc(sizeof(*trig_info), GFP_KERNEL);
- if (!trig_info) {
- ret = -ENOMEM;
- goto error_put_trigger;
- }
- iio_trigger_set_drvdata(trig, trig_info);
- trig_info->irq = irq;
- trig->ops = &iio_gpio_trigger_ops;
- ret = request_irq(irq, iio_gpio_trigger_poll,
- irqflags, trig->name, trig);
- if (ret) {
- dev_err(&pdev->dev,
- "request IRQ-%d failed", irq);
- goto error_free_trig_info;
- }
-
- ret = iio_trigger_register(trig);
- if (ret)
- goto error_release_irq;
-
- list_add_tail(&trig->alloc_list,
- &iio_gpio_trigger_list);
- }
-
- irq_res_cnt++;
- } while (irq_res != NULL);
-
-
- return 0;
-
-/* First clean up the partly allocated trigger */
-error_release_irq:
- free_irq(irq, trig);
-error_free_trig_info:
- kfree(trig_info);
-error_put_trigger:
- iio_trigger_put(trig);
-error_free_completed_registrations:
- /* The rest should have been added to the iio_gpio_trigger_list */
- list_for_each_entry_safe(trig,
- trig2,
- &iio_gpio_trigger_list,
- alloc_list) {
- trig_info = iio_trigger_get_drvdata(trig);
- free_irq(gpio_to_irq(trig_info->irq), trig);
- kfree(trig_info);
- iio_trigger_unregister(trig);
- }
-
- return ret;
-}
-
-static int iio_gpio_trigger_remove(struct platform_device *pdev)
-{
- struct iio_trigger *trig, *trig2;
- struct iio_gpio_trigger_info *trig_info;
-
- mutex_lock(&iio_gpio_trigger_list_lock);
- list_for_each_entry_safe(trig,
- trig2,
- &iio_gpio_trigger_list,
- alloc_list) {
- trig_info = iio_trigger_get_drvdata(trig);
- iio_trigger_unregister(trig);
- free_irq(trig_info->irq, trig);
- kfree(trig_info);
- iio_trigger_put(trig);
- }
- mutex_unlock(&iio_gpio_trigger_list_lock);
-
- return 0;
-}
-
-static struct platform_driver iio_gpio_trigger_driver = {
- .probe = iio_gpio_trigger_probe,
- .remove = iio_gpio_trigger_remove,
- .driver = {
- .name = "iio_gpio_trigger",
- .owner = THIS_MODULE,
- },
-};
-
-module_platform_driver(iio_gpio_trigger_driver);
-
-MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
-MODULE_DESCRIPTION("Example gpio trigger for the iio subsystem");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/trigger/iio-trig-sysfs.c b/drivers/staging/iio/trigger/iio-trig-sysfs.c
deleted file mode 100644
index b727bde..0000000
--- a/drivers/staging/iio/trigger/iio-trig-sysfs.c
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- * Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/irq_work.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/trigger.h>
-
-struct iio_sysfs_trig {
- struct iio_trigger *trig;
- struct irq_work work;
- int id;
- struct list_head l;
-};
-
-static LIST_HEAD(iio_sysfs_trig_list);
-static DEFINE_MUTEX(iio_syfs_trig_list_mut);
-
-static int iio_sysfs_trigger_probe(int id);
-static ssize_t iio_sysfs_trig_add(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- int ret;
- unsigned long input;
-
- ret = strict_strtoul(buf, 10, &input);
- if (ret)
- return ret;
- ret = iio_sysfs_trigger_probe(input);
- if (ret)
- return ret;
- return len;
-}
-static DEVICE_ATTR(add_trigger, S_IWUSR, NULL, &iio_sysfs_trig_add);
-
-static int iio_sysfs_trigger_remove(int id);
-static ssize_t iio_sysfs_trig_remove(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- int ret;
- unsigned long input;
-
- ret = strict_strtoul(buf, 10, &input);
- if (ret)
- return ret;
- ret = iio_sysfs_trigger_remove(input);
- if (ret)
- return ret;
- return len;
-}
-
-static DEVICE_ATTR(remove_trigger, S_IWUSR, NULL, &iio_sysfs_trig_remove);
-
-static struct attribute *iio_sysfs_trig_attrs[] = {
- &dev_attr_add_trigger.attr,
- &dev_attr_remove_trigger.attr,
- NULL,
-};
-
-static const struct attribute_group iio_sysfs_trig_group = {
- .attrs = iio_sysfs_trig_attrs,
-};
-
-static const struct attribute_group *iio_sysfs_trig_groups[] = {
- &iio_sysfs_trig_group,
- NULL
-};
-
-
-/* Nothing to actually do upon release */
-static void iio_trigger_sysfs_release(struct device *dev)
-{
-}
-
-static struct device iio_sysfs_trig_dev = {
- .bus = &iio_bus_type,
- .groups = iio_sysfs_trig_groups,
- .release = &iio_trigger_sysfs_release,
-};
-
-static void iio_sysfs_trigger_work(struct irq_work *work)
-{
- struct iio_sysfs_trig *trig = container_of(work, struct iio_sysfs_trig,
- work);
-
- iio_trigger_poll(trig->trig, 0);
-}
-
-static ssize_t iio_sysfs_trigger_poll(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct iio_trigger *trig = to_iio_trigger(dev);
- struct iio_sysfs_trig *sysfs_trig = iio_trigger_get_drvdata(trig);
-
- irq_work_queue(&sysfs_trig->work);
-
- return count;
-}
-
-static DEVICE_ATTR(trigger_now, S_IWUSR, NULL, iio_sysfs_trigger_poll);
-
-static struct attribute *iio_sysfs_trigger_attrs[] = {
- &dev_attr_trigger_now.attr,
- NULL,
-};
-
-static const struct attribute_group iio_sysfs_trigger_attr_group = {
- .attrs = iio_sysfs_trigger_attrs,
-};
-
-static const struct attribute_group *iio_sysfs_trigger_attr_groups[] = {
- &iio_sysfs_trigger_attr_group,
- NULL
-};
-
-static const struct iio_trigger_ops iio_sysfs_trigger_ops = {
- .owner = THIS_MODULE,
-};
-
-static int iio_sysfs_trigger_probe(int id)
-{
- struct iio_sysfs_trig *t;
- int ret;
- bool foundit = false;
- mutex_lock(&iio_syfs_trig_list_mut);
- list_for_each_entry(t, &iio_sysfs_trig_list, l)
- if (id == t->id) {
- foundit = true;
- break;
- }
- if (foundit) {
- ret = -EINVAL;
- goto out1;
- }
- t = kmalloc(sizeof(*t), GFP_KERNEL);
- if (t == NULL) {
- ret = -ENOMEM;
- goto out1;
- }
- t->id = id;
- t->trig = iio_trigger_alloc("sysfstrig%d", id);
- if (!t->trig) {
- ret = -ENOMEM;
- goto free_t;
- }
-
- t->trig->dev.groups = iio_sysfs_trigger_attr_groups;
- t->trig->ops = &iio_sysfs_trigger_ops;
- t->trig->dev.parent = &iio_sysfs_trig_dev;
- iio_trigger_set_drvdata(t->trig, t);
-
- init_irq_work(&t->work, iio_sysfs_trigger_work);
-
- ret = iio_trigger_register(t->trig);
- if (ret)
- goto out2;
- list_add(&t->l, &iio_sysfs_trig_list);
- __module_get(THIS_MODULE);
- mutex_unlock(&iio_syfs_trig_list_mut);
- return 0;
-
-out2:
- iio_trigger_put(t->trig);
-free_t:
- kfree(t);
-out1:
- mutex_unlock(&iio_syfs_trig_list_mut);
- return ret;
-}
-
-static int iio_sysfs_trigger_remove(int id)
-{
- bool foundit = false;
- struct iio_sysfs_trig *t;
- mutex_lock(&iio_syfs_trig_list_mut);
- list_for_each_entry(t, &iio_sysfs_trig_list, l)
- if (id == t->id) {
- foundit = true;
- break;
- }
- if (!foundit) {
- mutex_unlock(&iio_syfs_trig_list_mut);
- return -EINVAL;
- }
-
- iio_trigger_unregister(t->trig);
- iio_trigger_free(t->trig);
-
- list_del(&t->l);
- kfree(t);
- module_put(THIS_MODULE);
- mutex_unlock(&iio_syfs_trig_list_mut);
- return 0;
-}
-
-
-static int __init iio_sysfs_trig_init(void)
-{
- device_initialize(&iio_sysfs_trig_dev);
- dev_set_name(&iio_sysfs_trig_dev, "iio_sysfs_trigger");
- return device_add(&iio_sysfs_trig_dev);
-}
-module_init(iio_sysfs_trig_init);
-
-static void __exit iio_sysfs_trig_exit(void)
-{
- device_unregister(&iio_sysfs_trig_dev);
-}
-module_exit(iio_sysfs_trig_exit);
-
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
-MODULE_DESCRIPTION("Sysfs based trigger for the iio subsystem");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:iio-trig-sysfs");
diff --git a/drivers/staging/imx-drm/Kconfig b/drivers/staging/imx-drm/Kconfig
index ef699f7..394254f 100644
--- a/drivers/staging/imx-drm/Kconfig
+++ b/drivers/staging/imx-drm/Kconfig
@@ -15,7 +15,7 @@ config DRM_IMX_FB_HELPER
help
The DRM framework can provide a legacy /dev/fb0 framebuffer
for your device. This is necessary to get a framebuffer console
- and also for appplications using the legacy framebuffer API
+ and also for applications using the legacy framebuffer API
config DRM_IMX_PARALLEL_DISPLAY
tristate "Support for parallel displays"
@@ -30,6 +30,13 @@ config DRM_IMX_TVE
Choose this to enable the internal Television Encoder (TVe)
found on i.MX53 processors.
+config DRM_IMX_LDB
+ tristate "Support for LVDS displays"
+ depends on DRM_IMX && MFD_SYSCON
+ help
+ Choose this to enable the internal LVDS Display Bridge (LDB)
+ found on i.MX53 and i.MX6 processors.
+
config DRM_IMX_IPUV3_CORE
tristate "IPUv3 core support"
depends on DRM_IMX
diff --git a/drivers/staging/imx-drm/Makefile b/drivers/staging/imx-drm/Makefile
index 7e50184..bfaf693 100644
--- a/drivers/staging/imx-drm/Makefile
+++ b/drivers/staging/imx-drm/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_DRM_IMX) += imxdrm.o
obj-$(CONFIG_DRM_IMX_PARALLEL_DISPLAY) += parallel-display.o
obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
+obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
obj-$(CONFIG_DRM_IMX_FB_HELPER) += imx-fbdev.o
obj-$(CONFIG_DRM_IMX_IPUV3_CORE) += ipu-v3/
obj-$(CONFIG_DRM_IMX_IPUV3) += ipuv3-crtc.o
diff --git a/drivers/staging/imx-drm/TODO b/drivers/staging/imx-drm/TODO
index 123acbe..9cfa2a7 100644
--- a/drivers/staging/imx-drm/TODO
+++ b/drivers/staging/imx-drm/TODO
@@ -6,12 +6,10 @@ TODO:
- Factor out more code to common helper functions
- decide where to put the base driver. It is not specific to a subsystem
and would be used by DRM/KMS and media/V4L2
-- convert irq driver to irq_domain_add_linear
Missing features (not necessarily for moving out of staging):
- Add KMS plane support for CRTC driver
-- Add LDB (LVDS Display Bridge) support
- Add i.MX6 HDMI support
- Add support for IC (Image converter)
- Add support for CSI (CMOS Sensor interface)
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 6455305..a2e52a0 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -41,7 +41,6 @@ struct imx_drm_device {
struct list_head encoder_list;
struct list_head connector_list;
struct mutex mutex;
- int references;
int pipes;
struct drm_fbdev_cma *fbhelper;
};
@@ -69,28 +68,20 @@ struct imx_drm_connector {
struct module *owner;
};
-static int imx_drm_driver_firstopen(struct drm_device *drm)
-{
- if (!imx_drm_device_get())
- return -EINVAL;
-
- return 0;
-}
-
static void imx_drm_driver_lastclose(struct drm_device *drm)
{
struct imx_drm_device *imxdrm = drm->dev_private;
if (imxdrm->fbhelper)
drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
-
- imx_drm_device_put();
}
static int imx_drm_driver_unload(struct drm_device *drm)
{
struct imx_drm_device *imxdrm = drm->dev_private;
+ imx_drm_device_put();
+
drm_mode_config_cleanup(imxdrm->drm);
drm_kms_helper_poll_fini(imxdrm->drm);
@@ -144,7 +135,7 @@ int imx_drm_crtc_panel_format(struct drm_crtc *crtc, u32 encoder_type,
u32 interface_pix_fmt)
{
return imx_drm_crtc_panel_format_pins(crtc, encoder_type,
- interface_pix_fmt, 0, 0);
+ interface_pix_fmt, 2, 3);
}
EXPORT_SYMBOL_GPL(imx_drm_crtc_panel_format);
@@ -207,7 +198,6 @@ static const struct file_operations imx_drm_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_gem_cma_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
.read = drm_read,
.llseek = noop_llseek,
};
@@ -226,8 +216,6 @@ struct drm_device *imx_drm_device_get(void)
struct imx_drm_connector *con;
struct imx_drm_crtc *crtc;
- mutex_lock(&imxdrm->mutex);
-
list_for_each_entry(enc, &imxdrm->encoder_list, list) {
if (!try_module_get(enc->owner)) {
dev_err(imxdrm->dev, "could not get module %s\n",
@@ -252,10 +240,6 @@ struct drm_device *imx_drm_device_get(void)
}
}
- imxdrm->references++;
-
- mutex_unlock(&imxdrm->mutex);
-
return imxdrm->drm;
unwind_crtc:
@@ -293,8 +277,6 @@ void imx_drm_device_put(void)
list_for_each_entry(enc, &imxdrm->encoder_list, list)
module_put(enc->owner);
- imxdrm->references--;
-
mutex_unlock(&imxdrm->mutex);
}
EXPORT_SYMBOL_GPL(imx_drm_device_put);
@@ -447,6 +429,9 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
*/
imxdrm->drm->vblank_disable_allowed = 1;
+ if (!imx_drm_device_get())
+ ret = -EINVAL;
+
ret = 0;
err_init:
@@ -491,12 +476,11 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
{
struct imx_drm_device *imxdrm = __imx_drm_device();
struct imx_drm_crtc *imx_drm_crtc;
- const struct drm_crtc_funcs *crtc_funcs;
int ret;
mutex_lock(&imxdrm->mutex);
- if (imxdrm->references) {
+ if (imxdrm->drm->open_count) {
ret = -EBUSY;
goto err_busy;
}
@@ -512,8 +496,6 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
imx_drm_crtc->cookie.cookie = cookie;
imx_drm_crtc->cookie.id = id;
- crtc_funcs = imx_drm_helper_funcs->crtc_funcs;
-
imx_drm_crtc->crtc = crtc;
imx_drm_crtc->imxdrm = imxdrm;
@@ -577,7 +559,7 @@ int imx_drm_add_encoder(struct drm_encoder *encoder,
mutex_lock(&imxdrm->mutex);
- if (imxdrm->references) {
+ if (imxdrm->drm->open_count) {
ret = -EBUSY;
goto err_busy;
}
@@ -681,6 +663,7 @@ found:
return i;
}
+EXPORT_SYMBOL_GPL(imx_drm_encoder_get_mux_id);
/*
* imx_drm_remove_encoder - remove an encoder
@@ -721,7 +704,7 @@ int imx_drm_add_connector(struct drm_connector *connector,
mutex_lock(&imxdrm->mutex);
- if (imxdrm->references) {
+ if (imxdrm->drm->open_count) {
ret = -EBUSY;
goto err_busy;
}
@@ -786,7 +769,7 @@ int imx_drm_remove_connector(struct imx_drm_connector *imx_drm_connector)
}
EXPORT_SYMBOL_GPL(imx_drm_remove_connector);
-static struct drm_ioctl_desc imx_drm_ioctls[] = {
+static const struct drm_ioctl_desc imx_drm_ioctls[] = {
/* none so far */
};
@@ -794,13 +777,12 @@ static struct drm_driver imx_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM,
.load = imx_drm_driver_load,
.unload = imx_drm_driver_unload,
- .firstopen = imx_drm_driver_firstopen,
.lastclose = imx_drm_driver_lastclose,
.gem_free_object = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_cma_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.get_vblank_counter = drm_vblank_count,
.enable_vblank = imx_drm_enable_vblank,
diff --git a/drivers/staging/imx-drm/imx-ldb.c b/drivers/staging/imx-drm/imx-ldb.c
new file mode 100644
index 0000000..af733ea
--- /dev/null
+++ b/drivers/staging/imx-drm/imx-ldb.c
@@ -0,0 +1,626 @@
+/*
+ * i.MX drm driver - LVDS display bridge
+ *
+ * Copyright (C) 2012 Sascha Hauer, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <video/of_videomode.h>
+#include <linux/regmap.h>
+#include <linux/videodev2.h>
+
+#include "imx-drm.h"
+
+#define DRIVER_NAME "imx-ldb"
+
+#define LDB_CH0_MODE_EN_TO_DI0 (1 << 0)
+#define LDB_CH0_MODE_EN_TO_DI1 (3 << 0)
+#define LDB_CH0_MODE_EN_MASK (3 << 0)
+#define LDB_CH1_MODE_EN_TO_DI0 (1 << 2)
+#define LDB_CH1_MODE_EN_TO_DI1 (3 << 2)
+#define LDB_CH1_MODE_EN_MASK (3 << 2)
+#define LDB_SPLIT_MODE_EN (1 << 4)
+#define LDB_DATA_WIDTH_CH0_24 (1 << 5)
+#define LDB_BIT_MAP_CH0_JEIDA (1 << 6)
+#define LDB_DATA_WIDTH_CH1_24 (1 << 7)
+#define LDB_BIT_MAP_CH1_JEIDA (1 << 8)
+#define LDB_DI0_VS_POL_ACT_LOW (1 << 9)
+#define LDB_DI1_VS_POL_ACT_LOW (1 << 10)
+#define LDB_BGREF_RMODE_INT (1 << 15)
+
+#define con_to_imx_ldb_ch(x) container_of(x, struct imx_ldb_channel, connector)
+#define enc_to_imx_ldb_ch(x) container_of(x, struct imx_ldb_channel, encoder)
+
+struct imx_ldb;
+
+struct imx_ldb_channel {
+ struct imx_ldb *ldb;
+ struct drm_connector connector;
+ struct imx_drm_connector *imx_drm_connector;
+ struct drm_encoder encoder;
+ struct imx_drm_encoder *imx_drm_encoder;
+ int chno;
+ void *edid;
+ int edid_len;
+ struct drm_display_mode mode;
+ int mode_valid;
+};
+
+struct bus_mux {
+ int reg;
+ int shift;
+ int mask;
+};
+
+struct imx_ldb {
+ struct regmap *regmap;
+ struct device *dev;
+ struct imx_ldb_channel channel[2];
+ struct clk *clk[2]; /* our own clock */
+ struct clk *clk_sel[4]; /* parent of display clock */
+ struct clk *clk_pll[2]; /* upstream clock we can adjust */
+ u32 ldb_ctrl;
+ const struct bus_mux *lvds_mux;
+};
+
+static enum drm_connector_status imx_ldb_connector_detect(
+ struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void imx_ldb_connector_destroy(struct drm_connector *connector)
+{
+ /* do not free here */
+}
+
+static int imx_ldb_connector_get_modes(struct drm_connector *connector)
+{
+ struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector);
+ int num_modes = 0;
+
+ if (imx_ldb_ch->edid) {
+ drm_mode_connector_update_edid_property(connector,
+ imx_ldb_ch->edid);
+ num_modes = drm_add_edid_modes(connector, imx_ldb_ch->edid);
+ }
+
+ if (imx_ldb_ch->mode_valid) {
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_create(connector->dev);
+ drm_mode_copy(mode, &imx_ldb_ch->mode);
+ mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+ num_modes++;
+ }
+
+ return num_modes;
+}
+
+static int imx_ldb_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return 0;
+}
+
+static struct drm_encoder *imx_ldb_connector_best_encoder(
+ struct drm_connector *connector)
+{
+ struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector);
+
+ return &imx_ldb_ch->encoder;
+}
+
+static void imx_ldb_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static bool imx_ldb_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno,
+ unsigned long serial_clk, unsigned long di_clk)
+{
+ int ret;
+
+ dev_dbg(ldb->dev, "%s: now: %ld want: %ld\n", __func__,
+ clk_get_rate(ldb->clk_pll[chno]), serial_clk);
+ clk_set_rate(ldb->clk_pll[chno], serial_clk);
+
+ dev_dbg(ldb->dev, "%s after: %ld\n", __func__,
+ clk_get_rate(ldb->clk_pll[chno]));
+
+ dev_dbg(ldb->dev, "%s: now: %ld want: %ld\n", __func__,
+ clk_get_rate(ldb->clk[chno]),
+ (long int)di_clk);
+ clk_set_rate(ldb->clk[chno], di_clk);
+
+ dev_dbg(ldb->dev, "%s after: %ld\n", __func__,
+ clk_get_rate(ldb->clk[chno]));
+
+ /* set display clock mux to LDB input clock */
+ ret = clk_set_parent(ldb->clk_sel[mux], ldb->clk[chno]);
+ if (ret) {
+ dev_err(ldb->dev, "unable to set di%d parent clock to ldb_di%d\n", mux, chno);
+ }
+}
+
+static void imx_ldb_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
+ struct imx_ldb *ldb = imx_ldb_ch->ldb;
+ struct drm_display_mode *mode = &encoder->crtc->mode;
+ u32 pixel_fmt;
+ unsigned long serial_clk;
+ unsigned long di_clk = mode->clock * 1000;
+ int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->imx_drm_encoder,
+ encoder->crtc);
+
+ if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) {
+ /* dual channel LVDS mode */
+ serial_clk = 3500UL * mode->clock;
+ imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk);
+ imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk);
+ } else {
+ serial_clk = 7000UL * mode->clock;
+ imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk, di_clk);
+ }
+
+ switch (imx_ldb_ch->chno) {
+ case 0:
+ pixel_fmt = (ldb->ldb_ctrl & LDB_DATA_WIDTH_CH0_24) ?
+ V4L2_PIX_FMT_RGB24 : V4L2_PIX_FMT_BGR666;
+ break;
+ case 1:
+ pixel_fmt = (ldb->ldb_ctrl & LDB_DATA_WIDTH_CH1_24) ?
+ V4L2_PIX_FMT_RGB24 : V4L2_PIX_FMT_BGR666;
+ break;
+ default:
+ dev_err(ldb->dev, "unable to config di%d panel format\n",
+ imx_ldb_ch->chno);
+ pixel_fmt = V4L2_PIX_FMT_RGB24;
+ }
+
+ imx_drm_crtc_panel_format(encoder->crtc, DRM_MODE_ENCODER_LVDS,
+ pixel_fmt);
+}
+
+static void imx_ldb_encoder_commit(struct drm_encoder *encoder)
+{
+ struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
+ struct imx_ldb *ldb = imx_ldb_ch->ldb;
+ int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
+ int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->imx_drm_encoder,
+ encoder->crtc);
+
+ if (dual) {
+ clk_prepare_enable(ldb->clk[0]);
+ clk_prepare_enable(ldb->clk[1]);
+ }
+
+ if (imx_ldb_ch == &ldb->channel[0] || dual) {
+ ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK;
+ if (mux == 0 || ldb->lvds_mux)
+ ldb->ldb_ctrl |= LDB_CH0_MODE_EN_TO_DI0;
+ else if (mux == 1)
+ ldb->ldb_ctrl |= LDB_CH0_MODE_EN_TO_DI1;
+ }
+ if (imx_ldb_ch == &ldb->channel[1] || dual) {
+ ldb->ldb_ctrl &= ~LDB_CH1_MODE_EN_MASK;
+ if (mux == 1 || ldb->lvds_mux)
+ ldb->ldb_ctrl |= LDB_CH1_MODE_EN_TO_DI1;
+ else if (mux == 0)
+ ldb->ldb_ctrl |= LDB_CH1_MODE_EN_TO_DI0;
+ }
+
+ if (ldb->lvds_mux) {
+ const struct bus_mux *lvds_mux = NULL;
+
+ if (imx_ldb_ch == &ldb->channel[0])
+ lvds_mux = &ldb->lvds_mux[0];
+ else if (imx_ldb_ch == &ldb->channel[1])
+ lvds_mux = &ldb->lvds_mux[1];
+
+ regmap_update_bits(ldb->regmap, lvds_mux->reg, lvds_mux->mask,
+ mux << lvds_mux->shift);
+ }
+
+ regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl);
+}
+
+static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
+ struct imx_ldb *ldb = imx_ldb_ch->ldb;
+ int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
+
+ if (mode->clock > 170000) {
+ dev_warn(ldb->dev,
+ "%s: mode exceeds 170 MHz pixel clock\n", __func__);
+ }
+ if (mode->clock > 85000 && !dual) {
+ dev_warn(ldb->dev,
+ "%s: mode exceeds 85 MHz pixel clock\n", __func__);
+ }
+
+ /* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */
+ if (imx_ldb_ch == &ldb->channel[0]) {
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ldb->ldb_ctrl |= LDB_DI0_VS_POL_ACT_LOW;
+ else if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ ldb->ldb_ctrl &= ~LDB_DI0_VS_POL_ACT_LOW;
+ }
+ if (imx_ldb_ch == &ldb->channel[1]) {
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ldb->ldb_ctrl |= LDB_DI1_VS_POL_ACT_LOW;
+ else if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ ldb->ldb_ctrl &= ~LDB_DI1_VS_POL_ACT_LOW;
+ }
+}
+
+static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
+{
+ struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
+ struct imx_ldb *ldb = imx_ldb_ch->ldb;
+
+ /*
+ * imx_ldb_encoder_disable is called by
+ * drm_helper_disable_unused_functions without
+ * the encoder being enabled before.
+ */
+ if (imx_ldb_ch == &ldb->channel[0] &&
+ (ldb->ldb_ctrl & LDB_CH0_MODE_EN_MASK) == 0)
+ return;
+ else if (imx_ldb_ch == &ldb->channel[1] &&
+ (ldb->ldb_ctrl & LDB_CH1_MODE_EN_MASK) == 0)
+ return;
+
+ if (imx_ldb_ch == &ldb->channel[0])
+ ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK;
+ else if (imx_ldb_ch == &ldb->channel[1])
+ ldb->ldb_ctrl &= ~LDB_CH1_MODE_EN_MASK;
+
+ regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl);
+
+ if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) {
+ clk_disable_unprepare(ldb->clk[0]);
+ clk_disable_unprepare(ldb->clk[1]);
+ }
+}
+
+static void imx_ldb_encoder_destroy(struct drm_encoder *encoder)
+{
+ /* do not free here */
+}
+
+static struct drm_connector_funcs imx_ldb_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = imx_ldb_connector_detect,
+ .destroy = imx_ldb_connector_destroy,
+};
+
+static struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = {
+ .get_modes = imx_ldb_connector_get_modes,
+ .best_encoder = imx_ldb_connector_best_encoder,
+ .mode_valid = imx_ldb_connector_mode_valid,
+};
+
+static struct drm_encoder_funcs imx_ldb_encoder_funcs = {
+ .destroy = imx_ldb_encoder_destroy,
+};
+
+static struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = {
+ .dpms = imx_ldb_encoder_dpms,
+ .mode_fixup = imx_ldb_encoder_mode_fixup,
+ .prepare = imx_ldb_encoder_prepare,
+ .commit = imx_ldb_encoder_commit,
+ .mode_set = imx_ldb_encoder_mode_set,
+ .disable = imx_ldb_encoder_disable,
+};
+
+static int imx_ldb_get_clk(struct imx_ldb *ldb, int chno)
+{
+ char clkname[16];
+
+ sprintf(clkname, "di%d", chno);
+ ldb->clk[chno] = devm_clk_get(ldb->dev, clkname);
+ if (IS_ERR(ldb->clk[chno]))
+ return PTR_ERR(ldb->clk[chno]);
+
+ sprintf(clkname, "di%d_pll", chno);
+ ldb->clk_pll[chno] = devm_clk_get(ldb->dev, clkname);
+ if (IS_ERR(ldb->clk_pll[chno]))
+ return PTR_ERR(ldb->clk_pll[chno]);
+
+ return 0;
+}
+
+static int imx_ldb_register(struct imx_ldb_channel *imx_ldb_ch)
+{
+ int ret;
+ struct imx_ldb *ldb = imx_ldb_ch->ldb;
+
+ ret = imx_ldb_get_clk(ldb, imx_ldb_ch->chno);
+ if (ret)
+ return ret;
+ if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) {
+ ret |= imx_ldb_get_clk(ldb, 1);
+ if (ret)
+ return ret;
+ }
+
+ imx_ldb_ch->connector.funcs = &imx_ldb_connector_funcs;
+ imx_ldb_ch->encoder.funcs = &imx_ldb_encoder_funcs;
+
+ imx_ldb_ch->encoder.encoder_type = DRM_MODE_ENCODER_LVDS;
+ imx_ldb_ch->connector.connector_type = DRM_MODE_CONNECTOR_LVDS;
+
+ drm_encoder_helper_add(&imx_ldb_ch->encoder,
+ &imx_ldb_encoder_helper_funcs);
+ ret = imx_drm_add_encoder(&imx_ldb_ch->encoder,
+ &imx_ldb_ch->imx_drm_encoder, THIS_MODULE);
+ if (ret) {
+ dev_err(ldb->dev, "adding encoder failed with %d\n", ret);
+ return ret;
+ }
+
+ drm_connector_helper_add(&imx_ldb_ch->connector,
+ &imx_ldb_connector_helper_funcs);
+
+ ret = imx_drm_add_connector(&imx_ldb_ch->connector,
+ &imx_ldb_ch->imx_drm_connector, THIS_MODULE);
+ if (ret) {
+ imx_drm_remove_encoder(imx_ldb_ch->imx_drm_encoder);
+ dev_err(ldb->dev, "adding connector failed with %d\n", ret);
+ return ret;
+ }
+
+ drm_mode_connector_attach_encoder(&imx_ldb_ch->connector,
+ &imx_ldb_ch->encoder);
+
+ return 0;
+}
+
+enum {
+ LVDS_BIT_MAP_SPWG,
+ LVDS_BIT_MAP_JEIDA
+};
+
+static const char *imx_ldb_bit_mappings[] = {
+ [LVDS_BIT_MAP_SPWG] = "spwg",
+ [LVDS_BIT_MAP_JEIDA] = "jeida",
+};
+
+const int of_get_data_mapping(struct device_node *np)
+{
+ const char *bm;
+ int ret, i;
+
+ ret = of_property_read_string(np, "fsl,data-mapping", &bm);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(imx_ldb_bit_mappings); i++)
+ if (!strcasecmp(bm, imx_ldb_bit_mappings[i]))
+ return i;
+
+ return -EINVAL;
+}
+
+static struct bus_mux imx6q_lvds_mux[2] = {
+ {
+ .reg = IOMUXC_GPR3,
+ .shift = 6,
+ .mask = IMX6Q_GPR3_LVDS0_MUX_CTL_MASK,
+ }, {
+ .reg = IOMUXC_GPR3,
+ .shift = 8,
+ .mask = IMX6Q_GPR3_LVDS1_MUX_CTL_MASK,
+ }
+};
+
+/*
+ * For a device declaring compatible = "fsl,imx6q-ldb", "fsl,imx53-ldb",
+ * of_match_device will walk through this list and take the first entry
+ * matching any of its compatible values. Therefore, the more generic
+ * entries (in this case fsl,imx53-ldb) need to be ordered last.
+ */
+static const struct of_device_id imx_ldb_dt_ids[] = {
+ { .compatible = "fsl,imx6q-ldb", .data = imx6q_lvds_mux, },
+ { .compatible = "fsl,imx53-ldb", .data = NULL, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, imx_ldb_dt_ids);
+
+static int imx_ldb_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *of_id =
+ of_match_device(of_match_ptr(imx_ldb_dt_ids),
+ &pdev->dev);
+ struct device_node *child;
+ const u8 *edidp;
+ struct imx_ldb *imx_ldb;
+ int datawidth;
+ int mapping;
+ int dual;
+ int ret;
+ int i;
+
+ imx_ldb = devm_kzalloc(&pdev->dev, sizeof(*imx_ldb), GFP_KERNEL);
+ if (!imx_ldb)
+ return -ENOMEM;
+
+ imx_ldb->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
+ if (IS_ERR(imx_ldb->regmap)) {
+ dev_err(&pdev->dev, "failed to get parent regmap\n");
+ return PTR_ERR(imx_ldb->regmap);
+ }
+
+ imx_ldb->dev = &pdev->dev;
+
+ if (of_id)
+ imx_ldb->lvds_mux = of_id->data;
+
+ dual = of_property_read_bool(np, "fsl,dual-channel");
+ if (dual)
+ imx_ldb->ldb_ctrl |= LDB_SPLIT_MODE_EN;
+
+ /*
+ * There are three different possible clock mux configurations:
+ * i.MX53: ipu1_di0_sel, ipu1_di1_sel
+ * i.MX6q: ipu1_di0_sel, ipu1_di1_sel, ipu2_di0_sel, ipu2_di1_sel
+ * i.MX6dl: ipu1_di0_sel, ipu1_di1_sel, lcdif_sel
+ * Map them all to di0_sel...di3_sel.
+ */
+ for (i = 0; i < 4; i++) {
+ char clkname[16];
+
+ sprintf(clkname, "di%d_sel", i);
+ imx_ldb->clk_sel[i] = devm_clk_get(imx_ldb->dev, clkname);
+ if (IS_ERR(imx_ldb->clk_sel[i])) {
+ ret = PTR_ERR(imx_ldb->clk_sel[i]);
+ imx_ldb->clk_sel[i] = NULL;
+ break;
+ }
+ }
+ if (i == 0)
+ return ret;
+
+ for_each_child_of_node(np, child) {
+ struct imx_ldb_channel *channel;
+
+ ret = of_property_read_u32(child, "reg", &i);
+ if (ret || i < 0 || i > 1)
+ return -EINVAL;
+
+ if (dual && i > 0) {
+ dev_warn(&pdev->dev, "dual-channel mode, ignoring second output\n");
+ continue;
+ }
+
+ if (!of_device_is_available(child))
+ continue;
+
+ channel = &imx_ldb->channel[i];
+ channel->ldb = imx_ldb;
+ channel->chno = i;
+
+ edidp = of_get_property(child, "edid", &channel->edid_len);
+ if (edidp) {
+ channel->edid = kmemdup(edidp, channel->edid_len,
+ GFP_KERNEL);
+ } else {
+ ret = of_get_drm_display_mode(child, &channel->mode, 0);
+ if (!ret)
+ channel->mode_valid = 1;
+ }
+
+ ret = of_property_read_u32(child, "fsl,data-width", &datawidth);
+ if (ret)
+ datawidth = 0;
+ else if (datawidth != 18 && datawidth != 24)
+ return -EINVAL;
+
+ mapping = of_get_data_mapping(child);
+ switch (mapping) {
+ case LVDS_BIT_MAP_SPWG:
+ if (datawidth == 24) {
+ if (i == 0 || dual)
+ imx_ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24;
+ if (i == 1 || dual)
+ imx_ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24;
+ }
+ break;
+ case LVDS_BIT_MAP_JEIDA:
+ if (datawidth == 18) {
+ dev_err(&pdev->dev, "JEIDA standard only supported in 24 bit\n");
+ return -EINVAL;
+ }
+ if (i == 0 || dual)
+ imx_ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 | LDB_BIT_MAP_CH0_JEIDA;
+ if (i == 1 || dual)
+ imx_ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 | LDB_BIT_MAP_CH1_JEIDA;
+ break;
+ default:
+ dev_err(&pdev->dev, "data mapping not specified or invalid\n");
+ return -EINVAL;
+ }
+
+ ret = imx_ldb_register(channel);
+ if (ret)
+ return ret;
+
+ imx_drm_encoder_add_possible_crtcs(channel->imx_drm_encoder, child);
+ }
+
+ platform_set_drvdata(pdev, imx_ldb);
+
+ return 0;
+}
+
+static int imx_ldb_remove(struct platform_device *pdev)
+{
+ struct imx_ldb *imx_ldb = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ struct imx_ldb_channel *channel = &imx_ldb->channel[i];
+ struct drm_connector *connector = &channel->connector;
+ struct drm_encoder *encoder = &channel->encoder;
+
+ drm_mode_connector_detach_encoder(connector, encoder);
+
+ imx_drm_remove_connector(channel->imx_drm_connector);
+ imx_drm_remove_encoder(channel->imx_drm_encoder);
+ }
+
+ return 0;
+}
+
+static struct platform_driver imx_ldb_driver = {
+ .probe = imx_ldb_probe,
+ .remove = imx_ldb_remove,
+ .driver = {
+ .of_match_table = imx_ldb_dt_ids,
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(imx_ldb_driver);
+
+MODULE_DESCRIPTION("i.MX LVDS driver");
+MODULE_AUTHOR("Sascha Hauer, Pengutronix");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c
index 03892de..33d6525 100644
--- a/drivers/staging/imx-drm/imx-tve.c
+++ b/drivers/staging/imx-drm/imx-tve.c
@@ -21,8 +21,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/module.h>
-#include <linux/of_i2c.h>
-#include <linux/pinctrl/consumer.h>
+#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
@@ -132,12 +131,14 @@ struct imx_tve {
};
static void tve_lock(void *__tve)
+__acquires(&tve->lock)
{
struct imx_tve *tve = __tve;
spin_lock(&tve->lock);
}
static void tve_unlock(void *__tve)
+__releases(&tve->lock)
{
struct imx_tve *tve = __tve;
spin_unlock(&tve->lock);
@@ -165,7 +166,10 @@ static void tve_enable(struct imx_tve *tve)
regmap_write(tve->regmap, TVE_INT_CONT_REG, 0);
else
regmap_write(tve->regmap, TVE_INT_CONT_REG,
- TVE_CD_SM_IEN | TVE_CD_LM_IEN | TVE_CD_MON_END_IEN);
+ TVE_CD_SM_IEN |
+ TVE_CD_LM_IEN |
+ TVE_CD_MON_END_IEN);
+
spin_unlock_irqrestore(&tve->enable_lock, flags);
}
@@ -466,7 +470,9 @@ static int clk_tve_di_set_rate(struct clk_hw *hw, unsigned long rate,
else
val = TVE_DAC_FULL_RATE;
- ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, TVE_DAC_SAMP_RATE_MASK, val);
+ ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
+ TVE_DAC_SAMP_RATE_MASK, val);
+
if (ret < 0) {
dev_err(tve->dev, "failed to set divider: %d\n", ret);
return ret;
@@ -610,22 +616,17 @@ static int imx_tve_probe(struct platform_device *pdev)
}
if (tve->mode == TVE_MODE_VGA) {
- struct pinctrl *pinctrl;
-
- pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(pinctrl)) {
- ret = PTR_ERR(pinctrl);
- dev_warn(&pdev->dev, "failed to setup pinctrl: %d", ret);
- return ret;
- }
+ ret = of_property_read_u32(np, "fsl,hsync-pin",
+ &tve->hsync_pin);
- ret = of_property_read_u32(np, "fsl,hsync-pin", &tve->hsync_pin);
if (ret < 0) {
dev_err(&pdev->dev, "failed to get vsync pin\n");
return ret;
}
- ret |= of_property_read_u32(np, "fsl,vsync-pin", &tve->vsync_pin);
+ ret |= of_property_read_u32(np, "fsl,vsync-pin",
+ &tve->vsync_pin);
+
if (ret < 0) {
dev_err(&pdev->dev, "failed to get vsync pin\n");
return ret;
@@ -633,16 +634,9 @@ static int imx_tve_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get memory region\n");
- return -ENOENT;
- }
-
- base = devm_request_and_ioremap(&pdev->dev, res);
- if (!base) {
- dev_err(&pdev->dev, "failed to remap memory region\n");
- return -ENOENT;
- }
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
tve_regmap_config.lock_arg = tve;
tve->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "tve", base,
@@ -755,3 +749,4 @@ module_platform_driver(imx_tve_driver);
MODULE_DESCRIPTION("i.MX Television Encoder driver");
MODULE_AUTHOR("Philipp Zabel, Pengutronix");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx-tve");
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-common.c b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
index 0127601..ba464e5 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-common.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
@@ -27,6 +27,7 @@
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
#include <linux/of_device.h>
#include "imx-ipu-v3.h"
@@ -799,16 +800,18 @@ err_di_0:
static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs)
{
unsigned long status;
- int i, bit, irq_base;
+ int i, bit, irq;
for (i = 0; i < num_regs; i++) {
status = ipu_cm_read(ipu, IPU_INT_STAT(regs[i]));
status &= ipu_cm_read(ipu, IPU_INT_CTRL(regs[i]));
- irq_base = ipu->irq_start + regs[i] * 32;
- for_each_set_bit(bit, &status, 32)
- generic_handle_irq(irq_base + bit);
+ for_each_set_bit(bit, &status, 32) {
+ irq = irq_linear_revmap(ipu->domain, regs[i] * 32 + bit);
+ if (irq)
+ generic_handle_irq(irq);
+ }
}
}
@@ -838,57 +841,15 @@ static void ipu_err_irq_handler(unsigned int irq, struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static void ipu_ack_irq(struct irq_data *d)
-{
- struct ipu_soc *ipu = irq_data_get_irq_chip_data(d);
- unsigned int irq = d->irq - ipu->irq_start;
-
- ipu_cm_write(ipu, 1 << (irq % 32), IPU_INT_STAT(irq / 32));
-}
-
-static void ipu_unmask_irq(struct irq_data *d)
-{
- struct ipu_soc *ipu = irq_data_get_irq_chip_data(d);
- unsigned int irq = d->irq - ipu->irq_start;
- unsigned long flags;
- u32 reg;
-
- spin_lock_irqsave(&ipu->lock, flags);
-
- reg = ipu_cm_read(ipu, IPU_INT_CTRL(irq / 32));
- reg |= 1 << (irq % 32);
- ipu_cm_write(ipu, reg, IPU_INT_CTRL(irq / 32));
-
- spin_unlock_irqrestore(&ipu->lock, flags);
-}
-
-static void ipu_mask_irq(struct irq_data *d)
-{
- struct ipu_soc *ipu = irq_data_get_irq_chip_data(d);
- unsigned int irq = d->irq - ipu->irq_start;
- unsigned long flags;
- u32 reg;
-
- spin_lock_irqsave(&ipu->lock, flags);
-
- reg = ipu_cm_read(ipu, IPU_INT_CTRL(irq / 32));
- reg &= ~(1 << (irq % 32));
- ipu_cm_write(ipu, reg, IPU_INT_CTRL(irq / 32));
-
- spin_unlock_irqrestore(&ipu->lock, flags);
-}
-
-static struct irq_chip ipu_irq_chip = {
- .name = "IPU",
- .irq_ack = ipu_ack_irq,
- .irq_mask = ipu_mask_irq,
- .irq_unmask = ipu_unmask_irq,
-};
-
int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
enum ipu_channel_irq irq_type)
{
- return ipu->irq_start + irq_type + channel->num;
+ int irq = irq_linear_revmap(ipu->domain, irq_type + channel->num);
+
+ if (!irq)
+ irq = irq_create_mapping(ipu->domain, irq_type + channel->num);
+
+ return irq;
}
EXPORT_SYMBOL_GPL(ipu_idmac_channel_irq);
@@ -975,18 +936,48 @@ err_register:
return ret;
}
+
static int ipu_irq_init(struct ipu_soc *ipu)
{
- int i;
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ unsigned long unused[IPU_NUM_IRQS / 32] = {
+ 0x400100d0, 0xffe000fd,
+ 0x400100d0, 0xffe000fd,
+ 0x400100d0, 0xffe000fd,
+ 0x4077ffff, 0xffe7e1fd,
+ 0x23fffffe, 0x8880fff0,
+ 0xf98fe7d0, 0xfff81fff,
+ 0x400100d0, 0xffe000fd,
+ 0x00000000,
+ };
+ int ret, i;
+
+ ipu->domain = irq_domain_add_linear(ipu->dev->of_node, IPU_NUM_IRQS,
+ &irq_generic_chip_ops, ipu);
+ if (!ipu->domain) {
+ dev_err(ipu->dev, "failed to add irq domain\n");
+ return -ENODEV;
+ }
- ipu->irq_start = irq_alloc_descs(-1, 0, IPU_NUM_IRQS, 0);
- if (ipu->irq_start < 0)
- return ipu->irq_start;
+ ret = irq_alloc_domain_generic_chips(ipu->domain, 32, 1, "IPU",
+ handle_level_irq, 0, IRQF_VALID, 0);
+ if (ret < 0) {
+ dev_err(ipu->dev, "failed to alloc generic irq chips\n");
+ irq_domain_remove(ipu->domain);
+ return ret;
+ }
- for (i = ipu->irq_start; i < ipu->irq_start + IPU_NUM_IRQS; i++) {
- irq_set_chip_and_handler(i, &ipu_irq_chip, handle_level_irq);
- set_irq_flags(i, IRQF_VALID);
- irq_set_chip_data(i, ipu);
+ for (i = 0; i < IPU_NUM_IRQS; i += 32) {
+ gc = irq_get_domain_generic_chip(ipu->domain, i);
+ gc->reg_base = ipu->cm_reg;
+ gc->unused = unused[i / 32];
+ ct = gc->chip_types;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
+ ct->chip.irq_mask = irq_gc_mask_clr_bit;
+ ct->chip.irq_unmask = irq_gc_mask_set_bit;
+ ct->regs.ack = IPU_INT_STAT(i / 32);
+ ct->regs.mask = IPU_INT_CTRL(i / 32);
}
irq_set_chained_handler(ipu->irq_sync, ipu_irq_handler);
@@ -999,20 +990,22 @@ static int ipu_irq_init(struct ipu_soc *ipu)
static void ipu_irq_exit(struct ipu_soc *ipu)
{
- int i;
+ int i, irq;
irq_set_chained_handler(ipu->irq_err, NULL);
irq_set_handler_data(ipu->irq_err, NULL);
irq_set_chained_handler(ipu->irq_sync, NULL);
irq_set_handler_data(ipu->irq_sync, NULL);
- for (i = ipu->irq_start; i < ipu->irq_start + IPU_NUM_IRQS; i++) {
- set_irq_flags(i, 0);
- irq_set_chip(i, NULL);
- irq_set_chip_data(i, NULL);
+ /* TODO: remove irq_domain_generic_chips */
+
+ for (i = 0; i < IPU_NUM_IRQS; i++) {
+ irq = irq_linear_revmap(ipu->domain, i);
+ if (irq)
+ irq_dispose_mapping(irq);
}
- irq_free_descs(ipu->irq_start, IPU_NUM_IRQS);
+ irq_domain_remove(ipu->domain);
}
static int ipu_probe(struct platform_device *pdev)
@@ -1082,21 +1075,23 @@ static int ipu_probe(struct platform_device *pdev)
ipu->cpmem_base = devm_ioremap(&pdev->dev,
ipu_base + devtype->cpmem_ofs, PAGE_SIZE);
- if (!ipu->cm_reg || !ipu->idmac_reg || !ipu->cpmem_base) {
- ret = -ENOMEM;
- goto failed_ioremap;
- }
+ if (!ipu->cm_reg || !ipu->idmac_reg || !ipu->cpmem_base)
+ return -ENOMEM;
ipu->clk = devm_clk_get(&pdev->dev, "bus");
if (IS_ERR(ipu->clk)) {
ret = PTR_ERR(ipu->clk);
dev_err(&pdev->dev, "clk_get failed with %d", ret);
- goto failed_clk_get;
+ return ret;
}
platform_set_drvdata(pdev, ipu);
- clk_prepare_enable(ipu->clk);
+ ret = clk_prepare_enable(ipu->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
+ return ret;
+ }
ipu->dev = &pdev->dev;
ipu->irq_sync = irq_sync;
@@ -1141,8 +1136,6 @@ out_failed_reset:
ipu_irq_exit(ipu);
out_failed_irq:
clk_disable_unprepare(ipu->clk);
-failed_clk_get:
-failed_ioremap:
return ret;
}
@@ -1170,6 +1163,7 @@ static struct platform_driver imx_ipu_driver = {
module_platform_driver(imx_ipu_driver);
+MODULE_ALIAS("platform:imx-ipuv3");
MODULE_DESCRIPTION("i.MX IPU v3 driver");
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-dc.c b/drivers/staging/imx-drm/ipu-v3/ipu-dc.c
index 59f03f9..21bf1c8 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-dc.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-dc.c
@@ -161,14 +161,15 @@ int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced,
u32 pixel_fmt, u32 width)
{
struct ipu_dc_priv *priv = dc->priv;
- u32 reg = 0, map;
+ u32 reg = 0;
+ int map;
dc->di = ipu_di_get_num(di);
map = ipu_pixfmt_to_map(pixel_fmt);
if (map < 0) {
dev_dbg(priv->dev, "IPU_DISP: No MAP\n");
- return -EINVAL;
+ return map;
}
if (interlaced) {
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-di.c b/drivers/staging/imx-drm/ipu-v3/ipu-di.c
index 19d777e..948a49b 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-di.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-di.c
@@ -603,7 +603,12 @@ int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
vsync_cnt = 3;
if (di->id == 1)
- vsync_cnt = 6;
+ /*
+ * TODO: change only for TVEv2, parallel display
+ * uses pin 2 / 3
+ */
+ if (!(sig->hsync_pin == 2 && sig->vsync_pin == 3))
+ vsync_cnt = 6;
if (sig->Hsync_pol) {
if (sig->hsync_pin == 2)
@@ -614,11 +619,11 @@ int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
di_gen |= DI_GEN_POLARITY_7;
}
if (sig->Vsync_pol) {
- if (sig->hsync_pin == 3)
+ if (sig->vsync_pin == 3)
di_gen |= DI_GEN_POLARITY_3;
- else if (sig->hsync_pin == 6)
+ else if (sig->vsync_pin == 6)
di_gen |= DI_GEN_POLARITY_6;
- else if (sig->hsync_pin == 8)
+ else if (sig->vsync_pin == 8)
di_gen |= DI_GEN_POLARITY_8;
}
}
@@ -649,7 +654,9 @@ EXPORT_SYMBOL_GPL(ipu_di_init_sync_panel);
int ipu_di_enable(struct ipu_di *di)
{
- clk_prepare_enable(di->clk_di_pixel);
+ int ret = clk_prepare_enable(di->clk_di_pixel);
+ if (ret)
+ return ret;
ipu_module_enable(di->ipu, di->module);
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c b/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c
index 91821bc..2e97c33 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c
@@ -61,7 +61,7 @@ struct dmfc_channel_data {
static const struct dmfc_channel_data dmfcdata[] = {
{
- .ipu_channel = 23,
+ .ipu_channel = IPUV3_CHANNEL_MEM_BG_SYNC,
.channel_reg = DMFC_DP_CHAN,
.shift = DMFC_DP_CHAN_5B_23,
.eot_shift = 20,
@@ -73,13 +73,13 @@ static const struct dmfc_channel_data dmfcdata[] = {
.eot_shift = 22,
.max_fifo_lines = 1,
}, {
- .ipu_channel = 27,
+ .ipu_channel = IPUV3_CHANNEL_MEM_FG_SYNC,
.channel_reg = DMFC_DP_CHAN,
.shift = DMFC_DP_CHAN_5F_27,
.eot_shift = 21,
.max_fifo_lines = 2,
}, {
- .ipu_channel = 28,
+ .ipu_channel = IPUV3_CHANNEL_MEM_DC_SYNC,
.channel_reg = DMFC_WR_CHAN,
.shift = DMFC_WR_CHAN_1_28,
.eot_shift = 16,
@@ -292,7 +292,7 @@ int ipu_dmfc_alloc_bandwidth(struct dmfc_channel *dmfc,
{
struct ipu_dmfc_priv *priv = dmfc->priv;
int slots = dmfc_bandwidth_to_slots(priv, bandwidth_pixel_per_second);
- int segment = 0, ret = 0;
+ int segment = -1, ret = 0;
dev_dbg(priv->dev, "dmfc: trying to allocate %ldMpixel/s for IPU channel %d\n",
bandwidth_pixel_per_second / 1000000,
@@ -307,7 +307,17 @@ int ipu_dmfc_alloc_bandwidth(struct dmfc_channel *dmfc,
goto out;
}
- segment = dmfc_find_slots(priv, slots);
+ /* Always allocate at least 128*4 bytes (2 slots) */
+ if (slots < 2)
+ slots = 2;
+
+ /* For the MEM_BG channel, first try to allocate twice the slots */
+ if (dmfc->data->ipu_channel == IPUV3_CHANNEL_MEM_BG_SYNC)
+ segment = dmfc_find_slots(priv, slots * 2);
+ if (segment >= 0)
+ slots *= 2;
+ else
+ segment = dmfc_find_slots(priv, slots);
if (segment < 0) {
ret = -EBUSY;
goto out;
@@ -391,7 +401,7 @@ int ipu_dmfc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base,
* We have a total bandwidth of clkrate * 4pixel divided
* into 8 slots.
*/
- priv->bandwidth_per_slot = clk_get_rate(ipu_clk) / 8;
+ priv->bandwidth_per_slot = clk_get_rate(ipu_clk) * 4 / 8;
dev_dbg(dev, "dmfc: 8 slots with %ldMpixel/s bandwidth each\n",
priv->bandwidth_per_slot / 1000000);
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-dp.c b/drivers/staging/imx-drm/ipu-v3/ipu-dp.c
index 113b046..231afd6 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-dp.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-dp.c
@@ -46,6 +46,8 @@
#define DP_COM_CONF_CSC_DEF_BG (2 << 8)
#define DP_COM_CONF_CSC_DEF_BOTH (1 << 8)
+#define IPUV3_NUM_FLOWS 3
+
struct ipu_dp_priv;
struct ipu_dp {
@@ -67,7 +69,7 @@ struct ipu_dp_priv {
struct ipu_soc *ipu;
struct device *dev;
void __iomem *base;
- struct ipu_flow flow[3];
+ struct ipu_flow flow[IPUV3_NUM_FLOWS];
struct mutex mutex;
int use_count;
};
@@ -280,7 +282,7 @@ struct ipu_dp *ipu_dp_get(struct ipu_soc *ipu, unsigned int flow)
struct ipu_dp_priv *priv = ipu->dp_priv;
struct ipu_dp *dp;
- if (flow > 5)
+ if ((flow >> 1) >= IPUV3_NUM_FLOWS)
return ERR_PTR(-EINVAL);
if (flow & 1)
@@ -309,19 +311,20 @@ int ipu_dp_init(struct ipu_soc *ipu, struct device *dev, unsigned long base)
int i;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
priv->dev = dev;
priv->ipu = ipu;
ipu->dp_priv = priv;
priv->base = devm_ioremap(dev, base, PAGE_SIZE);
- if (!priv->base) {
+ if (!priv->base)
return -ENOMEM;
- }
mutex_init(&priv->mutex);
- for (i = 0; i < 3; i++) {
+ for (i = 0; i < IPUV3_NUM_FLOWS; i++) {
priv->flow[i].foreground.foreground = 1;
priv->flow[i].base = priv->base + ipu_dp_flow_base[i];
priv->flow[i].priv = priv;
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-prv.h b/drivers/staging/imx-drm/ipu-v3/ipu-prv.h
index 5518028..4df0050 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-prv.h
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-prv.h
@@ -110,7 +110,7 @@ struct ipu_soc;
#define IDMAC_BAND_EN(ch) IPU_IDMAC_REG(0x0040 + 4 * ((ch) / 32))
#define IDMAC_CHA_BUSY(ch) IPU_IDMAC_REG(0x0100 + 4 * ((ch) / 32))
-#define IPU_NUM_IRQS (32 * 5)
+#define IPU_NUM_IRQS (32 * 15)
enum ipu_modules {
IPU_CONF_CSI0_EN = (1 << 0),
@@ -170,9 +170,9 @@ struct ipu_soc {
struct ipuv3_channel channel[64];
- int irq_start;
int irq_sync;
int irq_err;
+ struct irq_domain *domain;
struct ipu_dc_priv *dc_priv;
struct ipu_dp_priv *dp_priv;
diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/staging/imx-drm/ipuv3-crtc.c
index ff5c633..6fd37a7 100644
--- a/drivers/staging/imx-drm/ipuv3-crtc.c
+++ b/drivers/staging/imx-drm/ipuv3-crtc.c
@@ -22,7 +22,6 @@
#include <linux/device.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <linux/fb.h>
#include <linux/clk.h>
@@ -42,9 +41,6 @@ struct ipu_framebuffer {
};
struct ipu_crtc {
- struct drm_fb_helper fb_helper;
- struct ipu_framebuffer ifb;
- int num_crtcs;
struct device *dev;
struct drm_crtc base;
struct imx_drm_crtc *imx_crtc;
@@ -54,7 +50,6 @@ struct ipu_crtc {
struct dmfc_channel *dmfc;
struct ipu_di *di;
int enabled;
- struct ipu_priv *ipu_priv;
struct drm_pending_vblank_event *page_flip_event;
struct drm_framebuffer *newfb;
int irq;
@@ -134,7 +129,8 @@ static void ipu_crtc_dpms(struct drm_crtc *crtc, int mode)
static int ipu_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
{
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
int ret;
@@ -152,6 +148,7 @@ static int ipu_page_flip(struct drm_crtc *crtc,
ipu_crtc->newfb = fb;
ipu_crtc->page_flip_event = event;
+ crtc->fb = fb;
return 0;
}
@@ -334,7 +331,6 @@ static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
imx_drm_handle_vblank(ipu_crtc->imx_crtc);
if (ipu_crtc->newfb) {
- ipu_crtc->base.fb = ipu_crtc->newfb;
ipu_crtc->newfb = NULL;
ipu_drm_set_base(&ipu_crtc->base, 0, 0);
ipu_crtc_handle_pageflip(ipu_crtc);
@@ -364,17 +360,12 @@ static void ipu_crtc_commit(struct drm_crtc *crtc)
ipu_fb_enable(ipu_crtc);
}
-static void ipu_crtc_load_lut(struct drm_crtc *crtc)
-{
-}
-
static struct drm_crtc_helper_funcs ipu_helper_funcs = {
.dpms = ipu_crtc_dpms,
.mode_fixup = ipu_crtc_mode_fixup,
.mode_set = ipu_crtc_mode_set,
.prepare = ipu_crtc_prepare,
.commit = ipu_crtc_commit,
- .load_lut = ipu_crtc_load_lut,
};
static int ipu_enable_vblank(struct drm_crtc *crtc)
@@ -572,3 +563,4 @@ module_platform_driver(ipu_drm_driver);
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx-ipuv3-crtc");
diff --git a/drivers/staging/imx-drm/parallel-display.c b/drivers/staging/imx-drm/parallel-display.c
index e7fba62..24aa9be 100644
--- a/drivers/staging/imx-drm/parallel-display.c
+++ b/drivers/staging/imx-drm/parallel-display.c
@@ -23,7 +23,6 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <linux/videodev2.h>
-#include <linux/pinctrl/consumer.h>
#include "imx-drm.h"
@@ -206,20 +205,11 @@ static int imx_pd_probe(struct platform_device *pdev)
struct imx_parallel_display *imxpd;
int ret;
const char *fmt;
- struct pinctrl *pinctrl;
imxpd = devm_kzalloc(&pdev->dev, sizeof(*imxpd), GFP_KERNEL);
if (!imxpd)
return -ENOMEM;
- pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(pinctrl)) {
- ret = PTR_ERR(pinctrl);
- dev_warn(&pdev->dev, "pinctrl_get_select_default failed with %d",
- ret);
- return ret;
- }
-
edidp = of_get_property(np, "edid", &imxpd->edid_len);
if (edidp)
imxpd->edid = kmemdup(edidp, imxpd->edid_len, GFP_KERNEL);
@@ -265,6 +255,7 @@ static const struct of_device_id imx_pd_dt_ids[] = {
{ .compatible = "fsl,imx-parallel-display", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, imx_pd_dt_ids);
static struct platform_driver imx_pd_driver = {
.probe = imx_pd_probe,
@@ -281,3 +272,4 @@ module_platform_driver(imx_pd_driver);
MODULE_DESCRIPTION("i.MX parallel display driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx-parallel-display");
diff --git a/drivers/staging/keucr/init.c b/drivers/staging/keucr/init.c
index 231611d..f5d41e0 100644
--- a/drivers/staging/keucr/init.c
+++ b/drivers/staging/keucr/init.c
@@ -19,13 +19,13 @@ int ENE_InitMedia(struct us_data *us)
int result;
BYTE MiscReg03 = 0;
- printk(KERN_INFO "--- Init Media ---\n");
- result = ENE_Read_BYTE(us, REG_CARD_STATUS, &MiscReg03);
+ dev_info(&us->pusb_dev->dev, "--- Init Media ---\n");
+ result = ene_read_byte(us, REG_CARD_STATUS, &MiscReg03);
if (result != USB_STOR_XFER_GOOD) {
- printk(KERN_ERR "Read register fail !!\n");
+ dev_err(&us->pusb_dev->dev, "Failed to read register\n");
return USB_STOR_TRANSPORT_ERROR;
}
- printk(KERN_INFO "MiscReg03 = %x\n", MiscReg03);
+ dev_info(&us->pusb_dev->dev, "MiscReg03 = %x\n", MiscReg03);
if (MiscReg03 & 0x02) {
if (!us->SM_Status.Ready && !us->MS_Status.Ready) {
@@ -39,9 +39,9 @@ int ENE_InitMedia(struct us_data *us)
}
/*
- * ENE_Read_BYTE() :
+ * ene_read_byte() :
*/
-int ENE_Read_BYTE(struct us_data *us, WORD index, void *buf)
+int ene_read_byte(struct us_data *us, WORD index, void *buf)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
@@ -67,11 +67,13 @@ int ENE_SMInit(struct us_data *us)
int result;
BYTE buf[0x200];
- printk(KERN_INFO "transport --- ENE_SMInit\n");
+ dev_dbg(&us->pusb_dev->dev, "transport --- ENE_SMInit\n");
result = ENE_LoadBinCode(us, SM_INIT_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
- printk(KERN_INFO "Load SM Init Code Fail !!\n");
+ dev_info(&us->pusb_dev->dev,
+ "Failed to load SmartMedia init code\n: result= %x\n",
+ result);
return USB_STOR_TRANSPORT_ERROR;
}
@@ -84,26 +86,33 @@ int ENE_SMInit(struct us_data *us)
result = ENE_SendScsiCmd(us, FDIR_READ, &buf, 0);
if (result != USB_STOR_XFER_GOOD) {
- printk(KERN_ERR
- "Execution SM Init Code Fail !! result = %x\n", result);
+ dev_err(&us->pusb_dev->dev,
+ "Failed to load SmartMedia init code: result = %x\n",
+ result);
return USB_STOR_TRANSPORT_ERROR;
}
- us->SM_Status = *(PSM_STATUS)&buf[0];
+ us->SM_Status = *(struct keucr_sm_status *)&buf[0];
us->SM_DeviceID = buf[1];
us->SM_CardID = buf[2];
if (us->SM_Status.Insert && us->SM_Status.Ready) {
- printk(KERN_INFO "Insert = %x\n", us->SM_Status.Insert);
- printk(KERN_INFO "Ready = %x\n", us->SM_Status.Ready);
- printk(KERN_INFO "WtP = %x\n", us->SM_Status.WtP);
- printk(KERN_INFO "DeviceID = %x\n", us->SM_DeviceID);
- printk(KERN_INFO "CardID = %x\n", us->SM_CardID);
+ dev_info(&us->pusb_dev->dev, "Insert = %x\n",
+ us->SM_Status.Insert);
+ dev_info(&us->pusb_dev->dev, "Ready = %x\n",
+ us->SM_Status.Ready);
+ dev_info(&us->pusb_dev->dev, "WtP = %x\n",
+ us->SM_Status.WtP);
+ dev_info(&us->pusb_dev->dev, "DeviceID = %x\n",
+ us->SM_DeviceID);
+ dev_info(&us->pusb_dev->dev, "CardID = %x\n",
+ us->SM_CardID);
MediaChange = 1;
Check_D_MediaFmt(us);
} else {
- printk(KERN_ERR "SM Card Not Ready --- %x\n", buf[0]);
+ dev_err(&us->pusb_dev->dev,
+ "SmartMedia Card Not Ready --- %x\n", buf[0]);
return USB_STOR_TRANSPORT_ERROR;
}
@@ -120,7 +129,7 @@ int ENE_LoadBinCode(struct us_data *us, BYTE flag)
/* void *buf; */
PBYTE buf;
- /* printk(KERN_INFO "transport --- ENE_LoadBinCode\n"); */
+ /* dev_info(&us->pusb_dev->dev, "transport --- ENE_LoadBinCode\n"); */
if (us->BIN_FLAG == flag)
return USB_STOR_TRANSPORT_GOOD;
@@ -130,11 +139,11 @@ int ENE_LoadBinCode(struct us_data *us, BYTE flag)
switch (flag) {
/* For SS */
case SM_INIT_PATTERN:
- printk(KERN_INFO "SM_INIT_PATTERN\n");
+ dev_dbg(&us->pusb_dev->dev, "SM_INIT_PATTERN\n");
memcpy(buf, SM_Init, 0x800);
break;
case SM_RW_PATTERN:
- printk(KERN_INFO "SM_RW_PATTERN\n");
+ dev_dbg(&us->pusb_dev->dev, "SM_RW_PATTERN\n");
memcpy(buf, SM_Rdwr, 0x800);
break;
}
@@ -165,12 +174,13 @@ int ENE_SendScsiCmd(struct us_data *us, BYTE fDir, void *buf, int use_sg)
cswlen = 0, partial = 0;
unsigned int residue;
- /* printk(KERN_INFO "transport --- ENE_SendScsiCmd\n"); */
+ /* dev_dbg(&us->pusb_dev->dev, "transport --- ENE_SendScsiCmd\n"); */
/* send cmd to out endpoint */
result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
bcb, US_BULK_CB_WRAP_LEN, NULL);
if (result != USB_STOR_XFER_GOOD) {
- printk(KERN_ERR "send cmd to out endpoint fail ---\n");
+ dev_err(&us->pusb_dev->dev,
+ "send cmd to out endpoint fail ---\n");
return USB_STOR_TRANSPORT_ERROR;
}
@@ -189,7 +199,7 @@ int ENE_SendScsiCmd(struct us_data *us, BYTE fDir, void *buf, int use_sg)
result = usb_stor_bulk_transfer_sg(us, pipe, buf,
transfer_length, 0, &partial);
if (result != USB_STOR_XFER_GOOD) {
- printk(KERN_ERR "data transfer fail ---\n");
+ dev_err(&us->pusb_dev->dev, "data transfer fail ---\n");
return USB_STOR_TRANSPORT_ERROR;
}
}
@@ -199,14 +209,16 @@ int ENE_SendScsiCmd(struct us_data *us, BYTE fDir, void *buf, int use_sg)
US_BULK_CS_WRAP_LEN, &cswlen);
if (result == USB_STOR_XFER_SHORT && cswlen == 0) {
- printk(KERN_WARNING "Received 0-length CSW; retrying...\n");
+ dev_warn(&us->pusb_dev->dev,
+ "Received 0-length CSW; retrying...\n");
result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
bcs, US_BULK_CS_WRAP_LEN, &cswlen);
}
if (result == USB_STOR_XFER_STALLED) {
/* get the status again */
- printk(KERN_WARNING "Attempting to get CSW (2nd try)...\n");
+ dev_warn(&us->pusb_dev->dev,
+ "Attempting to get CSW (2nd try)...\n");
result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
bcs, US_BULK_CS_WRAP_LEN, NULL);
}
@@ -243,7 +255,7 @@ int ENE_Read_Data(struct us_data *us, void *buf, unsigned int length)
struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf;
int result;
- /* printk(KERN_INFO "transport --- ENE_Read_Data\n"); */
+ /* dev_dbg(&us->pusb_dev->dev, "transport --- ENE_Read_Data\n"); */
/* set up the command wrapper */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
@@ -318,55 +330,3 @@ int ENE_Write_Data(struct us_data *us, void *buf, unsigned int length)
return USB_STOR_TRANSPORT_GOOD;
}
-/*
- * usb_stor_print_cmd():
- */
-void usb_stor_print_cmd(struct scsi_cmnd *srb)
-{
- PBYTE Cdb = srb->cmnd;
- DWORD cmd = Cdb[0];
- DWORD bn = ((Cdb[2] << 24) & 0xff000000) |
- ((Cdb[3] << 16) & 0x00ff0000) |
- ((Cdb[4] << 8) & 0x0000ff00) |
- ((Cdb[5] << 0) & 0x000000ff);
- WORD blen = ((Cdb[7] << 8) & 0xff00) | ((Cdb[8] << 0) & 0x00ff);
-
- switch (cmd) {
- case TEST_UNIT_READY:
- /* printk(KERN_INFO
- "scsi cmd %X --- SCSIOP_TEST_UNIT_READY\n", cmd); */
- break;
- case INQUIRY:
- printk(KERN_INFO "scsi cmd %X --- SCSIOP_INQUIRY\n", cmd);
- break;
- case MODE_SENSE:
- printk(KERN_INFO "scsi cmd %X --- SCSIOP_MODE_SENSE\n", cmd);
- break;
- case START_STOP:
- printk(KERN_INFO "scsi cmd %X --- SCSIOP_START_STOP\n", cmd);
- break;
- case READ_CAPACITY:
- printk(KERN_INFO "scsi cmd %X --- SCSIOP_READ_CAPACITY\n", cmd);
- break;
- case READ_10:
- /* printk(KERN_INFO
- "scsi cmd %X --- SCSIOP_READ,bn = %X, blen = %X\n"
- ,cmd, bn, blen); */
- break;
- case WRITE_10:
- /* printk(KERN_INFO
- "scsi cmd %X --- SCSIOP_WRITE,
- bn = %X, blen = %X\n" , cmd, bn, blen); */
- break;
- case ALLOW_MEDIUM_REMOVAL:
- printk(KERN_INFO
- "scsi cmd %X --- SCSIOP_ALLOW_MEDIUM_REMOVAL\n", cmd);
- break;
- default:
- printk(KERN_INFO "scsi cmd %X --- Other cmd\n", cmd);
- break;
- }
- bn = 0;
- blen = 0;
-}
-
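
The keucr/init.c conversion above swaps bare printk() calls for dev_info()/dev_err()/dev_dbg(), which prefix every message with the bus and device name and let dev_dbg() compile away unless DEBUG or dynamic debug is enabled. A small sketch of the idiom; the helper below is invented and not part of the driver:

	#include <linux/device.h>
	#include <linux/usb.h>

	void example_report_status(struct usb_device *udev, int status)
	{
		if (status)
			dev_err(&udev->dev, "command failed: status = %x\n", status);
		else
			dev_dbg(&udev->dev, "command completed\n"); /* silent unless DEBUG/dyndbg */
	}
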
diff --git a/drivers/staging/keucr/scsiglue.c b/drivers/staging/keucr/scsiglue.c
index 48e1005..ac3d34d 100644
--- a/drivers/staging/keucr/scsiglue.c
+++ b/drivers/staging/keucr/scsiglue.c
@@ -73,7 +73,8 @@ static int slave_configure(struct scsi_device *sdev)
if (us->fflags & US_FL_CAPACITY_HEURISTICS)
sdev->guess_capacity = 1;
if (sdev->scsi_level > SCSI_2)
- sdev->sdev_target->scsi_level = sdev->scsi_level = SCSI_2;
+ sdev->sdev_target->scsi_level = sdev->scsi_level
+ = SCSI_2;
sdev->retry_hwerror = 1;
sdev->allow_restart = 1;
sdev->last_sector_bug = 1;
@@ -144,7 +145,7 @@ static int command_abort(struct scsi_cmnd *srb)
scsi_lock(us_to_host(us));
if (us->srb != srb) {
scsi_unlock(us_to_host(us));
- printk("-- nothing to abort\n");
+ dev_info(&us->pusb_dev->dev, "-- nothing to abort\n");
return FAILED;
}
@@ -288,10 +289,7 @@ US_DO_ALL_FLAGS
***********************************************************************/
/* Output routine for the sysfs max_sectors file */
-/*
- * show_max_sectors()
- */
-static ssize_t show_max_sectors(struct device *dev,
+static ssize_t max_sectors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct scsi_device *sdev = to_scsi_device(dev);
@@ -301,10 +299,7 @@ static ssize_t show_max_sectors(struct device *dev,
}
/* Input routine for the sysfs max_sectors file */
-/*
- * store_max_sectors()
- */
-static ssize_t store_max_sectors(struct device *dev,
+static ssize_t max_sectors_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -318,9 +313,11 @@ static ssize_t store_max_sectors(struct device *dev,
}
return -EINVAL;
}
+static DEVICE_ATTR_RW(max_sectors);
-static DEVICE_ATTR(max_sectors, S_IRUGO | S_IWUSR, show_max_sectors, store_max_sectors);
-static struct device_attribute *sysfs_device_attr_list[] = {&dev_attr_max_sectors, NULL, };
+static struct device_attribute *sysfs_device_attr_list[] = {
+ &dev_attr_max_sectors, NULL,
+};
/* this defines our host template, with which we'll allocate hosts */
@@ -393,8 +390,9 @@ unsigned char usb_stor_sense_invalidCDB[18] = {
/*
* usb_stor_access_xfer_buf()
*/
-unsigned int usb_stor_access_xfer_buf(struct us_data *us, unsigned char *buffer,
- unsigned int buflen, struct scsi_cmnd *srb, struct scatterlist **sgptr,
+unsigned int usb_stor_access_xfer_buf(struct us_data *us,
+ unsigned char *buffer, unsigned int buflen,
+ struct scsi_cmnd *srb, struct scatterlist **sgptr,
unsigned int *offset, enum xfer_buf_dir dir)
{
unsigned int cnt;
@@ -424,7 +422,7 @@ unsigned int usb_stor_access_xfer_buf(struct us_data *us, unsigned char *buffer,
while (sglen > 0) {
unsigned int plen = min(sglen,
- (unsigned int)PAGE_SIZE - poff);
+ (unsigned int)PAGE_SIZE - poff);
unsigned char *ptr = kmap(page);
if (dir == TO_XFER_BUF)
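
The scsiglue.c hunk switches the sysfs attribute to DEVICE_ATTR_RW(), which builds dev_attr_<name> from callbacks named <name>_show() and <name>_store(); that naming requirement is why show_max_sectors/store_max_sectors were renamed. A sketch of the convention with an invented attribute called "example":

	#include <linux/device.h>
	#include <linux/kernel.h>

	static int example_value;

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", example_value);
	}

	static ssize_t example_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		if (kstrtoint(buf, 0, &example_value))
			return -EINVAL;
		return count;
	}
	/* expands to dev_attr_example with 0644 permissions */
	static DEVICE_ATTR_RW(example);

Reading the sysfs file then calls example_show(), and writing it calls example_store().
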
diff --git a/drivers/staging/keucr/smil.h b/drivers/staging/keucr/smil.h
index 24a636a..1538d7b 100644
--- a/drivers/staging/keucr/smil.h
+++ b/drivers/staging/keucr/smil.h
@@ -168,7 +168,7 @@ SmartMedia Model & Attribute
/***************************************************************************
Struct Definition
***************************************************************************/
-struct SSFDCTYPE {
+struct keucr_media_info {
BYTE Model;
BYTE Attribute;
BYTE MaxZones;
@@ -177,30 +177,14 @@ struct SSFDCTYPE {
WORD MaxLogBlocks;
};
-typedef struct SSFDCTYPE_T {
- BYTE Model;
- BYTE Attribute;
- BYTE MaxZones;
- BYTE MaxSectors;
- WORD MaxBlocks;
- WORD MaxLogBlocks;
-} *SSFDCTYPE_T;
-
-struct ADDRESS {
+struct keucr_media_address {
BYTE Zone; /* Zone Number */
BYTE Sector; /* Sector(512byte) Number on Block */
WORD PhyBlock; /* Physical Block Number on Zone */
WORD LogBlock; /* Logical Block Number of Zone */
};
-typedef struct ADDRESS_T {
- BYTE Zone; /* Zone Number */
- BYTE Sector; /* Sector(512byte) Number on Block */
- WORD PhyBlock; /* Physical Block Number on Zone */
- WORD LogBlock; /* Logical Block Number of Zone */
-} *ADDRESS_T;
-
-struct CIS_AREA {
+struct keucr_media_area {
BYTE Sector; /* Sector(512byte) Number on Block */
WORD PhyBlock; /* Physical Block Number on Zone 0 */
};
@@ -215,9 +199,9 @@ extern WORD ReadBlock;
extern WORD WriteBlock;
extern DWORD MediaChange;
-extern struct SSFDCTYPE Ssfdc;
-extern struct ADDRESS Media;
-extern struct CIS_AREA CisArea;
+extern struct keucr_media_info Ssfdc;
+extern struct keucr_media_address Media;
+extern struct keucr_media_area CisArea;
/*
* SMILMain.c
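
The smil.h changes drop the SSFDCTYPE_T/ADDRESS_T pointer typedefs in favour of plain struct tags, in line with the kernel coding-style preference against typedef'ing structures. A tiny before/after sketch using an invented struct, not the driver's own:

	/* discouraged: the typedef hides that the name is a pointer to a struct */
	typedef struct example_address {
		unsigned short phy_block;
		unsigned short log_block;
	} *EXAMPLE_ADDRESS_T;

	/* preferred: spell the type out at the call site */
	struct example_address addr;
	struct example_address *pa = &addr;
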
diff --git a/drivers/staging/keucr/smilmain.c b/drivers/staging/keucr/smilmain.c
index cc49038..2786808 100644
--- a/drivers/staging/keucr/smilmain.c
+++ b/drivers/staging/keucr/smilmain.c
@@ -4,204 +4,135 @@
#include "smcommon.h"
#include "smil.h"
-int Check_D_LogCHS (WORD *,BYTE *,BYTE *);
-void Initialize_D_Media (void);
-void PowerOff_D_Media (void);
-int Check_D_MediaPower (void);
-int Check_D_MediaExist (void);
-int Check_D_MediaWP (void);
-int Check_D_MediaFmt (struct us_data *);
-int Check_D_MediaFmtForEraseAll (struct us_data *);
-int Conv_D_MediaAddr (struct us_data *, DWORD);
-int Inc_D_MediaAddr (struct us_data *);
-int Check_D_FirstSect (void);
-int Check_D_LastSect (void);
-int Media_D_ReadOneSect (struct us_data *, WORD, BYTE *);
-int Media_D_WriteOneSect (struct us_data *, WORD, BYTE *);
-int Media_D_CopyBlockHead (struct us_data *);
-int Media_D_CopyBlockTail (struct us_data *);
-int Media_D_EraseOneBlock (void);
-int Media_D_EraseAllBlock (void);
-
-int Copy_D_BlockAll (struct us_data *, DWORD);
-int Copy_D_BlockHead (struct us_data *);
-int Copy_D_BlockTail (struct us_data *);
-int Reassign_D_BlockHead (struct us_data *);
-
-int Assign_D_WriteBlock (void);
-int Release_D_ReadBlock (struct us_data *);
-int Release_D_WriteBlock (struct us_data *);
-int Release_D_CopySector (struct us_data *);
-
-int Copy_D_PhyOneSect (struct us_data *);
-int Read_D_PhyOneSect (struct us_data *, WORD, BYTE *);
-int Write_D_PhyOneSect (struct us_data *, WORD, BYTE *);
-int Erase_D_PhyOneBlock (struct us_data *);
-
-int Set_D_PhyFmtValue (struct us_data *);
-int Search_D_CIS (struct us_data *);
-int Make_D_LogTable (struct us_data *);
-void Check_D_BlockIsFull (void);
-
-int MarkFail_D_PhyOneBlock (struct us_data *);
+int Check_D_LogCHS(WORD *, BYTE *, BYTE *);
+void Initialize_D_Media(void);
+void PowerOff_D_Media(void);
+int Check_D_MediaPower(void);
+int Check_D_MediaExist(void);
+int Check_D_MediaWP(void);
+int Check_D_MediaFmt(struct us_data *);
+int Check_D_MediaFmtForEraseAll(struct us_data *);
+int Conv_D_MediaAddr(struct us_data *, DWORD);
+int Inc_D_MediaAddr(struct us_data *);
+int Check_D_FirstSect(void);
+int Check_D_LastSect(void);
+int Media_D_ReadOneSect(struct us_data *, WORD, BYTE *);
+int Media_D_WriteOneSect(struct us_data *, WORD, BYTE *);
+int Media_D_CopyBlockHead(struct us_data *);
+int Media_D_CopyBlockTail(struct us_data *);
+int Media_D_EraseOneBlock(void);
+int Media_D_EraseAllBlock(void);
+
+int Copy_D_BlockAll(struct us_data *, DWORD);
+int Copy_D_BlockHead(struct us_data *);
+int Copy_D_BlockTail(struct us_data *);
+int Reassign_D_BlockHead(struct us_data *);
+
+int Assign_D_WriteBlock(void);
+int Release_D_ReadBlock(struct us_data *);
+int Release_D_WriteBlock(struct us_data *);
+int Release_D_CopySector(struct us_data *);
+
+int Copy_D_PhyOneSect(struct us_data *);
+int Read_D_PhyOneSect(struct us_data *, WORD, BYTE *);
+int Write_D_PhyOneSect(struct us_data *, WORD, BYTE *);
+int Erase_D_PhyOneBlock(struct us_data *);
+
+int Set_D_PhyFmtValue(struct us_data *);
+int Search_D_CIS(struct us_data *);
+int Make_D_LogTable(struct us_data *);
+void Check_D_BlockIsFull(void);
+
+int MarkFail_D_PhyOneBlock(struct us_data *);
DWORD ErrXDCode;
DWORD ErrCode;
-//BYTE SectBuf[SECTSIZE];
static BYTE WorkBuf[SECTSIZE];
static BYTE Redundant[REDTSIZE];
static BYTE WorkRedund[REDTSIZE];
-//WORD Log2Phy[MAX_ZONENUM][MAX_LOGBLOCK];
-static WORD *Log2Phy[MAX_ZONENUM]; // 128 x 1000, Log2Phy[MAX_ZONENUM][MAX_LOGBLOCK];
-static BYTE Assign[MAX_ZONENUM][MAX_BLOCKNUM/8];
+/* 128 x 1000, Log2Phy[MAX_ZONENUM][MAX_LOGBLOCK]; */
+static WORD *Log2Phy[MAX_ZONENUM];
+static BYTE Assign[MAX_ZONENUM][MAX_BLOCKNUM / 8];
static WORD AssignStart[MAX_ZONENUM];
WORD ReadBlock;
WORD WriteBlock;
DWORD MediaChange;
static DWORD SectCopyMode;
-//BIT Control Macro
-static BYTE BitData[] = { 0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80 } ;
-#define Set_D_Bit(a,b) (a[(BYTE)((b)/8)]|= BitData[(b)%8])
-#define Clr_D_Bit(a,b) (a[(BYTE)((b)/8)]&=~BitData[(b)%8])
-#define Chk_D_Bit(a,b) (a[(BYTE)((b)/8)] & BitData[(b)%8])
+/* BIT Control Macro */
+static BYTE BitData[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
+#define Set_D_Bit(a, b) (a[(BYTE)((b) / 8)] |= BitData[(b) % 8])
+#define Clr_D_Bit(a, b) (a[(BYTE)((b) / 8)] &= ~BitData[(b) % 8])
+#define Chk_D_Bit(a, b) (a[(BYTE)((b) / 8)] & BitData[(b) % 8])
-//extern PBYTE SMHostAddr;
BYTE IsSSFDCCompliance;
BYTE IsXDCompliance;
-//
-////Power Control & Media Exist Check Function
-////----- Init_D_SmartMedia() --------------------------------------------
-//int Init_D_SmartMedia(void)
-//{
-// int i;
-//
-// EMCR_Print("Init_D_SmartMedia start\n");
-// for (i=0; i<MAX_ZONENUM; i++)
-// {
-// if (Log2Phy[i]!=NULL)
-// {
-// EMCR_Print("ExFreePool Zone = %x, Addr = %x\n", i, Log2Phy[i]);
-// ExFreePool(Log2Phy[i]);
-// Log2Phy[i] = NULL;
-// }
-// }
-//
-// Initialize_D_Media();
-// return(NO_ERROR);
-//}
-
-//----- SM_FreeMem() -------------------------------------------------
+/* ----- SM_FreeMem() ------------------------------------------------- */
int SM_FreeMem(void)
{
int i;
pr_info("SM_FreeMem start\n");
- for (i=0; i<MAX_ZONENUM; i++)
- {
- if (Log2Phy[i]!=NULL)
- {
+ for (i = 0; i < MAX_ZONENUM; i++) {
+ if (Log2Phy[i] != NULL) {
pr_info("Free Zone = %x, Addr = %p\n", i, Log2Phy[i]);
kfree(Log2Phy[i]);
Log2Phy[i] = NULL;
}
}
- return(NO_ERROR);
+ return NO_ERROR;
}
-////----- Pwoff_D_SmartMedia() -------------------------------------------
-//int Pwoff_D_SmartMedia(void)
-//{
-// PowerOff_D_Media();
-// return(NO_ERROR);
-//}
-//
-////----- Check_D_SmartMedia() -------------------------------------------
-//int Check_D_SmartMedia(void)
-//{
-// if (Check_D_MediaExist())
-// return(ErrCode);
-//
-// return(NO_ERROR);
-//}
-//
-////----- Check_D_Parameter() --------------------------------------------
-//int Check_D_Parameter(PFDO_DEVICE_EXTENSION fdoExt,WORD *pcyl,BYTE *phead,BYTE *psect)
-//{
-// if (Check_D_MediaPower())
-// return(ErrCode);
-//
-// if (Check_D_MediaFmt(fdoExt))
-// return(ErrCode);
-//
-// if (Check_D_LogCHS(pcyl,phead,psect))
-// return(ErrCode);
-//
-// return(NO_ERROR);
-//}
-
-//SmartMedia Read/Write/Erase Function
-//----- Media_D_ReadSector() -------------------------------------------
-int Media_D_ReadSector(struct us_data *us, DWORD start,WORD count,BYTE *buf)
+/* SmartMedia Read/Write/Erase Function */
+/* ----- Media_D_ReadSector() ------------------------------------------- */
+int Media_D_ReadSector(struct us_data *us, DWORD start, WORD count, BYTE *buf)
{
WORD len, bn;
-	//if (Check_D_MediaPower()) ;		// in 6250, don't care
- // return(ErrCode);
- //if (Check_D_MediaFmt(fdoExt)) ;
- // return(ErrCode);
if (Conv_D_MediaAddr(us, start))
- return(ErrCode);
+ return ErrCode;
- while(1)
- {
+ while (1) {
len = Ssfdc.MaxSectors - Media.Sector;
if (count > len)
bn = len;
else
bn = count;
- //if (Media_D_ReadOneSect(fdoExt, SectBuf))
- //if (Media_D_ReadOneSect(fdoExt, count, buf))
- if (Media_D_ReadOneSect(us, bn, buf))
- {
+
+ if (Media_D_ReadOneSect(us, bn, buf)) {
ErrCode = ERR_EccReadErr;
- return(ErrCode);
+ return ErrCode;
}
Media.Sector += bn;
count -= bn;
- if (count<=0)
+ if (count <= 0)
break;
buf += bn * SECTSIZE;
if (Inc_D_MediaAddr(us))
- return(ErrCode);
+ return ErrCode;
}
- return(NO_ERROR);
+ return NO_ERROR;
}
-// here
-//----- Media_D_CopySector() ------------------------------------------
-int Media_D_CopySector(struct us_data *us, DWORD start,WORD count,BYTE *buf)
+/* here */
+/* ----- Media_D_CopySector() ------------------------------------------ */
+int Media_D_CopySector(struct us_data *us, DWORD start, WORD count, BYTE *buf)
{
- //DWORD mode;
- //int i;
WORD len, bn;
- //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
- //ADDRESS_T bb = (ADDRESS_T) &Media;
/* pr_info("Media_D_CopySector !!!\n"); */
if (Conv_D_MediaAddr(us, start))
- return(ErrCode);
+ return ErrCode;
- while(1)
- {
+ while (1) {
if (Assign_D_WriteBlock())
- return(ERROR);
+ return ERROR;
len = Ssfdc.MaxSectors - Media.Sector;
if (count > len)
@@ -209,607 +140,137 @@ int Media_D_CopySector(struct us_data *us, DWORD start,WORD count,BYTE *buf)
else
bn = count;
- //if (Ssfdc_D_CopyBlock(fdoExt,count,buf,Redundant))
- if (Ssfdc_D_CopyBlock(us,bn,buf,Redundant))
- {
+ if (Ssfdc_D_CopyBlock(us, bn, buf, Redundant)) {
ErrCode = ERR_WriteFault;
- return(ErrCode);
+ return ErrCode;
}
Media.Sector = 0x1F;
- //if (Release_D_ReadBlock(fdoExt))
- if (Release_D_CopySector(us))
- {
- if (ErrCode==ERR_HwError)
- {
+ if (Release_D_CopySector(us)) {
+ if (ErrCode == ERR_HwError) {
ErrCode = ERR_WriteFault;
- return(ErrCode);
+ return ErrCode;
}
}
count -= bn;
- if (count<=0)
+ if (count <= 0)
break;
buf += bn * SECTSIZE;
if (Inc_D_MediaAddr(us))
- return(ErrCode);
+ return ErrCode;
}
- return(NO_ERROR);
+ return NO_ERROR;
}
-//----- Release_D_CopySector() ------------------------------------------
+/* ----- Release_D_CopySector() ------------------------------------------ */
int Release_D_CopySector(struct us_data *us)
{
- //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
- //ADDRESS_T bb = (ADDRESS_T) &Media;
-
- Log2Phy[Media.Zone][Media.LogBlock]=WriteBlock;
- Media.PhyBlock=ReadBlock;
+ Log2Phy[Media.Zone][Media.LogBlock] = WriteBlock;
+ Media.PhyBlock = ReadBlock;
- if (Media.PhyBlock==NO_ASSIGN)
- {
- Media.PhyBlock=WriteBlock;
- return(SMSUCCESS);
+ if (Media.PhyBlock == NO_ASSIGN) {
+ Media.PhyBlock = WriteBlock;
+ return SMSUCCESS;
}
- Clr_D_Bit(Assign[Media.Zone],Media.PhyBlock);
- Media.PhyBlock=WriteBlock;
+ Clr_D_Bit(Assign[Media.Zone], Media.PhyBlock);
+ Media.PhyBlock = WriteBlock;
- return(SMSUCCESS);
-}
-/*
-//----- Media_D_WriteSector() ------------------------------------------
-int Media_D_WriteSector(PFDO_DEVICE_EXTENSION fdoExt, DWORD start,WORD count,BYTE *buf)
-{
- int i;
- WORD len, bn;
- SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
- ADDRESS_T bb = (ADDRESS_T) &Media;
-
- //if (Check_D_MediaPower())
- // return(ErrCode);
- //
- //if (Check_D_MediaFmt(fdoExt))
- // return(ErrCode);
- //
- //if (Check_D_MediaWP())
- // return(ErrCode);
-
- if (Conv_D_MediaAddr(fdoExt, start))
- return(ErrCode);
-
- //ENE_Print("Media_D_WriteSector --- Sector = %x\n", Media.Sector);
- if (Check_D_FirstSect())
- {
- if (Media_D_CopyBlockHead(fdoExt))
- {
- ErrCode = ERR_WriteFault;
- return(ErrCode);
- }
- }
-
- while(1)
- {
- if (!Check_D_FirstSect())
- {
- if (Assign_D_WriteBlock())
- return(ErrCode);
- }
-
- len = Ssfdc.MaxSectors - Media.Sector;
- if (count > len)
- bn = len;
- else
- bn = count;
- //for(i=0;i<SECTSIZE;i++)
- // SectBuf[i]=*buf++;
-
- //if (Media_D_WriteOneSect(fdoExt, SectBuf))
- if (Media_D_WriteOneSect(fdoExt, bn, buf))
- {
- ErrCode = ERR_WriteFault;
- return(ErrCode);
- }
-
- Media.Sector += bn - 1;
-
- if (!Check_D_LastSect())
- {
- if (Release_D_ReadBlock(fdoExt))
-
- { if (ErrCode==ERR_HwError)
- {
- ErrCode = ERR_WriteFault;
- return(ErrCode);
- }
- }
- }
-
- count -= bn;
-
- if (count<=0)
- break;
-
- buf += bn * SECTSIZE;
-
- //if (--count<=0)
- // break;
-
- if (Inc_D_MediaAddr(fdoExt))
- return(ErrCode);
- }
-
- if (!Check_D_LastSect())
- return(NO_ERROR);
-
- if (Inc_D_MediaAddr(fdoExt))
- return(ErrCode);
-
- if (Media_D_CopyBlockTail(fdoExt))
- {
- ErrCode = ERR_WriteFault;
- return(ErrCode);
- }
-
- return(NO_ERROR);
+ return SMSUCCESS;
}
-//
-////----- Media_D_EraseBlock() -------------------------------------------
-//int Media_D_EraseBlock(PFDO_DEVICE_EXTENSION fdoExt, DWORD start,WORD count)
-//{
-// if (Check_D_MediaPower())
-// return(ErrCode);
-//
-// if (Check_D_MediaFmt(fdoExt))
-// return(ErrCode);
-//
-// if (Check_D_MediaWP())
-// return(ErrCode);
-//
-// if (Conv_D_MediaAddr(start))
-// return(ErrCode);
-//
-// while(Check_D_FirstSect()) {
-// if (Inc_D_MediaAddr(fdoExt))
-// return(ErrCode);
-//
-// if (--count<=0)
-// return(NO_ERROR);
-// }
-//
-// while(1) {
-// if (!Check_D_LastSect())
-// if (Media_D_EraseOneBlock())
-// if (ErrCode==ERR_HwError)
-// {
-// ErrCode = ERR_WriteFault;
-// return(ErrCode);
-// }
-//
-// if (Inc_D_MediaAddr(fdoExt))
-// return(ErrCode);
-//
-// if (--count<=0)
-// return(NO_ERROR);
-// }
-//}
-//
-////----- Media_D_EraseAll() ---------------------------------------------
-//int Media_D_EraseAll(PFDO_DEVICE_EXTENSION fdoExt)
-//{
-// if (Check_D_MediaPower())
-// return(ErrCode);
-//
-// if (Check_D_MediaFmtForEraseAll(fdoExt))
-// return(ErrCode);
-//
-// if (Check_D_MediaWP())
-// return(ErrCode);
-//
-// if (Media_D_EraseAllBlock())
-// return(ErrCode);
-//
-// return(NO_ERROR);
-//}
-
-//SmartMedia Write Function for One Sector Write Mode
-//----- Media_D_OneSectWriteStart() ------------------------------------
-int Media_D_OneSectWriteStart(PFDO_DEVICE_EXTENSION fdoExt,DWORD start,BYTE *buf)
-{
-// int i;
-// SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
-// ADDRESS_T bb = (ADDRESS_T) &Media;
-//
-// //if (Check_D_MediaPower())
-// // return(ErrCode);
-// //if (Check_D_MediaFmt(fdoExt))
-// // return(ErrCode);
-// //if (Check_D_MediaWP())
-// // return(ErrCode);
-// if (Conv_D_MediaAddr(fdoExt, start))
-// return(ErrCode);
-//
-// if (Check_D_FirstSect())
-// if (Media_D_CopyBlockHead(fdoExt))
-// {
-// ErrCode = ERR_WriteFault;
-// return(ErrCode);
-// }
-//
-// if (!Check_D_FirstSect())
-// if (Assign_D_WriteBlock())
-// return(ErrCode);
-//
-// //for(i=0;i<SECTSIZE;i++)
-// // SectBuf[i]=*buf++;
-//
-// //if (Media_D_WriteOneSect(fdoExt, SectBuf))
-// if (Media_D_WriteOneSect(fdoExt, buf))
-// {
-// ErrCode = ERR_WriteFault;
-// return(ErrCode);
-// }
-//
-// if (!Check_D_LastSect())
-// {
-// if (Release_D_ReadBlock(fdoExt))
-// if (ErrCode==ERR_HwError)
-// {
-// ErrCode = ERR_WriteFault;
-// return(ErrCode);
-// }
-// }
-
- return(NO_ERROR);
-}
-
-//----- Media_D_OneSectWriteNext() -------------------------------------
-int Media_D_OneSectWriteNext(PFDO_DEVICE_EXTENSION fdoExt, BYTE *buf)
-{
-// int i;
-// SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
-// ADDRESS_T bb = (ADDRESS_T) &Media;
-//
-// if (Inc_D_MediaAddr(fdoExt))
-// return(ErrCode);
-//
-// if (!Check_D_FirstSect())
-// if (Assign_D_WriteBlock())
-// return(ErrCode);
-//
-// //for(i=0;i<SECTSIZE;i++)
-// // SectBuf[i]=*buf++;
-//
-// //if (Media_D_WriteOneSect(fdoExt, SectBuf))
-// if (Media_D_WriteOneSect(fdoExt, buf))
-// {
-// ErrCode = ERR_WriteFault;
-// return(ErrCode);
-// }
-//
-// if (!Check_D_LastSect())
-// {
-// if (Release_D_ReadBlock(fdoExt))
-// if (ErrCode==ERR_HwError)
-// {
-// ErrCode = ERR_WriteFault;
-// return(ErrCode);
-// }
-// }
-
- return(NO_ERROR);
-}
-
-//----- Media_D_OneSectWriteFlush() ------------------------------------
-int Media_D_OneSectWriteFlush(PFDO_DEVICE_EXTENSION fdoExt)
-{
- if (!Check_D_LastSect())
- return(NO_ERROR);
-
- if (Inc_D_MediaAddr(fdoExt))
- return(ErrCode);
-
- if (Media_D_CopyBlockTail(fdoExt))
- {
- ErrCode = ERR_WriteFault;
- return(ErrCode);
- }
- return(NO_ERROR);
-}
-//
-////LED Tern On/Off Subroutine
-////----- SM_EnableLED() -----------------------------------------------
-//void SM_EnableLED(PFDO_DEVICE_EXTENSION fdoExt, BOOLEAN enable)
-//{
-// if (fdoExt->Drive_IsSWLED)
-// {
-// if (enable)
-// Led_D_TernOn();
-// else
-// Led_D_TernOff();
-// }
-//}
-//
-////----- Led_D_TernOn() -------------------------------------------------
-//void Led_D_TernOn(void)
-//{
-// if (Check_D_CardStsChg())
-// MediaChange=ERROR;
-//
-// Cnt_D_LedOn();
-//}
-//
-////----- Led_D_TernOff() ------------------------------------------------
-//void Led_D_TernOff(void)
-//{
-// if (Check_D_CardStsChg())
-// MediaChange=ERROR;
-//
-// Cnt_D_LedOff();
-//}
-//
-////SmartMedia Logical Format Subroutine
-////----- Check_D_LogCHS() -----------------------------------------------
-//int Check_D_LogCHS(WORD *c,BYTE *h,BYTE *s)
-//{
-// switch(Ssfdc.Model) {
-// case SSFDC1MB: *c=125; *h= 4; *s= 4; break;
-// case SSFDC2MB: *c=125; *h= 4; *s= 8; break;
-// case SSFDC4MB: *c=250; *h= 4; *s= 8; break;
-// case SSFDC8MB: *c=250; *h= 4; *s=16; break;
-// case SSFDC16MB: *c=500; *h= 4; *s=16; break;
-// case SSFDC32MB: *c=500; *h= 8; *s=16; break;
-// case SSFDC64MB: *c=500; *h= 8; *s=32; break;
-// case SSFDC128MB: *c=500; *h=16; *s=32; break;
-// default: *c= 0; *h= 0; *s= 0; ErrCode = ERR_NoSmartMedia; return(ERROR);
-// }
-//
-// return(SMSUCCESS);
-//}
-//
-////Power Control & Media Exist Check Subroutine
-////----- Initialize_D_Media() -------------------------------------------
-//void Initialize_D_Media(void)
-//{
-// ErrCode = NO_ERROR;
-// MediaChange = ERROR;
-// SectCopyMode = COMPLETED;
-// Cnt_D_Reset();
-//}
-//
-////----- PowerOff_D_Media() ---------------------------------------------
-//void PowerOff_D_Media(void)
-//{
-// Cnt_D_PowerOff();
-//}
-//
-////----- Check_D_MediaPower() -------------------------------------------
-//int Check_D_MediaPower(void)
-//{
-// //usleep(56*1024);
-// if (Check_D_CardStsChg())
-// MediaChange = ERROR;
-// //usleep(56*1024);
-//	if ((!Check_D_CntPower())&&(!MediaChange))	// has power & Media not changed, then return success
-// return(SMSUCCESS);
-// //usleep(56*1024);
-//
-// if (Check_D_CardExist()) // Check if card is not exist, return err
-// {
-// ErrCode = ERR_NoSmartMedia;
-// MediaChange = ERROR;
-// return(ERROR);
-// }
-// //usleep(56*1024);
-// if (Cnt_D_PowerOn())
-// {
-// ErrCode = ERR_NoSmartMedia;
-// MediaChange = ERROR;
-// return(ERROR);
-// }
-// //usleep(56*1024);
-// Ssfdc_D_Reset(fdoExt);
-// //usleep(56*1024);
-// return(SMSUCCESS);
-//}
-//
-////-----Check_D_MediaExist() --------------------------------------------
-//int Check_D_MediaExist(void)
-//{
-// if (Check_D_CardStsChg())
-// MediaChange = ERROR;
-//
-// if (!Check_D_CardExist())
-// {
-// if (!MediaChange)
-// return(SMSUCCESS);
-//
-// ErrCode = ERR_ChangedMedia;
-// return(ERROR);
-// }
-//
-// ErrCode = ERR_NoSmartMedia;
-//
-// return(ERROR);
-//}
-//
-////----- Check_D_MediaWP() ----------------------------------------------
-//int Check_D_MediaWP(void)
-//{
-// if (Ssfdc.Attribute &MWP)
-// {
-// ErrCode = ERR_WrtProtect;
-// return(ERROR);
-// }
-//
-// return(SMSUCCESS);
-//}
-*/
-//SmartMedia Physical Format Test Subroutine
-//----- Check_D_MediaFmt() ---------------------------------------------
+/* SmartMedia Physical Format Test Subroutine */
+/* ----- Check_D_MediaFmt() --------------------------------------------- */
int Check_D_MediaFmt(struct us_data *us)
{
pr_info("Check_D_MediaFmt\n");
- //ULONG i,j, result=FALSE, zone,block;
- //usleep(56*1024);
if (!MediaChange)
- return(SMSUCCESS);
+ return SMSUCCESS;
MediaChange = ERROR;
SectCopyMode = COMPLETED;
- //usleep(56*1024);
- if (Set_D_PhyFmtValue(us))
- {
+ if (Set_D_PhyFmtValue(us)) {
ErrCode = ERR_UnknownMedia;
- return(ERROR);
+ return ERROR;
}
-
- //usleep(56*1024);
- if (Search_D_CIS(us))
- {
+
+ if (Search_D_CIS(us)) {
ErrCode = ERR_IllegalFmt;
- return(ERROR);
+ return ERROR;
}
-
- MediaChange = SMSUCCESS;
- return(SMSUCCESS);
+ MediaChange = SMSUCCESS;
+ return SMSUCCESS;
}
-/*
-////----- Check_D_BlockIsFull() ----------------------------------
-//void Check_D_BlockIsFull()
-//{
-// ULONG i, block;
-//
-// if (IsXDCompliance || IsSSFDCCompliance)
-// {
-// // If the blocks are full then return write-protect.
-// block = Ssfdc.MaxBlocks/8;
-// for (Media.Zone=0; Media.Zone<Ssfdc.MaxZones; Media.Zone++)
-// {
-// if (Log2Phy[Media.Zone]==NULL)
-// {
-// if (Make_D_LogTable())
-// {
-// ErrCode = ERR_IllegalFmt;
-// return;
-// }
-// }
-//
-// for (i=0; i<block; i++)
-// {
-// if (Assign[Media.Zone][i] != 0xFF)
-// return;
-// }
-// }
-// Ssfdc.Attribute |= WP;
-// }
-//}
-//
-//
-////----- Check_D_MediaFmtForEraseAll() ----------------------------------
-//int Check_D_MediaFmtForEraseAll(PFDO_DEVICE_EXTENSION fdoExt)
-//{
-// MediaChange = ERROR;
-// SectCopyMode = COMPLETED;
-//
-// if (Set_D_PhyFmtValue(fdoExt))
-// {
-// ErrCode = ERR_UnknownMedia;
-// return(ERROR);
-// }
-//
-// if (Search_D_CIS(fdoExt))
-// {
-// ErrCode = ERR_IllegalFmt;
-// return(ERROR);
-// }
-//
-// return(SMSUCCESS);
-//}
-*/
-//SmartMedia Physical Address Control Subroutine
-//----- Conv_D_MediaAddr() ---------------------------------------------
+
+/* SmartMedia Physical Address Control Subroutine */
+/* ----- Conv_D_MediaAddr() --------------------------------------------- */
int Conv_D_MediaAddr(struct us_data *us, DWORD addr)
{
DWORD temp;
- //ULONG zz;
- //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
- //ADDRESS_T bb = (ADDRESS_T) &Media;
- temp = addr/Ssfdc.MaxSectors;
- Media.Zone = (BYTE) (temp/Ssfdc.MaxLogBlocks);
+ temp = addr / Ssfdc.MaxSectors;
+ Media.Zone = (BYTE) (temp / Ssfdc.MaxLogBlocks);
- if (Log2Phy[Media.Zone]==NULL)
- {
- if (Make_D_LogTable(us))
- {
+ if (Log2Phy[Media.Zone] == NULL) {
+ if (Make_D_LogTable(us)) {
ErrCode = ERR_IllegalFmt;
- return(ERROR);
+ return ERROR;
}
}
- Media.Sector = (BYTE) (addr%Ssfdc.MaxSectors);
- Media.LogBlock = (WORD) (temp%Ssfdc.MaxLogBlocks);
+ Media.Sector = (BYTE) (addr % Ssfdc.MaxSectors);
+ Media.LogBlock = (WORD) (temp % Ssfdc.MaxLogBlocks);
- if (Media.Zone<Ssfdc.MaxZones)
- {
+ if (Media.Zone < Ssfdc.MaxZones) {
Clr_D_RedundantData(Redundant);
Set_D_LogBlockAddr(Redundant);
Media.PhyBlock = Log2Phy[Media.Zone][Media.LogBlock];
- return(SMSUCCESS);
+ return SMSUCCESS;
}
ErrCode = ERR_OutOfLBA;
- return(ERROR);
+ return ERROR;
}
-//----- Inc_D_MediaAddr() ----------------------------------------------
+/* ----- Inc_D_MediaAddr() ---------------------------------------------- */
int Inc_D_MediaAddr(struct us_data *us)
{
WORD LogBlock = Media.LogBlock;
- //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
- //ADDRESS_T bb = (ADDRESS_T) &Media;
- if (++Media.Sector<Ssfdc.MaxSectors)
- return(SMSUCCESS);
+ if (++Media.Sector < Ssfdc.MaxSectors)
+ return SMSUCCESS;
- if (Log2Phy[Media.Zone]==NULL)
- {
- if (Make_D_LogTable(us))
- {
+ if (Log2Phy[Media.Zone] == NULL) {
+ if (Make_D_LogTable(us)) {
ErrCode = ERR_IllegalFmt;
- return(ERROR);
+ return ERROR;
}
}
- Media.Sector=0;
+ Media.Sector = 0;
Media.LogBlock = LogBlock;
- if (++Media.LogBlock<Ssfdc.MaxLogBlocks)
- {
+ if (++Media.LogBlock < Ssfdc.MaxLogBlocks) {
Clr_D_RedundantData(Redundant);
Set_D_LogBlockAddr(Redundant);
- Media.PhyBlock=Log2Phy[Media.Zone][Media.LogBlock];
- return(SMSUCCESS);
+ Media.PhyBlock = Log2Phy[Media.Zone][Media.LogBlock];
+ return SMSUCCESS;
}
- Media.LogBlock=0;
+ Media.LogBlock = 0;
- if (++Media.Zone<Ssfdc.MaxZones)
- {
- if (Log2Phy[Media.Zone]==NULL)
- {
- if (Make_D_LogTable(us))
- {
+ if (++Media.Zone < Ssfdc.MaxZones) {
+ if (Log2Phy[Media.Zone] == NULL) {
+ if (Make_D_LogTable(us)) {
ErrCode = ERR_IllegalFmt;
- return(ERROR);
+ return ERROR;
}
}
@@ -817,1034 +278,508 @@ int Inc_D_MediaAddr(struct us_data *us)
Clr_D_RedundantData(Redundant);
Set_D_LogBlockAddr(Redundant);
- Media.PhyBlock=Log2Phy[Media.Zone][Media.LogBlock];
- return(SMSUCCESS);
+ Media.PhyBlock = Log2Phy[Media.Zone][Media.LogBlock];
+ return SMSUCCESS;
}
- Media.Zone=0;
+ Media.Zone = 0;
ErrCode = ERR_OutOfLBA;
- return(ERROR);
+ return ERROR;
}
-/*
-//----- Check_D_FirstSect() --------------------------------------------
-int Check_D_FirstSect(void)
-{
- SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
- ADDRESS_T bb = (ADDRESS_T) &Media;
-
- if (!Media.Sector)
- return(SMSUCCESS);
-
- return(ERROR);
-}
-
-//----- Check_D_LastSect() ---------------------------------------------
-int Check_D_LastSect(void)
-{
- SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
- ADDRESS_T bb = (ADDRESS_T) &Media;
- if (Media.Sector<(Ssfdc.MaxSectors-1))
- return(ERROR);
-
- return(SMSUCCESS);
-}
-*/
-//SmartMedia Read/Write Subroutine with Retry
-//----- Media_D_ReadOneSect() ------------------------------------------
+/* SmartMedia Read/Write Subroutine with Retry */
+/* ----- Media_D_ReadOneSect() ------------------------------------------ */
int Media_D_ReadOneSect(struct us_data *us, WORD count, BYTE *buf)
{
DWORD err, retry;
if (!Read_D_PhyOneSect(us, count, buf))
- return(SMSUCCESS);
- if (ErrCode==ERR_HwError)
- return(ERROR);
- if (ErrCode==ERR_DataStatus)
- return(ERROR);
+ return SMSUCCESS;
+ if (ErrCode == ERR_HwError)
+ return ERROR;
+ if (ErrCode == ERR_DataStatus)
+ return ERROR;
#ifdef RDERR_REASSIGN
- if (Ssfdc.Attribute &MWP)
- {
- if (ErrCode==ERR_CorReadErr)
- return(SMSUCCESS);
- return(ERROR);
+ if (Ssfdc.Attribute & MWP) {
+ if (ErrCode == ERR_CorReadErr)
+ return SMSUCCESS;
+ return ERROR;
}
- err=ErrCode;
- for(retry=0; retry<2; retry++)
- {
- if (Copy_D_BlockAll(us, (err==ERR_EccReadErr)?REQ_FAIL:REQ_ERASE))
- {
- if (ErrCode==ERR_HwError)
- return(ERROR);
+ err = ErrCode;
+ for (retry = 0; retry < 2; retry++) {
+ if (Copy_D_BlockAll(us,
+ (err == ERR_EccReadErr) ? REQ_FAIL : REQ_ERASE)) {
+ if (ErrCode == ERR_HwError)
+ return ERROR;
continue;
}
ErrCode = err;
- if (ErrCode==ERR_CorReadErr)
- return(SMSUCCESS);
- return(ERROR);
+ if (ErrCode == ERR_CorReadErr)
+ return SMSUCCESS;
+ return ERROR;
}
MediaChange = ERROR;
#else
- if (ErrCode==ERR_CorReadErr) return(SMSUCCESS);
+ if (ErrCode == ERR_CorReadErr)
+ return SMSUCCESS;
#endif
- return(ERROR);
-}
-/*
-//----- Media_D_WriteOneSect() -----------------------------------------
-int Media_D_WriteOneSect(PFDO_DEVICE_EXTENSION fdoExt, WORD count, BYTE *buf)
-{
- DWORD retry;
- SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
- ADDRESS_T bb = (ADDRESS_T) &Media;
-
- if (!Write_D_PhyOneSect(fdoExt, count, buf))
- return(SMSUCCESS);
- if (ErrCode==ERR_HwError)
- return(ERROR);
-
- for(retry=1; retry<2; retry++)
- {
- if (Reassign_D_BlockHead(fdoExt))
- {
- if (ErrCode==ERR_HwError)
- return(ERROR);
- continue;
- }
-
- if (!Write_D_PhyOneSect(fdoExt, count, buf))
- return(SMSUCCESS);
- if (ErrCode==ERR_HwError)
- return(ERROR);
- }
-
- if (Release_D_WriteBlock(fdoExt))
- return(ERROR);
-
- ErrCode = ERR_WriteFault;
- MediaChange = ERROR;
- return(ERROR);
-}
-
-//SmartMedia Data Copy Subroutine with Retry
-//----- Media_D_CopyBlockHead() ----------------------------------------
-int Media_D_CopyBlockHead(PFDO_DEVICE_EXTENSION fdoExt)
-{
- DWORD retry;
-
- for(retry=0; retry<2; retry++)
- {
- if (!Copy_D_BlockHead(fdoExt))
- return(SMSUCCESS);
- if (ErrCode==ERR_HwError)
- return(ERROR);
- }
-
- MediaChange = ERROR;
- return(ERROR);
+ return ERROR;
}
-//----- Media_D_CopyBlockTail() ----------------------------------------
-int Media_D_CopyBlockTail(PFDO_DEVICE_EXTENSION fdoExt)
-{
- DWORD retry;
-
- if (!Copy_D_BlockTail(fdoExt))
- return(SMSUCCESS);
- if (ErrCode==ERR_HwError)
- return(ERROR);
-
- for(retry=1; retry<2; retry++)
- {
- if (Reassign_D_BlockHead(fdoExt))
- {
- if (ErrCode==ERR_HwError)
- return(ERROR);
- continue;
- }
-
- if (!Copy_D_BlockTail(fdoExt))
- return(SMSUCCESS);
- if (ErrCode==ERR_HwError)
- return(ERROR);
- }
-
- if (Release_D_WriteBlock(fdoExt))
- return(ERROR);
-
- ErrCode = ERR_WriteFault;
- MediaChange = ERROR;
- return(ERROR);
-}
-//
-////----- Media_D_EraseOneBlock() ----------------------------------------
-//int Media_D_EraseOneBlock(void)
-//{
-// WORD LogBlock = Media.LogBlock;
-// WORD PhyBlock = Media.PhyBlock;
-// SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
-// ADDRESS_T bb = (ADDRESS_T) &Media;
-//
-// if (Media.PhyBlock==NO_ASSIGN)
-// return(SMSUCCESS);
-//
-// if (Log2Phy[Media.Zone]==NULL)
-// {
-// if (Make_D_LogTable())
-// {
-// ErrCode = ERR_IllegalFmt;
-// return(ERROR);
-// }
-// }
-// Media.LogBlock = LogBlock;
-// Media.PhyBlock = PhyBlock;
-//
-// Log2Phy[Media.Zone][Media.LogBlock]=NO_ASSIGN;
-//
-// if (Erase_D_PhyOneBlock(fdoExt))
-// {
-// if (ErrCode==ERR_HwError)
-// return(ERROR);
-// if (MarkFail_D_PhyOneBlock())
-// return(ERROR);
-//
-// ErrCode = ERR_WriteFault;
-// return(ERROR);
-// }
-//
-// Clr_D_Bit(Assign[Media.Zone],Media.PhyBlock);
-// Media.PhyBlock=NO_ASSIGN;
-// return(SMSUCCESS);
-//}
-//
-////SmartMedia Erase Subroutine
-////----- Media_D_EraseAllBlock() ----------------------------------------
-//int Media_D_EraseAllBlock(void)
-//{
-// WORD cis=0;
-//
-// SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
-// ADDRESS_T bb = (ADDRESS_T) &Media;
-//
-// MediaChange = ERROR;
-// Media.Sector = 0;
-//
-// for(Media.Zone=0; Media.Zone<Ssfdc.MaxZones; Media.Zone++)
-// for(Media.PhyBlock=0; Media.PhyBlock<Ssfdc.MaxBlocks; Media.PhyBlock++) {
-// if (Ssfdc_D_ReadRedtData(Redundant))
-// {
-// Ssfdc_D_Reset(fdoExt);
-// return(ERROR);
-// }
-//
-// Ssfdc_D_Reset(fdoExt);
-// if (!Check_D_FailBlock(Redundant))
-// {
-// if (cis)
-// {
-// if (Ssfdc_D_EraseBlock(fdoExt))
-// {
-// ErrCode = ERR_HwError;
-// return(ERROR);
-// }
-//
-// if (Ssfdc_D_CheckStatus())
-// {
-// if (MarkFail_D_PhyOneBlock())
-// return(ERROR);
-// }
-//
-// continue;
-// }
-//
-// if (Media.PhyBlock!=CisArea.PhyBlock)
-// {
-// ErrCode = ERR_IllegalFmt;
-// return(ERROR);
-// }
-//
-// cis++;
-// }
-//
-// }
-// return(SMSUCCESS);
-//}
-*/
-//SmartMedia Physical Sector Data Copy Subroutine
-//----- Copy_D_BlockAll() ----------------------------------------------
+/* SmartMedia Physical Sector Data Copy Subroutine */
+/* ----- Copy_D_BlockAll() ---------------------------------------------- */
int Copy_D_BlockAll(struct us_data *us, DWORD mode)
{
BYTE sect;
- //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
- //ADDRESS_T bb = (ADDRESS_T) &Media;
- sect=Media.Sector;
+ sect = Media.Sector;
if (Assign_D_WriteBlock())
- return(ERROR);
- if (mode==REQ_FAIL)
- SectCopyMode=REQ_FAIL;
-
- for(Media.Sector=0; Media.Sector<Ssfdc.MaxSectors; Media.Sector++)
- {
- if (Copy_D_PhyOneSect(us))
- {
- if (ErrCode==ERR_HwError)
- return(ERROR);
+ return ERROR;
+ if (mode == REQ_FAIL)
+ SectCopyMode = REQ_FAIL;
+
+ for (Media.Sector = 0; Media.Sector < Ssfdc.MaxSectors;
+ Media.Sector++) {
+ if (Copy_D_PhyOneSect(us)) {
+ if (ErrCode == ERR_HwError)
+ return ERROR;
if (Release_D_WriteBlock(us))
- return(ERROR);
+ return ERROR;
ErrCode = ERR_WriteFault;
- Media.PhyBlock=ReadBlock;
- Media.Sector=sect;
+ Media.PhyBlock = ReadBlock;
+ Media.Sector = sect;
- return(ERROR);
+ return ERROR;
}
}
if (Release_D_ReadBlock(us))
- return(ERROR);
-
- Media.PhyBlock=WriteBlock;
- Media.Sector=sect;
- return(SMSUCCESS);
-}
-/*
-//----- Copy_D_BlockHead() ---------------------------------------------
-int Copy_D_BlockHead(PFDO_DEVICE_EXTENSION fdoExt)
-{
- BYTE sect;
- SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
- ADDRESS_T bb = (ADDRESS_T) &Media;
-
- sect=Media.Sector;
- if (Assign_D_WriteBlock())
- return(ERROR);
-
- for(Media.Sector=0; Media.Sector<sect; Media.Sector++)
- {
- if (Copy_D_PhyOneSect(fdoExt))
- {
- if (ErrCode==ERR_HwError)
- return(ERROR);
- if (Release_D_WriteBlock(fdoExt))
- return(ERROR);
-
- ErrCode = ERR_WriteFault;
- Media.PhyBlock=ReadBlock;
- Media.Sector=sect;
-
- return(ERROR);
- }
- }
-
- Media.PhyBlock=WriteBlock;
- Media.Sector=sect;
- return(SMSUCCESS);
-}
+ return ERROR;
-//----- Copy_D_BlockTail() ---------------------------------------------
-int Copy_D_BlockTail(PFDO_DEVICE_EXTENSION fdoExt)
-{
- BYTE sect;
- SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
- ADDRESS_T bb = (ADDRESS_T) &Media;
-
- for(sect=Media.Sector; Media.Sector<Ssfdc.MaxSectors; Media.Sector++)
- {
- if (Copy_D_PhyOneSect(fdoExt))
- {
- if (ErrCode==ERR_HwError)
- return(ERROR);
-
- Media.PhyBlock=WriteBlock;
- Media.Sector=sect;
-
- return(ERROR);
- }
- }
-
- if (Release_D_ReadBlock(fdoExt))
- return(ERROR);
-
- Media.PhyBlock=WriteBlock;
- Media.Sector=sect;
- return(SMSUCCESS);
+ Media.PhyBlock = WriteBlock;
+ Media.Sector = sect;
+ return SMSUCCESS;
}
-//----- Reassign_D_BlockHead() -----------------------------------------
-int Reassign_D_BlockHead(PFDO_DEVICE_EXTENSION fdoExt)
-{
- DWORD mode;
- WORD block;
- BYTE sect;
- SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
- ADDRESS_T bb = (ADDRESS_T) &Media;
-
- mode=SectCopyMode;
- block=ReadBlock;
- sect=Media.Sector;
-
- if (Assign_D_WriteBlock())
- return(ERROR);
-
- SectCopyMode=REQ_FAIL;
-
- for(Media.Sector=0; Media.Sector<sect; Media.Sector++)
- {
- if (Copy_D_PhyOneSect(fdoExt))
- {
- if (ErrCode==ERR_HwError)
- return(ERROR);
- if (Release_D_WriteBlock(fdoExt))
- return(ERROR);
-
- ErrCode = ERR_WriteFault;
- SectCopyMode=mode;
- WriteBlock=ReadBlock;
- ReadBlock=block;
- Media.Sector=sect;
- Media.PhyBlock=WriteBlock;
-
- return(ERROR);
- }
- }
-
- if (Release_D_ReadBlock(fdoExt))
- return(ERROR);
-
- SectCopyMode=mode;
- ReadBlock=block;
- Media.Sector=sect;
- Media.PhyBlock=WriteBlock;
- return(SMSUCCESS);
-}
-*/
-//SmartMedia Physical Block Assign/Release Subroutine
-//----- Assign_D_WriteBlock() ------------------------------------------
+/* SmartMedia Physical Block Assign/Release Subroutine */
+/* ----- Assign_D_WriteBlock() ------------------------------------------ */
int Assign_D_WriteBlock(void)
{
- //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
- //ADDRESS_T bb = (ADDRESS_T) &Media;
- ReadBlock=Media.PhyBlock;
-
- for(WriteBlock=AssignStart[Media.Zone]; WriteBlock<Ssfdc.MaxBlocks; WriteBlock++)
- {
- if (!Chk_D_Bit(Assign[Media.Zone],WriteBlock))
- {
- Set_D_Bit(Assign[Media.Zone],WriteBlock);
- AssignStart[Media.Zone]=WriteBlock+1;
- Media.PhyBlock=WriteBlock;
- SectCopyMode=REQ_ERASE;
- //ErrXDCode = NO_ERROR;
- return(SMSUCCESS);
+ ReadBlock = Media.PhyBlock;
+
+ for (WriteBlock = AssignStart[Media.Zone];
+ WriteBlock < Ssfdc.MaxBlocks; WriteBlock++) {
+ if (!Chk_D_Bit(Assign[Media.Zone], WriteBlock)) {
+ Set_D_Bit(Assign[Media.Zone], WriteBlock);
+ AssignStart[Media.Zone] = WriteBlock + 1;
+ Media.PhyBlock = WriteBlock;
+ SectCopyMode = REQ_ERASE;
+ return SMSUCCESS;
}
}
- for(WriteBlock=0; WriteBlock<AssignStart[Media.Zone]; WriteBlock++)
- {
- if (!Chk_D_Bit(Assign[Media.Zone],WriteBlock))
- {
- Set_D_Bit(Assign[Media.Zone],WriteBlock);
- AssignStart[Media.Zone]=WriteBlock+1;
- Media.PhyBlock=WriteBlock;
- SectCopyMode=REQ_ERASE;
- //ErrXDCode = NO_ERROR;
- return(SMSUCCESS);
+ for (WriteBlock = 0;
+ WriteBlock < AssignStart[Media.Zone]; WriteBlock++) {
+ if (!Chk_D_Bit(Assign[Media.Zone], WriteBlock)) {
+ Set_D_Bit(Assign[Media.Zone], WriteBlock);
+ AssignStart[Media.Zone] = WriteBlock + 1;
+ Media.PhyBlock = WriteBlock;
+ SectCopyMode = REQ_ERASE;
+ return SMSUCCESS;
}
}
- WriteBlock=NO_ASSIGN;
+ WriteBlock = NO_ASSIGN;
ErrCode = ERR_WriteFault;
- // For xD test
- //Ssfdc.Attribute |= WP;
- //ErrXDCode = ERR_WrtProtect;
- return(ERROR);
+
+ return ERROR;
}
-//----- Release_D_ReadBlock() ------------------------------------------
+/* ----- Release_D_ReadBlock() ------------------------------------------ */
int Release_D_ReadBlock(struct us_data *us)
{
DWORD mode;
- //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
- //ADDRESS_T bb = (ADDRESS_T) &Media;
- mode=SectCopyMode;
- SectCopyMode=COMPLETED;
+ mode = SectCopyMode;
+ SectCopyMode = COMPLETED;
- if (mode==COMPLETED)
- return(SMSUCCESS);
+ if (mode == COMPLETED)
+ return SMSUCCESS;
- Log2Phy[Media.Zone][Media.LogBlock]=WriteBlock;
- Media.PhyBlock=ReadBlock;
+ Log2Phy[Media.Zone][Media.LogBlock] = WriteBlock;
+ Media.PhyBlock = ReadBlock;
- if (Media.PhyBlock==NO_ASSIGN)
- {
- Media.PhyBlock=WriteBlock;
- return(SMSUCCESS);
+ if (Media.PhyBlock == NO_ASSIGN) {
+ Media.PhyBlock = WriteBlock;
+ return SMSUCCESS;
}
- if (mode==REQ_ERASE)
- {
- if (Erase_D_PhyOneBlock(us))
- {
- if (ErrCode==ERR_HwError) return(ERROR);
- if (MarkFail_D_PhyOneBlock(us)) return(ERROR);
- }
- else
- Clr_D_Bit(Assign[Media.Zone],Media.PhyBlock);
- }
- else if (MarkFail_D_PhyOneBlock(us))
- return(ERROR);
+ if (mode == REQ_ERASE) {
+ if (Erase_D_PhyOneBlock(us)) {
+ if (ErrCode == ERR_HwError)
+ return ERROR;
+ if (MarkFail_D_PhyOneBlock(us))
+ return ERROR;
+ } else
+ Clr_D_Bit(Assign[Media.Zone], Media.PhyBlock);
+ } else if (MarkFail_D_PhyOneBlock(us))
+ return ERROR;
- Media.PhyBlock=WriteBlock;
- return(SMSUCCESS);
+ Media.PhyBlock = WriteBlock;
+ return SMSUCCESS;
}
-//----- Release_D_WriteBlock() -----------------------------------------
+/* ----- Release_D_WriteBlock() ----------------------------------------- */
int Release_D_WriteBlock(struct us_data *us)
{
- //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
- //ADDRESS_T bb = (ADDRESS_T) &Media;
- SectCopyMode=COMPLETED;
- Media.PhyBlock=WriteBlock;
+ SectCopyMode = COMPLETED;
+ Media.PhyBlock = WriteBlock;
if (MarkFail_D_PhyOneBlock(us))
- return(ERROR);
+ return ERROR;
- Media.PhyBlock=ReadBlock;
- return(SMSUCCESS);
+ Media.PhyBlock = ReadBlock;
+ return SMSUCCESS;
}
-//SmartMedia Physical Sector Data Copy Subroutine
-//----- Copy_D_PhyOneSect() --------------------------------------------
+/* SmartMedia Physical Sector Data Copy Subroutine */
+/* ----- Copy_D_PhyOneSect() -------------------------------------------- */
int Copy_D_PhyOneSect(struct us_data *us)
{
int i;
DWORD err, retry;
- //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
- //ADDRESS_T bb = (ADDRESS_T) &Media;
/* pr_info("Copy_D_PhyOneSect --- Secotr = %x\n", Media.Sector); */
- if (ReadBlock!=NO_ASSIGN)
- {
- Media.PhyBlock=ReadBlock;
- for(retry=0; retry<2; retry++)
- {
- if (retry!=0)
- {
+ if (ReadBlock != NO_ASSIGN) {
+ Media.PhyBlock = ReadBlock;
+ for (retry = 0; retry < 2; retry++) {
+ if (retry != 0) {
Ssfdc_D_Reset(us);
- if (Ssfdc_D_ReadCisSect(us,WorkBuf,WorkRedund))
- { ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); }
+ if (Ssfdc_D_ReadCisSect(us, WorkBuf,
+ WorkRedund)) {
+ ErrCode = ERR_HwError;
+ MediaChange = ERROR;
+ return ERROR;
+ }
- if (Check_D_CISdata(WorkBuf,WorkRedund))
- { ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); }
+ if (Check_D_CISdata(WorkBuf, WorkRedund)) {
+ ErrCode = ERR_HwError;
+ MediaChange = ERROR;
+ return ERROR;
+ }
+ }
+
+ if (Ssfdc_D_ReadSect(us, WorkBuf, WorkRedund)) {
+ ErrCode = ERR_HwError;
+ MediaChange = ERROR;
+ return ERROR;
+ }
+ if (Check_D_DataStatus(WorkRedund)) {
+ err = ERROR;
+ break;
+ }
+ if (!Check_D_ReadError(WorkRedund)) {
+ err = SMSUCCESS;
+ break;
+ }
+ if (!Check_D_Correct(WorkBuf, WorkRedund)) {
+ err = SMSUCCESS;
+ break;
}
- if (Ssfdc_D_ReadSect(us,WorkBuf,WorkRedund))
- { ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); }
- if (Check_D_DataStatus(WorkRedund))
- { err=ERROR; break; }
- if (!Check_D_ReadError(WorkRedund))
- { err=SMSUCCESS; break; }
- if (!Check_D_Correct(WorkBuf,WorkRedund))
- { err=SMSUCCESS; break; }
-
- err=ERROR;
- SectCopyMode=REQ_FAIL;
+ err = ERROR;
+ SectCopyMode = REQ_FAIL;
}
- }
- else
- {
- err=SMSUCCESS;
- for(i=0; i<SECTSIZE; i++)
- WorkBuf[i]=DUMMY_DATA;
+ } else {
+ err = SMSUCCESS;
+ for (i = 0; i < SECTSIZE; i++)
+ WorkBuf[i] = DUMMY_DATA;
Clr_D_RedundantData(WorkRedund);
}
Set_D_LogBlockAddr(WorkRedund);
- if (err==ERROR)
- {
+ if (err == ERROR) {
Set_D_RightECC(WorkRedund);
Set_D_DataStaus(WorkRedund);
}
- Media.PhyBlock=WriteBlock;
+ Media.PhyBlock = WriteBlock;
- if (Ssfdc_D_WriteSectForCopy(us, WorkBuf, WorkRedund))
- { ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); }
- if (Ssfdc_D_CheckStatus())
- { ErrCode = ERR_WriteFault; return(ERROR); }
+ if (Ssfdc_D_WriteSectForCopy(us, WorkBuf, WorkRedund)) {
+ ErrCode = ERR_HwError;
+ MediaChange = ERROR;
+ return ERROR;
+ }
+ if (Ssfdc_D_CheckStatus()) {
+ ErrCode = ERR_WriteFault;
+ return ERROR;
+ }
- Media.PhyBlock=ReadBlock;
- return(SMSUCCESS);
+ Media.PhyBlock = ReadBlock;
+ return SMSUCCESS;
}
-//SmartMedia Physical Sector Read/Write/Erase Subroutine
-//----- Read_D_PhyOneSect() --------------------------------------------
+/* SmartMedia Physical Sector Read/Write/Erase Subroutine */
+/* ----- Read_D_PhyOneSect() -------------------------------------------- */
int Read_D_PhyOneSect(struct us_data *us, WORD count, BYTE *buf)
{
int i;
DWORD retry;
- //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
- //ADDRESS_T bb = (ADDRESS_T) &Media;
-
- if (Media.PhyBlock==NO_ASSIGN)
- {
- for(i=0; i<SECTSIZE; i++)
- *buf++=DUMMY_DATA;
- return(SMSUCCESS);
+
+ if (Media.PhyBlock == NO_ASSIGN) {
+ for (i = 0; i < SECTSIZE; i++)
+ *buf++ = DUMMY_DATA;
+ return SMSUCCESS;
}
- for(retry=0; retry<2; retry++)
- {
- if (retry!=0)
- {
+ for (retry = 0; retry < 2; retry++) {
+ if (retry != 0) {
Ssfdc_D_Reset(us);
- if (Ssfdc_D_ReadCisSect(us,WorkBuf,WorkRedund))
- { ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); }
- if (Check_D_CISdata(WorkBuf,WorkRedund))
- { ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); }
+ if (Ssfdc_D_ReadCisSect(us, WorkBuf, WorkRedund)) {
+ ErrCode = ERR_HwError;
+ MediaChange = ERROR;
+ return ERROR;
+ }
+ if (Check_D_CISdata(WorkBuf, WorkRedund)) {
+ ErrCode = ERR_HwError;
+ MediaChange = ERROR;
+ return ERROR;
+ }
}
- //if (Ssfdc_D_ReadSect(fdoExt,buf,Redundant))
- if (Ssfdc_D_ReadBlock(us,count,buf,Redundant))
- { ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); }
- if (Check_D_DataStatus(Redundant))
- { ErrCode = ERR_DataStatus; return(ERROR); }
+ if (Ssfdc_D_ReadBlock(us, count, buf, Redundant)) {
+ ErrCode = ERR_HwError;
+ MediaChange = ERROR;
+ return ERROR;
+ }
+ if (Check_D_DataStatus(Redundant)) {
+ ErrCode = ERR_DataStatus;
+ return ERROR;
+ }
if (!Check_D_ReadError(Redundant))
- return(SMSUCCESS);
+ return SMSUCCESS;
- if (!Check_D_Correct(buf,Redundant))
- { ErrCode = ERR_CorReadErr; return(ERROR); }
+ if (!Check_D_Correct(buf, Redundant)) {
+ ErrCode = ERR_CorReadErr;
+ return ERROR;
+ }
}
ErrCode = ERR_EccReadErr;
- return(ERROR);
+ return ERROR;
}
-/*
-//----- Write_D_PhyOneSect() -------------------------------------------
-int Write_D_PhyOneSect(PFDO_DEVICE_EXTENSION fdoExt, WORD count, BYTE *buf)
-{
- SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
- ADDRESS_T bb = (ADDRESS_T) &Media;
-
- //if (Ssfdc_D_WriteSect(fdoExt,buf,Redundant))
- if (Ssfdc_D_WriteBlock(fdoExt,count,buf,Redundant))
- { ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); }
- if (Ssfdc_D_CheckStatus())
- { ErrCode = ERR_WriteFault; return(ERROR); }
- return(SMSUCCESS);
-}
-*/
-//----- Erase_D_PhyOneBlock() ------------------------------------------
+/* ----- Erase_D_PhyOneBlock() ------------------------------------------ */
int Erase_D_PhyOneBlock(struct us_data *us)
{
- //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
- //ADDRESS_T bb = (ADDRESS_T) &Media;
-
- if (Ssfdc_D_EraseBlock(us))
- { ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); }
- if (Ssfdc_D_CheckStatus())
- { ErrCode = ERR_WriteFault; return(ERROR); }
+ if (Ssfdc_D_EraseBlock(us)) {
+ ErrCode = ERR_HwError;
+ MediaChange = ERROR;
+ return ERROR;
+ }
+ if (Ssfdc_D_CheckStatus()) {
+ ErrCode = ERR_WriteFault;
+ return ERROR;
+ }
- return(SMSUCCESS);
+ return SMSUCCESS;
}
-//SmartMedia Physical Format Check Local Subroutine
-//----- Set_D_PhyFmtValue() --------------------------------------------
+/* SmartMedia Physical Format Check Local Subroutine */
+/* ----- Set_D_PhyFmtValue() -------------------------------------------- */
int Set_D_PhyFmtValue(struct us_data *us)
{
-// PPDO_DEVICE_EXTENSION pdoExt;
-// BYTE idcode[4];
-// DWORD UserDefData_1, UserDefData_2, Data, mask;
-//
-// //if (!fdoExt->ChildDeviceObject) return(ERROR);
-// //pdoExt = fdoExt->ChildDeviceObject->DeviceExtension;
-//
-// Ssfdc_D_ReadID(idcode, READ_ID_1);
-//
- //if (Set_D_SsfdcModel(idcode[1]))
- if (Set_D_SsfdcModel(us->SM_DeviceID))
- return(ERROR);
-
-// //Use Multi-function pin to differentiate SM and xD.
-// UserDefData_1 = ReadPCIReg(fdoExt->BusID, fdoExt->DevID, fdoExt->FuncID, PCI_REG_USER_DEF) & 0x80;
-// if (UserDefData_1)
-// {
-// if ( READ_PORT_BYTE(SM_REG_INT_STATUS) & 0x80 ) fdoExt->DiskType = DISKTYPE_XD;
-// if ( READ_PORT_BYTE(SM_REG_INT_STATUS) & 0x40 ) fdoExt->DiskType = DISKTYPE_SM;
-//
-// if ( IsXDCompliance && (fdoExt->DiskType == DISKTYPE_XD) )
-// {
-// Ssfdc_D_ReadID(idcode, READ_ID_3);
-// if (idcode[2] != 0xB5)
-// return(ERROR);
-// }
-// }
-//
-// //Use GPIO to differentiate SM and xD.
-// UserDefData_2 = ReadPCIReg(fdoExt->BusID, fdoExt->DevID, fdoExt->FuncID, PCI_REG_USER_DEF) >> 8;
-// if ( UserDefData_2 )
-// {
-// Data = ReadPCIReg(fdoExt->BusID, fdoExt->DevID, 0, 0xAC);
-//
-// mask = 1 << (UserDefData_2-1);
-// // 1 : xD , 0 : SM
-// if ( Data & mask)
-// fdoExt->DiskType = DISKTYPE_XD;
-// else
-// fdoExt->DiskType = DISKTYPE_SM;
-//
-// if ( IsXDCompliance && (fdoExt->DiskType == DISKTYPE_XD) )
-// {
-// Ssfdc_D_ReadID(idcode, READ_ID_3);
-// if (idcode[2] != 0xB5)
-// return(ERROR);
-// }
-// }
-//
-// if ( !(UserDefData_1 | UserDefData_2) )
-// {
-// // Use UserDefine Register to differentiate SM and xD.
-// Ssfdc_D_ReadID(idcode, READ_ID_3);
-//
-// if (idcode[2] == 0xB5)
-// fdoExt->DiskType = DISKTYPE_XD;
-// else
-// {
-// if (!IsXDCompliance)
-// fdoExt->DiskType = DISKTYPE_SM;
-// else
-// return(ERROR);
-// }
-//
-// if (fdoExt->UserDef_DiskType == 0x04) fdoExt->DiskType = DISKTYPE_XD;
-// if (fdoExt->UserDef_DiskType == 0x08) fdoExt->DiskType = DISKTYPE_SM;
-// }
-//
-// if (!fdoExt->UserDef_DisableWP)
-// {
-// if (fdoExt->DiskType == DISKTYPE_SM)
-// {
-// if (Check_D_SsfdcWP())
-// Ssfdc.Attribute|=WP;
-// }
-// }
-
- return(SMSUCCESS);
+ if (Set_D_SsfdcModel(us->SM_DeviceID))
+ return ERROR;
+
+ return SMSUCCESS;
}
-//----- Search_D_CIS() -------------------------------------------------
+/* ----- Search_D_CIS() ------------------------------------------------- */
int Search_D_CIS(struct us_data *us)
{
- //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
- //ADDRESS_T bb = (ADDRESS_T) &Media;
-
- Media.Zone=0; Media.Sector=0;
+ Media.Zone = 0;
+ Media.Sector = 0;
- for (Media.PhyBlock=0; Media.PhyBlock<(Ssfdc.MaxBlocks-Ssfdc.MaxLogBlocks-1); Media.PhyBlock++)
- {
- if (Ssfdc_D_ReadRedtData(us, Redundant))
- {
+ for (Media.PhyBlock = 0;
+ Media.PhyBlock < (Ssfdc.MaxBlocks - Ssfdc.MaxLogBlocks - 1);
+ Media.PhyBlock++) {
+ if (Ssfdc_D_ReadRedtData(us, Redundant)) {
Ssfdc_D_Reset(us);
- return(ERROR);
+ return ERROR;
}
if (!Check_D_FailBlock(Redundant))
break;
}
- if (Media.PhyBlock==(Ssfdc.MaxBlocks-Ssfdc.MaxLogBlocks-1))
- {
+ if (Media.PhyBlock == (Ssfdc.MaxBlocks - Ssfdc.MaxLogBlocks - 1)) {
Ssfdc_D_Reset(us);
- return(ERROR);
+ return ERROR;
}
- while (Media.Sector<CIS_SEARCH_SECT)
- {
- if (Media.Sector)
- {
- if (Ssfdc_D_ReadRedtData(us, Redundant))
- {
+ while (Media.Sector < CIS_SEARCH_SECT) {
+ if (Media.Sector) {
+ if (Ssfdc_D_ReadRedtData(us, Redundant)) {
Ssfdc_D_Reset(us);
- return(ERROR);
+ return ERROR;
}
}
- if (!Check_D_DataStatus(Redundant))
- {
- if (Ssfdc_D_ReadSect(us,WorkBuf,Redundant))
- {
+ if (!Check_D_DataStatus(Redundant)) {
+ if (Ssfdc_D_ReadSect(us, WorkBuf, Redundant)) {
Ssfdc_D_Reset(us);
- return(ERROR);
+ return ERROR;
}
- if (Check_D_CISdata(WorkBuf,Redundant))
- {
+ if (Check_D_CISdata(WorkBuf, Redundant)) {
Ssfdc_D_Reset(us);
- return(ERROR);
+ return ERROR;
}
- CisArea.PhyBlock=Media.PhyBlock;
- CisArea.Sector=Media.Sector;
+ CisArea.PhyBlock = Media.PhyBlock;
+ CisArea.Sector = Media.Sector;
Ssfdc_D_Reset(us);
- return(SMSUCCESS);
+ return SMSUCCESS;
}
Media.Sector++;
}
Ssfdc_D_Reset(us);
- return(ERROR);
+ return ERROR;
}
-//----- Make_D_LogTable() ----------------------------------------------
+/* ----- Make_D_LogTable() ---------------------------------------------- */
int Make_D_LogTable(struct us_data *us)
{
- WORD phyblock,logblock;
- //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
- //ADDRESS_T bb = (ADDRESS_T) &Media;
+ WORD phyblock, logblock;
- if (Log2Phy[Media.Zone]==NULL)
- {
- Log2Phy[Media.Zone] = kmalloc(MAX_LOGBLOCK*sizeof(WORD), GFP_KERNEL);
+ if (Log2Phy[Media.Zone] == NULL) {
+ Log2Phy[Media.Zone] = kmalloc(MAX_LOGBLOCK * sizeof(WORD),
+ GFP_KERNEL);
/* pr_info("ExAllocatePool Zone = %x, Addr = %x\n",
Media.Zone, Log2Phy[Media.Zone]); */
- if (Log2Phy[Media.Zone]==NULL)
- return(ERROR);
+ if (Log2Phy[Media.Zone] == NULL)
+ return ERROR;
}
- Media.Sector=0;
-
- //for(Media.Zone=0; Media.Zone<MAX_ZONENUM; Media.Zone++)
- //for(Media.Zone=0; Media.Zone<Ssfdc.MaxZones; Media.Zone++)
- {
- /* pr_info("Make_D_LogTable --- MediaZone = 0x%x\n",
- Media.Zone); */
- for(Media.LogBlock=0; Media.LogBlock<Ssfdc.MaxLogBlocks; Media.LogBlock++)
- Log2Phy[Media.Zone][Media.LogBlock]=NO_ASSIGN;
-
- for(Media.PhyBlock=0; Media.PhyBlock<(MAX_BLOCKNUM/8); Media.PhyBlock++)
- Assign[Media.Zone][Media.PhyBlock]=0x00;
-
- for(Media.PhyBlock=0; Media.PhyBlock<Ssfdc.MaxBlocks; Media.PhyBlock++)
- {
- if ((!Media.Zone) && (Media.PhyBlock<=CisArea.PhyBlock))
- {
- Set_D_Bit(Assign[Media.Zone],Media.PhyBlock);
- continue;
- }
+ Media.Sector = 0;
+
+ /* pr_info("Make_D_LogTable --- MediaZone = 0x%x\n",
+ Media.Zone); */
+ for (Media.LogBlock = 0; Media.LogBlock < Ssfdc.MaxLogBlocks;
+ Media.LogBlock++)
+ Log2Phy[Media.Zone][Media.LogBlock] = NO_ASSIGN;
+
+ for (Media.PhyBlock = 0; Media.PhyBlock < (MAX_BLOCKNUM / 8);
+ Media.PhyBlock++)
+ Assign[Media.Zone][Media.PhyBlock] = 0x00;
+
+ for (Media.PhyBlock = 0; Media.PhyBlock < Ssfdc.MaxBlocks;
+ Media.PhyBlock++) {
+ if ((!Media.Zone) && (Media.PhyBlock <= CisArea.PhyBlock)) {
+ Set_D_Bit(Assign[Media.Zone], Media.PhyBlock);
+ continue;
+ }
+
+ if (Ssfdc_D_ReadRedtData(us, Redundant)) {
+ Ssfdc_D_Reset(us);
+ return ERROR;
+ }
+
+ if (!Check_D_DataBlank(Redundant))
+ continue;
+
+ Set_D_Bit(Assign[Media.Zone], Media.PhyBlock);
- if (Ssfdc_D_ReadRedtData(us, Redundant))
- { Ssfdc_D_Reset(us); return(ERROR); }
+ if (Check_D_FailBlock(Redundant))
+ continue;
- if (!Check_D_DataBlank(Redundant))
- continue;
+ if (Load_D_LogBlockAddr(Redundant))
+ continue;
- Set_D_Bit(Assign[Media.Zone],Media.PhyBlock);
+ if (Media.LogBlock >= Ssfdc.MaxLogBlocks)
+ continue;
- if (Check_D_FailBlock(Redundant))
- continue;
+ if (Log2Phy[Media.Zone][Media.LogBlock] == NO_ASSIGN) {
+ Log2Phy[Media.Zone][Media.LogBlock] = Media.PhyBlock;
+ continue;
+ }
- //if (Check_D_DataStatus(Redundant))
- // continue;
+ phyblock = Media.PhyBlock;
+ logblock = Media.LogBlock;
+ Media.Sector = (BYTE)(Ssfdc.MaxSectors - 1);
- if (Load_D_LogBlockAddr(Redundant))
- continue;
+ if (Ssfdc_D_ReadRedtData(us, Redundant)) {
+ Ssfdc_D_Reset(us);
+ return ERROR;
+ }
- if (Media.LogBlock>=Ssfdc.MaxLogBlocks)
- continue;
+ if (!Load_D_LogBlockAddr(Redundant) &&
+ (Media.LogBlock == logblock)) {
+ Media.PhyBlock = Log2Phy[Media.Zone][logblock];
- if (Log2Phy[Media.Zone][Media.LogBlock]==NO_ASSIGN)
- {
- Log2Phy[Media.Zone][Media.LogBlock]=Media.PhyBlock;
- continue;
+ if (Ssfdc_D_ReadRedtData(us, Redundant)) {
+ Ssfdc_D_Reset(us);
+ return ERROR;
}
- phyblock = Media.PhyBlock;
- logblock = Media.LogBlock;
- Media.Sector = (BYTE)(Ssfdc.MaxSectors-1);
-
- if (Ssfdc_D_ReadRedtData(us, Redundant))
- { Ssfdc_D_Reset(us); return(ERROR); }
-
- if (!Load_D_LogBlockAddr(Redundant))
- {
- if (Media.LogBlock==logblock)
- {
- Media.PhyBlock=Log2Phy[Media.Zone][logblock];
-
- if (Ssfdc_D_ReadRedtData(us, Redundant))
- { Ssfdc_D_Reset(us); return(ERROR); }
-
- Media.PhyBlock=phyblock;
-
- if (!Load_D_LogBlockAddr(Redundant))
- {
- if (Media.LogBlock!=logblock)
- {
- Media.PhyBlock=Log2Phy[Media.Zone][logblock];
- Log2Phy[Media.Zone][logblock]=phyblock;
- }
- }
- else
- {
- Media.PhyBlock=Log2Phy[Media.Zone][logblock];
- Log2Phy[Media.Zone][logblock]=phyblock;
- }
+ Media.PhyBlock = phyblock;
+
+ if (!Load_D_LogBlockAddr(Redundant)) {
+ if (Media.LogBlock != logblock) {
+ Media.PhyBlock =
+ Log2Phy[Media.Zone][logblock];
+ Log2Phy[Media.Zone][logblock] =
+ phyblock;
}
+ } else {
+ Media.PhyBlock = Log2Phy[Media.Zone][logblock];
+ Log2Phy[Media.Zone][logblock] = phyblock;
}
+ }
+
+ Media.Sector = 0;
+ Media.PhyBlock = phyblock;
+
+ AssignStart[Media.Zone] = 0;
- Media.Sector=0;
-
-// here Not yet
-//#ifdef L2P_ERR_ERASE
-// if (!(Ssfdc.Attribute &MWP))
-// {
-// Ssfdc_D_Reset(fdoExt);
-// if (Ssfdc_D_EraseBlock(fdoExt))
-// return(ERROR);
-//
-// if (Ssfdc_D_CheckStatus())
-// {
-// if (MarkFail_D_PhyOneBlock())
-// return(ERROR);
-// }
-// else
-// Clr_D_Bit(Assign[Media.Zone],Media.PhyBlock);
-// }
-//#else
-// Ssfdc.Attribute|=MWP;
-//#endif
- Media.PhyBlock=phyblock;
-
- } // End for (Media.PhyBlock<Ssfdc.MaxBlocks)
-
- AssignStart[Media.Zone]=0;
-
- } // End for (Media.Zone<MAX_ZONENUM)
+ } /* End for (Media.Zone<MAX_ZONENUM) */
Ssfdc_D_Reset(us);
- return(SMSUCCESS);
+ return SMSUCCESS;
}
-//----- MarkFail_D_PhyOneBlock() ---------------------------------------
+/* ----- MarkFail_D_PhyOneBlock() --------------------------------------- */
int MarkFail_D_PhyOneBlock(struct us_data *us)
{
BYTE sect;
- //SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
- //ADDRESS_T bb = (ADDRESS_T) &Media;
- sect=Media.Sector;
+ sect = Media.Sector;
Set_D_FailBlock(WorkRedund);
- //Ssfdc_D_WriteRedtMode();
- for(Media.Sector=0; Media.Sector<Ssfdc.MaxSectors; Media.Sector++)
- {
- if (Ssfdc_D_WriteRedtData(us, WorkRedund))
- {
+ for (Media.Sector = 0; Media.Sector < Ssfdc.MaxSectors;
+ Media.Sector++) {
+ if (Ssfdc_D_WriteRedtData(us, WorkRedund)) {
Ssfdc_D_Reset(us);
Media.Sector = sect;
ErrCode = ERR_HwError;
MediaChange = ERROR;
- return(ERROR);
- } // NO Status Check
+ return ERROR;
+ } /* NO Status Check */
}
Ssfdc_D_Reset(us);
- Media.Sector=sect;
- return(SMSUCCESS);
+ Media.Sector = sect;
+ return SMSUCCESS;
}
-/*
-//
-////----- SM_Init() ----------------------------------------------------
-//void SM_Init(void)
-//{
-// _Hw_D_ClrIntCardChg();
-// _Hw_D_SetIntMask();
-// // For DMA Interrupt
-// _Hw_D_ClrDMAIntCardChg();
-// _Hw_D_SetDMAIntMask();
-//}
-//
-////----- Media_D_EraseAllRedtData() -----------------------------------
-//int Media_D_EraseAllRedtData(DWORD Index, BOOLEAN CheckBlock)
-//{
-// BYTE i;
-//
-// if (Check_D_MediaPower())
-// return(ErrCode);
-//
-// if (Check_D_MediaWP())
-// return(ErrCode);
-//
-// for (i=0; i<REDTSIZE; i++)
-// WorkRedund[i] = 0xFF;
-//
-// Media.Zone = (BYTE)Index;
-// for (Media.PhyBlock=0; Media.PhyBlock<Ssfdc.MaxBlocks; Media.PhyBlock++)
-// {
-// if ((!Media.Zone) && (Media.PhyBlock<=CisArea.PhyBlock))
-// continue;
-//
-// if (Ssfdc_D_EraseBlock(fdoExt))
-// {
-// ErrCode = ERR_HwError;
-// return(ERROR);
-// }
-//
-// for(Media.Sector=0; Media.Sector<Ssfdc.MaxSectors; Media.Sector++)
-// {
-// Ssfdc_D_WriteRedtMode();
-//
-// if (Ssfdc_D_WriteRedtData(WorkRedund))
-// {
-// Ssfdc_D_Reset(fdoExt);
-// ErrCode = ERR_HwError;
-// MediaChange = ERROR;
-// return(ERROR);
-// } // NO Status Check
-// }
-//
-// Ssfdc_D_Reset(fdoExt);
-// }
-//
-// Ssfdc_D_Reset(fdoExt);
-//
-// return(SMSUCCESS);
-//}
-//
-////----- Media_D_GetMediaInfo() ---------------------------------------
-//DWORD Media_D_GetMediaInfo(PFDO_DEVICE_EXTENSION fdoExt, PIOCTL_MEDIA_INFO_IN pParamIn, PIOCTL_MEDIA_INFO_OUT pParamOut)
-//{
-// pParamOut->ErrCode = STATUS_CMD_FAIL;
-//
-// Init_D_SmartMedia();
-//
-// if (Check_D_MediaPower())
-// return (ErrCode==ERR_NoSmartMedia) ? STATUS_CMD_NO_MEDIA : STATUS_CMD_FAIL;
-//
-// if (Set_D_PhyFmtValue(fdoExt))
-// return STATUS_CMD_FAIL;
-//
-// //usleep(56*1024);
-// if (Search_D_CIS(fdoExt))
-// return STATUS_CMD_FAIL;
-//
-// if (Check_D_MediaWP())
-// return STATUS_CMD_MEDIA_WP;
-//
-// pParamOut->PageSize = Ssfdc.MaxSectors;
-// pParamOut->BlockSize = Ssfdc.MaxBlocks;
-// pParamOut->ZoneSize = Ssfdc.MaxZones;
-//
-// return STATUS_CMD_SUCCESS;
-//}*/
diff --git a/drivers/staging/keucr/smilsub.c b/drivers/staging/keucr/smilsub.c
index d4dd5ed..346c570 100644
--- a/drivers/staging/keucr/smilsub.c
+++ b/drivers/staging/keucr/smilsub.c
@@ -33,9 +33,9 @@ void _Set_D_ECCdata(BYTE, BYTE *);
void _Calc_D_ECCdata(BYTE *);
-struct SSFDCTYPE Ssfdc;
-struct ADDRESS Media;
-struct CIS_AREA CisArea;
+struct keucr_media_info Ssfdc;
+struct keucr_media_address Media;
+struct keucr_media_area CisArea;
static BYTE EccBuf[6];
extern PBYTE SMHostAddr;
@@ -103,8 +103,10 @@ int Load_D_LogBlockAddr(BYTE *redundant)
{
WORD addr1, addr2;
- addr1 = (WORD)*(redundant + REDT_ADDR1H)*0x0100 + (WORD)*(redundant + REDT_ADDR1L);
- addr2 = (WORD)*(redundant + REDT_ADDR2H)*0x0100 + (WORD)*(redundant + REDT_ADDR2L);
+ addr1 = (WORD)*(redundant + REDT_ADDR1H)*0x0100 +
+ (WORD)*(redundant + REDT_ADDR1L);
+ addr2 = (WORD)*(redundant + REDT_ADDR2H)*0x0100 +
+ (WORD)*(redundant + REDT_ADDR2L);
if (addr1 == addr2)
if ((addr1 & 0xF000) == 0x1000) {
@@ -151,7 +153,8 @@ void Set_D_LogBlockAddr(BYTE *redundant)
if ((hweight16(addr) % 2))
addr++;
- *(redundant + REDT_ADDR1H) = *(redundant + REDT_ADDR2H) = (BYTE)(addr / 0x0100);
+ *(redundant + REDT_ADDR1H) = *(redundant + REDT_ADDR2H) =
+ (BYTE)(addr / 0x0100);
*(redundant + REDT_ADDR1L) = *(redundant + REDT_ADDR2L) = (BYTE)addr;
}
@@ -191,7 +194,9 @@ int Ssfdc_D_ReadCisSect(struct us_data *us, BYTE *buf, BYTE *redundant)
Media.Sector = CisArea.Sector;
if (Ssfdc_D_ReadSect(us, buf, redundant)) {
- Media.Zone = zone; Media.PhyBlock = block; Media.Sector = sector;
+ Media.Zone = zone;
+ Media.PhyBlock = block;
+ Media.Sector = sector;
return ERROR;
}
@@ -209,7 +214,8 @@ int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf, BYTE *redundant)
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
- printk("Load SM RW Code Fail !!\n");
+ dev_err(&us->pusb_dev->dev,
+ "Failed to load SmartMedia read/write code\n");
return USB_STOR_TRANSPORT_ERROR;
}
@@ -252,7 +258,8 @@ int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf, BYTE *redundant)
}
/* ----- Ssfdc_D_ReadBlock() --------------------------------------------- */
-int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf, BYTE *redundant)
+int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,
+ BYTE *redundant)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
@@ -260,7 +267,8 @@ int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf, BYTE *redundant
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
- printk("Load SM RW Code Fail !!\n");
+ dev_err(&us->pusb_dev->dev,
+ "Failed to load SmartMedia read/write code\n");
return USB_STOR_TRANSPORT_ERROR;
}
@@ -304,7 +312,8 @@ int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf, BYTE *redundant
/* ----- Ssfdc_D_CopyBlock() -------------------------------------------- */
-int Ssfdc_D_CopyBlock(struct us_data *us, WORD count, BYTE *buf, BYTE *redundant)
+int Ssfdc_D_CopyBlock(struct us_data *us, WORD count, BYTE *buf,
+ BYTE *redundant)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
@@ -312,7 +321,8 @@ int Ssfdc_D_CopyBlock(struct us_data *us, WORD count, BYTE *buf, BYTE *redundant
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
- printk("Load SM RW Code Fail !!\n");
+ dev_err(&us->pusb_dev->dev,
+ "Failed to load SmartMedia read/write code\n");
return USB_STOR_TRANSPORT_ERROR;
}
@@ -358,7 +368,8 @@ int Ssfdc_D_WriteSectForCopy(struct us_data *us, BYTE *buf, BYTE *redundant)
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
- printk("Load SM RW Code Fail !!\n");
+ dev_err(&us->pusb_dev->dev,
+ "Failed to load SmartMedia read/write code\n");
return USB_STOR_TRANSPORT_ERROR;
}
@@ -396,7 +407,8 @@ int Ssfdc_D_EraseBlock(struct us_data *us)
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
- printk("Load SM RW Code Fail !!\n");
+ dev_err(&us->pusb_dev->dev,
+ "Failed to load SmartMedia read/write code\n");
return USB_STOR_TRANSPORT_ERROR;
}
@@ -431,7 +443,8 @@ int Ssfdc_D_ReadRedtData(struct us_data *us, BYTE *redundant)
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
- printk("Load SM RW Code Fail !!\n");
+ dev_err(&us->pusb_dev->dev,
+ "Failed to load SmartMedia read/write code\n");
return USB_STOR_TRANSPORT_ERROR;
}
@@ -470,7 +483,8 @@ int Ssfdc_D_WriteRedtData(struct us_data *us, BYTE *redundant)
result = ENE_LoadBinCode(us, SM_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD) {
- printk("Load SM RW Code Fail !!\n");
+ dev_err(&us->pusb_dev->dev,
+ "Failed to load SmartMedia read/write code\n");
return USB_STOR_TRANSPORT_ERROR;
}
@@ -611,7 +625,7 @@ int Set_D_SsfdcModel(BYTE dcode)
return ERROR;
}
- return SMSUCCESS;
+ return SMSUCCESS;
}
/* ----- _Check_D_DevCode() --------------------------------------------- */
@@ -686,8 +700,8 @@ int Check_D_CISdata(BYTE *buf, BYTE *redundant)
/* ----- Set_D_RightECC() ---------------------------------------------- */
void Set_D_RightECC(BYTE *redundant)
{
- /* Driver ECC Check */
- return;
+ /* Driver ECC Check */
+ return;
}
diff --git a/drivers/staging/keucr/smscsi.c b/drivers/staging/keucr/smscsi.c
index 58b5555..572d648 100644
--- a/drivers/staging/keucr/smscsi.c
+++ b/drivers/staging/keucr/smscsi.c
@@ -56,7 +56,7 @@ int SM_SCSIIrp(struct us_data *us, struct scsi_cmnd *srb)
return result;
}
-/* ----- SM_SCSI_Test_Unit_Ready() -------------------------------------------------- */
+/* ----- SM_SCSI_Test_Unit_Ready() ------------------------------------- */
int SM_SCSI_Test_Unit_Ready(struct us_data *us, struct scsi_cmnd *srb)
{
if (us->SM_Status.Insert && us->SM_Status.Ready)
@@ -69,21 +69,27 @@ int SM_SCSI_Test_Unit_Ready(struct us_data *us, struct scsi_cmnd *srb)
return USB_STOR_TRANSPORT_GOOD;
}
-/* ----- SM_SCSI_Inquiry() -------------------------------------------------- */
+/* ----- SM_SCSI_Inquiry() --------------------------------------------- */
int SM_SCSI_Inquiry(struct us_data *us, struct scsi_cmnd *srb)
{
- BYTE data_ptr[36] = {0x00, 0x80, 0x02, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x55, 0x53, 0x42, 0x32, 0x2E, 0x30, 0x20, 0x20, 0x43, 0x61, 0x72, 0x64, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x30, 0x31, 0x30, 0x30};
+ BYTE data_ptr[36] = {0x00, 0x80, 0x02, 0x00, 0x1F, 0x00, 0x00, 0x00,
+ 0x55, 0x53, 0x42, 0x32, 0x2E, 0x30, 0x20,
+ 0x20, 0x43, 0x61, 0x72, 0x64, 0x52, 0x65,
+ 0x61, 0x64, 0x65, 0x72, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x30, 0x31, 0x30, 0x30};
usb_stor_set_xfer_buf(us, data_ptr, 36, srb, TO_XFER_BUF);
return USB_STOR_TRANSPORT_GOOD;
}
-/* ----- SM_SCSI_Mode_Sense() -------------------------------------------------- */
+/* ----- SM_SCSI_Mode_Sense() ------------------------------------------ */
int SM_SCSI_Mode_Sense(struct us_data *us, struct scsi_cmnd *srb)
{
- BYTE mediaNoWP[12] = {0x0b, 0x00, 0x00, 0x08, 0x00, 0x00, 0x71, 0xc0, 0x00, 0x00, 0x02, 0x00};
- BYTE mediaWP[12] = {0x0b, 0x00, 0x80, 0x08, 0x00, 0x00, 0x71, 0xc0, 0x00, 0x00, 0x02, 0x00};
+ BYTE mediaNoWP[12] = {0x0b, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x71, 0xc0, 0x00, 0x00, 0x02, 0x00};
+ BYTE mediaWP[12] = {0x0b, 0x00, 0x80, 0x08, 0x00, 0x00,
+ 0x71, 0xc0, 0x00, 0x00, 0x02, 0x00};
if (us->SM_Status.WtP)
usb_stor_set_xfer_buf(us, mediaWP, 12, srb, TO_XFER_BUF);
@@ -94,7 +100,7 @@ int SM_SCSI_Mode_Sense(struct us_data *us, struct scsi_cmnd *srb)
return USB_STOR_TRANSPORT_GOOD;
}
-/* ----- SM_SCSI_Read_Capacity() -------------------------------------------------- */
+/* ----- SM_SCSI_Read_Capacity() --------------------------------------- */
int SM_SCSI_Read_Capacity(struct us_data *us, struct scsi_cmnd *srb)
{
unsigned int offset = 0;
@@ -103,14 +109,14 @@ int SM_SCSI_Read_Capacity(struct us_data *us, struct scsi_cmnd *srb)
WORD bl_len;
BYTE buf[8];
- printk("SM_SCSI_Read_Capacity\n");
+ dev_dbg(&us->pusb_dev->dev, "SM_SCSI_Read_Capacity\n");
bl_len = 0x200;
bl_num = Ssfdc.MaxLogBlocks * Ssfdc.MaxSectors * Ssfdc.MaxZones - 1;
us->bl_num = bl_num;
- printk("bl_len = %x\n", bl_len);
- printk("bl_num = %x\n", bl_num);
+ dev_dbg(&us->pusb_dev->dev, "bl_len = %x\n", bl_len);
+ dev_dbg(&us->pusb_dev->dev, "bl_num = %x\n", bl_num);
buf[0] = (bl_num >> 24) & 0xff;
buf[1] = (bl_num >> 16) & 0xff;
@@ -131,8 +137,10 @@ int SM_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb)
{
int result = 0;
PBYTE Cdb = srb->cmnd;
- DWORD bn = ((Cdb[2] << 24) & 0xff000000) | ((Cdb[3] << 16) & 0x00ff0000) |
- ((Cdb[4] << 8) & 0x0000ff00) | ((Cdb[5] << 0) & 0x000000ff);
+ DWORD bn = ((Cdb[2] << 24) & 0xff000000) |
+ ((Cdb[3] << 16) & 0x00ff0000) |
+ ((Cdb[4] << 8) & 0x0000ff00) |
+ ((Cdb[5] << 0) & 0x000000ff);
WORD blen = ((Cdb[7] << 8) & 0xff00) | ((Cdb[8] << 0) & 0x00ff);
DWORD blenByte = blen * 0x200;
void *buf;
@@ -161,8 +169,10 @@ int SM_SCSI_Write(struct us_data *us, struct scsi_cmnd *srb)
{
int result = 0;
PBYTE Cdb = srb->cmnd;
- DWORD bn = ((Cdb[2] << 24) & 0xff000000) | ((Cdb[3] << 16) & 0x00ff0000) |
- ((Cdb[4] << 8) & 0x0000ff00) | ((Cdb[5] << 0) & 0x000000ff);
+ DWORD bn = ((Cdb[2] << 24) & 0xff000000) |
+ ((Cdb[3] << 16) & 0x00ff0000) |
+ ((Cdb[4] << 8) & 0x0000ff00) |
+ ((Cdb[5] << 0) & 0x000000ff);
WORD blen = ((Cdb[7] << 8) & 0xff00) | ((Cdb[8] << 0) & 0x00ff);
DWORD blenByte = blen * 0x200;
void *buf;
diff --git a/drivers/staging/keucr/transport.c b/drivers/staging/keucr/transport.c
index 1a8837d..aeb2186 100644
--- a/drivers/staging/keucr/transport.c
+++ b/drivers/staging/keucr/transport.c
@@ -79,6 +79,47 @@ static int usb_stor_msg_common(struct us_data *us, int timeout)
}
/*
+ * usb_stor_print_cmd():
+ */
+static void usb_stor_print_cmd(struct us_data *us, struct scsi_cmnd *srb)
+{
+ PBYTE Cdb = srb->cmnd;
+ DWORD cmd = Cdb[0];
+
+ switch (cmd) {
+ case TEST_UNIT_READY:
+ break;
+ case INQUIRY:
+ dev_dbg(&us->pusb_dev->dev,
+ "scsi cmd %X --- SCSIOP_INQUIRY\n", cmd);
+ break;
+ case MODE_SENSE:
+ dev_dbg(&us->pusb_dev->dev,
+ "scsi cmd %X --- SCSIOP_MODE_SENSE\n", cmd);
+ break;
+ case START_STOP:
+ dev_dbg(&us->pusb_dev->dev,
+ "scsi cmd %X --- SCSIOP_START_STOP\n", cmd);
+ break;
+ case READ_CAPACITY:
+ dev_dbg(&us->pusb_dev->dev,
+ "scsi cmd %X --- SCSIOP_READ_CAPACITY\n", cmd);
+ break;
+ case READ_10:
+ break;
+ case WRITE_10:
+ break;
+ case ALLOW_MEDIUM_REMOVAL:
+ dev_dbg(&us->pusb_dev->dev,
+ "scsi cmd %X --- SCSIOP_ALLOW_MEDIUM_REMOVAL\n", cmd);
+ break;
+ default:
+ dev_dbg(&us->pusb_dev->dev, "scsi cmd %X --- Other cmd\n", cmd);
+ break;
+ }
+}
+
+/*
* usb_stor_control_msg()
*/
int usb_stor_control_msg(struct us_data *us, unsigned int pipe,
@@ -303,7 +344,7 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
int result;
/* pr_info("transport --- usb_stor_invoke_transport\n"); */
- usb_stor_print_cmd(srb);
+ usb_stor_print_cmd(us, srb);
/* send the command to the transport layer */
scsi_set_resid(srb, 0);
result = us->transport(srb, us); /* usb_stor_Bulk_transport; */
@@ -429,7 +470,7 @@ void ENE_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
int result = 0;
/* pr_info("transport --- ENE_stor_invoke_transport\n"); */
- usb_stor_print_cmd(srb);
+ usb_stor_print_cmd(us, srb);
/* send the command to the transport layer */
scsi_set_resid(srb, 0);
if (!(us->SM_Status.Ready))
@@ -708,8 +749,8 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
} else {
residue = min(residue, transfer_length);
- scsi_set_resid(srb, max(scsi_get_resid(srb),
- (int) residue));
+ scsi_set_resid(srb, max_t(int, scsi_get_resid(srb),
+ residue));
}
}
diff --git a/drivers/staging/keucr/transport.h b/drivers/staging/keucr/transport.h
index 2a11a98..df34474 100644
--- a/drivers/staging/keucr/transport.h
+++ b/drivers/staging/keucr/transport.h
@@ -29,7 +29,6 @@
extern int usb_stor_Bulk_transport(struct scsi_cmnd *, struct us_data*);
extern int usb_stor_Bulk_max_lun(struct us_data *);
extern int usb_stor_Bulk_reset(struct us_data *);
-extern void usb_stor_print_cmd(struct scsi_cmnd *);
extern void usb_stor_invoke_transport(struct scsi_cmnd *, struct us_data*);
extern void usb_stor_stop_transport(struct us_data *);
extern int usb_stor_control_msg(struct us_data *us, unsigned int pipe,
@@ -61,7 +60,7 @@ extern int ENE_InitMedia(struct us_data *);
extern int ENE_SMInit(struct us_data *);
extern int ENE_SendScsiCmd(struct us_data*, BYTE, void*, int);
extern int ENE_LoadBinCode(struct us_data*, BYTE);
-extern int ENE_Read_BYTE(struct us_data*, WORD index, void *buf);
+extern int ene_read_byte(struct us_data*, WORD index, void *buf);
extern int ENE_Read_Data(struct us_data*, void *buf, unsigned int length);
extern int ENE_Write_Data(struct us_data*, void *buf, unsigned int length);
extern void BuildSenseBuffer(struct scsi_cmnd *, int);
diff --git a/drivers/staging/keucr/usb.c b/drivers/staging/keucr/usb.c
index f656f8a..ddd2e73 100644
--- a/drivers/staging/keucr/usb.c
+++ b/drivers/staging/keucr/usb.c
@@ -24,13 +24,13 @@ MODULE_LICENSE("GPL");
static unsigned int delay_use = 1;
-static struct usb_device_id eucr_usb_ids [] = {
+static struct usb_device_id eucr_usb_ids[] = {
{ USB_DEVICE(0x058f, 0x6366) },
{ USB_DEVICE(0x0cf2, 0x6230) },
{ USB_DEVICE(0x0cf2, 0x6250) },
{ } /* Terminating entry */
};
-MODULE_DEVICE_TABLE (usb, eucr_usb_ids);
+MODULE_DEVICE_TABLE(usb, eucr_usb_ids);
#ifdef CONFIG_PM
@@ -65,7 +65,7 @@ static int eucr_resume(struct usb_interface *iface)
us->Power_IsResum = true;
- us->SM_Status = *(PSM_STATUS)&tmp;
+ us->SM_Status = *(struct keucr_sm_status *)&tmp;
return 0;
}
@@ -85,9 +85,9 @@ static int eucr_reset_resume(struct usb_interface *iface)
* the device
*/
- us->Power_IsResum = true;
+ us->Power_IsResum = true;
- us->SM_Status = *(PSM_STATUS)&tmp;
+ us->SM_Status = *(struct keucr_sm_status *)&tmp;
return 0;
}
@@ -124,16 +124,18 @@ static int eucr_post_reset(struct usb_interface *iface)
return 0;
}
-void fill_inquiry_response(struct us_data *us, unsigned char *data, unsigned int data_len)
+void fill_inquiry_response(struct us_data *us, unsigned char *data,
+ unsigned int data_len)
{
pr_info("usb --- fill_inquiry_response\n");
if (data_len < 36) /* You lose. */
return;
if (data[0]&0x20) {
- memset(data+8,0,28);
+ memset(data+8, 0, 28);
} else {
- u16 bcdDevice = le16_to_cpu(us->pusb_dev->descriptor.bcdDevice);
+ u16 bcdDevice =
+ le16_to_cpu(us->pusb_dev->descriptor.bcdDevice);
memcpy(data+8, us->unusual_dev->vendorName,
strlen(us->unusual_dev->vendorName) > 8 ? 8 :
strlen(us->unusual_dev->vendorName));
@@ -148,7 +150,7 @@ void fill_inquiry_response(struct us_data *us, unsigned char *data, unsigned int
usb_stor_set_xfer_buf(us, data, data_len, us->srb, TO_XFER_BUF);
}
-static int usb_stor_control_thread(void * __us)
+static int usb_stor_control_thread(void *__us)
{
struct us_data *us = (struct us_data *)__us;
struct Scsi_Host *host = us_to_host(us);
@@ -194,7 +196,8 @@ static int usb_stor_control_thread(void * __us)
us->srb->result = DID_BAD_TARGET << 16;
} else if ((us->srb->cmnd[0] == INQUIRY)
&& (us->fflags & US_FL_FIX_INQUIRY)) {
- unsigned char data_ptr[36] = {0x00, 0x80, 0x02, 0x02, 0x1F, 0x00, 0x00, 0x00};
+ unsigned char data_ptr[36] = {0x00, 0x80, 0x02, 0x02,
+ 0x1F, 0x00, 0x00, 0x00};
fill_inquiry_response(us, data_ptr, 36);
us->srb->result = SAM_STAT_GOOD;
@@ -253,13 +256,15 @@ static int associate_dev(struct us_data *us, struct usb_interface *intf)
usb_set_intfdata(intf, us);
/* Allocate the device-related DMA-mapped buffers */
- us->cr = usb_alloc_coherent(us->pusb_dev, sizeof(*us->cr), GFP_KERNEL, &us->cr_dma);
+ us->cr = usb_alloc_coherent(us->pusb_dev, sizeof(*us->cr), GFP_KERNEL,
+ &us->cr_dma);
if (!us->cr) {
pr_info("usb_ctrlrequest allocation failed\n");
return -ENOMEM;
}
- us->iobuf = usb_alloc_coherent(us->pusb_dev, US_IOBUF_SIZE, GFP_KERNEL, &us->iobuf_dma);
+ us->iobuf = usb_alloc_coherent(us->pusb_dev, US_IOBUF_SIZE, GFP_KERNEL,
+ &us->iobuf_dma);
if (!us->iobuf) {
pr_info("I/O buffer allocation failed\n");
return -ENOMEM;
@@ -275,7 +280,8 @@ static int associate_dev(struct us_data *us, struct usb_interface *intf)
static int get_device_info(struct us_data *us, const struct usb_device_id *id)
{
struct usb_device *dev = us->pusb_dev;
- struct usb_interface_descriptor *idesc = &us->pusb_intf->cur_altsetting->desc;
+ struct usb_interface_descriptor *idesc =
+ &us->pusb_intf->cur_altsetting->desc;
pr_info("usb --- get_device_info\n");
@@ -374,10 +380,13 @@ static int get_pipes(struct us_data *us)
/* Calculate and store the pipe values */
us->send_ctrl_pipe = usb_sndctrlpipe(us->pusb_dev, 0);
us->recv_ctrl_pipe = usb_rcvctrlpipe(us->pusb_dev, 0);
- us->send_bulk_pipe = usb_sndbulkpipe(us->pusb_dev, ep_out->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
- us->recv_bulk_pipe = usb_rcvbulkpipe(us->pusb_dev, ep_in->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+ us->send_bulk_pipe = usb_sndbulkpipe(us->pusb_dev,
+ ep_out->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+ us->recv_bulk_pipe = usb_rcvbulkpipe(us->pusb_dev,
+ ep_in->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
if (ep_int) {
- us->recv_intr_pipe = usb_rcvintpipe(us->pusb_dev, ep_int->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+ us->recv_intr_pipe = usb_rcvintpipe(us->pusb_dev,
+ ep_int->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
us->ep_bInterval = ep_int->bInterval;
}
return 0;
@@ -433,10 +442,9 @@ static void dissociate_dev(struct us_data *us)
kfree(us->sensebuf);
/* Free the device-related DMA-mapped buffers */
- if (us->cr)
- usb_free_coherent(us->pusb_dev, sizeof(*us->cr), us->cr, us->cr_dma);
- if (us->iobuf)
- usb_free_coherent(us->pusb_dev, US_IOBUF_SIZE, us->iobuf, us->iobuf_dma);
+ usb_free_coherent(us->pusb_dev, sizeof(*us->cr), us->cr, us->cr_dma);
+ usb_free_coherent(us->pusb_dev, US_IOBUF_SIZE, us->iobuf,
+ us->iobuf_dma);
/* Remove our private data from the interface */
usb_set_intfdata(us->pusb_intf, NULL);
@@ -485,7 +493,7 @@ static void release_everything(struct us_data *us)
scsi_host_put(us_to_host(us));
}
-static int usb_stor_scan_thread(void * __us)
+static int usb_stor_scan_thread(void *__us)
{
struct us_data *us = (struct us_data *)__us;
@@ -515,7 +523,8 @@ static int usb_stor_scan_thread(void * __us)
complete_and_exit(&us->scanning_done, 0);
}
-static int eucr_probe(struct usb_interface *intf, const struct usb_device_id *id)
+static int eucr_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
{
struct Scsi_Host *host;
struct us_data *us;
@@ -525,7 +534,7 @@ static int eucr_probe(struct usb_interface *intf, const struct usb_device_id *id
pr_info("usb --- eucr_probe\n");
- host = scsi_host_alloc(&usb_stor_host_template, sizeof(*us));
+ host = scsi_host_alloc(&usb_stor_host_template, sizeof(*us));
if (!host) {
pr_info("Unable to allocate the scsi host\n");
return -ENOMEM;
@@ -585,7 +594,7 @@ static int eucr_probe(struct usb_interface *intf, const struct usb_device_id *id
wake_up_process(th);
/* probe card type */
- result = ENE_Read_BYTE(us, REG_CARD_STATUS, &MiscReg03);
+ result = ene_read_byte(us, REG_CARD_STATUS, &MiscReg03);
if (result != USB_STOR_XFER_GOOD) {
result = USB_STOR_TRANSPORT_ERROR;
quiesce_and_remove_host(us);
@@ -595,9 +604,9 @@ static int eucr_probe(struct usb_interface *intf, const struct usb_device_id *id
if (!(MiscReg03 & 0x02)) {
result = -ENODEV;
quiesce_and_remove_host(us);
- pr_info("keucr: The driver only supports SM/MS card.\
- To use SD card, \
- please build driver/usb/storage/ums-eneub6250.ko\n");
+ pr_info("keucr: The driver only supports SM/MS card. "
+ "To use SD card, "
+ "please build driver/usb/storage/ums-eneub6250.ko\n");
goto BadDevice;
}
@@ -623,9 +632,9 @@ static void eucr_disconnect(struct usb_interface *intf)
static struct usb_driver usb_storage_driver = {
.name = "eucr",
.probe = eucr_probe,
- .suspend = eucr_suspend,
+ .suspend = eucr_suspend,
.resume = eucr_resume,
- .reset_resume = eucr_reset_resume,
+ .reset_resume = eucr_reset_resume,
.disconnect = eucr_disconnect,
.pre_reset = eucr_pre_reset,
.post_reset = eucr_post_reset,
diff --git a/drivers/staging/keucr/usb.h b/drivers/staging/keucr/usb.h
index a5f7a16..d665af1 100644
--- a/drivers/staging/keucr/usb.h
+++ b/drivers/staging/keucr/usb.h
@@ -1,4 +1,4 @@
-// Driver for USB Mass Storage compliant devices
+/* Driver for USB Mass Storage compliant devices */
#ifndef _USB_H_
#define _USB_H_
@@ -19,26 +19,26 @@ struct scsi_cmnd;
*/
struct us_unusual_dev {
- const char* vendorName;
- const char* productName;
+ const char *vendorName;
+ const char *productName;
__u8 useProtocol;
__u8 useTransport;
int (*initFunction)(struct us_data *);
};
-//EnE HW Register
+/* EnE HW Register */
#define REG_CARD_STATUS 0xFF83
#define REG_HW_TRAP1 0xFF89
-// SRB Status. Refers /usr/include/wine/wine/wnaspi32.h & SCSI sense key
-#define SS_SUCCESS 0x00 // No Sense
+/* SRB Status. Refers /usr/include/wine/wine/wnaspi32.h & SCSI sense key */
+#define SS_SUCCESS 0x00 /* No Sense */
#define SS_NOT_READY 0x02
#define SS_MEDIUM_ERR 0x03
#define SS_HW_ERR 0x04
#define SS_ILLEGAL_REQUEST 0x05
#define SS_UNIT_ATTENTION 0x06
-//ENE Load FW Pattern
+/* ENE Load FW Pattern */
#define SD_INIT1_PATTERN 1
#define SD_INIT2_PATTERN 2
#define SD_RW_PATTERN 3
@@ -51,39 +51,40 @@ struct us_unusual_dev {
#define FDIR_WRITE 0
#define FDIR_READ 1
-typedef struct _SD_STATUS {
- BYTE Insert:1;
- BYTE Ready:1;
- BYTE MediaChange:1;
- BYTE IsMMC:1;
- BYTE HiCapacity:1;
- BYTE HiSpeed:1;
- BYTE WtP:1;
- BYTE Reserved:1;
-} SD_STATUS, *PSD_STATUS;
-
-typedef struct _MS_STATUS {
- BYTE Insert:1;
- BYTE Ready:1;
- BYTE MediaChange:1;
- BYTE IsMSPro:1;
- BYTE IsMSPHG:1;
- BYTE Reserved1:1;
- BYTE WtP:1;
- BYTE Reserved2:1;
-} MS_STATUS, *PMS_STATUS;
-
-typedef struct _SM_STATUS {
- BYTE Insert:1;
- BYTE Ready:1;
- BYTE MediaChange:1;
- BYTE Reserved:3;
- BYTE WtP:1;
- BYTE IsMS:1;
-} SM_STATUS, *PSM_STATUS;
-
-// SD Block Length
-#define SD_BLOCK_LEN 9 // 2^9 = 512 Bytes, The HW maximum read/write data length
+struct keucr_sd_status {
+ BYTE Insert:1;
+ BYTE Ready:1;
+ BYTE MediaChange:1;
+ BYTE IsMMC:1;
+ BYTE HiCapacity:1;
+ BYTE HiSpeed:1;
+ BYTE WtP:1;
+ BYTE Reserved:1;
+};
+
+struct keucr_ms_status {
+ BYTE Insert:1;
+ BYTE Ready:1;
+ BYTE MediaChange:1;
+ BYTE IsMSPro:1;
+ BYTE IsMSPHG:1;
+ BYTE Reserved1:1;
+ BYTE WtP:1;
+ BYTE Reserved2:1;
+};
+
+struct keucr_sm_status {
+ BYTE Insert:1;
+ BYTE Ready:1;
+ BYTE MediaChange:1;
+ BYTE Reserved:3;
+ BYTE WtP:1;
+ BYTE IsMS:1;
+};
+
+/* SD Block Length */
+#define SD_BLOCK_LEN 9 /* 2^9 = 512 Bytes,
+ The HW maximum read/write data length */
/* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
#define US_FLIDX_URB_ACTIVE 0 /* current_urb is in use */
@@ -107,9 +108,9 @@ typedef struct _SM_STATUS {
#define US_IOBUF_SIZE 64 /* Size of the DMA-mapped I/O buffer */
#define US_SENSE_SIZE 18 /* Size of the autosense data buffer */
-typedef int (*trans_cmnd)(struct scsi_cmnd *, struct us_data*);
-typedef int (*trans_reset)(struct us_data*);
-typedef void (*proto_cmnd)(struct scsi_cmnd*, struct us_data*);
+typedef int (*trans_cmnd)(struct scsi_cmnd *, struct us_data *);
+typedef int (*trans_reset)(struct us_data *);
+typedef void (*proto_cmnd)(struct scsi_cmnd *, struct us_data *);
typedef void (*extra_data_destructor)(void *); /* extra data destructor */
typedef void (*pm_hook)(struct us_data *, int); /* power management hook */
@@ -176,19 +177,19 @@ struct us_data {
#ifdef CONFIG_PM
pm_hook suspend_resume_hook;
#endif
- // for 6250 code
- SD_STATUS SD_Status;
- MS_STATUS MS_Status;
- SM_STATUS SM_Status;
+ /* for 6250 code */
+ struct keucr_sd_status SD_Status;
+ struct keucr_ms_status MS_Status;
+ struct keucr_sm_status SM_Status;
- //----- SD Control Data ----------------
- //SD_REGISTER SD_Regs;
+ /* ----- SD Control Data ---------------- */
+ /* SD_REGISTER SD_Regs; */
WORD SD_Block_Mult;
BYTE SD_READ_BL_LEN;
WORD SD_C_SIZE;
BYTE SD_C_SIZE_MULT;
- // SD/MMC New spec.
+ /* SD/MMC New spec. */
BYTE SD_SPEC_VER;
BYTE SD_CSD_VER;
BYTE SD20_HIGH_CAPACITY;
@@ -196,15 +197,15 @@ struct us_data {
BYTE MMC_SPEC_VER;
BYTE MMC_BusWidth;
BYTE MMC_HIGH_CAPACITY;
-
- //----- MS Control Data ----------------
+
+ /* ----- MS Control Data ---------------- */
BOOLEAN MS_SWWP;
DWORD MSP_TotalBlock;
/* MS_LibControl MS_Lib; */
BOOLEAN MS_IsRWPage;
WORD MS_Model;
- //----- SM Control Data ----------------
+ /* ----- SM Control Data ---------------- */
BYTE SM_DeviceID;
BYTE SM_CardID;
@@ -212,16 +213,18 @@ struct us_data {
BYTE BIN_FLAG;
DWORD bl_num;
int SrbStatus;
-
- //------Power Managerment ---------------
- BOOLEAN Power_IsResum;
+
+	/* ------Power Management --------------- */
+ BOOLEAN Power_IsResum;
};
/* Convert between us_data and the corresponding Scsi_Host */
-static inline struct Scsi_Host *us_to_host(struct us_data *us) {
+static inline struct Scsi_Host *us_to_host(struct us_data *us)
+{
return container_of((void *) us, struct Scsi_Host, hostdata);
}
-static inline struct us_data *host_to_us(struct Scsi_Host *host) {
+static inline struct us_data *host_to_us(struct Scsi_Host *host)
+{
return (struct us_data *) host->hostdata;
}
diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
index 6252aca..471c10c 100644
--- a/drivers/staging/line6/driver.c
+++ b/drivers/staging/line6/driver.c
@@ -568,15 +568,6 @@ ssize_t line6_nop_read(struct device *dev, struct device_attribute *attr,
}
/*
- No operation (i.e., unsupported).
-*/
-ssize_t line6_nop_write(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return count;
-}
-
-/*
Generic destructor.
*/
static void line6_destruct(struct usb_interface *interface)
diff --git a/drivers/staging/line6/driver.h b/drivers/staging/line6/driver.h
index a8341f9..34ae95e 100644
--- a/drivers/staging/line6/driver.h
+++ b/drivers/staging/line6/driver.h
@@ -190,9 +190,6 @@ extern char *line6_alloc_sysex_buffer(struct usb_line6 *line6, int code1,
int code2, int size);
extern ssize_t line6_nop_read(struct device *dev,
struct device_attribute *attr, char *buf);
-extern ssize_t line6_nop_write(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count);
extern int line6_read_data(struct usb_line6 *line6, int address, void *data,
size_t datalen);
extern int line6_read_serial_number(struct usb_line6 *line6,
diff --git a/drivers/staging/line6/pcm.c b/drivers/staging/line6/pcm.c
index 02f77d7..6a0648c 100644
--- a/drivers/staging/line6/pcm.c
+++ b/drivers/staging/line6/pcm.c
@@ -34,8 +34,8 @@ static struct snd_line6_pcm *dev2pcm(struct device *dev)
/*
"read" request on "impulse_volume" special file.
*/
-static ssize_t pcm_get_impulse_volume(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t impulse_volume_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", dev2pcm(dev)->impulse_volume);
}
@@ -43,9 +43,9 @@ static ssize_t pcm_get_impulse_volume(struct device *dev,
/*
"write" request on "impulse_volume" special file.
*/
-static ssize_t pcm_set_impulse_volume(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t impulse_volume_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct snd_line6_pcm *line6pcm = dev2pcm(dev);
int value;
@@ -64,12 +64,13 @@ static ssize_t pcm_set_impulse_volume(struct device *dev,
return count;
}
+static DEVICE_ATTR_RW(impulse_volume);
/*
"read" request on "impulse_period" special file.
*/
-static ssize_t pcm_get_impulse_period(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t impulse_period_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", dev2pcm(dev)->impulse_period);
}
@@ -77,9 +78,9 @@ static ssize_t pcm_get_impulse_period(struct device *dev,
/*
"write" request on "impulse_period" special file.
*/
-static ssize_t pcm_set_impulse_period(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t impulse_period_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int value;
int ret;
@@ -91,11 +92,7 @@ static ssize_t pcm_set_impulse_period(struct device *dev,
dev2pcm(dev)->impulse_period = value;
return count;
}
-
-static DEVICE_ATTR(impulse_volume, S_IWUSR | S_IRUGO, pcm_get_impulse_volume,
- pcm_set_impulse_volume);
-static DEVICE_ATTR(impulse_period, S_IWUSR | S_IRUGO, pcm_get_impulse_period,
- pcm_set_impulse_period);
+static DEVICE_ATTR_RW(impulse_period);
#endif
@@ -107,11 +104,15 @@ static bool test_flags(unsigned long flags0, unsigned long flags1,
int line6_pcm_acquire(struct snd_line6_pcm *line6pcm, int channels)
{
- unsigned long flags_old =
- __sync_fetch_and_or(&line6pcm->flags, channels);
- unsigned long flags_new = flags_old | channels;
- unsigned long flags_final = flags_old;
- int err = 0;
+ unsigned long flags_old, flags_new, flags_final;
+ int err;
+
+ do {
+ flags_old = ACCESS_ONCE(line6pcm->flags);
+ flags_new = flags_old | channels;
+ } while (cmpxchg(&line6pcm->flags, flags_old, flags_new) != flags_old);
+
+ flags_final = flags_old;
line6pcm->prev_fbuf = NULL;
@@ -197,9 +198,12 @@ pcm_acquire_error:
int line6_pcm_release(struct snd_line6_pcm *line6pcm, int channels)
{
- unsigned long flags_old =
- __sync_fetch_and_and(&line6pcm->flags, ~channels);
- unsigned long flags_new = flags_old & ~channels;
+ unsigned long flags_old, flags_new;
+
+ do {
+ flags_old = ACCESS_ONCE(line6pcm->flags);
+ flags_new = flags_old & ~channels;
+ } while (cmpxchg(&line6pcm->flags, flags_old, flags_new) != flags_old);
if (test_flags(flags_new, flags_old, LINE6_BITS_CAPTURE_STREAM))
line6_unlink_audio_in_urbs(line6pcm);
@@ -385,8 +389,11 @@ static int snd_line6_pcm_free(struct snd_device *device)
*/
static void pcm_disconnect_substream(struct snd_pcm_substream *substream)
{
- if (substream->runtime && snd_pcm_running(substream))
+ if (substream->runtime && snd_pcm_running(substream)) {
+ snd_pcm_stream_lock_irq(substream);
snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
+ snd_pcm_stream_unlock_irq(substream);
+ }
}
/*
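The line6_pcm_acquire()/line6_pcm_release() changes above replace the GCC
__sync_fetch_and_or()/__sync_fetch_and_and() builtins with an ACCESS_ONCE()
plus cmpxchg() retry loop. A minimal sketch of that read-modify-write pattern,
written with C11 atomics so it is self-contained outside the kernel
(flags_set_channels() is a hypothetical name, not part of the driver):

#include <stdatomic.h>

/* Atomically OR `channels` into *flags and return the previous value,
 * retrying if another thread updated *flags between the load and the
 * compare-and-swap -- the same shape as the kernel cmpxchg() loop above. */
static unsigned long flags_set_channels(_Atomic unsigned long *flags,
                                        unsigned long channels)
{
        unsigned long old, new;

        do {
                old = atomic_load(flags);   /* plays the ACCESS_ONCE() role */
                new = old | channels;
        } while (!atomic_compare_exchange_weak(flags, &old, new));

        return old;                         /* caller inspects the old bits */
}
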
diff --git a/drivers/staging/line6/pod.c b/drivers/staging/line6/pod.c
index 699b217..f4e95a6 100644
--- a/drivers/staging/line6/pod.c
+++ b/drivers/staging/line6/pod.c
@@ -192,8 +192,8 @@ static int pod_set_system_param_int(struct usb_line6_pod *pod, int value,
/*
"read" request on "serial_number" special file.
*/
-static ssize_t pod_get_serial_number(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t serial_number_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6_pod *pod = usb_get_intfdata(interface);
@@ -203,9 +203,8 @@ static ssize_t pod_get_serial_number(struct device *dev,
/*
"read" request on "firmware_version" special file.
*/
-static ssize_t pod_get_firmware_version(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t firmware_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6_pod *pod = usb_get_intfdata(interface);
@@ -216,8 +215,8 @@ static ssize_t pod_get_firmware_version(struct device *dev,
/*
"read" request on "device_id" special file.
*/
-static ssize_t pod_get_device_id(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t device_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6_pod *pod = usb_get_intfdata(interface);
@@ -274,11 +273,9 @@ static void pod_startup4(struct work_struct *work)
}
/* POD special files: */
-static DEVICE_ATTR(device_id, S_IRUGO, pod_get_device_id, line6_nop_write);
-static DEVICE_ATTR(firmware_version, S_IRUGO, pod_get_firmware_version,
- line6_nop_write);
-static DEVICE_ATTR(serial_number, S_IRUGO, pod_get_serial_number,
- line6_nop_write);
+static DEVICE_ATTR_RO(device_id);
+static DEVICE_ATTR_RO(firmware_version);
+static DEVICE_ATTR_RO(serial_number);
/* control info callback */
static int snd_pod_control_monitor_info(struct snd_kcontrol *kcontrol,
diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c
index 2f44d56..776d363 100644
--- a/drivers/staging/line6/toneport.c
+++ b/drivers/staging/line6/toneport.c
@@ -244,13 +244,17 @@ static int snd_toneport_source_put(struct snd_kcontrol *kcontrol,
struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol);
struct usb_line6_toneport *toneport =
(struct usb_line6_toneport *)line6pcm->line6;
+ unsigned int source;
- if (ucontrol->value.enumerated.item[0] == toneport->source)
+ source = ucontrol->value.enumerated.item[0];
+ if (source >= ARRAY_SIZE(toneport_source_info))
+ return -EINVAL;
+ if (source == toneport->source)
return 0;
- toneport->source = ucontrol->value.enumerated.item[0];
+ toneport->source = source;
toneport_send_cmd(toneport->line6.usbdev,
- toneport_source_info[toneport->source].code, 0x0000);
+ toneport_source_info[source].code, 0x0000);
return 1;
}
diff --git a/drivers/staging/lustre/Kconfig b/drivers/staging/lustre/Kconfig
new file mode 100644
index 0000000..a224d88
--- /dev/null
+++ b/drivers/staging/lustre/Kconfig
@@ -0,0 +1,3 @@
+source "drivers/staging/lustre/lustre/Kconfig"
+
+source "drivers/staging/lustre/lnet/Kconfig"
diff --git a/drivers/staging/lustre/Makefile b/drivers/staging/lustre/Makefile
new file mode 100644
index 0000000..fb0e0fa
--- /dev/null
+++ b/drivers/staging/lustre/Makefile
@@ -0,0 +1,4 @@
+subdir-ccflags-y := -I$(src)/include/
+
+obj-$(CONFIG_LNET) += lnet/
+obj-$(CONFIG_LUSTRE_FS) += lustre/
diff --git a/drivers/staging/lustre/TODO b/drivers/staging/lustre/TODO
new file mode 100644
index 0000000..22742d6
--- /dev/null
+++ b/drivers/staging/lustre/TODO
@@ -0,0 +1,13 @@
+* Possible remaining coding style fixes.
+* Remove deadcode.
+* Separate client/server functionality. Functions only used by server can be
+  removed from client.
+* Clean up libcfs layer. Ideally we can remove include/linux/libcfs entirely.
+* Clean up CLIO layer. Lustre client readahead/writeback control needs to
+  better fit what the kernel provides.
+* Add documents in Documentation.
+* Other minor misc cleanups...
+
+Please send any patches to Greg Kroah-Hartman <greg@kroah.com>, Andreas Dilger
+<andreas.dilger@intel.com> and Peng Tao <tao.peng@emc.com>. CCing
+hpdd-discuss <hpdd-discuss@lists.01.org> would be great too.
diff --git a/drivers/staging/lustre/include/linux/libcfs/bitmap.h b/drivers/staging/lustre/include/linux/libcfs/bitmap.h
new file mode 100644
index 0000000..f3d4a89
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/bitmap.h
@@ -0,0 +1,111 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+#ifndef _LIBCFS_BITMAP_H_
+#define _LIBCFS_BITMAP_H_
+
+
+typedef struct {
+ int size;
+ unsigned long data[0];
+} cfs_bitmap_t;
+
+#define CFS_BITMAP_SIZE(nbits) \
+ (((nbits/BITS_PER_LONG)+1)*sizeof(long)+sizeof(cfs_bitmap_t))
+
+static inline
+cfs_bitmap_t *CFS_ALLOCATE_BITMAP(int size)
+{
+ cfs_bitmap_t *ptr;
+
+ OBD_ALLOC(ptr, CFS_BITMAP_SIZE(size));
+ if (ptr == NULL)
+ return ptr;
+
+ ptr->size = size;
+
+ return ptr;
+}
+
+#define CFS_FREE_BITMAP(ptr) OBD_FREE(ptr, CFS_BITMAP_SIZE(ptr->size))
+
+static inline
+void cfs_bitmap_set(cfs_bitmap_t *bitmap, int nbit)
+{
+ set_bit(nbit, bitmap->data);
+}
+
+static inline
+void cfs_bitmap_clear(cfs_bitmap_t *bitmap, int nbit)
+{
+ test_and_clear_bit(nbit, bitmap->data);
+}
+
+static inline
+int cfs_bitmap_check(cfs_bitmap_t *bitmap, int nbit)
+{
+ return test_bit(nbit, bitmap->data);
+}
+
+static inline
+int cfs_bitmap_test_and_clear(cfs_bitmap_t *bitmap, int nbit)
+{
+ return test_and_clear_bit(nbit, bitmap->data);
+}
+
+/* return nonzero if the bitmap has no set bits */
+static inline
+int cfs_bitmap_check_empty(cfs_bitmap_t *bitmap)
+{
+ return find_first_bit(bitmap->data, bitmap->size) == bitmap->size;
+}
+
+static inline
+void cfs_bitmap_copy(cfs_bitmap_t *new, cfs_bitmap_t *old)
+{
+ int newsize;
+
+ LASSERT(new->size >= old->size);
+ newsize = new->size;
+ memcpy(new, old, CFS_BITMAP_SIZE(old->size));
+ new->size = newsize;
+}
+
+#define cfs_foreach_bit(bitmap, pos) \
+ for ((pos) = find_first_bit((bitmap)->data, bitmap->size); \
+ (pos) < (bitmap)->size; \
+ (pos) = find_next_bit((bitmap)->data, (bitmap)->size, (pos) + 1))
+
+#endif
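For orientation, a short sketch of how a caller might use the cfs_bitmap_t
helpers declared above. This is illustrative only: cfs_bitmap_demo() is a
hypothetical function, and it assumes the usual Lustre kernel headers
(OBD_ALLOC(), pr_info(), -ENOMEM) are in scope:

/* Allocate a 128-bit map, mark two bits, walk the set bits, then free it. */
static int cfs_bitmap_demo(void)
{
        cfs_bitmap_t *bm = CFS_ALLOCATE_BITMAP(128);
        int pos;

        if (bm == NULL)
                return -ENOMEM;

        cfs_bitmap_set(bm, 3);
        cfs_bitmap_set(bm, 42);

        cfs_foreach_bit(bm, pos)
                pr_info("bit %d is set\n", pos);

        if (!cfs_bitmap_check_empty(bm))
                cfs_bitmap_clear(bm, 3);

        CFS_FREE_BITMAP(bm);
        return 0;
}
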
diff --git a/drivers/staging/lustre/include/linux/libcfs/curproc.h b/drivers/staging/lustre/include/linux/libcfs/curproc.h
new file mode 100644
index 0000000..de8e35b
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/curproc.h
@@ -0,0 +1,108 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/include/libcfs/curproc.h
+ *
+ * Lustre curproc API declaration
+ *
+ * Author: Nikita Danilov <nikita@clusterfs.com>
+ */
+
+#ifndef __LIBCFS_CURPROC_H__
+#define __LIBCFS_CURPROC_H__
+
+/*
+ * Portable API to access common characteristics of "current" UNIX process.
+ *
+ * Implemented in portals/include/libcfs/<os>/
+ */
+int cfs_curproc_groups_nr(void);
+
+/*
+ * Plus, platform-specific constant
+ *
+ * CFS_CURPROC_COMM_MAX,
+ *
+ * and opaque scalar type
+ *
+ * kernel_cap_t
+ */
+
+/* check if task is running in compat mode.*/
+int current_is_32bit(void);
+#define current_pid() (current->pid)
+#define current_comm() (current->comm)
+int cfs_get_environ(const char *key, char *value, int *val_len);
+
+typedef __u32 cfs_cap_t;
+
+#define CFS_CAP_CHOWN 0
+#define CFS_CAP_DAC_OVERRIDE 1
+#define CFS_CAP_DAC_READ_SEARCH 2
+#define CFS_CAP_FOWNER 3
+#define CFS_CAP_FSETID 4
+#define CFS_CAP_LINUX_IMMUTABLE 9
+#define CFS_CAP_SYS_ADMIN 21
+#define CFS_CAP_SYS_BOOT 23
+#define CFS_CAP_SYS_RESOURCE 24
+
+#define CFS_CAP_FS_MASK ((1 << CFS_CAP_CHOWN) | \
+ (1 << CFS_CAP_DAC_OVERRIDE) | \
+ (1 << CFS_CAP_DAC_READ_SEARCH) | \
+ (1 << CFS_CAP_FOWNER) | \
+ (1 << CFS_CAP_FSETID ) | \
+ (1 << CFS_CAP_LINUX_IMMUTABLE) | \
+ (1 << CFS_CAP_SYS_ADMIN) | \
+ (1 << CFS_CAP_SYS_BOOT) | \
+ (1 << CFS_CAP_SYS_RESOURCE))
+
+void cfs_cap_raise(cfs_cap_t cap);
+void cfs_cap_lower(cfs_cap_t cap);
+int cfs_cap_raised(cfs_cap_t cap);
+cfs_cap_t cfs_curproc_cap_pack(void);
+void cfs_curproc_cap_unpack(cfs_cap_t cap);
+int cfs_capable(cfs_cap_t cap);
+
+/* __LIBCFS_CURPROC_H__ */
+#endif
+/*
+ * Local variables:
+ * c-indentation-style: "K&R"
+ * c-basic-offset: 8
+ * tab-width: 8
+ * fill-column: 80
+ * scroll-step: 1
+ * End:
+ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
new file mode 100644
index 0000000..687dbab
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
@@ -0,0 +1,188 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __LIBCFS_LIBCFS_H__
+#define __LIBCFS_LIBCFS_H__
+
+#if !__GNUC__
+#define __attribute__(x)
+#endif
+
+#include <linux/libcfs/linux/libcfs.h>
+
+#include "curproc.h"
+
+#ifndef offsetof
+# define offsetof(typ,memb) ((long)(long_ptr_t)((char *)&(((typ *)0)->memb)))
+#endif
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(a) ((sizeof (a)) / (sizeof ((a)[0])))
+#endif
+
+#if !defined(swap)
+#define swap(x,y) do { typeof(x) z = x; x = y; y = z; } while (0)
+#endif
+
+#if !defined(container_of)
+/* given a pointer @ptr to the field @member embedded into type (usually
+ * struct) @type, return pointer to the embedding instance of @type. */
+#define container_of(ptr, type, member) \
+ ((type *)((char *)(ptr)-(char *)(&((type *)0)->member)))
+#endif
+
+static inline int __is_po2(unsigned long long val)
+{
+ return !(val & (val - 1));
+}
+
+#define IS_PO2(val) __is_po2((unsigned long long)(val))
+
+#define LOWEST_BIT_SET(x) ((x) & ~((x) - 1))
+
+/*
+ * Lustre Error Checksum: calculates checksum
+ * of Hex number by XORing each bit.
+ */
+#define LERRCHKSUM(hexnum) (((hexnum) & 0xf) ^ ((hexnum) >> 4 & 0xf) ^ \
+ ((hexnum) >> 8 & 0xf))
+
+#define LUSTRE_SRV_LNET_PID LUSTRE_LNET_PID
+
+#include <linux/list.h>
+
+/* libcfs tcpip */
+int libcfs_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask);
+int libcfs_ipif_enumerate(char ***names);
+void libcfs_ipif_free_enumeration(char **names, int n);
+int libcfs_sock_listen(socket_t **sockp, __u32 ip, int port, int backlog);
+int libcfs_sock_accept(socket_t **newsockp, socket_t *sock);
+void libcfs_sock_abort_accept(socket_t *sock);
+int libcfs_sock_connect(socket_t **sockp, int *fatal,
+ __u32 local_ip, int local_port,
+ __u32 peer_ip, int peer_port);
+int libcfs_sock_setbuf(socket_t *socket, int txbufsize, int rxbufsize);
+int libcfs_sock_getbuf(socket_t *socket, int *txbufsize, int *rxbufsize);
+int libcfs_sock_getaddr(socket_t *socket, int remote, __u32 *ip, int *port);
+int libcfs_sock_write(socket_t *sock, void *buffer, int nob, int timeout);
+int libcfs_sock_read(socket_t *sock, void *buffer, int nob, int timeout);
+void libcfs_sock_release(socket_t *sock);
+
+/* need both kernel and user-land acceptor */
+#define LNET_ACCEPTOR_MIN_RESERVED_PORT 512
+#define LNET_ACCEPTOR_MAX_RESERVED_PORT 1023
+
+/*
+ * libcfs pseudo device operations
+ *
+ * It's just draft now.
+ */
+
+struct cfs_psdev_file {
+ unsigned long off;
+ void *private_data;
+ unsigned long reserved1;
+ unsigned long reserved2;
+};
+
+struct cfs_psdev_ops {
+ int (*p_open)(unsigned long, void *);
+ int (*p_close)(unsigned long, void *);
+ int (*p_read)(struct cfs_psdev_file *, char *, unsigned long);
+ int (*p_write)(struct cfs_psdev_file *, char *, unsigned long);
+ int (*p_ioctl)(struct cfs_psdev_file *, unsigned long, void *);
+};
+
+/*
+ * Drop into debugger, if possible. Implementation is provided by platform.
+ */
+
+void cfs_enter_debugger(void);
+
+/*
+ * Defined by platform
+ */
+int unshare_fs_struct(void);
+sigset_t cfs_get_blocked_sigs(void);
+sigset_t cfs_block_allsigs(void);
+sigset_t cfs_block_sigs(unsigned long sigs);
+sigset_t cfs_block_sigsinv(unsigned long sigs);
+void cfs_restore_sigs(sigset_t);
+int cfs_signal_pending(void);
+void cfs_clear_sigpending(void);
+
+/*
+ * Random number handling
+ */
+
+/* returns a random 32-bit integer */
+unsigned int cfs_rand(void);
+/* seed the generator */
+void cfs_srand(unsigned int, unsigned int);
+void cfs_get_random_bytes(void *buf, int size);
+
+#include <linux/libcfs/libcfs_debug.h>
+#include <linux/libcfs/libcfs_cpu.h>
+#include <linux/libcfs/libcfs_private.h>
+#include <linux/libcfs/libcfs_ioctl.h>
+#include <linux/libcfs/libcfs_prim.h>
+#include <linux/libcfs/libcfs_time.h>
+#include <linux/libcfs/libcfs_string.h>
+#include <linux/libcfs/libcfs_kernelcomm.h>
+#include <linux/libcfs/libcfs_workitem.h>
+#include <linux/libcfs/libcfs_hash.h>
+#include <linux/libcfs/libcfs_heap.h>
+#include <linux/libcfs/libcfs_fail.h>
+#include <linux/libcfs/params_tree.h>
+#include <linux/libcfs/libcfs_crypto.h>
+
+/* container_of depends on "likely" which is defined in libcfs_private.h */
+static inline void *__container_of(void *ptr, unsigned long shift)
+{
+ if (unlikely(IS_ERR(ptr) || ptr == NULL))
+ return ptr;
+ else
+ return (char *)ptr - shift;
+}
+
+#define container_of0(ptr, type, member) \
+ ((type *)__container_of((void *)(ptr), offsetof(type, member)))
+
+#define SET_BUT_UNUSED(a) do { } while(sizeof(a) - sizeof(a))
+
+#define _LIBCFS_H
+
+#endif /* _LIBCFS_H */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
new file mode 100644
index 0000000..c87efb4
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
@@ -0,0 +1,214 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/include/libcfs/libcfs_cpu.h
+ *
+ * CPU partition
+ * . a CPU partition is a virtual processing unit
+ *
+ * . a CPU partition can contain 1-N cores or 1-N NUMA nodes;
+ * in other words, a CPU partition is a pool of processors.
+ *
+ * CPU Partition Table (CPT)
+ * . a set of CPU partitions
+ *
+ * . There are two modes for CPT: CFS_CPU_MODE_NUMA and CFS_CPU_MODE_SMP
+ *
+ * . The user can specify the total number of CPU partitions while creating a
+ * CPT; CPU partition IDs always start from 0.
+ *
+ * Example: if there are 8 cores on the system, while creating a CPT
+ * with cpu_npartitions=4:
+ * core[0, 1] = partition[0], core[2, 3] = partition[1]
+ * core[4, 5] = partition[2], core[6, 7] = partition[3]
+ *
+ * cpu_npartitions=1:
+ * core[0, 1, ... 7] = partition[0]
+ *
+ * . User can also specify CPU partitions by string pattern
+ *
+ * Examples: cpu_partitions="0[0,1], 1[2,3]"
+ * cpu_partitions="N 0[0-3], 1[4-8]"
+ *
+ * The first character "N" means the following numbers are NUMA IDs
+ *
+ * . NUMA allocators, CPU affinity threads are built over CPU partitions,
+ * instead of HW CPUs or HW nodes.
+ *
+ * . By default, Lustre modules should refer to the global cfs_cpt_table,
+ * instead of accessing HW CPUs directly, so concurrency of Lustre can be
+ * configured by cpu_npartitions of the global cfs_cpt_table
+ *
+ * . If cpu_npartitions=1 (all CPUs in one pool), Lustre should work the
+ * same way as version 2.2 or earlier
+ *
+ * Author: liang@whamcloud.com
+ */
+
+#ifndef __LIBCFS_CPU_H__
+#define __LIBCFS_CPU_H__
+
+/* any CPU partition */
+#define CFS_CPT_ANY (-1)
+
+#ifdef CONFIG_SMP
+/**
+ * return cpumask of CPU partition \a cpt
+ */
+cpumask_t *cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt);
+/**
+ * print string information of cpt-table
+ */
+int cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len);
+#else /* !CONFIG_SMP */
+struct cfs_cpt_table {
+ /* # of CPU partitions */
+ int ctb_nparts;
+ /* cpu mask */
+ cpumask_t ctb_mask;
+ /* node mask */
+ nodemask_t ctb_nodemask;
+ /* version */
+ __u64 ctb_version;
+};
+
+static inline cpumask_t *
+cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt)
+{
+ return NULL;
+}
+
+static inline int
+cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
+{
+ return 0;
+}
+#endif /* CONFIG_SMP */
+
+extern struct cfs_cpt_table *cfs_cpt_table;
+
+/**
+ * destroy a CPU partition table
+ */
+void cfs_cpt_table_free(struct cfs_cpt_table *cptab);
+/**
+ * create a cfs_cpt_table with \a ncpt number of partitions
+ */
+struct cfs_cpt_table *cfs_cpt_table_alloc(unsigned int ncpt);
+/**
+ * return total number of CPU partitions in \a cptab
+ */
+int cfs_cpt_number(struct cfs_cpt_table *cptab);
+/**
+ * return number of HW cores or hyper-threads in a CPU partition \a cpt
+ */
+int cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt);
+/**
+ * is there any online CPU in CPU partition \a cpt
+ */
+int cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt);
+/**
+ * return nodemask of CPU partition \a cpt
+ */
+nodemask_t *cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt);
+/**
+ * map the current HW processor ID to a CPU-partition ID of \a cptab
+ */
+int cfs_cpt_current(struct cfs_cpt_table *cptab, int remap);
+/**
+ * map HW processor ID \a cpu to a CPU-partition ID by \a cptab
+ */
+int cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu);
+/**
+ * bind current thread on a CPU-partition \a cpt of \a cptab
+ */
+int cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt);
+/**
+ * add \a cpu to CPU partition \a cpt of \a cptab, return 1 for success,
+ * otherwise 0 is returned
+ */
+int cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu);
+/**
+ * remove \a cpu from CPU partition \a cpt of \a cptab
+ */
+void cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu);
+/**
+ * add all cpus in \a mask to CPU partition \a cpt
+ * return 1 if successfully set all CPUs, otherwise return 0
+ */
+int cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab,
+ int cpt, cpumask_t *mask);
+/**
+ * remove all cpus in \a mask from CPU partition \a cpt
+ */
+void cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab,
+ int cpt, cpumask_t *mask);
+/**
+ * add all cpus in NUMA node \a node to CPU partition \a cpt
+ * return 1 if successfully set all CPUs, otherwise return 0
+ */
+int cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node);
+/**
+ * remove all cpus in NUMA node \a node from CPU partition \a cpt
+ */
+void cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node);
+
+/**
+ * add all cpus in node mask \a mask to CPU partition \a cpt
+ * return 1 if successfully set all CPUs, otherwise return 0
+ */
+int cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab,
+ int cpt, nodemask_t *mask);
+/**
+ * remove all cpus in node mask \a mask from CPU partition \a cpt
+ */
+void cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab,
+ int cpt, nodemask_t *mask);
+/**
+ * unset all cpus for CPU partition \a cpt
+ */
+void cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt);
+/**
+ * convert partition id \a cpt to a NUMA node id; if there is more than one
+ * node in this partition, it might return a different node id each time.
+ */
+int cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt);
+
+/**
+ * iterate over all CPU partitions in \a cptab
+ */
+#define cfs_cpt_for_each(i, cptab) \
+ for (i = 0; i < cfs_cpt_number(cptab); i++)
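+
+/*
+ * Usage sketch (illustrative, not part of this patch): walk every partition
+ * of the global cfs_cpt_table and bind the current thread to each in turn.
+ * do_partition_work() is a hypothetical per-partition helper; error handling
+ * is abbreviated.
+ *
+ *	int cpt;
+ *	int rc;
+ *
+ *	cfs_cpt_for_each(cpt, cfs_cpt_table) {
+ *		if (cfs_cpt_weight(cfs_cpt_table, cpt) == 0)
+ *			continue;	// no HW cores in this partition
+ *		rc = cfs_cpt_bind(cfs_cpt_table, cpt);
+ *		if (rc != 0)
+ *			break;
+ *		do_partition_work(cpt);
+ *	}
+ */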
+
+int cfs_cpu_init(void);
+void cfs_cpu_fini(void);
+
+#endif /* __LIBCFS_CPU_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
new file mode 100644
index 0000000..776e9c0
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
@@ -0,0 +1,201 @@
+/* GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see http://www.gnu.org/licenses
+ *
+ * Please visit http://www.xyratex.com/contact if you need additional
+ * information or have any questions.
+ *
+ * GPL HEADER END
+ */
+
+/*
+ * Copyright 2012 Xyratex Technology Limited
+ */
+
+#ifndef _LIBCFS_CRYPTO_H
+#define _LIBCFS_CRYPTO_H
+
+struct cfs_crypto_hash_type {
+ char *cht_name; /**< hash algorithm name, equal to
+ * format name for crypto api */
+ unsigned int cht_key; /**< init key by default (valid for
+ * 4-byte contexts like crc32, adler) */
+ unsigned int cht_size; /**< hash digest size */
+};
+
+enum cfs_crypto_hash_alg {
+ CFS_HASH_ALG_NULL = 0,
+ CFS_HASH_ALG_ADLER32,
+ CFS_HASH_ALG_CRC32,
+ CFS_HASH_ALG_MD5,
+ CFS_HASH_ALG_SHA1,
+ CFS_HASH_ALG_SHA256,
+ CFS_HASH_ALG_SHA384,
+ CFS_HASH_ALG_SHA512,
+ CFS_HASH_ALG_CRC32C,
+ CFS_HASH_ALG_MAX
+};
+
+static struct cfs_crypto_hash_type hash_types[] = {
+ [CFS_HASH_ALG_NULL] = { "null", 0, 0 },
+ [CFS_HASH_ALG_ADLER32] = { "adler32", 1, 4 },
+ [CFS_HASH_ALG_CRC32] = { "crc32", ~0, 4 },
+ [CFS_HASH_ALG_CRC32C] = { "crc32c", ~0, 4 },
+ [CFS_HASH_ALG_MD5] = { "md5", 0, 16 },
+ [CFS_HASH_ALG_SHA1] = { "sha1", 0, 20 },
+ [CFS_HASH_ALG_SHA256] = { "sha256", 0, 32 },
+ [CFS_HASH_ALG_SHA384] = { "sha384", 0, 48 },
+ [CFS_HASH_ALG_SHA512] = { "sha512", 0, 64 },
+};
+
+/** Return pointer to type of hash for valid hash algorithm identifier */
+static inline const struct cfs_crypto_hash_type *
+ cfs_crypto_hash_type(unsigned char hash_alg)
+{
+ struct cfs_crypto_hash_type *ht;
+
+ if (hash_alg < CFS_HASH_ALG_MAX) {
+ ht = &hash_types[hash_alg];
+ if (ht->cht_name)
+ return ht;
+ }
+ return NULL;
+}
+
+/** Return hash name for valid hash algorithm identifier or "unknown" */
+static inline const char *cfs_crypto_hash_name(unsigned char hash_alg)
+{
+ const struct cfs_crypto_hash_type *ht;
+
+ ht = cfs_crypto_hash_type(hash_alg);
+ if (ht)
+ return ht->cht_name;
+ else
+ return "unknown";
+}
+
+/** Return digest size for valid algorithm identifier or 0 */
+static inline int cfs_crypto_hash_digestsize(unsigned char hash_alg)
+{
+ const struct cfs_crypto_hash_type *ht;
+
+ ht = cfs_crypto_hash_type(hash_alg);
+ if (ht)
+ return ht->cht_size;
+ else
+ return 0;
+}
+
+/** Return hash identifier for valid hash algorithm name or 0xFF */
+static inline unsigned char cfs_crypto_hash_alg(const char *algname)
+{
+ unsigned char i;
+
+ for (i = 0; i < CFS_HASH_ALG_MAX; i++)
+ if (!strcmp(hash_types[i].cht_name, algname))
+ break;
+ return (i == CFS_HASH_ALG_MAX ? 0xFF : i);
+}
+
+/** Calculate hash digest for buffer.
+ * @param alg id of hash algorithm
+ * @param buf buffer of data
+ * @param buf_len buffer len
+ * @param key initial value for algorithm, if it is NULL,
+ * default initial value should be used.
+ * @param key_len len of initial value
+ * @param hash [out] pointer to hash; if it is NULL, hash_len is
+ * set to the valid digest size in bytes and -ENOSPC is returned.
+ * @param hash_len [in,out] size of hash buffer
+ * @returns status of operation
+ * @retval -EINVAL if buf, buf_len, hash_len or alg_id is invalid
+ * @retval -ENODEV if this algorithm is unsupported
+ * @retval -ENOSPC if pointer to hash is NULL, or hash_len less than
+ * digest size
+ * @retval 0 for success
+ * @retval < 0 other errors from lower layers.
+ */
+int cfs_crypto_hash_digest(unsigned char alg,
+ const void *buf, unsigned int buf_len,
+ unsigned char *key, unsigned int key_len,
+ unsigned char *hash, unsigned int *hash_len);
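+
+/*
+ * Usage sketch (illustrative, not part of this patch): one-shot SHA-256
+ * digest of a buffer using the algorithm's default initial value. "data",
+ * "data_len" and use_digest() are hypothetical.
+ *
+ *	unsigned char hash[64];
+ *	unsigned int hash_len = sizeof(hash);
+ *	int rc;
+ *
+ *	rc = cfs_crypto_hash_digest(CFS_HASH_ALG_SHA256, data, data_len,
+ *				    NULL, 0, hash, &hash_len);
+ *	if (rc == 0)
+ *		use_digest(hash, hash_len);	// 32-byte digest
+ */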
+
+/* cfs crypto hash descriptor */
+struct cfs_crypto_hash_desc;
+
+/** Allocate and initialize descriptor for hash algorithm.
+ * @param alg algorithm id
+ * @param key initial value for algorithm, if it is NULL,
+ * default initial value should be used.
+ * @param key_len len of initial value
+ * @returns pointer to descriptor of hash instance
+ * @retval ERR_PTR(error) when errors occurred.
+ */
+struct cfs_crypto_hash_desc *
+ cfs_crypto_hash_init(unsigned char alg,
+ unsigned char *key, unsigned int key_len);
+
+/** Update digest by part of data.
+ * @param desc hash descriptor
+ * @param page data page
+ * @param offset data offset
+ * @param len data len
+ * @returns status of operation
+ * @retval 0 for success.
+ */
+int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *desc,
+ struct page *page, unsigned int offset,
+ unsigned int len);
+
+/** Update digest by part of data.
+ * @param desc hash descriptor
+ * @param buf pointer to data buffer
+ * @param buf_len size of data at buffer
+ * @returns status of operation
+ * @retval 0 for success.
+ */
+int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *desc, const void *buf,
+ unsigned int buf_len);
+
+/** Finalize hash calculation, copy hash digest to buffer, destroy hash
+ * descriptor.
+ * @param desc hash descriptor
+ * @param hash buffer pointer to store hash digest
+ * @param hash_len pointer to hash buffer size, if NULL
+ * destroy hash descriptor
+ * @returns status of operation
+ * @retval -ENOSPC if hash is NULL, or *hash_len less than
+ * digest size
+ * @retval 0 for success
+ * @retval < 0 other errors from lower layers.
+ */
+int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *desc,
+ unsigned char *hash, unsigned int *hash_len);
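+
+/*
+ * Usage sketch (illustrative, not part of this patch): incremental hashing
+ * over several buffers with init/update/final. "buf1"/"buf2" and their
+ * lengths are hypothetical; error handling is abbreviated.
+ *
+ *	struct cfs_crypto_hash_desc *desc;
+ *	unsigned char hash[64];
+ *	unsigned int hash_len = sizeof(hash);
+ *
+ *	desc = cfs_crypto_hash_init(CFS_HASH_ALG_CRC32C, NULL, 0);
+ *	if (IS_ERR(desc))
+ *		return PTR_ERR(desc);
+ *	cfs_crypto_hash_update(desc, buf1, len1);
+ *	cfs_crypto_hash_update(desc, buf2, len2);
+ *	return cfs_crypto_hash_final(desc, hash, &hash_len);
+ */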
+/**
+ * Register crypto hash algorithms
+ */
+int cfs_crypto_register(void);
+
+/**
+ * Unregister
+ */
+void cfs_crypto_unregister(void);
+
+/** Return hash speed in Mbytes per second for valid hash algorithm
+ * identifier. If the test was unsuccessful, -1 is returned.
+ */
+int cfs_crypto_hash_speed(unsigned char hash_alg);
+#endif /* _LIBCFS_CRYPTO_H */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
new file mode 100644
index 0000000..e6439d1
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
@@ -0,0 +1,282 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/include/libcfs/libcfs_debug.h
+ *
+ * Debug messages and assertions
+ *
+ */
+
+#ifndef __LIBCFS_DEBUG_H__
+#define __LIBCFS_DEBUG_H__
+
+/*
+ * Debugging
+ */
+extern unsigned int libcfs_subsystem_debug;
+extern unsigned int libcfs_stack;
+extern unsigned int libcfs_debug;
+extern unsigned int libcfs_printk;
+extern unsigned int libcfs_console_ratelimit;
+extern unsigned int libcfs_watchdog_ratelimit;
+extern unsigned int libcfs_console_max_delay;
+extern unsigned int libcfs_console_min_delay;
+extern unsigned int libcfs_console_backoff;
+extern unsigned int libcfs_debug_binary;
+extern char libcfs_debug_file_path_arr[PATH_MAX];
+
+int libcfs_debug_mask2str(char *str, int size, int mask, int is_subsys);
+int libcfs_debug_str2mask(int *mask, const char *str, int is_subsys);
+
+/* Has there been an LBUG? */
+extern unsigned int libcfs_catastrophe;
+extern unsigned int libcfs_panic_on_lbug;
+
+/**
+ * Format for debug message headers
+ */
+struct ptldebug_header {
+ __u32 ph_len;
+ __u32 ph_flags;
+ __u32 ph_subsys;
+ __u32 ph_mask;
+ __u16 ph_cpu_id;
+ __u16 ph_type;
+ __u32 ph_sec;
+ __u64 ph_usec;
+ __u32 ph_stack;
+ __u32 ph_pid;
+ __u32 ph_extern_pid;
+ __u32 ph_line_num;
+} __attribute__((packed));
+
+
+#define PH_FLAG_FIRST_RECORD 1
+
+/* Debugging subsystems (32 bits, non-overlapping) */
+/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */
+#define S_UNDEFINED 0x00000001
+#define S_MDC 0x00000002
+#define S_MDS 0x00000004
+#define S_OSC 0x00000008
+#define S_OST 0x00000010
+#define S_CLASS 0x00000020
+#define S_LOG 0x00000040
+#define S_LLITE 0x00000080
+#define S_RPC 0x00000100
+#define S_MGMT 0x00000200
+#define S_LNET 0x00000400
+#define S_LND 0x00000800 /* ALL LNDs */
+#define S_PINGER 0x00001000
+#define S_FILTER 0x00002000
+/* unused */
+#define S_ECHO 0x00008000
+#define S_LDLM 0x00010000
+#define S_LOV 0x00020000
+#define S_LQUOTA 0x00040000
+#define S_OSD 0x00080000
+/* unused */
+/* unused */
+/* unused */
+#define S_LMV 0x00800000 /* b_new_cmd */
+/* unused */
+#define S_SEC 0x02000000 /* upcall cache */
+#define S_GSS 0x04000000 /* b_new_cmd */
+/* unused */
+#define S_MGC 0x10000000
+#define S_MGS 0x20000000
+#define S_FID 0x40000000 /* b_new_cmd */
+#define S_FLD 0x80000000 /* b_new_cmd */
+/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */
+
+/* Debugging masks (32 bits, non-overlapping) */
+/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */
+#define D_TRACE 0x00000001 /* ENTRY/EXIT markers */
+#define D_INODE 0x00000002
+#define D_SUPER 0x00000004
+#define D_EXT2 0x00000008 /* anything from ext2_debug */
+#define D_MALLOC 0x00000010 /* print malloc, free information */
+#define D_CACHE 0x00000020 /* cache-related items */
+#define D_INFO 0x00000040 /* general information */
+#define D_IOCTL 0x00000080 /* ioctl related information */
+#define D_NETERROR 0x00000100 /* network errors */
+#define D_NET 0x00000200 /* network communications */
+#define D_WARNING 0x00000400 /* CWARN(...) == CDEBUG (D_WARNING, ...) */
+#define D_BUFFS 0x00000800
+#define D_OTHER 0x00001000
+#define D_DENTRY 0x00002000
+#define D_NETTRACE 0x00004000
+#define D_PAGE 0x00008000 /* bulk page handling */
+#define D_DLMTRACE 0x00010000
+#define D_ERROR 0x00020000 /* CERROR(...) == CDEBUG (D_ERROR, ...) */
+#define D_EMERG 0x00040000 /* CEMERG(...) == CDEBUG (D_EMERG, ...) */
+#define D_HA 0x00080000 /* recovery and failover */
+#define D_RPCTRACE 0x00100000 /* for distributed debugging */
+#define D_VFSTRACE 0x00200000
+#define D_READA 0x00400000 /* read-ahead */
+#define D_MMAP 0x00800000
+#define D_CONFIG 0x01000000
+#define D_CONSOLE 0x02000000
+#define D_QUOTA 0x04000000
+#define D_SEC 0x08000000
+#define D_LFSCK 0x10000000 /* For both OI scrub and LFSCK */
+/* keep these in sync with lnet/{utils,libcfs}/debug.c */
+
+#define D_HSM D_TRACE
+
+#define D_CANTMASK (D_ERROR | D_EMERG | D_WARNING | D_CONSOLE)
+
+#ifndef DEBUG_SUBSYSTEM
+# define DEBUG_SUBSYSTEM S_UNDEFINED
+#endif
+
+#define CDEBUG_DEFAULT_MAX_DELAY (cfs_time_seconds(600)) /* jiffies */
+#define CDEBUG_DEFAULT_MIN_DELAY ((cfs_time_seconds(1) + 1) / 2) /* jiffies */
+#define CDEBUG_DEFAULT_BACKOFF 2
+typedef struct {
+ cfs_time_t cdls_next;
+ unsigned int cdls_delay;
+ int cdls_count;
+} cfs_debug_limit_state_t;
+
+struct libcfs_debug_msg_data {
+ const char *msg_file;
+ const char *msg_fn;
+ int msg_subsys;
+ int msg_line;
+ int msg_mask;
+ cfs_debug_limit_state_t *msg_cdls;
+};
+
+#define LIBCFS_DEBUG_MSG_DATA_INIT(data, mask, cdls) \
+do { \
+ (data)->msg_subsys = DEBUG_SUBSYSTEM; \
+ (data)->msg_file = __FILE__; \
+ (data)->msg_fn = __FUNCTION__; \
+ (data)->msg_line = __LINE__; \
+ (data)->msg_cdls = (cdls); \
+ (data)->msg_mask = (mask); \
+} while (0)
+
+#define LIBCFS_DEBUG_MSG_DATA_DECL(dataname, mask, cdls) \
+ static struct libcfs_debug_msg_data dataname = { \
+ .msg_subsys = DEBUG_SUBSYSTEM, \
+ .msg_file = __FILE__, \
+ .msg_fn = __FUNCTION__, \
+ .msg_line = __LINE__, \
+ .msg_cdls = (cdls) }; \
+ dataname.msg_mask = (mask);
+
+
+
+/**
+ * Filters out logging messages based on mask and subsystem.
+ */
+static inline int cfs_cdebug_show(unsigned int mask, unsigned int subsystem)
+{
+ return mask & D_CANTMASK ||
+ ((libcfs_debug & mask) && (libcfs_subsystem_debug & subsystem));
+}
+
+#define __CDEBUG(cdls, mask, format, ...) \
+do { \
+ static struct libcfs_debug_msg_data msgdata; \
+ \
+ CFS_CHECK_STACK(&msgdata, mask, cdls); \
+ \
+ if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ LIBCFS_DEBUG_MSG_DATA_INIT(&msgdata, mask, cdls); \
+ libcfs_debug_msg(&msgdata, format, ## __VA_ARGS__); \
+ } \
+} while (0)
+
+#define CDEBUG(mask, format, ...) __CDEBUG(NULL, mask, format, ## __VA_ARGS__)
+
+#define CDEBUG_LIMIT(mask, format, ...) \
+do { \
+ static cfs_debug_limit_state_t cdls; \
+ \
+ __CDEBUG(&cdls, mask, format, ## __VA_ARGS__);\
+} while (0)
+
+
+
+
+#define CWARN(format, ...) CDEBUG_LIMIT(D_WARNING, format, ## __VA_ARGS__)
+#define CERROR(format, ...) CDEBUG_LIMIT(D_ERROR, format, ## __VA_ARGS__)
+#define CNETERR(format, a...) CDEBUG_LIMIT(D_NETERROR, format, ## a)
+#define CEMERG(format, ...) CDEBUG_LIMIT(D_EMERG, format, ## __VA_ARGS__)
+
+#define LCONSOLE(mask, format, ...) CDEBUG(D_CONSOLE | (mask), format, ## __VA_ARGS__)
+#define LCONSOLE_INFO(format, ...) CDEBUG_LIMIT(D_CONSOLE, format, ## __VA_ARGS__)
+#define LCONSOLE_WARN(format, ...) CDEBUG_LIMIT(D_CONSOLE | D_WARNING, format, ## __VA_ARGS__)
+#define LCONSOLE_ERROR_MSG(errnum, format, ...) CDEBUG_LIMIT(D_CONSOLE | D_ERROR, \
+ "%x-%x: " format, errnum, LERRCHKSUM(errnum), ## __VA_ARGS__)
+#define LCONSOLE_ERROR(format, ...) LCONSOLE_ERROR_MSG(0x00, format, ## __VA_ARGS__)
+
+#define LCONSOLE_EMERG(format, ...) CDEBUG(D_CONSOLE | D_EMERG, format, ## __VA_ARGS__)
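+
+/*
+ * Usage sketch (illustrative, not part of this patch): a module selects its
+ * debugging subsystem before including libcfs headers, then logs through the
+ * CDEBUG()/CERROR() wrappers. The subsystem choice and messages below are
+ * hypothetical.
+ *
+ *	#define DEBUG_SUBSYSTEM S_LNET
+ *	#include <linux/libcfs/libcfs.h>
+ *
+ *	CDEBUG(D_NET, "sending %d bytes to %s\n", nob, dest);
+ *	if (rc != 0)
+ *		CERROR("send failed: rc = %d\n", rc);
+ */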
+
+
+void libcfs_log_goto(struct libcfs_debug_msg_data *, const char *, long_ptr_t);
+#define GOTO(label, rc) \
+do { \
+ if (cfs_cdebug_show(D_TRACE, DEBUG_SUBSYSTEM)) { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_TRACE, NULL); \
+ libcfs_log_goto(&msgdata, #label, (long_ptr_t)(rc)); \
+ } else { \
+ (void)(rc); \
+ } \
+ goto label; \
+} while (0)
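+
+/*
+ * Usage sketch (illustrative, not part of this patch): GOTO() logs the jump
+ * target and return value under D_TRACE before branching, so error paths can
+ * be traced. some_setup() is a hypothetical helper.
+ *
+ *	rc = some_setup();
+ *	if (rc != 0)
+ *		GOTO(out, rc);
+ *	...
+ * out:
+ *	return rc;
+ */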
+
+
+extern int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
+ const char *format1, ...)
+ __attribute__ ((format (printf, 2, 3)));
+
+extern int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
+ const char *format1,
+ va_list args, const char *format2, ...)
+ __attribute__ ((format (printf, 4, 5)));
+
+/* other external symbols that tracefile provides: */
+extern int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
+ const char *usr_buffer, int usr_buffer_nob);
+extern int cfs_trace_copyout_string(char *usr_buffer, int usr_buffer_nob,
+ const char *knl_buffer, char *append);
+
+#define LIBCFS_DEBUG_FILE_PATH_DEFAULT "/tmp/lustre-log"
+
+#endif /* __LIBCFS_DEBUG_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
new file mode 100644
index 0000000..8393c27
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
@@ -0,0 +1,170 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see http://www.gnu.org/licenses
+ *
+ * Please contact Oracle Corporation, Inc., 500 Oracle Parkway, Redwood Shores,
+ * CA 94065 USA or visit www.oracle.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Oracle Corporation, Inc.
+ */
+
+#ifndef _LIBCFS_FAIL_H
+#define _LIBCFS_FAIL_H
+
+extern unsigned long cfs_fail_loc;
+extern unsigned int cfs_fail_val;
+
+extern wait_queue_head_t cfs_race_waitq;
+extern int cfs_race_state;
+
+int __cfs_fail_check_set(__u32 id, __u32 value, int set);
+int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set);
+
+enum {
+ CFS_FAIL_LOC_NOSET = 0,
+ CFS_FAIL_LOC_ORSET = 1,
+ CFS_FAIL_LOC_RESET = 2,
+ CFS_FAIL_LOC_VALUE = 3
+};
+
+/* Failure injection control */
+#define CFS_FAIL_MASK_SYS 0x0000FF00
+#define CFS_FAIL_MASK_LOC (0x000000FF | CFS_FAIL_MASK_SYS)
+
+#define CFS_FAILED_BIT 30
+/* CFS_FAILED is 0x40000000 */
+#define CFS_FAILED (1 << CFS_FAILED_BIT)
+
+#define CFS_FAIL_ONCE_BIT 31
+/* CFS_FAIL_ONCE is 0x80000000 */
+#define CFS_FAIL_ONCE (1 << CFS_FAIL_ONCE_BIT)
+
+/* The following flags are not meant to be combined */
+#define CFS_FAIL_SKIP 0x20000000 /* skip N times then fail */
+#define CFS_FAIL_SOME 0x10000000 /* only fail N times */
+#define CFS_FAIL_RAND 0x08000000 /* fail 1/N of the times */
+#define CFS_FAIL_USR1 0x04000000 /* user flag */
+
+#define CFS_FAIL_PRECHECK(id) (cfs_fail_loc && \
+ (cfs_fail_loc & CFS_FAIL_MASK_LOC) == \
+ ((id) & CFS_FAIL_MASK_LOC))
+
+static inline int cfs_fail_check_set(__u32 id, __u32 value,
+ int set, int quiet)
+{
+ int ret = 0;
+
+ if (unlikely(CFS_FAIL_PRECHECK(id) &&
+ (ret = __cfs_fail_check_set(id, value, set)))) {
+ if (quiet) {
+ CDEBUG(D_INFO, "*** cfs_fail_loc=%x, val=%u***\n",
+ id, value);
+ } else {
+ LCONSOLE_INFO("*** cfs_fail_loc=%x, val=%u***\n",
+ id, value);
+ }
+ }
+
+ return ret;
+}
+
+/* If id hits cfs_fail_loc, return 1, otherwise return 0 */
+#define CFS_FAIL_CHECK(id) \
+ cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET, 0)
+#define CFS_FAIL_CHECK_QUIET(id) \
+ cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET, 1)
+
+/* If id hits cfs_fail_loc and cfs_fail_val == (-1 or value) return 1,
+ * otherwise return 0 */
+#define CFS_FAIL_CHECK_VALUE(id, value) \
+ cfs_fail_check_set(id, value, CFS_FAIL_LOC_VALUE, 0)
+#define CFS_FAIL_CHECK_VALUE_QUIET(id, value) \
+ cfs_fail_check_set(id, value, CFS_FAIL_LOC_VALUE, 1)
+
+/* If id hits cfs_fail_loc, cfs_fail_loc |= value and return 1,
+ * otherwise return 0 */
+#define CFS_FAIL_CHECK_ORSET(id, value) \
+ cfs_fail_check_set(id, value, CFS_FAIL_LOC_ORSET, 0)
+#define CFS_FAIL_CHECK_ORSET_QUIET(id, value) \
+ cfs_fail_check_set(id, value, CFS_FAIL_LOC_ORSET, 1)
+
+/* If id hits cfs_fail_loc, cfs_fail_loc = value and return 1,
+ * otherwise return 0 */
+#define CFS_FAIL_CHECK_RESET(id, value) \
+ cfs_fail_check_set(id, value, CFS_FAIL_LOC_RESET, 0)
+#define CFS_FAIL_CHECK_RESET_QUIET(id, value) \
+ cfs_fail_check_set(id, value, CFS_FAIL_LOC_RESET, 1)
+
+static inline int cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
+{
+ if (unlikely(CFS_FAIL_PRECHECK(id)))
+ return __cfs_fail_timeout_set(id, value, ms, set);
+ else
+ return 0;
+}
+
+/* If id hits cfs_fail_loc, sleep for seconds or milliseconds */
+#define CFS_FAIL_TIMEOUT(id, secs) \
+ cfs_fail_timeout_set(id, 0, secs * 1000, CFS_FAIL_LOC_NOSET)
+
+#define CFS_FAIL_TIMEOUT_MS(id, ms) \
+ cfs_fail_timeout_set(id, 0, ms, CFS_FAIL_LOC_NOSET)
+
+/* If id hits cfs_fail_loc, cfs_fail_loc |= value and
+ * sleep for seconds or milliseconds */
+#define CFS_FAIL_TIMEOUT_ORSET(id, value, secs) \
+ cfs_fail_timeout_set(id, value, secs * 1000, CFS_FAIL_LOC_ORSET)
+
+#define CFS_FAIL_TIMEOUT_MS_ORSET(id, value, ms) \
+ cfs_fail_timeout_set(id, value, ms, CFS_FAIL_LOC_ORSET)
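+
+/*
+ * Usage sketch (illustrative, not part of this patch): a hypothetical
+ * fail_loc id 0x1234 is checked at a code site; when an administrator arms
+ * that id at run time (e.g. through the fail_loc tunable), the failure or
+ * delay below is injected.
+ *
+ *	#define OBD_FAIL_FOO_ENOMEM 0x1234	// hypothetical id
+ *
+ *	if (CFS_FAIL_CHECK(OBD_FAIL_FOO_ENOMEM))
+ *		return -ENOMEM;			// simulated allocation failure
+ *
+ *	CFS_FAIL_TIMEOUT(OBD_FAIL_FOO_ENOMEM, 5);	// or sleep 5 seconds
+ */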
+
+/* The idea here is to synchronise two threads to force a race. The
+ * first thread that calls this with a matching fail_loc is put to
+ * sleep. The next thread that calls with the same fail_loc wakes up
+ * the first and continues. */
+static inline void cfs_race(__u32 id)
+{
+
+ if (CFS_FAIL_PRECHECK(id)) {
+ if (unlikely(__cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET))) {
+ int rc;
+ cfs_race_state = 0;
+ CERROR("cfs_race id %x sleeping\n", id);
+ cfs_wait_event_interruptible(cfs_race_waitq,
+ cfs_race_state != 0, rc);
+ CERROR("cfs_fail_race id %x awake, rc=%d\n", id, rc);
+ } else {
+ CERROR("cfs_fail_race id %x waking\n", id);
+ cfs_race_state = 1;
+ wake_up(&cfs_race_waitq);
+ }
+ }
+}
+#define CFS_RACE(id) cfs_race(id)
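+
+/*
+ * Usage sketch (illustrative, not part of this patch): place CFS_RACE() with
+ * the same (hypothetical) id on the two code paths whose interleaving should
+ * be forced. With the id armed, the first thread to arrive sleeps until the
+ * second thread reaches its CFS_RACE() call and wakes it.
+ *
+ *	// thread A			// thread B
+ *	CFS_RACE(OBD_FAIL_FOO_RACE);	CFS_RACE(OBD_FAIL_FOO_RACE);
+ */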
+
+#endif /* _LIBCFS_FAIL_H */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
new file mode 100644
index 0000000..98f5be2
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
@@ -0,0 +1,851 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/include/libcfs/libcfs_hash.h
+ *
+ * Hashing routines
+ *
+ */
+
+#ifndef __LIBCFS_HASH_H__
+#define __LIBCFS_HASH_H__
+/*
+ * Knuth recommends primes in approximately golden ratio to the maximum
+ * integer representable by a machine word for multiplicative hashing.
+ * Chuck Lever verified the effectiveness of this technique:
+ * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
+ *
+ * These primes are chosen to be bit-sparse, that is operations on
+ * them can use shifts and additions instead of multiplications for
+ * machines where multiplications are slow.
+ */
+/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
+#define CFS_GOLDEN_RATIO_PRIME_32 0x9e370001UL
+/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
+#define CFS_GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL
+
+/*
+ * Ideally we would use HAVE_HASH_LONG for this, but on Linux we configure
+ * the kernel and user space at the same time, so we need to differentiate
+ * between them explicitly. If this is not needed on other architectures, then
+ * we'll need to move the functions to arch-specific headers.
+ */
+
+#include <linux/hash.h>
+
+#define cfs_hash_long(val, bits) hash_long(val, bits)
+
+/** disable debug */
+#define CFS_HASH_DEBUG_NONE 0
+/** record hash depth and output to console when it's too deep,
+ * computing overhead is low but consumes more memory */
+#define CFS_HASH_DEBUG_1 1
+/** expensive, check key validation */
+#define CFS_HASH_DEBUG_2 2
+
+#define CFS_HASH_DEBUG_LEVEL CFS_HASH_DEBUG_NONE
+
+struct cfs_hash_ops;
+struct cfs_hash_lock_ops;
+struct cfs_hash_hlist_ops;
+
+typedef union {
+ rwlock_t rw; /**< rwlock */
+ spinlock_t spin; /**< spinlock */
+} cfs_hash_lock_t;
+
+/**
+ * cfs_hash_bucket is a container of:
+ * - lock, counter ...
+ * - array of hash-head starting from hsb_head[0], hash-head can be one of
+ * . cfs_hash_head_t
+ * . cfs_hash_head_dep_t
+ * . cfs_hash_dhead_t
+ * . cfs_hash_dhead_dep_t
+ * which depends on requirement of user
+ * - some extra bytes (caller can require it while creating hash)
+ */
+typedef struct cfs_hash_bucket {
+ cfs_hash_lock_t hsb_lock; /**< bucket lock */
+ __u32 hsb_count; /**< current entries */
+ __u32 hsb_version; /**< change version */
+ unsigned int hsb_index; /**< index of bucket */
+ int hsb_depmax; /**< max depth on bucket */
+ long hsb_head[0]; /**< hash-head array */
+} cfs_hash_bucket_t;
+
+/**
+ * cfs_hash bucket descriptor, it's normally in stack of caller
+ */
+typedef struct cfs_hash_bd {
+ cfs_hash_bucket_t *bd_bucket; /**< address of bucket */
+ unsigned int bd_offset; /**< offset in bucket */
+} cfs_hash_bd_t;
+
+#define CFS_HASH_NAME_LEN 16 /**< default name length */
+#define CFS_HASH_BIGNAME_LEN 64 /**< bigname for param tree */
+
+#define CFS_HASH_BKT_BITS 3 /**< default bits of bucket */
+#define CFS_HASH_BITS_MAX 30 /**< max bits of bucket */
+#define CFS_HASH_BITS_MIN CFS_HASH_BKT_BITS
+
+/**
+ * common hash attributes.
+ */
+enum cfs_hash_tag {
+ /**
+ * don't need any lock, caller will protect operations with its
+ * own lock. With this flag:
+ * . CFS_HASH_NO_BKTLOCK, CFS_HASH_RW_BKTLOCK, CFS_HASH_SPIN_BKTLOCK
+ * will be ignored.
+ * . Some functions will be disabled with this flag, i.e:
+ * cfs_hash_for_each_empty, cfs_hash_rehash
+ */
+ CFS_HASH_NO_LOCK = 1 << 0,
+ /** no bucket lock, use one spinlock to protect the whole hash */
+ CFS_HASH_NO_BKTLOCK = 1 << 1,
+ /** rwlock to protect bucket */
+ CFS_HASH_RW_BKTLOCK = 1 << 2,
+ /** spinlock to protect bucket */
+ CFS_HASH_SPIN_BKTLOCK = 1 << 3,
+ /** always add new item to tail */
+ CFS_HASH_ADD_TAIL = 1 << 4,
+ /** hash-table doesn't have refcount on item */
+ CFS_HASH_NO_ITEMREF = 1 << 5,
+ /** big name for param-tree */
+ CFS_HASH_BIGNAME = 1 << 6,
+ /** track global count */
+ CFS_HASH_COUNTER = 1 << 7,
+ /** rehash item by new key */
+ CFS_HASH_REHASH_KEY = 1 << 8,
+ /** Enable dynamic hash resizing */
+ CFS_HASH_REHASH = 1 << 9,
+ /** can shrink hash-size */
+ CFS_HASH_SHRINK = 1 << 10,
+ /** assert hash is empty on exit */
+ CFS_HASH_ASSERT_EMPTY = 1 << 11,
+ /** record hlist depth */
+ CFS_HASH_DEPTH = 1 << 12,
+ /**
+ * rehash is always scheduled in a different thread, so current
+ * change on hash table is non-blocking
+ */
+ CFS_HASH_NBLK_CHANGE = 1 << 13,
+ /** NB, we typed hs_flags as __u16; please change it
+ * if you need more than 16 flags */
+};
+
+/** most used attributes */
+#define CFS_HASH_DEFAULT (CFS_HASH_RW_BKTLOCK | \
+ CFS_HASH_COUNTER | CFS_HASH_REHASH)
+
+/**
+ * cfs_hash is a hash-table implementation for general purpose, it can support:
+ * . two refcount modes
+ * hash-table with & without refcount
+ * . four lock modes
+ * nolock, one-spinlock, rw-bucket-lock, spin-bucket-lock
+ * . general operations
+ * lookup, add(add_tail or add_head), delete
+ * . rehash
+ * grows or shrink
+ * . iteration
+ * locked iteration and unlocked iteration
+ * . bigname
+ * support long name hash
+ * . debug
+ * trace max searching depth
+ *
+ * Rehash:
+ * When the htable grows or shrinks, a separate task (cfs_hash_rehash_worker)
+ * is spawned to handle the rehash in the background, it's possible that other
+ * processes can concurrently perform additions, deletions, and lookups
+ * without being blocked on rehash completion, because rehash will release
+ * the global wrlock for each bucket.
+ *
+ * rehash and iteration can't run at the same time because it's too tricky
+ * to keep both of them safe and correct.
+ * As they are relatively rare operations:
+ * . if iteration is in progress while we try to launch rehash, then
+ * rehash just gives up; the iterator will launch rehash at the end.
+ * . if rehash is in progress while we try to iterate the hash table,
+ * then we just wait (it shouldn't take very long); anyway, nobody
+ * should expect iteration of a whole hash-table to be non-blocking.
+ *
+ * During rehashing, a (key,object) pair may be in one of two buckets,
+ * depending on whether the worker task has yet to transfer the object
+ * to its new location in the table. Lookups and deletions need to search both
+ * locations; additions must take care to only insert into the new bucket.
+ */
+
+typedef struct cfs_hash {
+ /** serialize with rehash, or serialize all operations if
+ * the hash-table has CFS_HASH_NO_BKTLOCK */
+ cfs_hash_lock_t hs_lock;
+ /** hash operations */
+ struct cfs_hash_ops *hs_ops;
+ /** hash lock operations */
+ struct cfs_hash_lock_ops *hs_lops;
+ /** hash list operations */
+ struct cfs_hash_hlist_ops *hs_hops;
+ /** hash buckets-table */
+ cfs_hash_bucket_t **hs_buckets;
+ /** total number of items on this hash-table */
+ atomic_t hs_count;
+ /** hash flags, see cfs_hash_tag for detail */
+ __u16 hs_flags;
+ /** # of extra-bytes for bucket, for user saving extended attributes */
+ __u16 hs_extra_bytes;
+ /** wants to iterate */
+ __u8 hs_iterating;
+ /** hash-table is dying */
+ __u8 hs_exiting;
+ /** current hash bits */
+ __u8 hs_cur_bits;
+ /** min hash bits */
+ __u8 hs_min_bits;
+ /** max hash bits */
+ __u8 hs_max_bits;
+ /** bits for rehash */
+ __u8 hs_rehash_bits;
+ /** bits for each bucket */
+ __u8 hs_bkt_bits;
+ /** resize min threshold */
+ __u16 hs_min_theta;
+ /** resize max threshold */
+ __u16 hs_max_theta;
+ /** resize count */
+ __u32 hs_rehash_count;
+ /** # of iterators (caller of cfs_hash_for_each_*) */
+ __u32 hs_iterators;
+ /** rehash workitem */
+ cfs_workitem_t hs_rehash_wi;
+ /** refcount on this hash table */
+ atomic_t hs_refcount;
+ /** rehash buckets-table */
+ cfs_hash_bucket_t **hs_rehash_buckets;
+#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
+ /** serialize debug members */
+ spinlock_t hs_dep_lock;
+ /** max depth */
+ unsigned int hs_dep_max;
+ /** id of the deepest bucket */
+ unsigned int hs_dep_bkt;
+ /** offset in the deepest bucket */
+ unsigned int hs_dep_off;
+ /** bits when we found the max depth */
+ unsigned int hs_dep_bits;
+ /** workitem to output max depth */
+ cfs_workitem_t hs_dep_wi;
+#endif
+ /** name of htable */
+ char hs_name[0];
+} cfs_hash_t;
+
+typedef struct cfs_hash_lock_ops {
+ /** lock the hash table */
+ void (*hs_lock)(cfs_hash_lock_t *lock, int exclusive);
+ /** unlock the hash table */
+ void (*hs_unlock)(cfs_hash_lock_t *lock, int exclusive);
+ /** lock the hash bucket */
+ void (*hs_bkt_lock)(cfs_hash_lock_t *lock, int exclusive);
+ /** unlock the hash bucket */
+ void (*hs_bkt_unlock)(cfs_hash_lock_t *lock, int exclusive);
+} cfs_hash_lock_ops_t;
+
+typedef struct cfs_hash_hlist_ops {
+ /** return hlist_head of hash-head of @bd */
+ struct hlist_head *(*hop_hhead)(cfs_hash_t *hs, cfs_hash_bd_t *bd);
+ /** return hash-head size */
+ int (*hop_hhead_size)(cfs_hash_t *hs);
+ /** add @hnode to hash-head of @bd */
+ int (*hop_hnode_add)(cfs_hash_t *hs,
+ cfs_hash_bd_t *bd, struct hlist_node *hnode);
+ /** remove @hnode from hash-head of @bd */
+ int (*hop_hnode_del)(cfs_hash_t *hs,
+ cfs_hash_bd_t *bd, struct hlist_node *hnode);
+} cfs_hash_hlist_ops_t;
+
+typedef struct cfs_hash_ops {
+ /** return hashed value from @key */
+ unsigned (*hs_hash)(cfs_hash_t *hs, const void *key, unsigned mask);
+ /** return key address of @hnode */
+ void * (*hs_key)(struct hlist_node *hnode);
+ /** copy key from @hnode to @key */
+ void (*hs_keycpy)(struct hlist_node *hnode, void *key);
+ /**
+ * compare @key with key of @hnode
+ * returns 1 on a match
+ */
+ int (*hs_keycmp)(const void *key, struct hlist_node *hnode);
+ /** return object address of @hnode, i.e: container_of(...hnode) */
+ void * (*hs_object)(struct hlist_node *hnode);
+ /** get refcount of item, always called with holding bucket-lock */
+ void (*hs_get)(cfs_hash_t *hs, struct hlist_node *hnode);
+ /** release refcount of item */
+ void (*hs_put)(cfs_hash_t *hs, struct hlist_node *hnode);
+ /** release refcount of item, always called with holding bucket-lock */
+ void (*hs_put_locked)(cfs_hash_t *hs, struct hlist_node *hnode);
+ /** it's called before removing of @hnode */
+ void (*hs_exit)(cfs_hash_t *hs, struct hlist_node *hnode);
+} cfs_hash_ops_t;
+
+/** total number of buckets in @hs */
+#define CFS_HASH_NBKT(hs) \
+ (1U << ((hs)->hs_cur_bits - (hs)->hs_bkt_bits))
+
+/** total number of buckets in @hs while rehashing */
+#define CFS_HASH_RH_NBKT(hs) \
+ (1U << ((hs)->hs_rehash_bits - (hs)->hs_bkt_bits))
+
+/** number of hlists in a bucket */
+#define CFS_HASH_BKT_NHLIST(hs) (1U << (hs)->hs_bkt_bits)
+
+/** total number of hlist in @hs */
+#define CFS_HASH_NHLIST(hs) (1U << (hs)->hs_cur_bits)
+
+/** total number of hlist in @hs while rehashing */
+#define CFS_HASH_RH_NHLIST(hs) (1U << (hs)->hs_rehash_bits)
+
+static inline int
+cfs_hash_with_no_lock(cfs_hash_t *hs)
+{
+ /* caller will serialize all operations for this hash-table */
+ return (hs->hs_flags & CFS_HASH_NO_LOCK) != 0;
+}
+
+static inline int
+cfs_hash_with_no_bktlock(cfs_hash_t *hs)
+{
+ /* no bucket lock, one single lock to protect the hash-table */
+ return (hs->hs_flags & CFS_HASH_NO_BKTLOCK) != 0;
+}
+
+static inline int
+cfs_hash_with_rw_bktlock(cfs_hash_t *hs)
+{
+ /* rwlock to protect hash bucket */
+ return (hs->hs_flags & CFS_HASH_RW_BKTLOCK) != 0;
+}
+
+static inline int
+cfs_hash_with_spin_bktlock(cfs_hash_t *hs)
+{
+ /* spinlock to protect hash bucket */
+ return (hs->hs_flags & CFS_HASH_SPIN_BKTLOCK) != 0;
+}
+
+static inline int
+cfs_hash_with_add_tail(cfs_hash_t *hs)
+{
+ return (hs->hs_flags & CFS_HASH_ADD_TAIL) != 0;
+}
+
+static inline int
+cfs_hash_with_no_itemref(cfs_hash_t *hs)
+{
+ /* hash-table doesn't keep refcount on item,
+ * an item can't be removed from the hash unless its
+ * refcount is zero */
+ return (hs->hs_flags & CFS_HASH_NO_ITEMREF) != 0;
+}
+
+static inline int
+cfs_hash_with_bigname(cfs_hash_t *hs)
+{
+ return (hs->hs_flags & CFS_HASH_BIGNAME) != 0;
+}
+
+static inline int
+cfs_hash_with_counter(cfs_hash_t *hs)
+{
+ return (hs->hs_flags & CFS_HASH_COUNTER) != 0;
+}
+
+static inline int
+cfs_hash_with_rehash(cfs_hash_t *hs)
+{
+ return (hs->hs_flags & CFS_HASH_REHASH) != 0;
+}
+
+static inline int
+cfs_hash_with_rehash_key(cfs_hash_t *hs)
+{
+ return (hs->hs_flags & CFS_HASH_REHASH_KEY) != 0;
+}
+
+static inline int
+cfs_hash_with_shrink(cfs_hash_t *hs)
+{
+ return (hs->hs_flags & CFS_HASH_SHRINK) != 0;
+}
+
+static inline int
+cfs_hash_with_assert_empty(cfs_hash_t *hs)
+{
+ return (hs->hs_flags & CFS_HASH_ASSERT_EMPTY) != 0;
+}
+
+static inline int
+cfs_hash_with_depth(cfs_hash_t *hs)
+{
+ return (hs->hs_flags & CFS_HASH_DEPTH) != 0;
+}
+
+static inline int
+cfs_hash_with_nblk_change(cfs_hash_t *hs)
+{
+ return (hs->hs_flags & CFS_HASH_NBLK_CHANGE) != 0;
+}
+
+static inline int
+cfs_hash_is_exiting(cfs_hash_t *hs)
+{ /* cfs_hash_destroy is called */
+ return hs->hs_exiting;
+}
+
+static inline int
+cfs_hash_is_rehashing(cfs_hash_t *hs)
+{ /* rehash is launched */
+ return hs->hs_rehash_bits != 0;
+}
+
+static inline int
+cfs_hash_is_iterating(cfs_hash_t *hs)
+{ /* someone is calling cfs_hash_for_each_* */
+ return hs->hs_iterating || hs->hs_iterators != 0;
+}
+
+static inline int
+cfs_hash_bkt_size(cfs_hash_t *hs)
+{
+ return offsetof(cfs_hash_bucket_t, hsb_head[0]) +
+ hs->hs_hops->hop_hhead_size(hs) * CFS_HASH_BKT_NHLIST(hs) +
+ hs->hs_extra_bytes;
+}
+
+#define CFS_HOP(hs, op) (hs)->hs_ops->hs_ ## op
+
+static inline unsigned
+cfs_hash_id(cfs_hash_t *hs, const void *key, unsigned mask)
+{
+ return CFS_HOP(hs, hash)(hs, key, mask);
+}
+
+static inline void *
+cfs_hash_key(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+ return CFS_HOP(hs, key)(hnode);
+}
+
+static inline void
+cfs_hash_keycpy(cfs_hash_t *hs, struct hlist_node *hnode, void *key)
+{
+ if (CFS_HOP(hs, keycpy) != NULL)
+ CFS_HOP(hs, keycpy)(hnode, key);
+}
+
+/**
+ * Returns 1 on a match.
+ */
+static inline int
+cfs_hash_keycmp(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
+{
+ return CFS_HOP(hs, keycmp)(key, hnode);
+}
+
+static inline void *
+cfs_hash_object(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+ return CFS_HOP(hs, object)(hnode);
+}
+
+static inline void
+cfs_hash_get(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+ return CFS_HOP(hs, get)(hs, hnode);
+}
+
+static inline void
+cfs_hash_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+ LASSERT(CFS_HOP(hs, put_locked) != NULL);
+
+ return CFS_HOP(hs, put_locked)(hs, hnode);
+}
+
+static inline void
+cfs_hash_put(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+ LASSERT(CFS_HOP(hs, put) != NULL);
+
+ return CFS_HOP(hs, put)(hs, hnode);
+}
+
+static inline void
+cfs_hash_exit(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+ if (CFS_HOP(hs, exit))
+ CFS_HOP(hs, exit)(hs, hnode);
+}
+
+static inline void cfs_hash_lock(cfs_hash_t *hs, int excl)
+{
+ hs->hs_lops->hs_lock(&hs->hs_lock, excl);
+}
+
+static inline void cfs_hash_unlock(cfs_hash_t *hs, int excl)
+{
+ hs->hs_lops->hs_unlock(&hs->hs_lock, excl);
+}
+
+static inline int cfs_hash_dec_and_lock(cfs_hash_t *hs,
+ atomic_t *condition)
+{
+ LASSERT(cfs_hash_with_no_bktlock(hs));
+ return atomic_dec_and_lock(condition, &hs->hs_lock.spin);
+}
+
+static inline void cfs_hash_bd_lock(cfs_hash_t *hs,
+ cfs_hash_bd_t *bd, int excl)
+{
+ hs->hs_lops->hs_bkt_lock(&bd->bd_bucket->hsb_lock, excl);
+}
+
+static inline void cfs_hash_bd_unlock(cfs_hash_t *hs,
+ cfs_hash_bd_t *bd, int excl)
+{
+ hs->hs_lops->hs_bkt_unlock(&bd->bd_bucket->hsb_lock, excl);
+}
+
+/**
+ * operations on cfs_hash bucket (bd: bucket descriptor),
+ * they are normally for hash-table without rehash
+ */
+void cfs_hash_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bd);
+
+static inline void cfs_hash_bd_get_and_lock(cfs_hash_t *hs, const void *key,
+ cfs_hash_bd_t *bd, int excl)
+{
+ cfs_hash_bd_get(hs, key, bd);
+ cfs_hash_bd_lock(hs, bd, excl);
+}
+
+static inline unsigned cfs_hash_bd_index_get(cfs_hash_t *hs, cfs_hash_bd_t *bd)
+{
+ return bd->bd_offset | (bd->bd_bucket->hsb_index << hs->hs_bkt_bits);
+}
+
+static inline void cfs_hash_bd_index_set(cfs_hash_t *hs,
+ unsigned index, cfs_hash_bd_t *bd)
+{
+ bd->bd_bucket = hs->hs_buckets[index >> hs->hs_bkt_bits];
+ bd->bd_offset = index & (CFS_HASH_BKT_NHLIST(hs) - 1U);
+}
+
+static inline void *
+cfs_hash_bd_extra_get(cfs_hash_t *hs, cfs_hash_bd_t *bd)
+{
+ return (void *)bd->bd_bucket +
+ cfs_hash_bkt_size(hs) - hs->hs_extra_bytes;
+}
+
+static inline __u32
+cfs_hash_bd_version_get(cfs_hash_bd_t *bd)
+{
+ /* need hold cfs_hash_bd_lock */
+ return bd->bd_bucket->hsb_version;
+}
+
+static inline __u32
+cfs_hash_bd_count_get(cfs_hash_bd_t *bd)
+{
+ /* need hold cfs_hash_bd_lock */
+ return bd->bd_bucket->hsb_count;
+}
+
+static inline int
+cfs_hash_bd_depmax_get(cfs_hash_bd_t *bd)
+{
+ return bd->bd_bucket->hsb_depmax;
+}
+
+static inline int
+cfs_hash_bd_compare(cfs_hash_bd_t *bd1, cfs_hash_bd_t *bd2)
+{
+ if (bd1->bd_bucket->hsb_index != bd2->bd_bucket->hsb_index)
+ return bd1->bd_bucket->hsb_index - bd2->bd_bucket->hsb_index;
+
+ if (bd1->bd_offset != bd2->bd_offset)
+ return bd1->bd_offset - bd2->bd_offset;
+
+ return 0;
+}
+
+void cfs_hash_bd_add_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode);
+void cfs_hash_bd_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode);
+void cfs_hash_bd_move_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd_old,
+ cfs_hash_bd_t *bd_new, struct hlist_node *hnode);
+
+static inline int cfs_hash_bd_dec_and_lock(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ atomic_t *condition)
+{
+ LASSERT(cfs_hash_with_spin_bktlock(hs));
+ return atomic_dec_and_lock(condition,
+ &bd->bd_bucket->hsb_lock.spin);
+}
+
+static inline struct hlist_head *cfs_hash_bd_hhead(cfs_hash_t *hs,
+ cfs_hash_bd_t *bd)
+{
+ return hs->hs_hops->hop_hhead(hs, bd);
+}
+
+struct hlist_node *cfs_hash_bd_lookup_locked(cfs_hash_t *hs,
+ cfs_hash_bd_t *bd, const void *key);
+struct hlist_node *cfs_hash_bd_peek_locked(cfs_hash_t *hs,
+ cfs_hash_bd_t *bd, const void *key);
+struct hlist_node *cfs_hash_bd_findadd_locked(cfs_hash_t *hs,
+ cfs_hash_bd_t *bd, const void *key,
+ struct hlist_node *hnode,
+ int insist_add);
+struct hlist_node *cfs_hash_bd_finddel_locked(cfs_hash_t *hs,
+ cfs_hash_bd_t *bd, const void *key,
+ struct hlist_node *hnode);
+
+/**
+ * operations on cfs_hash bucket (bd: bucket descriptor),
+ * they are safe for hash-table with rehash
+ */
+void cfs_hash_dual_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bds);
+void cfs_hash_dual_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl);
+void cfs_hash_dual_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl);
+
+static inline void cfs_hash_dual_bd_get_and_lock(cfs_hash_t *hs, const void *key,
+ cfs_hash_bd_t *bds, int excl)
+{
+ cfs_hash_dual_bd_get(hs, key, bds);
+ cfs_hash_dual_bd_lock(hs, bds, excl);
+}
+
+struct hlist_node *cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs,
+ cfs_hash_bd_t *bds,
+ const void *key);
+struct hlist_node *cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs,
+ cfs_hash_bd_t *bds,
+ const void *key,
+ struct hlist_node *hnode,
+ int insist_add);
+struct hlist_node *cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs,
+ cfs_hash_bd_t *bds,
+ const void *key,
+ struct hlist_node *hnode);
+
+/* Hash init/cleanup functions */
+cfs_hash_t *cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
+ unsigned bkt_bits, unsigned extra_bytes,
+ unsigned min_theta, unsigned max_theta,
+ cfs_hash_ops_t *ops, unsigned flags);
+
+cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs);
+void cfs_hash_putref(cfs_hash_t *hs);
+
+/* Hash addition functions */
+void cfs_hash_add(cfs_hash_t *hs, const void *key,
+ struct hlist_node *hnode);
+int cfs_hash_add_unique(cfs_hash_t *hs, const void *key,
+ struct hlist_node *hnode);
+void *cfs_hash_findadd_unique(cfs_hash_t *hs, const void *key,
+ struct hlist_node *hnode);
+
+/* Hash deletion functions */
+void *cfs_hash_del(cfs_hash_t *hs, const void *key, struct hlist_node *hnode);
+void *cfs_hash_del_key(cfs_hash_t *hs, const void *key);
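+
+/*
+ * Usage sketch (illustrative, not part of this patch): create a table with a
+ * hypothetical "my_hash_ops" (a cfs_hash_ops_t providing at least hs_hash,
+ * hs_key, hs_keycmp, hs_object, hs_get and hs_put), then add and look up an
+ * object that embeds a struct hlist_node. CFS_HASH_MIN_THETA/MAX_THETA are
+ * defined later in this header; "obj" and "key" are hypothetical.
+ *
+ *	cfs_hash_t *hs;
+ *
+ *	hs = cfs_hash_create("my_objs", 10, 14, CFS_HASH_BKT_BITS, 0,
+ *			     CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
+ *			     &my_hash_ops, CFS_HASH_DEFAULT);
+ *	if (hs == NULL)
+ *		return -ENOMEM;
+ *
+ *	cfs_hash_add(hs, &obj->o_key, &obj->o_hnode);
+ *	found = cfs_hash_lookup(hs, &key);	// referenced via hs_get()
+ *	...
+ *	cfs_hash_del_key(hs, &obj->o_key);
+ *	cfs_hash_putref(hs);
+ */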
+
+/* Hash lookup/for_each functions */
+#define CFS_HASH_LOOP_HOG 1024
+
+typedef int (*cfs_hash_for_each_cb_t)(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *node, void *data);
+void *cfs_hash_lookup(cfs_hash_t *hs, const void *key);
+void cfs_hash_for_each(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data);
+void cfs_hash_for_each_safe(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data);
+int cfs_hash_for_each_nolock(cfs_hash_t *hs,
+ cfs_hash_for_each_cb_t, void *data);
+int cfs_hash_for_each_empty(cfs_hash_t *hs,
+ cfs_hash_for_each_cb_t, void *data);
+void cfs_hash_for_each_key(cfs_hash_t *hs, const void *key,
+ cfs_hash_for_each_cb_t, void *data);
+typedef int (*cfs_hash_cond_opt_cb_t)(void *obj, void *data);
+void cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t, void *data);
+
+void cfs_hash_hlist_for_each(cfs_hash_t *hs, unsigned hindex,
+ cfs_hash_for_each_cb_t, void *data);
+int cfs_hash_is_empty(cfs_hash_t *hs);
+__u64 cfs_hash_size_get(cfs_hash_t *hs);
+
+/*
+ * Rehash - Theta is calculated to be the average chained
+ * hash depth assuming a perfectly uniform hash function.
+ */
+void cfs_hash_rehash_cancel_locked(cfs_hash_t *hs);
+void cfs_hash_rehash_cancel(cfs_hash_t *hs);
+int cfs_hash_rehash(cfs_hash_t *hs, int do_rehash);
+void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key,
+ void *new_key, struct hlist_node *hnode);
+
+#if CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1
+/* Validate hnode references the correct key */
+static inline void
+cfs_hash_key_validate(cfs_hash_t *hs, const void *key,
+ struct hlist_node *hnode)
+{
+ LASSERT(cfs_hash_keycmp(hs, key, hnode));
+}
+
+/* Validate hnode is in the correct bucket */
+static inline void
+cfs_hash_bucket_validate(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode)
+{
+ cfs_hash_bd_t bds[2];
+
+ cfs_hash_dual_bd_get(hs, cfs_hash_key(hs, hnode), bds);
+ LASSERT(bds[0].bd_bucket == bd->bd_bucket ||
+ bds[1].bd_bucket == bd->bd_bucket);
+}
+
+#else /* CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1 */
+
+static inline void
+cfs_hash_key_validate(cfs_hash_t *hs, const void *key,
+ struct hlist_node *hnode) {}
+
+static inline void
+cfs_hash_bucket_validate(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode) {}
+
+#endif /* CFS_HASH_DEBUG_LEVEL */
+
+#define CFS_HASH_THETA_BITS 10
+#define CFS_HASH_MIN_THETA (1U << (CFS_HASH_THETA_BITS - 1))
+#define CFS_HASH_MAX_THETA (1U << (CFS_HASH_THETA_BITS + 1))
+
+/* Return integer component of theta */
+static inline int __cfs_hash_theta_int(int theta)
+{
+ return (theta >> CFS_HASH_THETA_BITS);
+}
+
+/* Return a fractional value between 0 and 999 */
+static inline int __cfs_hash_theta_frac(int theta)
+{
+ return ((theta * 1000) >> CFS_HASH_THETA_BITS) -
+ (__cfs_hash_theta_int(theta) * 1000);
+}
+
+static inline int __cfs_hash_theta(cfs_hash_t *hs)
+{
+ return (atomic_read(&hs->hs_count) <<
+ CFS_HASH_THETA_BITS) >> hs->hs_cur_bits;
+}
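+
+/*
+ * Worked example (illustrative): with CFS_HASH_THETA_BITS = 10, a table
+ * holding 2048 items spread over 2^10 hash lists (hs_cur_bits = 10) has
+ * theta = (2048 << 10) >> 10 = 2048, so __cfs_hash_theta_int() is 2 and
+ * __cfs_hash_theta_frac() is 0, i.e. an average chain depth of 2.000
+ * entries per hash list.
+ */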
+
+static inline void __cfs_hash_set_theta(cfs_hash_t *hs, int min, int max)
+{
+ LASSERT(min < max);
+ hs->hs_min_theta = (__u16)min;
+ hs->hs_max_theta = (__u16)max;
+}
+
+/* Generic debug formatting routines mainly for proc handler */
+struct seq_file;
+int cfs_hash_debug_header(struct seq_file *m);
+int cfs_hash_debug_str(cfs_hash_t *hs, struct seq_file *m);
+
+/*
+ * Generic djb2 hash algorithm for character arrays.
+ */
+static inline unsigned
+cfs_hash_djb2_hash(const void *key, size_t size, unsigned mask)
+{
+ unsigned i, hash = 5381;
+
+ LASSERT(key != NULL);
+
+ for (i = 0; i < size; i++)
+ hash = hash * 33 + ((char *)key)[i];
+
+ return (hash & mask);
+}
+
+/*
+ * Generic u32 hash algorithm.
+ */
+static inline unsigned
+cfs_hash_u32_hash(const __u32 key, unsigned mask)
+{
+ return ((key * CFS_GOLDEN_RATIO_PRIME_32) & mask);
+}
+
+/*
+ * Generic u64 hash algorithm.
+ */
+static inline unsigned
+cfs_hash_u64_hash(const __u64 key, unsigned mask)
+{
+ return ((unsigned)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask);
+}
+
+/** iterate over all buckets in @bds (array of cfs_hash_bd_t) */
+#define cfs_hash_for_each_bd(bds, n, i) \
+ for (i = 0; i < n && (bds)[i].bd_bucket != NULL; i++)
+
+/** iterate over all buckets of @hs */
+#define cfs_hash_for_each_bucket(hs, bd, pos) \
+ for (pos = 0; \
+ pos < CFS_HASH_NBKT(hs) && \
+ ((bd)->bd_bucket = (hs)->hs_buckets[pos]) != NULL; pos++)
+
+/** iterate over all hlist of bucket @bd */
+#define cfs_hash_bd_for_each_hlist(hs, bd, hlist) \
+ for ((bd)->bd_offset = 0; \
+ (bd)->bd_offset < CFS_HASH_BKT_NHLIST(hs) && \
+ (hlist = cfs_hash_bd_hhead(hs, bd)) != NULL; \
+ (bd)->bd_offset++)
+
+/* !__LIBCFS__HASH_H__ */
+#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_heap.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_heap.h
new file mode 100644
index 0000000..bfa6d7b
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_heap.h
@@ -0,0 +1,200 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details. A copy is
+ * included in the COPYING file that accompanied this code.
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2011 Intel Corporation
+ */
+/*
+ * libcfs/include/libcfs/heap.h
+ *
+ * Author: Eric Barton <eeb@whamcloud.com>
+ * Liang Zhen <liang@whamcloud.com>
+ */
+
+#ifndef __LIBCFS_HEAP_H__
+#define __LIBCFS_HEAP_H__
+
+/** \defgroup heap Binary heap
+ *
+ * The binary heap is a scalable data structure created using a binary tree. It
+ * is capable of maintaining large sets of elements sorted usually by one or
+ * more element properties, but really based on anything that can be used as a
+ * binary predicate in order to determine the relevant ordering of any two nodes
+ * that belong to the set. There is no search operation, rather the intention is
+ * for the element of the lowest priority which will always be at the root of
+ * the tree (as this is an implementation of a min-heap) to be removed by users
+ * for consumption.
+ *
+ * Users of the heap should embed a \e cfs_binheap_node_t object instance in
+ * every object of the set that they wish the binary heap instance to handle,
+ * and (at a minimum) provide a cfs_binheap_ops_t::hop_compare() implementation
+ * which is used by the heap as the binary predicate during its internal sorting
+ * operations.
+ *
+ * The current implementation enforces no locking scheme, and so assumes the
+ * user caters for locking between calls to insert, delete and lookup
+ * operations. Since the only consumer for the data structure at this point
+ * are NRS policies, and these operate on a per-CPT basis, binary heap instances
+ * are tied to a specific CPT.
+ * @{
+ */
+
+/**
+ * Binary heap node.
+ *
+ * Objects of this type are embedded into objects of the ordered set that is to
+ * be maintained by a \e cfs_binheap_t instance.
+ */
+typedef struct {
+ /** Index into the binary tree */
+ unsigned int chn_index;
+} cfs_binheap_node_t;
+
+#define CBH_SHIFT 9
+#define CBH_SIZE (1 << CBH_SHIFT) /* # ptrs per level */
+#define CBH_MASK (CBH_SIZE - 1)
+#define CBH_NOB (CBH_SIZE * sizeof(cfs_binheap_node_t *))
+
+#define CBH_POISON 0xdeadbeef
+
+/**
+ * Binary heap flags.
+ */
+enum {
+ CBH_FLAG_ATOMIC_GROW = 1,
+};
+
+struct cfs_binheap;
+
+/**
+ * Binary heap operations.
+ */
+typedef struct {
+ /**
+ * Called right before inserting a node into the binary heap.
+ *
+ * Implementing this operation is optional.
+ *
+ * \param[in] h The heap
+ * \param[in] e The node
+ *
+ * \retval 0 success
+ * \retval != 0 error
+ */
+ int (*hop_enter)(struct cfs_binheap *h,
+ cfs_binheap_node_t *e);
+ /**
+ * Called right after removing a node from the binary heap.
+ *
+ * Implementing this operation is optional.
+ *
+ * \param[in] h The heap
+ * \param[in] e The node
+ */
+ void (*hop_exit)(struct cfs_binheap *h,
+ cfs_binheap_node_t *e);
+ /**
+ * A binary predicate which is called during internal heap sorting
+ * operations, and used in order to determine the relevant ordering of
+ * two heap nodes.
+ *
+ * Implementing this operation is mandatory.
+ *
+ * \param[in] a The first heap node
+ * \param[in] b The second heap node
+ *
+ * \retval 0 Node a > node b
+ * \retval 1 Node a < node b
+ *
+ * \see cfs_binheap_bubble()
+ * \see cfs_binheap_sink()
+ */
+ int (*hop_compare)(cfs_binheap_node_t *a,
+ cfs_binheap_node_t *b);
+} cfs_binheap_ops_t;
+
+/**
+ * Binary heap object.
+ *
+ * Sorts elements of type \e cfs_binheap_node_t
+ */
+typedef struct cfs_binheap {
+ /** Triple indirect */
+ cfs_binheap_node_t ****cbh_elements3;
+ /** double indirect */
+ cfs_binheap_node_t ***cbh_elements2;
+ /** single indirect */
+ cfs_binheap_node_t **cbh_elements1;
+ /** # elements referenced */
+ unsigned int cbh_nelements;
+ /** high water mark */
+ unsigned int cbh_hwm;
+ /** user flags */
+ unsigned int cbh_flags;
+ /** operations table */
+ cfs_binheap_ops_t *cbh_ops;
+ /** private data */
+ void *cbh_private;
+ /** associated CPT table */
+ struct cfs_cpt_table *cbh_cptab;
+ /** associated CPT id of this cfs_binheap_t::cbh_cptab */
+ int cbh_cptid;
+} cfs_binheap_t;
+
+void cfs_binheap_destroy(cfs_binheap_t *h);
+cfs_binheap_t *cfs_binheap_create(cfs_binheap_ops_t *ops, unsigned int flags,
+ unsigned count, void *arg,
+ struct cfs_cpt_table *cptab, int cptid);
+cfs_binheap_node_t *cfs_binheap_find(cfs_binheap_t *h, unsigned int idx);
+int cfs_binheap_insert(cfs_binheap_t *h, cfs_binheap_node_t *e);
+void cfs_binheap_remove(cfs_binheap_t *h, cfs_binheap_node_t *e);
+
+static inline int
+cfs_binheap_size(cfs_binheap_t *h)
+{
+ return h->cbh_nelements;
+}
+
+static inline int
+cfs_binheap_is_empty(cfs_binheap_t *h)
+{
+ return h->cbh_nelements == 0;
+}
+
+static inline cfs_binheap_node_t *
+cfs_binheap_root(cfs_binheap_t *h)
+{
+ return cfs_binheap_find(h, 0);
+}
+
+static inline cfs_binheap_node_t *
+cfs_binheap_remove_root(cfs_binheap_t *h)
+{
+ cfs_binheap_node_t *e = cfs_binheap_find(h, 0);
+
+ if (e != NULL)
+ cfs_binheap_remove(h, e);
+ return e;
+}
+
+/** @} heap */
+
+#endif /* __LIBCFS_HEAP_H__ */
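The heap API above is easiest to see with a short usage sketch. The element type, its fields and my_elem_heap_demo() below are hypothetical names, and the cfs_cpt_table/CPT id are assumed to come from the caller; only hop_compare() is mandatory, as noted in cfs_binheap_ops_t.

struct my_elem {
	cfs_binheap_node_t	me_node;	/* embedded heap linkage */
	__u64			me_deadline;	/* hypothetical sort key */
};

/* mandatory predicate: return 1 when @a should sort before @b */
static int my_elem_compare(cfs_binheap_node_t *a, cfs_binheap_node_t *b)
{
	struct my_elem *ea = container_of(a, struct my_elem, me_node);
	struct my_elem *eb = container_of(b, struct my_elem, me_node);

	return ea->me_deadline < eb->me_deadline;
}

static cfs_binheap_ops_t my_elem_heap_ops = {
	.hop_enter	= NULL,			/* optional */
	.hop_exit	= NULL,			/* optional */
	.hop_compare	= my_elem_compare,	/* mandatory */
};

static int my_elem_heap_demo(struct cfs_cpt_table *cptab, int cpt,
			     struct my_elem *elem)
{
	cfs_binheap_t *heap;
	cfs_binheap_node_t *root;
	int rc;

	/* caller provides its own locking, as noted in the header */
	heap = cfs_binheap_create(&my_elem_heap_ops, CBH_FLAG_ATOMIC_GROW,
				  CBH_SIZE, NULL, cptab, cpt);
	if (heap == NULL)
		return -ENOMEM;

	rc = cfs_binheap_insert(heap, &elem->me_node);
	if (rc == 0) {
		root = cfs_binheap_remove_root(heap);	/* lowest key first */
		LASSERT(root == &elem->me_node);
	}

	cfs_binheap_destroy(heap);
	return rc;
}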
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
new file mode 100644
index 0000000..5be3679
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
@@ -0,0 +1,222 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/include/libcfs/libcfs_ioctl.h
+ *
+ * Low-level ioctl data structures. Kernel ioctl functions are declared here,
+ * and user space functions are in libcfsutil_ioctl.h.
+ *
+ */
+
+#ifndef __LIBCFS_IOCTL_H__
+#define __LIBCFS_IOCTL_H__
+
+
+#define LIBCFS_IOCTL_VERSION 0x0001000a
+
+struct libcfs_ioctl_data {
+ __u32 ioc_len;
+ __u32 ioc_version;
+
+ __u64 ioc_nid;
+ __u64 ioc_u64[1];
+
+ __u32 ioc_flags;
+ __u32 ioc_count;
+ __u32 ioc_net;
+ __u32 ioc_u32[7];
+
+ __u32 ioc_inllen1;
+ char *ioc_inlbuf1;
+ __u32 ioc_inllen2;
+ char *ioc_inlbuf2;
+
+ __u32 ioc_plen1; /* buffers in userspace */
+ char *ioc_pbuf1;
+ __u32 ioc_plen2; /* buffers in userspace */
+ char *ioc_pbuf2;
+
+ char ioc_bulk[0];
+};
+
+
+struct libcfs_ioctl_hdr {
+ __u32 ioc_len;
+ __u32 ioc_version;
+};
+
+struct libcfs_debug_ioctl_data
+{
+ struct libcfs_ioctl_hdr hdr;
+ unsigned int subs;
+ unsigned int debug;
+};
+
+#define LIBCFS_IOC_INIT(data) \
+do { \
+ memset(&data, 0, sizeof(data)); \
+ data.ioc_version = LIBCFS_IOCTL_VERSION; \
+ data.ioc_len = sizeof(data); \
+} while (0)
+
+
+struct libcfs_ioctl_handler {
+ struct list_head item;
+ int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_data *data);
+};
+
+#define DECLARE_IOCTL_HANDLER(ident, func) \
+ struct libcfs_ioctl_handler ident = { \
+ /* .item = */ LIST_HEAD_INIT(ident.item), \
+ /* .handle_ioctl = */ func \
+ }
+
+
+/* FIXME check conflict with lustre_lib.h */
+#define LIBCFS_IOC_DEBUG_MASK _IOWR('f', 250, long)
+
+
+/* ioctls for manipulating snapshots 30- */
+#define IOC_LIBCFS_TYPE 'e'
+#define IOC_LIBCFS_MIN_NR 30
+/* libcfs ioctls */
+#define IOC_LIBCFS_PANIC _IOWR('e', 30, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_CLEAR_DEBUG _IOWR('e', 31, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_MARK_DEBUG _IOWR('e', 32, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_LWT_CONTROL _IOWR('e', 33, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_LWT_SNAPSHOT _IOWR('e', 34, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_LWT_LOOKUP_STRING _IOWR('e', 35, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_MEMHOG _IOWR('e', 36, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_PING_TEST _IOWR('e', 37, IOCTL_LIBCFS_TYPE)
+/* lnet ioctls */
+#define IOC_LIBCFS_GET_NI _IOWR('e', 50, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_FAIL_NID _IOWR('e', 51, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_ADD_ROUTE _IOWR('e', 52, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_DEL_ROUTE _IOWR('e', 53, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_GET_ROUTE _IOWR('e', 54, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_NOTIFY_ROUTER _IOWR('e', 55, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_UNCONFIGURE _IOWR('e', 56, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_PORTALS_COMPATIBILITY _IOWR('e', 57, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_LNET_DIST _IOWR('e', 58, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_CONFIGURE _IOWR('e', 59, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_TESTPROTOCOMPAT _IOWR('e', 60, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_PING _IOWR('e', 61, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_DEBUG_PEER _IOWR('e', 62, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_LNETST _IOWR('e', 63, IOCTL_LIBCFS_TYPE)
+/* lnd ioctls */
+#define IOC_LIBCFS_REGISTER_MYNID _IOWR('e', 70, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_CLOSE_CONNECTION _IOWR('e', 71, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_PUSH_CONNECTION _IOWR('e', 72, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_GET_CONN _IOWR('e', 73, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_DEL_PEER _IOWR('e', 74, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_ADD_PEER _IOWR('e', 75, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_GET_PEER _IOWR('e', 76, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_GET_TXDESC _IOWR('e', 77, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_ADD_INTERFACE _IOWR('e', 78, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_DEL_INTERFACE _IOWR('e', 79, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_GET_INTERFACE _IOWR('e', 80, IOCTL_LIBCFS_TYPE)
+
+#define IOC_LIBCFS_MAX_NR 80
+
+static inline int libcfs_ioctl_packlen(struct libcfs_ioctl_data *data)
+{
+ int len = sizeof(*data);
+ len += cfs_size_round(data->ioc_inllen1);
+ len += cfs_size_round(data->ioc_inllen2);
+ return len;
+}
+
+static inline int libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
+{
+ if (data->ioc_len > (1<<30)) {
+ CERROR ("LIBCFS ioctl: ioc_len larger than 1<<30\n");
+ return 1;
+ }
+ if (data->ioc_inllen1 > (1<<30)) {
+ CERROR ("LIBCFS ioctl: ioc_inllen1 larger than 1<<30\n");
+ return 1;
+ }
+ if (data->ioc_inllen2 > (1<<30)) {
+ CERROR ("LIBCFS ioctl: ioc_inllen2 larger than 1<<30\n");
+ return 1;
+ }
+ if (data->ioc_inlbuf1 && !data->ioc_inllen1) {
+ CERROR ("LIBCFS ioctl: inlbuf1 pointer but 0 length\n");
+ return 1;
+ }
+ if (data->ioc_inlbuf2 && !data->ioc_inllen2) {
+ CERROR ("LIBCFS ioctl: inlbuf2 pointer but 0 length\n");
+ return 1;
+ }
+ if (data->ioc_pbuf1 && !data->ioc_plen1) {
+ CERROR ("LIBCFS ioctl: pbuf1 pointer but 0 length\n");
+ return 1;
+ }
+ if (data->ioc_pbuf2 && !data->ioc_plen2) {
+ CERROR ("LIBCFS ioctl: pbuf2 pointer but 0 length\n");
+ return 1;
+ }
+ if (data->ioc_plen1 && !data->ioc_pbuf1) {
+ CERROR ("LIBCFS ioctl: plen1 nonzero but no pbuf1 pointer\n");
+ return 1;
+ }
+ if (data->ioc_plen2 && !data->ioc_pbuf2) {
+ CERROR ("LIBCFS ioctl: plen2 nonzero but no pbuf2 pointer\n");
+ return 1;
+ }
+ if ((__u32)libcfs_ioctl_packlen(data) != data->ioc_len ) {
+ CERROR ("LIBCFS ioctl: packlen != ioc_len\n");
+ return 1;
+ }
+ if (data->ioc_inllen1 &&
+ data->ioc_bulk[data->ioc_inllen1 - 1] != '\0') {
+ CERROR ("LIBCFS ioctl: inlbuf1 not 0 terminated\n");
+ return 1;
+ }
+ if (data->ioc_inllen2 &&
+ data->ioc_bulk[cfs_size_round(data->ioc_inllen1) +
+ data->ioc_inllen2 - 1] != '\0') {
+ CERROR ("LIBCFS ioctl: inlbuf2 not 0 terminated\n");
+ return 1;
+ }
+ return 0;
+}
+
+
+extern int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand);
+extern int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand);
+extern int libcfs_ioctl_getdata(char *buf, char *end, void *arg);
+extern int libcfs_ioctl_popdata(void *arg, void *buf, int size);
+
+
+#endif /* __LIBCFS_IOCTL_H__ */
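A minimal sketch of how a kernel module might hook into the ioctl dispatch declared above; my_handle_ioctl(), my_ioctl_handler and the module init/exit functions are hypothetical names.

static int my_handle_ioctl(unsigned int cmd, struct libcfs_ioctl_data *data)
{
	if (libcfs_ioctl_is_invalid(data))
		return -EINVAL;

	switch (cmd) {
	case IOC_LIBCFS_PING_TEST:
		/* do the work; results could be returned to userspace
		 * with libcfs_ioctl_popdata() */
		return 0;
	default:
		return -EINVAL;
	}
}

static DECLARE_IOCTL_HANDLER(my_ioctl_handler, my_handle_ioctl);

static int __init my_module_init(void)
{
	return libcfs_register_ioctl(&my_ioctl_handler);
}

static void __exit my_module_exit(void)
{
	libcfs_deregister_ioctl(&my_ioctl_handler);
}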
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
new file mode 100644
index 0000000..596a15f
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
@@ -0,0 +1,117 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Author: Nathan Rutman <nathan.rutman@sun.com>
+ *
+ * libcfs/include/libcfs/libcfs_kernelcomm.h
+ *
+ * Kernel <-> userspace communication routines.
+ * The definitions below are used in the kernel and userspace.
+ *
+ */
+
+#ifndef __LIBCFS_KERNELCOMM_H__
+#define __LIBCFS_KERNELCOMM_H__
+
+#ifndef __LIBCFS_LIBCFS_H__
+#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
+#endif
+
+
+/* KUC message header.
+ * All current and future KUC messages should use this header.
+ * To avoid having to include Lustre headers from libcfs, define this here.
+ */
+struct kuc_hdr {
+ __u16 kuc_magic;
+ __u8 kuc_transport; /* Each new Lustre feature should use a different
+ transport */
+ __u8 kuc_flags;
+ __u16 kuc_msgtype; /* Message type or opcode, transport-specific */
+ __u16 kuc_msglen; /* Including header */
+} __attribute__((aligned(sizeof(__u64))));
+
+#define KUC_MAGIC 0x191C /*Lustre9etLinC */
+#define KUC_FL_BLOCK 0x01 /* Wait for send */
+
+/* kuc_msgtype values are defined in each transport */
+enum kuc_transport_type {
+ KUC_TRANSPORT_GENERIC = 1,
+ KUC_TRANSPORT_HSM = 2,
+ KUC_TRANSPORT_CHANGELOG = 3,
+};
+
+enum kuc_generic_message_type {
+ KUC_MSG_SHUTDOWN = 1,
+};
+
+/* prototype for callback function on kuc groups */
+typedef int (*libcfs_kkuc_cb_t)(__u32 data, void *cb_arg);
+
+/* KUC Broadcast Groups. This determines which userspace process hears which
+ * messages. Multiple transports may be used within a group, or multiple
+ * groups may use the same transport. Broadcast
+ * groups need not be used if e.g. a UID is specified instead;
+ * use group 0 to signify unicast.
+ */
+#define KUC_GRP_HSM 0x02
+#define KUC_GRP_MAX KUC_GRP_HSM
+
+/* Kernel methods */
+extern int libcfs_kkuc_msg_put(struct file *fp, void *payload);
+extern int libcfs_kkuc_group_put(int group, void *payload);
+extern int libcfs_kkuc_group_add(struct file *fp, int uid, int group,
+ __u32 data);
+extern int libcfs_kkuc_group_rem(int uid, int group);
+extern int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func,
+ void *cb_arg);
+
+#define LK_FLG_STOP 0x01
+
+/* kernelcomm control structure, passed from userspace to kernel */
+typedef struct lustre_kernelcomm {
+ __u32 lk_wfd;
+ __u32 lk_rfd;
+ __u32 lk_uid;
+ __u32 lk_group;
+ __u32 lk_data;
+ __u32 lk_flags;
+} __attribute__((packed)) lustre_kernelcomm;
+
+/* Userspace methods */
+extern int libcfs_ukuc_start(lustre_kernelcomm *l, int groups);
+extern int libcfs_ukuc_stop(lustre_kernelcomm *l);
+extern int libcfs_ukuc_msg_get(lustre_kernelcomm *l, char *buf, int maxsize,
+ int transport);
+
+#endif /* __LIBCFS_KERNELCOMM_H__ */
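A minimal sketch of broadcasting a message with the KUC helpers above; the message structure, field values and group choice are illustrative only.

struct my_kuc_msg {
	struct kuc_hdr	mkm_hdr;	/* KUC header must come first */
	__u32		mkm_value;	/* hypothetical payload */
};

static int my_kuc_broadcast(__u32 value)
{
	struct my_kuc_msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.mkm_hdr.kuc_magic     = KUC_MAGIC;
	msg.mkm_hdr.kuc_transport = KUC_TRANSPORT_GENERIC;
	msg.mkm_hdr.kuc_msgtype   = KUC_MSG_SHUTDOWN;
	msg.mkm_hdr.kuc_msglen    = sizeof(msg);	/* includes header */
	msg.mkm_value             = value;

	/* deliver to every reader registered in the HSM broadcast group */
	return libcfs_kkuc_group_put(KUC_GRP_HSM, &msg);
}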
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
new file mode 100644
index 0000000..e6e417a
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
@@ -0,0 +1,97 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/include/libcfs/libcfs_prim.h
+ *
+ * General primitives.
+ *
+ */
+
+#ifndef __LIBCFS_PRIM_H__
+#define __LIBCFS_PRIM_H__
+
+/*
+ * Schedule
+ */
+void cfs_pause(cfs_duration_t ticks);
+
+/*
+ * Timer
+ */
+typedef void (cfs_timer_func_t)(ulong_ptr_t);
+void schedule_timeout_and_set_state(long, int64_t);
+
+void init_waitqueue_entry_current(wait_queue_t *link);
+int64_t waitq_timedwait(wait_queue_t *, long, int64_t);
+void waitq_wait(wait_queue_t *, long);
+void add_wait_queue_exclusive_head(wait_queue_head_t *, wait_queue_t *);
+
+void cfs_init_timer(struct timer_list *t);
+void cfs_timer_init(struct timer_list *t, cfs_timer_func_t *func, void *arg);
+void cfs_timer_done(struct timer_list *t);
+void cfs_timer_arm(struct timer_list *t, cfs_time_t deadline);
+void cfs_timer_disarm(struct timer_list *t);
+int cfs_timer_is_armed(struct timer_list *t);
+cfs_time_t cfs_timer_deadline(struct timer_list *t);
+
+/*
+ * Memory
+ */
+#ifndef memory_pressure_get
+#define memory_pressure_get() (0)
+#endif
+#ifndef memory_pressure_set
+#define memory_pressure_set() do {} while (0)
+#endif
+#ifndef memory_pressure_clr
+#define memory_pressure_clr() do {} while (0)
+#endif
+
+static inline int cfs_memory_pressure_get_and_set(void)
+{
+ int old = memory_pressure_get();
+
+ if (!old)
+ memory_pressure_set();
+ return old;
+}
+
+static inline void cfs_memory_pressure_restore(int old)
+{
+ if (old)
+ memory_pressure_set();
+ else
+ memory_pressure_clr();
+ return;
+}
+#endif
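A short sketch of the intended pairing of cfs_memory_pressure_get_and_set() and cfs_memory_pressure_restore(); the surrounding function is hypothetical.

static void my_alloc_heavy_section(void)
{
	int old = cfs_memory_pressure_get_and_set();

	/* ... allocation-heavy work runs with the pressure flag set ... */

	cfs_memory_pressure_restore(old);	/* put the flag back as found */
}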
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
new file mode 100644
index 0000000..d0d942c
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -0,0 +1,572 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/include/libcfs/libcfs_private.h
+ *
+ * Various defines for libcfs.
+ *
+ */
+
+#ifndef __LIBCFS_PRIVATE_H__
+#define __LIBCFS_PRIVATE_H__
+
+/* XXX this layering violation is for nidstrings */
+#include <linux/lnet/types.h>
+
+#ifndef DEBUG_SUBSYSTEM
+# define DEBUG_SUBSYSTEM S_UNDEFINED
+#endif
+
+
+
+/*
+ * When this is on, the LASSERT macro includes a check for an assignment used
+ * instead of an equality check, but doesn't use unlikely(). Turn this on from
+ * time to time for test builds. This shouldn't be on in production releases.
+ */
+#define LASSERT_CHECKED (0)
+
+
+#define LASSERTF(cond, fmt, ...) \
+do { \
+ if (unlikely(!(cond))) { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(__msg_data, D_EMERG, NULL); \
+ libcfs_debug_msg(&__msg_data, \
+ "ASSERTION( %s ) failed: " fmt, #cond, \
+ ## __VA_ARGS__); \
+ lbug_with_loc(&__msg_data); \
+ } \
+} while (0)
+
+#define LASSERT(cond) LASSERTF(cond, "\n")
+
+#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
+/**
+ * This is for more expensive checks that one doesn't want enabled all the
+ * time. LINVRNT() has to be explicitly enabled by the
+ * CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK option.
+ */
+# define LINVRNT(exp) LASSERT(exp)
+#else
+# define LINVRNT(exp) ((void)sizeof!!(exp))
+#endif
+
+#define KLASSERT(e) LASSERT(e)
+
+void lbug_with_loc(struct libcfs_debug_msg_data *) __attribute__((noreturn));
+
+#define LBUG() \
+do { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_EMERG, NULL); \
+ lbug_with_loc(&msgdata); \
+} while (0)
+
+extern atomic_t libcfs_kmemory;
+/*
+ * Memory
+ */
+
+# define libcfs_kmem_inc(ptr, size) \
+do { \
+ atomic_add(size, &libcfs_kmemory); \
+} while (0)
+
+# define libcfs_kmem_dec(ptr, size) \
+do { \
+ atomic_sub(size, &libcfs_kmemory); \
+} while (0)
+
+# define libcfs_kmem_read() \
+ atomic_read(&libcfs_kmemory)
+
+
+#ifndef LIBCFS_VMALLOC_SIZE
+#define LIBCFS_VMALLOC_SIZE (2 << PAGE_CACHE_SHIFT) /* 2 pages */
+#endif
+
+#define LIBCFS_ALLOC_PRE(size, mask) \
+do { \
+ LASSERT(!in_interrupt() || \
+ ((size) <= LIBCFS_VMALLOC_SIZE && \
+ ((mask) & GFP_ATOMIC)) != 0); \
+} while (0)
+
+#define LIBCFS_ALLOC_POST(ptr, size) \
+do { \
+ if (unlikely((ptr) == NULL)) { \
+ CERROR("LNET: out of memory at %s:%d (tried to alloc '" \
+ #ptr "' = %d)\n", __FILE__, __LINE__, (int)(size)); \
+ CERROR("LNET: %d total bytes allocated by lnet\n", \
+ libcfs_kmem_read()); \
+ } else { \
+ memset((ptr), 0, (size)); \
+ libcfs_kmem_inc((ptr), (size)); \
+ CDEBUG(D_MALLOC, "alloc '" #ptr "': %d at %p (tot %d).\n", \
+ (int)(size), (ptr), libcfs_kmem_read()); \
+ } \
+} while (0)
+
+/**
+ * allocate memory with GFP flags @mask
+ */
+#define LIBCFS_ALLOC_GFP(ptr, size, mask) \
+do { \
+ LIBCFS_ALLOC_PRE((size), (mask)); \
+ (ptr) = (size) <= LIBCFS_VMALLOC_SIZE ? \
+ kmalloc((size), (mask)) : vmalloc(size); \
+ LIBCFS_ALLOC_POST((ptr), (size)); \
+} while (0)
+
+/**
+ * default allocator
+ */
+#define LIBCFS_ALLOC(ptr, size) \
+ LIBCFS_ALLOC_GFP(ptr, size, __GFP_IO)
+
+/**
+ * non-sleeping allocator
+ */
+#define LIBCFS_ALLOC_ATOMIC(ptr, size) \
+ LIBCFS_ALLOC_GFP(ptr, size, GFP_ATOMIC)
+
+/**
+ * allocate memory for specified CPU partition
+ * \a cptab != NULL, \a cpt is CPU partition id of \a cptab
+ * \a cptab == NULL, \a cpt is HW NUMA node id
+ */
+#define LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, mask) \
+do { \
+ LIBCFS_ALLOC_PRE((size), (mask)); \
+ (ptr) = (size) <= LIBCFS_VMALLOC_SIZE ? \
+ kmalloc_node((size), (mask), cfs_cpt_spread_node(cptab, cpt)) :\
+ vmalloc_node(size, cfs_cpt_spread_node(cptab, cpt)); \
+ LIBCFS_ALLOC_POST((ptr), (size)); \
+} while (0)
+
+/** default numa allocator */
+#define LIBCFS_CPT_ALLOC(ptr, cptab, cpt, size) \
+ LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, __GFP_IO)
+
+#define LIBCFS_FREE(ptr, size) \
+do { \
+ int s = (size); \
+ if (unlikely((ptr) == NULL)) { \
+ CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at " \
+ "%s:%d\n", s, __FILE__, __LINE__); \
+ break; \
+ } \
+ libcfs_kmem_dec((ptr), s); \
+ CDEBUG(D_MALLOC, "kfreed '" #ptr "': %d at %p (tot %d).\n", \
+ s, (ptr), libcfs_kmem_read()); \
+ if (unlikely(s > LIBCFS_VMALLOC_SIZE)) \
+ vfree(ptr); \
+ else \
+ kfree(ptr); \
+} while (0)
+
+/******************************************************************************/
+
+/* htonl hack - either this, or compile with -O2. Stupid byteorder/generic.h */
+#if defined(__GNUC__) && (__GNUC__ >= 2) && !defined(__OPTIMIZE__)
+#define ___htonl(x) __cpu_to_be32(x)
+#define ___htons(x) __cpu_to_be16(x)
+#define ___ntohl(x) __be32_to_cpu(x)
+#define ___ntohs(x) __be16_to_cpu(x)
+#define htonl(x) ___htonl(x)
+#define ntohl(x) ___ntohl(x)
+#define htons(x) ___htons(x)
+#define ntohs(x) ___ntohs(x)
+#endif
+
+void libcfs_run_upcall(char **argv);
+void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *);
+void libcfs_debug_dumplog(void);
+int libcfs_debug_init(unsigned long bufsize);
+int libcfs_debug_cleanup(void);
+int libcfs_debug_clear_buffer(void);
+int libcfs_debug_mark_buffer(const char *text);
+
+void libcfs_debug_set_level(unsigned int debug_level);
+
+
+/*
+ * allocate per-cpu-partition data, returned value is an array of pointers,
+ * variable can be indexed by CPU ID.
+ * cptable != NULL: size of array is number of CPU partitions
+ * cptable == NULL: size of array is number of HW cores
+ */
+void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
+/*
+ * destroy per-cpu-partition variable
+ */
+void cfs_percpt_free(void *vars);
+int cfs_percpt_number(void *vars);
+void *cfs_percpt_current(void *vars);
+void *cfs_percpt_index(void *vars, int idx);
+
+#define cfs_percpt_for_each(var, i, vars) \
+ for (i = 0; i < cfs_percpt_number(vars) && \
+ ((var) = (vars)[i]) != NULL; i++)
+
+/*
+ * allocate a variable array, returned value is an array of pointers.
+ * Caller can specify length of array by count.
+ */
+void *cfs_array_alloc(int count, unsigned int size);
+void cfs_array_free(void *vars);
+
+#define LASSERT_ATOMIC_ENABLED (1)
+
+#if LASSERT_ATOMIC_ENABLED
+
+/** assert value of @a is equal to @v */
+#define LASSERT_ATOMIC_EQ(a, v) \
+do { \
+ LASSERTF(atomic_read(a) == v, \
+ "value: %d\n", atomic_read((a))); \
+} while (0)
+
+/** assert value of @a is unequal to @v */
+#define LASSERT_ATOMIC_NE(a, v) \
+do { \
+ LASSERTF(atomic_read(a) != v, \
+ "value: %d\n", atomic_read((a))); \
+} while (0)
+
+/** assert value of @a is less than @v */
+#define LASSERT_ATOMIC_LT(a, v) \
+do { \
+ LASSERTF(atomic_read(a) < v, \
+ "value: %d\n", atomic_read((a))); \
+} while (0)
+
+/** assert value of @a is less than or equal to @v */
+#define LASSERT_ATOMIC_LE(a, v) \
+do { \
+ LASSERTF(atomic_read(a) <= v, \
+ "value: %d\n", atomic_read((a))); \
+} while (0)
+
+/** assert value of @a is greater than @v */
+#define LASSERT_ATOMIC_GT(a, v) \
+do { \
+ LASSERTF(atomic_read(a) > v, \
+ "value: %d\n", atomic_read((a))); \
+} while (0)
+
+/** assert value of @a is greater than or equal to @v */
+#define LASSERT_ATOMIC_GE(a, v) \
+do { \
+ LASSERTF(atomic_read(a) >= v, \
+ "value: %d\n", atomic_read((a))); \
+} while (0)
+
+/** assert value of @a is greater than @v1 and less than @v2 */
+#define LASSERT_ATOMIC_GT_LT(a, v1, v2) \
+do { \
+ int __v = atomic_read(a); \
+ LASSERTF(__v > v1 && __v < v2, "value: %d\n", __v); \
+} while (0)
+
+/** assert value of @a is greater than @v1 and less than or equal to @v2 */
+#define LASSERT_ATOMIC_GT_LE(a, v1, v2) \
+do { \
+ int __v = atomic_read(a); \
+ LASSERTF(__v > v1 && __v <= v2, "value: %d\n", __v); \
+} while (0)
+
+/** assert value of @a is greater than or equal to @v1 and less than @v2 */
+#define LASSERT_ATOMIC_GE_LT(a, v1, v2) \
+do { \
+ int __v = atomic_read(a); \
+ LASSERTF(__v >= v1 && __v < v2, "value: %d\n", __v); \
+} while (0)
+
+/** assert value of @a is greater than or equal to @v1 and less than or equal to @v2 */
+#define LASSERT_ATOMIC_GE_LE(a, v1, v2) \
+do { \
+ int __v = atomic_read(a); \
+ LASSERTF(__v >= v1 && __v <= v2, "value: %d\n", __v); \
+} while (0)
+
+#else /* !LASSERT_ATOMIC_ENABLED */
+
+#define LASSERT_ATOMIC_EQ(a, v) do {} while (0)
+#define LASSERT_ATOMIC_NE(a, v) do {} while (0)
+#define LASSERT_ATOMIC_LT(a, v) do {} while (0)
+#define LASSERT_ATOMIC_LE(a, v) do {} while (0)
+#define LASSERT_ATOMIC_GT(a, v) do {} while (0)
+#define LASSERT_ATOMIC_GE(a, v) do {} while (0)
+#define LASSERT_ATOMIC_GT_LT(a, v1, v2) do {} while (0)
+#define LASSERT_ATOMIC_GT_LE(a, v1, v2) do {} while (0)
+#define LASSERT_ATOMIC_GE_LT(a, v1, v2) do {} while (0)
+#define LASSERT_ATOMIC_GE_LE(a, v1, v2) do {} while (0)
+
+#endif /* LASSERT_ATOMIC_ENABLED */
+
+#define LASSERT_ATOMIC_ZERO(a) LASSERT_ATOMIC_EQ(a, 0)
+#define LASSERT_ATOMIC_POS(a) LASSERT_ATOMIC_GT(a, 0)
+
+#define CFS_ALLOC_PTR(ptr) LIBCFS_ALLOC(ptr, sizeof(*(ptr)))
+#define CFS_FREE_PTR(ptr) LIBCFS_FREE(ptr, sizeof(*(ptr)))
+
+/*
+ * percpu partition lock
+ *
+ * There are some use-cases like this in Lustre:
+ * . each CPU partition has its own private data which is frequently changed,
+ * and mostly by the local CPU partition.
+ * . all CPU partitions share some global data; these data are rarely changed.
+ *
+ * LNet is a typical example.
+ * The CPU partition lock is designed for this kind of use-case:
+ * . each CPU partition has its own private lock
+ * . change on private data just needs to take the private lock
+ * . read on shared data just needs to take _any_ of private locks
+ * . change on shared data needs to take _all_ private locks,
+ * which is slow and should be really rare.
+ */
+
+enum {
+ CFS_PERCPT_LOCK_EX = -1, /* negative */
+};
+
+
+struct cfs_percpt_lock {
+ /* cpu-partition-table for this lock */
+ struct cfs_cpt_table *pcl_cptab;
+ /* exclusively locked */
+ unsigned int pcl_locked;
+ /* private lock table */
+ spinlock_t **pcl_locks;
+};
+
+/* return number of private locks */
+static inline int
+cfs_percpt_lock_num(struct cfs_percpt_lock *pcl)
+{
+ return cfs_cpt_number(pcl->pcl_cptab);
+}
+
+
+/*
+ * create a cpu-partition lock based on CPU partition table \a cptab;
+ * one private lock is allocated for each partition of \a cptab
+ */
+struct cfs_percpt_lock *cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab);
+/* destroy a cpu-partition lock */
+void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl);
+
+/* lock private lock \a index of \a pcl */
+void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index);
+/* unlock private lock \a index of \a pcl */
+void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);
+/* create percpt (atomic) refcount based on @cptab */
+atomic_t **cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int val);
+/* destroy percpt refcount */
+void cfs_percpt_atomic_free(atomic_t **refs);
+/* return sum of all percpu refs */
+int cfs_percpt_atomic_summary(atomic_t **refs);
+
+
+/** Compile-time assertion.
+ *
+ * Check an invariant described by a constant expression at compile time by
+ * forcing a compiler error if it does not hold. \a cond must be a constant
+ * expression as defined by the ISO C Standard:
+ *
+ * 6.8.4.2 The switch statement
+ * ....
+ * [#3] The expression of each case label shall be an integer
+ * constant expression and no two of the case constant
+ * expressions in the same switch statement shall have the same
+ * value after conversion...
+ *
+ */
+#define CLASSERT(cond) do {switch(42) {case (cond): case 0: break;}} while (0)
+
+/* support decl needed both by kernel and liblustre */
+int libcfs_isknown_lnd(int type);
+char *libcfs_lnd2modname(int type);
+char *libcfs_lnd2str(int type);
+int libcfs_str2lnd(const char *str);
+char *libcfs_net2str(__u32 net);
+char *libcfs_nid2str(lnet_nid_t nid);
+__u32 libcfs_str2net(const char *str);
+lnet_nid_t libcfs_str2nid(const char *str);
+int libcfs_str2anynid(lnet_nid_t *nid, const char *str);
+char *libcfs_id2str(lnet_process_id_t id);
+void cfs_free_nidlist(struct list_head *list);
+int cfs_parse_nidlist(char *str, int len, struct list_head *list);
+int cfs_match_nid(lnet_nid_t nid, struct list_head *list);
+
+/** \addtogroup lnet_addr
+ * @{ */
+/* how an LNET NID encodes net:address */
+/** extract the address part of an lnet_nid_t */
+#define LNET_NIDADDR(nid) ((__u32)((nid) & 0xffffffff))
+/** extract the network part of an lnet_nid_t */
+#define LNET_NIDNET(nid) ((__u32)(((nid) >> 32)) & 0xffffffff)
+/** make an lnet_nid_t from a network part and an address part */
+#define LNET_MKNID(net,addr) ((((__u64)(net))<<32)|((__u64)(addr)))
+/* how net encodes type:number */
+#define LNET_NETNUM(net) ((net) & 0xffff)
+#define LNET_NETTYP(net) (((net) >> 16) & 0xffff)
+#define LNET_MKNET(typ,num) ((((__u32)(typ))<<16)|((__u32)(num)))
+/** @} lnet_addr */
+
+/* max value for numeric network address */
+#define MAX_NUMERIC_VALUE 0xffffffff
+
+/* implication */
+#define ergo(a, b) (!(a) || (b))
+/* logical equivalence */
+#define equi(a, b) (!!(a) == !!(b))
+
+/* --------------------------------------------------------------------
+ * Light-weight trace
+ * Support for temporary event tracing with minimal Heisenberg effect.
+ * All lwt support is kept in arch/kp30.h
+ * -------------------------------------------------------------------- */
+
+struct libcfs_device_userstate
+{
+ int ldu_memhog_pages;
+ struct page *ldu_memhog_root_page;
+};
+
+/* what used to be in portals_lib.h */
+#ifndef MIN
+# define MIN(a,b) (((a)<(b)) ? (a): (b))
+#endif
+#ifndef MAX
+# define MAX(a,b) (((a)>(b)) ? (a): (b))
+#endif
+
+#define MKSTR(ptr) ((ptr) ? (ptr) : "")
+
+static inline int cfs_size_round4 (int val)
+{
+ return (val + 3) & (~0x3);
+}
+
+#ifndef HAVE_CFS_SIZE_ROUND
+static inline int cfs_size_round (int val)
+{
+ return (val + 7) & (~0x7);
+}
+#define HAVE_CFS_SIZE_ROUND
+#endif
+
+static inline int cfs_size_round16(int val)
+{
+ return (val + 0xf) & (~0xf);
+}
+
+static inline int cfs_size_round32(int val)
+{
+ return (val + 0x1f) & (~0x1f);
+}
+
+static inline int cfs_size_round0(int val)
+{
+ if (!val)
+ return 0;
+ return (val + 1 + 7) & (~0x7);
+}
+
+static inline size_t cfs_round_strlen(char *fset)
+{
+ return (size_t)cfs_size_round((int)strlen(fset) + 1);
+}
+
+/* round up \a val to a power of 2 */
+static inline unsigned int cfs_power2_roundup(unsigned int val)
+{
+ if (val != LOWEST_BIT_SET(val)) { /* not a power of 2 already */
+ do {
+ val &= ~LOWEST_BIT_SET(val);
+ } while (val != LOWEST_BIT_SET(val));
+ /* ...and round up */
+ val <<= 1;
+ }
+ return val;
+}
+
+#define LOGL(var,len,ptr) \
+do { \
+ if (var) \
+ memcpy((char *)ptr, (const char *)var, len); \
+ ptr += cfs_size_round(len); \
+} while (0)
+
+#define LOGU(var,len,ptr) \
+do { \
+ if (var) \
+ memcpy((char *)var, (const char *)ptr, len); \
+ ptr += cfs_size_round(len); \
+} while (0)
+
+#define LOGL0(var,len,ptr) \
+do { \
+ if (!len) \
+ break; \
+ memcpy((char *)ptr, (const char *)var, len); \
+ *((char *)(ptr) + len) = 0; \
+ ptr += cfs_size_round(len + 1); \
+} while (0)
+
+/**
+ * Lustre Network Driver types.
+ */
+enum {
+ /* Only add to these values (i.e. don't ever change or redefine them):
+ * network addresses depend on them... */
+ QSWLND = 1,
+ SOCKLND = 2,
+ GMLND = 3, /* obsolete, keep it so that libcfs_nid2str works */
+ PTLLND = 4,
+ O2IBLND = 5,
+ CIBLND = 6,
+ OPENIBLND = 7,
+ IIBLND = 8,
+ LOLND = 9,
+ RALND = 10,
+ VIBLND = 11,
+ MXLND = 12,
+ GNILND = 13,
+};
+
+#endif
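A sketch of the per-CPU-partition locking pattern described in the comment block above (private updates take one partition's lock, shared updates take them all via the exclusive index); the data structures and functions are hypothetical, and my_pcl is assumed to have been created earlier with cfs_percpt_lock_alloc().

struct my_private_data {
	long	mpd_count;	/* hypothetical per-partition counter */
};

static struct cfs_percpt_lock *my_pcl;	/* from cfs_percpt_lock_alloc() */

static void my_private_update(int cpt, struct my_private_data *priv)
{
	/* changing this partition's private data: its own lock suffices */
	cfs_percpt_lock(my_pcl, cpt);
	priv->mpd_count++;
	cfs_percpt_unlock(my_pcl, cpt);
}

static void my_shared_update(long *shared_counter)
{
	/* changing shared data: take all private locks (exclusive mode) */
	cfs_percpt_lock(my_pcl, CFS_PERCPT_LOCK_EX);
	(*shared_counter)++;
	cfs_percpt_unlock(my_pcl, CFS_PERCPT_LOCK_EX);
}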
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
new file mode 100644
index 0000000..a6bac9c
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
@@ -0,0 +1,137 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/include/libcfs/libcfs_string.h
+ *
+ * Generic string manipulation functions.
+ *
+ * Author: Nathan Rutman <nathan.rutman@sun.com>
+ */
+
+#ifndef __LIBCFS_STRING_H__
+#define __LIBCFS_STRING_H__
+
+/* libcfs_string.c */
+/* string comparison ignoring case */
+int cfs_strncasecmp(const char *s1, const char *s2, size_t n);
+/* Convert a text string to a bitmask */
+int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
+ int *oldmask, int minmask, int allmask);
+
+/* Allocate space for and copy an existing string.
+ * Must free with kfree().
+ */
+char *cfs_strdup(const char *str, u_int32_t flags);
+
+/* safe vsnprintf */
+int cfs_vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
+
+/* safe snprintf */
+int cfs_snprintf(char *buf, size_t size, const char *fmt, ...);
+
+/* trim leading and trailing space characters */
+char *cfs_firststr(char *str, size_t size);
+
+/**
+ * Structure to represent a string that is not necessarily NUL-terminated
+ * (pointer plus explicit length).
+ */
+struct cfs_lstr {
+ char *ls_str;
+ int ls_len;
+};
+
+/*
+ * Structure to represent \<range_expr\> token of the syntax.
+ */
+struct cfs_range_expr {
+ /*
+ * Link to cfs_expr_list::el_exprs.
+ */
+ struct list_head re_link;
+ __u32 re_lo;
+ __u32 re_hi;
+ __u32 re_stride;
+};
+
+struct cfs_expr_list {
+ struct list_head el_link;
+ struct list_head el_exprs;
+};
+
+static inline int
+cfs_iswhite(char c)
+{
+ switch (c) {
+ case ' ':
+ case '\t':
+ case '\n':
+ case '\r':
+ return 1;
+ default:
+ break;
+ }
+ return 0;
+}
+
+char *cfs_trimwhite(char *str);
+int cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res);
+int cfs_str2num_check(char *str, int nob, unsigned *num,
+ unsigned min, unsigned max);
+int cfs_range_expr_parse(struct cfs_lstr *src, unsigned min, unsigned max,
+ int single_tok, struct cfs_range_expr **expr);
+int cfs_expr_list_match(__u32 value, struct cfs_expr_list *expr_list);
+int cfs_expr_list_values(struct cfs_expr_list *expr_list,
+ int max, __u32 **values);
+static inline void
+cfs_expr_list_values_free(__u32 *values, int num)
+{
+ /* This array is allocated by LIBCFS_ALLOC(), so it must not be freed
+ * with OBD_FREE() when the caller is a module other than libcfs or
+ * LNet; otherwise we would see a spurious memory leak. */
+ LIBCFS_FREE(values, num * sizeof(values[0]));
+}
+
+void cfs_expr_list_free(struct cfs_expr_list *expr_list);
+void cfs_expr_list_print(struct cfs_expr_list *expr_list);
+int cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max,
+ struct cfs_expr_list **elpp);
+void cfs_expr_list_free_list(struct list_head *list);
+int cfs_ip_addr_parse(char *str, int len, struct list_head *list);
+int cfs_ip_addr_match(__u32 addr, struct list_head *list);
+void cfs_ip_addr_free(struct list_head *list);
+
+#define strtoul(str, endp, base) simple_strtoul(str, endp, base)
+
+#endif
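A sketch of parsing and matching a range expression with the cfs_expr_list helpers above; the expression syntax shown ("[0-3]") and the return convention of cfs_expr_list_match() are assumed from common Lustre usage and are not defined in this header.

static int my_value_selected(__u32 value)
{
	struct cfs_expr_list *el = NULL;
	char expr[] = "[0-3]";		/* assumed range syntax */
	int matched;
	int rc;

	rc = cfs_expr_list_parse(expr, sizeof(expr) - 1, 0, 255, &el);
	if (rc != 0)
		return rc;

	matched = cfs_expr_list_match(value, el);
	cfs_expr_list_free(el);

	return matched;	/* assumed non-zero when @value matches */
}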
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_time.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_time.h
new file mode 100644
index 0000000..4bdd771
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_time.h
@@ -0,0 +1,132 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/include/libcfs/libcfs_time.h
+ *
+ * Time functions.
+ *
+ */
+
+#ifndef __LIBCFS_TIME_H__
+#define __LIBCFS_TIME_H__
+/*
+ * generic time manipulation functions.
+ */
+
+static inline cfs_time_t cfs_time_add(cfs_time_t t, cfs_duration_t d)
+{
+ return (cfs_time_t)(t + d);
+}
+
+static inline cfs_duration_t cfs_time_sub(cfs_time_t t1, cfs_time_t t2)
+{
+ return (cfs_duration_t)(t1 - t2);
+}
+
+static inline int cfs_time_after(cfs_time_t t1, cfs_time_t t2)
+{
+ return cfs_time_before(t2, t1);
+}
+
+static inline int cfs_time_aftereq(cfs_time_t t1, cfs_time_t t2)
+{
+ return cfs_time_beforeq(t2, t1);
+}
+
+
+static inline cfs_time_t cfs_time_shift(int seconds)
+{
+ return cfs_time_add(cfs_time_current(), cfs_time_seconds(seconds));
+}
+
+static inline long cfs_timeval_sub(struct timeval *large, struct timeval *small,
+ struct timeval *result)
+{
+ long r = (long) (
+ (large->tv_sec - small->tv_sec) * ONE_MILLION +
+ (large->tv_usec - small->tv_usec));
+ if (result != NULL) {
+ result->tv_usec = r % ONE_MILLION;
+ result->tv_sec = r / ONE_MILLION;
+ }
+ return r;
+}
+
+static inline void cfs_slow_warning(cfs_time_t now, int seconds, char *msg)
+{
+ if (cfs_time_after(cfs_time_current(),
+ cfs_time_add(now, cfs_time_seconds(seconds))))
+ CERROR("slow %s "CFS_TIME_T" sec\n", msg,
+ cfs_duration_sec(cfs_time_sub(cfs_time_current(),now)));
+}
+
+#define CFS_RATELIMIT(seconds) \
+({ \
+ /* \
+ * XXX nikita: non-portable initializer \
+ */ \
+ static time_t __next_message = 0; \
+ int result; \
+ \
+ if (cfs_time_after(cfs_time_current(), __next_message)) { \
+ __next_message = cfs_time_shift(seconds); \
+ result = 1; \
+ } else { \
+ result = 0; \
+ } \
+ result; \
+})
+
+/*
+ * helper function similar to the Linux kernel's do_gettimeofday()
+ */
+static inline void cfs_fs_timeval(struct timeval *tv)
+{
+ cfs_fs_time_t time;
+
+ cfs_fs_time_current(&time);
+ cfs_fs_time_usec(&time, tv);
+}
+
+/*
+ * return a valid time-out based on the user-supplied one. Currently we only
+ * check that the time-out is not shorter than the minimum allowed.
+ */
+static inline cfs_duration_t cfs_timeout_cap(cfs_duration_t timeout)
+{
+ if (timeout < CFS_TICK)
+ timeout = CFS_TICK;
+ return timeout;
+}
+
+#endif
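A sketch of a polling loop built from the time helpers above; my_poll_hw() and the 30-second budget are illustrative only, and cfs_pause()/cfs_time_seconds() are assumed from the other libcfs headers.

static int my_poll_hw(void)
{
	return 1;	/* placeholder: pretend the hardware is ready */
}

static int my_wait_for_hw(void)
{
	cfs_time_t deadline = cfs_time_shift(30);	/* now + 30 seconds */

	while (!my_poll_hw()) {
		if (cfs_time_aftereq(cfs_time_current(), deadline))
			return -ETIMEDOUT;
		/* sleep ~0.1s, clamped to the minimum allowed timeout */
		cfs_pause(cfs_timeout_cap(cfs_time_seconds(1) / 10));
	}
	return 0;
}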
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
new file mode 100644
index 0000000..5cc64f3
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
@@ -0,0 +1,110 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/include/libcfs/libcfs_workitem.h
+ *
+ * Author: Isaac Huang <he.h.huang@oracle.com>
+ * Liang Zhen <zhen.liang@sun.com>
+ *
+ * A workitem is deferred work with these semantics:
+ * - a workitem always runs in thread context.
+ * - a workitem can be concurrent with other workitems but is strictly
+ * serialized with respect to itself.
+ * - no CPU affinity, a workitem does not necessarily run on the same CPU
+ * that schedules it. However, this might change in the future.
+ * - if a workitem is scheduled again before it has a chance to run, it
+ * runs only once.
+ * - if a workitem is scheduled while it runs, it runs again after it
+ * completes; this ensures that events occurring while other events are
+ * being processed receive due attention. This behavior also allows a
+ * workitem to reschedule itself.
+ *
+ * Usage notes:
+ * - a workitem can sleep but it should be aware of how that sleep might
+ * affect others.
+ * - a workitem runs inside a kernel thread so there's no user space to access.
+ * - do not use a workitem if the scheduling latency can't be tolerated.
+ *
+ * When wi_action returns non-zero, it means the workitem has either been
+ * freed or reused and the workitem scheduler won't touch it any more.
+ */
+
+#ifndef __LIBCFS_WORKITEM_H__
+#define __LIBCFS_WORKITEM_H__
+
+struct cfs_wi_sched;
+
+void cfs_wi_sched_destroy(struct cfs_wi_sched *);
+int cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, int cpt,
+ int nthrs, struct cfs_wi_sched **);
+
+struct cfs_workitem;
+
+typedef int (*cfs_wi_action_t) (struct cfs_workitem *);
+typedef struct cfs_workitem {
+ /** chain on runq or rerunq */
+ struct list_head wi_list;
+ /** working function */
+ cfs_wi_action_t wi_action;
+ /** arg for working function */
+ void *wi_data;
+ /** in running */
+ unsigned short wi_running:1;
+ /** scheduled */
+ unsigned short wi_scheduled:1;
+} cfs_workitem_t;
+
+static inline void
+cfs_wi_init(cfs_workitem_t *wi, void *data, cfs_wi_action_t action)
+{
+ INIT_LIST_HEAD(&wi->wi_list);
+
+ wi->wi_running = 0;
+ wi->wi_scheduled = 0;
+ wi->wi_data = data;
+ wi->wi_action = action;
+}
+
+void cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi);
+int cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi);
+void cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi);
+
+int cfs_wi_startup(void);
+void cfs_wi_shutdown(void);
+
+/** # workitem scheduler loops before reschedule */
+#define CFS_WI_RESCHED 128
+
+#endif /* __LIBCFS_WORKITEM_H__ */
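A sketch of deferring work through the workitem API above; struct my_work, my_wi_action() and the scheduler parameters are hypothetical, and CFS_CPT_ANY is assumed to come from libcfs_cpu.h.

struct my_work {
	cfs_workitem_t	mw_wi;		/* embedded workitem */
	int		mw_arg;		/* hypothetical payload */
};

static int my_wi_action(struct cfs_workitem *wi)
{
	struct my_work *work = wi->wi_data;

	/* runs in thread context, serialized against itself */
	pr_info("my_work: arg=%d\n", work->mw_arg);

	return 0;	/* 0: the scheduler still owns this workitem */
}

static int my_defer_work(struct cfs_cpt_table *cptab, struct my_work *work)
{
	struct cfs_wi_sched *sched;
	int rc;

	rc = cfs_wi_sched_create("my_wi", cptab, CFS_CPT_ANY, 1, &sched);
	if (rc != 0)
		return rc;

	cfs_wi_init(&work->mw_wi, work, my_wi_action);
	cfs_wi_schedule(sched, &work->mw_wi);

	/* ... once the work is no longer needed ... */
	cfs_wi_exit(sched, &work->mw_wi);
	cfs_wi_sched_destroy(sched);
	return 0;
}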
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/kp30.h b/drivers/staging/lustre/include/linux/libcfs/linux/kp30.h
new file mode 100644
index 0000000..c204b67
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/kp30.h
@@ -0,0 +1,241 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __LIBCFS_LINUX_KP30_H__
+#define __LIBCFS_LINUX_KP30_H__
+
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/kmod.h>
+#include <linux/notifier.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <asm/atomic.h>
+#include <asm/uaccess.h>
+#include <linux/rwsem.h>
+#include <linux/proc_fs.h>
+#include <linux/file.h>
+#include <linux/smp.h>
+#include <linux/ctype.h>
+#include <linux/compiler.h>
+#ifdef HAVE_MM_INLINE
+# include <linux/mm_inline.h>
+#endif
+#include <linux/kallsyms.h>
+#include <linux/moduleparam.h>
+#include <linux/scatterlist.h>
+
+#include <linux/libcfs/linux/portals_compat25.h>
+
+
+/******************************************************************************/
+/* Module parameter support */
+#define CFS_MODULE_PARM(name, t, type, perm, desc) \
+ module_param(name, type, perm);\
+ MODULE_PARM_DESC(name, desc)
+
+#define CFS_SYSFS_MODULE_PARM 1 /* module parameters accessible via sysfs */
+
+/******************************************************************************/
+/* Light-weight trace
+ * Support for temporary event tracing with minimal Heisenberg effect. */
+#define LWT_SUPPORT 0
+
+#define LWT_MEMORY (16<<20)
+
+#ifndef KLWT_SUPPORT
+# if !defined(BITS_PER_LONG)
+# error "BITS_PER_LONG not defined"
+# endif
+
+/* kernel hasn't defined this? */
+typedef struct {
+ long long lwte_when;
+ char *lwte_where;
+ void *lwte_task;
+ long lwte_p1;
+ long lwte_p2;
+ long lwte_p3;
+ long lwte_p4;
+# if BITS_PER_LONG > 32
+ long lwte_pad;
+# endif
+} lwt_event_t;
+#endif /* !KLWT_SUPPORT */
+
+#if LWT_SUPPORT
+# if !KLWT_SUPPORT
+
+typedef struct _lwt_page {
+ struct list_head lwtp_list;
+ struct page *lwtp_page;
+ lwt_event_t *lwtp_events;
+} lwt_page_t;
+
+typedef struct {
+ int lwtc_current_index;
+ lwt_page_t *lwtc_current_page;
+} lwt_cpu_t;
+
+extern int lwt_enabled;
+extern lwt_cpu_t lwt_cpus[];
+
+/* Note that we _don't_ define LWT_EVENT at all if LWT_SUPPORT isn't set.
+ * This stuff is meant for finding specific problems; it never stays in
+ * production code... */
+
+#define LWTSTR(n) #n
+#define LWTWHERE(f,l) f ":" LWTSTR(l)
+#define LWT_EVENTS_PER_PAGE (PAGE_CACHE_SIZE / sizeof (lwt_event_t))
+
+#define LWT_EVENT(p1, p2, p3, p4) \
+do { \
+ unsigned long flags; \
+ lwt_cpu_t *cpu; \
+ lwt_page_t *p; \
+ lwt_event_t *e; \
+ \
+ if (lwt_enabled) { \
+ local_irq_save (flags); \
+ \
+ cpu = &lwt_cpus[smp_processor_id()]; \
+ p = cpu->lwtc_current_page; \
+ e = &p->lwtp_events[cpu->lwtc_current_index++]; \
+ \
+ if (cpu->lwtc_current_index >= LWT_EVENTS_PER_PAGE) { \
+ cpu->lwtc_current_page = \
+ list_entry (p->lwtp_list.next, \
+ lwt_page_t, lwtp_list); \
+ cpu->lwtc_current_index = 0; \
+ } \
+ \
+ e->lwte_when = get_cycles(); \
+ e->lwte_where = LWTWHERE(__FILE__,__LINE__); \
+ e->lwte_task = current; \
+ e->lwte_p1 = (long)(p1); \
+ e->lwte_p2 = (long)(p2); \
+ e->lwte_p3 = (long)(p3); \
+ e->lwte_p4 = (long)(p4); \
+ \
+ local_irq_restore (flags); \
+ } \
+} while (0)
+
+#endif /* !KLWT_SUPPORT */
+
+extern int lwt_init (void);
+extern void lwt_fini (void);
+extern int lwt_lookup_string (int *size, char *knlptr,
+ char *usrptr, int usrsize);
+extern int lwt_control (int enable, int clear);
+extern int lwt_snapshot (cfs_cycles_t *now, int *ncpu, int *total_size,
+ void *user_ptr, int user_size);
+#endif /* LWT_SUPPORT */
+
+/* ------------------------------------------------------------------ */
+
+#define IOCTL_LIBCFS_TYPE long
+
+#ifdef __CYGWIN__
+# ifndef BITS_PER_LONG
+# define BITS_PER_LONG 64
+# endif
+#endif
+
+# define LI_POISON 0x5a5a5a5a
+#if BITS_PER_LONG > 32
+# define LL_POISON 0x5a5a5a5a5a5a5a5aL
+#else
+# define LL_POISON 0x5a5a5a5aL
+#endif
+# define LP_POISON ((void *)LL_POISON)
+
+/* this is a bit chunky */
+
+#define _LWORDSIZE BITS_PER_LONG
+
+# define LPU64 "%llu"
+# define LPD64 "%lld"
+# define LPX64 "%#llx"
+# define LPX64i "%llx"
+# define LPO64 "%#llo"
+# define LPF64 "L"
+
+/*
+ * long_ptr_t & ulong_ptr_t, same to "long" for gcc
+ */
+# define LPLU "%lu"
+# define LPLD "%ld"
+# define LPLX "%#lx"
+
+/*
+ * pid_t
+ */
+# define LPPID "%d"
+
+
+#undef _LWORDSIZE
+
+/* compat macros */
+
+
+#ifndef get_cpu
+# ifdef CONFIG_PREEMPT
+# define get_cpu() ({ preempt_disable(); smp_processor_id(); })
+# define put_cpu() preempt_enable()
+# else
+# define get_cpu() smp_processor_id()
+# define put_cpu()
+# endif
+#else
+#endif /* get_cpu & put_cpu */
+
+#define INIT_CTL_NAME(a)
+#define INIT_STRATEGY(a)
+
+#endif
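A sketch of the module-parameter wrapper and the LP*64 format strings defined above; the parameter, function and messages below are hypothetical.

static int my_debug_level;
CFS_MODULE_PARM(my_debug_level, "i", int, 0644,
		"verbosity of the hypothetical my_driver module");

static void my_print_size(__u64 nob)
{
	/* LPU64 expands to "%llu", so __u64 values print portably */
	printk(KERN_INFO "my_driver: transferred "LPU64" bytes\n", nob);
}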
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
new file mode 100644
index 0000000..60ecaf6
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
@@ -0,0 +1,122 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __LIBCFS_LINUX_LIBCFS_H__
+#define __LIBCFS_LINUX_LIBCFS_H__
+
+#ifndef __LIBCFS_LIBCFS_H__
+#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
+#endif
+
+
+
+#include <stdarg.h>
+#include <linux/libcfs/linux/linux-cpu.h>
+#include <linux/libcfs/linux/linux-time.h>
+#include <linux/libcfs/linux/linux-mem.h>
+#include <linux/libcfs/linux/linux-prim.h>
+#include <linux/libcfs/linux/linux-lock.h>
+#include <linux/libcfs/linux/linux-fs.h>
+#include <linux/libcfs/linux/linux-tcpip.h>
+#include <linux/libcfs/linux/linux-bitops.h>
+#include <linux/libcfs/linux/linux-types.h>
+#include <linux/libcfs/linux/kp30.h>
+
+#include <asm/types.h>
+#include <linux/types.h>
+#include <asm/timex.h>
+#include <linux/sched.h> /* THREAD_SIZE */
+#include <linux/rbtree.h>
+
+#define LUSTRE_TRACE_SIZE (THREAD_SIZE >> 5)
+
+#if !defined(__x86_64__)
+# ifdef __ia64__
+# define CDEBUG_STACK() (THREAD_SIZE - \
+ ((unsigned long)__builtin_dwarf_cfa() & \
+ (THREAD_SIZE - 1)))
+# else
+# define CDEBUG_STACK() (THREAD_SIZE - \
+ ((unsigned long)__builtin_frame_address(0) & \
+ (THREAD_SIZE - 1)))
+# endif /* __ia64__ */
+
+#define __CHECK_STACK(msgdata, mask, cdls) \
+do { \
+ if (unlikely(CDEBUG_STACK() > libcfs_stack)) { \
+ LIBCFS_DEBUG_MSG_DATA_INIT(msgdata, D_WARNING, NULL); \
+ libcfs_stack = CDEBUG_STACK(); \
+ libcfs_debug_msg(msgdata, \
+ "maximum lustre stack %lu\n", \
+ CDEBUG_STACK()); \
+ (msgdata)->msg_mask = mask; \
+ (msgdata)->msg_cdls = cdls; \
+ dump_stack(); \
+ /*panic("LBUG");*/ \
+ } \
+} while (0)
+#define CFS_CHECK_STACK(msgdata, mask, cdls) __CHECK_STACK(msgdata, mask, cdls)
+#else /* __x86_64__ */
+#define CFS_CHECK_STACK(msgdata, mask, cdls) do {} while(0)
+#define CDEBUG_STACK() (0L)
+#endif /* __x86_64__ */
+
+/* initial pid */
+#define LUSTRE_LNET_PID 12345
+
+#define __current_nesting_level() (0)
+
+/**
+ * Platform specific declarations for cfs_curproc API (libcfs/curproc.h)
+ *
+ * Implementation is in linux-curproc.c
+ */
+#define CFS_CURPROC_COMM_MAX (sizeof ((struct task_struct *)0)->comm)
+
+#include <linux/capability.h>
+
+/* long integer with size equal to pointer */
+typedef unsigned long ulong_ptr_t;
+typedef long long_ptr_t;
+
+#ifndef WITH_WATCHDOG
+#define WITH_WATCHDOG
+#endif
+
+
+
+
+#endif /* __LIBCFS_LINUX_LIBCFS_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-bitops.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-bitops.h
new file mode 100644
index 0000000..43936e3
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-bitops.h
@@ -0,0 +1,38 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/include/libcfs/linux/linux-bitops.h
+ */
+#include <linux/bitops.h>
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
new file mode 100644
index 0000000..8dd354d
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
@@ -0,0 +1,166 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/include/libcfs/linux/linux-cpu.h
+ *
+ * Basic library routines.
+ *
+ * Author: liang@whamcloud.com
+ */
+
+#ifndef __LIBCFS_LINUX_CPU_H__
+#define __LIBCFS_LINUX_CPU_H__
+
+#ifndef __LIBCFS_LIBCFS_H__
+#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
+#endif
+
+
+#include <linux/cpu.h>
+#include <linux/cpuset.h>
+#include <linux/topology.h>
+
+#ifdef CONFIG_SMP
+
+#define HAVE_LIBCFS_CPT
+
+/** virtual processing unit */
+struct cfs_cpu_partition {
+ /* CPUs mask for this partition */
+ cpumask_t *cpt_cpumask;
+ /* nodes mask for this partition */
+ nodemask_t *cpt_nodemask;
+ /* spread rotor for NUMA allocator */
+ unsigned cpt_spread_rotor;
+};
+
+/** descriptor for CPU partitions */
+struct cfs_cpt_table {
+ /* version, reserved for hotplug */
+ unsigned ctb_version;
+ /* spread rotor for NUMA allocator */
+ unsigned ctb_spread_rotor;
+ /* # of CPU partitions */
+ unsigned ctb_nparts;
+ /* partitions tables */
+ struct cfs_cpu_partition *ctb_parts;
+ /* shadow HW CPU to CPU partition ID */
+ int *ctb_cpu2cpt;
+ /* all cpus in this partition table */
+ cpumask_t *ctb_cpumask;
+ /* all nodes in this partition table */
+ nodemask_t *ctb_nodemask;
+};
+
+/**
+ * Commented-out definitions for the compatibility layer:
+ *
+ * typedef cpumask_t cfs_cpumask_t;
+ *
+ * #define cfs_cpu_current() smp_processor_id()
+ * #define cfs_cpu_online(i) cpu_online(i)
+ * #define cfs_cpu_online_num() num_online_cpus()
+ * #define cfs_cpu_online_for_each(i) for_each_online_cpu(i)
+ * #define cfs_cpu_possible_num() num_possible_cpus()
+ * #define cfs_cpu_possible_for_each(i) for_each_possible_cpu(i)
+ *
+ * #ifdef CONFIG_CPUMASK_SIZE
+ * #define cfs_cpu_mask_size() cpumask_size()
+ * #else
+ * #define cfs_cpu_mask_size() sizeof(cfs_cpumask_t)
+ * #endif
+ *
+ * #define cfs_cpu_mask_set(i, mask) cpu_set(i, mask)
+ * #define cfs_cpu_mask_unset(i, mask) cpu_clear(i, mask)
+ * #define cfs_cpu_mask_isset(i, mask) cpu_isset(i, mask)
+ * #define cfs_cpu_mask_clear(mask) cpus_clear(mask)
+ * #define cfs_cpu_mask_empty(mask) cpus_empty(mask)
+ * #define cfs_cpu_mask_weight(mask) cpus_weight(mask)
+ * #define cfs_cpu_mask_first(mask) first_cpu(mask)
+ * #define cfs_cpu_mask_any_online(mask) (any_online_cpu(mask) != NR_CPUS)
+ * #define cfs_cpu_mask_for_each(i, mask) for_each_cpu_mask(i, mask)
+ * #define cfs_cpu_mask_bind(t, mask) set_cpus_allowed(t, mask)
+ *
+ * #ifdef HAVE_CPUMASK_COPY
+ * #define cfs_cpu_mask_copy(dst, src) cpumask_copy(dst, src)
+ * #else
+ * #define cfs_cpu_mask_copy(dst, src) memcpy(dst, src, sizeof(*src))
+ * #endif
+ *
+ * static inline void
+ * cfs_cpu_mask_of_online(cfs_cpumask_t *mask)
+ * {
+ * cfs_cpu_mask_copy(mask, &cpu_online_map);
+ * }
+ *
+ * #ifdef CONFIG_NUMA
+ *
+ * #define CFS_NODE_NR MAX_NUMNODES
+ *
+ * typedef nodemask_t cfs_node_mask_t;
+ *
+ * #define cfs_node_of_cpu(cpu) cpu_to_node(cpu)
+ * #define cfs_node_online(i) node_online(i)
+ * #define cfs_node_online_num() num_online_nodes()
+ * #define cfs_node_online_for_each(i) for_each_online_node(i)
+ * #define cfs_node_possible_num() num_possible_nodes()
+ * #define cfs_node_possible_for_each(i) for_each_node(i)
+ *
+ * static inline void cfs_node_to_cpumask(int node, cfs_cpumask_t *mask)
+ * {
+ * #if defined(HAVE_NODE_TO_CPUMASK)
+ * *mask = node_to_cpumask(node);
+ * #elif defined(HAVE_CPUMASK_OF_NODE)
+ * cfs_cpu_mask_copy(mask, cpumask_of_node(node));
+ * #else
+ * # error "Needs node_to_cpumask or cpumask_of_node"
+ * #endif
+ * }
+ *
+ * #define cfs_node_mask_set(i, mask) node_set(i, mask)
+ * #define cfs_node_mask_unset(i, mask) node_clear(i, mask)
+ * #define cfs_node_mask_isset(i, mask) node_isset(i, mask)
+ * #define cfs_node_mask_clear(mask) nodes_reset(mask)
+ * #define cfs_node_mask_empty(mask) nodes_empty(mask)
+ * #define cfs_node_mask_weight(mask) nodes_weight(mask)
+ * #define cfs_node_mask_for_each(i, mask) for_each_node_mask(i, mask)
+ * #define cfs_node_mask_copy(dst, src) memcpy(dst, src, sizeof(*src))
+ *
+ * static inline void
+ * cfs_node_mask_of_online(cfs_node_mask_t *mask)
+ * {
+ * cfs_node_mask_copy(mask, &node_online_map);
+ * }
+ *
+ * #endif
+ */
+
+#endif /* CONFIG_SMP */
+#endif /* __LIBCFS_LINUX_CPU_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-crypto.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-crypto.h
new file mode 100644
index 0000000..97c771cf
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-crypto.h
@@ -0,0 +1,49 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see http://www.gnu.org/licenses
+ *
+ * Please visit http://www.xyratex.com/contact if you need additional
+ * information or have any questions.
+ *
+ * GPL HEADER END
+ */
+
+/*
+ * Copyright 2012 Xyratex Technology Limited
+ */
+
+/**
+ * Linux crypto hash specific functions.
+ */
+
+/**
+ * Functions for start/stop shash CRC32 algorithm.
+ */
+int cfs_crypto_crc32_register(void);
+void cfs_crypto_crc32_unregister(void);
+
+/**
+ * Functions for start/stop shash adler32 algorithm.
+ */
+int cfs_crypto_adler32_register(void);
+void cfs_crypto_adler32_unregister(void);
+
+/**
+ * Functions for start/stop shash crc32 pclmulqdq
+ */
+int cfs_crypto_crc32_pclmul_register(void);
+void cfs_crypto_crc32_pclmul_unregister(void);
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-fs.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-fs.h
new file mode 100644
index 0000000..eebf138
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-fs.h
@@ -0,0 +1,92 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/include/libcfs/linux/linux-fs.h
+ *
+ * Basic library routines.
+ */
+
+#ifndef __LIBCFS_LINUX_CFS_FS_H__
+#define __LIBCFS_LINUX_CFS_FS_H__
+
+#ifndef __LIBCFS_LIBCFS_H__
+#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
+#endif
+
+
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/mount.h>
+#include <linux/backing-dev.h>
+#include <linux/posix_acl_xattr.h>
+
+#define filp_size(f) \
+ (i_size_read((f)->f_dentry->d_inode))
+#define filp_poff(f) \
+ (&(f)->f_pos)
+
+# define do_fsync(fp, flag) \
+ ((fp)->f_op->fsync(fp, 0, LLONG_MAX, flag))
+
+#define filp_read(fp, buf, size, pos) \
+ ((fp)->f_op->read((fp), (buf), (size), pos))
+
+#define filp_write(fp, buf, size, pos) \
+ ((fp)->f_op->write((fp), (buf), (size), pos))
+
+#define filp_fsync(fp) \
+ do_fsync(fp, 1)
+
+#define flock_type(fl) ((fl)->fl_type)
+#define flock_set_type(fl, type) do { (fl)->fl_type = (type); } while (0)
+#define flock_pid(fl) ((fl)->fl_pid)
+#define flock_set_pid(fl, pid) do { (fl)->fl_pid = (pid); } while (0)
+#define flock_start(fl) ((fl)->fl_start)
+#define flock_set_start(fl, st) do { (fl)->fl_start = (st); } while (0)
+#define flock_end(fl) ((fl)->fl_end)
+#define flock_set_end(fl, end) do { (fl)->fl_end = (end); } while (0)
+
+#ifndef IFSHIFT
+#define IFSHIFT 12
+#endif
+
+#ifndef IFTODT
+#define IFTODT(type) (((type) & S_IFMT) >> IFSHIFT)
+#endif
+#ifndef DTTOIF
+#define DTTOIF(dirtype) ((dirtype) << IFSHIFT)
+#endif
+
+#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-lock.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-lock.h
new file mode 100644
index 0000000..d6e00f9
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-lock.h
@@ -0,0 +1,204 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/include/libcfs/linux/linux-lock.h
+ *
+ * Basic library routines.
+ */
+
+#ifndef __LIBCFS_LINUX_CFS_LOCK_H__
+#define __LIBCFS_LINUX_CFS_LOCK_H__
+
+#ifndef __LIBCFS_LIBCFS_H__
+#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
+#endif
+
+
+#include <linux/mutex.h>
+
+/*
+ * IMPORTANT !!!!!!!!
+ *
+ * Lock declarations are not guaranteed to be initialized,
+ * although some of them are initialized in Linux. All locks
+ * declared by CFS_DECL_* should be initialized explicitly.
+ */
+
+/*
+ * spin_lock "implementation" (use Linux kernel's primitives)
+ *
+ * - spin_lock_init(x)
+ * - spin_lock(x)
+ * - spin_lock_bh(x)
+ * - spin_lock_bh_init(x)
+ * - spin_unlock(x)
+ * - spin_unlock_bh(x)
+ * - spin_trylock(x)
+ * - spin_is_locked(x)
+ *
+ * - spin_lock_irq(x)
+ * - spin_lock_irqsave(x, f)
+ * - spin_unlock_irqrestore(x, f)
+ * - read_lock_irqsave(lock, f)
+ * - write_lock_irqsave(lock, f)
+ * - write_unlock_irqrestore(lock, f)
+ */
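+
+/*
+ * Minimal usage sketch of the primitives listed above (illustration only;
+ * "foo_lock" and "foo_count" are hypothetical):
+ *
+ *	spinlock_t foo_lock;
+ *	unsigned long flags;
+ *
+ *	spin_lock_init(&foo_lock);
+ *	spin_lock_irqsave(&foo_lock, flags);
+ *	foo_count++;
+ *	spin_unlock_irqrestore(&foo_lock, flags);
+ */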
+
+/*
+ * spinlock "implementation"
+ */
+
+
+
+
+/*
+ * rw_semaphore "implementation" (use Linux kernel's primitives)
+ *
+ * - sema_init(x)
+ * - init_rwsem(x)
+ * - down_read(x)
+ * - up_read(x)
+ * - down_write(x)
+ * - up_write(x)
+ */
+
+
+#define fini_rwsem(s) do {} while (0)
+
+
+/*
+ * rwlock_t "implementation" (use Linux kernel's primitives)
+ *
+ * - rwlock_init(x)
+ * - read_lock(x)
+ * - read_unlock(x)
+ * - write_lock(x)
+ * - write_unlock(x)
+ * - write_lock_bh(x)
+ * - write_unlock_bh(x)
+ *
+ * - RW_LOCK_UNLOCKED
+ */
+
+
+#ifndef DEFINE_RWLOCK
+#define DEFINE_RWLOCK(lock) rwlock_t lock = __RW_LOCK_UNLOCKED(lock)
+#endif
+
+/*
+ * completion "implementation" (use Linux kernel's primitives)
+ *
+ * - DECLARE_COMPLETION(work)
+ * - INIT_COMPLETION(c)
+ * - COMPLETION_INITIALIZER(work)
+ * - init_completion(c)
+ * - complete(c)
+ * - wait_for_completion(c)
+ * - wait_for_completion_interruptible(c)
+ * - fini_completion(c)
+ */
+#define fini_completion(c) do { } while (0)
+
+/*
+ * semaphore "implementation" (use Linux kernel's primitives)
+ * - DEFINE_SEMAPHORE(name)
+ * - sema_init(sem, val)
+ * - up(sem)
+ * - down(sem)
+ * - down_interruptible(sem)
+ * - down_trylock(sem)
+ */
+
+/*
+ * mutex "implementation" (use Linux kernel's primitives)
+ *
+ * - DEFINE_MUTEX(name)
+ * - mutex_init(x)
+ * - mutex_lock(x)
+ * - mutex_unlock(x)
+ * - mutex_trylock(x)
+ * - mutex_is_locked(x)
+ * - mutex_destroy(x)
+ */
+
+#ifndef lockdep_set_class
+
+/**************************************************************************
+ *
+ * Lockdep "implementation". Also see liblustre.h
+ *
+ **************************************************************************/
+
+struct lock_class_key {
+ ;
+};
+
+#define lockdep_set_class(lock, key) \
+ do { (void)sizeof(lock); (void)sizeof(key); } while (0)
+/* This has to be a macro, so that `subclass' can be undefined in kernels
+ * that do not support lockdep. */
+
+
+static inline void lockdep_off(void)
+{
+}
+
+static inline void lockdep_on(void)
+{
+}
+#else
+
+#endif /* lockdep_set_class */
+
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
+#ifndef mutex_lock_nested
+#define mutex_lock_nested(mutex, subclass) mutex_lock(mutex)
+#endif
+
+#ifndef spin_lock_nested
+#define spin_lock_nested(lock, subclass) spin_lock(lock)
+#endif
+
+#ifndef down_read_nested
+#define down_read_nested(lock, subclass) down_read(lock)
+#endif
+
+#ifndef down_write_nested
+#define down_write_nested(lock, subclass) down_write(lock)
+#endif
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
+
+
+#endif /* __LIBCFS_LINUX_CFS_LOCK_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
new file mode 100644
index 0000000..2af15d4
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
@@ -0,0 +1,82 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/include/libcfs/linux/linux-mem.h
+ *
+ * Basic library routines.
+ */
+
+#ifndef __LIBCFS_LINUX_CFS_MEM_H__
+#define __LIBCFS_LINUX_CFS_MEM_H__
+
+#ifndef __LIBCFS_LIBCFS_H__
+#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
+#endif
+
+
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/memcontrol.h>
+#include <linux/mm_inline.h>
+
+#define CFS_PAGE_MASK (~((__u64)PAGE_CACHE_SIZE-1))
+#define page_index(p) ((p)->index)
+
+#define memory_pressure_get() (current->flags & PF_MEMALLOC)
+#define memory_pressure_set() do { current->flags |= PF_MEMALLOC; } while (0)
+#define memory_pressure_clr() do { current->flags &= ~PF_MEMALLOC; } while (0)
+
+#if BITS_PER_LONG == 32
+/* limit to lowmem on 32-bit systems */
+#define NUM_CACHEPAGES \
+ min(totalram_pages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4)
+#else
+#define NUM_CACHEPAGES totalram_pages
+#endif
+
+/*
+ * In Linux there is no way to determine whether current execution context is
+ * blockable.
+ */
+#define ALLOC_ATOMIC_TRY GFP_ATOMIC
+
+#define DECL_MMSPACE mm_segment_t __oldfs
+#define MMSPACE_OPEN \
+ do { __oldfs = get_fs(); set_fs(get_ds()); } while (0)
+#define MMSPACE_CLOSE set_fs(__oldfs)
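+
+/*
+ * Usage sketch for the MMSPACE helpers above (illustration only; "filp",
+ * "kbuf", "len" and "off" are hypothetical, and filp_read() is the wrapper
+ * from linux-fs.h in this series): widen the address limit around kernel
+ * buffers passed through the user-pointer file APIs, then restore it.
+ *
+ *	DECL_MMSPACE;
+ *
+ *	MMSPACE_OPEN;
+ *	rc = filp_read(filp, kbuf, len, &off);
+ *	MMSPACE_CLOSE;
+ */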
+
+#endif /* __LIBCFS_LINUX_CFS_MEM_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-prim.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-prim.h
new file mode 100644
index 0000000..1ec4ca1
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-prim.h
@@ -0,0 +1,83 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/include/libcfs/linux/linux-prim.h
+ *
+ * Basic library routines.
+ */
+
+#ifndef __LIBCFS_LINUX_CFS_PRIM_H__
+#define __LIBCFS_LINUX_CFS_PRIM_H__
+
+#ifndef __LIBCFS_LIBCFS_H__
+#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
+#endif
+
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/mm.h>
+#include <linux/timer.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <linux/random.h>
+
+#include <linux/miscdevice.h>
+#include <linux/libcfs/linux/portals_compat25.h>
+#include <asm/div64.h>
+
+#include <linux/libcfs/linux/linux-time.h>
+
+/*
+ * Sysctl register
+ */
+typedef struct ctl_table ctl_table_t;
+typedef struct ctl_table_header ctl_table_header_t;
+
+#define DECLARE_PROC_HANDLER(name) \
+static int \
+LL_PROC_PROTO(name) \
+{ \
+ DECLARE_LL_PROC_PPOS_DECL; \
+ \
+ return proc_call_handler(table->data, write, \
+ ppos, buffer, lenp, \
+ __##name); \
+}
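+
+/*
+ * DECLARE_PROC_HANDLER(name) expects a helper named __##name with the
+ * handler signature taken by proc_call_handler() (see portals_compat25.h).
+ * Sketch, with a hypothetical "proc_foo_limit" handler:
+ *
+ *	static int __proc_foo_limit(void *data, int write, loff_t pos,
+ *				    void *buffer, int len)
+ *	{
+ *		return 0;
+ *	}
+ *	DECLARE_PROC_HANDLER(proc_foo_limit)
+ */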
+
+#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-tcpip.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-tcpip.h
new file mode 100644
index 0000000..7a8d006
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-tcpip.h
@@ -0,0 +1,72 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/include/libcfs/linux/linux-tcpip.h
+ *
+ * Basic library routines.
+ */
+
+#ifndef __LIBCFS_LINUX_CFS_TCP_H__
+#define __LIBCFS_LINUX_CFS_TCP_H__
+
+#ifndef __LIBCFS_LIBCFS_H__
+#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
+#endif
+
+
+#include <net/sock.h>
+
+typedef struct socket socket_t;
+
+#define SOCK_SNDBUF(so) ((so)->sk->sk_sndbuf)
+#define SOCK_TEST_NOSPACE(so) test_bit(SOCK_NOSPACE, &(so)->flags)
+
+static inline int
+cfs_sock_error(struct socket *sock)
+{
+ return sock->sk->sk_err;
+}
+
+static inline int
+cfs_sock_wmem_queued(struct socket *sock)
+{
+ return sock->sk->sk_wmem_queued;
+}
+
+#define cfs_sk_sleep(sk) sk_sleep(sk)
+
+#define DEFAULT_NET (&init_net)
+
+#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
new file mode 100644
index 0000000..a386d1b
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
@@ -0,0 +1,274 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/include/libcfs/linux/linux-time.h
+ *
+ * Implementation of portable time API for Linux (kernel and user-level).
+ *
+ * Author: Nikita Danilov <nikita@clusterfs.com>
+ */
+
+#ifndef __LIBCFS_LINUX_LINUX_TIME_H__
+#define __LIBCFS_LINUX_LINUX_TIME_H__
+
+#ifndef __LIBCFS_LIBCFS_H__
+#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
+#endif
+
+
+/* Portable time API */
+
+/*
+ * Platform provides three opaque data-types:
+ *
+ * cfs_time_t represents a point in time. This is internal kernel
+ * time rather than "wall clock" time, and bears no
+ * relation to gettimeofday().
+ *
+ * cfs_duration_t represents a time interval with the resolution of the
+ * internal platform clock.
+ *
+ * cfs_fs_time_t represents an instant of world-visible time. This is
+ * used in file-system time-stamps.
+ *
+ * cfs_time_t cfs_time_current(void);
+ * cfs_time_t cfs_time_add (cfs_time_t, cfs_duration_t);
+ * cfs_duration_t cfs_time_sub (cfs_time_t, cfs_time_t);
+ * int cfs_impl_time_before (cfs_time_t, cfs_time_t);
+ * int cfs_impl_time_before_eq(cfs_time_t, cfs_time_t);
+ *
+ * cfs_duration_t cfs_duration_build(int64_t);
+ *
+ * time_t cfs_duration_sec (cfs_duration_t);
+ * void cfs_duration_usec(cfs_duration_t, struct timeval *);
+ * void cfs_duration_nsec(cfs_duration_t, struct timespec *);
+ *
+ * void cfs_fs_time_current(cfs_fs_time_t *);
+ * time_t cfs_fs_time_sec (cfs_fs_time_t *);
+ * void cfs_fs_time_usec (cfs_fs_time_t *, struct timeval *);
+ * void cfs_fs_time_nsec (cfs_fs_time_t *, struct timespec *);
+ * int cfs_fs_time_before (cfs_fs_time_t *, cfs_fs_time_t *);
+ * int cfs_fs_time_beforeq(cfs_fs_time_t *, cfs_fs_time_t *);
+ *
+ * CFS_TIME_FORMAT
+ * CFS_DURATION_FORMAT
+ *
+ */
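+
+/*
+ * Illustrative sketch using the jiffies-based helpers defined below
+ * (example only; "deadline" and "timed_out" are hypothetical):
+ *
+ *	__u64 deadline = cfs_time_shift_64(5);		(now + 5 seconds)
+ *	...
+ *	if (cfs_time_before_64(deadline, cfs_time_current_64()))
+ *		timed_out = 1;				(deadline has passed)
+ */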
+
+#define ONE_BILLION ((u_int64_t)1000000000)
+#define ONE_MILLION 1000000
+
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <asm/div64.h>
+
+#include <linux/libcfs/linux/portals_compat25.h>
+
+/*
+ * post 2.5 kernels.
+ */
+
+#include <linux/jiffies.h>
+
+typedef struct timespec cfs_fs_time_t;
+
+static inline void cfs_fs_time_usec(cfs_fs_time_t *t, struct timeval *v)
+{
+ v->tv_sec = t->tv_sec;
+ v->tv_usec = t->tv_nsec / 1000;
+}
+
+static inline void cfs_fs_time_nsec(cfs_fs_time_t *t, struct timespec *s)
+{
+ *s = *t;
+}
+
+/*
+ * internal helper function used by cfs_fs_time_before*()
+ */
+static inline unsigned long long __cfs_fs_time_flat(cfs_fs_time_t *t)
+{
+ return (unsigned long long)t->tv_sec * ONE_BILLION + t->tv_nsec;
+}
+
+
+/*
+ * Generic kernel stuff
+ */
+
+typedef unsigned long cfs_time_t; /* jiffies */
+typedef long cfs_duration_t;
+typedef cycles_t cfs_cycles_t;
+
+static inline int cfs_time_before(cfs_time_t t1, cfs_time_t t2)
+{
+ return time_before(t1, t2);
+}
+
+static inline int cfs_time_beforeq(cfs_time_t t1, cfs_time_t t2)
+{
+ return time_before_eq(t1, t2);
+}
+
+static inline cfs_time_t cfs_time_current(void)
+{
+ return jiffies;
+}
+
+static inline time_t cfs_time_current_sec(void)
+{
+ return get_seconds();
+}
+
+static inline void cfs_fs_time_current(cfs_fs_time_t *t)
+{
+ *t = CURRENT_TIME;
+}
+
+static inline time_t cfs_fs_time_sec(cfs_fs_time_t *t)
+{
+ return t->tv_sec;
+}
+
+static inline int cfs_fs_time_before(cfs_fs_time_t *t1, cfs_fs_time_t *t2)
+{
+ return __cfs_fs_time_flat(t1) < __cfs_fs_time_flat(t2);
+}
+
+static inline int cfs_fs_time_beforeq(cfs_fs_time_t *t1, cfs_fs_time_t *t2)
+{
+ return __cfs_fs_time_flat(t1) <= __cfs_fs_time_flat(t2);
+}
+
+#if 0
+static inline cfs_duration_t cfs_duration_build(int64_t nano)
+{
+#if (BITS_PER_LONG == 32)
+ /* We cannot use do_div(t, ONE_BILLION); do_div() can only handle a
+ * 64-bit dividend and a 32-bit divisor */
+ int64_t t = nano * HZ;
+ do_div(t, 1000);
+ do_div(t, 1000000);
+ return (cfs_duration_t)t;
+#else
+ return (nano * HZ / ONE_BILLION);
+#endif
+}
+#endif
+
+static inline cfs_duration_t cfs_time_seconds(int seconds)
+{
+ return ((cfs_duration_t)seconds) * HZ;
+}
+
+static inline time_t cfs_duration_sec(cfs_duration_t d)
+{
+ return d / HZ;
+}
+
+static inline void cfs_duration_usec(cfs_duration_t d, struct timeval *s)
+{
+#if (BITS_PER_LONG == 32) && (HZ > 4096)
+ __u64 t;
+
+ s->tv_sec = d / HZ;
+ t = (d - (cfs_duration_t)s->tv_sec * HZ) * ONE_MILLION;
+ do_div(t, HZ);
+ s->tv_usec = t;
+#else
+ s->tv_sec = d / HZ;
+ s->tv_usec = ((d - (cfs_duration_t)s->tv_sec * HZ) * \
+ ONE_MILLION) / HZ;
+#endif
+}
+
+static inline void cfs_duration_nsec(cfs_duration_t d, struct timespec *s)
+{
+#if (BITS_PER_LONG == 32)
+ __u64 t;
+
+ s->tv_sec = d / HZ;
+ t = (d - s->tv_sec * HZ) * ONE_BILLION;
+ do_div(t, HZ);
+ s->tv_nsec = t;
+#else
+ s->tv_sec = d / HZ;
+ s->tv_nsec = ((d - s->tv_sec * HZ) * ONE_BILLION) / HZ;
+#endif
+}
+
+#define cfs_time_current_64 get_jiffies_64
+
+static inline __u64 cfs_time_add_64(__u64 t, __u64 d)
+{
+ return t + d;
+}
+
+static inline __u64 cfs_time_shift_64(int seconds)
+{
+ return cfs_time_add_64(cfs_time_current_64(),
+ cfs_time_seconds(seconds));
+}
+
+static inline int cfs_time_before_64(__u64 t1, __u64 t2)
+{
+ return (__s64)t2 - (__s64)t1 > 0;
+}
+
+static inline int cfs_time_beforeq_64(__u64 t1, __u64 t2)
+{
+ return (__s64)t2 - (__s64)t1 >= 0;
+}
+
+
+/*
+ * One jiffy
+ */
+#define CFS_TICK (1)
+
+#define CFS_TIME_T "%lu"
+#define CFS_DURATION_T "%ld"
+
+
+#endif /* __LIBCFS_LINUX_LINUX_TIME_H__ */
+/*
+ * Local variables:
+ * c-indentation-style: "K&R"
+ * c-basic-offset: 8
+ * tab-width: 8
+ * fill-column: 80
+ * scroll-step: 1
+ * End:
+ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-types.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-types.h
new file mode 100644
index 0000000..1423949
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-types.h
@@ -0,0 +1,36 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/include/libcfs/linux/linux-types.h
+ */
+#include <linux/types.h>
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/portals_compat25.h b/drivers/staging/lustre/include/linux/libcfs/linux/portals_compat25.h
new file mode 100644
index 0000000..fe4c63f
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/portals_compat25.h
@@ -0,0 +1,99 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __LIBCFS_LINUX_PORTALS_COMPAT_H__
+#define __LIBCFS_LINUX_PORTALS_COMPAT_H__
+
+// XXX BUG 1511 -- remove this stanza and all callers when bug 1511 is resolved
+#if defined(SPINLOCK_DEBUG) && SPINLOCK_DEBUG
+# define SIGNAL_MASK_ASSERT() \
+ LASSERT(current->sighand->siglock.magic == SPINLOCK_MAGIC)
+#else
+# define SIGNAL_MASK_ASSERT()
+#endif
+// XXX BUG 1511 -- remove this stanza and all callers when bug 1511 is resolved
+
+#define SIGNAL_MASK_LOCK(task, flags) \
+ spin_lock_irqsave(&task->sighand->siglock, flags)
+#define SIGNAL_MASK_UNLOCK(task, flags) \
+ spin_unlock_irqrestore(&task->sighand->siglock, flags)
+#define USERMODEHELPER(path, argv, envp) \
+ call_usermodehelper(path, argv, envp, 1)
+#define clear_tsk_thread_flag(current, TIF_SIGPENDING) clear_tsk_thread_flag(current, \
+ TIF_SIGPENDING)
+# define smp_num_cpus num_online_cpus()
+
+#define cfs_wait_event_interruptible(wq, condition, ret) \
+ ret = wait_event_interruptible(wq, condition)
+#define cfs_wait_event_interruptible_exclusive(wq, condition, ret) \
+ ret = wait_event_interruptible_exclusive(wq, condition)
+
+#define THREAD_NAME(comm, len, fmt, a...) \
+ snprintf(comm, len, fmt, ## a)
+
+/* 2.6 alloc_page users can use page->lru */
+#define PAGE_LIST_ENTRY lru
+#define PAGE_LIST(page) ((page)->lru)
+
+#ifndef __user
+#define __user
+#endif
+
+#ifndef __fls
+#define __cfs_fls fls
+#else
+#define __cfs_fls __fls
+#endif
+
+#define ll_proc_dointvec(table, write, filp, buffer, lenp, ppos) \
+ proc_dointvec(table, write, buffer, lenp, ppos);
+
+#define ll_proc_dolongvec(table, write, filp, buffer, lenp, ppos) \
+ proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
+#define ll_proc_dostring(table, write, filp, buffer, lenp, ppos) \
+ proc_dostring(table, write, buffer, lenp, ppos);
+#define LL_PROC_PROTO(name) \
+ name(ctl_table_t *table, int write, \
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+#define DECLARE_LL_PROC_PPOS_DECL
+
+/* helper for sysctl handlers */
+int proc_call_handler(void *data, int write,
+ loff_t *ppos, void *buffer, size_t *lenp,
+ int (*handler)(void *data, int write,
+ loff_t pos, void *buffer, int len));
+
+#endif /* __LIBCFS_LINUX_PORTALS_COMPAT_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/lucache.h b/drivers/staging/lustre/include/linux/libcfs/lucache.h
new file mode 100644
index 0000000..9668b39
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/lucache.h
@@ -0,0 +1,162 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef _LUCACHE_H
+#define _LUCACHE_H
+
+#include <linux/libcfs/libcfs.h>
+
+/** \defgroup ucache ucache
+ *
+ * @{
+ */
+
+#define UC_CACHE_NEW 0x01
+#define UC_CACHE_ACQUIRING 0x02
+#define UC_CACHE_INVALID 0x04
+#define UC_CACHE_EXPIRED 0x08
+
+#define UC_CACHE_IS_NEW(i) ((i)->ue_flags & UC_CACHE_NEW)
+#define UC_CACHE_IS_INVALID(i) ((i)->ue_flags & UC_CACHE_INVALID)
+#define UC_CACHE_IS_ACQUIRING(i) ((i)->ue_flags & UC_CACHE_ACQUIRING)
+#define UC_CACHE_IS_EXPIRED(i) ((i)->ue_flags & UC_CACHE_EXPIRED)
+#define UC_CACHE_IS_VALID(i) ((i)->ue_flags == 0)
+
+#define UC_CACHE_SET_NEW(i) (i)->ue_flags |= UC_CACHE_NEW
+#define UC_CACHE_SET_INVALID(i) (i)->ue_flags |= UC_CACHE_INVALID
+#define UC_CACHE_SET_ACQUIRING(i) (i)->ue_flags |= UC_CACHE_ACQUIRING
+#define UC_CACHE_SET_EXPIRED(i) (i)->ue_flags |= UC_CACHE_EXPIRED
+#define UC_CACHE_SET_VALID(i) (i)->ue_flags = 0
+
+#define UC_CACHE_CLEAR_NEW(i) (i)->ue_flags &= ~UC_CACHE_NEW
+#define UC_CACHE_CLEAR_ACQUIRING(i) (i)->ue_flags &= ~UC_CACHE_ACQUIRING
+#define UC_CACHE_CLEAR_INVALID(i) (i)->ue_flags &= ~UC_CACHE_INVALID
+#define UC_CACHE_CLEAR_EXPIRED(i) (i)->ue_flags &= ~UC_CACHE_EXPIRED
+
+struct upcall_cache_entry;
+
+struct md_perm {
+ lnet_nid_t mp_nid;
+ __u32 mp_perm;
+};
+
+struct md_identity {
+ struct upcall_cache_entry *mi_uc_entry;
+ uid_t mi_uid;
+ gid_t mi_gid;
+ struct group_info *mi_ginfo;
+ int mi_nperms;
+ struct md_perm *mi_perms;
+};
+
+struct upcall_cache_entry {
+ struct list_head ue_hash;
+ __u64 ue_key;
+ atomic_t ue_refcount;
+ int ue_flags;
+ wait_queue_head_t ue_waitq;
+ cfs_time_t ue_acquire_expire;
+ cfs_time_t ue_expire;
+ union {
+ struct md_identity identity;
+ } u;
+};
+
+#define UC_CACHE_HASH_SIZE (128)
+#define UC_CACHE_HASH_INDEX(id) ((id) & (UC_CACHE_HASH_SIZE - 1))
+#define UC_CACHE_UPCALL_MAXPATH (1024UL)
+
+struct upcall_cache;
+
+struct upcall_cache_ops {
+ void (*init_entry)(struct upcall_cache_entry *, void *args);
+ void (*free_entry)(struct upcall_cache *,
+ struct upcall_cache_entry *);
+ int (*upcall_compare)(struct upcall_cache *,
+ struct upcall_cache_entry *,
+ __u64 key, void *args);
+ int (*downcall_compare)(struct upcall_cache *,
+ struct upcall_cache_entry *,
+ __u64 key, void *args);
+ int (*do_upcall)(struct upcall_cache *,
+ struct upcall_cache_entry *);
+ int (*parse_downcall)(struct upcall_cache *,
+ struct upcall_cache_entry *, void *);
+};
+
+struct upcall_cache {
+ struct list_head uc_hashtable[UC_CACHE_HASH_SIZE];
+ spinlock_t uc_lock;
+ rwlock_t uc_upcall_rwlock;
+
+ char uc_name[40]; /* for upcall */
+ char uc_upcall[UC_CACHE_UPCALL_MAXPATH];
+ int uc_acquire_expire; /* seconds */
+ int uc_entry_expire; /* seconds */
+ struct upcall_cache_ops *uc_ops;
+};
+
+struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
+ __u64 key, void *args);
+void upcall_cache_put_entry(struct upcall_cache *cache,
+ struct upcall_cache_entry *entry);
+int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
+ void *args);
+void upcall_cache_flush_idle(struct upcall_cache *cache);
+void upcall_cache_flush_all(struct upcall_cache *cache);
+void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args);
+struct upcall_cache *upcall_cache_init(const char *name, const char *upcall,
+ struct upcall_cache_ops *ops);
+void upcall_cache_cleanup(struct upcall_cache *cache);
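+
+/*
+ * Typical lifecycle sketch (illustration only; "upcall_path", "my_ops",
+ * "key" and "args" are hypothetical caller-supplied values):
+ *
+ *	cache = upcall_cache_init("identity", upcall_path, &my_ops);
+ *	entry = upcall_cache_get_entry(cache, key, args);
+ *	... use entry->u.identity ...
+ *	upcall_cache_put_entry(cache, entry);
+ *	upcall_cache_cleanup(cache);
+ */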
+
+#if 0
+struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *hash,
+ __u64 key, __u32 primary,
+ __u32 ngroups, __u32 *groups);
+void upcall_cache_put_entry(struct upcall_cache *hash,
+ struct upcall_cache_entry *entry);
+int upcall_cache_downcall(struct upcall_cache *hash, __u32 err, __u64 key,
+ __u32 primary, __u32 ngroups, __u32 *groups);
+void upcall_cache_flush_idle(struct upcall_cache *cache);
+void upcall_cache_flush_all(struct upcall_cache *cache);
+struct upcall_cache *upcall_cache_init(const char *name);
+void upcall_cache_cleanup(struct upcall_cache *hash);
+
+#endif
+
+/** @} ucache */
+
+#endif /* _LUCACHE_H */
diff --git a/drivers/staging/lustre/include/linux/libcfs/params_tree.h b/drivers/staging/lustre/include/linux/libcfs/params_tree.h
new file mode 100644
index 0000000..78a2c4e
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/libcfs/params_tree.h
@@ -0,0 +1,164 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * API and structure definitions for params_tree.
+ *
+ * Author: LiuYing <emoly.liu@oracle.com>
+ */
+#ifndef __PARAMS_TREE_H__
+#define __PARAMS_TREE_H__
+
+#include <linux/libcfs/libcfs.h>
+
+#undef LPROCFS
+#if defined(CONFIG_PROC_FS)
+# define LPROCFS
+#endif
+
+#ifdef LPROCFS
+typedef struct file cfs_param_file_t;
+typedef struct inode cfs_inode_t;
+typedef struct proc_inode cfs_proc_inode_t;
+typedef struct seq_file cfs_seq_file_t;
+typedef struct seq_operations cfs_seq_ops_t;
+typedef struct file_operations cfs_param_file_ops_t;
+typedef struct proc_dir_entry cfs_param_dentry_t;
+typedef struct poll_table_struct cfs_poll_table_t;
+#define CFS_PARAM_MODULE THIS_MODULE
+#define cfs_file_private(file) (file->private_data)
+#define cfs_dentry_data(dentry) (dentry->data)
+#define cfs_proc_inode_pde(proc_inode) (proc_inode->pde)
+#define cfs_proc_inode(proc_inode) (proc_inode->vfs_inode)
+#define cfs_seq_read_common seq_read
+#define cfs_seq_lseek_common seq_lseek
+#define cfs_seq_private(seq) (seq->private)
+#define cfs_seq_printf(seq, format, ...) seq_printf(seq, format, \
+ ## __VA_ARGS__)
+#define cfs_seq_release(inode, file) seq_release(inode, file)
+#define cfs_seq_puts(seq, s) seq_puts(seq, s)
+#define cfs_seq_putc(seq, s) seq_putc(seq, s)
+#define cfs_seq_read(file, buf, count, ppos, rc) (rc = seq_read(file, buf, \
+ count, ppos))
+#define cfs_seq_open(file, ops, rc) (rc = seq_open(file, ops))
+
+#else /* !LPROCFS */
+
+typedef struct cfs_params_file {
+ void *param_private;
+ loff_t param_pos;
+ unsigned int param_flags;
+} cfs_param_file_t;
+
+typedef struct cfs_param_inode {
+ void *param_private;
+} cfs_inode_t;
+
+typedef struct cfs_param_dentry {
+ void *param_data;
+} cfs_param_dentry_t;
+
+typedef struct cfs_proc_inode {
+ cfs_param_dentry_t *param_pde;
+ cfs_inode_t param_inode;
+} cfs_proc_inode_t;
+
+struct cfs_seq_operations;
+typedef struct cfs_seq_file {
+ char *buf;
+ size_t size;
+ size_t from;
+ size_t count;
+ loff_t index;
+ loff_t version;
+ struct mutex lock;
+ struct cfs_seq_operations *op;
+ void *private;
+} cfs_seq_file_t;
+
+typedef struct cfs_seq_operations {
+ void *(*start) (cfs_seq_file_t *m, loff_t *pos);
+ void (*stop) (cfs_seq_file_t *m, void *v);
+ void *(*next) (cfs_seq_file_t *m, void *v, loff_t *pos);
+ int (*show) (cfs_seq_file_t *m, void *v);
+} cfs_seq_ops_t;
+
+typedef void *cfs_poll_table_t;
+
+typedef struct cfs_param_file_ops {
+ struct module *owner;
+ int (*open) (cfs_inode_t *, struct file *);
+ loff_t (*llseek)(struct file *, loff_t, int);
+ int (*release) (cfs_inode_t *, cfs_param_file_t *);
+ unsigned int (*poll) (struct file *, cfs_poll_table_t *);
+ ssize_t (*write) (struct file *, const char *, size_t, loff_t *);
+ ssize_t (*read)(struct file *, char *, size_t, loff_t *);
+} cfs_param_file_ops_t;
+typedef cfs_param_file_ops_t *cfs_lproc_filep_t;
+
+static inline cfs_proc_inode_t *FAKE_PROC_I(const cfs_inode_t *inode)
+{
+ return container_of(inode, cfs_proc_inode_t, param_inode);
+}
+
+#define CFS_PARAM_MODULE NULL
+#define cfs_file_private(file) (file->param_private)
+#define cfs_dentry_data(dentry) (dentry->param_data)
+#define cfs_proc_inode(proc_inode) (proc_inode->param_inode)
+#define cfs_proc_inode_pde(proc_inode) (proc_inode->param_pde)
+#define cfs_seq_read_common NULL
+#define cfs_seq_lseek_common NULL
+#define cfs_seq_private(seq) (seq->private)
+#define cfs_seq_read(file, buf, count, ppos, rc) do {} while(0)
+#define cfs_seq_open(file, ops, rc) \
+do { \
+ cfs_seq_file_t *p = cfs_file_private(file); \
+ if (!p) { \
+ LIBCFS_ALLOC(p, sizeof(*p)); \
+ if (!p) { \
+ rc = -ENOMEM; \
+ break; \
+ } \
+ cfs_file_private(file) = p; \
+ } \
+ memset(p, 0, sizeof(*p)); \
+ p->op = ops; \
+ rc = 0; \
+} while(0)
+
+#endif /* LPROCFS */
+
+/* XXX: params_tree APIs */
+
+#endif /* __PARAMS_TREE_H__ */
diff --git a/drivers/staging/lustre/include/linux/lnet/api-support.h b/drivers/staging/lustre/include/linux/lnet/api-support.h
new file mode 100644
index 0000000..a8d91db
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/lnet/api-support.h
@@ -0,0 +1,44 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __LNET_API_SUPPORT_H__
+#define __LNET_API_SUPPORT_H__
+
+#include <linux/lnet/linux/api-support.h>
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/lnet/types.h>
+#include <linux/lnet/lnet.h>
+
+#endif
diff --git a/drivers/staging/lustre/include/linux/lnet/api.h b/drivers/staging/lustre/include/linux/lnet/api.h
new file mode 100644
index 0000000..e8642e3
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/lnet/api.h
@@ -0,0 +1,220 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __LNET_API_H__
+#define __LNET_API_H__
+
+/** \defgroup lnet LNet
+ *
+ * The Lustre Networking subsystem.
+ *
+ * LNet is an asynchronous message-passing API that provides an unreliable,
+ * connectionless service with no ordering guarantees. It supports OFA IB,
+ * TCP/IP, and Cray Portals, and routes between heterogeneous networks.
+ *
+ * LNet can run both in OS kernel space and in userspace as a library.
+ * @{
+ */
+
+#include <linux/lnet/types.h>
+
+/** \defgroup lnet_init_fini Initialization and cleanup
+ * LNet must be properly initialized before any LNet calls can be made.
+ * @{ */
+int LNetInit(void);
+void LNetFini(void);
+
+int LNetNIInit(lnet_pid_t requested_pid);
+int LNetNIFini(void);
+/** @} lnet_init_fini */
+
+/** \defgroup lnet_addr LNet addressing and basic types
+ *
+ * Addressing scheme and basic data types of LNet.
+ *
+ * The LNet API is memory-oriented, so LNet must be able to address not only
+ * end-points but also memory regions within a process address space.
+ * An ::lnet_nid_t addresses an end-point. An ::lnet_pid_t identifies a process
+ * in a node. A portal represents an opening in the address space of a
+ * process. Match bits are criteria used to identify a region of memory inside
+ * a portal, and the offset specifies a location within that memory region.
+ *
+ * LNet creates a table of portals for each process during initialization.
+ * This table has MAX_PORTALS entries and its size can't be dynamically
+ * changed. A portal stays empty until the owning process starts to add
+ * memory regions to it. A portal is sometimes called an index because
+ * it's an entry in the portals table of a process.
+ *
+ * \see LNetMEAttach
+ * @{ */
+int LNetGetId(unsigned int index, lnet_process_id_t *id);
+int LNetDist(lnet_nid_t nid, lnet_nid_t *srcnid, __u32 *order);
+void LNetSnprintHandle(char *str, int str_len, lnet_handle_any_t handle);
+
+/** @} lnet_addr */
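+
+/*
+ * Sketch: enumerating the local interface IDs with LNetGetId() (illustration
+ * only; the -ENOENT end-of-list convention is assumed here rather than
+ * mandated by this header):
+ *
+ *	lnet_process_id_t id;
+ *	unsigned int i;
+ *
+ *	for (i = 0; LNetGetId(i, &id) != -ENOENT; i++)
+ *		... id.nid / id.pid describe one local interface ...
+ */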
+
+
+/** \defgroup lnet_me Match entries
+ *
+ * A match entry (abbreviated as ME) describes a set of criteria to accept
+ * incoming requests.
+ *
+ * A portal is essentially a match list plus a set of attributes. A match
+ * list is a chain of MEs. Each ME includes a pointer to a memory descriptor
+ * and a set of match criteria. The match criteria can be used to reject
+ * incoming requests based on process ID or the match bits provided in the
+ * request. MEs can be dynamically inserted into a match list by LNetMEAttach()
+ * and LNetMEInsert(), and removed from its list by LNetMEUnlink().
+ * @{ */
+int LNetMEAttach(unsigned int portal,
+ lnet_process_id_t match_id_in,
+ __u64 match_bits_in,
+ __u64 ignore_bits_in,
+ lnet_unlink_t unlink_in,
+ lnet_ins_pos_t pos_in,
+ lnet_handle_me_t *handle_out);
+
+int LNetMEInsert(lnet_handle_me_t current_in,
+ lnet_process_id_t match_id_in,
+ __u64 match_bits_in,
+ __u64 ignore_bits_in,
+ lnet_unlink_t unlink_in,
+ lnet_ins_pos_t position_in,
+ lnet_handle_me_t *handle_out);
+
+int LNetMEUnlink(lnet_handle_me_t current_in);
+/** @} lnet_me */
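+
+/*
+ * Sketch: attaching a match entry that accepts requests from any peer
+ * (illustration only; MY_PORTAL and MY_MATCH_BITS are hypothetical, and the
+ * LNET_NID_ANY/LNET_PID_ANY/LNET_UNLINK/LNET_INS_AFTER constants are assumed
+ * to come from <linux/lnet/types.h>):
+ *
+ *	lnet_process_id_t any = { .nid = LNET_NID_ANY, .pid = LNET_PID_ANY };
+ *	lnet_handle_me_t meh;
+ *	int rc;
+ *
+ *	rc = LNetMEAttach(MY_PORTAL, any, MY_MATCH_BITS, 0,
+ *			  LNET_UNLINK, LNET_INS_AFTER, &meh);
+ */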
+
+/** \defgroup lnet_md Memory descriptors
+ *
+ * A memory descriptor contains information about a region of a user's
+ * memory (either in kernel or user space) and optionally points to an
+ * event queue where information about the operations performed on the
+ * memory descriptor is recorded. A memory descriptor is abbreviated as an
+ * MD and can be used interchangeably with the memory region it describes.
+ *
+ * The LNet API provides two operations to create MDs: LNetMDAttach()
+ * and LNetMDBind(); one operation to unlink and release the resources
+ * associated with an MD: LNetMDUnlink().
+ * @{ */
+int LNetMDAttach(lnet_handle_me_t current_in,
+ lnet_md_t md_in,
+ lnet_unlink_t unlink_in,
+ lnet_handle_md_t *handle_out);
+
+int LNetMDBind(lnet_md_t md_in,
+ lnet_unlink_t unlink_in,
+ lnet_handle_md_t *handle_out);
+
+int LNetMDUnlink(lnet_handle_md_t md_in);
+/** @} lnet_md */
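+
+/*
+ * Sketch: binding a kernel buffer as an MD on the ME attached above
+ * (illustration only; "buf", "buf_len", "meh" and "eqh" are hypothetical,
+ * "eqh" being an EQ handle from LNetEQAlloc() below, and the lnet_md_t
+ * fields and LNET_MD_OP_PUT are assumed from <linux/lnet/types.h>):
+ *
+ *	lnet_md_t md = {
+ *		.start     = buf,
+ *		.length    = buf_len,
+ *		.threshold = 1,
+ *		.options   = LNET_MD_OP_PUT,
+ *		.eq_handle = eqh,
+ *	};
+ *	lnet_handle_md_t mdh;
+ *
+ *	rc = LNetMDAttach(meh, md, LNET_UNLINK, &mdh);
+ */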
+
+/** \defgroup lnet_eq Events and event queues
+ *
+ * Event queues (abbreviated as EQ) are used to log operations performed on
+ * local MDs. In particular, they signal the completion of a data transmission
+ * into or out of a MD. They can also be used to hold acknowledgments for
+ * completed PUT operations and indicate when an MD has been unlinked. Multiple
+ * MDs can share a single EQ. An EQ may have an optional event handler
+ * associated with it. If an event handler exists, it will be run for each
+ * event that is deposited into the EQ.
+ *
+ * In addition to the lnet_handle_eq_t, the LNet API defines two types
+ * associated with events: The ::lnet_event_kind_t defines the kinds of events
+ * that can be stored in an EQ. The lnet_event_t defines a structure that
+ * holds the information associated with an event.
+ *
+ * There are five functions for dealing with EQs: LNetEQAlloc() is used to
+ * create an EQ and allocate the resources needed, while LNetEQFree()
+ * releases these resources and frees the EQ. LNetEQGet() retrieves the next
+ * event from an EQ, and LNetEQWait() can be used to block a process until
+ * an EQ has at least one event. LNetEQPoll() can be used to test or wait
+ * on multiple EQs.
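+ *
+ * A minimal polling sketch (illustrative only; the queue depth of 64,
+ * LNET_EQ_HANDLER_NONE (no callback) and the 100 ms timeout are arbitrary
+ * choices, and error handling is omitted):
+ * \code
+ *	lnet_handle_eq_t eqh;
+ *	lnet_event_t ev;
+ *	int which;
+ *
+ *	if (LNetEQAlloc(64, LNET_EQ_HANDLER_NONE, &eqh) == 0) {
+ *		LNetEQPoll(&eqh, 1, 100, &ev, &which);
+ *		LNetEQFree(eqh);
+ *	}
+ * \endcode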
+ * @{ */
+int LNetEQAlloc(unsigned int count_in,
+ lnet_eq_handler_t handler,
+ lnet_handle_eq_t *handle_out);
+
+int LNetEQFree(lnet_handle_eq_t eventq_in);
+
+int LNetEQGet(lnet_handle_eq_t eventq_in,
+ lnet_event_t *event_out);
+
+
+int LNetEQWait(lnet_handle_eq_t eventq_in,
+ lnet_event_t *event_out);
+
+int LNetEQPoll(lnet_handle_eq_t *eventqs_in,
+ int neq_in,
+ int timeout_ms,
+ lnet_event_t *event_out,
+ int *which_eq_out);
+/** @} lnet_eq */
+
+/** \defgroup lnet_data Data movement operations
+ *
+ * The LNet API provides two data movement operations: LNetPut()
+ * and LNetGet().
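+ *
+ * For example, pushing the contents of a previously bound MD to a peer
+ * could look like the sketch below (illustrative only; \a mdh is assumed
+ * to come from LNetMDBind(), \a peer is a destination chosen by the
+ * caller, the portal index and match bits are placeholders, and
+ * LNET_NID_ANY is used on the assumption that LNet may choose the source
+ * NID itself):
+ * \code
+ *	extern lnet_process_id_t peer;
+ *	extern lnet_handle_md_t mdh;
+ *
+ *	LNetPut(LNET_NID_ANY, mdh, LNET_NOACK_REQ, peer,
+ *		4, 0x1ULL, 0, 0);
+ * \endcode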
+ * @{ */
+int LNetPut(lnet_nid_t self,
+ lnet_handle_md_t md_in,
+ lnet_ack_req_t ack_req_in,
+ lnet_process_id_t target_in,
+ unsigned int portal_in,
+ __u64 match_bits_in,
+ unsigned int offset_in,
+ __u64 hdr_data_in);
+
+int LNetGet(lnet_nid_t self,
+ lnet_handle_md_t md_in,
+ lnet_process_id_t target_in,
+ unsigned int portal_in,
+ __u64 match_bits_in,
+ unsigned int offset_in);
+/** @} lnet_data */
+
+
+/** \defgroup lnet_misc Miscellaneous operations.
+ * Miscellaneous operations.
+ * @{ */
+
+int LNetSetLazyPortal(int portal);
+int LNetClearLazyPortal(int portal);
+int LNetCtl(unsigned int cmd, void *arg);
+int LNetSetAsync(lnet_process_id_t id, int nasync);
+
+/** @} lnet_misc */
+
+/** @} lnet */
+#endif
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
new file mode 100644
index 0000000..59bff0b
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
@@ -0,0 +1,874 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/include/lnet/lib-lnet.h
+ *
+ * Top level include for library side routines
+ */
+
+#ifndef __LNET_LIB_LNET_H__
+#define __LNET_LIB_LNET_H__
+
+#include <linux/lnet/linux/lib-lnet.h>
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/lnet/types.h>
+#include <linux/lnet/lnet.h>
+#include <linux/lnet/lib-types.h>
+
+extern lnet_t the_lnet; /* THE network */
+
+#if defined(LNET_USE_LIB_FREELIST)
+/* 1 CPT, simplify implementation... */
+# define LNET_CPT_MAX_BITS 0
+
+#else /* KERNEL and no freelist */
+
+# if (BITS_PER_LONG == 32)
+/* 2 CPTs, allowing more CPTs might put us under memory pressure */
+# define LNET_CPT_MAX_BITS 1
+
+# else /* 64-bit system */
+/*
+ * 256 CPTs for thousands of CPUs; allowing more CPTs would risk
+ * exhausting the lh_cookie space.
+ */
+# define LNET_CPT_MAX_BITS 8
+# endif /* BITS_PER_LONG == 32 */
+#endif
+
+/* max allowed CPT number */
+#define LNET_CPT_MAX (1 << LNET_CPT_MAX_BITS)
+
+#define LNET_CPT_NUMBER (the_lnet.ln_cpt_number)
+#define LNET_CPT_BITS (the_lnet.ln_cpt_bits)
+#define LNET_CPT_MASK ((1ULL << LNET_CPT_BITS) - 1)
+
+/** exclusive lock */
+#define LNET_LOCK_EX CFS_PERCPT_LOCK_EX
+
+static inline int lnet_is_wire_handle_none (lnet_handle_wire_t *wh)
+{
+ return (wh->wh_interface_cookie == LNET_WIRE_HANDLE_COOKIE_NONE &&
+ wh->wh_object_cookie == LNET_WIRE_HANDLE_COOKIE_NONE);
+}
+
+static inline int lnet_md_exhausted (lnet_libmd_t *md)
+{
+ return (md->md_threshold == 0 ||
+ ((md->md_options & LNET_MD_MAX_SIZE) != 0 &&
+ md->md_offset + md->md_max_size > md->md_length));
+}
+
+static inline int lnet_md_unlinkable (lnet_libmd_t *md)
+{
+ /* Should unlink md when its refcount is 0 and either:
+ * - md has been flagged for deletion (by auto unlink or LNetM[DE]Unlink,
+ * in the latter case md may not be exhausted).
+ * - auto unlink is on and md is exhausted.
+ */
+ if (md->md_refcount != 0)
+ return 0;
+
+ if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) != 0)
+ return 1;
+
+ return ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0 &&
+ lnet_md_exhausted(md));
+}
+
+#define lnet_cpt_table() (the_lnet.ln_cpt_table)
+#define lnet_cpt_current() cfs_cpt_current(the_lnet.ln_cpt_table, 1)
+
+static inline int
+lnet_cpt_of_cookie(__u64 cookie)
+{
+ unsigned int cpt = (cookie >> LNET_COOKIE_TYPE_BITS) & LNET_CPT_MASK;
+
+ /* LNET_CPT_NUMBER doesn't have to be a power of 2, which means an
+ * invalid cookie can yield an out-of-range cpt */
+ return cpt < LNET_CPT_NUMBER ? cpt : cpt % LNET_CPT_NUMBER;
+}
+
+static inline void
+lnet_res_lock(int cpt)
+{
+ cfs_percpt_lock(the_lnet.ln_res_lock, cpt);
+}
+
+static inline void
+lnet_res_unlock(int cpt)
+{
+ cfs_percpt_unlock(the_lnet.ln_res_lock, cpt);
+}
+
+static inline int
+lnet_res_lock_current(void)
+{
+ int cpt = lnet_cpt_current();
+
+ lnet_res_lock(cpt);
+ return cpt;
+}
+
+static inline void
+lnet_net_lock(int cpt)
+{
+ cfs_percpt_lock(the_lnet.ln_net_lock, cpt);
+}
+
+static inline void
+lnet_net_unlock(int cpt)
+{
+ cfs_percpt_unlock(the_lnet.ln_net_lock, cpt);
+}
+
+static inline int
+lnet_net_lock_current(void)
+{
+ int cpt = lnet_cpt_current();
+
+ lnet_net_lock(cpt);
+ return cpt;
+}
+
+#define LNET_LOCK() lnet_net_lock(LNET_LOCK_EX)
+#define LNET_UNLOCK() lnet_net_unlock(LNET_LOCK_EX)
+
+
+#define lnet_ptl_lock(ptl) spin_lock(&(ptl)->ptl_lock)
+#define lnet_ptl_unlock(ptl) spin_unlock(&(ptl)->ptl_lock)
+#define lnet_eq_wait_lock() spin_lock(&the_lnet.ln_eq_wait_lock)
+#define lnet_eq_wait_unlock() spin_unlock(&the_lnet.ln_eq_wait_lock)
+#define lnet_ni_lock(ni) spin_lock(&(ni)->ni_lock)
+#define lnet_ni_unlock(ni) spin_unlock(&(ni)->ni_lock)
+#define LNET_MUTEX_LOCK(m) mutex_lock(m)
+#define LNET_MUTEX_UNLOCK(m) mutex_unlock(m)
+
+
+#define MAX_PORTALS 64
+
+/* these are only used by code built with LNET_USE_LIB_FREELIST, but we still
+ * export them to !LNET_USE_LIB_FREELIST builds to keep the implementation simple */
+#define LNET_FL_MAX_MES 2048
+#define LNET_FL_MAX_MDS 2048
+#define LNET_FL_MAX_EQS 512
+#define LNET_FL_MAX_MSGS 2048 /* Outstanding messages */
+
+#ifdef LNET_USE_LIB_FREELIST
+
+int lnet_freelist_init(lnet_freelist_t *fl, int n, int size);
+void lnet_freelist_fini(lnet_freelist_t *fl);
+
+static inline void *
+lnet_freelist_alloc (lnet_freelist_t *fl)
+{
+ /* ALWAYS called with liblock held */
+ lnet_freeobj_t *o;
+
+ if (list_empty (&fl->fl_list))
+ return (NULL);
+
+ o = list_entry (fl->fl_list.next, lnet_freeobj_t, fo_list);
+ list_del (&o->fo_list);
+ return ((void *)&o->fo_contents);
+}
+
+static inline void
+lnet_freelist_free (lnet_freelist_t *fl, void *obj)
+{
+ /* ALWAYS called with liblock held */
+ lnet_freeobj_t *o = list_entry (obj, lnet_freeobj_t, fo_contents);
+
+ list_add (&o->fo_list, &fl->fl_list);
+}
+
+
+static inline lnet_eq_t *
+lnet_eq_alloc (void)
+{
+ /* NEVER called with resource lock held */
+ struct lnet_res_container *rec = &the_lnet.ln_eq_container;
+ lnet_eq_t *eq;
+
+ LASSERT(LNET_CPT_NUMBER == 1);
+
+ lnet_res_lock(0);
+ eq = (lnet_eq_t *)lnet_freelist_alloc(&rec->rec_freelist);
+ lnet_res_unlock(0);
+
+ return eq;
+}
+
+static inline void
+lnet_eq_free_locked(lnet_eq_t *eq)
+{
+ /* ALWAYS called with resource lock held */
+ struct lnet_res_container *rec = &the_lnet.ln_eq_container;
+
+ LASSERT(LNET_CPT_NUMBER == 1);
+ lnet_freelist_free(&rec->rec_freelist, eq);
+}
+
+static inline void
+lnet_eq_free(lnet_eq_t *eq)
+{
+ lnet_res_lock(0);
+ lnet_eq_free_locked(eq);
+ lnet_res_unlock(0);
+}
+
+static inline lnet_libmd_t *
+lnet_md_alloc (lnet_md_t *umd)
+{
+ /* NEVER called with resource lock held */
+ struct lnet_res_container *rec = the_lnet.ln_md_containers[0];
+ lnet_libmd_t *md;
+
+ LASSERT(LNET_CPT_NUMBER == 1);
+
+ lnet_res_lock(0);
+ md = (lnet_libmd_t *)lnet_freelist_alloc(&rec->rec_freelist);
+ lnet_res_unlock(0);
+
+ if (md != NULL)
+ INIT_LIST_HEAD(&md->md_list);
+
+ return md;
+}
+
+static inline void
+lnet_md_free_locked(lnet_libmd_t *md)
+{
+ /* ALWAYS called with resource lock held */
+ struct lnet_res_container *rec = the_lnet.ln_md_containers[0];
+
+ LASSERT(LNET_CPT_NUMBER == 1);
+ lnet_freelist_free(&rec->rec_freelist, md);
+}
+
+static inline void
+lnet_md_free(lnet_libmd_t *md)
+{
+ lnet_res_lock(0);
+ lnet_md_free_locked(md);
+ lnet_res_unlock(0);
+}
+
+static inline lnet_me_t *
+lnet_me_alloc(void)
+{
+ /* NEVER called with resource lock held */
+ struct lnet_res_container *rec = the_lnet.ln_me_containers[0];
+ lnet_me_t *me;
+
+ LASSERT(LNET_CPT_NUMBER == 1);
+
+ lnet_res_lock(0);
+ me = (lnet_me_t *)lnet_freelist_alloc(&rec->rec_freelist);
+ lnet_res_unlock(0);
+
+ return me;
+}
+
+static inline void
+lnet_me_free_locked(lnet_me_t *me)
+{
+ /* ALWAYS called with resource lock held */
+ struct lnet_res_container *rec = the_lnet.ln_me_containers[0];
+
+ LASSERT(LNET_CPT_NUMBER == 1);
+ lnet_freelist_free(&rec->rec_freelist, me);
+}
+
+static inline void
+lnet_me_free(lnet_me_t *me)
+{
+ lnet_res_lock(0);
+ lnet_me_free_locked(me);
+ lnet_res_unlock(0);
+}
+
+static inline lnet_msg_t *
+lnet_msg_alloc (void)
+{
+ /* NEVER called with network lock held */
+ struct lnet_msg_container *msc = the_lnet.ln_msg_containers[0];
+ lnet_msg_t *msg;
+
+ LASSERT(LNET_CPT_NUMBER == 1);
+
+ lnet_net_lock(0);
+ msg = (lnet_msg_t *)lnet_freelist_alloc(&msc->msc_freelist);
+ lnet_net_unlock(0);
+
+ if (msg != NULL) {
+ /* NULL pointers, clear flags etc */
+ memset(msg, 0, sizeof(*msg));
+ }
+ return msg;
+}
+
+static inline void
+lnet_msg_free_locked(lnet_msg_t *msg)
+{
+ /* ALWAYS called with network lock held */
+ struct lnet_msg_container *msc = the_lnet.ln_msg_containers[0];
+
+ LASSERT(LNET_CPT_NUMBER == 1);
+ LASSERT(!msg->msg_onactivelist);
+ lnet_freelist_free(&msc->msc_freelist, msg);
+}
+
+static inline void
+lnet_msg_free (lnet_msg_t *msg)
+{
+ lnet_net_lock(0);
+ lnet_msg_free_locked(msg);
+ lnet_net_unlock(0);
+}
+
+#else /* !LNET_USE_LIB_FREELIST */
+
+static inline lnet_eq_t *
+lnet_eq_alloc (void)
+{
+ /* NEVER called with liblock held */
+ lnet_eq_t *eq;
+
+ LIBCFS_ALLOC(eq, sizeof(*eq));
+ return (eq);
+}
+
+static inline void
+lnet_eq_free(lnet_eq_t *eq)
+{
+ /* ALWAYS called with resource lock held */
+ LIBCFS_FREE(eq, sizeof(*eq));
+}
+
+static inline lnet_libmd_t *
+lnet_md_alloc (lnet_md_t *umd)
+{
+ /* NEVER called with liblock held */
+ lnet_libmd_t *md;
+ unsigned int size;
+ unsigned int niov;
+
+ if ((umd->options & LNET_MD_KIOV) != 0) {
+ niov = umd->length;
+ size = offsetof(lnet_libmd_t, md_iov.kiov[niov]);
+ } else {
+ niov = ((umd->options & LNET_MD_IOVEC) != 0) ?
+ umd->length : 1;
+ size = offsetof(lnet_libmd_t, md_iov.iov[niov]);
+ }
+
+ LIBCFS_ALLOC(md, size);
+
+ if (md != NULL) {
+ /* Set here in case of early free */
+ md->md_options = umd->options;
+ md->md_niov = niov;
+ INIT_LIST_HEAD(&md->md_list);
+ }
+
+ return (md);
+}
+
+static inline void
+lnet_md_free(lnet_libmd_t *md)
+{
+ /* ALWAYS called with resource lock held */
+ unsigned int size;
+
+ if ((md->md_options & LNET_MD_KIOV) != 0)
+ size = offsetof(lnet_libmd_t, md_iov.kiov[md->md_niov]);
+ else
+ size = offsetof(lnet_libmd_t, md_iov.iov[md->md_niov]);
+
+ LIBCFS_FREE(md, size);
+}
+
+static inline lnet_me_t *
+lnet_me_alloc (void)
+{
+ /* NEVER called with liblock held */
+ lnet_me_t *me;
+
+ LIBCFS_ALLOC(me, sizeof(*me));
+ return (me);
+}
+
+static inline void
+lnet_me_free(lnet_me_t *me)
+{
+ /* ALWAYS called with resource lock held */
+ LIBCFS_FREE(me, sizeof(*me));
+}
+
+static inline lnet_msg_t *
+lnet_msg_alloc(void)
+{
+ /* NEVER called with liblock held */
+ lnet_msg_t *msg;
+
+ LIBCFS_ALLOC(msg, sizeof(*msg));
+
+ /* no need to zero, LIBCFS_ALLOC does it for us */
+ return (msg);
+}
+
+static inline void
+lnet_msg_free(lnet_msg_t *msg)
+{
+ /* ALWAYS called with network lock held */
+ LASSERT(!msg->msg_onactivelist);
+ LIBCFS_FREE(msg, sizeof(*msg));
+}
+
+#define lnet_eq_free_locked(eq) lnet_eq_free(eq)
+#define lnet_md_free_locked(md) lnet_md_free(md)
+#define lnet_me_free_locked(me) lnet_me_free(me)
+#define lnet_msg_free_locked(msg) lnet_msg_free(msg)
+
+#endif /* LNET_USE_LIB_FREELIST */
+
+lnet_libhandle_t *lnet_res_lh_lookup(struct lnet_res_container *rec,
+ __u64 cookie);
+void lnet_res_lh_initialize(struct lnet_res_container *rec,
+ lnet_libhandle_t *lh);
+static inline void
+lnet_res_lh_invalidate(lnet_libhandle_t *lh)
+{
+ /* ALWAYS called with resource lock held */
+ /* NB: cookie is still useful, don't reset it */
+ list_del(&lh->lh_hash_chain);
+}
+
+static inline void
+lnet_eq2handle (lnet_handle_eq_t *handle, lnet_eq_t *eq)
+{
+ if (eq == NULL) {
+ LNetInvalidateHandle(handle);
+ return;
+ }
+
+ handle->cookie = eq->eq_lh.lh_cookie;
+}
+
+static inline lnet_eq_t *
+lnet_handle2eq(lnet_handle_eq_t *handle)
+{
+ /* ALWAYS called with resource lock held */
+ lnet_libhandle_t *lh;
+
+ lh = lnet_res_lh_lookup(&the_lnet.ln_eq_container, handle->cookie);
+ if (lh == NULL)
+ return NULL;
+
+ return lh_entry(lh, lnet_eq_t, eq_lh);
+}
+
+static inline void
+lnet_md2handle (lnet_handle_md_t *handle, lnet_libmd_t *md)
+{
+ handle->cookie = md->md_lh.lh_cookie;
+}
+
+static inline lnet_libmd_t *
+lnet_handle2md(lnet_handle_md_t *handle)
+{
+ /* ALWAYS called with resource lock held */
+ lnet_libhandle_t *lh;
+ int cpt;
+
+ cpt = lnet_cpt_of_cookie(handle->cookie);
+ lh = lnet_res_lh_lookup(the_lnet.ln_md_containers[cpt],
+ handle->cookie);
+ if (lh == NULL)
+ return NULL;
+
+ return lh_entry(lh, lnet_libmd_t, md_lh);
+}
+
+static inline lnet_libmd_t *
+lnet_wire_handle2md(lnet_handle_wire_t *wh)
+{
+ /* ALWAYS called with resource lock held */
+ lnet_libhandle_t *lh;
+ int cpt;
+
+ if (wh->wh_interface_cookie != the_lnet.ln_interface_cookie)
+ return NULL;
+
+ cpt = lnet_cpt_of_cookie(wh->wh_object_cookie);
+ lh = lnet_res_lh_lookup(the_lnet.ln_md_containers[cpt],
+ wh->wh_object_cookie);
+ if (lh == NULL)
+ return NULL;
+
+ return lh_entry(lh, lnet_libmd_t, md_lh);
+}
+
+static inline void
+lnet_me2handle (lnet_handle_me_t *handle, lnet_me_t *me)
+{
+ handle->cookie = me->me_lh.lh_cookie;
+}
+
+static inline lnet_me_t *
+lnet_handle2me(lnet_handle_me_t *handle)
+{
+ /* ALWAYS called with resource lock held */
+ lnet_libhandle_t *lh;
+ int cpt;
+
+ cpt = lnet_cpt_of_cookie(handle->cookie);
+ lh = lnet_res_lh_lookup(the_lnet.ln_me_containers[cpt],
+ handle->cookie);
+ if (lh == NULL)
+ return NULL;
+
+ return lh_entry(lh, lnet_me_t, me_lh);
+}
+
+static inline void
+lnet_peer_addref_locked(lnet_peer_t *lp)
+{
+ LASSERT (lp->lp_refcount > 0);
+ lp->lp_refcount++;
+}
+
+extern void lnet_destroy_peer_locked(lnet_peer_t *lp);
+
+static inline void
+lnet_peer_decref_locked(lnet_peer_t *lp)
+{
+ LASSERT (lp->lp_refcount > 0);
+ lp->lp_refcount--;
+ if (lp->lp_refcount == 0)
+ lnet_destroy_peer_locked(lp);
+}
+
+static inline int
+lnet_isrouter(lnet_peer_t *lp)
+{
+ return lp->lp_rtr_refcount != 0;
+}
+
+static inline void
+lnet_ni_addref_locked(lnet_ni_t *ni, int cpt)
+{
+ LASSERT(cpt >= 0 && cpt < LNET_CPT_NUMBER);
+ LASSERT(*ni->ni_refs[cpt] >= 0);
+
+ (*ni->ni_refs[cpt])++;
+}
+
+static inline void
+lnet_ni_addref(lnet_ni_t *ni)
+{
+ lnet_net_lock(0);
+ lnet_ni_addref_locked(ni, 0);
+ lnet_net_unlock(0);
+}
+
+static inline void
+lnet_ni_decref_locked(lnet_ni_t *ni, int cpt)
+{
+ LASSERT(cpt >= 0 && cpt < LNET_CPT_NUMBER);
+ LASSERT(*ni->ni_refs[cpt] > 0);
+
+ (*ni->ni_refs[cpt])--;
+}
+
+static inline void
+lnet_ni_decref(lnet_ni_t *ni)
+{
+ lnet_net_lock(0);
+ lnet_ni_decref_locked(ni, 0);
+ lnet_net_unlock(0);
+}
+
+void lnet_ni_free(lnet_ni_t *ni);
+
+static inline int
+lnet_nid2peerhash(lnet_nid_t nid)
+{
+ return cfs_hash_long(nid, LNET_PEER_HASH_BITS);
+}
+
+static inline struct list_head *
+lnet_net2rnethash(__u32 net)
+{
+ return &the_lnet.ln_remote_nets_hash[(LNET_NETNUM(net) +
+ LNET_NETTYP(net)) &
+ ((1U << the_lnet.ln_remote_nets_hbits) - 1)];
+}
+
+extern lnd_t the_lolnd;
+
+
+extern int lnet_cpt_of_nid_locked(lnet_nid_t nid);
+extern int lnet_cpt_of_nid(lnet_nid_t nid);
+extern lnet_ni_t *lnet_nid2ni_locked(lnet_nid_t nid, int cpt);
+extern lnet_ni_t *lnet_net2ni_locked(__u32 net, int cpt);
+extern lnet_ni_t *lnet_net2ni(__u32 net);
+
+int lnet_notify(lnet_ni_t *ni, lnet_nid_t peer, int alive, cfs_time_t when);
+void lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, cfs_time_t when);
+int lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway_nid);
+int lnet_check_routes(void);
+int lnet_del_route(__u32 net, lnet_nid_t gw_nid);
+void lnet_destroy_routes(void);
+int lnet_get_route(int idx, __u32 *net, __u32 *hops,
+ lnet_nid_t *gateway, __u32 *alive);
+void lnet_proc_init(void);
+void lnet_proc_fini(void);
+int lnet_rtrpools_alloc(int im_a_router);
+void lnet_rtrpools_free(void);
+lnet_remotenet_t *lnet_find_net_locked (__u32 net);
+
+int lnet_islocalnid(lnet_nid_t nid);
+int lnet_islocalnet(__u32 net);
+
+void lnet_msg_attach_md(lnet_msg_t *msg, lnet_libmd_t *md,
+ unsigned int offset, unsigned int mlen);
+void lnet_msg_detach_md(lnet_msg_t *msg, int status);
+void lnet_build_unlink_event(lnet_libmd_t *md, lnet_event_t *ev);
+void lnet_build_msg_event(lnet_msg_t *msg, lnet_event_kind_t ev_type);
+void lnet_msg_commit(lnet_msg_t *msg, int cpt);
+void lnet_msg_decommit(lnet_msg_t *msg, int cpt, int status);
+
+void lnet_eq_enqueue_event(lnet_eq_t *eq, lnet_event_t *ev);
+void lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
+ unsigned int offset, unsigned int len);
+int lnet_send(lnet_nid_t nid, lnet_msg_t *msg, lnet_nid_t rtr_nid);
+void lnet_return_tx_credits_locked(lnet_msg_t *msg);
+void lnet_return_rx_credits_locked(lnet_msg_t *msg);
+
+/* portals functions */
+/* portals attributes */
+static inline int
+lnet_ptl_is_lazy(lnet_portal_t *ptl)
+{
+ return !!(ptl->ptl_options & LNET_PTL_LAZY);
+}
+
+static inline int
+lnet_ptl_is_unique(lnet_portal_t *ptl)
+{
+ return !!(ptl->ptl_options & LNET_PTL_MATCH_UNIQUE);
+}
+
+static inline int
+lnet_ptl_is_wildcard(lnet_portal_t *ptl)
+{
+ return !!(ptl->ptl_options & LNET_PTL_MATCH_WILDCARD);
+}
+
+static inline void
+lnet_ptl_setopt(lnet_portal_t *ptl, int opt)
+{
+ ptl->ptl_options |= opt;
+}
+
+static inline void
+lnet_ptl_unsetopt(lnet_portal_t *ptl, int opt)
+{
+ ptl->ptl_options &= ~opt;
+}
+
+/* match-table functions */
+struct list_head *lnet_mt_match_head(struct lnet_match_table *mtable,
+ lnet_process_id_t id, __u64 mbits);
+struct lnet_match_table *lnet_mt_of_attach(unsigned int index,
+ lnet_process_id_t id, __u64 mbits,
+ __u64 ignore_bits,
+ lnet_ins_pos_t pos);
+int lnet_mt_match_md(struct lnet_match_table *mtable,
+ struct lnet_match_info *info, struct lnet_msg *msg);
+
+/* portals match/attach functions */
+void lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
+ struct list_head *matches, struct list_head *drops);
+void lnet_ptl_detach_md(lnet_me_t *me, lnet_libmd_t *md);
+int lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg);
+
+/* initialize and finalize portals */
+int lnet_portals_create(void);
+void lnet_portals_destroy(void);
+
+/* message functions */
+int lnet_parse (lnet_ni_t *ni, lnet_hdr_t *hdr,
+ lnet_nid_t fromnid, void *private, int rdma_req);
+void lnet_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
+ unsigned int offset, unsigned int mlen, unsigned int rlen);
+lnet_msg_t *lnet_create_reply_msg (lnet_ni_t *ni, lnet_msg_t *get_msg);
+void lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *msg, unsigned int len);
+void lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int rc);
+void lnet_drop_delayed_msg_list(struct list_head *head, char *reason);
+void lnet_recv_delayed_msg_list(struct list_head *head);
+
+int lnet_msg_container_setup(struct lnet_msg_container *container, int cpt);
+void lnet_msg_container_cleanup(struct lnet_msg_container *container);
+void lnet_msg_containers_destroy(void);
+int lnet_msg_containers_create(void);
+
+char *lnet_msgtyp2str (int type);
+void lnet_print_hdr (lnet_hdr_t * hdr);
+int lnet_fail_nid(lnet_nid_t nid, unsigned int threshold);
+
+void lnet_counters_get(lnet_counters_t *counters);
+void lnet_counters_reset(void);
+
+unsigned int lnet_iov_nob (unsigned int niov, struct iovec *iov);
+int lnet_extract_iov (int dst_niov, struct iovec *dst,
+ int src_niov, struct iovec *src,
+ unsigned int offset, unsigned int len);
+
+unsigned int lnet_kiov_nob (unsigned int niov, lnet_kiov_t *iov);
+int lnet_extract_kiov (int dst_niov, lnet_kiov_t *dst,
+ int src_niov, lnet_kiov_t *src,
+ unsigned int offset, unsigned int len);
+
+void lnet_copy_iov2iov (unsigned int ndiov, struct iovec *diov,
+ unsigned int doffset,
+ unsigned int nsiov, struct iovec *siov,
+ unsigned int soffset, unsigned int nob);
+void lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov,
+ unsigned int iovoffset,
+ unsigned int nkiov, lnet_kiov_t *kiov,
+ unsigned int kiovoffset, unsigned int nob);
+void lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov,
+ unsigned int kiovoffset,
+ unsigned int niov, struct iovec *iov,
+ unsigned int iovoffset, unsigned int nob);
+void lnet_copy_kiov2kiov (unsigned int ndkiov, lnet_kiov_t *dkiov,
+ unsigned int doffset,
+ unsigned int nskiov, lnet_kiov_t *skiov,
+ unsigned int soffset, unsigned int nob);
+
+static inline void
+lnet_copy_iov2flat(int dlen, void *dest, unsigned int doffset,
+ unsigned int nsiov, struct iovec *siov, unsigned int soffset,
+ unsigned int nob)
+{
+ struct iovec diov = {/*.iov_base = */ dest, /*.iov_len = */ dlen};
+
+ lnet_copy_iov2iov(1, &diov, doffset,
+ nsiov, siov, soffset, nob);
+}
+
+static inline void
+lnet_copy_kiov2flat(int dlen, void *dest, unsigned int doffset,
+ unsigned int nsiov, lnet_kiov_t *skiov, unsigned int soffset,
+ unsigned int nob)
+{
+ struct iovec diov = {/* .iov_base = */ dest, /* .iov_len = */ dlen};
+
+ lnet_copy_kiov2iov(1, &diov, doffset,
+ nsiov, skiov, soffset, nob);
+}
+
+static inline void
+lnet_copy_flat2iov(unsigned int ndiov, struct iovec *diov, unsigned int doffset,
+ int slen, void *src, unsigned int soffset, unsigned int nob)
+{
+ struct iovec siov = {/*.iov_base = */ src, /*.iov_len = */slen};
+ lnet_copy_iov2iov(ndiov, diov, doffset,
+ 1, &siov, soffset, nob);
+}
+
+static inline void
+lnet_copy_flat2kiov(unsigned int ndiov, lnet_kiov_t *dkiov, unsigned int doffset,
+ int slen, void *src, unsigned int soffset, unsigned int nob)
+{
+ struct iovec siov = {/* .iov_base = */ src, /* .iov_len = */ slen};
+ lnet_copy_iov2kiov(ndiov, dkiov, doffset,
+ 1, &siov, soffset, nob);
+}
+
+void lnet_me_unlink(lnet_me_t *me);
+
+void lnet_md_unlink(lnet_libmd_t *md);
+void lnet_md_deconstruct(lnet_libmd_t *lmd, lnet_md_t *umd);
+
+void lnet_register_lnd(lnd_t *lnd);
+void lnet_unregister_lnd(lnd_t *lnd);
+int lnet_set_ip_niaddr (lnet_ni_t *ni);
+
+int lnet_connect(socket_t **sockp, lnet_nid_t peer_nid,
+ __u32 local_ip, __u32 peer_ip, int peer_port);
+void lnet_connect_console_error(int rc, lnet_nid_t peer_nid,
+ __u32 peer_ip, int port);
+int lnet_count_acceptor_nis(void);
+int lnet_acceptor_timeout(void);
+int lnet_acceptor_port(void);
+
+int lnet_acceptor_start(void);
+void lnet_acceptor_stop(void);
+
+void lnet_get_tunables(void);
+int lnet_peers_start_down(void);
+int lnet_peer_buffer_credits(lnet_ni_t *ni);
+
+int lnet_router_checker_start(void);
+void lnet_router_checker_stop(void);
+void lnet_swap_pinginfo(lnet_ping_info_t *info);
+
+int lnet_ping_target_init(void);
+void lnet_ping_target_fini(void);
+int lnet_ping(lnet_process_id_t id, int timeout_ms,
+ lnet_process_id_t *ids, int n_ids);
+
+int lnet_parse_ip2nets (char **networksp, char *ip2nets);
+int lnet_parse_routes (char *route_str, int *im_a_router);
+int lnet_parse_networks (struct list_head *nilist, char *networks);
+
+int lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt);
+lnet_peer_t *lnet_find_peer_locked(struct lnet_peer_table *ptable,
+ lnet_nid_t nid);
+void lnet_peer_tables_cleanup(void);
+void lnet_peer_tables_destroy(void);
+int lnet_peer_tables_create(void);
+void lnet_debug_peer(lnet_nid_t nid);
+
+
+#endif
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-types.h b/drivers/staging/lustre/include/linux/lnet/lib-types.h
new file mode 100644
index 0000000..e579e7e
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/lnet/lib-types.h
@@ -0,0 +1,765 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/include/lnet/lib-types.h
+ *
+ * Types used by the library side routines that do not need to be
+ * exposed to the user application
+ */
+
+#ifndef __LNET_LIB_TYPES_H__
+#define __LNET_LIB_TYPES_H__
+
+#include <linux/lnet/linux/lib-types.h>
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/list.h>
+#include <linux/lnet/types.h>
+
+#define WIRE_ATTR __attribute__((packed))
+
+/* Packed version of lnet_process_id_t to transfer via network */
+typedef struct {
+ lnet_nid_t nid;
+ lnet_pid_t pid; /* node id / process id */
+} WIRE_ATTR lnet_process_id_packed_t;
+
+/* The wire handle's interface cookie only matches one network interface in
+ * one epoch (i.e. new cookie when the interface restarts or the node
+ * reboots). The object cookie only matches one object on that interface
+ * during that object's lifetime (i.e. no cookie re-use). */
+typedef struct {
+ __u64 wh_interface_cookie;
+ __u64 wh_object_cookie;
+} WIRE_ATTR lnet_handle_wire_t;
+
+typedef enum {
+ LNET_MSG_ACK = 0,
+ LNET_MSG_PUT,
+ LNET_MSG_GET,
+ LNET_MSG_REPLY,
+ LNET_MSG_HELLO,
+} lnet_msg_type_t;
+
+/* The variant fields of the portals message header are aligned on an 8
+ * byte boundary in the message header. Note that all types used in these
+ * wire structs MUST be fixed size and the smaller types are placed at the
+ * end. */
+typedef struct lnet_ack {
+ lnet_handle_wire_t dst_wmd;
+ __u64 match_bits;
+ __u32 mlength;
+} WIRE_ATTR lnet_ack_t;
+
+typedef struct lnet_put {
+ lnet_handle_wire_t ack_wmd;
+ __u64 match_bits;
+ __u64 hdr_data;
+ __u32 ptl_index;
+ __u32 offset;
+} WIRE_ATTR lnet_put_t;
+
+typedef struct lnet_get {
+ lnet_handle_wire_t return_wmd;
+ __u64 match_bits;
+ __u32 ptl_index;
+ __u32 src_offset;
+ __u32 sink_length;
+} WIRE_ATTR lnet_get_t;
+
+typedef struct lnet_reply {
+ lnet_handle_wire_t dst_wmd;
+} WIRE_ATTR lnet_reply_t;
+
+typedef struct lnet_hello {
+ __u64 incarnation;
+ __u32 type;
+} WIRE_ATTR lnet_hello_t;
+
+typedef struct {
+ lnet_nid_t dest_nid;
+ lnet_nid_t src_nid;
+ lnet_pid_t dest_pid;
+ lnet_pid_t src_pid;
+ __u32 type; /* lnet_msg_type_t */
+ __u32 payload_length; /* payload data to follow */
+ /*<------__u64 aligned------->*/
+ union {
+ lnet_ack_t ack;
+ lnet_put_t put;
+ lnet_get_t get;
+ lnet_reply_t reply;
+ lnet_hello_t hello;
+ } msg;
+} WIRE_ATTR lnet_hdr_t;
+
+/* A HELLO message contains a magic number and protocol version
+ * code in the header's dest_nid, the peer's NID in the src_nid, and
+ * LNET_MSG_HELLO in the type field. All other common fields are zero
+ * (including payload_size; i.e. no payload).
+ * This is for use by byte-stream LNDs (e.g. TCP/IP) to check the peer is
+ * running the same protocol and to find out its NID. These LNDs should
+ * exchange HELLO messages when a connection is first established. Individual
+ * LNDs can put whatever else they fancy in lnet_hdr_t::msg.
+ */
+typedef struct {
+ __u32 magic; /* LNET_PROTO_TCP_MAGIC */
+ __u16 version_major; /* increment on incompatible change */
+ __u16 version_minor; /* increment on compatible change */
+} WIRE_ATTR lnet_magicversion_t;
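+
+/* Purely as an illustration, a byte-stream LND could start its handshake by
+ * sending a lnet_magicversion_t filled in along these lines (how the bytes
+ * actually go on the wire, including endianness, is up to each LND):
+ *
+ *	lnet_magicversion_t hello = {
+ *		.magic		= LNET_PROTO_TCP_MAGIC,
+ *		.version_major	= LNET_PROTO_TCP_VERSION_MAJOR,
+ *		.version_minor	= LNET_PROTO_TCP_VERSION_MINOR,
+ *	};
+ */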
+
+/* PROTO MAGIC for LNDs */
+#define LNET_PROTO_IB_MAGIC 0x0be91b91
+#define LNET_PROTO_RA_MAGIC 0x0be91b92
+#define LNET_PROTO_QSW_MAGIC 0x0be91b93
+#define LNET_PROTO_GNI_MAGIC 0xb00fbabe /* ask Kim */
+#define LNET_PROTO_TCP_MAGIC 0xeebc0ded
+#define LNET_PROTO_PTL_MAGIC 0x50746C4E /* 'PtlN' unique magic */
+#define LNET_PROTO_MX_MAGIC 0x4d583130 /* 'MX10'! */
+#define LNET_PROTO_ACCEPTOR_MAGIC 0xacce7100
+#define LNET_PROTO_PING_MAGIC 0x70696E67 /* 'ping' */
+
+/* Placeholder for a future "unified" protocol across all LNDs */
+/* Current LNDs that receive a request with this magic will respond with a
+ * "stub" reply using their current protocol */
+#define LNET_PROTO_MAGIC 0x45726963 /* ! */
+
+
+#define LNET_PROTO_TCP_VERSION_MAJOR 1
+#define LNET_PROTO_TCP_VERSION_MINOR 0
+
+/* Acceptor connection request */
+typedef struct {
+ __u32 acr_magic; /* PTL_ACCEPTOR_PROTO_MAGIC */
+ __u32 acr_version; /* protocol version */
+ __u64 acr_nid; /* target NID */
+} WIRE_ATTR lnet_acceptor_connreq_t;
+
+#define LNET_PROTO_ACCEPTOR_VERSION 1
+
+/* forward refs */
+struct lnet_libmd;
+
+typedef struct lnet_msg {
+ struct list_head msg_activelist;
+ struct list_head msg_list; /* Q for credits/MD */
+
+ lnet_process_id_t msg_target;
+ /* where is it from, it's only for building event */
+ lnet_nid_t msg_from;
+ __u32 msg_type;
+
+ /* committed for sending */
+ unsigned int msg_tx_committed:1;
+ /* CPT # this message committed for sending */
+ unsigned int msg_tx_cpt:15;
+ /* committed for receiving */
+ unsigned int msg_rx_committed:1;
+ /* CPT # this message committed for receiving */
+ unsigned int msg_rx_cpt:15;
+ /* queued for tx credit */
+ unsigned int msg_tx_delayed:1;
+ /* queued for RX buffer */
+ unsigned int msg_rx_delayed:1;
+ /* ready for pending on RX delay list */
+ unsigned int msg_rx_ready_delay:1;
+
+ unsigned int msg_vmflush:1; /* VM trying to free memory */
+ unsigned int msg_target_is_router:1; /* sending to a router */
+ unsigned int msg_routing:1; /* being forwarded */
+ unsigned int msg_ack:1; /* ack on finalize (PUT) */
+ unsigned int msg_sending:1; /* outgoing message */
+ unsigned int msg_receiving:1; /* being received */
+ unsigned int msg_txcredit:1; /* taken an NI send credit */
+ unsigned int msg_peertxcredit:1; /* taken a peer send credit */
+ unsigned int msg_rtrcredit:1; /* taken a global router credit */
+ unsigned int msg_peerrtrcredit:1; /* taken a peer router credit */
+ unsigned int msg_onactivelist:1; /* on the activelist */
+
+ struct lnet_peer *msg_txpeer; /* peer I'm sending to */
+ struct lnet_peer *msg_rxpeer; /* peer I received from */
+
+ void *msg_private;
+ struct lnet_libmd *msg_md;
+
+ unsigned int msg_len;
+ unsigned int msg_wanted;
+ unsigned int msg_offset;
+ unsigned int msg_niov;
+ struct iovec *msg_iov;
+ lnet_kiov_t *msg_kiov;
+
+ lnet_event_t msg_ev;
+ lnet_hdr_t msg_hdr;
+} lnet_msg_t;
+
+
+typedef struct lnet_libhandle {
+ struct list_head lh_hash_chain;
+ __u64 lh_cookie;
+} lnet_libhandle_t;
+
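+/* container_of()-style conversion: given a pointer to an embedded
+ * lnet_libhandle_t, recover the enclosing object (lnet_eq_t, lnet_libmd_t,
+ * lnet_me_t, ...) */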
+#define lh_entry(ptr, type, member) \
+ ((type *)((char *)(ptr)-(char *)(&((type *)0)->member)))
+
+typedef struct lnet_eq {
+ struct list_head eq_list;
+ lnet_libhandle_t eq_lh;
+ lnet_seq_t eq_enq_seq;
+ lnet_seq_t eq_deq_seq;
+ unsigned int eq_size;
+ lnet_eq_handler_t eq_callback;
+ lnet_event_t *eq_events;
+ int **eq_refs; /* percpt refcount for EQ */
+} lnet_eq_t;
+
+typedef struct lnet_me {
+ struct list_head me_list;
+ lnet_libhandle_t me_lh;
+ lnet_process_id_t me_match_id;
+ unsigned int me_portal;
+ unsigned int me_pos; /* hash offset in mt_hash */
+ __u64 me_match_bits;
+ __u64 me_ignore_bits;
+ lnet_unlink_t me_unlink;
+ struct lnet_libmd *me_md;
+} lnet_me_t;
+
+typedef struct lnet_libmd {
+ struct list_head md_list;
+ lnet_libhandle_t md_lh;
+ lnet_me_t *md_me;
+ char *md_start;
+ unsigned int md_offset;
+ unsigned int md_length;
+ unsigned int md_max_size;
+ int md_threshold;
+ int md_refcount;
+ unsigned int md_options;
+ unsigned int md_flags;
+ void *md_user_ptr;
+ lnet_eq_t *md_eq;
+ unsigned int md_niov; /* # frags */
+ union {
+ struct iovec iov[LNET_MAX_IOV];
+ lnet_kiov_t kiov[LNET_MAX_IOV];
+ } md_iov;
+} lnet_libmd_t;
+
+#define LNET_MD_FLAG_ZOMBIE (1 << 0)
+#define LNET_MD_FLAG_AUTO_UNLINK (1 << 1)
+
+#ifdef LNET_USE_LIB_FREELIST
+typedef struct
+{
+ void *fl_objs; /* single contiguous array of objects */
+ int fl_nobjs; /* the number of them */
+ int fl_objsize; /* the size (including overhead) of each of them */
+ struct list_head fl_list; /* where they are enqueued */
+} lnet_freelist_t;
+
+typedef struct
+{
+ struct list_head fo_list; /* enqueue on fl_list */
+ void *fo_contents; /* aligned contents */
+} lnet_freeobj_t;
+#endif
+
+typedef struct {
+ /* info about peers we are trying to fail */
+ struct list_head tp_list; /* ln_test_peers */
+ lnet_nid_t tp_nid; /* matching nid */
+ unsigned int tp_threshold; /* # failures to simulate */
+} lnet_test_peer_t;
+
+#define LNET_COOKIE_TYPE_MD 1
+#define LNET_COOKIE_TYPE_ME 2
+#define LNET_COOKIE_TYPE_EQ 3
+#define LNET_COOKIE_TYPE_BITS 2
+#define LNET_COOKIE_MASK ((1ULL << LNET_COOKIE_TYPE_BITS) - 1ULL)
+
+struct lnet_ni; /* forward ref */
+
+typedef struct lnet_lnd
+{
+ /* fields managed by portals */
+ struct list_head lnd_list; /* stash in the LND table */
+ int lnd_refcount; /* # active instances */
+
+ /* fields initialised by the LND */
+ unsigned int lnd_type;
+
+ int (*lnd_startup) (struct lnet_ni *ni);
+ void (*lnd_shutdown) (struct lnet_ni *ni);
+ int (*lnd_ctl)(struct lnet_ni *ni, unsigned int cmd, void *arg);
+
+ /* In data movement APIs below, payload buffers are described as a set
+ * of 'niov' fragments which are...
+ * EITHER
+ * in virtual memory (struct iovec *iov != NULL)
+ * OR
+ * in pages (kernel only: plt_kiov_t *kiov != NULL).
+ * The LND may NOT overwrite these fragment descriptors.
+ * An 'offset' may specify a byte offset within the set of
+ * fragments to start from.
+ */
+
+ /* Start sending a preformatted message. 'private' is NULL for PUT and
+ * GET messages; otherwise this is a response to an incoming message
+ * and 'private' is the 'private' passed to lnet_parse(). Return
+ * non-zero for immediate failure, otherwise complete later with
+ * lnet_finalize() */
+ int (*lnd_send)(struct lnet_ni *ni, void *private, lnet_msg_t *msg);
+
+ /* Start receiving 'mlen' bytes of payload data, skipping the following
+ * 'rlen' - 'mlen' bytes. 'private' is the 'private' passed to
+ * lnet_parse(). Return non-zero for immediate failure, otherwise
+ * complete later with lnet_finalize(). This also gives back a receive
+ * credit if the LND does flow control. */
+ int (*lnd_recv)(struct lnet_ni *ni, void *private, lnet_msg_t *msg,
+ int delayed, unsigned int niov,
+ struct iovec *iov, lnet_kiov_t *kiov,
+ unsigned int offset, unsigned int mlen, unsigned int rlen);
+
+ /* lnet_parse() has had to delay processing of this message
+ * (e.g. waiting for a forwarding buffer or send credits). Give the
+ * LND a chance to free urgently needed resources. If called, return 0
+ * for success and do NOT give back a receive credit; that has to wait
+ * until lnd_recv() gets called. On failure return < 0 and
+ * release resources; lnd_recv() will not be called. */
+ int (*lnd_eager_recv)(struct lnet_ni *ni, void *private, lnet_msg_t *msg,
+ void **new_privatep);
+
+ /* notification of peer health */
+ void (*lnd_notify)(struct lnet_ni *ni, lnet_nid_t peer, int alive);
+
+ /* query of peer aliveness */
+ void (*lnd_query)(struct lnet_ni *ni, lnet_nid_t peer, cfs_time_t *when);
+
+ /* accept a new connection */
+ int (*lnd_accept)(struct lnet_ni *ni, socket_t *sock);
+
+} lnd_t;
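+
+/* An LND typically fills in one of these at module-load time and hands it to
+ * lnet_register_lnd() (declared in lib-lnet.h). Sketch only; the "mylnd"
+ * names below are placeholders, not part of any real LND:
+ *
+ *	static lnd_t the_mylnd = {
+ *		.lnd_type	= MYLND,
+ *		.lnd_startup	= mylnd_startup,
+ *		.lnd_shutdown	= mylnd_shutdown,
+ *		.lnd_send	= mylnd_send,
+ *		.lnd_recv	= mylnd_recv,
+ *	};
+ *	lnet_register_lnd(&the_mylnd);
+ */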
+
+#define LNET_NI_STATUS_UP 0x15aac0de
+#define LNET_NI_STATUS_DOWN 0xdeadface
+#define LNET_NI_STATUS_INVALID 0x00000000
+typedef struct {
+ lnet_nid_t ns_nid;
+ __u32 ns_status;
+ __u32 ns_unused;
+} WIRE_ATTR lnet_ni_status_t;
+
+struct lnet_tx_queue {
+ int tq_credits; /* # tx credits free */
+ int tq_credits_min; /* lowest it's been */
+ int tq_credits_max; /* total # tx credits */
+ struct list_head tq_delayed; /* delayed TXs */
+};
+
+#define LNET_MAX_INTERFACES 16
+
+typedef struct lnet_ni {
+ spinlock_t ni_lock;
+ struct list_head ni_list; /* chain on ln_nis */
+ struct list_head ni_cptlist; /* chain on ln_nis_cpt */
+ int ni_maxtxcredits; /* # tx credits */
+ /* # per-peer send credits */
+ int ni_peertxcredits;
+ /* # per-peer router buffer credits */
+ int ni_peerrtrcredits;
+ /* seconds to consider peer dead */
+ int ni_peertimeout;
+ int ni_ncpts; /* number of CPTs */
+ __u32 *ni_cpts; /* bond NI on some CPTs */
+ lnet_nid_t ni_nid; /* interface's NID */
+ void *ni_data; /* instance-specific data */
+ lnd_t *ni_lnd; /* procedural interface */
+ struct lnet_tx_queue **ni_tx_queues; /* percpt TX queues */
+ int **ni_refs; /* percpt reference count */
+ long ni_last_alive; /* when I was last alive */
+ lnet_ni_status_t *ni_status; /* my health status */
+ /* equivalent interfaces to use */
+ char *ni_interfaces[LNET_MAX_INTERFACES];
+} lnet_ni_t;
+
+#define LNET_PROTO_PING_MATCHBITS 0x8000000000000000LL
+
+/* NB: value of these features equal to LNET_PROTO_PING_VERSION_x
+ * of old LNet, so there shouldn't be any compatibility issue */
+#define LNET_PING_FEAT_INVAL (0) /* no feature */
+#define LNET_PING_FEAT_BASE (1 << 0) /* just a ping */
+#define LNET_PING_FEAT_NI_STATUS (1 << 1) /* return NI status */
+
+#define LNET_PING_FEAT_MASK (LNET_PING_FEAT_BASE | \
+ LNET_PING_FEAT_NI_STATUS)
+
+typedef struct {
+ __u32 pi_magic;
+ __u32 pi_features;
+ lnet_pid_t pi_pid;
+ __u32 pi_nnis;
+ lnet_ni_status_t pi_ni[0];
+} WIRE_ATTR lnet_ping_info_t;
+
+/* router checker data, per router */
+#define LNET_MAX_RTR_NIS 16
+#define LNET_PINGINFO_SIZE offsetof(lnet_ping_info_t, pi_ni[LNET_MAX_RTR_NIS])
+typedef struct {
+ /* chain on the_lnet.ln_zombie_rcd or ln_deathrow_rcd */
+ struct list_head rcd_list;
+ lnet_handle_md_t rcd_mdh; /* ping buffer MD */
+ struct lnet_peer *rcd_gateway; /* reference to gateway */
+ lnet_ping_info_t *rcd_pinginfo; /* ping buffer */
+} lnet_rc_data_t;
+
+typedef struct lnet_peer {
+ struct list_head lp_hashlist; /* chain on peer hash */
+ struct list_head lp_txq; /* messages blocking for tx credits */
+ struct list_head lp_rtrq; /* messages blocking for router credits */
+ struct list_head lp_rtr_list; /* chain on router list */
+ int lp_txcredits; /* # tx credits available */
+ int lp_mintxcredits; /* low water mark */
+ int lp_rtrcredits; /* # router credits */
+ int lp_minrtrcredits; /* low water mark */
+ unsigned int lp_alive:1; /* alive/dead? */
+ unsigned int lp_notify:1; /* notification outstanding? */
+ unsigned int lp_notifylnd:1; /* outstanding notification for LND? */
+ unsigned int lp_notifying:1; /* some thread is handling notification */
+ unsigned int lp_ping_notsent; /* SEND event outstanding from ping */
+ int lp_alive_count; /* # times router went dead<->alive */
+ long lp_txqnob; /* bytes queued for sending */
+ cfs_time_t lp_timestamp; /* time of last aliveness news */
+ cfs_time_t lp_ping_timestamp; /* time of last ping attempt */
+ cfs_time_t lp_ping_deadline; /* != 0 if ping reply expected */
+ cfs_time_t lp_last_alive; /* when I was last alive */
+ cfs_time_t lp_last_query; /* when lp_ni was queried last time */
+ lnet_ni_t *lp_ni; /* interface peer is on */
+ lnet_nid_t lp_nid; /* peer's NID */
+ int lp_refcount; /* # refs */
+ int lp_cpt; /* CPT this peer attached on */
+ /* # refs from lnet_route_t::lr_gateway */
+ int lp_rtr_refcount;
+ /* returned RC ping features */
+ unsigned int lp_ping_feats;
+ struct list_head lp_routes; /* routers on this peer */
+ lnet_rc_data_t *lp_rcd; /* router checker state */
+} lnet_peer_t;
+
+
+/* peer hash size */
+#define LNET_PEER_HASH_BITS 9
+#define LNET_PEER_HASH_SIZE (1 << LNET_PEER_HASH_BITS)
+
+/* peer hash table */
+struct lnet_peer_table {
+ int pt_version; /* /proc validity stamp */
+ int pt_number; /* # peers extant */
+ struct list_head pt_deathrow; /* zombie peers */
+ struct list_head *pt_hash; /* NID->peer hash */
+};
+
+/* peer aliveness is enabled only on routers for peers in a network where the
+ * lnet_ni_t::ni_peertimeout has been set to a positive value */
+#define lnet_peer_aliveness_enabled(lp) (the_lnet.ln_routing != 0 && \
+ (lp)->lp_ni->ni_peertimeout > 0)
+
+typedef struct {
+ struct list_head lr_list; /* chain on net */
+ struct list_head lr_gwlist; /* chain on gateway */
+ lnet_peer_t *lr_gateway; /* router node */
+ __u32 lr_net; /* remote network number */
+ int lr_seq; /* sequence for round-robin */
+ unsigned int lr_downis; /* number of down NIs */
+ unsigned int lr_hops; /* how far I am */
+} lnet_route_t;
+
+#define LNET_REMOTE_NETS_HASH_DEFAULT (1U << 7)
+#define LNET_REMOTE_NETS_HASH_MAX (1U << 16)
+#define LNET_REMOTE_NETS_HASH_SIZE (1 << the_lnet.ln_remote_nets_hbits)
+
+typedef struct {
+ struct list_head lrn_list; /* chain on ln_remote_nets_hash */
+ struct list_head lrn_routes; /* routes to me */
+ __u32 lrn_net; /* my net number */
+} lnet_remotenet_t;
+
+typedef struct {
+ struct list_head rbp_bufs; /* my free buffer pool */
+ struct list_head rbp_msgs; /* messages blocking for a buffer */
+ int rbp_npages; /* # pages in each buffer */
+ int rbp_nbuffers; /* # buffers */
+ int rbp_credits; /* # free buffers / blocked messages */
+ int rbp_mincredits; /* low water mark */
+} lnet_rtrbufpool_t;
+
+typedef struct {
+ struct list_head rb_list; /* chain on rbp_bufs */
+ lnet_rtrbufpool_t *rb_pool; /* owning pool */
+ lnet_kiov_t rb_kiov[0]; /* the buffer space */
+} lnet_rtrbuf_t;
+
+typedef struct {
+ __u32 msgs_alloc;
+ __u32 msgs_max;
+ __u32 errors;
+ __u32 send_count;
+ __u32 recv_count;
+ __u32 route_count;
+ __u32 drop_count;
+ __u64 send_length;
+ __u64 recv_length;
+ __u64 route_length;
+ __u64 drop_length;
+} WIRE_ATTR lnet_counters_t;
+
+#define LNET_PEER_HASHSIZE 503 /* prime! */
+
+#define LNET_NRBPOOLS 3 /* # different router buffer pools */
+
+enum {
+ /* Didn't match anything */
+ LNET_MATCHMD_NONE = (1 << 0),
+ /* Matched OK */
+ LNET_MATCHMD_OK = (1 << 1),
+ /* Must be discarded */
+ LNET_MATCHMD_DROP = (1 << 2),
+ /* match and buffer is exhausted */
+ LNET_MATCHMD_EXHAUSTED = (1 << 3),
+ /* match or drop */
+ LNET_MATCHMD_FINISH = (LNET_MATCHMD_OK | LNET_MATCHMD_DROP),
+};
+
+/* Options for lnet_portal_t::ptl_options */
+#define LNET_PTL_LAZY (1 << 0)
+#define LNET_PTL_MATCH_UNIQUE (1 << 1) /* unique match, for RDMA */
+#define LNET_PTL_MATCH_WILDCARD (1 << 2) /* wildcard match, request portal */
+
+/* parameter for matching operations (GET, PUT) */
+struct lnet_match_info {
+ __u64 mi_mbits;
+ lnet_process_id_t mi_id;
+ unsigned int mi_opc;
+ unsigned int mi_portal;
+ unsigned int mi_rlength;
+ unsigned int mi_roffset;
+};
+
+/* ME hash of RDMA portal */
+#define LNET_MT_HASH_BITS 8
+#define LNET_MT_HASH_SIZE (1 << LNET_MT_HASH_BITS)
+#define LNET_MT_HASH_MASK (LNET_MT_HASH_SIZE - 1)
+/* we allocate (LNET_MT_HASH_SIZE + 1) entries for lnet_match_table::mt_hash,
+ * the last entry is reserved for MEs with ignore-bits */
+#define LNET_MT_HASH_IGNORE LNET_MT_HASH_SIZE
+/* a __u64 has 2^6 bits, so we need 2^(LNET_MT_HASH_BITS - LNET_MT_BITS_U64),
+ * i.e. 4 __u64s, as the bitmap, plus an extra __u64 (of which only one bit is
+ * used) for the ME list with ignore-bits, i.e. mtable::mt_hash[LNET_MT_HASH_IGNORE] */
+#define LNET_MT_BITS_U64 6 /* 2^6 bits */
+#define LNET_MT_EXHAUSTED_BITS (LNET_MT_HASH_BITS - LNET_MT_BITS_U64)
+#define LNET_MT_EXHAUSTED_BMAP ((1 << LNET_MT_EXHAUSTED_BITS) + 1)
+
+/* portal match table */
+struct lnet_match_table {
+ /* reserved for upcoming patches, CPU partition ID */
+ unsigned int mt_cpt;
+ unsigned int mt_portal; /* portal index */
+ /* match table is set as "enabled" if there is a non-exhausted MD
+ * attached on mt_mhash; it is only valid for wildcard portals */
+ unsigned int mt_enabled;
+ /* bitmap to flag whether MEs on mt_hash are exhausted or not */
+ __u64 mt_exhausted[LNET_MT_EXHAUSTED_BMAP];
+ struct list_head *mt_mhash; /* matching hash */
+};
+
+/* these are only useful for wildcard portal */
+/* Turn off message rotor for wildcard portals */
+#define LNET_PTL_ROTOR_OFF 0
+/* round-robin dispatch all PUT messages for wildcard portals */
+#define LNET_PTL_ROTOR_ON 1
+/* round-robin dispatch routed PUT message for wildcard portals */
+#define LNET_PTL_ROTOR_RR_RT 2
+/* dispatch routed PUT message by hashing source NID for wildcard portals */
+#define LNET_PTL_ROTOR_HASH_RT 3
+
+typedef struct lnet_portal {
+ spinlock_t ptl_lock;
+ unsigned int ptl_index; /* portal ID, reserved */
+ /* flags on this portal: lazy, unique... */
+ unsigned int ptl_options;
+ /* list of messages which are stealing buffer */
+ struct list_head ptl_msg_stealing;
+ /* messages blocking for MD */
+ struct list_head ptl_msg_delayed;
+ /* Match table for each CPT */
+ struct lnet_match_table **ptl_mtables;
+ /* spread rotor of incoming "PUT" */
+ int ptl_rotor;
+ /* # active entries for this portal */
+ int ptl_mt_nmaps;
+ /* array of active entries' cpu-partition-id */
+ int ptl_mt_maps[0];
+} lnet_portal_t;
+
+#define LNET_LH_HASH_BITS 12
+#define LNET_LH_HASH_SIZE (1ULL << LNET_LH_HASH_BITS)
+#define LNET_LH_HASH_MASK (LNET_LH_HASH_SIZE - 1)
+
+/* resource container (ME, MD, EQ) */
+struct lnet_res_container {
+ unsigned int rec_type; /* container type */
+ __u64 rec_lh_cookie; /* cookie generator */
+ struct list_head rec_active; /* active resource list */
+ struct list_head *rec_lh_hash; /* handle hash */
+#ifdef LNET_USE_LIB_FREELIST
+ lnet_freelist_t rec_freelist; /* freelist for resources */
+#endif
+};
+
+/* message container */
+struct lnet_msg_container {
+ int msc_init; /* initialized or not */
+ /* max # threads finalizing */
+ int msc_nfinalizers;
+ /* msgs waiting to complete finalizing */
+ struct list_head msc_finalizing;
+ struct list_head msc_active; /* active message list */
+ /* threads doing finalization */
+ void **msc_finalizers;
+#ifdef LNET_USE_LIB_FREELIST
+ lnet_freelist_t msc_freelist; /* freelist for messages */
+#endif
+};
+
+/* Router Checker states */
+#define LNET_RC_STATE_SHUTDOWN 0 /* not started */
+#define LNET_RC_STATE_RUNNING 1 /* started up OK */
+#define LNET_RC_STATE_STOPPING 2 /* telling thread to stop */
+
+typedef struct
+{
+ /* CPU partition table of LNet */
+ struct cfs_cpt_table *ln_cpt_table;
+ /* number of CPTs in ln_cpt_table */
+ unsigned int ln_cpt_number;
+ unsigned int ln_cpt_bits;
+
+ /* protect LNet resources (ME/MD/EQ) */
+ struct cfs_percpt_lock *ln_res_lock;
+ /* # portals */
+ int ln_nportals;
+ /* the vector of portals */
+ lnet_portal_t **ln_portals;
+ /* percpt ME containers */
+ struct lnet_res_container **ln_me_containers;
+ /* percpt MD container */
+ struct lnet_res_container **ln_md_containers;
+
+ /* Event Queue container */
+ struct lnet_res_container ln_eq_container;
+ wait_queue_head_t ln_eq_waitq;
+ spinlock_t ln_eq_wait_lock;
+ unsigned int ln_remote_nets_hbits;
+
+ /* protect NI, peer table, credits, routers, rtrbuf... */
+ struct cfs_percpt_lock *ln_net_lock;
+ /* percpt message containers for active/finalizing/freed message */
+ struct lnet_msg_container **ln_msg_containers;
+ lnet_counters_t **ln_counters;
+ struct lnet_peer_table **ln_peer_tables;
+ /* failure simulation */
+ struct list_head ln_test_peers;
+
+ struct list_head ln_nis; /* LND instances */
+ /* NIs bond on specific CPT(s) */
+ struct list_head ln_nis_cpt;
+ /* dying LND instances */
+ struct list_head ln_nis_zombie;
+ lnet_ni_t *ln_loni; /* the loopback NI */
+ /* NI to wait for events in */
+ lnet_ni_t *ln_eq_waitni;
+
+ /* remote networks with routes to them */
+ struct list_head *ln_remote_nets_hash;
+ /* validity stamp */
+ __u64 ln_remote_nets_version;
+ /* list of all known routers */
+ struct list_head ln_routers;
+ /* validity stamp */
+ __u64 ln_routers_version;
+ /* percpt router buffer pools */
+ lnet_rtrbufpool_t **ln_rtrpools;
+
+ lnet_handle_md_t ln_ping_target_md;
+ lnet_handle_eq_t ln_ping_target_eq;
+ lnet_ping_info_t *ln_ping_info;
+
+ /* router checker startup/shutdown state */
+ int ln_rc_state;
+ /* router checker's event queue */
+ lnet_handle_eq_t ln_rc_eqh;
+ /* rcd still pending on net */
+ struct list_head ln_rcd_deathrow;
+ /* rcd ready for free */
+ struct list_head ln_rcd_zombie;
+ /* serialise startup/shutdown */
+ struct semaphore ln_rc_signal;
+
+ struct mutex ln_api_mutex;
+ struct mutex ln_lnd_mutex;
+ int ln_init; /* LNetInit() called? */
+ /* Have I called LNetNIInit myself? */
+ int ln_niinit_self;
+ /* LNetNIInit/LNetNIFini counter */
+ int ln_refcount;
+ /* shutdown in progress */
+ int ln_shutdown;
+
+ int ln_routing; /* am I a router? */
+ lnet_pid_t ln_pid; /* requested pid */
+ /* uniquely identifies this ni in this epoch */
+ __u64 ln_interface_cookie;
+ /* registered LNDs */
+ struct list_head ln_lnds;
+
+ /* space for network names */
+ char *ln_network_tokens;
+ int ln_network_tokens_nob;
+ /* test protocol compatibility flags */
+ int ln_testprotocompat;
+
+} lnet_t;
+
+#endif
diff --git a/drivers/staging/lustre/include/linux/lnet/linux/api-support.h b/drivers/staging/lustre/include/linux/lnet/linux/api-support.h
new file mode 100644
index 0000000..ca78a0a
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/lnet/linux/api-support.h
@@ -0,0 +1,43 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __LINUX_API_SUPPORT_H__
+#define __LINUX_API_SUPPORT_H__
+
+#ifndef __LNET_API_SUPPORT_H__
+#error Do not #include this file directly. #include <lnet/api-support.h> instead
+#endif
+
+
+#endif
diff --git a/drivers/staging/lustre/include/linux/lnet/linux/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/linux/lib-lnet.h
new file mode 100644
index 0000000..d2c0a70
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/lnet/linux/lib-lnet.h
@@ -0,0 +1,72 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __LNET_LINUX_LIB_LNET_H__
+#define __LNET_LINUX_LIB_LNET_H__
+
+#ifndef __LNET_LIB_LNET_H__
+#error Do not #include this file directly. #include <linux/lnet/lib-lnet.h> instead
+#endif
+
+# include <asm/page.h>
+# include <linux/string.h>
+# include <asm/io.h>
+# include <linux/libcfs/libcfs.h>
+
+static inline __u64
+lnet_page2phys (struct page *p)
+{
+ /* compiler optimizer will elide unused branches */
+
+ switch (sizeof(typeof(page_to_phys(p)))) {
+ case 4:
+ /* page_to_phys returns a 32 bit physical address. This must
+ * be a 32 bit machine with <= 4G memory and we must ensure we
+ * don't sign extend when converting to 64 bits. */
+ return (unsigned long)page_to_phys(p);
+
+ case 8:
+ /* page_to_phys returns a 64 bit physical address :) */
+ return page_to_phys(p);
+
+ default:
+ LBUG();
+ return 0;
+ }
+}
+
+
+#define LNET_ROUTER
+
+#endif /* __LNET_LINUX_LIB_LNET_H__ */
diff --git a/drivers/staging/lustre/include/linux/lnet/linux/lib-types.h b/drivers/staging/lustre/include/linux/lnet/linux/lib-types.h
new file mode 100644
index 0000000..669e8c0
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/lnet/linux/lib-types.h
@@ -0,0 +1,45 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __LNET_LINUX_LIB_TYPES_H__
+#define __LNET_LINUX_LIB_TYPES_H__
+
+#ifndef __LNET_LIB_TYPES_H__
+#error Do not #include this file directly. #include <linux/lnet/lib-types.h> instead
+#endif
+
+# include <linux/uio.h>
+# include <linux/types.h>
+
+#endif
diff --git a/drivers/staging/lustre/include/linux/lnet/linux/lnet.h b/drivers/staging/lustre/include/linux/lnet/linux/lnet.h
new file mode 100644
index 0000000..1e888f1
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/lnet/linux/lnet.h
@@ -0,0 +1,56 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __LNET_LINUX_LNET_H__
+#define __LNET_LINUX_LNET_H__
+
+#ifndef __LNET_H__
+#error Do not #include this file directly. #include <linux/lnet/lnet.h> instead
+#endif
+
+/*
+ * lnet.h
+ *
+ * User application interface file
+ */
+
+#include <linux/uio.h>
+#include <linux/types.h>
+
+#define cfs_tcp_sendpage(sk, page, offset, size, flags) \
+ tcp_sendpage(sk, page, offset, size, flags)
+
+#endif
diff --git a/drivers/staging/lustre/include/linux/lnet/lnet-sysctl.h b/drivers/staging/lustre/include/linux/lnet/lnet-sysctl.h
new file mode 100644
index 0000000..1bde44e
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/lnet/lnet-sysctl.h
@@ -0,0 +1,51 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __LNET_SYSCTL_H__
+#define __LNET_SYSCTL_H__
+
+#if defined(CONFIG_SYSCTL)
+
+
+#define CTL_KRANAL 201
+#define CTL_O2IBLND 205
+#define CTL_PTLLND 206
+#define CTL_QSWNAL 207
+#define CTL_SOCKLND 208
+#define CTL_GNILND 210
+
+
+#endif
+
+#endif
diff --git a/drivers/staging/lustre/include/linux/lnet/lnet.h b/drivers/staging/lustre/include/linux/lnet/lnet.h
new file mode 100644
index 0000000..c532b15
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/lnet/lnet.h
@@ -0,0 +1,51 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __LNET_H__
+#define __LNET_H__
+
+/*
+ * lnet.h
+ *
+ * User application interface file
+ */
+#include <linux/lnet/linux/lnet.h>
+
+#include <linux/lnet/types.h>
+#include <linux/lnet/api.h>
+
+#define LNET_NIDSTR_COUNT 1024 /* # of nidstrings */
+#define LNET_NIDSTR_SIZE 32 /* size of each one (see below for usage) */
+
+#endif
diff --git a/drivers/staging/lustre/include/linux/lnet/lnetctl.h b/drivers/staging/lustre/include/linux/lnet/lnetctl.h
new file mode 100644
index 0000000..b22daa2
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/lnet/lnetctl.h
@@ -0,0 +1,80 @@
+/*
+ * This file is part of Portals, http://www.sf.net/projects/lustre/
+ *
+ * Portals is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Portals is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Portals; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * header for libptlctl.a
+ */
+#ifndef _PTLCTL_H_
+#define _PTLCTL_H_
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/lnet/types.h>
+
+#define LNET_DEV_ID 0
+#define LNET_DEV_PATH "/dev/lnet"
+#define LNET_DEV_MAJOR 10
+#define LNET_DEV_MINOR 240
+#define OBD_DEV_ID 1
+#define OBD_DEV_NAME "obd"
+#define OBD_DEV_PATH "/dev/" OBD_DEV_NAME
+#define OBD_DEV_MAJOR 10
+#define OBD_DEV_MINOR 241
+#define SMFS_DEV_ID 2
+#define SMFS_DEV_PATH "/dev/snapdev"
+#define SMFS_DEV_MAJOR 10
+#define SMFS_DEV_MINOR 242
+
+int ptl_initialize(int argc, char **argv);
+int jt_ptl_network(int argc, char **argv);
+int jt_ptl_list_nids(int argc, char **argv);
+int jt_ptl_which_nid(int argc, char **argv);
+int jt_ptl_print_interfaces(int argc, char **argv);
+int jt_ptl_add_interface(int argc, char **argv);
+int jt_ptl_del_interface(int argc, char **argv);
+int jt_ptl_print_peers (int argc, char **argv);
+int jt_ptl_add_peer (int argc, char **argv);
+int jt_ptl_del_peer (int argc, char **argv);
+int jt_ptl_print_connections (int argc, char **argv);
+int jt_ptl_disconnect(int argc, char **argv);
+int jt_ptl_push_connection(int argc, char **argv);
+int jt_ptl_print_active_txs(int argc, char **argv);
+int jt_ptl_ping(int argc, char **argv);
+int jt_ptl_mynid(int argc, char **argv);
+int jt_ptl_add_uuid(int argc, char **argv);
+int jt_ptl_add_uuid_old(int argc, char **argv); /* backwards compatibility */
+int jt_ptl_close_uuid(int argc, char **argv);
+int jt_ptl_del_uuid(int argc, char **argv);
+int jt_ptl_add_route (int argc, char **argv);
+int jt_ptl_del_route (int argc, char **argv);
+int jt_ptl_notify_router (int argc, char **argv);
+int jt_ptl_print_routes (int argc, char **argv);
+int jt_ptl_fail_nid (int argc, char **argv);
+int jt_ptl_lwt(int argc, char **argv);
+int jt_ptl_testprotocompat(int argc, char **argv);
+int jt_ptl_memhog(int argc, char **argv);
+
+int dbg_initialize(int argc, char **argv);
+int jt_dbg_filter(int argc, char **argv);
+int jt_dbg_show(int argc, char **argv);
+int jt_dbg_list(int argc, char **argv);
+int jt_dbg_debug_kernel(int argc, char **argv);
+int jt_dbg_debug_daemon(int argc, char **argv);
+int jt_dbg_debug_file(int argc, char **argv);
+int jt_dbg_clear_debug_buf(int argc, char **argv);
+int jt_dbg_mark_debug_buf(int argc, char **argv);
+int jt_dbg_modules(int argc, char **argv);
+int jt_dbg_panic(int argc, char **argv);
+
+#endif
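All of the jt_* declarations above share the (argc, argv) signature used by the lctl/lst command parsers, so user-space tools normally bind them through a name-to-handler table. The sketch below is a hedged illustration only: the table layout and dispatch() helper are assumptions, not the actual libptlctl interface, and linking against the library that implements the jt_* functions is presumed.

/* Illustrative dispatch table; only the jt_* prototypes come from
 * lnetctl.h (re-declared here so the sketch stands alone). */
#include <stdio.h>
#include <string.h>

int jt_ptl_network(int argc, char **argv);
int jt_ptl_list_nids(int argc, char **argv);
int jt_ptl_ping(int argc, char **argv);

struct cmd_entry {
	const char *name;
	int (*handler)(int argc, char **argv);
};

static const struct cmd_entry cmds[] = {
	{ "network",   jt_ptl_network },
	{ "list_nids", jt_ptl_list_nids },
	{ "ping",      jt_ptl_ping },
	{ NULL,        NULL },
};

static int dispatch(int argc, char **argv)
{
	const struct cmd_entry *c;

	for (c = cmds; c->name != NULL; c++)
		if (strcmp(argv[0], c->name) == 0)
			return c->handler(argc, argv);

	fprintf(stderr, "unknown command: %s\n", argv[0]);
	return -1;
}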
diff --git a/drivers/staging/lustre/include/linux/lnet/lnetst.h b/drivers/staging/lustre/include/linux/lnet/lnetst.h
new file mode 100644
index 0000000..e060599
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/lnet/lnetst.h
@@ -0,0 +1,491 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/include/lnet/lnetst.h
+ *
+ * Author: Liang Zhen <liangzhen@clusterfs.com>
+ */
+
+#ifndef __LNET_ST_H__
+#define __LNET_ST_H__
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/lnet/lnet.h>
+#include <linux/lnet/lib-types.h>
+
+#define LST_FEAT_NONE (0)
+#define LST_FEAT_BULK_LEN (1 << 0) /* enable variable page size */
+
+#define LST_FEATS_EMPTY (LST_FEAT_NONE)
+#define LST_FEATS_MASK (LST_FEAT_NONE | LST_FEAT_BULK_LEN)
+
+#define LST_NAME_SIZE 32 /* max name buffer length */
+
+#define LSTIO_DEBUG 0xC00 /* debug */
+#define LSTIO_SESSION_NEW 0xC01 /* create session */
+#define LSTIO_SESSION_END 0xC02 /* end session */
+#define LSTIO_SESSION_INFO 0xC03 /* query session */
+#define LSTIO_GROUP_ADD 0xC10 /* add group */
+#define LSTIO_GROUP_LIST 0xC11 /* list all groups in session */
+#define LSTIO_GROUP_INFO 0xC12 /* query default information of specified group */
+#define LSTIO_GROUP_DEL 0xC13 /* delete group */
+#define LSTIO_NODES_ADD 0xC14 /* add nodes to specified group */
+#define LSTIO_GROUP_UPDATE 0xC15 /* update group */
+#define LSTIO_BATCH_ADD 0xC20 /* add batch */
+#define LSTIO_BATCH_START 0xC21 /* start batch */
+#define LSTIO_BATCH_STOP 0xC22 /* stop batch */
+#define LSTIO_BATCH_DEL 0xC23 /* delete batch */
+#define LSTIO_BATCH_LIST 0xC24 /* show all batches in the session */
+#define LSTIO_BATCH_INFO 0xC25 /* show detail of specified batch */
+#define LSTIO_TEST_ADD 0xC26 /* add test (to batch) */
+#define LSTIO_BATCH_QUERY 0xC27 /* query batch status */
+#define LSTIO_STAT_QUERY 0xC30 /* get stats */
+
+typedef struct {
+ lnet_nid_t ses_nid; /* nid of console node */
+ __u64 ses_stamp; /* time stamp */
+} lst_sid_t; /*** session id */
+
+extern lst_sid_t LST_INVALID_SID;
+
+typedef struct {
+ __u64 bat_id; /* unique id in session */
+} lst_bid_t; /*** batch id (group of tests) */
+
+/* Status of test node */
+#define LST_NODE_ACTIVE 0x1 /* node in this session */
+#define LST_NODE_BUSY 0x2 /* node is taken by other session */
+#define LST_NODE_DOWN 0x4 /* node is down */
+#define LST_NODE_UNKNOWN 0x8 /* node not in session */
+
+typedef struct {
+ lnet_process_id_t nde_id; /* id of node */
+ int nde_state; /* state of node */
+} lstcon_node_ent_t; /*** node entry, for list_group command */
+
+typedef struct {
+ int nle_nnode; /* # of nodes */
+ int nle_nactive; /* # of active nodes */
+ int nle_nbusy; /* # of busy nodes */
+ int nle_ndown; /* # of down nodes */
+ int nle_nunknown; /* # of unknown nodes */
+} lstcon_ndlist_ent_t; /*** node_list entry, for list_batch command */
+
+typedef struct {
+ int tse_type; /* test type */
+ int tse_loop; /* loop count */
+ int tse_concur; /* concurrency of test */
+} lstcon_test_ent_t; /*** test summary entry, for list_batch command */
+
+typedef struct {
+ int bae_state; /* batch status */
+ int bae_timeout; /* batch timeout */
+ int bae_ntest; /* # of tests in the batch */
+} lstcon_batch_ent_t; /*** batch summary entry, for list_batch command */
+
+typedef struct {
+ lstcon_ndlist_ent_t tbe_cli_nle; /* client (group) node_list entry */
+ lstcon_ndlist_ent_t tbe_srv_nle; /* server (group) node_list entry */
+ union {
+ lstcon_test_ent_t tbe_test; /* test entry */
+ lstcon_batch_ent_t tbe_batch; /* batch entry */
+ } u;
+} lstcon_test_batch_ent_t; /*** test/batch verbose information entry,
+ *** for list_batch command */
+
+typedef struct {
+ struct list_head rpe_link; /* link chain */
+ lnet_process_id_t rpe_peer; /* peer's id */
+ struct timeval rpe_stamp; /* time stamp of RPC */
+ int rpe_state; /* peer's state */
+ int rpe_rpc_errno; /* RPC errno */
+
+ lst_sid_t rpe_sid; /* peer's session id */
+ int rpe_fwk_errno; /* framework errno */
+ int rpe_priv[4]; /* private data */
+ char rpe_payload[0]; /* private reply payload */
+} lstcon_rpc_ent_t;
+
+typedef struct {
+	int	trs_rpc_stat[4]; /* RPC stats (0: total, 1: success, 2: failure, 3: reserved) */
+ int trs_rpc_errno; /* RPC errno */
+ int trs_fwk_stat[8]; /* framework stat */
+ int trs_fwk_errno; /* errno of the first remote error */
+ void *trs_fwk_private; /* private framework stat */
+} lstcon_trans_stat_t;
+
+static inline int
+lstcon_rpc_stat_total(lstcon_trans_stat_t *stat, int inc)
+{
+ return inc ? ++stat->trs_rpc_stat[0] : stat->trs_rpc_stat[0];
+}
+
+static inline int
+lstcon_rpc_stat_success(lstcon_trans_stat_t *stat, int inc)
+{
+ return inc ? ++stat->trs_rpc_stat[1] : stat->trs_rpc_stat[1];
+}
+
+static inline int
+lstcon_rpc_stat_failure(lstcon_trans_stat_t *stat, int inc)
+{
+ return inc ? ++stat->trs_rpc_stat[2] : stat->trs_rpc_stat[2];
+}
+
+static inline int
+lstcon_sesop_stat_success(lstcon_trans_stat_t *stat, int inc)
+{
+ return inc ? ++stat->trs_fwk_stat[0] : stat->trs_fwk_stat[0];
+}
+
+static inline int
+lstcon_sesop_stat_failure(lstcon_trans_stat_t *stat, int inc)
+{
+ return inc ? ++stat->trs_fwk_stat[1] : stat->trs_fwk_stat[1];
+}
+
+static inline int
+lstcon_sesqry_stat_active(lstcon_trans_stat_t *stat, int inc)
+{
+ return inc ? ++stat->trs_fwk_stat[0] : stat->trs_fwk_stat[0];
+}
+
+static inline int
+lstcon_sesqry_stat_busy(lstcon_trans_stat_t *stat, int inc)
+{
+ return inc ? ++stat->trs_fwk_stat[1] : stat->trs_fwk_stat[1];
+}
+
+static inline int
+lstcon_sesqry_stat_unknown(lstcon_trans_stat_t *stat, int inc)
+{
+ return inc ? ++stat->trs_fwk_stat[2] : stat->trs_fwk_stat[2];
+}
+
+static inline int
+lstcon_tsbop_stat_success(lstcon_trans_stat_t *stat, int inc)
+{
+ return inc ? ++stat->trs_fwk_stat[0] : stat->trs_fwk_stat[0];
+}
+
+static inline int
+lstcon_tsbop_stat_failure(lstcon_trans_stat_t *stat, int inc)
+{
+ return inc ? ++stat->trs_fwk_stat[1] : stat->trs_fwk_stat[1];
+}
+
+static inline int
+lstcon_tsbqry_stat_idle(lstcon_trans_stat_t *stat, int inc)
+{
+ return inc ? ++stat->trs_fwk_stat[0] : stat->trs_fwk_stat[0];
+}
+
+static inline int
+lstcon_tsbqry_stat_run(lstcon_trans_stat_t *stat, int inc)
+{
+ return inc ? ++stat->trs_fwk_stat[1] : stat->trs_fwk_stat[1];
+}
+
+static inline int
+lstcon_tsbqry_stat_failure(lstcon_trans_stat_t *stat, int inc)
+{
+ return inc ? ++stat->trs_fwk_stat[2] : stat->trs_fwk_stat[2];
+}
+
+static inline int
+lstcon_statqry_stat_success(lstcon_trans_stat_t *stat, int inc)
+{
+ return inc ? ++stat->trs_fwk_stat[0] : stat->trs_fwk_stat[0];
+}
+
+static inline int
+lstcon_statqry_stat_failure(lstcon_trans_stat_t *stat, int inc)
+{
+ return inc ? ++stat->trs_fwk_stat[1] : stat->trs_fwk_stat[1];
+}
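Each lstcon_*_stat_* helper above doubles as writer and reader: a non-zero inc pre-increments the underlying counter slot, while inc == 0 just reads it back. A hedged usage sketch follows; the surrounding function is hypothetical and CWARN is the libcfs console macro pulled in via libcfs.h.

/* Illustrative only: the inc/read calling convention of the
 * lstcon_*_stat_*() accessors defined above. */
static void demo_account_rpc(lstcon_trans_stat_t *stat, int rc)
{
	lstcon_rpc_stat_total(stat, 1);		/* inc != 0: bump counter */
	if (rc == 0)
		lstcon_rpc_stat_success(stat, 1);
	else
		lstcon_rpc_stat_failure(stat, 1);

	/* inc == 0: read back without modifying anything */
	if (lstcon_rpc_stat_failure(stat, 0) > 0)
		CWARN("%d of %d RPCs failed\n",
		      lstcon_rpc_stat_failure(stat, 0),
		      lstcon_rpc_stat_total(stat, 0));
}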
+
+/* create a session */
+typedef struct {
+ int lstio_ses_key; /* IN: local key */
+ int lstio_ses_timeout; /* IN: session timeout */
+ int lstio_ses_force; /* IN: force create ? */
+ /** IN: session features */
+ unsigned lstio_ses_feats;
+ lst_sid_t *lstio_ses_idp; /* OUT: session id */
+ int lstio_ses_nmlen; /* IN: name length */
+ char *lstio_ses_namep; /* IN: session name */
+} lstio_session_new_args_t;
+
+/* query current session */
+typedef struct {
+ lst_sid_t *lstio_ses_idp; /* OUT: session id */
+ int *lstio_ses_keyp; /* OUT: local key */
+ /** OUT: session features */
+ unsigned *lstio_ses_featp;
+	lstcon_ndlist_ent_t	*lstio_ses_ndinfo;	/* OUT: session node list summary */
+ int lstio_ses_nmlen; /* IN: name length */
+ char *lstio_ses_namep; /* OUT: session name */
+} lstio_session_info_args_t;
+
+/* delete a session */
+typedef struct {
+ int lstio_ses_key; /* IN: session key */
+} lstio_session_end_args_t;
+
+#define LST_OPC_SESSION 1
+#define LST_OPC_GROUP 2
+#define LST_OPC_NODES 3
+#define LST_OPC_BATCHCLI 4
+#define LST_OPC_BATCHSRV 5
+
+typedef struct {
+ int lstio_dbg_key; /* IN: session key */
+	int			lstio_dbg_type;		/* IN: debug session|batch|group|nodes list */
+ int lstio_dbg_flags; /* IN: reserved debug flags */
+ int lstio_dbg_timeout; /* IN: timeout of debug */
+
+ int lstio_dbg_nmlen; /* IN: len of name */
+ char *lstio_dbg_namep; /* IN: name of group|batch */
+ int lstio_dbg_count; /* IN: # of test nodes to debug */
+ lnet_process_id_t *lstio_dbg_idsp; /* IN: id of test nodes */
+ struct list_head *lstio_dbg_resultp; /* OUT: list head of result buffer */
+} lstio_debug_args_t;
+
+typedef struct {
+ int lstio_grp_key; /* IN: session key */
+ int lstio_grp_nmlen; /* IN: name length */
+ char *lstio_grp_namep; /* IN: group name */
+} lstio_group_add_args_t;
+
+typedef struct {
+ int lstio_grp_key; /* IN: session key */
+ int lstio_grp_nmlen; /* IN: name length */
+ char *lstio_grp_namep; /* IN: group name */
+} lstio_group_del_args_t;
+
+#define LST_GROUP_CLEAN 1 /* remove inactive nodes in the group */
+#define LST_GROUP_REFRESH 2 /* refresh inactive nodes in the group */
+#define LST_GROUP_RMND 3 /* delete nodes from the group */
+
+typedef struct {
+ int lstio_grp_key; /* IN: session key */
+ int lstio_grp_opc; /* IN: OPC */
+ int lstio_grp_args; /* IN: arguments */
+ int lstio_grp_nmlen; /* IN: name length */
+ char *lstio_grp_namep; /* IN: group name */
+ int lstio_grp_count; /* IN: # of nodes id */
+ lnet_process_id_t *lstio_grp_idsp; /* IN: array of nodes */
+ struct list_head *lstio_grp_resultp; /* OUT: list head of result buffer */
+} lstio_group_update_args_t;
+
+typedef struct {
+ int lstio_grp_key; /* IN: session key */
+ int lstio_grp_nmlen; /* IN: name length */
+ char *lstio_grp_namep; /* IN: group name */
+ int lstio_grp_count; /* IN: # of nodes */
+ /** OUT: session features */
+ unsigned *lstio_grp_featp;
+ lnet_process_id_t *lstio_grp_idsp; /* IN: nodes */
+ struct list_head *lstio_grp_resultp; /* OUT: list head of result buffer */
+} lstio_group_nodes_args_t;
+
+typedef struct {
+ int lstio_grp_key; /* IN: session key */
+ int lstio_grp_idx; /* IN: group idx */
+ int lstio_grp_nmlen; /* IN: name len */
+ char *lstio_grp_namep; /* OUT: name */
+} lstio_group_list_args_t;
+
+typedef struct {
+ int lstio_grp_key; /* IN: session key */
+ int lstio_grp_nmlen; /* IN: name len */
+ char *lstio_grp_namep; /* IN: name */
+ lstcon_ndlist_ent_t *lstio_grp_entp; /* OUT: description of group */
+
+ int *lstio_grp_idxp; /* IN/OUT: node index */
+ int *lstio_grp_ndentp; /* IN/OUT: # of nodent */
+ lstcon_node_ent_t *lstio_grp_dentsp; /* OUT: nodent array */
+} lstio_group_info_args_t;
+
+#define LST_DEFAULT_BATCH "batch" /* default batch name */
+
+typedef struct {
+ int lstio_bat_key; /* IN: session key */
+ int lstio_bat_nmlen; /* IN: name length */
+ char *lstio_bat_namep; /* IN: batch name */
+} lstio_batch_add_args_t;
+
+typedef struct {
+ int lstio_bat_key; /* IN: session key */
+ int lstio_bat_nmlen; /* IN: name length */
+ char *lstio_bat_namep; /* IN: batch name */
+} lstio_batch_del_args_t;
+
+typedef struct {
+ int lstio_bat_key; /* IN: session key */
+ int lstio_bat_timeout; /* IN: timeout for the batch */
+ int lstio_bat_nmlen; /* IN: name length */
+ char *lstio_bat_namep; /* IN: batch name */
+ struct list_head *lstio_bat_resultp; /* OUT: list head of result buffer */
+} lstio_batch_run_args_t;
+
+typedef struct {
+ int lstio_bat_key; /* IN: session key */
+ int lstio_bat_force; /* IN: abort unfinished test RPC */
+ int lstio_bat_nmlen; /* IN: name length */
+ char *lstio_bat_namep; /* IN: batch name */
+ struct list_head *lstio_bat_resultp; /* OUT: list head of result buffer */
+} lstio_batch_stop_args_t;
+
+typedef struct {
+ int lstio_bat_key; /* IN: session key */
+ int lstio_bat_testidx; /* IN: test index */
+ int lstio_bat_client; /* IN: is test client? */
+ int lstio_bat_timeout; /* IN: timeout for waiting */
+ int lstio_bat_nmlen; /* IN: name length */
+ char *lstio_bat_namep; /* IN: batch name */
+ struct list_head *lstio_bat_resultp; /* OUT: list head of result buffer */
+} lstio_batch_query_args_t;
+
+typedef struct {
+ int lstio_bat_key; /* IN: session key */
+ int lstio_bat_idx; /* IN: index */
+ int lstio_bat_nmlen; /* IN: name length */
+ char *lstio_bat_namep; /* IN: batch name */
+} lstio_batch_list_args_t;
+
+typedef struct {
+ int lstio_bat_key; /* IN: session key */
+ int lstio_bat_nmlen; /* IN: name length */
+ char *lstio_bat_namep; /* IN: name */
+ int lstio_bat_server; /* IN: query server or not */
+ int lstio_bat_testidx; /* IN: test index */
+ lstcon_test_batch_ent_t *lstio_bat_entp; /* OUT: batch ent */
+
+ int *lstio_bat_idxp; /* IN/OUT: index of node */
+ int *lstio_bat_ndentp; /* IN/OUT: # of nodent */
+ lstcon_node_ent_t *lstio_bat_dentsp; /* array of nodent */
+} lstio_batch_info_args_t;
+
+/* add stat in session */
+typedef struct {
+ int lstio_sta_key; /* IN: session key */
+	int			lstio_sta_timeout;	/* IN: timeout for stat request */
+ int lstio_sta_nmlen; /* IN: group name length */
+ char *lstio_sta_namep; /* IN: group name */
+ int lstio_sta_count; /* IN: # of pid */
+ lnet_process_id_t *lstio_sta_idsp; /* IN: pid */
+ struct list_head *lstio_sta_resultp; /* OUT: list head of result buffer */
+} lstio_stat_args_t;
+
+typedef enum {
+ LST_TEST_BULK = 1,
+ LST_TEST_PING = 2
+} lst_test_type_t;
+
+/* create a test in a batch */
+#define LST_MAX_CONCUR 1024 /* Max concurrency of test */
+
+typedef struct {
+ int lstio_tes_key; /* IN: session key */
+ int lstio_tes_bat_nmlen; /* IN: batch name len */
+ char *lstio_tes_bat_name; /* IN: batch name */
+ int lstio_tes_type; /* IN: test type */
+ int lstio_tes_oneside; /* IN: one sided test */
+ int lstio_tes_loop; /* IN: loop count */
+ int lstio_tes_concur; /* IN: concurrency */
+
+ int lstio_tes_dist; /* IN: node distribution in destination groups */
+ int lstio_tes_span; /* IN: node span in destination groups */
+ int lstio_tes_sgrp_nmlen; /* IN: source group name length */
+ char *lstio_tes_sgrp_name; /* IN: group name */
+ int lstio_tes_dgrp_nmlen; /* IN: destination group name length */
+ char *lstio_tes_dgrp_name; /* IN: group name */
+
+ int lstio_tes_param_len; /* IN: param buffer len */
+ void *lstio_tes_param; /* IN: parameter for specified test:
+							   lst_test_bulk_param_t,
+							   lst_test_ping_param_t,
+ ... more */
+ int *lstio_tes_retp; /* OUT: private returned value */
+ struct list_head *lstio_tes_resultp; /* OUT: list head of result buffer */
+} lstio_test_args_t;
+
+typedef enum {
+ LST_BRW_READ = 1,
+ LST_BRW_WRITE = 2
+} lst_brw_type_t;
+
+typedef enum {
+ LST_BRW_CHECK_NONE = 1,
+ LST_BRW_CHECK_SIMPLE = 2,
+ LST_BRW_CHECK_FULL = 3
+} lst_brw_flags_t;
+
+typedef struct {
+ int blk_opc; /* bulk operation code */
+ int blk_size; /* size (bytes) */
+ int blk_time; /* time of running the test*/
+ int blk_flags; /* reserved flags */
+} lst_test_bulk_param_t;
+
+typedef struct {
+ int png_size; /* size of ping message */
+ int png_time; /* time */
+ int png_loop; /* loop */
+ int png_flags; /* reserved flags */
+} lst_test_ping_param_t;
+
+/* more tests */
+typedef struct {
+ __u32 errors;
+ __u32 rpcs_sent;
+ __u32 rpcs_rcvd;
+ __u32 rpcs_dropped;
+ __u32 rpcs_expired;
+ __u64 bulk_get;
+ __u64 bulk_put;
+} WIRE_ATTR srpc_counters_t;
+
+typedef struct {
+ /** milliseconds since current session started */
+ __u32 running_ms;
+ __u32 active_batches;
+ __u32 zombie_sessions;
+ __u32 brw_errors;
+ __u32 ping_errors;
+} WIRE_ATTR sfw_counters_t;
+
+#endif
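lstio_test_args_t carries its per-test parameters as an opaque (param, param_len) pair, so a bulk test passes a lst_test_bulk_param_t through it. The sketch below is hedged and illustrative only: it fills the two structures as the comments above describe, but the transport step (ioctl number, copy to the kernel) is outside this header and not shown, and the batch/group name fields are omitted for brevity.

/* Illustrative only: pairing lst_test_bulk_param_t with lstio_test_args_t. */
#include <string.h>

static void demo_fill_bulk_test(lstio_test_args_t *args, int session_key)
{
	static lst_test_bulk_param_t bulk = {
		.blk_opc   = LST_BRW_WRITE,
		.blk_size  = 4096,	/* bytes per bulk transfer */
		.blk_time  = 0,
		.blk_flags = 0,		/* reserved, per the comment above */
	};

	memset(args, 0, sizeof(*args));
	args->lstio_tes_key       = session_key;
	args->lstio_tes_type      = LST_TEST_BULK;
	args->lstio_tes_loop      = 100;	/* iterations */
	args->lstio_tes_concur    = 8;		/* must be <= LST_MAX_CONCUR */
	args->lstio_tes_param     = &bulk;
	args->lstio_tes_param_len = sizeof(bulk);
	/* lstio_tes_bat_name and the source/destination group fields would
	 * be filled in here as well; omitted in this sketch. */
}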
diff --git a/drivers/staging/lustre/include/linux/lnet/ptllnd.h b/drivers/staging/lustre/include/linux/lnet/ptllnd.h
new file mode 100644
index 0000000..564f5d3
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/lnet/ptllnd.h
@@ -0,0 +1,94 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/include/lnet/ptllnd.h
+ *
+ * Author: PJ Kirner <pjkirner@clusterfs.com>
+ */
+
+/*
+ * The PTLLND was designed to support Portals with
+ * Lustre and non-lustre UNLINK semantics.
+ * However, for now both targets, Cray Portals
+ * on the XT3 and Lustre Portals (used for testing),
+ * have Lustre UNLINK semantics, so this is defined
+ * by default.
+ */
+#define LUSTRE_PORTALS_UNLINK_SEMANTICS
+
+
+#ifdef _USING_LUSTRE_PORTALS_
+
+/* NIDs are 64-bits on Lustre Portals */
+#define FMT_NID LPU64
+#define FMT_PID "%d"
+
+/* When using Lustre Portals, Lustre completion semantics are implicit */
+#define PTL_MD_LUSTRE_COMPLETION_SEMANTICS 0
+
+#else /* _USING_CRAY_PORTALS_ */
+
+/* NIDs are integers on Cray Portals */
+#define FMT_NID "%u"
+#define FMT_PID "%d"
+
+/* When using Cray Portals this is defined in the Cray Portals Header*/
+/*#define PTL_MD_LUSTRE_COMPLETION_SEMANTICS */
+
+/* Can compare handles directly on Cray Portals */
+#define PtlHandleIsEqual(a,b) ((a) == (b))
+
+/* Different error types on Cray Portals*/
+#define ptl_err_t ptl_ni_fail_t
+
+/*
+ * The Cray Portals has no maximum number of IOVs. The
+ * maximum is limited only by memory and size of the
+ * int parameters (2^31-1).
+ * Lustre only really requires that the underlying
+ * implementation support at least LNET_MAX_IOV,
+ * so for Cray Portals we can safely just use that
+ * value here.
+ *
+ */
+#define PTL_MD_MAX_IOV LNET_MAX_IOV
+
+#endif
+
+#define FMT_PTLID "ptlid:"FMT_PID"-"FMT_NID
+
+/* Align incoming small request messages to an 8 byte boundary if this is
+ * supported to avoid alignment issues on some architectures */
+#ifndef PTL_MD_LOCAL_ALIGN8
+# define PTL_MD_LOCAL_ALIGN8 0
+#endif
diff --git a/drivers/staging/lustre/include/linux/lnet/ptllnd_wire.h b/drivers/staging/lustre/include/linux/lnet/ptllnd_wire.h
new file mode 100644
index 0000000..7d12b3a
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/lnet/ptllnd_wire.h
@@ -0,0 +1,124 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/include/lnet/ptllnd_wire.h
+ *
+ * Author: PJ Kirner <pjkirner@clusterfs.com>
+ */
+
+/* Minimum buffer size that any peer will post to receive ptllnd messages */
+#define PTLLND_MIN_BUFFER_SIZE 256
+
+/************************************************************************
+ * Tunable defaults that {u,k}lnds/ptllnd should have in common.
+ */
+
+#define PTLLND_PORTAL	    9	       /* The same portal PTLRPC uses when talking to Cray Portals */
+#define PTLLND_PID 9 /* The Portals PID */
+#define PTLLND_PEERCREDITS 8 /* concurrent sends to 1 peer */
+
+/* Default buffer size for kernel ptllnds (guaranteed eager) */
+#define PTLLND_MAX_KLND_MSG_SIZE 512
+
+/* Default buffer size for catamount ptllnds (not guaranteed eager) - large
+ * enough to avoid RDMA for anything sent while control is not in liblustre */
+#define PTLLND_MAX_ULND_MSG_SIZE 512
+
+
+/************************************************************************
+ * Portals LND Wire message format.
+ * These are sent in sender's byte order (i.e. receiver flips).
+ */
+
+#define PTL_RESERVED_MATCHBITS	  0x100 /* match bits below this value are reserved;
+					 * those above are used for bulk data transfer */
+#define LNET_MSG_MATCHBITS 0 /* the value for the message channel */
+
+typedef struct
+{
+ lnet_hdr_t kptlim_hdr; /* portals header */
+ char kptlim_payload[0]; /* piggy-backed payload */
+} WIRE_ATTR kptl_immediate_msg_t;
+
+typedef struct
+{
+ lnet_hdr_t kptlrm_hdr; /* portals header */
+ __u64 kptlrm_matchbits; /* matchbits */
+} WIRE_ATTR kptl_rdma_msg_t;
+
+typedef struct
+{
+ __u64 kptlhm_matchbits; /* matchbits */
+ __u32 kptlhm_max_msg_size; /* max message size */
+} WIRE_ATTR kptl_hello_msg_t;
+
+typedef struct
+{
+ /* First 2 fields fixed FOR ALL TIME */
+ __u32 ptlm_magic; /* I'm a Portals LND message */
+ __u16 ptlm_version; /* this is my version number */
+ __u8 ptlm_type; /* the message type */
+ __u8 ptlm_credits; /* returned credits */
+ __u32 ptlm_nob; /* # bytes in whole message */
+ __u32 ptlm_cksum; /* checksum (0 == no checksum) */
+ __u64 ptlm_srcnid; /* sender's NID */
+ __u64 ptlm_srcstamp; /* sender's incarnation */
+ __u64 ptlm_dstnid; /* destination's NID */
+ __u64 ptlm_dststamp; /* destination's incarnation */
+ __u32 ptlm_srcpid; /* sender's PID */
+ __u32 ptlm_dstpid; /* destination's PID */
+
+ union {
+ kptl_immediate_msg_t immediate;
+ kptl_rdma_msg_t rdma;
+ kptl_hello_msg_t hello;
+ } WIRE_ATTR ptlm_u;
+
+} kptl_msg_t;
+
+/* kptl_msg_t::ptlm_credits is only a __u8 */
+#define PTLLND_MSG_MAX_CREDITS ((typeof(((kptl_msg_t*) 0)->ptlm_credits)) -1)
+
+#define PTLLND_MSG_MAGIC LNET_PROTO_PTL_MAGIC
+#define PTLLND_MSG_VERSION 0x04
+
+#define PTLLND_RDMA_OK 0x00
+#define PTLLND_RDMA_FAIL 0x01
+
+#define PTLLND_MSG_TYPE_INVALID 0x00
+#define PTLLND_MSG_TYPE_PUT 0x01
+#define PTLLND_MSG_TYPE_GET 0x02
+#define PTLLND_MSG_TYPE_IMMEDIATE 0x03 /* No bulk data xfer*/
+#define PTLLND_MSG_TYPE_NOOP 0x04
+#define PTLLND_MSG_TYPE_HELLO 0x05
+#define PTLLND_MSG_TYPE_NAK 0x06
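kptl_msg_t messages travel in the sender's byte order, so the sender fills the header natively and leaves any swabbing to the receiver. Below is a hedged sketch of initializing a HELLO message from the structures and constants above; the function name is hypothetical, the nob calculation is an assumption, and the pid/incarnation-stamp fields plus the actual send path are omitted.

/* Illustrative only: populate the fixed kptl_msg_t header and the
 * hello payload. Not the driver's real packing routine. */
#include <linux/stddef.h>

static void demo_init_hello(kptl_msg_t *msg, __u64 srcnid, __u64 dstnid)
{
	msg->ptlm_magic   = PTLLND_MSG_MAGIC;
	msg->ptlm_version = PTLLND_MSG_VERSION;
	msg->ptlm_type    = PTLLND_MSG_TYPE_HELLO;
	msg->ptlm_credits = PTLLND_PEERCREDITS;
	msg->ptlm_nob     = offsetof(kptl_msg_t, ptlm_u) +
			    sizeof(msg->ptlm_u.hello);
	msg->ptlm_cksum   = 0;			/* 0 == no checksum */
	msg->ptlm_srcnid  = srcnid;
	msg->ptlm_dstnid  = dstnid;
	/* pids and incarnation stamps omitted for brevity */

	msg->ptlm_u.hello.kptlhm_matchbits    = PTL_RESERVED_MATCHBITS;
	msg->ptlm_u.hello.kptlhm_max_msg_size = PTLLND_MAX_KLND_MSG_SIZE;
}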
diff --git a/drivers/staging/lustre/include/linux/lnet/socklnd.h b/drivers/staging/lustre/include/linux/lnet/socklnd.h
new file mode 100644
index 0000000..bacc749
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/lnet/socklnd.h
@@ -0,0 +1,103 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/include/lnet/socklnd.h
+ *
+ * #defines shared between socknal implementation and utilities
+ */
+#ifndef __LNET_LNET_SOCKLND_H__
+#define __LNET_LNET_SOCKLND_H__
+
+#include <linux/lnet/types.h>
+#include <linux/lnet/lib-types.h>
+
+#define SOCKLND_CONN_NONE (-1)
+#define SOCKLND_CONN_ANY 0
+#define SOCKLND_CONN_CONTROL 1
+#define SOCKLND_CONN_BULK_IN 2
+#define SOCKLND_CONN_BULK_OUT 3
+#define SOCKLND_CONN_NTYPES 4
+
+#define SOCKLND_CONN_ACK SOCKLND_CONN_BULK_IN
+
+typedef struct {
+ __u32 kshm_magic; /* magic number of socklnd message */
+ __u32 kshm_version; /* version of socklnd message */
+ lnet_nid_t kshm_src_nid; /* sender's nid */
+ lnet_nid_t kshm_dst_nid; /* destination nid */
+ lnet_pid_t kshm_src_pid; /* sender's pid */
+ lnet_pid_t kshm_dst_pid; /* destination pid */
+ __u64 kshm_src_incarnation; /* sender's incarnation */
+ __u64 kshm_dst_incarnation; /* destination's incarnation */
+ __u32 kshm_ctype; /* connection type */
+ __u32 kshm_nips; /* # IP addrs */
+ __u32 kshm_ips[0]; /* IP addrs */
+} WIRE_ATTR ksock_hello_msg_t;
+
+typedef struct {
+ lnet_hdr_t ksnm_hdr; /* lnet hdr */
+
+ /*
+	 * ksnm_payload is removed because of a winnt compiler limitation:
+	 * a zero-sized array can only be placed at the tail of [nested]
+	 * structure definitions. The lnet payload is stored just after
+	 * the body of struct ksock_lnet_msg_t.
+ */
+} WIRE_ATTR ksock_lnet_msg_t;
+
+typedef struct {
+ __u32 ksm_type; /* type of socklnd message */
+ __u32 ksm_csum; /* checksum if != 0 */
+ __u64 ksm_zc_cookies[2]; /* Zero-Copy request/ACK cookie */
+ union {
+		ksock_lnet_msg_t lnetmsg;	/* lnet message; empty if it's a NOOP */
+ } WIRE_ATTR ksm_u;
+} WIRE_ATTR ksock_msg_t;
+
+static inline void
+socklnd_init_msg(ksock_msg_t *msg, int type)
+{
+ msg->ksm_csum = 0;
+ msg->ksm_type = type;
+ msg->ksm_zc_cookies[0] = msg->ksm_zc_cookies[1] = 0;
+}
+
+#define KSOCK_MSG_NOOP 0xc0 /* ksm_u empty */
+#define KSOCK_MSG_LNET 0xc1 /* lnet msg */
+
+/* We need to know this number to parse hello msg from ksocklnd in
+ * other LNDs (usocklnd, for example) */
+#define KSOCK_PROTO_V2 2
+#define KSOCK_PROTO_V3 3
+
+#endif
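socklnd_init_msg() above only resets the checksum and zero-copy cookies; the caller still chooses the message type and, for KSOCK_MSG_LNET, fills ksm_u.lnetmsg. A hedged sketch of building a NOOP keep-alive message follows; the function name is illustrative.

/* Illustrative only: a NOOP carries no lnet payload (ksm_u stays empty). */
static void demo_build_noop(ksock_msg_t *msg)
{
	socklnd_init_msg(msg, KSOCK_MSG_NOOP);

	/* A zero-copy ACK cookie could be stashed here by the protocol
	 * code; leaving both cookies 0 means "nothing to acknowledge". */
	msg->ksm_zc_cookies[1] = 0;
}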
diff --git a/drivers/staging/lustre/include/linux/lnet/types.h b/drivers/staging/lustre/include/linux/lnet/types.h
new file mode 100644
index 0000000..4f63b7a
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/lnet/types.h
@@ -0,0 +1,503 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __LNET_TYPES_H__
+#define __LNET_TYPES_H__
+
+/** \addtogroup lnet
+ * @{ */
+
+#include <linux/libcfs/libcfs.h>
+
+/** \addtogroup lnet_addr
+ * @{ */
+
+/** Portal reserved for LNet's own use.
+ * \see lustre/include/lustre/lustre_idl.h for Lustre portal assignments.
+ */
+#define LNET_RESERVED_PORTAL 0
+
+/**
+ * Address of an end-point in an LNet network.
+ *
+ * A node can have multiple end-points and hence multiple addresses.
+ * An LNet network can be a simple network (e.g. tcp0) or a network of
+ * LNet networks connected by LNet routers. Therefore an end-point address
+ * has two parts: network ID, and address within a network.
+ *
+ * \see LNET_NIDNET, LNET_NIDADDR, and LNET_MKNID.
+ */
+typedef __u64 lnet_nid_t;
+/**
+ * ID of a process in a node. Shortened as PID to distinguish from
+ * lnet_process_id_t, the global process ID.
+ */
+typedef __u32 lnet_pid_t;
+
+/** wildcard NID that matches any end-point address */
+#define LNET_NID_ANY ((lnet_nid_t) -1)
+/** wildcard PID that matches any lnet_pid_t */
+#define LNET_PID_ANY ((lnet_pid_t) -1)
+
+#define LNET_PID_RESERVED 0xf0000000 /* reserved bits in PID */
+#define LNET_PID_USERFLAG 0x80000000 /* set in userspace peers */
+
+#define LNET_TIME_FOREVER (-1)
+
+/**
+ * Objects maintained by the LNet are accessed through handles. Handle types
+ * have names of the form lnet_handle_xx_t, where xx is one of the two letter
+ * object type codes ('eq' for event queue, 'md' for memory descriptor, and
+ * 'me' for match entry).
+ * Each type of object is given a unique handle type to enhance type checking.
+ * The type lnet_handle_any_t can be used when a generic handle is needed.
+ * Every handle value can be converted into a value of type lnet_handle_any_t
+ * without loss of information.
+ */
+typedef struct {
+ __u64 cookie;
+} lnet_handle_any_t;
+
+typedef lnet_handle_any_t lnet_handle_eq_t;
+typedef lnet_handle_any_t lnet_handle_md_t;
+typedef lnet_handle_any_t lnet_handle_me_t;
+
+#define LNET_WIRE_HANDLE_COOKIE_NONE (-1)
+
+/**
+ * Invalidate handle \a h.
+ */
+static inline void LNetInvalidateHandle(lnet_handle_any_t *h)
+{
+ h->cookie = LNET_WIRE_HANDLE_COOKIE_NONE;
+}
+
+/**
+ * Compare handles \a h1 and \a h2.
+ *
+ * \return 1 if handles are equal, 0 otherwise.
+ */
+static inline int LNetHandleIsEqual (lnet_handle_any_t h1, lnet_handle_any_t h2)
+{
+ return (h1.cookie == h2.cookie);
+}
+
+/**
+ * Check whether handle \a h is invalid.
+ *
+ * \return 1 if handle is invalid, 0 if valid.
+ */
+static inline int LNetHandleIsInvalid(lnet_handle_any_t h)
+{
+ return (LNET_WIRE_HANDLE_COOKIE_NONE == h.cookie);
+}
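The three helpers above make the "nullified handle" idiom explicit: a handle is invalidated before use and then tested, rather than compared against the raw cookie value. A hedged usage sketch (hypothetical function name):

/* Illustrative only: typical lifecycle of an event-queue handle. */
static void demo_handle_idiom(void)
{
	lnet_handle_eq_t eqh;

	LNetInvalidateHandle(&eqh);	/* start out explicitly invalid */

	/* ... code may or may not assign a real handle to eqh here ... */

	if (LNetHandleIsInvalid(eqh))
		return;			/* never attached, nothing to do */
}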
+
+/**
+ * Global process ID.
+ */
+typedef struct {
+ /** node id */
+ lnet_nid_t nid;
+ /** process id */
+ lnet_pid_t pid;
+} lnet_process_id_t;
+/** @} lnet_addr */
+
+/** \addtogroup lnet_me
+ * @{ */
+
+/**
+ * Specifies whether the match entry or memory descriptor should be unlinked
+ * automatically (LNET_UNLINK) or not (LNET_RETAIN).
+ */
+typedef enum {
+ LNET_RETAIN = 0,
+ LNET_UNLINK
+} lnet_unlink_t;
+
+/**
+ * Values of the type lnet_ins_pos_t are used to control where a new match
+ * entry is inserted. The value LNET_INS_BEFORE is used to insert the new
+ * entry before the current entry or before the head of the list. The value
+ * LNET_INS_AFTER is used to insert the new entry after the current entry
+ * or after the last item in the list.
+ */
+typedef enum {
+ /** insert ME before current position or head of the list */
+ LNET_INS_BEFORE,
+ /** insert ME after current position or tail of the list */
+ LNET_INS_AFTER,
+ /** attach ME at tail of local CPU partition ME list */
+ LNET_INS_LOCAL
+} lnet_ins_pos_t;
+
+/** @} lnet_me */
+
+/** \addtogroup lnet_md
+ * @{ */
+
+/**
+ * Defines the visible parts of a memory descriptor. Values of this type
+ * are used to initialize memory descriptors.
+ */
+typedef struct {
+ /**
+ * Specify the memory region associated with the memory descriptor.
+ * If the options field has:
+ * - LNET_MD_KIOV bit set: The start field points to the starting
+ * address of an array of lnet_kiov_t and the length field specifies
+ * the number of entries in the array. The length can't be bigger
+ * than LNET_MAX_IOV. The lnet_kiov_t is used to describe page-based
+	 * fragments that are not necessarily mapped in virtual memory.
+ * - LNET_MD_IOVEC bit set: The start field points to the starting
+ * address of an array of struct iovec and the length field specifies
+ * the number of entries in the array. The length can't be bigger
+ * than LNET_MAX_IOV. The struct iovec is used to describe fragments
+ * that have virtual addresses.
+ * - Otherwise: The memory region is contiguous. The start field
+ * specifies the starting address for the memory region and the
+ * length field specifies its length.
+ *
+ * When the memory region is fragmented, all fragments but the first
+ * one must start on page boundary, and all but the last must end on
+ * page boundary.
+ */
+ void *start;
+ unsigned int length;
+ /**
+ * Specifies the maximum number of operations that can be performed
+ * on the memory descriptor. An operation is any action that could
+ * possibly generate an event. In the usual case, the threshold value
+ * is decremented for each operation on the MD. When the threshold
+ * drops to zero, the MD becomes inactive and does not respond to
+ * operations. A threshold value of LNET_MD_THRESH_INF indicates that
+ * there is no bound on the number of operations that may be applied
+ * to a MD.
+ */
+ int threshold;
+ /**
+ * Specifies the largest incoming request that the memory descriptor
+ * should respond to. When the unused portion of a MD (length -
+ * local offset) falls below this value, the MD becomes inactive and
+ * does not respond to further operations. This value is only used
+ * if the LNET_MD_MAX_SIZE option is set.
+ */
+ int max_size;
+ /**
+ * Specifies the behavior of the memory descriptor. A bitwise OR
+ * of the following values can be used:
+ * - LNET_MD_OP_PUT: The LNet PUT operation is allowed on this MD.
+ * - LNET_MD_OP_GET: The LNet GET operation is allowed on this MD.
+ * - LNET_MD_MANAGE_REMOTE: The offset used in accessing the memory
+ * region is provided by the incoming request. By default, the
+ * offset is maintained locally. When maintained locally, the
+ * offset is incremented by the length of the request so that
+ * the next operation (PUT or GET) will access the next part of
+ * the memory region. Note that only one offset variable exists
+ * per memory descriptor. If both PUT and GET operations are
+ * performed on a memory descriptor, the offset is updated each time.
+ * - LNET_MD_TRUNCATE: The length provided in the incoming request can
+ * be reduced to match the memory available in the region (determined
+ * by subtracting the offset from the length of the memory region).
+ * By default, if the length in the incoming operation is greater
+ * than the amount of memory available, the operation is rejected.
+ * - LNET_MD_ACK_DISABLE: An acknowledgment should not be sent for
+ * incoming PUT operations, even if requested. By default,
+ * acknowledgments are sent for PUT operations that request an
+ * acknowledgment. Acknowledgments are never sent for GET operations.
+ * The data sent in the REPLY serves as an implicit acknowledgment.
+ * - LNET_MD_KIOV: The start and length fields specify an array of
+ * lnet_kiov_t.
+ * - LNET_MD_IOVEC: The start and length fields specify an array of
+ * struct iovec.
+ * - LNET_MD_MAX_SIZE: The max_size field is valid.
+ *
+ * Note:
+ * - LNET_MD_KIOV or LNET_MD_IOVEC allows for a scatter/gather
+ * capability for memory descriptors. They can't be both set.
+ * - When LNET_MD_MAX_SIZE is set, the total length of the memory
+ * region (i.e. sum of all fragment lengths) must not be less than
+ * \a max_size.
+ */
+ unsigned int options;
+ /**
+ * A user-specified value that is associated with the memory
+ * descriptor. The value does not need to be a pointer, but must fit
+ * in the space used by a pointer. This value is recorded in events
+ * associated with operations on this MD.
+ */
+ void *user_ptr;
+ /**
+ * A handle for the event queue used to log the operations performed on
+ * the memory region. If this argument is a NULL handle (i.e. nullified
+ * by LNetInvalidateHandle()), operations performed on this memory
+ * descriptor are not logged.
+ */
+ lnet_handle_eq_t eq_handle;
+} lnet_md_t;
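A hedged sketch of filling in lnet_md_t for the simplest case described above: a contiguous buffer that accepts PUTs and reports events to an existing EQ. The buffer and EQ handle are assumed to exist, and attaching the MD (LNetMDAttach/LNetMDBind) is not shown.

/* Illustrative only: contiguous-region MD descriptor per the field
 * documentation above. */
static void demo_fill_md(lnet_md_t *md, void *buf, unsigned int len,
			 lnet_handle_eq_t eqh)
{
	md->start     = buf;			/* contiguous region */
	md->length    = len;
	md->threshold = LNET_MD_THRESH_INF;	/* never auto-deactivate */
	md->max_size  = 0;			/* unused: LNET_MD_MAX_SIZE not set */
	md->options   = LNET_MD_OP_PUT;		/* accept incoming PUTs only */
	md->user_ptr  = NULL;
	md->eq_handle = eqh;			/* events logged to this EQ */
}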
+
+/* Max Transfer Unit (minimum supported everywhere).
+ * CAVEAT EMPTOR, with multinet (i.e. routers forwarding between networks)
+ * these limits are system wide and not interface-local. */
+#define LNET_MTU_BITS 20
+#define LNET_MTU (1 << LNET_MTU_BITS)
+
+/** limit on the number of fragments in discontiguous MDs */
+#define LNET_MAX_IOV 256
+
+/* Max payload size */
+# define LNET_MAX_PAYLOAD CONFIG_LNET_MAX_PAYLOAD
+# if (LNET_MAX_PAYLOAD < LNET_MTU)
+# error "LNET_MAX_PAYLOAD too small - error in configure --with-max-payload-mb"
+# else
+# if (LNET_MAX_PAYLOAD > (PAGE_SIZE * LNET_MAX_IOV))
+/* PAGE_SIZE is a constant: check with cpp! */
+# error "LNET_MAX_PAYLOAD too large - error in configure --with-max-payload-mb"
+# endif
+# endif
+
+/**
+ * Options for the MD structure. See lnet_md_t::options.
+ */
+#define LNET_MD_OP_PUT (1 << 0)
+/** See lnet_md_t::options. */
+#define LNET_MD_OP_GET (1 << 1)
+/** See lnet_md_t::options. */
+#define LNET_MD_MANAGE_REMOTE (1 << 2)
+/* unused (1 << 3) */
+/** See lnet_md_t::options. */
+#define LNET_MD_TRUNCATE (1 << 4)
+/** See lnet_md_t::options. */
+#define LNET_MD_ACK_DISABLE (1 << 5)
+/** See lnet_md_t::options. */
+#define LNET_MD_IOVEC (1 << 6)
+/** See lnet_md_t::options. */
+#define LNET_MD_MAX_SIZE (1 << 7)
+/** See lnet_md_t::options. */
+#define LNET_MD_KIOV (1 << 8)
+
+/* For compatibility with Cray Portals */
+#define LNET_MD_PHYS 0
+
+/** Infinite threshold on MD operations. See lnet_md_t::threshold */
+#define LNET_MD_THRESH_INF (-1)
+
+/* NB lustre portals uses struct iovec internally! */
+typedef struct iovec lnet_md_iovec_t;
+
+/**
+ * A page-based fragment of a MD.
+ */
+typedef struct {
+ /** Pointer to the page where the fragment resides */
+ struct page *kiov_page;
+ /** Length in bytes of the fragment */
+ unsigned int kiov_len;
+ /**
+ * Starting offset of the fragment within the page. Note that the
+ * end of the fragment must not pass the end of the page; i.e.,
+ * kiov_len + kiov_offset <= PAGE_CACHE_SIZE.
+ */
+ unsigned int kiov_offset;
+} lnet_kiov_t;
+/** @} lnet_md */
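A hedged one-liner showing how a lnet_kiov_t fragment is described, respecting the constraint above that the fragment must not run past the end of its page; the 256-byte offset is an arbitrary example.

/* Illustrative only: one page-based fragment starting 256 bytes into
 * the page. kiov_offset + kiov_len must stay within PAGE_CACHE_SIZE. */
static void demo_fill_kiov(lnet_kiov_t *kiov, struct page *pg)
{
	kiov->kiov_page   = pg;
	kiov->kiov_offset = 256;
	kiov->kiov_len    = PAGE_CACHE_SIZE - 256;
}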
+
+/** \addtogroup lnet_eq
+ * @{ */
+
+/**
+ * Six types of events can be logged in an event queue.
+ */
+typedef enum {
+ /** An incoming GET operation has completed on the MD. */
+ LNET_EVENT_GET = 1,
+ /**
+ * An incoming PUT operation has completed on the MD. The
+ * underlying layers will not alter the memory (on behalf of this
+ * operation) once this event has been logged.
+ */
+ LNET_EVENT_PUT,
+ /**
+ * A REPLY operation has completed. This event is logged after the
+ * data (if any) from the REPLY has been written into the MD.
+ */
+ LNET_EVENT_REPLY,
+ /** An acknowledgment has been received. */
+ LNET_EVENT_ACK,
+ /**
+ * An outgoing send (PUT or GET) operation has completed. This event
+ * is logged after the entire buffer has been sent and it is safe for
+ * the caller to reuse the buffer.
+ *
+ * Note:
+ * - The LNET_EVENT_SEND doesn't guarantee message delivery. It can
+ * happen even when the message has not yet been put out on wire.
+ * - It's unsafe to assume that in an outgoing GET operation
+ * the LNET_EVENT_SEND event would happen before the
+ * LNET_EVENT_REPLY event. The same holds for LNET_EVENT_SEND and
+ * LNET_EVENT_ACK events in an outgoing PUT operation.
+ */
+ LNET_EVENT_SEND,
+ /**
+ * A MD has been unlinked. Note that LNetMDUnlink() does not
+ * necessarily trigger an LNET_EVENT_UNLINK event.
+ * \see LNetMDUnlink
+ */
+ LNET_EVENT_UNLINK,
+} lnet_event_kind_t;
+
+#define LNET_SEQ_BASETYPE long
+typedef unsigned LNET_SEQ_BASETYPE lnet_seq_t;
+#define LNET_SEQ_GT(a,b) (((signed LNET_SEQ_BASETYPE)((a) - (b))) > 0)
+
+/* XXX
+ * Cygwin needs the pragma line; it is not clear whether it is needed
+ * anywhere else.
+ */
+#ifdef __CYGWIN__
+#pragma pack(push, 4)
+#endif
+
+/**
+ * Information about an event on a MD.
+ */
+typedef struct {
+ /** The identifier (nid, pid) of the target. */
+ lnet_process_id_t target;
+ /** The identifier (nid, pid) of the initiator. */
+ lnet_process_id_t initiator;
+ /**
+ * The NID of the immediate sender. If the request has been forwarded
+ * by routers, this is the NID of the last hop; otherwise it's the
+ * same as the initiator.
+ */
+ lnet_nid_t sender;
+ /** Indicates the type of the event. */
+ lnet_event_kind_t type;
+ /** The portal table index specified in the request */
+ unsigned int pt_index;
+ /** A copy of the match bits specified in the request. */
+ __u64 match_bits;
+ /** The length (in bytes) specified in the request. */
+ unsigned int rlength;
+ /**
+ * The length (in bytes) of the data that was manipulated by the
+ * operation. For truncated operations, the manipulated length will be
+ * the number of bytes specified by the MD (possibly with an offset,
+ * see lnet_md_t). For all other operations, the manipulated length
+ * will be the length of the requested operation, i.e. rlength.
+ */
+ unsigned int mlength;
+ /**
+ * The handle to the MD associated with the event. The handle may be
+ * invalid if the MD has been unlinked.
+ */
+ lnet_handle_md_t md_handle;
+ /**
+ * A snapshot of the state of the MD immediately after the event has
+ * been processed. In particular, the threshold field in md will
+ * reflect the value of the threshold after the operation occurred.
+ */
+ lnet_md_t md;
+ /**
+ * 64 bits of out-of-band user data. Only valid for LNET_EVENT_PUT.
+ * \see LNetPut
+ */
+ __u64 hdr_data;
+ /**
+ * Indicates the completion status of the operation. It's 0 for
+ * successful operations, otherwise it's an error code.
+ */
+ int status;
+ /**
+ * Indicates whether the MD has been unlinked. Note that:
+ * - An event with unlinked set is the last event on the MD.
+ * - This field is also set for an explicit LNET_EVENT_UNLINK event.
+ * \see LNetMDUnlink
+ */
+ int unlinked;
+ /**
+ * The displacement (in bytes) into the memory region that the
+ * operation used. The offset can be determined by the operation for
+ * a remote managed MD or by the local MD.
+ * \see lnet_md_t::options
+ */
+ unsigned int offset;
+ /**
+ * The sequence number for this event. Sequence numbers are unique
+ * to each event.
+ */
+ volatile lnet_seq_t sequence;
+} lnet_event_t;
+#ifdef __CYGWIN__
+#pragma pop
+#endif
+
+/**
+ * Event queue handler function type.
+ *
+ * The EQ handler runs for each event that is deposited into the EQ. The
+ * handler is supplied with a pointer to the event that triggered the
+ * handler invocation.
+ *
+ * The handler must not block, must be reentrant, and must not call any LNet
+ * API functions. It should return as quickly as possible.
+ */
+typedef void (*lnet_eq_handler_t)(lnet_event_t *event);
+#define LNET_EQ_HANDLER_NONE NULL
+/** @} lnet_eq */
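Given the constraints stated above (non-blocking, reentrant, no LNet API calls), an EQ handler usually just records what happened and defers real work elsewhere. A hedged minimal sketch, with hypothetical names:

/* Illustrative only: count successful PUT completions and nothing more;
 * anything heavier belongs in a work queue or thread, not the handler. */
#include <linux/atomic.h>

static atomic_t demo_puts_done = ATOMIC_INIT(0);

static void demo_eq_handler(lnet_event_t *ev)
{
	/* No blocking, no LNet API calls: just record the completion. */
	if (ev->type == LNET_EVENT_PUT && ev->status == 0)
		atomic_inc(&demo_puts_done);
}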
+
+/** \addtogroup lnet_data
+ * @{ */
+
+/**
+ * Specify whether an acknowledgment should be sent by target when the PUT
+ * operation completes (i.e., when the data has been written to a MD of the
+ * target process).
+ *
+ * \see lnet_md_t::options for the discussion on LNET_MD_ACK_DISABLE by which
+ * acknowledgments can be disabled for a MD.
+ */
+typedef enum {
+ /** Request an acknowledgment */
+ LNET_ACK_REQ,
+ /** Request that no acknowledgment should be generated. */
+ LNET_NOACK_REQ
+} lnet_ack_req_t;
+/** @} lnet_data */
+
+/** @} lnet */
+#endif
diff --git a/drivers/staging/lustre/lnet/Kconfig b/drivers/staging/lustre/lnet/Kconfig
new file mode 100644
index 0000000..00850ee
--- /dev/null
+++ b/drivers/staging/lustre/lnet/Kconfig
@@ -0,0 +1,40 @@
+config LNET
+ tristate "Lustre networking subsystem"
+ depends on LUSTRE_FS
+
+config LNET_MAX_PAYLOAD
+	int "Lustre lnet max transfer payload (default 1MB)"
+ depends on LUSTRE_FS
+ default "1048576"
+ help
+ This option defines the maximum size of payload in bytes that lnet
+ can put into its transport.
+
+ If unsure, use default.
+
+config LNET_SELFTEST
+ tristate "Lustre networking self testing"
+ depends on LNET
+ help
+	  Choose Y here if you want to do lnet self testing.
+
+	  To compile this as a kernel module, choose M here: the module
+	  will be called lnet_selftest.
+
+ If unsure, say N.
+
+ See also http://wiki.lustre.org/
+
+config LNET_XPRT_IB
+ tristate "LNET infiniband support"
+ depends on LNET && INFINIBAND && INFINIBAND_ADDR_TRANS
+ default LNET && INFINIBAND
+ help
+	  This option allows LNET users to use infiniband as an
+ RDMA-enabled transport.
+
+ To compile this as a kernel module, choose M here and it will be
+ called ko2iblnd.
+
+ If unsure, say N.
diff --git a/drivers/staging/lustre/lnet/Makefile b/drivers/staging/lustre/lnet/Makefile
new file mode 100644
index 0000000..f6f03e3
--- /dev/null
+++ b/drivers/staging/lustre/lnet/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_LNET) += lnet/ klnds/ selftest/
diff --git a/drivers/staging/lustre/lnet/klnds/Makefile b/drivers/staging/lustre/lnet/klnds/Makefile
new file mode 100644
index 0000000..c23e4f6
--- /dev/null
+++ b/drivers/staging/lustre/lnet/klnds/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_LNET) += o2iblnd/ socklnd/
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/Makefile b/drivers/staging/lustre/lnet/klnds/o2iblnd/Makefile
new file mode 100644
index 0000000..71b7d84
--- /dev/null
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_LNET_XPRT_IB) += ko2iblnd.o
+ko2iblnd-y := o2iblnd.o o2iblnd_cb.o o2iblnd_modparams.o
+
+
+ccflags-y := -I$(src)/../../include
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
new file mode 100644
index 0000000..86397f9
--- /dev/null
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -0,0 +1,3261 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/klnds/o2iblnd/o2iblnd.c
+ *
+ * Author: Eric Barton <eric@bartonsoftware.com>
+ */
+
+#include "o2iblnd.h"
+#include <asm/div64.h>
+
+lnd_t the_o2iblnd = {
+ .lnd_type = O2IBLND,
+ .lnd_startup = kiblnd_startup,
+ .lnd_shutdown = kiblnd_shutdown,
+ .lnd_ctl = kiblnd_ctl,
+ .lnd_query = kiblnd_query,
+ .lnd_send = kiblnd_send,
+ .lnd_recv = kiblnd_recv,
+};
+
+kib_data_t kiblnd_data;
+
+__u32
+kiblnd_cksum (void *ptr, int nob)
+{
+ char *c = ptr;
+ __u32 sum = 0;
+
+ while (nob-- > 0)
+ sum = ((sum << 1) | (sum >> 31)) + *c++;
+
+ /* ensure I don't return 0 (== no checksum) */
+ return (sum == 0) ? 1 : sum;
+}
+
+static char *
+kiblnd_msgtype2str(int type)
+{
+ switch (type) {
+ case IBLND_MSG_CONNREQ:
+ return "CONNREQ";
+
+ case IBLND_MSG_CONNACK:
+ return "CONNACK";
+
+ case IBLND_MSG_NOOP:
+ return "NOOP";
+
+ case IBLND_MSG_IMMEDIATE:
+ return "IMMEDIATE";
+
+ case IBLND_MSG_PUT_REQ:
+ return "PUT_REQ";
+
+ case IBLND_MSG_PUT_NAK:
+ return "PUT_NAK";
+
+ case IBLND_MSG_PUT_ACK:
+ return "PUT_ACK";
+
+ case IBLND_MSG_PUT_DONE:
+ return "PUT_DONE";
+
+ case IBLND_MSG_GET_REQ:
+ return "GET_REQ";
+
+ case IBLND_MSG_GET_DONE:
+ return "GET_DONE";
+
+ default:
+ return "???";
+ }
+}
+
+static int
+kiblnd_msgtype2size(int type)
+{
+ const int hdr_size = offsetof(kib_msg_t, ibm_u);
+
+ switch (type) {
+ case IBLND_MSG_CONNREQ:
+ case IBLND_MSG_CONNACK:
+ return hdr_size + sizeof(kib_connparams_t);
+
+ case IBLND_MSG_NOOP:
+ return hdr_size;
+
+ case IBLND_MSG_IMMEDIATE:
+ return offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0]);
+
+ case IBLND_MSG_PUT_REQ:
+ return hdr_size + sizeof(kib_putreq_msg_t);
+
+ case IBLND_MSG_PUT_ACK:
+ return hdr_size + sizeof(kib_putack_msg_t);
+
+ case IBLND_MSG_GET_REQ:
+ return hdr_size + sizeof(kib_get_msg_t);
+
+ case IBLND_MSG_PUT_NAK:
+ case IBLND_MSG_PUT_DONE:
+ case IBLND_MSG_GET_DONE:
+ return hdr_size + sizeof(kib_completion_msg_t);
+ default:
+ return -1;
+ }
+}
+
+static int
+kiblnd_unpack_rd(kib_msg_t *msg, int flip)
+{
+ kib_rdma_desc_t *rd;
+ int nob;
+ int n;
+ int i;
+
+ LASSERT (msg->ibm_type == IBLND_MSG_GET_REQ ||
+ msg->ibm_type == IBLND_MSG_PUT_ACK);
+
+ rd = msg->ibm_type == IBLND_MSG_GET_REQ ?
+ &msg->ibm_u.get.ibgm_rd :
+ &msg->ibm_u.putack.ibpam_rd;
+
+ if (flip) {
+ __swab32s(&rd->rd_key);
+ __swab32s(&rd->rd_nfrags);
+ }
+
+ n = rd->rd_nfrags;
+
+ if (n <= 0 || n > IBLND_MAX_RDMA_FRAGS) {
+ CERROR("Bad nfrags: %d, should be 0 < n <= %d\n",
+ n, IBLND_MAX_RDMA_FRAGS);
+ return 1;
+ }
+
+ nob = offsetof (kib_msg_t, ibm_u) +
+ kiblnd_rd_msg_size(rd, msg->ibm_type, n);
+
+ if (msg->ibm_nob < nob) {
+ CERROR("Short %s: %d(%d)\n",
+ kiblnd_msgtype2str(msg->ibm_type), msg->ibm_nob, nob);
+ return 1;
+ }
+
+ if (!flip)
+ return 0;
+
+ for (i = 0; i < n; i++) {
+ __swab32s(&rd->rd_frags[i].rf_nob);
+ __swab64s(&rd->rd_frags[i].rf_addr);
+ }
+
+ return 0;
+}
+
+void
+kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg, int version,
+ int credits, lnet_nid_t dstnid, __u64 dststamp)
+{
+ kib_net_t *net = ni->ni_data;
+
+ /* CAVEAT EMPTOR! all message fields not set here should have been
+ * initialised previously. */
+ msg->ibm_magic = IBLND_MSG_MAGIC;
+ msg->ibm_version = version;
+ /* ibm_type */
+ msg->ibm_credits = credits;
+ /* ibm_nob */
+ msg->ibm_cksum = 0;
+ msg->ibm_srcnid = ni->ni_nid;
+ msg->ibm_srcstamp = net->ibn_incarnation;
+ msg->ibm_dstnid = dstnid;
+ msg->ibm_dststamp = dststamp;
+
+ if (*kiblnd_tunables.kib_cksum) {
+ /* NB ibm_cksum zero while computing cksum */
+ msg->ibm_cksum = kiblnd_cksum(msg, msg->ibm_nob);
+ }
+}
+
+int
+kiblnd_unpack_msg(kib_msg_t *msg, int nob)
+{
+ const int hdr_size = offsetof(kib_msg_t, ibm_u);
+ __u32 msg_cksum;
+ __u16 version;
+ int msg_nob;
+ int flip;
+
+ /* 6 bytes are enough to have received magic + version */
+ if (nob < 6) {
+ CERROR("Short message: %d\n", nob);
+ return -EPROTO;
+ }
+
+ if (msg->ibm_magic == IBLND_MSG_MAGIC) {
+ flip = 0;
+ } else if (msg->ibm_magic == __swab32(IBLND_MSG_MAGIC)) {
+ flip = 1;
+ } else {
+ CERROR("Bad magic: %08x\n", msg->ibm_magic);
+ return -EPROTO;
+ }
+
+ version = flip ? __swab16(msg->ibm_version) : msg->ibm_version;
+ if (version != IBLND_MSG_VERSION &&
+ version != IBLND_MSG_VERSION_1) {
+ CERROR("Bad version: %x\n", version);
+ return -EPROTO;
+ }
+
+ if (nob < hdr_size) {
+ CERROR("Short message: %d\n", nob);
+ return -EPROTO;
+ }
+
+ msg_nob = flip ? __swab32(msg->ibm_nob) : msg->ibm_nob;
+ if (msg_nob > nob) {
+ CERROR("Short message: got %d, wanted %d\n", nob, msg_nob);
+ return -EPROTO;
+ }
+
+ /* checksum must be computed with ibm_cksum zero and BEFORE anything
+ * gets flipped */
+ msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum;
+ msg->ibm_cksum = 0;
+ if (msg_cksum != 0 &&
+ msg_cksum != kiblnd_cksum(msg, msg_nob)) {
+ CERROR("Bad checksum\n");
+ return -EPROTO;
+ }
+
+ msg->ibm_cksum = msg_cksum;
+
+ if (flip) {
+ /* leave magic unflipped as a clue to peer endianness */
+ msg->ibm_version = version;
+ CLASSERT (sizeof(msg->ibm_type) == 1);
+ CLASSERT (sizeof(msg->ibm_credits) == 1);
+ msg->ibm_nob = msg_nob;
+ __swab64s(&msg->ibm_srcnid);
+ __swab64s(&msg->ibm_srcstamp);
+ __swab64s(&msg->ibm_dstnid);
+ __swab64s(&msg->ibm_dststamp);
+ }
+
+ if (msg->ibm_srcnid == LNET_NID_ANY) {
+ CERROR("Bad src nid: %s\n", libcfs_nid2str(msg->ibm_srcnid));
+ return -EPROTO;
+ }
+
+ if (msg_nob < kiblnd_msgtype2size(msg->ibm_type)) {
+ CERROR("Short %s: %d(%d)\n", kiblnd_msgtype2str(msg->ibm_type),
+ msg_nob, kiblnd_msgtype2size(msg->ibm_type));
+ return -EPROTO;
+ }
+
+ switch (msg->ibm_type) {
+ default:
+ CERROR("Unknown message type %x\n", msg->ibm_type);
+ return -EPROTO;
+
+ case IBLND_MSG_NOOP:
+ case IBLND_MSG_IMMEDIATE:
+ case IBLND_MSG_PUT_REQ:
+ break;
+
+ case IBLND_MSG_PUT_ACK:
+ case IBLND_MSG_GET_REQ:
+ if (kiblnd_unpack_rd(msg, flip))
+ return -EPROTO;
+ break;
+
+ case IBLND_MSG_PUT_NAK:
+ case IBLND_MSG_PUT_DONE:
+ case IBLND_MSG_GET_DONE:
+ if (flip)
+ __swab32s(&msg->ibm_u.completion.ibcm_status);
+ break;
+
+ case IBLND_MSG_CONNREQ:
+ case IBLND_MSG_CONNACK:
+ if (flip) {
+ __swab16s(&msg->ibm_u.connparams.ibcp_queue_depth);
+ __swab16s(&msg->ibm_u.connparams.ibcp_max_frags);
+ __swab32s(&msg->ibm_u.connparams.ibcp_max_msg_size);
+ }
+ break;
+ }
+ return 0;
+}
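Editor's note: the endianness handshake in kiblnd_unpack_msg() works because the sender writes IBLND_MSG_MAGIC in host order and the receiver compares the received magic against both the native and the byte-swapped constant; the magic itself is deliberately left unflipped as a clue to the peer's endianness. The standalone sketch below shows just that pattern; MSG_MAGIC and swab32() are local stand-ins, not the driver's definitions.

/* Illustrative userspace sketch of the magic/byte-order check. */
#include <stdint.h>
#include <stdio.h>

#define MSG_MAGIC 0x0be91b91u           /* arbitrary example value */

static uint32_t swab32(uint32_t v)
{
    return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
           ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
}

static int detect_flip(uint32_t wire_magic)
{
    if (wire_magic == MSG_MAGIC)
        return 0;                       /* peer has my endianness */
    if (wire_magic == swab32(MSG_MAGIC))
        return 1;                       /* other-endian peer: swab fields */
    return -1;                          /* not this protocol at all */
}

int main(void)
{
    printf("same-endian peer  -> flip=%d\n", detect_flip(MSG_MAGIC));
    printf("cross-endian peer -> flip=%d\n", detect_flip(swab32(MSG_MAGIC)));
    return 0;
}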
+
+int
+kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
+{
+ kib_peer_t *peer;
+ kib_net_t *net = ni->ni_data;
+ int cpt = lnet_cpt_of_nid(nid);
+ unsigned long flags;
+
+ LASSERT(net != NULL);
+ LASSERT(nid != LNET_NID_ANY);
+
+ LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
+ if (peer == NULL) {
+ CERROR("Cannot allocate peer\n");
+ return -ENOMEM;
+ }
+
+ memset(peer, 0, sizeof(*peer)); /* zero flags etc */
+
+ peer->ibp_ni = ni;
+ peer->ibp_nid = nid;
+ peer->ibp_error = 0;
+ peer->ibp_last_alive = 0;
+ atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */
+
+ INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */
+ INIT_LIST_HEAD(&peer->ibp_conns);
+ INIT_LIST_HEAD(&peer->ibp_tx_queue);
+
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+
+ /* always called with a ref on ni, which prevents ni being shutdown */
+ LASSERT (net->ibn_shutdown == 0);
+
+ /* npeers only grows with the global lock held */
+ atomic_inc(&net->ibn_npeers);
+
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+ *peerp = peer;
+ return 0;
+}
+
+void
+kiblnd_destroy_peer (kib_peer_t *peer)
+{
+ kib_net_t *net = peer->ibp_ni->ni_data;
+
+ LASSERT (net != NULL);
+ LASSERT (atomic_read(&peer->ibp_refcount) == 0);
+ LASSERT (!kiblnd_peer_active(peer));
+ LASSERT (peer->ibp_connecting == 0);
+ LASSERT (peer->ibp_accepting == 0);
+ LASSERT (list_empty(&peer->ibp_conns));
+ LASSERT (list_empty(&peer->ibp_tx_queue));
+
+ LIBCFS_FREE(peer, sizeof(*peer));
+
+ /* NB a peer's connections keep a reference on their peer until
+ * they are destroyed, so we can be assured that _all_ state to do
+ * with this peer has been cleaned up when its refcount drops to
+ * zero. */
+ atomic_dec(&net->ibn_npeers);
+}
+
+kib_peer_t *
+kiblnd_find_peer_locked (lnet_nid_t nid)
+{
+ /* the caller is responsible for accounting the additional reference
+ * that this creates */
+ struct list_head *peer_list = kiblnd_nid2peerlist(nid);
+ struct list_head *tmp;
+ kib_peer_t *peer;
+
+ list_for_each (tmp, peer_list) {
+
+ peer = list_entry(tmp, kib_peer_t, ibp_list);
+
+ LASSERT (peer->ibp_connecting > 0 || /* creating conns */
+ peer->ibp_accepting > 0 ||
+ !list_empty(&peer->ibp_conns)); /* active conn */
+
+ if (peer->ibp_nid != nid)
+ continue;
+
+ CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n",
+ peer, libcfs_nid2str(nid),
+ atomic_read(&peer->ibp_refcount),
+ peer->ibp_version);
+ return peer;
+ }
+ return NULL;
+}
+
+void
+kiblnd_unlink_peer_locked (kib_peer_t *peer)
+{
+ LASSERT (list_empty(&peer->ibp_conns));
+
+ LASSERT (kiblnd_peer_active(peer));
+ list_del_init(&peer->ibp_list);
+ /* lose peerlist's ref */
+ kiblnd_peer_decref(peer);
+}
+
+int
+kiblnd_get_peer_info (lnet_ni_t *ni, int index,
+ lnet_nid_t *nidp, int *count)
+{
+ kib_peer_t *peer;
+ struct list_head *ptmp;
+ int i;
+ unsigned long flags;
+
+ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+
+ for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
+
+ list_for_each (ptmp, &kiblnd_data.kib_peers[i]) {
+
+ peer = list_entry(ptmp, kib_peer_t, ibp_list);
+ LASSERT (peer->ibp_connecting > 0 ||
+ peer->ibp_accepting > 0 ||
+ !list_empty(&peer->ibp_conns));
+
+ if (peer->ibp_ni != ni)
+ continue;
+
+ if (index-- > 0)
+ continue;
+
+ *nidp = peer->ibp_nid;
+ *count = atomic_read(&peer->ibp_refcount);
+
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+ flags);
+ return 0;
+ }
+ }
+
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ return -ENOENT;
+}
+
+void
+kiblnd_del_peer_locked (kib_peer_t *peer)
+{
+ struct list_head *ctmp;
+ struct list_head *cnxt;
+ kib_conn_t *conn;
+
+ if (list_empty(&peer->ibp_conns)) {
+ kiblnd_unlink_peer_locked(peer);
+ } else {
+ list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
+ conn = list_entry(ctmp, kib_conn_t, ibc_list);
+
+ kiblnd_close_conn_locked(conn, 0);
+ }
+ /* NB closing peer's last conn unlinked it. */
+ }
+ /* NB peer now unlinked; might even be freed if the peer table had the
+ * last ref on it. */
+}
+
+int
+kiblnd_del_peer (lnet_ni_t *ni, lnet_nid_t nid)
+{
+ LIST_HEAD (zombies);
+ struct list_head *ptmp;
+ struct list_head *pnxt;
+ kib_peer_t *peer;
+ int lo;
+ int hi;
+ int i;
+ unsigned long flags;
+ int rc = -ENOENT;
+
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+
+ if (nid != LNET_NID_ANY) {
+ lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
+ } else {
+ lo = 0;
+ hi = kiblnd_data.kib_peer_hash_size - 1;
+ }
+
+ for (i = lo; i <= hi; i++) {
+ list_for_each_safe (ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
+ peer = list_entry(ptmp, kib_peer_t, ibp_list);
+ LASSERT (peer->ibp_connecting > 0 ||
+ peer->ibp_accepting > 0 ||
+ !list_empty(&peer->ibp_conns));
+
+ if (peer->ibp_ni != ni)
+ continue;
+
+ if (!(nid == LNET_NID_ANY || peer->ibp_nid == nid))
+ continue;
+
+ if (!list_empty(&peer->ibp_tx_queue)) {
+ LASSERT (list_empty(&peer->ibp_conns));
+
+ list_splice_init(&peer->ibp_tx_queue,
+ &zombies);
+ }
+
+ kiblnd_del_peer_locked(peer);
+ rc = 0; /* matched something */
+ }
+ }
+
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+ kiblnd_txlist_done(ni, &zombies, -EIO);
+
+ return rc;
+}
+
+kib_conn_t *
+kiblnd_get_conn_by_idx (lnet_ni_t *ni, int index)
+{
+ kib_peer_t *peer;
+ struct list_head *ptmp;
+ kib_conn_t *conn;
+ struct list_head *ctmp;
+ int i;
+ unsigned long flags;
+
+ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+
+ for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
+ list_for_each (ptmp, &kiblnd_data.kib_peers[i]) {
+
+ peer = list_entry(ptmp, kib_peer_t, ibp_list);
+ LASSERT (peer->ibp_connecting > 0 ||
+ peer->ibp_accepting > 0 ||
+ !list_empty(&peer->ibp_conns));
+
+ if (peer->ibp_ni != ni)
+ continue;
+
+ list_for_each (ctmp, &peer->ibp_conns) {
+ if (index-- > 0)
+ continue;
+
+ conn = list_entry(ctmp, kib_conn_t,
+ ibc_list);
+ kiblnd_conn_addref(conn);
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+ flags);
+ return conn;
+ }
+ }
+ }
+
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ return NULL;
+}
+
+void
+kiblnd_debug_rx (kib_rx_t *rx)
+{
+ CDEBUG(D_CONSOLE, " %p status %d msg_type %x cred %d\n",
+ rx, rx->rx_status, rx->rx_msg->ibm_type,
+ rx->rx_msg->ibm_credits);
+}
+
+void
+kiblnd_debug_tx (kib_tx_t *tx)
+{
+ CDEBUG(D_CONSOLE, " %p snd %d q %d w %d rc %d dl %lx "
+ "cookie "LPX64" msg %s%s type %x cred %d\n",
+ tx, tx->tx_sending, tx->tx_queued, tx->tx_waiting,
+ tx->tx_status, tx->tx_deadline, tx->tx_cookie,
+ tx->tx_lntmsg[0] == NULL ? "-" : "!",
+ tx->tx_lntmsg[1] == NULL ? "-" : "!",
+ tx->tx_msg->ibm_type, tx->tx_msg->ibm_credits);
+}
+
+void
+kiblnd_debug_conn (kib_conn_t *conn)
+{
+ struct list_head *tmp;
+ int i;
+
+ spin_lock(&conn->ibc_lock);
+
+ CDEBUG(D_CONSOLE, "conn[%d] %p [version %x] -> %s: \n",
+ atomic_read(&conn->ibc_refcount), conn,
+ conn->ibc_version, libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ CDEBUG(D_CONSOLE, " state %d nposted %d/%d cred %d o_cred %d r_cred %d\n",
+ conn->ibc_state, conn->ibc_noops_posted,
+ conn->ibc_nsends_posted, conn->ibc_credits,
+ conn->ibc_outstanding_credits, conn->ibc_reserved_credits);
+ CDEBUG(D_CONSOLE, " comms_err %d\n", conn->ibc_comms_error);
+
+ CDEBUG(D_CONSOLE, " early_rxs:\n");
+ list_for_each(tmp, &conn->ibc_early_rxs)
+ kiblnd_debug_rx(list_entry(tmp, kib_rx_t, rx_list));
+
+ CDEBUG(D_CONSOLE, " tx_noops:\n");
+ list_for_each(tmp, &conn->ibc_tx_noops)
+ kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
+
+ CDEBUG(D_CONSOLE, " tx_queue_nocred:\n");
+ list_for_each(tmp, &conn->ibc_tx_queue_nocred)
+ kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
+
+ CDEBUG(D_CONSOLE, " tx_queue_rsrvd:\n");
+ list_for_each(tmp, &conn->ibc_tx_queue_rsrvd)
+ kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
+
+ CDEBUG(D_CONSOLE, " tx_queue:\n");
+ list_for_each(tmp, &conn->ibc_tx_queue)
+ kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
+
+ CDEBUG(D_CONSOLE, " active_txs:\n");
+ list_for_each(tmp, &conn->ibc_active_txs)
+ kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
+
+ CDEBUG(D_CONSOLE, " rxs:\n");
+ for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++)
+ kiblnd_debug_rx(&conn->ibc_rxs[i]);
+
+ spin_unlock(&conn->ibc_lock);
+}
+
+int
+kiblnd_translate_mtu(int value)
+{
+ switch (value) {
+ default:
+ return -1;
+ case 0:
+ return 0;
+ case 256:
+ return IB_MTU_256;
+ case 512:
+ return IB_MTU_512;
+ case 1024:
+ return IB_MTU_1024;
+ case 2048:
+ return IB_MTU_2048;
+ case 4096:
+ return IB_MTU_4096;
+ }
+}
+
+static void
+kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid)
+{
+ int mtu;
+
+ /* XXX There is no path record for iWARP, set by netdev->change_mtu? */
+ if (cmid->route.path_rec == NULL)
+ return;
+
+ mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu);
+ LASSERT (mtu >= 0);
+ if (mtu != 0)
+ cmid->route.path_rec->mtu = mtu;
+}
+
+static int
+kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
+{
+ cpumask_t *mask;
+ int vectors;
+ int off;
+ int i;
+ lnet_nid_t nid = conn->ibc_peer->ibp_nid;
+
+ vectors = conn->ibc_cmid->device->num_comp_vectors;
+ if (vectors <= 1)
+ return 0;
+
+ mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt);
+ if (mask == NULL)
+ return 0;
+
+ /* hash NID to CPU id in this partition... */
+ off = do_div(nid, cpus_weight(*mask));
+ for_each_cpu_mask(i, *mask) {
+ if (off-- == 0)
+ return i % vectors;
+ }
+
+ LBUG();
+ return 1;
+}
+
+kib_conn_t *
+kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
+ int state, int version)
+{
+ /* CAVEAT EMPTOR:
+ * If the new conn is created successfully it takes over the caller's
+ * ref on 'peer'. It also "owns" 'cmid' and destroys it when it itself
+ * is destroyed. On failure, the caller's ref on 'peer' remains and
+ * she must dispose of 'cmid'. (Actually I'd block forever if I tried
+ * to destroy 'cmid' here since I'm called from the CM which still has
+ * its ref on 'cmid'). */
+ rwlock_t *glock = &kiblnd_data.kib_global_lock;
+ kib_net_t *net = peer->ibp_ni->ni_data;
+ kib_dev_t *dev;
+ struct ib_qp_init_attr *init_qp_attr;
+ struct kib_sched_info *sched;
+ kib_conn_t *conn;
+ struct ib_cq *cq;
+ unsigned long flags;
+ int cpt;
+ int rc;
+ int i;
+
+ LASSERT(net != NULL);
+ LASSERT(!in_interrupt());
+
+ dev = net->ibn_dev;
+
+ cpt = lnet_cpt_of_nid(peer->ibp_nid);
+ sched = kiblnd_data.kib_scheds[cpt];
+
+ LASSERT(sched->ibs_nthreads > 0);
+
+ LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt,
+ sizeof(*init_qp_attr));
+ if (init_qp_attr == NULL) {
+ CERROR("Can't allocate qp_attr for %s\n",
+ libcfs_nid2str(peer->ibp_nid));
+ goto failed_0;
+ }
+
+ LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn));
+ if (conn == NULL) {
+ CERROR("Can't allocate connection for %s\n",
+ libcfs_nid2str(peer->ibp_nid));
+ goto failed_1;
+ }
+
+ conn->ibc_state = IBLND_CONN_INIT;
+ conn->ibc_version = version;
+ conn->ibc_peer = peer; /* I take the caller's ref */
+ cmid->context = conn; /* for future CM callbacks */
+ conn->ibc_cmid = cmid;
+
+ INIT_LIST_HEAD(&conn->ibc_early_rxs);
+ INIT_LIST_HEAD(&conn->ibc_tx_noops);
+ INIT_LIST_HEAD(&conn->ibc_tx_queue);
+ INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
+ INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
+ INIT_LIST_HEAD(&conn->ibc_active_txs);
+ spin_lock_init(&conn->ibc_lock);
+
+ LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt,
+ sizeof(*conn->ibc_connvars));
+ if (conn->ibc_connvars == NULL) {
+ CERROR("Can't allocate in-progress connection state\n");
+ goto failed_2;
+ }
+
+ write_lock_irqsave(glock, flags);
+ if (dev->ibd_failover) {
+ write_unlock_irqrestore(glock, flags);
+ CERROR("%s: failover in progress\n", dev->ibd_ifname);
+ goto failed_2;
+ }
+
+ if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
+ /* wakeup failover thread and teardown connection */
+ if (kiblnd_dev_can_failover(dev)) {
+ list_add_tail(&dev->ibd_fail_list,
+ &kiblnd_data.kib_failed_devs);
+ wake_up(&kiblnd_data.kib_failover_waitq);
+ }
+
+ write_unlock_irqrestore(glock, flags);
+ CERROR("cmid HCA(%s), kib_dev(%s) need failover\n",
+ cmid->device->name, dev->ibd_ifname);
+ goto failed_2;
+ }
+
+ kiblnd_hdev_addref_locked(dev->ibd_hdev);
+ conn->ibc_hdev = dev->ibd_hdev;
+
+ kiblnd_setup_mtu_locked(cmid);
+
+ write_unlock_irqrestore(glock, flags);
+
+ LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
+ IBLND_RX_MSGS(version) * sizeof(kib_rx_t));
+ if (conn->ibc_rxs == NULL) {
+ CERROR("Cannot allocate RX buffers\n");
+ goto failed_2;
+ }
+
+ rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt,
+ IBLND_RX_MSG_PAGES(version));
+ if (rc != 0)
+ goto failed_2;
+
+ kiblnd_map_rx_descs(conn);
+
+ cq = ib_create_cq(cmid->device,
+ kiblnd_cq_completion, kiblnd_cq_event, conn,
+ IBLND_CQ_ENTRIES(version),
+ kiblnd_get_completion_vector(conn, cpt));
+ if (IS_ERR(cq)) {
+ CERROR("Can't create CQ: %ld, cqe: %d\n",
+ PTR_ERR(cq), IBLND_CQ_ENTRIES(version));
+ goto failed_2;
+ }
+
+ conn->ibc_cq = cq;
+
+ rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+ if (rc != 0) {
+ CERROR("Can't request completion notificiation: %d\n", rc);
+ goto failed_2;
+ }
+
+ init_qp_attr->event_handler = kiblnd_qp_event;
+ init_qp_attr->qp_context = conn;
+ init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(version);
+ init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(version);
+ init_qp_attr->cap.max_send_sge = 1;
+ init_qp_attr->cap.max_recv_sge = 1;
+ init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
+ init_qp_attr->qp_type = IB_QPT_RC;
+ init_qp_attr->send_cq = cq;
+ init_qp_attr->recv_cq = cq;
+
+ conn->ibc_sched = sched;
+
+ rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr);
+ if (rc != 0) {
+ CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n",
+ rc, init_qp_attr->cap.max_send_wr,
+ init_qp_attr->cap.max_recv_wr);
+ goto failed_2;
+ }
+
+ LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
+
+ /* 1 ref for caller and each rxmsg */
+ atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(version));
+ conn->ibc_nrx = IBLND_RX_MSGS(version);
+
+ /* post receives */
+ for (i = 0; i < IBLND_RX_MSGS(version); i++) {
+ rc = kiblnd_post_rx(&conn->ibc_rxs[i],
+ IBLND_POSTRX_NO_CREDIT);
+ if (rc != 0) {
+ CERROR("Can't post rxmsg: %d\n", rc);
+
+ /* Make posted receives complete */
+ kiblnd_abort_receives(conn);
+
+ /* correct # of posted buffers
+ * NB locking needed now I'm racing with completion */
+ spin_lock_irqsave(&sched->ibs_lock, flags);
+ conn->ibc_nrx -= IBLND_RX_MSGS(version) - i;
+ spin_unlock_irqrestore(&sched->ibs_lock, flags);
+
+ /* cmid will be destroyed by CM(ofed) after cm_callback
+ * returned, so we can't refer it anymore
+ * (by kiblnd_connd()->kiblnd_destroy_conn) */
+ rdma_destroy_qp(conn->ibc_cmid);
+ conn->ibc_cmid = NULL;
+
+ /* Drop my own and unused rxbuffer refcounts */
+ while (i++ <= IBLND_RX_MSGS(version))
+ kiblnd_conn_decref(conn);
+
+ return NULL;
+ }
+ }
+
+ /* Init successful! */
+ LASSERT (state == IBLND_CONN_ACTIVE_CONNECT ||
+ state == IBLND_CONN_PASSIVE_WAIT);
+ conn->ibc_state = state;
+
+ /* 1 more conn */
+ atomic_inc(&net->ibn_nconns);
+ return conn;
+
+ failed_2:
+ kiblnd_destroy_conn(conn);
+ failed_1:
+ LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
+ failed_0:
+ return NULL;
+}
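Editor's note: the CAVEAT EMPTOR above defines the ownership contract of kiblnd_create_conn() — on success the connection consumes the caller's reference on 'peer' and owns 'cmid'; on failure both remain the caller's responsibility, and the cmid must not be destroyed from inside the CM callback. A hedged caller-side sketch of that contract follows; kiblnd_peer_addref() is assumed to be the usual refcount helper from o2iblnd.h, and the surrounding CM plumbing is omitted.

/* Illustrative caller pattern only; not part of the patch. */
static kib_conn_t *
example_accept(kib_peer_t *peer, struct rdma_cm_id *cmid, int version)
{
    kib_conn_t *conn;

    kiblnd_peer_addref(peer);           /* ref the conn will consume */

    conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version);
    if (conn == NULL) {
        /* conn took neither my peer ref nor the cmid: drop the ref,
         * but never destroy the cmid from within a CM callback. */
        kiblnd_peer_decref(peer);
        return NULL;
    }

    /* success: conn now owns the peer ref and the cmid */
    return conn;
}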
+
+void
+kiblnd_destroy_conn (kib_conn_t *conn)
+{
+ struct rdma_cm_id *cmid = conn->ibc_cmid;
+ kib_peer_t *peer = conn->ibc_peer;
+ int rc;
+
+ LASSERT (!in_interrupt());
+ LASSERT (atomic_read(&conn->ibc_refcount) == 0);
+ LASSERT (list_empty(&conn->ibc_early_rxs));
+ LASSERT (list_empty(&conn->ibc_tx_noops));
+ LASSERT (list_empty(&conn->ibc_tx_queue));
+ LASSERT (list_empty(&conn->ibc_tx_queue_rsrvd));
+ LASSERT (list_empty(&conn->ibc_tx_queue_nocred));
+ LASSERT (list_empty(&conn->ibc_active_txs));
+ LASSERT (conn->ibc_noops_posted == 0);
+ LASSERT (conn->ibc_nsends_posted == 0);
+
+ switch (conn->ibc_state) {
+ default:
+ /* conn must be completely disengaged from the network */
+ LBUG();
+
+ case IBLND_CONN_DISCONNECTED:
+ /* connvars should have been freed already */
+ LASSERT (conn->ibc_connvars == NULL);
+ break;
+
+ case IBLND_CONN_INIT:
+ break;
+ }
+
+ /* conn->ibc_cmid might be destroyed by CM already */
+ if (cmid != NULL && cmid->qp != NULL)
+ rdma_destroy_qp(cmid);
+
+ if (conn->ibc_cq != NULL) {
+ rc = ib_destroy_cq(conn->ibc_cq);
+ if (rc != 0)
+ CWARN("Error destroying CQ: %d\n", rc);
+ }
+
+ if (conn->ibc_rx_pages != NULL)
+ kiblnd_unmap_rx_descs(conn);
+
+ if (conn->ibc_rxs != NULL) {
+ LIBCFS_FREE(conn->ibc_rxs,
+ IBLND_RX_MSGS(conn->ibc_version) * sizeof(kib_rx_t));
+ }
+
+ if (conn->ibc_connvars != NULL)
+ LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
+
+ if (conn->ibc_hdev != NULL)
+ kiblnd_hdev_decref(conn->ibc_hdev);
+
+ /* See CAVEAT EMPTOR above in kiblnd_create_conn */
+ if (conn->ibc_state != IBLND_CONN_INIT) {
+ kib_net_t *net = peer->ibp_ni->ni_data;
+
+ kiblnd_peer_decref(peer);
+ rdma_destroy_id(cmid);
+ atomic_dec(&net->ibn_nconns);
+ }
+
+ LIBCFS_FREE(conn, sizeof(*conn));
+}
+
+int
+kiblnd_close_peer_conns_locked (kib_peer_t *peer, int why)
+{
+ kib_conn_t *conn;
+ struct list_head *ctmp;
+ struct list_head *cnxt;
+ int count = 0;
+
+ list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
+ conn = list_entry(ctmp, kib_conn_t, ibc_list);
+
+ CDEBUG(D_NET, "Closing conn -> %s, "
+ "version: %x, reason: %d\n",
+ libcfs_nid2str(peer->ibp_nid),
+ conn->ibc_version, why);
+
+ kiblnd_close_conn_locked(conn, why);
+ count++;
+ }
+
+ return count;
+}
+
+int
+kiblnd_close_stale_conns_locked (kib_peer_t *peer,
+ int version, __u64 incarnation)
+{
+ kib_conn_t *conn;
+ struct list_head *ctmp;
+ struct list_head *cnxt;
+ int count = 0;
+
+ list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
+ conn = list_entry(ctmp, kib_conn_t, ibc_list);
+
+ if (conn->ibc_version == version &&
+ conn->ibc_incarnation == incarnation)
+ continue;
+
+ CDEBUG(D_NET, "Closing stale conn -> %s version: %x, "
+ "incarnation:"LPX64"(%x, "LPX64")\n",
+ libcfs_nid2str(peer->ibp_nid),
+ conn->ibc_version, conn->ibc_incarnation,
+ version, incarnation);
+
+ kiblnd_close_conn_locked(conn, -ESTALE);
+ count++;
+ }
+
+ return count;
+}
+
+int
+kiblnd_close_matching_conns (lnet_ni_t *ni, lnet_nid_t nid)
+{
+ kib_peer_t *peer;
+ struct list_head *ptmp;
+ struct list_head *pnxt;
+ int lo;
+ int hi;
+ int i;
+ unsigned long flags;
+ int count = 0;
+
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+
+ if (nid != LNET_NID_ANY)
+ lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
+ else {
+ lo = 0;
+ hi = kiblnd_data.kib_peer_hash_size - 1;
+ }
+
+ for (i = lo; i <= hi; i++) {
+ list_for_each_safe (ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
+
+ peer = list_entry(ptmp, kib_peer_t, ibp_list);
+ LASSERT (peer->ibp_connecting > 0 ||
+ peer->ibp_accepting > 0 ||
+ !list_empty(&peer->ibp_conns));
+
+ if (peer->ibp_ni != ni)
+ continue;
+
+ if (!(nid == LNET_NID_ANY || nid == peer->ibp_nid))
+ continue;
+
+ count += kiblnd_close_peer_conns_locked(peer, 0);
+ }
+ }
+
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+ /* wildcards always succeed */
+ if (nid == LNET_NID_ANY)
+ return 0;
+
+ return (count == 0) ? -ENOENT : 0;
+}
+
+int
+kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
+{
+ struct libcfs_ioctl_data *data = arg;
+ int rc = -EINVAL;
+
+ switch (cmd) {
+ case IOC_LIBCFS_GET_PEER: {
+ lnet_nid_t nid = 0;
+ int count = 0;
+
+ rc = kiblnd_get_peer_info(ni, data->ioc_count,
+ &nid, &count);
+ data->ioc_nid = nid;
+ data->ioc_count = count;
+ break;
+ }
+
+ case IOC_LIBCFS_DEL_PEER: {
+ rc = kiblnd_del_peer(ni, data->ioc_nid);
+ break;
+ }
+ case IOC_LIBCFS_GET_CONN: {
+ kib_conn_t *conn;
+
+ rc = 0;
+ conn = kiblnd_get_conn_by_idx(ni, data->ioc_count);
+ if (conn == NULL) {
+ rc = -ENOENT;
+ break;
+ }
+
+ LASSERT (conn->ibc_cmid != NULL);
+ data->ioc_nid = conn->ibc_peer->ibp_nid;
+ if (conn->ibc_cmid->route.path_rec == NULL)
+ data->ioc_u32[0] = 0; /* iWARP has no path MTU */
+ else
+ data->ioc_u32[0] =
+ ib_mtu_enum_to_int(conn->ibc_cmid->route.path_rec->mtu);
+ kiblnd_conn_decref(conn);
+ break;
+ }
+ case IOC_LIBCFS_CLOSE_CONNECTION: {
+ rc = kiblnd_close_matching_conns(ni, data->ioc_nid);
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+void
+kiblnd_query (lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
+{
+ cfs_time_t last_alive = 0;
+ cfs_time_t now = cfs_time_current();
+ rwlock_t *glock = &kiblnd_data.kib_global_lock;
+ kib_peer_t *peer;
+ unsigned long flags;
+
+ read_lock_irqsave(glock, flags);
+
+ peer = kiblnd_find_peer_locked(nid);
+ if (peer != NULL) {
+ LASSERT (peer->ibp_connecting > 0 || /* creating conns */
+ peer->ibp_accepting > 0 ||
+ !list_empty(&peer->ibp_conns)); /* active conn */
+ last_alive = peer->ibp_last_alive;
+ }
+
+ read_unlock_irqrestore(glock, flags);
+
+ if (last_alive != 0)
+ *when = last_alive;
+
+ /* peer is not persistent in hash, trigger peer creation
+ * and connection establishment with a NULL tx */
+ if (peer == NULL)
+ kiblnd_launch_tx(ni, NULL, nid);
+
+ CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n",
+ libcfs_nid2str(nid), peer,
+ last_alive ? cfs_duration_sec(now - last_alive) : -1);
+ return;
+}
+
+void
+kiblnd_free_pages(kib_pages_t *p)
+{
+ int npages = p->ibp_npages;
+ int i;
+
+ for (i = 0; i < npages; i++) {
+ if (p->ibp_pages[i] != NULL)
+ __free_page(p->ibp_pages[i]);
+ }
+
+ LIBCFS_FREE(p, offsetof(kib_pages_t, ibp_pages[npages]));
+}
+
+int
+kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages)
+{
+ kib_pages_t *p;
+ int i;
+
+ LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt,
+ offsetof(kib_pages_t, ibp_pages[npages]));
+ if (p == NULL) {
+ CERROR("Can't allocate descriptor for %d pages\n", npages);
+ return -ENOMEM;
+ }
+
+ memset(p, 0, offsetof(kib_pages_t, ibp_pages[npages]));
+ p->ibp_npages = npages;
+
+ for (i = 0; i < npages; i++) {
+ p->ibp_pages[i] = alloc_pages_node(
+ cfs_cpt_spread_node(lnet_cpt_table(), cpt),
+ __GFP_IO, 0);
+ if (p->ibp_pages[i] == NULL) {
+ CERROR("Can't allocate page %d of %d\n", i, npages);
+ kiblnd_free_pages(p);
+ return -ENOMEM;
+ }
+ }
+
+ *pp = p;
+ return 0;
+}
+
+void
+kiblnd_unmap_rx_descs(kib_conn_t *conn)
+{
+ kib_rx_t *rx;
+ int i;
+
+ LASSERT (conn->ibc_rxs != NULL);
+ LASSERT (conn->ibc_hdev != NULL);
+
+ for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) {
+ rx = &conn->ibc_rxs[i];
+
+ LASSERT (rx->rx_nob >= 0); /* not posted */
+
+ kiblnd_dma_unmap_single(conn->ibc_hdev->ibh_ibdev,
+ KIBLND_UNMAP_ADDR(rx, rx_msgunmap,
+ rx->rx_msgaddr),
+ IBLND_MSG_SIZE, DMA_FROM_DEVICE);
+ }
+
+ kiblnd_free_pages(conn->ibc_rx_pages);
+
+ conn->ibc_rx_pages = NULL;
+}
+
+void
+kiblnd_map_rx_descs(kib_conn_t *conn)
+{
+ kib_rx_t *rx;
+ struct page *pg;
+ int pg_off;
+ int ipg;
+ int i;
+
+ for (pg_off = ipg = i = 0;
+ i < IBLND_RX_MSGS(conn->ibc_version); i++) {
+ pg = conn->ibc_rx_pages->ibp_pages[ipg];
+ rx = &conn->ibc_rxs[i];
+
+ rx->rx_conn = conn;
+ rx->rx_msg = (kib_msg_t *)(((char *)page_address(pg)) + pg_off);
+
+ rx->rx_msgaddr = kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev,
+ rx->rx_msg, IBLND_MSG_SIZE,
+ DMA_FROM_DEVICE);
+ LASSERT (!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev,
+ rx->rx_msgaddr));
+ KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);
+
+ CDEBUG(D_NET,"rx %d: %p "LPX64"("LPX64")\n",
+ i, rx->rx_msg, rx->rx_msgaddr,
+ lnet_page2phys(pg) + pg_off);
+
+ pg_off += IBLND_MSG_SIZE;
+ LASSERT (pg_off <= PAGE_SIZE);
+
+ if (pg_off == PAGE_SIZE) {
+ pg_off = 0;
+ ipg++;
+ LASSERT (ipg <= IBLND_RX_MSG_PAGES(conn->ibc_version));
+ }
+ }
+}
+
+static void
+kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo)
+{
+ kib_hca_dev_t *hdev = tpo->tpo_hdev;
+ kib_tx_t *tx;
+ int i;
+
+ LASSERT (tpo->tpo_pool.po_allocated == 0);
+
+ if (hdev == NULL)
+ return;
+
+ for (i = 0; i < tpo->tpo_pool.po_size; i++) {
+ tx = &tpo->tpo_tx_descs[i];
+ kiblnd_dma_unmap_single(hdev->ibh_ibdev,
+ KIBLND_UNMAP_ADDR(tx, tx_msgunmap,
+ tx->tx_msgaddr),
+ IBLND_MSG_SIZE, DMA_TO_DEVICE);
+ }
+
+ kiblnd_hdev_decref(hdev);
+ tpo->tpo_hdev = NULL;
+}
+
+static kib_hca_dev_t *
+kiblnd_current_hdev(kib_dev_t *dev)
+{
+ kib_hca_dev_t *hdev;
+ unsigned long flags;
+ int i = 0;
+
+ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ while (dev->ibd_failover) {
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ if (i++ % 50 == 0)
+ CDEBUG(D_NET, "%s: Wait for failover\n",
+ dev->ibd_ifname);
+ schedule_timeout(cfs_time_seconds(1) / 100);
+
+ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ }
+
+ kiblnd_hdev_addref_locked(dev->ibd_hdev);
+ hdev = dev->ibd_hdev;
+
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+ return hdev;
+}
+
+static void
+kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
+{
+ kib_pages_t *txpgs = tpo->tpo_tx_pages;
+ kib_pool_t *pool = &tpo->tpo_pool;
+ kib_net_t *net = pool->po_owner->ps_net;
+ kib_dev_t *dev;
+ struct page *page;
+ kib_tx_t *tx;
+ int page_offset;
+ int ipage;
+ int i;
+
+ LASSERT (net != NULL);
+
+ dev = net->ibn_dev;
+
+ /* pre-mapped messages are not bigger than 1 page */
+ CLASSERT (IBLND_MSG_SIZE <= PAGE_SIZE);
+
+ /* No fancy arithmetic when we do the buffer calculations */
+ CLASSERT (PAGE_SIZE % IBLND_MSG_SIZE == 0);
+
+ tpo->tpo_hdev = kiblnd_current_hdev(dev);
+
+ for (ipage = page_offset = i = 0; i < pool->po_size; i++) {
+ page = txpgs->ibp_pages[ipage];
+ tx = &tpo->tpo_tx_descs[i];
+
+ tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) +
+ page_offset);
+
+ tx->tx_msgaddr = kiblnd_dma_map_single(
+ tpo->tpo_hdev->ibh_ibdev, tx->tx_msg,
+ IBLND_MSG_SIZE, DMA_TO_DEVICE);
+ LASSERT (!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev,
+ tx->tx_msgaddr));
+ KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);
+
+ list_add(&tx->tx_list, &pool->po_free_list);
+
+ page_offset += IBLND_MSG_SIZE;
+ LASSERT (page_offset <= PAGE_SIZE);
+
+ if (page_offset == PAGE_SIZE) {
+ page_offset = 0;
+ ipage++;
+ LASSERT (ipage <= txpgs->ibp_npages);
+ }
+ }
+}
+
+struct ib_mr *
+kiblnd_find_dma_mr(kib_hca_dev_t *hdev, __u64 addr, __u64 size)
+{
+ __u64 index;
+
+ LASSERT (hdev->ibh_mrs[0] != NULL);
+
+ if (hdev->ibh_nmrs == 1)
+ return hdev->ibh_mrs[0];
+
+ index = addr >> hdev->ibh_mr_shift;
+
+ if (index < hdev->ibh_nmrs &&
+ index == ((addr + size - 1) >> hdev->ibh_mr_shift))
+ return hdev->ibh_mrs[index];
+
+ return NULL;
+}
+
+struct ib_mr *
+kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd)
+{
+ struct ib_mr *prev_mr;
+ struct ib_mr *mr;
+ int i;
+
+ LASSERT (hdev->ibh_mrs[0] != NULL);
+
+ if (*kiblnd_tunables.kib_map_on_demand > 0 &&
+ *kiblnd_tunables.kib_map_on_demand <= rd->rd_nfrags)
+ return NULL;
+
+ if (hdev->ibh_nmrs == 1)
+ return hdev->ibh_mrs[0];
+
+ for (i = 0, mr = prev_mr = NULL;
+ i < rd->rd_nfrags; i++) {
+ mr = kiblnd_find_dma_mr(hdev,
+ rd->rd_frags[i].rf_addr,
+ rd->rd_frags[i].rf_nob);
+ if (prev_mr == NULL)
+ prev_mr = mr;
+
+ if (mr == NULL || prev_mr != mr) {
+ /* Can't be covered by a single MR */
+ mr = NULL;
+ break;
+ }
+ }
+
+ return mr;
+}
+
+void
+kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool)
+{
+ LASSERT (pool->fpo_map_count == 0);
+
+ if (pool->fpo_fmr_pool != NULL)
+ ib_destroy_fmr_pool(pool->fpo_fmr_pool);
+
+ if (pool->fpo_hdev != NULL)
+ kiblnd_hdev_decref(pool->fpo_hdev);
+
+ LIBCFS_FREE(pool, sizeof(kib_fmr_pool_t));
+}
+
+void
+kiblnd_destroy_fmr_pool_list(struct list_head *head)
+{
+ kib_fmr_pool_t *pool;
+
+ while (!list_empty(head)) {
+ pool = list_entry(head->next, kib_fmr_pool_t, fpo_list);
+ list_del(&pool->fpo_list);
+ kiblnd_destroy_fmr_pool(pool);
+ }
+}
+
+static int kiblnd_fmr_pool_size(int ncpts)
+{
+ int size = *kiblnd_tunables.kib_fmr_pool_size / ncpts;
+
+ return max(IBLND_FMR_POOL, size);
+}
+
+static int kiblnd_fmr_flush_trigger(int ncpts)
+{
+ int size = *kiblnd_tunables.kib_fmr_flush_trigger / ncpts;
+
+ return max(IBLND_FMR_POOL_FLUSH, size);
+}
+
+int
+kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t **pp_fpo)
+{
+ /* FMR pool for RDMA */
+ kib_dev_t *dev = fps->fps_net->ibn_dev;
+ kib_fmr_pool_t *fpo;
+ struct ib_fmr_pool_param param = {
+ .max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE,
+ .page_shift = PAGE_SHIFT,
+ .access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE),
+ .pool_size = fps->fps_pool_size,
+ .dirty_watermark = fps->fps_flush_trigger,
+ .flush_function = NULL,
+ .flush_arg = NULL,
+ .cache = !!*kiblnd_tunables.kib_fmr_cache};
+ int rc;
+
+ LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
+ if (fpo == NULL)
+ return -ENOMEM;
+
+ fpo->fpo_hdev = kiblnd_current_hdev(dev);
+
+ fpo->fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd, &param);
+ if (IS_ERR(fpo->fpo_fmr_pool)) {
+ rc = PTR_ERR(fpo->fpo_fmr_pool);
+ CERROR("Failed to create FMR pool: %d\n", rc);
+
+ kiblnd_hdev_decref(fpo->fpo_hdev);
+ LIBCFS_FREE(fpo, sizeof(kib_fmr_pool_t));
+ return rc;
+ }
+
+ fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+ fpo->fpo_owner = fps;
+ *pp_fpo = fpo;
+
+ return 0;
+}
+
+static void
+kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, struct list_head *zombies)
+{
+ if (fps->fps_net == NULL) /* initialized? */
+ return;
+
+ spin_lock(&fps->fps_lock);
+
+ while (!list_empty(&fps->fps_pool_list)) {
+ kib_fmr_pool_t *fpo = list_entry(fps->fps_pool_list.next,
+ kib_fmr_pool_t, fpo_list);
+ fpo->fpo_failed = 1;
+ list_del(&fpo->fpo_list);
+ if (fpo->fpo_map_count == 0)
+ list_add(&fpo->fpo_list, zombies);
+ else
+ list_add(&fpo->fpo_list, &fps->fps_failed_pool_list);
+ }
+
+ spin_unlock(&fps->fps_lock);
+}
+
+static void
+kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps)
+{
+ if (fps->fps_net != NULL) { /* initialized? */
+ kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list);
+ kiblnd_destroy_fmr_pool_list(&fps->fps_pool_list);
+ }
+}
+
+static int
+kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, kib_net_t *net,
+ int pool_size, int flush_trigger)
+{
+ kib_fmr_pool_t *fpo;
+ int rc;
+
+ memset(fps, 0, sizeof(kib_fmr_poolset_t));
+
+ fps->fps_net = net;
+ fps->fps_cpt = cpt;
+ fps->fps_pool_size = pool_size;
+ fps->fps_flush_trigger = flush_trigger;
+ spin_lock_init(&fps->fps_lock);
+ INIT_LIST_HEAD(&fps->fps_pool_list);
+ INIT_LIST_HEAD(&fps->fps_failed_pool_list);
+
+ rc = kiblnd_create_fmr_pool(fps, &fpo);
+ if (rc == 0)
+ list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
+
+ return rc;
+}
+
+static int
+kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, cfs_time_t now)
+{
+ if (fpo->fpo_map_count != 0) /* still in use */
+ return 0;
+ if (fpo->fpo_failed)
+ return 1;
+ return cfs_time_aftereq(now, fpo->fpo_deadline);
+}
+
+void
+kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
+{
+ LIST_HEAD (zombies);
+ kib_fmr_pool_t *fpo = fmr->fmr_pool;
+ kib_fmr_poolset_t *fps = fpo->fpo_owner;
+ cfs_time_t now = cfs_time_current();
+ kib_fmr_pool_t *tmp;
+ int rc;
+
+ rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
+ LASSERT (rc == 0);
+
+ if (status != 0) {
+ rc = ib_flush_fmr_pool(fpo->fpo_fmr_pool);
+ LASSERT (rc == 0);
+ }
+
+ fmr->fmr_pool = NULL;
+ fmr->fmr_pfmr = NULL;
+
+ spin_lock(&fps->fps_lock);
+ fpo->fpo_map_count--; /* decref the pool */
+
+ list_for_each_entry_safe(fpo, tmp, &fps->fps_pool_list, fpo_list) {
+ /* the first pool is persistent */
+ if (fps->fps_pool_list.next == &fpo->fpo_list)
+ continue;
+
+ if (kiblnd_fmr_pool_is_idle(fpo, now)) {
+ list_move(&fpo->fpo_list, &zombies);
+ fps->fps_version++;
+ }
+ }
+ spin_unlock(&fps->fps_lock);
+
+ if (!list_empty(&zombies))
+ kiblnd_destroy_fmr_pool_list(&zombies);
+}
+
+int
+kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
+ __u64 iov, kib_fmr_t *fmr)
+{
+ struct ib_pool_fmr *pfmr;
+ kib_fmr_pool_t *fpo;
+ __u64 version;
+ int rc;
+
+ again:
+ spin_lock(&fps->fps_lock);
+ version = fps->fps_version;
+ list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
+ fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+ fpo->fpo_map_count++;
+ spin_unlock(&fps->fps_lock);
+
+ pfmr = ib_fmr_pool_map_phys(fpo->fpo_fmr_pool,
+ pages, npages, iov);
+ if (likely(!IS_ERR(pfmr))) {
+ fmr->fmr_pool = fpo;
+ fmr->fmr_pfmr = pfmr;
+ return 0;
+ }
+
+ spin_lock(&fps->fps_lock);
+ fpo->fpo_map_count--;
+ if (PTR_ERR(pfmr) != -EAGAIN) {
+ spin_unlock(&fps->fps_lock);
+ return PTR_ERR(pfmr);
+ }
+
+ /* EAGAIN and ... */
+ if (version != fps->fps_version) {
+ spin_unlock(&fps->fps_lock);
+ goto again;
+ }
+ }
+
+ if (fps->fps_increasing) {
+ spin_unlock(&fps->fps_lock);
+ CDEBUG(D_NET, "Another thread is allocating new "
+ "FMR pool, waiting for her to complete\n");
+ schedule();
+ goto again;
+
+ }
+
+ if (cfs_time_before(cfs_time_current(), fps->fps_next_retry)) {
+ /* someone failed recently */
+ spin_unlock(&fps->fps_lock);
+ return -EAGAIN;
+ }
+
+ fps->fps_increasing = 1;
+ spin_unlock(&fps->fps_lock);
+
+ CDEBUG(D_NET, "Allocate new FMR pool\n");
+ rc = kiblnd_create_fmr_pool(fps, &fpo);
+ spin_lock(&fps->fps_lock);
+ fps->fps_increasing = 0;
+ if (rc == 0) {
+ fps->fps_version++;
+ list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
+ } else {
+ fps->fps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
+ }
+ spin_unlock(&fps->fps_lock);
+
+ goto again;
+}
+
+static void
+kiblnd_fini_pool(kib_pool_t *pool)
+{
+ LASSERT (list_empty(&pool->po_free_list));
+ LASSERT (pool->po_allocated == 0);
+
+ CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name);
+}
+
+static void
+kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size)
+{
+ CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name);
+
+ memset(pool, 0, sizeof(kib_pool_t));
+ INIT_LIST_HEAD(&pool->po_free_list);
+ pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+ pool->po_owner = ps;
+ pool->po_size = size;
+}
+
+void
+kiblnd_destroy_pool_list(struct list_head *head)
+{
+ kib_pool_t *pool;
+
+ while (!list_empty(head)) {
+ pool = list_entry(head->next, kib_pool_t, po_list);
+ list_del(&pool->po_list);
+
+ LASSERT (pool->po_owner != NULL);
+ pool->po_owner->ps_pool_destroy(pool);
+ }
+}
+
+static void
+kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies)
+{
+ if (ps->ps_net == NULL) /* initialized? */
+ return;
+
+ spin_lock(&ps->ps_lock);
+ while (!list_empty(&ps->ps_pool_list)) {
+ kib_pool_t *po = list_entry(ps->ps_pool_list.next,
+ kib_pool_t, po_list);
+ po->po_failed = 1;
+ list_del(&po->po_list);
+ if (po->po_allocated == 0)
+ list_add(&po->po_list, zombies);
+ else
+ list_add(&po->po_list, &ps->ps_failed_pool_list);
+ }
+ spin_unlock(&ps->ps_lock);
+}
+
+static void
+kiblnd_fini_poolset(kib_poolset_t *ps)
+{
+ if (ps->ps_net != NULL) { /* initialized? */
+ kiblnd_destroy_pool_list(&ps->ps_failed_pool_list);
+ kiblnd_destroy_pool_list(&ps->ps_pool_list);
+ }
+}
+
+static int
+kiblnd_init_poolset(kib_poolset_t *ps, int cpt,
+ kib_net_t *net, char *name, int size,
+ kib_ps_pool_create_t po_create,
+ kib_ps_pool_destroy_t po_destroy,
+ kib_ps_node_init_t nd_init,
+ kib_ps_node_fini_t nd_fini)
+{
+ kib_pool_t *pool;
+ int rc;
+
+ memset(ps, 0, sizeof(kib_poolset_t));
+
+ ps->ps_cpt = cpt;
+ ps->ps_net = net;
+ ps->ps_pool_create = po_create;
+ ps->ps_pool_destroy = po_destroy;
+ ps->ps_node_init = nd_init;
+ ps->ps_node_fini = nd_fini;
+ ps->ps_pool_size = size;
+ if (strlcpy(ps->ps_name, name, sizeof(ps->ps_name))
+ >= sizeof(ps->ps_name))
+ return -E2BIG;
+ spin_lock_init(&ps->ps_lock);
+ INIT_LIST_HEAD(&ps->ps_pool_list);
+ INIT_LIST_HEAD(&ps->ps_failed_pool_list);
+
+ rc = ps->ps_pool_create(ps, size, &pool);
+ if (rc == 0)
+ list_add(&pool->po_list, &ps->ps_pool_list);
+ else
+ CERROR("Failed to create the first pool for %s\n", ps->ps_name);
+
+ return rc;
+}
+
+static int
+kiblnd_pool_is_idle(kib_pool_t *pool, cfs_time_t now)
+{
+ if (pool->po_allocated != 0) /* still in use */
+ return 0;
+ if (pool->po_failed)
+ return 1;
+ return cfs_time_aftereq(now, pool->po_deadline);
+}
+
+void
+kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
+{
+ LIST_HEAD (zombies);
+ kib_poolset_t *ps = pool->po_owner;
+ kib_pool_t *tmp;
+ cfs_time_t now = cfs_time_current();
+
+ spin_lock(&ps->ps_lock);
+
+ if (ps->ps_node_fini != NULL)
+ ps->ps_node_fini(pool, node);
+
+ LASSERT (pool->po_allocated > 0);
+ list_add(node, &pool->po_free_list);
+ pool->po_allocated--;
+
+ list_for_each_entry_safe(pool, tmp, &ps->ps_pool_list, po_list) {
+ /* the first pool is persistent */
+ if (ps->ps_pool_list.next == &pool->po_list)
+ continue;
+
+ if (kiblnd_pool_is_idle(pool, now))
+ list_move(&pool->po_list, &zombies);
+ }
+ spin_unlock(&ps->ps_lock);
+
+ if (!list_empty(&zombies))
+ kiblnd_destroy_pool_list(&zombies);
+}
+
+struct list_head *
+kiblnd_pool_alloc_node(kib_poolset_t *ps)
+{
+ struct list_head *node;
+ kib_pool_t *pool;
+ int rc;
+
+ again:
+ spin_lock(&ps->ps_lock);
+ list_for_each_entry(pool, &ps->ps_pool_list, po_list) {
+ if (list_empty(&pool->po_free_list))
+ continue;
+
+ pool->po_allocated++;
+ pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+ node = pool->po_free_list.next;
+ list_del(node);
+
+ if (ps->ps_node_init != NULL) {
+ /* still hold the lock */
+ ps->ps_node_init(pool, node);
+ }
+ spin_unlock(&ps->ps_lock);
+ return node;
+ }
+
+ /* no available tx pool and ... */
+ if (ps->ps_increasing) {
+ /* another thread is allocating a new pool */
+ spin_unlock(&ps->ps_lock);
+ CDEBUG(D_NET, "Another thread is allocating new "
+ "%s pool, waiting for her to complete\n",
+ ps->ps_name);
+ schedule();
+ goto again;
+ }
+
+ if (cfs_time_before(cfs_time_current(), ps->ps_next_retry)) {
+ /* someone failed recently */
+ spin_unlock(&ps->ps_lock);
+ return NULL;
+ }
+
+ ps->ps_increasing = 1;
+ spin_unlock(&ps->ps_lock);
+
+ CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name);
+
+ rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
+
+ spin_lock(&ps->ps_lock);
+ ps->ps_increasing = 0;
+ if (rc == 0) {
+ list_add_tail(&pool->po_list, &ps->ps_pool_list);
+ } else {
+ ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
+ CERROR("Can't allocate new %s pool because out of memory\n",
+ ps->ps_name);
+ }
+ spin_unlock(&ps->ps_lock);
+
+ goto again;
+}
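Editor's note: kiblnd_pool_alloc_node() grows the pool set under a ps_increasing guard so that only one thread at a time drops the spinlock to allocate a new pool, while the others yield and retry, and a failed grow is rate-limited through ps_next_retry. The self-contained sketch below shows the same "one grower at a time" pattern in generic form; a pthread mutex stands in for the kernel spinlock and every name is illustrative, not taken from the driver.

/* Generic grow-under-lock sketch; assumes ps->lock has been initialised. */
#include <pthread.h>
#include <sched.h>
#include <time.h>

struct poolset {
    pthread_mutex_t lock;
    int             free_nodes;     /* stand-in for the per-pool free lists */
    int             increasing;     /* a thread is already growing the set */
    time_t          next_retry;     /* back-off after a failed grow */
};

static int grow_pool(void)
{
    return 8;                       /* pretend a new pool added 8 nodes */
}

/* Returns 1 if a node was taken, 0 if allocation is impossible right now. */
static int alloc_node(struct poolset *ps)
{
    int added;

again:
    pthread_mutex_lock(&ps->lock);
    if (ps->free_nodes > 0) {       /* fast path: take a free node */
        ps->free_nodes--;
        pthread_mutex_unlock(&ps->lock);
        return 1;
    }

    if (ps->increasing) {           /* someone else is already growing */
        pthread_mutex_unlock(&ps->lock);
        sched_yield();
        goto again;
    }

    if (time(NULL) < ps->next_retry) {      /* a grow failed recently */
        pthread_mutex_unlock(&ps->lock);
        return 0;
    }

    ps->increasing = 1;             /* become the grower, drop the lock */
    pthread_mutex_unlock(&ps->lock);

    added = grow_pool();            /* expensive allocation, unlocked */

    pthread_mutex_lock(&ps->lock);
    ps->increasing = 0;
    if (added > 0)
        ps->free_nodes += added;
    else
        ps->next_retry = time(NULL) + 10;
    pthread_mutex_unlock(&ps->lock);
    goto again;
}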
+
+void
+kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr)
+{
+ kib_pmr_pool_t *ppo = pmr->pmr_pool;
+ struct ib_mr *mr = pmr->pmr_mr;
+
+ pmr->pmr_mr = NULL;
+ kiblnd_pool_free_node(&ppo->ppo_pool, &pmr->pmr_list);
+ if (mr != NULL)
+ ib_dereg_mr(mr);
+}
+
+int
+kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev,
+ kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr)
+{
+ kib_phys_mr_t *pmr;
+ struct list_head *node;
+ int rc;
+ int i;
+
+ node = kiblnd_pool_alloc_node(&pps->pps_poolset);
+ if (node == NULL) {
+ CERROR("Failed to allocate PMR descriptor\n");
+ return -ENOMEM;
+ }
+
+ pmr = container_of(node, kib_phys_mr_t, pmr_list);
+ if (pmr->pmr_pool->ppo_hdev != hdev) {
+ kiblnd_pool_free_node(&pmr->pmr_pool->ppo_pool, node);
+ return -EAGAIN;
+ }
+
+ for (i = 0; i < rd->rd_nfrags; i++) {
+ pmr->pmr_ipb[i].addr = rd->rd_frags[i].rf_addr;
+ pmr->pmr_ipb[i].size = rd->rd_frags[i].rf_nob;
+ }
+
+ pmr->pmr_mr = ib_reg_phys_mr(hdev->ibh_pd,
+ pmr->pmr_ipb, rd->rd_nfrags,
+ IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE,
+ iova);
+ if (!IS_ERR(pmr->pmr_mr)) {
+ pmr->pmr_iova = *iova;
+ *pp_pmr = pmr;
+ return 0;
+ }
+
+ rc = PTR_ERR(pmr->pmr_mr);
+ CERROR("Failed ib_reg_phys_mr: %d\n", rc);
+
+ pmr->pmr_mr = NULL;
+ kiblnd_pool_free_node(&pmr->pmr_pool->ppo_pool, node);
+
+ return rc;
+}
+
+static void
+kiblnd_destroy_pmr_pool(kib_pool_t *pool)
+{
+ kib_pmr_pool_t *ppo = container_of(pool, kib_pmr_pool_t, ppo_pool);
+ kib_phys_mr_t *pmr;
+
+ LASSERT (pool->po_allocated == 0);
+
+ while (!list_empty(&pool->po_free_list)) {
+ pmr = list_entry(pool->po_free_list.next,
+ kib_phys_mr_t, pmr_list);
+
+ LASSERT (pmr->pmr_mr == NULL);
+ list_del(&pmr->pmr_list);
+
+ if (pmr->pmr_ipb != NULL) {
+ LIBCFS_FREE(pmr->pmr_ipb,
+ IBLND_MAX_RDMA_FRAGS *
+ sizeof(struct ib_phys_buf));
+ }
+
+ LIBCFS_FREE(pmr, sizeof(kib_phys_mr_t));
+ }
+
+ kiblnd_fini_pool(pool);
+ if (ppo->ppo_hdev != NULL)
+ kiblnd_hdev_decref(ppo->ppo_hdev);
+
+ LIBCFS_FREE(ppo, sizeof(kib_pmr_pool_t));
+}
+
+static inline int kiblnd_pmr_pool_size(int ncpts)
+{
+ int size = *kiblnd_tunables.kib_pmr_pool_size / ncpts;
+
+ return max(IBLND_PMR_POOL, size);
+}
+
+static int
+kiblnd_create_pmr_pool(kib_poolset_t *ps, int size, kib_pool_t **pp_po)
+{
+ struct kib_pmr_pool *ppo;
+ struct kib_pool *pool;
+ kib_phys_mr_t *pmr;
+ int i;
+
+ LIBCFS_CPT_ALLOC(ppo, lnet_cpt_table(),
+ ps->ps_cpt, sizeof(kib_pmr_pool_t));
+ if (ppo == NULL) {
+ CERROR("Failed to allocate PMR pool\n");
+ return -ENOMEM;
+ }
+
+ pool = &ppo->ppo_pool;
+ kiblnd_init_pool(ps, pool, size);
+
+ for (i = 0; i < size; i++) {
+ LIBCFS_CPT_ALLOC(pmr, lnet_cpt_table(),
+ ps->ps_cpt, sizeof(kib_phys_mr_t));
+ if (pmr == NULL)
+ break;
+
+ pmr->pmr_pool = ppo;
+ LIBCFS_CPT_ALLOC(pmr->pmr_ipb, lnet_cpt_table(), ps->ps_cpt,
+ IBLND_MAX_RDMA_FRAGS * sizeof(*pmr->pmr_ipb));
+ if (pmr->pmr_ipb == NULL)
+ break;
+
+ list_add(&pmr->pmr_list, &pool->po_free_list);
+ }
+
+ if (i < size) {
+ ps->ps_pool_destroy(pool);
+ return -ENOMEM;
+ }
+
+ ppo->ppo_hdev = kiblnd_current_hdev(ps->ps_net->ibn_dev);
+ *pp_po = pool;
+ return 0;
+}
+
+static void
+kiblnd_destroy_tx_pool(kib_pool_t *pool)
+{
+ kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool);
+ int i;
+
+ LASSERT (pool->po_allocated == 0);
+
+ if (tpo->tpo_tx_pages != NULL) {
+ kiblnd_unmap_tx_pool(tpo);
+ kiblnd_free_pages(tpo->tpo_tx_pages);
+ }
+
+ if (tpo->tpo_tx_descs == NULL)
+ goto out;
+
+ for (i = 0; i < pool->po_size; i++) {
+ kib_tx_t *tx = &tpo->tpo_tx_descs[i];
+
+ list_del(&tx->tx_list);
+ if (tx->tx_pages != NULL)
+ LIBCFS_FREE(tx->tx_pages,
+ LNET_MAX_IOV *
+ sizeof(*tx->tx_pages));
+ if (tx->tx_frags != NULL)
+ LIBCFS_FREE(tx->tx_frags,
+ IBLND_MAX_RDMA_FRAGS *
+ sizeof(*tx->tx_frags));
+ if (tx->tx_wrq != NULL)
+ LIBCFS_FREE(tx->tx_wrq,
+ (1 + IBLND_MAX_RDMA_FRAGS) *
+ sizeof(*tx->tx_wrq));
+ if (tx->tx_sge != NULL)
+ LIBCFS_FREE(tx->tx_sge,
+ (1 + IBLND_MAX_RDMA_FRAGS) *
+ sizeof(*tx->tx_sge));
+ if (tx->tx_rd != NULL)
+ LIBCFS_FREE(tx->tx_rd,
+ offsetof(kib_rdma_desc_t,
+ rd_frags[IBLND_MAX_RDMA_FRAGS]));
+ }
+
+ LIBCFS_FREE(tpo->tpo_tx_descs,
+ pool->po_size * sizeof(kib_tx_t));
+out:
+ kiblnd_fini_pool(pool);
+ LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t));
+}
+
+static int kiblnd_tx_pool_size(int ncpts)
+{
+ int ntx = *kiblnd_tunables.kib_ntx / ncpts;
+
+ return max(IBLND_TX_POOL, ntx);
+}
+
+static int
+kiblnd_create_tx_pool(kib_poolset_t *ps, int size, kib_pool_t **pp_po)
+{
+ int i;
+ int npg;
+ kib_pool_t *pool;
+ kib_tx_pool_t *tpo;
+
+ LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo));
+ if (tpo == NULL) {
+ CERROR("Failed to allocate TX pool\n");
+ return -ENOMEM;
+ }
+
+ pool = &tpo->tpo_pool;
+ kiblnd_init_pool(ps, pool, size);
+ tpo->tpo_tx_descs = NULL;
+ tpo->tpo_tx_pages = NULL;
+
+ npg = (size * IBLND_MSG_SIZE + PAGE_SIZE - 1) / PAGE_SIZE;
+ if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg) != 0) {
+ CERROR("Can't allocate tx pages: %d\n", npg);
+ LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t));
+ return -ENOMEM;
+ }
+
+ LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt,
+ size * sizeof(kib_tx_t));
+ if (tpo->tpo_tx_descs == NULL) {
+ CERROR("Can't allocate %d tx descriptors\n", size);
+ ps->ps_pool_destroy(pool);
+ return -ENOMEM;
+ }
+
+ memset(tpo->tpo_tx_descs, 0, size * sizeof(kib_tx_t));
+
+ for (i = 0; i < size; i++) {
+ kib_tx_t *tx = &tpo->tpo_tx_descs[i];
+
+ tx->tx_pool = tpo;
+ if (ps->ps_net->ibn_fmr_ps != NULL) {
+ LIBCFS_CPT_ALLOC(tx->tx_pages,
+ lnet_cpt_table(), ps->ps_cpt,
+ LNET_MAX_IOV * sizeof(*tx->tx_pages));
+ if (tx->tx_pages == NULL)
+ break;
+ }
+
+ LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt,
+ IBLND_MAX_RDMA_FRAGS * sizeof(*tx->tx_frags));
+ if (tx->tx_frags == NULL)
+ break;
+
+ sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS);
+
+ LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt,
+ (1 + IBLND_MAX_RDMA_FRAGS) *
+ sizeof(*tx->tx_wrq));
+ if (tx->tx_wrq == NULL)
+ break;
+
+ LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt,
+ (1 + IBLND_MAX_RDMA_FRAGS) *
+ sizeof(*tx->tx_sge));
+ if (tx->tx_sge == NULL)
+ break;
+
+ LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt,
+ offsetof(kib_rdma_desc_t,
+ rd_frags[IBLND_MAX_RDMA_FRAGS]));
+ if (tx->tx_rd == NULL)
+ break;
+ }
+
+ if (i == size) {
+ kiblnd_map_tx_pool(tpo);
+ *pp_po = pool;
+ return 0;
+ }
+
+ ps->ps_pool_destroy(pool);
+ return -ENOMEM;
+}
+
+static void
+kiblnd_tx_init(kib_pool_t *pool, struct list_head *node)
+{
+ kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t,
+ tps_poolset);
+ kib_tx_t *tx = list_entry(node, kib_tx_t, tx_list);
+
+ tx->tx_cookie = tps->tps_next_tx_cookie++;
+}
+
+void
+kiblnd_net_fini_pools(kib_net_t *net)
+{
+ int i;
+
+ cfs_cpt_for_each(i, lnet_cpt_table()) {
+ kib_tx_poolset_t *tps;
+ kib_fmr_poolset_t *fps;
+ kib_pmr_poolset_t *pps;
+
+ if (net->ibn_tx_ps != NULL) {
+ tps = net->ibn_tx_ps[i];
+ kiblnd_fini_poolset(&tps->tps_poolset);
+ }
+
+ if (net->ibn_fmr_ps != NULL) {
+ fps = net->ibn_fmr_ps[i];
+ kiblnd_fini_fmr_poolset(fps);
+ }
+
+ if (net->ibn_pmr_ps != NULL) {
+ pps = net->ibn_pmr_ps[i];
+ kiblnd_fini_poolset(&pps->pps_poolset);
+ }
+ }
+
+ if (net->ibn_tx_ps != NULL) {
+ cfs_percpt_free(net->ibn_tx_ps);
+ net->ibn_tx_ps = NULL;
+ }
+
+ if (net->ibn_fmr_ps != NULL) {
+ cfs_percpt_free(net->ibn_fmr_ps);
+ net->ibn_fmr_ps = NULL;
+ }
+
+ if (net->ibn_pmr_ps != NULL) {
+ cfs_percpt_free(net->ibn_pmr_ps);
+ net->ibn_pmr_ps = NULL;
+ }
+}
+
+int
+kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
+{
+ unsigned long flags;
+ int cpt;
+ int rc;
+ int i;
+
+ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ if (*kiblnd_tunables.kib_map_on_demand == 0 &&
+ net->ibn_dev->ibd_hdev->ibh_nmrs == 1) {
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+ flags);
+ goto create_tx_pool;
+ }
+
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+ if (*kiblnd_tunables.kib_fmr_pool_size <
+ *kiblnd_tunables.kib_ntx / 4) {
+ CERROR("Can't set fmr pool size (%d) < ntx / 4(%d)\n",
+ *kiblnd_tunables.kib_fmr_pool_size,
+ *kiblnd_tunables.kib_ntx / 4);
+ rc = -EINVAL;
+ goto failed;
+ }
+
+ /* TX pool must be created later than FMR/PMR, see LU-2268
+ * for details */
+ LASSERT(net->ibn_tx_ps == NULL);
+
+ /* premapping can fail if ibd_nmr > 1, so we always create
+ * FMR/PMR pool and map-on-demand if premapping failed */
+
+ net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
+ sizeof(kib_fmr_poolset_t));
+ if (net->ibn_fmr_ps == NULL) {
+ CERROR("Failed to allocate FMR pool array\n");
+ rc = -ENOMEM;
+ goto failed;
+ }
+
+ for (i = 0; i < ncpts; i++) {
+ cpt = (cpts == NULL) ? i : cpts[i];
+ rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, net,
+ kiblnd_fmr_pool_size(ncpts),
+ kiblnd_fmr_flush_trigger(ncpts));
+ if (rc == -ENOSYS && i == 0) /* no FMR */
+ break; /* create PMR pool */
+
+ if (rc != 0) { /* a real error */
+ CERROR("Can't initialize FMR pool for CPT %d: %d\n",
+ cpt, rc);
+ goto failed;
+ }
+ }
+
+ if (i > 0) {
+ LASSERT(i == ncpts);
+ goto create_tx_pool;
+ }
+
+ cfs_percpt_free(net->ibn_fmr_ps);
+ net->ibn_fmr_ps = NULL;
+
+ CWARN("Device does not support FMR, failing back to PMR\n");
+
+ if (*kiblnd_tunables.kib_pmr_pool_size <
+ *kiblnd_tunables.kib_ntx / 4) {
+ CERROR("Can't set pmr pool size (%d) < ntx / 4(%d)\n",
+ *kiblnd_tunables.kib_pmr_pool_size,
+ *kiblnd_tunables.kib_ntx / 4);
+ rc = -EINVAL;
+ goto failed;
+ }
+
+ net->ibn_pmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
+ sizeof(kib_pmr_poolset_t));
+ if (net->ibn_pmr_ps == NULL) {
+ CERROR("Failed to allocate PMR pool array\n");
+ rc = -ENOMEM;
+ goto failed;
+ }
+
+ for (i = 0; i < ncpts; i++) {
+ cpt = (cpts == NULL) ? i : cpts[i];
+ rc = kiblnd_init_poolset(&net->ibn_pmr_ps[cpt]->pps_poolset,
+ cpt, net, "PMR",
+ kiblnd_pmr_pool_size(ncpts),
+ kiblnd_create_pmr_pool,
+ kiblnd_destroy_pmr_pool, NULL, NULL);
+ if (rc != 0) {
+ CERROR("Can't initialize PMR pool for CPT %d: %d\n",
+ cpt, rc);
+ goto failed;
+ }
+ }
+
+ create_tx_pool:
+ net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
+ sizeof(kib_tx_poolset_t));
+ if (net->ibn_tx_ps == NULL) {
+ CERROR("Failed to allocate tx pool array\n");
+ rc = -ENOMEM;
+ goto failed;
+ }
+
+ for (i = 0; i < ncpts; i++) {
+ cpt = (cpts == NULL) ? i : cpts[i];
+ rc = kiblnd_init_poolset(&net->ibn_tx_ps[cpt]->tps_poolset,
+ cpt, net, "TX",
+ kiblnd_tx_pool_size(ncpts),
+ kiblnd_create_tx_pool,
+ kiblnd_destroy_tx_pool,
+ kiblnd_tx_init, NULL);
+ if (rc != 0) {
+ CERROR("Can't initialize TX pool for CPT %d: %d\n",
+ cpt, rc);
+ goto failed;
+ }
+ }
+
+ return 0;
+ failed:
+ kiblnd_net_fini_pools(net);
+ LASSERT(rc != 0);
+ return rc;
+}
+
+static int
+kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
+{
+ struct ib_device_attr *attr;
+ int rc;
+
+ /* It's safe to assume an HCA can handle a page size
+ * matching that of the native system */
+ hdev->ibh_page_shift = PAGE_SHIFT;
+ hdev->ibh_page_size = 1 << PAGE_SHIFT;
+ hdev->ibh_page_mask = ~((__u64)hdev->ibh_page_size - 1);
+
+ LIBCFS_ALLOC(attr, sizeof(*attr));
+ if (attr == NULL) {
+ CERROR("Out of memory\n");
+ return -ENOMEM;
+ }
+
+ rc = ib_query_device(hdev->ibh_ibdev, attr);
+ if (rc == 0)
+ hdev->ibh_mr_size = attr->max_mr_size;
+
+ LIBCFS_FREE(attr, sizeof(*attr));
+
+ if (rc != 0) {
+ CERROR("Failed to query IB device: %d\n", rc);
+ return rc;
+ }
+
+ if (hdev->ibh_mr_size == ~0ULL) {
+ hdev->ibh_mr_shift = 64;
+ return 0;
+ }
+
+ for (hdev->ibh_mr_shift = 0;
+ hdev->ibh_mr_shift < 64; hdev->ibh_mr_shift++) {
+ if (hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) ||
+ hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) - 1)
+ return 0;
+ }
+
+ CERROR("Invalid mr size: "LPX64"\n", hdev->ibh_mr_size);
+ return -EINVAL;
+}
+
+void
+kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev)
+{
+ int i;
+
+ if (hdev->ibh_nmrs == 0 || hdev->ibh_mrs == NULL)
+ return;
+
+ for (i = 0; i < hdev->ibh_nmrs; i++) {
+ if (hdev->ibh_mrs[i] == NULL)
+ break;
+
+ ib_dereg_mr(hdev->ibh_mrs[i]);
+ }
+
+ LIBCFS_FREE(hdev->ibh_mrs, sizeof(*hdev->ibh_mrs) * hdev->ibh_nmrs);
+ hdev->ibh_mrs = NULL;
+ hdev->ibh_nmrs = 0;
+}
+
+void
+kiblnd_hdev_destroy(kib_hca_dev_t *hdev)
+{
+ kiblnd_hdev_cleanup_mrs(hdev);
+
+ if (hdev->ibh_pd != NULL)
+ ib_dealloc_pd(hdev->ibh_pd);
+
+ if (hdev->ibh_cmid != NULL)
+ rdma_destroy_id(hdev->ibh_cmid);
+
+ LIBCFS_FREE(hdev, sizeof(*hdev));
+}
+
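+/* Register the global MR(s) for the HCA: a single DMA MR if the device can
+ * cover the whole 64-bit address space, otherwise an array of physical MRs
+ * large enough to cover all of host memory. */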
+int
+kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
+{
+ struct ib_mr *mr;
+ int i;
+ int rc;
+ __u64 mm_size;
+ __u64 mr_size;
+ int acflags = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE;
+
+ rc = kiblnd_hdev_get_attr(hdev);
+ if (rc != 0)
+ return rc;
+
+ if (hdev->ibh_mr_shift == 64) {
+ LIBCFS_ALLOC(hdev->ibh_mrs, 1 * sizeof(*hdev->ibh_mrs));
+ if (hdev->ibh_mrs == NULL) {
+ CERROR("Failed to allocate MRs table\n");
+ return -ENOMEM;
+ }
+
+ hdev->ibh_mrs[0] = NULL;
+ hdev->ibh_nmrs = 1;
+
+ mr = ib_get_dma_mr(hdev->ibh_pd, acflags);
+ if (IS_ERR(mr)) {
+ CERROR("Failed ib_get_dma_mr : %ld\n", PTR_ERR(mr));
+ kiblnd_hdev_cleanup_mrs(hdev);
+ return PTR_ERR(mr);
+ }
+
+ hdev->ibh_mrs[0] = mr;
+
+ goto out;
+ }
+
+ mr_size = (1ULL << hdev->ibh_mr_shift);
+ mm_size = (unsigned long)high_memory - PAGE_OFFSET;
+
+ hdev->ibh_nmrs = (int)((mm_size + mr_size - 1) >> hdev->ibh_mr_shift);
+
+ if (hdev->ibh_mr_shift < 32 || hdev->ibh_nmrs > 1024) {
+ /* that would mean more than 4T of memory or an MR size under 4G;
+ * assume we will re-code by then */
+ CERROR("Can't support memory size: x"LPX64
+ " with MR size: x"LPX64"\n", mm_size, mr_size);
+ return -EINVAL;
+ }
+
+ /* create an array of MRs to cover all memory */
+ LIBCFS_ALLOC(hdev->ibh_mrs, sizeof(*hdev->ibh_mrs) * hdev->ibh_nmrs);
+ if (hdev->ibh_mrs == NULL) {
+ CERROR("Failed to allocate MRs' table\n");
+ return -ENOMEM;
+ }
+
+ memset(hdev->ibh_mrs, 0, sizeof(*hdev->ibh_mrs) * hdev->ibh_nmrs);
+
+ for (i = 0; i < hdev->ibh_nmrs; i++) {
+ struct ib_phys_buf ipb;
+ __u64 iova;
+
+ ipb.size = hdev->ibh_mr_size;
+ ipb.addr = i * mr_size;
+ iova = ipb.addr;
+
+ mr = ib_reg_phys_mr(hdev->ibh_pd, &ipb, 1, acflags, &iova);
+ if (IS_ERR(mr)) {
+ CERROR("Failed ib_reg_phys_mr addr "LPX64
+ " size "LPX64" : %ld\n",
+ ipb.addr, ipb.size, PTR_ERR(mr));
+ kiblnd_hdev_cleanup_mrs(hdev);
+ return PTR_ERR(mr);
+ }
+
+ LASSERT (iova == ipb.addr);
+
+ hdev->ibh_mrs[i] = mr;
+ }
+
+out:
+ if (hdev->ibh_mr_size != ~0ULL || hdev->ibh_nmrs != 1)
+ LCONSOLE_INFO("Register global MR array, MR size: "
+ LPX64", array size: %d\n",
+ hdev->ibh_mr_size, hdev->ibh_nmrs);
+ return 0;
+}
+
+static int
+kiblnd_dummy_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
+{ /* DUMMY */
+ return 0;
+}
+
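+/* Return 1 if the device should fail over (its bound ib_device no longer
+ * matches the one recorded in ibd_hdev), 0 if not, or a negative errno. */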
+static int
+kiblnd_dev_need_failover(kib_dev_t *dev)
+{
+ struct rdma_cm_id *cmid;
+ struct sockaddr_in srcaddr;
+ struct sockaddr_in dstaddr;
+ int rc;
+
+ if (dev->ibd_hdev == NULL || /* initializing */
+ dev->ibd_hdev->ibh_cmid == NULL || /* listener is dead */
+ *kiblnd_tunables.kib_dev_failover > 1) /* debugging */
+ return 1;
+
+ /* XXX: it's UGLY, but there is no better way to detect ib-bonding
+ * HCA failover because:
+ *
+ * a. there is no reliable CM event for HCA failover...
+ * b. there is no OFED API to get the ib_device for the current
+ * net_device...
+ *
+ * We have only two choices at this point:
+ *
+ * a. rdma_bind_addr(), which would conflict with the listener cmid
+ * b. rdma_resolve_addr() to a zero address */
+ cmid = kiblnd_rdma_create_id(kiblnd_dummy_callback, dev, RDMA_PS_TCP,
+ IB_QPT_RC);
+ if (IS_ERR(cmid)) {
+ rc = PTR_ERR(cmid);
+ CERROR("Failed to create cmid for failover: %d\n", rc);
+ return rc;
+ }
+
+ memset(&srcaddr, 0, sizeof(srcaddr));
+ srcaddr.sin_family = AF_INET;
+ srcaddr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
+
+ memset(&dstaddr, 0, sizeof(dstaddr));
+ dstaddr.sin_family = AF_INET;
+ rc = rdma_resolve_addr(cmid, (struct sockaddr *)&srcaddr,
+ (struct sockaddr *)&dstaddr, 1);
+ if (rc != 0 || cmid->device == NULL) {
+ CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
+ dev->ibd_ifname, &dev->ibd_ifip,
+ cmid->device, rc);
+ rdma_destroy_id(cmid);
+ return rc;
+ }
+
+ if (dev->ibd_hdev->ibh_ibdev == cmid->device) {
+ /* don't need device failover */
+ rdma_destroy_id(cmid);
+ return 0;
+ }
+
+ return 1;
+}
+
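+/* Rebind the device to its (possibly new) HCA: destroy the old listener,
+ * create a fresh cmid, PD and MR set bound to the interface address, swap it
+ * in under the global lock, and fail the per-net pools so that they are
+ * recreated on the new HCA. */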
+int
+kiblnd_dev_failover(kib_dev_t *dev)
+{
+ LIST_HEAD (zombie_tpo);
+ LIST_HEAD (zombie_ppo);
+ LIST_HEAD (zombie_fpo);
+ struct rdma_cm_id *cmid = NULL;
+ kib_hca_dev_t *hdev = NULL;
+ kib_hca_dev_t *old;
+ struct ib_pd *pd;
+ kib_net_t *net;
+ struct sockaddr_in addr;
+ unsigned long flags;
+ int rc = 0;
+ int i;
+
+ LASSERT (*kiblnd_tunables.kib_dev_failover > 1 ||
+ dev->ibd_can_failover ||
+ dev->ibd_hdev == NULL);
+
+ rc = kiblnd_dev_need_failover(dev);
+ if (rc <= 0)
+ goto out;
+
+ if (dev->ibd_hdev != NULL &&
+ dev->ibd_hdev->ibh_cmid != NULL) {
+ /* XXX it's unfortunate to close the old listener here, because
+ * creating the new one can still fail. But we have to close it now,
+ * otherwise rdma_bind_addr will return EADDRINUSE... */
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+
+ cmid = dev->ibd_hdev->ibh_cmid;
+ /* make the next call to kiblnd_dev_need_failover()
+ * return 1 for this device */
+ dev->ibd_hdev->ibh_cmid = NULL;
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+ rdma_destroy_id(cmid);
+ }
+
+ cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, dev, RDMA_PS_TCP,
+ IB_QPT_RC);
+ if (IS_ERR(cmid)) {
+ rc = PTR_ERR(cmid);
+ CERROR("Failed to create cmid for failover: %d\n", rc);
+ goto out;
+ }
+
+ memset(&addr, 0, sizeof(addr));
+ addr.sin_family = AF_INET;
+ addr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
+ addr.sin_port = htons(*kiblnd_tunables.kib_service);
+
+ /* Bind to failover device or port */
+ rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr);
+ if (rc != 0 || cmid->device == NULL) {
+ CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
+ dev->ibd_ifname, &dev->ibd_ifip,
+ cmid->device, rc);
+ rdma_destroy_id(cmid);
+ goto out;
+ }
+
+ LIBCFS_ALLOC(hdev, sizeof(*hdev));
+ if (hdev == NULL) {
+ CERROR("Failed to allocate kib_hca_dev\n");
+ rdma_destroy_id(cmid);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ atomic_set(&hdev->ibh_ref, 1);
+ hdev->ibh_dev = dev;
+ hdev->ibh_cmid = cmid;
+ hdev->ibh_ibdev = cmid->device;
+
+ pd = ib_alloc_pd(cmid->device);
+ if (IS_ERR(pd)) {
+ rc = PTR_ERR(pd);
+ CERROR("Can't allocate PD: %d\n", rc);
+ goto out;
+ }
+
+ hdev->ibh_pd = pd;
+
+ rc = rdma_listen(cmid, 0);
+ if (rc != 0) {
+ CERROR("Can't start new listener: %d\n", rc);
+ goto out;
+ }
+
+ rc = kiblnd_hdev_setup_mrs(hdev);
+ if (rc != 0) {
+ CERROR("Can't setup device: %d\n", rc);
+ goto out;
+ }
+
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+
+ old = dev->ibd_hdev;
+ dev->ibd_hdev = hdev; /* take over the refcount */
+ hdev = old;
+
+ list_for_each_entry(net, &dev->ibd_nets, ibn_list) {
+ cfs_cpt_for_each(i, lnet_cpt_table()) {
+ kiblnd_fail_poolset(&net->ibn_tx_ps[i]->tps_poolset,
+ &zombie_tpo);
+
+ if (net->ibn_fmr_ps != NULL) {
+ kiblnd_fail_fmr_poolset(net->ibn_fmr_ps[i],
+ &zombie_fpo);
+
+ } else if (net->ibn_pmr_ps != NULL) {
+ kiblnd_fail_poolset(&net->ibn_pmr_ps[i]->
+ pps_poolset, &zombie_ppo);
+ }
+ }
+ }
+
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ out:
+ if (!list_empty(&zombie_tpo))
+ kiblnd_destroy_pool_list(&zombie_tpo);
+ if (!list_empty(&zombie_ppo))
+ kiblnd_destroy_pool_list(&zombie_ppo);
+ if (!list_empty(&zombie_fpo))
+ kiblnd_destroy_fmr_pool_list(&zombie_fpo);
+ if (hdev != NULL)
+ kiblnd_hdev_decref(hdev);
+
+ if (rc != 0)
+ dev->ibd_failed_failover++;
+ else
+ dev->ibd_failed_failover = 0;
+
+ return rc;
+}
+
+void
+kiblnd_destroy_dev (kib_dev_t *dev)
+{
+ LASSERT (dev->ibd_nnets == 0);
+ LASSERT (list_empty(&dev->ibd_nets));
+
+ list_del(&dev->ibd_fail_list);
+ list_del(&dev->ibd_list);
+
+ if (dev->ibd_hdev != NULL)
+ kiblnd_hdev_decref(dev->ibd_hdev);
+
+ LIBCFS_FREE(dev, sizeof(*dev));
+}
+
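+/* Create a kib_dev_t for the named IPoIB interface, which must be up; the
+ * device can fail over only if the interface is a bonding master, and it is
+ * initialised here via kiblnd_dev_failover(). */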
+kib_dev_t *
+kiblnd_create_dev(char *ifname)
+{
+ struct net_device *netdev;
+ kib_dev_t *dev;
+ __u32 netmask;
+ __u32 ip;
+ int up;
+ int rc;
+
+ rc = libcfs_ipif_query(ifname, &up, &ip, &netmask);
+ if (rc != 0) {
+ CERROR("Can't query IPoIB interface %s: %d\n",
+ ifname, rc);
+ return NULL;
+ }
+
+ if (!up) {
+ CERROR("Can't query IPoIB interface %s: it's down\n", ifname);
+ return NULL;
+ }
+
+ LIBCFS_ALLOC(dev, sizeof(*dev));
+ if (dev == NULL)
+ return NULL;
+
+ memset(dev, 0, sizeof(*dev));
+ netdev = dev_get_by_name(&init_net, ifname);
+ if (netdev == NULL) {
+ dev->ibd_can_failover = 0;
+ } else {
+ dev->ibd_can_failover = !!(netdev->flags & IFF_MASTER);
+ dev_put(netdev);
+ }
+
+ INIT_LIST_HEAD(&dev->ibd_nets);
+ INIT_LIST_HEAD(&dev->ibd_list); /* not yet in kib_devs */
+ INIT_LIST_HEAD(&dev->ibd_fail_list);
+ dev->ibd_ifip = ip;
+ strcpy(&dev->ibd_ifname[0], ifname);
+
+ /* initialize the device */
+ rc = kiblnd_dev_failover(dev);
+ if (rc != 0) {
+ CERROR("Can't initialize device: %d\n", rc);
+ LIBCFS_FREE(dev, sizeof(*dev));
+ return NULL;
+ }
+
+ list_add_tail(&dev->ibd_list,
+ &kiblnd_data.kib_devs);
+ return dev;
+}
+
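+/* Tear down module-wide state: flag shutdown, wake the connd, failover and
+ * scheduler threads, wait for them to exit, then free the peer table and the
+ * per-CPT scheduler data. */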
+void
+kiblnd_base_shutdown(void)
+{
+ struct kib_sched_info *sched;
+ int i;
+
+ LASSERT (list_empty(&kiblnd_data.kib_devs));
+
+ CDEBUG(D_MALLOC, "before LND base cleanup: kmem %d\n",
+ atomic_read(&libcfs_kmemory));
+
+ switch (kiblnd_data.kib_init) {
+ default:
+ LBUG();
+
+ case IBLND_INIT_ALL:
+ case IBLND_INIT_DATA:
+ LASSERT (kiblnd_data.kib_peers != NULL);
+ for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
+ LASSERT (list_empty(&kiblnd_data.kib_peers[i]));
+ }
+ LASSERT (list_empty(&kiblnd_data.kib_connd_zombies));
+ LASSERT (list_empty(&kiblnd_data.kib_connd_conns));
+
+ /* flag threads to terminate; wake and wait for them to die */
+ kiblnd_data.kib_shutdown = 1;
+
+ /* NB: we really want to stop scheduler threads net by net
+ * rather than for the whole module; this should be improved
+ * once LNet supports dynamic configuration */
+ cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds)
+ wake_up_all(&sched->ibs_waitq);
+
+ wake_up_all(&kiblnd_data.kib_connd_waitq);
+ wake_up_all(&kiblnd_data.kib_failover_waitq);
+
+ i = 2;
+ while (atomic_read(&kiblnd_data.kib_nthreads) != 0) {
+ i++;
+ CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
+ "Waiting for %d threads to terminate\n",
+ atomic_read(&kiblnd_data.kib_nthreads));
+ cfs_pause(cfs_time_seconds(1));
+ }
+
+ /* fall through */
+
+ case IBLND_INIT_NOTHING:
+ break;
+ }
+
+ if (kiblnd_data.kib_peers != NULL) {
+ LIBCFS_FREE(kiblnd_data.kib_peers,
+ sizeof(struct list_head) *
+ kiblnd_data.kib_peer_hash_size);
+ }
+
+ if (kiblnd_data.kib_scheds != NULL)
+ cfs_percpt_free(kiblnd_data.kib_scheds);
+
+ CDEBUG(D_MALLOC, "after LND base cleanup: kmem %d\n",
+ atomic_read(&libcfs_kmemory));
+
+ kiblnd_data.kib_init = IBLND_INIT_NOTHING;
+ module_put(THIS_MODULE);
+}
+
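+/* Shut down one NI: delete all peers on this net and wait for them to
+ * disconnect, destroy the net's pools and drop its reference on the device;
+ * when the last device goes away the whole module is shut down. */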
+void
+kiblnd_shutdown (lnet_ni_t *ni)
+{
+ kib_net_t *net = ni->ni_data;
+ rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+ int i;
+ unsigned long flags;
+
+ LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL);
+
+ if (net == NULL)
+ goto out;
+
+ CDEBUG(D_MALLOC, "before LND net cleanup: kmem %d\n",
+ atomic_read(&libcfs_kmemory));
+
+ write_lock_irqsave(g_lock, flags);
+ net->ibn_shutdown = 1;
+ write_unlock_irqrestore(g_lock, flags);
+
+ switch (net->ibn_init) {
+ default:
+ LBUG();
+
+ case IBLND_INIT_ALL:
+ /* nuke all existing peers within this net */
+ kiblnd_del_peer(ni, LNET_NID_ANY);
+
+ /* Wait for all peer state to clean up */
+ i = 2;
+ while (atomic_read(&net->ibn_npeers) != 0) {
+ i++;
+ CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? */
+ "%s: waiting for %d peers to disconnect\n",
+ libcfs_nid2str(ni->ni_nid),
+ atomic_read(&net->ibn_npeers));
+ cfs_pause(cfs_time_seconds(1));
+ }
+
+ kiblnd_net_fini_pools(net);
+
+ write_lock_irqsave(g_lock, flags);
+ LASSERT(net->ibn_dev->ibd_nnets > 0);
+ net->ibn_dev->ibd_nnets--;
+ list_del(&net->ibn_list);
+ write_unlock_irqrestore(g_lock, flags);
+
+ /* fall through */
+
+ case IBLND_INIT_NOTHING:
+ LASSERT (atomic_read(&net->ibn_nconns) == 0);
+
+ if (net->ibn_dev != NULL &&
+ net->ibn_dev->ibd_nnets == 0)
+ kiblnd_destroy_dev(net->ibn_dev);
+
+ break;
+ }
+
+ CDEBUG(D_MALLOC, "after LND net cleanup: kmem %d\n",
+ atomic_read(&libcfs_kmemory));
+
+ net->ibn_init = IBLND_INIT_NOTHING;
+ ni->ni_data = NULL;
+
+ LIBCFS_FREE(net, sizeof(*net));
+
+out:
+ if (list_empty(&kiblnd_data.kib_devs))
+ kiblnd_base_shutdown();
+ return;
+}
+
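+/* One-time module initialisation: set up the peer hash table, connd lists and
+ * per-CPT scheduler info, then start the connd and (optional) failover
+ * threads. */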
+int
+kiblnd_base_startup(void)
+{
+ struct kib_sched_info *sched;
+ int rc;
+ int i;
+
+ LASSERT (kiblnd_data.kib_init == IBLND_INIT_NOTHING);
+
+ try_module_get(THIS_MODULE);
+ memset(&kiblnd_data, 0, sizeof(kiblnd_data)); /* zero pointers, flags etc */
+
+ rwlock_init(&kiblnd_data.kib_global_lock);
+
+ INIT_LIST_HEAD(&kiblnd_data.kib_devs);
+ INIT_LIST_HEAD(&kiblnd_data.kib_failed_devs);
+
+ kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
+ LIBCFS_ALLOC(kiblnd_data.kib_peers,
+ sizeof(struct list_head) *
+ kiblnd_data.kib_peer_hash_size);
+ if (kiblnd_data.kib_peers == NULL) {
+ goto failed;
+ }
+ for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
+ INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
+
+ spin_lock_init(&kiblnd_data.kib_connd_lock);
+ INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
+ INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
+ init_waitqueue_head(&kiblnd_data.kib_connd_waitq);
+ init_waitqueue_head(&kiblnd_data.kib_failover_waitq);
+
+ kiblnd_data.kib_scheds = cfs_percpt_alloc(lnet_cpt_table(),
+ sizeof(*sched));
+ if (kiblnd_data.kib_scheds == NULL)
+ goto failed;
+
+ cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) {
+ int nthrs;
+
+ spin_lock_init(&sched->ibs_lock);
+ INIT_LIST_HEAD(&sched->ibs_conns);
+ init_waitqueue_head(&sched->ibs_waitq);
+
+ nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
+ if (*kiblnd_tunables.kib_nscheds > 0) {
+ nthrs = min(nthrs, *kiblnd_tunables.kib_nscheds);
+ } else {
+ /* use at most half of the CPUs; the other half is
+ * reserved for upper-layer modules */
+ nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs);
+ }
+
+ sched->ibs_nthreads_max = nthrs;
+ sched->ibs_cpt = i;
+ }
+
+ kiblnd_data.kib_error_qpa.qp_state = IB_QPS_ERR;
+
+ /* lists/ptrs/locks initialised */
+ kiblnd_data.kib_init = IBLND_INIT_DATA;
+ /*****************************************************/
+
+ rc = kiblnd_thread_start(kiblnd_connd, NULL, "kiblnd_connd");
+ if (rc != 0) {
+ CERROR("Can't spawn o2iblnd connd: %d\n", rc);
+ goto failed;
+ }
+
+ if (*kiblnd_tunables.kib_dev_failover != 0)
+ rc = kiblnd_thread_start(kiblnd_failover_thread, NULL,
+ "kiblnd_failover");
+
+ if (rc != 0) {
+ CERROR("Can't spawn o2iblnd failover thread: %d\n", rc);
+ goto failed;
+ }
+
+ /* flag everything initialised */
+ kiblnd_data.kib_init = IBLND_INIT_ALL;
+ /*****************************************************/
+
+ return 0;
+
+ failed:
+ kiblnd_base_shutdown();
+ return -ENETDOWN;
+}
+
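+/* Start scheduler threads for one CPT: on first use spawn up to
+ * ibs_nthreads_max (or roughly half the CPT's CPUs, capped at
+ * IBLND_N_SCHED_HIGH); later calls add at most one thread for a new
+ * interface. */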
+int
+kiblnd_start_schedulers(struct kib_sched_info *sched)
+{
+ int rc = 0;
+ int nthrs;
+ int i;
+
+ if (sched->ibs_nthreads == 0) {
+ if (*kiblnd_tunables.kib_nscheds > 0) {
+ nthrs = sched->ibs_nthreads_max;
+ } else {
+ nthrs = cfs_cpt_weight(lnet_cpt_table(),
+ sched->ibs_cpt);
+ nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs);
+ nthrs = min(IBLND_N_SCHED_HIGH, nthrs);
+ }
+ } else {
+ LASSERT(sched->ibs_nthreads <= sched->ibs_nthreads_max);
+ /* add at most one more thread when a new interface appears */
+ nthrs = (sched->ibs_nthreads < sched->ibs_nthreads_max);
+ }
+
+ for (i = 0; i < nthrs; i++) {
+ long id;
+ char name[20];
+ id = KIB_THREAD_ID(sched->ibs_cpt, sched->ibs_nthreads + i);
+ snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld",
+ KIB_THREAD_CPT(id), KIB_THREAD_TID(id));
+ rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id, name);
+ if (rc == 0)
+ continue;
+
+ CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
+ sched->ibs_cpt, sched->ibs_nthreads + i, rc);
+ break;
+ }
+
+ sched->ibs_nthreads += i;
+ return rc;
+}
+
+int
+kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts, int ncpts)
+{
+ int cpt;
+ int rc;
+ int i;
+
+ for (i = 0; i < ncpts; i++) {
+ struct kib_sched_info *sched;
+
+ cpt = (cpts == NULL) ? i : cpts[i];
+ sched = kiblnd_data.kib_scheds[cpt];
+
+ if (!newdev && sched->ibs_nthreads > 0)
+ continue;
+
+ rc = kiblnd_start_schedulers(kiblnd_data.kib_scheds[cpt]);
+ if (rc != 0) {
+ CERROR("Failed to start scheduler threads for %s\n",
+ dev->ibd_ifname);
+ return rc;
+ }
+ }
+ return 0;
+}
+
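+/* Look up an existing kib_dev_t by interface name; an aliased name (one
+ * containing ':') also matches its base device, which is returned as a
+ * fallback. */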
+kib_dev_t *
+kiblnd_dev_search(char *ifname)
+{
+ kib_dev_t *alias = NULL;
+ kib_dev_t *dev;
+ char *colon;
+ char *colon2;
+
+ colon = strchr(ifname, ':');
+ list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
+ if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
+ return dev;
+
+ if (alias != NULL)
+ continue;
+
+ colon2 = strchr(dev->ibd_ifname, ':');
+ if (colon != NULL)
+ *colon = 0;
+ if (colon2 != NULL)
+ *colon2 = 0;
+
+ if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
+ alias = dev;
+
+ if (colon != NULL)
+ *colon = ':';
+ if (colon2 != NULL)
+ *colon2 = ':';
+ }
+ return alias;
+}
+
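+/* Bring up one NI: initialise base state if needed, pick the IPoIB interface
+ * (from 'networks=' or the default tunable), find or create its kib_dev_t,
+ * start the scheduler threads and set up the per-net pools. */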
+int
+kiblnd_startup (lnet_ni_t *ni)
+{
+ char *ifname;
+ kib_dev_t *ibdev = NULL;
+ kib_net_t *net;
+ struct timeval tv;
+ unsigned long flags;
+ int rc;
+ int newdev;
+
+ LASSERT (ni->ni_lnd == &the_o2iblnd);
+
+ if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
+ rc = kiblnd_base_startup();
+ if (rc != 0)
+ return rc;
+ }
+
+ LIBCFS_ALLOC(net, sizeof(*net));
+ ni->ni_data = net;
+ if (net == NULL)
+ goto failed;
+
+ memset(net, 0, sizeof(*net));
+
+ do_gettimeofday(&tv);
+ net->ibn_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
+
+ ni->ni_peertimeout = *kiblnd_tunables.kib_peertimeout;
+ ni->ni_maxtxcredits = *kiblnd_tunables.kib_credits;
+ ni->ni_peertxcredits = *kiblnd_tunables.kib_peertxcredits;
+ ni->ni_peerrtrcredits = *kiblnd_tunables.kib_peerrtrcredits;
+
+ if (ni->ni_interfaces[0] != NULL) {
+ /* Use the IPoIB interface specified in 'networks=' */
+
+ CLASSERT (LNET_MAX_INTERFACES > 1);
+ if (ni->ni_interfaces[1] != NULL) {
+ CERROR("Multiple interfaces not supported\n");
+ goto failed;
+ }
+
+ ifname = ni->ni_interfaces[0];
+ } else {
+ ifname = *kiblnd_tunables.kib_default_ipif;
+ }
+
+ if (strlen(ifname) >= sizeof(ibdev->ibd_ifname)) {
+ CERROR("IPoIB interface name too long: %s\n", ifname);
+ goto failed;
+ }
+
+ ibdev = kiblnd_dev_search(ifname);
+
+ newdev = ibdev == NULL;
+ /* NB: create a kib_dev even for an alias of an existing device */
+ if (ibdev == NULL || strcmp(&ibdev->ibd_ifname[0], ifname) != 0)
+ ibdev = kiblnd_create_dev(ifname);
+
+ if (ibdev == NULL)
+ goto failed;
+
+ net->ibn_dev = ibdev;
+ ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip);
+
+ rc = kiblnd_dev_start_threads(ibdev, newdev,
+ ni->ni_cpts, ni->ni_ncpts);
+ if (rc != 0)
+ goto failed;
+
+ rc = kiblnd_net_init_pools(net, ni->ni_cpts, ni->ni_ncpts);
+ if (rc != 0) {
+ CERROR("Failed to initialize NI pools: %d\n", rc);
+ goto failed;
+ }
+
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ ibdev->ibd_nnets++;
+ list_add_tail(&net->ibn_list, &ibdev->ibd_nets);
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+ net->ibn_init = IBLND_INIT_ALL;
+
+ return 0;
+
+failed:
+ if (net != NULL && net->ibn_dev == NULL && ibdev != NULL)
+ kiblnd_destroy_dev(ibdev);
+
+ kiblnd_shutdown(ni);
+
+ CDEBUG(D_NET, "kiblnd_startup failed\n");
+ return -ENETDOWN;
+}
+
+void __exit
+kiblnd_module_fini (void)
+{
+ lnet_unregister_lnd(&the_o2iblnd);
+ kiblnd_tunables_fini();
+}
+
+int __init
+kiblnd_module_init (void)
+{
+ int rc;
+
+ CLASSERT (sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
+ CLASSERT (offsetof(kib_msg_t, ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
+ <= IBLND_MSG_SIZE);
+ CLASSERT (offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
+ <= IBLND_MSG_SIZE);
+
+ rc = kiblnd_tunables_init();
+ if (rc != 0)
+ return rc;
+
+ lnet_register_lnd(&the_o2iblnd);
+
+ return 0;
+}
+
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("Kernel OpenIB gen2 LND v2.00");
+MODULE_LICENSE("GPL");
+
+module_init(kiblnd_module_init);
+module_exit(kiblnd_module_fini);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
new file mode 100644
index 0000000..938df0c
--- /dev/null
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -0,0 +1,1056 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/klnds/o2iblnd/o2iblnd.h
+ *
+ * Author: Eric Barton <eric@bartonsoftware.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/uio.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/list.h>
+#include <linux/kmod.h>
+#include <linux/sysctl.h>
+#include <linux/pci.h>
+
+#include <net/sock.h>
+#include <linux/in.h>
+
+#define DEBUG_SUBSYSTEM S_LND
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/lnet/lnet.h>
+#include <linux/lnet/lib-lnet.h>
+#include <linux/lnet/lnet-sysctl.h>
+
+#include <rdma/rdma_cm.h>
+#include <rdma/ib_cm.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_fmr_pool.h>
+
+#define IBLND_PEER_HASH_SIZE 101 /* # peer lists */
+/* # scheduler loops before reschedule */
+#define IBLND_RESCHED 100
+
+#define IBLND_N_SCHED 2
+#define IBLND_N_SCHED_HIGH 4
+
+typedef struct
+{
+ int *kib_dev_failover; /* HCA failover */
+ unsigned int *kib_service; /* IB service number */
+ int *kib_min_reconnect_interval; /* first failed connection retry... */
+ int *kib_max_reconnect_interval; /* ...exponentially increasing to this */
+ int *kib_cksum; /* checksum kib_msg_t? */
+ int *kib_timeout; /* comms timeout (seconds) */
+ int *kib_keepalive; /* keepalive timeout (seconds) */
+ int *kib_ntx; /* # tx descs */
+ int *kib_credits; /* # concurrent sends */
+ int *kib_peertxcredits; /* # concurrent sends to 1 peer */
+ int *kib_peerrtrcredits; /* # per-peer router buffer credits */
+ int *kib_peercredits_hiw; /* # outstanding credits at which to eagerly return them */
+ int *kib_peertimeout; /* seconds to consider peer dead */
+ char **kib_default_ipif; /* default IPoIB interface */
+ int *kib_retry_count;
+ int *kib_rnr_retry_count;
+ int *kib_concurrent_sends; /* send work queue sizing */
+ int *kib_ib_mtu; /* IB MTU */
+ int *kib_map_on_demand; /* map-on-demand if RD has more fragments
+ * than this value; 0 disables map-on-demand */
+ int *kib_pmr_pool_size; /* # physical MR in pool */
+ int *kib_fmr_pool_size; /* # FMRs in pool */
+ int *kib_fmr_flush_trigger; /* When to trigger FMR flush */
+ int *kib_fmr_cache; /* enable FMR pool cache? */
+#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
+ ctl_table_header_t *kib_sysctl; /* sysctl interface */
+#endif
+ int *kib_require_priv_port;/* accept only privileged ports */
+ int *kib_use_priv_port; /* use privileged port for active connect */
+ /* # threads on each CPT */
+ int *kib_nscheds;
+} kib_tunables_t;
+
+extern kib_tunables_t kiblnd_tunables;
+
+#define IBLND_MSG_QUEUE_SIZE_V1 8 /* V1 only: # messages/RDMAs in-flight */
+#define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only: eagerly return credits at this threshold */
+
+#define IBLND_CREDITS_DEFAULT 8 /* default # of peer credits */
+#define IBLND_CREDITS_MAX ((typeof(((kib_msg_t*) 0)->ibm_credits)) - 1) /* Max # of peer credits */
+
+#define IBLND_MSG_QUEUE_SIZE(v) ((v) == IBLND_MSG_VERSION_1 ? \
+ IBLND_MSG_QUEUE_SIZE_V1 : \
+ *kiblnd_tunables.kib_peertxcredits) /* # messages/RDMAs in-flight */
+#define IBLND_CREDITS_HIGHWATER(v) ((v) == IBLND_MSG_VERSION_1 ? \
+ IBLND_CREDIT_HIGHWATER_V1 : \
+ *kiblnd_tunables.kib_peercredits_hiw) /* when to eagerly return credits */
+
+#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(cb, dev, ps, qpt)
+
+static inline int
+kiblnd_concurrent_sends_v1(void)
+{
+ if (*kiblnd_tunables.kib_concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
+ return IBLND_MSG_QUEUE_SIZE_V1 * 2;
+
+ if (*kiblnd_tunables.kib_concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
+ return IBLND_MSG_QUEUE_SIZE_V1 / 2;
+
+ return *kiblnd_tunables.kib_concurrent_sends;
+}
+
+#define IBLND_CONCURRENT_SENDS(v) ((v) == IBLND_MSG_VERSION_1 ? \
+ kiblnd_concurrent_sends_v1() : \
+ *kiblnd_tunables.kib_concurrent_sends)
+/* 2 OOB shall suffice for 1 keepalive and 1 returning credits */
+#define IBLND_OOB_CAPABLE(v) ((v) != IBLND_MSG_VERSION_1)
+#define IBLND_OOB_MSGS(v) (IBLND_OOB_CAPABLE(v) ? 2 : 0)
+
+#define IBLND_MSG_SIZE (4<<10) /* max size of queued messages (inc hdr) */
+#define IBLND_MAX_RDMA_FRAGS LNET_MAX_IOV /* max # of fragments supported */
+#define IBLND_CFG_RDMA_FRAGS (*kiblnd_tunables.kib_map_on_demand != 0 ? \
+ *kiblnd_tunables.kib_map_on_demand : \
+ IBLND_MAX_RDMA_FRAGS) /* max # of fragments configured by user */
+#define IBLND_RDMA_FRAGS(v) ((v) == IBLND_MSG_VERSION_1 ? \
+ IBLND_MAX_RDMA_FRAGS : IBLND_CFG_RDMA_FRAGS)
+
+/************************/
+/* derived constants... */
+/* Pools (shared by connections on each CPT) */
+/* These pools can grow at runtime, so there is no need for a very large initial size */
+#define IBLND_TX_POOL 256
+#define IBLND_PMR_POOL 256
+#define IBLND_FMR_POOL 256
+#define IBLND_FMR_POOL_FLUSH 192
+
+/* TX messages (shared by all connections) */
+#define IBLND_TX_MSGS() (*kiblnd_tunables.kib_ntx)
+
+/* RX messages (per connection) */
+#define IBLND_RX_MSGS(v) (IBLND_MSG_QUEUE_SIZE(v) * 2 + IBLND_OOB_MSGS(v))
+#define IBLND_RX_MSG_BYTES(v) (IBLND_RX_MSGS(v) * IBLND_MSG_SIZE)
+#define IBLND_RX_MSG_PAGES(v) ((IBLND_RX_MSG_BYTES(v) + PAGE_SIZE - 1) / PAGE_SIZE)
+
+/* WRs and CQEs (per connection) */
+#define IBLND_RECV_WRS(v) IBLND_RX_MSGS(v)
+#define IBLND_SEND_WRS(v) ((IBLND_RDMA_FRAGS(v) + 1) * IBLND_CONCURRENT_SENDS(v))
+#define IBLND_CQ_ENTRIES(v) (IBLND_RECV_WRS(v) + IBLND_SEND_WRS(v))
+
+struct kib_hca_dev;
+
+/* o2iblnd can run over aliased interface */
+#ifdef IFALIASZ
+#define KIB_IFNAME_SIZE IFALIASZ
+#else
+#define KIB_IFNAME_SIZE 256
+#endif
+
+typedef struct
+{
+ struct list_head ibd_list; /* chain on kib_devs */
+ struct list_head ibd_fail_list; /* chain on kib_failed_devs */
+ __u32 ibd_ifip; /* IPoIB interface IP */
+ /** IPoIB interface name */
+ char ibd_ifname[KIB_IFNAME_SIZE];
+ int ibd_nnets; /* # nets extant */
+
+ cfs_time_t ibd_next_failover;
+ int ibd_failed_failover; /* # failover failures */
+ unsigned int ibd_failover; /* failover in progress */
+ unsigned int ibd_can_failover; /* IPoIB interface is a bonding master */
+ struct list_head ibd_nets;
+ struct kib_hca_dev *ibd_hdev;
+} kib_dev_t;
+
+typedef struct kib_hca_dev
+{
+ struct rdma_cm_id *ibh_cmid; /* listener cmid */
+ struct ib_device *ibh_ibdev; /* IB device */
+ int ibh_page_shift; /* page shift of current HCA */
+ int ibh_page_size; /* page size of current HCA */
+ __u64 ibh_page_mask; /* page mask of current HCA */
+ int ibh_mr_shift; /* bits shift of max MR size */
+ __u64 ibh_mr_size; /* size of MR */
+ int ibh_nmrs; /* # of global MRs */
+ struct ib_mr **ibh_mrs; /* global MR */
+ struct ib_pd *ibh_pd; /* PD */
+ kib_dev_t *ibh_dev; /* owner */
+ atomic_t ibh_ref; /* refcount */
+} kib_hca_dev_t;
+
+/** # of seconds to keep pool alive */
+#define IBLND_POOL_DEADLINE 300
+/** # of seconds to retry if allocation failed */
+#define IBLND_POOL_RETRY 1
+
+typedef struct
+{
+ int ibp_npages; /* # pages */
+ struct page *ibp_pages[0]; /* page array */
+} kib_pages_t;
+
+struct kib_pmr_pool;
+
+typedef struct {
+ struct list_head pmr_list; /* chain node */
+ struct ib_phys_buf *pmr_ipb; /* physical buffer */
+ struct ib_mr *pmr_mr; /* IB MR */
+ struct kib_pmr_pool *pmr_pool; /* owner of this MR */
+ __u64 pmr_iova; /* Virtual I/O address */
+ int pmr_refcount; /* reference count */
+} kib_phys_mr_t;
+
+struct kib_pool;
+struct kib_poolset;
+
+typedef int (*kib_ps_pool_create_t)(struct kib_poolset *ps,
+ int inc, struct kib_pool **pp_po);
+typedef void (*kib_ps_pool_destroy_t)(struct kib_pool *po);
+typedef void (*kib_ps_node_init_t)(struct kib_pool *po, struct list_head *node);
+typedef void (*kib_ps_node_fini_t)(struct kib_pool *po, struct list_head *node);
+
+struct kib_net;
+
+#define IBLND_POOL_NAME_LEN 32
+
+typedef struct kib_poolset
+{
+ spinlock_t ps_lock; /* serialize */
+ struct kib_net *ps_net; /* network it belongs to */
+ char ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
+ struct list_head ps_pool_list; /* list of pools */
+ struct list_head ps_failed_pool_list; /* failed pool list */
+ cfs_time_t ps_next_retry; /* time stamp for retry if failed to allocate */
+ int ps_increasing; /* is allocating new pool */
+ int ps_pool_size; /* new pool size */
+ int ps_cpt; /* CPT id */
+
+ kib_ps_pool_create_t ps_pool_create; /* create a new pool */
+ kib_ps_pool_destroy_t ps_pool_destroy; /* destroy a pool */
+ kib_ps_node_init_t ps_node_init; /* initialize new allocated node */
+ kib_ps_node_fini_t ps_node_fini; /* finalize node */
+} kib_poolset_t;
+
+typedef struct kib_pool
+{
+ struct list_head po_list; /* chain on pool list */
+ struct list_head po_free_list; /* pre-allocated node */
+ kib_poolset_t *po_owner; /* pool_set of this pool */
+ cfs_time_t po_deadline; /* deadline of this pool */
+ int po_allocated; /* # of elements in use */
+ int po_failed; /* pool is created on failed HCA */
+ int po_size; /* # of pre-allocated elements */
+} kib_pool_t;
+
+typedef struct {
+ kib_poolset_t tps_poolset; /* pool-set */
+ __u64 tps_next_tx_cookie; /* cookie of TX */
+} kib_tx_poolset_t;
+
+typedef struct {
+ kib_pool_t tpo_pool; /* pool */
+ struct kib_hca_dev *tpo_hdev; /* device for this pool */
+ struct kib_tx *tpo_tx_descs; /* all the tx descriptors */
+ kib_pages_t *tpo_tx_pages; /* premapped tx msg pages */
+} kib_tx_pool_t;
+
+typedef struct {
+ kib_poolset_t pps_poolset; /* pool-set */
+} kib_pmr_poolset_t;
+
+typedef struct kib_pmr_pool {
+ struct kib_hca_dev *ppo_hdev; /* device for this pool */
+ kib_pool_t ppo_pool; /* pool */
+} kib_pmr_pool_t;
+
+typedef struct
+{
+ spinlock_t fps_lock; /* serialize */
+ struct kib_net *fps_net; /* IB network */
+ struct list_head fps_pool_list; /* FMR pool list */
+ struct list_head fps_failed_pool_list; /* FMR pool list */
+ __u64 fps_version; /* validity stamp */
+ int fps_cpt; /* CPT id */
+ int fps_pool_size;
+ int fps_flush_trigger;
+ /* is allocating new pool */
+ int fps_increasing;
+ /* time stamp for retry if failed to allocate */
+ cfs_time_t fps_next_retry;
+} kib_fmr_poolset_t;
+
+typedef struct
+{
+ struct list_head fpo_list; /* chain on pool list */
+ struct kib_hca_dev *fpo_hdev; /* device for this pool */
+ kib_fmr_poolset_t *fpo_owner; /* owner of this pool */
+ struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
+ cfs_time_t fpo_deadline; /* deadline of this pool */
+ int fpo_failed; /* fmr pool is failed */
+ int fpo_map_count; /* # of mapped FMR */
+} kib_fmr_pool_t;
+
+typedef struct {
+ struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */
+ kib_fmr_pool_t *fmr_pool; /* pool of FMR */
+} kib_fmr_t;
+
+typedef struct kib_net
+{
+ struct list_head ibn_list; /* chain on kib_dev_t::ibd_nets */
+ __u64 ibn_incarnation; /* my epoch */
+ int ibn_init; /* initialisation state */
+ int ibn_shutdown; /* shutting down? */
+
+ atomic_t ibn_npeers; /* # peers extant */
+ atomic_t ibn_nconns; /* # connections extant */
+
+ kib_tx_poolset_t **ibn_tx_ps; /* tx pool-set */
+ kib_fmr_poolset_t **ibn_fmr_ps; /* fmr pool-set */
+ kib_pmr_poolset_t **ibn_pmr_ps; /* pmr pool-set */
+
+ kib_dev_t *ibn_dev; /* underlying IB device */
+} kib_net_t;
+
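+/* Scheduler thread ids encode the CPT in the upper bits and the thread index
+ * in the lower 16 bits, e.g. KIB_THREAD_ID(2, 5) == (2 << 16) | 5; the CPT
+ * and TID macros below split the id back apart. */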
+#define KIB_THREAD_SHIFT 16
+#define KIB_THREAD_ID(cpt, tid) ((cpt) << KIB_THREAD_SHIFT | (tid))
+#define KIB_THREAD_CPT(id) ((id) >> KIB_THREAD_SHIFT)
+#define KIB_THREAD_TID(id) ((id) & ((1UL << KIB_THREAD_SHIFT) - 1))
+
+struct kib_sched_info {
+ /* serialise */
+ spinlock_t ibs_lock;
+ /* schedulers sleep here */
+ wait_queue_head_t ibs_waitq;
+ /* conns to check for rx completions */
+ struct list_head ibs_conns;
+ /* number of scheduler threads */
+ int ibs_nthreads;
+ /* max allowed scheduler threads */
+ int ibs_nthreads_max;
+ int ibs_cpt; /* CPT id */
+};
+
+typedef struct
+{
+ int kib_init; /* initialisation state */
+ int kib_shutdown; /* shut down? */
+ struct list_head kib_devs; /* IB devices extant */
+ /* list head of failed devices */
+ struct list_head kib_failed_devs;
+ /* schedulers sleep here */
+ wait_queue_head_t kib_failover_waitq;
+ atomic_t kib_nthreads; /* # live threads */
+ /* stabilize net/dev/peer/conn ops */
+ rwlock_t kib_global_lock;
+ /* hash table of all my known peers */
+ struct list_head *kib_peers;
+ /* size of kib_peers */
+ int kib_peer_hash_size;
+ /* the connd task (serialisation assertions) */
+ void *kib_connd;
+ /* connections to setup/teardown */
+ struct list_head kib_connd_conns;
+ /* connections with zero refcount */
+ struct list_head kib_connd_zombies;
+ /* connection daemon sleeps here */
+ wait_queue_head_t kib_connd_waitq;
+ spinlock_t kib_connd_lock; /* serialise */
+ struct ib_qp_attr kib_error_qpa; /* QP->ERROR */
+ /* percpt data for schedulers */
+ struct kib_sched_info **kib_scheds;
+} kib_data_t;
+
+#define IBLND_INIT_NOTHING 0
+#define IBLND_INIT_DATA 1
+#define IBLND_INIT_ALL 2
+
+/************************************************************************
+ * IB Wire message format.
+ * These are sent in sender's byte order (i.e. receiver flips).
+ */
+
+typedef struct kib_connparams
+{
+ __u16 ibcp_queue_depth;
+ __u16 ibcp_max_frags;
+ __u32 ibcp_max_msg_size;
+} WIRE_ATTR kib_connparams_t;
+
+typedef struct
+{
+ lnet_hdr_t ibim_hdr; /* portals header */
+ char ibim_payload[0]; /* piggy-backed payload */
+} WIRE_ATTR kib_immediate_msg_t;
+
+typedef struct
+{
+ __u32 rf_nob; /* # bytes this frag */
+ __u64 rf_addr; /* CAVEAT EMPTOR: misaligned!! */
+} WIRE_ATTR kib_rdma_frag_t;
+
+typedef struct
+{
+ __u32 rd_key; /* local/remote key */
+ __u32 rd_nfrags; /* # fragments */
+ kib_rdma_frag_t rd_frags[0]; /* buffer frags */
+} WIRE_ATTR kib_rdma_desc_t;
+
+typedef struct
+{
+ lnet_hdr_t ibprm_hdr; /* portals header */
+ __u64 ibprm_cookie; /* opaque completion cookie */
+} WIRE_ATTR kib_putreq_msg_t;
+
+typedef struct
+{
+ __u64 ibpam_src_cookie; /* reflected completion cookie */
+ __u64 ibpam_dst_cookie; /* opaque completion cookie */
+ kib_rdma_desc_t ibpam_rd; /* sender's sink buffer */
+} WIRE_ATTR kib_putack_msg_t;
+
+typedef struct
+{
+ lnet_hdr_t ibgm_hdr; /* portals header */
+ __u64 ibgm_cookie; /* opaque completion cookie */
+ kib_rdma_desc_t ibgm_rd; /* rdma descriptor */
+} WIRE_ATTR kib_get_msg_t;
+
+typedef struct
+{
+ __u64 ibcm_cookie; /* opaque completion cookie */
+ __s32 ibcm_status; /* < 0 failure: >= 0 length */
+} WIRE_ATTR kib_completion_msg_t;
+
+typedef struct
+{
+ /* First 2 fields fixed FOR ALL TIME */
+ __u32 ibm_magic; /* I'm an ibnal message */
+ __u16 ibm_version; /* this is my version number */
+
+ __u8 ibm_type; /* msg type */
+ __u8 ibm_credits; /* returned credits */
+ __u32 ibm_nob; /* # bytes in whole message */
+ __u32 ibm_cksum; /* checksum (0 == no checksum) */
+ __u64 ibm_srcnid; /* sender's NID */
+ __u64 ibm_srcstamp; /* sender's incarnation */
+ __u64 ibm_dstnid; /* destination's NID */
+ __u64 ibm_dststamp; /* destination's incarnation */
+
+ union {
+ kib_connparams_t connparams;
+ kib_immediate_msg_t immediate;
+ kib_putreq_msg_t putreq;
+ kib_putack_msg_t putack;
+ kib_get_msg_t get;
+ kib_completion_msg_t completion;
+ } WIRE_ATTR ibm_u;
+} WIRE_ATTR kib_msg_t;
+
+#define IBLND_MSG_MAGIC LNET_PROTO_IB_MAGIC /* unique magic */
+
+#define IBLND_MSG_VERSION_1 0x11
+#define IBLND_MSG_VERSION_2 0x12
+#define IBLND_MSG_VERSION IBLND_MSG_VERSION_2
+
+#define IBLND_MSG_CONNREQ 0xc0 /* connection request */
+#define IBLND_MSG_CONNACK 0xc1 /* connection acknowledge */
+#define IBLND_MSG_NOOP 0xd0 /* nothing (just credits) */
+#define IBLND_MSG_IMMEDIATE 0xd1 /* immediate */
+#define IBLND_MSG_PUT_REQ 0xd2 /* putreq (src->sink) */
+#define IBLND_MSG_PUT_NAK 0xd3 /* completion (sink->src) */
+#define IBLND_MSG_PUT_ACK 0xd4 /* putack (sink->src) */
+#define IBLND_MSG_PUT_DONE 0xd5 /* completion (src->sink) */
+#define IBLND_MSG_GET_REQ 0xd6 /* getreq (sink->src) */
+#define IBLND_MSG_GET_DONE 0xd7 /* completion (src->sink: all OK) */
+
+typedef struct {
+ __u32 ibr_magic; /* sender's magic */
+ __u16 ibr_version; /* sender's version */
+ __u8 ibr_why; /* reject reason */
+ __u8 ibr_padding; /* padding */
+ __u64 ibr_incarnation; /* incarnation of peer */
+ kib_connparams_t ibr_cp; /* connection parameters */
+} WIRE_ATTR kib_rej_t;
+
+/* connection rejection reasons */
+#define IBLND_REJECT_CONN_RACE 1 /* You lost connection race */
+#define IBLND_REJECT_NO_RESOURCES 2 /* Out of memory/conns etc */
+#define IBLND_REJECT_FATAL 3 /* Anything else */
+
+#define IBLND_REJECT_CONN_UNCOMPAT 4 /* incompatible version peer */
+#define IBLND_REJECT_CONN_STALE 5 /* stale peer */
+
+#define IBLND_REJECT_RDMA_FRAGS 6 /* Fatal: peer's rdma frags can't match mine */
+#define IBLND_REJECT_MSG_QUEUE_SIZE 7 /* Fatal: peer's msg queue size can't match mine */
+
+/***********************************************************************/
+
+typedef struct kib_rx /* receive message */
+{
+ struct list_head rx_list; /* queue for attention */
+ struct kib_conn *rx_conn; /* owning conn */
+ int rx_nob; /* # bytes received (-1 while posted) */
+ enum ib_wc_status rx_status; /* completion status */
+ kib_msg_t *rx_msg; /* message buffer (host vaddr) */
+ __u64 rx_msgaddr; /* message buffer (I/O addr) */
+ DECLARE_PCI_UNMAP_ADDR (rx_msgunmap); /* for dma_unmap_single() */
+ struct ib_recv_wr rx_wrq; /* receive work item... */
+ struct ib_sge rx_sge; /* ...and its memory */
+} kib_rx_t;
+
+#define IBLND_POSTRX_DONT_POST 0 /* don't post */
+#define IBLND_POSTRX_NO_CREDIT 1 /* post: no credits */
+#define IBLND_POSTRX_PEER_CREDIT 2 /* post: give peer back 1 credit */
+#define IBLND_POSTRX_RSRVD_CREDIT 3 /* post: give myself back 1 reserved credit */
+
+typedef struct kib_tx /* transmit message */
+{
+ struct list_head tx_list; /* queue on idle_txs ibc_tx_queue etc. */
+ kib_tx_pool_t *tx_pool; /* pool I'm from */
+ struct kib_conn *tx_conn; /* owning conn */
+ short tx_sending; /* # tx callbacks outstanding */
+ short tx_queued; /* queued for sending */
+ short tx_waiting; /* waiting for peer */
+ int tx_status; /* LNET completion status */
+ unsigned long tx_deadline; /* completion deadline */
+ __u64 tx_cookie; /* completion cookie */
+ lnet_msg_t *tx_lntmsg[2]; /* lnet msgs to finalize on completion */
+ kib_msg_t *tx_msg; /* message buffer (host vaddr) */
+ __u64 tx_msgaddr; /* message buffer (I/O addr) */
+ DECLARE_PCI_UNMAP_ADDR (tx_msgunmap); /* for dma_unmap_single() */
+ int tx_nwrq; /* # send work items */
+ struct ib_send_wr *tx_wrq; /* send work items... */
+ struct ib_sge *tx_sge; /* ...and their memory */
+ kib_rdma_desc_t *tx_rd; /* rdma descriptor */
+ int tx_nfrags; /* # entries in... */
+ struct scatterlist *tx_frags; /* dma_map_sg descriptor */
+ __u64 *tx_pages; /* rdma phys page addrs */
+ union {
+ kib_phys_mr_t *pmr; /* MR for physical buffer */
+ kib_fmr_t fmr; /* FMR */
+ } tx_u;
+ int tx_dmadir; /* dma direction */
+} kib_tx_t;
+
+typedef struct kib_connvars
+{
+ /* connection-in-progress variables */
+ kib_msg_t cv_msg;
+} kib_connvars_t;
+
+typedef struct kib_conn
+{
+ struct kib_sched_info *ibc_sched; /* scheduler information */
+ struct kib_peer *ibc_peer; /* owning peer */
+ kib_hca_dev_t *ibc_hdev; /* HCA bound on */
+ struct list_head ibc_list; /* stash on peer's conn list */
+ struct list_head ibc_sched_list; /* schedule for attention */
+ __u16 ibc_version; /* version of connection */
+ __u64 ibc_incarnation; /* which instance of the peer */
+ atomic_t ibc_refcount; /* # users */
+ int ibc_state; /* what's happening */
+ int ibc_nsends_posted; /* # uncompleted sends */
+ int ibc_noops_posted; /* # uncompleted NOOPs */
+ int ibc_credits; /* # credits I have */
+ int ibc_outstanding_credits; /* # credits to return */
+ int ibc_reserved_credits;/* # ACK/DONE msg credits */
+ int ibc_comms_error; /* set on comms error */
+ unsigned int ibc_nrx:16; /* receive buffers owned */
+ unsigned int ibc_scheduled:1; /* scheduled for attention */
+ unsigned int ibc_ready:1; /* CQ callback fired */
+ /* time of last send */
+ unsigned long ibc_last_send;
+ /** link chain for kiblnd_check_conns only */
+ struct list_head ibc_connd_list;
+ /** rxs completed before ESTABLISHED */
+ struct list_head ibc_early_rxs;
+ /** IBLND_MSG_NOOPs for IBLND_MSG_VERSION_1 */
+ struct list_head ibc_tx_noops;
+ struct list_head ibc_tx_queue; /* sends that need a credit */
+ struct list_head ibc_tx_queue_nocred;/* sends that don't need a credit */
+ struct list_head ibc_tx_queue_rsrvd; /* sends that need to reserve an ACK/DONE msg */
+ struct list_head ibc_active_txs; /* active tx awaiting completion */
+ spinlock_t ibc_lock; /* serialise */
+ kib_rx_t *ibc_rxs; /* the rx descs */
+ kib_pages_t *ibc_rx_pages; /* premapped rx msg pages */
+
+ struct rdma_cm_id *ibc_cmid; /* CM id */
+ struct ib_cq *ibc_cq; /* completion queue */
+
+ kib_connvars_t *ibc_connvars; /* in-progress connection state */
+} kib_conn_t;
+
+#define IBLND_CONN_INIT 0 /* being initialised */
+#define IBLND_CONN_ACTIVE_CONNECT 1 /* active sending req */
+#define IBLND_CONN_PASSIVE_WAIT 2 /* passive waiting for rtu */
+#define IBLND_CONN_ESTABLISHED 3 /* connection established */
+#define IBLND_CONN_CLOSING 4 /* being closed */
+#define IBLND_CONN_DISCONNECTED 5 /* disconnected */
+
+typedef struct kib_peer
+{
+ struct list_head ibp_list; /* stash on global peer list */
+ lnet_nid_t ibp_nid; /* who's on the other end(s) */
+ lnet_ni_t *ibp_ni; /* LNet interface */
+ atomic_t ibp_refcount; /* # users */
+ struct list_head ibp_conns; /* all active connections */
+ struct list_head ibp_tx_queue; /* msgs waiting for a conn */
+ __u16 ibp_version; /* version of peer */
+ __u64 ibp_incarnation; /* incarnation of peer */
+ int ibp_connecting; /* current active connection attempts */
+ int ibp_accepting; /* current passive connection attempts */
+ int ibp_error; /* errno on closing this peer */
+ cfs_time_t ibp_last_alive; /* when (in jiffies) I was last alive */
+} kib_peer_t;
+
+extern kib_data_t kiblnd_data;
+
+extern void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);
+
+static inline void
+kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev)
+{
+ LASSERT (atomic_read(&hdev->ibh_ref) > 0);
+ atomic_inc(&hdev->ibh_ref);
+}
+
+static inline void
+kiblnd_hdev_decref(kib_hca_dev_t *hdev)
+{
+ LASSERT (atomic_read(&hdev->ibh_ref) > 0);
+ if (atomic_dec_and_test(&hdev->ibh_ref))
+ kiblnd_hdev_destroy(hdev);
+}
+
+static inline int
+kiblnd_dev_can_failover(kib_dev_t *dev)
+{
+ if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */
+ return 0;
+
+ if (*kiblnd_tunables.kib_dev_failover == 0) /* disabled */
+ return 0;
+
+ if (*kiblnd_tunables.kib_dev_failover > 1) /* force failover */
+ return 1;
+
+ return dev->ibd_can_failover;
+}
+
+#define kiblnd_conn_addref(conn) \
+do { \
+ CDEBUG(D_NET, "conn[%p] (%d)++\n", \
+ (conn), atomic_read(&(conn)->ibc_refcount)); \
+ atomic_inc(&(conn)->ibc_refcount); \
+} while (0)
+
+#define kiblnd_conn_decref(conn) \
+do { \
+ unsigned long flags; \
+ \
+ CDEBUG(D_NET, "conn[%p] (%d)--\n", \
+ (conn), atomic_read(&(conn)->ibc_refcount)); \
+ LASSERT_ATOMIC_POS(&(conn)->ibc_refcount); \
+ if (atomic_dec_and_test(&(conn)->ibc_refcount)) { \
+ spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); \
+ list_add_tail(&(conn)->ibc_list, \
+ &kiblnd_data.kib_connd_zombies); \
+ wake_up(&kiblnd_data.kib_connd_waitq); \
+ spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\
+ } \
+} while (0)
+
+#define kiblnd_peer_addref(peer) \
+do { \
+ CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n", \
+ (peer), libcfs_nid2str((peer)->ibp_nid), \
+ atomic_read (&(peer)->ibp_refcount)); \
+ atomic_inc(&(peer)->ibp_refcount); \
+} while (0)
+
+#define kiblnd_peer_decref(peer) \
+do { \
+ CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n", \
+ (peer), libcfs_nid2str((peer)->ibp_nid), \
+ atomic_read (&(peer)->ibp_refcount)); \
+ LASSERT_ATOMIC_POS(&(peer)->ibp_refcount); \
+ if (atomic_dec_and_test(&(peer)->ibp_refcount)) \
+ kiblnd_destroy_peer(peer); \
+} while (0)
+
+static inline struct list_head *
+kiblnd_nid2peerlist (lnet_nid_t nid)
+{
+ unsigned int hash =
+ ((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;
+
+ return (&kiblnd_data.kib_peers [hash]);
+}
+
+static inline int
+kiblnd_peer_active (kib_peer_t *peer)
+{
+ /* Am I in the peer hash table? */
+ return (!list_empty(&peer->ibp_list));
+}
+
+static inline kib_conn_t *
+kiblnd_get_conn_locked (kib_peer_t *peer)
+{
+ LASSERT (!list_empty(&peer->ibp_conns));
+
+ /* just return the first connection */
+ return list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
+}
+
+static inline int
+kiblnd_send_keepalive(kib_conn_t *conn)
+{
+ return (*kiblnd_tunables.kib_keepalive > 0) &&
+ cfs_time_after(jiffies, conn->ibc_last_send +
+ *kiblnd_tunables.kib_keepalive*HZ);
+}
+
+static inline int
+kiblnd_need_noop(kib_conn_t *conn)
+{
+ LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+
+ if (conn->ibc_outstanding_credits <
+ IBLND_CREDITS_HIGHWATER(conn->ibc_version) &&
+ !kiblnd_send_keepalive(conn))
+ return 0; /* No need to send NOOP */
+
+ if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
+ if (!list_empty(&conn->ibc_tx_queue_nocred))
+ return 0; /* NOOP can be piggybacked */
+
+ /* No tx to piggyback NOOP onto or no credit to send a tx */
+ return (list_empty(&conn->ibc_tx_queue) ||
+ conn->ibc_credits == 0);
+ }
+
+ if (!list_empty(&conn->ibc_tx_noops) || /* NOOP already queued */
+ !list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */
+ conn->ibc_credits == 0) /* no credit */
+ return 0;
+
+ if (conn->ibc_credits == 1 && /* last credit reserved for */
+ conn->ibc_outstanding_credits == 0) /* giving back credits */
+ return 0;
+
+ /* No tx to piggyback NOOP onto or no credit to send a tx */
+ return (list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 1);
+}
+
+static inline void
+kiblnd_abort_receives(kib_conn_t *conn)
+{
+ ib_modify_qp(conn->ibc_cmid->qp,
+ &kiblnd_data.kib_error_qpa, IB_QP_STATE);
+}
+
+static inline const char *
+kiblnd_queue2str (kib_conn_t *conn, struct list_head *q)
+{
+ if (q == &conn->ibc_tx_queue)
+ return "tx_queue";
+
+ if (q == &conn->ibc_tx_queue_rsrvd)
+ return "tx_queue_rsrvd";
+
+ if (q == &conn->ibc_tx_queue_nocred)
+ return "tx_queue_nocred";
+
+ if (q == &conn->ibc_active_txs)
+ return "active_txs";
+
+ LBUG();
+ return NULL;
+}
+
+/* CAVEAT EMPTOR: We rely on descriptor alignment to allow us to use the
+ * lowest bits of the work request id to stash the work item type. */
+
+#define IBLND_WID_TX 0
+#define IBLND_WID_RDMA 1
+#define IBLND_WID_RX 2
+#define IBLND_WID_MASK 3UL
+
+static inline __u64
+kiblnd_ptr2wreqid (void *ptr, int type)
+{
+ unsigned long lptr = (unsigned long)ptr;
+
+ LASSERT ((lptr & IBLND_WID_MASK) == 0);
+ LASSERT ((type & ~IBLND_WID_MASK) == 0);
+ return (__u64)(lptr | type);
+}
+
+static inline void *
+kiblnd_wreqid2ptr (__u64 wreqid)
+{
+ return (void *)(((unsigned long)wreqid) & ~IBLND_WID_MASK);
+}
+
+static inline int
+kiblnd_wreqid2type (__u64 wreqid)
+{
+ return (wreqid & IBLND_WID_MASK);
+}
+
+static inline void
+kiblnd_set_conn_state (kib_conn_t *conn, int state)
+{
+ conn->ibc_state = state;
+ mb();
+}
+
+static inline void
+kiblnd_init_msg (kib_msg_t *msg, int type, int body_nob)
+{
+ msg->ibm_type = type;
+ msg->ibm_nob = offsetof(kib_msg_t, ibm_u) + body_nob;
+}
+
+static inline int
+kiblnd_rd_size (kib_rdma_desc_t *rd)
+{
+ int i;
+ int size;
+
+ for (i = size = 0; i < rd->rd_nfrags; i++)
+ size += rd->rd_frags[i].rf_nob;
+
+ return size;
+}
+
+static inline __u64
+kiblnd_rd_frag_addr(kib_rdma_desc_t *rd, int index)
+{
+ return rd->rd_frags[index].rf_addr;
+}
+
+static inline __u32
+kiblnd_rd_frag_size(kib_rdma_desc_t *rd, int index)
+{
+ return rd->rd_frags[index].rf_nob;
+}
+
+static inline __u32
+kiblnd_rd_frag_key(kib_rdma_desc_t *rd, int index)
+{
+ return rd->rd_key;
+}
+
+static inline int
+kiblnd_rd_consume_frag(kib_rdma_desc_t *rd, int index, __u32 nob)
+{
+ if (nob < rd->rd_frags[index].rf_nob) {
+ rd->rd_frags[index].rf_addr += nob;
+ rd->rd_frags[index].rf_nob -= nob;
+ } else {
+ index++;
+ }
+
+ return index;
+}
+
+static inline int
+kiblnd_rd_msg_size(kib_rdma_desc_t *rd, int msgtype, int n)
+{
+ LASSERT (msgtype == IBLND_MSG_GET_REQ ||
+ msgtype == IBLND_MSG_PUT_ACK);
+
+ return msgtype == IBLND_MSG_GET_REQ ?
+ offsetof(kib_get_msg_t, ibgm_rd.rd_frags[n]) :
+ offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[n]);
+}
+
+
+static inline __u64
+kiblnd_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
+{
+ return ib_dma_mapping_error(dev, dma_addr);
+}
+
+static inline __u64 kiblnd_dma_map_single(struct ib_device *dev,
+ void *msg, size_t size,
+ enum dma_data_direction direction)
+{
+ return ib_dma_map_single(dev, msg, size, direction);
+}
+
+static inline void kiblnd_dma_unmap_single(struct ib_device *dev,
+ __u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ ib_dma_unmap_single(dev, addr, size, direction);
+}
+
+#define KIBLND_UNMAP_ADDR_SET(p, m, a) do {} while (0)
+#define KIBLND_UNMAP_ADDR(p, m, a) (a)
+
+static inline int kiblnd_dma_map_sg(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ return ib_dma_map_sg(dev, sg, nents, direction);
+}
+
+static inline void kiblnd_dma_unmap_sg(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ ib_dma_unmap_sg(dev, sg, nents, direction);
+}
+
+static inline __u64 kiblnd_sg_dma_address(struct ib_device *dev,
+ struct scatterlist *sg)
+{
+ return ib_sg_dma_address(dev, sg);
+}
+
+static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
+ struct scatterlist *sg)
+{
+ return ib_sg_dma_len(dev, sg);
+}
+
+/* XXX We use KIBLND_CONN_PARAM(e) as a writable buffer; this is not strictly
+ * right because OFED 1.2 defines it as const, so a (void *) cast is needed
+ * to overcome the "const" */
+
+#define KIBLND_CONN_PARAM(e) ((e)->param.conn.private_data)
+#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)
+
+
+struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev,
+ kib_rdma_desc_t *rd);
+struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev,
+ __u64 addr, __u64 size);
+void kiblnd_map_rx_descs(kib_conn_t *conn);
+void kiblnd_unmap_rx_descs(kib_conn_t *conn);
+int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
+ kib_rdma_desc_t *rd, int nfrags);
+void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx);
+void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node);
+struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps);
+
+int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages,
+ int npages, __u64 iov, kib_fmr_t *fmr);
+void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);
+
+int kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev,
+ kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr);
+void kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr);
+
+int kiblnd_startup (lnet_ni_t *ni);
+void kiblnd_shutdown (lnet_ni_t *ni);
+int kiblnd_ctl (lnet_ni_t *ni, unsigned int cmd, void *arg);
+void kiblnd_query (struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when);
+
+int kiblnd_tunables_init(void);
+void kiblnd_tunables_fini(void);
+
+int kiblnd_connd (void *arg);
+int kiblnd_scheduler(void *arg);
+int kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name);
+int kiblnd_failover_thread (void *arg);
+
+int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages);
+void kiblnd_free_pages (kib_pages_t *p);
+
+int kiblnd_cm_callback(struct rdma_cm_id *cmid,
+ struct rdma_cm_event *event);
+int kiblnd_translate_mtu(int value);
+
+int kiblnd_dev_failover(kib_dev_t *dev);
+int kiblnd_create_peer (lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
+void kiblnd_destroy_peer (kib_peer_t *peer);
+void kiblnd_destroy_dev (kib_dev_t *dev);
+void kiblnd_unlink_peer_locked (kib_peer_t *peer);
+void kiblnd_peer_alive (kib_peer_t *peer);
+kib_peer_t *kiblnd_find_peer_locked (lnet_nid_t nid);
+void kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error);
+int kiblnd_close_stale_conns_locked (kib_peer_t *peer,
+ int version, __u64 incarnation);
+int kiblnd_close_peer_conns_locked (kib_peer_t *peer, int why);
+
+void kiblnd_connreq_done(kib_conn_t *conn, int status);
+kib_conn_t *kiblnd_create_conn (kib_peer_t *peer, struct rdma_cm_id *cmid,
+ int state, int version);
+void kiblnd_destroy_conn (kib_conn_t *conn);
+void kiblnd_close_conn (kib_conn_t *conn, int error);
+void kiblnd_close_conn_locked (kib_conn_t *conn, int error);
+
+int kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type,
+ int nob, kib_rdma_desc_t *dstrd, __u64 dstcookie);
+
+void kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
+void kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn);
+void kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn);
+void kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob);
+void kiblnd_txlist_done (lnet_ni_t *ni, struct list_head *txlist,
+ int status);
+void kiblnd_check_sends (kib_conn_t *conn);
+
+void kiblnd_qp_event(struct ib_event *event, void *arg);
+void kiblnd_cq_event(struct ib_event *event, void *arg);
+void kiblnd_cq_completion(struct ib_cq *cq, void *arg);
+
+void kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg, int version,
+ int credits, lnet_nid_t dstnid, __u64 dststamp);
+int kiblnd_unpack_msg(kib_msg_t *msg, int nob);
+int kiblnd_post_rx (kib_rx_t *rx, int credit);
+
+int kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
+int kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
+ unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
+ unsigned int offset, unsigned int mlen, unsigned int rlen);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
new file mode 100644
index 0000000..26b49a2
--- /dev/null
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -0,0 +1,3529 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/klnds/o2iblnd/o2iblnd_cb.c
+ *
+ * Author: Eric Barton <eric@bartonsoftware.com>
+ */
+
+#include "o2iblnd.h"
+
+void
+kiblnd_tx_done (lnet_ni_t *ni, kib_tx_t *tx)
+{
+ lnet_msg_t *lntmsg[2];
+ kib_net_t *net = ni->ni_data;
+ int rc;
+ int i;
+
+ LASSERT (net != NULL);
+ LASSERT (!in_interrupt());
+ LASSERT (!tx->tx_queued); /* mustn't be queued for sending */
+ LASSERT (tx->tx_sending == 0); /* mustn't be awaiting sent callback */
+ LASSERT (!tx->tx_waiting); /* mustn't be awaiting peer response */
+ LASSERT (tx->tx_pool != NULL);
+
+ kiblnd_unmap_tx(ni, tx);
+
+ /* tx may have up to 2 lnet msgs to finalise */
+ lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
+ lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
+ rc = tx->tx_status;
+
+ if (tx->tx_conn != NULL) {
+ LASSERT (ni == tx->tx_conn->ibc_peer->ibp_ni);
+
+ kiblnd_conn_decref(tx->tx_conn);
+ tx->tx_conn = NULL;
+ }
+
+ tx->tx_nwrq = 0;
+ tx->tx_status = 0;
+
+ kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);
+
+ /* delay finalize until my descs have been freed */
+ for (i = 0; i < 2; i++) {
+ if (lntmsg[i] == NULL)
+ continue;
+
+ lnet_finalize(ni, lntmsg[i], rc);
+ }
+}
+
+void
+kiblnd_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int status)
+{
+ kib_tx_t *tx;
+
+ while (!list_empty (txlist)) {
+ tx = list_entry (txlist->next, kib_tx_t, tx_list);
+
+ list_del(&tx->tx_list);
+ /* complete now */
+ tx->tx_waiting = 0;
+ tx->tx_status = status;
+ kiblnd_tx_done(ni, tx);
+ }
+}
+
+kib_tx_t *
+kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target)
+{
+ kib_net_t *net = (kib_net_t *)ni->ni_data;
+ struct list_head *node;
+ kib_tx_t *tx;
+ kib_tx_poolset_t *tps;
+
+ tps = net->ibn_tx_ps[lnet_cpt_of_nid(target)];
+ node = kiblnd_pool_alloc_node(&tps->tps_poolset);
+ if (node == NULL)
+ return NULL;
+ tx = container_of(node, kib_tx_t, tx_list);
+
+ LASSERT (tx->tx_nwrq == 0);
+ LASSERT (!tx->tx_queued);
+ LASSERT (tx->tx_sending == 0);
+ LASSERT (!tx->tx_waiting);
+ LASSERT (tx->tx_status == 0);
+ LASSERT (tx->tx_conn == NULL);
+ LASSERT (tx->tx_lntmsg[0] == NULL);
+ LASSERT (tx->tx_lntmsg[1] == NULL);
+ LASSERT (tx->tx_u.pmr == NULL);
+ LASSERT (tx->tx_nfrags == 0);
+
+ return tx;
+}
+
+void
+kiblnd_drop_rx(kib_rx_t *rx)
+{
+ kib_conn_t *conn = rx->rx_conn;
+ struct kib_sched_info *sched = conn->ibc_sched;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sched->ibs_lock, flags);
+ LASSERT(conn->ibc_nrx > 0);
+ conn->ibc_nrx--;
+ spin_unlock_irqrestore(&sched->ibs_lock, flags);
+
+ kiblnd_conn_decref(conn);
+}
+
+int
+kiblnd_post_rx (kib_rx_t *rx, int credit)
+{
+ kib_conn_t *conn = rx->rx_conn;
+ kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data;
+ struct ib_recv_wr *bad_wrq = NULL;
+ struct ib_mr *mr;
+ int rc;
+
+ LASSERT (net != NULL);
+ LASSERT (!in_interrupt());
+ LASSERT (credit == IBLND_POSTRX_NO_CREDIT ||
+ credit == IBLND_POSTRX_PEER_CREDIT ||
+ credit == IBLND_POSTRX_RSRVD_CREDIT);
+
+ mr = kiblnd_find_dma_mr(conn->ibc_hdev, rx->rx_msgaddr, IBLND_MSG_SIZE);
+ LASSERT (mr != NULL);
+
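+	/* describe the whole message buffer with a single SGE and post it as one receive work request */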
+ rx->rx_sge.lkey = mr->lkey;
+ rx->rx_sge.addr = rx->rx_msgaddr;
+ rx->rx_sge.length = IBLND_MSG_SIZE;
+
+ rx->rx_wrq.next = NULL;
+ rx->rx_wrq.sg_list = &rx->rx_sge;
+ rx->rx_wrq.num_sge = 1;
+ rx->rx_wrq.wr_id = kiblnd_ptr2wreqid(rx, IBLND_WID_RX);
+
+ LASSERT (conn->ibc_state >= IBLND_CONN_INIT);
+ LASSERT (rx->rx_nob >= 0); /* not posted */
+
+ if (conn->ibc_state > IBLND_CONN_ESTABLISHED) {
+ kiblnd_drop_rx(rx); /* No more posts for this rx */
+ return 0;
+ }
+
+ rx->rx_nob = -1; /* flag posted */
+
+ rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
+ if (rc != 0) {
+ CERROR("Can't post rx for %s: %d, bad_wrq: %p\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq);
+ rx->rx_nob = 0;
+ }
+
+ if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */
+ return rc;
+
+ if (rc != 0) {
+ kiblnd_close_conn(conn, rc);
+ kiblnd_drop_rx(rx); /* No more posts for this rx */
+ return rc;
+ }
+
+ if (credit == IBLND_POSTRX_NO_CREDIT)
+ return 0;
+
+ spin_lock(&conn->ibc_lock);
+ if (credit == IBLND_POSTRX_PEER_CREDIT)
+ conn->ibc_outstanding_credits++;
+ else
+ conn->ibc_reserved_credits++;
+ spin_unlock(&conn->ibc_lock);
+
+ kiblnd_check_sends(conn);
+ return 0;
+}
+
+kib_tx_t *
+kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
+{
+ struct list_head *tmp;
+
+ list_for_each(tmp, &conn->ibc_active_txs) {
+ kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);
+
+ LASSERT (!tx->tx_queued);
+ LASSERT (tx->tx_sending != 0 || tx->tx_waiting);
+
+ if (tx->tx_cookie != cookie)
+ continue;
+
+ if (tx->tx_waiting &&
+ tx->tx_msg->ibm_type == txtype)
+ return tx;
+
+ CWARN("Bad completion: %swaiting, type %x (wanted %x)\n",
+ tx->tx_waiting ? "" : "NOT ",
+ tx->tx_msg->ibm_type, txtype);
+ }
+ return NULL;
+}
+
+void
+kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
+{
+ kib_tx_t *tx;
+ lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+ int idle;
+
+ spin_lock(&conn->ibc_lock);
+
+ tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
+ if (tx == NULL) {
+ spin_unlock(&conn->ibc_lock);
+
+ CWARN("Unmatched completion type %x cookie "LPX64" from %s\n",
+ txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ kiblnd_close_conn(conn, -EPROTO);
+ return;
+ }
+
+ if (tx->tx_status == 0) { /* success so far */
+ if (status < 0) { /* failed? */
+ tx->tx_status = status;
+ } else if (txtype == IBLND_MSG_GET_REQ) {
+ lnet_set_reply_msg_len(ni, tx->tx_lntmsg[1], status);
+ }
+ }
+
+ tx->tx_waiting = 0;
+
+ idle = !tx->tx_queued && (tx->tx_sending == 0);
+ if (idle)
+ list_del(&tx->tx_list);
+
+ spin_unlock(&conn->ibc_lock);
+
+ if (idle)
+ kiblnd_tx_done(ni, tx);
+}
+
+void
+kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie)
+{
+ lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+ kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
+
+ if (tx == NULL) {
+ CERROR("Can't get tx for completion %x for %s\n",
+ type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ return;
+ }
+
+ tx->tx_msg->ibm_u.completion.ibcm_status = status;
+ tx->tx_msg->ibm_u.completion.ibcm_cookie = cookie;
+ kiblnd_init_tx_msg(ni, tx, type, sizeof(kib_completion_msg_t));
+
+ kiblnd_queue_tx(tx, conn);
+}
+
+void
+kiblnd_handle_rx (kib_rx_t *rx)
+{
+ kib_msg_t *msg = rx->rx_msg;
+ kib_conn_t *conn = rx->rx_conn;
+ lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+ int credits = msg->ibm_credits;
+ kib_tx_t *tx;
+ int rc = 0;
+ int rc2;
+ int post_credit;
+
+ LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+
+ CDEBUG (D_NET, "Received %x[%d] from %s\n",
+ msg->ibm_type, credits,
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
+
+ if (credits != 0) {
+ /* Have I received credits that will let me send? */
+ spin_lock(&conn->ibc_lock);
+
+ if (conn->ibc_credits + credits >
+ IBLND_MSG_QUEUE_SIZE(conn->ibc_version)) {
+ rc2 = conn->ibc_credits;
+ spin_unlock(&conn->ibc_lock);
+
+ CERROR("Bad credits from %s: %d + %d > %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ rc2, credits,
+ IBLND_MSG_QUEUE_SIZE(conn->ibc_version));
+
+ kiblnd_close_conn(conn, -EPROTO);
+ kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT);
+ return;
+ }
+
+ conn->ibc_credits += credits;
+
+ /* This ensures the credit taken by NOOP can be returned */
+ if (msg->ibm_type == IBLND_MSG_NOOP &&
+ !IBLND_OOB_CAPABLE(conn->ibc_version)) /* v1 only */
+ conn->ibc_outstanding_credits++;
+
+ spin_unlock(&conn->ibc_lock);
+ kiblnd_check_sends(conn);
+ }
+
+ switch (msg->ibm_type) {
+ default:
+ CERROR("Bad IBLND message type %x from %s\n",
+ msg->ibm_type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ post_credit = IBLND_POSTRX_NO_CREDIT;
+ rc = -EPROTO;
+ break;
+
+ case IBLND_MSG_NOOP:
+ if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
+ post_credit = IBLND_POSTRX_NO_CREDIT;
+ break;
+ }
+
+ if (credits != 0) /* credit already posted */
+ post_credit = IBLND_POSTRX_NO_CREDIT;
+ else /* a keepalive NOOP */
+ post_credit = IBLND_POSTRX_PEER_CREDIT;
+ break;
+
+ case IBLND_MSG_IMMEDIATE:
+ post_credit = IBLND_POSTRX_DONT_POST;
+ rc = lnet_parse(ni, &msg->ibm_u.immediate.ibim_hdr,
+ msg->ibm_srcnid, rx, 0);
+ if (rc < 0) /* repost on error */
+ post_credit = IBLND_POSTRX_PEER_CREDIT;
+ break;
+
+ case IBLND_MSG_PUT_REQ:
+ post_credit = IBLND_POSTRX_DONT_POST;
+ rc = lnet_parse(ni, &msg->ibm_u.putreq.ibprm_hdr,
+ msg->ibm_srcnid, rx, 1);
+ if (rc < 0) /* repost on error */
+ post_credit = IBLND_POSTRX_PEER_CREDIT;
+ break;
+
+ case IBLND_MSG_PUT_NAK:
+ CWARN ("PUT_NACK from %s\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ post_credit = IBLND_POSTRX_RSRVD_CREDIT;
+ kiblnd_handle_completion(conn, IBLND_MSG_PUT_REQ,
+ msg->ibm_u.completion.ibcm_status,
+ msg->ibm_u.completion.ibcm_cookie);
+ break;
+
+ case IBLND_MSG_PUT_ACK:
+ post_credit = IBLND_POSTRX_RSRVD_CREDIT;
+
+ spin_lock(&conn->ibc_lock);
+ tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
+ msg->ibm_u.putack.ibpam_src_cookie);
+ if (tx != NULL)
+ list_del(&tx->tx_list);
+ spin_unlock(&conn->ibc_lock);
+
+ if (tx == NULL) {
+ CERROR("Unmatched PUT_ACK from %s\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ rc = -EPROTO;
+ break;
+ }
+
+ LASSERT (tx->tx_waiting);
+ /* CAVEAT EMPTOR: I could be racing with tx_complete, but...
+ * (a) I can overwrite tx_msg since my peer has received it!
+ * (b) tx_waiting set tells tx_complete() it's not done. */
+
+ tx->tx_nwrq = 0; /* overwrite PUT_REQ */
+
+ rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE,
+ kiblnd_rd_size(&msg->ibm_u.putack.ibpam_rd),
+ &msg->ibm_u.putack.ibpam_rd,
+ msg->ibm_u.putack.ibpam_dst_cookie);
+ if (rc2 < 0)
+ CERROR("Can't setup rdma for PUT to %s: %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), rc2);
+
+ spin_lock(&conn->ibc_lock);
+ tx->tx_waiting = 0; /* clear waiting and queue atomically */
+ kiblnd_queue_tx_locked(tx, conn);
+ spin_unlock(&conn->ibc_lock);
+ break;
+
+ case IBLND_MSG_PUT_DONE:
+ post_credit = IBLND_POSTRX_PEER_CREDIT;
+ kiblnd_handle_completion(conn, IBLND_MSG_PUT_ACK,
+ msg->ibm_u.completion.ibcm_status,
+ msg->ibm_u.completion.ibcm_cookie);
+ break;
+
+ case IBLND_MSG_GET_REQ:
+ post_credit = IBLND_POSTRX_DONT_POST;
+ rc = lnet_parse(ni, &msg->ibm_u.get.ibgm_hdr,
+ msg->ibm_srcnid, rx, 1);
+ if (rc < 0) /* repost on error */
+ post_credit = IBLND_POSTRX_PEER_CREDIT;
+ break;
+
+ case IBLND_MSG_GET_DONE:
+ post_credit = IBLND_POSTRX_RSRVD_CREDIT;
+ kiblnd_handle_completion(conn, IBLND_MSG_GET_REQ,
+ msg->ibm_u.completion.ibcm_status,
+ msg->ibm_u.completion.ibcm_cookie);
+ break;
+ }
+
+ if (rc < 0) /* protocol error */
+ kiblnd_close_conn(conn, rc);
+
+ if (post_credit != IBLND_POSTRX_DONT_POST)
+ kiblnd_post_rx(rx, post_credit);
+}
+
+void
+kiblnd_rx_complete (kib_rx_t *rx, int status, int nob)
+{
+ kib_msg_t *msg = rx->rx_msg;
+ kib_conn_t *conn = rx->rx_conn;
+ lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+ kib_net_t *net = ni->ni_data;
+ int rc;
+ int err = -EIO;
+
+ LASSERT (net != NULL);
+ LASSERT (rx->rx_nob < 0); /* was posted */
+ rx->rx_nob = 0; /* isn't now */
+
+ if (conn->ibc_state > IBLND_CONN_ESTABLISHED)
+ goto ignore;
+
+ if (status != IB_WC_SUCCESS) {
+ CNETERR("Rx from %s failed: %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
+ goto failed;
+ }
+
+ LASSERT (nob >= 0);
+ rx->rx_nob = nob;
+
+ rc = kiblnd_unpack_msg(msg, rx->rx_nob);
+ if (rc != 0) {
+ CERROR ("Error %d unpacking rx from %s\n",
+ rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ goto failed;
+ }
+
+ if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid ||
+ msg->ibm_dstnid != ni->ni_nid ||
+ msg->ibm_srcstamp != conn->ibc_incarnation ||
+ msg->ibm_dststamp != net->ibn_incarnation) {
+ CERROR ("Stale rx from %s\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ err = -ESTALE;
+ goto failed;
+ }
+
+ /* set time last known alive */
+ kiblnd_peer_alive(conn->ibc_peer);
+
+ /* racing with connection establishment/teardown! */
+
+ if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
+ rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+ unsigned long flags;
+
+ write_lock_irqsave(g_lock, flags);
+ /* must check holding global lock to eliminate race */
+ if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
+ list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
+ write_unlock_irqrestore(g_lock, flags);
+ return;
+ }
+ write_unlock_irqrestore(g_lock, flags);
+ }
+ kiblnd_handle_rx(rx);
+ return;
+
+ failed:
+ CDEBUG(D_NET, "rx %p conn %p\n", rx, conn);
+ kiblnd_close_conn(conn, err);
+ ignore:
+ kiblnd_drop_rx(rx); /* Don't re-post rx. */
+}
+
+struct page *
+kiblnd_kvaddr_to_page (unsigned long vaddr)
+{
+ struct page *page;
+
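+	/* vmalloc addresses need vmalloc_to_page(); everything else is assumed to be directly mapped */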
+ if (vaddr >= VMALLOC_START &&
+ vaddr < VMALLOC_END) {
+ page = vmalloc_to_page ((void *)vaddr);
+ LASSERT (page != NULL);
+ return page;
+ }
+#ifdef CONFIG_HIGHMEM
+ if (vaddr >= PKMAP_BASE &&
+ vaddr < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE)) {
+		/* No highmem pages here; highmem is only used for bulk (kiov) I/O */
+ CERROR("find page for address in highmem\n");
+ LBUG();
+ }
+#endif
+ page = virt_to_page (vaddr);
+ LASSERT (page != NULL);
+ return page;
+}
+
+static int
+kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
+{
+ kib_hca_dev_t *hdev;
+ __u64 *pages = tx->tx_pages;
+ kib_fmr_poolset_t *fps;
+ int npages;
+ int size;
+ int cpt;
+ int rc;
+ int i;
+
+ LASSERT(tx->tx_pool != NULL);
+ LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);
+
+ hdev = tx->tx_pool->tpo_hdev;
+
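+	/* collect a page-aligned address for every page covered by each fragment */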
+ for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
+ for (size = 0; size < rd->rd_frags[i].rf_nob;
+ size += hdev->ibh_page_size) {
+ pages[npages ++] = (rd->rd_frags[i].rf_addr &
+ hdev->ibh_page_mask) + size;
+ }
+ }
+
+ cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;
+
+ fps = net->ibn_fmr_ps[cpt];
+ rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->tx_u.fmr);
+ if (rc != 0) {
+ CERROR ("Can't map %d pages: %d\n", npages, rc);
+ return rc;
+ }
+
+ /* If rd is not tx_rd, it's going to get sent to a peer, who will need
+ * the rkey */
+ rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.fmr.fmr_pfmr->fmr->rkey :
+ tx->tx_u.fmr.fmr_pfmr->fmr->lkey;
+ rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
+ rd->rd_frags[0].rf_nob = nob;
+ rd->rd_nfrags = 1;
+
+ return 0;
+}
+
+static int
+kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
+{
+ kib_hca_dev_t *hdev;
+ kib_pmr_poolset_t *pps;
+ __u64 iova;
+ int cpt;
+ int rc;
+
+ LASSERT(tx->tx_pool != NULL);
+ LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);
+
+ hdev = tx->tx_pool->tpo_hdev;
+
+ iova = rd->rd_frags[0].rf_addr & ~hdev->ibh_page_mask;
+
+ cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;
+
+ pps = net->ibn_pmr_ps[cpt];
+ rc = kiblnd_pmr_pool_map(pps, hdev, rd, &iova, &tx->tx_u.pmr);
+ if (rc != 0) {
+		CERROR("Failed to create MR from physical buffer: %d\n", rc);
+ return rc;
+ }
+
+ /* If rd is not tx_rd, it's going to get sent to a peer, who will need
+ * the rkey */
+ rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.pmr->pmr_mr->rkey :
+ tx->tx_u.pmr->pmr_mr->lkey;
+ rd->rd_nfrags = 1;
+ rd->rd_frags[0].rf_addr = iova;
+ rd->rd_frags[0].rf_nob = nob;
+
+ return 0;
+}
+
+void
+kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
+{
+ kib_net_t *net = ni->ni_data;
+
+ LASSERT(net != NULL);
+
+ if (net->ibn_fmr_ps != NULL && tx->tx_u.fmr.fmr_pfmr != NULL) {
+ kiblnd_fmr_pool_unmap(&tx->tx_u.fmr, tx->tx_status);
+ tx->tx_u.fmr.fmr_pfmr = NULL;
+
+ } else if (net->ibn_pmr_ps != NULL && tx->tx_u.pmr != NULL) {
+ kiblnd_pmr_pool_unmap(tx->tx_u.pmr);
+ tx->tx_u.pmr = NULL;
+ }
+
+ if (tx->tx_nfrags != 0) {
+ kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
+ tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
+ tx->tx_nfrags = 0;
+ }
+}
+
+int
+kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
+ kib_rdma_desc_t *rd, int nfrags)
+{
+ kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
+ kib_net_t *net = ni->ni_data;
+ struct ib_mr *mr = NULL;
+ __u32 nob;
+ int i;
+
+ /* If rd is not tx_rd, it's going to get sent to a peer and I'm the
+ * RDMA sink */
+ tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ tx->tx_nfrags = nfrags;
+
+ rd->rd_nfrags =
+ kiblnd_dma_map_sg(hdev->ibh_ibdev,
+ tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
+
+ for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
+ rd->rd_frags[i].rf_nob = kiblnd_sg_dma_len(
+ hdev->ibh_ibdev, &tx->tx_frags[i]);
+ rd->rd_frags[i].rf_addr = kiblnd_sg_dma_address(
+ hdev->ibh_ibdev, &tx->tx_frags[i]);
+ nob += rd->rd_frags[i].rf_nob;
+ }
+
+ /* looking for pre-mapping MR */
+ mr = kiblnd_find_rd_dma_mr(hdev, rd);
+ if (mr != NULL) {
+ /* found pre-mapping MR */
+ rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
+ return 0;
+ }
+
+ if (net->ibn_fmr_ps != NULL)
+ return kiblnd_fmr_map_tx(net, tx, rd, nob);
+ else if (net->ibn_pmr_ps != NULL)
+ return kiblnd_pmr_map_tx(net, tx, rd, nob);
+
+ return -EINVAL;
+}
+
+int
+kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
+ unsigned int niov, struct iovec *iov, int offset, int nob)
+{
+ kib_net_t *net = ni->ni_data;
+ struct page *page;
+ struct scatterlist *sg;
+ unsigned long vaddr;
+ int fragnob;
+ int page_offset;
+
+ LASSERT (nob > 0);
+ LASSERT (niov > 0);
+ LASSERT (net != NULL);
+
+ while (offset >= iov->iov_len) {
+ offset -= iov->iov_len;
+ niov--;
+ iov++;
+ LASSERT (niov > 0);
+ }
+
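+	/* build one scatterlist entry per page-contiguous chunk of the iov */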
+ sg = tx->tx_frags;
+ do {
+ LASSERT (niov > 0);
+
+ vaddr = ((unsigned long)iov->iov_base) + offset;
+ page_offset = vaddr & (PAGE_SIZE - 1);
+ page = kiblnd_kvaddr_to_page(vaddr);
+ if (page == NULL) {
+ CERROR ("Can't find page\n");
+ return -EFAULT;
+ }
+
+ fragnob = min((int)(iov->iov_len - offset), nob);
+ fragnob = min(fragnob, (int)PAGE_SIZE - page_offset);
+
+ sg_set_page(sg, page, fragnob, page_offset);
+ sg++;
+
+ if (offset + fragnob < iov->iov_len) {
+ offset += fragnob;
+ } else {
+ offset = 0;
+ iov++;
+ niov--;
+ }
+ nob -= fragnob;
+ } while (nob > 0);
+
+ return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
+}
+
+int
+kiblnd_setup_rd_kiov (lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
+ int nkiov, lnet_kiov_t *kiov, int offset, int nob)
+{
+ kib_net_t *net = ni->ni_data;
+ struct scatterlist *sg;
+ int fragnob;
+
+ CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob);
+
+ LASSERT (nob > 0);
+ LASSERT (nkiov > 0);
+ LASSERT (net != NULL);
+
+ while (offset >= kiov->kiov_len) {
+ offset -= kiov->kiov_len;
+ nkiov--;
+ kiov++;
+ LASSERT (nkiov > 0);
+ }
+
+ sg = tx->tx_frags;
+ do {
+ LASSERT (nkiov > 0);
+
+ fragnob = min((int)(kiov->kiov_len - offset), nob);
+
+ sg_set_page(sg, kiov->kiov_page, fragnob,
+ kiov->kiov_offset + offset);
+ sg++;
+
+ offset = 0;
+ kiov++;
+ nkiov--;
+ nob -= fragnob;
+ } while (nob > 0);
+
+ return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
+}
+
+int
+kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
+{
+ kib_msg_t *msg = tx->tx_msg;
+ kib_peer_t *peer = conn->ibc_peer;
+ int ver = conn->ibc_version;
+ int rc;
+ int done;
+ struct ib_send_wr *bad_wrq;
+
+ LASSERT (tx->tx_queued);
+ /* We rely on this for QP sizing */
+ LASSERT (tx->tx_nwrq > 0);
+ LASSERT (tx->tx_nwrq <= 1 + IBLND_RDMA_FRAGS(ver));
+
+ LASSERT (credit == 0 || credit == 1);
+ LASSERT (conn->ibc_outstanding_credits >= 0);
+ LASSERT (conn->ibc_outstanding_credits <= IBLND_MSG_QUEUE_SIZE(ver));
+ LASSERT (conn->ibc_credits >= 0);
+ LASSERT (conn->ibc_credits <= IBLND_MSG_QUEUE_SIZE(ver));
+
+ if (conn->ibc_nsends_posted == IBLND_CONCURRENT_SENDS(ver)) {
+ /* tx completions outstanding... */
+ CDEBUG(D_NET, "%s: posted enough\n",
+ libcfs_nid2str(peer->ibp_nid));
+ return -EAGAIN;
+ }
+
+ if (credit != 0 && conn->ibc_credits == 0) { /* no credits */
+ CDEBUG(D_NET, "%s: no credits\n",
+ libcfs_nid2str(peer->ibp_nid));
+ return -EAGAIN;
+ }
+
+ if (credit != 0 && !IBLND_OOB_CAPABLE(ver) &&
+ conn->ibc_credits == 1 && /* last credit reserved */
+ msg->ibm_type != IBLND_MSG_NOOP) { /* for NOOP */
+ CDEBUG(D_NET, "%s: not using last credit\n",
+ libcfs_nid2str(peer->ibp_nid));
+ return -EAGAIN;
+ }
+
+ /* NB don't drop ibc_lock before bumping tx_sending */
+ list_del(&tx->tx_list);
+ tx->tx_queued = 0;
+
+ if (msg->ibm_type == IBLND_MSG_NOOP &&
+ (!kiblnd_need_noop(conn) || /* redundant NOOP */
+ (IBLND_OOB_CAPABLE(ver) && /* posted enough NOOP */
+ conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) {
+ /* OK to drop when posted enough NOOPs, since
+ * kiblnd_check_sends will queue NOOP again when
+ * posted NOOPs complete */
+ spin_unlock(&conn->ibc_lock);
+ kiblnd_tx_done(peer->ibp_ni, tx);
+ spin_lock(&conn->ibc_lock);
+ CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n",
+ libcfs_nid2str(peer->ibp_nid),
+ conn->ibc_noops_posted);
+ return 0;
+ }
+
+ kiblnd_pack_msg(peer->ibp_ni, msg, ver, conn->ibc_outstanding_credits,
+ peer->ibp_nid, conn->ibc_incarnation);
+
+ conn->ibc_credits -= credit;
+ conn->ibc_outstanding_credits = 0;
+ conn->ibc_nsends_posted++;
+ if (msg->ibm_type == IBLND_MSG_NOOP)
+ conn->ibc_noops_posted++;
+
+ /* CAVEAT EMPTOR! This tx could be the PUT_DONE of an RDMA
+ * PUT. If so, it was first queued here as a PUT_REQ, sent and
+ * stashed on ibc_active_txs, matched by an incoming PUT_ACK,
+ * and then re-queued here. It's (just) possible that
+ * tx_sending is non-zero if we've not done the tx_complete()
+ * from the first send; hence the ++ rather than = below. */
+ tx->tx_sending++;
+ list_add(&tx->tx_list, &conn->ibc_active_txs);
+
+ /* I'm still holding ibc_lock! */
+ if (conn->ibc_state != IBLND_CONN_ESTABLISHED) {
+ rc = -ECONNABORTED;
+ } else if (tx->tx_pool->tpo_pool.po_failed ||
+ conn->ibc_hdev != tx->tx_pool->tpo_hdev) {
+ /* close_conn will launch failover */
+ rc = -ENETDOWN;
+ } else {
+ rc = ib_post_send(conn->ibc_cmid->qp,
+ tx->tx_wrq, &bad_wrq);
+ }
+
+ conn->ibc_last_send = jiffies;
+
+ if (rc == 0)
+ return 0;
+
+ /* NB credits are transferred in the actual
+ * message, which can only be the last work item */
+ conn->ibc_credits += credit;
+ conn->ibc_outstanding_credits += msg->ibm_credits;
+ conn->ibc_nsends_posted--;
+ if (msg->ibm_type == IBLND_MSG_NOOP)
+ conn->ibc_noops_posted--;
+
+ tx->tx_status = rc;
+ tx->tx_waiting = 0;
+ tx->tx_sending--;
+
+ done = (tx->tx_sending == 0);
+ if (done)
+ list_del(&tx->tx_list);
+
+ spin_unlock(&conn->ibc_lock);
+
+ if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
+ CERROR("Error %d posting transmit to %s\n",
+ rc, libcfs_nid2str(peer->ibp_nid));
+ else
+ CDEBUG(D_NET, "Error %d posting transmit to %s\n",
+ rc, libcfs_nid2str(peer->ibp_nid));
+
+ kiblnd_close_conn(conn, rc);
+
+ if (done)
+ kiblnd_tx_done(peer->ibp_ni, tx);
+
+ spin_lock(&conn->ibc_lock);
+
+ return -EIO;
+}
+
+void
+kiblnd_check_sends (kib_conn_t *conn)
+{
+ int ver = conn->ibc_version;
+ lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+ kib_tx_t *tx;
+
+ /* Don't send anything until after the connection is established */
+ if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
+ CDEBUG(D_NET, "%s too soon\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ return;
+ }
+
+ spin_lock(&conn->ibc_lock);
+
+ LASSERT (conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver));
+ LASSERT (!IBLND_OOB_CAPABLE(ver) ||
+ conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
+ LASSERT (conn->ibc_reserved_credits >= 0);
+
+ while (conn->ibc_reserved_credits > 0 &&
+ !list_empty(&conn->ibc_tx_queue_rsrvd)) {
+ tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
+ kib_tx_t, tx_list);
+ list_del(&tx->tx_list);
+ list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
+ conn->ibc_reserved_credits--;
+ }
+
+ if (kiblnd_need_noop(conn)) {
+ spin_unlock(&conn->ibc_lock);
+
+ tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
+ if (tx != NULL)
+ kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);
+
+ spin_lock(&conn->ibc_lock);
+ if (tx != NULL)
+ kiblnd_queue_tx_locked(tx, conn);
+ }
+
+ kiblnd_conn_addref(conn); /* 1 ref for me.... (see b21911) */
+
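+	/* drain queues in priority order: no-credit txs, then NOOPs (pre-OOB protocol), then the normal queue */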
+ for (;;) {
+ int credit;
+
+ if (!list_empty(&conn->ibc_tx_queue_nocred)) {
+ credit = 0;
+ tx = list_entry(conn->ibc_tx_queue_nocred.next,
+ kib_tx_t, tx_list);
+ } else if (!list_empty(&conn->ibc_tx_noops)) {
+ LASSERT (!IBLND_OOB_CAPABLE(ver));
+ credit = 1;
+ tx = list_entry(conn->ibc_tx_noops.next,
+ kib_tx_t, tx_list);
+ } else if (!list_empty(&conn->ibc_tx_queue)) {
+ credit = 1;
+ tx = list_entry(conn->ibc_tx_queue.next,
+ kib_tx_t, tx_list);
+ } else
+ break;
+
+ if (kiblnd_post_tx_locked(conn, tx, credit) != 0)
+ break;
+ }
+
+ spin_unlock(&conn->ibc_lock);
+
+ kiblnd_conn_decref(conn); /* ...until here */
+}
+
+void
+kiblnd_tx_complete (kib_tx_t *tx, int status)
+{
+ int failed = (status != IB_WC_SUCCESS);
+ kib_conn_t *conn = tx->tx_conn;
+ int idle;
+
+ LASSERT (tx->tx_sending > 0);
+
+ if (failed) {
+ if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
+ CNETERR("Tx -> %s cookie "LPX64
+ " sending %d waiting %d: failed %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ tx->tx_cookie, tx->tx_sending, tx->tx_waiting,
+ status);
+
+ kiblnd_close_conn(conn, -EIO);
+ } else {
+ kiblnd_peer_alive(conn->ibc_peer);
+ }
+
+ spin_lock(&conn->ibc_lock);
+
+ /* I could be racing with rdma completion. Whoever makes 'tx' idle
+ * gets to free it, which also drops its ref on 'conn'. */
+
+ tx->tx_sending--;
+ conn->ibc_nsends_posted--;
+ if (tx->tx_msg->ibm_type == IBLND_MSG_NOOP)
+ conn->ibc_noops_posted--;
+
+ if (failed) {
+ tx->tx_waiting = 0; /* don't wait for peer */
+ tx->tx_status = -EIO;
+ }
+
+ idle = (tx->tx_sending == 0) && /* This is the final callback */
+ !tx->tx_waiting && /* Not waiting for peer */
+ !tx->tx_queued; /* Not re-queued (PUT_DONE) */
+ if (idle)
+ list_del(&tx->tx_list);
+
+ kiblnd_conn_addref(conn); /* 1 ref for me.... */
+
+ spin_unlock(&conn->ibc_lock);
+
+ if (idle)
+ kiblnd_tx_done(conn->ibc_peer->ibp_ni, tx);
+
+ kiblnd_check_sends(conn);
+
+ kiblnd_conn_decref(conn); /* ...until here */
+}
+
+void
+kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
+{
+ kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
+ struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq];
+ struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
+ int nob = offsetof (kib_msg_t, ibm_u) + body_nob;
+ struct ib_mr *mr;
+
+ LASSERT (tx->tx_nwrq >= 0);
+ LASSERT (tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
+ LASSERT (nob <= IBLND_MSG_SIZE);
+
+ kiblnd_init_msg(tx->tx_msg, type, body_nob);
+
+ mr = kiblnd_find_dma_mr(hdev, tx->tx_msgaddr, nob);
+ LASSERT (mr != NULL);
+
+ sge->lkey = mr->lkey;
+ sge->addr = tx->tx_msgaddr;
+ sge->length = nob;
+
+ memset(wrq, 0, sizeof(*wrq));
+
+ wrq->next = NULL;
+ wrq->wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);
+ wrq->sg_list = sge;
+ wrq->num_sge = 1;
+ wrq->opcode = IB_WR_SEND;
+ wrq->send_flags = IB_SEND_SIGNALED;
+
+ tx->tx_nwrq++;
+}
+
+int
+kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type,
+ int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
+{
+ kib_msg_t *ibmsg = tx->tx_msg;
+ kib_rdma_desc_t *srcrd = tx->tx_rd;
+ struct ib_sge *sge = &tx->tx_sge[0];
+ struct ib_send_wr *wrq = &tx->tx_wrq[0];
+ int rc = resid;
+ int srcidx;
+ int dstidx;
+ int wrknob;
+
+ LASSERT (!in_interrupt());
+ LASSERT (tx->tx_nwrq == 0);
+ LASSERT (type == IBLND_MSG_GET_DONE ||
+ type == IBLND_MSG_PUT_DONE);
+
+ srcidx = dstidx = 0;
+
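+	/* build one RDMA_WRITE work request per overlapping src/dst fragment until 'resid' bytes are covered */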
+ while (resid > 0) {
+ if (srcidx >= srcrd->rd_nfrags) {
+ CERROR("Src buffer exhausted: %d frags\n", srcidx);
+ rc = -EPROTO;
+ break;
+ }
+
+ if (dstidx == dstrd->rd_nfrags) {
+ CERROR("Dst buffer exhausted: %d frags\n", dstidx);
+ rc = -EPROTO;
+ break;
+ }
+
+ if (tx->tx_nwrq == IBLND_RDMA_FRAGS(conn->ibc_version)) {
+ CERROR("RDMA too fragmented for %s (%d): "
+ "%d/%d src %d/%d dst frags\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ IBLND_RDMA_FRAGS(conn->ibc_version),
+ srcidx, srcrd->rd_nfrags,
+ dstidx, dstrd->rd_nfrags);
+ rc = -EMSGSIZE;
+ break;
+ }
+
+ wrknob = MIN(MIN(kiblnd_rd_frag_size(srcrd, srcidx),
+ kiblnd_rd_frag_size(dstrd, dstidx)), resid);
+
+ sge = &tx->tx_sge[tx->tx_nwrq];
+ sge->addr = kiblnd_rd_frag_addr(srcrd, srcidx);
+ sge->lkey = kiblnd_rd_frag_key(srcrd, srcidx);
+ sge->length = wrknob;
+
+ wrq = &tx->tx_wrq[tx->tx_nwrq];
+
+ wrq->next = wrq + 1;
+ wrq->wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
+ wrq->sg_list = sge;
+ wrq->num_sge = 1;
+ wrq->opcode = IB_WR_RDMA_WRITE;
+ wrq->send_flags = 0;
+
+ wrq->wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx);
+ wrq->wr.rdma.rkey = kiblnd_rd_frag_key(dstrd, dstidx);
+
+ srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, wrknob);
+ dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, wrknob);
+
+ resid -= wrknob;
+
+ tx->tx_nwrq++;
+ wrq++;
+ sge++;
+ }
+
+ if (rc < 0) /* no RDMA if completing with failure */
+ tx->tx_nwrq = 0;
+
+ ibmsg->ibm_u.completion.ibcm_status = rc;
+ ibmsg->ibm_u.completion.ibcm_cookie = dstcookie;
+ kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx,
+ type, sizeof (kib_completion_msg_t));
+
+ return rc;
+}
+
+void
+kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn)
+{
+ struct list_head *q;
+
+ LASSERT (tx->tx_nwrq > 0); /* work items set up */
+ LASSERT (!tx->tx_queued); /* not queued for sending already */
+ LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+
+ tx->tx_queued = 1;
+ tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ);
+
+ if (tx->tx_conn == NULL) {
+ kiblnd_conn_addref(conn);
+ tx->tx_conn = conn;
+ LASSERT (tx->tx_msg->ibm_type != IBLND_MSG_PUT_DONE);
+ } else {
+ /* PUT_DONE first attached to conn as a PUT_REQ */
+ LASSERT (tx->tx_conn == conn);
+ LASSERT (tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE);
+ }
+
+ switch (tx->tx_msg->ibm_type) {
+ default:
+ LBUG();
+
+ case IBLND_MSG_PUT_REQ:
+ case IBLND_MSG_GET_REQ:
+ q = &conn->ibc_tx_queue_rsrvd;
+ break;
+
+ case IBLND_MSG_PUT_NAK:
+ case IBLND_MSG_PUT_ACK:
+ case IBLND_MSG_PUT_DONE:
+ case IBLND_MSG_GET_DONE:
+ q = &conn->ibc_tx_queue_nocred;
+ break;
+
+ case IBLND_MSG_NOOP:
+ if (IBLND_OOB_CAPABLE(conn->ibc_version))
+ q = &conn->ibc_tx_queue_nocred;
+ else
+ q = &conn->ibc_tx_noops;
+ break;
+
+ case IBLND_MSG_IMMEDIATE:
+ q = &conn->ibc_tx_queue;
+ break;
+ }
+
+ list_add_tail(&tx->tx_list, q);
+}
+
+void
+kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn)
+{
+ spin_lock(&conn->ibc_lock);
+ kiblnd_queue_tx_locked(tx, conn);
+ spin_unlock(&conn->ibc_lock);
+
+ kiblnd_check_sends(conn);
+}
+
+static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
+ struct sockaddr_in *srcaddr,
+ struct sockaddr_in *dstaddr,
+ int timeout_ms)
+{
+ unsigned short port;
+ int rc;
+
+ /* allow the port to be reused */
+ rc = rdma_set_reuseaddr(cmid, 1);
+ if (rc != 0) {
+ CERROR("Unable to set reuse on cmid: %d\n", rc);
+ return rc;
+ }
+
+ /* look for a free privileged port */
+ for (port = PROT_SOCK-1; port > 0; port--) {
+ srcaddr->sin_port = htons(port);
+ rc = rdma_resolve_addr(cmid,
+ (struct sockaddr *)srcaddr,
+ (struct sockaddr *)dstaddr,
+ timeout_ms);
+ if (rc == 0) {
+ CDEBUG(D_NET, "bound to port %hu\n", port);
+ return 0;
+ } else if (rc == -EADDRINUSE || rc == -EADDRNOTAVAIL) {
+ CDEBUG(D_NET, "bind to port %hu failed: %d\n",
+ port, rc);
+ } else {
+ return rc;
+ }
+ }
+
+ CERROR("Failed to bind to a free privileged port\n");
+ return rc;
+}
+
+void
+kiblnd_connect_peer (kib_peer_t *peer)
+{
+ struct rdma_cm_id *cmid;
+ kib_dev_t *dev;
+ kib_net_t *net = peer->ibp_ni->ni_data;
+ struct sockaddr_in srcaddr;
+ struct sockaddr_in dstaddr;
+ int rc;
+
+ LASSERT (net != NULL);
+ LASSERT (peer->ibp_connecting > 0);
+
+ cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP,
+ IB_QPT_RC);
+
+ if (IS_ERR(cmid)) {
+ CERROR("Can't create CMID for %s: %ld\n",
+ libcfs_nid2str(peer->ibp_nid), PTR_ERR(cmid));
+ rc = PTR_ERR(cmid);
+ goto failed;
+ }
+
+ dev = net->ibn_dev;
+ memset(&srcaddr, 0, sizeof(srcaddr));
+ srcaddr.sin_family = AF_INET;
+ srcaddr.sin_addr.s_addr = htonl(dev->ibd_ifip);
+
+ memset(&dstaddr, 0, sizeof(dstaddr));
+ dstaddr.sin_family = AF_INET;
+ dstaddr.sin_port = htons(*kiblnd_tunables.kib_service);
+ dstaddr.sin_addr.s_addr = htonl(LNET_NIDADDR(peer->ibp_nid));
+
+ kiblnd_peer_addref(peer); /* cmid's ref */
+
+ if (*kiblnd_tunables.kib_use_priv_port) {
+ rc = kiblnd_resolve_addr(cmid, &srcaddr, &dstaddr,
+ *kiblnd_tunables.kib_timeout * 1000);
+ } else {
+ rc = rdma_resolve_addr(cmid,
+ (struct sockaddr *)&srcaddr,
+ (struct sockaddr *)&dstaddr,
+ *kiblnd_tunables.kib_timeout * 1000);
+ }
+ if (rc != 0) {
+ /* Can't initiate address resolution: */
+ CERROR("Can't resolve addr for %s: %d\n",
+ libcfs_nid2str(peer->ibp_nid), rc);
+ goto failed2;
+ }
+
+ LASSERT (cmid->device != NULL);
+ CDEBUG(D_NET, "%s: connection bound to %s:%pI4h:%s\n",
+ libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname,
+ &dev->ibd_ifip, cmid->device->name);
+
+ return;
+
+ failed2:
+ kiblnd_peer_decref(peer); /* cmid's ref */
+ rdma_destroy_id(cmid);
+ failed:
+ kiblnd_peer_connect_failed(peer, 1, rc);
+}
+
+void
+kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
+{
+ kib_peer_t *peer;
+ kib_peer_t *peer2;
+ kib_conn_t *conn;
+ rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+ unsigned long flags;
+ int rc;
+
+ /* If I get here, I've committed to send, so I complete the tx with
+ * failure on any problems */
+
+ LASSERT (tx == NULL || tx->tx_conn == NULL); /* only set when assigned a conn */
+ LASSERT (tx == NULL || tx->tx_nwrq > 0); /* work items have been set up */
+
+ /* First time, just use a read lock since I expect to find my peer
+ * connected */
+ read_lock_irqsave(g_lock, flags);
+
+ peer = kiblnd_find_peer_locked(nid);
+ if (peer != NULL && !list_empty(&peer->ibp_conns)) {
+ /* Found a peer with an established connection */
+ conn = kiblnd_get_conn_locked(peer);
+ kiblnd_conn_addref(conn); /* 1 ref for me... */
+
+ read_unlock_irqrestore(g_lock, flags);
+
+ if (tx != NULL)
+ kiblnd_queue_tx(tx, conn);
+ kiblnd_conn_decref(conn); /* ...to here */
+ return;
+ }
+
+ read_unlock(g_lock);
+ /* Re-try with a write lock */
+ write_lock(g_lock);
+
+ peer = kiblnd_find_peer_locked(nid);
+ if (peer != NULL) {
+ if (list_empty(&peer->ibp_conns)) {
+ /* found a peer, but it's still connecting... */
+ LASSERT (peer->ibp_connecting != 0 ||
+ peer->ibp_accepting != 0);
+ if (tx != NULL)
+ list_add_tail(&tx->tx_list,
+ &peer->ibp_tx_queue);
+ write_unlock_irqrestore(g_lock, flags);
+ } else {
+ conn = kiblnd_get_conn_locked(peer);
+ kiblnd_conn_addref(conn); /* 1 ref for me... */
+
+ write_unlock_irqrestore(g_lock, flags);
+
+ if (tx != NULL)
+ kiblnd_queue_tx(tx, conn);
+ kiblnd_conn_decref(conn); /* ...to here */
+ }
+ return;
+ }
+
+ write_unlock_irqrestore(g_lock, flags);
+
+ /* Allocate a peer ready to add to the peer table and retry */
+ rc = kiblnd_create_peer(ni, &peer, nid);
+ if (rc != 0) {
+ CERROR("Can't create peer %s\n", libcfs_nid2str(nid));
+ if (tx != NULL) {
+ tx->tx_status = -EHOSTUNREACH;
+ tx->tx_waiting = 0;
+ kiblnd_tx_done(ni, tx);
+ }
+ return;
+ }
+
+ write_lock_irqsave(g_lock, flags);
+
+ peer2 = kiblnd_find_peer_locked(nid);
+ if (peer2 != NULL) {
+ if (list_empty(&peer2->ibp_conns)) {
+ /* found a peer, but it's still connecting... */
+ LASSERT (peer2->ibp_connecting != 0 ||
+ peer2->ibp_accepting != 0);
+ if (tx != NULL)
+ list_add_tail(&tx->tx_list,
+ &peer2->ibp_tx_queue);
+ write_unlock_irqrestore(g_lock, flags);
+ } else {
+ conn = kiblnd_get_conn_locked(peer2);
+ kiblnd_conn_addref(conn); /* 1 ref for me... */
+
+ write_unlock_irqrestore(g_lock, flags);
+
+ if (tx != NULL)
+ kiblnd_queue_tx(tx, conn);
+ kiblnd_conn_decref(conn); /* ...to here */
+ }
+
+ kiblnd_peer_decref(peer);
+ return;
+ }
+
+ /* Brand new peer */
+ LASSERT (peer->ibp_connecting == 0);
+ peer->ibp_connecting = 1;
+
+ /* always called with a ref on ni, which prevents ni being shutdown */
+ LASSERT (((kib_net_t *)ni->ni_data)->ibn_shutdown == 0);
+
+ if (tx != NULL)
+ list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);
+
+ kiblnd_peer_addref(peer);
+ list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
+
+ write_unlock_irqrestore(g_lock, flags);
+
+ kiblnd_connect_peer(peer);
+ kiblnd_peer_decref(peer);
+}
+
+int
+kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
+{
+ lnet_hdr_t *hdr = &lntmsg->msg_hdr;
+ int type = lntmsg->msg_type;
+ lnet_process_id_t target = lntmsg->msg_target;
+ int target_is_router = lntmsg->msg_target_is_router;
+ int routing = lntmsg->msg_routing;
+ unsigned int payload_niov = lntmsg->msg_niov;
+ struct iovec *payload_iov = lntmsg->msg_iov;
+ lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
+ unsigned int payload_offset = lntmsg->msg_offset;
+ unsigned int payload_nob = lntmsg->msg_len;
+ kib_msg_t *ibmsg;
+ kib_tx_t *tx;
+ int nob;
+ int rc;
+
+ /* NB 'private' is different depending on what we're sending.... */
+
+ CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
+ payload_nob, payload_niov, libcfs_id2str(target));
+
+ LASSERT (payload_nob == 0 || payload_niov > 0);
+ LASSERT (payload_niov <= LNET_MAX_IOV);
+
+ /* Thread context */
+ LASSERT (!in_interrupt());
+ /* payload is either all vaddrs or all pages */
+ LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
+
+ switch (type) {
+ default:
+ LBUG();
+ return (-EIO);
+
+ case LNET_MSG_ACK:
+ LASSERT (payload_nob == 0);
+ break;
+
+ case LNET_MSG_GET:
+ if (routing || target_is_router)
+ break; /* send IMMEDIATE */
+
+ /* is the REPLY message too small for RDMA? */
+ nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
+ if (nob <= IBLND_MSG_SIZE)
+ break; /* send IMMEDIATE */
+
+ tx = kiblnd_get_idle_tx(ni, target.nid);
+ if (tx == NULL) {
+ CERROR("Can't allocate txd for GET to %s\n",
+ libcfs_nid2str(target.nid));
+ return -ENOMEM;
+ }
+
+ ibmsg = tx->tx_msg;
+
+ if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
+ rc = kiblnd_setup_rd_iov(ni, tx,
+ &ibmsg->ibm_u.get.ibgm_rd,
+ lntmsg->msg_md->md_niov,
+ lntmsg->msg_md->md_iov.iov,
+ 0, lntmsg->msg_md->md_length);
+ else
+ rc = kiblnd_setup_rd_kiov(ni, tx,
+ &ibmsg->ibm_u.get.ibgm_rd,
+ lntmsg->msg_md->md_niov,
+ lntmsg->msg_md->md_iov.kiov,
+ 0, lntmsg->msg_md->md_length);
+ if (rc != 0) {
+ CERROR("Can't setup GET sink for %s: %d\n",
+ libcfs_nid2str(target.nid), rc);
+ kiblnd_tx_done(ni, tx);
+ return -EIO;
+ }
+
+ nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[tx->tx_nfrags]);
+ ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
+ ibmsg->ibm_u.get.ibgm_hdr = *hdr;
+
+ kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob);
+
+ tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
+ if (tx->tx_lntmsg[1] == NULL) {
+ CERROR("Can't create reply for GET -> %s\n",
+ libcfs_nid2str(target.nid));
+ kiblnd_tx_done(ni, tx);
+ return -EIO;
+ }
+
+ tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg[0,1] on completion */
+ tx->tx_waiting = 1; /* waiting for GET_DONE */
+ kiblnd_launch_tx(ni, tx, target.nid);
+ return 0;
+
+ case LNET_MSG_REPLY:
+ case LNET_MSG_PUT:
+ /* Is the payload small enough not to need RDMA? */
+ nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob]);
+ if (nob <= IBLND_MSG_SIZE)
+ break; /* send IMMEDIATE */
+
+ tx = kiblnd_get_idle_tx(ni, target.nid);
+ if (tx == NULL) {
+ CERROR("Can't allocate %s txd for %s\n",
+ type == LNET_MSG_PUT ? "PUT" : "REPLY",
+ libcfs_nid2str(target.nid));
+ return -ENOMEM;
+ }
+
+ if (payload_kiov == NULL)
+ rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
+ payload_niov, payload_iov,
+ payload_offset, payload_nob);
+ else
+ rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
+ payload_niov, payload_kiov,
+ payload_offset, payload_nob);
+ if (rc != 0) {
+ CERROR("Can't setup PUT src for %s: %d\n",
+ libcfs_nid2str(target.nid), rc);
+ kiblnd_tx_done(ni, tx);
+ return -EIO;
+ }
+
+ ibmsg = tx->tx_msg;
+ ibmsg->ibm_u.putreq.ibprm_hdr = *hdr;
+ ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
+ kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(kib_putreq_msg_t));
+
+ tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
+ tx->tx_waiting = 1; /* waiting for PUT_{ACK,NAK} */
+ kiblnd_launch_tx(ni, tx, target.nid);
+ return 0;
+ }
+
+ /* send IMMEDIATE */
+
+ LASSERT (offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob])
+ <= IBLND_MSG_SIZE);
+
+ tx = kiblnd_get_idle_tx(ni, target.nid);
+ if (tx == NULL) {
+ CERROR ("Can't send %d to %s: tx descs exhausted\n",
+ type, libcfs_nid2str(target.nid));
+ return -ENOMEM;
+ }
+
+ ibmsg = tx->tx_msg;
+ ibmsg->ibm_u.immediate.ibim_hdr = *hdr;
+
+ if (payload_kiov != NULL)
+ lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
+ offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+ payload_niov, payload_kiov,
+ payload_offset, payload_nob);
+ else
+ lnet_copy_iov2flat(IBLND_MSG_SIZE, ibmsg,
+ offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+ payload_niov, payload_iov,
+ payload_offset, payload_nob);
+
+ nob = offsetof(kib_immediate_msg_t, ibim_payload[payload_nob]);
+ kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
+
+ tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
+ kiblnd_launch_tx(ni, tx, target.nid);
+ return 0;
+}
+
+void
+kiblnd_reply (lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
+{
+ lnet_process_id_t target = lntmsg->msg_target;
+ unsigned int niov = lntmsg->msg_niov;
+ struct iovec *iov = lntmsg->msg_iov;
+ lnet_kiov_t *kiov = lntmsg->msg_kiov;
+ unsigned int offset = lntmsg->msg_offset;
+ unsigned int nob = lntmsg->msg_len;
+ kib_tx_t *tx;
+ int rc;
+
+ tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
+ if (tx == NULL) {
+ CERROR("Can't get tx for REPLY to %s\n",
+ libcfs_nid2str(target.nid));
+ goto failed_0;
+ }
+
+ if (nob == 0)
+ rc = 0;
+ else if (kiov == NULL)
+ rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
+ niov, iov, offset, nob);
+ else
+ rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
+ niov, kiov, offset, nob);
+
+ if (rc != 0) {
+ CERROR("Can't setup GET src for %s: %d\n",
+ libcfs_nid2str(target.nid), rc);
+ goto failed_1;
+ }
+
+ rc = kiblnd_init_rdma(rx->rx_conn, tx,
+ IBLND_MSG_GET_DONE, nob,
+ &rx->rx_msg->ibm_u.get.ibgm_rd,
+ rx->rx_msg->ibm_u.get.ibgm_cookie);
+ if (rc < 0) {
+ CERROR("Can't setup rdma for GET from %s: %d\n",
+ libcfs_nid2str(target.nid), rc);
+ goto failed_1;
+ }
+
+ if (nob == 0) {
+ /* No RDMA: local completion may happen now! */
+ lnet_finalize(ni, lntmsg, 0);
+ } else {
+ /* RDMA: lnet_finalize(lntmsg) when it
+ * completes */
+ tx->tx_lntmsg[0] = lntmsg;
+ }
+
+ kiblnd_queue_tx(tx, rx->rx_conn);
+ return;
+
+ failed_1:
+ kiblnd_tx_done(ni, tx);
+ failed_0:
+ lnet_finalize(ni, lntmsg, -EIO);
+}
+
+int
+kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
+ unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
+ unsigned int offset, unsigned int mlen, unsigned int rlen)
+{
+ kib_rx_t *rx = private;
+ kib_msg_t *rxmsg = rx->rx_msg;
+ kib_conn_t *conn = rx->rx_conn;
+ kib_tx_t *tx;
+ kib_msg_t *txmsg;
+ int nob;
+ int post_credit = IBLND_POSTRX_PEER_CREDIT;
+ int rc = 0;
+
+ LASSERT (mlen <= rlen);
+ LASSERT (!in_interrupt());
+ /* Either all pages or all vaddrs */
+ LASSERT (!(kiov != NULL && iov != NULL));
+
+ switch (rxmsg->ibm_type) {
+ default:
+ LBUG();
+
+ case IBLND_MSG_IMMEDIATE:
+ nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]);
+ if (nob > rx->rx_nob) {
+ CERROR ("Immediate message from %s too big: %d(%d)\n",
+ libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
+ nob, rx->rx_nob);
+ rc = -EPROTO;
+ break;
+ }
+
+ if (kiov != NULL)
+ lnet_copy_flat2kiov(niov, kiov, offset,
+ IBLND_MSG_SIZE, rxmsg,
+ offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+ mlen);
+ else
+ lnet_copy_flat2iov(niov, iov, offset,
+ IBLND_MSG_SIZE, rxmsg,
+ offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+ mlen);
+ lnet_finalize (ni, lntmsg, 0);
+ break;
+
+ case IBLND_MSG_PUT_REQ:
+ if (mlen == 0) {
+ lnet_finalize(ni, lntmsg, 0);
+ kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, 0,
+ rxmsg->ibm_u.putreq.ibprm_cookie);
+ break;
+ }
+
+ tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
+ if (tx == NULL) {
+ CERROR("Can't allocate tx for %s\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ /* Not replying will break the connection */
+ rc = -ENOMEM;
+ break;
+ }
+
+ txmsg = tx->tx_msg;
+ if (kiov == NULL)
+ rc = kiblnd_setup_rd_iov(ni, tx,
+ &txmsg->ibm_u.putack.ibpam_rd,
+ niov, iov, offset, mlen);
+ else
+ rc = kiblnd_setup_rd_kiov(ni, tx,
+ &txmsg->ibm_u.putack.ibpam_rd,
+ niov, kiov, offset, mlen);
+ if (rc != 0) {
+ CERROR("Can't setup PUT sink for %s: %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
+ kiblnd_tx_done(ni, tx);
+ /* tell peer it's over */
+ kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, rc,
+ rxmsg->ibm_u.putreq.ibprm_cookie);
+ break;
+ }
+
+ nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[tx->tx_nfrags]);
+ txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
+ txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;
+
+ kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_ACK, nob);
+
+ tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
+ tx->tx_waiting = 1; /* waiting for PUT_DONE */
+ kiblnd_queue_tx(tx, conn);
+
+ /* reposted buffer reserved for PUT_DONE */
+ post_credit = IBLND_POSTRX_NO_CREDIT;
+ break;
+
+ case IBLND_MSG_GET_REQ:
+ if (lntmsg != NULL) {
+ /* Optimized GET; RDMA lntmsg's payload */
+ kiblnd_reply(ni, rx, lntmsg);
+ } else {
+ /* GET didn't match anything */
+ kiblnd_send_completion(rx->rx_conn, IBLND_MSG_GET_DONE,
+ -ENODATA,
+ rxmsg->ibm_u.get.ibgm_cookie);
+ }
+ break;
+ }
+
+ kiblnd_post_rx(rx, post_credit);
+ return rc;
+}
+
+int
+kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
+{
+ struct task_struct *task = kthread_run(fn, arg, "%s", name);
+
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+
+ atomic_inc(&kiblnd_data.kib_nthreads);
+ return 0;
+}
+
+void
+kiblnd_thread_fini (void)
+{
+ atomic_dec (&kiblnd_data.kib_nthreads);
+}
+
+void
+kiblnd_peer_alive (kib_peer_t *peer)
+{
+ /* This is racy, but everyone's only writing cfs_time_current() */
+ peer->ibp_last_alive = cfs_time_current();
+ mb();
+}
+
+void
+kiblnd_peer_notify (kib_peer_t *peer)
+{
+ int error = 0;
+ cfs_time_t last_alive = 0;
+ unsigned long flags;
+
+ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+
+ if (list_empty(&peer->ibp_conns) &&
+ peer->ibp_accepting == 0 &&
+ peer->ibp_connecting == 0 &&
+ peer->ibp_error != 0) {
+ error = peer->ibp_error;
+ peer->ibp_error = 0;
+
+ last_alive = peer->ibp_last_alive;
+ }
+
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+ if (error != 0)
+ lnet_notify(peer->ibp_ni,
+ peer->ibp_nid, 0, last_alive);
+}
+
+void
+kiblnd_close_conn_locked (kib_conn_t *conn, int error)
+{
+ /* This just does the immediate housekeeping. 'error' is zero for a
+ * normal shutdown which can happen only after the connection has been
+ * established. If the connection is established, schedule the
+ * connection to be finished off by the connd. Otherwise the connd is
+ * already dealing with it (either to set it up or tear it down).
+ * Caller holds kib_global_lock exclusively in irq context */
+ kib_peer_t *peer = conn->ibc_peer;
+ kib_dev_t *dev;
+ unsigned long flags;
+
+ LASSERT (error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+
+ if (error != 0 && conn->ibc_comms_error == 0)
+ conn->ibc_comms_error = error;
+
+ if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
+ return; /* already being handled */
+
+ if (error == 0 &&
+ list_empty(&conn->ibc_tx_noops) &&
+ list_empty(&conn->ibc_tx_queue) &&
+ list_empty(&conn->ibc_tx_queue_rsrvd) &&
+ list_empty(&conn->ibc_tx_queue_nocred) &&
+ list_empty(&conn->ibc_active_txs)) {
+ CDEBUG(D_NET, "closing conn to %s\n",
+ libcfs_nid2str(peer->ibp_nid));
+ } else {
+ CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
+ libcfs_nid2str(peer->ibp_nid), error,
+ list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
+ list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
+ list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
+ list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
+ list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
+ }
+
+ dev = ((kib_net_t *)peer->ibp_ni->ni_data)->ibn_dev;
+ list_del(&conn->ibc_list);
+ /* connd (see below) takes over ibc_list's ref */
+
+ if (list_empty (&peer->ibp_conns) && /* no more conns */
+ kiblnd_peer_active(peer)) { /* still in peer table */
+ kiblnd_unlink_peer_locked(peer);
+
+ /* set/clear error on last conn */
+ peer->ibp_error = conn->ibc_comms_error;
+ }
+
+ kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
+
+ if (error != 0 &&
+ kiblnd_dev_can_failover(dev)) {
+ list_add_tail(&dev->ibd_fail_list,
+ &kiblnd_data.kib_failed_devs);
+ wake_up(&kiblnd_data.kib_failover_waitq);
+ }
+
+ spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+
+ list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns);
+ wake_up(&kiblnd_data.kib_connd_waitq);
+
+ spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+}
+
+void
+kiblnd_close_conn(kib_conn_t *conn, int error)
+{
+ unsigned long flags;
+
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+
+ kiblnd_close_conn_locked(conn, error);
+
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+}
+
+void
+kiblnd_handle_early_rxs(kib_conn_t *conn)
+{
+ unsigned long flags;
+ kib_rx_t *rx;
+
+ LASSERT(!in_interrupt());
+ LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ while (!list_empty(&conn->ibc_early_rxs)) {
+ rx = list_entry(conn->ibc_early_rxs.next,
+ kib_rx_t, rx_list);
+ list_del(&rx->rx_list);
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+ kiblnd_handle_rx(rx);
+
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ }
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+}
+
+void
+kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
+{
+ LIST_HEAD (zombies);
+ struct list_head *tmp;
+ struct list_head *nxt;
+ kib_tx_t *tx;
+
+ spin_lock(&conn->ibc_lock);
+
+ list_for_each_safe (tmp, nxt, txs) {
+ tx = list_entry (tmp, kib_tx_t, tx_list);
+
+ if (txs == &conn->ibc_active_txs) {
+ LASSERT (!tx->tx_queued);
+ LASSERT (tx->tx_waiting ||
+ tx->tx_sending != 0);
+ } else {
+ LASSERT (tx->tx_queued);
+ }
+
+ tx->tx_status = -ECONNABORTED;
+ tx->tx_waiting = 0;
+
+ if (tx->tx_sending == 0) {
+ tx->tx_queued = 0;
+ list_del (&tx->tx_list);
+ list_add (&tx->tx_list, &zombies);
+ }
+ }
+
+ spin_unlock(&conn->ibc_lock);
+
+ kiblnd_txlist_done(conn->ibc_peer->ibp_ni, &zombies, -ECONNABORTED);
+}
+
+void
+kiblnd_finalise_conn (kib_conn_t *conn)
+{
+ LASSERT (!in_interrupt());
+ LASSERT (conn->ibc_state > IBLND_CONN_INIT);
+
+ kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
+
+ /* abort_receives moves QP state to IB_QPS_ERR. This is only required
+ * for connections that didn't get as far as being connected, because
+ * rdma_disconnect() does this for free. */
+ kiblnd_abort_receives(conn);
+
+ /* Complete all tx descs not waiting for sends to complete.
+ * NB we should be safe from RDMA now that the QP has changed state */
+
+ kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
+ kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
+ kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
+ kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
+ kiblnd_abort_txs(conn, &conn->ibc_active_txs);
+
+ kiblnd_handle_early_rxs(conn);
+}
+
+void
+kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error)
+{
+ LIST_HEAD (zombies);
+ unsigned long flags;
+
+ LASSERT (error != 0);
+ LASSERT (!in_interrupt());
+
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+
+ if (active) {
+ LASSERT (peer->ibp_connecting > 0);
+ peer->ibp_connecting--;
+ } else {
+ LASSERT (peer->ibp_accepting > 0);
+ peer->ibp_accepting--;
+ }
+
+ if (peer->ibp_connecting != 0 ||
+ peer->ibp_accepting != 0) {
+ /* another connection attempt under way... */
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+ flags);
+ return;
+ }
+
+ if (list_empty(&peer->ibp_conns)) {
+ /* Take peer's blocked transmits to complete with error */
+ list_add(&zombies, &peer->ibp_tx_queue);
+ list_del_init(&peer->ibp_tx_queue);
+
+ if (kiblnd_peer_active(peer))
+ kiblnd_unlink_peer_locked(peer);
+
+ peer->ibp_error = error;
+ } else {
+ /* Can't have blocked transmits if there are connections */
+ LASSERT (list_empty(&peer->ibp_tx_queue));
+ }
+
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+ kiblnd_peer_notify(peer);
+
+ if (list_empty (&zombies))
+ return;
+
+ CNETERR("Deleting messages for %s: connection failed\n",
+ libcfs_nid2str(peer->ibp_nid));
+
+ kiblnd_txlist_done(peer->ibp_ni, &zombies, -EHOSTUNREACH);
+}
+
+void
+kiblnd_connreq_done(kib_conn_t *conn, int status)
+{
+ kib_peer_t *peer = conn->ibc_peer;
+ kib_tx_t *tx;
+ struct list_head txs;
+ unsigned long flags;
+ int active;
+
+ active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
+
+ CDEBUG(D_NET,"%s: active(%d), version(%x), status(%d)\n",
+ libcfs_nid2str(peer->ibp_nid), active,
+ conn->ibc_version, status);
+
+ LASSERT (!in_interrupt());
+ LASSERT ((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
+ peer->ibp_connecting > 0) ||
+ (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
+ peer->ibp_accepting > 0));
+
+ LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
+ conn->ibc_connvars = NULL;
+
+ if (status != 0) {
+ /* failed to establish connection */
+ kiblnd_peer_connect_failed(peer, active, status);
+ kiblnd_finalise_conn(conn);
+ return;
+ }
+
+ /* connection established */
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+
+ conn->ibc_last_send = jiffies;
+ kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
+ kiblnd_peer_alive(peer);
+
+ /* Add conn to peer's list and nuke any dangling conns from a different
+ * peer instance... */
+ kiblnd_conn_addref(conn); /* +1 ref for ibc_list */
+ list_add(&conn->ibc_list, &peer->ibp_conns);
+ if (active)
+ peer->ibp_connecting--;
+ else
+ peer->ibp_accepting--;
+
+ if (peer->ibp_version == 0) {
+ peer->ibp_version = conn->ibc_version;
+ peer->ibp_incarnation = conn->ibc_incarnation;
+ }
+
+ if (peer->ibp_version != conn->ibc_version ||
+ peer->ibp_incarnation != conn->ibc_incarnation) {
+ kiblnd_close_stale_conns_locked(peer, conn->ibc_version,
+ conn->ibc_incarnation);
+ peer->ibp_version = conn->ibc_version;
+ peer->ibp_incarnation = conn->ibc_incarnation;
+ }
+
+ /* grab pending txs while I have the lock */
+ list_add(&txs, &peer->ibp_tx_queue);
+ list_del_init(&peer->ibp_tx_queue);
+
+ if (!kiblnd_peer_active(peer) || /* peer has been deleted */
+ conn->ibc_comms_error != 0) { /* error has happened already */
+ lnet_ni_t *ni = peer->ibp_ni;
+
+ /* start to shut down connection */
+ kiblnd_close_conn_locked(conn, -ECONNABORTED);
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+ kiblnd_txlist_done(ni, &txs, -ECONNABORTED);
+
+ return;
+ }
+
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+ /* Schedule blocked txs */
+ spin_lock(&conn->ibc_lock);
+ while (!list_empty(&txs)) {
+ tx = list_entry(txs.next, kib_tx_t, tx_list);
+ list_del(&tx->tx_list);
+
+ kiblnd_queue_tx_locked(tx, conn);
+ }
+ spin_unlock(&conn->ibc_lock);
+
+ kiblnd_check_sends(conn);
+
+ /* schedule blocked rxs */
+ kiblnd_handle_early_rxs(conn);
+}
+
+void
+kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
+{
+ int rc;
+
+ rc = rdma_reject(cmid, rej, sizeof(*rej));
+
+ if (rc != 0)
+ CWARN("Error %d sending reject\n", rc);
+}
+
+int
+kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
+{
+ rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+ kib_msg_t *reqmsg = priv;
+ kib_msg_t *ackmsg;
+ kib_dev_t *ibdev;
+ kib_peer_t *peer;
+ kib_peer_t *peer2;
+ kib_conn_t *conn;
+ lnet_ni_t *ni = NULL;
+ kib_net_t *net = NULL;
+ lnet_nid_t nid;
+ struct rdma_conn_param cp;
+ kib_rej_t rej;
+ int version = IBLND_MSG_VERSION;
+ unsigned long flags;
+ int rc;
+ struct sockaddr_in *peer_addr;
+ LASSERT (!in_interrupt());
+
+ /* cmid inherits 'context' from the corresponding listener id */
+ ibdev = (kib_dev_t *)cmid->context;
+ LASSERT (ibdev != NULL);
+
+ memset(&rej, 0, sizeof(rej));
+ rej.ibr_magic = IBLND_MSG_MAGIC;
+ rej.ibr_why = IBLND_REJECT_FATAL;
+ rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;
+
+ peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr);
+ if (*kiblnd_tunables.kib_require_priv_port &&
+ ntohs(peer_addr->sin_port) >= PROT_SOCK) {
+ __u32 ip = ntohl(peer_addr->sin_addr.s_addr);
+ CERROR("Peer's port (%pI4h:%hu) is not privileged\n",
+ &ip, ntohs(peer_addr->sin_port));
+ goto failed;
+ }
+
+ if (priv_nob < offsetof(kib_msg_t, ibm_type)) {
+ CERROR("Short connection request\n");
+ goto failed;
+ }
+
+ /* Future protocol version compatibility support! If the
+ * o2iblnd-specific protocol changes, or when LNET unifies
+ * protocols over all LNDs, the initial connection will
+ * negotiate a protocol version. I trap this here to avoid
+ * console errors; the reject tells the peer which protocol I
+ * speak. */
+ if (reqmsg->ibm_magic == LNET_PROTO_MAGIC ||
+ reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC))
+ goto failed;
+ if (reqmsg->ibm_magic == IBLND_MSG_MAGIC &&
+ reqmsg->ibm_version != IBLND_MSG_VERSION &&
+ reqmsg->ibm_version != IBLND_MSG_VERSION_1)
+ goto failed;
+ if (reqmsg->ibm_magic == __swab32(IBLND_MSG_MAGIC) &&
+ reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION) &&
+ reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION_1))
+ goto failed;
+
+ rc = kiblnd_unpack_msg(reqmsg, priv_nob);
+ if (rc != 0) {
+ CERROR("Can't parse connection request: %d\n", rc);
+ goto failed;
+ }
+
+ nid = reqmsg->ibm_srcnid;
+ ni = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid));
+
+ if (ni != NULL) {
+ net = (kib_net_t *)ni->ni_data;
+ rej.ibr_incarnation = net->ibn_incarnation;
+ }
+
+ if (ni == NULL || /* no matching net */
+ ni->ni_nid != reqmsg->ibm_dstnid || /* right NET, wrong NID! */
+ net->ibn_dev != ibdev) { /* wrong device */
+ CERROR("Can't accept %s on %s (%s:%d:%pI4h): "
+ "bad dst nid %s\n", libcfs_nid2str(nid),
+ ni == NULL ? "NA" : libcfs_nid2str(ni->ni_nid),
+ ibdev->ibd_ifname, ibdev->ibd_nnets,
+ &ibdev->ibd_ifip,
+ libcfs_nid2str(reqmsg->ibm_dstnid));
+
+ goto failed;
+ }
+
+ /* check time stamp as soon as possible */
+ if (reqmsg->ibm_dststamp != 0 &&
+ reqmsg->ibm_dststamp != net->ibn_incarnation) {
+ CWARN("Stale connection request\n");
+ rej.ibr_why = IBLND_REJECT_CONN_STALE;
+ goto failed;
+ }
+
+ /* I can accept peer's version */
+ version = reqmsg->ibm_version;
+
+ if (reqmsg->ibm_type != IBLND_MSG_CONNREQ) {
+ CERROR("Unexpected connreq msg type: %x from %s\n",
+ reqmsg->ibm_type, libcfs_nid2str(nid));
+ goto failed;
+ }
+
+ if (reqmsg->ibm_u.connparams.ibcp_queue_depth !=
+ IBLND_MSG_QUEUE_SIZE(version)) {
+ CERROR("Can't accept %s: incompatible queue depth %d (%d wanted)\n",
+ libcfs_nid2str(nid), reqmsg->ibm_u.connparams.ibcp_queue_depth,
+ IBLND_MSG_QUEUE_SIZE(version));
+
+ if (version == IBLND_MSG_VERSION)
+ rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE;
+
+ goto failed;
+ }
+
+ if (reqmsg->ibm_u.connparams.ibcp_max_frags !=
+ IBLND_RDMA_FRAGS(version)) {
+ CERROR("Can't accept %s(version %x): "
+ "incompatible max_frags %d (%d wanted)\n",
+ libcfs_nid2str(nid), version,
+ reqmsg->ibm_u.connparams.ibcp_max_frags,
+ IBLND_RDMA_FRAGS(version));
+
+ if (version == IBLND_MSG_VERSION)
+ rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
+
+ goto failed;
+ }
+
+ if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
+ CERROR("Can't accept %s: message size %d too big (%d max)\n",
+ libcfs_nid2str(nid),
+ reqmsg->ibm_u.connparams.ibcp_max_msg_size,
+ IBLND_MSG_SIZE);
+ goto failed;
+ }
+
+ /* assume 'nid' is a new peer; create */
+ rc = kiblnd_create_peer(ni, &peer, nid);
+ if (rc != 0) {
+ CERROR("Can't create peer for %s\n", libcfs_nid2str(nid));
+ rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
+ goto failed;
+ }
+
+ write_lock_irqsave(g_lock, flags);
+
+ peer2 = kiblnd_find_peer_locked(nid);
+ if (peer2 != NULL) {
+ if (peer2->ibp_version == 0) {
+ peer2->ibp_version = version;
+ peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
+ }
+
+ /* not the guy I've talked with */
+ if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
+ peer2->ibp_version != version) {
+ kiblnd_close_peer_conns_locked(peer2, -ESTALE);
+ write_unlock_irqrestore(g_lock, flags);
+
+ CWARN("Conn stale %s [old ver: %x, new ver: %x]\n",
+ libcfs_nid2str(nid), peer2->ibp_version, version);
+
+ kiblnd_peer_decref(peer);
+ rej.ibr_why = IBLND_REJECT_CONN_STALE;
+ goto failed;
+ }
+
+ /* tie-break connection race in favour of the higher NID */
+ if (peer2->ibp_connecting != 0 &&
+ nid < ni->ni_nid) {
+ write_unlock_irqrestore(g_lock, flags);
+
+ CWARN("Conn race %s\n", libcfs_nid2str(peer2->ibp_nid));
+
+ kiblnd_peer_decref(peer);
+ rej.ibr_why = IBLND_REJECT_CONN_RACE;
+ goto failed;
+ }
+
+ peer2->ibp_accepting++;
+ kiblnd_peer_addref(peer2);
+
+ write_unlock_irqrestore(g_lock, flags);
+ kiblnd_peer_decref(peer);
+ peer = peer2;
+ } else {
+ /* Brand new peer */
+ LASSERT (peer->ibp_accepting == 0);
+ LASSERT (peer->ibp_version == 0 &&
+ peer->ibp_incarnation == 0);
+
+ peer->ibp_accepting = 1;
+ peer->ibp_version = version;
+ peer->ibp_incarnation = reqmsg->ibm_srcstamp;
+
+ /* I have a ref on ni that prevents it being shutdown */
+ LASSERT (net->ibn_shutdown == 0);
+
+ kiblnd_peer_addref(peer);
+ list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
+
+ write_unlock_irqrestore(g_lock, flags);
+ }
+
+ conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version);
+ if (conn == NULL) {
+ kiblnd_peer_connect_failed(peer, 0, -ENOMEM);
+ kiblnd_peer_decref(peer);
+ rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
+ goto failed;
+ }
+
+ /* conn now "owns" cmid, so I return success from here on to ensure the
+ * CM callback doesn't destroy cmid. */
+
+ conn->ibc_incarnation = reqmsg->ibm_srcstamp;
+ conn->ibc_credits = IBLND_MSG_QUEUE_SIZE(version);
+ conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(version);
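+ /* every credit (plus any out-of-band NOOPs) must be covered by a
+ * pre-posted receive buffer on this connection */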
+ LASSERT (conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(version)
+ <= IBLND_RX_MSGS(version));
+
+ ackmsg = &conn->ibc_connvars->cv_msg;
+ memset(ackmsg, 0, sizeof(*ackmsg));
+
+ kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
+ sizeof(ackmsg->ibm_u.connparams));
+ ackmsg->ibm_u.connparams.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
+ ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
+ ackmsg->ibm_u.connparams.ibcp_max_frags = IBLND_RDMA_FRAGS(version);
+
+ kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);
+
+ memset(&cp, 0, sizeof(cp));
+ cp.private_data = ackmsg;
+ cp.private_data_len = ackmsg->ibm_nob;
+ cp.responder_resources = 0; /* No atomic ops or RDMA reads */
+ cp.initiator_depth = 0;
+ cp.flow_control = 1;
+ cp.retry_count = *kiblnd_tunables.kib_retry_count;
+ cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count;
+
+ CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid));
+
+ rc = rdma_accept(cmid, &cp);
+ if (rc != 0) {
+ CERROR("Can't accept %s: %d\n", libcfs_nid2str(nid), rc);
+ rej.ibr_version = version;
+ rej.ibr_why = IBLND_REJECT_FATAL;
+
+ kiblnd_reject(cmid, &rej);
+ kiblnd_connreq_done(conn, rc);
+ kiblnd_conn_decref(conn);
+ }
+
+ lnet_ni_decref(ni);
+ return 0;
+
+ failed:
+ if (ni != NULL)
+ lnet_ni_decref(ni);
+
+ rej.ibr_version = version;
+ rej.ibr_cp.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
+ rej.ibr_cp.ibcp_max_frags = IBLND_RDMA_FRAGS(version);
+ kiblnd_reject(cmid, &rej);
+
+ return -ECONNREFUSED;
+}
+
+void
+kiblnd_reconnect (kib_conn_t *conn, int version,
+ __u64 incarnation, int why, kib_connparams_t *cp)
+{
+ kib_peer_t *peer = conn->ibc_peer;
+ char *reason;
+ int retry = 0;
+ unsigned long flags;
+
+ LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
+ LASSERT (peer->ibp_connecting > 0); /* 'conn' at least */
+
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+
+ /* retry connection if it's still needed and no other connection
+ * attempts (active or passive) are in progress
+ * NB: reconnect is still needed even when ibp_tx_queue is
+ * empty if ibp_version != version because reconnect may be
+ * initiated by kiblnd_query() */
+ if ((!list_empty(&peer->ibp_tx_queue) ||
+ peer->ibp_version != version) &&
+ peer->ibp_connecting == 1 &&
+ peer->ibp_accepting == 0) {
+ retry = 1;
+ peer->ibp_connecting++;
+
+ peer->ibp_version = version;
+ peer->ibp_incarnation = incarnation;
+ }
+
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+ if (!retry)
+ return;
+
+ switch (why) {
+ default:
+ reason = "Unknown";
+ break;
+
+ case IBLND_REJECT_CONN_STALE:
+ reason = "stale";
+ break;
+
+ case IBLND_REJECT_CONN_RACE:
+ reason = "conn race";
+ break;
+
+ case IBLND_REJECT_CONN_UNCOMPAT:
+ reason = "version negotiation";
+ break;
+ }
+
+ CNETERR("%s: retrying (%s), %x, %x, "
+ "queue_dep: %d, max_frag: %d, msg_size: %d\n",
+ libcfs_nid2str(peer->ibp_nid),
+ reason, IBLND_MSG_VERSION, version,
+ cp != NULL ? cp->ibcp_queue_depth : IBLND_MSG_QUEUE_SIZE(version),
+ cp != NULL ? cp->ibcp_max_frags : IBLND_RDMA_FRAGS(version),
+ cp != NULL ? cp->ibcp_max_msg_size : IBLND_MSG_SIZE);
+
+ kiblnd_connect_peer(peer);
+}
+
+void
+kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
+{
+ kib_peer_t *peer = conn->ibc_peer;
+
+ LASSERT (!in_interrupt());
+ LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
+
+ switch (reason) {
+ case IB_CM_REJ_STALE_CONN:
+ kiblnd_reconnect(conn, IBLND_MSG_VERSION, 0,
+ IBLND_REJECT_CONN_STALE, NULL);
+ break;
+
+ case IB_CM_REJ_INVALID_SERVICE_ID:
+ CNETERR("%s rejected: no listener at %d\n",
+ libcfs_nid2str(peer->ibp_nid),
+ *kiblnd_tunables.kib_service);
+ break;
+
+ case IB_CM_REJ_CONSUMER_DEFINED:
+ if (priv_nob >= offsetof(kib_rej_t, ibr_padding)) {
+ kib_rej_t *rej = priv;
+ kib_connparams_t *cp = NULL;
+ int flip = 0;
+ __u64 incarnation = -1;
+
+ /* NB. the default incarnation is -1 because:
+ * a) V1 ignores the dst incarnation in the connreq.
+ * b) V2 provides an incarnation when it rejects me, so
+ * the -1 will be overwritten.
+ *
+ * If I try to connect to a V1 peer with the V2 protocol,
+ * and it rejects me and then upgrades to V2, I know
+ * nothing about the upgrade and retry with V1; in that
+ * case the upgraded V2 peer can tell I'm talking to the
+ * old version and rejects me (incarnation is -1).
+ */
+
+ if (rej->ibr_magic == __swab32(IBLND_MSG_MAGIC) ||
+ rej->ibr_magic == __swab32(LNET_PROTO_MAGIC)) {
+ __swab32s(&rej->ibr_magic);
+ __swab16s(&rej->ibr_version);
+ flip = 1;
+ }
+
+ if (priv_nob >= sizeof(kib_rej_t) &&
+ rej->ibr_version > IBLND_MSG_VERSION_1) {
+ /* priv_nob is always 148 (IB_CM_REJ_PRIVATE_DATA_SIZE)
+ * in the current version of OFED, so we still need to
+ * check the version here. */
+ cp = &rej->ibr_cp;
+
+ if (flip) {
+ __swab64s(&rej->ibr_incarnation);
+ __swab16s(&cp->ibcp_queue_depth);
+ __swab16s(&cp->ibcp_max_frags);
+ __swab32s(&cp->ibcp_max_msg_size);
+ }
+
+ incarnation = rej->ibr_incarnation;
+ }
+
+ if (rej->ibr_magic != IBLND_MSG_MAGIC &&
+ rej->ibr_magic != LNET_PROTO_MAGIC) {
+ CERROR("%s rejected: consumer defined fatal error\n",
+ libcfs_nid2str(peer->ibp_nid));
+ break;
+ }
+
+ if (rej->ibr_version != IBLND_MSG_VERSION &&
+ rej->ibr_version != IBLND_MSG_VERSION_1) {
+ CERROR("%s rejected: o2iblnd version %x error\n",
+ libcfs_nid2str(peer->ibp_nid),
+ rej->ibr_version);
+ break;
+ }
+
+ if (rej->ibr_why == IBLND_REJECT_FATAL &&
+ rej->ibr_version == IBLND_MSG_VERSION_1) {
+ CDEBUG(D_NET, "rejected by old version peer %s: %x\n",
+ libcfs_nid2str(peer->ibp_nid), rej->ibr_version);
+
+ if (conn->ibc_version != IBLND_MSG_VERSION_1)
+ rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT;
+ }
+
+ switch (rej->ibr_why) {
+ case IBLND_REJECT_CONN_RACE:
+ case IBLND_REJECT_CONN_STALE:
+ case IBLND_REJECT_CONN_UNCOMPAT:
+ kiblnd_reconnect(conn, rej->ibr_version,
+ incarnation, rej->ibr_why, cp);
+ break;
+
+ case IBLND_REJECT_MSG_QUEUE_SIZE:
+ CERROR("%s rejected: incompatible message queue depth %d, %d\n",
+ libcfs_nid2str(peer->ibp_nid), cp->ibcp_queue_depth,
+ IBLND_MSG_QUEUE_SIZE(conn->ibc_version));
+ break;
+
+ case IBLND_REJECT_RDMA_FRAGS:
+ CERROR("%s rejected: incompatible # of RDMA fragments %d, %d\n",
+ libcfs_nid2str(peer->ibp_nid), cp->ibcp_max_frags,
+ IBLND_RDMA_FRAGS(conn->ibc_version));
+ break;
+
+ case IBLND_REJECT_NO_RESOURCES:
+ CERROR("%s rejected: o2iblnd no resources\n",
+ libcfs_nid2str(peer->ibp_nid));
+ break;
+
+ case IBLND_REJECT_FATAL:
+ CERROR("%s rejected: o2iblnd fatal error\n",
+ libcfs_nid2str(peer->ibp_nid));
+ break;
+
+ default:
+ CERROR("%s rejected: o2iblnd reason %d\n",
+ libcfs_nid2str(peer->ibp_nid),
+ rej->ibr_why);
+ break;
+ }
+ break;
+ }
+ /* fall through */
+ default:
+ CNETERR("%s rejected: reason %d, size %d\n",
+ libcfs_nid2str(peer->ibp_nid), reason, priv_nob);
+ break;
+ }
+
+ kiblnd_connreq_done(conn, -ECONNREFUSED);
+}
+
+void
+kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob)
+{
+ kib_peer_t *peer = conn->ibc_peer;
+ lnet_ni_t *ni = peer->ibp_ni;
+ kib_net_t *net = ni->ni_data;
+ kib_msg_t *msg = priv;
+ int ver = conn->ibc_version;
+ int rc = kiblnd_unpack_msg(msg, priv_nob);
+ unsigned long flags;
+
+ LASSERT (net != NULL);
+
+ if (rc != 0) {
+ CERROR("Can't unpack connack from %s: %d\n",
+ libcfs_nid2str(peer->ibp_nid), rc);
+ goto failed;
+ }
+
+ if (msg->ibm_type != IBLND_MSG_CONNACK) {
+ CERROR("Unexpected message %d from %s\n",
+ msg->ibm_type, libcfs_nid2str(peer->ibp_nid));
+ rc = -EPROTO;
+ goto failed;
+ }
+
+ if (ver != msg->ibm_version) {
+ CERROR("%s replied version %x is different with "
+ "requested version %x\n",
+ libcfs_nid2str(peer->ibp_nid), msg->ibm_version, ver);
+ rc = -EPROTO;
+ goto failed;
+ }
+
+ if (msg->ibm_u.connparams.ibcp_queue_depth !=
+ IBLND_MSG_QUEUE_SIZE(ver)) {
+ CERROR("%s has incompatible queue depth %d(%d wanted)\n",
+ libcfs_nid2str(peer->ibp_nid),
+ msg->ibm_u.connparams.ibcp_queue_depth,
+ IBLND_MSG_QUEUE_SIZE(ver));
+ rc = -EPROTO;
+ goto failed;
+ }
+
+ if (msg->ibm_u.connparams.ibcp_max_frags !=
+ IBLND_RDMA_FRAGS(ver)) {
+ CERROR("%s has incompatible max_frags %d (%d wanted)\n",
+ libcfs_nid2str(peer->ibp_nid),
+ msg->ibm_u.connparams.ibcp_max_frags,
+ IBLND_RDMA_FRAGS(ver));
+ rc = -EPROTO;
+ goto failed;
+ }
+
+ if (msg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
+ CERROR("%s max message size %d too big (%d max)\n",
+ libcfs_nid2str(peer->ibp_nid),
+ msg->ibm_u.connparams.ibcp_max_msg_size,
+ IBLND_MSG_SIZE);
+ rc = -EPROTO;
+ goto failed;
+ }
+
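+ /* the reply must be addressed to this NI and its current
+ * incarnation; otherwise the peer is answering a previous
+ * instance of this node and the reply is stale */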
+ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ if (msg->ibm_dstnid == ni->ni_nid &&
+ msg->ibm_dststamp == net->ibn_incarnation)
+ rc = 0;
+ else
+ rc = -ESTALE;
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+ if (rc != 0) {
+ CERROR("Bad connection reply from %s, rc = %d, "
+ "version: %x max_frags: %d\n",
+ libcfs_nid2str(peer->ibp_nid), rc,
+ msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags);
+ goto failed;
+ }
+
+ conn->ibc_incarnation = msg->ibm_srcstamp;
+ conn->ibc_credits =
+ conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(ver);
+ LASSERT (conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(ver)
+ <= IBLND_RX_MSGS(ver));
+
+ kiblnd_connreq_done(conn, 0);
+ return;
+
+ failed:
+ /* NB My QP has already established itself, so I handle anything going
+ * wrong here by setting ibc_comms_error.
+ * kiblnd_connreq_done(0) moves the conn state to ESTABLISHED, but then
+ * immediately tears it down. */
+
+ LASSERT (rc != 0);
+ conn->ibc_comms_error = rc;
+ kiblnd_connreq_done(conn, 0);
+}
+
+int
+kiblnd_active_connect (struct rdma_cm_id *cmid)
+{
+ kib_peer_t *peer = (kib_peer_t *)cmid->context;
+ kib_conn_t *conn;
+ kib_msg_t *msg;
+ struct rdma_conn_param cp;
+ int version;
+ __u64 incarnation;
+ unsigned long flags;
+ int rc;
+
+ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+
+ incarnation = peer->ibp_incarnation;
+ version = (peer->ibp_version == 0) ? IBLND_MSG_VERSION :
+ peer->ibp_version;
+
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+ conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, version);
+ if (conn == NULL) {
+ kiblnd_peer_connect_failed(peer, 1, -ENOMEM);
+ kiblnd_peer_decref(peer); /* lose cmid's ref */
+ return -ENOMEM;
+ }
+
+ /* conn "owns" cmid now, so I return success from here on to ensure the
+ * CM callback doesn't destroy cmid. conn also takes over cmid's ref
+ * on peer */
+
+ msg = &conn->ibc_connvars->cv_msg;
+
+ memset(msg, 0, sizeof(*msg));
+ kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams));
+ msg->ibm_u.connparams.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
+ msg->ibm_u.connparams.ibcp_max_frags = IBLND_RDMA_FRAGS(version);
+ msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
+
+ kiblnd_pack_msg(peer->ibp_ni, msg, version,
+ 0, peer->ibp_nid, incarnation);
+
+ memset(&cp, 0, sizeof(cp));
+ cp.private_data = msg;
+ cp.private_data_len = msg->ibm_nob;
+ cp.responder_resources = 0; /* No atomic ops or RDMA reads */
+ cp.initiator_depth = 0;
+ cp.flow_control = 1;
+ cp.retry_count = *kiblnd_tunables.kib_retry_count;
+ cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count;
+
+ LASSERT(cmid->context == (void *)conn);
+ LASSERT(conn->ibc_cmid == cmid);
+
+ rc = rdma_connect(cmid, &cp);
+ if (rc != 0) {
+ CERROR("Can't connect to %s: %d\n",
+ libcfs_nid2str(peer->ibp_nid), rc);
+ kiblnd_connreq_done(conn, rc);
+ kiblnd_conn_decref(conn);
+ }
+
+ return 0;
+}
+
+int
+kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
+{
+ kib_peer_t *peer;
+ kib_conn_t *conn;
+ int rc;
+
+ switch (event->event) {
+ default:
+ CERROR("Unexpected event: %d, status: %d\n",
+ event->event, event->status);
+ LBUG();
+
+ case RDMA_CM_EVENT_CONNECT_REQUEST:
+ /* destroy cmid on failure */
+ rc = kiblnd_passive_connect(cmid,
+ (void *)KIBLND_CONN_PARAM(event),
+ KIBLND_CONN_PARAM_LEN(event));
+ CDEBUG(D_NET, "connreq: %d\n", rc);
+ return rc;
+
+ case RDMA_CM_EVENT_ADDR_ERROR:
+ peer = (kib_peer_t *)cmid->context;
+ CNETERR("%s: ADDR ERROR %d\n",
+ libcfs_nid2str(peer->ibp_nid), event->status);
+ kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
+ kiblnd_peer_decref(peer);
+ return -EHOSTUNREACH; /* rc != 0 destroys cmid */
+
+ case RDMA_CM_EVENT_ADDR_RESOLVED:
+ peer = (kib_peer_t *)cmid->context;
+
+ CDEBUG(D_NET,"%s Addr resolved: %d\n",
+ libcfs_nid2str(peer->ibp_nid), event->status);
+
+ if (event->status != 0) {
+ CNETERR("Can't resolve address for %s: %d\n",
+ libcfs_nid2str(peer->ibp_nid), event->status);
+ rc = event->status;
+ } else {
+ rc = rdma_resolve_route(
+ cmid, *kiblnd_tunables.kib_timeout * 1000);
+ if (rc == 0)
+ return 0;
+ /* Can't initiate route resolution */
+ CERROR("Can't resolve route for %s: %d\n",
+ libcfs_nid2str(peer->ibp_nid), rc);
+ }
+ kiblnd_peer_connect_failed(peer, 1, rc);
+ kiblnd_peer_decref(peer);
+ return rc; /* rc != 0 destroys cmid */
+
+ case RDMA_CM_EVENT_ROUTE_ERROR:
+ peer = (kib_peer_t *)cmid->context;
+ CNETERR("%s: ROUTE ERROR %d\n",
+ libcfs_nid2str(peer->ibp_nid), event->status);
+ kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
+ kiblnd_peer_decref(peer);
+ return -EHOSTUNREACH; /* rc != 0 destroys cmid */
+
+ case RDMA_CM_EVENT_ROUTE_RESOLVED:
+ peer = (kib_peer_t *)cmid->context;
+ CDEBUG(D_NET,"%s Route resolved: %d\n",
+ libcfs_nid2str(peer->ibp_nid), event->status);
+
+ if (event->status == 0)
+ return kiblnd_active_connect(cmid);
+
+ CNETERR("Can't resolve route for %s: %d\n",
+ libcfs_nid2str(peer->ibp_nid), event->status);
+ kiblnd_peer_connect_failed(peer, 1, event->status);
+ kiblnd_peer_decref(peer);
+ return event->status; /* rc != 0 destroys cmid */
+
+ case RDMA_CM_EVENT_UNREACHABLE:
+ conn = (kib_conn_t *)cmid->context;
+ LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
+ conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
+ CNETERR("%s: UNREACHABLE %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
+ kiblnd_connreq_done(conn, -ENETDOWN);
+ kiblnd_conn_decref(conn);
+ return 0;
+
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ conn = (kib_conn_t *)cmid->context;
+ LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
+ conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
+ CNETERR("%s: CONNECT ERROR %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
+ kiblnd_connreq_done(conn, -ENOTCONN);
+ kiblnd_conn_decref(conn);
+ return 0;
+
+ case RDMA_CM_EVENT_REJECTED:
+ conn = (kib_conn_t *)cmid->context;
+ switch (conn->ibc_state) {
+ default:
+ LBUG();
+
+ case IBLND_CONN_PASSIVE_WAIT:
+ CERROR ("%s: REJECTED %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ event->status);
+ kiblnd_connreq_done(conn, -ECONNRESET);
+ break;
+
+ case IBLND_CONN_ACTIVE_CONNECT:
+ kiblnd_rejected(conn, event->status,
+ (void *)KIBLND_CONN_PARAM(event),
+ KIBLND_CONN_PARAM_LEN(event));
+ break;
+ }
+ kiblnd_conn_decref(conn);
+ return 0;
+
+ case RDMA_CM_EVENT_ESTABLISHED:
+ conn = (kib_conn_t *)cmid->context;
+ switch (conn->ibc_state) {
+ default:
+ LBUG();
+
+ case IBLND_CONN_PASSIVE_WAIT:
+ CDEBUG(D_NET, "ESTABLISHED (passive): %s\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ kiblnd_connreq_done(conn, 0);
+ break;
+
+ case IBLND_CONN_ACTIVE_CONNECT:
+ CDEBUG(D_NET, "ESTABLISHED(active): %s\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ kiblnd_check_connreply(conn,
+ (void *)KIBLND_CONN_PARAM(event),
+ KIBLND_CONN_PARAM_LEN(event));
+ break;
+ }
+ /* net keeps its ref on conn! */
+ return 0;
+
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ CDEBUG(D_NET, "Ignore TIMEWAIT_EXIT event\n");
+ return 0;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ conn = (kib_conn_t *)cmid->context;
+ if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
+ CERROR("%s DISCONNECTED\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ kiblnd_connreq_done(conn, -ECONNRESET);
+ } else {
+ kiblnd_close_conn(conn, 0);
+ }
+ kiblnd_conn_decref(conn);
+ cmid->context = NULL;
+ return 0;
+
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ LCONSOLE_ERROR_MSG(0x131,
+ "Received notification of device removal\n"
+ "Please shutdown LNET to allow this to proceed\n");
+ /* Can't remove network from underneath LNET for now, so I have
+ * to ignore this */
+ return 0;
+
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ LCONSOLE_INFO("Physical link changed (eg hca/port)\n");
+ return 0;
+ }
+}
+
+static int
+kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs)
+{
+ kib_tx_t *tx;
+ struct list_head *ttmp;
+
+ list_for_each (ttmp, txs) {
+ tx = list_entry (ttmp, kib_tx_t, tx_list);
+
+ if (txs != &conn->ibc_active_txs) {
+ LASSERT (tx->tx_queued);
+ } else {
+ LASSERT (!tx->tx_queued);
+ LASSERT (tx->tx_waiting || tx->tx_sending != 0);
+ }
+
+ if (cfs_time_aftereq (jiffies, tx->tx_deadline)) {
+ CERROR("Timed out tx: %s, %lu seconds\n",
+ kiblnd_queue2str(conn, txs),
+ cfs_duration_sec(jiffies - tx->tx_deadline));
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+kiblnd_conn_timed_out_locked(kib_conn_t *conn)
+{
+ return kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue) ||
+ kiblnd_check_txs_locked(conn, &conn->ibc_tx_noops) ||
+ kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_rsrvd) ||
+ kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_nocred) ||
+ kiblnd_check_txs_locked(conn, &conn->ibc_active_txs);
+}
+
+void
+kiblnd_check_conns (int idx)
+{
+ LIST_HEAD (closes);
+ LIST_HEAD (checksends);
+ struct list_head *peers = &kiblnd_data.kib_peers[idx];
+ struct list_head *ptmp;
+ kib_peer_t *peer;
+ kib_conn_t *conn;
+ struct list_head *ctmp;
+ unsigned long flags;
+
+ /* NB. We expect to have a look at all the peers and not find any
+ * RDMAs to time out, so we just use a shared lock while we
+ * take a look... */
+ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+
+ list_for_each (ptmp, peers) {
+ peer = list_entry (ptmp, kib_peer_t, ibp_list);
+
+ list_for_each (ctmp, &peer->ibp_conns) {
+ int timedout;
+ int sendnoop;
+
+ conn = list_entry(ctmp, kib_conn_t, ibc_list);
+
+ LASSERT (conn->ibc_state == IBLND_CONN_ESTABLISHED);
+
+ spin_lock(&conn->ibc_lock);
+
+ sendnoop = kiblnd_need_noop(conn);
+ timedout = kiblnd_conn_timed_out_locked(conn);
+ if (!sendnoop && !timedout) {
+ spin_unlock(&conn->ibc_lock);
+ continue;
+ }
+
+ if (timedout) {
+ CERROR("Timed out RDMA with %s (%lu): "
+ "c: %u, oc: %u, rc: %u\n",
+ libcfs_nid2str(peer->ibp_nid),
+ cfs_duration_sec(cfs_time_current() -
+ peer->ibp_last_alive),
+ conn->ibc_credits,
+ conn->ibc_outstanding_credits,
+ conn->ibc_reserved_credits);
+ list_add(&conn->ibc_connd_list, &closes);
+ } else {
+ list_add(&conn->ibc_connd_list,
+ &checksends);
+ }
+ /* +ref for 'closes' or 'checksends' */
+ kiblnd_conn_addref(conn);
+
+ spin_unlock(&conn->ibc_lock);
+ }
+ }
+
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+ /* Handle timeout by closing the whole
+ * connection. We can only be sure RDMA activity
+ * has ceased once the QP has been modified. */
+ while (!list_empty(&closes)) {
+ conn = list_entry(closes.next,
+ kib_conn_t, ibc_connd_list);
+ list_del(&conn->ibc_connd_list);
+ kiblnd_close_conn(conn, -ETIMEDOUT);
+ kiblnd_conn_decref(conn);
+ }
+
+ /* In case we have enough credits to return via a
+ * NOOP, but there were no non-blocking tx descs
+ * free to do it last time... */
+ while (!list_empty(&checksends)) {
+ conn = list_entry(checksends.next,
+ kib_conn_t, ibc_connd_list);
+ list_del(&conn->ibc_connd_list);
+ kiblnd_check_sends(conn);
+ kiblnd_conn_decref(conn);
+ }
+}
+
+void
+kiblnd_disconnect_conn (kib_conn_t *conn)
+{
+ LASSERT (!in_interrupt());
+ LASSERT (current == kiblnd_data.kib_connd);
+ LASSERT (conn->ibc_state == IBLND_CONN_CLOSING);
+
+ rdma_disconnect(conn->ibc_cmid);
+ kiblnd_finalise_conn(conn);
+
+ kiblnd_peer_notify(conn->ibc_peer);
+}
+
+int
+kiblnd_connd (void *arg)
+{
+ wait_queue_t wait;
+ unsigned long flags;
+ kib_conn_t *conn;
+ int timeout;
+ int i;
+ int dropped_lock;
+ int peer_index = 0;
+ unsigned long deadline = jiffies;
+
+ cfs_block_allsigs ();
+
+ init_waitqueue_entry_current (&wait);
+ kiblnd_data.kib_connd = current;
+
+ spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+
+ while (!kiblnd_data.kib_shutdown) {
+
+ dropped_lock = 0;
+
+ if (!list_empty (&kiblnd_data.kib_connd_zombies)) {
+ conn = list_entry(kiblnd_data.kib_connd_zombies.next,
+ kib_conn_t, ibc_list);
+ list_del(&conn->ibc_list);
+
+ spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock,
+ flags);
+ dropped_lock = 1;
+
+ kiblnd_destroy_conn(conn);
+
+ spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ }
+
+ if (!list_empty(&kiblnd_data.kib_connd_conns)) {
+ conn = list_entry(kiblnd_data.kib_connd_conns.next,
+ kib_conn_t, ibc_list);
+ list_del(&conn->ibc_list);
+
+ spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock,
+ flags);
+ dropped_lock = 1;
+
+ kiblnd_disconnect_conn(conn);
+ kiblnd_conn_decref(conn);
+
+ spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ }
+
+ /* careful with the jiffy wrap... */
+ timeout = (int)(deadline - jiffies);
+ if (timeout <= 0) {
+ const int n = 4;
+ const int p = 1;
+ int chunk = kiblnd_data.kib_peer_hash_size;
+
+ spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+ dropped_lock = 1;
+
+ /* Time to check for RDMA timeouts on a few more
+ * peers: I do checks every 'p' seconds on a
+ * proportion of the peer table and I need to check
+ * every connection 'n' times within a timeout
+ * interval, to ensure I detect a timeout on any
+ * connection within (n+1)/n times the timeout
+ * interval. */
+
+ if (*kiblnd_tunables.kib_timeout > n * p)
+ chunk = (chunk * n * p) /
+ *kiblnd_tunables.kib_timeout;
+ if (chunk == 0)
+ chunk = 1;
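+ /* e.g. with the default timeout of 50 seconds and n = 4, p = 1,
+ * roughly 4/50 (8%) of the peer hash table is scanned per pass,
+ * one pass per second */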
+
+ for (i = 0; i < chunk; i++) {
+ kiblnd_check_conns(peer_index);
+ peer_index = (peer_index + 1) %
+ kiblnd_data.kib_peer_hash_size;
+ }
+
+ deadline += p * HZ;
+ spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ }
+
+ if (dropped_lock)
+ continue;
+
+ /* Nothing to do for 'timeout' */
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
+ spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+
+ waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
+ spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+
+ kiblnd_thread_fini();
+ return 0;
+}
+
+void
+kiblnd_qp_event(struct ib_event *event, void *arg)
+{
+ kib_conn_t *conn = arg;
+
+ switch (event->event) {
+ case IB_EVENT_COMM_EST:
+ CDEBUG(D_NET, "%s established\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ return;
+
+ default:
+ CERROR("%s: Async QP event type %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
+ return;
+ }
+}
+
+void
+kiblnd_complete (struct ib_wc *wc)
+{
+ switch (kiblnd_wreqid2type(wc->wr_id)) {
+ default:
+ LBUG();
+
+ case IBLND_WID_RDMA:
+ /* We only get RDMA completion notification if it fails. All
+ * subsequent work items, including the final SEND will fail
+ * too. However we can't print out any more info about the
+ * failing RDMA because 'tx' might be back on the idle list or
+ * even reused already if we didn't manage to post all our work
+ * items */
+ CNETERR("RDMA (tx: %p) failed: %d\n",
+ kiblnd_wreqid2ptr(wc->wr_id), wc->status);
+ return;
+
+ case IBLND_WID_TX:
+ kiblnd_tx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status);
+ return;
+
+ case IBLND_WID_RX:
+ kiblnd_rx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status,
+ wc->byte_len);
+ return;
+ }
+}
+
+void
+kiblnd_cq_completion(struct ib_cq *cq, void *arg)
+{
+ /* NB I'm not allowed to schedule this conn once its refcount has
+ * reached 0. Since fundamentally I'm racing with scheduler threads
+ * consuming my CQ I could be called after all completions have
+ * occurred. But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0
+ * and this CQ is about to be destroyed so I NOOP. */
+ kib_conn_t *conn = (kib_conn_t *)arg;
+ struct kib_sched_info *sched = conn->ibc_sched;
+ unsigned long flags;
+
+ LASSERT(cq == conn->ibc_cq);
+
+ spin_lock_irqsave(&sched->ibs_lock, flags);
+
+ conn->ibc_ready = 1;
+
+ if (!conn->ibc_scheduled &&
+ (conn->ibc_nrx > 0 ||
+ conn->ibc_nsends_posted > 0)) {
+ kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
+ conn->ibc_scheduled = 1;
+ list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns);
+
+ if (waitqueue_active(&sched->ibs_waitq))
+ wake_up(&sched->ibs_waitq);
+ }
+
+ spin_unlock_irqrestore(&sched->ibs_lock, flags);
+}
+
+void
+kiblnd_cq_event(struct ib_event *event, void *arg)
+{
+ kib_conn_t *conn = arg;
+
+ CERROR("%s: async CQ event type %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
+}
+
+int
+kiblnd_scheduler(void *arg)
+{
+ long id = (long)arg;
+ struct kib_sched_info *sched;
+ kib_conn_t *conn;
+ wait_queue_t wait;
+ unsigned long flags;
+ struct ib_wc wc;
+ int did_something;
+ int busy_loops = 0;
+ int rc;
+
+ cfs_block_allsigs();
+
+ init_waitqueue_entry_current(&wait);
+
+ sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
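+ /* each scheduler thread serves its own CPT's scheduler pool and
+ * is bound to that CPT's CPUs below */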
+
+ rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
+ if (rc != 0) {
+ CWARN("Failed to bind on CPT %d, please verify whether "
+ "all CPUs are healthy and reload modules if necessary, "
+ "otherwise your system might under risk of low "
+ "performance\n", sched->ibs_cpt);
+ }
+
+ spin_lock_irqsave(&sched->ibs_lock, flags);
+
+ while (!kiblnd_data.kib_shutdown) {
+ if (busy_loops++ >= IBLND_RESCHED) {
+ spin_unlock_irqrestore(&sched->ibs_lock, flags);
+
+ cond_resched();
+ busy_loops = 0;
+
+ spin_lock_irqsave(&sched->ibs_lock, flags);
+ }
+
+ did_something = 0;
+
+ if (!list_empty(&sched->ibs_conns)) {
+ conn = list_entry(sched->ibs_conns.next,
+ kib_conn_t, ibc_sched_list);
+ /* take over kib_sched_conns' ref on conn... */
+ LASSERT(conn->ibc_scheduled);
+ list_del(&conn->ibc_sched_list);
+ conn->ibc_ready = 0;
+
+ spin_unlock_irqrestore(&sched->ibs_lock, flags);
+
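+ /* poll a single completion; if the CQ appears empty, re-arm
+ * the completion notification and poll once more to close the
+ * race between the empty poll and the re-arm */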
+ rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
+ if (rc == 0) {
+ rc = ib_req_notify_cq(conn->ibc_cq,
+ IB_CQ_NEXT_COMP);
+ if (rc < 0) {
+ CWARN("%s: ib_req_notify_cq failed: %d, "
+ "closing connection\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
+ kiblnd_close_conn(conn, -EIO);
+ kiblnd_conn_decref(conn);
+ spin_lock_irqsave(&sched->ibs_lock,
+ flags);
+ continue;
+ }
+
+ rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
+ }
+
+ if (rc < 0) {
+ CWARN("%s: ib_poll_cq failed: %d, "
+ "closing connection\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ rc);
+ kiblnd_close_conn(conn, -EIO);
+ kiblnd_conn_decref(conn);
+ spin_lock_irqsave(&sched->ibs_lock, flags);
+ continue;
+ }
+
+ spin_lock_irqsave(&sched->ibs_lock, flags);
+
+ if (rc != 0 || conn->ibc_ready) {
+ /* There may be another completion waiting; get
+ * another scheduler to check while I handle
+ * this one... */
+ /* +1 ref for sched_conns */
+ kiblnd_conn_addref(conn);
+ list_add_tail(&conn->ibc_sched_list,
+ &sched->ibs_conns);
+ if (waitqueue_active(&sched->ibs_waitq))
+ wake_up(&sched->ibs_waitq);
+ } else {
+ conn->ibc_scheduled = 0;
+ }
+
+ if (rc != 0) {
+ spin_unlock_irqrestore(&sched->ibs_lock, flags);
+ kiblnd_complete(&wc);
+
+ spin_lock_irqsave(&sched->ibs_lock, flags);
+ }
+
+ kiblnd_conn_decref(conn); /* ...drop my ref from above */
+ did_something = 1;
+ }
+
+ if (did_something)
+ continue;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue_exclusive(&sched->ibs_waitq, &wait);
+ spin_unlock_irqrestore(&sched->ibs_lock, flags);
+
+ waitq_wait(&wait, TASK_INTERRUPTIBLE);
+ busy_loops = 0;
+
+ remove_wait_queue(&sched->ibs_waitq, &wait);
+ set_current_state(TASK_RUNNING);
+ spin_lock_irqsave(&sched->ibs_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&sched->ibs_lock, flags);
+
+ kiblnd_thread_fini();
+ return 0;
+}
+
+int
+kiblnd_failover_thread(void *arg)
+{
+ rwlock_t *glock = &kiblnd_data.kib_global_lock;
+ kib_dev_t *dev;
+ wait_queue_t wait;
+ unsigned long flags;
+ int rc;
+
+ LASSERT (*kiblnd_tunables.kib_dev_failover != 0);
+
+ cfs_block_allsigs ();
+
+ init_waitqueue_entry_current(&wait);
+ write_lock_irqsave(glock, flags);
+
+ while (!kiblnd_data.kib_shutdown) {
+ int do_failover = 0;
+ int long_sleep;
+
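+ /* pick the first failed device whose failover deadline has
+ * already arrived */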
+ list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
+ ibd_fail_list) {
+ if (cfs_time_before(cfs_time_current(),
+ dev->ibd_next_failover))
+ continue;
+ do_failover = 1;
+ break;
+ }
+
+ if (do_failover) {
+ list_del_init(&dev->ibd_fail_list);
+ dev->ibd_failover = 1;
+ write_unlock_irqrestore(glock, flags);
+
+ rc = kiblnd_dev_failover(dev);
+
+ write_lock_irqsave(glock, flags);
+
+ LASSERT (dev->ibd_failover);
+ dev->ibd_failover = 0;
+ if (rc >= 0) { /* Device is OK or failover succeeded */
+ dev->ibd_next_failover = cfs_time_shift(3);
+ continue;
+ }
+
+ /* failed to failover, retry later */
+ dev->ibd_next_failover =
+ cfs_time_shift(min(dev->ibd_failed_failover, 10));
+ if (kiblnd_dev_can_failover(dev)) {
+ list_add_tail(&dev->ibd_fail_list,
+ &kiblnd_data.kib_failed_devs);
+ }
+
+ continue;
+ }
+
+ /* long sleep if no more pending failover */
+ long_sleep = list_empty(&kiblnd_data.kib_failed_devs);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
+ write_unlock_irqrestore(glock, flags);
+
+ rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
+ cfs_time_seconds(1));
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
+ write_lock_irqsave(glock, flags);
+
+ if (!long_sleep || rc != 0)
+ continue;
+
+ /* After a long sleep, routinely check all active devices; we
+ * need a check like this because, with no active connection on
+ * the dev and no SEND from the local node, we could listen on
+ * the wrong HCA forever after a bonding failover */
+ list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
+ if (kiblnd_dev_can_failover(dev)) {
+ list_add_tail(&dev->ibd_fail_list,
+ &kiblnd_data.kib_failed_devs);
+ }
+ }
+ }
+
+ write_unlock_irqrestore(glock, flags);
+
+ kiblnd_thread_fini();
+ return 0;
+}
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
new file mode 100644
index 0000000..92dc567
--- /dev/null
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -0,0 +1,493 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/klnds/o2iblnd/o2iblnd_modparams.c
+ *
+ * Author: Eric Barton <eric@bartonsoftware.com>
+ */
+
+#include "o2iblnd.h"
+
+static int service = 987;
+CFS_MODULE_PARM(service, "i", int, 0444,
+ "service number (within RDMA_PS_TCP)");
+
+static int cksum = 0;
+CFS_MODULE_PARM(cksum, "i", int, 0644,
+ "set non-zero to enable message (not RDMA) checksums");
+
+static int timeout = 50;
+CFS_MODULE_PARM(timeout, "i", int, 0644,
+ "timeout (seconds)");
+
+/* Number of threads in each scheduler pool (which is per-CPT);
+ * a reasonable value is estimated from the number of CPUs if this is set to zero. */
+static int nscheds;
+CFS_MODULE_PARM(nscheds, "i", int, 0444,
+ "number of threads in each scheduler pool");
+
+/* NB: this value is shared by all CPTs, it can grow at runtime */
+static int ntx = 512;
+CFS_MODULE_PARM(ntx, "i", int, 0444,
+ "# of message descriptors allocated for each pool");
+
+/* NB: this value is shared by all CPTs */
+static int credits = 256;
+CFS_MODULE_PARM(credits, "i", int, 0444,
+ "# concurrent sends");
+
+static int peer_credits = 8;
+CFS_MODULE_PARM(peer_credits, "i", int, 0444,
+ "# concurrent sends to 1 peer");
+
+static int peer_credits_hiw = 0;
+CFS_MODULE_PARM(peer_credits_hiw, "i", int, 0444,
+ "when eagerly to return credits");
+
+static int peer_buffer_credits = 0;
+CFS_MODULE_PARM(peer_buffer_credits, "i", int, 0444,
+ "# per-peer router buffer credits");
+
+static int peer_timeout = 180;
+CFS_MODULE_PARM(peer_timeout, "i", int, 0444,
+ "Seconds without aliveness news to declare peer dead (<=0 to disable)");
+
+static char *ipif_name = "ib0";
+CFS_MODULE_PARM(ipif_name, "s", charp, 0444,
+ "IPoIB interface name");
+
+static int retry_count = 5;
+CFS_MODULE_PARM(retry_count, "i", int, 0644,
+ "Retransmissions when no ACK received");
+
+static int rnr_retry_count = 6;
+CFS_MODULE_PARM(rnr_retry_count, "i", int, 0644,
+ "RNR retransmissions");
+
+static int keepalive = 100;
+CFS_MODULE_PARM(keepalive, "i", int, 0644,
+ "Idle time in seconds before sending a keepalive");
+
+static int ib_mtu = 0;
+CFS_MODULE_PARM(ib_mtu, "i", int, 0444,
+ "IB MTU 256/512/1024/2048/4096");
+
+static int concurrent_sends = 0;
+CFS_MODULE_PARM(concurrent_sends, "i", int, 0444,
+ "send work-queue sizing");
+
+static int map_on_demand = 0;
+CFS_MODULE_PARM(map_on_demand, "i", int, 0444,
+ "map on demand");
+
+/* NB: this value is shared by all CPTs, it can grow at runtime */
+static int fmr_pool_size = 512;
+CFS_MODULE_PARM(fmr_pool_size, "i", int, 0444,
+ "size of fmr pool on each CPT (>= ntx / 4)");
+
+/* NB: this value is shared by all CPTs, it can grow at runtime */
+static int fmr_flush_trigger = 384;
+CFS_MODULE_PARM(fmr_flush_trigger, "i", int, 0444,
+ "# dirty FMRs that triggers pool flush");
+
+static int fmr_cache = 1;
+CFS_MODULE_PARM(fmr_cache, "i", int, 0444,
+ "non-zero to enable FMR caching");
+
+/* NB: this value is shared by all CPTs, it can grow at runtime */
+static int pmr_pool_size = 512;
+CFS_MODULE_PARM(pmr_pool_size, "i", int, 0444,
+ "size of MR cache pmr pool on each CPT");
+
+/*
+ * 0: disable failover
+ * 1: enable failover if necessary
+ * 2: force to failover (for debug)
+ */
+static int dev_failover = 0;
+CFS_MODULE_PARM(dev_failover, "i", int, 0444,
+ "HCA failover for bonding (0 off, 1 on, other values reserved)");
+
+
+static int require_privileged_port = 0;
+CFS_MODULE_PARM(require_privileged_port, "i", int, 0644,
+ "require privileged port when accepting connection");
+
+static int use_privileged_port = 1;
+CFS_MODULE_PARM(use_privileged_port, "i", int, 0644,
+ "use privileged port when initiating connection");
+
+kib_tunables_t kiblnd_tunables = {
+ .kib_dev_failover = &dev_failover,
+ .kib_service = &service,
+ .kib_cksum = &cksum,
+ .kib_timeout = &timeout,
+ .kib_keepalive = &keepalive,
+ .kib_ntx = &ntx,
+ .kib_credits = &credits,
+ .kib_peertxcredits = &peer_credits,
+ .kib_peercredits_hiw = &peer_credits_hiw,
+ .kib_peerrtrcredits = &peer_buffer_credits,
+ .kib_peertimeout = &peer_timeout,
+ .kib_default_ipif = &ipif_name,
+ .kib_retry_count = &retry_count,
+ .kib_rnr_retry_count = &rnr_retry_count,
+ .kib_concurrent_sends = &concurrent_sends,
+ .kib_ib_mtu = &ib_mtu,
+ .kib_map_on_demand = &map_on_demand,
+ .kib_fmr_pool_size = &fmr_pool_size,
+ .kib_fmr_flush_trigger = &fmr_flush_trigger,
+ .kib_fmr_cache = &fmr_cache,
+ .kib_pmr_pool_size = &pmr_pool_size,
+ .kib_require_priv_port = &require_privileged_port,
+ .kib_use_priv_port = &use_privileged_port,
+ .kib_nscheds = &nscheds
+};
+
+#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
+
+static char ipif_basename_space[32];
+
+
+enum {
+ O2IBLND_SERVICE = 1,
+ O2IBLND_CKSUM,
+ O2IBLND_TIMEOUT,
+ O2IBLND_NTX,
+ O2IBLND_CREDITS,
+ O2IBLND_PEER_TXCREDITS,
+ O2IBLND_PEER_CREDITS_HIW,
+ O2IBLND_PEER_RTRCREDITS,
+ O2IBLND_PEER_TIMEOUT,
+ O2IBLND_IPIF_BASENAME,
+ O2IBLND_RETRY_COUNT,
+ O2IBLND_RNR_RETRY_COUNT,
+ O2IBLND_KEEPALIVE,
+ O2IBLND_CONCURRENT_SENDS,
+ O2IBLND_IB_MTU,
+ O2IBLND_MAP_ON_DEMAND,
+ O2IBLND_FMR_POOL_SIZE,
+ O2IBLND_FMR_FLUSH_TRIGGER,
+ O2IBLND_FMR_CACHE,
+ O2IBLND_PMR_POOL_SIZE,
+ O2IBLND_DEV_FAILOVER
+};
+
+static ctl_table_t kiblnd_ctl_table[] = {
+ {
+ .ctl_name = O2IBLND_SERVICE,
+ .procname = "service",
+ .data = &service,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ .ctl_name = O2IBLND_CKSUM,
+ .procname = "cksum",
+ .data = &cksum,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ .ctl_name = O2IBLND_TIMEOUT,
+ .procname = "timeout",
+ .data = &timeout,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ .ctl_name = O2IBLND_NTX,
+ .procname = "ntx",
+ .data = &ntx,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ .ctl_name = O2IBLND_CREDITS,
+ .procname = "credits",
+ .data = &credits,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ .ctl_name = O2IBLND_PEER_TXCREDITS,
+ .procname = "peer_credits",
+ .data = &peer_credits,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ .ctl_name = O2IBLND_PEER_CREDITS_HIW,
+ .procname = "peer_credits_hiw",
+ .data = &peer_credits_hiw,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ .ctl_name = O2IBLND_PEER_RTRCREDITS,
+ .procname = "peer_buffer_credits",
+ .data = &peer_buffer_credits,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ .ctl_name = O2IBLND_PEER_TIMEOUT,
+ .procname = "peer_timeout",
+ .data = &peer_timeout,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ .ctl_name = O2IBLND_IPIF_BASENAME,
+ .procname = "ipif_name",
+ .data = ipif_basename_space,
+ .maxlen = sizeof(ipif_basename_space),
+ .mode = 0444,
+ .proc_handler = &proc_dostring
+ },
+ {
+ .ctl_name = O2IBLND_RETRY_COUNT,
+ .procname = "retry_count",
+ .data = &retry_count,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ .ctl_name = O2IBLND_RNR_RETRY_COUNT,
+ .procname = "rnr_retry_count",
+ .data = &rnr_retry_count,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ .ctl_name = O2IBLND_KEEPALIVE,
+ .procname = "keepalive",
+ .data = &keepalive,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ .ctl_name = O2IBLND_CONCURRENT_SENDS,
+ .procname = "concurrent_sends",
+ .data = &concurrent_sends,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ .ctl_name = O2IBLND_IB_MTU,
+ .procname = "ib_mtu",
+ .data = &ib_mtu,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ .ctl_name = O2IBLND_MAP_ON_DEMAND,
+ .procname = "map_on_demand",
+ .data = &map_on_demand,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec
+ },
+
+ {
+ .ctl_name = O2IBLND_FMR_POOL_SIZE,
+ .procname = "fmr_pool_size",
+ .data = &fmr_pool_size,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ .ctl_name = O2IBLND_FMR_FLUSH_TRIGGER,
+ .procname = "fmr_flush_trigger",
+ .data = &fmr_flush_trigger,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ .ctl_name = O2IBLND_FMR_CACHE,
+ .procname = "fmr_cache",
+ .data = &fmr_cache,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ .ctl_name = O2IBLND_PMR_POOL_SIZE,
+ .procname = "pmr_pool_size",
+ .data = &pmr_pool_size,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ .ctl_name = O2IBLND_DEV_FAILOVER,
+ .procname = "dev_failover",
+ .data = &dev_failover,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec
+ },
+ {0}
+};
+
+static ctl_table_t kiblnd_top_ctl_table[] = {
+ {
+ .ctl_name = CTL_O2IBLND,
+ .procname = "o2iblnd",
+ .data = NULL,
+ .maxlen = 0,
+ .mode = 0555,
+ .child = kiblnd_ctl_table
+ },
+ {0}
+};
+
+void
+kiblnd_initstrtunable(char *space, char *str, int size)
+{
+ strncpy(space, str, size);
+ space[size-1] = 0;
+}
+
+void
+kiblnd_sysctl_init (void)
+{
+ kiblnd_initstrtunable(ipif_basename_space, ipif_name,
+ sizeof(ipif_basename_space));
+
+ kiblnd_tunables.kib_sysctl =
+ register_sysctl_table(kiblnd_top_ctl_table);
+
+ if (kiblnd_tunables.kib_sysctl == NULL)
+ CWARN("Can't setup /proc tunables\n");
+}
+
+void
+kiblnd_sysctl_fini (void)
+{
+ if (kiblnd_tunables.kib_sysctl != NULL)
+ unregister_sysctl_table(kiblnd_tunables.kib_sysctl);
+}
+
+#else
+
+void
+kiblnd_sysctl_init (void)
+{
+}
+
+void
+kiblnd_sysctl_fini (void)
+{
+}
+
+#endif
+
+int
+kiblnd_tunables_init (void)
+{
+ if (kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu) < 0) {
+ CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n",
+ *kiblnd_tunables.kib_ib_mtu);
+ return -EINVAL;
+ }
+
+ if (*kiblnd_tunables.kib_peertxcredits < IBLND_CREDITS_DEFAULT)
+ *kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_DEFAULT;
+
+ if (*kiblnd_tunables.kib_peertxcredits > IBLND_CREDITS_MAX)
+ *kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_MAX;
+
+ if (*kiblnd_tunables.kib_peertxcredits > *kiblnd_tunables.kib_credits)
+ *kiblnd_tunables.kib_peertxcredits = *kiblnd_tunables.kib_credits;
+
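+ /* Clamp the credit high-water mark into the range
+ * [peer_credits / 2, peer_credits - 1]; with the default
+ * peer_credits_hiw of 0 it is raised to half the peer credits. */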
+ if (*kiblnd_tunables.kib_peercredits_hiw < *kiblnd_tunables.kib_peertxcredits / 2)
+ *kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits / 2;
+
+ if (*kiblnd_tunables.kib_peercredits_hiw >= *kiblnd_tunables.kib_peertxcredits)
+ *kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits - 1;
+
+ if (*kiblnd_tunables.kib_map_on_demand < 0 ||
+ *kiblnd_tunables.kib_map_on_demand > IBLND_MAX_RDMA_FRAGS)
+ *kiblnd_tunables.kib_map_on_demand = 0; /* disable map-on-demand */
+
+ if (*kiblnd_tunables.kib_map_on_demand == 1)
+ *kiblnd_tunables.kib_map_on_demand = 2; /* it makes no sense to create a map for a single fragment */
+
+ if (*kiblnd_tunables.kib_concurrent_sends == 0) {
+ if (*kiblnd_tunables.kib_map_on_demand > 0 &&
+ *kiblnd_tunables.kib_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8)
+ *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits) * 2;
+ else
+ *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits);
+ }
+
+ if (*kiblnd_tunables.kib_concurrent_sends > *kiblnd_tunables.kib_peertxcredits * 2)
+ *kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits * 2;
+
+ if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits / 2)
+ *kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits / 2;
+
+ if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits) {
+ CWARN("Concurrent sends %d is lower than message queue size: %d, "
+ "performance may drop slightly.\n",
+ *kiblnd_tunables.kib_concurrent_sends, *kiblnd_tunables.kib_peertxcredits);
+ }
+
+ kiblnd_sysctl_init();
+ return 0;
+}
+
+void
+kiblnd_tunables_fini (void)
+{
+ kiblnd_sysctl_fini();
+}
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/Makefile b/drivers/staging/lustre/lnet/klnds/socklnd/Makefile
new file mode 100644
index 0000000..6494b2b
--- /dev/null
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_LNET) += ksocklnd.o
+
+ksocklnd-y := socklnd.o socklnd_cb.o socklnd_proto.o socklnd_modparams.o socklnd_lib-linux.o
+
+
+
+ccflags-y := -I$(src)/../../include
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
new file mode 100644
index 0000000..6825b45
--- /dev/null
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -0,0 +1,2904 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/klnds/socklnd/socklnd.c
+ *
+ * Author: Zach Brown <zab@zabbo.net>
+ * Author: Peter J. Braam <braam@clusterfs.com>
+ * Author: Phil Schwan <phil@clusterfs.com>
+ * Author: Eric Barton <eric@bartonsoftware.com>
+ */
+
+#include "socklnd.h"
+
+lnd_t the_ksocklnd;
+ksock_nal_data_t ksocknal_data;
+
+ksock_interface_t *
+ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip)
+{
+ ksock_net_t *net = ni->ni_data;
+ int i;
+ ksock_interface_t *iface;
+
+ for (i = 0; i < net->ksnn_ninterfaces; i++) {
+ LASSERT(i < LNET_MAX_INTERFACES);
+ iface = &net->ksnn_interfaces[i];
+
+ if (iface->ksni_ipaddr == ip)
+ return (iface);
+ }
+
+ return (NULL);
+}
+
+ksock_route_t *
+ksocknal_create_route (__u32 ipaddr, int port)
+{
+ ksock_route_t *route;
+
+ LIBCFS_ALLOC (route, sizeof (*route));
+ if (route == NULL)
+ return (NULL);
+
+ atomic_set (&route->ksnr_refcount, 1);
+ route->ksnr_peer = NULL;
+ route->ksnr_retry_interval = 0; /* OK to connect at any time */
+ route->ksnr_ipaddr = ipaddr;
+ route->ksnr_port = port;
+ route->ksnr_scheduled = 0;
+ route->ksnr_connecting = 0;
+ route->ksnr_connected = 0;
+ route->ksnr_deleted = 0;
+ route->ksnr_conn_count = 0;
+ route->ksnr_share_count = 0;
+
+ return (route);
+}
+
+void
+ksocknal_destroy_route (ksock_route_t *route)
+{
+ LASSERT (atomic_read(&route->ksnr_refcount) == 0);
+
+ if (route->ksnr_peer != NULL)
+ ksocknal_peer_decref(route->ksnr_peer);
+
+ LIBCFS_FREE (route, sizeof (*route));
+}
+
+int
+ksocknal_create_peer (ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
+{
+ ksock_net_t *net = ni->ni_data;
+ ksock_peer_t *peer;
+
+ LASSERT (id.nid != LNET_NID_ANY);
+ LASSERT (id.pid != LNET_PID_ANY);
+ LASSERT (!in_interrupt());
+
+ LIBCFS_ALLOC (peer, sizeof (*peer));
+ if (peer == NULL)
+ return -ENOMEM;
+
+ memset (peer, 0, sizeof (*peer)); /* NULL pointers/clear flags etc */
+
+ peer->ksnp_ni = ni;
+ peer->ksnp_id = id;
+ atomic_set (&peer->ksnp_refcount, 1); /* 1 ref for caller */
+ peer->ksnp_closing = 0;
+ peer->ksnp_accepting = 0;
+ peer->ksnp_proto = NULL;
+ peer->ksnp_last_alive = 0;
+ peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
+
+ INIT_LIST_HEAD (&peer->ksnp_conns);
+ INIT_LIST_HEAD (&peer->ksnp_routes);
+ INIT_LIST_HEAD (&peer->ksnp_tx_queue);
+ INIT_LIST_HEAD (&peer->ksnp_zc_req_list);
+ spin_lock_init(&peer->ksnp_lock);
+
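+ /* count the new peer against its network (under ksnn_lock), or
+ * refuse it if the network is already shutting down */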
+ spin_lock_bh(&net->ksnn_lock);
+
+ if (net->ksnn_shutdown) {
+ spin_unlock_bh(&net->ksnn_lock);
+
+ LIBCFS_FREE(peer, sizeof(*peer));
+ CERROR("Can't create peer: network shutdown\n");
+ return -ESHUTDOWN;
+ }
+
+ net->ksnn_npeers++;
+
+ spin_unlock_bh(&net->ksnn_lock);
+
+ *peerp = peer;
+ return 0;
+}
+
+void
+ksocknal_destroy_peer (ksock_peer_t *peer)
+{
+ ksock_net_t *net = peer->ksnp_ni->ni_data;
+
+ CDEBUG (D_NET, "peer %s %p deleted\n",
+ libcfs_id2str(peer->ksnp_id), peer);
+
+ LASSERT (atomic_read (&peer->ksnp_refcount) == 0);
+ LASSERT (peer->ksnp_accepting == 0);
+ LASSERT (list_empty (&peer->ksnp_conns));
+ LASSERT (list_empty (&peer->ksnp_routes));
+ LASSERT (list_empty (&peer->ksnp_tx_queue));
+ LASSERT (list_empty (&peer->ksnp_zc_req_list));
+
+ LIBCFS_FREE (peer, sizeof (*peer));
+
+ /* NB a peer's connections and routes keep a reference on their peer
+ * until they are destroyed, so we can be assured that _all_ state to
+ * do with this peer has been cleaned up when its refcount drops to
+ * zero. */
+ spin_lock_bh(&net->ksnn_lock);
+ net->ksnn_npeers--;
+ spin_unlock_bh(&net->ksnn_lock);
+}
+
+ksock_peer_t *
+ksocknal_find_peer_locked (lnet_ni_t *ni, lnet_process_id_t id)
+{
+ struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
+ struct list_head *tmp;
+ ksock_peer_t *peer;
+
+ list_for_each (tmp, peer_list) {
+
+ peer = list_entry (tmp, ksock_peer_t, ksnp_list);
+
+ LASSERT (!peer->ksnp_closing);
+
+ if (peer->ksnp_ni != ni)
+ continue;
+
+ if (peer->ksnp_id.nid != id.nid ||
+ peer->ksnp_id.pid != id.pid)
+ continue;
+
+ CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
+ peer, libcfs_id2str(id),
+ atomic_read(&peer->ksnp_refcount));
+ return (peer);
+ }
+ return (NULL);
+}
+
+ksock_peer_t *
+ksocknal_find_peer (lnet_ni_t *ni, lnet_process_id_t id)
+{
+ ksock_peer_t *peer;
+
+ read_lock(&ksocknal_data.ksnd_global_lock);
+ peer = ksocknal_find_peer_locked(ni, id);
+ if (peer != NULL) /* +1 ref for caller? */
+ ksocknal_peer_addref(peer);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+
+ return (peer);
+}
+
+void
+ksocknal_unlink_peer_locked (ksock_peer_t *peer)
+{
+ int i;
+ __u32 ip;
+ ksock_interface_t *iface;
+
+ for (i = 0; i < peer->ksnp_n_passive_ips; i++) {
+ LASSERT (i < LNET_MAX_INTERFACES);
+ ip = peer->ksnp_passive_ips[i];
+
+ iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
+ /* All IPs in peer->ksnp_passive_ips[] come from the
+ * interface list, therefore the call must succeed. */
+ LASSERT (iface != NULL);
+
+ CDEBUG(D_NET, "peer=%p iface=%p ksni_nroutes=%d\n",
+ peer, iface, iface->ksni_nroutes);
+ iface->ksni_npeers--;
+ }
+
+ LASSERT (list_empty(&peer->ksnp_conns));
+ LASSERT (list_empty(&peer->ksnp_routes));
+ LASSERT (!peer->ksnp_closing);
+ peer->ksnp_closing = 1;
+ list_del (&peer->ksnp_list);
+ /* lose peerlist's ref */
+ ksocknal_peer_decref(peer);
+}
+
+int
+ksocknal_get_peer_info (lnet_ni_t *ni, int index,
+ lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
+ int *port, int *conn_count, int *share_count)
+{
+ ksock_peer_t *peer;
+ struct list_head *ptmp;
+ ksock_route_t *route;
+ struct list_head *rtmp;
+ int i;
+ int j;
+ int rc = -ENOENT;
+
+ read_lock(&ksocknal_data.ksnd_global_lock);
+
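+ /* walk every peer, passive IP and route in hash order,
+ * decrementing 'index' until the requested entry is reached */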
+ for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
+
+ list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) {
+ peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
+
+ if (peer->ksnp_ni != ni)
+ continue;
+
+ if (peer->ksnp_n_passive_ips == 0 &&
+ list_empty(&peer->ksnp_routes)) {
+ if (index-- > 0)
+ continue;
+
+ *id = peer->ksnp_id;
+ *myip = 0;
+ *peer_ip = 0;
+ *port = 0;
+ *conn_count = 0;
+ *share_count = 0;
+ rc = 0;
+ goto out;
+ }
+
+ for (j = 0; j < peer->ksnp_n_passive_ips; j++) {
+ if (index-- > 0)
+ continue;
+
+ *id = peer->ksnp_id;
+ *myip = peer->ksnp_passive_ips[j];
+ *peer_ip = 0;
+ *port = 0;
+ *conn_count = 0;
+ *share_count = 0;
+ rc = 0;
+ goto out;
+ }
+
+ list_for_each (rtmp, &peer->ksnp_routes) {
+ if (index-- > 0)
+ continue;
+
+ route = list_entry(rtmp, ksock_route_t,
+ ksnr_list);
+
+ *id = peer->ksnp_id;
+ *myip = route->ksnr_myipaddr;
+ *peer_ip = route->ksnr_ipaddr;
+ *port = route->ksnr_port;
+ *conn_count = route->ksnr_conn_count;
+ *share_count = route->ksnr_share_count;
+ rc = 0;
+ goto out;
+ }
+ }
+ }
+ out:
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+ return (rc);
+}
+
+void
+ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
+{
+ ksock_peer_t *peer = route->ksnr_peer;
+ int type = conn->ksnc_type;
+ ksock_interface_t *iface;
+
+ conn->ksnc_route = route;
+ ksocknal_route_addref(route);
+
+ if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
+ if (route->ksnr_myipaddr == 0) {
+ /* route wasn't bound locally yet (the initial route) */
+ CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
+ libcfs_id2str(peer->ksnp_id),
+ &route->ksnr_ipaddr,
+ &conn->ksnc_myipaddr);
+ } else {
+ CDEBUG(D_NET, "Rebinding %s %pI4h from "
+ "%pI4h to %pI4h\n",
+ libcfs_id2str(peer->ksnp_id),
+ &route->ksnr_ipaddr,
+ &route->ksnr_myipaddr,
+ &conn->ksnc_myipaddr);
+
+ iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
+ route->ksnr_myipaddr);
+ if (iface != NULL)
+ iface->ksni_nroutes--;
+ }
+ route->ksnr_myipaddr = conn->ksnc_myipaddr;
+ iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
+ route->ksnr_myipaddr);
+ if (iface != NULL)
+ iface->ksni_nroutes++;
+ }
+
+ route->ksnr_connected |= (1<<type);
+ route->ksnr_conn_count++;
+
+ /* Successful connection => further attempts can
+ * proceed immediately */
+ route->ksnr_retry_interval = 0;
+}
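+
+ /* Note (illustrative, not part of this patch): ksnr_connected is a
+ * bitmask indexed by connection type, so "does this route already
+ * have a conn of this type?" is a single bit test:
+ *
+ *	int established =
+ *		(route->ksnr_connected & (1 << conn->ksnc_type)) != 0;
+ *
+ * ksocknal_close_conn_locked() clears the same bit again when the
+ * last conn of that type on the route goes away. */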
+
+void
+ksocknal_add_route_locked (ksock_peer_t *peer, ksock_route_t *route)
+{
+ struct list_head *tmp;
+ ksock_conn_t *conn;
+ ksock_route_t *route2;
+
+ LASSERT (!peer->ksnp_closing);
+ LASSERT (route->ksnr_peer == NULL);
+ LASSERT (!route->ksnr_scheduled);
+ LASSERT (!route->ksnr_connecting);
+ LASSERT (route->ksnr_connected == 0);
+
+ /* LASSERT(unique) */
+ list_for_each(tmp, &peer->ksnp_routes) {
+ route2 = list_entry(tmp, ksock_route_t, ksnr_list);
+
+ if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
+ CERROR("Duplicate route %s %pI4h\n",
+ libcfs_id2str(peer->ksnp_id),
+ &route->ksnr_ipaddr);
+ LBUG();
+ }
+ }
+
+ route->ksnr_peer = peer;
+ ksocknal_peer_addref(peer);
+ /* peer's routelist takes over my ref on 'route' */
+ list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
+
+ list_for_each(tmp, &peer->ksnp_conns) {
+ conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+
+ if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
+ continue;
+
+ ksocknal_associate_route_conn_locked(route, conn);
+ /* keep going (typed routes) */
+ }
+}
+
+void
+ksocknal_del_route_locked (ksock_route_t *route)
+{
+ ksock_peer_t *peer = route->ksnr_peer;
+ ksock_interface_t *iface;
+ ksock_conn_t *conn;
+ struct list_head *ctmp;
+ struct list_head *cnxt;
+
+ LASSERT (!route->ksnr_deleted);
+
+ /* Close associated conns */
+ list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) {
+ conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
+
+ if (conn->ksnc_route != route)
+ continue;
+
+ ksocknal_close_conn_locked (conn, 0);
+ }
+
+ if (route->ksnr_myipaddr != 0) {
+ iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
+ route->ksnr_myipaddr);
+ if (iface != NULL)
+ iface->ksni_nroutes--;
+ }
+
+ route->ksnr_deleted = 1;
+ list_del (&route->ksnr_list);
+ ksocknal_route_decref(route); /* drop peer's ref */
+
+ if (list_empty (&peer->ksnp_routes) &&
+ list_empty (&peer->ksnp_conns)) {
+ /* I've just removed the last route to a peer with no active
+ * connections */
+ ksocknal_unlink_peer_locked (peer);
+ }
+}
+
+int
+ksocknal_add_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
+{
+ struct list_head *tmp;
+ ksock_peer_t *peer;
+ ksock_peer_t *peer2;
+ ksock_route_t *route;
+ ksock_route_t *route2;
+ int rc;
+
+ if (id.nid == LNET_NID_ANY ||
+ id.pid == LNET_PID_ANY)
+ return (-EINVAL);
+
+ /* Have a brand new peer ready... */
+ rc = ksocknal_create_peer(&peer, ni, id);
+ if (rc != 0)
+ return rc;
+
+ route = ksocknal_create_route (ipaddr, port);
+ if (route == NULL) {
+ ksocknal_peer_decref(peer);
+ return (-ENOMEM);
+ }
+
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
+
+ /* always called with a ref on ni, so shutdown can't have started */
+ LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
+
+ peer2 = ksocknal_find_peer_locked (ni, id);
+ if (peer2 != NULL) {
+ ksocknal_peer_decref(peer);
+ peer = peer2;
+ } else {
+ /* peer table takes my ref on peer */
+ list_add_tail (&peer->ksnp_list,
+ ksocknal_nid2peerlist (id.nid));
+ }
+
+ route2 = NULL;
+ list_for_each (tmp, &peer->ksnp_routes) {
+ route2 = list_entry(tmp, ksock_route_t, ksnr_list);
+
+ if (route2->ksnr_ipaddr == ipaddr)
+ break;
+
+ route2 = NULL;
+ }
+ if (route2 == NULL) {
+ ksocknal_add_route_locked(peer, route);
+ route->ksnr_share_count++;
+ } else {
+ ksocknal_route_decref(route);
+ route2->ksnr_share_count++;
+ }
+
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
+
+ return (0);
+}
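+
+ /* Illustrative sketch, not part of this patch: a minimal caller,
+ * mirroring what IOC_LIBCFS_ADD_PEER does in ksocknal_ctl(), fills
+ * in an lnet_process_id_t and supplies IP and port (here 'nid',
+ * 'ipaddr' and 'port' are assumed to come from the caller):
+ *
+ *	lnet_process_id_t id;
+ *	int rc;
+ *
+ *	id.nid = nid;
+ *	id.pid = LUSTRE_SRV_LNET_PID;
+ *	rc = ksocknal_add_peer(ni, id, ipaddr, port);
+ *
+ * A zero return only guarantees the peer/route entries exist; the
+ * TCP connection itself is set up on demand later. */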
+
+void
+ksocknal_del_peer_locked (ksock_peer_t *peer, __u32 ip)
+{
+ ksock_conn_t *conn;
+ ksock_route_t *route;
+ struct list_head *tmp;
+ struct list_head *nxt;
+ int nshared;
+
+ LASSERT (!peer->ksnp_closing);
+
+ /* Extra ref prevents peer disappearing until I'm done with it */
+ ksocknal_peer_addref(peer);
+
+ list_for_each_safe (tmp, nxt, &peer->ksnp_routes) {
+ route = list_entry(tmp, ksock_route_t, ksnr_list);
+
+ /* no match */
+ if (!(ip == 0 || route->ksnr_ipaddr == ip))
+ continue;
+
+ route->ksnr_share_count = 0;
+ /* This deletes associated conns too */
+ ksocknal_del_route_locked (route);
+ }
+
+ nshared = 0;
+ list_for_each_safe (tmp, nxt, &peer->ksnp_routes) {
+ route = list_entry(tmp, ksock_route_t, ksnr_list);
+ nshared += route->ksnr_share_count;
+ }
+
+ if (nshared == 0) {
+ /* remove everything else if there are no explicit entries
+ * left */
+
+ list_for_each_safe (tmp, nxt, &peer->ksnp_routes) {
+ route = list_entry(tmp, ksock_route_t, ksnr_list);
+
+ /* we should only be removing auto-entries */
+ LASSERT(route->ksnr_share_count == 0);
+ ksocknal_del_route_locked (route);
+ }
+
+ list_for_each_safe (tmp, nxt, &peer->ksnp_conns) {
+ conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+
+ ksocknal_close_conn_locked(conn, 0);
+ }
+ }
+
+ ksocknal_peer_decref(peer);
+ /* NB peer unlinks itself when last conn/route is removed */
+}
+
+int
+ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
+{
+ LIST_HEAD (zombies);
+ struct list_head *ptmp;
+ struct list_head *pnxt;
+ ksock_peer_t *peer;
+ int lo;
+ int hi;
+ int i;
+ int rc = -ENOENT;
+
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
+
+ if (id.nid != LNET_NID_ANY)
+ lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
+ else {
+ lo = 0;
+ hi = ksocknal_data.ksnd_peer_hash_size - 1;
+ }
+
+ for (i = lo; i <= hi; i++) {
+ list_for_each_safe (ptmp, pnxt,
+ &ksocknal_data.ksnd_peers[i]) {
+ peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
+
+ if (peer->ksnp_ni != ni)
+ continue;
+
+ if (!((id.nid == LNET_NID_ANY || peer->ksnp_id.nid == id.nid) &&
+ (id.pid == LNET_PID_ANY || peer->ksnp_id.pid == id.pid)))
+ continue;
+
+ ksocknal_peer_addref(peer); /* a ref for me... */
+
+ ksocknal_del_peer_locked (peer, ip);
+
+ if (peer->ksnp_closing &&
+ !list_empty(&peer->ksnp_tx_queue)) {
+ LASSERT (list_empty(&peer->ksnp_conns));
+ LASSERT (list_empty(&peer->ksnp_routes));
+
+ list_splice_init(&peer->ksnp_tx_queue,
+ &zombies);
+ }
+
+ ksocknal_peer_decref(peer); /* ...till here */
+
+ rc = 0; /* matched! */
+ }
+ }
+
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
+
+ ksocknal_txlist_done(ni, &zombies, 1);
+
+ return (rc);
+}
+
+ksock_conn_t *
+ksocknal_get_conn_by_idx (lnet_ni_t *ni, int index)
+{
+ ksock_peer_t *peer;
+ struct list_head *ptmp;
+ ksock_conn_t *conn;
+ struct list_head *ctmp;
+ int i;
+
+ read_lock(&ksocknal_data.ksnd_global_lock);
+
+ for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
+ list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) {
+ peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
+
+ LASSERT (!peer->ksnp_closing);
+
+ if (peer->ksnp_ni != ni)
+ continue;
+
+ list_for_each (ctmp, &peer->ksnp_conns) {
+ if (index-- > 0)
+ continue;
+
+ conn = list_entry (ctmp, ksock_conn_t,
+ ksnc_list);
+ ksocknal_conn_addref(conn);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+ return (conn);
+ }
+ }
+ }
+
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+ return (NULL);
+}
+
+ksock_sched_t *
+ksocknal_choose_scheduler_locked(unsigned int cpt)
+{
+ struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
+ ksock_sched_t *sched;
+ int i;
+
+ LASSERT(info->ksi_nthreads > 0);
+
+ sched = &info->ksi_scheds[0];
+ /*
+ * NB: it's safe so far, but info->ksi_nthreads could be changed
+ * at runtime when we have dynamic LNet configuration, then we
+ * need to take care of this.
+ */
+ for (i = 1; i < info->ksi_nthreads; i++) {
+ if (sched->kss_nconns > info->ksi_scheds[i].kss_nconns)
+ sched = &info->ksi_scheds[i];
+ }
+
+ return sched;
+}
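+
+ /* Worked example (illustrative, not part of this patch): this is a
+ * plain least-loaded scan. With ksi_nthreads == 3 and kss_nconns of
+ * {4, 1, 2} the loop settles on ksi_scheds[1]; ties keep the lowest
+ * index because the comparison is strictly greater-than. */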
+
+int
+ksocknal_local_ipvec (lnet_ni_t *ni, __u32 *ipaddrs)
+{
+ ksock_net_t *net = ni->ni_data;
+ int i;
+ int nip;
+
+ read_lock(&ksocknal_data.ksnd_global_lock);
+
+ nip = net->ksnn_ninterfaces;
+ LASSERT (nip <= LNET_MAX_INTERFACES);
+
+ /* Only offer interfaces for additional connections if I have
+ * more than one. */
+ if (nip < 2) {
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+ return 0;
+ }
+
+ for (i = 0; i < nip; i++) {
+ ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
+ LASSERT (ipaddrs[i] != 0);
+ }
+
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+ return (nip);
+}
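+
+ /* Illustrative sketch, not part of this patch: callers own the
+ * output array and size it for the worst case, as
+ * ksocknal_create_conn() does with hello->kshm_ips:
+ *
+ *	__u32 ips[LNET_MAX_INTERFACES];
+ *	int nips = ksocknal_local_ipvec(ni, ips);
+ *
+ * nips is 0 when the net has fewer than two interfaces, i.e. there
+ * is nothing worth advertising for additional connections. */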
+
+int
+ksocknal_match_peerip (ksock_interface_t *iface, __u32 *ips, int nips)
+{
+ int best_netmatch = 0;
+ int best_xor = 0;
+ int best = -1;
+ int this_xor;
+ int this_netmatch;
+ int i;
+
+ for (i = 0; i < nips; i++) {
+ if (ips[i] == 0)
+ continue;
+
+ this_xor = (ips[i] ^ iface->ksni_ipaddr);
+ this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0;
+
+ if (!(best < 0 ||
+ best_netmatch < this_netmatch ||
+ (best_netmatch == this_netmatch &&
+ best_xor > this_xor)))
+ continue;
+
+ best = i;
+ best_netmatch = this_netmatch;
+ best_xor = this_xor;
+ }
+
+ LASSERT (best >= 0);
+ return (best);
+}
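+
+ /* Worked example (illustrative, not part of this patch): for an
+ * interface 192.168.1.10/24 and candidate peer IPs 192.168.1.20 and
+ * 10.0.0.5,
+ *
+ *	this_xor(192.168.1.20) = 0x0000001e, this_netmatch = 1
+ *	this_xor(10.0.0.5)     = 0xcaa8010f, this_netmatch = 0
+ *
+ * so the same-subnet address wins on netmatch; between same-subnet
+ * candidates the smaller XOR, i.e. the numerically closer address,
+ * wins. */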
+
+int
+ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
+{
+ rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
+ ksock_net_t *net = peer->ksnp_ni->ni_data;
+ ksock_interface_t *iface;
+ ksock_interface_t *best_iface;
+ int n_ips;
+ int i;
+ int j;
+ int k;
+ __u32 ip;
+ __u32 xor;
+ int this_netmatch;
+ int best_netmatch;
+ int best_npeers;
+
+ /* CAVEAT EMPTOR: We do all our interface matching with an
+ * exclusive hold of global lock at IRQ priority. We're only
+ * expecting to be dealing with small numbers of interfaces, so the
+ * O(n**3)-ness shouldn't matter */
+
+ /* Also note that I'm not going to return more than n_peerips
+ * interfaces, even if I have more myself */
+
+ write_lock_bh(global_lock);
+
+ LASSERT (n_peerips <= LNET_MAX_INTERFACES);
+ LASSERT (net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
+
+ /* Only match interfaces for additional connections
+ * if I have > 1 interface */
+ n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
+ MIN(n_peerips, net->ksnn_ninterfaces);
+
+ for (i = 0; peer->ksnp_n_passive_ips < n_ips; i++) {
+ /* NB the loop test really is on ksnp_n_passive_ips, which only
+ * grows as new interfaces are chosen below */
+
+ /* If we have any new interfaces, first tick off all the
+ * peer IPs that match old interfaces, then choose new
+ * interfaces to match the remaining peer IPS.
+ * We don't forget interfaces we've stopped using; we might
+ * start using them again... */
+
+ if (i < peer->ksnp_n_passive_ips) {
+ /* Old interface. */
+ ip = peer->ksnp_passive_ips[i];
+ best_iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
+
+ /* peer passive ips are kept up to date */
+ LASSERT(best_iface != NULL);
+ } else {
+ /* choose a new interface */
+ LASSERT (i == peer->ksnp_n_passive_ips);
+
+ best_iface = NULL;
+ best_netmatch = 0;
+ best_npeers = 0;
+
+ for (j = 0; j < net->ksnn_ninterfaces; j++) {
+ iface = &net->ksnn_interfaces[j];
+ ip = iface->ksni_ipaddr;
+
+ for (k = 0; k < peer->ksnp_n_passive_ips; k++)
+ if (peer->ksnp_passive_ips[k] == ip)
+ break;
+
+ if (k < peer->ksnp_n_passive_ips) /* using it already */
+ continue;
+
+ k = ksocknal_match_peerip(iface, peerips, n_peerips);
+ xor = (ip ^ peerips[k]);
+ this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;
+
+ if (!(best_iface == NULL ||
+ best_netmatch < this_netmatch ||
+ (best_netmatch == this_netmatch &&
+ best_npeers > iface->ksni_npeers)))
+ continue;
+
+ best_iface = iface;
+ best_netmatch = this_netmatch;
+ best_npeers = iface->ksni_npeers;
+ }
+
+ best_iface->ksni_npeers++;
+ ip = best_iface->ksni_ipaddr;
+ peer->ksnp_passive_ips[i] = ip;
+ peer->ksnp_n_passive_ips = i+1;
+ }
+
+ LASSERT (best_iface != NULL);
+
+ /* mark the best matching peer IP used */
+ j = ksocknal_match_peerip(best_iface, peerips, n_peerips);
+ peerips[j] = 0;
+ }
+
+ /* Overwrite input peer IP addresses */
+ memcpy(peerips, peer->ksnp_passive_ips, n_ips * sizeof(*peerips));
+
+ write_unlock_bh(global_lock);
+
+ return (n_ips);
+}
+
+void
+ksocknal_create_routes(ksock_peer_t *peer, int port,
+ __u32 *peer_ipaddrs, int npeer_ipaddrs)
+{
+ ksock_route_t *newroute = NULL;
+ rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
+ lnet_ni_t *ni = peer->ksnp_ni;
+ ksock_net_t *net = ni->ni_data;
+ struct list_head *rtmp;
+ ksock_route_t *route;
+ ksock_interface_t *iface;
+ ksock_interface_t *best_iface;
+ int best_netmatch;
+ int this_netmatch;
+ int best_nroutes;
+ int i;
+ int j;
+
+ /* CAVEAT EMPTOR: We do all our interface matching with an
+ * exclusive hold of global lock at IRQ priority. We're only
+ * expecting to be dealing with small numbers of interfaces, so the
+ * O(n**3)-ness here shouldn't matter */
+
+ write_lock_bh(global_lock);
+
+ if (net->ksnn_ninterfaces < 2) {
+ /* Only create additional connections
+ * if I have > 1 interface */
+ write_unlock_bh(global_lock);
+ return;
+ }
+
+ LASSERT (npeer_ipaddrs <= LNET_MAX_INTERFACES);
+
+ for (i = 0; i < npeer_ipaddrs; i++) {
+ if (newroute != NULL) {
+ newroute->ksnr_ipaddr = peer_ipaddrs[i];
+ } else {
+ write_unlock_bh(global_lock);
+
+ newroute = ksocknal_create_route(peer_ipaddrs[i], port);
+ if (newroute == NULL)
+ return;
+
+ write_lock_bh(global_lock);
+ }
+
+ if (peer->ksnp_closing) {
+ /* peer got closed under me */
+ break;
+ }
+
+ /* Already got a route? */
+ route = NULL;
+ list_for_each(rtmp, &peer->ksnp_routes) {
+ route = list_entry(rtmp, ksock_route_t, ksnr_list);
+
+ if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
+ break;
+
+ route = NULL;
+ }
+ if (route != NULL)
+ continue;
+
+ best_iface = NULL;
+ best_nroutes = 0;
+ best_netmatch = 0;
+
+ LASSERT (net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
+
+ /* Select interface to connect from */
+ for (j = 0; j < net->ksnn_ninterfaces; j++) {
+ iface = &net->ksnn_interfaces[j];
+
+ /* Using this interface already? */
+ list_for_each(rtmp, &peer->ksnp_routes) {
+ route = list_entry(rtmp, ksock_route_t,
+ ksnr_list);
+
+ if (route->ksnr_myipaddr == iface->ksni_ipaddr)
+ break;
+
+ route = NULL;
+ }
+ if (route != NULL)
+ continue;
+
+ this_netmatch = (((iface->ksni_ipaddr ^
+ newroute->ksnr_ipaddr) &
+ iface->ksni_netmask) == 0) ? 1 : 0;
+
+ if (!(best_iface == NULL ||
+ best_netmatch < this_netmatch ||
+ (best_netmatch == this_netmatch &&
+ best_nroutes > iface->ksni_nroutes)))
+ continue;
+
+ best_iface = iface;
+ best_netmatch = this_netmatch;
+ best_nroutes = iface->ksni_nroutes;
+ }
+
+ if (best_iface == NULL)
+ continue;
+
+ newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
+ best_iface->ksni_nroutes++;
+
+ ksocknal_add_route_locked(peer, newroute);
+ newroute = NULL;
+ }
+
+ write_unlock_bh(global_lock);
+ if (newroute != NULL)
+ ksocknal_route_decref(newroute);
+}
+
+int
+ksocknal_accept (lnet_ni_t *ni, socket_t *sock)
+{
+ ksock_connreq_t *cr;
+ int rc;
+ __u32 peer_ip;
+ int peer_port;
+
+ rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
+ LASSERT (rc == 0); /* we succeeded before */
+
+ LIBCFS_ALLOC(cr, sizeof(*cr));
+ if (cr == NULL) {
+ LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from "
+ "%pI4h: memory exhausted\n",
+ &peer_ip);
+ return -ENOMEM;
+ }
+
+ lnet_ni_addref(ni);
+ cr->ksncr_ni = ni;
+ cr->ksncr_sock = sock;
+
+ spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
+
+ list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
+ wake_up(&ksocknal_data.ksnd_connd_waitq);
+
+ spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
+ return 0;
+}
+
+int
+ksocknal_connecting (ksock_peer_t *peer, __u32 ipaddr)
+{
+ ksock_route_t *route;
+
+ list_for_each_entry (route, &peer->ksnp_routes, ksnr_list) {
+
+ if (route->ksnr_ipaddr == ipaddr)
+ return route->ksnr_connecting;
+ }
+ return 0;
+}
+
+int
+ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
+ socket_t *sock, int type)
+{
+ rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
+ LIST_HEAD (zombies);
+ lnet_process_id_t peerid;
+ struct list_head *tmp;
+ __u64 incarnation;
+ ksock_conn_t *conn;
+ ksock_conn_t *conn2;
+ ksock_peer_t *peer = NULL;
+ ksock_peer_t *peer2;
+ ksock_sched_t *sched;
+ ksock_hello_msg_t *hello;
+ int cpt;
+ ksock_tx_t *tx;
+ ksock_tx_t *txtmp;
+ int rc;
+ int active;
+ char *warn = NULL;
+
+ active = (route != NULL);
+
+ LASSERT (active == (type != SOCKLND_CONN_NONE));
+
+ LIBCFS_ALLOC(conn, sizeof(*conn));
+ if (conn == NULL) {
+ rc = -ENOMEM;
+ goto failed_0;
+ }
+
+ memset (conn, 0, sizeof (*conn));
+
+ conn->ksnc_peer = NULL;
+ conn->ksnc_route = NULL;
+ conn->ksnc_sock = sock;
+ /* two refs: one for the conn itself, plus an extra ref that
+ * prevents the socket being closed before the connection is
+ * established */
+ atomic_set (&conn->ksnc_sock_refcount, 2);
+ conn->ksnc_type = type;
+ ksocknal_lib_save_callback(sock, conn);
+ atomic_set (&conn->ksnc_conn_refcount, 1); /* 1 ref for me */
+
+ conn->ksnc_rx_ready = 0;
+ conn->ksnc_rx_scheduled = 0;
+
+ INIT_LIST_HEAD (&conn->ksnc_tx_queue);
+ conn->ksnc_tx_ready = 0;
+ conn->ksnc_tx_scheduled = 0;
+ conn->ksnc_tx_carrier = NULL;
+ atomic_set (&conn->ksnc_tx_nob, 0);
+
+ LIBCFS_ALLOC(hello, offsetof(ksock_hello_msg_t,
+ kshm_ips[LNET_MAX_INTERFACES]));
+ if (hello == NULL) {
+ rc = -ENOMEM;
+ goto failed_1;
+ }
+
+ /* stash conn's local and remote addrs */
+ rc = ksocknal_lib_get_conn_addrs (conn);
+ if (rc != 0)
+ goto failed_1;
+
+ /* Find out/confirm peer's NID and connection type and get the
+ * vector of interfaces she's willing to let me connect to.
+ * Passive connections use the listener timeout since the peer sends
+ * eagerly */
+
+ if (active) {
+ peer = route->ksnr_peer;
+ LASSERT(ni == peer->ksnp_ni);
+
+ /* Active connection sends HELLO eagerly */
+ hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
+ peerid = peer->ksnp_id;
+
+ write_lock_bh(global_lock);
+ conn->ksnc_proto = peer->ksnp_proto;
+ write_unlock_bh(global_lock);
+
+ if (conn->ksnc_proto == NULL) {
+ conn->ksnc_proto = &ksocknal_protocol_v3x;
+#if SOCKNAL_VERSION_DEBUG
+ if (*ksocknal_tunables.ksnd_protocol == 2)
+ conn->ksnc_proto = &ksocknal_protocol_v2x;
+ else if (*ksocknal_tunables.ksnd_protocol == 1)
+ conn->ksnc_proto = &ksocknal_protocol_v1x;
+#endif
+ }
+
+ rc = ksocknal_send_hello (ni, conn, peerid.nid, hello);
+ if (rc != 0)
+ goto failed_1;
+ } else {
+ peerid.nid = LNET_NID_ANY;
+ peerid.pid = LNET_PID_ANY;
+
+ /* Passive, get protocol from peer */
+ conn->ksnc_proto = NULL;
+ }
+
+ rc = ksocknal_recv_hello (ni, conn, hello, &peerid, &incarnation);
+ if (rc < 0)
+ goto failed_1;
+
+ LASSERT (rc == 0 || active);
+ LASSERT (conn->ksnc_proto != NULL);
+ LASSERT (peerid.nid != LNET_NID_ANY);
+
+ cpt = lnet_cpt_of_nid(peerid.nid);
+
+ if (active) {
+ ksocknal_peer_addref(peer);
+ write_lock_bh(global_lock);
+ } else {
+ rc = ksocknal_create_peer(&peer, ni, peerid);
+ if (rc != 0)
+ goto failed_1;
+
+ write_lock_bh(global_lock);
+
+ /* called with a ref on ni, so shutdown can't have started */
+ LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
+
+ peer2 = ksocknal_find_peer_locked(ni, peerid);
+ if (peer2 == NULL) {
+ /* NB this puts an "empty" peer in the peer
+ * table (which takes my ref) */
+ list_add_tail(&peer->ksnp_list,
+ ksocknal_nid2peerlist(peerid.nid));
+ } else {
+ ksocknal_peer_decref(peer);
+ peer = peer2;
+ }
+
+ /* +1 ref for me */
+ ksocknal_peer_addref(peer);
+ peer->ksnp_accepting++;
+
+ /* Am I already connecting to this peer? Resolve the race in
+ * favour of the higher NID... */
+ if (peerid.nid < ni->ni_nid &&
+ ksocknal_connecting(peer, conn->ksnc_ipaddr)) {
+ rc = EALREADY;
+ warn = "connection race resolution";
+ goto failed_2;
+ }
+ }
+
+ if (peer->ksnp_closing ||
+ (active && route->ksnr_deleted)) {
+ /* peer/route got closed under me */
+ rc = -ESTALE;
+ warn = "peer/route removed";
+ goto failed_2;
+ }
+
+ if (peer->ksnp_proto == NULL) {
+ /* Never connected before.
+ * NB recv_hello may have returned EPROTO to signal my peer
+ * wants a different protocol than the one I asked for.
+ */
+ LASSERT (list_empty(&peer->ksnp_conns));
+
+ peer->ksnp_proto = conn->ksnc_proto;
+ peer->ksnp_incarnation = incarnation;
+ }
+
+ if (peer->ksnp_proto != conn->ksnc_proto ||
+ peer->ksnp_incarnation != incarnation) {
+ /* Peer rebooted or I've got the wrong protocol version */
+ ksocknal_close_peer_conns_locked(peer, 0, 0);
+
+ peer->ksnp_proto = NULL;
+ rc = ESTALE;
+ warn = peer->ksnp_incarnation != incarnation ?
+ "peer rebooted" :
+ "wrong proto version";
+ goto failed_2;
+ }
+
+ switch (rc) {
+ default:
+ LBUG();
+ case 0:
+ break;
+ case EALREADY:
+ warn = "lost conn race";
+ goto failed_2;
+ case EPROTO:
+ warn = "retry with different protocol version";
+ goto failed_2;
+ }
+
+ /* Refuse to duplicate an existing connection, unless this is a
+ * loopback connection */
+ if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
+ list_for_each(tmp, &peer->ksnp_conns) {
+ conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
+
+ if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
+ conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
+ conn2->ksnc_type != conn->ksnc_type)
+ continue;
+
+ /* Reply on a passive connection attempt so the peer
+ * realises we're connected. */
+ LASSERT (rc == 0);
+ if (!active)
+ rc = EALREADY;
+
+ warn = "duplicate";
+ goto failed_2;
+ }
+ }
+
+ /* If the connection created by this route didn't bind to the IP
+ * address the route connected to, the connection/route matching
+ * code below probably isn't going to work. */
+ if (active &&
+ route->ksnr_ipaddr != conn->ksnc_ipaddr) {
+ CERROR("Route %s %pI4h connected to %pI4h\n",
+ libcfs_id2str(peer->ksnp_id),
+ &route->ksnr_ipaddr,
+ &conn->ksnc_ipaddr);
+ }
+
+ /* Search for a route corresponding to the new connection and
+ * create an association. This allows incoming connections created
+ * by routes in my peer to match my own route entries so I don't
+ * continually create duplicate routes. */
+ list_for_each (tmp, &peer->ksnp_routes) {
+ route = list_entry(tmp, ksock_route_t, ksnr_list);
+
+ if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
+ continue;
+
+ ksocknal_associate_route_conn_locked(route, conn);
+ break;
+ }
+
+ conn->ksnc_peer = peer; /* conn takes my ref on peer */
+ peer->ksnp_last_alive = cfs_time_current();
+ peer->ksnp_send_keepalive = 0;
+ peer->ksnp_error = 0;
+
+ sched = ksocknal_choose_scheduler_locked(cpt);
+ sched->kss_nconns++;
+ conn->ksnc_scheduler = sched;
+
+ conn->ksnc_tx_last_post = cfs_time_current();
+ /* Set the deadline for the outgoing HELLO to drain */
+ conn->ksnc_tx_bufnob = cfs_sock_wmem_queued(sock);
+ conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ mb(); /* order with adding to peer's conn list */
+
+ list_add (&conn->ksnc_list, &peer->ksnp_conns);
+ ksocknal_conn_addref(conn);
+
+ ksocknal_new_packet(conn, 0);
+
+ conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);
+
+ /* Take packets blocking for this connection. */
+ list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
+ if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) == SOCKNAL_MATCH_NO)
+ continue;
+
+ list_del (&tx->tx_list);
+ ksocknal_queue_tx_locked (tx, conn);
+ }
+
+ write_unlock_bh(global_lock);
+
+ /* We've now got a new connection. Any errors from here on are just
+ * like "normal" comms errors and we close the connection normally.
+ * NB (a) we still have to send the reply HELLO for passive
+ * connections,
+ * (b) normal I/O on the conn is blocked until I setup and call the
+ * socket callbacks.
+ */
+
+ CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d"
+ " incarnation:"LPD64" sched[%d:%d]\n",
+ libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
+ &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
+ conn->ksnc_port, incarnation, cpt,
+ (int)(sched - &sched->kss_info->ksi_scheds[0]));
+
+ if (active) {
+ /* additional routes after interface exchange? */
+ ksocknal_create_routes(peer, conn->ksnc_port,
+ hello->kshm_ips, hello->kshm_nips);
+ } else {
+ hello->kshm_nips = ksocknal_select_ips(peer, hello->kshm_ips,
+ hello->kshm_nips);
+ rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
+ }
+
+ LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
+ kshm_ips[LNET_MAX_INTERFACES]));
+
+ /* setup the socket AFTER I've received hello (it disables
+ * SO_LINGER). I might call back to the acceptor who may want
+ * to send a protocol version response and then close the
+ * socket; this ensures the socket only tears down after the
+ * response has been sent. */
+ if (rc == 0)
+ rc = ksocknal_lib_setup_sock(sock);
+
+ write_lock_bh(global_lock);
+
+ /* NB my callbacks block while I hold ksnd_global_lock */
+ ksocknal_lib_set_callback(sock, conn);
+
+ if (!active)
+ peer->ksnp_accepting--;
+
+ write_unlock_bh(global_lock);
+
+ if (rc != 0) {
+ write_lock_bh(global_lock);
+ if (!conn->ksnc_closing) {
+ /* could be closed by another thread */
+ ksocknal_close_conn_locked(conn, rc);
+ }
+ write_unlock_bh(global_lock);
+ } else if (ksocknal_connsock_addref(conn) == 0) {
+ /* Allow I/O to proceed. */
+ ksocknal_read_callback(conn);
+ ksocknal_write_callback(conn);
+ ksocknal_connsock_decref(conn);
+ }
+
+ ksocknal_connsock_decref(conn);
+ ksocknal_conn_decref(conn);
+ return rc;
+
+ failed_2:
+ if (!peer->ksnp_closing &&
+ list_empty (&peer->ksnp_conns) &&
+ list_empty (&peer->ksnp_routes)) {
+ list_add(&zombies, &peer->ksnp_tx_queue);
+ list_del_init(&peer->ksnp_tx_queue);
+ ksocknal_unlink_peer_locked(peer);
+ }
+
+ write_unlock_bh(global_lock);
+
+ if (warn != NULL) {
+ if (rc < 0)
+ CERROR("Not creating conn %s type %d: %s\n",
+ libcfs_id2str(peerid), conn->ksnc_type, warn);
+ else
+ CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
+ libcfs_id2str(peerid), conn->ksnc_type, warn);
+ }
+
+ if (!active) {
+ if (rc > 0) {
+ /* Request retry by replying with CONN_NONE;
+ * ksnc_proto has been set already */
+ conn->ksnc_type = SOCKLND_CONN_NONE;
+ hello->kshm_nips = 0;
+ ksocknal_send_hello(ni, conn, peerid.nid, hello);
+ }
+
+ write_lock_bh(global_lock);
+ peer->ksnp_accepting--;
+ write_unlock_bh(global_lock);
+ }
+
+ ksocknal_txlist_done(ni, &zombies, 1);
+ ksocknal_peer_decref(peer);
+
+ failed_1:
+ if (hello != NULL)
+ LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
+ kshm_ips[LNET_MAX_INTERFACES]));
+
+ LIBCFS_FREE (conn, sizeof(*conn));
+
+ failed_0:
+ libcfs_sock_release(sock);
+ return rc;
+}
+
+void
+ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
+{
+ /* This just does the immediate housekeeping, and queues the
+ * connection for the reaper to terminate.
+ * Caller holds ksnd_global_lock exclusively in irq context */
+ ksock_peer_t *peer = conn->ksnc_peer;
+ ksock_route_t *route;
+ ksock_conn_t *conn2;
+ struct list_head *tmp;
+
+ LASSERT (peer->ksnp_error == 0);
+ LASSERT (!conn->ksnc_closing);
+ conn->ksnc_closing = 1;
+
+ /* ksnd_deathrow_conns takes over peer's ref */
+ list_del (&conn->ksnc_list);
+
+ route = conn->ksnc_route;
+ if (route != NULL) {
+ /* dissociate conn from route... */
+ LASSERT (!route->ksnr_deleted);
+ LASSERT ((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);
+
+ conn2 = NULL;
+ list_for_each(tmp, &peer->ksnp_conns) {
+ conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
+
+ if (conn2->ksnc_route == route &&
+ conn2->ksnc_type == conn->ksnc_type)
+ break;
+
+ conn2 = NULL;
+ }
+ if (conn2 == NULL)
+ route->ksnr_connected &= ~(1 << conn->ksnc_type);
+
+ conn->ksnc_route = NULL;
+
+#if 0 /* irrelevant with only eager routes */
+ /* make route least favourite */
+ list_del (&route->ksnr_list);
+ list_add_tail (&route->ksnr_list, &peer->ksnp_routes);
+#endif
+ ksocknal_route_decref(route); /* drop conn's ref on route */
+ }
+
+ if (list_empty (&peer->ksnp_conns)) {
+ /* No more connections to this peer */
+
+ if (!list_empty(&peer->ksnp_tx_queue)) {
+ ksock_tx_t *tx;
+
+ LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
+
+ /* throw them to the last connection...,
+ * these TXs will be sent to /dev/null by the scheduler */
+ list_for_each_entry(tx, &peer->ksnp_tx_queue,
+ tx_list)
+ ksocknal_tx_prep(conn, tx);
+
+ spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
+ list_splice_init(&peer->ksnp_tx_queue,
+ &conn->ksnc_tx_queue);
+ spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
+ }
+
+ peer->ksnp_proto = NULL; /* renegotiate protocol version */
+ peer->ksnp_error = error; /* stash last conn close reason */
+
+ if (list_empty (&peer->ksnp_routes)) {
+ /* I've just closed last conn belonging to a
+ * peer with no routes to it */
+ ksocknal_unlink_peer_locked (peer);
+ }
+ }
+
+ spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
+
+ list_add_tail(&conn->ksnc_list,
+ &ksocknal_data.ksnd_deathrow_conns);
+ wake_up(&ksocknal_data.ksnd_reaper_waitq);
+
+ spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
+}
+
+void
+ksocknal_peer_failed (ksock_peer_t *peer)
+{
+ int notify = 0;
+ cfs_time_t last_alive = 0;
+
+ /* There has been a connection failure or comms error; but I'll only
+ * tell LNET I think the peer is dead if it's to another kernel and
+ * there are no connections or connection attempts in existence. */
+
+ read_lock(&ksocknal_data.ksnd_global_lock);
+
+ if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
+ list_empty(&peer->ksnp_conns) &&
+ peer->ksnp_accepting == 0 &&
+ ksocknal_find_connecting_route_locked(peer) == NULL) {
+ notify = 1;
+ last_alive = peer->ksnp_last_alive;
+ }
+
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+
+ if (notify)
+ lnet_notify (peer->ksnp_ni, peer->ksnp_id.nid, 0,
+ last_alive);
+}
+
+void
+ksocknal_finalize_zcreq(ksock_conn_t *conn)
+{
+ ksock_peer_t *peer = conn->ksnc_peer;
+ ksock_tx_t *tx;
+ ksock_tx_t *tmp;
+ LIST_HEAD (zlist);
+
+ /* NB safe to finalize TXs because closing of socket will
+ * abort all buffered data */
+ LASSERT (conn->ksnc_sock == NULL);
+
+ spin_lock(&peer->ksnp_lock);
+
+ list_for_each_entry_safe(tx, tmp, &peer->ksnp_zc_req_list, tx_zc_list) {
+ if (tx->tx_conn != conn)
+ continue;
+
+ LASSERT (tx->tx_msg.ksm_zc_cookies[0] != 0);
+
+ tx->tx_msg.ksm_zc_cookies[0] = 0;
+ tx->tx_zc_aborted = 1; /* mark it as not-acked */
+ list_del(&tx->tx_zc_list);
+ list_add(&tx->tx_zc_list, &zlist);
+ }
+
+ spin_unlock(&peer->ksnp_lock);
+
+ while (!list_empty(&zlist)) {
+ tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
+
+ list_del(&tx->tx_zc_list);
+ ksocknal_tx_decref(tx);
+ }
+}
+
+void
+ksocknal_terminate_conn (ksock_conn_t *conn)
+{
+ /* This gets called by the reaper (guaranteed thread context) to
+ * disengage the socket from its callbacks and close it.
+ * ksnc_refcount will eventually hit zero, and then the reaper will
+ * destroy it. */
+ ksock_peer_t *peer = conn->ksnc_peer;
+ ksock_sched_t *sched = conn->ksnc_scheduler;
+ int failed = 0;
+
+ LASSERT(conn->ksnc_closing);
+
+ /* wake up the scheduler to "send" all remaining packets to /dev/null */
+ spin_lock_bh(&sched->kss_lock);
+
+ /* a closing conn is always ready to tx */
+ conn->ksnc_tx_ready = 1;
+
+ if (!conn->ksnc_tx_scheduled &&
+ !list_empty(&conn->ksnc_tx_queue)){
+ list_add_tail (&conn->ksnc_tx_list,
+ &sched->kss_tx_conns);
+ conn->ksnc_tx_scheduled = 1;
+ /* extra ref for scheduler */
+ ksocknal_conn_addref(conn);
+
+ wake_up (&sched->kss_waitq);
+ }
+
+ spin_unlock_bh(&sched->kss_lock);
+
+ /* serialise with callbacks */
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
+
+ ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
+
+ /* OK, so this conn may not be completely disengaged from its
+ * scheduler yet, but it _has_ committed to terminate... */
+ conn->ksnc_scheduler->kss_nconns--;
+
+ if (peer->ksnp_error != 0) {
+ /* peer's last conn closed in error */
+ LASSERT (list_empty (&peer->ksnp_conns));
+ failed = 1;
+ peer->ksnp_error = 0; /* avoid multiple notifications */
+ }
+
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
+
+ if (failed)
+ ksocknal_peer_failed(peer);
+
+ /* The socket is closed on the final put; either here, or in
+ * ksocknal_{send,recv}msg(). Since we set up the linger2 option
+ * when the connection was established, this will close the socket
+ * immediately, aborting anything buffered in it. Any hung
+ * zero-copy transmits will therefore complete in finite time. */
+ ksocknal_connsock_decref(conn);
+}
+
+void
+ksocknal_queue_zombie_conn (ksock_conn_t *conn)
+{
+ /* Queue the conn for the reaper to destroy */
+
+ LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
+ spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
+
+ list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
+ wake_up(&ksocknal_data.ksnd_reaper_waitq);
+
+ spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
+}
+
+void
+ksocknal_destroy_conn (ksock_conn_t *conn)
+{
+ cfs_time_t last_rcv;
+
+ /* Final coup-de-grace of the reaper */
+ CDEBUG (D_NET, "connection %p\n", conn);
+
+ LASSERT (atomic_read (&conn->ksnc_conn_refcount) == 0);
+ LASSERT (atomic_read (&conn->ksnc_sock_refcount) == 0);
+ LASSERT (conn->ksnc_sock == NULL);
+ LASSERT (conn->ksnc_route == NULL);
+ LASSERT (!conn->ksnc_tx_scheduled);
+ LASSERT (!conn->ksnc_rx_scheduled);
+ LASSERT (list_empty(&conn->ksnc_tx_queue));
+
+ /* complete current receive if any */
+ switch (conn->ksnc_rx_state) {
+ case SOCKNAL_RX_LNET_PAYLOAD:
+ last_rcv = conn->ksnc_rx_deadline -
+ cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
+ CERROR("Completing partial receive from %s[%d]"
+ ", ip %pI4h:%d, with error, wanted: %d, left: %d, "
+ "last alive is %ld secs ago\n",
+ libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
+ &conn->ksnc_ipaddr, conn->ksnc_port,
+ conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
+ cfs_duration_sec(cfs_time_sub(cfs_time_current(),
+ last_rcv)));
+ lnet_finalize (conn->ksnc_peer->ksnp_ni,
+ conn->ksnc_cookie, -EIO);
+ break;
+ case SOCKNAL_RX_LNET_HEADER:
+ if (conn->ksnc_rx_started)
+ CERROR("Incomplete receive of lnet header from %s"
+ ", ip %pI4h:%d, with error, protocol: %d.x.\n",
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ &conn->ksnc_ipaddr, conn->ksnc_port,
+ conn->ksnc_proto->pro_version);
+ break;
+ case SOCKNAL_RX_KSM_HEADER:
+ if (conn->ksnc_rx_started)
+ CERROR("Incomplete receive of ksock message from %s"
+ ", ip %pI4h:%d, with error, protocol: %d.x.\n",
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ &conn->ksnc_ipaddr, conn->ksnc_port,
+ conn->ksnc_proto->pro_version);
+ break;
+ case SOCKNAL_RX_SLOP:
+ if (conn->ksnc_rx_started)
+ CERROR("Incomplete receive of slops from %s"
+ ", ip %pI4h:%d, with error\n",
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ &conn->ksnc_ipaddr, conn->ksnc_port);
+ break;
+ default:
+ LBUG ();
+ break;
+ }
+
+ ksocknal_peer_decref(conn->ksnc_peer);
+
+ LIBCFS_FREE (conn, sizeof (*conn));
+}
+
+int
+ksocknal_close_peer_conns_locked (ksock_peer_t *peer, __u32 ipaddr, int why)
+{
+ ksock_conn_t *conn;
+ struct list_head *ctmp;
+ struct list_head *cnxt;
+ int count = 0;
+
+ list_for_each_safe (ctmp, cnxt, &peer->ksnp_conns) {
+ conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
+
+ if (ipaddr == 0 ||
+ conn->ksnc_ipaddr == ipaddr) {
+ count++;
+ ksocknal_close_conn_locked (conn, why);
+ }
+ }
+
+ return (count);
+}
+
+int
+ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why)
+{
+ ksock_peer_t *peer = conn->ksnc_peer;
+ __u32 ipaddr = conn->ksnc_ipaddr;
+ int count;
+
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
+
+ count = ksocknal_close_peer_conns_locked (peer, ipaddr, why);
+
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
+
+ return (count);
+}
+
+int
+ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr)
+{
+ ksock_peer_t *peer;
+ struct list_head *ptmp;
+ struct list_head *pnxt;
+ int lo;
+ int hi;
+ int i;
+ int count = 0;
+
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
+
+ if (id.nid != LNET_NID_ANY)
+ lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
+ else {
+ lo = 0;
+ hi = ksocknal_data.ksnd_peer_hash_size - 1;
+ }
+
+ for (i = lo; i <= hi; i++) {
+ list_for_each_safe (ptmp, pnxt,
+ &ksocknal_data.ksnd_peers[i]) {
+
+ peer = list_entry (ptmp, ksock_peer_t, ksnp_list);
+
+ if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) &&
+ (id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
+ continue;
+
+ count += ksocknal_close_peer_conns_locked (peer, ipaddr, 0);
+ }
+ }
+
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
+
+ /* wildcards always succeed */
+ if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
+ return (0);
+
+ return (count == 0 ? -ENOENT : 0);
+}
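+
+ /* Illustrative sketch, not part of this patch: the return convention
+ * distinguishes wildcard from exact requests, e.g.
+ *
+ *	lnet_process_id_t any = { .nid = LNET_NID_ANY,
+ *				  .pid = LNET_PID_ANY };
+ *
+ *	ksocknal_close_matching_conns(any, 0)      always returns 0
+ *	ksocknal_close_matching_conns(id, ipaddr)  returns -ENOENT if
+ *						   nothing matched
+ *
+ * ksocknal_notify() below relies on the wildcard form when a gateway
+ * goes down. */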
+
+void
+ksocknal_notify (lnet_ni_t *ni, lnet_nid_t gw_nid, int alive)
+{
+ /* The router is telling me she's been notified of a change in
+ * gateway state.... */
+ lnet_process_id_t id = {0};
+
+ id.nid = gw_nid;
+ id.pid = LNET_PID_ANY;
+
+ CDEBUG (D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
+ alive ? "up" : "down");
+
+ if (!alive) {
+ /* If the gateway crashed, close all open connections... */
+ ksocknal_close_matching_conns (id, 0);
+ return;
+ }
+
+ /* ...otherwise do nothing. We can only establish new connections
+ * if we have autoroutes, and these connect on demand. */
+}
+
+void
+ksocknal_query (lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
+{
+ int connect = 1;
+ cfs_time_t last_alive = 0;
+ cfs_time_t now = cfs_time_current();
+ ksock_peer_t *peer = NULL;
+ rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
+ lnet_process_id_t id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID};
+
+ read_lock(glock);
+
+ peer = ksocknal_find_peer_locked(ni, id);
+ if (peer != NULL) {
+ struct list_head *tmp;
+ ksock_conn_t *conn;
+ int bufnob;
+
+ list_for_each (tmp, &peer->ksnp_conns) {
+ conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ bufnob = cfs_sock_wmem_queued(conn->ksnc_sock);
+
+ if (bufnob < conn->ksnc_tx_bufnob) {
+ /* something got ACKed */
+ conn->ksnc_tx_deadline =
+ cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ peer->ksnp_last_alive = now;
+ conn->ksnc_tx_bufnob = bufnob;
+ }
+ }
+
+ last_alive = peer->ksnp_last_alive;
+ if (ksocknal_find_connectable_route_locked(peer) == NULL)
+ connect = 0;
+ }
+
+ read_unlock(glock);
+
+ if (last_alive != 0)
+ *when = last_alive;
+
+ CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago, connect %d\n",
+ libcfs_nid2str(nid), peer,
+ last_alive ? cfs_duration_sec(now - last_alive) : -1,
+ connect);
+
+ if (!connect)
+ return;
+
+ ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());
+
+ write_lock_bh(glock);
+
+ peer = ksocknal_find_peer_locked(ni, id);
+ if (peer != NULL)
+ ksocknal_launch_all_connections_locked(peer);
+
+ write_unlock_bh(glock);
+ return;
+}
+
+void
+ksocknal_push_peer (ksock_peer_t *peer)
+{
+ int index;
+ int i;
+ struct list_head *tmp;
+ ksock_conn_t *conn;
+
+ for (index = 0; ; index++) {
+ read_lock(&ksocknal_data.ksnd_global_lock);
+
+ i = 0;
+ conn = NULL;
+
+ list_for_each (tmp, &peer->ksnp_conns) {
+ if (i++ == index) {
+ conn = list_entry (tmp, ksock_conn_t,
+ ksnc_list);
+ ksocknal_conn_addref(conn);
+ break;
+ }
+ }
+
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+
+ if (conn == NULL)
+ break;
+
+ ksocknal_lib_push_conn (conn);
+ ksocknal_conn_decref(conn);
+ }
+}
+
+int
+ksocknal_push (lnet_ni_t *ni, lnet_process_id_t id)
+{
+ ksock_peer_t *peer;
+ struct list_head *tmp;
+ int index;
+ int i;
+ int j;
+ int rc = -ENOENT;
+
+ for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
+ for (j = 0; ; j++) {
+ read_lock(&ksocknal_data.ksnd_global_lock);
+
+ index = 0;
+ peer = NULL;
+
+ list_for_each (tmp, &ksocknal_data.ksnd_peers[i]) {
+ peer = list_entry(tmp, ksock_peer_t,
+ ksnp_list);
+
+ if (!((id.nid == LNET_NID_ANY ||
+ id.nid == peer->ksnp_id.nid) &&
+ (id.pid == LNET_PID_ANY ||
+ id.pid == peer->ksnp_id.pid))) {
+ peer = NULL;
+ continue;
+ }
+
+ if (index++ == j) {
+ ksocknal_peer_addref(peer);
+ break;
+ }
+ }
+
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+
+ if (peer != NULL) {
+ rc = 0;
+ ksocknal_push_peer (peer);
+ ksocknal_peer_decref(peer);
+ }
+ }
+
+ }
+
+ return (rc);
+}
+
+int
+ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
+{
+ ksock_net_t *net = ni->ni_data;
+ ksock_interface_t *iface;
+ int rc;
+ int i;
+ int j;
+ struct list_head *ptmp;
+ ksock_peer_t *peer;
+ struct list_head *rtmp;
+ ksock_route_t *route;
+
+ if (ipaddress == 0 ||
+ netmask == 0)
+ return (-EINVAL);
+
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
+
+ iface = ksocknal_ip2iface(ni, ipaddress);
+ if (iface != NULL) {
+ /* silently ignore dups */
+ rc = 0;
+ } else if (net->ksnn_ninterfaces == LNET_MAX_INTERFACES) {
+ rc = -ENOSPC;
+ } else {
+ iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];
+
+ iface->ksni_ipaddr = ipaddress;
+ iface->ksni_netmask = netmask;
+ iface->ksni_nroutes = 0;
+ iface->ksni_npeers = 0;
+
+ for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
+ list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
+ peer = list_entry(ptmp, ksock_peer_t,
+ ksnp_list);
+
+ for (j = 0; j < peer->ksnp_n_passive_ips; j++)
+ if (peer->ksnp_passive_ips[j] == ipaddress)
+ iface->ksni_npeers++;
+
+ list_for_each(rtmp, &peer->ksnp_routes) {
+ route = list_entry(rtmp,
+ ksock_route_t,
+ ksnr_list);
+
+ if (route->ksnr_myipaddr == ipaddress)
+ iface->ksni_nroutes++;
+ }
+ }
+ }
+
+ rc = 0;
+ /* NB only new connections will pay attention to the new interface! */
+ }
+
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
+
+ return (rc);
+}
+
+void
+ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
+{
+ struct list_head *tmp;
+ struct list_head *nxt;
+ ksock_route_t *route;
+ ksock_conn_t *conn;
+ int i;
+ int j;
+
+ for (i = 0; i < peer->ksnp_n_passive_ips; i++)
+ if (peer->ksnp_passive_ips[i] == ipaddr) {
+ for (j = i+1; j < peer->ksnp_n_passive_ips; j++)
+ peer->ksnp_passive_ips[j-1] =
+ peer->ksnp_passive_ips[j];
+ peer->ksnp_n_passive_ips--;
+ break;
+ }
+
+ list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
+ route = list_entry (tmp, ksock_route_t, ksnr_list);
+
+ if (route->ksnr_myipaddr != ipaddr)
+ continue;
+
+ if (route->ksnr_share_count != 0) {
+ /* Manually created; keep, but unbind */
+ route->ksnr_myipaddr = 0;
+ } else {
+ ksocknal_del_route_locked(route);
+ }
+ }
+
+ list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
+ conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+
+ if (conn->ksnc_myipaddr == ipaddr)
+ ksocknal_close_conn_locked (conn, 0);
+ }
+}
+
+int
+ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
+{
+ ksock_net_t *net = ni->ni_data;
+ int rc = -ENOENT;
+ struct list_head *tmp;
+ struct list_head *nxt;
+ ksock_peer_t *peer;
+ __u32 this_ip;
+ int i;
+ int j;
+
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
+
+ for (i = 0; i < net->ksnn_ninterfaces; i++) {
+ this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
+
+ if (!(ipaddress == 0 ||
+ ipaddress == this_ip))
+ continue;
+
+ rc = 0;
+
+ for (j = i+1; j < net->ksnn_ninterfaces; j++)
+ net->ksnn_interfaces[j-1] =
+ net->ksnn_interfaces[j];
+
+ net->ksnn_ninterfaces--;
+
+ for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
+ list_for_each_safe(tmp, nxt,
+ &ksocknal_data.ksnd_peers[j]) {
+ peer = list_entry(tmp, ksock_peer_t,
+ ksnp_list);
+
+ if (peer->ksnp_ni != ni)
+ continue;
+
+ ksocknal_peer_del_interface_locked(peer, this_ip);
+ }
+ }
+ }
+
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
+
+ return (rc);
+}
+
+int
+ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
+{
+ lnet_process_id_t id = {0};
+ struct libcfs_ioctl_data *data = arg;
+ int rc;
+
+ switch(cmd) {
+ case IOC_LIBCFS_GET_INTERFACE: {
+ ksock_net_t *net = ni->ni_data;
+ ksock_interface_t *iface;
+
+ read_lock(&ksocknal_data.ksnd_global_lock);
+
+ if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
+ rc = -ENOENT;
+ } else {
+ rc = 0;
+ iface = &net->ksnn_interfaces[data->ioc_count];
+
+ data->ioc_u32[0] = iface->ksni_ipaddr;
+ data->ioc_u32[1] = iface->ksni_netmask;
+ data->ioc_u32[2] = iface->ksni_npeers;
+ data->ioc_u32[3] = iface->ksni_nroutes;
+ }
+
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+ return rc;
+ }
+
+ case IOC_LIBCFS_ADD_INTERFACE:
+ return ksocknal_add_interface(ni,
+ data->ioc_u32[0], /* IP address */
+ data->ioc_u32[1]); /* net mask */
+
+ case IOC_LIBCFS_DEL_INTERFACE:
+ return ksocknal_del_interface(ni,
+ data->ioc_u32[0]); /* IP address */
+
+ case IOC_LIBCFS_GET_PEER: {
+ __u32 myip = 0;
+ __u32 ip = 0;
+ int port = 0;
+ int conn_count = 0;
+ int share_count = 0;
+
+ rc = ksocknal_get_peer_info(ni, data->ioc_count,
+ &id, &myip, &ip, &port,
+ &conn_count, &share_count);
+ if (rc != 0)
+ return rc;
+
+ data->ioc_nid = id.nid;
+ data->ioc_count = share_count;
+ data->ioc_u32[0] = ip;
+ data->ioc_u32[1] = port;
+ data->ioc_u32[2] = myip;
+ data->ioc_u32[3] = conn_count;
+ data->ioc_u32[4] = id.pid;
+ return 0;
+ }
+
+ case IOC_LIBCFS_ADD_PEER:
+ id.nid = data->ioc_nid;
+ id.pid = LUSTRE_SRV_LNET_PID;
+ return ksocknal_add_peer (ni, id,
+ data->ioc_u32[0], /* IP */
+ data->ioc_u32[1]); /* port */
+
+ case IOC_LIBCFS_DEL_PEER:
+ id.nid = data->ioc_nid;
+ id.pid = LNET_PID_ANY;
+ return ksocknal_del_peer (ni, id,
+ data->ioc_u32[0]); /* IP */
+
+ case IOC_LIBCFS_GET_CONN: {
+ int txmem;
+ int rxmem;
+ int nagle;
+ ksock_conn_t *conn = ksocknal_get_conn_by_idx (ni, data->ioc_count);
+
+ if (conn == NULL)
+ return -ENOENT;
+
+ ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
+
+ data->ioc_count = txmem;
+ data->ioc_nid = conn->ksnc_peer->ksnp_id.nid;
+ data->ioc_flags = nagle;
+ data->ioc_u32[0] = conn->ksnc_ipaddr;
+ data->ioc_u32[1] = conn->ksnc_port;
+ data->ioc_u32[2] = conn->ksnc_myipaddr;
+ data->ioc_u32[3] = conn->ksnc_type;
+ data->ioc_u32[4] = conn->ksnc_scheduler->kss_info->ksi_cpt;
+ data->ioc_u32[5] = rxmem;
+ data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
+ ksocknal_conn_decref(conn);
+ return 0;
+ }
+
+ case IOC_LIBCFS_CLOSE_CONNECTION:
+ id.nid = data->ioc_nid;
+ id.pid = LNET_PID_ANY;
+ return ksocknal_close_matching_conns (id,
+ data->ioc_u32[0]);
+
+ case IOC_LIBCFS_REGISTER_MYNID:
+ /* Ignore if this is a noop */
+ if (data->ioc_nid == ni->ni_nid)
+ return 0;
+
+ CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
+ libcfs_nid2str(data->ioc_nid),
+ libcfs_nid2str(ni->ni_nid));
+ return -EINVAL;
+
+ case IOC_LIBCFS_PUSH_CONNECTION:
+ id.nid = data->ioc_nid;
+ id.pid = LNET_PID_ANY;
+ return ksocknal_push(ni, id);
+
+ default:
+ return -EINVAL;
+ }
+ /* not reached */
+}
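+
+ /* Illustrative sketch, not part of this patch: the ioctl payload is
+ * a plain struct libcfs_ioctl_data, so a hypothetical kernel-side
+ * user adding a peer (with 'nid', 'ipaddr' and 'port' assumed to be
+ * supplied) would fill in the same fields the ADD_PEER case reads:
+ *
+ *	struct libcfs_ioctl_data data;
+ *
+ *	memset(&data, 0, sizeof(data));
+ *	data.ioc_nid    = nid;
+ *	data.ioc_u32[0] = ipaddr;
+ *	data.ioc_u32[1] = port;
+ *	rc = ksocknal_ctl(ni, IOC_LIBCFS_ADD_PEER, &data);
+ */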
+
+void
+ksocknal_free_buffers (void)
+{
+ LASSERT (atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
+
+ if (ksocknal_data.ksnd_sched_info != NULL) {
+ struct ksock_sched_info *info;
+ int i;
+
+ cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
+ if (info->ksi_scheds != NULL) {
+ LIBCFS_FREE(info->ksi_scheds,
+ info->ksi_nthreads_max *
+ sizeof(info->ksi_scheds[0]));
+ }
+ }
+ cfs_percpt_free(ksocknal_data.ksnd_sched_info);
+ }
+
+ LIBCFS_FREE (ksocknal_data.ksnd_peers,
+ sizeof (struct list_head) *
+ ksocknal_data.ksnd_peer_hash_size);
+
+ spin_lock(&ksocknal_data.ksnd_tx_lock);
+
+ if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
+ struct list_head zlist;
+ ksock_tx_t *tx;
+
+ list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
+ list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
+ spin_unlock(&ksocknal_data.ksnd_tx_lock);
+
+ while (!list_empty(&zlist)) {
+ tx = list_entry(zlist.next, ksock_tx_t, tx_list);
+ list_del(&tx->tx_list);
+ LIBCFS_FREE(tx, tx->tx_desc_size);
+ }
+ } else {
+ spin_unlock(&ksocknal_data.ksnd_tx_lock);
+ }
+}
+
+void
+ksocknal_base_shutdown(void)
+{
+ struct ksock_sched_info *info;
+ ksock_sched_t *sched;
+ int i;
+ int j;
+
+ CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
+ atomic_read (&libcfs_kmemory));
+ LASSERT (ksocknal_data.ksnd_nnets == 0);
+
+ switch (ksocknal_data.ksnd_init) {
+ default:
+ LASSERT (0);
+
+ case SOCKNAL_INIT_ALL:
+ case SOCKNAL_INIT_DATA:
+ LASSERT (ksocknal_data.ksnd_peers != NULL);
+ for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
+ LASSERT (list_empty (&ksocknal_data.ksnd_peers[i]));
+ }
+
+ LASSERT(list_empty(&ksocknal_data.ksnd_nets));
+ LASSERT (list_empty (&ksocknal_data.ksnd_enomem_conns));
+ LASSERT (list_empty (&ksocknal_data.ksnd_zombie_conns));
+ LASSERT (list_empty (&ksocknal_data.ksnd_connd_connreqs));
+ LASSERT (list_empty (&ksocknal_data.ksnd_connd_routes));
+
+ if (ksocknal_data.ksnd_sched_info != NULL) {
+ cfs_percpt_for_each(info, i,
+ ksocknal_data.ksnd_sched_info) {
+ if (info->ksi_scheds == NULL)
+ continue;
+
+ for (j = 0; j < info->ksi_nthreads_max; j++) {
+
+ sched = &info->ksi_scheds[j];
+ LASSERT(list_empty(&sched->kss_tx_conns));
+ LASSERT(list_empty(&sched->kss_rx_conns));
+ LASSERT(list_empty(&sched->kss_zombie_noop_txs));
+ LASSERT(sched->kss_nconns == 0);
+ }
+ }
+ }
+
+ /* flag threads to terminate; wake and wait for them to die */
+ ksocknal_data.ksnd_shuttingdown = 1;
+ wake_up_all(&ksocknal_data.ksnd_connd_waitq);
+ wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
+
+ if (ksocknal_data.ksnd_sched_info != NULL) {
+ cfs_percpt_for_each(info, i,
+ ksocknal_data.ksnd_sched_info) {
+ if (info->ksi_scheds == NULL)
+ continue;
+
+ for (j = 0; j < info->ksi_nthreads_max; j++) {
+ sched = &info->ksi_scheds[j];
+ wake_up_all(&sched->kss_waitq);
+ }
+ }
+ }
+
+ i = 4;
+ read_lock(&ksocknal_data.ksnd_global_lock);
+ while (ksocknal_data.ksnd_nthreads != 0) {
+ i++;
+ CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
+ "waiting for %d threads to terminate\n",
+ ksocknal_data.ksnd_nthreads);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+ cfs_pause(cfs_time_seconds(1));
+ read_lock(&ksocknal_data.ksnd_global_lock);
+ }
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+
+ ksocknal_free_buffers();
+
+ ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
+ break;
+ }
+
+ CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
+ atomic_read (&libcfs_kmemory));
+
+ module_put(THIS_MODULE);
+}
+
+__u64
+ksocknal_new_incarnation (void)
+{
+ struct timeval tv;
+
+ /* The incarnation number is the time this module loaded and it
+ * identifies this particular instance of the socknal. Hopefully
+ * we won't be able to reboot more frequently than 1MHz for the
+ * foreseeable future :) */
+
+ do_gettimeofday(&tv);
+
+ return (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
+}
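+
+ /* Worked example (illustrative, not part of this patch): with
+ * tv.tv_sec = 1384473600 and tv.tv_usec = 123456 the incarnation is
+ *
+ *	1384473600 * 1000000 + 123456 = 1384473600123456
+ *
+ * i.e. microseconds since the epoch at module load; two loads can
+ * only produce the same incarnation if they happen within the same
+ * microsecond. */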
+
+int
+ksocknal_base_startup(void)
+{
+ struct ksock_sched_info *info;
+ int rc;
+ int i;
+
+ LASSERT (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
+ LASSERT (ksocknal_data.ksnd_nnets == 0);
+
+ memset (&ksocknal_data, 0, sizeof (ksocknal_data)); /* zero pointers */
+
+ ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
+ LIBCFS_ALLOC (ksocknal_data.ksnd_peers,
+ sizeof (struct list_head) *
+ ksocknal_data.ksnd_peer_hash_size);
+ if (ksocknal_data.ksnd_peers == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
+ INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
+
+ rwlock_init(&ksocknal_data.ksnd_global_lock);
+ INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
+
+ spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
+ INIT_LIST_HEAD (&ksocknal_data.ksnd_enomem_conns);
+ INIT_LIST_HEAD (&ksocknal_data.ksnd_zombie_conns);
+ INIT_LIST_HEAD (&ksocknal_data.ksnd_deathrow_conns);
+ init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
+
+ spin_lock_init(&ksocknal_data.ksnd_connd_lock);
+ INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_connreqs);
+ INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_routes);
+ init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
+
+ spin_lock_init(&ksocknal_data.ksnd_tx_lock);
+ INIT_LIST_HEAD (&ksocknal_data.ksnd_idle_noop_txs);
+
+ /* NB memset above zeros whole of ksocknal_data */
+
+ /* flag lists/ptrs/locks initialised */
+ ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
+ try_module_get(THIS_MODULE);
+
+ ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(),
+ sizeof(*info));
+ if (ksocknal_data.ksnd_sched_info == NULL)
+ goto failed;
+
+ cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
+ ksock_sched_t *sched;
+ int nthrs;
+
+ nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
+ if (*ksocknal_tunables.ksnd_nscheds > 0) {
+ nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
+ } else {
+ /* cap at half of the CPUs; assume the other half should be
+ * reserved for upper-layer modules */
+ nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
+ }
+
+ info->ksi_nthreads_max = nthrs;
+ info->ksi_cpt = i;
+
+ LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
+ info->ksi_nthreads_max * sizeof(*sched));
+ if (info->ksi_scheds == NULL)
+ goto failed;
+
+ for (; nthrs > 0; nthrs--) {
+ sched = &info->ksi_scheds[nthrs - 1];
+
+ sched->kss_info = info;
+ spin_lock_init(&sched->kss_lock);
+ INIT_LIST_HEAD(&sched->kss_rx_conns);
+ INIT_LIST_HEAD(&sched->kss_tx_conns);
+ INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
+ init_waitqueue_head(&sched->kss_waitq);
+ }
+ }
+
+ ksocknal_data.ksnd_connd_starting = 0;
+ ksocknal_data.ksnd_connd_failed_stamp = 0;
+ ksocknal_data.ksnd_connd_starting_stamp = cfs_time_current_sec();
+ /* must have at least 2 connds to remain responsive to accepts while
+ * connecting */
+ if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
+ *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
+
+ if (*ksocknal_tunables.ksnd_nconnds_max <
+ *ksocknal_tunables.ksnd_nconnds) {
+ ksocknal_tunables.ksnd_nconnds_max =
+ ksocknal_tunables.ksnd_nconnds;
+ }
+
+ for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
+ char name[16];
+ spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
+ ksocknal_data.ksnd_connd_starting++;
+ spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
+
+
+ snprintf(name, sizeof(name), "socknal_cd%02d", i);
+ rc = ksocknal_thread_start(ksocknal_connd,
+ (void *)((ulong_ptr_t)i), name);
+ if (rc != 0) {
+ spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
+ ksocknal_data.ksnd_connd_starting--;
+ spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
+ CERROR("Can't spawn socknal connd: %d\n", rc);
+ goto failed;
+ }
+ }
+
+ rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
+ if (rc != 0) {
+ CERROR ("Can't spawn socknal reaper: %d\n", rc);
+ goto failed;
+ }
+
+ /* flag everything initialised */
+ ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
+
+ return 0;
+
+ failed:
+ ksocknal_base_shutdown();
+ return -ENETDOWN;
+}
+
+void
+ksocknal_debug_peerhash (lnet_ni_t *ni)
+{
+ ksock_peer_t *peer = NULL;
+ struct list_head *tmp;
+ int i;
+
+ read_lock(&ksocknal_data.ksnd_global_lock);
+
+ for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
+ list_for_each (tmp, &ksocknal_data.ksnd_peers[i]) {
+ peer = list_entry (tmp, ksock_peer_t, ksnp_list);
+
+ if (peer->ksnp_ni == ni)
+ break;
+
+ peer = NULL;
+ }
+ }
+
+ if (peer != NULL) {
+ ksock_route_t *route;
+ ksock_conn_t *conn;
+
+ CWARN ("Active peer on shutdown: %s, ref %d, scnt %d, "
+ "closing %d, accepting %d, err %d, zcookie "LPU64", "
+ "txq %d, zc_req %d\n", libcfs_id2str(peer->ksnp_id),
+ atomic_read(&peer->ksnp_refcount),
+ peer->ksnp_sharecount, peer->ksnp_closing,
+ peer->ksnp_accepting, peer->ksnp_error,
+ peer->ksnp_zc_next_cookie,
+ !list_empty(&peer->ksnp_tx_queue),
+ !list_empty(&peer->ksnp_zc_req_list));
+
+ list_for_each (tmp, &peer->ksnp_routes) {
+ route = list_entry(tmp, ksock_route_t, ksnr_list);
+ CWARN ("Route: ref %d, schd %d, conn %d, cnted %d, "
+ "del %d\n", atomic_read(&route->ksnr_refcount),
+ route->ksnr_scheduled, route->ksnr_connecting,
+ route->ksnr_connected, route->ksnr_deleted);
+ }
+
+ list_for_each (tmp, &peer->ksnp_conns) {
+ conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ CWARN ("Conn: ref %d, sref %d, t %d, c %d\n",
+ atomic_read(&conn->ksnc_conn_refcount),
+ atomic_read(&conn->ksnc_sock_refcount),
+ conn->ksnc_type, conn->ksnc_closing);
+ }
+ }
+
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+ return;
+}
+
+void
+ksocknal_shutdown (lnet_ni_t *ni)
+{
+ ksock_net_t *net = ni->ni_data;
+ int i;
+ lnet_process_id_t anyid = {0};
+
+ anyid.nid = LNET_NID_ANY;
+ anyid.pid = LNET_PID_ANY;
+
+ LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
+ LASSERT(ksocknal_data.ksnd_nnets > 0);
+
+ spin_lock_bh(&net->ksnn_lock);
+ net->ksnn_shutdown = 1; /* prevent new peers */
+ spin_unlock_bh(&net->ksnn_lock);
+
+ /* Delete all peers */
+ ksocknal_del_peer(ni, anyid, 0);
+
+ /* Wait for all peer state to clean up */
+ i = 2;
+ spin_lock_bh(&net->ksnn_lock);
+ while (net->ksnn_npeers != 0) {
+ spin_unlock_bh(&net->ksnn_lock);
+
+ i++;
+ CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
+ "waiting for %d peers to disconnect\n",
+ net->ksnn_npeers);
+ cfs_pause(cfs_time_seconds(1));
+
+ ksocknal_debug_peerhash(ni);
+
+ spin_lock_bh(&net->ksnn_lock);
+ }
+ spin_unlock_bh(&net->ksnn_lock);
+
+ for (i = 0; i < net->ksnn_ninterfaces; i++) {
+ LASSERT (net->ksnn_interfaces[i].ksni_npeers == 0);
+ LASSERT (net->ksnn_interfaces[i].ksni_nroutes == 0);
+ }
+
+ list_del(&net->ksnn_list);
+ LIBCFS_FREE(net, sizeof(*net));
+
+ ksocknal_data.ksnd_nnets--;
+ if (ksocknal_data.ksnd_nnets == 0)
+ ksocknal_base_shutdown();
+}
+
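+/* Probe the host's IP interfaces and record every one that is up (skipping
+ * loopback), up to LNET_MAX_INTERFACES; returns the number recorded. */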
+int
+ksocknal_enumerate_interfaces(ksock_net_t *net)
+{
+ char **names;
+ int i;
+ int j;
+ int rc;
+ int n;
+
+ n = libcfs_ipif_enumerate(&names);
+ if (n <= 0) {
+ CERROR("Can't enumerate interfaces: %d\n", n);
+ return n;
+ }
+
+ for (i = j = 0; i < n; i++) {
+ int up;
+ __u32 ip;
+ __u32 mask;
+
+ if (!strcmp(names[i], "lo")) /* skip the loopback IF */
+ continue;
+
+ rc = libcfs_ipif_query(names[i], &up, &ip, &mask);
+ if (rc != 0) {
+ CWARN("Can't get interface %s info: %d\n",
+ names[i], rc);
+ continue;
+ }
+
+ if (!up) {
+ CWARN("Ignoring interface %s (down)\n",
+ names[i]);
+ continue;
+ }
+
+ if (j == LNET_MAX_INTERFACES) {
+ CWARN("Ignoring interface %s (too many interfaces)\n",
+ names[i]);
+ continue;
+ }
+
+ net->ksnn_interfaces[j].ksni_ipaddr = ip;
+ net->ksnn_interfaces[j].ksni_netmask = mask;
+ strncpy(&net->ksnn_interfaces[j].ksni_name[0],
+ names[i], IFNAMSIZ);
+ j++;
+ }
+
+ libcfs_ipif_free_enumeration(names, n);
+
+ if (j == 0)
+ CERROR("Can't find any usable interfaces\n");
+
+ return j;
+}
+
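+/* Count how many of this net's interfaces are not already used by any net
+ * on ksocknal_data.ksnd_nets; alias suffixes (":n") are ignored when the
+ * names are compared. */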
+int
+ksocknal_search_new_ipif(ksock_net_t *net)
+{
+ int new_ipif = 0;
+ int i;
+
+ for (i = 0; i < net->ksnn_ninterfaces; i++) {
+ char *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
+ char *colon = strchr(ifnam, ':');
+ int found = 0;
+ ksock_net_t *tmp;
+ int j;
+
+ if (colon != NULL) /* ignore alias device */
+ *colon = 0;
+
+ list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
+ ksnn_list) {
+ for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
+				char *ifnam2 =
+					&tmp->ksnn_interfaces[j].ksni_name[0];
+ char *colon2 = strchr(ifnam2, ':');
+
+ if (colon2 != NULL)
+ *colon2 = 0;
+
+ found = strcmp(ifnam, ifnam2) == 0;
+ if (colon2 != NULL)
+ *colon2 = ':';
+ }
+ if (found)
+ break;
+ }
+
+ new_ipif += !found;
+ if (colon != NULL)
+ *colon = ':';
+ }
+
+ return new_ipif;
+}
+
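+/* Start scheduler threads for one CPT: on first use size the pool from the
+ * nscheds tunable or the CPT weight (capped at ksi_nthreads_max); later
+ * calls add at most two more threads when a new interface shows up. */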
+int
+ksocknal_start_schedulers(struct ksock_sched_info *info)
+{
+ int nthrs;
+ int rc = 0;
+ int i;
+
+ if (info->ksi_nthreads == 0) {
+ if (*ksocknal_tunables.ksnd_nscheds > 0) {
+ nthrs = info->ksi_nthreads_max;
+ } else {
+ nthrs = cfs_cpt_weight(lnet_cpt_table(),
+ info->ksi_cpt);
+ nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
+ nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
+ }
+ nthrs = min(nthrs, info->ksi_nthreads_max);
+ } else {
+ LASSERT(info->ksi_nthreads <= info->ksi_nthreads_max);
+		/* add up to two threads when a new interface appears */
+ nthrs = min(2, info->ksi_nthreads_max - info->ksi_nthreads);
+ }
+
+ for (i = 0; i < nthrs; i++) {
+ long id;
+ char name[20];
+ ksock_sched_t *sched;
+ id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
+ sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
+ snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
+ info->ksi_cpt, (int)(sched - &info->ksi_scheds[0]));
+
+ rc = ksocknal_thread_start(ksocknal_scheduler,
+ (void *)id, name);
+ if (rc == 0)
+ continue;
+
+ CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
+ info->ksi_cpt, info->ksi_nthreads + i, rc);
+ break;
+ }
+
+ info->ksi_nthreads += i;
+ return rc;
+}
+
+int
+ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
+{
+ int newif = ksocknal_search_new_ipif(net);
+ int rc;
+ int i;
+
+ LASSERT(ncpts > 0 && ncpts <= cfs_cpt_number(lnet_cpt_table()));
+
+ for (i = 0; i < ncpts; i++) {
+ struct ksock_sched_info *info;
+ int cpt = (cpts == NULL) ? i : cpts[i];
+
+ LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
+ info = ksocknal_data.ksnd_sched_info[cpt];
+
+ if (!newif && info->ksi_nthreads > 0)
+ continue;
+
+ rc = ksocknal_start_schedulers(info);
+ if (rc != 0)
+ return rc;
+ }
+ return 0;
+}
+
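+/* lnd_startup() entry point: bring up the base state on first use, allocate
+ * the per-net state, resolve the interfaces this NI will use, start its
+ * scheduler threads and derive the NID from the first interface's address. */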
+int
+ksocknal_startup (lnet_ni_t *ni)
+{
+ ksock_net_t *net;
+ int rc;
+ int i;
+
+ LASSERT (ni->ni_lnd == &the_ksocklnd);
+
+ if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
+ rc = ksocknal_base_startup();
+ if (rc != 0)
+ return rc;
+ }
+
+ LIBCFS_ALLOC(net, sizeof(*net));
+ if (net == NULL)
+ goto fail_0;
+
+ spin_lock_init(&net->ksnn_lock);
+ net->ksnn_incarnation = ksocknal_new_incarnation();
+ ni->ni_data = net;
+ ni->ni_peertimeout = *ksocknal_tunables.ksnd_peertimeout;
+ ni->ni_maxtxcredits = *ksocknal_tunables.ksnd_credits;
+ ni->ni_peertxcredits = *ksocknal_tunables.ksnd_peertxcredits;
+ ni->ni_peerrtrcredits = *ksocknal_tunables.ksnd_peerrtrcredits;
+
+ if (ni->ni_interfaces[0] == NULL) {
+ rc = ksocknal_enumerate_interfaces(net);
+ if (rc <= 0)
+ goto fail_1;
+
+ net->ksnn_ninterfaces = 1;
+ } else {
+ for (i = 0; i < LNET_MAX_INTERFACES; i++) {
+ int up;
+
+ if (ni->ni_interfaces[i] == NULL)
+ break;
+
+ rc = libcfs_ipif_query(
+ ni->ni_interfaces[i], &up,
+ &net->ksnn_interfaces[i].ksni_ipaddr,
+ &net->ksnn_interfaces[i].ksni_netmask);
+
+ if (rc != 0) {
+ CERROR("Can't get interface %s info: %d\n",
+ ni->ni_interfaces[i], rc);
+ goto fail_1;
+ }
+
+ if (!up) {
+ CERROR("Interface %s is down\n",
+ ni->ni_interfaces[i]);
+ goto fail_1;
+ }
+
+ strncpy(&net->ksnn_interfaces[i].ksni_name[0],
+ ni->ni_interfaces[i], IFNAMSIZ);
+ }
+ net->ksnn_ninterfaces = i;
+ }
+
+	/* call this before adding the net to ksocknal_data.ksnd_nets */
+ rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
+ if (rc != 0)
+ goto fail_1;
+
+ ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
+ net->ksnn_interfaces[0].ksni_ipaddr);
+ list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);
+
+ ksocknal_data.ksnd_nnets++;
+
+ return 0;
+
+ fail_1:
+ LIBCFS_FREE(net, sizeof(*net));
+ fail_0:
+ if (ksocknal_data.ksnd_nnets == 0)
+ ksocknal_base_shutdown();
+
+ return -ENETDOWN;
+}
+
+void __exit
+ksocknal_module_fini (void)
+{
+ lnet_unregister_lnd(&the_ksocklnd);
+ ksocknal_tunables_fini();
+}
+
+int __init
+ksocknal_module_init (void)
+{
+ int rc;
+
+	/* check that the ksnr_connected/connecting bitfields are large enough */
+ CLASSERT (SOCKLND_CONN_NTYPES <= 4);
+ CLASSERT (SOCKLND_CONN_ACK == SOCKLND_CONN_BULK_IN);
+
+ /* initialize the_ksocklnd */
+ the_ksocklnd.lnd_type = SOCKLND;
+ the_ksocklnd.lnd_startup = ksocknal_startup;
+ the_ksocklnd.lnd_shutdown = ksocknal_shutdown;
+ the_ksocklnd.lnd_ctl = ksocknal_ctl;
+ the_ksocklnd.lnd_send = ksocknal_send;
+ the_ksocklnd.lnd_recv = ksocknal_recv;
+ the_ksocklnd.lnd_notify = ksocknal_notify;
+ the_ksocklnd.lnd_query = ksocknal_query;
+ the_ksocklnd.lnd_accept = ksocknal_accept;
+
+ rc = ksocknal_tunables_init();
+ if (rc != 0)
+ return rc;
+
+ lnet_register_lnd(&the_ksocklnd);
+
+ return 0;
+}
+
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("Kernel TCP Socket LND v3.0.0");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("3.0.0");
+
+module_init(ksocknal_module_init);
+module_exit(ksocknal_module_fini);
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
new file mode 100644
index 0000000..b483e0c
--- /dev/null
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -0,0 +1,602 @@
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ *
+ * Author: Zach Brown <zab@zabbo.net>
+ * Author: Peter J. Braam <braam@clusterfs.com>
+ * Author: Phil Schwan <phil@clusterfs.com>
+ * Author: Eric Barton <eric@bartonsoftware.com>
+ *
+ * This file is part of Lustre, http://www.lustre.org
+ *
+ * Portals is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Portals is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Portals; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#define DEBUG_PORTAL_ALLOC
+#define DEBUG_SUBSYSTEM S_LND
+
+#include "socklnd_lib-linux.h"
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/lnet/lnet.h>
+#include <linux/lnet/lib-lnet.h>
+#include <linux/lnet/socklnd.h>
+#include <linux/lnet/lnet-sysctl.h>
+
+#define SOCKNAL_PEER_HASH_SIZE 101 /* # peer lists */
+#define SOCKNAL_RESCHED 100 /* # scheduler loops before reschedule */
+#define SOCKNAL_INSANITY_RECONN 5000	/* connd keeps retrying reconnections indefinitely */
+#define SOCKNAL_ENOMEM_RETRY CFS_TICK /* jiffies between retries */
+
+#define SOCKNAL_SINGLE_FRAG_TX 0 /* disable multi-fragment sends */
+#define SOCKNAL_SINGLE_FRAG_RX 0 /* disable multi-fragment receives */
+
+#define SOCKNAL_VERSION_DEBUG 0 /* enable protocol version debugging */
+
+/* risk kmap deadlock on multi-frag I/O (backs off to single-frag if disabled).
+ * no risk if we're not running on a CONFIG_HIGHMEM platform. */
+#ifdef CONFIG_HIGHMEM
+# define SOCKNAL_RISK_KMAP_DEADLOCK 0
+#else
+# define SOCKNAL_RISK_KMAP_DEADLOCK 1
+#endif
+
+struct ksock_sched_info;
+
+typedef struct /* per scheduler state */
+{
+ spinlock_t kss_lock; /* serialise */
+ struct list_head kss_rx_conns; /* conn waiting to be read */
+ /* conn waiting to be written */
+ struct list_head kss_tx_conns;
+ /* zombie noop tx list */
+ struct list_head kss_zombie_noop_txs;
+ wait_queue_head_t kss_waitq; /* where scheduler sleeps */
+ /* # connections assigned to this scheduler */
+ int kss_nconns;
+ struct ksock_sched_info *kss_info; /* owner of it */
+ struct page *kss_rx_scratch_pgs[LNET_MAX_IOV];
+ struct iovec kss_scratch_iov[LNET_MAX_IOV];
+} ksock_sched_t;
+
+struct ksock_sched_info {
+ int ksi_nthreads_max; /* max allowed threads */
+ int ksi_nthreads; /* number of threads */
+ int ksi_cpt; /* CPT id */
+ ksock_sched_t *ksi_scheds; /* array of schedulers */
+};
+
+#define KSOCK_CPT_SHIFT 16
+#define KSOCK_THREAD_ID(cpt, sid) (((cpt) << KSOCK_CPT_SHIFT) | (sid))
+#define KSOCK_THREAD_CPT(id) ((id) >> KSOCK_CPT_SHIFT)
+#define KSOCK_THREAD_SID(id) ((id) & ((1UL << KSOCK_CPT_SHIFT) - 1))
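+/* e.g. KSOCK_THREAD_ID(1, 3) == 0x10003, for which KSOCK_THREAD_CPT()
+ * returns 1 and KSOCK_THREAD_SID() returns 3 */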
+
+typedef struct /* in-use interface */
+{
+ __u32 ksni_ipaddr; /* interface's IP address */
+ __u32 ksni_netmask; /* interface's network mask */
+ int ksni_nroutes; /* # routes using (active) */
+ int ksni_npeers; /* # peers using (passive) */
+ char ksni_name[IFNAMSIZ]; /* interface name */
+} ksock_interface_t;
+
+typedef struct
+{
+ /* "stuck" socket timeout (seconds) */
+ int *ksnd_timeout;
+ /* # scheduler threads in each pool while starting */
+ int *ksnd_nscheds;
+ int *ksnd_nconnds; /* # connection daemons */
+ int *ksnd_nconnds_max; /* max # connection daemons */
+ int *ksnd_min_reconnectms; /* first connection retry after (ms)... */
+ int *ksnd_max_reconnectms; /* ...exponentially increasing to this */
+ int *ksnd_eager_ack; /* make TCP ack eagerly? */
+ int *ksnd_typed_conns; /* drive sockets by type? */
+ int *ksnd_min_bulk; /* smallest "large" message */
+ int *ksnd_tx_buffer_size; /* socket tx buffer size */
+ int *ksnd_rx_buffer_size; /* socket rx buffer size */
+ int *ksnd_nagle; /* enable NAGLE? */
+ int *ksnd_round_robin; /* round robin for multiple interfaces */
+ int *ksnd_keepalive; /* # secs for sending keepalive NOOP */
+ int *ksnd_keepalive_idle; /* # idle secs before 1st probe */
+ int *ksnd_keepalive_count; /* # probes */
+ int *ksnd_keepalive_intvl; /* time between probes */
+ int *ksnd_credits; /* # concurrent sends */
+ int *ksnd_peertxcredits; /* # concurrent sends to 1 peer */
+ int *ksnd_peerrtrcredits; /* # per-peer router buffer credits */
+ int *ksnd_peertimeout; /* seconds to consider peer dead */
+ int *ksnd_enable_csum; /* enable check sum */
+ int *ksnd_inject_csum_error; /* set non-zero to inject checksum error */
+ int *ksnd_nonblk_zcack; /* always send zc-ack on non-blocking connection */
+ unsigned int *ksnd_zc_min_payload; /* minimum zero copy payload size */
+ int *ksnd_zc_recv; /* enable ZC receive (for Chelsio TOE) */
+ int *ksnd_zc_recv_min_nfrags; /* minimum # of fragments to enable ZC receive */
+#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
+ ctl_table_header_t *ksnd_sysctl; /* sysctl interface */
+#endif
+} ksock_tunables_t;
+
+typedef struct
+{
+ __u64 ksnn_incarnation; /* my epoch */
+ spinlock_t ksnn_lock; /* serialise */
+ struct list_head ksnn_list; /* chain on global list */
+ int ksnn_npeers; /* # peers */
+ int ksnn_shutdown; /* shutting down? */
+ int ksnn_ninterfaces; /* IP interfaces */
+ ksock_interface_t ksnn_interfaces[LNET_MAX_INTERFACES];
+} ksock_net_t;
+
+/** connd timeout */
+#define SOCKNAL_CONND_TIMEOUT 120
+/** reserved thread for accepting & creating new connd */
+#define SOCKNAL_CONND_RESV 1
+
+typedef struct
+{
+ int ksnd_init; /* initialisation state */
+ int ksnd_nnets; /* # networks set up */
+ struct list_head ksnd_nets; /* list of nets */
+ /* stabilize peer/conn ops */
+ rwlock_t ksnd_global_lock;
+ /* hash table of all my known peers */
+ struct list_head *ksnd_peers;
+ int ksnd_peer_hash_size; /* size of ksnd_peers */
+
+ int ksnd_nthreads; /* # live threads */
+ int ksnd_shuttingdown; /* tell threads to exit */
+ /* schedulers information */
+ struct ksock_sched_info **ksnd_sched_info;
+
+ atomic_t ksnd_nactive_txs; /* #active txs */
+
+ struct list_head ksnd_deathrow_conns; /* conns to close: reaper_lock*/
+ struct list_head ksnd_zombie_conns; /* conns to free: reaper_lock */
+ struct list_head ksnd_enomem_conns; /* conns to retry: reaper_lock*/
+ wait_queue_head_t ksnd_reaper_waitq; /* reaper sleeps here */
+ cfs_time_t ksnd_reaper_waketime;/* when reaper will wake */
+ spinlock_t ksnd_reaper_lock; /* serialise */
+
+ int ksnd_enomem_tx; /* test ENOMEM sender */
+ int ksnd_stall_tx; /* test sluggish sender */
+ int ksnd_stall_rx; /* test sluggish receiver */
+
+ struct list_head ksnd_connd_connreqs; /* incoming connection requests */
+ struct list_head ksnd_connd_routes; /* routes waiting to be connected */
+ wait_queue_head_t ksnd_connd_waitq; /* connds sleep here */
+ int ksnd_connd_connecting;/* # connds connecting */
+ /** time stamp of the last failed connecting attempt */
+ long ksnd_connd_failed_stamp;
+ /** # starting connd */
+ unsigned ksnd_connd_starting;
+ /** time stamp of the last starting connd */
+ long ksnd_connd_starting_stamp;
+ /** # running connd */
+ unsigned ksnd_connd_running;
+ spinlock_t ksnd_connd_lock; /* serialise */
+
+ struct list_head ksnd_idle_noop_txs; /* list head for freed noop tx */
+ spinlock_t ksnd_tx_lock; /* serialise, g_lock unsafe */
+
+} ksock_nal_data_t;
+
+#define SOCKNAL_INIT_NOTHING 0
+#define SOCKNAL_INIT_DATA 1
+#define SOCKNAL_INIT_ALL 2
+
+/* A packet just assembled for transmission is represented by 1 or more
+ * struct iovec fragments (the first frag contains the portals header),
+ * followed by 0 or more lnet_kiov_t fragments.
+ *
+ * On the receive side, initially 1 struct iovec fragment is posted for
+ * receive (the header). Once the header has been received, the payload is
+ * received into either struct iovec or lnet_kiov_t fragments, depending on
+ * what the header matched or whether the message needs forwarding. */
+
+struct ksock_conn; /* forward ref */
+struct ksock_peer; /* forward ref */
+struct ksock_route; /* forward ref */
+struct ksock_proto; /* forward ref */
+
+typedef struct /* transmit packet */
+{
+ struct list_head tx_list; /* queue on conn for transmission etc */
+ struct list_head tx_zc_list; /* queue on peer for ZC request */
+ atomic_t tx_refcount; /* tx reference count */
+ int tx_nob; /* # packet bytes */
+ int tx_resid; /* residual bytes */
+ int tx_niov; /* # packet iovec frags */
+ struct iovec *tx_iov; /* packet iovec frags */
+ int tx_nkiov; /* # packet page frags */
+ unsigned short tx_zc_aborted; /* aborted ZC request */
+ unsigned short tx_zc_capable:1; /* payload is large enough for ZC */
+ unsigned short tx_zc_checked:1; /* Have I checked if I should ZC? */
+ unsigned short tx_nonblk:1; /* it's a non-blocking ACK */
+ lnet_kiov_t *tx_kiov; /* packet page frags */
+ struct ksock_conn *tx_conn; /* owning conn */
+ lnet_msg_t *tx_lnetmsg; /* lnet message for lnet_finalize() */
+ cfs_time_t tx_deadline; /* when (in jiffies) tx times out */
+ ksock_msg_t tx_msg; /* socklnd message buffer */
+ int tx_desc_size; /* size of this descriptor */
+ union {
+ struct {
+ struct iovec iov; /* virt hdr */
+ lnet_kiov_t kiov[0]; /* paged payload */
+ } paged;
+ struct {
+ struct iovec iov[1]; /* virt hdr + payload */
+ } virt;
+ } tx_frags;
+} ksock_tx_t;
+
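+/* A noop tx carries only the message header in tx_frags.virt.iov[0], so its
+ * descriptor needs no room for paged payload fragments. */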
+#define KSOCK_NOOP_TX_SIZE ((int)offsetof(ksock_tx_t, tx_frags.paged.kiov[0]))
+
+/* network zero copy callback descriptor embedded in ksock_tx_t */
+
+/* space for the rx frag descriptors; we either read a single contiguous
+ * header, or up to LNET_MAX_IOV frags of payload of either type. */
+typedef union {
+ struct iovec iov[LNET_MAX_IOV];
+ lnet_kiov_t kiov[LNET_MAX_IOV];
+} ksock_rxiovspace_t;
+
+#define SOCKNAL_RX_KSM_HEADER 1 /* reading ksock message header */
+#define SOCKNAL_RX_LNET_HEADER 2 /* reading lnet message header */
+#define SOCKNAL_RX_PARSE 3 /* Calling lnet_parse() */
+#define SOCKNAL_RX_PARSE_WAIT 4 /* waiting to be told to read the body */
+#define SOCKNAL_RX_LNET_PAYLOAD 5 /* reading lnet payload (to deliver here) */
+#define SOCKNAL_RX_SLOP 6 /* skipping body */
+
+typedef struct ksock_conn
+{
+ struct ksock_peer *ksnc_peer; /* owning peer */
+ struct ksock_route *ksnc_route; /* owning route */
+ struct list_head ksnc_list; /* stash on peer's conn list */
+ socket_t *ksnc_sock; /* actual socket */
+ void *ksnc_saved_data_ready; /* socket's original data_ready() callback */
+ void *ksnc_saved_write_space; /* socket's original write_space() callback */
+ atomic_t ksnc_conn_refcount; /* conn refcount */
+ atomic_t ksnc_sock_refcount; /* sock refcount */
+ ksock_sched_t *ksnc_scheduler; /* who schedules this connection */
+ __u32 ksnc_myipaddr; /* my IP */
+ __u32 ksnc_ipaddr; /* peer's IP */
+ int ksnc_port; /* peer's port */
+ signed int ksnc_type:3; /* type of connection,
+ * should be signed value */
+ unsigned int ksnc_closing:1; /* being shut down */
+ unsigned int ksnc_flip:1; /* flip or not, only for V2.x */
+ unsigned int ksnc_zc_capable:1; /* enable to ZC */
+ struct ksock_proto *ksnc_proto; /* protocol for the connection */
+
+ /* reader */
+ struct list_head ksnc_rx_list; /* where I enq waiting input or a forwarding descriptor */
+ cfs_time_t ksnc_rx_deadline; /* when (in jiffies) receive times out */
+ __u8 ksnc_rx_started; /* started receiving a message */
+ __u8 ksnc_rx_ready; /* data ready to read */
+ __u8 ksnc_rx_scheduled;/* being progressed */
+ __u8 ksnc_rx_state; /* what is being read */
+ int ksnc_rx_nob_left; /* # bytes to next hdr/body */
+ int ksnc_rx_nob_wanted; /* bytes actually wanted */
+ int ksnc_rx_niov; /* # iovec frags */
+ struct iovec *ksnc_rx_iov; /* the iovec frags */
+ int ksnc_rx_nkiov; /* # page frags */
+ lnet_kiov_t *ksnc_rx_kiov; /* the page frags */
+ ksock_rxiovspace_t ksnc_rx_iov_space;/* space for frag descriptors */
+ __u32 ksnc_rx_csum; /* partial checksum for incoming data */
+ void *ksnc_cookie; /* rx lnet_finalize passthru arg */
+ ksock_msg_t ksnc_msg; /* incoming message buffer:
+ * V2.x message takes the
+ * whole struct
+ * V1.x message is a bare
+ * lnet_hdr_t, it's stored in
+ * ksnc_msg.ksm_u.lnetmsg */
+
+ /* WRITER */
+ struct list_head ksnc_tx_list; /* where I enq waiting for output space */
+ struct list_head ksnc_tx_queue; /* packets waiting to be sent */
+ ksock_tx_t *ksnc_tx_carrier; /* next TX that can carry a LNet message or ZC-ACK */
+ cfs_time_t ksnc_tx_deadline; /* when (in jiffies) tx times out */
+ int ksnc_tx_bufnob; /* send buffer marker */
+ atomic_t ksnc_tx_nob; /* # bytes queued */
+ int ksnc_tx_ready; /* write space */
+ int ksnc_tx_scheduled; /* being progressed */
+ cfs_time_t ksnc_tx_last_post; /* time stamp of the last posted TX */
+} ksock_conn_t;
+
+typedef struct ksock_route
+{
+ struct list_head ksnr_list; /* chain on peer route list */
+ struct list_head ksnr_connd_list; /* chain on ksnr_connd_routes */
+ struct ksock_peer *ksnr_peer; /* owning peer */
+ atomic_t ksnr_refcount; /* # users */
+ cfs_time_t ksnr_timeout; /* when (in jiffies) reconnection can happen next */
+ cfs_duration_t ksnr_retry_interval; /* how long between retries */
+ __u32 ksnr_myipaddr; /* my IP */
+ __u32 ksnr_ipaddr; /* IP address to connect to */
+ int ksnr_port; /* port to connect to */
+ unsigned int ksnr_scheduled:1; /* scheduled for attention */
+ unsigned int ksnr_connecting:1;/* connection establishment in progress */
+ unsigned int ksnr_connected:4; /* connections established by type */
+ unsigned int ksnr_deleted:1; /* been removed from peer? */
+ unsigned int ksnr_share_count; /* created explicitly? */
+ int ksnr_conn_count; /* # conns established by this route */
+} ksock_route_t;
+
+#define SOCKNAL_KEEPALIVE_PING 1 /* cookie for keepalive ping */
+
+typedef struct ksock_peer
+{
+ struct list_head ksnp_list; /* stash on global peer list */
+ cfs_time_t ksnp_last_alive; /* when (in jiffies) I was last alive */
+ lnet_process_id_t ksnp_id; /* who's on the other end(s) */
+ atomic_t ksnp_refcount; /* # users */
+ int ksnp_sharecount; /* lconf usage counter */
+ int ksnp_closing; /* being closed */
+ int ksnp_accepting;/* # passive connections pending */
+ int ksnp_error; /* errno on closing last conn */
+ __u64 ksnp_zc_next_cookie;/* ZC completion cookie */
+ __u64 ksnp_incarnation; /* latest known peer incarnation */
+ struct ksock_proto *ksnp_proto; /* latest known peer protocol */
+ struct list_head ksnp_conns; /* all active connections */
+ struct list_head ksnp_routes; /* routes */
+ struct list_head ksnp_tx_queue; /* waiting packets */
+ spinlock_t ksnp_lock; /* serialize, g_lock unsafe */
+ struct list_head ksnp_zc_req_list; /* zero copy requests wait for ACK */
+ cfs_time_t ksnp_send_keepalive; /* time to send keepalive */
+ lnet_ni_t *ksnp_ni; /* which network */
+ int ksnp_n_passive_ips; /* # of... */
+ __u32 ksnp_passive_ips[LNET_MAX_INTERFACES]; /* preferred local interfaces */
+} ksock_peer_t;
+
+typedef struct ksock_connreq
+{
+ struct list_head ksncr_list; /* stash on ksnd_connd_connreqs */
+ lnet_ni_t *ksncr_ni; /* chosen NI */
+ socket_t *ksncr_sock; /* accepted socket */
+} ksock_connreq_t;
+
+extern ksock_nal_data_t ksocknal_data;
+extern ksock_tunables_t ksocknal_tunables;
+
+#define SOCKNAL_MATCH_NO 0 /* TX can't match type of connection */
+#define SOCKNAL_MATCH_YES 1 /* TX matches type of connection */
+#define SOCKNAL_MATCH_MAY 2 /* TX can be sent on the connection, but not preferred */
+
+typedef struct ksock_proto
+{
+ int pro_version; /* version number of protocol */
+ int (*pro_send_hello)(ksock_conn_t *, ksock_hello_msg_t *); /* handshake function */
+ int (*pro_recv_hello)(ksock_conn_t *, ksock_hello_msg_t *, int);/* handshake function */
+ void (*pro_pack)(ksock_tx_t *); /* message pack */
+ void (*pro_unpack)(ksock_msg_t *); /* message unpack */
+ ksock_tx_t *(*pro_queue_tx_msg)(ksock_conn_t *, ksock_tx_t *); /* queue tx on the connection */
+ int (*pro_queue_tx_zcack)(ksock_conn_t *, ksock_tx_t *, __u64); /* queue ZC ack on the connection */
+ int (*pro_handle_zcreq)(ksock_conn_t *, __u64, int); /* handle ZC request */
+ int (*pro_handle_zcack)(ksock_conn_t *, __u64, __u64); /* handle ZC ACK */
+ int (*pro_match_tx)(ksock_conn_t *, ksock_tx_t *, int); /* msg type matches the connection type:
+ * return value:
+ * return MATCH_NO : no
+ * return MATCH_YES : matching type
+ * return MATCH_MAY : can be backup */
+} ksock_proto_t;
+
+extern ksock_proto_t ksocknal_protocol_v1x;
+extern ksock_proto_t ksocknal_protocol_v2x;
+extern ksock_proto_t ksocknal_protocol_v3x;
+
+#define KSOCK_PROTO_V1_MAJOR LNET_PROTO_TCP_VERSION_MAJOR
+#define KSOCK_PROTO_V1_MINOR LNET_PROTO_TCP_VERSION_MINOR
+#define KSOCK_PROTO_V1 KSOCK_PROTO_V1_MAJOR
+
+#ifndef CPU_MASK_NONE
+#define CPU_MASK_NONE 0UL
+#endif
+
+static inline int
+ksocknal_route_mask(void)
+{
+ if (!*ksocknal_tunables.ksnd_typed_conns)
+ return (1 << SOCKLND_CONN_ANY);
+
+ return ((1 << SOCKLND_CONN_CONTROL) |
+ (1 << SOCKLND_CONN_BULK_IN) |
+ (1 << SOCKLND_CONN_BULK_OUT));
+}
+
+static inline struct list_head *
+ksocknal_nid2peerlist (lnet_nid_t nid)
+{
+ unsigned int hash = ((unsigned int)nid) % ksocknal_data.ksnd_peer_hash_size;
+
+ return (&ksocknal_data.ksnd_peers [hash]);
+}
+
+static inline void
+ksocknal_conn_addref (ksock_conn_t *conn)
+{
+ LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
+ atomic_inc(&conn->ksnc_conn_refcount);
+}
+
+extern void ksocknal_queue_zombie_conn (ksock_conn_t *conn);
+extern void ksocknal_finalize_zcreq(ksock_conn_t *conn);
+
+static inline void
+ksocknal_conn_decref (ksock_conn_t *conn)
+{
+ LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
+ if (atomic_dec_and_test(&conn->ksnc_conn_refcount))
+ ksocknal_queue_zombie_conn(conn);
+}
+
+static inline int
+ksocknal_connsock_addref (ksock_conn_t *conn)
+{
+ int rc = -ESHUTDOWN;
+
+ read_lock(&ksocknal_data.ksnd_global_lock);
+ if (!conn->ksnc_closing) {
+ LASSERT(atomic_read(&conn->ksnc_sock_refcount) > 0);
+ atomic_inc(&conn->ksnc_sock_refcount);
+ rc = 0;
+ }
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+
+ return (rc);
+}
+
+static inline void
+ksocknal_connsock_decref (ksock_conn_t *conn)
+{
+ LASSERT (atomic_read(&conn->ksnc_sock_refcount) > 0);
+ if (atomic_dec_and_test(&conn->ksnc_sock_refcount)) {
+ LASSERT (conn->ksnc_closing);
+ libcfs_sock_release(conn->ksnc_sock);
+ conn->ksnc_sock = NULL;
+ ksocknal_finalize_zcreq(conn);
+ }
+}
+
+static inline void
+ksocknal_tx_addref (ksock_tx_t *tx)
+{
+ LASSERT (atomic_read(&tx->tx_refcount) > 0);
+ atomic_inc(&tx->tx_refcount);
+}
+
+extern void ksocknal_tx_prep (ksock_conn_t *, ksock_tx_t *tx);
+extern void ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx);
+
+static inline void
+ksocknal_tx_decref (ksock_tx_t *tx)
+{
+ LASSERT (atomic_read(&tx->tx_refcount) > 0);
+ if (atomic_dec_and_test(&tx->tx_refcount))
+ ksocknal_tx_done(NULL, tx);
+}
+
+static inline void
+ksocknal_route_addref (ksock_route_t *route)
+{
+ LASSERT (atomic_read(&route->ksnr_refcount) > 0);
+ atomic_inc(&route->ksnr_refcount);
+}
+
+extern void ksocknal_destroy_route (ksock_route_t *route);
+
+static inline void
+ksocknal_route_decref (ksock_route_t *route)
+{
+ LASSERT (atomic_read (&route->ksnr_refcount) > 0);
+ if (atomic_dec_and_test(&route->ksnr_refcount))
+ ksocknal_destroy_route (route);
+}
+
+static inline void
+ksocknal_peer_addref (ksock_peer_t *peer)
+{
+ LASSERT (atomic_read (&peer->ksnp_refcount) > 0);
+ atomic_inc(&peer->ksnp_refcount);
+}
+
+extern void ksocknal_destroy_peer (ksock_peer_t *peer);
+
+static inline void
+ksocknal_peer_decref (ksock_peer_t *peer)
+{
+ LASSERT (atomic_read (&peer->ksnp_refcount) > 0);
+ if (atomic_dec_and_test(&peer->ksnp_refcount))
+ ksocknal_destroy_peer (peer);
+}
+
+int ksocknal_startup (lnet_ni_t *ni);
+void ksocknal_shutdown (lnet_ni_t *ni);
+int ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg);
+int ksocknal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
+int ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
+ int delayed, unsigned int niov,
+ struct iovec *iov, lnet_kiov_t *kiov,
+ unsigned int offset, unsigned int mlen, unsigned int rlen);
+int ksocknal_accept(lnet_ni_t *ni, socket_t *sock);
+
+extern int ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip, int port);
+extern ksock_peer_t *ksocknal_find_peer_locked (lnet_ni_t *ni, lnet_process_id_t id);
+extern ksock_peer_t *ksocknal_find_peer (lnet_ni_t *ni, lnet_process_id_t id);
+extern void ksocknal_peer_failed (ksock_peer_t *peer);
+extern int ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
+ socket_t *sock, int type);
+extern void ksocknal_close_conn_locked (ksock_conn_t *conn, int why);
+extern void ksocknal_terminate_conn (ksock_conn_t *conn);
+extern void ksocknal_destroy_conn (ksock_conn_t *conn);
+extern int ksocknal_close_peer_conns_locked (ksock_peer_t *peer,
+ __u32 ipaddr, int why);
+extern int ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why);
+extern int ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr);
+extern ksock_conn_t *ksocknal_find_conn_locked(ksock_peer_t *peer,
+ ksock_tx_t *tx, int nonblk);
+
+extern int ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx,
+ lnet_process_id_t id);
+extern ksock_tx_t *ksocknal_alloc_tx(int type, int size);
+extern void ksocknal_free_tx (ksock_tx_t *tx);
+extern ksock_tx_t *ksocknal_alloc_tx_noop(__u64 cookie, int nonblk);
+extern void ksocknal_next_tx_carrier(ksock_conn_t *conn);
+extern void ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn);
+extern void ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist,
+ int error);
+extern void ksocknal_notify (lnet_ni_t *ni, lnet_nid_t gw_nid, int alive);
+extern void ksocknal_query (struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when);
+extern int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name);
+extern void ksocknal_thread_fini (void);
+extern void ksocknal_launch_all_connections_locked (ksock_peer_t *peer);
+extern ksock_route_t *ksocknal_find_connectable_route_locked (ksock_peer_t *peer);
+extern ksock_route_t *ksocknal_find_connecting_route_locked (ksock_peer_t *peer);
+extern int ksocknal_new_packet (ksock_conn_t *conn, int skip);
+extern int ksocknal_scheduler (void *arg);
+extern int ksocknal_connd (void *arg);
+extern int ksocknal_reaper (void *arg);
+extern int ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn,
+ lnet_nid_t peer_nid, ksock_hello_msg_t *hello);
+extern int ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
+ ksock_hello_msg_t *hello, lnet_process_id_t *id,
+ __u64 *incarnation);
+extern void ksocknal_read_callback(ksock_conn_t *conn);
+extern void ksocknal_write_callback(ksock_conn_t *conn);
+
+extern int ksocknal_lib_zc_capable(ksock_conn_t *conn);
+extern void ksocknal_lib_save_callback(socket_t *sock, ksock_conn_t *conn);
+extern void ksocknal_lib_set_callback(socket_t *sock, ksock_conn_t *conn);
+extern void ksocknal_lib_reset_callback(socket_t *sock, ksock_conn_t *conn);
+extern void ksocknal_lib_push_conn (ksock_conn_t *conn);
+extern int ksocknal_lib_get_conn_addrs (ksock_conn_t *conn);
+extern int ksocknal_lib_setup_sock (socket_t *so);
+extern int ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx);
+extern int ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx);
+extern void ksocknal_lib_eager_ack (ksock_conn_t *conn);
+extern int ksocknal_lib_recv_iov (ksock_conn_t *conn);
+extern int ksocknal_lib_recv_kiov (ksock_conn_t *conn);
+extern int ksocknal_lib_get_conn_tunables (ksock_conn_t *conn, int *txmem,
+ int *rxmem, int *nagle);
+
+extern int ksocknal_tunables_init(void);
+extern void ksocknal_tunables_fini(void);
+extern int ksocknal_lib_tunables_init(void);
+extern void ksocknal_lib_tunables_fini(void);
+
+extern void ksocknal_lib_csum_tx(ksock_tx_t *tx);
+
+extern int ksocknal_lib_memory_pressure(ksock_conn_t *conn);
+extern int ksocknal_lib_bind_thread_to_cpu(int id);
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
new file mode 100644
index 0000000..68a4f52
--- /dev/null
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -0,0 +1,2654 @@
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ *
+ * Author: Zach Brown <zab@zabbo.net>
+ * Author: Peter J. Braam <braam@clusterfs.com>
+ * Author: Phil Schwan <phil@clusterfs.com>
+ * Author: Eric Barton <eric@bartonsoftware.com>
+ *
+ * This file is part of Portals, http://www.sf.net/projects/sandiaportals/
+ *
+ * Portals is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Portals is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Portals; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "socklnd.h"
+
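+/* Allocate a tx descriptor; noop txs are recycled from the
+ * ksnd_idle_noop_txs free list when possible rather than reallocated. */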
+ksock_tx_t *
+ksocknal_alloc_tx(int type, int size)
+{
+ ksock_tx_t *tx = NULL;
+
+ if (type == KSOCK_MSG_NOOP) {
+ LASSERT(size == KSOCK_NOOP_TX_SIZE);
+
+ /* searching for a noop tx in free list */
+ spin_lock(&ksocknal_data.ksnd_tx_lock);
+
+ if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
+			tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
+					ksock_tx_t, tx_list);
+ LASSERT(tx->tx_desc_size == size);
+ list_del(&tx->tx_list);
+ }
+
+ spin_unlock(&ksocknal_data.ksnd_tx_lock);
+ }
+
+ if (tx == NULL)
+ LIBCFS_ALLOC(tx, size);
+
+ if (tx == NULL)
+ return NULL;
+
+ atomic_set(&tx->tx_refcount, 1);
+ tx->tx_zc_aborted = 0;
+ tx->tx_zc_capable = 0;
+ tx->tx_zc_checked = 0;
+ tx->tx_desc_size = size;
+
+ atomic_inc(&ksocknal_data.ksnd_nactive_txs);
+
+ return tx;
+}
+
+ksock_tx_t *
+ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
+{
+ ksock_tx_t *tx;
+
+ tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
+ if (tx == NULL) {
+ CERROR("Can't allocate noop tx desc\n");
+ return NULL;
+ }
+
+ tx->tx_conn = NULL;
+ tx->tx_lnetmsg = NULL;
+ tx->tx_kiov = NULL;
+ tx->tx_nkiov = 0;
+ tx->tx_iov = tx->tx_frags.virt.iov;
+ tx->tx_niov = 1;
+ tx->tx_nonblk = nonblk;
+
+ socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_NOOP);
+ tx->tx_msg.ksm_zc_cookies[1] = cookie;
+
+ return tx;
+}
+
+void
+ksocknal_free_tx (ksock_tx_t *tx)
+{
+ atomic_dec(&ksocknal_data.ksnd_nactive_txs);
+
+ if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
+ /* it's a noop tx */
+ spin_lock(&ksocknal_data.ksnd_tx_lock);
+
+ list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);
+
+ spin_unlock(&ksocknal_data.ksnd_tx_lock);
+ } else {
+ LIBCFS_FREE(tx, tx->tx_desc_size);
+ }
+}
+
+int
+ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
+{
+ struct iovec *iov = tx->tx_iov;
+ int nob;
+ int rc;
+
+ LASSERT (tx->tx_niov > 0);
+
+ /* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */
+ rc = ksocknal_lib_send_iov(conn, tx);
+
+ if (rc <= 0) /* sent nothing? */
+ return (rc);
+
+ nob = rc;
+ LASSERT (nob <= tx->tx_resid);
+ tx->tx_resid -= nob;
+
+ /* "consume" iov */
+ do {
+ LASSERT (tx->tx_niov > 0);
+
+ if (nob < (int) iov->iov_len) {
+ iov->iov_base = (void *)((char *)iov->iov_base + nob);
+ iov->iov_len -= nob;
+ return (rc);
+ }
+
+ nob -= iov->iov_len;
+ tx->tx_iov = ++iov;
+ tx->tx_niov--;
+ } while (nob != 0);
+
+ return (rc);
+}
+
+int
+ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
+{
+ lnet_kiov_t *kiov = tx->tx_kiov;
+ int nob;
+ int rc;
+
+ LASSERT (tx->tx_niov == 0);
+ LASSERT (tx->tx_nkiov > 0);
+
+ /* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
+ rc = ksocknal_lib_send_kiov(conn, tx);
+
+ if (rc <= 0) /* sent nothing? */
+ return (rc);
+
+ nob = rc;
+ LASSERT (nob <= tx->tx_resid);
+ tx->tx_resid -= nob;
+
+ /* "consume" kiov */
+ do {
+ LASSERT(tx->tx_nkiov > 0);
+
+ if (nob < (int)kiov->kiov_len) {
+ kiov->kiov_offset += nob;
+ kiov->kiov_len -= nob;
+ return rc;
+ }
+
+ nob -= (int)kiov->kiov_len;
+ tx->tx_kiov = ++kiov;
+ tx->tx_nkiov--;
+ } while (nob != 0);
+
+ return (rc);
+}
+
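+/* Write as much of 'tx' to the socket as it will accept, consuming the
+ * iovec/kiov fragments as bytes go out; returns 0 once the whole tx has
+ * been handed to the socket, -EAGAIN/-ENOMEM to retry later, or another
+ * negative errno on a hard failure. */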
+int
+ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
+{
+ int rc;
+ int bufnob;
+
+ if (ksocknal_data.ksnd_stall_tx != 0) {
+ cfs_pause(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
+ }
+
+ LASSERT (tx->tx_resid != 0);
+
+ rc = ksocknal_connsock_addref(conn);
+ if (rc != 0) {
+ LASSERT (conn->ksnc_closing);
+ return (-ESHUTDOWN);
+ }
+
+ do {
+ if (ksocknal_data.ksnd_enomem_tx > 0) {
+ /* testing... */
+ ksocknal_data.ksnd_enomem_tx--;
+ rc = -EAGAIN;
+ } else if (tx->tx_niov != 0) {
+ rc = ksocknal_send_iov (conn, tx);
+ } else {
+ rc = ksocknal_send_kiov (conn, tx);
+ }
+
+ bufnob = cfs_sock_wmem_queued(conn->ksnc_sock);
+ if (rc > 0) /* sent something? */
+ conn->ksnc_tx_bufnob += rc; /* account it */
+
+ if (bufnob < conn->ksnc_tx_bufnob) {
+ /* allocated send buffer bytes < computed; infer
+ * something got ACKed */
+ conn->ksnc_tx_deadline =
+ cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
+ conn->ksnc_tx_bufnob = bufnob;
+ mb();
+ }
+
+ if (rc <= 0) { /* Didn't write anything? */
+
+ if (rc == 0) /* some stacks return 0 instead of -EAGAIN */
+ rc = -EAGAIN;
+
+ /* Check if EAGAIN is due to memory pressure */
+			if (rc == -EAGAIN && ksocknal_lib_memory_pressure(conn))
+ rc = -ENOMEM;
+
+ break;
+ }
+
+ /* socket's wmem_queued now includes 'rc' bytes */
+ atomic_sub (rc, &conn->ksnc_tx_nob);
+ rc = 0;
+
+ } while (tx->tx_resid != 0);
+
+ ksocknal_connsock_decref(conn);
+ return (rc);
+}
+
+int
+ksocknal_recv_iov (ksock_conn_t *conn)
+{
+ struct iovec *iov = conn->ksnc_rx_iov;
+ int nob;
+ int rc;
+
+ LASSERT (conn->ksnc_rx_niov > 0);
+
+ /* Never touch conn->ksnc_rx_iov or change connection
+ * status inside ksocknal_lib_recv_iov */
+ rc = ksocknal_lib_recv_iov(conn);
+
+ if (rc <= 0)
+ return (rc);
+
+ /* received something... */
+ nob = rc;
+
+ conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
+ conn->ksnc_rx_deadline =
+ cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ mb(); /* order with setting rx_started */
+ conn->ksnc_rx_started = 1;
+
+ conn->ksnc_rx_nob_wanted -= nob;
+ conn->ksnc_rx_nob_left -= nob;
+
+ do {
+ LASSERT (conn->ksnc_rx_niov > 0);
+
+ if (nob < (int)iov->iov_len) {
+ iov->iov_len -= nob;
+ iov->iov_base = (void *)((char *)iov->iov_base + nob);
+ return (-EAGAIN);
+ }
+
+ nob -= iov->iov_len;
+ conn->ksnc_rx_iov = ++iov;
+ conn->ksnc_rx_niov--;
+ } while (nob != 0);
+
+ return (rc);
+}
+
+int
+ksocknal_recv_kiov (ksock_conn_t *conn)
+{
+ lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
+ int nob;
+ int rc;
+ LASSERT (conn->ksnc_rx_nkiov > 0);
+
+ /* Never touch conn->ksnc_rx_kiov or change connection
+	 * status inside ksocknal_lib_recv_kiov */
+ rc = ksocknal_lib_recv_kiov(conn);
+
+ if (rc <= 0)
+ return (rc);
+
+ /* received something... */
+ nob = rc;
+
+ conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
+ conn->ksnc_rx_deadline =
+ cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ mb(); /* order with setting rx_started */
+ conn->ksnc_rx_started = 1;
+
+ conn->ksnc_rx_nob_wanted -= nob;
+ conn->ksnc_rx_nob_left -= nob;
+
+ do {
+ LASSERT (conn->ksnc_rx_nkiov > 0);
+
+ if (nob < (int) kiov->kiov_len) {
+ kiov->kiov_offset += nob;
+ kiov->kiov_len -= nob;
+ return -EAGAIN;
+ }
+
+ nob -= kiov->kiov_len;
+ conn->ksnc_rx_kiov = ++kiov;
+ conn->ksnc_rx_nkiov--;
+ } while (nob != 0);
+
+ return 1;
+}
+
+int
+ksocknal_receive (ksock_conn_t *conn)
+{
+ /* Return 1 on success, 0 on EOF, < 0 on error.
+ * Caller checks ksnc_rx_nob_wanted to determine
+ * progress/completion. */
+ int rc;
+
+ if (ksocknal_data.ksnd_stall_rx != 0) {
+ cfs_pause(cfs_time_seconds (ksocknal_data.ksnd_stall_rx));
+ }
+
+ rc = ksocknal_connsock_addref(conn);
+ if (rc != 0) {
+ LASSERT (conn->ksnc_closing);
+ return (-ESHUTDOWN);
+ }
+
+ for (;;) {
+ if (conn->ksnc_rx_niov != 0)
+ rc = ksocknal_recv_iov (conn);
+ else
+ rc = ksocknal_recv_kiov (conn);
+
+ if (rc <= 0) {
+ /* error/EOF or partial receive */
+ if (rc == -EAGAIN) {
+ rc = 1;
+ } else if (rc == 0 && conn->ksnc_rx_started) {
+ /* EOF in the middle of a message */
+ rc = -EPROTO;
+ }
+ break;
+ }
+
+ /* Completed a fragment */
+
+ if (conn->ksnc_rx_nob_wanted == 0) {
+ rc = 1;
+ break;
+ }
+ }
+
+ ksocknal_connsock_decref(conn);
+ return rc;
+}
+
+void
+ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx)
+{
+ lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
+ int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO;
+
+ LASSERT(ni != NULL || tx->tx_conn != NULL);
+
+ if (tx->tx_conn != NULL)
+ ksocknal_conn_decref(tx->tx_conn);
+
+ if (ni == NULL && tx->tx_conn != NULL)
+ ni = tx->tx_conn->ksnc_peer->ksnp_ni;
+
+ ksocknal_free_tx (tx);
+ if (lnetmsg != NULL) /* KSOCK_MSG_NOOP go without lnetmsg */
+ lnet_finalize (ni, lnetmsg, rc);
+}
+
+void
+ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error)
+{
+ ksock_tx_t *tx;
+
+ while (!list_empty (txlist)) {
+ tx = list_entry (txlist->next, ksock_tx_t, tx_list);
+
+ if (error && tx->tx_lnetmsg != NULL) {
+ CNETERR("Deleting packet type %d len %d %s->%s\n",
+ le32_to_cpu (tx->tx_lnetmsg->msg_hdr.type),
+ le32_to_cpu (tx->tx_lnetmsg->msg_hdr.payload_length),
+ libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
+ libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid)));
+ } else if (error) {
+ CNETERR("Deleting noop packet\n");
+ }
+
+ list_del (&tx->tx_list);
+
+ LASSERT (atomic_read(&tx->tx_refcount) == 1);
+ ksocknal_tx_done (ni, tx);
+ }
+}
+
+static void
+ksocknal_check_zc_req(ksock_tx_t *tx)
+{
+ ksock_conn_t *conn = tx->tx_conn;
+ ksock_peer_t *peer = conn->ksnc_peer;
+
+ /* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
+ * to ksnp_zc_req_list if some fragment of this message should be sent
+ * zero-copy. Our peer will send an ACK containing this cookie when
+ * she has received this message to tell us we can signal completion.
+ * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on
+ * ksnp_zc_req_list. */
+ LASSERT (tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
+ LASSERT (tx->tx_zc_capable);
+
+ tx->tx_zc_checked = 1;
+
+ if (conn->ksnc_proto == &ksocknal_protocol_v1x ||
+ !conn->ksnc_zc_capable)
+ return;
+
+ /* assign cookie and queue tx to pending list, it will be released when
+ * a matching ack is received. See ksocknal_handle_zcack() */
+
+ ksocknal_tx_addref(tx);
+
+ spin_lock(&peer->ksnp_lock);
+
+ /* ZC_REQ is going to be pinned to the peer */
+ tx->tx_deadline =
+ cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+
+ LASSERT (tx->tx_msg.ksm_zc_cookies[0] == 0);
+
+ tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++;
+
+ if (peer->ksnp_zc_next_cookie == 0)
+ peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
+
+ list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);
+
+ spin_unlock(&peer->ksnp_lock);
+}
+
+static void
+ksocknal_uncheck_zc_req(ksock_tx_t *tx)
+{
+ ksock_peer_t *peer = tx->tx_conn->ksnc_peer;
+
+ LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
+ LASSERT(tx->tx_zc_capable);
+
+ tx->tx_zc_checked = 0;
+
+ spin_lock(&peer->ksnp_lock);
+
+ if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
+ /* Not waiting for an ACK */
+ spin_unlock(&peer->ksnp_lock);
+ return;
+ }
+
+ tx->tx_msg.ksm_zc_cookies[0] = 0;
+ list_del(&tx->tx_zc_list);
+
+ spin_unlock(&peer->ksnp_lock);
+
+ ksocknal_tx_decref(tx);
+}
+
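+/* Transmit (part of) a tx on its connection and handle the outcome: 0 means
+ * fully sent, -EAGAIN means the socket is full, -ENOMEM parks the connection
+ * on ksnd_enomem_conns for the reaper to retry, and any other error closes
+ * the connection and its siblings. */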
+int
+ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
+{
+ int rc;
+
+ if (tx->tx_zc_capable && !tx->tx_zc_checked)
+ ksocknal_check_zc_req(tx);
+
+ rc = ksocknal_transmit (conn, tx);
+
+ CDEBUG (D_NET, "send(%d) %d\n", tx->tx_resid, rc);
+
+ if (tx->tx_resid == 0) {
+ /* Sent everything OK */
+ LASSERT (rc == 0);
+
+ return (0);
+ }
+
+ if (rc == -EAGAIN)
+ return (rc);
+
+ if (rc == -ENOMEM) {
+ static int counter;
+
+ counter++; /* exponential backoff warnings */
+ if ((counter & (-counter)) == counter)
+ CWARN("%u ENOMEM tx %p (%u allocated)\n",
+ counter, conn, atomic_read(&libcfs_kmemory));
+
+ /* Queue on ksnd_enomem_conns for retry after a timeout */
+ spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
+
+ /* enomem list takes over scheduler's ref... */
+ LASSERT (conn->ksnc_tx_scheduled);
+ list_add_tail(&conn->ksnc_tx_list,
+ &ksocknal_data.ksnd_enomem_conns);
+ if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
+ SOCKNAL_ENOMEM_RETRY),
+ ksocknal_data.ksnd_reaper_waketime))
+ wake_up (&ksocknal_data.ksnd_reaper_waitq);
+
+ spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
+ return (rc);
+ }
+
+ /* Actual error */
+ LASSERT (rc < 0);
+
+ if (!conn->ksnc_closing) {
+ switch (rc) {
+ case -ECONNRESET:
+ LCONSOLE_WARN("Host %pI4h reset our connection "
+ "while we were sending data; it may have "
+ "rebooted.\n",
+ &conn->ksnc_ipaddr);
+ break;
+ default:
+ LCONSOLE_WARN("There was an unexpected network error "
+ "while writing to %pI4h: %d.\n",
+ &conn->ksnc_ipaddr, rc);
+ break;
+ }
+ CDEBUG(D_NET, "[%p] Error %d on write to %s"
+ " ip %pI4h:%d\n", conn, rc,
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ &conn->ksnc_ipaddr,
+ conn->ksnc_port);
+ }
+
+ if (tx->tx_zc_checked)
+ ksocknal_uncheck_zc_req(tx);
+
+ /* it's not an error if conn is being closed */
+ ksocknal_close_conn_and_siblings (conn,
+ (conn->ksnc_closing) ? 0 : rc);
+
+ return (rc);
+}
+
+void
+ksocknal_launch_connection_locked (ksock_route_t *route)
+{
+ /* called holding write lock on ksnd_global_lock */
+
+ LASSERT (!route->ksnr_scheduled);
+ LASSERT (!route->ksnr_connecting);
+ LASSERT ((ksocknal_route_mask() & ~route->ksnr_connected) != 0);
+
+ route->ksnr_scheduled = 1; /* scheduling conn for connd */
+ ksocknal_route_addref(route); /* extra ref for connd */
+
+ spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
+
+ list_add_tail(&route->ksnr_connd_list,
+ &ksocknal_data.ksnd_connd_routes);
+ wake_up(&ksocknal_data.ksnd_connd_waitq);
+
+ spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
+}
+
+void
+ksocknal_launch_all_connections_locked (ksock_peer_t *peer)
+{
+ ksock_route_t *route;
+
+ /* called holding write lock on ksnd_global_lock */
+ for (;;) {
+ /* launch any/all connections that need it */
+ route = ksocknal_find_connectable_route_locked(peer);
+ if (route == NULL)
+ return;
+
+ ksocknal_launch_connection_locked(route);
+ }
+}
+
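+/* Pick the best existing connection for this tx: prefer a typed match over
+ * a "may" match, and within each class take the connection with the least
+ * data queued (ties broken round-robin when ksnd_round_robin is set). */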
+ksock_conn_t *
+ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
+{
+ struct list_head *tmp;
+ ksock_conn_t *conn;
+ ksock_conn_t *typed = NULL;
+ ksock_conn_t *fallback = NULL;
+ int tnob = 0;
+ int fnob = 0;
+
+ list_for_each (tmp, &peer->ksnp_conns) {
+ ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list);
+ int nob = atomic_read(&c->ksnc_tx_nob) +
+ cfs_sock_wmem_queued(c->ksnc_sock);
+ int rc;
+
+ LASSERT (!c->ksnc_closing);
+ LASSERT (c->ksnc_proto != NULL &&
+ c->ksnc_proto->pro_match_tx != NULL);
+
+ rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk);
+
+ switch (rc) {
+ default:
+ LBUG();
+ case SOCKNAL_MATCH_NO: /* protocol rejected the tx */
+ continue;
+
+ case SOCKNAL_MATCH_YES: /* typed connection */
+ if (typed == NULL || tnob > nob ||
+ (tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
+ cfs_time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
+ typed = c;
+ tnob = nob;
+ }
+ break;
+
+ case SOCKNAL_MATCH_MAY: /* fallback connection */
+ if (fallback == NULL || fnob > nob ||
+ (fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
+ cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
+ fallback = c;
+ fnob = nob;
+ }
+ break;
+ }
+ }
+
+ /* prefer the typed selection */
+ conn = (typed != NULL) ? typed : fallback;
+
+ if (conn != NULL)
+ conn->ksnc_tx_last_post = cfs_time_current();
+
+ return conn;
+}
+
+void
+ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx)
+{
+ conn->ksnc_proto->pro_pack(tx);
+
+ atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
+ ksocknal_conn_addref(conn); /* +1 ref for tx */
+ tx->tx_conn = conn;
+}
+
+void
+ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
+{
+ ksock_sched_t *sched = conn->ksnc_scheduler;
+ ksock_msg_t *msg = &tx->tx_msg;
+ ksock_tx_t *ztx = NULL;
+ int bufnob = 0;
+
+ /* called holding global lock (read or irq-write) and caller may
+ * not have dropped this lock between finding conn and calling me,
+ * so we don't need the {get,put}connsock dance to deref
+ * ksnc_sock... */
+ LASSERT(!conn->ksnc_closing);
+
+ CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n",
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ &conn->ksnc_ipaddr,
+ conn->ksnc_port);
+
+ ksocknal_tx_prep(conn, tx);
+
+ /* Ensure the frags we've been given EXACTLY match the number of
+ * bytes we want to send. Many TCP/IP stacks disregard any total
+ * size parameters passed to them and just look at the frags.
+ *
+ * We always expect at least 1 mapped fragment containing the
+ * complete ksocknal message header. */
+ LASSERT (lnet_iov_nob (tx->tx_niov, tx->tx_iov) +
+ lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
+ (unsigned int)tx->tx_nob);
+ LASSERT (tx->tx_niov >= 1);
+ LASSERT (tx->tx_resid == tx->tx_nob);
+
+ CDEBUG (D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
+ tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type:
+ KSOCK_MSG_NOOP,
+ tx->tx_nob, tx->tx_niov, tx->tx_nkiov);
+
+ /*
+ * FIXME: SOCK_WMEM_QUEUED and SOCK_ERROR could block in __DARWIN8__
+ * but they're used inside spinlocks a lot.
+ */
+ bufnob = cfs_sock_wmem_queued(conn->ksnc_sock);
+ spin_lock_bh(&sched->kss_lock);
+
+ if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
+ /* First packet starts the timeout */
+ conn->ksnc_tx_deadline =
+ cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
+ conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
+ conn->ksnc_tx_bufnob = 0;
+ mb(); /* order with adding to tx_queue */
+ }
+
+ if (msg->ksm_type == KSOCK_MSG_NOOP) {
+		/* The packet is a noop ZC ACK; try to piggyback the ack_cookie
+		 * on a normal packet so I don't need to send it separately */
+ LASSERT (msg->ksm_zc_cookies[1] != 0);
+ LASSERT (conn->ksnc_proto->pro_queue_tx_zcack != NULL);
+
+ if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
+			ztx = tx; /* ZC ACK piggybacked; free this noop tx later */
+
+ } else {
+		/* It's a normal packet - can it piggyback a noop zc-ack that
+ * has been queued already? */
+ LASSERT (msg->ksm_zc_cookies[1] == 0);
+ LASSERT (conn->ksnc_proto->pro_queue_tx_msg != NULL);
+
+ ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx);
+ /* ztx will be released later */
+ }
+
+ if (ztx != NULL) {
+ atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
+ list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
+ }
+
+ if (conn->ksnc_tx_ready && /* able to send */
+ !conn->ksnc_tx_scheduled) { /* not scheduled to send */
+ /* +1 ref for scheduler */
+ ksocknal_conn_addref(conn);
+ list_add_tail (&conn->ksnc_tx_list,
+ &sched->kss_tx_conns);
+ conn->ksnc_tx_scheduled = 1;
+ wake_up (&sched->kss_waitq);
+ }
+
+ spin_unlock_bh(&sched->kss_lock);
+}
+
+ksock_route_t *
+ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
+{
+ cfs_time_t now = cfs_time_current();
+ struct list_head *tmp;
+ ksock_route_t *route;
+
+ list_for_each (tmp, &peer->ksnp_routes) {
+ route = list_entry (tmp, ksock_route_t, ksnr_list);
+
+ LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
+
+ if (route->ksnr_scheduled) /* connections being established */
+ continue;
+
+ /* all route types connected ? */
+ if ((ksocknal_route_mask() & ~route->ksnr_connected) == 0)
+ continue;
+
+ if (!(route->ksnr_retry_interval == 0 || /* first attempt */
+ cfs_time_aftereq(now, route->ksnr_timeout))) {
+ CDEBUG(D_NET,
+ "Too soon to retry route %pI4h "
+ "(cnted %d, interval %ld, %ld secs later)\n",
+ &route->ksnr_ipaddr,
+ route->ksnr_connected,
+ route->ksnr_retry_interval,
+ cfs_duration_sec(route->ksnr_timeout - now));
+ continue;
+ }
+
+ return (route);
+ }
+
+ return (NULL);
+}
+
+ksock_route_t *
+ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
+{
+ struct list_head *tmp;
+ ksock_route_t *route;
+
+ list_for_each (tmp, &peer->ksnp_routes) {
+ route = list_entry (tmp, ksock_route_t, ksnr_list);
+
+ LASSERT (!route->ksnr_connecting || route->ksnr_scheduled);
+
+ if (route->ksnr_scheduled)
+ return (route);
+ }
+
+ return (NULL);
+}
+
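+/* Find the peer for 'id' (creating it unless 'id' is a userspace process)
+ * and queue 'tx' on the best available connection; if connections are still
+ * being established, the tx is parked on the peer's tx queue until one
+ * comes up. */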
+int
+ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
+{
+ ksock_peer_t *peer;
+ ksock_conn_t *conn;
+ rwlock_t *g_lock;
+ int retry;
+ int rc;
+
+ LASSERT (tx->tx_conn == NULL);
+
+ g_lock = &ksocknal_data.ksnd_global_lock;
+
+ for (retry = 0;; retry = 1) {
+ read_lock(g_lock);
+ peer = ksocknal_find_peer_locked(ni, id);
+ if (peer != NULL) {
+ if (ksocknal_find_connectable_route_locked(peer) == NULL) {
+ conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
+ if (conn != NULL) {
+ /* I've got no routes that need to be
+ * connecting and I do have an actual
+ * connection... */
+ ksocknal_queue_tx_locked (tx, conn);
+ read_unlock(g_lock);
+ return (0);
+ }
+ }
+ }
+
+ /* I'll need a write lock... */
+ read_unlock(g_lock);
+
+ write_lock_bh(g_lock);
+
+ peer = ksocknal_find_peer_locked(ni, id);
+ if (peer != NULL)
+ break;
+
+ write_unlock_bh(g_lock);
+
+ if ((id.pid & LNET_PID_USERFLAG) != 0) {
+ CERROR("Refusing to create a connection to "
+ "userspace process %s\n", libcfs_id2str(id));
+ return -EHOSTUNREACH;
+ }
+
+ if (retry) {
+ CERROR("Can't find peer %s\n", libcfs_id2str(id));
+ return -EHOSTUNREACH;
+ }
+
+ rc = ksocknal_add_peer(ni, id,
+ LNET_NIDADDR(id.nid),
+ lnet_acceptor_port());
+ if (rc != 0) {
+ CERROR("Can't add peer %s: %d\n",
+ libcfs_id2str(id), rc);
+ return rc;
+ }
+ }
+
+ ksocknal_launch_all_connections_locked(peer);
+
+ conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
+ if (conn != NULL) {
+ /* Connection exists; queue message on it */
+ ksocknal_queue_tx_locked (tx, conn);
+ write_unlock_bh(g_lock);
+ return (0);
+ }
+
+ if (peer->ksnp_accepting > 0 ||
+ ksocknal_find_connecting_route_locked (peer) != NULL) {
+ /* the message is going to be pinned to the peer */
+ tx->tx_deadline =
+ cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+
+ /* Queue the message until a connection is established */
+ list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
+ write_unlock_bh(g_lock);
+ return 0;
+ }
+
+ write_unlock_bh(g_lock);
+
+ /* NB Routes may be ignored if connections to them failed recently */
+ CNETERR("No usable routes to %s\n", libcfs_id2str(id));
+ return (-EHOSTUNREACH);
+}
+
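+/* lnd_send() entry point: wrap the LNet message in a tx descriptor (iovec
+ * or kiov payload, never both), mark paged payloads of at least
+ * ksnd_zc_min_payload bytes as ZC-capable, and hand the tx to
+ * ksocknal_launch_packet(). */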
+int
+ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
+{
+ int mpflag = 0;
+ int type = lntmsg->msg_type;
+ lnet_process_id_t target = lntmsg->msg_target;
+ unsigned int payload_niov = lntmsg->msg_niov;
+ struct iovec *payload_iov = lntmsg->msg_iov;
+ lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
+ unsigned int payload_offset = lntmsg->msg_offset;
+ unsigned int payload_nob = lntmsg->msg_len;
+ ksock_tx_t *tx;
+ int desc_size;
+ int rc;
+
+ /* NB 'private' is different depending on what we're sending.
+ * Just ignore it... */
+
+ CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
+ payload_nob, payload_niov, libcfs_id2str(target));
+
+ LASSERT (payload_nob == 0 || payload_niov > 0);
+ LASSERT (payload_niov <= LNET_MAX_IOV);
+ /* payload is either all vaddrs or all pages */
+ LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
+ LASSERT (!in_interrupt ());
+
+ if (payload_iov != NULL)
+ desc_size = offsetof(ksock_tx_t,
+ tx_frags.virt.iov[1 + payload_niov]);
+ else
+ desc_size = offsetof(ksock_tx_t,
+ tx_frags.paged.kiov[payload_niov]);
+
+ if (lntmsg->msg_vmflush)
+ mpflag = cfs_memory_pressure_get_and_set();
+ tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
+ if (tx == NULL) {
+ CERROR("Can't allocate tx desc type %d size %d\n",
+ type, desc_size);
+ if (lntmsg->msg_vmflush)
+ cfs_memory_pressure_restore(mpflag);
+ return (-ENOMEM);
+ }
+
+ tx->tx_conn = NULL; /* set when assigned a conn */
+ tx->tx_lnetmsg = lntmsg;
+
+ if (payload_iov != NULL) {
+ tx->tx_kiov = NULL;
+ tx->tx_nkiov = 0;
+ tx->tx_iov = tx->tx_frags.virt.iov;
+ tx->tx_niov = 1 +
+ lnet_extract_iov(payload_niov, &tx->tx_iov[1],
+ payload_niov, payload_iov,
+ payload_offset, payload_nob);
+ } else {
+ tx->tx_niov = 1;
+ tx->tx_iov = &tx->tx_frags.paged.iov;
+ tx->tx_kiov = tx->tx_frags.paged.kiov;
+ tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov,
+ payload_niov, payload_kiov,
+ payload_offset, payload_nob);
+
+ if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload)
+ tx->tx_zc_capable = 1;
+ }
+
+ socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_LNET);
+
+ /* The first fragment will be set later in pro_pack */
+ rc = ksocknal_launch_packet(ni, tx, target);
+ if (lntmsg->msg_vmflush)
+ cfs_memory_pressure_restore(mpflag);
+ if (rc == 0)
+ return (0);
+
+ ksocknal_free_tx(tx);
+ return (-EIO);
+}
+
+int
+ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
+{
+ struct task_struct *task = kthread_run(fn, arg, "%s", name);
+
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
+ ksocknal_data.ksnd_nthreads++;
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
+ return 0;
+}
+
+void
+ksocknal_thread_fini (void)
+{
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
+ ksocknal_data.ksnd_nthreads--;
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
+}
+
+int
+ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
+{
+ static char ksocknal_slop_buffer[4096];
+
+ int nob;
+ unsigned int niov;
+ int skipped;
+
+ LASSERT(conn->ksnc_proto != NULL);
+
+ if ((*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) != 0) {
+ /* Remind the socket to ack eagerly... */
+ ksocknal_lib_eager_ack(conn);
+ }
+
+ if (nob_to_skip == 0) { /* right at next packet boundary now */
+ conn->ksnc_rx_started = 0;
+ mb(); /* racing with timeout thread */
+
+ switch (conn->ksnc_proto->pro_version) {
+ case KSOCK_PROTO_V2:
+ case KSOCK_PROTO_V3:
+ conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
+ conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
+ conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg;
+
+ conn->ksnc_rx_nob_wanted = offsetof(ksock_msg_t, ksm_u);
+ conn->ksnc_rx_nob_left = offsetof(ksock_msg_t, ksm_u);
+ conn->ksnc_rx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u);
+ break;
+
+ case KSOCK_PROTO_V1:
+ /* Receiving bare lnet_hdr_t */
+ conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
+ conn->ksnc_rx_nob_wanted = sizeof(lnet_hdr_t);
+ conn->ksnc_rx_nob_left = sizeof(lnet_hdr_t);
+
+ conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
+ conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
+ conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t);
+ break;
+
+ default:
+ LBUG ();
+ }
+ conn->ksnc_rx_niov = 1;
+
+ conn->ksnc_rx_kiov = NULL;
+ conn->ksnc_rx_nkiov = 0;
+ conn->ksnc_rx_csum = ~0;
+ return (1);
+ }
+
+ /* Set up to skip as much as possible now. If there's more left
+ * (ran out of iov entries) we'll get called again */
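+	/* For example: skipping 10000 bytes with the 4096-byte slop buffer
+	 * above takes three iov entries of 4096, 4096 and 1808 bytes;
+	 * anything that doesn't fit in the rx iov space is left for the
+	 * next call. */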
+
+ conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
+ conn->ksnc_rx_nob_left = nob_to_skip;
+ conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
+ skipped = 0;
+ niov = 0;
+
+ do {
+ nob = MIN (nob_to_skip, sizeof (ksocknal_slop_buffer));
+
+ conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
+ conn->ksnc_rx_iov[niov].iov_len = nob;
+ niov++;
+ skipped += nob;
+		nob_to_skip -= nob;
+
+ } while (nob_to_skip != 0 && /* mustn't overflow conn's rx iov */
+ niov < sizeof(conn->ksnc_rx_iov_space) / sizeof (struct iovec));
+
+ conn->ksnc_rx_niov = niov;
+ conn->ksnc_rx_kiov = NULL;
+ conn->ksnc_rx_nkiov = 0;
+ conn->ksnc_rx_nob_wanted = skipped;
+ return (0);
+}
+
+int
+ksocknal_process_receive (ksock_conn_t *conn)
+{
+ lnet_hdr_t *lhdr;
+ lnet_process_id_t *id;
+ int rc;
+
+ LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
+
+ /* NB: sched lock NOT held */
+ /* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
+ LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
+ conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
+ conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
+ conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
+ again:
+ if (conn->ksnc_rx_nob_wanted != 0) {
+ rc = ksocknal_receive(conn);
+
+ if (rc <= 0) {
+ LASSERT (rc != -EAGAIN);
+
+ if (rc == 0)
+ CDEBUG(D_NET, "[%p] EOF from %s"
+ " ip %pI4h:%d\n", conn,
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ &conn->ksnc_ipaddr,
+ conn->ksnc_port);
+ else if (!conn->ksnc_closing)
+ CERROR("[%p] Error %d on read from %s"
+ " ip %pI4h:%d\n",
+ conn, rc,
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ &conn->ksnc_ipaddr,
+ conn->ksnc_port);
+
+ /* it's not an error if conn is being closed */
+ ksocknal_close_conn_and_siblings (conn,
+ (conn->ksnc_closing) ? 0 : rc);
+ return (rc == 0 ? -ESHUTDOWN : rc);
+ }
+
+ if (conn->ksnc_rx_nob_wanted != 0) {
+ /* short read */
+ return (-EAGAIN);
+ }
+ }
+ switch (conn->ksnc_rx_state) {
+ case SOCKNAL_RX_KSM_HEADER:
+ if (conn->ksnc_flip) {
+ __swab32s(&conn->ksnc_msg.ksm_type);
+ __swab32s(&conn->ksnc_msg.ksm_csum);
+ __swab64s(&conn->ksnc_msg.ksm_zc_cookies[0]);
+ __swab64s(&conn->ksnc_msg.ksm_zc_cookies[1]);
+ }
+
+ if (conn->ksnc_msg.ksm_type != KSOCK_MSG_NOOP &&
+ conn->ksnc_msg.ksm_type != KSOCK_MSG_LNET) {
+ CERROR("%s: Unknown message type: %x\n",
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ conn->ksnc_msg.ksm_type);
+ ksocknal_new_packet(conn, 0);
+ ksocknal_close_conn_and_siblings(conn, -EPROTO);
+ return (-EPROTO);
+ }
+
+ if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP &&
+ conn->ksnc_msg.ksm_csum != 0 && /* has checksum */
+ conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
+ /* NOOP Checksum error */
+ CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
+ ksocknal_new_packet(conn, 0);
+ ksocknal_close_conn_and_siblings(conn, -EPROTO);
+ return (-EIO);
+ }
+
+ if (conn->ksnc_msg.ksm_zc_cookies[1] != 0) {
+ __u64 cookie = 0;
+
+ LASSERT (conn->ksnc_proto != &ksocknal_protocol_v1x);
+
+ if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP)
+ cookie = conn->ksnc_msg.ksm_zc_cookies[0];
+
+ rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie,
+ conn->ksnc_msg.ksm_zc_cookies[1]);
+
+ if (rc != 0) {
+ CERROR("%s: Unknown ZC-ACK cookie: "LPU64", "LPU64"\n",
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ cookie, conn->ksnc_msg.ksm_zc_cookies[1]);
+ ksocknal_new_packet(conn, 0);
+ ksocknal_close_conn_and_siblings(conn, -EPROTO);
+ return (rc);
+ }
+ }
+
+ if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) {
+ ksocknal_new_packet (conn, 0);
+ return 0; /* NOOP is done and just return */
+ }
+
+ conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
+ conn->ksnc_rx_nob_wanted = sizeof(ksock_lnet_msg_t);
+ conn->ksnc_rx_nob_left = sizeof(ksock_lnet_msg_t);
+
+ conn->ksnc_rx_iov = (struct iovec *)&conn->ksnc_rx_iov_space;
+ conn->ksnc_rx_iov[0].iov_base = (char *)&conn->ksnc_msg.ksm_u.lnetmsg;
+ conn->ksnc_rx_iov[0].iov_len = sizeof(ksock_lnet_msg_t);
+
+ conn->ksnc_rx_niov = 1;
+ conn->ksnc_rx_kiov = NULL;
+ conn->ksnc_rx_nkiov = 0;
+
+ goto again; /* read lnet header now */
+
+ case SOCKNAL_RX_LNET_HEADER:
+ /* unpack message header */
+ conn->ksnc_proto->pro_unpack(&conn->ksnc_msg);
+
+ if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) {
+ /* Userspace peer */
+ lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
+ id = &conn->ksnc_peer->ksnp_id;
+
+ /* Substitute process ID assigned at connection time */
+ lhdr->src_pid = cpu_to_le32(id->pid);
+ lhdr->src_nid = cpu_to_le64(id->nid);
+ }
+
+ conn->ksnc_rx_state = SOCKNAL_RX_PARSE;
+ ksocknal_conn_addref(conn); /* ++ref while parsing */
+
+ rc = lnet_parse(conn->ksnc_peer->ksnp_ni,
+ &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr,
+ conn->ksnc_peer->ksnp_id.nid, conn, 0);
+ if (rc < 0) {
+ /* I just received garbage: give up on this conn */
+ ksocknal_new_packet(conn, 0);
+ ksocknal_close_conn_and_siblings (conn, rc);
+ ksocknal_conn_decref(conn);
+ return (-EPROTO);
+ }
+
+ /* I'm racing with ksocknal_recv() */
+ LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
+ conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);
+
+ if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD)
+ return 0;
+
+ /* ksocknal_recv() got called */
+ goto again;
+
+ case SOCKNAL_RX_LNET_PAYLOAD:
+ /* payload all received */
+ rc = 0;
+
+ if (conn->ksnc_rx_nob_left == 0 && /* not truncating */
+ conn->ksnc_msg.ksm_csum != 0 && /* has checksum */
+ conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
+ CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
+ rc = -EIO;
+ }
+
+ if (rc == 0 && conn->ksnc_msg.ksm_zc_cookies[0] != 0) {
+ LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);
+
+ lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
+ id = &conn->ksnc_peer->ksnp_id;
+
+ rc = conn->ksnc_proto->pro_handle_zcreq(conn,
+ conn->ksnc_msg.ksm_zc_cookies[0],
+ *ksocknal_tunables.ksnd_nonblk_zcack ||
+ le64_to_cpu(lhdr->src_nid) != id->nid);
+ }
+
+ lnet_finalize(conn->ksnc_peer->ksnp_ni, conn->ksnc_cookie, rc);
+
+ if (rc != 0) {
+ ksocknal_new_packet(conn, 0);
+ ksocknal_close_conn_and_siblings (conn, rc);
+ return (-EPROTO);
+ }
+ /* Fall through */
+
+ case SOCKNAL_RX_SLOP:
+ /* starting new packet? */
+ if (ksocknal_new_packet (conn, conn->ksnc_rx_nob_left))
+ return 0; /* come back later */
+ goto again; /* try to finish reading slop now */
+
+ default:
+ break;
+ }
+
+ /* Not Reached */
+ LBUG ();
+ return (-EINVAL); /* keep gcc happy */
+}
+
+int
+ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
+ unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
+ unsigned int offset, unsigned int mlen, unsigned int rlen)
+{
+ ksock_conn_t *conn = (ksock_conn_t *)private;
+ ksock_sched_t *sched = conn->ksnc_scheduler;
+
+ LASSERT (mlen <= rlen);
+ LASSERT (niov <= LNET_MAX_IOV);
+
+ conn->ksnc_cookie = msg;
+ conn->ksnc_rx_nob_wanted = mlen;
+ conn->ksnc_rx_nob_left = rlen;
+
+ if (mlen == 0 || iov != NULL) {
+ conn->ksnc_rx_nkiov = 0;
+ conn->ksnc_rx_kiov = NULL;
+ conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
+ conn->ksnc_rx_niov =
+ lnet_extract_iov(LNET_MAX_IOV, conn->ksnc_rx_iov,
+ niov, iov, offset, mlen);
+ } else {
+ conn->ksnc_rx_niov = 0;
+ conn->ksnc_rx_iov = NULL;
+ conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
+ conn->ksnc_rx_nkiov =
+ lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
+ niov, kiov, offset, mlen);
+ }
+
+ LASSERT (mlen ==
+ lnet_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
+ lnet_kiov_nob (conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));
+
+ LASSERT (conn->ksnc_rx_scheduled);
+
+ spin_lock_bh(&sched->kss_lock);
+
+ switch (conn->ksnc_rx_state) {
+ case SOCKNAL_RX_PARSE_WAIT:
+ list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
+ wake_up (&sched->kss_waitq);
+ LASSERT (conn->ksnc_rx_ready);
+ break;
+
+ case SOCKNAL_RX_PARSE:
+ /* scheduler hasn't noticed I'm parsing yet */
+ break;
+ }
+
+ conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD;
+
+ spin_unlock_bh(&sched->kss_lock);
+ ksocknal_conn_decref(conn);
+ return 0;
+}
+
+static inline int
+ksocknal_sched_cansleep(ksock_sched_t *sched)
+{
+ int rc;
+
+ spin_lock_bh(&sched->kss_lock);
+
+ rc = (!ksocknal_data.ksnd_shuttingdown &&
+ list_empty(&sched->kss_rx_conns) &&
+ list_empty(&sched->kss_tx_conns));
+
+ spin_unlock_bh(&sched->kss_lock);
+ return rc;
+}
+
+int ksocknal_scheduler(void *arg)
+{
+ struct ksock_sched_info *info;
+ ksock_sched_t *sched;
+ ksock_conn_t *conn;
+ ksock_tx_t *tx;
+ int rc;
+ int nloops = 0;
+ long id = (long)arg;
+
+ info = ksocknal_data.ksnd_sched_info[KSOCK_THREAD_CPT(id)];
+ sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
+
+ cfs_block_allsigs();
+
+ rc = cfs_cpt_bind(lnet_cpt_table(), info->ksi_cpt);
+ if (rc != 0) {
+ CERROR("Can't set CPT affinity to %d: %d\n",
+ info->ksi_cpt, rc);
+ }
+
+ spin_lock_bh(&sched->kss_lock);
+
+ while (!ksocknal_data.ksnd_shuttingdown) {
+ int did_something = 0;
+
+ /* Ensure I progress everything semi-fairly */
+
+ if (!list_empty (&sched->kss_rx_conns)) {
+ conn = list_entry(sched->kss_rx_conns.next,
+ ksock_conn_t, ksnc_rx_list);
+ list_del(&conn->ksnc_rx_list);
+
+ LASSERT(conn->ksnc_rx_scheduled);
+ LASSERT(conn->ksnc_rx_ready);
+
+ /* clear rx_ready in case receive isn't complete.
+			 * Do it BEFORE we call ksocknal_process_receive(), since
+ * data_ready can set it any time after we release
+ * kss_lock. */
+ conn->ksnc_rx_ready = 0;
+ spin_unlock_bh(&sched->kss_lock);
+
+ rc = ksocknal_process_receive(conn);
+
+ spin_lock_bh(&sched->kss_lock);
+
+ /* I'm the only one that can clear this flag */
+ LASSERT(conn->ksnc_rx_scheduled);
+
+ /* Did process_receive get everything it wanted? */
+ if (rc == 0)
+ conn->ksnc_rx_ready = 1;
+
+ if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
+ /* Conn blocked waiting for ksocknal_recv()
+ * I change its state (under lock) to signal
+ * it can be rescheduled */
+ conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
+ } else if (conn->ksnc_rx_ready) {
+ /* reschedule for rx */
+ list_add_tail (&conn->ksnc_rx_list,
+ &sched->kss_rx_conns);
+ } else {
+ conn->ksnc_rx_scheduled = 0;
+ /* drop my ref */
+ ksocknal_conn_decref(conn);
+ }
+
+ did_something = 1;
+ }
+
+ if (!list_empty (&sched->kss_tx_conns)) {
+ LIST_HEAD (zlist);
+
+ if (!list_empty(&sched->kss_zombie_noop_txs)) {
+ list_add(&zlist,
+ &sched->kss_zombie_noop_txs);
+ list_del_init(&sched->kss_zombie_noop_txs);
+ }
+
+ conn = list_entry(sched->kss_tx_conns.next,
+ ksock_conn_t, ksnc_tx_list);
+ list_del (&conn->ksnc_tx_list);
+
+ LASSERT(conn->ksnc_tx_scheduled);
+ LASSERT(conn->ksnc_tx_ready);
+ LASSERT(!list_empty(&conn->ksnc_tx_queue));
+
+ tx = list_entry(conn->ksnc_tx_queue.next,
+ ksock_tx_t, tx_list);
+
+ if (conn->ksnc_tx_carrier == tx)
+ ksocknal_next_tx_carrier(conn);
+
+ /* dequeue now so empty list => more to send */
+ list_del(&tx->tx_list);
+
+ /* Clear tx_ready in case send isn't complete. Do
+ * it BEFORE we call process_transmit, since
+ * write_space can set it any time after we release
+ * kss_lock. */
+ conn->ksnc_tx_ready = 0;
+ spin_unlock_bh(&sched->kss_lock);
+
+ if (!list_empty(&zlist)) {
+				/* freeing zombie noop txs is fast because
+				 * they are just returned to the freelist */
+ ksocknal_txlist_done(NULL, &zlist, 0);
+ }
+
+ rc = ksocknal_process_transmit(conn, tx);
+
+ if (rc == -ENOMEM || rc == -EAGAIN) {
+ /* Incomplete send: replace tx on HEAD of tx_queue */
+ spin_lock_bh(&sched->kss_lock);
+ list_add(&tx->tx_list,
+ &conn->ksnc_tx_queue);
+ } else {
+ /* Complete send; tx -ref */
+ ksocknal_tx_decref(tx);
+
+ spin_lock_bh(&sched->kss_lock);
+ /* assume space for more */
+ conn->ksnc_tx_ready = 1;
+ }
+
+ if (rc == -ENOMEM) {
+ /* Do nothing; after a short timeout, this
+ * conn will be reposted on kss_tx_conns. */
+ } else if (conn->ksnc_tx_ready &&
+ !list_empty (&conn->ksnc_tx_queue)) {
+ /* reschedule for tx */
+ list_add_tail (&conn->ksnc_tx_list,
+ &sched->kss_tx_conns);
+ } else {
+ conn->ksnc_tx_scheduled = 0;
+ /* drop my ref */
+ ksocknal_conn_decref(conn);
+ }
+
+ did_something = 1;
+ }
+ if (!did_something || /* nothing to do */
+ ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
+ spin_unlock_bh(&sched->kss_lock);
+
+ nloops = 0;
+
+ if (!did_something) { /* wait for something to do */
+ cfs_wait_event_interruptible_exclusive(
+ sched->kss_waitq,
+ !ksocknal_sched_cansleep(sched), rc);
+ LASSERT (rc == 0);
+ } else {
+ cond_resched();
+ }
+
+ spin_lock_bh(&sched->kss_lock);
+ }
+ }
+
+ spin_unlock_bh(&sched->kss_lock);
+ ksocknal_thread_fini();
+ return 0;
+}
+
+/*
+ * Add connection to kss_rx_conns of scheduler
+ * and wakeup the scheduler.
+ */
+void ksocknal_read_callback (ksock_conn_t *conn)
+{
+ ksock_sched_t *sched;
+
+ sched = conn->ksnc_scheduler;
+
+ spin_lock_bh(&sched->kss_lock);
+
+ conn->ksnc_rx_ready = 1;
+
+ if (!conn->ksnc_rx_scheduled) { /* not being progressed */
+ list_add_tail(&conn->ksnc_rx_list,
+ &sched->kss_rx_conns);
+ conn->ksnc_rx_scheduled = 1;
+ /* extra ref for scheduler */
+ ksocknal_conn_addref(conn);
+
+ wake_up (&sched->kss_waitq);
+ }
+ spin_unlock_bh(&sched->kss_lock);
+}
+
+/*
+ * Add connection to kss_tx_conns of scheduler
+ * and wakeup the scheduler.
+ */
+void ksocknal_write_callback (ksock_conn_t *conn)
+{
+ ksock_sched_t *sched;
+
+ sched = conn->ksnc_scheduler;
+
+ spin_lock_bh(&sched->kss_lock);
+
+ conn->ksnc_tx_ready = 1;
+
+	if (!conn->ksnc_tx_scheduled &&		/* not being progressed */
+	    !list_empty(&conn->ksnc_tx_queue)) {	/* packets to send */
+ list_add_tail (&conn->ksnc_tx_list,
+ &sched->kss_tx_conns);
+ conn->ksnc_tx_scheduled = 1;
+ /* extra ref for scheduler */
+ ksocknal_conn_addref(conn);
+
+ wake_up (&sched->kss_waitq);
+ }
+
+ spin_unlock_bh(&sched->kss_lock);
+}
+
+ksock_proto_t *
+ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
+{
+ __u32 version = 0;
+
+ if (hello->kshm_magic == LNET_PROTO_MAGIC)
+ version = hello->kshm_version;
+ else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC))
+ version = __swab32(hello->kshm_version);
+
+ if (version != 0) {
+#if SOCKNAL_VERSION_DEBUG
+ if (*ksocknal_tunables.ksnd_protocol == 1)
+ return NULL;
+
+ if (*ksocknal_tunables.ksnd_protocol == 2 &&
+ version == KSOCK_PROTO_V3)
+ return NULL;
+#endif
+ if (version == KSOCK_PROTO_V2)
+ return &ksocknal_protocol_v2x;
+
+ if (version == KSOCK_PROTO_V3)
+ return &ksocknal_protocol_v3x;
+
+ return NULL;
+ }
+
+ if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
+ lnet_magicversion_t *hmv = (lnet_magicversion_t *)hello;
+
+ CLASSERT (sizeof (lnet_magicversion_t) ==
+ offsetof (ksock_hello_msg_t, kshm_src_nid));
+
+ if (hmv->version_major == cpu_to_le16 (KSOCK_PROTO_V1_MAJOR) &&
+ hmv->version_minor == cpu_to_le16 (KSOCK_PROTO_V1_MINOR))
+ return &ksocknal_protocol_v1x;
+ }
+
+ return NULL;
+}
+
+int
+ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn,
+ lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
+{
+ /* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
+ ksock_net_t *net = (ksock_net_t *)ni->ni_data;
+
+ LASSERT (hello->kshm_nips <= LNET_MAX_INTERFACES);
+
+	/* rely on the caller to hold a ref on the socket so it won't disappear */
+ LASSERT (conn->ksnc_proto != NULL);
+
+ hello->kshm_src_nid = ni->ni_nid;
+ hello->kshm_dst_nid = peer_nid;
+ hello->kshm_src_pid = the_lnet.ln_pid;
+
+ hello->kshm_src_incarnation = net->ksnn_incarnation;
+ hello->kshm_ctype = conn->ksnc_type;
+
+ return conn->ksnc_proto->pro_send_hello(conn, hello);
+}
+
+int
+ksocknal_invert_type(int type)
+{
+ switch (type)
+ {
+ case SOCKLND_CONN_ANY:
+ case SOCKLND_CONN_CONTROL:
+ return (type);
+ case SOCKLND_CONN_BULK_IN:
+ return SOCKLND_CONN_BULK_OUT;
+ case SOCKLND_CONN_BULK_OUT:
+ return SOCKLND_CONN_BULK_IN;
+ default:
+ return (SOCKLND_CONN_NONE);
+ }
+}
+
+int
+ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
+ ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
+ __u64 *incarnation)
+{
+ /* Return < 0 fatal error
+ * 0 success
+ * EALREADY lost connection race
+ * EPROTO protocol version mismatch
+ */
+ socket_t *sock = conn->ksnc_sock;
+ int active = (conn->ksnc_proto != NULL);
+ int timeout;
+ int proto_match;
+ int rc;
+ ksock_proto_t *proto;
+ lnet_process_id_t recv_id;
+
+ /* socket type set on active connections - not set on passive */
+ LASSERT (!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));
+
+ timeout = active ? *ksocknal_tunables.ksnd_timeout :
+ lnet_acceptor_timeout();
+
+ rc = libcfs_sock_read(sock, &hello->kshm_magic, sizeof (hello->kshm_magic), timeout);
+ if (rc != 0) {
+ CERROR("Error %d reading HELLO from %pI4h\n",
+ rc, &conn->ksnc_ipaddr);
+ LASSERT (rc < 0);
+ return rc;
+ }
+
+ if (hello->kshm_magic != LNET_PROTO_MAGIC &&
+ hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
+ hello->kshm_magic != le32_to_cpu (LNET_PROTO_TCP_MAGIC)) {
+ /* Unexpected magic! */
+ CERROR("Bad magic(1) %#08x (%#08x expected) from "
+ "%pI4h\n", __cpu_to_le32 (hello->kshm_magic),
+ LNET_PROTO_TCP_MAGIC,
+ &conn->ksnc_ipaddr);
+ return -EPROTO;
+ }
+
+ rc = libcfs_sock_read(sock, &hello->kshm_version,
+ sizeof(hello->kshm_version), timeout);
+ if (rc != 0) {
+ CERROR("Error %d reading HELLO from %pI4h\n",
+ rc, &conn->ksnc_ipaddr);
+ LASSERT (rc < 0);
+ return rc;
+ }
+
+ proto = ksocknal_parse_proto_version(hello);
+ if (proto == NULL) {
+ if (!active) {
+ /* unknown protocol from peer, tell peer my protocol */
+ conn->ksnc_proto = &ksocknal_protocol_v3x;
+#if SOCKNAL_VERSION_DEBUG
+ if (*ksocknal_tunables.ksnd_protocol == 2)
+ conn->ksnc_proto = &ksocknal_protocol_v2x;
+ else if (*ksocknal_tunables.ksnd_protocol == 1)
+ conn->ksnc_proto = &ksocknal_protocol_v1x;
+#endif
+ hello->kshm_nips = 0;
+ ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
+ }
+
+ CERROR("Unknown protocol version (%d.x expected)"
+ " from %pI4h\n",
+ conn->ksnc_proto->pro_version,
+ &conn->ksnc_ipaddr);
+
+ return -EPROTO;
+ }
+
+ proto_match = (conn->ksnc_proto == proto);
+ conn->ksnc_proto = proto;
+
+ /* receive the rest of hello message anyway */
+ rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
+ if (rc != 0) {
+		CERROR("Error %d reading or checking hello from %pI4h\n",
+ rc, &conn->ksnc_ipaddr);
+ LASSERT (rc < 0);
+ return rc;
+ }
+
+ *incarnation = hello->kshm_src_incarnation;
+
+ if (hello->kshm_src_nid == LNET_NID_ANY) {
+		CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY "
+		       "from %pI4h\n", &conn->ksnc_ipaddr);
+ return -EPROTO;
+ }
+
+ if (!active &&
+ conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
+ /* Userspace NAL assigns peer process ID from socket */
+ recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
+ recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr);
+ } else {
+ recv_id.nid = hello->kshm_src_nid;
+ recv_id.pid = hello->kshm_src_pid;
+ }
+
+ if (!active) {
+ *peerid = recv_id;
+
+ /* peer determines type */
+ conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
+ if (conn->ksnc_type == SOCKLND_CONN_NONE) {
+ CERROR("Unexpected type %d from %s ip %pI4h\n",
+ hello->kshm_ctype, libcfs_id2str(*peerid),
+ &conn->ksnc_ipaddr);
+ return -EPROTO;
+ }
+
+ return 0;
+ }
+
+ if (peerid->pid != recv_id.pid ||
+ peerid->nid != recv_id.nid) {
+ LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host"
+ " %pI4h, but they claimed they were "
+ "%s; please check your Lustre "
+ "configuration.\n",
+ libcfs_id2str(*peerid),
+ &conn->ksnc_ipaddr,
+ libcfs_id2str(recv_id));
+ return -EPROTO;
+ }
+
+ if (hello->kshm_ctype == SOCKLND_CONN_NONE) {
+ /* Possible protocol mismatch or I lost the connection race */
+ return proto_match ? EALREADY : EPROTO;
+ }
+
+ if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
+ CERROR("Mismatched types: me %d, %s ip %pI4h %d\n",
+ conn->ksnc_type, libcfs_id2str(*peerid),
+ &conn->ksnc_ipaddr,
+ hello->kshm_ctype);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+int
+ksocknal_connect (ksock_route_t *route)
+{
+ LIST_HEAD (zombies);
+ ksock_peer_t *peer = route->ksnr_peer;
+ int type;
+ int wanted;
+ socket_t *sock;
+ cfs_time_t deadline;
+ int retry_later = 0;
+ int rc = 0;
+
+ deadline = cfs_time_add(cfs_time_current(),
+ cfs_time_seconds(*ksocknal_tunables.ksnd_timeout));
+
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
+
+ LASSERT (route->ksnr_scheduled);
+ LASSERT (!route->ksnr_connecting);
+
+ route->ksnr_connecting = 1;
+
+ for (;;) {
+ wanted = ksocknal_route_mask() & ~route->ksnr_connected;
+
+ /* stop connecting if peer/route got closed under me, or
+ * route got connected while queued */
+ if (peer->ksnp_closing || route->ksnr_deleted ||
+ wanted == 0) {
+ retry_later = 0;
+ break;
+ }
+
+ /* reschedule if peer is connecting to me */
+ if (peer->ksnp_accepting > 0) {
+ CDEBUG(D_NET,
+ "peer %s(%d) already connecting to me, retry later.\n",
+ libcfs_nid2str(peer->ksnp_id.nid), peer->ksnp_accepting);
+ retry_later = 1;
+ }
+
+ if (retry_later) /* needs reschedule */
+ break;
+
+ if ((wanted & (1 << SOCKLND_CONN_ANY)) != 0) {
+ type = SOCKLND_CONN_ANY;
+ } else if ((wanted & (1 << SOCKLND_CONN_CONTROL)) != 0) {
+ type = SOCKLND_CONN_CONTROL;
+ } else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) {
+ type = SOCKLND_CONN_BULK_IN;
+ } else {
+ LASSERT ((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0);
+ type = SOCKLND_CONN_BULK_OUT;
+ }
+
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
+
+ if (cfs_time_aftereq(cfs_time_current(), deadline)) {
+ rc = -ETIMEDOUT;
+ lnet_connect_console_error(rc, peer->ksnp_id.nid,
+ route->ksnr_ipaddr,
+ route->ksnr_port);
+ goto failed;
+ }
+
+ rc = lnet_connect(&sock, peer->ksnp_id.nid,
+ route->ksnr_myipaddr,
+ route->ksnr_ipaddr, route->ksnr_port);
+ if (rc != 0)
+ goto failed;
+
+ rc = ksocknal_create_conn(peer->ksnp_ni, route, sock, type);
+ if (rc < 0) {
+ lnet_connect_console_error(rc, peer->ksnp_id.nid,
+ route->ksnr_ipaddr,
+ route->ksnr_port);
+ goto failed;
+ }
+
+ /* A +ve RC means I have to retry because I lost the connection
+ * race or I have to renegotiate protocol version */
+ retry_later = (rc != 0);
+ if (retry_later)
+ CDEBUG(D_NET, "peer %s: conn race, retry later.\n",
+ libcfs_nid2str(peer->ksnp_id.nid));
+
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
+ }
+
+ route->ksnr_scheduled = 0;
+ route->ksnr_connecting = 0;
+
+ if (retry_later) {
+ /* re-queue for attention; this frees me up to handle
+ * the peer's incoming connection request */
+
+ if (rc == EALREADY ||
+ (rc == 0 && peer->ksnp_accepting > 0)) {
+ /* We want to introduce a delay before next
+ * attempt to connect if we lost conn race,
+			 * but the race is usually resolved quickly,
+			 * so min_reconnectms should be a good heuristic */
+ route->ksnr_retry_interval =
+ cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000;
+ route->ksnr_timeout = cfs_time_add(cfs_time_current(),
+ route->ksnr_retry_interval);
+ }
+
+ ksocknal_launch_connection_locked(route);
+ }
+
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
+ return retry_later;
+
+ failed:
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
+
+ route->ksnr_scheduled = 0;
+ route->ksnr_connecting = 0;
+
+ /* This is a retry rather than a new connection */
+ route->ksnr_retry_interval *= 2;
+ route->ksnr_retry_interval =
+ MAX(route->ksnr_retry_interval,
+ cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000);
+ route->ksnr_retry_interval =
+ MIN(route->ksnr_retry_interval,
+ cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms)/1000);
+
+ LASSERT (route->ksnr_retry_interval != 0);
+ route->ksnr_timeout = cfs_time_add(cfs_time_current(),
+ route->ksnr_retry_interval);
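+	/* For illustration, assuming min_reconnectms = 1000 and
+	 * max_reconnectms = 64000: repeated failures retry after roughly
+	 * 1, 2, 4, 8, ... seconds, doubling each time but clamped so the
+	 * interval never drops below 1 second or exceeds 64 seconds. */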
+
+ if (!list_empty(&peer->ksnp_tx_queue) &&
+ peer->ksnp_accepting == 0 &&
+ ksocknal_find_connecting_route_locked(peer) == NULL) {
+ ksock_conn_t *conn;
+
+ /* ksnp_tx_queue is queued on a conn on successful
+ * connection for V1.x and V2.x */
+ if (!list_empty (&peer->ksnp_conns)) {
+ conn = list_entry(peer->ksnp_conns.next,
+ ksock_conn_t, ksnc_list);
+ LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
+ }
+
+ /* take all the blocked packets while I've got the lock and
+ * complete below... */
+ list_splice_init(&peer->ksnp_tx_queue, &zombies);
+ }
+
+#if 0 /* irrelevant with only eager routes */
+ if (!route->ksnr_deleted) {
+ /* make this route least-favourite for re-selection */
+ list_del(&route->ksnr_list);
+ list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
+ }
+#endif
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
+
+ ksocknal_peer_failed(peer);
+ ksocknal_txlist_done(peer->ksnp_ni, &zombies, 1);
+ return 0;
+}
+
+/*
+ * Check whether we need to create more connds.
+ * It will try to create a new thread if necessary; @timeout can
+ * be updated if thread creation fails, so the caller won't keep
+ * retrying while running out of resources.
+ */
+static int
+ksocknal_connd_check_start(long sec, long *timeout)
+{
+ char name[16];
+ int rc;
+ int total = ksocknal_data.ksnd_connd_starting +
+ ksocknal_data.ksnd_connd_running;
+
+ if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
+ /* still in initializing */
+ return 0;
+ }
+
+ if (total >= *ksocknal_tunables.ksnd_nconnds_max ||
+ total > ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV) {
+		/* can't create more connds, or we still have enough
+		 * threads to handle more connecting */
+ return 0;
+ }
+
+ if (list_empty(&ksocknal_data.ksnd_connd_routes)) {
+ /* no pending connecting request */
+ return 0;
+ }
+
+ if (sec - ksocknal_data.ksnd_connd_failed_stamp <= 1) {
+ /* may run out of resource, retry later */
+ *timeout = cfs_time_seconds(1);
+ return 0;
+ }
+
+ if (ksocknal_data.ksnd_connd_starting > 0) {
+ /* serialize starting to avoid flood */
+ return 0;
+ }
+
+ ksocknal_data.ksnd_connd_starting_stamp = sec;
+ ksocknal_data.ksnd_connd_starting++;
+ spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
+
+ /* NB: total is the next id */
+ snprintf(name, sizeof(name), "socknal_cd%02d", total);
+ rc = ksocknal_thread_start(ksocknal_connd, NULL, name);
+
+ spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
+ if (rc == 0)
+ return 1;
+
+ /* we tried ... */
+ LASSERT(ksocknal_data.ksnd_connd_starting > 0);
+ ksocknal_data.ksnd_connd_starting--;
+ ksocknal_data.ksnd_connd_failed_stamp = cfs_time_current_sec();
+
+ return 1;
+}
+
+/*
+ * Check whether the current thread can exit; it will return 1 if there are
+ * too many threads and none have been created in the past 120 seconds.
+ * Also, this function may update @timeout to make the caller come back
+ * again to recheck these conditions.
+ */
+static int
+ksocknal_connd_check_stop(long sec, long *timeout)
+{
+ int val;
+
+ if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
+ /* still in initializing */
+ return 0;
+ }
+
+ if (ksocknal_data.ksnd_connd_starting > 0) {
+ /* in progress of starting new thread */
+ return 0;
+ }
+
+ if (ksocknal_data.ksnd_connd_running <=
+ *ksocknal_tunables.ksnd_nconnds) { /* can't shrink */
+ return 0;
+ }
+
+ /* created thread in past 120 seconds? */
+ val = (int)(ksocknal_data.ksnd_connd_starting_stamp +
+ SOCKNAL_CONND_TIMEOUT - sec);
+
+ *timeout = (val > 0) ? cfs_time_seconds(val) :
+ cfs_time_seconds(SOCKNAL_CONND_TIMEOUT);
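+	/* For example, assuming SOCKNAL_CONND_TIMEOUT is the 120 seconds
+	 * mentioned above: if the last connd was started 30 seconds ago,
+	 * val = 120 - 30 = 90 > 0, so *timeout becomes 90 seconds and we
+	 * return 0; only after a full 120 seconds with no new thread may
+	 * this thread consider exiting. */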
+ if (val > 0)
+ return 0;
+
+	/* no thread created in the past 120 seconds */
+
+ return ksocknal_data.ksnd_connd_running >
+ ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV;
+}
+
+/* Go through the connd_routes queue looking for a route that we can process
+ * right now; @timeout_p can be updated if we need to come back later */
+static ksock_route_t *
+ksocknal_connd_get_route_locked(signed long *timeout_p)
+{
+ ksock_route_t *route;
+ cfs_time_t now;
+
+ now = cfs_time_current();
+
+ /* connd_routes can contain both pending and ordinary routes */
+ list_for_each_entry (route, &ksocknal_data.ksnd_connd_routes,
+ ksnr_connd_list) {
+
+ if (route->ksnr_retry_interval == 0 ||
+ cfs_time_aftereq(now, route->ksnr_timeout))
+ return route;
+
+ if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
+ (int)*timeout_p > (int)(route->ksnr_timeout - now))
+ *timeout_p = (int)(route->ksnr_timeout - now);
+ }
+
+ return NULL;
+}
+
+int
+ksocknal_connd (void *arg)
+{
+ spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
+ ksock_connreq_t *cr;
+ wait_queue_t wait;
+ int nloops = 0;
+ int cons_retry = 0;
+
+ cfs_block_allsigs ();
+
+ init_waitqueue_entry_current (&wait);
+
+ spin_lock_bh(connd_lock);
+
+ LASSERT(ksocknal_data.ksnd_connd_starting > 0);
+ ksocknal_data.ksnd_connd_starting--;
+ ksocknal_data.ksnd_connd_running++;
+
+ while (!ksocknal_data.ksnd_shuttingdown) {
+ ksock_route_t *route = NULL;
+ long sec = cfs_time_current_sec();
+ long timeout = MAX_SCHEDULE_TIMEOUT;
+ int dropped_lock = 0;
+
+ if (ksocknal_connd_check_stop(sec, &timeout)) {
+ /* wakeup another one to check stop */
+ wake_up(&ksocknal_data.ksnd_connd_waitq);
+ break;
+ }
+
+ if (ksocknal_connd_check_start(sec, &timeout)) {
+ /* created new thread */
+ dropped_lock = 1;
+ }
+
+ if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
+ /* Connection accepted by the listener */
+			cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
+					ksock_connreq_t, ksncr_list);
+
+ list_del(&cr->ksncr_list);
+ spin_unlock_bh(connd_lock);
+ dropped_lock = 1;
+
+ ksocknal_create_conn(cr->ksncr_ni, NULL,
+ cr->ksncr_sock, SOCKLND_CONN_NONE);
+ lnet_ni_decref(cr->ksncr_ni);
+ LIBCFS_FREE(cr, sizeof(*cr));
+
+ spin_lock_bh(connd_lock);
+ }
+
+		/* Only handle an outgoing connection request if there
+		 * is a thread left to handle incoming connections and
+		 * to create a new connd */
+ if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
+ ksocknal_data.ksnd_connd_running) {
+ route = ksocknal_connd_get_route_locked(&timeout);
+ }
+ if (route != NULL) {
+ list_del (&route->ksnr_connd_list);
+ ksocknal_data.ksnd_connd_connecting++;
+ spin_unlock_bh(connd_lock);
+ dropped_lock = 1;
+
+ if (ksocknal_connect(route)) {
+ /* consecutive retry */
+ if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
+ CWARN("massive consecutive "
+ "re-connecting to %pI4h\n",
+ &route->ksnr_ipaddr);
+ cons_retry = 0;
+ }
+ } else {
+ cons_retry = 0;
+ }
+
+ ksocknal_route_decref(route);
+
+ spin_lock_bh(connd_lock);
+ ksocknal_data.ksnd_connd_connecting--;
+ }
+
+ if (dropped_lock) {
+ if (++nloops < SOCKNAL_RESCHED)
+ continue;
+ spin_unlock_bh(connd_lock);
+ nloops = 0;
+ cond_resched();
+ spin_lock_bh(connd_lock);
+ continue;
+ }
+
+ /* Nothing to do for 'timeout' */
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
+ spin_unlock_bh(connd_lock);
+
+ nloops = 0;
+ waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
+ spin_lock_bh(connd_lock);
+ }
+ ksocknal_data.ksnd_connd_running--;
+ spin_unlock_bh(connd_lock);
+
+ ksocknal_thread_fini();
+ return 0;
+}
+
+ksock_conn_t *
+ksocknal_find_timed_out_conn (ksock_peer_t *peer)
+{
+ /* We're called with a shared lock on ksnd_global_lock */
+ ksock_conn_t *conn;
+ struct list_head *ctmp;
+
+ list_for_each (ctmp, &peer->ksnp_conns) {
+ int error;
+ conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
+
+ /* Don't need the {get,put}connsock dance to deref ksnc_sock */
+ LASSERT (!conn->ksnc_closing);
+
+ /* SOCK_ERROR will reset error code of socket in
+ * some platform (like Darwin8.x) */
+ error = cfs_sock_error(conn->ksnc_sock);
+ if (error != 0) {
+ ksocknal_conn_addref(conn);
+
+ switch (error) {
+ case ECONNRESET:
+ CNETERR("A connection with %s "
+ "(%pI4h:%d) was reset; "
+ "it may have rebooted.\n",
+ libcfs_id2str(peer->ksnp_id),
+ &conn->ksnc_ipaddr,
+ conn->ksnc_port);
+ break;
+ case ETIMEDOUT:
+ CNETERR("A connection with %s "
+ "(%pI4h:%d) timed out; the "
+ "network or node may be down.\n",
+ libcfs_id2str(peer->ksnp_id),
+ &conn->ksnc_ipaddr,
+ conn->ksnc_port);
+ break;
+ default:
+ CNETERR("An unexpected network error %d "
+ "occurred with %s "
+					"(%pI4h:%d)\n", error,
+ libcfs_id2str(peer->ksnp_id),
+ &conn->ksnc_ipaddr,
+ conn->ksnc_port);
+ break;
+ }
+
+ return (conn);
+ }
+
+ if (conn->ksnc_rx_started &&
+ cfs_time_aftereq(cfs_time_current(),
+ conn->ksnc_rx_deadline)) {
+ /* Timed out incomplete incoming message */
+ ksocknal_conn_addref(conn);
+ CNETERR("Timeout receiving from %s (%pI4h:%d), "
+ "state %d wanted %d left %d\n",
+ libcfs_id2str(peer->ksnp_id),
+ &conn->ksnc_ipaddr,
+ conn->ksnc_port,
+ conn->ksnc_rx_state,
+ conn->ksnc_rx_nob_wanted,
+ conn->ksnc_rx_nob_left);
+ return (conn);
+ }
+
+ if ((!list_empty(&conn->ksnc_tx_queue) ||
+ cfs_sock_wmem_queued(conn->ksnc_sock) != 0) &&
+ cfs_time_aftereq(cfs_time_current(),
+ conn->ksnc_tx_deadline)) {
+ /* Timed out messages queued for sending or
+ * buffered in the socket's send buffer */
+ ksocknal_conn_addref(conn);
+			CNETERR("Timeout sending data to %s (%pI4h:%d); "
+				"the network or that node may be down.\n",
+ libcfs_id2str(peer->ksnp_id),
+ &conn->ksnc_ipaddr,
+ conn->ksnc_port);
+ return (conn);
+ }
+ }
+
+ return (NULL);
+}
+
+static inline void
+ksocknal_flush_stale_txs(ksock_peer_t *peer)
+{
+ ksock_tx_t *tx;
+ LIST_HEAD (stale_txs);
+
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
+
+ while (!list_empty (&peer->ksnp_tx_queue)) {
+ tx = list_entry (peer->ksnp_tx_queue.next,
+ ksock_tx_t, tx_list);
+
+ if (!cfs_time_aftereq(cfs_time_current(),
+ tx->tx_deadline))
+ break;
+
+ list_del (&tx->tx_list);
+ list_add_tail (&tx->tx_list, &stale_txs);
+ }
+
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
+
+ ksocknal_txlist_done(peer->ksnp_ni, &stale_txs, 1);
+}
+
+int
+ksocknal_send_keepalive_locked(ksock_peer_t *peer)
+{
+ ksock_sched_t *sched;
+ ksock_conn_t *conn;
+ ksock_tx_t *tx;
+
+ if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */
+ return 0;
+
+ if (peer->ksnp_proto != &ksocknal_protocol_v3x)
+ return 0;
+
+ if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
+ cfs_time_before(cfs_time_current(),
+ cfs_time_add(peer->ksnp_last_alive,
+ cfs_time_seconds(*ksocknal_tunables.ksnd_keepalive))))
+ return 0;
+
+ if (cfs_time_before(cfs_time_current(),
+ peer->ksnp_send_keepalive))
+ return 0;
+
+	/* retry 10 secs later, so we won't put pressure
+	 * on this peer if we fail to send a keepalive this time */
+ peer->ksnp_send_keepalive = cfs_time_shift(10);
+
+ conn = ksocknal_find_conn_locked(peer, NULL, 1);
+ if (conn != NULL) {
+ sched = conn->ksnc_scheduler;
+
+ spin_lock_bh(&sched->kss_lock);
+ if (!list_empty(&conn->ksnc_tx_queue)) {
+ spin_unlock_bh(&sched->kss_lock);
+			/* there is a queued ACK, so no keepalive is needed */
+ return 0;
+ }
+
+ spin_unlock_bh(&sched->kss_lock);
+ }
+
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+
+ /* cookie = 1 is reserved for keepalive PING */
+ tx = ksocknal_alloc_tx_noop(1, 1);
+ if (tx == NULL) {
+ read_lock(&ksocknal_data.ksnd_global_lock);
+ return -ENOMEM;
+ }
+
+ if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) {
+ read_lock(&ksocknal_data.ksnd_global_lock);
+ return 1;
+ }
+
+ ksocknal_free_tx(tx);
+ read_lock(&ksocknal_data.ksnd_global_lock);
+
+ return -EIO;
+}
+
+
+void
+ksocknal_check_peer_timeouts (int idx)
+{
+ struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
+ ksock_peer_t *peer;
+ ksock_conn_t *conn;
+ ksock_tx_t *tx;
+
+ again:
+ /* NB. We expect to have a look at all the peers and not find any
+ * connections to time out, so we just use a shared lock while we
+ * take a look... */
+ read_lock(&ksocknal_data.ksnd_global_lock);
+
+ list_for_each_entry(peer, peers, ksnp_list) {
+ cfs_time_t deadline = 0;
+ int resid = 0;
+ int n = 0;
+
+ if (ksocknal_send_keepalive_locked(peer) != 0) {
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+ goto again;
+ }
+
+ conn = ksocknal_find_timed_out_conn (peer);
+
+ if (conn != NULL) {
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+
+ ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
+
+ /* NB we won't find this one again, but we can't
+ * just proceed with the next peer, since we dropped
+ * ksnd_global_lock and it might be dead already! */
+ ksocknal_conn_decref(conn);
+ goto again;
+ }
+
+ /* we can't process stale txs right here because we're
+		 * holding only a shared lock */
+ if (!list_empty (&peer->ksnp_tx_queue)) {
+ ksock_tx_t *tx =
+ list_entry (peer->ksnp_tx_queue.next,
+ ksock_tx_t, tx_list);
+
+ if (cfs_time_aftereq(cfs_time_current(),
+ tx->tx_deadline)) {
+
+ ksocknal_peer_addref(peer);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+
+ ksocknal_flush_stale_txs(peer);
+
+ ksocknal_peer_decref(peer);
+ goto again;
+ }
+ }
+
+ if (list_empty(&peer->ksnp_zc_req_list))
+ continue;
+
+ spin_lock(&peer->ksnp_lock);
+ list_for_each_entry(tx, &peer->ksnp_zc_req_list, tx_zc_list) {
+ if (!cfs_time_aftereq(cfs_time_current(),
+ tx->tx_deadline))
+ break;
+ /* ignore the TX if connection is being closed */
+ if (tx->tx_conn->ksnc_closing)
+ continue;
+ n++;
+ }
+
+ if (n == 0) {
+ spin_unlock(&peer->ksnp_lock);
+ continue;
+ }
+
+ tx = list_entry(peer->ksnp_zc_req_list.next,
+ ksock_tx_t, tx_zc_list);
+ deadline = tx->tx_deadline;
+ resid = tx->tx_resid;
+ conn = tx->tx_conn;
+ ksocknal_conn_addref(conn);
+
+ spin_unlock(&peer->ksnp_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+
+ CERROR("Total %d stale ZC_REQs for peer %s detected; the "
+ "oldest(%p) timed out %ld secs ago, "
+ "resid: %d, wmem: %d\n",
+ n, libcfs_nid2str(peer->ksnp_id.nid), tx,
+ cfs_duration_sec(cfs_time_current() - deadline),
+ resid, cfs_sock_wmem_queued(conn->ksnc_sock));
+
+ ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
+ ksocknal_conn_decref(conn);
+ goto again;
+ }
+
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+}
+
+int
+ksocknal_reaper (void *arg)
+{
+ wait_queue_t wait;
+ ksock_conn_t *conn;
+ ksock_sched_t *sched;
+ struct list_head enomem_conns;
+ int nenomem_conns;
+ cfs_duration_t timeout;
+ int i;
+ int peer_index = 0;
+ cfs_time_t deadline = cfs_time_current();
+
+ cfs_block_allsigs ();
+
+ INIT_LIST_HEAD(&enomem_conns);
+ init_waitqueue_entry_current (&wait);
+
+ spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
+
+ while (!ksocknal_data.ksnd_shuttingdown) {
+
+ if (!list_empty (&ksocknal_data.ksnd_deathrow_conns)) {
+			conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
+					  ksock_conn_t, ksnc_list);
+ list_del (&conn->ksnc_list);
+
+ spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
+
+ ksocknal_terminate_conn(conn);
+ ksocknal_conn_decref(conn);
+
+ spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
+ continue;
+ }
+
+ if (!list_empty (&ksocknal_data.ksnd_zombie_conns)) {
+			conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
+					  ksock_conn_t, ksnc_list);
+ list_del (&conn->ksnc_list);
+
+ spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
+
+ ksocknal_destroy_conn(conn);
+
+ spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
+ continue;
+ }
+
+ if (!list_empty (&ksocknal_data.ksnd_enomem_conns)) {
+ list_add(&enomem_conns,
+ &ksocknal_data.ksnd_enomem_conns);
+ list_del_init(&ksocknal_data.ksnd_enomem_conns);
+ }
+
+ spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
+
+ /* reschedule all the connections that stalled with ENOMEM... */
+ nenomem_conns = 0;
+ while (!list_empty (&enomem_conns)) {
+ conn = list_entry (enomem_conns.next,
+ ksock_conn_t, ksnc_tx_list);
+ list_del (&conn->ksnc_tx_list);
+
+ sched = conn->ksnc_scheduler;
+
+ spin_lock_bh(&sched->kss_lock);
+
+ LASSERT(conn->ksnc_tx_scheduled);
+ conn->ksnc_tx_ready = 1;
+ list_add_tail(&conn->ksnc_tx_list,
+ &sched->kss_tx_conns);
+ wake_up(&sched->kss_waitq);
+
+ spin_unlock_bh(&sched->kss_lock);
+ nenomem_conns++;
+ }
+
+ /* careful with the jiffy wrap... */
+ while ((timeout = cfs_time_sub(deadline,
+ cfs_time_current())) <= 0) {
+ const int n = 4;
+ const int p = 1;
+ int chunk = ksocknal_data.ksnd_peer_hash_size;
+
+ /* Time to check for timeouts on a few more peers: I do
+ * checks every 'p' seconds on a proportion of the peer
+ * table and I need to check every connection 'n' times
+ * within a timeout interval, to ensure I detect a
+ * timeout on any connection within (n+1)/n times the
+ * timeout interval. */
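+			/* For example, assuming a 50 second timeout and a
+			 * 101-bucket peer hash: with n = 4 and p = 1,
+			 * chunk = (101 * 4 * 1) / 50 = 8, so each pass checks
+			 * about 8 buckets and the whole table is covered
+			 * roughly 4 times per timeout interval. */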
+
+ if (*ksocknal_tunables.ksnd_timeout > n * p)
+ chunk = (chunk * n * p) /
+ *ksocknal_tunables.ksnd_timeout;
+ if (chunk == 0)
+ chunk = 1;
+
+ for (i = 0; i < chunk; i++) {
+ ksocknal_check_peer_timeouts (peer_index);
+ peer_index = (peer_index + 1) %
+ ksocknal_data.ksnd_peer_hash_size;
+ }
+
+ deadline = cfs_time_add(deadline, cfs_time_seconds(p));
+ }
+
+ if (nenomem_conns != 0) {
+ /* Reduce my timeout if I rescheduled ENOMEM conns.
+ * This also prevents me getting woken immediately
+ * if any go back on my enomem list. */
+ timeout = SOCKNAL_ENOMEM_RETRY;
+ }
+ ksocknal_data.ksnd_reaper_waketime =
+ cfs_time_add(cfs_time_current(), timeout);
+
+ set_current_state (TASK_INTERRUPTIBLE);
+ add_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
+
+ if (!ksocknal_data.ksnd_shuttingdown &&
+ list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
+ list_empty (&ksocknal_data.ksnd_zombie_conns))
+ waitq_timedwait (&wait, TASK_INTERRUPTIBLE,
+ timeout);
+
+ set_current_state (TASK_RUNNING);
+ remove_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
+
+ spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
+ }
+
+ spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
+
+ ksocknal_thread_fini();
+ return 0;
+}
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
new file mode 100644
index 0000000..a1c6a51
--- /dev/null
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
@@ -0,0 +1,1085 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#include "socklnd.h"
+
+# if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
+
+
+enum {
+ SOCKLND_TIMEOUT = 1,
+ SOCKLND_CREDITS,
+ SOCKLND_PEER_TXCREDITS,
+ SOCKLND_PEER_RTRCREDITS,
+ SOCKLND_PEER_TIMEOUT,
+ SOCKLND_NCONNDS,
+ SOCKLND_RECONNECTS_MIN,
+ SOCKLND_RECONNECTS_MAX,
+ SOCKLND_EAGER_ACK,
+ SOCKLND_ZERO_COPY,
+ SOCKLND_TYPED,
+ SOCKLND_BULK_MIN,
+ SOCKLND_RX_BUFFER_SIZE,
+ SOCKLND_TX_BUFFER_SIZE,
+ SOCKLND_NAGLE,
+ SOCKLND_IRQ_AFFINITY,
+ SOCKLND_ROUND_ROBIN,
+ SOCKLND_KEEPALIVE,
+ SOCKLND_KEEPALIVE_IDLE,
+ SOCKLND_KEEPALIVE_COUNT,
+ SOCKLND_KEEPALIVE_INTVL,
+ SOCKLND_BACKOFF_INIT,
+ SOCKLND_BACKOFF_MAX,
+ SOCKLND_PROTOCOL,
+ SOCKLND_ZERO_COPY_RECV,
+ SOCKLND_ZERO_COPY_RECV_MIN_NFRAGS
+};
+
+static ctl_table_t ksocknal_ctl_table[] = {
+ {
+ .ctl_name = SOCKLND_TIMEOUT,
+ .procname = "timeout",
+ .data = &ksocknal_tunables.ksnd_timeout,
+ .maxlen = sizeof (int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ },
+ {
+ .ctl_name = SOCKLND_CREDITS,
+ .procname = "credits",
+ .data = &ksocknal_tunables.ksnd_credits,
+ .maxlen = sizeof (int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ },
+ {
+ .ctl_name = SOCKLND_PEER_TXCREDITS,
+ .procname = "peer_credits",
+ .data = &ksocknal_tunables.ksnd_peertxcredits,
+ .maxlen = sizeof (int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ },
+ {
+ .ctl_name = SOCKLND_PEER_RTRCREDITS,
+ .procname = "peer_buffer_credits",
+ .data = &ksocknal_tunables.ksnd_peerrtrcredits,
+ .maxlen = sizeof (int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ },
+ {
+ .ctl_name = SOCKLND_PEER_TIMEOUT,
+ .procname = "peer_timeout",
+ .data = &ksocknal_tunables.ksnd_peertimeout,
+ .maxlen = sizeof (int),
+ .mode = 0444,
+		.proc_handler	= &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ },
+ {
+ .ctl_name = SOCKLND_NCONNDS,
+ .procname = "nconnds",
+ .data = &ksocknal_tunables.ksnd_nconnds,
+ .maxlen = sizeof (int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ },
+ {
+ .ctl_name = SOCKLND_RECONNECTS_MIN,
+ .procname = "min_reconnectms",
+ .data = &ksocknal_tunables.ksnd_min_reconnectms,
+ .maxlen = sizeof (int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ },
+ {
+ .ctl_name = SOCKLND_RECONNECTS_MAX,
+ .procname = "max_reconnectms",
+ .data = &ksocknal_tunables.ksnd_max_reconnectms,
+ .maxlen = sizeof (int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ },
+ {
+ .ctl_name = SOCKLND_EAGER_ACK,
+ .procname = "eager_ack",
+ .data = &ksocknal_tunables.ksnd_eager_ack,
+ .maxlen = sizeof (int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ },
+ {
+ .ctl_name = SOCKLND_ZERO_COPY,
+ .procname = "zero_copy",
+ .data = &ksocknal_tunables.ksnd_zc_min_payload,
+ .maxlen = sizeof (int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ },
+ {
+ .ctl_name = SOCKLND_ZERO_COPY_RECV,
+ .procname = "zero_copy_recv",
+ .data = &ksocknal_tunables.ksnd_zc_recv,
+ .maxlen = sizeof (int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ },
+
+ {
+ .ctl_name = SOCKLND_ZERO_COPY_RECV_MIN_NFRAGS,
+		.procname	= "zero_copy_recv_min_nfrags",
+ .data = &ksocknal_tunables.ksnd_zc_recv_min_nfrags,
+ .maxlen = sizeof (int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ },
+ {
+ .ctl_name = SOCKLND_TYPED,
+ .procname = "typed",
+ .data = &ksocknal_tunables.ksnd_typed_conns,
+ .maxlen = sizeof (int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ },
+ {
+ .ctl_name = SOCKLND_BULK_MIN,
+ .procname = "min_bulk",
+ .data = &ksocknal_tunables.ksnd_min_bulk,
+ .maxlen = sizeof (int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ },
+ {
+ .ctl_name = SOCKLND_RX_BUFFER_SIZE,
+ .procname = "rx_buffer_size",
+ .data = &ksocknal_tunables.ksnd_rx_buffer_size,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ },
+ {
+ .ctl_name = SOCKLND_TX_BUFFER_SIZE,
+ .procname = "tx_buffer_size",
+ .data = &ksocknal_tunables.ksnd_tx_buffer_size,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ },
+ {
+ .ctl_name = SOCKLND_NAGLE,
+ .procname = "nagle",
+ .data = &ksocknal_tunables.ksnd_nagle,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ },
+ {
+ .ctl_name = SOCKLND_ROUND_ROBIN,
+ .procname = "round_robin",
+ .data = &ksocknal_tunables.ksnd_round_robin,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ },
+ {
+ .ctl_name = SOCKLND_KEEPALIVE,
+ .procname = "keepalive",
+ .data = &ksocknal_tunables.ksnd_keepalive,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ },
+ {
+ .ctl_name = SOCKLND_KEEPALIVE_IDLE,
+ .procname = "keepalive_idle",
+ .data = &ksocknal_tunables.ksnd_keepalive_idle,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ },
+ {
+ .ctl_name = SOCKLND_KEEPALIVE_COUNT,
+ .procname = "keepalive_count",
+ .data = &ksocknal_tunables.ksnd_keepalive_count,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ },
+ {
+ .ctl_name = SOCKLND_KEEPALIVE_INTVL,
+ .procname = "keepalive_intvl",
+ .data = &ksocknal_tunables.ksnd_keepalive_intvl,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ },
+#if SOCKNAL_VERSION_DEBUG
+ {
+ .ctl_name = SOCKLND_PROTOCOL,
+ .procname = "protocol",
+ .data = &ksocknal_tunables.ksnd_protocol,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ },
+#endif
+ {0}
+};
+
+
+ctl_table_t ksocknal_top_ctl_table[] = {
+ {
+ .ctl_name = CTL_SOCKLND,
+ .procname = "socknal",
+ .data = NULL,
+ .maxlen = 0,
+ .mode = 0555,
+ .child = ksocknal_ctl_table
+ },
+ { 0 }
+};
+
+int
+ksocknal_lib_tunables_init ()
+{
+ if (!*ksocknal_tunables.ksnd_typed_conns) {
+ int rc = -EINVAL;
+#if SOCKNAL_VERSION_DEBUG
+ if (*ksocknal_tunables.ksnd_protocol < 3)
+ rc = 0;
+#endif
+ if (rc != 0) {
+ CERROR("Protocol V3.x MUST have typed connections\n");
+ return rc;
+ }
+ }
+
+ if (*ksocknal_tunables.ksnd_zc_recv_min_nfrags < 2)
+ *ksocknal_tunables.ksnd_zc_recv_min_nfrags = 2;
+ if (*ksocknal_tunables.ksnd_zc_recv_min_nfrags > LNET_MAX_IOV)
+ *ksocknal_tunables.ksnd_zc_recv_min_nfrags = LNET_MAX_IOV;
+
+ ksocknal_tunables.ksnd_sysctl =
+ register_sysctl_table(ksocknal_top_ctl_table);
+
+ if (ksocknal_tunables.ksnd_sysctl == NULL)
+ CWARN("Can't setup /proc tunables\n");
+
+ return 0;
+}
+
+void
+ksocknal_lib_tunables_fini(void)
+{
+ if (ksocknal_tunables.ksnd_sysctl != NULL)
+ unregister_sysctl_table(ksocknal_tunables.ksnd_sysctl);
+}
+#else
+int
+ksocknal_lib_tunables_init(void)
+{
+ return 0;
+}
+
+void
+ksocknal_lib_tunables_fini(void)
+{
+}
+#endif /* # if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM */
+
+int
+ksocknal_lib_get_conn_addrs (ksock_conn_t *conn)
+{
+ int rc = libcfs_sock_getaddr(conn->ksnc_sock, 1,
+ &conn->ksnc_ipaddr,
+ &conn->ksnc_port);
+
+ /* Didn't need the {get,put}connsock dance to deref ksnc_sock... */
+ LASSERT (!conn->ksnc_closing);
+
+ if (rc != 0) {
+ CERROR ("Error %d getting sock peer IP\n", rc);
+ return rc;
+ }
+
+ rc = libcfs_sock_getaddr(conn->ksnc_sock, 0,
+ &conn->ksnc_myipaddr, NULL);
+ if (rc != 0) {
+ CERROR ("Error %d getting sock local IP\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+int
+ksocknal_lib_zc_capable(ksock_conn_t *conn)
+{
+ int caps = conn->ksnc_sock->sk->sk_route_caps;
+
+ if (conn->ksnc_proto == &ksocknal_protocol_v1x)
+ return 0;
+
+ /* ZC if the socket supports scatter/gather and doesn't need software
+ * checksums */
+ return ((caps & NETIF_F_SG) != 0 && (caps & NETIF_F_ALL_CSUM) != 0);
+}
+
+int
+ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
+{
+ struct socket *sock = conn->ksnc_sock;
+ int nob;
+ int rc;
+
+ if (*ksocknal_tunables.ksnd_enable_csum && /* checksum enabled */
+ conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection */
+	    tx->tx_nob == tx->tx_resid && /* first sending */
+ tx->tx_msg.ksm_csum == 0) /* not checksummed */
+ ksocknal_lib_csum_tx(tx);
+
+ /* NB we can't trust socket ops to either consume our iovs
+ * or leave them alone. */
+
+ {
+#if SOCKNAL_SINGLE_FRAG_TX
+ struct iovec scratch;
+ struct iovec *scratchiov = &scratch;
+ unsigned int niov = 1;
+#else
+ struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
+ unsigned int niov = tx->tx_niov;
+#endif
+ struct msghdr msg = {
+ .msg_name = NULL,
+ .msg_namelen = 0,
+ .msg_iov = scratchiov,
+ .msg_iovlen = niov,
+ .msg_control = NULL,
+ .msg_controllen = 0,
+ .msg_flags = MSG_DONTWAIT
+ };
+ mm_segment_t oldmm = get_fs();
+ int i;
+
+ for (nob = i = 0; i < niov; i++) {
+ scratchiov[i] = tx->tx_iov[i];
+ nob += scratchiov[i].iov_len;
+ }
+
+ if (!list_empty(&conn->ksnc_tx_queue) ||
+ nob < tx->tx_resid)
+ msg.msg_flags |= MSG_MORE;
+
+ set_fs (KERNEL_DS);
+ rc = sock_sendmsg(sock, &msg, nob);
+ set_fs (oldmm);
+ }
+ return rc;
+}
+
+int
+ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
+{
+ struct socket *sock = conn->ksnc_sock;
+ lnet_kiov_t *kiov = tx->tx_kiov;
+ int rc;
+ int nob;
+
+ /* Not NOOP message */
+ LASSERT (tx->tx_lnetmsg != NULL);
+
+ /* NB we can't trust socket ops to either consume our iovs
+ * or leave them alone. */
+ if (tx->tx_msg.ksm_zc_cookies[0] != 0) {
+ /* Zero copy is enabled */
+ struct sock *sk = sock->sk;
+ struct page *page = kiov->kiov_page;
+ int offset = kiov->kiov_offset;
+ int fragsize = kiov->kiov_len;
+ int msgflg = MSG_DONTWAIT;
+
+ CDEBUG(D_NET, "page %p + offset %x for %d\n",
+ page, offset, kiov->kiov_len);
+
+ if (!list_empty(&conn->ksnc_tx_queue) ||
+ fragsize < tx->tx_resid)
+ msgflg |= MSG_MORE;
+
+ if (sk->sk_prot->sendpage != NULL) {
+ rc = sk->sk_prot->sendpage(sk, page,
+ offset, fragsize, msgflg);
+ } else {
+ rc = cfs_tcp_sendpage(sk, page, offset, fragsize,
+ msgflg);
+ }
+ } else {
+#if SOCKNAL_SINGLE_FRAG_TX || !SOCKNAL_RISK_KMAP_DEADLOCK
+ struct iovec scratch;
+ struct iovec *scratchiov = &scratch;
+ unsigned int niov = 1;
+#else
+#ifdef CONFIG_HIGHMEM
+#warning "XXX risk of kmap deadlock on multiple frags..."
+#endif
+ struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
+ unsigned int niov = tx->tx_nkiov;
+#endif
+ struct msghdr msg = {
+ .msg_name = NULL,
+ .msg_namelen = 0,
+ .msg_iov = scratchiov,
+ .msg_iovlen = niov,
+ .msg_control = NULL,
+ .msg_controllen = 0,
+ .msg_flags = MSG_DONTWAIT
+ };
+ mm_segment_t oldmm = get_fs();
+ int i;
+
+ for (nob = i = 0; i < niov; i++) {
+ scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
+ kiov[i].kiov_offset;
+ nob += scratchiov[i].iov_len = kiov[i].kiov_len;
+ }
+
+ if (!list_empty(&conn->ksnc_tx_queue) ||
+ nob < tx->tx_resid)
+ msg.msg_flags |= MSG_MORE;
+
+ set_fs (KERNEL_DS);
+ rc = sock_sendmsg(sock, &msg, nob);
+ set_fs (oldmm);
+
+ for (i = 0; i < niov; i++)
+ kunmap(kiov[i].kiov_page);
+ }
+ return rc;
+}
+
+void
+ksocknal_lib_eager_ack (ksock_conn_t *conn)
+{
+ int opt = 1;
+ mm_segment_t oldmm = get_fs();
+ struct socket *sock = conn->ksnc_sock;
+
+ /* Remind the socket to ACK eagerly. If I don't, the socket might
+ * think I'm about to send something it could piggy-back the ACK
+ * on, introducing delay in completing zero-copy sends in my
+ * peer. */
+
+ set_fs(KERNEL_DS);
+ sock->ops->setsockopt (sock, SOL_TCP, TCP_QUICKACK,
+ (char *)&opt, sizeof (opt));
+ set_fs(oldmm);
+}
+
+int
+ksocknal_lib_recv_iov (ksock_conn_t *conn)
+{
+#if SOCKNAL_SINGLE_FRAG_RX
+ struct iovec scratch;
+ struct iovec *scratchiov = &scratch;
+ unsigned int niov = 1;
+#else
+ struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
+ unsigned int niov = conn->ksnc_rx_niov;
+#endif
+ struct iovec *iov = conn->ksnc_rx_iov;
+ struct msghdr msg = {
+ .msg_name = NULL,
+ .msg_namelen = 0,
+ .msg_iov = scratchiov,
+ .msg_iovlen = niov,
+ .msg_control = NULL,
+ .msg_controllen = 0,
+ .msg_flags = 0
+ };
+ mm_segment_t oldmm = get_fs();
+ int nob;
+ int i;
+ int rc;
+ int fragnob;
+ int sum;
+ __u32 saved_csum;
+
+ /* NB we can't trust socket ops to either consume our iovs
+ * or leave them alone. */
+ LASSERT (niov > 0);
+
+ for (nob = i = 0; i < niov; i++) {
+ scratchiov[i] = iov[i];
+ nob += scratchiov[i].iov_len;
+ }
+ LASSERT (nob <= conn->ksnc_rx_nob_wanted);
+
+ set_fs (KERNEL_DS);
+ rc = sock_recvmsg (conn->ksnc_sock, &msg, nob, MSG_DONTWAIT);
+ /* NB this is just a boolean..........................^ */
+ set_fs (oldmm);
+
+ saved_csum = 0;
+ if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
+ saved_csum = conn->ksnc_msg.ksm_csum;
+ conn->ksnc_msg.ksm_csum = 0;
+ }
+
+ if (saved_csum != 0) {
+ /* accumulate checksum */
+ for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
+ LASSERT (i < niov);
+
+ fragnob = iov[i].iov_len;
+ if (fragnob > sum)
+ fragnob = sum;
+
+ conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
+ iov[i].iov_base, fragnob);
+ }
+ conn->ksnc_msg.ksm_csum = saved_csum;
+ }
+
+ return rc;
+}
+
+static void
+ksocknal_lib_kiov_vunmap(void *addr)
+{
+ if (addr == NULL)
+ return;
+
+ vunmap(addr);
+}
+
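+/* Try to vmap the receive fragments into one contiguous buffer so a
+ * single iovec can cover them.  Returns NULL (caller falls back to
+ * kmapping each fragment) when ZC receive is disabled, there are too few
+ * fragments, or the fragments are not whole, contiguous pages. */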
+static void *
+ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
+ struct iovec *iov, struct page **pages)
+{
+ void *addr;
+ int nob;
+ int i;
+
+ if (!*ksocknal_tunables.ksnd_zc_recv || pages == NULL)
+ return NULL;
+
+ LASSERT (niov <= LNET_MAX_IOV);
+
+ if (niov < 2 ||
+ niov < *ksocknal_tunables.ksnd_zc_recv_min_nfrags)
+ return NULL;
+
+ for (nob = i = 0; i < niov; i++) {
+ if ((kiov[i].kiov_offset != 0 && i > 0) ||
+ (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1))
+ return NULL;
+
+ pages[i] = kiov[i].kiov_page;
+ nob += kiov[i].kiov_len;
+ }
+
+ addr = vmap(pages, niov, VM_MAP, PAGE_KERNEL);
+ if (addr == NULL)
+ return NULL;
+
+ iov->iov_base = addr + kiov[0].kiov_offset;
+ iov->iov_len = nob;
+
+ return addr;
+}
+
+int
+ksocknal_lib_recv_kiov (ksock_conn_t *conn)
+{
+#if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK
+ struct iovec scratch;
+ struct iovec *scratchiov = &scratch;
+ struct page **pages = NULL;
+ unsigned int niov = 1;
+#else
+#ifdef CONFIG_HIGHMEM
+#warning "XXX risk of kmap deadlock on multiple frags..."
+#endif
+ struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
+ struct page **pages = conn->ksnc_scheduler->kss_rx_scratch_pgs;
+ unsigned int niov = conn->ksnc_rx_nkiov;
+#endif
+ lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
+ struct msghdr msg = {
+ .msg_name = NULL,
+ .msg_namelen = 0,
+ .msg_iov = scratchiov,
+ .msg_control = NULL,
+ .msg_controllen = 0,
+ .msg_flags = 0
+ };
+ mm_segment_t oldmm = get_fs();
+ int nob;
+ int i;
+ int rc;
+ void *base;
+ void *addr;
+ int sum;
+ int fragnob;
+
+ /* NB we can't trust socket ops to either consume our iovs
+ * or leave them alone. */
+ if ((addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages)) != NULL) {
+ nob = scratchiov[0].iov_len;
+ msg.msg_iovlen = 1;
+
+ } else {
+ for (nob = i = 0; i < niov; i++) {
+ nob += scratchiov[i].iov_len = kiov[i].kiov_len;
+ scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
+ kiov[i].kiov_offset;
+ }
+ msg.msg_iovlen = niov;
+ }
+
+ LASSERT (nob <= conn->ksnc_rx_nob_wanted);
+
+ set_fs (KERNEL_DS);
+ rc = sock_recvmsg (conn->ksnc_sock, &msg, nob, MSG_DONTWAIT);
+ /* NB this is just a boolean.......................^ */
+ set_fs (oldmm);
+
+ if (conn->ksnc_msg.ksm_csum != 0) {
+ for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
+ LASSERT (i < niov);
+
+ /* Dang! have to kmap again because I have nowhere to stash the
+ * mapped address. But by doing it while the page is still
+ * mapped, the kernel just bumps the map count and returns me
+ * the address it stashed. */
+ base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
+ fragnob = kiov[i].kiov_len;
+ if (fragnob > sum)
+ fragnob = sum;
+
+ conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
+ base, fragnob);
+
+ kunmap(kiov[i].kiov_page);
+ }
+ }
+
+ if (addr != NULL) {
+ ksocknal_lib_kiov_vunmap(addr);
+ } else {
+ for (i = 0; i < niov; i++)
+ kunmap(kiov[i].kiov_page);
+ }
+
+ return (rc);
+}
+
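+/* Checksum the whole outgoing message: the ksock header in tx_iov[0]
+ * plus every payload fragment (kmapping page fragments as needed).  A
+ * checksum error can be injected once via the inject_csum_error tunable. */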
+void
+ksocknal_lib_csum_tx(ksock_tx_t *tx)
+{
+ int i;
+ __u32 csum;
+ void *base;
+
+ LASSERT(tx->tx_iov[0].iov_base == (void *)&tx->tx_msg);
+ LASSERT(tx->tx_conn != NULL);
+ LASSERT(tx->tx_conn->ksnc_proto == &ksocknal_protocol_v2x);
+
+ tx->tx_msg.ksm_csum = 0;
+
+ csum = ksocknal_csum(~0, (void *)tx->tx_iov[0].iov_base,
+ tx->tx_iov[0].iov_len);
+
+ if (tx->tx_kiov != NULL) {
+ for (i = 0; i < tx->tx_nkiov; i++) {
+ base = kmap(tx->tx_kiov[i].kiov_page) +
+ tx->tx_kiov[i].kiov_offset;
+
+ csum = ksocknal_csum(csum, base, tx->tx_kiov[i].kiov_len);
+
+ kunmap(tx->tx_kiov[i].kiov_page);
+ }
+ } else {
+ for (i = 1; i < tx->tx_niov; i++)
+ csum = ksocknal_csum(csum, tx->tx_iov[i].iov_base,
+ tx->tx_iov[i].iov_len);
+ }
+
+ if (*ksocknal_tunables.ksnd_inject_csum_error) {
+ csum++;
+ *ksocknal_tunables.ksnd_inject_csum_error = 0;
+ }
+
+ tx->tx_msg.ksm_csum = csum;
+}
+
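+/* Report the socket's tx/rx buffer sizes and whether Nagle is enabled
+ * (TCP_NODELAY inverted).  Returns -ESHUTDOWN if the connection is
+ * already closing. */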
+int
+ksocknal_lib_get_conn_tunables (ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle)
+{
+ mm_segment_t oldmm = get_fs ();
+ struct socket *sock = conn->ksnc_sock;
+ int len;
+ int rc;
+
+ rc = ksocknal_connsock_addref(conn);
+ if (rc != 0) {
+ LASSERT (conn->ksnc_closing);
+ *txmem = *rxmem = *nagle = 0;
+ return (-ESHUTDOWN);
+ }
+
+ rc = libcfs_sock_getbuf(sock, txmem, rxmem);
+ if (rc == 0) {
+ len = sizeof(*nagle);
+ set_fs(KERNEL_DS);
+ rc = sock->ops->getsockopt(sock, SOL_TCP, TCP_NODELAY,
+ (char *)nagle, &len);
+ set_fs(oldmm);
+ }
+
+ ksocknal_connsock_decref(conn);
+
+ if (rc == 0)
+ *nagle = !*nagle;
+ else
+ *txmem = *rxmem = *nagle = 0;
+
+ return (rc);
+}
+
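+/* Apply socket options to a newly established socket: abort active sends
+ * on close (SO_LINGER/TCP_LINGER2), honour the nagle and buffer-size
+ * tunables, and configure TCP keepalives from the keepalive_* tunables. */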
+int
+ksocknal_lib_setup_sock (struct socket *sock)
+{
+ mm_segment_t oldmm = get_fs ();
+ int rc;
+ int option;
+ int keep_idle;
+ int keep_intvl;
+ int keep_count;
+ int do_keepalive;
+ struct linger linger;
+
+ sock->sk->sk_allocation = GFP_NOFS;
+
+ /* Ensure this socket aborts active sends immediately when we close
+ * it. */
+
+ linger.l_onoff = 0;
+ linger.l_linger = 0;
+
+ set_fs (KERNEL_DS);
+ rc = sock_setsockopt (sock, SOL_SOCKET, SO_LINGER,
+ (char *)&linger, sizeof (linger));
+ set_fs (oldmm);
+ if (rc != 0) {
+ CERROR ("Can't set SO_LINGER: %d\n", rc);
+ return (rc);
+ }
+
+ option = -1;
+ set_fs (KERNEL_DS);
+ rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_LINGER2,
+ (char *)&option, sizeof (option));
+ set_fs (oldmm);
+ if (rc != 0) {
+ CERROR ("Can't set SO_LINGER2: %d\n", rc);
+ return (rc);
+ }
+
+ if (!*ksocknal_tunables.ksnd_nagle) {
+ option = 1;
+
+ set_fs (KERNEL_DS);
+ rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_NODELAY,
+ (char *)&option, sizeof (option));
+ set_fs (oldmm);
+ if (rc != 0) {
+ CERROR ("Can't disable nagle: %d\n", rc);
+ return (rc);
+ }
+ }
+
+ rc = libcfs_sock_setbuf(sock,
+ *ksocknal_tunables.ksnd_tx_buffer_size,
+ *ksocknal_tunables.ksnd_rx_buffer_size);
+ if (rc != 0) {
+ CERROR ("Can't set buffer tx %d, rx %d buffers: %d\n",
+ *ksocknal_tunables.ksnd_tx_buffer_size,
+ *ksocknal_tunables.ksnd_rx_buffer_size, rc);
+ return (rc);
+ }
+
+/* TCP_BACKOFF_* sockopt tunables unsupported in stock kernels */
+
+ /* snapshot tunables */
+ keep_idle = *ksocknal_tunables.ksnd_keepalive_idle;
+ keep_count = *ksocknal_tunables.ksnd_keepalive_count;
+ keep_intvl = *ksocknal_tunables.ksnd_keepalive_intvl;
+
+ do_keepalive = (keep_idle > 0 && keep_count > 0 && keep_intvl > 0);
+
+ option = (do_keepalive ? 1 : 0);
+ set_fs (KERNEL_DS);
+ rc = sock_setsockopt (sock, SOL_SOCKET, SO_KEEPALIVE,
+ (char *)&option, sizeof (option));
+ set_fs (oldmm);
+ if (rc != 0) {
+ CERROR ("Can't set SO_KEEPALIVE: %d\n", rc);
+ return (rc);
+ }
+
+ if (!do_keepalive)
+ return (0);
+
+ set_fs (KERNEL_DS);
+ rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_KEEPIDLE,
+ (char *)&keep_idle, sizeof (keep_idle));
+ set_fs (oldmm);
+ if (rc != 0) {
+ CERROR ("Can't set TCP_KEEPIDLE: %d\n", rc);
+ return (rc);
+ }
+
+ set_fs (KERNEL_DS);
+ rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_KEEPINTVL,
+ (char *)&keep_intvl, sizeof (keep_intvl));
+ set_fs (oldmm);
+ if (rc != 0) {
+ CERROR ("Can't set TCP_KEEPINTVL: %d\n", rc);
+ return (rc);
+ }
+
+ set_fs (KERNEL_DS);
+ rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_KEEPCNT,
+ (char *)&keep_count, sizeof (keep_count));
+ set_fs (oldmm);
+ if (rc != 0) {
+ CERROR ("Can't set TCP_KEEPCNT: %d\n", rc);
+ return (rc);
+ }
+
+ return (0);
+}
+
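+/* Nudge out any data Nagle may be holding by momentarily forcing
+ * TCP_NODELAY on the socket, then restore the previous nonagle state. */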
+void
+ksocknal_lib_push_conn (ksock_conn_t *conn)
+{
+ struct sock *sk;
+ struct tcp_sock *tp;
+ int nonagle;
+ int val = 1;
+ int rc;
+ mm_segment_t oldmm;
+
+ rc = ksocknal_connsock_addref(conn);
+ if (rc != 0) /* being shut down */
+ return;
+
+ sk = conn->ksnc_sock->sk;
+ tp = tcp_sk(sk);
+
+ lock_sock (sk);
+ nonagle = tp->nonagle;
+ tp->nonagle = 1;
+ release_sock (sk);
+
+ oldmm = get_fs ();
+ set_fs (KERNEL_DS);
+
+ rc = sk->sk_prot->setsockopt (sk, SOL_TCP, TCP_NODELAY,
+ (char *)&val, sizeof (val));
+ LASSERT (rc == 0);
+
+ set_fs (oldmm);
+
+ lock_sock (sk);
+ tp->nonagle = nonagle;
+ release_sock (sk);
+
+ ksocknal_connsock_decref(conn);
+}
+
+extern void ksocknal_read_callback (ksock_conn_t *conn);
+extern void ksocknal_write_callback (ksock_conn_t *conn);
+/*
+ * socket call back in Linux
+ */
+static void
+ksocknal_data_ready (struct sock *sk, int n)
+{
+ ksock_conn_t *conn;
+
+ /* interleave correctly with closing sockets... */
+ LASSERT(!in_irq());
+ read_lock(&ksocknal_data.ksnd_global_lock);
+
+ conn = sk->sk_user_data;
+ if (conn == NULL) { /* raced with ksocknal_terminate_conn */
+ LASSERT (sk->sk_data_ready != &ksocknal_data_ready);
+ sk->sk_data_ready (sk, n);
+ } else
+ ksocknal_read_callback(conn);
+
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+}
+
+static void
+ksocknal_write_space (struct sock *sk)
+{
+ ksock_conn_t *conn;
+ int wspace;
+ int min_wspace;
+
+ /* interleave correctly with closing sockets... */
+ LASSERT(!in_irq());
+ read_lock(&ksocknal_data.ksnd_global_lock);
+
+ conn = sk->sk_user_data;
+ wspace = SOCKNAL_WSPACE(sk);
+ min_wspace = SOCKNAL_MIN_WSPACE(sk);
+
+ CDEBUG(D_NET, "sk %p wspace %d low water %d conn %p%s%s%s\n",
+ sk, wspace, min_wspace, conn,
+ (conn == NULL) ? "" : (conn->ksnc_tx_ready ?
+ " ready" : " blocked"),
+ (conn == NULL) ? "" : (conn->ksnc_tx_scheduled ?
+ " scheduled" : " idle"),
+ (conn == NULL) ? "" : (list_empty (&conn->ksnc_tx_queue) ?
+ " empty" : " queued"));
+
+ if (conn == NULL) { /* raced with ksocknal_terminate_conn */
+ LASSERT (sk->sk_write_space != &ksocknal_write_space);
+ sk->sk_write_space (sk);
+
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+ return;
+ }
+
+ if (wspace >= min_wspace) { /* got enough space */
+ ksocknal_write_callback(conn);
+
+ /* Clear SOCK_NOSPACE _after_ ksocknal_write_callback so the
+ * ENOMEM check in ksocknal_transmit is race-free (think about
+ * it). */
+
+ clear_bit (SOCK_NOSPACE, &sk->sk_socket->flags);
+ }
+
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+}
+
+void
+ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn)
+{
+ conn->ksnc_saved_data_ready = sock->sk->sk_data_ready;
+ conn->ksnc_saved_write_space = sock->sk->sk_write_space;
+}
+
+void
+ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn)
+{
+ sock->sk->sk_user_data = conn;
+ sock->sk->sk_data_ready = ksocknal_data_ready;
+ sock->sk->sk_write_space = ksocknal_write_space;
+ return;
+}
+
+void
+ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
+{
+ /* Remove conn's network callbacks.
+ * NB I _have_ to restore the callback, rather than storing a noop,
+ * since the socket could survive past this module being unloaded!! */
+ sock->sk->sk_data_ready = conn->ksnc_saved_data_ready;
+ sock->sk->sk_write_space = conn->ksnc_saved_write_space;
+
+ /* A callback could be in progress already; they hold a read lock
+ * on ksnd_global_lock (to serialise with me) and NOOP if
+ * sk_user_data is NULL. */
+ sock->sk->sk_user_data = NULL;
+
+ return ;
+}
+
+int
+ksocknal_lib_memory_pressure(ksock_conn_t *conn)
+{
+ int rc = 0;
+ ksock_sched_t *sched;
+
+ sched = conn->ksnc_scheduler;
+ spin_lock_bh(&sched->kss_lock);
+
+ if (!SOCK_TEST_NOSPACE(conn->ksnc_sock) &&
+ !conn->ksnc_tx_ready) {
+ /* SOCK_NOSPACE is set when the socket fills
+ * and cleared in the write_space callback
+ * (which also sets ksnc_tx_ready). If
+ * SOCK_NOSPACE and ksnc_tx_ready are BOTH
+ * zero, I didn't fill the socket and
+ * write_space won't reschedule me, so I
+ * return -ENOMEM to get my caller to retry
+ * after a timeout */
+ rc = -ENOMEM;
+ }
+
+ spin_unlock_bh(&sched->kss_lock);
+
+ return rc;
+}
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h
new file mode 100644
index 0000000..1cfc1b1
--- /dev/null
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h
@@ -0,0 +1,88 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_PORTAL_ALLOC
+
+#ifndef __LINUX_SOCKNAL_LIB_H__
+#define __LINUX_SOCKNAL_LIB_H__
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <linux/uio.h>
+#include <linux/if.h>
+
+#include <asm/uaccess.h>
+#include <asm/irq.h>
+
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/list.h>
+#include <linux/kmod.h>
+#include <linux/sysctl.h>
+#include <asm/div64.h>
+#include <linux/syscalls.h>
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/libcfs/linux/portals_compat25.h>
+
+#include <linux/crc32.h>
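+/* Incremental checksum over a buffer: crc32_le() in the normal build;
+ * the #else branch is a simple byte accumulator kept for reference. */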
+static inline __u32 ksocknal_csum(__u32 crc, unsigned char const *p, size_t len)
+{
+#if 1
+ return crc32_le(crc, p, len);
+#else
+ while (len-- > 0)
+ crc = ((crc + 0x100) & ~0xff) | ((crc + *p++) & 0xff) ;
+ return crc;
+#endif
+}
+
+#define SOCKNAL_WSPACE(sk) sk_stream_wspace(sk)
+#define SOCKNAL_MIN_WSPACE(sk) sk_stream_min_wspace(sk)
+
+/* assume one thread for each connection type */
+#define SOCKNAL_NSCHEDS 3
+#define SOCKNAL_NSCHEDS_HIGH (SOCKNAL_NSCHEDS << 1)
+
+#endif
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
new file mode 100644
index 0000000..8a474f6
--- /dev/null
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ *
+ * Author: Eric Barton <eric@bartonsoftware.com>
+ *
+ * Portals is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Portals is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Portals; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "socklnd.h"
+
+static int sock_timeout = 50;
+CFS_MODULE_PARM(sock_timeout, "i", int, 0644,
+ "dead socket timeout (seconds)");
+
+static int credits = 256;
+CFS_MODULE_PARM(credits, "i", int, 0444,
+ "# concurrent sends");
+
+static int peer_credits = 8;
+CFS_MODULE_PARM(peer_credits, "i", int, 0444,
+ "# concurrent sends to 1 peer");
+
+static int peer_buffer_credits = 0;
+CFS_MODULE_PARM(peer_buffer_credits, "i", int, 0444,
+ "# per-peer router buffer credits");
+
+static int peer_timeout = 180;
+CFS_MODULE_PARM(peer_timeout, "i", int, 0444,
+ "Seconds without aliveness news to declare peer dead (<=0 to disable)");
+
+/* Number of daemons in each thread pool (one pool per CPU partition);
+ * we will estimate a reasonable value based on the CPUs if it's not set. */
+static unsigned int nscheds;
+CFS_MODULE_PARM(nscheds, "i", int, 0444,
+ "# scheduler daemons in each pool while starting");
+
+static int nconnds = 4;
+CFS_MODULE_PARM(nconnds, "i", int, 0444,
+ "# connection daemons while starting");
+
+static int nconnds_max = 64;
+CFS_MODULE_PARM(nconnds_max, "i", int, 0444,
+ "max # connection daemons");
+
+static int min_reconnectms = 1000;
+CFS_MODULE_PARM(min_reconnectms, "i", int, 0644,
+ "min connection retry interval (mS)");
+
+static int max_reconnectms = 60000;
+CFS_MODULE_PARM(max_reconnectms, "i", int, 0644,
+ "max connection retry interval (mS)");
+
+# define DEFAULT_EAGER_ACK 0
+static int eager_ack = DEFAULT_EAGER_ACK;
+CFS_MODULE_PARM(eager_ack, "i", int, 0644,
+ "send tcp ack packets eagerly");
+
+static int typed_conns = 1;
+CFS_MODULE_PARM(typed_conns, "i", int, 0444,
+ "use different sockets for bulk");
+
+static int min_bulk = (1<<10);
+CFS_MODULE_PARM(min_bulk, "i", int, 0644,
+ "smallest 'large' message");
+
+# define DEFAULT_BUFFER_SIZE 0
+static int tx_buffer_size = DEFAULT_BUFFER_SIZE;
+CFS_MODULE_PARM(tx_buffer_size, "i", int, 0644,
+ "socket tx buffer size (0 for system default)");
+
+static int rx_buffer_size = DEFAULT_BUFFER_SIZE;
+CFS_MODULE_PARM(rx_buffer_size, "i", int, 0644,
+ "socket rx buffer size (0 for system default)");
+
+static int nagle = 0;
+CFS_MODULE_PARM(nagle, "i", int, 0644,
+ "enable NAGLE?");
+
+static int round_robin = 1;
+CFS_MODULE_PARM(round_robin, "i", int, 0644,
+ "Round robin for multiple interfaces");
+
+static int keepalive = 30;
+CFS_MODULE_PARM(keepalive, "i", int, 0644,
+ "# seconds before send keepalive");
+
+static int keepalive_idle = 30;
+CFS_MODULE_PARM(keepalive_idle, "i", int, 0644,
+ "# idle seconds before probe");
+
+#define DEFAULT_KEEPALIVE_COUNT 5
+static int keepalive_count = DEFAULT_KEEPALIVE_COUNT;
+CFS_MODULE_PARM(keepalive_count, "i", int, 0644,
+ "# missed probes == dead");
+
+static int keepalive_intvl = 5;
+CFS_MODULE_PARM(keepalive_intvl, "i", int, 0644,
+ "seconds between probes");
+
+static int enable_csum = 0;
+CFS_MODULE_PARM(enable_csum, "i", int, 0644,
+ "enable check sum");
+
+static int inject_csum_error = 0;
+CFS_MODULE_PARM(inject_csum_error, "i", int, 0644,
+ "set non-zero to inject a checksum error");
+
+static int nonblk_zcack = 1;
+CFS_MODULE_PARM(nonblk_zcack, "i", int, 0644,
+ "always send ZC-ACK on non-blocking connection");
+
+static unsigned int zc_min_payload = (16 << 10);
+CFS_MODULE_PARM(zc_min_payload, "i", int, 0644,
+ "minimum payload size to zero copy");
+
+static unsigned int zc_recv = 0;
+CFS_MODULE_PARM(zc_recv, "i", int, 0644,
+ "enable ZC recv for Chelsio driver");
+
+static unsigned int zc_recv_min_nfrags = 16;
+CFS_MODULE_PARM(zc_recv_min_nfrags, "i", int, 0644,
+ "minimum # of fragments to enable ZC recv");
+
+
+#if SOCKNAL_VERSION_DEBUG
+static int protocol = 3;
+CFS_MODULE_PARM(protocol, "i", int, 0644,
+ "protocol version");
+#endif
+
+ksock_tunables_t ksocknal_tunables;
+
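+/* Wire each module parameter above into the ksocknal_tunables pointer
+ * table, enforce the minimum zc_min_payload, then finish with the
+ * platform-specific initialization. */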
+int ksocknal_tunables_init(void)
+{
+
+ /* initialize ksocknal_tunables structure */
+ ksocknal_tunables.ksnd_timeout = &sock_timeout;
+ ksocknal_tunables.ksnd_nscheds = &nscheds;
+ ksocknal_tunables.ksnd_nconnds = &nconnds;
+ ksocknal_tunables.ksnd_nconnds_max = &nconnds_max;
+ ksocknal_tunables.ksnd_min_reconnectms = &min_reconnectms;
+ ksocknal_tunables.ksnd_max_reconnectms = &max_reconnectms;
+ ksocknal_tunables.ksnd_eager_ack = &eager_ack;
+ ksocknal_tunables.ksnd_typed_conns = &typed_conns;
+ ksocknal_tunables.ksnd_min_bulk = &min_bulk;
+ ksocknal_tunables.ksnd_tx_buffer_size = &tx_buffer_size;
+ ksocknal_tunables.ksnd_rx_buffer_size = &rx_buffer_size;
+ ksocknal_tunables.ksnd_nagle = &nagle;
+ ksocknal_tunables.ksnd_round_robin = &round_robin;
+ ksocknal_tunables.ksnd_keepalive = &keepalive;
+ ksocknal_tunables.ksnd_keepalive_idle = &keepalive_idle;
+ ksocknal_tunables.ksnd_keepalive_count = &keepalive_count;
+ ksocknal_tunables.ksnd_keepalive_intvl = &keepalive_intvl;
+ ksocknal_tunables.ksnd_credits = &credits;
+ ksocknal_tunables.ksnd_peertxcredits = &peer_credits;
+ ksocknal_tunables.ksnd_peerrtrcredits = &peer_buffer_credits;
+ ksocknal_tunables.ksnd_peertimeout = &peer_timeout;
+ ksocknal_tunables.ksnd_enable_csum = &enable_csum;
+ ksocknal_tunables.ksnd_inject_csum_error = &inject_csum_error;
+ ksocknal_tunables.ksnd_nonblk_zcack = &nonblk_zcack;
+ ksocknal_tunables.ksnd_zc_min_payload = &zc_min_payload;
+ ksocknal_tunables.ksnd_zc_recv = &zc_recv;
+ ksocknal_tunables.ksnd_zc_recv_min_nfrags = &zc_recv_min_nfrags;
+
+#if SOCKNAL_VERSION_DEBUG
+ ksocknal_tunables.ksnd_protocol = &protocol;
+#endif
+
+#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
+ ksocknal_tunables.ksnd_sysctl = NULL;
+#endif
+
+ if (*ksocknal_tunables.ksnd_zc_min_payload < (2 << 10))
+ *ksocknal_tunables.ksnd_zc_min_payload = (2 << 10);
+
+ /* initialize platform-specific tunables */
+ return ksocknal_lib_tunables_init();
+}
+
+void ksocknal_tunables_fini(void)
+{
+ ksocknal_lib_tunables_fini();
+}
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
new file mode 100644
index 0000000..71205e2
--- /dev/null
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
@@ -0,0 +1,797 @@
+/*
+ * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * Author: Zach Brown <zab@zabbo.net>
+ * Author: Peter J. Braam <braam@clusterfs.com>
+ * Author: Phil Schwan <phil@clusterfs.com>
+ * Author: Eric Barton <eric@bartonsoftware.com>
+ *
+ * This file is part of Portals, http://www.sf.net/projects/sandiaportals/
+ *
+ * Portals is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Portals is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Portals; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "socklnd.h"
+
+/*
+ * Protocol entries :
+ * pro_send_hello : send hello message
+ * pro_recv_hello : receive hello message
+ * pro_pack : pack message header
+ * pro_unpack : unpack message header
+ * pro_queue_tx_zcack() : Called holding BH lock: kss_lock
+ * return 1 if ACK is piggybacked, otherwise return 0
+ * pro_queue_tx_msg() : Called holding BH lock: kss_lock
+ * return the ACK that is piggybacked by my message, or NULL
+ * pro_handle_zcreq() : handler of incoming ZC-REQ
+ * pro_handle_zcack() : handler of incoming ZC-ACK
+ * pro_match_tx() : Called holding glock
+ */
+
+static ksock_tx_t *
+ksocknal_queue_tx_msg_v1(ksock_conn_t *conn, ksock_tx_t *tx_msg)
+{
+ /* V1.x, just enqueue it */
+ list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
+ return NULL;
+}
+
+void
+ksocknal_next_tx_carrier(ksock_conn_t *conn)
+{
+ ksock_tx_t *tx = conn->ksnc_tx_carrier;
+
+ /* Called holding BH lock: conn->ksnc_scheduler->kss_lock */
+ LASSERT (!list_empty(&conn->ksnc_tx_queue));
+ LASSERT (tx != NULL);
+
+ /* Next TX that can carry ZC-ACK or LNet message */
+ if (tx->tx_list.next == &conn->ksnc_tx_queue) {
+ /* no more packets queued */
+ conn->ksnc_tx_carrier = NULL;
+ } else {
+ conn->ksnc_tx_carrier = list_entry(tx->tx_list.next,
+ ksock_tx_t, tx_list);
+ LASSERT (conn->ksnc_tx_carrier->tx_msg.ksm_type == tx->tx_msg.ksm_type);
+ }
+}
+
+static int
+ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn,
+ ksock_tx_t *tx_ack, __u64 cookie)
+{
+ ksock_tx_t *tx = conn->ksnc_tx_carrier;
+
+ LASSERT (tx_ack == NULL ||
+ tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
+
+ /*
+ * Enqueue or piggyback tx_ack / cookie:
+ * . If no tx can piggyback the cookie of tx_ack (or 'cookie'), just
+ *   enqueue tx_ack (if tx_ack != NULL) and return 0.
+ * . If a tx can piggyback the cookie of tx_ack (or 'cookie'),
+ *   piggyback the cookie and return 1.
+ */
+ if (tx == NULL) {
+ if (tx_ack != NULL) {
+ list_add_tail(&tx_ack->tx_list,
+ &conn->ksnc_tx_queue);
+ conn->ksnc_tx_carrier = tx_ack;
+ }
+ return 0;
+ }
+
+ if (tx->tx_msg.ksm_type == KSOCK_MSG_NOOP) {
+ /* tx is noop zc-ack, can't piggyback zc-ack cookie */
+ if (tx_ack != NULL)
+ list_add_tail(&tx_ack->tx_list,
+ &conn->ksnc_tx_queue);
+ return 0;
+ }
+
+ LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_LNET);
+ LASSERT(tx->tx_msg.ksm_zc_cookies[1] == 0);
+
+ if (tx_ack != NULL)
+ cookie = tx_ack->tx_msg.ksm_zc_cookies[1];
+
+ /* piggyback the zc-ack cookie */
+ tx->tx_msg.ksm_zc_cookies[1] = cookie;
+ /* move on to the next TX which can carry cookie */
+ ksocknal_next_tx_carrier(conn);
+
+ return 1;
+}
+
+static ksock_tx_t *
+ksocknal_queue_tx_msg_v2(ksock_conn_t *conn, ksock_tx_t *tx_msg)
+{
+ ksock_tx_t *tx = conn->ksnc_tx_carrier;
+
+ /*
+ * Enqueue tx_msg:
+ * . If there is no NOOP on the connection, just enqueue
+ *   tx_msg and return NULL.
+ * . If there is a NOOP on the connection, piggyback its cookie
+ *   onto tx_msg, replace the NOOP on the queue and return it.
+ */
+ if (tx == NULL) { /* nothing on queue */
+ list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
+ conn->ksnc_tx_carrier = tx_msg;
+ return NULL;
+ }
+
+ if (tx->tx_msg.ksm_type == KSOCK_MSG_LNET) { /* nothing to carry */
+ list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
+ return NULL;
+ }
+
+ LASSERT (tx->tx_msg.ksm_type == KSOCK_MSG_NOOP);
+
+ /* There is a noop zc-ack that can be piggybacked */
+ tx_msg->tx_msg.ksm_zc_cookies[1] = tx->tx_msg.ksm_zc_cookies[1];
+ ksocknal_next_tx_carrier(conn);
+
+ /* use new_tx to replace the noop zc-ack packet */
+ list_add(&tx_msg->tx_list, &tx->tx_list);
+ list_del(&tx->tx_list);
+
+ return tx;
+}
+
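+/* V3.x ZC-ACK queuing: on the dedicated ACK connection, try to fold the
+ * new cookie into the pending NOOP carrier (replacing a keepalive PING,
+ * adding a second cookie, or extending a cookie range) before falling
+ * back to queueing tx_ack as a fresh NOOP. */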
+static int
+ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
+ ksock_tx_t *tx_ack, __u64 cookie)
+{
+ ksock_tx_t *tx;
+
+ if (conn->ksnc_type != SOCKLND_CONN_ACK)
+ return ksocknal_queue_tx_zcack_v2(conn, tx_ack, cookie);
+
+ /* non-blocking ZC-ACK (to router) */
+ LASSERT (tx_ack == NULL ||
+ tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
+
+ if ((tx = conn->ksnc_tx_carrier) == NULL) {
+ if (tx_ack != NULL) {
+ list_add_tail(&tx_ack->tx_list,
+ &conn->ksnc_tx_queue);
+ conn->ksnc_tx_carrier = tx_ack;
+ }
+ return 0;
+ }
+
+ /* conn->ksnc_tx_carrier != NULL */
+
+ if (tx_ack != NULL)
+ cookie = tx_ack->tx_msg.ksm_zc_cookies[1];
+
+ if (cookie == SOCKNAL_KEEPALIVE_PING) /* ignore keepalive PING */
+ return 1;
+
+ if (tx->tx_msg.ksm_zc_cookies[1] == SOCKNAL_KEEPALIVE_PING) {
+ /* replace the keepalive PING with a real ACK */
+ LASSERT (tx->tx_msg.ksm_zc_cookies[0] == 0);
+ tx->tx_msg.ksm_zc_cookies[1] = cookie;
+ return 1;
+ }
+
+ if (cookie == tx->tx_msg.ksm_zc_cookies[0] ||
+ cookie == tx->tx_msg.ksm_zc_cookies[1]) {
+ CWARN("%s: duplicated ZC cookie: "LPU64"\n",
+ libcfs_id2str(conn->ksnc_peer->ksnp_id), cookie);
+ return 1; /* XXX return error in the future */
+ }
+
+ if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
+ /* NOOP tx has only one ZC-ACK cookie, can carry at least one more */
+ if (tx->tx_msg.ksm_zc_cookies[1] > cookie) {
+ tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1];
+ tx->tx_msg.ksm_zc_cookies[1] = cookie;
+ } else {
+ tx->tx_msg.ksm_zc_cookies[0] = cookie;
+ }
+
+ if (tx->tx_msg.ksm_zc_cookies[0] - tx->tx_msg.ksm_zc_cookies[1] > 2) {
+ /* not likely to carry more ACKs, skip it to simplify logic */
+ ksocknal_next_tx_carrier(conn);
+ }
+
+ return 1;
+ }
+
+ /* takes two or more cookies already */
+
+ if (tx->tx_msg.ksm_zc_cookies[0] > tx->tx_msg.ksm_zc_cookies[1]) {
+ __u64 tmp = 0;
+
+ /* two separate cookies: (a+2, a) or (a+1, a) */
+ LASSERT (tx->tx_msg.ksm_zc_cookies[0] -
+ tx->tx_msg.ksm_zc_cookies[1] <= 2);
+
+ if (tx->tx_msg.ksm_zc_cookies[0] -
+ tx->tx_msg.ksm_zc_cookies[1] == 2) {
+ if (cookie == tx->tx_msg.ksm_zc_cookies[1] + 1)
+ tmp = cookie;
+ } else if (cookie == tx->tx_msg.ksm_zc_cookies[1] - 1) {
+ tmp = tx->tx_msg.ksm_zc_cookies[1];
+ } else if (cookie == tx->tx_msg.ksm_zc_cookies[0] + 1) {
+ tmp = tx->tx_msg.ksm_zc_cookies[0];
+ }
+
+ if (tmp != 0) {
+ /* range of cookies */
+ tx->tx_msg.ksm_zc_cookies[0] = tmp - 1;
+ tx->tx_msg.ksm_zc_cookies[1] = tmp + 1;
+ return 1;
+ }
+
+ } else {
+ /* ksm_zc_cookies[0] < ksm_zc_cookies[1], it is a range of cookies */
+ if (cookie >= tx->tx_msg.ksm_zc_cookies[0] &&
+ cookie <= tx->tx_msg.ksm_zc_cookies[1]) {
+ CWARN("%s: duplicated ZC cookie: "LPU64"\n",
+ libcfs_id2str(conn->ksnc_peer->ksnp_id), cookie);
+ return 1; /* XXX: return error in the future */
+ }
+
+ if (cookie == tx->tx_msg.ksm_zc_cookies[1] + 1) {
+ tx->tx_msg.ksm_zc_cookies[1] = cookie;
+ return 1;
+ }
+
+ if (cookie == tx->tx_msg.ksm_zc_cookies[0] - 1) {
+ tx->tx_msg.ksm_zc_cookies[0] = cookie;
+ return 1;
+ }
+ }
+
+ /* failed to piggyback ZC-ACK */
+ if (tx_ack != NULL) {
+ list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue);
+ /* the next tx can piggyback at least 1 ACK */
+ ksocknal_next_tx_carrier(conn);
+ }
+
+ return 0;
+}
+
+static int
+ksocknal_match_tx(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
+{
+ int nob;
+
+#if SOCKNAL_VERSION_DEBUG
+ if (!*ksocknal_tunables.ksnd_typed_conns)
+ return SOCKNAL_MATCH_YES;
+#endif
+
+ if (tx == NULL || tx->tx_lnetmsg == NULL) {
+ /* noop packet */
+ nob = offsetof(ksock_msg_t, ksm_u);
+ } else {
+ nob = tx->tx_lnetmsg->msg_len +
+ ((conn->ksnc_proto == &ksocknal_protocol_v1x) ?
+ sizeof(lnet_hdr_t) : sizeof(ksock_msg_t));
+ }
+
+ /* default checking for typed connection */
+ switch (conn->ksnc_type) {
+ default:
+ CERROR("ksnc_type bad: %u\n", conn->ksnc_type);
+ LBUG();
+ case SOCKLND_CONN_ANY:
+ return SOCKNAL_MATCH_YES;
+
+ case SOCKLND_CONN_BULK_IN:
+ return SOCKNAL_MATCH_MAY;
+
+ case SOCKLND_CONN_BULK_OUT:
+ if (nob < *ksocknal_tunables.ksnd_min_bulk)
+ return SOCKNAL_MATCH_MAY;
+ else
+ return SOCKNAL_MATCH_YES;
+
+ case SOCKLND_CONN_CONTROL:
+ if (nob >= *ksocknal_tunables.ksnd_min_bulk)
+ return SOCKNAL_MATCH_MAY;
+ else
+ return SOCKNAL_MATCH_YES;
+ }
+}
+
+static int
+ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
+{
+ int nob;
+
+ if (tx == NULL || tx->tx_lnetmsg == NULL)
+ nob = offsetof(ksock_msg_t, ksm_u);
+ else
+ nob = tx->tx_lnetmsg->msg_len + sizeof(ksock_msg_t);
+
+ switch (conn->ksnc_type) {
+ default:
+ CERROR("ksnc_type bad: %u\n", conn->ksnc_type);
+ LBUG();
+ case SOCKLND_CONN_ANY:
+ return SOCKNAL_MATCH_NO;
+
+ case SOCKLND_CONN_ACK:
+ if (nonblk)
+ return SOCKNAL_MATCH_YES;
+ else if (tx == NULL || tx->tx_lnetmsg == NULL)
+ return SOCKNAL_MATCH_MAY;
+ else
+ return SOCKNAL_MATCH_NO;
+
+ case SOCKLND_CONN_BULK_OUT:
+ if (nonblk)
+ return SOCKNAL_MATCH_NO;
+ else if (nob < *ksocknal_tunables.ksnd_min_bulk)
+ return SOCKNAL_MATCH_MAY;
+ else
+ return SOCKNAL_MATCH_YES;
+
+ case SOCKLND_CONN_CONTROL:
+ if (nonblk)
+ return SOCKNAL_MATCH_NO;
+ else if (nob >= *ksocknal_tunables.ksnd_min_bulk)
+ return SOCKNAL_MATCH_MAY;
+ else
+ return SOCKNAL_MATCH_YES;
+ }
+}
+
+/* (Sink) handle incoming ZC request from sender */
+static int
+ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote)
+{
+ ksock_peer_t *peer = c->ksnc_peer;
+ ksock_conn_t *conn;
+ ksock_tx_t *tx;
+ int rc;
+
+ read_lock(&ksocknal_data.ksnd_global_lock);
+
+ conn = ksocknal_find_conn_locked(peer, NULL, !!remote);
+ if (conn != NULL) {
+ ksock_sched_t *sched = conn->ksnc_scheduler;
+
+ LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);
+
+ spin_lock_bh(&sched->kss_lock);
+
+ rc = conn->ksnc_proto->pro_queue_tx_zcack(conn, NULL, cookie);
+
+ spin_unlock_bh(&sched->kss_lock);
+
+ if (rc) { /* piggybacked */
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+ return 0;
+ }
+ }
+
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+
+ /* ACK connection is not ready, or can't piggyback the ACK */
+ tx = ksocknal_alloc_tx_noop(cookie, !!remote);
+ if (tx == NULL)
+ return -ENOMEM;
+
+ if ((rc = ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id)) == 0)
+ return 0;
+
+ ksocknal_free_tx(tx);
+ return rc;
+}
+
+/* (Sender) handle ZC_ACK from sink */
+static int
+ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
+{
+ ksock_peer_t *peer = conn->ksnc_peer;
+ ksock_tx_t *tx;
+ ksock_tx_t *tmp;
+ LIST_HEAD (zlist);
+ int count;
+
+ if (cookie1 == 0)
+ cookie1 = cookie2;
+
+ count = (cookie1 > cookie2) ? 2 : (cookie2 - cookie1 + 1);
+
+ if (cookie2 == SOCKNAL_KEEPALIVE_PING &&
+ conn->ksnc_proto == &ksocknal_protocol_v3x) {
+ /* keepalive PING for V3.x, just ignore it */
+ return count == 1 ? 0 : -EPROTO;
+ }
+
+ spin_lock(&peer->ksnp_lock);
+
+ list_for_each_entry_safe(tx, tmp,
+ &peer->ksnp_zc_req_list, tx_zc_list) {
+ __u64 c = tx->tx_msg.ksm_zc_cookies[0];
+
+ if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) {
+ tx->tx_msg.ksm_zc_cookies[0] = 0;
+ list_del(&tx->tx_zc_list);
+ list_add(&tx->tx_zc_list, &zlist);
+
+ if (--count == 0)
+ break;
+ }
+ }
+
+ spin_unlock(&peer->ksnp_lock);
+
+ while (!list_empty(&zlist)) {
+ tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
+ list_del(&tx->tx_zc_list);
+ ksocknal_tx_decref(tx);
+ }
+
+ return count == 0 ? 0 : -EPROTO;
+}
+
+static int
+ksocknal_send_hello_v1 (ksock_conn_t *conn, ksock_hello_msg_t *hello)
+{
+ socket_t *sock = conn->ksnc_sock;
+ lnet_hdr_t *hdr;
+ lnet_magicversion_t *hmv;
+ int rc;
+ int i;
+
+ CLASSERT(sizeof(lnet_magicversion_t) == offsetof(lnet_hdr_t, src_nid));
+
+ LIBCFS_ALLOC(hdr, sizeof(*hdr));
+ if (hdr == NULL) {
+ CERROR("Can't allocate lnet_hdr_t\n");
+ return -ENOMEM;
+ }
+
+ hmv = (lnet_magicversion_t *)&hdr->dest_nid;
+
+ /* Re-organize the V2.x message header into a V1.x (lnet_hdr_t)
+ * header and send it out */
+ hmv->magic = cpu_to_le32 (LNET_PROTO_TCP_MAGIC);
+ hmv->version_major = cpu_to_le16 (KSOCK_PROTO_V1_MAJOR);
+ hmv->version_minor = cpu_to_le16 (KSOCK_PROTO_V1_MINOR);
+
+ if (the_lnet.ln_testprotocompat != 0) {
+ /* single-shot proto check */
+ LNET_LOCK();
+ if ((the_lnet.ln_testprotocompat & 1) != 0) {
+ hmv->version_major++; /* just different! */
+ the_lnet.ln_testprotocompat &= ~1;
+ }
+ if ((the_lnet.ln_testprotocompat & 2) != 0) {
+ hmv->magic = LNET_PROTO_MAGIC;
+ the_lnet.ln_testprotocompat &= ~2;
+ }
+ LNET_UNLOCK();
+ }
+
+ hdr->src_nid = cpu_to_le64 (hello->kshm_src_nid);
+ hdr->src_pid = cpu_to_le32 (hello->kshm_src_pid);
+ hdr->type = cpu_to_le32 (LNET_MSG_HELLO);
+ hdr->payload_length = cpu_to_le32 (hello->kshm_nips * sizeof(__u32));
+ hdr->msg.hello.type = cpu_to_le32 (hello->kshm_ctype);
+ hdr->msg.hello.incarnation = cpu_to_le64 (hello->kshm_src_incarnation);
+
+ rc = libcfs_sock_write(sock, hdr, sizeof(*hdr),lnet_acceptor_timeout());
+
+ if (rc != 0) {
+ CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n",
+ rc, &conn->ksnc_ipaddr, conn->ksnc_port);
+ goto out;
+ }
+
+ if (hello->kshm_nips == 0)
+ goto out;
+
+ for (i = 0; i < (int) hello->kshm_nips; i++) {
+ hello->kshm_ips[i] = __cpu_to_le32 (hello->kshm_ips[i]);
+ }
+
+ rc = libcfs_sock_write(sock, hello->kshm_ips,
+ hello->kshm_nips * sizeof(__u32),
+ lnet_acceptor_timeout());
+ if (rc != 0) {
+ CNETERR("Error %d sending HELLO payload (%d)"
+ " to %pI4h/%d\n", rc, hello->kshm_nips,
+ &conn->ksnc_ipaddr, conn->ksnc_port);
+ }
+out:
+ LIBCFS_FREE(hdr, sizeof(*hdr));
+
+ return rc;
+}
+
+static int
+ksocknal_send_hello_v2 (ksock_conn_t *conn, ksock_hello_msg_t *hello)
+{
+ socket_t *sock = conn->ksnc_sock;
+ int rc;
+
+ hello->kshm_magic = LNET_PROTO_MAGIC;
+ hello->kshm_version = conn->ksnc_proto->pro_version;
+
+ if (the_lnet.ln_testprotocompat != 0) {
+ /* single-shot proto check */
+ LNET_LOCK();
+ if ((the_lnet.ln_testprotocompat & 1) != 0) {
+ hello->kshm_version++; /* just different! */
+ the_lnet.ln_testprotocompat &= ~1;
+ }
+ LNET_UNLOCK();
+ }
+
+ rc = libcfs_sock_write(sock, hello, offsetof(ksock_hello_msg_t, kshm_ips),
+ lnet_acceptor_timeout());
+
+ if (rc != 0) {
+ CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n",
+ rc, &conn->ksnc_ipaddr, conn->ksnc_port);
+ return rc;
+ }
+
+ if (hello->kshm_nips == 0)
+ return 0;
+
+ rc = libcfs_sock_write(sock, hello->kshm_ips,
+ hello->kshm_nips * sizeof(__u32),
+ lnet_acceptor_timeout());
+ if (rc != 0) {
+ CNETERR("Error %d sending HELLO payload (%d)"
+ " to %pI4h/%d\n", rc, hello->kshm_nips,
+ &conn->ksnc_ipaddr, conn->ksnc_port);
+ }
+
+ return rc;
+}
+
+static int
+ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,int timeout)
+{
+ socket_t *sock = conn->ksnc_sock;
+ lnet_hdr_t *hdr;
+ int rc;
+ int i;
+
+ LIBCFS_ALLOC(hdr, sizeof(*hdr));
+ if (hdr == NULL) {
+ CERROR("Can't allocate lnet_hdr_t\n");
+ return -ENOMEM;
+ }
+
+ rc = libcfs_sock_read(sock, &hdr->src_nid,
+ sizeof (*hdr) - offsetof (lnet_hdr_t, src_nid),
+ timeout);
+ if (rc != 0) {
+ CERROR("Error %d reading rest of HELLO hdr from %pI4h\n",
+ rc, &conn->ksnc_ipaddr);
+ LASSERT (rc < 0 && rc != -EALREADY);
+ goto out;
+ }
+
+ /* ...and check we got what we expected */
+ if (hdr->type != cpu_to_le32 (LNET_MSG_HELLO)) {
+ CERROR("Expecting a HELLO hdr,"
+ " but got type %d from %pI4h\n",
+ le32_to_cpu (hdr->type),
+ &conn->ksnc_ipaddr);
+ rc = -EPROTO;
+ goto out;
+ }
+
+ hello->kshm_src_nid = le64_to_cpu (hdr->src_nid);
+ hello->kshm_src_pid = le32_to_cpu (hdr->src_pid);
+ hello->kshm_src_incarnation = le64_to_cpu (hdr->msg.hello.incarnation);
+ hello->kshm_ctype = le32_to_cpu (hdr->msg.hello.type);
+ hello->kshm_nips = le32_to_cpu (hdr->payload_length) /
+ sizeof (__u32);
+
+ if (hello->kshm_nips > LNET_MAX_INTERFACES) {
+ CERROR("Bad nips %d from ip %pI4h\n",
+ hello->kshm_nips, &conn->ksnc_ipaddr);
+ rc = -EPROTO;
+ goto out;
+ }
+
+ if (hello->kshm_nips == 0)
+ goto out;
+
+ rc = libcfs_sock_read(sock, hello->kshm_ips,
+ hello->kshm_nips * sizeof(__u32), timeout);
+ if (rc != 0) {
+ CERROR("Error %d reading IPs from ip %pI4h\n",
+ rc, &conn->ksnc_ipaddr);
+ LASSERT(rc < 0 && rc != -EALREADY);
+ goto out;
+ }
+
+ for (i = 0; i < (int) hello->kshm_nips; i++) {
+ hello->kshm_ips[i] = __le32_to_cpu(hello->kshm_ips[i]);
+
+ if (hello->kshm_ips[i] == 0) {
+ CERROR("Zero IP[%d] from ip %pI4h\n",
+ i, &conn->ksnc_ipaddr);
+ rc = -EPROTO;
+ break;
+ }
+ }
+out:
+ LIBCFS_FREE(hdr, sizeof(*hdr));
+
+ return rc;
+}
+
+static int
+ksocknal_recv_hello_v2 (ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout)
+{
+ socket_t *sock = conn->ksnc_sock;
+ int rc;
+ int i;
+
+ if (hello->kshm_magic == LNET_PROTO_MAGIC)
+ conn->ksnc_flip = 0;
+ else
+ conn->ksnc_flip = 1;
+
+ rc = libcfs_sock_read(sock, &hello->kshm_src_nid,
+ offsetof(ksock_hello_msg_t, kshm_ips) -
+ offsetof(ksock_hello_msg_t, kshm_src_nid),
+ timeout);
+ if (rc != 0) {
+ CERROR("Error %d reading HELLO from %pI4h\n",
+ rc, &conn->ksnc_ipaddr);
+ LASSERT(rc < 0 && rc != -EALREADY);
+ return rc;
+ }
+
+ if (conn->ksnc_flip) {
+ __swab32s(&hello->kshm_src_pid);
+ __swab64s(&hello->kshm_src_nid);
+ __swab32s(&hello->kshm_dst_pid);
+ __swab64s(&hello->kshm_dst_nid);
+ __swab64s(&hello->kshm_src_incarnation);
+ __swab64s(&hello->kshm_dst_incarnation);
+ __swab32s(&hello->kshm_ctype);
+ __swab32s(&hello->kshm_nips);
+ }
+
+ if (hello->kshm_nips > LNET_MAX_INTERFACES) {
+ CERROR("Bad nips %d from ip %pI4h\n",
+ hello->kshm_nips, &conn->ksnc_ipaddr);
+ return -EPROTO;
+ }
+
+ if (hello->kshm_nips == 0)
+ return 0;
+
+ rc = libcfs_sock_read(sock, hello->kshm_ips,
+ hello->kshm_nips * sizeof(__u32), timeout);
+ if (rc != 0) {
+ CERROR("Error %d reading IPs from ip %pI4h\n",
+ rc, &conn->ksnc_ipaddr);
+ LASSERT(rc < 0 && rc != -EALREADY);
+ return rc;
+ }
+
+ for (i = 0; i < (int) hello->kshm_nips; i++) {
+ if (conn->ksnc_flip)
+ __swab32s(&hello->kshm_ips[i]);
+
+ if (hello->kshm_ips[i] == 0) {
+ CERROR("Zero IP[%d] from ip %pI4h\n",
+ i, &conn->ksnc_ipaddr);
+ return -EPROTO;
+ }
+ }
+
+ return 0;
+}
+
+static void
+ksocknal_pack_msg_v1(ksock_tx_t *tx)
+{
+ /* V1.x has no KSOCK_MSG_NOOP */
+ LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
+ LASSERT(tx->tx_lnetmsg != NULL);
+
+ tx->tx_iov[0].iov_base = (void *)&tx->tx_lnetmsg->msg_hdr;
+ tx->tx_iov[0].iov_len = sizeof(lnet_hdr_t);
+
+ tx->tx_resid = tx->tx_nob = tx->tx_lnetmsg->msg_len + sizeof(lnet_hdr_t);
+}
+
+static void
+ksocknal_pack_msg_v2(ksock_tx_t *tx)
+{
+ tx->tx_iov[0].iov_base = (void *)&tx->tx_msg;
+
+ if (tx->tx_lnetmsg != NULL) {
+ LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
+
+ tx->tx_msg.ksm_u.lnetmsg.ksnm_hdr = tx->tx_lnetmsg->msg_hdr;
+ tx->tx_iov[0].iov_len = sizeof(ksock_msg_t);
+ tx->tx_resid = tx->tx_nob = sizeof(ksock_msg_t) + tx->tx_lnetmsg->msg_len;
+ } else {
+ LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP);
+
+ tx->tx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
+ tx->tx_resid = tx->tx_nob = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
+ }
+ /* Don't checksum before sending starts, because a ZC-ACK can still be piggybacked on this packet */
+}
+
+static void
+ksocknal_unpack_msg_v1(ksock_msg_t *msg)
+{
+ msg->ksm_csum = 0;
+ msg->ksm_type = KSOCK_MSG_LNET;
+ msg->ksm_zc_cookies[0] = msg->ksm_zc_cookies[1] = 0;
+}
+
+static void
+ksocknal_unpack_msg_v2(ksock_msg_t *msg)
+{
+ return; /* Do nothing */
+}
+
+ksock_proto_t ksocknal_protocol_v1x =
+{
+ .pro_version = KSOCK_PROTO_V1,
+ .pro_send_hello = ksocknal_send_hello_v1,
+ .pro_recv_hello = ksocknal_recv_hello_v1,
+ .pro_pack = ksocknal_pack_msg_v1,
+ .pro_unpack = ksocknal_unpack_msg_v1,
+ .pro_queue_tx_msg = ksocknal_queue_tx_msg_v1,
+ .pro_handle_zcreq = NULL,
+ .pro_handle_zcack = NULL,
+ .pro_queue_tx_zcack = NULL,
+ .pro_match_tx = ksocknal_match_tx
+};
+
+ksock_proto_t ksocknal_protocol_v2x =
+{
+ .pro_version = KSOCK_PROTO_V2,
+ .pro_send_hello = ksocknal_send_hello_v2,
+ .pro_recv_hello = ksocknal_recv_hello_v2,
+ .pro_pack = ksocknal_pack_msg_v2,
+ .pro_unpack = ksocknal_unpack_msg_v2,
+ .pro_queue_tx_msg = ksocknal_queue_tx_msg_v2,
+ .pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v2,
+ .pro_handle_zcreq = ksocknal_handle_zcreq,
+ .pro_handle_zcack = ksocknal_handle_zcack,
+ .pro_match_tx = ksocknal_match_tx
+};
+
+ksock_proto_t ksocknal_protocol_v3x =
+{
+ .pro_version = KSOCK_PROTO_V3,
+ .pro_send_hello = ksocknal_send_hello_v2,
+ .pro_recv_hello = ksocknal_recv_hello_v2,
+ .pro_pack = ksocknal_pack_msg_v2,
+ .pro_unpack = ksocknal_unpack_msg_v2,
+ .pro_queue_tx_msg = ksocknal_queue_tx_msg_v2,
+ .pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v3,
+ .pro_handle_zcreq = ksocknal_handle_zcreq,
+ .pro_handle_zcack = ksocknal_handle_zcack,
+ .pro_match_tx = ksocknal_match_tx_v3
+};
diff --git a/drivers/staging/lustre/lnet/lnet/Makefile b/drivers/staging/lustre/lnet/lnet/Makefile
new file mode 100644
index 0000000..b815fe1
--- /dev/null
+++ b/drivers/staging/lustre/lnet/lnet/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_LNET) += lnet.o
+
+lnet-y := api-ni.o config.o lib-me.o lib-msg.o lib-eq.o \
+ lib-md.o lib-ptl.o lib-move.o module.o lo.o router.o \
+ router_proc.o acceptor.o peer.o
+
+
+ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c
new file mode 100644
index 0000000..bb15bde
--- /dev/null
+++ b/drivers/staging/lustre/lnet/lnet/acceptor.c
@@ -0,0 +1,527 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+#include <linux/lnet/lib-lnet.h>
+
+
+static int accept_port = 988;
+static int accept_backlog = 127;
+static int accept_timeout = 5;
+
+struct {
+ int pta_shutdown;
+ socket_t *pta_sock;
+ struct completion pta_signal;
+} lnet_acceptor_state;
+
+int
+lnet_acceptor_port(void)
+{
+ return accept_port;
+}
+
+static inline int
+lnet_accept_magic(__u32 magic, __u32 constant)
+{
+ return (magic == constant ||
+ magic == __swab32(constant));
+}
+
+
+EXPORT_SYMBOL(lnet_acceptor_port);
+
+static char *accept = "secure";
+
+CFS_MODULE_PARM(accept, "s", charp, 0444,
+ "Accept connections (secure|all|none)");
+CFS_MODULE_PARM(accept_port, "i", int, 0444,
+ "Acceptor's port (same on all nodes)");
+CFS_MODULE_PARM(accept_backlog, "i", int, 0444,
+ "Acceptor's listen backlog");
+CFS_MODULE_PARM(accept_timeout, "i", int, 0644,
+ "Acceptor's timeout (seconds)");
+
+static char *accept_type = NULL;
+
+int
+lnet_acceptor_get_tunables(void)
+{
+ /* Userland acceptor uses 'accept_type' instead of 'accept', due to
+ * conflict with 'accept(2)', but kernel acceptor still uses 'accept'
+ * for compatibility. Hence the trick. */
+ accept_type = accept;
+ return 0;
+}
+
+int
+lnet_acceptor_timeout(void)
+{
+ return accept_timeout;
+}
+EXPORT_SYMBOL(lnet_acceptor_timeout);
+
+void
+lnet_connect_console_error (int rc, lnet_nid_t peer_nid,
+ __u32 peer_ip, int peer_port)
+{
+ switch (rc) {
+ /* "normal" errors */
+ case -ECONNREFUSED:
+ CNETERR("Connection to %s at host %pI4h on port %d was "
+ "refused: check that Lustre is running on that node.\n",
+ libcfs_nid2str(peer_nid),
+ &peer_ip, peer_port);
+ break;
+ case -EHOSTUNREACH:
+ case -ENETUNREACH:
+ CNETERR("Connection to %s at host %pI4h "
+ "was unreachable: the network or that node may "
+ "be down, or Lustre may be misconfigured.\n",
+ libcfs_nid2str(peer_nid), &peer_ip);
+ break;
+ case -ETIMEDOUT:
+ CNETERR("Connection to %s at host %pI4h on "
+ "port %d took too long: that node may be hung "
+ "or experiencing high load.\n",
+ libcfs_nid2str(peer_nid),
+ &peer_ip, peer_port);
+ break;
+ case -ECONNRESET:
+ LCONSOLE_ERROR_MSG(0x11b, "Connection to %s at host %pI4h"
+ " on port %d was reset: "
+ "is it running a compatible version of "
+ "Lustre and is %s one of its NIDs?\n",
+ libcfs_nid2str(peer_nid),
+ &peer_ip, peer_port,
+ libcfs_nid2str(peer_nid));
+ break;
+ case -EPROTO:
+ LCONSOLE_ERROR_MSG(0x11c, "Protocol error connecting to %s at "
+ "host %pI4h on port %d: is it running "
+ "a compatible version of Lustre?\n",
+ libcfs_nid2str(peer_nid),
+ &peer_ip, peer_port);
+ break;
+ case -EADDRINUSE:
+ LCONSOLE_ERROR_MSG(0x11d, "No privileged ports available to "
+ "connect to %s at host %pI4h on port "
+ "%d\n", libcfs_nid2str(peer_nid),
+ &peer_ip, peer_port);
+ break;
+ default:
+ LCONSOLE_ERROR_MSG(0x11e, "Unexpected error %d connecting to %s"
+ " at host %pI4h on port %d\n", rc,
+ libcfs_nid2str(peer_nid),
+ &peer_ip, peer_port);
+ break;
+ }
+}
+EXPORT_SYMBOL(lnet_connect_console_error);
+
+int
+lnet_connect(socket_t **sockp, lnet_nid_t peer_nid,
+ __u32 local_ip, __u32 peer_ip, int peer_port)
+{
+ lnet_acceptor_connreq_t cr;
+ socket_t *sock;
+ int rc;
+ int port;
+ int fatal;
+
+ CLASSERT (sizeof(cr) <= 16); /* not too big to be on the stack */
+
+ for (port = LNET_ACCEPTOR_MAX_RESERVED_PORT;
+ port >= LNET_ACCEPTOR_MIN_RESERVED_PORT;
+ --port) {
+ /* Iterate through reserved ports. */
+
+ rc = libcfs_sock_connect(&sock, &fatal,
+ local_ip, port,
+ peer_ip, peer_port);
+ if (rc != 0) {
+ if (fatal)
+ goto failed;
+ continue;
+ }
+
+ CLASSERT (LNET_PROTO_ACCEPTOR_VERSION == 1);
+
+ cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC;
+ cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
+ cr.acr_nid = peer_nid;
+
+ if (the_lnet.ln_testprotocompat != 0) {
+ /* single-shot proto check */
+ lnet_net_lock(LNET_LOCK_EX);
+ if ((the_lnet.ln_testprotocompat & 4) != 0) {
+ cr.acr_version++;
+ the_lnet.ln_testprotocompat &= ~4;
+ }
+ if ((the_lnet.ln_testprotocompat & 8) != 0) {
+ cr.acr_magic = LNET_PROTO_MAGIC;
+ the_lnet.ln_testprotocompat &= ~8;
+ }
+ lnet_net_unlock(LNET_LOCK_EX);
+ }
+
+ rc = libcfs_sock_write(sock, &cr, sizeof(cr),
+ accept_timeout);
+ if (rc != 0)
+ goto failed_sock;
+
+ *sockp = sock;
+ return 0;
+ }
+
+ rc = -EADDRINUSE;
+ goto failed;
+
+ failed_sock:
+ libcfs_sock_release(sock);
+ failed:
+ lnet_connect_console_error(rc, peer_nid, peer_ip, peer_port);
+ return rc;
+}
+EXPORT_SYMBOL(lnet_connect);
+
+
+/* Below is the code common for both kernel and MT user-space */
+
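+/* Handle one incoming acceptor connection: validate the magic, read the
+ * connection request version and target NID, find the matching NI and
+ * hand the socket to its LND via lnd_accept(). */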
+int
+lnet_accept(socket_t *sock, __u32 magic)
+{
+ lnet_acceptor_connreq_t cr;
+ __u32 peer_ip;
+ int peer_port;
+ int rc;
+ int flip;
+ lnet_ni_t *ni;
+ char *str;
+
+ LASSERT (sizeof(cr) <= 16); /* not too big for the stack */
+
+ rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
+ LASSERT (rc == 0); /* we succeeded before */
+
+ if (!lnet_accept_magic(magic, LNET_PROTO_ACCEPTOR_MAGIC)) {
+
+ if (lnet_accept_magic(magic, LNET_PROTO_MAGIC)) {
+ /* future version compatibility!
+ * When LNET unifies protocols over all LNDs, the first
+ * thing sent will be a version query. I send back
+ * LNET_PROTO_ACCEPTOR_MAGIC to tell her I'm "old" */
+
+ memset (&cr, 0, sizeof(cr));
+ cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC;
+ cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
+ rc = libcfs_sock_write(sock, &cr, sizeof(cr),
+ accept_timeout);
+
+ if (rc != 0)
+ CERROR("Error sending magic+version in response"
+ "to LNET magic from %pI4h: %d\n",
+ &peer_ip, rc);
+ return -EPROTO;
+ }
+
+ if (magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC))
+ str = "'old' socknal/tcpnal";
+ else if (lnet_accept_magic(magic, LNET_PROTO_RA_MAGIC))
+ str = "'old' ranal";
+ else
+ str = "unrecognised";
+
+ LCONSOLE_ERROR_MSG(0x11f, "Refusing connection from %pI4h"
+ " magic %08x: %s acceptor protocol\n",
+ &peer_ip, magic, str);
+ return -EPROTO;
+ }
+
+ flip = (magic != LNET_PROTO_ACCEPTOR_MAGIC);
+
+ rc = libcfs_sock_read(sock, &cr.acr_version,
+ sizeof(cr.acr_version),
+ accept_timeout);
+ if (rc != 0) {
+ CERROR("Error %d reading connection request version from "
+ "%pI4h\n", rc, &peer_ip);
+ return -EIO;
+ }
+
+ if (flip)
+ __swab32s(&cr.acr_version);
+
+ if (cr.acr_version != LNET_PROTO_ACCEPTOR_VERSION) {
+ /* future version compatibility!
+ * An acceptor-specific protocol rev will first send a version
+ * query. I send back my current version to tell her I'm
+ * "old". */
+ int peer_version = cr.acr_version;
+
+ memset (&cr, 0, sizeof(cr));
+ cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC;
+ cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
+
+ rc = libcfs_sock_write(sock, &cr, sizeof(cr),
+ accept_timeout);
+
+ if (rc != 0)
+ CERROR("Error sending magic+version in response"
+ "to version %d from %pI4h: %d\n",
+ peer_version, &peer_ip, rc);
+ return -EPROTO;
+ }
+
+ rc = libcfs_sock_read(sock, &cr.acr_nid,
+ sizeof(cr) -
+ offsetof(lnet_acceptor_connreq_t, acr_nid),
+ accept_timeout);
+ if (rc != 0) {
+ CERROR("Error %d reading connection request from "
+ "%pI4h\n", rc, &peer_ip);
+ return -EIO;
+ }
+
+ if (flip)
+ __swab64s(&cr.acr_nid);
+
+ ni = lnet_net2ni(LNET_NIDNET(cr.acr_nid));
+ if (ni == NULL || /* no matching net */
+ ni->ni_nid != cr.acr_nid) { /* right NET, wrong NID! */
+ if (ni != NULL)
+ lnet_ni_decref(ni);
+ LCONSOLE_ERROR_MSG(0x120, "Refusing connection from %pI4h"
+ " for %s: No matching NI\n",
+ &peer_ip, libcfs_nid2str(cr.acr_nid));
+ return -EPERM;
+ }
+
+ if (ni->ni_lnd->lnd_accept == NULL) {
+ /* This catches a request for the loopback LND */
+ lnet_ni_decref(ni);
+ LCONSOLE_ERROR_MSG(0x121, "Refusing connection from %pI4h"
+ " for %s: NI doesn not accept IP connections\n",
+ &peer_ip, libcfs_nid2str(cr.acr_nid));
+ return -EPERM;
+ }
+
+ CDEBUG(D_NET, "Accept %s from %pI4h\n",
+ libcfs_nid2str(cr.acr_nid), &peer_ip);
+
+ rc = ni->ni_lnd->lnd_accept(ni, sock);
+
+ lnet_ni_decref(ni);
+ return rc;
+}
+
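+/* Acceptor thread: listen on accept_port, then loop accepting
+ * connections, optionally refusing peers on non-privileged ports, reading
+ * the initial magic and passing each socket to lnet_accept(). */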
+int
+lnet_acceptor(void *arg)
+{
+ socket_t *newsock;
+ int rc;
+ __u32 magic;
+ __u32 peer_ip;
+ int peer_port;
+ int secure = (int)((long_ptr_t)arg);
+
+ LASSERT (lnet_acceptor_state.pta_sock == NULL);
+
+ cfs_block_allsigs();
+
+ rc = libcfs_sock_listen(&lnet_acceptor_state.pta_sock,
+ 0, accept_port, accept_backlog);
+ if (rc != 0) {
+ if (rc == -EADDRINUSE)
+ LCONSOLE_ERROR_MSG(0x122, "Can't start acceptor on port"
+ " %d: port already in use\n",
+ accept_port);
+ else
+ LCONSOLE_ERROR_MSG(0x123, "Can't start acceptor on port "
+ "%d: unexpected error %d\n",
+ accept_port, rc);
+
+ lnet_acceptor_state.pta_sock = NULL;
+ } else {
+ LCONSOLE(0, "Accept %s, port %d\n", accept_type, accept_port);
+ }
+
+ /* set init status and unblock parent */
+ lnet_acceptor_state.pta_shutdown = rc;
+ complete(&lnet_acceptor_state.pta_signal);
+
+ if (rc != 0)
+ return rc;
+
+ while (!lnet_acceptor_state.pta_shutdown) {
+
+ rc = libcfs_sock_accept(&newsock, lnet_acceptor_state.pta_sock);
+ if (rc != 0) {
+ if (rc != -EAGAIN) {
+ CWARN("Accept error %d: pausing...\n", rc);
+ cfs_pause(cfs_time_seconds(1));
+ }
+ continue;
+ }
+
+ /* maybe we were woken up by libcfs_sock_abort_accept() */
+ if (lnet_acceptor_state.pta_shutdown) {
+ libcfs_sock_release(newsock);
+ break;
+ }
+
+ rc = libcfs_sock_getaddr(newsock, 1, &peer_ip, &peer_port);
+ if (rc != 0) {
+ CERROR("Can't determine new connection's address\n");
+ goto failed;
+ }
+
+ if (secure && peer_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
+ CERROR("Refusing connection from %pI4h: "
+ "insecure port %d\n",
+ &peer_ip, peer_port);
+ goto failed;
+ }
+
+ rc = libcfs_sock_read(newsock, &magic, sizeof(magic),
+ accept_timeout);
+ if (rc != 0) {
+ CERROR("Error %d reading connection request from "
+ "%pI4h\n", rc, &peer_ip);
+ goto failed;
+ }
+
+ rc = lnet_accept(newsock, magic);
+ if (rc != 0)
+ goto failed;
+
+ continue;
+
+ failed:
+ libcfs_sock_release(newsock);
+ }
+
+ libcfs_sock_release(lnet_acceptor_state.pta_sock);
+ lnet_acceptor_state.pta_sock = NULL;
+
+ CDEBUG(D_NET, "Acceptor stopping\n");
+
+ /* unblock lnet_acceptor_stop() */
+ complete(&lnet_acceptor_state.pta_signal);
+ return 0;
+}
+
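+/* Parse the 'accept' tunable: sets *sec and returns 1 when the acceptor
+ * is required ("secure" or "all"), 0 for "none", or -EINVAL on a parse
+ * error. */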
+static inline int
+accept2secure(const char *acc, long *sec)
+{
+ if (!strcmp(acc, "secure")) {
+ *sec = 1;
+ return 1;
+ } else if (!strcmp(acc, "all")) {
+ *sec = 0;
+ return 1;
+ } else if (!strcmp(acc, "none")) {
+ return 0;
+ } else {
+ LCONSOLE_ERROR_MSG(0x124, "Can't parse 'accept=\"%s\"'\n",
+ acc);
+ return -EINVAL;
+ }
+}
+
+int
+lnet_acceptor_start(void)
+{
+ int rc;
+ long rc2;
+ long secure;
+
+ LASSERT (lnet_acceptor_state.pta_sock == NULL);
+
+ rc = lnet_acceptor_get_tunables();
+ if (rc != 0)
+ return rc;
+
+
+ init_completion(&lnet_acceptor_state.pta_signal);
+ rc = accept2secure(accept_type, &secure);
+ if (rc <= 0) {
+ fini_completion(&lnet_acceptor_state.pta_signal);
+ return rc;
+ }
+
+ if (lnet_count_acceptor_nis() == 0) /* not required */
+ return 0;
+
+ rc2 = PTR_ERR(kthread_run(lnet_acceptor,
+ (void *)(ulong_ptr_t)secure,
+ "acceptor_%03ld", secure));
+ if (IS_ERR_VALUE(rc2)) {
+ CERROR("Can't start acceptor thread: %ld\n", rc2);
+ fini_completion(&lnet_acceptor_state.pta_signal);
+
+ return -ESRCH;
+ }
+
+ /* wait for acceptor to startup */
+ wait_for_completion(&lnet_acceptor_state.pta_signal);
+
+ if (!lnet_acceptor_state.pta_shutdown) {
+ /* started OK */
+ LASSERT(lnet_acceptor_state.pta_sock != NULL);
+ return 0;
+ }
+
+ LASSERT(lnet_acceptor_state.pta_sock == NULL);
+ fini_completion(&lnet_acceptor_state.pta_signal);
+
+ return -ENETDOWN;
+}
+
+void
+lnet_acceptor_stop(void)
+{
+ if (lnet_acceptor_state.pta_sock == NULL) /* not running */
+ return;
+
+ lnet_acceptor_state.pta_shutdown = 1;
+ libcfs_sock_abort_accept(lnet_acceptor_state.pta_sock);
+
+ /* block until acceptor signals exit */
+ wait_for_completion(&lnet_acceptor_state.pta_signal);
+
+ fini_completion(&lnet_acceptor_state.pta_signal);
+}
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
new file mode 100644
index 0000000..160a429
--- /dev/null
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -0,0 +1,1944 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+#include <linux/lnet/lib-lnet.h>
+#include <linux/log2.h>
+
+#define D_LNI D_CONSOLE
+
+lnet_t the_lnet; /* THE state of the network */
+EXPORT_SYMBOL(the_lnet);
+
+
+static char *ip2nets = "";
+CFS_MODULE_PARM(ip2nets, "s", charp, 0444,
+ "LNET network <- IP table");
+
+static char *networks = "";
+CFS_MODULE_PARM(networks, "s", charp, 0444,
+ "local networks");
+
+static char *routes = "";
+CFS_MODULE_PARM(routes, "s", charp, 0444,
+ "routes to non-local networks");
+
+static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
+CFS_MODULE_PARM(rnet_htable_size, "i", int, 0444,
+ "size of remote network hash table");
+
+char *
+lnet_get_routes(void)
+{
+ return routes;
+}
+
+char *
+lnet_get_networks(void)
+{
+ char *nets;
+ int rc;
+
+ if (*networks != 0 && *ip2nets != 0) {
+ LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or "
+ "'ip2nets' but not both at once\n");
+ return NULL;
+ }
+
+ if (*ip2nets != 0) {
+ rc = lnet_parse_ip2nets(&nets, ip2nets);
+ return (rc == 0) ? nets : NULL;
+ }
+
+ if (*networks != 0)
+ return networks;
+
+ return "tcp";
+}
+
+void
+lnet_init_locks(void)
+{
+ spin_lock_init(&the_lnet.ln_eq_wait_lock);
+ init_waitqueue_head(&the_lnet.ln_eq_waitq);
+ mutex_init(&the_lnet.ln_lnd_mutex);
+ mutex_init(&the_lnet.ln_api_mutex);
+}
+
+void
+lnet_fini_locks(void)
+{
+}
+
+
+static int
+lnet_create_remote_nets_table(void)
+{
+ int i;
+ struct list_head *hash;
+
+ LASSERT(the_lnet.ln_remote_nets_hash == NULL);
+ LASSERT(the_lnet.ln_remote_nets_hbits > 0);
+ LIBCFS_ALLOC(hash, LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash));
+ if (hash == NULL) {
+ CERROR("Failed to create remote nets hash table\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
+ INIT_LIST_HEAD(&hash[i]);
+ the_lnet.ln_remote_nets_hash = hash;
+ return 0;
+}
+
+static void
+lnet_destroy_remote_nets_table(void)
+{
+ int i;
+ struct list_head *hash;
+
+ if (the_lnet.ln_remote_nets_hash == NULL)
+ return;
+
+ for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
+ LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
+
+ LIBCFS_FREE(the_lnet.ln_remote_nets_hash,
+ LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash));
+ the_lnet.ln_remote_nets_hash = NULL;
+}
+
+static void
+lnet_destroy_locks(void)
+{
+ if (the_lnet.ln_res_lock != NULL) {
+ cfs_percpt_lock_free(the_lnet.ln_res_lock);
+ the_lnet.ln_res_lock = NULL;
+ }
+
+ if (the_lnet.ln_net_lock != NULL) {
+ cfs_percpt_lock_free(the_lnet.ln_net_lock);
+ the_lnet.ln_net_lock = NULL;
+ }
+
+ lnet_fini_locks();
+}
+
+static int
+lnet_create_locks(void)
+{
+ lnet_init_locks();
+
+ the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
+ if (the_lnet.ln_res_lock == NULL)
+ goto failed;
+
+ the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
+ if (the_lnet.ln_net_lock == NULL)
+ goto failed;
+
+ return 0;
+
+ failed:
+ lnet_destroy_locks();
+ return -ENOMEM;
+}
+
+void lnet_assert_wire_constants (void)
+{
+ /* Wire protocol assertions generated by 'wirecheck'
+ * running on Linux robert.bartonsoftware.com 2.6.8-1.521
+ * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
+ * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) */
+
+ /* Constants... */
+ CLASSERT (LNET_PROTO_TCP_MAGIC == 0xeebc0ded);
+ CLASSERT (LNET_PROTO_TCP_VERSION_MAJOR == 1);
+ CLASSERT (LNET_PROTO_TCP_VERSION_MINOR == 0);
+ CLASSERT (LNET_MSG_ACK == 0);
+ CLASSERT (LNET_MSG_PUT == 1);
+ CLASSERT (LNET_MSG_GET == 2);
+ CLASSERT (LNET_MSG_REPLY == 3);
+ CLASSERT (LNET_MSG_HELLO == 4);
+
+ /* Checks for struct ptl_handle_wire_t */
+ CLASSERT ((int)sizeof(lnet_handle_wire_t) == 16);
+ CLASSERT ((int)offsetof(lnet_handle_wire_t, wh_interface_cookie) == 0);
+ CLASSERT ((int)sizeof(((lnet_handle_wire_t *)0)->wh_interface_cookie) == 8);
+ CLASSERT ((int)offsetof(lnet_handle_wire_t, wh_object_cookie) == 8);
+ CLASSERT ((int)sizeof(((lnet_handle_wire_t *)0)->wh_object_cookie) == 8);
+
+ /* Checks for struct lnet_magicversion_t */
+ CLASSERT ((int)sizeof(lnet_magicversion_t) == 8);
+ CLASSERT ((int)offsetof(lnet_magicversion_t, magic) == 0);
+ CLASSERT ((int)sizeof(((lnet_magicversion_t *)0)->magic) == 4);
+ CLASSERT ((int)offsetof(lnet_magicversion_t, version_major) == 4);
+ CLASSERT ((int)sizeof(((lnet_magicversion_t *)0)->version_major) == 2);
+ CLASSERT ((int)offsetof(lnet_magicversion_t, version_minor) == 6);
+ CLASSERT ((int)sizeof(((lnet_magicversion_t *)0)->version_minor) == 2);
+
+ /* Checks for struct lnet_hdr_t */
+ CLASSERT ((int)sizeof(lnet_hdr_t) == 72);
+ CLASSERT ((int)offsetof(lnet_hdr_t, dest_nid) == 0);
+ CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->dest_nid) == 8);
+ CLASSERT ((int)offsetof(lnet_hdr_t, src_nid) == 8);
+ CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->src_nid) == 8);
+ CLASSERT ((int)offsetof(lnet_hdr_t, dest_pid) == 16);
+ CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->dest_pid) == 4);
+ CLASSERT ((int)offsetof(lnet_hdr_t, src_pid) == 20);
+ CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->src_pid) == 4);
+ CLASSERT ((int)offsetof(lnet_hdr_t, type) == 24);
+ CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->type) == 4);
+ CLASSERT ((int)offsetof(lnet_hdr_t, payload_length) == 28);
+ CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->payload_length) == 4);
+ CLASSERT ((int)offsetof(lnet_hdr_t, msg) == 32);
+ CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg) == 40);
+
+ /* Ack */
+ CLASSERT ((int)offsetof(lnet_hdr_t, msg.ack.dst_wmd) == 32);
+ CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.ack.dst_wmd) == 16);
+ CLASSERT ((int)offsetof(lnet_hdr_t, msg.ack.match_bits) == 48);
+ CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.ack.match_bits) == 8);
+ CLASSERT ((int)offsetof(lnet_hdr_t, msg.ack.mlength) == 56);
+ CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.ack.mlength) == 4);
+
+ /* Put */
+ CLASSERT ((int)offsetof(lnet_hdr_t, msg.put.ack_wmd) == 32);
+ CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.put.ack_wmd) == 16);
+ CLASSERT ((int)offsetof(lnet_hdr_t, msg.put.match_bits) == 48);
+ CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.put.match_bits) == 8);
+ CLASSERT ((int)offsetof(lnet_hdr_t, msg.put.hdr_data) == 56);
+ CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.put.hdr_data) == 8);
+ CLASSERT ((int)offsetof(lnet_hdr_t, msg.put.ptl_index) == 64);
+ CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.put.ptl_index) == 4);
+ CLASSERT ((int)offsetof(lnet_hdr_t, msg.put.offset) == 68);
+ CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.put.offset) == 4);
+
+ /* Get */
+ CLASSERT ((int)offsetof(lnet_hdr_t, msg.get.return_wmd) == 32);
+ CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.get.return_wmd) == 16);
+ CLASSERT ((int)offsetof(lnet_hdr_t, msg.get.match_bits) == 48);
+ CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.get.match_bits) == 8);
+ CLASSERT ((int)offsetof(lnet_hdr_t, msg.get.ptl_index) == 56);
+ CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.get.ptl_index) == 4);
+ CLASSERT ((int)offsetof(lnet_hdr_t, msg.get.src_offset) == 60);
+ CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.get.src_offset) == 4);
+ CLASSERT ((int)offsetof(lnet_hdr_t, msg.get.sink_length) == 64);
+ CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.get.sink_length) == 4);
+
+ /* Reply */
+ CLASSERT ((int)offsetof(lnet_hdr_t, msg.reply.dst_wmd) == 32);
+ CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.reply.dst_wmd) == 16);
+
+ /* Hello */
+ CLASSERT ((int)offsetof(lnet_hdr_t, msg.hello.incarnation) == 32);
+ CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.hello.incarnation) == 8);
+ CLASSERT ((int)offsetof(lnet_hdr_t, msg.hello.type) == 40);
+ CLASSERT ((int)sizeof(((lnet_hdr_t *)0)->msg.hello.type) == 4);
+}
+
+lnd_t *
+lnet_find_lnd_by_type (int type)
+{
+ lnd_t *lnd;
+ struct list_head *tmp;
+
+ /* holding lnd mutex */
+ list_for_each (tmp, &the_lnet.ln_lnds) {
+ lnd = list_entry(tmp, lnd_t, lnd_list);
+
+ if ((int)lnd->lnd_type == type)
+ return lnd;
+ }
+
+ return NULL;
+}
+
+void
+lnet_register_lnd (lnd_t *lnd)
+{
+ LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
+
+ LASSERT (the_lnet.ln_init);
+ LASSERT (libcfs_isknown_lnd(lnd->lnd_type));
+ LASSERT (lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
+
+ list_add_tail (&lnd->lnd_list, &the_lnet.ln_lnds);
+ lnd->lnd_refcount = 0;
+
+ CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
+
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
+}
+EXPORT_SYMBOL(lnet_register_lnd);
+
+void
+lnet_unregister_lnd (lnd_t *lnd)
+{
+ LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
+
+ LASSERT (the_lnet.ln_init);
+ LASSERT (lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
+ LASSERT (lnd->lnd_refcount == 0);
+
+ list_del (&lnd->lnd_list);
+ CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
+
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
+}
+EXPORT_SYMBOL(lnet_unregister_lnd);
+
+void
+lnet_counters_get(lnet_counters_t *counters)
+{
+ lnet_counters_t *ctr;
+ int i;
+
+ memset(counters, 0, sizeof(*counters));
+
+ lnet_net_lock(LNET_LOCK_EX);
+
+ cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
+ counters->msgs_max += ctr->msgs_max;
+ counters->msgs_alloc += ctr->msgs_alloc;
+ counters->errors += ctr->errors;
+ counters->send_count += ctr->send_count;
+ counters->recv_count += ctr->recv_count;
+ counters->route_count += ctr->route_count;
+ counters->drop_count += ctr->drop_count;
+ counters->send_length += ctr->send_length;
+ counters->recv_length += ctr->recv_length;
+ counters->route_length += ctr->route_length;
+ counters->drop_length += ctr->drop_length;
+ }
+ lnet_net_unlock(LNET_LOCK_EX);
+}
+EXPORT_SYMBOL(lnet_counters_get);
+
+void
+lnet_counters_reset(void)
+{
+ lnet_counters_t *counters;
+ int i;
+
+ lnet_net_lock(LNET_LOCK_EX);
+
+ cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
+ memset(counters, 0, sizeof(lnet_counters_t));
+
+ lnet_net_unlock(LNET_LOCK_EX);
+}
+EXPORT_SYMBOL(lnet_counters_reset);
+
+#ifdef LNET_USE_LIB_FREELIST
+
+int
+lnet_freelist_init (lnet_freelist_t *fl, int n, int size)
+{
+ char *space;
+
+ LASSERT (n > 0);
+
+ size += offsetof (lnet_freeobj_t, fo_contents);
+
+ LIBCFS_ALLOC(space, n * size);
+ if (space == NULL)
+ return (-ENOMEM);
+
+ INIT_LIST_HEAD (&fl->fl_list);
+ fl->fl_objs = space;
+ fl->fl_nobjs = n;
+ fl->fl_objsize = size;
+
+ do
+ {
+ memset (space, 0, size);
+ list_add ((struct list_head *)space, &fl->fl_list);
+ space += size;
+ } while (--n != 0);
+
+ return (0);
+}
+
+void
+lnet_freelist_fini (lnet_freelist_t *fl)
+{
+ struct list_head *el;
+ int count;
+
+ if (fl->fl_nobjs == 0)
+ return;
+
+ count = 0;
+ for (el = fl->fl_list.next; el != &fl->fl_list; el = el->next)
+ count++;
+
+ LASSERT (count == fl->fl_nobjs);
+
+ LIBCFS_FREE(fl->fl_objs, fl->fl_nobjs * fl->fl_objsize);
+ memset (fl, 0, sizeof (*fl));
+}
+
+#endif /* LNET_USE_LIB_FREELIST */
+
+__u64
+lnet_create_interface_cookie (void)
+{
+ /* NB the interface cookie in wire handles guards against delayed
+ * replies and ACKs appearing valid after reboot. Initialisation time,
+ * even if it's only implemented to millisecond resolution, is probably
+ * easily good enough. */
+ struct timeval tv;
+ __u64 cookie;
+ do_gettimeofday(&tv);
+ cookie = tv.tv_sec;
+ cookie *= 1000000;
+ cookie += tv.tv_usec;
+ return cookie;
+}
+
+static char *
+lnet_res_type2str(int type)
+{
+ switch (type) {
+ default:
+ LBUG();
+ case LNET_COOKIE_TYPE_MD:
+ return "MD";
+ case LNET_COOKIE_TYPE_ME:
+ return "ME";
+ case LNET_COOKIE_TYPE_EQ:
+ return "EQ";
+ }
+}
+
+void
+lnet_res_container_cleanup(struct lnet_res_container *rec)
+{
+ int count = 0;
+
+ if (rec->rec_type == 0) /* not set yet, it's uninitialized */
+ return;
+
+ while (!list_empty(&rec->rec_active)) {
+ struct list_head *e = rec->rec_active.next;
+
+ list_del_init(e);
+ if (rec->rec_type == LNET_COOKIE_TYPE_EQ) {
+ lnet_eq_free(list_entry(e, lnet_eq_t, eq_list));
+
+ } else if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
+ lnet_md_free(list_entry(e, lnet_libmd_t, md_list));
+
+ } else { /* NB: Active MEs should be attached on portals */
+ LBUG();
+ }
+ count++;
+ }
+
+ if (count > 0) {
+ /* Found live MDs/MEs/EQs; the user really should unlink/free
+ * all of them before finalizing LNet, but if someone didn't,
+ * we have to recycle the garbage for them */
+ CERROR("%d active elements on exit of %s container\n",
+ count, lnet_res_type2str(rec->rec_type));
+ }
+
+#ifdef LNET_USE_LIB_FREELIST
+ lnet_freelist_fini(&rec->rec_freelist);
+#endif
+ if (rec->rec_lh_hash != NULL) {
+ LIBCFS_FREE(rec->rec_lh_hash,
+ LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
+ rec->rec_lh_hash = NULL;
+ }
+
+ rec->rec_type = 0; /* mark it as finalized */
+}
+
+int
+lnet_res_container_setup(struct lnet_res_container *rec,
+ int cpt, int type, int objnum, int objsz)
+{
+ int rc = 0;
+ int i;
+
+ LASSERT(rec->rec_type == 0);
+
+ rec->rec_type = type;
+ INIT_LIST_HEAD(&rec->rec_active);
+
+#ifdef LNET_USE_LIB_FREELIST
+ memset(&rec->rec_freelist, 0, sizeof(rec->rec_freelist));
+ rc = lnet_freelist_init(&rec->rec_freelist, objnum, objsz);
+ if (rc != 0)
+ goto out;
+#endif
+ rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;
+
+ /* Arbitrary choice of hash table size */
+ LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
+ LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
+ if (rec->rec_lh_hash == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < LNET_LH_HASH_SIZE; i++)
+ INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
+
+ return 0;
+
+out:
+ CERROR("Failed to setup %s resource container\n",
+ lnet_res_type2str(type));
+ lnet_res_container_cleanup(rec);
+ return rc;
+}
+
+static void
+lnet_res_containers_destroy(struct lnet_res_container **recs)
+{
+ struct lnet_res_container *rec;
+ int i;
+
+ cfs_percpt_for_each(rec, i, recs)
+ lnet_res_container_cleanup(rec);
+
+ cfs_percpt_free(recs);
+}
+
+static struct lnet_res_container **
+lnet_res_containers_create(int type, int objnum, int objsz)
+{
+ struct lnet_res_container **recs;
+ struct lnet_res_container *rec;
+ int rc;
+ int i;
+
+ recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
+ if (recs == NULL) {
+ CERROR("Failed to allocate %s resource containers\n",
+ lnet_res_type2str(type));
+ return NULL;
+ }
+
+ cfs_percpt_for_each(rec, i, recs) {
+ rc = lnet_res_container_setup(rec, i, type, objnum, objsz);
+ if (rc != 0) {
+ lnet_res_containers_destroy(recs);
+ return NULL;
+ }
+ }
+
+ return recs;
+}
+
+lnet_libhandle_t *
+lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
+{
+ /* ALWAYS called with lnet_res_lock held */
+ struct list_head *head;
+ lnet_libhandle_t *lh;
+ unsigned int hash;
+
+ if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
+ return NULL;
+
+ hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
+ head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];
+
+ list_for_each_entry(lh, head, lh_hash_chain) {
+ if (lh->lh_cookie == cookie)
+ return lh;
+ }
+
+ return NULL;
+}
+
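+/*
+ * Library-handle cookies (see lnet_res_lh_lookup() above) pack the
+ * resource type into the low LNET_COOKIE_TYPE_BITS, the CPT into the
+ * next LNET_CPT_BITS, and use the remaining bits as a per-container
+ * counter, bumped by 1 << ibits on each allocation; the counter bits
+ * also select the rec_lh_hash chain.
+ */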
+void
+lnet_res_lh_initialize(struct lnet_res_container *rec, lnet_libhandle_t *lh)
+{
+ /* ALWAYS called with lnet_res_lock held */
+ unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
+ unsigned int hash;
+
+ lh->lh_cookie = rec->rec_lh_cookie;
+ rec->rec_lh_cookie += 1 << ibits;
+
+ hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;
+
+ list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
+}
+
+
+int lnet_unprepare(void);
+
+int
+lnet_prepare(lnet_pid_t requested_pid)
+{
+ /* Prepare to bring up the network */
+ struct lnet_res_container **recs;
+ int rc = 0;
+
+ LASSERT (the_lnet.ln_refcount == 0);
+
+ the_lnet.ln_routing = 0;
+
+ LASSERT ((requested_pid & LNET_PID_USERFLAG) == 0);
+ the_lnet.ln_pid = requested_pid;
+
+ INIT_LIST_HEAD(&the_lnet.ln_test_peers);
+ INIT_LIST_HEAD(&the_lnet.ln_nis);
+ INIT_LIST_HEAD(&the_lnet.ln_nis_cpt);
+ INIT_LIST_HEAD(&the_lnet.ln_nis_zombie);
+ INIT_LIST_HEAD(&the_lnet.ln_routers);
+
+ rc = lnet_create_remote_nets_table();
+ if (rc != 0)
+ goto failed;
+
+ the_lnet.ln_interface_cookie = lnet_create_interface_cookie();
+
+ the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
+ sizeof(lnet_counters_t));
+ if (the_lnet.ln_counters == NULL) {
+ CERROR("Failed to allocate counters for LNet\n");
+ rc = -ENOMEM;
+ goto failed;
+ }
+
+ rc = lnet_peer_tables_create();
+ if (rc != 0)
+ goto failed;
+
+ rc = lnet_msg_containers_create();
+ if (rc != 0)
+ goto failed;
+
+ rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
+ LNET_COOKIE_TYPE_EQ, LNET_FL_MAX_EQS,
+ sizeof(lnet_eq_t));
+ if (rc != 0)
+ goto failed;
+
+ recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME, LNET_FL_MAX_MES,
+ sizeof(lnet_me_t));
+ if (recs == NULL)
+ goto failed;
+
+ the_lnet.ln_me_containers = recs;
+
+ recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD, LNET_FL_MAX_MDS,
+ sizeof(lnet_libmd_t));
+ if (recs == NULL)
+ goto failed;
+
+ the_lnet.ln_md_containers = recs;
+
+ rc = lnet_portals_create();
+ if (rc != 0) {
+ CERROR("Failed to create portals for LNet: %d\n", rc);
+ goto failed;
+ }
+
+ return 0;
+
+ failed:
+ lnet_unprepare();
+ return rc;
+}
+
+int
+lnet_unprepare (void)
+{
+ /* NB no LNET_LOCK since this is the last reference. All LND instances
+ * have shut down already, so it is safe to unlink and free all
+ * descriptors, even those that appear committed to a network op (eg MD
+ * with non-zero pending count) */
+
+ lnet_fail_nid(LNET_NID_ANY, 0);
+
+ LASSERT(the_lnet.ln_refcount == 0);
+ LASSERT(list_empty(&the_lnet.ln_test_peers));
+ LASSERT(list_empty(&the_lnet.ln_nis));
+ LASSERT(list_empty(&the_lnet.ln_nis_cpt));
+ LASSERT(list_empty(&the_lnet.ln_nis_zombie));
+
+ lnet_portals_destroy();
+
+ if (the_lnet.ln_md_containers != NULL) {
+ lnet_res_containers_destroy(the_lnet.ln_md_containers);
+ the_lnet.ln_md_containers = NULL;
+ }
+
+ if (the_lnet.ln_me_containers != NULL) {
+ lnet_res_containers_destroy(the_lnet.ln_me_containers);
+ the_lnet.ln_me_containers = NULL;
+ }
+
+ lnet_res_container_cleanup(&the_lnet.ln_eq_container);
+
+ lnet_msg_containers_destroy();
+ lnet_peer_tables_destroy();
+ lnet_rtrpools_free();
+
+ if (the_lnet.ln_counters != NULL) {
+ cfs_percpt_free(the_lnet.ln_counters);
+ the_lnet.ln_counters = NULL;
+ }
+ lnet_destroy_remote_nets_table();
+
+ return 0;
+}
+
+lnet_ni_t *
+lnet_net2ni_locked(__u32 net, int cpt)
+{
+ struct list_head *tmp;
+ lnet_ni_t *ni;
+
+ LASSERT(cpt != LNET_LOCK_EX);
+
+ list_for_each(tmp, &the_lnet.ln_nis) {
+ ni = list_entry(tmp, lnet_ni_t, ni_list);
+
+ if (LNET_NIDNET(ni->ni_nid) == net) {
+ lnet_ni_addref_locked(ni, cpt);
+ return ni;
+ }
+ }
+
+ return NULL;
+}
+
+lnet_ni_t *
+lnet_net2ni(__u32 net)
+{
+ lnet_ni_t *ni;
+
+ lnet_net_lock(0);
+ ni = lnet_net2ni_locked(net, 0);
+ lnet_net_unlock(0);
+
+ return ni;
+}
+EXPORT_SYMBOL(lnet_net2ni);
+
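+/*
+ * Hash a NID onto one of 'number' CPTs: cfs_hash_long() folds the NID
+ * into LNET_CPT_BITS bits; if that value is already below 'number' it is
+ * used directly, otherwise it is reduced modulo 'number', so the CPT
+ * count does not need to be a power of 2.
+ */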
+static unsigned int
+lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
+{
+ __u64 key = nid;
+ unsigned int val;
+
+ LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);
+
+ if (number == 1)
+ return 0;
+
+ val = cfs_hash_long(key, LNET_CPT_BITS);
+ /* NB: LNET_CPT_NUMBER doesn't have to be a power of 2 */
+ if (val < number)
+ return val;
+
+ return (unsigned int)(key + val + (val >> 1)) % number;
+}
+
+int
+lnet_cpt_of_nid_locked(lnet_nid_t nid)
+{
+ struct lnet_ni *ni;
+
+ /* must be called while holding lnet_net_lock */
+ if (LNET_CPT_NUMBER == 1)
+ return 0; /* the only one */
+
+ /* taking lnet_net_lock(any) would be OK */
+ if (!list_empty(&the_lnet.ln_nis_cpt)) {
+ list_for_each_entry(ni, &the_lnet.ln_nis_cpt, ni_cptlist) {
+ if (LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid))
+ continue;
+
+ LASSERT(ni->ni_cpts != NULL);
+ return ni->ni_cpts[lnet_nid_cpt_hash
+ (nid, ni->ni_ncpts)];
+ }
+ }
+
+ return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
+}
+
+int
+lnet_cpt_of_nid(lnet_nid_t nid)
+{
+ int cpt;
+ int cpt2;
+
+ if (LNET_CPT_NUMBER == 1)
+ return 0; /* the only one */
+
+ if (list_empty(&the_lnet.ln_nis_cpt))
+ return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
+
+ cpt = lnet_net_lock_current();
+ cpt2 = lnet_cpt_of_nid_locked(nid);
+ lnet_net_unlock(cpt);
+
+ return cpt2;
+}
+EXPORT_SYMBOL(lnet_cpt_of_nid);
+
+int
+lnet_islocalnet(__u32 net)
+{
+ struct lnet_ni *ni;
+ int cpt;
+
+ cpt = lnet_net_lock_current();
+
+ ni = lnet_net2ni_locked(net, cpt);
+ if (ni != NULL)
+ lnet_ni_decref_locked(ni, cpt);
+
+ lnet_net_unlock(cpt);
+
+ return ni != NULL;
+}
+
+lnet_ni_t *
+lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
+{
+ struct lnet_ni *ni;
+ struct list_head *tmp;
+
+ LASSERT(cpt != LNET_LOCK_EX);
+
+ list_for_each(tmp, &the_lnet.ln_nis) {
+ ni = list_entry(tmp, lnet_ni_t, ni_list);
+
+ if (ni->ni_nid == nid) {
+ lnet_ni_addref_locked(ni, cpt);
+ return ni;
+ }
+ }
+
+ return NULL;
+}
+
+int
+lnet_islocalnid(lnet_nid_t nid)
+{
+ struct lnet_ni *ni;
+ int cpt;
+
+ cpt = lnet_net_lock_current();
+ ni = lnet_nid2ni_locked(nid, cpt);
+ if (ni != NULL)
+ lnet_ni_decref_locked(ni, cpt);
+ lnet_net_unlock(cpt);
+
+ return ni != NULL;
+}
+
+int
+lnet_count_acceptor_nis (void)
+{
+ /* Return the # of NIs that need the acceptor. */
+ int count = 0;
+ struct list_head *tmp;
+ struct lnet_ni *ni;
+ int cpt;
+
+ cpt = lnet_net_lock_current();
+ list_for_each(tmp, &the_lnet.ln_nis) {
+ ni = list_entry(tmp, lnet_ni_t, ni_list);
+
+ if (ni->ni_lnd->lnd_accept != NULL)
+ count++;
+ }
+
+ lnet_net_unlock(cpt);
+
+ return count;
+}
+
+static int
+lnet_ni_tq_credits(lnet_ni_t *ni)
+{
+ int credits;
+
+ LASSERT(ni->ni_ncpts >= 1);
+
+ if (ni->ni_ncpts == 1)
+ return ni->ni_maxtxcredits;
+
+ credits = ni->ni_maxtxcredits / ni->ni_ncpts;
+ credits = max(credits, 8 * ni->ni_peertxcredits);
+ credits = min(credits, ni->ni_maxtxcredits);
+
+ return credits;
+}
+
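+/*
+ * Tear down all NIs: flag shutdown, move every NI onto the zombie list
+ * so nothing new can find it, clear lazy portals (whose delayed messages
+ * hold peer refs) and the peer tables (whose peers hold NI refs), then
+ * wait for each zombie's per-CPT refcounts to drain before calling the
+ * owning LND's lnd_shutdown() and freeing it.
+ */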
+void
+lnet_shutdown_lndnis (void)
+{
+ int i;
+ int islo;
+ lnet_ni_t *ni;
+
+ /* NB called holding the global mutex */
+
+ /* All quiet on the API front */
+ LASSERT(!the_lnet.ln_shutdown);
+ LASSERT(the_lnet.ln_refcount == 0);
+ LASSERT(list_empty(&the_lnet.ln_nis_zombie));
+
+ lnet_net_lock(LNET_LOCK_EX);
+ the_lnet.ln_shutdown = 1; /* flag shutdown */
+
+ /* Unlink NIs from the global table */
+ while (!list_empty(&the_lnet.ln_nis)) {
+ ni = list_entry(the_lnet.ln_nis.next,
+ lnet_ni_t, ni_list);
+ /* move it to zombie list and nobody can find it anymore */
+ list_move(&ni->ni_list, &the_lnet.ln_nis_zombie);
+ lnet_ni_decref_locked(ni, 0); /* drop ln_nis' ref */
+
+ if (!list_empty(&ni->ni_cptlist)) {
+ list_del_init(&ni->ni_cptlist);
+ lnet_ni_decref_locked(ni, 0);
+ }
+ }
+
+ /* Drop the cached eqwait NI. */
+ if (the_lnet.ln_eq_waitni != NULL) {
+ lnet_ni_decref_locked(the_lnet.ln_eq_waitni, 0);
+ the_lnet.ln_eq_waitni = NULL;
+ }
+
+ /* Drop the cached loopback NI. */
+ if (the_lnet.ln_loni != NULL) {
+ lnet_ni_decref_locked(the_lnet.ln_loni, 0);
+ the_lnet.ln_loni = NULL;
+ }
+
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ /* Clear lazy portals and drop delayed messages which hold refs
+ * on their lnet_msg_t::msg_rxpeer */
+ for (i = 0; i < the_lnet.ln_nportals; i++)
+ LNetClearLazyPortal(i);
+
+ /* Clear the peer table and wait for all peers to go (they hold refs on
+ * their NIs) */
+ lnet_peer_tables_cleanup();
+
+ lnet_net_lock(LNET_LOCK_EX);
+ /* Now wait for the NIs I just nuked to show up on ln_nis_zombie
+ * and shut them down in guaranteed thread context */
+ i = 2;
+ while (!list_empty(&the_lnet.ln_nis_zombie)) {
+ int *ref;
+ int j;
+
+ ni = list_entry(the_lnet.ln_nis_zombie.next,
+ lnet_ni_t, ni_list);
+ list_del_init(&ni->ni_list);
+ cfs_percpt_for_each(ref, j, ni->ni_refs) {
+ if (*ref == 0)
+ continue;
+ /* still busy, add it back to zombie list */
+ list_add(&ni->ni_list, &the_lnet.ln_nis_zombie);
+ break;
+ }
+
+ while (!list_empty(&ni->ni_list)) {
+ lnet_net_unlock(LNET_LOCK_EX);
+ ++i;
+ if ((i & (-i)) == i) {
+ CDEBUG(D_WARNING,
+ "Waiting for zombie LNI %s\n",
+ libcfs_nid2str(ni->ni_nid));
+ }
+ cfs_pause(cfs_time_seconds(1));
+ lnet_net_lock(LNET_LOCK_EX);
+ continue;
+ }
+
+ ni->ni_lnd->lnd_refcount--;
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ islo = ni->ni_lnd->lnd_type == LOLND;
+
+ LASSERT (!in_interrupt ());
+ (ni->ni_lnd->lnd_shutdown)(ni);
+
+ /* can't deref lnd anymore now; it might have unregistered
+ * itself... */
+
+ if (!islo)
+ CDEBUG(D_LNI, "Removed LNI %s\n",
+ libcfs_nid2str(ni->ni_nid));
+
+ lnet_ni_free(ni);
+ lnet_net_lock(LNET_LOCK_EX);
+ }
+
+ the_lnet.ln_shutdown = 0;
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ if (the_lnet.ln_network_tokens != NULL) {
+ LIBCFS_FREE(the_lnet.ln_network_tokens,
+ the_lnet.ln_network_tokens_nob);
+ the_lnet.ln_network_tokens = NULL;
+ }
+}
+
+int
+lnet_startup_lndnis (void)
+{
+ lnd_t *lnd;
+ struct lnet_ni *ni;
+ struct lnet_tx_queue *tq;
+ struct list_head nilist;
+ int i;
+ int rc = 0;
+ int lnd_type;
+ int nicount = 0;
+ char *nets = lnet_get_networks();
+
+ INIT_LIST_HEAD(&nilist);
+
+ if (nets == NULL)
+ goto failed;
+
+ rc = lnet_parse_networks(&nilist, nets);
+ if (rc != 0)
+ goto failed;
+
+ while (!list_empty(&nilist)) {
+ ni = list_entry(nilist.next, lnet_ni_t, ni_list);
+ lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
+
+ LASSERT (libcfs_isknown_lnd(lnd_type));
+
+ if (lnd_type == CIBLND ||
+ lnd_type == OPENIBLND ||
+ lnd_type == IIBLND ||
+ lnd_type == VIBLND) {
+ CERROR("LND %s obsoleted\n",
+ libcfs_lnd2str(lnd_type));
+ goto failed;
+ }
+
+ LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
+ lnd = lnet_find_lnd_by_type(lnd_type);
+
+ if (lnd == NULL) {
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
+ rc = request_module("%s",
+ libcfs_lnd2modname(lnd_type));
+ LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
+
+ lnd = lnet_find_lnd_by_type(lnd_type);
+ if (lnd == NULL) {
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
+ CERROR("Can't load LND %s, module %s, rc=%d\n",
+ libcfs_lnd2str(lnd_type),
+ libcfs_lnd2modname(lnd_type), rc);
+ goto failed;
+ }
+ }
+
+ lnet_net_lock(LNET_LOCK_EX);
+ lnd->lnd_refcount++;
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ ni->ni_lnd = lnd;
+
+ rc = (lnd->lnd_startup)(ni);
+
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
+
+ if (rc != 0) {
+ LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s"
+ "\n",
+ rc, libcfs_lnd2str(lnd->lnd_type));
+ lnet_net_lock(LNET_LOCK_EX);
+ lnd->lnd_refcount--;
+ lnet_net_unlock(LNET_LOCK_EX);
+ goto failed;
+ }
+
+ LASSERT (ni->ni_peertimeout <= 0 || lnd->lnd_query != NULL);
+
+ list_del(&ni->ni_list);
+
+ lnet_net_lock(LNET_LOCK_EX);
+ /* refcount for ln_nis */
+ lnet_ni_addref_locked(ni, 0);
+ list_add_tail(&ni->ni_list, &the_lnet.ln_nis);
+ if (ni->ni_cpts != NULL) {
+ list_add_tail(&ni->ni_cptlist,
+ &the_lnet.ln_nis_cpt);
+ lnet_ni_addref_locked(ni, 0);
+ }
+
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ if (lnd->lnd_type == LOLND) {
+ lnet_ni_addref(ni);
+ LASSERT (the_lnet.ln_loni == NULL);
+ the_lnet.ln_loni = ni;
+ continue;
+ }
+
+ if (ni->ni_peertxcredits == 0 ||
+ ni->ni_maxtxcredits == 0) {
+ LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
+ libcfs_lnd2str(lnd->lnd_type),
+ ni->ni_peertxcredits == 0 ?
+ "" : "per-peer ");
+ goto failed;
+ }
+
+ cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
+ tq->tq_credits_min =
+ tq->tq_credits_max =
+ tq->tq_credits = lnet_ni_tq_credits(ni);
+ }
+
+ CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
+ libcfs_nid2str(ni->ni_nid), ni->ni_peertxcredits,
+ lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
+ ni->ni_peerrtrcredits, ni->ni_peertimeout);
+
+ nicount++;
+ }
+
+ if (the_lnet.ln_eq_waitni != NULL && nicount > 1) {
+ lnd_type = the_lnet.ln_eq_waitni->ni_lnd->lnd_type;
+ LCONSOLE_ERROR_MSG(0x109, "LND %s can only run single-network"
+ "\n",
+ libcfs_lnd2str(lnd_type));
+ goto failed;
+ }
+
+ return 0;
+
+ failed:
+ lnet_shutdown_lndnis();
+
+ while (!list_empty(&nilist)) {
+ ni = list_entry(nilist.next, lnet_ni_t, ni_list);
+ list_del(&ni->ni_list);
+ lnet_ni_free(ni);
+ }
+
+ return -ENETDOWN;
+}
+
+/**
+ * Initialize LNet library.
+ *
+ * Only userspace programs need to call this function - it's automatically
+ * called in the kernel at module loading time. Caller has to call LNetFini()
+ * after a call to LNetInit(), if and only if the latter returned 0. It must
+ * be called exactly once.
+ *
+ * \return 0 on success, and -ve on failures.
+ */
+int
+LNetInit(void)
+{
+ int rc;
+
+ lnet_assert_wire_constants();
+ LASSERT(!the_lnet.ln_init);
+
+ memset(&the_lnet, 0, sizeof(the_lnet));
+
+ /* refer to global cfs_cpt_table for now */
+ the_lnet.ln_cpt_table = cfs_cpt_table;
+ the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_table);
+
+ LASSERT(the_lnet.ln_cpt_number > 0);
+ if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
+ /* we are under risk of consuming all lh_cookie */
+ CERROR("Can't have %d CPTs for LNet (max allowed is %d), "
+ "please change setting of CPT-table and retry\n",
+ the_lnet.ln_cpt_number, LNET_CPT_MAX);
+ return -1;
+ }
+
+ while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
+ the_lnet.ln_cpt_bits++;
+
+ rc = lnet_create_locks();
+ if (rc != 0) {
+ CERROR("Can't create LNet global locks: %d\n", rc);
+ return -1;
+ }
+
+ the_lnet.ln_refcount = 0;
+ the_lnet.ln_init = 1;
+ LNetInvalidateHandle(&the_lnet.ln_rc_eqh);
+ INIT_LIST_HEAD(&the_lnet.ln_lnds);
+ INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
+ INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
+
+ /* The hash table size is the number of bits it takes to express the set
+ * ln_num_routes, minus 1 (better to underestimate than overestimate so we
+ * don't waste memory). */
+ if (rnet_htable_size <= 0)
+ rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
+ else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
+ rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
+ the_lnet.ln_remote_nets_hbits = max_t(int, 1,
+ order_base_2(rnet_htable_size) - 1);
+
+ /* All LNDs apart from the LOLND are in separate modules. They
+ * register themselves when their module loads, and unregister
+ * themselves when their module is unloaded. */
+ lnet_register_lnd(&the_lolnd);
+ return 0;
+}
+EXPORT_SYMBOL(LNetInit);
+
+/**
+ * Finalize LNet library.
+ *
+ * Only userspace programs need to call this function. It can be called
+ * at most once.
+ *
+ * \pre LNetInit() called with success.
+ * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
+ */
+void
+LNetFini(void)
+{
+ LASSERT(the_lnet.ln_init);
+ LASSERT(the_lnet.ln_refcount == 0);
+
+ while (!list_empty(&the_lnet.ln_lnds))
+ lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
+ lnd_t, lnd_list));
+ lnet_destroy_locks();
+
+ the_lnet.ln_init = 0;
+}
+EXPORT_SYMBOL(LNetFini);
+
+/**
+ * Set LNet PID and start LNet interfaces, routing, and forwarding.
+ *
+ * Userspace program should call this after a successful call to LNetInit().
+ * Users must call this function at least once before any other functions.
+ * For each successful call there must be a corresponding call to
+ * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is
+ * ignored.
+ *
+ * The PID used by LNet may be different from the one requested.
+ * See LNetGetId().
+ *
+ * \param requested_pid PID requested by the caller.
+ *
+ * \return >= 0 on success, and < 0 error code on failures.
+ */
+int
+LNetNIInit(lnet_pid_t requested_pid)
+{
+ int im_a_router = 0;
+ int rc;
+
+ LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
+
+ LASSERT (the_lnet.ln_init);
+ CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
+
+ if (the_lnet.ln_refcount > 0) {
+ rc = the_lnet.ln_refcount++;
+ goto out;
+ }
+
+ lnet_get_tunables();
+
+ if (requested_pid == LNET_PID_ANY) {
+ /* Don't instantiate LNET just for me */
+ rc = -ENETDOWN;
+ goto failed0;
+ }
+
+ rc = lnet_prepare(requested_pid);
+ if (rc != 0)
+ goto failed0;
+
+ rc = lnet_startup_lndnis();
+ if (rc != 0)
+ goto failed1;
+
+ rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
+ if (rc != 0)
+ goto failed2;
+
+ rc = lnet_check_routes();
+ if (rc != 0)
+ goto failed2;
+
+ rc = lnet_rtrpools_alloc(im_a_router);
+ if (rc != 0)
+ goto failed2;
+
+ rc = lnet_acceptor_start();
+ if (rc != 0)
+ goto failed2;
+
+ the_lnet.ln_refcount = 1;
+ /* Now I may use my own API functions... */
+
+ /* NB router checker needs the_lnet.ln_ping_info in
+ * lnet_router_checker -> lnet_update_ni_status_locked */
+ rc = lnet_ping_target_init();
+ if (rc != 0)
+ goto failed3;
+
+ rc = lnet_router_checker_start();
+ if (rc != 0)
+ goto failed4;
+
+ lnet_proc_init();
+ goto out;
+
+ failed4:
+ lnet_ping_target_fini();
+ failed3:
+ the_lnet.ln_refcount = 0;
+ lnet_acceptor_stop();
+ failed2:
+ lnet_destroy_routes();
+ lnet_shutdown_lndnis();
+ failed1:
+ lnet_unprepare();
+ failed0:
+ LASSERT (rc < 0);
+ out:
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
+ return rc;
+}
+EXPORT_SYMBOL(LNetNIInit);
+
+/**
+ * Stop LNet interfaces, routing, and forwarding.
+ *
+ * Users must call this function once for each successful call to LNetNIInit().
+ * Once the LNetNIFini() operation has been started, the results of pending
+ * API operations are undefined.
+ *
+ * \return always 0 for current implementation.
+ */
+int
+LNetNIFini(void)
+{
+ LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
+
+ LASSERT (the_lnet.ln_init);
+ LASSERT (the_lnet.ln_refcount > 0);
+
+ if (the_lnet.ln_refcount != 1) {
+ the_lnet.ln_refcount--;
+ } else {
+ LASSERT (!the_lnet.ln_niinit_self);
+
+ lnet_proc_fini();
+ lnet_router_checker_stop();
+ lnet_ping_target_fini();
+
+ /* Teardown fns that use my own API functions BEFORE here */
+ the_lnet.ln_refcount = 0;
+
+ lnet_acceptor_stop();
+ lnet_destroy_routes();
+ lnet_shutdown_lndnis();
+ lnet_unprepare();
+ }
+
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(LNetNIFini);
+
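+/*
+ * Typical usage from a kernel-side LNet user (illustrative sketch):
+ *
+ *	rc = LNetNIInit(requested_pid);
+ *	if (rc < 0)
+ *		return rc;
+ *	... LNetEQAlloc() / LNetMEAttach() / LNetMDBind() / LNetGet() ...
+ *	LNetNIFini();
+ *
+ * LNetInit()/LNetFini() are only called explicitly by userspace programs;
+ * in the kernel they run at module load/unload time (see their kernel-doc
+ * above).
+ */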
+/**
+ * This is an ugly hack to export IOC_LIBCFS_DEBUG_PEER and
+ * IOC_LIBCFS_PORTALS_COMPATIBILITY commands to users, by tweaking the LNet
+ * internal ioctl handler.
+ *
+ * IOC_LIBCFS_PORTALS_COMPATIBILITY is now deprecated, don't use it.
+ *
+ * \param cmd IOC_LIBCFS_DEBUG_PEER to print debugging data about a peer.
+ * The data will be printed to system console. Don't use it excessively.
+ * \param arg A pointer to lnet_process_id_t, process ID of the peer.
+ *
+ * \return Always returns 0 when called by users directly (i.e., not via ioctl).
+ */
+int
+LNetCtl(unsigned int cmd, void *arg)
+{
+ struct libcfs_ioctl_data *data = arg;
+ lnet_process_id_t id = {0};
+ lnet_ni_t *ni;
+ int rc;
+
+ LASSERT (the_lnet.ln_init);
+ LASSERT (the_lnet.ln_refcount > 0);
+
+ switch (cmd) {
+ case IOC_LIBCFS_GET_NI:
+ rc = LNetGetId(data->ioc_count, &id);
+ data->ioc_nid = id.nid;
+ return rc;
+
+ case IOC_LIBCFS_FAIL_NID:
+ return lnet_fail_nid(data->ioc_nid, data->ioc_count);
+
+ case IOC_LIBCFS_ADD_ROUTE:
+ rc = lnet_add_route(data->ioc_net, data->ioc_count,
+ data->ioc_nid);
+ return (rc != 0) ? rc : lnet_check_routes();
+
+ case IOC_LIBCFS_DEL_ROUTE:
+ return lnet_del_route(data->ioc_net, data->ioc_nid);
+
+ case IOC_LIBCFS_GET_ROUTE:
+ return lnet_get_route(data->ioc_count,
+ &data->ioc_net, &data->ioc_count,
+ &data->ioc_nid, &data->ioc_flags);
+ case IOC_LIBCFS_NOTIFY_ROUTER:
+ return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
+ cfs_time_current() -
+ cfs_time_seconds(cfs_time_current_sec() -
+ (time_t)data->ioc_u64[0]));
+
+ case IOC_LIBCFS_PORTALS_COMPATIBILITY:
+ /* This can be removed once lustre stops calling it */
+ return 0;
+
+ case IOC_LIBCFS_LNET_DIST:
+ rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
+ if (rc < 0 && rc != -EHOSTUNREACH)
+ return rc;
+
+ data->ioc_u32[0] = rc;
+ return 0;
+
+ case IOC_LIBCFS_TESTPROTOCOMPAT:
+ lnet_net_lock(LNET_LOCK_EX);
+ the_lnet.ln_testprotocompat = data->ioc_flags;
+ lnet_net_unlock(LNET_LOCK_EX);
+ return 0;
+
+ case IOC_LIBCFS_PING:
+ id.nid = data->ioc_nid;
+ id.pid = data->ioc_u32[0];
+ rc = lnet_ping(id, data->ioc_u32[1], /* timeout */
+ (lnet_process_id_t *)data->ioc_pbuf1,
+ data->ioc_plen1/sizeof(lnet_process_id_t));
+ if (rc < 0)
+ return rc;
+ data->ioc_count = rc;
+ return 0;
+
+ case IOC_LIBCFS_DEBUG_PEER: {
+ /* CAVEAT EMPTOR: this one designed for calling directly; not
+ * via an ioctl */
+ id = *((lnet_process_id_t *) arg);
+
+ lnet_debug_peer(id.nid);
+
+ ni = lnet_net2ni(LNET_NIDNET(id.nid));
+ if (ni == NULL) {
+ CDEBUG(D_WARNING, "No NI for %s\n", libcfs_id2str(id));
+ } else {
+ if (ni->ni_lnd->lnd_ctl == NULL) {
+ CDEBUG(D_WARNING, "No ctl for %s\n",
+ libcfs_id2str(id));
+ } else {
+ (void)ni->ni_lnd->lnd_ctl(ni, cmd, arg);
+ }
+
+ lnet_ni_decref(ni);
+ }
+ return 0;
+ }
+
+ default:
+ ni = lnet_net2ni(data->ioc_net);
+ if (ni == NULL)
+ return -EINVAL;
+
+ if (ni->ni_lnd->lnd_ctl == NULL)
+ rc = -EINVAL;
+ else
+ rc = ni->ni_lnd->lnd_ctl(ni, cmd, arg);
+
+ lnet_ni_decref(ni);
+ return rc;
+ }
+ /* not reached */
+}
+EXPORT_SYMBOL(LNetCtl);
+
+/**
+ * Retrieve the lnet_process_id_t ID of the LNet interface at \a index. Note
+ * that all interfaces share the same PID, as requested by LNetNIInit().
+ *
+ * \param index Index of the interface to look up.
+ * \param id On successful return, this location will hold the
+ * lnet_process_id_t ID of the interface.
+ *
+ * \retval 0 If an interface exists at \a index.
+ * \retval -ENOENT If no interface has been found.
+ */
+int
+LNetGetId(unsigned int index, lnet_process_id_t *id)
+{
+ struct lnet_ni *ni;
+ struct list_head *tmp;
+ int cpt;
+ int rc = -ENOENT;
+
+ LASSERT(the_lnet.ln_init);
+
+ /* LNetNI initialization failed? */
+ if (the_lnet.ln_refcount == 0)
+ return rc;
+
+ cpt = lnet_net_lock_current();
+
+ list_for_each(tmp, &the_lnet.ln_nis) {
+ if (index-- != 0)
+ continue;
+
+ ni = list_entry(tmp, lnet_ni_t, ni_list);
+
+ id->nid = ni->ni_nid;
+ id->pid = the_lnet.ln_pid;
+ rc = 0;
+ break;
+ }
+
+ lnet_net_unlock(cpt);
+ return rc;
+}
+EXPORT_SYMBOL(LNetGetId);
+
+/**
+ * Print a string representation of handle \a h into buffer \a str of
+ * \a len bytes.
+ */
+void
+LNetSnprintHandle(char *str, int len, lnet_handle_any_t h)
+{
+ snprintf(str, len, LPX64, h.cookie);
+}
+EXPORT_SYMBOL(LNetSnprintHandle);
+
+static int
+lnet_create_ping_info(void)
+{
+ int i;
+ int n;
+ int rc;
+ unsigned int infosz;
+ lnet_ni_t *ni;
+ lnet_process_id_t id;
+ lnet_ping_info_t *pinfo;
+
+ for (n = 0; ; n++) {
+ rc = LNetGetId(n, &id);
+ if (rc == -ENOENT)
+ break;
+
+ LASSERT (rc == 0);
+ }
+
+ infosz = offsetof(lnet_ping_info_t, pi_ni[n]);
+ LIBCFS_ALLOC(pinfo, infosz);
+ if (pinfo == NULL) {
+ CERROR("Can't allocate ping info[%d]\n", n);
+ return -ENOMEM;
+ }
+
+ pinfo->pi_nnis = n;
+ pinfo->pi_pid = the_lnet.ln_pid;
+ pinfo->pi_magic = LNET_PROTO_PING_MAGIC;
+ pinfo->pi_features = LNET_PING_FEAT_NI_STATUS;
+
+ for (i = 0; i < n; i++) {
+ lnet_ni_status_t *ns = &pinfo->pi_ni[i];
+
+ rc = LNetGetId(i, &id);
+ LASSERT (rc == 0);
+
+ ns->ns_nid = id.nid;
+ ns->ns_status = LNET_NI_STATUS_UP;
+
+ lnet_net_lock(0);
+
+ ni = lnet_nid2ni_locked(id.nid, 0);
+ LASSERT(ni != NULL);
+
+ lnet_ni_lock(ni);
+ LASSERT(ni->ni_status == NULL);
+ ni->ni_status = ns;
+ lnet_ni_unlock(ni);
+
+ lnet_ni_decref_locked(ni, 0);
+ lnet_net_unlock(0);
+ }
+
+ the_lnet.ln_ping_info = pinfo;
+ return 0;
+}
+
+static void
+lnet_destroy_ping_info(void)
+{
+ struct lnet_ni *ni;
+
+ lnet_net_lock(0);
+
+ list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
+ lnet_ni_lock(ni);
+ ni->ni_status = NULL;
+ lnet_ni_unlock(ni);
+ }
+
+ lnet_net_unlock(0);
+
+ LIBCFS_FREE(the_lnet.ln_ping_info,
+ offsetof(lnet_ping_info_t,
+ pi_ni[the_lnet.ln_ping_info->pi_nnis]));
+ the_lnet.ln_ping_info = NULL;
+ return;
+}
+
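+/*
+ * Publish this node's ping info: attach an ME on LNET_RESERVED_PORTAL
+ * matching LNET_PROTO_PING_MATCHBITS from any peer, and bind a persistent
+ * (LNET_MD_THRESH_INF, LNET_RETAIN) MD over ln_ping_info so that remote
+ * lnet_ping() GETs can fetch this node's NI status table.
+ */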
+int
+lnet_ping_target_init(void)
+{
+ lnet_md_t md = {0};
+ lnet_handle_me_t meh;
+ lnet_process_id_t id;
+ int rc;
+ int rc2;
+ int infosz;
+
+ rc = lnet_create_ping_info();
+ if (rc != 0)
+ return rc;
+
+ /* We can have a tiny EQ since we only need to see the unlink event on
+ * teardown, which by definition is the last one! */
+ rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &the_lnet.ln_ping_target_eq);
+ if (rc != 0) {
+ CERROR("Can't allocate ping EQ: %d\n", rc);
+ goto failed_0;
+ }
+
+ memset(&id, 0, sizeof(lnet_process_id_t));
+ id.nid = LNET_NID_ANY;
+ id.pid = LNET_PID_ANY;
+
+ rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
+ LNET_PROTO_PING_MATCHBITS, 0,
+ LNET_UNLINK, LNET_INS_AFTER,
+ &meh);
+ if (rc != 0) {
+ CERROR("Can't create ping ME: %d\n", rc);
+ goto failed_1;
+ }
+
+ /* initialize md content */
+ infosz = offsetof(lnet_ping_info_t,
+ pi_ni[the_lnet.ln_ping_info->pi_nnis]);
+ md.start = the_lnet.ln_ping_info;
+ md.length = infosz;
+ md.threshold = LNET_MD_THRESH_INF;
+ md.max_size = 0;
+ md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
+ LNET_MD_MANAGE_REMOTE;
+ md.user_ptr = NULL;
+ md.eq_handle = the_lnet.ln_ping_target_eq;
+
+ rc = LNetMDAttach(meh, md,
+ LNET_RETAIN,
+ &the_lnet.ln_ping_target_md);
+ if (rc != 0) {
+ CERROR("Can't attach ping MD: %d\n", rc);
+ goto failed_2;
+ }
+
+ return 0;
+
+ failed_2:
+ rc2 = LNetMEUnlink(meh);
+ LASSERT (rc2 == 0);
+ failed_1:
+ rc2 = LNetEQFree(the_lnet.ln_ping_target_eq);
+ LASSERT (rc2 == 0);
+ failed_0:
+ lnet_destroy_ping_info();
+ return rc;
+}
+
+void
+lnet_ping_target_fini(void)
+{
+ lnet_event_t event;
+ int rc;
+ int which;
+ int timeout_ms = 1000;
+ sigset_t blocked = cfs_block_allsigs();
+
+ LNetMDUnlink(the_lnet.ln_ping_target_md);
+ /* NB md could be busy; this just starts the unlink */
+
+ for (;;) {
+ rc = LNetEQPoll(&the_lnet.ln_ping_target_eq, 1,
+ timeout_ms, &event, &which);
+
+ /* I expect overflow... */
+ LASSERT (rc >= 0 || rc == -EOVERFLOW);
+
+ if (rc == 0) {
+ /* timed out: provide a diagnostic */
+ CWARN("Still waiting for ping MD to unlink\n");
+ timeout_ms *= 2;
+ continue;
+ }
+
+ /* Got a valid event */
+ if (event.unlinked)
+ break;
+ }
+
+ rc = LNetEQFree(the_lnet.ln_ping_target_eq);
+ LASSERT (rc == 0);
+ lnet_destroy_ping_info();
+ cfs_restore_sigs(blocked);
+}
+
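+/*
+ * Ping a remote process: bind a temporary MD over a local buffer, issue
+ * an LNetGet() against the peer's ping target (see
+ * lnet_ping_target_init() above), poll the EQ until both the REPLY and
+ * the unlink event have been seen, then validate the returned
+ * lnet_ping_info_t and copy up to n_ids NI entries back to the caller.
+ */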
+int
+lnet_ping (lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_ids)
+{
+ lnet_handle_eq_t eqh;
+ lnet_handle_md_t mdh;
+ lnet_event_t event;
+ lnet_md_t md = {0};
+ int which;
+ int unlinked = 0;
+ int replied = 0;
+ const int a_long_time = 60000; /* mS */
+ int infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]);
+ lnet_ping_info_t *info;
+ lnet_process_id_t tmpid;
+ int i;
+ int nob;
+ int rc;
+ int rc2;
+ sigset_t blocked;
+
+ if (n_ids <= 0 ||
+ id.nid == LNET_NID_ANY ||
+ timeout_ms > 500000 || /* arbitrary limit! */
+ n_ids > 20) /* arbitrary limit! */
+ return -EINVAL;
+
+ if (id.pid == LNET_PID_ANY)
+ id.pid = LUSTRE_SRV_LNET_PID;
+
+ LIBCFS_ALLOC(info, infosz);
+ if (info == NULL)
+ return -ENOMEM;
+
+ /* NB 2 events max (including any unlink event) */
+ rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
+ if (rc != 0) {
+ CERROR("Can't allocate EQ: %d\n", rc);
+ goto out_0;
+ }
+
+ /* initialize md content */
+ md.start = info;
+ md.length = infosz;
+ md.threshold = 2; /*GET/REPLY*/
+ md.max_size = 0;
+ md.options = LNET_MD_TRUNCATE;
+ md.user_ptr = NULL;
+ md.eq_handle = eqh;
+
+ rc = LNetMDBind(md, LNET_UNLINK, &mdh);
+ if (rc != 0) {
+ CERROR("Can't bind MD: %d\n", rc);
+ goto out_1;
+ }
+
+ rc = LNetGet(LNET_NID_ANY, mdh, id,
+ LNET_RESERVED_PORTAL,
+ LNET_PROTO_PING_MATCHBITS, 0);
+
+ if (rc != 0) {
+ /* Don't CERROR; this could be deliberate! */
+
+ rc2 = LNetMDUnlink(mdh);
+ LASSERT (rc2 == 0);
+
+ /* NB must wait for the UNLINK event below... */
+ unlinked = 1;
+ timeout_ms = a_long_time;
+ }
+
+ do {
+ /* MUST block for unlink to complete */
+ if (unlinked)
+ blocked = cfs_block_allsigs();
+
+ rc2 = LNetEQPoll(&eqh, 1, timeout_ms, &event, &which);
+
+ if (unlinked)
+ cfs_restore_sigs(blocked);
+
+ CDEBUG(D_NET, "poll %d(%d %d)%s\n", rc2,
+ (rc2 <= 0) ? -1 : event.type,
+ (rc2 <= 0) ? -1 : event.status,
+ (rc2 > 0 && event.unlinked) ? " unlinked" : "");
+
+ LASSERT (rc2 != -EOVERFLOW); /* can't miss anything */
+
+ if (rc2 <= 0 || event.status != 0) {
+ /* timeout or error */
+ if (!replied && rc == 0)
+ rc = (rc2 < 0) ? rc2 :
+ (rc2 == 0) ? -ETIMEDOUT :
+ event.status;
+
+ if (!unlinked) {
+ /* Ensure completion in finite time... */
+ LNetMDUnlink(mdh);
+ /* No assertion (racing with network) */
+ unlinked = 1;
+ timeout_ms = a_long_time;
+ } else if (rc2 == 0) {
+ /* timed out waiting for unlink */
+ CWARN("ping %s: late network completion\n",
+ libcfs_id2str(id));
+ }
+ } else if (event.type == LNET_EVENT_REPLY) {
+ replied = 1;
+ rc = event.mlength;
+ }
+
+ } while (rc2 <= 0 || !event.unlinked);
+
+ if (!replied) {
+ if (rc >= 0)
+ CWARN("%s: Unexpected rc >= 0 but no reply!\n",
+ libcfs_id2str(id));
+ rc = -EIO;
+ goto out_1;
+ }
+
+ nob = rc;
+ LASSERT (nob >= 0 && nob <= infosz);
+
+ rc = -EPROTO; /* if I can't parse... */
+
+ if (nob < 8) {
+ /* can't check magic/version */
+ CERROR("%s: ping info too short %d\n",
+ libcfs_id2str(id), nob);
+ goto out_1;
+ }
+
+ if (info->pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
+ lnet_swap_pinginfo(info);
+ } else if (info->pi_magic != LNET_PROTO_PING_MAGIC) {
+ CERROR("%s: Unexpected magic %08x\n",
+ libcfs_id2str(id), info->pi_magic);
+ goto out_1;
+ }
+
+ if ((info->pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
+ CERROR("%s: ping w/o NI status: 0x%x\n",
+ libcfs_id2str(id), info->pi_features);
+ goto out_1;
+ }
+
+ if (nob < offsetof(lnet_ping_info_t, pi_ni[0])) {
+ CERROR("%s: Short reply %d(%d min)\n", libcfs_id2str(id),
+ nob, (int)offsetof(lnet_ping_info_t, pi_ni[0]));
+ goto out_1;
+ }
+
+ if (info->pi_nnis < n_ids)
+ n_ids = info->pi_nnis;
+
+ if (nob < offsetof(lnet_ping_info_t, pi_ni[n_ids])) {
+ CERROR("%s: Short reply %d(%d expected)\n", libcfs_id2str(id),
+ nob, (int)offsetof(lnet_ping_info_t, pi_ni[n_ids]));
+ goto out_1;
+ }
+
+ rc = -EFAULT; /* If I SEGV... */
+
+ for (i = 0; i < n_ids; i++) {
+ tmpid.pid = info->pi_pid;
+ tmpid.nid = info->pi_ni[i].ns_nid;
+ if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
+ goto out_1;
+ }
+ rc = info->pi_nnis;
+
+ out_1:
+ rc2 = LNetEQFree(eqh);
+ if (rc2 != 0)
+ CERROR("rc2 %d\n", rc2);
+ LASSERT (rc2 == 0);
+
+ out_0:
+ LIBCFS_FREE(info, infosz);
+ return rc;
+}
diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
new file mode 100644
index 0000000..28711e6
--- /dev/null
+++ b/drivers/staging/lustre/lnet/lnet/config.c
@@ -0,0 +1,1264 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+#include <linux/lnet/lib-lnet.h>
+
+typedef struct { /* tmp struct for parsing routes */
+ struct list_head ltb_list; /* stash on lists */
+ int ltb_size; /* allocated size */
+ char ltb_text[0]; /* text buffer */
+} lnet_text_buf_t;
+
+static int lnet_tbnob = 0; /* track text buf allocation */
+#define LNET_MAX_TEXTBUF_NOB (64<<10) /* bound allocation */
+#define LNET_SINGLE_TEXTBUF_NOB (4<<10)
+
+void
+lnet_syntax(char *name, char *str, int offset, int width)
+{
+ static char dots[LNET_SINGLE_TEXTBUF_NOB];
+ static char dashes[LNET_SINGLE_TEXTBUF_NOB];
+
+ memset(dots, '.', sizeof(dots));
+ dots[sizeof(dots)-1] = 0;
+ memset(dashes, '-', sizeof(dashes));
+ dashes[sizeof(dashes)-1] = 0;
+
+ LCONSOLE_ERROR_MSG(0x10f, "Error parsing '%s=\"%s\"'\n", name, str);
+ LCONSOLE_ERROR_MSG(0x110, "here...........%.*s..%.*s|%.*s|\n",
+ (int)strlen(name), dots, offset, dots,
+ (width < 1) ? 0 : width - 1, dashes);
+}
+
+int
+lnet_issep (char c)
+{
+ switch (c) {
+ case '\n':
+ case '\r':
+ case ';':
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+int
+lnet_net_unique(__u32 net, struct list_head *nilist)
+{
+ struct list_head *tmp;
+ lnet_ni_t *ni;
+
+ list_for_each (tmp, nilist) {
+ ni = list_entry(tmp, lnet_ni_t, ni_list);
+
+ if (LNET_NIDNET(ni->ni_nid) == net)
+ return 0;
+ }
+
+ return 1;
+}
+
+void
+lnet_ni_free(struct lnet_ni *ni)
+{
+ if (ni->ni_refs != NULL)
+ cfs_percpt_free(ni->ni_refs);
+
+ if (ni->ni_tx_queues != NULL)
+ cfs_percpt_free(ni->ni_tx_queues);
+
+ if (ni->ni_cpts != NULL)
+ cfs_expr_list_values_free(ni->ni_cpts, ni->ni_ncpts);
+
+ LIBCFS_FREE(ni, sizeof(*ni));
+}
+
+lnet_ni_t *
+lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist)
+{
+ struct lnet_tx_queue *tq;
+ struct lnet_ni *ni;
+ int rc;
+ int i;
+
+ if (!lnet_net_unique(net, nilist)) {
+ LCONSOLE_ERROR_MSG(0x111, "Duplicate network specified: %s\n",
+ libcfs_net2str(net));
+ return NULL;
+ }
+
+ LIBCFS_ALLOC(ni, sizeof(*ni));
+ if (ni == NULL) {
+ CERROR("Out of memory creating network %s\n",
+ libcfs_net2str(net));
+ return NULL;
+ }
+
+ spin_lock_init(&ni->ni_lock);
+ INIT_LIST_HEAD(&ni->ni_cptlist);
+ ni->ni_refs = cfs_percpt_alloc(lnet_cpt_table(),
+ sizeof(*ni->ni_refs[0]));
+ if (ni->ni_refs == NULL)
+ goto failed;
+
+ ni->ni_tx_queues = cfs_percpt_alloc(lnet_cpt_table(),
+ sizeof(*ni->ni_tx_queues[0]));
+ if (ni->ni_tx_queues == NULL)
+ goto failed;
+
+ cfs_percpt_for_each(tq, i, ni->ni_tx_queues)
+ INIT_LIST_HEAD(&tq->tq_delayed);
+
+ if (el == NULL) {
+ ni->ni_cpts = NULL;
+ ni->ni_ncpts = LNET_CPT_NUMBER;
+ } else {
+ rc = cfs_expr_list_values(el, LNET_CPT_NUMBER, &ni->ni_cpts);
+ if (rc <= 0) {
+ CERROR("Failed to set CPTs for NI %s: %d\n",
+ libcfs_net2str(net), rc);
+ goto failed;
+ }
+
+ LASSERT(rc <= LNET_CPT_NUMBER);
+ if (rc == LNET_CPT_NUMBER) {
+ LIBCFS_FREE(ni->ni_cpts, rc * sizeof(ni->ni_cpts[0]));
+ ni->ni_cpts = NULL;
+ }
+
+ ni->ni_ncpts = rc;
+ }
+
+ /* LND will fill in the address part of the NID */
+ ni->ni_nid = LNET_MKNID(net, 0);
+ ni->ni_last_alive = cfs_time_current_sec();
+ list_add_tail(&ni->ni_list, nilist);
+ return ni;
+ failed:
+ lnet_ni_free(ni);
+ return NULL;
+}
+
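+/*
+ * Parse the 'networks' string into a list of NIs. A hypothetical example
+ * of the accepted syntax:
+ *
+ *	networks="tcp0(eth0,eth1),o2ib0(ib0)[0,1]"
+ *
+ * i.e. a comma-separated list of networks, each optionally followed by an
+ * interface list in parentheses and a CPT list in square brackets; the
+ * loopback NI is always added implicitly.
+ */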
+int
+lnet_parse_networks(struct list_head *nilist, char *networks)
+{
+ struct cfs_expr_list *el = NULL;
+ int tokensize = strlen(networks) + 1;
+ char *tokens;
+ char *str;
+ char *tmp;
+ struct lnet_ni *ni;
+ __u32 net;
+ int nnets = 0;
+
+ if (strlen(networks) > LNET_SINGLE_TEXTBUF_NOB) {
+ /* _WAY_ conservative */
+ LCONSOLE_ERROR_MSG(0x112, "Can't parse networks: string too "
+ "long\n");
+ return -EINVAL;
+ }
+
+ LIBCFS_ALLOC(tokens, tokensize);
+ if (tokens == NULL) {
+ CERROR("Can't allocate net tokens\n");
+ return -ENOMEM;
+ }
+
+ the_lnet.ln_network_tokens = tokens;
+ the_lnet.ln_network_tokens_nob = tokensize;
+ memcpy (tokens, networks, tokensize);
+ str = tmp = tokens;
+
+ /* Add in the loopback network */
+ ni = lnet_ni_alloc(LNET_MKNET(LOLND, 0), NULL, nilist);
+ if (ni == NULL)
+ goto failed;
+
+ while (str != NULL && *str != 0) {
+ char *comma = strchr(str, ',');
+ char *bracket = strchr(str, '(');
+ char *square = strchr(str, '[');
+ char *iface;
+ int niface;
+ int rc;
+
+ /* NB we don't check interface conflicts here; it's the LND's
+ * responsibility (if it cares at all) */
+
+ if (square != NULL && (comma == NULL || square < comma)) {
+ /* i.e.: o2ib0(ib0)[1,2]; the numbers between square
+ * brackets are the CPTs this NI is bound to */
+ if (bracket != NULL && bracket > square) {
+ tmp = square;
+ goto failed_syntax;
+ }
+
+ tmp = strchr(square, ']');
+ if (tmp == NULL) {
+ tmp = square;
+ goto failed_syntax;
+ }
+
+ rc = cfs_expr_list_parse(square, tmp - square + 1,
+ 0, LNET_CPT_NUMBER - 1, &el);
+ if (rc != 0) {
+ tmp = square;
+ goto failed_syntax;
+ }
+
+ while (square <= tmp)
+ *square++ = ' ';
+ }
+
+ if (bracket == NULL ||
+ (comma != NULL && comma < bracket)) {
+
+ /* no interface list specified */
+
+ if (comma != NULL)
+ *comma++ = 0;
+ net = libcfs_str2net(cfs_trimwhite(str));
+
+ if (net == LNET_NIDNET(LNET_NID_ANY)) {
+ LCONSOLE_ERROR_MSG(0x113, "Unrecognised network"
+ " type\n");
+ tmp = str;
+ goto failed_syntax;
+ }
+
+ if (LNET_NETTYP(net) != LOLND && /* LO is implicit */
+ lnet_ni_alloc(net, el, nilist) == NULL)
+ goto failed;
+
+ if (el != NULL) {
+ cfs_expr_list_free(el);
+ el = NULL;
+ }
+
+ str = comma;
+ continue;
+ }
+
+ *bracket = 0;
+ net = libcfs_str2net(cfs_trimwhite(str));
+ if (net == LNET_NIDNET(LNET_NID_ANY)) {
+ tmp = str;
+ goto failed_syntax;
+ }
+
+ nnets++;
+ ni = lnet_ni_alloc(net, el, nilist);
+ if (ni == NULL)
+ goto failed;
+
+ if (el != NULL) {
+ cfs_expr_list_free(el);
+ el = NULL;
+ }
+
+ niface = 0;
+ iface = bracket + 1;
+
+ bracket = strchr(iface, ')');
+ if (bracket == NULL) {
+ tmp = iface;
+ goto failed_syntax;
+ }
+
+ *bracket = 0;
+ do {
+ comma = strchr(iface, ',');
+ if (comma != NULL)
+ *comma++ = 0;
+
+ iface = cfs_trimwhite(iface);
+ if (*iface == 0) {
+ tmp = iface;
+ goto failed_syntax;
+ }
+
+ if (niface == LNET_MAX_INTERFACES) {
+ LCONSOLE_ERROR_MSG(0x115, "Too many interfaces "
+ "for net %s\n",
+ libcfs_net2str(net));
+ goto failed;
+ }
+
+ ni->ni_interfaces[niface++] = iface;
+ iface = comma;
+ } while (iface != NULL);
+
+ str = bracket + 1;
+ comma = strchr(bracket + 1, ',');
+ if (comma != NULL) {
+ *comma = 0;
+ str = cfs_trimwhite(str);
+ if (*str != 0) {
+ tmp = str;
+ goto failed_syntax;
+ }
+ str = comma + 1;
+ continue;
+ }
+
+ str = cfs_trimwhite(str);
+ if (*str != 0) {
+ tmp = str;
+ goto failed_syntax;
+ }
+ }
+
+ LASSERT(!list_empty(nilist));
+ return 0;
+
+ failed_syntax:
+ lnet_syntax("networks", networks, (int)(tmp - tokens), strlen(tmp));
+ failed:
+ while (!list_empty(nilist)) {
+ ni = list_entry(nilist->next, lnet_ni_t, ni_list);
+
+ list_del(&ni->ni_list);
+ lnet_ni_free(ni);
+ }
+
+ if (el != NULL)
+ cfs_expr_list_free(el);
+
+ LIBCFS_FREE(tokens, tokensize);
+ the_lnet.ln_network_tokens = NULL;
+
+ return -EINVAL;
+}
+
+lnet_text_buf_t *
+lnet_new_text_buf (int str_len)
+{
+ lnet_text_buf_t *ltb;
+ int nob;
+
+ /* NB allocate space for the terminating 0 */
+ nob = offsetof(lnet_text_buf_t, ltb_text[str_len + 1]);
+ if (nob > LNET_SINGLE_TEXTBUF_NOB) {
+ /* _way_ conservative for "route net gateway..." */
+ CERROR("text buffer too big\n");
+ return NULL;
+ }
+
+ if (lnet_tbnob + nob > LNET_MAX_TEXTBUF_NOB) {
+ CERROR("Too many text buffers\n");
+ return NULL;
+ }
+
+ LIBCFS_ALLOC(ltb, nob);
+ if (ltb == NULL)
+ return NULL;
+
+ ltb->ltb_size = nob;
+ ltb->ltb_text[0] = 0;
+ lnet_tbnob += nob;
+ return ltb;
+}
+
+void
+lnet_free_text_buf (lnet_text_buf_t *ltb)
+{
+ lnet_tbnob -= ltb->ltb_size;
+ LIBCFS_FREE(ltb, ltb->ltb_size);
+}
+
+void
+lnet_free_text_bufs(struct list_head *tbs)
+{
+ lnet_text_buf_t *ltb;
+
+ while (!list_empty(tbs)) {
+ ltb = list_entry(tbs->next, lnet_text_buf_t, ltb_list);
+
+ list_del(&ltb->ltb_list);
+ lnet_free_text_buf(ltb);
+ }
+}
+
+void
+lnet_print_text_bufs(struct list_head *tbs)
+{
+ struct list_head *tmp;
+ lnet_text_buf_t *ltb;
+
+ list_for_each (tmp, tbs) {
+ ltb = list_entry(tmp, lnet_text_buf_t, ltb_list);
+
+ CDEBUG(D_WARNING, "%s\n", ltb->ltb_text);
+ }
+
+ CDEBUG(D_WARNING, "%d allocated\n", lnet_tbnob);
+}
+
+int
+lnet_str2tbs_sep (struct list_head *tbs, char *str)
+{
+ struct list_head pending;
+ char *sep;
+ int nob;
+ int i;
+ lnet_text_buf_t *ltb;
+
+ INIT_LIST_HEAD(&pending);
+
+ /* Split 'str' into separate commands */
+ for (;;) {
+ /* skip leading whitespace */
+ while (cfs_iswhite(*str))
+ str++;
+
+ /* scan for separator or comment */
+ for (sep = str; *sep != 0; sep++)
+ if (lnet_issep(*sep) || *sep == '#')
+ break;
+
+ nob = (int)(sep - str);
+ if (nob > 0) {
+ ltb = lnet_new_text_buf(nob);
+ if (ltb == NULL) {
+ lnet_free_text_bufs(&pending);
+ return -1;
+ }
+
+ for (i = 0; i < nob; i++)
+ if (cfs_iswhite(str[i]))
+ ltb->ltb_text[i] = ' ';
+ else
+ ltb->ltb_text[i] = str[i];
+
+ ltb->ltb_text[nob] = 0;
+
+ list_add_tail(&ltb->ltb_list, &pending);
+ }
+
+ if (*sep == '#') {
+ /* scan for separator */
+ do {
+ sep++;
+ } while (*sep != 0 && !lnet_issep(*sep));
+ }
+
+ if (*sep == 0)
+ break;
+
+ str = sep + 1;
+ }
+
+ list_splice(&pending, tbs->prev);
+ return 0;
+}
+
+int
+lnet_expand1tb (struct list_head *list,
+ char *str, char *sep1, char *sep2,
+ char *item, int itemlen)
+{
+ int len1 = (int)(sep1 - str);
+ int len2 = strlen(sep2 + 1);
+ lnet_text_buf_t *ltb;
+
+ LASSERT (*sep1 == '[');
+ LASSERT (*sep2 == ']');
+
+ ltb = lnet_new_text_buf(len1 + itemlen + len2);
+ if (ltb == NULL)
+ return -ENOMEM;
+
+ memcpy(ltb->ltb_text, str, len1);
+ memcpy(&ltb->ltb_text[len1], item, itemlen);
+ memcpy(&ltb->ltb_text[len1+itemlen], sep2 + 1, len2);
+ ltb->ltb_text[len1 + itemlen + len2] = 0;
+
+ list_add_tail(&ltb->ltb_list, list);
+ return 0;
+}
+
+int
+lnet_str2tbs_expand (struct list_head *tbs, char *str)
+{
+ char num[16];
+ struct list_head pending;
+ char *sep;
+ char *sep2;
+ char *parsed;
+ char *enditem;
+ int lo;
+ int hi;
+ int stride;
+ int i;
+ int nob;
+ int scanned;
+
+ INIT_LIST_HEAD(&pending);
+
+ sep = strchr(str, '[');
+ if (sep == NULL) /* nothing to expand */
+ return 0;
+
+ sep2 = strchr(sep, ']');
+ if (sep2 == NULL)
+ goto failed;
+
+ for (parsed = sep; parsed < sep2; parsed = enditem) {
+
+ enditem = ++parsed;
+ while (enditem < sep2 && *enditem != ',')
+ enditem++;
+
+ if (enditem == parsed) /* no empty items */
+ goto failed;
+
+ if (sscanf(parsed, "%d-%d/%d%n", &lo, &hi, &stride, &scanned) < 3) {
+
+ if (sscanf(parsed, "%d-%d%n", &lo, &hi, &scanned) < 2) {
+
+ /* simple string enumeration */
+ if (lnet_expand1tb(&pending, str, sep, sep2,
+ parsed, (int)(enditem - parsed)) != 0)
+ goto failed;
+
+ continue;
+ }
+
+ stride = 1;
+ }
+
+ /* range expansion */
+
+ if (enditem != parsed + scanned) /* no trailing junk */
+ goto failed;
+
+ if (hi < 0 || lo < 0 || stride < 0 || hi < lo ||
+ (hi - lo) % stride != 0)
+ goto failed;
+
+ for (i = lo; i <= hi; i += stride) {
+
+ snprintf(num, sizeof(num), "%d", i);
+ nob = strlen(num);
+ if (nob + 1 == sizeof(num))
+ goto failed;
+
+ if (lnet_expand1tb(&pending, str, sep, sep2,
+ num, nob) != 0)
+ goto failed;
+ }
+ }
+
+ list_splice(&pending, tbs->prev);
+ return 1;
+
+ failed:
+ lnet_free_text_bufs(&pending);
+ return -1;
+}
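+
+/*
+ * Expansion sketch (assumed examples, inferred from the sscanf patterns
+ * above): each bracketed item or range produces one new text buffer, e.g.
+ *
+ *   "tcp[0-3]"    ->  "tcp0" "tcp1" "tcp2" "tcp3"
+ *   "tcp[0-6/2]"  ->  "tcp0" "tcp2" "tcp4" "tcp6"
+ *   "o2ib[0,2]"   ->  "o2ib0" "o2ib2"
+ */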
+
+int
+lnet_parse_hops (char *str, unsigned int *hops)
+{
+ int len = strlen(str);
+ int nob = len;
+
+ return (sscanf(str, "%u%n", hops, &nob) >= 1 &&
+ nob == len &&
+ *hops > 0 && *hops < 256);
+}
+
+
+int
+lnet_parse_route (char *str, int *im_a_router)
+{
+ /* static scratch buffer OK (single threaded) */
+ static char cmd[LNET_SINGLE_TEXTBUF_NOB];
+
+ struct list_head nets;
+ struct list_head gateways;
+ struct list_head *tmp1;
+ struct list_head *tmp2;
+ __u32 net;
+ lnet_nid_t nid;
+ lnet_text_buf_t *ltb;
+ int rc;
+ char *sep;
+ char *token = str;
+ int ntokens = 0;
+ int myrc = -1;
+ unsigned int hops;
+ int got_hops = 0;
+
+ INIT_LIST_HEAD(&gateways);
+ INIT_LIST_HEAD(&nets);
+
+ /* save a copy of the string for error messages */
+ strncpy(cmd, str, sizeof(cmd) - 1);
+ cmd[sizeof(cmd) - 1] = 0;
+
+ sep = str;
+ for (;;) {
+ /* scan for token start */
+ while (cfs_iswhite(*sep))
+ sep++;
+ if (*sep == 0) {
+ if (ntokens < (got_hops ? 3 : 2))
+ goto token_error;
+ break;
+ }
+
+ ntokens++;
+ token = sep++;
+
+ /* scan for token end */
+ while (*sep != 0 && !cfs_iswhite(*sep))
+ sep++;
+ if (*sep != 0)
+ *sep++ = 0;
+
+ if (ntokens == 1) {
+ tmp2 = &nets; /* expanding nets */
+ } else if (ntokens == 2 &&
+ lnet_parse_hops(token, &hops)) {
+ got_hops = 1; /* got a hop count */
+ continue;
+ } else {
+ tmp2 = &gateways; /* expanding gateways */
+ }
+
+ ltb = lnet_new_text_buf(strlen(token));
+ if (ltb == NULL)
+ goto out;
+
+ strcpy(ltb->ltb_text, token);
+ tmp1 = &ltb->ltb_list;
+ list_add_tail(tmp1, tmp2);
+
+ while (tmp1 != tmp2) {
+ ltb = list_entry(tmp1, lnet_text_buf_t, ltb_list);
+
+ rc = lnet_str2tbs_expand(tmp1->next, ltb->ltb_text);
+ if (rc < 0)
+ goto token_error;
+
+ tmp1 = tmp1->next;
+
+ if (rc > 0) { /* expanded! */
+ list_del(&ltb->ltb_list);
+ lnet_free_text_buf(ltb);
+ continue;
+ }
+
+ if (ntokens == 1) {
+ net = libcfs_str2net(ltb->ltb_text);
+ if (net == LNET_NIDNET(LNET_NID_ANY) ||
+ LNET_NETTYP(net) == LOLND)
+ goto token_error;
+ } else {
+ nid = libcfs_str2nid(ltb->ltb_text);
+ if (nid == LNET_NID_ANY ||
+ LNET_NETTYP(LNET_NIDNET(nid)) == LOLND)
+ goto token_error;
+ }
+ }
+ }
+
+ if (!got_hops)
+ hops = 1;
+
+ LASSERT (!list_empty(&nets));
+ LASSERT (!list_empty(&gateways));
+
+ list_for_each (tmp1, &nets) {
+ ltb = list_entry(tmp1, lnet_text_buf_t, ltb_list);
+ net = libcfs_str2net(ltb->ltb_text);
+ LASSERT (net != LNET_NIDNET(LNET_NID_ANY));
+
+ list_for_each (tmp2, &gateways) {
+ ltb = list_entry(tmp2, lnet_text_buf_t, ltb_list);
+ nid = libcfs_str2nid(ltb->ltb_text);
+ LASSERT (nid != LNET_NID_ANY);
+
+ if (lnet_islocalnid(nid)) {
+ *im_a_router = 1;
+ continue;
+ }
+
+ rc = lnet_add_route (net, hops, nid);
+ if (rc != 0) {
+ CERROR("Can't create route "
+ "to %s via %s\n",
+ libcfs_net2str(net),
+ libcfs_nid2str(nid));
+ goto out;
+ }
+ }
+ }
+
+ myrc = 0;
+ goto out;
+
+ token_error:
+ lnet_syntax("routes", cmd, (int)(token - str), strlen(token));
+ out:
+ lnet_free_text_bufs(&nets);
+ lnet_free_text_bufs(&gateways);
+ return myrc;
+}
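+
+/*
+ * Route string sketch (assumed examples, based on the token handling above):
+ * each route is "<net> [<hop count>] <gateway NID>...", and both nets and
+ * gateways may use bracket expansion, e.g.
+ *
+ *   "o2ib0 192.168.0.1@tcp"        reach o2ib0 via one TCP gateway
+ *   "tcp0 2 10.0.0.[2-4]@o2ib0"    reach tcp0 (hop count 2) via three
+ *                                  gateways on o2ib0
+ */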
+
+int
+lnet_parse_route_tbs(struct list_head *tbs, int *im_a_router)
+{
+ lnet_text_buf_t *ltb;
+
+ while (!list_empty(tbs)) {
+ ltb = list_entry(tbs->next, lnet_text_buf_t, ltb_list);
+
+ if (lnet_parse_route(ltb->ltb_text, im_a_router) < 0) {
+ lnet_free_text_bufs(tbs);
+ return -EINVAL;
+ }
+
+ list_del(&ltb->ltb_list);
+ lnet_free_text_buf(ltb);
+ }
+
+ return 0;
+}
+
+int
+lnet_parse_routes (char *routes, int *im_a_router)
+{
+ struct list_head tbs;
+ int rc = 0;
+
+ *im_a_router = 0;
+
+ INIT_LIST_HEAD(&tbs);
+
+ if (lnet_str2tbs_sep(&tbs, routes) < 0) {
+ CERROR("Error parsing routes\n");
+ rc = -EINVAL;
+ } else {
+ rc = lnet_parse_route_tbs(&tbs, im_a_router);
+ }
+
+ LASSERT (lnet_tbnob == 0);
+ return rc;
+}
+
+int
+lnet_match_network_token(char *token, int len, __u32 *ipaddrs, int nip)
+{
+ LIST_HEAD (list);
+ int rc;
+ int i;
+
+ rc = cfs_ip_addr_parse(token, len, &list);
+ if (rc != 0)
+ return rc;
+
+ for (rc = i = 0; !rc && i < nip; i++)
+ rc = cfs_ip_addr_match(ipaddrs[i], &list);
+
+ cfs_ip_addr_free(&list);
+
+ return rc;
+}
+
+int
+lnet_match_network_tokens(char *net_entry, __u32 *ipaddrs, int nip)
+{
+ static char tokens[LNET_SINGLE_TEXTBUF_NOB];
+
+ int matched = 0;
+ int ntokens = 0;
+ int len;
+ char *net = NULL;
+ char *sep;
+ char *token;
+ int rc;
+
+ LASSERT (strlen(net_entry) < sizeof(tokens));
+
+ /* work on a copy of the string */
+ strcpy(tokens, net_entry);
+ sep = tokens;
+ for (;;) {
+ /* scan for token start */
+ while (cfs_iswhite(*sep))
+ sep++;
+ if (*sep == 0)
+ break;
+
+ token = sep++;
+
+ /* scan for token end */
+ while (*sep != 0 && !cfs_iswhite(*sep))
+ sep++;
+ if (*sep != 0)
+ *sep++ = 0;
+
+ if (ntokens++ == 0) {
+ net = token;
+ continue;
+ }
+
+ len = strlen(token);
+
+ rc = lnet_match_network_token(token, len, ipaddrs, nip);
+ if (rc < 0) {
+ lnet_syntax("ip2nets", net_entry,
+ (int)(token - tokens), len);
+ return rc;
+ }
+
+ matched |= (rc != 0);
+ }
+
+ if (!matched)
+ return 0;
+
+ strcpy(net_entry, net); /* replace with matched net */
+ return 1;
+}
+
+__u32
+lnet_netspec2net(char *netspec)
+{
+ char *bracket = strchr(netspec, '(');
+ __u32 net;
+
+ if (bracket != NULL)
+ *bracket = 0;
+
+ net = libcfs_str2net(netspec);
+
+ if (bracket != NULL)
+ *bracket = '(';
+
+ return net;
+}
+
+int
+lnet_splitnets(char *source, struct list_head *nets)
+{
+ int offset = 0;
+ int offset2;
+ int len;
+ lnet_text_buf_t *tb;
+ lnet_text_buf_t *tb2;
+ struct list_head *t;
+ char *sep;
+ char *bracket;
+ __u32 net;
+
+ LASSERT (!list_empty(nets));
+ LASSERT (nets->next == nets->prev); /* single entry */
+
+ tb = list_entry(nets->next, lnet_text_buf_t, ltb_list);
+
+ for (;;) {
+ sep = strchr(tb->ltb_text, ',');
+ bracket = strchr(tb->ltb_text, '(');
+
+ if (sep != NULL &&
+ bracket != NULL &&
+ bracket < sep) {
+ /* netspec lists interfaces... */
+
+ offset2 = offset + (int)(bracket - tb->ltb_text);
+ len = strlen(bracket);
+
+ bracket = strchr(bracket + 1, ')');
+
+ if (bracket == NULL ||
+ !(bracket[1] == ',' || bracket[1] == 0)) {
+ lnet_syntax("ip2nets", source, offset2, len);
+ return -EINVAL;
+ }
+
+ sep = (bracket[1] == 0) ? NULL : bracket + 1;
+ }
+
+ if (sep != NULL)
+ *sep++ = 0;
+
+ net = lnet_netspec2net(tb->ltb_text);
+ if (net == LNET_NIDNET(LNET_NID_ANY)) {
+ lnet_syntax("ip2nets", source, offset,
+ strlen(tb->ltb_text));
+ return -EINVAL;
+ }
+
+ list_for_each(t, nets) {
+ tb2 = list_entry(t, lnet_text_buf_t, ltb_list);
+
+ if (tb2 == tb)
+ continue;
+
+ if (net == lnet_netspec2net(tb2->ltb_text)) {
+ /* duplicate network */
+ lnet_syntax("ip2nets", source, offset,
+ strlen(tb->ltb_text));
+ return -EINVAL;
+ }
+ }
+
+ if (sep == NULL)
+ return 0;
+
+ offset += (int)(sep - tb->ltb_text);
+ tb2 = lnet_new_text_buf(strlen(sep));
+ if (tb2 == NULL)
+ return -ENOMEM;
+
+ strcpy(tb2->ltb_text, sep);
+ list_add_tail(&tb2->ltb_list, nets);
+
+ tb = tb2;
+ }
+}
+
+int
+lnet_match_networks (char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
+{
+ static char networks[LNET_SINGLE_TEXTBUF_NOB];
+ static char source[LNET_SINGLE_TEXTBUF_NOB];
+
+ struct list_head raw_entries;
+ struct list_head matched_nets;
+ struct list_head current_nets;
+ struct list_head *t;
+ struct list_head *t2;
+ lnet_text_buf_t *tb;
+ lnet_text_buf_t *tb2;
+ __u32 net1;
+ __u32 net2;
+ int len;
+ int count;
+ int dup;
+ int rc;
+
+ INIT_LIST_HEAD(&raw_entries);
+ if (lnet_str2tbs_sep(&raw_entries, ip2nets) < 0) {
+ CERROR("Error parsing ip2nets\n");
+ LASSERT (lnet_tbnob == 0);
+ return -EINVAL;
+ }
+
+ INIT_LIST_HEAD(&matched_nets);
+ INIT_LIST_HEAD(&current_nets);
+ networks[0] = 0;
+ count = 0;
+ len = 0;
+ rc = 0;
+
+ while (!list_empty(&raw_entries)) {
+ tb = list_entry(raw_entries.next, lnet_text_buf_t,
+ ltb_list);
+
+ strncpy(source, tb->ltb_text, sizeof(source)-1);
+ source[sizeof(source)-1] = 0;
+
+ /* on a match, replace ltb_text with the matched network(s) */
+ rc = lnet_match_network_tokens(tb->ltb_text, ipaddrs, nip);
+ if (rc < 0)
+ break;
+
+ list_del(&tb->ltb_list);
+
+ if (rc == 0) { /* no match */
+ lnet_free_text_buf(tb);
+ continue;
+ }
+
+ /* split into separate networks */
+ INIT_LIST_HEAD(&current_nets);
+ list_add(&tb->ltb_list, &current_nets);
+ rc = lnet_splitnets(source, &current_nets);
+ if (rc < 0)
+ break;
+
+ dup = 0;
+ list_for_each (t, &current_nets) {
+ tb = list_entry(t, lnet_text_buf_t, ltb_list);
+ net1 = lnet_netspec2net(tb->ltb_text);
+ LASSERT (net1 != LNET_NIDNET(LNET_NID_ANY));
+
+ list_for_each(t2, &matched_nets) {
+ tb2 = list_entry(t2, lnet_text_buf_t,
+ ltb_list);
+ net2 = lnet_netspec2net(tb2->ltb_text);
+ LASSERT (net2 != LNET_NIDNET(LNET_NID_ANY));
+
+ if (net1 == net2) {
+ dup = 1;
+ break;
+ }
+ }
+
+ if (dup)
+ break;
+ }
+
+ if (dup) {
+ lnet_free_text_bufs(&current_nets);
+ continue;
+ }
+
+ list_for_each_safe(t, t2, &current_nets) {
+ tb = list_entry(t, lnet_text_buf_t, ltb_list);
+
+ list_del(&tb->ltb_list);
+ list_add_tail(&tb->ltb_list, &matched_nets);
+
+ len += snprintf(networks + len, sizeof(networks) - len,
+ "%s%s", (len == 0) ? "" : ",",
+ tb->ltb_text);
+
+ if (len >= sizeof(networks)) {
+ CERROR("Too many matched networks\n");
+ rc = -E2BIG;
+ goto out;
+ }
+ }
+
+ count++;
+ }
+
+ out:
+ lnet_free_text_bufs(&raw_entries);
+ lnet_free_text_bufs(&matched_nets);
+ lnet_free_text_bufs(&current_nets);
+ LASSERT (lnet_tbnob == 0);
+
+ if (rc < 0)
+ return rc;
+
+ *networksp = networks;
+ return count;
+}
+
+void
+lnet_ipaddr_free_enumeration(__u32 *ipaddrs, int nip)
+{
+ LIBCFS_FREE(ipaddrs, nip * sizeof(*ipaddrs));
+}
+
+int
+lnet_ipaddr_enumerate (__u32 **ipaddrsp)
+{
+ int up;
+ __u32 netmask;
+ __u32 *ipaddrs;
+ __u32 *ipaddrs2;
+ int nip;
+ char **ifnames;
+ int nif = libcfs_ipif_enumerate(&ifnames);
+ int i;
+ int rc;
+
+ if (nif <= 0)
+ return nif;
+
+ LIBCFS_ALLOC(ipaddrs, nif * sizeof(*ipaddrs));
+ if (ipaddrs == NULL) {
+ CERROR("Can't allocate ipaddrs[%d]\n", nif);
+ libcfs_ipif_free_enumeration(ifnames, nif);
+ return -ENOMEM;
+ }
+
+ for (i = nip = 0; i < nif; i++) {
+ if (!strcmp(ifnames[i], "lo"))
+ continue;
+
+ rc = libcfs_ipif_query(ifnames[i], &up,
+ &ipaddrs[nip], &netmask);
+ if (rc != 0) {
+ CWARN("Can't query interface %s: %d\n",
+ ifnames[i], rc);
+ continue;
+ }
+
+ if (!up) {
+ CWARN("Ignoring interface %s: it's down\n",
+ ifnames[i]);
+ continue;
+ }
+
+ nip++;
+ }
+
+ libcfs_ipif_free_enumeration(ifnames, nif);
+
+ if (nip == nif) {
+ *ipaddrsp = ipaddrs;
+ } else {
+ if (nip > 0) {
+ LIBCFS_ALLOC(ipaddrs2, nip * sizeof(*ipaddrs2));
+ if (ipaddrs2 == NULL) {
+ CERROR("Can't allocate ipaddrs[%d]\n", nip);
+ nip = -ENOMEM;
+ } else {
+ memcpy(ipaddrs2, ipaddrs,
+ nip * sizeof(*ipaddrs));
+ *ipaddrsp = ipaddrs2;
+ rc = nip;
+ }
+ }
+ lnet_ipaddr_free_enumeration(ipaddrs, nif);
+ }
+ return nip;
+}
+
+int
+lnet_parse_ip2nets (char **networksp, char *ip2nets)
+{
+ __u32 *ipaddrs;
+ int nip = lnet_ipaddr_enumerate(&ipaddrs);
+ int rc;
+
+ if (nip < 0) {
+ LCONSOLE_ERROR_MSG(0x117, "Error %d enumerating local IP "
+ "interfaces for ip2nets to match\n", nip);
+ return nip;
+ }
+
+ if (nip == 0) {
+ LCONSOLE_ERROR_MSG(0x118, "No local IP interfaces "
+ "for ip2nets to match\n");
+ return -ENOENT;
+ }
+
+ rc = lnet_match_networks(networksp, ip2nets, ipaddrs, nip);
+ lnet_ipaddr_free_enumeration(ipaddrs, nip);
+
+ if (rc < 0) {
+ LCONSOLE_ERROR_MSG(0x119, "Error %d parsing ip2nets\n", rc);
+ return rc;
+ }
+
+ if (rc == 0) {
+ LCONSOLE_ERROR_MSG(0x11a, "ip2nets does not match "
+ "any local IP interfaces\n");
+ return -ENOENT;
+ }
+
+ return 0;
+}
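+
+/*
+ * ip2nets sketch (assumed example; the pattern syntax is handled by
+ * cfs_ip_addr_parse()): each entry is "<net spec> <ip pattern>...", and
+ * every entry whose patterns match a local, up, non-loopback interface
+ * contributes its net(s) to the generated "networks" string, e.g.
+ *
+ *   ip2nets="tcp(eth0) 192.168.0.*; o2ib0 10.10.[1-3].*"
+ */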
+
+int
+lnet_set_ip_niaddr (lnet_ni_t *ni)
+{
+ __u32 net = LNET_NIDNET(ni->ni_nid);
+ char **names;
+ int n;
+ __u32 ip;
+ __u32 netmask;
+ int up;
+ int i;
+ int rc;
+
+ /* Convenience for LNDs that use the IP address of a local interface as
+ * the local address part of their NID */
+
+ if (ni->ni_interfaces[0] != NULL) {
+
+ CLASSERT (LNET_MAX_INTERFACES > 1);
+
+ if (ni->ni_interfaces[1] != NULL) {
+ CERROR("Net %s doesn't support multiple interfaces\n",
+ libcfs_net2str(net));
+ return -EPERM;
+ }
+
+ rc = libcfs_ipif_query(ni->ni_interfaces[0],
+ &up, &ip, &netmask);
+ if (rc != 0) {
+ CERROR("Net %s can't query interface %s: %d\n",
+ libcfs_net2str(net), ni->ni_interfaces[0], rc);
+ return -EPERM;
+ }
+
+ if (!up) {
+ CERROR("Net %s can't use interface %s: it's down\n",
+ libcfs_net2str(net), ni->ni_interfaces[0]);
+ return -ENETDOWN;
+ }
+
+ ni->ni_nid = LNET_MKNID(net, ip);
+ return 0;
+ }
+
+ n = libcfs_ipif_enumerate(&names);
+ if (n <= 0) {
+ CERROR("Net %s can't enumerate interfaces: %d\n",
+ libcfs_net2str(net), n);
+ return 0;
+ }
+
+ for (i = 0; i < n; i++) {
+ if (!strcmp(names[i], "lo")) /* skip the loopback IF */
+ continue;
+
+ rc = libcfs_ipif_query(names[i], &up, &ip, &netmask);
+
+ if (rc != 0) {
+ CWARN("Net %s can't query interface %s: %d\n",
+ libcfs_net2str(net), names[i], rc);
+ continue;
+ }
+
+ if (!up) {
+ CWARN("Net %s ignoring interface %s (down)\n",
+ libcfs_net2str(net), names[i]);
+ continue;
+ }
+
+ libcfs_ipif_free_enumeration(names, n);
+ ni->ni_nid = LNET_MKNID(net, ip);
+ return 0;
+ }
+
+ CERROR("Net %s can't find any interfaces\n", libcfs_net2str(net));
+ libcfs_ipif_free_enumeration(names, n);
+ return -ENOENT;
+}
+EXPORT_SYMBOL(lnet_set_ip_niaddr);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
new file mode 100644
index 0000000..4ce68d3
--- /dev/null
+++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c
@@ -0,0 +1,445 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/lnet/lib-eq.c
+ *
+ * Library level Event queue management routines
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+#include <linux/lnet/lib-lnet.h>
+
+/**
+ * Create an event queue that has room for \a count number of events.
+ *
+ * The event queue is circular and older events will be overwritten by new
+ * ones if they are not removed in time by the user using the functions
+ * LNetEQGet(), LNetEQWait(), or LNetEQPoll(). It is up to the user to
+ * determine the appropriate size of the event queue to prevent this loss
+ * of events. Note that when an EQ handler is specified in \a callback, no
+ * event loss can happen, since the handler is run for each event deposited
+ * into the EQ.
+ *
+ * \param count The number of events to be stored in the event queue. It
+ * will be rounded up to the next power of two.
+ * \param callback A handler function that runs when an event is deposited
+ * into the EQ. The constant value LNET_EQ_HANDLER_NONE can be used to
+ * indicate that no event handler is desired.
+ * \param handle On successful return, this location will hold a handle for
+ * the newly created EQ.
+ *
+ * \retval 0 On success.
+ * \retval -EINVAL If a parameter is not valid.
+ * \retval -ENOMEM If memory for the EQ can't be allocated.
+ *
+ * \see lnet_eq_handler_t for the discussion on EQ handler semantics.
+ */
+int
+LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
+ lnet_handle_eq_t *handle)
+{
+ lnet_eq_t *eq;
+
+ LASSERT (the_lnet.ln_init);
+ LASSERT (the_lnet.ln_refcount > 0);
+
+ /* We need count to be a power of 2 so that when eq_{enq,deq}_seq
+ * overflow, they don't skip entries, so the queue has the same
+ * apparent capacity at all times */
+
+ count = cfs_power2_roundup(count);
+
+ if (callback != LNET_EQ_HANDLER_NONE && count != 0) {
+ CWARN("EQ callback is guaranteed to get every event, "
+ "do you still want to set eqcount %d for polling "
+ "event which will have locking overhead? "
+ "Please contact with developer to confirm\n", count);
+ }
+
+ /* count can be 0 if the caller only needs the callback; this
+ * eliminates the overhead of enqueueing events */
+ if (count == 0 && callback == LNET_EQ_HANDLER_NONE)
+ return -EINVAL;
+
+ eq = lnet_eq_alloc();
+ if (eq == NULL)
+ return -ENOMEM;
+
+ if (count != 0) {
+ LIBCFS_ALLOC(eq->eq_events, count * sizeof(lnet_event_t));
+ if (eq->eq_events == NULL)
+ goto failed;
+ /* NB the allocator has set all event sequence numbers to 0,
+ * so all of them should be earlier than eq_deq_seq */
+ }
+
+ eq->eq_deq_seq = 1;
+ eq->eq_enq_seq = 1;
+ eq->eq_size = count;
+ eq->eq_callback = callback;
+
+ eq->eq_refs = cfs_percpt_alloc(lnet_cpt_table(),
+ sizeof(*eq->eq_refs[0]));
+ if (eq->eq_refs == NULL)
+ goto failed;
+
+ /* MUST hold the exclusive lnet_res_lock */
+ lnet_res_lock(LNET_LOCK_EX);
+ /* NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do
+ * both EQ lookup and poll event with only lnet_eq_wait_lock */
+ lnet_eq_wait_lock();
+
+ lnet_res_lh_initialize(&the_lnet.ln_eq_container, &eq->eq_lh);
+ list_add(&eq->eq_list, &the_lnet.ln_eq_container.rec_active);
+
+ lnet_eq_wait_unlock();
+ lnet_res_unlock(LNET_LOCK_EX);
+
+ lnet_eq2handle(handle, eq);
+ return 0;
+
+failed:
+ if (eq->eq_events != NULL)
+ LIBCFS_FREE(eq->eq_events, count * sizeof(lnet_event_t));
+
+ if (eq->eq_refs != NULL)
+ cfs_percpt_free(eq->eq_refs);
+
+ lnet_eq_free(eq);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(LNetEQAlloc);
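+
+/*
+ * Minimal usage sketch for LNetEQAlloc()/LNetEQFree(); "example_eq_cb" and
+ * "example_eqh" are placeholder names, not part of the API:
+ *
+ *   static lnet_handle_eq_t example_eqh;
+ *
+ *   static void example_eq_cb(lnet_event_t *ev)
+ *   {
+ *           CDEBUG(D_NET, "event type %d status %d\n", ev->type, ev->status);
+ *   }
+ *
+ *   // count == 0: callback-only EQ, no events are queued
+ *   rc = LNetEQAlloc(0, example_eq_cb, &example_eqh);
+ *   ...
+ *   rc = LNetEQFree(example_eqh);  // -EBUSY while MDs still reference it
+ */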
+
+/**
+ * Release the resources associated with an event queue if it's idle;
+ * otherwise do nothing and it's up to the user to try again.
+ *
+ * \param eqh A handle for the event queue to be released.
+ *
+ * \retval 0 If the EQ is not in use and freed.
+ * \retval -ENOENT If \a eqh does not point to a valid EQ.
+ * \retval -EBUSY If the EQ is still in use by some MDs.
+ */
+int
+LNetEQFree(lnet_handle_eq_t eqh)
+{
+ struct lnet_eq *eq;
+ lnet_event_t *events = NULL;
+ int **refs = NULL;
+ int *ref;
+ int rc = 0;
+ int size = 0;
+ int i;
+
+ LASSERT(the_lnet.ln_init);
+ LASSERT(the_lnet.ln_refcount > 0);
+
+ lnet_res_lock(LNET_LOCK_EX);
+ /* NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do
+ * both EQ lookup and poll event with only lnet_eq_wait_lock */
+ lnet_eq_wait_lock();
+
+ eq = lnet_handle2eq(&eqh);
+ if (eq == NULL) {
+ rc = -ENOENT;
+ goto out;
+ }
+
+ cfs_percpt_for_each(ref, i, eq->eq_refs) {
+ LASSERT(*ref >= 0);
+ if (*ref == 0)
+ continue;
+
+ CDEBUG(D_NET, "Event equeue (%d: %d) busy on destroy.\n",
+ i, *ref);
+ rc = -EBUSY;
+ goto out;
+ }
+
+ /* stash for free after lock dropped */
+ events = eq->eq_events;
+ size = eq->eq_size;
+ refs = eq->eq_refs;
+
+ lnet_res_lh_invalidate(&eq->eq_lh);
+ list_del(&eq->eq_list);
+ lnet_eq_free_locked(eq);
+ out:
+ lnet_eq_wait_unlock();
+ lnet_res_unlock(LNET_LOCK_EX);
+
+ if (events != NULL)
+ LIBCFS_FREE(events, size * sizeof(lnet_event_t));
+ if (refs != NULL)
+ cfs_percpt_free(refs);
+
+ return rc;
+}
+EXPORT_SYMBOL(LNetEQFree);
+
+void
+lnet_eq_enqueue_event(lnet_eq_t *eq, lnet_event_t *ev)
+{
+ /* MUST be called with the resource lock held, but w/o lnet_eq_wait_lock */
+ int index;
+
+ if (eq->eq_size == 0) {
+ LASSERT(eq->eq_callback != LNET_EQ_HANDLER_NONE);
+ eq->eq_callback(ev);
+ return;
+ }
+
+ lnet_eq_wait_lock();
+ ev->sequence = eq->eq_enq_seq++;
+
+ LASSERT(eq->eq_size == LOWEST_BIT_SET(eq->eq_size));
+ index = ev->sequence & (eq->eq_size - 1);
+
+ eq->eq_events[index] = *ev;
+
+ if (eq->eq_callback != LNET_EQ_HANDLER_NONE)
+ eq->eq_callback(ev);
+
+ /* Wake anyone waiting in LNetEQPoll() */
+ if (waitqueue_active(&the_lnet.ln_eq_waitq))
+ wake_up_all(&the_lnet.ln_eq_waitq);
+ lnet_eq_wait_unlock();
+}
+
+int
+lnet_eq_dequeue_event(lnet_eq_t *eq, lnet_event_t *ev)
+{
+ int new_index = eq->eq_deq_seq & (eq->eq_size - 1);
+ lnet_event_t *new_event = &eq->eq_events[new_index];
+ int rc;
+
+ /* must be called with lnet_eq_wait_lock held */
+ if (LNET_SEQ_GT(eq->eq_deq_seq, new_event->sequence))
+ return 0;
+
+ /* We've got a new event... */
+ *ev = *new_event;
+
+ CDEBUG(D_INFO, "event: %p, sequence: %lu, eq->size: %u\n",
+ new_event, eq->eq_deq_seq, eq->eq_size);
+
+ /* ...but did it overwrite an event we've not seen yet? */
+ if (eq->eq_deq_seq == new_event->sequence) {
+ rc = 1;
+ } else {
+ /* don't complain with CERROR: some EQs are sized small
+ * anyway; if it's important, the caller should complain */
+ CDEBUG(D_NET, "Event Queue Overflow: eq seq %lu ev seq %lu\n",
+ eq->eq_deq_seq, new_event->sequence);
+ rc = -EOVERFLOW;
+ }
+
+ eq->eq_deq_seq = new_event->sequence + 1;
+ return rc;
+}
+
+/**
+ * A nonblocking function that can be used to get the next event in an EQ.
+ * If an event handler is associated with the EQ, the handler will run before
+ * this function returns successfully. The event is removed from the queue.
+ *
+ * \param eventq A handle for the event queue.
+ * \param event On successful return (1 or -EOVERFLOW), this location will
+ * hold the next event in the EQ.
+ *
+ * \retval 0 No pending event in the EQ.
+ * \retval 1 Indicates success.
+ * \retval -ENOENT If \a eventq does not point to a valid EQ.
+ * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
+ * at least one event between this event and the last event obtained from the
+ * EQ has been dropped due to limited space in the EQ.
+ */
+int
+LNetEQGet (lnet_handle_eq_t eventq, lnet_event_t *event)
+{
+ int which;
+
+ return LNetEQPoll(&eventq, 1, 0,
+ event, &which);
+}
+EXPORT_SYMBOL(LNetEQGet);
+
+/**
+ * Block the calling process until there is an event in the EQ.
+ * If an event handler is associated with the EQ, the handler will run before
+ * this function returns successfully. This function returns the next event
+ * in the EQ and removes it from the EQ.
+ *
+ * \param eventq A handle for the event queue.
+ * \param event On successful return (1 or -EOVERFLOW), this location will
+ * hold the next event in the EQ.
+ *
+ * \retval 1 Indicates success.
+ * \retval -ENOENT If \a eventq does not point to a valid EQ.
+ * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
+ * at least one event between this event and the last event obtained from the
+ * EQ has been dropped due to limited space in the EQ.
+ */
+int
+LNetEQWait (lnet_handle_eq_t eventq, lnet_event_t *event)
+{
+ int which;
+
+ return LNetEQPoll(&eventq, 1, LNET_TIME_FOREVER,
+ event, &which);
+}
+EXPORT_SYMBOL(LNetEQWait);
+
+
+static int
+lnet_eq_wait_locked(int *timeout_ms)
+{
+ int tms = *timeout_ms;
+ int wait;
+ wait_queue_t wl;
+ cfs_time_t now;
+
+ if (tms == 0)
+ return -1; /* don't want to wait and no new event */
+
+ init_waitqueue_entry_current(&wl);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&the_lnet.ln_eq_waitq, &wl);
+
+ lnet_eq_wait_unlock();
+
+ if (tms < 0) {
+ waitq_wait(&wl, TASK_INTERRUPTIBLE);
+
+ } else {
+ struct timeval tv;
+
+ now = cfs_time_current();
+ waitq_timedwait(&wl, TASK_INTERRUPTIBLE,
+ cfs_time_seconds(tms) / 1000);
+ cfs_duration_usec(cfs_time_sub(cfs_time_current(), now), &tv);
+ tms -= (int)(tv.tv_sec * 1000 + tv.tv_usec / 1000);
+ if (tms < 0) /* no more wait but may have new event */
+ tms = 0;
+ }
+
+ wait = tms != 0; /* might need to call here again */
+ *timeout_ms = tms;
+
+ lnet_eq_wait_lock();
+ remove_wait_queue(&the_lnet.ln_eq_waitq, &wl);
+
+ return wait;
+}
+
+
+
+/**
+ * Block the calling process until there's an event from a set of EQs or
+ * a timeout occurs.
+ *
+ * If an event handler is associated with the EQ, the handler will run before
+ * this function returns successfully, in which case the corresponding event
+ * is consumed.
+ *
+ * LNetEQPoll() provides a timeout to allow applications to poll, block for a
+ * fixed period, or block indefinitely.
+ *
+ * \param eventqs,neq An array of EQ handles, and size of the array.
+ * \param timeout_ms Time in milliseconds to wait for an event to occur on
+ * one of the EQs. The constant LNET_TIME_FOREVER can be used to indicate an
+ * infinite timeout.
+ * \param event,which On successful return (1 or -EOVERFLOW), \a event will
+ * hold the next event in the EQs, and \a which will contain the index of the
+ * EQ from which the event was taken.
+ *
+ * \retval 0 No pending event in the EQs after timeout.
+ * \retval 1 Indicates success.
+ * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
+ * at least one event between this event and the last event obtained from the
+ * EQ indicated by \a which has been dropped due to limited space in the EQ.
+ * \retval -ENOENT If there's an invalid handle in \a eventqs.
+ */
+int
+LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
+ lnet_event_t *event, int *which)
+{
+ int wait = 1;
+ int rc;
+ int i;
+
+ LASSERT (the_lnet.ln_init);
+ LASSERT (the_lnet.ln_refcount > 0);
+
+ if (neq < 1)
+ return -ENOENT;
+
+ lnet_eq_wait_lock();
+
+ for (;;) {
+ for (i = 0; i < neq; i++) {
+ lnet_eq_t *eq = lnet_handle2eq(&eventqs[i]);
+
+ if (eq == NULL) {
+ lnet_eq_wait_unlock();
+ return -ENOENT;
+ }
+
+ rc = lnet_eq_dequeue_event(eq, event);
+ if (rc != 0) {
+ lnet_eq_wait_unlock();
+ *which = i;
+ return rc;
+ }
+ }
+
+ if (wait == 0)
+ break;
+
+ /*
+ * return value of lnet_eq_wait_locked:
+ * -1 : did nothing and it's sure no new event
+ * 1 : sleep inside and wait until new event
+ * 0 : don't want to wait anymore, but might have new event
+ * so need to call dequeue again
+ */
+ wait = lnet_eq_wait_locked(&timeout_ms);
+ if (wait < 0) /* no new event */
+ break;
+ }
+
+ lnet_eq_wait_unlock();
+ return 0;
+}
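+
+/*
+ * Polling-loop sketch for LNetEQPoll(); "example_eqh" is assumed to come
+ * from LNetEQAlloc() and handle_event() is a placeholder:
+ *
+ *   lnet_event_t ev;
+ *   int which;
+ *
+ *   for (;;) {
+ *           rc = LNetEQPoll(&example_eqh, 1, 1000, &ev, &which);
+ *           if (rc == 0)            // timed out after 1000ms, no event
+ *                   continue;
+ *           if (rc == -ENOENT)      // invalid handle
+ *                   break;
+ *           // rc == 1 or -EOVERFLOW: 'ev' holds the next event
+ *           handle_event(&ev);
+ *   }
+ */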
diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c
new file mode 100644
index 0000000..ae643f2
--- /dev/null
+++ b/drivers/staging/lustre/lnet/lnet/lib-md.c
@@ -0,0 +1,451 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/lnet/lib-md.c
+ *
+ * Memory Descriptor management routines
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/lnet/lib-lnet.h>
+
+/* must be called with lnet_res_lock held */
+void
+lnet_md_unlink(lnet_libmd_t *md)
+{
+ if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) == 0) {
+ /* first unlink attempt... */
+ lnet_me_t *me = md->md_me;
+
+ md->md_flags |= LNET_MD_FLAG_ZOMBIE;
+
+ /* Disassociate from ME (if any), and unlink it if it was created
+ * with LNET_UNLINK */
+ if (me != NULL) {
+ /* detach MD from portal */
+ lnet_ptl_detach_md(me, md);
+ if (me->me_unlink == LNET_UNLINK)
+ lnet_me_unlink(me);
+ }
+
+ /* ensure all future handle lookups fail */
+ lnet_res_lh_invalidate(&md->md_lh);
+ }
+
+ if (md->md_refcount != 0) {
+ CDEBUG(D_NET, "Queueing unlink of md %p\n", md);
+ return;
+ }
+
+ CDEBUG(D_NET, "Unlinking md %p\n", md);
+
+ if (md->md_eq != NULL) {
+ int cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie);
+
+ LASSERT(*md->md_eq->eq_refs[cpt] > 0);
+ (*md->md_eq->eq_refs[cpt])--;
+ }
+
+ LASSERT(!list_empty(&md->md_list));
+ list_del_init(&md->md_list);
+ lnet_md_free_locked(md);
+}
+
+static int
+lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
+{
+ int i;
+ unsigned int niov;
+ int total_length = 0;
+
+ lmd->md_me = NULL;
+ lmd->md_start = umd->start;
+ lmd->md_offset = 0;
+ lmd->md_max_size = umd->max_size;
+ lmd->md_options = umd->options;
+ lmd->md_user_ptr = umd->user_ptr;
+ lmd->md_eq = NULL;
+ lmd->md_threshold = umd->threshold;
+ lmd->md_refcount = 0;
+ lmd->md_flags = (unlink == LNET_UNLINK) ? LNET_MD_FLAG_AUTO_UNLINK : 0;
+
+ if ((umd->options & LNET_MD_IOVEC) != 0) {
+
+ if ((umd->options & LNET_MD_KIOV) != 0) /* Can't specify both */
+ return -EINVAL;
+
+ lmd->md_niov = niov = umd->length;
+ memcpy(lmd->md_iov.iov, umd->start,
+ niov * sizeof (lmd->md_iov.iov[0]));
+
+ for (i = 0; i < (int)niov; i++) {
+ /* We take the base address on trust */
+ if (lmd->md_iov.iov[i].iov_len <= 0) /* invalid length */
+ return -EINVAL;
+
+ total_length += lmd->md_iov.iov[i].iov_len;
+ }
+
+ lmd->md_length = total_length;
+
+ if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
+ (umd->max_size < 0 ||
+ umd->max_size > total_length)) /* illegal max_size */
+ return -EINVAL;
+
+ } else if ((umd->options & LNET_MD_KIOV) != 0) {
+ lmd->md_niov = niov = umd->length;
+ memcpy(lmd->md_iov.kiov, umd->start,
+ niov * sizeof (lmd->md_iov.kiov[0]));
+
+ for (i = 0; i < (int)niov; i++) {
+ /* We take the page pointer on trust */
+ if (lmd->md_iov.kiov[i].kiov_offset +
+ lmd->md_iov.kiov[i].kiov_len > PAGE_CACHE_SIZE)
+ return -EINVAL; /* invalid length */
+
+ total_length += lmd->md_iov.kiov[i].kiov_len;
+ }
+
+ lmd->md_length = total_length;
+
+ if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
+ (umd->max_size < 0 ||
+ umd->max_size > total_length)) /* illegal max_size */
+ return -EINVAL;
+ } else { /* contiguous */
+ lmd->md_length = umd->length;
+ lmd->md_niov = niov = 1;
+ lmd->md_iov.iov[0].iov_base = umd->start;
+ lmd->md_iov.iov[0].iov_len = umd->length;
+
+ if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
+ (umd->max_size < 0 ||
+ umd->max_size > (int)umd->length)) /* illegal max_size */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* must be called with resource lock held */
+static int
+lnet_md_link(lnet_libmd_t *md, lnet_handle_eq_t eq_handle, int cpt)
+{
+ struct lnet_res_container *container = the_lnet.ln_md_containers[cpt];
+
+ /* NB we are passed an allocated, but inactive md.
+ * if we return success, caller may lnet_md_unlink() it.
+ * otherwise caller may only lnet_md_free() it.
+ */
+ /* This implementation doesn't know how to create START events or
+ * disable END events. Best to LASSERT our caller is compliant so
+ * we find out quickly... */
+ /* TODO - reevaluate what should be here in light of
+ * the removal of the start and end events;
+ * maybe we shouldn't even allow LNET_EQ_NONE!
+ * LASSERT (eq == NULL);
+ */
+ if (!LNetHandleIsInvalid(eq_handle)) {
+ md->md_eq = lnet_handle2eq(&eq_handle);
+
+ if (md->md_eq == NULL)
+ return -ENOENT;
+
+ (*md->md_eq->eq_refs[cpt])++;
+ }
+
+ lnet_res_lh_initialize(container, &md->md_lh);
+
+ LASSERT(list_empty(&md->md_list));
+ list_add(&md->md_list, &container->rec_active);
+
+ return 0;
+}
+
+/* must be called with lnet_res_lock held */
+void
+lnet_md_deconstruct(lnet_libmd_t *lmd, lnet_md_t *umd)
+{
+ /* NB this doesn't copy out all the iov entries so when a
+ * discontiguous MD is copied out, the target gets to know the
+ * original iov pointer (in start) and the number of entries it had
+ * and that's all.
+ */
+ umd->start = lmd->md_start;
+ umd->length = ((lmd->md_options & (LNET_MD_IOVEC | LNET_MD_KIOV)) == 0) ?
+ lmd->md_length : lmd->md_niov;
+ umd->threshold = lmd->md_threshold;
+ umd->max_size = lmd->md_max_size;
+ umd->options = lmd->md_options;
+ umd->user_ptr = lmd->md_user_ptr;
+ lnet_eq2handle(&umd->eq_handle, lmd->md_eq);
+}
+
+int
+lnet_md_validate(lnet_md_t *umd)
+{
+ if (umd->start == NULL && umd->length != 0) {
+ CERROR("MD start pointer can not be NULL with length %u\n",
+ umd->length);
+ return -EINVAL;
+ }
+
+ if ((umd->options & (LNET_MD_KIOV | LNET_MD_IOVEC)) != 0 &&
+ umd->length > LNET_MAX_IOV) {
+ CERROR("Invalid option: too many fragments %u, %d max\n",
+ umd->length, LNET_MAX_IOV);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * Create a memory descriptor and attach it to a ME
+ *
+ * \param meh A handle for a ME to associate the new MD with.
+ * \param umd Provides initial values for the user-visible parts of a MD.
+ * Other than its use for initialization, there is no linkage between this
+ * structure and the MD maintained by the LNet.
+ * \param unlink A flag to indicate whether the MD is automatically unlinked
+ * when it becomes inactive, either because the operation threshold drops to
+ * zero or because the available memory becomes less than \a umd.max_size.
+ * (Note that the check for unlinking a MD only occurs after the completion
+ * of a successful operation on the MD.) The value LNET_UNLINK enables auto
+ * unlinking; the value LNET_RETAIN disables it.
+ * \param handle On successful return, a handle to the newly created MD is
+ * saved here. This handle can be used later in LNetMDUnlink().
+ *
+ * \retval 0 On success.
+ * \retval -EINVAL If \a umd is not valid.
+ * \retval -ENOMEM If new MD cannot be allocated.
+ * \retval -ENOENT Either \a meh or \a umd.eq_handle does not point to a
+ * valid object. Note that it's OK to supply a NULL \a umd.eq_handle by
+ * calling LNetInvalidateHandle() on it.
+ * \retval -EBUSY If the ME pointed to by \a meh is already associated with
+ * a MD.
+ */
+int
+LNetMDAttach(lnet_handle_me_t meh, lnet_md_t umd,
+ lnet_unlink_t unlink, lnet_handle_md_t *handle)
+{
+ LIST_HEAD (matches);
+ LIST_HEAD (drops);
+ struct lnet_me *me;
+ struct lnet_libmd *md;
+ int cpt;
+ int rc;
+
+ LASSERT (the_lnet.ln_init);
+ LASSERT (the_lnet.ln_refcount > 0);
+
+ if (lnet_md_validate(&umd) != 0)
+ return -EINVAL;
+
+ if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT)) == 0) {
+ CERROR("Invalid option: no MD_OP set\n");
+ return -EINVAL;
+ }
+
+ md = lnet_md_alloc(&umd);
+ if (md == NULL)
+ return -ENOMEM;
+
+ rc = lnet_md_build(md, &umd, unlink);
+ cpt = lnet_cpt_of_cookie(meh.cookie);
+
+ lnet_res_lock(cpt);
+ if (rc != 0)
+ goto failed;
+
+ me = lnet_handle2me(&meh);
+ if (me == NULL)
+ rc = -ENOENT;
+ else if (me->me_md != NULL)
+ rc = -EBUSY;
+ else
+ rc = lnet_md_link(md, umd.eq_handle, cpt);
+
+ if (rc != 0)
+ goto failed;
+
+ /* attach this MD to portal of ME and check if it matches any
+ * blocked msgs on this portal */
+ lnet_ptl_attach_md(me, md, &matches, &drops);
+
+ lnet_md2handle(handle, md);
+
+ lnet_res_unlock(cpt);
+
+ lnet_drop_delayed_msg_list(&drops, "Bad match");
+ lnet_recv_delayed_msg_list(&matches);
+
+ return 0;
+
+ failed:
+ lnet_md_free_locked(md);
+
+ lnet_res_unlock(cpt);
+ return rc;
+}
+EXPORT_SYMBOL(LNetMDAttach);
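+
+/*
+ * Sketch of posting a receive buffer; "example_meh" is assumed to come from
+ * LNetMEAttach(), "example_eqh" from LNetEQAlloc(), and "buffer" is a
+ * placeholder:
+ *
+ *   lnet_md_t umd;
+ *   lnet_handle_md_t mdh;
+ *
+ *   memset(&umd, 0, sizeof(umd));
+ *   umd.start     = buffer;
+ *   umd.length    = sizeof(buffer);
+ *   umd.threshold = LNET_MD_THRESH_INF;
+ *   umd.options   = LNET_MD_OP_PUT;
+ *   umd.eq_handle = example_eqh;
+ *
+ *   rc = LNetMDAttach(example_meh, umd, LNET_UNLINK, &mdh);
+ */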
+
+/**
+ * Create a "free floating" memory descriptor - a MD that is not associated
+ * with a ME. Such MDs are usually used in LNetPut() and LNetGet() operations.
+ *
+ * \param umd,unlink See the discussion for LNetMDAttach().
+ * \param handle On successful return, a handle to the newly created MD is
+ * saved here. This handle can be used later in LNetMDUnlink(), LNetPut(),
+ * and LNetGet() operations.
+ *
+ * \retval 0 On success.
+ * \retval -EINVAL If \a umd is not valid.
+ * \retval -ENOMEM If new MD cannot be allocated.
+ * \retval -ENOENT \a umd.eq_handle does not point to a valid EQ. Note that
+ * it's OK to supply a NULL \a umd.eq_handle by calling
+ * LNetInvalidateHandle() on it.
+ */
+int
+LNetMDBind(lnet_md_t umd, lnet_unlink_t unlink, lnet_handle_md_t *handle)
+{
+ lnet_libmd_t *md;
+ int cpt;
+ int rc;
+
+ LASSERT (the_lnet.ln_init);
+ LASSERT (the_lnet.ln_refcount > 0);
+
+ if (lnet_md_validate(&umd) != 0)
+ return -EINVAL;
+
+ if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT)) != 0) {
+ CERROR("Invalid option: GET|PUT illegal on active MDs\n");
+ return -EINVAL;
+ }
+
+ md = lnet_md_alloc(&umd);
+ if (md == NULL)
+ return -ENOMEM;
+
+ rc = lnet_md_build(md, &umd, unlink);
+
+ cpt = lnet_res_lock_current();
+ if (rc != 0)
+ goto failed;
+
+ rc = lnet_md_link(md, umd.eq_handle, cpt);
+ if (rc != 0)
+ goto failed;
+
+ lnet_md2handle(handle, md);
+
+ lnet_res_unlock(cpt);
+ return 0;
+
+ failed:
+ lnet_md_free_locked(md);
+
+ lnet_res_unlock(cpt);
+ return rc;
+}
+EXPORT_SYMBOL(LNetMDBind);
+
+/**
+ * Unlink the memory descriptor from any ME it may be linked to and release
+ * the internal resources associated with it.
+ *
+ * This function does not free the memory region associated with the MD;
+ * i.e., the memory the user allocated for this MD. If the ME associated with
+ * this MD is not NULL and was created with auto unlink enabled, the ME is
+ * unlinked as well (see LNetMEAttach()).
+ *
+ * Explicitly unlinking a MD via this function call has the same effect as
+ * automatic unlinking, except that no LNET_EVENT_UNLINK event is generated
+ * in the latter case.
+ *
+ * An unlinked event can be reported in two ways:
+ * - If there's no pending operations on the MD, it's unlinked immediately
+ * and an LNET_EVENT_UNLINK event is logged before this function returns.
+ * - Otherwise, the MD is only marked for deletion when this function
+ * returns, and the unlinked event will be piggybacked on the event of
+ * the completion of the last operation by setting the unlinked field of
+ * the event. No dedicated LNET_EVENT_UNLINK event is generated.
+ *
+ * Note that in both cases the unlinked field of the event is always set; no
+ * more event will happen on the MD after such an event is logged.
+ *
+ * \param mdh A handle for the MD to be unlinked.
+ *
+ * \retval 0 On success.
+ * \retval -ENOENT If \a mdh does not point to a valid MD object.
+ */
+int
+LNetMDUnlink (lnet_handle_md_t mdh)
+{
+ lnet_event_t ev;
+ lnet_libmd_t *md;
+ int cpt;
+
+ LASSERT(the_lnet.ln_init);
+ LASSERT(the_lnet.ln_refcount > 0);
+
+ cpt = lnet_cpt_of_cookie(mdh.cookie);
+ lnet_res_lock(cpt);
+
+ md = lnet_handle2md(&mdh);
+ if (md == NULL) {
+ lnet_res_unlock(cpt);
+ return -ENOENT;
+ }
+
+ /* If the MD is busy, lnet_md_unlink just marks it for deletion, and
+ * when the NAL is done, the completion event flags that the MD was
+ * unlinked. Otherwise, we enqueue an event now... */
+
+ if (md->md_eq != NULL &&
+ md->md_refcount == 0) {
+ lnet_build_unlink_event(md, &ev);
+ lnet_eq_enqueue_event(md->md_eq, &ev);
+ }
+
+ lnet_md_unlink(md);
+
+ lnet_res_unlock(cpt);
+ return 0;
+}
+EXPORT_SYMBOL(LNetMDUnlink);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-me.c b/drivers/staging/lustre/lnet/lnet/lib-me.c
new file mode 100644
index 0000000..0081075
--- /dev/null
+++ b/drivers/staging/lustre/lnet/lnet/lib-me.c
@@ -0,0 +1,297 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/lnet/lib-me.c
+ *
+ * Match Entry management routines
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/lnet/lib-lnet.h>
+
+/**
+ * Create and attach a match entry to the match list of \a portal. The new
+ * ME is empty, i.e. not associated with a memory descriptor. LNetMDAttach()
+ * can be used to attach a MD to an empty ME.
+ *
+ * \param portal The portal table index where the ME should be attached.
+ * \param match_id Specifies the match criteria for the process ID of
+ * the requester. The constants LNET_PID_ANY and LNET_NID_ANY can be
+ * used to wildcard either of the identifiers in the lnet_process_id_t
+ * structure.
+ * \param match_bits,ignore_bits Specify the match criteria to apply
+ * to the match bits in the incoming request. The ignore bits are used
+ * to mask out insignificant bits in the incoming match bits. The resulting
+ * bits are then compared to the ME's match bits to determine if the
+ * incoming request meets the match criteria.
+ * \param unlink Indicates whether the ME should be unlinked when the memory
+ * descriptor associated with it is unlinked. (Note that the check for
+ * unlinking a ME only occurs when the memory descriptor is unlinked.)
+ * Valid values are LNET_RETAIN and LNET_UNLINK.
+ * \param pos Indicates whether the new ME should be prepended or
+ * appended to the match list. Allowed constants: LNET_INS_BEFORE,
+ * LNET_INS_AFTER.
+ * \param handle On successful return, a handle to the newly created ME
+ * object is saved here. This handle can be used later in LNetMEInsert(),
+ * LNetMEUnlink(), or LNetMDAttach() functions.
+ *
+ * \retval 0 On success.
+ * \retval -EINVAL If \a portal is invalid.
+ * \retval -ENOMEM If new ME object cannot be allocated.
+ */
+int
+LNetMEAttach(unsigned int portal,
+ lnet_process_id_t match_id,
+ __u64 match_bits, __u64 ignore_bits,
+ lnet_unlink_t unlink, lnet_ins_pos_t pos,
+ lnet_handle_me_t *handle)
+{
+ struct lnet_match_table *mtable;
+ struct lnet_me *me;
+ struct list_head *head;
+
+ LASSERT(the_lnet.ln_init);
+ LASSERT(the_lnet.ln_refcount > 0);
+
+ if ((int)portal >= the_lnet.ln_nportals)
+ return -EINVAL;
+
+ mtable = lnet_mt_of_attach(portal, match_id,
+ match_bits, ignore_bits, pos);
+ if (mtable == NULL) /* can't match portal type */
+ return -EPERM;
+
+ me = lnet_me_alloc();
+ if (me == NULL)
+ return -ENOMEM;
+
+ lnet_res_lock(mtable->mt_cpt);
+
+ me->me_portal = portal;
+ me->me_match_id = match_id;
+ me->me_match_bits = match_bits;
+ me->me_ignore_bits = ignore_bits;
+ me->me_unlink = unlink;
+ me->me_md = NULL;
+
+ lnet_res_lh_initialize(the_lnet.ln_me_containers[mtable->mt_cpt],
+ &me->me_lh);
+ if (ignore_bits != 0)
+ head = &mtable->mt_mhash[LNET_MT_HASH_IGNORE];
+ else
+ head = lnet_mt_match_head(mtable, match_id, match_bits);
+
+ me->me_pos = head - &mtable->mt_mhash[0];
+ if (pos == LNET_INS_AFTER || pos == LNET_INS_LOCAL)
+ list_add_tail(&me->me_list, head);
+ else
+ list_add(&me->me_list, head);
+
+ lnet_me2handle(handle, me);
+
+ lnet_res_unlock(mtable->mt_cpt);
+ return 0;
+}
+EXPORT_SYMBOL(LNetMEAttach);
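+
+/*
+ * Sketch of creating a wildcard match entry; the portal index and match
+ * bits below are placeholders:
+ *
+ *   lnet_process_id_t anyid = { .nid = LNET_NID_ANY, .pid = LNET_PID_ANY };
+ *   lnet_handle_me_t meh;
+ *
+ *   rc = LNetMEAttach(12, anyid, 0, ~0ULL,      // ignore all match bits
+ *                     LNET_UNLINK, LNET_INS_AFTER, &meh);
+ */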
+
+/**
+ * Create a match entry and insert it before or after the ME pointed to by
+ * \a current_meh. The new ME is empty, i.e. not associated with a memory
+ * descriptor. LNetMDAttach() can be used to attach a MD to an empty ME.
+ *
+ * This function is identical to LNetMEAttach() except for the position
+ * where the new ME is inserted.
+ *
+ * \param current_meh A handle for a ME. The new ME will be inserted
+ * immediately before or immediately after this ME.
+ * \param match_id,match_bits,ignore_bits,unlink,pos,handle See the discussion
+ * for LNetMEAttach().
+ *
+ * \retval 0 On success.
+ * \retval -ENOMEM If new ME object cannot be allocated.
+ * \retval -ENOENT If \a current_meh does not point to a valid match entry.
+ */
+int
+LNetMEInsert(lnet_handle_me_t current_meh,
+ lnet_process_id_t match_id,
+ __u64 match_bits, __u64 ignore_bits,
+ lnet_unlink_t unlink, lnet_ins_pos_t pos,
+ lnet_handle_me_t *handle)
+{
+ struct lnet_me *current_me;
+ struct lnet_me *new_me;
+ struct lnet_portal *ptl;
+ int cpt;
+
+ LASSERT(the_lnet.ln_init);
+ LASSERT(the_lnet.ln_refcount > 0);
+
+ if (pos == LNET_INS_LOCAL)
+ return -EPERM;
+
+ new_me = lnet_me_alloc();
+ if (new_me == NULL)
+ return -ENOMEM;
+
+ cpt = lnet_cpt_of_cookie(current_meh.cookie);
+
+ lnet_res_lock(cpt);
+
+ current_me = lnet_handle2me(&current_meh);
+ if (current_me == NULL) {
+ lnet_me_free_locked(new_me);
+
+ lnet_res_unlock(cpt);
+ return -ENOENT;
+ }
+
+ LASSERT(current_me->me_portal < the_lnet.ln_nportals);
+
+ ptl = the_lnet.ln_portals[current_me->me_portal];
+ if (lnet_ptl_is_unique(ptl)) {
+ /* no sense in inserting on a unique portal */
+ lnet_me_free_locked(new_me);
+ lnet_res_unlock(cpt);
+ return -EPERM;
+ }
+
+ new_me->me_pos = current_me->me_pos;
+ new_me->me_portal = current_me->me_portal;
+ new_me->me_match_id = match_id;
+ new_me->me_match_bits = match_bits;
+ new_me->me_ignore_bits = ignore_bits;
+ new_me->me_unlink = unlink;
+ new_me->me_md = NULL;
+
+ lnet_res_lh_initialize(the_lnet.ln_me_containers[cpt], &new_me->me_lh);
+
+ if (pos == LNET_INS_AFTER)
+ list_add(&new_me->me_list, &current_me->me_list);
+ else
+ list_add_tail(&new_me->me_list, &current_me->me_list);
+
+ lnet_me2handle(handle, new_me);
+
+ lnet_res_unlock(cpt);
+
+ return 0;
+}
+EXPORT_SYMBOL(LNetMEInsert);
+
+/**
+ * Unlink a match entry from its match list.
+ *
+ * This operation also releases any resources associated with the ME. If a
+ * memory descriptor is attached to the ME, then it will be unlinked as well
+ * and an unlink event will be generated. It is an error to use the ME handle
+ * after calling LNetMEUnlink().
+ *
+ * \param meh A handle for the ME to be unlinked.
+ *
+ * \retval 0 On success.
+ * \retval -ENOENT If \a meh does not point to a valid ME.
+ * \see LNetMDUnlink() for the discussion on delivering unlink event.
+ */
+int
+LNetMEUnlink(lnet_handle_me_t meh)
+{
+ lnet_me_t *me;
+ lnet_libmd_t *md;
+ lnet_event_t ev;
+ int cpt;
+
+ LASSERT(the_lnet.ln_init);
+ LASSERT(the_lnet.ln_refcount > 0);
+
+ cpt = lnet_cpt_of_cookie(meh.cookie);
+ lnet_res_lock(cpt);
+
+ me = lnet_handle2me(&meh);
+ if (me == NULL) {
+ lnet_res_unlock(cpt);
+ return -ENOENT;
+ }
+
+ md = me->me_md;
+ if (md != NULL &&
+ md->md_eq != NULL &&
+ md->md_refcount == 0) {
+ lnet_build_unlink_event(md, &ev);
+ lnet_eq_enqueue_event(md->md_eq, &ev);
+ }
+
+ lnet_me_unlink(me);
+
+ lnet_res_unlock(cpt);
+ return 0;
+}
+EXPORT_SYMBOL(LNetMEUnlink);
+
+/* call with lnet_res_lock please */
+void
+lnet_me_unlink(lnet_me_t *me)
+{
+ list_del(&me->me_list);
+
+ if (me->me_md != NULL) {
+ lnet_libmd_t *md = me->me_md;
+
+ /* detach MD from portal of this ME */
+ lnet_ptl_detach_md(me, md);
+ lnet_md_unlink(md);
+ }
+
+ lnet_res_lh_invalidate(&me->me_lh);
+ lnet_me_free_locked(me);
+}
+
+#if 0
+static void
+lib_me_dump(lnet_me_t *me)
+{
+ CWARN("Match Entry %p ("LPX64")\n", me,
+ me->me_lh.lh_cookie);
+
+ CWARN("\tMatch/Ignore\t= %016lx / %016lx\n",
+ me->me_match_bits, me->me_ignore_bits);
+
+ CWARN("\tMD\t= %p\n", me->md);
+ CWARN("\tprev\t= %p\n",
+ list_entry(me->me_list.prev, lnet_me_t, me_list));
+ CWARN("\tnext\t= %p\n",
+ list_entry(me->me_list.next, lnet_me_t, me_list));
+}
+#endif
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
new file mode 100644
index 0000000..49b0f12
--- /dev/null
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -0,0 +1,2441 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/lnet/lib-move.c
+ *
+ * Data movement routines
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/lnet/lib-lnet.h>
+
+static int local_nid_dist_zero = 1;
+CFS_MODULE_PARM(local_nid_dist_zero, "i", int, 0444,
+ "Reserved");
+
+int
+lnet_fail_nid (lnet_nid_t nid, unsigned int threshold)
+{
+ lnet_test_peer_t *tp;
+ struct list_head *el;
+ struct list_head *next;
+ struct list_head cull;
+
+ LASSERT (the_lnet.ln_init);
+
+ /* NB: use lnet_net_lock(0) to serialize operations on test peers */
+ if (threshold != 0) {
+ /* Adding a new entry */
+ LIBCFS_ALLOC(tp, sizeof(*tp));
+ if (tp == NULL)
+ return -ENOMEM;
+
+ tp->tp_nid = nid;
+ tp->tp_threshold = threshold;
+
+ lnet_net_lock(0);
+ list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
+ lnet_net_unlock(0);
+ return 0;
+ }
+
+ /* removing entries */
+ INIT_LIST_HEAD(&cull);
+
+ lnet_net_lock(0);
+
+ list_for_each_safe (el, next, &the_lnet.ln_test_peers) {
+ tp = list_entry (el, lnet_test_peer_t, tp_list);
+
+ if (tp->tp_threshold == 0 || /* needs culling anyway */
+ nid == LNET_NID_ANY || /* removing all entries */
+ tp->tp_nid == nid) /* matched this one */
+ {
+ list_del (&tp->tp_list);
+ list_add (&tp->tp_list, &cull);
+ }
+ }
+
+ lnet_net_unlock(0);
+
+ while (!list_empty (&cull)) {
+ tp = list_entry (cull.next, lnet_test_peer_t, tp_list);
+
+ list_del (&tp->tp_list);
+ LIBCFS_FREE(tp, sizeof (*tp));
+ }
+ return 0;
+}
+
+static int
+fail_peer (lnet_nid_t nid, int outgoing)
+{
+ lnet_test_peer_t *tp;
+ struct list_head *el;
+ struct list_head *next;
+ struct list_head cull;
+ int fail = 0;
+
+ INIT_LIST_HEAD (&cull);
+
+ /* NB: use lnet_net_lock(0) to serialize operations on test peers */
+ lnet_net_lock(0);
+
+ list_for_each_safe (el, next, &the_lnet.ln_test_peers) {
+ tp = list_entry (el, lnet_test_peer_t, tp_list);
+
+ if (tp->tp_threshold == 0) {
+ /* zombie entry */
+ if (outgoing) {
+ /* only cull zombies on outgoing tests,
+ * since we may be at interrupt priority on
+ * incoming messages. */
+ list_del (&tp->tp_list);
+ list_add (&tp->tp_list, &cull);
+ }
+ continue;
+ }
+
+ if (tp->tp_nid == LNET_NID_ANY || /* fail every peer */
+ nid == tp->tp_nid) { /* fail this peer */
+ fail = 1;
+
+ if (tp->tp_threshold != LNET_MD_THRESH_INF) {
+ tp->tp_threshold--;
+ if (outgoing &&
+ tp->tp_threshold == 0) {
+ /* see above */
+ list_del (&tp->tp_list);
+ list_add (&tp->tp_list, &cull);
+ }
+ }
+ break;
+ }
+ }
+
+ lnet_net_unlock(0);
+
+ while (!list_empty (&cull)) {
+ tp = list_entry (cull.next, lnet_test_peer_t, tp_list);
+ list_del (&tp->tp_list);
+
+ LIBCFS_FREE(tp, sizeof (*tp));
+ }
+
+ return (fail);
+}
+
+unsigned int
+lnet_iov_nob (unsigned int niov, struct iovec *iov)
+{
+ unsigned int nob = 0;
+
+ while (niov-- > 0)
+ nob += (iov++)->iov_len;
+
+ return (nob);
+}
+EXPORT_SYMBOL(lnet_iov_nob);
+
+void
+lnet_copy_iov2iov (unsigned int ndiov, struct iovec *diov, unsigned int doffset,
+ unsigned int nsiov, struct iovec *siov, unsigned int soffset,
+ unsigned int nob)
+{
+ /* NB diov, siov are READ-ONLY */
+ unsigned int this_nob;
+
+ if (nob == 0)
+ return;
+
+ /* skip complete frags before 'doffset' */
+ LASSERT (ndiov > 0);
+ while (doffset >= diov->iov_len) {
+ doffset -= diov->iov_len;
+ diov++;
+ ndiov--;
+ LASSERT (ndiov > 0);
+ }
+
+ /* skip complete frags before 'soffset' */
+ LASSERT (nsiov > 0);
+ while (soffset >= siov->iov_len) {
+ soffset -= siov->iov_len;
+ siov++;
+ nsiov--;
+ LASSERT (nsiov > 0);
+ }
+
+ do {
+ LASSERT (ndiov > 0);
+ LASSERT (nsiov > 0);
+ this_nob = MIN(diov->iov_len - doffset,
+ siov->iov_len - soffset);
+ this_nob = MIN(this_nob, nob);
+
+ memcpy ((char *)diov->iov_base + doffset,
+ (char *)siov->iov_base + soffset, this_nob);
+ nob -= this_nob;
+
+ if (diov->iov_len > doffset + this_nob) {
+ doffset += this_nob;
+ } else {
+ diov++;
+ ndiov--;
+ doffset = 0;
+ }
+
+ if (siov->iov_len > soffset + this_nob) {
+ soffset += this_nob;
+ } else {
+ siov++;
+ nsiov--;
+ soffset = 0;
+ }
+ } while (nob > 0);
+}
+EXPORT_SYMBOL(lnet_copy_iov2iov);
+
+int
+lnet_extract_iov (int dst_niov, struct iovec *dst,
+ int src_niov, struct iovec *src,
+ unsigned int offset, unsigned int len)
+{
+ /* Initialise 'dst' to the subset of 'src' starting at 'offset',
+ * for exactly 'len' bytes, and return the number of entries.
+ * NB not destructive to 'src' */
+ unsigned int frag_len;
+ unsigned int niov;
+
+ if (len == 0) /* no data => */
+ return (0); /* no frags */
+
+ LASSERT (src_niov > 0);
+ while (offset >= src->iov_len) { /* skip initial frags */
+ offset -= src->iov_len;
+ src_niov--;
+ src++;
+ LASSERT (src_niov > 0);
+ }
+
+ niov = 1;
+ for (;;) {
+ LASSERT (src_niov > 0);
+ LASSERT ((int)niov <= dst_niov);
+
+ frag_len = src->iov_len - offset;
+ dst->iov_base = ((char *)src->iov_base) + offset;
+
+ if (len <= frag_len) {
+ dst->iov_len = len;
+ return (niov);
+ }
+
+ dst->iov_len = frag_len;
+
+ len -= frag_len;
+ dst++;
+ src++;
+ niov++;
+ src_niov--;
+ offset = 0;
+ }
+}
+EXPORT_SYMBOL(lnet_extract_iov);
+
+
+unsigned int
+lnet_kiov_nob (unsigned int niov, lnet_kiov_t *kiov)
+{
+ unsigned int nob = 0;
+
+ while (niov-- > 0)
+ nob += (kiov++)->kiov_len;
+
+ return (nob);
+}
+EXPORT_SYMBOL(lnet_kiov_nob);
+
+void
+lnet_copy_kiov2kiov (unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
+ unsigned int nsiov, lnet_kiov_t *siov, unsigned int soffset,
+ unsigned int nob)
+{
+ /* NB diov, siov are READ-ONLY */
+ unsigned int this_nob;
+ char *daddr = NULL;
+ char *saddr = NULL;
+
+ if (nob == 0)
+ return;
+
+ LASSERT (!in_interrupt ());
+
+ LASSERT (ndiov > 0);
+ while (doffset >= diov->kiov_len) {
+ doffset -= diov->kiov_len;
+ diov++;
+ ndiov--;
+ LASSERT (ndiov > 0);
+ }
+
+ LASSERT (nsiov > 0);
+ while (soffset >= siov->kiov_len) {
+ soffset -= siov->kiov_len;
+ siov++;
+ nsiov--;
+ LASSERT (nsiov > 0);
+ }
+
+ do {
+ LASSERT (ndiov > 0);
+ LASSERT (nsiov > 0);
+ this_nob = MIN(diov->kiov_len - doffset,
+ siov->kiov_len - soffset);
+ this_nob = MIN(this_nob, nob);
+
+ if (daddr == NULL)
+ daddr = ((char *)kmap(diov->kiov_page)) +
+ diov->kiov_offset + doffset;
+ if (saddr == NULL)
+ saddr = ((char *)kmap(siov->kiov_page)) +
+ siov->kiov_offset + soffset;
+
+ /* There is a vanishing risk of kmap deadlock when mapping two pages
+ * at once.  In practice, however, at least one of the kiovs covers
+ * already-mapped kernel pages, so the map/unmap calls are NOOPs. */
+
+ memcpy (daddr, saddr, this_nob);
+ nob -= this_nob;
+
+ if (diov->kiov_len > doffset + this_nob) {
+ daddr += this_nob;
+ doffset += this_nob;
+ } else {
+ kunmap(diov->kiov_page);
+ daddr = NULL;
+ diov++;
+ ndiov--;
+ doffset = 0;
+ }
+
+ if (siov->kiov_len > soffset + this_nob) {
+ saddr += this_nob;
+ soffset += this_nob;
+ } else {
+ kunmap(siov->kiov_page);
+ saddr = NULL;
+ siov++;
+ nsiov--;
+ soffset = 0;
+ }
+ } while (nob > 0);
+
+ if (daddr != NULL)
+ kunmap(diov->kiov_page);
+ if (saddr != NULL)
+ kunmap(siov->kiov_page);
+}
+EXPORT_SYMBOL(lnet_copy_kiov2kiov);
+
+void
+lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset,
+ unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
+ unsigned int nob)
+{
+ /* NB iov, kiov are READ-ONLY */
+ unsigned int this_nob;
+ char *addr = NULL;
+
+ if (nob == 0)
+ return;
+
+ LASSERT (!in_interrupt ());
+
+ LASSERT (niov > 0);
+ while (iovoffset >= iov->iov_len) {
+ iovoffset -= iov->iov_len;
+ iov++;
+ niov--;
+ LASSERT (niov > 0);
+ }
+
+ LASSERT (nkiov > 0);
+ while (kiovoffset >= kiov->kiov_len) {
+ kiovoffset -= kiov->kiov_len;
+ kiov++;
+ nkiov--;
+ LASSERT (nkiov > 0);
+ }
+
+ do {
+ LASSERT (niov > 0);
+ LASSERT (nkiov > 0);
+ this_nob = MIN(iov->iov_len - iovoffset,
+ kiov->kiov_len - kiovoffset);
+ this_nob = MIN(this_nob, nob);
+
+ if (addr == NULL)
+ addr = ((char *)kmap(kiov->kiov_page)) +
+ kiov->kiov_offset + kiovoffset;
+
+ memcpy ((char *)iov->iov_base + iovoffset, addr, this_nob);
+ nob -= this_nob;
+
+ if (iov->iov_len > iovoffset + this_nob) {
+ iovoffset += this_nob;
+ } else {
+ iov++;
+ niov--;
+ iovoffset = 0;
+ }
+
+ if (kiov->kiov_len > kiovoffset + this_nob) {
+ addr += this_nob;
+ kiovoffset += this_nob;
+ } else {
+ kunmap(kiov->kiov_page);
+ addr = NULL;
+ kiov++;
+ nkiov--;
+ kiovoffset = 0;
+ }
+
+ } while (nob > 0);
+
+ if (addr != NULL)
+ kunmap(kiov->kiov_page);
+}
+EXPORT_SYMBOL(lnet_copy_kiov2iov);
+
+void
+lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
+ unsigned int niov, struct iovec *iov, unsigned int iovoffset,
+ unsigned int nob)
+{
+ /* NB kiov, iov are READ-ONLY */
+ unsigned int this_nob;
+ char *addr = NULL;
+
+ if (nob == 0)
+ return;
+
+ LASSERT (!in_interrupt ());
+
+ LASSERT (nkiov > 0);
+ while (kiovoffset >= kiov->kiov_len) {
+ kiovoffset -= kiov->kiov_len;
+ kiov++;
+ nkiov--;
+ LASSERT (nkiov > 0);
+ }
+
+ LASSERT (niov > 0);
+ while (iovoffset >= iov->iov_len) {
+ iovoffset -= iov->iov_len;
+ iov++;
+ niov--;
+ LASSERT (niov > 0);
+ }
+
+ do {
+ LASSERT (nkiov > 0);
+ LASSERT (niov > 0);
+ this_nob = MIN(kiov->kiov_len - kiovoffset,
+ iov->iov_len - iovoffset);
+ this_nob = MIN(this_nob, nob);
+
+ if (addr == NULL)
+ addr = ((char *)kmap(kiov->kiov_page)) +
+ kiov->kiov_offset + kiovoffset;
+
+ memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
+ nob -= this_nob;
+
+ if (kiov->kiov_len > kiovoffset + this_nob) {
+ addr += this_nob;
+ kiovoffset += this_nob;
+ } else {
+ kunmap(kiov->kiov_page);
+ addr = NULL;
+ kiov++;
+ nkiov--;
+ kiovoffset = 0;
+ }
+
+ if (iov->iov_len > iovoffset + this_nob) {
+ iovoffset += this_nob;
+ } else {
+ iov++;
+ niov--;
+ iovoffset = 0;
+ }
+ } while (nob > 0);
+
+ if (addr != NULL)
+ kunmap(kiov->kiov_page);
+}
+EXPORT_SYMBOL(lnet_copy_iov2kiov);
+
+int
+lnet_extract_kiov (int dst_niov, lnet_kiov_t *dst,
+ int src_niov, lnet_kiov_t *src,
+ unsigned int offset, unsigned int len)
+{
+ /* Initialise 'dst' to the subset of 'src' starting at 'offset',
+ * for exactly 'len' bytes, and return the number of entries.
+ * NB not destructive to 'src' */
+ unsigned int frag_len;
+ unsigned int niov;
+
+ if (len == 0) /* no data => */
+ return (0); /* no frags */
+
+ LASSERT (src_niov > 0);
+ while (offset >= src->kiov_len) { /* skip initial frags */
+ offset -= src->kiov_len;
+ src_niov--;
+ src++;
+ LASSERT (src_niov > 0);
+ }
+
+ niov = 1;
+ for (;;) {
+ LASSERT (src_niov > 0);
+ LASSERT ((int)niov <= dst_niov);
+
+ frag_len = src->kiov_len - offset;
+ dst->kiov_page = src->kiov_page;
+ dst->kiov_offset = src->kiov_offset + offset;
+
+ if (len <= frag_len) {
+ dst->kiov_len = len;
+ LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
+ return (niov);
+ }
+
+ dst->kiov_len = frag_len;
+ LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
+
+ len -= frag_len;
+ dst++;
+ src++;
+ niov++;
+ src_niov--;
+ offset = 0;
+ }
+}
+EXPORT_SYMBOL(lnet_extract_kiov);
+
+void
+lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
+ unsigned int offset, unsigned int mlen, unsigned int rlen)
+{
+ unsigned int niov = 0;
+ struct iovec *iov = NULL;
+ lnet_kiov_t *kiov = NULL;
+ int rc;
+
+ LASSERT (!in_interrupt ());
+ LASSERT (mlen == 0 || msg != NULL);
+
+ if (msg != NULL) {
+ LASSERT(msg->msg_receiving);
+ LASSERT(!msg->msg_sending);
+ LASSERT(rlen == msg->msg_len);
+ LASSERT(mlen <= msg->msg_len);
+ LASSERT(msg->msg_offset == offset);
+ LASSERT(msg->msg_wanted == mlen);
+
+ msg->msg_receiving = 0;
+
+ if (mlen != 0) {
+ niov = msg->msg_niov;
+ iov = msg->msg_iov;
+ kiov = msg->msg_kiov;
+
+ LASSERT (niov > 0);
+ LASSERT ((iov == NULL) != (kiov == NULL));
+ }
+ }
+
+ rc = (ni->ni_lnd->lnd_recv)(ni, private, msg, delayed,
+ niov, iov, kiov, offset, mlen, rlen);
+ if (rc < 0)
+ lnet_finalize(ni, msg, rc);
+}
+
+void
+lnet_setpayloadbuffer(lnet_msg_t *msg)
+{
+ lnet_libmd_t *md = msg->msg_md;
+
+ LASSERT (msg->msg_len > 0);
+ LASSERT (!msg->msg_routing);
+ LASSERT (md != NULL);
+ LASSERT (msg->msg_niov == 0);
+ LASSERT (msg->msg_iov == NULL);
+ LASSERT (msg->msg_kiov == NULL);
+
+ msg->msg_niov = md->md_niov;
+ if ((md->md_options & LNET_MD_KIOV) != 0)
+ msg->msg_kiov = md->md_iov.kiov;
+ else
+ msg->msg_iov = md->md_iov.iov;
+}
+
+void
+lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
+ unsigned int offset, unsigned int len)
+{
+ msg->msg_type = type;
+ msg->msg_target = target;
+ msg->msg_len = len;
+ msg->msg_offset = offset;
+
+ if (len != 0)
+ lnet_setpayloadbuffer(msg);
+
+ memset (&msg->msg_hdr, 0, sizeof (msg->msg_hdr));
+ msg->msg_hdr.type = cpu_to_le32(type);
+ msg->msg_hdr.dest_nid = cpu_to_le64(target.nid);
+ msg->msg_hdr.dest_pid = cpu_to_le32(target.pid);
+ /* src_nid will be set later */
+ msg->msg_hdr.src_pid = cpu_to_le32(the_lnet.ln_pid);
+ msg->msg_hdr.payload_length = cpu_to_le32(len);
+}
+
+void
+lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
+{
+ void *priv = msg->msg_private;
+ int rc;
+
+ LASSERT (!in_interrupt ());
+ LASSERT (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
+ (msg->msg_txcredit && msg->msg_peertxcredit));
+
+ rc = (ni->ni_lnd->lnd_send)(ni, priv, msg);
+ if (rc < 0)
+ lnet_finalize(ni, msg, rc);
+}
+
+int
+lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg)
+{
+ int rc;
+
+ LASSERT(!msg->msg_sending);
+ LASSERT(msg->msg_receiving);
+ LASSERT(!msg->msg_rx_ready_delay);
+ LASSERT(ni->ni_lnd->lnd_eager_recv != NULL);
+
+ msg->msg_rx_ready_delay = 1;
+ rc = (ni->ni_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
+ &msg->msg_private);
+ if (rc != 0) {
+ CERROR("recv from %s / send to %s aborted: "
+ "eager_recv failed %d\n",
+ libcfs_nid2str(msg->msg_rxpeer->lp_nid),
+ libcfs_id2str(msg->msg_target), rc);
+ LASSERT(rc < 0); /* required by my callers */
+ }
+
+ return rc;
+}
+
+/* NB: caller shall hold a ref on 'lp' as I'd drop lnet_net_lock */
+void
+lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp)
+{
+ cfs_time_t last_alive = 0;
+
+ LASSERT(lnet_peer_aliveness_enabled(lp));
+ LASSERT(ni->ni_lnd->lnd_query != NULL);
+
+ lnet_net_unlock(lp->lp_cpt);
+ (ni->ni_lnd->lnd_query)(ni, lp->lp_nid, &last_alive);
+ lnet_net_lock(lp->lp_cpt);
+
+ lp->lp_last_query = cfs_time_current();
+
+ if (last_alive != 0) /* NI has updated timestamp */
+ lp->lp_last_alive = last_alive;
+}
+
+/* NB: always called with lnet_net_lock held */
+static inline int
+lnet_peer_is_alive (lnet_peer_t *lp, cfs_time_t now)
+{
+ int alive;
+ cfs_time_t deadline;
+
+ LASSERT (lnet_peer_aliveness_enabled(lp));
+
+ /* Trust lnet_notify() if it has more recent aliveness news, but
+ * ignore the initial assumed death (see lnet_peers_start_down()).
+ */
+ if (!lp->lp_alive && lp->lp_alive_count > 0 &&
+ cfs_time_aftereq(lp->lp_timestamp, lp->lp_last_alive))
+ return 0;
+
+ deadline = cfs_time_add(lp->lp_last_alive,
+ cfs_time_seconds(lp->lp_ni->ni_peertimeout));
+ alive = cfs_time_after(deadline, now);
+
+ /* Update obsolete lp_alive except for routers assumed to be dead
+ * initially, because the router checker will update their aliveness
+ * itself, and because lp_last_alive at peer creation is only an
+ * assumption, not an observation.
+ */
+ if (alive && !lp->lp_alive &&
+ !(lnet_isrouter(lp) && lp->lp_alive_count == 0))
+ lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);
+
+ return alive;
+}
+
+
+/* NB: returns 1 when alive, 0 when dead, negative when error;
+ * may drop the lnet_net_lock */
+int
+lnet_peer_alive_locked (lnet_peer_t *lp)
+{
+ cfs_time_t now = cfs_time_current();
+
+ if (!lnet_peer_aliveness_enabled(lp))
+ return -ENODEV;
+
+ if (lnet_peer_is_alive(lp, now))
+ return 1;
+
+ /* Peer appears dead, but we should avoid frequent NI queries (at
+ * most once per lnet_queryinterval seconds). */
+ if (lp->lp_last_query != 0) {
+ static const int lnet_queryinterval = 1;
+
+ cfs_time_t next_query =
+ cfs_time_add(lp->lp_last_query,
+ cfs_time_seconds(lnet_queryinterval));
+
+ if (cfs_time_before(now, next_query)) {
+ if (lp->lp_alive)
+ CWARN("Unexpected aliveness of peer %s: "
+ "%d < %d (%d/%d)\n",
+ libcfs_nid2str(lp->lp_nid),
+ (int)now, (int)next_query,
+ lnet_queryinterval,
+ lp->lp_ni->ni_peertimeout);
+ return 0;
+ }
+ }
+
+ /* query NI for latest aliveness news */
+ lnet_ni_query_locked(lp->lp_ni, lp);
+
+ if (lnet_peer_is_alive(lp, now))
+ return 1;
+
+ lnet_notify_locked(lp, 0, 0, lp->lp_last_alive);
+ return 0;
+}
+
+int
+lnet_post_send_locked(lnet_msg_t *msg, int do_send)
+{
+ /* lnet_send is going to lnet_net_unlock immediately after this,
+ * so it sets do_send FALSE and I don't do the unlock/send/lock bit.
+ * I return EAGAIN if msg blocked, EHOSTUNREACH if msg_txpeer
+ * appears dead, and 0 if sent or OK to send */
+ struct lnet_peer *lp = msg->msg_txpeer;
+ struct lnet_ni *ni = lp->lp_ni;
+ struct lnet_tx_queue *tq;
+ int cpt;
+
+ /* non-lnet_send() callers have checked before */
+ LASSERT(!do_send || msg->msg_tx_delayed);
+ LASSERT(!msg->msg_receiving);
+ LASSERT(msg->msg_tx_committed);
+
+ cpt = msg->msg_tx_cpt;
+ tq = ni->ni_tx_queues[cpt];
+
+ /* NB 'lp' is always the next hop */
+ if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
+ lnet_peer_alive_locked(lp) == 0) {
+ the_lnet.ln_counters[cpt]->drop_count++;
+ the_lnet.ln_counters[cpt]->drop_length += msg->msg_len;
+ lnet_net_unlock(cpt);
+
+ CNETERR("Dropping message for %s: peer not alive\n",
+ libcfs_id2str(msg->msg_target));
+ if (do_send)
+ lnet_finalize(ni, msg, -EHOSTUNREACH);
+
+ lnet_net_lock(cpt);
+ return EHOSTUNREACH;
+ }
+
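+ /* Take a peer tx credit, then an NI tx credit; if either pool is
+ * exhausted the message is queued on the corresponding list and
+ * EAGAIN returned so the caller leaves it delayed. */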
+ if (!msg->msg_peertxcredit) {
+ LASSERT ((lp->lp_txcredits < 0) ==
+ !list_empty(&lp->lp_txq));
+
+ msg->msg_peertxcredit = 1;
+ lp->lp_txqnob += msg->msg_len + sizeof(lnet_hdr_t);
+ lp->lp_txcredits--;
+
+ if (lp->lp_txcredits < lp->lp_mintxcredits)
+ lp->lp_mintxcredits = lp->lp_txcredits;
+
+ if (lp->lp_txcredits < 0) {
+ msg->msg_tx_delayed = 1;
+ list_add_tail(&msg->msg_list, &lp->lp_txq);
+ return EAGAIN;
+ }
+ }
+
+ if (!msg->msg_txcredit) {
+ LASSERT((tq->tq_credits < 0) ==
+ !list_empty(&tq->tq_delayed));
+
+ msg->msg_txcredit = 1;
+ tq->tq_credits--;
+
+ if (tq->tq_credits < tq->tq_credits_min)
+ tq->tq_credits_min = tq->tq_credits;
+
+ if (tq->tq_credits < 0) {
+ msg->msg_tx_delayed = 1;
+ list_add_tail(&msg->msg_list, &tq->tq_delayed);
+ return EAGAIN;
+ }
+ }
+
+ if (do_send) {
+ lnet_net_unlock(cpt);
+ lnet_ni_send(ni, msg);
+ lnet_net_lock(cpt);
+ }
+ return 0;
+}
+
+
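+/* Pick the router buffer pool to receive 'msg' into: pools are ordered by
+ * increasing buffer size, so walk up from the smallest until the whole
+ * message fits. */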
+lnet_rtrbufpool_t *
+lnet_msg2bufpool(lnet_msg_t *msg)
+{
+ lnet_rtrbufpool_t *rbp;
+ int cpt;
+
+ LASSERT(msg->msg_rx_committed);
+
+ cpt = msg->msg_rx_cpt;
+ rbp = &the_lnet.ln_rtrpools[cpt][0];
+
+ LASSERT(msg->msg_len <= LNET_MTU);
+ while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) {
+ rbp++;
+ LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
+ }
+
+ return rbp;
+}
+
+int
+lnet_post_routed_recv_locked (lnet_msg_t *msg, int do_recv)
+{
+ /* lnet_parse is going to lnet_net_unlock immediately after this, so it
+ * sets do_recv FALSE and I don't do the unlock/send/lock bit. I
+ * return EAGAIN if msg blocked and 0 if received or OK to receive */
+ lnet_peer_t *lp = msg->msg_rxpeer;
+ lnet_rtrbufpool_t *rbp;
+ lnet_rtrbuf_t *rb;
+
+ LASSERT (msg->msg_iov == NULL);
+ LASSERT (msg->msg_kiov == NULL);
+ LASSERT (msg->msg_niov == 0);
+ LASSERT (msg->msg_routing);
+ LASSERT (msg->msg_receiving);
+ LASSERT (!msg->msg_sending);
+
+ /* non-lnet_parse callers only receive delayed messages */
+ LASSERT(!do_recv || msg->msg_rx_delayed);
+
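+ /* Take a peer router credit and a router buffer credit; if either is
+ * exhausted the message waits on the corresponding queue and EAGAIN
+ * is returned. */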
+ if (!msg->msg_peerrtrcredit) {
+ LASSERT ((lp->lp_rtrcredits < 0) ==
+ !list_empty(&lp->lp_rtrq));
+
+ msg->msg_peerrtrcredit = 1;
+ lp->lp_rtrcredits--;
+ if (lp->lp_rtrcredits < lp->lp_minrtrcredits)
+ lp->lp_minrtrcredits = lp->lp_rtrcredits;
+
+ if (lp->lp_rtrcredits < 0) {
+ /* must have checked eager_recv before here */
+ LASSERT(msg->msg_rx_ready_delay);
+ msg->msg_rx_delayed = 1;
+ list_add_tail(&msg->msg_list, &lp->lp_rtrq);
+ return EAGAIN;
+ }
+ }
+
+ rbp = lnet_msg2bufpool(msg);
+
+ if (!msg->msg_rtrcredit) {
+ LASSERT ((rbp->rbp_credits < 0) ==
+ !list_empty(&rbp->rbp_msgs));
+
+ msg->msg_rtrcredit = 1;
+ rbp->rbp_credits--;
+ if (rbp->rbp_credits < rbp->rbp_mincredits)
+ rbp->rbp_mincredits = rbp->rbp_credits;
+
+ if (rbp->rbp_credits < 0) {
+ /* must have checked eager_recv before here */
+ LASSERT(msg->msg_rx_ready_delay);
+ msg->msg_rx_delayed = 1;
+ list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
+ return EAGAIN;
+ }
+ }
+
+ LASSERT (!list_empty(&rbp->rbp_bufs));
+ rb = list_entry(rbp->rbp_bufs.next, lnet_rtrbuf_t, rb_list);
+ list_del(&rb->rb_list);
+
+ msg->msg_niov = rbp->rbp_npages;
+ msg->msg_kiov = &rb->rb_kiov[0];
+
+ if (do_recv) {
+ int cpt = msg->msg_rx_cpt;
+
+ lnet_net_unlock(cpt);
+ lnet_ni_recv(lp->lp_ni, msg->msg_private, msg, 1,
+ 0, msg->msg_len, msg->msg_len);
+ lnet_net_lock(cpt);
+ }
+ return 0;
+}
+
+void
+lnet_return_tx_credits_locked(lnet_msg_t *msg)
+{
+ lnet_peer_t *txpeer = msg->msg_txpeer;
+ lnet_msg_t *msg2;
+
+ if (msg->msg_txcredit) {
+ struct lnet_ni *ni = txpeer->lp_ni;
+ struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];
+
+ /* give back NI txcredits */
+ msg->msg_txcredit = 0;
+
+ LASSERT((tq->tq_credits < 0) ==
+ !list_empty(&tq->tq_delayed));
+
+ tq->tq_credits++;
+ if (tq->tq_credits <= 0) {
+ msg2 = list_entry(tq->tq_delayed.next,
+ lnet_msg_t, msg_list);
+ list_del(&msg2->msg_list);
+
+ LASSERT(msg2->msg_txpeer->lp_ni == ni);
+ LASSERT(msg2->msg_tx_delayed);
+
+ (void) lnet_post_send_locked(msg2, 1);
+ }
+ }
+
+ if (msg->msg_peertxcredit) {
+ /* give back peer txcredits */
+ msg->msg_peertxcredit = 0;
+
+ LASSERT((txpeer->lp_txcredits < 0) ==
+ !list_empty(&txpeer->lp_txq));
+
+ txpeer->lp_txqnob -= msg->msg_len + sizeof(lnet_hdr_t);
+ LASSERT (txpeer->lp_txqnob >= 0);
+
+ txpeer->lp_txcredits++;
+ if (txpeer->lp_txcredits <= 0) {
+ msg2 = list_entry(txpeer->lp_txq.next,
+ lnet_msg_t, msg_list);
+ list_del(&msg2->msg_list);
+
+ LASSERT(msg2->msg_txpeer == txpeer);
+ LASSERT(msg2->msg_tx_delayed);
+
+ (void) lnet_post_send_locked(msg2, 1);
+ }
+ }
+
+ if (txpeer != NULL) {
+ msg->msg_txpeer = NULL;
+ lnet_peer_decref_locked(txpeer);
+ }
+}
+
+void
+lnet_return_rx_credits_locked(lnet_msg_t *msg)
+{
+ lnet_peer_t *rxpeer = msg->msg_rxpeer;
+ lnet_msg_t *msg2;
+
+ if (msg->msg_rtrcredit) {
+ /* give back global router credits */
+ lnet_rtrbuf_t *rb;
+ lnet_rtrbufpool_t *rbp;
+
+ /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
+ * there until it gets one allocated, or aborts the wait
+ * itself */
+ LASSERT (msg->msg_kiov != NULL);
+
+ rb = list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);
+ rbp = rb->rb_pool;
+ LASSERT (rbp == lnet_msg2bufpool(msg));
+
+ msg->msg_kiov = NULL;
+ msg->msg_rtrcredit = 0;
+
+ LASSERT((rbp->rbp_credits < 0) ==
+ !list_empty(&rbp->rbp_msgs));
+ LASSERT((rbp->rbp_credits > 0) ==
+ !list_empty(&rbp->rbp_bufs));
+
+ list_add(&rb->rb_list, &rbp->rbp_bufs);
+ rbp->rbp_credits++;
+ if (rbp->rbp_credits <= 0) {
+ msg2 = list_entry(rbp->rbp_msgs.next,
+ lnet_msg_t, msg_list);
+ list_del(&msg2->msg_list);
+
+ (void) lnet_post_routed_recv_locked(msg2, 1);
+ }
+ }
+
+ if (msg->msg_peerrtrcredit) {
+ /* give back peer router credits */
+ msg->msg_peerrtrcredit = 0;
+
+ LASSERT((rxpeer->lp_rtrcredits < 0) ==
+ !list_empty(&rxpeer->lp_rtrq));
+
+ rxpeer->lp_rtrcredits++;
+ if (rxpeer->lp_rtrcredits <= 0) {
+ msg2 = list_entry(rxpeer->lp_rtrq.next,
+ lnet_msg_t, msg_list);
+ list_del(&msg2->msg_list);
+
+ (void) lnet_post_routed_recv_locked(msg2, 1);
+ }
+ }
+ if (rxpeer != NULL) {
+ msg->msg_rxpeer = NULL;
+ lnet_peer_decref_locked(rxpeer);
+ }
+}
+
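+/* Return +1 if r1 is the preferable route and -1 if r2 is: fewer hops wins,
+ * then the gateway with fewer queued bytes (lp_txqnob), then the gateway
+ * with more tx credits; lr_seq breaks any remaining tie so equally good
+ * routes are used round-robin. */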
+static int
+lnet_compare_routes(lnet_route_t *r1, lnet_route_t *r2)
+{
+ lnet_peer_t *p1 = r1->lr_gateway;
+ lnet_peer_t *p2 = r2->lr_gateway;
+
+ if (r1->lr_hops < r2->lr_hops)
+ return 1;
+
+ if (r1->lr_hops > r2->lr_hops)
+ return -1;
+
+ if (p1->lp_txqnob < p2->lp_txqnob)
+ return 1;
+
+ if (p1->lp_txqnob > p2->lp_txqnob)
+ return -1;
+
+ if (p1->lp_txcredits > p2->lp_txcredits)
+ return 1;
+
+ if (p1->lp_txcredits < p2->lp_txcredits)
+ return -1;
+
+ if (r1->lr_seq - r2->lr_seq <= 0)
+ return 1;
+
+ return -1;
+}
+
+static lnet_peer_t *
+lnet_find_route_locked(lnet_ni_t *ni, lnet_nid_t target, lnet_nid_t rtr_nid)
+{
+ lnet_remotenet_t *rnet;
+ lnet_route_t *rtr;
+ lnet_route_t *rtr_best;
+ lnet_route_t *rtr_last;
+ struct lnet_peer *lp_best;
+ struct lnet_peer *lp;
+ int rc;
+
+ /* If @rtr_nid is not LNET_NID_ANY, return the gateway whose NID is
+ * rtr_nid; otherwise find the best gateway I can use */
+
+ rnet = lnet_find_net_locked(LNET_NIDNET(target));
+ if (rnet == NULL)
+ return NULL;
+
+ lp_best = NULL;
+ rtr_best = rtr_last = NULL;
+ list_for_each_entry(rtr, &rnet->lrn_routes, lr_list) {
+ lp = rtr->lr_gateway;
+
+ if (!lp->lp_alive || /* gateway is down */
+ ((lp->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0 &&
+ rtr->lr_downis != 0)) /* NI to target is down */
+ continue;
+
+ if (ni != NULL && lp->lp_ni != ni)
+ continue;
+
+ if (lp->lp_nid == rtr_nid) /* it's pre-determined router */
+ return lp;
+
+ if (lp_best == NULL) {
+ rtr_best = rtr_last = rtr;
+ lp_best = lp;
+ continue;
+ }
+
+ /* no protection on the fields below, but it's harmless */
+ if (rtr_last->lr_seq - rtr->lr_seq < 0)
+ rtr_last = rtr;
+
+ rc = lnet_compare_routes(rtr, rtr_best);
+ if (rc < 0)
+ continue;
+
+ rtr_best = rtr;
+ lp_best = lp;
+ }
+
+ /* set sequence number on the best router to the latest sequence + 1
+ * so we can round-robin all routers; it's racy and inaccurate but
+ * harmless and functional */
+ if (rtr_best != NULL)
+ rtr_best->lr_seq = rtr_last->lr_seq + 1;
+ return lp_best;
+}
+
+int
+lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
+{
+ lnet_nid_t dst_nid = msg->msg_target.nid;
+ struct lnet_ni *src_ni;
+ struct lnet_ni *local_ni;
+ struct lnet_peer *lp;
+ int cpt;
+ int cpt2;
+ int rc;
+
+ /* NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
+ * but we might want to use pre-determined router for ACK/REPLY
+ * in the future */
+ /* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
+ LASSERT (msg->msg_txpeer == NULL);
+ LASSERT (!msg->msg_sending);
+ LASSERT (!msg->msg_target_is_router);
+ LASSERT (!msg->msg_receiving);
+
+ msg->msg_sending = 1;
+
+ LASSERT(!msg->msg_tx_committed);
+ cpt = lnet_cpt_of_nid(rtr_nid == LNET_NID_ANY ? dst_nid : rtr_nid);
+ again:
+ lnet_net_lock(cpt);
+
+ if (the_lnet.ln_shutdown) {
+ lnet_net_unlock(cpt);
+ return -ESHUTDOWN;
+ }
+
+ if (src_nid == LNET_NID_ANY) {
+ src_ni = NULL;
+ } else {
+ src_ni = lnet_nid2ni_locked(src_nid, cpt);
+ if (src_ni == NULL) {
+ lnet_net_unlock(cpt);
+ LCONSOLE_WARN("Can't send to %s: src %s is not a "
+ "local nid\n", libcfs_nid2str(dst_nid),
+ libcfs_nid2str(src_nid));
+ return -EINVAL;
+ }
+ LASSERT (!msg->msg_routing);
+ }
+
+ /* Is this for someone on a local network? */
+ local_ni = lnet_net2ni_locked(LNET_NIDNET(dst_nid), cpt);
+
+ if (local_ni != NULL) {
+ if (src_ni == NULL) {
+ src_ni = local_ni;
+ src_nid = src_ni->ni_nid;
+ } else if (src_ni == local_ni) {
+ lnet_ni_decref_locked(local_ni, cpt);
+ } else {
+ lnet_ni_decref_locked(local_ni, cpt);
+ lnet_ni_decref_locked(src_ni, cpt);
+ lnet_net_unlock(cpt);
+ LCONSOLE_WARN("No route to %s via from %s\n",
+ libcfs_nid2str(dst_nid),
+ libcfs_nid2str(src_nid));
+ return -EINVAL;
+ }
+
+ LASSERT(src_nid != LNET_NID_ANY);
+ lnet_msg_commit(msg, cpt);
+
+ if (!msg->msg_routing)
+ msg->msg_hdr.src_nid = cpu_to_le64(src_nid);
+
+ if (src_ni == the_lnet.ln_loni) {
+ /* No send credit hassles with LOLND */
+ lnet_net_unlock(cpt);
+ lnet_ni_send(src_ni, msg);
+
+ lnet_net_lock(cpt);
+ lnet_ni_decref_locked(src_ni, cpt);
+ lnet_net_unlock(cpt);
+ return 0;
+ }
+
+ rc = lnet_nid2peer_locked(&lp, dst_nid, cpt);
+ /* lp has ref on src_ni; lose mine */
+ lnet_ni_decref_locked(src_ni, cpt);
+ if (rc != 0) {
+ lnet_net_unlock(cpt);
+ LCONSOLE_WARN("Error %d finding peer %s\n", rc,
+ libcfs_nid2str(dst_nid));
+ /* ENOMEM or shutting down */
+ return rc;
+ }
+ LASSERT (lp->lp_ni == src_ni);
+ } else {
+ /* sending to a remote network */
+ lp = lnet_find_route_locked(src_ni, dst_nid, rtr_nid);
+ if (lp == NULL) {
+ if (src_ni != NULL)
+ lnet_ni_decref_locked(src_ni, cpt);
+ lnet_net_unlock(cpt);
+
+ LCONSOLE_WARN("No route to %s via %s "
+ "(all routers down)\n",
+ libcfs_id2str(msg->msg_target),
+ libcfs_nid2str(src_nid));
+ return -EHOSTUNREACH;
+ }
+
+ /* rtr_nid is LNET_NID_ANY or NID of pre-determined router,
+ * it's possible that rtr_nid isn't LNET_NID_ANY and lp isn't
+ * pre-determined router, this can happen if router table
+ * was changed when we release the lock */
+ if (rtr_nid != lp->lp_nid) {
+ cpt2 = lnet_cpt_of_nid_locked(lp->lp_nid);
+ if (cpt2 != cpt) {
+ if (src_ni != NULL)
+ lnet_ni_decref_locked(src_ni, cpt);
+ lnet_net_unlock(cpt);
+
+ rtr_nid = lp->lp_nid;
+ cpt = cpt2;
+ goto again;
+ }
+ }
+
+ CDEBUG(D_NET, "Best route to %s via %s for %s %d\n",
+ libcfs_nid2str(dst_nid), libcfs_nid2str(lp->lp_nid),
+ lnet_msgtyp2str(msg->msg_type), msg->msg_len);
+
+ if (src_ni == NULL) {
+ src_ni = lp->lp_ni;
+ src_nid = src_ni->ni_nid;
+ } else {
+ LASSERT (src_ni == lp->lp_ni);
+ lnet_ni_decref_locked(src_ni, cpt);
+ }
+
+ lnet_peer_addref_locked(lp);
+
+ LASSERT(src_nid != LNET_NID_ANY);
+ lnet_msg_commit(msg, cpt);
+
+ if (!msg->msg_routing) {
+ /* I'm the source and now I know which NI to send on */
+ msg->msg_hdr.src_nid = cpu_to_le64(src_nid);
+ }
+
+ msg->msg_target_is_router = 1;
+ msg->msg_target.nid = lp->lp_nid;
+ msg->msg_target.pid = LUSTRE_SRV_LNET_PID;
+ }
+
+ /* 'lp' is our best choice of peer */
+
+ LASSERT (!msg->msg_peertxcredit);
+ LASSERT (!msg->msg_txcredit);
+ LASSERT (msg->msg_txpeer == NULL);
+
+ msg->msg_txpeer = lp; /* msg takes my ref on lp */
+
+ rc = lnet_post_send_locked(msg, 0);
+ lnet_net_unlock(cpt);
+
+ if (rc == EHOSTUNREACH)
+ return -EHOSTUNREACH;
+
+ if (rc == 0)
+ lnet_ni_send(src_ni, msg);
+
+ return 0;
+}
+
+static void
+lnet_drop_message(lnet_ni_t *ni, int cpt, void *private, unsigned int nob)
+{
+ lnet_net_lock(cpt);
+ the_lnet.ln_counters[cpt]->drop_count++;
+ the_lnet.ln_counters[cpt]->drop_length += nob;
+ lnet_net_unlock(cpt);
+
+ lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
+}
+
+static void
+lnet_recv_put(lnet_ni_t *ni, lnet_msg_t *msg)
+{
+ lnet_hdr_t *hdr = &msg->msg_hdr;
+
+ if (msg->msg_wanted != 0)
+ lnet_setpayloadbuffer(msg);
+
+ lnet_build_msg_event(msg, LNET_EVENT_PUT);
+
+ /* Must I ACK? If so I'll grab the ack_wmd out of the header and put
+ * it back into the ACK during lnet_finalize() */
+ msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
+ (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);
+
+ lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
+ msg->msg_offset, msg->msg_wanted, hdr->payload_length);
+}
+
+static int
+lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg)
+{
+ lnet_hdr_t *hdr = &msg->msg_hdr;
+ struct lnet_match_info info;
+ int rc;
+
+ /* Convert put fields to host byte order */
+ hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
+ hdr->msg.put.ptl_index = le32_to_cpu(hdr->msg.put.ptl_index);
+ hdr->msg.put.offset = le32_to_cpu(hdr->msg.put.offset);
+
+ info.mi_id.nid = hdr->src_nid;
+ info.mi_id.pid = hdr->src_pid;
+ info.mi_opc = LNET_MD_OP_PUT;
+ info.mi_portal = hdr->msg.put.ptl_index;
+ info.mi_rlength = hdr->payload_length;
+ info.mi_roffset = hdr->msg.put.offset;
+ info.mi_mbits = hdr->msg.put.match_bits;
+
+ msg->msg_rx_ready_delay = ni->ni_lnd->lnd_eager_recv == NULL;
+
+ again:
+ rc = lnet_ptl_match_md(&info, msg);
+ switch (rc) {
+ default:
+ LBUG();
+
+ case LNET_MATCHMD_OK:
+ lnet_recv_put(ni, msg);
+ return 0;
+
+ case LNET_MATCHMD_NONE:
+ if (msg->msg_rx_delayed) /* attached on delayed list */
+ return 0;
+
+ rc = lnet_ni_eager_recv(ni, msg);
+ if (rc == 0)
+ goto again;
+ /* fall through */
+
+ case LNET_MATCHMD_DROP:
+ CNETERR("Dropping PUT from %s portal %d match "LPU64
+ " offset %d length %d: %d\n",
+ libcfs_id2str(info.mi_id), info.mi_portal,
+ info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);
+
+ return ENOENT; /* +ve: OK but no match */
+ }
+}
+
+static int
+lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get)
+{
+ struct lnet_match_info info;
+ lnet_hdr_t *hdr = &msg->msg_hdr;
+ lnet_handle_wire_t reply_wmd;
+ int rc;
+
+ /* Convert get fields to host byte order */
+ hdr->msg.get.match_bits = le64_to_cpu(hdr->msg.get.match_bits);
+ hdr->msg.get.ptl_index = le32_to_cpu(hdr->msg.get.ptl_index);
+ hdr->msg.get.sink_length = le32_to_cpu(hdr->msg.get.sink_length);
+ hdr->msg.get.src_offset = le32_to_cpu(hdr->msg.get.src_offset);
+
+ info.mi_id.nid = hdr->src_nid;
+ info.mi_id.pid = hdr->src_pid;
+ info.mi_opc = LNET_MD_OP_GET;
+ info.mi_portal = hdr->msg.get.ptl_index;
+ info.mi_rlength = hdr->msg.get.sink_length;
+ info.mi_roffset = hdr->msg.get.src_offset;
+ info.mi_mbits = hdr->msg.get.match_bits;
+
+ rc = lnet_ptl_match_md(&info, msg);
+ if (rc == LNET_MATCHMD_DROP) {
+ CNETERR("Dropping GET from %s portal %d match "LPU64
+ " offset %d length %d\n",
+ libcfs_id2str(info.mi_id), info.mi_portal,
+ info.mi_mbits, info.mi_roffset, info.mi_rlength);
+ return ENOENT; /* +ve: OK but no match */
+ }
+
+ LASSERT(rc == LNET_MATCHMD_OK);
+
+ lnet_build_msg_event(msg, LNET_EVENT_GET);
+
+ reply_wmd = hdr->msg.get.return_wmd;
+
+ lnet_prep_send(msg, LNET_MSG_REPLY, info.mi_id,
+ msg->msg_offset, msg->msg_wanted);
+
+ msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;
+
+ if (rdma_get) {
+ /* The LND completes the REPLY from her recv procedure */
+ lnet_ni_recv(ni, msg->msg_private, msg, 0,
+ msg->msg_offset, msg->msg_len, msg->msg_len);
+ return 0;
+ }
+
+ lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
+ msg->msg_receiving = 0;
+
+ rc = lnet_send(ni->ni_nid, msg, LNET_NID_ANY);
+ if (rc < 0) {
+ /* didn't get as far as lnet_ni_send() */
+ CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
+ libcfs_nid2str(ni->ni_nid),
+ libcfs_id2str(info.mi_id), rc);
+
+ lnet_finalize(ni, msg, rc);
+ }
+
+ return 0;
+}
+
+static int
+lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg)
+{
+ void *private = msg->msg_private;
+ lnet_hdr_t *hdr = &msg->msg_hdr;
+ lnet_process_id_t src = {0};
+ lnet_libmd_t *md;
+ int rlength;
+ int mlength;
+ int cpt;
+
+ cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
+ lnet_res_lock(cpt);
+
+ src.nid = hdr->src_nid;
+ src.pid = hdr->src_pid;
+
+ /* NB handles only looked up by creator (no flips) */
+ md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
+ if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
+ CNETERR("%s: Dropping REPLY from %s for %s "
+ "MD "LPX64"."LPX64"\n",
+ libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
+ (md == NULL) ? "invalid" : "inactive",
+ hdr->msg.reply.dst_wmd.wh_interface_cookie,
+ hdr->msg.reply.dst_wmd.wh_object_cookie);
+ if (md != NULL && md->md_me != NULL)
+ CERROR("REPLY MD also attached to portal %d\n",
+ md->md_me->me_portal);
+
+ lnet_res_unlock(cpt);
+ return ENOENT; /* +ve: OK but no match */
+ }
+
+ LASSERT (md->md_offset == 0);
+
+ rlength = hdr->payload_length;
+ mlength = MIN(rlength, (int)md->md_length);
+
+ if (mlength < rlength &&
+ (md->md_options & LNET_MD_TRUNCATE) == 0) {
+ CNETERR("%s: Dropping REPLY from %s length %d "
+ "for MD "LPX64" would overflow (%d)\n",
+ libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
+ rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
+ mlength);
+ lnet_res_unlock(cpt);
+ return ENOENT; /* +ve: OK but no match */
+ }
+
+ CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md "LPX64"\n",
+ libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
+ mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);
+
+ lnet_msg_attach_md(msg, md, 0, mlength);
+
+ if (mlength != 0)
+ lnet_setpayloadbuffer(msg);
+
+ lnet_res_unlock(cpt);
+
+ lnet_build_msg_event(msg, LNET_EVENT_REPLY);
+
+ lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
+ return 0;
+}
+
+static int
+lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg)
+{
+ lnet_hdr_t *hdr = &msg->msg_hdr;
+ lnet_process_id_t src = {0};
+ lnet_libmd_t *md;
+ int cpt;
+
+ src.nid = hdr->src_nid;
+ src.pid = hdr->src_pid;
+
+ /* Convert ack fields to host byte order */
+ hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
+ hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);
+
+ cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
+ lnet_res_lock(cpt);
+
+ /* NB handles only looked up by creator (no flips) */
+ md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
+ if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
+ /* Don't moan; this is expected */
+ CDEBUG(D_NET,
+ "%s: Dropping ACK from %s to %s MD "LPX64"."LPX64"\n",
+ libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
+ (md == NULL) ? "invalid" : "inactive",
+ hdr->msg.ack.dst_wmd.wh_interface_cookie,
+ hdr->msg.ack.dst_wmd.wh_object_cookie);
+ if (md != NULL && md->md_me != NULL)
+ CERROR("Source MD also attached to portal %d\n",
+ md->md_me->me_portal);
+
+ lnet_res_unlock(cpt);
+ return ENOENT; /* +ve! */
+ }
+
+ CDEBUG(D_NET, "%s: ACK from %s into md "LPX64"\n",
+ libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
+ hdr->msg.ack.dst_wmd.wh_object_cookie);
+
+ lnet_msg_attach_md(msg, md, 0, 0);
+
+ lnet_res_unlock(cpt);
+
+ lnet_build_msg_event(msg, LNET_EVENT_ACK);
+
+ lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
+ return 0;
+}
+
+static int
+lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg)
+{
+ int rc = 0;
+
+ if (msg->msg_rxpeer->lp_rtrcredits <= 0 ||
+ lnet_msg2bufpool(msg)->rbp_credits <= 0) {
+ if (ni->ni_lnd->lnd_eager_recv == NULL) {
+ msg->msg_rx_ready_delay = 1;
+ } else {
+ lnet_net_unlock(msg->msg_rx_cpt);
+ rc = lnet_ni_eager_recv(ni, msg);
+ lnet_net_lock(msg->msg_rx_cpt);
+ }
+ }
+
+ if (rc == 0)
+ rc = lnet_post_routed_recv_locked(msg, 0);
+ return rc;
+}
+
+char *
+lnet_msgtyp2str (int type)
+{
+ switch (type) {
+ case LNET_MSG_ACK:
+ return ("ACK");
+ case LNET_MSG_PUT:
+ return ("PUT");
+ case LNET_MSG_GET:
+ return ("GET");
+ case LNET_MSG_REPLY:
+ return ("REPLY");
+ case LNET_MSG_HELLO:
+ return ("HELLO");
+ default:
+ return ("<UNKNOWN>");
+ }
+}
+EXPORT_SYMBOL(lnet_msgtyp2str);
+
+void
+lnet_print_hdr(lnet_hdr_t * hdr)
+{
+ lnet_process_id_t src = {0};
+ lnet_process_id_t dst = {0};
+ char *type_str = lnet_msgtyp2str (hdr->type);
+
+ src.nid = hdr->src_nid;
+ src.pid = hdr->src_pid;
+
+ dst.nid = hdr->dest_nid;
+ dst.pid = hdr->dest_pid;
+
+ CWARN("P3 Header at %p of type %s\n", hdr, type_str);
+ CWARN(" From %s\n", libcfs_id2str(src));
+ CWARN(" To %s\n", libcfs_id2str(dst));
+
+ switch (hdr->type) {
+ default:
+ break;
+
+ case LNET_MSG_PUT:
+ CWARN(" Ptl index %d, ack md "LPX64"."LPX64", "
+ "match bits "LPU64"\n",
+ hdr->msg.put.ptl_index,
+ hdr->msg.put.ack_wmd.wh_interface_cookie,
+ hdr->msg.put.ack_wmd.wh_object_cookie,
+ hdr->msg.put.match_bits);
+ CWARN(" Length %d, offset %d, hdr data "LPX64"\n",
+ hdr->payload_length, hdr->msg.put.offset,
+ hdr->msg.put.hdr_data);
+ break;
+
+ case LNET_MSG_GET:
+ CWARN(" Ptl index %d, return md "LPX64"."LPX64", "
+ "match bits "LPU64"\n", hdr->msg.get.ptl_index,
+ hdr->msg.get.return_wmd.wh_interface_cookie,
+ hdr->msg.get.return_wmd.wh_object_cookie,
+ hdr->msg.get.match_bits);
+ CWARN(" Length %d, src offset %d\n",
+ hdr->msg.get.sink_length,
+ hdr->msg.get.src_offset);
+ break;
+
+ case LNET_MSG_ACK:
+ CWARN(" dst md "LPX64"."LPX64", "
+ "manipulated length %d\n",
+ hdr->msg.ack.dst_wmd.wh_interface_cookie,
+ hdr->msg.ack.dst_wmd.wh_object_cookie,
+ hdr->msg.ack.mlength);
+ break;
+
+ case LNET_MSG_REPLY:
+ CWARN(" dst md "LPX64"."LPX64", "
+ "length %d\n",
+ hdr->msg.reply.dst_wmd.wh_interface_cookie,
+ hdr->msg.reply.dst_wmd.wh_object_cookie,
+ hdr->payload_length);
+ }
+
+}
+
+int
+lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
+ void *private, int rdma_req)
+{
+ int rc = 0;
+ int cpt;
+ int for_me;
+ struct lnet_msg *msg;
+ lnet_pid_t dest_pid;
+ lnet_nid_t dest_nid;
+ lnet_nid_t src_nid;
+ __u32 payload_length;
+ __u32 type;
+
+ LASSERT (!in_interrupt ());
+
+ type = le32_to_cpu(hdr->type);
+ src_nid = le64_to_cpu(hdr->src_nid);
+ dest_nid = le64_to_cpu(hdr->dest_nid);
+ dest_pid = le32_to_cpu(hdr->dest_pid);
+ payload_length = le32_to_cpu(hdr->payload_length);
+
+ for_me = (ni->ni_nid == dest_nid);
+ cpt = lnet_cpt_of_nid(from_nid);
+
+ switch (type) {
+ case LNET_MSG_ACK:
+ case LNET_MSG_GET:
+ if (payload_length > 0) {
+ CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
+ libcfs_nid2str(from_nid),
+ libcfs_nid2str(src_nid),
+ lnet_msgtyp2str(type), payload_length);
+ return -EPROTO;
+ }
+ break;
+
+ case LNET_MSG_PUT:
+ case LNET_MSG_REPLY:
+ if (payload_length > (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
+ CERROR("%s, src %s: bad %s payload %d "
+ "(%d max expected)\n",
+ libcfs_nid2str(from_nid),
+ libcfs_nid2str(src_nid),
+ lnet_msgtyp2str(type),
+ payload_length,
+ for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
+ return -EPROTO;
+ }
+ break;
+
+ default:
+ CERROR("%s, src %s: Bad message type 0x%x\n",
+ libcfs_nid2str(from_nid),
+ libcfs_nid2str(src_nid), type);
+ return -EPROTO;
+ }
+
+ if (the_lnet.ln_routing &&
+ ni->ni_last_alive != cfs_time_current_sec()) {
+ lnet_ni_lock(ni);
+
+ /* NB: so far this is the only place NI status gets set to "up" */
+ ni->ni_last_alive = cfs_time_current_sec();
+ if (ni->ni_status != NULL &&
+ ni->ni_status->ns_status == LNET_NI_STATUS_DOWN)
+ ni->ni_status->ns_status = LNET_NI_STATUS_UP;
+ lnet_ni_unlock(ni);
+ }
+
+ /* Regard a bad destination NID as a protocol error. Senders should
+ * know what they're doing; if they don't, they're misconfigured, buggy
+ * or malicious, so we chop them off at the knees :) */
+
+ if (!for_me) {
+ if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
+ /* should have gone direct */
+ CERROR ("%s, src %s: Bad dest nid %s "
+ "(should have been sent direct)\n",
+ libcfs_nid2str(from_nid),
+ libcfs_nid2str(src_nid),
+ libcfs_nid2str(dest_nid));
+ return -EPROTO;
+ }
+
+ if (lnet_islocalnid(dest_nid)) {
+ /* dest is another local NI; sender should have used
+ * this node's NID on its own network */
+ CERROR ("%s, src %s: Bad dest nid %s "
+ "(it's my nid but on a different network)\n",
+ libcfs_nid2str(from_nid),
+ libcfs_nid2str(src_nid),
+ libcfs_nid2str(dest_nid));
+ return -EPROTO;
+ }
+
+ if (rdma_req && type == LNET_MSG_GET) {
+ CERROR ("%s, src %s: Bad optimized GET for %s "
+ "(final destination must be me)\n",
+ libcfs_nid2str(from_nid),
+ libcfs_nid2str(src_nid),
+ libcfs_nid2str(dest_nid));
+ return -EPROTO;
+ }
+
+ if (!the_lnet.ln_routing) {
+ CERROR ("%s, src %s: Dropping message for %s "
+ "(routing not enabled)\n",
+ libcfs_nid2str(from_nid),
+ libcfs_nid2str(src_nid),
+ libcfs_nid2str(dest_nid));
+ goto drop;
+ }
+ }
+
+ /* Message looks OK; we're not going to return an error, so we MUST
+ * call back lnd_recv() come what may... */
+
+ if (!list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
+ fail_peer (src_nid, 0)) /* shall we now? */
+ {
+ CERROR("%s, src %s: Dropping %s to simulate failure\n",
+ libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
+ lnet_msgtyp2str(type));
+ goto drop;
+ }
+
+ msg = lnet_msg_alloc();
+ if (msg == NULL) {
+ CERROR("%s, src %s: Dropping %s (out of memory)\n",
+ libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
+ lnet_msgtyp2str(type));
+ goto drop;
+ }
+
+ /* msg zeroed in lnet_msg_alloc; i.e. flags all clear, pointers NULL etc */
+
+ msg->msg_type = type;
+ msg->msg_private = private;
+ msg->msg_receiving = 1;
+ msg->msg_len = msg->msg_wanted = payload_length;
+ msg->msg_offset = 0;
+ msg->msg_hdr = *hdr;
+ /* for building message event */
+ msg->msg_from = from_nid;
+ if (!for_me) {
+ msg->msg_target.pid = dest_pid;
+ msg->msg_target.nid = dest_nid;
+ msg->msg_routing = 1;
+
+ } else {
+ /* convert common msg->hdr fields to host byteorder */
+ msg->msg_hdr.type = type;
+ msg->msg_hdr.src_nid = src_nid;
+ msg->msg_hdr.src_pid = le32_to_cpu(msg->msg_hdr.src_pid);
+ msg->msg_hdr.dest_nid = dest_nid;
+ msg->msg_hdr.dest_pid = dest_pid;
+ msg->msg_hdr.payload_length = payload_length;
+ }
+
+ lnet_net_lock(cpt);
+ rc = lnet_nid2peer_locked(&msg->msg_rxpeer, from_nid, cpt);
+ if (rc != 0) {
+ lnet_net_unlock(cpt);
+ CERROR("%s, src %s: Dropping %s "
+ "(error %d looking up sender)\n",
+ libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
+ lnet_msgtyp2str(type), rc);
+ lnet_msg_free(msg);
+ goto drop;
+ }
+
+ lnet_msg_commit(msg, cpt);
+
+ if (!for_me) {
+ rc = lnet_parse_forward_locked(ni, msg);
+ lnet_net_unlock(cpt);
+
+ if (rc < 0)
+ goto free_drop;
+ if (rc == 0) {
+ lnet_ni_recv(ni, msg->msg_private, msg, 0,
+ 0, payload_length, payload_length);
+ }
+ return 0;
+ }
+
+ lnet_net_unlock(cpt);
+
+ switch (type) {
+ case LNET_MSG_ACK:
+ rc = lnet_parse_ack(ni, msg);
+ break;
+ case LNET_MSG_PUT:
+ rc = lnet_parse_put(ni, msg);
+ break;
+ case LNET_MSG_GET:
+ rc = lnet_parse_get(ni, msg, rdma_req);
+ break;
+ case LNET_MSG_REPLY:
+ rc = lnet_parse_reply(ni, msg);
+ break;
+ default:
+ LASSERT(0);
+ rc = -EPROTO;
+ goto free_drop; /* prevent an unused label if !kernel */
+ }
+
+ if (rc == 0)
+ return 0;
+
+ LASSERT (rc == ENOENT);
+
+ free_drop:
+ LASSERT(msg->msg_md == NULL);
+ lnet_finalize(ni, msg, rc);
+
+ drop:
+ lnet_drop_message(ni, cpt, private, payload_length);
+ return 0;
+}
+EXPORT_SYMBOL(lnet_parse);
+
+void
+lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
+{
+ while (!list_empty(head)) {
+ lnet_process_id_t id = {0};
+ lnet_msg_t *msg;
+
+ msg = list_entry(head->next, lnet_msg_t, msg_list);
+ list_del(&msg->msg_list);
+
+ id.nid = msg->msg_hdr.src_nid;
+ id.pid = msg->msg_hdr.src_pid;
+
+ LASSERT(msg->msg_md == NULL);
+ LASSERT(msg->msg_rx_delayed);
+ LASSERT(msg->msg_rxpeer != NULL);
+ LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
+
+ CWARN("Dropping delayed PUT from %s portal %d match "LPU64
+ " offset %d length %d: %s\n",
+ libcfs_id2str(id),
+ msg->msg_hdr.msg.put.ptl_index,
+ msg->msg_hdr.msg.put.match_bits,
+ msg->msg_hdr.msg.put.offset,
+ msg->msg_hdr.payload_length, reason);
+
+ /* NB I can't drop msg's ref on msg_rxpeer until after I've
+ * called lnet_drop_message(), so I just hang onto msg as well
+ * until that's done */
+
+ lnet_drop_message(msg->msg_rxpeer->lp_ni,
+ msg->msg_rxpeer->lp_cpt,
+ msg->msg_private, msg->msg_len);
+ /*
+ * NB: the message will not generate an event because it has no
+ * attached MD, but we should still pass an error code so that
+ * lnet_msg_decommit() can skip counter updates and other checks.
+ */
+ lnet_finalize(msg->msg_rxpeer->lp_ni, msg, -ENOENT);
+ }
+}
+
+void
+lnet_recv_delayed_msg_list(struct list_head *head)
+{
+ while (!list_empty(head)) {
+ lnet_msg_t *msg;
+ lnet_process_id_t id;
+
+ msg = list_entry(head->next, lnet_msg_t, msg_list);
+ list_del(&msg->msg_list);
+
+ /* md won't disappear under me, since each msg
+ * holds a ref on it */
+
+ id.nid = msg->msg_hdr.src_nid;
+ id.pid = msg->msg_hdr.src_pid;
+
+ LASSERT(msg->msg_rx_delayed);
+ LASSERT(msg->msg_md != NULL);
+ LASSERT(msg->msg_rxpeer != NULL);
+ LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
+
+ CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
+ "match "LPU64" offset %d length %d.\n",
+ libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index,
+ msg->msg_hdr.msg.put.match_bits,
+ msg->msg_hdr.msg.put.offset,
+ msg->msg_hdr.payload_length);
+
+ lnet_recv_put(msg->msg_rxpeer->lp_ni, msg);
+ }
+}
+
+/**
+ * Initiate an asynchronous PUT operation.
+ *
+ * There are several events associated with a PUT: completion of the send on
+ * the initiator node (LNET_EVENT_SEND), and when the send completes
+ * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
+ * that the operation was accepted by the target. The event LNET_EVENT_PUT is
+ * used at the target node to indicate the completion of incoming data
+ * delivery.
+ *
+ * The local events will be logged in the EQ associated with the MD pointed to
+ * by the \a mdh handle. Using an MD without an associated EQ results in these
+ * events being discarded. In this case, the caller must have another
+ * mechanism (e.g., a higher level protocol) for determining when it is safe
+ * to modify the memory region associated with the MD.
+ *
+ * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
+ * LNET_EVENT_ACK, though intuitively ACK should happen after SEND.
+ *
+ * \param self Indicates the NID of a local interface through which to send
+ * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
+ * \param mdh A handle for the MD that describes the memory to be sent. The MD
+ * must be "free floating" (See LNetMDBind()).
+ * \param ack Controls whether an acknowledgment is requested.
+ * Acknowledgments are only sent when they are requested by the initiating
+ * process and the target MD enables them.
+ * \param target A process identifier for the target process.
+ * \param portal The index in the \a target's portal table.
+ * \param match_bits The match bits to use for MD selection at the target
+ * process.
+ * \param offset The offset into the target MD (only used when the target
+ * MD has the LNET_MD_MANAGE_REMOTE option set).
+ * \param hdr_data 64 bits of user data that can be included in the message
+ * header. This data is written to an event queue entry at the target if an
+ * EQ is present on the matching MD.
+ *
+ * \retval 0 Success, and only in this case events will be generated
+ * and logged to EQ (if it exists).
+ * \retval -EIO Simulated failure.
+ * \retval -ENOMEM Memory allocation failure.
+ * \retval -ENOENT Invalid MD object.
+ *
+ * \see lnet_event_t::hdr_data and lnet_event_kind_t.
+ */
+int
+LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
+ lnet_process_id_t target, unsigned int portal,
+ __u64 match_bits, unsigned int offset,
+ __u64 hdr_data)
+{
+ struct lnet_msg *msg;
+ struct lnet_libmd *md;
+ int cpt;
+ int rc;
+
+ LASSERT (the_lnet.ln_init);
+ LASSERT (the_lnet.ln_refcount > 0);
+
+ if (!list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
+ fail_peer (target.nid, 1)) /* shall we now? */
+ {
+ CERROR("Dropping PUT to %s: simulated failure\n",
+ libcfs_id2str(target));
+ return -EIO;
+ }
+
+ msg = lnet_msg_alloc();
+ if (msg == NULL) {
+ CERROR("Dropping PUT to %s: ENOMEM on lnet_msg_t\n",
+ libcfs_id2str(target));
+ return -ENOMEM;
+ }
+ msg->msg_vmflush = !!memory_pressure_get();
+
+ cpt = lnet_cpt_of_cookie(mdh.cookie);
+ lnet_res_lock(cpt);
+
+ md = lnet_handle2md(&mdh);
+ if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
+ CERROR("Dropping PUT ("LPU64":%d:%s): MD (%d) invalid\n",
+ match_bits, portal, libcfs_id2str(target),
+ md == NULL ? -1 : md->md_threshold);
+ if (md != NULL && md->md_me != NULL)
+ CERROR("Source MD also attached to portal %d\n",
+ md->md_me->me_portal);
+ lnet_res_unlock(cpt);
+
+ lnet_msg_free(msg);
+ return -ENOENT;
+ }
+
+ CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target));
+
+ lnet_msg_attach_md(msg, md, 0, 0);
+
+ lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);
+
+ msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
+ msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
+ msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
+ msg->msg_hdr.msg.put.hdr_data = hdr_data;
+
+ /* NB handles only looked up by creator (no flips) */
+ if (ack == LNET_ACK_REQ) {
+ msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
+ the_lnet.ln_interface_cookie;
+ msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
+ md->md_lh.lh_cookie;
+ } else {
+ msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
+ LNET_WIRE_HANDLE_COOKIE_NONE;
+ msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
+ LNET_WIRE_HANDLE_COOKIE_NONE;
+ }
+
+ lnet_res_unlock(cpt);
+
+ lnet_build_msg_event(msg, LNET_EVENT_SEND);
+
+ rc = lnet_send(self, msg, LNET_NID_ANY);
+ if (rc != 0) {
+ CNETERR( "Error sending PUT to %s: %d\n",
+ libcfs_id2str(target), rc);
+ lnet_finalize (NULL, msg, rc);
+ }
+
+ /* completion will be signalled by an event */
+ return 0;
+}
+EXPORT_SYMBOL(LNetPut);
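+
+/*
+ * Minimal usage sketch for LNetPut() (illustrative; 'example_put' is not
+ * part of the LNet API).  'mdh' is assumed to come from LNetMDBind() and
+ * to describe the buffer being sent; the buffer may only be reused after
+ * LNET_EVENT_SEND (and LNET_EVENT_ACK, if requested) is seen on the MD's
+ * EQ.
+ */
+static inline int
+example_put(lnet_handle_md_t mdh, lnet_process_id_t target,
+            unsigned int portal, __u64 mbits, __u64 hdr_data)
+{
+ /* let LNet pick the local NI, request an ACK, target offset 0 */
+ return LNetPut(LNET_NID_ANY, mdh, LNET_ACK_REQ, target,
+                portal, mbits, 0, hdr_data);
+}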
+
+lnet_msg_t *
+lnet_create_reply_msg (lnet_ni_t *ni, lnet_msg_t *getmsg)
+{
+ /* The LND can DMA direct to the GET md (i.e. no REPLY msg). This
+ * returns a msg for the LND to pass to lnet_finalize() when the sink
+ * data has been received.
+ *
+ * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
+ * lnet_finalize() is called on it, so the LND must call this first */
+
+ struct lnet_msg *msg = lnet_msg_alloc();
+ struct lnet_libmd *getmd = getmsg->msg_md;
+ lnet_process_id_t peer_id = getmsg->msg_target;
+ int cpt;
+
+ LASSERT(!getmsg->msg_target_is_router);
+ LASSERT(!getmsg->msg_routing);
+
+ cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
+ lnet_res_lock(cpt);
+
+ LASSERT (getmd->md_refcount > 0);
+
+ if (msg == NULL) {
+ CERROR ("%s: Dropping REPLY from %s: can't allocate msg\n",
+ libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
+ goto drop;
+ }
+
+ if (getmd->md_threshold == 0) {
+ CERROR ("%s: Dropping REPLY from %s for inactive MD %p\n",
+ libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
+ getmd);
+ lnet_res_unlock(cpt);
+ goto drop;
+ }
+
+ LASSERT(getmd->md_offset == 0);
+
+ CDEBUG(D_NET, "%s: Reply from %s md %p\n",
+ libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);
+
+ /* setup information for lnet_build_msg_event */
+ msg->msg_from = peer_id.nid;
+ msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
+ msg->msg_hdr.src_nid = peer_id.nid;
+ msg->msg_hdr.payload_length = getmd->md_length;
+ msg->msg_receiving = 1; /* required by lnet_msg_attach_md */
+
+ lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
+ lnet_res_unlock(cpt);
+
+ cpt = lnet_cpt_of_nid(peer_id.nid);
+
+ lnet_net_lock(cpt);
+ lnet_msg_commit(msg, cpt);
+ lnet_net_unlock(cpt);
+
+ lnet_build_msg_event(msg, LNET_EVENT_REPLY);
+
+ return msg;
+
+ drop:
+ cpt = lnet_cpt_of_nid(peer_id.nid);
+
+ lnet_net_lock(cpt);
+ the_lnet.ln_counters[cpt]->drop_count++;
+ the_lnet.ln_counters[cpt]->drop_length += getmd->md_length;
+ lnet_net_unlock(cpt);
+
+ if (msg != NULL)
+ lnet_msg_free(msg);
+
+ return NULL;
+}
+EXPORT_SYMBOL(lnet_create_reply_msg);
+
+void
+lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *reply, unsigned int len)
+{
+ /* Set the REPLY length, now that the RDMA that elides the REPLY message has
+ * completed and I know it. */
+ LASSERT (reply != NULL);
+ LASSERT (reply->msg_type == LNET_MSG_GET);
+ LASSERT (reply->msg_ev.type == LNET_EVENT_REPLY);
+
+ /* NB I trusted my peer to RDMA. If she tells me she's written beyond
+ * the end of my buffer, I might as well be dead. */
+ LASSERT (len <= reply->msg_ev.mlength);
+
+ reply->msg_ev.mlength = len;
+}
+EXPORT_SYMBOL(lnet_set_reply_msg_len);
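+
+/*
+ * Sketch of the LND-side flow for an RDMA-optimized GET using the two
+ * helpers above (illustrative; the surrounding LND code and the 'status'
+ * and 'nob' values are assumed, not shown in this file):
+ *
+ *   reply = lnet_create_reply_msg(ni, getmsg);   before finalizing getmsg
+ *   ... RDMA the sink data directly into the GET MD ...
+ *   lnet_set_reply_msg_len(ni, reply, nob);      bytes actually moved
+ *   lnet_finalize(ni, reply, status);            completes the elided REPLY
+ *   ('getmsg' itself is finalized separately when its own send completes)
+ */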
+
+/**
+ * Initiate an asynchronous GET operation.
+ *
+ * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
+ * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
+ * the target node in the REPLY has been written to local MD.
+ *
+ * On the target node, an LNET_EVENT_GET is logged when the GET request
+ * arrives and is accepted into a MD.
+ *
+ * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
+ * \param mdh A handle for the MD that describes the memory into which the
+ * requested data will be received. The MD must be "free floating" (See LNetMDBind()).
+ *
+ * \retval 0 Success, and only in this case events will be generated
+ * and logged to EQ (if it exists) of the MD.
+ * \retval -EIO Simulated failure.
+ * \retval -ENOMEM Memory allocation failure.
+ * \retval -ENOENT Invalid MD object.
+ */
+int
+LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
+ lnet_process_id_t target, unsigned int portal,
+ __u64 match_bits, unsigned int offset)
+{
+ struct lnet_msg *msg;
+ struct lnet_libmd *md;
+ int cpt;
+ int rc;
+
+ LASSERT (the_lnet.ln_init);
+ LASSERT (the_lnet.ln_refcount > 0);
+
+ if (!list_empty (&the_lnet.ln_test_peers) && /* normally we don't */
+ fail_peer (target.nid, 1)) /* shall we now? */
+ {
+ CERROR("Dropping GET to %s: simulated failure\n",
+ libcfs_id2str(target));
+ return -EIO;
+ }
+
+ msg = lnet_msg_alloc();
+ if (msg == NULL) {
+ CERROR("Dropping GET to %s: ENOMEM on lnet_msg_t\n",
+ libcfs_id2str(target));
+ return -ENOMEM;
+ }
+
+ cpt = lnet_cpt_of_cookie(mdh.cookie);
+ lnet_res_lock(cpt);
+
+ md = lnet_handle2md(&mdh);
+ if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
+ CERROR("Dropping GET ("LPU64":%d:%s): MD (%d) invalid\n",
+ match_bits, portal, libcfs_id2str(target),
+ md == NULL ? -1 : md->md_threshold);
+ if (md != NULL && md->md_me != NULL)
+ CERROR("REPLY MD also attached to portal %d\n",
+ md->md_me->me_portal);
+
+ lnet_res_unlock(cpt);
+
+ lnet_msg_free(msg);
+
+ return -ENOENT;
+ }
+
+ CDEBUG(D_NET, "LNetGet -> %s\n", libcfs_id2str(target));
+
+ lnet_msg_attach_md(msg, md, 0, 0);
+
+ lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);
+
+ msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
+ msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
+ msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
+ msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);
+
+ /* NB handles only looked up by creator (no flips) */
+ msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
+ the_lnet.ln_interface_cookie;
+ msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
+ md->md_lh.lh_cookie;
+
+ lnet_res_unlock(cpt);
+
+ lnet_build_msg_event(msg, LNET_EVENT_SEND);
+
+ rc = lnet_send(self, msg, LNET_NID_ANY);
+ if (rc < 0) {
+ CNETERR( "Error sending GET to %s: %d\n",
+ libcfs_id2str(target), rc);
+ lnet_finalize (NULL, msg, rc);
+ }
+
+ /* completion will be signalled by an event */
+ return 0;
+}
+EXPORT_SYMBOL(LNetGet);
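+
+/*
+ * Minimal usage sketch for LNetGet() (illustrative; 'example_get' is not
+ * part of the LNet API).  'mdh' is assumed to come from LNetMDBind() and
+ * to describe the buffer the REPLY data lands in; completion is signalled
+ * by LNET_EVENT_SEND and then LNET_EVENT_REPLY on the MD's EQ.
+ */
+static inline int
+example_get(lnet_handle_md_t mdh, lnet_process_id_t target,
+            unsigned int portal, __u64 mbits)
+{
+ /* let LNet pick the local NI; read from offset 0 of the target MD */
+ return LNetGet(LNET_NID_ANY, mdh, target, portal, mbits, 0);
+}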
+
+/**
+ * Calculate distance to node at \a dstnid.
+ *
+ * \param dstnid Target NID.
+ * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
+ * is saved here.
+ * \param orderp If not NULL, order of the route to reach \a dstnid is saved
+ * here.
+ *
+ * \retval 0 If \a dstnid belongs to a local interface, and reserved option
+ * local_nid_dist_zero is set, which is the default.
+ * \retval positives Distance to target NID, i.e. number of hops plus one.
+ * \retval -EHOSTUNREACH If \a dstnid is not reachable.
+ */
+int
+LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
+{
+ struct list_head *e;
+ struct lnet_ni *ni;
+ lnet_remotenet_t *rnet;
+ __u32 dstnet = LNET_NIDNET(dstnid);
+ int hops;
+ int cpt;
+ __u32 order = 2;
+ struct list_head *rn_list;
+
+ /* if !local_nid_dist_zero, I don't return a distance of 0 ever
+ * (when lustre sees a distance of 0, it substitutes 0@lo), so I
+ * keep order 0 free for 0@lo and order 1 free for a local NID
+ * match */
+
+ LASSERT (the_lnet.ln_init);
+ LASSERT (the_lnet.ln_refcount > 0);
+
+ cpt = lnet_net_lock_current();
+
+ list_for_each (e, &the_lnet.ln_nis) {
+ ni = list_entry(e, lnet_ni_t, ni_list);
+
+ if (ni->ni_nid == dstnid) {
+ if (srcnidp != NULL)
+ *srcnidp = dstnid;
+ if (orderp != NULL) {
+ if (LNET_NETTYP(LNET_NIDNET(dstnid)) == LOLND)
+ *orderp = 0;
+ else
+ *orderp = 1;
+ }
+ lnet_net_unlock(cpt);
+
+ return local_nid_dist_zero ? 0 : 1;
+ }
+
+ if (LNET_NIDNET(ni->ni_nid) == dstnet) {
+ if (srcnidp != NULL)
+ *srcnidp = ni->ni_nid;
+ if (orderp != NULL)
+ *orderp = order;
+ lnet_net_unlock(cpt);
+ return 1;
+ }
+
+ order++;
+ }
+
+ rn_list = lnet_net2rnethash(dstnet);
+ list_for_each(e, rn_list) {
+ rnet = list_entry(e, lnet_remotenet_t, lrn_list);
+
+ if (rnet->lrn_net == dstnet) {
+ lnet_route_t *route;
+ lnet_route_t *shortest = NULL;
+
+ LASSERT (!list_empty(&rnet->lrn_routes));
+
+ list_for_each_entry(route, &rnet->lrn_routes,
+ lr_list) {
+ if (shortest == NULL ||
+ route->lr_hops < shortest->lr_hops)
+ shortest = route;
+ }
+
+ LASSERT (shortest != NULL);
+ hops = shortest->lr_hops;
+ if (srcnidp != NULL)
+ *srcnidp = shortest->lr_gateway->lp_ni->ni_nid;
+ if (orderp != NULL)
+ *orderp = order;
+ lnet_net_unlock(cpt);
+ return hops + 1;
+ }
+ order++;
+ }
+
+ lnet_net_unlock(cpt);
+ return -EHOSTUNREACH;
+}
+EXPORT_SYMBOL(LNetDist);
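+
+/*
+ * Minimal usage sketch for LNetDist() (illustrative; the function below is
+ * not part of the LNet API): report how far away 'dstnid' is and which
+ * local NI would be used to reach it.
+ */
+static inline void
+example_show_distance(lnet_nid_t dstnid)
+{
+ lnet_nid_t src = LNET_NID_ANY;
+ __u32 order = 0;
+ int dist = LNetDist(dstnid, &src, &order);
+
+ if (dist < 0)
+  CERROR("%s is unreachable: %d\n",
+         libcfs_nid2str(dstnid), dist);
+ else
+  CDEBUG(D_NET, "%s is at distance %d via %s (order %u)\n",
+         libcfs_nid2str(dstnid), dist,
+         libcfs_nid2str(src), order);
+}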
+
+/**
+ * Set the number of asynchronous messages expected from a target process.
+ *
+ * This function is only meaningful for userspace callers. It's a no-op when
+ * called from the kernel.
+ *
+ * Asynchronous messages are those that can come from a target when the
+ * userspace process is not waiting for IO to complete; e.g., AST callbacks
+ * from Lustre servers. Specifying the expected number of such messages
+ * allows them to be eagerly received when the user process is not running in
+ * LNet; otherwise network errors may occur.
+ *
+ * \param id Process ID of the target process.
+ * \param nasync Number of asynchronous messages expected from the target.
+ *
+ * \return 0 on success, and an error code otherwise.
+ */
+int
+LNetSetAsync(lnet_process_id_t id, int nasync)
+{
+ return 0;
+}
+EXPORT_SYMBOL(LNetSetAsync);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-msg.c b/drivers/staging/lustre/lnet/lnet/lib-msg.c
new file mode 100644
index 0000000..61ae88b
--- /dev/null
+++ b/drivers/staging/lustre/lnet/lnet/lib-msg.c
@@ -0,0 +1,647 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/lnet/lib-msg.c
+ *
+ * Message decoding, parsing and finalizing routines
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/lnet/lib-lnet.h>
+
+void
+lnet_build_unlink_event(lnet_libmd_t *md, lnet_event_t *ev)
+{
+ memset(ev, 0, sizeof(*ev));
+
+ ev->status = 0;
+ ev->unlinked = 1;
+ ev->type = LNET_EVENT_UNLINK;
+ lnet_md_deconstruct(md, &ev->md);
+ lnet_md2handle(&ev->md_handle, md);
+}
+
+/*
+ * Don't need any lock, must be called after lnet_commit_md
+ */
+void
+lnet_build_msg_event(lnet_msg_t *msg, lnet_event_kind_t ev_type)
+{
+ lnet_hdr_t *hdr = &msg->msg_hdr;
+ lnet_event_t *ev = &msg->msg_ev;
+
+ LASSERT(!msg->msg_routing);
+
+ ev->type = ev_type;
+
+ if (ev_type == LNET_EVENT_SEND) {
+ /* event for active message */
+ ev->target.nid = le64_to_cpu(hdr->dest_nid);
+ ev->target.pid = le32_to_cpu(hdr->dest_pid);
+ ev->initiator.nid = LNET_NID_ANY;
+ ev->initiator.pid = the_lnet.ln_pid;
+ ev->sender = LNET_NID_ANY;
+
+ } else {
+ /* event for passive message */
+ ev->target.pid = hdr->dest_pid;
+ ev->target.nid = hdr->dest_nid;
+ ev->initiator.pid = hdr->src_pid;
+ ev->initiator.nid = hdr->src_nid;
+ ev->rlength = hdr->payload_length;
+ ev->sender = msg->msg_from;
+ ev->mlength = msg->msg_wanted;
+ ev->offset = msg->msg_offset;
+ }
+
+ switch (ev_type) {
+ default:
+ LBUG();
+
+ case LNET_EVENT_PUT: /* passive PUT */
+ ev->pt_index = hdr->msg.put.ptl_index;
+ ev->match_bits = hdr->msg.put.match_bits;
+ ev->hdr_data = hdr->msg.put.hdr_data;
+ return;
+
+ case LNET_EVENT_GET: /* passive GET */
+ ev->pt_index = hdr->msg.get.ptl_index;
+ ev->match_bits = hdr->msg.get.match_bits;
+ ev->hdr_data = 0;
+ return;
+
+ case LNET_EVENT_ACK: /* ACK */
+ ev->match_bits = hdr->msg.ack.match_bits;
+ ev->mlength = hdr->msg.ack.mlength;
+ return;
+
+ case LNET_EVENT_REPLY: /* REPLY */
+ return;
+
+ case LNET_EVENT_SEND: /* active message */
+ if (msg->msg_type == LNET_MSG_PUT) {
+ ev->pt_index = le32_to_cpu(hdr->msg.put.ptl_index);
+ ev->match_bits = le64_to_cpu(hdr->msg.put.match_bits);
+ ev->offset = le32_to_cpu(hdr->msg.put.offset);
+ ev->mlength =
+ ev->rlength = le32_to_cpu(hdr->payload_length);
+ ev->hdr_data = le64_to_cpu(hdr->msg.put.hdr_data);
+
+ } else {
+ LASSERT(msg->msg_type == LNET_MSG_GET);
+ ev->pt_index = le32_to_cpu(hdr->msg.get.ptl_index);
+ ev->match_bits = le64_to_cpu(hdr->msg.get.match_bits);
+ ev->mlength =
+ ev->rlength = le32_to_cpu(hdr->msg.get.sink_length);
+ ev->offset = le32_to_cpu(hdr->msg.get.src_offset);
+ ev->hdr_data = 0;
+ }
+ return;
+ }
+}
+
+void
+lnet_msg_commit(lnet_msg_t *msg, int cpt)
+{
+ struct lnet_msg_container *container = the_lnet.ln_msg_containers[cpt];
+ lnet_counters_t *counters = the_lnet.ln_counters[cpt];
+
+ /* routed message can be committed for both receiving and sending */
+ LASSERT(!msg->msg_tx_committed);
+
+ if (msg->msg_sending) {
+ LASSERT(!msg->msg_receiving);
+
+ msg->msg_tx_cpt = cpt;
+ msg->msg_tx_committed = 1;
+ if (msg->msg_rx_committed) { /* routed message REPLY */
+ LASSERT(msg->msg_onactivelist);
+ return;
+ }
+ } else {
+ LASSERT(!msg->msg_sending);
+ msg->msg_rx_cpt = cpt;
+ msg->msg_rx_committed = 1;
+ }
+
+ LASSERT(!msg->msg_onactivelist);
+ msg->msg_onactivelist = 1;
+ list_add(&msg->msg_activelist, &container->msc_active);
+
+ counters->msgs_alloc++;
+ if (counters->msgs_alloc > counters->msgs_max)
+ counters->msgs_max = counters->msgs_alloc;
+}
+
+static void
+lnet_msg_decommit_tx(lnet_msg_t *msg, int status)
+{
+ lnet_counters_t *counters;
+ lnet_event_t *ev = &msg->msg_ev;
+
+ LASSERT(msg->msg_tx_committed);
+ if (status != 0)
+ goto out;
+
+ counters = the_lnet.ln_counters[msg->msg_tx_cpt];
+ switch (ev->type) {
+ default: /* routed message */
+ LASSERT(msg->msg_routing);
+ LASSERT(msg->msg_rx_committed);
+ LASSERT(ev->type == 0);
+
+ counters->route_length += msg->msg_len;
+ counters->route_count++;
+ goto out;
+
+ case LNET_EVENT_PUT:
+ /* should have been decommitted */
+ LASSERT(!msg->msg_rx_committed);
+ /* overwritten while sending ACK */
+ LASSERT(msg->msg_type == LNET_MSG_ACK);
+ msg->msg_type = LNET_MSG_PUT; /* fix type */
+ break;
+
+ case LNET_EVENT_SEND:
+ LASSERT(!msg->msg_rx_committed);
+ if (msg->msg_type == LNET_MSG_PUT)
+ counters->send_length += msg->msg_len;
+ break;
+
+ case LNET_EVENT_GET:
+ LASSERT(msg->msg_rx_committed);
+ /* overwritten while sending reply, we should never be
+ * here for optimized GET */
+ LASSERT(msg->msg_type == LNET_MSG_REPLY);
+ msg->msg_type = LNET_MSG_GET; /* fix type */
+ break;
+ }
+
+ counters->send_count++;
+ out:
+ lnet_return_tx_credits_locked(msg);
+ msg->msg_tx_committed = 0;
+}
+
+static void
+lnet_msg_decommit_rx(lnet_msg_t *msg, int status)
+{
+ lnet_counters_t *counters;
+ lnet_event_t *ev = &msg->msg_ev;
+
+ LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */
+ LASSERT(msg->msg_rx_committed);
+
+ if (status != 0)
+ goto out;
+
+ counters = the_lnet.ln_counters[msg->msg_rx_cpt];
+ switch (ev->type) {
+ default:
+ LASSERT(ev->type == 0);
+ LASSERT(msg->msg_routing);
+ goto out;
+
+ case LNET_EVENT_ACK:
+ LASSERT(msg->msg_type == LNET_MSG_ACK);
+ break;
+
+ case LNET_EVENT_GET:
+ /* type is "REPLY" if it's an optimized GET on passive side,
+ * because optimized GET will never be committed for sending,
+ * so message type wouldn't be changed back to "GET" by
+ * lnet_msg_decommit_tx(), see details in lnet_parse_get() */
+ LASSERT(msg->msg_type == LNET_MSG_REPLY ||
+ msg->msg_type == LNET_MSG_GET);
+ counters->send_length += msg->msg_wanted;
+ break;
+
+ case LNET_EVENT_PUT:
+ LASSERT(msg->msg_type == LNET_MSG_PUT);
+ break;
+
+ case LNET_EVENT_REPLY:
+ /* type is "GET" if it's an optimized GET on active side,
+ * see details in lnet_create_reply_msg() */
+ LASSERT(msg->msg_type == LNET_MSG_GET ||
+ msg->msg_type == LNET_MSG_REPLY);
+ break;
+ }
+
+ counters->recv_count++;
+ if (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_REPLY)
+ counters->recv_length += msg->msg_wanted;
+
+ out:
+ lnet_return_rx_credits_locked(msg);
+ msg->msg_rx_committed = 0;
+}
+
+void
+lnet_msg_decommit(lnet_msg_t *msg, int cpt, int status)
+{
+ int cpt2 = cpt;
+
+ LASSERT(msg->msg_tx_committed || msg->msg_rx_committed);
+ LASSERT(msg->msg_onactivelist);
+
+ if (msg->msg_tx_committed) { /* always decommit for sending first */
+ LASSERT(cpt == msg->msg_tx_cpt);
+ lnet_msg_decommit_tx(msg, status);
+ }
+
+ if (msg->msg_rx_committed) {
+ /* forwarding msg committed for both receiving and sending */
+ if (cpt != msg->msg_rx_cpt) {
+ lnet_net_unlock(cpt);
+ cpt2 = msg->msg_rx_cpt;
+ lnet_net_lock(cpt2);
+ }
+ lnet_msg_decommit_rx(msg, status);
+ }
+
+ list_del(&msg->msg_activelist);
+ msg->msg_onactivelist = 0;
+
+ the_lnet.ln_counters[cpt2]->msgs_alloc--;
+
+ if (cpt2 != cpt) {
+ lnet_net_unlock(cpt2);
+ lnet_net_lock(cpt);
+ }
+}
+
+void
+lnet_msg_attach_md(lnet_msg_t *msg, lnet_libmd_t *md,
+ unsigned int offset, unsigned int mlen)
+{
+ /* NB: @offset and @mlen are only useful for receiving */
+ /* Here, we attach the MD to the lnet_msg, mark it busy and
+ * decrement its threshold. Come what may, the lnet_msg "owns"
+ * the MD until a call to lnet_msg_detach_md() or lnet_finalize()
+ * signals completion. */
+ LASSERT(!msg->msg_routing);
+
+ msg->msg_md = md;
+ if (msg->msg_receiving) { /* committed for receiving */
+ msg->msg_offset = offset;
+ msg->msg_wanted = mlen;
+ }
+
+ md->md_refcount++;
+ if (md->md_threshold != LNET_MD_THRESH_INF) {
+ LASSERT(md->md_threshold > 0);
+ md->md_threshold--;
+ }
+
+ /* build umd in event */
+ lnet_md2handle(&msg->msg_ev.md_handle, md);
+ lnet_md_deconstruct(md, &msg->msg_ev.md);
+}
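+
+/*
+ * A small self-contained sketch of the threshold rule applied above: every
+ * attach consumes one operation from a finite threshold, while an infinite
+ * threshold (LNET_MD_THRESH_INF) is never decremented.  The helper name is
+ * illustrative only.
+ */
+static inline int
+lnet_md_consume_example(int *threshold)
+{
+ if (*threshold == LNET_MD_THRESH_INF)
+ return 1; /* always usable */
+ if (*threshold == 0)
+ return 0; /* exhausted */
+ (*threshold)--; /* one more operation consumed */
+ return 1;
+}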
+
+void
+lnet_msg_detach_md(lnet_msg_t *msg, int status)
+{
+ lnet_libmd_t *md = msg->msg_md;
+ int unlink;
+
+ /* Now it's safe to drop my caller's ref */
+ md->md_refcount--;
+ LASSERT(md->md_refcount >= 0);
+
+ unlink = lnet_md_unlinkable(md);
+ if (md->md_eq != NULL) {
+ msg->msg_ev.status = status;
+ msg->msg_ev.unlinked = unlink;
+ lnet_eq_enqueue_event(md->md_eq, &msg->msg_ev);
+ }
+
+ if (unlink)
+ lnet_md_unlink(md);
+
+ msg->msg_md = NULL;
+}
+
+static int
+lnet_complete_msg_locked(lnet_msg_t *msg, int cpt)
+{
+ lnet_handle_wire_t ack_wmd;
+ int rc;
+ int status = msg->msg_ev.status;
+
+ LASSERT(msg->msg_onactivelist);
+
+ if (status == 0 && msg->msg_ack) {
+ /* Only send an ACK if the PUT completed successfully */
+
+ lnet_msg_decommit(msg, cpt, 0);
+
+ msg->msg_ack = 0;
+ lnet_net_unlock(cpt);
+
+ LASSERT(msg->msg_ev.type == LNET_EVENT_PUT);
+ LASSERT(!msg->msg_routing);
+
+ ack_wmd = msg->msg_hdr.msg.put.ack_wmd;
+
+ lnet_prep_send(msg, LNET_MSG_ACK, msg->msg_ev.initiator, 0, 0);
+
+ msg->msg_hdr.msg.ack.dst_wmd = ack_wmd;
+ msg->msg_hdr.msg.ack.match_bits = msg->msg_ev.match_bits;
+ msg->msg_hdr.msg.ack.mlength = cpu_to_le32(msg->msg_ev.mlength);
+
+ /* NB: we probably want to use NID of msg::msg_from as 3rd
+ * parameter (router NID) if it's routed message */
+ rc = lnet_send(msg->msg_ev.target.nid, msg, LNET_NID_ANY);
+
+ lnet_net_lock(cpt);
+ /*
+ * NB: the message is committed for sending, so we should return
+ * on success because the LND will finalize it later.
+ *
+ * Also, there is a possibility that the message is committed for
+ * sending and then fails before delivery to the LND,
+ * e.g. ENOMEM; in that case we can't fall through either,
+ * because the CPT for sending can be different from the CPT for
+ * receiving, so we should return back to lnet_finalize()
+ * to make sure we are locking the correct partition.
+ */
+ return rc;
+
+ } else if (status == 0 && /* OK so far */
+ (msg->msg_routing && !msg->msg_sending)) {
+ /* not forwarded */
+ LASSERT(!msg->msg_receiving); /* called back recv already */
+ lnet_net_unlock(cpt);
+
+ rc = lnet_send(LNET_NID_ANY, msg, LNET_NID_ANY);
+
+ lnet_net_lock(cpt);
+ /*
+ * NB: the message is committed for sending, so we should return
+ * on success because the LND will finalize it later.
+ *
+ * Also, there is a possibility that the message is committed for
+ * sending and then fails before delivery to the LND,
+ * e.g. ENOMEM; in that case we can't fall through either:
+ * - The rule is that a message must be decommitted for sending
+ * first if it's committed for both sending and receiving.
+ * - The CPT for sending can be different from the CPT for
+ * receiving, so we should return back to lnet_finalize() to
+ * make sure we are locking the correct partition.
+ */
+ return rc;
+ }
+
+ lnet_msg_decommit(msg, cpt, status);
+ lnet_msg_free_locked(msg);
+ return 0;
+}
+
+void
+lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status)
+{
+ struct lnet_msg_container *container;
+ int my_slot;
+ int cpt;
+ int rc;
+ int i;
+
+ LASSERT(!in_interrupt());
+
+ if (msg == NULL)
+ return;
+#if 0
+ CDEBUG(D_WARNING, "%s msg->%s Flags:%s%s%s%s%s%s%s%s%s%s%s txp %s rxp %s\n",
+ lnet_msgtyp2str(msg->msg_type), libcfs_id2str(msg->msg_target),
+ msg->msg_target_is_router ? "t" : "",
+ msg->msg_routing ? "X" : "",
+ msg->msg_ack ? "A" : "",
+ msg->msg_sending ? "S" : "",
+ msg->msg_receiving ? "R" : "",
+ msg->msg_delayed ? "d" : "",
+ msg->msg_txcredit ? "C" : "",
+ msg->msg_peertxcredit ? "c" : "",
+ msg->msg_rtrcredit ? "F" : "",
+ msg->msg_peerrtrcredit ? "f" : "",
+ msg->msg_onactivelist ? "!" : "",
+ msg->msg_txpeer == NULL ? "<none>" : libcfs_nid2str(msg->msg_txpeer->lp_nid),
+ msg->msg_rxpeer == NULL ? "<none>" : libcfs_nid2str(msg->msg_rxpeer->lp_nid));
+#endif
+ msg->msg_ev.status = status;
+
+ if (msg->msg_md != NULL) {
+ cpt = lnet_cpt_of_cookie(msg->msg_md->md_lh.lh_cookie);
+
+ lnet_res_lock(cpt);
+ lnet_msg_detach_md(msg, status);
+ lnet_res_unlock(cpt);
+ }
+
+ again:
+ rc = 0;
+ if (!msg->msg_tx_committed && !msg->msg_rx_committed) {
+ /* not committed to network yet */
+ LASSERT(!msg->msg_onactivelist);
+ lnet_msg_free(msg);
+ return;
+ }
+
+ /*
+ * NB: routed message can be committed for both receiving and sending,
+ * we should finalize in LIFO order and keep counters correct.
+ * (finalize sending first then finalize receiving)
+ */
+ cpt = msg->msg_tx_committed ? msg->msg_tx_cpt : msg->msg_rx_cpt;
+ lnet_net_lock(cpt);
+
+ container = the_lnet.ln_msg_containers[cpt];
+ list_add_tail(&msg->msg_list, &container->msc_finalizing);
+
+ /* Recursion breaker. Don't complete the message here if I am (or
+ * enough other threads are) already completing messages */
+
+ my_slot = -1;
+ for (i = 0; i < container->msc_nfinalizers; i++) {
+ if (container->msc_finalizers[i] == current)
+ break;
+
+ if (my_slot < 0 && container->msc_finalizers[i] == NULL)
+ my_slot = i;
+ }
+
+ if (i < container->msc_nfinalizers || my_slot < 0) {
+ lnet_net_unlock(cpt);
+ return;
+ }
+
+ container->msc_finalizers[my_slot] = current;
+
+ while (!list_empty(&container->msc_finalizing)) {
+ msg = list_entry(container->msc_finalizing.next,
+ lnet_msg_t, msg_list);
+
+ list_del(&msg->msg_list);
+
+ /* NB drops and regains the lnet lock if it actually does
+ * anything, so my finalizing friends can chomp along too */
+ rc = lnet_complete_msg_locked(msg, cpt);
+ if (rc != 0)
+ break;
+ }
+
+ container->msc_finalizers[my_slot] = NULL;
+ lnet_net_unlock(cpt);
+
+ if (rc != 0)
+ goto again;
+}
+EXPORT_SYMBOL(lnet_finalize);
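+
+/*
+ * A generic sketch of the recursion breaker used in lnet_finalize() above:
+ * a thread drains the finalizing queue only if it is not already doing so
+ * and a free finalizer slot exists; otherwise it just leaves the queued
+ * message for an active finalizer.  The helper name is illustrative only.
+ */
+static int
+lnet_claim_finalizer_slot_example(void **slots, int nslots)
+{
+ int free_slot = -1;
+ int i;
+
+ for (i = 0; i < nslots; i++) {
+ if (slots[i] == current) /* this thread is already finalizing */
+ return -1;
+ if (free_slot < 0 && slots[i] == NULL)
+ free_slot = i;
+ }
+ return free_slot; /* -1 means all slots are busy */
+}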
+
+void
+lnet_msg_container_cleanup(struct lnet_msg_container *container)
+{
+ int count = 0;
+
+ if (container->msc_init == 0)
+ return;
+
+ while (!list_empty(&container->msc_active)) {
+ lnet_msg_t *msg = list_entry(container->msc_active.next,
+ lnet_msg_t, msg_activelist);
+
+ LASSERT(msg->msg_onactivelist);
+ msg->msg_onactivelist = 0;
+ list_del(&msg->msg_activelist);
+ lnet_msg_free(msg);
+ count++;
+ }
+
+ if (count > 0)
+ CERROR("%d active msg on exit\n", count);
+
+ if (container->msc_finalizers != NULL) {
+ LIBCFS_FREE(container->msc_finalizers,
+ container->msc_nfinalizers *
+ sizeof(*container->msc_finalizers));
+ container->msc_finalizers = NULL;
+ }
+#ifdef LNET_USE_LIB_FREELIST
+ lnet_freelist_fini(&container->msc_freelist);
+#endif
+ container->msc_init = 0;
+}
+
+int
+lnet_msg_container_setup(struct lnet_msg_container *container, int cpt)
+{
+ int rc;
+
+ container->msc_init = 1;
+
+ INIT_LIST_HEAD(&container->msc_active);
+ INIT_LIST_HEAD(&container->msc_finalizing);
+
+#ifdef LNET_USE_LIB_FREELIST
+ memset(&container->msc_freelist, 0, sizeof(lnet_freelist_t));
+
+ rc = lnet_freelist_init(&container->msc_freelist,
+ LNET_FL_MAX_MSGS, sizeof(lnet_msg_t));
+ if (rc != 0) {
+ CERROR("Failed to init freelist for message container\n");
+ lnet_msg_container_cleanup(container);
+ return rc;
+ }
+#else
+ rc = 0;
+#endif
+ /* number of CPUs in this CPT */
+ container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt);
+
+ LIBCFS_CPT_ALLOC(container->msc_finalizers, lnet_cpt_table(), cpt,
+ container->msc_nfinalizers *
+ sizeof(*container->msc_finalizers));
+
+ if (container->msc_finalizers == NULL) {
+ CERROR("Failed to allocate message finalizers\n");
+ lnet_msg_container_cleanup(container);
+ return -ENOMEM;
+ }
+
+ return rc;
+}
+
+void
+lnet_msg_containers_destroy(void)
+{
+ struct lnet_msg_container *container;
+ int i;
+
+ if (the_lnet.ln_msg_containers == NULL)
+ return;
+
+ cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers)
+ lnet_msg_container_cleanup(container);
+
+ cfs_percpt_free(the_lnet.ln_msg_containers);
+ the_lnet.ln_msg_containers = NULL;
+}
+
+int
+lnet_msg_containers_create(void)
+{
+ struct lnet_msg_container *container;
+ int rc;
+ int i;
+
+ the_lnet.ln_msg_containers = cfs_percpt_alloc(lnet_cpt_table(),
+ sizeof(*container));
+
+ if (the_lnet.ln_msg_containers == NULL) {
+ CERROR("Failed to allocate cpu-partition data for network\n");
+ return -ENOMEM;
+ }
+
+ cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) {
+ rc = lnet_msg_container_setup(container, i);
+ if (rc != 0) {
+ lnet_msg_containers_destroy();
+ return rc;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/lustre/lnet/lnet/lib-ptl.c b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
new file mode 100644
index 0000000..9b9e7d31
--- /dev/null
+++ b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
@@ -0,0 +1,938 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/lnet/lib-ptl.c
+ *
+ * portal & match routines
+ *
+ * Author: liang@whamcloud.com
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/lnet/lib-lnet.h>
+
+/* NB: add /proc interfaces in upcoming patches */
+int portal_rotor = LNET_PTL_ROTOR_HASH_RT;
+CFS_MODULE_PARM(portal_rotor, "i", int, 0644,
+ "redirect PUTs to different cpu-partitions");
+
+static int
+lnet_ptl_match_type(unsigned int index, lnet_process_id_t match_id,
+ __u64 mbits, __u64 ignore_bits)
+{
+ struct lnet_portal *ptl = the_lnet.ln_portals[index];
+ int unique;
+
+ unique = ignore_bits == 0 &&
+ match_id.nid != LNET_NID_ANY &&
+ match_id.pid != LNET_PID_ANY;
+
+ LASSERT(!lnet_ptl_is_unique(ptl) || !lnet_ptl_is_wildcard(ptl));
+
+ /* prefer to check w/o any lock */
+ if (likely(lnet_ptl_is_unique(ptl) || lnet_ptl_is_wildcard(ptl)))
+ goto match;
+
+ /* unset, new portal */
+ lnet_ptl_lock(ptl);
+ /* check again with lock */
+ if (unlikely(lnet_ptl_is_unique(ptl) || lnet_ptl_is_wildcard(ptl))) {
+ lnet_ptl_unlock(ptl);
+ goto match;
+ }
+
+ /* still not set */
+ if (unique)
+ lnet_ptl_setopt(ptl, LNET_PTL_MATCH_UNIQUE);
+ else
+ lnet_ptl_setopt(ptl, LNET_PTL_MATCH_WILDCARD);
+
+ lnet_ptl_unlock(ptl);
+
+ return 1;
+
+ match:
+ if ((lnet_ptl_is_unique(ptl) && !unique) ||
+ (lnet_ptl_is_wildcard(ptl) && unique))
+ return 0;
+ return 1;
+}
+
+static void
+lnet_ptl_enable_mt(struct lnet_portal *ptl, int cpt)
+{
+ struct lnet_match_table *mtable = ptl->ptl_mtables[cpt];
+ int i;
+
+ /* with hold of both lnet_res_lock(cpt) and lnet_ptl_lock */
+ LASSERT(lnet_ptl_is_wildcard(ptl));
+
+ mtable->mt_enabled = 1;
+
+ ptl->ptl_mt_maps[ptl->ptl_mt_nmaps] = cpt;
+ for (i = ptl->ptl_mt_nmaps - 1; i >= 0; i--) {
+ LASSERT(ptl->ptl_mt_maps[i] != cpt);
+ if (ptl->ptl_mt_maps[i] < cpt)
+ break;
+
+ /* swap to order */
+ ptl->ptl_mt_maps[i + 1] = ptl->ptl_mt_maps[i];
+ ptl->ptl_mt_maps[i] = cpt;
+ }
+
+ ptl->ptl_mt_nmaps++;
+}
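+
+/*
+ * The loop above is a plain insertion into a sorted array of CPT indexes; a
+ * stand-alone version of the same idea, with illustrative names:
+ */
+static void
+lnet_sorted_insert_example(int *map, int *nmaps, int value)
+{
+ int i;
+
+ map[*nmaps] = value;
+ for (i = *nmaps - 1; i >= 0 && map[i] > value; i--) {
+ /* shift larger entries right, keep the array sorted */
+ map[i + 1] = map[i];
+ map[i] = value;
+ }
+ (*nmaps)++;
+}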
+
+static void
+lnet_ptl_disable_mt(struct lnet_portal *ptl, int cpt)
+{
+ struct lnet_match_table *mtable = ptl->ptl_mtables[cpt];
+ int i;
+
+ /* with hold of both lnet_res_lock(cpt) and lnet_ptl_lock */
+ LASSERT(lnet_ptl_is_wildcard(ptl));
+
+ if (LNET_CPT_NUMBER == 1)
+ return; /* never disable the only match-table */
+
+ mtable->mt_enabled = 0;
+
+ LASSERT(ptl->ptl_mt_nmaps > 0 &&
+ ptl->ptl_mt_nmaps <= LNET_CPT_NUMBER);
+
+ /* remove it from mt_maps */
+ ptl->ptl_mt_nmaps--;
+ for (i = 0; i < ptl->ptl_mt_nmaps; i++) {
+ if (ptl->ptl_mt_maps[i] >= cpt) /* overwrite it */
+ ptl->ptl_mt_maps[i] = ptl->ptl_mt_maps[i + 1];
+ }
+}
+
+static int
+lnet_try_match_md(lnet_libmd_t *md,
+ struct lnet_match_info *info, struct lnet_msg *msg)
+{
+ /* ALWAYS called holding the lnet_res_lock, and can't lnet_res_unlock;
+ * lnet_match_blocked_msg() relies on this to avoid races */
+ unsigned int offset;
+ unsigned int mlength;
+ lnet_me_t *me = md->md_me;
+
+ /* MD exhausted */
+ if (lnet_md_exhausted(md))
+ return LNET_MATCHMD_NONE | LNET_MATCHMD_EXHAUSTED;
+
+ /* mismatched MD op */
+ if ((md->md_options & info->mi_opc) == 0)
+ return LNET_MATCHMD_NONE;
+
+ /* mismatched ME nid/pid? */
+ if (me->me_match_id.nid != LNET_NID_ANY &&
+ me->me_match_id.nid != info->mi_id.nid)
+ return LNET_MATCHMD_NONE;
+
+ if (me->me_match_id.pid != LNET_PID_ANY &&
+ me->me_match_id.pid != info->mi_id.pid)
+ return LNET_MATCHMD_NONE;
+
+ /* mismatched ME matchbits? */
+ if (((me->me_match_bits ^ info->mi_mbits) & ~me->me_ignore_bits) != 0)
+ return LNET_MATCHMD_NONE;
+
+ /* Hurrah! This _is_ a match; check it out... */
+
+ if ((md->md_options & LNET_MD_MANAGE_REMOTE) == 0)
+ offset = md->md_offset;
+ else
+ offset = info->mi_roffset;
+
+ if ((md->md_options & LNET_MD_MAX_SIZE) != 0) {
+ mlength = md->md_max_size;
+ LASSERT(md->md_offset + mlength <= md->md_length);
+ } else {
+ mlength = md->md_length - offset;
+ }
+
+ if (info->mi_rlength <= mlength) { /* fits in allowed space */
+ mlength = info->mi_rlength;
+ } else if ((md->md_options & LNET_MD_TRUNCATE) == 0) {
+ /* this packet _really_ is too big */
+ CERROR("Matching packet from %s, match "LPU64
+ " length %d too big: %d left, %d allowed\n",
+ libcfs_id2str(info->mi_id), info->mi_mbits,
+ info->mi_rlength, md->md_length - offset, mlength);
+
+ return LNET_MATCHMD_DROP;
+ }
+
+ /* Commit to this ME/MD */
+ CDEBUG(D_NET, "Incoming %s index %x from %s of "
+ "length %d/%d into md "LPX64" [%d] + %d\n",
+ (info->mi_opc == LNET_MD_OP_PUT) ? "put" : "get",
+ info->mi_portal, libcfs_id2str(info->mi_id), mlength,
+ info->mi_rlength, md->md_lh.lh_cookie, md->md_niov, offset);
+
+ lnet_msg_attach_md(msg, md, offset, mlength);
+ md->md_offset = offset + mlength;
+
+ if (!lnet_md_exhausted(md))
+ return LNET_MATCHMD_OK;
+
+ /* Auto-unlink NOW, so the ME gets unlinked if required.
+ * We bumped md->md_refcount above so the MD just gets flagged
+ * for unlink when it is finalized. */
+ if ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0)
+ lnet_md_unlink(md);
+
+ return LNET_MATCHMD_OK | LNET_MATCHMD_EXHAUSTED;
+}
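+
+/*
+ * The match-bits test above rejects a message when any bit that is not
+ * ignored differs between the ME and the incoming header.  A worked example
+ * with made-up values:
+ *
+ *   match_bits = 0x1234, ignore_bits = 0x00ff
+ *   mbits = 0x12ab:  (0x1234 ^ 0x12ab) & ~0x00ff == 0      -> match
+ *   mbits = 0x22ab:  (0x1234 ^ 0x22ab) & ~0x00ff == 0x3000 -> no match
+ */
+static inline int
+lnet_mbits_match_example(__u64 match_bits, __u64 ignore_bits, __u64 mbits)
+{
+ return ((match_bits ^ mbits) & ~ignore_bits) == 0;
+}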
+
+static struct lnet_match_table *
+lnet_match2mt(struct lnet_portal *ptl, lnet_process_id_t id, __u64 mbits)
+{
+ if (LNET_CPT_NUMBER == 1)
+ return ptl->ptl_mtables[0]; /* the only one */
+
+ /* if it's a unique portal, return match-table hashed by NID */
+ return lnet_ptl_is_unique(ptl) ?
+ ptl->ptl_mtables[lnet_cpt_of_nid(id.nid)] : NULL;
+}
+
+struct lnet_match_table *
+lnet_mt_of_attach(unsigned int index, lnet_process_id_t id,
+ __u64 mbits, __u64 ignore_bits, lnet_ins_pos_t pos)
+{
+ struct lnet_portal *ptl;
+ struct lnet_match_table *mtable;
+
+ /* NB: called w/o lock */
+ LASSERT(index < the_lnet.ln_nportals);
+
+ if (!lnet_ptl_match_type(index, id, mbits, ignore_bits))
+ return NULL;
+
+ ptl = the_lnet.ln_portals[index];
+
+ mtable = lnet_match2mt(ptl, id, mbits);
+ if (mtable != NULL) /* unique portal or only one match-table */
+ return mtable;
+
+ /* it's a wildcard portal */
+ switch (pos) {
+ default:
+ return NULL;
+ case LNET_INS_BEFORE:
+ case LNET_INS_AFTER:
+ /* posted by a non-affinity thread; always hash to a specific
+ * match-table to avoid buffer stealing, which is heavy */
+ return ptl->ptl_mtables[ptl->ptl_index % LNET_CPT_NUMBER];
+ case LNET_INS_LOCAL:
+ /* posted by cpu-affinity thread */
+ return ptl->ptl_mtables[lnet_cpt_current()];
+ }
+}
+
+static struct lnet_match_table *
+lnet_mt_of_match(struct lnet_match_info *info, struct lnet_msg *msg)
+{
+ struct lnet_match_table *mtable;
+ struct lnet_portal *ptl;
+ int nmaps;
+ int rotor;
+ int routed;
+ int cpt;
+
+ /* NB: called w/o lock */
+ LASSERT(info->mi_portal < the_lnet.ln_nportals);
+ ptl = the_lnet.ln_portals[info->mi_portal];
+
+ LASSERT(lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl));
+
+ mtable = lnet_match2mt(ptl, info->mi_id, info->mi_mbits);
+ if (mtable != NULL)
+ return mtable;
+
+ /* it's a wildcard portal */
+ routed = LNET_NIDNET(msg->msg_hdr.src_nid) !=
+ LNET_NIDNET(msg->msg_hdr.dest_nid);
+
+ if (portal_rotor == LNET_PTL_ROTOR_OFF ||
+ (portal_rotor != LNET_PTL_ROTOR_ON && !routed)) {
+ cpt = lnet_cpt_current();
+ if (ptl->ptl_mtables[cpt]->mt_enabled)
+ return ptl->ptl_mtables[cpt];
+ }
+
+ rotor = ptl->ptl_rotor++; /* get round-robin factor */
+ if (portal_rotor == LNET_PTL_ROTOR_HASH_RT && routed)
+ cpt = lnet_cpt_of_nid(msg->msg_hdr.src_nid);
+ else
+ cpt = rotor % LNET_CPT_NUMBER;
+
+ if (!ptl->ptl_mtables[cpt]->mt_enabled) {
+ /* is there any active entry for this portal? */
+ nmaps = ptl->ptl_mt_nmaps;
+ /* map to an active mtable to avoid heavy "stealing" */
+ if (nmaps != 0) {
+ /* NB: there is a possibility that ptl_mt_maps is being
+ * changed because we are not under the protection of
+ * lnet_ptl_lock, but it shouldn't hurt anything */
+ cpt = ptl->ptl_mt_maps[rotor % nmaps];
+ }
+ }
+
+ return ptl->ptl_mtables[cpt];
+}
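+
+/*
+ * A condensed sketch of the rotor selection above: routed messages can be
+ * hashed by source NID so a given peer keeps hitting the same partition,
+ * while everything else is spread round-robin by the incrementing rotor.
+ * The helper name and the hash_routed flag are illustrative only.
+ */
+static int
+lnet_pick_cpt_example(unsigned int *rotor, int ncpts,
+ int hash_routed, lnet_nid_t src_nid)
+{
+ unsigned int r = (*rotor)++;
+
+ if (hash_routed)
+ return lnet_cpt_of_nid(src_nid); /* stable per source */
+ return r % ncpts; /* round robin */
+}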
+
+static int
+lnet_mt_test_exhausted(struct lnet_match_table *mtable, int pos)
+{
+ __u64 *bmap;
+ int i;
+
+ if (!lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]))
+ return 0;
+
+ if (pos < 0) { /* check all bits */
+ for (i = 0; i < LNET_MT_EXHAUSTED_BMAP; i++) {
+ if (mtable->mt_exhausted[i] != (__u64)(-1))
+ return 0;
+ }
+ return 1;
+ }
+
+ LASSERT(pos <= LNET_MT_HASH_IGNORE);
+ /* mtable::mt_mhash[pos] is marked as exhausted or not */
+ bmap = &mtable->mt_exhausted[pos >> LNET_MT_BITS_U64];
+ pos &= (1 << LNET_MT_BITS_U64) - 1;
+
+ return ((*bmap) & (1ULL << pos)) != 0;
+}
+
+static void
+lnet_mt_set_exhausted(struct lnet_match_table *mtable, int pos, int exhausted)
+{
+ __u64 *bmap;
+
+ LASSERT(lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]));
+ LASSERT(pos <= LNET_MT_HASH_IGNORE);
+
+ /* set mtable::mt_mhash[pos] as exhausted/non-exhausted */
+ bmap = &mtable->mt_exhausted[pos >> LNET_MT_BITS_U64];
+ pos &= (1 << LNET_MT_BITS_U64) - 1;
+
+ if (!exhausted)
+ *bmap &= ~(1ULL << pos);
+ else
+ *bmap |= 1ULL << pos;
+}
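+
+/*
+ * Both helpers above split a hash position into a word index
+ * (pos >> LNET_MT_BITS_U64) and a bit index inside that 64-bit word.  A
+ * generic, self-contained version of the same bookkeeping (names are
+ * illustrative only):
+ */
+static inline void
+lnet_bitmap_set_example(__u64 *bmap, int pos, int set)
+{
+ __u64 *word = &bmap[pos / 64];
+ __u64 mask = 1ULL << (pos % 64);
+
+ if (set)
+ *word |= mask;
+ else
+ *word &= ~mask;
+}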
+
+struct list_head *
+lnet_mt_match_head(struct lnet_match_table *mtable,
+ lnet_process_id_t id, __u64 mbits)
+{
+ struct lnet_portal *ptl = the_lnet.ln_portals[mtable->mt_portal];
+
+ if (lnet_ptl_is_wildcard(ptl)) {
+ return &mtable->mt_mhash[mbits & LNET_MT_HASH_MASK];
+ } else {
+ unsigned long hash = mbits + id.nid + id.pid;
+
+ LASSERT(lnet_ptl_is_unique(ptl));
+ hash = cfs_hash_long(hash, LNET_MT_HASH_BITS);
+ return &mtable->mt_mhash[hash];
+ }
+}
+
+int
+lnet_mt_match_md(struct lnet_match_table *mtable,
+ struct lnet_match_info *info, struct lnet_msg *msg)
+{
+ struct list_head *head;
+ lnet_me_t *me;
+ lnet_me_t *tmp;
+ int exhausted = 0;
+ int rc;
+
+ /* any ME with ignore bits? */
+ if (!list_empty(&mtable->mt_mhash[LNET_MT_HASH_IGNORE]))
+ head = &mtable->mt_mhash[LNET_MT_HASH_IGNORE];
+ else
+ head = lnet_mt_match_head(mtable, info->mi_id, info->mi_mbits);
+ again:
+ /* NB: only wildcard portal needs to return LNET_MATCHMD_EXHAUSTED */
+ if (lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal]))
+ exhausted = LNET_MATCHMD_EXHAUSTED;
+
+ list_for_each_entry_safe(me, tmp, head, me_list) {
+ /* ME attached but MD not attached yet */
+ if (me->me_md == NULL)
+ continue;
+
+ LASSERT(me == me->me_md->md_me);
+
+ rc = lnet_try_match_md(me->me_md, info, msg);
+ if ((rc & LNET_MATCHMD_EXHAUSTED) == 0)
+ exhausted = 0; /* mlist is not empty */
+
+ if ((rc & LNET_MATCHMD_FINISH) != 0) {
+ /* don't return EXHAUSTED bit because we don't know
+ * whether the mlist is empty or not */
+ return rc & ~LNET_MATCHMD_EXHAUSTED;
+ }
+ }
+
+ if (exhausted == LNET_MATCHMD_EXHAUSTED) { /* @head is exhausted */
+ lnet_mt_set_exhausted(mtable, head - mtable->mt_mhash, 1);
+ if (!lnet_mt_test_exhausted(mtable, -1))
+ exhausted = 0;
+ }
+
+ if (exhausted == 0 && head == &mtable->mt_mhash[LNET_MT_HASH_IGNORE]) {
+ head = lnet_mt_match_head(mtable, info->mi_id, info->mi_mbits);
+ goto again; /* re-check MEs w/o ignore-bits */
+ }
+
+ if (info->mi_opc == LNET_MD_OP_GET ||
+ !lnet_ptl_is_lazy(the_lnet.ln_portals[info->mi_portal]))
+ return LNET_MATCHMD_DROP | exhausted;
+
+ return LNET_MATCHMD_NONE | exhausted;
+}
+
+static int
+lnet_ptl_match_early(struct lnet_portal *ptl, struct lnet_msg *msg)
+{
+ int rc;
+
+ /* message arrived before any buffer posting on this portal,
+ * simply delay or drop this message */
+ if (likely(lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl)))
+ return 0;
+
+ lnet_ptl_lock(ptl);
+ /* check it again with hold of lock */
+ if (lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl)) {
+ lnet_ptl_unlock(ptl);
+ return 0;
+ }
+
+ if (lnet_ptl_is_lazy(ptl)) {
+ if (msg->msg_rx_ready_delay) {
+ msg->msg_rx_delayed = 1;
+ list_add_tail(&msg->msg_list,
+ &ptl->ptl_msg_delayed);
+ }
+ rc = LNET_MATCHMD_NONE;
+ } else {
+ rc = LNET_MATCHMD_DROP;
+ }
+
+ lnet_ptl_unlock(ptl);
+ return rc;
+}
+
+static int
+lnet_ptl_match_delay(struct lnet_portal *ptl,
+ struct lnet_match_info *info, struct lnet_msg *msg)
+{
+ int first = ptl->ptl_mt_maps[0]; /* read w/o lock */
+ int rc = 0;
+ int i;
+
+ /* steal a buffer from other CPTs, and delay the message if there is
+ * nothing to steal; this function is more expensive than a regular
+ * match, but we don't expect it to happen often */
+ LASSERT(lnet_ptl_is_wildcard(ptl));
+
+ for (i = 0; i < LNET_CPT_NUMBER; i++) {
+ struct lnet_match_table *mtable;
+ int cpt;
+
+ cpt = (first + i) % LNET_CPT_NUMBER;
+ mtable = ptl->ptl_mtables[cpt];
+ if (i != 0 && i != LNET_CPT_NUMBER - 1 && !mtable->mt_enabled)
+ continue;
+
+ lnet_res_lock(cpt);
+ lnet_ptl_lock(ptl);
+
+ if (i == 0) { /* the first try, attach on stealing list */
+ list_add_tail(&msg->msg_list,
+ &ptl->ptl_msg_stealing);
+ }
+
+ if (!list_empty(&msg->msg_list)) { /* on stealing list */
+ rc = lnet_mt_match_md(mtable, info, msg);
+
+ if ((rc & LNET_MATCHMD_EXHAUSTED) != 0 &&
+ mtable->mt_enabled)
+ lnet_ptl_disable_mt(ptl, cpt);
+
+ if ((rc & LNET_MATCHMD_FINISH) != 0)
+ list_del_init(&msg->msg_list);
+
+ } else {
+ /* could be matched by lnet_ptl_attach_md()
+ * which is called by another thread */
+ rc = msg->msg_md == NULL ?
+ LNET_MATCHMD_DROP : LNET_MATCHMD_OK;
+ }
+
+ if (!list_empty(&msg->msg_list) && /* not matched yet */
+ (i == LNET_CPT_NUMBER - 1 || /* the last CPT */
+ ptl->ptl_mt_nmaps == 0 || /* no active CPT */
+ (ptl->ptl_mt_nmaps == 1 && /* the only active CPT */
+ ptl->ptl_mt_maps[0] == cpt))) {
+ /* nothing to steal, delay or drop */
+ list_del_init(&msg->msg_list);
+
+ if (lnet_ptl_is_lazy(ptl)) {
+ msg->msg_rx_delayed = 1;
+ list_add_tail(&msg->msg_list,
+ &ptl->ptl_msg_delayed);
+ rc = LNET_MATCHMD_NONE;
+ } else {
+ rc = LNET_MATCHMD_DROP;
+ }
+ }
+
+ lnet_ptl_unlock(ptl);
+ lnet_res_unlock(cpt);
+
+ if ((rc & LNET_MATCHMD_FINISH) != 0 || msg->msg_rx_delayed)
+ break;
+ }
+
+ return rc;
+}
+
+int
+lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg)
+{
+ struct lnet_match_table *mtable;
+ struct lnet_portal *ptl;
+ int rc;
+
+ CDEBUG(D_NET, "Request from %s of length %d into portal %d "
+ "MB="LPX64"\n", libcfs_id2str(info->mi_id),
+ info->mi_rlength, info->mi_portal, info->mi_mbits);
+
+ if (info->mi_portal >= the_lnet.ln_nportals) {
+ CERROR("Invalid portal %d not in [0-%d]\n",
+ info->mi_portal, the_lnet.ln_nportals);
+ return LNET_MATCHMD_DROP;
+ }
+
+ ptl = the_lnet.ln_portals[info->mi_portal];
+ rc = lnet_ptl_match_early(ptl, msg);
+ if (rc != 0) /* matched or delayed early message */
+ return rc;
+
+ mtable = lnet_mt_of_match(info, msg);
+ lnet_res_lock(mtable->mt_cpt);
+
+ if (the_lnet.ln_shutdown) {
+ rc = LNET_MATCHMD_DROP;
+ goto out1;
+ }
+
+ rc = lnet_mt_match_md(mtable, info, msg);
+ if ((rc & LNET_MATCHMD_EXHAUSTED) != 0 && mtable->mt_enabled) {
+ lnet_ptl_lock(ptl);
+ lnet_ptl_disable_mt(ptl, mtable->mt_cpt);
+ lnet_ptl_unlock(ptl);
+ }
+
+ if ((rc & LNET_MATCHMD_FINISH) != 0) /* matched or dropping */
+ goto out1;
+
+ if (!msg->msg_rx_ready_delay)
+ goto out1;
+
+ LASSERT(lnet_ptl_is_lazy(ptl));
+ LASSERT(!msg->msg_rx_delayed);
+
+ /* NB: we don't expect "delay" to happen often */
+ if (lnet_ptl_is_unique(ptl) || LNET_CPT_NUMBER == 1) {
+ lnet_ptl_lock(ptl);
+
+ msg->msg_rx_delayed = 1;
+ list_add_tail(&msg->msg_list, &ptl->ptl_msg_delayed);
+
+ lnet_ptl_unlock(ptl);
+ lnet_res_unlock(mtable->mt_cpt);
+
+ } else {
+ lnet_res_unlock(mtable->mt_cpt);
+ rc = lnet_ptl_match_delay(ptl, info, msg);
+ }
+
+ if (msg->msg_rx_delayed) {
+ CDEBUG(D_NET,
+ "Delaying %s from %s ptl %d MB "LPX64" off %d len %d\n",
+ info->mi_opc == LNET_MD_OP_PUT ? "PUT" : "GET",
+ libcfs_id2str(info->mi_id), info->mi_portal,
+ info->mi_mbits, info->mi_roffset, info->mi_rlength);
+ }
+ goto out0;
+ out1:
+ lnet_res_unlock(mtable->mt_cpt);
+ out0:
+ /* EXHAUSTED bit is only meaningful for internal functions */
+ return rc & ~LNET_MATCHMD_EXHAUSTED;
+}
+
+void
+lnet_ptl_detach_md(lnet_me_t *me, lnet_libmd_t *md)
+{
+ LASSERT(me->me_md == md && md->md_me == me);
+
+ me->me_md = NULL;
+ md->md_me = NULL;
+}
+
+/* called with lnet_res_lock held */
+void
+lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
+ struct list_head *matches, struct list_head *drops)
+{
+ struct lnet_portal *ptl = the_lnet.ln_portals[me->me_portal];
+ struct lnet_match_table *mtable;
+ struct list_head *head;
+ lnet_msg_t *tmp;
+ lnet_msg_t *msg;
+ int exhausted = 0;
+ int cpt;
+
+ LASSERT(md->md_refcount == 0); /* a brand new MD */
+
+ me->me_md = md;
+ md->md_me = me;
+
+ cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie);
+ mtable = ptl->ptl_mtables[cpt];
+
+ if (list_empty(&ptl->ptl_msg_stealing) &&
+ list_empty(&ptl->ptl_msg_delayed) &&
+ !lnet_mt_test_exhausted(mtable, me->me_pos))
+ return;
+
+ lnet_ptl_lock(ptl);
+ head = &ptl->ptl_msg_stealing;
+ again:
+ list_for_each_entry_safe(msg, tmp, head, msg_list) {
+ struct lnet_match_info info;
+ lnet_hdr_t *hdr;
+ int rc;
+
+ LASSERT(msg->msg_rx_delayed || head == &ptl->ptl_msg_stealing);
+
+ hdr = &msg->msg_hdr;
+ info.mi_id.nid = hdr->src_nid;
+ info.mi_id.pid = hdr->src_pid;
+ info.mi_opc = LNET_MD_OP_PUT;
+ info.mi_portal = hdr->msg.put.ptl_index;
+ info.mi_rlength = hdr->payload_length;
+ info.mi_roffset = hdr->msg.put.offset;
+ info.mi_mbits = hdr->msg.put.match_bits;
+
+ rc = lnet_try_match_md(md, &info, msg);
+
+ exhausted = (rc & LNET_MATCHMD_EXHAUSTED) != 0;
+ if ((rc & LNET_MATCHMD_NONE) != 0) {
+ if (exhausted)
+ break;
+ continue;
+ }
+
+ /* Hurrah! This _is_ a match */
+ LASSERT((rc & LNET_MATCHMD_FINISH) != 0);
+ list_del_init(&msg->msg_list);
+
+ if (head == &ptl->ptl_msg_stealing) {
+ if (exhausted)
+ break;
+ /* stealing thread will handle the message */
+ continue;
+ }
+
+ if ((rc & LNET_MATCHMD_OK) != 0) {
+ list_add_tail(&msg->msg_list, matches);
+
+ CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d "
+ "match "LPU64" offset %d length %d.\n",
+ libcfs_id2str(info.mi_id),
+ info.mi_portal, info.mi_mbits,
+ info.mi_roffset, info.mi_rlength);
+ } else {
+ list_add_tail(&msg->msg_list, drops);
+ }
+
+ if (exhausted)
+ break;
+ }
+
+ if (!exhausted && head == &ptl->ptl_msg_stealing) {
+ head = &ptl->ptl_msg_delayed;
+ goto again;
+ }
+
+ if (lnet_ptl_is_wildcard(ptl) && !exhausted) {
+ lnet_mt_set_exhausted(mtable, me->me_pos, 0);
+ if (!mtable->mt_enabled)
+ lnet_ptl_enable_mt(ptl, cpt);
+ }
+
+ lnet_ptl_unlock(ptl);
+}
+
+void
+lnet_ptl_cleanup(struct lnet_portal *ptl)
+{
+ struct lnet_match_table *mtable;
+ int i;
+
+ if (ptl->ptl_mtables == NULL) /* uninitialized portal */
+ return;
+
+ LASSERT(list_empty(&ptl->ptl_msg_delayed));
+ LASSERT(list_empty(&ptl->ptl_msg_stealing));
+ cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) {
+ struct list_head *mhash;
+ lnet_me_t *me;
+ int j;
+
+ if (mtable->mt_mhash == NULL) /* uninitialized match-table */
+ continue;
+
+ mhash = mtable->mt_mhash;
+ /* cleanup ME */
+ for (j = 0; j < LNET_MT_HASH_SIZE + 1; j++) {
+ while (!list_empty(&mhash[j])) {
+ me = list_entry(mhash[j].next,
+ lnet_me_t, me_list);
+ CERROR("Active ME %p on exit\n", me);
+ list_del(&me->me_list);
+ lnet_me_free(me);
+ }
+ }
+ /* the extra entry is for MEs with ignore bits */
+ LIBCFS_FREE(mhash, sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1));
+ }
+
+ cfs_percpt_free(ptl->ptl_mtables);
+ ptl->ptl_mtables = NULL;
+}
+
+int
+lnet_ptl_setup(struct lnet_portal *ptl, int index)
+{
+ struct lnet_match_table *mtable;
+ struct list_head *mhash;
+ int i;
+ int j;
+
+ ptl->ptl_mtables = cfs_percpt_alloc(lnet_cpt_table(),
+ sizeof(struct lnet_match_table));
+ if (ptl->ptl_mtables == NULL) {
+ CERROR("Failed to create match table for portal %d\n", index);
+ return -ENOMEM;
+ }
+
+ ptl->ptl_index = index;
+ INIT_LIST_HEAD(&ptl->ptl_msg_delayed);
+ INIT_LIST_HEAD(&ptl->ptl_msg_stealing);
+ spin_lock_init(&ptl->ptl_lock);
+ cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) {
+ /* the extra entry is for MEs with ignore bits */
+ LIBCFS_CPT_ALLOC(mhash, lnet_cpt_table(), i,
+ sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1));
+ if (mhash == NULL) {
+ CERROR("Failed to create match hash for portal %d\n",
+ index);
+ goto failed;
+ }
+
+ memset(&mtable->mt_exhausted[0], -1,
+ sizeof(mtable->mt_exhausted[0]) *
+ LNET_MT_EXHAUSTED_BMAP);
+ mtable->mt_mhash = mhash;
+ for (j = 0; j < LNET_MT_HASH_SIZE + 1; j++)
+ INIT_LIST_HEAD(&mhash[j]);
+
+ mtable->mt_portal = index;
+ mtable->mt_cpt = i;
+ }
+
+ return 0;
+ failed:
+ lnet_ptl_cleanup(ptl);
+ return -ENOMEM;
+}
+
+void
+lnet_portals_destroy(void)
+{
+ int i;
+
+ if (the_lnet.ln_portals == NULL)
+ return;
+
+ for (i = 0; i < the_lnet.ln_nportals; i++)
+ lnet_ptl_cleanup(the_lnet.ln_portals[i]);
+
+ cfs_array_free(the_lnet.ln_portals);
+ the_lnet.ln_portals = NULL;
+}
+
+int
+lnet_portals_create(void)
+{
+ int size;
+ int i;
+
+ size = offsetof(struct lnet_portal, ptl_mt_maps[LNET_CPT_NUMBER]);
+
+ the_lnet.ln_nportals = MAX_PORTALS;
+ the_lnet.ln_portals = cfs_array_alloc(the_lnet.ln_nportals, size);
+ if (the_lnet.ln_portals == NULL) {
+ CERROR("Failed to allocate portals table\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < the_lnet.ln_nportals; i++) {
+ if (lnet_ptl_setup(the_lnet.ln_portals[i], i)) {
+ lnet_portals_destroy();
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Turn on the lazy portal attribute. Use with caution!
+ *
+ * This portal attribute only affects incoming PUT requests to the portal,
+ * and is off by default. By default, if there's no matching MD for an
+ * incoming PUT request, it is simply dropped. With the lazy attribute on,
+ * such requests are queued indefinitely until either a matching MD is
+ * posted to the portal or the lazy attribute is turned off.
+ *
+ * Although this prevents requests from being dropped, it should be regarded
+ * as the last line of defense - i.e. users must keep a close watch on the
+ * number of active buffers posted on a lazy portal and post more buffers as
+ * soon as it runs low. This is because delayed requests usually have
+ * detrimental effects on underlying network connections. Due to flow
+ * control mechanisms, a few delayed requests often suffice to bring an
+ * underlying connection to a complete halt.
+ *
+ * There's also a DOS attack risk. If users don't post match-all MDs on a
+ * lazy portal, a malicious peer can easily stop a service by sending some
+ * PUT requests with match bits that won't match any MD. A routed server is
+ * especially vulnerable since the connections to its neighbor routers are
+ * shared among all clients.
+ *
+ * \param portal Index of the portal to enable the lazy attribute on.
+ *
+ * \retval 0 On success.
+ * \retval -EINVAL If \a portal is not a valid index.
+ */
+int
+LNetSetLazyPortal(int portal)
+{
+ struct lnet_portal *ptl;
+
+ if (portal < 0 || portal >= the_lnet.ln_nportals)
+ return -EINVAL;
+
+ CDEBUG(D_NET, "Setting portal %d lazy\n", portal);
+ ptl = the_lnet.ln_portals[portal];
+
+ lnet_res_lock(LNET_LOCK_EX);
+ lnet_ptl_lock(ptl);
+
+ lnet_ptl_setopt(ptl, LNET_PTL_LAZY);
+
+ lnet_ptl_unlock(ptl);
+ lnet_res_unlock(LNET_LOCK_EX);
+
+ return 0;
+}
+EXPORT_SYMBOL(LNetSetLazyPortal);
+
+/**
+ * Turn off the lazy portal attribute. Delayed requests on the portal,
+ * if any, will all be dropped when this function returns.
+ *
+ * \param portal Index of the portal to disable the lazy attribute on.
+ *
+ * \retval 0 On success.
+ * \retval -EINVAL If \a portal is not a valid index.
+ */
+int
+LNetClearLazyPortal(int portal)
+{
+ struct lnet_portal *ptl;
+ LIST_HEAD(zombies);
+
+ if (portal < 0 || portal >= the_lnet.ln_nportals)
+ return -EINVAL;
+
+ ptl = the_lnet.ln_portals[portal];
+
+ lnet_res_lock(LNET_LOCK_EX);
+ lnet_ptl_lock(ptl);
+
+ if (!lnet_ptl_is_lazy(ptl)) {
+ lnet_ptl_unlock(ptl);
+ lnet_res_unlock(LNET_LOCK_EX);
+ return 0;
+ }
+
+ if (the_lnet.ln_shutdown)
+ CWARN("Active lazy portal %d on exit\n", portal);
+ else
+ CDEBUG(D_NET, "clearing portal %d lazy\n", portal);
+
+ /* grab all the blocked messages atomically */
+ list_splice_init(&ptl->ptl_msg_delayed, &zombies);
+
+ lnet_ptl_unsetopt(ptl, LNET_PTL_LAZY);
+
+ lnet_ptl_unlock(ptl);
+ lnet_res_unlock(LNET_LOCK_EX);
+
+ lnet_drop_delayed_msg_list(&zombies, "Clearing lazy portal attr");
+
+ return 0;
+}
+EXPORT_SYMBOL(LNetClearLazyPortal);
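+
+/*
+ * A hypothetical service-side sequence using the two calls above; the
+ * buffer-posting step is elided and the helper name is an assumption made
+ * only for illustration.
+ */
+static int
+lnet_lazy_service_example(int portal)
+{
+ int rc = LNetSetLazyPortal(portal);
+
+ if (rc != 0)
+ return rc;
+
+ /* ... post match-all request buffers (MDs) on @portal and keep
+ * reposting them as they are consumed; requests that arrive while
+ * no buffer matches are delayed instead of dropped ... */
+
+ /* on shutdown: any still-delayed requests are dropped here */
+ return LNetClearLazyPortal(portal);
+}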
diff --git a/drivers/staging/lustre/lnet/lnet/lo.c b/drivers/staging/lustre/lnet/lnet/lo.c
new file mode 100644
index 0000000..670dae3
--- /dev/null
+++ b/drivers/staging/lustre/lnet/lnet/lo.c
@@ -0,0 +1,120 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+#include <linux/lnet/lib-lnet.h>
+
+int
+lolnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
+{
+ LASSERT(!lntmsg->msg_routing);
+ LASSERT(!lntmsg->msg_target_is_router);
+
+ return lnet_parse(ni, &lntmsg->msg_hdr, ni->ni_nid, lntmsg, 0);
+}
+
+int
+lolnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
+ int delayed, unsigned int niov,
+ struct iovec *iov, lnet_kiov_t *kiov,
+ unsigned int offset, unsigned int mlen, unsigned int rlen)
+{
+ lnet_msg_t *sendmsg = private;
+
+ if (lntmsg != NULL) { /* not discarding */
+ if (sendmsg->msg_iov != NULL) {
+ if (iov != NULL)
+ lnet_copy_iov2iov(niov, iov, offset,
+ sendmsg->msg_niov,
+ sendmsg->msg_iov,
+ sendmsg->msg_offset, mlen);
+ else
+ lnet_copy_iov2kiov(niov, kiov, offset,
+ sendmsg->msg_niov,
+ sendmsg->msg_iov,
+ sendmsg->msg_offset, mlen);
+ } else {
+ if (iov != NULL)
+ lnet_copy_kiov2iov(niov, iov, offset,
+ sendmsg->msg_niov,
+ sendmsg->msg_kiov,
+ sendmsg->msg_offset, mlen);
+ else
+ lnet_copy_kiov2kiov(niov, kiov, offset,
+ sendmsg->msg_niov,
+ sendmsg->msg_kiov,
+ sendmsg->msg_offset, mlen);
+ }
+
+ lnet_finalize(ni, lntmsg, 0);
+ }
+
+ lnet_finalize(ni, sendmsg, 0);
+ return 0;
+}
+
+static int lolnd_instanced;
+
+void
+lolnd_shutdown(lnet_ni_t *ni)
+{
+ CDEBUG(D_NET, "shutdown\n");
+ LASSERT(lolnd_instanced);
+
+ lolnd_instanced = 0;
+}
+
+int
+lolnd_startup(lnet_ni_t *ni)
+{
+ LASSERT(ni->ni_lnd == &the_lolnd);
+ LASSERT(!lolnd_instanced);
+ lolnd_instanced = 1;
+
+ return 0;
+}
+
+lnd_t the_lolnd = {
+ /* .lnd_list = */ {&the_lolnd.lnd_list, &the_lolnd.lnd_list},
+ /* .lnd_refcount = */ 0,
+ /* .lnd_type = */ LOLND,
+ /* .lnd_startup = */ lolnd_startup,
+ /* .lnd_shutdown = */ lolnd_shutdown,
+ /* .lnd_ctl = */ NULL,
+ /* .lnd_send = */ lolnd_send,
+ /* .lnd_recv = */ lolnd_recv,
+ /* .lnd_eager_recv = */ NULL,
+ /* .lnd_notify = */ NULL,
+ /* .lnd_accept = */ NULL
+};
diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c
new file mode 100644
index 0000000..afb8175
--- /dev/null
+++ b/drivers/staging/lustre/lnet/lnet/module.c
@@ -0,0 +1,155 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+#include <linux/lnet/lib-lnet.h>
+
+static int config_on_load;
+CFS_MODULE_PARM(config_on_load, "i", int, 0444,
+ "configure network at module load");
+
+static struct mutex lnet_config_mutex;
+
+int
+lnet_configure(void *arg)
+{
+ /* 'arg' is only there so this function can be passed to kthread_run() */
+ int rc = 0;
+
+ LNET_MUTEX_LOCK(&lnet_config_mutex);
+
+ if (!the_lnet.ln_niinit_self) {
+ rc = LNetNIInit(LUSTRE_SRV_LNET_PID);
+ if (rc >= 0) {
+ the_lnet.ln_niinit_self = 1;
+ rc = 0;
+ }
+ }
+
+ LNET_MUTEX_UNLOCK(&lnet_config_mutex);
+ return rc;
+}
+
+int
+lnet_unconfigure(void)
+{
+ int refcount;
+
+ LNET_MUTEX_LOCK(&lnet_config_mutex);
+
+ if (the_lnet.ln_niinit_self) {
+ the_lnet.ln_niinit_self = 0;
+ LNetNIFini();
+ }
+
+ LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
+ refcount = the_lnet.ln_refcount;
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
+
+ LNET_MUTEX_UNLOCK(&lnet_config_mutex);
+ return (refcount == 0) ? 0 : -EBUSY;
+}
+
+int
+lnet_ioctl(unsigned int cmd, struct libcfs_ioctl_data *data)
+{
+ int rc;
+
+ switch (cmd) {
+ case IOC_LIBCFS_CONFIGURE:
+ return lnet_configure(NULL);
+
+ case IOC_LIBCFS_UNCONFIGURE:
+ return lnet_unconfigure();
+
+ default:
+ /* Passing LNET_PID_ANY only gives me a ref if the net is up
+ * already; I'll need it to ensure the net can't go down while
+ * I'm called into it */
+ rc = LNetNIInit(LNET_PID_ANY);
+ if (rc >= 0) {
+ rc = LNetCtl(cmd, data);
+ LNetNIFini();
+ }
+ return rc;
+ }
+}
+
+DECLARE_IOCTL_HANDLER(lnet_ioctl_handler, lnet_ioctl);
+
+int
+init_lnet(void)
+{
+ int rc;
+
+ mutex_init(&lnet_config_mutex);
+
+ rc = LNetInit();
+ if (rc != 0) {
+ CERROR("LNetInit: error %d\n", rc);
+ return rc;
+ }
+
+ rc = libcfs_register_ioctl(&lnet_ioctl_handler);
+ LASSERT(rc == 0);
+
+ if (config_on_load) {
+ /* Have to schedule a separate thread to avoid deadlocking
+ * in modload */
+ (void) kthread_run(lnet_configure, NULL, "lnet_initd");
+ }
+
+ return 0;
+}
+
+void
+fini_lnet(void)
+{
+ int rc;
+
+ rc = libcfs_deregister_ioctl(&lnet_ioctl_handler);
+ LASSERT(rc == 0);
+
+ LNetFini();
+}
+
+MODULE_AUTHOR("Peter J. Braam <braam@clusterfs.com>");
+MODULE_DESCRIPTION("Portals v3.1");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0.0");
+
+module_init(init_lnet);
+module_exit(fini_lnet);
diff --git a/drivers/staging/lustre/lnet/lnet/peer.c b/drivers/staging/lustre/lnet/lnet/peer.c
new file mode 100644
index 0000000..2869776
--- /dev/null
+++ b/drivers/staging/lustre/lnet/lnet/peer.c
@@ -0,0 +1,337 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/lnet/peer.c
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/lnet/lib-lnet.h>
+
+int
+lnet_peer_tables_create(void)
+{
+ struct lnet_peer_table *ptable;
+ struct list_head *hash;
+ int i;
+ int j;
+
+ the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
+ sizeof(*ptable));
+ if (the_lnet.ln_peer_tables == NULL) {
+ CERROR("Failed to allocate cpu-partition peer tables\n");
+ return -ENOMEM;
+ }
+
+ cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
+ INIT_LIST_HEAD(&ptable->pt_deathrow);
+
+ LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
+ LNET_PEER_HASH_SIZE * sizeof(*hash));
+ if (hash == NULL) {
+ CERROR("Failed to create peer hash table\n");
+ lnet_peer_tables_destroy();
+ return -ENOMEM;
+ }
+
+ for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
+ INIT_LIST_HEAD(&hash[j]);
+ ptable->pt_hash = hash; /* sign of initialization */
+ }
+
+ return 0;
+}
+
+void
+lnet_peer_tables_destroy(void)
+{
+ struct lnet_peer_table *ptable;
+ struct list_head *hash;
+ int i;
+ int j;
+
+ if (the_lnet.ln_peer_tables == NULL)
+ return;
+
+ cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
+ hash = ptable->pt_hash;
+ if (hash == NULL) /* not initialized */
+ break;
+
+ LASSERT(list_empty(&ptable->pt_deathrow));
+
+ ptable->pt_hash = NULL;
+ for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
+ LASSERT(list_empty(&hash[j]));
+
+ LIBCFS_FREE(hash, LNET_PEER_HASH_SIZE * sizeof(*hash));
+ }
+
+ cfs_percpt_free(the_lnet.ln_peer_tables);
+ the_lnet.ln_peer_tables = NULL;
+}
+
+void
+lnet_peer_tables_cleanup(void)
+{
+ struct lnet_peer_table *ptable;
+ int i;
+ int j;
+
+ LASSERT(the_lnet.ln_shutdown); /* i.e. no new peers */
+
+ cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
+ lnet_net_lock(i);
+
+ for (j = 0; j < LNET_PEER_HASH_SIZE; j++) {
+ struct list_head *peers = &ptable->pt_hash[j];
+
+ while (!list_empty(peers)) {
+ lnet_peer_t *lp = list_entry(peers->next,
+ lnet_peer_t,
+ lp_hashlist);
+ list_del_init(&lp->lp_hashlist);
+ /* lose hash table's ref */
+ lnet_peer_decref_locked(lp);
+ }
+ }
+
+ lnet_net_unlock(i);
+ }
+
+ cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
+ LIST_HEAD(deathrow);
+ lnet_peer_t *lp;
+
+ lnet_net_lock(i);
+
+ for (j = 3; ptable->pt_number != 0; j++) {
+ lnet_net_unlock(i);
+
+ if ((j & (j - 1)) == 0) {
+ CDEBUG(D_WARNING,
+ "Waiting for %d peers on peer table\n",
+ ptable->pt_number);
+ }
+ cfs_pause(cfs_time_seconds(1) / 2);
+ lnet_net_lock(i);
+ }
+ list_splice_init(&ptable->pt_deathrow, &deathrow);
+
+ lnet_net_unlock(i);
+
+ while (!list_empty(&deathrow)) {
+ lp = list_entry(deathrow.next,
+ lnet_peer_t, lp_hashlist);
+ list_del(&lp->lp_hashlist);
+ LIBCFS_FREE(lp, sizeof(*lp));
+ }
+ }
+}
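+
+/*
+ * The "(j & (j - 1)) == 0" test in the wait loop above fires only when j is
+ * a power of two, so the "Waiting for ..." message is printed on passes 4,
+ * 8, 16, ... rather than every half second.  A stand-alone version of the
+ * same test:
+ */
+static inline int
+lnet_is_power_of_two_example(unsigned int j)
+{
+ return j != 0 && (j & (j - 1)) == 0;
+}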
+
+void
+lnet_destroy_peer_locked(lnet_peer_t *lp)
+{
+ struct lnet_peer_table *ptable;
+
+ LASSERT(lp->lp_refcount == 0);
+ LASSERT(lp->lp_rtr_refcount == 0);
+ LASSERT(list_empty(&lp->lp_txq));
+ LASSERT(list_empty(&lp->lp_hashlist));
+ LASSERT(lp->lp_txqnob == 0);
+
+ ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
+ LASSERT(ptable->pt_number > 0);
+ ptable->pt_number--;
+
+ lnet_ni_decref_locked(lp->lp_ni, lp->lp_cpt);
+ lp->lp_ni = NULL;
+
+ list_add(&lp->lp_hashlist, &ptable->pt_deathrow);
+}
+
+lnet_peer_t *
+lnet_find_peer_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
+{
+ struct list_head *peers;
+ lnet_peer_t *lp;
+
+ LASSERT(!the_lnet.ln_shutdown);
+
+ peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
+ list_for_each_entry(lp, peers, lp_hashlist) {
+ if (lp->lp_nid == nid) {
+ lnet_peer_addref_locked(lp);
+ return lp;
+ }
+ }
+
+ return NULL;
+}
+
+int
+lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt)
+{
+ struct lnet_peer_table *ptable;
+ lnet_peer_t *lp = NULL;
+ lnet_peer_t *lp2;
+ int cpt2;
+ int rc = 0;
+
+ *lpp = NULL;
+ if (the_lnet.ln_shutdown) /* it's shutting down */
+ return -ESHUTDOWN;
+
+ /* cpt can be LNET_LOCK_EX if it's called from router functions */
+ cpt2 = cpt != LNET_LOCK_EX ? cpt : lnet_cpt_of_nid_locked(nid);
+
+ ptable = the_lnet.ln_peer_tables[cpt2];
+ lp = lnet_find_peer_locked(ptable, nid);
+ if (lp != NULL) {
+ *lpp = lp;
+ return 0;
+ }
+
+ if (!list_empty(&ptable->pt_deathrow)) {
+ lp = list_entry(ptable->pt_deathrow.next,
+ lnet_peer_t, lp_hashlist);
+ list_del(&lp->lp_hashlist);
+ }
+
+ /*
+ * take extra refcount in case another thread has shutdown LNet
+ * and destroyed locks and peer-table before I finish the allocation
+ */
+ ptable->pt_number++;
+ lnet_net_unlock(cpt);
+
+ if (lp != NULL)
+ memset(lp, 0, sizeof(*lp));
+ else
+ LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), cpt2, sizeof(*lp));
+
+ if (lp == NULL) {
+ rc = -ENOMEM;
+ lnet_net_lock(cpt);
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&lp->lp_txq);
+ INIT_LIST_HEAD(&lp->lp_rtrq);
+ INIT_LIST_HEAD(&lp->lp_routes);
+
+ lp->lp_notify = 0;
+ lp->lp_notifylnd = 0;
+ lp->lp_notifying = 0;
+ lp->lp_alive_count = 0;
+ lp->lp_timestamp = 0;
+ lp->lp_alive = !lnet_peers_start_down(); /* 1 bit!! */
+ lp->lp_last_alive = cfs_time_current(); /* assumes alive */
+ lp->lp_last_query = 0; /* haven't asked NI yet */
+ lp->lp_ping_timestamp = 0;
+ lp->lp_ping_feats = LNET_PING_FEAT_INVAL;
+ lp->lp_nid = nid;
+ lp->lp_cpt = cpt2;
+ lp->lp_refcount = 2; /* 1 for caller; 1 for hash */
+ lp->lp_rtr_refcount = 0;
+
+ lnet_net_lock(cpt);
+
+ if (the_lnet.ln_shutdown) {
+ rc = -ESHUTDOWN;
+ goto out;
+ }
+
+ lp2 = lnet_find_peer_locked(ptable, nid);
+ if (lp2 != NULL) {
+ *lpp = lp2;
+ goto out;
+ }
+
+ lp->lp_ni = lnet_net2ni_locked(LNET_NIDNET(nid), cpt2);
+ if (lp->lp_ni == NULL) {
+ rc = -EHOSTUNREACH;
+ goto out;
+ }
+
+ lp->lp_txcredits =
+ lp->lp_mintxcredits = lp->lp_ni->ni_peertxcredits;
+ lp->lp_rtrcredits =
+ lp->lp_minrtrcredits = lnet_peer_buffer_credits(lp->lp_ni);
+
+ list_add_tail(&lp->lp_hashlist,
+ &ptable->pt_hash[lnet_nid2peerhash(nid)]);
+ ptable->pt_version++;
+ *lpp = lp;
+
+ return 0;
+out:
+ if (lp != NULL)
+ list_add(&lp->lp_hashlist, &ptable->pt_deathrow);
+ ptable->pt_number--;
+ return rc;
+}
+
+void
+lnet_debug_peer(lnet_nid_t nid)
+{
+ char *aliveness = "NA";
+ lnet_peer_t *lp;
+ int rc;
+ int cpt;
+
+ cpt = lnet_cpt_of_nid(nid);
+ lnet_net_lock(cpt);
+
+ rc = lnet_nid2peer_locked(&lp, nid, cpt);
+ if (rc != 0) {
+ lnet_net_unlock(cpt);
+ CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
+ return;
+ }
+
+ if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
+ aliveness = lp->lp_alive ? "up" : "down";
+
+ CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
+ libcfs_nid2str(lp->lp_nid), lp->lp_refcount,
+ aliveness, lp->lp_ni->ni_peertxcredits,
+ lp->lp_rtrcredits, lp->lp_minrtrcredits,
+ lp->lp_txcredits, lp->lp_mintxcredits, lp->lp_txqnob);
+
+ lnet_peer_decref_locked(lp);
+
+ lnet_net_unlock(cpt);
+}
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
new file mode 100644
index 0000000..a326ce0
--- /dev/null
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -0,0 +1,1694 @@
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ *
+ * This file is part of Portals
+ * http://sourceforge.net/projects/sandiaportals/
+ *
+ * Portals is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Portals is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Portals; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+#include <linux/lnet/lib-lnet.h>
+
+#if defined(LNET_ROUTER)
+
+#define LNET_NRB_TINY_MIN 512 /* min value for each CPT */
+#define LNET_NRB_TINY (LNET_NRB_TINY_MIN * 4)
+#define LNET_NRB_SMALL_MIN 4096 /* min value for each CPT */
+#define LNET_NRB_SMALL (LNET_NRB_SMALL_MIN * 4)
+#define LNET_NRB_LARGE_MIN 256 /* min value for each CPT */
+#define LNET_NRB_LARGE (LNET_NRB_LARGE_MIN * 4)
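+
+/*
+ * The non-_MIN values above are node-wide defaults; lnet_nrb_*_calculate()
+ * divides the configured (or default) total by LNET_CPT_NUMBER and clamps
+ * the result to the per-CPT minimum.
+ */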
+
+static char *forwarding = "";
+CFS_MODULE_PARM(forwarding, "s", charp, 0444,
+ "Explicitly enable/disable forwarding between networks");
+
+static int tiny_router_buffers;
+CFS_MODULE_PARM(tiny_router_buffers, "i", int, 0444,
+ "# of 0 payload messages to buffer in the router");
+static int small_router_buffers;
+CFS_MODULE_PARM(small_router_buffers, "i", int, 0444,
+ "# of small (1 page) messages to buffer in the router");
+static int large_router_buffers;
+CFS_MODULE_PARM(large_router_buffers, "i", int, 0444,
+ "# of large messages to buffer in the router");
+static int peer_buffer_credits = 0;
+CFS_MODULE_PARM(peer_buffer_credits, "i", int, 0444,
+ "# router buffer credits per peer");
+
+static int auto_down = 1;
+CFS_MODULE_PARM(auto_down, "i", int, 0444,
+ "Automatically mark peers down on comms error");
+
+int
+lnet_peer_buffer_credits(lnet_ni_t *ni)
+{
+ /* NI option overrides LNet default */
+ if (ni->ni_peerrtrcredits > 0)
+ return ni->ni_peerrtrcredits;
+ if (peer_buffer_credits > 0)
+ return peer_buffer_credits;
+
+ /* As an approximation, allow this peer the same number of router
+ * buffers as it is allowed outstanding sends */
+ return ni->ni_peertxcredits;
+}
+
+/* forward ref's */
+static int lnet_router_checker(void *);
+#else
+
+int
+lnet_peer_buffer_credits(lnet_ni_t *ni)
+{
+ return 0;
+}
+
+#endif
+
+static int check_routers_before_use = 0;
+CFS_MODULE_PARM(check_routers_before_use, "i", int, 0444,
+ "Assume routers are down and ping them before use");
+
+static int avoid_asym_router_failure = 1;
+CFS_MODULE_PARM(avoid_asym_router_failure, "i", int, 0644,
+ "Avoid asymmetrical router failures (0 to disable)");
+
+static int dead_router_check_interval = 60;
+CFS_MODULE_PARM(dead_router_check_interval, "i", int, 0644,
+ "Seconds between dead router health checks (<= 0 to disable)");
+
+static int live_router_check_interval = 60;
+CFS_MODULE_PARM(live_router_check_interval, "i", int, 0644,
+ "Seconds between live router health checks (<= 0 to disable)");
+
+static int router_ping_timeout = 50;
+CFS_MODULE_PARM(router_ping_timeout, "i", int, 0644,
+ "Seconds to wait for the reply to a router health query");
+
+int
+lnet_peers_start_down(void)
+{
+ return check_routers_before_use;
+}
+
+void
+lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, cfs_time_t when)
+{
+ if (cfs_time_before(when, lp->lp_timestamp)) { /* out of date information */
+ CDEBUG(D_NET, "Out of date\n");
+ return;
+ }
+
+ lp->lp_timestamp = when; /* update timestamp */
+ lp->lp_ping_deadline = 0; /* disable ping timeout */
+
+ if (lp->lp_alive_count != 0 && /* got old news */
+ (!lp->lp_alive) == (!alive)) { /* new date for old news */
+ CDEBUG(D_NET, "Old news\n");
+ return;
+ }
+
+ /* Flag that notification is outstanding */
+
+ lp->lp_alive_count++;
+ lp->lp_alive = !(!alive); /* 1 bit! */
+ lp->lp_notify = 1;
+ lp->lp_notifylnd |= notifylnd;
+ if (lp->lp_alive)
+ lp->lp_ping_feats = LNET_PING_FEAT_INVAL; /* reset */
+
+ CDEBUG(D_NET, "set %s %d\n", libcfs_nid2str(lp->lp_nid), alive);
+}
+
+void
+lnet_ni_notify_locked(lnet_ni_t *ni, lnet_peer_t *lp)
+{
+ int alive;
+ int notifylnd;
+
+ /* Notify only in 1 thread at any time to ensure ordered notification.
+ * NB individual events can be missed; the only guarantee is that you
+ * always get the most recent news */
+
+ if (lp->lp_notifying)
+ return;
+
+ lp->lp_notifying = 1;
+
+ while (lp->lp_notify) {
+ alive = lp->lp_alive;
+ notifylnd = lp->lp_notifylnd;
+
+ lp->lp_notifylnd = 0;
+ lp->lp_notify = 0;
+
+ if (notifylnd && ni->ni_lnd->lnd_notify != NULL) {
+ lnet_net_unlock(lp->lp_cpt);
+
+ /* A new notification could happen now; I'll handle it
+ * when control returns to me */
+
+ (ni->ni_lnd->lnd_notify)(ni, lp->lp_nid, alive);
+
+ lnet_net_lock(lp->lp_cpt);
+ }
+ }
+
+ lp->lp_notifying = 0;
+}
+
+
+static void
+lnet_rtr_addref_locked(lnet_peer_t *lp)
+{
+ LASSERT(lp->lp_refcount > 0);
+ LASSERT(lp->lp_rtr_refcount >= 0);
+
+ /* lnet_net_lock must be exclusively locked */
+ lp->lp_rtr_refcount++;
+ if (lp->lp_rtr_refcount == 1) {
+ struct list_head *pos;
+
+ /* a simple insertion sort */
+ list_for_each_prev(pos, &the_lnet.ln_routers) {
+ lnet_peer_t *rtr = list_entry(pos, lnet_peer_t,
+ lp_rtr_list);
+
+ if (rtr->lp_nid < lp->lp_nid)
+ break;
+ }
+
+ list_add(&lp->lp_rtr_list, pos);
+ /* addref for the_lnet.ln_routers */
+ lnet_peer_addref_locked(lp);
+ the_lnet.ln_routers_version++;
+ }
+}
+
+static void
+lnet_rtr_decref_locked(lnet_peer_t *lp)
+{
+ LASSERT(lp->lp_refcount > 0);
+ LASSERT(lp->lp_rtr_refcount > 0);
+
+ /* lnet_net_lock must be exclusively locked */
+ lp->lp_rtr_refcount--;
+ if (lp->lp_rtr_refcount == 0) {
+ LASSERT(list_empty(&lp->lp_routes));
+
+ if (lp->lp_rcd != NULL) {
+ list_add(&lp->lp_rcd->rcd_list,
+ &the_lnet.ln_rcd_deathrow);
+ lp->lp_rcd = NULL;
+ }
+
+ list_del(&lp->lp_rtr_list);
+ /* decref for the_lnet.ln_routers */
+ lnet_peer_decref_locked(lp);
+ the_lnet.ln_routers_version++;
+ }
+}
+
+lnet_remotenet_t *
+lnet_find_net_locked (__u32 net)
+{
+ lnet_remotenet_t *rnet;
+ struct list_head *tmp;
+ struct list_head *rn_list;
+
+ LASSERT(!the_lnet.ln_shutdown);
+
+ rn_list = lnet_net2rnethash(net);
+ list_for_each(tmp, rn_list) {
+ rnet = list_entry(tmp, lnet_remotenet_t, lrn_list);
+
+ if (rnet->lrn_net == net)
+ return rnet;
+ }
+ return NULL;
+}
+
+static void lnet_shuffle_seed(void)
+{
+ static int seeded = 0;
+ int lnd_type, seed[2];
+ struct timeval tv;
+ lnet_ni_t *ni;
+ struct list_head *tmp;
+
+ if (seeded)
+ return;
+
+ cfs_get_random_bytes(seed, sizeof(seed));
+
+ /* Nodes with small feet have little entropy
+ * the NID for this node gives the most entropy in the low bits */
+ list_for_each(tmp, &the_lnet.ln_nis) {
+ ni = list_entry(tmp, lnet_ni_t, ni_list);
+ lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
+
+ if (lnd_type != LOLND)
+ seed[0] ^= (LNET_NIDADDR(ni->ni_nid) | lnd_type);
+ }
+
+ do_gettimeofday(&tv);
+ cfs_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);
+ seeded = 1;
+ return;
+}
+
+/* NB expects LNET_LOCK held */
+void
+lnet_add_route_to_rnet (lnet_remotenet_t *rnet, lnet_route_t *route)
+{
+ unsigned int len = 0;
+ unsigned int offset = 0;
+ struct list_head *e;
+
+ lnet_shuffle_seed();
+
+ list_for_each (e, &rnet->lrn_routes) {
+ len++;
+ }
+
+ /* len+1 positions to add a new entry, also prevents division by 0 */
+ offset = cfs_rand() % (len + 1);
+ list_for_each (e, &rnet->lrn_routes) {
+ if (offset == 0)
+ break;
+ offset--;
+ }
+ list_add(&route->lr_list, e);
+ list_add(&route->lr_gwlist, &route->lr_gateway->lp_routes);
+
+ the_lnet.ln_remote_nets_version++;
+ lnet_rtr_addref_locked(route->lr_gateway);
+}
+
+int
+lnet_add_route (__u32 net, unsigned int hops, lnet_nid_t gateway)
+{
+ struct list_head *e;
+ lnet_remotenet_t *rnet;
+ lnet_remotenet_t *rnet2;
+ lnet_route_t *route;
+ lnet_ni_t *ni;
+ int add_route;
+ int rc;
+
+ CDEBUG(D_NET, "Add route: net %s hops %u gw %s\n",
+ libcfs_net2str(net), hops, libcfs_nid2str(gateway));
+
+ if (gateway == LNET_NID_ANY ||
+ LNET_NETTYP(LNET_NIDNET(gateway)) == LOLND ||
+ net == LNET_NIDNET(LNET_NID_ANY) ||
+ LNET_NETTYP(net) == LOLND ||
+ LNET_NIDNET(gateway) == net ||
+ hops < 1 || hops > 255)
+ return (-EINVAL);
+
+ if (lnet_islocalnet(net)) /* it's a local network */
+ return 0; /* ignore the route entry */
+
+ /* Assume net, route, all new */
+ LIBCFS_ALLOC(route, sizeof(*route));
+ LIBCFS_ALLOC(rnet, sizeof(*rnet));
+ if (route == NULL || rnet == NULL) {
+ CERROR("Out of memory creating route %s %d %s\n",
+ libcfs_net2str(net), hops, libcfs_nid2str(gateway));
+ if (route != NULL)
+ LIBCFS_FREE(route, sizeof(*route));
+ if (rnet != NULL)
+ LIBCFS_FREE(rnet, sizeof(*rnet));
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&rnet->lrn_routes);
+ rnet->lrn_net = net;
+ route->lr_hops = hops;
+ route->lr_net = net;
+
+ lnet_net_lock(LNET_LOCK_EX);
+
+ rc = lnet_nid2peer_locked(&route->lr_gateway, gateway, LNET_LOCK_EX);
+ if (rc != 0) {
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ LIBCFS_FREE(route, sizeof(*route));
+ LIBCFS_FREE(rnet, sizeof(*rnet));
+
+ if (rc == -EHOSTUNREACH) { /* gateway is not on a local net */
+ return 0; /* ignore the route entry */
+ } else {
+ CERROR("Error %d creating route %s %d %s\n", rc,
+ libcfs_net2str(net), hops,
+ libcfs_nid2str(gateway));
+ }
+ return rc;
+ }
+
+ LASSERT (!the_lnet.ln_shutdown);
+
+ rnet2 = lnet_find_net_locked(net);
+ if (rnet2 == NULL) {
+ /* new network */
+ list_add_tail(&rnet->lrn_list, lnet_net2rnethash(net));
+ rnet2 = rnet;
+ }
+
+ /* Search for a duplicate route (adding one is a NOOP) */
+ add_route = 1;
+ list_for_each (e, &rnet2->lrn_routes) {
+ lnet_route_t *route2 = list_entry(e, lnet_route_t, lr_list);
+
+ if (route2->lr_gateway == route->lr_gateway) {
+ add_route = 0;
+ break;
+ }
+
+ /* our lookups must be true */
+ LASSERT (route2->lr_gateway->lp_nid != gateway);
+ }
+
+ if (add_route) {
+ lnet_peer_addref_locked(route->lr_gateway); /* +1 for notify */
+ lnet_add_route_to_rnet(rnet2, route);
+
+ ni = route->lr_gateway->lp_ni;
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ /* XXX Assume alive */
+ if (ni->ni_lnd->lnd_notify != NULL)
+ (ni->ni_lnd->lnd_notify)(ni, gateway, 1);
+
+ lnet_net_lock(LNET_LOCK_EX);
+ }
+
+ /* -1 for notify or !add_route */
+ lnet_peer_decref_locked(route->lr_gateway);
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ if (!add_route)
+ LIBCFS_FREE(route, sizeof(*route));
+
+ if (rnet != rnet2)
+ LIBCFS_FREE(rnet, sizeof(*rnet));
+
+ return 0;
+}
+
+int
+lnet_check_routes(void)
+{
+ lnet_remotenet_t *rnet;
+ lnet_route_t *route;
+ lnet_route_t *route2;
+ struct list_head *e1;
+ struct list_head *e2;
+ int cpt;
+ struct list_head *rn_list;
+ int i;
+
+ cpt = lnet_net_lock_current();
+
+ for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
+ rn_list = &the_lnet.ln_remote_nets_hash[i];
+ list_for_each(e1, rn_list) {
+ rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
+
+ route2 = NULL;
+ list_for_each(e2, &rnet->lrn_routes) {
+ lnet_nid_t nid1;
+ lnet_nid_t nid2;
+ int net;
+
+ route = list_entry(e2, lnet_route_t,
+ lr_list);
+
+ if (route2 == NULL) {
+ route2 = route;
+ continue;
+ }
+
+ if (route->lr_gateway->lp_ni ==
+ route2->lr_gateway->lp_ni)
+ continue;
+
+ nid1 = route->lr_gateway->lp_nid;
+ nid2 = route2->lr_gateway->lp_nid;
+ net = rnet->lrn_net;
+
+ lnet_net_unlock(cpt);
+
+ CERROR("Routes to %s via %s and %s not "
+ "supported\n",
+ libcfs_net2str(net),
+ libcfs_nid2str(nid1),
+ libcfs_nid2str(nid2));
+ return -EINVAL;
+ }
+ }
+ }
+
+ lnet_net_unlock(cpt);
+ return 0;
+}
+
+int
+lnet_del_route(__u32 net, lnet_nid_t gw_nid)
+{
+ struct lnet_peer *gateway;
+ lnet_remotenet_t *rnet;
+ lnet_route_t *route;
+ struct list_head *e1;
+ struct list_head *e2;
+ int rc = -ENOENT;
+ struct list_head *rn_list;
+ int idx = 0;
+
+ CDEBUG(D_NET, "Del route: net %s : gw %s\n",
+ libcfs_net2str(net), libcfs_nid2str(gw_nid));
+
+ /* NB Caller may specify either all routes via the given gateway
+ * or a specific route entry (actual NIDs) */
+
+ lnet_net_lock(LNET_LOCK_EX);
+ if (net == LNET_NIDNET(LNET_NID_ANY))
+ rn_list = &the_lnet.ln_remote_nets_hash[0];
+ else
+ rn_list = lnet_net2rnethash(net);
+
+ again:
+ list_for_each(e1, rn_list) {
+ rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
+
+ if (!(net == LNET_NIDNET(LNET_NID_ANY) ||
+ net == rnet->lrn_net))
+ continue;
+
+ list_for_each(e2, &rnet->lrn_routes) {
+ route = list_entry(e2, lnet_route_t, lr_list);
+
+ gateway = route->lr_gateway;
+ if (!(gw_nid == LNET_NID_ANY ||
+ gw_nid == gateway->lp_nid))
+ continue;
+
+ list_del(&route->lr_list);
+ list_del(&route->lr_gwlist);
+ the_lnet.ln_remote_nets_version++;
+
+ if (list_empty(&rnet->lrn_routes))
+ list_del(&rnet->lrn_list);
+ else
+ rnet = NULL;
+
+ lnet_rtr_decref_locked(gateway);
+ lnet_peer_decref_locked(gateway);
+
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ LIBCFS_FREE(route, sizeof(*route));
+
+ if (rnet != NULL)
+ LIBCFS_FREE(rnet, sizeof(*rnet));
+
+ rc = 0;
+ lnet_net_lock(LNET_LOCK_EX);
+ goto again;
+ }
+ }
+
+ if (net == LNET_NIDNET(LNET_NID_ANY) &&
+ ++idx < LNET_REMOTE_NETS_HASH_SIZE) {
+ rn_list = &the_lnet.ln_remote_nets_hash[idx];
+ goto again;
+ }
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ return rc;
+}
+
+void
+lnet_destroy_routes (void)
+{
+ lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY);
+}
+
+int
+lnet_get_route(int idx, __u32 *net, __u32 *hops,
+ lnet_nid_t *gateway, __u32 *alive)
+{
+ struct list_head *e1;
+ struct list_head *e2;
+ lnet_remotenet_t *rnet;
+ lnet_route_t *route;
+ int cpt;
+ int i;
+ struct list_head *rn_list;
+
+ cpt = lnet_net_lock_current();
+
+ for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
+ rn_list = &the_lnet.ln_remote_nets_hash[i];
+ list_for_each(e1, rn_list) {
+ rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
+
+ list_for_each(e2, &rnet->lrn_routes) {
+ route = list_entry(e2, lnet_route_t,
+ lr_list);
+
+ if (idx-- == 0) {
+ *net = rnet->lrn_net;
+ *hops = route->lr_hops;
+ *gateway = route->lr_gateway->lp_nid;
+ *alive = route->lr_gateway->lp_alive;
+ lnet_net_unlock(cpt);
+ return 0;
+ }
+ }
+ }
+ }
+
+ lnet_net_unlock(cpt);
+ return -ENOENT;
+}
+
+void
+lnet_swap_pinginfo(lnet_ping_info_t *info)
+{
+ int i;
+ lnet_ni_status_t *stat;
+
+ __swab32s(&info->pi_magic);
+ __swab32s(&info->pi_features);
+ __swab32s(&info->pi_pid);
+ __swab32s(&info->pi_nnis);
+ for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) {
+ stat = &info->pi_ni[i];
+ __swab64s(&stat->ns_nid);
+ __swab32s(&stat->ns_status);
+ }
+ return;
+}
+
+/**
+ * parse router-checker pinginfo, record number of down NIs for remote
+ * networks on that router.
+ */
+static void
+lnet_parse_rc_info(lnet_rc_data_t *rcd)
+{
+ lnet_ping_info_t *info = rcd->rcd_pinginfo;
+ struct lnet_peer *gw = rcd->rcd_gateway;
+ lnet_route_t *rtr;
+
+ if (!gw->lp_alive)
+ return;
+
+ if (info->pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
+ lnet_swap_pinginfo(info);
+
+ /* NB always racing with network! */
+ if (info->pi_magic != LNET_PROTO_PING_MAGIC) {
+ CDEBUG(D_NET, "%s: Unexpected magic %08x\n",
+ libcfs_nid2str(gw->lp_nid), info->pi_magic);
+ gw->lp_ping_feats = LNET_PING_FEAT_INVAL;
+ return;
+ }
+
+ gw->lp_ping_feats = info->pi_features;
+ if ((gw->lp_ping_feats & LNET_PING_FEAT_MASK) == 0) {
+ CDEBUG(D_NET, "%s: Unexpected features 0x%x\n",
+ libcfs_nid2str(gw->lp_nid), gw->lp_ping_feats);
+ return; /* nothing I can understand */
+ }
+
+ if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) == 0)
+ return; /* can't carry NI status info */
+
+ list_for_each_entry(rtr, &gw->lp_routes, lr_gwlist) {
+ int ptl_status = LNET_NI_STATUS_INVALID;
+ int down = 0;
+ int up = 0;
+ int i;
+
+ for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) {
+ lnet_ni_status_t *stat = &info->pi_ni[i];
+ lnet_nid_t nid = stat->ns_nid;
+
+ if (nid == LNET_NID_ANY) {
+ CDEBUG(D_NET, "%s: unexpected LNET_NID_ANY\n",
+ libcfs_nid2str(gw->lp_nid));
+ gw->lp_ping_feats = LNET_PING_FEAT_INVAL;
+ return;
+ }
+
+ if (LNET_NETTYP(LNET_NIDNET(nid)) == LOLND)
+ continue;
+
+ if (stat->ns_status == LNET_NI_STATUS_DOWN) {
+ if (LNET_NETTYP(LNET_NIDNET(nid)) != PTLLND)
+ down++;
+ else if (ptl_status != LNET_NI_STATUS_UP)
+ ptl_status = LNET_NI_STATUS_DOWN;
+ continue;
+ }
+
+ if (stat->ns_status == LNET_NI_STATUS_UP) {
+ if (LNET_NIDNET(nid) == rtr->lr_net) {
+ up = 1;
+ break;
+ }
+ /* ptl NIs are considered down only when
+ * they're all down */
+ if (LNET_NETTYP(LNET_NIDNET(nid)) == PTLLND)
+ ptl_status = LNET_NI_STATUS_UP;
+ continue;
+ }
+
+ CDEBUG(D_NET, "%s: Unexpected status 0x%x\n",
+ libcfs_nid2str(gw->lp_nid), stat->ns_status);
+ gw->lp_ping_feats = LNET_PING_FEAT_INVAL;
+ return;
+ }
+
+ if (up) { /* ignore downed NIs if NI for dest network is up */
+ rtr->lr_downis = 0;
+ continue;
+ }
+ rtr->lr_downis = down + (ptl_status == LNET_NI_STATUS_DOWN);
+ }
+}
+
+static void
+lnet_router_checker_event(lnet_event_t *event)
+{
+ lnet_rc_data_t *rcd = event->md.user_ptr;
+ struct lnet_peer *lp;
+
+ LASSERT(rcd != NULL);
+
+ if (event->unlinked) {
+ LNetInvalidateHandle(&rcd->rcd_mdh);
+ return;
+ }
+
+ LASSERT(event->type == LNET_EVENT_SEND ||
+ event->type == LNET_EVENT_REPLY);
+
+ lp = rcd->rcd_gateway;
+ LASSERT(lp != NULL);
+
+ /* NB: this is called while holding lnet_res_lock; a few places
+ * need to hold both locks at the same time, so take care of the
+ * lock ordering */
+ lnet_net_lock(lp->lp_cpt);
+ if (!lnet_isrouter(lp) || lp->lp_rcd != rcd) {
+ /* ignore if no longer a router or rcd is replaced */
+ goto out;
+ }
+
+ if (event->type == LNET_EVENT_SEND) {
+ lp->lp_ping_notsent = 0;
+ if (event->status == 0)
+ goto out;
+ }
+
+ /* LNET_EVENT_REPLY */
+ /* A successful REPLY means the router is up. If _any_ comms
+ * to the router fail I assume it's down (this will happen if
+ * we ping alive routers to try to detect router death before
+ * apps get burned). */
+
+ lnet_notify_locked(lp, 1, (event->status == 0), cfs_time_current());
+ /* The router checker will wake up very shortly and do the
+ * actual notification.
+ * XXX If 'lp' stops being a router before then, it will still
+ * have the notification pending!!! */
+
+ if (avoid_asym_router_failure && event->status == 0)
+ lnet_parse_rc_info(rcd);
+
+ out:
+ lnet_net_unlock(lp->lp_cpt);
+}
+
+void
+lnet_wait_known_routerstate(void)
+{
+ lnet_peer_t *rtr;
+ struct list_head *entry;
+ int all_known;
+
+ LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
+
+ for (;;) {
+ int cpt = lnet_net_lock_current();
+
+ all_known = 1;
+ list_for_each (entry, &the_lnet.ln_routers) {
+ rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
+
+ if (rtr->lp_alive_count == 0) {
+ all_known = 0;
+ break;
+ }
+ }
+
+ lnet_net_unlock(cpt);
+
+ if (all_known)
+ return;
+
+ cfs_pause(cfs_time_seconds(1));
+ }
+}
+
+void
+lnet_update_ni_status_locked(void)
+{
+ lnet_ni_t *ni;
+ long now;
+ int timeout;
+
+ LASSERT(the_lnet.ln_routing);
+
+ timeout = router_ping_timeout +
+ MAX(live_router_check_interval, dead_router_check_interval);
+
+ now = cfs_time_current_sec();
+ list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
+ if (ni->ni_lnd->lnd_type == LOLND)
+ continue;
+
+ if (now < ni->ni_last_alive + timeout)
+ continue;
+
+ lnet_ni_lock(ni);
+ /* re-check with lock */
+ if (now < ni->ni_last_alive + timeout) {
+ lnet_ni_unlock(ni);
+ continue;
+ }
+
+ LASSERT(ni->ni_status != NULL);
+
+ if (ni->ni_status->ns_status != LNET_NI_STATUS_DOWN) {
+ CDEBUG(D_NET, "NI(%s:%d) status changed to down\n",
+ libcfs_nid2str(ni->ni_nid), timeout);
+ /* NB: so far, this is the only place to set
+ * NI status to "down" */
+ ni->ni_status->ns_status = LNET_NI_STATUS_DOWN;
+ }
+ lnet_ni_unlock(ni);
+ }
+}
+
+void
+lnet_destroy_rc_data(lnet_rc_data_t *rcd)
+{
+ LASSERT(list_empty(&rcd->rcd_list));
+ /* detached from network */
+ LASSERT(LNetHandleIsInvalid(rcd->rcd_mdh));
+
+ if (rcd->rcd_gateway != NULL) {
+ int cpt = rcd->rcd_gateway->lp_cpt;
+
+ lnet_net_lock(cpt);
+ lnet_peer_decref_locked(rcd->rcd_gateway);
+ lnet_net_unlock(cpt);
+ }
+
+ if (rcd->rcd_pinginfo != NULL)
+ LIBCFS_FREE(rcd->rcd_pinginfo, LNET_PINGINFO_SIZE);
+
+ LIBCFS_FREE(rcd, sizeof(*rcd));
+}
+
+lnet_rc_data_t *
+lnet_create_rc_data_locked(lnet_peer_t *gateway)
+{
+ lnet_rc_data_t *rcd = NULL;
+ lnet_ping_info_t *pi;
+ int rc;
+ int i;
+
+ lnet_net_unlock(gateway->lp_cpt);
+
+ LIBCFS_ALLOC(rcd, sizeof(*rcd));
+ if (rcd == NULL)
+ goto out;
+
+ LNetInvalidateHandle(&rcd->rcd_mdh);
+ INIT_LIST_HEAD(&rcd->rcd_list);
+
+ LIBCFS_ALLOC(pi, LNET_PINGINFO_SIZE);
+ if (pi == NULL)
+ goto out;
+
+ memset(pi, 0, LNET_PINGINFO_SIZE);
+ for (i = 0; i < LNET_MAX_RTR_NIS; i++) {
+ pi->pi_ni[i].ns_nid = LNET_NID_ANY;
+ pi->pi_ni[i].ns_status = LNET_NI_STATUS_INVALID;
+ }
+ rcd->rcd_pinginfo = pi;
+
+ LASSERT (!LNetHandleIsInvalid(the_lnet.ln_rc_eqh));
+ rc = LNetMDBind((lnet_md_t){.start = pi,
+ .user_ptr = rcd,
+ .length = LNET_PINGINFO_SIZE,
+ .threshold = LNET_MD_THRESH_INF,
+ .options = LNET_MD_TRUNCATE,
+ .eq_handle = the_lnet.ln_rc_eqh},
+ LNET_UNLINK,
+ &rcd->rcd_mdh);
+ if (rc < 0) {
+ CERROR("Can't bind MD: %d\n", rc);
+ goto out;
+ }
+ LASSERT(rc == 0);
+
+ lnet_net_lock(gateway->lp_cpt);
+ /* router table changed or someone has created rcd for this gateway */
+ if (!lnet_isrouter(gateway) || gateway->lp_rcd != NULL) {
+ lnet_net_unlock(gateway->lp_cpt);
+ goto out;
+ }
+
+ lnet_peer_addref_locked(gateway);
+ rcd->rcd_gateway = gateway;
+ gateway->lp_rcd = rcd;
+ gateway->lp_ping_notsent = 0;
+
+ return rcd;
+
+ out:
+ if (rcd != NULL) {
+ if (!LNetHandleIsInvalid(rcd->rcd_mdh)) {
+ rc = LNetMDUnlink(rcd->rcd_mdh);
+ LASSERT(rc == 0);
+ }
+ lnet_destroy_rc_data(rcd);
+ }
+
+ lnet_net_lock(gateway->lp_cpt);
+ return gateway->lp_rcd;
+}
+
+static int
+lnet_router_check_interval (lnet_peer_t *rtr)
+{
+ int secs;
+
+ secs = rtr->lp_alive ? live_router_check_interval :
+ dead_router_check_interval;
+ if (secs < 0)
+ secs = 0;
+
+ return secs;
+}
+
+static void
+lnet_ping_router_locked (lnet_peer_t *rtr)
+{
+ lnet_rc_data_t *rcd = NULL;
+ cfs_time_t now = cfs_time_current();
+ int secs;
+
+ lnet_peer_addref_locked(rtr);
+
+ if (rtr->lp_ping_deadline != 0 && /* ping timed out? */
+ cfs_time_after(now, rtr->lp_ping_deadline))
+ lnet_notify_locked(rtr, 1, 0, now);
+
+ /* Run any outstanding notifications */
+ lnet_ni_notify_locked(rtr->lp_ni, rtr);
+
+ if (!lnet_isrouter(rtr) ||
+ the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) {
+ /* router table changed or router checker is shutting down */
+ lnet_peer_decref_locked(rtr);
+ return;
+ }
+
+ rcd = rtr->lp_rcd != NULL ?
+ rtr->lp_rcd : lnet_create_rc_data_locked(rtr);
+
+ if (rcd == NULL)
+ return;
+
+ secs = lnet_router_check_interval(rtr);
+
+ CDEBUG(D_NET,
+ "rtr %s %d: deadline %lu ping_notsent %d alive %d "
+ "alive_count %d lp_ping_timestamp %lu\n",
+ libcfs_nid2str(rtr->lp_nid), secs,
+ rtr->lp_ping_deadline, rtr->lp_ping_notsent,
+ rtr->lp_alive, rtr->lp_alive_count, rtr->lp_ping_timestamp);
+
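+ /* secs == 0 means checking is disabled for routers in this state
+ * (live/dead_router_check_interval <= 0), so no ping is sent. */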
+ if (secs != 0 && !rtr->lp_ping_notsent &&
+ cfs_time_after(now, cfs_time_add(rtr->lp_ping_timestamp,
+ cfs_time_seconds(secs)))) {
+ int rc;
+ lnet_process_id_t id;
+ lnet_handle_md_t mdh;
+
+ id.nid = rtr->lp_nid;
+ id.pid = LUSTRE_SRV_LNET_PID;
+ CDEBUG(D_NET, "Check: %s\n", libcfs_id2str(id));
+
+ rtr->lp_ping_notsent = 1;
+ rtr->lp_ping_timestamp = now;
+
+ mdh = rcd->rcd_mdh;
+
+ if (rtr->lp_ping_deadline == 0) {
+ rtr->lp_ping_deadline =
+ cfs_time_shift(router_ping_timeout);
+ }
+
+ lnet_net_unlock(rtr->lp_cpt);
+
+ rc = LNetGet(LNET_NID_ANY, mdh, id, LNET_RESERVED_PORTAL,
+ LNET_PROTO_PING_MATCHBITS, 0);
+
+ lnet_net_lock(rtr->lp_cpt);
+ if (rc != 0)
+ rtr->lp_ping_notsent = 0; /* no event pending */
+ }
+
+ lnet_peer_decref_locked(rtr);
+ return;
+}
+
+int
+lnet_router_checker_start(void)
+{
+ int rc;
+ int eqsz;
+
+ LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
+
+ if (check_routers_before_use &&
+ dead_router_check_interval <= 0) {
+ LCONSOLE_ERROR_MSG(0x10a, "'dead_router_check_interval' must be"
+ " set if 'check_routers_before_use' is set"
+ "\n");
+ return -EINVAL;
+ }
+
+ if (!the_lnet.ln_routing &&
+ live_router_check_interval <= 0 &&
+ dead_router_check_interval <= 0)
+ return 0;
+
+ sema_init(&the_lnet.ln_rc_signal, 0);
+ /* EQ size doesn't matter; the callback is guaranteed to get every
+ * event */
+ eqsz = 0;
+ rc = LNetEQAlloc(eqsz, lnet_router_checker_event,
+ &the_lnet.ln_rc_eqh);
+ if (rc != 0) {
+ CERROR("Can't allocate EQ(%d): %d\n", eqsz, rc);
+ return -ENOMEM;
+ }
+
+ the_lnet.ln_rc_state = LNET_RC_STATE_RUNNING;
+ rc = PTR_ERR(kthread_run(lnet_router_checker,
+ NULL, "router_checker"));
+ if (IS_ERR_VALUE(rc)) {
+ CERROR("Can't start router checker thread: %d\n", rc);
+ /* block until event callback signals exit */
+ down(&the_lnet.ln_rc_signal);
+ rc = LNetEQFree(the_lnet.ln_rc_eqh);
+ LASSERT(rc == 0);
+ the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
+ return -ENOMEM;
+ }
+
+ if (check_routers_before_use) {
+ /* Note that a helpful side-effect of pinging all known routers
+ * at startup is that it makes them drop stale connections they
+ * may have to a previous instance of me. */
+ lnet_wait_known_routerstate();
+ }
+
+ return 0;
+}
+
+void
+lnet_router_checker_stop (void)
+{
+ int rc;
+
+ if (the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN)
+ return;
+
+ LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
+ the_lnet.ln_rc_state = LNET_RC_STATE_STOPPING;
+
+ /* block until event callback signals exit */
+ down(&the_lnet.ln_rc_signal);
+ LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
+
+ rc = LNetEQFree(the_lnet.ln_rc_eqh);
+ LASSERT (rc == 0);
+ return;
+}
+
+static void
+lnet_prune_rc_data(int wait_unlink)
+{
+ lnet_rc_data_t *rcd;
+ lnet_rc_data_t *tmp;
+ lnet_peer_t *lp;
+ struct list_head head;
+ int i = 2;
+
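+ /* rcd lifecycle: an rcd detached from its gateway sits on
+ * ln_rcd_deathrow with its MD possibly still linked; LNetMDUnlink()
+ * moves it to ln_rcd_zombie, and it is freed once the unlink event
+ * has invalidated rcd_mdh. */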
+ if (likely(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING &&
+ list_empty(&the_lnet.ln_rcd_deathrow) &&
+ list_empty(&the_lnet.ln_rcd_zombie)))
+ return;
+
+ INIT_LIST_HEAD(&head);
+
+ lnet_net_lock(LNET_LOCK_EX);
+
+ if (the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) {
+ /* router checker is stopping, prune all */
+ list_for_each_entry(lp, &the_lnet.ln_routers,
+ lp_rtr_list) {
+ if (lp->lp_rcd == NULL)
+ continue;
+
+ LASSERT(list_empty(&lp->lp_rcd->rcd_list));
+ list_add(&lp->lp_rcd->rcd_list,
+ &the_lnet.ln_rcd_deathrow);
+ lp->lp_rcd = NULL;
+ }
+ }
+
+ /* unlink all RCDs on deathrow list */
+ list_splice_init(&the_lnet.ln_rcd_deathrow, &head);
+
+ if (!list_empty(&head)) {
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ list_for_each_entry(rcd, &head, rcd_list)
+ LNetMDUnlink(rcd->rcd_mdh);
+
+ lnet_net_lock(LNET_LOCK_EX);
+ }
+
+ list_splice_init(&head, &the_lnet.ln_rcd_zombie);
+
+ /* release all zombie RCDs */
+ while (!list_empty(&the_lnet.ln_rcd_zombie)) {
+ list_for_each_entry_safe(rcd, tmp, &the_lnet.ln_rcd_zombie,
+ rcd_list) {
+ if (LNetHandleIsInvalid(rcd->rcd_mdh))
+ list_move(&rcd->rcd_list, &head);
+ }
+
+ wait_unlink = wait_unlink &&
+ !list_empty(&the_lnet.ln_rcd_zombie);
+
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ while (!list_empty(&head)) {
+ rcd = list_entry(head.next,
+ lnet_rc_data_t, rcd_list);
+ list_del_init(&rcd->rcd_list);
+ lnet_destroy_rc_data(rcd);
+ }
+
+ if (!wait_unlink)
+ return;
+
+ i++;
+ CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
+ "Waiting for rc buffers to unlink\n");
+ cfs_pause(cfs_time_seconds(1) / 4);
+
+ lnet_net_lock(LNET_LOCK_EX);
+ }
+
+ lnet_net_unlock(LNET_LOCK_EX);
+}
+
+
+#if defined(LNET_ROUTER)
+
+static int
+lnet_router_checker(void *arg)
+{
+ lnet_peer_t *rtr;
+ struct list_head *entry;
+
+ cfs_block_allsigs();
+
+ LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
+
+ while (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING) {
+ __u64 version;
+ int cpt;
+ int cpt2;
+
+ cpt = lnet_net_lock_current();
+rescan:
+ version = the_lnet.ln_routers_version;
+
+ list_for_each(entry, &the_lnet.ln_routers) {
+ rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
+
+ cpt2 = lnet_cpt_of_nid_locked(rtr->lp_nid);
+ if (cpt != cpt2) {
+ lnet_net_unlock(cpt);
+ cpt = cpt2;
+ lnet_net_lock(cpt);
+ /* the routers list has changed */
+ if (version != the_lnet.ln_routers_version)
+ goto rescan;
+ }
+
+ lnet_ping_router_locked(rtr);
+
+ /* NB dropped lock */
+ if (version != the_lnet.ln_routers_version) {
+ /* the routers list has changed */
+ goto rescan;
+ }
+ }
+
+ if (the_lnet.ln_routing)
+ lnet_update_ni_status_locked();
+
+ lnet_net_unlock(cpt);
+
+ lnet_prune_rc_data(0); /* don't wait for UNLINK */
+
+ /* Calling cfs_pause() here always adds 1 to the load average
+ * because the kernel counts # active tasks as nr_running
+ * + nr_uninterruptible. */
+ schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,
+ cfs_time_seconds(1));
+ }
+
+ LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING);
+
+ lnet_prune_rc_data(1); /* wait for UNLINK */
+
+ the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
+ up(&the_lnet.ln_rc_signal);
+ /* The unlink event callback will signal final completion */
+ return 0;
+}
+
+void
+lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages)
+{
+ int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
+
+ while (--npages >= 0)
+ __free_page(rb->rb_kiov[npages].kiov_page);
+
+ LIBCFS_FREE(rb, sz);
+}
+
+lnet_rtrbuf_t *
+lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
+{
+ int npages = rbp->rbp_npages;
+ int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
+ struct page *page;
+ lnet_rtrbuf_t *rb;
+ int i;
+
+ LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz);
+ if (rb == NULL)
+ return NULL;
+
+ rb->rb_pool = rbp;
+
+ for (i = 0; i < npages; i++) {
+ page = alloc_pages_node(
+ cfs_cpt_spread_node(lnet_cpt_table(), cpt),
+ __GFP_ZERO | GFP_IOFS, 0);
+ if (page == NULL) {
+ while (--i >= 0)
+ __free_page(rb->rb_kiov[i].kiov_page);
+
+ LIBCFS_FREE(rb, sz);
+ return NULL;
+ }
+
+ rb->rb_kiov[i].kiov_len = PAGE_CACHE_SIZE;
+ rb->rb_kiov[i].kiov_offset = 0;
+ rb->rb_kiov[i].kiov_page = page;
+ }
+
+ return rb;
+}
+
+void
+lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp)
+{
+ int npages = rbp->rbp_npages;
+ int nbuffers = 0;
+ lnet_rtrbuf_t *rb;
+
+ if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */
+ return;
+
+ LASSERT (list_empty(&rbp->rbp_msgs));
+ LASSERT (rbp->rbp_credits == rbp->rbp_nbuffers);
+
+ while (!list_empty(&rbp->rbp_bufs)) {
+ LASSERT (rbp->rbp_credits > 0);
+
+ rb = list_entry(rbp->rbp_bufs.next,
+ lnet_rtrbuf_t, rb_list);
+ list_del(&rb->rb_list);
+ lnet_destroy_rtrbuf(rb, npages);
+ nbuffers++;
+ }
+
+ LASSERT (rbp->rbp_nbuffers == nbuffers);
+ LASSERT (rbp->rbp_credits == nbuffers);
+
+ rbp->rbp_nbuffers = rbp->rbp_credits = 0;
+}
+
+int
+lnet_rtrpool_alloc_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt)
+{
+ lnet_rtrbuf_t *rb;
+ int i;
+
+ if (rbp->rbp_nbuffers != 0) {
+ LASSERT (rbp->rbp_nbuffers == nbufs);
+ return 0;
+ }
+
+ for (i = 0; i < nbufs; i++) {
+ rb = lnet_new_rtrbuf(rbp, cpt);
+
+ if (rb == NULL) {
+ CERROR("Failed to allocate %d router bufs of %d pages\n",
+ nbufs, rbp->rbp_npages);
+ return -ENOMEM;
+ }
+
+ rbp->rbp_nbuffers++;
+ rbp->rbp_credits++;
+ rbp->rbp_mincredits++;
+ list_add(&rb->rb_list, &rbp->rbp_bufs);
+
+ /* No allocation "under fire" */
+ /* Otherwise we'd need code to schedule blocked msgs etc */
+ LASSERT (!the_lnet.ln_routing);
+ }
+
+ LASSERT (rbp->rbp_credits == nbufs);
+ return 0;
+}
+
+void
+lnet_rtrpool_init(lnet_rtrbufpool_t *rbp, int npages)
+{
+ INIT_LIST_HEAD(&rbp->rbp_msgs);
+ INIT_LIST_HEAD(&rbp->rbp_bufs);
+
+ rbp->rbp_npages = npages;
+ rbp->rbp_credits = 0;
+ rbp->rbp_mincredits = 0;
+}
+
+void
+lnet_rtrpools_free(void)
+{
+ lnet_rtrbufpool_t *rtrp;
+ int i;
+
+ if (the_lnet.ln_rtrpools == NULL) /* uninitialized or freed */
+ return;
+
+ cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
+ lnet_rtrpool_free_bufs(&rtrp[0]);
+ lnet_rtrpool_free_bufs(&rtrp[1]);
+ lnet_rtrpool_free_bufs(&rtrp[2]);
+ }
+
+ cfs_percpt_free(the_lnet.ln_rtrpools);
+ the_lnet.ln_rtrpools = NULL;
+}
+
+static int
+lnet_nrb_tiny_calculate(int npages)
+{
+ int nrbs = LNET_NRB_TINY;
+
+ if (tiny_router_buffers < 0) {
+ LCONSOLE_ERROR_MSG(0x10c,
+ "tiny_router_buffers=%d invalid when "
+ "routing enabled\n", tiny_router_buffers);
+ return -1;
+ }
+
+ if (tiny_router_buffers > 0)
+ nrbs = tiny_router_buffers;
+
+ nrbs /= LNET_CPT_NUMBER;
+ return max(nrbs, LNET_NRB_TINY_MIN);
+}
+
+static int
+lnet_nrb_small_calculate(int npages)
+{
+ int nrbs = LNET_NRB_SMALL;
+
+ if (small_router_buffers < 0) {
+ LCONSOLE_ERROR_MSG(0x10c,
+ "small_router_buffers=%d invalid when "
+ "routing enabled\n", small_router_buffers);
+ return -1;
+ }
+
+ if (small_router_buffers > 0)
+ nrbs = small_router_buffers;
+
+ nrbs /= LNET_CPT_NUMBER;
+ return max(nrbs, LNET_NRB_SMALL_MIN);
+}
+
+static int
+lnet_nrb_large_calculate(int npages)
+{
+ int nrbs = LNET_NRB_LARGE;
+
+ if (large_router_buffers < 0) {
+ LCONSOLE_ERROR_MSG(0x10c,
+ "large_router_buffers=%d invalid when "
+ "routing enabled\n", large_router_buffers);
+ return -1;
+ }
+
+ if (large_router_buffers > 0)
+ nrbs = large_router_buffers;
+
+ nrbs /= LNET_CPT_NUMBER;
+ return max(nrbs, LNET_NRB_LARGE_MIN);
+}
+
+int
+lnet_rtrpools_alloc(int im_a_router)
+{
+ lnet_rtrbufpool_t *rtrp;
+ int large_pages = (LNET_MTU + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ int small_pages = 1;
+ int nrb_tiny;
+ int nrb_small;
+ int nrb_large;
+ int rc;
+ int i;
+
+ if (!strcmp(forwarding, "")) {
+ /* not set either way */
+ if (!im_a_router)
+ return 0;
+ } else if (!strcmp(forwarding, "disabled")) {
+ /* explicitly disabled */
+ return 0;
+ } else if (!strcmp(forwarding, "enabled")) {
+ /* explicitly enabled */
+ } else {
+ LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either "
+ "'enabled' or 'disabled'\n");
+ return -EINVAL;
+ }
+
+ nrb_tiny = lnet_nrb_tiny_calculate(0);
+ if (nrb_tiny < 0)
+ return -EINVAL;
+
+ nrb_small = lnet_nrb_small_calculate(small_pages);
+ if (nrb_small < 0)
+ return -EINVAL;
+
+ nrb_large = lnet_nrb_large_calculate(large_pages);
+ if (nrb_large < 0)
+ return -EINVAL;
+
+ the_lnet.ln_rtrpools = cfs_percpt_alloc(lnet_cpt_table(),
+ LNET_NRBPOOLS *
+ sizeof(lnet_rtrbufpool_t));
+ if (the_lnet.ln_rtrpools == NULL) {
+ LCONSOLE_ERROR_MSG(0x10c,
+ "Failed to initialize router buffer pool\n");
+ return -ENOMEM;
+ }
+
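+ /* each CPT gets three pools: [0] tiny (0-payload), [1] small (one
+ * page) and [2] large (LNET_MTU-sized) router buffers */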
+ cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
+ lnet_rtrpool_init(&rtrp[0], 0);
+ rc = lnet_rtrpool_alloc_bufs(&rtrp[0], nrb_tiny, i);
+ if (rc != 0)
+ goto failed;
+
+ lnet_rtrpool_init(&rtrp[1], small_pages);
+ rc = lnet_rtrpool_alloc_bufs(&rtrp[1], nrb_small, i);
+ if (rc != 0)
+ goto failed;
+
+ lnet_rtrpool_init(&rtrp[2], large_pages);
+ rc = lnet_rtrpool_alloc_bufs(&rtrp[2], nrb_large, i);
+ if (rc != 0)
+ goto failed;
+ }
+
+ lnet_net_lock(LNET_LOCK_EX);
+ the_lnet.ln_routing = 1;
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ return 0;
+
+ failed:
+ lnet_rtrpools_free();
+ return rc;
+}
+
+int
+lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, cfs_time_t when)
+{
+ struct lnet_peer *lp = NULL;
+ cfs_time_t now = cfs_time_current();
+ int cpt = lnet_cpt_of_nid(nid);
+
+ LASSERT (!in_interrupt ());
+
+ CDEBUG (D_NET, "%s notifying %s: %s\n",
+ (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
+ libcfs_nid2str(nid),
+ alive ? "up" : "down");
+
+ if (ni != NULL &&
+ LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) {
+ CWARN ("Ignoring notification of %s %s by %s (different net)\n",
+ libcfs_nid2str(nid), alive ? "birth" : "death",
+ libcfs_nid2str(ni->ni_nid));
+ return -EINVAL;
+ }
+
+ /* can't do predictions... */
+ if (cfs_time_after(when, now)) {
+ CWARN ("Ignoring prediction from %s of %s %s "
+ "%ld seconds in the future\n",
+ (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
+ libcfs_nid2str(nid), alive ? "up" : "down",
+ cfs_duration_sec(cfs_time_sub(when, now)));
+ return -EINVAL;
+ }
+
+ if (ni != NULL && !alive && /* LND telling me she's down */
+ !auto_down) { /* auto-down disabled */
+ CDEBUG(D_NET, "Auto-down disabled\n");
+ return 0;
+ }
+
+ lnet_net_lock(cpt);
+
+ if (the_lnet.ln_shutdown) {
+ lnet_net_unlock(cpt);
+ return -ESHUTDOWN;
+ }
+
+ lp = lnet_find_peer_locked(the_lnet.ln_peer_tables[cpt], nid);
+ if (lp == NULL) {
+ /* nid not found */
+ lnet_net_unlock(cpt);
+ CDEBUG(D_NET, "%s not found\n", libcfs_nid2str(nid));
+ return 0;
+ }
+
+ /* We can't fully trust the LND to report an exact peer last_alive
+ * when it notifies us about a dead peer. For example ksocklnd can
+ * call us with when == _time_when_the_node_was_booted_ if
+ * no connections were successfully established */
+ if (ni != NULL && !alive && when < lp->lp_last_alive)
+ when = lp->lp_last_alive;
+
+ lnet_notify_locked(lp, ni == NULL, alive, when);
+
+ lnet_ni_notify_locked(ni, lp);
+
+ lnet_peer_decref_locked(lp);
+
+ lnet_net_unlock(cpt);
+ return 0;
+}
+EXPORT_SYMBOL(lnet_notify);
+
+void
+lnet_get_tunables (void)
+{
+ return;
+}
+
+#else
+
+int
+lnet_notify (lnet_ni_t *ni, lnet_nid_t nid, int alive, cfs_time_t when)
+{
+ return -EOPNOTSUPP;
+}
+
+void
+lnet_router_checker (void)
+{
+ static time_t last = 0;
+ static int running = 0;
+
+ time_t now = cfs_time_current_sec();
+ int interval = now - last;
+ int rc;
+ __u64 version;
+ lnet_peer_t *rtr;
+
+ /* It's no use calling me again within a second - all intervals and
+ * timeouts are measured in seconds */
+ if (last != 0 && interval < 2)
+ return;
+
+ if (last != 0 &&
+ interval > MAX(live_router_check_interval,
+ dead_router_check_interval))
+ CNETERR("Checker(%d/%d) not called for %d seconds\n",
+ live_router_check_interval, dead_router_check_interval,
+ interval);
+
+ LASSERT(LNET_CPT_NUMBER == 1);
+
+ lnet_net_lock(0);
+ LASSERT(!running); /* recursion check */
+ running = 1;
+ lnet_net_unlock(0);
+
+ last = now;
+
+ if (the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING)
+ lnet_prune_rc_data(0); /* unlink all rcd and nowait */
+
+ /* consume all pending events */
+ while (1) {
+ int i;
+ lnet_event_t ev;
+
+ /* NB ln_rc_eqh must be the 1st in 'eventqs' otherwise the
+ * recursion breaker in LNetEQPoll would fail */
+ rc = LNetEQPoll(&the_lnet.ln_rc_eqh, 1, 0, &ev, &i);
+ if (rc == 0) /* no event pending */
+ break;
+
+ /* NB a lost SENT prevents me from pinging a router again */
+ if (rc == -EOVERFLOW) {
+ CERROR("Dropped an event!!!\n");
+ abort();
+ }
+
+ LASSERT (rc == 1);
+
+ lnet_router_checker_event(&ev);
+ }
+
+ if (the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING) {
+ lnet_prune_rc_data(1); /* release rcd */
+ the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
+ running = 0;
+ return;
+ }
+
+ LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
+
+ lnet_net_lock(0);
+
+ version = the_lnet.ln_routers_version;
+ list_for_each_entry (rtr, &the_lnet.ln_routers, lp_rtr_list) {
+ lnet_ping_router_locked(rtr);
+ LASSERT (version == the_lnet.ln_routers_version);
+ }
+
+ lnet_net_unlock(0);
+
+ running = 0; /* lock only needed for the recursion check */
+ return;
+}
+
+/* NB lnet_peers_start_down depends on me,
+ * so must be called before any peer creation */
+void
+lnet_get_tunables (void)
+{
+ char *s;
+
+ s = getenv("LNET_ROUTER_PING_TIMEOUT");
+ if (s != NULL) router_ping_timeout = atoi(s);
+
+ s = getenv("LNET_LIVE_ROUTER_CHECK_INTERVAL");
+ if (s != NULL) live_router_check_interval = atoi(s);
+
+ s = getenv("LNET_DEAD_ROUTER_CHECK_INTERVAL");
+ if (s != NULL) dead_router_check_interval = atoi(s);
+
+ /* This replaces old lnd_notify mechanism */
+ check_routers_before_use = 1;
+ if (dead_router_check_interval <= 0)
+ dead_router_check_interval = 30;
+}
+
+void
+lnet_rtrpools_free(void)
+{
+}
+
+int
+lnet_rtrpools_alloc(int im_a_router)
+{
+ return 0;
+}
+
+#endif
diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c
new file mode 100644
index 0000000..931f6ca2
--- /dev/null
+++ b/drivers/staging/lustre/lnet/lnet/router_proc.c
@@ -0,0 +1,950 @@
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ *
+ * This file is part of Portals
+ * http://sourceforge.net/projects/sandiaportals/
+ *
+ * Portals is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Portals is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Portals; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+#include <linux/libcfs/libcfs.h>
+#include <linux/lnet/lib-lnet.h>
+
+#if defined(LNET_ROUTER)
+
+/* This is really lnet_proc.c. You might need to update sanity test 215
+ * if any file format is changed. */
+
+static ctl_table_header_t *lnet_table_header = NULL;
+
+#define CTL_LNET (0x100)
+enum {
+ PSDEV_LNET_STATS = 100,
+ PSDEV_LNET_ROUTES,
+ PSDEV_LNET_ROUTERS,
+ PSDEV_LNET_PEERS,
+ PSDEV_LNET_BUFFERS,
+ PSDEV_LNET_NIS,
+ PSDEV_LNET_PTL_ROTOR,
+};
+
+#define LNET_LOFFT_BITS (sizeof(loff_t) * 8)
+/*
+ * NB: max allowed LNET_CPT_BITS is 8 on 64-bit system and 2 on 32-bit system
+ */
+#define LNET_PROC_CPT_BITS (LNET_CPT_BITS + 1)
+/* change version, 16 bits or 8 bits */
+#define LNET_PROC_VER_BITS MAX(((MIN(LNET_LOFFT_BITS, 64)) / 4), 8)
+
+#define LNET_PROC_HASH_BITS LNET_PEER_HASH_BITS
+/*
+ * bits for peer hash offset
+ * NB: we don't use the highest bit of *ppos because it's signed
+ */
+#define LNET_PROC_HOFF_BITS (LNET_LOFFT_BITS - \
+ LNET_PROC_CPT_BITS - \
+ LNET_PROC_VER_BITS - \
+ LNET_PROC_HASH_BITS - 1)
+/* bits for hash index + position */
+#define LNET_PROC_HPOS_BITS (LNET_PROC_HASH_BITS + LNET_PROC_HOFF_BITS)
+/* bits for peer hash table + hash version */
+#define LNET_PROC_VPOS_BITS (LNET_PROC_HPOS_BITS + LNET_PROC_VER_BITS)
+
+#define LNET_PROC_CPT_MASK ((1ULL << LNET_PROC_CPT_BITS) - 1)
+#define LNET_PROC_VER_MASK ((1ULL << LNET_PROC_VER_BITS) - 1)
+#define LNET_PROC_HASH_MASK ((1ULL << LNET_PROC_HASH_BITS) - 1)
+#define LNET_PROC_HOFF_MASK ((1ULL << LNET_PROC_HOFF_BITS) - 1)
+
+#define LNET_PROC_CPT_GET(pos) \
+ (int)(((pos) >> LNET_PROC_VPOS_BITS) & LNET_PROC_CPT_MASK)
+
+#define LNET_PROC_VER_GET(pos) \
+ (int)(((pos) >> LNET_PROC_HPOS_BITS) & LNET_PROC_VER_MASK)
+
+#define LNET_PROC_HASH_GET(pos) \
+ (int)(((pos) >> LNET_PROC_HOFF_BITS) & LNET_PROC_HASH_MASK)
+
+#define LNET_PROC_HOFF_GET(pos) \
+ (int)((pos) & LNET_PROC_HOFF_MASK)
+
+#define LNET_PROC_POS_MAKE(cpt, ver, hash, off) \
+ (((((loff_t)(cpt)) & LNET_PROC_CPT_MASK) << LNET_PROC_VPOS_BITS) | \
+ ((((loff_t)(ver)) & LNET_PROC_VER_MASK) << LNET_PROC_HPOS_BITS) | \
+ ((((loff_t)(hash)) & LNET_PROC_HASH_MASK) << LNET_PROC_HOFF_BITS) | \
+ ((off) & LNET_PROC_HOFF_MASK))
+
+#define LNET_PROC_VERSION(v) ((unsigned int)((v) & LNET_PROC_VER_MASK))
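+
+/*
+ * A /proc position cookie built by LNET_PROC_POS_MAKE() therefore looks
+ * like (top bit unused because loff_t is signed):
+ *
+ *   | 0 | cpt | version | hash bucket | offset within bucket |
+ *
+ * so a reader can resume at the right peer table (CPT), detect that the
+ * table changed underneath it via the version field (-ESTALE), and
+ * continue from the saved hash bucket and offset.
+ */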
+
+static int __proc_lnet_stats(void *data, int write,
+ loff_t pos, void *buffer, int nob)
+{
+ int rc;
+ lnet_counters_t *ctrs;
+ int len;
+ char *tmpstr;
+ const int tmpsiz = 256; /* 7 %u and 4 LPU64 */
+
+ if (write) {
+ lnet_counters_reset();
+ return 0;
+ }
+
+ /* read */
+
+ LIBCFS_ALLOC(ctrs, sizeof(*ctrs));
+ if (ctrs == NULL)
+ return -ENOMEM;
+
+ LIBCFS_ALLOC(tmpstr, tmpsiz);
+ if (tmpstr == NULL) {
+ LIBCFS_FREE(ctrs, sizeof(*ctrs));
+ return -ENOMEM;
+ }
+
+ lnet_counters_get(ctrs);
+
+ len = snprintf(tmpstr, tmpsiz,
+ "%u %u %u %u %u %u %u "LPU64" "LPU64" "
+ LPU64" "LPU64,
+ ctrs->msgs_alloc, ctrs->msgs_max,
+ ctrs->errors,
+ ctrs->send_count, ctrs->recv_count,
+ ctrs->route_count, ctrs->drop_count,
+ ctrs->send_length, ctrs->recv_length,
+ ctrs->route_length, ctrs->drop_length);
+
+ if (pos >= min_t(int, len, strlen(tmpstr)))
+ rc = 0;
+ else
+ rc = cfs_trace_copyout_string(buffer, nob,
+ tmpstr + pos, "\n");
+
+ LIBCFS_FREE(tmpstr, tmpsiz);
+ LIBCFS_FREE(ctrs, sizeof(*ctrs));
+ return rc;
+}
+
+DECLARE_PROC_HANDLER(proc_lnet_stats);
+
+int LL_PROC_PROTO(proc_lnet_routes)
+{
+ const int tmpsiz = 256;
+ char *tmpstr;
+ char *s;
+ int rc = 0;
+ int len;
+ int ver;
+ int off;
+
+ DECLARE_LL_PROC_PPOS_DECL;
+
+ CLASSERT(sizeof(loff_t) >= 4);
+
+ off = LNET_PROC_HOFF_GET(*ppos);
+ ver = LNET_PROC_VER_GET(*ppos);
+
+ LASSERT (!write);
+
+ if (*lenp == 0)
+ return 0;
+
+ LIBCFS_ALLOC(tmpstr, tmpsiz);
+ if (tmpstr == NULL)
+ return -ENOMEM;
+
+ s = tmpstr; /* points to current position in tmpstr[] */
+
+ if (*ppos == 0) {
+ s += snprintf(s, tmpstr + tmpsiz - s, "Routing %s\n",
+ the_lnet.ln_routing ? "enabled" : "disabled");
+ LASSERT (tmpstr + tmpsiz - s > 0);
+
+ s += snprintf(s, tmpstr + tmpsiz - s, "%-8s %4s %7s %s\n",
+ "net", "hops", "state", "router");
+ LASSERT (tmpstr + tmpsiz - s > 0);
+
+ lnet_net_lock(0);
+ ver = (unsigned int)the_lnet.ln_remote_nets_version;
+ lnet_net_unlock(0);
+ *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
+ } else {
+ struct list_head *n;
+ struct list_head *r;
+ lnet_route_t *route = NULL;
+ lnet_remotenet_t *rnet = NULL;
+ int skip = off - 1;
+ struct list_head *rn_list;
+ int i;
+
+ lnet_net_lock(0);
+
+ if (ver != LNET_PROC_VERSION(the_lnet.ln_remote_nets_version)) {
+ lnet_net_unlock(0);
+ LIBCFS_FREE(tmpstr, tmpsiz);
+ return -ESTALE;
+ }
+
+ for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE && route == NULL;
+ i++) {
+ rn_list = &the_lnet.ln_remote_nets_hash[i];
+
+ n = rn_list->next;
+
+ while (n != rn_list && route == NULL) {
+ rnet = list_entry(n, lnet_remotenet_t,
+ lrn_list);
+
+ r = rnet->lrn_routes.next;
+
+ while (r != &rnet->lrn_routes) {
+ lnet_route_t *re =
+ list_entry(r, lnet_route_t,
+ lr_list);
+ if (skip == 0) {
+ route = re;
+ break;
+ }
+
+ skip--;
+ r = r->next;
+ }
+
+ n = n->next;
+ }
+ }
+
+ if (route != NULL) {
+ __u32 net = rnet->lrn_net;
+ unsigned int hops = route->lr_hops;
+ lnet_nid_t nid = route->lr_gateway->lp_nid;
+ int alive = route->lr_gateway->lp_alive;
+
+ s += snprintf(s, tmpstr + tmpsiz - s,
+ "%-8s %4u %7s %s\n",
+ libcfs_net2str(net), hops,
+ alive ? "up" : "down",
+ libcfs_nid2str(nid));
+ LASSERT(tmpstr + tmpsiz - s > 0);
+ }
+
+ lnet_net_unlock(0);
+ }
+
+ len = s - tmpstr; /* how many bytes were written */
+
+ if (len > *lenp) { /* linux-supplied buffer is too small */
+ rc = -EINVAL;
+ } else if (len > 0) { /* wrote something */
+ if (copy_to_user(buffer, tmpstr, len))
+ rc = -EFAULT;
+ else {
+ off += 1;
+ *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
+ }
+ }
+
+ LIBCFS_FREE(tmpstr, tmpsiz);
+
+ if (rc == 0)
+ *lenp = len;
+
+ return rc;
+}
+
+int LL_PROC_PROTO(proc_lnet_routers)
+{
+ int rc = 0;
+ char *tmpstr;
+ char *s;
+ const int tmpsiz = 256;
+ int len;
+ int ver;
+ int off;
+
+ DECLARE_LL_PROC_PPOS_DECL;
+
+ off = LNET_PROC_HOFF_GET(*ppos);
+ ver = LNET_PROC_VER_GET(*ppos);
+
+ LASSERT (!write);
+
+ if (*lenp == 0)
+ return 0;
+
+ LIBCFS_ALLOC(tmpstr, tmpsiz);
+ if (tmpstr == NULL)
+ return -ENOMEM;
+
+ s = tmpstr; /* points to current position in tmpstr[] */
+
+ if (*ppos == 0) {
+ s += snprintf(s, tmpstr + tmpsiz - s,
+ "%-4s %7s %9s %6s %12s %9s %8s %7s %s\n",
+ "ref", "rtr_ref", "alive_cnt", "state",
+ "last_ping", "ping_sent", "deadline",
+ "down_ni", "router");
+ LASSERT(tmpstr + tmpsiz - s > 0);
+
+ lnet_net_lock(0);
+ ver = (unsigned int)the_lnet.ln_routers_version;
+ lnet_net_unlock(0);
+ *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
+ } else {
+ struct list_head *r;
+ struct lnet_peer *peer = NULL;
+ int skip = off - 1;
+
+ lnet_net_lock(0);
+
+ if (ver != LNET_PROC_VERSION(the_lnet.ln_routers_version)) {
+ lnet_net_unlock(0);
+
+ LIBCFS_FREE(tmpstr, tmpsiz);
+ return -ESTALE;
+ }
+
+ r = the_lnet.ln_routers.next;
+
+ while (r != &the_lnet.ln_routers) {
+ lnet_peer_t *lp = list_entry(r, lnet_peer_t,
+ lp_rtr_list);
+
+ if (skip == 0) {
+ peer = lp;
+ break;
+ }
+
+ skip--;
+ r = r->next;
+ }
+
+ if (peer != NULL) {
+ lnet_nid_t nid = peer->lp_nid;
+ cfs_time_t now = cfs_time_current();
+ cfs_time_t deadline = peer->lp_ping_deadline;
+ int nrefs = peer->lp_refcount;
+ int nrtrrefs = peer->lp_rtr_refcount;
+ int alive_cnt = peer->lp_alive_count;
+ int alive = peer->lp_alive;
+ int pingsent = !peer->lp_ping_notsent;
+ int last_ping = cfs_duration_sec(cfs_time_sub(now,
+ peer->lp_ping_timestamp));
+ int down_ni = 0;
+ lnet_route_t *rtr;
+
+ if ((peer->lp_ping_feats &
+ LNET_PING_FEAT_NI_STATUS) != 0) {
+ list_for_each_entry(rtr, &peer->lp_routes,
+ lr_gwlist) {
+ /* the down-NI count on any route should
+ * equal the number of down NIs on the gateway */
+ if (rtr->lr_downis != 0) {
+ down_ni = rtr->lr_downis;
+ break;
+ }
+ }
+ }
+
+ if (deadline == 0)
+ s += snprintf(s, tmpstr + tmpsiz - s,
+ "%-4d %7d %9d %6s %12d %9d %8s %7d %s\n",
+ nrefs, nrtrrefs, alive_cnt,
+ alive ? "up" : "down", last_ping,
+ pingsent, "NA", down_ni,
+ libcfs_nid2str(nid));
+ else
+ s += snprintf(s, tmpstr + tmpsiz - s,
+ "%-4d %7d %9d %6s %12d %9d %8lu %7d %s\n",
+ nrefs, nrtrrefs, alive_cnt,
+ alive ? "up" : "down", last_ping,
+ pingsent,
+ cfs_duration_sec(cfs_time_sub(deadline, now)),
+ down_ni, libcfs_nid2str(nid));
+ LASSERT (tmpstr + tmpsiz - s > 0);
+ }
+
+ lnet_net_unlock(0);
+ }
+
+ len = s - tmpstr; /* how many bytes were written */
+
+ if (len > *lenp) { /* linux-supplied buffer is too small */
+ rc = -EINVAL;
+ } else if (len > 0) { /* wrote something */
+ if (copy_to_user(buffer, tmpstr, len))
+ rc = -EFAULT;
+ else {
+ off += 1;
+ *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
+ }
+ }
+
+ LIBCFS_FREE(tmpstr, tmpsiz);
+
+ if (rc == 0)
+ *lenp = len;
+
+ return rc;
+}
+
+int LL_PROC_PROTO(proc_lnet_peers)
+{
+ const int tmpsiz = 256;
+ struct lnet_peer_table *ptable;
+ char *tmpstr;
+ char *s;
+ int cpt = LNET_PROC_CPT_GET(*ppos);
+ int ver = LNET_PROC_VER_GET(*ppos);
+ int hash = LNET_PROC_HASH_GET(*ppos);
+ int hoff = LNET_PROC_HOFF_GET(*ppos);
+ int rc = 0;
+ int len;
+
+ CLASSERT(LNET_PROC_HASH_BITS >= LNET_PEER_HASH_BITS);
+ LASSERT(!write);
+
+ if (*lenp == 0)
+ return 0;
+
+ if (cpt >= LNET_CPT_NUMBER) {
+ *lenp = 0;
+ return 0;
+ }
+
+ LIBCFS_ALLOC(tmpstr, tmpsiz);
+ if (tmpstr == NULL)
+ return -ENOMEM;
+
+ s = tmpstr; /* points to current position in tmpstr[] */
+
+ if (*ppos == 0) {
+ s += snprintf(s, tmpstr + tmpsiz - s,
+ "%-24s %4s %5s %5s %5s %5s %5s %5s %5s %s\n",
+ "nid", "refs", "state", "last", "max",
+ "rtr", "min", "tx", "min", "queue");
+ LASSERT (tmpstr + tmpsiz - s > 0);
+
+ hoff++;
+ } else {
+ struct lnet_peer *peer;
+ struct list_head *p;
+ int skip;
+ again:
+ p = NULL;
+ peer = NULL;
+ skip = hoff - 1;
+
+ lnet_net_lock(cpt);
+ ptable = the_lnet.ln_peer_tables[cpt];
+ if (hoff == 1)
+ ver = LNET_PROC_VERSION(ptable->pt_version);
+
+ if (ver != LNET_PROC_VERSION(ptable->pt_version)) {
+ lnet_net_unlock(cpt);
+ LIBCFS_FREE(tmpstr, tmpsiz);
+ return -ESTALE;
+ }
+
+ while (hash < LNET_PEER_HASH_SIZE) {
+ if (p == NULL)
+ p = ptable->pt_hash[hash].next;
+
+ while (p != &ptable->pt_hash[hash]) {
+ lnet_peer_t *lp = list_entry(p, lnet_peer_t,
+ lp_hashlist);
+ if (skip == 0) {
+ peer = lp;
+
+ /* minor optimization: start from idx+1
+ * on next iteration if we've just
+ * drained lp_hashlist */
+ if (lp->lp_hashlist.next ==
+ &ptable->pt_hash[hash]) {
+ hoff = 1;
+ hash++;
+ } else {
+ hoff++;
+ }
+
+ break;
+ }
+
+ skip--;
+ p = lp->lp_hashlist.next;
+ }
+
+ if (peer != NULL)
+ break;
+
+ p = NULL;
+ hoff = 1;
+ hash++;
+ }
+
+ if (peer != NULL) {
+ lnet_nid_t nid = peer->lp_nid;
+ int nrefs = peer->lp_refcount;
+ int lastalive = -1;
+ char *aliveness = "NA";
+ int maxcr = peer->lp_ni->ni_peertxcredits;
+ int txcr = peer->lp_txcredits;
+ int mintxcr = peer->lp_mintxcredits;
+ int rtrcr = peer->lp_rtrcredits;
+ int minrtrcr = peer->lp_minrtrcredits;
+ int txqnob = peer->lp_txqnob;
+
+ if (lnet_isrouter(peer) ||
+ lnet_peer_aliveness_enabled(peer))
+ aliveness = peer->lp_alive ? "up" : "down";
+
+ if (lnet_peer_aliveness_enabled(peer)) {
+ cfs_time_t now = cfs_time_current();
+ cfs_duration_t delta;
+
+ delta = cfs_time_sub(now, peer->lp_last_alive);
+ lastalive = cfs_duration_sec(delta);
+
+ /* No need to mess up peers contents with
+ * arbitrarily long integers - it suffices to
+ * know that lastalive is more than 10000s old
+ */
+ if (lastalive >= 10000)
+ lastalive = 9999;
+ }
+
+ lnet_net_unlock(cpt);
+
+ s += snprintf(s, tmpstr + tmpsiz - s,
+ "%-24s %4d %5s %5d %5d %5d %5d %5d %5d %d\n",
+ libcfs_nid2str(nid), nrefs, aliveness,
+ lastalive, maxcr, rtrcr, minrtrcr, txcr,
+ mintxcr, txqnob);
+ LASSERT (tmpstr + tmpsiz - s > 0);
+
+ } else { /* peer is NULL */
+ lnet_net_unlock(cpt);
+ }
+
+ if (hash == LNET_PEER_HASH_SIZE) {
+ cpt++;
+ hash = 0;
+ hoff = 1;
+ if (peer == NULL && cpt < LNET_CPT_NUMBER)
+ goto again;
+ }
+ }
+
+ len = s - tmpstr; /* how many bytes were written */
+
+ if (len > *lenp) { /* linux-supplied buffer is too small */
+ rc = -EINVAL;
+ } else if (len > 0) { /* wrote something */
+ if (copy_to_user(buffer, tmpstr, len))
+ rc = -EFAULT;
+ else
+ *ppos = LNET_PROC_POS_MAKE(cpt, ver, hash, hoff);
+ }
+
+ LIBCFS_FREE(tmpstr, tmpsiz);
+
+ if (rc == 0)
+ *lenp = len;
+
+ return rc;
+}
+
+static int __proc_lnet_buffers(void *data, int write,
+ loff_t pos, void *buffer, int nob)
+{
+ char *s;
+ char *tmpstr;
+ int tmpsiz;
+ int idx;
+ int len;
+ int rc;
+ int i;
+
+ LASSERT(!write);
+
+ /* (4 %d) * 4 * LNET_CPT_NUMBER */
+ tmpsiz = 64 * (LNET_NRBPOOLS + 1) * LNET_CPT_NUMBER;
+ LIBCFS_ALLOC(tmpstr, tmpsiz);
+ if (tmpstr == NULL)
+ return -ENOMEM;
+
+ s = tmpstr; /* points to current position in tmpstr[] */
+
+ s += snprintf(s, tmpstr + tmpsiz - s,
+ "%5s %5s %7s %7s\n",
+ "pages", "count", "credits", "min");
+ LASSERT (tmpstr + tmpsiz - s > 0);
+
+ if (the_lnet.ln_rtrpools == NULL)
+ goto out; /* I'm not a router */
+
+ for (idx = 0; idx < LNET_NRBPOOLS; idx++) {
+ lnet_rtrbufpool_t *rbp;
+
+ lnet_net_lock(LNET_LOCK_EX);
+ cfs_percpt_for_each(rbp, i, the_lnet.ln_rtrpools) {
+ s += snprintf(s, tmpstr + tmpsiz - s,
+ "%5d %5d %7d %7d\n",
+ rbp[idx].rbp_npages,
+ rbp[idx].rbp_nbuffers,
+ rbp[idx].rbp_credits,
+ rbp[idx].rbp_mincredits);
+ LASSERT(tmpstr + tmpsiz - s > 0);
+ }
+ lnet_net_unlock(LNET_LOCK_EX);
+ }
+
+ out:
+ len = s - tmpstr;
+
+ if (pos >= min_t(int, len, strlen(tmpstr)))
+ rc = 0;
+ else
+ rc = cfs_trace_copyout_string(buffer, nob,
+ tmpstr + pos, NULL);
+
+ LIBCFS_FREE(tmpstr, tmpsiz);
+ return rc;
+}
+
+DECLARE_PROC_HANDLER(proc_lnet_buffers);
+
+int LL_PROC_PROTO(proc_lnet_nis)
+{
+ int tmpsiz = 128 * LNET_CPT_NUMBER;
+ int rc = 0;
+ char *tmpstr;
+ char *s;
+ int len;
+
+ DECLARE_LL_PROC_PPOS_DECL;
+
+ LASSERT (!write);
+
+ if (*lenp == 0)
+ return 0;
+
+ LIBCFS_ALLOC(tmpstr, tmpsiz);
+ if (tmpstr == NULL)
+ return -ENOMEM;
+
+ s = tmpstr; /* points to current position in tmpstr[] */
+
+ if (*ppos == 0) {
+ s += snprintf(s, tmpstr + tmpsiz - s,
+ "%-24s %6s %5s %4s %4s %4s %5s %5s %5s\n",
+ "nid", "status", "alive", "refs", "peer",
+ "rtr", "max", "tx", "min");
+ LASSERT (tmpstr + tmpsiz - s > 0);
+ } else {
+ struct list_head *n;
+ lnet_ni_t *ni = NULL;
+ int skip = *ppos - 1;
+
+ lnet_net_lock(0);
+
+ n = the_lnet.ln_nis.next;
+
+ while (n != &the_lnet.ln_nis) {
+ lnet_ni_t *a_ni = list_entry(n, lnet_ni_t, ni_list);
+
+ if (skip == 0) {
+ ni = a_ni;
+ break;
+ }
+
+ skip--;
+ n = n->next;
+ }
+
+ if (ni != NULL) {
+ struct lnet_tx_queue *tq;
+ char *stat;
+ long now = cfs_time_current_sec();
+ int last_alive = -1;
+ int i;
+ int j;
+
+ if (the_lnet.ln_routing)
+ last_alive = now - ni->ni_last_alive;
+
+ /* @lo forever alive */
+ if (ni->ni_lnd->lnd_type == LOLND)
+ last_alive = 0;
+
+ lnet_ni_lock(ni);
+ LASSERT(ni->ni_status != NULL);
+ stat = (ni->ni_status->ns_status ==
+ LNET_NI_STATUS_UP) ? "up" : "down";
+ lnet_ni_unlock(ni);
+
+ /* we actually output credits information for
+ * TX queue of each partition */
+ cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
+ for (j = 0; ni->ni_cpts != NULL &&
+ j < ni->ni_ncpts; j++) {
+ if (i == ni->ni_cpts[j])
+ break;
+ }
+
+ if (j == ni->ni_ncpts)
+ continue;
+
+ if (i != 0)
+ lnet_net_lock(i);
+
+ s += snprintf(s, tmpstr + tmpsiz - s,
+ "%-24s %6s %5d %4d %4d %4d %5d %5d %5d\n",
+ libcfs_nid2str(ni->ni_nid), stat,
+ last_alive, *ni->ni_refs[i],
+ ni->ni_peertxcredits,
+ ni->ni_peerrtrcredits,
+ tq->tq_credits_max,
+ tq->tq_credits, tq->tq_credits_min);
+ if (i != 0)
+ lnet_net_unlock(i);
+ }
+ LASSERT(tmpstr + tmpsiz - s > 0);
+ }
+
+ lnet_net_unlock(0);
+ }
+
+ len = s - tmpstr; /* how many bytes were written */
+
+ if (len > *lenp) { /* linux-supplied buffer is too small */
+ rc = -EINVAL;
+ } else if (len > 0) { /* wrote something */
+ if (copy_to_user(buffer, tmpstr, len))
+ rc = -EFAULT;
+ else
+ *ppos += 1;
+ }
+
+ LIBCFS_FREE(tmpstr, tmpsiz);
+
+ if (rc == 0)
+ *lenp = len;
+
+ return rc;
+}
+
+struct lnet_portal_rotors {
+ int pr_value;
+ const char *pr_name;
+ const char *pr_desc;
+};
+
+static struct lnet_portal_rotors portal_rotors[] = {
+ {
+ .pr_value = LNET_PTL_ROTOR_OFF,
+ .pr_name = "OFF",
+ .pr_desc = "Turn off message rotor for wildcard portals"
+ },
+ {
+ .pr_value = LNET_PTL_ROTOR_ON,
+ .pr_name = "ON",
+ .pr_desc = "round-robin dispatch all PUT messages for "
+ "wildcard portals"
+ },
+ {
+ .pr_value = LNET_PTL_ROTOR_RR_RT,
+ .pr_name = "RR_RT",
+ .pr_desc = "round-robin dispatch routed PUT message for "
+ "wildcard portals"
+ },
+ {
+ .pr_value = LNET_PTL_ROTOR_HASH_RT,
+ .pr_name = "HASH_RT",
+ .pr_desc = "dispatch routed PUT message by hashing source "
+ "NID for wildcard portals"
+ },
+ {
+ .pr_value = -1,
+ .pr_name = NULL,
+ .pr_desc = NULL
+ },
+};
+
+extern int portal_rotor;
+
+static int __proc_lnet_portal_rotor(void *data, int write,
+ loff_t pos, void *buffer, int nob)
+{
+ const int buf_len = 128;
+ char *buf;
+ char *tmp;
+ int rc;
+ int i;
+
+ LIBCFS_ALLOC(buf, buf_len);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ if (!write) {
+ lnet_res_lock(0);
+
+ for (i = 0; portal_rotors[i].pr_value >= 0; i++) {
+ if (portal_rotors[i].pr_value == portal_rotor)
+ break;
+ }
+
+ LASSERT(portal_rotors[i].pr_value == portal_rotor);
+ lnet_res_unlock(0);
+
+ rc = snprintf(buf, buf_len,
+ "{\n\tportals: all\n"
+ "\trotor: %s\n\tdescription: %s\n}",
+ portal_rotors[i].pr_name,
+ portal_rotors[i].pr_desc);
+
+ if (pos >= min_t(int, rc, buf_len)) {
+ rc = 0;
+ } else {
+ rc = cfs_trace_copyout_string(buffer, nob,
+ buf + pos, "\n");
+ }
+ goto out;
+ }
+
+ rc = cfs_trace_copyin_string(buf, buf_len, buffer, nob);
+ if (rc < 0)
+ goto out;
+
+ tmp = cfs_trimwhite(buf);
+
+ rc = -EINVAL;
+ lnet_res_lock(0);
+ for (i = 0; portal_rotors[i].pr_name != NULL; i++) {
+ if (cfs_strncasecmp(portal_rotors[i].pr_name, tmp,
+ strlen(portal_rotors[i].pr_name)) == 0) {
+ portal_rotor = portal_rotors[i].pr_value;
+ rc = 0;
+ break;
+ }
+ }
+ lnet_res_unlock(0);
+out:
+ LIBCFS_FREE(buf, buf_len);
+ return rc;
+}
+DECLARE_PROC_HANDLER(proc_lnet_portal_rotor);
+
+static ctl_table_t lnet_table[] = {
+ /*
+ * NB No .strategy entries have been provided since sysctl(8) prefers
+ * to go via /proc for portability.
+ */
+ {
+ INIT_CTL_NAME(PSDEV_LNET_STATS)
+ .procname = "stats",
+ .mode = 0644,
+ .proc_handler = &proc_lnet_stats,
+ },
+ {
+ INIT_CTL_NAME(PSDEV_LNET_ROUTES)
+ .procname = "routes",
+ .mode = 0444,
+ .proc_handler = &proc_lnet_routes,
+ },
+ {
+ INIT_CTL_NAME(PSDEV_LNET_ROUTERS)
+ .procname = "routers",
+ .mode = 0444,
+ .proc_handler = &proc_lnet_routers,
+ },
+ {
+ INIT_CTL_NAME(PSDEV_LNET_PEERS)
+ .procname = "peers",
+ .mode = 0444,
+ .proc_handler = &proc_lnet_peers,
+ },
+ {
+ INIT_CTL_NAME(PSDEV_LNET_PEERS)
+ .procname = "buffers",
+ .mode = 0444,
+ .proc_handler = &proc_lnet_buffers,
+ },
+ {
+ INIT_CTL_NAME(PSDEV_LNET_NIS)
+ .procname = "nis",
+ .mode = 0444,
+ .proc_handler = &proc_lnet_nis,
+ },
+ {
+ INIT_CTL_NAME(PSDEV_LNET_PTL_ROTOR)
+ .procname = "portal_rotor",
+ .mode = 0644,
+ .proc_handler = &proc_lnet_portal_rotor,
+ },
+ {
+ INIT_CTL_NAME(0)
+ }
+};
+
+static ctl_table_t top_table[] = {
+ {
+ INIT_CTL_NAME(CTL_LNET)
+ .procname = "lnet",
+ .mode = 0555,
+ .data = NULL,
+ .maxlen = 0,
+ .child = lnet_table,
+ },
+ {
+ INIT_CTL_NAME(0)
+ }
+};
+
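+ /* register the tables above as /proc/sys/lnet/<entry> */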
+void
+lnet_proc_init(void)
+{
+#ifdef CONFIG_SYSCTL
+ if (lnet_table_header == NULL)
+ lnet_table_header = register_sysctl_table(top_table);
+#endif
+}
+
+void
+lnet_proc_fini(void)
+{
+#ifdef CONFIG_SYSCTL
+ if (lnet_table_header != NULL)
+ unregister_sysctl_table(lnet_table_header);
+
+ lnet_table_header = NULL;
+#endif
+}
+
+#else
+
+void
+lnet_proc_init(void)
+{
+}
+
+void
+lnet_proc_fini(void)
+{
+}
+
+#endif
diff --git a/drivers/staging/lustre/lnet/selftest/Makefile b/drivers/staging/lustre/lnet/selftest/Makefile
new file mode 100644
index 0000000..1e40aee
--- /dev/null
+++ b/drivers/staging/lustre/lnet/selftest/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_LNET_SELFTEST) := lnet_selftest.o
+
+lnet_selftest-y := console.o conrpc.o conctl.o framework.o timer.o rpc.o \
+ module.o ping_test.o brw_test.o
+
+ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
new file mode 100644
index 0000000..ef5064e
--- /dev/null
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -0,0 +1,499 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/selftest/brw_test.c
+ *
+ * Author: Isaac Huang <isaac@clusterfs.com>
+ */
+
+#include "selftest.h"
+
+static int brw_srv_workitems = SFW_TEST_WI_MAX;
+CFS_MODULE_PARM(brw_srv_workitems, "i", int, 0644, "# BRW server workitems");
+
+static int brw_inject_errors;
+CFS_MODULE_PARM(brw_inject_errors, "i", int, 0644,
+ "# data errors to inject randomly, zero by default");
+
+static void
+brw_client_fini (sfw_test_instance_t *tsi)
+{
+ srpc_bulk_t *bulk;
+ sfw_test_unit_t *tsu;
+
+ LASSERT (tsi->tsi_is_client);
+
+ list_for_each_entry (tsu, &tsi->tsi_units, tsu_list) {
+ bulk = tsu->tsu_private;
+ if (bulk == NULL)
+ continue;
+
+ srpc_free_bulk(bulk);
+ tsu->tsu_private = NULL;
+ }
+}
+
+int
+brw_client_init (sfw_test_instance_t *tsi)
+{
+ sfw_session_t *sn = tsi->tsi_batch->bat_session;
+ int flags;
+ int npg;
+ int len;
+ int opc;
+ srpc_bulk_t *bulk;
+ sfw_test_unit_t *tsu;
+
+ LASSERT(sn != NULL);
+ LASSERT(tsi->tsi_is_client);
+
+ if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) {
+ test_bulk_req_t *breq = &tsi->tsi_u.bulk_v0;
+
+ opc = breq->blk_opc;
+ flags = breq->blk_flags;
+ npg = breq->blk_npg;
+ /* NB: this is not going to work for variable page size,
+ * but we have to keep it for compatibility */
+ len = npg * PAGE_CACHE_SIZE;
+
+ } else {
+ test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
+
+ /* we should never get this far with an unknown feature,
+ * because make_session will have rejected it */
+ LASSERT((sn->sn_features & ~LST_FEATS_MASK) == 0);
+
+ opc = breq->blk_opc;
+ flags = breq->blk_flags;
+ len = breq->blk_len;
+ npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ }
+
+ if (npg > LNET_MAX_IOV || npg <= 0)
+ return -EINVAL;
+
+ if (opc != LST_BRW_READ && opc != LST_BRW_WRITE)
+ return -EINVAL;
+
+ if (flags != LST_BRW_CHECK_NONE &&
+ flags != LST_BRW_CHECK_FULL && flags != LST_BRW_CHECK_SIMPLE)
+ return -EINVAL;
+
+ list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) {
+ bulk = srpc_alloc_bulk(lnet_cpt_of_nid(tsu->tsu_dest.nid),
+ npg, len, opc == LST_BRW_READ);
+ if (bulk == NULL) {
+ brw_client_fini(tsi);
+ return -ENOMEM;
+ }
+
+ tsu->tsu_private = bulk;
+ }
+
+ return 0;
+}
+
+#define BRW_POISON 0xbeefbeefbeefbeefULL
+#define BRW_MAGIC 0xeeb0eeb1eeb2eeb3ULL
+#define BRW_MSIZE sizeof(__u64)
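+
+ /* Outgoing bulk data is stamped with BRW_MAGIC (every BRW_MSIZE chunk
+ * for CHECK_FULL, just the first and last chunk of each page for
+ * CHECK_SIMPLE); buffers that are about to receive data are pre-filled
+ * with BRW_POISON so stale contents cannot pass the check by accident. */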
+
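+ /* When error injection is enabled, roughly half of the calls (odd
+ * tv_usec) return non-zero, which perturbs the magic value used to
+ * stamp a page and so manufactures a detectable data error. */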
+int
+brw_inject_one_error (void)
+{
+ struct timeval tv;
+
+ if (brw_inject_errors <= 0)
+ return 0;
+
+ do_gettimeofday(&tv);
+
+ if ((tv.tv_usec & 1) == 0)
+ return 0;
+
+ return brw_inject_errors--;
+}
+
+void
+brw_fill_page (struct page *pg, int pattern, __u64 magic)
+{
+ char *addr = page_address(pg);
+ int i;
+
+ LASSERT (addr != NULL);
+
+ if (pattern == LST_BRW_CHECK_NONE)
+ return;
+
+ if (magic == BRW_MAGIC)
+ magic += brw_inject_one_error();
+
+ if (pattern == LST_BRW_CHECK_SIMPLE) {
+ memcpy(addr, &magic, BRW_MSIZE);
+ addr += PAGE_CACHE_SIZE - BRW_MSIZE;
+ memcpy(addr, &magic, BRW_MSIZE);
+ return;
+ }
+
+ if (pattern == LST_BRW_CHECK_FULL) {
+ for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++)
+ memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE);
+ return;
+ }
+
+ LBUG();
+}
+
+int
+brw_check_page (struct page *pg, int pattern, __u64 magic)
+{
+ char *addr = page_address(pg);
+ __u64 data = 0; /* make compiler happy */
+ int i;
+
+ LASSERT (addr != NULL);
+
+ if (pattern == LST_BRW_CHECK_NONE)
+ return 0;
+
+ if (pattern == LST_BRW_CHECK_SIMPLE) {
+ data = *((__u64 *)addr);
+ if (data != magic)
+ goto bad_data;
+
+ addr += PAGE_CACHE_SIZE - BRW_MSIZE;
+ data = *((__u64 *)addr);
+ if (data != magic)
+ goto bad_data;
+
+ return 0;
+ }
+
+ if (pattern == LST_BRW_CHECK_FULL) {
+ for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) {
+ data = *(((__u64 *)addr) + i);
+ if (data != magic)
+ goto bad_data;
+ }
+
+ return 0;
+ }
+
+ LBUG ();
+
+bad_data:
+ CERROR ("Bad data in page %p: "LPX64", "LPX64" expected\n",
+ pg, data, magic);
+ return 1;
+}
+
+void
+brw_fill_bulk (srpc_bulk_t *bk, int pattern, __u64 magic)
+{
+ int i;
+ struct page *pg;
+
+ for (i = 0; i < bk->bk_niov; i++) {
+ pg = bk->bk_iovs[i].kiov_page;
+ brw_fill_page(pg, pattern, magic);
+ }
+}
+
+int
+brw_check_bulk (srpc_bulk_t *bk, int pattern, __u64 magic)
+{
+ int i;
+ struct page *pg;
+
+ for (i = 0; i < bk->bk_niov; i++) {
+ pg = bk->bk_iovs[i].kiov_page;
+ if (brw_check_page(pg, pattern, magic) != 0) {
+ CERROR ("Bulk page %p (%d/%d) is corrupted!\n",
+ pg, i, bk->bk_niov);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+brw_client_prep_rpc (sfw_test_unit_t *tsu,
+ lnet_process_id_t dest, srpc_client_rpc_t **rpcpp)
+{
+ srpc_bulk_t *bulk = tsu->tsu_private;
+ sfw_test_instance_t *tsi = tsu->tsu_instance;
+ sfw_session_t *sn = tsi->tsi_batch->bat_session;
+ srpc_client_rpc_t *rpc;
+ srpc_brw_reqst_t *req;
+ int flags;
+ int npg;
+ int len;
+ int opc;
+ int rc;
+
+ LASSERT(sn != NULL);
+ LASSERT(bulk != NULL);
+
+ if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) {
+ test_bulk_req_t *breq = &tsi->tsi_u.bulk_v0;
+
+ opc = breq->blk_opc;
+ flags = breq->blk_flags;
+ npg = breq->blk_npg;
+ len = npg * PAGE_CACHE_SIZE;
+
+ } else {
+ test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
+
+ /* we should never get this far with an unknown feature,
+ * because make_session will have rejected it */
+ LASSERT((sn->sn_features & ~LST_FEATS_MASK) == 0);
+
+ opc = breq->blk_opc;
+ flags = breq->blk_flags;
+ len = breq->blk_len;
+ npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ }
+
+ rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc);
+ if (rc != 0)
+ return rc;
+
+ memcpy(&rpc->crpc_bulk, bulk, offsetof(srpc_bulk_t, bk_iovs[npg]));
+ if (opc == LST_BRW_WRITE)
+ brw_fill_bulk(&rpc->crpc_bulk, flags, BRW_MAGIC);
+ else
+ brw_fill_bulk(&rpc->crpc_bulk, flags, BRW_POISON);
+
+ req = &rpc->crpc_reqstmsg.msg_body.brw_reqst;
+ req->brw_flags = flags;
+ req->brw_rw = opc;
+ req->brw_len = len;
+
+ *rpcpp = rpc;
+ return 0;
+}
+
+static void
+brw_client_done_rpc (sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
+{
+ __u64 magic = BRW_MAGIC;
+ sfw_test_instance_t *tsi = tsu->tsu_instance;
+ sfw_session_t *sn = tsi->tsi_batch->bat_session;
+ srpc_msg_t *msg = &rpc->crpc_replymsg;
+ srpc_brw_reply_t *reply = &msg->msg_body.brw_reply;
+ srpc_brw_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.brw_reqst;
+
+ LASSERT (sn != NULL);
+
+ if (rpc->crpc_status != 0) {
+ CERROR ("BRW RPC to %s failed with %d\n",
+ libcfs_id2str(rpc->crpc_dest), rpc->crpc_status);
+ if (!tsi->tsi_stopping) /* rpc could have been aborted */
+ atomic_inc(&sn->sn_brw_errors);
+ goto out;
+ }
+
+ if (msg->msg_magic != SRPC_MSG_MAGIC) {
+ __swab64s(&magic);
+ __swab32s(&reply->brw_status);
+ }
+
+ CDEBUG (reply->brw_status ? D_WARNING : D_NET,
+ "BRW RPC to %s finished with brw_status: %d\n",
+ libcfs_id2str(rpc->crpc_dest), reply->brw_status);
+
+ if (reply->brw_status != 0) {
+ atomic_inc(&sn->sn_brw_errors);
+ rpc->crpc_status = -(int)reply->brw_status;
+ goto out;
+ }
+
+ if (reqst->brw_rw == LST_BRW_WRITE)
+ goto out;
+
+ if (brw_check_bulk(&rpc->crpc_bulk, reqst->brw_flags, magic) != 0) {
+ CERROR ("Bulk data from %s is corrupted!\n",
+ libcfs_id2str(rpc->crpc_dest));
+ atomic_inc(&sn->sn_brw_errors);
+ rpc->crpc_status = -EBADMSG;
+ }
+
+out:
+ return;
+}
+
+void
+brw_server_rpc_done (srpc_server_rpc_t *rpc)
+{
+ srpc_bulk_t *blk = rpc->srpc_bulk;
+
+ if (blk == NULL)
+ return;
+
+ if (rpc->srpc_status != 0)
+ CERROR ("Bulk transfer %s %s has failed: %d\n",
+ blk->bk_sink ? "from" : "to",
+ libcfs_id2str(rpc->srpc_peer), rpc->srpc_status);
+ else
+ CDEBUG (D_NET, "Transferred %d pages bulk data %s %s\n",
+ blk->bk_niov, blk->bk_sink ? "from" : "to",
+ libcfs_id2str(rpc->srpc_peer));
+
+ sfw_free_pages(rpc);
+}
+
+int
+brw_bulk_ready (srpc_server_rpc_t *rpc, int status)
+{
+ __u64 magic = BRW_MAGIC;
+ srpc_brw_reply_t *reply = &rpc->srpc_replymsg.msg_body.brw_reply;
+ srpc_brw_reqst_t *reqst;
+ srpc_msg_t *reqstmsg;
+
+ LASSERT (rpc->srpc_bulk != NULL);
+ LASSERT (rpc->srpc_reqstbuf != NULL);
+
+ reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
+ reqst = &reqstmsg->msg_body.brw_reqst;
+
+ if (status != 0) {
+ CERROR ("BRW bulk %s failed for RPC from %s: %d\n",
+ reqst->brw_rw == LST_BRW_READ ? "READ" : "WRITE",
+ libcfs_id2str(rpc->srpc_peer), status);
+ return -EIO;
+ }
+
+ if (reqst->brw_rw == LST_BRW_READ)
+ return 0;
+
+ if (reqstmsg->msg_magic != SRPC_MSG_MAGIC)
+ __swab64s(&magic);
+
+ if (brw_check_bulk(rpc->srpc_bulk, reqst->brw_flags, magic) != 0) {
+ CERROR ("Bulk data from %s is corrupted!\n",
+ libcfs_id2str(rpc->srpc_peer));
+ reply->brw_status = EBADMSG;
+ }
+
+ return 0;
+}
+
+int
+brw_server_handle(struct srpc_server_rpc *rpc)
+{
+ struct srpc_service *sv = rpc->srpc_scd->scd_svc;
+ srpc_msg_t *replymsg = &rpc->srpc_replymsg;
+ srpc_msg_t *reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
+ srpc_brw_reply_t *reply = &replymsg->msg_body.brw_reply;
+ srpc_brw_reqst_t *reqst = &reqstmsg->msg_body.brw_reqst;
+ int npg;
+ int rc;
+
+ LASSERT (sv->sv_id == SRPC_SERVICE_BRW);
+
+ if (reqstmsg->msg_magic != SRPC_MSG_MAGIC) {
+ LASSERT (reqstmsg->msg_magic == __swab32(SRPC_MSG_MAGIC));
+
+ __swab32s(&reqst->brw_rw);
+ __swab32s(&reqst->brw_len);
+ __swab32s(&reqst->brw_flags);
+ __swab64s(&reqst->brw_rpyid);
+ __swab64s(&reqst->brw_bulkid);
+ }
+ LASSERT (reqstmsg->msg_type == (__u32)srpc_service2request(sv->sv_id));
+
+ reply->brw_status = 0;
+ rpc->srpc_done = brw_server_rpc_done;
+
+ if ((reqst->brw_rw != LST_BRW_READ && reqst->brw_rw != LST_BRW_WRITE) ||
+ (reqst->brw_flags != LST_BRW_CHECK_NONE &&
+ reqst->brw_flags != LST_BRW_CHECK_FULL &&
+ reqst->brw_flags != LST_BRW_CHECK_SIMPLE)) {
+ reply->brw_status = EINVAL;
+ return 0;
+ }
+
+ if ((reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
+ replymsg->msg_ses_feats = LST_FEATS_MASK;
+ reply->brw_status = EPROTO;
+ return 0;
+ }
+
+ if ((reqstmsg->msg_ses_feats & LST_FEAT_BULK_LEN) == 0) {
+ /* compat with old version */
+ if ((reqst->brw_len & ~CFS_PAGE_MASK) != 0) {
+ reply->brw_status = EINVAL;
+ return 0;
+ }
+ npg = reqst->brw_len >> PAGE_CACHE_SHIFT;
+
+ } else {
+ npg = (reqst->brw_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ }
+
+ replymsg->msg_ses_feats = reqstmsg->msg_ses_feats;
+
+ if (reqst->brw_len == 0 || npg > LNET_MAX_IOV) {
+ reply->brw_status = EINVAL;
+ return 0;
+ }
+
+ rc = sfw_alloc_pages(rpc, rpc->srpc_scd->scd_cpt, npg,
+ reqst->brw_len,
+ reqst->brw_rw == LST_BRW_WRITE);
+ if (rc != 0)
+ return rc;
+
+ if (reqst->brw_rw == LST_BRW_READ)
+ brw_fill_bulk(rpc->srpc_bulk, reqst->brw_flags, BRW_MAGIC);
+ else
+ brw_fill_bulk(rpc->srpc_bulk, reqst->brw_flags, BRW_POISON);
+
+ return 0;
+}
+
+sfw_test_client_ops_t brw_test_client;
+void brw_init_test_client(void)
+{
+ brw_test_client.tso_init = brw_client_init;
+ brw_test_client.tso_fini = brw_client_fini;
+ brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
+ brw_test_client.tso_done_rpc = brw_client_done_rpc;
+}
+
+srpc_service_t brw_test_service;
+void brw_init_test_service(void)
+{
+ brw_test_service.sv_id = SRPC_SERVICE_BRW;
+ brw_test_service.sv_name = "brw_test";
+ brw_test_service.sv_handler = brw_server_handle;
+ brw_test_service.sv_bulk_ready = brw_bulk_ready;
+ brw_test_service.sv_wi_total = brw_srv_workitems;
+}
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
new file mode 100644
index 0000000..bce3d3b
--- /dev/null
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -0,0 +1,931 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/selftest/conctl.c
+ *
+ * IOC handle in kernel
+ *
+ * Author: Liang Zhen <liangzhen@clusterfs.com>
+ */
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/lnet/lib-lnet.h>
+#include <linux/lnet/lnetst.h>
+#include "console.h"
+
+int
+lst_session_new_ioctl(lstio_session_new_args_t *args)
+{
+ char *name;
+ int rc;
+
+ if (args->lstio_ses_idp == NULL || /* address for output sid */
+ args->lstio_ses_key == 0 || /* no key is specified */
+ args->lstio_ses_namep == NULL || /* session name */
+ args->lstio_ses_nmlen <= 0 ||
+ args->lstio_ses_nmlen > LST_NAME_SIZE)
+ return -EINVAL;
+
+ LIBCFS_ALLOC(name, args->lstio_ses_nmlen + 1);
+ if (name == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(name,
+ args->lstio_ses_namep,
+ args->lstio_ses_nmlen)) {
+ LIBCFS_FREE(name, args->lstio_ses_nmlen + 1);
+ return -EFAULT;
+ }
+
+ name[args->lstio_ses_nmlen] = 0;
+
+ rc = lstcon_session_new(name,
+ args->lstio_ses_key,
+ args->lstio_ses_feats,
+ args->lstio_ses_force,
+ args->lstio_ses_timeout,
+ args->lstio_ses_idp);
+
+ LIBCFS_FREE(name, args->lstio_ses_nmlen + 1);
+ return rc;
+}
+
+int
+lst_session_end_ioctl(lstio_session_end_args_t *args)
+{
+ if (args->lstio_ses_key != console_session.ses_key)
+ return -EACCES;
+
+ return lstcon_session_end();
+}
+
+int
+lst_session_info_ioctl(lstio_session_info_args_t *args)
+{
+ /* no checking of key */
+
+ if (args->lstio_ses_idp == NULL || /* address for output sid */
+ args->lstio_ses_keyp == NULL || /* address for output key */
+ args->lstio_ses_featp == NULL || /* address for output features */
+ args->lstio_ses_ndinfo == NULL || /* address for output ndinfo */
+ args->lstio_ses_namep == NULL || /* address for output name */
+ args->lstio_ses_nmlen <= 0 ||
+ args->lstio_ses_nmlen > LST_NAME_SIZE)
+ return -EINVAL;
+
+ return lstcon_session_info(args->lstio_ses_idp,
+ args->lstio_ses_keyp,
+ args->lstio_ses_featp,
+ args->lstio_ses_ndinfo,
+ args->lstio_ses_namep,
+ args->lstio_ses_nmlen);
+}
+
+int
+lst_debug_ioctl(lstio_debug_args_t *args)
+{
+ char *name = NULL;
+ int client = 1;
+ int rc;
+
+ if (args->lstio_dbg_key != console_session.ses_key)
+ return -EACCES;
+
+ if (args->lstio_dbg_resultp == NULL)
+ return -EINVAL;
+
+ if (args->lstio_dbg_namep != NULL && /* name of batch/group */
+ (args->lstio_dbg_nmlen <= 0 ||
+ args->lstio_dbg_nmlen > LST_NAME_SIZE))
+ return -EINVAL;
+
+ if (args->lstio_dbg_namep != NULL) {
+ LIBCFS_ALLOC(name, args->lstio_dbg_nmlen + 1);
+ if (name == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(name, args->lstio_dbg_namep,
+ args->lstio_dbg_nmlen)) {
+ LIBCFS_FREE(name, args->lstio_dbg_nmlen + 1);
+
+ return -EFAULT;
+ }
+
+ name[args->lstio_dbg_nmlen] = 0;
+ }
+
+ rc = -EINVAL;
+
+ switch (args->lstio_dbg_type) {
+ case LST_OPC_SESSION:
+ rc = lstcon_session_debug(args->lstio_dbg_timeout,
+ args->lstio_dbg_resultp);
+ break;
+
+ case LST_OPC_BATCHSRV:
+ client = 0;
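+ /* fall through */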
+ case LST_OPC_BATCHCLI:
+ if (name == NULL)
+ goto out;
+
+ rc = lstcon_batch_debug(args->lstio_dbg_timeout,
+ name, client, args->lstio_dbg_resultp);
+ break;
+
+ case LST_OPC_GROUP:
+ if (name == NULL)
+ goto out;
+
+ rc = lstcon_group_debug(args->lstio_dbg_timeout,
+ name, args->lstio_dbg_resultp);
+ break;
+
+ case LST_OPC_NODES:
+ if (args->lstio_dbg_count <= 0 ||
+ args->lstio_dbg_idsp == NULL)
+ goto out;
+
+ rc = lstcon_nodes_debug(args->lstio_dbg_timeout,
+ args->lstio_dbg_count,
+ args->lstio_dbg_idsp,
+ args->lstio_dbg_resultp);
+ break;
+
+ default:
+ break;
+ }
+
+out:
+ if (name != NULL)
+ LIBCFS_FREE(name, args->lstio_dbg_nmlen + 1);
+
+ return rc;
+}
+
+int
+lst_group_add_ioctl(lstio_group_add_args_t *args)
+{
+ char *name;
+ int rc;
+
+ if (args->lstio_grp_key != console_session.ses_key)
+ return -EACCES;
+
+ if (args->lstio_grp_namep == NULL ||
+ args->lstio_grp_nmlen <= 0 ||
+ args->lstio_grp_nmlen > LST_NAME_SIZE)
+ return -EINVAL;
+
+ LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
+ if (name == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(name,
+ args->lstio_grp_namep,
+ args->lstio_grp_nmlen)) {
+ LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
+ return -EFAULT;
+ }
+
+ name[args->lstio_grp_nmlen] = 0;
+
+ rc = lstcon_group_add(name);
+
+ LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
+
+ return rc;
+}
+
+int
+lst_group_del_ioctl(lstio_group_del_args_t *args)
+{
+ int rc;
+ char *name;
+
+ if (args->lstio_grp_key != console_session.ses_key)
+ return -EACCES;
+
+ if (args->lstio_grp_namep == NULL ||
+ args->lstio_grp_nmlen <= 0 ||
+ args->lstio_grp_nmlen > LST_NAME_SIZE)
+ return -EINVAL;
+
+ LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
+ if (name == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(name,
+ args->lstio_grp_namep,
+ args->lstio_grp_nmlen)) {
+ LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
+ return -EFAULT;
+ }
+
+ name[args->lstio_grp_nmlen] = 0;
+
+ rc = lstcon_group_del(name);
+
+ LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
+
+ return rc;
+}
+
+int
+lst_group_update_ioctl(lstio_group_update_args_t *args)
+{
+ int rc;
+ char *name;
+
+ if (args->lstio_grp_key != console_session.ses_key)
+ return -EACCES;
+
+ if (args->lstio_grp_resultp == NULL ||
+ args->lstio_grp_namep == NULL ||
+ args->lstio_grp_nmlen <= 0 ||
+ args->lstio_grp_nmlen > LST_NAME_SIZE)
+ return -EINVAL;
+
+ LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
+ if (name == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(name,
+ args->lstio_grp_namep,
+ args->lstio_grp_nmlen)) {
+ LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
+ return -EFAULT;
+ }
+
+ name[args->lstio_grp_nmlen] = 0;
+
+ switch (args->lstio_grp_opc) {
+ case LST_GROUP_CLEAN:
+ rc = lstcon_group_clean(name, args->lstio_grp_args);
+ break;
+
+ case LST_GROUP_REFRESH:
+ rc = lstcon_group_refresh(name, args->lstio_grp_resultp);
+ break;
+
+ case LST_GROUP_RMND:
+ if (args->lstio_grp_count <= 0 ||
+ args->lstio_grp_idsp == NULL) {
+ rc = -EINVAL;
+ break;
+ }
+ rc = lstcon_nodes_remove(name, args->lstio_grp_count,
+ args->lstio_grp_idsp,
+ args->lstio_grp_resultp);
+ break;
+
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
+
+ return rc;
+}
+
+int
+lst_nodes_add_ioctl(lstio_group_nodes_args_t *args)
+{
+ unsigned feats;
+ int rc;
+ char *name;
+
+ if (args->lstio_grp_key != console_session.ses_key)
+ return -EACCES;
+
+ if (args->lstio_grp_idsp == NULL || /* array of ids */
+ args->lstio_grp_count <= 0 ||
+ args->lstio_grp_resultp == NULL ||
+ args->lstio_grp_featp == NULL ||
+ args->lstio_grp_namep == NULL ||
+ args->lstio_grp_nmlen <= 0 ||
+ args->lstio_grp_nmlen > LST_NAME_SIZE)
+ return -EINVAL;
+
+ LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
+ if (name == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(name, args->lstio_grp_namep,
+ args->lstio_grp_nmlen)) {
+ LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
+
+ return -EFAULT;
+ }
+
+ name[args->lstio_grp_nmlen] = 0;
+
+ rc = lstcon_nodes_add(name, args->lstio_grp_count,
+ args->lstio_grp_idsp, &feats,
+ args->lstio_grp_resultp);
+
+ LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
+ if (rc == 0 &&
+ copy_to_user(args->lstio_grp_featp, &feats, sizeof(feats))) {
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+int
+lst_group_list_ioctl(lstio_group_list_args_t *args)
+{
+ if (args->lstio_grp_key != console_session.ses_key)
+ return -EACCES;
+
+ if (args->lstio_grp_idx < 0 ||
+ args->lstio_grp_namep == NULL ||
+ args->lstio_grp_nmlen <= 0 ||
+ args->lstio_grp_nmlen > LST_NAME_SIZE)
+ return -EINVAL;
+
+ return lstcon_group_list(args->lstio_grp_idx,
+ args->lstio_grp_nmlen,
+ args->lstio_grp_namep);
+}
+
+int
+lst_group_info_ioctl(lstio_group_info_args_t *args)
+{
+ char *name;
+ int ndent;
+ int index;
+ int rc;
+
+ if (args->lstio_grp_key != console_session.ses_key)
+ return -EACCES;
+
+ if (args->lstio_grp_namep == NULL ||
+ args->lstio_grp_nmlen <= 0 ||
+ args->lstio_grp_nmlen > LST_NAME_SIZE)
+ return -EINVAL;
+
+ if (args->lstio_grp_entp == NULL && /* output: group entry */
+ args->lstio_grp_dentsp == NULL) /* output: node entry */
+ return -EINVAL;
+
+ if (args->lstio_grp_dentsp != NULL) { /* have node entry */
+ if (args->lstio_grp_idxp == NULL || /* node index */
+ args->lstio_grp_ndentp == NULL) /* # of node entry */
+ return -EINVAL;
+
+ if (copy_from_user(&ndent, args->lstio_grp_ndentp,
+ sizeof(ndent)) ||
+ copy_from_user(&index, args->lstio_grp_idxp,
+ sizeof(index)))
+ return -EFAULT;
+
+ if (ndent <= 0 || index < 0)
+ return -EINVAL;
+ }
+
+ LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
+ if (name == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(name,
+ args->lstio_grp_namep,
+ args->lstio_grp_nmlen)) {
+ LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
+ return -EFAULT;
+ }
+
+ name[args->lstio_grp_nmlen] = 0;
+
+ rc = lstcon_group_info(name, args->lstio_grp_entp,
+ &index, &ndent, args->lstio_grp_dentsp);
+
+ LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
+
+ if (rc != 0)
+ return rc;
+
+ if (args->lstio_grp_dentsp != NULL &&
+ (copy_to_user(args->lstio_grp_idxp, &index, sizeof(index)) ||
+ copy_to_user(args->lstio_grp_ndentp, &ndent, sizeof(ndent))))
+ rc = -EFAULT;
+
+ return rc;
+}
+
+int
+lst_batch_add_ioctl(lstio_batch_add_args_t *args)
+{
+ int rc;
+ char *name;
+
+ if (args->lstio_bat_key != console_session.ses_key)
+ return -EACCES;
+
+ if (args->lstio_bat_namep == NULL ||
+ args->lstio_bat_nmlen <= 0 ||
+ args->lstio_bat_nmlen > LST_NAME_SIZE)
+ return -EINVAL;
+
+ LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
+ if (name == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(name,
+ args->lstio_bat_namep,
+ args->lstio_bat_nmlen)) {
+ LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+ return -EFAULT;
+ }
+
+ name[args->lstio_bat_nmlen] = 0;
+
+ rc = lstcon_batch_add(name);
+
+ LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+
+ return rc;
+}
+
+int
+lst_batch_run_ioctl(lstio_batch_run_args_t *args)
+{
+ int rc;
+ char *name;
+
+ if (args->lstio_bat_key != console_session.ses_key)
+ return -EACCES;
+
+ if (args->lstio_bat_namep == NULL ||
+ args->lstio_bat_nmlen <= 0 ||
+ args->lstio_bat_nmlen > LST_NAME_SIZE)
+ return -EINVAL;
+
+ LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
+ if (name == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(name,
+ args->lstio_bat_namep,
+ args->lstio_bat_nmlen)) {
+ LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+ return -EFAULT;
+ }
+
+ name[args->lstio_bat_nmlen] = 0;
+
+ rc = lstcon_batch_run(name, args->lstio_bat_timeout,
+ args->lstio_bat_resultp);
+
+ LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+
+ return rc;
+}
+
+int
+lst_batch_stop_ioctl(lstio_batch_stop_args_t *args)
+{
+ int rc;
+ char *name;
+
+ if (args->lstio_bat_key != console_session.ses_key)
+ return -EACCES;
+
+ if (args->lstio_bat_resultp == NULL ||
+ args->lstio_bat_namep == NULL ||
+ args->lstio_bat_nmlen <= 0 ||
+ args->lstio_bat_nmlen > LST_NAME_SIZE)
+ return -EINVAL;
+
+ LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
+ if (name == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(name,
+ args->lstio_bat_namep,
+ args->lstio_bat_nmlen)) {
+ LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+ return -EFAULT;
+ }
+
+ name[args->lstio_bat_nmlen] = 0;
+
+ rc = lstcon_batch_stop(name, args->lstio_bat_force,
+ args->lstio_bat_resultp);
+
+ LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+
+ return rc;
+}
+
+int
+lst_batch_query_ioctl(lstio_batch_query_args_t *args)
+{
+ char *name;
+ int rc;
+
+ if (args->lstio_bat_key != console_session.ses_key)
+ return -EACCES;
+
+ if (args->lstio_bat_resultp == NULL ||
+ args->lstio_bat_namep == NULL ||
+ args->lstio_bat_nmlen <= 0 ||
+ args->lstio_bat_nmlen > LST_NAME_SIZE)
+ return -EINVAL;
+
+ if (args->lstio_bat_testidx < 0)
+ return -EINVAL;
+
+ LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
+ if (name == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(name,
+ args->lstio_bat_namep,
+ args->lstio_bat_nmlen)) {
+ LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+ return -EFAULT;
+ }
+
+ name[args->lstio_bat_nmlen] = 0;
+
+ rc = lstcon_test_batch_query(name,
+ args->lstio_bat_testidx,
+ args->lstio_bat_client,
+ args->lstio_bat_timeout,
+ args->lstio_bat_resultp);
+
+ LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+
+ return rc;
+}
+
+int
+lst_batch_list_ioctl(lstio_batch_list_args_t *args)
+{
+ if (args->lstio_bat_key != console_session.ses_key)
+ return -EACCES;
+
+ if (args->lstio_bat_idx < 0 ||
+ args->lstio_bat_namep == NULL ||
+ args->lstio_bat_nmlen <= 0 ||
+ args->lstio_bat_nmlen > LST_NAME_SIZE)
+ return -EINVAL;
+
+ return lstcon_batch_list(args->lstio_bat_idx,
+ args->lstio_bat_nmlen,
+ args->lstio_bat_namep);
+}
+
+int
+lst_batch_info_ioctl(lstio_batch_info_args_t *args)
+{
+ char *name;
+ int rc;
+ int index;
+ int ndent;
+
+ if (args->lstio_bat_key != console_session.ses_key)
+ return -EACCES;
+
+ if (args->lstio_bat_namep == NULL || /* batch name */
+ args->lstio_bat_nmlen <= 0 ||
+ args->lstio_bat_nmlen > LST_NAME_SIZE)
+ return -EINVAL;
+
+ if (args->lstio_bat_entp == NULL && /* output: batch entry */
+ args->lstio_bat_dentsp == NULL) /* output: node entry */
+ return -EINVAL;
+
+ if (args->lstio_bat_dentsp != NULL) { /* have node entry */
+ if (args->lstio_bat_idxp == NULL || /* node index */
+ args->lstio_bat_ndentp == NULL) /* # of node entry */
+ return -EINVAL;
+
+ if (copy_from_user(&index, args->lstio_bat_idxp,
+ sizeof(index)) ||
+ copy_from_user(&ndent, args->lstio_bat_ndentp,
+ sizeof(ndent)))
+ return -EFAULT;
+
+ if (ndent <= 0 || index < 0)
+ return -EINVAL;
+ }
+
+ LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
+ if (name == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(name,
+ args->lstio_bat_namep, args->lstio_bat_nmlen)) {
+ LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+ return -EFAULT;
+ }
+
+ name[args->lstio_bat_nmlen] = 0;
+
+ rc = lstcon_batch_info(name,
+ args->lstio_bat_entp, args->lstio_bat_server,
+ args->lstio_bat_testidx, &index, &ndent,
+ args->lstio_bat_dentsp);
+
+ LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+
+ if (rc != 0)
+ return rc;
+
+ if (args->lstio_bat_dentsp != NULL &&
+ (copy_to_user(args->lstio_bat_idxp, &index, sizeof(index)) ||
+ copy_to_user(args->lstio_bat_ndentp, &ndent, sizeof(ndent))))
+ rc = -EFAULT;
+
+ return rc;
+}
+
+int
+lst_stat_query_ioctl(lstio_stat_args_t *args)
+{
+ int rc;
+ char *name;
+
+ /* TODO: not finished */
+ if (args->lstio_sta_key != console_session.ses_key)
+ return -EACCES;
+
+ if (args->lstio_sta_resultp == NULL ||
+ (args->lstio_sta_namep == NULL &&
+ args->lstio_sta_idsp == NULL) ||
+ args->lstio_sta_nmlen <= 0 ||
+ args->lstio_sta_nmlen > LST_NAME_SIZE)
+ return -EINVAL;
+
+ if (args->lstio_sta_idsp != NULL &&
+ args->lstio_sta_count <= 0)
+ return -EINVAL;
+
+ LIBCFS_ALLOC(name, args->lstio_sta_nmlen + 1);
+ if (name == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(name, args->lstio_sta_namep,
+ args->lstio_sta_nmlen)) {
+ LIBCFS_FREE(name, args->lstio_sta_nmlen + 1);
+ return -EFAULT;
+ }
+
+ name[args->lstio_sta_nmlen] = 0;
+
+ if (args->lstio_sta_idsp == NULL) {
+ rc = lstcon_group_stat(name, args->lstio_sta_timeout,
+ args->lstio_sta_resultp);
+ } else {
+ rc = lstcon_nodes_stat(args->lstio_sta_count,
+ args->lstio_sta_idsp,
+ args->lstio_sta_timeout,
+ args->lstio_sta_resultp);
+ }
+
+ LIBCFS_FREE(name, args->lstio_sta_nmlen + 1);
+
+ return rc;
+}
+
+int lst_test_add_ioctl(lstio_test_args_t *args)
+{
+ char *name;
+ char *srcgrp = NULL;
+ char *dstgrp = NULL;
+ void *param = NULL;
+ int ret = 0;
+ int rc = -ENOMEM;
+
+ if (args->lstio_tes_resultp == NULL ||
+ args->lstio_tes_retp == NULL ||
+ args->lstio_tes_bat_name == NULL || /* no specified batch */
+ args->lstio_tes_bat_nmlen <= 0 ||
+ args->lstio_tes_bat_nmlen > LST_NAME_SIZE ||
+ args->lstio_tes_sgrp_name == NULL || /* no source group */
+ args->lstio_tes_sgrp_nmlen <= 0 ||
+ args->lstio_tes_sgrp_nmlen > LST_NAME_SIZE ||
+ args->lstio_tes_dgrp_name == NULL || /* no target group */
+ args->lstio_tes_dgrp_nmlen <= 0 ||
+ args->lstio_tes_dgrp_nmlen > LST_NAME_SIZE)
+ return -EINVAL;
+
+ if (args->lstio_tes_loop == 0 || /* negative is infinite */
+ args->lstio_tes_concur <= 0 ||
+ args->lstio_tes_dist <= 0 ||
+ args->lstio_tes_span <= 0)
+ return -EINVAL;
+
+ /* have parameter, check if parameter length is valid */
+ if (args->lstio_tes_param != NULL &&
+ (args->lstio_tes_param_len <= 0 ||
+ args->lstio_tes_param_len > PAGE_CACHE_SIZE - sizeof(lstcon_test_t)))
+ return -EINVAL;
+
+ LIBCFS_ALLOC(name, args->lstio_tes_bat_nmlen + 1);
+ if (name == NULL)
+ return rc;
+
+ LIBCFS_ALLOC(srcgrp, args->lstio_tes_sgrp_nmlen + 1);
+ if (srcgrp == NULL)
+ goto out;
+
+ LIBCFS_ALLOC(dstgrp, args->lstio_tes_dgrp_nmlen + 1);
+ if (dstgrp == NULL)
+ goto out;
+
+ if (args->lstio_tes_param != NULL) {
+ LIBCFS_ALLOC(param, args->lstio_tes_param_len);
+ if (param == NULL)
+ goto out;
+ }
+
+ rc = -EFAULT;
+ if (copy_from_user(name,
+ args->lstio_tes_bat_name,
+ args->lstio_tes_bat_nmlen) ||
+ copy_from_user(srcgrp,
+ args->lstio_tes_sgrp_name,
+ args->lstio_tes_sgrp_nmlen) ||
+ copy_from_user(dstgrp,
+ args->lstio_tes_dgrp_name,
+ args->lstio_tes_dgrp_nmlen) ||
+ (args->lstio_tes_param != NULL &&
+ copy_from_user(param, args->lstio_tes_param,
+ args->lstio_tes_param_len)))
+ goto out;
+
+ rc = lstcon_test_add(name,
+ args->lstio_tes_type,
+ args->lstio_tes_loop,
+ args->lstio_tes_concur,
+ args->lstio_tes_dist, args->lstio_tes_span,
+ srcgrp, dstgrp, param, args->lstio_tes_param_len,
+ &ret, args->lstio_tes_resultp);
+
+ if (ret != 0)
+ rc = (copy_to_user(args->lstio_tes_retp, &ret,
+ sizeof(ret))) ? -EFAULT : 0;
+out:
+ if (name != NULL)
+ LIBCFS_FREE(name, args->lstio_tes_bat_nmlen + 1);
+
+ if (srcgrp != NULL)
+ LIBCFS_FREE(srcgrp, args->lstio_tes_sgrp_nmlen + 1);
+
+ if (dstgrp != NULL)
+ LIBCFS_FREE(dstgrp, args->lstio_tes_dgrp_nmlen + 1);
+
+ if (param != NULL)
+ LIBCFS_FREE(param, args->lstio_tes_param_len);
+
+ return rc;
+}
+
+int
+lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data)
+{
+ char *buf;
+ int opc = data->ioc_u32[0];
+ int rc;
+
+ if (cmd != IOC_LIBCFS_LNETST)
+ return -EINVAL;
+
+ if (data->ioc_plen1 > PAGE_CACHE_SIZE)
+ return -EINVAL;
+
+ LIBCFS_ALLOC(buf, data->ioc_plen1);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ /* copy in parameter */
+ if (copy_from_user(buf, data->ioc_pbuf1, data->ioc_plen1)) {
+ LIBCFS_FREE(buf, data->ioc_plen1);
+ return -EFAULT;
+ }
+
+ mutex_lock(&console_session.ses_mutex);
+
+ console_session.ses_laststamp = cfs_time_current_sec();
+
+ if (console_session.ses_shutdown) {
+ rc = -ESHUTDOWN;
+ goto out;
+ }
+
+ if (console_session.ses_expired)
+ lstcon_session_end();
+
+ if (opc != LSTIO_SESSION_NEW &&
+ console_session.ses_state == LST_SESSION_NONE) {
+ CDEBUG(D_NET, "LST no active session\n");
+ rc = -ESRCH;
+ goto out;
+ }
+
+ memset(&console_session.ses_trans_stat, 0, sizeof(lstcon_trans_stat_t));
+
+ switch (opc) {
+ case LSTIO_SESSION_NEW:
+ rc = lst_session_new_ioctl((lstio_session_new_args_t *)buf);
+ break;
+ case LSTIO_SESSION_END:
+ rc = lst_session_end_ioctl((lstio_session_end_args_t *)buf);
+ break;
+ case LSTIO_SESSION_INFO:
+ rc = lst_session_info_ioctl((lstio_session_info_args_t *)buf);
+ break;
+ case LSTIO_DEBUG:
+ rc = lst_debug_ioctl((lstio_debug_args_t *)buf);
+ break;
+ case LSTIO_GROUP_ADD:
+ rc = lst_group_add_ioctl((lstio_group_add_args_t *)buf);
+ break;
+ case LSTIO_GROUP_DEL:
+ rc = lst_group_del_ioctl((lstio_group_del_args_t *)buf);
+ break;
+ case LSTIO_GROUP_UPDATE:
+ rc = lst_group_update_ioctl((lstio_group_update_args_t *)buf);
+ break;
+ case LSTIO_NODES_ADD:
+ rc = lst_nodes_add_ioctl((lstio_group_nodes_args_t *)buf);
+ break;
+ case LSTIO_GROUP_LIST:
+ rc = lst_group_list_ioctl((lstio_group_list_args_t *)buf);
+ break;
+ case LSTIO_GROUP_INFO:
+ rc = lst_group_info_ioctl((lstio_group_info_args_t *)buf);
+ break;
+ case LSTIO_BATCH_ADD:
+ rc = lst_batch_add_ioctl((lstio_batch_add_args_t *)buf);
+ break;
+ case LSTIO_BATCH_START:
+ rc = lst_batch_run_ioctl((lstio_batch_run_args_t *)buf);
+ break;
+ case LSTIO_BATCH_STOP:
+ rc = lst_batch_stop_ioctl((lstio_batch_stop_args_t *)buf);
+ break;
+ case LSTIO_BATCH_QUERY:
+ rc = lst_batch_query_ioctl((lstio_batch_query_args_t *)buf);
+ break;
+ case LSTIO_BATCH_LIST:
+ rc = lst_batch_list_ioctl((lstio_batch_list_args_t *)buf);
+ break;
+ case LSTIO_BATCH_INFO:
+ rc = lst_batch_info_ioctl((lstio_batch_info_args_t *)buf);
+ break;
+ case LSTIO_TEST_ADD:
+ rc = lst_test_add_ioctl((lstio_test_args_t *)buf);
+ break;
+ case LSTIO_STAT_QUERY:
+ rc = lst_stat_query_ioctl((lstio_stat_args_t *)buf);
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ if (copy_to_user(data->ioc_pbuf2, &console_session.ses_trans_stat,
+ sizeof(lstcon_trans_stat_t)))
+ rc = -EFAULT;
+out:
+ mutex_unlock(&console_session.ses_mutex);
+
+ LIBCFS_FREE(buf, data->ioc_plen1);
+
+ return rc;
+}
+
+EXPORT_SYMBOL(lstcon_ioctl_entry);
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
new file mode 100644
index 0000000..cbce662
--- /dev/null
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -0,0 +1,1397 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/selftest/conrpc.c
+ *
+ * Console framework rpcs
+ *
+ * Author: Liang Zhen <liang@whamcloud.com>
+ */
+
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/lnet/lib-lnet.h>
+#include "timer.h"
+#include "conrpc.h"
+#include "console.h"
+
+void lstcon_rpc_stat_reply(lstcon_rpc_trans_t *, srpc_msg_t *,
+ lstcon_node_t *, lstcon_trans_stat_t *);
+
+static void
+lstcon_rpc_done(srpc_client_rpc_t *rpc)
+{
+ lstcon_rpc_t *crpc = (lstcon_rpc_t *)rpc->crpc_priv;
+
+ LASSERT(crpc != NULL && rpc == crpc->crp_rpc);
+ LASSERT(crpc->crp_posted && !crpc->crp_finished);
+
+ spin_lock(&rpc->crpc_lock);
+
+ if (crpc->crp_trans == NULL) {
+ /* Orphan RPC is not in any transaction,
+ * I'm just a poor body and nobody loves me */
+ spin_unlock(&rpc->crpc_lock);
+
+ /* release it */
+ lstcon_rpc_put(crpc);
+ return;
+ }
+
+ /* not an orphan RPC */
+ crpc->crp_finished = 1;
+
+ if (crpc->crp_stamp == 0) {
+ /* not aborted */
+ LASSERT (crpc->crp_status == 0);
+
+ crpc->crp_stamp = cfs_time_current();
+ crpc->crp_status = rpc->crpc_status;
+ }
+
+ /* wake up the transaction thread if I'm the last RPC in the transaction */
+ if (atomic_dec_and_test(&crpc->crp_trans->tas_remaining))
+ wake_up(&crpc->crp_trans->tas_waitq);
+
+ spin_unlock(&rpc->crpc_lock);
+}
+
+int
+lstcon_rpc_init(lstcon_node_t *nd, int service, unsigned feats,
+ int bulk_npg, int bulk_len, int embedded, lstcon_rpc_t *crpc)
+{
+ crpc->crp_rpc = sfw_create_rpc(nd->nd_id, service,
+ feats, bulk_npg, bulk_len,
+ lstcon_rpc_done, (void *)crpc);
+ if (crpc->crp_rpc == NULL)
+ return -ENOMEM;
+
+ crpc->crp_trans = NULL;
+ crpc->crp_node = nd;
+ crpc->crp_posted = 0;
+ crpc->crp_finished = 0;
+ crpc->crp_unpacked = 0;
+ crpc->crp_status = 0;
+ crpc->crp_stamp = 0;
+ crpc->crp_embedded = embedded;
+ INIT_LIST_HEAD(&crpc->crp_link);
+
+ atomic_inc(&console_session.ses_rpc_counter);
+
+ return 0;
+}
+
+int
+lstcon_rpc_prep(lstcon_node_t *nd, int service, unsigned feats,
+ int bulk_npg, int bulk_len, lstcon_rpc_t **crpcpp)
+{
+ lstcon_rpc_t *crpc = NULL;
+ int rc;
+
+ spin_lock(&console_session.ses_rpc_lock);
+
+ if (!list_empty(&console_session.ses_rpc_freelist)) {
+ crpc = list_entry(console_session.ses_rpc_freelist.next,
+ lstcon_rpc_t, crp_link);
+ list_del_init(&crpc->crp_link);
+ }
+
+ spin_unlock(&console_session.ses_rpc_lock);
+
+ if (crpc == NULL) {
+ LIBCFS_ALLOC(crpc, sizeof(*crpc));
+ if (crpc == NULL)
+ return -ENOMEM;
+ }
+
+ rc = lstcon_rpc_init(nd, service, feats, bulk_npg, bulk_len, 0, crpc);
+ if (rc == 0) {
+ *crpcpp = crpc;
+ return 0;
+ }
+
+ LIBCFS_FREE(crpc, sizeof(*crpc));
+
+ return rc;
+}
+
+void
+lstcon_rpc_put(lstcon_rpc_t *crpc)
+{
+ srpc_bulk_t *bulk = &crpc->crp_rpc->crpc_bulk;
+ int i;
+
+ LASSERT (list_empty(&crpc->crp_link));
+
+ for (i = 0; i < bulk->bk_niov; i++) {
+ if (bulk->bk_iovs[i].kiov_page == NULL)
+ continue;
+
+ __free_page(bulk->bk_iovs[i].kiov_page);
+ }
+
+ srpc_client_rpc_decref(crpc->crp_rpc);
+
+ if (crpc->crp_embedded) {
+ /* embedded RPC, don't recycle it */
+ memset(crpc, 0, sizeof(*crpc));
+ crpc->crp_embedded = 1;
+
+ } else {
+ spin_lock(&console_session.ses_rpc_lock);
+
+ list_add(&crpc->crp_link,
+ &console_session.ses_rpc_freelist);
+
+ spin_unlock(&console_session.ses_rpc_lock);
+ }
+
+ /* RPC is not alive now */
+ atomic_dec(&console_session.ses_rpc_counter);
+}
+
+void
+lstcon_rpc_post(lstcon_rpc_t *crpc)
+{
+ lstcon_rpc_trans_t *trans = crpc->crp_trans;
+
+ LASSERT (trans != NULL);
+
+ atomic_inc(&trans->tas_remaining);
+ crpc->crp_posted = 1;
+
+ sfw_post_rpc(crpc->crp_rpc);
+}
+
+static char *
+lstcon_rpc_trans_name(int transop)
+{
+ if (transop == LST_TRANS_SESNEW)
+ return "SESNEW";
+
+ if (transop == LST_TRANS_SESEND)
+ return "SESEND";
+
+ if (transop == LST_TRANS_SESQRY)
+ return "SESQRY";
+
+ if (transop == LST_TRANS_SESPING)
+ return "SESPING";
+
+ if (transop == LST_TRANS_TSBCLIADD)
+ return "TSBCLIADD";
+
+ if (transop == LST_TRANS_TSBSRVADD)
+ return "TSBSRVADD";
+
+ if (transop == LST_TRANS_TSBRUN)
+ return "TSBRUN";
+
+ if (transop == LST_TRANS_TSBSTOP)
+ return "TSBSTOP";
+
+ if (transop == LST_TRANS_TSBCLIQRY)
+ return "TSBCLIQRY";
+
+ if (transop == LST_TRANS_TSBSRVQRY)
+ return "TSBSRVQRY";
+
+ if (transop == LST_TRANS_STATQRY)
+ return "STATQRY";
+
+ return "Unknown";
+}
+
+int
+lstcon_rpc_trans_prep(struct list_head *translist,
+ int transop, lstcon_rpc_trans_t **transpp)
+{
+ lstcon_rpc_trans_t *trans;
+
+ if (translist != NULL) {
+ list_for_each_entry(trans, translist, tas_link) {
+ /* Can't enqueue two private transactions on
+ * the same object */
+ if ((trans->tas_opc & transop) == LST_TRANS_PRIVATE)
+ return -EPERM;
+ }
+ }
+
+ /* create a trans group */
+ LIBCFS_ALLOC(trans, sizeof(*trans));
+ if (trans == NULL)
+ return -ENOMEM;
+
+ trans->tas_opc = transop;
+
+ if (translist == NULL)
+ INIT_LIST_HEAD(&trans->tas_olink);
+ else
+ list_add_tail(&trans->tas_olink, translist);
+
+ list_add_tail(&trans->tas_link, &console_session.ses_trans_list);
+
+ INIT_LIST_HEAD(&trans->tas_rpcs_list);
+ atomic_set(&trans->tas_remaining, 0);
+ init_waitqueue_head(&trans->tas_waitq);
+
+ spin_lock(&console_session.ses_rpc_lock);
+ trans->tas_features = console_session.ses_features;
+ spin_unlock(&console_session.ses_rpc_lock);
+
+ *transpp = trans;
+ return 0;
+}
+
+void
+lstcon_rpc_trans_addreq(lstcon_rpc_trans_t *trans, lstcon_rpc_t *crpc)
+{
+ list_add_tail(&crpc->crp_link, &trans->tas_rpcs_list);
+ crpc->crp_trans = trans;
+}
+
+void
+lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error)
+{
+ srpc_client_rpc_t *rpc;
+ lstcon_rpc_t *crpc;
+ lstcon_node_t *nd;
+
+ list_for_each_entry (crpc, &trans->tas_rpcs_list, crp_link) {
+ rpc = crpc->crp_rpc;
+
+ spin_lock(&rpc->crpc_lock);
+
+ if (!crpc->crp_posted || /* not posted */
+ crpc->crp_stamp != 0) { /* rpc done or aborted already */
+ if (crpc->crp_stamp == 0) {
+ crpc->crp_stamp = cfs_time_current();
+ crpc->crp_status = -EINTR;
+ }
+ spin_unlock(&rpc->crpc_lock);
+ continue;
+ }
+
+ crpc->crp_stamp = cfs_time_current();
+ crpc->crp_status = error;
+
+ spin_unlock(&rpc->crpc_lock);
+
+ sfw_abort_rpc(rpc);
+
+ if (error != ETIMEDOUT)
+ continue;
+
+ nd = crpc->crp_node;
+ if (cfs_time_after(nd->nd_stamp, crpc->crp_stamp))
+ continue;
+
+ nd->nd_stamp = crpc->crp_stamp;
+ nd->nd_state = LST_NODE_DOWN;
+ }
+}
+
+static int
+lstcon_rpc_trans_check(lstcon_rpc_trans_t *trans)
+{
+ if (console_session.ses_shutdown &&
+ !list_empty(&trans->tas_olink)) /* Not an end session RPC */
+ return 1;
+
+ return (atomic_read(&trans->tas_remaining) == 0) ? 1 : 0;
+}
+
+int
+lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout)
+{
+ lstcon_rpc_t *crpc;
+ int rc;
+
+ if (list_empty(&trans->tas_rpcs_list))
+ return 0;
+
+ if (timeout < LST_TRANS_MIN_TIMEOUT)
+ timeout = LST_TRANS_MIN_TIMEOUT;
+
+ CDEBUG(D_NET, "Transaction %s started\n",
+ lstcon_rpc_trans_name(trans->tas_opc));
+
+ /* post all requests */
+ list_for_each_entry (crpc, &trans->tas_rpcs_list, crp_link) {
+ LASSERT (!crpc->crp_posted);
+
+ lstcon_rpc_post(crpc);
+ }
+
+ mutex_unlock(&console_session.ses_mutex);
+
+ rc = wait_event_interruptible_timeout(trans->tas_waitq,
+ lstcon_rpc_trans_check(trans),
+ cfs_time_seconds(timeout));
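+ /* wait_event_interruptible_timeout(): > 0 condition became true,
+ * 0 timed out, < 0 interrupted by a signal */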
+ rc = (rc > 0) ? 0 : ((rc < 0) ? -EINTR : -ETIMEDOUT);
+
+ mutex_lock(&console_session.ses_mutex);
+
+ if (console_session.ses_shutdown)
+ rc = -ESHUTDOWN;
+
+ if (rc != 0 || atomic_read(&trans->tas_remaining) != 0) {
+ /* treat short timeout as canceled */
+ if (rc == -ETIMEDOUT && timeout < LST_TRANS_MIN_TIMEOUT * 2)
+ rc = -EINTR;
+
+ lstcon_rpc_trans_abort(trans, rc);
+ }
+
+ CDEBUG(D_NET, "Transaction %s stopped: %d\n",
+ lstcon_rpc_trans_name(trans->tas_opc), rc);
+
+ lstcon_rpc_trans_stat(trans, lstcon_trans_stat());
+
+ return rc;
+}
+
+int
+lstcon_rpc_get_reply(lstcon_rpc_t *crpc, srpc_msg_t **msgpp)
+{
+ lstcon_node_t *nd = crpc->crp_node;
+ srpc_client_rpc_t *rpc = crpc->crp_rpc;
+ srpc_generic_reply_t *rep;
+
+ LASSERT (nd != NULL && rpc != NULL);
+ LASSERT (crpc->crp_stamp != 0);
+
+ if (crpc->crp_status != 0) {
+ *msgpp = NULL;
+ return crpc->crp_status;
+ }
+
+ *msgpp = &rpc->crpc_replymsg;
+ if (!crpc->crp_unpacked) {
+ sfw_unpack_message(*msgpp);
+ crpc->crp_unpacked = 1;
+ }
+
+ if (cfs_time_after(nd->nd_stamp, crpc->crp_stamp))
+ return 0;
+
+ nd->nd_stamp = crpc->crp_stamp;
+ rep = &(*msgpp)->msg_body.reply;
+
+ if (rep->sid.ses_nid == LNET_NID_ANY)
+ nd->nd_state = LST_NODE_UNKNOWN;
+ else if (lstcon_session_match(rep->sid))
+ nd->nd_state = LST_NODE_ACTIVE;
+ else
+ nd->nd_state = LST_NODE_BUSY;
+
+ return 0;
+}
+
+void
+lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat)
+{
+ lstcon_rpc_t *crpc;
+ srpc_msg_t *rep;
+ int error;
+
+ LASSERT (stat != NULL);
+
+ memset(stat, 0, sizeof(*stat));
+
+ list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
+ lstcon_rpc_stat_total(stat, 1);
+
+ LASSERT (crpc->crp_stamp != 0);
+
+ error = lstcon_rpc_get_reply(crpc, &rep);
+ if (error != 0) {
+ lstcon_rpc_stat_failure(stat, 1);
+ if (stat->trs_rpc_errno == 0)
+ stat->trs_rpc_errno = -error;
+
+ continue;
+ }
+
+ lstcon_rpc_stat_success(stat, 1);
+
+ lstcon_rpc_stat_reply(trans, rep, crpc->crp_node, stat);
+ }
+
+ if (trans->tas_opc == LST_TRANS_SESNEW && stat->trs_fwk_errno == 0) {
+ stat->trs_fwk_errno =
+ lstcon_session_feats_check(trans->tas_features);
+ }
+
+ CDEBUG(D_NET, "transaction %s : success %d, failure %d, total %d, "
+ "RPC error(%d), Framework error(%d)\n",
+ lstcon_rpc_trans_name(trans->tas_opc),
+ lstcon_rpc_stat_success(stat, 0),
+ lstcon_rpc_stat_failure(stat, 0),
+ lstcon_rpc_stat_total(stat, 0),
+ stat->trs_rpc_errno, stat->trs_fwk_errno);
+
+ return;
+}
+
+int
+lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
+ struct list_head *head_up,
+ lstcon_rpc_readent_func_t readent)
+{
+ struct list_head tmp;
+ struct list_head *next;
+ lstcon_rpc_ent_t *ent;
+ srpc_generic_reply_t *rep;
+ lstcon_rpc_t *crpc;
+ srpc_msg_t *msg;
+ lstcon_node_t *nd;
+ cfs_duration_t dur;
+ struct timeval tv;
+ int error;
+
+ LASSERT (head_up != NULL);
+
+ next = head_up;
+
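+ /* head_up is the head of a list that lives in user space; walk it
+ * link by link, copying each list_head in before filling out the
+ * corresponding result entry */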
+ list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
+ if (copy_from_user(&tmp, next,
+ sizeof(struct list_head)))
+ return -EFAULT;
+
+ if (tmp.next == head_up)
+ return 0;
+
+ next = tmp.next;
+
+ ent = list_entry(next, lstcon_rpc_ent_t, rpe_link);
+
+ LASSERT (crpc->crp_stamp != 0);
+
+ error = lstcon_rpc_get_reply(crpc, &msg);
+
+ nd = crpc->crp_node;
+
+ dur = (cfs_duration_t)cfs_time_sub(crpc->crp_stamp,
+ (cfs_time_t)console_session.ses_id.ses_stamp);
+ cfs_duration_usec(dur, &tv);
+
+ if (copy_to_user(&ent->rpe_peer,
+ &nd->nd_id, sizeof(lnet_process_id_t)) ||
+ copy_to_user(&ent->rpe_stamp, &tv, sizeof(tv)) ||
+ copy_to_user(&ent->rpe_state,
+ &nd->nd_state, sizeof(nd->nd_state)) ||
+ copy_to_user(&ent->rpe_rpc_errno, &error,
+ sizeof(error)))
+ return -EFAULT;
+
+ if (error != 0)
+ continue;
+
+ /* RPC is done */
+ rep = (srpc_generic_reply_t *)&msg->msg_body.reply;
+
+ if (copy_to_user(&ent->rpe_sid,
+ &rep->sid, sizeof(lst_sid_t)) ||
+ copy_to_user(&ent->rpe_fwk_errno,
+ &rep->status, sizeof(rep->status)))
+ return -EFAULT;
+
+ if (readent == NULL)
+ continue;
+
+ error = readent(trans->tas_opc, msg, ent);
+ if (error != 0)
+ return error;
+ }
+
+ return 0;
+}
+
+void
+lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
+{
+ srpc_client_rpc_t *rpc;
+ lstcon_rpc_t *crpc;
+ lstcon_rpc_t *tmp;
+ int count = 0;
+
+ list_for_each_entry_safe(crpc, tmp, &trans->tas_rpcs_list,
+ crp_link) {
+ rpc = crpc->crp_rpc;
+
+ spin_lock(&rpc->crpc_lock);
+
+ /* free it if not posted or finished already */
+ if (!crpc->crp_posted || crpc->crp_finished) {
+ spin_unlock(&rpc->crpc_lock);
+
+ list_del_init(&crpc->crp_link);
+ lstcon_rpc_put(crpc);
+
+ continue;
+ }
+
+ /* RPCs may not have been called back yet (even though
+ * LNetMDUnlink() has been called) because of the huge timeout
+ * for an inaccessible network; don't make the user wait for
+ * them, just abandon them, they will be recycled in the
+ * callback */
+
+ LASSERT (crpc->crp_status != 0);
+
+ crpc->crp_node = NULL;
+ crpc->crp_trans = NULL;
+ list_del_init(&crpc->crp_link);
+ count++;
+
+ spin_unlock(&rpc->crpc_lock);
+
+ atomic_dec(&trans->tas_remaining);
+ }
+
+ LASSERT (atomic_read(&trans->tas_remaining) == 0);
+
+ list_del(&trans->tas_link);
+ if (!list_empty(&trans->tas_olink))
+ list_del(&trans->tas_olink);
+
+ CDEBUG(D_NET, "Transaction %s destroyed with %d pending RPCs\n",
+ lstcon_rpc_trans_name(trans->tas_opc), count);
+
+ LIBCFS_FREE(trans, sizeof(*trans));
+
+ return;
+}
+
+int
+lstcon_sesrpc_prep(lstcon_node_t *nd, int transop,
+ unsigned feats, lstcon_rpc_t **crpc)
+{
+ srpc_mksn_reqst_t *msrq;
+ srpc_rmsn_reqst_t *rsrq;
+ int rc;
+
+ switch (transop) {
+ case LST_TRANS_SESNEW:
+ rc = lstcon_rpc_prep(nd, SRPC_SERVICE_MAKE_SESSION,
+ feats, 0, 0, crpc);
+ if (rc != 0)
+ return rc;
+
+ msrq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.mksn_reqst;
+ msrq->mksn_sid = console_session.ses_id;
+ msrq->mksn_force = console_session.ses_force;
+ strncpy(msrq->mksn_name, console_session.ses_name,
+ strlen(console_session.ses_name));
+ break;
+
+ case LST_TRANS_SESEND:
+ rc = lstcon_rpc_prep(nd, SRPC_SERVICE_REMOVE_SESSION,
+ feats, 0, 0, crpc);
+ if (rc != 0)
+ return rc;
+
+ rsrq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.rmsn_reqst;
+ rsrq->rmsn_sid = console_session.ses_id;
+ break;
+
+ default:
+ LBUG();
+ }
+
+ return 0;
+}
+
+int
+lstcon_dbgrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc)
+{
+ srpc_debug_reqst_t *drq;
+ int rc;
+
+ rc = lstcon_rpc_prep(nd, SRPC_SERVICE_DEBUG, feats, 0, 0, crpc);
+ if (rc != 0)
+ return rc;
+
+ drq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.dbg_reqst;
+
+ drq->dbg_sid = console_session.ses_id;
+ drq->dbg_flags = 0;
+
+ return rc;
+}
+
+int
+lstcon_batrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
+ lstcon_tsb_hdr_t *tsb, lstcon_rpc_t **crpc)
+{
+ lstcon_batch_t *batch;
+ srpc_batch_reqst_t *brq;
+ int rc;
+
+ rc = lstcon_rpc_prep(nd, SRPC_SERVICE_BATCH, feats, 0, 0, crpc);
+ if (rc != 0)
+ return rc;
+
+ brq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.bat_reqst;
+
+ brq->bar_sid = console_session.ses_id;
+ brq->bar_bid = tsb->tsb_id;
+ brq->bar_testidx = tsb->tsb_index;
+ brq->bar_opc = transop == LST_TRANS_TSBRUN ? SRPC_BATCH_OPC_RUN :
+ (transop == LST_TRANS_TSBSTOP ? SRPC_BATCH_OPC_STOP:
+ SRPC_BATCH_OPC_QUERY);
+
+ if (transop != LST_TRANS_TSBRUN &&
+ transop != LST_TRANS_TSBSTOP)
+ return 0;
+
+ LASSERT (tsb->tsb_index == 0);
+
+ batch = (lstcon_batch_t *)tsb;
+ brq->bar_arg = batch->bat_arg;
+
+ return 0;
+}
+
+int
+lstcon_statrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc)
+{
+ srpc_stat_reqst_t *srq;
+ int rc;
+
+ rc = lstcon_rpc_prep(nd, SRPC_SERVICE_QUERY_STAT, feats, 0, 0, crpc);
+ if (rc != 0)
+ return rc;
+
+ srq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.stat_reqst;
+
+ srq->str_sid = console_session.ses_id;
+ srq->str_type = 0; /* XXX remove it */
+
+ return 0;
+}
+
+lnet_process_id_packed_t *
+lstcon_next_id(int idx, int nkiov, lnet_kiov_t *kiov)
+{
+ lnet_process_id_packed_t *pid;
+ int i;
+
+ i = idx / SFW_ID_PER_PAGE;
+
+ LASSERT (i < nkiov);
+
+ pid = (lnet_process_id_packed_t *)page_address(kiov[i].kiov_page);
+
+ return &pid[idx % SFW_ID_PER_PAGE];
+}
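+
+/*
+ * lstcon_next_id() maps a flat destination index onto the pre-allocated
+ * bulk pages: page = idx / SFW_ID_PER_PAGE, slot = idx % SFW_ID_PER_PAGE.
+ * As a worked example with an assumed value (the real SFW_ID_PER_PAGE is
+ * defined elsewhere in the selftest code): if SFW_ID_PER_PAGE were 256,
+ * idx == 300 would land in kiov[1], slot 44.
+ */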
+
+int
+lstcon_dstnodes_prep(lstcon_group_t *grp, int idx,
+ int dist, int span, int nkiov, lnet_kiov_t *kiov)
+{
+ lnet_process_id_packed_t *pid;
+ lstcon_ndlink_t *ndl;
+ lstcon_node_t *nd;
+ int start;
+ int end;
+ int i = 0;
+
+ LASSERT (dist >= 1);
+ LASSERT (span >= 1);
+ LASSERT (grp->grp_nnode >= 1);
+
+ if (span > grp->grp_nnode)
+ return -EINVAL;
+
+ start = ((idx / dist) * span) % grp->grp_nnode;
+ end = ((idx / dist) * span + span - 1) % grp->grp_nnode;
+
+ list_for_each_entry(ndl, &grp->grp_ndl_list, ndl_link) {
+ nd = ndl->ndl_node;
+ if (i < start) {
+ i++;
+ continue;
+ }
+
+ if (i > (end >= start ? end: grp->grp_nnode))
+ break;
+
+ pid = lstcon_next_id((i - start), nkiov, kiov);
+ pid->nid = nd->nd_id.nid;
+ pid->pid = nd->nd_id.pid;
+ i++;
+ }
+
+ if (start <= end) /* done */
+ return 0;
+
+ list_for_each_entry(ndl, &grp->grp_ndl_list, ndl_link) {
+ if (i > grp->grp_nnode + end)
+ break;
+
+ nd = ndl->ndl_node;
+ pid = lstcon_next_id((i - start), nkiov, kiov);
+ pid->nid = nd->nd_id.nid;
+ pid->pid = nd->nd_id.pid;
+ i++;
+ }
+
+ return 0;
+}
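+
+/*
+ * A sketch of the wrap-around selection above, using assumed numbers:
+ * with grp_nnode == 4, dist == 1, span == 3 and idx == 2,
+ * start = (2 * 3) % 4 = 2 and end = (2 * 3 + 2) % 4 = 0.  Because
+ * start > end, the first pass packs group members 2 and 3 and the second
+ * pass wraps to the list head and packs member 0, so exactly span (3)
+ * destination ids end up in the kiov pages.
+ */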
+
+int
+lstcon_pingrpc_prep(lst_test_ping_param_t *param, srpc_test_reqst_t *req)
+{
+ test_ping_req_t *prq = &req->tsr_u.ping;
+
+ prq->png_size = param->png_size;
+ prq->png_flags = param->png_flags;
+ /* TODO dest */
+ return 0;
+}
+
+int
+lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
+{
+ test_bulk_req_t *brq = &req->tsr_u.bulk_v0;
+
+ brq->blk_opc = param->blk_opc;
+ brq->blk_npg = (param->blk_size + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE;
+ brq->blk_flags = param->blk_flags;
+
+ return 0;
+}
+
+int
+lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
+{
+ test_bulk_req_v1_t *brq = &req->tsr_u.bulk_v1;
+
+ brq->blk_opc = param->blk_opc;
+ brq->blk_flags = param->blk_flags;
+ brq->blk_len = param->blk_size;
+ brq->blk_offset = 0; /* reserved */
+
+ return 0;
+}
+
+int
+lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
+ lstcon_test_t *test, lstcon_rpc_t **crpc)
+{
+ lstcon_group_t *sgrp = test->tes_src_grp;
+ lstcon_group_t *dgrp = test->tes_dst_grp;
+ srpc_test_reqst_t *trq;
+ srpc_bulk_t *bulk;
+ int i;
+ int npg = 0;
+ int nob = 0;
+ int rc = 0;
+
+ if (transop == LST_TRANS_TSBCLIADD) {
+ npg = sfw_id_pages(test->tes_span);
+ nob = (feats & LST_FEAT_BULK_LEN) == 0 ?
+ npg * PAGE_CACHE_SIZE :
+ sizeof(lnet_process_id_packed_t) * test->tes_span;
+ }
+
+ rc = lstcon_rpc_prep(nd, SRPC_SERVICE_TEST, feats, npg, nob, crpc);
+ if (rc != 0)
+ return rc;
+
+ trq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.tes_reqst;
+
+ if (transop == LST_TRANS_TSBSRVADD) {
+ int ndist = (sgrp->grp_nnode + test->tes_dist - 1) / test->tes_dist;
+ int nspan = (dgrp->grp_nnode + test->tes_span - 1) / test->tes_span;
+ int nmax = (ndist + nspan - 1) / nspan;
+
+ trq->tsr_ndest = 0;
+ trq->tsr_loop = nmax * test->tes_dist * test->tes_concur;
+
+ } else {
+ bulk = &(*crpc)->crp_rpc->crpc_bulk;
+
+ for (i = 0; i < npg; i++) {
+ int len;
+
+ LASSERT(nob > 0);
+
+ len = (feats & LST_FEAT_BULK_LEN) == 0 ?
+ PAGE_CACHE_SIZE : min_t(int, nob, PAGE_CACHE_SIZE);
+ nob -= len;
+
+ bulk->bk_iovs[i].kiov_offset = 0;
+ bulk->bk_iovs[i].kiov_len = len;
+ bulk->bk_iovs[i].kiov_page =
+ alloc_page(GFP_IOFS);
+
+ if (bulk->bk_iovs[i].kiov_page == NULL) {
+ lstcon_rpc_put(*crpc);
+ return -ENOMEM;
+ }
+ }
+
+ bulk->bk_sink = 0;
+
+ LASSERT (transop == LST_TRANS_TSBCLIADD);
+
+ rc = lstcon_dstnodes_prep(test->tes_dst_grp,
+ test->tes_cliidx++,
+ test->tes_dist,
+ test->tes_span,
+ npg, &bulk->bk_iovs[0]);
+ if (rc != 0) {
+ lstcon_rpc_put(*crpc);
+ return rc;
+ }
+
+ trq->tsr_ndest = test->tes_span;
+ trq->tsr_loop = test->tes_loop;
+ }
+
+ trq->tsr_sid = console_session.ses_id;
+ trq->tsr_bid = test->tes_hdr.tsb_id;
+ trq->tsr_concur = test->tes_concur;
+ trq->tsr_is_client = (transop == LST_TRANS_TSBCLIADD) ? 1 : 0;
+ trq->tsr_stop_onerr = !!test->tes_stop_onerr;
+
+ switch (test->tes_type) {
+ case LST_TEST_PING:
+ trq->tsr_service = SRPC_SERVICE_PING;
+ rc = lstcon_pingrpc_prep((lst_test_ping_param_t *)
+ &test->tes_param[0], trq);
+ break;
+
+ case LST_TEST_BULK:
+ trq->tsr_service = SRPC_SERVICE_BRW;
+ if ((feats & LST_FEAT_BULK_LEN) == 0) {
+ rc = lstcon_bulkrpc_v0_prep((lst_test_bulk_param_t *)
+ &test->tes_param[0], trq);
+ } else {
+ rc = lstcon_bulkrpc_v1_prep((lst_test_bulk_param_t *)
+ &test->tes_param[0], trq);
+ }
+
+ break;
+ default:
+ LBUG();
+ break;
+ }
+
+ return rc;
+}
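+
+/*
+ * The server-side (LST_TRANS_TSBSRVADD) loop count above, with assumed
+ * numbers: a source group of 8 nodes and tes_dist == 2 give ndist == 4,
+ * a destination group of 4 nodes and tes_span == 2 give nspan == 2, so
+ * nmax == 2 and, with tes_concur == 1, tsr_loop == 2 * 2 * 1 == 4.
+ * Clients instead get the packed destination ids in the bulk pages and
+ * use tes_loop directly.
+ */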
+
+int
+lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans,
+ lstcon_node_t *nd, srpc_msg_t *reply)
+{
+ srpc_mksn_reply_t *mksn_rep = &reply->msg_body.mksn_reply;
+ int status = mksn_rep->mksn_status;
+
+ if (status == 0 &&
+ (reply->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
+ mksn_rep->mksn_status = EPROTO;
+ status = EPROTO;
+ }
+
+ if (status == EPROTO) {
+ CNETERR("session protocol error from %s: %u\n",
+ libcfs_nid2str(nd->nd_id.nid),
+ reply->msg_ses_feats);
+ }
+
+ if (status != 0)
+ return status;
+
+ if (!trans->tas_feats_updated) {
+ trans->tas_feats_updated = 1;
+ trans->tas_features = reply->msg_ses_feats;
+ }
+
+ if (reply->msg_ses_feats != trans->tas_features) {
+ CNETERR("Framework features %x from %s is different with "
+ "features on this transaction: %x\n",
+ reply->msg_ses_feats, libcfs_nid2str(nd->nd_id.nid),
+ trans->tas_features);
+ status = mksn_rep->mksn_status = EPROTO;
+ }
+
+ if (status == 0) {
+ /* session timeout on remote node */
+ nd->nd_timeout = mksn_rep->mksn_timeout;
+ }
+
+ return status;
+}
+
+void
+lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg,
+ lstcon_node_t *nd, lstcon_trans_stat_t *stat)
+{
+ srpc_rmsn_reply_t *rmsn_rep;
+ srpc_debug_reply_t *dbg_rep;
+ srpc_batch_reply_t *bat_rep;
+ srpc_test_reply_t *test_rep;
+ srpc_stat_reply_t *stat_rep;
+ int rc = 0;
+
+ switch (trans->tas_opc) {
+ case LST_TRANS_SESNEW:
+ rc = lstcon_sesnew_stat_reply(trans, nd, msg);
+ if (rc == 0) {
+ lstcon_sesop_stat_success(stat, 1);
+ return;
+ }
+
+ lstcon_sesop_stat_failure(stat, 1);
+ break;
+
+ case LST_TRANS_SESEND:
+ rmsn_rep = &msg->msg_body.rmsn_reply;
+ /* ESRCH is not an error for end session */
+ if (rmsn_rep->rmsn_status == 0 ||
+ rmsn_rep->rmsn_status == ESRCH) {
+ lstcon_sesop_stat_success(stat, 1);
+ return;
+ }
+
+ lstcon_sesop_stat_failure(stat, 1);
+ rc = rmsn_rep->rmsn_status;
+ break;
+
+ case LST_TRANS_SESQRY:
+ case LST_TRANS_SESPING:
+ dbg_rep = &msg->msg_body.dbg_reply;
+
+ if (dbg_rep->dbg_status == ESRCH) {
+ lstcon_sesqry_stat_unknown(stat, 1);
+ return;
+ }
+
+ if (lstcon_session_match(dbg_rep->dbg_sid))
+ lstcon_sesqry_stat_active(stat, 1);
+ else
+ lstcon_sesqry_stat_busy(stat, 1);
+ return;
+
+ case LST_TRANS_TSBRUN:
+ case LST_TRANS_TSBSTOP:
+ bat_rep = &msg->msg_body.bat_reply;
+
+ if (bat_rep->bar_status == 0) {
+ lstcon_tsbop_stat_success(stat, 1);
+ return;
+ }
+
+ if (bat_rep->bar_status == EPERM &&
+ trans->tas_opc == LST_TRANS_TSBSTOP) {
+ lstcon_tsbop_stat_success(stat, 1);
+ return;
+ }
+
+ lstcon_tsbop_stat_failure(stat, 1);
+ rc = bat_rep->bar_status;
+ break;
+
+ case LST_TRANS_TSBCLIQRY:
+ case LST_TRANS_TSBSRVQRY:
+ bat_rep = &msg->msg_body.bat_reply;
+
+ if (bat_rep->bar_active != 0)
+ lstcon_tsbqry_stat_run(stat, 1);
+ else
+ lstcon_tsbqry_stat_idle(stat, 1);
+
+ if (bat_rep->bar_status == 0)
+ return;
+
+ lstcon_tsbqry_stat_failure(stat, 1);
+ rc = bat_rep->bar_status;
+ break;
+
+ case LST_TRANS_TSBCLIADD:
+ case LST_TRANS_TSBSRVADD:
+ test_rep = &msg->msg_body.tes_reply;
+
+ if (test_rep->tsr_status == 0) {
+ lstcon_tsbop_stat_success(stat, 1);
+ return;
+ }
+
+ lstcon_tsbop_stat_failure(stat, 1);
+ rc = test_rep->tsr_status;
+ break;
+
+ case LST_TRANS_STATQRY:
+ stat_rep = &msg->msg_body.stat_reply;
+
+ if (stat_rep->str_status == 0) {
+ lstcon_statqry_stat_success(stat, 1);
+ return;
+ }
+
+ lstcon_statqry_stat_failure(stat, 1);
+ rc = stat_rep->str_status;
+ break;
+
+ default:
+ LBUG();
+ }
+
+ if (stat->trs_fwk_errno == 0)
+ stat->trs_fwk_errno = rc;
+
+ return;
+}
+
+int
+lstcon_rpc_trans_ndlist(struct list_head *ndlist,
+ struct list_head *translist, int transop,
+ void *arg, lstcon_rpc_cond_func_t condition,
+ lstcon_rpc_trans_t **transpp)
+{
+ lstcon_rpc_trans_t *trans;
+ lstcon_ndlink_t *ndl;
+ lstcon_node_t *nd;
+ lstcon_rpc_t *rpc;
+ unsigned feats;
+ int rc;
+
+ /* Creating session RPC for the list of nodes */
+
+ rc = lstcon_rpc_trans_prep(translist, transop, &trans);
+ if (rc != 0) {
+ CERROR("Can't create transaction %d: %d\n", transop, rc);
+ return rc;
+ }
+
+ feats = trans->tas_features;
+ list_for_each_entry(ndl, ndlist, ndl_link) {
+ rc = condition == NULL ? 1 :
+ condition(transop, ndl->ndl_node, arg);
+
+ if (rc == 0)
+ continue;
+
+ if (rc < 0) {
+ CDEBUG(D_NET, "Condition error while creating RPC "
+ " for transaction %d: %d\n", transop, rc);
+ break;
+ }
+
+ nd = ndl->ndl_node;
+
+ switch (transop) {
+ case LST_TRANS_SESNEW:
+ case LST_TRANS_SESEND:
+ rc = lstcon_sesrpc_prep(nd, transop, feats, &rpc);
+ break;
+ case LST_TRANS_SESQRY:
+ case LST_TRANS_SESPING:
+ rc = lstcon_dbgrpc_prep(nd, feats, &rpc);
+ break;
+ case LST_TRANS_TSBCLIADD:
+ case LST_TRANS_TSBSRVADD:
+ rc = lstcon_testrpc_prep(nd, transop, feats,
+ (lstcon_test_t *)arg, &rpc);
+ break;
+ case LST_TRANS_TSBRUN:
+ case LST_TRANS_TSBSTOP:
+ case LST_TRANS_TSBCLIQRY:
+ case LST_TRANS_TSBSRVQRY:
+ rc = lstcon_batrpc_prep(nd, transop, feats,
+ (lstcon_tsb_hdr_t *)arg, &rpc);
+ break;
+ case LST_TRANS_STATQRY:
+ rc = lstcon_statrpc_prep(nd, feats, &rpc);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc != 0) {
+ CERROR("Failed to create RPC for transaction %s: %d\n",
+ lstcon_rpc_trans_name(transop), rc);
+ break;
+ }
+
+ lstcon_rpc_trans_addreq(trans, rpc);
+ }
+
+ if (rc == 0) {
+ *transpp = trans;
+ return 0;
+ }
+
+ lstcon_rpc_trans_destroy(trans);
+
+ return rc;
+}
+
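+/*
+ * Pinger overview (summary of the function below): once the console has
+ * been idle for longer than ses_timeout the session is marked expired and
+ * SESEND RPCs are sent to every still-active node; otherwise each node's
+ * embedded nd_ping RPC is recycled once it has finished, and a fresh debug
+ * ping is posted whenever at least half of the node's timeout has elapsed
+ * since its last reply stamp.  The pinger then re-arms its own timer for
+ * LST_PING_INTERVAL seconds, unless the session has expired.
+ */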
+void
+lstcon_rpc_pinger(void *arg)
+{
+ stt_timer_t *ptimer = (stt_timer_t *)arg;
+ lstcon_rpc_trans_t *trans;
+ lstcon_rpc_t *crpc;
+ srpc_msg_t *rep;
+ srpc_debug_reqst_t *drq;
+ lstcon_ndlink_t *ndl;
+ lstcon_node_t *nd;
+ time_t intv;
+ int count = 0;
+ int rc;
+
+ /* The RPC pinger is a special case of transaction:
+ * it's called by a timer every LST_PING_INTERVAL (8) seconds.
+ */
+ mutex_lock(&console_session.ses_mutex);
+
+ if (console_session.ses_shutdown || console_session.ses_expired) {
+ mutex_unlock(&console_session.ses_mutex);
+ return;
+ }
+
+ if (!console_session.ses_expired &&
+ cfs_time_current_sec() - console_session.ses_laststamp >
+ (time_t)console_session.ses_timeout)
+ console_session.ses_expired = 1;
+
+ trans = console_session.ses_ping;
+
+ LASSERT (trans != NULL);
+
+ list_for_each_entry(ndl, &console_session.ses_ndl_list, ndl_link) {
+ nd = ndl->ndl_node;
+
+ if (console_session.ses_expired) {
+ /* idle console, end session on all nodes */
+ if (nd->nd_state != LST_NODE_ACTIVE)
+ continue;
+
+ rc = lstcon_sesrpc_prep(nd, LST_TRANS_SESEND,
+ trans->tas_features, &crpc);
+ if (rc != 0) {
+ CERROR("Out of memory\n");
+ break;
+ }
+
+ lstcon_rpc_trans_addreq(trans, crpc);
+ lstcon_rpc_post(crpc);
+
+ continue;
+ }
+
+ crpc = &nd->nd_ping;
+
+ if (crpc->crp_rpc != NULL) {
+ LASSERT (crpc->crp_trans == trans);
+ LASSERT (!list_empty(&crpc->crp_link));
+
+ spin_lock(&crpc->crp_rpc->crpc_lock);
+
+ LASSERT(crpc->crp_posted);
+
+ if (!crpc->crp_finished) {
+ /* in flight */
+ spin_unlock(&crpc->crp_rpc->crpc_lock);
+ continue;
+ }
+
+ spin_unlock(&crpc->crp_rpc->crpc_lock);
+
+ lstcon_rpc_get_reply(crpc, &rep);
+
+ list_del_init(&crpc->crp_link);
+
+ lstcon_rpc_put(crpc);
+ }
+
+ if (nd->nd_state != LST_NODE_ACTIVE)
+ continue;
+
+ intv = cfs_duration_sec(cfs_time_sub(cfs_time_current(),
+ nd->nd_stamp));
+ if (intv < (time_t)nd->nd_timeout / 2)
+ continue;
+
+ rc = lstcon_rpc_init(nd, SRPC_SERVICE_DEBUG,
+ trans->tas_features, 0, 0, 1, crpc);
+ if (rc != 0) {
+ CERROR("Out of memory\n");
+ break;
+ }
+
+ drq = &crpc->crp_rpc->crpc_reqstmsg.msg_body.dbg_reqst;
+
+ drq->dbg_sid = console_session.ses_id;
+ drq->dbg_flags = 0;
+
+ lstcon_rpc_trans_addreq(trans, crpc);
+ lstcon_rpc_post(crpc);
+
+ count++;
+ }
+
+ if (console_session.ses_expired) {
+ mutex_unlock(&console_session.ses_mutex);
+ return;
+ }
+
+ CDEBUG(D_NET, "Ping %d nodes in session\n", count);
+
+ ptimer->stt_expires = (cfs_time_t)(cfs_time_current_sec() + LST_PING_INTERVAL);
+ stt_add_timer(ptimer);
+
+ mutex_unlock(&console_session.ses_mutex);
+}
+
+int
+lstcon_rpc_pinger_start(void)
+{
+ stt_timer_t *ptimer;
+ int rc;
+
+ LASSERT (list_empty(&console_session.ses_rpc_freelist));
+ LASSERT (atomic_read(&console_session.ses_rpc_counter) == 0);
+
+ rc = lstcon_rpc_trans_prep(NULL, LST_TRANS_SESPING,
+ &console_session.ses_ping);
+ if (rc != 0) {
+ CERROR("Failed to create console pinger\n");
+ return rc;
+ }
+
+ ptimer = &console_session.ses_ping_timer;
+ ptimer->stt_expires = (cfs_time_t)(cfs_time_current_sec() + LST_PING_INTERVAL);
+
+ stt_add_timer(ptimer);
+
+ return 0;
+}
+
+void
+lstcon_rpc_pinger_stop(void)
+{
+ LASSERT (console_session.ses_shutdown);
+
+ stt_del_timer(&console_session.ses_ping_timer);
+
+ lstcon_rpc_trans_abort(console_session.ses_ping, -ESHUTDOWN);
+ lstcon_rpc_trans_stat(console_session.ses_ping, lstcon_trans_stat());
+ lstcon_rpc_trans_destroy(console_session.ses_ping);
+
+ memset(lstcon_trans_stat(), 0, sizeof(lstcon_trans_stat_t));
+
+ console_session.ses_ping = NULL;
+}
+
+void
+lstcon_rpc_cleanup_wait(void)
+{
+ lstcon_rpc_trans_t *trans;
+ lstcon_rpc_t *crpc;
+ struct list_head *pacer;
+ struct list_head zlist;
+
+ /* Called with hold of global mutex */
+
+ LASSERT (console_session.ses_shutdown);
+
+ while (!list_empty(&console_session.ses_trans_list)) {
+ list_for_each(pacer, &console_session.ses_trans_list) {
+ trans = list_entry(pacer, lstcon_rpc_trans_t,
+ tas_link);
+
+ CDEBUG(D_NET, "Session closed, wakeup transaction %s\n",
+ lstcon_rpc_trans_name(trans->tas_opc));
+
+ wake_up(&trans->tas_waitq);
+ }
+
+ mutex_unlock(&console_session.ses_mutex);
+
+ CWARN("Session is shutting down, "
+ "waiting for termination of transactions\n");
+ cfs_pause(cfs_time_seconds(1));
+
+ mutex_lock(&console_session.ses_mutex);
+ }
+
+ spin_lock(&console_session.ses_rpc_lock);
+
+ lst_wait_until((atomic_read(&console_session.ses_rpc_counter) == 0),
+ console_session.ses_rpc_lock,
+ "Network is not accessible or target is down, "
+ "waiting for %d console RPCs to being recycled\n",
+ atomic_read(&console_session.ses_rpc_counter));
+
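+ /* Splice the whole freelist onto the local zlist head in one step:
+ * inserting zlist into the list and then re-initializing the old
+ * head leaves every queued RPC chained off zlist, so they can be
+ * freed below without holding ses_rpc_lock. */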
+ list_add(&zlist, &console_session.ses_rpc_freelist);
+ list_del_init(&console_session.ses_rpc_freelist);
+
+ spin_unlock(&console_session.ses_rpc_lock);
+
+ while (!list_empty(&zlist)) {
+ crpc = list_entry(zlist.next, lstcon_rpc_t, crp_link);
+
+ list_del(&crpc->crp_link);
+ LIBCFS_FREE(crpc, sizeof(lstcon_rpc_t));
+ }
+}
+
+int
+lstcon_rpc_module_init(void)
+{
+ INIT_LIST_HEAD(&console_session.ses_ping_timer.stt_list);
+ console_session.ses_ping_timer.stt_func = lstcon_rpc_pinger;
+ console_session.ses_ping_timer.stt_data = &console_session.ses_ping_timer;
+
+ console_session.ses_ping = NULL;
+
+ spin_lock_init(&console_session.ses_rpc_lock);
+ atomic_set(&console_session.ses_rpc_counter, 0);
+ INIT_LIST_HEAD(&console_session.ses_rpc_freelist);
+
+ return 0;
+}
+
+void
+lstcon_rpc_module_fini(void)
+{
+ LASSERT (list_empty(&console_session.ses_rpc_freelist));
+ LASSERT (atomic_read(&console_session.ses_rpc_counter) == 0);
+}
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h
new file mode 100644
index 0000000..9aba24a
--- /dev/null
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.h
@@ -0,0 +1,146 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/selftest/conrpc.h
+ *
+ * Console rpc
+ *
+ * Author: Liang Zhen <liang@whamcloud.com>
+ */
+
+#ifndef __LST_CONRPC_H__
+#define __LST_CONRPC_H__
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/lnet/lnet.h>
+#include <linux/lnet/lib-types.h>
+#include <linux/lnet/lnetst.h>
+#include "rpc.h"
+#include "selftest.h"
+
+/* Console rpc and rpc transaction */
+#define LST_TRANS_TIMEOUT 30
+#define LST_TRANS_MIN_TIMEOUT 3
+
+#define LST_VALIDATE_TIMEOUT(t) MIN(MAX(t, LST_TRANS_MIN_TIMEOUT), LST_TRANS_TIMEOUT)
+
+#define LST_PING_INTERVAL 8
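+
+/*
+ * LST_VALIDATE_TIMEOUT() clamps a user-supplied timeout into
+ * [LST_TRANS_MIN_TIMEOUT, LST_TRANS_TIMEOUT]; with the values above, 0
+ * becomes 3 and 300 becomes 30.  LST_PING_INTERVAL is how often, in
+ * seconds, the console pinger re-arms itself.
+ */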
+
+struct lstcon_rpc_trans;
+struct lstcon_tsb_hdr;
+struct lstcon_test;
+struct lstcon_node;
+
+typedef struct lstcon_rpc {
+ struct list_head crp_link; /* chain on rpc transaction */
+ srpc_client_rpc_t *crp_rpc; /* client rpc */
+ struct lstcon_node *crp_node; /* destination node */
+ struct lstcon_rpc_trans *crp_trans; /* conrpc transaction */
+
+ unsigned int crp_posted:1; /* rpc is posted */
+ unsigned int crp_finished:1; /* rpc is finished */
+ unsigned int crp_unpacked:1; /* reply is unpacked */
+ /** RPC is embedded in another structure, can't be freed alone */
+ unsigned int crp_embedded:1;
+ int crp_status; /* console rpc errors */
+ cfs_time_t crp_stamp; /* replied time stamp */
+} lstcon_rpc_t;
+
+typedef struct lstcon_rpc_trans {
+ struct list_head tas_olink; /* link chain on owner list */
+ struct list_head tas_link; /* link chain on global list */
+ int tas_opc; /* operation code of transaction */
+ /* features mask is uptodate */
+ unsigned tas_feats_updated;
+ /* test features mask */
+ unsigned tas_features;
+ wait_queue_head_t tas_waitq; /* wait queue head */
+ atomic_t tas_remaining; /* # of un-scheduled rpcs */
+ struct list_head tas_rpcs_list; /* queued requests */
+} lstcon_rpc_trans_t;
+
+#define LST_TRANS_PRIVATE 0x1000
+
+#define LST_TRANS_SESNEW (LST_TRANS_PRIVATE | 0x01)
+#define LST_TRANS_SESEND (LST_TRANS_PRIVATE | 0x02)
+#define LST_TRANS_SESQRY 0x03
+#define LST_TRANS_SESPING 0x04
+
+#define LST_TRANS_TSBCLIADD (LST_TRANS_PRIVATE | 0x11)
+#define LST_TRANS_TSBSRVADD (LST_TRANS_PRIVATE | 0x12)
+#define LST_TRANS_TSBRUN (LST_TRANS_PRIVATE | 0x13)
+#define LST_TRANS_TSBSTOP (LST_TRANS_PRIVATE | 0x14)
+#define LST_TRANS_TSBCLIQRY 0x15
+#define LST_TRANS_TSBSRVQRY 0x16
+
+#define LST_TRANS_STATQRY 0x21
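+
+/*
+ * Opcodes carrying the LST_TRANS_PRIVATE bit are the operations that
+ * change state on the remote nodes (session new/end, test client/server
+ * add, batch run/stop); the remaining opcodes (SESQRY, SESPING,
+ * TSB*QRY, STATQRY) are read-only queries.  The prep/stat switches in
+ * conrpc.c compare the opcodes as whole values.
+ */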
+
+typedef int (*lstcon_rpc_cond_func_t)(int, struct lstcon_node *, void *);
+typedef int (*lstcon_rpc_readent_func_t)(int, srpc_msg_t *,
+ lstcon_rpc_ent_t *);
+
+int lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
+ unsigned version, lstcon_rpc_t **crpc);
+int lstcon_dbgrpc_prep(struct lstcon_node *nd,
+ unsigned version, lstcon_rpc_t **crpc);
+int lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned version,
+ struct lstcon_tsb_hdr *tsb, lstcon_rpc_t **crpc);
+int lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned version,
+ struct lstcon_test *test, lstcon_rpc_t **crpc);
+int lstcon_statrpc_prep(struct lstcon_node *nd, unsigned version,
+ lstcon_rpc_t **crpc);
+void lstcon_rpc_put(lstcon_rpc_t *crpc);
+int lstcon_rpc_trans_prep(struct list_head *translist,
+ int transop, lstcon_rpc_trans_t **transpp);
+int lstcon_rpc_trans_ndlist(struct list_head *ndlist,
+ struct list_head *translist, int transop,
+ void *arg, lstcon_rpc_cond_func_t condition,
+ lstcon_rpc_trans_t **transpp);
+void lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans,
+ lstcon_trans_stat_t *stat);
+int lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
+ struct list_head *head_up,
+ lstcon_rpc_readent_func_t readent);
+void lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error);
+void lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans);
+void lstcon_rpc_trans_addreq(lstcon_rpc_trans_t *trans, lstcon_rpc_t *req);
+int lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout);
+int lstcon_rpc_pinger_start(void);
+void lstcon_rpc_pinger_stop(void);
+void lstcon_rpc_cleanup_wait(void);
+int lstcon_rpc_module_init(void);
+void lstcon_rpc_module_fini(void);
+
+#endif
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
new file mode 100644
index 0000000..09e4700
--- /dev/null
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -0,0 +1,2071 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/selftest/console.c
+ *
+ * Infrastructure of LST console
+ *
+ * Author: Liang Zhen <liangzhen@clusterfs.com>
+ */
+
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/lnet/lib-lnet.h>
+#include "console.h"
+#include "conrpc.h"
+
+#define LST_NODE_STATE_COUNTER(nd, p) \
+do { \
+ if ((nd)->nd_state == LST_NODE_ACTIVE) \
+ (p)->nle_nactive++; \
+ else if ((nd)->nd_state == LST_NODE_BUSY) \
+ (p)->nle_nbusy++; \
+ else if ((nd)->nd_state == LST_NODE_DOWN) \
+ (p)->nle_ndown++; \
+ else \
+ (p)->nle_nunknown++; \
+ (p)->nle_nnode++; \
+} while (0)
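+
+/*
+ * The counter macro is used by walking a node list and tallying each
+ * node into an lstcon_ndlist_ent_t, e.g.:
+ *
+ *     list_for_each_entry(ndl, &grp->grp_ndl_list, ndl_link)
+ *             LST_NODE_STATE_COUNTER(ndl->ndl_node, entp);
+ *
+ * The do/while (0) wrapper keeps the multi-statement body safe to use as
+ * a single statement in the unbraced loop body above.
+ */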
+
+lstcon_session_t console_session;
+
+void
+lstcon_node_get(lstcon_node_t *nd)
+{
+ LASSERT (nd->nd_ref >= 1);
+
+ nd->nd_ref++;
+}
+
+static int
+lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
+{
+ lstcon_ndlink_t *ndl;
+ unsigned int idx = LNET_NIDADDR(id.nid) % LST_GLOBAL_HASHSIZE;
+
+ LASSERT (id.nid != LNET_NID_ANY);
+
+ list_for_each_entry(ndl, &console_session.ses_ndl_hash[idx], ndl_hlink) {
+ if (ndl->ndl_node->nd_id.nid != id.nid ||
+ ndl->ndl_node->nd_id.pid != id.pid)
+ continue;
+
+ lstcon_node_get(ndl->ndl_node);
+ *ndpp = ndl->ndl_node;
+ return 0;
+ }
+
+ if (!create)
+ return -ENOENT;
+
+ LIBCFS_ALLOC(*ndpp, sizeof(lstcon_node_t) + sizeof(lstcon_ndlink_t));
+ if (*ndpp == NULL)
+ return -ENOMEM;
+
+ ndl = (lstcon_ndlink_t *)(*ndpp + 1);
+
+ ndl->ndl_node = *ndpp;
+
+ ndl->ndl_node->nd_ref = 1;
+ ndl->ndl_node->nd_id = id;
+ ndl->ndl_node->nd_stamp = cfs_time_current();
+ ndl->ndl_node->nd_state = LST_NODE_UNKNOWN;
+ ndl->ndl_node->nd_timeout = 0;
+ memset(&ndl->ndl_node->nd_ping, 0, sizeof(lstcon_rpc_t));
+
+ /* queued in the global hash & list; no refcount is taken by
+ * the global hash & list, so once the caller releases its
+ * refcount the node will be freed */
+ list_add_tail(&ndl->ndl_hlink, &console_session.ses_ndl_hash[idx]);
+ list_add_tail(&ndl->ndl_link, &console_session.ses_ndl_list);
+
+ return 0;
+}
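+
+/*
+ * Note on the allocation above: the node and its session ndlink are
+ * carved out of a single LIBCFS_ALLOC of sizeof(lstcon_node_t) +
+ * sizeof(lstcon_ndlink_t), with the ndlink placed directly behind the
+ * node ((lstcon_ndlink_t *)(*ndpp + 1)).  lstcon_node_put() below relies
+ * on the same layout to recover the ndlink and frees both with a single
+ * LIBCFS_FREE.
+ */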
+
+void
+lstcon_node_put(lstcon_node_t *nd)
+{
+ lstcon_ndlink_t *ndl;
+
+ LASSERT (nd->nd_ref > 0);
+
+ if (--nd->nd_ref > 0)
+ return;
+
+ ndl = (lstcon_ndlink_t *)(nd + 1);
+
+ LASSERT (!list_empty(&ndl->ndl_link));
+ LASSERT (!list_empty(&ndl->ndl_hlink));
+
+ /* remove from session */
+ list_del(&ndl->ndl_link);
+ list_del(&ndl->ndl_hlink);
+
+ LIBCFS_FREE(nd, sizeof(lstcon_node_t) + sizeof(lstcon_ndlink_t));
+}
+
+static int
+lstcon_ndlink_find(struct list_head *hash,
+ lnet_process_id_t id, lstcon_ndlink_t **ndlpp, int create)
+{
+ unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
+ lstcon_ndlink_t *ndl;
+ lstcon_node_t *nd;
+ int rc;
+
+ if (id.nid == LNET_NID_ANY)
+ return -EINVAL;
+
+ /* search in hash */
+ list_for_each_entry(ndl, &hash[idx], ndl_hlink) {
+ if (ndl->ndl_node->nd_id.nid != id.nid ||
+ ndl->ndl_node->nd_id.pid != id.pid)
+ continue;
+
+ *ndlpp = ndl;
+ return 0;
+ }
+
+ if (create == 0)
+ return -ENOENT;
+
+ /* find or create in session hash */
+ rc = lstcon_node_find(id, &nd, (create == 1) ? 1 : 0);
+ if (rc != 0)
+ return rc;
+
+ LIBCFS_ALLOC(ndl, sizeof(lstcon_ndlink_t));
+ if (ndl == NULL) {
+ lstcon_node_put(nd);
+ return -ENOMEM;
+ }
+
+ *ndlpp = ndl;
+
+ ndl->ndl_node = nd;
+ INIT_LIST_HEAD(&ndl->ndl_link);
+ list_add_tail(&ndl->ndl_hlink, &hash[idx]);
+
+ return 0;
+}
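+
+/*
+ * The 'create' argument above is used as a small enum by callers in this
+ * file: 0 means look up only, 1 means create the session node if it does
+ * not exist yet, and 2 (used by lstcon_nodes_stat()) means link a node
+ * that must already be known to the session, since only create == 1 is
+ * forwarded to lstcon_node_find() as a create request.
+ */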
+
+static void
+lstcon_ndlink_release(lstcon_ndlink_t *ndl)
+{
+ LASSERT (list_empty(&ndl->ndl_link));
+ LASSERT (!list_empty(&ndl->ndl_hlink));
+
+ list_del(&ndl->ndl_hlink); /* delete from hash */
+ lstcon_node_put(ndl->ndl_node);
+
+ LIBCFS_FREE(ndl, sizeof(*ndl));
+}
+
+static int
+lstcon_group_alloc(char *name, lstcon_group_t **grpp)
+{
+ lstcon_group_t *grp;
+ int i;
+
+ LIBCFS_ALLOC(grp, offsetof(lstcon_group_t,
+ grp_ndl_hash[LST_NODE_HASHSIZE]));
+ if (grp == NULL)
+ return -ENOMEM;
+
+ memset(grp, 0, offsetof(lstcon_group_t,
+ grp_ndl_hash[LST_NODE_HASHSIZE]));
+
+ grp->grp_ref = 1;
+ if (name != NULL)
+ strcpy(grp->grp_name, name);
+
+ INIT_LIST_HEAD(&grp->grp_link);
+ INIT_LIST_HEAD(&grp->grp_ndl_list);
+ INIT_LIST_HEAD(&grp->grp_trans_list);
+
+ for (i = 0; i < LST_NODE_HASHSIZE; i++)
+ INIT_LIST_HEAD(&grp->grp_ndl_hash[i]);
+
+ *grpp = grp;
+
+ return 0;
+}
+
+static void
+lstcon_group_addref(lstcon_group_t *grp)
+{
+ grp->grp_ref++;
+}
+
+static void lstcon_group_ndlink_release(lstcon_group_t *, lstcon_ndlink_t *);
+
+static void
+lstcon_group_drain(lstcon_group_t *grp, int keep)
+{
+ lstcon_ndlink_t *ndl;
+ lstcon_ndlink_t *tmp;
+
+ list_for_each_entry_safe(ndl, tmp, &grp->grp_ndl_list, ndl_link) {
+ if ((ndl->ndl_node->nd_state & keep) == 0)
+ lstcon_group_ndlink_release(grp, ndl);
+ }
+}
+
+static void
+lstcon_group_decref(lstcon_group_t *grp)
+{
+ int i;
+
+ if (--grp->grp_ref > 0)
+ return;
+
+ if (!list_empty(&grp->grp_link))
+ list_del(&grp->grp_link);
+
+ lstcon_group_drain(grp, 0);
+
+ for (i = 0; i < LST_NODE_HASHSIZE; i++) {
+ LASSERT (list_empty(&grp->grp_ndl_hash[i]));
+ }
+
+ LIBCFS_FREE(grp, offsetof(lstcon_group_t,
+ grp_ndl_hash[LST_NODE_HASHSIZE]));
+}
+
+static int
+lstcon_group_find(char *name, lstcon_group_t **grpp)
+{
+ lstcon_group_t *grp;
+
+ list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
+ if (strncmp(grp->grp_name, name, LST_NAME_SIZE) != 0)
+ continue;
+
+ lstcon_group_addref(grp); /* +1 ref for caller */
+ *grpp = grp;
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static void
+lstcon_group_put(lstcon_group_t *grp)
+{
+ lstcon_group_decref(grp);
+}
+
+static int
+lstcon_group_ndlink_find(lstcon_group_t *grp, lnet_process_id_t id,
+ lstcon_ndlink_t **ndlpp, int create)
+{
+ int rc;
+
+ rc = lstcon_ndlink_find(&grp->grp_ndl_hash[0], id, ndlpp, create);
+ if (rc != 0)
+ return rc;
+
+ if (!list_empty(&(*ndlpp)->ndl_link))
+ return 0;
+
+ list_add_tail(&(*ndlpp)->ndl_link, &grp->grp_ndl_list);
+ grp->grp_nnode++;
+
+ return 0;
+}
+
+static void
+lstcon_group_ndlink_release(lstcon_group_t *grp, lstcon_ndlink_t *ndl)
+{
+ list_del_init(&ndl->ndl_link);
+ lstcon_ndlink_release(ndl);
+ grp->grp_nnode--;
+}
+
+static void
+lstcon_group_ndlink_move(lstcon_group_t *old,
+ lstcon_group_t *new, lstcon_ndlink_t *ndl)
+{
+ unsigned int idx = LNET_NIDADDR(ndl->ndl_node->nd_id.nid) %
+ LST_NODE_HASHSIZE;
+
+ list_del(&ndl->ndl_hlink);
+ list_del(&ndl->ndl_link);
+ old->grp_nnode--;
+
+ list_add_tail(&ndl->ndl_hlink, &new->grp_ndl_hash[idx]);
+ list_add_tail(&ndl->ndl_link, &new->grp_ndl_list);
+ new->grp_nnode++;
+
+ return;
+}
+
+static void
+lstcon_group_move(lstcon_group_t *old, lstcon_group_t *new)
+{
+ lstcon_ndlink_t *ndl;
+
+ while (!list_empty(&old->grp_ndl_list)) {
+ ndl = list_entry(old->grp_ndl_list.next,
+ lstcon_ndlink_t, ndl_link);
+ lstcon_group_ndlink_move(old, new, ndl);
+ }
+}
+
+int
+lstcon_sesrpc_condition(int transop, lstcon_node_t *nd, void *arg)
+{
+ lstcon_group_t *grp = (lstcon_group_t *)arg;
+
+ switch (transop) {
+ case LST_TRANS_SESNEW:
+ if (nd->nd_state == LST_NODE_ACTIVE)
+ return 0;
+ break;
+
+ case LST_TRANS_SESEND:
+ if (nd->nd_state != LST_NODE_ACTIVE)
+ return 0;
+
+ if (grp != NULL && nd->nd_ref > 1)
+ return 0;
+ break;
+
+ case LST_TRANS_SESQRY:
+ break;
+
+ default:
+ LBUG();
+ }
+
+ return 1;
+}
+
+int
+lstcon_sesrpc_readent(int transop, srpc_msg_t *msg,
+ lstcon_rpc_ent_t *ent_up)
+{
+ srpc_debug_reply_t *rep;
+
+ switch (transop) {
+ case LST_TRANS_SESNEW:
+ case LST_TRANS_SESEND:
+ return 0;
+
+ case LST_TRANS_SESQRY:
+ rep = &msg->msg_body.dbg_reply;
+
+ if (copy_to_user(&ent_up->rpe_priv[0],
+ &rep->dbg_timeout, sizeof(int)) ||
+ copy_to_user(&ent_up->rpe_payload[0],
+ &rep->dbg_name, LST_NAME_SIZE))
+ return -EFAULT;
+
+ return 0;
+
+ default:
+ LBUG();
+ }
+
+ return 0;
+}
+
+static int
+lstcon_group_nodes_add(lstcon_group_t *grp,
+ int count, lnet_process_id_t *ids_up,
+ unsigned *featp, struct list_head *result_up)
+{
+ lstcon_rpc_trans_t *trans;
+ lstcon_ndlink_t *ndl;
+ lstcon_group_t *tmp;
+ lnet_process_id_t id;
+ int i;
+ int rc;
+
+ rc = lstcon_group_alloc(NULL, &tmp);
+ if (rc != 0) {
+ CERROR("Out of memory\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
+ rc = -EFAULT;
+ break;
+ }
+
+ /* skip if it's in this group already */
+ rc = lstcon_group_ndlink_find(grp, id, &ndl, 0);
+ if (rc == 0)
+ continue;
+
+ /* add to tmp group */
+ rc = lstcon_group_ndlink_find(tmp, id, &ndl, 1);
+ if (rc != 0) {
+ CERROR("Can't create ndlink, out of memory\n");
+ break;
+ }
+ }
+
+ if (rc != 0) {
+ lstcon_group_put(tmp);
+ return rc;
+ }
+
+ rc = lstcon_rpc_trans_ndlist(&tmp->grp_ndl_list,
+ &tmp->grp_trans_list, LST_TRANS_SESNEW,
+ tmp, lstcon_sesrpc_condition, &trans);
+ if (rc != 0) {
+ CERROR("Can't create transaction: %d\n", rc);
+ lstcon_group_put(tmp);
+ return rc;
+ }
+
+ /* post all RPCs */
+ lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);
+
+ rc = lstcon_rpc_trans_interpreter(trans, result_up,
+ lstcon_sesrpc_readent);
+ *featp = trans->tas_features;
+
+ /* destroy all RPCs */
+ lstcon_rpc_trans_destroy(trans);
+
+ lstcon_group_move(tmp, grp);
+ lstcon_group_put(tmp);
+
+ return rc;
+}
+
+static int
+lstcon_group_nodes_remove(lstcon_group_t *grp,
+ int count, lnet_process_id_t *ids_up,
+ struct list_head *result_up)
+{
+ lstcon_rpc_trans_t *trans;
+ lstcon_ndlink_t *ndl;
+ lstcon_group_t *tmp;
+ lnet_process_id_t id;
+ int rc;
+ int i;
+
+ /* End session and remove node from the group */
+
+ rc = lstcon_group_alloc(NULL, &tmp);
+ if (rc != 0) {
+ CERROR("Out of memory\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
+ rc = -EFAULT;
+ goto error;
+ }
+
+ /* move node to tmp group */
+ if (lstcon_group_ndlink_find(grp, id, &ndl, 0) == 0)
+ lstcon_group_ndlink_move(grp, tmp, ndl);
+ }
+
+ rc = lstcon_rpc_trans_ndlist(&tmp->grp_ndl_list,
+ &tmp->grp_trans_list, LST_TRANS_SESEND,
+ tmp, lstcon_sesrpc_condition, &trans);
+ if (rc != 0) {
+ CERROR("Can't create transaction: %d\n", rc);
+ goto error;
+ }
+
+ lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);
+
+ rc = lstcon_rpc_trans_interpreter(trans, result_up, NULL);
+
+ lstcon_rpc_trans_destroy(trans);
+ /* release nodes anyway, because we can't roll back their status */
+ lstcon_group_put(tmp);
+
+ return rc;
+error:
+ lstcon_group_move(tmp, grp);
+ lstcon_group_put(tmp);
+
+ return rc;
+}
+
+int
+lstcon_group_add(char *name)
+{
+ lstcon_group_t *grp;
+ int rc;
+
+ rc = (lstcon_group_find(name, &grp) == 0) ? -EEXIST : 0;
+ if (rc != 0) {
+ /* found a group with the same name */
+ lstcon_group_put(grp);
+ return rc;
+ }
+
+ rc = lstcon_group_alloc(name, &grp);
+ if (rc != 0) {
+ CERROR("Can't allocate descriptor for group %s\n", name);
+ return -ENOMEM;
+ }
+
+ list_add_tail(&grp->grp_link, &console_session.ses_grp_list);
+
+ return rc;
+}
+
+int
+lstcon_nodes_add(char *name, int count, lnet_process_id_t *ids_up,
+ unsigned *featp, struct list_head *result_up)
+{
+ lstcon_group_t *grp;
+ int rc;
+
+ LASSERT (count > 0);
+ LASSERT (ids_up != NULL);
+
+ rc = lstcon_group_find(name, &grp);
+ if (rc != 0) {
+ CDEBUG(D_NET, "Can't find group %s\n", name);
+ return rc;
+ }
+
+ if (grp->grp_ref > 2) {
+ /* referred by other threads or test */
+ CDEBUG(D_NET, "Group %s is busy\n", name);
+ lstcon_group_put(grp);
+
+ return -EBUSY;
+ }
+
+ rc = lstcon_group_nodes_add(grp, count, ids_up, featp, result_up);
+
+ lstcon_group_put(grp);
+
+ return rc;
+}
+
+int
+lstcon_group_del(char *name)
+{
+ lstcon_rpc_trans_t *trans;
+ lstcon_group_t *grp;
+ int rc;
+
+ rc = lstcon_group_find(name, &grp);
+ if (rc != 0) {
+ CDEBUG(D_NET, "Can't find group: %s\n", name);
+ return rc;
+ }
+
+ if (grp->grp_ref > 2) {
+ /* referred by other threads or tests */
+ CDEBUG(D_NET, "Group %s is busy\n", name);
+ lstcon_group_put(grp);
+ return -EBUSY;
+ }
+
+ rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list,
+ &grp->grp_trans_list, LST_TRANS_SESEND,
+ grp, lstcon_sesrpc_condition, &trans);
+ if (rc != 0) {
+ CERROR("Can't create transaction: %d\n", rc);
+ lstcon_group_put(grp);
+ return rc;
+ }
+
+ lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);
+
+ lstcon_rpc_trans_destroy(trans);
+
+ lstcon_group_put(grp);
+ /* -ref for session, it's destroyed; status can't be
+ * rolled back, destroy the group anyway */
+ lstcon_group_put(grp);
+
+ return rc;
+}
+
+int
+lstcon_group_clean(char *name, int args)
+{
+ lstcon_group_t *grp = NULL;
+ int rc;
+
+ rc = lstcon_group_find(name, &grp);
+ if (rc != 0) {
+ CDEBUG(D_NET, "Can't find group %s\n", name);
+ return rc;
+ }
+
+ if (grp->grp_ref > 2) {
+ /* referred by test */
+ CDEBUG(D_NET, "Group %s is busy\n", name);
+ lstcon_group_put(grp);
+ return -EBUSY;
+ }
+
+ args = (LST_NODE_ACTIVE | LST_NODE_BUSY |
+ LST_NODE_DOWN | LST_NODE_UNKNOWN) & ~args;
+
+ lstcon_group_drain(grp, args);
+
+ lstcon_group_put(grp);
+ /* release empty group */
+ if (list_empty(&grp->grp_ndl_list))
+ lstcon_group_put(grp);
+
+ return 0;
+}
+
+int
+lstcon_nodes_remove(char *name, int count,
+ lnet_process_id_t *ids_up, struct list_head *result_up)
+{
+ lstcon_group_t *grp = NULL;
+ int rc;
+
+ rc = lstcon_group_find(name, &grp);
+ if (rc != 0) {
+ CDEBUG(D_NET, "Can't find group: %s\n", name);
+ return rc;
+ }
+
+ if (grp->grp_ref > 2) {
+ /* referred by test */
+ CDEBUG(D_NET, "Group %s is busy\n", name);
+ lstcon_group_put(grp);
+ return -EBUSY;
+ }
+
+ rc = lstcon_group_nodes_remove(grp, count, ids_up, result_up);
+
+ lstcon_group_put(grp);
+ /* release empty group */
+ if (list_empty(&grp->grp_ndl_list))
+ lstcon_group_put(grp);
+
+ return rc;
+}
+
+int
+lstcon_group_refresh(char *name, struct list_head *result_up)
+{
+ lstcon_rpc_trans_t *trans;
+ lstcon_group_t *grp;
+ int rc;
+
+ rc = lstcon_group_find(name, &grp);
+ if (rc != 0) {
+ CDEBUG(D_NET, "Can't find group: %s\n", name);
+ return rc;
+ }
+
+ if (grp->grp_ref > 2) {
+ /* referred by test */
+ CDEBUG(D_NET, "Group %s is busy\n", name);
+ lstcon_group_put(grp);
+ return -EBUSY;
+ }
+
+ /* re-invite all inactive nodes in the group */
+ rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list,
+ &grp->grp_trans_list, LST_TRANS_SESNEW,
+ grp, lstcon_sesrpc_condition, &trans);
+ if (rc != 0) {
+ /* local error, return */
+ CDEBUG(D_NET, "Can't create transaction: %d\n", rc);
+ lstcon_group_put(grp);
+ return rc;
+ }
+
+ lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);
+
+ rc = lstcon_rpc_trans_interpreter(trans, result_up, NULL);
+
+ lstcon_rpc_trans_destroy(trans);
+ /* -ref for me */
+ lstcon_group_put(grp);
+
+ return rc;
+}
+
+int
+lstcon_group_list(int index, int len, char *name_up)
+{
+ lstcon_group_t *grp;
+
+ LASSERT (index >= 0);
+ LASSERT (name_up != NULL);
+
+ list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
+ if (index-- == 0) {
+ return copy_to_user(name_up, grp->grp_name, len) ?
+ -EFAULT : 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+static int
+lstcon_nodes_getent(struct list_head *head, int *index_p,
+ int *count_p, lstcon_node_ent_t *dents_up)
+{
+ lstcon_ndlink_t *ndl;
+ lstcon_node_t *nd;
+ int count = 0;
+ int index = 0;
+
+ LASSERT (index_p != NULL && count_p != NULL);
+ LASSERT (dents_up != NULL);
+ LASSERT (*index_p >= 0);
+ LASSERT (*count_p > 0);
+
+ list_for_each_entry(ndl, head, ndl_link) {
+ if (index++ < *index_p)
+ continue;
+
+ if (count >= *count_p)
+ break;
+
+ nd = ndl->ndl_node;
+ if (copy_to_user(&dents_up[count].nde_id,
+ &nd->nd_id, sizeof(nd->nd_id)) ||
+ copy_to_user(&dents_up[count].nde_state,
+ &nd->nd_state, sizeof(nd->nd_state)))
+ return -EFAULT;
+
+ count++;
+ }
+
+ if (index <= *index_p)
+ return -ENOENT;
+
+ *count_p = count;
+ *index_p = index;
+
+ return 0;
+}
+
+int
+lstcon_group_info(char *name, lstcon_ndlist_ent_t *gents_p,
+ int *index_p, int *count_p, lstcon_node_ent_t *dents_up)
+{
+ lstcon_ndlist_ent_t *gentp;
+ lstcon_group_t *grp;
+ lstcon_ndlink_t *ndl;
+ int rc;
+
+ rc = lstcon_group_find(name, &grp);
+ if (rc != 0) {
+ CDEBUG(D_NET, "Can't find group %s\n", name);
+ return rc;
+ }
+
+ if (dents_up != NULL) {
+ /* verbose query */
+ rc = lstcon_nodes_getent(&grp->grp_ndl_list,
+ index_p, count_p, dents_up);
+ lstcon_group_put(grp);
+
+ return rc;
+ }
+
+ /* non-verbose query */
+ LIBCFS_ALLOC(gentp, sizeof(lstcon_ndlist_ent_t));
+ if (gentp == NULL) {
+ CERROR("Can't allocate ndlist_ent\n");
+ lstcon_group_put(grp);
+
+ return -ENOMEM;
+ }
+
+ memset(gentp, 0, sizeof(lstcon_ndlist_ent_t));
+
+ list_for_each_entry(ndl, &grp->grp_ndl_list, ndl_link)
+ LST_NODE_STATE_COUNTER(ndl->ndl_node, gentp);
+
+ rc = copy_to_user(gents_p, gentp,
+ sizeof(lstcon_ndlist_ent_t)) ? -EFAULT: 0;
+
+ LIBCFS_FREE(gentp, sizeof(lstcon_ndlist_ent_t));
+
+ lstcon_group_put(grp);
+
+ return 0;
+}
+
+int
+lstcon_batch_find(char *name, lstcon_batch_t **batpp)
+{
+ lstcon_batch_t *bat;
+
+ list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) {
+ if (strncmp(bat->bat_name, name, LST_NAME_SIZE) == 0) {
+ *batpp = bat;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+int
+lstcon_batch_add(char *name)
+{
+ lstcon_batch_t *bat;
+ int i;
+ int rc;
+
+ rc = (lstcon_batch_find(name, &bat) == 0) ? -EEXIST : 0;
+ if (rc != 0) {
+ CDEBUG(D_NET, "Batch %s already exists\n", name);
+ return rc;
+ }
+
+ LIBCFS_ALLOC(bat, sizeof(lstcon_batch_t));
+ if (bat == NULL) {
+ CERROR("Can't allocate descriptor for batch %s\n", name);
+ return -ENOMEM;
+ }
+
+ LIBCFS_ALLOC(bat->bat_cli_hash,
+ sizeof(struct list_head) * LST_NODE_HASHSIZE);
+ if (bat->bat_cli_hash == NULL) {
+ CERROR("Can't allocate hash for batch %s\n", name);
+ LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
+
+ return -ENOMEM;
+ }
+
+ LIBCFS_ALLOC(bat->bat_srv_hash,
+ sizeof(struct list_head) * LST_NODE_HASHSIZE);
+ if (bat->bat_srv_hash == NULL) {
+ CERROR("Can't allocate hash for batch %s\n", name);
+ LIBCFS_FREE(bat->bat_cli_hash,
+ sizeof(struct list_head) * LST_NODE_HASHSIZE);
+ LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
+
+ return -ENOMEM;
+ }
+
+ strcpy(bat->bat_name, name);
+ bat->bat_hdr.tsb_index = 0;
+ bat->bat_hdr.tsb_id.bat_id = ++console_session.ses_id_cookie;
+
+ bat->bat_ntest = 0;
+ bat->bat_state = LST_BATCH_IDLE;
+
+ INIT_LIST_HEAD(&bat->bat_cli_list);
+ INIT_LIST_HEAD(&bat->bat_srv_list);
+ INIT_LIST_HEAD(&bat->bat_test_list);
+ INIT_LIST_HEAD(&bat->bat_trans_list);
+
+ for (i = 0; i < LST_NODE_HASHSIZE; i++) {
+ INIT_LIST_HEAD(&bat->bat_cli_hash[i]);
+ INIT_LIST_HEAD(&bat->bat_srv_hash[i]);
+ }
+
+ list_add_tail(&bat->bat_link, &console_session.ses_bat_list);
+
+ return rc;
+}
+
+int
+lstcon_batch_list(int index, int len, char *name_up)
+{
+ lstcon_batch_t *bat;
+
+ LASSERT (name_up != NULL);
+ LASSERT (index >= 0);
+
+ list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) {
+ if (index-- == 0) {
+ return copy_to_user(name_up, bat->bat_name, len) ?
+ -EFAULT : 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+int
+lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up, int server,
+ int testidx, int *index_p, int *ndent_p,
+ lstcon_node_ent_t *dents_up)
+{
+ lstcon_test_batch_ent_t *entp;
+ struct list_head *clilst;
+ struct list_head *srvlst;
+ lstcon_test_t *test = NULL;
+ lstcon_batch_t *bat;
+ lstcon_ndlink_t *ndl;
+ int rc;
+
+ rc = lstcon_batch_find(name, &bat);
+ if (rc != 0) {
+ CDEBUG(D_NET, "Can't find batch %s\n", name);
+ return -ENOENT;
+ }
+
+ if (testidx > 0) {
+ /* query test, test index starts from 1 */
+ list_for_each_entry(test, &bat->bat_test_list, tes_link) {
+ if (testidx-- == 1)
+ break;
+ }
+
+ if (testidx > 0) {
+ CDEBUG(D_NET, "Can't find specified test in batch\n");
+ return -ENOENT;
+ }
+ }
+
+ clilst = (test == NULL) ? &bat->bat_cli_list :
+ &test->tes_src_grp->grp_ndl_list;
+ srvlst = (test == NULL) ? &bat->bat_srv_list :
+ &test->tes_dst_grp->grp_ndl_list;
+
+ if (dents_up != NULL) {
+ rc = lstcon_nodes_getent((server ? srvlst: clilst),
+ index_p, ndent_p, dents_up);
+ return rc;
+ }
+
+ /* non-verbose query */
+ LIBCFS_ALLOC(entp, sizeof(lstcon_test_batch_ent_t));
+ if (entp == NULL)
+ return -ENOMEM;
+
+ memset(entp, 0, sizeof(lstcon_test_batch_ent_t));
+
+ if (test == NULL) {
+ entp->u.tbe_batch.bae_ntest = bat->bat_ntest;
+ entp->u.tbe_batch.bae_state = bat->bat_state;
+
+ } else {
+ entp->u.tbe_test.tse_type = test->tes_type;
+ entp->u.tbe_test.tse_loop = test->tes_loop;
+ entp->u.tbe_test.tse_concur = test->tes_concur;
+ }
+
+ list_for_each_entry(ndl, clilst, ndl_link)
+ LST_NODE_STATE_COUNTER(ndl->ndl_node, &entp->tbe_cli_nle);
+
+ list_for_each_entry(ndl, srvlst, ndl_link)
+ LST_NODE_STATE_COUNTER(ndl->ndl_node, &entp->tbe_srv_nle);
+
+ rc = copy_to_user(ent_up, entp,
+ sizeof(lstcon_test_batch_ent_t)) ? -EFAULT : 0;
+
+ LIBCFS_FREE(entp, sizeof(lstcon_test_batch_ent_t));
+
+ return rc;
+}
+
+int
+lstcon_batrpc_condition(int transop, lstcon_node_t *nd, void *arg)
+{
+ switch (transop) {
+ case LST_TRANS_TSBRUN:
+ if (nd->nd_state != LST_NODE_ACTIVE)
+ return -ENETDOWN;
+ break;
+
+ case LST_TRANS_TSBSTOP:
+ if (nd->nd_state != LST_NODE_ACTIVE)
+ return 0;
+ break;
+
+ case LST_TRANS_TSBCLIQRY:
+ case LST_TRANS_TSBSRVQRY:
+ break;
+ }
+
+ return 1;
+}
+
+static int
+lstcon_batch_op(lstcon_batch_t *bat, int transop,
+ struct list_head *result_up)
+{
+ lstcon_rpc_trans_t *trans;
+ int rc;
+
+ rc = lstcon_rpc_trans_ndlist(&bat->bat_cli_list,
+ &bat->bat_trans_list, transop,
+ bat, lstcon_batrpc_condition, &trans);
+ if (rc != 0) {
+ CERROR("Can't create transaction: %d\n", rc);
+ return rc;
+ }
+
+ lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);
+
+ rc = lstcon_rpc_trans_interpreter(trans, result_up, NULL);
+
+ lstcon_rpc_trans_destroy(trans);
+
+ return rc;
+}
+
+int
+lstcon_batch_run(char *name, int timeout, struct list_head *result_up)
+{
+ lstcon_batch_t *bat;
+ int rc;
+
+ if (lstcon_batch_find(name, &bat) != 0) {
+ CDEBUG(D_NET, "Can't find batch %s\n", name);
+ return -ENOENT;
+ }
+
+ bat->bat_arg = timeout;
+
+ rc = lstcon_batch_op(bat, LST_TRANS_TSBRUN, result_up);
+
+ /* mark batch as running if it's started in any node */
+ if (lstcon_tsbop_stat_success(lstcon_trans_stat(), 0) != 0)
+ bat->bat_state = LST_BATCH_RUNNING;
+
+ return rc;
+}
+
+int
+lstcon_batch_stop(char *name, int force, struct list_head *result_up)
+{
+ lstcon_batch_t *bat;
+ int rc;
+
+ if (lstcon_batch_find(name, &bat) != 0) {
+ CDEBUG(D_NET, "Can't find batch %s\n", name);
+ return -ENOENT;
+ }
+
+ bat->bat_arg = force;
+
+ rc = lstcon_batch_op(bat, LST_TRANS_TSBSTOP, result_up);
+
+ /* mark batch as stopped if all RPCs finished */
+ if (lstcon_tsbop_stat_failure(lstcon_trans_stat(), 0) == 0)
+ bat->bat_state = LST_BATCH_IDLE;
+
+ return rc;
+}
+
+static void
+lstcon_batch_destroy(lstcon_batch_t *bat)
+{
+ lstcon_ndlink_t *ndl;
+ lstcon_test_t *test;
+ int i;
+
+ list_del(&bat->bat_link);
+
+ while (!list_empty(&bat->bat_test_list)) {
+ test = list_entry(bat->bat_test_list.next,
+ lstcon_test_t, tes_link);
+ LASSERT (list_empty(&test->tes_trans_list));
+
+ list_del(&test->tes_link);
+
+ lstcon_group_put(test->tes_src_grp);
+ lstcon_group_put(test->tes_dst_grp);
+
+ LIBCFS_FREE(test, offsetof(lstcon_test_t,
+ tes_param[test->tes_paramlen]));
+ }
+
+ LASSERT (list_empty(&bat->bat_trans_list));
+
+ while (!list_empty(&bat->bat_cli_list)) {
+ ndl = list_entry(bat->bat_cli_list.next,
+ lstcon_ndlink_t, ndl_link);
+ list_del_init(&ndl->ndl_link);
+
+ lstcon_ndlink_release(ndl);
+ }
+
+ while (!list_empty(&bat->bat_srv_list)) {
+ ndl = list_entry(bat->bat_srv_list.next,
+ lstcon_ndlink_t, ndl_link);
+ list_del_init(&ndl->ndl_link);
+
+ lstcon_ndlink_release(ndl);
+ }
+
+ for (i = 0; i < LST_NODE_HASHSIZE; i++) {
+ LASSERT (list_empty(&bat->bat_cli_hash[i]));
+ LASSERT (list_empty(&bat->bat_srv_hash[i]));
+ }
+
+ LIBCFS_FREE(bat->bat_cli_hash,
+ sizeof(struct list_head) * LST_NODE_HASHSIZE);
+ LIBCFS_FREE(bat->bat_srv_hash,
+ sizeof(struct list_head) * LST_NODE_HASHSIZE);
+ LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
+}
+
+int
+lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg)
+{
+ lstcon_test_t *test;
+ lstcon_batch_t *batch;
+ lstcon_ndlink_t *ndl;
+ struct list_head *hash;
+ struct list_head *head;
+
+ test = (lstcon_test_t *)arg;
+ LASSERT (test != NULL);
+
+ batch = test->tes_batch;
+ LASSERT (batch != NULL);
+
+ if (test->tes_oneside &&
+ transop == LST_TRANS_TSBSRVADD)
+ return 0;
+
+ if (nd->nd_state != LST_NODE_ACTIVE)
+ return -ENETDOWN;
+
+ if (transop == LST_TRANS_TSBCLIADD) {
+ hash = batch->bat_cli_hash;
+ head = &batch->bat_cli_list;
+
+ } else {
+ LASSERT (transop == LST_TRANS_TSBSRVADD);
+
+ hash = batch->bat_srv_hash;
+ head = &batch->bat_srv_list;
+ }
+
+ LASSERT (nd->nd_id.nid != LNET_NID_ANY);
+
+ if (lstcon_ndlink_find(hash, nd->nd_id, &ndl, 1) != 0)
+ return -ENOMEM;
+
+ if (list_empty(&ndl->ndl_link))
+ list_add_tail(&ndl->ndl_link, head);
+
+ return 1;
+}
+
+static int
+lstcon_test_nodes_add(lstcon_test_t *test, struct list_head *result_up)
+{
+ lstcon_rpc_trans_t *trans;
+ lstcon_group_t *grp;
+ int transop;
+ int rc;
+
+ LASSERT (test->tes_src_grp != NULL);
+ LASSERT (test->tes_dst_grp != NULL);
+
+ transop = LST_TRANS_TSBSRVADD;
+ grp = test->tes_dst_grp;
+again:
+ rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list,
+ &test->tes_trans_list, transop,
+ test, lstcon_testrpc_condition, &trans);
+ if (rc != 0) {
+ CERROR("Can't create transaction: %d\n", rc);
+ return rc;
+ }
+
+ lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);
+
+ if (lstcon_trans_stat()->trs_rpc_errno != 0 ||
+ lstcon_trans_stat()->trs_fwk_errno != 0) {
+ lstcon_rpc_trans_interpreter(trans, result_up, NULL);
+
+ lstcon_rpc_trans_destroy(trans);
+ /* return if any error */
+ CDEBUG(D_NET, "Failed to add test %s, "
+ "RPC error %d, framework error %d\n",
+ transop == LST_TRANS_TSBCLIADD ? "client" : "server",
+ lstcon_trans_stat()->trs_rpc_errno,
+ lstcon_trans_stat()->trs_fwk_errno);
+
+ return rc;
+ }
+
+ lstcon_rpc_trans_destroy(trans);
+
+ if (transop == LST_TRANS_TSBCLIADD)
+ return rc;
+
+ transop = LST_TRANS_TSBCLIADD;
+ grp = test->tes_src_grp;
+ test->tes_cliidx = 0;
+
+ /* requests to test clients */
+ goto again;
+}
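+
+/*
+ * lstcon_test_nodes_add() deliberately works in two passes: servers first
+ * (LST_TRANS_TSBSRVADD on the destination group), then clients
+ * (LST_TRANS_TSBCLIADD on the source group), and it stops after the first
+ * pass if any RPC or framework error was reported; presumably so that the
+ * test servers are in place before any client starts sending to them.
+ */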
+
+int
+lstcon_test_add(char *name, int type, int loop, int concur,
+ int dist, int span, char *src_name, char *dst_name,
+ void *param, int paramlen, int *retp,
+ struct list_head *result_up)
+{
+ lstcon_group_t *src_grp = NULL;
+ lstcon_group_t *dst_grp = NULL;
+ lstcon_test_t *test = NULL;
+ lstcon_batch_t *batch;
+ int rc;
+
+ rc = lstcon_batch_find(name, &batch);
+ if (rc != 0) {
+ CDEBUG(D_NET, "Can't find batch %s\n", name);
+ return rc;
+ }
+
+ if (batch->bat_state != LST_BATCH_IDLE) {
+ CDEBUG(D_NET, "Can't change running batch %s\n", name);
+ return -EBUSY;
+ }
+
+ rc = lstcon_group_find(src_name, &src_grp);
+ if (rc != 0) {
+ CDEBUG(D_NET, "Can't find group %s\n", src_name);
+ goto out;
+ }
+
+ rc = lstcon_group_find(dst_name, &dst_grp);
+ if (rc != 0) {
+ CDEBUG(D_NET, "Can't find group %s\n", dst_name);
+ goto out;
+ }
+
+ if (dst_grp->grp_userland)
+ *retp = 1;
+
+ LIBCFS_ALLOC(test, offsetof(lstcon_test_t, tes_param[paramlen]));
+ if (!test) {
+ CERROR("Can't allocate test descriptor\n");
+ rc = -ENOMEM;
+
+ goto out;
+ }
+
+ memset(test, 0, offsetof(lstcon_test_t, tes_param[paramlen]));
+ test->tes_hdr.tsb_id = batch->bat_hdr.tsb_id;
+ test->tes_batch = batch;
+ test->tes_type = type;
+ test->tes_oneside = 0; /* TODO */
+ test->tes_loop = loop;
+ test->tes_concur = concur;
+ test->tes_stop_onerr = 1; /* TODO */
+ test->tes_span = span;
+ test->tes_dist = dist;
+ test->tes_cliidx = 0; /* just used for creating RPC */
+ test->tes_src_grp = src_grp;
+ test->tes_dst_grp = dst_grp;
+ INIT_LIST_HEAD(&test->tes_trans_list);
+
+ if (param != NULL) {
+ test->tes_paramlen = paramlen;
+ memcpy(&test->tes_param[0], param, paramlen);
+ }
+
+ rc = lstcon_test_nodes_add(test, result_up);
+
+ if (rc != 0)
+ goto out;
+
+ if (lstcon_trans_stat()->trs_rpc_errno != 0 ||
+ lstcon_trans_stat()->trs_fwk_errno != 0)
+ CDEBUG(D_NET, "Failed to add test %d to batch %s\n", type, name);
+
+ /* add to test list anyway, so user can check what's going on */
+ list_add_tail(&test->tes_link, &batch->bat_test_list);
+
+ batch->bat_ntest++;
+ test->tes_hdr.tsb_index = batch->bat_ntest;
+
+ /* hold groups so nobody can change them */
+ return rc;
+out:
+ if (test != NULL)
+ LIBCFS_FREE(test, offsetof(lstcon_test_t, tes_param[paramlen]));
+
+ if (dst_grp != NULL)
+ lstcon_group_put(dst_grp);
+
+ if (src_grp != NULL)
+ lstcon_group_put(src_grp);
+
+ return rc;
+}
+
+int
+lstcon_test_find(lstcon_batch_t *batch, int idx, lstcon_test_t **testpp)
+{
+ lstcon_test_t *test;
+
+ list_for_each_entry(test, &batch->bat_test_list, tes_link) {
+ if (idx == test->tes_hdr.tsb_index) {
+ *testpp = test;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+int
+lstcon_tsbrpc_readent(int transop, srpc_msg_t *msg,
+ lstcon_rpc_ent_t *ent_up)
+{
+ srpc_batch_reply_t *rep = &msg->msg_body.bat_reply;
+
+ LASSERT (transop == LST_TRANS_TSBCLIQRY ||
+ transop == LST_TRANS_TSBSRVQRY);
+
+ /* positive errno, framework error code */
+ if (copy_to_user(&ent_up->rpe_priv[0],
+ &rep->bar_active, sizeof(rep->bar_active)))
+ return -EFAULT;
+
+ return 0;
+}
+
+int
+lstcon_test_batch_query(char *name, int testidx, int client,
+ int timeout, struct list_head *result_up)
+{
+ lstcon_rpc_trans_t *trans;
+ struct list_head *translist;
+ struct list_head *ndlist;
+ lstcon_tsb_hdr_t *hdr;
+ lstcon_batch_t *batch;
+ lstcon_test_t *test = NULL;
+ int transop;
+ int rc;
+
+ rc = lstcon_batch_find(name, &batch);
+ if (rc != 0) {
+ CDEBUG(D_NET, "Can't find batch: %s\n", name);
+ return rc;
+ }
+
+ if (testidx == 0) {
+ translist = &batch->bat_trans_list;
+ ndlist = &batch->bat_cli_list;
+ hdr = &batch->bat_hdr;
+
+ } else {
+ /* query specified test only */
+ rc = lstcon_test_find(batch, testidx, &test);
+ if (rc != 0) {
+ CDEBUG(D_NET, "Can't find test: %d\n", testidx);
+ return rc;
+ }
+
+ translist = &test->tes_trans_list;
+ ndlist = &test->tes_src_grp->grp_ndl_list;
+ hdr = &test->tes_hdr;
+ }
+
+ transop = client ? LST_TRANS_TSBCLIQRY : LST_TRANS_TSBSRVQRY;
+
+ rc = lstcon_rpc_trans_ndlist(ndlist, translist, transop, hdr,
+ lstcon_batrpc_condition, &trans);
+ if (rc != 0) {
+ CERROR("Can't create transaction: %d\n", rc);
+ return rc;
+ }
+
+ lstcon_rpc_trans_postwait(trans, timeout);
+
+ if (testidx == 0 && /* query a batch, not a test */
+ lstcon_rpc_stat_failure(lstcon_trans_stat(), 0) == 0 &&
+ lstcon_tsbqry_stat_run(lstcon_trans_stat(), 0) == 0) {
+ /* all RPCs finished, and no active test */
+ batch->bat_state = LST_BATCH_IDLE;
+ }
+
+ rc = lstcon_rpc_trans_interpreter(trans, result_up,
+ lstcon_tsbrpc_readent);
+ lstcon_rpc_trans_destroy(trans);
+
+ return rc;
+}
+
+int
+lstcon_statrpc_readent(int transop, srpc_msg_t *msg,
+ lstcon_rpc_ent_t *ent_up)
+{
+ srpc_stat_reply_t *rep = &msg->msg_body.stat_reply;
+ sfw_counters_t *sfwk_stat;
+ srpc_counters_t *srpc_stat;
+ lnet_counters_t *lnet_stat;
+
+ if (rep->str_status != 0)
+ return 0;
+
+ sfwk_stat = (sfw_counters_t *)&ent_up->rpe_payload[0];
+ srpc_stat = (srpc_counters_t *)((char *)sfwk_stat + sizeof(*sfwk_stat));
+ lnet_stat = (lnet_counters_t *)((char *)srpc_stat + sizeof(*srpc_stat));
+
+ if (copy_to_user(sfwk_stat, &rep->str_fw, sizeof(*sfwk_stat)) ||
+ copy_to_user(srpc_stat, &rep->str_rpc, sizeof(*srpc_stat)) ||
+ copy_to_user(lnet_stat, &rep->str_lnet, sizeof(*lnet_stat)))
+ return -EFAULT;
+
+ return 0;
+}
+
+int
+lstcon_ndlist_stat(struct list_head *ndlist,
+ int timeout, struct list_head *result_up)
+{
+ struct list_head head;
+ lstcon_rpc_trans_t *trans;
+ int rc;
+
+ INIT_LIST_HEAD(&head);
+
+ rc = lstcon_rpc_trans_ndlist(ndlist, &head,
+ LST_TRANS_STATQRY, NULL, NULL, &trans);
+ if (rc != 0) {
+ CERROR("Can't create transaction: %d\n", rc);
+ return rc;
+ }
+
+ lstcon_rpc_trans_postwait(trans, LST_VALIDATE_TIMEOUT(timeout));
+
+ rc = lstcon_rpc_trans_interpreter(trans, result_up,
+ lstcon_statrpc_readent);
+ lstcon_rpc_trans_destroy(trans);
+
+ return rc;
+}
+
+int
+lstcon_group_stat(char *grp_name, int timeout, struct list_head *result_up)
+{
+ lstcon_group_t *grp;
+ int rc;
+
+ rc = lstcon_group_find(grp_name, &grp);
+ if (rc != 0) {
+ CDEBUG(D_NET, "Can't find group %s\n", grp_name);
+ return rc;
+ }
+
+ rc = lstcon_ndlist_stat(&grp->grp_ndl_list, timeout, result_up);
+
+ lstcon_group_put(grp);
+
+ return rc;
+}
+
+int
+lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
+ int timeout, struct list_head *result_up)
+{
+ lstcon_ndlink_t *ndl;
+ lstcon_group_t *tmp;
+ lnet_process_id_t id;
+ int i;
+ int rc;
+
+ rc = lstcon_group_alloc(NULL, &tmp);
+ if (rc != 0) {
+ CERROR("Out of memory\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
+ rc = -EFAULT;
+ break;
+ }
+
+ /* add to tmp group */
+ rc = lstcon_group_ndlink_find(tmp, id, &ndl, 2);
+ if (rc != 0) {
+ CDEBUG((rc == -ENOMEM) ? D_ERROR : D_NET,
+ "Failed to find or create %s: %d\n",
+ libcfs_id2str(id), rc);
+ break;
+ }
+ }
+
+ if (rc != 0) {
+ lstcon_group_put(tmp);
+ return rc;
+ }
+
+ rc = lstcon_ndlist_stat(&tmp->grp_ndl_list, timeout, result_up);
+
+ lstcon_group_put(tmp);
+
+ return rc;
+}
+
+int
+lstcon_debug_ndlist(struct list_head *ndlist,
+ struct list_head *translist,
+ int timeout, struct list_head *result_up)
+{
+ lstcon_rpc_trans_t *trans;
+ int rc;
+
+ rc = lstcon_rpc_trans_ndlist(ndlist, translist, LST_TRANS_SESQRY,
+ NULL, lstcon_sesrpc_condition, &trans);
+ if (rc != 0) {
+ CERROR("Can't create transaction: %d\n", rc);
+ return rc;
+ }
+
+ lstcon_rpc_trans_postwait(trans, LST_VALIDATE_TIMEOUT(timeout));
+
+ rc = lstcon_rpc_trans_interpreter(trans, result_up,
+ lstcon_sesrpc_readent);
+ lstcon_rpc_trans_destroy(trans);
+
+ return rc;
+}
+
+int
+lstcon_session_debug(int timeout, struct list_head *result_up)
+{
+ return lstcon_debug_ndlist(&console_session.ses_ndl_list,
+ NULL, timeout, result_up);
+}
+
+int
+lstcon_batch_debug(int timeout, char *name,
+ int client, struct list_head *result_up)
+{
+ lstcon_batch_t *bat;
+ int rc;
+
+ rc = lstcon_batch_find(name, &bat);
+ if (rc != 0)
+ return -ENOENT;
+
+ rc = lstcon_debug_ndlist(client ? &bat->bat_cli_list :
+ &bat->bat_srv_list,
+ NULL, timeout, result_up);
+
+ return rc;
+}
+
+int
+lstcon_group_debug(int timeout, char *name,
+ struct list_head *result_up)
+{
+ lstcon_group_t *grp;
+ int rc;
+
+ rc = lstcon_group_find(name, &grp);
+ if (rc != 0)
+ return -ENOENT;
+
+ rc = lstcon_debug_ndlist(&grp->grp_ndl_list, NULL,
+ timeout, result_up);
+ lstcon_group_put(grp);
+
+ return rc;
+}
+
+int
+lstcon_nodes_debug(int timeout,
+ int count, lnet_process_id_t *ids_up,
+ struct list_head *result_up)
+{
+ lnet_process_id_t id;
+ lstcon_ndlink_t *ndl;
+ lstcon_group_t *grp;
+ int i;
+ int rc;
+
+ rc = lstcon_group_alloc(NULL, &grp);
+ if (rc != 0) {
+ CDEBUG(D_NET, "Out of memory\n");
+ return rc;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
+ rc = -EFAULT;
+ break;
+ }
+
+ /* node is added to tmp group */
+ rc = lstcon_group_ndlink_find(grp, id, &ndl, 1);
+ if (rc != 0) {
+ CERROR("Can't create node link\n");
+ break;
+ }
+ }
+
+ if (rc != 0) {
+ lstcon_group_put(grp);
+ return rc;
+ }
+
+ rc = lstcon_debug_ndlist(&grp->grp_ndl_list, NULL,
+ timeout, result_up);
+
+ lstcon_group_put(grp);
+
+ return rc;
+}
+
+int
+lstcon_session_match(lst_sid_t sid)
+{
+ return (console_session.ses_id.ses_nid == sid.ses_nid &&
+ console_session.ses_id.ses_stamp == sid.ses_stamp) ? 1 : 0;
+}
+
+static void
+lstcon_new_session_id(lst_sid_t *sid)
+{
+ lnet_process_id_t id;
+
+ LASSERT (console_session.ses_state == LST_SESSION_NONE);
+
+ LNetGetId(1, &id);
+ sid->ses_nid = id.nid;
+ sid->ses_stamp = cfs_time_current();
+}
+
+extern srpc_service_t lstcon_acceptor_service;
+
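+/* Create a new console session: end any existing session if 'force' is set,
+ * validate the requested features, add the default batch, start the RPC
+ * pinger, and copy the new session id back to userspace. */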
+int
+lstcon_session_new(char *name, int key, unsigned feats,
+ int timeout, int force, lst_sid_t *sid_up)
+{
+ int rc = 0;
+ int i;
+
+ if (console_session.ses_state != LST_SESSION_NONE) {
+ /* session exists */
+ if (!force) {
+ CNETERR("Session %s already exists\n",
+ console_session.ses_name);
+ return -EEXIST;
+ }
+
+ rc = lstcon_session_end();
+
+ /* lstcon_session_end() only returns local errors */
+ if (rc != 0)
+ return rc;
+ }
+
+ if ((feats & ~LST_FEATS_MASK) != 0) {
+ CNETERR("Unknown session features %x\n",
+ (feats & ~LST_FEATS_MASK));
+ return -EINVAL;
+ }
+
+ for (i = 0; i < LST_GLOBAL_HASHSIZE; i++)
+ LASSERT(list_empty(&console_session.ses_ndl_hash[i]));
+
+ lstcon_new_session_id(&console_session.ses_id);
+
+ console_session.ses_key = key;
+ console_session.ses_state = LST_SESSION_ACTIVE;
+ console_session.ses_force = !!force;
+ console_session.ses_features = feats;
+ console_session.ses_feats_updated = 0;
+ console_session.ses_timeout = (timeout <= 0) ?
+ LST_CONSOLE_TIMEOUT : timeout;
+ strcpy(console_session.ses_name, name);
+
+ rc = lstcon_batch_add(LST_DEFAULT_BATCH);
+ if (rc != 0)
+ return rc;
+
+ rc = lstcon_rpc_pinger_start();
+ if (rc != 0) {
+ lstcon_batch_t *bat = NULL;
+
+ lstcon_batch_find(LST_DEFAULT_BATCH, &bat);
+ lstcon_batch_destroy(bat);
+
+ return rc;
+ }
+
+ if (copy_to_user(sid_up, &console_session.ses_id,
+ sizeof(lst_sid_t)) == 0)
+ return rc;
+
+ lstcon_session_end();
+
+ return -EFAULT;
+}
+
+int
+lstcon_session_info(lst_sid_t *sid_up, int *key_up, unsigned *featp,
+ lstcon_ndlist_ent_t *ndinfo_up, char *name_up, int len)
+{
+ lstcon_ndlist_ent_t *entp;
+ lstcon_ndlink_t *ndl;
+ int rc = 0;
+
+ if (console_session.ses_state != LST_SESSION_ACTIVE)
+ return -ESRCH;
+
+ LIBCFS_ALLOC(entp, sizeof(*entp));
+ if (entp == NULL)
+ return -ENOMEM;
+
+ memset(entp, 0, sizeof(*entp));
+
+ list_for_each_entry(ndl, &console_session.ses_ndl_list, ndl_link)
+ LST_NODE_STATE_COUNTER(ndl->ndl_node, entp);
+
+ if (copy_to_user(sid_up, &console_session.ses_id,
+ sizeof(lst_sid_t)) ||
+ copy_to_user(key_up, &console_session.ses_key,
+ sizeof(*key_up)) ||
+ copy_to_user(featp, &console_session.ses_features,
+ sizeof(*featp)) ||
+ copy_to_user(ndinfo_up, entp, sizeof(*entp)) ||
+ copy_to_user(name_up, console_session.ses_name, len))
+ rc = -EFAULT;
+
+ LIBCFS_FREE(entp, sizeof(*entp));
+
+ return rc;
+}
+
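+/* Tear down the current session: notify remote nodes with SESEND RPCs, stop
+ * the pinger, wait for orphan RPCs, then destroy all batches and groups. */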
+int
+lstcon_session_end(void)
+{
+ lstcon_rpc_trans_t *trans;
+ lstcon_group_t *grp;
+ lstcon_batch_t *bat;
+ int rc = 0;
+
+ LASSERT (console_session.ses_state == LST_SESSION_ACTIVE);
+
+ rc = lstcon_rpc_trans_ndlist(&console_session.ses_ndl_list,
+ NULL, LST_TRANS_SESEND, NULL,
+ lstcon_sesrpc_condition, &trans);
+ if (rc != 0) {
+ CERROR("Can't create transaction: %d\n", rc);
+ return rc;
+ }
+
+ console_session.ses_shutdown = 1;
+
+ lstcon_rpc_pinger_stop();
+
+ lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);
+
+ lstcon_rpc_trans_destroy(trans);
+ /* The user can do nothing even if the RPC failed, so carry on */
+
+ /* wait for orphan RPCs to die */
+ lstcon_rpc_cleanup_wait();
+
+ console_session.ses_id = LST_INVALID_SID;
+ console_session.ses_state = LST_SESSION_NONE;
+ console_session.ses_key = 0;
+ console_session.ses_force = 0;
+ console_session.ses_feats_updated = 0;
+
+ /* destroy all batches */
+ while (!list_empty(&console_session.ses_bat_list)) {
+ bat = list_entry(console_session.ses_bat_list.next,
+ lstcon_batch_t, bat_link);
+
+ lstcon_batch_destroy(bat);
+ }
+
+ /* destroy all groups */
+ while (!list_empty(&console_session.ses_grp_list)) {
+ grp = list_entry(console_session.ses_grp_list.next,
+ lstcon_group_t, grp_link);
+ LASSERT (grp->grp_ref == 1);
+
+ lstcon_group_put(grp);
+ }
+
+ /* all nodes should be released */
+ LASSERT (list_empty(&console_session.ses_ndl_list));
+
+ console_session.ses_shutdown = 0;
+ console_session.ses_expired = 0;
+
+ return rc;
+}
+
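+/* Verify that the features reported by a remote node are compatible with the
+ * session; the first reply seen fixes the session feature mask. */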
+int
+lstcon_session_feats_check(unsigned feats)
+{
+ int rc = 0;
+
+ if ((feats & ~LST_FEATS_MASK) != 0) {
+ CERROR("Can't support these features: %x\n",
+ (feats & ~LST_FEATS_MASK));
+ return -EPROTO;
+ }
+
+ spin_lock(&console_session.ses_rpc_lock);
+
+ if (!console_session.ses_feats_updated) {
+ console_session.ses_feats_updated = 1;
+ console_session.ses_features = feats;
+ }
+
+ if (console_session.ses_features != feats)
+ rc = -EPROTO;
+
+ spin_unlock(&console_session.ses_rpc_lock);
+
+ if (rc != 0) {
+ CERROR("remote features %x do not match with "
+ "session features %x of console\n",
+ feats, console_session.ses_features);
+ }
+
+ return rc;
+}
+
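+/* Handle a JOIN request from a test node: validate the session and features,
+ * then add the node to the named group (creating the group if needed). */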
+static int
+lstcon_acceptor_handle (srpc_server_rpc_t *rpc)
+{
+ srpc_msg_t *rep = &rpc->srpc_replymsg;
+ srpc_msg_t *req = &rpc->srpc_reqstbuf->buf_msg;
+ srpc_join_reqst_t *jreq = &req->msg_body.join_reqst;
+ srpc_join_reply_t *jrep = &rep->msg_body.join_reply;
+ lstcon_group_t *grp = NULL;
+ lstcon_ndlink_t *ndl;
+ int rc = 0;
+
+ sfw_unpack_message(req);
+
+ mutex_lock(&console_session.ses_mutex);
+
+ jrep->join_sid = console_session.ses_id;
+
+ if (console_session.ses_id.ses_nid == LNET_NID_ANY) {
+ jrep->join_status = ESRCH;
+ goto out;
+ }
+
+ if (lstcon_session_feats_check(req->msg_ses_feats) != 0) {
+ jrep->join_status = EPROTO;
+ goto out;
+ }
+
+ if (jreq->join_sid.ses_nid != LNET_NID_ANY &&
+ !lstcon_session_match(jreq->join_sid)) {
+ jrep->join_status = EBUSY;
+ goto out;
+ }
+
+ if (lstcon_group_find(jreq->join_group, &grp) != 0) {
+ rc = lstcon_group_alloc(jreq->join_group, &grp);
+ if (rc != 0) {
+ CERROR("Out of memory\n");
+ goto out;
+ }
+
+ list_add_tail(&grp->grp_link,
+ &console_session.ses_grp_list);
+ lstcon_group_addref(grp);
+ }
+
+ if (grp->grp_ref > 2) {
+ /* Group is in use */
+ jrep->join_status = EBUSY;
+ goto out;
+ }
+
+ rc = lstcon_group_ndlink_find(grp, rpc->srpc_peer, &ndl, 0);
+ if (rc == 0) {
+ jrep->join_status = EEXIST;
+ goto out;
+ }
+
+ rc = lstcon_group_ndlink_find(grp, rpc->srpc_peer, &ndl, 1);
+ if (rc != 0) {
+ CERROR("Out of memory\n");
+ goto out;
+ }
+
+ ndl->ndl_node->nd_state = LST_NODE_ACTIVE;
+ ndl->ndl_node->nd_timeout = console_session.ses_timeout;
+
+ if (grp->grp_userland == 0)
+ grp->grp_userland = 1;
+
+ strcpy(jrep->join_session, console_session.ses_name);
+ jrep->join_timeout = console_session.ses_timeout;
+ jrep->join_status = 0;
+
+out:
+ rep->msg_ses_feats = console_session.ses_features;
+ if (grp != NULL)
+ lstcon_group_put(grp);
+
+ mutex_unlock(&console_session.ses_mutex);
+
+ return rc;
+}
+
+srpc_service_t lstcon_acceptor_service;
+
+void lstcon_init_acceptor_service(void)
+{
+ /* initialize selftest console acceptor service table */
+ lstcon_acceptor_service.sv_name = "join session";
+ lstcon_acceptor_service.sv_handler = lstcon_acceptor_handle;
+ lstcon_acceptor_service.sv_id = SRPC_SERVICE_JOIN;
+ lstcon_acceptor_service.sv_wi_total = SFW_FRWK_WI_MAX;
+}
+
+extern int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data);
+
+DECLARE_IOCTL_HANDLER(lstcon_ioctl_handler, lstcon_ioctl_entry);
+
+/* initialize console */
+int
+lstcon_console_init(void)
+{
+ int i;
+ int rc;
+
+ memset(&console_session, 0, sizeof(lstcon_session_t));
+
+ console_session.ses_id = LST_INVALID_SID;
+ console_session.ses_state = LST_SESSION_NONE;
+ console_session.ses_timeout = 0;
+ console_session.ses_force = 0;
+ console_session.ses_expired = 0;
+ console_session.ses_feats_updated = 0;
+ console_session.ses_features = LST_FEATS_MASK;
+ console_session.ses_laststamp = cfs_time_current_sec();
+
+ mutex_init(&console_session.ses_mutex);
+
+ INIT_LIST_HEAD(&console_session.ses_ndl_list);
+ INIT_LIST_HEAD(&console_session.ses_grp_list);
+ INIT_LIST_HEAD(&console_session.ses_bat_list);
+ INIT_LIST_HEAD(&console_session.ses_trans_list);
+
+ LIBCFS_ALLOC(console_session.ses_ndl_hash,
+ sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
+ if (console_session.ses_ndl_hash == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < LST_GLOBAL_HASHSIZE; i++)
+ INIT_LIST_HEAD(&console_session.ses_ndl_hash[i]);
+
+ /* initialize acceptor service table */
+ lstcon_init_acceptor_service();
+
+ rc = srpc_add_service(&lstcon_acceptor_service);
+ LASSERT (rc != -EBUSY);
+ if (rc != 0) {
+ LIBCFS_FREE(console_session.ses_ndl_hash,
+ sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
+ return rc;
+ }
+
+ rc = srpc_service_add_buffers(&lstcon_acceptor_service,
+ lstcon_acceptor_service.sv_wi_total);
+ if (rc != 0) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = libcfs_register_ioctl(&lstcon_ioctl_handler);
+
+ if (rc == 0) {
+ lstcon_rpc_module_init();
+ return 0;
+ }
+
+out:
+ srpc_shutdown_service(&lstcon_acceptor_service);
+ srpc_remove_service(&lstcon_acceptor_service);
+
+ LIBCFS_FREE(console_session.ses_ndl_hash,
+ sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
+
+ srpc_wait_service_shutdown(&lstcon_acceptor_service);
+
+ return rc;
+}
+
+int
+lstcon_console_fini(void)
+{
+ int i;
+
+ libcfs_deregister_ioctl(&lstcon_ioctl_handler);
+
+ mutex_lock(&console_session.ses_mutex);
+
+ srpc_shutdown_service(&lstcon_acceptor_service);
+ srpc_remove_service(&lstcon_acceptor_service);
+
+ if (console_session.ses_state != LST_SESSION_NONE)
+ lstcon_session_end();
+
+ lstcon_rpc_module_fini();
+
+ mutex_unlock(&console_session.ses_mutex);
+
+ LASSERT (list_empty(&console_session.ses_ndl_list));
+ LASSERT (list_empty(&console_session.ses_grp_list));
+ LASSERT (list_empty(&console_session.ses_bat_list));
+ LASSERT (list_empty(&console_session.ses_trans_list));
+
+ for (i = 0; i < LST_GLOBAL_HASHSIZE; i++)
+ LASSERT (list_empty(&console_session.ses_ndl_hash[i]));
+
+ LIBCFS_FREE(console_session.ses_ndl_hash,
+ sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
+
+ srpc_wait_service_shutdown(&lstcon_acceptor_service);
+
+ return 0;
+}
diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h
new file mode 100644
index 0000000..e61b266
--- /dev/null
+++ b/drivers/staging/lustre/lnet/selftest/console.h
@@ -0,0 +1,232 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/selftest/console.h
+ *
+ * kernel structure for LST console
+ *
+ * Author: Liang Zhen <liangzhen@clusterfs.com>
+ */
+
+#ifndef __LST_CONSOLE_H__
+#define __LST_CONSOLE_H__
+
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/lnet/lnet.h>
+#include <linux/lnet/lib-types.h>
+#include <linux/lnet/lnetst.h>
+#include "selftest.h"
+#include "conrpc.h"
+
+typedef struct lstcon_node {
+ lnet_process_id_t nd_id; /* id of the node */
+ int nd_ref; /* reference count */
+ int nd_state; /* state of the node */
+ int nd_timeout; /* session timeout */
+ cfs_time_t nd_stamp; /* timestamp of last replied RPC */
+ struct lstcon_rpc nd_ping; /* ping rpc */
+} lstcon_node_t; /*** node descriptor */
+
+typedef struct {
+ struct list_head ndl_link; /* chain on list */
+ struct list_head ndl_hlink; /* chain on hash */
+ lstcon_node_t *ndl_node; /* pointer to node */
+} lstcon_ndlink_t; /*** node link descriptor */
+
+typedef struct {
+ struct list_head grp_link; /* chain on global group list */
+ int grp_ref; /* reference count */
+ int grp_userland; /* has userland nodes */
+ int grp_nnode; /* # of nodes */
+ char grp_name[LST_NAME_SIZE]; /* group name */
+
+ struct list_head grp_trans_list; /* transaction list */
+ struct list_head grp_ndl_list; /* nodes list */
+ struct list_head grp_ndl_hash[0];/* hash table for nodes */
+} lstcon_group_t; /*** (alias of nodes) group descriptor */
+
+#define LST_BATCH_IDLE 0xB0 /* idle batch */
+#define LST_BATCH_RUNNING 0xB1 /* running batch */
+
+typedef struct lstcon_tsb_hdr {
+ lst_bid_t tsb_id; /* batch ID */
+ int tsb_index; /* test index */
+} lstcon_tsb_hdr_t;
+
+typedef struct {
+ lstcon_tsb_hdr_t bat_hdr; /* test_batch header */
+ struct list_head bat_link; /* chain on session's batches list */
+ int bat_ntest; /* # of tests */
+ int bat_state; /* state of the batch */
+ int bat_arg; /* parameter for run|stop, timeout for run, force for stop */
+ char bat_name[LST_NAME_SIZE]; /* name of batch */
+
+ struct list_head bat_test_list; /* list head of tests (lstcon_test_t) */
+ struct list_head bat_trans_list; /* list head of transaction */
+ struct list_head bat_cli_list; /* list head of client nodes (lstcon_node_t) */
+ struct list_head *bat_cli_hash; /* hash table of client nodes */
+ struct list_head bat_srv_list; /* list head of server nodes */
+ struct list_head *bat_srv_hash; /* hash table of server nodes */
+} lstcon_batch_t; /*** (tests) batch descriptor */
+
+typedef struct lstcon_test {
+ lstcon_tsb_hdr_t tes_hdr; /* test batch header */
+ struct list_head tes_link; /* chain on batch's tests list */
+ lstcon_batch_t *tes_batch; /* pointer to batch */
+
+ int tes_type; /* type of the test, i.e: bulk, ping */
+ int tes_stop_onerr; /* stop on error */
+ int tes_oneside; /* one-sided test */
+ int tes_concur; /* concurrency */
+ int tes_loop; /* loop count */
+ int tes_dist; /* nodes distribution of target group */
+ int tes_span; /* nodes span of target group */
+ int tes_cliidx; /* client index, used for RPC creation */
+
+ struct list_head tes_trans_list; /* transaction list */
+ lstcon_group_t *tes_src_grp; /* group that runs the test */
+ lstcon_group_t *tes_dst_grp; /* target group */
+
+ int tes_paramlen; /* test parameter length */
+ char tes_param[0]; /* test parameter */
+} lstcon_test_t; /*** a single test descriptor */
+
+#define LST_GLOBAL_HASHSIZE 503 /* global nodes hash table size */
+#define LST_NODE_HASHSIZE 239 /* node hash table (for batch or group) */
+
+#define LST_SESSION_NONE 0x0 /* no session */
+#define LST_SESSION_ACTIVE 0x1 /* working session */
+
+#define LST_CONSOLE_TIMEOUT 300 /* default console timeout */
+
+typedef struct {
+ struct mutex ses_mutex; /* only 1 thread in session */
+ lst_sid_t ses_id; /* global session id */
+ int ses_key; /* local session key */
+ int ses_state; /* state of session */
+ int ses_timeout; /* timeout in seconds */
+ time_t ses_laststamp; /* last operation stamp (seconds) */
+ /** tests features of the session */
+ unsigned ses_features;
+ /** features are synced with remote test nodes */
+ unsigned ses_feats_updated:1;
+ /** force creating */
+ unsigned ses_force:1;
+ /** session is shutting down */
+ unsigned ses_shutdown:1;
+ /** console has timed out */
+ unsigned ses_expired:1;
+ __u64 ses_id_cookie; /* batch id cookie */
+ char ses_name[LST_NAME_SIZE]; /* session name */
+ lstcon_rpc_trans_t *ses_ping; /* session pinger */
+ stt_timer_t ses_ping_timer; /* timer for pinger */
+ lstcon_trans_stat_t ses_trans_stat; /* transaction stats */
+
+ struct list_head ses_trans_list; /* global list of transactions */
+ struct list_head ses_grp_list; /* global list of groups */
+ struct list_head ses_bat_list; /* global list of batches */
+ struct list_head ses_ndl_list; /* global list of nodes */
+ struct list_head *ses_ndl_hash; /* hash table of nodes */
+
+ spinlock_t ses_rpc_lock; /* serialize */
+ atomic_t ses_rpc_counter;/* # of initialized RPCs */
+ struct list_head ses_rpc_freelist; /* idle console rpc */
+} lstcon_session_t; /*** session descriptor */
+
+extern lstcon_session_t console_session;
+
+static inline lstcon_trans_stat_t *
+lstcon_trans_stat(void)
+{
+ return &console_session.ses_trans_stat;
+}
+
+static inline struct list_head *
+lstcon_id2hash (lnet_process_id_t id, struct list_head *hash)
+{
+ unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
+
+ return &hash[idx];
+}
+
+extern int lstcon_session_match(lst_sid_t sid);
+extern int lstcon_session_new(char *name, int key, unsigned version,
+ int timeout, int flags, lst_sid_t *sid_up);
+extern int lstcon_session_info(lst_sid_t *sid_up, int *key, unsigned *verp,
+ lstcon_ndlist_ent_t *entp, char *name_up, int len);
+extern int lstcon_session_end(void);
+extern int lstcon_session_debug(int timeout, struct list_head *result_up);
+extern int lstcon_session_feats_check(unsigned feats);
+extern int lstcon_batch_debug(int timeout, char *name,
+ int client, struct list_head *result_up);
+extern int lstcon_group_debug(int timeout, char *name,
+ struct list_head *result_up);
+extern int lstcon_nodes_debug(int timeout, int nnd, lnet_process_id_t *nds_up,
+ struct list_head *result_up);
+extern int lstcon_group_add(char *name);
+extern int lstcon_group_del(char *name);
+extern int lstcon_group_clean(char *name, int args);
+extern int lstcon_group_refresh(char *name, struct list_head *result_up);
+extern int lstcon_nodes_add(char *name, int nnd, lnet_process_id_t *nds_up,
+ unsigned *featp, struct list_head *result_up);
+extern int lstcon_nodes_remove(char *name, int nnd, lnet_process_id_t *nds_up,
+ struct list_head *result_up);
+extern int lstcon_group_info(char *name, lstcon_ndlist_ent_t *gent_up,
+ int *index_p, int *ndent_p, lstcon_node_ent_t *ndents_up);
+extern int lstcon_group_list(int idx, int len, char *name_up);
+extern int lstcon_batch_add(char *name);
+extern int lstcon_batch_run(char *name, int timeout,
+ struct list_head *result_up);
+extern int lstcon_batch_stop(char *name, int force,
+ struct list_head *result_up);
+extern int lstcon_test_batch_query(char *name, int testidx,
+ int client, int timeout,
+ struct list_head *result_up);
+extern int lstcon_batch_del(char *name);
+extern int lstcon_batch_list(int idx, int namelen, char *name_up);
+extern int lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up,
+ int server, int testidx, int *index_p,
+ int *ndent_p, lstcon_node_ent_t *dents_up);
+extern int lstcon_group_stat(char *grp_name, int timeout,
+ struct list_head *result_up);
+extern int lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
+ int timeout, struct list_head *result_up);
+extern int lstcon_test_add(char *name, int type, int loop, int concur,
+ int dist, int span, char *src_name, char *dst_name,
+ void *param, int paramlen, int *retp,
+ struct list_head *result_up);
+
+#endif
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
new file mode 100644
index 0000000..483c785
--- /dev/null
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -0,0 +1,1814 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/selftest/framework.c
+ *
+ * Author: Isaac Huang <isaac@clusterfs.com>
+ * Author: Liang Zhen <liangzhen@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include "selftest.h"
+
+lst_sid_t LST_INVALID_SID = {LNET_NID_ANY, -1};
+
+static int session_timeout = 100;
+CFS_MODULE_PARM(session_timeout, "i", int, 0444,
+ "test session timeout in seconds (100 by default, 0 == never)");
+
+static int rpc_timeout = 64;
+CFS_MODULE_PARM(rpc_timeout, "i", int, 0644,
+ "rpc timeout in seconds (64 by default, 0 == never)");
+
+#define sfw_unpack_id(id) \
+do { \
+ __swab64s(&(id).nid); \
+ __swab32s(&(id).pid); \
+} while (0)
+
+#define sfw_unpack_sid(sid) \
+do { \
+ __swab64s(&(sid).ses_nid); \
+ __swab64s(&(sid).ses_stamp); \
+} while (0)
+
+#define sfw_unpack_fw_counters(fc) \
+do { \
+ __swab32s(&(fc).running_ms); \
+ __swab32s(&(fc).active_batches); \
+ __swab32s(&(fc).zombie_sessions); \
+ __swab32s(&(fc).brw_errors); \
+ __swab32s(&(fc).ping_errors); \
+} while (0)
+
+#define sfw_unpack_rpc_counters(rc) \
+do { \
+ __swab32s(&(rc).errors); \
+ __swab32s(&(rc).rpcs_sent); \
+ __swab32s(&(rc).rpcs_rcvd); \
+ __swab32s(&(rc).rpcs_dropped); \
+ __swab32s(&(rc).rpcs_expired); \
+ __swab64s(&(rc).bulk_get); \
+ __swab64s(&(rc).bulk_put); \
+} while (0)
+
+#define sfw_unpack_lnet_counters(lc) \
+do { \
+ __swab32s(&(lc).errors); \
+ __swab32s(&(lc).msgs_max); \
+ __swab32s(&(lc).msgs_alloc); \
+ __swab32s(&(lc).send_count); \
+ __swab32s(&(lc).recv_count); \
+ __swab32s(&(lc).drop_count); \
+ __swab32s(&(lc).route_count); \
+ __swab64s(&(lc).send_length); \
+ __swab64s(&(lc).recv_length); \
+ __swab64s(&(lc).drop_length); \
+ __swab64s(&(lc).route_length); \
+} while (0)
+
+#define sfw_test_active(t) (atomic_read(&(t)->tsi_nactive) != 0)
+#define sfw_batch_active(b) (atomic_read(&(b)->bat_nactive) != 0)
+
+struct smoketest_framework {
+ struct list_head fw_zombie_rpcs; /* RPCs to be recycled */
+ struct list_head fw_zombie_sessions; /* stopping sessions */
+ struct list_head fw_tests; /* registered test cases */
+ atomic_t fw_nzombies; /* # zombie sessions */
+ spinlock_t fw_lock; /* serialise */
+ sfw_session_t *fw_session; /* _the_ session */
+ int fw_shuttingdown; /* shutdown in progress */
+ srpc_server_rpc_t *fw_active_srpc; /* running RPC */
+} sfw_data;
+
+/* forward ref's */
+int sfw_stop_batch (sfw_batch_t *tsb, int force);
+void sfw_destroy_session (sfw_session_t *sn);
+
+static inline sfw_test_case_t *
+sfw_find_test_case(int id)
+{
+ sfw_test_case_t *tsc;
+
+ LASSERT (id <= SRPC_SERVICE_MAX_ID);
+ LASSERT (id > SRPC_FRAMEWORK_SERVICE_MAX_ID);
+
+ list_for_each_entry (tsc, &sfw_data.fw_tests, tsc_list) {
+ if (tsc->tsc_srv_service->sv_id == id)
+ return tsc;
+ }
+
+ return NULL;
+}
+
+static int
+sfw_register_test (srpc_service_t *service, sfw_test_client_ops_t *cliops)
+{
+ sfw_test_case_t *tsc;
+
+ if (sfw_find_test_case(service->sv_id) != NULL) {
+ CERROR ("Failed to register test %s (%d)\n",
+ service->sv_name, service->sv_id);
+ return -EEXIST;
+ }
+
+ LIBCFS_ALLOC(tsc, sizeof(sfw_test_case_t));
+ if (tsc == NULL)
+ return -ENOMEM;
+
+ memset(tsc, 0, sizeof(sfw_test_case_t));
+ tsc->tsc_cli_ops = cliops;
+ tsc->tsc_srv_service = service;
+
+ list_add_tail(&tsc->tsc_list, &sfw_data.fw_tests);
+ return 0;
+}
+
+void
+sfw_add_session_timer (void)
+{
+ sfw_session_t *sn = sfw_data.fw_session;
+ stt_timer_t *timer;
+
+ LASSERT (!sfw_data.fw_shuttingdown);
+
+ if (sn == NULL || sn->sn_timeout == 0)
+ return;
+
+ LASSERT (!sn->sn_timer_active);
+
+ timer = &sn->sn_timer;
+ sn->sn_timer_active = 1;
+ timer->stt_expires = cfs_time_add(sn->sn_timeout,
+ cfs_time_current_sec());
+ stt_add_timer(timer);
+ return;
+}
+
+int
+sfw_del_session_timer (void)
+{
+ sfw_session_t *sn = sfw_data.fw_session;
+
+ if (sn == NULL || !sn->sn_timer_active)
+ return 0;
+
+ LASSERT (sn->sn_timeout != 0);
+
+ if (stt_del_timer(&sn->sn_timer)) { /* timer defused */
+ sn->sn_timer_active = 0;
+ return 0;
+ }
+
+ return EBUSY; /* racing with sfw_session_expired() */
+}
+
+/* called with sfw_data.fw_lock held */
+static void
+sfw_deactivate_session (void)
+{
+ sfw_session_t *sn = sfw_data.fw_session;
+ int nactive = 0;
+ sfw_batch_t *tsb;
+ sfw_test_case_t *tsc;
+
+ if (sn == NULL)
+ return;
+
+ LASSERT (!sn->sn_timer_active);
+
+ sfw_data.fw_session = NULL;
+ atomic_inc(&sfw_data.fw_nzombies);
+ list_add(&sn->sn_list, &sfw_data.fw_zombie_sessions);
+
+ spin_unlock(&sfw_data.fw_lock);
+
+ list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) {
+ srpc_abort_service(tsc->tsc_srv_service);
+ }
+
+ spin_lock(&sfw_data.fw_lock);
+
+ list_for_each_entry (tsb, &sn->sn_batches, bat_list) {
+ if (sfw_batch_active(tsb)) {
+ nactive++;
+ sfw_stop_batch(tsb, 1);
+ }
+ }
+
+ if (nactive != 0)
+ return; /* wait for active batches to stop */
+
+ list_del_init(&sn->sn_list);
+ spin_unlock(&sfw_data.fw_lock);
+
+ sfw_destroy_session(sn);
+
+ spin_lock(&sfw_data.fw_lock);
+}
+
+void
+sfw_session_expired (void *data)
+{
+ sfw_session_t *sn = data;
+
+ spin_lock(&sfw_data.fw_lock);
+
+ LASSERT (sn->sn_timer_active);
+ LASSERT (sn == sfw_data.fw_session);
+
+ CWARN ("Session expired! sid: %s-"LPU64", name: %s\n",
+ libcfs_nid2str(sn->sn_id.ses_nid),
+ sn->sn_id.ses_stamp, &sn->sn_name[0]);
+
+ sn->sn_timer_active = 0;
+ sfw_deactivate_session();
+
+ spin_unlock(&sfw_data.fw_lock);
+}
+
+static inline void
+sfw_init_session(sfw_session_t *sn, lst_sid_t sid,
+ unsigned features, const char *name)
+{
+ stt_timer_t *timer = &sn->sn_timer;
+
+ memset(sn, 0, sizeof(sfw_session_t));
+ INIT_LIST_HEAD(&sn->sn_list);
+ INIT_LIST_HEAD(&sn->sn_batches);
+ atomic_set(&sn->sn_refcount, 1); /* +1 for caller */
+ atomic_set(&sn->sn_brw_errors, 0);
+ atomic_set(&sn->sn_ping_errors, 0);
+ strlcpy(&sn->sn_name[0], name, sizeof(sn->sn_name));
+
+ sn->sn_timer_active = 0;
+ sn->sn_id = sid;
+ sn->sn_features = features;
+ sn->sn_timeout = session_timeout;
+ sn->sn_started = cfs_time_current();
+
+ timer->stt_data = sn;
+ timer->stt_func = sfw_session_expired;
+ INIT_LIST_HEAD(&timer->stt_list);
+}
+
+/* completion handler for incoming framework RPCs */
+void
+sfw_server_rpc_done(struct srpc_server_rpc *rpc)
+{
+ struct srpc_service *sv = rpc->srpc_scd->scd_svc;
+ int status = rpc->srpc_status;
+
+ CDEBUG (D_NET,
+ "Incoming framework RPC done: "
+ "service %s, peer %s, status %s:%d\n",
+ sv->sv_name, libcfs_id2str(rpc->srpc_peer),
+ swi_state2str(rpc->srpc_wi.swi_state),
+ status);
+
+ if (rpc->srpc_bulk != NULL)
+ sfw_free_pages(rpc);
+ return;
+}
+
+void
+sfw_client_rpc_fini (srpc_client_rpc_t *rpc)
+{
+ LASSERT (rpc->crpc_bulk.bk_niov == 0);
+ LASSERT (list_empty(&rpc->crpc_list));
+ LASSERT (atomic_read(&rpc->crpc_refcount) == 0);
+
+ CDEBUG (D_NET,
+ "Outgoing framework RPC done: "
+ "service %d, peer %s, status %s:%d:%d\n",
+ rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
+ swi_state2str(rpc->crpc_wi.swi_state),
+ rpc->crpc_aborted, rpc->crpc_status);
+
+ spin_lock(&sfw_data.fw_lock);
+
+ /* my callers must finish all RPCs before shutting me down */
+ LASSERT(!sfw_data.fw_shuttingdown);
+ list_add(&rpc->crpc_list, &sfw_data.fw_zombie_rpcs);
+
+ spin_unlock(&sfw_data.fw_lock);
+}
+
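+/* look up a batch by id in the current session */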
+sfw_batch_t *
+sfw_find_batch (lst_bid_t bid)
+{
+ sfw_session_t *sn = sfw_data.fw_session;
+ sfw_batch_t *bat;
+
+ LASSERT (sn != NULL);
+
+ list_for_each_entry (bat, &sn->sn_batches, bat_list) {
+ if (bat->bat_id.bat_id == bid.bat_id)
+ return bat;
+ }
+
+ return NULL;
+}
+
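+/* find the batch with the given id, creating it if it doesn't exist yet */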
+sfw_batch_t *
+sfw_bid2batch (lst_bid_t bid)
+{
+ sfw_session_t *sn = sfw_data.fw_session;
+ sfw_batch_t *bat;
+
+ LASSERT (sn != NULL);
+
+ bat = sfw_find_batch(bid);
+ if (bat != NULL)
+ return bat;
+
+ LIBCFS_ALLOC(bat, sizeof(sfw_batch_t));
+ if (bat == NULL)
+ return NULL;
+
+ bat->bat_error = 0;
+ bat->bat_session = sn;
+ bat->bat_id = bid;
+ atomic_set(&bat->bat_nactive, 0);
+ INIT_LIST_HEAD(&bat->bat_tests);
+
+ list_add_tail(&bat->bat_list, &sn->sn_batches);
+ return bat;
+}
+
+int
+sfw_get_stats (srpc_stat_reqst_t *request, srpc_stat_reply_t *reply)
+{
+ sfw_session_t *sn = sfw_data.fw_session;
+ sfw_counters_t *cnt = &reply->str_fw;
+ sfw_batch_t *bat;
+ struct timeval tv;
+
+ reply->str_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id;
+
+ if (request->str_sid.ses_nid == LNET_NID_ANY) {
+ reply->str_status = EINVAL;
+ return 0;
+ }
+
+ if (sn == NULL || !sfw_sid_equal(request->str_sid, sn->sn_id)) {
+ reply->str_status = ESRCH;
+ return 0;
+ }
+
+ lnet_counters_get(&reply->str_lnet);
+ srpc_get_counters(&reply->str_rpc);
+
+ /* send over the msecs since the session was started;
+ * with 32 bits to send, this is ~49 days */
+ cfs_duration_usec(cfs_time_sub(cfs_time_current(),
+ sn->sn_started), &tv);
+
+ cnt->running_ms = (__u32)(tv.tv_sec * 1000 + tv.tv_usec / 1000);
+ cnt->brw_errors = atomic_read(&sn->sn_brw_errors);
+ cnt->ping_errors = atomic_read(&sn->sn_ping_errors);
+ cnt->zombie_sessions = atomic_read(&sfw_data.fw_nzombies);
+
+ cnt->active_batches = 0;
+ list_for_each_entry (bat, &sn->sn_batches, bat_list) {
+ if (atomic_read(&bat->bat_nactive) > 0)
+ cnt->active_batches++;
+ }
+
+ reply->str_status = 0;
+ return 0;
+}
+
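+/* Handle a MKSN request: reuse or replace the current session depending on
+ * the requested sid and the force flag, rejecting unknown feature bits. */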
+int
+sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
+{
+ sfw_session_t *sn = sfw_data.fw_session;
+ srpc_msg_t *msg = container_of(request, srpc_msg_t,
+ msg_body.mksn_reqst);
+ int cplen = 0;
+
+ if (request->mksn_sid.ses_nid == LNET_NID_ANY) {
+ reply->mksn_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id;
+ reply->mksn_status = EINVAL;
+ return 0;
+ }
+
+ if (sn != NULL) {
+ reply->mksn_status = 0;
+ reply->mksn_sid = sn->sn_id;
+ reply->mksn_timeout = sn->sn_timeout;
+
+ if (sfw_sid_equal(request->mksn_sid, sn->sn_id)) {
+ atomic_inc(&sn->sn_refcount);
+ return 0;
+ }
+
+ if (!request->mksn_force) {
+ reply->mksn_status = EBUSY;
+ cplen = strlcpy(&reply->mksn_name[0], &sn->sn_name[0],
+ sizeof(reply->mksn_name));
+ if (cplen >= sizeof(reply->mksn_name))
+ return -E2BIG;
+ return 0;
+ }
+ }
+
+ /* reject the request if it requires unknown features.
+ * NB: an old version always accepts all features because it is not
+ * aware of srpc_msg_t::msg_ses_feats; this is a defect, but it is
+ * harmless because it returns a zero feature mask to the console,
+ * and it is the console's responsibility to make sure all nodes in
+ * a session have the same feature mask. */
+ if ((msg->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
+ reply->mksn_status = EPROTO;
+ return 0;
+ }
+
+ /* brand new or create by force */
+ LIBCFS_ALLOC(sn, sizeof(sfw_session_t));
+ if (sn == NULL) {
+ CERROR ("Dropping RPC (mksn) under memory pressure.\n");
+ return -ENOMEM;
+ }
+
+ sfw_init_session(sn, request->mksn_sid,
+ msg->msg_ses_feats, &request->mksn_name[0]);
+
+ spin_lock(&sfw_data.fw_lock);
+
+ sfw_deactivate_session();
+ LASSERT(sfw_data.fw_session == NULL);
+ sfw_data.fw_session = sn;
+
+ spin_unlock(&sfw_data.fw_lock);
+
+ reply->mksn_status = 0;
+ reply->mksn_sid = sn->sn_id;
+ reply->mksn_timeout = sn->sn_timeout;
+ return 0;
+}
+
+int
+sfw_remove_session (srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply)
+{
+ sfw_session_t *sn = sfw_data.fw_session;
+
+ reply->rmsn_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id;
+
+ if (request->rmsn_sid.ses_nid == LNET_NID_ANY) {
+ reply->rmsn_status = EINVAL;
+ return 0;
+ }
+
+ if (sn == NULL || !sfw_sid_equal(request->rmsn_sid, sn->sn_id)) {
+ reply->rmsn_status = (sn == NULL) ? ESRCH : EBUSY;
+ return 0;
+ }
+
+ if (!atomic_dec_and_test(&sn->sn_refcount)) {
+ reply->rmsn_status = 0;
+ return 0;
+ }
+
+ spin_lock(&sfw_data.fw_lock);
+ sfw_deactivate_session();
+ spin_unlock(&sfw_data.fw_lock);
+
+ reply->rmsn_status = 0;
+ reply->rmsn_sid = LST_INVALID_SID;
+ LASSERT(sfw_data.fw_session == NULL);
+ return 0;
+}
+
+int
+sfw_debug_session (srpc_debug_reqst_t *request, srpc_debug_reply_t *reply)
+{
+ sfw_session_t *sn = sfw_data.fw_session;
+
+ if (sn == NULL) {
+ reply->dbg_status = ESRCH;
+ reply->dbg_sid = LST_INVALID_SID;
+ return 0;
+ }
+
+ reply->dbg_status = 0;
+ reply->dbg_sid = sn->sn_id;
+ reply->dbg_timeout = sn->sn_timeout;
+ if (strlcpy(reply->dbg_name, &sn->sn_name[0], sizeof(reply->dbg_name))
+ >= sizeof(reply->dbg_name))
+ return -E2BIG;
+
+ return 0;
+}
+
+void
+sfw_test_rpc_fini (srpc_client_rpc_t *rpc)
+{
+ sfw_test_unit_t *tsu = rpc->crpc_priv;
+ sfw_test_instance_t *tsi = tsu->tsu_instance;
+
+ /* Called with tsi->tsi_lock held */
+ LASSERT (list_empty(&rpc->crpc_list));
+ list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
+}
+
+static inline int
+sfw_test_buffers(sfw_test_instance_t *tsi)
+{
+ struct sfw_test_case *tsc = sfw_find_test_case(tsi->tsi_service);
+ struct srpc_service *svc = tsc->tsc_srv_service;
+ int nbuf;
+
+ nbuf = min(svc->sv_wi_total, tsi->tsi_loop) / svc->sv_ncpts;
+ return max(SFW_TEST_WI_MIN, nbuf + SFW_TEST_WI_EXTRA);
+}
+
+int
+sfw_load_test(struct sfw_test_instance *tsi)
+{
+ struct sfw_test_case *tsc;
+ struct srpc_service *svc;
+ int nbuf;
+ int rc;
+
+ LASSERT(tsi != NULL);
+ tsc = sfw_find_test_case(tsi->tsi_service);
+ nbuf = sfw_test_buffers(tsi);
+ LASSERT(tsc != NULL);
+ svc = tsc->tsc_srv_service;
+
+ if (tsi->tsi_is_client) {
+ tsi->tsi_ops = tsc->tsc_cli_ops;
+ return 0;
+ }
+
+ rc = srpc_service_add_buffers(svc, nbuf);
+ if (rc != 0) {
+ CWARN("Failed to reserve enough buffers: "
+ "service %s, %d needed: %d\n", svc->sv_name, nbuf, rc);
+ /* NB: this error handler is not strictly correct, because
+ * it may release more buffers than already allocated,
+ * but it doesn't matter because request portal should
+ * be lazy portal and will grow buffers if necessary. */
+ srpc_service_remove_buffers(svc, nbuf);
+ return -ENOMEM;
+ }
+
+ CDEBUG(D_NET, "Reserved %d buffers for test %s\n",
+ nbuf * (srpc_serv_is_framework(svc) ?
+ 1 : cfs_cpt_number(cfs_cpt_table)), svc->sv_name);
+ return 0;
+}
+
+void
+sfw_unload_test(struct sfw_test_instance *tsi)
+{
+ struct sfw_test_case *tsc = sfw_find_test_case(tsi->tsi_service);
+
+ LASSERT(tsc != NULL);
+
+ if (tsi->tsi_is_client)
+ return;
+
+ /* shrink buffers: the request portal is a lazy portal that
+ * can grow buffers at runtime, so we may leave some buffers
+ * behind, but never mind... */
+ srpc_service_remove_buffers(tsc->tsc_srv_service,
+ sfw_test_buffers(tsi));
+ return;
+}
+
+void
+sfw_destroy_test_instance (sfw_test_instance_t *tsi)
+{
+ srpc_client_rpc_t *rpc;
+ sfw_test_unit_t *tsu;
+
+ if (!tsi->tsi_is_client)
+ goto clean;
+
+ tsi->tsi_ops->tso_fini(tsi);
+
+ LASSERT (!tsi->tsi_stopping);
+ LASSERT (list_empty(&tsi->tsi_active_rpcs));
+ LASSERT (!sfw_test_active(tsi));
+
+ while (!list_empty(&tsi->tsi_units)) {
+ tsu = list_entry(tsi->tsi_units.next,
+ sfw_test_unit_t, tsu_list);
+ list_del(&tsu->tsu_list);
+ LIBCFS_FREE(tsu, sizeof(*tsu));
+ }
+
+ while (!list_empty(&tsi->tsi_free_rpcs)) {
+ rpc = list_entry(tsi->tsi_free_rpcs.next,
+ srpc_client_rpc_t, crpc_list);
+ list_del(&rpc->crpc_list);
+ LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
+ }
+
+clean:
+ sfw_unload_test(tsi);
+ LIBCFS_FREE(tsi, sizeof(*tsi));
+ return;
+}
+
+void
+sfw_destroy_batch (sfw_batch_t *tsb)
+{
+ sfw_test_instance_t *tsi;
+
+ LASSERT (!sfw_batch_active(tsb));
+ LASSERT (list_empty(&tsb->bat_list));
+
+ while (!list_empty(&tsb->bat_tests)) {
+ tsi = list_entry(tsb->bat_tests.next,
+ sfw_test_instance_t, tsi_list);
+ list_del_init(&tsi->tsi_list);
+ sfw_destroy_test_instance(tsi);
+ }
+
+ LIBCFS_FREE(tsb, sizeof(sfw_batch_t));
+ return;
+}
+
+void
+sfw_destroy_session (sfw_session_t *sn)
+{
+ sfw_batch_t *batch;
+
+ LASSERT (list_empty(&sn->sn_list));
+ LASSERT (sn != sfw_data.fw_session);
+
+ while (!list_empty(&sn->sn_batches)) {
+ batch = list_entry(sn->sn_batches.next,
+ sfw_batch_t, bat_list);
+ list_del_init(&batch->bat_list);
+ sfw_destroy_batch(batch);
+ }
+
+ LIBCFS_FREE(sn, sizeof(*sn));
+ atomic_dec(&sfw_data.fw_nzombies);
+ return;
+}
+
+void
+sfw_unpack_addtest_req(srpc_msg_t *msg)
+{
+ srpc_test_reqst_t *req = &msg->msg_body.tes_reqst;
+
+ LASSERT (msg->msg_type == SRPC_MSG_TEST_REQST);
+ LASSERT (req->tsr_is_client);
+
+ if (msg->msg_magic == SRPC_MSG_MAGIC)
+ return; /* no flipping needed */
+
+ LASSERT (msg->msg_magic == __swab32(SRPC_MSG_MAGIC));
+
+ if (req->tsr_service == SRPC_SERVICE_BRW) {
+ if ((msg->msg_ses_feats & LST_FEAT_BULK_LEN) == 0) {
+ test_bulk_req_t *bulk = &req->tsr_u.bulk_v0;
+
+ __swab32s(&bulk->blk_opc);
+ __swab32s(&bulk->blk_npg);
+ __swab32s(&bulk->blk_flags);
+
+ } else {
+ test_bulk_req_v1_t *bulk = &req->tsr_u.bulk_v1;
+
+ __swab16s(&bulk->blk_opc);
+ __swab16s(&bulk->blk_flags);
+ __swab32s(&bulk->blk_offset);
+ __swab32s(&bulk->blk_len);
+ }
+
+ return;
+ }
+
+ if (req->tsr_service == SRPC_SERVICE_PING) {
+ test_ping_req_t *ping = &req->tsr_u.ping;
+
+ __swab32s(&ping->png_size);
+ __swab32s(&ping->png_flags);
+ return;
+ }
+
+ LBUG ();
+ return;
+}
+
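+/* Create a test instance for the batch from an add-test RPC; for client
+ * instances, one test unit is created per destination per concurrency. */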
+int
+sfw_add_test_instance (sfw_batch_t *tsb, srpc_server_rpc_t *rpc)
+{
+ srpc_msg_t *msg = &rpc->srpc_reqstbuf->buf_msg;
+ srpc_test_reqst_t *req = &msg->msg_body.tes_reqst;
+ srpc_bulk_t *bk = rpc->srpc_bulk;
+ int ndest = req->tsr_ndest;
+ sfw_test_unit_t *tsu;
+ sfw_test_instance_t *tsi;
+ int i;
+ int rc;
+
+ LIBCFS_ALLOC(tsi, sizeof(*tsi));
+ if (tsi == NULL) {
+ CERROR ("Can't allocate test instance for batch: "LPU64"\n",
+ tsb->bat_id.bat_id);
+ return -ENOMEM;
+ }
+
+ memset(tsi, 0, sizeof(*tsi));
+ spin_lock_init(&tsi->tsi_lock);
+ atomic_set(&tsi->tsi_nactive, 0);
+ INIT_LIST_HEAD(&tsi->tsi_units);
+ INIT_LIST_HEAD(&tsi->tsi_free_rpcs);
+ INIT_LIST_HEAD(&tsi->tsi_active_rpcs);
+
+ tsi->tsi_stopping = 0;
+ tsi->tsi_batch = tsb;
+ tsi->tsi_loop = req->tsr_loop;
+ tsi->tsi_concur = req->tsr_concur;
+ tsi->tsi_service = req->tsr_service;
+ tsi->tsi_is_client = !!(req->tsr_is_client);
+ tsi->tsi_stoptsu_onerr = !!(req->tsr_stop_onerr);
+
+ rc = sfw_load_test(tsi);
+ if (rc != 0) {
+ LIBCFS_FREE(tsi, sizeof(*tsi));
+ return rc;
+ }
+
+ LASSERT (!sfw_batch_active(tsb));
+
+ if (!tsi->tsi_is_client) {
+ /* it's test server, just add it to tsb */
+ list_add_tail(&tsi->tsi_list, &tsb->bat_tests);
+ return 0;
+ }
+
+ LASSERT (bk != NULL);
+ LASSERT (bk->bk_niov * SFW_ID_PER_PAGE >= (unsigned int)ndest);
+ LASSERT((unsigned int)bk->bk_len >=
+ sizeof(lnet_process_id_packed_t) * ndest);
+
+ sfw_unpack_addtest_req(msg);
+ memcpy(&tsi->tsi_u, &req->tsr_u, sizeof(tsi->tsi_u));
+
+ for (i = 0; i < ndest; i++) {
+ lnet_process_id_packed_t *dests;
+ lnet_process_id_packed_t id;
+ int j;
+
+ dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].kiov_page);
+ LASSERT (dests != NULL); /* my pages are within KVM always */
+ id = dests[i % SFW_ID_PER_PAGE];
+ if (msg->msg_magic != SRPC_MSG_MAGIC)
+ sfw_unpack_id(id);
+
+ for (j = 0; j < tsi->tsi_concur; j++) {
+ LIBCFS_ALLOC(tsu, sizeof(sfw_test_unit_t));
+ if (tsu == NULL) {
+ rc = -ENOMEM;
+ CERROR ("Can't allocate tsu for %d\n",
+ tsi->tsi_service);
+ goto error;
+ }
+
+ tsu->tsu_dest.nid = id.nid;
+ tsu->tsu_dest.pid = id.pid;
+ tsu->tsu_instance = tsi;
+ tsu->tsu_private = NULL;
+ list_add_tail(&tsu->tsu_list, &tsi->tsi_units);
+ }
+ }
+
+ rc = tsi->tsi_ops->tso_init(tsi);
+ if (rc == 0) {
+ list_add_tail(&tsi->tsi_list, &tsb->bat_tests);
+ return 0;
+ }
+
+error:
+ LASSERT (rc != 0);
+ sfw_destroy_test_instance(tsi);
+ return rc;
+}
+
+static void
+sfw_test_unit_done (sfw_test_unit_t *tsu)
+{
+ sfw_test_instance_t *tsi = tsu->tsu_instance;
+ sfw_batch_t *tsb = tsi->tsi_batch;
+ sfw_session_t *sn = tsb->bat_session;
+
+ LASSERT (sfw_test_active(tsi));
+
+ if (!atomic_dec_and_test(&tsi->tsi_nactive))
+ return;
+
+ /* the test instance is done */
+ spin_lock(&tsi->tsi_lock);
+
+ tsi->tsi_stopping = 0;
+
+ spin_unlock(&tsi->tsi_lock);
+
+ spin_lock(&sfw_data.fw_lock);
+
+ if (!atomic_dec_and_test(&tsb->bat_nactive) ||/* tsb still active */
+ sn == sfw_data.fw_session) { /* sn also active */
+ spin_unlock(&sfw_data.fw_lock);
+ return;
+ }
+
+ LASSERT (!list_empty(&sn->sn_list)); /* I'm a zombie! */
+
+ list_for_each_entry (tsb, &sn->sn_batches, bat_list) {
+ if (sfw_batch_active(tsb)) {
+ spin_unlock(&sfw_data.fw_lock);
+ return;
+ }
+ }
+
+ list_del_init(&sn->sn_list);
+ spin_unlock(&sfw_data.fw_lock);
+
+ sfw_destroy_session(sn);
+ return;
+}
+
+void
+sfw_test_rpc_done (srpc_client_rpc_t *rpc)
+{
+ sfw_test_unit_t *tsu = rpc->crpc_priv;
+ sfw_test_instance_t *tsi = tsu->tsu_instance;
+ int done = 0;
+
+ tsi->tsi_ops->tso_done_rpc(tsu, rpc);
+
+ spin_lock(&tsi->tsi_lock);
+
+ LASSERT (sfw_test_active(tsi));
+ LASSERT (!list_empty(&rpc->crpc_list));
+
+ list_del_init(&rpc->crpc_list);
+
+ /* batch is stopping, the loop is done, or an error occurred */
+ if (tsi->tsi_stopping ||
+ tsu->tsu_loop == 0 ||
+ (rpc->crpc_status != 0 && tsi->tsi_stoptsu_onerr))
+ done = 1;
+
+ /* dec ref for poster */
+ srpc_client_rpc_decref(rpc);
+
+ spin_unlock(&tsi->tsi_lock);
+
+ if (!done) {
+ swi_schedule_workitem(&tsu->tsu_worker);
+ return;
+ }
+
+ sfw_test_unit_done(tsu);
+ return;
+}
+
+int
+sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer,
+ unsigned features, int nblk, int blklen,
+ srpc_client_rpc_t **rpcpp)
+{
+ srpc_client_rpc_t *rpc = NULL;
+ sfw_test_instance_t *tsi = tsu->tsu_instance;
+
+ spin_lock(&tsi->tsi_lock);
+
+ LASSERT (sfw_test_active(tsi));
+
+ if (!list_empty(&tsi->tsi_free_rpcs)) {
+ /* pick an idle RPC from the free list */
+ rpc = list_entry(tsi->tsi_free_rpcs.next,
+ srpc_client_rpc_t, crpc_list);
+ LASSERT (nblk == rpc->crpc_bulk.bk_niov);
+ list_del_init(&rpc->crpc_list);
+ }
+
+ spin_unlock(&tsi->tsi_lock);
+
+ if (rpc == NULL) {
+ rpc = srpc_create_client_rpc(peer, tsi->tsi_service, nblk,
+ blklen, sfw_test_rpc_done,
+ sfw_test_rpc_fini, tsu);
+ } else {
+ srpc_init_client_rpc(rpc, peer, tsi->tsi_service, nblk,
+ blklen, sfw_test_rpc_done,
+ sfw_test_rpc_fini, tsu);
+ }
+
+ if (rpc == NULL) {
+ CERROR("Can't create rpc for test %d\n", tsi->tsi_service);
+ return -ENOMEM;
+ }
+
+ rpc->crpc_reqstmsg.msg_ses_feats = features;
+ *rpcpp = rpc;
+
+ return 0;
+}
+
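+/* Work item handler for a test unit: post one test RPC per invocation and
+ * return 1 (after swi_exit_workitem) once the unit has finished. */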
+int
+sfw_run_test (swi_workitem_t *wi)
+{
+ sfw_test_unit_t *tsu = wi->swi_workitem.wi_data;
+ sfw_test_instance_t *tsi = tsu->tsu_instance;
+ srpc_client_rpc_t *rpc = NULL;
+
+ LASSERT (wi == &tsu->tsu_worker);
+
+ if (tsi->tsi_ops->tso_prep_rpc(tsu, tsu->tsu_dest, &rpc) != 0) {
+ LASSERT (rpc == NULL);
+ goto test_done;
+ }
+
+ LASSERT (rpc != NULL);
+
+ spin_lock(&tsi->tsi_lock);
+
+ if (tsi->tsi_stopping) {
+ list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
+ spin_unlock(&tsi->tsi_lock);
+ goto test_done;
+ }
+
+ if (tsu->tsu_loop > 0)
+ tsu->tsu_loop--;
+
+ list_add_tail(&rpc->crpc_list, &tsi->tsi_active_rpcs);
+ spin_unlock(&tsi->tsi_lock);
+
+ rpc->crpc_timeout = rpc_timeout;
+
+ spin_lock(&rpc->crpc_lock);
+ srpc_post_rpc(rpc);
+ spin_unlock(&rpc->crpc_lock);
+ return 0;
+
+test_done:
+ /*
+ * No one can schedule me now since:
+ * - the previous RPC, if any, has completed and no new RPC is
+ * initiated;
+ * - my batch is still active, so no one can run it again now.
+ * Cancel pending schedules and prevent future schedule attempts:
+ */
+ swi_exit_workitem(wi);
+ sfw_test_unit_done(tsu);
+ return 1;
+}
+
+int
+sfw_run_batch (sfw_batch_t *tsb)
+{
+ swi_workitem_t *wi;
+ sfw_test_unit_t *tsu;
+ sfw_test_instance_t *tsi;
+
+ if (sfw_batch_active(tsb)) {
+ CDEBUG(D_NET, "Batch already active: "LPU64" (%d)\n",
+ tsb->bat_id.bat_id, atomic_read(&tsb->bat_nactive));
+ return 0;
+ }
+
+ list_for_each_entry (tsi, &tsb->bat_tests, tsi_list) {
+ if (!tsi->tsi_is_client) /* skip server instances */
+ continue;
+
+ LASSERT (!tsi->tsi_stopping);
+ LASSERT (!sfw_test_active(tsi));
+
+ atomic_inc(&tsb->bat_nactive);
+
+ list_for_each_entry (tsu, &tsi->tsi_units, tsu_list) {
+ atomic_inc(&tsi->tsi_nactive);
+ tsu->tsu_loop = tsi->tsi_loop;
+ wi = &tsu->tsu_worker;
+ swi_init_workitem(wi, tsu, sfw_run_test,
+ lst_sched_test[lnet_cpt_of_nid(tsu->tsu_dest.nid)]);
+ swi_schedule_workitem(wi);
+ }
+ }
+
+ return 0;
+}
+
+int
+sfw_stop_batch (sfw_batch_t *tsb, int force)
+{
+ sfw_test_instance_t *tsi;
+ srpc_client_rpc_t *rpc;
+
+ if (!sfw_batch_active(tsb)) {
+ CDEBUG(D_NET, "Batch "LPU64" inactive\n", tsb->bat_id.bat_id);
+ return 0;
+ }
+
+ list_for_each_entry (tsi, &tsb->bat_tests, tsi_list) {
+ spin_lock(&tsi->tsi_lock);
+
+ if (!tsi->tsi_is_client ||
+ !sfw_test_active(tsi) || tsi->tsi_stopping) {
+ spin_unlock(&tsi->tsi_lock);
+ continue;
+ }
+
+ tsi->tsi_stopping = 1;
+
+ if (!force) {
+ spin_unlock(&tsi->tsi_lock);
+ continue;
+ }
+
+ /* abort launched rpcs in the test */
+ list_for_each_entry(rpc, &tsi->tsi_active_rpcs, crpc_list) {
+ spin_lock(&rpc->crpc_lock);
+
+ srpc_abort_rpc(rpc, -EINTR);
+
+ spin_unlock(&rpc->crpc_lock);
+ }
+
+ spin_unlock(&tsi->tsi_lock);
+ }
+
+ return 0;
+}
+
+int
+sfw_query_batch (sfw_batch_t *tsb, int testidx, srpc_batch_reply_t *reply)
+{
+ sfw_test_instance_t *tsi;
+
+ if (testidx < 0)
+ return -EINVAL;
+
+ if (testidx == 0) {
+ reply->bar_active = atomic_read(&tsb->bat_nactive);
+ return 0;
+ }
+
+ list_for_each_entry (tsi, &tsb->bat_tests, tsi_list) {
+ if (testidx-- > 1)
+ continue;
+
+ reply->bar_active = atomic_read(&tsi->tsi_nactive);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+void
+sfw_free_pages (srpc_server_rpc_t *rpc)
+{
+ srpc_free_bulk(rpc->srpc_bulk);
+ rpc->srpc_bulk = NULL;
+}
+
+int
+sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
+ int sink)
+{
+ LASSERT(rpc->srpc_bulk == NULL);
+ LASSERT(npages > 0 && npages <= LNET_MAX_IOV);
+
+ rpc->srpc_bulk = srpc_alloc_bulk(cpt, npages, len, sink);
+ if (rpc->srpc_bulk == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int
+sfw_add_test (srpc_server_rpc_t *rpc)
+{
+ sfw_session_t *sn = sfw_data.fw_session;
+ srpc_test_reply_t *reply = &rpc->srpc_replymsg.msg_body.tes_reply;
+ srpc_test_reqst_t *request;
+ int rc;
+ sfw_batch_t *bat;
+
+ request = &rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst;
+ reply->tsr_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id;
+
+ if (request->tsr_loop == 0 ||
+ request->tsr_concur == 0 ||
+ request->tsr_sid.ses_nid == LNET_NID_ANY ||
+ request->tsr_ndest > SFW_MAX_NDESTS ||
+ (request->tsr_is_client && request->tsr_ndest == 0) ||
+ request->tsr_concur > SFW_MAX_CONCUR ||
+ request->tsr_service > SRPC_SERVICE_MAX_ID ||
+ request->tsr_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID) {
+ reply->tsr_status = EINVAL;
+ return 0;
+ }
+
+ if (sn == NULL || !sfw_sid_equal(request->tsr_sid, sn->sn_id) ||
+ sfw_find_test_case(request->tsr_service) == NULL) {
+ reply->tsr_status = ENOENT;
+ return 0;
+ }
+
+ bat = sfw_bid2batch(request->tsr_bid);
+ if (bat == NULL) {
+ CERROR ("Dropping RPC (%s) from %s under memory pressure.\n",
+ rpc->srpc_scd->scd_svc->sv_name,
+ libcfs_id2str(rpc->srpc_peer));
+ return -ENOMEM;
+ }
+
+ if (sfw_batch_active(bat)) {
+ reply->tsr_status = EBUSY;
+ return 0;
+ }
+
+ if (request->tsr_is_client && rpc->srpc_bulk == NULL) {
+ /* rpc will be resumed later in sfw_bulk_ready */
+ int npg = sfw_id_pages(request->tsr_ndest);
+ int len;
+
+ if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) {
+ len = npg * PAGE_CACHE_SIZE;
+
+ } else {
+ len = sizeof(lnet_process_id_packed_t) *
+ request->tsr_ndest;
+ }
+
+ return sfw_alloc_pages(rpc, CFS_CPT_ANY, npg, len, 1);
+ }
+
+ rc = sfw_add_test_instance(bat, rpc);
+ CDEBUG (rc == 0 ? D_NET : D_WARNING,
+ "%s test: sv %d %s, loop %d, concur %d, ndest %d\n",
+ rc == 0 ? "Added" : "Failed to add", request->tsr_service,
+ request->tsr_is_client ? "client" : "server",
+ request->tsr_loop, request->tsr_concur, request->tsr_ndest);
+
+ reply->tsr_status = (rc < 0) ? -rc : rc;
+ return 0;
+}
+
+int
+sfw_control_batch (srpc_batch_reqst_t *request, srpc_batch_reply_t *reply)
+{
+ sfw_session_t *sn = sfw_data.fw_session;
+ int rc = 0;
+ sfw_batch_t *bat;
+
+ reply->bar_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id;
+
+ if (sn == NULL || !sfw_sid_equal(request->bar_sid, sn->sn_id)) {
+ reply->bar_status = ESRCH;
+ return 0;
+ }
+
+ bat = sfw_find_batch(request->bar_bid);
+ if (bat == NULL) {
+ reply->bar_status = ENOENT;
+ return 0;
+ }
+
+ switch (request->bar_opc) {
+ case SRPC_BATCH_OPC_RUN:
+ rc = sfw_run_batch(bat);
+ break;
+
+ case SRPC_BATCH_OPC_STOP:
+ rc = sfw_stop_batch(bat, request->bar_arg);
+ break;
+
+ case SRPC_BATCH_OPC_QUERY:
+ rc = sfw_query_batch(bat, request->bar_testidx, reply);
+ break;
+
+ default:
+ return -EINVAL; /* drop it */
+ }
+
+ reply->bar_status = (rc < 0) ? -rc : rc;
+ return 0;
+}
+
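+/* Dispatch an incoming framework RPC to the matching service handler after
+ * checking session features and defusing the session timer. */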
+int
+sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
+{
+ struct srpc_service *sv = rpc->srpc_scd->scd_svc;
+ srpc_msg_t *reply = &rpc->srpc_replymsg;
+ srpc_msg_t *request = &rpc->srpc_reqstbuf->buf_msg;
+ unsigned features = LST_FEATS_MASK;
+ int rc = 0;
+
+ LASSERT(sfw_data.fw_active_srpc == NULL);
+ LASSERT(sv->sv_id <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
+
+ spin_lock(&sfw_data.fw_lock);
+
+ if (sfw_data.fw_shuttingdown) {
+ spin_unlock(&sfw_data.fw_lock);
+ return -ESHUTDOWN;
+ }
+
+ /* Remove the timer to avoid racing with it or expiring the active session */
+ if (sfw_del_session_timer() != 0) {
+ CERROR("Dropping RPC (%s) from %s: racing with expiry timer.",
+ sv->sv_name, libcfs_id2str(rpc->srpc_peer));
+ spin_unlock(&sfw_data.fw_lock);
+ return -EAGAIN;
+ }
+
+ sfw_data.fw_active_srpc = rpc;
+ spin_unlock(&sfw_data.fw_lock);
+
+ sfw_unpack_message(request);
+ LASSERT(request->msg_type == srpc_service2request(sv->sv_id));
+
+ /* rpc module should have checked this */
+ LASSERT(request->msg_version == SRPC_MSG_VERSION);
+
+ if (sv->sv_id != SRPC_SERVICE_MAKE_SESSION &&
+ sv->sv_id != SRPC_SERVICE_DEBUG) {
+ sfw_session_t *sn = sfw_data.fw_session;
+
+ if (sn != NULL &&
+ sn->sn_features != request->msg_ses_feats) {
+ CNETERR("Features of framework RPC don't match "
+ "features of current session: %x/%x\n",
+ request->msg_ses_feats, sn->sn_features);
+ reply->msg_body.reply.status = EPROTO;
+ reply->msg_body.reply.sid = sn->sn_id;
+ goto out;
+ }
+
+ } else if ((request->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
+ /* NB: at this point an old version will ignore the features and
+ * create a new session anyway, so the console should be able
+ * to handle this */
+ reply->msg_body.reply.status = EPROTO;
+ goto out;
+ }
+
+ switch (sv->sv_id) {
+ default:
+ LBUG ();
+ case SRPC_SERVICE_TEST:
+ rc = sfw_add_test(rpc);
+ break;
+
+ case SRPC_SERVICE_BATCH:
+ rc = sfw_control_batch(&request->msg_body.bat_reqst,
+ &reply->msg_body.bat_reply);
+ break;
+
+ case SRPC_SERVICE_QUERY_STAT:
+ rc = sfw_get_stats(&request->msg_body.stat_reqst,
+ &reply->msg_body.stat_reply);
+ break;
+
+ case SRPC_SERVICE_DEBUG:
+ rc = sfw_debug_session(&request->msg_body.dbg_reqst,
+ &reply->msg_body.dbg_reply);
+ break;
+
+ case SRPC_SERVICE_MAKE_SESSION:
+ rc = sfw_make_session(&request->msg_body.mksn_reqst,
+ &reply->msg_body.mksn_reply);
+ break;
+
+ case SRPC_SERVICE_REMOVE_SESSION:
+ rc = sfw_remove_session(&request->msg_body.rmsn_reqst,
+ &reply->msg_body.rmsn_reply);
+ break;
+ }
+
+ if (sfw_data.fw_session != NULL)
+ features = sfw_data.fw_session->sn_features;
+ out:
+ reply->msg_ses_feats = features;
+ rpc->srpc_done = sfw_server_rpc_done;
+ spin_lock(&sfw_data.fw_lock);
+
+ if (!sfw_data.fw_shuttingdown)
+ sfw_add_session_timer();
+
+ sfw_data.fw_active_srpc = NULL;
+ spin_unlock(&sfw_data.fw_lock);
+ return rc;
+}
+
+int
+sfw_bulk_ready(struct srpc_server_rpc *rpc, int status)
+{
+ struct srpc_service *sv = rpc->srpc_scd->scd_svc;
+ int rc;
+
+ LASSERT(rpc->srpc_bulk != NULL);
+ LASSERT(sv->sv_id == SRPC_SERVICE_TEST);
+ LASSERT(sfw_data.fw_active_srpc == NULL);
+ LASSERT(rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst.tsr_is_client);
+
+ spin_lock(&sfw_data.fw_lock);
+
+ if (status != 0) {
+ CERROR("Bulk transfer failed for RPC: "
+ "service %s, peer %s, status %d\n",
+ sv->sv_name, libcfs_id2str(rpc->srpc_peer), status);
+ spin_unlock(&sfw_data.fw_lock);
+ return -EIO;
+ }
+
+ if (sfw_data.fw_shuttingdown) {
+ spin_unlock(&sfw_data.fw_lock);
+ return -ESHUTDOWN;
+ }
+
+ if (sfw_del_session_timer() != 0) {
+ CERROR("Dropping RPC (%s) from %s: racing with expiry timer",
+ sv->sv_name, libcfs_id2str(rpc->srpc_peer));
+ spin_unlock(&sfw_data.fw_lock);
+ return -EAGAIN;
+ }
+
+ sfw_data.fw_active_srpc = rpc;
+ spin_unlock(&sfw_data.fw_lock);
+
+ rc = sfw_add_test(rpc);
+
+ spin_lock(&sfw_data.fw_lock);
+
+ if (!sfw_data.fw_shuttingdown)
+ sfw_add_session_timer();
+
+ sfw_data.fw_active_srpc = NULL;
+ spin_unlock(&sfw_data.fw_lock);
+ return rc;
+}
+
+srpc_client_rpc_t *
+sfw_create_rpc(lnet_process_id_t peer, int service,
+ unsigned features, int nbulkiov, int bulklen,
+ void (*done)(srpc_client_rpc_t *), void *priv)
+{
+ srpc_client_rpc_t *rpc = NULL;
+
+ spin_lock(&sfw_data.fw_lock);
+
+ LASSERT (!sfw_data.fw_shuttingdown);
+ LASSERT (service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
+
+ if (nbulkiov == 0 && !list_empty(&sfw_data.fw_zombie_rpcs)) {
+ rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
+ srpc_client_rpc_t, crpc_list);
+ list_del(&rpc->crpc_list);
+
+ srpc_init_client_rpc(rpc, peer, service, 0, 0,
+ done, sfw_client_rpc_fini, priv);
+ }
+
+ spin_unlock(&sfw_data.fw_lock);
+
+ if (rpc == NULL) {
+ rpc = srpc_create_client_rpc(peer, service,
+ nbulkiov, bulklen, done,
+ nbulkiov != 0 ? NULL :
+ sfw_client_rpc_fini,
+ priv);
+ }
+
+ if (rpc != NULL) /* "session" is a framework-level concept */
+ rpc->crpc_reqstmsg.msg_ses_feats = features;
+
+ return rpc;
+}
+
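+/* byte-swap an incoming message if it came from a peer with opposite endianness */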
+void
+sfw_unpack_message (srpc_msg_t *msg)
+{
+ if (msg->msg_magic == SRPC_MSG_MAGIC)
+ return; /* no flipping needed */
+
+ /* srpc module should guarantee I wouldn't get crap */
+ LASSERT (msg->msg_magic == __swab32(SRPC_MSG_MAGIC));
+
+ if (msg->msg_type == SRPC_MSG_STAT_REQST) {
+ srpc_stat_reqst_t *req = &msg->msg_body.stat_reqst;
+
+ __swab32s(&req->str_type);
+ __swab64s(&req->str_rpyid);
+ sfw_unpack_sid(req->str_sid);
+ return;
+ }
+
+ if (msg->msg_type == SRPC_MSG_STAT_REPLY) {
+ srpc_stat_reply_t *rep = &msg->msg_body.stat_reply;
+
+ __swab32s(&rep->str_status);
+ sfw_unpack_sid(rep->str_sid);
+ sfw_unpack_fw_counters(rep->str_fw);
+ sfw_unpack_rpc_counters(rep->str_rpc);
+ sfw_unpack_lnet_counters(rep->str_lnet);
+ return;
+ }
+
+ if (msg->msg_type == SRPC_MSG_MKSN_REQST) {
+ srpc_mksn_reqst_t *req = &msg->msg_body.mksn_reqst;
+
+ __swab64s(&req->mksn_rpyid);
+ __swab32s(&req->mksn_force);
+ sfw_unpack_sid(req->mksn_sid);
+ return;
+ }
+
+ if (msg->msg_type == SRPC_MSG_MKSN_REPLY) {
+ srpc_mksn_reply_t *rep = &msg->msg_body.mksn_reply;
+
+ __swab32s(&rep->mksn_status);
+ __swab32s(&rep->mksn_timeout);
+ sfw_unpack_sid(rep->mksn_sid);
+ return;
+ }
+
+ if (msg->msg_type == SRPC_MSG_RMSN_REQST) {
+ srpc_rmsn_reqst_t *req = &msg->msg_body.rmsn_reqst;
+
+ __swab64s(&req->rmsn_rpyid);
+ sfw_unpack_sid(req->rmsn_sid);
+ return;
+ }
+
+ if (msg->msg_type == SRPC_MSG_RMSN_REPLY) {
+ srpc_rmsn_reply_t *rep = &msg->msg_body.rmsn_reply;
+
+ __swab32s(&rep->rmsn_status);
+ sfw_unpack_sid(rep->rmsn_sid);
+ return;
+ }
+
+ if (msg->msg_type == SRPC_MSG_DEBUG_REQST) {
+ srpc_debug_reqst_t *req = &msg->msg_body.dbg_reqst;
+
+ __swab64s(&req->dbg_rpyid);
+ __swab32s(&req->dbg_flags);
+ sfw_unpack_sid(req->dbg_sid);
+ return;
+ }
+
+ if (msg->msg_type == SRPC_MSG_DEBUG_REPLY) {
+ srpc_debug_reply_t *rep = &msg->msg_body.dbg_reply;
+
+ __swab32s(&rep->dbg_nbatch);
+ __swab32s(&rep->dbg_timeout);
+ sfw_unpack_sid(rep->dbg_sid);
+ return;
+ }
+
+ if (msg->msg_type == SRPC_MSG_BATCH_REQST) {
+ srpc_batch_reqst_t *req = &msg->msg_body.bat_reqst;
+
+ __swab32s(&req->bar_opc);
+ __swab64s(&req->bar_rpyid);
+ __swab32s(&req->bar_testidx);
+ __swab32s(&req->bar_arg);
+ sfw_unpack_sid(req->bar_sid);
+ __swab64s(&req->bar_bid.bat_id);
+ return;
+ }
+
+ if (msg->msg_type == SRPC_MSG_BATCH_REPLY) {
+ srpc_batch_reply_t *rep = &msg->msg_body.bat_reply;
+
+ __swab32s(&rep->bar_status);
+ sfw_unpack_sid(rep->bar_sid);
+ return;
+ }
+
+ if (msg->msg_type == SRPC_MSG_TEST_REQST) {
+ srpc_test_reqst_t *req = &msg->msg_body.tes_reqst;
+
+ __swab64s(&req->tsr_rpyid);
+ __swab64s(&req->tsr_bulkid);
+ __swab32s(&req->tsr_loop);
+ __swab32s(&req->tsr_ndest);
+ __swab32s(&req->tsr_concur);
+ __swab32s(&req->tsr_service);
+ sfw_unpack_sid(req->tsr_sid);
+ __swab64s(&req->tsr_bid.bat_id);
+ return;
+ }
+
+ if (msg->msg_type == SRPC_MSG_TEST_REPLY) {
+ srpc_test_reply_t *rep = &msg->msg_body.tes_reply;
+
+ __swab32s(&rep->tsr_status);
+ sfw_unpack_sid(rep->tsr_sid);
+ return;
+ }
+
+ if (msg->msg_type == SRPC_MSG_JOIN_REQST) {
+ srpc_join_reqst_t *req = &msg->msg_body.join_reqst;
+
+ __swab64s(&req->join_rpyid);
+ sfw_unpack_sid(req->join_sid);
+ return;
+ }
+
+ if (msg->msg_type == SRPC_MSG_JOIN_REPLY) {
+ srpc_join_reply_t *rep = &msg->msg_body.join_reply;
+
+ __swab32s(&rep->join_status);
+ __swab32s(&rep->join_timeout);
+ sfw_unpack_sid(rep->join_sid);
+ return;
+ }
+
+ LBUG ();
+ return;
+}
+
+void
+sfw_abort_rpc (srpc_client_rpc_t *rpc)
+{
+ LASSERT(atomic_read(&rpc->crpc_refcount) > 0);
+ LASSERT(rpc->crpc_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
+
+ spin_lock(&rpc->crpc_lock);
+ srpc_abort_rpc(rpc, -EINTR);
+ spin_unlock(&rpc->crpc_lock);
+ return;
+}
+
+void
+sfw_post_rpc (srpc_client_rpc_t *rpc)
+{
+ spin_lock(&rpc->crpc_lock);
+
+ LASSERT (!rpc->crpc_closed);
+ LASSERT (!rpc->crpc_aborted);
+ LASSERT (list_empty(&rpc->crpc_list));
+ LASSERT (!sfw_data.fw_shuttingdown);
+
+ rpc->crpc_timeout = rpc_timeout;
+ srpc_post_rpc(rpc);
+
+ spin_unlock(&rpc->crpc_lock);
+ return;
+}
+
+static srpc_service_t sfw_services[] =
+{
+ {
+ /* sv_id */ SRPC_SERVICE_DEBUG,
+ /* sv_name */ "debug",
+ 0
+ },
+ {
+ /* sv_id */ SRPC_SERVICE_QUERY_STAT,
+ /* sv_name */ "query stats",
+ 0
+ },
+ {
+ /* sv_id */ SRPC_SERVICE_MAKE_SESSION,
+ /* sv_name */ "make session",
+ 0
+ },
+ {
+ /* sv_id */ SRPC_SERVICE_REMOVE_SESSION,
+ /* sv_name */ "remove session",
+ 0
+ },
+ {
+ /* sv_id */ SRPC_SERVICE_BATCH,
+ /* sv_name */ "batch service",
+ 0
+ },
+ {
+ /* sv_id */ SRPC_SERVICE_TEST,
+ /* sv_name */ "test service",
+ 0
+ },
+ {
+ /* sv_id */ 0,
+ /* sv_name */ NULL,
+ 0
+ }
+};
+
+extern sfw_test_client_ops_t ping_test_client;
+extern srpc_service_t ping_test_service;
+extern void ping_init_test_client(void);
+extern void ping_init_test_service(void);
+
+extern sfw_test_client_ops_t brw_test_client;
+extern srpc_service_t brw_test_service;
+extern void brw_init_test_client(void);
+extern void brw_init_test_service(void);
+
+
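+/* Start the selftest framework: register the built-in brw and ping test
+ * cases, add the framework services listed in sfw_services[] and post their
+ * request buffers; any failure triggers sfw_shutdown(). */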
+int
+sfw_startup (void)
+{
+ int i;
+ int rc;
+ int error;
+ srpc_service_t *sv;
+ sfw_test_case_t *tsc;
+
+
+ if (session_timeout < 0) {
+ CERROR ("Session timeout must be non-negative: %d\n",
+ session_timeout);
+ return -EINVAL;
+ }
+
+ if (rpc_timeout < 0) {
+ CERROR ("RPC timeout must be non-negative: %d\n",
+ rpc_timeout);
+ return -EINVAL;
+ }
+
+ if (session_timeout == 0)
+ CWARN ("Zero session_timeout specified "
+ "- test sessions never expire.\n");
+
+ if (rpc_timeout == 0)
+ CWARN ("Zero rpc_timeout specified "
+		       "- test RPCs never expire.\n");
+
+ memset(&sfw_data, 0, sizeof(struct smoketest_framework));
+
+ sfw_data.fw_session = NULL;
+ sfw_data.fw_active_srpc = NULL;
+ spin_lock_init(&sfw_data.fw_lock);
+ atomic_set(&sfw_data.fw_nzombies, 0);
+ INIT_LIST_HEAD(&sfw_data.fw_tests);
+ INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
+ INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
+
+ brw_init_test_client();
+ brw_init_test_service();
+ rc = sfw_register_test(&brw_test_service, &brw_test_client);
+ LASSERT (rc == 0);
+
+ ping_init_test_client();
+ ping_init_test_service();
+ rc = sfw_register_test(&ping_test_service, &ping_test_client);
+ LASSERT (rc == 0);
+
+ error = 0;
+ list_for_each_entry (tsc, &sfw_data.fw_tests, tsc_list) {
+ sv = tsc->tsc_srv_service;
+
+ rc = srpc_add_service(sv);
+ LASSERT (rc != -EBUSY);
+ if (rc != 0) {
+ CWARN ("Failed to add %s service: %d\n",
+ sv->sv_name, rc);
+ error = rc;
+ }
+ }
+
+ for (i = 0; ; i++) {
+ sv = &sfw_services[i];
+ if (sv->sv_name == NULL) break;
+
+ sv->sv_bulk_ready = NULL;
+ sv->sv_handler = sfw_handle_server_rpc;
+ sv->sv_wi_total = SFW_FRWK_WI_MAX;
+ if (sv->sv_id == SRPC_SERVICE_TEST)
+ sv->sv_bulk_ready = sfw_bulk_ready;
+
+ rc = srpc_add_service(sv);
+ LASSERT (rc != -EBUSY);
+ if (rc != 0) {
+ CWARN ("Failed to add %s service: %d\n",
+ sv->sv_name, rc);
+ error = rc;
+ }
+
+		/* about to call sfw_shutdown(), no need to add buffers */
+ if (error) continue;
+
+ rc = srpc_service_add_buffers(sv, sv->sv_wi_total);
+ if (rc != 0) {
+ CWARN("Failed to reserve enough buffers: "
+			      "service %s, %d needed, error %d\n",
+ sv->sv_name, sv->sv_wi_total, rc);
+ error = -ENOMEM;
+ }
+ }
+
+ if (error != 0)
+ sfw_shutdown();
+ return error;
+}
+
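+/* Tear down the framework: wait for the active RPC, the session timer and
+ * zombie sessions, then shut down and remove all services and free cached
+ * client RPCs and registered test cases. */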
+void
+sfw_shutdown (void)
+{
+ srpc_service_t *sv;
+ sfw_test_case_t *tsc;
+ int i;
+
+ spin_lock(&sfw_data.fw_lock);
+
+ sfw_data.fw_shuttingdown = 1;
+ lst_wait_until(sfw_data.fw_active_srpc == NULL, sfw_data.fw_lock,
+ "waiting for active RPC to finish.\n");
+
+ if (sfw_del_session_timer() != 0)
+ lst_wait_until(sfw_data.fw_session == NULL, sfw_data.fw_lock,
+ "waiting for session timer to explode.\n");
+
+ sfw_deactivate_session();
+ lst_wait_until(atomic_read(&sfw_data.fw_nzombies) == 0,
+ sfw_data.fw_lock,
+ "waiting for %d zombie sessions to die.\n",
+ atomic_read(&sfw_data.fw_nzombies));
+
+ spin_unlock(&sfw_data.fw_lock);
+
+ for (i = 0; ; i++) {
+ sv = &sfw_services[i];
+ if (sv->sv_name == NULL)
+ break;
+
+ srpc_shutdown_service(sv);
+ srpc_remove_service(sv);
+ }
+
+ list_for_each_entry (tsc, &sfw_data.fw_tests, tsc_list) {
+ sv = tsc->tsc_srv_service;
+ srpc_shutdown_service(sv);
+ srpc_remove_service(sv);
+ }
+
+ while (!list_empty(&sfw_data.fw_zombie_rpcs)) {
+ srpc_client_rpc_t *rpc;
+
+ rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
+ srpc_client_rpc_t, crpc_list);
+ list_del(&rpc->crpc_list);
+
+ LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
+ }
+
+ for (i = 0; ; i++) {
+ sv = &sfw_services[i];
+ if (sv->sv_name == NULL)
+ break;
+
+ srpc_wait_service_shutdown(sv);
+ }
+
+ while (!list_empty(&sfw_data.fw_tests)) {
+ tsc = list_entry(sfw_data.fw_tests.next,
+ sfw_test_case_t, tsc_list);
+
+ srpc_wait_service_shutdown(tsc->tsc_srv_service);
+
+ list_del(&tsc->tsc_list);
+ LIBCFS_FREE(tsc, sizeof(*tsc));
+ }
+
+ return;
+}
diff --git a/drivers/staging/lustre/lnet/selftest/module.c b/drivers/staging/lustre/lnet/selftest/module.c
new file mode 100644
index 0000000..6dd4309
--- /dev/null
+++ b/drivers/staging/lustre/lnet/selftest/module.c
@@ -0,0 +1,171 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include "selftest.h"
+
+enum {
+ LST_INIT_NONE = 0,
+ LST_INIT_WI_SERIAL,
+ LST_INIT_WI_TEST,
+ LST_INIT_RPC,
+ LST_INIT_FW,
+ LST_INIT_CONSOLE
+};
+
+extern int lstcon_console_init(void);
+extern int lstcon_console_fini(void);
+
+static int lst_init_step = LST_INIT_NONE;
+
+struct cfs_wi_sched *lst_sched_serial;
+struct cfs_wi_sched **lst_sched_test;
+
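+/* Undo initialisation in reverse order; the switch intentionally falls
+ * through from the last completed step recorded in lst_init_step down to
+ * LST_INIT_NONE. */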
+void
+lnet_selftest_fini(void)
+{
+ int i;
+
+ switch (lst_init_step) {
+ case LST_INIT_CONSOLE:
+ lstcon_console_fini();
+ case LST_INIT_FW:
+ sfw_shutdown();
+ case LST_INIT_RPC:
+ srpc_shutdown();
+ case LST_INIT_WI_TEST:
+ for (i = 0;
+ i < cfs_cpt_number(lnet_cpt_table()); i++) {
+ if (lst_sched_test[i] == NULL)
+ continue;
+ cfs_wi_sched_destroy(lst_sched_test[i]);
+ }
+ LIBCFS_FREE(lst_sched_test,
+ sizeof(lst_sched_test[0]) *
+ cfs_cpt_number(lnet_cpt_table()));
+ lst_sched_test = NULL;
+
+ case LST_INIT_WI_SERIAL:
+ cfs_wi_sched_destroy(lst_sched_serial);
+ lst_sched_serial = NULL;
+ case LST_INIT_NONE:
+ break;
+ default:
+ LBUG();
+ }
+ return;
+}
+
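+/* Compile-time checks that the on-wire layout of selftest messages never
+ * changes size or field offsets. */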
+void
+lnet_selftest_structure_assertion(void)
+{
+ CLASSERT(sizeof(srpc_msg_t) == 160);
+ CLASSERT(sizeof(srpc_test_reqst_t) == 70);
+ CLASSERT(offsetof(srpc_msg_t, msg_body.tes_reqst.tsr_concur) == 72);
+ CLASSERT(offsetof(srpc_msg_t, msg_body.tes_reqst.tsr_ndest) == 78);
+ CLASSERT(sizeof(srpc_stat_reply_t) == 136);
+ CLASSERT(sizeof(srpc_stat_reqst_t) == 28);
+}
+
+int
+lnet_selftest_init(void)
+{
+ int nscheds;
+ int rc;
+ int i;
+
+ rc = cfs_wi_sched_create("lst_s", lnet_cpt_table(), CFS_CPT_ANY,
+ 1, &lst_sched_serial);
+ if (rc != 0) {
+ CERROR("Failed to create serial WI scheduler for LST\n");
+ return rc;
+ }
+ lst_init_step = LST_INIT_WI_SERIAL;
+
+ nscheds = cfs_cpt_number(lnet_cpt_table());
+ LIBCFS_ALLOC(lst_sched_test, sizeof(lst_sched_test[0]) * nscheds);
+	if (lst_sched_test == NULL) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+ lst_init_step = LST_INIT_WI_TEST;
+ for (i = 0; i < nscheds; i++) {
+ int nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
+
+ /* reserve at least one CPU for LND */
+ nthrs = max(nthrs - 1, 1);
+ rc = cfs_wi_sched_create("lst_t", lnet_cpt_table(), i,
+ nthrs, &lst_sched_test[i]);
+ if (rc != 0) {
+ CERROR("Failed to create CPT affinity WI scheduler "
+ "%d for LST\n", i);
+ goto error;
+ }
+ }
+
+ rc = srpc_startup();
+ if (rc != 0) {
+		CERROR("LST can't start up RPC\n");
+ goto error;
+ }
+ lst_init_step = LST_INIT_RPC;
+
+ rc = sfw_startup();
+ if (rc != 0) {
+		CERROR("LST can't start up framework\n");
+ goto error;
+ }
+ lst_init_step = LST_INIT_FW;
+
+ rc = lstcon_console_init();
+ if (rc != 0) {
+		CERROR("LST can't start up console\n");
+ goto error;
+ }
+ lst_init_step = LST_INIT_CONSOLE;
+ return 0;
+error:
+ lnet_selftest_fini();
+ return rc;
+}
+
+
+MODULE_DESCRIPTION("LNet Selftest");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.9.0");
+
+module_init(lnet_selftest_init);
+module_exit(lnet_selftest_fini);
diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
new file mode 100644
index 0000000..f0f9194
--- /dev/null
+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
@@ -0,0 +1,229 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/selftest/conctl.c
+ *
+ * Test client & Server
+ *
+ * Author: Liang Zhen <liangzhen@clusterfs.com>
+ */
+
+#include "selftest.h"
+
+#define LST_PING_TEST_MAGIC 0xbabeface
+
+int ping_srv_workitems = SFW_TEST_WI_MAX;
+CFS_MODULE_PARM(ping_srv_workitems, "i", int, 0644, "# PING server workitems");
+
+typedef struct {
+ spinlock_t pnd_lock; /* serialize */
+ int pnd_counter; /* sequence counter */
+} lst_ping_data_t;
+
+static lst_ping_data_t lst_ping_data;
+
+static int
+ping_client_init(sfw_test_instance_t *tsi)
+{
+ sfw_session_t *sn = tsi->tsi_batch->bat_session;
+
+ LASSERT(tsi->tsi_is_client);
+ LASSERT(sn != NULL && (sn->sn_features & ~LST_FEATS_MASK) == 0);
+
+ spin_lock_init(&lst_ping_data.pnd_lock);
+ lst_ping_data.pnd_counter = 0;
+
+ return 0;
+}
+
+static void
+ping_client_fini (sfw_test_instance_t *tsi)
+{
+ sfw_session_t *sn = tsi->tsi_batch->bat_session;
+ int errors;
+
+ LASSERT (sn != NULL);
+ LASSERT (tsi->tsi_is_client);
+
+ errors = atomic_read(&sn->sn_ping_errors);
+ if (errors)
+ CWARN ("%d pings have failed.\n", errors);
+ else
+ CDEBUG (D_NET, "Ping test finished OK.\n");
+}
+
+static int
+ping_client_prep_rpc(sfw_test_unit_t *tsu,
+ lnet_process_id_t dest, srpc_client_rpc_t **rpc)
+{
+ srpc_ping_reqst_t *req;
+ sfw_test_instance_t *tsi = tsu->tsu_instance;
+ sfw_session_t *sn = tsi->tsi_batch->bat_session;
+ struct timeval tv;
+ int rc;
+
+ LASSERT(sn != NULL);
+ LASSERT((sn->sn_features & ~LST_FEATS_MASK) == 0);
+
+ rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, 0, 0, rpc);
+ if (rc != 0)
+ return rc;
+
+ req = &(*rpc)->crpc_reqstmsg.msg_body.ping_reqst;
+
+ req->pnr_magic = LST_PING_TEST_MAGIC;
+
+ spin_lock(&lst_ping_data.pnd_lock);
+ req->pnr_seq = lst_ping_data.pnd_counter++;
+ spin_unlock(&lst_ping_data.pnd_lock);
+
+ cfs_fs_timeval(&tv);
+ req->pnr_time_sec = tv.tv_sec;
+ req->pnr_time_usec = tv.tv_usec;
+
+ return rc;
+}
+
+static void
+ping_client_done_rpc (sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
+{
+ sfw_test_instance_t *tsi = tsu->tsu_instance;
+ sfw_session_t *sn = tsi->tsi_batch->bat_session;
+ srpc_ping_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.ping_reqst;
+ srpc_ping_reply_t *reply = &rpc->crpc_replymsg.msg_body.ping_reply;
+ struct timeval tv;
+
+ LASSERT (sn != NULL);
+
+ if (rpc->crpc_status != 0) {
+ if (!tsi->tsi_stopping) /* rpc could have been aborted */
+ atomic_inc(&sn->sn_ping_errors);
+ CERROR ("Unable to ping %s (%d): %d\n",
+ libcfs_id2str(rpc->crpc_dest),
+ reqst->pnr_seq, rpc->crpc_status);
+ return;
+ }
+
+ if (rpc->crpc_replymsg.msg_magic != SRPC_MSG_MAGIC) {
+ __swab32s(&reply->pnr_seq);
+ __swab32s(&reply->pnr_magic);
+ __swab32s(&reply->pnr_status);
+ }
+
+ if (reply->pnr_magic != LST_PING_TEST_MAGIC) {
+ rpc->crpc_status = -EBADMSG;
+ atomic_inc(&sn->sn_ping_errors);
+ CERROR ("Bad magic %u from %s, %u expected.\n",
+ reply->pnr_magic, libcfs_id2str(rpc->crpc_dest),
+ LST_PING_TEST_MAGIC);
+ return;
+ }
+
+ if (reply->pnr_seq != reqst->pnr_seq) {
+ rpc->crpc_status = -EBADMSG;
+ atomic_inc(&sn->sn_ping_errors);
+ CERROR ("Bad seq %u from %s, %u expected.\n",
+ reply->pnr_seq, libcfs_id2str(rpc->crpc_dest),
+ reqst->pnr_seq);
+ return;
+ }
+
+ cfs_fs_timeval(&tv);
+ CDEBUG (D_NET, "%d reply in %u usec\n", reply->pnr_seq,
+ (unsigned)((tv.tv_sec - (unsigned)reqst->pnr_time_sec) * 1000000
+ + (tv.tv_usec - reqst->pnr_time_usec)));
+ return;
+}
+
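+/* Server side of the ping test: byte-swap the request if it came from a peer
+ * with different endianness, validate the magic, and echo the sequence
+ * number back in the reply. */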
+static int
+ping_server_handle(struct srpc_server_rpc *rpc)
+{
+ struct srpc_service *sv = rpc->srpc_scd->scd_svc;
+ srpc_msg_t *reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
+ srpc_msg_t *replymsg = &rpc->srpc_replymsg;
+ srpc_ping_reqst_t *req = &reqstmsg->msg_body.ping_reqst;
+ srpc_ping_reply_t *rep = &rpc->srpc_replymsg.msg_body.ping_reply;
+
+ LASSERT (sv->sv_id == SRPC_SERVICE_PING);
+
+ if (reqstmsg->msg_magic != SRPC_MSG_MAGIC) {
+ LASSERT (reqstmsg->msg_magic == __swab32(SRPC_MSG_MAGIC));
+
+ __swab32s(&req->pnr_seq);
+ __swab32s(&req->pnr_magic);
+ __swab64s(&req->pnr_time_sec);
+ __swab64s(&req->pnr_time_usec);
+ }
+ LASSERT (reqstmsg->msg_type == srpc_service2request(sv->sv_id));
+
+ if (req->pnr_magic != LST_PING_TEST_MAGIC) {
+		CERROR ("Unexpected magic %08x from %s\n",
+ req->pnr_magic, libcfs_id2str(rpc->srpc_peer));
+ return -EINVAL;
+ }
+
+ rep->pnr_seq = req->pnr_seq;
+ rep->pnr_magic = LST_PING_TEST_MAGIC;
+
+ if ((reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
+ replymsg->msg_ses_feats = LST_FEATS_MASK;
+ rep->pnr_status = EPROTO;
+ return 0;
+ }
+
+ replymsg->msg_ses_feats = reqstmsg->msg_ses_feats;
+
+	CDEBUG(D_NET, "Got ping %d from %s\n",
+ req->pnr_seq, libcfs_id2str(rpc->srpc_peer));
+ return 0;
+}
+
+sfw_test_client_ops_t ping_test_client;
+void ping_init_test_client(void)
+{
+ ping_test_client.tso_init = ping_client_init;
+ ping_test_client.tso_fini = ping_client_fini;
+ ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
+ ping_test_client.tso_done_rpc = ping_client_done_rpc;
+}
+
+srpc_service_t ping_test_service;
+void ping_init_test_service(void)
+{
+ ping_test_service.sv_id = SRPC_SERVICE_PING;
+ ping_test_service.sv_name = "ping_test";
+ ping_test_service.sv_handler = ping_server_handle;
+ ping_test_service.sv_wi_total = ping_srv_workitems;
+}
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
new file mode 100644
index 0000000..7659a26
--- /dev/null
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -0,0 +1,1668 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/selftest/rpc.c
+ *
+ * Author: Isaac Huang <isaac@clusterfs.com>
+ *
+ * 2012-05-13: Liang Zhen <liang@whamcloud.com>
+ * - percpt data for service to improve smp performance
+ * - code cleanup
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include "selftest.h"
+
+typedef enum {
+ SRPC_STATE_NONE,
+ SRPC_STATE_NI_INIT,
+ SRPC_STATE_EQ_INIT,
+ SRPC_STATE_RUNNING,
+ SRPC_STATE_STOPPING,
+} srpc_state_t;
+
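+/* Global state of the RPC module: the registered services, the shared LNet
+ * event queue, RPC counters and the matchbits generator, protected by
+ * rpc_glock. */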
+struct smoketest_rpc {
+ spinlock_t rpc_glock; /* global lock */
+ srpc_service_t *rpc_services[SRPC_SERVICE_MAX_ID + 1];
+ lnet_handle_eq_t rpc_lnet_eq; /* _the_ LNet event queue */
+ srpc_state_t rpc_state;
+ srpc_counters_t rpc_counters;
+ __u64 rpc_matchbits; /* matchbits counter */
+} srpc_data;
+
+static inline int
+srpc_serv_portal(int svc_id)
+{
+ return svc_id < SRPC_FRAMEWORK_SERVICE_MAX_ID ?
+ SRPC_FRAMEWORK_REQUEST_PORTAL : SRPC_REQUEST_PORTAL;
+}
+
+/* forward ref's */
+int srpc_handle_rpc (swi_workitem_t *wi);
+
+void srpc_get_counters (srpc_counters_t *cnt)
+{
+ spin_lock(&srpc_data.rpc_glock);
+ *cnt = srpc_data.rpc_counters;
+ spin_unlock(&srpc_data.rpc_glock);
+}
+
+void srpc_set_counters (const srpc_counters_t *cnt)
+{
+ spin_lock(&srpc_data.rpc_glock);
+ srpc_data.rpc_counters = *cnt;
+ spin_unlock(&srpc_data.rpc_glock);
+}
+
+int
+srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
+{
+ nob = min(nob, (int)PAGE_CACHE_SIZE);
+
+ LASSERT(nob > 0);
+ LASSERT(i >= 0 && i < bk->bk_niov);
+
+ bk->bk_iovs[i].kiov_offset = 0;
+ bk->bk_iovs[i].kiov_page = pg;
+ bk->bk_iovs[i].kiov_len = nob;
+ return nob;
+}
+
+void
+srpc_free_bulk (srpc_bulk_t *bk)
+{
+ int i;
+ struct page *pg;
+
+ LASSERT (bk != NULL);
+
+ for (i = 0; i < bk->bk_niov; i++) {
+ pg = bk->bk_iovs[i].kiov_page;
+ if (pg == NULL) break;
+
+ __free_page(pg);
+ }
+
+ LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bk->bk_niov]));
+ return;
+}
+
+srpc_bulk_t *
+srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
+{
+ srpc_bulk_t *bk;
+ int i;
+
+ LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV);
+
+ LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt,
+ offsetof(srpc_bulk_t, bk_iovs[bulk_npg]));
+ if (bk == NULL) {
+ CERROR("Can't allocate descriptor for %d pages\n", bulk_npg);
+ return NULL;
+ }
+
+ memset(bk, 0, offsetof(srpc_bulk_t, bk_iovs[bulk_npg]));
+ bk->bk_sink = sink;
+ bk->bk_len = bulk_len;
+ bk->bk_niov = bulk_npg;
+
+ for (i = 0; i < bulk_npg; i++) {
+ struct page *pg;
+ int nob;
+
+ pg = alloc_pages_node(cfs_cpt_spread_node(lnet_cpt_table(), cpt),
+ GFP_IOFS, 0);
+ if (pg == NULL) {
+ CERROR("Can't allocate page %d of %d\n", i, bulk_npg);
+ srpc_free_bulk(bk);
+ return NULL;
+ }
+
+ nob = srpc_add_bulk_page(bk, pg, i, bulk_len);
+ bulk_len -= nob;
+ }
+
+ return bk;
+}
+
+static inline __u64
+srpc_next_id (void)
+{
+ __u64 id;
+
+ spin_lock(&srpc_data.rpc_glock);
+ id = srpc_data.rpc_matchbits++;
+ spin_unlock(&srpc_data.rpc_glock);
+ return id;
+}
+
+void
+srpc_init_server_rpc(struct srpc_server_rpc *rpc,
+ struct srpc_service_cd *scd,
+ struct srpc_buffer *buffer)
+{
+ memset(rpc, 0, sizeof(*rpc));
+ swi_init_workitem(&rpc->srpc_wi, rpc, srpc_handle_rpc,
+ srpc_serv_is_framework(scd->scd_svc) ?
+ lst_sched_serial : lst_sched_test[scd->scd_cpt]);
+
+ rpc->srpc_ev.ev_fired = 1; /* no event expected now */
+
+ rpc->srpc_scd = scd;
+ rpc->srpc_reqstbuf = buffer;
+ rpc->srpc_peer = buffer->buf_peer;
+ rpc->srpc_self = buffer->buf_self;
+ LNetInvalidateHandle(&rpc->srpc_replymdh);
+}
+
+static void
+srpc_service_fini(struct srpc_service *svc)
+{
+ struct srpc_service_cd *scd;
+ struct srpc_server_rpc *rpc;
+ struct srpc_buffer *buf;
+ struct list_head *q;
+ int i;
+
+ if (svc->sv_cpt_data == NULL)
+ return;
+
+ cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
+ while (1) {
+ if (!list_empty(&scd->scd_buf_posted))
+ q = &scd->scd_buf_posted;
+ else if (!list_empty(&scd->scd_buf_blocked))
+ q = &scd->scd_buf_blocked;
+ else
+ break;
+
+ while (!list_empty(q)) {
+ buf = list_entry(q->next,
+ struct srpc_buffer,
+ buf_list);
+ list_del(&buf->buf_list);
+ LIBCFS_FREE(buf, sizeof(*buf));
+ }
+ }
+
+ LASSERT(list_empty(&scd->scd_rpc_active));
+
+ while (!list_empty(&scd->scd_rpc_free)) {
+ rpc = list_entry(scd->scd_rpc_free.next,
+ struct srpc_server_rpc,
+ srpc_list);
+ list_del(&rpc->srpc_list);
+ LIBCFS_FREE(rpc, sizeof(*rpc));
+ }
+ }
+
+ cfs_percpt_free(svc->sv_cpt_data);
+ svc->sv_cpt_data = NULL;
+}
+
+static int
+srpc_service_nrpcs(struct srpc_service *svc)
+{
+ int nrpcs = svc->sv_wi_total / svc->sv_ncpts;
+
+ return srpc_serv_is_framework(svc) ?
+ max(nrpcs, SFW_FRWK_WI_MIN) : max(nrpcs, SFW_TEST_WI_MIN);
+}
+
+int srpc_add_buffer(struct swi_workitem *wi);
+
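+/* Allocate per-CPT data for a service and pre-allocate its pool of server
+ * RPC descriptors; framework services are serialized and only use the first
+ * partition. */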
+static int
+srpc_service_init(struct srpc_service *svc)
+{
+ struct srpc_service_cd *scd;
+ struct srpc_server_rpc *rpc;
+ int nrpcs;
+ int i;
+ int j;
+
+ svc->sv_shuttingdown = 0;
+
+ svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(),
+ sizeof(struct srpc_service_cd));
+ if (svc->sv_cpt_data == NULL)
+ return -ENOMEM;
+
+ svc->sv_ncpts = srpc_serv_is_framework(svc) ?
+ 1 : cfs_cpt_number(lnet_cpt_table());
+ nrpcs = srpc_service_nrpcs(svc);
+
+ cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
+ scd->scd_cpt = i;
+ scd->scd_svc = svc;
+ spin_lock_init(&scd->scd_lock);
+ INIT_LIST_HEAD(&scd->scd_rpc_free);
+ INIT_LIST_HEAD(&scd->scd_rpc_active);
+ INIT_LIST_HEAD(&scd->scd_buf_posted);
+ INIT_LIST_HEAD(&scd->scd_buf_blocked);
+
+ scd->scd_ev.ev_data = scd;
+ scd->scd_ev.ev_type = SRPC_REQUEST_RCVD;
+
+ /* NB: don't use lst_sched_serial for adding buffer,
+ * see details in srpc_service_add_buffers() */
+ swi_init_workitem(&scd->scd_buf_wi, scd,
+ srpc_add_buffer, lst_sched_test[i]);
+
+ if (i != 0 && srpc_serv_is_framework(svc)) {
+			/* NB: a framework service only needs srpc_service_cd
+			 * for one partition, but we allocate for all CPTs to
+			 * keep the implementation simple; it wastes a little
+			 * memory but nobody should care */
+ continue;
+ }
+
+ for (j = 0; j < nrpcs; j++) {
+ LIBCFS_CPT_ALLOC(rpc, lnet_cpt_table(),
+ i, sizeof(*rpc));
+ if (rpc == NULL) {
+ srpc_service_fini(svc);
+ return -ENOMEM;
+ }
+ list_add(&rpc->srpc_list, &scd->scd_rpc_free);
+ }
+ }
+
+ return 0;
+}
+
+int
+srpc_add_service(struct srpc_service *sv)
+{
+ int id = sv->sv_id;
+
+ LASSERT(0 <= id && id <= SRPC_SERVICE_MAX_ID);
+
+ if (srpc_service_init(sv) != 0)
+ return -ENOMEM;
+
+ spin_lock(&srpc_data.rpc_glock);
+
+ LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);
+
+ if (srpc_data.rpc_services[id] != NULL) {
+ spin_unlock(&srpc_data.rpc_glock);
+ goto failed;
+ }
+
+ srpc_data.rpc_services[id] = sv;
+ spin_unlock(&srpc_data.rpc_glock);
+
+ CDEBUG(D_NET, "Adding service: id %d, name %s\n", id, sv->sv_name);
+ return 0;
+
+ failed:
+ srpc_service_fini(sv);
+ return -EBUSY;
+}
+
+int
+srpc_remove_service (srpc_service_t *sv)
+{
+ int id = sv->sv_id;
+
+ spin_lock(&srpc_data.rpc_glock);
+
+ if (srpc_data.rpc_services[id] != sv) {
+ spin_unlock(&srpc_data.rpc_glock);
+ return -ENOENT;
+ }
+
+ srpc_data.rpc_services[id] = NULL;
+ spin_unlock(&srpc_data.rpc_glock);
+ return 0;
+}
+
+int
+srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
+ int len, int options, lnet_process_id_t peer,
+ lnet_handle_md_t *mdh, srpc_event_t *ev)
+{
+ int rc;
+ lnet_md_t md;
+ lnet_handle_me_t meh;
+
+ rc = LNetMEAttach(portal, peer, matchbits, 0, LNET_UNLINK,
+ local ? LNET_INS_LOCAL : LNET_INS_AFTER, &meh);
+ if (rc != 0) {
+ CERROR ("LNetMEAttach failed: %d\n", rc);
+ LASSERT (rc == -ENOMEM);
+ return -ENOMEM;
+ }
+
+ md.threshold = 1;
+ md.user_ptr = ev;
+ md.start = buf;
+ md.length = len;
+ md.options = options;
+ md.eq_handle = srpc_data.rpc_lnet_eq;
+
+ rc = LNetMDAttach(meh, md, LNET_UNLINK, mdh);
+ if (rc != 0) {
+ CERROR ("LNetMDAttach failed: %d\n", rc);
+ LASSERT (rc == -ENOMEM);
+
+ rc = LNetMEUnlink(meh);
+ LASSERT (rc == 0);
+ return -ENOMEM;
+ }
+
+ CDEBUG (D_NET,
+ "Posted passive RDMA: peer %s, portal %d, matchbits "LPX64"\n",
+ libcfs_id2str(peer), portal, matchbits);
+ return 0;
+}
+
+int
+srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
+ int options, lnet_process_id_t peer, lnet_nid_t self,
+ lnet_handle_md_t *mdh, srpc_event_t *ev)
+{
+ int rc;
+ lnet_md_t md;
+
+ md.user_ptr = ev;
+ md.start = buf;
+ md.length = len;
+ md.eq_handle = srpc_data.rpc_lnet_eq;
+ md.threshold = ((options & LNET_MD_OP_GET) != 0) ? 2 : 1;
+ md.options = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET);
+
+ rc = LNetMDBind(md, LNET_UNLINK, mdh);
+ if (rc != 0) {
+ CERROR ("LNetMDBind failed: %d\n", rc);
+ LASSERT (rc == -ENOMEM);
+ return -ENOMEM;
+ }
+
+ /* this is kind of an abuse of the LNET_MD_OP_{PUT,GET} options.
+ * they're only meaningful for MDs attached to an ME (i.e. passive
+ * buffers... */
+ if ((options & LNET_MD_OP_PUT) != 0) {
+ rc = LNetPut(self, *mdh, LNET_NOACK_REQ, peer,
+ portal, matchbits, 0, 0);
+ } else {
+ LASSERT ((options & LNET_MD_OP_GET) != 0);
+
+ rc = LNetGet(self, *mdh, peer, portal, matchbits, 0);
+ }
+
+ if (rc != 0) {
+ CERROR ("LNet%s(%s, %d, "LPD64") failed: %d\n",
+ ((options & LNET_MD_OP_PUT) != 0) ? "Put" : "Get",
+ libcfs_id2str(peer), portal, matchbits, rc);
+
+ /* The forthcoming unlink event will complete this operation
+ * with failure, so fall through and return success here.
+ */
+ rc = LNetMDUnlink(*mdh);
+ LASSERT (rc == 0);
+ } else {
+ CDEBUG (D_NET,
+ "Posted active RDMA: peer %s, portal %u, matchbits "LPX64"\n",
+ libcfs_id2str(peer), portal, matchbits);
+ }
+ return 0;
+}
+
+int
+srpc_post_active_rqtbuf(lnet_process_id_t peer, int service, void *buf,
+ int len, lnet_handle_md_t *mdh, srpc_event_t *ev)
+{
+ return srpc_post_active_rdma(srpc_serv_portal(service), service,
+ buf, len, LNET_MD_OP_PUT, peer,
+ LNET_NID_ANY, mdh, ev);
+}
+
+int
+srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
+ lnet_handle_md_t *mdh, srpc_event_t *ev)
+{
+ lnet_process_id_t any = {0};
+
+ any.nid = LNET_NID_ANY;
+ any.pid = LNET_PID_ANY;
+
+ return srpc_post_passive_rdma(srpc_serv_portal(service),
+ local, service, buf, len,
+ LNET_MD_OP_PUT, any, mdh, ev);
+}
+
+int
+srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
+{
+ struct srpc_service *sv = scd->scd_svc;
+ struct srpc_msg *msg = &buf->buf_msg;
+ int rc;
+
+ LNetInvalidateHandle(&buf->buf_mdh);
+ list_add(&buf->buf_list, &scd->scd_buf_posted);
+ scd->scd_buf_nposted++;
+ spin_unlock(&scd->scd_lock);
+
+ rc = srpc_post_passive_rqtbuf(sv->sv_id,
+ !srpc_serv_is_framework(sv),
+ msg, sizeof(*msg), &buf->buf_mdh,
+ &scd->scd_ev);
+
+	/* At this point, an RPC (new or delayed) may have arrived in
+ * msg and its event handler has been called. So we must add
+ * buf to scd_buf_posted _before_ dropping scd_lock */
+
+ spin_lock(&scd->scd_lock);
+
+ if (rc == 0) {
+ if (!sv->sv_shuttingdown)
+ return 0;
+
+ spin_unlock(&scd->scd_lock);
+ /* srpc_shutdown_service might have tried to unlink me
+ * when my buf_mdh was still invalid */
+ LNetMDUnlink(buf->buf_mdh);
+ spin_lock(&scd->scd_lock);
+ return 0;
+ }
+
+ scd->scd_buf_nposted--;
+ if (sv->sv_shuttingdown)
+ return rc; /* don't allow to change scd_buf_posted */
+
+ list_del(&buf->buf_list);
+ spin_unlock(&scd->scd_lock);
+
+ LIBCFS_FREE(buf, sizeof(*buf));
+
+ spin_lock(&scd->scd_lock);
+ return rc;
+}
+
+int
+srpc_add_buffer(struct swi_workitem *wi)
+{
+ struct srpc_service_cd *scd = wi->swi_workitem.wi_data;
+ struct srpc_buffer *buf;
+ int rc = 0;
+
+	/* called by workitem scheduler threads; these threads have CPT
+	 * affinity set, so buffers will be posted on the CPT-local list
+	 * of the portal */
+ spin_lock(&scd->scd_lock);
+
+ while (scd->scd_buf_adjust > 0 &&
+ !scd->scd_svc->sv_shuttingdown) {
+ scd->scd_buf_adjust--; /* consume it */
+ scd->scd_buf_posting++;
+
+ spin_unlock(&scd->scd_lock);
+
+ LIBCFS_ALLOC(buf, sizeof(*buf));
+ if (buf == NULL) {
+ CERROR("Failed to add new buf to service: %s\n",
+ scd->scd_svc->sv_name);
+ spin_lock(&scd->scd_lock);
+ rc = -ENOMEM;
+ break;
+ }
+
+ spin_lock(&scd->scd_lock);
+ if (scd->scd_svc->sv_shuttingdown) {
+ spin_unlock(&scd->scd_lock);
+ LIBCFS_FREE(buf, sizeof(*buf));
+
+ spin_lock(&scd->scd_lock);
+ rc = -ESHUTDOWN;
+ break;
+ }
+
+ rc = srpc_service_post_buffer(scd, buf);
+ if (rc != 0)
+ break; /* buf has been freed inside */
+
+ LASSERT(scd->scd_buf_posting > 0);
+ scd->scd_buf_posting--;
+ scd->scd_buf_total++;
+ scd->scd_buf_low = MAX(2, scd->scd_buf_total / 4);
+ }
+
+ if (rc != 0) {
+ scd->scd_buf_err_stamp = cfs_time_current_sec();
+ scd->scd_buf_err = rc;
+
+ LASSERT(scd->scd_buf_posting > 0);
+ scd->scd_buf_posting--;
+ }
+
+ spin_unlock(&scd->scd_lock);
+ return 0;
+}
+
+int
+srpc_service_add_buffers(struct srpc_service *sv, int nbuffer)
+{
+ struct srpc_service_cd *scd;
+ int rc = 0;
+ int i;
+
+ LASSERTF(nbuffer > 0, "nbuffer must be positive: %d\n", nbuffer);
+
+ cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
+ spin_lock(&scd->scd_lock);
+
+ scd->scd_buf_err = 0;
+ scd->scd_buf_err_stamp = 0;
+ scd->scd_buf_posting = 0;
+ scd->scd_buf_adjust = nbuffer;
+ /* start to post buffers */
+ swi_schedule_workitem(&scd->scd_buf_wi);
+ spin_unlock(&scd->scd_lock);
+
+		/* a framework service only posts buffers for one partition */
+ if (srpc_serv_is_framework(sv))
+ break;
+ }
+
+ cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
+ spin_lock(&scd->scd_lock);
+		/*
+		 * NB: srpc_service_add_buffers() can be called inside the
+		 * thread context of lst_sched_serial, and we normally don't
+		 * allow sleeping inside the thread context of a WI scheduler
+		 * because it would block the current scheduler thread from
+		 * doing anything else, or even deadlock if it waits on the
+		 * result of another WI of the same scheduler. However, it is
+		 * safe here because scd_buf_wi is scheduled by a thread in a
+		 * different WI scheduler (lst_sched_test), so there is no
+		 * risk of deadlock, though this could block all WIs pending
+		 * on lst_sched_serial for a moment, which is not good but
+		 * not fatal.
+		 */
+ lst_wait_until(scd->scd_buf_err != 0 ||
+ (scd->scd_buf_adjust == 0 &&
+ scd->scd_buf_posting == 0),
+ scd->scd_lock, "waiting for adding buffer\n");
+
+ if (scd->scd_buf_err != 0 && rc == 0)
+ rc = scd->scd_buf_err;
+
+ spin_unlock(&scd->scd_lock);
+ }
+
+ return rc;
+}
+
+void
+srpc_service_remove_buffers(struct srpc_service *sv, int nbuffer)
+{
+ struct srpc_service_cd *scd;
+ int num;
+ int i;
+
+ LASSERT(!sv->sv_shuttingdown);
+
+ cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
+ spin_lock(&scd->scd_lock);
+
+ num = scd->scd_buf_total + scd->scd_buf_posting;
+ scd->scd_buf_adjust -= min(nbuffer, num);
+
+ spin_unlock(&scd->scd_lock);
+ }
+}
+
+/* returns 1 if sv has finished, otherwise 0 */
+int
+srpc_finish_service(struct srpc_service *sv)
+{
+ struct srpc_service_cd *scd;
+ struct srpc_server_rpc *rpc;
+ int i;
+
+ LASSERT(sv->sv_shuttingdown); /* srpc_shutdown_service called */
+
+ cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
+ spin_lock(&scd->scd_lock);
+ if (!swi_deschedule_workitem(&scd->scd_buf_wi)) {
+ spin_unlock(&scd->scd_lock);
+ return 0;
+ }
+
+ if (scd->scd_buf_nposted > 0) {
+			CDEBUG(D_NET, "waiting for %d posted buffers to unlink\n",
+ scd->scd_buf_nposted);
+ spin_unlock(&scd->scd_lock);
+ return 0;
+ }
+
+ if (list_empty(&scd->scd_rpc_active)) {
+ spin_unlock(&scd->scd_lock);
+ continue;
+ }
+
+ rpc = list_entry(scd->scd_rpc_active.next,
+ struct srpc_server_rpc, srpc_list);
+ CNETERR("Active RPC %p on shutdown: sv %s, peer %s, "
+ "wi %s scheduled %d running %d, "
+ "ev fired %d type %d status %d lnet %d\n",
+ rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
+ swi_state2str(rpc->srpc_wi.swi_state),
+ rpc->srpc_wi.swi_workitem.wi_scheduled,
+ rpc->srpc_wi.swi_workitem.wi_running,
+ rpc->srpc_ev.ev_fired, rpc->srpc_ev.ev_type,
+ rpc->srpc_ev.ev_status, rpc->srpc_ev.ev_lnet);
+ spin_unlock(&scd->scd_lock);
+ return 0;
+ }
+
+ /* no lock needed from now on */
+ srpc_service_fini(sv);
+ return 1;
+}
+
+/* called with scd->scd_lock held */
+void
+srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf)
+{
+ if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
+ if (srpc_service_post_buffer(scd, buf) != 0) {
+ CWARN("Failed to post %s buffer\n",
+ scd->scd_svc->sv_name);
+ }
+ return;
+ }
+
+ /* service is shutting down, or we want to recycle some buffers */
+ scd->scd_buf_total--;
+
+ if (scd->scd_buf_adjust < 0) {
+ scd->scd_buf_adjust++;
+ if (scd->scd_buf_adjust < 0 &&
+ scd->scd_buf_total == 0 && scd->scd_buf_posting == 0) {
+ CDEBUG(D_INFO,
+			       "Try to recycle %d buffers but nothing left\n",
+ scd->scd_buf_adjust);
+ scd->scd_buf_adjust = 0;
+ }
+ }
+
+ spin_unlock(&scd->scd_lock);
+ LIBCFS_FREE(buf, sizeof(*buf));
+ spin_lock(&scd->scd_lock);
+}
+
+void
+srpc_abort_service(struct srpc_service *sv)
+{
+ struct srpc_service_cd *scd;
+ struct srpc_server_rpc *rpc;
+ int i;
+
+ CDEBUG(D_NET, "Aborting service: id %d, name %s\n",
+ sv->sv_id, sv->sv_name);
+
+ cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
+ spin_lock(&scd->scd_lock);
+
+		/* schedule in-flight RPCs to notice the abort, NB:
+		 * racing with incoming RPCs; a complete fix should make test
+		 * RPCs carry the session ID in their headers */
+ list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list) {
+ rpc->srpc_aborted = 1;
+ swi_schedule_workitem(&rpc->srpc_wi);
+ }
+
+ spin_unlock(&scd->scd_lock);
+ }
+}
+
+void
+srpc_shutdown_service(srpc_service_t *sv)
+{
+ struct srpc_service_cd *scd;
+ struct srpc_server_rpc *rpc;
+ srpc_buffer_t *buf;
+ int i;
+
+ CDEBUG(D_NET, "Shutting down service: id %d, name %s\n",
+ sv->sv_id, sv->sv_name);
+
+ cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
+ spin_lock(&scd->scd_lock);
+
+ sv->sv_shuttingdown = 1; /* i.e. no new active RPC */
+
+ cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
+ spin_unlock(&scd->scd_lock);
+
+ cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
+ spin_lock(&scd->scd_lock);
+
+ /* schedule in-flight RPCs to notice the shutdown */
+ list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list)
+ swi_schedule_workitem(&rpc->srpc_wi);
+
+ spin_unlock(&scd->scd_lock);
+
+ /* OK to traverse scd_buf_posted without lock, since no one
+ * touches scd_buf_posted now */
+ list_for_each_entry(buf, &scd->scd_buf_posted, buf_list)
+ LNetMDUnlink(buf->buf_mdh);
+ }
+}
+
+int
+srpc_send_request (srpc_client_rpc_t *rpc)
+{
+ srpc_event_t *ev = &rpc->crpc_reqstev;
+ int rc;
+
+ ev->ev_fired = 0;
+ ev->ev_data = rpc;
+ ev->ev_type = SRPC_REQUEST_SENT;
+
+ rc = srpc_post_active_rqtbuf(rpc->crpc_dest, rpc->crpc_service,
+ &rpc->crpc_reqstmsg, sizeof(srpc_msg_t),
+ &rpc->crpc_reqstmdh, ev);
+ if (rc != 0) {
+ LASSERT (rc == -ENOMEM);
+ ev->ev_fired = 1; /* no more event expected */
+ }
+ return rc;
+}
+
+int
+srpc_prepare_reply (srpc_client_rpc_t *rpc)
+{
+ srpc_event_t *ev = &rpc->crpc_replyev;
+ __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.rpyid;
+ int rc;
+
+ ev->ev_fired = 0;
+ ev->ev_data = rpc;
+ ev->ev_type = SRPC_REPLY_RCVD;
+
+ *id = srpc_next_id();
+
+ rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
+ &rpc->crpc_replymsg, sizeof(srpc_msg_t),
+ LNET_MD_OP_PUT, rpc->crpc_dest,
+ &rpc->crpc_replymdh, ev);
+ if (rc != 0) {
+ LASSERT (rc == -ENOMEM);
+ ev->ev_fired = 1; /* no more event expected */
+ }
+ return rc;
+}
+
+int
+srpc_prepare_bulk (srpc_client_rpc_t *rpc)
+{
+ srpc_bulk_t *bk = &rpc->crpc_bulk;
+ srpc_event_t *ev = &rpc->crpc_bulkev;
+ __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid;
+ int rc;
+ int opt;
+
+ LASSERT (bk->bk_niov <= LNET_MAX_IOV);
+
+ if (bk->bk_niov == 0) return 0; /* nothing to do */
+
+ opt = bk->bk_sink ? LNET_MD_OP_PUT : LNET_MD_OP_GET;
+ opt |= LNET_MD_KIOV;
+
+ ev->ev_fired = 0;
+ ev->ev_data = rpc;
+ ev->ev_type = SRPC_BULK_REQ_RCVD;
+
+ *id = srpc_next_id();
+
+ rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
+ &bk->bk_iovs[0], bk->bk_niov, opt,
+ rpc->crpc_dest, &bk->bk_mdh, ev);
+ if (rc != 0) {
+ LASSERT (rc == -ENOMEM);
+ ev->ev_fired = 1; /* no more event expected */
+ }
+ return rc;
+}
+
+int
+srpc_do_bulk (srpc_server_rpc_t *rpc)
+{
+ srpc_event_t *ev = &rpc->srpc_ev;
+ srpc_bulk_t *bk = rpc->srpc_bulk;
+ __u64 id = rpc->srpc_reqstbuf->buf_msg.msg_body.reqst.bulkid;
+ int rc;
+ int opt;
+
+ LASSERT (bk != NULL);
+
+ opt = bk->bk_sink ? LNET_MD_OP_GET : LNET_MD_OP_PUT;
+ opt |= LNET_MD_KIOV;
+
+ ev->ev_fired = 0;
+ ev->ev_data = rpc;
+ ev->ev_type = bk->bk_sink ? SRPC_BULK_GET_RPLD : SRPC_BULK_PUT_SENT;
+
+ rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, id,
+ &bk->bk_iovs[0], bk->bk_niov, opt,
+ rpc->srpc_peer, rpc->srpc_self,
+ &bk->bk_mdh, ev);
+ if (rc != 0)
+ ev->ev_fired = 1; /* no more event expected */
+ return rc;
+}
+
+/* only called from srpc_handle_rpc */
+void
+srpc_server_rpc_done(srpc_server_rpc_t *rpc, int status)
+{
+ struct srpc_service_cd *scd = rpc->srpc_scd;
+ struct srpc_service *sv = scd->scd_svc;
+ srpc_buffer_t *buffer;
+
+ LASSERT (status != 0 || rpc->srpc_wi.swi_state == SWI_STATE_DONE);
+
+ rpc->srpc_status = status;
+
+ CDEBUG_LIMIT (status == 0 ? D_NET : D_NETERROR,
+ "Server RPC %p done: service %s, peer %s, status %s:%d\n",
+ rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
+ swi_state2str(rpc->srpc_wi.swi_state), status);
+
+ if (status != 0) {
+ spin_lock(&srpc_data.rpc_glock);
+ srpc_data.rpc_counters.rpcs_dropped++;
+ spin_unlock(&srpc_data.rpc_glock);
+ }
+
+ if (rpc->srpc_done != NULL)
+ (*rpc->srpc_done) (rpc);
+ LASSERT(rpc->srpc_bulk == NULL);
+
+ spin_lock(&scd->scd_lock);
+
+ if (rpc->srpc_reqstbuf != NULL) {
+		/* NB: might drop scd_lock in srpc_service_recycle_buffer,
+		 * but sv won't go away because scd_rpc_active is not empty */
+ srpc_service_recycle_buffer(scd, rpc->srpc_reqstbuf);
+ rpc->srpc_reqstbuf = NULL;
+ }
+
+ list_del(&rpc->srpc_list); /* from scd->scd_rpc_active */
+
+ /*
+ * No one can schedule me now since:
+ * - I'm not on scd_rpc_active.
+ * - all LNet events have been fired.
+ * Cancel pending schedules and prevent future schedule attempts:
+ */
+ LASSERT(rpc->srpc_ev.ev_fired);
+ swi_exit_workitem(&rpc->srpc_wi);
+
+ if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) {
+ buffer = list_entry(scd->scd_buf_blocked.next,
+ srpc_buffer_t, buf_list);
+ list_del(&buffer->buf_list);
+
+ srpc_init_server_rpc(rpc, scd, buffer);
+ list_add_tail(&rpc->srpc_list, &scd->scd_rpc_active);
+ swi_schedule_workitem(&rpc->srpc_wi);
+ } else {
+ list_add(&rpc->srpc_list, &scd->scd_rpc_free);
+ }
+
+ spin_unlock(&scd->scd_lock);
+ return;
+}
+
+/* handles an incoming RPC */
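+/* State machine: SWI_STATE_NEWBORN (unpack the request, call the service
+ * handler) -> SWI_STATE_BULK_STARTED (optional bulk transfer) ->
+ * SWI_STATE_REPLY_SUBMITTED -> SWI_STATE_DONE. */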
+int
+srpc_handle_rpc(swi_workitem_t *wi)
+{
+ struct srpc_server_rpc *rpc = wi->swi_workitem.wi_data;
+ struct srpc_service_cd *scd = rpc->srpc_scd;
+ struct srpc_service *sv = scd->scd_svc;
+ srpc_event_t *ev = &rpc->srpc_ev;
+ int rc = 0;
+
+ LASSERT(wi == &rpc->srpc_wi);
+
+ spin_lock(&scd->scd_lock);
+
+ if (sv->sv_shuttingdown || rpc->srpc_aborted) {
+ spin_unlock(&scd->scd_lock);
+
+ if (rpc->srpc_bulk != NULL)
+ LNetMDUnlink(rpc->srpc_bulk->bk_mdh);
+ LNetMDUnlink(rpc->srpc_replymdh);
+
+ if (ev->ev_fired) { /* no more event, OK to finish */
+ srpc_server_rpc_done(rpc, -ESHUTDOWN);
+ return 1;
+ }
+ return 0;
+ }
+
+ spin_unlock(&scd->scd_lock);
+
+ switch (wi->swi_state) {
+ default:
+ LBUG ();
+ case SWI_STATE_NEWBORN: {
+ srpc_msg_t *msg;
+ srpc_generic_reply_t *reply;
+
+ msg = &rpc->srpc_reqstbuf->buf_msg;
+ reply = &rpc->srpc_replymsg.msg_body.reply;
+
+ if (msg->msg_magic == 0) {
+ /* moaned already in srpc_lnet_ev_handler */
+ srpc_server_rpc_done(rpc, EBADMSG);
+ return 1;
+ }
+
+ srpc_unpack_msg_hdr(msg);
+ if (msg->msg_version != SRPC_MSG_VERSION) {
+ CWARN("Version mismatch: %u, %u expected, from %s\n",
+ msg->msg_version, SRPC_MSG_VERSION,
+ libcfs_id2str(rpc->srpc_peer));
+ reply->status = EPROTO;
+ /* drop through and send reply */
+ } else {
+ reply->status = 0;
+ rc = (*sv->sv_handler)(rpc);
+ LASSERT(reply->status == 0 || !rpc->srpc_bulk);
+ if (rc != 0) {
+ srpc_server_rpc_done(rpc, rc);
+ return 1;
+ }
+ }
+
+ wi->swi_state = SWI_STATE_BULK_STARTED;
+
+ if (rpc->srpc_bulk != NULL) {
+ rc = srpc_do_bulk(rpc);
+ if (rc == 0)
+ return 0; /* wait for bulk */
+
+ LASSERT (ev->ev_fired);
+ ev->ev_status = rc;
+ }
+ }
+ case SWI_STATE_BULK_STARTED:
+ LASSERT (rpc->srpc_bulk == NULL || ev->ev_fired);
+
+ if (rpc->srpc_bulk != NULL) {
+ rc = ev->ev_status;
+
+ if (sv->sv_bulk_ready != NULL)
+ rc = (*sv->sv_bulk_ready) (rpc, rc);
+
+ if (rc != 0) {
+ srpc_server_rpc_done(rpc, rc);
+ return 1;
+ }
+ }
+
+ wi->swi_state = SWI_STATE_REPLY_SUBMITTED;
+ rc = srpc_send_reply(rpc);
+ if (rc == 0)
+ return 0; /* wait for reply */
+ srpc_server_rpc_done(rpc, rc);
+ return 1;
+
+ case SWI_STATE_REPLY_SUBMITTED:
+ if (!ev->ev_fired) {
+ CERROR("RPC %p: bulk %p, service %d\n",
+ rpc, rpc->srpc_bulk, sv->sv_id);
+ CERROR("Event: status %d, type %d, lnet %d\n",
+ ev->ev_status, ev->ev_type, ev->ev_lnet);
+ LASSERT (ev->ev_fired);
+ }
+
+ wi->swi_state = SWI_STATE_DONE;
+ srpc_server_rpc_done(rpc, ev->ev_status);
+ return 1;
+ }
+
+ return 0;
+}
+
+void
+srpc_client_rpc_expired (void *data)
+{
+ srpc_client_rpc_t *rpc = data;
+
+ CWARN ("Client RPC expired: service %d, peer %s, timeout %d.\n",
+ rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
+ rpc->crpc_timeout);
+
+ spin_lock(&rpc->crpc_lock);
+
+ rpc->crpc_timeout = 0;
+ srpc_abort_rpc(rpc, -ETIMEDOUT);
+
+ spin_unlock(&rpc->crpc_lock);
+
+ spin_lock(&srpc_data.rpc_glock);
+ srpc_data.rpc_counters.rpcs_expired++;
+ spin_unlock(&srpc_data.rpc_glock);
+}
+
+inline void
+srpc_add_client_rpc_timer (srpc_client_rpc_t *rpc)
+{
+ stt_timer_t *timer = &rpc->crpc_timer;
+
+ if (rpc->crpc_timeout == 0) return;
+
+ INIT_LIST_HEAD(&timer->stt_list);
+ timer->stt_data = rpc;
+ timer->stt_func = srpc_client_rpc_expired;
+ timer->stt_expires = cfs_time_add(rpc->crpc_timeout,
+ cfs_time_current_sec());
+ stt_add_timer(timer);
+ return;
+}
+
+/*
+ * Called with rpc->crpc_lock held.
+ *
+ * Upon exit the RPC expiry timer is not queued and the handler is not
+ * running on any CPU.
+ */
+void
+srpc_del_client_rpc_timer (srpc_client_rpc_t *rpc)
+{
+ /* timer not planted or already exploded */
+ if (rpc->crpc_timeout == 0)
+ return;
+
+ /* timer successfully defused */
+ if (stt_del_timer(&rpc->crpc_timer))
+ return;
+
+ /* timer detonated, wait for it to explode */
+ while (rpc->crpc_timeout != 0) {
+ spin_unlock(&rpc->crpc_lock);
+
+ schedule();
+
+ spin_lock(&rpc->crpc_lock);
+ }
+}
+
+void
+srpc_client_rpc_done (srpc_client_rpc_t *rpc, int status)
+{
+ swi_workitem_t *wi = &rpc->crpc_wi;
+
+ LASSERT(status != 0 || wi->swi_state == SWI_STATE_DONE);
+
+ spin_lock(&rpc->crpc_lock);
+
+ rpc->crpc_closed = 1;
+ if (rpc->crpc_status == 0)
+ rpc->crpc_status = status;
+
+ srpc_del_client_rpc_timer(rpc);
+
+ CDEBUG_LIMIT ((status == 0) ? D_NET : D_NETERROR,
+ "Client RPC done: service %d, peer %s, status %s:%d:%d\n",
+ rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
+ swi_state2str(wi->swi_state), rpc->crpc_aborted, status);
+
+ /*
+ * No one can schedule me now since:
+ * - RPC timer has been defused.
+ * - all LNet events have been fired.
+ * - crpc_closed has been set, preventing srpc_abort_rpc from
+ * scheduling me.
+ * Cancel pending schedules and prevent future schedule attempts:
+ */
+ LASSERT (!srpc_event_pending(rpc));
+ swi_exit_workitem(wi);
+
+ spin_unlock(&rpc->crpc_lock);
+
+ (*rpc->crpc_done)(rpc);
+ return;
+}
+
+/* sends an outgoing RPC */
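+/* Client-side state machine: SWI_STATE_NEWBORN (post reply and bulk buffers,
+ * send the request) -> SWI_STATE_REQUEST_SUBMITTED -> SWI_STATE_REQUEST_SENT
+ * -> SWI_STATE_REPLY_RECEIVED -> SWI_STATE_DONE; any error aborts the RPC. */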
+int
+srpc_send_rpc (swi_workitem_t *wi)
+{
+ int rc = 0;
+ srpc_client_rpc_t *rpc;
+ srpc_msg_t *reply;
+ int do_bulk;
+
+ LASSERT(wi != NULL);
+
+ rpc = wi->swi_workitem.wi_data;
+
+ LASSERT (rpc != NULL);
+ LASSERT (wi == &rpc->crpc_wi);
+
+ reply = &rpc->crpc_replymsg;
+ do_bulk = rpc->crpc_bulk.bk_niov > 0;
+
+ spin_lock(&rpc->crpc_lock);
+
+ if (rpc->crpc_aborted) {
+ spin_unlock(&rpc->crpc_lock);
+ goto abort;
+ }
+
+ spin_unlock(&rpc->crpc_lock);
+
+ switch (wi->swi_state) {
+ default:
+ LBUG ();
+ case SWI_STATE_NEWBORN:
+ LASSERT (!srpc_event_pending(rpc));
+
+ rc = srpc_prepare_reply(rpc);
+ if (rc != 0) {
+ srpc_client_rpc_done(rpc, rc);
+ return 1;
+ }
+
+ rc = srpc_prepare_bulk(rpc);
+ if (rc != 0) break;
+
+ wi->swi_state = SWI_STATE_REQUEST_SUBMITTED;
+ rc = srpc_send_request(rpc);
+ break;
+
+ case SWI_STATE_REQUEST_SUBMITTED:
+ /* CAVEAT EMPTOR: rqtev, rpyev, and bulkev may come in any
+ * order; however, they're processed in a strict order:
+ * rqt, rpy, and bulk. */
+ if (!rpc->crpc_reqstev.ev_fired) break;
+
+ rc = rpc->crpc_reqstev.ev_status;
+ if (rc != 0) break;
+
+ wi->swi_state = SWI_STATE_REQUEST_SENT;
+ /* perhaps more events, fall thru */
+ case SWI_STATE_REQUEST_SENT: {
+ srpc_msg_type_t type = srpc_service2reply(rpc->crpc_service);
+
+ if (!rpc->crpc_replyev.ev_fired) break;
+
+ rc = rpc->crpc_replyev.ev_status;
+ if (rc != 0) break;
+
+ srpc_unpack_msg_hdr(reply);
+ if (reply->msg_type != type ||
+ (reply->msg_magic != SRPC_MSG_MAGIC &&
+ reply->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
+ CWARN ("Bad message from %s: type %u (%d expected),"
+ " magic %u (%d expected).\n",
+ libcfs_id2str(rpc->crpc_dest),
+ reply->msg_type, type,
+ reply->msg_magic, SRPC_MSG_MAGIC);
+ rc = -EBADMSG;
+ break;
+ }
+
+ if (do_bulk && reply->msg_body.reply.status != 0) {
+ CWARN ("Remote error %d at %s, unlink bulk buffer in "
+ "case peer didn't initiate bulk transfer\n",
+ reply->msg_body.reply.status,
+ libcfs_id2str(rpc->crpc_dest));
+ LNetMDUnlink(rpc->crpc_bulk.bk_mdh);
+ }
+
+ wi->swi_state = SWI_STATE_REPLY_RECEIVED;
+ }
+ case SWI_STATE_REPLY_RECEIVED:
+ if (do_bulk && !rpc->crpc_bulkev.ev_fired) break;
+
+ rc = do_bulk ? rpc->crpc_bulkev.ev_status : 0;
+
+ /* Bulk buffer was unlinked due to remote error. Clear error
+ * since reply buffer still contains valid data.
+ * NB rpc->crpc_done shouldn't look into bulk data in case of
+ * remote error. */
+ if (do_bulk && rpc->crpc_bulkev.ev_lnet == LNET_EVENT_UNLINK &&
+ rpc->crpc_status == 0 && reply->msg_body.reply.status != 0)
+ rc = 0;
+
+ wi->swi_state = SWI_STATE_DONE;
+ srpc_client_rpc_done(rpc, rc);
+ return 1;
+ }
+
+ if (rc != 0) {
+ spin_lock(&rpc->crpc_lock);
+ srpc_abort_rpc(rpc, rc);
+ spin_unlock(&rpc->crpc_lock);
+ }
+
+abort:
+ if (rpc->crpc_aborted) {
+ LNetMDUnlink(rpc->crpc_reqstmdh);
+ LNetMDUnlink(rpc->crpc_replymdh);
+ LNetMDUnlink(rpc->crpc_bulk.bk_mdh);
+
+ if (!srpc_event_pending(rpc)) {
+ srpc_client_rpc_done(rpc, -EINTR);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+srpc_client_rpc_t *
+srpc_create_client_rpc (lnet_process_id_t peer, int service,
+ int nbulkiov, int bulklen,
+ void (*rpc_done)(srpc_client_rpc_t *),
+ void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
+{
+ srpc_client_rpc_t *rpc;
+
+ LIBCFS_ALLOC(rpc, offsetof(srpc_client_rpc_t,
+ crpc_bulk.bk_iovs[nbulkiov]));
+ if (rpc == NULL)
+ return NULL;
+
+ srpc_init_client_rpc(rpc, peer, service, nbulkiov,
+ bulklen, rpc_done, rpc_fini, priv);
+ return rpc;
+}
+
+/* called with rpc->crpc_lock held */
+void
+srpc_abort_rpc (srpc_client_rpc_t *rpc, int why)
+{
+ LASSERT (why != 0);
+
+ if (rpc->crpc_aborted || /* already aborted */
+ rpc->crpc_closed) /* callback imminent */
+ return;
+
+ CDEBUG (D_NET,
+ "Aborting RPC: service %d, peer %s, state %s, why %d\n",
+ rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
+ swi_state2str(rpc->crpc_wi.swi_state), why);
+
+ rpc->crpc_aborted = 1;
+ rpc->crpc_status = why;
+ swi_schedule_workitem(&rpc->crpc_wi);
+ return;
+}
+
+/* called with rpc->crpc_lock held */
+void
+srpc_post_rpc (srpc_client_rpc_t *rpc)
+{
+ LASSERT (!rpc->crpc_aborted);
+ LASSERT (srpc_data.rpc_state == SRPC_STATE_RUNNING);
+
+ CDEBUG (D_NET, "Posting RPC: peer %s, service %d, timeout %d\n",
+ libcfs_id2str(rpc->crpc_dest), rpc->crpc_service,
+ rpc->crpc_timeout);
+
+ srpc_add_client_rpc_timer(rpc);
+ swi_schedule_workitem(&rpc->crpc_wi);
+ return;
+}
+
+
+int
+srpc_send_reply(struct srpc_server_rpc *rpc)
+{
+ srpc_event_t *ev = &rpc->srpc_ev;
+ struct srpc_msg *msg = &rpc->srpc_replymsg;
+ struct srpc_buffer *buffer = rpc->srpc_reqstbuf;
+ struct srpc_service_cd *scd = rpc->srpc_scd;
+ struct srpc_service *sv = scd->scd_svc;
+ __u64 rpyid;
+ int rc;
+
+ LASSERT(buffer != NULL);
+ rpyid = buffer->buf_msg.msg_body.reqst.rpyid;
+
+ spin_lock(&scd->scd_lock);
+
+ if (!sv->sv_shuttingdown && !srpc_serv_is_framework(sv)) {
+ /* Repost buffer before replying since test client
+ * might send me another RPC once it gets the reply */
+ if (srpc_service_post_buffer(scd, buffer) != 0)
+ CWARN("Failed to repost %s buffer\n", sv->sv_name);
+ rpc->srpc_reqstbuf = NULL;
+ }
+
+ spin_unlock(&scd->scd_lock);
+
+ ev->ev_fired = 0;
+ ev->ev_data = rpc;
+ ev->ev_type = SRPC_REPLY_SENT;
+
+ msg->msg_magic = SRPC_MSG_MAGIC;
+ msg->msg_version = SRPC_MSG_VERSION;
+ msg->msg_type = srpc_service2reply(sv->sv_id);
+
+ rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, rpyid, msg,
+ sizeof(*msg), LNET_MD_OP_PUT,
+ rpc->srpc_peer, rpc->srpc_self,
+ &rpc->srpc_replymdh, ev);
+ if (rc != 0)
+ ev->ev_fired = 1; /* no more event expected */
+ return rc;
+}
+
+/* when in kernel always called with LNET_LOCK() held, and in thread context */
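+/* Dispatch an LNet completion event to the client RPC, server RPC or service
+ * buffer it belongs to and reschedule the owning workitem. */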
+void
+srpc_lnet_ev_handler(lnet_event_t *ev)
+{
+ struct srpc_service_cd *scd;
+ srpc_event_t *rpcev = ev->md.user_ptr;
+ srpc_client_rpc_t *crpc;
+ srpc_server_rpc_t *srpc;
+ srpc_buffer_t *buffer;
+ srpc_service_t *sv;
+ srpc_msg_t *msg;
+ srpc_msg_type_t type;
+
+ LASSERT (!in_interrupt());
+
+ if (ev->status != 0) {
+ spin_lock(&srpc_data.rpc_glock);
+ srpc_data.rpc_counters.errors++;
+ spin_unlock(&srpc_data.rpc_glock);
+ }
+
+ rpcev->ev_lnet = ev->type;
+
+ switch (rpcev->ev_type) {
+ default:
+ CERROR("Unknown event: status %d, type %d, lnet %d\n",
+ rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet);
+ LBUG ();
+ case SRPC_REQUEST_SENT:
+ if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
+ spin_lock(&srpc_data.rpc_glock);
+ srpc_data.rpc_counters.rpcs_sent++;
+ spin_unlock(&srpc_data.rpc_glock);
+ }
+ case SRPC_REPLY_RCVD:
+ case SRPC_BULK_REQ_RCVD:
+ crpc = rpcev->ev_data;
+
+ if (rpcev != &crpc->crpc_reqstev &&
+ rpcev != &crpc->crpc_replyev &&
+ rpcev != &crpc->crpc_bulkev) {
+ CERROR("rpcev %p, crpc %p, reqstev %p, replyev %p, bulkev %p\n",
+ rpcev, crpc, &crpc->crpc_reqstev,
+ &crpc->crpc_replyev, &crpc->crpc_bulkev);
+ CERROR("Bad event: status %d, type %d, lnet %d\n",
+ rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet);
+ LBUG ();
+ }
+
+ spin_lock(&crpc->crpc_lock);
+
+ LASSERT(rpcev->ev_fired == 0);
+ rpcev->ev_fired = 1;
+ rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
+ -EINTR : ev->status;
+ swi_schedule_workitem(&crpc->crpc_wi);
+
+ spin_unlock(&crpc->crpc_lock);
+ break;
+
+ case SRPC_REQUEST_RCVD:
+ scd = rpcev->ev_data;
+ sv = scd->scd_svc;
+
+ LASSERT(rpcev == &scd->scd_ev);
+
+ spin_lock(&scd->scd_lock);
+
+ LASSERT (ev->unlinked);
+ LASSERT (ev->type == LNET_EVENT_PUT ||
+ ev->type == LNET_EVENT_UNLINK);
+ LASSERT (ev->type != LNET_EVENT_UNLINK ||
+ sv->sv_shuttingdown);
+
+ buffer = container_of(ev->md.start, srpc_buffer_t, buf_msg);
+ buffer->buf_peer = ev->initiator;
+ buffer->buf_self = ev->target.nid;
+
+ LASSERT(scd->scd_buf_nposted > 0);
+ scd->scd_buf_nposted--;
+
+ if (sv->sv_shuttingdown) {
+			/* Leave buffer on scd->scd_buf_posted since
+			 * srpc_finish_service needs to traverse it. */
+ spin_unlock(&scd->scd_lock);
+ break;
+ }
+
+ if (scd->scd_buf_err_stamp != 0 &&
+ scd->scd_buf_err_stamp < cfs_time_current_sec()) {
+ /* re-enable adding buffer */
+ scd->scd_buf_err_stamp = 0;
+ scd->scd_buf_err = 0;
+ }
+
+ if (scd->scd_buf_err == 0 && /* adding buffer is enabled */
+ scd->scd_buf_adjust == 0 &&
+ scd->scd_buf_nposted < scd->scd_buf_low) {
+ scd->scd_buf_adjust = MAX(scd->scd_buf_total / 2,
+ SFW_TEST_WI_MIN);
+ swi_schedule_workitem(&scd->scd_buf_wi);
+ }
+
+ list_del(&buffer->buf_list); /* from scd->scd_buf_posted */
+ msg = &buffer->buf_msg;
+ type = srpc_service2request(sv->sv_id);
+
+ if (ev->status != 0 || ev->mlength != sizeof(*msg) ||
+ (msg->msg_type != type &&
+ msg->msg_type != __swab32(type)) ||
+ (msg->msg_magic != SRPC_MSG_MAGIC &&
+ msg->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
+ CERROR ("Dropping RPC (%s) from %s: "
+ "status %d mlength %d type %u magic %u.\n",
+ sv->sv_name, libcfs_id2str(ev->initiator),
+ ev->status, ev->mlength,
+ msg->msg_type, msg->msg_magic);
+
+ /* NB can't call srpc_service_recycle_buffer here since
+ * it may call LNetM[DE]Attach. The invalid magic tells
+ * srpc_handle_rpc to drop this RPC */
+ msg->msg_magic = 0;
+ }
+
+ if (!list_empty(&scd->scd_rpc_free)) {
+ srpc = list_entry(scd->scd_rpc_free.next,
+ struct srpc_server_rpc,
+ srpc_list);
+ list_del(&srpc->srpc_list);
+
+ srpc_init_server_rpc(srpc, scd, buffer);
+ list_add_tail(&srpc->srpc_list,
+ &scd->scd_rpc_active);
+ swi_schedule_workitem(&srpc->srpc_wi);
+ } else {
+ list_add_tail(&buffer->buf_list,
+ &scd->scd_buf_blocked);
+ }
+
+ spin_unlock(&scd->scd_lock);
+
+ spin_lock(&srpc_data.rpc_glock);
+ srpc_data.rpc_counters.rpcs_rcvd++;
+ spin_unlock(&srpc_data.rpc_glock);
+ break;
+
+ case SRPC_BULK_GET_RPLD:
+ LASSERT (ev->type == LNET_EVENT_SEND ||
+ ev->type == LNET_EVENT_REPLY ||
+ ev->type == LNET_EVENT_UNLINK);
+
+ if (!ev->unlinked)
+ break; /* wait for final event */
+
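+ /* fall through */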
+ case SRPC_BULK_PUT_SENT:
+ if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
+ spin_lock(&srpc_data.rpc_glock);
+
+ if (rpcev->ev_type == SRPC_BULK_GET_RPLD)
+ srpc_data.rpc_counters.bulk_get += ev->mlength;
+ else
+ srpc_data.rpc_counters.bulk_put += ev->mlength;
+
+ spin_unlock(&srpc_data.rpc_glock);
+ }
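+ /* fall through */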
+ case SRPC_REPLY_SENT:
+ srpc = rpcev->ev_data;
+ scd = srpc->srpc_scd;
+
+ LASSERT(rpcev == &srpc->srpc_ev);
+
+ spin_lock(&scd->scd_lock);
+
+ rpcev->ev_fired = 1;
+ rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
+ -EINTR : ev->status;
+ swi_schedule_workitem(&srpc->srpc_wi);
+
+ spin_unlock(&scd->scd_lock);
+ break;
+ }
+}
+
+
+int
+srpc_startup (void)
+{
+ int rc;
+
+ memset(&srpc_data, 0, sizeof(struct smoketest_rpc));
+ spin_lock_init(&srpc_data.rpc_glock);
+
+ /* 1 second pause to avoid timestamp reuse */
+ cfs_pause(cfs_time_seconds(1));
+ srpc_data.rpc_matchbits = ((__u64) cfs_time_current_sec()) << 48;
+
+ srpc_data.rpc_state = SRPC_STATE_NONE;
+
+ rc = LNetNIInit(LUSTRE_SRV_LNET_PID);
+ if (rc < 0) {
+ CERROR ("LNetNIInit() has failed: %d\n", rc);
+ return rc;
+ }
+
+ srpc_data.rpc_state = SRPC_STATE_NI_INIT;
+
+ LNetInvalidateHandle(&srpc_data.rpc_lnet_eq);
+ rc = LNetEQAlloc(0, srpc_lnet_ev_handler, &srpc_data.rpc_lnet_eq);
+ if (rc != 0) {
+ CERROR("LNetEQAlloc() has failed: %d\n", rc);
+ goto bail;
+ }
+
+ rc = LNetSetLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
+ LASSERT(rc == 0);
+ rc = LNetSetLazyPortal(SRPC_REQUEST_PORTAL);
+ LASSERT(rc == 0);
+
+ srpc_data.rpc_state = SRPC_STATE_EQ_INIT;
+
+ rc = stt_startup();
+
+bail:
+ if (rc != 0)
+ srpc_shutdown();
+ else
+ srpc_data.rpc_state = SRPC_STATE_RUNNING;
+
+ return rc;
+}
+
+void
+srpc_shutdown (void)
+{
+ int i;
+ int rc;
+ int state;
+
+ state = srpc_data.rpc_state;
+ srpc_data.rpc_state = SRPC_STATE_STOPPING;
+
+ switch (state) {
+ default:
+ LBUG ();
+ case SRPC_STATE_RUNNING:
+ spin_lock(&srpc_data.rpc_glock);
+
+ for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) {
+ srpc_service_t *sv = srpc_data.rpc_services[i];
+
+ LASSERTF (sv == NULL,
+ "service not empty: id %d, name %s\n",
+ i, sv->sv_name);
+ }
+
+ spin_unlock(&srpc_data.rpc_glock);
+
+ stt_shutdown();
+
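+ /* fall through */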
+ case SRPC_STATE_EQ_INIT:
+ rc = LNetClearLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
+ rc = LNetClearLazyPortal(SRPC_REQUEST_PORTAL);
+ LASSERT (rc == 0);
+ rc = LNetEQFree(srpc_data.rpc_lnet_eq);
+ LASSERT (rc == 0); /* the EQ should have no user by now */
+
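+ /* fall through */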
+ case SRPC_STATE_NI_INIT:
+ LNetNIFini();
+ }
+
+ return;
+}
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.h b/drivers/staging/lustre/lnet/selftest/rpc.h
new file mode 100644
index 0000000..b905d49
--- /dev/null
+++ b/drivers/staging/lustre/lnet/selftest/rpc.h
@@ -0,0 +1,302 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __SELFTEST_RPC_H__
+#define __SELFTEST_RPC_H__
+
+#include <linux/lnet/lnetst.h>
+
+/*
+ * LST wired structures
+ *
+ * XXX: *REPLY == *REQST + 1
+ */
+typedef enum {
+ SRPC_MSG_MKSN_REQST = 0,
+ SRPC_MSG_MKSN_REPLY = 1,
+ SRPC_MSG_RMSN_REQST = 2,
+ SRPC_MSG_RMSN_REPLY = 3,
+ SRPC_MSG_BATCH_REQST = 4,
+ SRPC_MSG_BATCH_REPLY = 5,
+ SRPC_MSG_STAT_REQST = 6,
+ SRPC_MSG_STAT_REPLY = 7,
+ SRPC_MSG_TEST_REQST = 8,
+ SRPC_MSG_TEST_REPLY = 9,
+ SRPC_MSG_DEBUG_REQST = 10,
+ SRPC_MSG_DEBUG_REPLY = 11,
+ SRPC_MSG_BRW_REQST = 12,
+ SRPC_MSG_BRW_REPLY = 13,
+ SRPC_MSG_PING_REQST = 14,
+ SRPC_MSG_PING_REPLY = 15,
+ SRPC_MSG_JOIN_REQST = 16,
+ SRPC_MSG_JOIN_REPLY = 17,
+} srpc_msg_type_t;
+
+
+/* CAVEAT EMPTOR:
+ * All srpc_*_reqst_t's 1st field must be matchbits of reply buffer,
+ * and 2nd field matchbits of bulk buffer if any.
+ *
+ * All srpc_*_reply_t's 1st field must be a __u32 status, and 2nd field
+ * session id if needed.
+ */
+typedef struct {
+ __u64 rpyid; /* reply buffer matchbits */
+ __u64 bulkid; /* bulk buffer matchbits */
+} WIRE_ATTR srpc_generic_reqst_t;
+
+typedef struct {
+ __u32 status;
+ lst_sid_t sid;
+} WIRE_ATTR srpc_generic_reply_t;
+
+/* FRAMEWORK RPCs */
+typedef struct {
+ __u64 mksn_rpyid; /* reply buffer matchbits */
+ lst_sid_t mksn_sid; /* session id */
+ __u32 mksn_force; /* use brute force */
+ char mksn_name[LST_NAME_SIZE];
+} WIRE_ATTR srpc_mksn_reqst_t; /* make session request */
+
+typedef struct {
+ __u32 mksn_status; /* session status */
+ lst_sid_t mksn_sid; /* session id */
+ __u32 mksn_timeout; /* session timeout */
+ char mksn_name[LST_NAME_SIZE];
+} WIRE_ATTR srpc_mksn_reply_t; /* make session reply */
+
+typedef struct {
+ __u64 rmsn_rpyid; /* reply buffer matchbits */
+ lst_sid_t rmsn_sid; /* session id */
+} WIRE_ATTR srpc_rmsn_reqst_t; /* remove session request */
+
+typedef struct {
+ __u32 rmsn_status;
+ lst_sid_t rmsn_sid; /* session id */
+} WIRE_ATTR srpc_rmsn_reply_t; /* remove session reply */
+
+typedef struct {
+ __u64 join_rpyid; /* reply buffer matchbits */
+ lst_sid_t join_sid; /* session id to join */
+ char join_group[LST_NAME_SIZE]; /* group name */
+} WIRE_ATTR srpc_join_reqst_t;
+
+typedef struct {
+ __u32 join_status; /* returned status */
+ lst_sid_t join_sid; /* session id */
+ __u32 join_timeout; /* # seconds' inactivity to expire */
+ char join_session[LST_NAME_SIZE]; /* session name */
+} WIRE_ATTR srpc_join_reply_t;
+
+typedef struct {
+ __u64 dbg_rpyid; /* reply buffer matchbits */
+ lst_sid_t dbg_sid; /* session id */
+ __u32 dbg_flags; /* bitmap of debug */
+} WIRE_ATTR srpc_debug_reqst_t;
+
+typedef struct {
+ __u32 dbg_status; /* returned code */
+ lst_sid_t dbg_sid; /* session id */
+ __u32 dbg_timeout; /* session timeout */
+ __u32 dbg_nbatch; /* # of batches in the node */
+ char dbg_name[LST_NAME_SIZE]; /* session name */
+} WIRE_ATTR srpc_debug_reply_t;
+
+#define SRPC_BATCH_OPC_RUN 1
+#define SRPC_BATCH_OPC_STOP 2
+#define SRPC_BATCH_OPC_QUERY 3
+
+typedef struct {
+ __u64 bar_rpyid; /* reply buffer matchbits */
+ lst_sid_t bar_sid; /* session id */
+ lst_bid_t bar_bid; /* batch id */
+ __u32 bar_opc; /* create/start/stop batch */
+ __u32 bar_testidx; /* index of test */
+ __u32 bar_arg; /* parameters */
+} WIRE_ATTR srpc_batch_reqst_t;
+
+typedef struct {
+ __u32 bar_status; /* status of request */
+ lst_sid_t bar_sid; /* session id */
+ __u32 bar_active; /* # of active tests in batch/test */
+ __u32 bar_time; /* remained time */
+} WIRE_ATTR srpc_batch_reply_t;
+
+typedef struct {
+ __u64 str_rpyid; /* reply buffer matchbits */
+ lst_sid_t str_sid; /* session id */
+ __u32 str_type; /* type of stat */
+} WIRE_ATTR srpc_stat_reqst_t;
+
+typedef struct {
+ __u32 str_status;
+ lst_sid_t str_sid;
+ sfw_counters_t str_fw;
+ srpc_counters_t str_rpc;
+ lnet_counters_t str_lnet;
+} WIRE_ATTR srpc_stat_reply_t;
+
+typedef struct {
+ __u32 blk_opc; /* bulk operation code */
+ __u32 blk_npg; /* # of pages */
+ __u32 blk_flags; /* reserved flags */
+} WIRE_ATTR test_bulk_req_t;
+
+typedef struct {
+ /** bulk operation code */
+ __u16 blk_opc;
+ /** data check flags */
+ __u16 blk_flags;
+ /** data length */
+ __u32 blk_len;
+ /** reserved: offset */
+ __u32 blk_offset;
+} WIRE_ATTR test_bulk_req_v1_t;
+
+typedef struct {
+ __u32 png_size; /* size of ping message */
+ __u32 png_flags; /* reserved flags */
+} WIRE_ATTR test_ping_req_t;
+
+typedef struct {
+ __u64 tsr_rpyid; /* reply buffer matchbits */
+ __u64 tsr_bulkid; /* bulk buffer matchbits */
+ lst_sid_t tsr_sid; /* session id */
+ lst_bid_t tsr_bid; /* batch id */
+ __u32 tsr_service; /* test type: bulk|ping|... */
+ /* test client loop count or # server buffers needed */
+ __u32 tsr_loop;
+ __u32 tsr_concur; /* concurrency of test */
+ __u8 tsr_is_client; /* is test client or not */
+ __u8 tsr_stop_onerr; /* stop on error */
+ __u32 tsr_ndest; /* # of dest nodes */
+
+ union {
+ test_ping_req_t ping;
+ test_bulk_req_t bulk_v0;
+ test_bulk_req_v1_t bulk_v1;
+ } tsr_u;
+} WIRE_ATTR srpc_test_reqst_t;
+
+typedef struct {
+ __u32 tsr_status; /* returned code */
+ lst_sid_t tsr_sid;
+} WIRE_ATTR srpc_test_reply_t;
+
+/* TEST RPCs */
+typedef struct {
+ __u64 pnr_rpyid;
+ __u32 pnr_magic;
+ __u32 pnr_seq;
+ __u64 pnr_time_sec;
+ __u64 pnr_time_usec;
+} WIRE_ATTR srpc_ping_reqst_t;
+
+typedef struct {
+ __u32 pnr_status;
+ __u32 pnr_magic;
+ __u32 pnr_seq;
+} WIRE_ATTR srpc_ping_reply_t;
+
+typedef struct {
+ __u64 brw_rpyid; /* reply buffer matchbits */
+ __u64 brw_bulkid; /* bulk buffer matchbits */
+ __u32 brw_rw; /* read or write */
+ __u32 brw_len; /* bulk data len */
+ __u32 brw_flags; /* bulk data patterns */
+} WIRE_ATTR srpc_brw_reqst_t; /* bulk r/w request */
+
+typedef struct {
+ __u32 brw_status;
+} WIRE_ATTR srpc_brw_reply_t; /* bulk r/w reply */
+
+#define SRPC_MSG_MAGIC 0xeeb0f00d
+#define SRPC_MSG_VERSION 1
+
+typedef struct srpc_msg {
+ /** magic number */
+ __u32 msg_magic;
+ /** message version number */
+ __u32 msg_version;
+ /** type of message body: srpc_msg_type_t */
+ __u32 msg_type;
+ __u32 msg_reserved0;
+ __u32 msg_reserved1;
+ /** test session features */
+ __u32 msg_ses_feats;
+ union {
+ srpc_generic_reqst_t reqst;
+ srpc_generic_reply_t reply;
+
+ srpc_mksn_reqst_t mksn_reqst;
+ srpc_mksn_reply_t mksn_reply;
+ srpc_rmsn_reqst_t rmsn_reqst;
+ srpc_rmsn_reply_t rmsn_reply;
+ srpc_debug_reqst_t dbg_reqst;
+ srpc_debug_reply_t dbg_reply;
+ srpc_batch_reqst_t bat_reqst;
+ srpc_batch_reply_t bat_reply;
+ srpc_stat_reqst_t stat_reqst;
+ srpc_stat_reply_t stat_reply;
+ srpc_test_reqst_t tes_reqst;
+ srpc_test_reply_t tes_reply;
+ srpc_join_reqst_t join_reqst;
+ srpc_join_reply_t join_reply;
+
+ srpc_ping_reqst_t ping_reqst;
+ srpc_ping_reply_t ping_reply;
+ srpc_brw_reqst_t brw_reqst;
+ srpc_brw_reply_t brw_reply;
+ } msg_body;
+} WIRE_ATTR srpc_msg_t;
+
+static inline void
+srpc_unpack_msg_hdr(srpc_msg_t *msg)
+{
+ if (msg->msg_magic == SRPC_MSG_MAGIC)
+ return; /* no flipping needed */
+
+ /* We do not swap the magic number here as it is needed to
+ * determine whether the body needs to be swapped. */
+ /* __swab32s(&msg->msg_magic); */
+ __swab32s(&msg->msg_type);
+ __swab32s(&msg->msg_version);
+ __swab32s(&msg->msg_ses_feats);
+ __swab32s(&msg->msg_reserved0);
+ __swab32s(&msg->msg_reserved1);
+}
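An illustrative sketch (not part of the patch) of how a receiver might combine the magic check with this helper; example_check_msg is a hypothetical function that mirrors the checks performed in rpc.c above.

static int example_check_msg(srpc_msg_t *msg)
{
	/* Reject anything whose magic matches neither the native nor the
	 * byte-swapped constant, then let srpc_unpack_msg_hdr() fix the
	 * header endianness before the version is inspected. */
	if (msg->msg_magic != SRPC_MSG_MAGIC &&
	    msg->msg_magic != __swab32(SRPC_MSG_MAGIC))
		return -EPROTO;

	srpc_unpack_msg_hdr(msg);

	if (msg->msg_version != SRPC_MSG_VERSION)
		return -EPROTO;

	return 0;
}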
+
+#endif /* __SELFTEST_RPC_H__ */
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
new file mode 100644
index 0000000..8053b05
--- /dev/null
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -0,0 +1,611 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/selftest/selftest.h
+ *
+ * Author: Isaac Huang <isaac@clusterfs.com>
+ */
+#ifndef __SELFTEST_SELFTEST_H__
+#define __SELFTEST_SELFTEST_H__
+
+#define LNET_ONLY
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/lnet/lnet.h>
+#include <linux/lnet/lib-lnet.h>
+#include <linux/lnet/lib-types.h>
+#include <linux/lnet/lnetst.h>
+
+#include "rpc.h"
+#include "timer.h"
+
+#ifndef MADE_WITHOUT_COMPROMISE
+#define MADE_WITHOUT_COMPROMISE
+#endif
+
+
+#define SWI_STATE_NEWBORN 0
+#define SWI_STATE_REPLY_SUBMITTED 1
+#define SWI_STATE_REPLY_SENT 2
+#define SWI_STATE_REQUEST_SUBMITTED 3
+#define SWI_STATE_REQUEST_SENT 4
+#define SWI_STATE_REPLY_RECEIVED 5
+#define SWI_STATE_BULK_STARTED 6
+#define SWI_STATE_DONE 10
+
+/* forward refs */
+struct srpc_service;
+struct srpc_service_cd;
+struct sfw_test_unit;
+struct sfw_test_instance;
+
+/* services below SRPC_FRAMEWORK_SERVICE_MAX_ID are framework
+ * services, e.g. create/modify session.
+ */
+#define SRPC_SERVICE_DEBUG 0
+#define SRPC_SERVICE_MAKE_SESSION 1
+#define SRPC_SERVICE_REMOVE_SESSION 2
+#define SRPC_SERVICE_BATCH 3
+#define SRPC_SERVICE_TEST 4
+#define SRPC_SERVICE_QUERY_STAT 5
+#define SRPC_SERVICE_JOIN 6
+#define SRPC_FRAMEWORK_SERVICE_MAX_ID 10
+/* other services start from SRPC_FRAMEWORK_SERVICE_MAX_ID+1 */
+#define SRPC_SERVICE_BRW 11
+#define SRPC_SERVICE_PING 12
+#define SRPC_SERVICE_MAX_ID 12
+
+#define SRPC_REQUEST_PORTAL 50
+/* a lazy portal for framework RPC requests */
+#define SRPC_FRAMEWORK_REQUEST_PORTAL 51
+/* all reply/bulk RDMAs go to this portal */
+#define SRPC_RDMA_PORTAL 52
+
+static inline srpc_msg_type_t
+srpc_service2request (int service)
+{
+ switch (service) {
+ default:
+ LBUG ();
+ case SRPC_SERVICE_DEBUG:
+ return SRPC_MSG_DEBUG_REQST;
+
+ case SRPC_SERVICE_MAKE_SESSION:
+ return SRPC_MSG_MKSN_REQST;
+
+ case SRPC_SERVICE_REMOVE_SESSION:
+ return SRPC_MSG_RMSN_REQST;
+
+ case SRPC_SERVICE_BATCH:
+ return SRPC_MSG_BATCH_REQST;
+
+ case SRPC_SERVICE_TEST:
+ return SRPC_MSG_TEST_REQST;
+
+ case SRPC_SERVICE_QUERY_STAT:
+ return SRPC_MSG_STAT_REQST;
+
+ case SRPC_SERVICE_BRW:
+ return SRPC_MSG_BRW_REQST;
+
+ case SRPC_SERVICE_PING:
+ return SRPC_MSG_PING_REQST;
+
+ case SRPC_SERVICE_JOIN:
+ return SRPC_MSG_JOIN_REQST;
+ }
+}
+
+static inline srpc_msg_type_t
+srpc_service2reply (int service)
+{
+ return srpc_service2request(service) + 1;
+}
+
+typedef enum {
+ SRPC_BULK_REQ_RCVD = 1, /* passive bulk request (PUT sink/GET source) received */
+ SRPC_BULK_PUT_SENT = 2, /* active bulk PUT sent (source) */
+ SRPC_BULK_GET_RPLD = 3, /* active bulk GET replied (sink) */
+ SRPC_REPLY_RCVD = 4, /* incoming reply received */
+ SRPC_REPLY_SENT = 5, /* outgoing reply sent */
+ SRPC_REQUEST_RCVD = 6, /* incoming request received */
+ SRPC_REQUEST_SENT = 7, /* outgoing request sent */
+} srpc_event_type_t;
+
+/* RPC event */
+typedef struct {
+ srpc_event_type_t ev_type; /* what's up */
+ lnet_event_kind_t ev_lnet; /* LNet event type */
+ int ev_fired; /* LNet event fired? */
+ int ev_status; /* LNet event status */
+ void *ev_data; /* owning server/client RPC */
+} srpc_event_t;
+
+typedef struct {
+ int bk_len; /* len of bulk data */
+ lnet_handle_md_t bk_mdh;
+ int bk_sink; /* sink/source */
+ int bk_niov; /* # iov in bk_iovs */
+ lnet_kiov_t bk_iovs[0];
+} srpc_bulk_t; /* bulk descriptor */
+
+/* message buffer descriptor */
+typedef struct srpc_buffer {
+ struct list_head buf_list; /* chain on srpc_service::*_msgq */
+ srpc_msg_t buf_msg;
+ lnet_handle_md_t buf_mdh;
+ lnet_nid_t buf_self;
+ lnet_process_id_t buf_peer;
+} srpc_buffer_t;
+
+struct swi_workitem;
+typedef int (*swi_action_t) (struct swi_workitem *);
+
+typedef struct swi_workitem {
+ struct cfs_wi_sched *swi_sched;
+ cfs_workitem_t swi_workitem;
+ swi_action_t swi_action;
+ int swi_state;
+} swi_workitem_t;
+
+/* server-side state of a RPC */
+typedef struct srpc_server_rpc {
+ /* chain on srpc_service::*_rpcq */
+ struct list_head srpc_list;
+ struct srpc_service_cd *srpc_scd;
+ swi_workitem_t srpc_wi;
+ srpc_event_t srpc_ev; /* bulk/reply event */
+ lnet_nid_t srpc_self;
+ lnet_process_id_t srpc_peer;
+ srpc_msg_t srpc_replymsg;
+ lnet_handle_md_t srpc_replymdh;
+ srpc_buffer_t *srpc_reqstbuf;
+ srpc_bulk_t *srpc_bulk;
+
+ unsigned int srpc_aborted; /* being given up */
+ int srpc_status;
+ void (*srpc_done)(struct srpc_server_rpc *);
+} srpc_server_rpc_t;
+
+/* client-side state of a RPC */
+typedef struct srpc_client_rpc {
+ struct list_head crpc_list; /* chain on user's lists */
+ spinlock_t crpc_lock; /* serialize */
+ int crpc_service;
+ atomic_t crpc_refcount;
+ int crpc_timeout; /* # seconds to wait for reply */
+ stt_timer_t crpc_timer;
+ swi_workitem_t crpc_wi;
+ lnet_process_id_t crpc_dest;
+
+ void (*crpc_done)(struct srpc_client_rpc *);
+ void (*crpc_fini)(struct srpc_client_rpc *);
+ int crpc_status; /* completion status */
+ void *crpc_priv; /* caller data */
+
+ /* state flags */
+ unsigned int crpc_aborted:1; /* being given up */
+ unsigned int crpc_closed:1; /* completed */
+
+ /* RPC events */
+ srpc_event_t crpc_bulkev; /* bulk event */
+ srpc_event_t crpc_reqstev; /* request event */
+ srpc_event_t crpc_replyev; /* reply event */
+
+ /* bulk, request(reqst), and reply exchanged on wire */
+ srpc_msg_t crpc_reqstmsg;
+ srpc_msg_t crpc_replymsg;
+ lnet_handle_md_t crpc_reqstmdh;
+ lnet_handle_md_t crpc_replymdh;
+ srpc_bulk_t crpc_bulk;
+} srpc_client_rpc_t;
+
+#define srpc_client_rpc_size(rpc) \
+offsetof(srpc_client_rpc_t, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov])
+
+#define srpc_client_rpc_addref(rpc) \
+do { \
+ CDEBUG(D_NET, "RPC[%p] -> %s (%d)++\n", \
+ (rpc), libcfs_id2str((rpc)->crpc_dest), \
+ atomic_read(&(rpc)->crpc_refcount)); \
+ LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \
+ atomic_inc(&(rpc)->crpc_refcount); \
+} while (0)
+
+#define srpc_client_rpc_decref(rpc) \
+do { \
+ CDEBUG(D_NET, "RPC[%p] -> %s (%d)--\n", \
+ (rpc), libcfs_id2str((rpc)->crpc_dest), \
+ atomic_read(&(rpc)->crpc_refcount)); \
+ LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \
+ if (atomic_dec_and_test(&(rpc)->crpc_refcount)) \
+ srpc_destroy_client_rpc(rpc); \
+} while (0)
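A minimal usage sketch of the reference-count helpers above (illustrative only, not part of the patch); example_rpc_user is hypothetical.

static void example_rpc_user(srpc_client_rpc_t *rpc)
{
	srpc_client_rpc_addref(rpc);	/* pin the RPC while we use it */

	/* ... hand the RPC to some asynchronous path ... */

	srpc_client_rpc_decref(rpc);	/* dropping the last reference frees
					 * it via srpc_destroy_client_rpc() */
}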
+
+#define srpc_event_pending(rpc) ((rpc)->crpc_bulkev.ev_fired == 0 || \
+ (rpc)->crpc_reqstev.ev_fired == 0 || \
+ (rpc)->crpc_replyev.ev_fired == 0)
+
+/* CPU partition data of srpc service */
+struct srpc_service_cd {
+ /** serialize */
+ spinlock_t scd_lock;
+ /** backref to service */
+ struct srpc_service *scd_svc;
+ /** event buffer */
+ srpc_event_t scd_ev;
+ /** free RPC descriptors */
+ struct list_head scd_rpc_free;
+ /** in-flight RPCs */
+ struct list_head scd_rpc_active;
+ /** workitem for posting buffer */
+ swi_workitem_t scd_buf_wi;
+ /** CPT id */
+ int scd_cpt;
+ /** error code for scd_buf_wi */
+ int scd_buf_err;
+ /** timestamp for scd_buf_err */
+ unsigned long scd_buf_err_stamp;
+ /** total # request buffers */
+ int scd_buf_total;
+ /** # posted request buffers */
+ int scd_buf_nposted;
+ /** in progress of buffer posting */
+ int scd_buf_posting;
+ /** allocate more buffers if scd_buf_nposted < scd_buf_low */
+ int scd_buf_low;
+ /** increase/decrease some buffers */
+ int scd_buf_adjust;
+ /** posted message buffers */
+ struct list_head scd_buf_posted;
+ /** blocked for RPC descriptor */
+ struct list_head scd_buf_blocked;
+};
+
+/* number of server workitems (mini-thread) for testing service */
+#define SFW_TEST_WI_MIN 256
+#define SFW_TEST_WI_MAX 2048
+/* extra buffers for tolerating buggy peers, or unbalanced number
+ * of peers between partitions */
+#define SFW_TEST_WI_EXTRA 64
+
+/* number of server workitems (mini-thread) for framework service */
+#define SFW_FRWK_WI_MIN 16
+#define SFW_FRWK_WI_MAX 256
+
+typedef struct srpc_service {
+ int sv_id; /* service id */
+ const char *sv_name; /* human readable name */
+ int sv_wi_total; /* total server workitems */
+ int sv_shuttingdown;
+ int sv_ncpts;
+ /* percpt data for srpc_service */
+ struct srpc_service_cd **sv_cpt_data;
+ /* Service callbacks:
+ * - sv_handler: process incoming RPC request
+ * - sv_bulk_ready: notify bulk data
+ */
+ int (*sv_handler) (srpc_server_rpc_t *);
+ int (*sv_bulk_ready) (srpc_server_rpc_t *, int);
+} srpc_service_t;
+
+typedef struct {
+ struct list_head sn_list; /* chain on fw_zombie_sessions */
+ lst_sid_t sn_id; /* unique identifier */
+ unsigned int sn_timeout; /* # seconds' inactivity to expire */
+ int sn_timer_active;
+ unsigned int sn_features;
+ stt_timer_t sn_timer;
+ struct list_head sn_batches; /* list of batches */
+ char sn_name[LST_NAME_SIZE];
+ atomic_t sn_refcount;
+ atomic_t sn_brw_errors;
+ atomic_t sn_ping_errors;
+ cfs_time_t sn_started;
+} sfw_session_t;
+
+#define sfw_sid_equal(sid0, sid1) ((sid0).ses_nid == (sid1).ses_nid && \
+ (sid0).ses_stamp == (sid1).ses_stamp)
+
+typedef struct {
+ struct list_head bat_list; /* chain on sn_batches */
+ lst_bid_t bat_id; /* batch id */
+ int bat_error; /* error code of batch */
+ sfw_session_t *bat_session; /* batch's session */
+ atomic_t bat_nactive; /* # of active tests */
+ struct list_head bat_tests; /* test instances */
+} sfw_batch_t;
+
+typedef struct {
+ int (*tso_init)(struct sfw_test_instance *tsi); /* initialize test client */
+ void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test client */
+ int (*tso_prep_rpc)(struct sfw_test_unit *tsu,
+ lnet_process_id_t dest,
+ srpc_client_rpc_t **rpc); /* prepare a test RPC */
+ void (*tso_done_rpc)(struct sfw_test_unit *tsu,
+ srpc_client_rpc_t *rpc); /* finish a test RPC */
+} sfw_test_client_ops_t;
+
+typedef struct sfw_test_instance {
+ struct list_head tsi_list; /* chain on batch */
+ int tsi_service; /* test type */
+ sfw_batch_t *tsi_batch; /* batch */
+ sfw_test_client_ops_t *tsi_ops; /* test client operations */
+
+ /* public parameter for all test units */
+ unsigned int tsi_is_client:1; /* is test client */
+ unsigned int tsi_stoptsu_onerr:1; /* stop tsu on error */
+ int tsi_concur; /* concurrency */
+ int tsi_loop; /* loop count */
+
+ /* status of test instance */
+ spinlock_t tsi_lock; /* serialize */
+ unsigned int tsi_stopping:1; /* test is stopping */
+ atomic_t tsi_nactive; /* # of active test unit */
+ struct list_head tsi_units; /* test units */
+ struct list_head tsi_free_rpcs; /* free rpcs */
+ struct list_head tsi_active_rpcs; /* active rpcs */
+
+ union {
+ test_ping_req_t ping; /* ping parameter */
+ test_bulk_req_t bulk_v0; /* bulk parameter */
+ test_bulk_req_v1_t bulk_v1; /* bulk v1 parameter */
+ } tsi_u;
+} sfw_test_instance_t;
+
+/* XXX: trailing (PAGE_CACHE_SIZE % sizeof(lnet_process_id_t)) bytes at
+ * the end of pages are not used */
+#define SFW_MAX_CONCUR LST_MAX_CONCUR
+#define SFW_ID_PER_PAGE (PAGE_CACHE_SIZE / sizeof(lnet_process_id_packed_t))
+#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
+#define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)
+
+typedef struct sfw_test_unit {
+ struct list_head tsu_list; /* chain on lst_test_instance */
+ lnet_process_id_t tsu_dest; /* id of dest node */
+ int tsu_loop; /* loop count of the test */
+ sfw_test_instance_t *tsu_instance; /* pointer to test instance */
+ void *tsu_private; /* private data */
+ swi_workitem_t tsu_worker; /* workitem of the test unit */
+} sfw_test_unit_t;
+
+typedef struct sfw_test_case {
+ struct list_head tsc_list; /* chain on fw_tests */
+ srpc_service_t *tsc_srv_service; /* test service */
+ sfw_test_client_ops_t *tsc_cli_ops; /* ops of test client */
+} sfw_test_case_t;
+
+srpc_client_rpc_t *
+sfw_create_rpc(lnet_process_id_t peer, int service,
+ unsigned features, int nbulkiov, int bulklen,
+ void (*done) (srpc_client_rpc_t *), void *priv);
+int sfw_create_test_rpc(sfw_test_unit_t *tsu,
+ lnet_process_id_t peer, unsigned features,
+ int nblk, int blklen, srpc_client_rpc_t **rpc);
+void sfw_abort_rpc(srpc_client_rpc_t *rpc);
+void sfw_post_rpc(srpc_client_rpc_t *rpc);
+void sfw_client_rpc_done(srpc_client_rpc_t *rpc);
+void sfw_unpack_message(srpc_msg_t *msg);
+void sfw_free_pages(srpc_server_rpc_t *rpc);
+void sfw_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i);
+int sfw_alloc_pages(srpc_server_rpc_t *rpc, int cpt, int npages, int len,
+ int sink);
+int sfw_make_session (srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply);
+
+srpc_client_rpc_t *
+srpc_create_client_rpc(lnet_process_id_t peer, int service,
+ int nbulkiov, int bulklen,
+ void (*rpc_done)(srpc_client_rpc_t *),
+ void (*rpc_fini)(srpc_client_rpc_t *), void *priv);
+void srpc_post_rpc(srpc_client_rpc_t *rpc);
+void srpc_abort_rpc(srpc_client_rpc_t *rpc, int why);
+void srpc_free_bulk(srpc_bulk_t *bk);
+srpc_bulk_t *srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len,
+ int sink);
+int srpc_send_rpc(swi_workitem_t *wi);
+int srpc_send_reply(srpc_server_rpc_t *rpc);
+int srpc_add_service(srpc_service_t *sv);
+int srpc_remove_service(srpc_service_t *sv);
+void srpc_shutdown_service(srpc_service_t *sv);
+void srpc_abort_service(srpc_service_t *sv);
+int srpc_finish_service(srpc_service_t *sv);
+int srpc_service_add_buffers(srpc_service_t *sv, int nbuffer);
+void srpc_service_remove_buffers(srpc_service_t *sv, int nbuffer);
+void srpc_get_counters(srpc_counters_t *cnt);
+void srpc_set_counters(const srpc_counters_t *cnt);
+
+extern struct cfs_wi_sched *lst_sched_serial;
+extern struct cfs_wi_sched **lst_sched_test;
+
+static inline int
+srpc_serv_is_framework(struct srpc_service *svc)
+{
+ return svc->sv_id < SRPC_FRAMEWORK_SERVICE_MAX_ID;
+}
+
+static inline int
+swi_wi_action(cfs_workitem_t *wi)
+{
+ swi_workitem_t *swi = container_of(wi, swi_workitem_t, swi_workitem);
+
+ return swi->swi_action(swi);
+}
+
+static inline void
+swi_init_workitem(swi_workitem_t *swi, void *data,
+ swi_action_t action, struct cfs_wi_sched *sched)
+{
+ swi->swi_sched = sched;
+ swi->swi_action = action;
+ swi->swi_state = SWI_STATE_NEWBORN;
+ cfs_wi_init(&swi->swi_workitem, data, swi_wi_action);
+}
+
+static inline void
+swi_schedule_workitem(swi_workitem_t *wi)
+{
+ cfs_wi_schedule(wi->swi_sched, &wi->swi_workitem);
+}
+
+static inline void
+swi_exit_workitem(swi_workitem_t *swi)
+{
+ cfs_wi_exit(swi->swi_sched, &swi->swi_workitem);
+}
+
+static inline int
+swi_deschedule_workitem(swi_workitem_t *swi)
+{
+ return cfs_wi_deschedule(swi->swi_sched, &swi->swi_workitem);
+}
+
+
+int sfw_startup(void);
+int srpc_startup(void);
+void sfw_shutdown(void);
+void srpc_shutdown(void);
+
+static inline void
+srpc_destroy_client_rpc (srpc_client_rpc_t *rpc)
+{
+ LASSERT (rpc != NULL);
+ LASSERT (!srpc_event_pending(rpc));
+ LASSERT (atomic_read(&rpc->crpc_refcount) == 0);
+
+ if (rpc->crpc_fini == NULL) {
+ LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
+ } else {
+ (*rpc->crpc_fini) (rpc);
+ }
+
+ return;
+}
+
+static inline void
+srpc_init_client_rpc (srpc_client_rpc_t *rpc, lnet_process_id_t peer,
+ int service, int nbulkiov, int bulklen,
+ void (*rpc_done)(srpc_client_rpc_t *),
+ void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
+{
+ LASSERT (nbulkiov <= LNET_MAX_IOV);
+
+ memset(rpc, 0, offsetof(srpc_client_rpc_t,
+ crpc_bulk.bk_iovs[nbulkiov]));
+
+ INIT_LIST_HEAD(&rpc->crpc_list);
+ swi_init_workitem(&rpc->crpc_wi, rpc, srpc_send_rpc,
+ lst_sched_test[lnet_cpt_of_nid(peer.nid)]);
+ spin_lock_init(&rpc->crpc_lock);
+ atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */
+
+ rpc->crpc_dest = peer;
+ rpc->crpc_priv = priv;
+ rpc->crpc_service = service;
+ rpc->crpc_bulk.bk_len = bulklen;
+ rpc->crpc_bulk.bk_niov = nbulkiov;
+ rpc->crpc_done = rpc_done;
+ rpc->crpc_fini = rpc_fini;
+ LNetInvalidateHandle(&rpc->crpc_reqstmdh);
+ LNetInvalidateHandle(&rpc->crpc_replymdh);
+ LNetInvalidateHandle(&rpc->crpc_bulk.bk_mdh);
+
+ /* no event is expected at this point */
+ rpc->crpc_bulkev.ev_fired =
+ rpc->crpc_reqstev.ev_fired =
+ rpc->crpc_replyev.ev_fired = 1;
+
+ rpc->crpc_reqstmsg.msg_magic = SRPC_MSG_MAGIC;
+ rpc->crpc_reqstmsg.msg_version = SRPC_MSG_VERSION;
+ rpc->crpc_reqstmsg.msg_type = srpc_service2request(service);
+ return;
+}
+
+static inline const char *
+swi_state2str (int state)
+{
+#define STATE2STR(x) case x: return #x
+ switch(state) {
+ default:
+ LBUG();
+ STATE2STR(SWI_STATE_NEWBORN);
+ STATE2STR(SWI_STATE_REPLY_SUBMITTED);
+ STATE2STR(SWI_STATE_REPLY_SENT);
+ STATE2STR(SWI_STATE_REQUEST_SUBMITTED);
+ STATE2STR(SWI_STATE_REQUEST_SENT);
+ STATE2STR(SWI_STATE_REPLY_RECEIVED);
+ STATE2STR(SWI_STATE_BULK_STARTED);
+ STATE2STR(SWI_STATE_DONE);
+ }
+#undef STATE2STR
+}
+
+#define UNUSED(x) ( (void)(x) )
+
+
+#define selftest_wait_events() cfs_pause(cfs_time_seconds(1) / 10)
+
+
+#define lst_wait_until(cond, lock, fmt, ...) \
+do { \
+ int __I = 2; \
+ while (!(cond)) { \
+ CDEBUG(IS_PO2(++__I) ? D_WARNING : D_NET, \
+ fmt, ## __VA_ARGS__); \
+ spin_unlock(&(lock)); \
+ \
+ selftest_wait_events(); \
+ \
+ spin_lock(&(lock)); \
+ } \
+} while (0)
+
+static inline void
+srpc_wait_service_shutdown(srpc_service_t *sv)
+{
+ int i = 2;
+
+ LASSERT(sv->sv_shuttingdown);
+
+ while (srpc_finish_service(sv) == 0) {
+ i++;
+ CDEBUG (((i & -i) == i) ? D_WARNING : D_NET,
+ "Waiting for %s service to shutdown...\n",
+ sv->sv_name);
+ selftest_wait_events();
+ }
+}
+
+#endif /* __SELFTEST_SELFTEST_H__ */
diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c
new file mode 100644
index 0000000..3bf4afb
--- /dev/null
+++ b/drivers/staging/lustre/lnet/selftest/timer.c
@@ -0,0 +1,253 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/selftest/timer.c
+ *
+ * Author: Isaac Huang <isaac@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include "selftest.h"
+
+
+/*
+ * Timers are implemented as a sorted queue of expiry times. The queue
+ * is slotted, with each slot holding timers which expire in a
+ * 2**STTIMER_MINPOLL (8) second period. The timers in each slot are
+ * sorted by increasing expiry time. The number of slots is 2**7 (128),
+ * to cover a time period of 1024 seconds into the future before wrapping.
+ */
+#define STTIMER_MINPOLL 3 /* log2 min poll interval (8 s) */
+#define STTIMER_SLOTTIME (1 << STTIMER_MINPOLL)
+#define STTIMER_SLOTTIMEMASK (~(STTIMER_SLOTTIME - 1))
+#define STTIMER_NSLOTS (1 << 7)
+#define STTIMER_SLOT(t) (&stt_data.stt_hash[(((t) >> STTIMER_MINPOLL) & \
+ (STTIMER_NSLOTS - 1))])
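For orientation, a sketch (not part of the patch) of how an expiry time maps to a hash slot under the macros above; stt_slot_index is a hypothetical helper and the numbers are only illustrative.

static inline int stt_slot_index(cfs_time_t expires)
{
	/* e.g. expires == 1000 s: (1000 >> 3) & 127 == 125, so the timer
	 * lands in stt_data.stt_hash[125]; expiries 1000..1007 share it. */
	return (expires >> STTIMER_MINPOLL) & (STTIMER_NSLOTS - 1);
}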
+
+struct st_timer_data {
+ spinlock_t stt_lock;
+ /* start time of the slot processed previously */
+ cfs_time_t stt_prev_slot;
+ struct list_head stt_hash[STTIMER_NSLOTS];
+ int stt_shuttingdown;
+ wait_queue_head_t stt_waitq;
+ int stt_nthreads;
+} stt_data;
+
+void
+stt_add_timer(stt_timer_t *timer)
+{
+ struct list_head *pos;
+
+ spin_lock(&stt_data.stt_lock);
+
+ LASSERT (stt_data.stt_nthreads > 0);
+ LASSERT (!stt_data.stt_shuttingdown);
+ LASSERT (timer->stt_func != NULL);
+ LASSERT (list_empty(&timer->stt_list));
+ LASSERT (cfs_time_after(timer->stt_expires, cfs_time_current_sec()));
+
+ /* a simple insertion sort */
+ list_for_each_prev (pos, STTIMER_SLOT(timer->stt_expires)) {
+ stt_timer_t *old = list_entry(pos, stt_timer_t, stt_list);
+
+ if (cfs_time_aftereq(timer->stt_expires, old->stt_expires))
+ break;
+ }
+ list_add(&timer->stt_list, pos);
+
+ spin_unlock(&stt_data.stt_lock);
+}
+
+/*
+ * The function returns whether it has deactivated a pending timer or not.
+ * (i.e. del_timer() of an inactive timer returns 0, del_timer() of an
+ * active timer returns 1.)
+ *
+ * CAVEAT EMPTOR:
+ * When 0 is returned, it is possible that timer->stt_func _is_ running on
+ * another CPU.
+ */
+int
+stt_del_timer (stt_timer_t *timer)
+{
+ int ret = 0;
+
+ spin_lock(&stt_data.stt_lock);
+
+ LASSERT (stt_data.stt_nthreads > 0);
+ LASSERT (!stt_data.stt_shuttingdown);
+
+ if (!list_empty(&timer->stt_list)) {
+ ret = 1;
+ list_del_init(&timer->stt_list);
+ }
+
+ spin_unlock(&stt_data.stt_lock);
+ return ret;
+}
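A caller-side sketch of the semantics described in the comment above (illustrative only, not part of the patch); example_cancel is hypothetical.

static int example_cancel(stt_timer_t *timer)
{
	if (stt_del_timer(timer) == 1)
		return 1;	/* still queued; stt_func will not run */

	/* Returned 0: the timer was already dequeued and stt_func may still
	 * be running on another CPU, so do not free timer->stt_data yet. */
	return 0;
}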
+
+/* called with stt_data.stt_lock held */
+int
+stt_expire_list (struct list_head *slot, cfs_time_t now)
+{
+ int expired = 0;
+ stt_timer_t *timer;
+
+ while (!list_empty(slot)) {
+ timer = list_entry(slot->next, stt_timer_t, stt_list);
+
+ if (cfs_time_after(timer->stt_expires, now))
+ break;
+
+ list_del_init(&timer->stt_list);
+ spin_unlock(&stt_data.stt_lock);
+
+ expired++;
+ (*timer->stt_func) (timer->stt_data);
+
+ spin_lock(&stt_data.stt_lock);
+ }
+
+ return expired;
+}
+
+int
+stt_check_timers (cfs_time_t *last)
+{
+ int expired = 0;
+ cfs_time_t now;
+ cfs_time_t this_slot;
+
+ now = cfs_time_current_sec();
+ this_slot = now & STTIMER_SLOTTIMEMASK;
+
+ spin_lock(&stt_data.stt_lock);
+
+ while (cfs_time_aftereq(this_slot, *last)) {
+ expired += stt_expire_list(STTIMER_SLOT(this_slot), now);
+ this_slot = cfs_time_sub(this_slot, STTIMER_SLOTTIME);
+ }
+
+ *last = now & STTIMER_SLOTTIMEMASK;
+ spin_unlock(&stt_data.stt_lock);
+ return expired;
+}
+
+
+int
+stt_timer_main (void *arg)
+{
+ int rc = 0;
+ UNUSED(arg);
+
+ SET_BUT_UNUSED(rc);
+
+ cfs_block_allsigs();
+
+ while (!stt_data.stt_shuttingdown) {
+ stt_check_timers(&stt_data.stt_prev_slot);
+
+ rc = wait_event_timeout(stt_data.stt_waitq,
+ stt_data.stt_shuttingdown,
+ cfs_time_seconds(STTIMER_SLOTTIME));
+ }
+
+ spin_lock(&stt_data.stt_lock);
+ stt_data.stt_nthreads--;
+ spin_unlock(&stt_data.stt_lock);
+ return 0;
+}
+
+int
+stt_start_timer_thread (void)
+{
+ struct task_struct *task;
+
+ LASSERT(!stt_data.stt_shuttingdown);
+
+ task = kthread_run(stt_timer_main, NULL, "st_timer");
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+
+ spin_lock(&stt_data.stt_lock);
+ stt_data.stt_nthreads++;
+ spin_unlock(&stt_data.stt_lock);
+ return 0;
+}
+
+
+int
+stt_startup (void)
+{
+ int rc = 0;
+ int i;
+
+ stt_data.stt_shuttingdown = 0;
+ stt_data.stt_prev_slot = cfs_time_current_sec() & STTIMER_SLOTTIMEMASK;
+
+ spin_lock_init(&stt_data.stt_lock);
+ for (i = 0; i < STTIMER_NSLOTS; i++)
+ INIT_LIST_HEAD(&stt_data.stt_hash[i]);
+
+ stt_data.stt_nthreads = 0;
+ init_waitqueue_head(&stt_data.stt_waitq);
+ rc = stt_start_timer_thread();
+ if (rc != 0)
+ CERROR ("Can't spawn timer thread: %d\n", rc);
+
+ return rc;
+}
+
+void
+stt_shutdown (void)
+{
+ int i;
+
+ spin_lock(&stt_data.stt_lock);
+
+ for (i = 0; i < STTIMER_NSLOTS; i++)
+ LASSERT (list_empty(&stt_data.stt_hash[i]));
+
+ stt_data.stt_shuttingdown = 1;
+
+ wake_up(&stt_data.stt_waitq);
+ lst_wait_until(stt_data.stt_nthreads == 0, stt_data.stt_lock,
+ "waiting for %d threads to terminate\n",
+ stt_data.stt_nthreads);
+
+ spin_unlock(&stt_data.stt_lock);
+}
diff --git a/drivers/staging/lustre/lnet/selftest/timer.h b/drivers/staging/lustre/lnet/selftest/timer.h
new file mode 100644
index 0000000..56dbfe5
--- /dev/null
+++ b/drivers/staging/lustre/lnet/selftest/timer.h
@@ -0,0 +1,53 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/selftest/timer.h
+ *
+ * Author: Isaac Huang <isaac@clusterfs.com>
+ */
+#ifndef __SELFTEST_TIMER_H__
+#define __SELFTEST_TIMER_H__
+
+typedef struct {
+ struct list_head stt_list;
+ cfs_time_t stt_expires;
+ void (*stt_func) (void *);
+ void *stt_data;
+} stt_timer_t;
+
+void stt_add_timer (stt_timer_t *timer);
+int stt_del_timer (stt_timer_t *timer);
+int stt_startup (void);
+void stt_shutdown (void);
+
+#endif /* __SELFTEST_TIMER_H__ */
diff --git a/drivers/staging/lustre/lustre/Kconfig b/drivers/staging/lustre/lustre/Kconfig
new file mode 100644
index 0000000..2156a44
--- /dev/null
+++ b/drivers/staging/lustre/lustre/Kconfig
@@ -0,0 +1,60 @@
+config LUSTRE_FS
+ tristate "Lustre file system client support"
+ depends on INET && m && !MIPS && !XTENSA && !SUPERH
+ select LNET
+ select CRYPTO
+ select CRYPTO_CRC32
+ select CRYPTO_CRC32_PCLMUL if X86
+ select CRYPTO_CRC32C
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ help
+ This option enables Lustre file system client support. Choose Y
+ here if you want to access a Lustre file system cluster. To compile
+ this file system support as a module, choose M here: the module will
+ be called lustre.
+
+ To mount Lustre file systems, you also need to install the user space
+ mount.lustre and other user space commands which can be found in the
+ lustre-client package, available from
+ http://downloads.whamcloud.com/public/lustre/
+
+ The Lustre file system is widely used for cluster storage in high
+ performance computing. Source code for both the kernel space and user
+ space Lustre components can be found at
+ http://git.whamcloud.com/?p=fs/lustre-release.git;a=summary
+
+ If unsure, say N.
+
+ See also http://wiki.lustre.org/
+
+config LUSTRE_OBD_MAX_IOCTL_BUFFER
+ int "Lustre obd max ioctl buffer bytes (default 8KB)"
+ depends on LUSTRE_FS
+ default 8192
+ help
+ This option defines the maximum size, in bytes, of the buffer that user
+ space applications can pass to the Lustre kernel modules through the
+ ioctl interface.
+
+ If unsure, use default.
+
+config LUSTRE_DEBUG_EXPENSIVE_CHECK
+ bool "Enable Lustre DEBUG checks"
+ depends on LUSTRE_FS
+ help
+ This option is mainly for debugging. It enables Lustre code to do
+ expensive checks that may have a performance impact.
+
+ Use with caution. If unsure, say N.
+
+config LUSTRE_TRANSLATE_ERRNOS
+ bool
+ depends on LUSTRE_FS && !X86
+ default y
+
+config LUSTRE_LLITE_LLOOP
+ bool "Lustre virtual block device"
+ depends on LUSTRE_FS && BLOCK
+ default m
diff --git a/drivers/staging/lustre/lustre/Makefile b/drivers/staging/lustre/lustre/Makefile
new file mode 100644
index 0000000..d1eb0bd
--- /dev/null
+++ b/drivers/staging/lustre/lustre/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_LUSTRE_FS) += libcfs/ lvfs/ obdclass/ ptlrpc/ fld/ osc/ mgc/ \
+ fid/ lov/ mdc/ lmv/ llite/ obdecho/
diff --git a/drivers/staging/lustre/lustre/fid/Makefile b/drivers/staging/lustre/lustre/fid/Makefile
new file mode 100644
index 0000000..ed21bea
--- /dev/null
+++ b/drivers/staging/lustre/lustre/fid/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_LUSTRE_FS) += fid.o
+fid-y := fid_request.o lproc_fid.o fid_lib.o
+
+
+ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/fid/fid_internal.h b/drivers/staging/lustre/lustre/fid/fid_internal.h
new file mode 100644
index 0000000..1dbe46b
--- /dev/null
+++ b/drivers/staging/lustre/lustre/fid/fid_internal.h
@@ -0,0 +1,56 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/fid/fid_internal.h
+ *
+ * Author: Yury Umanets <umka@clusterfs.com>
+ */
+#ifndef __FID_INTERNAL_H
+#define __FID_INTERNAL_H
+
+#include <lustre/lustre_idl.h>
+#include <linux/libcfs/libcfs.h>
+
+/* Functions used internally in module. */
+int seq_client_alloc_super(struct lu_client_seq *seq,
+ const struct lu_env *env);
+
+# ifdef LPROCFS
+extern struct lprocfs_vars seq_client_proc_list[];
+# endif
+
+extern struct proc_dir_entry *seq_type_proc_dir;
+
+#endif /* __FID_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/fid/fid_lib.c b/drivers/staging/lustre/lustre/fid/fid_lib.c
new file mode 100644
index 0000000..f03afde
--- /dev/null
+++ b/drivers/staging/lustre/lustre/fid/fid_lib.c
@@ -0,0 +1,95 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/fid/fid_lib.c
+ *
+ * Miscellaneous fid functions.
+ *
+ * Author: Nikita Danilov <nikita@clusterfs.com>
+ * Author: Yury Umanets <umka@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_FID
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/module.h>
+#include <lustre/lustre_idl.h>
+#include <lustre_fid.h>
+
+/**
+ * A cluster-wide range from which fid-sequences are granted to servers and
+ * then clients.
+ *
+ * Fid namespace:
+ * <pre>
+ * Normal FID: seq:64 [2^33,2^64-1] oid:32 ver:32
+ * IGIF : 0:32, ino:32 gen:32 0:32
+ * IDIF : 0:31, 1:1, ost-index:16, objd:48 0:32
+ * </pre>
+ *
+ * The first 0x400 sequences of normal FID are reserved for special purpose.
+ * FID_SEQ_START + 1 is for local file id generation.
+ * FID_SEQ_START + 2 is for .lustre directory and its objects
+ */
+const struct lu_seq_range LUSTRE_SEQ_SPACE_RANGE = {
+ FID_SEQ_NORMAL,
+ (__u64)~0ULL
+};
+EXPORT_SYMBOL(LUSTRE_SEQ_SPACE_RANGE);
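As a small illustration of the namespace described above (not part of the patch), a normal-space FID could be built and printed with the DFID/PFID helpers used elsewhere in this series; example_show_fid is hypothetical.

static void example_show_fid(void)
{
	struct lu_fid fid = {
		.f_seq = FID_SEQ_NORMAL,	/* first non-reserved sequence */
		.f_oid = 1,			/* object id within the sequence */
		.f_ver = 0,
	};

	CDEBUG(D_INFO, "example fid: "DFID"\n", PFID(&fid));
}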
+
+/* Zero range, used for init and other purposes. */
+const struct lu_seq_range LUSTRE_SEQ_ZERO_RANGE = {
+ 0,
+ 0
+};
+EXPORT_SYMBOL(LUSTRE_SEQ_ZERO_RANGE);
+
+/* Lustre Big Fs Lock fid. */
+const struct lu_fid LUSTRE_BFL_FID = { .f_seq = FID_SEQ_SPECIAL,
+ .f_oid = FID_OID_SPECIAL_BFL,
+ .f_ver = 0x0000000000000000 };
+EXPORT_SYMBOL(LUSTRE_BFL_FID);
+
+/** Special fid for ".lustre" directory */
+const struct lu_fid LU_DOT_LUSTRE_FID = { .f_seq = FID_SEQ_DOT_LUSTRE,
+ .f_oid = FID_OID_DOT_LUSTRE,
+ .f_ver = 0x0000000000000000 };
+EXPORT_SYMBOL(LU_DOT_LUSTRE_FID);
+
+/** Special fid for "fid" special object in .lustre */
+const struct lu_fid LU_OBF_FID = { .f_seq = FID_SEQ_DOT_LUSTRE,
+ .f_oid = FID_OID_DOT_LUSTRE_OBF,
+ .f_ver = 0x0000000000000000 };
+EXPORT_SYMBOL(LU_OBF_FID);
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
new file mode 100644
index 0000000..66007b5
--- /dev/null
+++ b/drivers/staging/lustre/lustre/fid/fid_request.c
@@ -0,0 +1,570 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2013, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/fid/fid_request.c
+ *
+ * Lustre Sequence Manager
+ *
+ * Author: Yury Umanets <umka@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_FID
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/module.h>
+
+#include <obd.h>
+#include <obd_class.h>
+#include <obd_support.h>
+#include <lustre_fid.h>
+/* mdc RPC locks */
+#include <lustre_mdc.h>
+#include "fid_internal.h"
+
+static int seq_client_rpc(struct lu_client_seq *seq,
+ struct lu_seq_range *output, __u32 opc,
+ const char *opcname)
+{
+ struct obd_export *exp = seq->lcs_exp;
+ struct ptlrpc_request *req;
+ struct lu_seq_range *out, *in;
+ __u32 *op;
+ unsigned int debug_mask;
+ int rc;
+
+ req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_SEQ_QUERY,
+ LUSTRE_MDS_VERSION, SEQ_QUERY);
+ if (req == NULL)
+ return -ENOMEM;
+
+ /* Init operation code */
+ op = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_OPC);
+ *op = opc;
+
+ /* Zero out input range, this is not recovery yet. */
+ in = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_RANGE);
+ range_init(in);
+
+ ptlrpc_request_set_replen(req);
+
+ in->lsr_index = seq->lcs_space.lsr_index;
+ if (seq->lcs_type == LUSTRE_SEQ_METADATA)
+ fld_range_set_mdt(in);
+ else
+ fld_range_set_ost(in);
+
+ if (opc == SEQ_ALLOC_SUPER) {
+ req->rq_request_portal = SEQ_CONTROLLER_PORTAL;
+ req->rq_reply_portal = MDC_REPLY_PORTAL;
+ /* While allocating a super sequence for a data object, the
+ * current thread might hold the export of MDT0 (MDT0 is
+ * precreating objects on this OST) and will send the request
+ * to MDT0 here. We must not keep resending the request;
+ * otherwise, if MDT0 fails (is umounted), the export of MDT0
+ * can never be released. */
+ if (seq->lcs_type == LUSTRE_SEQ_DATA)
+ req->rq_no_delay = req->rq_no_resend = 1;
+ debug_mask = D_CONSOLE;
+ } else {
+ if (seq->lcs_type == LUSTRE_SEQ_METADATA)
+ req->rq_request_portal = SEQ_METADATA_PORTAL;
+ else
+ req->rq_request_portal = SEQ_DATA_PORTAL;
+ debug_mask = D_INFO;
+ }
+
+ ptlrpc_at_set_req_timeout(req);
+
+ if (seq->lcs_type == LUSTRE_SEQ_METADATA)
+ mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
+ rc = ptlrpc_queue_wait(req);
+ if (seq->lcs_type == LUSTRE_SEQ_METADATA)
+ mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
+ if (rc)
+ GOTO(out_req, rc);
+
+ out = req_capsule_server_get(&req->rq_pill, &RMF_SEQ_RANGE);
+ *output = *out;
+
+ if (!range_is_sane(output)) {
+ CERROR("%s: Invalid range received from server: "
+ DRANGE"\n", seq->lcs_name, PRANGE(output));
+ GOTO(out_req, rc = -EINVAL);
+ }
+
+ if (range_is_exhausted(output)) {
+ CERROR("%s: Range received from server is exhausted: "
+ DRANGE"]\n", seq->lcs_name, PRANGE(output));
+ GOTO(out_req, rc = -EINVAL);
+ }
+
+ CDEBUG_LIMIT(debug_mask, "%s: Allocated %s-sequence "DRANGE"]\n",
+ seq->lcs_name, opcname, PRANGE(output));
+
+out_req:
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+/* Request sequence-controller node to allocate new super-sequence. */
+int seq_client_alloc_super(struct lu_client_seq *seq,
+ const struct lu_env *env)
+{
+ int rc;
+
+ mutex_lock(&seq->lcs_mutex);
+
+ if (seq->lcs_srv) {
+ rc = 0;
+ } else {
+ /* Check whether the connection to seq controller has been
+ * set up (lcs_exp != NULL) */
+ if (seq->lcs_exp == NULL) {
+ mutex_unlock(&seq->lcs_mutex);
+ return -EINPROGRESS;
+ }
+
+ rc = seq_client_rpc(seq, &seq->lcs_space,
+ SEQ_ALLOC_SUPER, "super");
+ }
+ mutex_unlock(&seq->lcs_mutex);
+ return rc;
+}
+
+/* Request sequence-controller node to allocate new meta-sequence. */
+static int seq_client_alloc_meta(const struct lu_env *env,
+ struct lu_client_seq *seq)
+{
+ int rc;
+
+ if (seq->lcs_srv) {
+ rc = 0;
+ } else {
+ do {
+ /* If the meta server returns -EINPROGRESS or -EAGAIN,
+ * it might not yet be ready to allocate a super
+ * sequence from the sequence controller (MDT0). */
+ rc = seq_client_rpc(seq, &seq->lcs_space,
+ SEQ_ALLOC_META, "meta");
+ } while (rc == -EINPROGRESS || rc == -EAGAIN);
+ }
+
+ return rc;
+}
+
+/* Allocate new sequence for client. */
+static int seq_client_alloc_seq(const struct lu_env *env,
+ struct lu_client_seq *seq, seqno_t *seqnr)
+{
+ int rc;
+
+ LASSERT(range_is_sane(&seq->lcs_space));
+
+ if (range_is_exhausted(&seq->lcs_space)) {
+ rc = seq_client_alloc_meta(env, seq);
+ if (rc) {
+ CERROR("%s: Can't allocate new meta-sequence,"
+ "rc %d\n", seq->lcs_name, rc);
+ return rc;
+ } else {
+ CDEBUG(D_INFO, "%s: New range - "DRANGE"\n",
+ seq->lcs_name, PRANGE(&seq->lcs_space));
+ }
+ } else {
+ rc = 0;
+ }
+
+ LASSERT(!range_is_exhausted(&seq->lcs_space));
+ *seqnr = seq->lcs_space.lsr_start;
+ seq->lcs_space.lsr_start += 1;
+
+ CDEBUG(D_INFO, "%s: Allocated sequence ["LPX64"]\n", seq->lcs_name,
+ *seqnr);
+
+ return rc;
+}
+
+static int seq_fid_alloc_prep(struct lu_client_seq *seq,
+ wait_queue_t *link)
+{
+ if (seq->lcs_update) {
+ add_wait_queue(&seq->lcs_waitq, link);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ mutex_unlock(&seq->lcs_mutex);
+
+ waitq_wait(link, TASK_UNINTERRUPTIBLE);
+
+ mutex_lock(&seq->lcs_mutex);
+ remove_wait_queue(&seq->lcs_waitq, link);
+ set_current_state(TASK_RUNNING);
+ return -EAGAIN;
+ }
+ ++seq->lcs_update;
+ mutex_unlock(&seq->lcs_mutex);
+ return 0;
+}
+
+static void seq_fid_alloc_fini(struct lu_client_seq *seq)
+{
+ LASSERT(seq->lcs_update == 1);
+ mutex_lock(&seq->lcs_mutex);
+ --seq->lcs_update;
+ wake_up(&seq->lcs_waitq);
+}
+
+/**
+ * Allocate the whole seq to the caller.
+ **/
+int seq_client_get_seq(const struct lu_env *env,
+ struct lu_client_seq *seq, seqno_t *seqnr)
+{
+ wait_queue_t link;
+ int rc;
+
+ LASSERT(seqnr != NULL);
+ mutex_lock(&seq->lcs_mutex);
+ init_waitqueue_entry_current(&link);
+
+ while (1) {
+ rc = seq_fid_alloc_prep(seq, &link);
+ if (rc == 0)
+ break;
+ }
+
+ rc = seq_client_alloc_seq(env, seq, seqnr);
+ if (rc) {
+ CERROR("%s: Can't allocate new sequence, "
+ "rc %d\n", seq->lcs_name, rc);
+ seq_fid_alloc_fini(seq);
+ mutex_unlock(&seq->lcs_mutex);
+ return rc;
+ }
+
+ CDEBUG(D_INFO, "%s: allocate sequence "
+ "[0x%16.16"LPF64"x]\n", seq->lcs_name, *seqnr);
+
+ /* Since the caller requires the whole sequence,
+ * mark this sequence as fully used. */
+ if (seq->lcs_type == LUSTRE_SEQ_METADATA)
+ seq->lcs_fid.f_oid = LUSTRE_METADATA_SEQ_MAX_WIDTH;
+ else
+ seq->lcs_fid.f_oid = LUSTRE_DATA_SEQ_MAX_WIDTH;
+
+ seq->lcs_fid.f_seq = *seqnr;
+ seq->lcs_fid.f_ver = 0;
+ /*
+ * Inform caller that sequence switch is performed to allow it
+ * to setup FLD for it.
+ */
+ seq_fid_alloc_fini(seq);
+ mutex_unlock(&seq->lcs_mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL(seq_client_get_seq);
+
+/* Allocate new fid on passed client @seq and save it to @fid. */
+int seq_client_alloc_fid(const struct lu_env *env,
+ struct lu_client_seq *seq, struct lu_fid *fid)
+{
+ wait_queue_t link;
+ int rc;
+
+ LASSERT(seq != NULL);
+ LASSERT(fid != NULL);
+
+ init_waitqueue_entry_current(&link);
+ mutex_lock(&seq->lcs_mutex);
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_SEQ_EXHAUST))
+ seq->lcs_fid.f_oid = seq->lcs_width;
+
+ while (1) {
+ seqno_t seqnr;
+
+ if (!fid_is_zero(&seq->lcs_fid) &&
+ fid_oid(&seq->lcs_fid) < seq->lcs_width) {
+ /* Just bump last allocated fid and return to caller. */
+ seq->lcs_fid.f_oid += 1;
+ rc = 0;
+ break;
+ }
+
+ rc = seq_fid_alloc_prep(seq, &link);
+ if (rc)
+ continue;
+
+ rc = seq_client_alloc_seq(env, seq, &seqnr);
+ if (rc) {
+ CERROR("%s: Can't allocate new sequence, "
+ "rc %d\n", seq->lcs_name, rc);
+ seq_fid_alloc_fini(seq);
+ mutex_unlock(&seq->lcs_mutex);
+ return rc;
+ }
+
+ CDEBUG(D_INFO, "%s: Switch to sequence "
+ "[0x%16.16"LPF64"x]\n", seq->lcs_name, seqnr);
+
+ seq->lcs_fid.f_oid = LUSTRE_FID_INIT_OID;
+ seq->lcs_fid.f_seq = seqnr;
+ seq->lcs_fid.f_ver = 0;
+
+ /*
+ * Inform caller that sequence switch is performed to allow it
+ * to setup FLD for it.
+ */
+ rc = 1;
+
+ seq_fid_alloc_fini(seq);
+ break;
+ }
+
+ *fid = seq->lcs_fid;
+ mutex_unlock(&seq->lcs_mutex);
+
+ CDEBUG(D_INFO, "%s: Allocated FID "DFID"\n", seq->lcs_name, PFID(fid));
+ return rc;
+}
+EXPORT_SYMBOL(seq_client_alloc_fid);
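A hedged sketch of how a caller might interpret the return value (0 = same sequence, 1 = sequence switch, negative = error); example_get_fid is a hypothetical caller, not part of the patch.

static int example_get_fid(const struct lu_env *env,
			   struct lu_client_seq *seq, struct lu_fid *fid)
{
	int rc = seq_client_alloc_fid(env, seq, fid);

	if (rc < 0)
		return rc;		/* allocation failed */

	if (rc == 1) {
		/* A new sequence was started; the caller may need to set
		 * up FLD for it, as noted in the comment above. */
	}

	return 0;
}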
+
+/*
+ * Finish the current sequence due to disconnect.
+ * See mdc_import_event()
+ */
+void seq_client_flush(struct lu_client_seq *seq)
+{
+ wait_queue_t link;
+
+ LASSERT(seq != NULL);
+ init_waitqueue_entry_current(&link);
+ mutex_lock(&seq->lcs_mutex);
+
+ while (seq->lcs_update) {
+ add_wait_queue(&seq->lcs_waitq, &link);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ mutex_unlock(&seq->lcs_mutex);
+
+ waitq_wait(&link, TASK_UNINTERRUPTIBLE);
+
+ mutex_lock(&seq->lcs_mutex);
+ remove_wait_queue(&seq->lcs_waitq, &link);
+ set_current_state(TASK_RUNNING);
+ }
+
+ fid_zero(&seq->lcs_fid);
+ /**
+ * This index should not be used for seq range allocation.
+ * Set to -1 for debug checks.
+ */
+
+ seq->lcs_space.lsr_index = -1;
+
+ range_init(&seq->lcs_space);
+ mutex_unlock(&seq->lcs_mutex);
+}
+EXPORT_SYMBOL(seq_client_flush);
+
+static void seq_client_proc_fini(struct lu_client_seq *seq)
+{
+#ifdef LPROCFS
+ if (seq->lcs_proc_dir) {
+ if (!IS_ERR(seq->lcs_proc_dir))
+ lprocfs_remove(&seq->lcs_proc_dir);
+ seq->lcs_proc_dir = NULL;
+ }
+#endif /* LPROCFS */
+}
+
+static int seq_client_proc_init(struct lu_client_seq *seq)
+{
+#ifdef LPROCFS
+ int rc;
+
+ seq->lcs_proc_dir = lprocfs_register(seq->lcs_name,
+ seq_type_proc_dir,
+ NULL, NULL);
+
+ if (IS_ERR(seq->lcs_proc_dir)) {
+ CERROR("%s: LProcFS failed in seq-init\n",
+ seq->lcs_name);
+ rc = PTR_ERR(seq->lcs_proc_dir);
+ return rc;
+ }
+
+ rc = lprocfs_add_vars(seq->lcs_proc_dir,
+ seq_client_proc_list, seq);
+ if (rc) {
+ CERROR("%s: Can't init sequence manager "
+ "proc, rc %d\n", seq->lcs_name, rc);
+ GOTO(out_cleanup, rc);
+ }
+
+ return 0;
+
+out_cleanup:
+ seq_client_proc_fini(seq);
+ return rc;
+
+#else /* LPROCFS */
+ return 0;
+#endif
+}
+
+int seq_client_init(struct lu_client_seq *seq,
+ struct obd_export *exp,
+ enum lu_cli_type type,
+ const char *prefix,
+ struct lu_server_seq *srv)
+{
+ int rc;
+
+ LASSERT(seq != NULL);
+ LASSERT(prefix != NULL);
+
+ seq->lcs_srv = srv;
+ seq->lcs_type = type;
+
+ mutex_init(&seq->lcs_mutex);
+ if (type == LUSTRE_SEQ_METADATA)
+ seq->lcs_width = LUSTRE_METADATA_SEQ_MAX_WIDTH;
+ else
+ seq->lcs_width = LUSTRE_DATA_SEQ_MAX_WIDTH;
+
+ init_waitqueue_head(&seq->lcs_waitq);
+ /* Make sure that things are clear before work is started. */
+ seq_client_flush(seq);
+
+ if (exp != NULL)
+ seq->lcs_exp = class_export_get(exp);
+ else if (type == LUSTRE_SEQ_METADATA)
+ LASSERT(seq->lcs_srv != NULL);
+
+ snprintf(seq->lcs_name, sizeof(seq->lcs_name),
+ "cli-%s", prefix);
+
+ rc = seq_client_proc_init(seq);
+ if (rc)
+ seq_client_fini(seq);
+ return rc;
+}
+EXPORT_SYMBOL(seq_client_init);
+
+void seq_client_fini(struct lu_client_seq *seq)
+{
+ seq_client_proc_fini(seq);
+
+ if (seq->lcs_exp != NULL) {
+ class_export_put(seq->lcs_exp);
+ seq->lcs_exp = NULL;
+ }
+
+ seq->lcs_srv = NULL;
+}
+EXPORT_SYMBOL(seq_client_fini);
+
+int client_fid_init(struct obd_device *obd,
+ struct obd_export *exp, enum lu_cli_type type)
+{
+ struct client_obd *cli = &obd->u.cli;
+ char *prefix;
+ int rc;
+
+ OBD_ALLOC_PTR(cli->cl_seq);
+ if (cli->cl_seq == NULL)
+ return -ENOMEM;
+
+ OBD_ALLOC(prefix, MAX_OBD_NAME + 5);
+ if (prefix == NULL)
+ GOTO(out_free_seq, rc = -ENOMEM);
+
+ snprintf(prefix, MAX_OBD_NAME + 5, "cli-%s", obd->obd_name);
+
+ /* Init client side sequence-manager */
+ rc = seq_client_init(cli->cl_seq, exp, type, prefix, NULL);
+ OBD_FREE(prefix, MAX_OBD_NAME + 5);
+ if (rc)
+ GOTO(out_free_seq, rc);
+
+ return rc;
+out_free_seq:
+ OBD_FREE_PTR(cli->cl_seq);
+ cli->cl_seq = NULL;
+ return rc;
+}
+EXPORT_SYMBOL(client_fid_init);
+
+int client_fid_fini(struct obd_device *obd)
+{
+ struct client_obd *cli = &obd->u.cli;
+
+ if (cli->cl_seq != NULL) {
+ seq_client_fini(cli->cl_seq);
+ OBD_FREE_PTR(cli->cl_seq);
+ cli->cl_seq = NULL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(client_fid_fini);
+
+struct proc_dir_entry *seq_type_proc_dir;
+
+static int __init fid_mod_init(void)
+{
+ seq_type_proc_dir = lprocfs_register(LUSTRE_SEQ_NAME,
+ proc_lustre_root,
+ NULL, NULL);
+ if (IS_ERR(seq_type_proc_dir))
+ return PTR_ERR(seq_type_proc_dir);
+ return 0;
+}
+
+static void __exit fid_mod_exit(void)
+{
+ if (seq_type_proc_dir != NULL && !IS_ERR(seq_type_proc_dir)) {
+ lprocfs_remove(&seq_type_proc_dir);
+ seq_type_proc_dir = NULL;
+ }
+}
+
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("Lustre FID Module");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.1.0");
+
+module_init(fid_mod_init);
+module_exit(fid_mod_exit);
diff --git a/drivers/staging/lustre/lustre/fid/lproc_fid.c b/drivers/staging/lustre/lustre/fid/lproc_fid.c
new file mode 100644
index 0000000..294070d
--- /dev/null
+++ b/drivers/staging/lustre/lustre/fid/lproc_fid.c
@@ -0,0 +1,212 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/fid/lproc_fid.c
+ *
+ * Lustre Sequence Manager
+ *
+ * Author: Yury Umanets <umka@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_FID
+
+# include <linux/libcfs/libcfs.h>
+# include <linux/module.h>
+
+#include <obd.h>
+#include <obd_class.h>
+#include <dt_object.h>
+#include <md_object.h>
+#include <obd_support.h>
+#include <lustre_req_layout.h>
+#include <lustre_fid.h>
+#include "fid_internal.h"
+
+#ifdef LPROCFS
+/*
+ * Note: this function is only used for testing; it is not safe for
+ * production use.
+ */
+static int
+lprocfs_fid_write_common(const char *buffer, unsigned long count,
+ struct lu_seq_range *range)
+{
+ struct lu_seq_range tmp;
+ int rc;
+
+ LASSERT(range != NULL);
+
+ rc = sscanf(buffer, "[%llx - %llx]\n",
+ (long long unsigned *)&tmp.lsr_start,
+ (long long unsigned *)&tmp.lsr_end);
+ if (rc != 2 || !range_is_sane(&tmp) || range_is_zero(&tmp))
+ return -EINVAL;
+ *range = tmp;
+ return 0;
+}
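+
+/*
+ * Example of the accepted input (illustrative): the buffer must contain a
+ * hexadecimal range in exactly the form parsed by the sscanf() above, e.g.
+ *
+ *	[0x200000400 - 0x200000500]
+ *
+ * Anything that does not parse into a sane, non-zero range is rejected
+ * with -EINVAL.
+ */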
+
+/* Client side procfs stuff */
+static ssize_t
+lprocfs_fid_space_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct lu_client_seq *seq = ((struct seq_file *)file->private_data)->private;
+ int rc;
+
+ LASSERT(seq != NULL);
+
+ mutex_lock(&seq->lcs_mutex);
+ rc = lprocfs_fid_write_common(buffer, count, &seq->lcs_space);
+
+ if (rc == 0) {
+ CDEBUG(D_INFO, "%s: Space: "DRANGE"\n",
+ seq->lcs_name, PRANGE(&seq->lcs_space));
+ }
+
+ mutex_unlock(&seq->lcs_mutex);
+
+ return count;
+}
+
+static int
+lprocfs_fid_space_seq_show(struct seq_file *m, void *unused)
+{
+ struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
+ int rc;
+
+ LASSERT(seq != NULL);
+
+ mutex_lock(&seq->lcs_mutex);
+ rc = seq_printf(m, "["LPX64" - "LPX64"]:%x:%s\n", PRANGE(&seq->lcs_space));
+ mutex_unlock(&seq->lcs_mutex);
+
+ return rc;
+}
+
+static ssize_t
+lprocfs_fid_width_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct lu_client_seq *seq = ((struct seq_file *)file->private_data)->private;
+ __u64 max;
+ int rc, val;
+
+ LASSERT(seq != NULL);
+
+ rc = lprocfs_write_helper(buffer, count, &val);
+ if (rc)
+ return rc;
+
+ mutex_lock(&seq->lcs_mutex);
+ if (seq->lcs_type == LUSTRE_SEQ_DATA)
+ max = LUSTRE_DATA_SEQ_MAX_WIDTH;
+ else
+ max = LUSTRE_METADATA_SEQ_MAX_WIDTH;
+
+ if (val <= max && val > 0) {
+ seq->lcs_width = val;
+
+ if (rc == 0) {
+ CDEBUG(D_INFO, "%s: Sequence size: "LPU64"\n",
+ seq->lcs_name, seq->lcs_width);
+ }
+ }
+
+ mutex_unlock(&seq->lcs_mutex);
+
+ return count;
+}
+
+static int
+lprocfs_fid_width_seq_show(struct seq_file *m, void *unused)
+{
+ struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
+ int rc;
+
+ LASSERT(seq != NULL);
+
+ mutex_lock(&seq->lcs_mutex);
+ rc = seq_printf(m, LPU64"\n", seq->lcs_width);
+ mutex_unlock(&seq->lcs_mutex);
+
+ return rc;
+}
+
+static int
+lprocfs_fid_fid_seq_show(struct seq_file *m, void *unused)
+{
+ struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
+ int rc;
+
+ LASSERT(seq != NULL);
+
+ mutex_lock(&seq->lcs_mutex);
+ rc = seq_printf(m, DFID"\n", PFID(&seq->lcs_fid));
+ mutex_unlock(&seq->lcs_mutex);
+
+ return rc;
+}
+
+static int
+lprocfs_fid_server_seq_show(struct seq_file *m, void *unused)
+{
+ struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
+ struct client_obd *cli;
+ int rc;
+
+ LASSERT(seq != NULL);
+
+ if (seq->lcs_exp != NULL) {
+ cli = &seq->lcs_exp->exp_obd->u.cli;
+ rc = seq_printf(m, "%s\n", cli->cl_target_uuid.uuid);
+ } else {
+ rc = seq_printf(m, "%s\n", seq->lcs_srv->lss_name);
+ }
+ return rc;
+}
+
+LPROC_SEQ_FOPS(lprocfs_fid_space);
+LPROC_SEQ_FOPS(lprocfs_fid_width);
+LPROC_SEQ_FOPS_RO(lprocfs_fid_server);
+LPROC_SEQ_FOPS_RO(lprocfs_fid_fid);
+
+struct lprocfs_vars seq_client_proc_list[] = {
+ { "space", &lprocfs_fid_space_fops },
+ { "width", &lprocfs_fid_width_fops },
+ { "server", &lprocfs_fid_server_fops },
+ { "fid", &lprocfs_fid_fid_fops },
+ { NULL }
+};
+#endif
diff --git a/drivers/staging/lustre/lustre/fld/Makefile b/drivers/staging/lustre/lustre/fld/Makefile
new file mode 100644
index 0000000..90d46d8
--- /dev/null
+++ b/drivers/staging/lustre/lustre/fld/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_LUSTRE_FS) += fld.o
+fld-y := fld_request.o fld_cache.o lproc_fld.o
+
+
+ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
new file mode 100644
index 0000000..25099cb
--- /dev/null
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -0,0 +1,547 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, 2013, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/fld/fld_cache.c
+ *
+ * FLD (Fids Location Database)
+ *
+ * Author: Pravin Shelar <pravin.shelar@sun.com>
+ * Author: Yury Umanets <umka@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_FLD
+
+# include <linux/libcfs/libcfs.h>
+# include <linux/module.h>
+# include <asm/div64.h>
+
+#include <obd.h>
+#include <obd_class.h>
+#include <lustre_ver.h>
+#include <obd_support.h>
+#include <lprocfs_status.h>
+
+#include <dt_object.h>
+#include <md_object.h>
+#include <lustre_req_layout.h>
+#include <lustre_fld.h>
+#include "fld_internal.h"
+
+/**
+ * create fld cache.
+ */
+struct fld_cache *fld_cache_init(const char *name,
+ int cache_size, int cache_threshold)
+{
+ struct fld_cache *cache;
+
+ LASSERT(name != NULL);
+ LASSERT(cache_threshold < cache_size);
+
+ OBD_ALLOC_PTR(cache);
+ if (cache == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&cache->fci_entries_head);
+ INIT_LIST_HEAD(&cache->fci_lru);
+
+ cache->fci_cache_count = 0;
+ rwlock_init(&cache->fci_lock);
+
+ strlcpy(cache->fci_name, name,
+ sizeof(cache->fci_name));
+
+ cache->fci_cache_size = cache_size;
+ cache->fci_threshold = cache_threshold;
+
+ /* Init fld cache info. */
+ memset(&cache->fci_stat, 0, sizeof(cache->fci_stat));
+
+ CDEBUG(D_INFO, "%s: FLD cache - Size: %d, Threshold: %d\n",
+ cache->fci_name, cache_size, cache_threshold);
+
+ return cache;
+}
+
+/**
+ * destroy fld cache.
+ */
+void fld_cache_fini(struct fld_cache *cache)
+{
+ __u64 pct;
+
+ LASSERT(cache != NULL);
+ fld_cache_flush(cache);
+
+ if (cache->fci_stat.fst_count > 0) {
+ pct = cache->fci_stat.fst_cache * 100;
+ do_div(pct, cache->fci_stat.fst_count);
+ } else {
+ pct = 0;
+ }
+
+ CDEBUG(D_INFO, "FLD cache statistics (%s):\n", cache->fci_name);
+ CDEBUG(D_INFO, " Total reqs: "LPU64"\n", cache->fci_stat.fst_count);
+ CDEBUG(D_INFO, " Cache reqs: "LPU64"\n", cache->fci_stat.fst_cache);
+ CDEBUG(D_INFO, " Cache hits: "LPU64"%%\n", pct);
+
+ OBD_FREE_PTR(cache);
+}
+
+/**
+ * delete given node from list.
+ */
+void fld_cache_entry_delete(struct fld_cache *cache,
+ struct fld_cache_entry *node)
+{
+ list_del(&node->fce_list);
+ list_del(&node->fce_lru);
+ cache->fci_cache_count--;
+ OBD_FREE_PTR(node);
+}
+
+/**
+ * Fix the list by checking each new entry against the NEXT entry in order.
+ */
+static void fld_fix_new_list(struct fld_cache *cache)
+{
+ struct fld_cache_entry *f_curr;
+ struct fld_cache_entry *f_next;
+ struct lu_seq_range *c_range;
+ struct lu_seq_range *n_range;
+ struct list_head *head = &cache->fci_entries_head;
+
+restart_fixup:
+
+ list_for_each_entry_safe(f_curr, f_next, head, fce_list) {
+ c_range = &f_curr->fce_range;
+ n_range = &f_next->fce_range;
+
+ LASSERT(range_is_sane(c_range));
+ if (&f_next->fce_list == head)
+ break;
+
+ if (c_range->lsr_flags != n_range->lsr_flags)
+ continue;
+
+ LASSERTF(c_range->lsr_start <= n_range->lsr_start,
+ "cur lsr_start "DRANGE" next lsr_start "DRANGE"\n",
+ PRANGE(c_range), PRANGE(n_range));
+
+ /* check merge possibility with next range */
+ if (c_range->lsr_end == n_range->lsr_start) {
+ if (c_range->lsr_index != n_range->lsr_index)
+ continue;
+ n_range->lsr_start = c_range->lsr_start;
+ fld_cache_entry_delete(cache, f_curr);
+ continue;
+ }
+
+ /* check if current range overlaps with next range. */
+ if (n_range->lsr_start < c_range->lsr_end) {
+ if (c_range->lsr_index == n_range->lsr_index) {
+ n_range->lsr_start = c_range->lsr_start;
+ n_range->lsr_end = max(c_range->lsr_end,
+ n_range->lsr_end);
+ fld_cache_entry_delete(cache, f_curr);
+ } else {
+ if (n_range->lsr_end <= c_range->lsr_end) {
+ *n_range = *c_range;
+ fld_cache_entry_delete(cache, f_curr);
+ } else
+ n_range->lsr_start = c_range->lsr_end;
+ }
+
+ /* we could have overlap over next
+ * range too. better restart. */
+ goto restart_fixup;
+ }
+
+ /* kill duplicates */
+ if (c_range->lsr_start == n_range->lsr_start &&
+ c_range->lsr_end == n_range->lsr_end)
+ fld_cache_entry_delete(cache, f_curr);
+ }
+}
+
+/**
+ * add node to fld cache
+ */
+static inline void fld_cache_entry_add(struct fld_cache *cache,
+ struct fld_cache_entry *f_new,
+ struct list_head *pos)
+{
+ list_add(&f_new->fce_list, pos);
+ list_add(&f_new->fce_lru, &cache->fci_lru);
+
+ cache->fci_cache_count++;
+ fld_fix_new_list(cache);
+}
+
+/**
+ * Check if cache needs to be shrunk. If so - do it.
+ * Remove one entry in list and so on until cache is shrunk enough.
+ */
+static int fld_cache_shrink(struct fld_cache *cache)
+{
+ struct fld_cache_entry *flde;
+ struct list_head *curr;
+ int num = 0;
+
+ LASSERT(cache != NULL);
+
+ if (cache->fci_cache_count < cache->fci_cache_size)
+ return 0;
+
+ curr = cache->fci_lru.prev;
+
+ while (cache->fci_cache_count + cache->fci_threshold >
+ cache->fci_cache_size && curr != &cache->fci_lru) {
+
+ flde = list_entry(curr, struct fld_cache_entry, fce_lru);
+ curr = curr->prev;
+ fld_cache_entry_delete(cache, flde);
+ num++;
+ }
+
+ CDEBUG(D_INFO, "%s: FLD cache - Shrunk by "
+ "%d entries\n", cache->fci_name, num);
+
+ return 0;
+}
+
+/**
+ * kill all fld cache entries.
+ */
+void fld_cache_flush(struct fld_cache *cache)
+{
+ write_lock(&cache->fci_lock);
+ cache->fci_cache_size = 0;
+ fld_cache_shrink(cache);
+ write_unlock(&cache->fci_lock);
+}
+
+/**
+ * Punch a hole in an existing range: divide the range and add new
+ * entries accordingly.
+ */
+
+void fld_cache_punch_hole(struct fld_cache *cache,
+ struct fld_cache_entry *f_curr,
+ struct fld_cache_entry *f_new)
+{
+ const struct lu_seq_range *range = &f_new->fce_range;
+ const seqno_t new_start = range->lsr_start;
+ const seqno_t new_end = range->lsr_end;
+ struct fld_cache_entry *fldt;
+
+ OBD_ALLOC_GFP(fldt, sizeof *fldt, GFP_ATOMIC);
+ if (!fldt) {
+ OBD_FREE_PTR(f_new);
+ /* overlap is not allowed, so don't mess up the list. */
+ return;
+ }
+ /* break f_curr RANGE into three RANGES:
+ * f_curr, f_new , fldt
+ */
+
+ /* f_new = *range */
+
+ /* fldt */
+ fldt->fce_range.lsr_start = new_end;
+ fldt->fce_range.lsr_end = f_curr->fce_range.lsr_end;
+ fldt->fce_range.lsr_index = f_curr->fce_range.lsr_index;
+
+ /* f_curr */
+ f_curr->fce_range.lsr_end = new_start;
+
+ /* add these two entries to list */
+ fld_cache_entry_add(cache, f_new, &f_curr->fce_list);
+ fld_cache_entry_add(cache, fldt, &f_new->fce_list);
+
+ /* no need to fixup */
+}
+
+/**
+ * handle range overlap in fld cache.
+ */
+static void fld_cache_overlap_handle(struct fld_cache *cache,
+ struct fld_cache_entry *f_curr,
+ struct fld_cache_entry *f_new)
+{
+ const struct lu_seq_range *range = &f_new->fce_range;
+ const seqno_t new_start = range->lsr_start;
+ const seqno_t new_end = range->lsr_end;
+ const mdsno_t mdt = range->lsr_index;
+
+ /* This is the overlap case; it only checks overlap with the
+ * previous range. The fixup will handle overlap with the next range. */
+
+ if (f_curr->fce_range.lsr_index == mdt) {
+ f_curr->fce_range.lsr_start = min(f_curr->fce_range.lsr_start,
+ new_start);
+
+ f_curr->fce_range.lsr_end = max(f_curr->fce_range.lsr_end,
+ new_end);
+
+ OBD_FREE_PTR(f_new);
+ fld_fix_new_list(cache);
+
+ } else if (new_start <= f_curr->fce_range.lsr_start &&
+ f_curr->fce_range.lsr_end <= new_end) {
+ /* case 1: the new range completely overshadows the existing range,
+ * e.g. the whole range migrated. Update the fld cache entry. */
+
+ f_curr->fce_range = *range;
+ OBD_FREE_PTR(f_new);
+ fld_fix_new_list(cache);
+
+ } else if (f_curr->fce_range.lsr_start < new_start &&
+ new_end < f_curr->fce_range.lsr_end) {
+ /* case 2: the new range fits within the existing range. */
+
+ fld_cache_punch_hole(cache, f_curr, f_new);
+
+ } else if (new_end <= f_curr->fce_range.lsr_end) {
+ /* case 3: overlap:
+ * [new_start [c_start new_end) c_end)
+ */
+
+ LASSERT(new_start <= f_curr->fce_range.lsr_start);
+
+ f_curr->fce_range.lsr_start = new_end;
+ fld_cache_entry_add(cache, f_new, f_curr->fce_list.prev);
+
+ } else if (f_curr->fce_range.lsr_start <= new_start) {
+ /* case 4: overlap:
+ * [c_start [new_start c_end) new_end)
+ */
+
+ LASSERT(f_curr->fce_range.lsr_end <= new_end);
+
+ f_curr->fce_range.lsr_end = new_start;
+ fld_cache_entry_add(cache, f_new, &f_curr->fce_list);
+ } else
+ CERROR("NEW range ="DRANGE" curr = "DRANGE"\n",
+ PRANGE(range),PRANGE(&f_curr->fce_range));
+}
+
+struct fld_cache_entry
+*fld_cache_entry_create(const struct lu_seq_range *range)
+{
+ struct fld_cache_entry *f_new;
+
+ LASSERT(range_is_sane(range));
+
+ OBD_ALLOC_PTR(f_new);
+ if (!f_new)
+ return ERR_PTR(-ENOMEM);
+
+ f_new->fce_range = *range;
+ return f_new;
+}
+
+/**
+ * Insert FLD entry in FLD cache.
+ *
+ * This function handles all cases of merging and breaking up of
+ * ranges.
+ */
+int fld_cache_insert_nolock(struct fld_cache *cache,
+ struct fld_cache_entry *f_new)
+{
+ struct fld_cache_entry *f_curr;
+ struct fld_cache_entry *n;
+ struct list_head *head;
+ struct list_head *prev = NULL;
+ const seqno_t new_start = f_new->fce_range.lsr_start;
+ const seqno_t new_end = f_new->fce_range.lsr_end;
+ __u32 new_flags = f_new->fce_range.lsr_flags;
+
+ /*
+ * Duplicate entries are eliminated in the insert op, so we don't
+ * need to search for the new entry before starting the
+ * insertion loop.
+ */
+
+ if (!cache->fci_no_shrink)
+ fld_cache_shrink(cache);
+
+ head = &cache->fci_entries_head;
+
+ list_for_each_entry_safe(f_curr, n, head, fce_list) {
+ /* the new range ends before this entry starts - insert it before this one */
+ if (new_end < f_curr->fce_range.lsr_start ||
+ (new_end == f_curr->fce_range.lsr_start &&
+ new_flags != f_curr->fce_range.lsr_flags))
+ break;
+
+ prev = &f_curr->fce_list;
+ /* check if this range is to left of new range. */
+ if (new_start < f_curr->fce_range.lsr_end &&
+ new_flags == f_curr->fce_range.lsr_flags) {
+ fld_cache_overlap_handle(cache, f_curr, f_new);
+ goto out;
+ }
+ }
+
+ if (prev == NULL)
+ prev = head;
+
+ CDEBUG(D_INFO, "insert range "DRANGE"\n", PRANGE(&f_new->fce_range));
+ /* Add new entry to cache and lru list. */
+ fld_cache_entry_add(cache, f_new, prev);
+out:
+ return 0;
+}
+
+int fld_cache_insert(struct fld_cache *cache,
+ const struct lu_seq_range *range)
+{
+ struct fld_cache_entry *flde;
+ int rc;
+
+ flde = fld_cache_entry_create(range);
+ if (IS_ERR(flde))
+ return PTR_ERR(flde);
+
+ write_lock(&cache->fci_lock);
+ rc = fld_cache_insert_nolock(cache, flde);
+ write_unlock(&cache->fci_lock);
+ if (rc)
+ OBD_FREE_PTR(flde);
+
+ return rc;
+}
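+
+/*
+ * Illustrative example (simplified; return codes ignored and the range
+ * values are hypothetical): two adjacent ranges that point at the same
+ * index end up merged into a single cache entry by fld_fix_new_list().
+ */
+#if 0	/* example only */
+static void example_merge(struct fld_cache *cache)
+{
+	struct lu_seq_range a = { .lsr_start = 0x100, .lsr_end = 0x200,
+				  .lsr_index = 0, .lsr_flags = 0 };
+	struct lu_seq_range b = { .lsr_start = 0x200, .lsr_end = 0x300,
+				  .lsr_index = 0, .lsr_flags = 0 };
+
+	fld_cache_insert(cache, &a);
+	fld_cache_insert(cache, &b);	/* merged into [0x100, 0x300) */
+}
+#endif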
+
+void fld_cache_delete_nolock(struct fld_cache *cache,
+ const struct lu_seq_range *range)
+{
+ struct fld_cache_entry *flde;
+ struct fld_cache_entry *tmp;
+ struct list_head *head;
+
+ head = &cache->fci_entries_head;
+ list_for_each_entry_safe(flde, tmp, head, fce_list) {
+ /* delete the entry that matches the given range */
+ if (range->lsr_start == flde->fce_range.lsr_start ||
+ (range->lsr_end == flde->fce_range.lsr_end &&
+ range->lsr_flags == flde->fce_range.lsr_flags)) {
+ fld_cache_entry_delete(cache, flde);
+ break;
+ }
+ }
+}
+
+/**
+ * Delete FLD entry in FLD cache.
+ *
+ */
+void fld_cache_delete(struct fld_cache *cache,
+ const struct lu_seq_range *range)
+{
+ write_lock(&cache->fci_lock);
+ fld_cache_delete_nolock(cache, range);
+ write_unlock(&cache->fci_lock);
+}
+
+struct fld_cache_entry
+*fld_cache_entry_lookup_nolock(struct fld_cache *cache,
+ struct lu_seq_range *range)
+{
+ struct fld_cache_entry *flde;
+ struct fld_cache_entry *got = NULL;
+ struct list_head *head;
+
+ head = &cache->fci_entries_head;
+ list_for_each_entry(flde, head, fce_list) {
+ if (range->lsr_start == flde->fce_range.lsr_start ||
+ (range->lsr_end == flde->fce_range.lsr_end &&
+ range->lsr_flags == flde->fce_range.lsr_flags)) {
+ got = flde;
+ break;
+ }
+ }
+
+ return got;
+}
+
+/**
+ * lookup \a seq sequence for range in fld cache.
+ */
+struct fld_cache_entry
+*fld_cache_entry_lookup(struct fld_cache *cache, struct lu_seq_range *range)
+{
+ struct fld_cache_entry *got = NULL;
+
+ read_lock(&cache->fci_lock);
+ got = fld_cache_entry_lookup_nolock(cache, range);
+ read_unlock(&cache->fci_lock);
+ return got;
+}
+
+/**
+ * lookup \a seq sequence for range in fld cache.
+ */
+int fld_cache_lookup(struct fld_cache *cache,
+ const seqno_t seq, struct lu_seq_range *range)
+{
+ struct fld_cache_entry *flde;
+ struct fld_cache_entry *prev = NULL;
+ struct list_head *head;
+
+ read_lock(&cache->fci_lock);
+ head = &cache->fci_entries_head;
+
+ cache->fci_stat.fst_count++;
+ list_for_each_entry(flde, head, fce_list) {
+ if (flde->fce_range.lsr_start > seq) {
+ if (prev != NULL)
+ *range = prev->fce_range;
+ break;
+ }
+
+ prev = flde;
+ if (range_within(&flde->fce_range, seq)) {
+ *range = flde->fce_range;
+
+ cache->fci_stat.fst_cache++;
+ read_unlock(&cache->fci_lock);
+ return 0;
+ }
+ }
+ read_unlock(&cache->fci_lock);
+ return -ENOENT;
+}
diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h
new file mode 100644
index 0000000..56686b1
--- /dev/null
+++ b/drivers/staging/lustre/lustre/fld/fld_internal.h
@@ -0,0 +1,194 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, 2013, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/fld/fld_internal.h
+ *
+ * Author: Yury Umanets <umka@clusterfs.com>
+ * Author: Tom WangDi <wangdi@clusterfs.com>
+ */
+#ifndef __FLD_INTERNAL_H
+#define __FLD_INTERNAL_H
+
+#include <lustre/lustre_idl.h>
+#include <dt_object.h>
+
+#include <linux/libcfs/libcfs.h>
+#include <lustre_req_layout.h>
+#include <lustre_fld.h>
+
+enum {
+ LUSTRE_FLD_INIT = 1 << 0,
+ LUSTRE_FLD_RUN = 1 << 1
+};
+
+struct fld_stats {
+ __u64 fst_count;
+ __u64 fst_cache;
+ __u64 fst_inflight;
+};
+
+typedef int (*fld_hash_func_t) (struct lu_client_fld *, __u64);
+
+typedef struct lu_fld_target *
+(*fld_scan_func_t) (struct lu_client_fld *, __u64);
+
+struct lu_fld_hash {
+ const char *fh_name;
+ fld_hash_func_t fh_hash_func;
+ fld_scan_func_t fh_scan_func;
+};
+
+struct fld_cache_entry {
+ struct list_head fce_lru;
+ struct list_head fce_list;
+ /**
+ * fld cache entries are sorted on range->lsr_start field. */
+ struct lu_seq_range fce_range;
+};
+
+struct fld_cache {
+ /**
+ * Cache guard; it mostly protects fci_hash, since the other fields are
+ * immutable after init is finished.
+ */
+ rwlock_t fci_lock;
+
+ /**
+ * Cache shrink threshold */
+ int fci_threshold;
+
+ /**
+ * Preferred number of cached entries */
+ int fci_cache_size;
+
+ /**
+ * Current number of cached entries. Protected by \a fci_lock */
+ int fci_cache_count;
+
+ /**
+ * LRU list fld entries. */
+ struct list_head fci_lru;
+
+ /**
+ * sorted fld entries. */
+ struct list_head fci_entries_head;
+
+ /**
+ * Cache statistics. */
+ struct fld_stats fci_stat;
+
+ /**
+ * Cache name used for debug and messages. */
+ char fci_name[80];
+ unsigned int fci_no_shrink:1;
+};
+
+enum fld_op {
+ FLD_CREATE = 0,
+ FLD_DELETE = 1,
+ FLD_LOOKUP = 2
+};
+
+enum {
+ /* 4M of FLD cache will not hurt client a lot. */
+ FLD_SERVER_CACHE_SIZE = (4 * 0x100000),
+
+ /* 1M of FLD cache will not hurt client a lot. */
+ FLD_CLIENT_CACHE_SIZE = (1 * 0x100000)
+};
+
+enum {
+ /* Cache threshold is 10 percent of size. */
+ FLD_SERVER_CACHE_THRESHOLD = 10,
+
+ /* Cache threshold is 10 percent of size. */
+ FLD_CLIENT_CACHE_THRESHOLD = 10
+};
+
+extern struct lu_fld_hash fld_hash[];
+
+int fld_client_rpc(struct obd_export *exp,
+ struct lu_seq_range *range, __u32 fld_op);
+
+#ifdef LPROCFS
+extern struct lprocfs_vars fld_client_proc_list[];
+#endif
+
+
+struct fld_cache *fld_cache_init(const char *name,
+ int cache_size, int cache_threshold);
+
+void fld_cache_fini(struct fld_cache *cache);
+
+void fld_cache_flush(struct fld_cache *cache);
+
+int fld_cache_insert(struct fld_cache *cache,
+ const struct lu_seq_range *range);
+
+struct fld_cache_entry
+*fld_cache_entry_create(const struct lu_seq_range *range);
+
+int fld_cache_insert_nolock(struct fld_cache *cache,
+ struct fld_cache_entry *f_new);
+void fld_cache_delete(struct fld_cache *cache,
+ const struct lu_seq_range *range);
+void fld_cache_delete_nolock(struct fld_cache *cache,
+ const struct lu_seq_range *range);
+int fld_cache_lookup(struct fld_cache *cache,
+ const seqno_t seq, struct lu_seq_range *range);
+
+struct fld_cache_entry*
+fld_cache_entry_lookup(struct fld_cache *cache, struct lu_seq_range *range);
+void fld_cache_entry_delete(struct fld_cache *cache,
+ struct fld_cache_entry *node);
+void fld_dump_cache_entries(struct fld_cache *cache);
+
+struct fld_cache_entry
+*fld_cache_entry_lookup_nolock(struct fld_cache *cache,
+ struct lu_seq_range *range);
+int fld_write_range(const struct lu_env *env, struct dt_object *dt,
+ const struct lu_seq_range *range, struct thandle *th);
+
+static inline const char *
+fld_target_name(struct lu_fld_target *tar)
+{
+ if (tar->ft_srv != NULL)
+ return tar->ft_srv->lsf_name;
+
+ return (const char *)tar->ft_exp->exp_obd->obd_name;
+}
+
+extern struct proc_dir_entry *fld_type_proc_dir;
+#endif /* __FLD_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
new file mode 100644
index 0000000..078e98b
--- /dev/null
+++ b/drivers/staging/lustre/lustre/fld/fld_request.c
@@ -0,0 +1,531 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2013, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/fld/fld_request.c
+ *
+ * FLD (Fids Location Database)
+ *
+ * Author: Yury Umanets <umka@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_FLD
+
+# include <linux/libcfs/libcfs.h>
+# include <linux/module.h>
+# include <asm/div64.h>
+
+#include <obd.h>
+#include <obd_class.h>
+#include <lustre_ver.h>
+#include <obd_support.h>
+#include <lprocfs_status.h>
+
+#include <dt_object.h>
+#include <md_object.h>
+#include <lustre_req_layout.h>
+#include <lustre_fld.h>
+#include <lustre_mdc.h>
+#include "fld_internal.h"
+
+struct lu_context_key fld_thread_key;
+
+/* TODO: these 3 functions are copies of flow-control code from mdc_lib.c
+ * It should be common thing. The same about mdc RPC lock */
+static int fld_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
+{
+ int rc;
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ rc = list_empty(&mcw->mcw_entry);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ return rc;
+};
+
+static void fld_enter_request(struct client_obd *cli)
+{
+ struct mdc_cache_waiter mcw;
+ struct l_wait_info lwi = { 0 };
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
+ list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
+ init_waitqueue_head(&mcw.mcw_waitq);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ l_wait_event(mcw.mcw_waitq, fld_req_avail(cli, &mcw), &lwi);
+ } else {
+ cli->cl_r_in_flight++;
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ }
+}
+
+static void fld_exit_request(struct client_obd *cli)
+{
+ struct list_head *l, *tmp;
+ struct mdc_cache_waiter *mcw;
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ cli->cl_r_in_flight--;
+ list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
+
+ if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
+ /* No free request slots anymore */
+ break;
+ }
+
+ mcw = list_entry(l, struct mdc_cache_waiter, mcw_entry);
+ list_del_init(&mcw->mcw_entry);
+ cli->cl_r_in_flight++;
+ wake_up(&mcw->mcw_waitq);
+ }
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+}
+
+static int fld_rrb_hash(struct lu_client_fld *fld,
+ seqno_t seq)
+{
+ LASSERT(fld->lcf_count > 0);
+ return do_div(seq, fld->lcf_count);
+}
+
+static struct lu_fld_target *
+fld_rrb_scan(struct lu_client_fld *fld, seqno_t seq)
+{
+ struct lu_fld_target *target;
+ int hash;
+
+ /* Almost all special sequences are located on MDT0, so they
+ * should go to index 0 directly instead of going through the
+ * hash. This also ensures that FLD lookup requests for
+ * sequences on MDT0 are not blocked when other MDTs are not
+ * yet connected. */
+ if (fid_seq_is_norm(seq))
+ hash = fld_rrb_hash(fld, seq);
+ else
+ hash = 0;
+
+ list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
+ if (target->ft_idx == hash)
+ return target;
+ }
+
+ CERROR("%s: Can't find target by hash %d (seq "LPX64"). "
+ "Targets (%d):\n", fld->lcf_name, hash, seq,
+ fld->lcf_count);
+
+ list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
+ const char *srv_name = target->ft_srv != NULL ?
+ target->ft_srv->lsf_name : "<null>";
+ const char *exp_name = target->ft_exp != NULL ?
+ (char *)target->ft_exp->exp_obd->obd_uuid.uuid :
+ "<null>";
+
+ CERROR(" exp: 0x%p (%s), srv: 0x%p (%s), idx: "LPU64"\n",
+ target->ft_exp, exp_name, target->ft_srv,
+ srv_name, target->ft_idx);
+ }
+
+ /*
+ * If target is not found, there is logical error anyway, so here is
+ * LBUG() to catch this situation.
+ */
+ LBUG();
+ return NULL;
+}
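+
+/*
+ * Worked example (illustrative): with lcf_count == 4, a normal sequence such
+ * as 0x200000403 hashes to 0x200000403 % 4 == 3, so the lookup is routed to
+ * the target registered with ft_idx == 3. Reserved/special sequences bypass
+ * the hash and always go to index 0 (MDT0).
+ */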
+
+struct lu_fld_hash fld_hash[] = {
+ {
+ .fh_name = "RRB",
+ .fh_hash_func = fld_rrb_hash,
+ .fh_scan_func = fld_rrb_scan
+ },
+ {
+ 0,
+ }
+};
+
+static struct lu_fld_target *
+fld_client_get_target(struct lu_client_fld *fld, seqno_t seq)
+{
+ struct lu_fld_target *target;
+
+ LASSERT(fld->lcf_hash != NULL);
+
+ spin_lock(&fld->lcf_lock);
+ target = fld->lcf_hash->fh_scan_func(fld, seq);
+ spin_unlock(&fld->lcf_lock);
+
+ if (target != NULL) {
+ CDEBUG(D_INFO, "%s: Found target (idx "LPU64
+ ") by seq "LPX64"\n", fld->lcf_name,
+ target->ft_idx, seq);
+ }
+
+ return target;
+}
+
+/*
+ * Add an export to the FLD. This is usually done by CMM and LMV, as they are
+ * the main users of the FLD module.
+ */
+int fld_client_add_target(struct lu_client_fld *fld,
+ struct lu_fld_target *tar)
+{
+ const char *name;
+ struct lu_fld_target *target, *tmp;
+
+ LASSERT(tar != NULL);
+ name = fld_target_name(tar);
+ LASSERT(name != NULL);
+ LASSERT(tar->ft_srv != NULL || tar->ft_exp != NULL);
+
+ if (fld->lcf_flags != LUSTRE_FLD_INIT) {
+ CERROR("%s: Attempt to add target %s (idx "LPU64") "
+ "on fly - skip it\n", fld->lcf_name, name,
+ tar->ft_idx);
+ return 0;
+ } else {
+ CDEBUG(D_INFO, "%s: Adding target %s (idx "
+ LPU64")\n", fld->lcf_name, name, tar->ft_idx);
+ }
+
+ OBD_ALLOC_PTR(target);
+ if (target == NULL)
+ return -ENOMEM;
+
+ spin_lock(&fld->lcf_lock);
+ list_for_each_entry(tmp, &fld->lcf_targets, ft_chain) {
+ if (tmp->ft_idx == tar->ft_idx) {
+ spin_unlock(&fld->lcf_lock);
+ OBD_FREE_PTR(target);
+ CERROR("Target %s exists in FLD and known as %s:#"LPU64"\n",
+ name, fld_target_name(tmp), tmp->ft_idx);
+ return -EEXIST;
+ }
+ }
+
+ target->ft_exp = tar->ft_exp;
+ if (target->ft_exp != NULL)
+ class_export_get(target->ft_exp);
+ target->ft_srv = tar->ft_srv;
+ target->ft_idx = tar->ft_idx;
+
+ list_add_tail(&target->ft_chain,
+ &fld->lcf_targets);
+
+ fld->lcf_count++;
+ spin_unlock(&fld->lcf_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(fld_client_add_target);
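+
+/*
+ * Registration sketch (illustrative; the wrapper and index value are
+ * hypothetical): the caller fills a lu_fld_target on the stack and hands it
+ * to fld_client_add_target(), which copies the fields into its own
+ * allocation. Targets can only be added while the client is still in the
+ * LUSTRE_FLD_INIT state.
+ */
+#if 0	/* example only */
+static int example_add_mdt(struct lu_client_fld *fld,
+			   struct obd_export *exp, __u64 idx)
+{
+	struct lu_fld_target tar = {
+		.ft_exp = exp,
+		.ft_srv = NULL,
+		.ft_idx = idx,
+	};
+
+	return fld_client_add_target(fld, &tar);
+}
+#endif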
+
+/* Remove export from FLD */
+int fld_client_del_target(struct lu_client_fld *fld, __u64 idx)
+{
+ struct lu_fld_target *target, *tmp;
+
+ spin_lock(&fld->lcf_lock);
+ list_for_each_entry_safe(target, tmp,
+ &fld->lcf_targets, ft_chain) {
+ if (target->ft_idx == idx) {
+ fld->lcf_count--;
+ list_del(&target->ft_chain);
+ spin_unlock(&fld->lcf_lock);
+
+ if (target->ft_exp != NULL)
+ class_export_put(target->ft_exp);
+
+ OBD_FREE_PTR(target);
+ return 0;
+ }
+ }
+ spin_unlock(&fld->lcf_lock);
+ return -ENOENT;
+}
+EXPORT_SYMBOL(fld_client_del_target);
+
+#ifdef LPROCFS
+struct proc_dir_entry *fld_type_proc_dir = NULL;
+
+static int fld_client_proc_init(struct lu_client_fld *fld)
+{
+ int rc;
+
+ fld->lcf_proc_dir = lprocfs_register(fld->lcf_name,
+ fld_type_proc_dir,
+ NULL, NULL);
+
+ if (IS_ERR(fld->lcf_proc_dir)) {
+ CERROR("%s: LProcFS failed in fld-init\n",
+ fld->lcf_name);
+ rc = PTR_ERR(fld->lcf_proc_dir);
+ return rc;
+ }
+
+ rc = lprocfs_add_vars(fld->lcf_proc_dir,
+ fld_client_proc_list, fld);
+ if (rc) {
+ CERROR("%s: Can't init FLD proc, rc %d\n",
+ fld->lcf_name, rc);
+ GOTO(out_cleanup, rc);
+ }
+
+ return 0;
+
+out_cleanup:
+ fld_client_proc_fini(fld);
+ return rc;
+}
+
+void fld_client_proc_fini(struct lu_client_fld *fld)
+{
+ if (fld->lcf_proc_dir) {
+ if (!IS_ERR(fld->lcf_proc_dir))
+ lprocfs_remove(&fld->lcf_proc_dir);
+ fld->lcf_proc_dir = NULL;
+ }
+}
+#else
+static int fld_client_proc_init(struct lu_client_fld *fld)
+{
+ return 0;
+}
+
+void fld_client_proc_fini(struct lu_client_fld *fld)
+{
+ return;
+}
+#endif
+
+EXPORT_SYMBOL(fld_client_proc_fini);
+
+static inline int hash_is_sane(int hash)
+{
+ return (hash >= 0 && hash < ARRAY_SIZE(fld_hash));
+}
+
+int fld_client_init(struct lu_client_fld *fld,
+ const char *prefix, int hash)
+{
+ int cache_size, cache_threshold;
+ int rc;
+
+ LASSERT(fld != NULL);
+
+ snprintf(fld->lcf_name, sizeof(fld->lcf_name),
+ "cli-%s", prefix);
+
+ if (!hash_is_sane(hash)) {
+ CERROR("%s: Wrong hash function %#x\n",
+ fld->lcf_name, hash);
+ return -EINVAL;
+ }
+
+ fld->lcf_count = 0;
+ spin_lock_init(&fld->lcf_lock);
+ fld->lcf_hash = &fld_hash[hash];
+ fld->lcf_flags = LUSTRE_FLD_INIT;
+ INIT_LIST_HEAD(&fld->lcf_targets);
+
+ cache_size = FLD_CLIENT_CACHE_SIZE /
+ sizeof(struct fld_cache_entry);
+
+ cache_threshold = cache_size *
+ FLD_CLIENT_CACHE_THRESHOLD / 100;
+
+ fld->lcf_cache = fld_cache_init(fld->lcf_name,
+ cache_size, cache_threshold);
+ if (IS_ERR(fld->lcf_cache)) {
+ rc = PTR_ERR(fld->lcf_cache);
+ fld->lcf_cache = NULL;
+ GOTO(out, rc);
+ }
+
+ rc = fld_client_proc_init(fld);
+ if (rc)
+ GOTO(out, rc);
+out:
+ if (rc)
+ fld_client_fini(fld);
+ else
+ CDEBUG(D_INFO, "%s: Using \"%s\" hash\n",
+ fld->lcf_name, fld->lcf_hash->fh_name);
+ return rc;
+}
+EXPORT_SYMBOL(fld_client_init);
+
+void fld_client_fini(struct lu_client_fld *fld)
+{
+ struct lu_fld_target *target, *tmp;
+
+ spin_lock(&fld->lcf_lock);
+ list_for_each_entry_safe(target, tmp,
+ &fld->lcf_targets, ft_chain) {
+ fld->lcf_count--;
+ list_del(&target->ft_chain);
+ if (target->ft_exp != NULL)
+ class_export_put(target->ft_exp);
+ OBD_FREE_PTR(target);
+ }
+ spin_unlock(&fld->lcf_lock);
+
+ if (fld->lcf_cache != NULL) {
+ if (!IS_ERR(fld->lcf_cache))
+ fld_cache_fini(fld->lcf_cache);
+ fld->lcf_cache = NULL;
+ }
+}
+EXPORT_SYMBOL(fld_client_fini);
+
+int fld_client_rpc(struct obd_export *exp,
+ struct lu_seq_range *range, __u32 fld_op)
+{
+ struct ptlrpc_request *req;
+ struct lu_seq_range *prange;
+ __u32 *op;
+ int rc;
+ struct obd_import *imp;
+
+ LASSERT(exp != NULL);
+
+ imp = class_exp2cliimp(exp);
+ req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_QUERY, LUSTRE_MDS_VERSION,
+ FLD_QUERY);
+ if (req == NULL)
+ return -ENOMEM;
+
+ op = req_capsule_client_get(&req->rq_pill, &RMF_FLD_OPC);
+ *op = fld_op;
+
+ prange = req_capsule_client_get(&req->rq_pill, &RMF_FLD_MDFLD);
+ *prange = *range;
+
+ ptlrpc_request_set_replen(req);
+ req->rq_request_portal = FLD_REQUEST_PORTAL;
+ ptlrpc_at_set_req_timeout(req);
+
+ if (fld_op == FLD_LOOKUP &&
+ imp->imp_connect_flags_orig & OBD_CONNECT_MDS_MDS)
+ req->rq_allow_replay = 1;
+
+ if (fld_op != FLD_LOOKUP)
+ mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
+ fld_enter_request(&exp->exp_obd->u.cli);
+ rc = ptlrpc_queue_wait(req);
+ fld_exit_request(&exp->exp_obd->u.cli);
+ if (fld_op != FLD_LOOKUP)
+ mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
+ if (rc)
+ GOTO(out_req, rc);
+
+ prange = req_capsule_server_get(&req->rq_pill, &RMF_FLD_MDFLD);
+ if (prange == NULL)
+ GOTO(out_req, rc = -EFAULT);
+ *range = *prange;
+out_req:
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+int fld_client_lookup(struct lu_client_fld *fld, seqno_t seq, mdsno_t *mds,
+ __u32 flags, const struct lu_env *env)
+{
+ struct lu_seq_range res = { 0 };
+ struct lu_fld_target *target;
+ int rc;
+
+ fld->lcf_flags |= LUSTRE_FLD_RUN;
+
+ rc = fld_cache_lookup(fld->lcf_cache, seq, &res);
+ if (rc == 0) {
+ *mds = res.lsr_index;
+ return 0;
+ }
+
+ /* Can not find it in the cache */
+ target = fld_client_get_target(fld, seq);
+ LASSERT(target != NULL);
+
+ CDEBUG(D_INFO, "%s: Lookup fld entry (seq: "LPX64") on "
+ "target %s (idx "LPU64")\n", fld->lcf_name, seq,
+ fld_target_name(target), target->ft_idx);
+
+ res.lsr_start = seq;
+ fld_range_set_type(&res, flags);
+ rc = fld_client_rpc(target->ft_exp, &res, FLD_LOOKUP);
+
+ if (rc == 0) {
+ *mds = res.lsr_index;
+
+ fld_cache_insert(fld->lcf_cache, &res);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(fld_client_lookup);
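+
+/*
+ * Lookup sketch (illustrative; the wrapper is hypothetical): resolve which
+ * MDT index serves a given sequence, consulting the local cache first and
+ * falling back to an RPC on a miss. The flags argument selects the range
+ * type; the concrete constants live in the FID headers and are not shown
+ * here.
+ */
+#if 0	/* example only */
+static int example_seq_to_mdt(const struct lu_env *env,
+			      struct lu_client_fld *fld,
+			      seqno_t seq, __u32 flags, mdsno_t *mds)
+{
+	int rc = fld_client_lookup(fld, seq, mds, flags, env);
+
+	if (rc)
+		CERROR("%s: cannot resolve seq "LPX64", rc %d\n",
+		       fld->lcf_name, seq, rc);
+	return rc;
+}
+#endif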
+
+void fld_client_flush(struct lu_client_fld *fld)
+{
+ fld_cache_flush(fld->lcf_cache);
+}
+EXPORT_SYMBOL(fld_client_flush);
+
+static int __init fld_mod_init(void)
+{
+ fld_type_proc_dir = lprocfs_register(LUSTRE_FLD_NAME,
+ proc_lustre_root,
+ NULL, NULL);
+ if (IS_ERR(fld_type_proc_dir))
+ return PTR_ERR(fld_type_proc_dir);
+
+ LU_CONTEXT_KEY_INIT(&fld_thread_key);
+ lu_context_key_register(&fld_thread_key);
+ return 0;
+}
+
+static void __exit fld_mod_exit(void)
+{
+ lu_context_key_degister(&fld_thread_key);
+ if (fld_type_proc_dir != NULL && !IS_ERR(fld_type_proc_dir)) {
+ lprocfs_remove(&fld_type_proc_dir);
+ fld_type_proc_dir = NULL;
+ }
+}
+
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("Lustre FLD");
+MODULE_LICENSE("GPL");
+
+module_init(fld_mod_init)
+module_exit(fld_mod_exit)
diff --git a/drivers/staging/lustre/lustre/fld/lproc_fld.c b/drivers/staging/lustre/lustre/fld/lproc_fld.c
new file mode 100644
index 0000000..052f7d5
--- /dev/null
+++ b/drivers/staging/lustre/lustre/fld/lproc_fld.c
@@ -0,0 +1,166 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, 2013, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/fld/lproc_fld.c
+ *
+ * FLD (FIDs Location Database)
+ *
+ * Author: Yury Umanets <umka@clusterfs.com>
+ * Di Wang <di.wang@whamcloud.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_FLD
+
+# include <linux/libcfs/libcfs.h>
+# include <linux/module.h>
+
+#include <obd.h>
+#include <obd_class.h>
+#include <dt_object.h>
+#include <md_object.h>
+#include <obd_support.h>
+#include <lustre_req_layout.h>
+#include <lustre_fld.h>
+#include <lustre_fid.h>
+#include "fld_internal.h"
+
+#ifdef LPROCFS
+static int
+fld_proc_targets_seq_show(struct seq_file *m, void *unused)
+{
+ struct lu_client_fld *fld = (struct lu_client_fld *)m->private;
+ struct lu_fld_target *target;
+
+ LASSERT(fld != NULL);
+
+ spin_lock(&fld->lcf_lock);
+ list_for_each_entry(target,
+ &fld->lcf_targets, ft_chain)
+ seq_printf(m, "%s\n", fld_target_name(target));
+ spin_unlock(&fld->lcf_lock);
+
+ return 0;
+}
+
+static int
+fld_proc_hash_seq_show(struct seq_file *m, void *unused)
+{
+ struct lu_client_fld *fld = (struct lu_client_fld *)m->private;
+
+ LASSERT(fld != NULL);
+
+ spin_lock(&fld->lcf_lock);
+ seq_printf(m, "%s\n", fld->lcf_hash->fh_name);
+ spin_unlock(&fld->lcf_lock);
+
+ return 0;
+}
+
+static ssize_t
+fld_proc_hash_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct lu_client_fld *fld = ((struct seq_file *)file->private_data)->private;
+ struct lu_fld_hash *hash = NULL;
+ int i;
+
+ LASSERT(fld != NULL);
+
+ for (i = 0; fld_hash[i].fh_name != NULL; i++) {
+ if (count != strlen(fld_hash[i].fh_name))
+ continue;
+
+ if (!strncmp(fld_hash[i].fh_name, buffer, count)) {
+ hash = &fld_hash[i];
+ break;
+ }
+ }
+
+ if (hash != NULL) {
+ spin_lock(&fld->lcf_lock);
+ fld->lcf_hash = hash;
+ spin_unlock(&fld->lcf_lock);
+
+ CDEBUG(D_INFO, "%s: Changed hash to \"%s\"\n",
+ fld->lcf_name, hash->fh_name);
+ }
+
+ return count;
+}
+
+static ssize_t
+fld_proc_cache_flush_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ struct lu_client_fld *fld = file->private_data;
+
+ LASSERT(fld != NULL);
+
+ fld_cache_flush(fld->lcf_cache);
+
+ CDEBUG(D_INFO, "%s: Lookup cache is flushed\n", fld->lcf_name);
+
+ return count;
+}
+
+static int fld_proc_cache_flush_open(struct inode *inode, struct file *file)
+{
+ file->private_data = PDE_DATA(inode);
+ return 0;
+}
+
+static int fld_proc_cache_flush_release(struct inode *inode, struct file *file)
+{
+ file->private_data = NULL;
+ return 0;
+}
+
+struct file_operations fld_proc_cache_flush_fops = {
+ .owner = THIS_MODULE,
+ .open = fld_proc_cache_flush_open,
+ .write = fld_proc_cache_flush_write,
+ .release = fld_proc_cache_flush_release,
+};
+
+LPROC_SEQ_FOPS_RO(fld_proc_targets);
+LPROC_SEQ_FOPS(fld_proc_hash);
+
+struct lprocfs_vars fld_client_proc_list[] = {
+ { "targets", &fld_proc_targets_fops },
+ { "hash", &fld_proc_hash_fops },
+ { "cache_flush", &fld_proc_cache_flush_fops },
+ { NULL }};
+
+#endif /* LPROCFS */
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
new file mode 100644
index 0000000..edb40af
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/cl_object.h
@@ -0,0 +1,3279 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+#ifndef _LUSTRE_CL_OBJECT_H
+#define _LUSTRE_CL_OBJECT_H
+
+/** \defgroup clio clio
+ *
+ * Client objects implement io operations and cache pages.
+ *
+ * Examples: lov and osc are implementations of cl interface.
+ *
+ * Big Theory Statement.
+ *
+ * Layered objects.
+ *
+ * Client implementation is based on the following data-types:
+ *
+ * - cl_object
+ *
+ * - cl_page
+ *
+ * - cl_lock represents an extent lock on an object.
+ *
+ * - cl_io represents high-level i/o activity such as a whole read/write
+ * system call, or the write-out of pages from under a lock being
+ * canceled. cl_io has sub-ios that can be stopped and resumed
+ * independently, thus achieving a high degree of transfer
+ * parallelism. A single cl_io can be advanced forward by
+ * multiple threads (although in the most usual case of a
+ * read/write system call it is associated with the single user
+ * thread that issued the system call).
+ *
+ * - cl_req represents a collection of pages for a transfer. cl_req is
+ * constructed by req-forming engine that tries to saturate
+ * transport with large and continuous transfers.
+ *
+ * Terminology
+ *
+ * - to avoid confusion high-level I/O operation like read or write system
+ * call is referred to as "an io", whereas low-level I/O operation, like
+ * RPC, is referred to as "a transfer"
+ *
+ * - "generic code" means generic (not file system specific) code in the
+ * hosting environment. "cl-code" means code (mostly in cl_*.c files) that
+ * is not layer specific.
+ *
+ * Locking.
+ *
+ * - i_mutex
+ * - PG_locked
+ * - cl_object_header::coh_page_guard
+ * - cl_object_header::coh_lock_guard
+ * - lu_site::ls_guard
+ *
+ * See the top comment in cl_object.c for the description of overall locking and
+ * reference-counting design.
+ *
+ * See comments below for the description of i/o, page, and dlm-locking
+ * design.
+ *
+ * @{
+ */
+
+/*
+ * super-class definitions.
+ */
+#include <lu_object.h>
+#include <lvfs.h>
+# include <linux/mutex.h>
+# include <linux/radix-tree.h>
+
+struct inode;
+
+struct cl_device;
+struct cl_device_operations;
+
+struct cl_object;
+struct cl_object_page_operations;
+struct cl_object_lock_operations;
+
+struct cl_page;
+struct cl_page_slice;
+struct cl_lock;
+struct cl_lock_slice;
+
+struct cl_lock_operations;
+struct cl_page_operations;
+
+struct cl_io;
+struct cl_io_slice;
+
+struct cl_req;
+struct cl_req_slice;
+
+/**
+ * Operations for each data device in the client stack.
+ *
+ * \see vvp_cl_ops, lov_cl_ops, lovsub_cl_ops, osc_cl_ops
+ */
+struct cl_device_operations {
+ /**
+ * Initialize cl_req. This method is called top-to-bottom on all
+ * devices in the stack to get them a chance to allocate layer-private
+ * data, and to attach them to the cl_req by calling
+ * cl_req_slice_add().
+ *
+ * \see osc_req_init(), lov_req_init(), lovsub_req_init()
+ * \see ccc_req_init()
+ */
+ int (*cdo_req_init)(const struct lu_env *env, struct cl_device *dev,
+ struct cl_req *req);
+};
+
+/**
+ * Device in the client stack.
+ *
+ * \see ccc_device, lov_device, lovsub_device, osc_device
+ */
+struct cl_device {
+ /** Super-class. */
+ struct lu_device cd_lu_dev;
+ /** Per-layer operation vector. */
+ const struct cl_device_operations *cd_ops;
+};
+
+/** \addtogroup cl_object cl_object
+ * @{ */
+/**
+ * "Data attributes" of cl_object. Data attributes can be updated
+ * independently for a sub-object, and top-object's attributes are calculated
+ * from sub-objects' ones.
+ */
+struct cl_attr {
+ /** Object size, in bytes */
+ loff_t cat_size;
+ /**
+ * Known minimal size, in bytes.
+ *
+ * This is only valid when at least one DLM lock is held.
+ */
+ loff_t cat_kms;
+ /** Modification time. Measured in seconds since epoch. */
+ time_t cat_mtime;
+ /** Access time. Measured in seconds since epoch. */
+ time_t cat_atime;
+ /** Change time. Measured in seconds since epoch. */
+ time_t cat_ctime;
+ /**
+ * Blocks allocated to this cl_object on the server file system.
+ *
+ * \todo XXX An interface for block size is needed.
+ */
+ __u64 cat_blocks;
+ /**
+ * User identifier for quota purposes.
+ */
+ uid_t cat_uid;
+ /**
+ * Group identifier for quota purposes.
+ */
+ gid_t cat_gid;
+};
+
+/**
+ * Fields in cl_attr that are being set.
+ */
+enum cl_attr_valid {
+ CAT_SIZE = 1 << 0,
+ CAT_KMS = 1 << 1,
+ CAT_MTIME = 1 << 3,
+ CAT_ATIME = 1 << 4,
+ CAT_CTIME = 1 << 5,
+ CAT_BLOCKS = 1 << 6,
+ CAT_UID = 1 << 7,
+ CAT_GID = 1 << 8
+};
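+
+/*
+ * Illustrative use of the bitmask (hypothetical helper): a caller that only
+ * updates the size and modification time fills those two fields of cl_attr
+ * and passes the matching bits as the "valid" argument of
+ * cl_object_operations::coo_attr_set() declared below.
+ */
+#if 0	/* example only */
+static void example_fill_attr(struct cl_attr *attr, loff_t size,
+			      time_t mtime, unsigned *valid)
+{
+	attr->cat_size  = size;
+	attr->cat_mtime = mtime;
+	*valid = CAT_SIZE | CAT_MTIME;
+}
+#endif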
+
+/**
+ * Sub-class of lu_object with methods common for objects on the client
+ * stacks.
+ *
+ * cl_object: represents a regular file system object, both a file and a
+ * stripe. cl_object is based on lu_object: it is identified by a fid,
+ * layered, cached, hashed, and lrued. An important distinction from the server
+ * side, where md_object and dt_object are used, is that cl_object "fans out"
+ * at the lov/sns level: depending on the file layout, a single file is
+ * represented as a set of "sub-objects" (stripes). At the implementation
+ * level, struct lov_object contains an array of cl_objects. Each sub-object
+ * is a full-fledged cl_object, having its fid, living in the lru and hash
+ * table.
+ *
+ * This leads to the next important difference with the server side: on the
+ * client, it's quite usual to have objects with the different sequence of
+ * layers. For example, typical top-object is composed of the following
+ * layers:
+ *
+ * - vvp
+ * - lov
+ *
+ * whereas its sub-objects are composed of
+ *
+ * - lovsub
+ * - osc
+ *
+ * layers. Here "lovsub" is a mostly dummy layer, whose purpose is to keep
+ * track of the object-subobject relationship.
+ *
+ * Sub-objects are not cached independently: when top-object is about to
+ * be discarded from the memory, all its sub-objects are torn-down and
+ * destroyed too.
+ *
+ * \see ccc_object, lov_object, lovsub_object, osc_object
+ */
+struct cl_object {
+ /** super class */
+ struct lu_object co_lu;
+ /** per-object-layer operations */
+ const struct cl_object_operations *co_ops;
+ /** offset of page slice in cl_page buffer */
+ int co_slice_off;
+};
+
+/**
+ * Description of the client object configuration. This is used for the
+ * creation of a new client object that is identified by more state than
+ * just a fid.
+ */
+struct cl_object_conf {
+ /** Super-class. */
+ struct lu_object_conf coc_lu;
+ union {
+ /**
+ * Object layout. This is consumed by lov.
+ */
+ struct lustre_md *coc_md;
+ /**
+ * Description of particular stripe location in the
+ * cluster. This is consumed by osc.
+ */
+ struct lov_oinfo *coc_oinfo;
+ } u;
+ /**
+ * VFS inode. This is consumed by vvp.
+ */
+ struct inode *coc_inode;
+ /**
+ * Layout lock handle.
+ */
+ struct ldlm_lock *coc_lock;
+ /**
+ * Operation to handle layout, OBJECT_CONF_XYZ.
+ */
+ int coc_opc;
+};
+
+enum {
+ /** configure layout, set up a new stripe, must be called while
+ * holding layout lock. */
+ OBJECT_CONF_SET = 0,
+ /** invalidate the current stripe configuration due to losing
+ * layout lock. */
+ OBJECT_CONF_INVALIDATE = 1,
+ /** wait for old layout to go away so that new layout can be
+ * set up. */
+ OBJECT_CONF_WAIT = 2
+};
+
+/**
+ * Operations implemented for each cl object layer.
+ *
+ * \see vvp_ops, lov_ops, lovsub_ops, osc_ops
+ */
+struct cl_object_operations {
+ /**
+ * Initialize page slice for this layer. Called top-to-bottom through
+ * every object layer when a new cl_page is instantiated. A layer
+ * keeping private per-page data, or requiring its own page operations
+ * vector, should allocate that data here and attach it to the page
+ * by calling cl_page_slice_add(). \a vmpage is locked (in the VM
+ * sense). Optional.
+ *
+ * \retval NULL success.
+ *
+ * \retval ERR_PTR(errno) failure code.
+ *
+ * \retval valid-pointer pointer to already existing referenced page
+ * to be used instead of newly created.
+ */
+ int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, struct page *vmpage);
+ /**
+ * Initialize lock slice for this layer. Called top-to-bottom through
+ * every object layer when a new cl_lock is instantiated. A layer
+ * keeping private per-lock data, or requiring its own lock operations
+ * vector, should allocate that data here and attach it to the lock
+ * by calling cl_lock_slice_add(). Mandatory.
+ */
+ int (*coo_lock_init)(const struct lu_env *env,
+ struct cl_object *obj, struct cl_lock *lock,
+ const struct cl_io *io);
+ /**
+ * Initialize io state for a given layer.
+ *
+ * Called top-to-bottom once per io existence to initialize io
+ * state. If a layer wants to keep some state for this type of io, it
+ * has to embed a struct cl_io_slice in lu_env::le_ses and register the
+ * slice with cl_io_slice_add(). It is guaranteed that all threads
+ * participating in this io share the same session.
+ */
+ int (*coo_io_init)(const struct lu_env *env,
+ struct cl_object *obj, struct cl_io *io);
+ /**
+ * Fill portion of \a attr that this layer controls. This method is
+ * called top-to-bottom through all object layers.
+ *
+ * \pre cl_object_header::coh_attr_guard of the top-object is locked.
+ *
+ * \return 0: to continue
+ * \return +ve: to stop iterating through layers (but 0 is returned
+ * from enclosing cl_object_attr_get())
+ * \return -ve: to signal error
+ */
+ int (*coo_attr_get)(const struct lu_env *env, struct cl_object *obj,
+ struct cl_attr *attr);
+ /**
+ * Update attributes.
+ *
+ * \a valid is a bitmask composed from enum #cl_attr_valid, and
+ * indicating what attributes are to be set.
+ *
+ * \pre cl_object_header::coh_attr_guard of the top-object is locked.
+ *
+ * \return the same convention as for
+ * cl_object_operations::coo_attr_get() is used.
+ */
+ int (*coo_attr_set)(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_attr *attr, unsigned valid);
+ /**
+ * Update object configuration. Called top-to-bottom to modify object
+ * configuration.
+ *
+ * XXX error conditions and handling.
+ */
+ int (*coo_conf_set)(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_object_conf *conf);
+ /**
+ * Glimpse ast. Executed when glimpse ast arrives for a lock on this
+ * object. Layers are supposed to fill parts of \a lvb that will be
+ * shipped to the glimpse originator as a glimpse result.
+ *
+ * \see ccc_object_glimpse(), lovsub_object_glimpse(),
+ * \see osc_object_glimpse()
+ */
+ int (*coo_glimpse)(const struct lu_env *env,
+ const struct cl_object *obj, struct ost_lvb *lvb);
+};
+
+/**
+ * Extended header for client object.
+ */
+struct cl_object_header {
+ /** Standard lu_object_header. cl_object::co_lu::lo_header points
+ * here. */
+ struct lu_object_header coh_lu;
+ /** \name locks
+ * \todo XXX move locks below to the separate cache-lines, they are
+ * mostly useless otherwise.
+ */
+ /** @{ */
+ /** Lock protecting page tree. */
+ spinlock_t coh_page_guard;
+ /** Lock protecting lock list. */
+ spinlock_t coh_lock_guard;
+ /** @} locks */
+ /** Radix tree of cl_page's, cached for this object. */
+ struct radix_tree_root coh_tree;
+ /** # of pages in radix tree. */
+ unsigned long coh_pages;
+ /** List of cl_lock's granted for this object. */
+ struct list_head coh_locks;
+
+ /**
+ * Parent object. It is assumed that an object has a well-defined
+ * parent, but not a well-defined child (there may be multiple
+ * sub-objects, for the same top-object). cl_object_header::coh_parent
+ * field allows certain code to be written generically, without
+ * limiting possible cl_object layouts unduly.
+ */
+ struct cl_object_header *coh_parent;
+ /**
+ * Protects consistency between cl_attr of parent object and
+ * attributes of sub-objects, that the former is calculated ("merged")
+ * from.
+ *
+ * \todo XXX this can be read/write lock if needed.
+ */
+ spinlock_t coh_attr_guard;
+ /**
+ * Size of cl_page + page slices
+ */
+ unsigned short coh_page_bufsize;
+ /**
+ * Number of objects above this one: 0 for a top-object, 1 for its
+ * sub-object, etc.
+ */
+ unsigned char coh_nesting;
+};
+
+/**
+ * Helper macro: iterate over all layers of the object \a obj, assigning every
+ * layer top-to-bottom to \a slice.
+ */
+#define cl_object_for_each(slice, obj) \
+ list_for_each_entry((slice), \
+ &(obj)->co_lu.lo_header->loh_layers, \
+ co_lu.lo_linkage)
+/**
+ * Helper macro: iterate over all layers of the object \a obj, assigning every
+ * layer bottom-to-top to \a slice.
+ */
+#define cl_object_for_each_reverse(slice, obj) \
+ list_for_each_entry_reverse((slice), \
+ &(obj)->co_lu.lo_header->loh_layers, \
+ co_lu.lo_linkage)
+/** @} cl_object */
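+
+/*
+ * Usage sketch for the iteration macros above; the helper name is
+ * hypothetical and the real cl_object_attr_get() may differ in details. It
+ * shows the documented coo_attr_get() return convention (0 continue, +ve
+ * stop with success, -ve error) applied top-to-bottom:
+ *
+ *	static int attr_get_sketch(const struct lu_env *env,
+ *				   struct cl_object *top, struct cl_attr *attr)
+ *	{
+ *		struct cl_object *slice;
+ *		int result = 0;
+ *
+ *		cl_object_for_each(slice, top) {
+ *			if (slice->co_ops->coo_attr_get == NULL)
+ *				continue;
+ *			result = slice->co_ops->coo_attr_get(env, slice, attr);
+ *			if (result != 0) {
+ *				if (result > 0)
+ *					result = 0;
+ *				break;
+ *			}
+ *		}
+ *		return result;
+ *	}
+ */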
+
+#ifndef pgoff_t
+#define pgoff_t unsigned long
+#endif
+
+#define CL_PAGE_EOF ((pgoff_t)~0ull)
+
+/** \addtogroup cl_page cl_page
+ * @{ */
+
+/** \struct cl_page
+ * Layered client page.
+ *
+ * cl_page: represents a portion of a file, cached in the memory. All pages
+ * of the given file are of the same size, and are kept in the radix tree
+ * hanging off the cl_object. cl_page doesn't fan out, but as sub-objects
+ * of the top-level file object are first class cl_objects, they have their
+ * own radix trees of pages and hence a page is implemented as a sequence of
+ * struct cl_page's, linked into a doubly-linked list through the
+ * cl_page::cp_parent and cl_page::cp_child pointers, each residing in the
+ * corresponding radix tree at the corresponding logical offset.
+ *
+ * cl_page is associated with a VM page of the hosting environment (struct
+ * page in the Linux kernel, for example). It is assumed that this
+ * association is implemented by one of the cl_page layers (the top layer in
+ * the current design) that
+ *
+ * - intercepts per-VM-page call-backs made by the environment (e.g.,
+ * memory pressure),
+ *
+ * - translates state (page flag bits) and locking between lustre and
+ * environment.
+ *
+ * The association between cl_page and struct page is immutable and
+ * established when cl_page is created.
+ *
+ * cl_page can be "owned" by a particular cl_io (see below), guaranteeing
+ * this io an exclusive access to this page w.r.t. other io attempts and
+ * various events changing page state (such as transfer completion, or
+ * eviction of the page from the memory). Note, that in general cl_io
+ * cannot be identified with a particular thread, and page ownership is not
+ * exactly equal to the current thread holding a lock on the page. Layer
+ * implementing association between cl_page and struct page has to implement
+ * ownership on top of available synchronization mechanisms.
+ *
+ * While the lustre client maintains the notion of page ownership by io,
+ * hosting MM/VM usually has its own page concurrency control
+ * mechanisms. For example, in Linux, page access is synchronized by the
+ * per-page PG_locked bit-lock, and generic kernel code (generic_file_*())
+ * takes care to acquire and release such locks as necessary around the
+ * calls to the file system methods (->readpage(), ->prepare_write(),
+ * ->commit_write(), etc.). This leads to the situation when there are two
+ * different ways to own a page in the client:
+ *
+ * - client code explicitly and voluntary owns the page (cl_page_own());
+ *
+ * - VM locks a page and then calls the client, that has "to assume"
+ * the ownership from the VM (cl_page_assume()).
+ *
+ * Dual methods to release ownership are cl_page_disown() and
+ * cl_page_unassume().
+ *
+ * cl_page is reference counted (cl_page::cp_ref). When reference counter
+ * drops to 0, the page is returned to the cache, unless it is in
+ * cl_page_state::CPS_FREEING state, in which case it is immediately
+ * destroyed.
+ *
+ * The general logic guaranteeing the absence of "existential races" for
+ * pages is the following:
+ *
+ * - there are fixed known ways for a thread to obtain a new reference
+ * to a page:
+ *
+ * - by doing a lookup in the cl_object radix tree, protected by the
+ * spin-lock;
+ *
+ * - by starting from VM-locked struct page and following some
+ * hosting environment method (e.g., following ->private pointer in
+ * the case of Linux kernel), see cl_vmpage_page();
+ *
+ * - when the page enters cl_page_state::CPS_FREEING state, all these
+ * ways are severed with the proper synchronization
+ * (cl_page_delete());
+ *
+ * - entry into cl_page_state::CPS_FREEING is serialized by the VM page
+ * lock;
+ *
+ * - no new references to the page in cl_page_state::CPS_FREEING state
+ * are allowed (checked in cl_page_get()).
+ *
+ * Together this guarantees that when the last reference to a
+ * cl_page_state::CPS_FREEING page is released, it is safe to destroy the
+ * page, as no new references to it can be acquired at that point, and
+ * none remain.
+ *
+ * cl_page is a state machine. States are enumerated in enum
+ * cl_page_state. Possible state transitions are enumerated in
+ * cl_page_state_set(). State transition process (i.e., actual changing of
+ * cl_page::cp_state field) is protected by the lock on the underlying VM
+ * page.
+ *
+ * Linux Kernel implementation.
+ *
+ * Binding between cl_page and struct page is implemented in the vvp
+ * layer. cl_page is attached to the
+ * ->private pointer of the struct page, together with the setting of
+ * PG_private bit in page->flags, and acquiring additional reference on the
+ * struct page (much like struct buffer_head, or any similar file system
+ * private data structures).
+ *
+ * PG_locked lock is used to implement both ownership and transfer
+ * synchronization, that is, page is VM-locked in CPS_{OWNED,PAGE{IN,OUT}}
+ * states. No additional references are acquired for the duration of the
+ * transfer.
+ *
+ * \warning *THIS IS NOT* the behavior expected by the Linux kernel, where
+ * write-out is "protected" by the special PG_writeback bit.
+ */
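+
+/*
+ * A minimal sketch of the two ownership paths described above. The exact
+ * argument lists are assumptions; only the function names come from this
+ * documentation:
+ *
+ *	- io-driven ownership: the io explicitly takes and releases the page
+ *
+ *		if (cl_page_own(env, io, page) == 0) {
+ *			... operate on the page on behalf of io ...
+ *			cl_page_disown(env, io, page);
+ *		}
+ *
+ *	- VM-driven ownership: the VM locked the page first, and the client
+ *	  "assumes" ownership, later handing it back without unlocking
+ *
+ *		cl_page_assume(env, io, page);
+ *		... operate on the page ...
+ *		cl_page_unassume(env, io, page);
+ */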
+
+/**
+ * States of cl_page. cl_page.c assumes particular order here.
+ *
+ * The page state machine is rather crude, as it doesn't recognize finer page
+ * states like "dirty" or "up to date". This is because such states are not
+ * always well defined for the whole stack (see, for example, the
+ * implementation of the read-ahead, that hides page up-to-dateness to track
+ * cache hits accurately). Such sub-states are maintained by the layers that
+ * are interested in them.
+ */
+enum cl_page_state {
+ /**
+ * Page is in the cache, un-owned. Page leaves cached state in the
+ * following cases:
+ *
+ * - [cl_page_state::CPS_OWNED] io comes across the page and
+ * owns it;
+ *
+ * - [cl_page_state::CPS_PAGEOUT] page is dirty, the
+ * req-formation engine decides that it wants to include this page
+ * into a cl_req being constructed, and yanks it from the cache;
+ *
+ * - [cl_page_state::CPS_FREEING] VM callback is executed to
+ * evict the page from memory;
+ *
+ * \invariant cl_page::cp_owner == NULL && cl_page::cp_req == NULL
+ */
+ CPS_CACHED,
+ /**
+ * Page is exclusively owned by some cl_io. Page may end up in this
+ * state as a result of
+ *
+ * - io creating new page and immediately owning it;
+ *
+ * - [cl_page_state::CPS_CACHED] io finding existing cached page
+ * and owning it;
+ *
+ * - [cl_page_state::CPS_OWNED] io finding existing owned page
+ * and waiting for owner to release the page;
+ *
+ * Page leaves owned state in the following cases:
+ *
+ * - [cl_page_state::CPS_CACHED] io decides to leave the page in
+ * the cache, doing nothing;
+ *
+ * - [cl_page_state::CPS_PAGEIN] io starts read transfer for
+ * this page;
+ *
+ * - [cl_page_state::CPS_PAGEOUT] io starts immediate write
+ * transfer for this page;
+ *
+ * - [cl_page_state::CPS_FREEING] io decides to destroy this
+ * page (e.g., as part of truncate or extent lock cancellation).
+ *
+ * \invariant cl_page::cp_owner != NULL && cl_page::cp_req == NULL
+ */
+ CPS_OWNED,
+ /**
+ * Page is being written out, as a part of a transfer. This state is
+ * entered when req-formation logic decided that it wants this page to
+ * be sent through the wire _now_. Specifically, it means that once
+ * this state is achieved, transfer completion handler (with either
+ * success or failure indication) is guaranteed to be executed against
+ * this page independently of any locks and any scheduling decisions
+ * made by the hosting environment (that effectively means that the
+ * page is never put into cl_page_state::CPS_PAGEOUT state "in
+ * advance". This property is mentioned, because it is important when
+ * reasoning about possible dead-locks in the system). The page can
+ * enter this state as a result of
+ *
+ * - [cl_page_state::CPS_OWNED] an io requesting an immediate
+ * write-out of this page, or
+ *
+ * - [cl_page_state::CPS_CACHED] req-forming engine deciding
+ * that it has enough dirty pages cached to issue a "good"
+ * transfer.
+ *
+ * The page leaves cl_page_state::CPS_PAGEOUT state when the transfer
+ * is completed---it is moved into cl_page_state::CPS_CACHED state.
+ *
+ * Underlying VM page is locked for the duration of transfer.
+ *
+ * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req != NULL
+ */
+ CPS_PAGEOUT,
+ /**
+ * Page is being read in, as a part of a transfer. This is quite
+ * similar to the cl_page_state::CPS_PAGEOUT state, except that
+ * read-in is always "immediate"---there is no such thing as a sudden
+ * construction of a read cl_req from cached, presumably not up-to-date,
+ * pages.
+ *
+ * Underlying VM page is locked for the duration of transfer.
+ *
+ * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req != NULL
+ */
+ CPS_PAGEIN,
+ /**
+ * Page is being destroyed. This state is entered when client decides
+ * that page has to be deleted from its host object, as, e.g., a part
+ * of truncate.
+ *
+ * Once this state is reached, there is no way to escape it.
+ *
+ * \invariant: cl_page::cp_owner == NULL && cl_page::cp_req == NULL
+ */
+ CPS_FREEING,
+ CPS_NR
+};
+
+enum cl_page_type {
+ /** Host page: the page comes from the host inode to which the cl_page
+ * belongs. */
+ CPT_CACHEABLE = 1,
+
+ /** Transient page: a transient cl_page is used to bind a cl_page
+ * to a vmpage that does not belong to the same object as the cl_page.
+ * It is used in DirectIO, lockless IO and liblustre. */
+ CPT_TRANSIENT,
+};
+
+/**
+ * Flags maintained for every cl_page.
+ */
+enum cl_page_flags {
+ /**
+ * Set when pagein completes. Used for debugging (read completes at
+ * most once for a page).
+ */
+ CPF_READ_COMPLETED = 1 << 0
+};
+
+/**
+ * Fields are protected by the lock on struct page, except for atomics and
+ * immutables.
+ *
+ * \invariant Data type invariants are in cl_page_invariant(). Basically:
+ * cl_page::cp_parent and cl_page::cp_child are a well-formed double-linked
+ * list, consistent with the parent/child pointers in the cl_page::cp_obj and
+ * cl_page::cp_owner (when set).
+ */
+struct cl_page {
+ /** Reference counter. */
+ atomic_t cp_ref;
+ /** An object this page is a part of. Immutable after creation. */
+ struct cl_object *cp_obj;
+ /** Logical page index within the object. Immutable after creation. */
+ pgoff_t cp_index;
+ /** List of slices. Immutable after creation. */
+ struct list_head cp_layers;
+ /** Parent page, NULL for top-level page. Immutable after creation. */
+ struct cl_page *cp_parent;
+ /** Lower-layer page. NULL for bottommost page. Immutable after
+ * creation. */
+ struct cl_page *cp_child;
+ /**
+ * Page state. This field is const to avoid accidental updates; it is
+ * modified only internally within cl_page.c. Protected by a VM lock.
+ */
+ const enum cl_page_state cp_state;
+ /** Linkage of pages within group. Protected by cl_page::cp_mutex. */
+ struct list_head cp_batch;
+ /** Mutex serializing membership of a page in a batch. */
+ struct mutex cp_mutex;
+ /** Linkage of pages within cl_req. */
+ struct list_head cp_flight;
+ /** Transfer error. */
+ int cp_error;
+
+ /**
+ * Page type. Only CPT_TRANSIENT is used so far. Immutable after
+ * creation.
+ */
+ enum cl_page_type cp_type;
+
+ /**
+ * Owning IO in cl_page_state::CPS_OWNED state. Sub-page can be owned
+ * by sub-io. Protected by a VM lock.
+ */
+ struct cl_io *cp_owner;
+ /**
+ * Debug information: the task owning the page.
+ */
+ struct task_struct *cp_task;
+ /**
+ * Owning IO request in cl_page_state::CPS_PAGEOUT and
+ * cl_page_state::CPS_PAGEIN states. This field is maintained only in
+ * the top-level pages. Protected by a VM lock.
+ */
+ struct cl_req *cp_req;
+ /** List of references to this page, for debugging. */
+ struct lu_ref cp_reference;
+ /** Link to an object, for debugging. */
+ struct lu_ref_link cp_obj_ref;
+ /** Link to a queue, for debugging. */
+ struct lu_ref_link cp_queue_ref;
+ /** Per-page flags from enum cl_page_flags. Protected by a VM lock. */
+ unsigned cp_flags;
+ /** Assigned if doing a sync_io */
+ struct cl_sync_io *cp_sync_io;
+};
+
+/**
+ * Per-layer part of cl_page.
+ *
+ * \see ccc_page, lov_page, osc_page
+ */
+struct cl_page_slice {
+ struct cl_page *cpl_page;
+ /**
+ * Object slice corresponding to this page slice. Immutable after
+ * creation.
+ */
+ struct cl_object *cpl_obj;
+ const struct cl_page_operations *cpl_ops;
+ /** Linkage into cl_page::cp_layers. Immutable after creation. */
+ struct list_head cpl_linkage;
+};
+
+/**
+ * Lock mode. For the client extent locks.
+ *
+ * \warning: cl_lock_mode_match() assumes particular ordering here.
+ * \ingroup cl_lock
+ */
+enum cl_lock_mode {
+ /**
+ * Mode of a lock that protects no data, and exists only as a
+ * placeholder. This is used for `glimpse' requests. A phantom lock
+ * might get promoted to real lock at some point.
+ */
+ CLM_PHANTOM,
+ CLM_READ,
+ CLM_WRITE,
+ CLM_GROUP
+};
+
+/**
+ * Requested transfer type.
+ * \ingroup cl_req
+ */
+enum cl_req_type {
+ CRT_READ,
+ CRT_WRITE,
+ CRT_NR
+};
+
+/**
+ * Per-layer page operations.
+ *
+ * Methods taking an \a io argument are for the activity happening in the
+ * context of given \a io. Page is assumed to be owned by that io, except for
+ * the obvious cases (like cl_page_operations::cpo_own()).
+ *
+ * \see vvp_page_ops, lov_page_ops, osc_page_ops
+ */
+struct cl_page_operations {
+ /**
+ * cl_page<->struct page methods. Only one layer in the stack has to
+ * implement these. Current code assumes that this functionality is
+ * provided by the topmost layer, see cl_page_disown0() as an example.
+ */
+
+ /**
+ * \return the underlying VM page. Optional.
+ */
+ struct page *(*cpo_vmpage)(const struct lu_env *env,
+ const struct cl_page_slice *slice);
+ /**
+ * Called when \a io acquires this page into the exclusive
+ * ownership. When this method returns, it is guaranteed that the page
+ * is not owned by another io, and no transfer is going on against
+ * it. Optional.
+ *
+ * \see cl_page_own()
+ * \see vvp_page_own(), lov_page_own()
+ */
+ int (*cpo_own)(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io, int nonblock);
+ /** Called when ownership is yielded. Optional.
+ *
+ * \see cl_page_disown()
+ * \see vvp_page_disown()
+ */
+ void (*cpo_disown)(const struct lu_env *env,
+ const struct cl_page_slice *slice, struct cl_io *io);
+ /**
+ * Called for a page that is already "owned" by \a io from VM point of
+ * view. Optional.
+ *
+ * \see cl_page_assume()
+ * \see vvp_page_assume(), lov_page_assume()
+ */
+ void (*cpo_assume)(const struct lu_env *env,
+ const struct cl_page_slice *slice, struct cl_io *io);
+ /** Dual to cl_page_operations::cpo_assume(). Optional. Called
+ * bottom-to-top when IO releases a page without actually unlocking
+ * it.
+ *
+ * \see cl_page_unassume()
+ * \see vvp_page_unassume()
+ */
+ void (*cpo_unassume)(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io);
+ /**
+ * Announces whether the page contains valid data, as indicated by \a uptodate.
+ *
+ * \see cl_page_export()
+ * \see vvp_page_export()
+ */
+ void (*cpo_export)(const struct lu_env *env,
+ const struct cl_page_slice *slice, int uptodate);
+ /**
+ * Unmaps page from the user space (if it is mapped).
+ *
+ * \see cl_page_unmap()
+ * \see vvp_page_unmap()
+ */
+ int (*cpo_unmap)(const struct lu_env *env,
+ const struct cl_page_slice *slice, struct cl_io *io);
+ /**
+ * Checks whether underlying VM page is locked (in the suitable
+ * sense). Used for assertions.
+ *
+ * \retval -EBUSY: page is protected by a lock of a given mode;
+ * \retval -ENODATA: page is not protected by a lock;
+ * \retval 0: this layer cannot decide. (Should never happen.)
+ */
+ int (*cpo_is_vmlocked)(const struct lu_env *env,
+ const struct cl_page_slice *slice);
+ /**
+ * Page destruction.
+ */
+
+ /**
+ * Called when page is truncated from the object. Optional.
+ *
+ * \see cl_page_discard()
+ * \see vvp_page_discard(), osc_page_discard()
+ */
+ void (*cpo_discard)(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io);
+ /**
+ * Called when the page is removed from the cache and is about to be
+ * destroyed. Optional.
+ *
+ * \see cl_page_delete()
+ * \see vvp_page_delete(), osc_page_delete()
+ */
+ void (*cpo_delete)(const struct lu_env *env,
+ const struct cl_page_slice *slice);
+ /** Destructor. Frees resources and slice itself. */
+ void (*cpo_fini)(const struct lu_env *env,
+ struct cl_page_slice *slice);
+
+ /**
+ * Checks whether the page is protected by a cl_lock. This is a
+ * per-layer method, because certain layers have ways to check for the
+ * lock much more efficiently than through the generic locks scan, or
+ * implement locking mechanisms separate from cl_lock, e.g.,
+ * LL_FILE_GROUP_LOCKED in vvp. If \a pending is true, check for locks
+ * being canceled, or scheduled for cancellation as soon as the last
+ * user goes away, too.
+ *
+ * \retval -EBUSY: page is protected by a lock of a given mode;
+ * \retval -ENODATA: page is not protected by a lock;
+ * \retval 0: this layer cannot decide.
+ *
+ * \see cl_page_is_under_lock()
+ */
+ int (*cpo_is_under_lock)(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io);
+
+ /**
+ * Optional debugging helper. Prints given page slice.
+ *
+ * \see cl_page_print()
+ */
+ int (*cpo_print)(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ void *cookie, lu_printer_t p);
+ /**
+ * \name transfer
+ *
+ * Transfer methods. See comment on cl_req for a description of
+ * transfer formation and life-cycle.
+ *
+ * @{
+ */
+ /**
+ * Request type dependent vector of operations.
+ *
+ * Transfer operations depend on transfer mode (cl_req_type). To avoid
+ * passing transfer mode to each and every of these methods, and to
+ * avoid branching on request type inside of the methods, separate
+ * methods for cl_req_type:CRT_READ and cl_req_type:CRT_WRITE are
+ * provided. That is, method invocation usually looks like
+ *
+ * slice->cp_ops.io[req->crq_type].cpo_method(env, slice, ...);
+ */
+ struct {
+ /**
+ * Called when a page is submitted for a transfer as a part of
+ * cl_page_list.
+ *
+ * \return 0 : page is eligible for submission;
+ * \return -EALREADY : skip this page;
+ * \return -ve : error.
+ *
+ * \see cl_page_prep()
+ */
+ int (*cpo_prep)(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io);
+ /**
+ * Completion handler. This is guaranteed to be eventually
+ * fired after cl_page_operations::cpo_prep() or
+ * cl_page_operations::cpo_make_ready() call.
+ *
+ * This method can be called in a non-blocking context. It is
+ * guaranteed however, that the page involved and its object
+ * are pinned in memory (and, hence, calling cl_page_put() is
+ * safe).
+ *
+ * \see cl_page_completion()
+ */
+ void (*cpo_completion)(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ int ioret);
+ /**
+ * Called when cached page is about to be added to the
+ * cl_req as a part of req formation.
+ *
+ * \return 0 : proceed with this page;
+ * \return -EAGAIN : skip this page;
+ * \return -ve : error.
+ *
+ * \see cl_page_make_ready()
+ */
+ int (*cpo_make_ready)(const struct lu_env *env,
+ const struct cl_page_slice *slice);
+ /**
+ * Announce that this page is to be written out
+ * opportunistically, that is, page is dirty, it is not
+ * necessary to start write-out transfer right now, but
+ * eventually page has to be written out.
+ *
+ * Main caller of this is the write path (see
+ * vvp_io_commit_write()), using this method to build a
+ * "transfer cache" from which large transfers are then
+ * constructed by the req-formation engine.
+ *
+ * \todo XXX it would make sense to add page-age tracking
+ * semantics here, and to oblige the req-formation engine to
+ * send the page out before it becomes too old.
+ *
+ * \see cl_page_cache_add()
+ */
+ int (*cpo_cache_add)(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io);
+ } io[CRT_NR];
+ /**
+ * Tell the transfer engine that only the [from, to] part of a page should be
+ * transmitted.
+ *
+ * This is used for immediate transfers.
+ *
+ * \todo XXX this is not very good interface. It would be much better
+ * if all transfer parameters were supplied as arguments to
+ * cl_io_operations::cio_submit() call, but it is not clear how to do
+ * this for page queues.
+ *
+ * \see cl_page_clip()
+ */
+ void (*cpo_clip)(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ int from, int to);
+ /**
+ * \pre the page was queued for transferring.
+ * \post page is removed from the client's pending list, or -EBUSY
+ * is returned if it is already being transferred.
+ *
+ * This is one of the few page operations that:
+ * 0. is called from the top level;
+ * 1. does not have the vmpage locked;
+ * 2. requires every layer to synchronize execution of its ->cpo_cancel()
+ * with completion handlers. Osc uses the client obd lock for this
+ * purpose. Since there is no vvp_page_cancel() or
+ * lov_page_cancel(), cpo_cancel() is de facto protected by the client lock.
+ *
+ * \see osc_page_cancel().
+ */
+ int (*cpo_cancel)(const struct lu_env *env,
+ const struct cl_page_slice *slice);
+ /**
+ * Write out a page by kernel. This is only called by ll_writepage
+ * right now.
+ *
+ * \see cl_page_flush()
+ */
+ int (*cpo_flush)(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io);
+ /** @} transfer */
+};
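+
+/*
+ * A sketch of how the request-type-dependent io[] vector above is dispatched
+ * across all layers of a page; the helper name is hypothetical and the real
+ * cl_page_prep() may differ in details:
+ *
+ *	static int page_prep_sketch(const struct lu_env *env, struct cl_io *io,
+ *				    struct cl_page *page, enum cl_req_type crt)
+ *	{
+ *		const struct cl_page_slice *slice;
+ *		int result = 0;
+ *
+ *		list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+ *			if (slice->cpl_ops->io[crt].cpo_prep == NULL)
+ *				continue;
+ *			result = slice->cpl_ops->io[crt].cpo_prep(env, slice, io);
+ *			if (result != 0)
+ *				break;
+ *		}
+ *		return result;
+ *	}
+ */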
+
+/**
+ * Helper macro, dumping detailed information about \a page into a log.
+ */
+#define CL_PAGE_DEBUG(mask, env, page, format, ...) \
+do { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
+ \
+ if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ cl_page_print(env, &msgdata, lu_cdebug_printer, page); \
+ CDEBUG(mask, format , ## __VA_ARGS__); \
+ } \
+} while (0)
+
+/**
+ * Helper macro, dumping shorter information about \a page into a log.
+ */
+#define CL_PAGE_HEADER(mask, env, page, format, ...) \
+do { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
+ \
+ if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ cl_page_header_print(env, &msgdata, lu_cdebug_printer, page); \
+ CDEBUG(mask, format , ## __VA_ARGS__); \
+ } \
+} while (0)
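+
+/*
+ * Typical use of the logging helpers above; the masks and messages are
+ * chosen for illustration only:
+ *
+ *	CL_PAGE_DEBUG(D_ERROR, env, page, "unexpected state %d\n",
+ *		      page->cp_state);
+ *	CL_PAGE_HEADER(D_TRACE, env, page, "queued for %s\n", "write");
+ */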
+
+static inline int __page_in_use(const struct cl_page *page, int refc)
+{
+ if (page->cp_type == CPT_CACHEABLE)
+ ++refc;
+ LASSERT(atomic_read(&page->cp_ref) > 0);
+ return (atomic_read(&page->cp_ref) > refc);
+}
+#define cl_page_in_use(pg) __page_in_use(pg, 1)
+#define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
+
+/** @} cl_page */
+
+/** \addtogroup cl_lock cl_lock
+ * @{ */
+/** \struct cl_lock
+ *
+ * Extent locking on the client.
+ *
+ * LAYERING
+ *
+ * The locking model of the new client code is built around
+ *
+ * struct cl_lock
+ *
+ * data-type representing an extent lock on a regular file. cl_lock is a
+ * layered object (much like cl_object and cl_page), it consists of a header
+ * (struct cl_lock) and a list of layers (struct cl_lock_slice), linked to
+ * cl_lock::cll_layers list through cl_lock_slice::cls_linkage.
+ *
+ * All locks for a given object are linked into cl_object_header::coh_locks
+ * list (protected by cl_object_header::coh_lock_guard spin-lock) through
+ * cl_lock::cll_linkage. Currently this list is not sorted in any way. We
+ * could sort it by starting lock offset, or use an altogether different data
+ * structure, such as a tree.
+ *
+ * Typical cl_lock consists of the two layers:
+ *
+ * - vvp_lock (vvp specific data), and
+ * - lov_lock (lov specific data).
+ *
+ * lov_lock contains an array of sub-locks. Each of these sub-locks is a
+ * normal cl_lock: it has a header (struct cl_lock) and a list of layers:
+ *
+ * - lovsub_lock, and
+ * - osc_lock
+ *
+ * Each sub-lock is associated with a cl_object (representing a stripe
+ * sub-object or the file with which the top-level cl_lock is associated), and
+ * is linked into that cl_object's coh_locks list. In this respect cl_lock is
+ * similar to
+ * cl_object (that at lov layer also fans out into multiple sub-objects), and
+ * is different from cl_page, that doesn't fan out (there is usually exactly
+ * one osc_page for every vvp_page). We shall call vvp-lov portion of the lock
+ * a "top-lock" and its lovsub-osc portion a "sub-lock".
+ *
+ * LIFE CYCLE
+ *
+ * cl_lock is reference counted. When reference counter drops to 0, lock is
+ * placed in the cache, except when lock is in CLS_FREEING state. CLS_FREEING
+ * lock is destroyed when last reference is released. Referencing between
+ * top-lock and its sub-locks is described in the lov documentation module.
+ *
+ * STATE MACHINE
+ *
+ * Also, cl_lock is a state machine. This requires some clarification. One of
+ * the goals of client IO re-write was to make IO path non-blocking, or at
+ * least to make it easier to make it non-blocking in the future. Here
+ * `non-blocking' means that when a system call (read, write, truncate)
+ * reaches a situation where it has to wait for a communication with the
+ * server, it should --instead of waiting-- remember its current state and
+ * switch to some other work. E.g., instead of waiting for a lock enqueue,
+ * the client should proceed doing IO on the next stripe, etc. Obviously this
+ * is a rather radical redesign, and it is not planned to be fully implemented
+ * at this time; instead we are putting some infrastructure in place that
+ * would make it easier to do asynchronous non-blocking IO in the
+ * future. Specifically, where old locking code goes to sleep (waiting for
+ * enqueue, for example), new code returns cl_lock_transition::CLO_WAIT. When
+ * enqueue reply comes, its completion handler signals that lock state-machine
+ * is ready to transit to the next state. There is some generic code in
+ * cl_lock.c that sleeps, waiting for these signals. As a result, for users of
+ * this cl_lock.c code, it looks like locking is done in normal blocking
+ * fashion, and at the same time it is possible to switch to the non-blocking
+ * locking (simply by returning cl_lock_transition::CLO_WAIT from cl_lock.c
+ * functions).
+ *
+ * For a description of state machine states and transitions see enum
+ * cl_lock_state.
+ *
+ * There are two ways to restrict a set of states which lock might move to:
+ *
+ * - placing a "hold" on a lock guarantees that lock will not be moved
+ * into cl_lock_state::CLS_FREEING state until hold is released. Hold
+ * can be only acquired on a lock that is not in
+ * cl_lock_state::CLS_FREEING. All holds on a lock are counted in
+ * cl_lock::cll_holds. Hold protects lock from cancellation and
+ * destruction. Requests to cancel and destroy a lock on hold will be
+ * recorded, but only honored when last hold on a lock is released;
+ *
+ * - placing a "user" on a lock guarantees that lock will not leave
+ * cl_lock_state::CLS_NEW, cl_lock_state::CLS_QUEUING,
+ * cl_lock_state::CLS_ENQUEUED and cl_lock_state::CLS_HELD set of
+ * states, once it enters this set. That is, if a user is added onto a
+ * lock in a state not from this set, it doesn't immediately enforce
+ * lock to move to this set, but once lock enters this set it will
+ * remain there until all users are removed. Lock users are counted in
+ * cl_lock::cll_users.
+ *
+ * User is used to assure that lock is not canceled or destroyed while
+ * it is being enqueued, or actively used by some IO.
+ *
+ * Currently, a user always comes with a hold (cl_lock_invariant()
+ * checks that a number of holds is not less than a number of users).
+ *
+ * CONCURRENCY
+ *
+ * This is how lock state-machine operates. struct cl_lock contains a mutex
+ * cl_lock::cll_guard that protects struct fields.
+ *
+ * - mutex is taken, and cl_lock::cll_state is examined.
+ *
+ * - for every state there are possible target states where lock can move
+ * into. They are tried in order. Attempts to move into next state are
+ * done by _try() functions in cl_lock.c:cl_{enqueue,unlock,wait}_try().
+ *
+ * - if the transition can be performed immediately, state is changed,
+ * and mutex is released.
+ *
+ * - if the transition requires blocking, _try() function returns
+ * cl_lock_transition::CLO_WAIT. Caller unlocks mutex and goes to
+ * sleep, waiting for possibility of lock state change. It is woken
+ * up when some event occurs, that makes lock state change possible
+ * (e.g., the reception of the reply from the server), and repeats
+ * the loop.
+ *
+ * Top-lock and sub-lock have separate mutexes, and the latter has to be taken
+ * first to avoid dead-lock.
+ *
+ * To see an example of interaction of all these issues, take a look at the
+ * lov_cl.c:lov_lock_enqueue() function. It is called as a part of
+ * cl_enqueue_try(), and tries to advance top-lock to ENQUEUED state, by
+ * advancing state-machines of its sub-locks (lov_lock_enqueue_one()). Note
+ * also, that it uses trylock to grab sub-lock mutex to avoid dead-lock. It
+ * also has to handle CEF_ASYNC enqueue, when sub-locks enqueues have to be
+ * done in parallel, rather than one after another (this is used for glimpse
+ * locks, that cannot dead-lock).
+ *
+ * INTERFACE AND USAGE
+ *
+ * struct cl_lock_operations provide a number of call-backs that are invoked
+ * when events of interest occurs. Layers can intercept and handle glimpse,
+ * blocking, cancel ASTs and a reception of the reply from the server.
+ *
+ * One important difference with the old client locking model is that new
+ * client has a representation for the top-lock, whereas in the old code only
+ * sub-locks existed as real data structures and file-level locks were
+ * represented by "request sets" that are created and destroyed on each and
+ * every lock creation.
+ *
+ * Top-locks are cached, and can be found in the cache by the system calls. It
+ * is possible that top-lock is in cache, but some of its sub-locks were
+ * canceled and destroyed. In that case top-lock has to be enqueued again
+ * before it can be used.
+ *
+ * The overall process of locking during an IO operation is as follows:
+ *
+ * - once parameters for IO are set up in cl_io, cl_io_operations::cio_lock()
+ * is called on each layer. The responsibility of this method is to add the
+ * locks needed by a given layer to cl_io.ci_lockset.
+ *
+ * - once locks for all layers were collected, they are sorted to avoid
+ * dead-locks (cl_io_locks_sort()), and enqueued.
+ *
+ * - when all locks are acquired, IO is performed;
+ *
+ * - locks are released into cache.
+ *
+ * Striping introduces major additional complexity into locking. The
+ * fundamental problem is that it is generally unsafe to actively use (hold)
+ * two locks on the different OST servers at the same time, as this introduces
+ * inter-server dependency and can lead to cascading evictions.
+ *
+ * Basic solution is to sub-divide large read/write IOs into smaller pieces so
+ * that no multi-stripe locks are taken (note that this design abandons POSIX
+ * read/write semantics). Such pieces ideally can be executed concurrently. At
+ * the same time, certain types of IO cannot be sub-divided without
+ * sacrificing correctness. This includes:
+ *
+ * - O_APPEND write, where [0, EOF] lock has to be taken, to guarantee
+ * atomicity;
+ *
+ * - ftruncate(fd, offset), where [offset, EOF] lock has to be taken.
+ *
+ * Also, in the case of read(fd, buf, count) or write(fd, buf, count), where
+ * buf is a part of memory mapped Lustre file, a lock or locks protecting buf
+ * has to be held together with the usual lock on [offset, offset + count].
+ *
+ * As multi-stripe locks have to be allowed, it makes sense to cache them, so
+ * that, for example, a sequence of O_APPEND writes can proceed quickly
+ * without going down to the individual stripes to do lock matching. On the
+ * other hand, multi-stripe locks shouldn't be used by normal read/write
+ * calls. To achieve this, every layer can implement ->clo_fits_into() method,
+ * that is called by lock matching code (cl_lock_lookup()), and that can be
+ * used to selectively disable matching of certain locks for certain IOs. For
+ * example, the lov layer implements lov_lock_fits_into(), which allows multi-stripe
+ * locks to be matched only for truncates and O_APPEND writes.
+ *
+ * Interaction with DLM
+ *
+ * In the expected setup, cl_lock is ultimately backed up by a collection of
+ * DLM locks (struct ldlm_lock). Association between cl_lock and DLM lock is
+ * implemented in osc layer, that also matches DLM events (ASTs, cancellation,
+ * etc.) into cl_lock_operation calls. See struct osc_lock for a more detailed
+ * description of interaction with DLM.
+ */
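+
+/*
+ * The generic "try and wait" pattern described above, in sketch form. The
+ * callee names follow the _try() functions mentioned above and
+ * cl_lock_state_wait(); exact signatures and error handling are assumptions,
+ * and the real loop lives in cl_lock.c. cl_lock::cll_guard is held on entry,
+ * and cl_lock_state_wait() drops and re-acquires it while sleeping:
+ *
+ *	while (1) {
+ *		result = cl_enqueue_try(env, lock, io, enqflags);
+ *		if (result != CLO_WAIT && result != CLO_REPEAT)
+ *			break;
+ *		if (result == CLO_WAIT)
+ *			result = cl_lock_state_wait(env, lock);
+ *		if (result < 0)
+ *			break;
+ *	}
+ */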
+
+/**
+ * Lock description.
+ */
+struct cl_lock_descr {
+ /** Object this lock is granted for. */
+ struct cl_object *cld_obj;
+ /** Index of the first page protected by this lock. */
+ pgoff_t cld_start;
+ /** Index of the last page (inclusive) protected by this lock. */
+ pgoff_t cld_end;
+ /** Group ID, for group lock */
+ __u64 cld_gid;
+ /** Lock mode. */
+ enum cl_lock_mode cld_mode;
+ /**
+ * flags to enqueue lock. A combination of bit-flags from
+ * enum cl_enq_flags.
+ */
+ __u32 cld_enq_flags;
+};
+
+#define DDESCR "%s(%d):[%lu, %lu]"
+#define PDESCR(descr) \
+ cl_lock_mode_name((descr)->cld_mode), (descr)->cld_mode, \
+ (descr)->cld_start, (descr)->cld_end
+
+const char *cl_lock_mode_name(const enum cl_lock_mode mode);
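+
+/*
+ * DDESCR and PDESCR() pair up as a format string and its arguments, e.g.
+ * (debug mask and message chosen for illustration):
+ *
+ *	CDEBUG(D_DLMTRACE, "matched " DDESCR "\n", PDESCR(&lock->cll_descr));
+ */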
+
+/**
+ * Lock state-machine states.
+ *
+ * \htmlonly
+ * <pre>
+ *
+ * Possible state transitions:
+ *
+ * +------------------>NEW
+ * | |
+ * | | cl_enqueue_try()
+ * | |
+ * | cl_unuse_try() V
+ * | +--------------QUEUING (*)
+ * | | |
+ * | | | cl_enqueue_try()
+ * | | |
+ * | | cl_unuse_try() V
+ * sub-lock | +-------------ENQUEUED (*)
+ * canceled | | |
+ * | | | cl_wait_try()
+ * | | |
+ * | | (R)
+ * | | |
+ * | | V
+ * | | HELD<---------+
+ * | | | |
+ * | | | | cl_use_try()
+ * | | cl_unuse_try() | |
+ * | | | |
+ * | | V ---+
+ * | +------------>INTRANSIT (D) <--+
+ * | | |
+ * | cl_unuse_try() | | cached lock found
+ * | | | cl_use_try()
+ * | | |
+ * | V |
+ * +------------------CACHED---------+
+ * |
+ * (C)
+ * |
+ * V
+ * FREEING
+ *
+ * Legend:
+ *
+ * In states marked with (*) transition to the same state (i.e., a loop
+ * in the diagram) is possible.
+ *
+ * (R) is the point where Receive call-back is invoked: it allows layers
+ * to handle arrival of lock reply.
+ *
+ * (C) is the point where Cancellation call-back is invoked.
+ *
+ * (D) is the transit state which means the lock is changing.
+ *
+ * Transition to FREEING state is possible from any other state in the
+ * diagram in case of unrecoverable error.
+ * </pre>
+ * \endhtmlonly
+ *
+ * These states are for individual cl_lock object. Top-lock and its sub-locks
+ * can be in the different states. Another way to say this is that we have
+ * nested state-machines.
+ *
+ * Separate QUEUING and ENQUEUED states are needed to support non-blocking
+ * operation for locks with multiple sub-locks. Imagine lock on a file F, that
+ * intersects 3 stripes S0, S1, and S2. To enqueue F client has to send
+ * enqueue to S0, wait for its completion, then send enqueue for S1, wait for
+ * its completion and at last enqueue lock for S2, and wait for its
+ * completion. In that case, top-lock is in QUEUING state while S0, S1 are
+ * handled, and is in ENQUEUED state after enqueue to S2 has been sent (note
+ * that in this case, sub-locks move from state to state, and top-lock remains
+ * in the same state).
+ */
+enum cl_lock_state {
+ /**
+ * Lock that wasn't yet enqueued
+ */
+ CLS_NEW,
+ /**
+ * Enqueue is in progress, blocking for some intermediate interaction
+ * with the other side.
+ */
+ CLS_QUEUING,
+ /**
+ * Lock is fully enqueued, waiting for server to reply when it is
+ * granted.
+ */
+ CLS_ENQUEUED,
+ /**
+ * Lock granted, actively used by some IO.
+ */
+ CLS_HELD,
+ /**
+ * This state is used to mark the lock is being used, or unused.
+ * We need this state because the lock may have several sublocks,
+ * so it's impossible to have an atomic way to bring all sublocks
+ * into CLS_HELD state at use case, or all sublocks to CLS_CACHED
+ * at unuse case.
+ * If a thread is referring to a lock, and it sees the lock is in this
+ * state, it must wait for the lock.
+ * See state diagram for details.
+ */
+ CLS_INTRANSIT,
+ /**
+ * Lock granted, not used.
+ */
+ CLS_CACHED,
+ /**
+ * Lock is being destroyed.
+ */
+ CLS_FREEING,
+ CLS_NR
+};
+
+enum cl_lock_flags {
+ /**
+ * lock has been cancelled. This flag is never cleared once set (by
+ * cl_lock_cancel0()).
+ */
+ CLF_CANCELLED = 1 << 0,
+ /** cancellation is pending for this lock. */
+ CLF_CANCELPEND = 1 << 1,
+ /** destruction is pending for this lock. */
+ CLF_DOOMED = 1 << 2,
+ /** from enqueue RPC reply upcall. */
+ CLF_FROM_UPCALL= 1 << 3,
+};
+
+/**
+ * Lock closure.
+ *
+ * Lock closure is a collection of locks (both top-locks and sub-locks) that
+ * might be updated in a result of an operation on a certain lock (which lock
+ * this is a closure of).
+ *
+ * Closures are needed to guarantee dead-lock freedom in the presence of
+ *
+ * - nested state-machines (top-lock state-machine composed of sub-lock
+ * state-machines), and
+ *
+ * - shared sub-locks.
+ *
+ * Specifically, many operations, such as lock enqueue, wait, unlock,
+ * etc. start from a top-lock, and then operate on a sub-locks of this
+ * top-lock, holding a top-lock mutex. When sub-lock state changes as a result
+ * of such operation, this change has to be propagated to all top-locks that
+ * share this sub-lock. Obviously, no natural lock ordering (e.g.,
+ * top-to-bottom or bottom-to-top) captures this scenario, so try-locking has
+ * to be used. Lock closure systematizes this try-and-repeat logic.
+ */
+struct cl_lock_closure {
+ /**
+ * Lock that is mutexed when closure construction is started. When
+ * closure is in `wait' mode (cl_lock_closure::clc_wait), the mutex on
+ * origin is released before waiting.
+ */
+ struct cl_lock *clc_origin;
+ /**
+ * List of enclosed locks, so far. Locks are linked here through
+ * cl_lock::cll_inclosure.
+ */
+ struct list_head clc_list;
+ /**
+ * True iff closure is in a `wait' mode. This determines what
+ * cl_lock_enclosure() does when a lock L to be added to the closure
+ * is currently mutexed by some other thread.
+ *
+ * If cl_lock_closure::clc_wait is not set, then closure construction
+ * fails with CLO_REPEAT immediately.
+ *
+ * In wait mode, cl_lock_enclosure() waits until next attempt to build
+ * a closure might succeed. To this end it releases an origin mutex
+ * (cl_lock_closure::clc_origin), that has to be the only lock mutex
+ * owned by the current thread, and then waits on L mutex (by grabbing
+ * it and immediately releasing), before returning CLO_REPEAT to the
+ * caller.
+ */
+ int clc_wait;
+ /** Number of locks in the closure. */
+ int clc_nr;
+};
+
+/**
+ * Layered client lock.
+ */
+struct cl_lock {
+ /** Reference counter. */
+ atomic_t cll_ref;
+ /** List of slices. Immutable after creation. */
+ struct list_head cll_layers;
+ /**
+ * Linkage into cl_lock::cll_descr::cld_obj::coh_locks list. Protected
+ * by cl_lock::cll_descr::cld_obj::coh_lock_guard.
+ */
+ struct list_head cll_linkage;
+ /**
+ * Parameters of this lock. Protected by
+ * cl_lock::cll_descr::cld_obj::coh_lock_guard nested within
+ * cl_lock::cll_guard. Modified only on lock creation and in
+ * cl_lock_modify().
+ */
+ struct cl_lock_descr cll_descr;
+ /** Protected by cl_lock::cll_guard. */
+ enum cl_lock_state cll_state;
+ /** signals state changes. */
+ wait_queue_head_t cll_wq;
+ /**
+ * Recursive lock, most fields in cl_lock{} are protected by this.
+ *
+ * Locking rules: this mutex is never held across network
+ * communication, except when lock is being canceled.
+ *
+ * Lock ordering: a mutex of a sub-lock is taken first, then a mutex
+ * on a top-lock. Other direction is implemented through a
+ * try-lock-repeat loop. Mutices of unrelated locks can be taken only
+ * by try-locking.
+ *
+ * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
+ */
+ struct mutex cll_guard;
+ struct task_struct *cll_guarder;
+ int cll_depth;
+
+ /**
+ * the owner for INTRANSIT state
+ */
+ struct task_struct *cll_intransit_owner;
+ int cll_error;
+ /**
+ * Number of holds on a lock. A hold prevents a lock from being
+ * canceled and destroyed. Protected by cl_lock::cll_guard.
+ *
+ * \see cl_lock_hold(), cl_lock_unhold(), cl_lock_release()
+ */
+ int cll_holds;
+ /**
+ * Number of lock users. Valid in cl_lock_state::CLS_HELD state
+ * only. Lock user pins lock in CLS_HELD state. Protected by
+ * cl_lock::cll_guard.
+ *
+ * \see cl_wait(), cl_unuse().
+ */
+ int cll_users;
+ /**
+ * Flag bit-mask. Values from enum cl_lock_flags. Updates are
+ * protected by cl_lock::cll_guard.
+ */
+ unsigned long cll_flags;
+ /**
+ * A linkage into a list of locks in a closure.
+ *
+ * \see cl_lock_closure
+ */
+ struct list_head cll_inclosure;
+ /**
+ * Conflicting lock at queuing time.
+ */
+ struct cl_lock *cll_conflict;
+ /**
+ * A list of references to this lock, for debugging.
+ */
+ struct lu_ref cll_reference;
+ /**
+ * A list of holds on this lock, for debugging.
+ */
+ struct lu_ref cll_holders;
+ /**
+ * A reference for cl_lock::cll_descr::cld_obj. For debugging.
+ */
+ struct lu_ref_link cll_obj_ref;
+#ifdef CONFIG_LOCKDEP
+ /* "dep_map" name is assumed by lockdep.h macros. */
+ struct lockdep_map dep_map;
+#endif
+};
+
+/**
+ * Per-layer part of cl_lock
+ *
+ * \see ccc_lock, lov_lock, lovsub_lock, osc_lock
+ */
+struct cl_lock_slice {
+ struct cl_lock *cls_lock;
+ /** Object slice corresponding to this lock slice. Immutable after
+ * creation. */
+ struct cl_object *cls_obj;
+ const struct cl_lock_operations *cls_ops;
+ /** Linkage into cl_lock::cll_layers. Immutable after creation. */
+ struct list_head cls_linkage;
+};
+
+/**
+ * Possible (non-error) return values of ->clo_{enqueue,wait,unlock}().
+ *
+ * NOTE: lov_subresult() depends on ordering here.
+ */
+enum cl_lock_transition {
+ /** operation cannot be completed immediately. Wait for state change. */
+ CLO_WAIT = 1,
+ /** operation had to release lock mutex, restart. */
+ CLO_REPEAT = 2,
+ /** lower layer re-enqueued. */
+ CLO_REENQUEUED = 3,
+};
+
+/**
+ * Per-layer lock operations.
+ *
+ * \see vvp_lock_ops, lov_lock_ops, lovsub_lock_ops, osc_lock_ops
+ */
+struct cl_lock_operations {
+ /**
+ * \name statemachine
+ *
+ * State machine transitions. These 3 methods are called to transfer
+ * lock from one state to another, as described in the commentary
+ * above enum #cl_lock_state.
+ *
+ * \retval 0 this layer has nothing more to do before the
+ * transition to the target state happens;
+ *
+ * \retval CLO_REPEAT method had to release and re-acquire cl_lock
+ * mutex, repeat invocation of transition method
+ * across all layers;
+ *
+ * \retval CLO_WAIT this layer cannot move to the target state
+ * immediately, as it has to wait for certain event
+ * (e.g., the communication with the server). It
+ * is guaranteed, that when the state transfer
+ * becomes possible, cl_lock::cll_wq wait-queue
+ * is signaled. Caller can wait for this event by
+ * calling cl_lock_state_wait();
+ *
+ * \retval -ve failure, abort state transition, move the lock
+ * into cl_lock_state::CLS_FREEING state, and set
+ * cl_lock::cll_error.
+ *
+ * Once all layers voted to agree to transition (by returning 0), lock
+ * is moved into corresponding target state. All state transition
+ * methods are optional.
+ */
+ /** @{ */
+ /**
+ * Attempts to enqueue the lock. Called top-to-bottom.
+ *
+ * \see ccc_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(),
+ * \see osc_lock_enqueue()
+ */
+ int (*clo_enqueue)(const struct lu_env *env,
+ const struct cl_lock_slice *slice,
+ struct cl_io *io, __u32 enqflags);
+ /**
+ * Attempts to wait for enqueue result. Called top-to-bottom.
+ *
+ * \see ccc_lock_wait(), lov_lock_wait(), osc_lock_wait()
+ */
+ int (*clo_wait)(const struct lu_env *env,
+ const struct cl_lock_slice *slice);
+ /**
+ * Attempts to unlock the lock. Called bottom-to-top. In addition to
+ * usual return values of lock state-machine methods, this can return
+ * -ESTALE to indicate that lock cannot be returned to the cache, and
+ * has to be re-initialized.
+ * unuse is a one-shot operation, so it must NOT return CLO_WAIT.
+ *
+ * \see ccc_lock_unuse(), lov_lock_unuse(), osc_lock_unuse()
+ */
+ int (*clo_unuse)(const struct lu_env *env,
+ const struct cl_lock_slice *slice);
+ /**
+ * Notifies the layer that a cached lock has started being used.
+ *
+ * \pre lock->cll_state == CLS_CACHED
+ *
+ * \see lov_lock_use(), osc_lock_use()
+ */
+ int (*clo_use)(const struct lu_env *env,
+ const struct cl_lock_slice *slice);
+ /** @} statemachine */
+ /**
+ * A method invoked when lock state is changed (as a result of state
+ * transition). This is used, for example, to track when the state of
+ * a sub-lock changes, to propagate this change to the corresponding
+ * top-lock. Optional
+ *
+ * \see lovsub_lock_state()
+ */
+ void (*clo_state)(const struct lu_env *env,
+ const struct cl_lock_slice *slice,
+ enum cl_lock_state st);
+ /**
+ * Returns true iff the given lock is suitable for the given io, the
+ * idea being that there are certain "unsafe" locks, e.g., ones acquired
+ * for O_APPEND writes, that we don't want to re-use for a normal
+ * write, to avoid the danger of cascading evictions. Optional. Runs
+ * under cl_object_header::coh_lock_guard.
+ *
+ * XXX this should take more information about lock needed by
+ * io. Probably lock description or something similar.
+ *
+ * \see lov_fits_into()
+ */
+ int (*clo_fits_into)(const struct lu_env *env,
+ const struct cl_lock_slice *slice,
+ const struct cl_lock_descr *need,
+ const struct cl_io *io);
+ /**
+ * \name ast
+ * Asynchronous System Traps. All of them are optional; all are
+ * executed bottom-to-top.
+ */
+ /** @{ */
+
+ /**
+ * Cancellation callback. Cancel a lock voluntarily, or under
+ * the request of server.
+ */
+ void (*clo_cancel)(const struct lu_env *env,
+ const struct cl_lock_slice *slice);
+ /**
+ * Lock weighting ast. Executed to estimate how precious this lock
+ * is. The sum of results across all layers is used to determine
+ * whether the lock is worth keeping in cache given present memory usage.
+ *
+ * \see osc_lock_weigh(), vvp_lock_weigh(), lovsub_lock_weigh().
+ */
+ unsigned long (*clo_weigh)(const struct lu_env *env,
+ const struct cl_lock_slice *slice);
+ /** @} ast */
+
+ /**
+ * \see lovsub_lock_closure()
+ */
+ int (*clo_closure)(const struct lu_env *env,
+ const struct cl_lock_slice *slice,
+ struct cl_lock_closure *closure);
+ /**
+ * Executed bottom-to-top when lock description changes (e.g., as a
+ * result of server granting more generous lock than was requested).
+ *
+ * \see lovsub_lock_modify()
+ */
+ int (*clo_modify)(const struct lu_env *env,
+ const struct cl_lock_slice *slice,
+ const struct cl_lock_descr *updated);
+ /**
+ * Notifies layers (bottom-to-top) that lock is going to be
+ * destroyed. Responsibility of layers is to prevent new references on
+ * this lock from being acquired once this method returns.
+ *
+ * This can be called multiple times due to the races.
+ *
+ * \see cl_lock_delete()
+ * \see osc_lock_delete(), lovsub_lock_delete()
+ */
+ void (*clo_delete)(const struct lu_env *env,
+ const struct cl_lock_slice *slice);
+ /**
+ * Destructor. Frees resources and the slice.
+ *
+ * \see ccc_lock_fini(), lov_lock_fini(), lovsub_lock_fini(),
+ * \see osc_lock_fini()
+ */
+ void (*clo_fini)(const struct lu_env *env, struct cl_lock_slice *slice);
+ /**
+ * Optional debugging helper. Prints given lock slice.
+ */
+ int (*clo_print)(const struct lu_env *env,
+ void *cookie, lu_printer_t p,
+ const struct cl_lock_slice *slice);
+};
+
+#define CL_LOCK_DEBUG(mask, env, lock, format, ...) \
+do { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
+ \
+ if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ cl_lock_print(env, &msgdata, lu_cdebug_printer, lock); \
+ CDEBUG(mask, format , ## __VA_ARGS__); \
+ } \
+} while (0)
+
+#define CL_LOCK_ASSERT(expr, env, lock) do { \
+ if (likely(expr)) \
+ break; \
+ \
+ CL_LOCK_DEBUG(D_ERROR, env, lock, "failed at %s.\n", #expr); \
+ LBUG(); \
+} while (0)
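+
+/*
+ * Example use of the assertion helper above; the checked condition is
+ * illustrative:
+ *
+ *	CL_LOCK_ASSERT(lock->cll_holds > 0, env, lock);
+ */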
+
+/** @} cl_lock */
+
+/** \addtogroup cl_page_list cl_page_list
+ * Page list used to perform collective operations on a group of pages.
+ *
+ * Pages are added to the list one by one. cl_page_list acquires a reference
+ * for every page in it. Page list is used to perform collective operations on
+ * pages:
+ *
+ * - submit pages for an immediate transfer,
+ *
+ * - own pages on behalf of certain io (waiting for each page in turn),
+ *
+ * - discard pages.
+ *
+ * When list is finalized, it releases references on all pages it still has.
+ *
+ * \todo XXX concurrency control.
+ *
+ * @{
+ */
+struct cl_page_list {
+ unsigned pl_nr;
+ struct list_head pl_pages;
+ struct task_struct *pl_owner;
+};
+
+/**
+ * A 2-queue of pages. A convenience data-type for common use case, 2-queue
+ * contains an incoming page list and an outgoing page list.
+ */
+struct cl_2queue {
+ struct cl_page_list c2_qin;
+ struct cl_page_list c2_qout;
+};
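+
+/*
+ * Collective-operation sketch; the helper names and signatures below are
+ * assumed from the description above and may differ from the actual API:
+ *
+ *	struct cl_page_list queue;
+ *
+ *	cl_page_list_init(&queue);
+ *	cl_page_list_add(&queue, page);
+ *	... submit, own or discard the pages in the queue ...
+ *	cl_page_list_fini(env, &queue);
+ */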
+
+/** @} cl_page_list */
+
+/** \addtogroup cl_io cl_io
+ * @{ */
+/** \struct cl_io
+ * I/O
+ *
+ * cl_io represents a high level I/O activity like
+ * read(2)/write(2)/truncate(2) system call, or cancellation of an extent
+ * lock.
+ *
+ * cl_io is a layered object, much like cl_{object,page,lock} but with one
+ * important distinction. We want to minimize number of calls to the allocator
+ * in the fast path, e.g., in the case of read(2) when everything is cached:
+ * client already owns the lock over region being read, and data are cached
+ * due to read-ahead. To avoid allocation of cl_io layers in such situations,
+ * per-layer io state is stored in the session, associated with the io, see
+ * struct {vvp,lov,osc}_io for example. Sessions allocation is amortized
+ * by using free-lists, see cl_env_get().
+ *
+ * There is a small predefined number of possible io types, enumerated in enum
+ * cl_io_type.
+ *
+ * cl_io is a state machine that can be advanced concurrently by multiple
+ * threads. It is up to these threads to control the concurrency and,
+ * specifically, to detect when io is done, and its state can be safely
+ * released.
+ *
+ * For read/write io overall execution plan is as following:
+ *
+ * (0) initialize io state through all layers;
+ *
+ * (1) loop: prepare chunk of work to do
+ *
+ * (2) call all layers to collect locks they need to process current chunk
+ *
+ * (3) sort all locks to avoid dead-locks, and acquire them
+ *
+ * (4) process the chunk: call per-page methods
+ * (cl_io_operations::cio_read_page() for read,
+ * cl_io_operations::cio_prepare_write(),
+ * cl_io_operations::cio_commit_write() for write)
+ *
+ * (5) release locks
+ *
+ * (6) repeat loop.
+ *
+ * To implement the "parallel IO mode", lov layer creates sub-io's (lazily to
+ * address allocation efficiency issues mentioned above), and returns with the
+ * special error condition from the per-page method when the current sub-io has to
+ * block. This causes io loop to be repeated, and lov switches to the next
+ * sub-io in its cl_io_operations::cio_iter_init() implementation.
+ */
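+
+/*
+ * The read/write plan above, expressed against top-level entry points that
+ * drive the per-layer methods defined below. This is a simplified sketch of
+ * what cl_io_loop() does; the wrapper names mirror the per-layer methods and
+ * are assumed here, and error handling and iteration details are omitted:
+ *
+ *	result = cl_io_init(env, io, iot, obj);			   step (0)
+ *	while (result == 0 && more work remains) {
+ *		cl_io_iter_init(env, io);			   step (1)
+ *		cl_io_lock(env, io);				   steps (2)-(3)
+ *		cl_io_start(env, io);				   step (4)
+ *		cl_io_end(env, io);
+ *		cl_io_unlock(env, io);				   step (5)
+ *		cl_io_iter_fini(env, io);			   step (6)
+ *	}
+ *	cl_io_fini(env, io);
+ */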
+
+/** IO types */
+enum cl_io_type {
+ /** read system call */
+ CIT_READ,
+ /** write system call */
+ CIT_WRITE,
+ /** truncate, utime system calls */
+ CIT_SETATTR,
+ /**
+ * page fault handling
+ */
+ CIT_FAULT,
+ /**
+ * fsync system call handling
+ * To write out a range of file
+ */
+ CIT_FSYNC,
+ /**
+ * Miscellaneous io. This is used for occasional io activity that
+ * doesn't fit into other types. Currently this is used for:
+ *
+ * - cancellation of an extent lock. This io exists as a context
+ * to write dirty pages from under the lock being canceled back
+ * to the server;
+ *
+ * - VM induced page write-out. An io context for writing page out
+ * for memory cleansing;
+ *
+ * - glimpse. An io context to acquire glimpse lock.
+ *
+ * - grouplock. An io context to acquire group lock.
+ *
+ * CIT_MISC io is used simply as a context in which locks and pages
+ * are manipulated. Such io has no internal "process", that is,
+ * cl_io_loop() is never called for it.
+ */
+ CIT_MISC,
+ CIT_OP_NR
+};
+
+/**
+ * States of cl_io state machine
+ */
+enum cl_io_state {
+ /** Not initialized. */
+ CIS_ZERO,
+ /** Initialized. */
+ CIS_INIT,
+ /** IO iteration started. */
+ CIS_IT_STARTED,
+ /** Locks taken. */
+ CIS_LOCKED,
+ /** Actual IO is in progress. */
+ CIS_IO_GOING,
+ /** IO for the current iteration finished. */
+ CIS_IO_FINISHED,
+ /** Locks released. */
+ CIS_UNLOCKED,
+ /** Iteration completed. */
+ CIS_IT_ENDED,
+ /** cl_io finalized. */
+ CIS_FINI
+};
+
+/**
+ * IO state private for a layer.
+ *
+ * This is usually embedded into layer session data, rather than allocated
+ * dynamically.
+ *
+ * \see vvp_io, lov_io, osc_io, ccc_io
+ */
+struct cl_io_slice {
+ struct cl_io *cis_io;
+ /** corresponding object slice. Immutable after creation. */
+ struct cl_object *cis_obj;
+ /** io operations. Immutable after creation. */
+ const struct cl_io_operations *cis_iop;
+ /**
+ * linkage into a list of all slices for a given cl_io, hanging off
+ * cl_io::ci_layers. Immutable after creation.
+ */
+ struct list_head cis_linkage;
+};
+
+
+/**
+ * Per-layer io operations.
+ * \see vvp_io_ops, lov_io_ops, lovsub_io_ops, osc_io_ops
+ */
+struct cl_io_operations {
+ /**
+ * Vector of io state transition methods for every io type.
+ *
+ * \see cl_page_operations::io
+ */
+ struct {
+ /**
+ * Prepare io iteration at a given layer.
+ *
+ * Called top-to-bottom at the beginning of each iteration of
+ * "io loop" (if it makes sense for this type of io). Here
+ * layer selects what work it will do during this iteration.
+ *
+ * \see cl_io_operations::cio_iter_fini()
+ */
+ int (*cio_iter_init) (const struct lu_env *env,
+ const struct cl_io_slice *slice);
+ /**
+ * Finalize io iteration.
+ *
+ * Called bottom-to-top at the end of each iteration of "io
+ * loop". Here layers can decide whether IO has to be
+ * continued.
+ *
+ * \see cl_io_operations::cio_iter_init()
+ */
+ void (*cio_iter_fini) (const struct lu_env *env,
+ const struct cl_io_slice *slice);
+ /**
+ * Collect locks for the current iteration of io.
+ *
+ * Called top-to-bottom to collect all locks necessary for
+ * this iteration. This method shouldn't actually enqueue
+ * anything; instead it should post a lock through
+ * cl_io_lock_add(). Once all locks are collected, they are
+ * sorted and enqueued in the proper order.
+ */
+ int (*cio_lock) (const struct lu_env *env,
+ const struct cl_io_slice *slice);
+ /**
+ * Finalize unlocking.
+ *
+ * Called bottom-to-top to finish layer specific unlocking
+ * functionality, after generic code released all locks
+ * acquired by cl_io_operations::cio_lock().
+ */
+ void (*cio_unlock)(const struct lu_env *env,
+ const struct cl_io_slice *slice);
+ /**
+ * Start io iteration.
+ *
+ * Once all locks are acquired, called top-to-bottom to
+ * commence actual IO. In the current implementation,
+ * top-level vvp_io_{read,write}_start() does all the work
+ * synchronously by calling generic_file_*(), so other layers
+ * are called when everything is done.
+ */
+ int (*cio_start)(const struct lu_env *env,
+ const struct cl_io_slice *slice);
+ /**
+ * Called top-to-bottom at the end of io loop. Here layer
+ * might wait for an unfinished asynchronous io.
+ */
+ void (*cio_end) (const struct lu_env *env,
+ const struct cl_io_slice *slice);
+ /**
+ * Called bottom-to-top to notify layers that read/write IO
+ * iteration finished, with \a nob bytes transferred.
+ */
+ void (*cio_advance)(const struct lu_env *env,
+ const struct cl_io_slice *slice,
+ size_t nob);
+ /**
+ * Called once per io, bottom-to-top to release io resources.
+ */
+ void (*cio_fini) (const struct lu_env *env,
+ const struct cl_io_slice *slice);
+ } op[CIT_OP_NR];
+ struct {
+ /**
+ * Submit pages from \a queue->c2_qin for IO, and move
+ * successfully submitted pages into \a queue->c2_qout. Return
+ * non-zero if even a single page could not be submitted. If
+ * submission failed after some pages were moved into \a
+ * queue->c2_qout, the completion callback is executed on them
+ * with a non-zero ioret.
+ */
+ int (*cio_submit)(const struct lu_env *env,
+ const struct cl_io_slice *slice,
+ enum cl_req_type crt,
+ struct cl_2queue *queue);
+ } req_op[CRT_NR];
+ /**
+ * Read missing page.
+ *
+ * Called by a top-level cl_io_operations::op[CIT_READ]::cio_start()
+ * method, when it hits not-up-to-date page in the range. Optional.
+ *
+ * \pre io->ci_type == CIT_READ
+ */
+ int (*cio_read_page)(const struct lu_env *env,
+ const struct cl_io_slice *slice,
+ const struct cl_page_slice *page);
+ /**
+ * Prepare write of a \a page. Called bottom-to-top by a top-level
+ * cl_io_operations::op[CIT_WRITE]::cio_start() to prepare the page
+ * for receiving data from a user-level buffer.
+ *
+ * \pre io->ci_type == CIT_WRITE
+ *
+ * \see vvp_io_prepare_write(), lov_io_prepare_write(),
+ * osc_io_prepare_write().
+ */
+ int (*cio_prepare_write)(const struct lu_env *env,
+ const struct cl_io_slice *slice,
+ const struct cl_page_slice *page,
+ unsigned from, unsigned to);
+ /**
+ * Commit write of a \a page.
+ *
+ * \pre io->ci_type == CIT_WRITE
+ *
+ * \see vvp_io_commit_write(), lov_io_commit_write(),
+ * osc_io_commit_write().
+ */
+ int (*cio_commit_write)(const struct lu_env *env,
+ const struct cl_io_slice *slice,
+ const struct cl_page_slice *page,
+ unsigned from, unsigned to);
+ /**
+ * Optional debugging helper. Print given io slice.
+ */
+ int (*cio_print)(const struct lu_env *env, void *cookie,
+ lu_printer_t p, const struct cl_io_slice *slice);
+};
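+
+/*
+ * Usage sketch (illustrative only, not taken from the Lustre sources): a
+ * layer typically provides a single static instance of this vector; every
+ * name prefixed with "example_" below is hypothetical:
+ *
+ *	static const struct cl_io_operations example_io_ops = {
+ *		.op = {
+ *			[CIT_READ] = {
+ *				.cio_iter_init = example_io_iter_init,
+ *				.cio_lock      = example_io_lock,
+ *				.cio_start     = example_io_read_start,
+ *				.cio_fini      = example_io_fini
+ *			}
+ *		},
+ *		.cio_read_page = example_io_read_page
+ *	};
+ *
+ * \see vvp_io_ops, lov_io_ops, osc_io_ops for the real per-layer vectors.
+ */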
+
+/**
+ * Flags to lock enqueue procedure.
+ * \ingroup cl_lock
+ */
+enum cl_enq_flags {
+ /**
+ * instruct the server not to block if a conflicting lock is found.
+ * Instead -EWOULDBLOCK is returned immediately.
+ */
+ CEF_NONBLOCK = 0x00000001,
+ /**
+ * take lock asynchronously (out of order), as it cannot
+ * deadlock. This is for LDLM_FL_HAS_INTENT locks used for glimpsing.
+ */
+ CEF_ASYNC = 0x00000002,
+ /**
+ * tell the server to instruct (through a flag in the blocking ast) the
+ * owner of the conflicting lock that it can drop dirty pages
+ * protected by this lock, without sending them to the server.
+ */
+ CEF_DISCARD_DATA = 0x00000004,
+ /**
+ * tell the sub layers that it must be a `real' lock. This is used for
+ * mmapped-buffer locks and glimpse locks that must never be converted
+ * into lockless mode.
+ *
+ * \see vvp_mmap_locks(), cl_glimpse_lock().
+ */
+ CEF_MUST = 0x00000008,
+ /**
+ * tell the sub layers never to request a `real' lock. This flag is
+ * not currently used.
+ *
+ * cl_io::ci_lockreq and the CEF_{MUST,NEVER} flags specify the lockless
+ * conversion policy: ci_lockreq describes the generic lock requirement
+ * for this IO, especially for locks which belong to the object doing
+ * IO; however, an individual lock may have more precise requirements,
+ * described by the enqueue flags.
+ */
+ CEF_NEVER = 0x00000010,
+ /**
+ * for async glimpse lock.
+ */
+ CEF_AGL = 0x00000020,
+ /**
+ * mask of enq_flags.
+ */
+ CEF_MASK = 0x0000003f,
+};
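+
+/*
+ * Usage sketch (illustrative only, not taken from the Lustre sources):
+ * enqueue flags form a bit-mask and can be combined, e.g., a glimpse-style
+ * request that must not block and may be handled out of order could use
+ *
+ *	__u32 enqflags = CEF_NONBLOCK | CEF_ASYNC;
+ *
+ * CEF_MASK covers all valid bits, so (enqflags & ~CEF_MASK) == 0 must hold.
+ */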
+
+/**
+ * Link between lock and io. Intermediate structure is needed, because the
+ * same lock can be part of multiple io's simultaneously.
+ */
+struct cl_io_lock_link {
+ /** linkage into one of cl_lockset lists. */
+ struct list_head cill_linkage;
+ struct cl_lock_descr cill_descr;
+ struct cl_lock *cill_lock;
+ /** optional destructor */
+ void (*cill_fini)(const struct lu_env *env,
+ struct cl_io_lock_link *link);
+};
+
+/**
+ * Lock-set represents a collection of locks that an io needs at a
+ * time. Generally speaking, the client tries to avoid holding multiple
+ * locks when possible, because
+ *
+ * - holding extent locks over multiple ost's introduces the danger of
+ * "cascading timeouts";
+ *
+ * - holding multiple locks over the same ost is still dead-lock prone,
+ * see comment in osc_lock_enqueue(),
+ *
+ * but there are certain situations where this is unavoidable:
+ *
+ * - O_APPEND writes have to take [0, EOF] lock for correctness;
+ *
+ * - truncate has to take [new-size, EOF] lock for correctness;
+ *
+ * - SNS has to take locks across full stripe for correctness;
+ *
+ * - in the case when a user-level buffer, supplied to {read,write}(file0),
+ * is a part of a memory-mapped lustre file, the client has to take dlm
+ * locks on file0 and on all files that back up the buffer (or the part
+ * of the buffer that is being processed in the current chunk); in any
+ * case, there are situations where at least 2 locks are necessary.
+ *
+ * In such cases we at least try to take locks in the same consistent
+ * order. To this end, all locks are first collected, then sorted, and then
+ * enqueued.
+ */
+struct cl_lockset {
+ /** locks to be acquired. */
+ struct list_head cls_todo;
+ /** locks currently being processed. */
+ struct list_head cls_curr;
+ /** locks acquired. */
+ struct list_head cls_done;
+};
+
+/**
+ * Lock requirements (demand) for IO. It would be cl_io_lock_req, but
+ * 'req' always reads as 'request' :-)
+ */
+enum cl_io_lock_dmd {
+ /** Always lock data (e.g., O_APPEND). */
+ CILR_MANDATORY = 0,
+ /** Layers are free to decide between local and global locking. */
+ CILR_MAYBE,
+ /** Never lock: there is no cache (e.g., liblustre). */
+ CILR_NEVER
+};
+
+enum cl_fsync_mode {
+ /** start writeback, do not wait for it to finish */
+ CL_FSYNC_NONE = 0,
+ /** start writeback and wait for it to finish */
+ CL_FSYNC_LOCAL = 1,
+ /** discard all dirty pages in a specific file range */
+ CL_FSYNC_DISCARD = 2,
+ /** start writeback and make sure the data have reached storage before
+ * returning. An OST_SYNC RPC must be issued and finished */
+ CL_FSYNC_ALL = 3
+};
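+
+/*
+ * Usage sketch (illustrative only, not taken from the Lustre sources): a
+ * CIT_FSYNC io describes its byte range and mode through cl_io::u.ci_fsync
+ * (defined below); "env", "io" and "obj" are assumed to exist and
+ * OBD_OBJECT_EOF to be defined elsewhere as the end-of-file marker:
+ *
+ *	cl_io_init(env, io, CIT_FSYNC, obj);
+ *	io->u.ci_fsync.fi_start = 0;
+ *	io->u.ci_fsync.fi_end   = OBD_OBJECT_EOF;
+ *	io->u.ci_fsync.fi_mode  = CL_FSYNC_LOCAL;
+ *	cl_io_loop(env, io);
+ *	cl_io_fini(env, io);
+ */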
+
+struct cl_io_rw_common {
+ loff_t crw_pos;
+ size_t crw_count;
+ int crw_nonblock;
+};
+
+
+/**
+ * State for io.
+ *
+ * cl_io is shared by all threads participating in this IO (in the current
+ * implementation only one thread advances the IO, but the parallel IO design
+ * and concurrent copy_*_user() require multiple threads acting on the same
+ * IO). It is up to these threads to serialize their activities, including
+ * updates to mutable cl_io fields.
+ */
+struct cl_io {
+ /** type of this IO. Immutable after creation. */
+ enum cl_io_type ci_type;
+ /** current state of cl_io state machine. */
+ enum cl_io_state ci_state;
+ /** main object this io is against. Immutable after creation. */
+ struct cl_object *ci_obj;
+ /**
+ * Upper layer io, of which this io is a part. Immutable after
+ * creation.
+ */
+ struct cl_io *ci_parent;
+ /** List of slices. Immutable after creation. */
+ struct list_head ci_layers;
+ /** list of locks (to be) acquired by this io. */
+ struct cl_lockset ci_lockset;
+ /** lock requirements, this is just a help info for sublayers. */
+ enum cl_io_lock_dmd ci_lockreq;
+ union {
+ struct cl_rd_io {
+ struct cl_io_rw_common rd;
+ } ci_rd;
+ struct cl_wr_io {
+ struct cl_io_rw_common wr;
+ int wr_append;
+ int wr_sync;
+ } ci_wr;
+ struct cl_io_rw_common ci_rw;
+ struct cl_setattr_io {
+ struct ost_lvb sa_attr;
+ unsigned int sa_valid;
+ struct obd_capa *sa_capa;
+ } ci_setattr;
+ struct cl_fault_io {
+ /** page index within file. */
+ pgoff_t ft_index;
+ /** number of valid bytes on the faulted page. */
+ int ft_nob;
+ /** writable page? for nopage() only */
+ int ft_writable;
+ /** page of an executable? */
+ int ft_executable;
+ /** page_mkwrite() */
+ int ft_mkwrite;
+ /** resulting page */
+ struct cl_page *ft_page;
+ } ci_fault;
+ struct cl_fsync_io {
+ loff_t fi_start;
+ loff_t fi_end;
+ struct obd_capa *fi_capa;
+ /** file system level fid */
+ struct lu_fid *fi_fid;
+ enum cl_fsync_mode fi_mode;
+ /* how many pages were written/discarded */
+ unsigned int fi_nr_written;
+ } ci_fsync;
+ } u;
+ struct cl_2queue ci_queue;
+ size_t ci_nob;
+ int ci_result;
+ unsigned int ci_continue:1,
+ /**
+ * This io holds a grouplock, to inform sublayers that they must
+ * not do lockless i/o.
+ */
+ ci_no_srvlock:1,
+ /**
+ * The whole IO needs to be restarted because the layout has changed
+ */
+ ci_need_restart:1,
+ /**
+ * do not refresh the layout - the IO issuer knows that the layout
+ * won't change (page operations; a layout change causes all pages to
+ * be discarded), or it doesn't matter if it changes (sync).
+ */
+ ci_ignore_layout:1,
+ /**
+ * Check whether the layout changed after the IO finishes. Mainly for
+ * the HSM requirement. If IO occurs on open files, it doesn't need to
+ * verify the layout because HSM won't release open files.
+ * Right now, only two operations need to verify the layout: glimpse
+ * and setattr.
+ */
+ ci_verify_layout:1;
+ /**
+ * Number of pages owned by this IO. For invariant checking.
+ */
+ unsigned ci_owned_nr;
+};
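+
+/*
+ * Usage sketch (illustrative only, not taken from the Lustre sources):
+ * callers that honour ci_need_restart re-initialize the whole io when the
+ * layout changes under them; "env", "io", "iot" and "obj" are assumed to
+ * exist and error handling is omitted:
+ *
+ *	do {
+ *		cl_io_init(env, io, iot, obj);
+ *		cl_io_loop(env, io);
+ *		result = io->ci_result;
+ *		cl_io_fini(env, io);
+ *	} while (unlikely(io->ci_need_restart));
+ */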
+
+/** @} cl_io */
+
+/** \addtogroup cl_req cl_req
+ * @{ */
+/** \struct cl_req
+ * Transfer.
+ *
+ * There are two possible modes of transfer initiation on the client:
+ *
+ * - immediate transfer: this is started when a high level io wants a page
+ * or a collection of pages to be transferred right away. Examples:
+ * read-ahead, synchronous read in the case of non-page aligned write,
+ * page write-out as a part of extent lock cancellation, page write-out
+ * as a part of memory cleansing. Immediate transfer can be both
+ * cl_req_type::CRT_READ and cl_req_type::CRT_WRITE;
+ *
+ * - opportunistic transfer (cl_req_type::CRT_WRITE only), that happens
+ * when io wants to transfer a page to the server some time later, when
+ * it can be done efficiently. Example: pages dirtied by the write(2)
+ * path.
+ *
+ * In any case, transfer takes place in the form of a cl_req, which is a
+ * representation for a network RPC.
+ *
+ * Pages queued for an opportunistic transfer are cached until it is decided
+ * that efficient RPC can be composed of them. This decision is made by "a
+ * req-formation engine", currently implemented as a part of osc
+ * layer. Req-formation depends on many factors: the size of the resulting
+ * RPC, whether or not multi-object RPCs are supported by the server,
+ * max-rpc-in-flight limitations, size of the dirty cache, etc.
+ *
+ * For the immediate transfer io submits a cl_page_list, which the
+ * req-formation engine slices into cl_req's, possibly adding cached pages to
+ * some of the resulting req's.
+ *
+ * Whenever a page from cl_page_list is added to a newly constructed req, its
+ * cl_page_operations::cpo_prep() layer methods are called. At that moment,
+ * page state is atomically changed from cl_page_state::CPS_OWNED to
+ * cl_page_state::CPS_PAGEOUT or cl_page_state::CPS_PAGEIN, cl_page::cp_owner
+ * is zeroed, and cl_page::cp_req is set to the
+ * req. cl_page_operations::cpo_prep() method at the particular layer might
+ * return -EALREADY to indicate that it does not need to submit this page
+ * at all. This is possible, for example, if a page submitted for read
+ * became up-to-date in the meantime; for write, if the page does not have
+ * its dirty bit set. \see cl_io_submit_rw()
+ *
+ * Whenever a cached page is added to a newly constructed req, its
+ * cl_page_operations::cpo_make_ready() layer methods are called. At that
+ * moment, page state is atomically changed from cl_page_state::CPS_CACHED to
+ * cl_page_state::CPS_PAGEOUT, and cl_page::cp_req is set to
+ * req. cl_page_operations::cpo_make_ready() method at the particular layer
+ * might return -EAGAIN to indicate that this page is not eligible for the
+ * transfer right now.
+ *
+ * FUTURE
+ *
+ * Plan is to divide transfers into "priority bands" (indicated when
+ * submitting cl_page_list, and queuing a page for the opportunistic transfer)
+ * and allow gluing of cached pages to immediate transfers only within a single
+ * band. This would make high priority transfers (like lock cancellation or
+ * memory pressure induced write-out) really high priority.
+ *
+ */
+
+/**
+ * Per-transfer attributes.
+ */
+struct cl_req_attr {
+ /** Generic attributes for the server consumption. */
+ struct obdo *cra_oa;
+ /** Capability. */
+ struct obd_capa *cra_capa;
+ /** Jobid */
+ char cra_jobid[JOBSTATS_JOBID_SIZE];
+};
+
+/**
+ * Transfer request operations definable at every layer.
+ *
+ * Concurrency: transfer formation engine synchronizes calls to all transfer
+ * methods.
+ */
+struct cl_req_operations {
+ /**
+ * Invoked top-to-bottom by cl_req_prep() when transfer formation is
+ * complete (all pages are added).
+ *
+ * \see osc_req_prep()
+ */
+ int (*cro_prep)(const struct lu_env *env,
+ const struct cl_req_slice *slice);
+ /**
+ * Called top-to-bottom to fill in \a oa fields. This is called twice
+ * with different flags, see bug 10150 and osc_build_req().
+ *
+ * \param obj an object from cl_req whose attributes are to be set in
+ * \a oa.
+ *
+ * \param oa struct obdo where attributes are placed
+ *
+ * \param flags \a oa fields to be filled.
+ */
+ void (*cro_attr_set)(const struct lu_env *env,
+ const struct cl_req_slice *slice,
+ const struct cl_object *obj,
+ struct cl_req_attr *attr, obd_valid flags);
+ /**
+ * Called top-to-bottom from cl_req_completion() to notify layers that
+ * transfer completed. Has to free all state allocated by
+ * cl_device_operations::cdo_req_init().
+ */
+ void (*cro_completion)(const struct lu_env *env,
+ const struct cl_req_slice *slice, int ioret);
+};
+
+/**
+ * A per-object state that (potentially multi-object) transfer request keeps.
+ */
+struct cl_req_obj {
+ /** object itself */
+ struct cl_object *ro_obj;
+ /** reference to cl_req_obj::ro_obj. For debugging. */
+ struct lu_ref_link ro_obj_ref;
+ /* something else? Number of pages for a given object? */
+};
+
+/**
+ * Transfer request.
+ *
+ * Transfer requests are not reference counted, because IO sub-system owns
+ * them exclusively and knows when to free them.
+ *
+ * Life cycle.
+ *
+ * cl_req is created by cl_req_alloc() that calls
+ * cl_device_operations::cdo_req_init() device methods to allocate per-req
+ * state in every layer.
+ *
+ * Then pages are added (cl_req_page_add()), req keeps track of all objects it
+ * contains pages for.
+ *
+ * Once all pages were collected, cl_page_operations::cpo_prep() method is
+ * called top-to-bottom. At that point layers can modify req, let it pass, or
+ * deny it completely. This is to support things like SNS that have transfer
+ * ordering requirements invisible to the individual req-formation engine.
+ *
+ * On transfer completion (or transfer timeout, or failure to initiate the
+ * transfer of an allocated req), cl_req_operations::cro_completion() method
+ * is called, after execution of cl_page_operations::cpo_completion() of all
+ * req's pages.
+ */
+struct cl_req {
+ enum cl_req_type crq_type;
+ /** A list of pages being transferred */
+ struct list_head crq_pages;
+ /** Number of pages in cl_req::crq_pages */
+ unsigned crq_nrpages;
+ /** An array of objects whose pages are in ->crq_pages */
+ struct cl_req_obj *crq_o;
+ /** Number of elements in cl_req::crq_o[] */
+ unsigned crq_nrobjs;
+ struct list_head crq_layers;
+};
+
+/**
+ * Per-layer state for request.
+ */
+struct cl_req_slice {
+ struct cl_req *crs_req;
+ struct cl_device *crs_dev;
+ struct list_head crs_linkage;
+ const struct cl_req_operations *crs_ops;
+};
+
+/* @} cl_req */
+
+enum cache_stats_item {
+ /** how many cache lookups were performed */
+ CS_lookup = 0,
+ /** how many times cache lookup resulted in a hit */
+ CS_hit,
+ /** how many entities are in the cache right now */
+ CS_total,
+ /** how many entities in the cache are actively used (and cannot be
+ * evicted) right now */
+ CS_busy,
+ /** how many entities were created at all */
+ CS_create,
+ CS_NR
+};
+
+#define CS_NAMES { "lookup", "hit", "total", "busy", "create" }
+
+/**
+ * Stats for a generic cache (similar to inode, lu_object, etc. caches).
+ */
+struct cache_stats {
+ const char *cs_name;
+ atomic_t cs_stats[CS_NR];
+};
+
+/** These are not exported so far */
+void cache_stats_init (struct cache_stats *cs, const char *name);
+
+/**
+ * Client-side site. This represents a particular client stack. "Global"
+ * variables should (directly or indirectly) be added here to allow multiple
+ * clients to co-exist in a single address space.
+ */
+struct cl_site {
+ struct lu_site cs_lu;
+ /**
+ * Statistical counters. Atomics do not scale; something better, like
+ * per-cpu counters, is needed.
+ *
+ * These are exported as /proc/fs/lustre/llite/.../site
+ *
+ * When interpreting keep in mind that both sub-locks (and sub-pages)
+ * and top-locks (and top-pages) are accounted here.
+ */
+ struct cache_stats cs_pages;
+ struct cache_stats cs_locks;
+ atomic_t cs_pages_state[CPS_NR];
+ atomic_t cs_locks_state[CLS_NR];
+};
+
+int cl_site_init (struct cl_site *s, struct cl_device *top);
+void cl_site_fini (struct cl_site *s);
+void cl_stack_fini(const struct lu_env *env, struct cl_device *cl);
+
+/**
+ * Output client site statistical counters into a buffer. Suitable for
+ * ll_rd_*()-style functions.
+ */
+int cl_site_stats_print(const struct cl_site *site, struct seq_file *m);
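+
+/*
+ * Usage sketch (illustrative only, not taken from the Lustre sources): a
+ * client stack embeds one cl_site, sets it up against the top device and
+ * tears it down with the stack; "site", "top" and "m" are assumed to exist:
+ *
+ *	cl_site_init(site, top);
+ *	...
+ *	cl_site_stats_print(site, m);	// e.g. from a /proc seq_file handler
+ *	...
+ *	cl_site_fini(site);
+ */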
+
+/**
+ * \name helpers
+ *
+ * Type conversion and accessory functions.
+ */
+/** @{ */
+
+static inline struct cl_site *lu2cl_site(const struct lu_site *site)
+{
+ return container_of(site, struct cl_site, cs_lu);
+}
+
+static inline int lu_device_is_cl(const struct lu_device *d)
+{
+ return d->ld_type->ldt_tags & LU_DEVICE_CL;
+}
+
+static inline struct cl_device *lu2cl_dev(const struct lu_device *d)
+{
+ LASSERT(d == NULL || IS_ERR(d) || lu_device_is_cl(d));
+ return container_of0(d, struct cl_device, cd_lu_dev);
+}
+
+static inline struct lu_device *cl2lu_dev(struct cl_device *d)
+{
+ return &d->cd_lu_dev;
+}
+
+static inline struct cl_object *lu2cl(const struct lu_object *o)
+{
+ LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->lo_dev));
+ return container_of0(o, struct cl_object, co_lu);
+}
+
+static inline const struct cl_object_conf *
+lu2cl_conf(const struct lu_object_conf *conf)
+{
+ return container_of0(conf, struct cl_object_conf, coc_lu);
+}
+
+static inline struct cl_object *cl_object_next(const struct cl_object *obj)
+{
+ return obj ? lu2cl(lu_object_next(&obj->co_lu)) : NULL;
+}
+
+static inline struct cl_device *cl_object_device(const struct cl_object *o)
+{
+ LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->co_lu.lo_dev));
+ return container_of0(o->co_lu.lo_dev, struct cl_device, cd_lu_dev);
+}
+
+static inline struct cl_object_header *luh2coh(const struct lu_object_header *h)
+{
+ return container_of0(h, struct cl_object_header, coh_lu);
+}
+
+static inline struct cl_site *cl_object_site(const struct cl_object *obj)
+{
+ return lu2cl_site(obj->co_lu.lo_dev->ld_site);
+}
+
+static inline
+struct cl_object_header *cl_object_header(const struct cl_object *obj)
+{
+ return luh2coh(obj->co_lu.lo_header);
+}
+
+static inline int cl_device_init(struct cl_device *d, struct lu_device_type *t)
+{
+ return lu_device_init(&d->cd_lu_dev, t);
+}
+
+static inline void cl_device_fini(struct cl_device *d)
+{
+ lu_device_fini(&d->cd_lu_dev);
+}
+
+void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
+ struct cl_object *obj,
+ const struct cl_page_operations *ops);
+void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
+ struct cl_object *obj,
+ const struct cl_lock_operations *ops);
+void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
+ struct cl_object *obj, const struct cl_io_operations *ops);
+void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
+ struct cl_device *dev,
+ const struct cl_req_operations *ops);
+/** @} helpers */
+
+/** \defgroup cl_object cl_object
+ * @{ */
+struct cl_object *cl_object_top (struct cl_object *o);
+struct cl_object *cl_object_find(const struct lu_env *env, struct cl_device *cd,
+ const struct lu_fid *fid,
+ const struct cl_object_conf *c);
+
+int cl_object_header_init(struct cl_object_header *h);
+void cl_object_header_fini(struct cl_object_header *h);
+void cl_object_put (const struct lu_env *env, struct cl_object *o);
+void cl_object_get (struct cl_object *o);
+void cl_object_attr_lock (struct cl_object *o);
+void cl_object_attr_unlock(struct cl_object *o);
+int cl_object_attr_get (const struct lu_env *env, struct cl_object *obj,
+ struct cl_attr *attr);
+int cl_object_attr_set (const struct lu_env *env, struct cl_object *obj,
+ const struct cl_attr *attr, unsigned valid);
+int cl_object_glimpse (const struct lu_env *env, struct cl_object *obj,
+ struct ost_lvb *lvb);
+int cl_conf_set (const struct lu_env *env, struct cl_object *obj,
+ const struct cl_object_conf *conf);
+void cl_object_prune (const struct lu_env *env, struct cl_object *obj);
+void cl_object_kill (const struct lu_env *env, struct cl_object *obj);
+int cl_object_has_locks (struct cl_object *obj);
+
+/**
+ * Returns true, iff \a o0 and \a o1 are slices of the same object.
+ */
+static inline int cl_object_same(struct cl_object *o0, struct cl_object *o1)
+{
+ return cl_object_header(o0) == cl_object_header(o1);
+}
+
+static inline void cl_object_page_init(struct cl_object *clob, int size)
+{
+ clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize;
+ cl_object_header(clob)->coh_page_bufsize += ALIGN(size, 8);
+}
+
+static inline void *cl_object_page_slice(struct cl_object *clob,
+ struct cl_page *page)
+{
+ return (void *)((char *)page + clob->co_slice_off);
+}
+
+/** @} cl_object */
+
+/** \defgroup cl_page cl_page
+ * @{ */
+enum {
+ CLP_GANG_OKAY = 0,
+ CLP_GANG_RESCHED,
+ CLP_GANG_AGAIN,
+ CLP_GANG_ABORT
+};
+
+/* callback of cl_page_gang_lookup() */
+typedef int (*cl_page_gang_cb_t) (const struct lu_env *, struct cl_io *,
+ struct cl_page *, void *);
+int cl_page_gang_lookup (const struct lu_env *env,
+ struct cl_object *obj,
+ struct cl_io *io,
+ pgoff_t start, pgoff_t end,
+ cl_page_gang_cb_t cb, void *cbdata);
+struct cl_page *cl_page_lookup (struct cl_object_header *hdr,
+ pgoff_t index);
+struct cl_page *cl_page_find (const struct lu_env *env,
+ struct cl_object *obj,
+ pgoff_t idx, struct page *vmpage,
+ enum cl_page_type type);
+struct cl_page *cl_page_find_sub (const struct lu_env *env,
+ struct cl_object *obj,
+ pgoff_t idx, struct page *vmpage,
+ struct cl_page *parent);
+void cl_page_get (struct cl_page *page);
+void cl_page_put (const struct lu_env *env,
+ struct cl_page *page);
+void cl_page_print (const struct lu_env *env, void *cookie,
+ lu_printer_t printer,
+ const struct cl_page *pg);
+void cl_page_header_print(const struct lu_env *env, void *cookie,
+ lu_printer_t printer,
+ const struct cl_page *pg);
+struct page *cl_page_vmpage (const struct lu_env *env,
+ struct cl_page *page);
+struct cl_page *cl_vmpage_page (struct page *vmpage, struct cl_object *obj);
+struct cl_page *cl_page_top (struct cl_page *page);
+
+const struct cl_page_slice *cl_page_at(const struct cl_page *page,
+ const struct lu_device_type *dtype);
+
+/**
+ * \name ownership
+ *
+ * Functions dealing with the ownership of page by io.
+ */
+/** @{ */
+
+int cl_page_own (const struct lu_env *env,
+ struct cl_io *io, struct cl_page *page);
+int cl_page_own_try (const struct lu_env *env,
+ struct cl_io *io, struct cl_page *page);
+void cl_page_assume (const struct lu_env *env,
+ struct cl_io *io, struct cl_page *page);
+void cl_page_unassume (const struct lu_env *env,
+ struct cl_io *io, struct cl_page *pg);
+void cl_page_disown (const struct lu_env *env,
+ struct cl_io *io, struct cl_page *page);
+int cl_page_is_owned (const struct cl_page *pg, const struct cl_io *io);
+
+/** @} ownership */
+
+/**
+ * \name transfer
+ *
+ * Functions dealing with the preparation of a page for a transfer, and
+ * tracking transfer state.
+ */
+/** @{ */
+int cl_page_prep (const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg, enum cl_req_type crt);
+void cl_page_completion (const struct lu_env *env,
+ struct cl_page *pg, enum cl_req_type crt, int ioret);
+int cl_page_make_ready (const struct lu_env *env, struct cl_page *pg,
+ enum cl_req_type crt);
+int cl_page_cache_add (const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg, enum cl_req_type crt);
+void cl_page_clip (const struct lu_env *env, struct cl_page *pg,
+ int from, int to);
+int cl_page_cancel (const struct lu_env *env, struct cl_page *page);
+int cl_page_flush (const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg);
+
+/** @} transfer */
+
+
+/**
+ * \name helper routines
+ * Functions to discard, delete and export a cl_page.
+ */
+/** @{ */
+void cl_page_discard (const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg);
+void cl_page_delete (const struct lu_env *env, struct cl_page *pg);
+int cl_page_unmap (const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg);
+int cl_page_is_vmlocked (const struct lu_env *env,
+ const struct cl_page *pg);
+void cl_page_export (const struct lu_env *env,
+ struct cl_page *pg, int uptodate);
+int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page);
+loff_t cl_offset (const struct cl_object *obj, pgoff_t idx);
+pgoff_t cl_index (const struct cl_object *obj, loff_t offset);
+int cl_page_size (const struct cl_object *obj);
+int cl_pages_prune (const struct lu_env *env, struct cl_object *obj);
+
+void cl_lock_print (const struct lu_env *env, void *cookie,
+ lu_printer_t printer, const struct cl_lock *lock);
+void cl_lock_descr_print(const struct lu_env *env, void *cookie,
+ lu_printer_t printer,
+ const struct cl_lock_descr *descr);
+/* @} helper */
+
+/** @} cl_page */
+
+/** \defgroup cl_lock cl_lock
+ * @{ */
+
+struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
+ const struct cl_lock_descr *need,
+ const char *scope, const void *source);
+struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
+ const struct cl_lock_descr *need,
+ const char *scope, const void *source);
+struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
+ const struct cl_lock_descr *need,
+ const char *scope, const void *source);
+struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
+ struct cl_object *obj, pgoff_t index,
+ struct cl_lock *except, int pending,
+ int canceld);
+static inline struct cl_lock *cl_lock_at_page(const struct lu_env *env,
+ struct cl_object *obj,
+ struct cl_page *page,
+ struct cl_lock *except,
+ int pending, int canceld)
+{
+ LASSERT(cl_object_header(obj) == cl_object_header(page->cp_obj));
+ return cl_lock_at_pgoff(env, obj, page->cp_index, except,
+ pending, canceld);
+}
+
+const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
+ const struct lu_device_type *dtype);
+
+void cl_lock_get (struct cl_lock *lock);
+void cl_lock_get_trust (struct cl_lock *lock);
+void cl_lock_put (const struct lu_env *env, struct cl_lock *lock);
+void cl_lock_hold_add (const struct lu_env *env, struct cl_lock *lock,
+ const char *scope, const void *source);
+void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
+ const char *scope, const void *source);
+void cl_lock_unhold (const struct lu_env *env, struct cl_lock *lock,
+ const char *scope, const void *source);
+void cl_lock_release (const struct lu_env *env, struct cl_lock *lock,
+ const char *scope, const void *source);
+void cl_lock_user_add (const struct lu_env *env, struct cl_lock *lock);
+void cl_lock_user_del (const struct lu_env *env, struct cl_lock *lock);
+
+enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
+ struct cl_lock *lock);
+void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
+ enum cl_lock_state state);
+int cl_lock_is_intransit(struct cl_lock *lock);
+
+int cl_lock_enqueue_wait(const struct lu_env *env, struct cl_lock *lock,
+ int keep_mutex);
+
+/** \name statemachine statemachine
+ * Interface to lock state machine consists of 3 parts:
+ *
+ * - "try" functions that attempt to effect a state transition. If state
+ * transition is not possible right now (e.g., if it has to wait for some
+ * asynchronous event to occur), these functions return
+ * cl_lock_transition::CLO_WAIT.
+ *
+ * - "non-try" functions that implement synchronous blocking interface on
+ * top of non-blocking "try" functions. These functions repeatedly call
+ * corresponding "try" versions, and if state transition is not possible
+ * immediately, wait for lock state change.
+ *
+ * - methods from cl_lock_operations, called by "try" functions. Lock can
+ * be advanced to the target state only when all layers voted that they
+ * are ready for this transition. "Try" functions call methods under lock
+ * mutex. If a layer had to release a mutex, it re-acquires it and returns
+ * cl_lock_transition::CLO_REPEAT, causing "try" function to call all
+ * layers again.
+ *
+ * TRY NON-TRY METHOD FINAL STATE
+ *
+ * cl_enqueue_try() cl_enqueue() cl_lock_operations::clo_enqueue() CLS_ENQUEUED
+ *
+ * cl_wait_try() cl_wait() cl_lock_operations::clo_wait() CLS_HELD
+ *
+ * cl_unuse_try() cl_unuse() cl_lock_operations::clo_unuse() CLS_CACHED
+ *
+ * cl_use_try() NONE cl_lock_operations::clo_use() CLS_HELD
+ *
+ * @{ */
+
+int cl_enqueue (const struct lu_env *env, struct cl_lock *lock,
+ struct cl_io *io, __u32 flags);
+int cl_wait (const struct lu_env *env, struct cl_lock *lock);
+void cl_unuse (const struct lu_env *env, struct cl_lock *lock);
+int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
+ struct cl_io *io, __u32 flags);
+int cl_unuse_try (const struct lu_env *env, struct cl_lock *lock);
+int cl_wait_try (const struct lu_env *env, struct cl_lock *lock);
+int cl_use_try (const struct lu_env *env, struct cl_lock *lock, int atomic);
+
+/** @} statemachine */
+
+void cl_lock_signal (const struct lu_env *env, struct cl_lock *lock);
+int cl_lock_state_wait (const struct lu_env *env, struct cl_lock *lock);
+void cl_lock_state_set (const struct lu_env *env, struct cl_lock *lock,
+ enum cl_lock_state state);
+int cl_queue_match (const struct list_head *queue,
+ const struct cl_lock_descr *need);
+
+void cl_lock_mutex_get (const struct lu_env *env, struct cl_lock *lock);
+int cl_lock_mutex_try (const struct lu_env *env, struct cl_lock *lock);
+void cl_lock_mutex_put (const struct lu_env *env, struct cl_lock *lock);
+int cl_lock_is_mutexed (struct cl_lock *lock);
+int cl_lock_nr_mutexed (const struct lu_env *env);
+int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock);
+int cl_lock_ext_match (const struct cl_lock_descr *has,
+ const struct cl_lock_descr *need);
+int cl_lock_descr_match(const struct cl_lock_descr *has,
+ const struct cl_lock_descr *need);
+int cl_lock_mode_match (enum cl_lock_mode has, enum cl_lock_mode need);
+int cl_lock_modify (const struct lu_env *env, struct cl_lock *lock,
+ const struct cl_lock_descr *desc);
+
+void cl_lock_closure_init (const struct lu_env *env,
+ struct cl_lock_closure *closure,
+ struct cl_lock *origin, int wait);
+void cl_lock_closure_fini (struct cl_lock_closure *closure);
+int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
+ struct cl_lock_closure *closure);
+void cl_lock_disclosure (const struct lu_env *env,
+ struct cl_lock_closure *closure);
+int cl_lock_enclosure (const struct lu_env *env, struct cl_lock *lock,
+ struct cl_lock_closure *closure);
+
+void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock);
+void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock);
+void cl_lock_error (const struct lu_env *env, struct cl_lock *lock, int error);
+void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int wait);
+
+unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock);
+
+/** @} cl_lock */
+
+/** \defgroup cl_io cl_io
+ * @{ */
+
+int cl_io_init (const struct lu_env *env, struct cl_io *io,
+ enum cl_io_type iot, struct cl_object *obj);
+int cl_io_sub_init (const struct lu_env *env, struct cl_io *io,
+ enum cl_io_type iot, struct cl_object *obj);
+int cl_io_rw_init (const struct lu_env *env, struct cl_io *io,
+ enum cl_io_type iot, loff_t pos, size_t count);
+int cl_io_loop (const struct lu_env *env, struct cl_io *io);
+
+void cl_io_fini (const struct lu_env *env, struct cl_io *io);
+int cl_io_iter_init (const struct lu_env *env, struct cl_io *io);
+void cl_io_iter_fini (const struct lu_env *env, struct cl_io *io);
+int cl_io_lock (const struct lu_env *env, struct cl_io *io);
+void cl_io_unlock (const struct lu_env *env, struct cl_io *io);
+int cl_io_start (const struct lu_env *env, struct cl_io *io);
+void cl_io_end (const struct lu_env *env, struct cl_io *io);
+int cl_io_lock_add (const struct lu_env *env, struct cl_io *io,
+ struct cl_io_lock_link *link);
+int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
+ struct cl_lock_descr *descr);
+int cl_io_read_page (const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page);
+int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page, unsigned from, unsigned to);
+int cl_io_commit_write (const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page, unsigned from, unsigned to);
+int cl_io_submit_rw (const struct lu_env *env, struct cl_io *io,
+ enum cl_req_type iot, struct cl_2queue *queue);
+int cl_io_submit_sync (const struct lu_env *env, struct cl_io *io,
+ enum cl_req_type iot, struct cl_2queue *queue,
+ long timeout);
+void cl_io_rw_advance (const struct lu_env *env, struct cl_io *io,
+ size_t nob);
+int cl_io_cancel (const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *queue);
+int cl_io_is_going (const struct lu_env *env);
+
+/**
+ * True, iff \a io is an O_APPEND write(2).
+ */
+static inline int cl_io_is_append(const struct cl_io *io)
+{
+ return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_append;
+}
+
+static inline int cl_io_is_sync_write(const struct cl_io *io)
+{
+ return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_sync;
+}
+
+static inline int cl_io_is_mkwrite(const struct cl_io *io)
+{
+ return io->ci_type == CIT_FAULT && io->u.ci_fault.ft_mkwrite;
+}
+
+/**
+ * True, iff \a io is a truncate(2).
+ */
+static inline int cl_io_is_trunc(const struct cl_io *io)
+{
+ return io->ci_type == CIT_SETATTR &&
+ (io->u.ci_setattr.sa_valid & ATTR_SIZE);
+}
+
+struct cl_io *cl_io_top(struct cl_io *io);
+
+void cl_io_print(const struct lu_env *env, void *cookie,
+ lu_printer_t printer, const struct cl_io *io);
+
+#define CL_IO_SLICE_CLEAN(foo_io, base) \
+do { \
+ typeof(foo_io) __foo_io = (foo_io); \
+ \
+ CLASSERT(offsetof(typeof(*__foo_io), base) == 0); \
+ memset(&__foo_io->base + 1, 0, \
+ (sizeof *__foo_io) - sizeof __foo_io->base); \
+} while (0)
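+
+/*
+ * Usage sketch (illustrative only, not taken from the Lustre sources): the
+ * macro above zeroes everything in a layer io structure except the leading
+ * slice, which must be the first member. With a hypothetical layer structure
+ *
+ *	struct example_io {
+ *		struct cl_io_slice eio_cl;	// must be at offset 0
+ *		int		   eio_state;
+ *	};
+ *
+ * a layer would reset its per-iteration state with
+ *
+ *	CL_IO_SLICE_CLEAN(eio, eio_cl);
+ *
+ * where "eio" is a struct example_io pointer.
+ */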
+
+/** @} cl_io */
+
+/** \defgroup cl_page_list cl_page_list
+ * @{ */
+
+/**
+ * Last page in the page list.
+ */
+static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist)
+{
+ LASSERT(plist->pl_nr > 0);
+ return list_entry(plist->pl_pages.prev, struct cl_page, cp_batch);
+}
+
+/**
+ * Iterate over pages in a page list.
+ */
+#define cl_page_list_for_each(page, list) \
+ list_for_each_entry((page), &(list)->pl_pages, cp_batch)
+
+/**
+ * Iterate over pages in a page list, taking possible removals into account.
+ */
+#define cl_page_list_for_each_safe(page, temp, list) \
+ list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
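+
+/*
+ * Usage sketch (illustrative only, not taken from the Lustre sources):
+ * walking a page list with the helper above; "plist" is assumed to be an
+ * initialized struct cl_page_list pointer:
+ *
+ *	struct cl_page *page;
+ *
+ *	cl_page_list_for_each(page, plist)
+ *		cl_page_get(page);
+ */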
+
+void cl_page_list_init (struct cl_page_list *plist);
+void cl_page_list_add (struct cl_page_list *plist, struct cl_page *page);
+void cl_page_list_move (struct cl_page_list *dst, struct cl_page_list *src,
+ struct cl_page *page);
+void cl_page_list_splice (struct cl_page_list *list,
+ struct cl_page_list *head);
+void cl_page_list_del (const struct lu_env *env,
+ struct cl_page_list *plist, struct cl_page *page);
+void cl_page_list_disown (const struct lu_env *env,
+ struct cl_io *io, struct cl_page_list *plist);
+int cl_page_list_own (const struct lu_env *env,
+ struct cl_io *io, struct cl_page_list *plist);
+void cl_page_list_assume (const struct lu_env *env,
+ struct cl_io *io, struct cl_page_list *plist);
+void cl_page_list_discard(const struct lu_env *env,
+ struct cl_io *io, struct cl_page_list *plist);
+int cl_page_list_unmap (const struct lu_env *env,
+ struct cl_io *io, struct cl_page_list *plist);
+void cl_page_list_fini (const struct lu_env *env, struct cl_page_list *plist);
+
+void cl_2queue_init (struct cl_2queue *queue);
+void cl_2queue_add (struct cl_2queue *queue, struct cl_page *page);
+void cl_2queue_disown (const struct lu_env *env,
+ struct cl_io *io, struct cl_2queue *queue);
+void cl_2queue_assume (const struct lu_env *env,
+ struct cl_io *io, struct cl_2queue *queue);
+void cl_2queue_discard (const struct lu_env *env,
+ struct cl_io *io, struct cl_2queue *queue);
+void cl_2queue_fini (const struct lu_env *env, struct cl_2queue *queue);
+void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page);
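+
+/*
+ * Usage sketch (illustrative only, not taken from the Lustre sources): the
+ * usual 2-queue pattern is to fill c2_qin, submit, and find the accepted
+ * pages in c2_qout; "env", "io", "queue" and "page" are assumed to exist:
+ *
+ *	cl_2queue_init_page(queue, page);	// 2-queue with a single page
+ *	rc = cl_io_submit_rw(env, io, CRT_WRITE, queue);
+ *	...
+ *	cl_2queue_disown(env, io, queue);
+ *	cl_2queue_fini(env, queue);
+ */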
+
+/** @} cl_page_list */
+
+/** \defgroup cl_req cl_req
+ * @{ */
+struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
+ enum cl_req_type crt, int nr_objects);
+
+void cl_req_page_add (const struct lu_env *env, struct cl_req *req,
+ struct cl_page *page);
+void cl_req_page_done (const struct lu_env *env, struct cl_page *page);
+int cl_req_prep (const struct lu_env *env, struct cl_req *req);
+void cl_req_attr_set (const struct lu_env *env, struct cl_req *req,
+ struct cl_req_attr *attr, obd_valid flags);
+void cl_req_completion(const struct lu_env *env, struct cl_req *req, int ioret);
+
+/** \defgroup cl_sync_io cl_sync_io
+ * @{ */
+
+/**
+ * Anchor for synchronous transfer. This is allocated on the stack by the
+ * thread doing a synchronous transfer, and a pointer to this structure is set
+ * up in every page submitted for transfer. The transfer completion routine
+ * updates the anchor and wakes up the waiting thread when the transfer is
+ * complete.
+ */
+struct cl_sync_io {
+ /** number of pages yet to be transferred. */
+ atomic_t csi_sync_nr;
+ /** error code. */
+ int csi_sync_rc;
+ /** barrier before destroying this structure */
+ atomic_t csi_barrier;
+ /** completion to be signaled when transfer is complete. */
+ wait_queue_head_t csi_waitq;
+};
+
+void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages);
+int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *queue, struct cl_sync_io *anchor,
+ long timeout);
+void cl_sync_io_note(struct cl_sync_io *anchor, int ioret);
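+
+/*
+ * Usage sketch (illustrative only, not taken from the Lustre sources): a
+ * synchronous transfer waits on a stack-allocated anchor that every
+ * submitted page points to; "env", "io", "queue" and "timeout" are assumed
+ * to exist:
+ *
+ *	struct cl_sync_io anchor;
+ *
+ *	cl_sync_io_init(&anchor, queue->pl_nr);
+ *	// ... submit the pages in "queue", each referencing the anchor ...
+ *	rc = cl_sync_io_wait(env, io, queue, &anchor, timeout);
+ */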
+
+/** @} cl_sync_io */
+
+/** @} cl_req */
+
+/** \defgroup cl_env cl_env
+ *
+ * lu_env handling for a client.
+ *
+ * lu_env is an environment within which lustre code executes. Its major part
+ * is lu_context---a fast memory allocation mechanism that is used to conserve
+ * precious kernel stack space. Originally lu_env was designed for a server,
+ * where
+ *
+ * - there is a (mostly) fixed number of threads, and
+ *
+ * - call chains have no non-lustre portions inserted between pieces of
+ * lustre code.
+ *
+ * On a client both these assumptions fail, because every user thread can
+ * potentially execute lustre code as part of a system call, and lustre calls
+ * into the VFS or MM, which call back into lustre.
+ *
+ * To deal with that, cl_env wrapper functions implement the following
+ * optimizations:
+ *
+ * - allocation and destruction of environment is amortized by caching no
+ * longer used environments instead of destroying them;
+ *
+ * - there is a notion of "current" environment, attached to the kernel
+ * data structure representing the current thread. Top-level lustre code
+ * allocates an environment and makes it current, then calls into
+ * non-lustre code, which in turn calls lustre back. Low-level lustre
+ * code thus called can fetch the environment created by the top-level
+ * code and reuse it, avoiding additional environment allocation.
+ * Right now, three interfaces can attach the cl_env to a running thread:
+ * - cl_env_get
+ * - cl_env_implant
+ * - cl_env_reexit (cl_env_reenter must have been called beforehand)
+ *
+ * \see lu_env, lu_context, lu_context_key
+ * @{ */
+
+struct cl_env_nest {
+ int cen_refcheck;
+ void *cen_cookie;
+};
+
+struct lu_env *cl_env_peek (int *refcheck);
+struct lu_env *cl_env_get (int *refcheck);
+struct lu_env *cl_env_alloc (int *refcheck, __u32 tags);
+struct lu_env *cl_env_nested_get (struct cl_env_nest *nest);
+void cl_env_put (struct lu_env *env, int *refcheck);
+void cl_env_nested_put (struct cl_env_nest *nest, struct lu_env *env);
+void *cl_env_reenter (void);
+void cl_env_reexit (void *cookie);
+void cl_env_implant (struct lu_env *env, int *refcheck);
+void cl_env_unplant (struct lu_env *env, int *refcheck);
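+
+/*
+ * Usage sketch (illustrative only, not taken from the Lustre sources): the
+ * common pattern is to obtain an environment with a reference-check token
+ * and release it with the same token:
+ *
+ *	int refcheck;
+ *	struct lu_env *env;
+ *
+ *	env = cl_env_get(&refcheck);
+ *	if (!IS_ERR(env)) {
+ *		// ... use env ...
+ *		cl_env_put(env, &refcheck);
+ *	}
+ */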
+
+/** @} cl_env */
+
+/*
+ * Misc
+ */
+void cl_attr2lvb(struct ost_lvb *lvb, const struct cl_attr *attr);
+void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb);
+
+struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
+ struct lu_device_type *ldt,
+ struct lu_device *next);
+/** @} clio */
+
+int cl_global_init(void);
+void cl_global_fini(void);
+
+#endif /* _LINUX_CL_OBJECT_H */
diff --git a/drivers/staging/lustre/lustre/include/dt_object.h b/drivers/staging/lustre/lustre/include/dt_object.h
new file mode 100644
index 0000000..e116bb2
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/dt_object.h
@@ -0,0 +1,1498 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __LUSTRE_DT_OBJECT_H
+#define __LUSTRE_DT_OBJECT_H
+
+/** \defgroup dt dt
+ * Sub-class of lu_object with methods common for "data" objects in OST stack.
+ *
+ * Data objects behave like regular files: you can read/write them, get and
+ * set their attributes. Implementation of dt interface is supposed to
+ * implement some form of garbage collection, normally reference counting
+ * (nlink) based one.
+ *
+ * Examples: osd (lustre/osd) is an implementation of dt interface.
+ * @{
+ */
+
+
+/*
+ * super-class definitions.
+ */
+#include <lu_object.h>
+
+#include <linux/libcfs/libcfs.h>
+
+struct seq_file;
+struct proc_dir_entry;
+struct lustre_cfg;
+
+struct thandle;
+struct dt_device;
+struct dt_object;
+struct dt_index_features;
+struct niobuf_local;
+struct niobuf_remote;
+struct ldlm_enqueue_info;
+
+typedef enum {
+ MNTOPT_USERXATTR = 0x00000001,
+ MNTOPT_ACL = 0x00000002,
+} mntopt_t;
+
+struct dt_device_param {
+ unsigned ddp_max_name_len;
+ unsigned ddp_max_nlink;
+ unsigned ddp_block_shift;
+ mntopt_t ddp_mntopts;
+ unsigned ddp_max_ea_size;
+ void *ddp_mnt; /* XXX: old code can retrieve mnt -bzzz */
+ int ddp_mount_type;
+ unsigned long long ddp_maxbytes;
+ /* percentage of available space to reserve for grant error margin */
+ int ddp_grant_reserved;
+ /* per-inode space consumption */
+ short ddp_inodespace;
+ /* per-fragment grant overhead to be used by client for grant
+ * calculation */
+ int ddp_grant_frag;
+};
+
+/**
+ * Per-transaction commit callback function
+ */
+struct dt_txn_commit_cb;
+typedef void (*dt_cb_t)(struct lu_env *env, struct thandle *th,
+ struct dt_txn_commit_cb *cb, int err);
+/**
+ * Special per-transaction callback for cases when just a commit callback
+ * is needed and per-device callbacks are not convenient to use
+ */
+#define TRANS_COMMIT_CB_MAGIC 0xa0a00a0a
+#define MAX_COMMIT_CB_STR_LEN 32
+
+struct dt_txn_commit_cb {
+ struct list_head dcb_linkage;
+ dt_cb_t dcb_func;
+ __u32 dcb_magic;
+ char dcb_name[MAX_COMMIT_CB_STR_LEN];
+};
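+
+/*
+ * Usage sketch (illustrative only, not taken from the Lustre sources): a
+ * commit callback is embedded in the caller's own state, filled in and then
+ * attached to an open transaction through
+ * dt_device_operations::dt_trans_cb_add(); "example_commit_cb" below is
+ * hypothetical:
+ *
+ *	dcb->dcb_func  = example_commit_cb;
+ *	dcb->dcb_magic = TRANS_COMMIT_CB_MAGIC;
+ *	strlcpy(dcb->dcb_name, "example", sizeof(dcb->dcb_name));
+ */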
+
+/**
+ * Operations on dt device.
+ */
+struct dt_device_operations {
+ /**
+ * Return device-wide statistics.
+ */
+ int (*dt_statfs)(const struct lu_env *env,
+ struct dt_device *dev, struct obd_statfs *osfs);
+ /**
+ * Create a transaction on device \a dev.
+ */
+ struct thandle *(*dt_trans_create)(const struct lu_env *env,
+ struct dt_device *dev);
+ /**
+ * Start the transaction \a th.
+ */
+ int (*dt_trans_start)(const struct lu_env *env,
+ struct dt_device *dev, struct thandle *th);
+ /**
+ * Finish previously started transaction.
+ */
+ int (*dt_trans_stop)(const struct lu_env *env,
+ struct thandle *th);
+ /**
+ * Add commit callback to the transaction.
+ */
+ int (*dt_trans_cb_add)(struct thandle *th,
+ struct dt_txn_commit_cb *dcb);
+ /**
+ * Return fid of root index object.
+ */
+ int (*dt_root_get)(const struct lu_env *env,
+ struct dt_device *dev, struct lu_fid *f);
+ /**
+ * Return device configuration data.
+ */
+ void (*dt_conf_get)(const struct lu_env *env,
+ const struct dt_device *dev,
+ struct dt_device_param *param);
+ /**
+ * handling device state, mostly for tests
+ */
+ int (*dt_sync)(const struct lu_env *env, struct dt_device *dev);
+ int (*dt_ro)(const struct lu_env *env, struct dt_device *dev);
+ /**
+ * Start a transaction commit asynchronously
+ *
+ * \param env environment
+ * \param dev dt_device to start commit on
+ *
+ * \return 0 success, negative value if error
+ */
+ int (*dt_commit_async)(const struct lu_env *env,
+ struct dt_device *dev);
+ /**
+ * Initialize capability context.
+ */
+ int (*dt_init_capa_ctxt)(const struct lu_env *env,
+ struct dt_device *dev,
+ int mode, unsigned long timeout,
+ __u32 alg, struct lustre_capa_key *keys);
+};
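+
+/*
+ * Usage sketch (illustrative only, not taken from the Lustre sources): the
+ * expected transaction life cycle through this vector, where "ops" stands
+ * for the device's dt_device_operations and error handling is omitted:
+ *
+ *	struct thandle *th;
+ *
+ *	th = ops->dt_trans_create(env, dev);
+ *	// ... declare all intended updates against "th" ...
+ *	ops->dt_trans_start(env, dev, th);
+ *	// ... apply the declared updates ...
+ *	ops->dt_trans_stop(env, th);
+ */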
+
+struct dt_index_features {
+ /** required feature flags from enum dt_index_flags */
+ __u32 dif_flags;
+ /** minimal required key size */
+ size_t dif_keysize_min;
+ /** maximal required key size, 0 if no limit */
+ size_t dif_keysize_max;
+ /** minimal required record size */
+ size_t dif_recsize_min;
+ /** maximal required record size, 0 if no limit */
+ size_t dif_recsize_max;
+ /** pointer size for record */
+ size_t dif_ptrsize;
+};
+
+enum dt_index_flags {
+ /** index supports variable sized keys */
+ DT_IND_VARKEY = 1 << 0,
+ /** index supports variable sized records */
+ DT_IND_VARREC = 1 << 1,
+ /** index can be modified */
+ DT_IND_UPDATE = 1 << 2,
+ /** index supports records with non-unique (duplicate) keys */
+ DT_IND_NONUNQ = 1 << 3,
+ /**
+ * index supports fixed-size keys sorted in natural numerical order
+ * and is able to return the left-side value if no exact match is found
+ */
+ DT_IND_RANGE = 1 << 4,
+};
+
+/**
+ * Features, required from index to support file system directories (mapping
+ * names to fids).
+ */
+extern const struct dt_index_features dt_directory_features;
+extern const struct dt_index_features dt_otable_features;
+extern const struct dt_index_features dt_lfsck_features;
+
+/* index features supported by the accounting objects */
+extern const struct dt_index_features dt_acct_features;
+
+/* index features supported by the quota global indexes */
+extern const struct dt_index_features dt_quota_glb_features;
+
+/* index features supported by the quota slave indexes */
+extern const struct dt_index_features dt_quota_slv_features;
+
+/**
+ * This is a general purpose dt allocation hint.
+ * It now contains the parent object.
+ * It can contain any allocation hint in the future.
+ */
+struct dt_allocation_hint {
+ struct dt_object *dah_parent;
+ __u32 dah_mode;
+};
+
+/**
+ * object type specifier.
+ */
+
+enum dt_format_type {
+ DFT_REGULAR,
+ DFT_DIR,
+ /** for mknod */
+ DFT_NODE,
+ /** for special index */
+ DFT_INDEX,
+ /** for symbolic link */
+ DFT_SYM,
+};
+
+/**
+ * object format specifier.
+ */
+struct dt_object_format {
+ /** type for dt object */
+ enum dt_format_type dof_type;
+ union {
+ struct dof_regular {
+ int striped;
+ } dof_reg;
+ struct dof_dir {
+ } dof_dir;
+ struct dof_node {
+ } dof_node;
+ /**
+ * a special index needs its features as a parameter in order to
+ * create the special index
+ */
+ struct dof_index {
+ const struct dt_index_features *di_feat;
+ } dof_idx;
+ } u;
+};
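+
+/*
+ * Usage sketch (illustrative only, not taken from the Lustre sources):
+ * creating a directory-like index would describe the object as
+ *
+ *	struct dt_object_format dof = {
+ *		.dof_type = DFT_INDEX,
+ *		.u.dof_idx.di_feat = &dt_directory_features,
+ *	};
+ *
+ * which is then passed to the do_declare_create()/do_create() methods below.
+ */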
+
+enum dt_format_type dt_mode_to_dft(__u32 mode);
+
+typedef __u64 dt_obj_version_t;
+
+/**
+ * Per-dt-object operations.
+ */
+struct dt_object_operations {
+ void (*do_read_lock)(const struct lu_env *env,
+ struct dt_object *dt, unsigned role);
+ void (*do_write_lock)(const struct lu_env *env,
+ struct dt_object *dt, unsigned role);
+ void (*do_read_unlock)(const struct lu_env *env,
+ struct dt_object *dt);
+ void (*do_write_unlock)(const struct lu_env *env,
+ struct dt_object *dt);
+ int (*do_write_locked)(const struct lu_env *env,
+ struct dt_object *dt);
+ /**
+ * Note: following ->do_{x,}attr_{set,get}() operations are very
+ * similar to ->moo_{x,}attr_{set,get}() operations in struct
+ * md_object_operations (see md_object.h). These operations are not in
+ * lu_object_operations, because ->do_{x,}attr_set() versions take
+ * transaction handle as an argument (this transaction is started by
+ * caller). We might factor ->do_{x,}attr_get() into
+ * lu_object_operations, but that would break existing symmetry.
+ */
+
+ /**
+ * Return standard attributes.
+ *
+ * precondition: lu_object_exists(&dt->do_lu);
+ */
+ int (*do_attr_get)(const struct lu_env *env,
+ struct dt_object *dt, struct lu_attr *attr,
+ struct lustre_capa *capa);
+ /**
+ * Set standard attributes.
+ *
+ * precondition: dt_object_exists(dt);
+ */
+ int (*do_declare_attr_set)(const struct lu_env *env,
+ struct dt_object *dt,
+ const struct lu_attr *attr,
+ struct thandle *handle);
+ int (*do_attr_set)(const struct lu_env *env,
+ struct dt_object *dt,
+ const struct lu_attr *attr,
+ struct thandle *handle,
+ struct lustre_capa *capa);
+ /**
+ * Return a value of an extended attribute.
+ *
+ * precondition: dt_object_exists(dt);
+ */
+ int (*do_xattr_get)(const struct lu_env *env, struct dt_object *dt,
+ struct lu_buf *buf, const char *name,
+ struct lustre_capa *capa);
+ /**
+ * Set value of an extended attribute.
+ *
+ * \a fl - flags from enum lu_xattr_flags
+ *
+ * precondition: dt_object_exists(dt);
+ */
+ int (*do_declare_xattr_set)(const struct lu_env *env,
+ struct dt_object *dt,
+ const struct lu_buf *buf,
+ const char *name, int fl,
+ struct thandle *handle);
+ int (*do_xattr_set)(const struct lu_env *env,
+ struct dt_object *dt, const struct lu_buf *buf,
+ const char *name, int fl, struct thandle *handle,
+ struct lustre_capa *capa);
+ /**
+ * Delete existing extended attribute.
+ *
+ * precondition: dt_object_exists(dt);
+ */
+ int (*do_declare_xattr_del)(const struct lu_env *env,
+ struct dt_object *dt,
+ const char *name, struct thandle *handle);
+ int (*do_xattr_del)(const struct lu_env *env,
+ struct dt_object *dt,
+ const char *name, struct thandle *handle,
+ struct lustre_capa *capa);
+ /**
+ * Place list of existing extended attributes into \a buf (which has
+ * length len).
+ *
+ * precondition: dt_object_exists(dt);
+ */
+ int (*do_xattr_list)(const struct lu_env *env,
+ struct dt_object *dt, struct lu_buf *buf,
+ struct lustre_capa *capa);
+ /**
+ * Init allocation hint using parent object and child mode.
+ * (1) The \a parent might be NULL if this is a partial creation for
+ * remote object.
+ * (2) The type of child is in \a child_mode.
+ * (3) The result hint is stored in \a ah;
+ */
+ void (*do_ah_init)(const struct lu_env *env,
+ struct dt_allocation_hint *ah,
+ struct dt_object *parent,
+ struct dt_object *child,
+ umode_t child_mode);
+ /**
+ * Create new object on this device.
+ *
+ * precondition: !dt_object_exists(dt);
+ * postcondition: ergo(result == 0, dt_object_exists(dt));
+ */
+ int (*do_declare_create)(const struct lu_env *env,
+ struct dt_object *dt,
+ struct lu_attr *attr,
+ struct dt_allocation_hint *hint,
+ struct dt_object_format *dof,
+ struct thandle *th);
+ int (*do_create)(const struct lu_env *env, struct dt_object *dt,
+ struct lu_attr *attr,
+ struct dt_allocation_hint *hint,
+ struct dt_object_format *dof,
+ struct thandle *th);
+
+ /**
+ * Destroy object on this device.
+ *
+ * precondition: dt_object_exists(dt);
+ * postcondition: ergo(result == 0, !dt_object_exists(dt));
+ */
+ int (*do_declare_destroy)(const struct lu_env *env,
+ struct dt_object *dt,
+ struct thandle *th);
+ int (*do_destroy)(const struct lu_env *env, struct dt_object *dt,
+ struct thandle *th);
+
+ /**
+ * Announce that this object is going to be used as an index. This
+ * operation checks that the object supports indexing operations and
+ * installs the appropriate dt_index_operations vector on success.
+ *
+ * Also probes for features. Operation is successful if all required
+ * features are supported.
+ */
+ int (*do_index_try)(const struct lu_env *env,
+ struct dt_object *dt,
+ const struct dt_index_features *feat);
+ /**
+ * Increment nlink of the object.
+ * precondition: dt_object_exists(dt);
+ */
+ int (*do_declare_ref_add)(const struct lu_env *env,
+ struct dt_object *dt, struct thandle *th);
+ int (*do_ref_add)(const struct lu_env *env,
+ struct dt_object *dt, struct thandle *th);
+ /**
+ * Decrement nlink of the object.
+ * precondition: dt_object_exists(dt);
+ */
+ int (*do_declare_ref_del)(const struct lu_env *env,
+ struct dt_object *dt, struct thandle *th);
+ int (*do_ref_del)(const struct lu_env *env,
+ struct dt_object *dt, struct thandle *th);
+
+ struct obd_capa *(*do_capa_get)(const struct lu_env *env,
+ struct dt_object *dt,
+ struct lustre_capa *old,
+ __u64 opc);
+ int (*do_object_sync)(const struct lu_env *, struct dt_object *);
+ /**
+ * Get object info of the next level. Currently this only gets the inode
+ * from the OSD. This is only used by quota. b=16542
+ * precondition: dt_object_exists(dt);
+ */
+ int (*do_data_get)(const struct lu_env *env, struct dt_object *dt,
+ void **data);
+
+ /**
+ * Lock object.
+ */
+ int (*do_object_lock)(const struct lu_env *env, struct dt_object *dt,
+ struct lustre_handle *lh,
+ struct ldlm_enqueue_info *einfo,
+ void *policy);
+};
+
+/**
+ * Per-dt-object operations on "file body".
+ */
+struct dt_body_operations {
+ /**
+ * precondition: dt_object_exists(dt);
+ */
+ ssize_t (*dbo_read)(const struct lu_env *env, struct dt_object *dt,
+ struct lu_buf *buf, loff_t *pos,
+ struct lustre_capa *capa);
+ /**
+ * precondition: dt_object_exists(dt);
+ */
+ ssize_t (*dbo_declare_write)(const struct lu_env *env,
+ struct dt_object *dt,
+ const loff_t size, loff_t pos,
+ struct thandle *handle);
+ ssize_t (*dbo_write)(const struct lu_env *env, struct dt_object *dt,
+ const struct lu_buf *buf, loff_t *pos,
+ struct thandle *handle, struct lustre_capa *capa,
+ int ignore_quota);
+ /*
+ * methods for zero-copy IO
+ */
+
+ /*
+ * precondition: dt_object_exists(dt);
+ * returns:
+ * < 0 - error code
+ * = 0 - illegal
+ * > 0 - number of local buffers prepared
+ */
+ int (*dbo_bufs_get)(const struct lu_env *env, struct dt_object *dt,
+ loff_t pos, ssize_t len, struct niobuf_local *lb,
+ int rw, struct lustre_capa *capa);
+ /*
+ * precondition: dt_object_exists(dt);
+ */
+ int (*dbo_bufs_put)(const struct lu_env *env, struct dt_object *dt,
+ struct niobuf_local *lb, int nr);
+ /*
+ * precondition: dt_object_exists(dt);
+ */
+ int (*dbo_write_prep)(const struct lu_env *env, struct dt_object *dt,
+ struct niobuf_local *lb, int nr);
+ /*
+ * precondition: dt_object_exists(dt);
+ */
+ int (*dbo_declare_write_commit)(const struct lu_env *env,
+ struct dt_object *dt,
+ struct niobuf_local *,
+ int, struct thandle *);
+ /*
+ * precondition: dt_object_exists(dt);
+ */
+ int (*dbo_write_commit)(const struct lu_env *env, struct dt_object *dt,
+ struct niobuf_local *, int, struct thandle *);
+ /*
+ * precondition: dt_object_exists(dt);
+ */
+ int (*dbo_read_prep)(const struct lu_env *env, struct dt_object *dt,
+ struct niobuf_local *lnb, int nr);
+ int (*dbo_fiemap_get)(const struct lu_env *env, struct dt_object *dt,
+ struct ll_user_fiemap *fm);
+ /**
+ * Punch object's content
+ * precondition: regular object, not index
+ */
+ int (*dbo_declare_punch)(const struct lu_env *, struct dt_object *,
+ __u64, __u64, struct thandle *th);
+ int (*dbo_punch)(const struct lu_env *env, struct dt_object *dt,
+ __u64 start, __u64 end, struct thandle *th,
+ struct lustre_capa *capa);
+};
+
+/**
+ * Incomplete type of index record.
+ */
+struct dt_rec;
+
+/**
+ * Incomplete type of index key.
+ */
+struct dt_key;
+
+/**
+ * Incomplete type of dt iterator.
+ */
+struct dt_it;
+
+/**
+ * Per-dt-object operations on object as index.
+ */
+struct dt_index_operations {
+ /**
+ * precondition: dt_object_exists(dt);
+ */
+ int (*dio_lookup)(const struct lu_env *env, struct dt_object *dt,
+ struct dt_rec *rec, const struct dt_key *key,
+ struct lustre_capa *capa);
+ /**
+ * precondition: dt_object_exists(dt);
+ */
+ int (*dio_declare_insert)(const struct lu_env *env,
+ struct dt_object *dt,
+ const struct dt_rec *rec,
+ const struct dt_key *key,
+ struct thandle *handle);
+ int (*dio_insert)(const struct lu_env *env, struct dt_object *dt,
+ const struct dt_rec *rec, const struct dt_key *key,
+ struct thandle *handle, struct lustre_capa *capa,
+ int ignore_quota);
+ /**
+ * precondition: dt_object_exists(dt);
+ */
+ int (*dio_declare_delete)(const struct lu_env *env,
+ struct dt_object *dt,
+ const struct dt_key *key,
+ struct thandle *handle);
+ int (*dio_delete)(const struct lu_env *env, struct dt_object *dt,
+ const struct dt_key *key, struct thandle *handle,
+ struct lustre_capa *capa);
+ /**
+ * Iterator interface
+ */
+ struct dt_it_ops {
+ /**
+ * Allocate and initialize new iterator.
+ *
+ * precondition: dt_object_exists(dt);
+ */
+ struct dt_it *(*init)(const struct lu_env *env,
+ struct dt_object *dt,
+ __u32 attr,
+ struct lustre_capa *capa);
+ void (*fini)(const struct lu_env *env,
+ struct dt_it *di);
+ int (*get)(const struct lu_env *env,
+ struct dt_it *di,
+ const struct dt_key *key);
+ void (*put)(const struct lu_env *env,
+ struct dt_it *di);
+ int (*next)(const struct lu_env *env,
+ struct dt_it *di);
+ struct dt_key *(*key)(const struct lu_env *env,
+ const struct dt_it *di);
+ int (*key_size)(const struct lu_env *env,
+ const struct dt_it *di);
+ int (*rec)(const struct lu_env *env,
+ const struct dt_it *di,
+ struct dt_rec *rec,
+ __u32 attr);
+ __u64 (*store)(const struct lu_env *env,
+ const struct dt_it *di);
+ int (*load)(const struct lu_env *env,
+ const struct dt_it *di, __u64 hash);
+ int (*key_rec)(const struct lu_env *env,
+ const struct dt_it *di, void* key_rec);
+ } dio_it;
+};
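+
+/*
+ * Minimal usage sketch for the iterator interface above (illustrative
+ * only, not part of the API; the function and parameter names are made
+ * up). A typical walk allocates an iterator, positions it at the start,
+ * fetches records until ->next() reports end-of-index, and releases the
+ * iterator. The positioning convention assumed for ->load() (hash 0 ==
+ * "start of index") follows the common dt_index_walk() usage; individual
+ * backends may differ.
+ */
+static inline int dt_index_walk_example(const struct lu_env *env,
+                                        struct dt_object *dt,
+                                        const struct dt_it_ops *iops,
+                                        struct dt_rec *rec, __u32 attr,
+                                        struct lustre_capa *capa)
+{
+        struct dt_it *it;
+        int rc;
+
+        /* dt must already be set up as an index (see ->do_index_try());
+         * iops is then &dt->do_index_ops->dio_it */
+        it = iops->init(env, dt, attr, capa);
+        if (IS_ERR(it))
+                return PTR_ERR(it);
+
+        rc = iops->load(env, it, 0);
+        if (rc > 0)             /* positioned on an existing record */
+                rc = 0;
+        else if (rc == 0)       /* not positioned, advance to the first one */
+                rc = iops->next(env, it);
+
+        while (rc == 0) {
+                /* each record overwrites the previous one in @rec;
+                 * a real caller would consume it here */
+                rc = iops->rec(env, it, rec, attr);
+                if (rc != 0)
+                        break;
+                rc = iops->next(env, it);       /* > 0 means end of index */
+        }
+        if (rc > 0)
+                rc = 0;
+
+        iops->put(env, it);
+        iops->fini(env, it);
+        return rc;
+}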
+
+enum dt_otable_it_valid {
+ DOIV_ERROR_HANDLE = 0x0001,
+};
+
+enum dt_otable_it_flags {
+ /* Exit when fail. */
+ DOIF_FAILOUT = 0x0001,
+
+ /* Reset iteration position to the device beginning. */
+ DOIF_RESET = 0x0002,
+
+ /* An upper layer component is using the iteration. */
+ DOIF_OUTUSED = 0x0004,
+};
+
+/* otable based iteration needs to use the common DT iteration APIs.
+ * To initialize the iteration, dio_it::init() must be called first.
+ * Here is how otable based iteration should prepare the arguments for
+ * dt_it_ops::init().
+ *
+ * For otable based iteration, the 32-bit 'attr' for dt_it_ops::init()
+ * is composed of two parts: the low 16 bits carry the valid bits and
+ * the high 16 bits carry the flags bits (see the sketch below). */
+#define DT_OTABLE_IT_FLAGS_SHIFT 16
+#define DT_OTABLE_IT_FLAGS_MASK 0xffff0000
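+
+/*
+ * Illustrative helper only (dt_otable_it_attr is not part of the API):
+ * shows how the 32-bit 'attr' for dt_it_ops::init() is composed for
+ * otable based iteration, valid bits in the low 16 bits and flags in
+ * the high 16 bits as described above.
+ */
+static inline __u32 dt_otable_it_attr(__u16 valid, __u16 flags)
+{
+        return (__u32)valid |
+               ((__u32)flags << DT_OTABLE_IT_FLAGS_SHIFT);
+}
+
+/* e.g. dt_otable_it_attr(DOIV_ERROR_HANDLE, DOIF_RESET | DOIF_FAILOUT) */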
+
+struct dt_device {
+ struct lu_device dd_lu_dev;
+ const struct dt_device_operations *dd_ops;
+
+ /**
+ * List of dt_txn_callback (see below). This is not protected in any
+ * way, because callbacks are supposed to be added/deleted only during
+ * single-threaded start-up and shut-down procedures.
+ */
+ struct list_head dd_txn_callbacks;
+};
+
+int dt_device_init(struct dt_device *dev, struct lu_device_type *t);
+void dt_device_fini(struct dt_device *dev);
+
+static inline int lu_device_is_dt(const struct lu_device *d)
+{
+ return ergo(d != NULL, d->ld_type->ldt_tags & LU_DEVICE_DT);
+}
+
+static inline struct dt_device * lu2dt_dev(struct lu_device *l)
+{
+ LASSERT(lu_device_is_dt(l));
+ return container_of0(l, struct dt_device, dd_lu_dev);
+}
+
+struct dt_object {
+ struct lu_object do_lu;
+ const struct dt_object_operations *do_ops;
+ const struct dt_body_operations *do_body_ops;
+ const struct dt_index_operations *do_index_ops;
+};
+
+/*
+ * In-core representation of per-device local object OID storage
+ */
+struct local_oid_storage {
+ /* all initialized llog systems on this node linked by this */
+ struct list_head los_list;
+
+ /* how many handles reference this los */
+ atomic_t los_refcount;
+ struct dt_device *los_dev;
+ struct dt_object *los_obj;
+
+ /* data used to generate new fids */
+ struct mutex los_id_lock;
+ __u64 los_seq;
+ __u32 los_last_oid;
+};
+
+static inline struct dt_object *lu2dt(struct lu_object *l)
+{
+ LASSERT(l == NULL || IS_ERR(l) || lu_device_is_dt(l->lo_dev));
+ return container_of0(l, struct dt_object, do_lu);
+}
+
+int dt_object_init(struct dt_object *obj,
+ struct lu_object_header *h, struct lu_device *d);
+
+void dt_object_fini(struct dt_object *obj);
+
+static inline int dt_object_exists(const struct dt_object *dt)
+{
+ return lu_object_exists(&dt->do_lu);
+}
+
+static inline int dt_object_remote(const struct dt_object *dt)
+{
+ return lu_object_remote(&dt->do_lu);
+}
+
+static inline struct dt_object *lu2dt_obj(struct lu_object *o)
+{
+ LASSERT(ergo(o != NULL, lu_device_is_dt(o->lo_dev)));
+ return container_of0(o, struct dt_object, do_lu);
+}
+
+/**
+ * This is the general purpose transaction handle.
+ * 1. Transaction Life Cycle
+ * This transaction handle is allocated upon starting a new transaction,
+ * and deallocated after this transaction is committed.
+ * 2. Transaction Nesting
+ * We do _NOT_ support nested transactions. So, every thread should only
+ * have one active transaction, and a transaction only belongs to one
+ * thread. Because of this, the transaction handle needs no reference count.
+ * 3. Transaction & dt_object locking
+ * dt_object locks should be taken inside a transaction.
+ * 4. Transaction & RPC
+ * No RPC request should be issued inside a transaction.
+ */
+struct thandle {
+ /** the dt device on which the transactions are executed */
+ struct dt_device *th_dev;
+
+ /** context for this transaction, tag is LCT_TX_HANDLE */
+ struct lu_context th_ctx;
+
+ /** additional tags (layers can add in declare) */
+ __u32 th_tags;
+
+ /** the last operation result in this transaction.
+ * this value is used in recovery */
+ __s32 th_result;
+
+ /** whether we need sync commit */
+ unsigned int th_sync:1;
+
+ /* local transaction, no need to inform other layers */
+ unsigned int th_local:1;
+
+ /* In DNE, one transaction can be disassembled into
+ * updates on several different MDTs, and these updates
+ * will be attached to th_remote_update_list per target.
+ * Only a single thread will access the list, so no lock is needed.
+ */
+ struct list_head th_remote_update_list;
+ struct update_request *th_current_request;
+};
+
+/**
+ * Transaction call-backs.
+ *
+ * These are invoked by osd (or underlying transaction engine) when
+ * transaction changes state.
+ *
+ * Call-backs are used by upper layers to modify transaction parameters and to
+ * perform some actions for each transaction state transition. A typical
+ * example is the mdt registering a call-back to write into the
+ * last-received file before each transaction commit.
+ */
+struct dt_txn_callback {
+ int (*dtc_txn_start)(const struct lu_env *env,
+ struct thandle *txn, void *cookie);
+ int (*dtc_txn_stop)(const struct lu_env *env,
+ struct thandle *txn, void *cookie);
+ void (*dtc_txn_commit)(struct thandle *txn, void *cookie);
+ void *dtc_cookie;
+ __u32 dtc_tag;
+ struct list_head dtc_linkage;
+};
+
+void dt_txn_callback_add(struct dt_device *dev, struct dt_txn_callback *cb);
+void dt_txn_callback_del(struct dt_device *dev, struct dt_txn_callback *cb);
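+
+/*
+ * Illustrative sketch only (the names below are made up): how a layer
+ * could hook transaction commit, in the spirit of the mdt/last-received
+ * example above. Registration must happen during the single-threaded
+ * start-up described for dt_device::dd_txn_callbacks.
+ */
+static inline void example_txn_commit_cb(struct thandle *txn, void *cookie)
+{
+        /* e.g. push per-client recovery state out to stable storage */
+}
+
+static inline void example_txn_cb_register(struct dt_device *dev,
+                                           struct dt_txn_callback *cb)
+{
+        cb->dtc_txn_start = NULL;       /* not interested in start/stop */
+        cb->dtc_txn_stop = NULL;
+        cb->dtc_txn_commit = example_txn_commit_cb;
+        cb->dtc_cookie = NULL;
+        cb->dtc_tag = 0;                /* a real layer uses its lu_context tag */
+        dt_txn_callback_add(dev, cb);
+}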
+
+int dt_txn_hook_start(const struct lu_env *env,
+ struct dt_device *dev, struct thandle *txn);
+int dt_txn_hook_stop(const struct lu_env *env, struct thandle *txn);
+void dt_txn_hook_commit(struct thandle *txn);
+
+int dt_try_as_dir(const struct lu_env *env, struct dt_object *obj);
+
+/**
+ * Callback function used for parsing path.
+ * \see llo_store_resolve
+ */
+typedef int (*dt_entry_func_t)(const struct lu_env *env,
+ const char *name,
+ void *pvt);
+
+#define DT_MAX_PATH 1024
+
+int dt_path_parser(const struct lu_env *env,
+ char *local, dt_entry_func_t entry_func,
+ void *data);
+
+struct dt_object *
+dt_store_resolve(const struct lu_env *env, struct dt_device *dt,
+ const char *path, struct lu_fid *fid);
+
+struct dt_object *dt_store_open(const struct lu_env *env,
+ struct dt_device *dt,
+ const char *dirname,
+ const char *filename,
+ struct lu_fid *fid);
+
+struct dt_object *dt_find_or_create(const struct lu_env *env,
+ struct dt_device *dt,
+ const struct lu_fid *fid,
+ struct dt_object_format *dof,
+ struct lu_attr *attr);
+
+struct dt_object *dt_locate_at(const struct lu_env *env,
+ struct dt_device *dev,
+ const struct lu_fid *fid,
+ struct lu_device *top_dev);
+static inline struct dt_object *
+dt_locate(const struct lu_env *env, struct dt_device *dev,
+ const struct lu_fid *fid)
+{
+ return dt_locate_at(env, dev, fid, dev->dd_lu_dev.ld_site->ls_top_dev);
+}
+
+
+int local_oid_storage_init(const struct lu_env *env, struct dt_device *dev,
+ const struct lu_fid *first_fid,
+ struct local_oid_storage **los);
+void local_oid_storage_fini(const struct lu_env *env,
+ struct local_oid_storage *los);
+int local_object_fid_generate(const struct lu_env *env,
+ struct local_oid_storage *los,
+ struct lu_fid *fid);
+int local_object_declare_create(const struct lu_env *env,
+ struct local_oid_storage *los,
+ struct dt_object *o,
+ struct lu_attr *attr,
+ struct dt_object_format *dof,
+ struct thandle *th);
+int local_object_create(const struct lu_env *env,
+ struct local_oid_storage *los,
+ struct dt_object *o,
+ struct lu_attr *attr, struct dt_object_format *dof,
+ struct thandle *th);
+struct dt_object *local_file_find_or_create(const struct lu_env *env,
+ struct local_oid_storage *los,
+ struct dt_object *parent,
+ const char *name, __u32 mode);
+struct dt_object *local_file_find_or_create_with_fid(const struct lu_env *env,
+ struct dt_device *dt,
+ const struct lu_fid *fid,
+ struct dt_object *parent,
+ const char *name,
+ __u32 mode);
+struct dt_object *
+local_index_find_or_create(const struct lu_env *env,
+ struct local_oid_storage *los,
+ struct dt_object *parent,
+ const char *name, __u32 mode,
+ const struct dt_index_features *ft);
+struct dt_object *
+local_index_find_or_create_with_fid(const struct lu_env *env,
+ struct dt_device *dt,
+ const struct lu_fid *fid,
+ struct dt_object *parent,
+ const char *name, __u32 mode,
+ const struct dt_index_features *ft);
+int local_object_unlink(const struct lu_env *env, struct dt_device *dt,
+ struct dt_object *parent, const char *name);
+
+static inline int dt_object_lock(const struct lu_env *env,
+ struct dt_object *o, struct lustre_handle *lh,
+ struct ldlm_enqueue_info *einfo,
+ void *policy)
+{
+ LASSERT(o);
+ LASSERT(o->do_ops);
+ LASSERT(o->do_ops->do_object_lock);
+ return o->do_ops->do_object_lock(env, o, lh, einfo, policy);
+}
+
+int dt_lookup_dir(const struct lu_env *env, struct dt_object *dir,
+ const char *name, struct lu_fid *fid);
+
+static inline int dt_object_sync(const struct lu_env *env,
+ struct dt_object *o)
+{
+ LASSERT(o);
+ LASSERT(o->do_ops);
+ LASSERT(o->do_ops->do_object_sync);
+ return o->do_ops->do_object_sync(env, o);
+}
+
+int dt_declare_version_set(const struct lu_env *env, struct dt_object *o,
+ struct thandle *th);
+void dt_version_set(const struct lu_env *env, struct dt_object *o,
+ dt_obj_version_t version, struct thandle *th);
+dt_obj_version_t dt_version_get(const struct lu_env *env, struct dt_object *o);
+
+
+int dt_read(const struct lu_env *env, struct dt_object *dt,
+ struct lu_buf *buf, loff_t *pos);
+int dt_record_read(const struct lu_env *env, struct dt_object *dt,
+ struct lu_buf *buf, loff_t *pos);
+int dt_record_write(const struct lu_env *env, struct dt_object *dt,
+ const struct lu_buf *buf, loff_t *pos, struct thandle *th);
+typedef int (*dt_index_page_build_t)(const struct lu_env *env,
+ union lu_page *lp, int nob,
+ const struct dt_it_ops *iops,
+ struct dt_it *it, __u32 attr, void *arg);
+int dt_index_walk(const struct lu_env *env, struct dt_object *obj,
+ const struct lu_rdpg *rdpg, dt_index_page_build_t filler,
+ void *arg);
+int dt_index_read(const struct lu_env *env, struct dt_device *dev,
+ struct idx_info *ii, const struct lu_rdpg *rdpg);
+
+static inline struct thandle *dt_trans_create(const struct lu_env *env,
+ struct dt_device *d)
+{
+ LASSERT(d->dd_ops->dt_trans_create);
+ return d->dd_ops->dt_trans_create(env, d);
+}
+
+static inline int dt_trans_start(const struct lu_env *env,
+ struct dt_device *d, struct thandle *th)
+{
+ LASSERT(d->dd_ops->dt_trans_start);
+ return d->dd_ops->dt_trans_start(env, d, th);
+}
+
+/* for this transaction hooks shouldn't be called */
+static inline int dt_trans_start_local(const struct lu_env *env,
+ struct dt_device *d, struct thandle *th)
+{
+ LASSERT(d->dd_ops->dt_trans_start);
+ th->th_local = 1;
+ return d->dd_ops->dt_trans_start(env, d, th);
+}
+
+static inline int dt_trans_stop(const struct lu_env *env,
+ struct dt_device *d, struct thandle *th)
+{
+ LASSERT(d->dd_ops->dt_trans_stop);
+ return d->dd_ops->dt_trans_stop(env, th);
+}
+
+static inline int dt_trans_cb_add(struct thandle *th,
+ struct dt_txn_commit_cb *dcb)
+{
+ LASSERT(th->th_dev->dd_ops->dt_trans_cb_add);
+ dcb->dcb_magic = TRANS_COMMIT_CB_MAGIC;
+ return th->th_dev->dd_ops->dt_trans_cb_add(th, dcb);
+}
+/** @} dt */
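+
+/*
+ * Minimal transaction life cycle sketch (illustrative only; the function
+ * name is made up): create the handle, declare every intended update,
+ * start, execute, stop. This mirrors the rules documented on struct
+ * thandle above; the empty bodies stand in for real dt_declare_*() and
+ * dt_*() calls.
+ */
+static inline int dt_trans_example(const struct lu_env *env,
+                                   struct dt_device *dev)
+{
+        struct thandle *th;
+        int rc, rc2;
+
+        th = dt_trans_create(env, dev);
+        if (IS_ERR(th))
+                return PTR_ERR(th);
+
+        /* all dt_declare_*() calls go here, before dt_trans_start(),
+         * so the backend can reserve credits for the updates */
+
+        rc = dt_trans_start(env, dev, th);
+        if (rc == 0) {
+                /* execute exactly the updates declared above */
+        }
+
+        /* always stop the handle, even on error */
+        rc2 = dt_trans_stop(env, dev, th);
+        return rc != 0 ? rc : rc2;
+}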
+
+
+static inline int dt_declare_record_write(const struct lu_env *env,
+ struct dt_object *dt,
+ int size, loff_t pos,
+ struct thandle *th)
+{
+ int rc;
+
+ LASSERTF(dt != NULL, "dt is NULL when we want to write record\n");
+ LASSERT(th != NULL);
+ LASSERT(dt->do_body_ops);
+ LASSERT(dt->do_body_ops->dbo_declare_write);
+ rc = dt->do_body_ops->dbo_declare_write(env, dt, size, pos, th);
+ return rc;
+}
+
+static inline int dt_declare_create(const struct lu_env *env,
+ struct dt_object *dt,
+ struct lu_attr *attr,
+ struct dt_allocation_hint *hint,
+ struct dt_object_format *dof,
+ struct thandle *th)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_ops);
+ LASSERT(dt->do_ops->do_declare_create);
+ return dt->do_ops->do_declare_create(env, dt, attr, hint, dof, th);
+}
+
+static inline int dt_create(const struct lu_env *env,
+ struct dt_object *dt,
+ struct lu_attr *attr,
+ struct dt_allocation_hint *hint,
+ struct dt_object_format *dof,
+ struct thandle *th)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_ops);
+ LASSERT(dt->do_ops->do_create);
+ return dt->do_ops->do_create(env, dt, attr, hint, dof, th);
+}
+
+static inline int dt_declare_destroy(const struct lu_env *env,
+ struct dt_object *dt,
+ struct thandle *th)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_ops);
+ LASSERT(dt->do_ops->do_declare_destroy);
+ return dt->do_ops->do_declare_destroy(env, dt, th);
+}
+
+static inline int dt_destroy(const struct lu_env *env,
+ struct dt_object *dt,
+ struct thandle *th)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_ops);
+ LASSERT(dt->do_ops->do_destroy);
+ return dt->do_ops->do_destroy(env, dt, th);
+}
+
+static inline void dt_read_lock(const struct lu_env *env,
+ struct dt_object *dt,
+ unsigned role)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_ops);
+ LASSERT(dt->do_ops->do_read_lock);
+ dt->do_ops->do_read_lock(env, dt, role);
+}
+
+static inline void dt_write_lock(const struct lu_env *env,
+ struct dt_object *dt,
+ unsigned role)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_ops);
+ LASSERT(dt->do_ops->do_write_lock);
+ dt->do_ops->do_write_lock(env, dt, role);
+}
+
+static inline void dt_read_unlock(const struct lu_env *env,
+ struct dt_object *dt)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_ops);
+ LASSERT(dt->do_ops->do_read_unlock);
+ dt->do_ops->do_read_unlock(env, dt);
+}
+
+static inline void dt_write_unlock(const struct lu_env *env,
+ struct dt_object *dt)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_ops);
+ LASSERT(dt->do_ops->do_write_unlock);
+ dt->do_ops->do_write_unlock(env, dt);
+}
+
+static inline int dt_write_locked(const struct lu_env *env,
+ struct dt_object *dt)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_ops);
+ LASSERT(dt->do_ops->do_write_locked);
+ return dt->do_ops->do_write_locked(env, dt);
+}
+
+static inline int dt_attr_get(const struct lu_env *env, struct dt_object *dt,
+ struct lu_attr *la, void *arg)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_ops);
+ LASSERT(dt->do_ops->do_attr_get);
+ return dt->do_ops->do_attr_get(env, dt, la, arg);
+}
+
+static inline int dt_declare_attr_set(const struct lu_env *env,
+ struct dt_object *dt,
+ const struct lu_attr *la,
+ struct thandle *th)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_ops);
+ LASSERT(dt->do_ops->do_declare_attr_set);
+ return dt->do_ops->do_declare_attr_set(env, dt, la, th);
+}
+
+static inline int dt_attr_set(const struct lu_env *env, struct dt_object *dt,
+ const struct lu_attr *la, struct thandle *th,
+ struct lustre_capa *capa)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_ops);
+ LASSERT(dt->do_ops->do_attr_set);
+ return dt->do_ops->do_attr_set(env, dt, la, th, capa);
+}
+
+static inline int dt_declare_ref_add(const struct lu_env *env,
+ struct dt_object *dt, struct thandle *th)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_ops);
+ LASSERT(dt->do_ops->do_declare_ref_add);
+ return dt->do_ops->do_declare_ref_add(env, dt, th);
+}
+
+static inline int dt_ref_add(const struct lu_env *env,
+ struct dt_object *dt, struct thandle *th)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_ops);
+ LASSERT(dt->do_ops->do_ref_add);
+ return dt->do_ops->do_ref_add(env, dt, th);
+}
+
+static inline int dt_declare_ref_del(const struct lu_env *env,
+ struct dt_object *dt, struct thandle *th)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_ops);
+ LASSERT(dt->do_ops->do_declare_ref_del);
+ return dt->do_ops->do_declare_ref_del(env, dt, th);
+}
+
+static inline int dt_ref_del(const struct lu_env *env,
+ struct dt_object *dt, struct thandle *th)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_ops);
+ LASSERT(dt->do_ops->do_ref_del);
+ return dt->do_ops->do_ref_del(env, dt, th);
+}
+
+static inline struct obd_capa *dt_capa_get(const struct lu_env *env,
+ struct dt_object *dt,
+ struct lustre_capa *old, __u64 opc)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_ops);
+ LASSERT(dt->do_ops->do_capa_get);
+ return dt->do_ops->do_capa_get(env, dt, old, opc);
+}
+
+static inline int dt_bufs_get(const struct lu_env *env, struct dt_object *d,
+ struct niobuf_remote *rnb,
+ struct niobuf_local *lnb, int rw,
+ struct lustre_capa *capa)
+{
+ LASSERT(d);
+ LASSERT(d->do_body_ops);
+ LASSERT(d->do_body_ops->dbo_bufs_get);
+ return d->do_body_ops->dbo_bufs_get(env, d, rnb->offset,
+ rnb->len, lnb, rw, capa);
+}
+
+static inline int dt_bufs_put(const struct lu_env *env, struct dt_object *d,
+ struct niobuf_local *lnb, int n)
+{
+ LASSERT(d);
+ LASSERT(d->do_body_ops);
+ LASSERT(d->do_body_ops->dbo_bufs_put);
+ return d->do_body_ops->dbo_bufs_put(env, d, lnb, n);
+}
+
+static inline int dt_write_prep(const struct lu_env *env, struct dt_object *d,
+ struct niobuf_local *lnb, int n)
+{
+ LASSERT(d);
+ LASSERT(d->do_body_ops);
+ LASSERT(d->do_body_ops->dbo_write_prep);
+ return d->do_body_ops->dbo_write_prep(env, d, lnb, n);
+}
+
+static inline int dt_declare_write_commit(const struct lu_env *env,
+ struct dt_object *d,
+ struct niobuf_local *lnb,
+ int n, struct thandle *th)
+{
+ LASSERTF(d != NULL, "dt is NULL when we want to declare write\n");
+ LASSERT(th != NULL);
+ return d->do_body_ops->dbo_declare_write_commit(env, d, lnb, n, th);
+}
+
+
+static inline int dt_write_commit(const struct lu_env *env,
+ struct dt_object *d, struct niobuf_local *lnb,
+ int n, struct thandle *th)
+{
+ LASSERT(d);
+ LASSERT(d->do_body_ops);
+ LASSERT(d->do_body_ops->dbo_write_commit);
+ return d->do_body_ops->dbo_write_commit(env, d, lnb, n, th);
+}
+
+static inline int dt_read_prep(const struct lu_env *env, struct dt_object *d,
+ struct niobuf_local *lnb, int n)
+{
+ LASSERT(d);
+ LASSERT(d->do_body_ops);
+ LASSERT(d->do_body_ops->dbo_read_prep);
+ return d->do_body_ops->dbo_read_prep(env, d, lnb, n);
+}
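+
+/*
+ * Illustrative zero-copy bulk write sketch (not part of the API; the
+ * function name and the write flag value are assumptions). The usual
+ * sequence is bufs_get -> write_prep -> declare_write_commit -> start
+ * transaction -> write_commit -> stop -> bufs_put. The caller is assumed
+ * to provide @lnb with room for every page covered by @rnb.
+ */
+static inline int dt_bulk_write_example(const struct lu_env *env,
+                                        struct dt_device *dev,
+                                        struct dt_object *obj,
+                                        struct niobuf_remote *rnb,
+                                        struct niobuf_local *lnb,
+                                        struct lustre_capa *capa)
+{
+        struct thandle *th;
+        int nr, rc, rc2;
+
+        nr = dt_bufs_get(env, obj, rnb, lnb, 1 /* write */, capa);
+        if (nr <= 0)
+                return nr < 0 ? nr : -EPROTO;
+
+        rc = dt_write_prep(env, obj, lnb, nr);
+        if (rc != 0)
+                goto out_put;
+
+        th = dt_trans_create(env, dev);
+        if (IS_ERR(th)) {
+                rc = PTR_ERR(th);
+                goto out_put;
+        }
+
+        rc = dt_declare_write_commit(env, obj, lnb, nr, th);
+        if (rc == 0)
+                rc = dt_trans_start(env, dev, th);
+        if (rc == 0)
+                rc = dt_write_commit(env, obj, lnb, nr, th);
+
+        rc2 = dt_trans_stop(env, dev, th);
+        if (rc == 0)
+                rc = rc2;
+out_put:
+        dt_bufs_put(env, obj, lnb, nr);
+        return rc;
+}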
+
+static inline int dt_declare_punch(const struct lu_env *env,
+ struct dt_object *dt, __u64 start,
+ __u64 end, struct thandle *th)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_body_ops);
+ LASSERT(dt->do_body_ops->dbo_declare_punch);
+ return dt->do_body_ops->dbo_declare_punch(env, dt, start, end, th);
+}
+
+static inline int dt_punch(const struct lu_env *env, struct dt_object *dt,
+ __u64 start, __u64 end, struct thandle *th,
+ struct lustre_capa *capa)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_body_ops);
+ LASSERT(dt->do_body_ops->dbo_punch);
+ return dt->do_body_ops->dbo_punch(env, dt, start, end, th, capa);
+}
+
+static inline int dt_fiemap_get(const struct lu_env *env, struct dt_object *d,
+ struct ll_user_fiemap *fm)
+{
+ LASSERT(d);
+ if (d->do_body_ops == NULL)
+ return -EPROTO;
+ if (d->do_body_ops->dbo_fiemap_get == NULL)
+ return -EOPNOTSUPP;
+ return d->do_body_ops->dbo_fiemap_get(env, d, fm);
+}
+
+static inline int dt_statfs(const struct lu_env *env, struct dt_device *dev,
+ struct obd_statfs *osfs)
+{
+ LASSERT(dev);
+ LASSERT(dev->dd_ops);
+ LASSERT(dev->dd_ops->dt_statfs);
+ return dev->dd_ops->dt_statfs(env, dev, osfs);
+}
+
+static inline int dt_root_get(const struct lu_env *env, struct dt_device *dev,
+ struct lu_fid *f)
+{
+ LASSERT(dev);
+ LASSERT(dev->dd_ops);
+ LASSERT(dev->dd_ops->dt_root_get);
+ return dev->dd_ops->dt_root_get(env, dev, f);
+}
+
+static inline void dt_conf_get(const struct lu_env *env,
+ const struct dt_device *dev,
+ struct dt_device_param *param)
+{
+ LASSERT(dev);
+ LASSERT(dev->dd_ops);
+ LASSERT(dev->dd_ops->dt_conf_get);
+ return dev->dd_ops->dt_conf_get(env, dev, param);
+}
+
+static inline int dt_sync(const struct lu_env *env, struct dt_device *dev)
+{
+ LASSERT(dev);
+ LASSERT(dev->dd_ops);
+ LASSERT(dev->dd_ops->dt_sync);
+ return dev->dd_ops->dt_sync(env, dev);
+}
+
+static inline int dt_ro(const struct lu_env *env, struct dt_device *dev)
+{
+ LASSERT(dev);
+ LASSERT(dev->dd_ops);
+ LASSERT(dev->dd_ops->dt_ro);
+ return dev->dd_ops->dt_ro(env, dev);
+}
+
+static inline int dt_declare_insert(const struct lu_env *env,
+ struct dt_object *dt,
+ const struct dt_rec *rec,
+ const struct dt_key *key,
+ struct thandle *th)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_index_ops);
+ LASSERT(dt->do_index_ops->dio_declare_insert);
+ return dt->do_index_ops->dio_declare_insert(env, dt, rec, key, th);
+}
+
+static inline int dt_insert(const struct lu_env *env,
+ struct dt_object *dt,
+ const struct dt_rec *rec,
+ const struct dt_key *key,
+ struct thandle *th,
+ struct lustre_capa *capa,
+ int noquota)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_index_ops);
+ LASSERT(dt->do_index_ops->dio_insert);
+ return dt->do_index_ops->dio_insert(env, dt, rec, key, th,
+ capa, noquota);
+}
+
+static inline int dt_declare_xattr_del(const struct lu_env *env,
+ struct dt_object *dt,
+ const char *name,
+ struct thandle *th)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_ops);
+ LASSERT(dt->do_ops->do_declare_xattr_del);
+ return dt->do_ops->do_declare_xattr_del(env, dt, name, th);
+}
+
+static inline int dt_xattr_del(const struct lu_env *env,
+ struct dt_object *dt, const char *name,
+ struct thandle *th,
+ struct lustre_capa *capa)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_ops);
+ LASSERT(dt->do_ops->do_xattr_del);
+ return dt->do_ops->do_xattr_del(env, dt, name, th, capa);
+}
+
+static inline int dt_declare_xattr_set(const struct lu_env *env,
+ struct dt_object *dt,
+ const struct lu_buf *buf,
+ const char *name, int fl,
+ struct thandle *th)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_ops);
+ LASSERT(dt->do_ops->do_declare_xattr_set);
+ return dt->do_ops->do_declare_xattr_set(env, dt, buf, name, fl, th);
+}
+
+static inline int dt_xattr_set(const struct lu_env *env,
+ struct dt_object *dt, const struct lu_buf *buf,
+ const char *name, int fl, struct thandle *th,
+ struct lustre_capa *capa)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_ops);
+ LASSERT(dt->do_ops->do_xattr_set);
+ return dt->do_ops->do_xattr_set(env, dt, buf, name, fl, th, capa);
+}
+
+static inline int dt_xattr_get(const struct lu_env *env,
+ struct dt_object *dt, struct lu_buf *buf,
+ const char *name, struct lustre_capa *capa)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_ops);
+ LASSERT(dt->do_ops->do_xattr_get);
+ return dt->do_ops->do_xattr_get(env, dt, buf, name, capa);
+}
+
+static inline int dt_xattr_list(const struct lu_env *env,
+ struct dt_object *dt, struct lu_buf *buf,
+ struct lustre_capa *capa)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_ops);
+ LASSERT(dt->do_ops->do_xattr_list);
+ return dt->do_ops->do_xattr_list(env, dt, buf, capa);
+}
+
+static inline int dt_declare_delete(const struct lu_env *env,
+ struct dt_object *dt,
+ const struct dt_key *key,
+ struct thandle *th)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_index_ops);
+ LASSERT(dt->do_index_ops->dio_declare_delete);
+ return dt->do_index_ops->dio_declare_delete(env, dt, key, th);
+}
+
+static inline int dt_delete(const struct lu_env *env,
+ struct dt_object *dt,
+ const struct dt_key *key,
+ struct thandle *th,
+ struct lustre_capa *capa)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_index_ops);
+ LASSERT(dt->do_index_ops->dio_delete);
+ return dt->do_index_ops->dio_delete(env, dt, key, th, capa);
+}
+
+static inline int dt_commit_async(const struct lu_env *env,
+ struct dt_device *dev)
+{
+ LASSERT(dev);
+ LASSERT(dev->dd_ops);
+ LASSERT(dev->dd_ops->dt_commit_async);
+ return dev->dd_ops->dt_commit_async(env, dev);
+}
+
+static inline int dt_init_capa_ctxt(const struct lu_env *env,
+ struct dt_device *dev,
+ int mode, unsigned long timeout,
+ __u32 alg, struct lustre_capa_key *keys)
+{
+ LASSERT(dev);
+ LASSERT(dev->dd_ops);
+ LASSERT(dev->dd_ops->dt_init_capa_ctxt);
+ return dev->dd_ops->dt_init_capa_ctxt(env, dev, mode,
+ timeout, alg, keys);
+}
+
+static inline int dt_lookup(const struct lu_env *env,
+ struct dt_object *dt,
+ struct dt_rec *rec,
+ const struct dt_key *key,
+ struct lustre_capa *capa)
+{
+ int ret;
+
+ LASSERT(dt);
+ LASSERT(dt->do_index_ops);
+ LASSERT(dt->do_index_ops->dio_lookup);
+
+ ret = dt->do_index_ops->dio_lookup(env, dt, rec, key, capa);
+ if (ret > 0)
+ ret = 0;
+ else if (ret == 0)
+ ret = -ENOENT;
+ return ret;
+}
+
+#define LU221_BAD_TIME (0x80000000U + 24 * 3600)
+
+struct dt_find_hint {
+ struct lu_fid *dfh_fid;
+ struct dt_device *dfh_dt;
+ struct dt_object *dfh_o;
+};
+
+struct dt_thread_info {
+ char dti_buf[DT_MAX_PATH];
+ struct dt_find_hint dti_dfh;
+ struct lu_attr dti_attr;
+ struct lu_fid dti_fid;
+ struct dt_object_format dti_dof;
+ struct lustre_mdt_attrs dti_lma;
+ struct lu_buf dti_lb;
+ loff_t dti_off;
+};
+
+extern struct lu_context_key dt_key;
+
+static inline struct dt_thread_info *dt_info(const struct lu_env *env)
+{
+ struct dt_thread_info *dti;
+
+ dti = lu_context_key_get(&env->le_ctx, &dt_key);
+ LASSERT(dti);
+ return dti;
+}
+
+int dt_global_init(void);
+void dt_global_fini(void);
+
+# ifdef LPROCFS
+int lprocfs_dt_rd_blksize(char *page, char **start, off_t off,
+ int count, int *eof, void *data);
+int lprocfs_dt_rd_kbytestotal(char *page, char **start, off_t off,
+ int count, int *eof, void *data);
+int lprocfs_dt_rd_kbytesfree(char *page, char **start, off_t off,
+ int count, int *eof, void *data);
+int lprocfs_dt_rd_kbytesavail(char *page, char **start, off_t off,
+ int count, int *eof, void *data);
+int lprocfs_dt_rd_filestotal(char *page, char **start, off_t off,
+ int count, int *eof, void *data);
+int lprocfs_dt_rd_filesfree(char *page, char **start, off_t off,
+ int count, int *eof, void *data);
+# endif /* LPROCFS */
+
+#endif /* __LUSTRE_DT_OBJECT_H */
diff --git a/drivers/staging/lustre/lustre/include/interval_tree.h b/drivers/staging/lustre/lustre/include/interval_tree.h
new file mode 100644
index 0000000..dfdb8aa
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/interval_tree.h
@@ -0,0 +1,124 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/include/interval_tree.h
+ *
+ * Author: Huang Wei <huangwei@clusterfs.com>
+ * Author: Jay Xiong <jinshan.xiong@sun.com>
+ */
+
+#ifndef _INTERVAL_H__
+#define _INTERVAL_H__
+
+#include <linux/libcfs/libcfs.h> /* LASSERT. */
+
+struct interval_node {
+ struct interval_node *in_left;
+ struct interval_node *in_right;
+ struct interval_node *in_parent;
+ unsigned in_color:1,
+ in_intree:1, /** set if the node is in tree */
+ in_res1:30;
+ __u8 in_res2[4]; /** tags, 8-bytes aligned */
+ __u64 in_max_high;
+ struct interval_node_extent {
+ __u64 start;
+ __u64 end;
+ } in_extent;
+};
+
+enum interval_iter {
+ INTERVAL_ITER_CONT = 1,
+ INTERVAL_ITER_STOP = 2
+};
+
+static inline int interval_is_intree(struct interval_node *node)
+{
+ return node->in_intree == 1;
+}
+
+static inline __u64 interval_low(struct interval_node *node)
+{
+ return node->in_extent.start;
+}
+
+static inline __u64 interval_high(struct interval_node *node)
+{
+ return node->in_extent.end;
+}
+
+static inline void interval_set(struct interval_node *node,
+ __u64 start, __u64 end)
+{
+ LASSERT(start <= end);
+ node->in_extent.start = start;
+ node->in_extent.end = end;
+ node->in_max_high = end;
+}
+
+/* Rules to write an interval callback.
+ * - the callback returns INTERVAL_ITER_STOP when it thinks the iteration
+ * should be stopped. It will then cause the iteration function to return
+ * immediately with return value INTERVAL_ITER_STOP.
+ * - callbacks for interval_iterate and interval_iterate_reverse: every
+ * node in the tree will be passed as @node to the callback.
+ * - callback for interval_search: only overlapping nodes will be passed
+ * as @node to the callback, as sketched in the example below.
+ */
+typedef enum interval_iter (*interval_callback_t)(struct interval_node *node,
+ void *args);
+
+struct interval_node *interval_insert(struct interval_node *node,
+ struct interval_node **root);
+void interval_erase(struct interval_node *node, struct interval_node **root);
+
+/* Search the extents in the tree and call @func for each overlapping
+ * extent. */
+enum interval_iter interval_search(struct interval_node *root,
+ struct interval_node_extent *ex,
+ interval_callback_t func, void *data);
+
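+/*
+ * Minimal callback sketch (illustrative only; both helper names are made
+ * up): count the extents in the tree that overlap @ext. Returning
+ * INTERVAL_ITER_CONT keeps the search going; INTERVAL_ITER_STOP would end
+ * it early, as described in the rules above.
+ */
+static inline enum interval_iter interval_count_cb(struct interval_node *node,
+                                                   void *arg)
+{
+        (*(int *)arg)++;
+        return INTERVAL_ITER_CONT;
+}
+
+static inline int interval_count_overlaps(struct interval_node *root,
+                                          struct interval_node_extent *ext)
+{
+        int count = 0;
+
+        interval_search(root, ext, interval_count_cb, &count);
+        return count;
+}
+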
+/* Iterate over every node in the tree, in regular or reverse order. */
+enum interval_iter interval_iterate(struct interval_node *root,
+ interval_callback_t func, void *data);
+enum interval_iter interval_iterate_reverse(struct interval_node *root,
+                                            interval_callback_t func, void *data);
+
+void interval_expand(struct interval_node *root,
+ struct interval_node_extent *ext,
+ struct interval_node_extent *limiter);
+int interval_is_overlapped(struct interval_node *root,
+ struct interval_node_extent *ex);
+struct interval_node *interval_find(struct interval_node *root,
+ struct interval_node_extent *ex);
+#endif
diff --git a/drivers/staging/lustre/lustre/include/ioctl.h b/drivers/staging/lustre/lustre/include/ioctl.h
new file mode 100644
index 0000000..227c261
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/ioctl.h
@@ -0,0 +1,106 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef _IOWR
+
+/* On i386 and x86_64, _ASM_I386_IOCTL_H is defined by the kernel's ioctl.h,
+ * and on newer kernels this header is shared as _ASM_GENERIC_IOCTL_H.
+ *
+ * We can avoid any problems with the kernel header being included again by
+ * defining _ASM_I386_IOCTL_H here so that a later occurrence of <asm/ioctl.h>
+ * does not include the kernel's ioctl.h after this one. b=14746 */
+#define _ASM_I386_IOCTL_H
+#define _ASM_GENERIC_IOCTL_H
+
+/* ioctl command encoding: 32 bits total, command in the lower 16 bits,
+ * size of the parameter structure in the lower 14 bits of the
+ * upper 16 bits; i.e. the size of the parameter structure is encoded
+ * in the ioctl request itself.
+ * The highest 2 bits are reserved for indicating the ``access mode''.
+ * NOTE: This limits the max parameter size to 16KB - 1!
+ */
+
+/*
+ * The following is for compatibility across the various Linux
+ * platforms. The i386 ioctl numbering scheme doesn't really enforce
+ * a type field. De facto, however, the top 8 bits of the lower 16
+ * bits are indeed used as a type field, so we might just as well make
+ * this explicit here. Please be sure to use the decoding macros
+ * below from now on.
+ */
+#define _IOC_NRBITS 8
+#define _IOC_TYPEBITS 8
+#define _IOC_SIZEBITS 14
+#define _IOC_DIRBITS 2
+
+#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
+#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
+#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
+#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
+
+#define _IOC_NRSHIFT 0
+#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
+#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
+#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
+
+/*
+ * Direction bits.
+ */
+#define _IOC_NONE 0U
+#define _IOC_WRITE 1U
+#define _IOC_READ 2U
+
+#define _IOC(dir,type,nr,size) (((dir) << _IOC_DIRSHIFT) | ((type) << _IOC_TYPESHIFT) | ((nr) << _IOC_NRSHIFT) | ((size) << _IOC_SIZESHIFT))
+
+/* used to create numbers */
+#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
+#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
+#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
+#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
+
+/* used to decode ioctl numbers.. */
+#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
+#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
+#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
+#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
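+
+/*
+ * Usage sketch (illustrative only; 'f' and 200 are arbitrary values, not
+ * a real Lustre ioctl): build a command number with the creation macros
+ * and take it apart again with the decoding macros above.
+ */
+#define EXAMPLE_IOC_GETVERSION _IOR('f', 200, long)
+/*
+ * _IOC_TYPE(EXAMPLE_IOC_GETVERSION) == 'f'
+ * _IOC_NR(EXAMPLE_IOC_GETVERSION)   == 200
+ * _IOC_SIZE(EXAMPLE_IOC_GETVERSION) == sizeof(long)
+ * _IOC_DIR(EXAMPLE_IOC_GETVERSION)  == _IOC_READ
+ */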
+
+/* ...and for the drivers/sound files... */
+
+#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
+#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
+#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
+#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
+#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
+
+#endif /* _IOWR */
diff --git a/drivers/staging/lustre/lustre/include/lclient.h b/drivers/staging/lustre/lustre/include/lclient.h
new file mode 100644
index 0000000..9d4011f
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lclient.h
@@ -0,0 +1,437 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Definitions shared between vvp and liblustre, and other clients in the
+ * future.
+ *
+ * Author: Oleg Drokin <oleg.drokin@sun.com>
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+#ifndef LCLIENT_H
+#define LCLIENT_H
+
+blkcnt_t dirty_cnt(struct inode *inode);
+
+int cl_glimpse_size0(struct inode *inode, int agl);
+int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
+ struct inode *inode, struct cl_object *clob, int agl);
+
+static inline int cl_glimpse_size(struct inode *inode)
+{
+ return cl_glimpse_size0(inode, 0);
+}
+
+static inline int cl_agl(struct inode *inode)
+{
+ return cl_glimpse_size0(inode, 1);
+}
+
+/**
+ * Locking policy for setattr.
+ */
+enum ccc_setattr_lock_type {
+ /** Locking is done by server */
+ SETATTR_NOLOCK,
+ /** Extent lock is enqueued */
+ SETATTR_EXTENT_LOCK,
+ /** Existing local extent lock is used */
+ SETATTR_MATCH_LOCK
+};
+
+
+/**
+ * IO state private to vvp or slp layers.
+ */
+struct ccc_io {
+ /** super class */
+ struct cl_io_slice cui_cl;
+ struct cl_io_lock_link cui_link;
+ /**
+ * I/O vector information to or from which read/write is going.
+ */
+ struct iovec *cui_iov;
+ unsigned long cui_nrsegs;
+ /**
+ * Total iov count for left IO.
+ */
+ unsigned long cui_tot_nrsegs;
+ /**
+ * Old length for iov that was truncated partially.
+ */
+ size_t cui_iov_olen;
+ /**
+ * Total size for the left IO.
+ */
+ size_t cui_tot_count;
+
+ union {
+ struct {
+ enum ccc_setattr_lock_type cui_local_lock;
+ } setattr;
+ } u;
+ /**
+ * True iff io is processing glimpse right now.
+ */
+ int cui_glimpse;
+ /**
+ * Layout version when this IO is initialized
+ */
+ __u32 cui_layout_gen;
+ /**
+ * File descriptor against which IO is done.
+ */
+ struct ll_file_data *cui_fd;
+ struct kiocb *cui_iocb;
+};
+
+/**
+ * True if \a io is a normal io, false for others (sendfile, splice*).
+ * Must be implemented in arch specific code.
+ */
+int cl_is_normalio(const struct lu_env *env, const struct cl_io *io);
+
+extern struct lu_context_key ccc_key;
+extern struct lu_context_key ccc_session_key;
+
+struct ccc_thread_info {
+ struct cl_lock_descr cti_descr;
+ struct cl_io cti_io;
+ struct cl_attr cti_attr;
+};
+
+static inline struct ccc_thread_info *ccc_env_info(const struct lu_env *env)
+{
+ struct ccc_thread_info *info;
+
+ info = lu_context_key_get(&env->le_ctx, &ccc_key);
+ LASSERT(info != NULL);
+ return info;
+}
+
+static inline struct cl_attr *ccc_env_thread_attr(const struct lu_env *env)
+{
+ struct cl_attr *attr = &ccc_env_info(env)->cti_attr;
+ memset(attr, 0, sizeof(*attr));
+ return attr;
+}
+
+static inline struct cl_io *ccc_env_thread_io(const struct lu_env *env)
+{
+ struct cl_io *io = &ccc_env_info(env)->cti_io;
+ memset(io, 0, sizeof(*io));
+ return io;
+}
+
+struct ccc_session {
+ struct ccc_io cs_ios;
+};
+
+static inline struct ccc_session *ccc_env_session(const struct lu_env *env)
+{
+ struct ccc_session *ses;
+
+ ses = lu_context_key_get(env->le_ses, &ccc_session_key);
+ LASSERT(ses != NULL);
+ return ses;
+}
+
+static inline struct ccc_io *ccc_env_io(const struct lu_env *env)
+{
+ return &ccc_env_session(env)->cs_ios;
+}
+
+/**
+ * ccc-private object state.
+ */
+struct ccc_object {
+ struct cl_object_header cob_header;
+ struct cl_object cob_cl;
+ struct inode *cob_inode;
+
+ /**
+ * A list of dirty pages pending IO in the cache. Used by
+ * SOM. Protected by ll_inode_info::lli_lock.
+ *
+ * \see ccc_page::cpg_pending_linkage
+ */
+ struct list_head cob_pending_list;
+
+ /**
+ * Access to this counter is protected by inode->i_sem. Now that
+ * the lifetime of transient pages must be covered by the inode
+ * semaphore, we don't need to hold any other lock.
+ */
+ int cob_transient_pages;
+ /**
+ * Number of outstanding mmaps on this file.
+ *
+ * \see ll_vm_open(), ll_vm_close().
+ */
+ atomic_t cob_mmap_cnt;
+
+ /**
+ * various flags
+ * cob_discard_page_warned
+ * if pages belonging to this object are discarded when a client
+ * is evicted, some debug info will be printed. This flag is set
+ * while processing the first discarded page, to avoid flooding the
+ * debug log with messages for every discarded page.
+ *
+ * \see ll_dirty_page_discard_warn.
+ */
+ unsigned int cob_discard_page_warned:1;
+};
+
+/**
+ * ccc-private page state.
+ */
+struct ccc_page {
+ struct cl_page_slice cpg_cl;
+ int cpg_defer_uptodate;
+ int cpg_ra_used;
+ int cpg_write_queued;
+ /**
+ * Non-empty iff this page is already counted in
+ * ccc_object::cob_pending_list. Protected by
+ * ccc_object::cob_pending_guard. This list is only used as a flag,
+ * that is, never iterated through, only checked for list_empty(), but
+ * having a list is useful for debugging.
+ */
+ struct list_head cpg_pending_linkage;
+ /** VM page */
+ struct page *cpg_page;
+};
+
+static inline struct ccc_page *cl2ccc_page(const struct cl_page_slice *slice)
+{
+ return container_of(slice, struct ccc_page, cpg_cl);
+}
+
+struct cl_page *ccc_vmpage_page_transient(struct page *vmpage);
+
+struct ccc_device {
+ struct cl_device cdv_cl;
+ struct super_block *cdv_sb;
+ struct cl_device *cdv_next;
+};
+
+struct ccc_lock {
+ struct cl_lock_slice clk_cl;
+};
+
+struct ccc_req {
+ struct cl_req_slice crq_cl;
+};
+
+void *ccc_key_init (const struct lu_context *ctx,
+ struct lu_context_key *key);
+void ccc_key_fini (const struct lu_context *ctx,
+ struct lu_context_key *key, void *data);
+void *ccc_session_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key);
+void ccc_session_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data);
+
+int ccc_device_init (const struct lu_env *env,
+ struct lu_device *d,
+ const char *name, struct lu_device *next);
+struct lu_device *ccc_device_fini (const struct lu_env *env,
+ struct lu_device *d);
+struct lu_device *ccc_device_alloc(const struct lu_env *env,
+ struct lu_device_type *t,
+ struct lustre_cfg *cfg,
+ const struct lu_device_operations *luops,
+ const struct cl_device_operations *clops);
+struct lu_device *ccc_device_free (const struct lu_env *env,
+ struct lu_device *d);
+struct lu_object *ccc_object_alloc(const struct lu_env *env,
+ const struct lu_object_header *hdr,
+ struct lu_device *dev,
+ const struct cl_object_operations *clops,
+ const struct lu_object_operations *luops);
+
+int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
+ struct cl_req *req);
+void ccc_umount(const struct lu_env *env, struct cl_device *dev);
+int ccc_global_init(struct lu_device_type *device_type);
+void ccc_global_fini(struct lu_device_type *device_type);
+int ccc_object_init0(const struct lu_env *env, struct ccc_object *vob,
+ const struct cl_object_conf *conf);
+int ccc_object_init(const struct lu_env *env, struct lu_object *obj,
+ const struct lu_object_conf *conf);
+void ccc_object_free(const struct lu_env *env, struct lu_object *obj);
+int ccc_lock_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_lock *lock, const struct cl_io *io,
+ const struct cl_lock_operations *lkops);
+int ccc_attr_set(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_attr *attr, unsigned valid);
+int ccc_object_glimpse(const struct lu_env *env,
+ const struct cl_object *obj, struct ost_lvb *lvb);
+int ccc_conf_set(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_object_conf *conf);
+struct page *ccc_page_vmpage(const struct lu_env *env,
+ const struct cl_page_slice *slice);
+int ccc_page_is_under_lock(const struct lu_env *env,
+ const struct cl_page_slice *slice, struct cl_io *io);
+int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice);
+void ccc_transient_page_verify(const struct cl_page *page);
+int ccc_transient_page_own(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io, int nonblock);
+void ccc_transient_page_assume(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io);
+void ccc_transient_page_unassume(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io);
+void ccc_transient_page_disown(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io);
+void ccc_transient_page_discard(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io);
+int ccc_transient_page_prep(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io);
+void ccc_lock_delete(const struct lu_env *env,
+ const struct cl_lock_slice *slice);
+void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
+int ccc_lock_enqueue(const struct lu_env *env,
+                     const struct cl_lock_slice *slice,
+                     struct cl_io *io, __u32 enqflags);
+int ccc_lock_unuse(const struct lu_env *env,
+                   const struct cl_lock_slice *slice);
+int ccc_lock_wait(const struct lu_env *env,
+                  const struct cl_lock_slice *slice);
+int ccc_lock_fits_into(const struct lu_env *env,
+ const struct cl_lock_slice *slice,
+ const struct cl_lock_descr *need,
+ const struct cl_io *io);
+void ccc_lock_state(const struct lu_env *env,
+ const struct cl_lock_slice *slice,
+ enum cl_lock_state state);
+
+void ccc_io_fini(const struct lu_env *env, const struct cl_io_slice *ios);
+int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
+ __u32 enqflags, enum cl_lock_mode mode,
+ pgoff_t start, pgoff_t end);
+int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
+ __u32 enqflags, enum cl_lock_mode mode,
+ loff_t start, loff_t end);
+void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios);
+void ccc_io_advance(const struct lu_env *env, const struct cl_io_slice *ios,
+ size_t nob);
+void ccc_io_update_iov(const struct lu_env *env, struct ccc_io *cio,
+ struct cl_io *io);
+int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io, loff_t start, size_t count, int *exceed);
+void ccc_req_completion(const struct lu_env *env,
+ const struct cl_req_slice *slice, int ioret);
+void ccc_req_attr_set(const struct lu_env *env,
+                      const struct cl_req_slice *slice,
+ const struct cl_object *obj,
+ struct cl_req_attr *oa, obd_valid flags);
+
+struct lu_device *ccc2lu_dev (struct ccc_device *vdv);
+struct lu_object *ccc2lu (struct ccc_object *vob);
+struct ccc_device *lu2ccc_dev (const struct lu_device *d);
+struct ccc_device *cl2ccc_dev (const struct cl_device *d);
+struct ccc_object *lu2ccc (const struct lu_object *obj);
+struct ccc_object *cl2ccc (const struct cl_object *obj);
+struct ccc_lock *cl2ccc_lock (const struct cl_lock_slice *slice);
+struct ccc_io *cl2ccc_io (const struct lu_env *env,
+ const struct cl_io_slice *slice);
+struct ccc_req *cl2ccc_req (const struct cl_req_slice *slice);
+struct page *cl2vm_page (const struct cl_page_slice *slice);
+struct inode *ccc_object_inode(const struct cl_object *obj);
+struct ccc_object *cl_inode2ccc (struct inode *inode);
+
+int cl_setattr_ost(struct inode *inode, const struct iattr *attr,
+ struct obd_capa *capa);
+
+struct cl_page *ccc_vmpage_page_transient(struct page *vmpage);
+int ccc_object_invariant(const struct cl_object *obj);
+int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
+void cl_inode_fini(struct inode *inode);
+int cl_local_size(struct inode *inode);
+
+__u16 ll_dirent_type_get(struct lu_dirent *ent);
+__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
+__u32 cl_fid_build_gen(const struct lu_fid *fid);
+
+# define CLOBINVRNT(env, clob, expr) \
+ ((void)sizeof(env), (void)sizeof(clob), (void)sizeof !!(expr))
+
+int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp);
+int cl_ocd_update(struct obd_device *host,
+ struct obd_device *watched,
+ enum obd_notify_event ev, void *owner, void *data);
+
+struct ccc_grouplock {
+ struct lu_env *cg_env;
+ struct cl_io *cg_io;
+ struct cl_lock *cg_lock;
+ unsigned long cg_gid;
+};
+
+int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
+ struct ccc_grouplock *cg);
+void cl_put_grouplock(struct ccc_grouplock *cg);
+
+/**
+ * New interfaces to get and put lov_stripe_md from lov layer. This violates
+ * layering because lov_stripe_md is supposed to be private data in lov.
+ *
+ * NB: If you find you have to use these interfaces for your new code, please
+ * think about it again. These interfaces may be removed in the future for
+ * better layering. */
+struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
+void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
+int lov_read_and_clear_async_rc(struct cl_object *clob);
+
+struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode);
+void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
+
+/**
+ * Data structure managing a client's cached clean pages. An LRU of
+ * pages is maintained, along with other statistics.
+ */
+struct cl_client_cache {
+ atomic_t ccc_users; /* # of users (OSCs) of this data */
+ struct list_head ccc_lru; /* LRU list of cached clean pages */
+ spinlock_t ccc_lru_lock; /* lock for list */
+ atomic_t ccc_lru_left; /* # of LRU entries available */
+ unsigned long ccc_lru_max; /* Max # of LRU entries possible */
+ unsigned int ccc_lru_shrinkers; /* # of threads reclaiming */
+};
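+
+/*
+ * Illustrative initialization sketch only (the helper name is made up and
+ * the real setup is done by the client mount code); shown here just to
+ * make the meaning of the fields above concrete.
+ */
+static inline void cl_client_cache_example_init(struct cl_client_cache *cache,
+                                                unsigned long max_pages)
+{
+        atomic_set(&cache->ccc_users, 1);       /* the creating client */
+        INIT_LIST_HEAD(&cache->ccc_lru);
+        spin_lock_init(&cache->ccc_lru_lock);
+        cache->ccc_lru_max = max_pages;
+        atomic_set(&cache->ccc_lru_left, max_pages);
+        cache->ccc_lru_shrinkers = 0;
+}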
+
+#endif /* LCLIENT_H */
diff --git a/drivers/staging/lustre/lustre/include/linux/lprocfs_status.h b/drivers/staging/lustre/lustre/include/linux/lprocfs_status.h
new file mode 100644
index 0000000..4bcc4dc
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/linux/lprocfs_status.h
@@ -0,0 +1,57 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/include/linux/lprocfs_status.h
+ *
+ * Top level header file for LProc SNMP
+ *
+ * Author: Hariharan Thantry thantry@users.sourceforge.net
+ */
+#ifndef _LINUX_LPROCFS_SNMP_H
+#define _LINUX_LPROCFS_SNMP_H
+
+#ifndef _LPROCFS_SNMP_H
+#error Do not #include this file directly. #include <lprocfs_status.h> instead
+#endif
+
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/smp.h>
+#include <linux/rwsem.h>
+#include <linux/libcfs/libcfs.h>
+#include <linux/statfs.h>
+
+
+#endif /* _LINUX_LPROCFS_SNMP_H */
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_acl.h b/drivers/staging/lustre/lustre/include/linux/lustre_acl.h
new file mode 100644
index 0000000..ff4fc4f
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_acl.h
@@ -0,0 +1,66 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * Copyright (c) 2011, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/lustre/include/lustre_acl.h
+ *
+ * MDS data structures.
+ * See also lustre_idl.h for wire formats of requests.
+ */
+
+#ifndef _LUSTRE_LINUX_ACL_H
+#define _LUSTRE_LINUX_ACL_H
+
+#ifndef _LUSTRE_ACL_H
+#error Do not #include this file directly. #include <lustre_acl.h> instead
+#endif
+
+# include <linux/fs.h>
+# include <linux/dcache.h>
+# ifdef CONFIG_FS_POSIX_ACL
+# include <linux/posix_acl_xattr.h>
+# define LUSTRE_POSIX_ACL_MAX_ENTRIES 32
+# define LUSTRE_POSIX_ACL_MAX_SIZE \
+ (sizeof(posix_acl_xattr_header) + \
+ LUSTRE_POSIX_ACL_MAX_ENTRIES * sizeof(posix_acl_xattr_entry))
+# endif /* CONFIG_FS_POSIX_ACL */
+# include <linux/lustre_intent.h>
+# include <linux/xattr.h> /* XATTR_{REPLACE,CREATE} */
+
+#ifndef LUSTRE_POSIX_ACL_MAX_SIZE
+# define LUSTRE_POSIX_ACL_MAX_SIZE 0
+#endif
+
+#endif /* _LUSTRE_LINUX_ACL_H */
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_common.h b/drivers/staging/lustre/lustre/include/linux/lustre_common.h
new file mode 100644
index 0000000..d1783a3
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_common.h
@@ -0,0 +1,22 @@
+#ifndef LUSTRE_COMMON_H
+#define LUSTRE_COMMON_H
+
+#include <linux/sched.h>
+
+static inline int cfs_cleanup_group_info(void)
+{
+ struct group_info *ginfo;
+
+ ginfo = groups_alloc(0);
+ if (!ginfo)
+ return -ENOMEM;
+
+ set_current_groups(ginfo);
+ put_group_info(ginfo);
+
+ return 0;
+}
+
+#define ll_inode_blksize(a) (1<<(a)->i_blkbits)
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
new file mode 100644
index 0000000..9243dfa
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
@@ -0,0 +1,246 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef _LINUX_COMPAT25_H
+#define _LINUX_COMPAT25_H
+
+#include <linux/fs_struct.h>
+#include <linux/namei.h>
+#include <linux/libcfs/linux/portals_compat25.h>
+
+#include <linux/lustre_patchless_compat.h>
+
+# define LOCK_FS_STRUCT(fs) spin_lock(&(fs)->lock)
+# define UNLOCK_FS_STRUCT(fs) spin_unlock(&(fs)->lock)
+
+static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
+ struct dentry *dentry)
+{
+ struct path path;
+ struct path old_pwd;
+
+ path.mnt = mnt;
+ path.dentry = dentry;
+ LOCK_FS_STRUCT(fs);
+ old_pwd = fs->pwd;
+ path_get(&path);
+ fs->pwd = path;
+ UNLOCK_FS_STRUCT(fs);
+
+ if (old_pwd.dentry)
+ path_put(&old_pwd);
+}
+
+
+/*
+ * set ATTR_BLOCKS to a high value to avoid any risk of collision with other
+ * ATTR_* attributes (see bug 13828)
+ */
+#define ATTR_BLOCKS (1 << 27)
+
+#define current_ngroups current_cred()->group_info->ngroups
+#define current_groups current_cred()->group_info->small_block
+
+/*
+ * OBD needs a working random driver, so all of our
+ * initialization routines must be called after device
+ * driver initialization.
+ */
+#ifndef MODULE
+#undef module_init
+#define module_init(a) late_initcall(a)
+#endif
+
+
+#define LTIME_S(time) (time.tv_sec)
+
+/* inode_dio_wait(i) use as-is for write lock */
+# define inode_dio_write_done(i) do {} while (0) /* for write unlock */
+# define inode_dio_read(i) atomic_inc(&(i)->i_dio_count)
+/* inode_dio_done(i) use as-is for read unlock */
+
+#define TREE_READ_LOCK_IRQ(mapping) spin_lock_irq(&(mapping)->tree_lock)
+#define TREE_READ_UNLOCK_IRQ(mapping) spin_unlock_irq(&(mapping)->tree_lock)
+
+#ifndef FS_HAS_FIEMAP
+#define FS_HAS_FIEMAP (0)
+#endif
+
+#define ll_vfs_rmdir(dir,entry,mnt) vfs_rmdir(dir,entry)
+#define ll_vfs_mkdir(inode,dir,mnt,mode) vfs_mkdir(inode,dir,mode)
+#define ll_vfs_link(old,mnt,dir,new,mnt1) vfs_link(old,dir,new)
+#define ll_vfs_unlink(inode,entry,mnt) vfs_unlink(inode,entry)
+#define ll_vfs_mknod(dir,entry,mnt,mode,dev) vfs_mknod(dir,entry,mode,dev)
+#define ll_security_inode_unlink(dir,entry,mnt) security_inode_unlink(dir,entry)
+#define ll_vfs_rename(old,old_dir,mnt,new,new_dir,mnt1) \
+ vfs_rename(old,old_dir,new,new_dir)
+
+#define cfs_bio_io_error(a,b) bio_io_error((a))
+#define cfs_bio_endio(a,b,c) bio_endio((a),(c))
+
+#define cfs_fs_pwd(fs) ((fs)->pwd.dentry)
+#define cfs_fs_mnt(fs) ((fs)->pwd.mnt)
+#define cfs_path_put(nd) path_put(&(nd)->path)
+
+
+#ifndef SLAB_DESTROY_BY_RCU
+#define SLAB_DESTROY_BY_RCU 0
+#endif
+
+
+
+static inline int
+ll_quota_on(struct super_block *sb, int off, int ver, char *name, int remount)
+{
+	int rc;
+
+	if (sb->s_qcop->quota_on) {
+		struct path path;
+
+		rc = kern_path(name, LOOKUP_FOLLOW, &path);
+		if (rc)
+			return rc;
+
+		rc = sb->s_qcop->quota_on(sb, off, ver, &path);
+		path_put(&path);
+		return rc;
+	}
+
+	return -ENOSYS;
+}
+
+static inline int ll_quota_off(struct super_block *sb, int off, int remount)
+{
+	if (sb->s_qcop->quota_off)
+		return sb->s_qcop->quota_off(sb, off);
+
+	return -ENOSYS;
+}
+
+
+# define ll_vfs_dq_init dquot_initialize
+# define ll_vfs_dq_drop dquot_drop
+# define ll_vfs_dq_transfer dquot_transfer
+# define ll_vfs_dq_off(sb, remount) dquot_suspend(sb, -1)
+
+
+
+
+
+#define queue_max_phys_segments(rq) queue_max_segments(rq)
+#define queue_max_hw_segments(rq) queue_max_segments(rq)
+
+
+#define ll_d_hlist_node hlist_node
+#define ll_d_hlist_empty(list) hlist_empty(list)
+#define ll_d_hlist_entry(ptr, type, name) hlist_entry(ptr.first, type, name)
+#define ll_d_hlist_for_each(tmp, i_dentry) hlist_for_each(tmp, i_dentry)
+#define ll_d_hlist_for_each_entry(dentry, p, i_dentry, alias) \
+ p = NULL; hlist_for_each_entry(dentry, i_dentry, alias)
+
+
+#define bio_hw_segments(q, bio) 0
+
+
+#define ll_pagevec_init(pv, cold) do {} while (0)
+#define ll_pagevec_add(pv, pg) (0)
+#define ll_pagevec_lru_add_file(pv) do {} while (0)
+
+
+#ifndef QUOTA_OK
+# define QUOTA_OK 0
+#endif
+#ifndef NO_QUOTA
+# define NO_QUOTA (-EDQUOT)
+#endif
+
+#ifndef SEEK_DATA
+#define SEEK_DATA 3 /* seek to the next data */
+#endif
+#ifndef SEEK_HOLE
+#define SEEK_HOLE 4 /* seek to the next hole */
+#endif
+
+#ifndef FMODE_UNSIGNED_OFFSET
+#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000)
+#endif
+
+#if !defined(_ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_) && !defined(ext2_set_bit)
+# define ext2_set_bit __test_and_set_bit_le
+# define ext2_clear_bit __test_and_clear_bit_le
+# define ext2_test_bit test_bit_le
+# define ext2_find_first_zero_bit find_first_zero_bit_le
+# define ext2_find_next_zero_bit find_next_zero_bit_le
+#endif
+
+#ifdef ATTR_TIMES_SET
+# define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
+#else
+# define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET)
+#endif
+
+
+
+/*
+ * Since kernel 3.1, nameidata.intent.open.flags differs from lustre's
+ * lookup_intent.it_flags: the lower bits of it_flags are FMODE_xxx
+ * values, while the kernel does not translate the lower bits of
+ * nameidata.intent.open.flags to FMODE_xxx.
+ */
+#include <linux/version.h>
+static inline int ll_namei_to_lookup_intent_flag(int flag)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
+ flag = (flag & ~O_ACCMODE) | OPEN_FMODE(flag);
+#endif
+ return flag;
+}
+
+# define ll_mrf_ret void
+# define LL_MRF_RETURN(rc)
+
+#include <linux/fs.h>
+
+# define ll_umode_t umode_t
+
+#include <linux/dcache.h>
+
+# define ll_dirty_inode(inode, flag) (inode)->i_sb->s_op->dirty_inode((inode), flag)
+
+#endif /* _LINUX_COMPAT25_H */
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_debug.h b/drivers/staging/lustre/lustre/include/linux/lustre_debug.h
new file mode 100644
index 0000000..11deac7
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_debug.h
@@ -0,0 +1,47 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef _LINUX_LUSTRE_DEBUG_H
+#define _LINUX_LUSTRE_DEBUG_H
+
+#ifndef _LUSTRE_DEBUG_H
+#error Do not #include this file directly. #include <lustre_debug.h> instead
+#endif
+
+#define LL_CDEBUG_PAGE(mask, page, fmt, arg...) \
+ CDEBUG(mask, "page %p map %p index %lu flags %lx count %u priv %0lx: "\
+ fmt, page, page->mapping, page->index, (long)page->flags, \
+ page_count(page), page_private(page), ## arg)
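+
+/* Example call (hypothetical call site; D_PAGE is the usual libcfs debug
+ * mask, and the surrounding variables are assumed):
+ *
+ *	LL_CDEBUG_PAGE(D_PAGE, page, "queued for %s, rc = %d\n",
+ *		       write ? "write" : "read", rc);
+ */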
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_dlm.h b/drivers/staging/lustre/lustre/include/linux/lustre_dlm.h
new file mode 100644
index 0000000..207df03
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_dlm.h
@@ -0,0 +1,46 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef _LINUX_LUSTRE_DLM_H__
+#define _LINUX_LUSTRE_DLM_H__
+
+#ifndef _LUSTRE_DLM_H__
+#error Do not #include this file directly. #include <lustre_dlm.h> instead
+#endif
+
+# include <linux/proc_fs.h>
+# include <asm/processor.h>
+# include <linux/bit_spinlock.h>
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_fsfilt.h b/drivers/staging/lustre/lustre/include/linux/lustre_fsfilt.h
new file mode 100644
index 0000000..4da6e372
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_fsfilt.h
@@ -0,0 +1,171 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/include/linux/lustre_fsfilt.h
+ *
+ * Filesystem interface helper.
+ */
+
+#ifndef _LINUX_LUSTRE_FSFILT_H
+#define _LINUX_LUSTRE_FSFILT_H
+
+#ifndef _LUSTRE_FSFILT_H
+#error Do not #include this file directly. #include <lustre_fsfilt.h> instead
+#endif
+
+
+#include <obd.h>
+#include <obd_class.h>
+
+typedef void (*fsfilt_cb_t)(struct obd_device *obd, __u64 last_rcvd,
+ void *data, int error);
+
+struct fsfilt_operations {
+ struct list_head fs_list;
+ struct module *fs_owner;
+ char *fs_type;
+ char *(* fs_getlabel)(struct super_block *sb);
+ void *(* fs_start)(struct inode *inode, int op, void *desc_private,
+ int logs);
+ int (* fs_commit)(struct inode *inode, void *handle,int force_sync);
+ int (* fs_map_inode_pages)(struct inode *inode, struct page **page,
+ int pages, unsigned long *blocks,
+ int create, struct mutex *sem);
+ int (* fs_write_record)(struct file *, void *, int size, loff_t *,
+ int force_sync);
+ int (* fs_read_record)(struct file *, void *, int size, loff_t *);
+ int (* fs_setup)(struct super_block *sb);
+};
+
+extern int fsfilt_register_ops(struct fsfilt_operations *fs_ops);
+extern void fsfilt_unregister_ops(struct fsfilt_operations *fs_ops);
+extern struct fsfilt_operations *fsfilt_get_ops(const char *type);
+extern void fsfilt_put_ops(struct fsfilt_operations *fs_ops);
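+
+/* Registration sketch (illustrative only): how a backing-filesystem module
+ * might plug into the hooks above; the "myfs" names are hypothetical and
+ * only a subset of the operations is filled in. */
+#if 0
+static int fsfilt_myfs_setup(struct super_block *sb)
+{
+	return 0;			/* nothing to prepare in this sketch */
+}
+
+static struct fsfilt_operations fsfilt_myfs_ops = {
+	.fs_type  = "myfs",
+	.fs_owner = THIS_MODULE,
+	.fs_setup = fsfilt_myfs_setup,
+};
+
+static int __init fsfilt_myfs_init(void)
+{
+	return fsfilt_register_ops(&fsfilt_myfs_ops);
+}
+
+static void __exit fsfilt_myfs_exit(void)
+{
+	fsfilt_unregister_ops(&fsfilt_myfs_ops);
+}
+#endif	/* example only */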
+
+static inline char *fsfilt_get_label(struct obd_device *obd,
+ struct super_block *sb)
+{
+ if (obd->obd_fsops->fs_getlabel == NULL)
+ return NULL;
+ if (obd->obd_fsops->fs_getlabel(sb)[0] == '\0')
+ return NULL;
+
+ return obd->obd_fsops->fs_getlabel(sb);
+}
+
+#define FSFILT_OP_UNLINK 1
+#define FSFILT_OP_CANCEL_UNLINK 10
+
+#define __fsfilt_check_slow(obd, start, msg) \
+do { \
+ if (cfs_time_before(jiffies, start + 15 * HZ)) \
+ break; \
+ else if (cfs_time_before(jiffies, start + 30 * HZ)) \
+ CDEBUG(D_VFSTRACE, "%s: slow %s %lus\n", obd->obd_name, \
+ msg, (jiffies-start) / HZ); \
+ else if (cfs_time_before(jiffies, start + DISK_TIMEOUT * HZ)) \
+ CWARN("%s: slow %s %lus\n", obd->obd_name, msg, \
+ (jiffies - start) / HZ); \
+ else \
+ CERROR("%s: slow %s %lus\n", obd->obd_name, msg, \
+ (jiffies - start) / HZ); \
+} while (0)
+
+#define fsfilt_check_slow(obd, start, msg) \
+do { \
+ __fsfilt_check_slow(obd, start, msg); \
+ start = jiffies; \
+} while (0)
+
+static inline void *fsfilt_start_log(struct obd_device *obd,
+ struct inode *inode, int op,
+ struct obd_trans_info *oti, int logs)
+{
+ unsigned long now = jiffies;
+ void *parent_handle = oti ? oti->oti_handle : NULL;
+ void *handle;
+
+ handle = obd->obd_fsops->fs_start(inode, op, parent_handle, logs);
+ CDEBUG(D_INFO, "started handle %p (%p)\n", handle, parent_handle);
+
+ if (oti != NULL) {
+ if (parent_handle == NULL) {
+ oti->oti_handle = handle;
+ } else if (handle != parent_handle) {
+ CERROR("mismatch: parent %p, handle %p, oti %p\n",
+ parent_handle, handle, oti);
+ LBUG();
+ }
+ }
+ fsfilt_check_slow(obd, now, "journal start");
+ return handle;
+}
+
+static inline int fsfilt_commit(struct obd_device *obd, struct inode *inode,
+ void *handle, int force_sync)
+{
+ unsigned long now = jiffies;
+ int rc = obd->obd_fsops->fs_commit(inode, handle, force_sync);
+ CDEBUG(D_INFO, "committing handle %p\n", handle);
+
+	fsfilt_check_slow(obd, now, "journal commit");
+
+ return rc;
+}
+
+static inline int fsfilt_read_record(struct obd_device *obd, struct file *file,
+ void *buf, loff_t size, loff_t *offs)
+{
+ return obd->obd_fsops->fs_read_record(file, buf, size, offs);
+}
+
+static inline int fsfilt_write_record(struct obd_device *obd, struct file *file,
+ void *buf, loff_t size, loff_t *offs,
+ int force_sync)
+{
+ return obd->obd_fsops->fs_write_record(file, buf, size,offs,force_sync);
+}
+
+static inline int fsfilt_setup(struct obd_device *obd, struct super_block *fs)
+{
+ if (obd->obd_fsops->fs_setup)
+ return obd->obd_fsops->fs_setup(fs);
+ return 0;
+}
+
+
+
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_handles.h b/drivers/staging/lustre/lustre/include/linux/lustre_handles.h
new file mode 100644
index 0000000..459b238
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_handles.h
@@ -0,0 +1,52 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __LINUX_LUSTRE_HANDLES_H_
+#define __LINUX_LUSTRE_HANDLES_H_
+
+#ifndef __LUSTRE_HANDLES_H_
+#error Do not #include this file directly. #include <lustre_handles.h> instead
+#endif
+
+#include <asm/types.h>
+#include <asm/atomic.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <linux/rcupdate.h> /* for rcu_head{} */
+typedef struct rcu_head cfs_rcu_head_t;
+
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_intent.h b/drivers/staging/lustre/lustre/include/linux/lustre_intent.h
new file mode 100644
index 0000000..b10ddfa
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_intent.h
@@ -0,0 +1,62 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef LUSTRE_INTENT_H
+#define LUSTRE_INTENT_H
+
+/* intent IT_XXX are defined in lustre/include/obd.h */
+struct lustre_intent_data {
+ int it_disposition;
+ int it_status;
+ __u64 it_lock_handle;
+ __u64 it_lock_bits;
+ int it_lock_mode;
+ int it_remote_lock_mode;
+ __u64 it_remote_lock_handle;
+ void *it_data;
+ unsigned int it_lock_set:1;
+};
+
+struct lookup_intent {
+ int it_op;
+ int it_flags;
+ int it_create_mode;
+ union {
+ struct lustre_intent_data lustre;
+ } d;
+};
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_lib.h b/drivers/staging/lustre/lustre/include/linux/lustre_lib.h
new file mode 100644
index 0000000..57f3b01
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_lib.h
@@ -0,0 +1,85 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/include/linux/lustre_lib.h
+ *
+ * Basic Lustre library routines.
+ */
+
+#ifndef _LINUX_LUSTRE_LIB_H
+#define _LINUX_LUSTRE_LIB_H
+
+#ifndef _LUSTRE_LIB_H
+#error Do not #include this file directly. #include <lustre_lib.h> instead
+#endif
+
+# include <linux/rwsem.h>
+# include <linux/sched.h>
+# include <linux/signal.h>
+# include <linux/types.h>
+# include <linux/lustre_compat25.h>
+# include <linux/lustre_common.h>
+
+#ifndef LP_POISON
+# define LI_POISON 0x5a5a5a5a
+#if BITS_PER_LONG > 32
+# define LL_POISON 0x5a5a5a5a5a5a5a5aL
+#else
+# define LL_POISON 0x5a5a5a5aL
+#endif
+# define LP_POISON ((void *)LL_POISON)
+#endif
+
+/* This macro exists only for compatibility with older Linux Lustre user
+ * tools. New ioctls should NOT use this macro as the ioctl "size". Instead,
+ * the ioctl should use the actual data type it passes as its "size", so
+ * that the ioctl interface is versioned correctly. */
+#define OBD_IOC_DATA_TYPE long
+
+#define LUSTRE_FATAL_SIGS (sigmask(SIGKILL) | sigmask(SIGINT) | \
+ sigmask(SIGTERM) | sigmask(SIGQUIT) | \
+ sigmask(SIGALRM))
+
+/* initialize ost_lvb according to inode */
+static inline void inode_init_lvb(struct inode *inode, struct ost_lvb *lvb)
+{
+ lvb->lvb_size = i_size_read(inode);
+ lvb->lvb_blocks = inode->i_blocks;
+ lvb->lvb_mtime = LTIME_S(inode->i_mtime);
+ lvb->lvb_atime = LTIME_S(inode->i_atime);
+ lvb->lvb_ctime = LTIME_S(inode->i_ctime);
+}
+
+#endif /* _LINUX_LUSTRE_LIB_H */
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_lite.h b/drivers/staging/lustre/lustre/include/linux/lustre_lite.h
new file mode 100644
index 0000000..9e5df8d
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_lite.h
@@ -0,0 +1,98 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef _LINUX_LL_H
+#define _LINUX_LL_H
+
+#ifndef _LL_H
+#error Do not #include this file directly. #include <lustre_lite.h> instead
+#endif
+
+
+#include <asm/statfs.h>
+
+#include <linux/fs.h>
+#include <linux/dcache.h>
+#include <linux/proc_fs.h>
+
+#include <obd_class.h>
+#include <lustre_net.h>
+#include <lustre_ha.h>
+
+#include <linux/rbtree.h>
+#include <linux/lustre_compat25.h>
+#include <linux/lustre_common.h>
+#include <linux/pagemap.h>
+
+/* lprocfs.c */
+enum {
+ LPROC_LL_DIRTY_HITS = 0,
+ LPROC_LL_DIRTY_MISSES,
+ LPROC_LL_READ_BYTES,
+ LPROC_LL_WRITE_BYTES,
+ LPROC_LL_BRW_READ,
+ LPROC_LL_BRW_WRITE,
+ LPROC_LL_OSC_READ,
+ LPROC_LL_OSC_WRITE,
+ LPROC_LL_IOCTL,
+ LPROC_LL_OPEN,
+ LPROC_LL_RELEASE,
+ LPROC_LL_MAP,
+ LPROC_LL_LLSEEK,
+ LPROC_LL_FSYNC,
+ LPROC_LL_READDIR,
+ LPROC_LL_SETATTR,
+ LPROC_LL_TRUNC,
+ LPROC_LL_FLOCK,
+ LPROC_LL_GETATTR,
+ LPROC_LL_CREATE,
+ LPROC_LL_LINK,
+ LPROC_LL_UNLINK,
+ LPROC_LL_SYMLINK,
+ LPROC_LL_MKDIR,
+ LPROC_LL_RMDIR,
+ LPROC_LL_MKNOD,
+ LPROC_LL_RENAME,
+ LPROC_LL_STAFS,
+ LPROC_LL_ALLOC_INODE,
+ LPROC_LL_SETXATTR,
+ LPROC_LL_GETXATTR,
+ LPROC_LL_LISTXATTR,
+ LPROC_LL_REMOVEXATTR,
+ LPROC_LL_INODE_PERM,
+ LPROC_LL_FILE_OPCODES
+};
+
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_log.h b/drivers/staging/lustre/lustre/include/linux/lustre_log.h
new file mode 100644
index 0000000..e9c8e56
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_log.h
@@ -0,0 +1,57 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/include/linux/lustre_log.h
+ *
+ * Generic infrastructure for managing a collection of logs.
+ * These logs are used for:
+ * - orphan recovery: OST adds record on create
+ * - mtime/size consistency: the OST adds a record on first write
+ * - open/unlinked objects: OST adds a record on destroy
+ *
+ * - mds unlink log: the MDS adds an entry upon delete
+ *
+ * - raid1 replication log between OSTs
+ * - MDS replication logs
+ */
+
+#ifndef _LINUX_LUSTRE_LOG_H
+#define _LINUX_LUSTRE_LOG_H
+
+#ifndef _LUSTRE_LOG_H
+#error Do not #include this file directly. #include <lustre_log.h> instead
+#endif
+
+#define LUSTRE_LOG_SERVER
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_net.h b/drivers/staging/lustre/lustre/include/linux/lustre_net.h
new file mode 100644
index 0000000..05de4d8
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_net.h
@@ -0,0 +1,49 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef _LINUX_LUSTRE_NET_H
+#define _LINUX_LUSTRE_NET_H
+
+#ifndef _LUSTRE_NET_H
+#error Do not #include this file directly. #include <lustre_net.h> instead
+#endif
+
+#include <linux/workqueue.h>
+
+/* XXX Liang: should be moved to another header instead of here */
+#ifndef WITH_GROUP_INFO
+#define WITH_GROUP_INFO
+#endif
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
new file mode 100644
index 0000000..a260e99
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
@@ -0,0 +1,83 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef LUSTRE_PATCHLESS_COMPAT_H
+#define LUSTRE_PATCHLESS_COMPAT_H
+
+#include <linux/fs.h>
+
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/hash.h>
+
+
+#define ll_delete_from_page_cache(page) delete_from_page_cache(page)
+
+static inline void
+truncate_complete_page(struct address_space *mapping, struct page *page)
+{
+ if (page->mapping != mapping)
+ return;
+
+ if (PagePrivate(page))
+ page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
+
+ cancel_dirty_page(page, PAGE_SIZE);
+ ClearPageMappedToDisk(page);
+ ll_delete_from_page_cache(page);
+}
+
+#ifdef ATTR_OPEN
+# define ATTR_FROM_OPEN ATTR_OPEN
+#else
+# ifndef ATTR_FROM_OPEN
+# define ATTR_FROM_OPEN 0
+# endif
+#endif /* ATTR_OPEN */
+
+#ifndef ATTR_RAW
+#define ATTR_RAW 0
+#endif
+
+#ifndef ATTR_CTIME_SET
+/*
+ * set ATTR_CTIME_SET to a high value to avoid any risk of collision with other
+ * ATTR_* attributes (see bug 13828)
+ */
+#define ATTR_CTIME_SET (1 << 28)
+#endif
+
+#endif /* LUSTRE_PATCHLESS_COMPAT_H */
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_quota.h b/drivers/staging/lustre/lustre/include/linux/lustre_quota.h
new file mode 100644
index 0000000..a395050
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_quota.h
@@ -0,0 +1,46 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef _LINUX_LUSTRE_QUOTA_H
+#define _LINUX_LUSTRE_QUOTA_H
+
+#ifndef _LUSTRE_QUOTA_H
+#error Do not #include this file directly. #include <lustre_quota.h> instead
+#endif
+
+#include <linux/fs.h>
+#include <linux/quota.h>
+#include <linux/quotaops.h>
+
+#endif /* _LINUX_LUSTRE_QUOTA_H */
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_user.h b/drivers/staging/lustre/lustre/include/linux/lustre_user.h
new file mode 100644
index 0000000..9cc2849
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_user.h
@@ -0,0 +1,70 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/include/linux/lustre_user.h
+ *
+ * Lustre public user-space interface definitions.
+ */
+
+#ifndef _LINUX_LUSTRE_USER_H
+#define _LINUX_LUSTRE_USER_H
+
+# include <linux/quota.h>
+
+/*
+ * asm-x86_64/processor.h on some SLES 9 distros seems to use
+ * kernel-only typedefs. Fortunately, skipping it altogether is OK
+ * (for now).
+ */
+#define __ASM_X86_64_PROCESSOR_H
+
+#include <linux/string.h>
+
+/*
+ * We always need to use the 64-bit version because the structure
+ * is shared across the entire cluster, where 32-bit and 64-bit
+ * machines coexist.
+ */
+#if __BITS_PER_LONG != 64 || defined(__ARCH_WANT_STAT64)
+typedef struct stat64 lstat_t;
+#define lstat_f lstat64
+#else
+typedef struct stat lstat_t;
+#define lstat_f lstat
+#endif
+
+#define HAVE_LOV_USER_MDS_DATA
+
+#endif /* _LINUX_LUSTRE_USER_H */
diff --git a/drivers/staging/lustre/lustre/include/linux/lvfs.h b/drivers/staging/lustre/lustre/include/linux/lvfs.h
new file mode 100644
index 0000000..e61f1b8
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/linux/lvfs.h
@@ -0,0 +1,134 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/include/linux/lvfs.h
+ *
+ * lustre VFS/process permission interface
+ */
+
+#ifndef __LINUX_LVFS_H__
+#define __LINUX_LVFS_H__
+
+#ifndef __LVFS_H__
+#error Do not #include this file directly. #include <lvfs.h> instead
+#endif
+
+#include <linux/lustre_compat25.h>
+#include <linux/lustre_common.h>
+#include <linux/lvfs_linux.h>
+
+#define LLOG_LVFS
+
+/* simple.c */
+
+struct lvfs_ucred {
+ kuid_t luc_uid;
+ kgid_t luc_gid;
+ kuid_t luc_fsuid;
+ kgid_t luc_fsgid;
+ kernel_cap_t luc_cap;
+ __u32 luc_umask;
+ struct group_info *luc_ginfo;
+ struct md_identity *luc_identity;
+};
+
+struct lvfs_callback_ops {
+ struct dentry *(*l_fid2dentry)(__u64 id_ino, __u32 gen, __u64 gr, void *data);
+};
+
+#define OBD_RUN_CTXT_MAGIC 0xC0FFEEAA
+#define OBD_CTXT_DEBUG /* development-only debugging */
+struct lvfs_run_ctxt {
+ struct vfsmount *pwdmnt;
+ struct dentry *pwd;
+ mm_segment_t fs;
+ struct lvfs_ucred luc;
+ int ngroups;
+ struct lvfs_callback_ops cb_ops;
+ struct group_info *group_info;
+ struct dt_device *dt;
+#ifdef OBD_CTXT_DEBUG
+ __u32 magic;
+#endif
+};
+
+#ifdef OBD_CTXT_DEBUG
+#define OBD_SET_CTXT_MAGIC(ctxt) (ctxt)->magic = OBD_RUN_CTXT_MAGIC
+#else
+#define OBD_SET_CTXT_MAGIC(ctxt) do {} while(0)
+#endif
+
+
+int lustre_rename(struct dentry *dir, struct vfsmount *mnt, char *oldname,
+ char *newname);
+
+static inline void l_dput(struct dentry *de)
+{
+ if (!de || IS_ERR(de))
+ return;
+ //shrink_dcache_parent(de);
+ LASSERT(d_count(de) > 0);
+ dput(de);
+}
+
+/* We need to hold the inode semaphore over the dcache lookup itself, or we
+ * run the risk of entering the filesystem lookup path concurrently on SMP
+ * systems, and instantiating two inodes for the same entry. We still
+ * protect against concurrent addition/removal races with the DLM locking.
+ */
+static inline struct dentry *ll_lookup_one_len(const char *fid_name,
+ struct dentry *dparent,
+ int fid_namelen)
+{
+ struct dentry *dchild;
+
+ mutex_lock(&dparent->d_inode->i_mutex);
+ dchild = lookup_one_len(fid_name, dparent, fid_namelen);
+ mutex_unlock(&dparent->d_inode->i_mutex);
+
+ if (IS_ERR(dchild) || dchild->d_inode == NULL)
+ return dchild;
+
+ if (is_bad_inode(dchild->d_inode)) {
+ CERROR("bad inode returned %lu/%u\n",
+ dchild->d_inode->i_ino, dchild->d_inode->i_generation);
+ dput(dchild);
+ dchild = ERR_PTR(-ENOENT);
+ }
+ return dchild;
+}
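+
+/* Usage sketch (illustrative only): a hypothetical caller resolving "name"
+ * under dparent and reporting the inode number; the needed includes (e.g.
+ * for strlen) are assumed to be pulled in by the surrounding headers. */
+static inline int ll_lookup_ino_example(struct dentry *dparent,
+					const char *name, unsigned long *ino)
+{
+	struct dentry *dchild;
+
+	dchild = ll_lookup_one_len(name, dparent, strlen(name));
+	if (IS_ERR(dchild))
+		return PTR_ERR(dchild);
+	if (dchild->d_inode == NULL) {		/* negative dentry */
+		dput(dchild);
+		return -ENOENT;
+	}
+	*ino = dchild->d_inode->i_ino;
+	dput(dchild);				/* drop the lookup reference */
+	return 0;
+}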
+
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/linux/lvfs_linux.h b/drivers/staging/lustre/lustre/include/linux/lvfs_linux.h
new file mode 100644
index 0000000..140a60f
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/linux/lvfs_linux.h
@@ -0,0 +1,66 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __LVFS_LINUX_H__
+#define __LVFS_LINUX_H__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/sched.h>
+
+#include <lvfs.h>
+
+#define l_file file
+#define l_dentry dentry
+
+#define l_filp_open filp_open
+
+struct lvfs_run_ctxt;
+struct l_file *l_dentry_open(struct lvfs_run_ctxt *, struct l_dentry *,
+ int flags);
+
+struct l_linux_dirent {
+ struct list_head lld_list;
+ ino_t lld_ino;
+ unsigned long lld_off;
+ char lld_name[LL_FID_NAMELEN];
+};
+struct l_readdir_callback {
+ struct l_linux_dirent *lrc_dirent;
+ struct list_head *lrc_list;
+};
+
+#endif /* __LVFS_LINUX_H__ */
diff --git a/drivers/staging/lustre/lustre/include/linux/obd.h b/drivers/staging/lustre/lustre/include/linux/obd.h
new file mode 100644
index 0000000..01a5026
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/linux/obd.h
@@ -0,0 +1,125 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __LINUX_OBD_H
+#define __LINUX_OBD_H
+
+#ifndef __OBD_H
+#error Do not #include this file directly. #include <obd.h> instead
+#endif
+
+#include <obd_support.h>
+
+# include <linux/fs.h>
+# include <linux/list.h>
+# include <linux/sched.h> /* for struct task_struct, for current.h */
+# include <linux/proc_fs.h>
+# include <linux/mount.h>
+# include <linux/lustre_intent.h>
+
+struct ll_iattr {
+ struct iattr iattr;
+ unsigned int ia_attr_flags;
+};
+
+#define CLIENT_OBD_LIST_LOCK_DEBUG 1
+
+typedef struct {
+ spinlock_t lock;
+
+ unsigned long time;
+ struct task_struct *task;
+ const char *func;
+ int line;
+} client_obd_lock_t;
+
+static inline void __client_obd_list_lock(client_obd_lock_t *lock,
+ const char *func, int line)
+{
+ unsigned long cur = jiffies;
+ while (1) {
+ if (spin_trylock(&lock->lock)) {
+ LASSERT(lock->task == NULL);
+ lock->task = current;
+ lock->func = func;
+ lock->line = line;
+ lock->time = jiffies;
+ break;
+ }
+
+ if ((jiffies - cur > 5 * HZ) &&
+ (jiffies - lock->time > 5 * HZ)) {
+ struct task_struct *task = lock->task;
+
+ if (task == NULL)
+ continue;
+
+ LCONSOLE_WARN("%s:%d: lock %p was acquired"
+ " by <%s:%d:%s:%d> for %lu seconds.\n",
+ current->comm, current->pid,
+ lock, task->comm, task->pid,
+ lock->func, lock->line,
+ (jiffies - lock->time) / HZ);
+ LCONSOLE_WARN("====== for current process =====\n");
+ dump_stack();
+ LCONSOLE_WARN("====== end =======\n");
+ cfs_pause(1000 * HZ);
+ }
+ cpu_relax();
+ }
+}
+
+#define client_obd_list_lock(lock) \
+ __client_obd_list_lock(lock, __FUNCTION__, __LINE__)
+
+static inline void client_obd_list_unlock(client_obd_lock_t *lock)
+{
+ LASSERT(lock->task != NULL);
+ lock->task = NULL;
+ lock->time = jiffies;
+ spin_unlock(&lock->lock);
+}
+
+
+static inline void client_obd_list_lock_init(client_obd_lock_t *lock)
+{
+ spin_lock_init(&lock->lock);
+}
+
+static inline void client_obd_list_lock_done(client_obd_lock_t *lock)
+{}
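+
+/* Usage sketch (illustrative only): the intended lifecycle of the debug lock
+ * above; the embedding structure and its field names are hypothetical. */
+#if 0
+struct my_client_state {
+	client_obd_lock_t	mcs_lock;
+	struct list_head	mcs_items;
+};
+
+static void my_client_state_init(struct my_client_state *mcs)
+{
+	client_obd_list_lock_init(&mcs->mcs_lock);
+	INIT_LIST_HEAD(&mcs->mcs_items);
+}
+
+static void my_client_add(struct my_client_state *mcs, struct list_head *item)
+{
+	client_obd_list_lock(&mcs->mcs_lock);	/* records owner, func, line */
+	list_add_tail(item, &mcs->mcs_items);
+	client_obd_list_unlock(&mcs->mcs_lock);
+}
+#endif	/* example only */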
+
+#endif /* __LINUX_OBD_H */
diff --git a/drivers/staging/lustre/lustre/include/linux/obd_class.h b/drivers/staging/lustre/lustre/include/linux/obd_class.h
new file mode 100644
index 0000000..021ead6
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/linux/obd_class.h
@@ -0,0 +1,58 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __LINUX_CLASS_OBD_H
+#define __LINUX_CLASS_OBD_H
+
+#ifndef __CLASS_OBD_H
+#error Do not #include this file directly. #include <obd_class.h> instead
+#endif
+
+#include <asm/uaccess.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+
+/* obdo.c */
+void obdo_from_la(struct obdo *dst, struct lu_attr *la, __u64 valid);
+void la_from_obdo(struct lu_attr *la, struct obdo *dst, obd_flag valid);
+void obdo_refresh_inode(struct inode *dst, struct obdo *src, obd_flag valid);
+void obdo_to_inode(struct inode *dst, struct obdo *src, obd_flag valid);
+#define ll_inode_flags(inode) (inode->i_flags)
+
+
+#endif /* __LINUX_CLASS_OBD_H */
diff --git a/drivers/staging/lustre/lustre/include/linux/obd_support.h b/drivers/staging/lustre/lustre/include/linux/obd_support.h
new file mode 100644
index 0000000..9166503
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/linux/obd_support.h
@@ -0,0 +1,63 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef _LINUX_OBD_SUPPORT
+#define _LINUX_OBD_SUPPORT
+
+#ifndef _OBD_SUPPORT
+#error Do not #include this file directly. #include <obd_support.h> instead
+#endif
+
+#ifdef CONFIG_X86
+#include <asm/cpufeature.h>
+#endif
+#include <asm/processor.h>
+#include <linux/seq_file.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/swap.h>
+#include <linux/lustre_compat25.h>
+#include <linux/lustre_common.h>
+#include <linux/libcfs/libcfs.h>
+#include <lustre/lustre_idl.h>
+
+
+# include <linux/types.h>
+# include <linux/blkdev.h>
+# include <lvfs.h>
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h
new file mode 100644
index 0000000..56b0572
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h
@@ -0,0 +1,1004 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/include/lprocfs_status.h
+ *
+ * Top level header file for LProc SNMP
+ *
+ * Author: Hariharan Thantry thantry@users.sourceforge.net
+ */
+#ifndef _LPROCFS_SNMP_H
+#define _LPROCFS_SNMP_H
+
+#include <linux/lprocfs_status.h>
+#include <lustre/lustre_idl.h>
+#include <linux/libcfs/params_tree.h>
+
+struct lprocfs_vars {
+ const char *name;
+ struct file_operations *fops;
+ void *data;
+ /**
+ * /proc file mode.
+ */
+ umode_t proc_mode;
+};
+
+struct lprocfs_static_vars {
+ struct lprocfs_vars *module_vars;
+ struct lprocfs_vars *obd_vars;
+};
+
+/* if we find more consumers this could be generalized */
+#define OBD_HIST_MAX 32
+struct obd_histogram {
+ spinlock_t oh_lock;
+ unsigned long oh_buckets[OBD_HIST_MAX];
+};
+
+enum {
+ BRW_R_PAGES = 0,
+ BRW_W_PAGES,
+ BRW_R_RPC_HIST,
+ BRW_W_RPC_HIST,
+ BRW_R_IO_TIME,
+ BRW_W_IO_TIME,
+ BRW_R_DISCONT_PAGES,
+ BRW_W_DISCONT_PAGES,
+ BRW_R_DISCONT_BLOCKS,
+ BRW_W_DISCONT_BLOCKS,
+ BRW_R_DISK_IOSIZE,
+ BRW_W_DISK_IOSIZE,
+ BRW_R_DIO_FRAGS,
+ BRW_W_DIO_FRAGS,
+ BRW_LAST,
+};
+
+struct brw_stats {
+ struct obd_histogram hist[BRW_LAST];
+};
+
+enum {
+ RENAME_SAMEDIR_SIZE = 0,
+ RENAME_CROSSDIR_SRC_SIZE,
+ RENAME_CROSSDIR_TGT_SIZE,
+ RENAME_LAST,
+};
+
+struct rename_stats {
+ struct obd_histogram hist[RENAME_LAST];
+};
+
+/* An lprocfs counter can be configured using the enum bit masks below.
+ *
+ * LPROCFS_CNTR_EXTERNALLOCK indicates that an external lock already
+ * protects this counter from concurrent updates. If not specified,
+ * lprocfs uses an internal per-counter lock. External locks are
+ * not used to protect counter increments, but are used to protect
+ * counter readout and resets.
+ *
+ * LPROCFS_CNTR_AVGMINMAX indicates multi-valued counter samples
+ * (i.e. the counter can be incremented by more than "1"). When specified,
+ * the counter maintains min, max and sum in addition to a simple
+ * invocation count. This allows averages to be computed.
+ * If not specified, the counter is an increment-by-1 counter.
+ * min, max, sum, etc. are not maintained.
+ *
+ * LPROCFS_CNTR_STDDEV indicates that the counter should track sum of
+ * squares (for multi-valued counter samples only). This allows
+ * external computation of standard deviation, but involves a 64-bit
+ * multiply per counter increment.
+ */
+
+enum {
+ LPROCFS_CNTR_EXTERNALLOCK = 0x0001,
+ LPROCFS_CNTR_AVGMINMAX = 0x0002,
+ LPROCFS_CNTR_STDDEV = 0x0004,
+
+ /* counter data type */
+ LPROCFS_TYPE_REGS = 0x0100,
+ LPROCFS_TYPE_BYTES = 0x0200,
+ LPROCFS_TYPE_PAGES = 0x0400,
+ LPROCFS_TYPE_CYCLE = 0x0800,
+};
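+
+/*
+ * Illustrative sketch (not part of this interface): how the flags above are
+ * typically combined when a counter is registered with lprocfs_counter_init()
+ * (declared later in this header). The stats handle "stats" and the index
+ * "OP_WRITE_BYTES" are assumptions made for the example only.
+ *
+ *	lprocfs_counter_init(stats, OP_WRITE_BYTES,
+ *			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
+ *			     "write_bytes", "bytes");
+ */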
+
+#define LC_MIN_INIT ((~(__u64)0) >> 1)
+
+struct lprocfs_counter_header {
+ unsigned int lc_config;
+ const char *lc_name; /* must be static */
+ const char *lc_units; /* must be static */
+};
+
+struct lprocfs_counter {
+ __s64 lc_count;
+ __s64 lc_min;
+ __s64 lc_max;
+ __s64 lc_sumsquare;
+ /*
+	 * Every counter has lc_array_sum[0]; lc_array_sum[1] exists only
+	 * for counters in IRQ-safe stats, i.e. stats allocated with the
+	 * LPROCFS_STATS_FLAG_IRQ_SAFE flag.
+ */
+ __s64 lc_array_sum[1];
+};
+#define lc_sum lc_array_sum[0]
+#define lc_sum_irq lc_array_sum[1]
+
+struct lprocfs_percpu {
+#ifndef __GNUC__
+ __s64 pad;
+#endif
+ struct lprocfs_counter lp_cntr[0];
+};
+
+#define LPROCFS_GET_NUM_CPU 0x0001
+#define LPROCFS_GET_SMP_ID 0x0002
+
+enum lprocfs_stats_flags {
+ LPROCFS_STATS_FLAG_NONE = 0x0000, /* per cpu counter */
+ LPROCFS_STATS_FLAG_NOPERCPU = 0x0001, /* stats have no percpu
+ * area and need locking */
+ LPROCFS_STATS_FLAG_IRQ_SAFE = 0x0002, /* alloc need irq safe */
+};
+
+enum lprocfs_fields_flags {
+ LPROCFS_FIELDS_FLAGS_CONFIG = 0x0001,
+ LPROCFS_FIELDS_FLAGS_SUM = 0x0002,
+ LPROCFS_FIELDS_FLAGS_MIN = 0x0003,
+ LPROCFS_FIELDS_FLAGS_MAX = 0x0004,
+ LPROCFS_FIELDS_FLAGS_AVG = 0x0005,
+ LPROCFS_FIELDS_FLAGS_SUMSQUARE = 0x0006,
+ LPROCFS_FIELDS_FLAGS_COUNT = 0x0007,
+};
+
+struct lprocfs_stats {
+ /* # of counters */
+ unsigned short ls_num;
+ /* 1 + the biggest cpu # whose ls_percpu slot has been allocated */
+ unsigned short ls_biggest_alloc_num;
+ enum lprocfs_stats_flags ls_flags;
+ /* Lock used when there are no percpu stats areas; For percpu stats,
+ * it is used to protect ls_biggest_alloc_num change */
+ spinlock_t ls_lock;
+
+ /* has ls_num of counter headers */
+ struct lprocfs_counter_header *ls_cnt_header;
+ struct lprocfs_percpu *ls_percpu[0];
+};
+
+#define OPC_RANGE(seg) (seg ## _LAST_OPC - seg ## _FIRST_OPC)
+
+/* Pack all opcodes down into a single monotonically increasing index */
+static inline int opcode_offset(__u32 opc)
+{
+ if (opc < OST_LAST_OPC) {
+ /* OST opcode */
+ return (opc - OST_FIRST_OPC);
+ } else if (opc < MDS_LAST_OPC) {
+ /* MDS opcode */
+ return (opc - MDS_FIRST_OPC +
+ OPC_RANGE(OST));
+ } else if (opc < LDLM_LAST_OPC) {
+ /* LDLM Opcode */
+ return (opc - LDLM_FIRST_OPC +
+ OPC_RANGE(MDS) +
+ OPC_RANGE(OST));
+ } else if (opc < MGS_LAST_OPC) {
+ /* MGS Opcode */
+ return (opc - MGS_FIRST_OPC +
+ OPC_RANGE(LDLM) +
+ OPC_RANGE(MDS) +
+ OPC_RANGE(OST));
+ } else if (opc < OBD_LAST_OPC) {
+ /* OBD Ping */
+ return (opc - OBD_FIRST_OPC +
+ OPC_RANGE(MGS) +
+ OPC_RANGE(LDLM) +
+ OPC_RANGE(MDS) +
+ OPC_RANGE(OST));
+ } else if (opc < LLOG_LAST_OPC) {
+ /* LLOG Opcode */
+ return (opc - LLOG_FIRST_OPC +
+ OPC_RANGE(OBD) +
+ OPC_RANGE(MGS) +
+ OPC_RANGE(LDLM) +
+ OPC_RANGE(MDS) +
+ OPC_RANGE(OST));
+ } else if (opc < QUOTA_LAST_OPC) {
+ /* LQUOTA Opcode */
+ return (opc - QUOTA_FIRST_OPC +
+ OPC_RANGE(LLOG) +
+ OPC_RANGE(OBD) +
+ OPC_RANGE(MGS) +
+ OPC_RANGE(LDLM) +
+ OPC_RANGE(MDS) +
+ OPC_RANGE(OST));
+ } else if (opc < SEQ_LAST_OPC) {
+ /* SEQ opcode */
+ return (opc - SEQ_FIRST_OPC +
+ OPC_RANGE(QUOTA) +
+ OPC_RANGE(LLOG) +
+ OPC_RANGE(OBD) +
+ OPC_RANGE(MGS) +
+ OPC_RANGE(LDLM) +
+ OPC_RANGE(MDS) +
+ OPC_RANGE(OST));
+ } else if (opc < SEC_LAST_OPC) {
+ /* SEC opcode */
+ return (opc - SEC_FIRST_OPC +
+ OPC_RANGE(SEQ) +
+ OPC_RANGE(QUOTA) +
+ OPC_RANGE(LLOG) +
+ OPC_RANGE(OBD) +
+ OPC_RANGE(MGS) +
+ OPC_RANGE(LDLM) +
+ OPC_RANGE(MDS) +
+ OPC_RANGE(OST));
+ } else if (opc < FLD_LAST_OPC) {
+ /* FLD opcode */
+ return (opc - FLD_FIRST_OPC +
+ OPC_RANGE(SEC) +
+ OPC_RANGE(SEQ) +
+ OPC_RANGE(QUOTA) +
+ OPC_RANGE(LLOG) +
+ OPC_RANGE(OBD) +
+ OPC_RANGE(MGS) +
+ OPC_RANGE(LDLM) +
+ OPC_RANGE(MDS) +
+ OPC_RANGE(OST));
+ } else if (opc < UPDATE_LAST_OPC) {
+ /* update opcode */
+ return (opc - UPDATE_FIRST_OPC +
+ OPC_RANGE(FLD) +
+ OPC_RANGE(SEC) +
+ OPC_RANGE(SEQ) +
+ OPC_RANGE(QUOTA) +
+ OPC_RANGE(LLOG) +
+ OPC_RANGE(OBD) +
+ OPC_RANGE(MGS) +
+ OPC_RANGE(LDLM) +
+ OPC_RANGE(MDS) +
+ OPC_RANGE(OST));
+ } else {
+ /* Unknown Opcode */
+ return -1;
+ }
+}
+
+
+#define LUSTRE_MAX_OPCODES (OPC_RANGE(OST) + \
+ OPC_RANGE(MDS) + \
+ OPC_RANGE(LDLM) + \
+ OPC_RANGE(MGS) + \
+ OPC_RANGE(OBD) + \
+ OPC_RANGE(LLOG) + \
+			OPC_RANGE(QUOTA) + \
+ OPC_RANGE(SEQ) + \
+ OPC_RANGE(SEC) + \
+ OPC_RANGE(FLD) + \
+ OPC_RANGE(UPDATE))
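+
+/*
+ * Illustrative sketch (not part of this interface): opcode_offset() packs an
+ * RPC opcode into a dense index that can be used as a slot in a stats array
+ * of LUSTRE_MAX_OPCODES counters. "svc_stats" below is an assumed
+ * struct lprocfs_stats pointer used only for the example.
+ *
+ *	int idx = opcode_offset(opc);
+ *
+ *	if (idx >= 0 && idx < LUSTRE_MAX_OPCODES)
+ *		lprocfs_counter_incr(svc_stats, idx);
+ */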
+
+#define EXTRA_MAX_OPCODES ((PTLRPC_LAST_CNTR - PTLRPC_FIRST_CNTR) + \
+ OPC_RANGE(EXTRA))
+
+enum {
+ PTLRPC_REQWAIT_CNTR = 0,
+ PTLRPC_REQQDEPTH_CNTR,
+ PTLRPC_REQACTIVE_CNTR,
+ PTLRPC_TIMEOUT,
+ PTLRPC_REQBUF_AVAIL_CNTR,
+ PTLRPC_LAST_CNTR
+};
+
+#define PTLRPC_FIRST_CNTR PTLRPC_REQWAIT_CNTR
+
+enum {
+ LDLM_GLIMPSE_ENQUEUE = 0,
+ LDLM_PLAIN_ENQUEUE,
+ LDLM_EXTENT_ENQUEUE,
+ LDLM_FLOCK_ENQUEUE,
+ LDLM_IBITS_ENQUEUE,
+ MDS_REINT_SETATTR,
+ MDS_REINT_CREATE,
+ MDS_REINT_LINK,
+ MDS_REINT_UNLINK,
+ MDS_REINT_RENAME,
+ MDS_REINT_OPEN,
+ MDS_REINT_SETXATTR,
+ BRW_READ_BYTES,
+ BRW_WRITE_BYTES,
+ EXTRA_LAST_OPC
+};
+
+#define EXTRA_FIRST_OPC LDLM_GLIMPSE_ENQUEUE
+/* class_obd.c */
+extern struct proc_dir_entry *proc_lustre_root;
+
+struct obd_device;
+struct obd_histogram;
+
+/* Days / hours / mins / seconds format */
+struct dhms {
+ int d,h,m,s;
+};
+static inline void s2dhms(struct dhms *ts, time_t secs)
+{
+ ts->d = secs / 86400;
+ secs = secs % 86400;
+ ts->h = secs / 3600;
+ secs = secs % 3600;
+ ts->m = secs / 60;
+ ts->s = secs % 60;
+}
+#define DHMS_FMT "%dd%dh%02dm%02ds"
+#define DHMS_VARS(x) (x)->d, (x)->h, (x)->m, (x)->s
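+
+/*
+ * Illustrative sketch (not part of this interface): formatting an elapsed
+ * time with the helpers above. "m" (a seq_file) and "elapsed" (seconds) are
+ * assumptions made for the example only.
+ *
+ *	struct dhms ts;
+ *
+ *	s2dhms(&ts, elapsed);
+ *	seq_printf(m, "age: "DHMS_FMT"\n", DHMS_VARS(&ts));
+ */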
+
+#define JOBSTATS_JOBID_VAR_MAX_LEN 20
+#define JOBSTATS_DISABLE "disable"
+#define JOBSTATS_PROCNAME_UID "procname_uid"
+
+#ifdef LPROCFS
+
+extern int lprocfs_stats_alloc_one(struct lprocfs_stats *stats,
+ unsigned int cpuid);
+/*
+ * \return value
+ *	< 0 : on error (only possible when opc is LPROCFS_GET_SMP_ID)
+ */
+static inline int lprocfs_stats_lock(struct lprocfs_stats *stats, int opc,
+ unsigned long *flags)
+{
+ int rc = 0;
+
+ switch (opc) {
+ default:
+ LBUG();
+
+ case LPROCFS_GET_SMP_ID:
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
+ spin_lock_irqsave(&stats->ls_lock, *flags);
+ else
+ spin_lock(&stats->ls_lock);
+ return 0;
+ } else {
+ unsigned int cpuid = get_cpu();
+
+ if (unlikely(stats->ls_percpu[cpuid] == NULL)) {
+ rc = lprocfs_stats_alloc_one(stats, cpuid);
+ if (rc < 0) {
+ put_cpu();
+ return rc;
+ }
+ }
+ return cpuid;
+ }
+
+ case LPROCFS_GET_NUM_CPU:
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
+ spin_lock_irqsave(&stats->ls_lock, *flags);
+ else
+ spin_lock(&stats->ls_lock);
+ return 1;
+ } else {
+ return stats->ls_biggest_alloc_num;
+ }
+ }
+}
+
+static inline void lprocfs_stats_unlock(struct lprocfs_stats *stats, int opc,
+ unsigned long *flags)
+{
+ switch (opc) {
+ default:
+ LBUG();
+
+ case LPROCFS_GET_SMP_ID:
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) {
+ spin_unlock_irqrestore(&stats->ls_lock,
+ *flags);
+ } else {
+ spin_unlock(&stats->ls_lock);
+ }
+ } else {
+ put_cpu();
+ }
+ return;
+
+ case LPROCFS_GET_NUM_CPU:
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) {
+ spin_unlock_irqrestore(&stats->ls_lock,
+ *flags);
+ } else {
+ spin_unlock(&stats->ls_lock);
+ }
+ }
+ return;
+ }
+}
+
+static inline unsigned int
+lprocfs_stats_counter_size(struct lprocfs_stats *stats)
+{
+ unsigned int percpusize;
+
+ percpusize = offsetof(struct lprocfs_percpu, lp_cntr[stats->ls_num]);
+
+ /* irq safe stats need lc_array_sum[1] */
+ if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
+ percpusize += stats->ls_num * sizeof(__s64);
+
+ if ((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0)
+ percpusize = L1_CACHE_ALIGN(percpusize);
+
+ return percpusize;
+}
+
+static inline struct lprocfs_counter *
+lprocfs_stats_counter_get(struct lprocfs_stats *stats, unsigned int cpuid,
+ int index)
+{
+ struct lprocfs_counter *cntr;
+
+ cntr = &stats->ls_percpu[cpuid]->lp_cntr[index];
+
+ if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
+ cntr = (void *)cntr + index * sizeof(__s64);
+
+ return cntr;
+}
+
+/* Two optimized LPROCFS counter increment functions are provided:
+ *     lprocfs_counter_incr(stats, idx) - optimized for increment-by-one counters
+ *     lprocfs_counter_add(stats, idx, amount) - use for multi-valued counters
+ * The counter data layout allows the config flag, the counter lock and the
+ * count itself to reside within a single cache line.
+ */
+
+extern void lprocfs_counter_add(struct lprocfs_stats *stats, int idx,
+ long amount);
+extern void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx,
+ long amount);
+
+#define lprocfs_counter_incr(stats, idx) \
+ lprocfs_counter_add(stats, idx, 1)
+#define lprocfs_counter_decr(stats, idx) \
+ lprocfs_counter_sub(stats, idx, 1)
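+
+/*
+ * Illustrative sketch (not part of this interface): incrementing the two
+ * kinds of counters described above. "stats", "OP_READ" and "OP_READ_BYTES"
+ * are assumptions made for the example only.
+ *
+ *	lprocfs_counter_incr(stats, OP_READ);              (by-one counter)
+ *	lprocfs_counter_add(stats, OP_READ_BYTES, count);  (multi-valued)
+ */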
+
+extern __s64 lprocfs_read_helper(struct lprocfs_counter *lc,
+ struct lprocfs_counter_header *header,
+ enum lprocfs_stats_flags flags,
+ enum lprocfs_fields_flags field);
+static inline __u64 lprocfs_stats_collector(struct lprocfs_stats *stats,
+ int idx,
+ enum lprocfs_fields_flags field)
+{
+ int i;
+ unsigned int num_cpu;
+ unsigned long flags = 0;
+ __u64 ret = 0;
+
+ LASSERT(stats != NULL);
+
+ num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
+ for (i = 0; i < num_cpu; i++) {
+ if (stats->ls_percpu[i] == NULL)
+ continue;
+ ret += lprocfs_read_helper(
+ lprocfs_stats_counter_get(stats, i, idx),
+ &stats->ls_cnt_header[idx], stats->ls_flags,
+ field);
+ }
+ lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags);
+ return ret;
+}
+
+extern struct lprocfs_stats *
+lprocfs_alloc_stats(unsigned int num, enum lprocfs_stats_flags flags);
+extern void lprocfs_clear_stats(struct lprocfs_stats *stats);
+extern void lprocfs_free_stats(struct lprocfs_stats **stats);
+extern void lprocfs_init_ops_stats(int num_private_stats,
+ struct lprocfs_stats *stats);
+extern void lprocfs_init_mps_stats(int num_private_stats,
+ struct lprocfs_stats *stats);
+extern void lprocfs_init_ldlm_stats(struct lprocfs_stats *ldlm_stats);
+extern int lprocfs_alloc_obd_stats(struct obd_device *obddev,
+ unsigned int num_private_stats);
+extern int lprocfs_alloc_md_stats(struct obd_device *obddev,
+ unsigned int num_private_stats);
+extern void lprocfs_counter_init(struct lprocfs_stats *stats, int index,
+ unsigned conf, const char *name,
+ const char *units);
+extern void lprocfs_free_obd_stats(struct obd_device *obddev);
+extern void lprocfs_free_md_stats(struct obd_device *obddev);
+struct obd_export;
+struct nid_stat;
+extern int lprocfs_add_clear_entry(struct obd_device * obd,
+ struct proc_dir_entry *entry);
+extern int lprocfs_exp_setup(struct obd_export *exp,
+ lnet_nid_t *peer_nid, int *newnid);
+extern int lprocfs_exp_cleanup(struct obd_export *exp);
+extern struct proc_dir_entry *lprocfs_add_simple(struct proc_dir_entry *root,
+ char *name,
+ void *data,
+ struct file_operations *fops);
+extern struct proc_dir_entry *
+lprocfs_add_symlink(const char *name, struct proc_dir_entry *parent,
+ const char *format, ...);
+extern void lprocfs_free_per_client_stats(struct obd_device *obd);
+extern int
+lprocfs_nid_stats_clear_write(struct file *file, const char *buffer,
+ unsigned long count, void *data);
+extern int lprocfs_nid_stats_clear_read(struct seq_file *m, void *data);
+
+extern int lprocfs_register_stats(struct proc_dir_entry *root, const char *name,
+ struct lprocfs_stats *stats);
+
+/* lprocfs_status.c */
+extern int lprocfs_add_vars(struct proc_dir_entry *root,
+ struct lprocfs_vars *var,
+ void *data);
+
+extern struct proc_dir_entry *lprocfs_register(const char *name,
+ struct proc_dir_entry *parent,
+ struct lprocfs_vars *list,
+ void *data);
+
+extern void lprocfs_remove(struct proc_dir_entry **root);
+extern void lprocfs_remove_proc_entry(const char *name,
+ struct proc_dir_entry *parent);
+
+extern int lprocfs_obd_setup(struct obd_device *obd, struct lprocfs_vars *list);
+extern int lprocfs_obd_cleanup(struct obd_device *obd);
+
+extern int lprocfs_seq_create(struct proc_dir_entry *parent, const char *name,
+ umode_t mode,
+ const struct file_operations *seq_fops,
+ void *data);
+extern int lprocfs_obd_seq_create(struct obd_device *dev, const char *name,
+ umode_t mode,
+ const struct file_operations *seq_fops,
+ void *data);
+
+/* Generic callbacks */
+
+extern int lprocfs_rd_u64(struct seq_file *m, void *data);
+extern int lprocfs_rd_atomic(struct seq_file *m, void *data);
+extern int lprocfs_wr_atomic(struct file *file, const char *buffer,
+ unsigned long count, void *data);
+extern int lprocfs_rd_uint(struct seq_file *m, void *data);
+extern int lprocfs_wr_uint(struct file *file, const char *buffer,
+ unsigned long count, void *data);
+extern int lprocfs_rd_uuid(struct seq_file *m, void *data);
+extern int lprocfs_rd_name(struct seq_file *m, void *data);
+extern int lprocfs_rd_server_uuid(struct seq_file *m, void *data);
+extern int lprocfs_rd_conn_uuid(struct seq_file *m, void *data);
+extern int lprocfs_rd_import(struct seq_file *m, void *data);
+extern int lprocfs_rd_state(struct seq_file *m, void *data);
+extern int lprocfs_rd_connect_flags(struct seq_file *m, void *data);
+extern int lprocfs_rd_num_exports(struct seq_file *m, void *data);
+extern int lprocfs_rd_numrefs(struct seq_file *m, void *data);
+
+struct adaptive_timeout;
+extern int lprocfs_at_hist_helper(struct seq_file *m,
+ struct adaptive_timeout *at);
+extern int lprocfs_rd_timeouts(struct seq_file *m, void *data);
+extern int lprocfs_wr_timeouts(struct file *file, const char *buffer,
+ unsigned long count, void *data);
+extern int lprocfs_wr_evict_client(struct file *file, const char *buffer,
+ size_t count, loff_t *off);
+extern int lprocfs_wr_ping(struct file *file, const char *buffer,
+ size_t count, loff_t *off);
+extern int lprocfs_wr_import(struct file *file, const char *buffer,
+ size_t count, loff_t *off);
+extern int lprocfs_rd_pinger_recov(struct seq_file *m, void *n);
+extern int lprocfs_wr_pinger_recov(struct file *file, const char *buffer,
+ size_t count, loff_t *off);
+
+/* Statfs helpers */
+extern int lprocfs_rd_blksize(struct seq_file *m, void *data);
+extern int lprocfs_rd_kbytestotal(struct seq_file *m, void *data);
+extern int lprocfs_rd_kbytesfree(struct seq_file *m, void *data);
+extern int lprocfs_rd_kbytesavail(struct seq_file *m, void *data);
+extern int lprocfs_rd_filestotal(struct seq_file *m, void *data);
+extern int lprocfs_rd_filesfree(struct seq_file *m, void *data);
+
+extern int lprocfs_write_helper(const char *buffer, unsigned long count,
+ int *val);
+extern int lprocfs_write_frac_helper(const char *buffer, unsigned long count,
+ int *val, int mult);
+extern int lprocfs_seq_read_frac_helper(struct seq_file *m, long val, int mult);
+extern int lprocfs_read_frac_helper(char *buffer, unsigned long count,
+ long val, int mult);
+extern int lprocfs_write_u64_helper(const char *buffer, unsigned long count,
+ __u64 *val);
+extern int lprocfs_write_frac_u64_helper(const char *buffer,
+ unsigned long count,
+ __u64 *val, int mult);
+extern char *lprocfs_find_named_value(const char *buffer, const char *name,
+ size_t *count);
+void lprocfs_oh_tally(struct obd_histogram *oh, unsigned int value);
+void lprocfs_oh_tally_log2(struct obd_histogram *oh, unsigned int value);
+void lprocfs_oh_clear(struct obd_histogram *oh);
+unsigned long lprocfs_oh_sum(struct obd_histogram *oh);
+
+void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx,
+ struct lprocfs_counter *cnt);
+
+extern int lprocfs_single_release(cfs_inode_t *, struct file *);
+extern int lprocfs_seq_release(cfs_inode_t *, struct file *);
+
+/* You must use these macros when you want to refer to
+ * the import in a client obd_device for an lprocfs entry */
+#define LPROCFS_CLIMP_CHECK(obd) do { \
+ typecheck(struct obd_device *, obd); \
+ down_read(&(obd)->u.cli.cl_sem); \
+ if ((obd)->u.cli.cl_import == NULL) { \
+ up_read(&(obd)->u.cli.cl_sem); \
+ return -ENODEV; \
+ } \
+} while(0)
+#define LPROCFS_CLIMP_EXIT(obd) \
+ up_read(&(obd)->u.cli.cl_sem);
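+
+/*
+ * Illustrative sketch (not part of this interface): a read-only handler that
+ * dereferences the client import under LPROCFS_CLIMP_CHECK/EXIT. The handler
+ * name and the field printed are assumptions made for the example only.
+ *
+ *	static int foo_import_seq_show(struct seq_file *m, void *v)
+ *	{
+ *		struct obd_device *obd = m->private;
+ *
+ *		LPROCFS_CLIMP_CHECK(obd);
+ *		seq_printf(m, "import: %p\n", obd->u.cli.cl_import);
+ *		LPROCFS_CLIMP_EXIT(obd);
+ *		return 0;
+ *	}
+ */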
+
+
+/* Write the name##_seq_show function and call LPROC_SEQ_FOPS_RO for read-only
+   proc entries; otherwise, also define a name##_seq_write function for a
+   read-write proc entry and call LPROC_SEQ_FOPS instead. Finally,
+   call lprocfs_obd_seq_create(obd, filename, 0444, &name##_fops, data); */
+#define __LPROC_SEQ_FOPS(name, custom_seq_write) \
+static int name##_single_open(cfs_inode_t *inode, struct file *file) \
+{ \
+ return single_open(file, name##_seq_show, PDE_DATA(inode)); \
+} \
+struct file_operations name##_fops = { \
+ .owner = THIS_MODULE, \
+ .open = name##_single_open, \
+ .read = seq_read, \
+ .write = custom_seq_write, \
+ .llseek = seq_lseek, \
+ .release = lprocfs_single_release, \
+}
+
+#define LPROC_SEQ_FOPS_RO(name) __LPROC_SEQ_FOPS(name, NULL)
+#define LPROC_SEQ_FOPS(name) __LPROC_SEQ_FOPS(name, name##_seq_write)
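+
+/*
+ * Illustrative sketch (not part of this interface): the pattern described in
+ * the comment above for a read-only entry. "foo" and the string printed are
+ * assumptions made for the example only.
+ *
+ *	static int foo_seq_show(struct seq_file *m, void *v)
+ *	{
+ *		seq_printf(m, "%s\n", "example");
+ *		return 0;
+ *	}
+ *	LPROC_SEQ_FOPS_RO(foo);
+ *
+ *	... lprocfs_obd_seq_create(obd, "foo", 0444, &foo_fops, obd); ...
+ */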
+
+#define LPROC_SEQ_FOPS_RO_TYPE(name, type) \
+ static int name##_##type##_seq_show(struct seq_file *m, void *v)\
+ { \
+ return lprocfs_rd_##type(m, m->private); \
+ } \
+ LPROC_SEQ_FOPS_RO(name##_##type)
+
+#define LPROC_SEQ_FOPS_RW_TYPE(name, type) \
+ static int name##_##type##_seq_show(struct seq_file *m, void *v)\
+ { \
+ return lprocfs_rd_##type(m, m->private); \
+ } \
+ static ssize_t name##_##type##_seq_write(struct file *file, \
+ const char *buffer, size_t count, loff_t *off) \
+ { \
+ struct seq_file *seq = file->private_data; \
+ return lprocfs_wr_##type(file, buffer, \
+ count, seq->private); \
+ } \
+ LPROC_SEQ_FOPS(name##_##type);
+
+#define LPROC_SEQ_FOPS_WR_ONLY(name, type) \
+ static ssize_t name##_##type##_write(struct file *file, \
+ const char *buffer, size_t count, loff_t *off) \
+ { \
+ return lprocfs_wr_##type(file, buffer, count, off); \
+ } \
+ static int name##_##type##_open(cfs_inode_t *inode, struct file *file) \
+ { \
+ return single_open(file, NULL, PDE_DATA(inode)); \
+ } \
+ struct file_operations name##_##type##_fops = { \
+ .open = name##_##type##_open, \
+ .write = name##_##type##_write, \
+ .release = lprocfs_single_release, \
+ };
+
+/* lproc_ptlrpc.c */
+struct ptlrpc_request;
+extern void target_print_req(void *seq_file, struct ptlrpc_request *req);
+
+/* lproc_status.c */
+int lprocfs_obd_rd_max_pages_per_rpc(struct seq_file *m, void *data);
+int lprocfs_obd_wr_max_pages_per_rpc(struct file *file, const char *buffer,
+ size_t count, loff_t *off);
+
+/* all quota proc functions */
+extern int lprocfs_quota_rd_bunit(char *page, char **start,
+ loff_t off, int count,
+ int *eof, void *data);
+extern int lprocfs_quota_wr_bunit(struct file *file, const char *buffer,
+ unsigned long count, void *data);
+extern int lprocfs_quota_rd_btune(char *page, char **start,
+ loff_t off, int count,
+ int *eof, void *data);
+extern int lprocfs_quota_wr_btune(struct file *file, const char *buffer,
+ unsigned long count, void *data);
+extern int lprocfs_quota_rd_iunit(char *page, char **start,
+ loff_t off, int count,
+ int *eof, void *data);
+extern int lprocfs_quota_wr_iunit(struct file *file, const char *buffer,
+ unsigned long count, void *data);
+extern int lprocfs_quota_rd_itune(char *page, char **start,
+ loff_t off, int count,
+ int *eof, void *data);
+extern int lprocfs_quota_wr_itune(struct file *file, const char *buffer,
+ unsigned long count, void *data);
+extern int lprocfs_quota_rd_type(char *page, char **start, loff_t off, int count,
+ int *eof, void *data);
+extern int lprocfs_quota_wr_type(struct file *file, const char *buffer,
+ unsigned long count, void *data);
+extern int lprocfs_quota_rd_switch_seconds(char *page, char **start, loff_t off,
+ int count, int *eof, void *data);
+extern int lprocfs_quota_wr_switch_seconds(struct file *file,
+ const char *buffer,
+ unsigned long count, void *data);
+extern int lprocfs_quota_rd_sync_blk(char *page, char **start, loff_t off,
+ int count, int *eof, void *data);
+extern int lprocfs_quota_wr_sync_blk(struct file *file, const char *buffer,
+ unsigned long count, void *data);
+extern int lprocfs_quota_rd_switch_qs(char *page, char **start, loff_t off,
+ int count, int *eof, void *data);
+extern int lprocfs_quota_wr_switch_qs(struct file *file,
+ const char *buffer,
+ unsigned long count, void *data);
+extern int lprocfs_quota_rd_boundary_factor(char *page, char **start, loff_t off,
+ int count, int *eof, void *data);
+extern int lprocfs_quota_wr_boundary_factor(struct file *file,
+ const char *buffer,
+ unsigned long count, void *data);
+extern int lprocfs_quota_rd_least_bunit(char *page, char **start, loff_t off,
+ int count, int *eof, void *data);
+extern int lprocfs_quota_wr_least_bunit(struct file *file,
+ const char *buffer,
+ unsigned long count, void *data);
+extern int lprocfs_quota_rd_least_iunit(char *page, char **start, loff_t off,
+ int count, int *eof, void *data);
+extern int lprocfs_quota_wr_least_iunit(struct file *file,
+ const char *buffer,
+ unsigned long count, void *data);
+extern int lprocfs_quota_rd_qs_factor(char *page, char **start, loff_t off,
+ int count, int *eof, void *data);
+extern int lprocfs_quota_wr_qs_factor(struct file *file,
+ const char *buffer,
+ unsigned long count, void *data);
+#else
+/* LPROCFS is not defined */
+
+#define proc_lustre_root NULL
+
+static inline void lprocfs_counter_add(struct lprocfs_stats *stats,
+ int index, long amount)
+{ return; }
+static inline void lprocfs_counter_incr(struct lprocfs_stats *stats,
+ int index)
+{ return; }
+static inline void lprocfs_counter_sub(struct lprocfs_stats *stats,
+ int index, long amount)
+{ return; }
+static inline void lprocfs_counter_decr(struct lprocfs_stats *stats,
+ int index)
+{ return; }
+static inline void lprocfs_counter_init(struct lprocfs_stats *stats,
+ int index, unsigned conf,
+ const char *name, const char *units)
+{ return; }
+
+static inline __u64 lc_read_helper(struct lprocfs_counter *lc,
+ enum lprocfs_fields_flags field)
+{ return 0; }
+
+/* NB: we return !NULL to satisfy error checker */
+static inline struct lprocfs_stats *
+lprocfs_alloc_stats(unsigned int num, enum lprocfs_stats_flags flags)
+{ return (struct lprocfs_stats *)1; }
+static inline void lprocfs_clear_stats(struct lprocfs_stats *stats)
+{ return; }
+static inline void lprocfs_free_stats(struct lprocfs_stats **stats)
+{ return; }
+static inline int lprocfs_register_stats(struct proc_dir_entry *root,
+ const char *name,
+ struct lprocfs_stats *stats)
+{ return 0; }
+static inline void lprocfs_init_ops_stats(int num_private_stats,
+ struct lprocfs_stats *stats)
+{ return; }
+static inline void lprocfs_init_mps_stats(int num_private_stats,
+ struct lprocfs_stats *stats)
+{ return; }
+static inline void lprocfs_init_ldlm_stats(struct lprocfs_stats *ldlm_stats)
+{ return; }
+static inline int lprocfs_alloc_obd_stats(struct obd_device *obddev,
+ unsigned int num_private_stats)
+{ return 0; }
+static inline int lprocfs_alloc_md_stats(struct obd_device *obddev,
+ unsigned int num_private_stats)
+{ return 0; }
+static inline void lprocfs_free_obd_stats(struct obd_device *obddev)
+{ return; }
+static inline void lprocfs_free_md_stats(struct obd_device *obddev)
+{ return; }
+
+struct obd_export;
+static inline int lprocfs_add_clear_entry(struct obd_export *exp)
+{ return 0; }
+static inline int lprocfs_exp_setup(struct obd_export *exp,lnet_nid_t *peer_nid,
+ int *newnid)
+{ return 0; }
+static inline int lprocfs_exp_cleanup(struct obd_export *exp)
+{ return 0; }
+static inline struct proc_dir_entry *
+lprocfs_add_simple(struct proc_dir_entry *root, char *name,
+ void *data, struct file_operations *fops)
+{return 0; }
+static inline struct proc_dir_entry *
+lprocfs_add_symlink(const char *name, struct proc_dir_entry *parent,
+ const char *format, ...)
+{return NULL; }
+static inline void lprocfs_free_per_client_stats(struct obd_device *obd)
+{ return; }
+static inline
+int lprocfs_nid_stats_clear_write(struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{return count;}
+static inline
+int lprocfs_nid_stats_clear_read(struct seq_file *m, void *data)
+{ return 0; }
+
+static inline struct proc_dir_entry *
+lprocfs_register(const char *name, struct proc_dir_entry *parent,
+ struct lprocfs_vars *list, void *data)
+{ return NULL; }
+static inline int lprocfs_add_vars(struct proc_dir_entry *root,
+ struct lprocfs_vars *var,
+ void *data)
+{ return 0; }
+static inline void lprocfs_remove(struct proc_dir_entry **root)
+{ return; }
+static inline void lprocfs_remove_proc_entry(const char *name,
+ struct proc_dir_entry *parent)
+{ return; }
+static inline int lprocfs_obd_setup(struct obd_device *dev,
+ struct lprocfs_vars *list)
+{ return 0; }
+static inline int lprocfs_obd_cleanup(struct obd_device *dev)
+{ return 0; }
+static inline int lprocfs_rd_u64(struct seq_file *m, void *data)
+{ return 0; }
+static inline int lprocfs_rd_uuid(struct seq_file *m, void *data)
+{ return 0; }
+static inline int lprocfs_rd_name(struct seq_file *m, void *data)
+{ return 0; }
+static inline int lprocfs_rd_server_uuid(struct seq_file *m, void *data)
+{ return 0; }
+static inline int lprocfs_rd_conn_uuid(struct seq_file *m, void *data)
+{ return 0; }
+static inline int lprocfs_rd_import(struct seq_file *m, void *data)
+{ return 0; }
+static inline int lprocfs_rd_pinger_recov(struct seq_file *m, void *n)
+{ return 0; }
+static inline int lprocfs_rd_state(struct seq_file *m, void *data)
+{ return 0; }
+static inline int lprocfs_rd_connect_flags(struct seq_file *m, void *data)
+{ return 0; }
+static inline int lprocfs_rd_num_exports(struct seq_file *m, void *data)
+{ return 0; }
+static inline int lprocfs_rd_numrefs(struct seq_file *m, void *data)
+{ return 0; }
+struct adaptive_timeout;
+static inline int lprocfs_at_hist_helper(struct seq_file *m,
+ struct adaptive_timeout *at)
+{ return 0; }
+static inline int lprocfs_rd_timeouts(struct seq_file *m, void *data)
+{ return 0; }
+static inline int lprocfs_wr_timeouts(struct file *file,
+ const char *buffer,
+ unsigned long count, void *data)
+{ return 0; }
+static inline int lprocfs_wr_evict_client(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{ return 0; }
+static inline int lprocfs_wr_ping(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{ return 0; }
+static inline int lprocfs_wr_import(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{ return 0; }
+static inline int lprocfs_wr_pinger_recov(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{ return 0; }
+
+/* Statfs helpers */
+static inline
+int lprocfs_rd_blksize(struct seq_file *m, void *data)
+{ return 0; }
+static inline
+int lprocfs_rd_kbytestotal(struct seq_file *m, void *data)
+{ return 0; }
+static inline
+int lprocfs_rd_kbytesfree(struct seq_file *m, void *data)
+{ return 0; }
+static inline
+int lprocfs_rd_kbytesavail(struct seq_file *m, void *data)
+{ return 0; }
+static inline
+int lprocfs_rd_filestotal(struct seq_file *m, void *data)
+{ return 0; }
+static inline
+int lprocfs_rd_filesfree(struct seq_file *m, void *data)
+{ return 0; }
+static inline
+void lprocfs_oh_tally(struct obd_histogram *oh, unsigned int value)
+{ return; }
+static inline
+void lprocfs_oh_tally_log2(struct obd_histogram *oh, unsigned int value)
+{ return; }
+static inline
+void lprocfs_oh_clear(struct obd_histogram *oh)
+{ return; }
+static inline
+unsigned long lprocfs_oh_sum(struct obd_histogram *oh)
+{ return 0; }
+static inline
+void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx,
+ struct lprocfs_counter *cnt)
+{ return; }
+static inline
+__u64 lprocfs_stats_collector(struct lprocfs_stats *stats, int idx,
+ enum lprocfs_fields_flags field)
+{ return (__u64)0; }
+
+#define LPROC_SEQ_FOPS_RO(name)
+#define LPROC_SEQ_FOPS(name)
+#define LPROC_SEQ_FOPS_RO_TYPE(name, type)
+#define LPROC_SEQ_FOPS_RW_TYPE(name, type)
+#define LPROC_SEQ_FOPS_WR_ONLY(name, type)
+
+/* lproc_ptlrpc.c */
+#define target_print_req NULL
+
+#endif /* LPROCFS */
+
+#endif /* LPROCFS_SNMP_H */
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
new file mode 100644
index 0000000..fa31be8
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -0,0 +1,1359 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __LUSTRE_LU_OBJECT_H
+#define __LUSTRE_LU_OBJECT_H
+
+#include <stdarg.h>
+#include <linux/libcfs/libcfs.h>
+#include <lustre/lustre_idl.h>
+#include <lu_ref.h>
+
+struct seq_file;
+struct proc_dir_entry;
+struct lustre_cfg;
+struct lprocfs_stats;
+
+/** \defgroup lu lu
+ * lu_* data-types represent server-side entities shared by data and meta-data
+ * stacks.
+ *
+ * Design goals:
+ *
+ * -# support for layering.
+ *
+ * Server side object is split into layers, one per device in the
+ * corresponding device stack. Individual layer is represented by struct
+ * lu_object. Compound layered object --- by struct lu_object_header. Most
+ * interface functions take lu_object as an argument and operate on the
+ * whole compound object. This decision was made due to the following
+ * reasons:
+ *
+ * - it's envisaged that lu_object will be used much more often than
+ * lu_object_header;
+ *
+ * - we want lower (non-top) layers to be able to initiate operations
+ * on the whole object.
+ *
+ * Generic code supports layering more complex than simple stacking, e.g.,
+ * it is possible that at some layer object "spawns" multiple sub-objects
+ * on the lower layer.
+ *
+ * -# fid-based identification.
+ *
+ * Compound object is uniquely identified by its fid. Objects are indexed
+ * by their fids (hash table is used for index).
+ *
+ * -# caching and life-cycle management.
+ *
+ * Object's life-time is controlled by reference counting. When reference
+ * count drops to 0, object is returned to cache. Cached objects still
+ * retain their identity (i.e., fid), and can be recovered from cache.
+ *
+ * Objects are kept in the global LRU list, and lu_site_purge() function
+ * can be used to reclaim given number of unused objects from the tail of
+ * the LRU.
+ *
+ * -# avoiding recursion.
+ *
+ * Generic code tries to replace recursion through layers by iterations
+ * where possible. Additionally, to reduce stack consumption, data are,
+ * when practically possible, allocated through the lu_context_key
+ * interface rather than on the stack.
+ * @{
+ */
+
+struct lu_site;
+struct lu_object;
+struct lu_device;
+struct lu_object_header;
+struct lu_context;
+struct lu_env;
+
+/**
+ * Operations common for data and meta-data devices.
+ */
+struct lu_device_operations {
+ /**
+ * Allocate object for the given device (without lower-layer
+ * parts). This is called by lu_object_operations::loo_object_init()
+ * from the parent layer, and should setup at least lu_object::lo_dev
+ * and lu_object::lo_ops fields of resulting lu_object.
+ *
+ * Object creation protocol.
+ *
+ * Due to design goal of avoiding recursion, object creation (see
+ * lu_object_alloc()) is somewhat involved:
+ *
+ * - first, lu_device_operations::ldo_object_alloc() method of the
+ * top-level device in the stack is called. It should allocate top
+ * level object (including lu_object_header), but without any
+ * lower-layer sub-object(s).
+ *
+ * - then lu_object_alloc() sets fid in the header of newly created
+ * object.
+ *
+ * - then lu_object_operations::loo_object_init() is called. It has
+ * to allocate lower-layer object(s). To do this,
+ * lu_object_operations::loo_object_init() calls ldo_object_alloc()
+ * of the lower-layer device(s).
+ *
+ * - for all new objects allocated by
+ * lu_object_operations::loo_object_init() (and inserted into object
+ * stack), lu_object_operations::loo_object_init() is called again
+ * repeatedly, until no new objects are created.
+ *
+ * \post ergo(!IS_ERR(result), result->lo_dev == d &&
+ * result->lo_ops != NULL);
+ */
+ struct lu_object *(*ldo_object_alloc)(const struct lu_env *env,
+ const struct lu_object_header *h,
+ struct lu_device *d);
+ /**
+ * process config specific for device.
+ */
+ int (*ldo_process_config)(const struct lu_env *env,
+ struct lu_device *, struct lustre_cfg *);
+ int (*ldo_recovery_complete)(const struct lu_env *,
+ struct lu_device *);
+
+ /**
+	 * Initialize local objects for the device. This method is called
+	 * after the layer has been initialized (after the LCFG_SETUP stage)
+	 * and before it starts serving user requests.
+ */
+
+ int (*ldo_prepare)(const struct lu_env *,
+ struct lu_device *parent,
+ struct lu_device *dev);
+
+};
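+
+/*
+ * Illustrative sketch (not part of this interface): a minimal
+ * ldo_object_alloc() implementation following the protocol described above.
+ * "struct foo_object", its "fo_lu" member, "foo_lu_obj_ops" and the
+ * OBD_ALLOC_PTR() helper are assumptions made for the example only.
+ *
+ *	static struct lu_object *foo_object_alloc(const struct lu_env *env,
+ *						  const struct lu_object_header *h,
+ *						  struct lu_device *d)
+ *	{
+ *		struct foo_object *obj;
+ *
+ *		OBD_ALLOC_PTR(obj);
+ *		if (obj == NULL)
+ *			return NULL;
+ *		lu_object_init(&obj->fo_lu, NULL, d);
+ *		obj->fo_lu.lo_ops = &foo_lu_obj_ops;
+ *		return &obj->fo_lu;
+ *	}
+ */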
+
+/**
+ * For lu_object_conf flags
+ */
+typedef enum {
+ /* This is a new object to be allocated, or the file
+	 * corresponding to the object does not exist. */
+ LOC_F_NEW = 0x00000001,
+} loc_flags_t;
+
+/**
+ * Object configuration, describing particulars of object being created. On
+ * the server this is not used, as server objects are fully identified by fid.
+ * On the client, the configuration contains struct lustre_md.
+ */
+struct lu_object_conf {
+ /**
+ * Some hints for obj find and alloc.
+ */
+ loc_flags_t loc_flags;
+};
+
+/**
+ * Type of "printer" function used by lu_object_operations::loo_object_print()
+ * method.
+ *
+ * Printer function is needed to provide some flexibility in (semi-)debugging
+ * output: possible implementations: printk, CDEBUG, sysfs/seq_file
+ */
+typedef int (*lu_printer_t)(const struct lu_env *env,
+ void *cookie, const char *format, ...)
+ __attribute__ ((format (printf, 3, 4)));
+
+/**
+ * Operations specific for particular lu_object.
+ */
+struct lu_object_operations {
+
+ /**
+ * Allocate lower-layer parts of the object by calling
+ * lu_device_operations::ldo_object_alloc() of the corresponding
+ * underlying device.
+ *
+	 * This method is called once for each object inserted into the
+	 * object stack. It is the responsibility of this method to insert
+	 * the lower-layer object(s) it creates into the appropriate places
+	 * of the object stack.
+ */
+ int (*loo_object_init)(const struct lu_env *env,
+ struct lu_object *o,
+ const struct lu_object_conf *conf);
+ /**
+ * Called (in top-to-bottom order) during object allocation after all
+ * layers were allocated and initialized. Can be used to perform
+ * initialization depending on lower layers.
+ */
+ int (*loo_object_start)(const struct lu_env *env,
+ struct lu_object *o);
+ /**
+ * Called before lu_object_operations::loo_object_free() to signal
+ * that object is being destroyed. Dual to
+ * lu_object_operations::loo_object_init().
+ */
+ void (*loo_object_delete)(const struct lu_env *env,
+ struct lu_object *o);
+ /**
+ * Dual to lu_device_operations::ldo_object_alloc(). Called when
+ * object is removed from memory.
+ */
+ void (*loo_object_free)(const struct lu_env *env,
+ struct lu_object *o);
+ /**
+ * Called when last active reference to the object is released (and
+ * object returns to the cache). This method is optional.
+ */
+ void (*loo_object_release)(const struct lu_env *env,
+ struct lu_object *o);
+ /**
+ * Optional debugging helper. Print given object.
+ */
+ int (*loo_object_print)(const struct lu_env *env, void *cookie,
+ lu_printer_t p, const struct lu_object *o);
+ /**
+	 * Optional debugging method. Returns true iff the object is internally
+ * consistent.
+ */
+ int (*loo_object_invariant)(const struct lu_object *o);
+};
+
+/**
+ * Type of lu_device.
+ */
+struct lu_device_type;
+
+/**
+ * Device: a layer in the server side abstraction stacking.
+ */
+struct lu_device {
+ /**
+ * reference count. This is incremented, in particular, on each object
+ * created at this layer.
+ *
+ * \todo XXX which means that atomic_t is probably too small.
+ */
+ atomic_t ld_ref;
+ /**
+ * Pointer to device type. Never modified once set.
+ */
+ struct lu_device_type *ld_type;
+ /**
+ * Operation vector for this device.
+ */
+ const struct lu_device_operations *ld_ops;
+ /**
+ * Stack this device belongs to.
+ */
+ struct lu_site *ld_site;
+ struct proc_dir_entry *ld_proc_entry;
+
+ /** \todo XXX: temporary back pointer into obd. */
+ struct obd_device *ld_obd;
+ /**
+ * A list of references to this object, for debugging.
+ */
+ struct lu_ref ld_reference;
+ /**
+ * Link the device to the site.
+ **/
+ struct list_head ld_linkage;
+};
+
+struct lu_device_type_operations;
+
+/**
+ * Tag bits for device type. They are used to distinguish certain groups of
+ * device types.
+ */
+enum lu_device_tag {
+ /** this is meta-data device */
+ LU_DEVICE_MD = (1 << 0),
+ /** this is data device */
+ LU_DEVICE_DT = (1 << 1),
+ /** data device in the client stack */
+ LU_DEVICE_CL = (1 << 2)
+};
+
+/**
+ * Type of device.
+ */
+struct lu_device_type {
+ /**
+ * Tag bits. Taken from enum lu_device_tag. Never modified once set.
+ */
+ __u32 ldt_tags;
+ /**
+ * Name of this class. Unique system-wide. Never modified once set.
+ */
+ char *ldt_name;
+ /**
+ * Operations for this type.
+ */
+ const struct lu_device_type_operations *ldt_ops;
+ /**
+ * \todo XXX: temporary pointer to associated obd_type.
+ */
+ struct obd_type *ldt_obd_type;
+ /**
+ * \todo XXX: temporary: context tags used by obd_*() calls.
+ */
+ __u32 ldt_ctx_tags;
+ /**
+ * Number of existing device type instances.
+ */
+ unsigned ldt_device_nr;
+ /**
+ * Linkage into a global list of all device types.
+ *
+ * \see lu_device_types.
+ */
+ struct list_head ldt_linkage;
+};
+
+/**
+ * Operations on a device type.
+ */
+struct lu_device_type_operations {
+ /**
+ * Allocate new device.
+ */
+ struct lu_device *(*ldto_device_alloc)(const struct lu_env *env,
+ struct lu_device_type *t,
+ struct lustre_cfg *lcfg);
+ /**
+ * Free device. Dual to
+ * lu_device_type_operations::ldto_device_alloc(). Returns pointer to
+ * the next device in the stack.
+ */
+ struct lu_device *(*ldto_device_free)(const struct lu_env *,
+ struct lu_device *);
+
+ /**
+ * Initialize the devices after allocation
+ */
+ int (*ldto_device_init)(const struct lu_env *env,
+ struct lu_device *, const char *,
+ struct lu_device *);
+ /**
+ * Finalize device. Dual to
+ * lu_device_type_operations::ldto_device_init(). Returns pointer to
+ * the next device in the stack.
+ */
+ struct lu_device *(*ldto_device_fini)(const struct lu_env *env,
+ struct lu_device *);
+ /**
+ * Initialize device type. This is called on module load.
+ */
+ int (*ldto_init)(struct lu_device_type *t);
+ /**
+ * Finalize device type. Dual to
+ * lu_device_type_operations::ldto_init(). Called on module unload.
+ */
+ void (*ldto_fini)(struct lu_device_type *t);
+ /**
+ * Called when the first device is created.
+ */
+ void (*ldto_start)(struct lu_device_type *t);
+ /**
+ * Called when number of devices drops to 0.
+ */
+ void (*ldto_stop)(struct lu_device_type *t);
+};
+
+static inline int lu_device_is_md(const struct lu_device *d)
+{
+ return ergo(d != NULL, d->ld_type->ldt_tags & LU_DEVICE_MD);
+}
+
+/**
+ * Flags for the object layers.
+ */
+enum lu_object_flags {
+ /**
+	 * This flag is set if lu_object_operations::loo_object_init() has
+ * been called for this layer. Used by lu_object_alloc().
+ */
+ LU_OBJECT_ALLOCATED = (1 << 0)
+};
+
+/**
+ * Common object attributes.
+ */
+struct lu_attr {
+ /** size in bytes */
+ __u64 la_size;
+ /** modification time in seconds since Epoch */
+ obd_time la_mtime;
+ /** access time in seconds since Epoch */
+ obd_time la_atime;
+ /** change time in seconds since Epoch */
+ obd_time la_ctime;
+ /** 512-byte blocks allocated to object */
+ __u64 la_blocks;
+ /** permission bits and file type */
+ __u32 la_mode;
+ /** owner id */
+ __u32 la_uid;
+ /** group id */
+ __u32 la_gid;
+ /** object flags */
+ __u32 la_flags;
+ /** number of persistent references to this object */
+ __u32 la_nlink;
+ /** blk bits of the object*/
+ __u32 la_blkbits;
+ /** blk size of the object*/
+ __u32 la_blksize;
+ /** real device */
+ __u32 la_rdev;
+ /**
+ * valid bits
+ *
+ * \see enum la_valid
+ */
+ __u64 la_valid;
+};
+
+/** Bit-mask of valid attributes */
+enum la_valid {
+ LA_ATIME = 1 << 0,
+ LA_MTIME = 1 << 1,
+ LA_CTIME = 1 << 2,
+ LA_SIZE = 1 << 3,
+ LA_MODE = 1 << 4,
+ LA_UID = 1 << 5,
+ LA_GID = 1 << 6,
+ LA_BLOCKS = 1 << 7,
+ LA_TYPE = 1 << 8,
+ LA_FLAGS = 1 << 9,
+ LA_NLINK = 1 << 10,
+ LA_RDEV = 1 << 11,
+ LA_BLKSIZE = 1 << 12,
+ LA_KILL_SUID = 1 << 13,
+ LA_KILL_SGID = 1 << 14,
+};
+
+/**
+ * Layer in the layered object.
+ */
+struct lu_object {
+ /**
+ * Header for this object.
+ */
+ struct lu_object_header *lo_header;
+ /**
+ * Device for this layer.
+ */
+ struct lu_device *lo_dev;
+ /**
+ * Operations for this object.
+ */
+ const struct lu_object_operations *lo_ops;
+ /**
+ * Linkage into list of all layers.
+ */
+ struct list_head lo_linkage;
+ /**
+ * Depth. Top level layer depth is 0.
+ */
+ int lo_depth;
+ /**
+ * Flags from enum lu_object_flags.
+ */
+ __u32 lo_flags;
+ /**
+ * Link to the device, for debugging.
+ */
+ struct lu_ref_link lo_dev_ref;
+};
+
+enum lu_object_header_flags {
+ /**
+ * Don't keep this object in cache. Object will be destroyed as soon
+ * as last reference to it is released. This flag cannot be cleared
+ * once set.
+ */
+ LU_OBJECT_HEARD_BANSHEE = 0,
+ /**
+	 * Marks that this object has already been taken out of the cache.
+ */
+ LU_OBJECT_UNHASHED = 1
+};
+
+enum lu_object_header_attr {
+ LOHA_EXISTS = 1 << 0,
+ LOHA_REMOTE = 1 << 1,
+ /**
+ * UNIX file type is stored in S_IFMT bits.
+ */
+ LOHA_FT_START = 001 << 12, /**< S_IFIFO */
+ LOHA_FT_END = 017 << 12, /**< S_IFMT */
+};
+
+/**
+ * "Compound" object, consisting of multiple layers.
+ *
+ * Compound object with given fid is unique with given lu_site.
+ *
+ * Note that the object does *not* necessarily correspond to a real object in
+ * persistent storage: the object is an anchor for locking and method calling,
+ * so it is created for things like a not-yet-existing child created by mkdir
+ * or create calls. lu_object_operations::loo_exists() can be used to check
+ * whether the object is backed by a persistent storage entity.
+ */
+struct lu_object_header {
+ /**
+ * Object flags from enum lu_object_header_flags. Set and checked
+ * atomically.
+ */
+ unsigned long loh_flags;
+ /**
+ * Object reference count. Protected by lu_site::ls_guard.
+ */
+ atomic_t loh_ref;
+ /**
+ * Fid, uniquely identifying this object.
+ */
+ struct lu_fid loh_fid;
+ /**
+ * Common object attributes, cached for efficiency. From enum
+ * lu_object_header_attr.
+ */
+ __u32 loh_attr;
+ /**
+ * Linkage into per-site hash table. Protected by lu_site::ls_guard.
+ */
+ struct hlist_node loh_hash;
+ /**
+ * Linkage into per-site LRU list. Protected by lu_site::ls_guard.
+ */
+ struct list_head loh_lru;
+ /**
+	 * Linkage into list of layers. Never modified once set (except late
+ * during object destruction). No locking is necessary.
+ */
+ struct list_head loh_layers;
+ /**
+ * A list of references to this object, for debugging.
+ */
+ struct lu_ref loh_reference;
+};
+
+struct fld;
+
+struct lu_site_bkt_data {
+ /**
+	 * number of busy objects on this bucket
+ */
+ long lsb_busy;
+ /**
+ * LRU list, updated on each access to object. Protected by
+ * bucket lock of lu_site::ls_obj_hash.
+ *
+	 * "Cold" end of LRU is lu_site::ls_lru.next. Accessed objects are
+ * moved to the lu_site::ls_lru.prev (this is due to the non-existence
+ * of list_for_each_entry_safe_reverse()).
+ */
+ struct list_head lsb_lru;
+ /**
+ * Wait-queue signaled when an object in this site is ultimately
+ * destroyed (lu_object_free()). It is used by lu_object_find() to
+	 * wait before re-trying when an object in the process of destruction is
+ * found in the hash table.
+ *
+ * \see htable_lookup().
+ */
+ wait_queue_head_t lsb_marche_funebre;
+};
+
+enum {
+ LU_SS_CREATED = 0,
+ LU_SS_CACHE_HIT,
+ LU_SS_CACHE_MISS,
+ LU_SS_CACHE_RACE,
+ LU_SS_CACHE_DEATH_RACE,
+ LU_SS_LRU_PURGED,
+ LU_SS_LAST_STAT
+};
+
+/**
+ * lu_site is a "compartment" within which objects are unique, and LRU
+ * discipline is maintained.
+ *
+ * lu_site exists so that multiple layered stacks can co-exist in the same
+ * address space.
+ *
+ * lu_site has the same relation to lu_device as lu_object_header to
+ * lu_object.
+ */
+struct lu_site {
+ /**
+ * objects hash table
+ */
+ cfs_hash_t *ls_obj_hash;
+ /**
+ * index of bucket on hash table while purging
+ */
+ int ls_purge_start;
+ /**
+ * Top-level device for this stack.
+ */
+ struct lu_device *ls_top_dev;
+ /**
+ * Bottom-level device for this stack
+ */
+ struct lu_device *ls_bottom_dev;
+ /**
+ * Linkage into global list of sites.
+ */
+ struct list_head ls_linkage;
+ /**
+ * List for lu device for this site, protected
+ * by ls_ld_lock.
+ **/
+ struct list_head ls_ld_linkage;
+ spinlock_t ls_ld_lock;
+
+ /**
+ * lu_site stats
+ */
+ struct lprocfs_stats *ls_stats;
+ /**
+ * XXX: a hack! fld has to find md_site via site, remove when possible
+ */
+ struct seq_server_site *ld_seq_site;
+};
+
+static inline struct lu_site_bkt_data *
+lu_site_bkt_from_fid(struct lu_site *site, struct lu_fid *fid)
+{
+ cfs_hash_bd_t bd;
+
+ cfs_hash_bd_get(site->ls_obj_hash, fid, &bd);
+ return cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);
+}
+
+static inline struct seq_server_site *lu_site2seq(const struct lu_site *s)
+{
+ return s->ld_seq_site;
+}
+
+/** \name ctors
+ * Constructors/destructors.
+ * @{
+ */
+
+int lu_site_init (struct lu_site *s, struct lu_device *d);
+void lu_site_fini (struct lu_site *s);
+int lu_site_init_finish (struct lu_site *s);
+void lu_stack_fini (const struct lu_env *env, struct lu_device *top);
+void lu_device_get (struct lu_device *d);
+void lu_device_put (struct lu_device *d);
+int lu_device_init (struct lu_device *d, struct lu_device_type *t);
+void lu_device_fini (struct lu_device *d);
+int lu_object_header_init(struct lu_object_header *h);
+void lu_object_header_fini(struct lu_object_header *h);
+int lu_object_init (struct lu_object *o,
+ struct lu_object_header *h, struct lu_device *d);
+void lu_object_fini (struct lu_object *o);
+void lu_object_add_top (struct lu_object_header *h, struct lu_object *o);
+void lu_object_add (struct lu_object *before, struct lu_object *o);
+
+void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d);
+void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d);
+
+/**
+ * Helpers to initialize and finalize device types.
+ */
+
+int lu_device_type_init(struct lu_device_type *ldt);
+void lu_device_type_fini(struct lu_device_type *ldt);
+void lu_types_stop(void);
+
+/** @} ctors */
+
+/** \name caching
+ * Caching and reference counting.
+ * @{
+ */
+
+/**
+ * Acquire an additional reference to the given object. To acquire the
+ * initial reference, use lu_object_find().
+ */
+static inline void lu_object_get(struct lu_object *o)
+{
+ LASSERT(atomic_read(&o->lo_header->loh_ref) > 0);
+ atomic_inc(&o->lo_header->loh_ref);
+}
+
+/**
+ * Return true if the object will not be cached after the last reference to it is
+ * released.
+ */
+static inline int lu_object_is_dying(const struct lu_object_header *h)
+{
+ return test_bit(LU_OBJECT_HEARD_BANSHEE, &h->loh_flags);
+}
+
+void lu_object_put(const struct lu_env *env, struct lu_object *o);
+void lu_object_put_nocache(const struct lu_env *env, struct lu_object *o);
+void lu_object_unhash(const struct lu_env *env, struct lu_object *o);
+
+int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr);
+
+void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
+ lu_printer_t printer);
+struct lu_object *lu_object_find(const struct lu_env *env,
+ struct lu_device *dev, const struct lu_fid *f,
+ const struct lu_object_conf *conf);
+struct lu_object *lu_object_find_at(const struct lu_env *env,
+ struct lu_device *dev,
+ const struct lu_fid *f,
+ const struct lu_object_conf *conf);
+struct lu_object *lu_object_find_slice(const struct lu_env *env,
+ struct lu_device *dev,
+ const struct lu_fid *f,
+ const struct lu_object_conf *conf);
+/** @} caching */
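+
+/*
+ * Illustrative sketch (not part of this interface): the typical find/use/put
+ * cycle for the functions above. "env", "dev" and "fid" are assumed to be
+ * set up by the caller; passing NULL as the configuration is an assumption
+ * of this example.
+ *
+ *	struct lu_object *obj;
+ *
+ *	obj = lu_object_find(env, dev, fid, NULL);
+ *	if (IS_ERR(obj))
+ *		return PTR_ERR(obj);
+ *	... use the object ...
+ *	lu_object_put(env, obj);
+ */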
+
+/** \name helpers
+ * Helpers.
+ * @{
+ */
+
+/**
+ * First (topmost) sub-object of given compound object
+ */
+static inline struct lu_object *lu_object_top(struct lu_object_header *h)
+{
+ LASSERT(!list_empty(&h->loh_layers));
+ return container_of0(h->loh_layers.next, struct lu_object, lo_linkage);
+}
+
+/**
+ * Next sub-object in the layering
+ */
+static inline struct lu_object *lu_object_next(const struct lu_object *o)
+{
+ return container_of0(o->lo_linkage.next, struct lu_object, lo_linkage);
+}
+
+/**
+ * Pointer to the fid of this object.
+ */
+static inline const struct lu_fid *lu_object_fid(const struct lu_object *o)
+{
+ return &o->lo_header->loh_fid;
+}
+
+/**
+ * return device operations vector for this object
+ */
+static inline const struct lu_device_operations *
+lu_object_ops(const struct lu_object *o)
+{
+ return o->lo_dev->ld_ops;
+}
+
+/**
+ * Given a compound object, find its slice, corresponding to the device type
+ * \a dtype.
+ */
+struct lu_object *lu_object_locate(struct lu_object_header *h,
+ const struct lu_device_type *dtype);
+
+/**
+ * Printer function emitting messages through libcfs_debug_msg().
+ */
+int lu_cdebug_printer(const struct lu_env *env,
+ void *cookie, const char *format, ...);
+
+/**
+ * Print object description followed by a user-supplied message.
+ */
+#define LU_OBJECT_DEBUG(mask, env, object, format, ...) \
+do { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
+ \
+ if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ lu_object_print(env, &msgdata, lu_cdebug_printer, object);\
+ CDEBUG(mask, format , ## __VA_ARGS__); \
+ } \
+} while (0)
+
+/**
+ * Print short object description followed by a user-supplied message.
+ */
+#define LU_OBJECT_HEADER(mask, env, object, format, ...) \
+do { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
+ \
+ if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ lu_object_header_print(env, &msgdata, lu_cdebug_printer,\
+ (object)->lo_header); \
+ lu_cdebug_printer(env, &msgdata, "\n"); \
+ CDEBUG(mask, format , ## __VA_ARGS__); \
+ } \
+} while (0)
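+
+/*
+ * Illustrative sketch (not part of this interface): emitting a debugging
+ * line for an object. "env" and "obj" are assumed to be available, and
+ * D_INFO is the usual libcfs debug mask.
+ *
+ *	LU_OBJECT_DEBUG(D_INFO, env, obj, "found in cache\n");
+ */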
+
+void lu_object_print (const struct lu_env *env, void *cookie,
+ lu_printer_t printer, const struct lu_object *o);
+void lu_object_header_print(const struct lu_env *env, void *cookie,
+ lu_printer_t printer,
+ const struct lu_object_header *hdr);
+
+/**
+ * Check object consistency.
+ */
+int lu_object_invariant(const struct lu_object *o);
+
+
+/**
+ * Check whether the object exists, whether on local or remote storage.
+ * Note: LOHA_EXISTS will be set once someone has created the object,
+ * and it does not need to be committed to storage.
+ */
+#define lu_object_exists(o) ((o)->lo_header->loh_attr & LOHA_EXISTS)
+
+/**
+ * Check whether the object is on remote storage.
+ */
+#define lu_object_remote(o) unlikely((o)->lo_header->loh_attr & LOHA_REMOTE)
+
+static inline int lu_object_assert_exists(const struct lu_object *o)
+{
+ return lu_object_exists(o);
+}
+
+static inline int lu_object_assert_not_exists(const struct lu_object *o)
+{
+ return !lu_object_exists(o);
+}
+
+/**
+ * Attr of this object.
+ */
+static inline __u32 lu_object_attr(const struct lu_object *o)
+{
+ LASSERT(lu_object_exists(o) != 0);
+ return o->lo_header->loh_attr;
+}
+
+static inline void lu_object_ref_add(struct lu_object *o,
+ const char *scope,
+ const void *source)
+{
+ lu_ref_add(&o->lo_header->loh_reference, scope, source);
+}
+
+static inline void lu_object_ref_add_at(struct lu_object *o,
+ struct lu_ref_link *link,
+ const char *scope,
+ const void *source)
+{
+ lu_ref_add_at(&o->lo_header->loh_reference, link, scope, source);
+}
+
+static inline void lu_object_ref_del(struct lu_object *o,
+ const char *scope, const void *source)
+{
+ lu_ref_del(&o->lo_header->loh_reference, scope, source);
+}
+
+static inline void lu_object_ref_del_at(struct lu_object *o,
+ struct lu_ref_link *link,
+ const char *scope, const void *source)
+{
+ lu_ref_del_at(&o->lo_header->loh_reference, link, scope, source);
+}
+
+/** input params, should be filled out by mdt */
+struct lu_rdpg {
+ /** hash */
+ __u64 rp_hash;
+ /** count in bytes */
+ unsigned int rp_count;
+ /** number of pages */
+ unsigned int rp_npages;
+ /** requested attr */
+ __u32 rp_attrs;
+ /** pointers to pages */
+ struct page **rp_pages;
+};
+
+enum lu_xattr_flags {
+ LU_XATTR_REPLACE = (1 << 0),
+ LU_XATTR_CREATE = (1 << 1)
+};
+
+/** @} helpers */
+
+/** \name lu_context
+ * @{ */
+
+/** For lu_context health-checks */
+enum lu_context_state {
+ LCS_INITIALIZED = 1,
+ LCS_ENTERED,
+ LCS_LEFT,
+ LCS_FINALIZED
+};
+
+/**
+ * lu_context. Execution context for lu_object methods. Currently associated
+ * with thread.
+ *
+ * All lu_object methods, except device and device type methods (called during
+ * system initialization and shutdown) are executed "within" some
+ * lu_context. This means, that pointer to some "current" lu_context is passed
+ * as an argument to all methods.
+ *
+ * All service ptlrpc threads create lu_context as part of their
+ * initialization. It is possible to create "stand-alone" context for other
+ * execution environments (like system calls).
+ *
+ * lu_object methods mainly use lu_context through lu_context_key interface
+ * that allows each layer to associate arbitrary pieces of data with each
+ * context (see pthread_key_create(3) for similar interface).
+ *
+ * On a client, lu_context is bound to a thread, see cl_env_get().
+ *
+ * \see lu_context_key
+ */
+struct lu_context {
+ /**
+ * lu_context is used on the client side too. Yet we don't want to
+ * allocate values of server-side keys for the client contexts and
+ * vice versa.
+ *
+	 * To achieve this, a set of tags is introduced. Contexts and keys
+	 * are marked with tags. Key values are created only for contexts
+	 * whose set of tags has a non-empty intersection with that of the
+	 * key. Tags are taken from enum lu_context_tag.
+ */
+ __u32 lc_tags;
+ enum lu_context_state lc_state;
+ /**
+ * Pointer to the home service thread. NULL for other execution
+ * contexts.
+ */
+ struct ptlrpc_thread *lc_thread;
+ /**
+ * Pointer to an array with key values. Internal implementation
+ * detail.
+ */
+ void **lc_value;
+ /**
+ * Linkage into a list of all remembered contexts. Only
+ * `non-transient' contexts, i.e., ones created for service threads
+ * are placed here.
+ */
+ struct list_head lc_remember;
+ /**
+ * Version counter used to skip calls to lu_context_refill() when no
+ * keys were registered.
+ */
+ unsigned lc_version;
+ /**
+ * Debugging cookie.
+ */
+ unsigned lc_cookie;
+};
+
+/**
+ * lu_context_key interface. Similar to pthread_key.
+ */
+
+enum lu_context_tag {
+ /**
+ * Thread on md server
+ */
+ LCT_MD_THREAD = 1 << 0,
+ /**
+ * Thread on dt server
+ */
+ LCT_DT_THREAD = 1 << 1,
+ /**
+ * Context for transaction handle
+ */
+ LCT_TX_HANDLE = 1 << 2,
+ /**
+ * Thread on client
+ */
+ LCT_CL_THREAD = 1 << 3,
+ /**
+ * A per-request session on a server, and a per-system-call session on
+ * a client.
+ */
+ LCT_SESSION = 1 << 4,
+ /**
+ * Per-request data on an OSP device
+ */
+ LCT_OSP_THREAD = 1 << 5,
+ /**
+ * MGS device thread
+ */
+ LCT_MG_THREAD = 1 << 6,
+ /**
+ * Context for local operations
+ */
+ LCT_LOCAL = 1 << 7,
+ /**
+ * Set when at least one of the keys having values in this context has
+ * a non-NULL lu_context_key::lct_exit() method. This is used to
+ * optimize lu_context_exit() calls.
+ */
+ LCT_HAS_EXIT = 1 << 28,
+ /**
+ * Don't add references for modules creating key values in that context.
+ * This is only for contexts used internally by lu_object framework.
+ */
+ LCT_NOREF = 1 << 29,
+ /**
+ * Key is being prepared for retiring, don't create new values for it.
+ */
+ LCT_QUIESCENT = 1 << 30,
+ /**
+ * Context should be remembered.
+ */
+ LCT_REMEMBER = 1 << 31,
+ /**
+ * Contexts usable in cache shrinker thread.
+ */
+ LCT_SHRINKER = LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD|LCT_NOREF
+};
+
+/**
+ * Key. Represents per-context value slot.
+ *
+ * Keys are usually registered when the module owning the key is initialized,
+ * and de-registered when the module is unloaded. Once a key is registered, all
+ * new contexts with matching tags will get a key value. "Old" contexts, already
+ * initialized at the time of key registration, can be forced to get a key value
+ * by calling lu_context_refill().
+ *
+ * Every key value is counted in lu_context_key::lct_used and acquires a
+ * reference on its owning module. This means that all key values have to be
+ * destroyed before the module can be unloaded. This is usually achieved by
+ * stopping the threads started by the module, which created contexts in their
+ * entry functions. The situation is complicated by threads shared by multiple
+ * modules, like the ptlrpcd daemon on a client. To work around this problem,
+ * contexts created in such threads are `remembered' (see
+ * LCT_REMEMBER), i.e., added to a global list. When a module is preparing
+ * for unloading it does the following:
+ *
+ * - marks its keys as `quiescent' (lu_context_tag::LCT_QUIESCENT)
+ * preventing new key values from being allocated in the new contexts,
+ * and
+ *
+ * - scans a list of remembered contexts, destroying values of module
+ * keys, thus releasing references to the module.
+ *
+ * This is done by lu_context_key_quiesce(). If the module is re-activated
+ * before the key has been de-registered, a lu_context_key_revive() call clears
+ * the `quiescent' marker.
+ *
+ * lu_context code doesn't provide any internal synchronization for these
+ * activities---it's assumed that startup (including threads start-up) and
+ * shutdown are serialized by some external means.
+ *
+ * \see lu_context
+ */
+struct lu_context_key {
+ /**
+ * Set of tags for which values of this key are to be instantiated.
+ */
+ __u32 lct_tags;
+ /**
+ * Value constructor. This is called when a new value is created for a
+ * context. Returns a pointer to the new value or an error pointer.
+ */
+ void *(*lct_init)(const struct lu_context *ctx,
+ struct lu_context_key *key);
+ /**
+ * Value destructor. Called when a context with a previously allocated
+ * value of this slot is destroyed. \a data is the value that was returned
+ * by a matching call to lu_context_key::lct_init().
+ */
+ void (*lct_fini)(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data);
+ /**
+ * Optional method called on lu_context_exit() for all allocated
+ * keys. Can be used by debugging code checking that locks are
+ * released, etc.
+ */
+ void (*lct_exit)(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data);
+ /**
+ * Internal implementation detail: index within lu_context::lc_value[]
+ * reserved for this key.
+ */
+ int lct_index;
+ /**
+ * Internal implementation detail: number of values created for this
+ * key.
+ */
+ atomic_t lct_used;
+ /**
+ * Internal implementation detail: module for this key.
+ */
+ struct module *lct_owner;
+ /**
+ * References to this key. For debugging.
+ */
+ struct lu_ref lct_reference;
+};
+
+#define LU_KEY_INIT(mod, type) \
+ static void* mod##_key_init(const struct lu_context *ctx, \
+ struct lu_context_key *key) \
+ { \
+ type *value; \
+ \
+ CLASSERT(PAGE_CACHE_SIZE >= sizeof (*value)); \
+ \
+ OBD_ALLOC_PTR(value); \
+ if (value == NULL) \
+ value = ERR_PTR(-ENOMEM); \
+ \
+ return value; \
+ } \
+ struct __##mod##__dummy_init {;} /* semicolon catcher */
+
+#define LU_KEY_FINI(mod, type) \
+ static void mod##_key_fini(const struct lu_context *ctx, \
+ struct lu_context_key *key, void* data) \
+ { \
+ type *info = data; \
+ \
+ OBD_FREE_PTR(info); \
+ } \
+ struct __##mod##__dummy_fini {;} /* semicolon catcher */
+
+#define LU_KEY_INIT_FINI(mod, type) \
+ LU_KEY_INIT(mod,type); \
+ LU_KEY_FINI(mod,type)
+
+#define LU_CONTEXT_KEY_DEFINE(mod, tags) \
+ struct lu_context_key mod##_thread_key = { \
+ .lct_tags = tags, \
+ .lct_init = mod##_key_init, \
+ .lct_fini = mod##_key_fini \
+ }
+
+#define LU_CONTEXT_KEY_INIT(key) \
+do { \
+ (key)->lct_owner = THIS_MODULE; \
+} while (0)
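+
+/*
+ * Illustrative sketch only (not part of this header): a hypothetical module
+ * "foo" keeping per-context scratch data would typically combine the helpers
+ * above as follows; struct foo_thread_info and the LCT_MD_THREAD tag are
+ * assumptions made purely for this example.
+ *
+ * \code
+ * struct foo_thread_info {
+ *         int fti_scratch;
+ * };
+ *
+ * LU_KEY_INIT_FINI(foo, struct foo_thread_info);
+ * LU_CONTEXT_KEY_DEFINE(foo, LCT_MD_THREAD);
+ *
+ * // At module load time:
+ * LU_CONTEXT_KEY_INIT(&foo_thread_key);
+ * rc = lu_context_key_register(&foo_thread_key);
+ *
+ * // Later, a method running in a tagged context retrieves its value:
+ * info = lu_context_key_get(ctx, &foo_thread_key);
+ * \endcode
+ */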
+
+int lu_context_key_register(struct lu_context_key *key);
+void lu_context_key_degister(struct lu_context_key *key);
+void *lu_context_key_get (const struct lu_context *ctx,
+ const struct lu_context_key *key);
+void lu_context_key_quiesce (struct lu_context_key *key);
+void lu_context_key_revive (struct lu_context_key *key);
+
+
+/*
+ * LU_KEY_INIT_GENERIC() has to be a macro to correctly determine an
+ * owning module.
+ */
+
+#define LU_KEY_INIT_GENERIC(mod) \
+ static void mod##_key_init_generic(struct lu_context_key *k, ...) \
+ { \
+ struct lu_context_key *key = k; \
+ va_list args; \
+ \
+ va_start(args, k); \
+ do { \
+ LU_CONTEXT_KEY_INIT(key); \
+ key = va_arg(args, struct lu_context_key *); \
+ } while (key != NULL); \
+ va_end(args); \
+ }
+
+#define LU_TYPE_INIT(mod, ...) \
+ LU_KEY_INIT_GENERIC(mod) \
+ static int mod##_type_init(struct lu_device_type *t) \
+ { \
+ mod##_key_init_generic(__VA_ARGS__, NULL); \
+ return lu_context_key_register_many(__VA_ARGS__, NULL); \
+ } \
+ struct __##mod##_dummy_type_init {;}
+
+#define LU_TYPE_FINI(mod, ...) \
+ static void mod##_type_fini(struct lu_device_type *t) \
+ { \
+ lu_context_key_degister_many(__VA_ARGS__, NULL); \
+ } \
+ struct __##mod##_dummy_type_fini {;}
+
+#define LU_TYPE_START(mod, ...) \
+ static void mod##_type_start(struct lu_device_type *t) \
+ { \
+ lu_context_key_revive_many(__VA_ARGS__, NULL); \
+ } \
+ struct __##mod##_dummy_type_start {;}
+
+#define LU_TYPE_STOP(mod, ...) \
+ static void mod##_type_stop(struct lu_device_type *t) \
+ { \
+ lu_context_key_quiesce_many(__VA_ARGS__, NULL); \
+ } \
+ struct __##mod##_dummy_type_stop {;}
+
+
+
+#define LU_TYPE_INIT_FINI(mod, ...) \
+ LU_TYPE_INIT(mod, __VA_ARGS__); \
+ LU_TYPE_FINI(mod, __VA_ARGS__); \
+ LU_TYPE_START(mod, __VA_ARGS__); \
+ LU_TYPE_STOP(mod, __VA_ARGS__)
+
+int lu_context_init (struct lu_context *ctx, __u32 tags);
+void lu_context_fini (struct lu_context *ctx);
+void lu_context_enter (struct lu_context *ctx);
+void lu_context_exit (struct lu_context *ctx);
+int lu_context_refill(struct lu_context *ctx);
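+
+/*
+ * A minimal lifecycle sketch (illustrative only): a stand-alone context is
+ * typically driven through the calls above in this order; error handling is
+ * omitted and some_key is a hypothetical registered key.
+ *
+ * \code
+ * struct lu_context ctx;
+ *
+ * rc = lu_context_init(&ctx, LCT_LOCAL);
+ * lu_context_enter(&ctx);
+ * // ... use per-context values via lu_context_key_get(&ctx, &some_key) ...
+ * lu_context_exit(&ctx);
+ * lu_context_fini(&ctx);
+ * \endcode
+ */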
+
+/*
+ * Helper functions to operate on multiple keys. These are used by the default
+ * device type operations, defined by LU_TYPE_INIT_FINI().
+ */
+
+int lu_context_key_register_many(struct lu_context_key *k, ...);
+void lu_context_key_degister_many(struct lu_context_key *k, ...);
+void lu_context_key_revive_many (struct lu_context_key *k, ...);
+void lu_context_key_quiesce_many (struct lu_context_key *k, ...);
+
+/*
+ * update/clear ctx/ses tags.
+ */
+void lu_context_tags_update(__u32 tags);
+void lu_context_tags_clear(__u32 tags);
+void lu_session_tags_update(__u32 tags);
+void lu_session_tags_clear(__u32 tags);
+
+/**
+ * Environment.
+ */
+struct lu_env {
+ /**
+ * "Local" context, used to store data instead of stack.
+ */
+ struct lu_context le_ctx;
+ /**
+ * "Session" context for per-request data.
+ */
+ struct lu_context *le_ses;
+};
+
+int lu_env_init (struct lu_env *env, __u32 tags);
+void lu_env_fini (struct lu_env *env);
+int lu_env_refill(struct lu_env *env);
+int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags, __u32 stags);
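+
+/*
+ * Illustrative sketch only: callers usually pair lu_env_init() and
+ * lu_env_fini() around a unit of work; the LCT_LOCAL tag is just an example
+ * choice.
+ *
+ * \code
+ * struct lu_env env;
+ *
+ * rc = lu_env_init(&env, LCT_LOCAL);
+ * if (rc == 0) {
+ *         // ... pass &env to methods that need an environment ...
+ *         lu_env_fini(&env);
+ * }
+ * \endcode
+ */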
+
+/** @} lu_context */
+
+/**
+ * Output site statistical counters into a buffer. Suitable for
+ * ll_rd_*()-style functions.
+ */
+int lu_site_stats_print(const struct lu_site *s, struct seq_file *m);
+
+/**
+ * Common name structure to be passed around for various name related methods.
+ */
+struct lu_name {
+ const char *ln_name;
+ int ln_namelen;
+};
+
+/**
+ * Common buffer structure to be passed around for various xattr_{s,g}et()
+ * methods.
+ */
+struct lu_buf {
+ void *lb_buf;
+ ssize_t lb_len;
+};
+
+#define DLUBUF "(%p %zu)"
+#define PLUBUF(buf) (buf)->lb_buf, (buf)->lb_len
+/**
+ * One-time initializers, called at obdclass module initialization, not
+ * exported.
+ */
+
+/**
+ * Initialization of global lu_* data.
+ */
+int lu_global_init(void);
+
+/**
+ * Dual to lu_global_init().
+ */
+void lu_global_fini(void);
+
+struct lu_kmem_descr {
+ struct kmem_cache **ckd_cache;
+ const char *ckd_name;
+ const size_t ckd_size;
+};
+
+int lu_kmem_init(struct lu_kmem_descr *caches);
+void lu_kmem_fini(struct lu_kmem_descr *caches);
+
+void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
+ const struct lu_fid *fid);
+struct lu_object *lu_object_anon(const struct lu_env *env,
+ struct lu_device *dev,
+ const struct lu_object_conf *conf);
+
+/** null buffer */
+extern struct lu_buf LU_BUF_NULL;
+
+void lu_buf_free(struct lu_buf *buf);
+void lu_buf_alloc(struct lu_buf *buf, int size);
+void lu_buf_realloc(struct lu_buf *buf, int size);
+
+int lu_buf_check_and_grow(struct lu_buf *buf, int len);
+struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, int len);
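+
+/*
+ * Illustrative sketch only: a typical lu_buf lifetime using the helpers
+ * above (the 4096-byte size is an arbitrary example value).
+ *
+ * \code
+ * struct lu_buf buf = { NULL, 0 };
+ *
+ * lu_buf_alloc(&buf, 4096);
+ * if (buf.lb_buf != NULL) {
+ *         // ... use at most buf.lb_len bytes at buf.lb_buf ...
+ *         lu_buf_free(&buf);
+ * }
+ * \endcode
+ */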
+
+/** @} lu */
+#endif /* __LUSTRE_LU_OBJECT_H */
diff --git a/drivers/staging/lustre/lustre/include/lu_ref.h b/drivers/staging/lustre/lustre/include/lu_ref.h
new file mode 100644
index 0000000..50a2a7f
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lu_ref.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ *
+ * This file is part of Lustre, http://www.lustre.org.
+ *
+ * Lustre is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * Lustre is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Lustre; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __LUSTRE_LU_REF_H
+#define __LUSTRE_LU_REF_H
+
+#include <linux/list.h>
+
+/** \defgroup lu_ref lu_ref
+ *
+ * An interface to track references between objects. Mostly for debugging.
+ *
+ * Suppose there is a reference-counted data structure struct foo. To track
+ * who acquired references to an instance of struct foo, add an lu_ref field
+ * to it:
+ *
+ * \code
+ * struct foo {
+ * atomic_t foo_refcount;
+ * struct lu_ref foo_reference;
+ * ...
+ * };
+ * \endcode
+ *
+ * foo::foo_reference has to be initialized by calling
+ * lu_ref_init(). Typically there will be functions or macros to increment and
+ * decrement foo::foo_refcount; let's say they are foo_get(struct foo *foo)
+ * and foo_put(struct foo *foo), respectively.
+ *
+ * Whenever foo_get() is called to acquire a reference on a foo, lu_ref_add()
+ * has to be called to insert into foo::foo_reference a record describing the
+ * acquired reference. Dually, lu_ref_del() removes the matching record. Typical
+ * usages are:
+ *
+ * \code
+ * struct bar *bar;
+ *
+ * // bar owns a reference to foo.
+ * bar->bar_foo = foo_get(foo);
+ * lu_ref_add(&foo->foo_reference, "bar", bar);
+ *
+ * ...
+ *
+ * // reference from bar to foo is released.
+ * lu_ref_del(&foo->foo_reference, "bar", bar);
+ * foo_put(bar->bar_foo);
+ *
+ *
+ * // current thread acquired a temporary reference to foo.
+ * foo_get(foo);
+ * lu_ref_add(&foo->reference, __FUNCTION__, current);
+ *
+ * ...
+ *
+ * // temporary reference is released.
+ * lu_ref_del(&foo->reference, __FUNCTION__, current);
+ * foo_put(foo);
+ * \endcode
+ *
+ * \e Et \e cetera. Often it makes sense to include lu_ref_add() and
+ * lu_ref_del() calls into foo_get() and foo_put(). When an instance of struct
+ * foo is destroyed, lu_ref_fini() has to be called; it checks that no
+ * pending references remain. lu_ref_print() can be used to dump a list of
+ * pending references while hunting down a leak.
+ *
+ * For objects to which a large number of references can be acquired,
+ * lu_ref_del() can become CPU-consuming, as it has to scan the list of
+ * references. To work around this, remember the result of lu_ref_add() (usually
+ * in the same place where the pointer to struct foo is stored), and use
+ * lu_ref_del_at():
+ *
+ * \code
+ * // There is a large number of bar's for a single foo.
+ * bar->bar_foo = foo_get(foo);
+ * bar->bar_foo_ref = lu_ref_add(&foo->foo_reference, "bar", bar);
+ *
+ * ...
+ *
+ * // reference from bar to foo is released.
+ * lu_ref_del_at(&foo->foo_reference, bar->bar_foo_ref, "bar", bar);
+ * foo_put(bar->bar_foo);
+ * \endcode
+ *
+ * lu_ref interface degrades gracefully in case of memory shortages.
+ *
+ * @{
+ */
+
+
+/*
+ * Dummy data structures/functions to make this compile for now.
+ * They need to be reimplemented with kref.
+ */
+struct lu_ref {};
+struct lu_ref_link {};
+
+static inline void lu_ref_init(struct lu_ref *ref)
+{
+}
+
+static inline void lu_ref_fini(struct lu_ref *ref)
+{
+}
+
+static inline struct lu_ref_link *lu_ref_add(struct lu_ref *ref,
+ const char *scope,
+ const void *source)
+{
+ return NULL;
+}
+
+static inline struct lu_ref_link *lu_ref_add_atomic(struct lu_ref *ref,
+ const char *scope,
+ const void *source)
+{
+ return NULL;
+}
+
+static inline void lu_ref_add_at(struct lu_ref *ref,
+ struct lu_ref_link *link,
+ const char *scope,
+ const void *source)
+{
+}
+
+static inline void lu_ref_del(struct lu_ref *ref, const char *scope,
+ const void *source)
+{
+}
+
+static inline void lu_ref_set_at(struct lu_ref *ref, struct lu_ref_link *link,
+ const char *scope, const void *source0,
+ const void *source1)
+{
+}
+
+static inline void lu_ref_del_at(struct lu_ref *ref, struct lu_ref_link *link,
+ const char *scope, const void *source)
+{
+}
+
+static inline int lu_ref_global_init(void)
+{
+ return 0;
+}
+
+static inline void lu_ref_global_fini(void)
+{
+}
+
+static inline void lu_ref_print(const struct lu_ref *ref)
+{
+}
+
+static inline void lu_ref_print_all(void)
+{
+}
+
+/** @} lu */
+
+#endif /* __LUSTRE_LU_REF_H */
diff --git a/drivers/staging/lustre/lustre/include/lu_target.h b/drivers/staging/lustre/lustre/include/lu_target.h
new file mode 100644
index 0000000..8d48cf4
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lu_target.h
@@ -0,0 +1,91 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef _LUSTRE_LU_TARGET_H
+#define _LUSTRE_LU_TARGET_H
+
+#include <dt_object.h>
+#include <lustre_disk.h>
+
+struct lu_target {
+ struct obd_device *lut_obd;
+ struct dt_device *lut_bottom;
+ /** last_rcvd file */
+ struct dt_object *lut_last_rcvd;
+ /* transaction callbacks */
+ struct dt_txn_callback lut_txn_cb;
+ /** server data in last_rcvd file */
+ struct lr_server_data lut_lsd;
+ /** Server last transaction number */
+ __u64 lut_last_transno;
+ /** Lock protecting last transaction number */
+ spinlock_t lut_translock;
+ /** Lock protecting client bitmap */
+ spinlock_t lut_client_bitmap_lock;
+ /** Bitmap of known clients */
+ unsigned long *lut_client_bitmap;
+};
+
+typedef void (*tgt_cb_t)(struct lu_target *lut, __u64 transno,
+ void *data, int err);
+struct tgt_commit_cb {
+ tgt_cb_t tgt_cb_func;
+ void *tgt_cb_data;
+};
+
+void tgt_boot_epoch_update(struct lu_target *lut);
+int tgt_last_commit_cb_add(struct thandle *th, struct lu_target *lut,
+ struct obd_export *exp, __u64 transno);
+int tgt_new_client_cb_add(struct thandle *th, struct obd_export *exp);
+int tgt_init(const struct lu_env *env, struct lu_target *lut,
+ struct obd_device *obd, struct dt_device *dt);
+void tgt_fini(const struct lu_env *env, struct lu_target *lut);
+int tgt_client_alloc(struct obd_export *exp);
+void tgt_client_free(struct obd_export *exp);
+int tgt_client_del(const struct lu_env *env, struct obd_export *exp);
+int tgt_client_add(const struct lu_env *env, struct obd_export *exp, int);
+int tgt_client_new(const struct lu_env *env, struct obd_export *exp);
+int tgt_client_data_read(const struct lu_env *env, struct lu_target *tg,
+ struct lsd_client_data *lcd, loff_t *off, int index);
+int tgt_client_data_write(const struct lu_env *env, struct lu_target *tg,
+ struct lsd_client_data *lcd, loff_t *off, struct thandle *th);
+int tgt_server_data_read(const struct lu_env *env, struct lu_target *tg);
+int tgt_server_data_write(const struct lu_env *env, struct lu_target *tg,
+ struct thandle *th);
+int tgt_server_data_update(const struct lu_env *env, struct lu_target *tg, int sync);
+int tgt_truncate_last_rcvd(const struct lu_env *env, struct lu_target *tg, loff_t off);
+
+#endif /* __LUSTRE_LU_TARGET_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre/libiam.h b/drivers/staging/lustre/lustre/include/lustre/libiam.h
new file mode 100644
index 0000000..e8e0b08
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre/libiam.h
@@ -0,0 +1,145 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/include/lustre/libiam.h
+ *
+ * iam user level library
+ *
+ * Author: Wang Di <wangdi@clusterfs.com>
+ * Author: Nikita Danilov <nikita@clusterfs.com>
+ * Author: Fan Yong <fanyong@clusterfs.com>
+ */
+
+/*
+ * lustre/libiam.h
+ */
+
+#ifndef __IAM_ULIB_H__
+#define __IAM_ULIB_H__
+
+/** \defgroup libiam libiam
+ *
+ * @{
+ */
+
+
+#define DX_FMT_NAME_LEN 16
+
+enum iam_fmt_t {
+ FMT_LFIX,
+ FMT_LVAR
+};
+
+struct iam_uapi_info {
+ __u16 iui_keysize;
+ __u16 iui_recsize;
+ __u16 iui_ptrsize;
+ __u16 iui_height;
+ char iui_fmt_name[DX_FMT_NAME_LEN];
+};
+
+/*
+ * Create an iam file, but do NOT open it.
+ * Return 0 on success, else -1.
+ */
+int iam_creat(char *filename, enum iam_fmt_t fmt,
+ int blocksize, int keysize, int recsize, int ptrsize);
+
+/*
+ * Open an iam file, but do NOT create it if the file doesn't exist.
+ * Please use iam_creat() to create the file before calling iam_open().
+ * Return the file descriptor (fd) on success, else -1.
+ */
+int iam_open(char *filename, struct iam_uapi_info *ua);
+
+/*
+ * Close file opened by iam_open.
+ */
+int iam_close(int fd);
+
+/*
+ * Please call iam_open() before using this function.
+ */
+int iam_insert(int fd, struct iam_uapi_info *ua,
+ int key_need_convert, char *keybuf,
+ int rec_need_convert, char *recbuf);
+
+/*
+ * Please call iam_open() before using this function.
+ */
+int iam_lookup(int fd, struct iam_uapi_info *ua,
+ int key_need_convert, char *key_buf,
+ int *keysize, char *save_key,
+ int rec_need_convert, char *rec_buf,
+ int *recsize, char *save_rec);
+
+/*
+ * Please call iam_open() before using this function.
+ */
+int iam_delete(int fd, struct iam_uapi_info *ua,
+ int key_need_convert, char *keybuf,
+ int rec_need_convert, char *recbuf);
+
+/*
+ * Please call iam_open() before using this function.
+ */
+int iam_it_start(int fd, struct iam_uapi_info *ua,
+ int key_need_convert, char *key_buf,
+ int *keysize, char *save_key,
+ int rec_need_convert, char *rec_buf,
+ int *recsize, char *save_rec);
+
+/*
+ * Please call iam_open() before using this function.
+ */
+int iam_it_next(int fd, struct iam_uapi_info *ua,
+ int key_need_convert, char *key_buf,
+ int *keysize, char *save_key,
+ int rec_need_convert, char *rec_buf,
+ int *recsize, char *save_rec);
+
+/*
+ * Please call iam_open() before using this function.
+ */
+int iam_it_stop(int fd, struct iam_uapi_info *ua,
+ int key_need_convert, char *keybuf,
+ int rec_need_convert, char *recbuf);
+
+/*
+ * Change iam file mode.
+ */
+int iam_polymorph(char *filename, unsigned long mode);
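+
+/*
+ * Illustrative usage sketch only (not part of this API); the file name,
+ * geometry values and zero-filled buffers below are arbitrary example
+ * choices:
+ *
+ *	struct iam_uapi_info ua;
+ *	char key[8] = { 0 }, rec[8] = { 0 };
+ *	int fd;
+ *
+ *	if (iam_creat("/mnt/lustre/foo.iam", FMT_LFIX, 4096, 8, 8, 4) == 0) {
+ *		fd = iam_open("/mnt/lustre/foo.iam", &ua);
+ *		if (fd >= 0) {
+ *			iam_insert(fd, &ua, 0, key, 0, rec);
+ *			iam_close(fd);
+ *		}
+ *	}
+ */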
+
+/** @} libiam */
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre/liblustreapi.h b/drivers/staging/lustre/lustre/include/lustre/liblustreapi.h
new file mode 100644
index 0000000..707eb74
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre/liblustreapi.h
@@ -0,0 +1,43 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+/*
+ * NOTE: This file is DEPRECATED! Please include lustreapi.h directly
+ * instead of this file. This file will be removed from a future version
+ * of lustre!
+ */
+
+#ifndef _LIBLUSTREAPI_H_
+#define _LIBLUSTREAPI_H_
+
+#include <lustre/lustreapi.h>
+#warning "Including liblustreapi.h is deprecated. Include lustreapi.h directly."
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
new file mode 100644
index 0000000..ad253c6
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
@@ -0,0 +1,121 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/include/lustre/ll_fiemap.h
+ *
+ * FIEMAP data structures and flags. This header file will be used until
+ * fiemap.h is available in the upstream kernel.
+ *
+ * Author: Kalpak Shah <kalpak.shah@sun.com>
+ * Author: Andreas Dilger <adilger@sun.com>
+ */
+
+#ifndef _LUSTRE_FIEMAP_H
+#define _LUSTRE_FIEMAP_H
+
+
+
+struct ll_fiemap_extent {
+ __u64 fe_logical; /* logical offset in bytes for the start of
+ * the extent from the beginning of the file */
+ __u64 fe_physical; /* physical offset in bytes for the start
+ * of the extent from the beginning of the disk */
+ __u64 fe_length; /* length in bytes for this extent */
+ __u64 fe_reserved64[2];
+ __u32 fe_flags; /* FIEMAP_EXTENT_* flags for this extent */
+ __u32 fe_device; /* device number for this extent */
+ __u32 fe_reserved[2];
+};
+
+struct ll_user_fiemap {
+ __u64 fm_start; /* logical offset (inclusive) at
+ * which to start mapping (in) */
+ __u64 fm_length; /* logical length of mapping which
+ * userspace wants (in) */
+ __u32 fm_flags; /* FIEMAP_FLAG_* flags for request (in/out) */
+ __u32 fm_mapped_extents;/* number of extents that were mapped (out) */
+ __u32 fm_extent_count; /* size of fm_extents array (in) */
+ __u32 fm_reserved;
+ struct ll_fiemap_extent fm_extents[0]; /* array of mapped extents (out) */
+};
+
+#define FIEMAP_MAX_OFFSET (~0ULL)
+
+#define FIEMAP_FLAG_SYNC 0x00000001 /* sync file data before map */
+#define FIEMAP_FLAG_XATTR 0x00000002 /* map extended attribute tree */
+
+#define FIEMAP_EXTENT_LAST 0x00000001 /* Last extent in file. */
+#define FIEMAP_EXTENT_UNKNOWN 0x00000002 /* Data location unknown. */
+#define FIEMAP_EXTENT_DELALLOC 0x00000004 /* Location still pending.
+ * Sets EXTENT_UNKNOWN. */
+#define FIEMAP_EXTENT_ENCODED 0x00000008 /* Data can not be read
+ * while fs is unmounted */
+#define FIEMAP_EXTENT_DATA_ENCRYPTED 0x00000080 /* Data is encrypted by fs.
+ * Sets EXTENT_NO_DIRECT. */
+#define FIEMAP_EXTENT_NOT_ALIGNED 0x00000100 /* Extent offsets may not be
+ * block aligned. */
+#define FIEMAP_EXTENT_DATA_INLINE 0x00000200 /* Data mixed with metadata.
+ * Sets EXTENT_NOT_ALIGNED.*/
+#define FIEMAP_EXTENT_DATA_TAIL 0x00000400 /* Multiple files in block.
+ * Sets EXTENT_NOT_ALIGNED.*/
+#define FIEMAP_EXTENT_UNWRITTEN 0x00000800 /* Space allocated, but
+ * no data (i.e. zero). */
+#define FIEMAP_EXTENT_MERGED 0x00001000 /* File does not natively
+ * support extents. Result
+ * merged for efficiency. */
+
+
+static inline size_t fiemap_count_to_size(size_t extent_count)
+{
+ return (sizeof(struct ll_user_fiemap) + extent_count *
+ sizeof(struct ll_fiemap_extent));
+}
+
+static inline unsigned fiemap_size_to_count(size_t array_size)
+{
+ return ((array_size - sizeof(struct ll_user_fiemap)) /
+ sizeof(struct ll_fiemap_extent));
+}
+
+#define FIEMAP_FLAG_DEVICE_ORDER 0x40000000 /* return device ordered mapping */
+
+#ifdef FIEMAP_FLAGS_COMPAT
+#undef FIEMAP_FLAGS_COMPAT
+#endif
+
+/* Lustre specific flags - use a high bit, don't conflict with upstream flag */
+#define FIEMAP_EXTENT_NO_DIRECT 0x40000000 /* Data mapping undefined */
+#define FIEMAP_EXTENT_NET 0x80000000 /* Data stored remotely.
+ * Sets NO_DIRECT flag */
+
+#endif /* _LUSTRE_FIEMAP_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h b/drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h
new file mode 100644
index 0000000..93a3d7d
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h
@@ -0,0 +1,2 @@
+#define BUILD_VERSION "v2_3_64_0-g6e62c21-CHANGED-3.9.0"
+#define LUSTRE_RELEASE 3.9.0_g6e62c21
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_errno.h b/drivers/staging/lustre/lustre/include/lustre/lustre_errno.h
new file mode 100644
index 0000000..2870487
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_errno.h
@@ -0,0 +1,215 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.txt
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (C) 2011 FUJITSU LIMITED. All rights reserved.
+ *
+ * Copyright (c) 2013, Intel Corporation.
+ */
+
+#ifndef LUSTRE_ERRNO_H
+#define LUSTRE_ERRNO_H
+
+/*
+ * Only "network" errnos, which are defined below, are allowed on wire (or on
+ * disk). Generic routines exist to help translate between these and a subset
+ * of the "host" errnos. Some host errnos (e.g., EDEADLOCK) are intentionally
+ * left out. See also the comment on lustre_errno_hton_mapping[].
+ *
+ * To maintain compatibility with existing x86 clients and servers, each of
+ * these network errnos has the same numerical value as its corresponding host
+ * errno on x86.
+ */
+#define LUSTRE_EPERM 1 /* Operation not permitted */
+#define LUSTRE_ENOENT 2 /* No such file or directory */
+#define LUSTRE_ESRCH 3 /* No such process */
+#define LUSTRE_EINTR 4 /* Interrupted system call */
+#define LUSTRE_EIO 5 /* I/O error */
+#define LUSTRE_ENXIO 6 /* No such device or address */
+#define LUSTRE_E2BIG 7 /* Argument list too long */
+#define LUSTRE_ENOEXEC 8 /* Exec format error */
+#define LUSTRE_EBADF 9 /* Bad file number */
+#define LUSTRE_ECHILD 10 /* No child processes */
+#define LUSTRE_EAGAIN 11 /* Try again */
+#define LUSTRE_ENOMEM 12 /* Out of memory */
+#define LUSTRE_EACCES 13 /* Permission denied */
+#define LUSTRE_EFAULT 14 /* Bad address */
+#define LUSTRE_ENOTBLK 15 /* Block device required */
+#define LUSTRE_EBUSY 16 /* Device or resource busy */
+#define LUSTRE_EEXIST 17 /* File exists */
+#define LUSTRE_EXDEV 18 /* Cross-device link */
+#define LUSTRE_ENODEV 19 /* No such device */
+#define LUSTRE_ENOTDIR 20 /* Not a directory */
+#define LUSTRE_EISDIR 21 /* Is a directory */
+#define LUSTRE_EINVAL 22 /* Invalid argument */
+#define LUSTRE_ENFILE 23 /* File table overflow */
+#define LUSTRE_EMFILE 24 /* Too many open files */
+#define LUSTRE_ENOTTY 25 /* Not a typewriter */
+#define LUSTRE_ETXTBSY 26 /* Text file busy */
+#define LUSTRE_EFBIG 27 /* File too large */
+#define LUSTRE_ENOSPC 28 /* No space left on device */
+#define LUSTRE_ESPIPE 29 /* Illegal seek */
+#define LUSTRE_EROFS 30 /* Read-only file system */
+#define LUSTRE_EMLINK 31 /* Too many links */
+#define LUSTRE_EPIPE 32 /* Broken pipe */
+#define LUSTRE_EDOM 33 /* Math argument out of domain of
+ func */
+#define LUSTRE_ERANGE 34 /* Math result not representable */
+#define LUSTRE_EDEADLK 35 /* Resource deadlock would occur */
+#define LUSTRE_ENAMETOOLONG 36 /* File name too long */
+#define LUSTRE_ENOLCK 37 /* No record locks available */
+#define LUSTRE_ENOSYS 38 /* Function not implemented */
+#define LUSTRE_ENOTEMPTY 39 /* Directory not empty */
+#define LUSTRE_ELOOP 40 /* Too many symbolic links
+ encountered */
+#define LUSTRE_ENOMSG 42 /* No message of desired type */
+#define LUSTRE_EIDRM 43 /* Identifier removed */
+#define LUSTRE_ECHRNG 44 /* Channel number out of range */
+#define LUSTRE_EL2NSYNC 45 /* Level 2 not synchronized */
+#define LUSTRE_EL3HLT 46 /* Level 3 halted */
+#define LUSTRE_EL3RST 47 /* Level 3 reset */
+#define LUSTRE_ELNRNG 48 /* Link number out of range */
+#define LUSTRE_EUNATCH 49 /* Protocol driver not attached */
+#define LUSTRE_ENOCSI 50 /* No CSI structure available */
+#define LUSTRE_EL2HLT 51 /* Level 2 halted */
+#define LUSTRE_EBADE 52 /* Invalid exchange */
+#define LUSTRE_EBADR 53 /* Invalid request descriptor */
+#define LUSTRE_EXFULL 54 /* Exchange full */
+#define LUSTRE_ENOANO 55 /* No anode */
+#define LUSTRE_EBADRQC 56 /* Invalid request code */
+#define LUSTRE_EBADSLT 57 /* Invalid slot */
+#define LUSTRE_EBFONT 59 /* Bad font file format */
+#define LUSTRE_ENOSTR 60 /* Device not a stream */
+#define LUSTRE_ENODATA 61 /* No data available */
+#define LUSTRE_ETIME 62 /* Timer expired */
+#define LUSTRE_ENOSR 63 /* Out of streams resources */
+#define LUSTRE_ENONET 64 /* Machine is not on the network */
+#define LUSTRE_ENOPKG 65 /* Package not installed */
+#define LUSTRE_EREMOTE 66 /* Object is remote */
+#define LUSTRE_ENOLINK 67 /* Link has been severed */
+#define LUSTRE_EADV 68 /* Advertise error */
+#define LUSTRE_ESRMNT 69 /* Srmount error */
+#define LUSTRE_ECOMM 70 /* Communication error on send */
+#define LUSTRE_EPROTO 71 /* Protocol error */
+#define LUSTRE_EMULTIHOP 72 /* Multihop attempted */
+#define LUSTRE_EDOTDOT 73 /* RFS specific error */
+#define LUSTRE_EBADMSG 74 /* Not a data message */
+#define LUSTRE_EOVERFLOW 75 /* Value too large for defined data
+ type */
+#define LUSTRE_ENOTUNIQ 76 /* Name not unique on network */
+#define LUSTRE_EBADFD 77 /* File descriptor in bad state */
+#define LUSTRE_EREMCHG 78 /* Remote address changed */
+#define LUSTRE_ELIBACC 79 /* Can not access a needed shared
+ library */
+#define LUSTRE_ELIBBAD 80 /* Accessing a corrupted shared
+ library */
+#define LUSTRE_ELIBSCN 81 /* .lib section in a.out corrupted */
+#define LUSTRE_ELIBMAX 82 /* Attempting to link in too many shared
+ libraries */
+#define LUSTRE_ELIBEXEC 83 /* Cannot exec a shared library
+ directly */
+#define LUSTRE_EILSEQ 84 /* Illegal byte sequence */
+#define LUSTRE_ERESTART 85 /* Interrupted system call should be
+ restarted */
+#define LUSTRE_ESTRPIPE 86 /* Streams pipe error */
+#define LUSTRE_EUSERS 87 /* Too many users */
+#define LUSTRE_ENOTSOCK 88 /* Socket operation on non-socket */
+#define LUSTRE_EDESTADDRREQ 89 /* Destination address required */
+#define LUSTRE_EMSGSIZE 90 /* Message too long */
+#define LUSTRE_EPROTOTYPE 91 /* Protocol wrong type for socket */
+#define LUSTRE_ENOPROTOOPT 92 /* Protocol not available */
+#define LUSTRE_EPROTONOSUPPORT 93 /* Protocol not supported */
+#define LUSTRE_ESOCKTNOSUPPORT 94 /* Socket type not supported */
+#define LUSTRE_EOPNOTSUPP 95 /* Operation not supported on transport
+ endpoint */
+#define LUSTRE_EPFNOSUPPORT 96 /* Protocol family not supported */
+#define LUSTRE_EAFNOSUPPORT 97 /* Address family not supported by
+ protocol */
+#define LUSTRE_EADDRINUSE 98 /* Address already in use */
+#define LUSTRE_EADDRNOTAVAIL 99 /* Cannot assign requested address */
+#define LUSTRE_ENETDOWN 100 /* Network is down */
+#define LUSTRE_ENETUNREACH 101 /* Network is unreachable */
+#define LUSTRE_ENETRESET 102 /* Network dropped connection because of
+ reset */
+#define LUSTRE_ECONNABORTED 103 /* Software caused connection abort */
+#define LUSTRE_ECONNRESET 104 /* Connection reset by peer */
+#define LUSTRE_ENOBUFS 105 /* No buffer space available */
+#define LUSTRE_EISCONN 106 /* Transport endpoint is already
+ connected */
+#define LUSTRE_ENOTCONN 107 /* Transport endpoint is not
+ connected */
+#define LUSTRE_ESHUTDOWN 108 /* Cannot send after transport endpoint
+ shutdown */
+#define LUSTRE_ETOOMANYREFS 109 /* Too many references: cannot splice */
+#define LUSTRE_ETIMEDOUT 110 /* Connection timed out */
+#define LUSTRE_ECONNREFUSED 111 /* Connection refused */
+#define LUSTRE_EHOSTDOWN 112 /* Host is down */
+#define LUSTRE_EHOSTUNREACH 113 /* No route to host */
+#define LUSTRE_EALREADY 114 /* Operation already in progress */
+#define LUSTRE_EINPROGRESS 115 /* Operation now in progress */
+#define LUSTRE_ESTALE 116 /* Stale NFS file handle */
+#define LUSTRE_EUCLEAN 117 /* Structure needs cleaning */
+#define LUSTRE_ENOTNAM 118 /* Not a XENIX named type file */
+#define LUSTRE_ENAVAIL 119 /* No XENIX semaphores available */
+#define LUSTRE_EISNAM 120 /* Is a named type file */
+#define LUSTRE_EREMOTEIO 121 /* Remote I/O error */
+#define LUSTRE_EDQUOT 122 /* Quota exceeded */
+#define LUSTRE_ENOMEDIUM 123 /* No medium found */
+#define LUSTRE_EMEDIUMTYPE 124 /* Wrong medium type */
+#define LUSTRE_ECANCELED 125 /* Operation Canceled */
+#define LUSTRE_ENOKEY 126 /* Required key not available */
+#define LUSTRE_EKEYEXPIRED 127 /* Key has expired */
+#define LUSTRE_EKEYREVOKED 128 /* Key has been revoked */
+#define LUSTRE_EKEYREJECTED 129 /* Key was rejected by service */
+#define LUSTRE_EOWNERDEAD 130 /* Owner died */
+#define LUSTRE_ENOTRECOVERABLE 131 /* State not recoverable */
+#define LUSTRE_ERESTARTSYS 512
+#define LUSTRE_ERESTARTNOINTR 513
+#define LUSTRE_ERESTARTNOHAND 514 /* restart if no handler.. */
+#define LUSTRE_ENOIOCTLCMD 515 /* No ioctl command */
+#define LUSTRE_ERESTART_RESTARTBLOCK 516 /* restart by calling
+ sys_restart_syscall */
+#define LUSTRE_EBADHANDLE 521 /* Illegal NFS file handle */
+#define LUSTRE_ENOTSYNC 522 /* Update synchronization mismatch */
+#define LUSTRE_EBADCOOKIE 523 /* Cookie is stale */
+#define LUSTRE_ENOTSUPP 524 /* Operation is not supported */
+#define LUSTRE_ETOOSMALL 525 /* Buffer or request is too small */
+#define LUSTRE_ESERVERFAULT 526 /* An untranslatable error occurred */
+#define LUSTRE_EBADTYPE 527 /* Type not supported by server */
+#define LUSTRE_EJUKEBOX 528 /* Request initiated, but will not
+ complete before timeout */
+#define LUSTRE_EIOCBQUEUED 529 /* iocb queued, will get completion
+ event */
+#define LUSTRE_EIOCBRETRY 530 /* iocb queued, will trigger a retry */
+
+/*
+ * Translations are optimized away on x86. Host errnos that shouldn't be put
+ * on wire could leak through as a result. Do not count on this side effect.
+ */
+#ifdef CONFIG_LUSTRE_TRANSLATE_ERRNOS
+unsigned int lustre_errno_hton(unsigned int h);
+unsigned int lustre_errno_ntoh(unsigned int n);
+#else
+#define lustre_errno_hton(h) (h)
+#define lustre_errno_ntoh(n) (n)
+#endif
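+
+/*
+ * Illustrative sketch only: code putting an errno on the wire converts the
+ * (positive) host value with lustre_errno_hton() and maps it back with
+ * lustre_errno_ntoh() on receipt, e.g.
+ *
+ *	wire = lustre_errno_hton(EIO);
+ *	host = lustre_errno_ntoh(wire);
+ */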
+
+#endif /* LUSTRE_ERRNO_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
new file mode 100644
index 0000000..984235c
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -0,0 +1,3723 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/include/lustre/lustre_idl.h
+ *
+ * Lustre wire protocol definitions.
+ */
+
+/** \defgroup lustreidl lustreidl
+ *
+ * Lustre wire protocol definitions.
+ *
+ * ALL structs passing over the wire should be declared here. Structs
+ * that are used in interfaces with userspace should go in lustre_user.h.
+ *
+ * All structs being declared here should be built from simple fixed-size
+ * types (__u8, __u16, __u32, __u64) or be built from other types or
+ * structs also declared in this file. Similarly, all flags and magic
+ * values in those structs should also be declared here. This ensures
+ * that the Lustre wire protocol is not influenced by external dependencies.
+ *
+ * The only other acceptable items in this file are VERY SIMPLE accessor
+ * functions to avoid callers grubbing inside the structures, and the
+ * prototypes of the swabber functions for each struct. Nothing that
+ * depends on external functions or definitions should be in here.
+ *
+ * Structs must be properly aligned to put 64-bit values on an 8-byte
+ * boundary. Any structs being added here must also be added to
+ * utils/wirecheck.c and "make newwiretest" run to regenerate the
+ * utils/wiretest.c sources. This allows us to verify that wire structs
+ * have the proper alignment/size on all architectures.
+ *
+ * DO NOT CHANGE any of the structs, flags, values declared here and used
+ * in released Lustre versions. Some structs may have padding fields that
+ * can be used. Some structs might allow addition at the end (verify this
+ * in the code to ensure that new/old clients that see this larger struct
+ * do not fail, otherwise you need to implement protocol compatibility).
+ *
+ * We assume all nodes are either little-endian or big-endian, and we
+ * always send messages in the sender's native format. The receiver
+ * detects the message format by checking the 'magic' field of the message
+ * (see lustre_msg_swabbed() below).
+ *
+ * Each wire type has corresponding 'lustre_swab_xxxtypexxx()' routines,
+ * implemented either here, inline (trivial implementations) or in
+ * ptlrpc/pack_generic.c. These 'swabbers' convert the type from "other"
+ * endian, in-place in the message buffer.
+ *
+ * A swabber takes a single pointer argument. The caller must already have
+ * verified that the length of the message buffer >= sizeof (type).
+ *
+ * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
+ * may be defined that swabs just the variable part, after the caller has
+ * verified that the message buffer is large enough.
+ *
+ * @{
+ */
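+
+/*
+ * Illustrative sketch only (struct foo is hypothetical, not a wire type from
+ * this file): a swabber for a fixed-size wire struct converts every field in
+ * place with the __swabXXs() helpers, e.g.
+ *
+ *	void lustre_swab_foo(struct foo *f)
+ *	{
+ *		__swab64s(&f->foo_id);
+ *		__swab32s(&f->foo_flags);
+ *	}
+ */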
+
+#ifndef _LUSTRE_IDL_H_
+#define _LUSTRE_IDL_H_
+
+#if !defined(LASSERT) && !defined(LPU64)
+#include <linux/libcfs/libcfs.h> /* for LASSERT, LPUX64, etc */
+#endif
+
+/* Defn's shared with user-space. */
+#include <lustre/lustre_user.h>
+
+#include <lustre/lustre_errno.h>
+
+/*
+ * GENERAL STUFF
+ */
+/* FOO_REQUEST_PORTAL is for incoming requests on the FOO
+ * FOO_REPLY_PORTAL is for incoming replies on the FOO
+ * FOO_BULK_PORTAL is for incoming bulk on the FOO
+ */
+
+#define CONNMGR_REQUEST_PORTAL 1
+#define CONNMGR_REPLY_PORTAL 2
+//#define OSC_REQUEST_PORTAL 3
+#define OSC_REPLY_PORTAL 4
+//#define OSC_BULK_PORTAL 5
+#define OST_IO_PORTAL 6
+#define OST_CREATE_PORTAL 7
+#define OST_BULK_PORTAL 8
+//#define MDC_REQUEST_PORTAL 9
+#define MDC_REPLY_PORTAL 10
+//#define MDC_BULK_PORTAL 11
+#define MDS_REQUEST_PORTAL 12
+//#define MDS_REPLY_PORTAL 13
+#define MDS_BULK_PORTAL 14
+#define LDLM_CB_REQUEST_PORTAL 15
+#define LDLM_CB_REPLY_PORTAL 16
+#define LDLM_CANCEL_REQUEST_PORTAL 17
+#define LDLM_CANCEL_REPLY_PORTAL 18
+//#define PTLBD_REQUEST_PORTAL 19
+//#define PTLBD_REPLY_PORTAL 20
+//#define PTLBD_BULK_PORTAL 21
+#define MDS_SETATTR_PORTAL 22
+#define MDS_READPAGE_PORTAL 23
+#define MDS_MDS_PORTAL 24
+
+#define MGC_REPLY_PORTAL 25
+#define MGS_REQUEST_PORTAL 26
+#define MGS_REPLY_PORTAL 27
+#define OST_REQUEST_PORTAL 28
+#define FLD_REQUEST_PORTAL 29
+#define SEQ_METADATA_PORTAL 30
+#define SEQ_DATA_PORTAL 31
+#define SEQ_CONTROLLER_PORTAL 32
+#define MGS_BULK_PORTAL 33
+
+/* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com, n8851@cray.com */
+
+/* packet types */
+#define PTL_RPC_MSG_REQUEST 4711
+#define PTL_RPC_MSG_ERR 4712
+#define PTL_RPC_MSG_REPLY 4713
+
+/* DON'T use swabbed values of MAGIC as magic! */
+#define LUSTRE_MSG_MAGIC_V1 0x0BD00BD0
+#define LUSTRE_MSG_MAGIC_V2 0x0BD00BD3
+
+#define LUSTRE_MSG_MAGIC_V1_SWABBED 0xD00BD00B
+#define LUSTRE_MSG_MAGIC_V2_SWABBED 0xD30BD00B
+
+#define LUSTRE_MSG_MAGIC LUSTRE_MSG_MAGIC_V2
+
+#define PTLRPC_MSG_VERSION 0x00000003
+#define LUSTRE_VERSION_MASK 0xffff0000
+#define LUSTRE_OBD_VERSION 0x00010000
+#define LUSTRE_MDS_VERSION 0x00020000
+#define LUSTRE_OST_VERSION 0x00030000
+#define LUSTRE_DLM_VERSION 0x00040000
+#define LUSTRE_LOG_VERSION 0x00050000
+#define LUSTRE_MGS_VERSION 0x00060000
+
+typedef __u32 mdsno_t;
+typedef __u64 seqno_t;
+typedef __u64 obd_id;
+typedef __u64 obd_seq;
+typedef __s64 obd_time;
+typedef __u64 obd_size;
+typedef __u64 obd_off;
+typedef __u64 obd_blocks;
+typedef __u64 obd_valid;
+typedef __u32 obd_blksize;
+typedef __u32 obd_mode;
+typedef __u32 obd_uid;
+typedef __u32 obd_gid;
+typedef __u32 obd_flag;
+typedef __u32 obd_count;
+
+/**
+ * Describes a range of sequence numbers; lsr_start is included in the range
+ * but lsr_end is not.
+ * The same structure is used in the fld module, where the lsr_index field
+ * holds the mdt id of the home mdt.
+ */
+struct lu_seq_range {
+ __u64 lsr_start;
+ __u64 lsr_end;
+ __u32 lsr_index;
+ __u32 lsr_flags;
+};
+
+#define LU_SEQ_RANGE_MDT 0x0
+#define LU_SEQ_RANGE_OST 0x1
+#define LU_SEQ_RANGE_ANY 0x3
+
+#define LU_SEQ_RANGE_MASK 0x3
+
+static inline unsigned fld_range_type(const struct lu_seq_range *range)
+{
+ return range->lsr_flags & LU_SEQ_RANGE_MASK;
+}
+
+static inline int fld_range_is_ost(const struct lu_seq_range *range)
+{
+ return fld_range_type(range) == LU_SEQ_RANGE_OST;
+}
+
+static inline int fld_range_is_mdt(const struct lu_seq_range *range)
+{
+ return fld_range_type(range) == LU_SEQ_RANGE_MDT;
+}
+
+/**
+ * The ANY range type is only used when the fld client sends an fld query
+ * request but does not know whether the seq belongs to an MDT or an OST; the
+ * request is sent with the ANY type, meaning either seq type returned by the
+ * lookup is acceptable.
+ */
+static inline unsigned fld_range_is_any(const struct lu_seq_range *range)
+{
+ return fld_range_type(range) == LU_SEQ_RANGE_ANY;
+}
+
+static inline void fld_range_set_type(struct lu_seq_range *range,
+ unsigned flags)
+{
+ LASSERT(!(flags & ~LU_SEQ_RANGE_MASK));
+ range->lsr_flags |= flags;
+}
+
+static inline void fld_range_set_mdt(struct lu_seq_range *range)
+{
+ fld_range_set_type(range, LU_SEQ_RANGE_MDT);
+}
+
+static inline void fld_range_set_ost(struct lu_seq_range *range)
+{
+ fld_range_set_type(range, LU_SEQ_RANGE_OST);
+}
+
+static inline void fld_range_set_any(struct lu_seq_range *range)
+{
+ fld_range_set_type(range, LU_SEQ_RANGE_ANY);
+}
+
+/**
+ * returns the width of the given range \a range
+ */
+
+static inline __u64 range_space(const struct lu_seq_range *range)
+{
+ return range->lsr_end - range->lsr_start;
+}
+
+/**
+ * initialize range to zero
+ */
+
+static inline void range_init(struct lu_seq_range *range)
+{
+ range->lsr_start = range->lsr_end = range->lsr_index = 0;
+}
+
+/**
+ * check if the given seq id \a s is within the given range \a range
+ */
+
+static inline int range_within(const struct lu_seq_range *range,
+ __u64 s)
+{
+ return s >= range->lsr_start && s < range->lsr_end;
+}
+
+static inline int range_is_sane(const struct lu_seq_range *range)
+{
+ return (range->lsr_end >= range->lsr_start);
+}
+
+static inline int range_is_zero(const struct lu_seq_range *range)
+{
+ return (range->lsr_start == 0 && range->lsr_end == 0);
+}
+
+static inline int range_is_exhausted(const struct lu_seq_range *range)
+
+{
+ return range_space(range) == 0;
+}
+
+/* return 0 if the two ranges have the same location */
+static inline int range_compare_loc(const struct lu_seq_range *r1,
+ const struct lu_seq_range *r2)
+{
+ return r1->lsr_index != r2->lsr_index ||
+ r1->lsr_flags != r2->lsr_flags;
+}
+
+#define DRANGE "[%#16.16"LPF64"x-%#16.16"LPF64"x):%x:%s"
+
+#define PRANGE(range) \
+ (range)->lsr_start, \
+ (range)->lsr_end, \
+ (range)->lsr_index, \
+ fld_range_is_mdt(range) ? "mdt" : "ost"
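+
+/*
+ * Illustrative sketch only: DRANGE and PRANGE are meant to be used together
+ * in printf-style debugging, e.g.
+ *
+ *	CDEBUG(D_INFO, "range "DRANGE"\n", PRANGE(range));
+ */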
+
+
+/** \defgroup lu_fid lu_fid
+ * @{ */
+
+/**
+ * Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat.
+ * Deprecated since HSM and SOM attributes are now stored in separate on-disk
+ * xattrs.
+ */
+enum lma_compat {
+ LMAC_HSM = 0x00000001,
+ LMAC_SOM = 0x00000002,
+};
+
+/**
+ * Masks for all features that should be supported by a Lustre version to
+ * access a specific file.
+ * This information is stored in lustre_mdt_attrs::lma_incompat.
+ */
+enum lma_incompat {
+ LMAI_RELEASED = 0x0000001, /* file is released */
+ LMAI_AGENT = 0x00000002, /* agent inode */
+ LMAI_REMOTE_PARENT = 0x00000004, /* the parent of the object
+ is on the remote MDT */
+};
+#define LMA_INCOMPAT_SUPP (LMAI_AGENT | LMAI_REMOTE_PARENT)
+
+extern void lustre_lma_swab(struct lustre_mdt_attrs *lma);
+extern void lustre_lma_init(struct lustre_mdt_attrs *lma,
+ const struct lu_fid *fid, __u32 incompat);
+/**
+ * SOM on-disk attributes stored in a separate xattr.
+ */
+struct som_attrs {
+ /** Bitfield for supported data in this structure. For future use. */
+ __u32 som_compat;
+
+ /** Incompat feature list. The supported feature mask is available in
+ * SOM_INCOMPAT_SUPP */
+ __u32 som_incompat;
+
+ /** IO epoch the SOM attributes belong to */
+ __u64 som_ioepoch;
+ /** total file size in objects */
+ __u64 som_size;
+ /** total fs blocks in objects */
+ __u64 som_blocks;
+ /** mds mount id the size is valid for */
+ __u64 som_mountid;
+};
+extern void lustre_som_swab(struct som_attrs *attrs);
+
+#define SOM_INCOMPAT_SUPP 0x0
+
+/**
+ * HSM on-disk attributes stored in a separate xattr.
+ */
+struct hsm_attrs {
+ /** Bitfield for supported data in this structure. For future use. */
+ __u32 hsm_compat;
+
+ /** HSM flags, see hsm_flags enum below */
+ __u32 hsm_flags;
+ /** backend archive id associated with the file */
+ __u64 hsm_arch_id;
+ /** version associated with the last archiving, if any */
+ __u64 hsm_arch_ver;
+};
+extern void lustre_hsm_swab(struct hsm_attrs *attrs);
+
+/**
+ * fid constants
+ */
+enum {
+ /** LASTID file has zero OID */
+ LUSTRE_FID_LASTID_OID = 0UL,
+ /** initial fid id value */
+ LUSTRE_FID_INIT_OID = 1UL
+};
+
+/** returns fid object sequence */
+static inline __u64 fid_seq(const struct lu_fid *fid)
+{
+ return fid->f_seq;
+}
+
+/** returns fid object id */
+static inline __u32 fid_oid(const struct lu_fid *fid)
+{
+ return fid->f_oid;
+}
+
+/** returns fid object version */
+static inline __u32 fid_ver(const struct lu_fid *fid)
+{
+ return fid->f_ver;
+}
+
+static inline void fid_zero(struct lu_fid *fid)
+{
+ memset(fid, 0, sizeof(*fid));
+}
+
+static inline obd_id fid_ver_oid(const struct lu_fid *fid)
+{
+ return ((__u64)fid_ver(fid) << 32 | fid_oid(fid));
+}
+
+/**
+ * Note that reserved SEQ numbers below 12 will conflict with ldiskfs
+ * inodes in the IGIF namespace, so these reserved SEQ numbers can be
+ * used for other purposes and not risk collisions with existing inodes.
+ *
+ * Different FID Format
+ * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs#NEW.0
+ */
+enum fid_seq {
+ FID_SEQ_OST_MDT0 = 0,
+ FID_SEQ_LLOG = 1, /* unnamed llogs */
+ FID_SEQ_ECHO = 2,
+ FID_SEQ_OST_MDT1 = 3,
+ FID_SEQ_OST_MAX = 9, /* Max MDT count before OST_on_FID */
+ FID_SEQ_LLOG_NAME = 10, /* named llogs */
+ FID_SEQ_RSVD = 11,
+ FID_SEQ_IGIF = 12,
+ FID_SEQ_IGIF_MAX = 0x0ffffffffULL,
+ FID_SEQ_IDIF = 0x100000000ULL,
+ FID_SEQ_IDIF_MAX = 0x1ffffffffULL,
+ /* Normal FID sequence starts from this value, i.e. 1<<33 */
+ FID_SEQ_START = 0x200000000ULL,
+ /* sequence for local pre-defined FIDs listed in local_oid */
+ FID_SEQ_LOCAL_FILE = 0x200000001ULL,
+ FID_SEQ_DOT_LUSTRE = 0x200000002ULL,
+ /* sequence is used for local named objects FIDs generated
+ * by local_object_storage library */
+ FID_SEQ_LOCAL_NAME = 0x200000003ULL,
+ /* Because the current FLD only caches the fid sequence (not the oid)
+ * on the client side, if a FID needs to be exposed to clients, all
+ * fids under one sequence must be located on one MDT. */
+ FID_SEQ_SPECIAL = 0x200000004ULL,
+ FID_SEQ_QUOTA = 0x200000005ULL,
+ FID_SEQ_QUOTA_GLB = 0x200000006ULL,
+ FID_SEQ_ROOT = 0x200000007ULL, /* Located on MDT0 */
+ FID_SEQ_NORMAL = 0x200000400ULL,
+ FID_SEQ_LOV_DEFAULT = 0xffffffffffffffffULL
+};
+
+#define OBIF_OID_MAX_BITS 32
+#define OBIF_MAX_OID (1ULL << OBIF_OID_MAX_BITS)
+#define OBIF_OID_MASK ((1ULL << OBIF_OID_MAX_BITS) - 1)
+#define IDIF_OID_MAX_BITS 48
+#define IDIF_MAX_OID (1ULL << IDIF_OID_MAX_BITS)
+#define IDIF_OID_MASK ((1ULL << IDIF_OID_MAX_BITS) - 1)
+
+/** OID for FID_SEQ_SPECIAL */
+enum special_oid {
+ /* Big Filesystem Lock to serialize rename operations */
+ FID_OID_SPECIAL_BFL = 1UL,
+};
+
+/** OID for FID_SEQ_DOT_LUSTRE */
+enum dot_lustre_oid {
+ FID_OID_DOT_LUSTRE = 1UL,
+ FID_OID_DOT_LUSTRE_OBF = 2UL,
+};
+
+static inline int fid_seq_is_mdt0(obd_seq seq)
+{
+ return (seq == FID_SEQ_OST_MDT0);
+}
+
+static inline int fid_seq_is_mdt(const __u64 seq)
+{
+ return seq == FID_SEQ_OST_MDT0 || seq >= FID_SEQ_NORMAL;
+};
+
+static inline int fid_seq_is_echo(obd_seq seq)
+{
+ return (seq == FID_SEQ_ECHO);
+}
+
+static inline int fid_is_echo(const struct lu_fid *fid)
+{
+ return fid_seq_is_echo(fid_seq(fid));
+}
+
+static inline int fid_seq_is_llog(obd_seq seq)
+{
+ return (seq == FID_SEQ_LLOG);
+}
+
+static inline int fid_is_llog(const struct lu_fid *fid)
+{
+ /* file with OID == 0 is not llog but contains last oid */
+ return fid_seq_is_llog(fid_seq(fid)) && fid_oid(fid) > 0;
+}
+
+static inline int fid_seq_is_rsvd(const __u64 seq)
+{
+ return (seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD);
+};
+
+static inline int fid_seq_is_special(const __u64 seq)
+{
+ return seq == FID_SEQ_SPECIAL;
+};
+
+static inline int fid_seq_is_local_file(const __u64 seq)
+{
+ return seq == FID_SEQ_LOCAL_FILE ||
+ seq == FID_SEQ_LOCAL_NAME;
+}
+
+static inline int fid_seq_is_root(const __u64 seq)
+{
+ return seq == FID_SEQ_ROOT;
+}
+
+static inline int fid_seq_is_dot(const __u64 seq)
+{
+ return seq == FID_SEQ_DOT_LUSTRE;
+}
+
+static inline int fid_seq_is_default(const __u64 seq)
+{
+ return seq == FID_SEQ_LOV_DEFAULT;
+}
+
+static inline int fid_is_mdt0(const struct lu_fid *fid)
+{
+ return fid_seq_is_mdt0(fid_seq(fid));
+}
+
+static inline void lu_root_fid(struct lu_fid *fid)
+{
+ fid->f_seq = FID_SEQ_ROOT;
+ fid->f_oid = 1;
+ fid->f_ver = 0;
+}
+
+/**
+ * Check whether a fid is an IGIF or not.
+ * \param fid the fid to be tested.
+ * \return true if the fid is an IGIF; otherwise false.
+ */
+static inline int fid_seq_is_igif(const __u64 seq)
+{
+ return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX;
+}
+
+static inline int fid_is_igif(const struct lu_fid *fid)
+{
+ return fid_seq_is_igif(fid_seq(fid));
+}
+
+/**
+ * Check whether a fid is an IDIF or not.
+ * \param fid the fid to be tested.
+ * \return true if the fid is an IDIF; otherwise false.
+ */
+static inline int fid_seq_is_idif(const __u64 seq)
+{
+ return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX;
+}
+
+static inline int fid_is_idif(const struct lu_fid *fid)
+{
+ return fid_seq_is_idif(fid_seq(fid));
+}
+
+static inline int fid_is_local_file(const struct lu_fid *fid)
+{
+ return fid_seq_is_local_file(fid_seq(fid));
+}
+
+static inline int fid_seq_is_norm(const __u64 seq)
+{
+ return (seq >= FID_SEQ_NORMAL);
+}
+
+static inline int fid_is_norm(const struct lu_fid *fid)
+{
+ return fid_seq_is_norm(fid_seq(fid));
+}
+
+/* convert an OST objid into an IDIF FID SEQ number */
+static inline obd_seq fid_idif_seq(obd_id id, __u32 ost_idx)
+{
+ return FID_SEQ_IDIF | (ost_idx << 16) | ((id >> 32) & 0xffff);
+}
+
+/* convert a packed IDIF FID into an OST objid */
+static inline obd_id fid_idif_id(obd_seq seq, __u32 oid, __u32 ver)
+{
+ return ((__u64)ver << 48) | ((seq & 0xffff) << 32) | oid;
+}
+
+/* extract ost index from IDIF FID */
+static inline __u32 fid_idif_ost_idx(const struct lu_fid *fid)
+{
+ LASSERT(fid_is_idif(fid));
+ return (fid_seq(fid) >> 16) & 0xffff;
+}
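+
+/*
+ * Illustrative sketch (not part of the wire format): how the IDIF helpers
+ * above combine.  "objid" (< IDIF_MAX_OID) and "ost_idx" (<= 0xffff) are
+ * hypothetical locals used only for this example.
+ *
+ *	struct lu_fid fid;
+ *
+ *	fid.f_seq = fid_idif_seq(objid, ost_idx);
+ *	fid.f_oid = (__u32)objid;		(low 32 bits of objid)
+ *	fid.f_ver = 0;
+ *
+ * Afterwards fid_idif_ost_idx(&fid) == ost_idx, and
+ * fid_idif_id(fid_seq(&fid), fid_oid(&fid), fid_ver(&fid)) == objid.
+ */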
+
+/* extract OST sequence (group) from a wire ost_id (id/seq) pair */
+static inline obd_seq ostid_seq(const struct ost_id *ostid)
+{
+ if (fid_seq_is_mdt0(ostid->oi.oi_seq))
+ return FID_SEQ_OST_MDT0;
+
+ if (fid_seq_is_default(ostid->oi.oi_seq))
+ return FID_SEQ_LOV_DEFAULT;
+
+ if (fid_is_idif(&ostid->oi_fid))
+ return FID_SEQ_OST_MDT0;
+
+ return fid_seq(&ostid->oi_fid);
+}
+
+/* extract OST objid from a wire ost_id (id/seq) pair */
+static inline obd_id ostid_id(const struct ost_id *ostid)
+{
+ if (fid_seq_is_mdt0(ostid_seq(ostid)))
+ return ostid->oi.oi_id & IDIF_OID_MASK;
+
+ if (fid_is_idif(&ostid->oi_fid))
+ return fid_idif_id(fid_seq(&ostid->oi_fid),
+ fid_oid(&ostid->oi_fid), 0);
+
+ return fid_oid(&ostid->oi_fid);
+}
+
+static inline void ostid_set_seq(struct ost_id *oi, __u64 seq)
+{
+ if (fid_seq_is_mdt0(seq) || fid_seq_is_default(seq)) {
+ oi->oi.oi_seq = seq;
+ } else {
+ oi->oi_fid.f_seq = seq;
+ /* Note: if f_oid + f_ver is zero, we need to initialize it
+ * to 1; otherwise ostid_seq() would treat this as an old
+ * ostid (oi_seq == 0). */
+ if (oi->oi_fid.f_oid == 0 && oi->oi_fid.f_ver == 0)
+ oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID;
+ }
+}
+
+static inline void ostid_set_seq_mdt0(struct ost_id *oi)
+{
+ ostid_set_seq(oi, FID_SEQ_OST_MDT0);
+}
+
+static inline void ostid_set_seq_echo(struct ost_id *oi)
+{
+ ostid_set_seq(oi, FID_SEQ_ECHO);
+}
+
+static inline void ostid_set_seq_llog(struct ost_id *oi)
+{
+ ostid_set_seq(oi, FID_SEQ_LLOG);
+}
+
+/**
+ * Note: we need to check oi_seq to decide where to set oi_id,
+ * so oi_seq must always be set before oi_id.
+ */
+static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
+{
+ if (fid_seq_is_mdt0(ostid_seq(oi))) {
+ if (oid >= IDIF_MAX_OID) {
+ CERROR("Bad "LPU64" to set "DOSTID"\n",
+ oid, POSTID(oi));
+ return;
+ }
+ oi->oi.oi_id = oid;
+ } else {
+ if (oid > OBIF_MAX_OID) {
+ CERROR("Bad "LPU64" to set "DOSTID"\n",
+ oid, POSTID(oi));
+ return;
+ }
+ oi->oi_fid.f_oid = oid;
+ }
+}
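+
+/*
+ * Usage sketch (illustrative only): ostid_set_id() dispatches on the
+ * sequence already stored in the ost_id, so the sequence must be set
+ * first.  "oi" and "objid" are hypothetical locals.
+ *
+ *	struct ost_id oi = { };
+ *
+ *	ostid_set_seq_mdt0(&oi);	(select the legacy id/seq format)
+ *	ostid_set_id(&oi, objid);	(stored in oi.oi.oi_id)
+ *
+ * Reversing the two calls would interpret the id against whatever
+ * sequence happened to be in the structure at that point.
+ */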
+
+static inline void ostid_inc_id(struct ost_id *oi)
+{
+ if (fid_seq_is_mdt0(ostid_seq(oi))) {
+ if (unlikely(ostid_id(oi) + 1 > IDIF_MAX_OID)) {
+ CERROR("Bad inc "DOSTID"\n", POSTID(oi));
+ return;
+ }
+ oi->oi.oi_id++;
+ } else {
+ oi->oi_fid.f_oid++;
+ }
+}
+
+static inline void ostid_dec_id(struct ost_id *oi)
+{
+ if (fid_seq_is_mdt0(ostid_seq(oi)))
+ oi->oi.oi_id--;
+ else
+ oi->oi_fid.f_oid--;
+}
+
+/**
+ * Unpack an OST object id/seq (group) into a FID. This is needed for
+ * converting all obdo, lmm, lsm, etc. 64-bit id/seq pairs into proper
+ * FIDs. Note that if an id/seq is already in FID/IDIF format it will
+ * be passed through unchanged. Only legacy OST objects in "group 0"
+ * will be mapped into the IDIF namespace so that they can fit into the
+ * struct lu_fid fields without loss. For reference see:
+ * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs
+ */
+static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid,
+ __u32 ost_idx)
+{
+ if (ost_idx > 0xffff) {
+ CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid),
+ ost_idx);
+ return -EBADF;
+ }
+
+ if (fid_seq_is_mdt0(ostid_seq(ostid))) {
+ /* This is a "legacy" (old 1.x/2.early) OST object in "group 0"
+ * that we map into the IDIF namespace. It allows up to 2^48
+ * objects per OST, as this is the object namespace that has
+ * been in production for years. This can handle create rates
+ * of 1M objects/s/OST for 9 years, or combinations thereof. */
+ if (ostid_id(ostid) >= IDIF_MAX_OID) {
+ CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n",
+ POSTID(ostid), ost_idx);
+ return -EBADF;
+ }
+ fid->f_seq = fid_idif_seq(ostid_id(ostid), ost_idx);
+ /* truncate to 32 bits by assignment */
+ fid->f_oid = ostid_id(ostid);
+ /* in theory, not currently used */
+ fid->f_ver = ostid_id(ostid) >> 48;
+ } else /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ {
+ /* This is either an IDIF object, which identifies objects across
+ * all OSTs, or a regular FID. The IDIF namespace maps legacy
+ * OST objects into the FID namespace. In both cases, we just
+ * pass the FID through, no conversion needed. */
+ if (ostid->oi_fid.f_ver != 0) {
+ CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n",
+ POSTID(ostid), ost_idx);
+ return -EBADF;
+ }
+ *fid = ostid->oi_fid;
+ }
+
+ return 0;
+}
+
+/* pack any OST FID into an ostid (id/seq) for the wire/disk */
+static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid)
+{
+ if (unlikely(fid_seq_is_igif(fid->f_seq))) {
+ CERROR("bad IGIF, "DFID"\n", PFID(fid));
+ return -EBADF;
+ }
+
+ if (fid_is_idif(fid)) {
+ ostid_set_seq_mdt0(ostid);
+ ostid_set_id(ostid, fid_idif_id(fid_seq(fid), fid_oid(fid),
+ fid_ver(fid)));
+ } else {
+ ostid->oi_fid = *fid;
+ }
+
+ return 0;
+}
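+
+/*
+ * Illustrative round-trip sketch (a usage note, not a definitive API
+ * contract): "oi", "fid", "ost_idx" and "rc" are hypothetical locals.
+ *
+ *	struct lu_fid fid;
+ *	int rc;
+ *
+ *	rc = ostid_to_fid(&fid, &oi, ost_idx);
+ *	if (rc == 0)
+ *		rc = fid_to_ostid(&fid, &oi);
+ *
+ * A legacy "group 0" oi is mapped into the IDIF namespace by the first
+ * call and packed back into the id/seq (MDT0) representation by the
+ * second, recovering the original object id.
+ */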
+
+/* Check whether the fid is for LAST_ID */
+static inline int fid_is_last_id(const struct lu_fid *fid)
+{
+ return (fid_oid(fid) == 0);
+}
+
+/**
+ * Get the inode number from an IGIF.
+ * \param fid an IGIF to get the inode number from.
+ * \return inode number for the IGIF.
+ */
+static inline ino_t lu_igif_ino(const struct lu_fid *fid)
+{
+ return fid_seq(fid);
+}
+
+extern void lustre_swab_ost_id(struct ost_id *oid);
+
+/**
+ * Get the inode generation from an IGIF.
+ * \param fid an IGIF to get the inode generation from.
+ * \return inode generation for the IGIF.
+ */
+static inline __u32 lu_igif_gen(const struct lu_fid *fid)
+{
+ return fid_oid(fid);
+}
+
+/**
+ * Build igif from the inode number/generation.
+ */
+static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen)
+{
+ fid->f_seq = ino;
+ fid->f_oid = gen;
+ fid->f_ver = 0;
+}
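+
+/*
+ * Illustrative sketch: an IGIF simply wraps the inode number and
+ * generation, so the accessors above undo lu_igif_build().  "fid", "ino"
+ * and "gen" are hypothetical locals.
+ *
+ *	struct lu_fid fid;
+ *
+ *	lu_igif_build(&fid, ino, gen);
+ *
+ * Then lu_igif_ino(&fid) == ino, lu_igif_gen(&fid) == gen, and
+ * fid_is_igif(&fid) holds whenever ino >= FID_SEQ_IGIF (i.e. >= 12).
+ */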
+
+/*
+ * Fids are transmitted across the network (in the sender's byte order)
+ * and stored on disk in big-endian order.
+ */
+static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src)
+{
+ /* check that all fields are converted */
+ CLASSERT(sizeof *src ==
+ sizeof fid_seq(src) +
+ sizeof fid_oid(src) + sizeof fid_ver(src));
+ dst->f_seq = cpu_to_le64(fid_seq(src));
+ dst->f_oid = cpu_to_le32(fid_oid(src));
+ dst->f_ver = cpu_to_le32(fid_ver(src));
+}
+
+static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
+{
+ /* check that all fields are converted */
+ CLASSERT(sizeof *src ==
+ sizeof fid_seq(src) +
+ sizeof fid_oid(src) + sizeof fid_ver(src));
+ dst->f_seq = le64_to_cpu(fid_seq(src));
+ dst->f_oid = le32_to_cpu(fid_oid(src));
+ dst->f_ver = le32_to_cpu(fid_ver(src));
+}
+
+static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src)
+{
+ /* check that all fields are converted */
+ CLASSERT(sizeof *src ==
+ sizeof fid_seq(src) +
+ sizeof fid_oid(src) + sizeof fid_ver(src));
+ dst->f_seq = cpu_to_be64(fid_seq(src));
+ dst->f_oid = cpu_to_be32(fid_oid(src));
+ dst->f_ver = cpu_to_be32(fid_ver(src));
+}
+
+static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
+{
+ /* check that all fields are converted */
+ CLASSERT(sizeof *src ==
+ sizeof fid_seq(src) +
+ sizeof fid_oid(src) + sizeof fid_ver(src));
+ dst->f_seq = be64_to_cpu(fid_seq(src));
+ dst->f_oid = be32_to_cpu(fid_oid(src));
+ dst->f_ver = be32_to_cpu(fid_ver(src));
+}
+
+static inline int fid_is_sane(const struct lu_fid *fid)
+{
+ return fid != NULL &&
+ ((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) ||
+ fid_is_igif(fid) || fid_is_idif(fid) ||
+ fid_seq_is_rsvd(fid_seq(fid)));
+}
+
+static inline int fid_is_zero(const struct lu_fid *fid)
+{
+ return fid_seq(fid) == 0 && fid_oid(fid) == 0;
+}
+
+extern void lustre_swab_lu_fid(struct lu_fid *fid);
+extern void lustre_swab_lu_seq_range(struct lu_seq_range *range);
+
+static inline int lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
+{
+ /* Check that there is no alignment padding. */
+ CLASSERT(sizeof *f0 ==
+ sizeof f0->f_seq + sizeof f0->f_oid + sizeof f0->f_ver);
+ return memcmp(f0, f1, sizeof *f0) == 0;
+}
+
+#define __diff_normalize(val0, val1) \
+({ \
+ typeof(val0) __val0 = (val0); \
+ typeof(val1) __val1 = (val1); \
+ \
+ (__val0 == __val1 ? 0 : __val0 > __val1 ? +1 : -1); \
+})
+
+static inline int lu_fid_cmp(const struct lu_fid *f0,
+ const struct lu_fid *f1)
+{
+ return
+ __diff_normalize(fid_seq(f0), fid_seq(f1)) ?:
+ __diff_normalize(fid_oid(f0), fid_oid(f1)) ?:
+ __diff_normalize(fid_ver(f0), fid_ver(f1));
+}
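+
+/*
+ * Illustrative note: lu_fid_cmp() orders FIDs lexicographically by
+ * (seq, oid, ver) and returns -1, 0 or +1, e.g. (hypothetical values):
+ *
+ *	[0x200000400:0x1:0x0] < [0x200000400:0x2:0x0] < [0x200000401:0x1:0x0]
+ */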
+
+static inline void ostid_cpu_to_le(const struct ost_id *src_oi,
+ struct ost_id *dst_oi)
+{
+ if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
+ dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
+ dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
+ } else {
+ fid_cpu_to_le(&dst_oi->oi_fid, &src_oi->oi_fid);
+ }
+}
+
+static inline void ostid_le_to_cpu(const struct ost_id *src_oi,
+ struct ost_id *dst_oi)
+{
+ if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
+ dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
+ dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
+ } else {
+ fid_le_to_cpu(&dst_oi->oi_fid, &src_oi->oi_fid);
+ }
+}
+
+/** @} lu_fid */
+
+/** \defgroup lu_dir lu_dir
+ * @{ */
+
+/**
+ * Enumeration of possible directory entry attributes.
+ *
+ * Attributes follow directory entry header in the order they appear in this
+ * enumeration.
+ */
+enum lu_dirent_attrs {
+ LUDA_FID = 0x0001,
+ LUDA_TYPE = 0x0002,
+ LUDA_64BITHASH = 0x0004,
+
+ /* The following attrs are used internally by the MDT only
+ * and are not visible to the client. */
+
+ /* Verify the dirent consistency */
+ LUDA_VERIFY = 0x8000,
+ /* Only check but not repair the dirent inconsistency */
+ LUDA_VERIFY_DRYRUN = 0x4000,
+ /* The dirent has been repaired, or is to be repaired (dryrun). */
+ LUDA_REPAIR = 0x2000,
+ /* The system has been upgraded; the dirent has been or is to be
+ * repaired (dryrun). */
+ LUDA_UPGRADE = 0x1000,
+ /* Ignore this record and go directly to the next one. */
+ LUDA_IGNORE = 0x0800,
+};
+
+#define LU_DIRENT_ATTRS_MASK 0xf800
+
+/**
+ * Layout of readdir pages, as transmitted on wire.
+ */
+struct lu_dirent {
+ /** valid if LUDA_FID is set. */
+ struct lu_fid lde_fid;
+ /** a unique entry identifier: a hash or an offset. */
+ __u64 lde_hash;
+ /** total record length, including all attributes. */
+ __u16 lde_reclen;
+ /** name length */
+ __u16 lde_namelen;
+ /** optional variable size attributes following this entry.
+ * taken from enum lu_dirent_attrs.
+ */
+ __u32 lde_attrs;
+ /** name is followed by the attributes indicated in ->lde_attrs, in
+ * their natural order. After the last attribute, padding bytes are
+ * added to make ->lde_reclen a multiple of 8.
+ */
+ char lde_name[0];
+};
+
+/*
+ * Definitions of optional directory entry attributes formats.
+ *
+ * Individual attributes do not have their length encoded in a generic way. It
+ * is assumed that the consumer of an attribute knows its format. This means that
+ * it is impossible to skip over an unknown attribute, except by skipping over all
+ * remaining attributes (by using ->lde_reclen), which is not too
+ * constraining, because new server versions will append new attributes at
+ * the end of an entry.
+ */
+
+/**
+ * Fid directory attribute: a fid of an object referenced by the entry. This
+ * will almost always be requested by the client and supplied by the server.
+ *
+ * Aligned to 8 bytes.
+ */
+/* To maintain compatibility with 1.8, let's keep the fid in the lu_dirent struct. */
+
+/**
+ * File type.
+ *
+ * Aligned to 2 bytes.
+ */
+struct luda_type {
+ __u16 lt_type;
+};
+
+struct lu_dirpage {
+ __u64 ldp_hash_start;
+ __u64 ldp_hash_end;
+ __u32 ldp_flags;
+ __u32 ldp_pad0;
+ struct lu_dirent ldp_entries[0];
+};
+
+enum lu_dirpage_flags {
+ /**
+ * dirpage contains no entry.
+ */
+ LDF_EMPTY = 1 << 0,
+ /**
+ * last entry's lde_hash equals ldp_hash_end.
+ */
+ LDF_COLLIDE = 1 << 1
+};
+
+static inline struct lu_dirent *lu_dirent_start(struct lu_dirpage *dp)
+{
+ if (le32_to_cpu(dp->ldp_flags) & LDF_EMPTY)
+ return NULL;
+ else
+ return dp->ldp_entries;
+}
+
+static inline struct lu_dirent *lu_dirent_next(struct lu_dirent *ent)
+{
+ struct lu_dirent *next;
+
+ if (le16_to_cpu(ent->lde_reclen) != 0)
+ next = ((void *)ent) + le16_to_cpu(ent->lde_reclen);
+ else
+ next = NULL;
+
+ return next;
+}
+
+static inline int lu_dirent_calc_size(int namelen, __u16 attr)
+{
+ int size;
+
+ if (attr & LUDA_TYPE) {
+ const unsigned align = sizeof(struct luda_type) - 1;
+ size = (sizeof(struct lu_dirent) + namelen + align) & ~align;
+ size += sizeof(struct luda_type);
+ } else
+ size = sizeof(struct lu_dirent) + namelen;
+
+ return (size + 7) & ~7;
+}
+
+static inline int lu_dirent_size(struct lu_dirent *ent)
+{
+ if (le16_to_cpu(ent->lde_reclen) == 0) {
+ return lu_dirent_calc_size(le16_to_cpu(ent->lde_namelen),
+ le32_to_cpu(ent->lde_attrs));
+ }
+ return le16_to_cpu(ent->lde_reclen);
+}
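+
+/*
+ * Illustrative sketch: walking the entries of one lu_dirpage with the
+ * helpers above.  "dp" is a hypothetical pointer to a dirpage received
+ * from the server, still in little-endian wire order (hence the
+ * le16/le32 conversions inside the helpers).
+ *
+ *	struct lu_dirent *ent;
+ *
+ *	for (ent = lu_dirent_start(dp); ent != NULL;
+ *	     ent = lu_dirent_next(ent)) {
+ *		process lde_namelen bytes of ent->lde_name here
+ *	}
+ */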
+
+#define MDS_DIR_END_OFF 0xfffffffffffffffeULL
+
+/**
+ * MDS_READPAGE page size
+ *
+ * This is the directory page size packed in MDS_READPAGE RPC.
+ * It differs from PAGE_CACHE_SIZE because the client needs to access
+ * the struct lu_dirpage header packed at the beginning of the "page";
+ * without a fixed page size there would be no way to find the
+ * lu_dirpage header if the client and server PAGE_CACHE_SIZE differ.
+ */
+#define LU_PAGE_SHIFT 12
+#define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT)
+#define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1))
+
+#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT))
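+
+/*
+ * Worked example (illustrative): on a client with 64KiB pages
+ * (PAGE_CACHE_SHIFT == 16), LU_PAGE_COUNT == 1 << (16 - 12) == 16, so each
+ * kernel page holds 16 of the fixed 4KiB lu_dirpage units; with 4KiB pages
+ * the two sizes coincide and LU_PAGE_COUNT == 1.
+ */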
+
+/** @} lu_dir */
+
+struct lustre_handle {
+ __u64 cookie;
+};
+#define DEAD_HANDLE_MAGIC 0xdeadbeefcafebabeULL
+
+static inline int lustre_handle_is_used(struct lustre_handle *lh)
+{
+ return lh->cookie != 0ull;
+}
+
+static inline int lustre_handle_equal(const struct lustre_handle *lh1,
+ const struct lustre_handle *lh2)
+{
+ return lh1->cookie == lh2->cookie;
+}
+
+static inline void lustre_handle_copy(struct lustre_handle *tgt,
+ struct lustre_handle *src)
+{
+ tgt->cookie = src->cookie;
+}
+
+/* flags for lm_flags */
+#define MSGHDR_AT_SUPPORT 0x1
+#define MSGHDR_CKSUM_INCOMPAT18 0x2
+
+#define lustre_msg lustre_msg_v2
+/* we depend on this structure to be 8-byte aligned */
+/* this type is only endian-adjusted in lustre_unpack_msg() */
+struct lustre_msg_v2 {
+ __u32 lm_bufcount;
+ __u32 lm_secflvr;
+ __u32 lm_magic;
+ __u32 lm_repsize;
+ __u32 lm_cksum;
+ __u32 lm_flags;
+ __u32 lm_padding_2;
+ __u32 lm_padding_3;
+ __u32 lm_buflens[0];
+};
+
+/* without gss, ptlrpc_body is put at the first buffer. */
+#define PTLRPC_NUM_VERSIONS 4
+#define JOBSTATS_JOBID_SIZE 32 /* 32 bytes string */
+struct ptlrpc_body_v3 {
+ struct lustre_handle pb_handle;
+ __u32 pb_type;
+ __u32 pb_version;
+ __u32 pb_opc;
+ __u32 pb_status;
+ __u64 pb_last_xid;
+ __u64 pb_last_seen;
+ __u64 pb_last_committed;
+ __u64 pb_transno;
+ __u32 pb_flags;
+ __u32 pb_op_flags;
+ __u32 pb_conn_cnt;
+ __u32 pb_timeout; /* for req, the deadline, for rep, the service est */
+ __u32 pb_service_time; /* for rep, actual service time */
+ __u32 pb_limit;
+ __u64 pb_slv;
+ /* VBR: pre-versions */
+ __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
+ /* padding for future needs */
+ __u64 pb_padding[4];
+ char pb_jobid[JOBSTATS_JOBID_SIZE];
+};
+#define ptlrpc_body ptlrpc_body_v3
+
+struct ptlrpc_body_v2 {
+ struct lustre_handle pb_handle;
+ __u32 pb_type;
+ __u32 pb_version;
+ __u32 pb_opc;
+ __u32 pb_status;
+ __u64 pb_last_xid;
+ __u64 pb_last_seen;
+ __u64 pb_last_committed;
+ __u64 pb_transno;
+ __u32 pb_flags;
+ __u32 pb_op_flags;
+ __u32 pb_conn_cnt;
+ __u32 pb_timeout; /* for req, the deadline, for rep, the service est */
+ __u32 pb_service_time; /* for rep, actual service time, also used for
+ net_latency of req */
+ __u32 pb_limit;
+ __u64 pb_slv;
+ /* VBR: pre-versions */
+ __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
+ /* padding for future needs */
+ __u64 pb_padding[4];
+};
+
+extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
+
+/* message body offset for lustre_msg_v2 */
+/* ptlrpc body offset in all request/reply messages */
+#define MSG_PTLRPC_BODY_OFF 0
+
+/* normal request/reply message record offset */
+#define REQ_REC_OFF 1
+#define REPLY_REC_OFF 1
+
+/* ldlm request message body offset */
+#define DLM_LOCKREQ_OFF 1 /* lockreq offset */
+#define DLM_REQ_REC_OFF 2 /* normal dlm request record offset */
+
+/* ldlm intent lock message body offset */
+#define DLM_INTENT_IT_OFF 2 /* intent lock it offset */
+#define DLM_INTENT_REC_OFF 3 /* intent lock record offset */
+
+/* ldlm reply message body offset */
+#define DLM_LOCKREPLY_OFF 1 /* lockrep offset */
+#define DLM_REPLY_REC_OFF 2 /* reply record offset */
+
+/** only use in req->rq_{req,rep}_swab_mask */
+#define MSG_PTLRPC_HEADER_OFF 31
+
+/* Flags that are operation-specific go in the top 16 bits. */
+#define MSG_OP_FLAG_MASK 0xffff0000
+#define MSG_OP_FLAG_SHIFT 16
+
+/* Flags that apply to all requests are in the bottom 16 bits */
+#define MSG_GEN_FLAG_MASK 0x0000ffff
+#define MSG_LAST_REPLAY 0x0001
+#define MSG_RESENT 0x0002
+#define MSG_REPLAY 0x0004
+/* #define MSG_AT_SUPPORT 0x0008
+ * This was used in early prototypes of adaptive timeouts, and while there
+ * shouldn't be any users of that code, there also isn't a need to reuse this
+ * bit. Defer usage until at least 1.10 to avoid potential conflict. */
+#define MSG_DELAY_REPLAY 0x0010
+#define MSG_VERSION_REPLAY 0x0020
+#define MSG_REQ_REPLAY_DONE 0x0040
+#define MSG_LOCK_REPLAY_DONE 0x0080
+
+/*
+ * Flags for all connect opcodes (MDS_CONNECT, OST_CONNECT)
+ */
+
+#define MSG_CONNECT_RECOVERING 0x00000001
+#define MSG_CONNECT_RECONNECT 0x00000002
+#define MSG_CONNECT_REPLAYABLE 0x00000004
+//#define MSG_CONNECT_PEER 0x8
+#define MSG_CONNECT_LIBCLIENT 0x00000010
+#define MSG_CONNECT_INITIAL 0x00000020
+#define MSG_CONNECT_ASYNC 0x00000040
+#define MSG_CONNECT_NEXT_VER 0x00000080 /* use next version of lustre_msg */
+#define MSG_CONNECT_TRANSNO 0x00000100 /* report transno */
+
+/* Connect flags */
+#define OBD_CONNECT_RDONLY 0x1ULL /*client has read-only access*/
+#define OBD_CONNECT_INDEX 0x2ULL /*connect specific LOV idx */
+#define OBD_CONNECT_MDS 0x4ULL /*connect from MDT to OST */
+#define OBD_CONNECT_GRANT 0x8ULL /*OSC gets grant at connect */
+#define OBD_CONNECT_SRVLOCK 0x10ULL /*server takes locks for cli */
+#define OBD_CONNECT_VERSION 0x20ULL /*Lustre versions in ocd */
+#define OBD_CONNECT_REQPORTAL 0x40ULL /*Separate non-IO req portal */
+#define OBD_CONNECT_ACL 0x80ULL /*access control lists */
+#define OBD_CONNECT_XATTR 0x100ULL /*client use extended attr */
+#define OBD_CONNECT_CROW 0x200ULL /*MDS+OST create obj on write*/
+#define OBD_CONNECT_TRUNCLOCK 0x400ULL /*locks on server for punch */
+#define OBD_CONNECT_TRANSNO 0x800ULL /*replay sends init transno */
+#define OBD_CONNECT_IBITS 0x1000ULL /*support for inodebits locks*/
+#define OBD_CONNECT_JOIN 0x2000ULL /*files can be concatenated.
+ *We no longer support JOIN FILE;
+ *this flag is reserved only to
+ *prevent the bit from being
+ *reused.*/
+#define OBD_CONNECT_ATTRFID 0x4000ULL /*Server can GetAttr By Fid*/
+#define OBD_CONNECT_NODEVOH 0x8000ULL /*No open hndl on specl nodes*/
+#define OBD_CONNECT_RMT_CLIENT 0x10000ULL /*Remote client */
+#define OBD_CONNECT_RMT_CLIENT_FORCE 0x20000ULL /*Remote client by force */
+#define OBD_CONNECT_BRW_SIZE 0x40000ULL /*Max bytes per rpc */
+#define OBD_CONNECT_QUOTA64 0x80000ULL /*Not used since 2.4 */
+#define OBD_CONNECT_MDS_CAPA 0x100000ULL /*MDS capability */
+#define OBD_CONNECT_OSS_CAPA 0x200000ULL /*OSS capability */
+#define OBD_CONNECT_CANCELSET 0x400000ULL /*Early batched cancels. */
+#define OBD_CONNECT_SOM 0x800000ULL /*Size on MDS */
+#define OBD_CONNECT_AT 0x1000000ULL /*client uses AT */
+#define OBD_CONNECT_LRU_RESIZE 0x2000000ULL /*LRU resize feature. */
+#define OBD_CONNECT_MDS_MDS 0x4000000ULL /*MDS-MDS connection */
+#define OBD_CONNECT_REAL 0x8000000ULL /*real connection */
+#define OBD_CONNECT_CHANGE_QS 0x10000000ULL /*Not used since 2.4 */
+#define OBD_CONNECT_CKSUM 0x20000000ULL /*support several cksum algos*/
+#define OBD_CONNECT_FID 0x40000000ULL /*FID is supported by server */
+#define OBD_CONNECT_VBR 0x80000000ULL /*version based recovery */
+#define OBD_CONNECT_LOV_V3 0x100000000ULL /*client supports LOV v3 EA */
+#define OBD_CONNECT_GRANT_SHRINK 0x200000000ULL /* support grant shrink */
+#define OBD_CONNECT_SKIP_ORPHAN 0x400000000ULL /* don't reuse orphan objids */
+#define OBD_CONNECT_MAX_EASIZE 0x800000000ULL /* preserved for large EA */
+#define OBD_CONNECT_FULL20 0x1000000000ULL /* it is 2.0 client */
+#define OBD_CONNECT_LAYOUTLOCK 0x2000000000ULL /* client uses layout lock */
+#define OBD_CONNECT_64BITHASH 0x4000000000ULL /* client supports 64-bits
+ * directory hash */
+#define OBD_CONNECT_MAXBYTES 0x8000000000ULL /* max stripe size */
+#define OBD_CONNECT_IMP_RECOV 0x10000000000ULL /* imp recovery support */
+#define OBD_CONNECT_JOBSTATS 0x20000000000ULL /* jobid in ptlrpc_body */
+#define OBD_CONNECT_UMASK 0x40000000000ULL /* create uses client umask */
+#define OBD_CONNECT_EINPROGRESS 0x80000000000ULL /* client handles -EINPROGRESS
+ * RPC error properly */
+#define OBD_CONNECT_GRANT_PARAM 0x100000000000ULL/* extra grant params used for
+ * finer space reservation */
+#define OBD_CONNECT_FLOCK_OWNER 0x200000000000ULL /* for the fixed 1.8
+ * policy and 2.x server */
+#define OBD_CONNECT_LVB_TYPE 0x400000000000ULL /* variable type of LVB */
+#define OBD_CONNECT_NANOSEC_TIME 0x800000000000ULL /* nanosecond timestamps */
+#define OBD_CONNECT_LIGHTWEIGHT 0x1000000000000ULL/* lightweight connection */
+#define OBD_CONNECT_SHORTIO 0x2000000000000ULL/* short io */
+#define OBD_CONNECT_PINGLESS 0x4000000000000ULL/* pings not required */
+/* XXX README XXX:
+ * Please DO NOT add flag values here before first ensuring that this same
+ * flag value is not in use on some other branch. Please clear any such
+ * changes with senior engineers before starting to use a new flag. Then,
+ * submit a small patch against EVERY branch that ONLY adds the new flag,
+ * updates obd_connect_names[] for lprocfs_rd_connect_flags(), adds the
+ * flag to check_obd_connect_data(), and updates wiretests accordingly, so it
+ * can be approved and landed easily to reserve the flag for future use. */
+
+/* The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS
+ * connection. It is a temporary bug fix for Imperative Recovery interop
+ * between 2.2 and 2.3 x86/ppc nodes, and can be removed when interop for
+ * 2.2 clients/servers is no longer needed. LU-1252/LU-1644. */
+#define OBD_CONNECT_MNE_SWAB OBD_CONNECT_MDS_MDS
+
+#define OCD_HAS_FLAG(ocd, flg) \
+ (!!((ocd)->ocd_connect_flags & OBD_CONNECT_##flg))
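+
+/*
+ * Usage sketch (illustrative): OCD_HAS_FLAG(ocd, GRANT) expands to a test
+ * of OBD_CONNECT_GRANT in ocd->ocd_connect_flags, where "ocd" is a
+ * hypothetical pointer to the struct obd_connect_data defined below.
+ */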
+
+
+#define LRU_RESIZE_CONNECT_FLAG OBD_CONNECT_LRU_RESIZE
+
+#define MDT_CONNECT_SUPPORTED (OBD_CONNECT_RDONLY | OBD_CONNECT_VERSION | \
+ OBD_CONNECT_ACL | OBD_CONNECT_XATTR | \
+ OBD_CONNECT_IBITS | \
+ OBD_CONNECT_NODEVOH | OBD_CONNECT_ATTRFID | \
+ OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \
+ OBD_CONNECT_RMT_CLIENT | \
+ OBD_CONNECT_RMT_CLIENT_FORCE | \
+ OBD_CONNECT_BRW_SIZE | OBD_CONNECT_MDS_CAPA | \
+ OBD_CONNECT_OSS_CAPA | OBD_CONNECT_MDS_MDS | \
+ OBD_CONNECT_FID | LRU_RESIZE_CONNECT_FLAG | \
+ OBD_CONNECT_VBR | OBD_CONNECT_LOV_V3 | \
+ OBD_CONNECT_SOM | OBD_CONNECT_FULL20 | \
+ OBD_CONNECT_64BITHASH | OBD_CONNECT_JOBSTATS | \
+ OBD_CONNECT_EINPROGRESS | \
+ OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_UMASK | \
+ OBD_CONNECT_LVB_TYPE | OBD_CONNECT_LAYOUTLOCK |\
+ OBD_CONNECT_PINGLESS)
+#define OST_CONNECT_SUPPORTED (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \
+ OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \
+ OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \
+ OBD_CONNECT_BRW_SIZE | OBD_CONNECT_OSS_CAPA | \
+ OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \
+ LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_CKSUM | \
+ OBD_CONNECT_RMT_CLIENT | \
+ OBD_CONNECT_RMT_CLIENT_FORCE | OBD_CONNECT_VBR | \
+ OBD_CONNECT_MDS | OBD_CONNECT_SKIP_ORPHAN | \
+ OBD_CONNECT_GRANT_SHRINK | OBD_CONNECT_FULL20 | \
+ OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES | \
+ OBD_CONNECT_MAX_EASIZE | \
+ OBD_CONNECT_EINPROGRESS | \
+ OBD_CONNECT_JOBSTATS | \
+ OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_LVB_TYPE|\
+ OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_FID | \
+ OBD_CONNECT_PINGLESS)
+#define ECHO_CONNECT_SUPPORTED (0)
+#define MGS_CONNECT_SUPPORTED (OBD_CONNECT_VERSION | OBD_CONNECT_AT | \
+ OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV | \
+ OBD_CONNECT_MNE_SWAB | OBD_CONNECT_PINGLESS)
+
+/* Features required for this version of the client to work with server */
+#define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \
+ OBD_CONNECT_FULL20)
+
+#define OBD_OCD_VERSION(major,minor,patch,fix) (((major)<<24) + ((minor)<<16) +\
+ ((patch)<<8) + (fix))
+#define OBD_OCD_VERSION_MAJOR(version) ((int)((version)>>24)&255)
+#define OBD_OCD_VERSION_MINOR(version) ((int)((version)>>16)&255)
+#define OBD_OCD_VERSION_PATCH(version) ((int)((version)>>8)&255)
+#define OBD_OCD_VERSION_FIX(version) ((int)(version)&255)
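+
+/*
+ * Worked example (illustrative): OBD_OCD_VERSION(2, 4, 0, 0) encodes to
+ * 0x02040000, and OBD_OCD_VERSION_MAJOR()/OBD_OCD_VERSION_MINOR() recover
+ * 2 and 4 respectively from that value.
+ */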
+
+/* This structure is used for both request and reply.
+ *
+ * If we eventually have separate connect data for different types, which we
+ * almost certainly will, then perhaps we stick a union in here. */
+struct obd_connect_data_v1 {
+ __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
+ __u32 ocd_version; /* lustre release version number */
+ __u32 ocd_grant; /* initial cache grant amount (bytes) */
+ __u32 ocd_index; /* LOV index to connect to */
+ __u32 ocd_brw_size; /* Maximum BRW size in bytes, must be 2^n */
+ __u64 ocd_ibits_known; /* inode bits this client understands */
+ __u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */
+ __u8 ocd_inodespace; /* log2 of the per-inode space consumption */
+ __u16 ocd_grant_extent; /* per-extent grant overhead, in 1K blocks */
+ __u32 ocd_unused; /* also fix lustre_swab_connect */
+ __u64 ocd_transno; /* first transno from client to be replayed */
+ __u32 ocd_group; /* MDS group on OST */
+ __u32 ocd_cksum_types; /* supported checksum algorithms */
+ __u32 ocd_max_easize; /* How big LOV EA can be on MDS */
+ __u32 ocd_instance; /* also fix lustre_swab_connect */
+ __u64 ocd_maxbytes; /* Maximum stripe size in bytes */
+};
+
+struct obd_connect_data {
+ __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
+ __u32 ocd_version; /* lustre release version number */
+ __u32 ocd_grant; /* initial cache grant amount (bytes) */
+ __u32 ocd_index; /* LOV index to connect to */
+ __u32 ocd_brw_size; /* Maximum BRW size in bytes */
+ __u64 ocd_ibits_known; /* inode bits this client understands */
+ __u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */
+ __u8 ocd_inodespace; /* log2 of the per-inode space consumption */
+ __u16 ocd_grant_extent; /* per-extent grant overhead, in 1K blocks */
+ __u32 ocd_unused; /* also fix lustre_swab_connect */
+ __u64 ocd_transno; /* first transno from client to be replayed */
+ __u32 ocd_group; /* MDS group on OST */
+ __u32 ocd_cksum_types; /* supported checksum algorithms */
+ __u32 ocd_max_easize; /* How big LOV EA can be on MDS */
+ __u32 ocd_instance; /* instance # of this target */
+ __u64 ocd_maxbytes; /* Maximum stripe size in bytes */
+ /* Fields after ocd_maxbytes are only accessible by the receiver
+ * if the corresponding flag in ocd_connect_flags is set. Accessing
+ * any field after ocd_maxbytes on the receiver without a valid flag
+ * may result in an out-of-bounds memory access and a kernel oops. */
+ __u64 padding1; /* added 2.1.0. also fix lustre_swab_connect */
+ __u64 padding2; /* added 2.1.0. also fix lustre_swab_connect */
+ __u64 padding3; /* added 2.1.0. also fix lustre_swab_connect */
+ __u64 padding4; /* added 2.1.0. also fix lustre_swab_connect */
+ __u64 padding5; /* added 2.1.0. also fix lustre_swab_connect */
+ __u64 padding6; /* added 2.1.0. also fix lustre_swab_connect */
+ __u64 padding7; /* added 2.1.0. also fix lustre_swab_connect */
+ __u64 padding8; /* added 2.1.0. also fix lustre_swab_connect */
+ __u64 padding9; /* added 2.1.0. also fix lustre_swab_connect */
+ __u64 paddingA; /* added 2.1.0. also fix lustre_swab_connect */
+ __u64 paddingB; /* added 2.1.0. also fix lustre_swab_connect */
+ __u64 paddingC; /* added 2.1.0. also fix lustre_swab_connect */
+ __u64 paddingD; /* added 2.1.0. also fix lustre_swab_connect */
+ __u64 paddingE; /* added 2.1.0. also fix lustre_swab_connect */
+ __u64 paddingF; /* added 2.1.0. also fix lustre_swab_connect */
+};
+/* XXX README XXX:
+ * Please DO NOT use any fields here before first ensuring that this same
+ * field is not in use on some other branch. Please clear any such changes
+ * with senior engineers before starting to use a new field. Then, submit
+ * a small patch against EVERY branch that ONLY adds the new field along with
+ * the matching OBD_CONNECT flag, so that it can be approved and landed easily to
+ * reserve the flag for future use. */
+
+
+extern void lustre_swab_connect(struct obd_connect_data *ocd);
+
+/*
+ * Supported checksum algorithms. Up to 32 checksum types are supported.
+ * (32-bit mask stored in obd_connect_data::ocd_cksum_types)
+ * Please update DECLARE_CKSUM_NAME/OBD_CKSUM_ALL in obd.h when adding a new
+ * algorithm and also the OBD_FL_CKSUM* flags.
+ */
+typedef enum {
+ OBD_CKSUM_CRC32 = 0x00000001,
+ OBD_CKSUM_ADLER = 0x00000002,
+ OBD_CKSUM_CRC32C = 0x00000004,
+} cksum_type_t;
+
+/*
+ * OST requests: OBDO & OBD request records
+ */
+
+/* opcodes */
+typedef enum {
+ OST_REPLY = 0, /* reply ? */
+ OST_GETATTR = 1,
+ OST_SETATTR = 2,
+ OST_READ = 3,
+ OST_WRITE = 4,
+ OST_CREATE = 5,
+ OST_DESTROY = 6,
+ OST_GET_INFO = 7,
+ OST_CONNECT = 8,
+ OST_DISCONNECT = 9,
+ OST_PUNCH = 10,
+ OST_OPEN = 11,
+ OST_CLOSE = 12,
+ OST_STATFS = 13,
+ OST_SYNC = 16,
+ OST_SET_INFO = 17,
+ OST_QUOTACHECK = 18,
+ OST_QUOTACTL = 19,
+ OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */
+ OST_LAST_OPC
+} ost_cmd_t;
+#define OST_FIRST_OPC OST_REPLY
+
+enum obdo_flags {
+ OBD_FL_INLINEDATA = 0x00000001,
+ OBD_FL_OBDMDEXISTS = 0x00000002,
+ OBD_FL_DELORPHAN = 0x00000004, /* if set in o_flags delete orphans */
+ OBD_FL_NORPC = 0x00000008, /* set in o_flags do in OSC not OST */
+ OBD_FL_IDONLY = 0x00000010, /* set in o_flags only adjust obj id*/
+ OBD_FL_RECREATE_OBJS= 0x00000020, /* recreate missing obj */
+ OBD_FL_DEBUG_CHECK = 0x00000040, /* echo client/server debug check */
+ OBD_FL_NO_USRQUOTA = 0x00000100, /* the object's owner is over quota */
+ OBD_FL_NO_GRPQUOTA = 0x00000200, /* the object's group is over quota */
+ OBD_FL_CREATE_CROW = 0x00000400, /* object should be created on write */
+ OBD_FL_SRVLOCK = 0x00000800, /* delegate DLM locking to server */
+ OBD_FL_CKSUM_CRC32 = 0x00001000, /* CRC32 checksum type */
+ OBD_FL_CKSUM_ADLER = 0x00002000, /* ADLER checksum type */
+ OBD_FL_CKSUM_CRC32C = 0x00004000, /* CRC32C checksum type */
+ OBD_FL_CKSUM_RSVD2 = 0x00008000, /* for future cksum types */
+ OBD_FL_CKSUM_RSVD3 = 0x00010000, /* for future cksum types */
+ OBD_FL_SHRINK_GRANT = 0x00020000, /* object shrink the grant */
+ OBD_FL_MMAP = 0x00040000, /* object is mmapped on the client.
+ * XXX: obsolete - reserved for old
+ * clients prior to 2.2 */
+ OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */
+ OBD_FL_NOSPC_BLK = 0x00100000, /* no more block space on OST */
+
+ /* Note that while these checksum values are currently separate bits,
+ * in 2.x we can actually allow all values from 1-31 if we wanted. */
+ OBD_FL_CKSUM_ALL = OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER |
+ OBD_FL_CKSUM_CRC32C,
+
+ /* mask for local-only flag, which won't be sent over network */
+ OBD_FL_LOCAL_MASK = 0xF0000000,
+};
+
+#define LOV_MAGIC_V1 0x0BD10BD0
+#define LOV_MAGIC LOV_MAGIC_V1
+#define LOV_MAGIC_JOIN_V1 0x0BD20BD0
+#define LOV_MAGIC_V3 0x0BD30BD0
+
+/*
+ * magic for fully defined striping
+ * The idea is that we should have different magics for striping "hints"
+ * (struct lov_user_md_v[13]) and fully defined, ready-to-use striping
+ * (struct lov_mds_md_v[13]). At the moment the magics are used in the wire
+ * protocol, so we can't just change them without lengthy preparation, but
+ * we still need a mechanism to let LOD differentiate hint versus ready
+ * striping. So, for now, we use a trick: the MDT knows what to expect from
+ * a request depending on the case (replay uses ready striping, non-replay
+ * requests use hints), so the MDT replaces the magic with the appropriate
+ * one and LOD can easily understand what's inside. -bzzz
+ */
+#define LOV_MAGIC_V1_DEF 0x0CD10BD0
+#define LOV_MAGIC_V3_DEF 0x0CD30BD0
+
+#define LOV_PATTERN_RAID0 0x001 /* stripes are used round-robin */
+#define LOV_PATTERN_RAID1 0x002 /* stripes are mirrors of each other */
+#define LOV_PATTERN_FIRST 0x100 /* first stripe is not in round-robin */
+#define LOV_PATTERN_CMOBD 0x200
+
+#define LOV_PATTERN_F_MASK 0xffff0000
+#define LOV_PATTERN_F_RELEASED 0x80000000 /* HSM released file */
+
+#define lov_pattern(pattern) (pattern & ~LOV_PATTERN_F_MASK)
+#define lov_pattern_flags(pattern) (pattern & LOV_PATTERN_F_MASK)
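+
+/*
+ * Illustrative example: an HSM-released RAID0 file would store
+ * (LOV_PATTERN_RAID0 | LOV_PATTERN_F_RELEASED); lov_pattern() then yields
+ * LOV_PATTERN_RAID0 and lov_pattern_flags() yields LOV_PATTERN_F_RELEASED.
+ */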
+
+#define lov_ost_data lov_ost_data_v1
+struct lov_ost_data_v1 { /* per-stripe data structure (little-endian)*/
+ struct ost_id l_ost_oi; /* OST object ID */
+ __u32 l_ost_gen; /* generation of this l_ost_idx */
+ __u32 l_ost_idx; /* OST index in LOV (lov_tgt_desc->tgts) */
+};
+
+#define lov_mds_md lov_mds_md_v1
+struct lov_mds_md_v1 { /* LOV EA mds/wire data (little-endian) */
+ __u32 lmm_magic; /* magic number = LOV_MAGIC_V1 */
+ __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
+ struct ost_id lmm_oi; /* LOV object ID */
+ __u32 lmm_stripe_size; /* size of stripe in bytes */
+ /* lmm_stripe_count used to be __u32 */
+ __u16 lmm_stripe_count; /* num stripes in use for this object */
+ __u16 lmm_layout_gen; /* layout generation number */
+ struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
+};
+
+/**
+ * Sigh, because pre-2.4 uses
+ * struct lov_mds_md_v1 {
+ * ........
+ * __u64 lmm_object_id;
+ * __u64 lmm_object_seq;
+ * ......
+ * }
+ * to identify the LOV(MDT) object, and lmm_object_seq will be a
+ * normal FID sequence, which makes it hard to fold these conversions
+ * into ostid_to_fid(), so we do the lmm_oi/FID conversion separately.
+ *
+ * We can tell the format of lmm_oi as follows:
+ * 1.8: lmm_object_id = {inode}, lmm_object_gr = 0
+ * 2.1: lmm_object_id = {oid < 128k}, lmm_object_seq = FID_SEQ_NORMAL
+ * 2.4: lmm_oi.f_seq = FID_SEQ_NORMAL, lmm_oi.f_oid = {oid < 128k},
+ * lmm_oi.f_ver = 0
+ *
+ * But currently lmm_oi/lsm_oi has no "real" uses except for printing
+ * some information, the user can always get the real FID from the LMA,
+ * and the multiple-case check would make swabbing more complicated.
+ * So we keep using id/seq for lmm_oi.
+ */
+
+static inline void fid_to_lmm_oi(const struct lu_fid *fid,
+ struct ost_id *oi)
+{
+ oi->oi.oi_id = fid_oid(fid);
+ oi->oi.oi_seq = fid_seq(fid);
+}
+
+static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq)
+{
+ oi->oi.oi_seq = seq;
+}
+
+static inline __u64 lmm_oi_id(struct ost_id *oi)
+{
+ return oi->oi.oi_id;
+}
+
+static inline __u64 lmm_oi_seq(struct ost_id *oi)
+{
+ return oi->oi.oi_seq;
+}
+
+static inline void lmm_oi_le_to_cpu(struct ost_id *dst_oi,
+ struct ost_id *src_oi)
+{
+ dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
+ dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
+}
+
+static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi,
+ struct ost_id *src_oi)
+{
+ dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
+ dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
+}
+
+/* extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); */
+
+#define MAX_MD_SIZE (sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data))
+#define MIN_MD_SIZE (sizeof(struct lov_mds_md) + 1 * sizeof(struct lov_ost_data))
+
+#define XATTR_NAME_ACL_ACCESS "system.posix_acl_access"
+#define XATTR_NAME_ACL_DEFAULT "system.posix_acl_default"
+#define XATTR_USER_PREFIX "user."
+#define XATTR_TRUSTED_PREFIX "trusted."
+#define XATTR_SECURITY_PREFIX "security."
+#define XATTR_LUSTRE_PREFIX "lustre."
+
+#define XATTR_NAME_LOV "trusted.lov"
+#define XATTR_NAME_LMA "trusted.lma"
+#define XATTR_NAME_LMV "trusted.lmv"
+#define XATTR_NAME_LINK "trusted.link"
+#define XATTR_NAME_FID "trusted.fid"
+#define XATTR_NAME_VERSION "trusted.version"
+#define XATTR_NAME_SOM "trusted.som"
+#define XATTR_NAME_HSM "trusted.hsm"
+#define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_namespace"
+
+struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */
+ __u32 lmm_magic; /* magic number = LOV_MAGIC_V3 */
+ __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
+ struct ost_id lmm_oi; /* LOV object ID */
+ __u32 lmm_stripe_size; /* size of stripe in bytes */
+ /* lmm_stripe_count used to be __u32 */
+ __u16 lmm_stripe_count; /* num stripes in use for this object */
+ __u16 lmm_layout_gen; /* layout generation number */
+ char lmm_pool_name[LOV_MAXPOOLNAME]; /* must be 32bit aligned */
+ struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
+};
+
+static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
+{
+ if (lmm_magic == LOV_MAGIC_V3)
+ return sizeof(struct lov_mds_md_v3) +
+ stripes * sizeof(struct lov_ost_data_v1);
+ else
+ return sizeof(struct lov_mds_md_v1) +
+ stripes * sizeof(struct lov_ost_data_v1);
+}
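+
+/*
+ * Worked example (illustrative, assuming a 16-byte struct ost_id so that
+ * sizeof(struct lov_mds_md_v1) == 32 and sizeof(struct lov_ost_data_v1)
+ * == 24): lov_mds_md_size(4, LOV_MAGIC_V1) == 32 + 4 * 24 == 128 bytes,
+ * which matches MAX_MD_SIZE defined above.
+ */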
+
+
+#define OBD_MD_FLID (0x00000001ULL) /* object ID */
+#define OBD_MD_FLATIME (0x00000002ULL) /* access time */
+#define OBD_MD_FLMTIME (0x00000004ULL) /* data modification time */
+#define OBD_MD_FLCTIME (0x00000008ULL) /* change time */
+#define OBD_MD_FLSIZE (0x00000010ULL) /* size */
+#define OBD_MD_FLBLOCKS (0x00000020ULL) /* allocated blocks count */
+#define OBD_MD_FLBLKSZ (0x00000040ULL) /* block size */
+#define OBD_MD_FLMODE (0x00000080ULL) /* access bits (mode & ~S_IFMT) */
+#define OBD_MD_FLTYPE (0x00000100ULL) /* object type (mode & S_IFMT) */
+#define OBD_MD_FLUID (0x00000200ULL) /* user ID */
+#define OBD_MD_FLGID (0x00000400ULL) /* group ID */
+#define OBD_MD_FLFLAGS (0x00000800ULL) /* flags word */
+#define OBD_MD_FLNLINK (0x00002000ULL) /* link count */
+#define OBD_MD_FLGENER (0x00004000ULL) /* generation number */
+/*#define OBD_MD_FLINLINE (0x00008000ULL) inline data. used until 1.6.5 */
+#define OBD_MD_FLRDEV (0x00010000ULL) /* device number */
+#define OBD_MD_FLEASIZE (0x00020000ULL) /* extended attribute data */
+#define OBD_MD_LINKNAME (0x00040000ULL) /* symbolic link target */
+#define OBD_MD_FLHANDLE (0x00080000ULL) /* file/lock handle */
+#define OBD_MD_FLCKSUM (0x00100000ULL) /* bulk data checksum */
+#define OBD_MD_FLQOS (0x00200000ULL) /* quality of service stats */
+/*#define OBD_MD_FLOSCOPQ (0x00400000ULL) osc opaque data, never used */
+#define OBD_MD_FLCOOKIE (0x00800000ULL) /* log cancellation cookie */
+#define OBD_MD_FLGROUP (0x01000000ULL) /* group */
+#define OBD_MD_FLFID (0x02000000ULL) /* ->ost write inline fid */
+#define OBD_MD_FLEPOCH (0x04000000ULL) /* ->ost write with ioepoch */
+ /* ->mds if epoch opens or closes */
+#define OBD_MD_FLGRANT (0x08000000ULL) /* ost preallocation space grant */
+#define OBD_MD_FLDIREA (0x10000000ULL) /* dir's extended attribute data */
+#define OBD_MD_FLUSRQUOTA (0x20000000ULL) /* over quota flags sent from ost */
+#define OBD_MD_FLGRPQUOTA (0x40000000ULL) /* over quota flags sent from ost */
+#define OBD_MD_FLMODEASIZE (0x80000000ULL) /* EA size will be changed */
+
+#define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives on */
+#define OBD_MD_REINT (0x0000000200000000ULL) /* reintegrate oa */
+#define OBD_MD_MEA (0x0000000400000000ULL) /* CMD split EA */
+
+/* OBD_MD_MDTIDX is used to get the MDT index, but it has never been used
+ * over the wire, and it has been obsolete since 2.3 */
+/* #define OBD_MD_MDTIDX (0x0000000800000000ULL) */
+
+#define OBD_MD_FLXATTR (0x0000001000000000ULL) /* xattr */
+#define OBD_MD_FLXATTRLS (0x0000002000000000ULL) /* xattr list */
+#define OBD_MD_FLXATTRRM (0x0000004000000000ULL) /* xattr remove */
+#define OBD_MD_FLACL (0x0000008000000000ULL) /* ACL */
+#define OBD_MD_FLRMTPERM (0x0000010000000000ULL) /* remote permission */
+#define OBD_MD_FLMDSCAPA (0x0000020000000000ULL) /* MDS capability */
+#define OBD_MD_FLOSSCAPA (0x0000040000000000ULL) /* OSS capability */
+#define OBD_MD_FLCKSPLIT (0x0000080000000000ULL) /* Check split on server */
+#define OBD_MD_FLCROSSREF (0x0000100000000000ULL) /* Cross-ref case */
+#define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes
+ * under lock */
+#define OBD_MD_FLOBJCOUNT (0x0000400000000000ULL) /* for multiple destroy */
+
+#define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */
+#define OBD_MD_FLRMTLGETFACL (0x0002000000000000ULL) /* lfs lgetfacl case */
+#define OBD_MD_FLRMTRSETFACL (0x0004000000000000ULL) /* lfs rsetfacl case */
+#define OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) /* lfs rgetfacl case */
+
+#define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
+
+#define OBD_MD_FLGETATTR (OBD_MD_FLID | OBD_MD_FLATIME | OBD_MD_FLMTIME | \
+ OBD_MD_FLCTIME | OBD_MD_FLSIZE | OBD_MD_FLBLKSZ | \
+ OBD_MD_FLMODE | OBD_MD_FLTYPE | OBD_MD_FLUID | \
+ OBD_MD_FLGID | OBD_MD_FLFLAGS | OBD_MD_FLNLINK | \
+ OBD_MD_FLGENER | OBD_MD_FLRDEV | OBD_MD_FLGROUP)
+
+/* don't forget obdo_fid which is way down at the bottom so it can
+ * come after the definition of llog_cookie */
+
+enum hss_valid {
+ HSS_SETMASK = 0x01,
+ HSS_CLEARMASK = 0x02,
+ HSS_ARCHIVE_ID = 0x04,
+};
+
+struct hsm_state_set {
+ __u32 hss_valid;
+ __u32 hss_archive_id;
+ __u64 hss_setmask;
+ __u64 hss_clearmask;
+};
+
+extern void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
+extern void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
+
+extern void lustre_swab_obd_statfs (struct obd_statfs *os);
+
+/* ost_body.data values for OST_BRW */
+
+#define OBD_BRW_READ 0x01
+#define OBD_BRW_WRITE 0x02
+#define OBD_BRW_RWMASK (OBD_BRW_READ | OBD_BRW_WRITE)
+#define OBD_BRW_SYNC 0x08 /* this page is a part of synchronous
+ * transfer and is not accounted in
+ * the grant. */
+#define OBD_BRW_CHECK 0x10
+#define OBD_BRW_FROM_GRANT 0x20 /* the osc manages this under llite */
+#define OBD_BRW_GRANTED 0x40 /* the ost manages this */
+#define OBD_BRW_NOCACHE 0x80 /* this page is a part of non-cached IO */
+#define OBD_BRW_NOQUOTA 0x100
+#define OBD_BRW_SRVLOCK 0x200 /* Client holds no lock over this page */
+#define OBD_BRW_ASYNC 0x400 /* Server may delay commit to disk */
+#define OBD_BRW_MEMALLOC 0x800 /* Client runs in the "kswapd" context */
+#define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */
+#define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */
+
+#define OBD_OBJECT_EOF 0xffffffffffffffffULL
+
+#define OST_MIN_PRECREATE 32
+#define OST_MAX_PRECREATE 20000
+
+struct obd_ioobj {
+ struct ost_id ioo_oid; /* object ID, if multi-obj BRW */
+ __u32 ioo_max_brw; /* low 16 bits were o_mode before 2.4,
+ * now (PTLRPC_BULK_OPS_COUNT - 1) in
+ * high 16 bits in 2.4 and later */
+ __u32 ioo_bufcnt; /* number of niobufs for this object */
+};
+
+#define IOOBJ_MAX_BRW_BITS 16
+#define IOOBJ_TYPE_MASK ((1U << IOOBJ_MAX_BRW_BITS) - 1)
+#define ioobj_max_brw_get(ioo) (((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1)
+#define ioobj_max_brw_set(ioo, num) \
+do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0)
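+
+/*
+ * Usage sketch (illustrative): the bulk count is stored biased by one in
+ * the high 16 bits of ioo_max_brw, so set and get round-trip.  "ioo" is a
+ * hypothetical local.
+ *
+ *	ioobj_max_brw_set(&ioo, 8);
+ *
+ * Afterwards ioobj_max_brw_get(&ioo) == 8.
+ */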
+
+extern void lustre_swab_obd_ioobj (struct obd_ioobj *ioo);
+
+/* size is a multiple of 8 bytes => can safely be arrayed */
+struct niobuf_remote {
+ __u64 offset;
+ __u32 len;
+ __u32 flags;
+};
+
+extern void lustre_swab_niobuf_remote (struct niobuf_remote *nbr);
+
+/* lock value block communicated between the filter and llite */
+
+/* OST_LVB_ERR_INIT is needed because the return code in rc is
+ * negative, i.e. because ((MASK + rc) & MASK) != MASK. */
+#define OST_LVB_ERR_INIT 0xffbadbad80000000ULL
+#define OST_LVB_ERR_MASK 0xffbadbad00000000ULL
+#define OST_LVB_IS_ERR(blocks) \
+ ((blocks & OST_LVB_ERR_MASK) == OST_LVB_ERR_MASK)
+#define OST_LVB_SET_ERR(blocks, rc) \
+ do { blocks = OST_LVB_ERR_INIT + rc; } while (0)
+#define OST_LVB_GET_ERR(blocks) (int)(blocks - OST_LVB_ERR_INIT)
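+
+/*
+ * Usage sketch (illustrative): encoding a negative errno into the blocks
+ * value and recovering it.  "blocks" is a hypothetical __u64 local.
+ *
+ *	OST_LVB_SET_ERR(blocks, -ENOENT);
+ *
+ * Afterwards OST_LVB_IS_ERR(blocks) is true and
+ * OST_LVB_GET_ERR(blocks) == -ENOENT.
+ */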
+
+struct ost_lvb_v1 {
+ __u64 lvb_size;
+ obd_time lvb_mtime;
+ obd_time lvb_atime;
+ obd_time lvb_ctime;
+ __u64 lvb_blocks;
+};
+
+extern void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
+
+struct ost_lvb {
+ __u64 lvb_size;
+ obd_time lvb_mtime;
+ obd_time lvb_atime;
+ obd_time lvb_ctime;
+ __u64 lvb_blocks;
+ __u32 lvb_mtime_ns;
+ __u32 lvb_atime_ns;
+ __u32 lvb_ctime_ns;
+ __u32 lvb_padding;
+};
+
+extern void lustre_swab_ost_lvb(struct ost_lvb *lvb);
+
+/*
+ * lquota data structures
+ */
+
+#ifndef QUOTABLOCK_BITS
+#define QUOTABLOCK_BITS 10
+#endif
+
+#ifndef QUOTABLOCK_SIZE
+#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS)
+#endif
+
+#ifndef toqb
+#define toqb(x) (((x) + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS)
+#endif
+
+/* The lquota_id structure is a union of all the possible identifier types that
+ * can be used with quota; this includes:
+ * - 64-bit user ID
+ * - 64-bit group ID
+ * - a FID which can be used for per-directory quota in the future */
+union lquota_id {
+ struct lu_fid qid_fid; /* FID for per-directory quota */
+ __u64 qid_uid; /* user identifier */
+ __u64 qid_gid; /* group identifier */
+};
+
+/* quotactl management */
+struct obd_quotactl {
+ __u32 qc_cmd;
+ __u32 qc_type; /* see Q_* flag below */
+ __u32 qc_id;
+ __u32 qc_stat;
+ struct obd_dqinfo qc_dqinfo;
+ struct obd_dqblk qc_dqblk;
+};
+
+extern void lustre_swab_obd_quotactl(struct obd_quotactl *q);
+
+#define Q_QUOTACHECK 0x800100 /* deprecated as of 2.4 */
+#define Q_INITQUOTA 0x800101 /* deprecated as of 2.4 */
+#define Q_GETOINFO 0x800102 /* get obd quota info */
+#define Q_GETOQUOTA 0x800103 /* get obd quotas */
+#define Q_FINVALIDATE 0x800104 /* deprecated as of 2.4 */
+
+#define Q_COPY(out, in, member) (out)->member = (in)->member
+
+#define QCTL_COPY(out, in) \
+do { \
+ Q_COPY(out, in, qc_cmd); \
+ Q_COPY(out, in, qc_type); \
+ Q_COPY(out, in, qc_id); \
+ Q_COPY(out, in, qc_stat); \
+ Q_COPY(out, in, qc_dqinfo); \
+ Q_COPY(out, in, qc_dqblk); \
+} while (0)
+
+/* Body of quota request used for quota acquire/release RPCs between quota
+ * master (aka QMT) and slaves (aka QSD). */
+struct quota_body {
+ struct lu_fid qb_fid; /* FID of global index packing the pool ID
+ * and type (data or metadata) as well as
+ * the quota type (user or group). */
+ union lquota_id qb_id; /* uid or gid or directory FID */
+ __u32 qb_flags; /* see below */
+ __u32 qb_padding;
+ __u64 qb_count; /* acquire/release count (kbytes/inodes) */
+ __u64 qb_usage; /* current slave usage (kbytes/inodes) */
+ __u64 qb_slv_ver; /* slave index file version */
+ struct lustre_handle qb_lockh; /* per-ID lock handle */
+ struct lustre_handle qb_glb_lockh; /* global lock handle */
+ __u64 qb_padding1[4];
+};
+
+/* When the quota_body is used in the reply of quota global intent
+ * lock (IT_QUOTA_CONN) reply, qb_fid contains slave index file FID. */
+#define qb_slv_fid qb_fid
+/* qb_usage is the current qunit (in kbytes/inodes) when quota_body is used in
+ * quota reply */
+#define qb_qunit qb_usage
+
+#define QUOTA_DQACQ_FL_ACQ 0x1 /* acquire quota */
+#define QUOTA_DQACQ_FL_PREACQ 0x2 /* pre-acquire */
+#define QUOTA_DQACQ_FL_REL 0x4 /* release quota */
+#define QUOTA_DQACQ_FL_REPORT 0x8 /* report usage */
+
+extern void lustre_swab_quota_body(struct quota_body *b);
+
+/* Quota types currently supported */
+enum {
+ LQUOTA_TYPE_USR = 0x00, /* maps to USRQUOTA */
+ LQUOTA_TYPE_GRP = 0x01, /* maps to GRPQUOTA */
+ LQUOTA_TYPE_MAX
+};
+
+/* There are 2 different resource types on which a quota limit can be enforced:
+ * - inodes on the MDTs
+ * - blocks on the OSTs */
+enum {
+ LQUOTA_RES_MD = 0x01, /* skip 0 to avoid null oid in FID */
+ LQUOTA_RES_DT = 0x02,
+ LQUOTA_LAST_RES,
+ LQUOTA_FIRST_RES = LQUOTA_RES_MD
+};
+#define LQUOTA_NR_RES (LQUOTA_LAST_RES - LQUOTA_FIRST_RES + 1)
+
+/*
+ * Space accounting support
+ * Format of an accounting record, providing disk usage information for a given
+ * user or group
+ */
+struct lquota_acct_rec { /* 16 bytes */
+ __u64 bspace; /* current space in use */
+ __u64 ispace; /* current # inodes in use */
+};
+
+/*
+ * Global quota index support
+ * Format of a global record, providing global quota settings for a given quota
+ * identifier
+ */
+struct lquota_glb_rec { /* 32 bytes */
+ __u64 qbr_hardlimit; /* quota hard limit, in #inodes or kbytes */
+ __u64 qbr_softlimit; /* quota soft limit, in #inodes or kbytes */
+ __u64 qbr_time; /* grace time, in seconds */
+ __u64 qbr_granted; /* how much is granted to slaves, in #inodes or
+ * kbytes */
+};
+
+/*
+ * Slave index support
+ * Format of a slave record, recording how much space is granted to a given
+ * slave
+ */
+struct lquota_slv_rec { /* 8 bytes */
+ __u64 qsr_granted; /* space granted to the slave for the key=ID,
+ * in #inodes or kbytes */
+};
+
+/* Data structures associated with the quota locks */
+
+/* Glimpse descriptor used for the index & per-ID quota locks */
+struct ldlm_gl_lquota_desc {
+ union lquota_id gl_id; /* quota ID subject to the glimpse */
+ __u64 gl_flags; /* see LQUOTA_FL* below */
+ __u64 gl_ver; /* new index version */
+ __u64 gl_hardlimit; /* new hardlimit or qunit value */
+ __u64 gl_softlimit; /* new softlimit */
+ __u64 gl_time;
+ __u64 gl_pad2;
+};
+#define gl_qunit gl_hardlimit /* current qunit value used when
+ * glimpsing per-ID quota locks */
+
+/* quota glimpse flags */
+#define LQUOTA_FL_EDQUOT 0x1 /* user/group out of quota space on QMT */
+
+/* LVB used with quota (global and per-ID) locks */
+struct lquota_lvb {
+ __u64 lvb_flags; /* see LQUOTA_FL* above */
+ __u64 lvb_id_may_rel; /* space that might be released later */
+ __u64 lvb_id_rel; /* space released by the slave for this ID */
+ __u64 lvb_id_qunit; /* current qunit value */
+ __u64 lvb_pad1;
+};
+
+extern void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
+
+/* LVB used with global quota lock */
+#define lvb_glb_ver lvb_id_may_rel /* current version of the global index */
+
+/* op codes */
+typedef enum {
+ QUOTA_DQACQ = 601,
+ QUOTA_DQREL = 602,
+ QUOTA_LAST_OPC
+} quota_cmd_t;
+#define QUOTA_FIRST_OPC QUOTA_DQACQ
+
+/*
+ * MDS REQ RECORDS
+ */
+
+/* opcodes */
+typedef enum {
+ MDS_GETATTR = 33,
+ MDS_GETATTR_NAME = 34,
+ MDS_CLOSE = 35,
+ MDS_REINT = 36,
+ MDS_READPAGE = 37,
+ MDS_CONNECT = 38,
+ MDS_DISCONNECT = 39,
+ MDS_GETSTATUS = 40,
+ MDS_STATFS = 41,
+ MDS_PIN = 42,
+ MDS_UNPIN = 43,
+ MDS_SYNC = 44,
+ MDS_DONE_WRITING = 45,
+ MDS_SET_INFO = 46,
+ MDS_QUOTACHECK = 47,
+ MDS_QUOTACTL = 48,
+ MDS_GETXATTR = 49,
+ MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */
+ MDS_WRITEPAGE = 51,
+ MDS_IS_SUBDIR = 52,
+ MDS_GET_INFO = 53,
+ MDS_HSM_STATE_GET = 54,
+ MDS_HSM_STATE_SET = 55,
+ MDS_HSM_ACTION = 56,
+ MDS_HSM_PROGRESS = 57,
+ MDS_HSM_REQUEST = 58,
+ MDS_HSM_CT_REGISTER = 59,
+ MDS_HSM_CT_UNREGISTER = 60,
+ MDS_SWAP_LAYOUTS = 61,
+ MDS_LAST_OPC
+} mds_cmd_t;
+
+#define MDS_FIRST_OPC MDS_GETATTR
+
+
+/* opcodes for object update */
+typedef enum {
+ UPDATE_OBJ = 1000,
+ UPDATE_LAST_OPC
+} update_cmd_t;
+
+#define UPDATE_FIRST_OPC UPDATE_OBJ
+
+/*
+ * Do not exceed 63
+ */
+
+typedef enum {
+ REINT_SETATTR = 1,
+ REINT_CREATE = 2,
+ REINT_LINK = 3,
+ REINT_UNLINK = 4,
+ REINT_RENAME = 5,
+ REINT_OPEN = 6,
+ REINT_SETXATTR = 7,
+ REINT_RMENTRY = 8,
+// REINT_WRITE = 9,
+ REINT_MAX
+} mds_reint_t, mdt_reint_t;
+
+extern void lustre_swab_generic_32s (__u32 *val);
+
+/* the disposition of the intent outlines what was executed */
+#define DISP_IT_EXECD 0x00000001
+#define DISP_LOOKUP_EXECD 0x00000002
+#define DISP_LOOKUP_NEG 0x00000004
+#define DISP_LOOKUP_POS 0x00000008
+#define DISP_OPEN_CREATE 0x00000010
+#define DISP_OPEN_OPEN 0x00000020
+#define DISP_ENQ_COMPLETE 0x00400000
+#define DISP_ENQ_OPEN_REF 0x00800000
+#define DISP_ENQ_CREATE_REF 0x01000000
+#define DISP_OPEN_LOCK 0x02000000
+
+/* INODE LOCK PARTS */
+#define MDS_INODELOCK_LOOKUP 0x000001 /* dentry, mode, owner, group */
+#define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */
+#define MDS_INODELOCK_OPEN 0x000004 /* For opened files */
+#define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */
+#define MDS_INODELOCK_PERM 0x000010 /* for permission */
+
+#define MDS_INODELOCK_MAXSHIFT 4
+/* This FULL lock is useful to take on unlink sort of operations */
+#define MDS_INODELOCK_FULL ((1<<(MDS_INODELOCK_MAXSHIFT+1))-1)
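+
+/*
+ * Worked example (illustrative): with MDS_INODELOCK_MAXSHIFT == 4,
+ * MDS_INODELOCK_FULL == (1 << 5) - 1 == 0x1f, i.e. the union of all five
+ * inode lock bits defined above.
+ */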
+
+extern void lustre_swab_ll_fid (struct ll_fid *fid);
+
+/* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
+ * but was moved into name[1] along with the OID to avoid consuming the
+ * name[2,3] fields that need to be used for the quota id (also a FID). */
+enum {
+ LUSTRE_RES_ID_SEQ_OFF = 0,
+ LUSTRE_RES_ID_VER_OID_OFF = 1,
+ LUSTRE_RES_ID_WAS_VER_OFF = 2, /* see note above */
+ LUSTRE_RES_ID_QUOTA_SEQ_OFF = 2,
+ LUSTRE_RES_ID_QUOTA_VER_OID_OFF = 3,
+ LUSTRE_RES_ID_HSH_OFF = 3
+};
+
+#define MDS_STATUS_CONN 1
+#define MDS_STATUS_LOV 2
+
+/* mdt_thread_info.mti_flags. */
+enum md_op_flags {
+ /* This flag indicates that Size-on-MDS attributes have changed. */
+ MF_SOM_CHANGE = (1 << 0),
+ /* These flags indicate that an epoch opens or closes. */
+ MF_EPOCH_OPEN = (1 << 1),
+ MF_EPOCH_CLOSE = (1 << 2),
+ MF_MDC_CANCEL_FID1 = (1 << 3),
+ MF_MDC_CANCEL_FID2 = (1 << 4),
+ MF_MDC_CANCEL_FID3 = (1 << 5),
+ MF_MDC_CANCEL_FID4 = (1 << 6),
+ /* There is a pending attribute update. */
+ MF_SOM_AU = (1 << 7),
+ /* Cancel OST locks while getting OST attributes. */
+ MF_GETATTR_LOCK = (1 << 8),
+ MF_GET_MDT_IDX = (1 << 9),
+};
+
+#define MF_SOM_LOCAL_FLAGS (MF_SOM_CHANGE | MF_EPOCH_OPEN | MF_EPOCH_CLOSE)
+
+#define LUSTRE_BFLAG_UNCOMMITTED_WRITES 0x1
+
+/* these should be identical to their EXT4_*_FL counterparts; they are
+ * redefined here only to avoid dragging in fs/ext4/ext4.h */
+#define LUSTRE_SYNC_FL 0x00000008 /* Synchronous updates */
+#define LUSTRE_IMMUTABLE_FL 0x00000010 /* Immutable file */
+#define LUSTRE_APPEND_FL 0x00000020 /* writes to file may only append */
+#define LUSTRE_NOATIME_FL 0x00000080 /* do not update atime */
+#define LUSTRE_DIRSYNC_FL 0x00010000 /* dirsync behaviour (dir only) */
+
+/* Convert wire LUSTRE_*_FL to corresponding client local VFS S_* values
+ * for the client inode i_flags. The LUSTRE_*_FL are the Lustre wire
+ * protocol equivalents of LDISKFS_*_FL values stored on disk, while
+ * the S_* flags are kernel-internal values that change between kernel
+ * versions. These flags are set/cleared via FSFILT_IOC_{GET,SET}_FLAGS.
+ * See b=16526 for a full history. */
+static inline int ll_ext_to_inode_flags(int flags)
+{
+ return (((flags & LUSTRE_SYNC_FL) ? S_SYNC : 0) |
+ ((flags & LUSTRE_NOATIME_FL) ? S_NOATIME : 0) |
+ ((flags & LUSTRE_APPEND_FL) ? S_APPEND : 0) |
+#if defined(S_DIRSYNC)
+ ((flags & LUSTRE_DIRSYNC_FL) ? S_DIRSYNC : 0) |
+#endif
+ ((flags & LUSTRE_IMMUTABLE_FL) ? S_IMMUTABLE : 0));
+}
+
+static inline int ll_inode_to_ext_flags(int iflags)
+{
+ return (((iflags & S_SYNC) ? LUSTRE_SYNC_FL : 0) |
+ ((iflags & S_NOATIME) ? LUSTRE_NOATIME_FL : 0) |
+ ((iflags & S_APPEND) ? LUSTRE_APPEND_FL : 0) |
+#if defined(S_DIRSYNC)
+ ((iflags & S_DIRSYNC) ? LUSTRE_DIRSYNC_FL : 0) |
+#endif
+ ((iflags & S_IMMUTABLE) ? LUSTRE_IMMUTABLE_FL : 0));
+}
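A minimal usage sketch for the two converters above (illustrative only; the function name is invented, and the usual VFS flag definitions are assumed to be in scope). For flags drawn from the set handled here, the conversion round-trips:

static inline int lustre_flags_roundtrip_ok(void)
{
        int wire = LUSTRE_APPEND_FL | LUSTRE_NOATIME_FL;  /* wire-format flags */
        int vfs  = ll_ext_to_inode_flags(wire);           /* -> S_APPEND | S_NOATIME */

        /* Mapping back to the wire format recovers the original bits. */
        return ll_inode_to_ext_flags(vfs) == wire;
}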
+
+struct mdt_body {
+ struct lu_fid fid1;
+ struct lu_fid fid2;
+ struct lustre_handle handle;
+ __u64 valid;
+ __u64 size; /* Offset, in the case of MDS_READPAGE */
+ obd_time mtime;
+ obd_time atime;
+ obd_time ctime;
+ __u64 blocks; /* XID, in the case of MDS_READPAGE */
+ __u64 ioepoch;
+ __u64 unused1; /* was "ino" until 2.4.0 */
+ __u32 fsuid;
+ __u32 fsgid;
+ __u32 capability;
+ __u32 mode;
+ __u32 uid;
+ __u32 gid;
+ __u32 flags; /* from vfs for pin/unpin, LUSTRE_BFLAG close */
+ __u32 rdev;
+ __u32 nlink; /* #bytes to read in the case of MDS_READPAGE */
+ __u32 unused2; /* was "generation" until 2.4.0 */
+ __u32 suppgid;
+ __u32 eadatasize;
+ __u32 aclsize;
+ __u32 max_mdsize;
+ __u32 max_cookiesize;
+ __u32 uid_h; /* high 32-bits of uid, for FUID */
+ __u32 gid_h; /* high 32-bits of gid, for FUID */
+ __u32 padding_5; /* also fix lustre_swab_mdt_body */
+ __u64 padding_6;
+ __u64 padding_7;
+ __u64 padding_8;
+ __u64 padding_9;
+ __u64 padding_10;
+}; /* 216 */
+
+extern void lustre_swab_mdt_body (struct mdt_body *b);
+
+struct mdt_ioepoch {
+ struct lustre_handle handle;
+ __u64 ioepoch;
+ __u32 flags;
+ __u32 padding;
+};
+
+extern void lustre_swab_mdt_ioepoch (struct mdt_ioepoch *b);
+
+/* permissions for md_perm.mp_perm */
+enum {
+ CFS_SETUID_PERM = 0x01,
+ CFS_SETGID_PERM = 0x02,
+ CFS_SETGRP_PERM = 0x04,
+ CFS_RMTACL_PERM = 0x08,
+ CFS_RMTOWN_PERM = 0x10
+};
+
+/* inode access permission for a remote user; the inode info is omitted
+ * because the client already knows it. */
+struct mdt_remote_perm {
+ __u32 rp_uid;
+ __u32 rp_gid;
+ __u32 rp_fsuid;
+ __u32 rp_fsuid_h;
+ __u32 rp_fsgid;
+ __u32 rp_fsgid_h;
+ __u32 rp_access_perm; /* MAY_READ/WRITE/EXEC */
+ __u32 rp_padding;
+};
+
+extern void lustre_swab_mdt_remote_perm(struct mdt_remote_perm *p);
+
+struct mdt_rec_setattr {
+ __u32 sa_opcode;
+ __u32 sa_cap;
+ __u32 sa_fsuid;
+ __u32 sa_fsuid_h;
+ __u32 sa_fsgid;
+ __u32 sa_fsgid_h;
+ __u32 sa_suppgid;
+ __u32 sa_suppgid_h;
+ __u32 sa_padding_1;
+ __u32 sa_padding_1_h;
+ struct lu_fid sa_fid;
+ __u64 sa_valid;
+ __u32 sa_uid;
+ __u32 sa_gid;
+ __u64 sa_size;
+ __u64 sa_blocks;
+ obd_time sa_mtime;
+ obd_time sa_atime;
+ obd_time sa_ctime;
+ __u32 sa_attr_flags;
+ __u32 sa_mode;
+ __u32 sa_bias; /* some operation flags */
+ __u32 sa_padding_3;
+ __u32 sa_padding_4;
+ __u32 sa_padding_5;
+};
+
+extern void lustre_swab_mdt_rec_setattr (struct mdt_rec_setattr *sa);
+
+/*
+ * Attribute flags used in mdt_rec_setattr::sa_valid.
+ * The kernel's #defines for ATTR_* should not be used over the network
+ * since the client and MDS may run different kernels (see bug 13828).
+ * Therefore, we should only use MDS_ATTR_* attributes for sa_valid.
+ */
+#define MDS_ATTR_MODE 0x1ULL /* = 1 */
+#define MDS_ATTR_UID 0x2ULL /* = 2 */
+#define MDS_ATTR_GID 0x4ULL /* = 4 */
+#define MDS_ATTR_SIZE 0x8ULL /* = 8 */
+#define MDS_ATTR_ATIME 0x10ULL /* = 16 */
+#define MDS_ATTR_MTIME 0x20ULL /* = 32 */
+#define MDS_ATTR_CTIME 0x40ULL /* = 64 */
+#define MDS_ATTR_ATIME_SET 0x80ULL /* = 128 */
+#define MDS_ATTR_MTIME_SET 0x100ULL /* = 256 */
+#define MDS_ATTR_FORCE 0x200ULL /* = 512, not a change itself, but forces the change */
+#define MDS_ATTR_ATTR_FLAG 0x400ULL /* = 1024 */
+#define MDS_ATTR_KILL_SUID 0x800ULL /* = 2048 */
+#define MDS_ATTR_KILL_SGID 0x1000ULL /* = 4096 */
+#define MDS_ATTR_CTIME_SET 0x2000ULL /* = 8192 */
+#define MDS_ATTR_FROM_OPEN 0x4000ULL /* = 16384, called from open path, i.e. O_TRUNC */
+#define MDS_ATTR_BLOCKS 0x8000ULL /* = 32768 */
+
+#ifndef FMODE_READ
+#define FMODE_READ 00000001
+#define FMODE_WRITE 00000002
+#endif
+
+#define MDS_FMODE_CLOSED 00000000
+#define MDS_FMODE_EXEC 00000004
+/* IO Epoch is opened on a closed file. */
+#define MDS_FMODE_EPOCH 01000000
+/* IO Epoch is opened on a file truncate. */
+#define MDS_FMODE_TRUNC 02000000
+/* Size-on-MDS Attribute Update is pending. */
+#define MDS_FMODE_SOM 04000000
+
+#define MDS_OPEN_CREATED 00000010
+#define MDS_OPEN_CROSS 00000020
+
+#define MDS_OPEN_CREAT 00000100
+#define MDS_OPEN_EXCL 00000200
+#define MDS_OPEN_TRUNC 00001000
+#define MDS_OPEN_APPEND 00002000
+#define MDS_OPEN_SYNC 00010000
+#define MDS_OPEN_DIRECTORY 00200000
+
+#define MDS_OPEN_BY_FID 040000000 /* open_by_fid for known object */
+#define MDS_OPEN_DELAY_CREATE 0100000000 /* delay initial object create */
+#define MDS_OPEN_OWNEROVERRIDE 0200000000 /* NFSD rw-reopen ro file for owner */
+#define MDS_OPEN_JOIN_FILE 0400000000 /* open for join file.
+ * JOIN FILE is no longer
+ * supported; this flag is kept
+ * only to prevent the bit from
+ * being reused. */
+
+#define MDS_OPEN_LOCK 04000000000 /* This open requires open lock */
+#define MDS_OPEN_HAS_EA 010000000000 /* specify object create pattern */
+#define MDS_OPEN_HAS_OBJS 020000000000 /* Just set the EA; the objects already exist */
+#define MDS_OPEN_NORESTORE 0100000000000ULL /* Do not restore file at open */
+#define MDS_OPEN_NEWSTRIPE 0200000000000ULL /* New stripe needed (restripe or
+ * hsm restore) */
+#define MDS_OPEN_VOLATILE 0400000000000ULL /* File is volatile = created
+ unlinked */
+
+/* permission to create a non-directory file */
+#define MAY_CREATE (1 << 7)
+/* permission to create a directory */
+#define MAY_LINK (1 << 8)
+/* permission to delete from the directory */
+#define MAY_UNLINK (1 << 9)
+/* source's permission for rename */
+#define MAY_RENAME_SRC (1 << 10)
+/* target's permission for rename */
+#define MAY_RENAME_TAR (1 << 11)
+/* partial (parent's) VTX permission check */
+#define MAY_VTX_PART (1 << 12)
+/* full VTX permission check */
+#define MAY_VTX_FULL (1 << 13)
+/* lfs rgetfacl permission check */
+#define MAY_RGETFACL (1 << 14)
+
+enum {
+ MDS_CHECK_SPLIT = 1 << 0,
+ MDS_CROSS_REF = 1 << 1,
+ MDS_VTX_BYPASS = 1 << 2,
+ MDS_PERM_BYPASS = 1 << 3,
+ MDS_SOM = 1 << 4,
+ MDS_QUOTA_IGNORE = 1 << 5,
+ MDS_CLOSE_CLEANUP = 1 << 6,
+ MDS_KEEP_ORPHAN = 1 << 7,
+ MDS_RECOV_OPEN = 1 << 8,
+ MDS_DATA_MODIFIED = 1 << 9,
+ MDS_CREATE_VOLATILE = 1 << 10,
+ MDS_OWNEROVERRIDE = 1 << 11,
+};
+
+/* instance of mdt_reint_rec */
+struct mdt_rec_create {
+ __u32 cr_opcode;
+ __u32 cr_cap;
+ __u32 cr_fsuid;
+ __u32 cr_fsuid_h;
+ __u32 cr_fsgid;
+ __u32 cr_fsgid_h;
+ __u32 cr_suppgid1;
+ __u32 cr_suppgid1_h;
+ __u32 cr_suppgid2;
+ __u32 cr_suppgid2_h;
+ struct lu_fid cr_fid1;
+ struct lu_fid cr_fid2;
+ struct lustre_handle cr_old_handle; /* handle in case of open replay */
+ obd_time cr_time;
+ __u64 cr_rdev;
+ __u64 cr_ioepoch;
+ __u64 cr_padding_1; /* rr_blocks */
+ __u32 cr_mode;
+ __u32 cr_bias;
+ /* use of helpers set/get_mrc_cr_flags() is needed to access
+ * 64 bits cr_flags [cr_flags_l, cr_flags_h], this is done to
+ * extend cr_flags size without breaking 1.8 compat */
+ __u32 cr_flags_l; /* for use with open, low 32 bits */
+ __u32 cr_flags_h; /* for use with open, high 32 bits */
+ __u32 cr_umask; /* umask for create */
+ __u32 cr_padding_4; /* rr_padding_4 */
+};
+
+static inline void set_mrc_cr_flags(struct mdt_rec_create *mrc, __u64 flags)
+{
+ mrc->cr_flags_l = (__u32)(flags & 0xFFFFFFFFUll);
+ mrc->cr_flags_h = (__u32)(flags >> 32);
+}
+
+static inline __u64 get_mrc_cr_flags(struct mdt_rec_create *mrc)
+{
+ return ((__u64)(mrc->cr_flags_l) | ((__u64)mrc->cr_flags_h << 32));
+}
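Illustrative use of the helpers above (not part of the patch; the example function name is made up): open flags wider than 32 bits survive the low/high split, which is the point of the cr_flags_l/cr_flags_h pair.

static inline void mrc_cr_flags_example(struct mdt_rec_create *mrc)
{
        __u64 open_flags = MDS_OPEN_CREAT | MDS_OPEN_LOCK | MDS_OPEN_VOLATILE;

        set_mrc_cr_flags(mrc, open_flags);
        /* get_mrc_cr_flags(mrc) == open_flags, even though MDS_OPEN_VOLATILE
         * (0400000000000ULL) does not fit into the 32-bit cr_flags_l alone. */
}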
+
+/* instance of mdt_reint_rec */
+struct mdt_rec_link {
+ __u32 lk_opcode;
+ __u32 lk_cap;
+ __u32 lk_fsuid;
+ __u32 lk_fsuid_h;
+ __u32 lk_fsgid;
+ __u32 lk_fsgid_h;
+ __u32 lk_suppgid1;
+ __u32 lk_suppgid1_h;
+ __u32 lk_suppgid2;
+ __u32 lk_suppgid2_h;
+ struct lu_fid lk_fid1;
+ struct lu_fid lk_fid2;
+ obd_time lk_time;
+ __u64 lk_padding_1; /* rr_atime */
+ __u64 lk_padding_2; /* rr_ctime */
+ __u64 lk_padding_3; /* rr_size */
+ __u64 lk_padding_4; /* rr_blocks */
+ __u32 lk_bias;
+ __u32 lk_padding_5; /* rr_mode */
+ __u32 lk_padding_6; /* rr_flags */
+ __u32 lk_padding_7; /* rr_padding_2 */
+ __u32 lk_padding_8; /* rr_padding_3 */
+ __u32 lk_padding_9; /* rr_padding_4 */
+};
+
+/* instance of mdt_reint_rec */
+struct mdt_rec_unlink {
+ __u32 ul_opcode;
+ __u32 ul_cap;
+ __u32 ul_fsuid;
+ __u32 ul_fsuid_h;
+ __u32 ul_fsgid;
+ __u32 ul_fsgid_h;
+ __u32 ul_suppgid1;
+ __u32 ul_suppgid1_h;
+ __u32 ul_suppgid2;
+ __u32 ul_suppgid2_h;
+ struct lu_fid ul_fid1;
+ struct lu_fid ul_fid2;
+ obd_time ul_time;
+ __u64 ul_padding_2; /* rr_atime */
+ __u64 ul_padding_3; /* rr_ctime */
+ __u64 ul_padding_4; /* rr_size */
+ __u64 ul_padding_5; /* rr_blocks */
+ __u32 ul_bias;
+ __u32 ul_mode;
+ __u32 ul_padding_6; /* rr_flags */
+ __u32 ul_padding_7; /* rr_padding_2 */
+ __u32 ul_padding_8; /* rr_padding_3 */
+ __u32 ul_padding_9; /* rr_padding_4 */
+};
+
+/* instance of mdt_reint_rec */
+struct mdt_rec_rename {
+ __u32 rn_opcode;
+ __u32 rn_cap;
+ __u32 rn_fsuid;
+ __u32 rn_fsuid_h;
+ __u32 rn_fsgid;
+ __u32 rn_fsgid_h;
+ __u32 rn_suppgid1;
+ __u32 rn_suppgid1_h;
+ __u32 rn_suppgid2;
+ __u32 rn_suppgid2_h;
+ struct lu_fid rn_fid1;
+ struct lu_fid rn_fid2;
+ obd_time rn_time;
+ __u64 rn_padding_1; /* rr_atime */
+ __u64 rn_padding_2; /* rr_ctime */
+ __u64 rn_padding_3; /* rr_size */
+ __u64 rn_padding_4; /* rr_blocks */
+ __u32 rn_bias; /* some operation flags */
+ __u32 rn_mode; /* cross-ref rename has mode */
+ __u32 rn_padding_5; /* rr_flags */
+ __u32 rn_padding_6; /* rr_padding_2 */
+ __u32 rn_padding_7; /* rr_padding_3 */
+ __u32 rn_padding_8; /* rr_padding_4 */
+};
+
+/* instance of mdt_reint_rec */
+struct mdt_rec_setxattr {
+ __u32 sx_opcode;
+ __u32 sx_cap;
+ __u32 sx_fsuid;
+ __u32 sx_fsuid_h;
+ __u32 sx_fsgid;
+ __u32 sx_fsgid_h;
+ __u32 sx_suppgid1;
+ __u32 sx_suppgid1_h;
+ __u32 sx_suppgid2;
+ __u32 sx_suppgid2_h;
+ struct lu_fid sx_fid;
+ __u64 sx_padding_1; /* These three are rr_fid2 */
+ __u32 sx_padding_2;
+ __u32 sx_padding_3;
+ __u64 sx_valid;
+ obd_time sx_time;
+ __u64 sx_padding_5; /* rr_ctime */
+ __u64 sx_padding_6; /* rr_size */
+ __u64 sx_padding_7; /* rr_blocks */
+ __u32 sx_size;
+ __u32 sx_flags;
+ __u32 sx_padding_8; /* rr_flags */
+ __u32 sx_padding_9; /* rr_padding_2 */
+ __u32 sx_padding_10; /* rr_padding_3 */
+ __u32 sx_padding_11; /* rr_padding_4 */
+};
+
+/*
+ * mdt_rec_reint is the template for all mdt_reint_xxx structures.
+ * Do NOT change the size of any member, otherwise the values
+ * will be broken by lustre_swab_mdt_rec_reint().
+ *
+ * If you add new members to other mdt_reint_xxx structures and need to use
+ * the rr_padding_x fields, then update lustre_swab_mdt_rec_reint() as well.
+ */
+struct mdt_rec_reint {
+ __u32 rr_opcode;
+ __u32 rr_cap;
+ __u32 rr_fsuid;
+ __u32 rr_fsuid_h;
+ __u32 rr_fsgid;
+ __u32 rr_fsgid_h;
+ __u32 rr_suppgid1;
+ __u32 rr_suppgid1_h;
+ __u32 rr_suppgid2;
+ __u32 rr_suppgid2_h;
+ struct lu_fid rr_fid1;
+ struct lu_fid rr_fid2;
+ obd_time rr_mtime;
+ obd_time rr_atime;
+ obd_time rr_ctime;
+ __u64 rr_size;
+ __u64 rr_blocks;
+ __u32 rr_bias;
+ __u32 rr_mode;
+ __u32 rr_flags;
+ __u32 rr_flags_h;
+ __u32 rr_umask;
+ __u32 rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
+};
+
+extern void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
+
+struct lmv_desc {
+ __u32 ld_tgt_count; /* how many MDS's */
+ __u32 ld_active_tgt_count; /* how many active */
+ __u32 ld_default_stripe_count; /* how many objects are used */
+ __u32 ld_pattern; /* default MEA_MAGIC_* */
+ __u64 ld_default_hash_size;
+ __u64 ld_padding_1; /* also fix lustre_swab_lmv_desc */
+ __u32 ld_padding_2; /* also fix lustre_swab_lmv_desc */
+ __u32 ld_qos_maxage; /* in seconds */
+ __u32 ld_padding_3; /* also fix lustre_swab_lmv_desc */
+ __u32 ld_padding_4; /* also fix lustre_swab_lmv_desc */
+ struct obd_uuid ld_uuid;
+};
+
+extern void lustre_swab_lmv_desc (struct lmv_desc *ld);
+
+/* TODO: lmv_stripe_md should contain mds capabilities for all slave fids */
+struct lmv_stripe_md {
+ __u32 mea_magic;
+ __u32 mea_count;
+ __u32 mea_master;
+ __u32 mea_padding;
+ char mea_pool_name[LOV_MAXPOOLNAME];
+ struct lu_fid mea_ids[0];
+};
+
+extern void lustre_swab_lmv_stripe_md(struct lmv_stripe_md *mea);
+
+/* lmv structures */
+#define MEA_MAGIC_LAST_CHAR 0xb2221ca1
+#define MEA_MAGIC_ALL_CHARS 0xb222a11c
+#define MEA_MAGIC_HASH_SEGMENT 0xb222a11b
+
+#define MAX_HASH_SIZE_32 0x7fffffffUL
+#define MAX_HASH_SIZE 0x7fffffffffffffffULL
+#define MAX_HASH_HIGHEST_BIT 0x1000000000000000ULL
+
+enum fld_rpc_opc {
+ FLD_QUERY = 900,
+ FLD_LAST_OPC,
+ FLD_FIRST_OPC = FLD_QUERY
+};
+
+enum seq_rpc_opc {
+ SEQ_QUERY = 700,
+ SEQ_LAST_OPC,
+ SEQ_FIRST_OPC = SEQ_QUERY
+};
+
+enum seq_op {
+ SEQ_ALLOC_SUPER = 0,
+ SEQ_ALLOC_META = 1
+};
+
+/*
+ * LOV data structures
+ */
+
+#define LOV_MAX_UUID_BUFFER_SIZE 8192
+/* The size of the buffer the lov/mdc reserves for the
+ * array of UUIDs returned by the MDS. With the current
+ * protocol, this will limit the max number of OSTs per LOV */
+
+#define LOV_DESC_MAGIC 0xB0CCDE5C
+
+/* LOV settings descriptor (should only contain static info) */
+struct lov_desc {
+ __u32 ld_tgt_count; /* how many OBD's */
+ __u32 ld_active_tgt_count; /* how many active */
+ __u32 ld_default_stripe_count; /* how many objects are used */
+ __u32 ld_pattern; /* default PATTERN_RAID0 */
+ __u64 ld_default_stripe_size; /* in bytes */
+ __u64 ld_default_stripe_offset; /* in bytes */
+ __u32 ld_padding_0; /* unused */
+ __u32 ld_qos_maxage; /* in seconds */
+ __u32 ld_padding_1; /* also fix lustre_swab_lov_desc */
+ __u32 ld_padding_2; /* also fix lustre_swab_lov_desc */
+ struct obd_uuid ld_uuid;
+};
+
+#define ld_magic ld_active_tgt_count /* for swabbing from llogs */
+
+extern void lustre_swab_lov_desc (struct lov_desc *ld);
+
+/*
+ * LDLM requests:
+ */
+/* opcodes -- MUST be distinct from OST/MDS opcodes */
+typedef enum {
+ LDLM_ENQUEUE = 101,
+ LDLM_CONVERT = 102,
+ LDLM_CANCEL = 103,
+ LDLM_BL_CALLBACK = 104,
+ LDLM_CP_CALLBACK = 105,
+ LDLM_GL_CALLBACK = 106,
+ LDLM_SET_INFO = 107,
+ LDLM_LAST_OPC
+} ldlm_cmd_t;
+#define LDLM_FIRST_OPC LDLM_ENQUEUE
+
+#define RES_NAME_SIZE 4
+struct ldlm_res_id {
+ __u64 name[RES_NAME_SIZE];
+};
+
+#define DLDLMRES "["LPX64":"LPX64":"LPX64"]."LPX64i
+#define PLDLMRES(res) (res)->lr_name.name[0], (res)->lr_name.name[1], \
+ (res)->lr_name.name[2], (res)->lr_name.name[3]
+
+extern void lustre_swab_ldlm_res_id (struct ldlm_res_id *id);
+
+static inline int ldlm_res_eq(const struct ldlm_res_id *res0,
+ const struct ldlm_res_id *res1)
+{
+ return !memcmp(res0, res1, sizeof(*res0));
+}
+
+/* lock types */
+typedef enum {
+ LCK_MINMODE = 0,
+ LCK_EX = 1,
+ LCK_PW = 2,
+ LCK_PR = 4,
+ LCK_CW = 8,
+ LCK_CR = 16,
+ LCK_NL = 32,
+ LCK_GROUP = 64,
+ LCK_COS = 128,
+ LCK_MAXMODE
+} ldlm_mode_t;
+
+#define LCK_MODE_NUM 8
+
+typedef enum {
+ LDLM_PLAIN = 10,
+ LDLM_EXTENT = 11,
+ LDLM_FLOCK = 12,
+ LDLM_IBITS = 13,
+ LDLM_MAX_TYPE
+} ldlm_type_t;
+
+#define LDLM_MIN_TYPE LDLM_PLAIN
+
+struct ldlm_extent {
+ __u64 start;
+ __u64 end;
+ __u64 gid;
+};
+
+static inline int ldlm_extent_overlap(struct ldlm_extent *ex1,
+ struct ldlm_extent *ex2)
+{
+ return (ex1->start <= ex2->end) && (ex2->start <= ex1->end);
+}
+
+/* check if @ex1 contains @ex2 */
+static inline int ldlm_extent_contain(struct ldlm_extent *ex1,
+ struct ldlm_extent *ex2)
+{
+ return (ex1->start <= ex2->start) && (ex1->end >= ex2->end);
+}
+
+struct ldlm_inodebits {
+ __u64 bits;
+};
+
+struct ldlm_flock_wire {
+ __u64 lfw_start;
+ __u64 lfw_end;
+ __u64 lfw_owner;
+ __u32 lfw_padding;
+ __u32 lfw_pid;
+};
+
+/* it's important that the fields of the ldlm_extent structure match
+ * the first fields of the ldlm_flock structure because there is only
+ * one ldlm_swab routine to process the ldlm_policy_data_t union. if
+ * this ever changes we will need to swab the union differently based
+ * on the resource type. */
+
+typedef union {
+ struct ldlm_extent l_extent;
+ struct ldlm_flock_wire l_flock;
+ struct ldlm_inodebits l_inodebits;
+} ldlm_wire_policy_data_t;
+
+extern void lustre_swab_ldlm_policy_data (ldlm_wire_policy_data_t *d);
+
+union ldlm_gl_desc {
+ struct ldlm_gl_lquota_desc lquota_desc;
+};
+
+extern void lustre_swab_gl_desc(union ldlm_gl_desc *);
+
+struct ldlm_intent {
+ __u64 opc;
+};
+
+extern void lustre_swab_ldlm_intent (struct ldlm_intent *i);
+
+struct ldlm_resource_desc {
+ ldlm_type_t lr_type;
+ __u32 lr_padding; /* also fix lustre_swab_ldlm_resource_desc */
+ struct ldlm_res_id lr_name;
+};
+
+extern void lustre_swab_ldlm_resource_desc (struct ldlm_resource_desc *r);
+
+struct ldlm_lock_desc {
+ struct ldlm_resource_desc l_resource;
+ ldlm_mode_t l_req_mode;
+ ldlm_mode_t l_granted_mode;
+ ldlm_wire_policy_data_t l_policy_data;
+};
+
+extern void lustre_swab_ldlm_lock_desc (struct ldlm_lock_desc *l);
+
+#define LDLM_LOCKREQ_HANDLES 2
+#define LDLM_ENQUEUE_CANCEL_OFF 1
+
+struct ldlm_request {
+ __u32 lock_flags;
+ __u32 lock_count;
+ struct ldlm_lock_desc lock_desc;
+ struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES];
+};
+
+extern void lustre_swab_ldlm_request (struct ldlm_request *rq);
+
+/* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available.
+ * Otherwise, 2 are available. */
+#define ldlm_request_bufsize(count,type) \
+({ \
+ int _avail = LDLM_LOCKREQ_HANDLES; \
+ _avail -= (type == LDLM_ENQUEUE ? LDLM_ENQUEUE_CANCEL_OFF : 0); \
+ sizeof(struct ldlm_request) + \
+ (count > _avail ? count - _avail : 0) * \
+ sizeof(struct lustre_handle); \
+})
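A worked instance of the macro above (illustrative, not part of the patch; the wrapper name is invented): sizing an LDLM_ENQUEUE request that has to carry five lock handles.

static inline size_t ldlm_enqueue_bufsize_example(void)
{
        /* type == LDLM_ENQUEUE: one of the two embedded handle slots is
         * already occupied, so _avail = 2 - 1 = 1 and four extra handles
         * are appended:
         *   sizeof(struct ldlm_request) + 4 * sizeof(struct lustre_handle)
         * For any other opcode _avail stays 2 and only three are added. */
        return ldlm_request_bufsize(5, LDLM_ENQUEUE);
}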
+
+struct ldlm_reply {
+ __u32 lock_flags;
+ __u32 lock_padding; /* also fix lustre_swab_ldlm_reply */
+ struct ldlm_lock_desc lock_desc;
+ struct lustre_handle lock_handle;
+ __u64 lock_policy_res1;
+ __u64 lock_policy_res2;
+};
+
+extern void lustre_swab_ldlm_reply (struct ldlm_reply *r);
+
+#define ldlm_flags_to_wire(flags) ((__u32)(flags))
+#define ldlm_flags_from_wire(flags) ((__u64)(flags))
+
+/*
+ * Opcodes for mountconf (mgs and mgc)
+ */
+typedef enum {
+ MGS_CONNECT = 250,
+ MGS_DISCONNECT,
+ MGS_EXCEPTION, /* node died, etc. */
+ MGS_TARGET_REG, /* whenever target starts up */
+ MGS_TARGET_DEL,
+ MGS_SET_INFO,
+ MGS_CONFIG_READ,
+ MGS_LAST_OPC
+} mgs_cmd_t;
+#define MGS_FIRST_OPC MGS_CONNECT
+
+#define MGS_PARAM_MAXLEN 1024
+#define KEY_SET_INFO "set_info"
+
+struct mgs_send_param {
+ char mgs_param[MGS_PARAM_MAXLEN];
+};
+
+/* We pass this info to the MGS so it can write config logs */
+#define MTI_NAME_MAXLEN 64
+#define MTI_PARAM_MAXLEN 4096
+#define MTI_NIDS_MAX 32
+struct mgs_target_info {
+ __u32 mti_lustre_ver;
+ __u32 mti_stripe_index;
+ __u32 mti_config_ver;
+ __u32 mti_flags;
+ __u32 mti_nid_count;
+ __u32 mti_instance; /* Running instance of target */
+ char mti_fsname[MTI_NAME_MAXLEN];
+ char mti_svname[MTI_NAME_MAXLEN];
+ char mti_uuid[sizeof(struct obd_uuid)];
+ __u64 mti_nids[MTI_NIDS_MAX]; /* host nids (lnet_nid_t)*/
+ char mti_params[MTI_PARAM_MAXLEN];
+};
+extern void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
+
+struct mgs_nidtbl_entry {
+ __u64 mne_version; /* table version of this entry */
+ __u32 mne_instance; /* target instance # */
+ __u32 mne_index; /* target index */
+ __u32 mne_length; /* length of this entry, in bytes */
+ __u8 mne_type; /* target type LDD_F_SV_TYPE_OST/MDT */
+ __u8 mne_nid_type; /* type of NID (must be zero); reserved for IPv6 */
+ __u8 mne_nid_size; /* size of each NID, in bytes */
+ __u8 mne_nid_count; /* # of NIDs in buffer */
+ union {
+ lnet_nid_t nids[0]; /* variable size buffer for NIDs. */
+ } u;
+};
+extern void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);
+
+struct mgs_config_body {
+ char mcb_name[MTI_NAME_MAXLEN]; /* logname */
+ __u64 mcb_offset; /* next index of config log to request */
+ __u16 mcb_type; /* type of log: CONFIG_T_[CONFIG|RECOVER] */
+ __u8 mcb_reserved;
+ __u8 mcb_bits; /* bits unit size of config log */
+ __u32 mcb_units; /* # of units for bulk transfer */
+};
+extern void lustre_swab_mgs_config_body(struct mgs_config_body *body);
+
+struct mgs_config_res {
+ __u64 mcr_offset; /* index of last config log */
+ __u64 mcr_size; /* size of the log */
+};
+extern void lustre_swab_mgs_config_res(struct mgs_config_res *body);
+
+/* Config marker flags (in config log) */
+#define CM_START 0x01
+#define CM_END 0x02
+#define CM_SKIP 0x04
+#define CM_UPGRADE146 0x08
+#define CM_EXCLUDE 0x10
+#define CM_START_SKIP (CM_START | CM_SKIP)
+
+struct cfg_marker {
+ __u32 cm_step; /* aka config version */
+ __u32 cm_flags;
+ __u32 cm_vers; /* lustre release version number */
+ __u32 cm_padding; /* 64 bit align */
+ obd_time cm_createtime; /* when this record was first created */
+ obd_time cm_canceltime; /* when this record is no longer valid */
+ char cm_tgtname[MTI_NAME_MAXLEN];
+ char cm_comment[MTI_NAME_MAXLEN];
+};
+
+extern void lustre_swab_cfg_marker(struct cfg_marker *marker,
+ int swab, int size);
+
+/*
+ * Opcodes for multiple servers.
+ */
+
+typedef enum {
+ OBD_PING = 400,
+ OBD_LOG_CANCEL,
+ OBD_QC_CALLBACK,
+ OBD_IDX_READ,
+ OBD_LAST_OPC
+} obd_cmd_t;
+#define OBD_FIRST_OPC OBD_PING
+
+/* catalog of log objects */
+
+/** Identifier for a single log object */
+struct llog_logid {
+ struct ost_id lgl_oi;
+ __u32 lgl_ogen;
+} __attribute__((packed));
+
+/** Records written to the CATALOGS list */
+#define CATLIST "CATALOGS"
+struct llog_catid {
+ struct llog_logid lci_logid;
+ __u32 lci_padding1;
+ __u32 lci_padding2;
+ __u32 lci_padding3;
+} __attribute__((packed));
+
+/* Log data record types - there is no specific reason that these need to
+ * be related to the RPC opcodes, but no reason not to (may be handy later?)
+ */
+#define LLOG_OP_MAGIC 0x10600000
+#define LLOG_OP_MASK 0xfff00000
+
+typedef enum {
+ LLOG_PAD_MAGIC = LLOG_OP_MAGIC | 0x00000,
+ OST_SZ_REC = LLOG_OP_MAGIC | 0x00f00,
+ /* OST_RAID1_REC = LLOG_OP_MAGIC | 0x01000, never used */
+ MDS_UNLINK_REC = LLOG_OP_MAGIC | 0x10000 | (MDS_REINT << 8) |
+ REINT_UNLINK, /* obsolete after 2.5.0 */
+ MDS_UNLINK64_REC = LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
+ REINT_UNLINK,
+ /* MDS_SETATTR_REC = LLOG_OP_MAGIC | 0x12401, obsolete 1.8.0 */
+ MDS_SETATTR64_REC = LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
+ REINT_SETATTR,
+ OBD_CFG_REC = LLOG_OP_MAGIC | 0x20000,
+ /* PTL_CFG_REC = LLOG_OP_MAGIC | 0x30000, obsolete 1.4.0 */
+ LLOG_GEN_REC = LLOG_OP_MAGIC | 0x40000,
+ /* LLOG_JOIN_REC = LLOG_OP_MAGIC | 0x50000, obsolete 1.8.0 */
+ CHANGELOG_REC = LLOG_OP_MAGIC | 0x60000,
+ CHANGELOG_USER_REC = LLOG_OP_MAGIC | 0x70000,
+ HSM_AGENT_REC = LLOG_OP_MAGIC | 0x80000,
+ LLOG_HDR_MAGIC = LLOG_OP_MAGIC | 0x45539,
+ LLOG_LOGID_MAGIC = LLOG_OP_MAGIC | 0x4553b,
+} llog_op_type;
+
+#define LLOG_REC_HDR_NEEDS_SWABBING(r) \
+ (((r)->lrh_type & __swab32(LLOG_OP_MASK)) == __swab32(LLOG_OP_MAGIC))
+
+/** Log record header - stored in little endian order.
+ * Each record must start with this struct, end with a llog_rec_tail,
+ * and be a multiple of 256 bits in size.
+ */
+struct llog_rec_hdr {
+ __u32 lrh_len;
+ __u32 lrh_index;
+ __u32 lrh_type;
+ __u32 lrh_id;
+};
+
+struct llog_rec_tail {
+ __u32 lrt_len;
+ __u32 lrt_index;
+};
+
+/* The data follows just after the header */
+#define REC_DATA(ptr) \
+ ((void *)((char *)ptr + sizeof(struct llog_rec_hdr)))
+
+#define REC_DATA_LEN(rec) \
+ (rec->lrh_len - sizeof(struct llog_rec_hdr) - \
+ sizeof(struct llog_rec_tail))
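A small sketch (not from the patch; the helper name is made up) showing how the two accessors above combine when walking a record. Byte swapping, if needed on a big-endian reader, is a separate step via LLOG_REC_HDR_NEEDS_SWABBING and the swab helpers declared later in this header.

static inline void *llog_rec_payload(struct llog_rec_hdr *rec, __u32 *len)
{
        /* Records are stored little-endian; swab first on big-endian hosts. */
        *len = REC_DATA_LEN(rec);       /* bytes between header and tail */
        return REC_DATA(rec);           /* payload starts right after the header */
}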
+
+struct llog_logid_rec {
+ struct llog_rec_hdr lid_hdr;
+ struct llog_logid lid_id;
+ __u32 lid_padding1;
+ __u64 lid_padding2;
+ __u64 lid_padding3;
+ struct llog_rec_tail lid_tail;
+} __attribute__((packed));
+
+struct llog_unlink_rec {
+ struct llog_rec_hdr lur_hdr;
+ obd_id lur_oid;
+ obd_count lur_oseq;
+ obd_count lur_count;
+ struct llog_rec_tail lur_tail;
+} __attribute__((packed));
+
+struct llog_unlink64_rec {
+ struct llog_rec_hdr lur_hdr;
+ struct lu_fid lur_fid;
+ obd_count lur_count; /* number of lost precreated objects to destroy */
+ __u32 lur_padding1;
+ __u64 lur_padding2;
+ __u64 lur_padding3;
+ struct llog_rec_tail lur_tail;
+} __attribute__((packed));
+
+struct llog_setattr64_rec {
+ struct llog_rec_hdr lsr_hdr;
+ struct ost_id lsr_oi;
+ __u32 lsr_uid;
+ __u32 lsr_uid_h;
+ __u32 lsr_gid;
+ __u32 lsr_gid_h;
+ __u64 lsr_padding;
+ struct llog_rec_tail lsr_tail;
+} __attribute__((packed));
+
+struct llog_size_change_rec {
+ struct llog_rec_hdr lsc_hdr;
+ struct ll_fid lsc_fid;
+ __u32 lsc_ioepoch;
+ __u32 lsc_padding1;
+ __u64 lsc_padding2;
+ __u64 lsc_padding3;
+ struct llog_rec_tail lsc_tail;
+} __attribute__((packed));
+
+#define CHANGELOG_MAGIC 0xca103000
+
+/** \a changelog_rec_type's that can't be masked */
+#define CHANGELOG_MINMASK (1 << CL_MARK)
+/** bits covering all \a changelog_rec_type's */
+#define CHANGELOG_ALLMASK 0XFFFFFFFF
+/** default \a changelog_rec_type mask */
+#define CHANGELOG_DEFMASK CHANGELOG_ALLMASK & ~(1 << CL_ATIME | 1 << CL_CLOSE)
+
+/* changelog llog name, needed by client replicators */
+#define CHANGELOG_CATALOG "changelog_catalog"
+
+struct changelog_setinfo {
+ __u64 cs_recno;
+ __u32 cs_id;
+} __attribute__((packed));
+
+/** changelog record */
+struct llog_changelog_rec {
+ struct llog_rec_hdr cr_hdr;
+ struct changelog_rec cr;
+ struct llog_rec_tail cr_tail; /**< for_sizeof_only */
+} __attribute__((packed));
+
+struct llog_changelog_ext_rec {
+ struct llog_rec_hdr cr_hdr;
+ struct changelog_ext_rec cr;
+ struct llog_rec_tail cr_tail; /**< for_sizeof_only */
+} __attribute__((packed));
+
+#define CHANGELOG_USER_PREFIX "cl"
+
+struct llog_changelog_user_rec {
+ struct llog_rec_hdr cur_hdr;
+ __u32 cur_id;
+ __u32 cur_padding;
+ __u64 cur_endrec;
+ struct llog_rec_tail cur_tail;
+} __attribute__((packed));
+
+enum agent_req_status {
+ ARS_WAITING,
+ ARS_STARTED,
+ ARS_FAILED,
+ ARS_CANCELED,
+ ARS_SUCCEED,
+};
+
+static inline char *agent_req_status2name(enum agent_req_status ars)
+{
+ switch (ars) {
+ case ARS_WAITING:
+ return "WAITING";
+ case ARS_STARTED:
+ return "STARTED";
+ case ARS_FAILED:
+ return "FAILED";
+ case ARS_CANCELED:
+ return "CANCELED";
+ case ARS_SUCCEED:
+ return "SUCCEED";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static inline bool agent_req_in_final_state(enum agent_req_status ars)
+{
+ return ((ars == ARS_SUCCEED) || (ars == ARS_FAILED) ||
+ (ars == ARS_CANCELED));
+}
+
+struct llog_agent_req_rec {
+ struct llog_rec_hdr arr_hdr; /**< record header */
+ __u32 arr_status; /**< status of the request */
+ /* must match enum
+ * agent_req_status */
+ __u32 arr_archive_id; /**< backend archive number */
+ __u64 arr_flags; /**< req flags */
+ __u64 arr_compound_id; /**< compound cookie */
+ __u64 arr_req_create; /**< req. creation time */
+ __u64 arr_req_change; /**< req. status change time */
+ struct hsm_action_item arr_hai; /**< req. to the agent */
+ struct llog_rec_tail arr_tail; /**< record tail, for_sizeof_only */
+} __attribute__((packed));
+
+/* Old llog gen for compatibility */
+struct llog_gen {
+ __u64 mnt_cnt;
+ __u64 conn_cnt;
+} __attribute__((packed));
+
+struct llog_gen_rec {
+ struct llog_rec_hdr lgr_hdr;
+ struct llog_gen lgr_gen;
+ __u64 padding1;
+ __u64 padding2;
+ __u64 padding3;
+ struct llog_rec_tail lgr_tail;
+};
+
+/* On-disk header structure of each log object, stored in little endian order */
+#define LLOG_CHUNK_SIZE 8192
+#define LLOG_HEADER_SIZE (96)
+#define LLOG_BITMAP_BYTES (LLOG_CHUNK_SIZE - LLOG_HEADER_SIZE)
+
+#define LLOG_MIN_REC_SIZE (24) /* round(llog_rec_hdr + llog_rec_tail) */
+
+/* flags for the logs */
+enum llog_flag {
+ LLOG_F_ZAP_WHEN_EMPTY = 0x1,
+ LLOG_F_IS_CAT = 0x2,
+ LLOG_F_IS_PLAIN = 0x4,
+};
+
+struct llog_log_hdr {
+ struct llog_rec_hdr llh_hdr;
+ obd_time llh_timestamp;
+ __u32 llh_count;
+ __u32 llh_bitmap_offset;
+ __u32 llh_size;
+ __u32 llh_flags;
+ __u32 llh_cat_idx;
+ /* for a catalog the first plain slot is next to it */
+ struct obd_uuid llh_tgtuuid;
+ __u32 llh_reserved[LLOG_HEADER_SIZE/sizeof(__u32) - 23];
+ __u32 llh_bitmap[LLOG_BITMAP_BYTES/sizeof(__u32)];
+ struct llog_rec_tail llh_tail;
+} __attribute__((packed));
+
+#define LLOG_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \
+ llh->llh_bitmap_offset - \
+ sizeof(llh->llh_tail)) * 8)
+
+/** log cookies are used to reference a specific log file and a record therein */
+struct llog_cookie {
+ struct llog_logid lgc_lgl;
+ __u32 lgc_subsys;
+ __u32 lgc_index;
+ __u32 lgc_padding;
+} __attribute__((packed));
+
+/** llog protocol */
+enum llogd_rpc_ops {
+ LLOG_ORIGIN_HANDLE_CREATE = 501,
+ LLOG_ORIGIN_HANDLE_NEXT_BLOCK = 502,
+ LLOG_ORIGIN_HANDLE_READ_HEADER = 503,
+ LLOG_ORIGIN_HANDLE_WRITE_REC = 504,
+ LLOG_ORIGIN_HANDLE_CLOSE = 505,
+ LLOG_ORIGIN_CONNECT = 506,
+ LLOG_CATINFO = 507, /* deprecated */
+ LLOG_ORIGIN_HANDLE_PREV_BLOCK = 508,
+ LLOG_ORIGIN_HANDLE_DESTROY = 509, /* for destroy llog object*/
+ LLOG_LAST_OPC,
+ LLOG_FIRST_OPC = LLOG_ORIGIN_HANDLE_CREATE
+};
+
+struct llogd_body {
+ struct llog_logid lgd_logid;
+ __u32 lgd_ctxt_idx;
+ __u32 lgd_llh_flags;
+ __u32 lgd_index;
+ __u32 lgd_saved_index;
+ __u32 lgd_len;
+ __u64 lgd_cur_offset;
+} __attribute__((packed));
+
+struct llogd_conn_body {
+ struct llog_gen lgdc_gen;
+ struct llog_logid lgdc_logid;
+ __u32 lgdc_ctxt_idx;
+} __attribute__((packed));
+
+/* Note: 64-bit types are 64-bit aligned in structure */
+struct obdo {
+ obd_valid o_valid; /* hot fields in this obdo */
+ struct ost_id o_oi;
+ obd_id o_parent_seq;
+ obd_size o_size; /* o_size-o_blocks == ost_lvb */
+ obd_time o_mtime;
+ obd_time o_atime;
+ obd_time o_ctime;
+ obd_blocks o_blocks; /* brw: cli sent cached bytes */
+ obd_size o_grant;
+
+ /* 32-bit fields start here: keep an even number of them via padding */
+ obd_blksize o_blksize; /* optimal IO blocksize */
+ obd_mode o_mode; /* brw: cli sent cache remain */
+ obd_uid o_uid;
+ obd_gid o_gid;
+ obd_flag o_flags;
+ obd_count o_nlink; /* brw: checksum */
+ obd_count o_parent_oid;
+ obd_count o_misc; /* brw: o_dropped */
+
+ __u64 o_ioepoch; /* epoch in ost writes */
+ __u32 o_stripe_idx; /* holds stripe idx */
+ __u32 o_parent_ver;
+ struct lustre_handle o_handle; /* brw: lock handle to prolong
+ * locks */
+ struct llog_cookie o_lcookie; /* destroy: unlink cookie from
+ * MDS */
+ __u32 o_uid_h;
+ __u32 o_gid_h;
+
+ __u64 o_data_version; /* getattr: sum of iversion for
+ * each stripe.
+ * brw: grant space consumed on
+ * the client for the write */
+ __u64 o_padding_4;
+ __u64 o_padding_5;
+ __u64 o_padding_6;
+};
+
+#define o_dirty o_blocks
+#define o_undirty o_mode
+#define o_dropped o_misc
+#define o_cksum o_nlink
+#define o_grant_used o_data_version
+
+static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd,
+ struct obdo *wobdo, struct obdo *lobdo)
+{
+ memcpy(wobdo, lobdo, sizeof(*lobdo));
+ wobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
+ if (ocd == NULL)
+ return;
+
+ if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
+ fid_seq_is_echo(ostid_seq(&lobdo->o_oi))) {
+ /* Currently OBD_FL_OSTID will only be used when a 2.4 echo
+ * client communicates with a pre-2.4 server */
+ wobdo->o_oi.oi.oi_id = fid_oid(&lobdo->o_oi.oi_fid);
+ wobdo->o_oi.oi.oi_seq = fid_seq(&lobdo->o_oi.oi_fid);
+ }
+}
+
+static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd,
+ struct obdo *lobdo, struct obdo *wobdo)
+{
+ obd_flag local_flags = 0;
+
+ if (lobdo->o_valid & OBD_MD_FLFLAGS)
+ local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK;
+
+ LASSERT(!(wobdo->o_flags & OBD_FL_LOCAL_MASK));
+
+ memcpy(lobdo, wobdo, sizeof(*lobdo));
+ if (local_flags != 0) {
+ lobdo->o_valid |= OBD_MD_FLFLAGS;
+ lobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
+ lobdo->o_flags |= local_flags;
+ }
+ if (ocd == NULL)
+ return;
+
+ if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
+ fid_seq_is_echo(wobdo->o_oi.oi.oi_seq)) {
+ /* see above */
+ lobdo->o_oi.oi_fid.f_seq = wobdo->o_oi.oi.oi_seq;
+ lobdo->o_oi.oi_fid.f_oid = wobdo->o_oi.oi.oi_id;
+ lobdo->o_oi.oi_fid.f_ver = 0;
+ }
+}
+
+extern void lustre_swab_obdo (struct obdo *o);
+
+/* request structure for OST's */
+struct ost_body {
+ struct obdo oa;
+};
+
+/* Key for FIEMAP to be used in get_info calls */
+struct ll_fiemap_info_key {
+ char name[8];
+ struct obdo oa;
+ struct ll_user_fiemap fiemap;
+};
+
+extern void lustre_swab_ost_body (struct ost_body *b);
+extern void lustre_swab_ost_last_id(obd_id *id);
+extern void lustre_swab_fiemap(struct ll_user_fiemap *fiemap);
+
+extern void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
+extern void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
+extern void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
+ int stripe_count);
+extern void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
+
+/* llog_swab.c */
+extern void lustre_swab_llogd_body (struct llogd_body *d);
+extern void lustre_swab_llog_hdr (struct llog_log_hdr *h);
+extern void lustre_swab_llogd_conn_body (struct llogd_conn_body *d);
+extern void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
+extern void lustre_swab_llog_id(struct llog_logid *lid);
+
+struct lustre_cfg;
+extern void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
+
+/* Functions for dumping PTLRPC fields */
+void dump_rniobuf(struct niobuf_remote *rnb);
+void dump_ioo(struct obd_ioobj *nb);
+void dump_obdo(struct obdo *oa);
+void dump_ost_body(struct ost_body *ob);
+void dump_rcs(__u32 *rc);
+
+#define IDX_INFO_MAGIC 0x3D37CC37
+
+/* Index file transfer through the network. The server serializes the index into
+ * a byte stream which is sent to the client via a bulk transfer */
+struct idx_info {
+ __u32 ii_magic;
+
+ /* reply: see idx_info_flags below */
+ __u32 ii_flags;
+
+ /* request & reply: number of lu_idxpage (to be) transferred */
+ __u16 ii_count;
+ __u16 ii_pad0;
+
+ /* request: requested attributes passed down to the iterator API */
+ __u32 ii_attrs;
+
+ /* request & reply: index file identifier (FID) */
+ struct lu_fid ii_fid;
+
+ /* reply: version of the index file before starting to walk the index.
+ * Please note that the version can be modified at any time during the
+ * transfer */
+ __u64 ii_version;
+
+ /* request: hash to start with:
+ * reply: hash of the first entry of the first lu_idxpage and hash
+ * of the entry to read next if any */
+ __u64 ii_hash_start;
+ __u64 ii_hash_end;
+
+ /* reply: size of keys in lu_idxpages, minimal one if II_FL_VARKEY is
+ * set */
+ __u16 ii_keysize;
+
+ /* reply: size of records in lu_idxpages, minimal one if II_FL_VARREC
+ * is set */
+ __u16 ii_recsize;
+
+ __u32 ii_pad1;
+ __u64 ii_pad2;
+ __u64 ii_pad3;
+};
+extern void lustre_swab_idx_info(struct idx_info *ii);
+
+#define II_END_OFF MDS_DIR_END_OFF /* all entries have been read */
+
+/* List of flags used in idx_info::ii_flags */
+enum idx_info_flags {
+ II_FL_NOHASH = 1 << 0, /* client doesn't care about hash value */
+ II_FL_VARKEY = 1 << 1, /* keys can be of variable size */
+ II_FL_VARREC = 1 << 2, /* records can be of variable size */
+ II_FL_NONUNQ = 1 << 3, /* index supports non-unique keys */
+};
+
+#define LIP_MAGIC 0x8A6D6B6C
+
+/* 4KB (= LU_PAGE_SIZE) container gathering key/record pairs */
+struct lu_idxpage {
+ /* 16-byte header */
+ __u32 lip_magic;
+ __u16 lip_flags;
+ __u16 lip_nr; /* number of entries in the container */
+ __u64 lip_pad0; /* additional padding for future use */
+
+ /* key/record pairs are stored in the remaining 4080 bytes.
+ * depending upon the flags in idx_info::ii_flags, each key/record
+ * pair might be preceded by:
+ * - a hash value
+ * - the key size (II_FL_VARKEY is set)
+ * - the record size (II_FL_VARREC is set)
+ *
+ * For the time being, we only support fixed-size key & record. */
+ char lip_entries[0];
+};
+extern void lustre_swab_lip_header(struct lu_idxpage *lip);
+
+#define LIP_HDR_SIZE (offsetof(struct lu_idxpage, lip_entries))
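Under the fixed-size case the comment above describes (no per-entry hash or length prefixes, i.e. the variable-size flags are clear), the container can be walked as a flat byte stream. An illustrative sketch, not part of the patch, assuming each entry is a key immediately followed by its record; the helper name is invented:

static inline char *lip_entry(struct lu_idxpage *lip,
                              const struct idx_info *ii, __u16 nr)
{
        /* ii_keysize and ii_recsize come from the idx_info reply. */
        size_t stride = (size_t)ii->ii_keysize + ii->ii_recsize;

        if (nr >= lip->lip_nr)
                return NULL;
        return lip->lip_entries + (size_t)nr * stride;
}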
+
+/* Gather all possible type associated with a 4KB container */
+union lu_page {
+ struct lu_dirpage lp_dir; /* for MDS_READPAGE */
+ struct lu_idxpage lp_idx; /* for OBD_IDX_READ */
+ char lp_array[LU_PAGE_SIZE];
+};
+
+/* security opcodes */
+typedef enum {
+ SEC_CTX_INIT = 801,
+ SEC_CTX_INIT_CONT = 802,
+ SEC_CTX_FINI = 803,
+ SEC_LAST_OPC,
+ SEC_FIRST_OPC = SEC_CTX_INIT
+} sec_cmd_t;
+
+/*
+ * capa related definitions
+ */
+#define CAPA_HMAC_MAX_LEN 64
+#define CAPA_HMAC_KEY_MAX_LEN 56
+
+/* NB take care when changing the sequence of elements in this struct,
+ * because the offset info is used in find_capa() */
+struct lustre_capa {
+ struct lu_fid lc_fid; /** fid */
+ __u64 lc_opc; /** operations allowed */
+ __u64 lc_uid; /** file owner */
+ __u64 lc_gid; /** file group */
+ __u32 lc_flags; /** HMAC algorithm & flags */
+ __u32 lc_keyid; /** key# used for the capability */
+ __u32 lc_timeout; /** capa timeout value (sec) */
+ __u32 lc_expiry; /** expiry time (sec) */
+ __u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /** HMAC */
+} __attribute__((packed));
+
+extern void lustre_swab_lustre_capa(struct lustre_capa *c);
+
+/** lustre_capa::lc_opc */
+enum {
+ CAPA_OPC_BODY_WRITE = 1<<0, /**< write object data */
+ CAPA_OPC_BODY_READ = 1<<1, /**< read object data */
+ CAPA_OPC_INDEX_LOOKUP = 1<<2, /**< lookup object fid */
+ CAPA_OPC_INDEX_INSERT = 1<<3, /**< insert object fid */
+ CAPA_OPC_INDEX_DELETE = 1<<4, /**< delete object fid */
+ CAPA_OPC_OSS_WRITE = 1<<5, /**< write oss object data */
+ CAPA_OPC_OSS_READ = 1<<6, /**< read oss object data */
+ CAPA_OPC_OSS_TRUNC = 1<<7, /**< truncate oss object */
+ CAPA_OPC_OSS_DESTROY = 1<<8, /**< destroy oss object */
+ CAPA_OPC_META_WRITE = 1<<9, /**< write object meta data */
+ CAPA_OPC_META_READ = 1<<10, /**< read object meta data */
+};
+
+#define CAPA_OPC_OSS_RW (CAPA_OPC_OSS_READ | CAPA_OPC_OSS_WRITE)
+#define CAPA_OPC_MDS_ONLY \
+ (CAPA_OPC_BODY_WRITE | CAPA_OPC_BODY_READ | CAPA_OPC_INDEX_LOOKUP | \
+ CAPA_OPC_INDEX_INSERT | CAPA_OPC_INDEX_DELETE)
+#define CAPA_OPC_OSS_ONLY \
+ (CAPA_OPC_OSS_WRITE | CAPA_OPC_OSS_READ | CAPA_OPC_OSS_TRUNC | \
+ CAPA_OPC_OSS_DESTROY)
+#define CAPA_OPC_MDS_DEFAULT ~CAPA_OPC_OSS_ONLY
+#define CAPA_OPC_OSS_DEFAULT ~(CAPA_OPC_MDS_ONLY | CAPA_OPC_OSS_ONLY)
+
+/* MDS capability covers object capability for operations of body r/w
+ * (dir readpage/sendpage), index lookup/insert/delete and meta data r/w,
+ * while OSS capability only covers object capability for operations of
+ * OSS data (file content) r/w/truncate.
+ */
+static inline int capa_for_mds(struct lustre_capa *c)
+{
+ return (c->lc_opc & CAPA_OPC_INDEX_LOOKUP) != 0;
+}
+
+static inline int capa_for_oss(struct lustre_capa *c)
+{
+ return (c->lc_opc & CAPA_OPC_INDEX_LOOKUP) == 0;
+}
+
+/* lustre_capa::lc_hmac_alg */
+enum {
+ CAPA_HMAC_ALG_SHA1 = 1, /**< sha1 algorithm */
+ CAPA_HMAC_ALG_MAX,
+};
+
+#define CAPA_FL_MASK 0x00ffffff
+#define CAPA_HMAC_ALG_MASK 0xff000000
+
+struct lustre_capa_key {
+ __u64 lk_seq; /**< mds# */
+ __u32 lk_keyid; /**< key# */
+ __u32 lk_padding;
+ __u8 lk_key[CAPA_HMAC_KEY_MAX_LEN]; /**< key */
+} __attribute__((packed));
+
+extern void lustre_swab_lustre_capa_key(struct lustre_capa_key *k);
+
+/** The link ea holds 1 \a link_ea_entry for each hardlink */
+#define LINK_EA_MAGIC 0x11EAF1DFUL
+struct link_ea_header {
+ __u32 leh_magic;
+ __u32 leh_reccount;
+ __u64 leh_len; /* total size */
+ /* future use */
+ __u32 padding1;
+ __u32 padding2;
+};
+
+/** Hardlink data is name and parent fid.
+ * Stored in this crazy struct for maximum packing and endian-neutrality
+ */
+struct link_ea_entry {
+ /** __u16 stored big-endian, unaligned */
+ unsigned char lee_reclen[2];
+ unsigned char lee_parent_fid[sizeof(struct lu_fid)];
+ char lee_name[0];
+}__attribute__((packed));
+
+/** fid2path request/reply structure */
+struct getinfo_fid2path {
+ struct lu_fid gf_fid;
+ __u64 gf_recno;
+ __u32 gf_linkno;
+ __u32 gf_pathlen;
+ char gf_path[0];
+} __attribute__((packed));
+
+void lustre_swab_fid2path (struct getinfo_fid2path *gf);
+
+enum {
+ LAYOUT_INTENT_ACCESS = 0,
+ LAYOUT_INTENT_READ = 1,
+ LAYOUT_INTENT_WRITE = 2,
+ LAYOUT_INTENT_GLIMPSE = 3,
+ LAYOUT_INTENT_TRUNC = 4,
+ LAYOUT_INTENT_RELEASE = 5,
+ LAYOUT_INTENT_RESTORE = 6
+};
+
+/* enqueue layout lock with intent */
+struct layout_intent {
+ __u32 li_opc; /* intent operation for enqueue, read, write etc */
+ __u32 li_flags;
+ __u64 li_start;
+ __u64 li_end;
+};
+
+void lustre_swab_layout_intent(struct layout_intent *li);
+
+/**
+ * On the wire version of hsm_progress structure.
+ *
+ * Contains the userspace hsm_progress and some internal fields.
+ */
+struct hsm_progress_kernel {
+ /* Field taken from struct hsm_progress */
+ lustre_fid hpk_fid;
+ __u64 hpk_cookie;
+ struct hsm_extent hpk_extent;
+ __u16 hpk_flags;
+ __u16 hpk_errval; /* positive val */
+ __u32 hpk_padding1;
+ /* Additional fields */
+ __u64 hpk_data_version;
+ __u64 hpk_padding2;
+} __attribute__((packed));
+
+extern void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
+extern void lustre_swab_hsm_current_action(struct hsm_current_action *action);
+extern void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
+extern void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
+extern void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
+extern void lustre_swab_hsm_request(struct hsm_request *hr);
+
+/**
+ * These are the object update opcodes under UPDATE_OBJ, currently used
+ * by cross-ref operations between MDTs.
+ *
+ * During a cross-ref operation, the master MDT, to which the client sends
+ * the request, disassembles the operation into object updates; OSP then
+ * sends these updates to the remote MDT to be executed.
+ *
+ * Update request format
+ * magic: UPDATE_BUFFER_MAGIC_V1
+ * Count: How many updates in the req.
+ * bufs[0] : following are packets of object.
+ * update[0]:
+ * type: object_update_op, the op code of update
+ * fid: The object fid of the update.
+ * lens/bufs: other parameters of the update.
+ * update[1]:
+ * type: object_update_op, the op code of update
+ * fid: The object fid of the update.
+ * lens/bufs: other parameters of the update.
+ * ..........
+ * update[7]: type: object_update_op, the op code of update
+ * fid: The object fid of the update.
+ * lens/bufs: other parameters of the update.
+ * Currently at most 8 updates per object update request.
+ *
+ *******************************************************************
+ * update reply format:
+ *
+ * ur_version: UPDATE_REPLY_V1
+ * ur_count: The count of the reply, which is usually equal
+ * to the number of updates in the request.
+ * ur_lens: The reply lengths of each object update.
+ *
+ * replies: 1st update reply [4bytes_ret: other body]
+ * 2nd update reply [4bytes_ret: other body]
+ * .....
+ * nth update reply [4bytes_ret: other body]
+ *
+ * For each reply of the update, the format would be
+ * result (4 bytes): other body
+ */
+
+#define UPDATE_MAX_OPS 10
+#define UPDATE_BUFFER_MAGIC_V1 0xBDDE0001
+#define UPDATE_BUFFER_MAGIC UPDATE_BUFFER_MAGIC_V1
+#define UPDATE_BUF_COUNT 8
+enum object_update_op {
+ OBJ_CREATE = 1,
+ OBJ_DESTROY = 2,
+ OBJ_REF_ADD = 3,
+ OBJ_REF_DEL = 4,
+ OBJ_ATTR_SET = 5,
+ OBJ_ATTR_GET = 6,
+ OBJ_XATTR_SET = 7,
+ OBJ_XATTR_GET = 8,
+ OBJ_INDEX_LOOKUP = 9,
+ OBJ_INDEX_INSERT = 10,
+ OBJ_INDEX_DELETE = 11,
+ OBJ_LAST
+};
+
+struct update {
+ __u32 u_type;
+ __u32 u_batchid;
+ struct lu_fid u_fid;
+ __u32 u_lens[UPDATE_BUF_COUNT];
+ __u32 u_bufs[0];
+};
+
+struct update_buf {
+ __u32 ub_magic;
+ __u32 ub_count;
+ __u32 ub_bufs[0];
+};
+
+#define UPDATE_REPLY_V1 0x00BD0001
+struct update_reply {
+ __u32 ur_version;
+ __u32 ur_count;
+ __u32 ur_lens[0];
+};
+
+void lustre_swab_update_buf(struct update_buf *ub);
+void lustre_swab_update_reply_buf(struct update_reply *ur);
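Following the reply layout sketched in the comment above, ur_lens[] carries one length per update reply. An illustrative helper (not part of the patch, with an invented name, and ignoring any alignment the real protocol may insert between entries) to total the reply payload:

static inline __u32 update_reply_payload_len(const struct update_reply *ur)
{
        __u32 total = 0;
        __u32 i;

        for (i = 0; i < ur->ur_count; i++)
                total += ur->ur_lens[i];        /* length of the i-th reply */
        return total;
}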
+
+/** layout swap request structure
+ * fid1 and fid2 are in mdt_body
+ */
+struct mdc_swap_layouts {
+ __u64 msl_flags;
+} __packed;
+
+void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);
+
+#endif
+/** @} lustreidl */
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_lfsck_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_lfsck_user.h
new file mode 100644
index 0000000..1c87a61
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_lfsck_user.h
@@ -0,0 +1,95 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details. A copy is
+ * included in the COPYING file that accompanied this code.
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * lustre/include/lustre/lustre_lfsck_user.h
+ *
+ * Lustre LFSCK userspace interfaces.
+ *
+ * Author: Fan Yong <yong.fan@whamcloud.com>
+ */
+
+#ifndef _LUSTRE_LFSCK_USER_H
+# define _LUSTRE_LFSCK_USER_H
+
+enum lfsck_param_flags {
+ /* Reset LFSCK iterator position to the device beginning. */
+ LPF_RESET = 0x0001,
+
+ /* Exit on failure. */
+ LPF_FAILOUT = 0x0002,
+
+ /* Dryrun mode, only check without modification */
+ LPF_DRYRUN = 0x0004,
+};
+
+enum lfsck_type {
+ /* For MDT-OST consistency check/repair. */
+ LT_LAYOUT = 0x0001,
+
+ /* For MDT-MDT consistency check/repair. */
+ LT_DNE = 0x0002,
+
+ /* For FID-in-dirent and linkEA consistency check/repair. */
+ LT_NAMESPACE = 0x0004,
+};
+
+#define LFSCK_VERSION_V1 1
+#define LFSCK_VERSION_V2 2
+
+#define LFSCK_TYPES_ALL ((__u16)(~0))
+#define LFSCK_TYPES_DEF ((__u16)0)
+#define LFSCK_TYPES_SUPPORTED LT_NAMESPACE
+
+#define LFSCK_SPEED_NO_LIMIT 0
+#define LFSCK_SPEED_LIMIT_DEF LFSCK_SPEED_NO_LIMIT
+
+enum lfsck_start_valid {
+ LSV_SPEED_LIMIT = 0x00000001,
+ LSV_ERROR_HANDLE = 0x00000002,
+ LSV_DRYRUN = 0x00000004,
+};
+
+/* Arguments for starting lfsck. */
+struct lfsck_start {
+ /* Which arguments are valid, see 'enum lfsck_start_valid'. */
+ __u32 ls_valid;
+
+ /* How many items can be scanned at most per second. */
+ __u32 ls_speed_limit;
+
+ /* For compatibility between user space tools and kernel service. */
+ __u16 ls_version;
+
+ /* Which LFSCK components are to be (or have been) started. */
+ __u16 ls_active;
+
+ /* Flags for the LFSCK, see 'enum lfsck_param_flags'. */
+ __u16 ls_flags;
+
+ /* For 64-bit alignment. */
+ __u16 ls_padding;
+};
+
+#endif /* _LUSTRE_LFSCK_USER_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
new file mode 100644
index 0000000..c7bd447
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -0,0 +1,1157 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2010, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/include/lustre/lustre_user.h
+ *
+ * Lustre public user-space interface definitions.
+ */
+
+#ifndef _LUSTRE_USER_H
+#define _LUSTRE_USER_H
+
+/** \defgroup lustreuser lustreuser
+ *
+ * @{
+ */
+
+#include <lustre/ll_fiemap.h>
+#include <linux/lustre_user.h>
+
+/* for statfs() */
+#define LL_SUPER_MAGIC 0x0BD00BD0
+
+#ifndef FSFILT_IOC_GETFLAGS
+#define FSFILT_IOC_GETFLAGS _IOR('f', 1, long)
+#define FSFILT_IOC_SETFLAGS _IOW('f', 2, long)
+#define FSFILT_IOC_GETVERSION _IOR('f', 3, long)
+#define FSFILT_IOC_SETVERSION _IOW('f', 4, long)
+#define FSFILT_IOC_GETVERSION_OLD _IOR('v', 1, long)
+#define FSFILT_IOC_SETVERSION_OLD _IOW('v', 2, long)
+#define FSFILT_IOC_FIEMAP _IOWR('f', 11, struct ll_user_fiemap)
+#endif
+
+/* FIEMAP flags supported by Lustre */
+#define LUSTRE_FIEMAP_FLAGS_COMPAT (FIEMAP_FLAG_SYNC | FIEMAP_FLAG_DEVICE_ORDER)
+
+enum obd_statfs_state {
+ OS_STATE_DEGRADED = 0x00000001, /**< RAID degraded/rebuilding */
+ OS_STATE_READONLY = 0x00000002, /**< filesystem is read-only */
+ OS_STATE_RDONLY_1 = 0x00000004, /**< obsolete 1.6, was EROFS=30 */
+ OS_STATE_RDONLY_2 = 0x00000008, /**< obsolete 1.6, was EROFS=30 */
+ OS_STATE_RDONLY_3 = 0x00000010, /**< obsolete 1.6, was EROFS=30 */
+};
+
+struct obd_statfs {
+ __u64 os_type;
+ __u64 os_blocks;
+ __u64 os_bfree;
+ __u64 os_bavail;
+ __u64 os_files;
+ __u64 os_ffree;
+ __u8 os_fsid[40];
+ __u32 os_bsize;
+ __u32 os_namelen;
+ __u64 os_maxbytes;
+ __u32 os_state; /**< obd_statfs_state OS_STATE_* flag */
+ __u32 os_fprecreated; /* objs available now to the caller */
+ /* used in QoS code to find preferred
+ * OSTs */
+ __u32 os_spare2;
+ __u32 os_spare3;
+ __u32 os_spare4;
+ __u32 os_spare5;
+ __u32 os_spare6;
+ __u32 os_spare7;
+ __u32 os_spare8;
+ __u32 os_spare9;
+};
+
+/**
+ * File IDentifier.
+ *
+ * FID is a cluster-wide unique identifier of a file or an object (stripe).
+ * FIDs are never reused.
+ **/
+struct lu_fid {
+ /**
+ * FID sequence. Sequence is a unit of migration: all files (objects)
+ * with FIDs from a given sequence are stored on the same server.
+ * Lustre should support 2^64 objects, so even if each sequence
+ * has only a single object we can still enumerate 2^64 objects.
+ **/
+ __u64 f_seq;
+ /* FID number within sequence. */
+ __u32 f_oid;
+ /**
+ * FID version, used to distinguish different versions (in the sense
+ * of snapshots, etc.) of the same file system object. Not currently
+ * used.
+ **/
+ __u32 f_ver;
+};
+
+struct filter_fid {
+ struct lu_fid ff_parent; /* ff_parent.f_ver == file stripe number */
+};
+
+/* keep this one for compatibility */
+struct filter_fid_old {
+ struct lu_fid ff_parent;
+ __u64 ff_objid;
+ __u64 ff_seq;
+};
+
+/* Userspace should treat lu_fid as opaque, and only use the following methods
+ * to print or parse them. Other functions (e.g. compare, swab) could be moved
+ * here from lustre_idl.h if needed. */
+typedef struct lu_fid lustre_fid;
+
+/**
+ * The following struct holds object attributes that will be kept in the
+ * inode's EA. Introduced in the 2.0 release (please see b15993 for details).
+ * Added to all objects since Lustre 2.4 as it contains the self FID.
+ */
+struct lustre_mdt_attrs {
+ /**
+ * Bitfield for supported data in this structure. From enum lma_compat.
+ * lma_self_fid and lma_flags are always available.
+ */
+ __u32 lma_compat;
+ /**
+ * Per-file incompat feature list. Lustre version should support all
+ * flags set in this field. The supported feature mask is available in
+ * LMA_INCOMPAT_SUPP.
+ */
+ __u32 lma_incompat;
+ /** FID of this inode */
+ struct lu_fid lma_self_fid;
+};
+
+/**
+ * Prior to 2.4, the LMA structure also included SOM attributes, which have
+ * since been moved to a dedicated xattr.
+ * lma_flags was also removed in favour of the lma_compat/incompat fields.
+ */
+#define LMA_OLD_SIZE (sizeof(struct lustre_mdt_attrs) + 5 * sizeof(__u64))
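Spelling the arithmetic out (illustrative, not from the patch): struct lustre_mdt_attrs is two __u32 fields plus a 16-byte lu_fid, i.e. 24 bytes, so the old on-disk LMA with its five trailing 64-bit SOM words came to 24 + 5 * 8 = 64 bytes. A C11 static assert capturing that, assuming no structure padding beyond natural alignment:

_Static_assert(LMA_OLD_SIZE == 64, "pre-2.4 LMA layout was 64 bytes");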
+
+/**
+ * OST object IDentifier.
+ */
+struct ost_id {
+ union {
+ struct ostid {
+ __u64 oi_id;
+ __u64 oi_seq;
+ } oi;
+ struct lu_fid oi_fid;
+ };
+};
+
+#define DOSTID LPX64":"LPU64
+#define POSTID(oi) ostid_seq(oi), ostid_id(oi)
+
+/*
+ * The ioctl naming rules:
+ * LL_* - works on the currently opened filehandle instead of parent dir
+ * *_OBD_* - gets data for both OSC or MDC (LOV, LMV indirectly)
+ * *_MDC_* - gets/sets data related to MDC
+ * *_LOV_* - gets/sets data related to OSC/LOV
+ * *FILE* - called on parent dir and passes in a filename
+ * *STRIPE* - set/get lov_user_md
+ * *INFO - set/get lov_user_mds_data
+ */
+/* see <lustre_lib.h> for ioctl numbers 101-150 */
+#define LL_IOC_GETFLAGS _IOR ('f', 151, long)
+#define LL_IOC_SETFLAGS _IOW ('f', 152, long)
+#define LL_IOC_CLRFLAGS _IOW ('f', 153, long)
+/* LL_IOC_LOV_SETSTRIPE: See also OBD_IOC_LOV_SETSTRIPE */
+#define LL_IOC_LOV_SETSTRIPE _IOW ('f', 154, long)
+/* LL_IOC_LOV_GETSTRIPE: See also OBD_IOC_LOV_GETSTRIPE */
+#define LL_IOC_LOV_GETSTRIPE _IOW ('f', 155, long)
+/* LL_IOC_LOV_SETEA: See also OBD_IOC_LOV_SETEA */
+#define LL_IOC_LOV_SETEA _IOW ('f', 156, long)
+#define LL_IOC_RECREATE_OBJ _IOW ('f', 157, long)
+#define LL_IOC_RECREATE_FID _IOW ('f', 157, struct lu_fid)
+#define LL_IOC_GROUP_LOCK _IOW ('f', 158, long)
+#define LL_IOC_GROUP_UNLOCK _IOW ('f', 159, long)
+/* LL_IOC_QUOTACHECK: See also OBD_IOC_QUOTACHECK */
+#define LL_IOC_QUOTACHECK _IOW ('f', 160, int)
+/* LL_IOC_POLL_QUOTACHECK: See also OBD_IOC_POLL_QUOTACHECK */
+#define LL_IOC_POLL_QUOTACHECK _IOR ('f', 161, struct if_quotacheck *)
+/* LL_IOC_QUOTACTL: See also OBD_IOC_QUOTACTL */
+#define LL_IOC_QUOTACTL _IOWR('f', 162, struct if_quotactl)
+#define IOC_OBD_STATFS _IOWR('f', 164, struct obd_statfs *)
+#define IOC_LOV_GETINFO _IOWR('f', 165, struct lov_user_mds_data *)
+#define LL_IOC_FLUSHCTX _IOW ('f', 166, long)
+#define LL_IOC_RMTACL _IOW ('f', 167, long)
+#define LL_IOC_GETOBDCOUNT _IOR ('f', 168, long)
+#define LL_IOC_LLOOP_ATTACH _IOWR('f', 169, long)
+#define LL_IOC_LLOOP_DETACH _IOWR('f', 170, long)
+#define LL_IOC_LLOOP_INFO _IOWR('f', 171, struct lu_fid)
+#define LL_IOC_LLOOP_DETACH_BYDEV _IOWR('f', 172, long)
+#define LL_IOC_PATH2FID _IOR ('f', 173, long)
+#define LL_IOC_GET_CONNECT_FLAGS _IOWR('f', 174, __u64 *)
+#define LL_IOC_GET_MDTIDX _IOR ('f', 175, int)
+
+/* see <lustre_lib.h> for ioctl numbers 177-210 */
+
+#define LL_IOC_HSM_STATE_GET _IOR('f', 211, struct hsm_user_state)
+#define LL_IOC_HSM_STATE_SET _IOW('f', 212, struct hsm_state_set)
+#define LL_IOC_HSM_CT_START _IOW('f', 213, struct lustre_kernelcomm)
+#define LL_IOC_HSM_COPY_START _IOW('f', 214, struct hsm_copy *)
+#define LL_IOC_HSM_COPY_END _IOW('f', 215, struct hsm_copy *)
+#define LL_IOC_HSM_PROGRESS _IOW('f', 216, struct hsm_user_request)
+#define LL_IOC_HSM_REQUEST _IOW('f', 217, struct hsm_user_request)
+#define LL_IOC_DATA_VERSION _IOR('f', 218, struct ioc_data_version)
+#define LL_IOC_LOV_SWAP_LAYOUTS _IOW('f', 219, \
+ struct lustre_swap_layouts)
+#define LL_IOC_HSM_ACTION _IOR('f', 220, \
+ struct hsm_current_action)
+/* see <lustre_lib.h> for ioctl numbers 221-232 */
+
+#define LL_IOC_LMV_SETSTRIPE _IOWR('f', 240, struct lmv_user_md)
+#define LL_IOC_LMV_GETSTRIPE _IOWR('f', 241, struct lmv_user_md)
+#define LL_IOC_REMOVE_ENTRY _IOWR('f', 242, __u64)
+
+#define LL_STATFS_LMV 1
+#define LL_STATFS_LOV 2
+#define LL_STATFS_NODELAY 4
+
+#define IOC_MDC_TYPE 'i'
+#define IOC_MDC_LOOKUP _IOWR(IOC_MDC_TYPE, 20, struct obd_device *)
+#define IOC_MDC_GETFILESTRIPE _IOWR(IOC_MDC_TYPE, 21, struct lov_user_md *)
+#define IOC_MDC_GETFILEINFO _IOWR(IOC_MDC_TYPE, 22, struct lov_user_mds_data *)
+#define LL_IOC_MDC_GETINFO _IOWR(IOC_MDC_TYPE, 23, struct lov_user_mds_data *)
+
+/* Keep these for backward compatibility. */
+#define LL_IOC_OBD_STATFS IOC_OBD_STATFS
+#define IOC_MDC_GETSTRIPE IOC_MDC_GETFILESTRIPE
+
+
+#define MAX_OBD_NAME 128 /* If this changes, a NEW ioctl must be added */
+
+/* Hopefully O_LOV_DELAY_CREATE does not conflict with standard O_xxx flags.
+ * It was previously defined as 0100000000, which conflicts with FMODE_NONOTIFY
+ * (added in kernel 2.6.36), so it was redefined as 020000000. To stay
+ * compatible with statically linked binaries built against the old value,
+ * it is finally defined as (020000000 | 0100000000).
+ */
+#define O_LOV_DELAY_CREATE 0120000000
+
+#define LL_FILE_IGNORE_LOCK 0x00000001
+#define LL_FILE_GROUP_LOCKED 0x00000002
+#define LL_FILE_READAHEA 0x00000004
+#define LL_FILE_LOCKED_DIRECTIO 0x00000008 /* client-side locks with dio */
+#define LL_FILE_LOCKLESS_IO 0x00000010 /* server-side locks with cio */
+#define LL_FILE_RMTACL 0x00000020
+
+#define LOV_USER_MAGIC_V1 0x0BD10BD0
+#define LOV_USER_MAGIC LOV_USER_MAGIC_V1
+#define LOV_USER_MAGIC_JOIN_V1 0x0BD20BD0
+#define LOV_USER_MAGIC_V3 0x0BD30BD0
+
+#define LMV_MAGIC_V1 0x0CD10CD0 /* normal stripe LMV magic */
+#define LMV_USER_MAGIC 0x0CD20CD0 /* default LMV magic */
+
+#define LOV_PATTERN_RAID0 0x001
+#define LOV_PATTERN_RAID1 0x002
+#define LOV_PATTERN_FIRST 0x100
+
+#define LOV_MAXPOOLNAME 16
+#define LOV_POOLNAMEF "%.16s"
+
+#define LOV_MIN_STRIPE_BITS 16 /* maximum PAGE_SIZE (ia64), power of 2 */
+#define LOV_MIN_STRIPE_SIZE (1 << LOV_MIN_STRIPE_BITS)
+#define LOV_MAX_STRIPE_COUNT_OLD 160
+/* This calculation is crafted so that an input of 4096 results in 160,
+ * which in turn equals the old maximum stripe count.
+ * XXX: In fact this is too simplified for now; what it also needs is an
+ * ea_type argument to know exactly how much space each stripe consumes.
+ *
+ * The limit of 12 pages is somewhat arbitrary, but is a reasonably large
+ * allocation that is sufficient for the current generation of systems.
+ *
+ * (max buffer size - lov+rpc header) / sizeof(struct lov_ost_data_v1) */
+#define LOV_MAX_STRIPE_COUNT 2000 /* ((12 * 4096 - 256) / 24) */
+#define LOV_ALL_STRIPES 0xffff /* only valid for directories */
+#define LOV_V1_INSANE_STRIPE_COUNT 65532 /* maximum stripe count bz13933 */
+
+#define lov_user_ost_data lov_user_ost_data_v1
+struct lov_user_ost_data_v1 { /* per-stripe data structure */
+ struct ost_id l_ost_oi; /* OST object ID */
+ __u32 l_ost_gen; /* generation of this OST index */
+ __u32 l_ost_idx; /* OST index in LOV */
+} __attribute__((packed));
+
+#define lov_user_md lov_user_md_v1
+struct lov_user_md_v1 { /* LOV EA user data (host-endian) */
+ __u32 lmm_magic; /* magic number = LOV_USER_MAGIC_V1 */
+ __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
+ struct ost_id lmm_oi; /* LOV object ID */
+ __u32 lmm_stripe_size; /* size of stripe in bytes */
+ __u16 lmm_stripe_count; /* num stripes in use for this object */
+ union {
+ __u16 lmm_stripe_offset; /* starting stripe offset in
+ * lmm_objects, use when writing */
+ __u16 lmm_layout_gen; /* layout generation number
+ * used when reading */
+ };
+ struct lov_user_ost_data_v1 lmm_objects[0]; /* per-stripe data */
+} __attribute__((packed, __may_alias__));
+
+struct lov_user_md_v3 { /* LOV EA user data (host-endian) */
+ __u32 lmm_magic; /* magic number = LOV_USER_MAGIC_V3 */
+ __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
+ struct ost_id lmm_oi; /* LOV object ID */
+ __u32 lmm_stripe_size; /* size of stripe in bytes */
+ __u16 lmm_stripe_count; /* num stripes in use for this object */
+ union {
+ __u16 lmm_stripe_offset; /* starting stripe offset in
+ * lmm_objects, use when writing */
+ __u16 lmm_layout_gen; /* layout generation number
+ * used when reading */
+ };
+ char lmm_pool_name[LOV_MAXPOOLNAME]; /* pool name */
+ struct lov_user_ost_data_v1 lmm_objects[0]; /* per-stripe data */
+} __attribute__((packed));
+
+static inline __u32 lov_user_md_size(__u16 stripes, __u32 lmm_magic)
+{
+ if (lmm_magic == LOV_USER_MAGIC_V3)
+ return sizeof(struct lov_user_md_v3) +
+ stripes * sizeof(struct lov_user_ost_data_v1);
+ else
+ return sizeof(struct lov_user_md_v1) +
+ stripes * sizeof(struct lov_user_ost_data_v1);
+}
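
For illustration only (not part of this patch): a minimal userspace sketch of how lov_user_md_size() is typically used to size the buffer handed to a striping query such as llapi_file_get_stripe() (declared in lustreapi.h later in this series). The allocation pattern is an assumption, not code from this file.

    #include <stdlib.h>
    #include <lustre/lustre_user.h>

    /* Allocate a lov_user_md large enough for the biggest possible V3 layout,
     * so a subsequent stripe query cannot overflow the buffer. */
    static struct lov_user_md *alloc_stripe_buf(void)
    {
            size_t len = lov_user_md_size(LOV_MAX_STRIPE_COUNT,
                                          LOV_USER_MAGIC_V3);
            struct lov_user_md *lum = calloc(1, len);

            if (lum)
                    lum->lmm_magic = LOV_USER_MAGIC_V3;
            return lum;
    }
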
+
+/* Compile with -D_LARGEFILE64_SOURCE or -D_GNU_SOURCE (or #define) to
+ * use this. It is unsafe to #define those values in this header as it
+ * is possible the application has already #included <sys/stat.h>. */
+#ifdef HAVE_LOV_USER_MDS_DATA
+#define lov_user_mds_data lov_user_mds_data_v1
+struct lov_user_mds_data_v1 {
+ lstat_t lmd_st; /* MDS stat struct */
+ struct lov_user_md_v1 lmd_lmm; /* LOV EA V1 user data */
+} __attribute__((packed));
+
+struct lov_user_mds_data_v3 {
+ lstat_t lmd_st; /* MDS stat struct */
+ struct lov_user_md_v3 lmd_lmm; /* LOV EA V3 user data */
+} __attribute__((packed));
+#endif
+
+/* keep this to be the same size as lov_user_ost_data_v1 */
+struct lmv_user_mds_data {
+ struct lu_fid lum_fid;
+ __u32 lum_padding;
+ __u32 lum_mds;
+};
+
+/* lum_type */
+enum {
+ LMV_STRIPE_TYPE = 0,
+ LMV_DEFAULT_TYPE = 1,
+};
+
+#define lmv_user_md lmv_user_md_v1
+struct lmv_user_md_v1 {
+ __u32 lum_magic; /* must be the first field */
+ __u32 lum_stripe_count; /* dirstripe count */
+ __u32 lum_stripe_offset; /* MDT idx for default dirstripe */
+ __u32 lum_hash_type; /* Dir stripe policy */
+ __u32 lum_type; /* LMV type: default or normal */
+ __u32 lum_padding1;
+ __u32 lum_padding2;
+ __u32 lum_padding3;
+ char lum_pool_name[LOV_MAXPOOLNAME];
+ struct lmv_user_mds_data lum_objects[0];
+};
+
+static inline int lmv_user_md_size(int stripes, int lmm_magic)
+{
+ return sizeof(struct lmv_user_md) +
+ stripes * sizeof(struct lmv_user_mds_data);
+}
+
+extern void lustre_swab_lmv_user_md(struct lmv_user_md *lum);
+
+struct ll_recreate_obj {
+ __u64 lrc_id;
+ __u32 lrc_ost_idx;
+};
+
+struct ll_fid {
+ __u64 id; /* holds object id */
+ __u32 generation; /* holds object generation */
+ __u32 f_type; /* holds object type or stripe idx when passing it to
+ * OST for saving into EA. */
+};
+
+#define UUID_MAX 40
+struct obd_uuid {
+ char uuid[UUID_MAX];
+};
+
+static inline int obd_uuid_equals(const struct obd_uuid *u1,
+ const struct obd_uuid *u2)
+{
+ return strcmp((char *)u1->uuid, (char *)u2->uuid) == 0;
+}
+
+static inline int obd_uuid_empty(struct obd_uuid *uuid)
+{
+ return uuid->uuid[0] == '\0';
+}
+
+static inline void obd_str2uuid(struct obd_uuid *uuid, const char *tmp)
+{
+ strncpy((char *)uuid->uuid, tmp, sizeof(*uuid));
+ uuid->uuid[sizeof(*uuid) - 1] = '\0';
+}
+
+/* For printf's only, make sure uuid is terminated */
+static inline char *obd_uuid2str(struct obd_uuid *uuid)
+{
+ if (uuid->uuid[sizeof(*uuid) - 1] != '\0') {
+ /* Obviously not safe, but for printfs, no real harm done...
+ we're always null-terminated, even in a race. */
+ static char temp[sizeof(*uuid)];
+ memcpy(temp, uuid->uuid, sizeof(*uuid) - 1);
+ temp[sizeof(*uuid) - 1] = '\0';
+ return temp;
+ }
+ return (char *)(uuid->uuid);
+}
+
+/* Extract fsname from uuid (or target name) of a target
+ e.g. (myfs-OST0007_UUID -> myfs)
+ see also deuuidify. */
+static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen)
+{
+ char *p;
+
+ strncpy(buf, uuid, buflen - 1);
+ buf[buflen - 1] = '\0';
+ p = strrchr(buf, '-');
+ if (p)
+ *p = '\0';
+}
+
+/* printf display format
+ e.g. printf("file FID is "DFID"\n", PFID(fid)); */
+#define FID_NOBRACE_LEN 40
+#define FID_LEN (FID_NOBRACE_LEN + 2)
+#define DFID_NOBRACE LPX64":0x%x:0x%x"
+#define DFID "["DFID_NOBRACE"]"
+#define PFID(fid) \
+ (fid)->f_seq, \
+ (fid)->f_oid, \
+ (fid)->f_ver
+
+/* scanf input parse format -- strip '[' first.
+ e.g. sscanf(fidstr, SFID, RFID(&fid)); */
+/* #define SFID "0x"LPX64i":0x"LPSZX":0x"LPSZX""
+liblustreapi.c:2893: warning: format '%lx' expects type 'long unsigned int *', but argument 4 has type 'unsigned int *'
+liblustreapi.c:2893: warning: format '%lx' expects type 'long unsigned int *', but argument 5 has type 'unsigned int *'
+*/
+#define SFID "0x"LPX64i":0x%x:0x%x"
+#define RFID(fid) \
+ &((fid)->f_seq), \
+ &((fid)->f_oid), \
+ &((fid)->f_ver)
+
+
+/********* Quotas **********/
+
+/* these must be explicitly translated into linux Q_* in ll_dir_ioctl */
+#define LUSTRE_Q_QUOTAON 0x800002 /* turn quotas on */
+#define LUSTRE_Q_QUOTAOFF 0x800003 /* turn quotas off */
+#define LUSTRE_Q_GETINFO 0x800005 /* get information about quota files */
+#define LUSTRE_Q_SETINFO 0x800006 /* set information about quota files */
+#define LUSTRE_Q_GETQUOTA 0x800007 /* get user quota structure */
+#define LUSTRE_Q_SETQUOTA 0x800008 /* set user quota structure */
+/* lustre-specific control commands */
+#define LUSTRE_Q_INVALIDATE 0x80000b /* invalidate quota data */
+#define LUSTRE_Q_FINVALIDATE 0x80000c /* invalidate filter quota data */
+
+#define UGQUOTA 2 /* set both USRQUOTA and GRPQUOTA */
+
+struct if_quotacheck {
+ char obd_type[16];
+ struct obd_uuid obd_uuid;
+};
+
+#define IDENTITY_DOWNCALL_MAGIC 0x6d6dd629
+
+/* permission */
+#define N_PERMS_MAX 64
+
+struct perm_downcall_data {
+ __u64 pdd_nid;
+ __u32 pdd_perm;
+ __u32 pdd_padding;
+};
+
+struct identity_downcall_data {
+ __u32 idd_magic;
+ __u32 idd_err;
+ __u32 idd_uid;
+ __u32 idd_gid;
+ __u32 idd_nperms;
+ __u32 idd_ngroups;
+ struct perm_downcall_data idd_perms[N_PERMS_MAX];
+ __u32 idd_groups[0];
+};
+
+/* for non-mapped uid/gid */
+#define NOBODY_UID 99
+#define NOBODY_GID 99
+
+#define INVALID_ID (-1)
+
+enum {
+ RMT_LSETFACL = 1,
+ RMT_LGETFACL = 2,
+ RMT_RSETFACL = 3,
+ RMT_RGETFACL = 4
+};
+
+#ifdef NEED_QUOTA_DEFS
+#ifndef QIF_BLIMITS
+#define QIF_BLIMITS 1
+#define QIF_SPACE 2
+#define QIF_ILIMITS 4
+#define QIF_INODES 8
+#define QIF_BTIME 16
+#define QIF_ITIME 32
+#define QIF_LIMITS (QIF_BLIMITS | QIF_ILIMITS)
+#define QIF_USAGE (QIF_SPACE | QIF_INODES)
+#define QIF_TIMES (QIF_BTIME | QIF_ITIME)
+#define QIF_ALL (QIF_LIMITS | QIF_USAGE | QIF_TIMES)
+#endif
+
+#endif /* !__KERNEL__ */
+
+/* lustre volatile file support
+ * file name header: ".^L^S^T^R:VOLATILE"
+ */
+#define LUSTRE_VOLATILE_HDR ".\x0c\x13\x14\x12:VOLATILE"
+#define LUSTRE_VOLATILE_HDR_LEN 14
+/* hdr + MDT index */
+#define LUSTRE_VOLATILE_IDX LUSTRE_VOLATILE_HDR":%.4X:"
+
+typedef enum lustre_quota_version {
+ LUSTRE_QUOTA_V2 = 1
+} lustre_quota_version_t;
+
+/* XXX: same as if_dqinfo struct in kernel */
+struct obd_dqinfo {
+ __u64 dqi_bgrace;
+ __u64 dqi_igrace;
+ __u32 dqi_flags;
+ __u32 dqi_valid;
+};
+
+/* XXX: same as if_dqblk struct in kernel, plus one padding */
+struct obd_dqblk {
+ __u64 dqb_bhardlimit;
+ __u64 dqb_bsoftlimit;
+ __u64 dqb_curspace;
+ __u64 dqb_ihardlimit;
+ __u64 dqb_isoftlimit;
+ __u64 dqb_curinodes;
+ __u64 dqb_btime;
+ __u64 dqb_itime;
+ __u32 dqb_valid;
+ __u32 dqb_padding;
+};
+
+enum {
+ QC_GENERAL = 0,
+ QC_MDTIDX = 1,
+ QC_OSTIDX = 2,
+ QC_UUID = 3
+};
+
+struct if_quotactl {
+ __u32 qc_cmd;
+ __u32 qc_type;
+ __u32 qc_id;
+ __u32 qc_stat;
+ __u32 qc_valid;
+ __u32 qc_idx;
+ struct obd_dqinfo qc_dqinfo;
+ struct obd_dqblk qc_dqblk;
+ char obd_type[16];
+ struct obd_uuid obd_uuid;
+};
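
A hedged sketch (not part of this patch) of how if_quotactl is typically filled for LUSTRE_Q_GETQUOTA and passed to llapi_quotactl(), which is declared in lustreapi.h later in this series.

    #include <string.h>
    #include <sys/quota.h>              /* USRQUOTA */
    #include <lustre/lustreapi.h>

    /* Fetch the block/inode usage and limits of one user on a Lustre mount. */
    static int get_user_quota(char *mnt, unsigned int uid, struct obd_dqblk *blk)
    {
            struct if_quotactl qctl;
            int rc;

            memset(&qctl, 0, sizeof(qctl));
            qctl.qc_cmd = LUSTRE_Q_GETQUOTA;
            qctl.qc_type = USRQUOTA;
            qctl.qc_id = uid;

            rc = llapi_quotactl(mnt, &qctl);
            if (rc == 0)
                    *blk = qctl.qc_dqblk;   /* usage and limits */
            return rc;
    }
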
+
+/* swap layout flags */
+#define SWAP_LAYOUTS_CHECK_DV1 (1 << 0)
+#define SWAP_LAYOUTS_CHECK_DV2 (1 << 1)
+#define SWAP_LAYOUTS_KEEP_MTIME (1 << 2)
+#define SWAP_LAYOUTS_KEEP_ATIME (1 << 3)
+struct lustre_swap_layouts {
+ __u64 sl_flags;
+ __u32 sl_fd;
+ __u32 sl_gid;
+ __u64 sl_dv1;
+ __u64 sl_dv2;
+};
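
A hedged sketch (not part of this patch): the SWAP_LAYOUTS_* flags above are meant to be OR-ed into sl_flags; through the llapi_swap_layouts() wrapper declared later in this series, a typical call looks roughly like this.

    #include <lustre/lustreapi.h>

    /* Swap the layouts of two files atomically, preserving their mtimes and
     * verifying that neither file changed since its data version was sampled. */
    static int swap_with_checks(const char *a, const char *b,
                                __u64 dv_a, __u64 dv_b)
    {
            return llapi_swap_layouts(a, b, dv_a, dv_b,
                                      SWAP_LAYOUTS_CHECK_DV1 |
                                      SWAP_LAYOUTS_CHECK_DV2 |
                                      SWAP_LAYOUTS_KEEP_MTIME);
    }
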
+
+
+/********* Changelogs **********/
+/** Changelog record types */
+enum changelog_rec_type {
+ CL_MARK = 0,
+ CL_CREATE = 1, /* namespace */
+ CL_MKDIR = 2, /* namespace */
+ CL_HARDLINK = 3, /* namespace */
+ CL_SOFTLINK = 4, /* namespace */
+ CL_MKNOD = 5, /* namespace */
+ CL_UNLINK = 6, /* namespace */
+ CL_RMDIR = 7, /* namespace */
+ CL_RENAME = 8, /* namespace */
+ CL_EXT = 9, /* namespace extended record (2nd half of rename) */
+ CL_OPEN = 10, /* not currently used */
+ CL_CLOSE = 11, /* may be written to log only with mtime change */
+ CL_LAYOUT = 12, /* file layout/striping modified */
+ CL_TRUNC = 13,
+ CL_SETATTR = 14,
+ CL_XATTR = 15,
+ CL_HSM = 16, /* HSM specific events, see flags */
+ CL_MTIME = 17, /* Precedence: setattr > mtime > ctime > atime */
+ CL_CTIME = 18,
+ CL_ATIME = 19,
+ CL_LAST
+};
+
+static inline const char *changelog_type2str(int type) {
+ static const char *changelog_str[] = {
+ "MARK", "CREAT", "MKDIR", "HLINK", "SLINK", "MKNOD", "UNLNK",
+ "RMDIR", "RENME", "RNMTO", "OPEN", "CLOSE", "LYOUT", "TRUNC",
+ "SATTR", "XATTR", "HSM", "MTIME", "CTIME", "ATIME",
+ };
+
+ if (type >= 0 && type < CL_LAST)
+ return changelog_str[type];
+ return NULL;
+}
+
+/* per-record flags */
+#define CLF_VERSION 0x1000
+#define CLF_EXT_VERSION 0x2000
+#define CLF_FLAGSHIFT 12
+#define CLF_FLAGMASK ((1U << CLF_FLAGSHIFT) - 1)
+#define CLF_VERMASK (~CLF_FLAGMASK)
+/* Anything under the flagmask may be per-type (if desired) */
+/* Flags for unlink */
+#define CLF_UNLINK_LAST 0x0001 /* Unlink of last hardlink */
+#define CLF_UNLINK_HSM_EXISTS 0x0002 /* File has something in HSM */
+ /* HSM cleaning needed */
+/* Flags for rename */
+#define CLF_RENAME_LAST 0x0001 /* rename unlink last hardlink of target */
+
+/* Flags for HSM */
+/* 12b used (from high weight to low weight):
+ * 2b for flags
+ * 3b for event
+ * 7b for error code
+ */
+#define CLF_HSM_ERR_L 0 /* HSM return code, 7 bits */
+#define CLF_HSM_ERR_H 6
+#define CLF_HSM_EVENT_L 7 /* HSM event, 3 bits, see enum hsm_event */
+#define CLF_HSM_EVENT_H 9
+#define CLF_HSM_FLAG_L 10 /* HSM flags, 2 bits, 1 used, 1 spare */
+#define CLF_HSM_FLAG_H 11
+#define CLF_HSM_SPARE_L 12 /* 4 spare bits */
+#define CLF_HSM_SPARE_H 15
+#define CLF_HSM_LAST 15
+
+/* Remove bits higher than _h, then extract the value
+ * between _h and _l by shifting the lower weight down to bit 0. */
+#define CLF_GET_BITS(_b, _h, _l) (((_b << (CLF_HSM_LAST - _h)) & 0xFFFF) \
+ >> (CLF_HSM_LAST - _h + _l))
+
+#define CLF_HSM_SUCCESS 0x00
+#define CLF_HSM_MAXERROR 0x7E
+#define CLF_HSM_ERROVERFLOW 0x7F
+
+#define CLF_HSM_DIRTY 1 /* file is dirty after HSM request end */
+
+/* 3 bits field => 8 values allowed */
+enum hsm_event {
+ HE_ARCHIVE = 0,
+ HE_RESTORE = 1,
+ HE_CANCEL = 2,
+ HE_RELEASE = 3,
+ HE_REMOVE = 4,
+ HE_STATE = 5,
+ HE_SPARE1 = 6,
+ HE_SPARE2 = 7,
+};
+
+static inline enum hsm_event hsm_get_cl_event(__u16 flags)
+{
+ return CLF_GET_BITS(flags, CLF_HSM_EVENT_H, CLF_HSM_EVENT_L);
+}
+
+static inline void hsm_set_cl_event(int *flags, enum hsm_event he)
+{
+ *flags |= (he << CLF_HSM_EVENT_L);
+}
+
+static inline __u16 hsm_get_cl_flags(int flags)
+{
+ return CLF_GET_BITS(flags, CLF_HSM_FLAG_H, CLF_HSM_FLAG_L);
+}
+
+static inline void hsm_set_cl_flags(int *flags, int bits)
+{
+ *flags |= (bits << CLF_HSM_FLAG_L);
+}
+
+static inline int hsm_get_cl_error(int flags)
+{
+ return CLF_GET_BITS(flags, CLF_HSM_ERR_H, CLF_HSM_ERR_L);
+}
+
+static inline void hsm_set_cl_error(int *flags, int error)
+{
+ *flags |= (error << CLF_HSM_ERR_L);
+}
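
A small sketch (not part of this patch) of how the CLF_HSM_* accessors above pack and unpack one 16-bit changelog flags word.

    #include <assert.h>

    /* Pack event, flag and error bits, then read them back. */
    static void hsm_flags_roundtrip(void)
    {
            int flags = 0;

            hsm_set_cl_event(&flags, HE_RESTORE);
            hsm_set_cl_flags(&flags, CLF_HSM_DIRTY);
            hsm_set_cl_error(&flags, CLF_HSM_SUCCESS);

            assert(hsm_get_cl_event(flags) == HE_RESTORE);
            assert(hsm_get_cl_flags(flags) & CLF_HSM_DIRTY);
            assert(hsm_get_cl_error(flags) == CLF_HSM_SUCCESS);
    }
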
+
+#define CR_MAXSIZE cfs_size_round(2*NAME_MAX + 1 + sizeof(struct changelog_rec))
+
+struct changelog_rec {
+ __u16 cr_namelen;
+ __u16 cr_flags; /**< (flags&CLF_FLAGMASK)|CLF_VERSION */
+ __u32 cr_type; /**< \a changelog_rec_type */
+ __u64 cr_index; /**< changelog record number */
+ __u64 cr_prev; /**< last index for this target fid */
+ __u64 cr_time;
+ union {
+ lustre_fid cr_tfid; /**< target fid */
+ __u32 cr_markerflags; /**< CL_MARK flags */
+ };
+ lustre_fid cr_pfid; /**< parent fid */
+ char cr_name[0]; /**< last element */
+} __attribute__((packed));
+
+/* changelog_ext_rec is 2*sizeof(lu_fid) bigger than changelog_rec. To save
+ * space, only rename uses changelog_ext_rec, while other record types use
+ * changelog_rec.
+ */
+struct changelog_ext_rec {
+ __u16 cr_namelen;
+ __u16 cr_flags; /**< (flags & CLF_FLAGMASK) |
+ CLF_EXT_VERSION */
+ __u32 cr_type; /**< \a changelog_rec_type */
+ __u64 cr_index; /**< changelog record number */
+ __u64 cr_prev; /**< last index for this target fid */
+ __u64 cr_time;
+ union {
+ lustre_fid cr_tfid; /**< target fid */
+ __u32 cr_markerflags; /**< CL_MARK flags */
+ };
+ lustre_fid cr_pfid; /**< target parent fid */
+ lustre_fid cr_sfid; /**< source fid, or zero */
+ lustre_fid cr_spfid; /**< source parent fid, or zero */
+ char cr_name[0]; /**< last element */
+} __attribute__((packed));
+
+#define CHANGELOG_REC_EXTENDED(rec) \
+ (((rec)->cr_flags & CLF_VERMASK) == CLF_EXT_VERSION)
+
+static inline int changelog_rec_size(struct changelog_rec *rec)
+{
+ return CHANGELOG_REC_EXTENDED(rec) ? sizeof(struct changelog_ext_rec):
+ sizeof(*rec);
+}
+
+static inline char *changelog_rec_name(struct changelog_rec *rec)
+{
+ return CHANGELOG_REC_EXTENDED(rec) ?
+ ((struct changelog_ext_rec *)rec)->cr_name: rec->cr_name;
+}
+
+static inline int changelog_rec_snamelen(struct changelog_ext_rec *rec)
+{
+ return rec->cr_namelen - strlen(rec->cr_name) - 1;
+}
+
+static inline char *changelog_rec_sname(struct changelog_ext_rec *rec)
+{
+ return rec->cr_name + strlen(rec->cr_name) + 1;
+}
+
+struct ioc_changelog {
+ __u64 icc_recno;
+ __u32 icc_mdtindex;
+ __u32 icc_id;
+ __u32 icc_flags;
+};
+
+enum changelog_message_type {
+ CL_RECORD = 10, /* message is a changelog_rec */
+ CL_EOF = 11, /* at end of current changelog */
+};
+
+/********* Misc **********/
+
+struct ioc_data_version {
+ __u64 idv_version;
+ __u64 idv_flags; /* See LL_DV_xxx */
+};
+#define LL_DV_NOFLUSH 0x01 /* Do not take READ EXTENT LOCK before sampling
+ version. Dirty caches are left unchanged. */
+
+#ifndef offsetof
+# define offsetof(typ,memb) ((unsigned long)((char *)&(((typ *)0)->memb)))
+#endif
+
+#define dot_lustre_name ".lustre"
+
+
+/********* HSM **********/
+
+/** HSM per-file state
+ * See HSM_FLAGS below.
+ */
+enum hsm_states {
+ HS_EXISTS = 0x00000001,
+ HS_DIRTY = 0x00000002,
+ HS_RELEASED = 0x00000004,
+ HS_ARCHIVED = 0x00000008,
+ HS_NORELEASE = 0x00000010,
+ HS_NOARCHIVE = 0x00000020,
+ HS_LOST = 0x00000040,
+};
+
+/* HSM user-settable flags. */
+#define HSM_USER_MASK (HS_NORELEASE | HS_NOARCHIVE | HS_DIRTY)
+
+/* Other HSM flags. */
+#define HSM_STATUS_MASK (HS_EXISTS | HS_LOST | HS_RELEASED | HS_ARCHIVED)
+
+/*
+ * All HSM-related possible flags that could be applied to a file.
+ * This should be kept in sync with hsm_states.
+ */
+#define HSM_FLAGS_MASK (HSM_USER_MASK | HSM_STATUS_MASK)
+
+/**
+ * HSM request progress state
+ */
+enum hsm_progress_states {
+ HPS_WAITING = 1,
+ HPS_RUNNING = 2,
+ HPS_DONE = 3,
+};
+#define HPS_NONE 0
+
+static inline char *hsm_progress_state2name(enum hsm_progress_states s)
+{
+ switch (s) {
+ case HPS_WAITING: return "waiting";
+ case HPS_RUNNING: return "running";
+ case HPS_DONE: return "done";
+ default: return "unknown";
+ }
+}
+
+struct hsm_extent {
+ __u64 offset;
+ __u64 length;
+} __attribute__((packed));
+
+/**
+ * Current HSM states of a Lustre file.
+ *
+ * This structure is mainly meant to be sent to user space. It describes the
+ * current HSM flags and the in-progress action.
+ */
+struct hsm_user_state {
+ /** Current HSM states, from enum hsm_states. */
+ __u32 hus_states;
+ __u32 hus_archive_id;
+ /** The action currently in progress, if any */
+ __u32 hus_in_progress_state;
+ __u32 hus_in_progress_action;
+ struct hsm_extent hus_in_progress_location;
+ char hus_extended_info[];
+};
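
A hedged usage sketch (not part of this patch): querying the per-file HSM state with llapi_hsm_state_get(), declared in lustreapi.h later in this series, and testing the hus_states bits defined above.

    #include <lustre/lustreapi.h>

    /* Return 1 if the file has an up-to-date copy in the archive. */
    static int is_cleanly_archived(const char *path)
    {
            struct hsm_user_state hus;

            if (llapi_hsm_state_get(path, &hus) != 0)
                    return 0;

            return (hus.hus_states & HS_ARCHIVED) &&
                   !(hus.hus_states & HS_DIRTY);
    }
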
+
+struct hsm_state_set_ioc {
+ struct lu_fid hssi_fid;
+ __u64 hssi_setmask;
+ __u64 hssi_clearmask;
+};
+
+/*
+ * This structure describes the current in-progress action for a file.
+ * It is returned to user space and sent over the wire.
+ */
+struct hsm_current_action {
+ /** The action currently in progress, if any */
+ /* state is one of hsm_progress_states */
+ __u32 hca_state;
+ /* action is one of hsm_user_action */
+ __u32 hca_action;
+ struct hsm_extent hca_location;
+};
+
+/***** HSM user requests ******/
+/* User-generated (lfs/ioctl) request types */
+enum hsm_user_action {
+ HUA_NONE = 1, /* no action (noop) */
+ HUA_ARCHIVE = 10, /* copy to hsm */
+ HUA_RESTORE = 11, /* prestage */
+ HUA_RELEASE = 12, /* drop ost objects */
+ HUA_REMOVE = 13, /* remove from archive */
+ HUA_CANCEL = 14 /* cancel a request */
+};
+
+static inline char *hsm_user_action2name(enum hsm_user_action a)
+{
+ switch (a) {
+ case HUA_NONE: return "NOOP";
+ case HUA_ARCHIVE: return "ARCHIVE";
+ case HUA_RESTORE: return "RESTORE";
+ case HUA_RELEASE: return "RELEASE";
+ case HUA_REMOVE: return "REMOVE";
+ case HUA_CANCEL: return "CANCEL";
+ default: return "UNKNOWN";
+ }
+}
+
+/*
+ * List of hr_flags (bit field)
+ */
+#define HSM_FORCE_ACTION 0x0001
+/* used by the copytool, cannot be set by the user */
+#define HSM_GHOST_COPY 0x0002
+
+/**
+ * Contains the fixed part of struct hsm_user_request.
+ */
+struct hsm_request {
+ __u32 hr_action; /* enum hsm_user_action */
+ __u32 hr_archive_id; /* archive id, used only with HUA_ARCHIVE */
+ __u64 hr_flags; /* request flags */
+ __u32 hr_itemcount; /* item count in hur_user_item vector */
+ __u32 hr_data_len;
+};
+
+struct hsm_user_item {
+ lustre_fid hui_fid;
+ struct hsm_extent hui_extent;
+} __attribute__((packed));
+
+struct hsm_user_request {
+ struct hsm_request hur_request;
+ struct hsm_user_item hur_user_item[0];
+ /* extra data blob at end of struct (after all
+ * hur_user_items), only use helpers to access it
+ */
+} __attribute__((packed));
+
+/** Return pointer to data field in a hsm user request */
+static inline void *hur_data(struct hsm_user_request *hur)
+{
+ return &(hur->hur_user_item[hur->hur_request.hr_itemcount]);
+}
+
+/** Compute the current length of the provided hsm_user_request. */
+static inline int hur_len(struct hsm_user_request *hur)
+{
+ return offsetof(struct hsm_user_request,
+ hur_user_item[hur->hur_request.hr_itemcount]) +
+ hur->hur_request.hr_data_len;
+}
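
A hedged sketch (not part of this patch) of building an archive request around the structures above; llapi_hsm_user_request_alloc() and llapi_hsm_request() are declared in lustreapi.h later in this series, and archive id 1 is just an example value.

    #include <errno.h>
    #include <stdlib.h>
    #include <lustre/lustreapi.h>

    /* Ask the HSM coordinator to archive one whole file into archive 1. */
    static int archive_one(char *mnt, const lustre_fid *fid)
    {
            struct hsm_user_request *hur;
            int rc;

            hur = llapi_hsm_user_request_alloc(1 /* itemcount */, 0 /* data_len */);
            if (hur == NULL)
                    return -ENOMEM;

            hur->hur_request.hr_action = HUA_ARCHIVE;
            hur->hur_request.hr_archive_id = 1;
            hur->hur_request.hr_itemcount = 1;
            hur->hur_user_item[0].hui_fid = *fid;
            hur->hur_user_item[0].hui_extent.offset = 0;
            hur->hur_user_item[0].hui_extent.length = -1ULL;  /* whole file */

            rc = llapi_hsm_request(mnt, hur);  /* hur_len() sizes the ioctl */
            free(hur);
            return rc;
    }
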
+
+/****** HSM RPCs to copytool *****/
+/* Message types the copytool may receive */
+enum hsm_message_type {
+ HMT_ACTION_LIST = 100, /* message is a hsm_action_list */
+};
+
+/* Actions the copytool may be instructed to take for a given action_item */
+enum hsm_copytool_action {
+ HSMA_NONE = 10, /* no action */
+ HSMA_ARCHIVE = 20, /* arbitrary offset */
+ HSMA_RESTORE = 21,
+ HSMA_REMOVE = 22,
+ HSMA_CANCEL = 23
+};
+
+static inline char *hsm_copytool_action2name(enum hsm_copytool_action a)
+{
+ switch (a) {
+ case HSMA_NONE: return "NOOP";
+ case HSMA_ARCHIVE: return "ARCHIVE";
+ case HSMA_RESTORE: return "RESTORE";
+ case HSMA_REMOVE: return "REMOVE";
+ case HSMA_CANCEL: return "CANCEL";
+ default: return "UNKNOWN";
+ }
+}
+
+/* Copytool item action description */
+struct hsm_action_item {
+ __u32 hai_len; /* valid size of this struct */
+ __u32 hai_action; /* hsm_copytool_action, but use known size */
+ lustre_fid hai_fid; /* Lustre FID to operate on */
+ lustre_fid hai_dfid; /* fid used for data access */
+ struct hsm_extent hai_extent; /* byte range to operate on */
+ __u64 hai_cookie; /* action cookie from coordinator */
+ __u64 hai_gid; /* grouplock id */
+ char hai_data[0]; /* variable length */
+} __attribute__((packed));
+
+/*
+ * Helper function that prints in hex the first bytes of the
+ * hai opaque data field.
+ * \param hai [IN] record to print
+ * \param buffer [OUT] output buffer
+ * \param len [IN] max buffer len
+ * \retval buffer
+ */
+static inline char *hai_dump_data_field(struct hsm_action_item *hai,
+ char *buffer, int len)
+{
+ int i, sz, data_len;
+ char *ptr;
+
+ ptr = buffer;
+ sz = len;
+ data_len = hai->hai_len - sizeof(*hai);
+ for (i = 0 ; (i < data_len) && (sz > 0) ; i++)
+ {
+ int cnt;
+
+ cnt = snprintf(ptr, sz, "%.2X",
+ (unsigned char)hai->hai_data[i]);
+ ptr += cnt;
+ sz -= cnt;
+ }
+ *ptr = '\0';
+ return buffer;
+}
+
+/* Copytool action list */
+#define HAL_VERSION 1
+#define HAL_MAXSIZE LNET_MTU /* bytes, used in userspace only */
+struct hsm_action_list {
+ __u32 hal_version;
+ __u32 hal_count; /* number of hai's to follow */
+ __u64 hal_compound_id; /* returned by coordinator */
+ __u64 hal_flags;
+ __u32 hal_archive_id; /* which archive backend */
+ __u32 padding1;
+ char hal_fsname[0]; /* null-terminated */
+ /* struct hsm_action_item[hal_count] follows, aligned on 8-byte
+ boundaries. See hai_zero */
+} __attribute__((packed));
+
+#ifndef HAVE_CFS_SIZE_ROUND
+static inline int cfs_size_round (int val)
+{
+ return (val + 7) & (~0x7);
+}
+#define HAVE_CFS_SIZE_ROUND
+#endif
+
+/* Return pointer to first hai in action list */
+static inline struct hsm_action_item * hai_zero(struct hsm_action_list *hal)
+{
+ return (struct hsm_action_item *)(hal->hal_fsname +
+ cfs_size_round(strlen(hal-> \
+ hal_fsname)));
+}
+/* Return pointer to next hai */
+static inline struct hsm_action_item * hai_next(struct hsm_action_item *hai)
+{
+ return (struct hsm_action_item *)((char *)hai +
+ cfs_size_round(hai->hai_len));
+}
+
+/* Return size of an hsm_action_list */
+static inline int hal_size(struct hsm_action_list *hal)
+{
+ int i, sz;
+ struct hsm_action_item *hai;
+
+ sz = sizeof(*hal) + cfs_size_round(strlen(hal->hal_fsname));
+ hai = hai_zero(hal);
+ for (i = 0 ; i < hal->hal_count ; i++) {
+ sz += cfs_size_round(hai->hai_len);
+ hai = hai_next(hai);
+ }
+ return(sz);
+}
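
A sketch (not part of this patch) of the intended iteration pattern for a received action list, using hai_zero()/hai_next() and the action-name helper above.

    #include <stdio.h>

    /* Print every action item a copytool received in one hsm_action_list. */
    static void walk_actions(struct hsm_action_list *hal)
    {
            struct hsm_action_item *hai = hai_zero(hal);
            __u32 i;

            for (i = 0; i < hal->hal_count; i++) {
                    printf("%s: cookie %llu, %llu bytes at offset %llu\n",
                           hsm_copytool_action2name(hai->hai_action),
                           (unsigned long long)hai->hai_cookie,
                           (unsigned long long)hai->hai_extent.length,
                           (unsigned long long)hai->hai_extent.offset);
                    hai = hai_next(hai);
            }
    }
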
+
+/* Copytool progress reporting */
+#define HP_FLAG_COMPLETED 0x01
+#define HP_FLAG_RETRY 0x02
+
+struct hsm_progress {
+ lustre_fid hp_fid;
+ __u64 hp_cookie;
+ struct hsm_extent hp_extent;
+ __u16 hp_flags;
+ __u16 hp_errval; /* positive val */
+ __u32 padding;
+};
+
+/**
+ * Used by the copytool during any HSM request it handles.
+ * This structure is initialized by llapi_hsm_copy_start(),
+ * which is a helper over the ioctl() interface.
+ * It stores Lustre-internal data only.
+ */
+struct hsm_copy {
+ __u64 hc_data_version;
+ __u16 hc_flags;
+ __u16 hc_errval; /* positive val */
+ __u32 padding;
+ struct hsm_action_item hc_hai;
+};
+
+/** @} lustreuser */
+
+#endif /* _LUSTRE_USER_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustreapi.h b/drivers/staging/lustre/lustre/include/lustre/lustreapi.h
new file mode 100644
index 0000000..63da665
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre/lustreapi.h
@@ -0,0 +1,310 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef _LUSTREAPI_H_
+#define _LUSTREAPI_H_
+
+/** \defgroup llapi llapi
+ *
+ * @{
+ */
+
+#include <lustre/lustre_user.h>
+
+typedef void (*llapi_cb_t)(char *obd_type_name, char *obd_name, char *obd_uuid, void *args);
+
+/* lustreapi message severity level */
+enum llapi_message_level {
+ LLAPI_MSG_OFF = 0,
+ LLAPI_MSG_FATAL = 1,
+ LLAPI_MSG_ERROR = 2,
+ LLAPI_MSG_WARN = 3,
+ LLAPI_MSG_NORMAL = 4,
+ LLAPI_MSG_INFO = 5,
+ LLAPI_MSG_DEBUG = 6,
+ LLAPI_MSG_MAX
+};
+
+/* the bottom three bits reserved for llapi_message_level */
+#define LLAPI_MSG_MASK 0x00000007
+#define LLAPI_MSG_NO_ERRNO 0x00000010
+
+extern void llapi_msg_set_level(int level);
+extern void llapi_error(int level, int rc, char *fmt, ...);
+#define llapi_err_noerrno(level, fmt, a...) \
+ llapi_error((level) | LLAPI_MSG_NO_ERRNO, 0, fmt, ## a)
+extern void llapi_printf(int level, char *fmt, ...);
+extern int llapi_file_create(const char *name, unsigned long long stripe_size,
+ int stripe_offset, int stripe_count,
+ int stripe_pattern);
+extern int llapi_file_open(const char *name, int flags, int mode,
+ unsigned long long stripe_size, int stripe_offset,
+ int stripe_count, int stripe_pattern);
+extern int llapi_file_create_pool(const char *name,
+ unsigned long long stripe_size,
+ int stripe_offset, int stripe_count,
+ int stripe_pattern, char *pool_name);
+extern int llapi_file_open_pool(const char *name, int flags, int mode,
+ unsigned long long stripe_size,
+ int stripe_offset, int stripe_count,
+ int stripe_pattern, char *pool_name);
+extern int llapi_poollist(const char *name);
+extern int llapi_get_poollist(const char *name, char **poollist, int list_size,
+ char *buffer, int buffer_size);
+extern int llapi_get_poolmembers(const char *poolname, char **members,
+ int list_size, char *buffer, int buffer_size);
+extern int llapi_file_get_stripe(const char *path, struct lov_user_md *lum);
+#define HAVE_LLAPI_FILE_LOOKUP
+extern int llapi_file_lookup(int dirfd, const char *name);
+
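
A hedged usage sketch (not part of this patch) of llapi_file_create(); the stripe parameters are example values, with -1 conventionally meaning "any starting OST" and 0 the default RAID0 pattern.

    #include <lustre/lustreapi.h>

    /* Create an empty file striped across four OSTs with 1 MiB stripes. */
    static int create_wide_file(const char *path)
    {
            return llapi_file_create(path,
                                     1024 * 1024,   /* stripe_size */
                                     -1,            /* stripe_offset: any OST */
                                     4,             /* stripe_count */
                                     0);            /* stripe_pattern: default */
    }
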
+#define VERBOSE_COUNT 0x1
+#define VERBOSE_SIZE 0x2
+#define VERBOSE_OFFSET 0x4
+#define VERBOSE_POOL 0x8
+#define VERBOSE_DETAIL 0x10
+#define VERBOSE_OBJID 0x20
+#define VERBOSE_GENERATION 0x40
+#define VERBOSE_MDTINDEX 0x80
+#define VERBOSE_ALL (VERBOSE_COUNT | VERBOSE_SIZE | VERBOSE_OFFSET | \
+ VERBOSE_POOL | VERBOSE_OBJID | VERBOSE_GENERATION)
+
+struct find_param {
+ unsigned int maxdepth;
+ time_t atime;
+ time_t mtime;
+ time_t ctime;
+ int asign; /* cannot be bitfields due to using pointers to */
+ int csign; /* access them during argument parsing. */
+ int msign;
+ int type;
+ int size_sign:2, /* these need to be signed values */
+ stripesize_sign:2,
+ stripecount_sign:2;
+ unsigned long long size;
+ unsigned long long size_units;
+ uid_t uid;
+ gid_t gid;
+
+ unsigned long zeroend:1,
+ recursive:1,
+ exclude_pattern:1,
+ exclude_type:1,
+ exclude_obd:1,
+ exclude_mdt:1,
+ exclude_gid:1,
+ exclude_uid:1,
+ check_gid:1, /* group ID */
+ check_uid:1, /* user ID */
+ check_pool:1, /* LOV pool name */
+ check_size:1, /* file size */
+ exclude_pool:1,
+ exclude_size:1,
+ exclude_atime:1,
+ exclude_mtime:1,
+ exclude_ctime:1,
+ get_lmv:1, /* get MDT list from LMV */
+ raw:1, /* do not fill in defaults */
+ check_stripesize:1, /* LOV stripe size */
+ exclude_stripesize:1,
+ check_stripecount:1, /* LOV stripe count */
+ exclude_stripecount:1;
+
+ int verbose;
+ int quiet;
+
+ /* regular expression */
+ char *pattern;
+
+ char *print_fmt;
+
+ struct obd_uuid *obduuid;
+ int num_obds;
+ int num_alloc_obds;
+ int obdindex;
+ int *obdindexes;
+
+ struct obd_uuid *mdtuuid;
+ int num_mdts;
+ int num_alloc_mdts;
+ int mdtindex;
+ int *mdtindexes;
+ int file_mdtindex;
+
+ int lumlen;
+ struct lov_user_mds_data *lmd;
+
+ char poolname[LOV_MAXPOOLNAME + 1];
+
+ int fp_lmv_count;
+ struct lmv_user_md *fp_lmv_md;
+
+ unsigned long long stripesize;
+ unsigned long long stripesize_units;
+ unsigned long long stripecount;
+
+ /* In-process parameters. */
+ unsigned long got_uuids:1,
+ obds_printed:1,
+ have_fileinfo:1; /* file attrs and LOV xattr */
+ unsigned int depth;
+ dev_t st_dev;
+};
+
+extern int llapi_ostlist(char *path, struct find_param *param);
+extern int llapi_uuid_match(char *real_uuid, char *search_uuid);
+extern int llapi_getstripe(char *path, struct find_param *param);
+extern int llapi_find(char *path, struct find_param *param);
+
+extern int llapi_file_fget_mdtidx(int fd, int *mdtidx);
+extern int llapi_dir_create_pool(const char *name, int flags, int stripe_offset,
+ int stripe_count, int stripe_pattern,
+ char *poolname);
+int llapi_direntry_remove(char *dname);
+extern int llapi_obd_statfs(char *path, __u32 type, __u32 index,
+ struct obd_statfs *stat_buf,
+ struct obd_uuid *uuid_buf);
+extern int llapi_ping(char *obd_type, char *obd_name);
+extern int llapi_target_check(int num_types, char **obd_types, char *dir);
+extern int llapi_file_get_lov_uuid(const char *path, struct obd_uuid *lov_uuid);
+extern int llapi_file_get_lmv_uuid(const char *path, struct obd_uuid *lmv_uuid);
+extern int llapi_file_fget_lov_uuid(int fd, struct obd_uuid *lov_uuid);
+extern int llapi_lov_get_uuids(int fd, struct obd_uuid *uuidp, int *ost_count);
+extern int llapi_lmv_get_uuids(int fd, struct obd_uuid *uuidp, int *mdt_count);
+extern int llapi_is_lustre_mnttype(const char *type);
+extern int llapi_search_ost(char *fsname, char *poolname, char *ostname);
+extern int llapi_get_obd_count(char *mnt, int *count, int is_mdt);
+extern int parse_size(char *optarg, unsigned long long *size,
+ unsigned long long *size_units, int bytes_spec);
+extern int llapi_search_mounts(const char *pathname, int index,
+ char *mntdir, char *fsname);
+extern int llapi_search_fsname(const char *pathname, char *fsname);
+extern int llapi_getname(const char *path, char *buf, size_t size);
+
+extern void llapi_ping_target(char *obd_type, char *obd_name,
+ char *obd_uuid, void *args);
+
+extern int llapi_search_rootpath(char *pathname, const char *fsname);
+
+struct mntent;
+#define HAVE_LLAPI_IS_LUSTRE_MNT
+extern int llapi_is_lustre_mnt(struct mntent *mnt);
+extern int llapi_quotachown(char *path, int flag);
+extern int llapi_quotacheck(char *mnt, int check_type);
+extern int llapi_poll_quotacheck(char *mnt, struct if_quotacheck *qchk);
+extern int llapi_quotactl(char *mnt, struct if_quotactl *qctl);
+extern int llapi_target_iterate(int type_num, char **obd_type, void *args,
+ llapi_cb_t cb);
+extern int llapi_get_connect_flags(const char *mnt, __u64 *flags);
+extern int llapi_lsetfacl(int argc, char *argv[]);
+extern int llapi_lgetfacl(int argc, char *argv[]);
+extern int llapi_rsetfacl(int argc, char *argv[]);
+extern int llapi_rgetfacl(int argc, char *argv[]);
+extern int llapi_cp(int argc, char *argv[]);
+extern int llapi_ls(int argc, char *argv[]);
+extern int llapi_fid2path(const char *device, const char *fidstr, char *path,
+ int pathlen, long long *recno, int *linkno);
+extern int llapi_path2fid(const char *path, lustre_fid *fid);
+extern int llapi_fd2fid(const int fd, lustre_fid *fid);
+
+extern int llapi_get_version(char *buffer, int buffer_size, char **version);
+extern int llapi_get_data_version(int fd, __u64 *data_version, __u64 flags);
+extern int llapi_hsm_state_get(const char *path, struct hsm_user_state *hus);
+extern int llapi_hsm_state_set(const char *path, __u64 setmask, __u64 clearmask,
+ __u32 archive_id);
+
+extern int llapi_create_volatile_idx(char *directory, int idx, int mode);
+static inline int llapi_create_volatile(char *directory, int mode)
+{
+ return llapi_create_volatile_idx(directory, -1, mode);
+}
+
+
+extern int llapi_fswap_layouts(const int fd1, const int fd2,
+ __u64 dv1, __u64 dv2, __u64 flags);
+extern int llapi_swap_layouts(const char *path1, const char *path2,
+ __u64 dv1, __u64 dv2, __u64 flags);
+
+/* Changelog interface. priv is private state, managed internally
+ by these functions */
+#define CHANGELOG_FLAG_FOLLOW 0x01 /* Not yet implemented */
+#define CHANGELOG_FLAG_BLOCK 0x02 /* Blocking IO makes sense in case of
+ slow user parsing of the records, but it also prevents us from cleaning
+ up if the records are not consumed. */
+
+/* Records are now received in extended format. Most of them are still
+ * written to disk in changelog_rec format (to save space and time) and are
+ * converted to the extended format in the Lustre API to ease changelog
+ * analysis. */
+#define HAVE_CHANGELOG_EXTEND_REC 1
+
+extern int llapi_changelog_start(void **priv, int flags, const char *mdtname,
+ long long startrec);
+extern int llapi_changelog_fini(void **priv);
+extern int llapi_changelog_recv(void *priv, struct changelog_ext_rec **rech);
+extern int llapi_changelog_free(struct changelog_ext_rec **rech);
+/* Allow records up to endrec to be destroyed; requires registered id. */
+extern int llapi_changelog_clear(const char *mdtname, const char *idstr,
+ long long endrec);
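
A hedged consumer sketch (not part of this patch) tying the changelog calls above together; the loop-termination convention (any non-zero return from llapi_changelog_recv() ending the stream) is an assumption.

    #include <stdio.h>
    #include <lustre/lustreapi.h>

    /* Read changelog records from one MDT, starting at record 0, and print them. */
    static int dump_changelog(const char *mdtname)
    {
            struct changelog_ext_rec *rec;
            void *priv;
            int rc;

            rc = llapi_changelog_start(&priv, CHANGELOG_FLAG_BLOCK, mdtname, 0);
            if (rc < 0)
                    return rc;

            while (llapi_changelog_recv(priv, &rec) == 0) {
                    const char *type = changelog_type2str(rec->cr_type);

                    printf("%llu %s %.*s\n",
                           (unsigned long long)rec->cr_index,
                           type ? type : "UNKNOWN",
                           rec->cr_namelen, rec->cr_name);
                    llapi_changelog_free(&rec);
            }

            return llapi_changelog_fini(&priv);
    }
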
+
+/* HSM copytool interface.
+ * priv is private state, managed internally by these functions
+ */
+struct hsm_copytool_private;
+extern int llapi_hsm_copytool_start(struct hsm_copytool_private **priv,
+ char *fsname, int flags,
+ int archive_count, int *archives);
+extern int llapi_hsm_copytool_fini(struct hsm_copytool_private **priv);
+extern int llapi_hsm_copytool_recv(struct hsm_copytool_private *priv,
+ struct hsm_action_list **hal, int *msgsize);
+extern int llapi_hsm_copytool_free(struct hsm_action_list **hal);
+extern int llapi_hsm_copy_start(char *mnt, struct hsm_copy *copy,
+ const struct hsm_action_item *hai);
+extern int llapi_hsm_copy_end(char *mnt, struct hsm_copy *copy,
+ const struct hsm_progress *hp);
+extern int llapi_hsm_progress(char *mnt, struct hsm_progress *hp);
+extern int llapi_hsm_import(const char *dst, int archive, struct stat *st,
+ unsigned long long stripe_size, int stripe_offset,
+ int stripe_count, int stripe_pattern,
+ char *pool_name, lustre_fid *newfid);
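
A hedged copytool skeleton (not part of this patch) wiring the start/recv/free/fini calls above together; archive id 1 is an example, and walk_actions() refers to the earlier sketch in lustre_user.h.

    /* Receive action lists for archive 1 until llapi_hsm_copytool_recv() fails;
     * assumes <lustre/lustreapi.h> is included. */
    static int copytool_loop(char *fsname)
    {
            struct hsm_copytool_private *priv;
            struct hsm_action_list *hal;
            int archives[] = { 1 };
            int msgsize;
            int rc;

            rc = llapi_hsm_copytool_start(&priv, fsname, 0, 1, archives);
            if (rc < 0)
                    return rc;

            while (llapi_hsm_copytool_recv(priv, &hal, &msgsize) == 0) {
                    walk_actions(hal);              /* see the earlier sketch */
                    llapi_hsm_copytool_free(&hal);
            }

            return llapi_hsm_copytool_fini(&priv);
    }
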
+
+/* HSM user interface */
+extern struct hsm_user_request *llapi_hsm_user_request_alloc(int itemcount,
+ int data_len);
+extern int llapi_hsm_request(char *mnt, struct hsm_user_request *request);
+extern int llapi_hsm_current_action(const char *path,
+ struct hsm_current_action *hca);
+/** @} llapi */
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_acl.h b/drivers/staging/lustre/lustre/include/lustre_acl.h
new file mode 100644
index 0000000..5cfb87b
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_acl.h
@@ -0,0 +1,42 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/include/lustre_acl.h
+ */
+
+#ifndef _LUSTRE_ACL_H
+#define _LUSTRE_ACL_H
+
+#include <linux/lustre_acl.h>
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_capa.h b/drivers/staging/lustre/lustre/include/lustre_capa.h
new file mode 100644
index 0000000..d77bffc
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_capa.h
@@ -0,0 +1,305 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/include/lustre_capa.h
+ *
+ * Author: Lai Siyao <lsy@clusterfs.com>
+ */
+
+#ifndef __LINUX_CAPA_H_
+#define __LINUX_CAPA_H_
+
+/** \defgroup capa capa
+ *
+ * @{
+ */
+
+/*
+ * capability
+ */
+#include <linux/crypto.h>
+#include <lustre/lustre_idl.h>
+
+#define CAPA_TIMEOUT 1800 /* sec, == 30 min */
+#define CAPA_KEY_TIMEOUT (24 * 60 * 60) /* sec, == 1 day */
+
+struct capa_hmac_alg {
+ const char *ha_name;
+ int ha_len;
+ int ha_keylen;
+};
+
+#define DEF_CAPA_HMAC_ALG(name, type, len, keylen) \
+[CAPA_HMAC_ALG_ ## type] = { \
+ .ha_name = name, \
+ .ha_len = len, \
+ .ha_keylen = keylen, \
+}
+
+struct client_capa {
+ struct inode *inode;
+ struct list_head lli_list; /* link to lli_oss_capas */
+};
+
+struct target_capa {
+ struct hlist_node c_hash; /* link to capa hash */
+};
+
+struct obd_capa {
+ struct list_head c_list; /* link to capa_list */
+
+ struct lustre_capa c_capa; /* capa */
+ atomic_t c_refc; /* ref count */
+ cfs_time_t c_expiry; /* jiffies */
+ spinlock_t c_lock; /* protect capa content */
+ int c_site;
+
+ union {
+ struct client_capa cli;
+ struct target_capa tgt;
+ } u;
+};
+
+enum {
+ CAPA_SITE_CLIENT = 0,
+ CAPA_SITE_SERVER,
+ CAPA_SITE_MAX
+};
+
+static inline struct lu_fid *capa_fid(struct lustre_capa *capa)
+{
+ return &capa->lc_fid;
+}
+
+static inline __u64 capa_opc(struct lustre_capa *capa)
+{
+ return capa->lc_opc;
+}
+
+static inline __u64 capa_uid(struct lustre_capa *capa)
+{
+ return capa->lc_uid;
+}
+
+static inline __u64 capa_gid(struct lustre_capa *capa)
+{
+ return capa->lc_gid;
+}
+
+static inline __u32 capa_flags(struct lustre_capa *capa)
+{
+ return capa->lc_flags & 0xffffff;
+}
+
+static inline __u32 capa_alg(struct lustre_capa *capa)
+{
+ return (capa->lc_flags >> 24);
+}
+
+static inline __u32 capa_keyid(struct lustre_capa *capa)
+{
+ return capa->lc_keyid;
+}
+
+static inline __u64 capa_key_seq(struct lustre_capa_key *key)
+{
+ return key->lk_seq;
+}
+
+static inline __u32 capa_key_keyid(struct lustre_capa_key *key)
+{
+ return key->lk_keyid;
+}
+
+static inline __u32 capa_timeout(struct lustre_capa *capa)
+{
+ return capa->lc_timeout;
+}
+
+static inline __u32 capa_expiry(struct lustre_capa *capa)
+{
+ return capa->lc_expiry;
+}
+
+void _debug_capa(struct lustre_capa *, struct libcfs_debug_msg_data *,
+ const char *fmt, ... );
+#define DEBUG_CAPA(level, capa, fmt, args...) \
+do { \
+ if (((level) & D_CANTMASK) != 0 || \
+ ((libcfs_debug & (level)) != 0 && \
+ (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, NULL); \
+ _debug_capa((capa), &msgdata, fmt, ##args); \
+ } \
+} while (0)
+
+#define DEBUG_CAPA_KEY(level, k, fmt, args...) \
+do { \
+CDEBUG(level, fmt " capability key@%p seq "LPU64" keyid %u\n", \
+ ##args, k, capa_key_seq(k), capa_key_keyid(k)); \
+} while (0)
+
+typedef int (* renew_capa_cb_t)(struct obd_capa *, struct lustre_capa *);
+
+/* obdclass/capa.c */
+extern struct list_head capa_list[];
+extern spinlock_t capa_lock;
+extern int capa_count[];
+extern struct kmem_cache *capa_cachep;
+
+struct hlist_head *init_capa_hash(void);
+void cleanup_capa_hash(struct hlist_head *hash);
+
+struct obd_capa *capa_add(struct hlist_head *hash,
+ struct lustre_capa *capa);
+struct obd_capa *capa_lookup(struct hlist_head *hash,
+ struct lustre_capa *capa, int alive);
+
+int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key);
+int capa_encrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen);
+int capa_decrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen);
+void capa_cpy(void *dst, struct obd_capa *ocapa);
+static inline struct obd_capa *alloc_capa(int site)
+{
+ struct obd_capa *ocapa;
+
+ if (unlikely(site != CAPA_SITE_CLIENT && site != CAPA_SITE_SERVER))
+ return ERR_PTR(-EINVAL);
+
+ OBD_SLAB_ALLOC_PTR(ocapa, capa_cachep);
+ if (unlikely(!ocapa))
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&ocapa->c_list);
+ atomic_set(&ocapa->c_refc, 1);
+ spin_lock_init(&ocapa->c_lock);
+ ocapa->c_site = site;
+ if (ocapa->c_site == CAPA_SITE_CLIENT)
+ INIT_LIST_HEAD(&ocapa->u.cli.lli_list);
+ else
+ INIT_HLIST_NODE(&ocapa->u.tgt.c_hash);
+
+ return ocapa;
+}
+
+static inline struct obd_capa *capa_get(struct obd_capa *ocapa)
+{
+ if (!ocapa)
+ return NULL;
+
+ atomic_inc(&ocapa->c_refc);
+ return ocapa;
+}
+
+static inline void capa_put(struct obd_capa *ocapa)
+{
+ if (!ocapa)
+ return;
+
+ if (atomic_read(&ocapa->c_refc) == 0) {
+ DEBUG_CAPA(D_ERROR, &ocapa->c_capa, "refc is 0 for");
+ LBUG();
+ }
+
+ if (atomic_dec_and_test(&ocapa->c_refc)) {
+ LASSERT(list_empty(&ocapa->c_list));
+ if (ocapa->c_site == CAPA_SITE_CLIENT) {
+ LASSERT(list_empty(&ocapa->u.cli.lli_list));
+ } else {
+ struct hlist_node *hnode;
+
+ hnode = &ocapa->u.tgt.c_hash;
+ LASSERT(!hnode->next && !hnode->pprev);
+ }
+ OBD_SLAB_FREE(ocapa, capa_cachep, sizeof(*ocapa));
+ }
+}
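
A short in-kernel sketch (not part of this patch) of the reference-counting contract implemented by alloc_capa(), capa_get() and capa_put() above; it assumes the surrounding Lustre kernel headers for IS_ERR().

    /* Typical client-side capability lifetime: one reference from allocation,
     * one extra reference for a second user, both dropped with capa_put(). */
    static void capa_refcount_example(void)
    {
            struct obd_capa *ocapa = alloc_capa(CAPA_SITE_CLIENT);

            if (IS_ERR(ocapa))
                    return;

            capa_get(ocapa);        /* refcount 1 -> 2 */
            /* ... both users read ocapa->c_capa under ocapa->c_lock ... */
            capa_put(ocapa);        /* 2 -> 1 */
            capa_put(ocapa);        /* 1 -> 0: ocapa is freed */
    }
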
+
+static inline int open_flags_to_accmode(int flags)
+{
+ int mode = flags;
+
+ if ((mode + 1) & O_ACCMODE)
+ mode++;
+ if (mode & O_TRUNC)
+ mode |= 2;
+
+ return mode;
+}
+
+static inline __u64 capa_open_opc(int mode)
+{
+ return mode & FMODE_WRITE ? CAPA_OPC_OSS_WRITE : CAPA_OPC_OSS_READ;
+}
+
+static inline void set_capa_expiry(struct obd_capa *ocapa)
+{
+ cfs_time_t expiry = cfs_time_sub((cfs_time_t)ocapa->c_capa.lc_expiry,
+ cfs_time_current_sec());
+ ocapa->c_expiry = cfs_time_add(cfs_time_current(),
+ cfs_time_seconds(expiry));
+}
+
+static inline int capa_is_expired_sec(struct lustre_capa *capa)
+{
+ return (capa->lc_expiry - cfs_time_current_sec() <= 0);
+}
+
+static inline int capa_is_expired(struct obd_capa *ocapa)
+{
+ return cfs_time_beforeq(ocapa->c_expiry, cfs_time_current());
+}
+
+static inline int capa_opc_supported(struct lustre_capa *capa, __u64 opc)
+{
+ return (capa_opc(capa) & opc) == opc;
+}
+
+struct filter_capa_key {
+ struct list_head k_list;
+ struct lustre_capa_key k_key;
+};
+
+enum {
+ LC_ID_NONE = 0,
+ LC_ID_PLAIN = 1,
+ LC_ID_CONVERT = 2
+};
+
+#define BYPASS_CAPA (struct lustre_capa *)ERR_PTR(-ENOENT)
+
+/** @} capa */
+
+#endif /* __LINUX_CAPA_H_ */
diff --git a/drivers/staging/lustre/lustre/include/lustre_cfg.h b/drivers/staging/lustre/lustre/include/lustre_cfg.h
new file mode 100644
index 0000000..e14a5f6
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_cfg.h
@@ -0,0 +1,291 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef _LUSTRE_CFG_H
+#define _LUSTRE_CFG_H
+
+/** \defgroup cfg cfg
+ *
+ * @{
+ */
+
+/*
+ * 1cf6
+ * lcfG
+ */
+#define LUSTRE_CFG_VERSION 0x1cf60001
+#define LUSTRE_CFG_MAX_BUFCOUNT 8
+
+#define LCFG_HDR_SIZE(count) \
+ cfs_size_round(offsetof (struct lustre_cfg, lcfg_buflens[(count)]))
+
+/** If the LCFG_REQUIRED bit is set in a configuration command,
+ * then the client is required to understand this parameter
+ * in order to mount the filesystem. If it does not understand
+ * a REQUIRED command the client mount will fail. */
+#define LCFG_REQUIRED 0x0001000
+
+enum lcfg_command_type {
+ LCFG_ATTACH = 0x00cf001, /**< create a new obd instance */
+ LCFG_DETACH = 0x00cf002, /**< destroy obd instance */
+ LCFG_SETUP = 0x00cf003, /**< call type-specific setup */
+ LCFG_CLEANUP = 0x00cf004, /**< call type-specific cleanup */
+ LCFG_ADD_UUID = 0x00cf005, /**< add a nid to a niduuid */
+ LCFG_DEL_UUID = 0x00cf006, /**< remove a nid from a niduuid */
+ LCFG_MOUNTOPT = 0x00cf007, /**< create a profile (mdc, osc) */
+ LCFG_DEL_MOUNTOPT = 0x00cf008, /**< destroy a profile */
+ LCFG_SET_TIMEOUT = 0x00cf009, /**< set obd_timeout */
+ LCFG_SET_UPCALL = 0x00cf00a, /**< deprecated */
+ LCFG_ADD_CONN = 0x00cf00b, /**< add a failover niduuid to an obd */
+ LCFG_DEL_CONN = 0x00cf00c, /**< remove a failover niduuid */
+ LCFG_LOV_ADD_OBD = 0x00cf00d, /**< add an osc to a lov */
+ LCFG_LOV_DEL_OBD = 0x00cf00e, /**< remove an osc from a lov */
+ LCFG_PARAM = 0x00cf00f, /**< set a proc parameter */
+ LCFG_MARKER = 0x00cf010, /**< metadata about next cfg rec */
+ LCFG_LOG_START = 0x00ce011, /**< mgc only, process a cfg log */
+ LCFG_LOG_END = 0x00ce012, /**< stop processing updates */
+ LCFG_LOV_ADD_INA = 0x00ce013, /**< like LOV_ADD_OBD, inactive */
+ LCFG_ADD_MDC = 0x00cf014, /**< add an mdc to a lmv */
+ LCFG_DEL_MDC = 0x00cf015, /**< remove an mdc from a lmv */
+ LCFG_SPTLRPC_CONF = 0x00ce016, /**< security */
+ LCFG_POOL_NEW = 0x00ce020, /**< create an ost pool name */
+ LCFG_POOL_ADD = 0x00ce021, /**< add an ost to a pool */
+ LCFG_POOL_REM = 0x00ce022, /**< remove an ost from a pool */
+ LCFG_POOL_DEL = 0x00ce023, /**< destroy an ost pool name */
+ LCFG_SET_LDLM_TIMEOUT = 0x00ce030, /**< set ldlm_timeout */
+ LCFG_PRE_CLEANUP = 0x00cf031, /**< call type-specific
+ * pre-cleanup */
+};
+
+struct lustre_cfg_bufs {
+ void *lcfg_buf[LUSTRE_CFG_MAX_BUFCOUNT];
+ __u32 lcfg_buflen[LUSTRE_CFG_MAX_BUFCOUNT];
+ __u32 lcfg_bufcount;
+};
+
+struct lustre_cfg {
+ __u32 lcfg_version;
+ __u32 lcfg_command;
+
+ __u32 lcfg_num;
+ __u32 lcfg_flags;
+ __u64 lcfg_nid;
+ __u32 lcfg_nal; /* not used any more */
+
+ __u32 lcfg_bufcount;
+ __u32 lcfg_buflens[0];
+};
+
+enum cfg_record_type {
+ PORTALS_CFG_TYPE = 1,
+ LUSTRE_CFG_TYPE = 123,
+};
+
+#define LUSTRE_CFG_BUFLEN(lcfg, idx) \
+ ((lcfg)->lcfg_bufcount <= (idx) \
+ ? 0 \
+ : (lcfg)->lcfg_buflens[(idx)])
+
+static inline void lustre_cfg_bufs_set(struct lustre_cfg_bufs *bufs,
+ __u32 index,
+ void *buf,
+ __u32 buflen)
+{
+ if (index >= LUSTRE_CFG_MAX_BUFCOUNT)
+ return;
+ if (bufs == NULL)
+ return;
+
+ if (bufs->lcfg_bufcount <= index)
+ bufs->lcfg_bufcount = index + 1;
+
+ bufs->lcfg_buf[index] = buf;
+ bufs->lcfg_buflen[index] = buflen;
+}
+
+static inline void lustre_cfg_bufs_set_string(struct lustre_cfg_bufs *bufs,
+ __u32 index,
+ char *str)
+{
+ lustre_cfg_bufs_set(bufs, index, str, str ? strlen(str) + 1 : 0);
+}
+
+static inline void lustre_cfg_bufs_reset(struct lustre_cfg_bufs *bufs, char *name)
+{
+ memset((bufs), 0, sizeof(*bufs));
+ if (name)
+ lustre_cfg_bufs_set_string(bufs, 0, name);
+}
+
+static inline void *lustre_cfg_buf(struct lustre_cfg *lcfg, int index)
+{
+ int i;
+ int offset;
+ int bufcount;
+ LASSERT (lcfg != NULL);
+ LASSERT (index >= 0);
+
+ bufcount = lcfg->lcfg_bufcount;
+ if (index >= bufcount)
+ return NULL;
+
+ offset = LCFG_HDR_SIZE(lcfg->lcfg_bufcount);
+ for (i = 0; i < index; i++)
+ offset += cfs_size_round(lcfg->lcfg_buflens[i]);
+ return (char *)lcfg + offset;
+}
+
+static inline void lustre_cfg_bufs_init(struct lustre_cfg_bufs *bufs,
+ struct lustre_cfg *lcfg)
+{
+ int i;
+ bufs->lcfg_bufcount = lcfg->lcfg_bufcount;
+ for (i = 0; i < bufs->lcfg_bufcount; i++) {
+ bufs->lcfg_buflen[i] = lcfg->lcfg_buflens[i];
+ bufs->lcfg_buf[i] = lustre_cfg_buf(lcfg, i);
+ }
+}
+
+static inline char *lustre_cfg_string(struct lustre_cfg *lcfg, int index)
+{
+ char *s;
+
+ if (lcfg->lcfg_buflens[index] == 0)
+ return NULL;
+
+ s = lustre_cfg_buf(lcfg, index);
+ if (s == NULL)
+ return NULL;
+
+ /*
+ * make sure it's NULL terminated, even if this kills a char
+ * of data. Try to use the padding first though.
+ */
+ if (s[lcfg->lcfg_buflens[index] - 1] != '\0') {
+ int last = min((int)lcfg->lcfg_buflens[index],
+ cfs_size_round(lcfg->lcfg_buflens[index]) - 1);
+ char lost = s[last];
+ s[last] = '\0';
+ if (lost != '\0') {
+ CWARN("Truncated buf %d to '%s' (lost '%c'...)\n",
+ index, s, lost);
+ }
+ }
+ return s;
+}
+
+static inline int lustre_cfg_len(__u32 bufcount, __u32 *buflens)
+{
+ int i;
+ int len;
+
+ len = LCFG_HDR_SIZE(bufcount);
+ for (i = 0; i < bufcount; i++)
+ len += cfs_size_round(buflens[i]);
+
+ return cfs_size_round(len);
+}
+
+
+#include <obd_support.h>
+
+static inline struct lustre_cfg *lustre_cfg_new(int cmd,
+ struct lustre_cfg_bufs *bufs)
+{
+ struct lustre_cfg *lcfg;
+ char *ptr;
+ int i;
+
+ OBD_ALLOC(lcfg, lustre_cfg_len(bufs->lcfg_bufcount,
+ bufs->lcfg_buflen));
+ if (!lcfg)
+ return ERR_PTR(-ENOMEM);
+
+ lcfg->lcfg_version = LUSTRE_CFG_VERSION;
+ lcfg->lcfg_command = cmd;
+ lcfg->lcfg_bufcount = bufs->lcfg_bufcount;
+
+ ptr = (char *)lcfg + LCFG_HDR_SIZE(lcfg->lcfg_bufcount);
+ for (i = 0; i < lcfg->lcfg_bufcount; i++) {
+ lcfg->lcfg_buflens[i] = bufs->lcfg_buflen[i];
+ LOGL((char *)bufs->lcfg_buf[i], bufs->lcfg_buflen[i], ptr);
+ }
+ return lcfg;
+}
+
+static inline void lustre_cfg_free(struct lustre_cfg *lcfg)
+{
+ int len;
+
+ len = lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens);
+
+ OBD_FREE(lcfg, len);
+ return;
+}
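
A short sketch (not part of this patch) of the builder pattern the helpers above implement: reset the buffer set, append string arguments, then materialize the record with lustre_cfg_new() and release it later with lustre_cfg_free().

    /* Build an LCFG_PARAM record whose buffer 0 is the device name and whose
     * buffer 1 is the "key=value" parameter string; assumes the surrounding
     * Lustre kernel headers for IS_ERR(). */
    static struct lustre_cfg *build_param_cfg(char *devname, char *param)
    {
            struct lustre_cfg_bufs bufs;
            struct lustre_cfg *lcfg;

            lustre_cfg_bufs_reset(&bufs, devname);
            lustre_cfg_bufs_set_string(&bufs, 1, param);

            lcfg = lustre_cfg_new(LCFG_PARAM, &bufs);
            if (IS_ERR(lcfg))
                    return NULL;

            /* ... hand lcfg to the config machinery, then lustre_cfg_free(lcfg) */
            return lcfg;
    }
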
+
+static inline int lustre_cfg_sanity_check(void *buf, int len)
+{
+ struct lustre_cfg *lcfg = (struct lustre_cfg *)buf;
+
+ if (!lcfg)
+ return -EINVAL;
+
+ /* check that the first bits of the struct are valid */
+ if (len < LCFG_HDR_SIZE(0))
+ return -EINVAL;
+
+ if (lcfg->lcfg_version != LUSTRE_CFG_VERSION)
+ return -EINVAL;
+
+ if (lcfg->lcfg_bufcount >= LUSTRE_CFG_MAX_BUFCOUNT)
+ return -EINVAL;
+
+ /* check that the buflens are valid */
+ if (len < LCFG_HDR_SIZE(lcfg->lcfg_bufcount))
+ return -EINVAL;
+
+ /* make sure all the pointers point inside the data */
+ if (len < lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens))
+ return -EINVAL;
+
+ return 0;
+}
+
+#include <lustre/lustre_user.h>
+
+/** @} cfg */
+
+#endif // _LUSTRE_CFG_H
diff --git a/drivers/staging/lustre/lustre/include/lustre_debug.h b/drivers/staging/lustre/lustre/include/lustre_debug.h
new file mode 100644
index 0000000..3d9e446
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_debug.h
@@ -0,0 +1,76 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef _LUSTRE_DEBUG_H
+#define _LUSTRE_DEBUG_H
+
+/** \defgroup debug debug
+ *
+ * @{
+ */
+
+#include <lustre_net.h>
+#include <obd.h>
+
+#include <linux/lustre_debug.h>
+
+#define ASSERT_MAX_SIZE_MB 60000ULL
+#define ASSERT_PAGE_INDEX(index, OP) \
+do { if (index > ASSERT_MAX_SIZE_MB << (20 - PAGE_CACHE_SHIFT)) { \
+ CERROR("bad page index %lu > %llu\n", index, \
+ ASSERT_MAX_SIZE_MB << (20 - PAGE_CACHE_SHIFT)); \
+ libcfs_debug = ~0UL; \
+ OP; \
+}} while(0)
+
+#define ASSERT_FILE_OFFSET(offset, OP) \
+do { if (offset > ASSERT_MAX_SIZE_MB << 20) { \
+ CERROR("bad file offset %llu > %llu\n", offset, \
+ ASSERT_MAX_SIZE_MB << 20); \
+ libcfs_debug = ~0UL; \
+ OP; \
+}} while(0)
+
+/* lib/debug.c */
+void dump_lniobuf(struct niobuf_local *lnb);
+int dump_req(struct ptlrpc_request *req);
+void dump_lsm(int level, struct lov_stripe_md *lsm);
+int block_debug_setup(void *addr, int len, __u64 off, __u64 id);
+int block_debug_check(char *who, void *addr, int len, __u64 off, __u64 id);
+
+/** @} debug */
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h
new file mode 100644
index 0000000..9228b16
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_disk.h
@@ -0,0 +1,545 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/include/lustre_disk.h
+ *
+ * Lustre disk format definitions.
+ *
+ * Author: Nathan Rutman <nathan@clusterfs.com>
+ */
+
+#ifndef _LUSTRE_DISK_H
+#define _LUSTRE_DISK_H
+
+/** \defgroup disk disk
+ *
+ * @{
+ */
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/lnet/types.h>
+
+/****************** on-disk files *********************/
+
+#define MDT_LOGS_DIR "LOGS" /* COMPAT_146 */
+#define MOUNT_CONFIGS_DIR "CONFIGS"
+#define CONFIGS_FILE "mountdata"
+/** Persistent mount data are stored on the disk in this file. */
+#define MOUNT_DATA_FILE MOUNT_CONFIGS_DIR"/"CONFIGS_FILE
+#define LAST_RCVD "last_rcvd"
+#define LOV_OBJID "lov_objid"
+#define LOV_OBJSEQ "lov_objseq"
+#define HEALTH_CHECK "health_check"
+#define CAPA_KEYS "capa_keys"
+#define CHANGELOG_USERS "changelog_users"
+#define MGS_NIDTBL_DIR "NIDTBL_VERSIONS"
+#define QMT_DIR "quota_master"
+#define QSD_DIR "quota_slave"
+#define HSM_ACTIONS "hsm_actions"
+
+/****************** persistent mount data *********************/
+
+#define LDD_F_SV_TYPE_MDT 0x0001
+#define LDD_F_SV_TYPE_OST 0x0002
+#define LDD_F_SV_TYPE_MGS 0x0004
+#define LDD_F_SV_TYPE_MASK (LDD_F_SV_TYPE_MDT | \
+ LDD_F_SV_TYPE_OST | \
+ LDD_F_SV_TYPE_MGS)
+#define LDD_F_SV_ALL 0x0008
+/** need an index assignment */
+#define LDD_F_NEED_INDEX 0x0010
+/** never registered */
+#define LDD_F_VIRGIN 0x0020
+/** update the config logs for this server */
+#define LDD_F_UPDATE 0x0040
+/** rewrite the LDD */
+#define LDD_F_REWRITE_LDD 0x0080
+/** regenerate config logs for this fs or server */
+#define LDD_F_WRITECONF 0x0100
+/** COMPAT_14 */
+#define LDD_F_UPGRADE14 0x0200
+/** process as lctl conf_param */
+#define LDD_F_PARAM 0x0400
+/** all nodes are specified as service nodes */
+#define LDD_F_NO_PRIMNODE 0x1000
+/** IR enable flag */
+#define LDD_F_IR_CAPABLE 0x2000
+/** the MGS refused to register the target. */
+#define LDD_F_ERROR 0x4000
+
+/* opc for target register */
+#define LDD_F_OPC_REG 0x10000000
+#define LDD_F_OPC_UNREG 0x20000000
+#define LDD_F_OPC_READY 0x40000000
+#define LDD_F_OPC_MASK 0xf0000000
+
+#define LDD_F_ONDISK_MASK (LDD_F_SV_TYPE_MASK)
+
+#define LDD_F_MASK 0xFFFF
+
+enum ldd_mount_type {
+ LDD_MT_EXT3 = 0,
+ LDD_MT_LDISKFS,
+ LDD_MT_SMFS,
+ LDD_MT_REISERFS,
+ LDD_MT_LDISKFS2,
+ LDD_MT_ZFS,
+ LDD_MT_LAST
+};
+
+static inline char *mt_str(enum ldd_mount_type mt)
+{
+ static char *mount_type_string[] = {
+ "ext3",
+ "ldiskfs",
+ "smfs",
+ "reiserfs",
+ "ldiskfs2",
+ "zfs",
+ };
+ return mount_type_string[mt];
+}
+
+static inline char *mt_type(enum ldd_mount_type mt)
+{
+ static char *mount_type_string[] = {
+ "osd-ldiskfs",
+ "osd-ldiskfs",
+ "osd-smfs",
+ "osd-reiserfs",
+ "osd-ldiskfs",
+ "osd-zfs",
+ };
+ return mount_type_string[mt];
+}
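+
+/*
+ * Illustrative example (not part of the on-disk format): both helpers
+ * index their string tables directly, so callers are expected to pass a
+ * value below LDD_MT_LAST.  For instance, mt_str(LDD_MT_LDISKFS)
+ * returns "ldiskfs" and mt_type(LDD_MT_LDISKFS) returns "osd-ldiskfs".
+ */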
+
+#define LDD_INCOMPAT_SUPP 0
+#define LDD_ROCOMPAT_SUPP 0
+
+#define LDD_MAGIC 0x1dd00001
+
+/* On-disk configuration file. In host-endian order. */
+struct lustre_disk_data {
+ __u32 ldd_magic;
+ __u32 ldd_feature_compat; /* compatible feature flags */
+ __u32 ldd_feature_rocompat;/* read-only compatible feature flags */
+ __u32 ldd_feature_incompat;/* incompatible feature flags */
+
+ __u32 ldd_config_ver; /* config rewrite count - not used */
+ __u32 ldd_flags; /* LDD_SV_TYPE */
+ __u32 ldd_svindex; /* server index (0001), must match
+ svname */
+ __u32 ldd_mount_type; /* target fs type LDD_MT_* */
+ char ldd_fsname[64]; /* filesystem this server is part of,
+ MTI_NAME_MAXLEN */
+ char ldd_svname[64]; /* this server's name (lustre-mdt0001)*/
+ __u8 ldd_uuid[40]; /* server UUID (COMPAT_146) */
+
+/*200*/ char ldd_userdata[1024 - 200]; /* arbitrary user string */
+/*1024*/__u8 ldd_padding[4096 - 1024];
+/*4096*/char ldd_mount_opts[4096]; /* target fs mount opts */
+/*8192*/char ldd_params[4096]; /* key=value pairs */
+};
+
+
+#define IS_MDT(data) ((data)->lsi_flags & LDD_F_SV_TYPE_MDT)
+#define IS_OST(data) ((data)->lsi_flags & LDD_F_SV_TYPE_OST)
+#define IS_MGS(data) ((data)->lsi_flags & LDD_F_SV_TYPE_MGS)
+#define IS_SERVER(data) ((data)->lsi_flags & (LDD_F_SV_TYPE_MGS | \
+ LDD_F_SV_TYPE_MDT | LDD_F_SV_TYPE_OST))
+#define MT_STR(data) mt_str((data)->ldd_mount_type)
+
+/* Make the mdt/ost server obd name based on the filesystem name */
+static inline int server_make_name(__u32 flags, __u16 index, char *fs,
+ char *name)
+{
+ if (flags & (LDD_F_SV_TYPE_MDT | LDD_F_SV_TYPE_OST)) {
+ if (!(flags & LDD_F_SV_ALL))
+ sprintf(name, "%.8s%c%s%04x", fs,
+ (flags & LDD_F_VIRGIN) ? ':' :
+ ((flags & LDD_F_WRITECONF) ? '=' : '-'),
+ (flags & LDD_F_SV_TYPE_MDT) ? "MDT" : "OST",
+ index);
+ } else if (flags & LDD_F_SV_TYPE_MGS) {
+ sprintf(name, "MGS");
+ } else {
+ CERROR("unknown server type %#x\n", flags);
+ return 1;
+ }
+ return 0;
+}
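+
+/*
+ * Example (illustrative; the caller must provide a large enough buffer):
+ *
+ *	char name[64];
+ *
+ *	server_make_name(LDD_F_SV_TYPE_MDT, 1, "lustre", name);
+ *
+ * writes "lustre-MDT0001" into name, while LDD_F_SV_TYPE_MGS yields
+ * plain "MGS".
+ */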
+
+/****************** mount command *********************/
+
+/* The lmd is only used internally by Lustre; mount simply passes
+ everything as string options */
+
+#define LMD_MAGIC 0xbdacbd03
+
+/* gleaned from the mount command - no persistent info here */
+struct lustre_mount_data {
+ __u32 lmd_magic;
+ __u32 lmd_flags; /* lustre mount flags */
+ int lmd_mgs_failnodes; /* mgs failover node count */
+ int lmd_exclude_count;
+ int lmd_recovery_time_soft;
+ int lmd_recovery_time_hard;
+ char *lmd_dev; /* device name */
+ char *lmd_profile; /* client only */
+ char *lmd_mgssec; /* sptlrpc flavor to mgs */
+ char *lmd_opts; /* lustre mount options (as opposed to
+ _device_ mount options) */
+ char *lmd_params; /* lustre params */
+ __u32 *lmd_exclude; /* array of OSTs to ignore */
+ char *lmd_mgs; /* MGS nid */
+ char *lmd_osd_type; /* OSD type */
+};
+
+#define LMD_FLG_SERVER 0x0001 /* Mounting a server */
+#define LMD_FLG_CLIENT 0x0002 /* Mounting a client */
+#define LMD_FLG_ABORT_RECOV 0x0008 /* Abort recovery */
+#define LMD_FLG_NOSVC 0x0010 /* Only start MGS/MGC for servers,
+ no other services */
+#define LMD_FLG_NOMGS 0x0020 /* Only start target for servers, reusing
+ existing MGS services */
+#define LMD_FLG_WRITECONF 0x0040 /* Rewrite config log */
+#define LMD_FLG_NOIR 0x0080 /* NO imperative recovery */
+#define LMD_FLG_NOSCRUB 0x0100 /* Do not trigger scrub automatically */
+#define LMD_FLG_MGS 0x0200 /* Also start MGS along with server */
+#define LMD_FLG_IAM 0x0400 /* IAM dir */
+#define LMD_FLG_NO_PRIMNODE 0x0800 /* all nodes are service nodes */
+#define LMD_FLG_VIRGIN 0x1000 /* the service registers first time */
+#define LMD_FLG_UPDATE 0x2000 /* update parameters */
+#define LMD_FLG_HSM 0x4000 /* Start coordinator */
+
+#define lmd_is_client(x) ((x)->lmd_flags & LMD_FLG_CLIENT)
+
+
+/****************** last_rcvd file *********************/
+
+/** version recovery epoch */
+#define LR_EPOCH_BITS 32
+#define lr_epoch(a) ((a) >> LR_EPOCH_BITS)
+#define LR_EXPIRE_INTERVALS 16 /**< number of intervals to track transno */
+#define ENOENT_VERSION 1 /**< 'virtual' version of non-existent object */
+
+#define LR_SERVER_SIZE 512
+#define LR_CLIENT_START 8192
+#define LR_CLIENT_SIZE 128
+#if LR_CLIENT_START < LR_SERVER_SIZE
+#error "Can't have LR_CLIENT_START < LR_SERVER_SIZE"
+#endif
+
+/*
+ * This limit is arbitrary (131072 clients on x86), but it is convenient to use
+ * 2^n * PAGE_CACHE_SIZE * 8 for the number of bits that fit an order-n allocation.
+ * If we need more than 131072 clients (order-2 allocation on x86) then this
+ * should become an array of single-page pointers that are allocated on demand.
+ */
+#if (128 * 1024UL) > (PAGE_CACHE_SIZE * 8)
+#define LR_MAX_CLIENTS (128 * 1024UL)
+#else
+#define LR_MAX_CLIENTS (PAGE_CACHE_SIZE * 8)
+#endif
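+
+/*
+ * Worked example (illustrative): with 4 KiB pages, PAGE_CACHE_SIZE * 8
+ * is 32768 bits, which is below 128 * 1024, so LR_MAX_CLIENTS is
+ * 131072; with 64 KiB pages the page-sized bitmap already covers
+ * 524288 bits and becomes the limit instead.
+ */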
+
+/** COMPAT_146: this is an OST (temporary) */
+#define OBD_COMPAT_OST 0x00000002
+/** COMPAT_146: this is an MDT (temporary) */
+#define OBD_COMPAT_MDT 0x00000004
+/** 2.0 server, interop flag to show server version is changed */
+#define OBD_COMPAT_20 0x00000008
+
+/** MDS handles LOV_OBJID file */
+#define OBD_ROCOMPAT_LOVOBJID 0x00000001
+
+/** OST handles group subdirs */
+#define OBD_INCOMPAT_GROUPS 0x00000001
+/** this is an OST */
+#define OBD_INCOMPAT_OST 0x00000002
+/** this is an MDT */
+#define OBD_INCOMPAT_MDT 0x00000004
+/** common last_rcvd format */
+#define OBD_INCOMPAT_COMMON_LR 0x00000008
+/** FID is enabled */
+#define OBD_INCOMPAT_FID 0x00000010
+/** Size-on-MDS is enabled */
+#define OBD_INCOMPAT_SOM 0x00000020
+/** filesystem using iam format to store directory entries */
+#define OBD_INCOMPAT_IAM_DIR 0x00000040
+/** LMA attribute contains per-inode incompatible flags */
+#define OBD_INCOMPAT_LMA 0x00000080
+/** lmm_stripe_count has been shrunk from __u32 to __u16 and the remaining 16
+ * bits are now used to store a generation. Once we start changing the layout
+ * and bumping the generation, old versions expecting a 32-bit lmm_stripe_count
+ * will be confused by interpreting stripe_count | gen << 16 as the actual
+ * stripe count */
+#define OBD_INCOMPAT_LMM_VER 0x00000100
+/** multiple OI files for MDT */
+#define OBD_INCOMPAT_MULTI_OI 0x00000200
+
+/* Data stored per server at the head of the last_rcvd file. In le32 order.
+ This should be common to filter_internal.h, lustre_mds.h */
+struct lr_server_data {
+ __u8 lsd_uuid[40]; /* server UUID */
+ __u64 lsd_last_transno; /* last completed transaction ID */
+ __u64 lsd_compat14; /* reserved - compat with old last_rcvd */
+ __u64 lsd_mount_count; /* incarnation number */
+ __u32 lsd_feature_compat; /* compatible feature flags */
+ __u32 lsd_feature_rocompat;/* read-only compatible feature flags */
+ __u32 lsd_feature_incompat;/* incompatible feature flags */
+ __u32 lsd_server_size; /* size of server data area */
+ __u32 lsd_client_start; /* start of per-client data area */
+ __u16 lsd_client_size; /* size of per-client data area */
+ __u16 lsd_subdir_count; /* number of subdirectories for objects */
+ __u64 lsd_catalog_oid; /* recovery catalog object id */
+ __u32 lsd_catalog_ogen; /* recovery catalog inode generation */
+ __u8 lsd_peeruuid[40]; /* UUID of MDS associated with this OST */
+ __u32 lsd_osd_index; /* index number of OST in LOV */
+ __u32 lsd_padding1; /* was lsd_mdt_index, unused in 2.4.0 */
+ __u32 lsd_start_epoch; /* VBR: start epoch from last boot */
+ /** transaction values since lsd_trans_table_time */
+ __u64 lsd_trans_table[LR_EXPIRE_INTERVALS];
+ /** start point of transno table below */
+ __u32 lsd_trans_table_time; /* time of first slot in table above */
+ __u32 lsd_expire_intervals; /* LR_EXPIRE_INTERVALS */
+ __u8 lsd_padding[LR_SERVER_SIZE - 288];
+};
+
+/* Data stored per client in the last_rcvd file. In le32 order. */
+struct lsd_client_data {
+ __u8 lcd_uuid[40]; /* client UUID */
+ __u64 lcd_last_transno; /* last completed transaction ID */
+ __u64 lcd_last_xid; /* xid for the last transaction */
+ __u32 lcd_last_result; /* result from last RPC */
+ __u32 lcd_last_data; /* per-op data (disposition for open &c.) */
+ /* for MDS_CLOSE requests */
+ __u64 lcd_last_close_transno; /* last completed transaction ID */
+ __u64 lcd_last_close_xid; /* xid for the last transaction */
+ __u32 lcd_last_close_result; /* result from last RPC */
+ __u32 lcd_last_close_data; /* per-op data */
+ /* VBR: last versions */
+ __u64 lcd_pre_versions[4];
+ __u32 lcd_last_epoch;
+ /** orphans handling for delayed export rely on that */
+ __u32 lcd_first_epoch;
+ __u8 lcd_padding[LR_CLIENT_SIZE - 128];
+};
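+
+/*
+ * Layout note with a small worked example (illustrative): the per-client
+ * slots follow the server data area at offset
+ *
+ *	lsd_client_start + client_idx * lsd_client_size
+ *
+ * so with the defaults above client slot 3 starts at 8192 + 3 * 128 = 8576.
+ */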
+
+/* bug20354: the lcd_uuid for export of clients may be wrong */
+static inline void check_lcd(char *obd_name, int index,
+ struct lsd_client_data *lcd)
+{
+ int length = sizeof(lcd->lcd_uuid);
+ if (strnlen((char*)lcd->lcd_uuid, length) == length) {
+ lcd->lcd_uuid[length - 1] = '\0';
+
+		LCONSOLE_ERROR("the client UUID (%s) on %s for exports "
+			       "stored in last_rcvd (index = %d) is bad!\n",
+			       lcd->lcd_uuid, obd_name, index);
+ }
+}
+
+/* last_rcvd handling */
+static inline void lsd_le_to_cpu(struct lr_server_data *buf,
+ struct lr_server_data *lsd)
+{
+ int i;
+ memcpy(lsd->lsd_uuid, buf->lsd_uuid, sizeof(lsd->lsd_uuid));
+ lsd->lsd_last_transno = le64_to_cpu(buf->lsd_last_transno);
+ lsd->lsd_compat14 = le64_to_cpu(buf->lsd_compat14);
+ lsd->lsd_mount_count = le64_to_cpu(buf->lsd_mount_count);
+ lsd->lsd_feature_compat = le32_to_cpu(buf->lsd_feature_compat);
+ lsd->lsd_feature_rocompat = le32_to_cpu(buf->lsd_feature_rocompat);
+ lsd->lsd_feature_incompat = le32_to_cpu(buf->lsd_feature_incompat);
+ lsd->lsd_server_size = le32_to_cpu(buf->lsd_server_size);
+ lsd->lsd_client_start = le32_to_cpu(buf->lsd_client_start);
+ lsd->lsd_client_size = le16_to_cpu(buf->lsd_client_size);
+ lsd->lsd_subdir_count = le16_to_cpu(buf->lsd_subdir_count);
+ lsd->lsd_catalog_oid = le64_to_cpu(buf->lsd_catalog_oid);
+ lsd->lsd_catalog_ogen = le32_to_cpu(buf->lsd_catalog_ogen);
+ memcpy(lsd->lsd_peeruuid, buf->lsd_peeruuid, sizeof(lsd->lsd_peeruuid));
+ lsd->lsd_osd_index = le32_to_cpu(buf->lsd_osd_index);
+ lsd->lsd_padding1 = le32_to_cpu(buf->lsd_padding1);
+ lsd->lsd_start_epoch = le32_to_cpu(buf->lsd_start_epoch);
+ for (i = 0; i < LR_EXPIRE_INTERVALS; i++)
+ lsd->lsd_trans_table[i] = le64_to_cpu(buf->lsd_trans_table[i]);
+ lsd->lsd_trans_table_time = le32_to_cpu(buf->lsd_trans_table_time);
+ lsd->lsd_expire_intervals = le32_to_cpu(buf->lsd_expire_intervals);
+}
+
+static inline void lsd_cpu_to_le(struct lr_server_data *lsd,
+ struct lr_server_data *buf)
+{
+ int i;
+ memcpy(buf->lsd_uuid, lsd->lsd_uuid, sizeof(buf->lsd_uuid));
+ buf->lsd_last_transno = cpu_to_le64(lsd->lsd_last_transno);
+ buf->lsd_compat14 = cpu_to_le64(lsd->lsd_compat14);
+ buf->lsd_mount_count = cpu_to_le64(lsd->lsd_mount_count);
+ buf->lsd_feature_compat = cpu_to_le32(lsd->lsd_feature_compat);
+ buf->lsd_feature_rocompat = cpu_to_le32(lsd->lsd_feature_rocompat);
+ buf->lsd_feature_incompat = cpu_to_le32(lsd->lsd_feature_incompat);
+ buf->lsd_server_size = cpu_to_le32(lsd->lsd_server_size);
+ buf->lsd_client_start = cpu_to_le32(lsd->lsd_client_start);
+ buf->lsd_client_size = cpu_to_le16(lsd->lsd_client_size);
+ buf->lsd_subdir_count = cpu_to_le16(lsd->lsd_subdir_count);
+ buf->lsd_catalog_oid = cpu_to_le64(lsd->lsd_catalog_oid);
+ buf->lsd_catalog_ogen = cpu_to_le32(lsd->lsd_catalog_ogen);
+ memcpy(buf->lsd_peeruuid, lsd->lsd_peeruuid, sizeof(buf->lsd_peeruuid));
+ buf->lsd_osd_index = cpu_to_le32(lsd->lsd_osd_index);
+ buf->lsd_padding1 = cpu_to_le32(lsd->lsd_padding1);
+ buf->lsd_start_epoch = cpu_to_le32(lsd->lsd_start_epoch);
+ for (i = 0; i < LR_EXPIRE_INTERVALS; i++)
+ buf->lsd_trans_table[i] = cpu_to_le64(lsd->lsd_trans_table[i]);
+ buf->lsd_trans_table_time = cpu_to_le32(lsd->lsd_trans_table_time);
+ buf->lsd_expire_intervals = cpu_to_le32(lsd->lsd_expire_intervals);
+}
+
+static inline void lcd_le_to_cpu(struct lsd_client_data *buf,
+ struct lsd_client_data *lcd)
+{
+ memcpy(lcd->lcd_uuid, buf->lcd_uuid, sizeof (lcd->lcd_uuid));
+ lcd->lcd_last_transno = le64_to_cpu(buf->lcd_last_transno);
+ lcd->lcd_last_xid = le64_to_cpu(buf->lcd_last_xid);
+ lcd->lcd_last_result = le32_to_cpu(buf->lcd_last_result);
+ lcd->lcd_last_data = le32_to_cpu(buf->lcd_last_data);
+ lcd->lcd_last_close_transno = le64_to_cpu(buf->lcd_last_close_transno);
+ lcd->lcd_last_close_xid = le64_to_cpu(buf->lcd_last_close_xid);
+ lcd->lcd_last_close_result = le32_to_cpu(buf->lcd_last_close_result);
+ lcd->lcd_last_close_data = le32_to_cpu(buf->lcd_last_close_data);
+ lcd->lcd_pre_versions[0] = le64_to_cpu(buf->lcd_pre_versions[0]);
+ lcd->lcd_pre_versions[1] = le64_to_cpu(buf->lcd_pre_versions[1]);
+ lcd->lcd_pre_versions[2] = le64_to_cpu(buf->lcd_pre_versions[2]);
+ lcd->lcd_pre_versions[3] = le64_to_cpu(buf->lcd_pre_versions[3]);
+ lcd->lcd_last_epoch = le32_to_cpu(buf->lcd_last_epoch);
+ lcd->lcd_first_epoch = le32_to_cpu(buf->lcd_first_epoch);
+}
+
+static inline void lcd_cpu_to_le(struct lsd_client_data *lcd,
+ struct lsd_client_data *buf)
+{
+ memcpy(buf->lcd_uuid, lcd->lcd_uuid, sizeof (lcd->lcd_uuid));
+ buf->lcd_last_transno = cpu_to_le64(lcd->lcd_last_transno);
+ buf->lcd_last_xid = cpu_to_le64(lcd->lcd_last_xid);
+ buf->lcd_last_result = cpu_to_le32(lcd->lcd_last_result);
+ buf->lcd_last_data = cpu_to_le32(lcd->lcd_last_data);
+ buf->lcd_last_close_transno = cpu_to_le64(lcd->lcd_last_close_transno);
+ buf->lcd_last_close_xid = cpu_to_le64(lcd->lcd_last_close_xid);
+ buf->lcd_last_close_result = cpu_to_le32(lcd->lcd_last_close_result);
+ buf->lcd_last_close_data = cpu_to_le32(lcd->lcd_last_close_data);
+ buf->lcd_pre_versions[0] = cpu_to_le64(lcd->lcd_pre_versions[0]);
+ buf->lcd_pre_versions[1] = cpu_to_le64(lcd->lcd_pre_versions[1]);
+ buf->lcd_pre_versions[2] = cpu_to_le64(lcd->lcd_pre_versions[2]);
+ buf->lcd_pre_versions[3] = cpu_to_le64(lcd->lcd_pre_versions[3]);
+ buf->lcd_last_epoch = cpu_to_le32(lcd->lcd_last_epoch);
+ buf->lcd_first_epoch = cpu_to_le32(lcd->lcd_first_epoch);
+}
+
+static inline __u64 lcd_last_transno(struct lsd_client_data *lcd)
+{
+ return (lcd->lcd_last_transno > lcd->lcd_last_close_transno ?
+ lcd->lcd_last_transno : lcd->lcd_last_close_transno);
+}
+
+static inline __u64 lcd_last_xid(struct lsd_client_data *lcd)
+{
+ return (lcd->lcd_last_xid > lcd->lcd_last_close_xid ?
+ lcd->lcd_last_xid : lcd->lcd_last_close_xid);
+}
+
+/****************** superblock additional info *********************/
+
+struct ll_sb_info;
+
+struct lustre_sb_info {
+ int lsi_flags;
+ struct obd_device *lsi_mgc; /* mgc obd */
+ struct lustre_mount_data *lsi_lmd; /* mount command info */
+ struct ll_sb_info *lsi_llsbi; /* add'l client sbi info */
+ struct dt_device *lsi_dt_dev; /* dt device to access disk fs*/
+ struct vfsmount *lsi_srv_mnt; /* the one server mount */
+ atomic_t lsi_mounts; /* references to the srv_mnt */
+ char lsi_svname[MTI_NAME_MAXLEN];
+ char lsi_osd_obdname[64];
+ char lsi_osd_uuid[64];
+ struct obd_export *lsi_osd_exp;
+ char lsi_osd_type[16];
+ char lsi_fstype[16];
+ struct backing_dev_info lsi_bdi; /* each client mountpoint needs
+ own backing_dev_info */
+};
+
+#define LSI_UMOUNT_FAILOVER 0x00200000
+#define LSI_BDI_INITIALIZED 0x00400000
+
+#define s2lsi(sb) ((struct lustre_sb_info *)((sb)->s_fs_info))
+#define s2lsi_nocast(sb) ((sb)->s_fs_info)
+
+#define get_profile_name(sb) (s2lsi(sb)->lsi_lmd->lmd_profile)
+#define get_mount_flags(sb) (s2lsi(sb)->lsi_lmd->lmd_flags)
+#define get_mntdev_name(sb) (s2lsi(sb)->lsi_lmd->lmd_dev)
+
+
+/****************** mount lookup info *********************/
+
+struct lustre_mount_info {
+ char *lmi_name;
+ struct super_block *lmi_sb;
+ struct vfsmount *lmi_mnt;
+ struct list_head lmi_list_chain;
+};
+
+/****************** prototypes *********************/
+
+/* obd_mount.c */
+int server_name2fsname(const char *svname, char *fsname, const char **endptr);
+int server_name2index(const char *svname, __u32 *idx, const char **endptr);
+int server_name2svname(const char *label, char *svname, const char **endptr,
+ size_t svsize);
+
+int lustre_put_lsi(struct super_block *sb);
+int lustre_start_simple(char *obdname, char *type, char *uuid,
+ char *s1, char *s2, char *s3, char *s4);
+int lustre_start_mgc(struct super_block *sb);
+void lustre_register_client_fill_super(int (*cfs)(struct super_block *sb,
+ struct vfsmount *mnt));
+void lustre_register_kill_super_cb(void (*cfs)(struct super_block *sb));
+int lustre_common_put_super(struct super_block *sb);
+
+
+int mgc_fsname2resid(char *fsname, struct ldlm_res_id *res_id, int type);
+
+/** @} disk */
+
+#endif /* _LUSTRE_DISK_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
new file mode 100644
index 0000000..7020d9c
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -0,0 +1,1481 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2010, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+/** \defgroup LDLM Lustre Distributed Lock Manager
+ *
+ * Lustre DLM is based on VAX DLM.
+ * Its two main roles are:
+ * - To provide locking assuring consistency of data on all Lustre nodes.
+ * - To allow clients to cache state protected by a lock by holding the
+ * lock until a conflicting lock is requested or it is expired by the LRU.
+ *
+ * @{
+ */
+
+#ifndef _LUSTRE_DLM_H__
+#define _LUSTRE_DLM_H__
+
+#include <linux/lustre_dlm.h>
+
+#include <lustre_lib.h>
+#include <lustre_net.h>
+#include <lustre_import.h>
+#include <lustre_handles.h>
+#include <interval_tree.h> /* for interval_node{}, ldlm_extent */
+#include <lu_ref.h>
+
+#include "lustre_dlm_flags.h"
+
+struct obd_ops;
+struct obd_device;
+
+#define OBD_LDLM_DEVICENAME "ldlm"
+
+#define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus())
+#define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(36000))
+#define LDLM_CTIME_AGE_LIMIT (10)
+#define LDLM_DEFAULT_PARALLEL_AST_LIMIT 1024
+
+/**
+ * LDLM non-error return states
+ */
+typedef enum {
+ ELDLM_OK = 0,
+
+ ELDLM_LOCK_CHANGED = 300,
+ ELDLM_LOCK_ABORTED = 301,
+ ELDLM_LOCK_REPLACED = 302,
+ ELDLM_NO_LOCK_DATA = 303,
+ ELDLM_LOCK_WOULDBLOCK = 304,
+
+ ELDLM_NAMESPACE_EXISTS = 400,
+ ELDLM_BAD_NAMESPACE = 401
+} ldlm_error_t;
+
+/**
+ * LDLM namespace type.
+ * The "client" type is actually an indication that this is a narrow local view
+ * into the complete namespace on the server. Such namespaces cannot make any
+ * decisions about the absence of conflicts or do any autonomous lock granting
+ * without first speaking to a server.
+ */
+typedef enum {
+ LDLM_NAMESPACE_SERVER = 1 << 0,
+ LDLM_NAMESPACE_CLIENT = 1 << 1
+} ldlm_side_t;
+
+/**
+ * The blocking callback is overloaded to perform two functions. These flags
+ * indicate which operation should be performed.
+ */
+#define LDLM_CB_BLOCKING 1
+#define LDLM_CB_CANCELING 2
+
+/**
+ * \name Lock Compatibility Matrix.
+ *
+ * A lock has both a type (extent, flock, inode bits, or plain) and a mode.
+ * Lock types are described in their respective implementation files:
+ * ldlm_{extent,flock,inodebits,plain}.c.
+ *
+ * There are six lock modes along with a compatibility matrix to indicate if
+ * two locks are compatible.
+ *
+ * - EX: Exclusive mode. Before a new file is created, MDS requests EX lock
+ * on the parent.
+ * - PW: Protective Write (normal write) mode. When a client requests a write
+ * lock from an OST, a lock with PW mode will be issued.
+ * - PR: Protective Read (normal read) mode. When a client requests a read from
+ * an OST, a lock with PR mode will be issued. Also, if the client opens a
+ * file for execution, it is granted a lock with PR mode.
+ * - CW: Concurrent Write mode. The type of lock that the MDS grants if a client
+ * requests a write lock during a file open operation.
+ * - CR: Concurrent Read mode. When a client performs a path lookup, MDS grants
+ *   an inodebits lock with the CR mode on the intermediate path component.
+ * - NL: Null mode.
+ *
+ * <PRE>
+ * NL CR CW PR PW EX
+ * NL 1 1 1 1 1 1
+ * CR 1 1 1 1 1 0
+ * CW 1 1 1 0 0 0
+ * PR 1 1 0 1 0 0
+ * PW 1 1 0 0 0 0
+ * EX 1 0 0 0 0 0
+ * </PRE>
+ */
+/** @{ */
+#define LCK_COMPAT_EX LCK_NL
+#define LCK_COMPAT_PW (LCK_COMPAT_EX | LCK_CR)
+#define LCK_COMPAT_PR (LCK_COMPAT_PW | LCK_PR)
+#define LCK_COMPAT_CW (LCK_COMPAT_PW | LCK_CW)
+#define LCK_COMPAT_CR (LCK_COMPAT_CW | LCK_PR | LCK_PW)
+#define LCK_COMPAT_NL (LCK_COMPAT_CR | LCK_EX | LCK_GROUP)
+#define LCK_COMPAT_GROUP (LCK_GROUP | LCK_NL)
+#define LCK_COMPAT_COS (LCK_COS)
+/** @} Lock Compatibility Matrix */
+
+extern ldlm_mode_t lck_compat_array[];
+
+static inline void lockmode_verify(ldlm_mode_t mode)
+{
+ LASSERT(mode > LCK_MINMODE && mode < LCK_MAXMODE);
+}
+
+static inline int lockmode_compat(ldlm_mode_t exist_mode, ldlm_mode_t new_mode)
+{
+ return (lck_compat_array[exist_mode] & new_mode);
+}
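+
+/*
+ * Illustrative example: per the matrix above two PR locks are compatible
+ * while PW conflicts with PR, so (assuming lck_compat_array[] is filled
+ * from the LCK_COMPAT_* masks):
+ *
+ *	lockmode_compat(LCK_PR, LCK_PR) is non-zero, and
+ *	lockmode_compat(LCK_PW, LCK_PR) is 0.
+ */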
+
+/*
+ *
+ * cluster name spaces
+ *
+ */
+
+#define DLM_OST_NAMESPACE 1
+#define DLM_MDS_NAMESPACE 2
+
+/* XXX
+ - do we just separate this by security domains and use a prefix for
+ multiple namespaces in the same domain?
+ -
+*/
+
+/**
+ * Locking rules for LDLM:
+ *
+ * lr_lock
+ *
+ * lr_lock
+ * waiting_locks_spinlock
+ *
+ * lr_lock
+ * led_lock
+ *
+ * lr_lock
+ * ns_lock
+ *
+ * lr_lvb_mutex
+ * lr_lock
+ *
+ */
+
+struct ldlm_pool;
+struct ldlm_lock;
+struct ldlm_resource;
+struct ldlm_namespace;
+
+/**
+ * Operations on LDLM pools.
+ * LDLM pool is a pool of locks in the namespace without any implicitly
+ * specified limits.
+ * Locks in the pool are organized in LRU.
+ * Local memory pressure or server instructions (e.g. mempressure on server)
+ * can trigger freeing of locks from the pool
+ */
+struct ldlm_pool_ops {
+ /** Recalculate pool \a pl usage */
+ int (*po_recalc)(struct ldlm_pool *pl);
+ /** Cancel at least \a nr locks from pool \a pl */
+ int (*po_shrink)(struct ldlm_pool *pl, int nr,
+ unsigned int gfp_mask);
+ int (*po_setup)(struct ldlm_pool *pl, int limit);
+};
+
+/** One second for pools thread check interval. Each pool has its own period. */
+#define LDLM_POOLS_THREAD_PERIOD (1)
+
+/** ~6% margin for modest pools. See ldlm_pool.c for details. */
+#define LDLM_POOLS_MODEST_MARGIN_SHIFT (4)
+
+/** Default recalc period for server side pools in sec. */
+#define LDLM_POOL_SRV_DEF_RECALC_PERIOD (1)
+
+/** Default recalc period for client side pools in sec. */
+#define LDLM_POOL_CLI_DEF_RECALC_PERIOD (10)
+
+/**
+ * LDLM pool structure to track granted locks.
+ * For purposes of determining when to release locks on e.g. memory pressure.
+ * This feature is commonly referred to as lru_resize.
+ */
+struct ldlm_pool {
+ /** Pool proc directory. */
+ struct proc_dir_entry *pl_proc_dir;
+ /** Pool name, must be long enough to hold compound proc entry name. */
+ char pl_name[100];
+ /** Lock for protecting SLV/CLV updates. */
+ spinlock_t pl_lock;
+	/** Number of allowed locks in the pool, on both client and server side. */
+ atomic_t pl_limit;
+	/** Number of granted locks in the pool. */
+ atomic_t pl_granted;
+ /** Grant rate per T. */
+ atomic_t pl_grant_rate;
+ /** Cancel rate per T. */
+ atomic_t pl_cancel_rate;
+ /** Server lock volume (SLV). Protected by pl_lock. */
+ __u64 pl_server_lock_volume;
+ /** Current biggest client lock volume. Protected by pl_lock. */
+ __u64 pl_client_lock_volume;
+	/** Lock volume factor. SLV on client is calculated as follows:
+	 *  server_slv * lock_volume_factor. */
+ atomic_t pl_lock_volume_factor;
+ /** Time when last SLV from server was obtained. */
+ time_t pl_recalc_time;
+ /** Recalculation period for pool. */
+ time_t pl_recalc_period;
+ /** Recalculation and shrink operations. */
+ struct ldlm_pool_ops *pl_ops;
+ /** Number of planned locks for next period. */
+ int pl_grant_plan;
+ /** Pool statistics. */
+ struct lprocfs_stats *pl_stats;
+};
+
+typedef int (*ldlm_res_policy)(struct ldlm_namespace *, struct ldlm_lock **,
+ void *req_cookie, ldlm_mode_t mode, __u64 flags,
+ void *data);
+
+typedef int (*ldlm_cancel_for_recovery)(struct ldlm_lock *lock);
+
+/**
+ * LVB operations.
+ * LVB is Lock Value Block. This is a special opaque (to LDLM) value that could
+ * be associated with an LDLM lock and transferred from client to server and
+ * back.
+ *
+ * Currently LVBs are used by:
+ * - OSC-OST code to maintain current object size/times
+ * - layout lock code to return the layout when the layout lock is granted
+ */
+struct ldlm_valblock_ops {
+ int (*lvbo_init)(struct ldlm_resource *res);
+ int (*lvbo_update)(struct ldlm_resource *res,
+ struct ptlrpc_request *r,
+ int increase);
+ int (*lvbo_free)(struct ldlm_resource *res);
+	/* Return size of LVB data so that an appropriately sized RPC
+	 * buffer can be reserved */
+ int (*lvbo_size)(struct ldlm_lock *lock);
+ /* Called to fill in lvb data to RPC buffer @buf */
+ int (*lvbo_fill)(struct ldlm_lock *lock, void *buf, int buflen);
+};
+
+/**
+ * LDLM pools related, type of lock pool in the namespace.
+ * Greedy means release cached locks aggressively
+ */
+typedef enum {
+ LDLM_NAMESPACE_GREEDY = 1 << 0,
+ LDLM_NAMESPACE_MODEST = 1 << 1
+} ldlm_appetite_t;
+
+/**
+ * Default values for the "max_nolock_size", "contention_time" and
+ * "contended_locks" namespace tunables.
+ */
+#define NS_DEFAULT_MAX_NOLOCK_BYTES 0
+#define NS_DEFAULT_CONTENTION_SECONDS 2
+#define NS_DEFAULT_CONTENDED_LOCKS 32
+
+struct ldlm_ns_bucket {
+ /** back pointer to namespace */
+ struct ldlm_namespace *nsb_namespace;
+ /**
+ * Estimated lock callback time. Used by adaptive timeout code to
+ * avoid spurious client evictions due to unresponsiveness when in
+ * fact the network or overall system load is at fault
+ */
+ struct adaptive_timeout nsb_at_estimate;
+};
+
+enum {
+ /** LDLM namespace lock stats */
+ LDLM_NSS_LOCKS = 0,
+ LDLM_NSS_LAST
+};
+
+typedef enum {
+	/** invalid type */
+ LDLM_NS_TYPE_UNKNOWN = 0,
+ /** mdc namespace */
+ LDLM_NS_TYPE_MDC,
+ /** mds namespace */
+ LDLM_NS_TYPE_MDT,
+ /** osc namespace */
+ LDLM_NS_TYPE_OSC,
+ /** ost namespace */
+ LDLM_NS_TYPE_OST,
+ /** mgc namespace */
+ LDLM_NS_TYPE_MGC,
+ /** mgs namespace */
+ LDLM_NS_TYPE_MGT,
+} ldlm_ns_type_t;
+
+/**
+ * LDLM Namespace.
+ *
+ * Namespace serves to contain locks related to a particular service.
+ * There are two kinds of namespaces:
+ * - Server namespace has knowledge of all locks and is therefore authoritative
+ * to make decisions like what locks could be granted and what conflicts
+ * exist during new lock enqueue.
+ * - Client namespace only has limited knowledge about locks in the namespace,
+ * only seeing locks held by the client.
+ *
+ * Every Lustre service has one server namespace present on the server serving
+ * that service. Every client connected to the service has a client namespace
+ * for it.
+ * Every lock obtained by client in that namespace is actually represented by
+ * two in-memory locks. One on the server and one on the client. The locks are
+ * linked by a special cookie by which one node can tell the other which lock
+ * it actually means during communications. Such locks are called remote locks.
+ * Locks held by the server only, without any reference to a client, are called
+ * local locks.
+ */
+struct ldlm_namespace {
+ /** Backward link to OBD, required for LDLM pool to store new SLV. */
+ struct obd_device *ns_obd;
+
+ /** Flag indicating if namespace is on client instead of server */
+ ldlm_side_t ns_client;
+
+ /** Resource hash table for namespace. */
+ cfs_hash_t *ns_rs_hash;
+
+ /** serialize */
+ spinlock_t ns_lock;
+
+ /** big refcount (by bucket) */
+ atomic_t ns_bref;
+
+ /**
+ * Namespace connect flags supported by server (may be changed via
+ * /proc, LRU resize may be disabled/enabled).
+ */
+ __u64 ns_connect_flags;
+
+ /** Client side original connect flags supported by server. */
+ __u64 ns_orig_connect_flags;
+
+ /* namespace proc dir entry */
+ struct proc_dir_entry *ns_proc_dir_entry;
+
+ /**
+ * Position in global namespace list linking all namespaces on
+ * the node.
+ */
+ struct list_head ns_list_chain;
+
+ /**
+ * List of unused locks for this namespace. This list is also called
+ * LRU lock list.
+ * Unused locks are locks with zero reader/writer reference counts.
+ * This list is only used on clients for lock caching purposes.
+ * When we want to release some locks voluntarily or if server wants
+ * us to release some locks due to e.g. memory pressure, we take locks
+ * to release from the head of this list.
+ * Locks are linked via l_lru field in \see struct ldlm_lock.
+ */
+ struct list_head ns_unused_list;
+ /** Number of locks in the LRU list above */
+ int ns_nr_unused;
+
+ /**
+	 * Maximum number of locks permitted in the LRU. If 0, locks are
+	 * managed by pools and there is no preset limit; instead it is all
+	 * controlled by the available memory on this client and on the server.
+ */
+ unsigned int ns_max_unused;
+ /** Maximum allowed age (last used time) for locks in the LRU */
+ unsigned int ns_max_age;
+ /**
+ * Server only: number of times we evicted clients due to lack of reply
+ * to ASTs.
+ */
+ unsigned int ns_timeouts;
+ /**
+ * Number of seconds since the file change time after which the
+ * MDT will return an UPDATE lock along with a LOOKUP lock.
+ * This allows the client to start caching negative dentries
+ * for a directory and may save an RPC for a later stat.
+ */
+ unsigned int ns_ctime_age_limit;
+
+ /**
+ * Used to rate-limit ldlm_namespace_dump calls.
+ * \see ldlm_namespace_dump. Increased by 10 seconds every time
+ * it is called.
+ */
+ cfs_time_t ns_next_dump;
+
+ /** "policy" function that does actual lock conflict determination */
+ ldlm_res_policy ns_policy;
+
+ /**
+ * LVB operations for this namespace.
+ * \see struct ldlm_valblock_ops
+ */
+ struct ldlm_valblock_ops *ns_lvbo;
+
+ /**
+ * Used by filter code to store pointer to OBD of the service.
+ * Should be dropped in favor of \a ns_obd
+ */
+ void *ns_lvbp;
+
+ /**
+ * Wait queue used by __ldlm_namespace_free. Gets woken up every time
+ * a resource is removed.
+ */
+ wait_queue_head_t ns_waitq;
+ /** LDLM pool structure for this namespace */
+ struct ldlm_pool ns_pool;
+ /** Definition of how eagerly unused locks will be released from LRU */
+ ldlm_appetite_t ns_appetite;
+
+ /**
+ * If more than \a ns_contended_locks are found, the resource is
+ * considered to be contended. Lock enqueues might specify that no
+ * contended locks should be granted
+ */
+ unsigned ns_contended_locks;
+
+ /**
+ * The resources in this namespace remember contended state during
+ * \a ns_contention_time, in seconds.
+ */
+ unsigned ns_contention_time;
+
+ /**
+ * Limit size of contended extent locks, in bytes.
+	 * If an extent lock is requested for more than this many bytes and
+	 * the caller instructs us not to grant contended locks, we disregard
+	 * such a request.
+ */
+ unsigned ns_max_nolock_size;
+
+ /** Limit of parallel AST RPC count. */
+ unsigned ns_max_parallel_ast;
+
+	/** Callback to cancel locks before replaying them during recovery. */
+ ldlm_cancel_for_recovery ns_cancel_for_recovery;
+
+ /** LDLM lock stats */
+ struct lprocfs_stats *ns_stats;
+
+ /**
+ * Flag to indicate namespace is being freed. Used to determine if
+ * recalculation of LDLM pool statistics should be skipped.
+ */
+ unsigned ns_stopping:1;
+};
+
+/**
+ * Returns 1 if namespace \a ns is a client namespace.
+ */
+static inline int ns_is_client(struct ldlm_namespace *ns)
+{
+ LASSERT(ns != NULL);
+ LASSERT(!(ns->ns_client & ~(LDLM_NAMESPACE_CLIENT |
+ LDLM_NAMESPACE_SERVER)));
+ LASSERT(ns->ns_client == LDLM_NAMESPACE_CLIENT ||
+ ns->ns_client == LDLM_NAMESPACE_SERVER);
+ return ns->ns_client == LDLM_NAMESPACE_CLIENT;
+}
+
+/**
+ * Returns 1 if namespace \a ns is a server namespace.
+ */
+static inline int ns_is_server(struct ldlm_namespace *ns)
+{
+ LASSERT(ns != NULL);
+ LASSERT(!(ns->ns_client & ~(LDLM_NAMESPACE_CLIENT |
+ LDLM_NAMESPACE_SERVER)));
+ LASSERT(ns->ns_client == LDLM_NAMESPACE_CLIENT ||
+ ns->ns_client == LDLM_NAMESPACE_SERVER);
+ return ns->ns_client == LDLM_NAMESPACE_SERVER;
+}
+
+/**
+ * Returns 1 if namespace \a ns supports early lock cancel (ELC).
+ */
+static inline int ns_connect_cancelset(struct ldlm_namespace *ns)
+{
+ LASSERT(ns != NULL);
+ return !!(ns->ns_connect_flags & OBD_CONNECT_CANCELSET);
+}
+
+/**
+ * Returns 1 if this namespace supports lru_resize.
+ */
+static inline int ns_connect_lru_resize(struct ldlm_namespace *ns)
+{
+ LASSERT(ns != NULL);
+ return !!(ns->ns_connect_flags & OBD_CONNECT_LRU_RESIZE);
+}
+
+static inline void ns_register_cancel(struct ldlm_namespace *ns,
+ ldlm_cancel_for_recovery arg)
+{
+ LASSERT(ns != NULL);
+ ns->ns_cancel_for_recovery = arg;
+}
+
+struct ldlm_lock;
+
+/** Type for blocking callback function of a lock. */
+typedef int (*ldlm_blocking_callback)(struct ldlm_lock *lock,
+ struct ldlm_lock_desc *new, void *data,
+ int flag);
+/** Type for completion callback function of a lock. */
+typedef int (*ldlm_completion_callback)(struct ldlm_lock *lock, __u64 flags,
+ void *data);
+/** Type for glimpse callback function of a lock. */
+typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data);
+
+/** Work list for sending GL ASTs to multiple locks. */
+struct ldlm_glimpse_work {
+ struct ldlm_lock *gl_lock; /* lock to glimpse */
+ struct list_head gl_list; /* linkage to other gl work structs */
+ __u32 gl_flags;/* see LDLM_GL_WORK_* below */
+ union ldlm_gl_desc *gl_desc; /* glimpse descriptor to be packed in
+ * glimpse callback request */
+};
+
+/** The ldlm_glimpse_work is allocated on the stack and should not be freed. */
+#define LDLM_GL_WORK_NOFREE 0x1
+
+/** Interval node data for each LDLM_EXTENT lock. */
+struct ldlm_interval {
+ struct interval_node li_node; /* node for tree management */
+ struct list_head li_group; /* the locks which have the same
+ * policy - group of the policy */
+};
+#define to_ldlm_interval(n) container_of(n, struct ldlm_interval, li_node)
+
+/**
+ * Interval tree for extent locks.
+ * The interval tree must be accessed under the resource lock.
+ * Interval trees are used for granted extent locks to speed up conflicts
+ * lookup. See ldlm/interval_tree.c for more details.
+ */
+struct ldlm_interval_tree {
+ /** Tree size. */
+ int lit_size;
+ ldlm_mode_t lit_mode; /* lock mode */
+ struct interval_node *lit_root; /* actual ldlm_interval */
+};
+
+/** Whether to track references to exports by LDLM locks. */
+#define LUSTRE_TRACKS_LOCK_EXP_REFS (0)
+
+/** Cancel flags. */
+typedef enum {
+ LCF_ASYNC = 0x1, /* Cancel locks asynchronously. */
+	LCF_LOCAL = 0x2, /* Cancel locks locally, not notifying the server */
+ LCF_BL_AST = 0x4, /* Cancel locks marked as LDLM_FL_BL_AST
+ * in the same RPC */
+} ldlm_cancel_flags_t;
+
+struct ldlm_flock {
+ __u64 start;
+ __u64 end;
+ __u64 owner;
+ __u64 blocking_owner;
+ struct obd_export *blocking_export;
+ /* Protected by the hash lock */
+ __u32 blocking_refs;
+ __u32 pid;
+};
+
+typedef union {
+ struct ldlm_extent l_extent;
+ struct ldlm_flock l_flock;
+ struct ldlm_inodebits l_inodebits;
+} ldlm_policy_data_t;
+
+void ldlm_convert_policy_to_wire(ldlm_type_t type,
+ const ldlm_policy_data_t *lpolicy,
+ ldlm_wire_policy_data_t *wpolicy);
+void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type,
+ const ldlm_wire_policy_data_t *wpolicy,
+ ldlm_policy_data_t *lpolicy);
+
+enum lvb_type {
+ LVB_T_NONE = 0,
+ LVB_T_OST = 1,
+ LVB_T_LQUOTA = 2,
+ LVB_T_LAYOUT = 3,
+};
+
+/**
+ * LDLM lock structure
+ *
+ * Represents a single LDLM lock and its state in memory. Each lock is
+ * associated with a single ldlm_resource, the object which is being
+ * locked. There may be multiple ldlm_locks on a single resource,
+ * depending on the lock type and whether the locks are conflicting or
+ * not.
+ */
+struct ldlm_lock {
+ /**
+ * Local lock handle.
+ * When remote side wants to tell us about a lock, they address
+ * it by this opaque handle. The handle does not hold a
+ * reference on the ldlm_lock, so it can be safely passed to
+ * other threads or nodes. When the lock needs to be accessed
+ * from the handle, it is looked up again in the lock table, and
+ * may no longer exist.
+ *
+ * Must be first in the structure.
+ */
+ struct portals_handle l_handle;
+ /**
+ * Lock reference count.
+	 * This is how many users have pointers to the actual structure, so
+	 * that we do not accidentally free a lock structure that is in use.
+ */
+ atomic_t l_refc;
+ /**
+ * Internal spinlock protects l_resource. We should hold this lock
+ * first before taking res_lock.
+ */
+ spinlock_t l_lock;
+ /**
+ * Pointer to actual resource this lock is in.
+ * ldlm_lock_change_resource() can change this.
+ */
+ struct ldlm_resource *l_resource;
+ /**
+ * List item for client side LRU list.
+ * Protected by ns_lock in struct ldlm_namespace.
+ */
+ struct list_head l_lru;
+ /**
+ * Linkage to resource's lock queues according to current lock state.
+ * (could be granted, waiting or converting)
+ * Protected by lr_lock in struct ldlm_resource.
+ */
+ struct list_head l_res_link;
+ /**
+ * Tree node for ldlm_extent.
+ */
+ struct ldlm_interval *l_tree_node;
+ /**
+ * Per export hash of locks.
+ * Protected by per-bucket exp->exp_lock_hash locks.
+ */
+ struct hlist_node l_exp_hash;
+ /**
+ * Per export hash of flock locks.
+ * Protected by per-bucket exp->exp_flock_hash locks.
+ */
+ struct hlist_node l_exp_flock_hash;
+ /**
+ * Requested mode.
+ * Protected by lr_lock.
+ */
+ ldlm_mode_t l_req_mode;
+ /**
+ * Granted mode, also protected by lr_lock.
+ */
+ ldlm_mode_t l_granted_mode;
+ /** Lock completion handler pointer. Called when lock is granted. */
+ ldlm_completion_callback l_completion_ast;
+ /**
+ * Lock blocking AST handler pointer.
+ * It plays two roles:
+ * - as a notification of an attempt to queue a conflicting lock (once)
+ * - as a notification when the lock is being cancelled.
+ *
+ * As such it's typically called twice: once for the initial conflict
+ * and then once more when the last user went away and the lock is
+ * cancelled (could happen recursively).
+ */
+ ldlm_blocking_callback l_blocking_ast;
+ /**
+ * Lock glimpse handler.
+ * Glimpse handler is used to obtain LVB updates from a client by
+	 * the server.
+ */
+ ldlm_glimpse_callback l_glimpse_ast;
+
+ /**
+ * Lock export.
+ * This is a pointer to actual client export for locks that were granted
+ * to clients. Used server-side.
+ */
+ struct obd_export *l_export;
+ /**
+ * Lock connection export.
+ * Pointer to server export on a client.
+ */
+ struct obd_export *l_conn_export;
+
+ /**
+ * Remote lock handle.
+ * If the lock is remote, this is the handle of the other side lock
+ * (l_handle)
+ */
+ struct lustre_handle l_remote_handle;
+
+ /**
+ * Representation of private data specific for a lock type.
+ * Examples are: extent range for extent lock or bitmask for ibits locks
+ */
+ ldlm_policy_data_t l_policy_data;
+
+ /**
+ * Lock state flags. Protected by lr_lock.
+ * \see lustre_dlm_flags.h where the bits are defined.
+ */
+ __u64 l_flags;
+
+ /**
+ * Lock r/w usage counters.
+ * Protected by lr_lock.
+ */
+ __u32 l_readers;
+ __u32 l_writers;
+ /**
+ * If the lock is granted, a process sleeps on this waitq to learn when
+ * it's no longer in use. If the lock is not granted, a process sleeps
+ * on this waitq to learn when it becomes granted.
+ */
+ wait_queue_head_t l_waitq;
+
+ /**
+ * Seconds. It will be updated if there is any activity related to
+	 * the lock, e.g. enqueuing the lock or sending a blocking AST.
+ */
+ cfs_time_t l_last_activity;
+
+ /**
+ * Time last used by e.g. being matched by lock match.
+ * Jiffies. Should be converted to time if needed.
+ */
+ cfs_time_t l_last_used;
+
+ /** Originally requested extent for the extent lock. */
+ struct ldlm_extent l_req_extent;
+
+ /*
+ * Client-side-only members.
+ */
+
+ enum lvb_type l_lvb_type;
+
+ /**
+ * Temporary storage for a LVB received during an enqueue operation.
+ */
+ __u32 l_lvb_len;
+ void *l_lvb_data;
+
+ /** Private storage for lock user. Opaque to LDLM. */
+ void *l_ast_data;
+
+ /*
+ * Server-side-only members.
+ */
+
+ /**
+ * Connection cookie for the client originating the operation.
+ * Used by Commit on Share (COS) code. Currently only used for
+ * inodebits locks on MDS.
+ */
+ __u64 l_client_cookie;
+
+ /**
+ * List item for locks waiting for cancellation from clients.
+ * The lists this could be linked into are:
+ * waiting_locks_list (protected by waiting_locks_spinlock),
+ * then if the lock timed out, it is moved to
+ * expired_lock_thread.elt_expired_locks for further processing.
+ * Protected by elt_lock.
+ */
+ struct list_head l_pending_chain;
+
+ /**
+ * Set when lock is sent a blocking AST. Time in seconds when timeout
+ * is reached and client holding this lock could be evicted.
+ * This timeout could be further extended by e.g. certain IO activity
+ * under this lock.
+ * \see ost_rw_prolong_locks
+ */
+ cfs_time_t l_callback_timeout;
+
+ /** Local PID of process which created this lock. */
+ __u32 l_pid;
+
+ /**
+ * Number of times blocking AST was sent for this lock.
+	 * This is for debugging. Valid values are 0 and 1; if there is an
+	 * attempt to send a blocking AST more than once, an assertion is
+	 * hit. \see ldlm_work_bl_ast_lock
+ */
+ int l_bl_ast_run;
+ /** List item ldlm_add_ast_work_item() for case of blocking ASTs. */
+ struct list_head l_bl_ast;
+ /** List item ldlm_add_ast_work_item() for case of completion ASTs. */
+ struct list_head l_cp_ast;
+ /** For ldlm_add_ast_work_item() for "revoke" AST used in COS. */
+ struct list_head l_rk_ast;
+
+ /**
+ * Pointer to a conflicting lock that caused blocking AST to be sent
+ * for this lock
+ */
+ struct ldlm_lock *l_blocking_lock;
+
+ /**
+ * Protected by lr_lock, linkages to "skip lists".
+ * For more explanations of skip lists see ldlm/ldlm_inodebits.c
+ */
+ struct list_head l_sl_mode;
+ struct list_head l_sl_policy;
+
+ /** Reference tracking structure to debug leaked locks. */
+ struct lu_ref l_reference;
+#if LUSTRE_TRACKS_LOCK_EXP_REFS
+ /* Debugging stuff for bug 20498, for tracking export references. */
+ /** number of export references taken */
+ int l_exp_refs_nr;
+ /** link all locks referencing one export */
+ struct list_head l_exp_refs_link;
+ /** referenced export object */
+ struct obd_export *l_exp_refs_target;
+#endif
+ /**
+ * export blocking dlm lock list, protected by
+ * l_export->exp_bl_list_lock.
+	 * Lock order of waiting_locks_spinlock, exp_bl_list_lock and res lock
+	 * is: res lock -> exp_bl_list_lock -> waiting_locks_spinlock.
+ */
+ struct list_head l_exp_list;
+};
+
+/**
+ * LDLM resource description.
+ * Basically, resource is a representation for a single object.
+ * Object has a name which is currently 4 64-bit integers. LDLM user is
+ * responsible for creation of a mapping between objects it wants to be
+ * protected and resource names.
+ *
+ * A resource can only hold locks of a single lock type, though there may be
+ * multiple ldlm_locks on a single resource, depending on the lock type and
+ * whether the locks are conflicting or not.
+ */
+struct ldlm_resource {
+ struct ldlm_ns_bucket *lr_ns_bucket;
+
+ /**
+ * List item for list in namespace hash.
+ * protected by ns_lock
+ */
+ struct hlist_node lr_hash;
+
+ /** Spinlock to protect locks under this resource. */
+ spinlock_t lr_lock;
+
+ /**
+ * protected by lr_lock
+ * @{ */
+ /** List of locks in granted state */
+ struct list_head lr_granted;
+ /** List of locks waiting to change their granted mode (converted) */
+ struct list_head lr_converting;
+ /**
+ * List of locks that could not be granted due to conflicts and
+ * that are waiting for conflicts to go away */
+ struct list_head lr_waiting;
+ /** @} */
+
+ /* XXX No longer needed? Remove ASAP */
+ ldlm_mode_t lr_most_restr;
+
+ /** Type of locks this resource can hold. Only one type per resource. */
+ ldlm_type_t lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK,IBITS} */
+
+ /** Resource name */
+ struct ldlm_res_id lr_name;
+ /** Reference count for this resource */
+ atomic_t lr_refcount;
+
+ /**
+ * Interval trees (only for extent locks) for all modes of this resource
+ */
+ struct ldlm_interval_tree lr_itree[LCK_MODE_NUM];
+
+ /**
+ * Server-side-only lock value block elements.
+ * To serialize lvbo_init.
+ */
+ struct mutex lr_lvb_mutex;
+ int lr_lvb_len;
+ /** protected by lr_lock */
+ void *lr_lvb_data;
+
+ /** When the resource was considered as contended. */
+ cfs_time_t lr_contention_time;
+ /** List of references to this resource. For debugging. */
+ struct lu_ref lr_reference;
+
+ struct inode *lr_lvb_inode;
+};
+
+static inline bool ldlm_has_layout(struct ldlm_lock *lock)
+{
+ return lock->l_resource->lr_type == LDLM_IBITS &&
+ lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_LAYOUT;
+}
+
+static inline char *
+ldlm_ns_name(struct ldlm_namespace *ns)
+{
+ return ns->ns_rs_hash->hs_name;
+}
+
+static inline struct ldlm_namespace *
+ldlm_res_to_ns(struct ldlm_resource *res)
+{
+ return res->lr_ns_bucket->nsb_namespace;
+}
+
+static inline struct ldlm_namespace *
+ldlm_lock_to_ns(struct ldlm_lock *lock)
+{
+ return ldlm_res_to_ns(lock->l_resource);
+}
+
+static inline char *
+ldlm_lock_to_ns_name(struct ldlm_lock *lock)
+{
+ return ldlm_ns_name(ldlm_lock_to_ns(lock));
+}
+
+static inline struct adaptive_timeout *
+ldlm_lock_to_ns_at(struct ldlm_lock *lock)
+{
+ return &lock->l_resource->lr_ns_bucket->nsb_at_estimate;
+}
+
+static inline int ldlm_lvbo_init(struct ldlm_resource *res)
+{
+ struct ldlm_namespace *ns = ldlm_res_to_ns(res);
+
+ if (ns->ns_lvbo != NULL && ns->ns_lvbo->lvbo_init != NULL)
+ return ns->ns_lvbo->lvbo_init(res);
+
+ return 0;
+}
+
+static inline int ldlm_lvbo_size(struct ldlm_lock *lock)
+{
+ struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
+
+ if (ns->ns_lvbo != NULL && ns->ns_lvbo->lvbo_size != NULL)
+ return ns->ns_lvbo->lvbo_size(lock);
+
+ return 0;
+}
+
+static inline int ldlm_lvbo_fill(struct ldlm_lock *lock, void *buf, int len)
+{
+ struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
+
+ if (ns->ns_lvbo != NULL) {
+ LASSERT(ns->ns_lvbo->lvbo_fill != NULL);
+ return ns->ns_lvbo->lvbo_fill(lock, buf, len);
+ }
+ return 0;
+}
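+
+/*
+ * Illustrative server-side pattern (sketch only; reply buffer handling
+ * is simplified and "lock"/"buf" are assumed to exist):
+ *
+ *	int lvblen = ldlm_lvbo_size(lock);
+ *	... reserve lvblen bytes for the LVB in the reply ...
+ *	lvblen = ldlm_lvbo_fill(lock, buf, lvblen);
+ */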
+
+struct ldlm_ast_work {
+ struct ldlm_lock *w_lock;
+ int w_blocking;
+ struct ldlm_lock_desc w_desc;
+ struct list_head w_list;
+ int w_flags;
+ void *w_data;
+ int w_datalen;
+};
+
+/**
+ * Common ldlm_enqueue parameters
+ */
+struct ldlm_enqueue_info {
+	__u32 ei_type;   /**< Type of the lock being enqueued. */
+	__u32 ei_mode;   /**< Mode of the lock being enqueued. */
+	void *ei_cb_bl;  /**< blocking lock callback */
+	void *ei_cb_cp;  /**< lock completion callback */
+	void *ei_cb_gl;  /**< lock glimpse callback */
+	void *ei_cbdata; /**< Data to be passed into callbacks. */
+};
+
+extern struct obd_ops ldlm_obd_ops;
+
+extern char *ldlm_lockname[];
+extern char *ldlm_typename[];
+extern char *ldlm_it2str(int it);
+
+/**
+ * Just a fancy CDEBUG call with log level preset to LDLM_DEBUG.
+ * For the cases where we do not have an actual lock to print along
+ * with an LDLM-related debugging message.
+ */
+#define LDLM_DEBUG_NOLOCK(format, a...) \
+ CDEBUG(D_DLMTRACE, "### " format "\n" , ##a)
+
+/**
+ * Support function for lock information printing into debug logs.
+ * \see LDLM_DEBUG
+ */
+#define ldlm_lock_debug(msgdata, mask, cdls, lock, fmt, a...) do { \
+ CFS_CHECK_STACK(msgdata, mask, cdls); \
+ \
+ if (((mask) & D_CANTMASK) != 0 || \
+ ((libcfs_debug & (mask)) != 0 && \
+ (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) \
+ _ldlm_lock_debug(lock, msgdata, fmt, ##a); \
+} while(0)
+
+void _ldlm_lock_debug(struct ldlm_lock *lock,
+ struct libcfs_debug_msg_data *data,
+ const char *fmt, ...)
+ __attribute__ ((format (printf, 3, 4)));
+
+/**
+ * Rate-limited version of lock printing function.
+ */
+#define LDLM_DEBUG_LIMIT(mask, lock, fmt, a...) do { \
+ static cfs_debug_limit_state_t _ldlm_cdls; \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, &_ldlm_cdls); \
+ ldlm_lock_debug(&msgdata, mask, &_ldlm_cdls, lock, "### " fmt , ##a);\
+} while (0)
+
+#define LDLM_ERROR(lock, fmt, a...) LDLM_DEBUG_LIMIT(D_ERROR, lock, fmt, ## a)
+#define LDLM_WARN(lock, fmt, a...) LDLM_DEBUG_LIMIT(D_WARNING, lock, fmt, ## a)
+
+/** Non-rate-limited lock printing function for debugging purposes. */
+#define LDLM_DEBUG(lock, fmt, a...) do { \
+ if (likely(lock != NULL)) { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_DLMTRACE, NULL); \
+ ldlm_lock_debug(&msgdata, D_DLMTRACE, NULL, lock, \
+ "### " fmt , ##a); \
+ } else { \
+ LDLM_DEBUG_NOLOCK("no dlm lock: " fmt, ##a); \
+ } \
+} while (0)
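+
+/*
+ * Usage sketch (illustrative; "lock" and "mode" are assumed to exist in
+ * the caller, and "lock" may be NULL):
+ *
+ *	LDLM_DEBUG(lock, "matched, mode %d", (int)mode);
+ *	LDLM_ERROR(lock, "unexpected AST on a lock that is not granted");
+ *
+ * LDLM_DEBUG() tolerates a NULL lock and falls back to LDLM_DEBUG_NOLOCK(),
+ * while LDLM_ERROR() and LDLM_WARN() are rate-limited.
+ */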
+
+typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, __u64 *flags,
+ int first_enq, ldlm_error_t *err,
+ struct list_head *work_list);
+
+/**
+ * Return values for lock iterators.
+ * Also used during deciding of lock grants and cancellations.
+ */
+#define LDLM_ITER_CONTINUE 1 /* keep iterating */
+#define LDLM_ITER_STOP 2 /* stop iterating */
+
+typedef int (*ldlm_iterator_t)(struct ldlm_lock *, void *);
+typedef int (*ldlm_res_iterator_t)(struct ldlm_resource *, void *);
+
+/** \defgroup ldlm_iterator Lock iterators
+ *
+ * LDLM provides a way to iterate through every lock on a resource or
+ * namespace or every resource in a namespace.
+ * @{ */
+int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
+ void *closure);
+void ldlm_namespace_foreach(struct ldlm_namespace *ns, ldlm_iterator_t iter,
+ void *closure);
+int ldlm_resource_iterate(struct ldlm_namespace *, const struct ldlm_res_id *,
+ ldlm_iterator_t iter, void *data);
+/** @} ldlm_iterator */
+
+int ldlm_replay_locks(struct obd_import *imp);
+
+/* ldlm_flock.c */
+int ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);
+
+/* ldlm_extent.c */
+__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms);
+
+struct ldlm_callback_suite {
+ ldlm_completion_callback lcs_completion;
+ ldlm_blocking_callback lcs_blocking;
+ ldlm_glimpse_callback lcs_glimpse;
+};
+
+/* ldlm_lockd.c */
+int ldlm_del_waiting_lock(struct ldlm_lock *lock);
+int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout);
+int ldlm_get_ref(void);
+void ldlm_put_ref(void);
+int ldlm_init_export(struct obd_export *exp);
+void ldlm_destroy_export(struct obd_export *exp);
+struct ldlm_lock *ldlm_request_lock(struct ptlrpc_request *req);
+
+/* ldlm_lock.c */
+void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg);
+void ldlm_lock2handle(const struct ldlm_lock *lock,
+ struct lustre_handle *lockh);
+struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *, __u64 flags);
+void ldlm_cancel_callback(struct ldlm_lock *);
+int ldlm_lock_remove_from_lru(struct ldlm_lock *);
+int ldlm_lock_set_data(struct lustre_handle *, void *);
+
+/**
+ * Obtain a lock reference by its handle.
+ */
+static inline struct ldlm_lock *ldlm_handle2lock(const struct lustre_handle *h)
+{
+ return __ldlm_handle2lock(h, 0);
+}
+
+#define LDLM_LOCK_REF_DEL(lock) \
+ lu_ref_del(&lock->l_reference, "handle", current)
+
+static inline struct ldlm_lock *
+ldlm_handle2lock_long(const struct lustre_handle *h, __u64 flags)
+{
+ struct ldlm_lock *lock;
+
+ lock = __ldlm_handle2lock(h, flags);
+ if (lock != NULL)
+ LDLM_LOCK_REF_DEL(lock);
+ return lock;
+}
+
+/**
+ * Update Lock Value Block Operations (LVBO) on a resource taking into account
+ * data from request \a r
+ */
+static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
+ struct ptlrpc_request *r, int increase)
+{
+ if (ldlm_res_to_ns(res)->ns_lvbo &&
+ ldlm_res_to_ns(res)->ns_lvbo->lvbo_update) {
+ return ldlm_res_to_ns(res)->ns_lvbo->lvbo_update(res, r,
+ increase);
+ }
+ return 0;
+}
+
+int ldlm_error2errno(ldlm_error_t error);
+ldlm_error_t ldlm_errno2error(int err_no); /* don't call it `errno': this
+ * confuses user-space. */
+#if LUSTRE_TRACKS_LOCK_EXP_REFS
+void ldlm_dump_export_locks(struct obd_export *exp);
+#endif
+
+/**
+ * Release a temporary lock reference obtained by ldlm_handle2lock() or
+ * __ldlm_handle2lock().
+ */
+#define LDLM_LOCK_PUT(lock) \
+do { \
+ LDLM_LOCK_REF_DEL(lock); \
+ /*LDLM_DEBUG((lock), "put");*/ \
+ ldlm_lock_put(lock); \
+} while (0)
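A brief hedged sketch pairing ldlm_handle2lock() with LDLM_LOCK_PUT(); the function name is hypothetical and error handling is kept minimal:

static void inspect_lock_by_handle(const struct lustre_handle *h)
{
        struct ldlm_lock *lock = ldlm_handle2lock(h);

        if (lock == NULL)
                return;                 /* stale or invalid handle */
        /* ... read or update fields of *lock here ... */
        LDLM_LOCK_PUT(lock);            /* drop the temporary reference */
}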
+
+/**
+ * Release a lock reference obtained by some other means (see
+ * LDLM_LOCK_PUT()).
+ */
+#define LDLM_LOCK_RELEASE(lock) \
+do { \
+ /*LDLM_DEBUG((lock), "put");*/ \
+ ldlm_lock_put(lock); \
+} while (0)
+
+#define LDLM_LOCK_GET(lock) \
+({ \
+ ldlm_lock_get(lock); \
+ /*LDLM_DEBUG((lock), "get");*/ \
+ lock; \
+})
+
+#define ldlm_lock_list_put(head, member, count) \
+({ \
+ struct ldlm_lock *_lock, *_next; \
+ int c = count; \
+ list_for_each_entry_safe(_lock, _next, head, member) { \
+ if (c-- == 0) \
+ break; \
+ list_del_init(&_lock->member); \
+ LDLM_LOCK_RELEASE(_lock); \
+ } \
+ LASSERT(c <= 0); \
+})
+
+struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
+void ldlm_lock_put(struct ldlm_lock *lock);
+void ldlm_lock_destroy(struct ldlm_lock *lock);
+void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc);
+void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode);
+int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode);
+void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode);
+void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode);
+void ldlm_lock_fail_match_locked(struct ldlm_lock *lock);
+void ldlm_lock_fail_match(struct ldlm_lock *lock);
+void ldlm_lock_allow_match(struct ldlm_lock *lock);
+void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
+ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
+ const struct ldlm_res_id *, ldlm_type_t type,
+ ldlm_policy_data_t *, ldlm_mode_t mode,
+ struct lustre_handle *, int unref);
+ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
+ __u64 *bits);
+struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
+ __u32 *flags);
+void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode);
+void ldlm_lock_cancel(struct ldlm_lock *lock);
+void ldlm_reprocess_all(struct ldlm_resource *res);
+void ldlm_reprocess_all_ns(struct ldlm_namespace *ns);
+void ldlm_lock_dump_handle(int level, struct lustre_handle *);
+void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);
+
+/* resource.c */
+struct ldlm_namespace *
+ldlm_namespace_new(struct obd_device *obd, char *name,
+ ldlm_side_t client, ldlm_appetite_t apt,
+ ldlm_ns_type_t ns_type);
+int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags);
+void ldlm_namespace_free(struct ldlm_namespace *ns,
+ struct obd_import *imp, int force);
+void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client);
+void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client);
+void ldlm_namespace_get(struct ldlm_namespace *ns);
+void ldlm_namespace_put(struct ldlm_namespace *ns);
+int ldlm_proc_setup(void);
+#ifdef LPROCFS
+void ldlm_proc_cleanup(void);
+#else
+static inline void ldlm_proc_cleanup(void) {}
+#endif
+
+/* resource.c - internal */
+struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns,
+ struct ldlm_resource *parent,
+ const struct ldlm_res_id *,
+ ldlm_type_t type, int create);
+struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res);
+int ldlm_resource_putref(struct ldlm_resource *res);
+void ldlm_resource_add_lock(struct ldlm_resource *res,
+ struct list_head *head,
+ struct ldlm_lock *lock);
+void ldlm_resource_unlink_lock(struct ldlm_lock *lock);
+void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc);
+void ldlm_dump_all_namespaces(ldlm_side_t client, int level);
+void ldlm_namespace_dump(int level, struct ldlm_namespace *);
+void ldlm_resource_dump(int level, struct ldlm_resource *);
+int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
+ const struct ldlm_res_id *);
+
+#define LDLM_RESOURCE_ADDREF(res) do { \
+ lu_ref_add_atomic(&(res)->lr_reference, __FUNCTION__, current); \
+} while (0)
+
+#define LDLM_RESOURCE_DELREF(res) do { \
+ lu_ref_del(&(res)->lr_reference, __FUNCTION__, current); \
+} while (0)
+
+/* ldlm_request.c */
+int ldlm_expired_completion_wait(void *data);
+/** \defgroup ldlm_local_ast Default AST handlers for local locks
+ * These AST handlers are typically used for server-side local locks and are
+ * also used by client-side lock handlers to perform the minimum required
+ * base processing.
+ * @{ */
+int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock);
+int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
+ void *data, int flag);
+int ldlm_glimpse_ast(struct ldlm_lock *lock, void *reqp);
+int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data);
+int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);
+/** @} ldlm_local_ast */
+
+/** \defgroup ldlm_cli_api API to operate on locks from actual LDLM users.
+ * These are typically used by client and server (*_local versions)
+ * to obtain and release locks.
+ * @{ */
+int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
+ struct ldlm_enqueue_info *einfo,
+ const struct ldlm_res_id *res_id,
+ ldlm_policy_data_t const *policy, __u64 *flags,
+ void *lvb, __u32 lvb_len, enum lvb_type lvb_type,
+ struct lustre_handle *lockh, int async);
+int ldlm_prep_enqueue_req(struct obd_export *exp,
+ struct ptlrpc_request *req,
+ struct list_head *cancels,
+ int count);
+int ldlm_prep_elc_req(struct obd_export *exp,
+ struct ptlrpc_request *req,
+ int version, int opc, int canceloff,
+ struct list_head *cancels, int count);
+
+struct ptlrpc_request *ldlm_enqueue_pack(struct obd_export *exp, int lvb_len);
+int ldlm_handle_enqueue0(struct ldlm_namespace *ns, struct ptlrpc_request *req,
+ const struct ldlm_request *dlm_req,
+ const struct ldlm_callback_suite *cbs);
+int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
+ ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
+ __u64 *flags, void *lvb, __u32 lvb_len,
+ struct lustre_handle *lockh, int rc);
+int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
+ const struct ldlm_res_id *res_id,
+ ldlm_type_t type, ldlm_policy_data_t *policy,
+ ldlm_mode_t mode, __u64 *flags,
+ ldlm_blocking_callback blocking,
+ ldlm_completion_callback completion,
+ ldlm_glimpse_callback glimpse,
+ void *data, __u32 lvb_len, enum lvb_type lvb_type,
+ const __u64 *client_cookie,
+ struct lustre_handle *lockh);
+int ldlm_server_ast(struct lustre_handle *lockh, struct ldlm_lock_desc *new,
+ void *data, __u32 data_len);
+int ldlm_cli_convert(struct lustre_handle *, int new_mode, __u32 *flags);
+int ldlm_cli_update_pool(struct ptlrpc_request *req);
+int ldlm_cli_cancel(struct lustre_handle *lockh,
+ ldlm_cancel_flags_t cancel_flags);
+int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *,
+ ldlm_cancel_flags_t flags, void *opaque);
+int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
+ const struct ldlm_res_id *res_id,
+ ldlm_policy_data_t *policy,
+ ldlm_mode_t mode,
+ ldlm_cancel_flags_t flags,
+ void *opaque);
+int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *head,
+ int count, ldlm_cancel_flags_t flags);
+int ldlm_cancel_resource_local(struct ldlm_resource *res,
+ struct list_head *cancels,
+ ldlm_policy_data_t *policy,
+ ldlm_mode_t mode, int lock_flags,
+ ldlm_cancel_flags_t cancel_flags, void *opaque);
+int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
+ ldlm_cancel_flags_t flags);
+int ldlm_cli_cancel_list(struct list_head *head, int count,
+ struct ptlrpc_request *req, ldlm_cancel_flags_t flags);
+/** @} ldlm_cli_api */
+
+/* mds/handler.c */
+/* This has to be here because recursive inclusion sucks. */
+int intent_disposition(struct ldlm_reply *rep, int flag);
+void intent_set_disposition(struct ldlm_reply *rep, int flag);
+
+
+/* ioctls for trying requests */
+#define IOC_LDLM_TYPE 'f'
+#define IOC_LDLM_MIN_NR 40
+
+#define IOC_LDLM_TEST _IOWR('f', 40, long)
+#define IOC_LDLM_DUMP _IOWR('f', 41, long)
+#define IOC_LDLM_REGRESS_START _IOWR('f', 42, long)
+#define IOC_LDLM_REGRESS_STOP _IOWR('f', 43, long)
+#define IOC_LDLM_MAX_NR 43
+
+/**
+ * "Modes" of acquiring lock_res, necessary to tell lockdep that taking more
+ * than one lock_res is dead-lock safe.
+ */
+enum lock_res_type {
+ LRT_NORMAL,
+ LRT_NEW
+};
+
+/** Lock resource. */
+static inline void lock_res(struct ldlm_resource *res)
+{
+ spin_lock(&res->lr_lock);
+}
+
+/** Lock resource, telling lockdep that this nesting of lock_res calls is safe. */
+static inline void lock_res_nested(struct ldlm_resource *res,
+ enum lock_res_type mode)
+{
+ spin_lock_nested(&res->lr_lock, mode);
+}
+
+/** Unlock resource. */
+static inline void unlock_res(struct ldlm_resource *res)
+{
+ spin_unlock(&res->lr_lock);
+}
+
+/** Check if resource is already locked, assert if not. */
+static inline void check_res_locked(struct ldlm_resource *res)
+{
+ LASSERT(spin_is_locked(&res->lr_lock));
+}
+
+struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock);
+void unlock_res_and_lock(struct ldlm_lock *lock);
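A hedged sketch of taking two resource locks in one code path, using the lockdep modes above; the function and variable names are hypothetical:

/* Acquire the old resource normally and the new one with a distinct
 * lockdep subclass, so holding both at once is not flagged as a deadlock. */
static void relink_under_both(struct ldlm_resource *old_res,
                              struct ldlm_resource *new_res)
{
        lock_res(old_res);                      /* LRT_NORMAL class */
        lock_res_nested(new_res, LRT_NEW);      /* nested, separate class */
        /* ... move locks from old_res to new_res ... */
        unlock_res(new_res);
        unlock_res(old_res);
}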
+
+/* ldlm_pool.c */
+/** \defgroup ldlm_pools Various LDLM pool related functions
+ * These are not used outside of LDLM.
+ * @{
+ */
+int ldlm_pools_recalc(ldlm_side_t client);
+int ldlm_pools_init(void);
+void ldlm_pools_fini(void);
+
+int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
+ int idx, ldlm_side_t client);
+int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
+ unsigned int gfp_mask);
+void ldlm_pool_fini(struct ldlm_pool *pl);
+int ldlm_pool_setup(struct ldlm_pool *pl, int limit);
+int ldlm_pool_recalc(struct ldlm_pool *pl);
+__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl);
+__u64 ldlm_pool_get_slv(struct ldlm_pool *pl);
+__u64 ldlm_pool_get_clv(struct ldlm_pool *pl);
+__u32 ldlm_pool_get_limit(struct ldlm_pool *pl);
+void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv);
+void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv);
+void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit);
+void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock);
+void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock);
+/** @} */
+
+#endif
+/** @} LDLM */
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
new file mode 100644
index 0000000..8c34d9d
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
@@ -0,0 +1,460 @@
+/* -*- buffer-read-only: t -*- vi: set ro:
+ *
+ * DO NOT EDIT THIS FILE (lustre_dlm_flags.h)
+ *
+ * It has been AutoGen-ed
+ * From the definitions lustre_dlm_flags.def
+ * and the template file lustre_dlm_flags.tpl
+ *
+ * lustre is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * lustre is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+/**
+ * \file lustre_dlm_flags.h
+ * The flags and collections of flags (masks) for \see struct ldlm_lock.
+ * This file is derived from flag definitions in lustre_dlm_flags.def.
+ * The format is defined in the lustre_dlm_flags.tpl template file.
+ *
+ * \addtogroup LDLM Lustre Distributed Lock Manager
+ * @{
+ *
+ * \name flags
+ * The flags and collections of flags (masks) for \see struct ldlm_lock.
+ * @{
+ */
+#ifndef LDLM_ALL_FLAGS_MASK
+
+/** l_flags bits marked as "all_flags" bits */
+#define LDLM_FL_ALL_FLAGS_MASK 0x007FFFFFC08F132FULL
+
+/** l_flags bits marked as "ast" bits */
+#define LDLM_FL_AST_MASK 0x0000000080000000ULL
+
+/** l_flags bits marked as "blocked" bits */
+#define LDLM_FL_BLOCKED_MASK 0x000000000000000EULL
+
+/** l_flags bits marked as "gone" bits */
+#define LDLM_FL_GONE_MASK 0x0006004000000000ULL
+
+/** l_flags bits marked as "hide_lock" bits */
+#define LDLM_FL_HIDE_LOCK_MASK 0x0000206400000000ULL
+
+/** l_flags bits marked as "inherit" bits */
+#define LDLM_FL_INHERIT_MASK 0x0000000000800000ULL
+
+/** l_flags bits marked as "local_only" bits */
+#define LDLM_FL_LOCAL_ONLY_MASK 0x007FFFFF00000000ULL
+
+/** l_flags bits marked as "on_wire" bits */
+#define LDLM_FL_ON_WIRE_MASK 0x00000000C08F132FULL
+
+/** extent, mode, or resource changed */
+#define LDLM_FL_LOCK_CHANGED 0x0000000000000001ULL // bit 0
+#define ldlm_is_lock_changed(_l) LDLM_TEST_FLAG(( _l), 1ULL << 0)
+#define ldlm_set_lock_changed(_l) LDLM_SET_FLAG(( _l), 1ULL << 0)
+#define ldlm_clear_lock_changed(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 0)
+
+/**
+ * Server placed lock on granted list, or a recovering client wants the
+ * lock added to the granted list, no questions asked. */
+#define LDLM_FL_BLOCK_GRANTED 0x0000000000000002ULL // bit 1
+#define ldlm_is_block_granted(_l) LDLM_TEST_FLAG(( _l), 1ULL << 1)
+#define ldlm_set_block_granted(_l) LDLM_SET_FLAG(( _l), 1ULL << 1)
+#define ldlm_clear_block_granted(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 1)
+
+/**
+ * Server placed lock on conv list, or a recovering client wants the lock
+ * added to the conv list, no questions asked. */
+#define LDLM_FL_BLOCK_CONV 0x0000000000000004ULL // bit 2
+#define ldlm_is_block_conv(_l) LDLM_TEST_FLAG(( _l), 1ULL << 2)
+#define ldlm_set_block_conv(_l) LDLM_SET_FLAG(( _l), 1ULL << 2)
+#define ldlm_clear_block_conv(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 2)
+
+/**
+ * Server placed lock on wait list, or a recovering client wants the lock
+ * added to the wait list, no questions asked. */
+#define LDLM_FL_BLOCK_WAIT 0x0000000000000008ULL // bit 3
+#define ldlm_is_block_wait(_l) LDLM_TEST_FLAG(( _l), 1ULL << 3)
+#define ldlm_set_block_wait(_l) LDLM_SET_FLAG(( _l), 1ULL << 3)
+#define ldlm_clear_block_wait(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 3)
+
+/** blocking or cancel packet was queued for sending. */
+#define LDLM_FL_AST_SENT 0x0000000000000020ULL // bit 5
+#define ldlm_is_ast_sent(_l) LDLM_TEST_FLAG(( _l), 1ULL << 5)
+#define ldlm_set_ast_sent(_l) LDLM_SET_FLAG(( _l), 1ULL << 5)
+#define ldlm_clear_ast_sent(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 5)
+
+/**
+ * Lock is being replayed. This could probably be implied by the fact that
+ * one of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous. */
+#define LDLM_FL_REPLAY 0x0000000000000100ULL // bit 8
+#define ldlm_is_replay(_l) LDLM_TEST_FLAG(( _l), 1ULL << 8)
+#define ldlm_set_replay(_l) LDLM_SET_FLAG(( _l), 1ULL << 8)
+#define ldlm_clear_replay(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 8)
+
+/** Don't grant lock, just do intent. */
+#define LDLM_FL_INTENT_ONLY 0x0000000000000200ULL // bit 9
+#define ldlm_is_intent_only(_l) LDLM_TEST_FLAG(( _l), 1ULL << 9)
+#define ldlm_set_intent_only(_l) LDLM_SET_FLAG(( _l), 1ULL << 9)
+#define ldlm_clear_intent_only(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 9)
+
+/** lock request has intent */
+#define LDLM_FL_HAS_INTENT 0x0000000000001000ULL // bit 12
+#define ldlm_is_has_intent(_l) LDLM_TEST_FLAG(( _l), 1ULL << 12)
+#define ldlm_set_has_intent(_l) LDLM_SET_FLAG(( _l), 1ULL << 12)
+#define ldlm_clear_has_intent(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 12)
+
+/** discard (no writeback) on cancel */
+#define LDLM_FL_DISCARD_DATA 0x0000000000010000ULL // bit 16
+#define ldlm_is_discard_data(_l) LDLM_TEST_FLAG(( _l), 1ULL << 16)
+#define ldlm_set_discard_data(_l) LDLM_SET_FLAG(( _l), 1ULL << 16)
+#define ldlm_clear_discard_data(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 16)
+
+/** Blocked by group lock - wait indefinitely */
+#define LDLM_FL_NO_TIMEOUT 0x0000000000020000ULL // bit 17
+#define ldlm_is_no_timeout(_l) LDLM_TEST_FLAG(( _l), 1ULL << 17)
+#define ldlm_set_no_timeout(_l) LDLM_SET_FLAG(( _l), 1ULL << 17)
+#define ldlm_clear_no_timeout(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 17)
+
+/**
+ * Server told not to wait if blocked. For AGL, OST will not send glimpse
+ * callback. */
+#define LDLM_FL_BLOCK_NOWAIT 0x0000000000040000ULL // bit 18
+#define ldlm_is_block_nowait(_l) LDLM_TEST_FLAG(( _l), 1ULL << 18)
+#define ldlm_set_block_nowait(_l) LDLM_SET_FLAG(( _l), 1ULL << 18)
+#define ldlm_clear_block_nowait(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 18)
+
+/** return blocking lock */
+#define LDLM_FL_TEST_LOCK 0x0000000000080000ULL // bit 19
+#define ldlm_is_test_lock(_l) LDLM_TEST_FLAG(( _l), 1ULL << 19)
+#define ldlm_set_test_lock(_l) LDLM_SET_FLAG(( _l), 1ULL << 19)
+#define ldlm_clear_test_lock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 19)
+
+/**
+ * Immediately cancel such locks when they block some other locks. Send a
+ * cancel notification to the original lock holder, but expect no reply.
+ * This is for clients (like liblustre) that cannot be expected to reliably
+ * respond to blocking ASTs. */
+#define LDLM_FL_CANCEL_ON_BLOCK 0x0000000000800000ULL // bit 23
+#define ldlm_is_cancel_on_block(_l) LDLM_TEST_FLAG(( _l), 1ULL << 23)
+#define ldlm_set_cancel_on_block(_l) LDLM_SET_FLAG(( _l), 1ULL << 23)
+#define ldlm_clear_cancel_on_block(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 23)
+
+/**
+ * measure lock contention and return -EUSERS if locking contention is high */
+#define LDLM_FL_DENY_ON_CONTENTION 0x0000000040000000ULL // bit 30
+#define ldlm_is_deny_on_contention(_l) LDLM_TEST_FLAG(( _l), 1ULL << 30)
+#define ldlm_set_deny_on_contention(_l) LDLM_SET_FLAG(( _l), 1ULL << 30)
+#define ldlm_clear_deny_on_contention(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 30)
+
+/**
+ * These are flags that are mapped into the flags and ASTs of blocking
+ * locks. Add FL_DISCARD to blocking ASTs. */
+#define LDLM_FL_AST_DISCARD_DATA 0x0000000080000000ULL // bit 31
+#define ldlm_is_ast_discard_data(_l) LDLM_TEST_FLAG(( _l), 1ULL << 31)
+#define ldlm_set_ast_discard_data(_l) LDLM_SET_FLAG(( _l), 1ULL << 31)
+#define ldlm_clear_ast_discard_data(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 31)
+
+/**
+ * Used for marking a lock as a target for -EINTR while emulating a cp_ast
+ * sleep plus a race with an upcoming bl_ast. */
+#define LDLM_FL_FAIL_LOC 0x0000000100000000ULL // bit 32
+#define ldlm_is_fail_loc(_l) LDLM_TEST_FLAG(( _l), 1ULL << 32)
+#define ldlm_set_fail_loc(_l) LDLM_SET_FLAG(( _l), 1ULL << 32)
+#define ldlm_clear_fail_loc(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 32)
+
+/**
+ * Used while processing the unused list to know that we have already
+ * handled this lock and decided to skip it. */
+#define LDLM_FL_SKIPPED 0x0000000200000000ULL // bit 33
+#define ldlm_is_skipped(_l) LDLM_TEST_FLAG(( _l), 1ULL << 33)
+#define ldlm_set_skipped(_l) LDLM_SET_FLAG(( _l), 1ULL << 33)
+#define ldlm_clear_skipped(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 33)
+
+/** this lock is being destroyed */
+#define LDLM_FL_CBPENDING 0x0000000400000000ULL // bit 34
+#define ldlm_is_cbpending(_l) LDLM_TEST_FLAG(( _l), 1ULL << 34)
+#define ldlm_set_cbpending(_l) LDLM_SET_FLAG(( _l), 1ULL << 34)
+#define ldlm_clear_cbpending(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 34)
+
+/** not a real flag, not saved in lock */
+#define LDLM_FL_WAIT_NOREPROC 0x0000000800000000ULL // bit 35
+#define ldlm_is_wait_noreproc(_l) LDLM_TEST_FLAG(( _l), 1ULL << 35)
+#define ldlm_set_wait_noreproc(_l) LDLM_SET_FLAG(( _l), 1ULL << 35)
+#define ldlm_clear_wait_noreproc(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 35)
+
+/** cancellation callback already run */
+#define LDLM_FL_CANCEL 0x0000001000000000ULL // bit 36
+#define ldlm_is_cancel(_l) LDLM_TEST_FLAG(( _l), 1ULL << 36)
+#define ldlm_set_cancel(_l) LDLM_SET_FLAG(( _l), 1ULL << 36)
+#define ldlm_clear_cancel(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 36)
+
+/** whatever it might mean */
+#define LDLM_FL_LOCAL_ONLY 0x0000002000000000ULL // bit 37
+#define ldlm_is_local_only(_l) LDLM_TEST_FLAG(( _l), 1ULL << 37)
+#define ldlm_set_local_only(_l) LDLM_SET_FLAG(( _l), 1ULL << 37)
+#define ldlm_clear_local_only(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 37)
+
+/** don't run the cancel callback under ldlm_cli_cancel_unused */
+#define LDLM_FL_FAILED 0x0000004000000000ULL // bit 38
+#define ldlm_is_failed(_l) LDLM_TEST_FLAG(( _l), 1ULL << 38)
+#define ldlm_set_failed(_l) LDLM_SET_FLAG(( _l), 1ULL << 38)
+#define ldlm_clear_failed(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 38)
+
+/** lock cancel has already been sent */
+#define LDLM_FL_CANCELING 0x0000008000000000ULL // bit 39
+#define ldlm_is_canceling(_l) LDLM_TEST_FLAG(( _l), 1ULL << 39)
+#define ldlm_set_canceling(_l) LDLM_SET_FLAG(( _l), 1ULL << 39)
+#define ldlm_clear_canceling(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 39)
+
+/** local lock (ie, no srv/cli split) */
+#define LDLM_FL_LOCAL 0x0000010000000000ULL // bit 40
+#define ldlm_is_local(_l) LDLM_TEST_FLAG(( _l), 1ULL << 40)
+#define ldlm_set_local(_l) LDLM_SET_FLAG(( _l), 1ULL << 40)
+#define ldlm_clear_local(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 40)
+
+/**
+ * XXX FIXME: This is being added to b_size as a low-risk fix to the
+ * fact that the LVB filling happens _after_ the lock has been granted,
+ * so another thread can match it before the LVB has been updated. As a
+ * dirty hack, we set LDLM_FL_LVB_READY only after the LVB has been filled.
+ * This is only needed on LOV/OSC now, where the LVB is actually used and
+ * callers must set it in input flags.
+ *
+ * The proper fix is to do the granting inside of the completion AST,
+ * which can be replaced with a LVB-aware wrapping function for OSC locks.
+ * That change is pretty high-risk, though, and would need a lot more
+ * testing. */
+#define LDLM_FL_LVB_READY 0x0000020000000000ULL // bit 41
+#define ldlm_is_lvb_ready(_l) LDLM_TEST_FLAG(( _l), 1ULL << 41)
+#define ldlm_set_lvb_ready(_l) LDLM_SET_FLAG(( _l), 1ULL << 41)
+#define ldlm_clear_lvb_ready(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 41)
+
+/**
+ * A lock contributes to the known minimum size (KMS) calculation until it
+ * has finished the part of its cancellation that performs write back on its
+ * dirty pages. It can remain on the granted list during this whole time.
+ * Threads racing to update the KMS after performing their writeback need
+ * to know to exclude each other's locks from the calculation as they walk
+ * the granted list. */
+#define LDLM_FL_KMS_IGNORE 0x0000040000000000ULL // bit 42
+#define ldlm_is_kms_ignore(_l) LDLM_TEST_FLAG(( _l), 1ULL << 42)
+#define ldlm_set_kms_ignore(_l) LDLM_SET_FLAG(( _l), 1ULL << 42)
+#define ldlm_clear_kms_ignore(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 42)
+
+/** completion AST to be executed */
+#define LDLM_FL_CP_REQD 0x0000080000000000ULL // bit 43
+#define ldlm_is_cp_reqd(_l) LDLM_TEST_FLAG(( _l), 1ULL << 43)
+#define ldlm_set_cp_reqd(_l) LDLM_SET_FLAG(( _l), 1ULL << 43)
+#define ldlm_clear_cp_reqd(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 43)
+
+/** cleanup_resource has already handled the lock */
+#define LDLM_FL_CLEANED 0x0000100000000000ULL // bit 44
+#define ldlm_is_cleaned(_l) LDLM_TEST_FLAG(( _l), 1ULL << 44)
+#define ldlm_set_cleaned(_l) LDLM_SET_FLAG(( _l), 1ULL << 44)
+#define ldlm_clear_cleaned(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 44)
+
+/**
+ * Optimization hint: LDLM can run the blocking callback from the current
+ * context, without a separate thread, to decrease the context-switch rate. */
+#define LDLM_FL_ATOMIC_CB 0x0000200000000000ULL // bit 45
+#define ldlm_is_atomic_cb(_l) LDLM_TEST_FLAG(( _l), 1ULL << 45)
+#define ldlm_set_atomic_cb(_l) LDLM_SET_FLAG(( _l), 1ULL << 45)
+#define ldlm_clear_atomic_cb(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 45)
+
+/**
+ * It may happen that a client initiates two operations, e.g. unlink and
+ * mkdir, such that the server sends a blocking AST for conflicting locks
+ * to this client for the first operation, whereas the second operation
+ * has canceled this lock and is waiting for rpc_lock which is taken by
+ * the first operation. LDLM_FL_BL_AST is set by ldlm_callback_handler() in
+ * the lock to prevent the Early Lock Cancel (ELC) code from cancelling it.
+ *
+ * LDLM_FL_BL_DONE is to be set by ldlm_cancel_callback() when lock cache is
+ * dropped to let ldlm_callback_handler() return EINVAL to the server. It
+ * is used when ELC RPC is already prepared and is waiting for rpc_lock,
+ * too late to send a separate CANCEL RPC. */
+#define LDLM_FL_BL_AST 0x0000400000000000ULL // bit 46
+#define ldlm_is_bl_ast(_l) LDLM_TEST_FLAG(( _l), 1ULL << 46)
+#define ldlm_set_bl_ast(_l) LDLM_SET_FLAG(( _l), 1ULL << 46)
+#define ldlm_clear_bl_ast(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 46)
+
+/** whatever it might mean */
+#define LDLM_FL_BL_DONE 0x0000800000000000ULL // bit 47
+#define ldlm_is_bl_done(_l) LDLM_TEST_FLAG(( _l), 1ULL << 47)
+#define ldlm_set_bl_done(_l) LDLM_SET_FLAG(( _l), 1ULL << 47)
+#define ldlm_clear_bl_done(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 47)
+
+/**
+ * Don't put the lock into the LRU list, so that it is not cancelled due
+ * to aging. Used by MGC locks; they are cancelled only at unmount or
+ * by callback. */
+#define LDLM_FL_NO_LRU 0x0001000000000000ULL // bit 48
+#define ldlm_is_no_lru(_l) LDLM_TEST_FLAG(( _l), 1ULL << 48)
+#define ldlm_set_no_lru(_l) LDLM_SET_FLAG(( _l), 1ULL << 48)
+#define ldlm_clear_no_lru(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 48)
+
+/**
+ * Set for locks that failed and where the server has been notified.
+ *
+ * Protected by lock and resource locks. */
+#define LDLM_FL_FAIL_NOTIFIED 0x0002000000000000ULL // bit 49
+#define ldlm_is_fail_notified(_l) LDLM_TEST_FLAG(( _l), 1ULL << 49)
+#define ldlm_set_fail_notified(_l) LDLM_SET_FLAG(( _l), 1ULL << 49)
+#define ldlm_clear_fail_notified(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 49)
+
+/**
+ * Set for locks that were removed from class hash table and will
+ * be destroyed when last reference to them is released. Set by
+ * ldlm_lock_destroy_internal().
+ *
+ * Protected by lock and resource locks. */
+#define LDLM_FL_DESTROYED 0x0004000000000000ULL // bit 50
+#define ldlm_is_destroyed(_l) LDLM_TEST_FLAG(( _l), 1ULL << 50)
+#define ldlm_set_destroyed(_l) LDLM_SET_FLAG(( _l), 1ULL << 50)
+#define ldlm_clear_destroyed(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 50)
+
+/** flag whether this is a server namespace lock */
+#define LDLM_FL_SERVER_LOCK 0x0008000000000000ULL // bit 51
+#define ldlm_is_server_lock(_l) LDLM_TEST_FLAG(( _l), 1ULL << 51)
+#define ldlm_set_server_lock(_l) LDLM_SET_FLAG(( _l), 1ULL << 51)
+#define ldlm_clear_server_lock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 51)
+
+/**
+ * It's set in lock_res_and_lock() and unset in unlock_res_and_lock().
+ *
+ * NB: compared with check_res_locked(), checking this bit is cheaper.
+ * Also, spin_is_locked() is deprecated for kernel code; one reason is
+ * that it works only on SMP, so users need to add extra macros like
+ * LASSERT_SPIN_LOCKED for uniprocessor kernels. */
+#define LDLM_FL_RES_LOCKED 0x0010000000000000ULL // bit 52
+#define ldlm_is_res_locked(_l) LDLM_TEST_FLAG(( _l), 1ULL << 52)
+#define ldlm_set_res_locked(_l) LDLM_SET_FLAG(( _l), 1ULL << 52)
+#define ldlm_clear_res_locked(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 52)
+
+/**
+ * It's set once we call ldlm_add_waiting_lock_res_locked() to start the
+ * lock-timeout timer and it will never be reset.
+ *
+ * Protected by lock and resource locks. */
+#define LDLM_FL_WAITED 0x0020000000000000ULL // bit 53
+#define ldlm_is_waited(_l) LDLM_TEST_FLAG(( _l), 1ULL << 53)
+#define ldlm_set_waited(_l) LDLM_SET_FLAG(( _l), 1ULL << 53)
+#define ldlm_clear_waited(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 53)
+
+/** Flag whether this is a server namespace lock. */
+#define LDLM_FL_NS_SRV 0x0040000000000000ULL // bit 54
+#define ldlm_is_ns_srv(_l) LDLM_TEST_FLAG(( _l), 1ULL << 54)
+#define ldlm_set_ns_srv(_l) LDLM_SET_FLAG(( _l), 1ULL << 54)
+#define ldlm_clear_ns_srv(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 54)
+
+/** test for ldlm_lock flag bit set */
+#define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0)
+
+/** set a ldlm_lock flag bit */
+#define LDLM_SET_FLAG(_l, _b) ((_l)->l_flags |= (_b))
+
+/** clear a ldlm_lock flag bit */
+#define LDLM_CLEAR_FLAG(_l, _b) ((_l)->l_flags &= ~(_b))
+
+/** Mask of flags inherited from parent lock when doing intents. */
+#define LDLM_INHERIT_FLAGS LDLM_FL_INHERIT_MASK
+
+/** Mask of Flags sent in AST lock_flags to map into the receiving lock. */
+#define LDLM_AST_FLAGS LDLM_FL_AST_MASK
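A hedged sketch of the generated per-flag accessors above in use; the helper name is hypothetical and the lock is assumed to be referenced by the caller:

/* Test-and-set pattern built on LDLM_TEST_FLAG()/LDLM_SET_FLAG(). */
static inline void mark_lvb_filled(struct ldlm_lock *lock)
{
        if (!ldlm_is_lvb_ready(lock))           /* LDLM_TEST_FLAG(lock, 1ULL << 41) */
                ldlm_set_lvb_ready(lock);       /* LDLM_SET_FLAG(lock, 1ULL << 41) */
}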
+
+/** @} subgroup */
+/** @} group */
+#ifdef WIRESHARK_COMPILE
+static int hf_lustre_ldlm_fl_lock_changed = -1;
+static int hf_lustre_ldlm_fl_block_granted = -1;
+static int hf_lustre_ldlm_fl_block_conv = -1;
+static int hf_lustre_ldlm_fl_block_wait = -1;
+static int hf_lustre_ldlm_fl_ast_sent = -1;
+static int hf_lustre_ldlm_fl_replay = -1;
+static int hf_lustre_ldlm_fl_intent_only = -1;
+static int hf_lustre_ldlm_fl_has_intent = -1;
+static int hf_lustre_ldlm_fl_discard_data = -1;
+static int hf_lustre_ldlm_fl_no_timeout = -1;
+static int hf_lustre_ldlm_fl_block_nowait = -1;
+static int hf_lustre_ldlm_fl_test_lock = -1;
+static int hf_lustre_ldlm_fl_cancel_on_block = -1;
+static int hf_lustre_ldlm_fl_deny_on_contention = -1;
+static int hf_lustre_ldlm_fl_ast_discard_data = -1;
+static int hf_lustre_ldlm_fl_fail_loc = -1;
+static int hf_lustre_ldlm_fl_skipped = -1;
+static int hf_lustre_ldlm_fl_cbpending = -1;
+static int hf_lustre_ldlm_fl_wait_noreproc = -1;
+static int hf_lustre_ldlm_fl_cancel = -1;
+static int hf_lustre_ldlm_fl_local_only = -1;
+static int hf_lustre_ldlm_fl_failed = -1;
+static int hf_lustre_ldlm_fl_canceling = -1;
+static int hf_lustre_ldlm_fl_local = -1;
+static int hf_lustre_ldlm_fl_lvb_ready = -1;
+static int hf_lustre_ldlm_fl_kms_ignore = -1;
+static int hf_lustre_ldlm_fl_cp_reqd = -1;
+static int hf_lustre_ldlm_fl_cleaned = -1;
+static int hf_lustre_ldlm_fl_atomic_cb = -1;
+static int hf_lustre_ldlm_fl_bl_ast = -1;
+static int hf_lustre_ldlm_fl_bl_done = -1;
+static int hf_lustre_ldlm_fl_no_lru = -1;
+static int hf_lustre_ldlm_fl_fail_notified = -1;
+static int hf_lustre_ldlm_fl_destroyed = -1;
+static int hf_lustre_ldlm_fl_server_lock = -1;
+static int hf_lustre_ldlm_fl_res_locked = -1;
+static int hf_lustre_ldlm_fl_waited = -1;
+static int hf_lustre_ldlm_fl_ns_srv = -1;
+
+const value_string lustre_ldlm_flags_vals[] = {
+ {LDLM_FL_LOCK_CHANGED, "LDLM_FL_LOCK_CHANGED"},
+ {LDLM_FL_BLOCK_GRANTED, "LDLM_FL_BLOCK_GRANTED"},
+ {LDLM_FL_BLOCK_CONV, "LDLM_FL_BLOCK_CONV"},
+ {LDLM_FL_BLOCK_WAIT, "LDLM_FL_BLOCK_WAIT"},
+ {LDLM_FL_AST_SENT, "LDLM_FL_AST_SENT"},
+ {LDLM_FL_REPLAY, "LDLM_FL_REPLAY"},
+ {LDLM_FL_INTENT_ONLY, "LDLM_FL_INTENT_ONLY"},
+ {LDLM_FL_HAS_INTENT, "LDLM_FL_HAS_INTENT"},
+ {LDLM_FL_DISCARD_DATA, "LDLM_FL_DISCARD_DATA"},
+ {LDLM_FL_NO_TIMEOUT, "LDLM_FL_NO_TIMEOUT"},
+ {LDLM_FL_BLOCK_NOWAIT, "LDLM_FL_BLOCK_NOWAIT"},
+ {LDLM_FL_TEST_LOCK, "LDLM_FL_TEST_LOCK"},
+ {LDLM_FL_CANCEL_ON_BLOCK, "LDLM_FL_CANCEL_ON_BLOCK"},
+ {LDLM_FL_DENY_ON_CONTENTION, "LDLM_FL_DENY_ON_CONTENTION"},
+ {LDLM_FL_AST_DISCARD_DATA, "LDLM_FL_AST_DISCARD_DATA"},
+ {LDLM_FL_FAIL_LOC, "LDLM_FL_FAIL_LOC"},
+ {LDLM_FL_SKIPPED, "LDLM_FL_SKIPPED"},
+ {LDLM_FL_CBPENDING, "LDLM_FL_CBPENDING"},
+ {LDLM_FL_WAIT_NOREPROC, "LDLM_FL_WAIT_NOREPROC"},
+ {LDLM_FL_CANCEL, "LDLM_FL_CANCEL"},
+ {LDLM_FL_LOCAL_ONLY, "LDLM_FL_LOCAL_ONLY"},
+ {LDLM_FL_FAILED, "LDLM_FL_FAILED"},
+ {LDLM_FL_CANCELING, "LDLM_FL_CANCELING"},
+ {LDLM_FL_LOCAL, "LDLM_FL_LOCAL"},
+ {LDLM_FL_LVB_READY, "LDLM_FL_LVB_READY"},
+ {LDLM_FL_KMS_IGNORE, "LDLM_FL_KMS_IGNORE"},
+ {LDLM_FL_CP_REQD, "LDLM_FL_CP_REQD"},
+ {LDLM_FL_CLEANED, "LDLM_FL_CLEANED"},
+ {LDLM_FL_ATOMIC_CB, "LDLM_FL_ATOMIC_CB"},
+ {LDLM_FL_BL_AST, "LDLM_FL_BL_AST"},
+ {LDLM_FL_BL_DONE, "LDLM_FL_BL_DONE"},
+ {LDLM_FL_NO_LRU, "LDLM_FL_NO_LRU"},
+ {LDLM_FL_FAIL_NOTIFIED, "LDLM_FL_FAIL_NOTIFIED"},
+ {LDLM_FL_DESTROYED, "LDLM_FL_DESTROYED"},
+ {LDLM_FL_SERVER_LOCK, "LDLM_FL_SERVER_LOCK"},
+ {LDLM_FL_RES_LOCKED, "LDLM_FL_RES_LOCKED"},
+ {LDLM_FL_WAITED, "LDLM_FL_WAITED"},
+ {LDLM_FL_NS_SRV, "LDLM_FL_NS_SRV"},
+ { 0, NULL }
+};
+#endif /* WIRESHARK_COMPILE */
+#endif /* LDLM_ALL_FLAGS_MASK */
diff --git a/drivers/staging/lustre/lustre/include/lustre_eacl.h b/drivers/staging/lustre/lustre/include/lustre_eacl.h
new file mode 100644
index 0000000..b94f76a
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_eacl.h
@@ -0,0 +1,95 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/lustre/include/lustre_idmap.h
+ *
+ * MDS data structures.
+ * See also lustre_idl.h for wire formats of requests.
+ */
+
+#ifndef _LUSTRE_EACL_H
+#define _LUSTRE_EACL_H
+
+/** \defgroup eacl eacl
+ *
+ * @{
+ */
+
+#ifdef CONFIG_FS_POSIX_ACL
+
+#include <linux/posix_acl_xattr.h>
+
+typedef struct {
+ __u16 e_tag;
+ __u16 e_perm;
+ __u32 e_id;
+ __u32 e_stat;
+} ext_acl_xattr_entry;
+
+typedef struct {
+ __u32 a_count;
+ ext_acl_xattr_entry a_entries[0];
+} ext_acl_xattr_header;
+
+#define CFS_ACL_XATTR_SIZE(count, prefix) \
+ (sizeof(prefix ## _header) + (count) * sizeof(prefix ## _entry))
+
+#define CFS_ACL_XATTR_COUNT(size, prefix) \
+ (((size) - sizeof(prefix ## _header)) / sizeof(prefix ## _entry))
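A small hedged sketch showing how the size macro above is used with the extended-ACL layout; the helper name is hypothetical:

/* Bytes needed for an ext_acl_xattr_header followed by "count" entries;
 * expands to sizeof(ext_acl_xattr_header) + count * sizeof(ext_acl_xattr_entry). */
static inline size_t ext_acl_xattr_bytes(int count)
{
        return CFS_ACL_XATTR_SIZE(count, ext_acl_xattr);
}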
+
+
+extern ext_acl_xattr_header *
+lustre_posix_acl_xattr_2ext(posix_acl_xattr_header *header, int size);
+extern int
+lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, int size,
+ posix_acl_xattr_header **out);
+extern void
+lustre_posix_acl_xattr_free(posix_acl_xattr_header *header, int size);
+extern void
+lustre_ext_acl_xattr_free(ext_acl_xattr_header *header);
+extern int
+lustre_acl_xattr_merge2posix(posix_acl_xattr_header *posix_header, int size,
+ ext_acl_xattr_header *ext_header,
+ posix_acl_xattr_header **out);
+extern ext_acl_xattr_header *
+lustre_acl_xattr_merge2ext(posix_acl_xattr_header *posix_header, int size,
+ ext_acl_xattr_header *ext_header);
+
+#endif /* CONFIG_FS_POSIX_ACL */
+
+/** @} eacl */
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_export.h b/drivers/staging/lustre/lustre/include/lustre_export.h
new file mode 100644
index 0000000..d61c020
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_export.h
@@ -0,0 +1,389 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+/** \defgroup obd_export PortalRPC export definitions
+ *
+ * @{
+ */
+
+#ifndef __EXPORT_H
+#define __EXPORT_H
+
+/** \defgroup export export
+ *
+ * @{
+ */
+
+#include <lprocfs_status.h>
+#include <lustre/lustre_idl.h>
+#include <lustre_dlm.h>
+
+struct mds_client_data;
+struct mdt_client_data;
+struct mds_idmap_table;
+struct mdt_idmap_table;
+
+/**
+ * Target-specific export data
+ */
+struct tg_export_data {
+ /** Protects led_lcd below */
+ struct mutex ted_lcd_lock;
+ /** Per-client data for each export */
+ struct lsd_client_data *ted_lcd;
+ /** Offset of record in last_rcvd file */
+ loff_t ted_lr_off;
+ /** Client index in last_rcvd file */
+ int ted_lr_idx;
+};
+
+/**
+ * MDT-specific export data
+ */
+struct mdt_export_data {
+ struct tg_export_data med_ted;
+ /** List of all files opened by client on this MDT */
+ struct list_head med_open_head;
+ spinlock_t med_open_lock; /* med_open_head, mfd_list */
+ /** Bitmask of all ibit locks this MDT understands */
+ __u64 med_ibits_known;
+ struct mutex med_idmap_mutex;
+ struct lustre_idmap_table *med_idmap;
+};
+
+struct ec_export_data { /* echo client */
+ struct list_head eced_locks;
+};
+
+/* In-memory access to client data from OST struct */
+/** Filter (OSS-side) specific export data */
+struct filter_export_data {
+ struct tg_export_data fed_ted;
+ spinlock_t fed_lock; /**< protects fed_mod_list */
+ long fed_dirty; /* in bytes */
+ long fed_grant; /* in bytes */
+ struct list_head fed_mod_list; /* files being modified */
+ int fed_mod_count;/* items in fed_writing list */
+ long fed_pending; /* bytes just being written */
+ __u32 fed_group;
+ __u8 fed_pagesize; /* log2 of client page size */
+};
+
+struct mgs_export_data {
+ struct list_head med_clients; /* mgc fs client via this exp */
+ spinlock_t med_lock; /* protect med_clients */
+};
+
+/**
+ * per-NID statistics structure.
+ * It tracks access patterns to this export on a per-client-NID basis
+ */
+struct nid_stat {
+ lnet_nid_t nid;
+ struct hlist_node nid_hash;
+ struct list_head nid_list;
+ struct obd_device *nid_obd;
+ struct proc_dir_entry *nid_proc;
+ struct lprocfs_stats *nid_stats;
+ struct lprocfs_stats *nid_ldlm_stats;
+ atomic_t nid_exp_ref_count; /* for obd_nid_stats_hash
+ exp_nid_stats */
+};
+
+#define nidstat_getref(nidstat) \
+do { \
+ atomic_inc(&(nidstat)->nid_exp_ref_count); \
+} while (0)
+
+#define nidstat_putref(nidstat) \
+do { \
+ atomic_dec(&(nidstat)->nid_exp_ref_count); \
+ LASSERTF(atomic_read(&(nidstat)->nid_exp_ref_count) >= 0, \
+ "stat %p nid_exp_ref_count < 0\n", nidstat); \
+} while (0)
+
+enum obd_option {
+ OBD_OPT_FORCE = 0x0001,
+ OBD_OPT_FAILOVER = 0x0002,
+ OBD_OPT_ABORT_RECOV = 0x0004,
+};
+
+/**
+ * Export structure. Represents target-side of connection in portals.
+ * Also used in Lustre to connect between layers on the same node when
+ * there is no network-connection in-between.
+ * For every connected client there is an export structure on the server
+ * attached to the same obd device.
+ */
+struct obd_export {
+ /**
+ * Export handle, it's id is provided to client on connect
+ * Subsequent client RPCs contain this handle id to identify
+ * what export they are talking to.
+ */
+ struct portals_handle exp_handle;
+ atomic_t exp_refcount;
+ /**
+ * The set of counters below tracks where export references are kept.
+ * exp_rpc_count is also used for reconnect handling; cb_count and
+ * locks_count are for debug purposes only for now. Their sum should be
+ * less than exp_refcount by 3.
+ */
+ atomic_t exp_rpc_count; /* RPC references */
+ atomic_t exp_cb_count; /* Commit callback references */
+ /** Number of queued replay requests to be processed */
+ atomic_t exp_replay_count;
+ atomic_t exp_locks_count; /** Lock references */
+#if LUSTRE_TRACKS_LOCK_EXP_REFS
+ struct list_head exp_locks_list;
+ spinlock_t exp_locks_list_guard;
+#endif
+ /** UUID of client connected to this export */
+ struct obd_uuid exp_client_uuid;
+ /** To link all exports on an obd device */
+ struct list_head exp_obd_chain;
+ struct hlist_node exp_uuid_hash; /** uuid-export hash*/
+ struct hlist_node exp_nid_hash; /** nid-export hash */
+ /**
+ * All exports eligible for ping evictor are linked into a list
+ * through this field in "most time since last request on this export"
+ * order, protected by obd_dev_lock.
+ */
+ struct list_head exp_obd_chain_timed;
+ /** Obd device of this export */
+ struct obd_device *exp_obd;
+ /**
+ * "reverse" import to send requests (e.g. from ldlm) back to client
+ * exp_lock protect its change
+ */
+ struct obd_import *exp_imp_reverse;
+ struct nid_stat *exp_nid_stats;
+ struct lprocfs_stats *exp_md_stats;
+ /** Active connection */
+ struct ptlrpc_connection *exp_connection;
+ /** Connection count value from the last successful reconnect RPC */
+ __u32 exp_conn_cnt;
+ /** Hash list of all ldlm locks granted on this export */
+ cfs_hash_t *exp_lock_hash;
+ /**
+ * Hash list for Posix lock deadlock detection, added with
+ * ldlm_lock::l_exp_flock_hash.
+ */
+ cfs_hash_t *exp_flock_hash;
+ struct list_head exp_outstanding_replies;
+ struct list_head exp_uncommitted_replies;
+ spinlock_t exp_uncommitted_replies_lock;
+ /** Last committed transno for this export */
+ __u64 exp_last_committed;
+ /** When was last request received */
+ cfs_time_t exp_last_request_time;
+ /** On replay all requests waiting for replay are linked here */
+ struct list_head exp_req_replay_queue;
+ /**
+ * protects exp_flags, exp_outstanding_replies and the change
+ * of exp_imp_reverse
+ */
+ spinlock_t exp_lock;
+ /** Compatibility flags for this export are embedded into
+ * exp_connect_data */
+ struct obd_connect_data exp_connect_data;
+ enum obd_option exp_flags;
+ unsigned long exp_failed:1,
+ exp_in_recovery:1,
+ exp_disconnected:1,
+ exp_connecting:1,
+ /** VBR: export missed recovery */
+ exp_delayed:1,
+ /** VBR: failed version checking */
+ exp_vbr_failed:1,
+ exp_req_replay_needed:1,
+ exp_lock_replay_needed:1,
+ exp_need_sync:1,
+ exp_flvr_changed:1,
+ exp_flvr_adapt:1,
+ exp_libclient:1, /* liblustre client? */
+ /* client timed out and tried to reconnect,
+ * but couldn't because of active rpcs */
+ exp_abort_active_req:1,
+ /* if to swap nidtbl entries for 2.2 clients.
+ * Only used by the MGS to fix LU-1644. */
+ exp_need_mne_swab:1;
+ /* also protected by exp_lock */
+ enum lustre_sec_part exp_sp_peer;
+ struct sptlrpc_flavor exp_flvr; /* current */
+ struct sptlrpc_flavor exp_flvr_old[2]; /* about-to-expire */
+ cfs_time_t exp_flvr_expire[2]; /* seconds */
+
+ /** protects exp_hp_rpcs */
+ spinlock_t exp_rpc_lock;
+ struct list_head exp_hp_rpcs; /* (potential) HP RPCs */
+
+ /** blocking dlm lock list, protected by exp_bl_list_lock */
+ struct list_head exp_bl_list;
+ spinlock_t exp_bl_list_lock;
+
+ /** Target specific data */
+ union {
+ struct tg_export_data eu_target_data;
+ struct mdt_export_data eu_mdt_data;
+ struct filter_export_data eu_filter_data;
+ struct ec_export_data eu_ec_data;
+ struct mgs_export_data eu_mgs_data;
+ } u;
+};
+
+#define exp_target_data u.eu_target_data
+#define exp_mdt_data u.eu_mdt_data
+#define exp_filter_data u.eu_filter_data
+#define exp_ec_data u.eu_ec_data
+
+static inline __u64 *exp_connect_flags_ptr(struct obd_export *exp)
+{
+ return &exp->exp_connect_data.ocd_connect_flags;
+}
+
+static inline __u64 exp_connect_flags(struct obd_export *exp)
+{
+ return *exp_connect_flags_ptr(exp);
+}
+
+static inline int exp_max_brw_size(struct obd_export *exp)
+{
+ LASSERT(exp != NULL);
+ if (exp_connect_flags(exp) & OBD_CONNECT_BRW_SIZE)
+ return exp->exp_connect_data.ocd_brw_size;
+
+ return ONE_MB_BRW_SIZE;
+}
+
+static inline int exp_connect_multibulk(struct obd_export *exp)
+{
+ return exp_max_brw_size(exp) > ONE_MB_BRW_SIZE;
+}
+
+static inline int exp_expired(struct obd_export *exp, cfs_duration_t age)
+{
+ LASSERT(exp->exp_delayed);
+ return cfs_time_before(cfs_time_add(exp->exp_last_request_time, age),
+ cfs_time_current_sec());
+}
+
+static inline int exp_connect_cancelset(struct obd_export *exp)
+{
+ LASSERT(exp != NULL);
+ return !!(exp_connect_flags(exp) & OBD_CONNECT_CANCELSET);
+}
+
+static inline int exp_connect_lru_resize(struct obd_export *exp)
+{
+ LASSERT(exp != NULL);
+ return !!(exp_connect_flags(exp) & OBD_CONNECT_LRU_RESIZE);
+}
+
+static inline int exp_connect_rmtclient(struct obd_export *exp)
+{
+ LASSERT(exp != NULL);
+ return !!(exp_connect_flags(exp) & OBD_CONNECT_RMT_CLIENT);
+}
+
+static inline int client_is_remote(struct obd_export *exp)
+{
+ struct obd_import *imp = class_exp2cliimp(exp);
+
+ return !!(imp->imp_connect_data.ocd_connect_flags &
+ OBD_CONNECT_RMT_CLIENT);
+}
+
+static inline int exp_connect_vbr(struct obd_export *exp)
+{
+ LASSERT(exp != NULL);
+ LASSERT(exp->exp_connection);
+ return !!(exp_connect_flags(exp) & OBD_CONNECT_VBR);
+}
+
+static inline int exp_connect_som(struct obd_export *exp)
+{
+ LASSERT(exp != NULL);
+ return !!(exp_connect_flags(exp) & OBD_CONNECT_SOM);
+}
+
+static inline int exp_connect_umask(struct obd_export *exp)
+{
+ return !!(exp_connect_flags(exp) & OBD_CONNECT_UMASK);
+}
+
+static inline int imp_connect_lru_resize(struct obd_import *imp)
+{
+ struct obd_connect_data *ocd;
+
+ LASSERT(imp != NULL);
+ ocd = &imp->imp_connect_data;
+ return !!(ocd->ocd_connect_flags & OBD_CONNECT_LRU_RESIZE);
+}
+
+static inline int exp_connect_layout(struct obd_export *exp)
+{
+ return !!(exp_connect_flags(exp) & OBD_CONNECT_LAYOUTLOCK);
+}
+
+static inline bool exp_connect_lvb_type(struct obd_export *exp)
+{
+ LASSERT(exp != NULL);
+ return exp_connect_flags(exp) & OBD_CONNECT_LVB_TYPE;
+}
+
+static inline bool imp_connect_lvb_type(struct obd_import *imp)
+{
+ struct obd_connect_data *ocd;
+
+ LASSERT(imp != NULL);
+ ocd = &imp->imp_connect_data;
+ return ocd->ocd_connect_flags & OBD_CONNECT_LVB_TYPE;
+}
+
+extern struct obd_export *class_conn2export(struct lustre_handle *conn);
+extern struct obd_device *class_conn2obd(struct lustre_handle *conn);
+
+/** @} export */
+
+#endif /* __EXPORT_H */
+/** @} obd_export */
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
new file mode 100644
index 0000000..d9d5814
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_fid.h
@@ -0,0 +1,773 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/include/lustre_fid.h
+ *
+ * Author: Yury Umanets <umka@clusterfs.com>
+ */
+
+#ifndef __LUSTRE_FID_H
+#define __LUSTRE_FID_H
+
+/** \defgroup fid fid
+ *
+ * @{
+ *
+ * http://wiki.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs
+ * describes the FID namespace and interoperability requirements for FIDs.
+ * The important parts of that document are included here for reference.
+ *
+ * FID
+ * File IDentifier generated by client from range allocated by the SEQuence
+ * service and stored in struct lu_fid. The FID is composed of three parts:
+ * SEQuence, ObjectID, and VERsion. The SEQ component is a filesystem
+ * unique 64-bit integer, and only one client is ever assigned any SEQ value.
+ * The first 0x400 FID_SEQ_NORMAL [2^33, 2^33 + 0x400] values are reserved
+ * for system use. The OID component is a 32-bit value generated by the
+ * client on a per-SEQ basis to allow creating many unique FIDs without
+ * communication with the server. The VER component is a 32-bit value that
+ * distinguishes between different FID instantiations, such as snapshots or
+ * separate subtrees within the filesystem. FIDs with the same VER field
+ * are considered part of the same namespace.
+ *
+ * OLD filesystems are those upgraded from Lustre 1.x that predate FIDs, and
+ * MDTs use 32-bit ldiskfs internal inode/generation numbers (IGIFs), while
+ * OSTs use 64-bit Lustre object IDs and generation numbers.
+ *
+ * NEW filesystems are those formatted since the introduction of FIDs.
+ *
+ * IGIF
+ * Inode and Generation In FID, a surrogate FID used to globally identify
+ * an existing object on OLD formatted MDT file system. This would only be
+ * used on MDT0 in a DNE filesystem, because there cannot be more than one
+ * MDT in an OLD formatted filesystem. Belongs to sequence in [12, 2^32 - 1]
+ * range, where inode number is stored in SEQ, and inode generation is in OID.
+ * NOTE: This assumes no more than 2^32-1 inodes exist in the MDT filesystem,
+ * which is the maximum possible for an ldiskfs backend. It also assumes
+ * that the reserved ext3/ext4/ldiskfs inode numbers [0-11] are never visible
+ * to clients, which has always been true.
+ *
+ * IDIF
+ * object ID In FID, a surrogate FID used to globally identify an existing
+ * OST object on OLD formatted OST file system. Belongs to a sequence in
+ * [2^32, 2^33 - 1]. Sequence number is calculated as:
+ *
+ * 1 << 32 | (ost_index << 16) | ((objid >> 32) & 0xffff)
+ *
+ * that is, SEQ consists of 16-bit OST index, and higher 16 bits of object
+ * ID. The generation of unique SEQ values per OST allows the IDIF FIDs to
+ * be identified in the FLD correctly. The OID field is calculated as:
+ *
+ * objid & 0xffffffff
+ *
+ * that is, it consists of lower 32 bits of object ID. For objects within
+ * the IDIF range, object ID extraction will be:
+ *
+ * o_id = (fid->f_seq & 0x7fff) << 16 | fid->f_oid;
+ * o_seq = 0; // formerly group number
+ *
+ * NOTE: This assumes that no more than 2^48-1 objects have ever been created
+ * on any OST, and that no more than 65535 OSTs are in use. Both are very
+ * reasonable assumptions, i.e. an IDIF can uniquely map all objects assuming
+ * a maximum creation rate of 1M objects per second for a maximum of 9 years,
+ * or combinations thereof. A short sketch of this mapping appears just
+ * after this comment block.
+ *
+ * OST_MDT0
+ * Surrogate FID used to identify an existing object on OLD formatted OST
+ * filesystem. Belongs to the reserved SEQuence 0, and is used prior to
+ * the introduction of FID-on-OST, at which point IDIF will be used to
+ * identify objects as residing on a specific OST.
+ *
+ * LLOG
+ * For Lustre Log objects the object sequence 1 is used. This is compatible
+ * with both OLD and NEW namespaces, as this SEQ number is in the
+ * ext3/ldiskfs reserved inode range and does not conflict with IGIF
+ * sequence numbers.
+ *
+ * ECHO
+ * For testing OST IO performance the object sequence 2 is used. This is
+ * compatible with both OLD and NEW namespaces, as this SEQ number is in
+ * the ext3/ldiskfs reserved inode range and does not conflict with IGIF
+ * sequence numbers.
+ *
+ * OST_MDT1 .. OST_MAX
+ * For testing with multiple MDTs the object sequence 3 through 9 is used,
+ * allowing direct mapping of MDTs 1 through 7 respectively, for a total
+ * of 8 MDTs including OST_MDT0. This matches the legacy CMD project "group"
+ * mappings. However, this SEQ range is only for testing prior to any
+ * production DNE release, as the objects in this range conflict across all
+ * OSTs, as the OST index is not part of the FID. For production DNE usage,
+ * OST objects created by MDT1+ will use FID_SEQ_NORMAL FIDs.
+ *
+ * DLM OST objid to IDIF mapping
+ * For compatibility with existing OLD OST network protocol structures, the
+ * FID must map onto the o_id and o_seq in a manner that ensures existing
+ * objects are identified consistently for IO, as well as onto the LDLM
+ * namespace to ensure that, for IDIFs, there is only a single resource name for any
+ * object in the DLM. The OLD OST object DLM resource mapping is:
+ *
+ * resource[] = {o_id, o_seq, 0, 0}; // o_seq == 0 for production releases
+ *
+ * The NEW OST object DLM resource mapping is the same for both MDT and OST:
+ *
+ * resource[] = {SEQ, OID, VER, HASH};
+ *
+ * NOTE: for mapping IDIF values to DLM resource names the o_id may be
+ * larger than the 2^33 reserved sequence numbers for IDIF, so it is possible
+ * for the o_id numbers to overlap FID SEQ numbers in the resource. However,
+ * in all production releases the OLD o_seq field is always zero, and all
+ * valid FID OID values are non-zero, so the lock resources will not collide.
+ * Even so, the MDT and OST resources are also in different LDLM namespaces.
+ */
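A hedged sketch of the IDIF construction described above; the helper name and output convention are hypothetical, and only the bit layout follows the formulas given in the comment:

/* Build the IDIF (SEQ, OID) pair for a pre-FID OST object:
 *   SEQ = 1 << 32 | (ost_index << 16) | ((objid >> 32) & 0xffff)
 *   OID = objid & 0xffffffff
 */
static inline void ost_objid_to_idif(__u64 objid, __u32 ost_index,
                                     __u64 *seq, __u32 *oid)
{
        *seq = (1ULL << 32) | ((__u64)ost_index << 16) |
               ((objid >> 32) & 0xffff);
        *oid = objid & 0xffffffff;
}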
+
+#include <linux/libcfs/libcfs.h>
+#include <lustre/lustre_idl.h>
+
+struct lu_env;
+struct lu_site;
+struct lu_context;
+struct obd_device;
+struct obd_export;
+
+/* Whole sequences space range and zero range definitions */
+extern const struct lu_seq_range LUSTRE_SEQ_SPACE_RANGE;
+extern const struct lu_seq_range LUSTRE_SEQ_ZERO_RANGE;
+extern const struct lu_fid LUSTRE_BFL_FID;
+extern const struct lu_fid LU_OBF_FID;
+extern const struct lu_fid LU_DOT_LUSTRE_FID;
+
+enum {
+ /*
+ * This is how many metadata FIDs may be allocated in one sequence (128k)
+ */
+ LUSTRE_METADATA_SEQ_MAX_WIDTH = 0x0000000000020000ULL,
+
+ /*
+ * This is how many data FIDs could be allocated in one sequence (4B - 1)
+ */
+ LUSTRE_DATA_SEQ_MAX_WIDTH = 0x00000000FFFFFFFFULL,
+
+ /*
+ * How many sequences to allocate to a client at once.
+ */
+ LUSTRE_SEQ_META_WIDTH = 0x0000000000000001ULL,
+
+ /*
+ * seq allocation pool size.
+ */
+ LUSTRE_SEQ_BATCH_WIDTH = LUSTRE_SEQ_META_WIDTH * 1000,
+
+ /*
+ * This is how many sequences may be in one super-sequence allocated to
+ * MDTs.
+ */
+ LUSTRE_SEQ_SUPER_WIDTH = ((1ULL << 30ULL) * LUSTRE_SEQ_META_WIDTH)
+};
+
+enum {
+ /** 2^6 FIDs for OI containers */
+ OSD_OI_FID_OID_BITS = 6,
+ /** reserve enough FIDs in case we want more in the future */
+ OSD_OI_FID_OID_BITS_MAX = 10,
+};
+
+/** special OID for local objects */
+enum local_oid {
+ /** \see fld_mod_init */
+ FLD_INDEX_OID = 3UL,
+ /** \see fid_mod_init */
+ FID_SEQ_CTL_OID = 4UL,
+ FID_SEQ_SRV_OID = 5UL,
+ /** \see mdd_mod_init */
+ MDD_ROOT_INDEX_OID = 6UL, /* deprecated in 2.4 */
+ MDD_ORPHAN_OID = 7UL, /* deprecated in 2.4 */
+ MDD_LOV_OBJ_OID = 8UL,
+ MDD_CAPA_KEYS_OID = 9UL,
+ /** \see mdt_mod_init */
+ LAST_RECV_OID = 11UL,
+ OSD_FS_ROOT_OID = 13UL,
+ ACCT_USER_OID = 15UL,
+ ACCT_GROUP_OID = 16UL,
+ LFSCK_BOOKMARK_OID = 17UL,
+ OTABLE_IT_OID = 18UL,
+ /* These two definitions are obsolete
+ * OFD_GROUP0_LAST_OID = 20UL,
+ * OFD_GROUP4K_LAST_OID = 20UL+4096,
+ */
+ OFD_LAST_GROUP_OID = 4117UL,
+ LLOG_CATALOGS_OID = 4118UL,
+ MGS_CONFIGS_OID = 4119UL,
+ OFD_HEALTH_CHECK_OID = 4120UL,
+ MDD_LOV_OBJ_OSEQ = 4121UL,
+ LFSCK_NAMESPACE_OID = 4122UL,
+ REMOTE_PARENT_DIR_OID = 4123UL,
+};
+
+static inline void lu_local_obj_fid(struct lu_fid *fid, __u32 oid)
+{
+ fid->f_seq = FID_SEQ_LOCAL_FILE;
+ fid->f_oid = oid;
+ fid->f_ver = 0;
+}
+
+static inline void lu_local_name_obj_fid(struct lu_fid *fid, __u32 oid)
+{
+ fid->f_seq = FID_SEQ_LOCAL_NAME;
+ fid->f_oid = oid;
+ fid->f_ver = 0;
+}
+
+/* For new FS (>= 2.4), the root FID will be changed to
+ * [FID_SEQ_ROOT:1:0], for existing FS, (upgraded to 2.4),
+ * the root FID will still be IGIF */
+static inline int fid_is_root(const struct lu_fid *fid)
+{
+ return unlikely((fid_seq(fid) == FID_SEQ_ROOT &&
+ fid_oid(fid) == 1));
+}
+
+static inline int fid_is_dot_lustre(const struct lu_fid *fid)
+{
+ return unlikely(fid_seq(fid) == FID_SEQ_DOT_LUSTRE &&
+ fid_oid(fid) == FID_OID_DOT_LUSTRE);
+}
+
+static inline int fid_is_obf(const struct lu_fid *fid)
+{
+ return unlikely(fid_seq(fid) == FID_SEQ_DOT_LUSTRE &&
+ fid_oid(fid) == FID_OID_DOT_LUSTRE_OBF);
+}
+
+static inline int fid_is_otable_it(const struct lu_fid *fid)
+{
+ return unlikely(fid_seq(fid) == FID_SEQ_LOCAL_FILE &&
+ fid_oid(fid) == OTABLE_IT_OID);
+}
+
+static inline int fid_is_acct(const struct lu_fid *fid)
+{
+ return fid_seq(fid) == FID_SEQ_LOCAL_FILE &&
+ (fid_oid(fid) == ACCT_USER_OID ||
+ fid_oid(fid) == ACCT_GROUP_OID);
+}
+
+static inline int fid_is_quota(const struct lu_fid *fid)
+{
+ return fid_seq(fid) == FID_SEQ_QUOTA ||
+ fid_seq(fid) == FID_SEQ_QUOTA_GLB;
+}
+
+static inline int fid_is_namespace_visible(const struct lu_fid *fid)
+{
+ const __u64 seq = fid_seq(fid);
+
+ /* Here we cannot distinguish whether a normal FID refers to an OST
+ * object or not; it is the caller's duty to check further if needed. */
+ return (!fid_is_last_id(fid) &&
+ (fid_seq_is_norm(seq) || fid_seq_is_igif(seq))) ||
+ fid_is_root(fid) || fid_is_dot_lustre(fid);
+}
+
+static inline int fid_seq_in_fldb(__u64 seq)
+{
+ return fid_seq_is_igif(seq) || fid_seq_is_norm(seq) ||
+ fid_seq_is_root(seq) || fid_seq_is_dot(seq);
+}
+
+static inline void lu_last_id_fid(struct lu_fid *fid, __u64 seq)
+{
+ if (fid_seq_is_mdt0(seq)) {
+ fid->f_seq = fid_idif_seq(0, 0);
+ } else {
+ LASSERTF(fid_seq_is_norm(seq) || fid_seq_is_echo(seq) ||
+ fid_seq_is_idif(seq), LPX64"\n", seq);
+ fid->f_seq = seq;
+ }
+ fid->f_oid = 0;
+ fid->f_ver = 0;
+}
+
+/* seq client type */
+enum lu_cli_type {
+ LUSTRE_SEQ_METADATA = 1,
+ LUSTRE_SEQ_DATA
+};
+
+enum lu_mgr_type {
+ LUSTRE_SEQ_SERVER,
+ LUSTRE_SEQ_CONTROLLER
+};
+
+struct lu_server_seq;
+
+/* Client sequence manager interface. */
+struct lu_client_seq {
+ /* Sequence-controller export. */
+ struct obd_export *lcs_exp;
+ struct mutex lcs_mutex;
+
+ /*
+ * Range of sequences allowed for allocation. When lu_client_seq is used on
+ * clients this contains the meta-sequence range; for servers it contains
+ * the super-sequence range.
+ */
+ struct lu_seq_range lcs_space;
+
+ /* Seq related proc */
+ struct proc_dir_entry *lcs_proc_dir;
+
+ /* This holds last allocated fid in last obtained seq */
+ struct lu_fid lcs_fid;
+
+ /* LUSTRE_SEQ_METADATA or LUSTRE_SEQ_DATA */
+ enum lu_cli_type lcs_type;
+
+ /*
+ * Service UUID passed from the MDT, combined with the seq name to form a
+ * unique seq name for use with procfs.
+ */
+ char lcs_name[80];
+
+ /*
+ * Sequence width, i.e. how many objects may be allocated in one
+ * sequence. The default value is LUSTRE_SEQ_MAX_WIDTH.
+ */
+ __u64 lcs_width;
+
+ /* Seq-server for direct communication */
+ struct lu_server_seq *lcs_srv;
+
+ /* wait queue for fid allocation and update indicator */
+ wait_queue_head_t lcs_waitq;
+ int lcs_update;
+};
+
+/* server sequence manager interface */
+struct lu_server_seq {
+ /* Available sequences space */
+ struct lu_seq_range lss_space;
+
+ /* keeps highwater in lsr_end for seq allocation algorithm */
+ struct lu_seq_range lss_lowater_set;
+ struct lu_seq_range lss_hiwater_set;
+
+ /*
+ * Device used by the server-side seq manager to save sequences to the
+ * backing store.
+ */
+ struct dt_device *lss_dev;
+
+ /* /seq file object device */
+ struct dt_object *lss_obj;
+
+ /* Seq related proc */
+ struct proc_dir_entry *lss_proc_dir;
+
+ /* LUSTRE_SEQ_SERVER or LUSTRE_SEQ_CONTROLLER */
+ enum lu_mgr_type lss_type;
+
+ /* Client interface to the request controller */
+ struct lu_client_seq *lss_cli;
+
+ /* Mutex for protecting allocation */
+ struct mutex lss_mutex;
+
+ /*
+ * Service UUID passed from the MDT, combined with the seq name to form a
+ * unique seq name for use with procfs.
+ */
+ char lss_name[80];
+
+ /*
+ * Allocation chunks for super and meta sequences. Default values are
+ * LUSTRE_SEQ_SUPER_WIDTH and LUSTRE_SEQ_META_WIDTH.
+ */
+ __u64 lss_width;
+
+ /*
+ * minimum lss_alloc_set size that should be allocated from
+ * lss_space
+ */
+ __u64 lss_set_width;
+
+ /* sync is needed for update operation */
+ __u32 lss_need_sync;
+
+ /**
+ * Pointer to site object, required to access site fld.
+ */
+ struct seq_server_site *lss_site;
+};
+
+struct com_thread_info;
+int seq_query(struct com_thread_info *info);
+
+struct ptlrpc_request;
+int seq_handle(struct ptlrpc_request *req);
+
+/* Server methods */
+
+int seq_server_init(struct lu_server_seq *seq,
+ struct dt_device *dev,
+ const char *prefix,
+ enum lu_mgr_type type,
+ struct seq_server_site *ss,
+ const struct lu_env *env);
+
+void seq_server_fini(struct lu_server_seq *seq,
+ const struct lu_env *env);
+
+int seq_server_alloc_super(struct lu_server_seq *seq,
+ struct lu_seq_range *out,
+ const struct lu_env *env);
+
+int seq_server_alloc_meta(struct lu_server_seq *seq,
+ struct lu_seq_range *out,
+ const struct lu_env *env);
+
+int seq_server_set_cli(struct lu_server_seq *seq,
+ struct lu_client_seq *cli,
+ const struct lu_env *env);
+
+/* Client methods */
+int seq_client_init(struct lu_client_seq *seq,
+ struct obd_export *exp,
+ enum lu_cli_type type,
+ const char *prefix,
+ struct lu_server_seq *srv);
+
+void seq_client_fini(struct lu_client_seq *seq);
+
+void seq_client_flush(struct lu_client_seq *seq);
+
+int seq_client_alloc_fid(const struct lu_env *env, struct lu_client_seq *seq,
+ struct lu_fid *fid);
+int seq_client_get_seq(const struct lu_env *env, struct lu_client_seq *seq,
+ seqno_t *seqnr);
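+
+/*
+ * A rough usage sketch of the client methods above (illustration only;
+ * @exp, @env and the embedding obd structures are assumed to exist, and
+ * error handling is omitted):
+ *
+ *    struct lu_client_seq *seq = ...;
+ *    struct lu_fid fid;
+ *    int rc;
+ *
+ *    rc = seq_client_init(seq, exp, LUSTRE_SEQ_METADATA, "cli", NULL);
+ *    if (rc == 0)
+ *            rc = seq_client_alloc_fid(env, seq, &fid);
+ *    // on success, @fid holds the next FID from the client's sequence
+ *    seq_client_fini(seq);
+ */
+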
+int seq_site_fini(const struct lu_env *env, struct seq_server_site *ss);
+/* Fids common stuff */
+int fid_is_local(const struct lu_env *env,
+ struct lu_site *site, const struct lu_fid *fid);
+
+enum lu_cli_type;
+int client_fid_init(struct obd_device *obd, struct obd_export *exp,
+ enum lu_cli_type type);
+int client_fid_fini(struct obd_device *obd);
+
+/* fid locking */
+
+struct ldlm_namespace;
+
+/*
+ * Build (DLM) resource name from FID.
+ *
+ * NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
+ * but was moved into name[1] along with the OID to avoid consuming the
+ * remaining name[2,3] fields that need to be used for the quota identifier.
+ */
+static inline struct ldlm_res_id *
+fid_build_reg_res_name(const struct lu_fid *fid, struct ldlm_res_id *res)
+{
+ memset(res, 0, sizeof(*res));
+ res->name[LUSTRE_RES_ID_SEQ_OFF] = fid_seq(fid);
+ res->name[LUSTRE_RES_ID_VER_OID_OFF] = fid_ver_oid(fid);
+
+ return res;
+}
+
+/*
+ * Return true if resource is for object identified by FID.
+ */
+static inline int fid_res_name_eq(const struct lu_fid *fid,
+ const struct ldlm_res_id *res)
+{
+ return res->name[LUSTRE_RES_ID_SEQ_OFF] == fid_seq(fid) &&
+ res->name[LUSTRE_RES_ID_VER_OID_OFF] == fid_ver_oid(fid);
+}
+
+/*
+ * Extract FID from LDLM resource. Reverse of fid_build_reg_res_name().
+ */
+static inline struct lu_fid *
+fid_extract_from_res_name(struct lu_fid *fid, const struct ldlm_res_id *res)
+{
+ fid->f_seq = res->name[LUSTRE_RES_ID_SEQ_OFF];
+ fid->f_oid = (__u32)(res->name[LUSTRE_RES_ID_VER_OID_OFF]);
+ fid->f_ver = (__u32)(res->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
+ LASSERT(fid_res_name_eq(fid, res));
+
+ return fid;
+}
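+
+/*
+ * Round-trip sketch (illustration only): given a FID @fid, the three helpers
+ * above compose as follows.
+ *
+ *    struct ldlm_res_id res;
+ *    struct lu_fid copy;
+ *
+ *    fid_build_reg_res_name(fid, &res);
+ *    LASSERT(fid_res_name_eq(fid, &res));
+ *    fid_extract_from_res_name(&copy, &res);
+ *    // copy now holds the same SEQ, OID and VER as *fid
+ */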
+
+/*
+ * Build (DLM) resource identifier from global quota FID and quota ID.
+ */
+static inline struct ldlm_res_id *
+fid_build_quota_res_name(const struct lu_fid *glb_fid, union lquota_id *qid,
+ struct ldlm_res_id *res)
+{
+ fid_build_reg_res_name(glb_fid, res);
+ res->name[LUSTRE_RES_ID_QUOTA_SEQ_OFF] = fid_seq(&qid->qid_fid);
+ res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF] = fid_ver_oid(&qid->qid_fid);
+
+ return res;
+}
+
+/*
+ * Extract global FID and quota ID from resource name
+ */
+static inline void fid_extract_from_quota_res(struct lu_fid *glb_fid,
+ union lquota_id *qid,
+ const struct ldlm_res_id *res)
+{
+ fid_extract_from_res_name(glb_fid, res);
+ qid->qid_fid.f_seq = res->name[LUSTRE_RES_ID_QUOTA_SEQ_OFF];
+ qid->qid_fid.f_oid = (__u32)res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF];
+ qid->qid_fid.f_ver =
+ (__u32)(res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF] >> 32);
+}
+
+static inline struct ldlm_res_id *
+fid_build_pdo_res_name(const struct lu_fid *fid, unsigned int hash,
+ struct ldlm_res_id *res)
+{
+ fid_build_reg_res_name(fid, res);
+ res->name[LUSTRE_RES_ID_HSH_OFF] = hash;
+
+ return res;
+}
+
+/**
+ * Build a DLM resource name from object id & seq; this will eventually be
+ * removed once ost_id is replaced with FID in the data stack.
+ *
+ * Currently a resid from an old client has res[0] = object_id and
+ * res[1] = object_seq, which is the opposite of the metadata
+ * resid, where res[0] = fid->f_seq and res[1] = fid->f_oid.
+ * To unify resid identification, the data resid is reversed to match
+ * the metadata resid, i.e.:
+ *
+ * For a resid from an old client,
+ * res[0] = objid, res[1] = 0; the original order is kept
+ * for compatibility.
+ *
+ * For a new resid,
+ * res is built from the normal FID directly, i.e. res[0] = f_seq,
+ * res[1] = f_oid + f_ver.
+ */
+static inline void ostid_build_res_name(struct ost_id *oi,
+ struct ldlm_res_id *name)
+{
+ memset(name, 0, sizeof *name);
+ if (fid_seq_is_mdt0(ostid_seq(oi))) {
+ name->name[LUSTRE_RES_ID_SEQ_OFF] = ostid_id(oi);
+ name->name[LUSTRE_RES_ID_VER_OID_OFF] = ostid_seq(oi);
+ } else {
+ fid_build_reg_res_name(&oi->oi_fid, name);
+ }
+}
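+
+/*
+ * Resulting layouts (values are hypothetical):
+ *
+ *    old MDT0-style object, ostid_seq(oi) == 0, ostid_id(oi) == 0x1234:
+ *            name[LUSTRE_RES_ID_SEQ_OFF]     = 0x1234  (object id)
+ *            name[LUSTRE_RES_ID_VER_OID_OFF] = 0       (object seq)
+ *    new FID-style object, oi->oi_fid == [seq:oid:ver]:
+ *            name[LUSTRE_RES_ID_SEQ_OFF]     = seq
+ *            name[LUSTRE_RES_ID_VER_OID_OFF] = fid_ver_oid(&oi->oi_fid)
+ */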
+
+static inline void ostid_res_name_to_id(struct ost_id *oi,
+ struct ldlm_res_id *name)
+{
+ if (fid_seq_is_mdt0(name->name[LUSTRE_RES_ID_SEQ_OFF])) {
+ /* old resid */
+ ostid_set_seq(oi, name->name[LUSTRE_RES_ID_VER_OID_OFF]);
+ ostid_set_id(oi, name->name[LUSTRE_RES_ID_SEQ_OFF]);
+ } else {
+ /* new resid */
+ fid_extract_from_res_name(&oi->oi_fid, name);
+ }
+}
+
+/**
+ * Return true if the resource is for the object identified by this id & group.
+ */
+static inline int ostid_res_name_eq(struct ost_id *oi,
+ struct ldlm_res_id *name)
+{
+ /* Note: this is just a shortcut to save some effort; the correct way
+ * would probably be to convert both into FIDs and compare them */
+ if (fid_seq_is_mdt0(ostid_seq(oi))) {
+ return name->name[LUSTRE_RES_ID_SEQ_OFF] == ostid_id(oi) &&
+ name->name[LUSTRE_RES_ID_VER_OID_OFF] == ostid_seq(oi);
+ } else {
+ return name->name[LUSTRE_RES_ID_SEQ_OFF] == ostid_seq(oi) &&
+ name->name[LUSTRE_RES_ID_VER_OID_OFF] == ostid_id(oi);
+ }
+}
+
+/* The same as osc_build_res_name() */
+static inline void ost_fid_build_resid(const struct lu_fid *fid,
+ struct ldlm_res_id *resname)
+{
+ if (fid_is_mdt0(fid) || fid_is_idif(fid)) {
+ struct ost_id oi;
+ oi.oi.oi_id = 0; /* gcc 4.7.2 complains otherwise */
+ if (fid_to_ostid(fid, &oi) != 0)
+ return;
+ ostid_build_res_name(&oi, resname);
+ } else {
+ fid_build_reg_res_name(fid, resname);
+ }
+}
+
+static inline void ost_fid_from_resid(struct lu_fid *fid,
+ const struct ldlm_res_id *name)
+{
+ if (fid_seq_is_mdt0(name->name[LUSTRE_RES_ID_VER_OID_OFF])) {
+ /* old resid */
+ struct ost_id oi;
+ ostid_set_seq(&oi, name->name[LUSTRE_RES_ID_VER_OID_OFF]);
+ ostid_set_id(&oi, name->name[LUSTRE_RES_ID_SEQ_OFF]);
+ ostid_to_fid(fid, &oi, 0);
+ } else {
+ /* new resid */
+ fid_extract_from_res_name(fid, name);
+ }
+}
+
+/**
+ * Flatten 128-bit FID values into a 64-bit value for use as an inode number.
+ * For non-IGIF FIDs this starts just over 2^32, and continues without
+ * conflict until 2^64, at which point we wrap the high 24 bits of the SEQ
+ * into the range where there may not be many OID values in use, to minimize
+ * the risk of conflict.
+ *
+ * Assuming LUSTRE_SEQ_MAX_WIDTH is less than (1 << 24), which is currently
+ * true, the time between re-used inode numbers is very long: 2^40 SEQ
+ * numbers, or about 2^40 client mounts, if clients create fewer than 2^24
+ * files per mount.
+ */
+static inline __u64 fid_flatten(const struct lu_fid *fid)
+{
+ __u64 ino;
+ __u64 seq;
+
+ if (fid_is_igif(fid)) {
+ ino = lu_igif_ino(fid);
+ return ino;
+ }
+
+ seq = fid_seq(fid);
+
+ ino = (seq << 24) + ((seq >> 24) & 0xffffff0000ULL) + fid_oid(fid);
+
+ return ino ? ino : fid_oid(fid);
+}
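+
+/*
+ * Worked example for a hypothetical FID [0x200000400:0x5:0x0]:
+ *
+ *    seq = 0x200000400
+ *    ino = (seq << 24)                      = 0x200000400000000
+ *        + ((seq >> 24) & 0xffffff0000ULL)  = 0x0
+ *        + fid_oid(fid)                     = 0x5
+ *                                           = 0x200000400000005
+ */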
+
+static inline __u32 fid_hash(const struct lu_fid *f, int bits)
+{
+ /* all objects with the same id and different versions will belong to
+ * the same collision list. */
+ return cfs_hash_long(fid_flatten(f), bits);
+}
+
+/**
+ * Map a FID to a 32-bit value for use as an inode number on 32-bit systems.
+ */
+static inline __u32 fid_flatten32(const struct lu_fid *fid)
+{
+ __u32 ino;
+ __u64 seq;
+
+ if (fid_is_igif(fid)) {
+ ino = lu_igif_ino(fid);
+ return ino;
+ }
+
+ seq = fid_seq(fid) - FID_SEQ_START;
+
+ /* Map the high bits of the OID into higher bits of the inode number so
+ * that inodes generated at about the same time have a reduced chance
+ * of collisions. This will give a period of 2^12 = 1024 unique clients
+ * (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects
+ * (from OID), or up to 128M inodes without collisions for new files. */
+ ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
+ (seq >> (64 - (40-8)) & 0xffffff00) +
+ (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8);
+
+ return ino ? ino : fid_oid(fid);
+}
+
+static inline int lu_fid_diff(struct lu_fid *fid1, struct lu_fid *fid2)
+{
+ LASSERTF(fid_seq(fid1) == fid_seq(fid2), "fid1:"DFID", fid2:"DFID"\n",
+ PFID(fid1), PFID(fid2));
+
+ if (fid_is_idif(fid1) && fid_is_idif(fid2))
+ return fid_idif_id(fid1->f_seq, fid1->f_oid, fid1->f_ver) -
+ fid_idif_id(fid2->f_seq, fid2->f_oid, fid2->f_ver);
+
+ return fid_oid(fid1) - fid_oid(fid2);
+}
+
+#define LUSTRE_SEQ_SRV_NAME "seq_srv"
+#define LUSTRE_SEQ_CTL_NAME "seq_ctl"
+
+/* Range common stuff */
+static inline void range_cpu_to_le(struct lu_seq_range *dst, const struct lu_seq_range *src)
+{
+ dst->lsr_start = cpu_to_le64(src->lsr_start);
+ dst->lsr_end = cpu_to_le64(src->lsr_end);
+ dst->lsr_index = cpu_to_le32(src->lsr_index);
+ dst->lsr_flags = cpu_to_le32(src->lsr_flags);
+}
+
+static inline void range_le_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src)
+{
+ dst->lsr_start = le64_to_cpu(src->lsr_start);
+ dst->lsr_end = le64_to_cpu(src->lsr_end);
+ dst->lsr_index = le32_to_cpu(src->lsr_index);
+ dst->lsr_flags = le32_to_cpu(src->lsr_flags);
+}
+
+static inline void range_cpu_to_be(struct lu_seq_range *dst, const struct lu_seq_range *src)
+{
+ dst->lsr_start = cpu_to_be64(src->lsr_start);
+ dst->lsr_end = cpu_to_be64(src->lsr_end);
+ dst->lsr_index = cpu_to_be32(src->lsr_index);
+ dst->lsr_flags = cpu_to_be32(src->lsr_flags);
+}
+
+static inline void range_be_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src)
+{
+ dst->lsr_start = be64_to_cpu(src->lsr_start);
+ dst->lsr_end = be64_to_cpu(src->lsr_end);
+ dst->lsr_index = be32_to_cpu(src->lsr_index);
+ dst->lsr_flags = be32_to_cpu(src->lsr_flags);
+}
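+
+/*
+ * Sketch of how these helpers are typically paired (illustration only):
+ *
+ *    struct lu_seq_range ondisk;
+ *
+ *    range_cpu_to_le(&ondisk, range);   // before writing @range out
+ *    ...
+ *    range_le_to_cpu(range, &ondisk);   // after reading it back
+ */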
+
+/** @} fid */
+
+#endif /* __LUSTRE_FID_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_fld.h b/drivers/staging/lustre/lustre/include/lustre_fld.h
new file mode 100644
index 0000000..550fff5
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_fld.h
@@ -0,0 +1,161 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2013, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __LINUX_FLD_H
+#define __LINUX_FLD_H
+
+/** \defgroup fld fld
+ *
+ * @{
+ */
+
+#include <lustre/lustre_idl.h>
+#include <linux/libcfs/libcfs.h>
+
+struct lu_client_fld;
+struct lu_server_fld;
+struct lu_fld_hash;
+struct fld_cache;
+
+extern const struct dt_index_features fld_index_features;
+extern const char fld_index_name[];
+
+/*
+ * FLD (Fid Location Database) interface.
+ */
+enum {
+ LUSTRE_CLI_FLD_HASH_DHT = 0,
+ LUSTRE_CLI_FLD_HASH_RRB
+};
+
+
+struct lu_fld_target {
+ struct list_head ft_chain;
+ struct obd_export *ft_exp;
+ struct lu_server_fld *ft_srv;
+ __u64 ft_idx;
+};
+
+struct lu_server_fld {
+ /**
+ * Fld dir proc entry. */
+ struct proc_dir_entry *lsf_proc_dir;
+
+ /**
+ * /fld file object device */
+ struct dt_object *lsf_obj;
+
+ /**
+ * super sequence controller export, needed to forward fld
+ * lookup request. */
+ struct obd_export *lsf_control_exp;
+
+ /**
+ * Client FLD cache. */
+ struct fld_cache *lsf_cache;
+
+ /**
+ * Protect index modifications */
+ struct mutex lsf_lock;
+
+ /**
+ * Fld service name in form "fld-srv-lustre-MDTXXX" */
+ char lsf_name[80];
+
+};
+
+struct lu_client_fld {
+ /**
+ * Client side proc entry. */
+ struct proc_dir_entry *lcf_proc_dir;
+
+ /**
+ * List of exports client FLD knows about. */
+ struct list_head lcf_targets;
+
+ /**
+ * Current hash to be used to chose an export. */
+ struct lu_fld_hash *lcf_hash;
+
+ /**
+ * Exports count. */
+ int lcf_count;
+
+ /**
+ * Lock protecting exports list and fld_hash. */
+ spinlock_t lcf_lock;
+
+ /**
+ * Client FLD cache. */
+ struct fld_cache *lcf_cache;
+
+ /**
+ * Client fld proc entry name. */
+ char lcf_name[80];
+
+ int lcf_flags;
+};
+
+/* Client methods */
+int fld_client_init(struct lu_client_fld *fld,
+ const char *prefix, int hash);
+
+void fld_client_fini(struct lu_client_fld *fld);
+
+void fld_client_flush(struct lu_client_fld *fld);
+
+int fld_client_lookup(struct lu_client_fld *fld, seqno_t seq, mdsno_t *mds,
+ __u32 flags, const struct lu_env *env);
+
+int fld_client_create(struct lu_client_fld *fld,
+ struct lu_seq_range *range,
+ const struct lu_env *env);
+
+int fld_client_delete(struct lu_client_fld *fld,
+ seqno_t seq,
+ const struct lu_env *env);
+
+int fld_client_add_target(struct lu_client_fld *fld,
+ struct lu_fld_target *tar);
+
+int fld_client_del_target(struct lu_client_fld *fld,
+ __u64 idx);
+
+void fld_client_proc_fini(struct lu_client_fld *fld);
+
+/** @} fld */
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_fsfilt.h b/drivers/staging/lustre/lustre/include/lustre_fsfilt.h
new file mode 100644
index 0000000..9dcc332
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_fsfilt.h
@@ -0,0 +1,48 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/include/lustre_fsfilt.h
+ *
+ * Filesystem interface helper.
+ */
+
+#ifndef _LUSTRE_FSFILT_H
+#define _LUSTRE_FSFILT_H
+
+#include <linux/lustre_fsfilt.h>
+
+#define LU221_BAD_TIME (0x80000000U + 24 * 3600)
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_ha.h b/drivers/staging/lustre/lustre/include/lustre_ha.h
new file mode 100644
index 0000000..105f6d6
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_ha.h
@@ -0,0 +1,67 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef _LUSTRE_HA_H
+#define _LUSTRE_HA_H
+
+/** \defgroup ha ha
+ *
+ * @{
+ */
+
+struct obd_import;
+struct obd_export;
+struct obd_device;
+struct ptlrpc_request;
+
+
+int ptlrpc_replay(struct obd_import *imp);
+int ptlrpc_resend(struct obd_import *imp);
+void ptlrpc_free_committed(struct obd_import *imp);
+void ptlrpc_wake_delayed(struct obd_import *imp);
+int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid, int async);
+int ptlrpc_set_import_active(struct obd_import *imp, int active);
+void ptlrpc_activate_import(struct obd_import *imp);
+void ptlrpc_deactivate_import(struct obd_import *imp);
+void ptlrpc_invalidate_import(struct obd_import *imp);
+void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt);
+int ptlrpc_check_suspend(void);
+void ptlrpc_activate_timeouts(struct obd_import *imp);
+void ptlrpc_deactivate_timeouts(struct obd_import *imp);
+
+/** @} ha */
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_handles.h b/drivers/staging/lustre/lustre/include/lustre_handles.h
new file mode 100644
index 0000000..fcd40f3
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_handles.h
@@ -0,0 +1,93 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __LUSTRE_HANDLES_H_
+#define __LUSTRE_HANDLES_H_
+
+/** \defgroup handles handles
+ *
+ * @{
+ */
+
+#include <linux/lustre_handles.h>
+
+#include <linux/libcfs/libcfs.h>
+
+
+struct portals_handle_ops {
+ void (*hop_addref)(void *object);
+ void (*hop_free)(void *object, int size);
+};
+
+/* These handles are most easily used by having them appear at the very top
+ * of whatever object you want to make handles for, e.g.:
+ *
+ * struct ldlm_lock {
+ * struct portals_handle handle;
+ * ...
+ * };
+ *
+ * Now you're able to assign the results of cookie2handle directly to an
+ * ldlm_lock. If it's not at the top, you'll want to use container_of()
+ * to compute the start of the structure based on the handle field. */
+struct portals_handle {
+ struct list_head h_link;
+ __u64 h_cookie;
+ struct portals_handle_ops *h_ops;
+
+ /* newly added fields to handle the RCU issue. -jxiong */
+ cfs_rcu_head_t h_rcu;
+ spinlock_t h_lock;
+ unsigned int h_size:31;
+ unsigned int h_in:1;
+};
+#define RCU2HANDLE(rcu) container_of(rcu, struct portals_handle, h_rcu)
+
+/* handles.c */
+
+/* Add a handle to the hash table */
+void class_handle_hash(struct portals_handle *,
+ struct portals_handle_ops *ops);
+void class_handle_unhash(struct portals_handle *);
+void class_handle_hash_back(struct portals_handle *);
+void *class_handle2object(__u64 cookie);
+void class_handle_free_cb(cfs_rcu_head_t *);
+int class_handle_init(void);
+void class_handle_cleanup(void);
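+
+/*
+ * Rough lifecycle sketch (illustration only; @obj and @obj_handle_ops are
+ * hypothetical, and reference counting depends on the hop_addref/hop_free
+ * callbacks supplied via portals_handle_ops):
+ *
+ *    class_handle_hash(&obj->handle, &obj_handle_ops); // assigns h_cookie
+ *    ...
+ *    ptr = class_handle2object(obj->handle.h_cookie);  // look it up later
+ *    ...
+ *    class_handle_unhash(&obj->handle);                // before freeing obj
+ */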
+
+/** @} handles */
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_idmap.h b/drivers/staging/lustre/lustre/include/lustre_idmap.h
new file mode 100644
index 0000000..2da8596
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_idmap.h
@@ -0,0 +1,104 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/lustre/include/lustre_idmap.h
+ *
+ * MDS data structures.
+ * See also lustre_idl.h for wire formats of requests.
+ */
+
+#ifndef _LUSTRE_IDMAP_H
+#define _LUSTRE_IDMAP_H
+
+/** \defgroup idmap idmap
+ *
+ * @{
+ */
+
+#include <linux/libcfs/libcfs.h>
+
+#define CFS_NGROUPS_PER_BLOCK ((int)(PAGE_CACHE_SIZE / sizeof(gid_t)))
+
+#define CFS_GROUP_AT(gi, i) \
+ ((gi)->blocks[(i) / CFS_NGROUPS_PER_BLOCK][(i) % CFS_NGROUPS_PER_BLOCK])
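+
+/*
+ * Example: with 4096-byte pages and a 4-byte gid_t, CFS_NGROUPS_PER_BLOCK
+ * is 1024, so CFS_GROUP_AT(gi, 1500) reads (gi)->blocks[1][476].
+ */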
+
+enum {
+ CFS_IC_NOTHING = 0, /* convert nothing */
+ CFS_IC_ALL = 1, /* convert all items */
+ CFS_IC_MAPPED = 2, /* convert mapped uid/gid */
+ CFS_IC_UNMAPPED = 3 /* convert unmapped uid/gid */
+};
+
+#define CFS_IDMAP_NOTFOUND (-1)
+
+#define CFS_IDMAP_HASHSIZE 32
+
+enum lustre_idmap_idx {
+ RMT_UIDMAP_IDX,
+ LCL_UIDMAP_IDX,
+ RMT_GIDMAP_IDX,
+ LCL_GIDMAP_IDX,
+ CFS_IDMAP_N_HASHES
+};
+
+struct lustre_idmap_table {
+ spinlock_t lit_lock;
+ struct list_head lit_idmaps[CFS_IDMAP_N_HASHES][CFS_IDMAP_HASHSIZE];
+};
+
+struct lu_ucred;
+
+extern void lustre_groups_from_list(struct group_info *ginfo, gid_t *glist);
+extern void lustre_groups_sort(struct group_info *group_info);
+extern int lustre_in_group_p(struct lu_ucred *mu, gid_t grp);
+
+extern int lustre_idmap_add(struct lustre_idmap_table *t,
+ uid_t ruid, uid_t luid,
+ gid_t rgid, gid_t lgid);
+extern int lustre_idmap_del(struct lustre_idmap_table *t,
+ uid_t ruid, uid_t luid,
+ gid_t rgid, gid_t lgid);
+extern int lustre_idmap_lookup_uid(struct lu_ucred *mu,
+ struct lustre_idmap_table *t,
+ int reverse, uid_t uid);
+extern int lustre_idmap_lookup_gid(struct lu_ucred *mu,
+ struct lustre_idmap_table *t,
+ int reverse, gid_t gid);
+extern struct lustre_idmap_table *lustre_idmap_init(void);
+extern void lustre_idmap_fini(struct lustre_idmap_table *t);
+
+/** @} idmap */
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h
new file mode 100644
index 0000000..67259eb
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_import.h
@@ -0,0 +1,369 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+/** \defgroup obd_import PtlRPC import definitions
+ * Imports are the client-side representation of a remote obd target.
+ *
+ * @{
+ */
+
+#ifndef __IMPORT_H
+#define __IMPORT_H
+
+/** \defgroup export export
+ *
+ * @{
+ */
+
+#include <lustre_handles.h>
+#include <lustre/lustre_idl.h>
+
+
+/**
+ * Adaptive Timeout stuff
+ *
+ * @{
+ */
+#define D_ADAPTTO D_OTHER
+#define AT_BINS 4 /* "bin" means "N seconds of history" */
+#define AT_FLG_NOHIST 0x1 /* use last reported value only */
+
+struct adaptive_timeout {
+ time_t at_binstart; /* bin start time */
+ unsigned int at_hist[AT_BINS]; /* timeout history bins */
+ unsigned int at_flags;
+ unsigned int at_current; /* current timeout value */
+ unsigned int at_worst_ever; /* worst-ever timeout value */
+ time_t at_worst_time; /* worst-ever timeout timestamp */
+ spinlock_t at_lock;
+};
+
+struct ptlrpc_at_array {
+ struct list_head *paa_reqs_array; /** array to hold requests */
+ __u32 paa_size; /** the size of array */
+ __u32 paa_count; /** the total count of reqs */
+ time_t paa_deadline; /** the earliest deadline of reqs */
+ __u32 *paa_reqs_count; /** the count of reqs in each entry */
+};
+
+#define IMP_AT_MAX_PORTALS 8
+struct imp_at {
+ int iat_portal[IMP_AT_MAX_PORTALS];
+ struct adaptive_timeout iat_net_latency;
+ struct adaptive_timeout iat_service_estimate[IMP_AT_MAX_PORTALS];
+};
+
+
+/** @} */
+
+/** Possible import states */
+enum lustre_imp_state {
+ LUSTRE_IMP_CLOSED = 1,
+ LUSTRE_IMP_NEW = 2,
+ LUSTRE_IMP_DISCON = 3,
+ LUSTRE_IMP_CONNECTING = 4,
+ LUSTRE_IMP_REPLAY = 5,
+ LUSTRE_IMP_REPLAY_LOCKS = 6,
+ LUSTRE_IMP_REPLAY_WAIT = 7,
+ LUSTRE_IMP_RECOVER = 8,
+ LUSTRE_IMP_FULL = 9,
+ LUSTRE_IMP_EVICTED = 10,
+};
+
+/** Returns the text string representation of numeric import state \a state */
+static inline char * ptlrpc_import_state_name(enum lustre_imp_state state)
+{
+ static char* import_state_names[] = {
+ "<UNKNOWN>", "CLOSED", "NEW", "DISCONN",
+ "CONNECTING", "REPLAY", "REPLAY_LOCKS", "REPLAY_WAIT",
+ "RECOVER", "FULL", "EVICTED",
+ };
+
+ LASSERT (state <= LUSTRE_IMP_EVICTED);
+ return import_state_names[state];
+}
+
+/**
+ * List of import event types
+ */
+enum obd_import_event {
+ IMP_EVENT_DISCON = 0x808001,
+ IMP_EVENT_INACTIVE = 0x808002,
+ IMP_EVENT_INVALIDATE = 0x808003,
+ IMP_EVENT_ACTIVE = 0x808004,
+ IMP_EVENT_OCD = 0x808005,
+ IMP_EVENT_DEACTIVATE = 0x808006,
+ IMP_EVENT_ACTIVATE = 0x808007,
+};
+
+/**
+ * Definition of import connection structure
+ */
+struct obd_import_conn {
+ /** Item for linking connections together */
+ struct list_head oic_item;
+ /** Pointer to actual PortalRPC connection */
+ struct ptlrpc_connection *oic_conn;
+ /** uuid of remote side */
+ struct obd_uuid oic_uuid;
+ /**
+ * Time (64 bit jiffies) of last connection attempt on this connection
+ */
+ __u64 oic_last_attempt;
+};
+
+/* state history */
+#define IMP_STATE_HIST_LEN 16
+struct import_state_hist {
+ enum lustre_imp_state ish_state;
+ time_t ish_time;
+};
+
+/**
+ * Definition of the PortalRPC import structure.
+ * Imports represent the client-side view of a remote target.
+ */
+struct obd_import {
+ /** Local handle (== id) for this import. */
+ struct portals_handle imp_handle;
+ /** Reference counter */
+ atomic_t imp_refcount;
+ struct lustre_handle imp_dlm_handle; /* client's ldlm export */
+ /** Currently active connection */
+ struct ptlrpc_connection *imp_connection;
+ /** PortalRPC client structure for this import */
+ struct ptlrpc_client *imp_client;
+ /** List element for linking into pinger chain */
+ struct list_head imp_pinger_chain;
+ /** List element for linking into chain for destruction */
+ struct list_head imp_zombie_chain;
+
+ /**
+ * Lists of requests that are retained for replay, waiting for a reply,
+ * or waiting for recovery to complete, respectively.
+ * @{
+ */
+ struct list_head imp_replay_list;
+ struct list_head imp_sending_list;
+ struct list_head imp_delayed_list;
+ /** @} */
+
+ /** obd device for this import */
+ struct obd_device *imp_obd;
+
+ /**
+ * some security-related fields
+ * @{
+ */
+ struct ptlrpc_sec *imp_sec;
+ struct mutex imp_sec_mutex;
+ cfs_time_t imp_sec_expire;
+ /** @} */
+
+ /** Wait queue for those who need to wait for recovery completion */
+ wait_queue_head_t imp_recovery_waitq;
+
+ /** Number of requests currently in-flight */
+ atomic_t imp_inflight;
+ /** Number of requests currently unregistering */
+ atomic_t imp_unregistering;
+ /** Number of replay requests inflight */
+ atomic_t imp_replay_inflight;
+ /** Number of currently happening import invalidations */
+ atomic_t imp_inval_count;
+ /** Number of request timeouts */
+ atomic_t imp_timeouts;
+ /** Current import state */
+ enum lustre_imp_state imp_state;
+ /** History of import states */
+ struct import_state_hist imp_state_hist[IMP_STATE_HIST_LEN];
+ int imp_state_hist_idx;
+ /** Current import generation. Incremented on every reconnect */
+ int imp_generation;
+ /** Incremented every time we send reconnection request */
+ __u32 imp_conn_cnt;
+ /**
+ * \see ptlrpc_free_committed remembers imp_generation value here
+ * after a check to save on unnecessary replay list iterations
+ */
+ int imp_last_generation_checked;
+ /** Last transno we replayed */
+ __u64 imp_last_replay_transno;
+ /** Last transno committed on remote side */
+ __u64 imp_peer_committed_transno;
+ /**
+ * \see ptlrpc_free_committed remembers last_transno from its last check
+ * here; if last_transno did not change since the last run of
+ * ptlrpc_free_committed and the import generation is the same, we can
+ * skip looking for requests to remove from the replay list, as an
+ * optimisation
+ */
+ __u64 imp_last_transno_checked;
+ /**
+ * Remote export handle. This is how remote side knows what export
+ * we are talking to. Filled from response to connect request
+ */
+ struct lustre_handle imp_remote_handle;
+ /** When to perform next ping. time in jiffies. */
+ cfs_time_t imp_next_ping;
+ /** When we last successfully connected, in 64-bit jiffies */
+ __u64 imp_last_success_conn;
+
+ /** List of all possible connection for import. */
+ struct list_head imp_conn_list;
+ /**
+ * Current connection. \a imp_connection is imp_conn_current->oic_conn
+ */
+ struct obd_import_conn *imp_conn_current;
+
+ /** Protects flags, level, generation, conn_cnt, *_list */
+ spinlock_t imp_lock;
+
+ /* flags */
+ unsigned long imp_no_timeout:1, /* timeouts are disabled */
+ imp_invalid:1, /* evicted */
+ /* administratively disabled */
+ imp_deactive:1,
+ /* try to recover the import */
+ imp_replayable:1,
+ /* don't run recovery (timeout instead) */
+ imp_dlm_fake:1,
+ /* use 1/2 timeout on MDS' OSCs */
+ imp_server_timeout:1,
+ /* VBR: imp in delayed recovery */
+ imp_delayed_recovery:1,
+ /* VBR: if a gap was found, no lock replays are done */
+ imp_no_lock_replay:1,
+ /* recovery by versions was failed */
+ imp_vbr_failed:1,
+ /* force an immediate ping */
+ imp_force_verify:1,
+ /* force a scheduled ping */
+ imp_force_next_verify:1,
+ /* pingable */
+ imp_pingable:1,
+ /* resend for replay */
+ imp_resend_replay:1,
+ /* disable normal recovery, for test only. */
+ imp_no_pinger_recover:1,
+ /* need IR MNE swab */
+ imp_need_mne_swab:1,
+ /* import must be reconnected instead of
+ * choosing a new connection */
+ imp_force_reconnect:1,
+ /* import has tried to connect with server */
+ imp_connect_tried:1;
+ __u32 imp_connect_op;
+ struct obd_connect_data imp_connect_data;
+ __u64 imp_connect_flags_orig;
+ int imp_connect_error;
+
+ __u32 imp_msg_magic;
+ __u32 imp_msghdr_flags; /* adjusted based on server capability */
+
+ struct ptlrpc_request_pool *imp_rq_pool; /* emergency request pool */
+
+ struct imp_at imp_at; /* adaptive timeout data */
+ time_t imp_last_reply_time; /* for health check */
+};
+
+typedef void (*obd_import_callback)(struct obd_import *imp, void *closure,
+ int event, void *event_arg, void *cb_data);
+
+/**
+ * Structure for an import observer.
+ * It is possible to register an "observer" on an import, and every time
+ * something happens to the import (like connect/evict/disconnect)
+ * the observer will get its callback called with the event type.
+ */
+struct obd_import_observer {
+ struct list_head oio_chain;
+ obd_import_callback oio_cb;
+ void *oio_cb_data;
+};
+
+void class_observe_import(struct obd_import *imp, obd_import_callback cb,
+ void *cb_data);
+void class_unobserve_import(struct obd_import *imp, obd_import_callback cb,
+ void *cb_data);
+void class_notify_import_observers(struct obd_import *imp, int event,
+ void *event_arg);
+
+/* import.c */
+static inline unsigned int at_est2timeout(unsigned int val)
+{
+ /* add an arbitrary minimum: 125% +5 sec */
+ return (val + (val >> 2) + 5);
+}
+
+static inline unsigned int at_timeout2est(unsigned int val)
+{
+ /* restore estimate value from timeout: e=4/5(t-5) */
+ LASSERT(val);
+ return (max((val << 2) / 5, 5U) - 4);
+}
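+
+/*
+ * Worked example: at_est2timeout(20) = 20 + 20/4 + 5 = 30, and
+ * at_timeout2est(30) = max((30 << 2) / 5, 5) - 4 = 20, i.e. the two
+ * helpers invert each other (exactly so for this value; integer
+ * truncation makes other round trips approximate).
+ */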
+
+static inline void at_reset(struct adaptive_timeout *at, int val) {
+ spin_lock(&at->at_lock);
+ at->at_current = val;
+ at->at_worst_ever = val;
+ at->at_worst_time = cfs_time_current_sec();
+ spin_unlock(&at->at_lock);
+}
+static inline void at_init(struct adaptive_timeout *at, int val, int flags) {
+ memset(at, 0, sizeof(*at));
+ spin_lock_init(&at->at_lock);
+ at->at_flags = flags;
+ at_reset(at, val);
+}
+extern unsigned int at_min;
+static inline int at_get(struct adaptive_timeout *at) {
+ return (at->at_current > at_min) ? at->at_current : at_min;
+}
+int at_measured(struct adaptive_timeout *at, unsigned int val);
+int import_at_get_index(struct obd_import *imp, int portal);
+extern unsigned int at_max;
+#define AT_OFF (at_max == 0)
+
+/* genops.c */
+struct obd_export;
+extern struct obd_import *class_exp2cliimp(struct obd_export *);
+extern struct obd_import *class_conn2cliimp(struct lustre_handle *);
+
+/** @} import */
+
+#endif /* __IMPORT_H */
+
+/** @} obd_import */
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
new file mode 100644
index 0000000..5e11107
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_lib.h
@@ -0,0 +1,664 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/include/lustre_lib.h
+ *
+ * Basic Lustre library routines.
+ */
+
+#ifndef _LUSTRE_LIB_H
+#define _LUSTRE_LIB_H
+
+/** \defgroup lib lib
+ *
+ * @{
+ */
+
+#include <linux/libcfs/libcfs.h>
+#include <lustre/lustre_idl.h>
+#include <lustre_ver.h>
+#include <lustre_cfg.h>
+#include <linux/lustre_lib.h>
+
+/* target.c */
+struct ptlrpc_request;
+struct obd_export;
+struct lu_target;
+struct l_wait_info;
+#include <lustre_ha.h>
+#include <lustre_net.h>
+#include <lvfs.h>
+
+
+int target_pack_pool_reply(struct ptlrpc_request *req);
+int do_set_info_async(struct obd_import *imp,
+ int opcode, int version,
+ obd_count keylen, void *key,
+ obd_count vallen, void *val,
+ struct ptlrpc_request_set *set);
+
+#define OBD_RECOVERY_MAX_TIME (obd_timeout * 18) /* b13079 */
+#define OBD_MAX_IOCTL_BUFFER CONFIG_LUSTRE_OBD_MAX_IOCTL_BUFFER
+
+void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id);
+
+/* client.c */
+
+int client_sanobd_setup(struct obd_device *obddev, struct lustre_cfg* lcfg);
+struct client_obd *client_conn2cli(struct lustre_handle *conn);
+
+struct md_open_data;
+struct obd_client_handle {
+ struct lustre_handle och_fh;
+ struct lu_fid och_fid;
+ struct md_open_data *och_mod;
+ __u32 och_magic;
+ int och_flags;
+};
+#define OBD_CLIENT_HANDLE_MAGIC 0xd15ea5ed
+
+/* statfs_pack.c */
+void statfs_pack(struct obd_statfs *osfs, struct kstatfs *sfs);
+void statfs_unpack(struct kstatfs *sfs, struct obd_statfs *osfs);
+
+/* l_lock.c */
+struct lustre_lock {
+ int l_depth;
+ struct task_struct *l_owner;
+ struct semaphore l_sem;
+ spinlock_t l_spin;
+};
+
+void l_lock_init(struct lustre_lock *);
+void l_lock(struct lustre_lock *);
+void l_unlock(struct lustre_lock *);
+int l_has_lock(struct lustre_lock *);
+
+/*
+ * For md echo client
+ */
+enum md_echo_cmd {
+ ECHO_MD_CREATE = 1, /* Open/Create file on MDT */
+ ECHO_MD_MKDIR = 2, /* Mkdir on MDT */
+ ECHO_MD_DESTROY = 3, /* Unlink file on MDT */
+ ECHO_MD_RMDIR = 4, /* Rmdir on MDT */
+ ECHO_MD_LOOKUP = 5, /* Lookup on MDT */
+ ECHO_MD_GETATTR = 6, /* Getattr on MDT */
+ ECHO_MD_SETATTR = 7, /* Setattr on MDT */
+ ECHO_MD_ALLOC_FID = 8, /* Get FIDs from MDT */
+};
+
+/*
+ * OBD IOCTLS
+ */
+#define OBD_IOCTL_VERSION 0x00010004
+
+struct obd_ioctl_data {
+ __u32 ioc_len;
+ __u32 ioc_version;
+
+ union {
+ __u64 ioc_cookie;
+ __u64 ioc_u64_1;
+ };
+ union {
+ __u32 ioc_conn1;
+ __u32 ioc_u32_1;
+ };
+ union {
+ __u32 ioc_conn2;
+ __u32 ioc_u32_2;
+ };
+
+ struct obdo ioc_obdo1;
+ struct obdo ioc_obdo2;
+
+ obd_size ioc_count;
+ obd_off ioc_offset;
+ __u32 ioc_dev;
+ __u32 ioc_command;
+
+ __u64 ioc_nid;
+ __u32 ioc_nal;
+ __u32 ioc_type;
+
+ /* buffers the kernel will treat as user pointers */
+ __u32 ioc_plen1;
+ char *ioc_pbuf1;
+ __u32 ioc_plen2;
+ char *ioc_pbuf2;
+
+ /* inline buffers for various arguments */
+ __u32 ioc_inllen1;
+ char *ioc_inlbuf1;
+ __u32 ioc_inllen2;
+ char *ioc_inlbuf2;
+ __u32 ioc_inllen3;
+ char *ioc_inlbuf3;
+ __u32 ioc_inllen4;
+ char *ioc_inlbuf4;
+
+ char ioc_bulk[0];
+};
+
+struct obd_ioctl_hdr {
+ __u32 ioc_len;
+ __u32 ioc_version;
+};
+
+static inline int obd_ioctl_packlen(struct obd_ioctl_data *data)
+{
+ int len = cfs_size_round(sizeof(struct obd_ioctl_data));
+ len += cfs_size_round(data->ioc_inllen1);
+ len += cfs_size_round(data->ioc_inllen2);
+ len += cfs_size_round(data->ioc_inllen3);
+ len += cfs_size_round(data->ioc_inllen4);
+ return len;
+}
+
+
+static inline int obd_ioctl_is_invalid(struct obd_ioctl_data *data)
+{
+ if (data->ioc_len > (1<<30)) {
+ CERROR("OBD ioctl: ioc_len larger than 1<<30\n");
+ return 1;
+ }
+ if (data->ioc_inllen1 > (1<<30)) {
+ CERROR("OBD ioctl: ioc_inllen1 larger than 1<<30\n");
+ return 1;
+ }
+ if (data->ioc_inllen2 > (1<<30)) {
+ CERROR("OBD ioctl: ioc_inllen2 larger than 1<<30\n");
+ return 1;
+ }
+ if (data->ioc_inllen3 > (1<<30)) {
+ CERROR("OBD ioctl: ioc_inllen3 larger than 1<<30\n");
+ return 1;
+ }
+ if (data->ioc_inllen4 > (1<<30)) {
+ CERROR("OBD ioctl: ioc_inllen4 larger than 1<<30\n");
+ return 1;
+ }
+ if (data->ioc_inlbuf1 && !data->ioc_inllen1) {
+ CERROR("OBD ioctl: inlbuf1 pointer but 0 length\n");
+ return 1;
+ }
+ if (data->ioc_inlbuf2 && !data->ioc_inllen2) {
+ CERROR("OBD ioctl: inlbuf2 pointer but 0 length\n");
+ return 1;
+ }
+ if (data->ioc_inlbuf3 && !data->ioc_inllen3) {
+ CERROR("OBD ioctl: inlbuf3 pointer but 0 length\n");
+ return 1;
+ }
+ if (data->ioc_inlbuf4 && !data->ioc_inllen4) {
+ CERROR("OBD ioctl: inlbuf4 pointer but 0 length\n");
+ return 1;
+ }
+ if (data->ioc_pbuf1 && !data->ioc_plen1) {
+ CERROR("OBD ioctl: pbuf1 pointer but 0 length\n");
+ return 1;
+ }
+ if (data->ioc_pbuf2 && !data->ioc_plen2) {
+ CERROR("OBD ioctl: pbuf2 pointer but 0 length\n");
+ return 1;
+ }
+ if (data->ioc_plen1 && !data->ioc_pbuf1) {
+ CERROR("OBD ioctl: plen1 set but NULL pointer\n");
+ return 1;
+ }
+ if (data->ioc_plen2 && !data->ioc_pbuf2) {
+ CERROR("OBD ioctl: plen2 set but NULL pointer\n");
+ return 1;
+ }
+ if (obd_ioctl_packlen(data) > data->ioc_len) {
+ CERROR("OBD ioctl: packlen exceeds ioc_len (%d > %d)\n",
+ obd_ioctl_packlen(data), data->ioc_len);
+ return 1;
+ }
+ return 0;
+}
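+
+/*
+ * Sketch of the expected packing (illustration only; @name is hypothetical):
+ * the caller fills the inline buffers/lengths, sets ioc_len to the rounded
+ * total, and the checks above reject anything that does not fit.
+ *
+ *    data->ioc_inlbuf1 = name;
+ *    data->ioc_inllen1 = strlen(name) + 1;
+ *    data->ioc_len     = obd_ioctl_packlen(data);
+ *    LASSERT(!obd_ioctl_is_invalid(data));
+ */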
+
+
+#include <obd_support.h>
+
+/* function defined in lustre/obdclass/<platform>/<platform>-module.c */
+int obd_ioctl_getdata(char **buf, int *len, void *arg);
+int obd_ioctl_popdata(void *arg, void *data, int len);
+
+static inline void obd_ioctl_freedata(char *buf, int len)
+{
+ OBD_FREE_LARGE(buf, len);
+ return;
+}
+
+/*
+ * BSD ioctl description:
+ * #define IOC_V1 _IOR(g, n1, long)
+ * #define IOC_V2 _IOW(g, n2, long)
+ *
+ * ioctl(f, IOC_V1, arg);
+ * arg will be treated as a long value,
+ *
+ * ioctl(f, IOC_V2, arg)
+ * arg will be treated as a pointer, bsd will call
+ * copyin(buf, arg, sizeof(long))
+ *
+ * To make the BSD ioctl handle the argument correctly and simply,
+ * we change _IOR to _IOWR so BSD will copyin obd_ioctl_data
+ * for us. Does this change affect Linux? (XXX Liang)
+ */
+#define OBD_IOC_CREATE _IOWR('f', 101, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_DESTROY _IOW ('f', 104, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_PREALLOCATE _IOWR('f', 105, OBD_IOC_DATA_TYPE)
+
+#define OBD_IOC_SETATTR _IOW ('f', 107, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_GETATTR _IOWR ('f', 108, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_READ _IOWR('f', 109, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_WRITE _IOWR('f', 110, OBD_IOC_DATA_TYPE)
+
+
+#define OBD_IOC_STATFS _IOWR('f', 113, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_SYNC _IOW ('f', 114, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_READ2 _IOWR('f', 115, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_FORMAT _IOWR('f', 116, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_PARTITION _IOWR('f', 117, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_COPY _IOWR('f', 120, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_MIGR _IOWR('f', 121, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_PUNCH _IOWR('f', 122, OBD_IOC_DATA_TYPE)
+
+#define OBD_IOC_MODULE_DEBUG _IOWR('f', 124, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_BRW_READ _IOWR('f', 125, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_BRW_WRITE _IOWR('f', 126, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_NAME2DEV _IOWR('f', 127, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_UUID2DEV _IOWR('f', 130, OBD_IOC_DATA_TYPE)
+
+#define OBD_IOC_GETNAME _IOWR('f', 131, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_GETMDNAME _IOR('f', 131, char[MAX_OBD_NAME])
+#define OBD_IOC_GETDTNAME OBD_IOC_GETNAME
+
+#define OBD_IOC_LOV_GET_CONFIG _IOWR('f', 132, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_CLIENT_RECOVER _IOW ('f', 133, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_PING_TARGET _IOW ('f', 136, OBD_IOC_DATA_TYPE)
+
+#define OBD_IOC_DEC_FS_USE_COUNT _IO ('f', 139 )
+#define OBD_IOC_NO_TRANSNO _IOW ('f', 140, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_SET_READONLY _IOW ('f', 141, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_ABORT_RECOVERY _IOR ('f', 142, OBD_IOC_DATA_TYPE)
+
+#define OBD_IOC_ROOT_SQUASH _IOWR('f', 143, OBD_IOC_DATA_TYPE)
+
+#define OBD_GET_VERSION _IOWR ('f', 144, OBD_IOC_DATA_TYPE)
+
+#define OBD_IOC_GSS_SUPPORT _IOWR('f', 145, OBD_IOC_DATA_TYPE)
+
+#define OBD_IOC_CLOSE_UUID _IOWR ('f', 147, OBD_IOC_DATA_TYPE)
+
+#define OBD_IOC_CHANGELOG_SEND _IOW ('f', 148, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_GETDEVICE _IOWR ('f', 149, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_FID2PATH _IOWR ('f', 150, OBD_IOC_DATA_TYPE)
+/* see also <lustre/lustre_user.h> for ioctls 151-153 */
+/* OBD_IOC_LOV_SETSTRIPE: See also LL_IOC_LOV_SETSTRIPE */
+#define OBD_IOC_LOV_SETSTRIPE _IOW ('f', 154, OBD_IOC_DATA_TYPE)
+/* OBD_IOC_LOV_GETSTRIPE: See also LL_IOC_LOV_GETSTRIPE */
+#define OBD_IOC_LOV_GETSTRIPE _IOW ('f', 155, OBD_IOC_DATA_TYPE)
+/* OBD_IOC_LOV_SETEA: See also LL_IOC_LOV_SETEA */
+#define OBD_IOC_LOV_SETEA _IOW ('f', 156, OBD_IOC_DATA_TYPE)
+/* see <lustre/lustre_user.h> for ioctls 157-159 */
+/* OBD_IOC_QUOTACHECK: See also LL_IOC_QUOTACHECK */
+#define OBD_IOC_QUOTACHECK _IOW ('f', 160, int)
+/* OBD_IOC_POLL_QUOTACHECK: See also LL_IOC_POLL_QUOTACHECK */
+#define OBD_IOC_POLL_QUOTACHECK _IOR ('f', 161, struct if_quotacheck *)
+/* OBD_IOC_QUOTACTL: See also LL_IOC_QUOTACTL */
+#define OBD_IOC_QUOTACTL _IOWR('f', 162, struct if_quotactl)
+/* see also <lustre/lustre_user.h> for ioctls 163-176 */
+#define OBD_IOC_CHANGELOG_REG _IOW ('f', 177, struct obd_ioctl_data)
+#define OBD_IOC_CHANGELOG_DEREG _IOW ('f', 178, struct obd_ioctl_data)
+#define OBD_IOC_CHANGELOG_CLEAR _IOW ('f', 179, struct obd_ioctl_data)
+#define OBD_IOC_RECORD _IOWR('f', 180, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_ENDRECORD _IOWR('f', 181, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_PARSE _IOWR('f', 182, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_DORECORD _IOWR('f', 183, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_PROCESS_CFG _IOWR('f', 184, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_DUMP_LOG _IOWR('f', 185, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_CLEAR_LOG _IOWR('f', 186, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_PARAM _IOW ('f', 187, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_POOL _IOWR('f', 188, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_REPLACE_NIDS _IOWR('f', 189, OBD_IOC_DATA_TYPE)
+
+#define OBD_IOC_CATLOGLIST _IOWR('f', 190, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_LLOG_INFO _IOWR('f', 191, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_LLOG_PRINT _IOWR('f', 192, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_LLOG_CANCEL _IOWR('f', 193, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_LLOG_REMOVE _IOWR('f', 194, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_LLOG_CHECK _IOWR('f', 195, OBD_IOC_DATA_TYPE)
+/* OBD_IOC_LLOG_CATINFO is deprecated */
+#define OBD_IOC_LLOG_CATINFO _IOWR('f', 196, OBD_IOC_DATA_TYPE)
+
+#define ECHO_IOC_GET_STRIPE _IOWR('f', 200, OBD_IOC_DATA_TYPE)
+#define ECHO_IOC_SET_STRIPE _IOWR('f', 201, OBD_IOC_DATA_TYPE)
+#define ECHO_IOC_ENQUEUE _IOWR('f', 202, OBD_IOC_DATA_TYPE)
+#define ECHO_IOC_CANCEL _IOWR('f', 203, OBD_IOC_DATA_TYPE)
+
+#define OBD_IOC_GET_OBJ_VERSION _IOR('f', 210, OBD_IOC_DATA_TYPE)
+
+/* <lustre/lustre_user.h> defines ioctl number 218-219 */
+#define OBD_IOC_GET_MNTOPT _IOW('f', 220, mntopt_t)
+
+#define OBD_IOC_ECHO_MD _IOR('f', 221, struct obd_ioctl_data)
+#define OBD_IOC_ECHO_ALLOC_SEQ _IOWR('f', 222, struct obd_ioctl_data)
+
+#define OBD_IOC_START_LFSCK _IOWR('f', 230, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_STOP_LFSCK _IOW('f', 231, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_PAUSE_LFSCK _IOW('f', 232, OBD_IOC_DATA_TYPE)
+
+/* XXX _IOWR('f', 250, long) has been defined in
+ * libcfs/include/libcfs/libcfs_private.h for debug, don't use it
+ */
+
+/* Until such time as we get_info the per-stripe maximum from the OST,
+ * we define this to be 2T - 4k, which is the ext3 maxbytes. */
+#define LUSTRE_STRIPE_MAXBYTES 0x1fffffff000ULL
+
+/* Special values for remove LOV EA from disk */
+#define LOVEA_DELETE_VALUES(size, count, offset) (size == 0 && count == 0 && \
+ offset == (typeof(offset))(-1))
+
+/* #define POISON_BULK 0 */
+
+/*
+ * l_wait_event is a flexible sleeping function, permitting simple caller
+ * configuration of interrupt and timeout sensitivity along with actions to
+ * be performed in the event of either exception.
+ *
+ * The first form of usage looks like this:
+ *
+ * struct l_wait_info lwi = LWI_TIMEOUT_INTR(timeout, timeout_handler,
+ * intr_handler, callback_data);
+ * rc = l_wait_event(waitq, condition, &lwi);
+ *
+ * l_wait_event() makes the current process wait on 'waitq' until 'condition'
+ * is TRUE or a "killable" signal (SIGTERM, SIGKILL, SIGINT) is pending. It
+ * returns 0 to signify 'condition' is TRUE, but if a signal wakes it before
+ * 'condition' becomes true, it optionally calls the specified 'intr_handler'
+ * if not NULL, and returns -EINTR.
+ *
+ * If a non-zero timeout is specified, signals are ignored until the timeout
+ * has expired. At this time, if 'timeout_handler' is not NULL it is called.
+ * If it returns FALSE l_wait_event() continues to wait as described above with
+ * signals enabled. Otherwise it returns -ETIMEDOUT.
+ *
+ * LWI_INTR(intr_handler, callback_data) is shorthand for
+ * LWI_TIMEOUT_INTR(0, NULL, intr_handler, callback_data)
+ *
+ * The second form of usage looks like this:
+ *
+ * struct l_wait_info lwi = LWI_TIMEOUT(timeout, timeout_handler);
+ * rc = l_wait_event(waitq, condition, &lwi);
+ *
+ * This form is the same as the first except that it COMPLETELY IGNORES
+ * SIGNALS. The caller must therefore beware that if 'timeout' is zero, or if
+ * 'timeout_handler' is not NULL and returns FALSE, then the ONLY thing that
+ * can unblock the current process is 'condition' becoming TRUE.
+ *
+ * Another form of usage is:
+ * struct l_wait_info lwi = LWI_TIMEOUT_INTERVAL(timeout, interval,
+ * timeout_handler);
+ * rc = l_wait_event(waitq, condition, &lwi);
+ * This is the same as previous case, but condition is checked once every
+ * 'interval' jiffies (if non-zero).
+ *
+ * Subtle synchronization point: this macro does *not* necessarily take the
+ * wait-queue spin-lock before returning, and hence the following idiom is
+ * safe ONLY when the caller provides some external locking:
+ *
+ * Thread1 Thread2
+ *
+ * l_wait_event(&obj->wq, ....); (1)
+ *
+ * wake_up(&obj->wq): (2)
+ * spin_lock(&q->lock); (2.1)
+ * __wake_up_common(q, ...); (2.2)
+ * spin_unlock(&q->lock, flags); (2.3)
+ *
+ * OBD_FREE_PTR(obj); (3)
+ *
+ * As l_wait_event() may "short-cut" execution and return without taking
+ * wait-queue spin-lock, some additional synchronization is necessary to
+ * guarantee that step (3) can begin only after (2.3) finishes.
+ *
+ * XXX nikita: some ptlrpc daemon threads have races of that sort.
+ *
+ */
+static inline int back_to_sleep(void *arg)
+{
+ return 0;
+}
+
+#define LWI_ON_SIGNAL_NOOP ((void (*)(void *))(-1))
+
+struct l_wait_info {
+ cfs_duration_t lwi_timeout;
+ cfs_duration_t lwi_interval;
+ int lwi_allow_intr;
+ int (*lwi_on_timeout)(void *);
+ void (*lwi_on_signal)(void *);
+ void *lwi_cb_data;
+};
+
+/* NB: LWI_TIMEOUT ignores signals completely */
+#define LWI_TIMEOUT(time, cb, data) \
+((struct l_wait_info) { \
+ .lwi_timeout = time, \
+ .lwi_on_timeout = cb, \
+ .lwi_cb_data = data, \
+ .lwi_interval = 0, \
+ .lwi_allow_intr = 0 \
+})
+
+#define LWI_TIMEOUT_INTERVAL(time, interval, cb, data) \
+((struct l_wait_info) { \
+ .lwi_timeout = time, \
+ .lwi_on_timeout = cb, \
+ .lwi_cb_data = data, \
+ .lwi_interval = interval, \
+ .lwi_allow_intr = 0 \
+})
+
+#define LWI_TIMEOUT_INTR(time, time_cb, sig_cb, data) \
+((struct l_wait_info) { \
+ .lwi_timeout = time, \
+ .lwi_on_timeout = time_cb, \
+ .lwi_on_signal = sig_cb, \
+ .lwi_cb_data = data, \
+ .lwi_interval = 0, \
+ .lwi_allow_intr = 0 \
+})
+
+#define LWI_TIMEOUT_INTR_ALL(time, time_cb, sig_cb, data) \
+((struct l_wait_info) { \
+ .lwi_timeout = time, \
+ .lwi_on_timeout = time_cb, \
+ .lwi_on_signal = sig_cb, \
+ .lwi_cb_data = data, \
+ .lwi_interval = 0, \
+ .lwi_allow_intr = 1 \
+})
+
+#define LWI_INTR(cb, data) LWI_TIMEOUT_INTR(0, NULL, cb, data)
+
+
+/*
+ * wait for @condition to become true, but no longer than timeout, specified
+ * by @info.
+ */
+#define __l_wait_event(wq, condition, info, ret, l_add_wait) \
+do { \
+ wait_queue_t __wait; \
+ cfs_duration_t __timeout = info->lwi_timeout; \
+ sigset_t __blocked; \
+ int __allow_intr = info->lwi_allow_intr; \
+ \
+ ret = 0; \
+ if (condition) \
+ break; \
+ \
+ init_waitqueue_entry_current(&__wait); \
+ l_add_wait(&wq, &__wait); \
+ \
+ /* Block all signals (just the non-fatal ones if no timeout). */ \
+ if (info->lwi_on_signal != NULL && (__timeout == 0 || __allow_intr)) \
+ __blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS); \
+ else \
+ __blocked = cfs_block_sigsinv(0); \
+ \
+ for (;;) { \
+ unsigned __wstate; \
+ \
+ __wstate = info->lwi_on_signal != NULL && \
+ (__timeout == 0 || __allow_intr) ? \
+ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; \
+ \
+ set_current_state(TASK_INTERRUPTIBLE); \
+ \
+ if (condition) \
+ break; \
+ \
+ if (__timeout == 0) { \
+ waitq_wait(&__wait, __wstate); \
+ } else { \
+ cfs_duration_t interval = info->lwi_interval ? \
+ min_t(cfs_duration_t, \
+ info->lwi_interval, __timeout) : \
+ __timeout; \
+ cfs_duration_t remaining = waitq_timedwait(&__wait,\
+ __wstate, \
+ interval); \
+ __timeout = cfs_time_sub(__timeout, \
+ cfs_time_sub(interval, remaining));\
+ if (__timeout == 0) { \
+ if (info->lwi_on_timeout == NULL || \
+ info->lwi_on_timeout(info->lwi_cb_data)) { \
+ ret = -ETIMEDOUT; \
+ break; \
+ } \
+ /* Take signals after the timeout expires. */ \
+ if (info->lwi_on_signal != NULL) \
+ (void)cfs_block_sigsinv(LUSTRE_FATAL_SIGS);\
+ } \
+ } \
+ \
+ if (condition) \
+ break; \
+ if (cfs_signal_pending()) { \
+ if (info->lwi_on_signal != NULL && \
+ (__timeout == 0 || __allow_intr)) { \
+ if (info->lwi_on_signal != LWI_ON_SIGNAL_NOOP) \
+ info->lwi_on_signal(info->lwi_cb_data);\
+ ret = -EINTR; \
+ break; \
+ } \
+ /* We have to do this here because some signals */ \
+ /* are not blockable - i.e. from strace(1). */ \
+ /* In these cases we want to schedule_timeout() */ \
+ /* again, because we don't want that to return */ \
+ /* -EINTR when the RPC actually succeeded. */ \
+ /* the recalc_sigpending() below will deliver the */ \
+ /* signal properly. */ \
+ cfs_clear_sigpending(); \
+ } \
+ } \
+ \
+ cfs_restore_sigs(__blocked); \
+ \
+ set_current_state(TASK_RUNNING); \
+ remove_wait_queue(&wq, &__wait); \
+} while (0)
+
+
+
+#define l_wait_event(wq, condition, info) \
+({ \
+ int __ret; \
+ struct l_wait_info *__info = (info); \
+ \
+ __l_wait_event(wq, condition, __info, \
+ __ret, add_wait_queue); \
+ __ret; \
+})
+
+#define l_wait_event_exclusive(wq, condition, info) \
+({ \
+ int __ret; \
+ struct l_wait_info *__info = (info); \
+ \
+ __l_wait_event(wq, condition, __info, \
+ __ret, add_wait_queue_exclusive); \
+ __ret; \
+})
+
+#define l_wait_event_exclusive_head(wq, condition, info) \
+({ \
+ int __ret; \
+ struct l_wait_info *__info = (info); \
+ \
+ __l_wait_event(wq, condition, __info, \
+ __ret, add_wait_queue_exclusive_head); \
+ __ret; \
+})
+
+#define l_wait_condition(wq, condition) \
+({ \
+ struct l_wait_info lwi = { 0 }; \
+ l_wait_event(wq, condition, &lwi); \
+})
+
+#define l_wait_condition_exclusive(wq, condition) \
+({ \
+ struct l_wait_info lwi = { 0 }; \
+ l_wait_event_exclusive(wq, condition, &lwi); \
+})
+
+#define l_wait_condition_exclusive_head(wq, condition) \
+({ \
+ struct l_wait_info lwi = { 0 }; \
+ l_wait_event_exclusive_head(wq, condition, &lwi); \
+})
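
A minimal usage sketch of the first form documented above (hypothetical object, flag and callback names; error handling elided):

    struct my_obj {                     /* illustrative only */
            wait_queue_head_t       wq;
            int                     ready;
    };

    static int my_timeout_cb(void *data)
    {
            /* non-zero return makes l_wait_event() return -ETIMEDOUT */
            return 1;
    }

    static void my_intr_cb(void *data)
    {
            /* called if a fatal signal interrupts the wait */
    }

    static int wait_for_ready(struct my_obj *obj, cfs_duration_t timeout)
    {
            struct l_wait_info lwi = LWI_TIMEOUT_INTR(timeout, my_timeout_cb,
                                                      my_intr_cb, obj);

            /* 0 when obj->ready became true, else -ETIMEDOUT or -EINTR */
            return l_wait_event(obj->wq, obj->ready, &lwi);
    }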
+
+#define LIBLUSTRE_CLIENT (0)
+
+/** @} lib */
+
+#endif /* _LUSTRE_LIB_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_linkea.h b/drivers/staging/lustre/lustre/include/lustre_linkea.h
new file mode 100644
index 0000000..5790be9
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_linkea.h
@@ -0,0 +1,57 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2013, Intel Corporation.
+ * Use is subject to license terms.
+ *
+ * Author: di wang <di.wang@intel.com>
+ */
+
+struct linkea_data {
+ /**
+ * Buffer to keep link EA body.
+ */
+ struct lu_buf *ld_buf;
+ /**
+ * The matched header, entry and its length in the EA
+ */
+ struct link_ea_header *ld_leh;
+ struct link_ea_entry *ld_lee;
+ int ld_reclen;
+};
+
+int linkea_data_new(struct linkea_data *ldata, struct lu_buf *buf);
+int linkea_init(struct linkea_data *ldata);
+void linkea_entry_unpack(const struct link_ea_entry *lee, int *reclen,
+ struct lu_name *lname, struct lu_fid *pfid);
+int linkea_add_buf(struct linkea_data *ldata, const struct lu_name *lname,
+ const struct lu_fid *pfid);
+void linkea_del_buf(struct linkea_data *ldata, const struct lu_name *lname);
+int linkea_links_find(struct linkea_data *ldata, const struct lu_name *lname,
+ const struct lu_fid *pfid);
+
+#define LINKEA_NEXT_ENTRY(ldata) \
+ (struct link_ea_entry *)((char *)ldata.ld_lee + ldata.ld_reclen)
+
+#define LINKEA_FIRST_ENTRY(ldata) \
+ (struct link_ea_entry *)(ldata.ld_leh + 1)
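
A hedged sketch of how these helpers fit together, assuming ldata->ld_buf already holds the EA and that the header field leh_reccount (from lustre_idl.h) counts the entries:

    static void walk_linkea(struct linkea_data *ldata)
    {
            struct lu_name lname;
            struct lu_fid pfid;
            unsigned int i;

            if (linkea_init(ldata) != 0)
                    return;

            ldata->ld_lee = LINKEA_FIRST_ENTRY(*ldata);
            for (i = 0; i < ldata->ld_leh->leh_reccount; i++) {
                    linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen,
                                        &lname, &pfid);
                    /* lname/pfid now describe one parent link */
                    ldata->ld_lee = LINKEA_NEXT_ENTRY(*ldata);
            }
    }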
diff --git a/drivers/staging/lustre/lustre/include/lustre_lite.h b/drivers/staging/lustre/lustre/include/lustre_lite.h
new file mode 100644
index 0000000..25f8bfa
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_lite.h
@@ -0,0 +1,147 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef _LL_H
+#define _LL_H
+
+/** \defgroup lite lite
+ *
+ * @{
+ */
+
+#include <linux/lustre_lite.h>
+
+#include <obd_class.h>
+#include <obd_ost.h>
+#include <lustre_net.h>
+#include <lustre_mds.h>
+#include <lustre_ha.h>
+
+/* 4UL * 1024 * 1024 */
+#define LL_MAX_BLKSIZE_BITS (22)
+#define LL_MAX_BLKSIZE (1UL<<LL_MAX_BLKSIZE_BITS)
+
+#include <lustre/lustre_user.h>
+
+
+struct lustre_rw_params {
+ int lrp_lock_mode;
+ ldlm_policy_data_t lrp_policy;
+ obd_flag lrp_brw_flags;
+ int lrp_ast_flags;
+};
+
+/*
+ * XXX nikita: this function lives in the header because it is used by both
+ * the llite kernel module and the liblustre library, and there is no (?)
+ * better place to put it.
+ */
+static inline void lustre_build_lock_params(int cmd, unsigned long open_flags,
+ __u64 connect_flags,
+ loff_t pos, ssize_t len,
+ struct lustre_rw_params *params)
+{
+ params->lrp_lock_mode = (cmd == OBD_BRW_READ) ? LCK_PR : LCK_PW;
+ params->lrp_brw_flags = 0;
+
+ params->lrp_policy.l_extent.start = pos;
+ params->lrp_policy.l_extent.end = pos + len - 1;
+ /*
+ * for now O_APPEND always takes local locks.
+ */
+ if (cmd == OBD_BRW_WRITE && (open_flags & O_APPEND)) {
+ params->lrp_policy.l_extent.start = 0;
+ params->lrp_policy.l_extent.end = OBD_OBJECT_EOF;
+ } else if (LIBLUSTRE_CLIENT && (connect_flags & OBD_CONNECT_SRVLOCK)) {
+ /*
+ * liblustre: OST-side locking for all non-O_APPEND
+ * reads/writes.
+ */
+ params->lrp_lock_mode = LCK_NL;
+ params->lrp_brw_flags = OBD_BRW_SRVLOCK;
+ } else {
+ /*
+ * nothing special for the kernel. In the future llite may use
+ * OST-side locks for small writes into highly contended
+ * files.
+ */
+ }
+ params->lrp_ast_flags = (open_flags & O_NONBLOCK) ?
+ LDLM_FL_BLOCK_NOWAIT : 0;
+}
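
A small sketch of a caller, assuming a plain (non-append) write of count bytes at pos and connect flags taken from the import's connect data:

    static void example_write_params(loff_t pos, ssize_t count,
                                     __u64 connect_flags)
    {
            struct lustre_rw_params params;

            lustre_build_lock_params(OBD_BRW_WRITE, 0 /* open_flags */,
                                     connect_flags, pos, count, &params);
            /* params.lrp_lock_mode is LCK_PW and the extent covers
             * [pos, pos + count - 1] */
    }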
+
+/*
+ * This is embedded into liblustre and llite super-blocks to keep track of
+ * connect flags (capabilities) supported by all imports a given mount is
+ * connected to.
+ */
+struct lustre_client_ocd {
+ /*
+ * This is the conjunction of connect_flags across all imports (LOVs) this
+ * mount is connected to. This field is updated by cl_ocd_update()
+ * under ->lco_lock.
+ */
+ __u64 lco_flags;
+ struct mutex lco_lock;
+ struct obd_export *lco_md_exp;
+ struct obd_export *lco_dt_exp;
+};
+
+/*
+ * Chain of hash overflow pages.
+ */
+struct ll_dir_chain {
+ /* XXX something. Later */
+};
+
+static inline void ll_dir_chain_init(struct ll_dir_chain *chain)
+{
+}
+
+static inline void ll_dir_chain_fini(struct ll_dir_chain *chain)
+{
+}
+
+static inline unsigned long hash_x_index(__u64 hash, int hash64)
+{
+ if (BITS_PER_LONG == 32 && hash64)
+ hash >>= 32;
+ return ~0UL - hash;
+}
+
+/** @} lite */
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_log.h b/drivers/staging/lustre/lustre/include/lustre_log.h
new file mode 100644
index 0000000..721aa05
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_log.h
@@ -0,0 +1,568 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/include/lustre_log.h
+ *
+ * Generic infrastructure for managing a collection of logs.
+ * These logs are used for:
+ *
+ * - orphan recovery: OST adds record on create
+ * - mtime/size consistency: the OST adds a record on first write
+ * - open/unlinked objects: OST adds a record on destroy
+ *
+ * - mds unlink log: the MDS adds an entry upon delete
+ *
+ * - raid1 replication log between OST's
+ * - MDS replication logs
+ */
+
+#ifndef _LUSTRE_LOG_H
+#define _LUSTRE_LOG_H
+
+/** \defgroup log log
+ *
+ * @{
+ */
+
+#include <linux/lustre_log.h>
+
+#include <obd_class.h>
+#include <obd_ost.h>
+#include <lustre/lustre_idl.h>
+#include <dt_object.h>
+
+#define LOG_NAME_LIMIT(logname, name) \
+ snprintf(logname, sizeof(logname), "LOGS/%s", name)
+#define LLOG_EEMPTY 4711
+
+enum llog_open_param {
+ LLOG_OPEN_EXISTS = 0x0000,
+ LLOG_OPEN_NEW = 0x0001,
+};
+
+struct plain_handle_data {
+ struct list_head phd_entry;
+ struct llog_handle *phd_cat_handle;
+ struct llog_cookie phd_cookie; /* cookie of this log in its cat */
+};
+
+struct cat_handle_data {
+ struct list_head chd_head;
+ struct llog_handle *chd_current_log; /* currently open log */
+ struct llog_handle *chd_next_log; /* llog to be used next */
+};
+
+static inline void logid_to_fid(struct llog_logid *id, struct lu_fid *fid)
+{
+ /* For compatibility purposes we identify pre-OSD (~< 2.3.51 MDS)
+ * logids by a non-zero ogen (inode generation) and convert them
+ * into an IGIF */
+ if (id->lgl_ogen == 0) {
+ fid->f_seq = id->lgl_oi.oi.oi_seq;
+ fid->f_oid = id->lgl_oi.oi.oi_id;
+ fid->f_ver = 0;
+ } else {
+ lu_igif_build(fid, id->lgl_oi.oi.oi_id, id->lgl_ogen);
+ }
+}
+
+static inline void fid_to_logid(struct lu_fid *fid, struct llog_logid *id)
+{
+ id->lgl_oi.oi.oi_seq = fid->f_seq;
+ id->lgl_oi.oi.oi_id = fid->f_oid;
+ id->lgl_ogen = 0;
+}
+
+static inline void logid_set_id(struct llog_logid *log_id, __u64 id)
+{
+ log_id->lgl_oi.oi.oi_id = id;
+}
+
+static inline __u64 logid_id(struct llog_logid *log_id)
+{
+ return log_id->lgl_oi.oi.oi_id;
+}
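
For post-OSD log ids (lgl_ogen == 0) the conversion above is a plain field copy, so a round trip preserves the id; a hedged sketch:

    static void example_logid_fid_roundtrip(struct llog_logid *id)
    {
            struct lu_fid fid;
            struct llog_logid copy;

            logid_to_fid(id, &fid);     /* ogen == 0: seq/oid copied, ver = 0 */
            fid_to_logid(&fid, &copy);  /* copy.lgl_ogen is set back to 0 */
    }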
+
+struct llog_handle;
+
+/* llog.c - general API */
+int llog_init_handle(const struct lu_env *env, struct llog_handle *handle,
+ int flags, struct obd_uuid *uuid);
+int llog_copy_handler(const struct lu_env *env, struct llog_handle *llh,
+ struct llog_rec_hdr *rec, void *data);
+int llog_process(const struct lu_env *env, struct llog_handle *loghandle,
+ llog_cb_t cb, void *data, void *catdata);
+int llog_process_or_fork(const struct lu_env *env,
+ struct llog_handle *loghandle,
+ llog_cb_t cb, void *data, void *catdata, bool fork);
+int llog_reverse_process(const struct lu_env *env,
+ struct llog_handle *loghandle, llog_cb_t cb,
+ void *data, void *catdata);
+int llog_cancel_rec(const struct lu_env *env, struct llog_handle *loghandle,
+ int index);
+int llog_open(const struct lu_env *env, struct llog_ctxt *ctxt,
+ struct llog_handle **lgh, struct llog_logid *logid,
+ char *name, enum llog_open_param open_param);
+int llog_close(const struct lu_env *env, struct llog_handle *cathandle);
+int llog_get_size(struct llog_handle *loghandle);
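
A hedged sketch of the typical lifecycle built from the general API above: open an existing named log, initialize the handle, walk its records, and close it. LLOG_F_IS_PLAIN is assumed to come from lustre_idl.h and my_rec_cb() is illustrative:

    static int my_rec_cb(const struct lu_env *env, struct llog_handle *llh,
                         struct llog_rec_hdr *rec, void *data)
    {
            /* returning LLOG_PROC_BREAK (defined below) stops processing */
            return 0;
    }

    static int dump_named_log(const struct lu_env *env, struct llog_ctxt *ctxt,
                              char *name)
    {
            struct llog_handle *llh;
            int rc;

            rc = llog_open(env, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS);
            if (rc)
                    return rc;
            rc = llog_init_handle(env, llh, LLOG_F_IS_PLAIN, NULL);
            if (rc == 0)
                    rc = llog_process(env, llh, my_rec_cb, NULL, NULL);
            llog_close(env, llh);
            return rc;
    }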
+
+/* llog_process flags */
+#define LLOG_FLAG_NODEAMON 0x0001
+
+/* llog_cat.c - catalog api */
+struct llog_process_data {
+ /**
+ * Any useful data needed while processing catalog. This is
+ * passed later to process callback.
+ */
+ void *lpd_data;
+ /**
+ * Catalog process callback function, called for each record
+ * in catalog.
+ */
+ llog_cb_t lpd_cb;
+ /**
+ * Start processing the catalog from startcat/startidx
+ */
+ int lpd_startcat;
+ int lpd_startidx;
+};
+
+struct llog_process_cat_data {
+ /**
+ * Temporary stored first_idx while scanning log.
+ */
+ int lpcd_first_idx;
+ /**
+ * Temporary stored last_idx while scanning log.
+ */
+ int lpcd_last_idx;
+};
+
+int llog_cat_close(const struct lu_env *env, struct llog_handle *cathandle);
+int llog_cat_add_rec(const struct lu_env *env, struct llog_handle *cathandle,
+ struct llog_rec_hdr *rec, struct llog_cookie *reccookie,
+ void *buf, struct thandle *th);
+int llog_cat_declare_add_rec(const struct lu_env *env,
+ struct llog_handle *cathandle,
+ struct llog_rec_hdr *rec, struct thandle *th);
+int llog_cat_add(const struct lu_env *env, struct llog_handle *cathandle,
+ struct llog_rec_hdr *rec, struct llog_cookie *reccookie,
+ void *buf);
+int llog_cat_cancel_records(const struct lu_env *env,
+ struct llog_handle *cathandle, int count,
+ struct llog_cookie *cookies);
+int llog_cat_process_or_fork(const struct lu_env *env,
+ struct llog_handle *cat_llh, llog_cb_t cb,
+ void *data, int startcat, int startidx, bool fork);
+int llog_cat_process(const struct lu_env *env, struct llog_handle *cat_llh,
+ llog_cb_t cb, void *data, int startcat, int startidx);
+int llog_cat_reverse_process(const struct lu_env *env,
+ struct llog_handle *cat_llh, llog_cb_t cb,
+ void *data);
+int llog_cat_init_and_process(const struct lu_env *env,
+ struct llog_handle *llh);
+
+/* llog_obd.c */
+int llog_setup(const struct lu_env *env, struct obd_device *obd,
+ struct obd_llog_group *olg, int index,
+ struct obd_device *disk_obd, struct llog_operations *op);
+int __llog_ctxt_put(const struct lu_env *env, struct llog_ctxt *ctxt);
+int llog_cleanup(const struct lu_env *env, struct llog_ctxt *);
+int llog_sync(struct llog_ctxt *ctxt, struct obd_export *exp, int flags);
+int llog_obd_add(const struct lu_env *env, struct llog_ctxt *ctxt,
+ struct llog_rec_hdr *rec, struct lov_stripe_md *lsm,
+ struct llog_cookie *logcookies, int numcookies);
+int llog_cancel(const struct lu_env *env, struct llog_ctxt *ctxt,
+ struct lov_stripe_md *lsm, int count,
+ struct llog_cookie *cookies, int flags);
+
+int obd_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
+ struct obd_device *disk_obd, int *idx);
+
+int obd_llog_finish(struct obd_device *obd, int count);
+
+/* llog_ioctl.c */
+int llog_ioctl(const struct lu_env *env, struct llog_ctxt *ctxt, int cmd,
+ struct obd_ioctl_data *data);
+
+/* llog_net.c */
+int llog_initiator_connect(struct llog_ctxt *ctxt);
+
+struct llog_operations {
+ int (*lop_destroy)(const struct lu_env *env,
+ struct llog_handle *handle);
+ int (*lop_next_block)(const struct lu_env *env, struct llog_handle *h,
+ int *curr_idx, int next_idx, __u64 *offset,
+ void *buf, int len);
+ int (*lop_prev_block)(const struct lu_env *env, struct llog_handle *h,
+ int prev_idx, void *buf, int len);
+ int (*lop_read_header)(const struct lu_env *env,
+ struct llog_handle *handle);
+ int (*lop_setup)(const struct lu_env *env, struct obd_device *obd,
+ struct obd_llog_group *olg, int ctxt_idx,
+ struct obd_device *disk_obd);
+ int (*lop_sync)(struct llog_ctxt *ctxt, struct obd_export *exp,
+ int flags);
+ int (*lop_cleanup)(const struct lu_env *env, struct llog_ctxt *ctxt);
+ int (*lop_cancel)(const struct lu_env *env, struct llog_ctxt *ctxt,
+ struct lov_stripe_md *lsm, int count,
+ struct llog_cookie *cookies, int flags);
+ int (*lop_connect)(struct llog_ctxt *ctxt, struct llog_logid *logid,
+ struct llog_gen *gen, struct obd_uuid *uuid);
+ /**
+ * Any llog file must be opened first using llog_open(). An llog can be
+ * opened by name, by logid, or with neither, in which case a new logid
+ * will be generated.
+ */
+ int (*lop_open)(const struct lu_env *env, struct llog_handle *lgh,
+ struct llog_logid *logid, char *name,
+ enum llog_open_param);
+ /**
+ * An opened llog may not exist yet; this must be checked where needed
+ * using the llog_exist() call.
+ */
+ int (*lop_exist)(struct llog_handle *lgh);
+ /**
+ * Close the llog file and call llog_free_handle() implicitly.
+ * Any opened llog must be closed by a llog_close() call.
+ */
+ int (*lop_close)(const struct lu_env *env, struct llog_handle *handle);
+ /**
+ * Create new llog file. The llog must be opened.
+ * Must be used only for local llog operations.
+ */
+ int (*lop_declare_create)(const struct lu_env *env,
+ struct llog_handle *handle,
+ struct thandle *th);
+ int (*lop_create)(const struct lu_env *env, struct llog_handle *handle,
+ struct thandle *th);
+ /**
+ * Write a new record to the llog. It usually appends records but can
+ * also edit existing records.
+ */
+ int (*lop_declare_write_rec)(const struct lu_env *env,
+ struct llog_handle *lgh,
+ struct llog_rec_hdr *rec,
+ int idx, struct thandle *th);
+ int (*lop_write_rec)(const struct lu_env *env,
+ struct llog_handle *loghandle,
+ struct llog_rec_hdr *rec,
+ struct llog_cookie *cookie, int cookiecount,
+ void *buf, int idx, struct thandle *th);
+ /**
+ * Add a new record to the llog catalog. Does the same as
+ * llog_write_rec() but through the llog catalog.
+ */
+ int (*lop_declare_add)(const struct lu_env *env,
+ struct llog_handle *lgh,
+ struct llog_rec_hdr *rec, struct thandle *th);
+ int (*lop_add)(const struct lu_env *env, struct llog_handle *lgh,
+ struct llog_rec_hdr *rec, struct llog_cookie *cookie,
+ void *buf, struct thandle *th);
+ /* Old llog_add version, used in MDS-LOV-OSC now; it will go away with
+ * the LOD/OSP replacement */
+ int (*lop_obd_add)(const struct lu_env *env, struct llog_ctxt *ctxt,
+ struct llog_rec_hdr *rec, struct lov_stripe_md *lsm,
+ struct llog_cookie *logcookies, int numcookies);
+};
+
+/* In-memory descriptor for a log object or log catalog */
+struct llog_handle {
+ struct rw_semaphore lgh_lock;
+ spinlock_t lgh_hdr_lock; /* protect lgh_hdr data */
+ struct llog_logid lgh_id; /* id of this log */
+ struct llog_log_hdr *lgh_hdr;
+ struct file *lgh_file;
+ struct dt_object *lgh_obj;
+ int lgh_last_idx;
+ int lgh_cur_idx; /* used during llog_process */
+ __u64 lgh_cur_offset; /* used during llog_process */
+ struct llog_ctxt *lgh_ctxt;
+ union {
+ struct plain_handle_data phd;
+ struct cat_handle_data chd;
+ } u;
+ char *lgh_name;
+ void *private_data;
+ struct llog_operations *lgh_logops;
+ atomic_t lgh_refcount;
+};
+
+/* llog_lvfs.c */
+extern struct llog_operations llog_lvfs_ops;
+
+/* llog_osd.c */
+extern struct llog_operations llog_osd_ops;
+int llog_osd_get_cat_list(const struct lu_env *env, struct dt_device *d,
+ int idx, int count,
+ struct llog_catid *idarray);
+int llog_osd_put_cat_list(const struct lu_env *env, struct dt_device *d,
+ int idx, int count,
+ struct llog_catid *idarray);
+
+#define LLOG_CTXT_FLAG_UNINITIALIZED 0x00000001
+#define LLOG_CTXT_FLAG_STOP 0x00000002
+
+struct llog_ctxt {
+ int loc_idx; /* my index in the obd array of ctxts */
+ struct obd_device *loc_obd; /* points back to the containing obd*/
+ struct obd_llog_group *loc_olg; /* group containing that ctxt */
+ struct obd_export *loc_exp; /* parent "disk" export (e.g. MDS) */
+ struct obd_import *loc_imp; /* to use in RPCs: can be a backward-
+ pointing import */
+ struct llog_operations *loc_logops;
+ struct llog_handle *loc_handle;
+ struct mutex loc_mutex; /* protect loc_imp */
+ atomic_t loc_refcount;
+ long loc_flags; /* flags, see above defines */
+ struct dt_object *loc_dir;
+};
+
+#define LLOG_PROC_BREAK 0x0001
+#define LLOG_DEL_RECORD 0x0002
+
+static inline int llog_obd2ops(struct llog_ctxt *ctxt,
+ struct llog_operations **lop)
+{
+ if (ctxt == NULL)
+ return -ENOTCONN;
+
+ *lop = ctxt->loc_logops;
+ if (*lop == NULL)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static inline int llog_handle2ops(struct llog_handle *loghandle,
+ struct llog_operations **lop)
+{
+ if (loghandle == NULL || loghandle->lgh_logops == NULL)
+ return -EINVAL;
+
+ *lop = loghandle->lgh_logops;
+ return 0;
+}
+
+static inline int llog_data_len(int len)
+{
+ return cfs_size_round(len);
+}
+
+static inline struct llog_ctxt *llog_ctxt_get(struct llog_ctxt *ctxt)
+{
+ atomic_inc(&ctxt->loc_refcount);
+ CDEBUG(D_INFO, "GETting ctxt %p : new refcount %d\n", ctxt,
+ atomic_read(&ctxt->loc_refcount));
+ return ctxt;
+}
+
+static inline void llog_ctxt_put(struct llog_ctxt *ctxt)
+{
+ if (ctxt == NULL)
+ return;
+ LASSERT_ATOMIC_GT_LT(&ctxt->loc_refcount, 0, LI_POISON);
+ CDEBUG(D_INFO, "PUTting ctxt %p : new refcount %d\n", ctxt,
+ atomic_read(&ctxt->loc_refcount) - 1);
+ __llog_ctxt_put(NULL, ctxt);
+}
+
+static inline void llog_group_init(struct obd_llog_group *olg, int group)
+{
+ init_waitqueue_head(&olg->olg_waitq);
+ spin_lock_init(&olg->olg_lock);
+ mutex_init(&olg->olg_cat_processing);
+ olg->olg_seq = group;
+}
+
+static inline int llog_group_set_ctxt(struct obd_llog_group *olg,
+ struct llog_ctxt *ctxt, int index)
+{
+ LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
+
+ spin_lock(&olg->olg_lock);
+ if (olg->olg_ctxts[index] != NULL) {
+ spin_unlock(&olg->olg_lock);
+ return -EEXIST;
+ }
+ olg->olg_ctxts[index] = ctxt;
+ spin_unlock(&olg->olg_lock);
+ return 0;
+}
+
+static inline struct llog_ctxt *llog_group_get_ctxt(struct obd_llog_group *olg,
+ int index)
+{
+ struct llog_ctxt *ctxt;
+
+ LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
+
+ spin_lock(&olg->olg_lock);
+ if (olg->olg_ctxts[index] == NULL)
+ ctxt = NULL;
+ else
+ ctxt = llog_ctxt_get(olg->olg_ctxts[index]);
+ spin_unlock(&olg->olg_lock);
+ return ctxt;
+}
+
+static inline void llog_group_clear_ctxt(struct obd_llog_group *olg, int index)
+{
+ LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
+ spin_lock(&olg->olg_lock);
+ olg->olg_ctxts[index] = NULL;
+ spin_unlock(&olg->olg_lock);
+}
+
+static inline struct llog_ctxt *llog_get_context(struct obd_device *obd,
+ int index)
+{
+ return llog_group_get_ctxt(&obd->obd_olg, index);
+}
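
The usual pairing of these helpers, sketched for illustration (the context index and the use of the context are hypothetical):

    static void example_ctxt_use(struct obd_device *obd, int index)
    {
            struct llog_ctxt *ctxt;

            ctxt = llog_get_context(obd, index);    /* takes a reference */
            if (ctxt == NULL)
                    return;
            /* ... use ctxt->loc_handle, ctxt->loc_logops, ... */
            llog_ctxt_put(ctxt);                    /* drops the reference */
    }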
+
+static inline int llog_group_ctxt_null(struct obd_llog_group *olg, int index)
+{
+ return (olg->olg_ctxts[index] == NULL);
+}
+
+static inline int llog_ctxt_null(struct obd_device *obd, int index)
+{
+ return (llog_group_ctxt_null(&obd->obd_olg, index));
+}
+
+static inline int llog_destroy(const struct lu_env *env,
+ struct llog_handle *handle)
+{
+ struct llog_operations *lop;
+ int rc;
+
+ rc = llog_handle2ops(handle, &lop);
+ if (rc)
+ return rc;
+ if (lop->lop_destroy == NULL)
+ return -EOPNOTSUPP;
+
+ rc = lop->lop_destroy(env, handle);
+ return rc;
+}
+
+static inline int llog_next_block(const struct lu_env *env,
+ struct llog_handle *loghandle, int *cur_idx,
+ int next_idx, __u64 *cur_offset, void *buf,
+ int len)
+{
+ struct llog_operations *lop;
+ int rc;
+
+ rc = llog_handle2ops(loghandle, &lop);
+ if (rc)
+ return rc;
+ if (lop->lop_next_block == NULL)
+ return -EOPNOTSUPP;
+
+ rc = lop->lop_next_block(env, loghandle, cur_idx, next_idx,
+ cur_offset, buf, len);
+ return rc;
+}
+
+static inline int llog_prev_block(const struct lu_env *env,
+ struct llog_handle *loghandle,
+ int prev_idx, void *buf, int len)
+{
+ struct llog_operations *lop;
+ int rc;
+
+ rc = llog_handle2ops(loghandle, &lop);
+ if (rc)
+ return rc;
+ if (lop->lop_prev_block == NULL)
+ return -EOPNOTSUPP;
+
+ rc = lop->lop_prev_block(env, loghandle, prev_idx, buf, len);
+ return rc;
+}
+
+static inline int llog_connect(struct llog_ctxt *ctxt,
+ struct llog_logid *logid, struct llog_gen *gen,
+ struct obd_uuid *uuid)
+{
+ struct llog_operations *lop;
+ int rc;
+
+ rc = llog_obd2ops(ctxt, &lop);
+ if (rc)
+ return rc;
+ if (lop->lop_connect == NULL)
+ return -EOPNOTSUPP;
+
+ rc = lop->lop_connect(ctxt, logid, gen, uuid);
+ return rc;
+}
+
+/* llog.c */
+int llog_exist(struct llog_handle *loghandle);
+int llog_declare_create(const struct lu_env *env,
+ struct llog_handle *loghandle, struct thandle *th);
+int llog_create(const struct lu_env *env, struct llog_handle *handle,
+ struct thandle *th);
+int llog_declare_write_rec(const struct lu_env *env,
+ struct llog_handle *handle,
+ struct llog_rec_hdr *rec, int idx,
+ struct thandle *th);
+int llog_write_rec(const struct lu_env *env, struct llog_handle *handle,
+ struct llog_rec_hdr *rec, struct llog_cookie *logcookies,
+ int numcookies, void *buf, int idx, struct thandle *th);
+int llog_add(const struct lu_env *env, struct llog_handle *lgh,
+ struct llog_rec_hdr *rec, struct llog_cookie *logcookies,
+ void *buf, struct thandle *th);
+int llog_declare_add(const struct lu_env *env, struct llog_handle *lgh,
+ struct llog_rec_hdr *rec, struct thandle *th);
+int lustre_process_log(struct super_block *sb, char *logname,
+ struct config_llog_instance *cfg);
+int lustre_end_log(struct super_block *sb, char *logname,
+ struct config_llog_instance *cfg);
+int llog_open_create(const struct lu_env *env, struct llog_ctxt *ctxt,
+ struct llog_handle **res, struct llog_logid *logid,
+ char *name);
+int llog_erase(const struct lu_env *env, struct llog_ctxt *ctxt,
+ struct llog_logid *logid, char *name);
+int llog_write(const struct lu_env *env, struct llog_handle *loghandle,
+ struct llog_rec_hdr *rec, struct llog_cookie *reccookie,
+ int cookiecount, void *buf, int idx);
+
+/** @} log */
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
new file mode 100644
index 0000000..1900025
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -0,0 +1,174 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/include/lustre_mdc.h
+ *
+ * MDS data structures.
+ * See also lustre_idl.h for wire formats of requests.
+ */
+
+#ifndef _LUSTRE_MDC_H
+#define _LUSTRE_MDC_H
+
+/** \defgroup mdc mdc
+ *
+ * @{
+ */
+
+# include <linux/fs.h>
+# include <linux/dcache.h>
+# ifdef CONFIG_FS_POSIX_ACL
+# include <linux/posix_acl_xattr.h>
+# endif /* CONFIG_FS_POSIX_ACL */
+# include <linux/lustre_intent.h>
+#include <lustre_handles.h>
+#include <linux/libcfs/libcfs.h>
+#include <obd_class.h>
+#include <lustre/lustre_idl.h>
+#include <lustre_lib.h>
+#include <lustre_dlm.h>
+#include <lustre_export.h>
+
+struct ptlrpc_client;
+struct obd_export;
+struct ptlrpc_request;
+struct obd_device;
+
+struct mdc_rpc_lock {
+ struct mutex rpcl_mutex;
+ struct lookup_intent *rpcl_it;
+ int rpcl_fakes;
+};
+
+#define MDC_FAKE_RPCL_IT ((void *)0x2c0012bfUL)
+
+static inline void mdc_init_rpc_lock(struct mdc_rpc_lock *lck)
+{
+ mutex_init(&lck->rpcl_mutex);
+ lck->rpcl_it = NULL;
+}
+
+static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck,
+ struct lookup_intent *it)
+{
+ if (it != NULL && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
+ it->it_op == IT_LAYOUT))
+ return;
+
+ /* This would normally block until the existing request finishes.
+ * If fail_loc is set it will block until the regular request is
+ * done, then set rpcl_it to MDC_FAKE_RPCL_IT. Once that is set
+ * it will only be cleared when all fake requests are finished.
+ * Only when all fake requests are finished can normal requests
+ * be sent, to ensure they are recoverable again. */
+ again:
+ mutex_lock(&lck->rpcl_mutex);
+
+ if (CFS_FAIL_CHECK_QUIET(OBD_FAIL_MDC_RPCS_SEM)) {
+ lck->rpcl_it = MDC_FAKE_RPCL_IT;
+ lck->rpcl_fakes++;
+ mutex_unlock(&lck->rpcl_mutex);
+ return;
+ }
+
+ /* This will only happen when the CFS_FAIL_CHECK() was
+ * just turned off but there are still requests in progress.
+ * Wait until they finish. It doesn't need to be efficient
+ * in this extremely rare case, just have low overhead in
+ * the common case when it isn't true. */
+ while (unlikely(lck->rpcl_it == MDC_FAKE_RPCL_IT)) {
+ mutex_unlock(&lck->rpcl_mutex);
+ schedule_timeout(cfs_time_seconds(1) / 4);
+ goto again;
+ }
+
+ LASSERT(lck->rpcl_it == NULL);
+ lck->rpcl_it = it;
+}
+
+static inline void mdc_put_rpc_lock(struct mdc_rpc_lock *lck,
+ struct lookup_intent *it)
+{
+ if (it != NULL && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
+ it->it_op == IT_LAYOUT))
+ return;
+
+ if (lck->rpcl_it == MDC_FAKE_RPCL_IT) { /* OBD_FAIL_MDC_RPCS_SEM */
+ mutex_lock(&lck->rpcl_mutex);
+
+ LASSERTF(lck->rpcl_fakes > 0, "%d\n", lck->rpcl_fakes);
+ lck->rpcl_fakes--;
+
+ if (lck->rpcl_fakes == 0)
+ lck->rpcl_it = NULL;
+
+ } else {
+ LASSERTF(it == lck->rpcl_it, "%p != %p\n", it, lck->rpcl_it);
+ lck->rpcl_it = NULL;
+ }
+
+ mutex_unlock(&lck->rpcl_mutex);
+}
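
A hedged sketch of the intended pairing: take the rpc_lock before issuing a modifying metadata RPC and release it afterwards; 'it' may describe a lookup intent or be NULL:

    static int example_serialized_rpc(struct mdc_rpc_lock *lck,
                                      struct lookup_intent *it)
    {
            int rc = 0;

            mdc_get_rpc_lock(lck, it);
            /* ... issue the modifying metadata RPC here ... */
            mdc_put_rpc_lock(lck, it);
            return rc;
    }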
+
+static inline void mdc_update_max_ea_from_body(struct obd_export *exp,
+ struct mdt_body *body)
+{
+ if (body->valid & OBD_MD_FLMODEASIZE) {
+ if (exp->exp_obd->u.cli.cl_max_mds_easize < body->max_mdsize)
+ exp->exp_obd->u.cli.cl_max_mds_easize =
+ body->max_mdsize;
+ if (exp->exp_obd->u.cli.cl_max_mds_cookiesize <
+ body->max_cookiesize)
+ exp->exp_obd->u.cli.cl_max_mds_cookiesize =
+ body->max_cookiesize;
+ }
+}
+
+
+struct mdc_cache_waiter {
+ struct list_head mcw_entry;
+ wait_queue_head_t mcw_waitq;
+};
+
+/* mdc/mdc_locks.c */
+int it_disposition(struct lookup_intent *it, int flag);
+void it_clear_disposition(struct lookup_intent *it, int flag);
+void it_set_disposition(struct lookup_intent *it, int flag);
+int it_open_error(int phase, struct lookup_intent *it);
+
+/** @} mdc */
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_mds.h b/drivers/staging/lustre/lustre/include/lustre_mds.h
new file mode 100644
index 0000000..b386f87
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_mds.h
@@ -0,0 +1,81 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/include/lustre_mds.h
+ *
+ * MDS data structures.
+ * See also lustre_idl.h for wire formats of requests.
+ */
+
+#ifndef _LUSTRE_MDS_H
+#define _LUSTRE_MDS_H
+
+/** \defgroup mds mds
+ *
+ * @{
+ */
+
+#include <lustre_handles.h>
+#include <linux/libcfs/libcfs.h>
+#include <lustre/lustre_idl.h>
+#include <lustre_lib.h>
+#include <lustre_dlm.h>
+#include <lustre_export.h>
+
+struct mds_group_info {
+ struct obd_uuid *uuid;
+ int group;
+};
+
+struct mds_capa_info {
+ struct obd_uuid *uuid;
+ struct lustre_capa_key *capa;
+};
+
+#define MDD_OBD_NAME "mdd_obd"
+#define MDD_OBD_UUID "mdd_obd_uuid"
+
+static inline int md_should_create(__u64 flags)
+{
+ return !(flags & MDS_OPEN_DELAY_CREATE ||
+ !(flags & FMODE_WRITE));
+}
+
+/* these are local flags, used only on the client, private */
+#define M_CHECK_STALE 0200000000
+
+/** @} mds */
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
new file mode 100644
index 0000000..e947002
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -0,0 +1,3488 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2010, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+/** \defgroup PtlRPC Portal RPC and networking module.
+ *
+ * PortalRPC is the layer used by the rest of the Lustre code to achieve
+ * network communications: establish connections with corresponding export and
+ * import states, listen for a service, send and receive RPCs.
+ * PortalRPC also includes the base recovery framework: packet resending and
+ * replaying, reconnections, and the pinger.
+ *
+ * PortalRPC utilizes LNet as its transport layer.
+ *
+ * @{
+ */
+
+
+#ifndef _LUSTRE_NET_H
+#define _LUSTRE_NET_H
+
+/** \defgroup net net
+ *
+ * @{
+ */
+
+#include <linux/lustre_net.h>
+
+#include <linux/libcfs/libcfs.h>
+// #include <obd.h>
+#include <linux/lnet/lnet.h>
+#include <lustre/lustre_idl.h>
+#include <lustre_ha.h>
+#include <lustre_sec.h>
+#include <lustre_import.h>
+#include <lprocfs_status.h>
+#include <lu_object.h>
+#include <lustre_req_layout.h>
+
+#include <obd_support.h>
+#include <lustre_ver.h>
+
+/* MD flags we _always_ use */
+#define PTLRPC_MD_OPTIONS 0
+
+/**
+ * Max # of bulk operations in one request.
+ * In order for the client and server to properly negotiate the maximum
+ * possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two
+ * value. The client is free to limit the actual RPC size for any bulk
+ * transfer via cl_max_pages_per_rpc to some non-power-of-two value. */
+#define PTLRPC_BULK_OPS_BITS 2
+#define PTLRPC_BULK_OPS_COUNT (1U << PTLRPC_BULK_OPS_BITS)
+/**
+ * PTLRPC_BULK_OPS_MASK is for the convenience of the client only, and
+ * should not be used on the server at all. Otherwise, it imposes a
+ * protocol limitation on the maximum RPC size that can be used by any
+ * RPC sent to that server in the future. Instead, the server should
+ * use the negotiated per-client ocd_brw_size to determine the bulk
+ * RPC count. */
+#define PTLRPC_BULK_OPS_MASK (~((__u64)PTLRPC_BULK_OPS_COUNT - 1))
+
+/**
+ * Define maxima for bulk I/O.
+ *
+ * A single PTLRPC BRW request is sent via up to PTLRPC_BULK_OPS_COUNT
+ * of LNET_MTU sized RDMA transfers. Clients and servers negotiate the
+ * currently supported maximum between peers at connect via ocd_brw_size.
+ */
+#define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
+#define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS)
+#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+
+#define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS)
+#define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
+#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE
+#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
+
+/* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
+# if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
+# error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
+# endif
+# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE))
+# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE"
+# endif
+# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
+# error "PTLRPC_MAX_BRW_SIZE too big"
+# endif
+# if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV * PTLRPC_BULK_OPS_COUNT)
+# error "PTLRPC_MAX_BRW_PAGES too big"
+# endif
+
+#define PTLRPC_NTHRS_INIT 2
+
+/**
+ * Buffer Constants
+ *
+ * Constants determine how memory is used to buffer incoming service requests.
+ *
+ * ?_NBUFS # buffers to allocate when growing the pool
+ * ?_BUFSIZE # bytes in a single request buffer
+ * ?_MAXREQSIZE # maximum request service will receive
+ *
+ * When fewer than ?_NBUFS/2 buffers are posted for receive, another chunk
+ * of ?_NBUFS is added to the pool.
+ *
+ * Messages larger than ?_MAXREQSIZE are dropped. Request buffers are
+ * considered full when less than ?_MAXREQSIZE is left in them.
+ */
+/**
+ * Thread Constants
+ *
+ * Constants determine how threads are created for ptlrpc service.
+ *
+ * ?_NTHRS_INIT # of threads to create for each service partition on
+ * initialization. If it's a non-affinity service and
+ * there is only one partition, it's the overall #
+ * of threads for the service while initializing.
+ * ?_NTHRS_BASE minimum # of threads that should be created for each
+ * ptlrpc partition to keep the service healthy.
+ * It's the low-water mark of the threads upper-limit
+ * for each partition.
+ * ?_THR_FACTOR # of threads that can be added to the threads
+ * upper-limit for each CPU core. This factor is only
+ * for reference; we might decrease its value if the
+ * number of cores per CPT is above a limit.
+ * ?_NTHRS_MAX maximum # of overall threads that can be created for
+ * a service. It's a soft limit because if the service
+ * is running on a machine with hundreds of cores and
+ * tens of CPU partitions, we need to guarantee that
+ * each partition has ?_NTHRS_BASE threads, which means
+ * the total thread count will be ?_NTHRS_BASE *
+ * number_of_cpts, which can exceed ?_NTHRS_MAX.
+ *
+ * Examples
+ *
+ * #define MDS_NTHRS_INIT 2
+ * #define MDS_NTHRS_BASE 64
+ * #define MDS_NTHRS_FACTOR 8
+ * #define MDS_NTHRS_MAX 1024
+ *
+ * Example 1):
+ * ---------------------------------------------------------------------
+ * Server(A) has 16 cores, user configured it to 4 partitions so each
+ * partition has 4 cores, then actual number of service threads on each
+ * partition is:
+ * MDS_NTHRS_BASE(64) + cores(4) * MDS_NTHRS_FACTOR(8) = 96
+ *
+ * Total number of threads for the service is:
+ * 96 * partitions(4) = 384
+ *
+ * Example 2):
+ * ---------------------------------------------------------------------
+ * Server(B) has 32 cores, user configured it to 4 partitions so each
+ * partition has 8 cores, then actual number of service threads on each
+ * partition is:
+ * MDS_NTHRS_BASE(64) + cores(8) * MDS_NTHRS_FACTOR(8) = 128
+ *
+ * Total number of threads for the service is:
+ * 128 * partitions(4) = 512
+ *
+ * Example 3):
+ * ---------------------------------------------------------------------
+ * Server(B) has 96 cores, user configured it to 8 partitions so each
+ * partition has 12 cores, then actual number of service threads on each
+ * partition is:
+ * MDS_NTHRS_BASE(64) + cores(12) * MDS_NTHRS_FACTOR(8) = 160
+ *
+ * Total number of threads for the service is:
+ * 160 * partitions(8) = 1280
+ *
+ * However, it's above the soft limit MDS_NTHRS_MAX, so we choose this number
+ * as upper limit of threads number for each partition:
+ * MDS_NTHRS_MAX(1024) / partitions(8) = 128
+ *
+ * Example 4):
+ * ---------------------------------------------------------------------
+ * Server(C) has a thousand cores and the user configured it to 32 partitions
+ * MDS_NTHRS_BASE(64) * 32 = 2048
+ *
+ * which is already above soft limit MDS_NTHRS_MAX(1024), but we still need
+ * to guarantee that each partition has at least MDS_NTHRS_BASE(64) threads
+ * to keep service healthy, so total number of threads will just be 2048.
+ *
+ * NB: we don't suggest choosing a server with that many cores because the
+ * backend filesystem itself, the buffer cache, or the underlying network
+ * stack might have some SMP scalability issues at that large scale.
+ *
+ * If user already has a fat machine with hundreds or thousands of cores,
+ * there are two choices for configuration:
+ * a) create CPU table from subset of all CPUs and run Lustre on
+ * top of this subset
+ * b) bind service threads on a few partitions, see modparameters of
+ * MDS and OSS for details
+ *
+ * NB: these calculations (and examples below) are simplified to help
+ * understanding, the real implementation is a little more complex,
+ * please see ptlrpc_server_nthreads_check() for details.
+ *
+ */
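
The arithmetic in the examples above can be summarized by a small helper; this is only an illustrative simplification, not the real ptlrpc_server_nthreads_check():

    static int example_nthrs_per_partition(int nthrs_base, int factor,
                                           int nthrs_max, int cores_per_cpt,
                                           int ncpts)
    {
            int nthrs = nthrs_base + cores_per_cpt * factor;

            /* soft limit: spread the overall maximum across partitions ... */
            if (nthrs * ncpts > nthrs_max)
                    nthrs = nthrs_max / ncpts;
            /* ... but never drop below the per-partition base (example 4) */
            if (nthrs < nthrs_base)
                    nthrs = nthrs_base;
            return nthrs;
    }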
+
+ /*
+ * LDLM threads constants:
+ *
+ * Given 8 as factor and 24 as base threads number
+ *
+ * example 1)
+ * On 4-core machine we will have 24 + 8 * 4 = 56 threads.
+ *
+ * example 2)
+ * On 8-core machine with 2 partitions we will have 24 + 4 * 8 = 56
+ * threads for each partition and total threads number will be 112.
+ *
+ * example 3)
+ * On 64-core machine with 8 partitions we will need LDLM_NTHRS_BASE(24)
+ * threads for each partition to keep service healthy, so total threads
+ * number should be 24 * 8 = 192.
+ *
+ * So with these constants, the number of threads will be at a similar
+ * level to old versions, unless the target machine has over a hundred cores
+ */
+#define LDLM_THR_FACTOR 8
+#define LDLM_NTHRS_INIT PTLRPC_NTHRS_INIT
+#define LDLM_NTHRS_BASE 24
+#define LDLM_NTHRS_MAX (num_online_cpus() == 1 ? 64 : 128)
+
+#define LDLM_BL_THREADS LDLM_NTHRS_AUTO_INIT
+#define LDLM_CLIENT_NBUFS 1
+#define LDLM_SERVER_NBUFS 64
+#define LDLM_BUFSIZE (8 * 1024)
+#define LDLM_MAXREQSIZE (5 * 1024)
+#define LDLM_MAXREPSIZE (1024)
+
+ /*
+ * MDS threads constants:
+ *
+ * Please see the examples in "Thread Constants"; the MDS thread count will be
+ * at a level comparable to old versions, unless the server has many cores.
+ */
+#ifndef MDS_MAX_THREADS
+#define MDS_MAX_THREADS 1024
+#define MDS_MAX_OTHR_THREADS 256
+
+#else /* MDS_MAX_THREADS */
+#if MDS_MAX_THREADS < PTLRPC_NTHRS_INIT
+#undef MDS_MAX_THREADS
+#define MDS_MAX_THREADS PTLRPC_NTHRS_INIT
+#endif
+#define MDS_MAX_OTHR_THREADS max(PTLRPC_NTHRS_INIT, MDS_MAX_THREADS / 2)
+#endif
+
+/* default service */
+#define MDS_THR_FACTOR 8
+#define MDS_NTHRS_INIT PTLRPC_NTHRS_INIT
+#define MDS_NTHRS_MAX MDS_MAX_THREADS
+#define MDS_NTHRS_BASE min(64, MDS_NTHRS_MAX)
+
+/* read-page service */
+#define MDS_RDPG_THR_FACTOR 4
+#define MDS_RDPG_NTHRS_INIT PTLRPC_NTHRS_INIT
+#define MDS_RDPG_NTHRS_MAX MDS_MAX_OTHR_THREADS
+#define MDS_RDPG_NTHRS_BASE min(48, MDS_RDPG_NTHRS_MAX)
+
+/* these should be removed when we remove setattr service in the future */
+#define MDS_SETA_THR_FACTOR 4
+#define MDS_SETA_NTHRS_INIT PTLRPC_NTHRS_INIT
+#define MDS_SETA_NTHRS_MAX MDS_MAX_OTHR_THREADS
+#define MDS_SETA_NTHRS_BASE min(48, MDS_SETA_NTHRS_MAX)
+
+/* non-affinity threads */
+#define MDS_OTHR_NTHRS_INIT PTLRPC_NTHRS_INIT
+#define MDS_OTHR_NTHRS_MAX MDS_MAX_OTHR_THREADS
+
+#define MDS_NBUFS 64
+
+/**
+ * Assume file name length = FNAME_MAX = 256 (true for ext3).
+ * path name length = PATH_MAX = 4096
+ * LOV MD size max = EA_MAX = 24 * 2000
+ * (NB: 24 is size of lov_ost_data)
+ * LOV LOGCOOKIE size max = 32 * 2000
+ * (NB: 32 is size of llog_cookie)
+ * symlink: FNAME_MAX + PATH_MAX <- largest
+ * link: FNAME_MAX + PATH_MAX (mds_rec_link < mds_rec_create)
+ * rename: FNAME_MAX + FNAME_MAX
+ * open: FNAME_MAX + EA_MAX
+ *
+ * MDS_MAXREQSIZE ~= 4736 bytes =
+ * lustre_msg + ldlm_request + mdt_body + mds_rec_create + FNAME_MAX + PATH_MAX
+ * MDS_MAXREPSIZE ~= 8300 bytes = lustre_msg + llog_header
+ *
+ * Realistic size is about 512 bytes (20 character name + 128 char symlink),
+ * except in the open case where there are a large number of OSTs in a LOV.
+ */
+#define MDS_MAXREQSIZE (5 * 1024) /* >= 4736 */
+#define MDS_MAXREPSIZE (9 * 1024) /* >= 8300 */
+
+/**
+ * MDS incoming request with LOV EA
+ * 24 = sizeof(struct lov_ost_data), i.e: replay of opencreate
+ */
+#define MDS_LOV_MAXREQSIZE max(MDS_MAXREQSIZE, \
+ 362 + LOV_MAX_STRIPE_COUNT * 24)
+/**
+ * MDS outgoing reply with LOV EA
+ *
+ * NB: the max reply size a Lustre 2.4+ client can get from an old MDS is:
+ * LOV_MAX_STRIPE_COUNT * (llog_cookie + lov_ost_data) + extra bytes
+ *
+ * but a 2.4 or later MDS will never send a reply with llog_cookie to any
+ * client version. This macro is defined for the server-side reply buffer size.
+ */
+#define MDS_LOV_MAXREPSIZE MDS_LOV_MAXREQSIZE
+
+/**
+ * This is the size of a maximum REINT_SETXATTR request:
+ *
+ * lustre_msg 56 (32 + 4 x 5 + 4)
+ * ptlrpc_body 184
+ * mdt_rec_setxattr 136
+ * lustre_capa 120
+ * name 256 (XATTR_NAME_MAX)
+ * value 65536 (XATTR_SIZE_MAX)
+ */
+#define MDS_EA_MAXREQSIZE 66288
+
+/**
+ * These are the maximum request and reply sizes (rounded up to 1 KB
+ * boundaries) for the "regular" MDS_REQUEST_PORTAL and MDS_REPLY_PORTAL.
+ */
+#define MDS_REG_MAXREQSIZE (((max(MDS_EA_MAXREQSIZE, \
+ MDS_LOV_MAXREQSIZE) + 1023) >> 10) << 10)
+#define MDS_REG_MAXREPSIZE MDS_REG_MAXREQSIZE
+
+/**
+ * The update request includes all of the updates from the create, which might
+ * include the linkea (4K maximum); together with other updates, we set it to
+ * 9K: lustre_msg + ptlrpc_body + UPDATE_BUF_SIZE (8K)
+ */
+#define MDS_OUT_MAXREQSIZE (9 * 1024)
+#define MDS_OUT_MAXREPSIZE MDS_MAXREPSIZE
+
+/** MDS_BUFSIZE = max_reqsize (w/o LOV EA) + max sptlrpc payload size */
+#define MDS_BUFSIZE max(MDS_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD, \
+ 8 * 1024)
+
+/**
+ * MDS_REG_BUFSIZE should at least be MDS_REG_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD.
+ * However, we need to allocate a much larger buffer for it because LNet
+ * requires each MD (rqbd) to have at least MDS_REG_MAXREQSIZE bytes left to
+ * avoid dropping a maximum-sized incoming request. So if MDS_REG_BUFSIZE is
+ * only a little larger than MDS_REG_MAXREQSIZE, then it can only fit one
+ * request even when there are about MDS_REG_MAXREQSIZE bytes left in an rqbd,
+ * and memory utilization is very low.
+ *
+ * Meanwhile, the size of an rqbd can't be too large, because an rqbd can't be
+ * reused until all requests that fit in it have been processed and released,
+ * which means one long-blocked request can prevent the rqbd from being reused.
+ * Now we set the request buffer size to 160 KB, so even if each rqbd is
+ * unlinked from LNet with 65 KB unused, buffer utilization will be about 59%.
+ * Please check LU-2432 for details.
+ */
+#define MDS_REG_BUFSIZE max(MDS_REG_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD, \
+ 160 * 1024)
+
+/**
+ * MDS_OUT_BUFSIZE = max_out_reqsize + max sptlrpc payload (~1K), which is
+ * about 10K. For the same reason as MDS_REG_BUFSIZE, we also give some
+ * extra bytes to each request buffer to improve the buffer utilization rate.
+ */
+#define MDS_OUT_BUFSIZE max(MDS_OUT_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD, \
+ 24 * 1024)
+
+/** FLD_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc */
+#define FLD_MAXREQSIZE (160)
+
+/** FLD_MAXREPSIZE == lustre_msg + ptlrpc_body */
+#define FLD_MAXREPSIZE (152)
+#define FLD_BUFSIZE (1 << 12)
+
+/**
+ * SEQ_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc + lu_range +
+ * __u32 padding */
+#define SEQ_MAXREQSIZE (160)
+
+/** SEQ_MAXREPSIZE == lustre_msg + ptlrpc_body + lu_range */
+#define SEQ_MAXREPSIZE (152)
+#define SEQ_BUFSIZE (1 << 12)
+
+/** MGS threads must be >= 3, see bug 22458 comment #28 */
+#define MGS_NTHRS_INIT (PTLRPC_NTHRS_INIT + 1)
+#define MGS_NTHRS_MAX 32
+
+#define MGS_NBUFS 64
+#define MGS_BUFSIZE (8 * 1024)
+#define MGS_MAXREQSIZE (7 * 1024)
+#define MGS_MAXREPSIZE (9 * 1024)
+
+ /*
+ * OSS threads constants:
+ *
+ * Given 8 as factor and 64 as base threads number
+ *
+ * example 1):
+ * On 8-core server configured to 2 partitions, we will have
+ * 64 + 8 * 4 = 96 threads for each partition, 192 total threads.
+ *
+ * example 2):
+ * On 32-core machine configured to 4 partitions, we will have
+ * 64 + 8 * 8 = 128 threads for each partition, so total threads number
+ * will be 128 * 4 = 512.
+ *
+ * example 3):
+ * On 64-core machine configured to 4 partitions, we will have
+ * 64 + 16 * 8 = 192 threads for each partition, so total threads number
+ * will be 192 * 4 = 768 which is above limit OSS_NTHRS_MAX(512), so we
+ * cut off the value to OSS_NTHRS_MAX(512) / 4 which is 128 threads
+ * for each partition.
+ *
+ * So we can see that with these constants, the number of threads will be at
+ * a similar level to old versions, unless the server has many cores.
+ */
+ /* depress threads factor for VM with small memory size */
+#define OSS_THR_FACTOR min_t(int, 8, \
+ NUM_CACHEPAGES >> (28 - PAGE_CACHE_SHIFT))
+#define OSS_NTHRS_INIT (PTLRPC_NTHRS_INIT + 1)
+#define OSS_NTHRS_BASE 64
+#define OSS_NTHRS_MAX 512
+
+/* threads for handling "create" request */
+#define OSS_CR_THR_FACTOR 1
+#define OSS_CR_NTHRS_INIT PTLRPC_NTHRS_INIT
+#define OSS_CR_NTHRS_BASE 8
+#define OSS_CR_NTHRS_MAX 64
+
+/**
+ * OST_IO_MAXREQSIZE ~=
+ * lustre_msg + ptlrpc_body + obdo + obd_ioobj +
+ * DT_MAX_BRW_PAGES * niobuf_remote
+ *
+ * - single object with 16 pages is 512 bytes
+ * - OST_IO_MAXREQSIZE must be at least 1 page of cookies plus some spillover
+ * - Must be a multiple of 1024
+ * - actual size is about 18K
+ */
+#define _OST_MAXREQSIZE_SUM (sizeof(struct lustre_msg) + \
+ sizeof(struct ptlrpc_body) + \
+ sizeof(struct obdo) + \
+ sizeof(struct obd_ioobj) + \
+ sizeof(struct niobuf_remote) * DT_MAX_BRW_PAGES)
+/**
+ * FIEMAP request can be 4K+ for now
+ */
+#define OST_MAXREQSIZE (5 * 1024)
+#define OST_IO_MAXREQSIZE max_t(int, OST_MAXREQSIZE, \
+ (((_OST_MAXREQSIZE_SUM - 1) | (1024 - 1)) + 1))
+
+#define OST_MAXREPSIZE (9 * 1024)
+#define OST_IO_MAXREPSIZE OST_MAXREPSIZE
+
+#define OST_NBUFS 64
+/** OST_BUFSIZE = max_reqsize + max sptlrpc payload size */
+#define OST_BUFSIZE max_t(int, OST_MAXREQSIZE + 1024, 16 * 1024)
+/**
+ * OST_IO_MAXREQSIZE is about 18K; giving an extra 46K can increase the buffer
+ * utilization rate of the request buffer, please check the comment at
+ * MDS_REG_BUFSIZE above for details.
+ */
+#define OST_IO_BUFSIZE max_t(int, OST_IO_MAXREQSIZE + 1024, 64 * 1024)
+
+/* Macro to hide a typecast. */
+#define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)
+
+/**
+ * Structure to single define portal connection.
+ */
+struct ptlrpc_connection {
+ /** linkage for connections hash table */
+ struct hlist_node c_hash;
+ /** Our own lnet nid for this connection */
+ lnet_nid_t c_self;
+ /** Remote side nid for this connection */
+ lnet_process_id_t c_peer;
+ /** UUID of the other side */
+ struct obd_uuid c_remote_uuid;
+ /** reference counter for this connection */
+ atomic_t c_refcount;
+};
+
+/** Client definition for PortalRPC */
+struct ptlrpc_client {
+ /** What lnet portal does this client send messages to by default */
+ __u32 cli_request_portal;
+ /** What portal do we expect replies on */
+ __u32 cli_reply_portal;
+ /** Name of the client */
+ char *cli_name;
+};
+
+/** state flags of requests */
+/* XXX only ones left are those used by the bulk descs as well! */
+#define PTL_RPC_FL_INTR (1 << 0) /* reply wait was interrupted by user */
+#define PTL_RPC_FL_TIMEOUT (1 << 7) /* request timed out waiting for reply */
+
+#define REQ_MAX_ACK_LOCKS 8
+
+union ptlrpc_async_args {
+ /**
+ * Scratchpad for passing args to completion interpreter. Users
+ * cast to the struct of their choosing, and CLASSERT that this is
+ * big enough. For _tons_ of context, OBD_ALLOC a struct and store
+ * a pointer to it here. The pointer_arg ensures this struct is at
+ * least big enough for that.
+ */
+ void *pointer_arg[11];
+ __u64 space[7];
+};
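+/*
+ * Typical usage sketch for the scratchpad above ("my_async_args" and its
+ * member are hypothetical, caller-defined names):
+ *
+ *	struct my_async_args *aa;
+ *
+ *	CLASSERT(sizeof(*aa) <= sizeof(union ptlrpc_async_args));
+ *	aa = ptlrpc_req_async_args(req);
+ *	aa->aa_something = ...;
+ */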
+
+struct ptlrpc_request_set;
+typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int);
+typedef int (*set_producer_func)(struct ptlrpc_request_set *, void *);
+
+/**
+ * Definition of request set structure.
+ * A request set is a list of requests (not necessarily to the same target)
+ * that, once populated with RPCs, can be sent in parallel.
+ * There are two kinds of request sets: general purpose sets, and sets with a
+ * dedicated serving thread; an example of the latter is the ptlrpcd set.
+ * For general purpose sets, once the set has started sending it is impossible
+ * to add new requests to it.
+ * Provides a way to call "completion callbacks" when all requests in the set
+ * have returned.
+ */
+struct ptlrpc_request_set {
+ atomic_t set_refcount;
+ /** number of in queue requests */
+ atomic_t set_new_count;
+ /** number of uncompleted requests */
+ atomic_t set_remaining;
+ /** wait queue to wait on for request events */
+ wait_queue_head_t set_waitq;
+ wait_queue_head_t *set_wakeup_ptr;
+ /** List of requests in the set */
+ struct list_head set_requests;
+ /**
+ * List of completion callbacks to be called when the set is completed
+ * This is only used if \a set_interpret is NULL.
+ * Links struct ptlrpc_set_cbdata.
+ */
+ struct list_head set_cblist;
+ /** Completion callback, if only one. */
+ set_interpreter_func set_interpret;
+ /** Opaque argument passed to the completion callback \a set_interpret. */
+ void *set_arg;
+ /**
+ * Lock for \a set_new_requests manipulations
+ * locked so that any old caller can communicate requests to
+ * the set holder who can then fold them into the lock-free set
+ */
+ spinlock_t set_new_req_lock;
+ /** List of new yet unsent requests. Only used with ptlrpcd now. */
+ struct list_head set_new_requests;
+
+ /** rq_status of requests that have been freed already */
+ int set_rc;
+ /** Additional fields used by the flow control extension */
+ /** Maximum number of RPCs in flight */
+ int set_max_inflight;
+ /** Callback function used to generate RPCs */
+ set_producer_func set_producer;
+ /** Opaque argument passed to the producer callback */
+ void *set_producer_arg;
+};
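+/*
+ * Sketch of general purpose set usage, assuming the usual set helpers
+ * (ptlrpc_prep_set(), ptlrpc_set_add_req(), ptlrpc_set_wait() and
+ * ptlrpc_set_destroy()) declared elsewhere in this header:
+ *
+ *	set = ptlrpc_prep_set();
+ *	ptlrpc_set_add_req(set, req1);
+ *	ptlrpc_set_add_req(set, req2);
+ *	rc = ptlrpc_set_wait(set);	(sends the queued RPCs in parallel
+ *					 and waits for them all to complete)
+ *	ptlrpc_set_destroy(set);
+ */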
+
+/**
+ * Description of a single ptlrpc_set callback
+ */
+struct ptlrpc_set_cbdata {
+ /** List linkage item */
+ struct list_head psc_item;
+ /** Pointer to interpreting function */
+ set_interpreter_func psc_interpret;
+ /** Opaque argument to pass to the callback */
+ void *psc_data;
+};
+
+struct ptlrpc_bulk_desc;
+struct ptlrpc_service_part;
+struct ptlrpc_service;
+
+/**
+ * ptlrpc callback & work item stuff
+ */
+struct ptlrpc_cb_id {
+ void (*cbid_fn)(lnet_event_t *ev); /* specific callback fn */
+ void *cbid_arg; /* additional arg */
+};
+
+/** Maximum number of locks to fit into reply state */
+#define RS_MAX_LOCKS 8
+#define RS_DEBUG 0
+
+/**
+ * Structure to define reply state on the server
+ * The reply state holds various reply message information. Also, for
+ * "difficult" replies (the rep-ack case) we keep the state after sending the
+ * reply and wait for the client to acknowledge its reception. In these cases
+ * locks can be added to the state for replay/failover consistency guarantees.
+ */
+struct ptlrpc_reply_state {
+ /** Callback description */
+ struct ptlrpc_cb_id rs_cb_id;
+ /** Linkage for list of all reply states in a system */
+ struct list_head rs_list;
+ /** Linkage for list of all reply states on same export */
+ struct list_head rs_exp_list;
+ /** Linkage for list of all reply states for same obd */
+ struct list_head rs_obd_list;
+#if RS_DEBUG
+ struct list_head rs_debug_list;
+#endif
+ /** A spinlock to protect the reply state flags */
+ spinlock_t rs_lock;
+ /** Reply state flags */
+ unsigned long rs_difficult:1; /* ACK/commit stuff */
+ unsigned long rs_no_ack:1; /* no ACK, even for
+ difficult requests */
+ unsigned long rs_scheduled:1; /* being handled? */
+ unsigned long rs_scheduled_ever:1;/* any schedule attempts? */
+ unsigned long rs_handled:1; /* been handled yet? */
+ unsigned long rs_on_net:1; /* reply_out_callback pending? */
+ unsigned long rs_prealloc:1; /* rs from prealloc list */
+ unsigned long rs_committed:1;/* the transaction was committed
+ and the rs was dispatched
+ by ptlrpc_commit_replies */
+ /** Size of the state */
+ int rs_size;
+ /** opcode */
+ __u32 rs_opc;
+ /** Transaction number */
+ __u64 rs_transno;
+ /** xid */
+ __u64 rs_xid;
+ struct obd_export *rs_export;
+ struct ptlrpc_service_part *rs_svcpt;
+ /** Lnet metadata handle for the reply */
+ lnet_handle_md_t rs_md_h;
+ atomic_t rs_refcount;
+
+ /** Context for the service thread */
+ struct ptlrpc_svc_ctx *rs_svc_ctx;
+ /** Reply buffer (actually sent to the client), encoded if needed */
+ struct lustre_msg *rs_repbuf; /* wrapper */
+ /** Size of the reply buffer */
+ int rs_repbuf_len; /* wrapper buf length */
+ /** Size of the reply message */
+ int rs_repdata_len; /* wrapper msg length */
+ /**
+ * Actual reply message. Its content is encrypted (if needed) to
+ * produce the reply buffer for actual sending. In the simple case
+ * of no network encryption we just set \a rs_repbuf to \a rs_msg
+ */
+ struct lustre_msg *rs_msg; /* reply message */
+
+ /** Number of locks awaiting client ACK */
+ int rs_nlocks;
+ /** Handles of locks awaiting client reply ACK */
+ struct lustre_handle rs_locks[RS_MAX_LOCKS];
+ /** Lock modes of locks in \a rs_locks */
+ ldlm_mode_t rs_modes[RS_MAX_LOCKS];
+};
+
+struct ptlrpc_thread;
+
+/** RPC stages */
+enum rq_phase {
+ RQ_PHASE_NEW = 0xebc0de00,
+ RQ_PHASE_RPC = 0xebc0de01,
+ RQ_PHASE_BULK = 0xebc0de02,
+ RQ_PHASE_INTERPRET = 0xebc0de03,
+ RQ_PHASE_COMPLETE = 0xebc0de04,
+ RQ_PHASE_UNREGISTERING = 0xebc0de05,
+ RQ_PHASE_UNDEFINED = 0xebc0de06
+};
+
+/** Type of request interpreter call-back */
+typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env,
+ struct ptlrpc_request *req,
+ void *arg, int rc);
+
+/**
+ * Definition of request pool structure.
+ * The pool is used to store empty preallocated requests for the case
+ * when we would actually need to send something without performing
+ * any allocations (to avoid e.g. OOM).
+ */
+struct ptlrpc_request_pool {
+ /** Locks the list */
+ spinlock_t prp_lock;
+ /** list of ptlrpc_request structs */
+ struct list_head prp_req_list;
+ /** Maximum message size that would fit into a request from this pool */
+ int prp_rq_size;
+ /** Function to allocate more requests for this pool */
+ void (*prp_populate)(struct ptlrpc_request_pool *, int);
+};
+
+struct lu_context;
+struct lu_env;
+
+struct ldlm_lock;
+
+/**
+ * \defgroup nrs Network Request Scheduler
+ * @{
+ */
+struct ptlrpc_nrs_policy;
+struct ptlrpc_nrs_resource;
+struct ptlrpc_nrs_request;
+
+/**
+ * NRS control operations.
+ *
+ * These are common for all policies.
+ */
+enum ptlrpc_nrs_ctl {
+ /**
+ * Not a valid opcode.
+ */
+ PTLRPC_NRS_CTL_INVALID,
+ /**
+ * Activate the policy.
+ */
+ PTLRPC_NRS_CTL_START,
+ /**
+ * Reserved for multiple primary policies, which may be a possibility
+ * in the future.
+ */
+ PTLRPC_NRS_CTL_STOP,
+ /**
+ * Policies can start using opcodes from this value and onwards for
+ * their own purposes; the assigned value itself is arbitrary.
+ */
+ PTLRPC_NRS_CTL_1ST_POL_SPEC = 0x20,
+};
+
+/**
+ * ORR policy operations
+ */
+enum nrs_ctl_orr {
+ NRS_CTL_ORR_RD_QUANTUM = PTLRPC_NRS_CTL_1ST_POL_SPEC,
+ NRS_CTL_ORR_WR_QUANTUM,
+ NRS_CTL_ORR_RD_OFF_TYPE,
+ NRS_CTL_ORR_WR_OFF_TYPE,
+ NRS_CTL_ORR_RD_SUPP_REQ,
+ NRS_CTL_ORR_WR_SUPP_REQ,
+};
+
+/**
+ * NRS policy operations.
+ *
+ * These determine the behaviour of a policy, and are called in response to
+ * NRS core events.
+ */
+struct ptlrpc_nrs_pol_ops {
+ /**
+ * Called during policy registration; this operation is optional.
+ *
+ * \param[in,out] policy The policy being initialized
+ */
+ int (*op_policy_init) (struct ptlrpc_nrs_policy *policy);
+ /**
+ * Called during policy unregistration; this operation is optional.
+ *
+ * \param[in,out] policy The policy being unregistered/finalized
+ */
+ void (*op_policy_fini) (struct ptlrpc_nrs_policy *policy);
+ /**
+ * Called when activating a policy via lprocfs; policies allocate and
+ * initialize their resources here; this operation is optional.
+ *
+ * \param[in,out] policy The policy being started
+ *
+ * \see nrs_policy_start_locked()
+ */
+ int (*op_policy_start) (struct ptlrpc_nrs_policy *policy);
+ /**
+ * Called when deactivating a policy via lprocfs; policies deallocate
+ * their resources here; this operation is optional
+ *
+ * \param[in,out] policy The policy being stopped
+ *
+ * \see nrs_policy_stop0()
+ */
+ void (*op_policy_stop) (struct ptlrpc_nrs_policy *policy);
+ /**
+ * Used for policy-specific operations; i.e. not generic ones like
+ * \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous
+ * to an ioctl; this operation is optional.
+ *
+ * \param[in,out] policy The policy carrying out operation \a opc
+ * \param[in] opc The command operation being carried out
+ * \param[in,out] arg A generic buffer for communication between the
+ * user and the control operation
+ *
+ * \retval -ve error
+ * \retval 0 success
+ *
+ * \see ptlrpc_nrs_policy_control()
+ */
+ int (*op_policy_ctl) (struct ptlrpc_nrs_policy *policy,
+ enum ptlrpc_nrs_ctl opc, void *arg);
+
+ /**
+ * Called when obtaining references to the resources of the resource
+ * hierarchy for a request that has arrived for handling at the PTLRPC
+ * service. Policies should return -ve for requests they do not wish
+ * to handle. This operation is mandatory.
+ *
+ * \param[in,out] policy The policy we're getting resources for.
+ * \param[in,out] nrq The request we are getting resources for.
+ * \param[in] parent The parent resource of the resource being
+ * requested; set to NULL if none.
+ * \param[out] resp The resource is to be returned here; the
+ * fallback policy in an NRS head should
+ * \e always return a non-NULL pointer value.
+ * \param[in] moving_req When set, signifies that this is an attempt
+ * to obtain resources for a request being moved
+ * to the high-priority NRS head by
+ * ldlm_lock_reorder_req().
+ * This implies two things:
+ * 1. We are under obd_export::exp_rpc_lock and
+ * so should not sleep.
+ * 2. We should not perform non-idempotent
+ * operations, and may skip idempotent
+ * operations that were already carried out
+ * when resources were first taken for the
+ * request as it was initialized in
+ * ptlrpc_nrs_req_initialize().
+ *
+ * \retval 0, +ve The level of the returned resource in the resource
+ * hierarchy; currently only 0 (for a non-leaf resource)
+ * and 1 (for a leaf resource) are supported by the
+ * framework.
+ * \retval -ve error
+ *
+ * \see ptlrpc_nrs_req_initialize()
+ * \see ptlrpc_nrs_hpreq_add_nolock()
+ * \see ptlrpc_nrs_req_hp_move()
+ */
+ int (*op_res_get) (struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq,
+ const struct ptlrpc_nrs_resource *parent,
+ struct ptlrpc_nrs_resource **resp,
+ bool moving_req);
+ /**
+ * Called when releasing references taken for resources in the resource
+ * hierarchy for the request; this operation is optional.
+ *
+ * \param[in,out] policy The policy the resource belongs to
+ * \param[in] res The resource to be freed
+ *
+ * \see ptlrpc_nrs_req_finalize()
+ * \see ptlrpc_nrs_hpreq_add_nolock()
+ * \see ptlrpc_nrs_req_hp_move()
+ */
+ void (*op_res_put) (struct ptlrpc_nrs_policy *policy,
+ const struct ptlrpc_nrs_resource *res);
+
+ /**
+ * Obtains a request for handling from the policy, and optionally
+ * removes the request from the policy; this operation is mandatory.
+ *
+ * \param[in,out] policy The policy to poll
+ * \param[in] peek When set, signifies that we just want to
+ * examine the request, and not handle it, so the
+ * request is not removed from the policy.
+ * \param[in] force When set, it will force a policy to return a
+ * request if it has one queued.
+ *
+ * \retval NULL No request available for handling
+ * \retval valid-pointer The request polled for handling
+ *
+ * \see ptlrpc_nrs_req_get_nolock()
+ */
+ struct ptlrpc_nrs_request *
+ (*op_req_get) (struct ptlrpc_nrs_policy *policy, bool peek,
+ bool force);
+ /**
+ * Called when attempting to add a request to a policy for later
+ * handling; this operation is mandatory.
+ *
+ * \param[in,out] policy The policy on which to enqueue \a nrq
+ * \param[in,out] nrq The request to enqueue
+ *
+ * \retval 0 success
+ * \retval != 0 error
+ *
+ * \see ptlrpc_nrs_req_add_nolock()
+ */
+ int (*op_req_enqueue) (struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq);
+ /**
+ * Removes a request from the policy's set of pending requests. Normally
+ * called after a request has been polled successfully from the policy
+ * for handling; this operation is mandatory.
+ *
+ * \param[in,out] policy The policy the request \a nrq belongs to
+ * \param[in,out] nrq The request to dequeue
+ *
+ * \see ptlrpc_nrs_req_del_nolock()
+ */
+ void (*op_req_dequeue) (struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq);
+ /**
+ * Called after the request has been handled. Could be used for
+ * job/resource control; this operation is optional.
+ *
+ * \param[in,out] policy The policy that is finishing handling of request
+ * \a nrq
+ * \param[in,out] nrq The request
+ *
+ * \pre spin_is_locked(&svcpt->scp_req_lock)
+ *
+ * \see ptlrpc_nrs_req_stop_nolock()
+ */
+ void (*op_req_stop) (struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq);
+ /**
+ * Registers the policy's lprocfs interface with a PTLRPC service.
+ *
+ * \param[in] svc The service
+ *
+ * \retval 0 success
+ * \retval != 0 error
+ */
+ int (*op_lprocfs_init) (struct ptlrpc_service *svc);
+ /**
+ * Unregisters the policy's lprocfs interface from a PTLRPC service.
+ *
+ * In cases of failed policy registration in
+ * \e ptlrpc_nrs_policy_register(), this function may be called for a
+ * service which has not registered the policy successfully, so
+ * implementations of this method should make sure their operations are
+ * safe in such cases.
+ *
+ * \param[in] svc The service
+ */
+ void (*op_lprocfs_fini) (struct ptlrpc_service *svc);
+};
+
+/**
+ * Policy flags
+ */
+enum nrs_policy_flags {
+ /**
+ * Fallback policy, use this flag only on a single supported policy per
+ * service. The flag cannot be used on policies that use
+ * \e PTLRPC_NRS_FL_REG_EXTERN
+ */
+ PTLRPC_NRS_FL_FALLBACK = (1 << 0),
+ /**
+ * Start policy immediately after registering.
+ */
+ PTLRPC_NRS_FL_REG_START = (1 << 1),
+ /**
+ * This is a policy registering from a module different to the one NRS
+ * core ships in (currently ptlrpc).
+ */
+ PTLRPC_NRS_FL_REG_EXTERN = (1 << 2),
+};
+
+/**
+ * NRS queue type.
+ *
+ * Denotes whether an NRS instance is for handling normal or high-priority
+ * RPCs, or whether an operation pertains to one or both of the NRS instances
+ * in a service.
+ */
+enum ptlrpc_nrs_queue_type {
+ PTLRPC_NRS_QUEUE_REG = (1 << 0),
+ PTLRPC_NRS_QUEUE_HP = (1 << 1),
+ PTLRPC_NRS_QUEUE_BOTH = (PTLRPC_NRS_QUEUE_REG | PTLRPC_NRS_QUEUE_HP)
+};
+
+/**
+ * NRS head
+ *
+ * A PTLRPC service has at least one NRS head instance for handling normal
+ * priority RPCs, and may optionally have a second NRS head instance for
+ * handling high-priority RPCs. Each NRS head maintains a list of available
+ * policies, of which one and only one policy is acting as the fallback policy,
+ * and optionally a different policy may be acting as the primary policy. For
+ * all RPCs handled by this NRS head instance, NRS core will first attempt to
+ * enqueue the RPC using the primary policy (if any). The fallback policy is
+ * used in the following cases:
+ * - when there was no primary policy in the
+ * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the request
+ * was initialized.
+ * - when the primary policy that was in the
+ * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the
+ * RPC was initialized indicated that it did not wish, or was otherwise
+ * unable, to handle the request, by returning a non-valid NRS resource
+ * reference.
+ * - when the primary policy that was in the
+ * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the
+ * RPC was initialized fails later during the request enqueueing stage.
+ *
+ * \see nrs_resource_get_safe()
+ * \see nrs_request_enqueue()
+ */
+struct ptlrpc_nrs {
+ spinlock_t nrs_lock;
+ /** XXX Possibly replace svcpt->scp_req_lock with another lock here. */
+ /**
+ * List of registered policies
+ */
+ struct list_head nrs_policy_list;
+ /**
+ * List of policies with queued requests. Policies that have any
+ * outstanding requests are queued here, and this list is queried
+ * in a round-robin manner from NRS core when obtaining a request
+ * for handling. This ensures that requests from policies that at some
+ * point transition away from the
+ * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained.
+ */
+ struct list_head nrs_policy_queued;
+ /**
+ * Service partition for this NRS head
+ */
+ struct ptlrpc_service_part *nrs_svcpt;
+ /**
+ * Primary policy, which is the preferred policy for handling RPCs
+ */
+ struct ptlrpc_nrs_policy *nrs_policy_primary;
+ /**
+ * Fallback policy, which is the backup policy for handling RPCs
+ */
+ struct ptlrpc_nrs_policy *nrs_policy_fallback;
+ /**
+ * This NRS head handles either HP or regular requests
+ */
+ enum ptlrpc_nrs_queue_type nrs_queue_type;
+ /**
+ * # queued requests from all policies in this NRS head
+ */
+ unsigned long nrs_req_queued;
+ /**
+ * # scheduled requests from all policies in this NRS head
+ */
+ unsigned long nrs_req_started;
+ /**
+ * # policies on this NRS
+ */
+ unsigned nrs_num_pols;
+ /**
+ * This NRS head is in progress of starting a policy
+ */
+ unsigned nrs_policy_starting:1;
+ /**
+ * In progress of shutting down the whole NRS head; used during
+ * unregistration
+ */
+ unsigned nrs_stopping:1;
+};
+
+#define NRS_POL_NAME_MAX 16
+
+struct ptlrpc_nrs_pol_desc;
+
+/**
+ * Service compatibility predicate; this determines whether a policy is adequate
+ * for handling RPCs of a particular PTLRPC service.
+ *
+ * XXX: This should give the same result during policy registration and
+ * unregistration, and for all partitions of a service; so the result should
+ * not depend on temporal service properties or on other properties that may
+ * change the result.
+ */
+typedef bool (*nrs_pol_desc_compat_t) (const struct ptlrpc_service *svc,
+ const struct ptlrpc_nrs_pol_desc *desc);
+
+struct ptlrpc_nrs_pol_conf {
+ /**
+ * Human-readable policy name
+ */
+ char nc_name[NRS_POL_NAME_MAX];
+ /**
+ * NRS operations for this policy
+ */
+ const struct ptlrpc_nrs_pol_ops *nc_ops;
+ /**
+ * Service compatibility predicate
+ */
+ nrs_pol_desc_compat_t nc_compat;
+ /**
+ * Set for policies that support a single ptlrpc service, i.e. ones that
+ * have \a nc_compat set to nrs_policy_compat_one(). The variable value
+ * depicts the name of the single service that such policies are
+ * compatible with.
+ */
+ const char *nc_compat_svc_name;
+ /**
+ * Owner module for this policy descriptor; policies registering from a
+ * different module to the one the NRS framework is held within
+ * (currently ptlrpc), should set this field to THIS_MODULE.
+ */
+ struct module *nc_owner;
+ /**
+ * Policy registration flags; a bitmask of \e nrs_policy_flags
+ */
+ unsigned nc_flags;
+};
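+/*
+ * Registration sketch for an external policy (the policy name, ops table and
+ * service name below are illustrative only; nrs_policy_compat_one() is the
+ * compat helper referred to above):
+ *
+ *	static struct ptlrpc_nrs_pol_conf nrs_conf_foo = {
+ *		.nc_name		= "foo",
+ *		.nc_ops			= &nrs_foo_ops,
+ *		.nc_compat		= nrs_policy_compat_one,
+ *		.nc_compat_svc_name	= "ost_io",
+ *		.nc_owner		= THIS_MODULE,
+ *		.nc_flags		= PTLRPC_NRS_FL_REG_EXTERN,
+ *	};
+ *
+ *	rc = ptlrpc_nrs_policy_register(&nrs_conf_foo);
+ */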
+
+/**
+ * NRS policy registering descriptor
+ *
+ * Is used to hold a description of a policy that can be passed to NRS core in
+ * order to register the policy with NRS heads in different PTLRPC services.
+ */
+struct ptlrpc_nrs_pol_desc {
+ /**
+ * Human-readable policy name
+ */
+ char pd_name[NRS_POL_NAME_MAX];
+ /**
+ * Link into nrs_core::nrs_policies
+ */
+ struct list_head pd_list;
+ /**
+ * NRS operations for this policy
+ */
+ const struct ptlrpc_nrs_pol_ops *pd_ops;
+ /**
+ * Service compatibility predicate
+ */
+ nrs_pol_desc_compat_t pd_compat;
+ /**
+ * Set for policies that are compatible with only one PTLRPC service.
+ *
+ * \see ptlrpc_nrs_pol_conf::nc_compat_svc_name
+ */
+ const char *pd_compat_svc_name;
+ /**
+ * Owner module for this policy descriptor.
+ *
+ * We need to hold a reference to the module whenever we might make use
+ * of any of the module's contents, i.e.
+ * - If one or more instances of the policy are at a state where they
+ * might be handling a request, i.e.
+ * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED or
+ * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING as we will have to
+ * call into the policy's ptlrpc_nrs_pol_ops() handlers. A reference
+ * is taken on the module when
+ * \e ptlrpc_nrs_pol_desc::pd_refs becomes 1, and released when it
+ * becomes 0, so that we hold only one reference to the module maximum
+ * at any time.
+ *
+ * We do not need to hold a reference to the module, even though we
+ * might use code and data from the module, in the following cases:
+ * - During external policy registration, because this should happen in
+ * the module's init() function, in which case the module is safe from
+ * removal because a reference is being held on the module by the
+ * kernel, and iirc kmod (and I guess module-init-tools also) will
+ * serialize any racing processes properly anyway.
+ * - During external policy unregistration, because this should happen
+ * in a module's exit() function, and any attempts to start a policy
+ * instance would need to take a reference on the module, and this is
+ * not possible once we have reached the point where the exit()
+ * handler is called.
+ * - During service registration and unregistration, as service setup
+ * and cleanup, and policy registration, unregistration and policy
+ * instance starting, are serialized by \e nrs_core::nrs_mutex, so
+ * as long as users adhere to the convention of registering policies
+ * in init() and unregistering them in module exit() functions, there
+ * should not be a race between these operations.
+ * - During any policy-specific lprocfs operations, because a reference
+ * is held by the kernel on a proc entry that has been entered by a
+ * syscall; so as long as proc entries are removed at unregistration
+ * time, unregistration and lprocfs operations will be properly
+ * serialized.
+ */
+ struct module *pd_owner;
+ /**
+ * Bitmask of \e nrs_policy_flags
+ */
+ unsigned pd_flags;
+ /**
+ * # of references on this descriptor
+ */
+ atomic_t pd_refs;
+};
+
+/**
+ * NRS policy state
+ *
+ * Policies transition from one state to the other during their lifetime
+ */
+enum ptlrpc_nrs_pol_state {
+ /**
+ * Not a valid policy state.
+ */
+ NRS_POL_STATE_INVALID,
+ /**
+ * Policies are at this state either at the start of their life, or
+ * transition here when the user selects a different policy to act
+ * as the primary one.
+ */
+ NRS_POL_STATE_STOPPED,
+ /**
+ * Policy is in the process of stopping
+ */
+ NRS_POL_STATE_STOPPING,
+ /**
+ * Policy is in the process of starting
+ */
+ NRS_POL_STATE_STARTING,
+ /**
+ * A policy is in this state in two cases:
+ * - it is the fallback policy, which is always in this state.
+ * - it has been activated by the user; i.e. it is the primary policy.
+ */
+ NRS_POL_STATE_STARTED,
+};
+
+/**
+ * NRS policy information
+ *
+ * Used for obtaining information for the status of a policy via lprocfs
+ */
+struct ptlrpc_nrs_pol_info {
+ /**
+ * Policy name
+ */
+ char pi_name[NRS_POL_NAME_MAX];
+ /**
+ * Current policy state
+ */
+ enum ptlrpc_nrs_pol_state pi_state;
+ /**
+ * # RPCs enqueued for later dispatching by the policy
+ */
+ long pi_req_queued;
+ /**
+ * # RPCs started for dispatch by the policy
+ */
+ long pi_req_started;
+ /**
+ * Is this a fallback policy?
+ */
+ unsigned pi_fallback:1;
+};
+
+/**
+ * NRS policy
+ *
+ * There is one instance of this for each policy in each NRS head of each
+ * PTLRPC service partition.
+ */
+struct ptlrpc_nrs_policy {
+ /**
+ * Linkage into the NRS head's list of policies,
+ * ptlrpc_nrs:nrs_policy_list
+ */
+ struct list_head pol_list;
+ /**
+ * Linkage into the NRS head's list of policies with enqueued
+ * requests ptlrpc_nrs:nrs_policy_queued
+ */
+ struct list_head pol_list_queued;
+ /**
+ * Current state of this policy
+ */
+ enum ptlrpc_nrs_pol_state pol_state;
+ /**
+ * Bitmask of nrs_policy_flags
+ */
+ unsigned pol_flags;
+ /**
+ * # RPCs enqueued for later dispatching by the policy
+ */
+ long pol_req_queued;
+ /**
+ * # RPCs started for dispatch by the policy
+ */
+ long pol_req_started;
+ /**
+ * Usage reference count taken on the policy instance
+ */
+ long pol_ref;
+ /**
+ * The NRS head this policy has been created at
+ */
+ struct ptlrpc_nrs *pol_nrs;
+ /**
+ * Private policy data; varies by policy type
+ */
+ void *pol_private;
+ /**
+ * Policy descriptor for this policy instance.
+ */
+ struct ptlrpc_nrs_pol_desc *pol_desc;
+};
+
+/**
+ * NRS resource
+ *
+ * Resources are embedded into two types of NRS entities:
+ * - Inside NRS policies, in the policy's private data in
+ * ptlrpc_nrs_policy::pol_private
+ * - In objects that act as prime-level scheduling entities in different NRS
+ * policies; e.g. on a policy that performs round robin or similar order
+ * scheduling across client NIDs, there would be one NRS resource per unique
+ * client NID. On a policy which performs round robin scheduling across
+ * backend filesystem objects, there would be one resource associated with
+ * each of the backend filesystem objects partaking in the scheduling
+ * performed by the policy.
+ *
+ * NRS resources share a parent-child relationship, in which resources embedded
+ * in policy instances are the parent entities, with all scheduling entities
+ * a policy schedules across being the children, thus forming a simple resource
+ * hierarchy. This hierarchy may be extended with one or more levels in the
+ * future if the ability to have more than one primary policy is added.
+ *
+ * Upon request initialization, references to the then active NRS policies are
+ * taken and used to later handle the dispatching of the request with one of
+ * these policies.
+ *
+ * \see nrs_resource_get_safe()
+ * \see ptlrpc_nrs_req_add()
+ */
+struct ptlrpc_nrs_resource {
+ /**
+ * This NRS resource's parent; is NULL for resources embedded in NRS
+ * policy instances; i.e. those are top-level ones.
+ */
+ struct ptlrpc_nrs_resource *res_parent;
+ /**
+ * The policy associated with this resource.
+ */
+ struct ptlrpc_nrs_policy *res_policy;
+};
+
+enum {
+ NRS_RES_FALLBACK,
+ NRS_RES_PRIMARY,
+ NRS_RES_MAX
+};
+
+/* \name fifo
+ *
+ * FIFO policy
+ *
+ * This policy is a logical wrapper around previous, non-NRS functionality.
+ * It dispatches RPCs in the same order as they arrive from the network. This
+ * policy is currently used as the fallback policy, and the only enabled policy
+ * on all NRS heads of all PTLRPC service partitions.
+ * @{
+ */
+
+/**
+ * Private data structure for the FIFO policy
+ */
+struct nrs_fifo_head {
+ /**
+ * Resource object for policy instance.
+ */
+ struct ptlrpc_nrs_resource fh_res;
+ /**
+ * List of queued requests.
+ */
+ struct list_head fh_list;
+ /**
+ * For debugging purposes.
+ */
+ __u64 fh_sequence;
+};
+
+struct nrs_fifo_req {
+ struct list_head fr_list;
+ __u64 fr_sequence;
+};
+
+/** @} fifo */
+
+/**
+ * \name CRR-N
+ *
+ * CRR-N, Client Round Robin over NIDs
+ * @{
+ */
+
+/**
+ * private data structure for CRR-N NRS
+ */
+struct nrs_crrn_net {
+ struct ptlrpc_nrs_resource cn_res;
+ cfs_binheap_t *cn_binheap;
+ cfs_hash_t *cn_cli_hash;
+ /**
+ * Used when a new scheduling round commences, in order to synchronize
+ * all clients with the new round number.
+ */
+ __u64 cn_round;
+ /**
+ * Determines the relevant ordering amongst request batches within a
+ * scheduling round.
+ */
+ __u64 cn_sequence;
+ /**
+ * Round Robin quantum; the maximum number of RPCs that each request
+ * batch for each client can have in a scheduling round.
+ */
+ __u16 cn_quantum;
+};
+
+/**
+ * Object representing a client in CRR-N, as identified by its NID
+ */
+struct nrs_crrn_client {
+ struct ptlrpc_nrs_resource cc_res;
+ struct hlist_node cc_hnode;
+ lnet_nid_t cc_nid;
+ /**
+ * The round number against which this client is currently scheduling
+ * requests.
+ */
+ __u64 cc_round;
+ /**
+ * The sequence number used for requests scheduled by this client during
+ * the current round number.
+ */
+ __u64 cc_sequence;
+ atomic_t cc_ref;
+ /**
+ * Round Robin quantum; the maximum number of RPCs the client is allowed
+ * to schedule in a single batch of each round.
+ */
+ __u16 cc_quantum;
+ /**
+ * # of pending requests for this client, on all existing rounds
+ */
+ __u16 cc_active;
+};
+
+/**
+ * CRR-N NRS request definition
+ */
+struct nrs_crrn_req {
+ /**
+ * Round number for this request; shared with all other requests in the
+ * same batch.
+ */
+ __u64 cr_round;
+ /**
+ * Sequence number for this request; shared with all other requests in
+ * the same batch.
+ */
+ __u64 cr_sequence;
+};
+
+/**
+ * CRR-N policy operations.
+ */
+enum nrs_ctl_crr {
+ /**
+ * Read the RR quantum size of a CRR-N policy.
+ */
+ NRS_CTL_CRRN_RD_QUANTUM = PTLRPC_NRS_CTL_1ST_POL_SPEC,
+ /**
+ * Write the RR quantum size of a CRR-N policy.
+ */
+ NRS_CTL_CRRN_WR_QUANTUM,
+};
+
+/** @} CRR-N */
+
+/**
+ * \name ORR/TRR
+ *
+ * ORR/TRR (Object-based Round Robin/Target-based Round Robin) NRS policies
+ * @{
+ */
+
+/**
+ * Lower and upper byte offsets of a brw RPC
+ */
+struct nrs_orr_req_range {
+ __u64 or_start;
+ __u64 or_end;
+};
+
+/**
+ * RPC types supported by the ORR/TRR policies
+ */
+enum nrs_orr_supp {
+ NOS_OST_READ = (1 << 0),
+ NOS_OST_WRITE = (1 << 1),
+ NOS_OST_RW = (NOS_OST_READ | NOS_OST_WRITE),
+ /**
+ * Default value for policies.
+ */
+ NOS_DFLT = NOS_OST_READ
+};
+
+/**
+ * As unique keys for grouping RPCs together, we use the object's OST FID for
+ * the ORR policy, and the OST index for the TRR policy.
+ *
+ * XXX: We waste some space for TRR policy instances by using a union, but it
+ * allows us to consolidate some of the code between ORR and TRR, and these
+ * policies will probably eventually merge into one anyway.
+ */
+struct nrs_orr_key {
+ union {
+ /** object FID for ORR */
+ struct lu_fid ok_fid;
+ /** OST index for TRR */
+ __u32 ok_idx;
+ };
+};
+
+/**
+ * The largest base string for unique hash/slab object names is
+ * "nrs_orr_reg_", so 13 characters. We add 3 to this to be used for the CPT
+ * id number, so this _should_ be more than enough for the maximum number of
+ * CPTs on any system. If it does happen that this statement is incorrect,
+ * nrs_orr_genobjname() will inevitably yield a non-unique name and cause
+ * kmem_cache_create() to complain (on Linux), so the erroneous situation
+ * will hopefully not go unnoticed.
+ */
+#define NRS_ORR_OBJ_NAME_MAX (sizeof("nrs_orr_reg_") + 3)
+
+/**
+ * private data structure for ORR and TRR NRS
+ */
+struct nrs_orr_data {
+ struct ptlrpc_nrs_resource od_res;
+ cfs_binheap_t *od_binheap;
+ cfs_hash_t *od_obj_hash;
+ struct kmem_cache *od_cache;
+ /**
+ * Used when a new scheduling round commences, in order to synchronize
+ * all object or OST batches with the new round number.
+ */
+ __u64 od_round;
+ /**
+ * Determines the relevant ordering amongst request batches within a
+ * scheduling round.
+ */
+ __u64 od_sequence;
+ /**
+ * RPC types that are currently supported.
+ */
+ enum nrs_orr_supp od_supp;
+ /**
+ * Round Robin quantum; the maximum number of RPCs that each request
+ * batch for each object or OST can have in a scheduling round.
+ */
+ __u16 od_quantum;
+ /**
+ * Whether to use physical disk offsets or logical file offsets.
+ */
+ bool od_physical;
+ /**
+ * XXX: We need to provide a persistently allocated string to hold
+ * unique object names for this policy, since in the versions of Linux
+ * currently supported by Lustre, kmem_cache_create() just sets a pointer
+ * to the name string provided. kstrdup() is used in the version of
+ * kmem_cache_create() in current Linux mainline, so we may be able to
+ * remove this in the future.
+ */
+ char od_objname[NRS_ORR_OBJ_NAME_MAX];
+};
+
+/**
+ * Represents a backend-fs object or OST in the ORR and TRR policies
+ * respectively
+ */
+struct nrs_orr_object {
+ struct ptlrpc_nrs_resource oo_res;
+ struct hlist_node oo_hnode;
+ /**
+ * The round number against which requests are being scheduled for this
+ * object or OST
+ */
+ __u64 oo_round;
+ /**
+ * The sequence number used for requests scheduled for this object or
+ * OST during the current round number.
+ */
+ __u64 oo_sequence;
+ /**
+ * The key of the object or OST for which this structure instance is
+ * scheduling RPCs
+ */
+ struct nrs_orr_key oo_key;
+ atomic_t oo_ref;
+ /**
+ * Round Robin quantum; the maximum number of RPCs that are allowed to
+ * be scheduled for the object or OST in a single batch of each round.
+ */
+ __u16 oo_quantum;
+ /**
+ * # of pending requests for this object or OST, on all existing rounds
+ */
+ __u16 oo_active;
+};
+
+/**
+ * ORR/TRR NRS request definition
+ */
+struct nrs_orr_req {
+ /**
+ * The offset range this request covers
+ */
+ struct nrs_orr_req_range or_range;
+ /**
+ * Round number for this request; shared with all other requests in the
+ * same batch.
+ */
+ __u64 or_round;
+ /**
+ * Sequence number for this request; shared with all other requests in
+ * the same batch.
+ */
+ __u64 or_sequence;
+ /**
+ * For debugging purposes.
+ */
+ struct nrs_orr_key or_key;
+ /**
+ * An ORR policy instance has filled in request information while
+ * enqueueing the request on the service partition's regular NRS head.
+ */
+ unsigned int or_orr_set:1;
+ /**
+ * A TRR policy instance has filled in request information while
+ * enqueueing the request on the service partition's regular NRS head.
+ */
+ unsigned int or_trr_set:1;
+ /**
+ * Request offset ranges have been filled in with logical offset
+ * values.
+ */
+ unsigned int or_logical_set:1;
+ /**
+ * Request offset ranges have been filled in with physical offset
+ * values.
+ */
+ unsigned int or_physical_set:1;
+};
+
+/** @} ORR/TRR */
+
+/**
+ * NRS request
+ *
+ * Instances of this object exist embedded within ptlrpc_request; the main
+ * purpose of this object is to hold references to the request's resources
+ * for the lifetime of the request, and to hold properties that policies
+ * use for determining the request's scheduling priority.
+ */
+struct ptlrpc_nrs_request {
+ /**
+ * The request's resource hierarchy.
+ */
+ struct ptlrpc_nrs_resource *nr_res_ptrs[NRS_RES_MAX];
+ /**
+ * Index into ptlrpc_nrs_request::nr_res_ptrs of the resource of the
+ * policy that was used to enqueue the request.
+ *
+ * \see nrs_request_enqueue()
+ */
+ unsigned nr_res_idx;
+ unsigned nr_initialized:1;
+ unsigned nr_enqueued:1;
+ unsigned nr_started:1;
+ unsigned nr_finalized:1;
+ cfs_binheap_node_t nr_node;
+
+ /**
+ * Policy-specific fields, used for determining a request's scheduling
+ * priority, and other supporting functionality.
+ */
+ union {
+ /**
+ * Fields for the FIFO policy
+ */
+ struct nrs_fifo_req fifo;
+ /**
+ * CRR-N request definition
+ */
+ struct nrs_crrn_req crr;
+ /** ORR and TRR share the same request definition */
+ struct nrs_orr_req orr;
+ } nr_u;
+ /**
+ * Externally-registering policies may want to use this to allocate
+ * their own request properties.
+ */
+ void *ext;
+};
+
+/** @} nrs */
+
+/**
+ * Basic request prioritization operations structure.
+ * The whole idea is centered around locks and RPCs that might affect locks.
+ * When a lock is contended we try to give priority to RPCs that might lead
+ * to fastest release of that lock.
+ * Currently this is implemented only for OSTs, and only in a way that gives
+ * IO and truncate RPCs coming from a locked region where the lock is
+ * contended priority over other requests.
+ */
+struct ptlrpc_hpreq_ops {
+ /**
+ * Check if the lock handle of the given lock is the same as
+ * taken from the request.
+ */
+ int (*hpreq_lock_match)(struct ptlrpc_request *, struct ldlm_lock *);
+ /**
+ * Check if the request is a high priority one.
+ */
+ int (*hpreq_check)(struct ptlrpc_request *);
+ /**
+ * Called after the request has been handled.
+ */
+ void (*hpreq_fini)(struct ptlrpc_request *);
+};
+
+/**
+ * Represents remote procedure call.
+ *
+ * This is a staple structure used by everybody wanting to send a request
+ * in Lustre.
+ */
+struct ptlrpc_request {
+ /* Request type: one of PTL_RPC_MSG_* */
+ int rq_type;
+ /** Result of request processing */
+ int rq_status;
+ /**
+ * Linkage item through which this request is included into
+ * sending/delayed lists on client and into rqbd list on server
+ */
+ struct list_head rq_list;
+ /**
+ * Server side list of incoming unserved requests sorted by arrival
+ * time. Traversed from time to time to notice about-to-expire
+ * requests and send back "early replies" to clients to let them
+ * know the server is alive and well, just too busy to service their
+ * requests in time
+ */
+ struct list_head rq_timed_list;
+ /** server-side history, used for debugging purposes. */
+ struct list_head rq_history_list;
+ /** server-side per-export list */
+ struct list_head rq_exp_list;
+ /** server-side hp handlers */
+ struct ptlrpc_hpreq_ops *rq_ops;
+
+ /** initial thread servicing this request */
+ struct ptlrpc_thread *rq_svc_thread;
+
+ /** history sequence # */
+ __u64 rq_history_seq;
+ /** \addtogroup nrs
+ * @{
+ */
+ /** stub for NRS request */
+ struct ptlrpc_nrs_request rq_nrq;
+ /** @} nrs */
+ /** the index of service's srv_at_array into which request is linked */
+ time_t rq_at_index;
+ /** Lock to protect request flags and some other important bits, like
+ * rq_list
+ */
+ spinlock_t rq_lock;
+ /** client-side flags are serialized by rq_lock */
+ unsigned int rq_intr:1, rq_replied:1, rq_err:1,
+ rq_timedout:1, rq_resend:1, rq_restart:1,
+ /**
+ * when ->rq_replay is set, request is kept by the client even
+ * after server commits corresponding transaction. This is
+ * used for operations that require sequence of multiple
+ * requests to be replayed. The only example currently is file
+ * open/close. When last request in such a sequence is
+ * committed, ->rq_replay is cleared on all requests in the
+ * sequence.
+ */
+ rq_replay:1,
+ rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
+ rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1,
+ rq_early:1, rq_must_unlink:1,
+ rq_memalloc:1, /* req originated from "kswapd" */
+ /* server-side flags */
+ rq_packed_final:1, /* packed final reply */
+ rq_hp:1, /* high priority RPC */
+ rq_at_linked:1, /* link into service's srv_at_array */
+ rq_reply_truncate:1,
+ rq_committed:1,
+ /* whether the "rq_set" is a valid one */
+ rq_invalid_rqset:1,
+ rq_generation_set:1,
+ /* do not resend request on -EINPROGRESS */
+ rq_no_retry_einprogress:1,
+ /* allow the req to be sent if the import is in recovery
+ * status */
+ rq_allow_replay:1;
+
+ unsigned int rq_nr_resend;
+
+ enum rq_phase rq_phase; /* one of RQ_PHASE_* */
+ enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
+ atomic_t rq_refcount;/* client-side refcount for SENT race,
+ server-side refcount for multiple replies */
+
+ /** Portal to which this request would be sent */
+ short rq_request_portal; /* XXX FIXME bug 249 */
+ /** Portal where to wait for reply and where reply would be sent */
+ short rq_reply_portal; /* XXX FIXME bug 249 */
+
+ /**
+ * client-side:
+ * !rq_truncate : # reply bytes actually received,
+ * rq_truncate : required repbuf_len for resend
+ */
+ int rq_nob_received;
+ /** Request length */
+ int rq_reqlen;
+ /** Reply length */
+ int rq_replen;
+ /** Request message - what client sent */
+ struct lustre_msg *rq_reqmsg;
+ /** Reply message - server response */
+ struct lustre_msg *rq_repmsg;
+ /** Transaction number */
+ __u64 rq_transno;
+ /** xid */
+ __u64 rq_xid;
+ /**
+ * List item for the replay list. Not-yet-committed requests get linked
+ * there.
+ * Also see \a rq_replay comment above.
+ */
+ struct list_head rq_replay_list;
+
+ /**
+ * security and encryption data
+ * @{ */
+ struct ptlrpc_cli_ctx *rq_cli_ctx; /**< client's half ctx */
+ struct ptlrpc_svc_ctx *rq_svc_ctx; /**< server's half ctx */
+ struct list_head rq_ctx_chain; /**< link to waited ctx */
+
+ struct sptlrpc_flavor rq_flvr; /**< for client & server */
+ enum lustre_sec_part rq_sp_from;
+
+ /* client/server security flags */
+ unsigned int
+ rq_ctx_init:1, /* context initiation */
+ rq_ctx_fini:1, /* context destroy */
+ rq_bulk_read:1, /* request bulk read */
+ rq_bulk_write:1, /* request bulk write */
+ /* server authentication flags */
+ rq_auth_gss:1, /* authenticated by gss */
+ rq_auth_remote:1, /* authed as remote user */
+ rq_auth_usr_root:1, /* authed as root */
+ rq_auth_usr_mdt:1, /* authed as mdt */
+ rq_auth_usr_ost:1, /* authed as ost */
+ /* security tfm flags */
+ rq_pack_udesc:1,
+ rq_pack_bulk:1,
+ /* doesn't expect reply FIXME */
+ rq_no_reply:1,
+ rq_pill_init:1; /* pill initialized */
+
+ uid_t rq_auth_uid; /* authed uid */
+ uid_t rq_auth_mapped_uid; /* authed uid mapped to */
+
+ /* (server side), pointed directly into req buffer */
+ struct ptlrpc_user_desc *rq_user_desc;
+
+ /* various buffer pointers */
+ struct lustre_msg *rq_reqbuf; /* req wrapper */
+ char *rq_repbuf; /* rep buffer */
+ struct lustre_msg *rq_repdata; /* rep wrapper msg */
+ struct lustre_msg *rq_clrbuf; /* only in priv mode */
+ int rq_reqbuf_len; /* req wrapper buf len */
+ int rq_reqdata_len; /* req wrapper msg len */
+ int rq_repbuf_len; /* rep buffer len */
+ int rq_repdata_len; /* rep wrapper msg len */
+ int rq_clrbuf_len; /* only in priv mode */
+ int rq_clrdata_len; /* only in priv mode */
+
+ /** early replies go to offset 0, regular replies go after that */
+ unsigned int rq_reply_off;
+
+ /** @} */
+
+ /** Fields that help to see if request and reply were swabbed or not */
+ __u32 rq_req_swab_mask;
+ __u32 rq_rep_swab_mask;
+
+ /** What was import generation when this request was sent */
+ int rq_import_generation;
+ enum lustre_imp_state rq_send_state;
+
+ /** how many early replies (for stats) */
+ int rq_early_count;
+
+ /** client+server request */
+ lnet_handle_md_t rq_req_md_h;
+ struct ptlrpc_cb_id rq_req_cbid;
+ /** optional time limit for send attempts */
+ cfs_duration_t rq_delay_limit;
+ /** time request was first queued */
+ cfs_time_t rq_queued_time;
+
+ /* server-side... */
+ /** request arrival time */
+ struct timeval rq_arrival_time;
+ /** separated reply state */
+ struct ptlrpc_reply_state *rq_reply_state;
+ /** incoming request buffer */
+ struct ptlrpc_request_buffer_desc *rq_rqbd;
+
+ /** client-only incoming reply */
+ lnet_handle_md_t rq_reply_md_h;
+ wait_queue_head_t rq_reply_waitq;
+ struct ptlrpc_cb_id rq_reply_cbid;
+
+ /** our LNet NID */
+ lnet_nid_t rq_self;
+ /** Peer description (the other side) */
+ lnet_process_id_t rq_peer;
+ /** Server-side, export on which request was received */
+ struct obd_export *rq_export;
+ /** Client side, import where request is being sent */
+ struct obd_import *rq_import;
+
+ /** Replay callback, called after request is replayed at recovery */
+ void (*rq_replay_cb)(struct ptlrpc_request *);
+ /**
+ * Commit callback, called when request is committed and about to be
+ * freed.
+ */
+ void (*rq_commit_cb)(struct ptlrpc_request *);
+ /** Opaque data for replay and commit callbacks. */
+ void *rq_cb_data;
+
+ /** For bulk requests on client only: bulk descriptor */
+ struct ptlrpc_bulk_desc *rq_bulk;
+
+ /** client outgoing req */
+ /**
+ * when request/reply sent (secs), or time when request should be sent
+ */
+ time_t rq_sent;
+ /** time for request really sent out */
+ time_t rq_real_sent;
+
+ /** when request must finish. volatile
+ * so that servers' early reply updates to the deadline aren't
+ * kept in per-cpu cache */
+ volatile time_t rq_deadline;
+ /** when req reply unlink must finish. */
+ time_t rq_reply_deadline;
+ /** when req bulk unlink must finish. */
+ time_t rq_bulk_deadline;
+ /**
+ * service time estimate (secs)
+ * If the request is not served by this time, it is marked as timed out.
+ */
+ int rq_timeout;
+
+ /** Multi-rpc bits */
+ /** Per-request waitq introduced by bug 21938 for recovery waiting */
+ wait_queue_head_t rq_set_waitq;
+ /** Link item for request set lists */
+ struct list_head rq_set_chain;
+ /** Link back to the request set */
+ struct ptlrpc_request_set *rq_set;
+ /** Async completion handler, called when reply is received */
+ ptlrpc_interpterer_t rq_interpret_reply;
+ /** Async completion context */
+ union ptlrpc_async_args rq_async_args;
+
+ /** Pool if request is from preallocated list */
+ struct ptlrpc_request_pool *rq_pool;
+
+ struct lu_context rq_session;
+ struct lu_context rq_recov_session;
+
+ /** request format description */
+ struct req_capsule rq_pill;
+};
+
+/**
+ * Call the completion handler for the rpc, if any; return its status, or the
+ * original rc if no handler was defined for this request.
+ */
+static inline int ptlrpc_req_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req, int rc)
+{
+ if (req->rq_interpret_reply != NULL) {
+ req->rq_status = req->rq_interpret_reply(env, req,
+ &req->rq_async_args,
+ rc);
+ return req->rq_status;
+ }
+ return rc;
+}
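+/*
+ * Sketch of an interpreter callback matching ptlrpc_interpterer_t (the
+ * function and argument struct names here are hypothetical):
+ *
+ *	static int my_interpret(const struct lu_env *env,
+ *				struct ptlrpc_request *req, void *args, int rc)
+ *	{
+ *		struct my_async_args *aa = args;
+ *
+ *		if (rc != 0)
+ *			return rc;
+ *		... update caller state in *aa based on req->rq_repmsg ...
+ *		return 0;
+ *	}
+ *
+ * The caller sets req->rq_interpret_reply = my_interpret before sending,
+ * typically as part of a request set.
+ */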
+
+/** \addtogroup nrs
+ * @{
+ */
+int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf);
+int ptlrpc_nrs_policy_unregister(struct ptlrpc_nrs_pol_conf *conf);
+void ptlrpc_nrs_req_hp_move(struct ptlrpc_request *req);
+void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_pol_info *info);
+
+/*
+ * Can the request be moved from the regular NRS head to the high-priority NRS
+ * head (of the same PTLRPC service partition), if any?
+ *
+ * For a reliable result, this should be checked under svcpt->scp_req_lock.
+ */
+static inline bool ptlrpc_nrs_req_can_move(struct ptlrpc_request *req)
+{
+ struct ptlrpc_nrs_request *nrq = &req->rq_nrq;
+
+ /**
+ * LU-898: Check ptlrpc_nrs_request::nr_enqueued to make sure the
+ * request has been enqueued first, and ptlrpc_nrs_request::nr_started
+ * to make sure it has not been scheduled yet (analogous to previous
+ * (non-NRS) checking of !list_empty(&ptlrpc_request::rq_list)).
+ */
+ return nrq->nr_enqueued && !nrq->nr_started && !req->rq_hp;
+}
+/** @} nrs */
+
+/**
+ * Returns 1 if request buffer at offset \a index was already swabbed
+ */
+static inline int lustre_req_swabbed(struct ptlrpc_request *req, int index)
+{
+ LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
+ return req->rq_req_swab_mask & (1 << index);
+}
+
+/**
+ * Returns 1 if request reply buffer at offset \a index was already swabbed
+ */
+static inline int lustre_rep_swabbed(struct ptlrpc_request *req, int index)
+{
+ LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
+ return req->rq_rep_swab_mask & (1 << index);
+}
+
+/**
+ * Returns 1 if request needs to be swabbed into local cpu byteorder
+ */
+static inline int ptlrpc_req_need_swab(struct ptlrpc_request *req)
+{
+ return lustre_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
+}
+
+/**
+ * Returns 1 if request reply needs to be swabbed into local cpu byteorder
+ */
+static inline int ptlrpc_rep_need_swab(struct ptlrpc_request *req)
+{
+ return lustre_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
+}
+
+/**
+ * Mark request buffer at offset \a index that it was already swabbed
+ */
+static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, int index)
+{
+ LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
+ LASSERT((req->rq_req_swab_mask & (1 << index)) == 0);
+ req->rq_req_swab_mask |= 1 << index;
+}
+
+/**
+ * Mark request reply buffer at offset \a index that it was already swabbed
+ */
+static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req, int index)
+{
+ LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
+ LASSERT((req->rq_rep_swab_mask & (1 << index)) == 0);
+ req->rq_rep_swab_mask |= 1 << index;
+}
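+/*
+ * Sketch of how the swab helpers above are typically paired when unpacking a
+ * reply buffer at offset index (the actual swab routine is left abstract):
+ *
+ *	if (ptlrpc_rep_need_swab(req) && !lustre_rep_swabbed(req, index)) {
+ *		... byte-swap the buffer contents in place ...
+ *		lustre_set_rep_swabbed(req, index);
+ *	}
+ */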
+
+/**
+ * Convert numerical request phase value \a phase into text string description
+ */
+static inline const char *
+ptlrpc_phase2str(enum rq_phase phase)
+{
+ switch (phase) {
+ case RQ_PHASE_NEW:
+ return "New";
+ case RQ_PHASE_RPC:
+ return "Rpc";
+ case RQ_PHASE_BULK:
+ return "Bulk";
+ case RQ_PHASE_INTERPRET:
+ return "Interpret";
+ case RQ_PHASE_COMPLETE:
+ return "Complete";
+ case RQ_PHASE_UNREGISTERING:
+ return "Unregistering";
+ default:
+ return "?Phase?";
+ }
+}
+
+/**
+ * Convert numerical request phase of the request \a req into text string
+ * description
+ */
+static inline const char *
+ptlrpc_rqphase2str(struct ptlrpc_request *req)
+{
+ return ptlrpc_phase2str(req->rq_phase);
+}
+
+/**
+ * Debugging functions and helpers to print request structure into debug log
+ * @{
+ */
+/* Spare the preprocessor, spoil the bugs. */
+#define FLAG(field, str) (field ? str : "")
+
+/** Convert bit flags into a string */
+#define DEBUG_REQ_FLAGS(req) \
+ ptlrpc_rqphase2str(req), \
+ FLAG(req->rq_intr, "I"), FLAG(req->rq_replied, "R"), \
+ FLAG(req->rq_err, "E"), \
+ FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \
+ FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"), \
+ FLAG(req->rq_no_resend, "N"), \
+ FLAG(req->rq_waiting, "W"), \
+ FLAG(req->rq_wait_ctx, "C"), FLAG(req->rq_hp, "H"), \
+ FLAG(req->rq_committed, "M")
+
+#define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s"
+
+void _debug_req(struct ptlrpc_request *req,
+ struct libcfs_debug_msg_data *data, const char *fmt, ...)
+ __attribute__ ((format (printf, 3, 4)));
+
+/**
+ * Helper that decides if we need to print the request according to current debug
+ * level settings
+ */
+#define debug_req(msgdata, mask, cdls, req, fmt, a...) \
+do { \
+ CFS_CHECK_STACK(msgdata, mask, cdls); \
+ \
+ if (((mask) & D_CANTMASK) != 0 || \
+ ((libcfs_debug & (mask)) != 0 && \
+ (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) \
+ _debug_req((req), msgdata, fmt, ##a); \
+} while (0)
+
+/**
+ * This is the debug print function you need to use to print request structure
+ * content into the lustre debug log.
+ * For most callers (level is a constant) this is resolved at compile time */
+#define DEBUG_REQ(level, req, fmt, args...) \
+do { \
+ if ((level) & (D_ERROR | D_WARNING)) { \
+ static cfs_debug_limit_state_t cdls; \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, &cdls); \
+ debug_req(&msgdata, level, &cdls, req, "@@@ "fmt" ", ## args);\
+ } else { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, NULL); \
+ debug_req(&msgdata, level, NULL, req, "@@@ "fmt" ", ## args); \
+ } \
+} while (0)
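+/*
+ * Example (illustrative message only):
+ *
+ *	DEBUG_REQ(D_ERROR, req, "timed out after %ds", req->rq_timeout);
+ *
+ * prints the standard request description (phase, flags, xid, ...) followed
+ * by the formatted message at the D_ERROR level.
+ */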
+/** @} */
+
+/**
+ * Structure that defines a single page of a bulk transfer
+ */
+struct ptlrpc_bulk_page {
+ /** Linkage to list of pages in a bulk */
+ struct list_head bp_link;
+ /**
+ * Number of bytes in a page to transfer starting from \a bp_pageoffset
+ */
+ int bp_buflen;
+ /** offset within a page */
+ int bp_pageoffset;
+ /** The page itself */
+ struct page *bp_page;
+};
+
+#define BULK_GET_SOURCE 0
+#define BULK_PUT_SINK 1
+#define BULK_GET_SINK 2
+#define BULK_PUT_SOURCE 3
+
+/**
+ * Definition of bulk descriptor.
+ * Bulks are special "two phase" RPCs where the initial request message
+ * is sent first and is followed by a transfer (or reception) of a large
+ * amount of data to be settled into pages referenced from the bulk descriptors.
+ * Bulk transfers (the actual data following the small requests) are done
+ * on separate LNet portals.
+ * In lustre we use bulk transfers for READ and WRITE transfers from/to OSTs.
+ * Another user is readpage for MDT.
+ */
+struct ptlrpc_bulk_desc {
+ /** completed with failure */
+ unsigned long bd_failure:1;
+ /** {put,get}{source,sink} */
+ unsigned long bd_type:2;
+ /** client side */
+ unsigned long bd_registered:1;
+ /** For serialization with callback */
+ spinlock_t bd_lock;
+ /** Import generation when request for this bulk was sent */
+ int bd_import_generation;
+ /** LNet portal for this bulk */
+ __u32 bd_portal;
+ /** Server side - export this bulk created for */
+ struct obd_export *bd_export;
+ /** Client side - import this bulk was sent on */
+ struct obd_import *bd_import;
+ /** Back pointer to the request */
+ struct ptlrpc_request *bd_req;
+ wait_queue_head_t bd_waitq; /* server side only WQ */
+ int bd_iov_count; /* # entries in bd_iov */
+ int bd_max_iov; /* allocated size of bd_iov */
+ int bd_nob; /* # bytes covered */
+ int bd_nob_transferred; /* # bytes GOT/PUT */
+
+ __u64 bd_last_xid;
+
+ struct ptlrpc_cb_id bd_cbid; /* network callback info */
+ lnet_nid_t bd_sender; /* stash event::sender */
+ int bd_md_count; /* # valid entries in bd_mds */
+ int bd_md_max_brw; /* max entries in bd_mds */
+ /** array of associated MDs */
+ lnet_handle_md_t bd_mds[PTLRPC_BULK_OPS_COUNT];
+
+ /*
+ * encrypt iov, size is either 0 or bd_iov_count.
+ */
+ lnet_kiov_t *bd_enc_iov;
+
+ lnet_kiov_t bd_iov[0];
+};
+
+enum {
+ SVC_STOPPED = 1 << 0,
+ SVC_STOPPING = 1 << 1,
+ SVC_STARTING = 1 << 2,
+ SVC_RUNNING = 1 << 3,
+ SVC_EVENT = 1 << 4,
+ SVC_SIGNAL = 1 << 5,
+};
+
+#define PTLRPC_THR_NAME_LEN 32
+/**
+ * Definition of server service thread structure
+ */
+struct ptlrpc_thread {
+ /**
+ * List of active threads in svc->srv_threads
+ */
+ struct list_head t_link;
+ /**
+ * thread-private data (preallocated memory)
+ */
+ void *t_data;
+ __u32 t_flags;
+ /**
+ * service thread index, from ptlrpc_start_threads
+ */
+ unsigned int t_id;
+ /**
+ * service thread pid
+ */
+ pid_t t_pid;
+ /**
+ * put watchdog in the structure per thread b=14840
+ *
+ * The Lustre watchdog is removed for the client in the hope
+ * that a generic watchdog can be merged into the kernel.
+ * When that happens, we should add the field below back.
+ *
+ * struct lc_watchdog *t_watchdog;
+ */
+ /**
+ * the svc this thread belongs to b=18582
+ */
+ struct ptlrpc_service_part *t_svcpt;
+ wait_queue_head_t t_ctl_waitq;
+ struct lu_env *t_env;
+ char t_name[PTLRPC_THR_NAME_LEN];
+};
+
+static inline int thread_is_init(struct ptlrpc_thread *thread)
+{
+ return thread->t_flags == 0;
+}
+
+static inline int thread_is_stopped(struct ptlrpc_thread *thread)
+{
+ return !!(thread->t_flags & SVC_STOPPED);
+}
+
+static inline int thread_is_stopping(struct ptlrpc_thread *thread)
+{
+ return !!(thread->t_flags & SVC_STOPPING);
+}
+
+static inline int thread_is_starting(struct ptlrpc_thread *thread)
+{
+ return !!(thread->t_flags & SVC_STARTING);
+}
+
+static inline int thread_is_running(struct ptlrpc_thread *thread)
+{
+ return !!(thread->t_flags & SVC_RUNNING);
+}
+
+static inline int thread_is_event(struct ptlrpc_thread *thread)
+{
+ return !!(thread->t_flags & SVC_EVENT);
+}
+
+static inline int thread_is_signal(struct ptlrpc_thread *thread)
+{
+ return !!(thread->t_flags & SVC_SIGNAL);
+}
+
+static inline void thread_clear_flags(struct ptlrpc_thread *thread, __u32 flags)
+{
+ thread->t_flags &= ~flags;
+}
+
+static inline void thread_set_flags(struct ptlrpc_thread *thread, __u32 flags)
+{
+ thread->t_flags = flags;
+}
+
+static inline void thread_add_flags(struct ptlrpc_thread *thread, __u32 flags)
+{
+ thread->t_flags |= flags;
+}
+
+static inline int thread_test_and_clear_flags(struct ptlrpc_thread *thread,
+ __u32 flags)
+{
+ if (thread->t_flags & flags) {
+ thread->t_flags &= ~flags;
+ return 1;
+ }
+ return 0;
+}
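+/*
+ * Sketch of how a service thread typically advertises a state change with the
+ * helpers above and wakes anyone waiting on its control wait queue:
+ *
+ *	thread_clear_flags(thread, SVC_STARTING);
+ *	thread_add_flags(thread, SVC_RUNNING);
+ *	wake_up(&thread->t_ctl_waitq);
+ */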
+
+/**
+ * Request buffer descriptor structure.
+ * This is a structure that contains one posted request buffer for service.
+ * Once data lands in a buffer, the event callback creates the actual request
+ * and wakes one of the service threads to process the new incoming request.
+ * More than one request can fit into the buffer.
+ */
+struct ptlrpc_request_buffer_desc {
+ /** Link item for rqbds on a service */
+ struct list_head rqbd_list;
+ /** History of requests for this buffer */
+ struct list_head rqbd_reqs;
+ /** Back pointer to service for which this buffer is registered */
+ struct ptlrpc_service_part *rqbd_svcpt;
+ /** LNet descriptor */
+ lnet_handle_md_t rqbd_md_h;
+ int rqbd_refcount;
+ /** The buffer itself */
+ char *rqbd_buffer;
+ struct ptlrpc_cb_id rqbd_cbid;
+ /**
+ * This "embedded" request structure is only used for the
+ * last request to fit into the buffer
+ */
+ struct ptlrpc_request rqbd_req;
+};
+
+typedef int (*svc_handler_t)(struct ptlrpc_request *req);
+
+struct ptlrpc_service_ops {
+ /**
+ * if non-NULL called during thread creation (ptlrpc_start_thread())
+ * to initialize service specific per-thread state.
+ */
+ int (*so_thr_init)(struct ptlrpc_thread *thr);
+ /**
+	 * if non-NULL, called during thread shutdown (ptlrpc_main()) to
+	 * destroy state created by ->so_thr_init().
+ */
+ void (*so_thr_done)(struct ptlrpc_thread *thr);
+ /**
+ * Handler function for incoming requests for this service
+ */
+ int (*so_req_handler)(struct ptlrpc_request *req);
+ /**
+	 * function to determine the priority of the request; it is called
+	 * on every new request
+ */
+ int (*so_hpreq_handler)(struct ptlrpc_request *);
+ /**
+ * service-specific print fn
+ */
+ void (*so_req_printer)(void *, struct ptlrpc_request *);
+};
+
+#ifndef __cfs_cacheline_aligned
+/* NB: put it here to reduce patch dependencies */
+# define __cfs_cacheline_aligned
+#endif
+
+/**
+ * How many high priority requests to serve before serving one normal
+ * priority request
+ */
+#define PTLRPC_SVC_HP_RATIO 10
+
+/**
+ * Definition of PortalRPC service.
+ * The service listens on a particular portal (like a TCP port)
+ * and performs actions for a specific server, like I/O service for an OST
+ * or general metadata service for an MDS.
+ */
+struct ptlrpc_service {
+ /** serialize /proc operations */
+ spinlock_t srv_lock;
+ /** most often accessed fields */
+ /** chain thru all services */
+ struct list_head srv_list;
+ /** service operations table */
+ struct ptlrpc_service_ops srv_ops;
+ /** only statically allocated strings here; we don't clean them */
+ char *srv_name;
+ /** only statically allocated strings here; we don't clean them */
+ char *srv_thread_name;
+ /** service thread list */
+ struct list_head srv_threads;
+ /** # of threads to create for each partition on initialization */
+ int srv_nthrs_cpt_init;
+ /** limit on the number of threads for each partition */
+ int srv_nthrs_cpt_limit;
+ /** Root of /proc dir tree for this service */
+ struct proc_dir_entry *srv_procroot;
+ /** Pointer to statistic data for this service */
+ struct lprocfs_stats *srv_stats;
+ /** # hp per lp reqs to handle */
+ int srv_hpreq_ratio;
+ /** biggest request to receive */
+ int srv_max_req_size;
+ /** biggest reply to send */
+ int srv_max_reply_size;
+ /** size of individual buffers */
+ int srv_buf_size;
+ /** # buffers to allocate in 1 group */
+ int srv_nbuf_per_group;
+ /** Local portal on which to receive requests */
+ __u32 srv_req_portal;
+ /** Portal on the client to send replies to */
+ __u32 srv_rep_portal;
+ /**
+ * Tags for lu_context associated with this thread, see struct
+ * lu_context.
+ */
+ __u32 srv_ctx_tags;
+ /** soft watchdog timeout multiplier */
+ int srv_watchdog_factor;
+ /** under unregister_service */
+ unsigned srv_is_stopping:1;
+
+ /** max # request buffers in history per partition */
+ int srv_hist_nrqbds_cpt_max;
+ /** number of CPTs this service bound on */
+ int srv_ncpts;
+ /** CPTs array this service bound on */
+ __u32 *srv_cpts;
+ /** 2^srv_cpt_bits >= cfs_cpt_number(srv_cptable) */
+ int srv_cpt_bits;
+ /** CPT table this service is running over */
+ struct cfs_cpt_table *srv_cptable;
+ /**
+ * partition data for ptlrpc service
+ */
+ struct ptlrpc_service_part *srv_parts[0];
+};
+
+/**
+ * Definition of PortalRPC service partition data.
+ * Although a service only has one instance of it right now, we
+ * will have multiple instances very soon (one instance per CPT).
+ *
+ * it has four locks:
+ * \a scp_lock
+ * serialize operations on rqbd and requests waiting for preprocess
+ * \a scp_req_lock
+ * serialize operations on active requests sent to this portal
+ * \a scp_at_lock
+ * serialize adaptive timeout stuff
+ * \a scp_rep_lock
+ * serialize operations on RS list (reply states)
+ *
+ * We don't have any use-case to take two or more locks at the same time
+ * for now, so there is no lock order issue.
+ */
+struct ptlrpc_service_part {
+ /** back reference to owner */
+ struct ptlrpc_service *scp_service __cfs_cacheline_aligned;
+ /* CPT id, reserved */
+ int scp_cpt;
+ /** always increasing number */
+ int scp_thr_nextid;
+ /** # of starting threads */
+ int scp_nthrs_starting;
+ /** # of stopping threads, reserved for shrinking threads */
+ int scp_nthrs_stopping;
+ /** # running threads */
+ int scp_nthrs_running;
+ /** service threads list */
+ struct list_head scp_threads;
+
+ /**
+	 * serialize the following fields, used for protecting the
+	 * rqbd list and incoming requests waiting for preprocessing;
+	 * threads starting & stopping are also protected by this lock.
+ */
+ spinlock_t scp_lock __cfs_cacheline_aligned;
+ /** total # req buffer descs allocated */
+ int scp_nrqbds_total;
+ /** # posted request buffers for receiving */
+ int scp_nrqbds_posted;
+ /** rqbd allocation in progress */
+ int scp_rqbd_allocating;
+ /** # incoming reqs */
+ int scp_nreqs_incoming;
+ /** request buffers to be reposted */
+ struct list_head scp_rqbd_idle;
+ /** req buffers receiving */
+ struct list_head scp_rqbd_posted;
+ /** incoming reqs */
+ struct list_head scp_req_incoming;
+ /** timeout before re-posting reqs, in tick */
+ cfs_duration_t scp_rqbd_timeout;
+ /**
+	 * all threads sleep on this. This wait-queue is signalled when a new
+	 * incoming request arrives and when a difficult reply has to be handled.
+ */
+ wait_queue_head_t scp_waitq;
+
+ /** request history */
+ struct list_head scp_hist_reqs;
+ /** request buffer history */
+ struct list_head scp_hist_rqbds;
+ /** # request buffers in history */
+ int scp_hist_nrqbds;
+ /** sequence number for request */
+ __u64 scp_hist_seq;
+ /** highest seq culled from history */
+ __u64 scp_hist_seq_culled;
+
+ /**
+ * serialize the following fields, used for processing requests
+ * sent to this portal
+ */
+ spinlock_t scp_req_lock __cfs_cacheline_aligned;
+ /** # reqs being served (in either of the NRS heads below) */
+ int scp_nreqs_active;
+ /** # HPreqs being served */
+ int scp_nhreqs_active;
+ /** # hp requests handled */
+ int scp_hreq_count;
+
+ /** NRS head for regular requests */
+ struct ptlrpc_nrs scp_nrs_reg;
+ /** NRS head for HP requests; this is only valid for services that can
+ * handle HP requests */
+ struct ptlrpc_nrs *scp_nrs_hp;
+
+ /** AT stuff */
+ /** @{ */
+ /**
+ * serialize the following fields, used for changes on
+ * adaptive timeout
+ */
+ spinlock_t scp_at_lock __cfs_cacheline_aligned;
+ /** estimated rpc service time */
+ struct adaptive_timeout scp_at_estimate;
+ /** reqs waiting for replies */
+ struct ptlrpc_at_array scp_at_array;
+ /** early reply timer */
+ struct timer_list scp_at_timer;
+ /** debug */
+ cfs_time_t scp_at_checktime;
+ /** check early replies */
+ unsigned scp_at_check;
+ /** @} */
+
+ /**
+ * serialize the following fields, used for processing
+ * replies for this portal
+ */
+ spinlock_t scp_rep_lock __cfs_cacheline_aligned;
+ /** all the active replies */
+ struct list_head scp_rep_active;
+ /** List of free reply_states */
+ struct list_head scp_rep_idle;
+ /** waitq to wake up when adding stuff to the idle reply-state list (scp_rep_idle) */
+ wait_queue_head_t scp_rep_waitq;
+ /** # 'difficult' replies */
+ atomic_t scp_nreps_difficult;
+};
+
+#define ptlrpc_service_for_each_part(part, i, svc) \
+ for (i = 0; \
+ i < (svc)->srv_ncpts && \
+ (svc)->srv_parts != NULL && \
+ ((part) = (svc)->srv_parts[i]) != NULL; i++)
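+
+/*
+ * Purely illustrative helper (the name is hypothetical and not part of the
+ * original interface): walk every partition of a service with the macro
+ * above and sum the running threads.
+ */
+static inline int ptlrpc_service_nthrs_running(struct ptlrpc_service *svc)
+{
+	struct ptlrpc_service_part *svcpt;
+	int nthrs = 0;
+	int i;
+
+	ptlrpc_service_for_each_part(svcpt, i, svc)
+		nthrs += svcpt->scp_nthrs_running;
+	return nthrs;
+}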
+
+/**
+ * Declaration of ptlrpcd control structure
+ */
+struct ptlrpcd_ctl {
+ /**
+ * Ptlrpc thread control flags (LIOD_START, LIOD_STOP, LIOD_FORCE)
+ */
+ unsigned long pc_flags;
+ /**
+ * Thread lock protecting structure fields.
+ */
+ spinlock_t pc_lock;
+ /**
+ * Start completion.
+ */
+ struct completion pc_starting;
+ /**
+ * Stop completion.
+ */
+ struct completion pc_finishing;
+ /**
+ * Thread requests set.
+ */
+ struct ptlrpc_request_set *pc_set;
+ /**
+ * Thread name used in cfs_daemonize()
+ */
+ char pc_name[16];
+ /**
+ * Environment for request interpreters to run in.
+ */
+ struct lu_env pc_env;
+ /**
+ * Index of ptlrpcd thread in the array.
+ */
+ int pc_index;
+ /**
+ * Number of the ptlrpcd's partners.
+ */
+ int pc_npartners;
+ /**
+ * Pointer to the array of partners' ptlrpcd_ctl structure.
+ */
+ struct ptlrpcd_ctl **pc_partners;
+ /**
+ * Record the partner index to be processed next.
+ */
+ int pc_cursor;
+};
+
+/* Bits for pc_flags */
+enum ptlrpcd_ctl_flags {
+ /**
+ * Ptlrpc thread start flag.
+ */
+ LIOD_START = 1 << 0,
+ /**
+ * Ptlrpc thread stop flag.
+ */
+ LIOD_STOP = 1 << 1,
+ /**
+ * Ptlrpc thread force flag (only stop force so far).
+	 * This will cause any in-flight RPCs handled by the thread
+	 * to be aborted if LIOD_STOP is also specified.
+ */
+ LIOD_FORCE = 1 << 2,
+ /**
+ * This is a recovery ptlrpc thread.
+ */
+ LIOD_RECOVERY = 1 << 3,
+ /**
+ * The ptlrpcd is bound to some CPU core.
+ */
+ LIOD_BIND = 1 << 4,
+};
+
+/**
+ * \addtogroup nrs
+ * @{
+ *
+ * Service compatibility function; the policy is compatible with all services.
+ *
+ * \param[in] svc The service the policy is attempting to register with.
+ * \param[in] desc The policy descriptor
+ *
+ * \retval true The policy is compatible with the service
+ *
+ * \see ptlrpc_nrs_pol_desc::pd_compat()
+ */
+static inline bool nrs_policy_compat_all(const struct ptlrpc_service *svc,
+ const struct ptlrpc_nrs_pol_desc *desc)
+{
+ return true;
+}
+
+/**
+ * Service compatibility function; the policy is compatible with only a specific
+ * service which is identified by its human-readable name at
+ * ptlrpc_service::srv_name.
+ *
+ * \param[in] svc The service the policy is attempting to register with.
+ * \param[in] desc The policy descriptor
+ *
+ * \retval false The policy is not compatible with the service
+ * \retval true The policy is compatible with the service
+ *
+ * \see ptlrpc_nrs_pol_desc::pd_compat()
+ */
+static inline bool nrs_policy_compat_one(const struct ptlrpc_service *svc,
+ const struct ptlrpc_nrs_pol_desc *desc)
+{
+ LASSERT(desc->pd_compat_svc_name != NULL);
+ return strcmp(svc->srv_name, desc->pd_compat_svc_name) == 0;
+}
+
+/** @} nrs */
+
+/* ptlrpc/events.c */
+extern lnet_handle_eq_t ptlrpc_eq_h;
+extern int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
+ lnet_process_id_t *peer, lnet_nid_t *self);
+/**
+ * These callbacks are invoked by LNet when something happens to the
+ * underlying buffer
+ * @{
+ */
+extern void request_out_callback(lnet_event_t *ev);
+extern void reply_in_callback(lnet_event_t *ev);
+extern void client_bulk_callback(lnet_event_t *ev);
+extern void request_in_callback(lnet_event_t *ev);
+extern void reply_out_callback(lnet_event_t *ev);
+/** @} */
+
+/* ptlrpc/connection.c */
+struct ptlrpc_connection *ptlrpc_connection_get(lnet_process_id_t peer,
+ lnet_nid_t self,
+ struct obd_uuid *uuid);
+int ptlrpc_connection_put(struct ptlrpc_connection *c);
+struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *);
+int ptlrpc_connection_init(void);
+void ptlrpc_connection_fini(void);
+extern lnet_pid_t ptl_get_pid(void);
+
+/* ptlrpc/niobuf.c */
+/**
+ * Actual interfacing with LNet to put/get/register/unregister stuff
+ * @{
+ */
+
+int ptlrpc_register_bulk(struct ptlrpc_request *req);
+int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async);
+
+static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
+{
+ struct ptlrpc_bulk_desc *desc;
+ int rc;
+
+ LASSERT(req != NULL);
+ desc = req->rq_bulk;
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
+ req->rq_bulk_deadline > cfs_time_current_sec())
+ return 1;
+
+ if (!desc)
+ return 0;
+
+ spin_lock(&desc->bd_lock);
+ rc = desc->bd_md_count;
+ spin_unlock(&desc->bd_lock);
+ return rc;
+}
+
+#define PTLRPC_REPLY_MAYBE_DIFFICULT 0x01
+#define PTLRPC_REPLY_EARLY 0x02
+int ptlrpc_send_reply(struct ptlrpc_request *req, int flags);
+int ptlrpc_reply(struct ptlrpc_request *req);
+int ptlrpc_send_error(struct ptlrpc_request *req, int difficult);
+int ptlrpc_error(struct ptlrpc_request *req);
+void ptlrpc_resend_req(struct ptlrpc_request *request);
+int ptlrpc_at_get_net_latency(struct ptlrpc_request *req);
+int ptl_send_rpc(struct ptlrpc_request *request, int noreply);
+int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd);
+/** @} */
+
+/* ptlrpc/client.c */
+/**
+ * Client-side portals API. Everything to send requests, receive replies,
+ * request queues, request management, etc.
+ * @{
+ */
+void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
+ struct ptlrpc_client *);
+void ptlrpc_cleanup_client(struct obd_import *imp);
+struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid);
+
+int ptlrpc_queue_wait(struct ptlrpc_request *req);
+int ptlrpc_replay_req(struct ptlrpc_request *req);
+int ptlrpc_unregister_reply(struct ptlrpc_request *req, int async);
+void ptlrpc_restart_req(struct ptlrpc_request *req);
+void ptlrpc_abort_inflight(struct obd_import *imp);
+void ptlrpc_cleanup_imp(struct obd_import *imp);
+void ptlrpc_abort_set(struct ptlrpc_request_set *set);
+
+struct ptlrpc_request_set *ptlrpc_prep_set(void);
+struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
+ void *arg);
+int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
+ set_interpreter_func fn, void *data);
+int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
+int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set);
+int ptlrpc_set_wait(struct ptlrpc_request_set *);
+int ptlrpc_expired_set(void *data);
+void ptlrpc_interrupted_set(void *data);
+void ptlrpc_mark_interrupted(struct ptlrpc_request *req);
+void ptlrpc_set_destroy(struct ptlrpc_request_set *);
+void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *);
+void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
+ struct ptlrpc_request *req);
+
+void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool);
+void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq);
+
+struct ptlrpc_request_pool *
+ptlrpc_init_rq_pool(int, int,
+ void (*populate_pool)(struct ptlrpc_request_pool *, int));
+
+void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req);
+struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
+ const struct req_format *format);
+struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
+ struct ptlrpc_request_pool *,
+ const struct req_format *format);
+void ptlrpc_request_free(struct ptlrpc_request *request);
+int ptlrpc_request_pack(struct ptlrpc_request *request,
+ __u32 version, int opcode);
+struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
+ const struct req_format *format,
+ __u32 version, int opcode);
+int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
+ __u32 version, int opcode, char **bufs,
+ struct ptlrpc_cli_ctx *ctx);
+struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, __u32 version,
+ int opcode, int count, __u32 *lengths,
+ char **bufs);
+struct ptlrpc_request *ptlrpc_prep_req_pool(struct obd_import *imp,
+ __u32 version, int opcode,
+ int count, __u32 *lengths, char **bufs,
+ struct ptlrpc_request_pool *pool);
+void ptlrpc_req_finished(struct ptlrpc_request *request);
+void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request);
+struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req);
+struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
+ unsigned npages, unsigned max_brw,
+ unsigned type, unsigned portal);
+void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk, int pin);
+static inline void ptlrpc_free_bulk_pin(struct ptlrpc_bulk_desc *bulk)
+{
+ __ptlrpc_free_bulk(bulk, 1);
+}
+static inline void ptlrpc_free_bulk_nopin(struct ptlrpc_bulk_desc *bulk)
+{
+ __ptlrpc_free_bulk(bulk, 0);
+}
+void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
+ struct page *page, int pageoffset, int len, int);
+static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
+ struct page *page, int pageoffset,
+ int len)
+{
+ __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1);
+}
+
+static inline void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc,
+ struct page *page, int pageoffset,
+ int len)
+{
+ __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0);
+}
+
+void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
+ struct obd_import *imp);
+__u64 ptlrpc_next_xid(void);
+__u64 ptlrpc_sample_next_xid(void);
+__u64 ptlrpc_req_xid(struct ptlrpc_request *request);
+
+/* Set of routines to run a function in ptlrpcd context */
+void *ptlrpcd_alloc_work(struct obd_import *imp,
+ int (*cb)(const struct lu_env *, void *), void *data);
+void ptlrpcd_destroy_work(void *handler);
+int ptlrpcd_queue_work(void *handler);
+
+/** @} */
+struct ptlrpc_service_buf_conf {
+ /* nbufs is the # of buffers to allocate when growing the pool */
+ unsigned int bc_nbufs;
+ /* buffer size to post */
+ unsigned int bc_buf_size;
+ /* portal to listen for requests on */
+ unsigned int bc_req_portal;
+ /* portal to send replies to */
+ unsigned int bc_rep_portal;
+ /* maximum request size to be accepted for this service */
+ unsigned int bc_req_max_size;
+ /* maximum reply size this service can ever send */
+ unsigned int bc_rep_max_size;
+};
+
+struct ptlrpc_service_thr_conf {
+ /* threadname should be 8 characters or less - 6 will be added on */
+ char *tc_thr_name;
+ /* factor by which the thread count increases for each CPU */
+ unsigned int tc_thr_factor;
+ /* # of service threads to start on each partition while initializing */
+ unsigned int tc_nthrs_init;
+ /*
+	 * low-water mark of the per-partition upper limit on thread count
+	 * while running; service availability may be impacted if the number
+	 * of threads is lower than this value. It can be ZERO if the service
+	 * doesn't require CPU affinity or there is only one partition.
+ */
+ unsigned int tc_nthrs_base;
+ /* "soft" limit for total threads number */
+ unsigned int tc_nthrs_max;
+ /* user-specified number of threads; it will be validated against
+  * the other members of this structure. */
+ unsigned int tc_nthrs_user;
+ /* set NUMA node affinity for service threads */
+ unsigned int tc_cpu_affinity;
+ /* Tags for lu_context associated with service thread */
+ __u32 tc_ctx_tags;
+};
+
+struct ptlrpc_service_cpt_conf {
+ struct cfs_cpt_table *cc_cptable;
+ /* string pattern to describe CPTs for a service */
+ char *cc_pattern;
+};
+
+struct ptlrpc_service_conf {
+ /* service name */
+ char *psc_name;
+ /* soft watchdog timeout multiplier to print stuck service traces */
+ unsigned int psc_watchdog_factor;
+ /* buffer information */
+ struct ptlrpc_service_buf_conf psc_buf;
+ /* thread information */
+ struct ptlrpc_service_thr_conf psc_thr;
+ /* CPU partition information */
+ struct ptlrpc_service_cpt_conf psc_cpt;
+ /* function table */
+ struct ptlrpc_service_ops psc_ops;
+};
+
+/* ptlrpc/service.c */
+/**
+ * Server-side services API. Register/unregister service, request state
+ * management, service thread management
+ *
+ * @{
+ */
+void ptlrpc_save_lock(struct ptlrpc_request *req,
+ struct lustre_handle *lock, int mode, int no_ack);
+void ptlrpc_commit_replies(struct obd_export *exp);
+void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs);
+void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs);
+int ptlrpc_hpreq_handler(struct ptlrpc_request *req);
+struct ptlrpc_service *ptlrpc_register_service(
+ struct ptlrpc_service_conf *conf,
+ struct proc_dir_entry *proc_entry);
+void ptlrpc_stop_all_threads(struct ptlrpc_service *svc);
+
+int ptlrpc_start_threads(struct ptlrpc_service *svc);
+int ptlrpc_unregister_service(struct ptlrpc_service *service);
+int liblustre_check_services(void *arg);
+void ptlrpc_daemonize(char *name);
+int ptlrpc_service_health_check(struct ptlrpc_service *);
+void ptlrpc_server_drop_request(struct ptlrpc_request *req);
+void ptlrpc_request_change_export(struct ptlrpc_request *req,
+ struct obd_export *export);
+
+int ptlrpc_hr_init(void);
+void ptlrpc_hr_fini(void);
+
+/** @} */
+
+/* ptlrpc/import.c */
+/**
+ * Import API
+ * @{
+ */
+int ptlrpc_connect_import(struct obd_import *imp);
+int ptlrpc_init_import(struct obd_import *imp);
+int ptlrpc_disconnect_import(struct obd_import *imp, int noclose);
+int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
+void deuuidify(char *uuid, const char *prefix, char **uuid_start,
+ int *uuid_len);
+
+/* ptlrpc/pack_generic.c */
+int ptlrpc_reconnect_import(struct obd_import *imp);
+/** @} */
+
+/**
+ * ptlrpc msg buffer and swab interface
+ *
+ * @{
+ */
+int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
+ int index);
+void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
+ int index);
+int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len);
+int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len);
+
+int lustre_msg_check_version(struct lustre_msg *msg, __u32 version);
+void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens,
+ char **bufs);
+int lustre_pack_request(struct ptlrpc_request *, __u32 magic, int count,
+ __u32 *lens, char **bufs);
+int lustre_pack_reply(struct ptlrpc_request *, int count, __u32 *lens,
+ char **bufs);
+int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
+ __u32 *lens, char **bufs, int flags);
+#define LPRFL_EARLY_REPLY 1
+int lustre_pack_reply_flags(struct ptlrpc_request *, int count, __u32 *lens,
+ char **bufs, int flags);
+int lustre_shrink_msg(struct lustre_msg *msg, int segment,
+ unsigned int newlen, int move_data);
+void lustre_free_reply_state(struct ptlrpc_reply_state *rs);
+int __lustre_unpack_msg(struct lustre_msg *m, int len);
+int lustre_msg_hdr_size(__u32 magic, int count);
+int lustre_msg_size(__u32 magic, int count, __u32 *lengths);
+int lustre_msg_size_v2(int count, __u32 *lengths);
+int lustre_packed_msg_size(struct lustre_msg *msg);
+int lustre_msg_early_size(void);
+void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size);
+void *lustre_msg_buf(struct lustre_msg *m, int n, int minlen);
+int lustre_msg_buflen(struct lustre_msg *m, int n);
+void lustre_msg_set_buflen(struct lustre_msg *m, int n, int len);
+int lustre_msg_bufcount(struct lustre_msg *m);
+char *lustre_msg_string(struct lustre_msg *m, int n, int max_len);
+__u32 lustre_msghdr_get_flags(struct lustre_msg *msg);
+void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags);
+__u32 lustre_msg_get_flags(struct lustre_msg *msg);
+void lustre_msg_add_flags(struct lustre_msg *msg, int flags);
+void lustre_msg_set_flags(struct lustre_msg *msg, int flags);
+void lustre_msg_clear_flags(struct lustre_msg *msg, int flags);
+__u32 lustre_msg_get_op_flags(struct lustre_msg *msg);
+void lustre_msg_add_op_flags(struct lustre_msg *msg, int flags);
+void lustre_msg_set_op_flags(struct lustre_msg *msg, int flags);
+struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg);
+__u32 lustre_msg_get_type(struct lustre_msg *msg);
+__u32 lustre_msg_get_version(struct lustre_msg *msg);
+void lustre_msg_add_version(struct lustre_msg *msg, int version);
+__u32 lustre_msg_get_opc(struct lustre_msg *msg);
+__u64 lustre_msg_get_last_xid(struct lustre_msg *msg);
+__u64 lustre_msg_get_last_committed(struct lustre_msg *msg);
+__u64 *lustre_msg_get_versions(struct lustre_msg *msg);
+__u64 lustre_msg_get_transno(struct lustre_msg *msg);
+__u64 lustre_msg_get_slv(struct lustre_msg *msg);
+__u32 lustre_msg_get_limit(struct lustre_msg *msg);
+void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv);
+void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit);
+int lustre_msg_get_status(struct lustre_msg *msg);
+__u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg);
+int lustre_msg_is_v1(struct lustre_msg *msg);
+__u32 lustre_msg_get_magic(struct lustre_msg *msg);
+__u32 lustre_msg_get_timeout(struct lustre_msg *msg);
+__u32 lustre_msg_get_service_time(struct lustre_msg *msg);
+char *lustre_msg_get_jobid(struct lustre_msg *msg);
+__u32 lustre_msg_get_cksum(struct lustre_msg *msg);
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
+__u32 lustre_msg_calc_cksum(struct lustre_msg *msg, int compat18);
+#else
+# warning "remove checksum compatibility support for b1_8"
+__u32 lustre_msg_calc_cksum(struct lustre_msg *msg);
+#endif
+void lustre_msg_set_handle(struct lustre_msg *msg,struct lustre_handle *handle);
+void lustre_msg_set_type(struct lustre_msg *msg, __u32 type);
+void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc);
+void lustre_msg_set_last_xid(struct lustre_msg *msg, __u64 last_xid);
+void lustre_msg_set_last_committed(struct lustre_msg *msg,__u64 last_committed);
+void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions);
+void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno);
+void lustre_msg_set_status(struct lustre_msg *msg, __u32 status);
+void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt);
+void ptlrpc_req_set_repsize(struct ptlrpc_request *req, int count, __u32 *sizes);
+void ptlrpc_request_set_replen(struct ptlrpc_request *req);
+void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout);
+void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time);
+void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid);
+void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum);
+
+static inline void
+lustre_shrink_reply(struct ptlrpc_request *req, int segment,
+ unsigned int newlen, int move_data)
+{
+ LASSERT(req->rq_reply_state);
+ LASSERT(req->rq_repmsg);
+ req->rq_replen = lustre_shrink_msg(req->rq_repmsg, segment,
+ newlen, move_data);
+}
+
+#ifdef CONFIG_LUSTRE_TRANSLATE_ERRNOS
+
+static inline int ptlrpc_status_hton(int h)
+{
+ /*
+ * Positive errnos must be network errnos, such as LUSTRE_EDEADLK,
+ * ELDLM_LOCK_ABORTED, etc.
+ */
+ if (h < 0)
+ return -lustre_errno_hton(-h);
+ else
+ return h;
+}
+
+static inline int ptlrpc_status_ntoh(int n)
+{
+ /*
+ * See the comment in ptlrpc_status_hton().
+ */
+ if (n < 0)
+ return -lustre_errno_ntoh(-n);
+ else
+ return n;
+}
+
+#else
+
+#define ptlrpc_status_hton(h) (h)
+#define ptlrpc_status_ntoh(n) (n)
+
+#endif
+/** @} */
+
+/** Change request phase of \a req to \a new_phase */
+static inline void
+ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
+{
+ if (req->rq_phase == new_phase)
+ return;
+
+ if (new_phase == RQ_PHASE_UNREGISTERING) {
+ req->rq_next_phase = req->rq_phase;
+ if (req->rq_import)
+ atomic_inc(&req->rq_import->imp_unregistering);
+ }
+
+ if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
+ if (req->rq_import)
+ atomic_dec(&req->rq_import->imp_unregistering);
+ }
+
+ DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"",
+ ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase));
+
+ req->rq_phase = new_phase;
+}
+
+/**
+ * Returns true if request \a req got early reply and hard deadline is not met
+ */
+static inline int
+ptlrpc_client_early(struct ptlrpc_request *req)
+{
+ if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
+ req->rq_reply_deadline > cfs_time_current_sec())
+ return 0;
+ return req->rq_early;
+}
+
+/**
+ * Returns true if we got real reply from server for this request
+ */
+static inline int
+ptlrpc_client_replied(struct ptlrpc_request *req)
+{
+ if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
+ req->rq_reply_deadline > cfs_time_current_sec())
+ return 0;
+ return req->rq_replied;
+}
+
+/** Returns true if request \a req is in process of receiving server reply */
+static inline int
+ptlrpc_client_recv(struct ptlrpc_request *req)
+{
+ if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
+ req->rq_reply_deadline > cfs_time_current_sec())
+ return 1;
+ return req->rq_receiving_reply;
+}
+
+static inline int
+ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
+{
+ int rc;
+
+ spin_lock(&req->rq_lock);
+ if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
+ req->rq_reply_deadline > cfs_time_current_sec()) {
+ spin_unlock(&req->rq_lock);
+ return 1;
+ }
+ rc = req->rq_receiving_reply || req->rq_must_unlink;
+ spin_unlock(&req->rq_lock);
+ return rc;
+}
+
+static inline void
+ptlrpc_client_wake_req(struct ptlrpc_request *req)
+{
+ if (req->rq_set == NULL)
+ wake_up(&req->rq_reply_waitq);
+ else
+ wake_up(&req->rq_set->set_waitq);
+}
+
+static inline void
+ptlrpc_rs_addref(struct ptlrpc_reply_state *rs)
+{
+ LASSERT(atomic_read(&rs->rs_refcount) > 0);
+ atomic_inc(&rs->rs_refcount);
+}
+
+static inline void
+ptlrpc_rs_decref(struct ptlrpc_reply_state *rs)
+{
+ LASSERT(atomic_read(&rs->rs_refcount) > 0);
+ if (atomic_dec_and_test(&rs->rs_refcount))
+ lustre_free_reply_state(rs);
+}
+
+/* Should only be called once per req */
+static inline void ptlrpc_req_drop_rs(struct ptlrpc_request *req)
+{
+ if (req->rq_reply_state == NULL)
+ return; /* shouldn't occur */
+ ptlrpc_rs_decref(req->rq_reply_state);
+ req->rq_reply_state = NULL;
+ req->rq_repmsg = NULL;
+}
+
+static inline __u32 lustre_request_magic(struct ptlrpc_request *req)
+{
+ return lustre_msg_get_magic(req->rq_reqmsg);
+}
+
+static inline int ptlrpc_req_get_repsize(struct ptlrpc_request *req)
+{
+ switch (req->rq_reqmsg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2:
+ return req->rq_reqmsg->lm_repsize;
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n",
+ req->rq_reqmsg->lm_magic);
+ return -EFAULT;
+ }
+}
+
+static inline int ptlrpc_send_limit_expired(struct ptlrpc_request *req)
+{
+ if (req->rq_delay_limit != 0 &&
+ cfs_time_before(cfs_time_add(req->rq_queued_time,
+ cfs_time_seconds(req->rq_delay_limit)),
+ cfs_time_current())) {
+ return 1;
+ }
+ return 0;
+}
+
+static inline int ptlrpc_no_resend(struct ptlrpc_request *req)
+{
+ if (!req->rq_no_resend && ptlrpc_send_limit_expired(req)) {
+ spin_lock(&req->rq_lock);
+ req->rq_no_resend = 1;
+ spin_unlock(&req->rq_lock);
+ }
+ return req->rq_no_resend;
+}
+
+static inline int
+ptlrpc_server_get_timeout(struct ptlrpc_service_part *svcpt)
+{
+ int at = AT_OFF ? 0 : at_get(&svcpt->scp_at_estimate);
+
+ return svcpt->scp_service->srv_watchdog_factor *
+ max_t(int, at, obd_timeout);
+}
+
+static inline struct ptlrpc_service *
+ptlrpc_req2svc(struct ptlrpc_request *req)
+{
+ LASSERT(req->rq_rqbd != NULL);
+ return req->rq_rqbd->rqbd_svcpt->scp_service;
+}
+
+/* ldlm/ldlm_lib.c */
+/**
+ * Target client logic
+ * @{
+ */
+int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg);
+int client_obd_cleanup(struct obd_device *obddev);
+int client_connect_import(const struct lu_env *env,
+ struct obd_export **exp, struct obd_device *obd,
+ struct obd_uuid *cluuid, struct obd_connect_data *,
+ void *localdata);
+int client_disconnect_export(struct obd_export *exp);
+int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
+ int priority);
+int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid);
+int client_import_find_conn(struct obd_import *imp, lnet_nid_t peer,
+ struct obd_uuid *uuid);
+int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid);
+void client_destroy_import(struct obd_import *imp);
+/** @} */
+
+
+/* ptlrpc/pinger.c */
+/**
+ * Pinger API (client side only)
+ * @{
+ */
+enum timeout_event {
+ TIMEOUT_GRANT = 1
+};
+struct timeout_item;
+typedef int (*timeout_cb_t)(struct timeout_item *, void *);
+int ptlrpc_pinger_add_import(struct obd_import *imp);
+int ptlrpc_pinger_del_import(struct obd_import *imp);
+int ptlrpc_add_timeout_client(int time, enum timeout_event event,
+ timeout_cb_t cb, void *data,
+ struct list_head *obd_list);
+int ptlrpc_del_timeout_client(struct list_head *obd_list,
+ enum timeout_event event);
+struct ptlrpc_request * ptlrpc_prep_ping(struct obd_import *imp);
+int ptlrpc_obd_ping(struct obd_device *obd);
+cfs_time_t ptlrpc_suspend_wakeup_time(void);
+void ping_evictor_start(void);
+void ping_evictor_stop(void);
+int ptlrpc_check_and_wait_suspend(struct ptlrpc_request *req);
+void ptlrpc_pinger_ir_up(void);
+void ptlrpc_pinger_ir_down(void);
+/** @} */
+int ptlrpc_pinger_suppress_pings(void);
+
+/* ptlrpc daemon bind policy */
+typedef enum {
+ /* all ptlrpcd threads are free mode */
+ PDB_POLICY_NONE = 1,
+ /* all ptlrpcd threads are bound mode */
+ PDB_POLICY_FULL = 2,
+ /* <free1 bound1> <free2 bound2> ... <freeN boundN> */
+ PDB_POLICY_PAIR = 3,
+ /* <free1 bound1> <bound1 free2> ... <freeN boundN> <boundN free1>,
+	 * meaning each ptlrpcd[X] has two partners: thread[X-1] and thread[X+1].
+	 * If the kernel supports NUMA, ptlrpcd threads are bound and
+	 * grouped by NUMA node */
+ PDB_POLICY_NEIGHBOR = 4,
+} pdb_policy_t;
+
+/* ptlrpc daemon load policy
+ * It is the caller's duty to specify how to push the async RPC into some
+ * ptlrpcd queue, but this is not enforced; it is affected by
+ * "ptlrpcd_bind_policy". If that is "PDB_POLICY_FULL", then the RPC will be
+ * processed by the selected ptlrpcd; otherwise, the RPC may be processed by
+ * the selected ptlrpcd or its partner, depending on which is scheduled
+ * first, to accelerate RPC processing. */
+typedef enum {
+ /* on the same CPU core as the caller */
+ PDL_POLICY_SAME = 1,
+ /* within the same CPU partition, but not the same core as the caller */
+ PDL_POLICY_LOCAL = 2,
+ /* round-robin on all CPU cores, but not the same core as the caller */
+ PDL_POLICY_ROUND = 3,
+ /* the specified CPU core is preferred, but not enforced */
+ PDL_POLICY_PREFERRED = 4,
+} pdl_policy_t;
+
+/* ptlrpc/ptlrpcd.c */
+void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force);
+void ptlrpcd_free(struct ptlrpcd_ctl *pc);
+void ptlrpcd_wake(struct ptlrpc_request *req);
+void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx);
+void ptlrpcd_add_rqset(struct ptlrpc_request_set *set);
+int ptlrpcd_addref(void);
+void ptlrpcd_decref(void);
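+
+/*
+ * Purely illustrative wrapper (the name is hypothetical and not part of the
+ * original interface): queue an already-built request on a ptlrpcd thread.
+ * PDL_POLICY_ROUND picks a core round-robin; the index argument only
+ * matters for PDL_POLICY_PREFERRED, so passing -1 for "no preference" is an
+ * assumption here.
+ */
+static inline void ptlrpcd_add_req_round(struct ptlrpc_request *req)
+{
+	ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+}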
+
+/* ptlrpc/lproc_ptlrpc.c */
+/**
+ * procfs output related functions
+ * @{
+ */
+const char* ll_opcode2str(__u32 opcode);
+#ifdef LPROCFS
+void ptlrpc_lprocfs_register_obd(struct obd_device *obd);
+void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd);
+void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes);
+#else
+static inline void ptlrpc_lprocfs_register_obd(struct obd_device *obd) {}
+static inline void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd) {}
+static inline void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes) {}
+#endif
+/** @} */
+
+/* ptlrpc/llog_server.c */
+int llog_origin_handle_open(struct ptlrpc_request *req);
+int llog_origin_handle_destroy(struct ptlrpc_request *req);
+int llog_origin_handle_prev_block(struct ptlrpc_request *req);
+int llog_origin_handle_next_block(struct ptlrpc_request *req);
+int llog_origin_handle_read_header(struct ptlrpc_request *req);
+int llog_origin_handle_close(struct ptlrpc_request *req);
+int llog_origin_handle_cancel(struct ptlrpc_request *req);
+
+/* ptlrpc/llog_client.c */
+extern struct llog_operations llog_client_ops;
+
+/** @} net */
+
+#endif
+/** @} PtlRPC */
diff --git a/drivers/staging/lustre/lustre/include/lustre_param.h b/drivers/staging/lustre/lustre/include/lustre_param.h
new file mode 100644
index 0000000..ed65468
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_param.h
@@ -0,0 +1,121 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/include/lustre_param.h
+ *
+ * User-settable parameter keys
+ *
+ * Author: Nathan Rutman <nathan@clusterfs.com>
+ */
+
+#ifndef _LUSTRE_PARAM_H
+#define _LUSTRE_PARAM_H
+
+/** \defgroup param param
+ *
+ * @{
+ */
+
+/* For interoperability */
+struct cfg_interop_param {
+ char *old_param;
+ char *new_param;
+};
+
+/* obd_config.c */
+int class_find_param(char *buf, char *key, char **valp);
+struct cfg_interop_param *class_find_old_param(const char *param,
+ struct cfg_interop_param *ptr);
+int class_get_next_param(char **params, char *copy);
+int class_match_param(char *buf, char *key, char **valp);
+int class_parse_nid(char *buf, lnet_nid_t *nid, char **endh);
+int class_parse_nid_quiet(char *buf, lnet_nid_t *nid, char **endh);
+int class_parse_net(char *buf, __u32 *net, char **endh);
+int class_match_nid(char *buf, char *key, lnet_nid_t nid);
+int class_match_net(char *buf, char *key, __u32 net);
+/* obd_mount.c */
+int do_lcfg(char *cfgname, lnet_nid_t nid, int cmd,
+ char *s1, char *s2, char *s3, char *s4);
+
+
+
+/****************** User-settable parameter keys *********************/
+/* e.g.
+ tunefs.lustre --param="failover.node=192.168.0.13@tcp0" /dev/sda
+ lctl conf_param testfs-OST0000 failover.node=3@elan,192.168.0.3@tcp0
+ ... testfs-MDT0000.lov.stripesize=4M
+ ... testfs-OST0000.ost.client_cache_seconds=15
+ ... testfs.sys.timeout=<secs>
+ ... testfs.llite.max_read_ahead_mb=16
+*/
+
+/* System global or special params not handled in obd's proc
+ * See mgs_write_log_sys()
+ */
+#define PARAM_TIMEOUT "timeout=" /* global */
+#define PARAM_LDLM_TIMEOUT "ldlm_timeout=" /* global */
+#define PARAM_AT_MIN "at_min=" /* global */
+#define PARAM_AT_MAX "at_max=" /* global */
+#define PARAM_AT_EXTRA "at_extra=" /* global */
+#define PARAM_AT_EARLY_MARGIN "at_early_margin=" /* global */
+#define PARAM_AT_HISTORY "at_history=" /* global */
+#define PARAM_JOBID_VAR "jobid_var=" /* global */
+#define PARAM_MGSNODE "mgsnode=" /* only at mounttime */
+#define PARAM_FAILNODE "failover.node=" /* add failover nid */
+#define PARAM_FAILMODE "failover.mode=" /* initial mount only */
+#define PARAM_ACTIVE "active=" /* activate/deactivate */
+#define PARAM_NETWORK "network=" /* bind on nid */
+#define PARAM_ID_UPCALL "identity_upcall=" /* identity upcall */
+
+/* Prefixes for parameters handled by obd's proc methods (XXX_process_config) */
+#define PARAM_OST "ost."
+#define PARAM_OSC "osc."
+#define PARAM_MDT "mdt."
+#define PARAM_MDD "mdd."
+#define PARAM_MDC "mdc."
+#define PARAM_LLITE "llite."
+#define PARAM_LOV "lov."
+#define PARAM_LOD "lod."
+#define PARAM_OSP "osp."
+#define PARAM_SYS "sys." /* global */
+#define PARAM_SRPC "srpc."
+#define PARAM_SRPC_FLVR "srpc.flavor."
+#define PARAM_SRPC_UDESC "srpc.udesc.cli2mdt"
+#define PARAM_SEC "security."
+#define PARAM_QUOTA "quota." /* global */
+
+/** @} param */
+
+#endif /* _LUSTRE_PARAM_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_quota.h b/drivers/staging/lustre/lustre/include/lustre_quota.h
new file mode 100644
index 0000000..71b5d97
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_quota.h
@@ -0,0 +1,239 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ * Use is subject to license terms.
+ */
+
+#ifndef _LUSTRE_QUOTA_H
+#define _LUSTRE_QUOTA_H
+
+/** \defgroup quota quota
+ *
+ */
+
+#include <linux/lustre_quota.h>
+
+#include <dt_object.h>
+#include <lustre_fid.h>
+#include <lustre_dlm.h>
+
+#ifndef MAX_IQ_TIME
+#define MAX_IQ_TIME 604800 /* (7*24*60*60) 1 week */
+#endif
+
+#ifndef MAX_DQ_TIME
+#define MAX_DQ_TIME 604800 /* (7*24*60*60) 1 week */
+#endif
+
+struct lquota_id_info;
+struct lquota_trans;
+
+/* Gather all quota record types in a union that can be used to read any record
+ * from disk. All fields of these records must be 64-bit aligned, otherwise the
+ * OSD layer may swab them incorrectly. */
+union lquota_rec {
+ struct lquota_glb_rec lqr_glb_rec;
+ struct lquota_slv_rec lqr_slv_rec;
+ struct lquota_acct_rec lqr_acct_rec;
+};
+
+/* Index features supported by the global index objects
+ * Only used for migration purpose and should be removed once on-disk migration
+ * is no longer needed */
+extern struct dt_index_features dt_quota_iusr_features;
+extern struct dt_index_features dt_quota_busr_features;
+extern struct dt_index_features dt_quota_igrp_features;
+extern struct dt_index_features dt_quota_bgrp_features;
+
+/* Name used in the configuration logs to identify the default metadata pool
+ * (composed of all the MDTs, with pool ID 0) and the default data pool (all
+ * the OSTs, with pool ID 0 too). */
+#define QUOTA_METAPOOL_NAME "mdt="
+#define QUOTA_DATAPOOL_NAME "ost="
+
+/*
+ * Quota Master Target support
+ */
+
+/* Request handlers for quota master operations.
+ * This is used by the MDT to pass quota/lock requests to the quota master
+ * target. This won't be needed anymore once the QMT is a real target and
+ * no longer relies on the MDT service threads and namespace. */
+struct qmt_handlers {
+ /* Handle quotactl request from client. */
+ int (*qmth_quotactl)(const struct lu_env *, struct lu_device *,
+ struct obd_quotactl *);
+
+ /* Handle dqacq/dqrel request from slave. */
+ int (*qmth_dqacq)(const struct lu_env *, struct lu_device *,
+ struct ptlrpc_request *);
+
+ /* LDLM intent policy associated with quota locks */
+ int (*qmth_intent_policy)(const struct lu_env *, struct lu_device *,
+ struct ptlrpc_request *, struct ldlm_lock **,
+ int);
+
+ /* Initialize LVB of ldlm resource associated with quota objects */
+ int (*qmth_lvbo_init)(struct lu_device *, struct ldlm_resource *);
+
+ /* Update LVB of ldlm resource associated with quota objects */
+ int (*qmth_lvbo_update)(struct lu_device *, struct ldlm_resource *,
+ struct ptlrpc_request *, int);
+
+ /* Return size of LVB to be packed in ldlm message */
+ int (*qmth_lvbo_size)(struct lu_device *, struct ldlm_lock *);
+
+ /* Fill request buffer with lvb */
+ int (*qmth_lvbo_fill)(struct lu_device *, struct ldlm_lock *, void *,
+ int);
+
+ /* Free lvb associated with ldlm resource */
+ int (*qmth_lvbo_free)(struct lu_device *, struct ldlm_resource *);
+};
+
+/* actual handlers are defined in lustre/quota/qmt_handler.c */
+extern struct qmt_handlers qmt_hdls;
+
+/*
+ * Quota enforcement support on slaves
+ */
+
+struct qsd_instance;
+
+/* The quota slave feature is implemented in the form of a library.
+ * The API is as follows:
+ *
+ * - qsd_init(): the user (mostly the OSD layer) should first allocate a qsd
+ * instance via qsd_init(). This creates all required structures
+ * to manage quota enforcement for this target and performs all
+ * low-level initialization which does not involve any lustre
+ * object. qsd_init() should typically be called when the OSD
+ * is being set up.
+ *
+ * - qsd_prepare(): This sets up on-disk objects associated with the quota slave
+ * feature and initiates the quota reintegration procedure if
+ * needed. qsd_prepare() should typically be called when
+ * ->ldo_prepare is invoked.
+ *
+ * - qsd_start(): a qsd instance should be started once recovery is completed
+ * (i.e. when ->ldo_recovery_complete is called). This is used
+ * to notify the qsd layer that quota should now be enforced
+ * again via the qsd_op_begin/end functions. The last step of the
+ * reintegration procedure (namely usage reconciliation) will be
+ * completed during start.
+ *
+ * - qsd_fini(): is used to release a qsd_instance structure allocated with
+ * qsd_init(). This releases all quota slave objects and frees the
+ * structures associated with the qsd_instance.
+ *
+ * - qsd_op_begin(): is used to enforce quota, it must be called in the
+ * declaration of each operation. qsd_op_end() should then be
+ * invoked later once all operations have been completed in
+ * order to release/adjust the quota space.
+ * Running qsd_op_begin() before qsd_start() isn't fatal and
+ * will return success.
+ * Once qsd_start() has been run, qsd_op_begin() will block
+ * until the reintegration procedure is completed.
+ *
+ * - qsd_op_end(): performs the post-operation quota processing. This must be
+ * called after the operation's transaction has stopped.
+ * While qsd_op_begin() must be invoked each time a new
+ * operation is declared, qsd_op_end() should be called only
+ * once for the whole transaction.
+ *
+ * - qsd_op_adjust(): triggers pre-acquire/release if necessary.
+ *
+ * Below are the function prototypes to be used by OSD layer to manage quota
+ * enforcement. Arguments are documented where each function is defined. */
+
+struct qsd_instance *qsd_init(const struct lu_env *, char *, struct dt_device *,
+ struct proc_dir_entry *);
+int qsd_prepare(const struct lu_env *, struct qsd_instance *);
+int qsd_start(const struct lu_env *, struct qsd_instance *);
+void qsd_fini(const struct lu_env *, struct qsd_instance *);
+int qsd_op_begin(const struct lu_env *, struct qsd_instance *,
+ struct lquota_trans *, struct lquota_id_info *, int *);
+void qsd_op_end(const struct lu_env *, struct qsd_instance *,
+ struct lquota_trans *);
+void qsd_op_adjust(const struct lu_env *, struct qsd_instance *,
+ union lquota_id *, int);
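+
+/*
+ * Purely illustrative sketch of the call sequence described above, as an
+ * OSD-like caller might use it.  The helper name is hypothetical and the
+ * ERR_PTR() return convention assumed for qsd_init() is not documented
+ * here, so treat this as a sketch rather than a reference implementation.
+ */
+static inline int qsd_example_setup(const struct lu_env *env, char *svname,
+				    struct dt_device *dev,
+				    struct proc_dir_entry *proc,
+				    struct qsd_instance **qsdp)
+{
+	struct qsd_instance *qsd;
+	int rc;
+
+	/* allocate the slave instance while the OSD is being set up */
+	qsd = qsd_init(env, svname, dev, proc);
+	if (IS_ERR(qsd))
+		return PTR_ERR(qsd);
+
+	/* set up on-disk objects, typically from ->ldo_prepare */
+	rc = qsd_prepare(env, qsd);
+	if (rc == 0)
+		/* enable enforcement once recovery has completed */
+		rc = qsd_start(env, qsd);
+	if (rc) {
+		qsd_fini(env, qsd);
+		return rc;
+	}
+
+	/* qsd_op_begin()/qsd_op_end() may now bracket each operation;
+	 * qsd_fini() releases the instance at cleanup time. */
+	*qsdp = qsd;
+	return 0;
+}
+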
+/* This is exported for the ldiskfs quota migration only,
+ * see convert_quota_file() */
+int lquota_disk_write_glb(const struct lu_env *, struct dt_object *,
+ __u64, struct lquota_glb_rec *);
+
+/*
+ * Quota information attached to a transaction
+ */
+
+struct lquota_entry;
+
+struct lquota_id_info {
+ /* quota identifier */
+ union lquota_id lqi_id;
+
+ /* USRQUOTA or GRPQUOTA for now, could be expanded for
+ * directory quota or other types later. */
+ int lqi_type;
+
+ /* inodes or kbytes to be consumed or released; it can
+  * be negative when releasing space. */
+ long long lqi_space;
+
+ /* quota slave entry structure associated with this ID */
+ struct lquota_entry *lqi_qentry;
+
+ /* whether we are reporting blocks or inodes */
+ bool lqi_is_blk;
+};
+
+/* Since we enforce only inode quota in the meta pool (MDTs), and block quota
+ * in the data pool (OSTs), there are at most 4 quota ids being enforced in a
+ * single transaction, namely a chown transaction:
+ * the original uid and gid, and the new uid and gid.
+ *
+ * This value might need to be revised when directory quota is added. */
+#define QUOTA_MAX_TRANSIDS 4
+
+/* all qids involved in a single transaction */
+struct lquota_trans {
+ unsigned short lqt_id_cnt;
+ struct lquota_id_info lqt_ids[QUOTA_MAX_TRANSIDS];
+};
+
+/* flags for quota local enforcement */
+#define QUOTA_FL_OVER_USRQUOTA 0x01
+#define QUOTA_FL_OVER_GRPQUOTA 0x02
+#define QUOTA_FL_SYNC 0x04
+
+#define IS_LQUOTA_RES(res) \
+ (res->lr_name.name[LUSTRE_RES_ID_SEQ_OFF] == FID_SEQ_QUOTA || \
+ res->lr_name.name[LUSTRE_RES_ID_SEQ_OFF] == FID_SEQ_QUOTA_GLB)
+
+/* helper function used by MDT & OFD to retrieve quota accounting information
+ * on slave */
+int lquotactl_slv(const struct lu_env *, struct dt_device *,
+ struct obd_quotactl *);
+/** @} quota */
+#endif /* _LUSTRE_QUOTA_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_req_layout.h b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
new file mode 100644
index 0000000..f4d3820
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
@@ -0,0 +1,334 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/include/lustre_req_layout.h
+ *
+ * Lustre Metadata Target (mdt) request handler
+ *
+ * Author: Nikita Danilov <nikita@clusterfs.com>
+ */
+
+#ifndef _LUSTRE_REQ_LAYOUT_H__
+#define _LUSTRE_REQ_LAYOUT_H__
+
+/** \defgroup req_layout req_layout
+ *
+ * @{
+ */
+
+struct req_msg_field;
+struct req_format;
+struct req_capsule;
+
+struct ptlrpc_request;
+
+enum req_location {
+ RCL_CLIENT,
+ RCL_SERVER,
+ RCL_NR
+};
+
+/* Maximal number of fields (buffers) in a request message. */
+#define REQ_MAX_FIELD_NR 9
+
+struct req_capsule {
+ struct ptlrpc_request *rc_req;
+ const struct req_format *rc_fmt;
+ enum req_location rc_loc;
+ __u32 rc_area[RCL_NR][REQ_MAX_FIELD_NR];
+};
+
+#if !defined(__REQ_LAYOUT_USER__)
+
+/* struct ptlrpc_request, lustre_msg* */
+#include <lustre_net.h>
+
+void req_capsule_init(struct req_capsule *pill, struct ptlrpc_request *req,
+ enum req_location location);
+void req_capsule_fini(struct req_capsule *pill);
+
+void req_capsule_set(struct req_capsule *pill, const struct req_format *fmt);
+void req_capsule_client_dump(struct req_capsule *pill);
+void req_capsule_server_dump(struct req_capsule *pill);
+void req_capsule_init_area(struct req_capsule *pill);
+int req_capsule_filled_sizes(struct req_capsule *pill, enum req_location loc);
+int req_capsule_server_pack(struct req_capsule *pill);
+
+void *req_capsule_client_get(struct req_capsule *pill,
+ const struct req_msg_field *field);
+void *req_capsule_client_swab_get(struct req_capsule *pill,
+ const struct req_msg_field *field,
+ void *swabber);
+void *req_capsule_client_sized_get(struct req_capsule *pill,
+ const struct req_msg_field *field,
+ int len);
+void *req_capsule_server_get(struct req_capsule *pill,
+ const struct req_msg_field *field);
+void *req_capsule_server_sized_get(struct req_capsule *pill,
+ const struct req_msg_field *field,
+ int len);
+void *req_capsule_server_swab_get(struct req_capsule *pill,
+ const struct req_msg_field *field,
+ void *swabber);
+void *req_capsule_server_sized_swab_get(struct req_capsule *pill,
+ const struct req_msg_field *field,
+ int len, void *swabber);
+const void *req_capsule_other_get(struct req_capsule *pill,
+ const struct req_msg_field *field);
+
+void req_capsule_set_size(struct req_capsule *pill,
+ const struct req_msg_field *field,
+ enum req_location loc, int size);
+int req_capsule_get_size(const struct req_capsule *pill,
+ const struct req_msg_field *field,
+ enum req_location loc);
+int req_capsule_msg_size(struct req_capsule *pill, enum req_location loc);
+int req_capsule_fmt_size(__u32 magic, const struct req_format *fmt,
+ enum req_location loc);
+void req_capsule_extend(struct req_capsule *pill, const struct req_format *fmt);
+
+int req_capsule_has_field(const struct req_capsule *pill,
+ const struct req_msg_field *field,
+ enum req_location loc);
+int req_capsule_field_present(const struct req_capsule *pill,
+ const struct req_msg_field *field,
+ enum req_location loc);
+void req_capsule_shrink(struct req_capsule *pill,
+ const struct req_msg_field *field,
+ unsigned int newlen,
+ enum req_location loc);
+int req_capsule_server_grow(struct req_capsule *pill,
+ const struct req_msg_field *field,
+ unsigned int newlen);
+int req_layout_init(void);
+void req_layout_fini(void);
+
+/* __REQ_LAYOUT_USER__ */
+#endif
+
+extern struct req_format RQF_OBD_PING;
+extern struct req_format RQF_OBD_SET_INFO;
+extern struct req_format RQF_SEC_CTX;
+extern struct req_format RQF_OBD_IDX_READ;
+/* MGS req_format */
+extern struct req_format RQF_MGS_TARGET_REG;
+extern struct req_format RQF_MGS_SET_INFO;
+extern struct req_format RQF_MGS_CONFIG_READ;
+/* fid/fld req_format */
+extern struct req_format RQF_SEQ_QUERY;
+extern struct req_format RQF_FLD_QUERY;
+/* MDS req_format */
+extern struct req_format RQF_MDS_CONNECT;
+extern struct req_format RQF_MDS_DISCONNECT;
+extern struct req_format RQF_MDS_STATFS;
+extern struct req_format RQF_MDS_GETSTATUS;
+extern struct req_format RQF_MDS_SYNC;
+extern struct req_format RQF_MDS_GETXATTR;
+extern struct req_format RQF_MDS_GETATTR;
+extern struct req_format RQF_UPDATE_OBJ;
+
+/*
+ * This is the format of a direct (non-intent) MDS_GETATTR_NAME request.
+ */
+extern struct req_format RQF_MDS_GETATTR_NAME;
+extern struct req_format RQF_MDS_CLOSE;
+extern struct req_format RQF_MDS_PIN;
+extern struct req_format RQF_MDS_UNPIN;
+extern struct req_format RQF_MDS_CONNECT;
+extern struct req_format RQF_MDS_DISCONNECT;
+extern struct req_format RQF_MDS_GET_INFO;
+extern struct req_format RQF_MDS_READPAGE;
+extern struct req_format RQF_MDS_WRITEPAGE;
+extern struct req_format RQF_MDS_IS_SUBDIR;
+extern struct req_format RQF_MDS_DONE_WRITING;
+extern struct req_format RQF_MDS_REINT;
+extern struct req_format RQF_MDS_REINT_CREATE;
+extern struct req_format RQF_MDS_REINT_CREATE_RMT_ACL;
+extern struct req_format RQF_MDS_REINT_CREATE_SLAVE;
+extern struct req_format RQF_MDS_REINT_CREATE_SYM;
+extern struct req_format RQF_MDS_REINT_OPEN;
+extern struct req_format RQF_MDS_REINT_UNLINK;
+extern struct req_format RQF_MDS_REINT_LINK;
+extern struct req_format RQF_MDS_REINT_RENAME;
+extern struct req_format RQF_MDS_REINT_SETATTR;
+extern struct req_format RQF_MDS_REINT_SETXATTR;
+extern struct req_format RQF_MDS_QUOTACHECK;
+extern struct req_format RQF_MDS_QUOTACTL;
+extern struct req_format RQF_QC_CALLBACK;
+extern struct req_format RQF_QUOTA_DQACQ;
+extern struct req_format RQF_MDS_SWAP_LAYOUTS;
+/* MDS hsm formats */
+extern struct req_format RQF_MDS_HSM_STATE_GET;
+extern struct req_format RQF_MDS_HSM_STATE_SET;
+extern struct req_format RQF_MDS_HSM_ACTION;
+extern struct req_format RQF_MDS_HSM_PROGRESS;
+extern struct req_format RQF_MDS_HSM_CT_REGISTER;
+extern struct req_format RQF_MDS_HSM_CT_UNREGISTER;
+extern struct req_format RQF_MDS_HSM_REQUEST;
+/* OST req_format */
+extern struct req_format RQF_OST_CONNECT;
+extern struct req_format RQF_OST_DISCONNECT;
+extern struct req_format RQF_OST_QUOTACHECK;
+extern struct req_format RQF_OST_QUOTACTL;
+extern struct req_format RQF_OST_GETATTR;
+extern struct req_format RQF_OST_SETATTR;
+extern struct req_format RQF_OST_CREATE;
+extern struct req_format RQF_OST_PUNCH;
+extern struct req_format RQF_OST_SYNC;
+extern struct req_format RQF_OST_DESTROY;
+extern struct req_format RQF_OST_BRW_READ;
+extern struct req_format RQF_OST_BRW_WRITE;
+extern struct req_format RQF_OST_STATFS;
+extern struct req_format RQF_OST_SET_GRANT_INFO;
+extern struct req_format RQF_OST_GET_INFO_GENERIC;
+extern struct req_format RQF_OST_GET_INFO_LAST_ID;
+extern struct req_format RQF_OST_GET_INFO_LAST_FID;
+extern struct req_format RQF_OST_SET_INFO_LAST_FID;
+extern struct req_format RQF_OST_GET_INFO_FIEMAP;
+
+/* LDLM req_format */
+extern struct req_format RQF_LDLM_ENQUEUE;
+extern struct req_format RQF_LDLM_ENQUEUE_LVB;
+extern struct req_format RQF_LDLM_CONVERT;
+extern struct req_format RQF_LDLM_INTENT;
+extern struct req_format RQF_LDLM_INTENT_BASIC;
+extern struct req_format RQF_LDLM_INTENT_LAYOUT;
+extern struct req_format RQF_LDLM_INTENT_GETATTR;
+extern struct req_format RQF_LDLM_INTENT_OPEN;
+extern struct req_format RQF_LDLM_INTENT_CREATE;
+extern struct req_format RQF_LDLM_INTENT_UNLINK;
+extern struct req_format RQF_LDLM_INTENT_QUOTA;
+extern struct req_format RQF_LDLM_CANCEL;
+extern struct req_format RQF_LDLM_CALLBACK;
+extern struct req_format RQF_LDLM_CP_CALLBACK;
+extern struct req_format RQF_LDLM_BL_CALLBACK;
+extern struct req_format RQF_LDLM_GL_CALLBACK;
+extern struct req_format RQF_LDLM_GL_DESC_CALLBACK;
+/* LOG req_format */
+extern struct req_format RQF_LOG_CANCEL;
+extern struct req_format RQF_LLOG_ORIGIN_HANDLE_CREATE;
+extern struct req_format RQF_LLOG_ORIGIN_HANDLE_DESTROY;
+extern struct req_format RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK;
+extern struct req_format RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK;
+extern struct req_format RQF_LLOG_ORIGIN_HANDLE_READ_HEADER;
+extern struct req_format RQF_LLOG_ORIGIN_CONNECT;
+
+extern struct req_msg_field RMF_GENERIC_DATA;
+extern struct req_msg_field RMF_PTLRPC_BODY;
+extern struct req_msg_field RMF_MDT_BODY;
+extern struct req_msg_field RMF_MDT_EPOCH;
+extern struct req_msg_field RMF_OBD_STATFS;
+extern struct req_msg_field RMF_NAME;
+extern struct req_msg_field RMF_SYMTGT;
+extern struct req_msg_field RMF_TGTUUID;
+extern struct req_msg_field RMF_CLUUID;
+extern struct req_msg_field RMF_SETINFO_VAL;
+extern struct req_msg_field RMF_SETINFO_KEY;
+extern struct req_msg_field RMF_GETINFO_VAL;
+extern struct req_msg_field RMF_GETINFO_VALLEN;
+extern struct req_msg_field RMF_GETINFO_KEY;
+extern struct req_msg_field RMF_IDX_INFO;
+
+/*
+ * connection handle received in MDS_CONNECT request.
+ */
+extern struct req_msg_field RMF_CONN;
+extern struct req_msg_field RMF_CONNECT_DATA;
+extern struct req_msg_field RMF_DLM_REQ;
+extern struct req_msg_field RMF_DLM_REP;
+extern struct req_msg_field RMF_DLM_LVB;
+extern struct req_msg_field RMF_DLM_GL_DESC;
+extern struct req_msg_field RMF_LDLM_INTENT;
+extern struct req_msg_field RMF_LAYOUT_INTENT;
+extern struct req_msg_field RMF_MDT_MD;
+extern struct req_msg_field RMF_REC_REINT;
+extern struct req_msg_field RMF_EADATA;
+extern struct req_msg_field RMF_ACL;
+extern struct req_msg_field RMF_LOGCOOKIES;
+extern struct req_msg_field RMF_CAPA1;
+extern struct req_msg_field RMF_CAPA2;
+extern struct req_msg_field RMF_OBD_QUOTACHECK;
+extern struct req_msg_field RMF_OBD_QUOTACTL;
+extern struct req_msg_field RMF_QUOTA_BODY;
+extern struct req_msg_field RMF_STRING;
+extern struct req_msg_field RMF_SWAP_LAYOUTS;
+extern struct req_msg_field RMF_MDS_HSM_PROGRESS;
+extern struct req_msg_field RMF_MDS_HSM_REQUEST;
+extern struct req_msg_field RMF_MDS_HSM_USER_ITEM;
+extern struct req_msg_field RMF_MDS_HSM_ARCHIVE;
+extern struct req_msg_field RMF_HSM_USER_STATE;
+extern struct req_msg_field RMF_HSM_STATE_SET;
+extern struct req_msg_field RMF_MDS_HSM_CURRENT_ACTION;
+extern struct req_msg_field RMF_MDS_HSM_REQUEST;
+
+/* seq-mgr fields */
+extern struct req_msg_field RMF_SEQ_OPC;
+extern struct req_msg_field RMF_SEQ_RANGE;
+extern struct req_msg_field RMF_FID_SPACE;
+
+/* FLD fields */
+extern struct req_msg_field RMF_FLD_OPC;
+extern struct req_msg_field RMF_FLD_MDFLD;
+
+extern struct req_msg_field RMF_LLOGD_BODY;
+extern struct req_msg_field RMF_LLOG_LOG_HDR;
+extern struct req_msg_field RMF_LLOGD_CONN_BODY;
+
+extern struct req_msg_field RMF_MGS_TARGET_INFO;
+extern struct req_msg_field RMF_MGS_SEND_PARAM;
+
+extern struct req_msg_field RMF_OST_BODY;
+extern struct req_msg_field RMF_OBD_IOOBJ;
+extern struct req_msg_field RMF_OBD_ID;
+extern struct req_msg_field RMF_FID;
+extern struct req_msg_field RMF_NIOBUF_REMOTE;
+extern struct req_msg_field RMF_RCS;
+extern struct req_msg_field RMF_FIEMAP_KEY;
+extern struct req_msg_field RMF_FIEMAP_VAL;
+extern struct req_msg_field RMF_OST_ID;
+
+/* MGS config read message format */
+extern struct req_msg_field RMF_MGS_CONFIG_BODY;
+extern struct req_msg_field RMF_MGS_CONFIG_RES;
+
+/* generic uint32 */
+extern struct req_msg_field RMF_U32;
+
+/* OBJ update format */
+extern struct req_msg_field RMF_UPDATE;
+extern struct req_msg_field RMF_UPDATE_REPLY;
+/** @} req_layout */
+
+#endif /* _LUSTRE_REQ_LAYOUT_H__ */
diff --git a/drivers/staging/lustre/lustre/include/lustre_sec.h b/drivers/staging/lustre/lustre/include/lustre_sec.h
new file mode 100644
index 0000000..70b8b13
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_sec.h
@@ -0,0 +1,1145 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef _LUSTRE_SEC_H_
+#define _LUSTRE_SEC_H_
+
+/** \defgroup sptlrpc sptlrpc
+ *
+ * @{
+ */
+
+/*
+ * declared here to avoid including the headers that define them
+ */
+struct obd_import;
+struct obd_export;
+struct ptlrpc_request;
+struct ptlrpc_reply_state;
+struct ptlrpc_bulk_desc;
+struct brw_page;
+/* Linux specific */
+struct key;
+struct seq_file;
+
+/*
+ * forward declaration
+ */
+struct ptlrpc_sec_policy;
+struct ptlrpc_sec_cops;
+struct ptlrpc_sec_sops;
+struct ptlrpc_sec;
+struct ptlrpc_svc_ctx;
+struct ptlrpc_cli_ctx;
+struct ptlrpc_ctx_ops;
+
+/**
+ * \addtogroup flavor flavor
+ *
+ * An RPC flavor is represented by a 32-bit integer. Currently the high
+ * 12 bits are unused and must be set to 0 to allow for future expansion.
+ * <pre>
+ * ------------------------------------------------------------------------
+ * | 4b (bulk svc) | 4b (bulk type) | 4b (svc) | 4b (mech) | 4b (policy) |
+ * ------------------------------------------------------------------------
+ * </pre>
+ *
+ * @{
+ */
+
+/*
+ * flavor constants
+ */
+enum sptlrpc_policy {
+ SPTLRPC_POLICY_NULL = 0,
+ SPTLRPC_POLICY_PLAIN = 1,
+ SPTLRPC_POLICY_GSS = 2,
+ SPTLRPC_POLICY_MAX,
+};
+
+enum sptlrpc_mech_null {
+ SPTLRPC_MECH_NULL = 0,
+ SPTLRPC_MECH_NULL_MAX,
+};
+
+enum sptlrpc_mech_plain {
+ SPTLRPC_MECH_PLAIN = 0,
+ SPTLRPC_MECH_PLAIN_MAX,
+};
+
+enum sptlrpc_mech_gss {
+ SPTLRPC_MECH_GSS_NULL = 0,
+ SPTLRPC_MECH_GSS_KRB5 = 1,
+ SPTLRPC_MECH_GSS_MAX,
+};
+
+enum sptlrpc_service_type {
+ SPTLRPC_SVC_NULL = 0, /**< no security */
+ SPTLRPC_SVC_AUTH = 1, /**< authentication only */
+ SPTLRPC_SVC_INTG = 2, /**< integrity */
+ SPTLRPC_SVC_PRIV = 3, /**< privacy */
+ SPTLRPC_SVC_MAX,
+};
+
+enum sptlrpc_bulk_type {
+ SPTLRPC_BULK_DEFAULT = 0, /**< follow rpc flavor */
+ SPTLRPC_BULK_HASH = 1, /**< hash integrity */
+ SPTLRPC_BULK_MAX,
+};
+
+enum sptlrpc_bulk_service {
+ SPTLRPC_BULK_SVC_NULL = 0, /**< no security */
+ SPTLRPC_BULK_SVC_AUTH = 1, /**< authentication only */
+ SPTLRPC_BULK_SVC_INTG = 2, /**< integrity */
+ SPTLRPC_BULK_SVC_PRIV = 3, /**< privacy */
+ SPTLRPC_BULK_SVC_MAX,
+};
+
+/*
+ * compose/extract macros
+ */
+#define FLVR_POLICY_OFFSET (0)
+#define FLVR_MECH_OFFSET (4)
+#define FLVR_SVC_OFFSET (8)
+#define FLVR_BULK_TYPE_OFFSET (12)
+#define FLVR_BULK_SVC_OFFSET (16)
+
+#define MAKE_FLVR(policy, mech, svc, btype, bsvc) \
+ (((__u32)(policy) << FLVR_POLICY_OFFSET) | \
+ ((__u32)(mech) << FLVR_MECH_OFFSET) | \
+ ((__u32)(svc) << FLVR_SVC_OFFSET) | \
+ ((__u32)(btype) << FLVR_BULK_TYPE_OFFSET) | \
+ ((__u32)(bsvc) << FLVR_BULK_SVC_OFFSET))
+
+/*
+ * extraction
+ */
+#define SPTLRPC_FLVR_POLICY(flavor) \
+ ((((__u32)(flavor)) >> FLVR_POLICY_OFFSET) & 0xF)
+#define SPTLRPC_FLVR_MECH(flavor) \
+ ((((__u32)(flavor)) >> FLVR_MECH_OFFSET) & 0xF)
+#define SPTLRPC_FLVR_SVC(flavor) \
+ ((((__u32)(flavor)) >> FLVR_SVC_OFFSET) & 0xF)
+#define SPTLRPC_FLVR_BULK_TYPE(flavor) \
+ ((((__u32)(flavor)) >> FLVR_BULK_TYPE_OFFSET) & 0xF)
+#define SPTLRPC_FLVR_BULK_SVC(flavor) \
+ ((((__u32)(flavor)) >> FLVR_BULK_SVC_OFFSET) & 0xF)
+
+#define SPTLRPC_FLVR_BASE(flavor) \
+ ((((__u32)(flavor)) >> FLVR_POLICY_OFFSET) & 0xFFF)
+#define SPTLRPC_FLVR_BASE_SUB(flavor) \
+ ((((__u32)(flavor)) >> FLVR_MECH_OFFSET) & 0xFF)
+
+/*
+ * gss subflavors
+ */
+#define MAKE_BASE_SUBFLVR(mech, svc) \
+ ((__u32)(mech) | \
+ ((__u32)(svc) << (FLVR_SVC_OFFSET - FLVR_MECH_OFFSET)))
+
+#define SPTLRPC_SUBFLVR_KRB5N \
+ MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_NULL)
+#define SPTLRPC_SUBFLVR_KRB5A \
+ MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_AUTH)
+#define SPTLRPC_SUBFLVR_KRB5I \
+ MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_INTG)
+#define SPTLRPC_SUBFLVR_KRB5P \
+ MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_PRIV)
+
+/*
+ * "end user" flavors
+ */
+#define SPTLRPC_FLVR_NULL \
+ MAKE_FLVR(SPTLRPC_POLICY_NULL, \
+ SPTLRPC_MECH_NULL, \
+ SPTLRPC_SVC_NULL, \
+ SPTLRPC_BULK_DEFAULT, \
+ SPTLRPC_BULK_SVC_NULL)
+#define SPTLRPC_FLVR_PLAIN \
+ MAKE_FLVR(SPTLRPC_POLICY_PLAIN, \
+ SPTLRPC_MECH_PLAIN, \
+ SPTLRPC_SVC_NULL, \
+ SPTLRPC_BULK_HASH, \
+ SPTLRPC_BULK_SVC_INTG)
+#define SPTLRPC_FLVR_KRB5N \
+ MAKE_FLVR(SPTLRPC_POLICY_GSS, \
+ SPTLRPC_MECH_GSS_KRB5, \
+ SPTLRPC_SVC_NULL, \
+ SPTLRPC_BULK_DEFAULT, \
+ SPTLRPC_BULK_SVC_NULL)
+#define SPTLRPC_FLVR_KRB5A \
+ MAKE_FLVR(SPTLRPC_POLICY_GSS, \
+ SPTLRPC_MECH_GSS_KRB5, \
+ SPTLRPC_SVC_AUTH, \
+ SPTLRPC_BULK_DEFAULT, \
+ SPTLRPC_BULK_SVC_NULL)
+#define SPTLRPC_FLVR_KRB5I \
+ MAKE_FLVR(SPTLRPC_POLICY_GSS, \
+ SPTLRPC_MECH_GSS_KRB5, \
+ SPTLRPC_SVC_INTG, \
+ SPTLRPC_BULK_DEFAULT, \
+ SPTLRPC_BULK_SVC_INTG)
+#define SPTLRPC_FLVR_KRB5P \
+ MAKE_FLVR(SPTLRPC_POLICY_GSS, \
+ SPTLRPC_MECH_GSS_KRB5, \
+ SPTLRPC_SVC_PRIV, \
+ SPTLRPC_BULK_DEFAULT, \
+ SPTLRPC_BULK_SVC_PRIV)
+
+#define SPTLRPC_FLVR_DEFAULT SPTLRPC_FLVR_NULL
+
+#define SPTLRPC_FLVR_INVALID ((__u32) 0xFFFFFFFF)
+#define SPTLRPC_FLVR_ANY ((__u32) 0xFFF00000)
+
+/**
+ * extract the useful part from wire flavor
+ */
+#define WIRE_FLVR(wflvr) (((__u32) (wflvr)) & 0x000FFFFF)
+
+/** @} flavor */
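+
+/*
+ * A minimal illustrative sketch (hypothetical helper, not part of the
+ * sptlrpc interface): the extraction macros above can be combined to answer
+ * simple questions about a composed flavor, e.g. whether it is GSS/krb5
+ * based.
+ */
+static inline int sptlrpc_flavor_is_krb5_example(__u32 flvr)
+{
+	return SPTLRPC_FLVR_POLICY(flvr) == SPTLRPC_POLICY_GSS &&
+	       SPTLRPC_FLVR_MECH(flvr) == SPTLRPC_MECH_GSS_KRB5;
+}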
+
+static inline void flvr_set_svc(__u32 *flvr, __u32 svc)
+{
+ LASSERT(svc < SPTLRPC_SVC_MAX);
+ *flvr = MAKE_FLVR(SPTLRPC_FLVR_POLICY(*flvr),
+ SPTLRPC_FLVR_MECH(*flvr),
+ svc,
+ SPTLRPC_FLVR_BULK_TYPE(*flvr),
+ SPTLRPC_FLVR_BULK_SVC(*flvr));
+}
+
+static inline void flvr_set_bulk_svc(__u32 *flvr, __u32 svc)
+{
+ LASSERT(svc < SPTLRPC_BULK_SVC_MAX);
+ *flvr = MAKE_FLVR(SPTLRPC_FLVR_POLICY(*flvr),
+ SPTLRPC_FLVR_MECH(*flvr),
+ SPTLRPC_FLVR_SVC(*flvr),
+ SPTLRPC_FLVR_BULK_TYPE(*flvr),
+ svc);
+}
+
+struct bulk_spec_hash {
+ __u8 hash_alg;
+};
+
+/**
+ * Full description of flavors being used on a ptlrpc connection, including
+ * both regular RPC and bulk transfer parts.
+ */
+struct sptlrpc_flavor {
+ /**
+ * wire flavor, should be renamed to sf_wire.
+ */
+ __u32 sf_rpc;
+ /**
+ * general flags of PTLRPC_SEC_FL_*
+ */
+ __u32 sf_flags;
+ /**
+ * rpc flavor specification
+ */
+ union {
+ /* nothing for now */
+ } u_rpc;
+ /**
+ * bulk flavor specification
+ */
+ union {
+ struct bulk_spec_hash hash;
+ } u_bulk;
+};
+
+/**
+ * Identifies which part of Lustre generated the RPC. It is encoded into
+ * RPC requests and checked by the ptlrpc service.
+ */
+enum lustre_sec_part {
+ LUSTRE_SP_CLI = 0,
+ LUSTRE_SP_MDT,
+ LUSTRE_SP_OST,
+ LUSTRE_SP_MGC,
+ LUSTRE_SP_MGS,
+ LUSTRE_SP_ANY = 0xFF
+};
+
+const char *sptlrpc_part2name(enum lustre_sec_part sp);
+enum lustre_sec_part sptlrpc_target_sec_part(struct obd_device *obd);
+
+/**
+ * A rule specifies a flavor to be used by a ptlrpc connection between
+ * two Lustre parts.
+ */
+struct sptlrpc_rule {
+ __u32 sr_netid; /* LNET network ID */
+ __u8 sr_from; /* sec_part */
+ __u8 sr_to; /* sec_part */
+ __u16 sr_padding;
+ struct sptlrpc_flavor sr_flvr;
+};
+
+/**
+ * A set of rules in memory.
+ *
+ * Rules are generated and stored on MGS, and propagated to MDT, OST,
+ * and client when needed.
+ */
+struct sptlrpc_rule_set {
+ int srs_nslot;
+ int srs_nrule;
+ struct sptlrpc_rule *srs_rules;
+};
+
+int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr);
+int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr);
+
+static inline void sptlrpc_rule_set_init(struct sptlrpc_rule_set *set)
+{
+ memset(set, 0, sizeof(*set));
+}
+
+void sptlrpc_rule_set_free(struct sptlrpc_rule_set *set);
+int sptlrpc_rule_set_expand(struct sptlrpc_rule_set *set);
+int sptlrpc_rule_set_merge(struct sptlrpc_rule_set *set,
+ struct sptlrpc_rule *rule);
+int sptlrpc_rule_set_choose(struct sptlrpc_rule_set *rset,
+ enum lustre_sec_part from,
+ enum lustre_sec_part to,
+ lnet_nid_t nid,
+ struct sptlrpc_flavor *sf);
+void sptlrpc_rule_set_dump(struct sptlrpc_rule_set *set);
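+
+/*
+ * A minimal illustrative sketch (hypothetical helper, not part of the
+ * sptlrpc interface): a typical flow over the rule-set API above, i.e.
+ * initialize a set, merge one parsed rule, then choose the flavor for a
+ * <from, to, nid> triple.
+ */
+static inline int sptlrpc_rule_set_choose_example(struct sptlrpc_rule *rule,
+						  lnet_nid_t nid,
+						  struct sptlrpc_flavor *sf)
+{
+	struct sptlrpc_rule_set rset;
+	int rc;
+
+	sptlrpc_rule_set_init(&rset);
+	rc = sptlrpc_rule_set_merge(&rset, rule);
+	if (rc == 0)
+		rc = sptlrpc_rule_set_choose(&rset, LUSTRE_SP_CLI,
+					     LUSTRE_SP_MDT, nid, sf);
+	sptlrpc_rule_set_free(&rset);
+	return rc;
+}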
+
+int sptlrpc_process_config(struct lustre_cfg *lcfg);
+void sptlrpc_conf_log_start(const char *logname);
+void sptlrpc_conf_log_stop(const char *logname);
+void sptlrpc_conf_log_update_begin(const char *logname);
+void sptlrpc_conf_log_update_end(const char *logname);
+void sptlrpc_conf_client_adapt(struct obd_device *obd);
+int sptlrpc_conf_target_get_rules(struct obd_device *obd,
+ struct sptlrpc_rule_set *rset,
+ int initial);
+void sptlrpc_target_choose_flavor(struct sptlrpc_rule_set *rset,
+ enum lustre_sec_part from,
+ lnet_nid_t nid,
+ struct sptlrpc_flavor *flavor);
+
+/* The maximum length of the security payload. 1024 is enough for Kerberos 5
+ * and is expected to be enough for other future mechanisms, though this is
+ * not guaranteed. Only used by the pre-allocated request/reply pool.
+ */
+#define SPTLRPC_MAX_PAYLOAD (1024)
+
+
+struct vfs_cred {
+ uint32_t vc_uid;
+ uint32_t vc_gid;
+};
+
+struct ptlrpc_ctx_ops {
+ /**
+ * To determine whether it's suitable to use the \a ctx for \a vcred.
+ */
+ int (*match) (struct ptlrpc_cli_ctx *ctx,
+ struct vfs_cred *vcred);
+
+ /**
+ * To bring the \a ctx uptodate.
+ */
+ int (*refresh) (struct ptlrpc_cli_ctx *ctx);
+
+ /**
+ * Validate the \a ctx.
+ */
+ int (*validate) (struct ptlrpc_cli_ctx *ctx);
+
+ /**
+ * Force the \a ctx to die.
+ */
+ void (*die) (struct ptlrpc_cli_ctx *ctx,
+ int grace);
+ int (*display) (struct ptlrpc_cli_ctx *ctx,
+ char *buf, int bufsize);
+
+ /**
+ * Sign the request message using \a ctx.
+ *
+	 * \pre req->rq_reqmsg points to the request message.
+	 * \pre req->rq_reqlen is the request message length.
+	 * \post req->rq_reqbuf points to the request message with signature.
+ * \post req->rq_reqdata_len is set to the final request message size.
+ *
+ * \see null_ctx_sign(), plain_ctx_sign(), gss_cli_ctx_sign().
+ */
+ int (*sign) (struct ptlrpc_cli_ctx *ctx,
+ struct ptlrpc_request *req);
+
+ /**
+ * Verify the reply message using \a ctx.
+ *
+	 * \pre req->rq_repdata points to the reply message with signature.
+	 * \pre req->rq_repdata_len is the total reply message length.
+	 * \post req->rq_repmsg points to the reply message without signature.
+ * \post req->rq_replen is the reply message length.
+ *
+ * \see null_ctx_verify(), plain_ctx_verify(), gss_cli_ctx_verify().
+ */
+ int (*verify) (struct ptlrpc_cli_ctx *ctx,
+ struct ptlrpc_request *req);
+
+ /**
+ * Encrypt the request message using \a ctx.
+ *
+	 * \pre req->rq_reqmsg points to the request message in clear text.
+	 * \pre req->rq_reqlen is the request message length.
+	 * \post req->rq_reqbuf points to the request message.
+ * \post req->rq_reqdata_len is set to the final request message size.
+ *
+ * \see gss_cli_ctx_seal().
+ */
+ int (*seal) (struct ptlrpc_cli_ctx *ctx,
+ struct ptlrpc_request *req);
+
+ /**
+ * Decrypt the reply message using \a ctx.
+ *
+	 * \pre req->rq_repdata points to the encrypted reply message.
+	 * \pre req->rq_repdata_len is the total cipher text length.
+	 * \post req->rq_repmsg points to the reply message in clear text.
+ * \post req->rq_replen is the reply message length in clear text.
+ *
+ * \see gss_cli_ctx_unseal().
+ */
+ int (*unseal) (struct ptlrpc_cli_ctx *ctx,
+ struct ptlrpc_request *req);
+
+ /**
+ * Wrap bulk request data. This is called before wrapping RPC
+ * request message.
+ *
+	 * \pre bulk buffer is described by desc->bd_iov and
+	 * desc->bd_iov_count. Note that for a read it's just a buffer and no
+	 * data needs to be sent; for a write it contains data in clear text.
+	 * \post when necessary, ptlrpc_bulk_sec_desc has been properly
+	 * prepared (usually inside the RPC request message).
+	 * - encryption: the cipher-text bulk buffer is described by
+	 * desc->bd_enc_iov and desc->bd_iov_count (currently the iov
+	 * count is assumed to remain the same).
+	 * - otherwise: the bulk buffer is still desc->bd_iov and
+	 * desc->bd_iov_count.
+ *
+ * \return 0: success.
+ * \return -ev: error code.
+ *
+ * \see plain_cli_wrap_bulk(), gss_cli_ctx_wrap_bulk().
+ */
+ int (*wrap_bulk) (struct ptlrpc_cli_ctx *ctx,
+ struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc);
+
+ /**
+ * Unwrap bulk reply data. This is called after wrapping RPC
+ * reply message.
+ *
+	 * \pre bulk buffer is described by desc->bd_iov/desc->bd_enc_iov and
+	 * desc->bd_iov_count, according to wrap_bulk().
+	 * \post the final bulk data in clear text is placed in the buffer
+	 * described by desc->bd_iov and desc->bd_iov_count.
+ * \return +ve nob of actual bulk data in clear text.
+ * \return -ve error code.
+ *
+ * \see plain_cli_unwrap_bulk(), gss_cli_ctx_unwrap_bulk().
+ */
+ int (*unwrap_bulk) (struct ptlrpc_cli_ctx *ctx,
+ struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc);
+};
+
+#define PTLRPC_CTX_NEW_BIT (0) /* newly created */
+#define PTLRPC_CTX_UPTODATE_BIT (1) /* uptodate */
+#define PTLRPC_CTX_DEAD_BIT (2) /* mark expired gracefully */
+#define PTLRPC_CTX_ERROR_BIT (3) /* fatal error (refresh, etc.) */
+#define PTLRPC_CTX_CACHED_BIT (8) /* in ctx cache (hash etc.) */
+#define PTLRPC_CTX_ETERNAL_BIT (9) /* always valid */
+
+#define PTLRPC_CTX_NEW (1 << PTLRPC_CTX_NEW_BIT)
+#define PTLRPC_CTX_UPTODATE (1 << PTLRPC_CTX_UPTODATE_BIT)
+#define PTLRPC_CTX_DEAD (1 << PTLRPC_CTX_DEAD_BIT)
+#define PTLRPC_CTX_ERROR (1 << PTLRPC_CTX_ERROR_BIT)
+#define PTLRPC_CTX_CACHED (1 << PTLRPC_CTX_CACHED_BIT)
+#define PTLRPC_CTX_ETERNAL (1 << PTLRPC_CTX_ETERNAL_BIT)
+
+#define PTLRPC_CTX_STATUS_MASK (PTLRPC_CTX_NEW_BIT | \
+ PTLRPC_CTX_UPTODATE | \
+ PTLRPC_CTX_DEAD | \
+ PTLRPC_CTX_ERROR)
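+
+/*
+ * Note (descriptive, not part of the original comments): PTLRPC_CTX_NEW_BIT
+ * is bit index 0 and therefore contributes nothing to the mask above, so the
+ * cli_ctx_status() helpers further below report a context that only has
+ * PTLRPC_CTX_NEW set as not yet refreshed.
+ */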
+
+struct ptlrpc_cli_ctx {
+ struct hlist_node cc_cache; /* linked into ctx cache */
+ atomic_t cc_refcount;
+ struct ptlrpc_sec *cc_sec;
+ struct ptlrpc_ctx_ops *cc_ops;
+ cfs_time_t cc_expire; /* in seconds */
+ unsigned int cc_early_expire:1;
+ unsigned long cc_flags;
+ struct vfs_cred cc_vcred;
+ spinlock_t cc_lock;
+ struct list_head cc_req_list; /* waiting reqs linked here */
+ struct list_head cc_gc_chain; /* linked to gc chain */
+};
+
+/**
+ * client side policy operation vector.
+ */
+struct ptlrpc_sec_cops {
+ /**
+ * Given an \a imp, create and initialize a ptlrpc_sec structure.
+ * \param ctx service context:
+ * - regular import: \a ctx should be NULL;
+ * - reverse import: \a ctx is obtained from incoming request.
+ * \param flavor specify what flavor to use.
+ *
+	 * When necessary, the policy module is responsible for taking a
+	 * reference on the import.
+ *
+ * \see null_create_sec(), plain_create_sec(), gss_sec_create_kr().
+ */
+ struct ptlrpc_sec * (*create_sec) (struct obd_import *imp,
+ struct ptlrpc_svc_ctx *ctx,
+ struct sptlrpc_flavor *flavor);
+
+ /**
+ * Destructor of ptlrpc_sec. When called, refcount has been dropped
+	 * to 0 and all contexts have been destroyed.
+ *
+ * \see null_destroy_sec(), plain_destroy_sec(), gss_sec_destroy_kr().
+ */
+ void (*destroy_sec) (struct ptlrpc_sec *sec);
+
+ /**
+	 * Notify that this ptlrpc_sec is going to die. Optionally, the policy
+	 * module is supposed to set sec->ps_dying and take whatever actions
+	 * are necessary.
+ *
+ * \see plain_kill_sec(), gss_sec_kill().
+ */
+ void (*kill_sec) (struct ptlrpc_sec *sec);
+
+ /**
+	 * Given \a vcred, look up and/or create its context. The policy
+	 * module is supposed to maintain its own context cache.
+	 * XXX currently \a create and \a remove_dead are always 1, so perhaps
+	 * they should be removed completely.
+ *
+ * \see null_lookup_ctx(), plain_lookup_ctx(), gss_sec_lookup_ctx_kr().
+ */
+ struct ptlrpc_cli_ctx * (*lookup_ctx) (struct ptlrpc_sec *sec,
+ struct vfs_cred *vcred,
+ int create,
+ int remove_dead);
+
+ /**
+	 * Called when the reference count of \a ctx drops to 0. The policy
+	 * module is supposed to destroy this context, or do whatever else is
+	 * appropriate according to its cache maintenance mechanism.
+	 *
+	 * \param sync if zero, we shouldn't wait for the context to be
+	 * destroyed completely.
+ *
+ * \see plain_release_ctx(), gss_sec_release_ctx_kr().
+ */
+ void (*release_ctx) (struct ptlrpc_sec *sec,
+ struct ptlrpc_cli_ctx *ctx,
+ int sync);
+
+ /**
+ * Flush the context cache.
+ *
+	 * \param uid flush contexts of this user; -1 means all contexts.
+ * \param grace if zero, the PTLRPC_CTX_UPTODATE_BIT of affected
+ * contexts should be cleared immediately.
+ * \param force if zero, only idle contexts will be flushed.
+ *
+ * \see plain_flush_ctx_cache(), gss_sec_flush_ctx_cache_kr().
+ */
+ int (*flush_ctx_cache)
+ (struct ptlrpc_sec *sec,
+ uid_t uid,
+ int grace,
+ int force);
+
+ /**
+ * Called periodically by garbage collector to remove dead contexts
+ * from cache.
+ *
+ * \see gss_sec_gc_ctx_kr().
+ */
+ void (*gc_ctx) (struct ptlrpc_sec *sec);
+
+ /**
+	 * Given a context \a ctx, install a corresponding reverse service
+	 * context on the client side.
+	 * XXX currently it's only used by the GSS module; maybe we should
+	 * remove this from the general API.
+ */
+ int (*install_rctx)(struct obd_import *imp,
+ struct ptlrpc_sec *sec,
+ struct ptlrpc_cli_ctx *ctx);
+
+ /**
+ * To allocate request buffer for \a req.
+ *
+ * \pre req->rq_reqmsg == NULL.
+	 * \pre req->rq_reqbuf == NULL; otherwise it must be pre-allocated,
+	 * and we are not supposed to free it.
+	 * \post on success, req->rq_reqmsg points to a buffer of size
+	 * at least \a lustre_msg_size.
+ *
+ * \see null_alloc_reqbuf(), plain_alloc_reqbuf(), gss_alloc_reqbuf().
+ */
+ int (*alloc_reqbuf)(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req,
+ int lustre_msg_size);
+
+ /**
+ * To free request buffer for \a req.
+ *
+ * \pre req->rq_reqbuf != NULL.
+ *
+ * \see null_free_reqbuf(), plain_free_reqbuf(), gss_free_reqbuf().
+ */
+ void (*free_reqbuf) (struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req);
+
+ /**
+ * To allocate reply buffer for \a req.
+ *
+ * \pre req->rq_repbuf == NULL.
+	 * \post on success, req->rq_repbuf points to a buffer of size
+	 * req->rq_repbuf_len, which should be large enough to receive a
+	 * reply transformed from \a lustre_msg_size bytes of clear text.
+ *
+ * \see null_alloc_repbuf(), plain_alloc_repbuf(), gss_alloc_repbuf().
+ */
+ int (*alloc_repbuf)(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req,
+ int lustre_msg_size);
+
+ /**
+ * To free reply buffer for \a req.
+ *
+ * \pre req->rq_repbuf != NULL.
+ * \post req->rq_repbuf == NULL.
+ * \post req->rq_repbuf_len == 0.
+ *
+ * \see null_free_repbuf(), plain_free_repbuf(), gss_free_repbuf().
+ */
+ void (*free_repbuf) (struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req);
+
+ /**
+	 * To expand the request buffer of \a req, so that the \a segment in
+	 * the request message pointed to by req->rq_reqmsg can accommodate
+	 * at least \a newsize bytes of data.
+ *
+ * \pre req->rq_reqmsg->lm_buflens[segment] < newsize.
+ *
+ * \see null_enlarge_reqbuf(), plain_enlarge_reqbuf(),
+ * gss_enlarge_reqbuf().
+ */
+ int (*enlarge_reqbuf)
+ (struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req,
+ int segment, int newsize);
+ /*
+ * misc
+ */
+ int (*display) (struct ptlrpc_sec *sec,
+ struct seq_file *seq);
+};
+
+/**
+ * server side policy operation vector.
+ */
+struct ptlrpc_sec_sops {
+ /**
+	 * Verify an incoming request.
+	 *
+	 * \pre the request message is pointed to by req->rq_reqbuf, its size
+	 * is req->rq_reqdata_len, and the message has been unpacked to
+	 * host byte order.
+ *
+	 * \retval SECSVC_OK success; req->rq_reqmsg points to the request
+	 * message in clear text, its size is req->rq_reqlen; req->rq_svc_ctx
+	 * is set; req->rq_sp_from is decoded from the request.
+ * \retval SECSVC_COMPLETE success, the request has been fully
+ * processed, and reply message has been prepared; req->rq_sp_from is
+ * decoded from request.
+ * \retval SECSVC_DROP failed, this request should be dropped.
+ *
+ * \see null_accept(), plain_accept(), gss_svc_accept_kr().
+ */
+ int (*accept) (struct ptlrpc_request *req);
+
+ /**
+ * Perform security transformation upon reply message.
+ *
+	 * \pre the reply message is pointed to by req->rq_reply_state->rs_msg
+	 * and its size is req->rq_replen.
+ * \post req->rs_repdata_len is the final message size.
+ * \post req->rq_reply_off is set.
+ *
+ * \see null_authorize(), plain_authorize(), gss_svc_authorize().
+ */
+ int (*authorize) (struct ptlrpc_request *req);
+
+ /**
+ * Invalidate server context \a ctx.
+ *
+ * \see gss_svc_invalidate_ctx().
+ */
+ void (*invalidate_ctx)
+ (struct ptlrpc_svc_ctx *ctx);
+
+ /**
+ * Allocate a ptlrpc_reply_state.
+ *
+ * \param msgsize size of the reply message in clear text.
+	 * \pre if req->rq_reply_state != NULL, then it's pre-allocated and we
+	 * should simply use it; otherwise we are responsible for allocating
+	 * a new one.
+ * \post req->rq_reply_state != NULL;
+ * \post req->rq_reply_state->rs_msg != NULL;
+ *
+ * \see null_alloc_rs(), plain_alloc_rs(), gss_svc_alloc_rs().
+ */
+ int (*alloc_rs) (struct ptlrpc_request *req,
+ int msgsize);
+
+ /**
+ * Free a ptlrpc_reply_state.
+ */
+ void (*free_rs) (struct ptlrpc_reply_state *rs);
+
+ /**
+ * Release the server context \a ctx.
+ *
+ * \see gss_svc_free_ctx().
+ */
+ void (*free_ctx) (struct ptlrpc_svc_ctx *ctx);
+
+ /**
+ * Install a reverse context based on the server context \a ctx.
+ *
+ * \see gss_svc_install_rctx_kr().
+ */
+ int (*install_rctx)(struct obd_import *imp,
+ struct ptlrpc_svc_ctx *ctx);
+
+ /**
+ * Prepare buffer for incoming bulk write.
+ *
+	 * \pre desc->bd_iov and desc->bd_iov_count describe the buffer
+	 * intended to receive the write.
+ *
+ * \see gss_svc_prep_bulk().
+ */
+ int (*prep_bulk) (struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc);
+
+ /**
+ * Unwrap the bulk write data.
+ *
+ * \see plain_svc_unwrap_bulk(), gss_svc_unwrap_bulk().
+ */
+ int (*unwrap_bulk) (struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc);
+
+ /**
+ * Wrap the bulk read data.
+ *
+ * \see plain_svc_wrap_bulk(), gss_svc_wrap_bulk().
+ */
+ int (*wrap_bulk) (struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc);
+};
+
+struct ptlrpc_sec_policy {
+ struct module *sp_owner;
+ char *sp_name;
+ __u16 sp_policy; /* policy number */
+ struct ptlrpc_sec_cops *sp_cops; /* client ops */
+ struct ptlrpc_sec_sops *sp_sops; /* server ops */
+};
+
+#define PTLRPC_SEC_FL_REVERSE 0x0001 /* reverse sec */
+#define PTLRPC_SEC_FL_ROOTONLY 0x0002 /* treat everyone as root */
+#define PTLRPC_SEC_FL_UDESC 0x0004 /* ship udesc */
+#define PTLRPC_SEC_FL_BULK 0x0008 /* intensive bulk i/o expected */
+#define PTLRPC_SEC_FL_PAG 0x0010 /* PAG mode */
+
+/**
+ * The ptlrpc_sec represents the client side ptlrpc security facilities;
+ * each obd_import (both regular and reverse import) must be associated
+ * with a ptlrpc_sec.
+ *
+ * \see sptlrpc_import_sec_adapt().
+ */
+struct ptlrpc_sec {
+ struct ptlrpc_sec_policy *ps_policy;
+ atomic_t ps_refcount;
+ /** statistic only */
+ atomic_t ps_nctx;
+ /** unique identifier */
+ int ps_id;
+ struct sptlrpc_flavor ps_flvr;
+ enum lustre_sec_part ps_part;
+ /** after set, no more new context will be created */
+ unsigned int ps_dying:1;
+ /** owning import */
+ struct obd_import *ps_import;
+ spinlock_t ps_lock;
+
+ /*
+ * garbage collection
+ */
+ struct list_head ps_gc_list;
+ cfs_time_t ps_gc_interval; /* in seconds */
+ cfs_time_t ps_gc_next; /* in seconds */
+};
+
+static inline int sec_is_reverse(struct ptlrpc_sec *sec)
+{
+ return (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE);
+}
+
+static inline int sec_is_rootonly(struct ptlrpc_sec *sec)
+{
+ return (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_ROOTONLY);
+}
+
+
+struct ptlrpc_svc_ctx {
+ atomic_t sc_refcount;
+ struct ptlrpc_sec_policy *sc_policy;
+};
+
+/*
+ * user identity descriptor
+ */
+#define LUSTRE_MAX_GROUPS (128)
+
+struct ptlrpc_user_desc {
+ __u32 pud_uid;
+ __u32 pud_gid;
+ __u32 pud_fsuid;
+ __u32 pud_fsgid;
+ __u32 pud_cap;
+ __u32 pud_ngroups;
+ __u32 pud_groups[0];
+};
+
+/*
+ * bulk flavors
+ */
+enum sptlrpc_bulk_hash_alg {
+ BULK_HASH_ALG_NULL = 0,
+ BULK_HASH_ALG_ADLER32,
+ BULK_HASH_ALG_CRC32,
+ BULK_HASH_ALG_MD5,
+ BULK_HASH_ALG_SHA1,
+ BULK_HASH_ALG_SHA256,
+ BULK_HASH_ALG_SHA384,
+ BULK_HASH_ALG_SHA512,
+ BULK_HASH_ALG_MAX
+};
+
+const char * sptlrpc_get_hash_name(__u8 hash_alg);
+__u8 sptlrpc_get_hash_alg(const char *algname);
+
+enum {
+ BSD_FL_ERR = 1,
+};
+
+struct ptlrpc_bulk_sec_desc {
+ __u8 bsd_version; /* 0 */
+ __u8 bsd_type; /* SPTLRPC_BULK_XXX */
+ __u8 bsd_svc; /* SPTLRPC_BULK_SVC_XXXX */
+ __u8 bsd_flags; /* flags */
+ __u32 bsd_nob; /* nob of bulk data */
+ __u8 bsd_data[0]; /* policy-specific token */
+};
+
+
+/*
+ * lprocfs
+ */
+struct proc_dir_entry;
+extern struct proc_dir_entry *sptlrpc_proc_root;
+
+/*
+ * Round size up to the next power of 2, for slab allocation.
+ * @size must be sane (it must not overflow after rounding up).
+ */
+static inline int size_roundup_power2(int size)
+{
+ size--;
+ size |= size >> 1;
+ size |= size >> 2;
+ size |= size >> 4;
+ size |= size >> 8;
+ size |= size >> 16;
+ size++;
+ return size;
+}
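+
+/*
+ * Worked example (not part of the original comments):
+ *
+ *	size_roundup_power2(1000) == 1024
+ *	size_roundup_power2(1024) == 1024
+ *	size_roundup_power2(1025) == 2048
+ */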
+
+/*
+ * internal support libraries
+ */
+void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
+ int segment, int newsize);
+
+/*
+ * security policies
+ */
+int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy);
+int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy);
+
+__u32 sptlrpc_name2flavor_base(const char *name);
+const char *sptlrpc_flavor2name_base(__u32 flvr);
+char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
+ char *buf, int bufsize);
+char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize);
+char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize);
+
+static inline
+struct ptlrpc_sec_policy *sptlrpc_policy_get(struct ptlrpc_sec_policy *policy)
+{
+ __module_get(policy->sp_owner);
+ return policy;
+}
+
+static inline
+void sptlrpc_policy_put(struct ptlrpc_sec_policy *policy)
+{
+ module_put(policy->sp_owner);
+}
+
+/*
+ * client credential
+ */
+static inline
+unsigned long cli_ctx_status(struct ptlrpc_cli_ctx *ctx)
+{
+ return (ctx->cc_flags & PTLRPC_CTX_STATUS_MASK);
+}
+
+static inline
+int cli_ctx_is_ready(struct ptlrpc_cli_ctx *ctx)
+{
+ return (cli_ctx_status(ctx) == PTLRPC_CTX_UPTODATE);
+}
+
+static inline
+int cli_ctx_is_refreshed(struct ptlrpc_cli_ctx *ctx)
+{
+ return (cli_ctx_status(ctx) != 0);
+}
+
+static inline
+int cli_ctx_is_uptodate(struct ptlrpc_cli_ctx *ctx)
+{
+ return ((ctx->cc_flags & PTLRPC_CTX_UPTODATE) != 0);
+}
+
+static inline
+int cli_ctx_is_error(struct ptlrpc_cli_ctx *ctx)
+{
+ return ((ctx->cc_flags & PTLRPC_CTX_ERROR) != 0);
+}
+
+static inline
+int cli_ctx_is_dead(struct ptlrpc_cli_ctx *ctx)
+{
+ return ((ctx->cc_flags & (PTLRPC_CTX_DEAD | PTLRPC_CTX_ERROR)) != 0);
+}
+
+static inline
+int cli_ctx_is_eternal(struct ptlrpc_cli_ctx *ctx)
+{
+ return ((ctx->cc_flags & PTLRPC_CTX_ETERNAL) != 0);
+}
+
+/*
+ * sec get/put
+ */
+struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec);
+void sptlrpc_sec_put(struct ptlrpc_sec *sec);
+
+/*
+ * internal APIs which are only used by policy implementations
+ */
+int sptlrpc_get_next_secid(void);
+void sptlrpc_sec_destroy(struct ptlrpc_sec *sec);
+
+/*
+ * exported client context api
+ */
+struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx);
+void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync);
+void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx);
+void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx);
+int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize);
+
+/*
+ * exported client context wrap/buffers
+ */
+int sptlrpc_cli_wrap_request(struct ptlrpc_request *req);
+int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req);
+int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize);
+void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req);
+int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize);
+void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req);
+int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
+ int segment, int newsize);
+int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
+ struct ptlrpc_request **req_ret);
+void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req);
+
+void sptlrpc_request_out_callback(struct ptlrpc_request *req);
+
+/*
+ * exported higher interface of import & request
+ */
+int sptlrpc_import_sec_adapt(struct obd_import *imp,
+ struct ptlrpc_svc_ctx *ctx,
+ struct sptlrpc_flavor *flvr);
+struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp);
+void sptlrpc_import_sec_put(struct obd_import *imp);
+
+int sptlrpc_import_check_ctx(struct obd_import *imp);
+void sptlrpc_import_flush_root_ctx(struct obd_import *imp);
+void sptlrpc_import_flush_my_ctx(struct obd_import *imp);
+void sptlrpc_import_flush_all_ctx(struct obd_import *imp);
+int sptlrpc_req_get_ctx(struct ptlrpc_request *req);
+void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync);
+int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout);
+int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req);
+void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode);
+
+int sptlrpc_parse_rule(char *param, struct sptlrpc_rule *rule);
+
+/* gc */
+void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec);
+void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec);
+void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx);
+
+/* misc */
+const char * sec2target_str(struct ptlrpc_sec *sec);
+int sptlrpc_lprocfs_cliobd_attach(struct obd_device *dev);
+
+/*
+ * server side
+ */
+enum secsvc_accept_res {
+ SECSVC_OK = 0,
+ SECSVC_COMPLETE,
+ SECSVC_DROP,
+};
+
+int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req);
+int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen);
+int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req);
+void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs);
+void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req);
+void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req);
+void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req);
+
+int sptlrpc_target_export_check(struct obd_export *exp,
+ struct ptlrpc_request *req);
+void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
+ struct sptlrpc_rule_set *rset);
+
+/*
+ * reverse context
+ */
+int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
+ struct ptlrpc_svc_ctx *ctx);
+int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
+ struct ptlrpc_cli_ctx *ctx);
+
+/* bulk security api */
+int sptlrpc_enc_pool_add_user(void);
+int sptlrpc_enc_pool_del_user(void);
+int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc);
+void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc);
+
+int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc);
+int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc,
+ int nob);
+int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc);
+
+/* bulk helpers (internal use only by policies) */
+int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
+ void *buf, int buflen);
+
+int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed);
+
+/* user descriptor helpers */
+static inline int sptlrpc_user_desc_size(int ngroups)
+{
+ return sizeof(struct ptlrpc_user_desc) + ngroups * sizeof(__u32);
+}
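+
+/*
+ * Worked example (not part of the original comments): a descriptor carrying
+ * two supplementary groups occupies
+ *	sizeof(struct ptlrpc_user_desc) + 2 * sizeof(__u32)
+ * bytes, i.e. the fixed header plus one __u32 per group.
+ */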
+
+int sptlrpc_current_user_desc_size(void);
+int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset);
+int sptlrpc_unpack_user_desc(struct lustre_msg *req, int offset, int swabbed);
+
+
+#define CFS_CAP_CHOWN_MASK (1 << CFS_CAP_CHOWN)
+#define CFS_CAP_SYS_RESOURCE_MASK (1 << CFS_CAP_SYS_RESOURCE)
+
+enum {
+ LUSTRE_SEC_NONE = 0,
+ LUSTRE_SEC_REMOTE = 1,
+ LUSTRE_SEC_SPECIFY = 2,
+ LUSTRE_SEC_ALL = 3
+};
+
+/** @} sptlrpc */
+
+#endif /* _LUSTRE_SEC_H_ */
diff --git a/drivers/staging/lustre/lustre/include/lustre_update.h b/drivers/staging/lustre/lustre/include/lustre_update.h
new file mode 100644
index 0000000..84defce
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_update.h
@@ -0,0 +1,189 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.htm
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2013, Intel Corporation.
+ */
+/*
+ * lustre/include/lustre_update.h
+ *
+ * Author: Di Wang <di.wang@intel.com>
+ */
+
+#ifndef _LUSTRE_UPDATE_H
+#define _LUSTRE_UPDATE_H
+
+#define UPDATE_BUFFER_SIZE 8192
+struct update_request {
+ struct dt_device *ur_dt;
+ struct list_head ur_list; /* attached itself to thandle */
+ int ur_flags;
+ int ur_rc; /* request result */
+ int ur_batchid; /* Current batch(trans) id */
+ struct update_buf *ur_buf; /* Holding the update req */
+};
+
+static inline unsigned long update_size(struct update *update)
+{
+ unsigned long size;
+ int i;
+
+ size = cfs_size_round(offsetof(struct update, u_bufs[0]));
+ for (i = 0; i < UPDATE_BUF_COUNT; i++)
+ size += cfs_size_round(update->u_lens[i]);
+
+ return size;
+}
+
+static inline void *update_param_buf(struct update *update, int index,
+ int *size)
+{
+ int i;
+ void *ptr;
+
+ if (index >= UPDATE_BUF_COUNT)
+ return NULL;
+
+ ptr = (char *)update + cfs_size_round(offsetof(struct update,
+ u_bufs[0]));
+ for (i = 0; i < index; i++) {
+ LASSERT(update->u_lens[i] > 0);
+ ptr += cfs_size_round(update->u_lens[i]);
+ }
+
+ if (size != NULL)
+ *size = update->u_lens[index];
+
+ return ptr;
+}
+
+static inline unsigned long update_buf_size(struct update_buf *buf)
+{
+ unsigned long size;
+ int i = 0;
+
+ size = cfs_size_round(offsetof(struct update_buf, ub_bufs[0]));
+ for (i = 0; i < buf->ub_count; i++) {
+ struct update *update;
+
+ update = (struct update *)((char *)buf + size);
+ size += update_size(update);
+ }
+ LASSERT(size <= UPDATE_BUFFER_SIZE);
+ return size;
+}
+
+static inline void *update_buf_get(struct update_buf *buf, int index, int *size)
+{
+ int count = buf->ub_count;
+ void *ptr;
+ int i = 0;
+
+ if (index >= count)
+ return NULL;
+
+ ptr = (char *)buf + cfs_size_round(offsetof(struct update_buf,
+ ub_bufs[0]));
+ for (i = 0; i < index; i++)
+ ptr += update_size((struct update *)ptr);
+
+ if (size != NULL)
+ *size = update_size((struct update *)ptr);
+
+ return ptr;
+}
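+
+/*
+ * A minimal illustrative sketch (hypothetical helper, not part of the
+ * update API): the accessors above compose naturally, e.g. to fetch the
+ * second parameter buffer of the first update packed in @buf.
+ */
+static inline void *update_buf_first_param1(struct update_buf *buf, int *size)
+{
+	struct update *update;
+
+	update = update_buf_get(buf, 0, NULL);
+	if (update == NULL)
+		return NULL;
+
+	return update_param_buf(update, 1, size);
+}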
+
+static inline void update_init_reply_buf(struct update_reply *reply, int count)
+{
+ reply->ur_version = UPDATE_REPLY_V1;
+ reply->ur_count = count;
+}
+
+static inline void *update_get_buf_internal(struct update_reply *reply,
+ int index, int *size)
+{
+ char *ptr;
+ int count = reply->ur_count;
+ int i;
+
+ if (index >= count)
+ return NULL;
+
+ ptr = (char *)reply + cfs_size_round(offsetof(struct update_reply,
+ ur_lens[count]));
+ for (i = 0; i < index; i++) {
+ LASSERT(reply->ur_lens[i] > 0);
+ ptr += cfs_size_round(reply->ur_lens[i]);
+ }
+
+ if (size != NULL)
+ *size = reply->ur_lens[index];
+
+ return ptr;
+}
+
+static inline void update_insert_reply(struct update_reply *reply, void *data,
+ int data_len, int index, int rc)
+{
+ char *ptr;
+
+ ptr = update_get_buf_internal(reply, index, NULL);
+ LASSERT(ptr != NULL);
+
+ *(int *)ptr = cpu_to_le32(rc);
+ ptr += sizeof(int);
+ if (data_len > 0) {
+ LASSERT(data != NULL);
+ memcpy(ptr, data, data_len);
+ }
+ reply->ur_lens[index] = data_len + sizeof(int);
+}
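+
+/*
+ * Note (descriptive, not part of the original comments): each reply slot
+ * written above starts with a 32-bit result code followed by data_len bytes
+ * of payload; update_get_reply_buf() below unpacks exactly this layout.
+ */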
+
+static inline int update_get_reply_buf(struct update_reply *reply, void **buf,
+ int index)
+{
+ char *ptr;
+ int size = 0;
+ int result;
+
+	ptr = update_get_buf_internal(reply, index, &size);
+	LASSERT(ptr != NULL && size >= sizeof(int));
+	result = *(int *)ptr;
+
+	if (result < 0)
+		return result;
+
+ *buf = ptr + sizeof(int);
+ return size - sizeof(int);
+}
+
+static inline int update_get_reply_result(struct update_reply *reply,
+ void **buf, int index)
+{
+ void *ptr;
+ int size;
+
+ ptr = update_get_buf_internal(reply, index, &size);
+ LASSERT(ptr != NULL && size > sizeof(int));
+ return *(int *)ptr;
+}
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_ver.h b/drivers/staging/lustre/lustre/include/lustre_ver.h
new file mode 100644
index 0000000..dc187b8
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_ver.h
@@ -0,0 +1,24 @@
+#ifndef _LUSTRE_VER_H_
+#define _LUSTRE_VER_H_
+/* This file is automatically generated from lustre/include/lustre_ver.h.in,
+ * based on parameters in lustre/autoconf/lustre-version.ac.
+ * Changes made directly to this file will be lost. */
+
+#define LUSTRE_MAJOR 2
+#define LUSTRE_MINOR 3
+#define LUSTRE_PATCH 64
+#define LUSTRE_FIX 0
+#define LUSTRE_VERSION_STRING "2.3.64"
+
+#define LUSTRE_VERSION_CODE OBD_OCD_VERSION(LUSTRE_MAJOR,LUSTRE_MINOR,LUSTRE_PATCH,LUSTRE_FIX)
+
+/* liblustre clients are only allowed to connect if their LUSTRE_FIX
+ * mismatches by no more than this amount (set in
+ * lustre/autoconf/lustre-version.ac). */
+#define LUSTRE_VERSION_ALLOWED_OFFSET OBD_OCD_VERSION(0, 0, 1, 32)
+
+/* If the Lustre version of a client and the servers it connects to differs
+ * by more than this amount, the client will issue a warning.
+ * (set in lustre/autoconf/lustre-version.ac) */
+#define LUSTRE_VERSION_OFFSET_WARN OBD_OCD_VERSION(0, 4, 0, 0)
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/lvfs.h b/drivers/staging/lustre/lustre/include/lvfs.h
new file mode 100644
index 0000000..28f1a6b
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lvfs.h
@@ -0,0 +1,57 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/include/lvfs.h
+ *
+ * lustre VFS/process permission interface
+ */
+
+#ifndef __LVFS_H__
+#define __LVFS_H__
+
+#define LL_FID_NAMELEN (16 + 1 + 8 + 1)
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/lvfs.h>
+
+#include <linux/libcfs/lucache.h>
+
+
+/* lvfs_common.c */
+struct dentry *lvfs_fid2dentry(struct lvfs_run_ctxt *, __u64, __u32, __u64 ,void *data);
+
+void push_ctxt(struct lvfs_run_ctxt *save, struct lvfs_run_ctxt *new_ctx,
+ struct lvfs_ucred *cred);
+void pop_ctxt(struct lvfs_run_ctxt *saved, struct lvfs_run_ctxt *new_ctx,
+ struct lvfs_ucred *cred);
+#endif
diff --git a/drivers/staging/lustre/lustre/include/md_object.h b/drivers/staging/lustre/lustre/include/md_object.h
new file mode 100644
index 0000000..daf93af
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/md_object.h
@@ -0,0 +1,903 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/include/md_object.h
+ *
+ * Extension of lu_object.h for metadata objects
+ */
+
+#ifndef _LUSTRE_MD_OBJECT_H
+#define _LUSTRE_MD_OBJECT_H
+
+/** \defgroup md md
+ * Sub-class of lu_object with methods common for "meta-data" objects in MDT
+ * stack.
+ *
+ * Meta-data objects implement namespace operations: you can link, unlink
+ * them, and treat them as directories.
+ *
+ * Examples: mdt, cmm, and mdd are implementations of the md interface.
+ * @{
+ */
+
+
+/*
+ * super-class definitions.
+ */
+#include <dt_object.h>
+
+struct md_device;
+struct md_device_operations;
+struct md_object;
+struct obd_export;
+
+enum {
+ UCRED_INVALID = -1,
+ UCRED_INIT = 0,
+ UCRED_OLD = 1,
+ UCRED_NEW = 2
+};
+
+enum {
+ MD_CAPAINFO_MAX = 5
+};
+
+/** There are at most 5 FIDs in one operation (see rename); NOTE the last one
+ * is a temporary one used for is_subdir() */
+struct md_capainfo {
+ __u32 mc_auth;
+ __u32 mc_padding;
+ struct lu_fid mc_fid[MD_CAPAINFO_MAX];
+ struct lustre_capa *mc_capa[MD_CAPAINFO_MAX];
+};
+
+struct md_quota {
+ struct obd_export *mq_exp;
+};
+
+/**
+ * Implemented in mdd/mdd_handler.c.
+ *
+ * XXX should be moved into separate .h/.c together with all md security
+ * related definitions.
+ */
+struct md_capainfo *md_capainfo(const struct lu_env *env);
+struct md_quota *md_quota(const struct lu_env *env);
+
+/** metadata attributes */
+enum ma_valid {
+ MA_INODE = (1 << 0),
+ MA_LOV = (1 << 1),
+ MA_COOKIE = (1 << 2),
+ MA_FLAGS = (1 << 3),
+ MA_LMV = (1 << 4),
+ MA_ACL_DEF = (1 << 5),
+ MA_LOV_DEF = (1 << 6),
+ MA_LAY_GEN = (1 << 7),
+ MA_HSM = (1 << 8),
+ MA_SOM = (1 << 9),
+ MA_PFID = (1 << 10)
+};
+
+typedef enum {
+ MDL_MINMODE = 0,
+ MDL_EX = 1,
+ MDL_PW = 2,
+ MDL_PR = 4,
+ MDL_CW = 8,
+ MDL_CR = 16,
+ MDL_NL = 32,
+ MDL_GROUP = 64,
+ MDL_MAXMODE
+} mdl_mode_t;
+
+typedef enum {
+ MDT_NUL_LOCK = 0,
+ MDT_REG_LOCK = (1 << 0),
+ MDT_PDO_LOCK = (1 << 1)
+} mdl_type_t;
+
+/* memory structure for hsm attributes
+ * for a description of the fields see the on-disk structure hsm_attrs,
+ * which is defined in lustre_idl.h
+ */
+struct md_hsm {
+ __u32 mh_compat;
+ __u32 mh_flags;
+ __u64 mh_arch_id;
+ __u64 mh_arch_ver;
+};
+
+#define IOEPOCH_INVAL 0
+
+/* memory structure for som attributes
+ * for a description of the fields see the on-disk structure som_attrs,
+ * which is defined in lustre_idl.h
+ */
+struct md_som_data {
+ __u32 msd_compat;
+ __u32 msd_incompat;
+ __u64 msd_ioepoch;
+ __u64 msd_size;
+ __u64 msd_blocks;
+ __u64 msd_mountid;
+};
+
+struct md_attr {
+ __u64 ma_valid;
+ __u64 ma_need;
+ __u64 ma_attr_flags;
+ struct lu_attr ma_attr;
+ struct lu_fid ma_pfid;
+ struct md_hsm ma_hsm;
+ struct lov_mds_md *ma_lmm;
+ struct lmv_stripe_md *ma_lmv;
+ void *ma_acl;
+ struct llog_cookie *ma_cookie;
+ struct lustre_capa *ma_capa;
+ struct md_som_data *ma_som;
+ int ma_lmm_size;
+ int ma_lmv_size;
+ int ma_acl_size;
+ int ma_cookie_size;
+ __u16 ma_layout_gen;
+};
+
+/** Additional parameters for create */
+struct md_op_spec {
+ union {
+ /** symlink target */
+ const char *sp_symname;
+ /** parent FID for cross-ref mkdir */
+ const struct lu_fid *sp_pfid;
+ /** eadata for regular files */
+ struct md_spec_reg {
+ /** lov objs exist already */
+ const struct lu_fid *fid;
+ const void *eadata;
+ int eadatalen;
+ } sp_ea;
+ } u;
+
+ /** Create flag from client: such as MDS_OPEN_CREAT, and others. */
+ __u64 sp_cr_flags;
+
+	/** don't create lov objects or llog cookie - this is replay */
+ unsigned int no_create:1,
+ sp_cr_lookup:1, /* do lookup sanity check or not. */
+ sp_rm_entry:1; /* only remove name entry */
+
+	/** Current lock mode for the parent dir where create is performed. */
+ mdl_mode_t sp_cr_mode;
+
+ /** to create directory */
+ const struct dt_index_features *sp_feat;
+};
+
+/**
+ * Operations implemented for each md object (both directory and leaf).
+ */
+struct md_object_operations {
+ int (*moo_permission)(const struct lu_env *env,
+ struct md_object *pobj, struct md_object *cobj,
+ struct md_attr *attr, int mask);
+
+ int (*moo_attr_get)(const struct lu_env *env, struct md_object *obj,
+ struct md_attr *attr);
+
+ int (*moo_attr_set)(const struct lu_env *env, struct md_object *obj,
+ const struct md_attr *attr);
+
+ int (*moo_xattr_get)(const struct lu_env *env, struct md_object *obj,
+ struct lu_buf *buf, const char *name);
+
+ int (*moo_xattr_list)(const struct lu_env *env, struct md_object *obj,
+ struct lu_buf *buf);
+
+ int (*moo_xattr_set)(const struct lu_env *env, struct md_object *obj,
+ const struct lu_buf *buf, const char *name,
+ int fl);
+
+ int (*moo_xattr_del)(const struct lu_env *env, struct md_object *obj,
+ const char *name);
+
+ /** This method is used to swap the layouts between 2 objects */
+ int (*moo_swap_layouts)(const struct lu_env *env,
+ struct md_object *obj1, struct md_object *obj2,
+ __u64 flags);
+
+ /** \retval number of bytes actually read upon success */
+ int (*moo_readpage)(const struct lu_env *env, struct md_object *obj,
+ const struct lu_rdpg *rdpg);
+
+ int (*moo_readlink)(const struct lu_env *env, struct md_object *obj,
+ struct lu_buf *buf);
+ int (*moo_changelog)(const struct lu_env *env,
+ enum changelog_rec_type type, int flags,
+ struct md_object *obj);
+ /** part of cross-ref operation */
+ int (*moo_object_create)(const struct lu_env *env,
+ struct md_object *obj,
+ const struct md_op_spec *spec,
+ struct md_attr *ma);
+
+ int (*moo_ref_add)(const struct lu_env *env,
+ struct md_object *obj,
+ const struct md_attr *ma);
+
+ int (*moo_ref_del)(const struct lu_env *env,
+ struct md_object *obj,
+ struct md_attr *ma);
+
+ int (*moo_open)(const struct lu_env *env,
+ struct md_object *obj, int flag);
+
+ int (*moo_close)(const struct lu_env *env, struct md_object *obj,
+ struct md_attr *ma, int mode);
+
+ int (*moo_capa_get)(const struct lu_env *, struct md_object *,
+ struct lustre_capa *, int renewal);
+
+ int (*moo_object_sync)(const struct lu_env *, struct md_object *);
+
+ int (*moo_file_lock)(const struct lu_env *env, struct md_object *obj,
+ struct lov_mds_md *lmm, struct ldlm_extent *extent,
+ struct lustre_handle *lockh);
+ int (*moo_file_unlock)(const struct lu_env *env, struct md_object *obj,
+ struct lov_mds_md *lmm,
+ struct lustre_handle *lockh);
+ int (*moo_object_lock)(const struct lu_env *env, struct md_object *obj,
+ struct lustre_handle *lh,
+ struct ldlm_enqueue_info *einfo,
+ void *policy);
+};
+
+/**
+ * Operations implemented for each directory object.
+ */
+struct md_dir_operations {
+ int (*mdo_is_subdir) (const struct lu_env *env, struct md_object *obj,
+ const struct lu_fid *fid, struct lu_fid *sfid);
+
+ int (*mdo_lookup)(const struct lu_env *env, struct md_object *obj,
+ const struct lu_name *lname, struct lu_fid *fid,
+ struct md_op_spec *spec);
+
+ mdl_mode_t (*mdo_lock_mode)(const struct lu_env *env,
+ struct md_object *obj,
+ mdl_mode_t mode);
+
+ int (*mdo_create)(const struct lu_env *env, struct md_object *pobj,
+ const struct lu_name *lname, struct md_object *child,
+ struct md_op_spec *spec,
+ struct md_attr *ma);
+
+	/** This method is used for creating a data object for this meta object */
+ int (*mdo_create_data)(const struct lu_env *env, struct md_object *p,
+ struct md_object *o,
+ const struct md_op_spec *spec,
+ struct md_attr *ma);
+
+ int (*mdo_rename)(const struct lu_env *env, struct md_object *spobj,
+ struct md_object *tpobj, const struct lu_fid *lf,
+ const struct lu_name *lsname, struct md_object *tobj,
+ const struct lu_name *ltname, struct md_attr *ma);
+
+ int (*mdo_link)(const struct lu_env *env, struct md_object *tgt_obj,
+ struct md_object *src_obj, const struct lu_name *lname,
+ struct md_attr *ma);
+
+ int (*mdo_unlink)(const struct lu_env *env, struct md_object *pobj,
+ struct md_object *cobj, const struct lu_name *lname,
+ struct md_attr *ma, int no_name);
+
+ /** This method is used to compare a requested layout to an existing
+ * layout (struct lov_mds_md_v1/3 vs struct lov_mds_md_v1/3) */
+ int (*mdo_lum_lmm_cmp)(const struct lu_env *env,
+ struct md_object *cobj,
+ const struct md_op_spec *spec,
+ struct md_attr *ma);
+
+ /** partial ops for cross-ref case */
+ int (*mdo_name_insert)(const struct lu_env *env,
+ struct md_object *obj,
+ const struct lu_name *lname,
+ const struct lu_fid *fid,
+ const struct md_attr *ma);
+
+ int (*mdo_name_remove)(const struct lu_env *env,
+ struct md_object *obj,
+ const struct lu_name *lname,
+ const struct md_attr *ma);
+
+ int (*mdo_rename_tgt)(const struct lu_env *env, struct md_object *pobj,
+ struct md_object *tobj, const struct lu_fid *fid,
+ const struct lu_name *lname, struct md_attr *ma);
+};
+
+struct md_device_operations {
+ /** meta-data device related handlers. */
+ int (*mdo_root_get)(const struct lu_env *env, struct md_device *m,
+ struct lu_fid *f);
+
+ int (*mdo_maxsize_get)(const struct lu_env *env, struct md_device *m,
+ int *md_size, int *cookie_size);
+
+ int (*mdo_statfs)(const struct lu_env *env, struct md_device *m,
+ struct obd_statfs *sfs);
+
+ int (*mdo_init_capa_ctxt)(const struct lu_env *env, struct md_device *m,
+ int mode, unsigned long timeout, __u32 alg,
+ struct lustre_capa_key *keys);
+
+ int (*mdo_update_capa_key)(const struct lu_env *env,
+ struct md_device *m,
+ struct lustre_capa_key *key);
+
+ int (*mdo_llog_ctxt_get)(const struct lu_env *env,
+ struct md_device *m, int idx, void **h);
+
+ int (*mdo_iocontrol)(const struct lu_env *env, struct md_device *m,
+ unsigned int cmd, int len, void *data);
+};
+
+enum md_upcall_event {
+	/** Sync the md layer */
+	MD_LOV_SYNC = (1 << 0),
+	/** Just for split, no transaction needed, for replay */
+ MD_NO_TRANS = (1 << 1),
+ MD_LOV_CONFIG = (1 << 2),
+ /** Trigger quota recovery */
+ MD_LOV_QUOTA = (1 << 3)
+};
+
+struct md_upcall {
+	/** this lock protects the upcall from being removed while it is in
+	 * use; the read lock is taken to use the upcall, the write lock for
+	 * init/fini */
+ struct rw_semaphore mu_upcall_sem;
+ /** device to call, upper layer normally */
+ struct md_device *mu_upcall_dev;
+ /** upcall function */
+ int (*mu_upcall)(const struct lu_env *env, struct md_device *md,
+ enum md_upcall_event ev, void *data);
+};
+
+struct md_device {
+ struct lu_device md_lu_dev;
+ const struct md_device_operations *md_ops;
+ struct md_upcall md_upcall;
+};
+
+static inline void md_upcall_init(struct md_device *m, void *upcl)
+{
+ init_rwsem(&m->md_upcall.mu_upcall_sem);
+ m->md_upcall.mu_upcall_dev = NULL;
+ m->md_upcall.mu_upcall = upcl;
+}
+
+static inline void md_upcall_dev_set(struct md_device *m, struct md_device *up)
+{
+ down_write(&m->md_upcall.mu_upcall_sem);
+ m->md_upcall.mu_upcall_dev = up;
+ up_write(&m->md_upcall.mu_upcall_sem);
+}
+
+static inline void md_upcall_fini(struct md_device *m)
+{
+ down_write(&m->md_upcall.mu_upcall_sem);
+ m->md_upcall.mu_upcall_dev = NULL;
+ m->md_upcall.mu_upcall = NULL;
+ up_write(&m->md_upcall.mu_upcall_sem);
+}
+
+static inline int md_do_upcall(const struct lu_env *env, struct md_device *m,
+ enum md_upcall_event ev, void *data)
+{
+ int rc = 0;
+ down_read(&m->md_upcall.mu_upcall_sem);
+ if (m->md_upcall.mu_upcall_dev != NULL &&
+ m->md_upcall.mu_upcall_dev->md_upcall.mu_upcall != NULL) {
+ rc = m->md_upcall.mu_upcall_dev->md_upcall.mu_upcall(env,
+ m->md_upcall.mu_upcall_dev,
+ ev, data);
+ }
+ up_read(&m->md_upcall.mu_upcall_sem);
+ return rc;
+}
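
As a minimal sketch of how the upcall chain above is meant to be wired (my_upcall, upper and lower are hypothetical names, not part of the patch): the stacked device registers its handler with md_upcall_init(), the lower device is pointed at it with md_upcall_dev_set(), and events raised via md_do_upcall() on the lower device then reach the handler.

        static int my_upcall(const struct lu_env *env, struct md_device *md,
                             enum md_upcall_event ev, void *data)
        {
                /* react to events raised by the device below us */
                if (ev & MD_LOV_SYNC)
                        return 0;       /* e.g. trigger a sync of the md layer */
                return 0;
        }

        static void wire_upcall(struct md_device *upper, struct md_device *lower)
        {
                md_upcall_init(upper, my_upcall);  /* upper provides the handler */
                md_upcall_init(lower, NULL);       /* lower only raises events   */
                md_upcall_dev_set(lower, upper);   /* lower reports to upper     */
        }

        /* later, inside the lower device:
         *      md_do_upcall(env, lower, MD_LOV_SYNC, NULL);
         */
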
+
+struct md_object {
+ struct lu_object mo_lu;
+ const struct md_object_operations *mo_ops;
+ const struct md_dir_operations *mo_dir_ops;
+};
+
+/**
+ * seq-server site.
+ */
+struct seq_server_site {
+ struct lu_site *ss_lu;
+ /**
+ * mds number of this site.
+ */
+ mdsno_t ss_node_id;
+ /**
+ * Fid location database
+ */
+ struct lu_server_fld *ss_server_fld;
+ struct lu_client_fld *ss_client_fld;
+
+ /**
+ * Server Seq Manager
+ */
+ struct lu_server_seq *ss_server_seq;
+
+ /**
+ * Controller Seq Manager
+ */
+ struct lu_server_seq *ss_control_seq;
+ struct obd_export *ss_control_exp;
+
+ /**
+ * Client Seq Manager
+ */
+ struct lu_client_seq *ss_client_seq;
+};
+
+static inline struct md_device *lu2md_dev(const struct lu_device *d)
+{
+ LASSERT(IS_ERR(d) || lu_device_is_md(d));
+ return container_of0(d, struct md_device, md_lu_dev);
+}
+
+static inline struct lu_device *md2lu_dev(struct md_device *d)
+{
+ return &d->md_lu_dev;
+}
+
+static inline struct md_object *lu2md(const struct lu_object *o)
+{
+ LASSERT(o == NULL || IS_ERR(o) || lu_device_is_md(o->lo_dev));
+ return container_of0(o, struct md_object, mo_lu);
+}
+
+static inline struct md_object *md_object_next(const struct md_object *obj)
+{
+ return (obj ? lu2md(lu_object_next(&obj->mo_lu)) : NULL);
+}
+
+static inline struct md_device *md_obj2dev(const struct md_object *o)
+{
+ LASSERT(o == NULL || IS_ERR(o) || lu_device_is_md(o->mo_lu.lo_dev));
+ return container_of0(o->mo_lu.lo_dev, struct md_device, md_lu_dev);
+}
+
+static inline int md_device_init(struct md_device *md, struct lu_device_type *t)
+{
+ return lu_device_init(&md->md_lu_dev, t);
+}
+
+static inline void md_device_fini(struct md_device *md)
+{
+ lu_device_fini(&md->md_lu_dev);
+}
+
+static inline struct md_object *md_object_find_slice(const struct lu_env *env,
+ struct md_device *md,
+ const struct lu_fid *f)
+{
+ return lu2md(lu_object_find_slice(env, md2lu_dev(md), f, NULL));
+}
+
+
+/** md operations */
+static inline int mo_permission(const struct lu_env *env,
+ struct md_object *p,
+ struct md_object *c,
+ struct md_attr *at,
+ int mask)
+{
+ LASSERT(c->mo_ops->moo_permission);
+ return c->mo_ops->moo_permission(env, p, c, at, mask);
+}
+
+static inline int mo_attr_get(const struct lu_env *env,
+ struct md_object *m,
+ struct md_attr *at)
+{
+ LASSERT(m->mo_ops->moo_attr_get);
+ return m->mo_ops->moo_attr_get(env, m, at);
+}
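
A short sketch of how these wrappers combine with md_object_find_slice() above (hypothetical_getattr is an invented name, and the object-reference release a real caller would need is elided):

        static int hypothetical_getattr(const struct lu_env *env,
                                        struct md_device *md,
                                        const struct lu_fid *fid,
                                        struct md_attr *ma)
        {
                struct md_object *obj = md_object_find_slice(env, md, fid);

                if (IS_ERR(obj))
                        return PTR_ERR(obj);
                if (obj == NULL)
                        return -ENOENT;
                /* dispatch through the per-object operation vector */
                return mo_attr_get(env, obj, ma);
        }
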
+
+static inline int mo_readlink(const struct lu_env *env,
+ struct md_object *m,
+ struct lu_buf *buf)
+{
+ LASSERT(m->mo_ops->moo_readlink);
+ return m->mo_ops->moo_readlink(env, m, buf);
+}
+
+static inline int mo_changelog(const struct lu_env *env,
+ enum changelog_rec_type type,
+ int flags, struct md_object *m)
+{
+ LASSERT(m->mo_ops->moo_changelog);
+ return m->mo_ops->moo_changelog(env, type, flags, m);
+}
+
+static inline int mo_attr_set(const struct lu_env *env,
+ struct md_object *m,
+ const struct md_attr *at)
+{
+ LASSERT(m->mo_ops->moo_attr_set);
+ return m->mo_ops->moo_attr_set(env, m, at);
+}
+
+static inline int mo_xattr_get(const struct lu_env *env,
+ struct md_object *m,
+ struct lu_buf *buf,
+ const char *name)
+{
+ LASSERT(m->mo_ops->moo_xattr_get);
+ return m->mo_ops->moo_xattr_get(env, m, buf, name);
+}
+
+static inline int mo_xattr_del(const struct lu_env *env,
+ struct md_object *m,
+ const char *name)
+{
+ LASSERT(m->mo_ops->moo_xattr_del);
+ return m->mo_ops->moo_xattr_del(env, m, name);
+}
+
+static inline int mo_xattr_set(const struct lu_env *env,
+ struct md_object *m,
+ const struct lu_buf *buf,
+ const char *name,
+ int flags)
+{
+ LASSERT(m->mo_ops->moo_xattr_set);
+ return m->mo_ops->moo_xattr_set(env, m, buf, name, flags);
+}
+
+static inline int mo_xattr_list(const struct lu_env *env,
+ struct md_object *m,
+ struct lu_buf *buf)
+{
+ LASSERT(m->mo_ops->moo_xattr_list);
+ return m->mo_ops->moo_xattr_list(env, m, buf);
+}
+
+static inline int mo_swap_layouts(const struct lu_env *env,
+ struct md_object *o1,
+ struct md_object *o2, __u64 flags)
+{
+ LASSERT(o1->mo_ops->moo_swap_layouts);
+ LASSERT(o2->mo_ops->moo_swap_layouts);
+ if (o1->mo_ops->moo_swap_layouts != o2->mo_ops->moo_swap_layouts)
+ return -EPERM;
+ return o1->mo_ops->moo_swap_layouts(env, o1, o2, flags);
+}
+
+static inline int mo_open(const struct lu_env *env,
+ struct md_object *m,
+ int flags)
+{
+ LASSERT(m->mo_ops->moo_open);
+ return m->mo_ops->moo_open(env, m, flags);
+}
+
+static inline int mo_close(const struct lu_env *env,
+ struct md_object *m,
+ struct md_attr *ma,
+ int mode)
+{
+ LASSERT(m->mo_ops->moo_close);
+ return m->mo_ops->moo_close(env, m, ma, mode);
+}
+
+static inline int mo_readpage(const struct lu_env *env,
+ struct md_object *m,
+ const struct lu_rdpg *rdpg)
+{
+ LASSERT(m->mo_ops->moo_readpage);
+ return m->mo_ops->moo_readpage(env, m, rdpg);
+}
+
+static inline int mo_object_create(const struct lu_env *env,
+ struct md_object *m,
+ const struct md_op_spec *spc,
+ struct md_attr *at)
+{
+ LASSERT(m->mo_ops->moo_object_create);
+ return m->mo_ops->moo_object_create(env, m, spc, at);
+}
+
+static inline int mo_ref_add(const struct lu_env *env,
+ struct md_object *m,
+ const struct md_attr *ma)
+{
+ LASSERT(m->mo_ops->moo_ref_add);
+ return m->mo_ops->moo_ref_add(env, m, ma);
+}
+
+static inline int mo_ref_del(const struct lu_env *env,
+ struct md_object *m,
+ struct md_attr *ma)
+{
+ LASSERT(m->mo_ops->moo_ref_del);
+ return m->mo_ops->moo_ref_del(env, m, ma);
+}
+
+static inline int mo_capa_get(const struct lu_env *env,
+ struct md_object *m,
+ struct lustre_capa *c,
+ int renewal)
+{
+ LASSERT(m->mo_ops->moo_capa_get);
+ return m->mo_ops->moo_capa_get(env, m, c, renewal);
+}
+
+static inline int mo_object_sync(const struct lu_env *env, struct md_object *m)
+{
+ LASSERT(m->mo_ops->moo_object_sync);
+ return m->mo_ops->moo_object_sync(env, m);
+}
+
+static inline int mo_file_lock(const struct lu_env *env, struct md_object *m,
+ struct lov_mds_md *lmm,
+ struct ldlm_extent *extent,
+ struct lustre_handle *lockh)
+{
+ LASSERT(m->mo_ops->moo_file_lock);
+ return m->mo_ops->moo_file_lock(env, m, lmm, extent, lockh);
+}
+
+static inline int mo_file_unlock(const struct lu_env *env, struct md_object *m,
+ struct lov_mds_md *lmm,
+ struct lustre_handle *lockh)
+{
+ LASSERT(m->mo_ops->moo_file_unlock);
+ return m->mo_ops->moo_file_unlock(env, m, lmm, lockh);
+}
+
+static inline int mo_object_lock(const struct lu_env *env,
+ struct md_object *m,
+ struct lustre_handle *lh,
+ struct ldlm_enqueue_info *einfo,
+ void *policy)
+{
+ LASSERT(m->mo_ops->moo_object_lock);
+ return m->mo_ops->moo_object_lock(env, m, lh, einfo, policy);
+}
+
+static inline int mdo_lookup(const struct lu_env *env,
+ struct md_object *p,
+ const struct lu_name *lname,
+ struct lu_fid *f,
+ struct md_op_spec *spec)
+{
+ LASSERT(p->mo_dir_ops->mdo_lookup);
+ return p->mo_dir_ops->mdo_lookup(env, p, lname, f, spec);
+}
+
+static inline mdl_mode_t mdo_lock_mode(const struct lu_env *env,
+ struct md_object *mo,
+ mdl_mode_t lm)
+{
+ if (mo->mo_dir_ops->mdo_lock_mode == NULL)
+ return MDL_MINMODE;
+ return mo->mo_dir_ops->mdo_lock_mode(env, mo, lm);
+}
+
+static inline int mdo_create(const struct lu_env *env,
+ struct md_object *p,
+ const struct lu_name *lchild_name,
+ struct md_object *c,
+ struct md_op_spec *spc,
+ struct md_attr *at)
+{
+ LASSERT(p->mo_dir_ops->mdo_create);
+ return p->mo_dir_ops->mdo_create(env, p, lchild_name, c, spc, at);
+}
+
+static inline int mdo_create_data(const struct lu_env *env,
+ struct md_object *p,
+ struct md_object *c,
+ const struct md_op_spec *spec,
+ struct md_attr *ma)
+{
+ LASSERT(c->mo_dir_ops->mdo_create_data);
+ return c->mo_dir_ops->mdo_create_data(env, p, c, spec, ma);
+}
+
+static inline int mdo_rename(const struct lu_env *env,
+ struct md_object *sp,
+ struct md_object *tp,
+ const struct lu_fid *lf,
+ const struct lu_name *lsname,
+ struct md_object *t,
+ const struct lu_name *ltname,
+ struct md_attr *ma)
+{
+ LASSERT(tp->mo_dir_ops->mdo_rename);
+ return tp->mo_dir_ops->mdo_rename(env, sp, tp, lf, lsname, t, ltname,
+ ma);
+}
+
+static inline int mdo_is_subdir(const struct lu_env *env,
+ struct md_object *mo,
+ const struct lu_fid *fid,
+ struct lu_fid *sfid)
+{
+ LASSERT(mo->mo_dir_ops->mdo_is_subdir);
+ return mo->mo_dir_ops->mdo_is_subdir(env, mo, fid, sfid);
+}
+
+static inline int mdo_link(const struct lu_env *env,
+ struct md_object *p,
+ struct md_object *s,
+ const struct lu_name *lname,
+ struct md_attr *ma)
+{
+ LASSERT(s->mo_dir_ops->mdo_link);
+ return s->mo_dir_ops->mdo_link(env, p, s, lname, ma);
+}
+
+static inline int mdo_unlink(const struct lu_env *env,
+ struct md_object *p,
+ struct md_object *c,
+ const struct lu_name *lname,
+ struct md_attr *ma, int no_name)
+{
+ LASSERT(p->mo_dir_ops->mdo_unlink);
+ return p->mo_dir_ops->mdo_unlink(env, p, c, lname, ma, no_name);
+}
+
+static inline int mdo_lum_lmm_cmp(const struct lu_env *env,
+ struct md_object *c,
+ const struct md_op_spec *spec,
+ struct md_attr *ma)
+{
+ LASSERT(c->mo_dir_ops->mdo_lum_lmm_cmp);
+ return c->mo_dir_ops->mdo_lum_lmm_cmp(env, c, spec, ma);
+}
+
+static inline int mdo_name_insert(const struct lu_env *env,
+ struct md_object *p,
+ const struct lu_name *lname,
+ const struct lu_fid *f,
+ const struct md_attr *ma)
+{
+ LASSERT(p->mo_dir_ops->mdo_name_insert);
+ return p->mo_dir_ops->mdo_name_insert(env, p, lname, f, ma);
+}
+
+static inline int mdo_name_remove(const struct lu_env *env,
+ struct md_object *p,
+ const struct lu_name *lname,
+ const struct md_attr *ma)
+{
+ LASSERT(p->mo_dir_ops->mdo_name_remove);
+ return p->mo_dir_ops->mdo_name_remove(env, p, lname, ma);
+}
+
+static inline int mdo_rename_tgt(const struct lu_env *env,
+ struct md_object *p,
+ struct md_object *t,
+ const struct lu_fid *lf,
+ const struct lu_name *lname,
+ struct md_attr *ma)
+{
+ if (t) {
+ LASSERT(t->mo_dir_ops->mdo_rename_tgt);
+ return t->mo_dir_ops->mdo_rename_tgt(env, p, t, lf, lname, ma);
+ } else {
+ LASSERT(p->mo_dir_ops->mdo_rename_tgt);
+ return p->mo_dir_ops->mdo_rename_tgt(env, p, t, lf, lname, ma);
+ }
+}
+
+/**
+ * Used in the MDD/OUT layers for object locking rules.
+ */
+enum mdd_object_role {
+ MOR_SRC_PARENT,
+ MOR_SRC_CHILD,
+ MOR_TGT_PARENT,
+ MOR_TGT_CHILD,
+ MOR_TGT_ORPHAN
+};
+
+struct dt_device;
+/**
+ * Structure to hold object information; used to create local objects.
+ * \pre llod_dir exists
+ */
+struct lu_local_obj_desc {
+ const char *llod_dir;
+ const char *llod_name;
+ __u32 llod_oid;
+ int llod_is_index;
+ const struct dt_index_features *llod_feat;
+ struct list_head llod_linkage;
+};
+
+int lustre_buf2som(void *buf, int rc, struct md_som_data *msd);
+int lustre_buf2hsm(void *buf, int rc, struct md_hsm *mh);
+void lustre_hsm2buf(void *buf, struct md_hsm *mh);
+
+struct lu_ucred {
+ __u32 uc_valid;
+ __u32 uc_o_uid;
+ __u32 uc_o_gid;
+ __u32 uc_o_fsuid;
+ __u32 uc_o_fsgid;
+ __u32 uc_uid;
+ __u32 uc_gid;
+ __u32 uc_fsuid;
+ __u32 uc_fsgid;
+ __u32 uc_suppgids[2];
+ cfs_cap_t uc_cap;
+ __u32 uc_umask;
+ struct group_info *uc_ginfo;
+ struct md_identity *uc_identity;
+};
+
+struct lu_ucred *lu_ucred(const struct lu_env *env);
+
+struct lu_ucred *lu_ucred_check(const struct lu_env *env);
+
+struct lu_ucred *lu_ucred_assert(const struct lu_env *env);
+
+int lu_ucred_global_init(void);
+
+void lu_ucred_global_fini(void);
+
+#define md_cap_t(x) (x)
+
+#define MD_CAP_TO_MASK(x) (1 << (x))
+
+#define md_cap_raised(c, flag) (md_cap_t(c) & MD_CAP_TO_MASK(flag))
+
+/* md_capable() mirrors capable() from the Linux kernel */
+static inline int md_capable(struct lu_ucred *uc, cfs_cap_t cap)
+{
+ if (md_cap_raised(uc->uc_cap, cap))
+ return 1;
+ return 0;
+}
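
Assuming the CFS_CAP_* constants from libcfs, a credential check in a hypothetical handler reduces to a single bit test:

        static int hypothetical_admin_only(const struct lu_env *env)
        {
                struct lu_ucred *uc = lu_ucred_check(env);

                /* refuse unless the caller's credentials carry CAP_SYS_ADMIN */
                if (uc == NULL || !md_capable(uc, CFS_CAP_SYS_ADMIN))
                        return -EPERM;
                return 0;
        }
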
+
+/** @} md */
+#endif /* _LINUX_MD_OBJECT_H */
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
new file mode 100644
index 0000000..a612255
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -0,0 +1,1550 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __OBD_H
+#define __OBD_H
+
+#include <linux/obd.h>
+
+#define IOC_OSC_TYPE 'h'
+#define IOC_OSC_MIN_NR 20
+#define IOC_OSC_SET_ACTIVE _IOWR(IOC_OSC_TYPE, 21, struct obd_device *)
+#define IOC_OSC_MAX_NR 50
+
+#define IOC_MDC_TYPE 'i'
+#define IOC_MDC_MIN_NR 20
+#define IOC_MDC_MAX_NR 50
+
+#include <lustre/lustre_idl.h>
+#include <lustre_lib.h>
+#include <linux/libcfs/bitmap.h>
+#include <lu_ref.h>
+#include <lustre_export.h>
+#include <lustre_fid.h>
+#include <lustre_fld.h>
+#include <lustre_capa.h>
+
+#define MAX_OBD_DEVICES 8192
+
+struct osc_async_rc {
+ int ar_rc;
+ int ar_force_sync;
+ __u64 ar_min_xid;
+};
+
+struct lov_oinfo { /* per-stripe data structure */
+ struct ost_id loi_oi; /* object ID/Sequence on the target OST */
+ int loi_ost_idx; /* OST stripe index in lov_tgt_desc->tgts */
+ int loi_ost_gen; /* generation of this loi_ost_idx */
+
+ unsigned long loi_kms_valid:1;
+ __u64 loi_kms; /* known minimum size */
+ struct ost_lvb loi_lvb;
+ struct osc_async_rc loi_ar;
+};
+
+static inline void loi_kms_set(struct lov_oinfo *oinfo, __u64 kms)
+{
+ oinfo->loi_kms = kms;
+ oinfo->loi_kms_valid = 1;
+}
+
+static inline void loi_init(struct lov_oinfo *loi)
+{
+}
+
+struct lov_stripe_md {
+ atomic_t lsm_refc;
+ spinlock_t lsm_lock;
+ pid_t lsm_lock_owner; /* debugging */
+
+ /* maximum possible file size; may change as OST status changes,
+ * e.g. disconnected, deactivated */
+ __u64 lsm_maxbytes;
+ struct {
+ /* Public members. */
+ struct ost_id lw_object_oi; /* lov object id/seq */
+
+ /* LOV-private members start here -- only for use in lov/. */
+ __u32 lw_magic;
+ __u32 lw_stripe_size; /* size of the stripe */
+ __u32 lw_pattern; /* striping pattern (RAID0, RAID1) */
+ __u16 lw_stripe_count; /* number of objects being striped over */
+ __u16 lw_layout_gen; /* generation of the layout */
+ char lw_pool_name[LOV_MAXPOOLNAME]; /* pool name */
+ } lsm_wire;
+
+ struct lov_oinfo *lsm_oinfo[0];
+};
+
+#define lsm_oi lsm_wire.lw_object_oi
+#define lsm_magic lsm_wire.lw_magic
+#define lsm_layout_gen lsm_wire.lw_layout_gen
+#define lsm_stripe_size lsm_wire.lw_stripe_size
+#define lsm_pattern lsm_wire.lw_pattern
+#define lsm_stripe_count lsm_wire.lw_stripe_count
+#define lsm_pool_name lsm_wire.lw_pool_name
+
+static inline bool lsm_is_released(struct lov_stripe_md *lsm)
+{
+ return !!(lsm->lsm_pattern & LOV_PATTERN_F_RELEASED);
+}
+
+static inline bool lsm_has_objects(struct lov_stripe_md *lsm)
+{
+ if (lsm == NULL)
+ return false;
+ if (lsm_is_released(lsm))
+ return false;
+ return true;
+}
+
+struct obd_info;
+
+typedef int (*obd_enqueue_update_f)(void *cookie, int rc);
+
+/* obd info for a particular level (lov, osc). */
+struct obd_info {
+ /* Lock policy. It keeps an extent which is specific to a particular
+ * OSC (e.g. lov_prep_enqueue_set initialises the extent of the policy,
+ * and osc_enqueue passes it into ldlm_lock_match & ldlm_cli_enqueue). */
+ ldlm_policy_data_t oi_policy;
+ /* Flags used to set request-specific flags:
+ - during lock handling, the flags obtained on the enqueue
+ request are stored here.
+ - during stat requests, the flags that control delay/resend.
+ - during setattr, the flags that distinguish the punch operation.
+ */
+ __u64 oi_flags;
+ /* Lock handle specific for every OSC lock. */
+ struct lustre_handle *oi_lockh;
+ /* lsm data specific for every OSC. */
+ struct lov_stripe_md *oi_md;
+ /* obdo data specific for every OSC, if needed at all. */
+ struct obdo *oi_oa;
+ /* statfs data specific for every OSC, if needed at all. */
+ struct obd_statfs *oi_osfs;
+ /* An update callback which is called to update some data at the upper
+ * level, e.g. to update lsm->lsm_oinfo for every received enqueue
+ * request at the OSC level. It can also be used to update caller data
+ * from the LOV layer if needed. */
+ obd_enqueue_update_f oi_cb_up;
+ /* OSS capability; its type is obd_capa on the client to avoid a copy,
+ * whereas on the OSS its type is lustre_capa. */
+ void *oi_capa;
+ /* transfer jobid from ost_sync() to filter_sync()... */
+ char *oi_jobid;
+};
+
+/* compare all relevant fields. */
+static inline int lov_stripe_md_cmp(struct lov_stripe_md *m1,
+ struct lov_stripe_md *m2)
+{
+ /*
+ * ->lsm_wire contains padding, but it should be zeroed out during
+ * allocation.
+ */
+ return memcmp(&m1->lsm_wire, &m2->lsm_wire, sizeof(m1->lsm_wire));
+}
+
+static inline int lov_lum_lsm_cmp(struct lov_user_md *lum,
+ struct lov_stripe_md *lsm)
+{
+ if (lsm->lsm_magic != lum->lmm_magic)
+ return 1;
+ if ((lsm->lsm_stripe_count != 0) && (lum->lmm_stripe_count != 0) &&
+ (lsm->lsm_stripe_count != lum->lmm_stripe_count))
+ return 2;
+ if ((lsm->lsm_stripe_size != 0) && (lum->lmm_stripe_size != 0) &&
+ (lsm->lsm_stripe_size != lum->lmm_stripe_size))
+ return 3;
+ if ((lsm->lsm_pattern != 0) && (lum->lmm_pattern != 0) &&
+ (lsm->lsm_pattern != lum->lmm_pattern))
+ return 4;
+ if ((lsm->lsm_magic == LOV_MAGIC_V3) &&
+ (strncmp(lsm->lsm_pool_name,
+ ((struct lov_user_md_v3 *)lum)->lmm_pool_name,
+ LOV_MAXPOOLNAME) != 0))
+ return 5;
+ return 0;
+}
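
A non-zero return from lov_lum_lsm_cmp() identifies the first mismatching field (1 = magic, 2 = stripe count, 3 = stripe size, 4 = pattern, 5 = pool name); a hypothetical caller would typically only care whether the layouts match, as in this fragment:

        if (lov_lum_lsm_cmp(lum, lsm) != 0)
                return -EINVAL;  /* requested layout differs from existing one */
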
+
+static inline int lov_lum_swab_if_needed(struct lov_user_md_v3 *lumv3,
+ int *lmm_magic,
+ struct lov_user_md *lum)
+{
+ if (lum && copy_from_user(lumv3, lum, sizeof(struct lov_user_md_v1)))
+ return -EFAULT;
+
+ *lmm_magic = lumv3->lmm_magic;
+
+ if (*lmm_magic == __swab32(LOV_USER_MAGIC_V1)) {
+ lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lumv3);
+ *lmm_magic = LOV_USER_MAGIC_V1;
+ } else if (*lmm_magic == LOV_USER_MAGIC_V3) {
+ if (lum && copy_from_user(lumv3, lum, sizeof(*lumv3)))
+ return -EFAULT;
+ } else if (*lmm_magic == __swab32(LOV_USER_MAGIC_V3)) {
+ if (lum && copy_from_user(lumv3, lum, sizeof(*lumv3)))
+ return -EFAULT;
+ lustre_swab_lov_user_md_v3(lumv3);
+ *lmm_magic = LOV_USER_MAGIC_V3;
+ } else if (*lmm_magic != LOV_USER_MAGIC_V1) {
+ CDEBUG(D_IOCTL,
+ "bad userland LOV MAGIC: %#08x != %#08x nor %#08x\n",
+ *lmm_magic, LOV_USER_MAGIC_V1, LOV_USER_MAGIC_V3);
+ return -EINVAL;
+ }
+ return 0;
+}
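
A sketch of the intended call pattern (lum here is a hypothetical user-space pointer taken from an ioctl argument): copy the v1-sized prefix, byte-swap if the magic arrived swabbed, and re-copy the full v3 structure when required.

        struct lov_user_md_v3 lumv3;
        int lmm_magic;
        int rc;

        rc = lov_lum_swab_if_needed(&lumv3, &lmm_magic, lum);
        if (rc != 0)
                return rc;                   /* -EFAULT or -EINVAL */
        /* lmm_magic is now in host order: LOV_USER_MAGIC_V1 or _V3;
         * for _V3, fields such as lmm_pool_name are valid in lumv3. */
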
+
+void lov_stripe_lock(struct lov_stripe_md *md);
+void lov_stripe_unlock(struct lov_stripe_md *md);
+
+struct obd_type {
+ struct list_head typ_chain;
+ struct obd_ops *typ_dt_ops;
+ struct md_ops *typ_md_ops;
+ struct proc_dir_entry *typ_procroot;
+ char *typ_name;
+ int typ_refcnt;
+ struct lu_device_type *typ_lu;
+ spinlock_t obd_type_lock;
+};
+
+struct brw_page {
+ obd_off off;
+ struct page *pg;
+ int count;
+ obd_flag flag;
+};
+
+/* llog contexts */
+enum llog_ctxt_id {
+ LLOG_CONFIG_ORIG_CTXT = 0,
+ LLOG_CONFIG_REPL_CTXT,
+ LLOG_MDS_OST_ORIG_CTXT,
+ LLOG_MDS_OST_REPL_CTXT,
+ LLOG_SIZE_ORIG_CTXT,
+ LLOG_SIZE_REPL_CTXT,
+ LLOG_RD1_ORIG_CTXT,
+ LLOG_RD1_REPL_CTXT,
+ LLOG_TEST_ORIG_CTXT,
+ LLOG_TEST_REPL_CTXT,
+ LLOG_LOVEA_ORIG_CTXT,
+ LLOG_LOVEA_REPL_CTXT,
+ LLOG_CHANGELOG_ORIG_CTXT, /**< changelog generation on mdd */
+ LLOG_CHANGELOG_REPL_CTXT, /**< changelog access on clients */
+ LLOG_CHANGELOG_USER_ORIG_CTXT, /**< for multiple changelog consumers */
+ LLOG_AGENT_ORIG_CTXT, /**< agent requests generation on cdt */
+ LLOG_MAX_CTXTS
+};
+
+struct timeout_item {
+ enum timeout_event ti_event;
+ cfs_time_t ti_timeout;
+ timeout_cb_t ti_cb;
+ void *ti_cb_data;
+ struct list_head ti_obd_list;
+ struct list_head ti_chain;
+};
+
+#define OSC_MAX_RIF_DEFAULT 8
+#define MDS_OSC_MAX_RIF_DEFAULT 50
+#define OSC_MAX_RIF_MAX 256
+#define OSC_MAX_DIRTY_DEFAULT (OSC_MAX_RIF_DEFAULT * 4)
+#define OSC_MAX_DIRTY_MB_MAX 2048 /* arbitrary, but < MAX_LONG bytes */
+#define OSC_DEFAULT_RESENDS 10
+
+/* possible values for fo_sync_lock_cancel */
+enum {
+ NEVER_SYNC_ON_CANCEL = 0,
+ BLOCKING_SYNC_ON_CANCEL = 1,
+ ALWAYS_SYNC_ON_CANCEL = 2,
+ NUM_SYNC_ON_CANCEL_STATES
+};
+
+#define MDC_MAX_RIF_DEFAULT 8
+#define MDC_MAX_RIF_MAX 512
+
+struct mdc_rpc_lock;
+struct obd_import;
+struct client_obd {
+ struct rw_semaphore cl_sem;
+ struct obd_uuid cl_target_uuid;
+ struct obd_import *cl_import; /* ptlrpc connection state */
+ int cl_conn_count;
+ /* max_mds_easize is purely a performance thing so we don't have to
+ * call obd_size_diskmd() all the time. */
+ int cl_default_mds_easize;
+ int cl_max_mds_easize;
+ int cl_max_mds_cookiesize;
+
+ enum lustre_sec_part cl_sp_me;
+ enum lustre_sec_part cl_sp_to;
+ struct sptlrpc_flavor cl_flvr_mgc; /* fixed flavor of mgc->mgs */
+
+ /* the grant values are protected by loi_list_lock below */
+ long cl_dirty; /* all _dirty_ in bytes */
+ long cl_dirty_max; /* allowed w/o rpc */
+ long cl_dirty_transit; /* dirty synchronous */
+ long cl_avail_grant; /* bytes of credit for ost */
+ long cl_lost_grant; /* lost credits (trunc) */
+
+ /* Since we allocate grant by blocks, we don't know how much grant will
+ * be used to add a page into the cache. As a solution, we reserve the
+ * maximum grant before trying to dirty a page and unreserve the rest.
+ * See osc_{reserve|unreserve}_grant for details. */
+ long cl_reserved_grant;
+ struct list_head cl_cache_waiters; /* waiting for cache/grant */
+ cfs_time_t cl_next_shrink_grant; /* jiffies */
+ struct list_head cl_grant_shrink_list; /* Timeout event list */
+ int cl_grant_shrink_interval; /* seconds */
+
+ /* A chunk is an optimal size used by osc_extent to determine
+ * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size) */
+ int cl_chunkbits;
+ int cl_chunk;
+ int cl_extent_tax; /* extent overhead, by bytes */
+
+ /* keep track of objects that have lois that contain pages which
+ * have been queued for async brw. this lock also protects the
+ * lists of osc_client_pages that hang off of the loi */
+ /*
+ * ->cl_loi_list_lock protects consistency of
+ * ->cl_loi_{ready,read,write}_list. ->ap_make_ready() and
+ * ->ap_completion() call-backs are executed under this lock. As we
+ * cannot guarantee that these call-backs never block on all platforms
+ * (as a matter of fact they do block on Mac OS X), type of
+ * ->cl_loi_list_lock is platform dependent: it's a spin-lock on Linux
+ * and blocking mutex on Mac OS X. (Alternative is to make this lock
+ * blocking everywhere, but we don't want to slow down fast-path of
+ * our main platform.)
+ *
+ * Exact type of ->cl_loi_list_lock is defined in arch/obd.h together
+ * with client_obd_list_{un,}lock() and
+ * client_obd_list_lock_{init,done}() functions.
+ *
+ * NB by Jinshan: although the field names still say _loi_, the lists
+ * actually contain osc_object{}s.
+ */
+ client_obd_lock_t cl_loi_list_lock;
+ struct list_head cl_loi_ready_list;
+ struct list_head cl_loi_hp_ready_list;
+ struct list_head cl_loi_write_list;
+ struct list_head cl_loi_read_list;
+ int cl_r_in_flight;
+ int cl_w_in_flight;
+ /* just a sum of the loi/lop pending numbers to be exported by /proc */
+ atomic_t cl_pending_w_pages;
+ atomic_t cl_pending_r_pages;
+ __u32 cl_max_pages_per_rpc;
+ int cl_max_rpcs_in_flight;
+ struct obd_histogram cl_read_rpc_hist;
+ struct obd_histogram cl_write_rpc_hist;
+ struct obd_histogram cl_read_page_hist;
+ struct obd_histogram cl_write_page_hist;
+ struct obd_histogram cl_read_offset_hist;
+ struct obd_histogram cl_write_offset_hist;
+
+ /* lru for osc caching pages */
+ struct cl_client_cache *cl_cache;
+ struct list_head cl_lru_osc; /* member of cl_cache->ccc_lru */
+ atomic_t *cl_lru_left;
+ atomic_t cl_lru_busy;
+ atomic_t cl_lru_shrinkers;
+ atomic_t cl_lru_in_list;
+ struct list_head cl_lru_list; /* lru page list */
+ client_obd_lock_t cl_lru_list_lock; /* page list protector */
+
+ /* the number of in-flight destroy RPCs is limited to max_rpcs_in_flight */
+ atomic_t cl_destroy_in_flight;
+ wait_queue_head_t cl_destroy_waitq;
+
+ struct mdc_rpc_lock *cl_rpc_lock;
+ struct mdc_rpc_lock *cl_close_lock;
+
+ /* mgc datastruct */
+ struct semaphore cl_mgc_sem;
+ struct vfsmount *cl_mgc_vfsmnt;
+ struct dentry *cl_mgc_configs_dir;
+ atomic_t cl_mgc_refcount;
+ struct obd_export *cl_mgc_mgsexp;
+
+ /* checksumming for data sent over the network */
+ unsigned int cl_checksum:1; /* 0 = disabled, 1 = enabled */
+ /* supported checksum types that are worked out at connect time */
+ __u32 cl_supp_cksum_types;
+ /* checksum algorithm to be used */
+ cksum_type_t cl_cksum_type;
+
+ /* also protected by the poorly named _loi_list_lock lock above */
+ struct osc_async_rc cl_ar;
+
+ /* used by quotacheck when the servers are older than 2.4 */
+ int cl_qchk_stat; /* quotacheck stat of the peer */
+#define CL_NOT_QUOTACHECKED 1 /* client->cl_qchk_stat init value */
+#if LUSTRE_VERSION_CODE >= OBD_OCD_VERSION(2, 7, 50, 0)
+#warning "please consider removing quotacheck compatibility code"
+#endif
+
+ /* sequence manager */
+ struct lu_client_seq *cl_seq;
+
+ atomic_t cl_resends; /* resend count */
+
+ /* ptlrpc work for writeback in ptlrpcd context */
+ void *cl_writeback_work;
+ /* hash tables for osc_quota_info */
+ cfs_hash_t *cl_quota_hash[MAXQUOTAS];
+};
+#define obd2cli_tgt(obd) ((char *)(obd)->u.cli.cl_target_uuid.uuid)
+
+struct obd_id_info {
+ __u32 idx;
+ obd_id *data;
+};
+
+struct echo_client_obd {
+ struct obd_export *ec_exp; /* the local connection to osc/lov */
+ spinlock_t ec_lock;
+ struct list_head ec_objects;
+ struct list_head ec_locks;
+ int ec_nstripes;
+ __u64 ec_unique;
+};
+
+struct lov_qos_oss {
+ struct obd_uuid lqo_uuid; /* ptlrpc's c_remote_uuid */
+ struct list_head lqo_oss_list; /* link to lov_qos */
+ __u64 lqo_bavail; /* total bytes avail on OSS */
+ __u64 lqo_penalty; /* current penalty */
+ __u64 lqo_penalty_per_obj;/* penalty decrease every obj */
+ time_t lqo_used; /* last used time, seconds */
+ __u32 lqo_ost_count; /* number of osts on this oss */
+};
+
+struct ltd_qos {
+ struct lov_qos_oss *ltq_oss; /* oss info */
+ __u64 ltq_penalty; /* current penalty */
+ __u64 ltq_penalty_per_obj; /* penalty decrease every obj */
+ __u64 ltq_weight; /* net weighting */
+ time_t ltq_used; /* last used time, seconds */
+ unsigned int ltq_usable:1; /* usable for striping */
+};
+
+/* Generic subset of OSTs */
+struct ost_pool {
+ __u32 *op_array; /* array of indices into
+ lov_obd->lov_tgts */
+ unsigned int op_count; /* number of OSTs in the array */
+ unsigned int op_size; /* allocated size of op_array */
+ struct rw_semaphore op_rw_sem; /* to protect ost_pool use */
+};
+
+/* Round-robin allocator data */
+struct lov_qos_rr {
+ __u32 lqr_start_idx; /* start index of new inode */
+ __u32 lqr_offset_idx; /* aliasing for start_idx */
+ int lqr_start_count; /* reseed counter */
+ struct ost_pool lqr_pool; /* round-robin optimized list */
+ unsigned long lqr_dirty:1; /* recalc round-robin list */
+};
+
+/* allow statfs data caching for 1 second */
+#define OBD_STATFS_CACHE_SECONDS 1
+
+struct lov_statfs_data {
+ struct obd_info lsd_oi;
+ struct obd_statfs lsd_statfs;
+};
+/* Stripe placement optimization */
+struct lov_qos {
+ struct list_head lq_oss_list; /* list of OSSs that targets use */
+ struct rw_semaphore lq_rw_sem;
+ __u32 lq_active_oss_count;
+ unsigned int lq_prio_free; /* priority for free space */
+ unsigned int lq_threshold_rr;/* priority for rr */
+ struct lov_qos_rr lq_rr; /* round robin qos data */
+ unsigned long lq_dirty:1, /* recalc qos data */
+ lq_same_space:1,/* the OSTs all have approx.
+ the same space avail */
+ lq_reset:1, /* zero current penalties */
+ lq_statfs_in_progress:1; /* statfs op in
+ progress */
+ /* qos statfs data */
+ struct lov_statfs_data *lq_statfs_data;
+ wait_queue_head_t lq_statfs_waitq; /* waitqueue to notify statfs
+ * requests completion */
+};
+
+struct lov_tgt_desc {
+ struct list_head ltd_kill;
+ struct obd_uuid ltd_uuid;
+ struct obd_device *ltd_obd;
+ struct obd_export *ltd_exp;
+ struct ltd_qos ltd_qos; /* qos info per target */
+ __u32 ltd_gen;
+ __u32 ltd_index; /* index in lov_obd->tgts */
+ unsigned long ltd_active:1,/* is this target up for requests */
+ ltd_activate:1,/* should target be activated */
+ ltd_reap:1; /* should this target be deleted */
+};
+
+/* Pool metadata */
+#define pool_tgt_size(_p) _p->pool_obds.op_size
+#define pool_tgt_count(_p) _p->pool_obds.op_count
+#define pool_tgt_array(_p) _p->pool_obds.op_array
+#define pool_tgt_rw_sem(_p) _p->pool_obds.op_rw_sem
+
+struct pool_desc {
+ char pool_name[LOV_MAXPOOLNAME + 1]; /* name of pool */
+ struct ost_pool pool_obds; /* pool members */
+ atomic_t pool_refcount; /* pool ref. counter */
+ struct lov_qos_rr pool_rr; /* round robin qos */
+ struct hlist_node pool_hash; /* access by poolname */
+ struct list_head pool_list; /* serial access */
+ struct proc_dir_entry *pool_proc_entry; /* file in /proc */
+ struct obd_device *pool_lobd; /* obd of the lov/lod to which
+ * this pool belongs */
+};
+
+struct lov_obd {
+ struct lov_desc desc;
+ struct lov_tgt_desc **lov_tgts; /* sparse array */
+ struct ost_pool lov_packed; /* all OSTs in a packed
+ array */
+ struct mutex lov_lock;
+ struct obd_connect_data lov_ocd;
+ atomic_t lov_refcount;
+ __u32 lov_tgt_count; /* how many OBD's */
+ __u32 lov_active_tgt_count; /* how many active */
+ __u32 lov_death_row;/* tgts scheduled to be deleted */
+ __u32 lov_tgt_size; /* size of tgts array */
+ int lov_connects;
+ int lov_pool_count;
+ cfs_hash_t *lov_pools_hash_body; /* used for key access */
+ struct list_head lov_pool_list; /* used for sequential access */
+ struct proc_dir_entry *lov_pool_proc_entry;
+ enum lustre_sec_part lov_sp_me;
+
+ /* Cached LRU pages from upper layer */
+ void *lov_cache;
+
+ struct rw_semaphore lov_notify_lock;
+};
+
+struct lmv_tgt_desc {
+ struct obd_uuid ltd_uuid;
+ struct obd_export *ltd_exp;
+ int ltd_idx;
+ struct mutex ltd_fid_mutex;
+ unsigned long ltd_active:1; /* target up for requests */
+};
+
+enum placement_policy {
+ PLACEMENT_CHAR_POLICY = 0,
+ PLACEMENT_NID_POLICY = 1,
+ PLACEMENT_INVAL_POLICY = 2,
+ PLACEMENT_MAX_POLICY
+};
+
+typedef enum placement_policy placement_policy_t;
+
+struct lmv_obd {
+ int refcount;
+ struct lu_client_fld lmv_fld;
+ spinlock_t lmv_lock;
+ placement_policy_t lmv_placement;
+ struct lmv_desc desc;
+ struct obd_uuid cluuid;
+ struct obd_export *exp;
+
+ struct mutex init_mutex;
+ int connected;
+ int max_easize;
+ int max_def_easize;
+ int max_cookiesize;
+ int server_timeout;
+
+ int tgts_size; /* size of tgts array */
+ struct lmv_tgt_desc **tgts;
+
+ struct obd_connect_data conn_data;
+};
+
+struct niobuf_local {
+ __u64 lnb_file_offset;
+ __u32 lnb_page_offset;
+ __u32 len;
+ __u32 flags;
+ struct page *page;
+ struct dentry *dentry;
+ int lnb_grant_used;
+ int rc;
+};
+
+#define LUSTRE_FLD_NAME "fld"
+#define LUSTRE_SEQ_NAME "seq"
+
+#define LUSTRE_MDD_NAME "mdd"
+#define LUSTRE_OSD_LDISKFS_NAME "osd-ldiskfs"
+#define LUSTRE_OSD_ZFS_NAME "osd-zfs"
+#define LUSTRE_VVP_NAME "vvp"
+#define LUSTRE_LMV_NAME "lmv"
+#define LUSTRE_SLP_NAME "slp"
+#define LUSTRE_LOD_NAME "lod"
+#define LUSTRE_OSP_NAME "osp"
+#define LUSTRE_LWP_NAME "lwp"
+
+/* obd device type names */
+ /* FIXME all the references to LUSTRE_MDS_NAME should be swapped with LUSTRE_MDT_NAME */
+#define LUSTRE_MDS_NAME "mds"
+#define LUSTRE_MDT_NAME "mdt"
+#define LUSTRE_MDC_NAME "mdc"
+#define LUSTRE_OSS_NAME "ost" /* FIXME change name to oss */
+#define LUSTRE_OST_NAME "obdfilter" /* FIXME change name to ost */
+#define LUSTRE_OSC_NAME "osc"
+#define LUSTRE_LOV_NAME "lov"
+#define LUSTRE_MGS_NAME "mgs"
+#define LUSTRE_MGC_NAME "mgc"
+
+#define LUSTRE_ECHO_NAME "obdecho"
+#define LUSTRE_ECHO_CLIENT_NAME "echo_client"
+#define LUSTRE_QMT_NAME "qmt"
+
+/* Constant obd names (post-rename) */
+#define LUSTRE_MDS_OBDNAME "MDS"
+#define LUSTRE_OSS_OBDNAME "OSS"
+#define LUSTRE_MGS_OBDNAME "MGS"
+#define LUSTRE_MGC_OBDNAME "MGC"
+
+static inline int is_osp_on_mdt(char *name)
+{
+ char *ptr;
+
+ ptr = strrchr(name, '-');
+ if (ptr == NULL) {
+ CERROR("%s is not a obdname\n", name);
+ return 0;
+ }
+
+ /* 1.8 OSC/OSP name on MDT is fsname-OSTxxxx-osc */
+ if (strncmp(ptr + 1, "osc", 3) == 0)
+ return 1;
+
+ if (strncmp(ptr + 1, "MDT", 3) != 0)
+ return 0;
+
+ while (*(--ptr) != '-' && ptr != name);
+
+ if (ptr == name)
+ return 0;
+
+ if (strncmp(ptr + 1, LUSTRE_OSP_NAME, strlen(LUSTRE_OSP_NAME)) != 0 &&
+ strncmp(ptr + 1, LUSTRE_OSC_NAME, strlen(LUSTRE_OSC_NAME)) != 0)
+ return 0;
+
+ return 1;
+}
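
Tracing the string logic above, a few hypothetical device names illustrate what the helper accepts:

        /*
         *   "lustre-OST0001-osc-MDT0000"  -> 1  (OSP/OSC device on an MDT)
         *   "lustre-MDT0001-osp-MDT0000"  -> 1
         *   "lustre-OST0001-osc"          -> 1  (1.8-style name)
         *   "lustre-OST0001-osc-ffff8800" -> 0  (client-side OSC)
         *   "lustre"                      -> 0  (no '-', not an obdname)
         */
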
+
+/* Don't conflict with on-wire flags OBD_BRW_WRITE, etc */
+#define N_LOCAL_TEMP_PAGE 0x10000000
+
+struct obd_trans_info {
+ __u64 oti_transno;
+ __u64 oti_xid;
+ /* Only used on the server side for tracking acks. */
+ struct oti_req_ack_lock {
+ struct lustre_handle lock;
+ __u32 mode;
+ } oti_ack_locks[4];
+ void *oti_handle;
+ struct llog_cookie oti_onecookie;
+ struct llog_cookie *oti_logcookies;
+ int oti_numcookies;
+ /** synchronous write is needed */
+ unsigned long oti_sync_write:1;
+
+ /* initial thread handling transaction */
+ struct ptlrpc_thread *oti_thread;
+ __u32 oti_conn_cnt;
+ /** VBR: versions */
+ __u64 oti_pre_version;
+ /** JobID */
+ char *oti_jobid;
+
+ struct obd_uuid *oti_ost_uuid;
+};
+
+static inline void oti_init(struct obd_trans_info *oti,
+ struct ptlrpc_request *req)
+{
+ if (oti == NULL)
+ return;
+ memset(oti, 0, sizeof(*oti));
+
+ if (req == NULL)
+ return;
+
+ oti->oti_xid = req->rq_xid;
+ /** VBR: take versions from request */
+ if (req->rq_reqmsg != NULL &&
+ lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) {
+ __u64 *pre_version = lustre_msg_get_versions(req->rq_reqmsg);
+ oti->oti_pre_version = pre_version ? pre_version[0] : 0;
+ oti->oti_transno = lustre_msg_get_transno(req->rq_reqmsg);
+ }
+
+ /** called from mds_create_objects */
+ if (req->rq_repmsg != NULL)
+ oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg);
+ oti->oti_thread = req->rq_svc_thread;
+ if (req->rq_reqmsg != NULL)
+ oti->oti_conn_cnt = lustre_msg_get_conn_cnt(req->rq_reqmsg);
+}
+
+static inline void oti_alloc_cookies(struct obd_trans_info *oti,
+ int num_cookies)
+{
+ if (!oti)
+ return;
+
+ if (num_cookies == 1)
+ oti->oti_logcookies = &oti->oti_onecookie;
+ else
+ OBD_ALLOC_LARGE(oti->oti_logcookies,
+ num_cookies * sizeof(oti->oti_onecookie));
+
+ oti->oti_numcookies = num_cookies;
+}
+
+static inline void oti_free_cookies(struct obd_trans_info *oti)
+{
+ if (!oti || !oti->oti_logcookies)
+ return;
+
+ if (oti->oti_logcookies == &oti->oti_onecookie)
+ LASSERT(oti->oti_numcookies == 1);
+ else
+ OBD_FREE_LARGE(oti->oti_logcookies,
+ oti->oti_numcookies*sizeof(oti->oti_onecookie));
+ oti->oti_logcookies = NULL;
+ oti->oti_numcookies = 0;
+}
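
A sketch of how the cookie helpers pair up on a hypothetical unlink/destroy path (req and stripe_count are invented names; one cookie is kept inline, larger counts are allocated with OBD_ALLOC_LARGE):

        struct obd_trans_info oti;

        oti_init(&oti, req);                    /* req may be NULL          */
        oti_alloc_cookies(&oti, stripe_count);  /* one cookie stays inline  */
        if (oti.oti_logcookies != NULL) {
                /* ... hand &oti to the destroy/llog path ... */
                oti_free_cookies(&oti);
        }
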
+
+/*
+ * Events signalled through obd_notify() upcall-chain.
+ */
+enum obd_notify_event {
+ /* target added */
+ OBD_NOTIFY_CREATE,
+ /* Device connect start */
+ OBD_NOTIFY_CONNECT,
+ /* Device activated */
+ OBD_NOTIFY_ACTIVE,
+ /* Device deactivated */
+ OBD_NOTIFY_INACTIVE,
+ /* Device disconnected */
+ OBD_NOTIFY_DISCON,
+ /* Connect data for the import was changed */
+ OBD_NOTIFY_OCD,
+ /* Sync request */
+ OBD_NOTIFY_SYNC_NONBLOCK,
+ OBD_NOTIFY_SYNC,
+ /* Configuration event */
+ OBD_NOTIFY_CONFIG,
+ /* Administratively deactivate/activate event */
+ OBD_NOTIFY_DEACTIVATE,
+ OBD_NOTIFY_ACTIVATE
+};
+
+/*
+ * Data structure used to pass obd_notify() events to non-obd listeners (llite
+ * and liblustre being the main examples).
+ */
+struct obd_notify_upcall {
+ int (*onu_upcall)(struct obd_device *host, struct obd_device *watched,
+ enum obd_notify_event ev, void *owner, void *data);
+ /* Opaque datum supplied by upper layer listener */
+ void *onu_owner;
+};
+
+struct target_recovery_data {
+ svc_handler_t trd_recovery_handler;
+ pid_t trd_processing_task;
+ struct completion trd_starting;
+ struct completion trd_finishing;
+};
+
+struct obd_llog_group {
+ int olg_seq;
+ struct llog_ctxt *olg_ctxts[LLOG_MAX_CTXTS];
+ wait_queue_head_t olg_waitq;
+ spinlock_t olg_lock;
+ struct mutex olg_cat_processing;
+};
+
+/* corresponds to one of the obd's */
+#define OBD_DEVICE_MAGIC 0XAB5CD6EF
+#define OBD_DEV_BY_DEVNAME 0xffffd0de
+
+struct obd_device {
+ struct obd_type *obd_type;
+ __u32 obd_magic;
+
+ /* common and UUID name of this device */
+ char obd_name[MAX_OBD_NAME];
+ struct obd_uuid obd_uuid;
+
+ struct lu_device *obd_lu_dev;
+
+ int obd_minor;
+ /* bitfield modification is protected by obd_dev_lock */
+ unsigned long obd_attached:1, /* finished attach */
+ obd_set_up:1, /* finished setup */
+ obd_recovering:1, /* there are recoverable clients */
+ obd_abort_recovery:1,/* recovery expired */
+ obd_version_recov:1, /* obd uses version checking */
+ obd_replayable:1, /* recovery is enabled; inform clients */
+ obd_no_transno:1, /* no committed-transno notification */
+ obd_no_recov:1, /* fail instead of retry messages */
+ obd_stopping:1, /* started cleanup */
+ obd_starting:1, /* started setup */
+ obd_force:1, /* cleanup with > 0 obd refcount */
+ obd_fail:1, /* cleanup with failover */
+ obd_async_recov:1, /* allow asynchronous orphan cleanup */
+ obd_no_conn:1, /* deny new connections */
+ obd_inactive:1, /* device active/inactive
+ * (for /proc/status only!!) */
+ obd_no_ir:1, /* no imperative recovery. */
+ obd_process_conf:1; /* device is processing mgs config */
+ /* use a separate field, since it is set from interrupt context, to
+ * avoid having to protect the other bits with a _bh lock */
+ unsigned long obd_recovery_expired:1;
+ /* uuid-export hash body */
+ cfs_hash_t *obd_uuid_hash;
+ /* nid-export hash body */
+ cfs_hash_t *obd_nid_hash;
+ /* nid stats body */
+ cfs_hash_t *obd_nid_stats_hash;
+ struct list_head obd_nid_stats;
+ atomic_t obd_refcount;
+ wait_queue_head_t obd_refcount_waitq;
+ struct list_head obd_exports;
+ struct list_head obd_unlinked_exports;
+ struct list_head obd_delayed_exports;
+ int obd_num_exports;
+ spinlock_t obd_nid_lock;
+ struct ldlm_namespace *obd_namespace;
+ struct ptlrpc_client obd_ldlm_client; /* XXX OST/MDS only */
+ /* a spinlock is OK for what we do now, may need a semaphore later */
+ spinlock_t obd_dev_lock; /* protect OBD bitfield above */
+ struct mutex obd_dev_mutex;
+ __u64 obd_last_committed;
+ struct fsfilt_operations *obd_fsops;
+ spinlock_t obd_osfs_lock;
+ struct obd_statfs obd_osfs; /* locked by obd_osfs_lock */
+ __u64 obd_osfs_age;
+ struct lvfs_run_ctxt obd_lvfs_ctxt;
+ struct obd_llog_group obd_olg; /* default llog group */
+ struct obd_device *obd_observer;
+ struct rw_semaphore obd_observer_link_sem;
+ struct obd_notify_upcall obd_upcall;
+ struct obd_export *obd_self_export;
+ /* list of exports in LRU order, for ping evictor, with obd_dev_lock */
+ struct list_head obd_exports_timed;
+ time_t obd_eviction_timer; /* for ping evictor */
+
+ int obd_max_recoverable_clients;
+ atomic_t obd_connected_clients;
+ int obd_stale_clients;
+ int obd_delayed_clients;
+ /* this lock protects all recovery list_heads, timer and
+ * obd_next_recovery_transno value */
+ spinlock_t obd_recovery_task_lock;
+ __u64 obd_next_recovery_transno;
+ int obd_replayed_requests;
+ int obd_requests_queued_for_recovery;
+ wait_queue_head_t obd_next_transno_waitq;
+ /* protected by obd_recovery_task_lock */
+ struct timer_list obd_recovery_timer;
+ time_t obd_recovery_start; /* seconds */
+ time_t obd_recovery_end; /* seconds, for lprocfs_status */
+ int obd_recovery_time_hard;
+ int obd_recovery_timeout;
+ int obd_recovery_ir_factor;
+
+ /* new recovery stuff from CMD2 */
+ struct target_recovery_data obd_recovery_data;
+ int obd_replayed_locks;
+ atomic_t obd_req_replay_clients;
+ atomic_t obd_lock_replay_clients;
+ /* all lists are protected by obd_recovery_task_lock */
+ struct list_head obd_req_replay_queue;
+ struct list_head obd_lock_replay_queue;
+ struct list_head obd_final_req_queue;
+ int obd_recovery_stage;
+
+ union {
+ struct client_obd cli;
+ struct echo_client_obd echo_client;
+ struct lov_obd lov;
+ struct lmv_obd lmv;
+ } u;
+ /* Fields used by LProcFS */
+ unsigned int obd_cntr_base;
+ struct lprocfs_stats *obd_stats;
+
+ unsigned int md_cntr_base;
+ struct lprocfs_stats *md_stats;
+
+ struct proc_dir_entry *obd_proc_entry;
+ void *obd_proc_private; /* type private PDEs */
+ struct proc_dir_entry *obd_proc_exports_entry;
+ struct proc_dir_entry *obd_svc_procroot;
+ struct lprocfs_stats *obd_svc_stats;
+ atomic_t obd_evict_inprogress;
+ wait_queue_head_t obd_evict_inprogress_waitq;
+ struct list_head obd_evict_list; /* protected with pet_lock */
+
+ /**
+ * Ldlm pool part. Save last calculated SLV and Limit.
+ */
+ rwlock_t obd_pool_lock;
+ int obd_pool_limit;
+ __u64 obd_pool_slv;
+
+ /**
+ * A list of outstanding class_incref()'s against this obd. For
+ * debugging.
+ */
+ struct lu_ref obd_reference;
+
+ int obd_conn_inprogress;
+};
+
+#define OBD_LLOG_FL_SENDNOW 0x0001
+#define OBD_LLOG_FL_EXIT 0x0002
+
+enum obd_cleanup_stage {
+/* Special case hack for MDS LOVs */
+ OBD_CLEANUP_EARLY,
+/* can be directly mapped to .ldto_device_fini() */
+ OBD_CLEANUP_EXPORTS,
+};
+
+/* get/set_info keys */
+#define KEY_ASYNC "async"
+#define KEY_BLOCKSIZE_BITS "blocksize_bits"
+#define KEY_BLOCKSIZE "blocksize"
+#define KEY_CAPA_KEY "capa_key"
+#define KEY_CHANGELOG_CLEAR "changelog_clear"
+#define KEY_FID2PATH "fid2path"
+#define KEY_CHECKSUM "checksum"
+#define KEY_CLEAR_FS "clear_fs"
+#define KEY_CONN_DATA "conn_data"
+#define KEY_EVICT_BY_NID "evict_by_nid"
+#define KEY_FIEMAP "fiemap"
+#define KEY_FLUSH_CTX "flush_ctx"
+#define KEY_GRANT_SHRINK "grant_shrink"
+#define KEY_HSM_COPYTOOL_SEND "hsm_send"
+#define KEY_INIT_RECOV_BACKUP "init_recov_bk"
+#define KEY_INIT_RECOV "initial_recov"
+#define KEY_INTERMDS "inter_mds"
+#define KEY_LAST_ID "last_id"
+#define KEY_LAST_FID "last_fid"
+#define KEY_LOCK_TO_STRIPE "lock_to_stripe"
+#define KEY_LOVDESC "lovdesc"
+#define KEY_LOV_IDX "lov_idx"
+#define KEY_MAX_EASIZE "max_easize"
+#define KEY_MDS_CONN "mds_conn"
+#define KEY_MGSSEC "mgssec"
+#define KEY_NEXT_ID "next_id"
+#define KEY_READ_ONLY "read-only"
+#define KEY_REGISTER_TARGET "register_target"
+#define KEY_SET_FS "set_fs"
+#define KEY_TGT_COUNT "tgt_count"
+/* KEY_SET_INFO in lustre_idl.h */
+#define KEY_SPTLRPC_CONF "sptlrpc_conf"
+#define KEY_CONNECT_FLAG "connect_flags"
+#define KEY_SYNC_LOCK_CANCEL "sync_lock_cancel"
+
+#define KEY_CACHE_SET "cache_set"
+#define KEY_CACHE_LRU_SHRINK "cache_lru_shrink"
+#define KEY_CHANGELOG_INDEX "changelog_index"
+
+struct lu_context;
+
+/* /!\ must be coherent with include/linux/namei.h on patched kernel */
+#define IT_OPEN (1 << 0)
+#define IT_CREAT (1 << 1)
+#define IT_READDIR (1 << 2)
+#define IT_GETATTR (1 << 3)
+#define IT_LOOKUP (1 << 4)
+#define IT_UNLINK (1 << 5)
+#define IT_TRUNC (1 << 6)
+#define IT_GETXATTR (1 << 7)
+#define IT_EXEC (1 << 8)
+#define IT_PIN (1 << 9)
+#define IT_LAYOUT (1 << 10)
+#define IT_QUOTA_DQACQ (1 << 11)
+#define IT_QUOTA_CONN (1 << 12)
+
+static inline int it_to_lock_mode(struct lookup_intent *it)
+{
+ /* CREAT needs to be tested before open (both could be set) */
+ if (it->it_op & IT_CREAT)
+ return LCK_CW;
+ else if (it->it_op & (IT_READDIR | IT_GETATTR | IT_OPEN | IT_LOOKUP |
+ IT_LAYOUT))
+ return LCK_CR;
+
+ LASSERTF(0, "Invalid it_op: %d\n", it->it_op);
+ return -EINVAL;
+}
+
+struct md_op_data {
+ struct lu_fid op_fid1; /* operation fid1 (usually parent) */
+ struct lu_fid op_fid2; /* operation fid2 (usually child) */
+ struct lu_fid op_fid3; /* two extra fids used to find */
+ struct lu_fid op_fid4; /* conflicting operation locks */
+ mdsno_t op_mds; /* what mds server open will go to */
+ struct lustre_handle op_handle;
+ obd_time op_mod_time;
+ const char *op_name;
+ int op_namelen;
+ __u32 op_mode;
+ struct lmv_stripe_md *op_mea1;
+ struct lmv_stripe_md *op_mea2;
+ __u32 op_suppgids[2];
+ __u32 op_fsuid;
+ __u32 op_fsgid;
+ cfs_cap_t op_cap;
+ void *op_data;
+
+ /* iattr fields and blocks. */
+ struct iattr op_attr;
+ unsigned int op_attr_flags;
+ __u64 op_valid;
+ loff_t op_attr_blocks;
+
+ /* Size-on-MDS epoch and flags. */
+ __u64 op_ioepoch;
+ __u32 op_flags;
+
+ /* Capa fields */
+ struct obd_capa *op_capa1;
+ struct obd_capa *op_capa2;
+
+ /* Various operation flags. */
+ __u32 op_bias;
+
+ /* Operation type */
+ __u32 op_opc;
+
+ /* Used by readdir */
+ __u64 op_offset;
+
+ /* Used by readdir */
+ __u32 op_npages;
+
+ /* used to transfer info between the stacks of the MD client;
+ * see enum op_cli_flags */
+ __u32 op_cli_flags;
+};
+
+enum op_cli_flags {
+ CLI_SET_MEA = 1 << 0,
+ CLI_RM_ENTRY = 1 << 1,
+};
+
+struct md_enqueue_info;
+/* metadata stat-ahead */
+typedef int (*md_enqueue_cb_t)(struct ptlrpc_request *req,
+ struct md_enqueue_info *minfo,
+ int rc);
+
+struct md_enqueue_info {
+ struct md_op_data mi_data;
+ struct lookup_intent mi_it;
+ struct lustre_handle mi_lockh;
+ struct inode *mi_dir;
+ md_enqueue_cb_t mi_cb;
+ __u64 mi_cbdata;
+ unsigned int mi_generation;
+};
+
+struct obd_ops {
+ struct module *o_owner;
+ int (*o_iocontrol)(unsigned int cmd, struct obd_export *exp, int len,
+ void *karg, void *uarg);
+ int (*o_get_info)(const struct lu_env *env, struct obd_export *,
+ __u32 keylen, void *key, __u32 *vallen, void *val,
+ struct lov_stripe_md *lsm);
+ int (*o_set_info_async)(const struct lu_env *, struct obd_export *,
+ __u32 keylen, void *key,
+ __u32 vallen, void *val,
+ struct ptlrpc_request_set *set);
+ int (*o_attach)(struct obd_device *dev, obd_count len, void *data);
+ int (*o_detach)(struct obd_device *dev);
+ int (*o_setup) (struct obd_device *dev, struct lustre_cfg *cfg);
+ int (*o_precleanup)(struct obd_device *dev,
+ enum obd_cleanup_stage cleanup_stage);
+ int (*o_cleanup)(struct obd_device *dev);
+ int (*o_process_config)(struct obd_device *dev, obd_count len,
+ void *data);
+ int (*o_postrecov)(struct obd_device *dev);
+ int (*o_add_conn)(struct obd_import *imp, struct obd_uuid *uuid,
+ int priority);
+ int (*o_del_conn)(struct obd_import *imp, struct obd_uuid *uuid);
+ /* connect to the target device with given connection
+ * data. @ocd->ocd_connect_flags is modified to reflect flags actually
+ * granted by the target, which are guaranteed to be a subset of flags
+ * asked for. If @ocd == NULL, use default parameters. */
+ int (*o_connect)(const struct lu_env *env,
+ struct obd_export **exp, struct obd_device *src,
+ struct obd_uuid *cluuid, struct obd_connect_data *ocd,
+ void *localdata);
+ int (*o_reconnect)(const struct lu_env *env,
+ struct obd_export *exp, struct obd_device *src,
+ struct obd_uuid *cluuid,
+ struct obd_connect_data *ocd,
+ void *localdata);
+ int (*o_disconnect)(struct obd_export *exp);
+
+ /* Initialize/finalize fids infrastructure. */
+ int (*o_fid_init)(struct obd_device *obd,
+ struct obd_export *exp, enum lu_cli_type type);
+ int (*o_fid_fini)(struct obd_device *obd);
+
+ /* Allocate a new fid according to the hint passed in @op_data. */
+ int (*o_fid_alloc)(struct obd_export *exp, struct lu_fid *fid,
+ struct md_op_data *op_data);
+
+ /*
+ * Object with @fid is getting deleted, we may want to do something
+ * about this.
+ */
+ int (*o_statfs)(const struct lu_env *, struct obd_export *exp,
+ struct obd_statfs *osfs, __u64 max_age, __u32 flags);
+ int (*o_statfs_async)(struct obd_export *exp, struct obd_info *oinfo,
+ __u64 max_age, struct ptlrpc_request_set *set);
+ int (*o_packmd)(struct obd_export *exp, struct lov_mds_md **disk_tgt,
+ struct lov_stripe_md *mem_src);
+ int (*o_unpackmd)(struct obd_export *exp,struct lov_stripe_md **mem_tgt,
+ struct lov_mds_md *disk_src, int disk_len);
+ int (*o_preallocate)(struct lustre_handle *, obd_count *req,
+ obd_id *ids);
+ /* FIXME: add fid capability support for create & destroy! */
+ int (*o_precreate)(struct obd_export *exp);
+ int (*o_create)(const struct lu_env *env, struct obd_export *exp,
+ struct obdo *oa, struct lov_stripe_md **ea,
+ struct obd_trans_info *oti);
+ int (*o_create_async)(struct obd_export *exp, struct obd_info *oinfo,
+ struct lov_stripe_md **ea,
+ struct obd_trans_info *oti);
+ int (*o_destroy)(const struct lu_env *env, struct obd_export *exp,
+ struct obdo *oa, struct lov_stripe_md *ea,
+ struct obd_trans_info *oti, struct obd_export *md_exp,
+ void *capa);
+ int (*o_setattr)(const struct lu_env *, struct obd_export *exp,
+ struct obd_info *oinfo, struct obd_trans_info *oti);
+ int (*o_setattr_async)(struct obd_export *exp, struct obd_info *oinfo,
+ struct obd_trans_info *oti,
+ struct ptlrpc_request_set *rqset);
+ int (*o_getattr)(const struct lu_env *env, struct obd_export *exp,
+ struct obd_info *oinfo);
+ int (*o_getattr_async)(struct obd_export *exp, struct obd_info *oinfo,
+ struct ptlrpc_request_set *set);
+ int (*o_brw)(int rw, struct obd_export *exp, struct obd_info *oinfo,
+ obd_count oa_bufs, struct brw_page *pgarr,
+ struct obd_trans_info *oti);
+ int (*o_merge_lvb)(struct obd_export *exp, struct lov_stripe_md *lsm,
+ struct ost_lvb *lvb, int kms_only);
+ int (*o_adjust_kms)(struct obd_export *exp, struct lov_stripe_md *lsm,
+ obd_off size, int shrink);
+ int (*o_punch)(const struct lu_env *, struct obd_export *exp,
+ struct obd_info *oinfo, struct obd_trans_info *oti,
+ struct ptlrpc_request_set *rqset);
+ int (*o_sync)(const struct lu_env *env, struct obd_export *exp,
+ struct obd_info *oinfo, obd_size start, obd_size end,
+ struct ptlrpc_request_set *set);
+ int (*o_migrate)(struct lustre_handle *conn, struct lov_stripe_md *dst,
+ struct lov_stripe_md *src, obd_size start,
+ obd_size end, struct obd_trans_info *oti);
+ int (*o_copy)(struct lustre_handle *dstconn, struct lov_stripe_md *dst,
+ struct lustre_handle *srconn, struct lov_stripe_md *src,
+ obd_size start, obd_size end, struct obd_trans_info *);
+ int (*o_iterate)(struct lustre_handle *conn,
+ int (*)(obd_id, obd_seq, void *),
+ obd_id *startid, obd_seq seq, void *data);
+ int (*o_preprw)(const struct lu_env *env, int cmd,
+ struct obd_export *exp, struct obdo *oa, int objcount,
+ struct obd_ioobj *obj, struct niobuf_remote *remote,
+ int *nr_pages, struct niobuf_local *local,
+ struct obd_trans_info *oti, struct lustre_capa *capa);
+ int (*o_commitrw)(const struct lu_env *env, int cmd,
+ struct obd_export *exp, struct obdo *oa,
+ int objcount, struct obd_ioobj *obj,
+ struct niobuf_remote *remote, int pages,
+ struct niobuf_local *local,
+ struct obd_trans_info *oti, int rc);
+ int (*o_enqueue)(struct obd_export *, struct obd_info *oinfo,
+ struct ldlm_enqueue_info *einfo,
+ struct ptlrpc_request_set *rqset);
+ int (*o_change_cbdata)(struct obd_export *, struct lov_stripe_md *,
+ ldlm_iterator_t it, void *data);
+ int (*o_find_cbdata)(struct obd_export *, struct lov_stripe_md *,
+ ldlm_iterator_t it, void *data);
+ int (*o_cancel)(struct obd_export *, struct lov_stripe_md *md,
+ __u32 mode, struct lustre_handle *);
+ int (*o_cancel_unused)(struct obd_export *, struct lov_stripe_md *,
+ ldlm_cancel_flags_t flags, void *opaque);
+ int (*o_init_export)(struct obd_export *exp);
+ int (*o_destroy_export)(struct obd_export *exp);
+ int (*o_extent_calc)(struct obd_export *, struct lov_stripe_md *,
+ int cmd, obd_off *);
+
+ /* llog related obd_methods */
+ int (*o_llog_init)(struct obd_device *obd, struct obd_llog_group *grp,
+ struct obd_device *disk_obd, int *idx);
+ int (*o_llog_finish)(struct obd_device *obd, int count);
+ int (*o_llog_connect)(struct obd_export *, struct llogd_conn_body *);
+
+ /* metadata-only methods */
+ int (*o_pin)(struct obd_export *, const struct lu_fid *fid,
+ struct obd_capa *, struct obd_client_handle *, int flag);
+ int (*o_unpin)(struct obd_export *, struct obd_client_handle *, int);
+
+ int (*o_import_event)(struct obd_device *, struct obd_import *,
+ enum obd_import_event);
+
+ int (*o_notify)(struct obd_device *obd, struct obd_device *watched,
+ enum obd_notify_event ev, void *data);
+
+ int (*o_health_check)(const struct lu_env *env, struct obd_device *);
+ struct obd_uuid *(*o_get_uuid) (struct obd_export *exp);
+
+ /* quota methods */
+ int (*o_quotacheck)(struct obd_device *, struct obd_export *,
+ struct obd_quotactl *);
+ int (*o_quotactl)(struct obd_device *, struct obd_export *,
+ struct obd_quotactl *);
+
+ int (*o_ping)(const struct lu_env *, struct obd_export *exp);
+
+ /* pools methods */
+ int (*o_pool_new)(struct obd_device *obd, char *poolname);
+ int (*o_pool_del)(struct obd_device *obd, char *poolname);
+ int (*o_pool_add)(struct obd_device *obd, char *poolname,
+ char *ostname);
+ int (*o_pool_rem)(struct obd_device *obd, char *poolname,
+ char *ostname);
+ void (*o_getref)(struct obd_device *obd);
+ void (*o_putref)(struct obd_device *obd);
+ /*
+ * NOTE: If adding ops, add another LPROCFS_OBD_OP_INIT() line
+ * to lprocfs_alloc_obd_stats() in obdclass/lprocfs_status.c.
+ * Also, add a wrapper function in include/linux/obd_class.h. */
+};
+
+enum {
+ LUSTRE_OPC_MKDIR = (1 << 0),
+ LUSTRE_OPC_SYMLINK = (1 << 1),
+ LUSTRE_OPC_MKNOD = (1 << 2),
+ LUSTRE_OPC_CREATE = (1 << 3),
+ LUSTRE_OPC_ANY = (1 << 4)
+};
+
+/* lmv structures */
+#define MEA_MAGIC_LAST_CHAR 0xb2221ca1
+#define MEA_MAGIC_ALL_CHARS 0xb222a11c
+#define MEA_MAGIC_HASH_SEGMENT 0xb222a11b
+
+#define MAX_HASH_SIZE_32 0x7fffffffUL
+#define MAX_HASH_SIZE 0x7fffffffffffffffULL
+#define MAX_HASH_HIGHEST_BIT 0x1000000000000000ULL
+
+struct lustre_md {
+ struct mdt_body *body;
+ struct lov_stripe_md *lsm;
+ struct lmv_stripe_md *mea;
+#ifdef CONFIG_FS_POSIX_ACL
+ struct posix_acl *posix_acl;
+#endif
+ struct mdt_remote_perm *remote_perm;
+ struct obd_capa *mds_capa;
+ struct obd_capa *oss_capa;
+};
+
+struct md_open_data {
+ struct obd_client_handle *mod_och;
+ struct ptlrpc_request *mod_open_req;
+ struct ptlrpc_request *mod_close_req;
+ atomic_t mod_refcount;
+};
+
+struct lookup_intent;
+
+struct md_ops {
+ int (*m_getstatus)(struct obd_export *, struct lu_fid *,
+ struct obd_capa **);
+ int (*m_null_inode)(struct obd_export *, const struct lu_fid *);
+ int (*m_find_cbdata)(struct obd_export *, const struct lu_fid *,
+ ldlm_iterator_t, void *);
+ int (*m_close)(struct obd_export *, struct md_op_data *,
+ struct md_open_data *, struct ptlrpc_request **);
+ int (*m_create)(struct obd_export *, struct md_op_data *,
+ const void *, int, int, __u32, __u32, cfs_cap_t,
+ __u64, struct ptlrpc_request **);
+ int (*m_done_writing)(struct obd_export *, struct md_op_data *,
+ struct md_open_data *);
+ int (*m_enqueue)(struct obd_export *, struct ldlm_enqueue_info *,
+ struct lookup_intent *, struct md_op_data *,
+ struct lustre_handle *, void *, int,
+ struct ptlrpc_request **, __u64);
+ int (*m_getattr)(struct obd_export *, struct md_op_data *,
+ struct ptlrpc_request **);
+ int (*m_getattr_name)(struct obd_export *, struct md_op_data *,
+ struct ptlrpc_request **);
+ int (*m_intent_lock)(struct obd_export *, struct md_op_data *,
+ void *, int, struct lookup_intent *, int,
+ struct ptlrpc_request **,
+ ldlm_blocking_callback, __u64);
+ int (*m_link)(struct obd_export *, struct md_op_data *,
+ struct ptlrpc_request **);
+ int (*m_rename)(struct obd_export *, struct md_op_data *,
+ const char *, int, const char *, int,
+ struct ptlrpc_request **);
+ int (*m_is_subdir)(struct obd_export *, const struct lu_fid *,
+ const struct lu_fid *,
+ struct ptlrpc_request **);
+ int (*m_setattr)(struct obd_export *, struct md_op_data *, void *,
+ int , void *, int, struct ptlrpc_request **,
+ struct md_open_data **mod);
+ int (*m_sync)(struct obd_export *, const struct lu_fid *,
+ struct obd_capa *, struct ptlrpc_request **);
+ int (*m_readpage)(struct obd_export *, struct md_op_data *,
+ struct page **, struct ptlrpc_request **);
+
+ int (*m_unlink)(struct obd_export *, struct md_op_data *,
+ struct ptlrpc_request **);
+
+ int (*m_setxattr)(struct obd_export *, const struct lu_fid *,
+ struct obd_capa *, obd_valid, const char *,
+ const char *, int, int, int, __u32,
+ struct ptlrpc_request **);
+
+ int (*m_getxattr)(struct obd_export *, const struct lu_fid *,
+ struct obd_capa *, obd_valid, const char *,
+ const char *, int, int, int,
+ struct ptlrpc_request **);
+
+ int (*m_init_ea_size)(struct obd_export *, int, int, int);
+
+ int (*m_get_lustre_md)(struct obd_export *, struct ptlrpc_request *,
+ struct obd_export *, struct obd_export *,
+ struct lustre_md *);
+
+ int (*m_free_lustre_md)(struct obd_export *, struct lustre_md *);
+
+ int (*m_set_open_replay_data)(struct obd_export *,
+ struct obd_client_handle *,
+ struct ptlrpc_request *);
+ int (*m_clear_open_replay_data)(struct obd_export *,
+ struct obd_client_handle *);
+ int (*m_set_lock_data)(struct obd_export *, __u64 *, void *, __u64 *);
+
+ ldlm_mode_t (*m_lock_match)(struct obd_export *, __u64,
+ const struct lu_fid *, ldlm_type_t,
+ ldlm_policy_data_t *, ldlm_mode_t,
+ struct lustre_handle *);
+
+ int (*m_cancel_unused)(struct obd_export *, const struct lu_fid *,
+ ldlm_policy_data_t *, ldlm_mode_t,
+ ldlm_cancel_flags_t flags, void *opaque);
+ int (*m_renew_capa)(struct obd_export *, struct obd_capa *oc,
+ renew_capa_cb_t cb);
+ int (*m_unpack_capa)(struct obd_export *, struct ptlrpc_request *,
+ const struct req_msg_field *, struct obd_capa **);
+
+ int (*m_get_remote_perm)(struct obd_export *, const struct lu_fid *,
+ struct obd_capa *, __u32,
+ struct ptlrpc_request **);
+
+ int (*m_intent_getattr_async)(struct obd_export *,
+ struct md_enqueue_info *,
+ struct ldlm_enqueue_info *);
+
+ int (*m_revalidate_lock)(struct obd_export *, struct lookup_intent *,
+ struct lu_fid *, __u64 *bits);
+
+ /*
+ * NOTE: If adding ops, add another LPROCFS_MD_OP_INIT() line to
+ * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
+ * wrapper function in include/linux/obd_class.h.
+ */
+};
+
+struct lsm_operations {
+ void (*lsm_free)(struct lov_stripe_md *);
+ int (*lsm_destroy)(struct lov_stripe_md *, struct obdo *oa,
+ struct obd_export *md_exp);
+ void (*lsm_stripe_by_index)(struct lov_stripe_md *, int *, obd_off *,
+ obd_off *);
+ void (*lsm_stripe_by_offset)(struct lov_stripe_md *, int *, obd_off *,
+ obd_off *);
+ int (*lsm_lmm_verify) (struct lov_mds_md *lmm, int lmm_bytes,
+ __u16 *stripe_count);
+ int (*lsm_unpackmd) (struct lov_obd *lov, struct lov_stripe_md *lsm,
+ struct lov_mds_md *lmm);
+};
+
+extern const struct lsm_operations lsm_v1_ops;
+extern const struct lsm_operations lsm_v3_ops;
+static inline const struct lsm_operations *lsm_op_find(int magic)
+{
+ switch (magic) {
+ case LOV_MAGIC_V1:
+ return &lsm_v1_ops;
+ case LOV_MAGIC_V3:
+ return &lsm_v3_ops;
+ default:
+ CERROR("Cannot recognize lsm_magic %08x\n", magic);
+ return NULL;
+ }
+}
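+
+/*
+ * A minimal usage sketch of lsm_op_find() dispatch, illustrative only and
+ * kept under #if 0.  The wrapper name and the magic argument are
+ * hypothetical; only lsm_op_find() and struct lsm_operations come from
+ * this header.
+ */
+#if 0
+static void example_lsm_free(struct lov_stripe_md *lsm, int magic)
+{
+        const struct lsm_operations *op = lsm_op_find(magic);
+
+        if (op != NULL)
+                op->lsm_free(lsm);      /* per-layout free method */
+}
+#endif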
+
+/* Requests for obd_extent_calc() */
+#define OBD_CALC_STRIPE_START 1
+#define OBD_CALC_STRIPE_END 2
+
+static inline struct lustre_capa *oinfo_capa(struct obd_info *oinfo)
+{
+ return oinfo->oi_capa;
+}
+
+static inline struct md_open_data *obd_mod_alloc(void)
+{
+ struct md_open_data *mod;
+ OBD_ALLOC_PTR(mod);
+ if (mod == NULL)
+ return NULL;
+ atomic_set(&mod->mod_refcount, 1);
+ return mod;
+}
+
+#define obd_mod_get(mod) atomic_inc(&(mod)->mod_refcount)
+#define obd_mod_put(mod) \
+({ \
+ if (atomic_dec_and_test(&(mod)->mod_refcount)) { \
+ if ((mod)->mod_open_req) \
+ ptlrpc_req_finished((mod)->mod_open_req); \
+ OBD_FREE_PTR(mod); \
+ } \
+})
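+
+/*
+ * A minimal usage sketch of the md_open_data refcounting helpers above,
+ * illustrative only and kept under #if 0.  The function name and the
+ * open_req argument are hypothetical; obd_mod_alloc(), obd_mod_get() and
+ * obd_mod_put() are the helpers defined above.
+ */
+#if 0
+static int example_track_open(struct ptlrpc_request *open_req)
+{
+        struct md_open_data *mod = obd_mod_alloc(); /* refcount starts at 1 */
+
+        if (mod == NULL)
+                return -ENOMEM;
+        mod->mod_open_req = open_req;
+        obd_mod_get(mod);       /* extra reference for a second user */
+        /* ... */
+        obd_mod_put(mod);       /* drop the extra reference */
+        obd_mod_put(mod);       /* last put frees mod and finishes open_req */
+        return 0;
+}
+#endif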
+
+void obdo_from_inode(struct obdo *dst, struct inode *src, obd_flag valid);
+void obdo_set_parent_fid(struct obdo *dst, const struct lu_fid *parent);
+
+/* return 1 if the client should resend the request */
+static inline int client_should_resend(int resend, struct client_obd *cli)
+{
+ return atomic_read(&cli->cl_resends) ?
+ atomic_read(&cli->cl_resends) > resend : 1;
+}
+
+/**
+ * Return device name for this device
+ *
+ * XXX: lu_device is declared before obd_device, but lu_device holds a
+ * pointer back to its obd_device, so this helper is defined here
+ * instead of in lu_object.h
+ */
+static inline const char *lu_dev_name(const struct lu_device *lu_dev)
+{
+ return lu_dev->ld_obd->obd_name;
+}
+
+static inline bool filename_is_volatile(const char *name, int namelen, int *idx)
+{
+ const char *start;
+ char *end;
+
+ if (strncmp(name, LUSTRE_VOLATILE_HDR, LUSTRE_VOLATILE_HDR_LEN) != 0)
+ return false;
+
+ /* the caller does not care about idx */
+ if (idx == NULL)
+ return true;
+
+ /* volatile file; the MDT index may be encoded in the name */
+ /* name format is LUSTRE_VOLATILE_HDR:[idx]: */
+ /* if no MDT is specified, use the standard way */
+ if (namelen < LUSTRE_VOLATILE_HDR_LEN + 2)
+ goto bad_format;
+ /* test for no MDT idx case */
+ if ((*(name + LUSTRE_VOLATILE_HDR_LEN) == ':') &&
+ (*(name + LUSTRE_VOLATILE_HDR_LEN + 1) == ':')) {
+ *idx = -1;
+ return true;
+ }
+ /* we have an idx, read it */
+ start = name + LUSTRE_VOLATILE_HDR_LEN + 1;
+ *idx = strtoul(start, &end, 0);
+ /* error cases:
+ * no digit, no trailing :, negative value
+ */
+ if (((*idx == 0) && (end == start)) ||
+ (*end != ':') || (*idx < 0))
+ goto bad_format;
+
+ return true;
+bad_format:
+ /* bad format of MDT idx; we cannot return an error
+ * to the caller, so fall back to the hash algorithm */
+ CERROR("Bad volatile file name format: %s\n",
+ name + LUSTRE_VOLATILE_HDR_LEN);
+ return false;
+}
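+
+/*
+ * A minimal usage sketch of filename_is_volatile(), illustrative only and
+ * kept under #if 0.  The caller name is hypothetical; an idx of -1 means
+ * the name did not encode an MDT index, as handled above.
+ */
+#if 0
+static void example_check_volatile(const char *name, int namelen)
+{
+        int mdt_idx;
+
+        if (!filename_is_volatile(name, namelen, &mdt_idx))
+                return;         /* regular, non-volatile name */
+        CDEBUG(D_INFO, "volatile file, MDT index %d\n", mdt_idx);
+}
+#endif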
+
+static inline int cli_brw_size(struct obd_device *obd)
+{
+ LASSERT(obd != NULL);
+ return obd->u.cli.cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+}
+
+#endif /* __OBD_H */
diff --git a/drivers/staging/lustre/lustre/include/obd_cache.h b/drivers/staging/lustre/lustre/include/obd_cache.h
new file mode 100644
index 0000000..c8249fb
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/obd_cache.h
@@ -0,0 +1,39 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef _OBD_CACHE_H__
+#define _OBD_CACHE_H__
+
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/obd_cksum.h b/drivers/staging/lustre/lustre/include/obd_cksum.h
new file mode 100644
index 0000000..5f740f1
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/obd_cksum.h
@@ -0,0 +1,176 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __OBD_CKSUM
+#define __OBD_CKSUM
+#include <linux/libcfs/libcfs.h>
+#include <lustre/lustre_idl.h>
+
+static inline unsigned char cksum_obd2cfs(cksum_type_t cksum_type)
+{
+ switch (cksum_type) {
+ case OBD_CKSUM_CRC32:
+ return CFS_HASH_ALG_CRC32;
+ case OBD_CKSUM_ADLER:
+ return CFS_HASH_ALG_ADLER32;
+ case OBD_CKSUM_CRC32C:
+ return CFS_HASH_ALG_CRC32C;
+ default:
+ CERROR("Unknown checksum type (%x)!!!\n", cksum_type);
+ LBUG();
+ }
+ return 0;
+}
+
+/* The OBD_FL_CKSUM_* flags are packed into 5 bits of o_flags, since there can
+ * only be a single checksum type per RPC.
+ *
+ * The OBD_CHECKSUM_* type bits passed in ocd_cksum_types are a 32-bit bitmask
+ * since they need to represent the full range of checksum algorithms that
+ * both the client and server can understand.
+ *
+ * In case of unsupported types/flags we fall back to ADLER,
+ * because it has been supported by all clients since 1.8.
+ *
+ * If multiple algorithms are supported, the fastest one is used. */
+static inline obd_flag cksum_type_pack(cksum_type_t cksum_type)
+{
+ unsigned int performance = 0, tmp;
+ obd_flag flag = OBD_FL_CKSUM_ADLER;
+
+ if (cksum_type & OBD_CKSUM_CRC32) {
+ tmp = cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32));
+ if (tmp > performance) {
+ performance = tmp;
+ flag = OBD_FL_CKSUM_CRC32;
+ }
+ }
+ if (cksum_type & OBD_CKSUM_CRC32C) {
+ tmp = cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32C));
+ if (tmp > performance) {
+ performance = tmp;
+ flag = OBD_FL_CKSUM_CRC32C;
+ }
+ }
+ if (cksum_type & OBD_CKSUM_ADLER) {
+ tmp = cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_ADLER));
+ if (tmp > performance) {
+ performance = tmp;
+ flag = OBD_FL_CKSUM_ADLER;
+ }
+ }
+ if (unlikely(cksum_type && !(cksum_type & (OBD_CKSUM_CRC32C |
+ OBD_CKSUM_CRC32 |
+ OBD_CKSUM_ADLER))))
+ CWARN("unknown cksum type %x\n", cksum_type);
+
+ return flag;
+}
+
+static inline cksum_type_t cksum_type_unpack(obd_flag o_flags)
+{
+ switch (o_flags & OBD_FL_CKSUM_ALL) {
+ case OBD_FL_CKSUM_CRC32C:
+ return OBD_CKSUM_CRC32C;
+ case OBD_FL_CKSUM_CRC32:
+ return OBD_CKSUM_CRC32;
+ default:
+ break;
+ }
+
+ return OBD_CKSUM_ADLER;
+}
+
+/* Return a bitmask of the checksum types supported on this system.
+ * ADLER is the baseline (supported since 1.8) and does not depend on
+ * hardware support; the client uses all locally available algorithms.
+ */
+static inline cksum_type_t cksum_types_supported_client(void)
+{
+ cksum_type_t ret = OBD_CKSUM_ADLER;
+
+ CDEBUG(D_INFO, "Crypto hash speed: crc %d, crc32c %d, adler %d\n",
+ cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32)),
+ cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32C)),
+ cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_ADLER)));
+
+ if (cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32C)) > 0)
+ ret |= OBD_CKSUM_CRC32C;
+ if (cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32)) > 0)
+ ret |= OBD_CKSUM_CRC32;
+
+ return ret;
+}
+
+/* The server uses algorithms that perform at 50% or better of the Adler speed */
+static inline cksum_type_t cksum_types_supported_server(void)
+{
+ int base_speed;
+ cksum_type_t ret = OBD_CKSUM_ADLER;
+
+ CDEBUG(D_INFO, "Crypto hash speed: crc %d, crc32c %d, adler %d\n",
+ cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32)),
+ cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32C)),
+ cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_ADLER)));
+
+ base_speed = cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_ADLER)) / 2;
+
+ if (cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32C)) >=
+ base_speed)
+ ret |= OBD_CKSUM_CRC32C;
+ if (cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32)) >=
+ base_speed)
+ ret |= OBD_CKSUM_CRC32;
+
+ return ret;
+}
+
+
+/* Select the best checksum algorithm among those supplied in the cksum_types
+ * input.
+ *
+ * Currently, calling cksum_type_pack() with a mask will return the fastest
+ * checksum type due to its benchmarking at libcfs module load.
+ * Caution is advised, however, since what is fastest on a single client may
+ * not be the fastest or most efficient algorithm on the server. */
+static inline cksum_type_t cksum_type_select(cksum_type_t cksum_types)
+{
+ return cksum_type_unpack(cksum_type_pack(cksum_types));
+}
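+
+/*
+ * A minimal sketch of combining the helpers above to negotiate a checksum
+ * type, illustrative only and kept under #if 0.  The server_mask argument
+ * is hypothetical; cksum_types_supported_client() and cksum_type_select()
+ * are the helpers defined in this header.
+ */
+#if 0
+static cksum_type_t example_negotiate_cksum(cksum_type_t server_mask)
+{
+        /* algorithms both ends understand */
+        cksum_type_t common = cksum_types_supported_client() & server_mask;
+
+        /* fall back to ADLER if the masks do not intersect */
+        if (common == 0)
+                common = OBD_CKSUM_ADLER;
+
+        /* pick the fastest of the common algorithms */
+        return cksum_type_select(common);
+}
+#endif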
+
+/* Checksum algorithm names. Must be defined in the same order as the
+ * OBD_CKSUM_* flags. */
+#define DECLARE_CKSUM_NAME char *cksum_name[] = {"crc32", "adler", "crc32c"}
+
+#endif /* __OBD_CKSUM */
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
new file mode 100644
index 0000000..983718f
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/obd_class.h
@@ -0,0 +1,2191 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+#ifndef __CLASS_OBD_H
+#define __CLASS_OBD_H
+
+
+#include <obd_support.h>
+#include <lustre_import.h>
+#include <lustre_net.h>
+#include <obd.h>
+#include <lustre_lib.h>
+#include <lustre/lustre_idl.h>
+#include <lprocfs_status.h>
+
+#include <linux/obd_class.h>
+
+#define OBD_STATFS_NODELAY 0x0001 /* requests should be sent without delay,
+ * and resent, to avoid deadlocks */
+#define OBD_STATFS_FROM_CACHE 0x0002 /* the statfs callback should not update
+ * obd_osfs_age */
+#define OBD_STATFS_PTLRPCD 0x0004 /* requests will be sent via ptlrpcd
+ * instead of a specific set. This
+ * means that we cannot rely on the set
+ * interpret routine to be called.
+ * lov_statfs_fini() must thus be called
+ * by the request interpret routine */
+#define OBD_STATFS_FOR_MDT0 0x0008 /* The statfs is only for retrieving
+ * information from MDT0. */
+#define OBD_FL_PUNCH 0x00000001 /* To indicate it is punch operation */
+
+/* OBD Device Declarations */
+extern struct obd_device *obd_devs[MAX_OBD_DEVICES];
+extern rwlock_t obd_dev_lock;
+
+/* OBD Operations Declarations */
+extern struct obd_device *class_conn2obd(struct lustre_handle *);
+extern struct obd_device *class_exp2obd(struct obd_export *);
+extern int class_handle_ioctl(unsigned int cmd, unsigned long arg);
+extern int lustre_get_jobid(char *jobid);
+
+struct lu_device_type;
+
+/* genops.c */
+struct obd_export *class_conn2export(struct lustre_handle *);
+int class_register_type(struct obd_ops *, struct md_ops *,
+ struct lprocfs_vars *, const char *nm,
+ struct lu_device_type *ldt);
+int class_unregister_type(const char *nm);
+
+struct obd_device *class_newdev(const char *type_name, const char *name);
+void class_release_dev(struct obd_device *obd);
+
+int class_name2dev(const char *name);
+struct obd_device *class_name2obd(const char *name);
+int class_uuid2dev(struct obd_uuid *uuid);
+struct obd_device *class_uuid2obd(struct obd_uuid *uuid);
+void class_obd_list(void);
+struct obd_device * class_find_client_obd(struct obd_uuid *tgt_uuid,
+ const char * typ_name,
+ struct obd_uuid *grp_uuid);
+struct obd_device * class_devices_in_group(struct obd_uuid *grp_uuid,
+ int *next);
+struct obd_device * class_num2obd(int num);
+int get_devices_count(void);
+
+int class_notify_sptlrpc_conf(const char *fsname, int namelen);
+
+char *obd_export_nid2str(struct obd_export *exp);
+
+int obd_export_evict_by_nid(struct obd_device *obd, const char *nid);
+int obd_export_evict_by_uuid(struct obd_device *obd, const char *uuid);
+int obd_connect_flags2str(char *page, int count, __u64 flags, char *sep);
+
+int obd_zombie_impexp_init(void);
+void obd_zombie_impexp_stop(void);
+void obd_zombie_impexp_cull(void);
+void obd_zombie_barrier(void);
+void obd_exports_barrier(struct obd_device *obd);
+int kuc_len(int payload_len);
+struct kuc_hdr * kuc_ptr(void *p);
+int kuc_ispayload(void *p);
+void *kuc_alloc(int payload_len, int transport, int type);
+void kuc_free(void *p, int payload_len);
+
+struct llog_handle;
+struct llog_rec_hdr;
+typedef int (*llog_cb_t)(const struct lu_env *, struct llog_handle *,
+ struct llog_rec_hdr *, void *);
+/* obd_config.c */
+struct lustre_cfg *lustre_cfg_rename(struct lustre_cfg *cfg,
+ const char *new_name);
+int class_process_config(struct lustre_cfg *lcfg);
+int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
+ struct lustre_cfg *lcfg, void *data);
+int class_attach(struct lustre_cfg *lcfg);
+int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg);
+int class_cleanup(struct obd_device *obd, struct lustre_cfg *lcfg);
+int class_detach(struct obd_device *obd, struct lustre_cfg *lcfg);
+struct obd_device *class_incref(struct obd_device *obd,
+ const char *scope, const void *source);
+void class_decref(struct obd_device *obd,
+ const char *scope, const void *source);
+void dump_exports(struct obd_device *obd, int locks);
+int class_config_llog_handler(const struct lu_env *env,
+ struct llog_handle *handle,
+ struct llog_rec_hdr *rec, void *data);
+int class_add_conn(struct obd_device *obd, struct lustre_cfg *lcfg);
+int class_add_uuid(const char *uuid, __u64 nid);
+
+/*obdecho*/
+#ifdef LPROCFS
+extern void lprocfs_echo_init_vars(struct lprocfs_static_vars *lvars);
+#else
+static inline void lprocfs_echo_init_vars(struct lprocfs_static_vars *lvars)
+{
+ memset(lvars, 0, sizeof(*lvars));
+}
+#endif
+
+#define CFG_F_START 0x01 /* Set when we start updating from a log */
+#define CFG_F_MARKER 0x02 /* We are within a marker */
+#define CFG_F_SKIP 0x04 /* We should ignore this cfg command */
+#define CFG_F_COMPAT146 0x08 /* Allow old-style logs */
+#define CFG_F_EXCLUDE 0x10 /* OST exclusion list */
+
+/* Passed as data param to class_config_parse_llog */
+struct config_llog_instance {
+ char *cfg_obdname;
+ void *cfg_instance;
+ struct super_block *cfg_sb;
+ struct obd_uuid cfg_uuid;
+ llog_cb_t cfg_callback;
+ int cfg_last_idx; /* for partial llog processing */
+ int cfg_flags;
+};
+int class_config_parse_llog(const struct lu_env *env, struct llog_ctxt *ctxt,
+ char *name, struct config_llog_instance *cfg);
+int class_config_dump_llog(const struct lu_env *env, struct llog_ctxt *ctxt,
+ char *name, struct config_llog_instance *cfg);
+
+enum {
+ CONFIG_T_CONFIG = 0,
+ CONFIG_T_SPTLRPC = 1,
+ CONFIG_T_RECOVER = 2,
+ CONFIG_T_MAX = 3
+};
+
+/* list of active configuration logs */
+struct config_llog_data {
+ struct ldlm_res_id cld_resid;
+ struct config_llog_instance cld_cfg;
+ struct list_head cld_list_chain;
+ atomic_t cld_refcount;
+ struct config_llog_data *cld_sptlrpc; /* dependent sptlrpc log */
+ struct config_llog_data *cld_recover; /* imperative recover log */
+ struct obd_export *cld_mgcexp;
+ struct mutex cld_lock;
+ int cld_type;
+ unsigned int cld_stopping:1, /* we were told to stop
+ * watching */
+ cld_lostlock:1; /* lock not requeued */
+ char cld_logname[0];
+};
+
+struct lustre_profile {
+ struct list_head lp_list;
+ char *lp_profile;
+ char *lp_dt;
+ char *lp_md;
+};
+
+struct lustre_profile *class_get_profile(const char * prof);
+void class_del_profile(const char *prof);
+void class_del_profiles(void);
+
+#if LUSTRE_TRACKS_LOCK_EXP_REFS
+
+void __class_export_add_lock_ref(struct obd_export *, struct ldlm_lock *);
+void __class_export_del_lock_ref(struct obd_export *, struct ldlm_lock *);
+extern void (*class_export_dump_hook)(struct obd_export *);
+
+#else
+
+#define __class_export_add_lock_ref(exp, lock) do {} while(0)
+#define __class_export_del_lock_ref(exp, lock) do {} while(0)
+
+#endif
+
+#define class_export_rpc_inc(exp) \
+({ \
+ atomic_inc(&(exp)->exp_rpc_count); \
+ CDEBUG(D_INFO, "RPC GETting export %p : new rpc_count %d\n", \
+ (exp), atomic_read(&(exp)->exp_rpc_count)); \
+})
+
+#define class_export_rpc_dec(exp) \
+({ \
+ LASSERT_ATOMIC_POS(&exp->exp_rpc_count); \
+ atomic_dec(&(exp)->exp_rpc_count); \
+ CDEBUG(D_INFO, "RPC PUTting export %p : new rpc_count %d\n", \
+ (exp), atomic_read(&(exp)->exp_rpc_count)); \
+})
+
+#define class_export_lock_get(exp, lock) \
+({ \
+ atomic_inc(&(exp)->exp_locks_count); \
+ __class_export_add_lock_ref(exp, lock); \
+ CDEBUG(D_INFO, "lock GETting export %p : new locks_count %d\n", \
+ (exp), atomic_read(&(exp)->exp_locks_count)); \
+ class_export_get(exp); \
+})
+
+#define class_export_lock_put(exp, lock) \
+({ \
+ LASSERT_ATOMIC_POS(&exp->exp_locks_count); \
+ atomic_dec(&(exp)->exp_locks_count); \
+ __class_export_del_lock_ref(exp, lock); \
+ CDEBUG(D_INFO, "lock PUTting export %p : new locks_count %d\n", \
+ (exp), atomic_read(&(exp)->exp_locks_count)); \
+ class_export_put(exp); \
+})
+
+#define class_export_cb_get(exp) \
+({ \
+ atomic_inc(&(exp)->exp_cb_count); \
+ CDEBUG(D_INFO, "callback GETting export %p : new cb_count %d\n",\
+ (exp), atomic_read(&(exp)->exp_cb_count)); \
+ class_export_get(exp); \
+})
+
+#define class_export_cb_put(exp) \
+({ \
+ LASSERT_ATOMIC_POS(&exp->exp_cb_count); \
+ atomic_dec(&(exp)->exp_cb_count); \
+ CDEBUG(D_INFO, "callback PUTting export %p : new cb_count %d\n",\
+ (exp), atomic_read(&(exp)->exp_cb_count)); \
+ class_export_put(exp); \
+})
+
+/* genops.c */
+struct obd_export *class_export_get(struct obd_export *exp);
+void class_export_put(struct obd_export *exp);
+struct obd_export *class_new_export(struct obd_device *obddev,
+ struct obd_uuid *cluuid);
+void class_unlink_export(struct obd_export *exp);
+
+struct obd_import *class_import_get(struct obd_import *);
+void class_import_put(struct obd_import *);
+struct obd_import *class_new_import(struct obd_device *obd);
+void class_destroy_import(struct obd_import *exp);
+
+struct obd_type *class_search_type(const char *name);
+struct obd_type *class_get_type(const char *name);
+void class_put_type(struct obd_type *type);
+int class_connect(struct lustre_handle *conn, struct obd_device *obd,
+ struct obd_uuid *cluuid);
+int class_disconnect(struct obd_export *exp);
+void class_fail_export(struct obd_export *exp);
+int class_connected_export(struct obd_export *exp);
+void class_disconnect_exports(struct obd_device *obddev);
+int class_manual_cleanup(struct obd_device *obd);
+void class_disconnect_stale_exports(struct obd_device *,
+ int (*test_export)(struct obd_export *));
+static inline enum obd_option exp_flags_from_obd(struct obd_device *obd)
+{
+ return ((obd->obd_fail ? OBD_OPT_FAILOVER : 0) |
+ (obd->obd_force ? OBD_OPT_FORCE : 0) |
+ (obd->obd_abort_recovery ? OBD_OPT_ABORT_RECOV : 0) |
+ 0);
+}
+
+
+void obdo_cpy_md(struct obdo *dst, struct obdo *src, obd_flag valid);
+void obdo_to_ioobj(struct obdo *oa, struct obd_ioobj *ioobj);
+void obdo_from_iattr(struct obdo *oa, struct iattr *attr,
+ unsigned int ia_valid);
+void iattr_from_obdo(struct iattr *attr, struct obdo *oa, obd_flag valid);
+void md_from_obdo(struct md_op_data *op_data, struct obdo *oa, obd_flag valid);
+void obdo_from_md(struct obdo *oa, struct md_op_data *op_data,
+ unsigned int valid);
+
+void obdo_cpu_to_le(struct obdo *dobdo, struct obdo *sobdo);
+void obdo_le_to_cpu(struct obdo *dobdo, struct obdo *sobdo);
+
+#define OBT(dev) (dev)->obd_type
+#define OBP(dev, op) (dev)->obd_type->typ_dt_ops->o_ ## op
+#define MDP(dev, op) (dev)->obd_type->typ_md_ops->m_ ## op
+#define CTXTP(ctxt, op) (ctxt)->loc_logops->lop_##op
+
+/* Ensure obd_setup has run: used by cleanup which must be callable
+ while the obd is stopping */
+#define OBD_CHECK_DEV(obd) \
+do { \
+ if (!(obd)) { \
+ CERROR("NULL device\n"); \
+ return -ENODEV; \
+ } \
+} while (0)
+
+/* ensure obd_setup and !obd_stopping */
+#define OBD_CHECK_DEV_ACTIVE(obd) \
+do { \
+ OBD_CHECK_DEV(obd); \
+ if (!(obd)->obd_set_up || (obd)->obd_stopping) { \
+ CERROR("Device %d not setup\n", \
+ (obd)->obd_minor); \
+ return -ENODEV; \
+ } \
+} while (0)
+
+
+#ifdef LPROCFS
+#define OBD_COUNTER_OFFSET(op) \
+ ((offsetof(struct obd_ops, o_ ## op) - \
+ offsetof(struct obd_ops, o_iocontrol)) \
+ / sizeof(((struct obd_ops *)(0))->o_iocontrol))
+
+#define OBD_COUNTER_INCREMENT(obdx, op) \
+ if ((obdx)->obd_stats != NULL) { \
+ unsigned int coffset; \
+ coffset = (unsigned int)((obdx)->obd_cntr_base) + \
+ OBD_COUNTER_OFFSET(op); \
+ LASSERT(coffset < (obdx)->obd_stats->ls_num); \
+ lprocfs_counter_incr((obdx)->obd_stats, coffset); \
+ }
+
+#define EXP_COUNTER_INCREMENT(export, op) \
+ if ((export)->exp_obd->obd_stats != NULL) { \
+ unsigned int coffset; \
+ coffset = (unsigned int)((export)->exp_obd->obd_cntr_base) + \
+ OBD_COUNTER_OFFSET(op); \
+ LASSERT(coffset < (export)->exp_obd->obd_stats->ls_num); \
+ lprocfs_counter_incr((export)->exp_obd->obd_stats, coffset); \
+ if ((export)->exp_nid_stats != NULL && \
+ (export)->exp_nid_stats->nid_stats != NULL) \
+ lprocfs_counter_incr( \
+ (export)->exp_nid_stats->nid_stats, coffset);\
+ }
+
+#define MD_COUNTER_OFFSET(op) \
+ ((offsetof(struct md_ops, m_ ## op) - \
+ offsetof(struct md_ops, m_getstatus)) \
+ / sizeof(((struct md_ops *)(0))->m_getstatus))
+
+#define MD_COUNTER_INCREMENT(obdx, op) \
+ if ((obdx)->md_stats != NULL) { \
+ unsigned int coffset; \
+ coffset = (unsigned int)((obdx)->md_cntr_base) + \
+ MD_COUNTER_OFFSET(op); \
+ LASSERT(coffset < (obdx)->md_stats->ls_num); \
+ lprocfs_counter_incr((obdx)->md_stats, coffset); \
+ }
+
+#define EXP_MD_COUNTER_INCREMENT(export, op) \
+ if ((export)->exp_obd->md_stats != NULL) { \
+ unsigned int coffset; \
+ coffset = (unsigned int)((export)->exp_obd->md_cntr_base) + \
+ MD_COUNTER_OFFSET(op); \
+ LASSERT(coffset < (export)->exp_obd->md_stats->ls_num); \
+ lprocfs_counter_incr((export)->exp_obd->md_stats, coffset); \
+ if ((export)->exp_md_stats != NULL) \
+ lprocfs_counter_incr( \
+ (export)->exp_md_stats, coffset); \
+ }
+
+#else
+#define OBD_COUNTER_OFFSET(op)
+#define OBD_COUNTER_INCREMENT(obd, op)
+#define EXP_COUNTER_INCREMENT(exp, op)
+#define MD_COUNTER_INCREMENT(obd, op)
+#define EXP_MD_COUNTER_INCREMENT(exp, op)
+#endif
+
+static inline int lprocfs_nid_ldlm_stats_init(struct nid_stat* tmp)
+{
+ /* Always add in ldlm_stats */
+ tmp->nid_ldlm_stats = lprocfs_alloc_stats(LDLM_LAST_OPC - LDLM_FIRST_OPC,
+ LPROCFS_STATS_FLAG_NOPERCPU);
+ if (tmp->nid_ldlm_stats == NULL)
+ return -ENOMEM;
+
+ lprocfs_init_ldlm_stats(tmp->nid_ldlm_stats);
+
+ return lprocfs_register_stats(tmp->nid_proc, "ldlm_stats",
+ tmp->nid_ldlm_stats);
+}
+
+#define OBD_CHECK_MD_OP(obd, op, err) \
+do { \
+ if (!OBT(obd) || !MDP((obd), op)) { \
+ if (err) \
+ CERROR("md_" #op ": dev %s/%d no operation\n", \
+ obd->obd_name, obd->obd_minor); \
+ return err; \
+ } \
+} while (0)
+
+#define EXP_CHECK_MD_OP(exp, op) \
+do { \
+ if ((exp) == NULL) { \
+ CERROR("obd_" #op ": NULL export\n"); \
+ return -ENODEV; \
+ } \
+ if ((exp)->exp_obd == NULL || !OBT((exp)->exp_obd)) { \
+ CERROR("obd_" #op ": cleaned up obd\n"); \
+ return -EOPNOTSUPP; \
+ } \
+ if (!OBT((exp)->exp_obd) || !MDP((exp)->exp_obd, op)) { \
+ CERROR("obd_" #op ": dev %s/%d no operation\n", \
+ (exp)->exp_obd->obd_name, \
+ (exp)->exp_obd->obd_minor); \
+ return -EOPNOTSUPP; \
+ } \
+} while (0)
+
+
+#define OBD_CHECK_DT_OP(obd, op, err) \
+do { \
+ if (!OBT(obd) || !OBP((obd), op)) { \
+ if (err) \
+ CERROR("obd_" #op ": dev %d no operation\n", \
+ obd->obd_minor); \
+ return err; \
+ } \
+} while (0)
+
+#define EXP_CHECK_DT_OP(exp, op) \
+do { \
+ if ((exp) == NULL) { \
+ CERROR("obd_" #op ": NULL export\n"); \
+ return -ENODEV; \
+ } \
+ if ((exp)->exp_obd == NULL || !OBT((exp)->exp_obd)) { \
+ CERROR("obd_" #op ": cleaned up obd\n"); \
+ return -EOPNOTSUPP; \
+ } \
+ if (!OBT((exp)->exp_obd) || !OBP((exp)->exp_obd, op)) { \
+ CERROR("obd_" #op ": dev %d no operation\n", \
+ (exp)->exp_obd->obd_minor); \
+ return -EOPNOTSUPP; \
+ } \
+} while (0)
+
+#define CTXT_CHECK_OP(ctxt, op, err) \
+do { \
+ if (!OBT(ctxt->loc_obd) || !CTXTP((ctxt), op)) { \
+ if (err) \
+ CERROR("lop_" #op ": dev %d no operation\n", \
+ ctxt->loc_obd->obd_minor); \
+ return err; \
+ } \
+} while (0)
+
+static inline int class_devno_max(void)
+{
+ return MAX_OBD_DEVICES;
+}
+
+static inline int obd_get_info(const struct lu_env *env,
+ struct obd_export *exp, __u32 keylen,
+ void *key, __u32 *vallen, void *val,
+ struct lov_stripe_md *lsm)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, get_info);
+ EXP_COUNTER_INCREMENT(exp, get_info);
+
+ rc = OBP(exp->exp_obd, get_info)(env, exp, keylen, key, vallen, val,
+ lsm);
+ return rc;
+}
+
+static inline int obd_set_info_async(const struct lu_env *env,
+ struct obd_export *exp, obd_count keylen,
+ void *key, obd_count vallen, void *val,
+ struct ptlrpc_request_set *set)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, set_info_async);
+ EXP_COUNTER_INCREMENT(exp, set_info_async);
+
+ rc = OBP(exp->exp_obd, set_info_async)(env, exp, keylen, key, vallen,
+ val, set);
+ return rc;
+}
+
+/*
+ * obd-lu integration.
+ *
+ * Functionality is being moved into new lu_device-based layering, but some
+ * pieces of configuration process are still based on obd devices.
+ *
+ * Specifically, lu_device_type_operations::ldto_device_alloc() methods fully
+ * subsume ->o_setup() methods of obd devices they replace. The same for
+ * lu_device_operations::ldo_process_config() and ->o_process_config(). As a
+ * result, obd_setup() and obd_process_config() branch and call one XOR
+ * another.
+ *
+ * Yet neither lu_device_type_operations::ldto_device_fini() nor
+ * lu_device_type_operations::ldto_device_free() fully implement the
+ * functionality of ->o_precleanup() and ->o_cleanup() they override. Hence,
+ * obd_precleanup() and obd_cleanup() call both lu_device and obd operations.
+ */
+
+#define DECLARE_LU_VARS(ldt, d) \
+ struct lu_device_type *ldt; \
+ struct lu_device *d
+
+static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg)
+{
+ int rc;
+ DECLARE_LU_VARS(ldt, d);
+
+ ldt = obd->obd_type->typ_lu;
+ if (ldt != NULL) {
+ struct lu_context session_ctx;
+ struct lu_env env;
+ lu_context_init(&session_ctx, LCT_SESSION);
+ session_ctx.lc_thread = NULL;
+ lu_context_enter(&session_ctx);
+
+ rc = lu_env_init(&env, ldt->ldt_ctx_tags);
+ if (rc == 0) {
+ env.le_ses = &session_ctx;
+ d = ldt->ldt_ops->ldto_device_alloc(&env, ldt, cfg);
+ lu_env_fini(&env);
+ if (!IS_ERR(d)) {
+ obd->obd_lu_dev = d;
+ d->ld_obd = obd;
+ rc = 0;
+ } else
+ rc = PTR_ERR(d);
+ }
+ lu_context_exit(&session_ctx);
+ lu_context_fini(&session_ctx);
+
+ } else {
+ OBD_CHECK_DT_OP(obd, setup, -EOPNOTSUPP);
+ OBD_COUNTER_INCREMENT(obd, setup);
+ rc = OBP(obd, setup)(obd, cfg);
+ }
+ return rc;
+}
+
+static inline int obd_precleanup(struct obd_device *obd,
+ enum obd_cleanup_stage cleanup_stage)
+{
+ int rc;
+ DECLARE_LU_VARS(ldt, d);
+
+ OBD_CHECK_DEV(obd);
+ ldt = obd->obd_type->typ_lu;
+ d = obd->obd_lu_dev;
+ if (ldt != NULL && d != NULL) {
+ if (cleanup_stage == OBD_CLEANUP_EXPORTS) {
+ struct lu_env env;
+
+ rc = lu_env_init(&env, ldt->ldt_ctx_tags);
+ if (rc == 0) {
+ ldt->ldt_ops->ldto_device_fini(&env, d);
+ lu_env_fini(&env);
+ }
+ }
+ }
+ OBD_CHECK_DT_OP(obd, precleanup, 0);
+ OBD_COUNTER_INCREMENT(obd, precleanup);
+
+ rc = OBP(obd, precleanup)(obd, cleanup_stage);
+ return rc;
+}
+
+static inline int obd_cleanup(struct obd_device *obd)
+{
+ int rc;
+ DECLARE_LU_VARS(ldt, d);
+
+ OBD_CHECK_DEV(obd);
+
+ ldt = obd->obd_type->typ_lu;
+ d = obd->obd_lu_dev;
+ if (ldt != NULL && d != NULL) {
+ struct lu_env env;
+
+ rc = lu_env_init(&env, ldt->ldt_ctx_tags);
+ if (rc == 0) {
+ ldt->ldt_ops->ldto_device_free(&env, d);
+ lu_env_fini(&env);
+ obd->obd_lu_dev = NULL;
+ }
+ }
+ OBD_CHECK_DT_OP(obd, cleanup, 0);
+ OBD_COUNTER_INCREMENT(obd, cleanup);
+
+ rc = OBP(obd, cleanup)(obd);
+ return rc;
+}
+
+static inline void obd_cleanup_client_import(struct obd_device *obd)
+{
+ /* If we set up but never connected, the
+ client import will not have been cleaned. */
+ down_write(&obd->u.cli.cl_sem);
+ if (obd->u.cli.cl_import) {
+ struct obd_import *imp;
+ imp = obd->u.cli.cl_import;
+ CDEBUG(D_CONFIG, "%s: client import never connected\n",
+ obd->obd_name);
+ ptlrpc_invalidate_import(imp);
+ if (imp->imp_rq_pool) {
+ ptlrpc_free_rq_pool(imp->imp_rq_pool);
+ imp->imp_rq_pool = NULL;
+ }
+ client_destroy_import(imp);
+ obd->u.cli.cl_import = NULL;
+ }
+ up_write(&obd->u.cli.cl_sem);
+}
+
+static inline int
+obd_process_config(struct obd_device *obd, int datalen, void *data)
+{
+ int rc;
+ DECLARE_LU_VARS(ldt, d);
+
+ OBD_CHECK_DEV(obd);
+
+ obd->obd_process_conf = 1;
+ ldt = obd->obd_type->typ_lu;
+ d = obd->obd_lu_dev;
+ if (ldt != NULL && d != NULL) {
+ struct lu_env env;
+
+ rc = lu_env_init(&env, ldt->ldt_ctx_tags);
+ if (rc == 0) {
+ rc = d->ld_ops->ldo_process_config(&env, d, data);
+ lu_env_fini(&env);
+ }
+ } else {
+ OBD_CHECK_DT_OP(obd, process_config, -EOPNOTSUPP);
+ rc = OBP(obd, process_config)(obd, datalen, data);
+ }
+ OBD_COUNTER_INCREMENT(obd, process_config);
+ obd->obd_process_conf = 0;
+
+ return rc;
+}
+
+/* Pack an in-memory MD struct for storage on disk.
+ * Returns +ve size of packed MD (0 for free), or -ve error.
+ *
+ * If @disk_tgt == NULL, MD size is returned (max size if @mem_src == NULL).
+ * If @*disk_tgt != NULL and @mem_src == NULL, @*disk_tgt will be freed.
+ * If @*disk_tgt == NULL, it will be allocated
+ */
+static inline int obd_packmd(struct obd_export *exp,
+ struct lov_mds_md **disk_tgt,
+ struct lov_stripe_md *mem_src)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, packmd);
+ EXP_COUNTER_INCREMENT(exp, packmd);
+
+ rc = OBP(exp->exp_obd, packmd)(exp, disk_tgt, mem_src);
+ return rc;
+}
+
+static inline int obd_size_diskmd(struct obd_export *exp,
+ struct lov_stripe_md *mem_src)
+{
+ return obd_packmd(exp, NULL, mem_src);
+}
+
+/* helper functions */
+static inline int obd_alloc_diskmd(struct obd_export *exp,
+ struct lov_mds_md **disk_tgt)
+{
+ LASSERT(disk_tgt);
+ LASSERT(*disk_tgt == NULL);
+ return obd_packmd(exp, disk_tgt, NULL);
+}
+
+static inline int obd_free_diskmd(struct obd_export *exp,
+ struct lov_mds_md **disk_tgt)
+{
+ LASSERT(disk_tgt);
+ LASSERT(*disk_tgt);
+ /*
+ * LU-2590: for the caller's convenience, *disk_tgt may be in host
+ * endianness, so it needs swabbing to LE if necessary, although only
+ * the lov_mds_md header is needed to figure out how much memory
+ * must be freed.
+ */
+ if ((cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) &&
+ (((*disk_tgt)->lmm_magic == LOV_MAGIC_V1) ||
+ ((*disk_tgt)->lmm_magic == LOV_MAGIC_V3)))
+ lustre_swab_lov_mds_md(*disk_tgt);
+ return obd_packmd(exp, disk_tgt, NULL);
+}
+
+/* Unpack an MD struct from disk to in-memory format.
+ * Returns +ve size of unpacked MD (0 for free), or -ve error.
+ *
+ * If @mem_tgt == NULL, MD size is returned (max size if @disk_src == NULL).
+ * If @*mem_tgt != NULL and @disk_src == NULL, @*mem_tgt will be freed.
+ * If @*mem_tgt == NULL, it will be allocated
+ */
+static inline int obd_unpackmd(struct obd_export *exp,
+ struct lov_stripe_md **mem_tgt,
+ struct lov_mds_md *disk_src,
+ int disk_len)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, unpackmd);
+ EXP_COUNTER_INCREMENT(exp, unpackmd);
+
+ rc = OBP(exp->exp_obd, unpackmd)(exp, mem_tgt, disk_src, disk_len);
+ return rc;
+}
+
+/* helper functions */
+static inline int obd_alloc_memmd(struct obd_export *exp,
+ struct lov_stripe_md **mem_tgt)
+{
+ LASSERT(mem_tgt);
+ LASSERT(*mem_tgt == NULL);
+ return obd_unpackmd(exp, mem_tgt, NULL, 0);
+}
+
+static inline int obd_free_memmd(struct obd_export *exp,
+ struct lov_stripe_md **mem_tgt)
+{
+ int rc;
+
+ LASSERT(mem_tgt);
+ LASSERT(*mem_tgt);
+ rc = obd_unpackmd(exp, mem_tgt, NULL, 0);
+ *mem_tgt = NULL;
+ return rc;
+}
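+
+/*
+ * A minimal sketch of the unpack/free pairing for in-memory striping MD,
+ * illustrative only and kept under #if 0.  The function name is
+ * hypothetical; obd_unpackmd() and obd_free_memmd() are defined above.
+ */
+#if 0
+static int example_md_roundtrip(struct obd_export *exp,
+                                struct lov_mds_md *disk_src, int disk_len)
+{
+        struct lov_stripe_md *lsm = NULL;
+        int rc;
+
+        /* *lsm is NULL, so unpacking allocates it */
+        rc = obd_unpackmd(exp, &lsm, disk_src, disk_len);
+        if (rc < 0)
+                return rc;
+        /* ... use lsm ... */
+        obd_free_memmd(exp, &lsm);      /* frees lsm and resets it to NULL */
+        return 0;
+}
+#endif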
+
+static inline int obd_precreate(struct obd_export *exp)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, precreate);
+ OBD_COUNTER_INCREMENT(exp->exp_obd, precreate);
+
+ rc = OBP(exp->exp_obd, precreate)(exp);
+ return rc;
+}
+
+static inline int obd_create_async(struct obd_export *exp,
+ struct obd_info *oinfo,
+ struct lov_stripe_md **ea,
+ struct obd_trans_info *oti)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, create_async);
+ EXP_COUNTER_INCREMENT(exp, create_async);
+
+ rc = OBP(exp->exp_obd, create_async)(exp, oinfo, ea, oti);
+ return rc;
+}
+
+static inline int obd_create(const struct lu_env *env, struct obd_export *exp,
+ struct obdo *obdo, struct lov_stripe_md **ea,
+ struct obd_trans_info *oti)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, create);
+ EXP_COUNTER_INCREMENT(exp, create);
+
+ rc = OBP(exp->exp_obd, create)(env, exp, obdo, ea, oti);
+ return rc;
+}
+
+static inline int obd_destroy(const struct lu_env *env, struct obd_export *exp,
+ struct obdo *obdo, struct lov_stripe_md *ea,
+ struct obd_trans_info *oti,
+ struct obd_export *md_exp, void *capa)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, destroy);
+ EXP_COUNTER_INCREMENT(exp, destroy);
+
+ rc = OBP(exp->exp_obd, destroy)(env, exp, obdo, ea, oti, md_exp, capa);
+ return rc;
+}
+
+static inline int obd_getattr(const struct lu_env *env, struct obd_export *exp,
+ struct obd_info *oinfo)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, getattr);
+ EXP_COUNTER_INCREMENT(exp, getattr);
+
+ rc = OBP(exp->exp_obd, getattr)(env, exp, oinfo);
+ return rc;
+}
+
+static inline int obd_getattr_async(struct obd_export *exp,
+ struct obd_info *oinfo,
+ struct ptlrpc_request_set *set)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, getattr_async);
+ EXP_COUNTER_INCREMENT(exp, getattr_async);
+
+ rc = OBP(exp->exp_obd, getattr_async)(exp, oinfo, set);
+ return rc;
+}
+
+static inline int obd_setattr(const struct lu_env *env, struct obd_export *exp,
+ struct obd_info *oinfo,
+ struct obd_trans_info *oti)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, setattr);
+ EXP_COUNTER_INCREMENT(exp, setattr);
+
+ rc = OBP(exp->exp_obd, setattr)(env, exp, oinfo, oti);
+ return rc;
+}
+
+/* This performs all the requests set init/wait/destroy actions. */
+static inline int obd_setattr_rqset(struct obd_export *exp,
+ struct obd_info *oinfo,
+ struct obd_trans_info *oti)
+{
+ struct ptlrpc_request_set *set = NULL;
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, setattr_async);
+ EXP_COUNTER_INCREMENT(exp, setattr_async);
+
+ set = ptlrpc_prep_set();
+ if (set == NULL)
+ return -ENOMEM;
+
+ rc = OBP(exp->exp_obd, setattr_async)(exp, oinfo, oti, set);
+ if (rc == 0)
+ rc = ptlrpc_set_wait(set);
+ ptlrpc_set_destroy(set);
+ return rc;
+}
+
+/* This adds all the requests into @set if @set != NULL; otherwise
+ all requests are sent asynchronously without waiting for a response. */
+static inline int obd_setattr_async(struct obd_export *exp,
+ struct obd_info *oinfo,
+ struct obd_trans_info *oti,
+ struct ptlrpc_request_set *set)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, setattr_async);
+ EXP_COUNTER_INCREMENT(exp, setattr_async);
+
+ rc = OBP(exp->exp_obd, setattr_async)(exp, oinfo, oti, set);
+ return rc;
+}
+
+static inline int obd_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
+ int priority)
+{
+ struct obd_device *obd = imp->imp_obd;
+ int rc;
+
+ OBD_CHECK_DEV_ACTIVE(obd);
+ OBD_CHECK_DT_OP(obd, add_conn, -EOPNOTSUPP);
+ OBD_COUNTER_INCREMENT(obd, add_conn);
+
+ rc = OBP(obd, add_conn)(imp, uuid, priority);
+ return rc;
+}
+
+static inline int obd_del_conn(struct obd_import *imp, struct obd_uuid *uuid)
+{
+ struct obd_device *obd = imp->imp_obd;
+ int rc;
+
+ OBD_CHECK_DEV_ACTIVE(obd);
+ OBD_CHECK_DT_OP(obd, del_conn, -EOPNOTSUPP);
+ OBD_COUNTER_INCREMENT(obd, del_conn);
+
+ rc = OBP(obd, del_conn)(imp, uuid);
+ return rc;
+}
+
+static inline struct obd_uuid *obd_get_uuid(struct obd_export *exp)
+{
+ struct obd_uuid *uuid;
+
+ OBD_CHECK_DT_OP(exp->exp_obd, get_uuid, NULL);
+ EXP_COUNTER_INCREMENT(exp, get_uuid);
+
+ uuid = OBP(exp->exp_obd, get_uuid)(exp);
+ return uuid;
+}
+
+/** Create a new \a exp on device \a obd for the uuid \a cluuid
+ * @param exp New export handle
+ * @param data Connect data; the supported flags are set on input, and the
+ * flags also understood by the obd are returned.
+ */
+static inline int obd_connect(const struct lu_env *env,
+ struct obd_export **exp, struct obd_device *obd,
+ struct obd_uuid *cluuid,
+ struct obd_connect_data *data,
+ void *localdata)
+{
+ int rc;
+ __u64 ocf = data ? data->ocd_connect_flags : 0; /* for post-condition
+ * check */
+
+ OBD_CHECK_DEV_ACTIVE(obd);
+ OBD_CHECK_DT_OP(obd, connect, -EOPNOTSUPP);
+ OBD_COUNTER_INCREMENT(obd, connect);
+
+ rc = OBP(obd, connect)(env, exp, obd, cluuid, data, localdata);
+ /* check that only subset is granted */
+ LASSERT(ergo(data != NULL, (data->ocd_connect_flags & ocf) ==
+ data->ocd_connect_flags));
+ return rc;
+}
+
+static inline int obd_reconnect(const struct lu_env *env,
+ struct obd_export *exp,
+ struct obd_device *obd,
+ struct obd_uuid *cluuid,
+ struct obd_connect_data *d,
+ void *localdata)
+{
+ int rc;
+ __u64 ocf = d ? d->ocd_connect_flags : 0; /* for post-condition
+ * check */
+
+ OBD_CHECK_DEV_ACTIVE(obd);
+ OBD_CHECK_DT_OP(obd, reconnect, 0);
+ OBD_COUNTER_INCREMENT(obd, reconnect);
+
+ rc = OBP(obd, reconnect)(env, exp, obd, cluuid, d, localdata);
+ /* check that only subset is granted */
+ LASSERT(ergo(d != NULL,
+ (d->ocd_connect_flags & ocf) == d->ocd_connect_flags));
+ return rc;
+}
+
+static inline int obd_disconnect(struct obd_export *exp)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, disconnect);
+ EXP_COUNTER_INCREMENT(exp, disconnect);
+
+ rc = OBP(exp->exp_obd, disconnect)(exp);
+ return rc;
+}
+
+static inline int obd_fid_init(struct obd_device *obd, struct obd_export *exp,
+ enum lu_cli_type type)
+{
+ int rc;
+
+ OBD_CHECK_DT_OP(obd, fid_init, 0);
+ OBD_COUNTER_INCREMENT(obd, fid_init);
+
+ rc = OBP(obd, fid_init)(obd, exp, type);
+ return rc;
+}
+
+static inline int obd_fid_fini(struct obd_device *obd)
+{
+ int rc;
+
+ OBD_CHECK_DT_OP(obd, fid_fini, 0);
+ OBD_COUNTER_INCREMENT(obd, fid_fini);
+
+ rc = OBP(obd, fid_fini)(obd);
+ return rc;
+}
+
+static inline int obd_fid_alloc(struct obd_export *exp,
+ struct lu_fid *fid,
+ struct md_op_data *op_data)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, fid_alloc);
+ EXP_COUNTER_INCREMENT(exp, fid_alloc);
+
+ rc = OBP(exp->exp_obd, fid_alloc)(exp, fid, op_data);
+ return rc;
+}
+
+static inline int obd_ping(const struct lu_env *env, struct obd_export *exp)
+{
+ int rc;
+
+ OBD_CHECK_DT_OP(exp->exp_obd, ping, 0);
+ EXP_COUNTER_INCREMENT(exp, ping);
+
+ rc = OBP(exp->exp_obd, ping)(env, exp);
+ return rc;
+}
+
+static inline int obd_pool_new(struct obd_device *obd, char *poolname)
+{
+ int rc;
+
+ OBD_CHECK_DT_OP(obd, pool_new, -EOPNOTSUPP);
+ OBD_COUNTER_INCREMENT(obd, pool_new);
+
+ rc = OBP(obd, pool_new)(obd, poolname);
+ return rc;
+}
+
+static inline int obd_pool_del(struct obd_device *obd, char *poolname)
+{
+ int rc;
+
+ OBD_CHECK_DT_OP(obd, pool_del, -EOPNOTSUPP);
+ OBD_COUNTER_INCREMENT(obd, pool_del);
+
+ rc = OBP(obd, pool_del)(obd, poolname);
+ return rc;
+}
+
+static inline int obd_pool_add(struct obd_device *obd, char *poolname, char *ostname)
+{
+ int rc;
+
+ OBD_CHECK_DT_OP(obd, pool_add, -EOPNOTSUPP);
+ OBD_COUNTER_INCREMENT(obd, pool_add);
+
+ rc = OBP(obd, pool_add)(obd, poolname, ostname);
+ return rc;
+}
+
+static inline int obd_pool_rem(struct obd_device *obd, char *poolname, char *ostname)
+{
+ int rc;
+
+ OBD_CHECK_DT_OP(obd, pool_rem, -EOPNOTSUPP);
+ OBD_COUNTER_INCREMENT(obd, pool_rem);
+
+ rc = OBP(obd, pool_rem)(obd, poolname, ostname);
+ return rc;
+}
+
+static inline void obd_getref(struct obd_device *obd)
+{
+ if (OBT(obd) && OBP(obd, getref)) {
+ OBD_COUNTER_INCREMENT(obd, getref);
+ OBP(obd, getref)(obd);
+ }
+}
+
+static inline void obd_putref(struct obd_device *obd)
+{
+ if (OBT(obd) && OBP(obd, putref)) {
+ OBD_COUNTER_INCREMENT(obd, putref);
+ OBP(obd, putref)(obd);
+ }
+}
+
+static inline int obd_init_export(struct obd_export *exp)
+{
+ int rc = 0;
+
+ if ((exp)->exp_obd != NULL && OBT((exp)->exp_obd) &&
+ OBP((exp)->exp_obd, init_export))
+ rc = OBP(exp->exp_obd, init_export)(exp);
+ return rc;
+}
+
+static inline int obd_destroy_export(struct obd_export *exp)
+{
+ if ((exp)->exp_obd != NULL && OBT((exp)->exp_obd) &&
+ OBP((exp)->exp_obd, destroy_export))
+ OBP(exp->exp_obd, destroy_export)(exp);
+ return 0;
+}
+
+static inline int obd_extent_calc(struct obd_export *exp,
+ struct lov_stripe_md *md,
+ int cmd, obd_off *offset)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, extent_calc);
+ rc = OBP(exp->exp_obd, extent_calc)(exp, md, cmd, offset);
+ return rc;
+}
+
+static inline struct dentry *
+obd_lvfs_fid2dentry(struct obd_export *exp, struct ost_id *oi, __u32 gen)
+{
+ struct lvfs_run_ctxt *ctxt = &exp->exp_obd->obd_lvfs_ctxt;
+ LASSERT(exp->exp_obd);
+
+ return ctxt->cb_ops.l_fid2dentry(ostid_id(oi), gen, ostid_seq(oi),
+ exp->exp_obd);
+}
+
+/* @max_age is the oldest time in jiffies that we accept using cached data.
+ * If the cache is older than @max_age we will get a new value from the
+ * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness. */
+static inline int obd_statfs_async(struct obd_export *exp,
+ struct obd_info *oinfo,
+ __u64 max_age,
+ struct ptlrpc_request_set *rqset)
+{
+ int rc = 0;
+ struct obd_device *obd;
+
+ if (exp == NULL || exp->exp_obd == NULL)
+ return -EINVAL;
+
+ obd = exp->exp_obd;
+ OBD_CHECK_DT_OP(obd, statfs, -EOPNOTSUPP);
+ OBD_COUNTER_INCREMENT(obd, statfs);
+
+ CDEBUG(D_SUPER, "%s: osfs %p age "LPU64", max_age "LPU64"\n",
+ obd->obd_name, &obd->obd_osfs, obd->obd_osfs_age, max_age);
+ if (cfs_time_before_64(obd->obd_osfs_age, max_age)) {
+ rc = OBP(obd, statfs_async)(exp, oinfo, max_age, rqset);
+ } else {
+ CDEBUG(D_SUPER,"%s: use %p cache blocks "LPU64"/"LPU64
+ " objects "LPU64"/"LPU64"\n",
+ obd->obd_name, &obd->obd_osfs,
+ obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
+ obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
+ spin_lock(&obd->obd_osfs_lock);
+ memcpy(oinfo->oi_osfs, &obd->obd_osfs, sizeof(*oinfo->oi_osfs));
+ spin_unlock(&obd->obd_osfs_lock);
+ oinfo->oi_flags |= OBD_STATFS_FROM_CACHE;
+ if (oinfo->oi_cb_up)
+ oinfo->oi_cb_up(oinfo, 0);
+ }
+ return rc;
+}
+
+static inline int obd_statfs_rqset(struct obd_export *exp,
+ struct obd_statfs *osfs, __u64 max_age,
+ __u32 flags)
+{
+ struct ptlrpc_request_set *set = NULL;
+ struct obd_info oinfo = { { { 0 } } };
+ int rc = 0;
+
+ set = ptlrpc_prep_set();
+ if (set == NULL)
+ return -ENOMEM;
+
+ oinfo.oi_osfs = osfs;
+ oinfo.oi_flags = flags;
+ rc = obd_statfs_async(exp, &oinfo, max_age, set);
+ if (rc == 0)
+ rc = ptlrpc_set_wait(set);
+ ptlrpc_set_destroy(set);
+ return rc;
+}
+
+/* @max_age is the oldest time in jiffies that we accept using cached data.
+ * If the cache is older than @max_age we will get a new value from the
+ * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness. */
+static inline int obd_statfs(const struct lu_env *env, struct obd_export *exp,
+ struct obd_statfs *osfs, __u64 max_age,
+ __u32 flags)
+{
+ int rc = 0;
+ struct obd_device *obd = exp->exp_obd;
+
+ if (obd == NULL)
+ return -EINVAL;
+
+ OBD_CHECK_DT_OP(obd, statfs, -EOPNOTSUPP);
+ OBD_COUNTER_INCREMENT(obd, statfs);
+
+ CDEBUG(D_SUPER, "osfs "LPU64", max_age "LPU64"\n",
+ obd->obd_osfs_age, max_age);
+ if (cfs_time_before_64(obd->obd_osfs_age, max_age)) {
+ rc = OBP(obd, statfs)(env, exp, osfs, max_age, flags);
+ if (rc == 0) {
+ spin_lock(&obd->obd_osfs_lock);
+ memcpy(&obd->obd_osfs, osfs, sizeof(obd->obd_osfs));
+ obd->obd_osfs_age = cfs_time_current_64();
+ spin_unlock(&obd->obd_osfs_lock);
+ }
+ } else {
+ CDEBUG(D_SUPER, "%s: use %p cache blocks "LPU64"/"LPU64
+ " objects "LPU64"/"LPU64"\n",
+ obd->obd_name, &obd->obd_osfs,
+ obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
+ obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
+ spin_lock(&obd->obd_osfs_lock);
+ memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
+ spin_unlock(&obd->obd_osfs_lock);
+ }
+ return rc;
+}
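+
+/*
+ * A minimal sketch of an obd_statfs() call that bypasses the cache,
+ * following the @max_age note above, illustrative only and kept under
+ * #if 0.  The function name and the zero flags argument are hypothetical.
+ */
+#if 0
+static int example_fresh_statfs(const struct lu_env *env,
+                                struct obd_export *exp,
+                                struct obd_statfs *osfs)
+{
+        /* per the comment above, current time + HZ guarantees freshness */
+        return obd_statfs(env, exp, osfs, cfs_time_current_64() + HZ, 0);
+}
+#endif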
+
+static inline int obd_sync_rqset(struct obd_export *exp, struct obd_info *oinfo,
+ obd_size start, obd_size end)
+{
+ struct ptlrpc_request_set *set = NULL;
+ int rc;
+
+ OBD_CHECK_DT_OP(exp->exp_obd, sync, -EOPNOTSUPP);
+ EXP_COUNTER_INCREMENT(exp, sync);
+
+ set = ptlrpc_prep_set();
+ if (set == NULL)
+ return -ENOMEM;
+
+ rc = OBP(exp->exp_obd, sync)(NULL, exp, oinfo, start, end, set);
+ if (rc == 0)
+ rc = ptlrpc_set_wait(set);
+ ptlrpc_set_destroy(set);
+ return rc;
+}
+
+static inline int obd_sync(const struct lu_env *env, struct obd_export *exp,
+ struct obd_info *oinfo, obd_size start, obd_size end,
+ struct ptlrpc_request_set *set)
+{
+ int rc;
+
+ OBD_CHECK_DT_OP(exp->exp_obd, sync, -EOPNOTSUPP);
+ EXP_COUNTER_INCREMENT(exp, sync);
+
+ rc = OBP(exp->exp_obd, sync)(env, exp, oinfo, start, end, set);
+ return rc;
+}
+
+static inline int obd_punch_rqset(struct obd_export *exp,
+ struct obd_info *oinfo,
+ struct obd_trans_info *oti)
+{
+ struct ptlrpc_request_set *set = NULL;
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, punch);
+ EXP_COUNTER_INCREMENT(exp, punch);
+
+ set = ptlrpc_prep_set();
+ if (set == NULL)
+ return -ENOMEM;
+
+ rc = OBP(exp->exp_obd, punch)(NULL, exp, oinfo, oti, set);
+ if (rc == 0)
+ rc = ptlrpc_set_wait(set);
+ ptlrpc_set_destroy(set);
+ return rc;
+}
+
+static inline int obd_punch(const struct lu_env *env, struct obd_export *exp,
+ struct obd_info *oinfo, struct obd_trans_info *oti,
+ struct ptlrpc_request_set *rqset)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, punch);
+ EXP_COUNTER_INCREMENT(exp, punch);
+
+ rc = OBP(exp->exp_obd, punch)(env, exp, oinfo, oti, rqset);
+ return rc;
+}
+
+static inline int obd_brw(int cmd, struct obd_export *exp,
+ struct obd_info *oinfo, obd_count oa_bufs,
+ struct brw_page *pg, struct obd_trans_info *oti)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, brw);
+ EXP_COUNTER_INCREMENT(exp, brw);
+
+ if (!(cmd & (OBD_BRW_RWMASK | OBD_BRW_CHECK))) {
+ CERROR("obd_brw: cmd must be OBD_BRW_READ, OBD_BRW_WRITE, "
+ "or OBD_BRW_CHECK\n");
+ LBUG();
+ }
+
+ rc = OBP(exp->exp_obd, brw)(cmd, exp, oinfo, oa_bufs, pg, oti);
+ return rc;
+}
+
+static inline int obd_preprw(const struct lu_env *env, int cmd,
+ struct obd_export *exp, struct obdo *oa,
+ int objcount, struct obd_ioobj *obj,
+ struct niobuf_remote *remote, int *pages,
+ struct niobuf_local *local,
+ struct obd_trans_info *oti,
+ struct lustre_capa *capa)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, preprw);
+ EXP_COUNTER_INCREMENT(exp, preprw);
+
+ rc = OBP(exp->exp_obd, preprw)(env, cmd, exp, oa, objcount, obj, remote,
+ pages, local, oti, capa);
+ return rc;
+}
+
+static inline int obd_commitrw(const struct lu_env *env, int cmd,
+ struct obd_export *exp, struct obdo *oa,
+ int objcount, struct obd_ioobj *obj,
+ struct niobuf_remote *rnb, int pages,
+ struct niobuf_local *local,
+ struct obd_trans_info *oti, int rc)
+{
+ EXP_CHECK_DT_OP(exp, commitrw);
+ EXP_COUNTER_INCREMENT(exp, commitrw);
+
+ rc = OBP(exp->exp_obd, commitrw)(env, cmd, exp, oa, objcount, obj,
+ rnb, pages, local, oti, rc);
+ return rc;
+}
+
+static inline int obd_merge_lvb(struct obd_export *exp,
+ struct lov_stripe_md *lsm,
+ struct ost_lvb *lvb, int kms_only)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, merge_lvb);
+ EXP_COUNTER_INCREMENT(exp, merge_lvb);
+
+ rc = OBP(exp->exp_obd, merge_lvb)(exp, lsm, lvb, kms_only);
+ return rc;
+}
+
+static inline int obd_adjust_kms(struct obd_export *exp,
+ struct lov_stripe_md *lsm, obd_off size,
+ int shrink)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, adjust_kms);
+ EXP_COUNTER_INCREMENT(exp, adjust_kms);
+
+ rc = OBP(exp->exp_obd, adjust_kms)(exp, lsm, size, shrink);
+ return rc;
+}
+
+static inline int obd_iocontrol(unsigned int cmd, struct obd_export *exp,
+ int len, void *karg, void *uarg)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, iocontrol);
+ EXP_COUNTER_INCREMENT(exp, iocontrol);
+
+ rc = OBP(exp->exp_obd, iocontrol)(cmd, exp, len, karg, uarg);
+ return rc;
+}
+
+static inline int obd_enqueue_rqset(struct obd_export *exp,
+ struct obd_info *oinfo,
+ struct ldlm_enqueue_info *einfo)
+{
+ struct ptlrpc_request_set *set = NULL;
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, enqueue);
+ EXP_COUNTER_INCREMENT(exp, enqueue);
+
+ set = ptlrpc_prep_set();
+ if (set == NULL)
+ return -ENOMEM;
+
+ rc = OBP(exp->exp_obd, enqueue)(exp, oinfo, einfo, set);
+ if (rc == 0)
+ rc = ptlrpc_set_wait(set);
+ ptlrpc_set_destroy(set);
+ return rc;
+}
+
+static inline int obd_enqueue(struct obd_export *exp,
+ struct obd_info *oinfo,
+ struct ldlm_enqueue_info *einfo,
+ struct ptlrpc_request_set *set)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, enqueue);
+ EXP_COUNTER_INCREMENT(exp, enqueue);
+
+ rc = OBP(exp->exp_obd, enqueue)(exp, oinfo, einfo, set);
+ return rc;
+}
+
+static inline int obd_change_cbdata(struct obd_export *exp,
+ struct lov_stripe_md *lsm,
+ ldlm_iterator_t it, void *data)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, change_cbdata);
+ EXP_COUNTER_INCREMENT(exp, change_cbdata);
+
+ rc = OBP(exp->exp_obd, change_cbdata)(exp, lsm, it, data);
+ return rc;
+}
+
+static inline int obd_find_cbdata(struct obd_export *exp,
+ struct lov_stripe_md *lsm,
+ ldlm_iterator_t it, void *data)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, find_cbdata);
+ EXP_COUNTER_INCREMENT(exp, find_cbdata);
+
+ rc = OBP(exp->exp_obd, find_cbdata)(exp, lsm, it, data);
+ return rc;
+}
+
+static inline int obd_cancel(struct obd_export *exp,
+ struct lov_stripe_md *ea, __u32 mode,
+ struct lustre_handle *lockh)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, cancel);
+ EXP_COUNTER_INCREMENT(exp, cancel);
+
+ rc = OBP(exp->exp_obd, cancel)(exp, ea, mode, lockh);
+ return rc;
+}
+
+static inline int obd_cancel_unused(struct obd_export *exp,
+ struct lov_stripe_md *ea,
+ ldlm_cancel_flags_t flags,
+ void *opaque)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, cancel_unused);
+ EXP_COUNTER_INCREMENT(exp, cancel_unused);
+
+ rc = OBP(exp->exp_obd, cancel_unused)(exp, ea, flags, opaque);
+ return rc;
+}
+
+static inline int obd_pin(struct obd_export *exp, const struct lu_fid *fid,
+ struct obd_capa *oc, struct obd_client_handle *handle,
+ int flag)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, pin);
+ EXP_COUNTER_INCREMENT(exp, pin);
+
+ rc = OBP(exp->exp_obd, pin)(exp, fid, oc, handle, flag);
+ return rc;
+}
+
+static inline int obd_unpin(struct obd_export *exp,
+ struct obd_client_handle *handle, int flag)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, unpin);
+ EXP_COUNTER_INCREMENT(exp, unpin);
+
+ rc = OBP(exp->exp_obd, unpin)(exp, handle, flag);
+ return rc;
+}
+
+
+static inline void obd_import_event(struct obd_device *obd,
+ struct obd_import *imp,
+ enum obd_import_event event)
+{
+ if (!obd) {
+ CERROR("NULL device\n");
+ return;
+ }
+ if (obd->obd_set_up && OBP(obd, import_event)) {
+ OBD_COUNTER_INCREMENT(obd, import_event);
+ OBP(obd, import_event)(obd, imp, event);
+ }
+}
+
+static inline int obd_llog_connect(struct obd_export *exp,
+ struct llogd_conn_body *body)
+{
+ int rc;
+
+ OBD_CHECK_DT_OP(exp->exp_obd, llog_connect, 0);
+ EXP_COUNTER_INCREMENT(exp, llog_connect);
+
+ rc = OBP(exp->exp_obd, llog_connect)(exp, body);
+ return rc;
+}
+
+
+static inline int obd_notify(struct obd_device *obd,
+ struct obd_device *watched,
+ enum obd_notify_event ev,
+ void *data)
+{
+ int rc;
+
+ OBD_CHECK_DEV(obd);
+
+ /* the check for async_recov is a complete hack - I'm hereby
+ overloading the meaning to also mean "this was called from
+ mds_postsetup". I know that my mds is able to handle notifies
+ by this point, and it needs to get them to execute mds_postrecov. */
+ if (!obd->obd_set_up && !obd->obd_async_recov) {
+ CDEBUG(D_HA, "obd %s not set up\n", obd->obd_name);
+ return -EINVAL;
+ }
+
+ if (!OBP(obd, notify)) {
+ CDEBUG(D_HA, "obd %s has no notify handler\n", obd->obd_name);
+ return -ENOSYS;
+ }
+
+ OBD_COUNTER_INCREMENT(obd, notify);
+ rc = OBP(obd, notify)(obd, watched, ev, data);
+ return rc;
+}
+
+static inline int obd_notify_observer(struct obd_device *observer,
+ struct obd_device *observed,
+ enum obd_notify_event ev,
+ void *data)
+{
+ int rc1;
+ int rc2;
+
+ struct obd_notify_upcall *onu;
+
+ if (observer->obd_observer)
+ rc1 = obd_notify(observer->obd_observer, observed, ev, data);
+ else
+ rc1 = 0;
+ /*
+ * Also, call non-obd listener, if any
+ */
+ onu = &observer->obd_upcall;
+ if (onu->onu_upcall != NULL)
+ rc2 = onu->onu_upcall(observer, observed, ev,
+ onu->onu_owner, NULL);
+ else
+ rc2 = 0;
+
+ return rc1 ? rc1 : rc2;
+}
+
+static inline int obd_quotacheck(struct obd_export *exp,
+ struct obd_quotactl *oqctl)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, quotacheck);
+ EXP_COUNTER_INCREMENT(exp, quotacheck);
+
+ rc = OBP(exp->exp_obd, quotacheck)(exp->exp_obd, exp, oqctl);
+ return rc;
+}
+
+static inline int obd_quotactl(struct obd_export *exp,
+ struct obd_quotactl *oqctl)
+{
+ int rc;
+
+ EXP_CHECK_DT_OP(exp, quotactl);
+ EXP_COUNTER_INCREMENT(exp, quotactl);
+
+ rc = OBP(exp->exp_obd, quotactl)(exp->exp_obd, exp, oqctl);
+ return rc;
+}
+
+static inline int obd_health_check(const struct lu_env *env,
+ struct obd_device *obd)
+{
+ /* returns: 0 on healthy
+ * >0 on unhealthy + reason code/flag
+ * however the only supported reason is 1 right now;
+ * we'll need to define better reasons or flags
+ * in the future.
+ * <0 on error
+ */
+ int rc;
+
+ /* don't use EXP_CHECK_DT_OP, because NULL method is normal here */
+ if (obd == NULL || !OBT(obd)) {
+ CERROR("cleaned up obd\n");
+ return -EOPNOTSUPP;
+ }
+ if (!obd->obd_set_up || obd->obd_stopping)
+ return 0;
+ if (!OBP(obd, health_check))
+ return 0;
+
+ rc = OBP(obd, health_check)(env, obd);
+ return rc;
+}
+
+static inline int obd_register_observer(struct obd_device *obd,
+ struct obd_device *observer)
+{
+ OBD_CHECK_DEV(obd);
+ down_write(&obd->obd_observer_link_sem);
+ if (obd->obd_observer && observer) {
+ up_write(&obd->obd_observer_link_sem);
+ return -EALREADY;
+ }
+ obd->obd_observer = observer;
+ up_write(&obd->obd_observer_link_sem);
+ return 0;
+}
+
+static inline int obd_pin_observer(struct obd_device *obd,
+ struct obd_device **observer)
+{
+ down_read(&obd->obd_observer_link_sem);
+ if (!obd->obd_observer) {
+ *observer = NULL;
+ up_read(&obd->obd_observer_link_sem);
+ return -ENOENT;
+ }
+ *observer = obd->obd_observer;
+ return 0;
+}
+
+static inline int obd_unpin_observer(struct obd_device *obd)
+{
+ up_read(&obd->obd_observer_link_sem);
+ return 0;
+}
+
+#if 0
+static inline int obd_register_page_removal_cb(struct obd_export *exp,
+ obd_page_removal_cb_t cb,
+ obd_pin_extent_cb pin_cb)
+{
+ int rc;
+
+ OBD_CHECK_DT_OP(exp->exp_obd, register_page_removal_cb, 0);
+ OBD_COUNTER_INCREMENT(exp->exp_obd, register_page_removal_cb);
+
+ rc = OBP(exp->exp_obd, register_page_removal_cb)(exp, cb, pin_cb);
+ return rc;
+}
+
+static inline int obd_unregister_page_removal_cb(struct obd_export *exp,
+ obd_page_removal_cb_t cb)
+{
+ int rc;
+
+ OBD_CHECK_DT_OP(exp->exp_obd, unregister_page_removal_cb, 0);
+ OBD_COUNTER_INCREMENT(exp->exp_obd, unregister_page_removal_cb);
+
+ rc = OBP(exp->exp_obd, unregister_page_removal_cb)(exp, cb);
+ return rc;
+}
+
+static inline int obd_register_lock_cancel_cb(struct obd_export *exp,
+ obd_lock_cancel_cb cb)
+{
+ int rc;
+
+ OBD_CHECK_DT_OP(exp->exp_obd, register_lock_cancel_cb, 0);
+ OBD_COUNTER_INCREMENT(exp->exp_obd, register_lock_cancel_cb);
+
+ rc = OBP(exp->exp_obd, register_lock_cancel_cb)(exp, cb);
+ return rc;
+}
+
+static inline int obd_unregister_lock_cancel_cb(struct obd_export *exp,
+ obd_lock_cancel_cb cb)
+{
+ int rc;
+
+ OBD_CHECK_DT_OP(exp->exp_obd, unregister_lock_cancel_cb, 0);
+ OBD_COUNTER_INCREMENT(exp->exp_obd, unregister_lock_cancel_cb);
+
+ rc = OBP(exp->exp_obd, unregister_lock_cancel_cb)(exp, cb);
+ return rc;
+}
+#endif
+
+/* metadata helpers */
+static inline int md_getstatus(struct obd_export *exp,
+ struct lu_fid *fid, struct obd_capa **pc)
+{
+ int rc;
+
+ EXP_CHECK_MD_OP(exp, getstatus);
+ EXP_MD_COUNTER_INCREMENT(exp, getstatus);
+ rc = MDP(exp->exp_obd, getstatus)(exp, fid, pc);
+ return rc;
+}
+
+static inline int md_getattr(struct obd_export *exp, struct md_op_data *op_data,
+ struct ptlrpc_request **request)
+{
+ int rc;
+
+ EXP_CHECK_MD_OP(exp, getattr);
+ EXP_MD_COUNTER_INCREMENT(exp, getattr);
+ rc = MDP(exp->exp_obd, getattr)(exp, op_data, request);
+ return rc;
+}
+
+static inline int md_null_inode(struct obd_export *exp,
+ const struct lu_fid *fid)
+{
+ int rc;
+
+ EXP_CHECK_MD_OP(exp, null_inode);
+ EXP_MD_COUNTER_INCREMENT(exp, null_inode);
+ rc = MDP(exp->exp_obd, null_inode)(exp, fid);
+ return rc;
+}
+
+static inline int md_find_cbdata(struct obd_export *exp,
+ const struct lu_fid *fid,
+ ldlm_iterator_t it, void *data)
+{
+ int rc;
+
+ EXP_CHECK_MD_OP(exp, find_cbdata);
+ EXP_MD_COUNTER_INCREMENT(exp, find_cbdata);
+ rc = MDP(exp->exp_obd, find_cbdata)(exp, fid, it, data);
+ return rc;
+}
+
+static inline int md_close(struct obd_export *exp, struct md_op_data *op_data,
+ struct md_open_data *mod,
+ struct ptlrpc_request **request)
+{
+ int rc;
+
+ EXP_CHECK_MD_OP(exp, close);
+ EXP_MD_COUNTER_INCREMENT(exp, close);
+ rc = MDP(exp->exp_obd, close)(exp, op_data, mod, request);
+ return rc;
+}
+
+static inline int md_create(struct obd_export *exp, struct md_op_data *op_data,
+ const void *data, int datalen, int mode, __u32 uid,
+ __u32 gid, cfs_cap_t cap_effective, __u64 rdev,
+ struct ptlrpc_request **request)
+{
+ int rc;
+
+ EXP_CHECK_MD_OP(exp, create);
+ EXP_MD_COUNTER_INCREMENT(exp, create);
+ rc = MDP(exp->exp_obd, create)(exp, op_data, data, datalen, mode,
+ uid, gid, cap_effective, rdev, request);
+ return rc;
+}
+
+static inline int md_done_writing(struct obd_export *exp,
+ struct md_op_data *op_data,
+ struct md_open_data *mod)
+{
+ int rc;
+
+ EXP_CHECK_MD_OP(exp, done_writing);
+ EXP_MD_COUNTER_INCREMENT(exp, done_writing);
+ rc = MDP(exp->exp_obd, done_writing)(exp, op_data, mod);
+ return rc;
+}
+
+static inline int md_enqueue(struct obd_export *exp,
+ struct ldlm_enqueue_info *einfo,
+ struct lookup_intent *it,
+ struct md_op_data *op_data,
+ struct lustre_handle *lockh,
+ void *lmm, int lmmsize,
+ struct ptlrpc_request **req,
+ int extra_lock_flags)
+{
+ int rc;
+
+ EXP_CHECK_MD_OP(exp, enqueue);
+ EXP_MD_COUNTER_INCREMENT(exp, enqueue);
+ rc = MDP(exp->exp_obd, enqueue)(exp, einfo, it, op_data, lockh,
+ lmm, lmmsize, req, extra_lock_flags);
+ return rc;
+}
+
+static inline int md_getattr_name(struct obd_export *exp,
+ struct md_op_data *op_data,
+ struct ptlrpc_request **request)
+{
+ int rc;
+
+ EXP_CHECK_MD_OP(exp, getattr_name);
+ EXP_MD_COUNTER_INCREMENT(exp, getattr_name);
+ rc = MDP(exp->exp_obd, getattr_name)(exp, op_data, request);
+ return rc;
+}
+
+static inline int md_intent_lock(struct obd_export *exp,
+ struct md_op_data *op_data, void *lmm,
+ int lmmsize, struct lookup_intent *it,
+ int lookup_flags, struct ptlrpc_request **reqp,
+ ldlm_blocking_callback cb_blocking,
+ __u64 extra_lock_flags)
+{
+ int rc;
+
+ EXP_CHECK_MD_OP(exp, intent_lock);
+ EXP_MD_COUNTER_INCREMENT(exp, intent_lock);
+ rc = MDP(exp->exp_obd, intent_lock)(exp, op_data, lmm, lmmsize,
+ it, lookup_flags, reqp, cb_blocking,
+ extra_lock_flags);
+ return rc;
+}
+
+static inline int md_link(struct obd_export *exp, struct md_op_data *op_data,
+ struct ptlrpc_request **request)
+{
+ int rc;
+
+ EXP_CHECK_MD_OP(exp, link);
+ EXP_MD_COUNTER_INCREMENT(exp, link);
+ rc = MDP(exp->exp_obd, link)(exp, op_data, request);
+ return rc;
+}
+
+static inline int md_rename(struct obd_export *exp, struct md_op_data *op_data,
+ const char *old, int oldlen, const char *new,
+ int newlen, struct ptlrpc_request **request)
+{
+ int rc;
+
+ EXP_CHECK_MD_OP(exp, rename);
+ EXP_MD_COUNTER_INCREMENT(exp, rename);
+ rc = MDP(exp->exp_obd, rename)(exp, op_data, old, oldlen, new,
+ newlen, request);
+ return rc;
+}
+
+static inline int md_is_subdir(struct obd_export *exp,
+ const struct lu_fid *pfid,
+ const struct lu_fid *cfid,
+ struct ptlrpc_request **request)
+{
+ int rc;
+
+ EXP_CHECK_MD_OP(exp, is_subdir);
+ EXP_MD_COUNTER_INCREMENT(exp, is_subdir);
+ rc = MDP(exp->exp_obd, is_subdir)(exp, pfid, cfid, request);
+ return rc;
+}
+
+static inline int md_setattr(struct obd_export *exp, struct md_op_data *op_data,
+ void *ea, int ealen, void *ea2, int ea2len,
+ struct ptlrpc_request **request,
+ struct md_open_data **mod)
+{
+ int rc;
+
+ EXP_CHECK_MD_OP(exp, setattr);
+ EXP_MD_COUNTER_INCREMENT(exp, setattr);
+ rc = MDP(exp->exp_obd, setattr)(exp, op_data, ea, ealen,
+ ea2, ea2len, request, mod);
+ return rc;
+}
+
+static inline int md_sync(struct obd_export *exp, const struct lu_fid *fid,
+ struct obd_capa *oc, struct ptlrpc_request **request)
+{
+ int rc;
+
+ EXP_CHECK_MD_OP(exp, sync);
+ EXP_MD_COUNTER_INCREMENT(exp, sync);
+ rc = MDP(exp->exp_obd, sync)(exp, fid, oc, request);
+ return rc;
+}
+
+static inline int md_readpage(struct obd_export *exp, struct md_op_data *opdata,
+ struct page **pages,
+ struct ptlrpc_request **request)
+{
+ int rc;
+
+ EXP_CHECK_MD_OP(exp, readpage);
+ EXP_MD_COUNTER_INCREMENT(exp, readpage);
+ rc = MDP(exp->exp_obd, readpage)(exp, opdata, pages, request);
+ return rc;
+}
+
+static inline int md_unlink(struct obd_export *exp, struct md_op_data *op_data,
+ struct ptlrpc_request **request)
+{
+ int rc;
+
+ EXP_CHECK_MD_OP(exp, unlink);
+ EXP_MD_COUNTER_INCREMENT(exp, unlink);
+ rc = MDP(exp->exp_obd, unlink)(exp, op_data, request);
+ return rc;
+}
+
+static inline int md_get_lustre_md(struct obd_export *exp,
+ struct ptlrpc_request *req,
+ struct obd_export *dt_exp,
+ struct obd_export *md_exp,
+ struct lustre_md *md)
+{
+ EXP_CHECK_MD_OP(exp, get_lustre_md);
+ EXP_MD_COUNTER_INCREMENT(exp, get_lustre_md);
+ return MDP(exp->exp_obd, get_lustre_md)(exp, req, dt_exp, md_exp, md);
+}
+
+static inline int md_free_lustre_md(struct obd_export *exp,
+ struct lustre_md *md)
+{
+ EXP_CHECK_MD_OP(exp, free_lustre_md);
+ EXP_MD_COUNTER_INCREMENT(exp, free_lustre_md);
+ return MDP(exp->exp_obd, free_lustre_md)(exp, md);
+}
+
+static inline int md_setxattr(struct obd_export *exp,
+ const struct lu_fid *fid, struct obd_capa *oc,
+ obd_valid valid, const char *name,
+ const char *input, int input_size,
+ int output_size, int flags, __u32 suppgid,
+ struct ptlrpc_request **request)
+{
+ EXP_CHECK_MD_OP(exp, setxattr);
+ EXP_MD_COUNTER_INCREMENT(exp, setxattr);
+ return MDP(exp->exp_obd, setxattr)(exp, fid, oc, valid, name, input,
+ input_size, output_size, flags,
+ suppgid, request);
+}
+
+static inline int md_getxattr(struct obd_export *exp,
+ const struct lu_fid *fid, struct obd_capa *oc,
+ obd_valid valid, const char *name,
+ const char *input, int input_size,
+ int output_size, int flags,
+ struct ptlrpc_request **request)
+{
+ EXP_CHECK_MD_OP(exp, getxattr);
+ EXP_MD_COUNTER_INCREMENT(exp, getxattr);
+ return MDP(exp->exp_obd, getxattr)(exp, fid, oc, valid, name, input,
+ input_size, output_size, flags,
+ request);
+}
+
+static inline int md_set_open_replay_data(struct obd_export *exp,
+ struct obd_client_handle *och,
+ struct ptlrpc_request *open_req)
+{
+ EXP_CHECK_MD_OP(exp, set_open_replay_data);
+ EXP_MD_COUNTER_INCREMENT(exp, set_open_replay_data);
+ return MDP(exp->exp_obd, set_open_replay_data)(exp, och, open_req);
+}
+
+static inline int md_clear_open_replay_data(struct obd_export *exp,
+ struct obd_client_handle *och)
+{
+ EXP_CHECK_MD_OP(exp, clear_open_replay_data);
+ EXP_MD_COUNTER_INCREMENT(exp, clear_open_replay_data);
+ return MDP(exp->exp_obd, clear_open_replay_data)(exp, och);
+}
+
+static inline int md_set_lock_data(struct obd_export *exp,
+ __u64 *lockh, void *data, __u64 *bits)
+{
+ EXP_CHECK_MD_OP(exp, set_lock_data);
+ EXP_MD_COUNTER_INCREMENT(exp, set_lock_data);
+ return MDP(exp->exp_obd, set_lock_data)(exp, lockh, data, bits);
+}
+
+static inline int md_cancel_unused(struct obd_export *exp,
+ const struct lu_fid *fid,
+ ldlm_policy_data_t *policy,
+ ldlm_mode_t mode,
+ ldlm_cancel_flags_t flags,
+ void *opaque)
+{
+ int rc;
+
+ EXP_CHECK_MD_OP(exp, cancel_unused);
+ EXP_MD_COUNTER_INCREMENT(exp, cancel_unused);
+
+ rc = MDP(exp->exp_obd, cancel_unused)(exp, fid, policy, mode,
+ flags, opaque);
+ return rc;
+}
+
+static inline ldlm_mode_t md_lock_match(struct obd_export *exp, __u64 flags,
+ const struct lu_fid *fid,
+ ldlm_type_t type,
+ ldlm_policy_data_t *policy,
+ ldlm_mode_t mode,
+ struct lustre_handle *lockh)
+{
+ EXP_CHECK_MD_OP(exp, lock_match);
+ EXP_MD_COUNTER_INCREMENT(exp, lock_match);
+ return MDP(exp->exp_obd, lock_match)(exp, flags, fid, type,
+ policy, mode, lockh);
+}
+
+static inline int md_init_ea_size(struct obd_export *exp, int easize,
+ int def_asize, int cookiesize)
+{
+ EXP_CHECK_MD_OP(exp, init_ea_size);
+ EXP_MD_COUNTER_INCREMENT(exp, init_ea_size);
+ return MDP(exp->exp_obd, init_ea_size)(exp, easize, def_asize,
+ cookiesize);
+}
+
+static inline int md_get_remote_perm(struct obd_export *exp,
+ const struct lu_fid *fid,
+ struct obd_capa *oc, __u32 suppgid,
+ struct ptlrpc_request **request)
+{
+ EXP_CHECK_MD_OP(exp, get_remote_perm);
+ EXP_MD_COUNTER_INCREMENT(exp, get_remote_perm);
+ return MDP(exp->exp_obd, get_remote_perm)(exp, fid, oc, suppgid,
+ request);
+}
+
+static inline int md_renew_capa(struct obd_export *exp, struct obd_capa *ocapa,
+ renew_capa_cb_t cb)
+{
+ int rc;
+
+ EXP_CHECK_MD_OP(exp, renew_capa);
+ EXP_MD_COUNTER_INCREMENT(exp, renew_capa);
+ rc = MDP(exp->exp_obd, renew_capa)(exp, ocapa, cb);
+ return rc;
+}
+
+static inline int md_unpack_capa(struct obd_export *exp,
+ struct ptlrpc_request *req,
+ const struct req_msg_field *field,
+ struct obd_capa **oc)
+{
+ int rc;
+
+ EXP_CHECK_MD_OP(exp, unpack_capa);
+ EXP_MD_COUNTER_INCREMENT(exp, unpack_capa);
+ rc = MDP(exp->exp_obd, unpack_capa)(exp, req, field, oc);
+ return rc;
+}
+
+static inline int md_intent_getattr_async(struct obd_export *exp,
+ struct md_enqueue_info *minfo,
+ struct ldlm_enqueue_info *einfo)
+{
+ int rc;
+
+ EXP_CHECK_MD_OP(exp, intent_getattr_async);
+ EXP_MD_COUNTER_INCREMENT(exp, intent_getattr_async);
+ rc = MDP(exp->exp_obd, intent_getattr_async)(exp, minfo, einfo);
+ return rc;
+}
+
+static inline int md_revalidate_lock(struct obd_export *exp,
+ struct lookup_intent *it,
+ struct lu_fid *fid, __u64 *bits)
+{
+ int rc;
+
+ EXP_CHECK_MD_OP(exp, revalidate_lock);
+ EXP_MD_COUNTER_INCREMENT(exp, revalidate_lock);
+ rc = MDP(exp->exp_obd, revalidate_lock)(exp, it, fid, bits);
+ return rc;
+}
+
+
+/* OBD Metadata Support */
+
+extern int obd_init_caches(void);
+extern void obd_cleanup_caches(void);
+
+/* support routines */
+extern struct kmem_cache *obdo_cachep;
+
+#define OBDO_ALLOC(ptr) \
+do { \
+ OBD_SLAB_ALLOC_PTR_GFP((ptr), obdo_cachep, __GFP_IO); \
+} while (0)
+
+#define OBDO_FREE(ptr) \
+do { \
+ OBD_SLAB_FREE_PTR((ptr), obdo_cachep); \
+} while (0)
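+
+/*
+ * Illustrative sketch (not part of the original patch): typical use of the
+ * OBDO_ALLOC()/OBDO_FREE() pair above. The helper name is hypothetical and
+ * the block is kept under #if 0 so it serves as documentation only.
+ */
+#if 0
+static inline int obdo_example_roundtrip(void)
+{
+ struct obdo *oa;
+
+ OBDO_ALLOC(oa); /* zeroed slab allocation from obdo_cachep */
+ if (oa == NULL)
+ return -ENOMEM;
+ /* ... fill in *oa and use it ... */
+ OBDO_FREE(oa); /* return the object to obdo_cachep */
+ return 0;
+}
+#endif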
+
+
+static inline void obdo2fid(struct obdo *oa, struct lu_fid *fid)
+{
+ /* something here */
+}
+
+static inline void fid2obdo(struct lu_fid *fid, struct obdo *oa)
+{
+ /* something here */
+}
+
+typedef int (*register_lwp_cb)(void *data);
+
+struct lwp_register_item {
+ struct obd_export **lri_exp;
+ register_lwp_cb lri_cb_func;
+ void *lri_cb_data;
+ struct list_head lri_list;
+ char lri_name[MTI_NAME_MAXLEN];
+};
+
+/* I'm as embarrassed about this as you are.
+ *
+ * <shaver> // XXX do not look into _superhack with remaining eye
+ * <shaver> // XXX if this were any uglier, I'd get my own show on MTV */
+extern int (*ptlrpc_put_connection_superhack)(struct ptlrpc_connection *c);
+
+/* obd_mount.c */
+
+/* sysctl.c */
+extern void obd_sysctl_init(void);
+extern void obd_sysctl_clean(void);
+
+/* uuid.c */
+typedef __u8 class_uuid_t[16];
+void class_uuid_unparse(class_uuid_t in, struct obd_uuid *out);
+
+/* lustre_peer.c */
+int lustre_uuid_to_peer(const char *uuid, lnet_nid_t *peer_nid, int index);
+int class_add_uuid(const char *uuid, __u64 nid);
+int class_del_uuid(const char *uuid);
+int class_check_uuid(struct obd_uuid *uuid, __u64 nid);
+void class_init_uuidlist(void);
+void class_exit_uuidlist(void);
+
+/* mea.c */
+int mea_name2idx(struct lmv_stripe_md *mea, const char *name, int namelen);
+int raw_name2idx(int hashtype, int count, const char *name, int namelen);
+
+/* prng.c */
+#define ll_generate_random_uuid(uuid_out) cfs_get_random_bytes(uuid_out, sizeof(class_uuid_t))
+
+#endif /* __LINUX_OBD_CLASS_H */
diff --git a/drivers/staging/lustre/lustre/include/obd_lov.h b/drivers/staging/lustre/lustre/include/obd_lov.h
new file mode 100644
index 0000000..235718b
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/obd_lov.h
@@ -0,0 +1,116 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef _OBD_LOV_H__
+#define _OBD_LOV_H__
+
+#define LOV_DEFAULT_STRIPE_SIZE (1 << LNET_MTU_BITS)
+
+static inline int lov_stripe_md_size(__u16 stripes)
+{
+ return sizeof(struct lov_stripe_md) + stripes * sizeof(struct lov_oinfo *);
+}
+
+struct lov_version_size {
+ __u32 lvs_magic;
+ size_t lvs_lmm_size;
+ size_t lvs_lod_size;
+};
+
+static inline __u32 lov_mds_md_stripecnt(int ea_size, __u32 lmm_magic)
+{
+ static const struct lov_version_size lmm_ver_size[] = {
+ { .lvs_magic = LOV_MAGIC_V3,
+ .lvs_lmm_size = sizeof(struct lov_mds_md_v3),
+ .lvs_lod_size = sizeof(struct lov_ost_data_v1) },
+ { .lvs_magic = LOV_MAGIC_V1,
+ .lvs_lmm_size = sizeof(struct lov_mds_md_v1),
+ .lvs_lod_size = sizeof(struct lov_ost_data_v1)} };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lmm_ver_size); i++) {
+ if (lmm_magic == lmm_ver_size[i].lvs_magic) {
+ if (ea_size <= lmm_ver_size[i].lvs_lmm_size)
+ return 0;
+ return (ea_size - lmm_ver_size[i].lvs_lmm_size) /
+ lmm_ver_size[i].lvs_lod_size;
+ }
+ }
+
+ /* Invalid LOV magic, so no stripes could fit */
+ return 0;
+}
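+
+/*
+ * Worked example (illustrative, not part of the original patch): for
+ * LOV_MAGIC_V1 an EA buffer of sizeof(struct lov_mds_md_v1) +
+ * 4 * sizeof(struct lov_ost_data_v1) bytes yields a stripe count of 4,
+ * while any buffer no larger than the fixed lov_mds_md_v1 header yields 0.
+ */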
+
+/* lov_do_div64(a, b) returns a % b, and a = a / b.
+ * The 32-bit code is LOV-specific due to knowing about stripe limits in
+ * order to reduce the divisor to a 32-bit number. If the divisor is
+ * already a 32-bit value the compiler handles this directly. */
+#if BITS_PER_LONG > 32
+# define lov_do_div64(n, base) ({ \
+ uint64_t __base = (base); \
+ uint64_t __rem; \
+ __rem = ((uint64_t)(n)) % __base; \
+ (n) = ((uint64_t)(n)) / __base; \
+ __rem; \
+ })
+#else
+# define lov_do_div64(n, base) ({ \
+ uint64_t __rem; \
+ if ((sizeof(base) > 4) && (((base) & 0xffffffff00000000ULL) != 0)) { \
+ int __remainder; \
+ LASSERTF(!((base) & (LOV_MIN_STRIPE_SIZE - 1)), "64 bit lov " \
+ "division %llu / %llu\n", (n), (uint64_t)(base)); \
+ __remainder = (n) & (LOV_MIN_STRIPE_SIZE - 1); \
+ (n) >>= LOV_MIN_STRIPE_BITS; \
+ __rem = do_div(n, (base) >> LOV_MIN_STRIPE_BITS); \
+ __rem <<= LOV_MIN_STRIPE_BITS; \
+ __rem += __remainder; \
+ } else { \
+ __rem = do_div(n, base); \
+ } \
+ __rem; \
+ })
+#endif
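+
+/*
+ * Illustrative sketch (not part of the original patch): lov_do_div64()
+ * divides its first argument in place and returns the remainder, so mapping
+ * a file offset into a stripe could look like the hypothetical helper below
+ * (kept under #if 0 as documentation only).
+ */
+#if 0
+static inline __u64 lov_example_stripe_offset(__u64 file_off, __u64 stripe_size)
+{
+ /* after this call file_off holds file_off / stripe_size ... */
+ __u64 off_in_stripe = lov_do_div64(file_off, stripe_size);
+
+ /* ... and the value returned here is the original file_off % stripe_size */
+ return off_in_stripe;
+}
+#endif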
+
+#define IOC_LOV_TYPE 'g'
+#define IOC_LOV_MIN_NR 50
+#define IOC_LOV_SET_OSC_ACTIVE _IOWR('g', 50, long)
+#define IOC_LOV_MAX_NR 50
+
+#define QOS_DEFAULT_THRESHOLD 10 /* MB */
+#define QOS_DEFAULT_MAXAGE 5 /* Seconds */
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/obd_ost.h b/drivers/staging/lustre/lustre/include/obd_ost.h
new file mode 100644
index 0000000..af89843
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/obd_ost.h
@@ -0,0 +1,96 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/include/obd_ost.h
+ *
+ * Data structures for object storage targets and client: OST & OSC's
+ *
+ * See also lustre_idl.h for wire formats of requests.
+ */
+
+#ifndef _LUSTRE_OST_H
+#define _LUSTRE_OST_H
+
+#include <obd_class.h>
+
+struct osc_brw_async_args {
+ struct obdo *aa_oa;
+ int aa_requested_nob;
+ int aa_nio_count;
+ obd_count aa_page_count;
+ int aa_resends;
+ struct brw_page **aa_ppga;
+ struct client_obd *aa_cli;
+ struct list_head aa_oaps;
+ struct list_head aa_exts;
+ struct obd_capa *aa_ocapa;
+ struct cl_req *aa_clerq;
+};
+
+#define osc_grant_args osc_brw_async_args
+struct osc_async_args {
+ struct obd_info *aa_oi;
+};
+
+struct osc_setattr_args {
+ struct obdo *sa_oa;
+ obd_enqueue_update_f sa_upcall;
+ void *sa_cookie;
+};
+
+struct osc_fsync_args {
+ struct obd_info *fa_oi;
+ obd_enqueue_update_f fa_upcall;
+ void *fa_cookie;
+};
+
+struct osc_enqueue_args {
+ struct obd_export *oa_exp;
+ __u64 *oa_flags;
+ obd_enqueue_update_f oa_upcall;
+ void *oa_cookie;
+ struct ost_lvb *oa_lvb;
+ struct lustre_handle *oa_lockh;
+ struct ldlm_enqueue_info *oa_ei;
+ unsigned int oa_agl:1;
+};
+
+#if 0
+int osc_extent_blocking_cb(struct ldlm_lock *lock,
+ struct ldlm_lock_desc *new, void *data,
+ int flag);
+#endif
+
+#endif
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
new file mode 100644
index 0000000..03e6133
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -0,0 +1,852 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef _OBD_SUPPORT
+#define _OBD_SUPPORT
+
+#include <linux/libcfs/libcfs.h>
+#include <lvfs.h>
+#include <lprocfs_status.h>
+
+#include <linux/obd_support.h>
+
+/* global variables */
+extern struct lprocfs_stats *obd_memory;
+enum {
+ OBD_MEMORY_STAT = 0,
+ OBD_MEMORY_PAGES_STAT = 1,
+ OBD_STATS_NUM,
+};
+
+extern unsigned int obd_debug_peer_on_timeout;
+extern unsigned int obd_dump_on_timeout;
+extern unsigned int obd_dump_on_eviction;
+/* obd_timeout should only be used for recovery, not for
+ networking / disk / timings affected by load (use Adaptive Timeouts) */
+extern unsigned int obd_timeout; /* seconds */
+extern unsigned int ldlm_timeout; /* seconds */
+extern unsigned int obd_timeout_set;
+extern unsigned int ldlm_timeout_set;
+extern unsigned int at_min;
+extern unsigned int at_max;
+extern unsigned int at_history;
+extern int at_early_margin;
+extern int at_extra;
+extern unsigned int obd_sync_filter;
+extern unsigned int obd_max_dirty_pages;
+extern atomic_t obd_dirty_pages;
+extern atomic_t obd_dirty_transit_pages;
+extern unsigned int obd_alloc_fail_rate;
+extern char obd_jobid_var[];
+
+/* lvfs.c */
+int obd_alloc_fail(const void *ptr, const char *name, const char *type,
+ size_t size, const char *file, int line);
+
+/* Some hash init argument constants */
+#define HASH_POOLS_BKT_BITS 3
+#define HASH_POOLS_CUR_BITS 3
+#define HASH_POOLS_MAX_BITS 7
+#define HASH_UUID_BKT_BITS 5
+#define HASH_UUID_CUR_BITS 7
+#define HASH_UUID_MAX_BITS 12
+#define HASH_NID_BKT_BITS 5
+#define HASH_NID_CUR_BITS 7
+#define HASH_NID_MAX_BITS 12
+#define HASH_NID_STATS_BKT_BITS 5
+#define HASH_NID_STATS_CUR_BITS 7
+#define HASH_NID_STATS_MAX_BITS 12
+#define HASH_LQE_BKT_BITS 5
+#define HASH_LQE_CUR_BITS 7
+#define HASH_LQE_MAX_BITS 12
+#define HASH_CONN_BKT_BITS 5
+#define HASH_CONN_CUR_BITS 5
+#define HASH_CONN_MAX_BITS 15
+#define HASH_EXP_LOCK_BKT_BITS 5
+#define HASH_EXP_LOCK_CUR_BITS 7
+#define HASH_EXP_LOCK_MAX_BITS 16
+#define HASH_CL_ENV_BKT_BITS 5
+#define HASH_CL_ENV_BITS 10
+#define HASH_JOB_STATS_BKT_BITS 5
+#define HASH_JOB_STATS_CUR_BITS 7
+#define HASH_JOB_STATS_MAX_BITS 12
+
+/* Timeout definitions */
+#define OBD_TIMEOUT_DEFAULT 100
+#define LDLM_TIMEOUT_DEFAULT 20
+#define MDS_LDLM_TIMEOUT_DEFAULT 6
+/* Time to wait for all clients to reconnect during recovery (hard limit) */
+#define OBD_RECOVERY_TIME_HARD (obd_timeout * 9)
+/* Time to wait for all clients to reconnect during recovery (soft limit) */
+/* Should be very conservative; must catch the first reconnect after reboot */
+#define OBD_RECOVERY_TIME_SOFT (obd_timeout * 3)
+/* Change recovery-small 26b time if you change this */
+#define PING_INTERVAL max(obd_timeout / 4, 1U)
+/* a bit more than maximal journal commit time in seconds */
+#define PING_INTERVAL_SHORT min(PING_INTERVAL, 7U)
+/* The client may skip 1 ping; we must wait at least 2.5 ping intervals. But
+ * for multiple failover targets the client only pings one server at a time,
+ * and pings can be lost on a loaded network. Since eviction has serious
+ * consequences, and there's no urgent need to evict a client just because
+ * it's idle, we should be very conservative here (see the worked example
+ * below). */
+#define PING_EVICT_TIMEOUT (PING_INTERVAL * 6)
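+/*
+ * Worked example (illustrative, not part of the original patch): with the
+ * default obd_timeout of 100 seconds, PING_INTERVAL is max(100 / 4, 1) = 25
+ * seconds and PING_EVICT_TIMEOUT is 25 * 6 = 150 seconds, i.e. roughly six
+ * ping intervals of silence before eviction is considered.
+ */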
+#define DISK_TIMEOUT 50 /* Beyond this we warn about disk speed */
+#define CONNECTION_SWITCH_MIN 5U /* Connection switching rate limiter */
+/* Max connect interval for nonresponsive servers; ~50s to avoid building up
+ * connect requests in the LND queues, but within obd_timeout so we don't
+ * miss the recovery window */
+#define CONNECTION_SWITCH_MAX min(50U, max(CONNECTION_SWITCH_MIN, obd_timeout))
+#define CONNECTION_SWITCH_INC 5 /* Connection timeout backoff */
+/* In general this should be low to have quick detection of a system
+ running on a backup server. (If it's too low, import_select_connection
+ will increase the timeout anyhow.) */
+#define INITIAL_CONNECT_TIMEOUT max(CONNECTION_SWITCH_MIN, obd_timeout / 20)
+/* The max delay between connects is SWITCH_MAX + SWITCH_INC + INITIAL */
+#define RECONNECT_DELAY_MAX (CONNECTION_SWITCH_MAX + CONNECTION_SWITCH_INC + \
+ INITIAL_CONNECT_TIMEOUT)
+/* The min time a target should wait for clients to reconnect in recovery */
+#define OBD_RECOVERY_TIME_MIN (2*RECONNECT_DELAY_MAX)
+#define OBD_IR_FACTOR_MIN 1
+#define OBD_IR_FACTOR_MAX 10
+#define OBD_IR_FACTOR_DEFAULT (OBD_IR_FACTOR_MAX/2)
+/* default timeout for the MGS to become IR_FULL */
+#define OBD_IR_MGS_TIMEOUT (4*obd_timeout)
+#define LONG_UNLINK 300 /* Unlink should happen before now */
+
+/**
+ * Grant shrink interval: if the client has been "idle" for longer than this
+ * interval, the ll_grant thread returns the requested grant space to the
+ * filter.
+ */
+#define GRANT_SHRINK_INTERVAL 1200 /* 20 minutes */
+
+#define OBD_FAIL_MDS 0x100
+#define OBD_FAIL_MDS_HANDLE_UNPACK 0x101
+#define OBD_FAIL_MDS_GETATTR_NET 0x102
+#define OBD_FAIL_MDS_GETATTR_PACK 0x103
+#define OBD_FAIL_MDS_READPAGE_NET 0x104
+#define OBD_FAIL_MDS_READPAGE_PACK 0x105
+#define OBD_FAIL_MDS_SENDPAGE 0x106
+#define OBD_FAIL_MDS_REINT_NET 0x107
+#define OBD_FAIL_MDS_REINT_UNPACK 0x108
+#define OBD_FAIL_MDS_REINT_SETATTR 0x109
+#define OBD_FAIL_MDS_REINT_SETATTR_WRITE 0x10a
+#define OBD_FAIL_MDS_REINT_CREATE 0x10b
+#define OBD_FAIL_MDS_REINT_CREATE_WRITE 0x10c
+#define OBD_FAIL_MDS_REINT_UNLINK 0x10d
+#define OBD_FAIL_MDS_REINT_UNLINK_WRITE 0x10e
+#define OBD_FAIL_MDS_REINT_LINK 0x10f
+#define OBD_FAIL_MDS_REINT_LINK_WRITE 0x110
+#define OBD_FAIL_MDS_REINT_RENAME 0x111
+#define OBD_FAIL_MDS_REINT_RENAME_WRITE 0x112
+#define OBD_FAIL_MDS_OPEN_NET 0x113
+#define OBD_FAIL_MDS_OPEN_PACK 0x114
+#define OBD_FAIL_MDS_CLOSE_NET 0x115
+#define OBD_FAIL_MDS_CLOSE_PACK 0x116
+#define OBD_FAIL_MDS_CONNECT_NET 0x117
+#define OBD_FAIL_MDS_CONNECT_PACK 0x118
+#define OBD_FAIL_MDS_REINT_NET_REP 0x119
+#define OBD_FAIL_MDS_DISCONNECT_NET 0x11a
+#define OBD_FAIL_MDS_GETSTATUS_NET 0x11b
+#define OBD_FAIL_MDS_GETSTATUS_PACK 0x11c
+#define OBD_FAIL_MDS_STATFS_PACK 0x11d
+#define OBD_FAIL_MDS_STATFS_NET 0x11e
+#define OBD_FAIL_MDS_GETATTR_NAME_NET 0x11f
+#define OBD_FAIL_MDS_PIN_NET 0x120
+#define OBD_FAIL_MDS_UNPIN_NET 0x121
+#define OBD_FAIL_MDS_ALL_REPLY_NET 0x122
+#define OBD_FAIL_MDS_ALL_REQUEST_NET 0x123
+#define OBD_FAIL_MDS_SYNC_NET 0x124
+#define OBD_FAIL_MDS_SYNC_PACK 0x125
+#define OBD_FAIL_MDS_DONE_WRITING_NET 0x126
+#define OBD_FAIL_MDS_DONE_WRITING_PACK 0x127
+#define OBD_FAIL_MDS_ALLOC_OBDO 0x128
+#define OBD_FAIL_MDS_PAUSE_OPEN 0x129
+#define OBD_FAIL_MDS_STATFS_LCW_SLEEP 0x12a
+#define OBD_FAIL_MDS_OPEN_CREATE 0x12b
+#define OBD_FAIL_MDS_OST_SETATTR 0x12c
+#define OBD_FAIL_MDS_QUOTACHECK_NET 0x12d
+#define OBD_FAIL_MDS_QUOTACTL_NET 0x12e
+#define OBD_FAIL_MDS_CLIENT_ADD 0x12f
+#define OBD_FAIL_MDS_GETXATTR_NET 0x130
+#define OBD_FAIL_MDS_GETXATTR_PACK 0x131
+#define OBD_FAIL_MDS_SETXATTR_NET 0x132
+#define OBD_FAIL_MDS_SETXATTR 0x133
+#define OBD_FAIL_MDS_SETXATTR_WRITE 0x134
+#define OBD_FAIL_MDS_FS_SETUP 0x135
+#define OBD_FAIL_MDS_RESEND 0x136
+#define OBD_FAIL_MDS_LLOG_CREATE_FAILED 0x137
+#define OBD_FAIL_MDS_LOV_SYNC_RACE 0x138
+#define OBD_FAIL_MDS_OSC_PRECREATE 0x139
+#define OBD_FAIL_MDS_LLOG_SYNC_TIMEOUT 0x13a
+#define OBD_FAIL_MDS_CLOSE_NET_REP 0x13b
+#define OBD_FAIL_MDS_BLOCK_QUOTA_REQ 0x13c
+#define OBD_FAIL_MDS_DROP_QUOTA_REQ 0x13d
+#define OBD_FAIL_MDS_REMOVE_COMMON_EA 0x13e
+#define OBD_FAIL_MDS_ALLOW_COMMON_EA_SETTING 0x13f
+#define OBD_FAIL_MDS_FAIL_LOV_LOG_ADD 0x140
+#define OBD_FAIL_MDS_LOV_PREP_CREATE 0x141
+#define OBD_FAIL_MDS_REINT_DELAY 0x142
+#define OBD_FAIL_MDS_READLINK_EPROTO 0x143
+#define OBD_FAIL_MDS_OPEN_WAIT_CREATE 0x144
+#define OBD_FAIL_MDS_PDO_LOCK 0x145
+#define OBD_FAIL_MDS_PDO_LOCK2 0x146
+#define OBD_FAIL_MDS_OSC_CREATE_FAIL 0x147
+#define OBD_FAIL_MDS_NEGATIVE_POSITIVE 0x148
+#define OBD_FAIL_MDS_HSM_STATE_GET_NET 0x149
+#define OBD_FAIL_MDS_HSM_STATE_SET_NET 0x14a
+#define OBD_FAIL_MDS_HSM_PROGRESS_NET 0x14b
+#define OBD_FAIL_MDS_HSM_REQUEST_NET 0x14c
+#define OBD_FAIL_MDS_HSM_CT_REGISTER_NET 0x14d
+#define OBD_FAIL_MDS_HSM_CT_UNREGISTER_NET 0x14e
+#define OBD_FAIL_MDS_SWAP_LAYOUTS_NET 0x14f
+#define OBD_FAIL_MDS_HSM_ACTION_NET 0x150
+#define OBD_FAIL_MDS_CHANGELOG_INIT 0x151
+
+/* layout lock */
+#define OBD_FAIL_MDS_NO_LL_GETATTR 0x170
+#define OBD_FAIL_MDS_NO_LL_OPEN 0x171
+#define OBD_FAIL_MDS_LL_BLOCK 0x172
+
+/* CMD */
+#define OBD_FAIL_MDS_IS_SUBDIR_NET 0x180
+#define OBD_FAIL_MDS_IS_SUBDIR_PACK 0x181
+#define OBD_FAIL_MDS_SET_INFO_NET 0x182
+#define OBD_FAIL_MDS_WRITEPAGE_NET 0x183
+#define OBD_FAIL_MDS_WRITEPAGE_PACK 0x184
+#define OBD_FAIL_MDS_RECOVERY_ACCEPTS_GAPS 0x185
+#define OBD_FAIL_MDS_GET_INFO_NET 0x186
+#define OBD_FAIL_MDS_DQACQ_NET 0x187
+
+/* OI scrub */
+#define OBD_FAIL_OSD_SCRUB_DELAY 0x190
+#define OBD_FAIL_OSD_SCRUB_CRASH 0x191
+#define OBD_FAIL_OSD_SCRUB_FATAL 0x192
+#define OBD_FAIL_OSD_FID_MAPPING 0x193
+#define OBD_FAIL_OSD_LMA_INCOMPAT 0x194
+
+#define OBD_FAIL_OST 0x200
+#define OBD_FAIL_OST_CONNECT_NET 0x201
+#define OBD_FAIL_OST_DISCONNECT_NET 0x202
+#define OBD_FAIL_OST_GET_INFO_NET 0x203
+#define OBD_FAIL_OST_CREATE_NET 0x204
+#define OBD_FAIL_OST_DESTROY_NET 0x205
+#define OBD_FAIL_OST_GETATTR_NET 0x206
+#define OBD_FAIL_OST_SETATTR_NET 0x207
+#define OBD_FAIL_OST_OPEN_NET 0x208
+#define OBD_FAIL_OST_CLOSE_NET 0x209
+#define OBD_FAIL_OST_BRW_NET 0x20a
+#define OBD_FAIL_OST_PUNCH_NET 0x20b
+#define OBD_FAIL_OST_STATFS_NET 0x20c
+#define OBD_FAIL_OST_HANDLE_UNPACK 0x20d
+#define OBD_FAIL_OST_BRW_WRITE_BULK 0x20e
+#define OBD_FAIL_OST_BRW_READ_BULK 0x20f
+#define OBD_FAIL_OST_SYNC_NET 0x210
+#define OBD_FAIL_OST_ALL_REPLY_NET 0x211
+#define OBD_FAIL_OST_ALL_REQUEST_NET 0x212
+#define OBD_FAIL_OST_LDLM_REPLY_NET 0x213
+#define OBD_FAIL_OST_BRW_PAUSE_BULK 0x214
+#define OBD_FAIL_OST_ENOSPC 0x215
+#define OBD_FAIL_OST_EROFS 0x216
+#define OBD_FAIL_OST_ENOENT 0x217
+#define OBD_FAIL_OST_QUOTACHECK_NET 0x218
+#define OBD_FAIL_OST_QUOTACTL_NET 0x219
+#define OBD_FAIL_OST_CHECKSUM_RECEIVE 0x21a
+#define OBD_FAIL_OST_CHECKSUM_SEND 0x21b
+#define OBD_FAIL_OST_BRW_SIZE 0x21c
+#define OBD_FAIL_OST_DROP_REQ 0x21d
+#define OBD_FAIL_OST_SETATTR_CREDITS 0x21e
+#define OBD_FAIL_OST_HOLD_WRITE_RPC 0x21f
+#define OBD_FAIL_OST_BRW_WRITE_BULK2 0x220
+#define OBD_FAIL_OST_LLOG_RECOVERY_TIMEOUT 0x221
+#define OBD_FAIL_OST_CANCEL_COOKIE_TIMEOUT 0x222
+#define OBD_FAIL_OST_PAUSE_CREATE 0x223
+#define OBD_FAIL_OST_BRW_PAUSE_PACK 0x224
+#define OBD_FAIL_OST_CONNECT_NET2 0x225
+#define OBD_FAIL_OST_NOMEM 0x226
+#define OBD_FAIL_OST_BRW_PAUSE_BULK2 0x227
+#define OBD_FAIL_OST_MAPBLK_ENOSPC 0x228
+#define OBD_FAIL_OST_ENOINO 0x229
+#define OBD_FAIL_OST_DQACQ_NET 0x230
+#define OBD_FAIL_OST_STATFS_EINPROGRESS 0x231
+
+#define OBD_FAIL_LDLM 0x300
+#define OBD_FAIL_LDLM_NAMESPACE_NEW 0x301
+#define OBD_FAIL_LDLM_ENQUEUE_NET 0x302
+#define OBD_FAIL_LDLM_CONVERT_NET 0x303
+#define OBD_FAIL_LDLM_CANCEL_NET 0x304
+#define OBD_FAIL_LDLM_BL_CALLBACK_NET 0x305
+#define OBD_FAIL_LDLM_CP_CALLBACK_NET 0x306
+#define OBD_FAIL_LDLM_GL_CALLBACK_NET 0x307
+#define OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR 0x308
+#define OBD_FAIL_LDLM_ENQUEUE_INTENT_ERR 0x309
+#define OBD_FAIL_LDLM_CREATE_RESOURCE 0x30a
+#define OBD_FAIL_LDLM_ENQUEUE_BLOCKED 0x30b
+#define OBD_FAIL_LDLM_REPLY 0x30c
+#define OBD_FAIL_LDLM_RECOV_CLIENTS 0x30d
+#define OBD_FAIL_LDLM_ENQUEUE_OLD_EXPORT 0x30e
+#define OBD_FAIL_LDLM_GLIMPSE 0x30f
+#define OBD_FAIL_LDLM_CANCEL_RACE 0x310
+#define OBD_FAIL_LDLM_CANCEL_EVICT_RACE 0x311
+#define OBD_FAIL_LDLM_PAUSE_CANCEL 0x312
+#define OBD_FAIL_LDLM_CLOSE_THREAD 0x313
+#define OBD_FAIL_LDLM_CANCEL_BL_CB_RACE 0x314
+#define OBD_FAIL_LDLM_CP_CB_WAIT 0x315
+#define OBD_FAIL_LDLM_OST_FAIL_RACE 0x316
+#define OBD_FAIL_LDLM_INTR_CP_AST 0x317
+#define OBD_FAIL_LDLM_CP_BL_RACE 0x318
+#define OBD_FAIL_LDLM_NEW_LOCK 0x319
+#define OBD_FAIL_LDLM_AGL_DELAY 0x31a
+#define OBD_FAIL_LDLM_AGL_NOLOCK 0x31b
+#define OBD_FAIL_LDLM_OST_LVB 0x31c
+
+/* LOCKLESS IO */
+#define OBD_FAIL_LDLM_SET_CONTENTION 0x385
+
+#define OBD_FAIL_OSC 0x400
+#define OBD_FAIL_OSC_BRW_READ_BULK 0x401
+#define OBD_FAIL_OSC_BRW_WRITE_BULK 0x402
+#define OBD_FAIL_OSC_LOCK_BL_AST 0x403
+#define OBD_FAIL_OSC_LOCK_CP_AST 0x404
+#define OBD_FAIL_OSC_MATCH 0x405
+#define OBD_FAIL_OSC_BRW_PREP_REQ 0x406
+#define OBD_FAIL_OSC_SHUTDOWN 0x407
+#define OBD_FAIL_OSC_CHECKSUM_RECEIVE 0x408
+#define OBD_FAIL_OSC_CHECKSUM_SEND 0x409
+#define OBD_FAIL_OSC_BRW_PREP_REQ2 0x40a
+#define OBD_FAIL_OSC_CONNECT_CKSUM 0x40b
+#define OBD_FAIL_OSC_CKSUM_ADLER_ONLY 0x40c
+#define OBD_FAIL_OSC_DIO_PAUSE 0x40d
+#define OBD_FAIL_OSC_OBJECT_CONTENTION 0x40e
+#define OBD_FAIL_OSC_CP_CANCEL_RACE 0x40f
+#define OBD_FAIL_OSC_CP_ENQ_RACE 0x410
+#define OBD_FAIL_OSC_NO_GRANT 0x411
+#define OBD_FAIL_OSC_DELAY_SETTIME 0x412
+
+#define OBD_FAIL_PTLRPC 0x500
+#define OBD_FAIL_PTLRPC_ACK 0x501
+#define OBD_FAIL_PTLRPC_RQBD 0x502
+#define OBD_FAIL_PTLRPC_BULK_GET_NET 0x503
+#define OBD_FAIL_PTLRPC_BULK_PUT_NET 0x504
+#define OBD_FAIL_PTLRPC_DROP_RPC 0x505
+#define OBD_FAIL_PTLRPC_DELAY_SEND 0x506
+#define OBD_FAIL_PTLRPC_DELAY_RECOV 0x507
+#define OBD_FAIL_PTLRPC_CLIENT_BULK_CB 0x508
+#define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a
+#define OBD_FAIL_PTLRPC_PAUSE_REP 0x50c
+#define OBD_FAIL_PTLRPC_IMP_DEACTIVE 0x50d
+#define OBD_FAIL_PTLRPC_DUMP_LOG 0x50e
+#define OBD_FAIL_PTLRPC_LONG_REPL_UNLINK 0x50f
+#define OBD_FAIL_PTLRPC_LONG_BULK_UNLINK 0x510
+#define OBD_FAIL_PTLRPC_HPREQ_TIMEOUT 0x511
+#define OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT 0x512
+#define OBD_FAIL_PTLRPC_DROP_REQ_OPC 0x513
+#define OBD_FAIL_PTLRPC_FINISH_REPLAY 0x514
+#define OBD_FAIL_PTLRPC_CLIENT_BULK_CB2 0x515
+#define OBD_FAIL_PTLRPC_DELAY_IMP_FULL 0x516
+#define OBD_FAIL_PTLRPC_CANCEL_RESEND 0x517
+
+#define OBD_FAIL_OBD_PING_NET 0x600
+#define OBD_FAIL_OBD_LOG_CANCEL_NET 0x601
+#define OBD_FAIL_OBD_LOGD_NET 0x602
+#define OBD_FAIL_OBD_QC_CALLBACK_NET 0x603
+#define OBD_FAIL_OBD_DQACQ 0x604
+#define OBD_FAIL_OBD_LLOG_SETUP 0x605
+#define OBD_FAIL_OBD_LOG_CANCEL_REP 0x606
+#define OBD_FAIL_OBD_IDX_READ_NET 0x607
+#define OBD_FAIL_OBD_IDX_READ_BREAK 0x608
+#define OBD_FAIL_OBD_NO_LRU 0x609
+
+#define OBD_FAIL_TGT_REPLY_NET 0x700
+#define OBD_FAIL_TGT_CONN_RACE 0x701
+#define OBD_FAIL_TGT_FORCE_RECONNECT 0x702
+#define OBD_FAIL_TGT_DELAY_CONNECT 0x703
+#define OBD_FAIL_TGT_DELAY_RECONNECT 0x704
+#define OBD_FAIL_TGT_DELAY_PRECREATE 0x705
+#define OBD_FAIL_TGT_TOOMANY_THREADS 0x706
+#define OBD_FAIL_TGT_REPLAY_DROP 0x707
+#define OBD_FAIL_TGT_FAKE_EXP 0x708
+#define OBD_FAIL_TGT_REPLAY_DELAY 0x709
+#define OBD_FAIL_TGT_LAST_REPLAY 0x710
+#define OBD_FAIL_TGT_CLIENT_ADD 0x711
+#define OBD_FAIL_TGT_RCVG_FLAG 0x712
+
+#define OBD_FAIL_MDC_REVALIDATE_PAUSE 0x800
+#define OBD_FAIL_MDC_ENQUEUE_PAUSE 0x801
+#define OBD_FAIL_MDC_OLD_EXT_FLAGS 0x802
+#define OBD_FAIL_MDC_GETATTR_ENQUEUE 0x803
+#define OBD_FAIL_MDC_RPCS_SEM 0x804
+#define OBD_FAIL_MDC_LIGHTWEIGHT 0x805
+
+#define OBD_FAIL_MGS 0x900
+#define OBD_FAIL_MGS_ALL_REQUEST_NET 0x901
+#define OBD_FAIL_MGS_ALL_REPLY_NET 0x902
+#define OBD_FAIL_MGC_PAUSE_PROCESS_LOG 0x903
+#define OBD_FAIL_MGS_PAUSE_REQ 0x904
+#define OBD_FAIL_MGS_PAUSE_TARGET_REG 0x905
+
+#define OBD_FAIL_QUOTA_DQACQ_NET 0xA01
+#define OBD_FAIL_QUOTA_EDQUOT 0xA02
+#define OBD_FAIL_QUOTA_DELAY_REINT 0xA03
+#define OBD_FAIL_QUOTA_RECOVERABLE_ERR 0xA04
+
+#define OBD_FAIL_LPROC_REMOVE 0xB00
+
+#define OBD_FAIL_GENERAL_ALLOC 0xC00
+
+#define OBD_FAIL_SEQ 0x1000
+#define OBD_FAIL_SEQ_QUERY_NET 0x1001
+#define OBD_FAIL_SEQ_EXHAUST 0x1002
+
+#define OBD_FAIL_FLD 0x1100
+#define OBD_FAIL_FLD_QUERY_NET 0x1101
+
+#define OBD_FAIL_SEC_CTX 0x1200
+#define OBD_FAIL_SEC_CTX_INIT_NET 0x1201
+#define OBD_FAIL_SEC_CTX_INIT_CONT_NET 0x1202
+#define OBD_FAIL_SEC_CTX_FINI_NET 0x1203
+#define OBD_FAIL_SEC_CTX_HDL_PAUSE 0x1204
+
+#define OBD_FAIL_LLOG 0x1300
+#define OBD_FAIL_LLOG_ORIGIN_CONNECT_NET 0x1301
+#define OBD_FAIL_LLOG_ORIGIN_HANDLE_CREATE_NET 0x1302
+#define OBD_FAIL_LLOG_ORIGIN_HANDLE_DESTROY_NET 0x1303
+#define OBD_FAIL_LLOG_ORIGIN_HANDLE_READ_HEADER_NET 0x1304
+#define OBD_FAIL_LLOG_ORIGIN_HANDLE_NEXT_BLOCK_NET 0x1305
+#define OBD_FAIL_LLOG_ORIGIN_HANDLE_PREV_BLOCK_NET 0x1306
+#define OBD_FAIL_LLOG_ORIGIN_HANDLE_WRITE_REC_NET 0x1307
+#define OBD_FAIL_LLOG_ORIGIN_HANDLE_CLOSE_NET 0x1308
+#define OBD_FAIL_LLOG_CATINFO_NET 0x1309
+#define OBD_FAIL_MDS_SYNC_CAPA_SL 0x1310
+#define OBD_FAIL_SEQ_ALLOC 0x1311
+
+#define OBD_FAIL_LLITE 0x1400
+#define OBD_FAIL_LLITE_FAULT_TRUNC_RACE 0x1401
+#define OBD_FAIL_LOCK_STATE_WAIT_INTR 0x1402
+#define OBD_FAIL_LOV_INIT 0x1403
+#define OBD_FAIL_GLIMPSE_DELAY 0x1404
+
+#define OBD_FAIL_FID_INDIR 0x1501
+#define OBD_FAIL_FID_INLMA 0x1502
+#define OBD_FAIL_FID_IGIF 0x1504
+#define OBD_FAIL_FID_LOOKUP 0x1505
+#define OBD_FAIL_FID_NOLMA 0x1506
+
+/* LFSCK */
+#define OBD_FAIL_LFSCK_DELAY1 0x1600
+#define OBD_FAIL_LFSCK_DELAY2 0x1601
+#define OBD_FAIL_LFSCK_DELAY3 0x1602
+#define OBD_FAIL_LFSCK_LINKEA_CRASH 0x1603
+#define OBD_FAIL_LFSCK_LINKEA_MORE 0x1604
+#define OBD_FAIL_LFSCK_LINKEA_MORE2 0x1605
+#define OBD_FAIL_LFSCK_FATAL1 0x1608
+#define OBD_FAIL_LFSCK_FATAL2 0x1609
+#define OBD_FAIL_LFSCK_CRASH 0x160a
+#define OBD_FAIL_LFSCK_NO_AUTO 0x160b
+#define OBD_FAIL_LFSCK_NO_DOUBLESCAN 0x160c
+
+/* UPDATE */
+#define OBD_FAIL_UPDATE_OBJ_NET 0x1700
+#define OBD_FAIL_UPDATE_OBJ_NET_REP 0x1701
+
+
+/* Assign references to moved code to reduce code changes */
+#define OBD_FAIL_PRECHECK(id) CFS_FAIL_PRECHECK(id)
+#define OBD_FAIL_CHECK(id) CFS_FAIL_CHECK(id)
+#define OBD_FAIL_CHECK_VALUE(id, value) CFS_FAIL_CHECK_VALUE(id, value)
+#define OBD_FAIL_CHECK_ORSET(id, value) CFS_FAIL_CHECK_ORSET(id, value)
+#define OBD_FAIL_CHECK_RESET(id, value) CFS_FAIL_CHECK_RESET(id, value)
+#define OBD_FAIL_RETURN(id, ret) CFS_FAIL_RETURN(id, ret)
+#define OBD_FAIL_TIMEOUT(id, secs) CFS_FAIL_TIMEOUT(id, secs)
+#define OBD_FAIL_TIMEOUT_MS(id, ms) CFS_FAIL_TIMEOUT_MS(id, ms)
+#define OBD_FAIL_TIMEOUT_ORSET(id, value, secs) CFS_FAIL_TIMEOUT_ORSET(id, value, secs)
+#define OBD_RACE(id) CFS_RACE(id)
+#define OBD_FAIL_ONCE CFS_FAIL_ONCE
+#define OBD_FAILED CFS_FAILED
+
+extern atomic_t libcfs_kmemory;
+
+#ifdef LPROCFS
+#define obd_memory_add(size) \
+ lprocfs_counter_add(obd_memory, OBD_MEMORY_STAT, (long)(size))
+#define obd_memory_sub(size) \
+ lprocfs_counter_sub(obd_memory, OBD_MEMORY_STAT, (long)(size))
+#define obd_memory_sum() \
+ lprocfs_stats_collector(obd_memory, OBD_MEMORY_STAT, \
+ LPROCFS_FIELDS_FLAGS_SUM)
+#define obd_pages_add(order) \
+ lprocfs_counter_add(obd_memory, OBD_MEMORY_PAGES_STAT, \
+ (long)(1 << (order)))
+#define obd_pages_sub(order) \
+ lprocfs_counter_sub(obd_memory, OBD_MEMORY_PAGES_STAT, \
+ (long)(1 << (order)))
+#define obd_pages_sum() \
+ lprocfs_stats_collector(obd_memory, OBD_MEMORY_PAGES_STAT, \
+ LPROCFS_FIELDS_FLAGS_SUM)
+
+extern void obd_update_maxusage(void);
+extern __u64 obd_memory_max(void);
+extern __u64 obd_pages_max(void);
+
+#else
+
+extern __u64 obd_alloc;
+extern __u64 obd_pages;
+
+extern __u64 obd_max_alloc;
+extern __u64 obd_max_pages;
+
+static inline void obd_memory_add(long size)
+{
+ obd_alloc += size;
+ if (obd_alloc > obd_max_alloc)
+ obd_max_alloc = obd_alloc;
+}
+
+static inline void obd_memory_sub(long size)
+{
+ obd_alloc -= size;
+}
+
+static inline void obd_pages_add(int order)
+{
+ obd_pages += 1 << order;
+ if (obd_pages > obd_max_pages)
+ obd_max_pages = obd_pages;
+}
+
+static inline void obd_pages_sub(int order)
+{
+ obd_pages -= 1 << order;
+}
+
+#define obd_memory_sum() (obd_alloc)
+#define obd_pages_sum() (obd_pages)
+
+#define obd_memory_max() (obd_max_alloc)
+#define obd_pages_max() (obd_max_pages)
+
+#endif
+
+#define OBD_DEBUG_MEMUSAGE (1)
+
+#if OBD_DEBUG_MEMUSAGE
+#define OBD_ALLOC_POST(ptr, size, name) \
+ obd_memory_add(size); \
+ CDEBUG(D_MALLOC, name " '" #ptr "': %d at %p.\n", \
+ (int)(size), ptr)
+
+#define OBD_FREE_PRE(ptr, size, name) \
+ LASSERT(ptr); \
+ obd_memory_sub(size); \
+ CDEBUG(D_MALLOC, name " '" #ptr "': %d at %p.\n", \
+ (int)(size), ptr); \
+ POISON(ptr, 0x5a, size)
+
+#else /* !OBD_DEBUG_MEMUSAGE */
+
+#define OBD_ALLOC_POST(ptr, size, name) ((void)0)
+#define OBD_FREE_PRE(ptr, size, name) ((void)0)
+
+#endif /* !OBD_DEBUG_MEMUSAGE */
+
+#define HAS_FAIL_ALLOC_FLAG OBD_FAIL_CHECK(OBD_FAIL_GENERAL_ALLOC)
+
+#define OBD_ALLOC_FAIL_BITS 24
+#define OBD_ALLOC_FAIL_MASK ((1 << OBD_ALLOC_FAIL_BITS) - 1)
+#define OBD_ALLOC_FAIL_MULT (OBD_ALLOC_FAIL_MASK / 100)
+
+#if defined(LUSTRE_UTILS) /* this version is for utils only */
+#define __OBD_MALLOC_VERBOSE(ptr, cptab, cpt, size, flags) \
+do { \
+ (ptr) = (cptab) == NULL ? \
+ kmalloc(size, flags) : \
+ kmalloc_node(size, flags, cfs_cpt_spread_node(cptab, cpt)); \
+ if (unlikely((ptr) == NULL)) { \
+ CERROR("kmalloc of '" #ptr "' (%d bytes) failed at %s:%d\n", \
+ (int)(size), __FILE__, __LINE__); \
+ } else { \
+ memset(ptr, 0, size); \
+ CDEBUG(D_MALLOC, "kmalloced '" #ptr "': %d at %p\n", \
+ (int)(size), ptr); \
+ } \
+} while (0)
+
+#else /* this version is for the kernel and liblustre */
+#define OBD_FREE_RTN0(ptr) \
+({ \
+ kfree(ptr); \
+ (ptr) = NULL; \
+ 0; \
+})
+
+#define __OBD_MALLOC_VERBOSE(ptr, cptab, cpt, size, flags) \
+do { \
+ (ptr) = (cptab) == NULL ? \
+ kmalloc(size, flags | __GFP_ZERO) : \
+ kmalloc_node(size, flags | __GFP_ZERO, \
+ cfs_cpt_spread_node(cptab, cpt)); \
+ if (likely((ptr) != NULL && \
+ (!HAS_FAIL_ALLOC_FLAG || obd_alloc_fail_rate == 0 || \
+ !obd_alloc_fail(ptr, #ptr, "km", size, \
+ __FILE__, __LINE__) || \
+ OBD_FREE_RTN0(ptr)))){ \
+ OBD_ALLOC_POST(ptr, size, "kmalloced"); \
+ } \
+} while (0)
+#endif
+
+#define OBD_ALLOC_GFP(ptr, size, gfp_mask) \
+ __OBD_MALLOC_VERBOSE(ptr, NULL, 0, size, gfp_mask)
+
+#define OBD_ALLOC(ptr, size) OBD_ALLOC_GFP(ptr, size, __GFP_IO)
+#define OBD_ALLOC_WAIT(ptr, size) OBD_ALLOC_GFP(ptr, size, GFP_IOFS)
+#define OBD_ALLOC_PTR(ptr) OBD_ALLOC(ptr, sizeof *(ptr))
+#define OBD_ALLOC_PTR_WAIT(ptr) OBD_ALLOC_WAIT(ptr, sizeof *(ptr))
+
+#define OBD_CPT_ALLOC_GFP(ptr, cptab, cpt, size, gfp_mask) \
+ __OBD_MALLOC_VERBOSE(ptr, cptab, cpt, size, gfp_mask)
+
+#define OBD_CPT_ALLOC(ptr, cptab, cpt, size) \
+ OBD_CPT_ALLOC_GFP(ptr, cptab, cpt, size, __GFP_IO)
+
+#define OBD_CPT_ALLOC_PTR(ptr, cptab, cpt) \
+ OBD_CPT_ALLOC(ptr, cptab, cpt, sizeof *(ptr))
+
+# define __OBD_VMALLOC_VERBOSE(ptr, cptab, cpt, size) \
+do { \
+ (ptr) = (cptab) == NULL ? \
+ vzalloc(size) : \
+ vzalloc_node(size, cfs_cpt_spread_node(cptab, cpt)); \
+ if (unlikely((ptr) == NULL)) { \
+ CERROR("vmalloc of '" #ptr "' (%d bytes) failed\n", \
+ (int)(size)); \
+ CERROR(LPU64" total bytes allocated by Lustre, %d by LNET\n", \
+ obd_memory_sum(), atomic_read(&libcfs_kmemory)); \
+ } else { \
+ OBD_ALLOC_POST(ptr, size, "vmalloced"); \
+ } \
+} while (0)
+
+# define OBD_VMALLOC(ptr, size) \
+ __OBD_VMALLOC_VERBOSE(ptr, NULL, 0, size)
+# define OBD_CPT_VMALLOC(ptr, cptab, cpt, size) \
+ __OBD_VMALLOC_VERBOSE(ptr, cptab, cpt, size)
+
+
+/* Allocations above this size are considered too big and cannot be done
+ * atomically.
+ *
+ * Be very careful when changing this value, especially when decreasing it,
+ * since vmalloc in Linux doesn't perform well on multi-core systems and
+ * calling vmalloc on a critical path would hurt performance badly. See LU-66.
+ */
+#define OBD_ALLOC_BIG (4 * PAGE_CACHE_SIZE)
+
+#define OBD_ALLOC_LARGE(ptr, size) \
+do { \
+ if (size > OBD_ALLOC_BIG) \
+ OBD_VMALLOC(ptr, size); \
+ else \
+ OBD_ALLOC(ptr, size); \
+} while (0)
+
+#define OBD_CPT_ALLOC_LARGE(ptr, cptab, cpt, size) \
+do { \
+ if (size > OBD_ALLOC_BIG) \
+ OBD_CPT_VMALLOC(ptr, cptab, cpt, size); \
+ else \
+ OBD_CPT_ALLOC(ptr, cptab, cpt, size); \
+} while (0)
+
+#define OBD_FREE_LARGE(ptr, size) \
+do { \
+ if (size > OBD_ALLOC_BIG) \
+ OBD_VFREE(ptr, size); \
+ else \
+ OBD_FREE(ptr, size); \
+} while (0)
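+
+/*
+ * Illustrative sketch (not part of the original patch): OBD_ALLOC_LARGE()
+ * transparently falls back to vmalloc above OBD_ALLOC_BIG and must be paired
+ * with OBD_FREE_LARGE(). The helper below is hypothetical and kept under
+ * #if 0 as documentation only.
+ */
+#if 0
+static inline void *obd_example_alloc_buffer(int size)
+{
+ void *buf;
+
+ OBD_ALLOC_LARGE(buf, size); /* zeroed; kmalloc or vmalloc by size */
+ return buf; /* release with OBD_FREE_LARGE(buf, size) */
+}
+#endif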
+
+
+#ifdef CONFIG_DEBUG_SLAB
+#define POISON(ptr, c, s) do {} while (0)
+#define POISON_PTR(ptr) ((void)0)
+#else
+#define POISON(ptr, c, s) memset(ptr, c, s)
+#define POISON_PTR(ptr) (ptr) = (void *)0xdeadbeef
+#endif
+
+#ifdef POISON_BULK
+#define POISON_PAGE(page, val) do { memset(kmap(page), val, PAGE_CACHE_SIZE); \
+ kunmap(page); } while (0)
+#else
+#define POISON_PAGE(page, val) do { } while (0)
+#endif
+
+#define OBD_FREE(ptr, size) \
+do { \
+ OBD_FREE_PRE(ptr, size, "kfreed"); \
+ kfree(ptr); \
+ POISON_PTR(ptr); \
+} while (0)
+
+
+#define OBD_FREE_RCU(ptr, size, handle) \
+do { \
+ struct portals_handle *__h = (handle); \
+ \
+ LASSERT(handle != NULL); \
+ __h->h_cookie = (unsigned long)(ptr); \
+ __h->h_size = (size); \
+ call_rcu(&__h->h_rcu, class_handle_free_cb); \
+ POISON_PTR(ptr); \
+} while (0)
+
+
+#define OBD_VFREE(ptr, size) \
+ do { \
+ OBD_FREE_PRE(ptr, size, "vfreed"); \
+ vfree(ptr); \
+ POISON_PTR(ptr); \
+ } while (0)
+
+/* We memset() the slab object to 0 when allocation succeeds, so DO NOT
+ * HAVE A CTOR THAT DOES ANYTHING. Its work will be cleared here. We'd
+ * love to assert on that, but slab.c keeps kmem_cache_s all to itself. */
+#define OBD_SLAB_FREE_RTN0(ptr, slab) \
+({ \
+ kmem_cache_free((slab), (ptr)); \
+ (ptr) = NULL; \
+ 0; \
+})
+
+#define __OBD_SLAB_ALLOC_VERBOSE(ptr, slab, cptab, cpt, size, type) \
+do { \
+ LASSERT(ergo((type) != GFP_ATOMIC, !in_interrupt())); \
+ (ptr) = (cptab) == NULL ? \
+ kmem_cache_alloc(slab, type | __GFP_ZERO) : \
+ kmem_cache_alloc_node(slab, type | __GFP_ZERO, \
+ cfs_cpt_spread_node(cptab, cpt)); \
+ if (likely((ptr) != NULL && \
+ (!HAS_FAIL_ALLOC_FLAG || obd_alloc_fail_rate == 0 || \
+ !obd_alloc_fail(ptr, #ptr, "slab-", size, \
+ __FILE__, __LINE__) || \
+ OBD_SLAB_FREE_RTN0(ptr, slab)))) { \
+ OBD_ALLOC_POST(ptr, size, "slab-alloced"); \
+ } \
+} while (0)
+
+#define OBD_SLAB_ALLOC_GFP(ptr, slab, size, flags) \
+ __OBD_SLAB_ALLOC_VERBOSE(ptr, slab, NULL, 0, size, flags)
+#define OBD_SLAB_CPT_ALLOC_GFP(ptr, slab, cptab, cpt, size, flags) \
+ __OBD_SLAB_ALLOC_VERBOSE(ptr, slab, cptab, cpt, size, flags)
+
+#define OBD_FREE_PTR(ptr) OBD_FREE(ptr, sizeof *(ptr))
+
+#define OBD_SLAB_FREE(ptr, slab, size) \
+do { \
+ OBD_FREE_PRE(ptr, size, "slab-freed"); \
+ kmem_cache_free(slab, ptr); \
+ POISON_PTR(ptr); \
+} while (0)
+
+#define OBD_SLAB_ALLOC(ptr, slab, size) \
+ OBD_SLAB_ALLOC_GFP(ptr, slab, size, __GFP_IO)
+
+#define OBD_SLAB_CPT_ALLOC(ptr, slab, cptab, cpt, size) \
+ OBD_SLAB_CPT_ALLOC_GFP(ptr, slab, cptab, cpt, size, __GFP_IO)
+
+#define OBD_SLAB_ALLOC_PTR(ptr, slab) \
+ OBD_SLAB_ALLOC(ptr, slab, sizeof *(ptr))
+
+#define OBD_SLAB_CPT_ALLOC_PTR(ptr, slab, cptab, cpt) \
+ OBD_SLAB_CPT_ALLOC(ptr, slab, cptab, cpt, sizeof *(ptr))
+
+#define OBD_SLAB_ALLOC_PTR_GFP(ptr, slab, flags) \
+ OBD_SLAB_ALLOC_GFP(ptr, slab, sizeof *(ptr), flags)
+
+#define OBD_SLAB_CPT_ALLOC_PTR_GFP(ptr, slab, cptab, cpt, flags) \
+ OBD_SLAB_CPT_ALLOC_GFP(ptr, slab, cptab, cpt, sizeof *(ptr), flags)
+
+#define OBD_SLAB_FREE_PTR(ptr, slab) \
+ OBD_SLAB_FREE((ptr), (slab), sizeof *(ptr))
+
+#define KEY_IS(str) \
+ (keylen >= (sizeof(str)-1) && memcmp(key, str, (sizeof(str)-1)) == 0)
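+
+/*
+ * Illustrative sketch (not part of the original patch): KEY_IS() expects
+ * variables named 'key' and 'keylen' in the calling scope. The handler name
+ * and key string below are hypothetical; the block is kept under #if 0 as
+ * documentation only.
+ */
+#if 0
+static int obd_example_set_info(void *key, __u32 keylen)
+{
+ if (KEY_IS("example_key")) /* compares keylen/key with the literal */
+ return 0;
+ return -EINVAL;
+}
+#endif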
+
+/* Wrapper for contiguous page frame allocation */
+#define __OBD_PAGE_ALLOC_VERBOSE(ptr, cptab, cpt, gfp_mask) \
+do { \
+ (ptr) = (cptab) == NULL ? \
+ alloc_page(gfp_mask) : \
+ alloc_pages_node(cfs_cpt_spread_node(cptab, cpt), gfp_mask, 0);\
+ if (unlikely((ptr) == NULL)) { \
+ CERROR("alloc_pages of '" #ptr "' %d page(s) / "LPU64" bytes "\
+ "failed\n", (int)1, \
+ (__u64)(1 << PAGE_CACHE_SHIFT)); \
+ CERROR(LPU64" total bytes and "LPU64" total pages " \
+ "("LPU64" bytes) allocated by Lustre, " \
+ "%d total bytes by LNET\n", \
+ obd_memory_sum(), \
+ obd_pages_sum() << PAGE_CACHE_SHIFT, \
+ obd_pages_sum(), \
+ atomic_read(&libcfs_kmemory)); \
+ } else { \
+ obd_pages_add(0); \
+ CDEBUG(D_MALLOC, "alloc_pages '" #ptr "': %d page(s) / " \
+ LPU64" bytes at %p.\n", \
+ (int)1, \
+ (__u64)(1 << PAGE_CACHE_SHIFT), ptr); \
+ } \
+} while (0)
+
+#define OBD_PAGE_ALLOC(ptr, gfp_mask) \
+ __OBD_PAGE_ALLOC_VERBOSE(ptr, NULL, 0, gfp_mask)
+#define OBD_PAGE_CPT_ALLOC(ptr, cptab, cpt, gfp_mask) \
+ __OBD_PAGE_ALLOC_VERBOSE(ptr, cptab, cpt, gfp_mask)
+
+#define OBD_PAGE_FREE(ptr) \
+do { \
+ LASSERT(ptr); \
+ obd_pages_sub(0); \
+ CDEBUG(D_MALLOC, "free_pages '" #ptr "': %d page(s) / "LPU64" bytes " \
+ "at %p.\n", \
+ (int)1, (__u64)(1 << PAGE_CACHE_SHIFT), \
+ ptr); \
+ __free_page(ptr); \
+ (ptr) = (void *)0xdeadbeef; \
+} while (0)
+
+#endif
diff --git a/drivers/staging/lustre/lustre/lclient/glimpse.c b/drivers/staging/lustre/lustre/lclient/glimpse.c
new file mode 100644
index 0000000..7bbca4b
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lclient/glimpse.c
@@ -0,0 +1,269 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * glimpse code shared between vvp and liblustre (and other Lustre clients in
+ * the future).
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Oleg Drokin <oleg.drokin@sun.com>
+ */
+
+#include <linux/libcfs/libcfs.h>
+#include <obd_class.h>
+#include <obd_support.h>
+#include <obd.h>
+
+# include <lustre_dlm.h>
+# include <lustre_lite.h>
+# include <lustre_mdc.h>
+# include <linux/pagemap.h>
+# include <linux/file.h>
+
+#include "cl_object.h"
+#include "lclient.h"
+# include "../llite/llite_internal.h"
+
+static const struct cl_lock_descr whole_file = {
+ .cld_start = 0,
+ .cld_end = CL_PAGE_EOF,
+ .cld_mode = CLM_READ
+};
+
+/*
+ * Check whether the file has possible unwritten pages.
+ *
+ * \retval 1 file is mmap-ed or has dirty pages
+ * \retval 0 otherwise
+ */
+blkcnt_t dirty_cnt(struct inode *inode)
+{
+ blkcnt_t cnt = 0;
+ struct ccc_object *vob = cl_inode2ccc(inode);
+ void *results[1];
+
+ if (inode->i_mapping != NULL)
+ cnt += radix_tree_gang_lookup_tag(&inode->i_mapping->page_tree,
+ results, 0, 1,
+ PAGECACHE_TAG_DIRTY);
+ if (cnt == 0 && atomic_read(&vob->cob_mmap_cnt) > 0)
+ cnt = 1;
+
+ return (cnt > 0) ? 1 : 0;
+}
+
+int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
+ struct inode *inode, struct cl_object *clob, int agl)
+{
+ struct cl_lock_descr *descr = &ccc_env_info(env)->cti_descr;
+ struct cl_inode_info *lli = cl_i2info(inode);
+ const struct lu_fid *fid = lu_object_fid(&clob->co_lu);
+ struct ccc_io *cio = ccc_env_io(env);
+ struct cl_lock *lock;
+ int result;
+
+ result = 0;
+ if (!(lli->lli_flags & LLIF_MDS_SIZE_LOCK)) {
+ CDEBUG(D_DLMTRACE, "Glimpsing inode "DFID"\n", PFID(fid));
+ if (lli->lli_has_smd) {
+ /* NOTE: this looks like a DLM lock request, but it may
+ * not be one. Due to the CEF_ASYNC flag (translated
+ * to LDLM_FL_HAS_INTENT by osc), this is a
+ * glimpse request that won't revoke any
+ * conflicting DLM locks held. Instead,
+ * ll_glimpse_callback() will be called on each
+ * client holding a DLM lock against this file,
+ * and the resulting size will be returned for each
+ * stripe. A DLM lock on [0, EOF] is acquired only
+ * if there were no conflicting locks. If there
+ * were conflicting locks, enqueuing or waiting
+ * fails with -ENAVAIL, but valid inode
+ * attributes are returned anyway. */
+ *descr = whole_file;
+ descr->cld_obj = clob;
+ descr->cld_mode = CLM_PHANTOM;
+ descr->cld_enq_flags = CEF_ASYNC | CEF_MUST;
+ if (agl)
+ descr->cld_enq_flags |= CEF_AGL;
+ cio->cui_glimpse = 1;
+ /*
+ * CEF_ASYNC is used because glimpse sub-locks cannot
+ * deadlock (because they never conflict with other
+ * locks) and, hence, can be enqueued out-of-order.
+ *
+ * CEF_MUST protects glimpse lock from conversion into
+ * a lockless mode.
+ */
+ lock = cl_lock_request(env, io, descr, "glimpse",
+ current);
+ cio->cui_glimpse = 0;
+
+ if (lock == NULL)
+ return 0;
+
+ if (IS_ERR(lock))
+ return PTR_ERR(lock);
+
+ LASSERT(agl == 0);
+ result = cl_wait(env, lock);
+ if (result == 0) {
+ cl_merge_lvb(env, inode);
+ if (cl_isize_read(inode) > 0 &&
+ inode->i_blocks == 0) {
+ /*
+ * LU-417: Add the dirty-pages block
+ * count lest i_blocks report 0 and
+ * some "cp" or "tar" think it's a
+ * completely sparse file and skip it.
+ */
+ inode->i_blocks = dirty_cnt(inode);
+ }
+ cl_unuse(env, lock);
+ }
+ cl_lock_release(env, lock, "glimpse", current);
+ } else {
+ CDEBUG(D_DLMTRACE, "No objects for inode\n");
+ cl_merge_lvb(env, inode);
+ }
+ }
+
+ return result;
+}
+
+static int cl_io_get(struct inode *inode, struct lu_env **envout,
+ struct cl_io **ioout, int *refcheck)
+{
+ struct lu_env *env;
+ struct cl_io *io;
+ struct cl_inode_info *lli = cl_i2info(inode);
+ struct cl_object *clob = lli->lli_clob;
+ int result;
+
+ if (S_ISREG(cl_inode_mode(inode))) {
+ env = cl_env_get(refcheck);
+ if (!IS_ERR(env)) {
+ io = ccc_env_thread_io(env);
+ io->ci_obj = clob;
+ *envout = env;
+ *ioout = io;
+ result = +1;
+ } else
+ result = PTR_ERR(env);
+ } else
+ result = 0;
+ return result;
+}
+
+int cl_glimpse_size0(struct inode *inode, int agl)
+{
+ /*
+ * We don't need ast_flags argument to cl_glimpse_size(), because
+ * osc_lock_enqueue() takes care of the possible deadlock that said
+ * argument was introduced to avoid.
+ */
+ /*
+ * XXX but note that ll_file_seek() passes LDLM_FL_BLOCK_NOWAIT to
+ * cl_glimpse_size(), which doesn't make sense: glimpse locks are not
+ * blocking anyway.
+ */
+ struct lu_env *env = NULL;
+ struct cl_io *io = NULL;
+ int result;
+ int refcheck;
+
+ result = cl_io_get(inode, &env, &io, &refcheck);
+ if (result > 0) {
+ again:
+ io->ci_verify_layout = 1;
+ result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
+ if (result > 0)
+ /*
+ * nothing to do for this io. This currently happens
+ * when stripe sub-objects are not yet created.
+ */
+ result = io->ci_result;
+ else if (result == 0)
+ result = cl_glimpse_lock(env, io, inode, io->ci_obj,
+ agl);
+
+ OBD_FAIL_TIMEOUT(OBD_FAIL_GLIMPSE_DELAY, 2);
+ cl_io_fini(env, io);
+ if (unlikely(io->ci_need_restart))
+ goto again;
+ cl_env_put(env, &refcheck);
+ }
+ return result;
+}
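+
+/*
+ * Illustrative call pattern (a sketch, not a real caller; users typically go
+ * through the cl_glimpse_size() helper mentioned in the comments above):
+ *
+ *	if (cl_glimpse_size0(inode, 0) == 0)
+ *		size = i_size_read(inode);	// size refreshed from the OSTs
+ *
+ * Passing agl = 1 adds CEF_AGL to the enqueue flags, turning the glimpse
+ * into an asynchronous glimpse lock (AGL) request.
+ */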
+
+int cl_local_size(struct inode *inode)
+{
+ struct lu_env *env = NULL;
+ struct cl_io *io = NULL;
+ struct ccc_thread_info *cti;
+ struct cl_object *clob;
+ struct cl_lock_descr *descr;
+ struct cl_lock *lock;
+ int result;
+ int refcheck;
+
+ if (!cl_i2info(inode)->lli_has_smd)
+ return 0;
+
+ result = cl_io_get(inode, &env, &io, &refcheck);
+ if (result <= 0)
+ return result;
+
+ clob = io->ci_obj;
+ result = cl_io_init(env, io, CIT_MISC, clob);
+ if (result > 0)
+ result = io->ci_result;
+ else if (result == 0) {
+ cti = ccc_env_info(env);
+ descr = &cti->cti_descr;
+
+ *descr = whole_file;
+ descr->cld_obj = clob;
+ lock = cl_lock_peek(env, io, descr, "localsize", current);
+ if (lock != NULL) {
+ cl_merge_lvb(env, inode);
+ cl_unuse(env, lock);
+ cl_lock_release(env, lock, "localsize", current);
+ result = 0;
+ } else
+ result = -ENODATA;
+ }
+ cl_io_fini(env, io);
+ cl_env_put(env, &refcheck);
+ return result;
+}
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
new file mode 100644
index 0000000..8ff38c6
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
@@ -0,0 +1,1312 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * cl code shared between vvp and liblustre (and other Lustre clients in the
+ * future).
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/quotaops.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/rbtree.h>
+
+#include <obd.h>
+#include <obd_support.h>
+#include <lustre_fid.h>
+#include <lustre_lite.h>
+#include <lustre_dlm.h>
+#include <lustre_ver.h>
+#include <lustre_mdc.h>
+#include <cl_object.h>
+
+#include <lclient.h>
+
+#include "../llite/llite_internal.h"
+
+const struct cl_req_operations ccc_req_ops;
+
+/*
+ * ccc_ prefix stands for "Common Client Code".
+ */
+
+static struct kmem_cache *ccc_lock_kmem;
+static struct kmem_cache *ccc_object_kmem;
+static struct kmem_cache *ccc_thread_kmem;
+static struct kmem_cache *ccc_session_kmem;
+static struct kmem_cache *ccc_req_kmem;
+
+static struct lu_kmem_descr ccc_caches[] = {
+ {
+ .ckd_cache = &ccc_lock_kmem,
+ .ckd_name = "ccc_lock_kmem",
+ .ckd_size = sizeof (struct ccc_lock)
+ },
+ {
+ .ckd_cache = &ccc_object_kmem,
+ .ckd_name = "ccc_object_kmem",
+ .ckd_size = sizeof (struct ccc_object)
+ },
+ {
+ .ckd_cache = &ccc_thread_kmem,
+ .ckd_name = "ccc_thread_kmem",
+ .ckd_size = sizeof (struct ccc_thread_info),
+ },
+ {
+ .ckd_cache = &ccc_session_kmem,
+ .ckd_name = "ccc_session_kmem",
+ .ckd_size = sizeof (struct ccc_session)
+ },
+ {
+ .ckd_cache = &ccc_req_kmem,
+ .ckd_name = "ccc_req_kmem",
+ .ckd_size = sizeof (struct ccc_req)
+ },
+ {
+ .ckd_cache = NULL
+ }
+};
+
+/*****************************************************************************
+ *
+ * Vvp device and device type functions.
+ *
+ */
+
+void *ccc_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key)
+{
+ struct ccc_thread_info *info;
+
+ OBD_SLAB_ALLOC_PTR_GFP(info, ccc_thread_kmem, __GFP_IO);
+ if (info == NULL)
+ info = ERR_PTR(-ENOMEM);
+ return info;
+}
+
+void ccc_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
+{
+ struct ccc_thread_info *info = data;
+ OBD_SLAB_FREE_PTR(info, ccc_thread_kmem);
+}
+
+void *ccc_session_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key)
+{
+ struct ccc_session *session;
+
+ OBD_SLAB_ALLOC_PTR_GFP(session, ccc_session_kmem, __GFP_IO);
+ if (session == NULL)
+ session = ERR_PTR(-ENOMEM);
+ return session;
+}
+
+void ccc_session_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
+{
+ struct ccc_session *session = data;
+ OBD_SLAB_FREE_PTR(session, ccc_session_kmem);
+}
+
+struct lu_context_key ccc_key = {
+ .lct_tags = LCT_CL_THREAD,
+ .lct_init = ccc_key_init,
+ .lct_fini = ccc_key_fini
+};
+
+struct lu_context_key ccc_session_key = {
+ .lct_tags = LCT_SESSION,
+ .lct_init = ccc_session_key_init,
+ .lct_fini = ccc_session_key_fini
+};
+
+
+/* type constructor/destructor: ccc_type_{init,fini,start,stop}(). */
+// LU_TYPE_INIT_FINI(ccc, &ccc_key, &ccc_session_key);
+
+int ccc_device_init(const struct lu_env *env, struct lu_device *d,
+ const char *name, struct lu_device *next)
+{
+ struct ccc_device *vdv;
+ int rc;
+
+ vdv = lu2ccc_dev(d);
+ vdv->cdv_next = lu2cl_dev(next);
+
+ LASSERT(d->ld_site != NULL && next->ld_type != NULL);
+ next->ld_site = d->ld_site;
+ rc = next->ld_type->ldt_ops->ldto_device_init(
+ env, next, next->ld_type->ldt_name, NULL);
+ if (rc == 0) {
+ lu_device_get(next);
+ lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
+ }
+ return rc;
+}
+
+struct lu_device *ccc_device_fini(const struct lu_env *env,
+ struct lu_device *d)
+{
+ return cl2lu_dev(lu2ccc_dev(d)->cdv_next);
+}
+
+struct lu_device *ccc_device_alloc(const struct lu_env *env,
+ struct lu_device_type *t,
+ struct lustre_cfg *cfg,
+ const struct lu_device_operations *luops,
+ const struct cl_device_operations *clops)
+{
+ struct ccc_device *vdv;
+ struct lu_device *lud;
+ struct cl_site *site;
+ int rc;
+
+ OBD_ALLOC_PTR(vdv);
+ if (vdv == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ lud = &vdv->cdv_cl.cd_lu_dev;
+ cl_device_init(&vdv->cdv_cl, t);
+ ccc2lu_dev(vdv)->ld_ops = luops;
+ vdv->cdv_cl.cd_ops = clops;
+
+ OBD_ALLOC_PTR(site);
+ if (site != NULL) {
+ rc = cl_site_init(site, &vdv->cdv_cl);
+ if (rc == 0)
+ rc = lu_site_init_finish(&site->cs_lu);
+ else {
+ LASSERT(lud->ld_site == NULL);
+ CERROR("Cannot init lu_site, rc %d.\n", rc);
+ OBD_FREE_PTR(site);
+ }
+ } else
+ rc = -ENOMEM;
+ if (rc != 0) {
+ ccc_device_free(env, lud);
+ lud = ERR_PTR(rc);
+ }
+ return lud;
+}
+
+struct lu_device *ccc_device_free(const struct lu_env *env,
+ struct lu_device *d)
+{
+ struct ccc_device *vdv = lu2ccc_dev(d);
+ struct cl_site *site = lu2cl_site(d->ld_site);
+ struct lu_device *next = cl2lu_dev(vdv->cdv_next);
+
+ if (d->ld_site != NULL) {
+ cl_site_fini(site);
+ OBD_FREE_PTR(site);
+ }
+ cl_device_fini(lu2cl_dev(d));
+ OBD_FREE_PTR(vdv);
+ return next;
+}
+
+int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
+ struct cl_req *req)
+{
+ struct ccc_req *vrq;
+ int result;
+
+ OBD_SLAB_ALLOC_PTR_GFP(vrq, ccc_req_kmem, __GFP_IO);
+ if (vrq != NULL) {
+ cl_req_slice_add(req, &vrq->crq_cl, dev, &ccc_req_ops);
+ result = 0;
+ } else
+ result = -ENOMEM;
+ return result;
+}
+
+/**
+ * An `emergency' environment used by ccc_inode_fini() when cl_env_get()
+ * fails. Access to this environment is serialized by ccc_inode_fini_guard
+ * mutex.
+ */
+static struct lu_env *ccc_inode_fini_env = NULL;
+
+/**
+ * A mutex serializing calls to slp_inode_fini() under extreme memory
+ * pressure, when environments cannot be allocated.
+ */
+static DEFINE_MUTEX(ccc_inode_fini_guard);
+static int dummy_refcheck;
+
+int ccc_global_init(struct lu_device_type *device_type)
+{
+ int result;
+
+ result = lu_kmem_init(ccc_caches);
+ if (result)
+ return result;
+
+ result = lu_device_type_init(device_type);
+ if (result)
+ goto out_kmem;
+
+ ccc_inode_fini_env = cl_env_alloc(&dummy_refcheck,
+ LCT_REMEMBER|LCT_NOREF);
+ if (IS_ERR(ccc_inode_fini_env)) {
+ result = PTR_ERR(ccc_inode_fini_env);
+ goto out_device;
+ }
+
+ ccc_inode_fini_env->le_ctx.lc_cookie = 0x4;
+ return 0;
+out_device:
+ lu_device_type_fini(device_type);
+out_kmem:
+ lu_kmem_fini(ccc_caches);
+ return result;
+}
+
+void ccc_global_fini(struct lu_device_type *device_type)
+{
+ if (ccc_inode_fini_env != NULL) {
+ cl_env_put(ccc_inode_fini_env, &dummy_refcheck);
+ ccc_inode_fini_env = NULL;
+ }
+ lu_device_type_fini(device_type);
+ lu_kmem_fini(ccc_caches);
+}
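+
+/*
+ * Expected pairing (a minimal sketch; "my_device_type" is a placeholder for
+ * whatever lu_device_type the caller registers):
+ *
+ *	rc = ccc_global_init(&my_device_type);	// module init
+ *	if (rc)
+ *		return rc;
+ *	...
+ *	ccc_global_fini(&my_device_type);	// module exit
+ *
+ * ccc_global_init() also allocates the emergency environment used by
+ * cl_inode_fini(), so ccc_global_fini() must not run while inodes are still
+ * being torn down.
+ */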
+
+/*****************************************************************************
+ *
+ * Object operations.
+ *
+ */
+
+struct lu_object *ccc_object_alloc(const struct lu_env *env,
+ const struct lu_object_header *unused,
+ struct lu_device *dev,
+ const struct cl_object_operations *clops,
+ const struct lu_object_operations *luops)
+{
+ struct ccc_object *vob;
+ struct lu_object *obj;
+
+ OBD_SLAB_ALLOC_PTR_GFP(vob, ccc_object_kmem, __GFP_IO);
+ if (vob != NULL) {
+ struct cl_object_header *hdr;
+
+ obj = ccc2lu(vob);
+ hdr = &vob->cob_header;
+ cl_object_header_init(hdr);
+ lu_object_init(obj, &hdr->coh_lu, dev);
+ lu_object_add_top(&hdr->coh_lu, obj);
+
+ vob->cob_cl.co_ops = clops;
+ obj->lo_ops = luops;
+ } else
+ obj = NULL;
+ return obj;
+}
+
+int ccc_object_init0(const struct lu_env *env,
+ struct ccc_object *vob,
+ const struct cl_object_conf *conf)
+{
+ vob->cob_inode = conf->coc_inode;
+ vob->cob_transient_pages = 0;
+ cl_object_page_init(&vob->cob_cl, sizeof(struct ccc_page));
+ return 0;
+}
+
+int ccc_object_init(const struct lu_env *env, struct lu_object *obj,
+ const struct lu_object_conf *conf)
+{
+ struct ccc_device *dev = lu2ccc_dev(obj->lo_dev);
+ struct ccc_object *vob = lu2ccc(obj);
+ struct lu_object *below;
+ struct lu_device *under;
+ int result;
+
+ under = &dev->cdv_next->cd_lu_dev;
+ below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
+ if (below != NULL) {
+ const struct cl_object_conf *cconf;
+
+ cconf = lu2cl_conf(conf);
+ INIT_LIST_HEAD(&vob->cob_pending_list);
+ lu_object_add(obj, below);
+ result = ccc_object_init0(env, vob, cconf);
+ } else
+ result = -ENOMEM;
+ return result;
+}
+
+void ccc_object_free(const struct lu_env *env, struct lu_object *obj)
+{
+ struct ccc_object *vob = lu2ccc(obj);
+
+ lu_object_fini(obj);
+ lu_object_header_fini(obj->lo_header);
+ OBD_SLAB_FREE_PTR(vob, ccc_object_kmem);
+}
+
+int ccc_lock_init(const struct lu_env *env,
+ struct cl_object *obj, struct cl_lock *lock,
+ const struct cl_io *unused,
+ const struct cl_lock_operations *lkops)
+{
+ struct ccc_lock *clk;
+ int result;
+
+ CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+
+ OBD_SLAB_ALLOC_PTR_GFP(clk, ccc_lock_kmem, __GFP_IO);
+ if (clk != NULL) {
+ cl_lock_slice_add(lock, &clk->clk_cl, obj, lkops);
+ result = 0;
+ } else
+ result = -ENOMEM;
+ return result;
+}
+
+int ccc_attr_set(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_attr *attr, unsigned valid)
+{
+ return 0;
+}
+
+int ccc_object_glimpse(const struct lu_env *env,
+ const struct cl_object *obj, struct ost_lvb *lvb)
+{
+ struct inode *inode = ccc_object_inode(obj);
+
+ lvb->lvb_mtime = cl_inode_mtime(inode);
+ lvb->lvb_atime = cl_inode_atime(inode);
+ lvb->lvb_ctime = cl_inode_ctime(inode);
+ /*
+ * LU-417: Add the dirty-pages block count lest i_blocks report 0 and some
+ * "cp" or "tar" on a remote node think it's a completely sparse file and
+ * skip it.
+ */
+ if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0)
+ lvb->lvb_blocks = dirty_cnt(inode);
+ return 0;
+}
+
+
+
+int ccc_conf_set(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_object_conf *conf)
+{
+ /* TODO: destroy all pages attached to this object. */
+ return 0;
+}
+
+static void ccc_object_size_lock(struct cl_object *obj)
+{
+ struct inode *inode = ccc_object_inode(obj);
+
+ cl_isize_lock(inode);
+ cl_object_attr_lock(obj);
+}
+
+static void ccc_object_size_unlock(struct cl_object *obj)
+{
+ struct inode *inode = ccc_object_inode(obj);
+
+ cl_object_attr_unlock(obj);
+ cl_isize_unlock(inode);
+}
+
+/*****************************************************************************
+ *
+ * Page operations.
+ *
+ */
+
+struct page *ccc_page_vmpage(const struct lu_env *env,
+ const struct cl_page_slice *slice)
+{
+ return cl2vm_page(slice);
+}
+
+int ccc_page_is_under_lock(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io)
+{
+ struct ccc_io *cio = ccc_env_io(env);
+ struct cl_lock_descr *desc = &ccc_env_info(env)->cti_descr;
+ struct cl_page *page = slice->cpl_page;
+
+ int result;
+
+ if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
+ io->ci_type == CIT_FAULT) {
+ if (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)
+ result = -EBUSY;
+ else {
+ desc->cld_start = page->cp_index;
+ desc->cld_end = page->cp_index;
+ desc->cld_obj = page->cp_obj;
+ desc->cld_mode = CLM_READ;
+ result = cl_queue_match(&io->ci_lockset.cls_done,
+ desc) ? -EBUSY : 0;
+ }
+ } else
+ result = 0;
+ return result;
+}
+
+int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice)
+{
+ /*
+ * Cached read?
+ */
+ LBUG();
+ return 0;
+}
+
+void ccc_transient_page_verify(const struct cl_page *page)
+{
+}
+
+int ccc_transient_page_own(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused,
+ int nonblock)
+{
+ ccc_transient_page_verify(slice->cpl_page);
+ return 0;
+}
+
+void ccc_transient_page_assume(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
+{
+ ccc_transient_page_verify(slice->cpl_page);
+}
+
+void ccc_transient_page_unassume(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
+{
+ ccc_transient_page_verify(slice->cpl_page);
+}
+
+void ccc_transient_page_disown(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
+{
+ ccc_transient_page_verify(slice->cpl_page);
+}
+
+void ccc_transient_page_discard(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
+{
+ struct cl_page *page = slice->cpl_page;
+
+ ccc_transient_page_verify(slice->cpl_page);
+
+ /*
+ * For transient pages, remove the page from the radix tree.
+ */
+ cl_page_delete(env, page);
+}
+
+int ccc_transient_page_prep(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
+{
+ /* transient page should always be sent. */
+ return 0;
+}
+
+/*****************************************************************************
+ *
+ * Lock operations.
+ *
+ */
+
+void ccc_lock_delete(const struct lu_env *env,
+ const struct cl_lock_slice *slice)
+{
+ CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
+}
+
+void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
+{
+ struct ccc_lock *clk = cl2ccc_lock(slice);
+ OBD_SLAB_FREE_PTR(clk, ccc_lock_kmem);
+}
+
+int ccc_lock_enqueue(const struct lu_env *env,
+ const struct cl_lock_slice *slice,
+ struct cl_io *unused, __u32 enqflags)
+{
+ CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
+ return 0;
+}
+
+int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice)
+{
+ CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
+ return 0;
+}
+
+int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice)
+{
+ CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
+ return 0;
+}
+
+/**
+ * Implementation of the cl_lock_operations::clo_fits_into() method for the
+ * ccc layer. This function is executed every time io finds an existing lock
+ * in the lock cache while creating a new lock. It has to decide whether the
+ * cached lock "fits" into the io.
+ *
+ * \param slice lock to be checked
+ * \param io IO that wants a lock.
+ *
+ * \see lov_lock_fits_into().
+ */
+int ccc_lock_fits_into(const struct lu_env *env,
+ const struct cl_lock_slice *slice,
+ const struct cl_lock_descr *need,
+ const struct cl_io *io)
+{
+ const struct cl_lock *lock = slice->cls_lock;
+ const struct cl_lock_descr *descr = &lock->cll_descr;
+ const struct ccc_io *cio = ccc_env_io(env);
+ int result;
+
+ /*
+ * Work around a DLM peculiarity: it assumes that a glimpse
+ * (LDLM_FL_HAS_INTENT) lock is always LCK_PR, and returns a read lock
+ * when asked for an LCK_PW lock with LDLM_FL_HAS_INTENT set. Make
+ * sure that glimpse doesn't get a CLM_WRITE top-lock, so that it
+ * doesn't enqueue CLM_WRITE sub-locks.
+ */
+ if (cio->cui_glimpse)
+ result = descr->cld_mode != CLM_WRITE;
+
+ /*
+ * Also, don't match incomplete write locks for read, otherwise read
+ * would enqueue missing sub-locks in the write mode.
+ */
+ else if (need->cld_mode != descr->cld_mode)
+ result = lock->cll_state >= CLS_ENQUEUED;
+ else
+ result = 1;
+ return result;
+}
+
+/**
+ * Implements the cl_lock_operations::clo_state() method for the ccc layer,
+ * invoked whenever the lock state changes. Transfers object attributes that
+ * might have been updated as a result of lock acquisition into the inode.
+ */
+void ccc_lock_state(const struct lu_env *env,
+ const struct cl_lock_slice *slice,
+ enum cl_lock_state state)
+{
+ struct cl_lock *lock = slice->cls_lock;
+
+ /*
+ * Refresh inode attributes when the lock is moving into CLS_HELD
+ * state, and only when this is a result of real enqueue, rather than
+ * of finding lock in the cache.
+ */
+ if (state == CLS_HELD && lock->cll_state < CLS_HELD) {
+ struct cl_object *obj;
+ struct inode *inode;
+
+ obj = slice->cls_obj;
+ inode = ccc_object_inode(obj);
+
+ /* vmtruncate() sets the i_size
+ * under both a DLM lock and the
+ * ll_inode_size_lock(). If we don't get the
+ * ll_inode_size_lock() here we can match the DLM lock and
+ * reset i_size. generic_file_write can then trust the
+ * stale i_size when doing appending writes and effectively
+ * cancel the result of the truncate. Getting the
+ * ll_inode_size_lock() after the enqueue maintains the DLM
+ * -> ll_inode_size_lock() acquiring order. */
+ if (lock->cll_descr.cld_start == 0 &&
+ lock->cll_descr.cld_end == CL_PAGE_EOF)
+ cl_merge_lvb(env, inode);
+ }
+}
+
+/*****************************************************************************
+ *
+ * io operations.
+ *
+ */
+
+void ccc_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
+{
+ struct cl_io *io = ios->cis_io;
+
+ CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));
+}
+
+int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
+ __u32 enqflags, enum cl_lock_mode mode,
+ pgoff_t start, pgoff_t end)
+{
+ struct ccc_io *cio = ccc_env_io(env);
+ struct cl_lock_descr *descr = &cio->cui_link.cill_descr;
+ struct cl_object *obj = io->ci_obj;
+
+ CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+
+ CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);
+
+ memset(&cio->cui_link, 0, sizeof cio->cui_link);
+
+ if (cio->cui_fd && (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+ descr->cld_mode = CLM_GROUP;
+ descr->cld_gid = cio->cui_fd->fd_grouplock.cg_gid;
+ } else {
+ descr->cld_mode = mode;
+ }
+ descr->cld_obj = obj;
+ descr->cld_start = start;
+ descr->cld_end = end;
+ descr->cld_enq_flags = enqflags;
+
+ cl_io_lock_add(env, io, &cio->cui_link);
+ return 0;
+}
+
+void ccc_io_update_iov(const struct lu_env *env,
+ struct ccc_io *cio, struct cl_io *io)
+{
+ int i;
+ size_t size = io->u.ci_rw.crw_count;
+
+ cio->cui_iov_olen = 0;
+ if (!cl_is_normalio(env, io) || cio->cui_tot_nrsegs == 0)
+ return;
+
+ for (i = 0; i < cio->cui_tot_nrsegs; i++) {
+ struct iovec *iv = &cio->cui_iov[i];
+
+ if (iv->iov_len < size)
+ size -= iv->iov_len;
+ else {
+ if (iv->iov_len > size) {
+ cio->cui_iov_olen = iv->iov_len;
+ iv->iov_len = size;
+ }
+ break;
+ }
+ }
+
+ cio->cui_nrsegs = i + 1;
+ LASSERTF(cio->cui_tot_nrsegs >= cio->cui_nrsegs,
+ "tot_nrsegs: %lu, nrsegs: %lu\n",
+ cio->cui_tot_nrsegs, cio->cui_nrsegs);
+}
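+
+/*
+ * Worked example for ccc_io_update_iov() above (illustrative numbers only):
+ * with crw_count = 10 and three iovecs of length 4, 4 and 8, the loop
+ * consumes 4 + 4 bytes from the first two segments, trims the third to
+ * 2 bytes while remembering its original length in cui_iov_olen, and sets
+ * cui_nrsegs = 3, so exactly 10 bytes are submitted for this round of I/O.
+ */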
+
+int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
+ __u32 enqflags, enum cl_lock_mode mode,
+ loff_t start, loff_t end)
+{
+ struct cl_object *obj = io->ci_obj;
+ return ccc_io_one_lock_index(env, io, enqflags, mode,
+ cl_index(obj, start), cl_index(obj, end));
+}
+
+void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
+{
+ CLOBINVRNT(env, ios->cis_io->ci_obj,
+ ccc_object_invariant(ios->cis_io->ci_obj));
+}
+
+void ccc_io_advance(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ size_t nob)
+{
+ struct ccc_io *cio = cl2ccc_io(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct cl_object *obj = ios->cis_io->ci_obj;
+
+ CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+
+ if (!cl_is_normalio(env, io))
+ return;
+
+ LASSERT(cio->cui_tot_nrsegs >= cio->cui_nrsegs);
+ LASSERT(cio->cui_tot_count >= nob);
+
+ cio->cui_iov += cio->cui_nrsegs;
+ cio->cui_tot_nrsegs -= cio->cui_nrsegs;
+ cio->cui_tot_count -= nob;
+
+ /* update the iov */
+ if (cio->cui_iov_olen > 0) {
+ struct iovec *iv;
+
+ cio->cui_iov--;
+ cio->cui_tot_nrsegs++;
+ iv = &cio->cui_iov[0];
+ if (io->ci_continue) {
+ iv->iov_base += iv->iov_len;
+ LASSERT(cio->cui_iov_olen > iv->iov_len);
+ iv->iov_len = cio->cui_iov_olen - iv->iov_len;
+ } else {
+ /* restore the iov_len, in case of restart io. */
+ iv->iov_len = cio->cui_iov_olen;
+ }
+ cio->cui_iov_olen = 0;
+ }
+}
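+
+/*
+ * Continuing the worked example given after ccc_io_update_iov(): once the
+ * 10-byte chunk completes with io->ci_continue set, the trimmed iovec is
+ * restored with iov_base advanced by 2 and iov_len = 8 - 2 = 6, so the next
+ * iteration of the io loop resumes exactly where the previous one stopped.
+ */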
+
+/**
+ * Helper function that, if necessary, adjusts the file size (inode->i_size)
+ * when the position at offset \a pos is accessed. The file size can be
+ * arbitrarily stale on a Lustre client, but the client at least knows the
+ * KMS (known minimum size). If the accessed area is inside [0, KMS], set the
+ * file size to KMS; otherwise glimpse the file size.
+ *
+ * Locking: cl_isize_lock is used to serialize changes to inode size and to
+ * protect consistency between inode size and cl_object
+ * attributes. cl_object_size_lock() protects consistency between cl_attr's of
+ * top-object and sub-objects.
+ */
+int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io, loff_t start, size_t count, int *exceed)
+{
+ struct cl_attr *attr = ccc_env_thread_attr(env);
+ struct inode *inode = ccc_object_inode(obj);
+ loff_t pos = start + count - 1;
+ loff_t kms;
+ int result;
+
+ /*
+ * Consistency guarantees: following possibilities exist for the
+ * relation between region being accessed and real file size at this
+ * moment:
+ *
+ * (A): the region is completely inside of the file;
+ *
+ * (B-x): x bytes of region are inside of the file, the rest is
+ * outside;
+ *
+ * (C): the region is completely outside of the file.
+ *
+ * This classification is stable under the DLM lock already acquired by
+ * the caller, because to change the class another client would have to
+ * take a DLM lock conflicting with ours. Also, any updates to ->i_size
+ * by other threads on this client are serialized by
+ * ll_inode_size_lock(). This guarantees that short reads are handled
+ * correctly in the face of concurrent writes and truncates.
+ */
+ ccc_object_size_lock(obj);
+ result = cl_object_attr_get(env, obj, attr);
+ if (result == 0) {
+ kms = attr->cat_kms;
+ if (pos > kms) {
+ /*
+ * A glimpse is necessary to determine whether we
+ * return a short read (B) or some zeroes at the end
+ * of the buffer (C)
+ */
+ ccc_object_size_unlock(obj);
+ result = cl_glimpse_lock(env, io, inode, obj, 0);
+ if (result == 0 && exceed != NULL) {
+ /* If the requested page index exceeds the
+ * end-of-file page index, return directly.
+ * Do not expect the kernel to check such a
+ * case correctly; linux-2.6.18-128.1.1
+ * missed doing that. --bug 17336 */
+ loff_t size = cl_isize_read(inode);
+ unsigned long cur_index = start >> PAGE_CACHE_SHIFT;
+
+ if ((size == 0 && cur_index != 0) ||
+ (((size - 1) >> PAGE_CACHE_SHIFT) < cur_index))
+ *exceed = 1;
+ }
+ return result;
+ } else {
+ /*
+ * region is within kms and, hence, within real file
+ * size (A). We need to increase i_size to cover the
+ * read region so that generic_file_read() will do its
+ * job, but that doesn't mean the kms size is
+ * _correct_, it is only the _minimum_ size. If
+ * someone does a stat they will get the correct size
+ * which will always be >= the kms value here.
+ * b=11081
+ */
+ if (cl_isize_read(inode) < kms) {
+ cl_isize_write_nolock(inode, kms);
+ CDEBUG(D_VFSTRACE,
+ DFID" updating i_size "LPU64"\n",
+ PFID(lu_object_fid(&obj->co_lu)),
+ (__u64)cl_isize_read(inode));
+
+ }
+ }
+ }
+ ccc_object_size_unlock(obj);
+ return result;
+}
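+
+/*
+ * Example (sketch with made-up numbers): for a 4096-byte read at offset 0 on
+ * a file whose kms is 8192, the accessed region lies inside [0, kms], so
+ * i_size is raised to the kms value locally (if it was smaller) and no
+ * glimpse is needed; a read at offset 1 MB on the same file has pos > kms
+ * and therefore triggers cl_glimpse_lock() to fetch the authoritative size.
+ */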
+
+/*****************************************************************************
+ *
+ * Transfer operations.
+ *
+ */
+
+void ccc_req_completion(const struct lu_env *env,
+ const struct cl_req_slice *slice, int ioret)
+{
+ struct ccc_req *vrq;
+
+ if (ioret > 0)
+ cl_stats_tally(slice->crs_dev, slice->crs_req->crq_type, ioret);
+
+ vrq = cl2ccc_req(slice);
+ OBD_SLAB_FREE_PTR(vrq, ccc_req_kmem);
+}
+
+/**
+ * Implementation of struct cl_req_operations::cro_attr_set() for ccc
+ * layer. ccc is responsible for
+ *
+ * - o_[mac]time
+ *
+ * - o_mode
+ *
+ * - o_parent_seq
+ *
+ * - o_[ug]id
+ *
+ * - o_parent_oid
+ *
+ * - o_parent_ver
+ *
+ * - o_ioepoch,
+ *
+ * and capability.
+ */
+void ccc_req_attr_set(const struct lu_env *env,
+ const struct cl_req_slice *slice,
+ const struct cl_object *obj,
+ struct cl_req_attr *attr, obd_valid flags)
+{
+ struct inode *inode;
+ struct obdo *oa;
+ obd_flag valid_flags;
+
+ oa = attr->cra_oa;
+ inode = ccc_object_inode(obj);
+ valid_flags = OBD_MD_FLTYPE;
+
+ if ((flags & OBD_MD_FLOSSCAPA) != 0) {
+ LASSERT(attr->cra_capa == NULL);
+ attr->cra_capa = cl_capa_lookup(inode,
+ slice->crs_req->crq_type);
+ }
+
+ if (slice->crs_req->crq_type == CRT_WRITE) {
+ if (flags & OBD_MD_FLEPOCH) {
+ oa->o_valid |= OBD_MD_FLEPOCH;
+ oa->o_ioepoch = cl_i2info(inode)->lli_ioepoch;
+ valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
+ OBD_MD_FLUID | OBD_MD_FLGID;
+ }
+ }
+ obdo_from_inode(oa, inode, valid_flags & flags);
+ obdo_set_parent_fid(oa, &cl_i2info(inode)->lli_fid);
+ memcpy(attr->cra_jobid, cl_i2info(inode)->lli_jobid,
+ JOBSTATS_JOBID_SIZE);
+}
+
+const struct cl_req_operations ccc_req_ops = {
+ .cro_attr_set = ccc_req_attr_set,
+ .cro_completion = ccc_req_completion
+};
+
+int cl_setattr_ost(struct inode *inode, const struct iattr *attr,
+ struct obd_capa *capa)
+{
+ struct lu_env *env;
+ struct cl_io *io;
+ int result;
+ int refcheck;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ io = ccc_env_thread_io(env);
+ io->ci_obj = cl_i2info(inode)->lli_clob;
+
+ io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime);
+ io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime);
+ io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
+ io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
+ io->u.ci_setattr.sa_valid = attr->ia_valid;
+ io->u.ci_setattr.sa_capa = capa;
+
+again:
+ if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
+ struct ccc_io *cio = ccc_env_io(env);
+
+ if (attr->ia_valid & ATTR_FILE)
+ /* populate the file descriptor for ftruncate to honor
+ * group lock - see LU-787 */
+ cio->cui_fd = cl_iattr2fd(inode, attr);
+
+ result = cl_io_loop(env, io);
+ } else {
+ result = io->ci_result;
+ }
+ cl_io_fini(env, io);
+ if (unlikely(io->ci_need_restart))
+ goto again;
+ cl_env_put(env, &refcheck);
+ return result;
+}
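+
+/*
+ * Usage sketch (assumed caller-side code, e.g. a truncate path; the iattr
+ * setup mirrors what a normal ->setattr handler would pass in):
+ *
+ *	struct iattr attr = {
+ *		.ia_valid = ATTR_SIZE,
+ *		.ia_size  = new_size,
+ *	};
+ *
+ *	rc = cl_setattr_ost(inode, &attr, NULL);	// NULL: no OSS capability
+ */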
+
+/*****************************************************************************
+ *
+ * Type conversions.
+ *
+ */
+
+struct lu_device *ccc2lu_dev(struct ccc_device *vdv)
+{
+ return &vdv->cdv_cl.cd_lu_dev;
+}
+
+struct ccc_device *lu2ccc_dev(const struct lu_device *d)
+{
+ return container_of0(d, struct ccc_device, cdv_cl.cd_lu_dev);
+}
+
+struct ccc_device *cl2ccc_dev(const struct cl_device *d)
+{
+ return container_of0(d, struct ccc_device, cdv_cl);
+}
+
+struct lu_object *ccc2lu(struct ccc_object *vob)
+{
+ return &vob->cob_cl.co_lu;
+}
+
+struct ccc_object *lu2ccc(const struct lu_object *obj)
+{
+ return container_of0(obj, struct ccc_object, cob_cl.co_lu);
+}
+
+struct ccc_object *cl2ccc(const struct cl_object *obj)
+{
+ return container_of0(obj, struct ccc_object, cob_cl);
+}
+
+struct ccc_lock *cl2ccc_lock(const struct cl_lock_slice *slice)
+{
+ return container_of(slice, struct ccc_lock, clk_cl);
+}
+
+struct ccc_io *cl2ccc_io(const struct lu_env *env,
+ const struct cl_io_slice *slice)
+{
+ struct ccc_io *cio;
+
+ cio = container_of(slice, struct ccc_io, cui_cl);
+ LASSERT(cio == ccc_env_io(env));
+ return cio;
+}
+
+struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice)
+{
+ return container_of0(slice, struct ccc_req, crq_cl);
+}
+
+struct page *cl2vm_page(const struct cl_page_slice *slice)
+{
+ return cl2ccc_page(slice)->cpg_page;
+}
+
+/*****************************************************************************
+ *
+ * Accessors.
+ *
+ */
+int ccc_object_invariant(const struct cl_object *obj)
+{
+ struct inode *inode = ccc_object_inode(obj);
+ struct cl_inode_info *lli = cl_i2info(inode);
+
+ return (S_ISREG(cl_inode_mode(inode)) ||
+ /* i_mode of unlinked inode is zeroed. */
+ cl_inode_mode(inode) == 0) && lli->lli_clob == obj;
+}
+
+struct inode *ccc_object_inode(const struct cl_object *obj)
+{
+ return cl2ccc(obj)->cob_inode;
+}
+
+/**
+ * Returns a pointer to cl_page associated with \a vmpage, without acquiring
+ * additional reference to the resulting page. This is an unsafe version of
+ * cl_vmpage_page() that can only be used under vmpage lock.
+ */
+struct cl_page *ccc_vmpage_page_transient(struct page *vmpage)
+{
+ KLASSERT(PageLocked(vmpage));
+ return (struct cl_page *)vmpage->private;
+}
+
+/**
+ * Initialize or update CLIO structures for regular files when new
+ * meta-data arrives from the server.
+ *
+ * \param inode regular file inode
+ * \param md new file metadata from MDS
+ * - allocates cl_object if necessary,
+ * - updated layout, if object was already here.
+ */
+int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
+{
+ struct lu_env *env;
+ struct cl_inode_info *lli;
+ struct cl_object *clob;
+ struct lu_site *site;
+ struct lu_fid *fid;
+ struct cl_object_conf conf = {
+ .coc_inode = inode,
+ .u = {
+ .coc_md = md
+ }
+ };
+ int result = 0;
+ int refcheck;
+
+ LASSERT(md->body->valid & OBD_MD_FLID);
+ LASSERT(S_ISREG(cl_inode_mode(inode)));
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ site = cl_i2sbi(inode)->ll_site;
+ lli = cl_i2info(inode);
+ fid = &lli->lli_fid;
+ LASSERT(fid_is_sane(fid));
+
+ if (lli->lli_clob == NULL) {
+ /* The clob is a slave of the inode: an empty lli_clob means this is
+ * a new inode with no clob in the cache for the given fid, so it is
+ * unnecessary to perform lookup-alloc-lookup-insert, just
+ * alloc and insert directly. */
+ LASSERT(inode->i_state & I_NEW);
+ conf.coc_lu.loc_flags = LOC_F_NEW;
+ clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev),
+ fid, &conf);
+ if (!IS_ERR(clob)) {
+ /*
+ * No locking is necessary, as new inode is
+ * locked by I_NEW bit.
+ */
+ lli->lli_clob = clob;
+ lli->lli_has_smd = lsm_has_objects(md->lsm);
+ lu_object_ref_add(&clob->co_lu, "inode", inode);
+ } else
+ result = PTR_ERR(clob);
+ } else {
+ result = cl_conf_set(env, lli->lli_clob, &conf);
+ }
+
+ cl_env_put(env, &refcheck);
+
+ if (result != 0)
+ CERROR("Failure to initialize cl object "DFID": %d\n",
+ PFID(fid), result);
+ return result;
+}
+
+/**
+ * Wait for others to drop their references to the object first, then drop
+ * the last one, which will lead to the object being destroyed immediately.
+ * Must be called after cl_object_kill() against this object.
+ *
+ * The reason we want to do this is that destroying the top object will wait
+ * for the sub-objects to be destroyed first, so we can't let the bottom
+ * layer (e.g. from ASTs) initiate destruction of the top object, which may
+ * deadlock. See bz22520.
+ */
+static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
+{
+ struct lu_object_header *header = obj->co_lu.lo_header;
+ wait_queue_t waiter;
+
+ if (unlikely(atomic_read(&header->loh_ref) != 1)) {
+ struct lu_site *site = obj->co_lu.lo_dev->ld_site;
+ struct lu_site_bkt_data *bkt;
+
+ bkt = lu_site_bkt_from_fid(site, &header->loh_fid);
+
+ init_waitqueue_entry_current(&waiter);
+ add_wait_queue(&bkt->lsb_marche_funebre, &waiter);
+
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (atomic_read(&header->loh_ref) == 1)
+ break;
+ waitq_wait(&waiter, TASK_UNINTERRUPTIBLE);
+ }
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&bkt->lsb_marche_funebre, &waiter);
+ }
+
+ cl_object_put(env, obj);
+}
+
+void cl_inode_fini(struct inode *inode)
+{
+ struct lu_env *env;
+ struct cl_inode_info *lli = cl_i2info(inode);
+ struct cl_object *clob = lli->lli_clob;
+ int refcheck;
+ int emergency;
+
+ if (clob != NULL) {
+ void *cookie;
+
+ cookie = cl_env_reenter();
+ env = cl_env_get(&refcheck);
+ emergency = IS_ERR(env);
+ if (emergency) {
+ mutex_lock(&ccc_inode_fini_guard);
+ LASSERT(ccc_inode_fini_env != NULL);
+ cl_env_implant(ccc_inode_fini_env, &refcheck);
+ env = ccc_inode_fini_env;
+ }
+ /*
+ * cl_object cache is a slave to inode cache (which, in turn
+ * is a slave to dentry cache), don't keep cl_object in memory
+ * when its master is evicted.
+ */
+ cl_object_kill(env, clob);
+ lu_object_ref_del(&clob->co_lu, "inode", inode);
+ cl_object_put_last(env, clob);
+ lli->lli_clob = NULL;
+ if (emergency) {
+ cl_env_unplant(ccc_inode_fini_env, &refcheck);
+ mutex_unlock(&ccc_inode_fini_guard);
+ } else
+ cl_env_put(env, &refcheck);
+ cl_env_reexit(cookie);
+ }
+}
+
+/**
+ * Return the IF_* type for the given lu_dirent entry.
+ * The IF_* flag should be converted to the particular OS file type in the
+ * platform llite module.
+ */
+__u16 ll_dirent_type_get(struct lu_dirent *ent)
+{
+ __u16 type = 0;
+ struct luda_type *lt;
+ int len = 0;
+
+ if (le32_to_cpu(ent->lde_attrs) & LUDA_TYPE) {
+ const unsigned align = sizeof(struct luda_type) - 1;
+
+ len = le16_to_cpu(ent->lde_namelen);
+ len = (len + align) & ~align;
+ lt = (void *)ent->lde_name + len;
+ type = IFTODT(le16_to_cpu(lt->lt_type));
+ }
+ return type;
+}
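+
+/*
+ * Example of the alignment step above (assuming sizeof(struct luda_type) is
+ * 2): for a 5-byte name, align = 1 and len = (5 + 1) & ~1 = 6, so lt is read
+ * from the next 2-byte boundary after the name.
+ */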
+
+/**
+ * build inode number from passed @fid */
+__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
+{
+ if (BITS_PER_LONG == 32 || api32)
+ return fid_flatten32(fid);
+ else
+ return fid_flatten(fid);
+}
+
+/**
+ * build inode generation from passed @fid. If our FID overflows the 32-bit
+ * inode number then return a non-zero generation to distinguish them. */
+__u32 cl_fid_build_gen(const struct lu_fid *fid)
+{
+ __u32 gen;
+
+ if (fid_is_igif(fid)) {
+ gen = lu_igif_gen(fid);
+ return gen;
+ }
+
+ gen = (fid_flatten(fid) >> 32);
+ return gen;
+}
+
+/* lsm is unreliable after hsm implementation as layout can be changed at
+ * any time. This is only to support old, non-clio-ized interfaces. It will
+ * cause deadlock if clio operations are called with this extra layout refcount
+ * because in case the layout changed during the IO, ll_layout_refresh() will
+ * have to wait for the refcount to become zero to destroy the older layout.
+ *
+ * Notice that the lsm returned by this function may not be valid unless called
+ * inside layout lock - MDS_INODELOCK_LAYOUT. */
+struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode)
+{
+ return lov_lsm_get(cl_i2info(inode)->lli_clob);
+}
+
+inline void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm)
+{
+ lov_lsm_put(cl_i2info(inode)->lli_clob, lsm);
+}
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c b/drivers/staging/lustre/lustre/lclient/lcommon_misc.c
new file mode 100644
index 0000000..2b4dbee
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lclient/lcommon_misc.c
@@ -0,0 +1,192 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * cl code shared between vvp and liblustre (and other Lustre clients in the
+ * future).
+ *
+ */
+#include <obd_class.h>
+#include <obd_support.h>
+#include <obd.h>
+#include <cl_object.h>
+#include <lclient.h>
+
+#include <lustre_lite.h>
+
+
+/* Initialize the default and maximum LOV EA and cookie sizes. This allows
+ * us to make MDS RPCs with large enough reply buffers to hold the
+ * maximum-sized (= maximum striped) EA and cookie without having to
+ * calculate this (via a call into the LOV + OSCs) each time we make an RPC. */
+int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp)
+{
+ struct lov_stripe_md lsm = { .lsm_magic = LOV_MAGIC_V3 };
+ __u32 valsize = sizeof(struct lov_desc);
+ int rc, easize, def_easize, cookiesize;
+ struct lov_desc desc;
+ __u16 stripes;
+
+ rc = obd_get_info(NULL, dt_exp, sizeof(KEY_LOVDESC), KEY_LOVDESC,
+ &valsize, &desc, NULL);
+ if (rc)
+ return rc;
+
+ stripes = min(desc.ld_tgt_count, (__u32)LOV_MAX_STRIPE_COUNT);
+ lsm.lsm_stripe_count = stripes;
+ easize = obd_size_diskmd(dt_exp, &lsm);
+
+ lsm.lsm_stripe_count = desc.ld_default_stripe_count;
+ def_easize = obd_size_diskmd(dt_exp, &lsm);
+
+ cookiesize = stripes * sizeof(struct llog_cookie);
+
+ CDEBUG(D_HA, "updating max_mdsize/max_cookiesize: %d/%d\n",
+ easize, cookiesize);
+
+ rc = md_init_ea_size(md_exp, easize, def_easize, cookiesize);
+ return rc;
+}
+
+/**
+ * This function is used as an upcall-callback hooked by liblustre and llite
+ * clients into obd_notify() listeners chain to handle notifications about
+ * change of import connect_flags. See llu_fsswop_mount() and
+ * lustre_common_fill_super().
+ */
+int cl_ocd_update(struct obd_device *host,
+ struct obd_device *watched,
+ enum obd_notify_event ev, void *owner, void *data)
+{
+ struct lustre_client_ocd *lco;
+ struct client_obd *cli;
+ __u64 flags;
+ int result;
+
+ if (!strcmp(watched->obd_type->typ_name, LUSTRE_OSC_NAME)) {
+ cli = &watched->u.cli;
+ lco = owner;
+ flags = cli->cl_import->imp_connect_data.ocd_connect_flags;
+ CDEBUG(D_SUPER, "Changing connect_flags: "LPX64" -> "LPX64"\n",
+ lco->lco_flags, flags);
+ mutex_lock(&lco->lco_lock);
+ lco->lco_flags &= flags;
+ /* for each osc event update ea size */
+ if (lco->lco_dt_exp)
+ cl_init_ea_size(lco->lco_md_exp, lco->lco_dt_exp);
+
+ mutex_unlock(&lco->lco_lock);
+ result = 0;
+ } else {
+ CERROR("unexpected notification from %s %s!\n",
+ watched->obd_type->typ_name,
+ watched->obd_name);
+ result = -EINVAL;
+ }
+ return result;
+}
+
+#define GROUPLOCK_SCOPE "grouplock"
+
+int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
+ struct ccc_grouplock *cg)
+{
+ struct lu_env *env;
+ struct cl_io *io;
+ struct cl_lock *lock;
+ struct cl_lock_descr *descr;
+ __u32 enqflags;
+ int refcheck;
+ int rc;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ io = ccc_env_thread_io(env);
+ io->ci_obj = obj;
+ io->ci_ignore_layout = 1;
+
+ rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
+ if (rc) {
+ LASSERT(rc < 0);
+ cl_env_put(env, &refcheck);
+ return rc;
+ }
+
+ descr = &ccc_env_info(env)->cti_descr;
+ descr->cld_obj = obj;
+ descr->cld_start = 0;
+ descr->cld_end = CL_PAGE_EOF;
+ descr->cld_gid = gid;
+ descr->cld_mode = CLM_GROUP;
+
+ enqflags = CEF_MUST | (nonblock ? CEF_NONBLOCK : 0);
+ descr->cld_enq_flags = enqflags;
+
+ lock = cl_lock_request(env, io, descr, GROUPLOCK_SCOPE, current);
+ if (IS_ERR(lock)) {
+ cl_io_fini(env, io);
+ cl_env_put(env, &refcheck);
+ return PTR_ERR(lock);
+ }
+
+ cg->cg_env = cl_env_get(&refcheck);
+ cg->cg_io = io;
+ cg->cg_lock = lock;
+ cg->cg_gid = gid;
+ LASSERT(cg->cg_env == env);
+
+ cl_env_unplant(env, &refcheck);
+ return 0;
+}
+
+void cl_put_grouplock(struct ccc_grouplock *cg)
+{
+ struct lu_env *env = cg->cg_env;
+ struct cl_io *io = cg->cg_io;
+ struct cl_lock *lock = cg->cg_lock;
+ int refcheck;
+
+ LASSERT(cg->cg_env);
+ LASSERT(cg->cg_gid);
+
+ cl_env_implant(env, &refcheck);
+ cl_env_put(env, &refcheck);
+
+ cl_unuse(env, lock);
+ cl_lock_release(env, lock, GROUPLOCK_SCOPE, current);
+ cl_io_fini(env, io);
+ cl_env_put(env, NULL);
+}
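+
+/*
+ * Usage sketch (assuming the caller already holds a reference on the
+ * cl_object of the file it wants to group-lock):
+ *
+ *	struct ccc_grouplock cg;
+ *
+ *	rc = cl_get_grouplock(obj, gid, 0, &cg);	// 0: blocking enqueue
+ *	if (rc == 0) {
+ *		// ... group-locked I/O ...
+ *		cl_put_grouplock(&cg);
+ *	}
+ */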
diff --git a/drivers/staging/lustre/lustre/ldlm/interval_tree.c b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
new file mode 100644
index 0000000..c65b13c
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
@@ -0,0 +1,744 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ldlm/interval_tree.c
+ *
+ * Interval tree library used by ldlm extent lock code
+ *
+ * Author: Huang Wei <huangwei@clusterfs.com>
+ * Author: Jay Xiong <jinshan.xiong@sun.com>
+ */
+#include <lustre_dlm.h>
+#include <obd_support.h>
+#include <interval_tree.h>
+
+enum {
+ INTERVAL_RED = 0,
+ INTERVAL_BLACK = 1
+};
+
+static inline int node_is_left_child(struct interval_node *node)
+{
+ LASSERT(node->in_parent != NULL);
+ return node == node->in_parent->in_left;
+}
+
+static inline int node_is_right_child(struct interval_node *node)
+{
+ LASSERT(node->in_parent != NULL);
+ return node == node->in_parent->in_right;
+}
+
+static inline int node_is_red(struct interval_node *node)
+{
+ return node->in_color == INTERVAL_RED;
+}
+
+static inline int node_is_black(struct interval_node *node)
+{
+ return node->in_color == INTERVAL_BLACK;
+}
+
+static inline int extent_compare(struct interval_node_extent *e1,
+ struct interval_node_extent *e2)
+{
+ int rc;
+ if (e1->start == e2->start) {
+ if (e1->end < e2->end)
+ rc = -1;
+ else if (e1->end > e2->end)
+ rc = 1;
+ else
+ rc = 0;
+ } else {
+ if (e1->start < e2->start)
+ rc = -1;
+ else
+ rc = 1;
+ }
+ return rc;
+}
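+
+/*
+ * Ordering example: [0, 10] < [0, 20] < [5, 5]; extents are ordered by start
+ * first, and by end only when the starts are equal.
+ */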
+
+static inline int extent_equal(struct interval_node_extent *e1,
+ struct interval_node_extent *e2)
+{
+ return (e1->start == e2->start) && (e1->end == e2->end);
+}
+
+static inline int extent_overlapped(struct interval_node_extent *e1,
+ struct interval_node_extent *e2)
+{
+ return (e1->start <= e2->end) && (e2->start <= e1->end);
+}
+
+static inline int node_compare(struct interval_node *n1,
+ struct interval_node *n2)
+{
+ return extent_compare(&n1->in_extent, &n2->in_extent);
+}
+
+static inline int node_equal(struct interval_node *n1,
+ struct interval_node *n2)
+{
+ return extent_equal(&n1->in_extent, &n2->in_extent);
+}
+
+static inline __u64 max_u64(__u64 x, __u64 y)
+{
+ return x > y ? x : y;
+}
+
+static inline __u64 min_u64(__u64 x, __u64 y)
+{
+ return x < y ? x : y;
+}
+
+#define interval_for_each(node, root) \
+for (node = interval_first(root); node != NULL; \
+ node = interval_next(node))
+
+#define interval_for_each_reverse(node, root) \
+for (node = interval_last(root); node != NULL; \
+ node = interval_prev(node))
+
+static struct interval_node *interval_first(struct interval_node *node)
+{
+ if (!node)
+ return NULL;
+ while (node->in_left)
+ node = node->in_left;
+ return node;
+}
+
+static struct interval_node *interval_last(struct interval_node *node)
+{
+ if (!node)
+ return NULL;
+ while (node->in_right)
+ node = node->in_right;
+ return node;
+}
+
+static struct interval_node *interval_next(struct interval_node *node)
+{
+ if (!node)
+ return NULL;
+ if (node->in_right)
+ return interval_first(node->in_right);
+ while (node->in_parent && node_is_right_child(node))
+ node = node->in_parent;
+ return node->in_parent;
+}
+
+static struct interval_node *interval_prev(struct interval_node *node)
+{
+ if (!node)
+ return NULL;
+
+ if (node->in_left)
+ return interval_last(node->in_left);
+
+ while (node->in_parent && node_is_left_child(node))
+ node = node->in_parent;
+
+ return node->in_parent;
+}
+
+enum interval_iter interval_iterate(struct interval_node *root,
+ interval_callback_t func,
+ void *data)
+{
+ struct interval_node *node;
+ enum interval_iter rc = INTERVAL_ITER_CONT;
+
+ interval_for_each(node, root) {
+ rc = func(node, data);
+ if (rc == INTERVAL_ITER_STOP)
+ break;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(interval_iterate);
+
+enum interval_iter interval_iterate_reverse(struct interval_node *root,
+ interval_callback_t func,
+ void *data)
+{
+ struct interval_node *node;
+ enum interval_iter rc = INTERVAL_ITER_CONT;
+
+ interval_for_each_reverse(node, root) {
+ rc = func(node, data);
+ if (rc == INTERVAL_ITER_STOP)
+ break;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(interval_iterate_reverse);
+
+/* Try to find a node with the same interval in the tree; if found, return
+ * the pointer to the node, otherwise return NULL. */
+struct interval_node *interval_find(struct interval_node *root,
+ struct interval_node_extent *ex)
+{
+ struct interval_node *walk = root;
+ int rc;
+
+ while (walk) {
+ rc = extent_compare(ex, &walk->in_extent);
+ if (rc == 0)
+ break;
+ else if (rc < 0)
+ walk = walk->in_left;
+ else
+ walk = walk->in_right;
+ }
+
+ return walk;
+}
+EXPORT_SYMBOL(interval_find);
+
+static void __rotate_change_maxhigh(struct interval_node *node,
+ struct interval_node *rotate)
+{
+ __u64 left_max, right_max;
+
+ rotate->in_max_high = node->in_max_high;
+ left_max = node->in_left ? node->in_left->in_max_high : 0;
+ right_max = node->in_right ? node->in_right->in_max_high : 0;
+ node->in_max_high = max_u64(interval_high(node),
+ max_u64(left_max, right_max));
+}
+
+/* The left rotation "pivots" around the link from node to node->right, and
+ * - node will be linked to node->right's left child, and
+ * - node->right's left child will be linked to node's right child. */
+static void __rotate_left(struct interval_node *node,
+ struct interval_node **root)
+{
+ struct interval_node *right = node->in_right;
+ struct interval_node *parent = node->in_parent;
+
+ node->in_right = right->in_left;
+ if (node->in_right)
+ right->in_left->in_parent = node;
+
+ right->in_left = node;
+ right->in_parent = parent;
+ if (parent) {
+ if (node_is_left_child(node))
+ parent->in_left = right;
+ else
+ parent->in_right = right;
+ } else {
+ *root = right;
+ }
+ node->in_parent = right;
+
+ /* update max_high for node and right */
+ __rotate_change_maxhigh(node, right);
+}
+
+/* The right rotation "pivots" around the link from node to node->left, and
+ * - node will be linked to node->left's right child, and
+ * - node->left's right child will be linked to node's left child. */
+static void __rotate_right(struct interval_node *node,
+ struct interval_node **root)
+{
+ struct interval_node *left = node->in_left;
+ struct interval_node *parent = node->in_parent;
+
+ node->in_left = left->in_right;
+ if (node->in_left)
+ left->in_right->in_parent = node;
+ left->in_right = node;
+
+ left->in_parent = parent;
+ if (parent) {
+ if (node_is_right_child(node))
+ parent->in_right = left;
+ else
+ parent->in_left = left;
+ } else {
+ *root = left;
+ }
+ node->in_parent = left;
+
+ /* update max_high for node and left */
+ __rotate_change_maxhigh(node, left);
+}
+
+#define interval_swap(a, b) do { \
+ struct interval_node *c = a; a = b; b = c; \
+} while (0)
+
+/*
+ * Operations INSERT and DELETE, when run on a tree with n keys,
+ * take O(log n) time. Because they modify the tree, the result
+ * may violate the red-black properties. To restore these properties,
+ * we must change the colors of some of the nodes in the tree
+ * and also change the pointer structure.
+ */
+static void interval_insert_color(struct interval_node *node,
+ struct interval_node **root)
+{
+ struct interval_node *parent, *gparent;
+
+ while ((parent = node->in_parent) && node_is_red(parent)) {
+ gparent = parent->in_parent;
+ /* Parent is RED, so gparent must not be NULL */
+ if (node_is_left_child(parent)) {
+ struct interval_node *uncle;
+ uncle = gparent->in_right;
+ if (uncle && node_is_red(uncle)) {
+ uncle->in_color = INTERVAL_BLACK;
+ parent->in_color = INTERVAL_BLACK;
+ gparent->in_color = INTERVAL_RED;
+ node = gparent;
+ continue;
+ }
+
+ if (parent->in_right == node) {
+ __rotate_left(parent, root);
+ interval_swap(node, parent);
+ }
+
+ parent->in_color = INTERVAL_BLACK;
+ gparent->in_color = INTERVAL_RED;
+ __rotate_right(gparent, root);
+ } else {
+ struct interval_node *uncle;
+ uncle = gparent->in_left;
+ if (uncle && node_is_red(uncle)) {
+ uncle->in_color = INTERVAL_BLACK;
+ parent->in_color = INTERVAL_BLACK;
+ gparent->in_color = INTERVAL_RED;
+ node = gparent;
+ continue;
+ }
+
+ if (node_is_left_child(node)) {
+ __rotate_right(parent, root);
+ interval_swap(node, parent);
+ }
+
+ parent->in_color = INTERVAL_BLACK;
+ gparent->in_color = INTERVAL_RED;
+ __rotate_left(gparent, root);
+ }
+ }
+
+ (*root)->in_color = INTERVAL_BLACK;
+}
+
+struct interval_node *interval_insert(struct interval_node *node,
+ struct interval_node **root)
+
+{
+ struct interval_node **p, *parent = NULL;
+
+ LASSERT(!interval_is_intree(node));
+ p = root;
+ while (*p) {
+ parent = *p;
+ if (node_equal(parent, node))
+ return parent;
+
+ /* max_high field must be updated after each iteration */
+ if (parent->in_max_high < interval_high(node))
+ parent->in_max_high = interval_high(node);
+
+ if (node_compare(node, parent) < 0)
+ p = &parent->in_left;
+ else
+ p = &parent->in_right;
+ }
+
+ /* link node into the tree */
+ node->in_parent = parent;
+ node->in_color = INTERVAL_RED;
+ node->in_left = node->in_right = NULL;
+ *p = node;
+
+ interval_insert_color(node, root);
+ node->in_intree = 1;
+
+ return NULL;
+}
+EXPORT_SYMBOL(interval_insert);
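+
+/*
+ * Insertion sketch ("node" and "root" are placeholders; the caller is
+ * expected to have filled in node->in_extent, e.g. via an interval_set()-
+ * style helper that also initializes in_max_high, before inserting):
+ *
+ *	found = interval_insert(node, &root);
+ *	if (found != NULL)
+ *		// a node with an equal extent is already in the tree
+ */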
+
+static inline int node_is_black_or_0(struct interval_node *node)
+{
+ return !node || node_is_black(node);
+}
+
+static void interval_erase_color(struct interval_node *node,
+ struct interval_node *parent,
+ struct interval_node **root)
+{
+ struct interval_node *tmp;
+
+ while (node_is_black_or_0(node) && node != *root) {
+ if (parent->in_left == node) {
+ tmp = parent->in_right;
+ if (node_is_red(tmp)) {
+ tmp->in_color = INTERVAL_BLACK;
+ parent->in_color = INTERVAL_RED;
+ __rotate_left(parent, root);
+ tmp = parent->in_right;
+ }
+ if (node_is_black_or_0(tmp->in_left) &&
+ node_is_black_or_0(tmp->in_right)) {
+ tmp->in_color = INTERVAL_RED;
+ node = parent;
+ parent = node->in_parent;
+ } else {
+ if (node_is_black_or_0(tmp->in_right)) {
+ struct interval_node *o_left;
+ if ((o_left = tmp->in_left))
+ o_left->in_color = INTERVAL_BLACK;
+ tmp->in_color = INTERVAL_RED;
+ __rotate_right(tmp, root);
+ tmp = parent->in_right;
+ }
+ tmp->in_color = parent->in_color;
+ parent->in_color = INTERVAL_BLACK;
+ if (tmp->in_right)
+ tmp->in_right->in_color = INTERVAL_BLACK;
+ __rotate_left(parent, root);
+ node = *root;
+ break;
+ }
+ } else {
+ tmp = parent->in_left;
+ if (node_is_red(tmp)) {
+ tmp->in_color = INTERVAL_BLACK;
+ parent->in_color = INTERVAL_RED;
+ __rotate_right(parent, root);
+ tmp = parent->in_left;
+ }
+ if (node_is_black_or_0(tmp->in_left) &&
+ node_is_black_or_0(tmp->in_right)) {
+ tmp->in_color = INTERVAL_RED;
+ node = parent;
+ parent = node->in_parent;
+ } else {
+ if (node_is_black_or_0(tmp->in_left)) {
+ struct interval_node *o_right;
+ if ((o_right = tmp->in_right))
+ o_right->in_color = INTERVAL_BLACK;
+ tmp->in_color = INTERVAL_RED;
+ __rotate_left(tmp, root);
+ tmp = parent->in_left;
+ }
+ tmp->in_color = parent->in_color;
+ parent->in_color = INTERVAL_BLACK;
+ if (tmp->in_left)
+ tmp->in_left->in_color = INTERVAL_BLACK;
+ __rotate_right(parent, root);
+ node = *root;
+ break;
+ }
+ }
+ }
+ if (node)
+ node->in_color = INTERVAL_BLACK;
+}
+
+/*
+ * If the @max_high value of @node has changed, this function traverses a
+ * path from the node up to the root to update max_high for the whole tree.
+ */
+static void update_maxhigh(struct interval_node *node,
+ __u64 old_maxhigh)
+{
+ __u64 left_max, right_max;
+
+ while (node) {
+ left_max = node->in_left ? node->in_left->in_max_high : 0;
+ right_max = node->in_right ? node->in_right->in_max_high : 0;
+ node->in_max_high = max_u64(interval_high(node),
+ max_u64(left_max, right_max));
+
+ if (node->in_max_high >= old_maxhigh)
+ break;
+ node = node->in_parent;
+ }
+}
+
+void interval_erase(struct interval_node *node,
+ struct interval_node **root)
+{
+ struct interval_node *child, *parent;
+ int color;
+
+ LASSERT(interval_is_intree(node));
+ node->in_intree = 0;
+ if (!node->in_left) {
+ child = node->in_right;
+ } else if (!node->in_right) {
+ child = node->in_left;
+ } else { /* Both left and right child are not NULL */
+ struct interval_node *old = node;
+
+ node = interval_next(node);
+ child = node->in_right;
+ parent = node->in_parent;
+ color = node->in_color;
+
+ if (child)
+ child->in_parent = parent;
+ if (parent == old)
+ parent->in_right = child;
+ else
+ parent->in_left = child;
+
+ node->in_color = old->in_color;
+ node->in_right = old->in_right;
+ node->in_left = old->in_left;
+ node->in_parent = old->in_parent;
+
+ if (old->in_parent) {
+ if (node_is_left_child(old))
+ old->in_parent->in_left = node;
+ else
+ old->in_parent->in_right = node;
+ } else {
+ *root = node;
+ }
+
+ old->in_left->in_parent = node;
+ if (old->in_right)
+ old->in_right->in_parent = node;
+ update_maxhigh(child ? : parent, node->in_max_high);
+ update_maxhigh(node, old->in_max_high);
+ if (parent == old)
+ parent = node;
+ goto color;
+ }
+ parent = node->in_parent;
+ color = node->in_color;
+
+ if (child)
+ child->in_parent = parent;
+ if (parent) {
+ if (node_is_left_child(node))
+ parent->in_left = child;
+ else
+ parent->in_right = child;
+ } else {
+ *root = child;
+ }
+
+ update_maxhigh(child ? : parent, node->in_max_high);
+
+color:
+ if (color == INTERVAL_BLACK)
+ interval_erase_color(child, parent, root);
+}
+EXPORT_SYMBOL(interval_erase);
+
+static inline int interval_may_overlap(struct interval_node *node,
+ struct interval_node_extent *ext)
+{
+ return (ext->start <= node->in_max_high &&
+ ext->end >= interval_low(node));
+}
+
+/*
+ * This function finds all intervals that overlap the interval @ext and
+ * calls @func to handle the resulting intervals one by one. In Lustre,
+ * this function is used to find all conflicting locks in the granted
+ * queue and add them to the AST work list.
+ *
+ * The iterative loop below is equivalent to this recursion:
+ * {
+ * if (node == NULL)
+ * return 0;
+ * if (ext->end < interval_low(node)) {
+ * interval_search(node->in_left, ext, func, data);
+ * } else if (interval_may_overlap(node, ext)) {
+ * if (extent_overlapped(ext, &node->in_extent))
+ * func(node, data);
+ * interval_search(node->in_left, ext, func, data);
+ * interval_search(node->in_right, ext, func, data);
+ * }
+ * return 0;
+ * }
+ *
+ */
+enum interval_iter interval_search(struct interval_node *node,
+ struct interval_node_extent *ext,
+ interval_callback_t func,
+ void *data)
+{
+ struct interval_node *parent;
+ enum interval_iter rc = INTERVAL_ITER_CONT;
+
+ LASSERT(ext != NULL);
+ LASSERT(func != NULL);
+
+ while (node) {
+ if (ext->end < interval_low(node)) {
+ if (node->in_left) {
+ node = node->in_left;
+ continue;
+ }
+ } else if (interval_may_overlap(node, ext)) {
+ if (extent_overlapped(ext, &node->in_extent)) {
+ rc = func(node, data);
+ if (rc == INTERVAL_ITER_STOP)
+ break;
+ }
+
+ if (node->in_left) {
+ node = node->in_left;
+ continue;
+ }
+ if (node->in_right) {
+ node = node->in_right;
+ continue;
+ }
+ }
+
+ parent = node->in_parent;
+ while (parent) {
+ if (node_is_left_child(node) &&
+ parent->in_right) {
+				/* If we ever went left, the parent satisfied
+				 * either ext->end < interval_low(parent) or
+				 * may_overlap(parent). In the former case we
+				 * need not go back up, so stop early and check
+				 * may_overlap(parent) after this loop. */
+ node = parent->in_right;
+ break;
+ }
+ node = parent;
+ parent = parent->in_parent;
+ }
+ if (parent == NULL || !interval_may_overlap(parent, ext))
+ break;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(interval_search);
+
+static enum interval_iter interval_overlap_cb(struct interval_node *n,
+ void *args)
+{
+ *(int *)args = 1;
+ return INTERVAL_ITER_STOP;
+}
+
+int interval_is_overlapped(struct interval_node *root,
+ struct interval_node_extent *ext)
+{
+ int has = 0;
+ (void)interval_search(root, ext, interval_overlap_cb, &has);
+ return has;
+}
+EXPORT_SYMBOL(interval_is_overlapped);
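
For reference, a minimal sketch of the other half of the iterator protocol: a
callback that returns INTERVAL_ITER_CONT visits every overlapping node rather
than stopping at the first one, so overlaps can be counted instead of merely
detected (the callback and helper names below are illustrative only):

	static enum interval_iter count_overlap_cb(struct interval_node *n,
						   void *args)
	{
		(*(int *)args)++;		/* one more overlapping node */
		return INTERVAL_ITER_CONT;	/* keep walking the tree */
	}

	static int interval_count_overlaps(struct interval_node *root,
					   struct interval_node_extent *ext)
	{
		int count = 0;

		(void)interval_search(root, ext, count_overlap_cb, &count);
		return count;
	}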
+
+/* Don't expand the low end. Expanding downwards is expensive and of little
+ * use for most extents, because programs seldom do IO backwards.
+ *
+ * The recursive algorithm of expanding low:
+ * expand_low {
+ * struct interval_node *tmp;
+ * static __u64 res = 0;
+ *
+ * if (root == NULL)
+ * return res;
+ * if (root->in_max_high < low) {
+ * res = max_u64(root->in_max_high + 1, res);
+ * return res;
+ * } else if (low < interval_low(root)) {
+ * interval_expand_low(root->in_left, low);
+ * return res;
+ * }
+ *
+ * if (interval_high(root) < low)
+ * res = max_u64(interval_high(root) + 1, res);
+ * interval_expand_low(root->in_left, low);
+ * interval_expand_low(root->in_right, low);
+ *
+ * return res;
+ * }
+ *
+ * It is easy to eliminate the recursion; see interval_search for an
+ * example. -jay
+ */
+static inline __u64 interval_expand_low(struct interval_node *root, __u64 low)
+{
+	/* we only handle the empty tree for now. */
+ if (root == NULL)
+ return 0;
+ return low;
+}
+
+static inline __u64 interval_expand_high(struct interval_node *node, __u64 high)
+{
+ __u64 result = ~0;
+
+ while (node != NULL) {
+ if (node->in_max_high < high)
+ break;
+
+ if (interval_low(node) > high) {
+ result = interval_low(node) - 1;
+ node = node->in_left;
+ } else {
+ node = node->in_right;
+ }
+ }
+
+ return result;
+}
+
+/* Expand the extent @ext without overlapping anything in the tree. */
+void interval_expand(struct interval_node *root,
+ struct interval_node_extent *ext,
+ struct interval_node_extent *limiter)
+{
+	/* The assertion on interval_is_overlapped is expensive because we may
+	 * traverse many nodes to find an overlapping one. */
+ LASSERT(interval_is_overlapped(root, ext) == 0);
+ if (!limiter || limiter->start < ext->start)
+ ext->start = interval_expand_low(root, ext->start);
+ if (!limiter || limiter->end > ext->end)
+ ext->end = interval_expand_high(root, ext->end);
+ LASSERT(interval_is_overlapped(root, ext) == 0);
+}
+EXPORT_SYMBOL(interval_expand);
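
For reference, a minimal sketch of the insert/search/erase lifecycle these
helpers provide, assuming interval_set() from the interval tree header (used
the same way by ldlm_extent_add_lock() later in this patch); the function name
is illustrative only:

	static void interval_lifecycle_sketch(struct interval_node *node,
					      struct interval_node **root)
	{
		struct interval_node_extent ext = { .start = 0, .end = 4095 };

		/* describe the range [0, 4095] in the node before linking it */
		interval_set(node, 0, 4095);

		/* NULL means no node with an identical extent already existed */
		if (interval_insert(node, root) == NULL) {
			/* the freshly inserted node overlaps its own extent */
			LASSERT(interval_is_overlapped(*root, &ext));
			interval_erase(node, root);
		}
	}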
diff --git a/drivers/staging/lustre/lustre/ldlm/l_lock.c b/drivers/staging/lustre/lustre/ldlm/l_lock.c
new file mode 100644
index 0000000..32f4d52
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ldlm/l_lock.c
@@ -0,0 +1,76 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_LDLM
+#include <linux/libcfs/libcfs.h>
+
+#include <lustre_dlm.h>
+#include <lustre_lib.h>
+
+/**
+ * Lock a lock and its resource.
+ *
+ * LDLM locking uses resource to serialize access to locks
+ * but there is a case when we change resource of lock upon
+ * enqueue reply. We rely on lock->l_resource = new_res
+ * being an atomic operation.
+ */
+struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock)
+{
+	/* on the server side the resource of a lock doesn't change */
+ if ((lock->l_flags & LDLM_FL_NS_SRV) == 0)
+ spin_lock(&lock->l_lock);
+
+ lock_res(lock->l_resource);
+
+ lock->l_flags |= LDLM_FL_RES_LOCKED;
+ return lock->l_resource;
+}
+EXPORT_SYMBOL(lock_res_and_lock);
+
+/**
+ * Unlock a lock and its resource previously locked with lock_res_and_lock
+ */
+void unlock_res_and_lock(struct ldlm_lock *lock)
+{
+	/* on the server side the resource of a lock doesn't change */
+ lock->l_flags &= ~LDLM_FL_RES_LOCKED;
+
+ unlock_res(lock->l_resource);
+ if ((lock->l_flags & LDLM_FL_NS_SRV) == 0)
+ spin_unlock(&lock->l_lock);
+}
+EXPORT_SYMBOL(unlock_res_and_lock);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
new file mode 100644
index 0000000..7e31663
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
@@ -0,0 +1,240 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2010, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ldlm/ldlm_extent.c
+ *
+ * Author: Peter Braam <braam@clusterfs.com>
+ * Author: Phil Schwan <phil@clusterfs.com>
+ */
+
+/**
+ * This file contains the implementation of the EXTENT lock type
+ *
+ * EXTENT lock type is for locking a contiguous range of values, represented
+ * by 64-bit starting and ending offsets (inclusive). There are several extent
+ * lock modes, some of which may be mutually incompatible. Extent locks are
+ * considered incompatible if their modes are incompatible and their extents
+ * intersect. See the lock mode compatibility matrix in lustre_dlm.h.
+ */
+
+#define DEBUG_SUBSYSTEM S_LDLM
+# include <linux/libcfs/libcfs.h>
+
+#include <lustre_dlm.h>
+#include <obd_support.h>
+#include <obd.h>
+#include <obd_class.h>
+#include <lustre_lib.h>
+
+#include "ldlm_internal.h"
+
+
+/* When a lock is cancelled by a client, the KMS may undergo change if this
+ * is the "highest lock". This function returns the new KMS value.
+ * Caller must hold lr_lock already.
+ *
+ * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
+__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
+{
+ struct ldlm_resource *res = lock->l_resource;
+ struct list_head *tmp;
+ struct ldlm_lock *lck;
+ __u64 kms = 0;
+
+ /* don't let another thread in ldlm_extent_shift_kms race in
+ * just after we finish and take our lock into account in its
+ * calculation of the kms */
+ lock->l_flags |= LDLM_FL_KMS_IGNORE;
+
+ list_for_each(tmp, &res->lr_granted) {
+ lck = list_entry(tmp, struct ldlm_lock, l_res_link);
+
+ if (lck->l_flags & LDLM_FL_KMS_IGNORE)
+ continue;
+
+ if (lck->l_policy_data.l_extent.end >= old_kms)
+ return old_kms;
+
+ /* This extent _has_ to be smaller than old_kms (checked above)
+ * so kms can only ever be smaller or the same as old_kms. */
+ if (lck->l_policy_data.l_extent.end + 1 > kms)
+ kms = lck->l_policy_data.l_extent.end + 1;
+ }
+ LASSERTF(kms <= old_kms, "kms "LPU64" old_kms "LPU64"\n", kms, old_kms);
+
+ return kms;
+}
+EXPORT_SYMBOL(ldlm_extent_shift_kms);
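
For reference, a minimal sketch of calling ldlm_extent_shift_kms() with the
required locking, using lock_res_and_lock()/unlock_res_and_lock() from
l_lock.c earlier in this patch to take the resource lock (the function name is
illustrative only):

	static __u64 shift_kms_sketch(struct ldlm_lock *lock, __u64 old_kms)
	{
		__u64 kms;

		/* ldlm_extent_shift_kms() requires lr_lock to be held */
		lock_res_and_lock(lock);
		kms = ldlm_extent_shift_kms(lock, old_kms);
		unlock_res_and_lock(lock);

		/* never larger than old_kms; a surviving lock on [x, y]
		 * keeps the KMS at y + 1 */
		return kms;
	}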
+
+struct kmem_cache *ldlm_interval_slab;
+struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
+{
+ struct ldlm_interval *node;
+
+ LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
+ OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, __GFP_IO);
+ if (node == NULL)
+ return NULL;
+
+ INIT_LIST_HEAD(&node->li_group);
+ ldlm_interval_attach(node, lock);
+ return node;
+}
+
+void ldlm_interval_free(struct ldlm_interval *node)
+{
+ if (node) {
+ LASSERT(list_empty(&node->li_group));
+ LASSERT(!interval_is_intree(&node->li_node));
+ OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
+ }
+}
+
+/* interval tree, for LDLM_EXTENT. */
+void ldlm_interval_attach(struct ldlm_interval *n,
+ struct ldlm_lock *l)
+{
+ LASSERT(l->l_tree_node == NULL);
+ LASSERT(l->l_resource->lr_type == LDLM_EXTENT);
+
+ list_add_tail(&l->l_sl_policy, &n->li_group);
+ l->l_tree_node = n;
+}
+
+struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
+{
+ struct ldlm_interval *n = l->l_tree_node;
+
+ if (n == NULL)
+ return NULL;
+
+ LASSERT(!list_empty(&n->li_group));
+ l->l_tree_node = NULL;
+ list_del_init(&l->l_sl_policy);
+
+ return (list_empty(&n->li_group) ? n : NULL);
+}
+
+static inline int lock_mode_to_index(ldlm_mode_t mode)
+{
+ int index;
+
+ LASSERT(mode != 0);
+ LASSERT(IS_PO2(mode));
+ for (index = -1; mode; index++, mode >>= 1) ;
+ LASSERT(index < LCK_MODE_NUM);
+ return index;
+}
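
Because every ldlm_mode_t is a single power-of-two bit, the helper above
simply returns the bit position. A small sanity sketch, assuming every bit
below LCK_MODE_NUM corresponds to a valid mode (the function name is
illustrative only):

	static void lock_mode_to_index_sketch(void)
	{
		int k;

		/* 1 << k maps back to index k, which is what
		 * ldlm_extent_add_lock() relies on when it asserts
		 * lock->l_granted_mode == 1 << idx */
		for (k = 0; k < LCK_MODE_NUM; k++)
			LASSERT(lock_mode_to_index((ldlm_mode_t)(1 << k)) == k);
	}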
+
+/** Add newly granted lock into interval tree for the resource. */
+void ldlm_extent_add_lock(struct ldlm_resource *res,
+ struct ldlm_lock *lock)
+{
+ struct interval_node *found, **root;
+ struct ldlm_interval *node;
+ struct ldlm_extent *extent;
+ int idx;
+
+ LASSERT(lock->l_granted_mode == lock->l_req_mode);
+
+ node = lock->l_tree_node;
+ LASSERT(node != NULL);
+ LASSERT(!interval_is_intree(&node->li_node));
+
+ idx = lock_mode_to_index(lock->l_granted_mode);
+ LASSERT(lock->l_granted_mode == 1 << idx);
+ LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode);
+
+ /* node extent initialize */
+ extent = &lock->l_policy_data.l_extent;
+ interval_set(&node->li_node, extent->start, extent->end);
+
+ root = &res->lr_itree[idx].lit_root;
+ found = interval_insert(&node->li_node, root);
+ if (found) { /* The policy group found. */
+ struct ldlm_interval *tmp = ldlm_interval_detach(lock);
+ LASSERT(tmp != NULL);
+ ldlm_interval_free(tmp);
+ ldlm_interval_attach(to_ldlm_interval(found), lock);
+ }
+ res->lr_itree[idx].lit_size++;
+
+	/* Even though we use the interval tree to manage extent locks, we
+	 * also add them to the granted list, for debugging purposes. */
+ ldlm_resource_add_lock(res, &res->lr_granted, lock);
+}
+
+/** Remove cancelled lock from resource interval tree. */
+void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
+{
+ struct ldlm_resource *res = lock->l_resource;
+ struct ldlm_interval *node = lock->l_tree_node;
+ struct ldlm_interval_tree *tree;
+ int idx;
+
+ if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */
+ return;
+
+ idx = lock_mode_to_index(lock->l_granted_mode);
+ LASSERT(lock->l_granted_mode == 1 << idx);
+ tree = &res->lr_itree[idx];
+
+	LASSERT(tree->lit_root != NULL); /* the tree must not be empty */
+
+ tree->lit_size--;
+ node = ldlm_interval_detach(lock);
+ if (node) {
+ interval_erase(&node->li_node, &tree->lit_root);
+ ldlm_interval_free(node);
+ }
+}
+
+void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
+ ldlm_policy_data_t *lpolicy)
+{
+ memset(lpolicy, 0, sizeof(*lpolicy));
+ lpolicy->l_extent.start = wpolicy->l_extent.start;
+ lpolicy->l_extent.end = wpolicy->l_extent.end;
+ lpolicy->l_extent.gid = wpolicy->l_extent.gid;
+}
+
+void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
+ ldlm_wire_policy_data_t *wpolicy)
+{
+ memset(wpolicy, 0, sizeof(*wpolicy));
+ wpolicy->l_extent.start = lpolicy->l_extent.start;
+ wpolicy->l_extent.end = lpolicy->l_extent.end;
+ wpolicy->l_extent.gid = lpolicy->l_extent.gid;
+}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
new file mode 100644
index 0000000..c68ed27
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
@@ -0,0 +1,841 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003 Hewlett-Packard Development Company LP.
+ * Developed under the sponsorship of the US Government under
+ * Subcontract No. B514193
+ *
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2010, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+/**
+ * This file implements the POSIX lock type for Lustre.
+ * Its policy properties are the start and end of the extent and the PID.
+ *
+ * These locks are only handled through the MDS because POSIX semantics
+ * require, e.g., that a lock may be only partially released and thus be
+ * split into two parts, and that two adjacent locks from the same process
+ * may be merged into a single wider lock.
+ *
+ * Lock modes are mapped like this:
+ *   PR and PW for READ and WRITE locks
+ *   NL to request release of a portion of the lock
+ *
+ * These flock locks never time out.
+ */
+
+#define DEBUG_SUBSYSTEM S_LDLM
+
+#include <lustre_dlm.h>
+#include <obd_support.h>
+#include <obd_class.h>
+#include <lustre_lib.h>
+#include <linux/list.h>
+
+#include "ldlm_internal.h"
+
+int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
+ void *data, int flag);
+
+/**
+ * list_for_remaining_safe - iterate over the remaining entries in a list
+ * and safeguard against removal of a list entry.
+ * \param pos the &struct list_head to use as a loop cursor; pos MUST
+ * have been initialized before this macro is used.
+ * \param n another &struct list_head to use as temporary storage
+ * \param head the head for your list.
+ */
+#define list_for_remaining_safe(pos, n, head) \
+ for (n = pos->next; pos != (head); pos = n, n = pos->next)
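
For reference, a minimal sketch of the macro in use, resuming a walk of a
resource's granted list from a known position while tolerating removal of the
current entry (the same pattern ldlm_process_flock_lock() uses further down;
the function name is illustrative only):

	static void remaining_safe_sketch(struct ldlm_resource *res,
					  struct list_head *pos)
	{
		struct list_head *n;
		struct ldlm_lock *lock;

		/* @pos must already point somewhere inside res->lr_granted */
		list_for_remaining_safe(pos, n, &res->lr_granted) {
			lock = list_entry(pos, struct ldlm_lock, l_res_link);
			/* @lock may be unlinked here; @n already points
			 * past it */
			if (lock->l_granted_mode == LCK_NL)
				continue;	/* e.g. skip NL locks */
		}
	}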
+
+static inline int
+ldlm_same_flock_owner(struct ldlm_lock *lock, struct ldlm_lock *new)
+{
+ return((new->l_policy_data.l_flock.owner ==
+ lock->l_policy_data.l_flock.owner) &&
+ (new->l_export == lock->l_export));
+}
+
+static inline int
+ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new)
+{
+ return((new->l_policy_data.l_flock.start <=
+ lock->l_policy_data.l_flock.end) &&
+ (new->l_policy_data.l_flock.end >=
+ lock->l_policy_data.l_flock.start));
+}
+
+static inline int ldlm_flock_blocking_link(struct ldlm_lock *req,
+ struct ldlm_lock *lock)
+{
+ int rc = 0;
+
+ /* For server only */
+ if (req->l_export == NULL)
+ return 0;
+
+ if (unlikely(req->l_export->exp_flock_hash == NULL)) {
+ rc = ldlm_init_flock_export(req->l_export);
+ if (rc)
+ goto error;
+ }
+
+ LASSERT(hlist_unhashed(&req->l_exp_flock_hash));
+
+ req->l_policy_data.l_flock.blocking_owner =
+ lock->l_policy_data.l_flock.owner;
+ req->l_policy_data.l_flock.blocking_export =
+ lock->l_export;
+ req->l_policy_data.l_flock.blocking_refs = 0;
+
+ cfs_hash_add(req->l_export->exp_flock_hash,
+ &req->l_policy_data.l_flock.owner,
+ &req->l_exp_flock_hash);
+error:
+ return rc;
+}
+
+static inline void ldlm_flock_blocking_unlink(struct ldlm_lock *req)
+{
+ /* For server only */
+ if (req->l_export == NULL)
+ return;
+
+ check_res_locked(req->l_resource);
+ if (req->l_export->exp_flock_hash != NULL &&
+ !hlist_unhashed(&req->l_exp_flock_hash))
+ cfs_hash_del(req->l_export->exp_flock_hash,
+ &req->l_policy_data.l_flock.owner,
+ &req->l_exp_flock_hash);
+}
+
+static inline void
+ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags)
+{
+ LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%llx)",
+ mode, flags);
+
+ /* Safe to not lock here, since it should be empty anyway */
+ LASSERT(hlist_unhashed(&lock->l_exp_flock_hash));
+
+ list_del_init(&lock->l_res_link);
+ if (flags == LDLM_FL_WAIT_NOREPROC &&
+ !(lock->l_flags & LDLM_FL_FAILED)) {
+ /* client side - set a flag to prevent sending a CANCEL */
+ lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
+
+		/* When we reach here we are under lock_res_and_lock(), so we
+		 * must call the nolock version of ldlm_lock_decref_internal() */
+ ldlm_lock_decref_internal_nolock(lock, mode);
+ }
+
+ ldlm_lock_destroy_nolock(lock);
+}
+
+/**
+ * POSIX locks deadlock detection code.
+ *
+ * Given a new lock \a req and an existing lock \a bl_lock it conflicts
+ * with, we need to iterate through all blocked POSIX locks for this
+ * export and see if there is a deadlock condition arising. (i.e. when
+ * one client holds a lock on something and want a lock on something
+ * else and at the same time another client has the opposite situation).
+ */
+static int
+ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *bl_lock)
+{
+ struct obd_export *req_exp = req->l_export;
+ struct obd_export *bl_exp = bl_lock->l_export;
+ __u64 req_owner = req->l_policy_data.l_flock.owner;
+ __u64 bl_owner = bl_lock->l_policy_data.l_flock.owner;
+
+ /* For server only */
+ if (req_exp == NULL)
+ return 0;
+
+ class_export_get(bl_exp);
+ while (1) {
+ struct obd_export *bl_exp_new;
+ struct ldlm_lock *lock = NULL;
+ struct ldlm_flock *flock;
+
+ if (bl_exp->exp_flock_hash != NULL)
+ lock = cfs_hash_lookup(bl_exp->exp_flock_hash,
+ &bl_owner);
+ if (lock == NULL)
+ break;
+
+ LASSERT(req != lock);
+ flock = &lock->l_policy_data.l_flock;
+ LASSERT(flock->owner == bl_owner);
+ bl_owner = flock->blocking_owner;
+ bl_exp_new = class_export_get(flock->blocking_export);
+ class_export_put(bl_exp);
+
+ cfs_hash_put(bl_exp->exp_flock_hash, &lock->l_exp_flock_hash);
+ bl_exp = bl_exp_new;
+
+ if (bl_owner == req_owner && bl_exp == req_exp) {
+ class_export_put(bl_exp);
+ return 1;
+ }
+ }
+ class_export_put(bl_exp);
+
+ return 0;
+}
+
+/**
+ * Process a granting attempt for a flock lock.
+ * Must be called with the ns lock held.
+ *
+ * This function looks for any conflicts for \a lock in the granted or
+ * waiting queues. The lock is granted if no conflicts are found in
+ * either queue.
+ *
+ * It is also responsible for splitting a lock if a portion of the lock
+ * is released.
+ *
+ * If \a first_enq is 0 (i.e. called from ldlm_reprocess_queue):
+ *   - blocking ASTs have already been sent.
+ *
+ * If \a first_enq is 1 (i.e. called from ldlm_lock_enqueue):
+ *   - blocking ASTs have not been sent yet, so the list of conflicting
+ *     locks is collected and ASTs are sent.
+ */
+int
+ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
+ ldlm_error_t *err, struct list_head *work_list)
+{
+ struct ldlm_resource *res = req->l_resource;
+ struct ldlm_namespace *ns = ldlm_res_to_ns(res);
+ struct list_head *tmp;
+ struct list_head *ownlocks = NULL;
+ struct ldlm_lock *lock = NULL;
+ struct ldlm_lock *new = req;
+ struct ldlm_lock *new2 = NULL;
+ ldlm_mode_t mode = req->l_req_mode;
+ int local = ns_is_client(ns);
+ int added = (mode == LCK_NL);
+ int overlaps = 0;
+ int splitted = 0;
+ const struct ldlm_callback_suite null_cbs = { NULL };
+ int rc;
+
+ CDEBUG(D_DLMTRACE, "flags %#llx owner "LPU64" pid %u mode %u start "
+ LPU64" end "LPU64"\n", *flags,
+ new->l_policy_data.l_flock.owner,
+ new->l_policy_data.l_flock.pid, mode,
+ req->l_policy_data.l_flock.start,
+ req->l_policy_data.l_flock.end);
+
+ *err = ELDLM_OK;
+
+ if (local) {
+		/* No blocking ASTs are sent to the clients for
+		 * POSIX file & record locks */
+ req->l_blocking_ast = NULL;
+ } else {
+ /* Called on the server for lock cancels. */
+ req->l_blocking_ast = ldlm_flock_blocking_ast;
+ }
+
+reprocess:
+ if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
+		/* This loop determines where this process's locks start
+		 * in the resource lr_granted list. */
+ list_for_each(tmp, &res->lr_granted) {
+ lock = list_entry(tmp, struct ldlm_lock,
+ l_res_link);
+ if (ldlm_same_flock_owner(lock, req)) {
+ ownlocks = tmp;
+ break;
+ }
+ }
+ } else {
+ lockmode_verify(mode);
+
+ /* This loop determines if there are existing locks
+ * that conflict with the new lock request. */
+ list_for_each(tmp, &res->lr_granted) {
+ lock = list_entry(tmp, struct ldlm_lock,
+ l_res_link);
+
+ if (ldlm_same_flock_owner(lock, req)) {
+ if (!ownlocks)
+ ownlocks = tmp;
+ continue;
+ }
+
+ /* locks are compatible, overlap doesn't matter */
+ if (lockmode_compat(lock->l_granted_mode, mode))
+ continue;
+
+ if (!ldlm_flocks_overlap(lock, req))
+ continue;
+
+ if (!first_enq)
+ return LDLM_ITER_CONTINUE;
+
+ if (*flags & LDLM_FL_BLOCK_NOWAIT) {
+ ldlm_flock_destroy(req, mode, *flags);
+ *err = -EAGAIN;
+ return LDLM_ITER_STOP;
+ }
+
+ if (*flags & LDLM_FL_TEST_LOCK) {
+ ldlm_flock_destroy(req, mode, *flags);
+ req->l_req_mode = lock->l_granted_mode;
+ req->l_policy_data.l_flock.pid =
+ lock->l_policy_data.l_flock.pid;
+ req->l_policy_data.l_flock.start =
+ lock->l_policy_data.l_flock.start;
+ req->l_policy_data.l_flock.end =
+ lock->l_policy_data.l_flock.end;
+ *flags |= LDLM_FL_LOCK_CHANGED;
+ return LDLM_ITER_STOP;
+ }
+
+ /* add lock to blocking list before deadlock
+ * check to prevent race */
+ rc = ldlm_flock_blocking_link(req, lock);
+ if (rc) {
+ ldlm_flock_destroy(req, mode, *flags);
+ *err = rc;
+ return LDLM_ITER_STOP;
+ }
+ if (ldlm_flock_deadlock(req, lock)) {
+ ldlm_flock_blocking_unlink(req);
+ ldlm_flock_destroy(req, mode, *flags);
+ *err = -EDEADLK;
+ return LDLM_ITER_STOP;
+ }
+
+ ldlm_resource_add_lock(res, &res->lr_waiting, req);
+ *flags |= LDLM_FL_BLOCK_GRANTED;
+ return LDLM_ITER_STOP;
+ }
+ }
+
+ if (*flags & LDLM_FL_TEST_LOCK) {
+ ldlm_flock_destroy(req, mode, *flags);
+ req->l_req_mode = LCK_NL;
+ *flags |= LDLM_FL_LOCK_CHANGED;
+ return LDLM_ITER_STOP;
+ }
+
+	/* In case we had slept on this lock request, take it off the
+	 * deadlock detection hash list. */
+ ldlm_flock_blocking_unlink(req);
+
+ /* Scan the locks owned by this process that overlap this request.
+ * We may have to merge or split existing locks. */
+
+ if (!ownlocks)
+ ownlocks = &res->lr_granted;
+
+ list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
+ lock = list_entry(ownlocks, struct ldlm_lock, l_res_link);
+
+ if (!ldlm_same_flock_owner(lock, new))
+ break;
+
+ if (lock->l_granted_mode == mode) {
+ /* If the modes are the same then we need to process
+ * locks that overlap OR adjoin the new lock. The extra
+ * logic condition is necessary to deal with arithmetic
+ * overflow and underflow. */
+ if ((new->l_policy_data.l_flock.start >
+ (lock->l_policy_data.l_flock.end + 1))
+ && (lock->l_policy_data.l_flock.end !=
+ OBD_OBJECT_EOF))
+ continue;
+
+ if ((new->l_policy_data.l_flock.end <
+ (lock->l_policy_data.l_flock.start - 1))
+ && (lock->l_policy_data.l_flock.start != 0))
+ break;
+
+ if (new->l_policy_data.l_flock.start <
+ lock->l_policy_data.l_flock.start) {
+ lock->l_policy_data.l_flock.start =
+ new->l_policy_data.l_flock.start;
+ } else {
+ new->l_policy_data.l_flock.start =
+ lock->l_policy_data.l_flock.start;
+ }
+
+ if (new->l_policy_data.l_flock.end >
+ lock->l_policy_data.l_flock.end) {
+ lock->l_policy_data.l_flock.end =
+ new->l_policy_data.l_flock.end;
+ } else {
+ new->l_policy_data.l_flock.end =
+ lock->l_policy_data.l_flock.end;
+ }
+
+ if (added) {
+ ldlm_flock_destroy(lock, mode, *flags);
+ } else {
+ new = lock;
+ added = 1;
+ }
+ continue;
+ }
+
+ if (new->l_policy_data.l_flock.start >
+ lock->l_policy_data.l_flock.end)
+ continue;
+
+ if (new->l_policy_data.l_flock.end <
+ lock->l_policy_data.l_flock.start)
+ break;
+
+ ++overlaps;
+
+ if (new->l_policy_data.l_flock.start <=
+ lock->l_policy_data.l_flock.start) {
+ if (new->l_policy_data.l_flock.end <
+ lock->l_policy_data.l_flock.end) {
+ lock->l_policy_data.l_flock.start =
+ new->l_policy_data.l_flock.end + 1;
+ break;
+ }
+ ldlm_flock_destroy(lock, lock->l_req_mode, *flags);
+ continue;
+ }
+ if (new->l_policy_data.l_flock.end >=
+ lock->l_policy_data.l_flock.end) {
+ lock->l_policy_data.l_flock.end =
+ new->l_policy_data.l_flock.start - 1;
+ continue;
+ }
+
+ /* split the existing lock into two locks */
+
+ /* if this is an F_UNLCK operation then we could avoid
+ * allocating a new lock and use the req lock passed in
+ * with the request but this would complicate the reply
+ * processing since updates to req get reflected in the
+ * reply. The client side replays the lock request so
+ * it must see the original lock data in the reply. */
+
+ /* XXX - if ldlm_lock_new() can sleep we should
+ * release the lr_lock, allocate the new lock,
+ * and restart processing this lock. */
+ if (!new2) {
+ unlock_res_and_lock(req);
+ new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK,
+ lock->l_granted_mode, &null_cbs,
+ NULL, 0, LVB_T_NONE);
+ lock_res_and_lock(req);
+ if (!new2) {
+ ldlm_flock_destroy(req, lock->l_granted_mode,
+ *flags);
+ *err = -ENOLCK;
+ return LDLM_ITER_STOP;
+ }
+ goto reprocess;
+ }
+
+ splitted = 1;
+
+ new2->l_granted_mode = lock->l_granted_mode;
+ new2->l_policy_data.l_flock.pid =
+ new->l_policy_data.l_flock.pid;
+ new2->l_policy_data.l_flock.owner =
+ new->l_policy_data.l_flock.owner;
+ new2->l_policy_data.l_flock.start =
+ lock->l_policy_data.l_flock.start;
+ new2->l_policy_data.l_flock.end =
+ new->l_policy_data.l_flock.start - 1;
+ lock->l_policy_data.l_flock.start =
+ new->l_policy_data.l_flock.end + 1;
+ new2->l_conn_export = lock->l_conn_export;
+ if (lock->l_export != NULL) {
+ new2->l_export = class_export_lock_get(lock->l_export, new2);
+ if (new2->l_export->exp_lock_hash &&
+ hlist_unhashed(&new2->l_exp_hash))
+ cfs_hash_add(new2->l_export->exp_lock_hash,
+ &new2->l_remote_handle,
+ &new2->l_exp_hash);
+ }
+ if (*flags == LDLM_FL_WAIT_NOREPROC)
+ ldlm_lock_addref_internal_nolock(new2,
+ lock->l_granted_mode);
+
+ /* insert new2 at lock */
+ ldlm_resource_add_lock(res, ownlocks, new2);
+ LDLM_LOCK_RELEASE(new2);
+ break;
+ }
+
+	/* if new2 was created but never used, destroy it */
+ if (splitted == 0 && new2 != NULL)
+ ldlm_lock_destroy_nolock(new2);
+
+ /* At this point we're granting the lock request. */
+ req->l_granted_mode = req->l_req_mode;
+
+ /* Add req to the granted queue before calling ldlm_reprocess_all(). */
+ if (!added) {
+ list_del_init(&req->l_res_link);
+ /* insert new lock before ownlocks in list. */
+ ldlm_resource_add_lock(res, ownlocks, req);
+ }
+
+ if (*flags != LDLM_FL_WAIT_NOREPROC) {
+		/* The only possible case for a client-side call into the
+		 * flock policy function is ldlm_flock_completion_ast(),
+		 * which carries the LDLM_FL_WAIT_NOREPROC flag. */
+ CERROR("Illegal parameter for client-side-only module.\n");
+ LBUG();
+ }
+
+	/* In case we're reprocessing the requested lock we can't destroy it
+	 * until after ldlm_add_ast_work_item() has been called above, so
+	 * that it can bump the reference count on \a req. Otherwise \a req
+	 * could be freed before the completion AST can be sent. */
+ if (added)
+ ldlm_flock_destroy(req, mode, *flags);
+
+ ldlm_resource_dump(D_INFO, res);
+ return LDLM_ITER_CONTINUE;
+}
+
+struct ldlm_flock_wait_data {
+ struct ldlm_lock *fwd_lock;
+ int fwd_generation;
+};
+
+static void
+ldlm_flock_interrupted_wait(void *data)
+{
+ struct ldlm_lock *lock;
+
+ lock = ((struct ldlm_flock_wait_data *)data)->fwd_lock;
+
+ /* take lock off the deadlock detection hash list. */
+ lock_res_and_lock(lock);
+ ldlm_flock_blocking_unlink(lock);
+
+ /* client side - set flag to prevent lock from being put on LRU list */
+ lock->l_flags |= LDLM_FL_CBPENDING;
+ unlock_res_and_lock(lock);
+}
+
+/**
+ * Flock completion callback function.
+ *
+ * \param lock [in,out]: A lock to be handled
+ * \param flags [in]: flags
+ * \param *data [in]: ldlm_work_cp_ast_lock() will use ldlm_cb_set_arg
+ *
+ * \retval 0 : success
+ * \retval <0 : failure
+ */
+int
+ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
+{
+ struct file_lock *getlk = lock->l_ast_data;
+ struct obd_device *obd;
+ struct obd_import *imp = NULL;
+ struct ldlm_flock_wait_data fwd;
+ struct l_wait_info lwi;
+ ldlm_error_t err;
+ int rc = 0;
+
+ CDEBUG(D_DLMTRACE, "flags: 0x%llx data: %p getlk: %p\n",
+ flags, data, getlk);
+
+	/* Import invalidation. We need to actually release the lock
+	 * references being held, so that it can go away. There is no point
+	 * in holding the lock even if the application still believes it has
+	 * it, since the server has already dropped it anyway. This only
+	 * applies to granted locks. */
+ if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
+ (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
+ if (lock->l_req_mode == lock->l_granted_mode &&
+ lock->l_granted_mode != LCK_NL &&
+ NULL == data)
+ ldlm_lock_decref_internal(lock, lock->l_req_mode);
+
+ /* Need to wake up the waiter if we were evicted */
+ wake_up(&lock->l_waitq);
+ return 0;
+ }
+
+ LASSERT(flags != LDLM_FL_WAIT_NOREPROC);
+
+ if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
+ LDLM_FL_BLOCK_CONV))) {
+ if (NULL == data)
+ /* mds granted the lock in the reply */
+ goto granted;
+ /* CP AST RPC: lock get granted, wake it up */
+ wake_up(&lock->l_waitq);
+ return 0;
+ }
+
+ LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
+ "sleeping");
+ fwd.fwd_lock = lock;
+ obd = class_exp2obd(lock->l_conn_export);
+
+ /* if this is a local lock, there is no import */
+ if (NULL != obd)
+ imp = obd->u.cli.cl_import;
+
+ if (NULL != imp) {
+ spin_lock(&imp->imp_lock);
+ fwd.fwd_generation = imp->imp_generation;
+ spin_unlock(&imp->imp_lock);
+ }
+
+ lwi = LWI_TIMEOUT_INTR(0, NULL, ldlm_flock_interrupted_wait, &fwd);
+
+ /* Go to sleep until the lock is granted. */
+ rc = l_wait_event(lock->l_waitq, is_granted_or_cancelled(lock), &lwi);
+
+ if (rc) {
+ LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
+ rc);
+ return rc;
+ }
+
+granted:
+ OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);
+
+ if (lock->l_flags & LDLM_FL_DESTROYED) {
+ LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
+ return 0;
+ }
+
+ if (lock->l_flags & LDLM_FL_FAILED) {
+ LDLM_DEBUG(lock, "client-side enqueue waking up: failed");
+ return -EIO;
+ }
+
+ if (rc) {
+ LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
+ rc);
+ return rc;
+ }
+
+ LDLM_DEBUG(lock, "client-side enqueue granted");
+
+ lock_res_and_lock(lock);
+
+ /* take lock off the deadlock detection hash list. */
+ ldlm_flock_blocking_unlink(lock);
+
+ /* ldlm_lock_enqueue() has already placed lock on the granted list. */
+ list_del_init(&lock->l_res_link);
+
+ if (flags & LDLM_FL_TEST_LOCK) {
+ /* fcntl(F_GETLK) request */
+		/* The old mode was saved in getlk->fl_type so that if the
+		 * mode in the lock changes we can decref the appropriate
+		 * refcount. */
+ ldlm_flock_destroy(lock, flock_type(getlk),
+ LDLM_FL_WAIT_NOREPROC);
+ switch (lock->l_granted_mode) {
+ case LCK_PR:
+ flock_set_type(getlk, F_RDLCK);
+ break;
+ case LCK_PW:
+ flock_set_type(getlk, F_WRLCK);
+ break;
+ default:
+ flock_set_type(getlk, F_UNLCK);
+ }
+ flock_set_pid(getlk, (pid_t)lock->l_policy_data.l_flock.pid);
+ flock_set_start(getlk,
+ (loff_t)lock->l_policy_data.l_flock.start);
+ flock_set_end(getlk,
+ (loff_t)lock->l_policy_data.l_flock.end);
+ } else {
+ __u64 noreproc = LDLM_FL_WAIT_NOREPROC;
+
+ /* We need to reprocess the lock to do merges or splits
+ * with existing locks owned by this process. */
+ ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
+ }
+ unlock_res_and_lock(lock);
+ return 0;
+}
+EXPORT_SYMBOL(ldlm_flock_completion_ast);
+
+int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
+ void *data, int flag)
+{
+ LASSERT(lock);
+ LASSERT(flag == LDLM_CB_CANCELING);
+
+ /* take lock off the deadlock detection hash list. */
+ lock_res_and_lock(lock);
+ ldlm_flock_blocking_unlink(lock);
+ unlock_res_and_lock(lock);
+ return 0;
+}
+
+void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy,
+ ldlm_policy_data_t *lpolicy)
+{
+ memset(lpolicy, 0, sizeof(*lpolicy));
+ lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
+ lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
+ lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
+	/* Compatibility code: old clients had no idea about the owner field
+	 * and relied solely on the pid for ownership. Introduced in LU-104,
+	 * 2.1, April 2011 */
+ lpolicy->l_flock.owner = wpolicy->l_flock.lfw_pid;
+}
+
+
+void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy,
+ ldlm_policy_data_t *lpolicy)
+{
+ memset(lpolicy, 0, sizeof(*lpolicy));
+ lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
+ lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
+ lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
+ lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner;
+}
+
+void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
+ ldlm_wire_policy_data_t *wpolicy)
+{
+ memset(wpolicy, 0, sizeof(*wpolicy));
+ wpolicy->l_flock.lfw_start = lpolicy->l_flock.start;
+ wpolicy->l_flock.lfw_end = lpolicy->l_flock.end;
+ wpolicy->l_flock.lfw_pid = lpolicy->l_flock.pid;
+ wpolicy->l_flock.lfw_owner = lpolicy->l_flock.owner;
+}
+
+/*
+ * Export handle<->flock hash operations.
+ */
+static unsigned
+ldlm_export_flock_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+{
+ return cfs_hash_u64_hash(*(__u64 *)key, mask);
+}
+
+static void *
+ldlm_export_flock_key(struct hlist_node *hnode)
+{
+ struct ldlm_lock *lock;
+
+ lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
+ return &lock->l_policy_data.l_flock.owner;
+}
+
+static int
+ldlm_export_flock_keycmp(const void *key, struct hlist_node *hnode)
+{
+ return !memcmp(ldlm_export_flock_key(hnode), key, sizeof(__u64));
+}
+
+static void *
+ldlm_export_flock_object(struct hlist_node *hnode)
+{
+ return hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
+}
+
+static void
+ldlm_export_flock_get(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+ struct ldlm_lock *lock;
+ struct ldlm_flock *flock;
+
+ lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
+ LDLM_LOCK_GET(lock);
+
+ flock = &lock->l_policy_data.l_flock;
+ LASSERT(flock->blocking_export != NULL);
+ class_export_get(flock->blocking_export);
+ flock->blocking_refs++;
+}
+
+static void
+ldlm_export_flock_put(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+ struct ldlm_lock *lock;
+ struct ldlm_flock *flock;
+
+ lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
+ LDLM_LOCK_RELEASE(lock);
+
+ flock = &lock->l_policy_data.l_flock;
+ LASSERT(flock->blocking_export != NULL);
+ class_export_put(flock->blocking_export);
+ if (--flock->blocking_refs == 0) {
+ flock->blocking_owner = 0;
+ flock->blocking_export = NULL;
+ }
+}
+
+static cfs_hash_ops_t ldlm_export_flock_ops = {
+ .hs_hash = ldlm_export_flock_hash,
+ .hs_key = ldlm_export_flock_key,
+ .hs_keycmp = ldlm_export_flock_keycmp,
+ .hs_object = ldlm_export_flock_object,
+ .hs_get = ldlm_export_flock_get,
+ .hs_put = ldlm_export_flock_put,
+ .hs_put_locked = ldlm_export_flock_put,
+};
+
+int ldlm_init_flock_export(struct obd_export *exp)
+{
+ exp->exp_flock_hash =
+ cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
+ HASH_EXP_LOCK_CUR_BITS,
+ HASH_EXP_LOCK_MAX_BITS,
+ HASH_EXP_LOCK_BKT_BITS, 0,
+ CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
+ &ldlm_export_flock_ops,
+ CFS_HASH_DEFAULT | CFS_HASH_NBLK_CHANGE);
+ if (!exp->exp_flock_hash)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL(ldlm_init_flock_export);
+
+void ldlm_destroy_flock_export(struct obd_export *exp)
+{
+ if (exp->exp_flock_hash) {
+ cfs_hash_putref(exp->exp_flock_hash);
+ exp->exp_flock_hash = NULL;
+ }
+}
+EXPORT_SYMBOL(ldlm_destroy_flock_export);
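
For reference, a minimal sketch of how the two exported helpers pair up on an
export; ldlm_destroy_flock_export() is safe to call even if the hash was never
created (the function name is illustrative only):

	static int flock_export_sketch(struct obd_export *exp)
	{
		int rc;

		rc = ldlm_init_flock_export(exp);
		if (rc)
			return rc; /* -ENOMEM: the owner hash was not allocated */

		/* ... exp->exp_flock_hash is now usable by the deadlock
		 * detection paths above ... */

		ldlm_destroy_flock_export(exp);	/* drops and clears the hash */
		return 0;
	}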
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c b/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
new file mode 100644
index 0000000..574b2ff
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
@@ -0,0 +1,75 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ldlm/ldlm_inodebits.c
+ *
+ * Author: Peter Braam <braam@clusterfs.com>
+ * Author: Phil Schwan <phil@clusterfs.com>
+ */
+
+/**
+ * This file contains the implementation of the IBITS lock type
+ *
+ * IBITS lock type contains a bit mask determining various properties of an
+ * object. The meanings of specific bits are specific to the caller and are
+ * opaque to LDLM code.
+ *
+ * Locks with intersecting bitmasks and conflicting lock modes (e.g. LCK_PW)
+ * are considered conflicting. See the lock mode compatibility matrix
+ * in lustre_dlm.h.
+ */
+
+#define DEBUG_SUBSYSTEM S_LDLM
+
+#include <lustre_dlm.h>
+#include <obd_support.h>
+#include <lustre_lib.h>
+
+#include "ldlm_internal.h"
+
+
+void ldlm_ibits_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
+ ldlm_policy_data_t *lpolicy)
+{
+ memset(lpolicy, 0, sizeof(*lpolicy));
+ lpolicy->l_inodebits.bits = wpolicy->l_inodebits.bits;
+}
+
+void ldlm_ibits_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
+ ldlm_wire_policy_data_t *wpolicy)
+{
+ memset(wpolicy, 0, sizeof(*wpolicy));
+ wpolicy->l_inodebits.bits = lpolicy->l_inodebits.bits;
+}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
new file mode 100644
index 0000000..8cd7963
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -0,0 +1,309 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define MAX_STRING_SIZE 128
+
+extern int ldlm_srv_namespace_nr;
+extern int ldlm_cli_namespace_nr;
+extern struct mutex ldlm_srv_namespace_lock;
+extern struct list_head ldlm_srv_namespace_list;
+extern struct mutex ldlm_cli_namespace_lock;
+extern struct list_head ldlm_cli_active_namespace_list;
+extern struct list_head ldlm_cli_inactive_namespace_list;
+
+static inline int ldlm_namespace_nr_read(ldlm_side_t client)
+{
+ return client == LDLM_NAMESPACE_SERVER ?
+ ldlm_srv_namespace_nr : ldlm_cli_namespace_nr;
+}
+
+static inline void ldlm_namespace_nr_inc(ldlm_side_t client)
+{
+ if (client == LDLM_NAMESPACE_SERVER)
+ ldlm_srv_namespace_nr++;
+ else
+ ldlm_cli_namespace_nr++;
+}
+
+static inline void ldlm_namespace_nr_dec(ldlm_side_t client)
+{
+ if (client == LDLM_NAMESPACE_SERVER)
+ ldlm_srv_namespace_nr--;
+ else
+ ldlm_cli_namespace_nr--;
+}
+
+static inline struct list_head *ldlm_namespace_list(ldlm_side_t client)
+{
+ return client == LDLM_NAMESPACE_SERVER ?
+ &ldlm_srv_namespace_list : &ldlm_cli_active_namespace_list;
+}
+
+static inline struct list_head *ldlm_namespace_inactive_list(ldlm_side_t client)
+{
+ return client == LDLM_NAMESPACE_SERVER ?
+ &ldlm_srv_namespace_list : &ldlm_cli_inactive_namespace_list;
+}
+
+static inline struct mutex *ldlm_namespace_lock(ldlm_side_t client)
+{
+ return client == LDLM_NAMESPACE_SERVER ?
+ &ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock;
+}
+
+/* ns_bref is the number of resources in this namespace */
+static inline int ldlm_ns_empty(struct ldlm_namespace *ns)
+{
+ return atomic_read(&ns->ns_bref) == 0;
+}
+
+void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *, ldlm_side_t);
+void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *, ldlm_side_t);
+struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t);
+
+/* ldlm_request.c */
+/* Cancel LRU flags; they indicate which locks to cancel and how. */
+enum {
+ LDLM_CANCEL_AGED = 1 << 0, /* Cancel aged locks (non lru resize). */
+ LDLM_CANCEL_PASSED = 1 << 1, /* Cancel passed number of locks. */
+ LDLM_CANCEL_SHRINK = 1 << 2, /* Cancel locks from shrinker. */
+ LDLM_CANCEL_LRUR = 1 << 3, /* Cancel locks from lru resize. */
+ LDLM_CANCEL_NO_WAIT = 1 << 4 /* Cancel locks w/o blocking (neither
+ * sending nor waiting for any rpcs) */
+};
+
+int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
+ ldlm_cancel_flags_t sync, int flags);
+int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
+ struct list_head *cancels, int count, int max,
+ ldlm_cancel_flags_t cancel_flags, int flags);
+extern int ldlm_enqueue_min;
+int ldlm_get_enq_timeout(struct ldlm_lock *lock);
+
+/* ldlm_resource.c */
+int ldlm_resource_putref_locked(struct ldlm_resource *res);
+void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
+ struct ldlm_lock *new);
+void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
+ struct obd_import *imp, int force);
+void ldlm_namespace_free_post(struct ldlm_namespace *ns);
+/* ldlm_lock.c */
+
+struct ldlm_cb_set_arg {
+ struct ptlrpc_request_set *set;
+ int type; /* LDLM_{CP,BL,GL}_CALLBACK */
+ atomic_t restart;
+ struct list_head *list;
+ union ldlm_gl_desc *gl_desc; /* glimpse AST descriptor */
+};
+
+typedef enum {
+ LDLM_WORK_BL_AST,
+ LDLM_WORK_CP_AST,
+ LDLM_WORK_REVOKE_AST,
+ LDLM_WORK_GL_AST
+} ldlm_desc_ast_t;
+
+void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list);
+int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
+ enum req_location loc, void *data, int size);
+struct ldlm_lock *
+ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *,
+ ldlm_type_t type, ldlm_mode_t,
+ const struct ldlm_callback_suite *cbs,
+ void *data, __u32 lvb_len, enum lvb_type lvb_type);
+ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *, struct ldlm_lock **,
+ void *cookie, __u64 *flags);
+void ldlm_lock_addref_internal(struct ldlm_lock *, __u32 mode);
+void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, __u32 mode);
+void ldlm_lock_decref_internal(struct ldlm_lock *, __u32 mode);
+void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, __u32 mode);
+void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
+ struct list_head *work_list);
+int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
+ ldlm_desc_ast_t ast_type);
+int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq);
+int ldlm_lock_remove_from_lru(struct ldlm_lock *lock);
+int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock);
+void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock);
+void ldlm_lock_add_to_lru(struct ldlm_lock *lock);
+void ldlm_lock_touch_in_lru(struct ldlm_lock *lock);
+void ldlm_lock_destroy_nolock(struct ldlm_lock *lock);
+
+void ldlm_cancel_locks_for_export(struct obd_export *export);
+
+/* ldlm_lockd.c */
+int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
+ struct ldlm_lock *lock);
+int ldlm_bl_to_thread_list(struct ldlm_namespace *ns,
+ struct ldlm_lock_desc *ld,
+ struct list_head *cancels, int count,
+ ldlm_cancel_flags_t cancel_flags);
+
+void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
+ struct ldlm_lock_desc *ld, struct ldlm_lock *lock);
+
+
+/* ldlm_extent.c */
+void ldlm_extent_add_lock(struct ldlm_resource *res, struct ldlm_lock *lock);
+void ldlm_extent_unlink_lock(struct ldlm_lock *lock);
+
+/* ldlm_flock.c */
+int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
+ int first_enq, ldlm_error_t *err,
+ struct list_head *work_list);
+int ldlm_init_flock_export(struct obd_export *exp);
+void ldlm_destroy_flock_export(struct obd_export *exp);
+
+/* l_lock.c */
+void l_check_ns_lock(struct ldlm_namespace *ns);
+void l_check_no_ns_lock(struct ldlm_namespace *ns);
+
+extern struct proc_dir_entry *ldlm_svc_proc_dir;
+extern struct proc_dir_entry *ldlm_type_proc_dir;
+
+struct ldlm_state {
+ struct ptlrpc_service *ldlm_cb_service;
+ struct ptlrpc_service *ldlm_cancel_service;
+ struct ptlrpc_client *ldlm_client;
+ struct ptlrpc_connection *ldlm_server_conn;
+ struct ldlm_bl_pool *ldlm_bl_pool;
+};
+
+/* interval tree, for LDLM_EXTENT. */
+extern struct kmem_cache *ldlm_interval_slab; /* slab cache for ldlm_interval */
+extern void ldlm_interval_attach(struct ldlm_interval *n, struct ldlm_lock *l);
+extern struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l);
+extern struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock);
+extern void ldlm_interval_free(struct ldlm_interval *node);
+/* this function must be called with res lock held */
+static inline struct ldlm_extent *
+ldlm_interval_extent(struct ldlm_interval *node)
+{
+ struct ldlm_lock *lock;
+ LASSERT(!list_empty(&node->li_group));
+
+ lock = list_entry(node->li_group.next, struct ldlm_lock,
+ l_sl_policy);
+ return &lock->l_policy_data.l_extent;
+}
+
+int ldlm_init(void);
+void ldlm_exit(void);
+
+enum ldlm_policy_res {
+ LDLM_POLICY_CANCEL_LOCK,
+ LDLM_POLICY_KEEP_LOCK,
+ LDLM_POLICY_SKIP_LOCK
+};
+
+typedef enum ldlm_policy_res ldlm_policy_res_t;
+
+#define LDLM_POOL_PROC_READER_SEQ_SHOW(var, type) \
+ static int lprocfs_##var##_seq_show(struct seq_file *m, void *v) \
+ { \
+ struct ldlm_pool *pl = m->private; \
+ type tmp; \
+ \
+ spin_lock(&pl->pl_lock); \
+ tmp = pl->pl_##var; \
+ spin_unlock(&pl->pl_lock); \
+ \
+ return lprocfs_rd_uint(m, &tmp); \
+ } \
+ struct __##var##__dummy_read {;} /* semicolon catcher */
+
+#define LDLM_POOL_PROC_WRITER(var, type) \
+ int lprocfs_wr_##var(struct file *file, const char *buffer, \
+ unsigned long count, void *data) \
+ { \
+ struct ldlm_pool *pl = data; \
+ type tmp; \
+ int rc; \
+ \
+ rc = lprocfs_wr_uint(file, buffer, count, &tmp); \
+ if (rc < 0) { \
+ CERROR("Can't parse user input, rc = %d\n", rc); \
+ return rc; \
+ } \
+ \
+ spin_lock(&pl->pl_lock); \
+ pl->pl_##var = tmp; \
+ spin_unlock(&pl->pl_lock); \
+ \
+ return rc; \
+ } \
+ struct __##var##__dummy_write {;} /* semicolon catcher */
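
For reference, instantiating the two macros for a hypothetical pool field
pl_example (the field and the generated names are illustrative only) would
produce lprocfs_example_seq_show() and lprocfs_wr_example():

	/* assumes struct ldlm_pool has an 'int pl_example' member */
	LDLM_POOL_PROC_READER_SEQ_SHOW(example, int);
	LDLM_POOL_PROC_WRITER(example, int);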
+
+static inline int is_granted_or_cancelled(struct ldlm_lock *lock)
+{
+ int ret = 0;
+
+ lock_res_and_lock(lock);
+ if (((lock->l_req_mode == lock->l_granted_mode) &&
+ !(lock->l_flags & LDLM_FL_CP_REQD)) ||
+ (lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_CANCEL)))
+ ret = 1;
+ unlock_res_and_lock(lock);
+
+ return ret;
+}
+
+typedef void (*ldlm_policy_wire_to_local_t)(const ldlm_wire_policy_data_t *,
+ ldlm_policy_data_t *);
+
+typedef void (*ldlm_policy_local_to_wire_t)(const ldlm_policy_data_t *,
+ ldlm_wire_policy_data_t *);
+
+void ldlm_plain_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
+ ldlm_policy_data_t *lpolicy);
+void ldlm_plain_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
+ ldlm_wire_policy_data_t *wpolicy);
+void ldlm_ibits_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
+ ldlm_policy_data_t *lpolicy);
+void ldlm_ibits_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
+ ldlm_wire_policy_data_t *wpolicy);
+void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
+ ldlm_policy_data_t *lpolicy);
+void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
+ ldlm_wire_policy_data_t *wpolicy);
+void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy,
+ ldlm_policy_data_t *lpolicy);
+void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy,
+ ldlm_policy_data_t *lpolicy);
+
+void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
+ ldlm_wire_policy_data_t *wpolicy);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
new file mode 100644
index 0000000..1a8c0d7
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -0,0 +1,851 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2010, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+/**
+ * This file deals with various client/target related logic including recovery.
+ *
+ * TODO: This code more logically belongs in the ptlrpc module than in ldlm and
+ * should be moved.
+ */
+
+#define DEBUG_SUBSYSTEM S_LDLM
+
+# include <linux/libcfs/libcfs.h>
+#include <obd.h>
+#include <obd_class.h>
+#include <lustre_dlm.h>
+#include <lustre_net.h>
+#include <lustre_sec.h>
+#include "ldlm_internal.h"
+
+/* @priority: If non-zero, move the selected connection to the list head.
+ * @create: If zero, only search in existing connections.
+ */
+static int import_set_conn(struct obd_import *imp, struct obd_uuid *uuid,
+ int priority, int create)
+{
+ struct ptlrpc_connection *ptlrpc_conn;
+ struct obd_import_conn *imp_conn = NULL, *item;
+ int rc = 0;
+
+ if (!create && !priority) {
+ CDEBUG(D_HA, "Nothing to do\n");
+ return -EINVAL;
+ }
+
+ ptlrpc_conn = ptlrpc_uuid_to_connection(uuid);
+ if (!ptlrpc_conn) {
+ CDEBUG(D_HA, "can't find connection %s\n", uuid->uuid);
+ return -ENOENT;
+ }
+
+ if (create) {
+ OBD_ALLOC(imp_conn, sizeof(*imp_conn));
+ if (!imp_conn) {
+ GOTO(out_put, rc = -ENOMEM);
+ }
+ }
+
+ spin_lock(&imp->imp_lock);
+ list_for_each_entry(item, &imp->imp_conn_list, oic_item) {
+ if (obd_uuid_equals(uuid, &item->oic_uuid)) {
+ if (priority) {
+ list_del(&item->oic_item);
+ list_add(&item->oic_item,
+ &imp->imp_conn_list);
+ item->oic_last_attempt = 0;
+ }
+ CDEBUG(D_HA, "imp %p@%s: found existing conn %s%s\n",
+ imp, imp->imp_obd->obd_name, uuid->uuid,
+ (priority ? ", moved to head" : ""));
+ spin_unlock(&imp->imp_lock);
+ GOTO(out_free, rc = 0);
+ }
+ }
+ /* No existing import connection found for \a uuid. */
+ if (create) {
+ imp_conn->oic_conn = ptlrpc_conn;
+ imp_conn->oic_uuid = *uuid;
+ imp_conn->oic_last_attempt = 0;
+ if (priority)
+ list_add(&imp_conn->oic_item, &imp->imp_conn_list);
+ else
+ list_add_tail(&imp_conn->oic_item,
+ &imp->imp_conn_list);
+ CDEBUG(D_HA, "imp %p@%s: add connection %s at %s\n",
+ imp, imp->imp_obd->obd_name, uuid->uuid,
+ (priority ? "head" : "tail"));
+ } else {
+ spin_unlock(&imp->imp_lock);
+ GOTO(out_free, rc = -ENOENT);
+ }
+
+ spin_unlock(&imp->imp_lock);
+ return 0;
+out_free:
+ if (imp_conn)
+ OBD_FREE(imp_conn, sizeof(*imp_conn));
+out_put:
+ ptlrpc_connection_put(ptlrpc_conn);
+ return rc;
+}
+
+int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid)
+{
+ return import_set_conn(imp, uuid, 1, 0);
+}
+
+int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
+ int priority)
+{
+ return import_set_conn(imp, uuid, priority, 1);
+}
+EXPORT_SYMBOL(client_import_add_conn);
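+
+/*
+ * Illustrative sketch only: registering a failover connection on an existing
+ * import.  "failover_uuid" is a placeholder for an obd_uuid naming a
+ * reachable server NID; error handling is abbreviated.
+ *
+ *	rc = client_import_add_conn(imp, &failover_uuid, 0);
+ *	if (rc != 0)
+ *		CERROR("can't add failover connection: rc = %d\n", rc);
+ */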
+
+int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid)
+{
+ struct obd_import_conn *imp_conn;
+ struct obd_export *dlmexp;
+ int rc = -ENOENT;
+
+ spin_lock(&imp->imp_lock);
+ if (list_empty(&imp->imp_conn_list)) {
+ LASSERT(!imp->imp_connection);
+ GOTO(out, rc);
+ }
+
+ list_for_each_entry(imp_conn, &imp->imp_conn_list, oic_item) {
+ if (!obd_uuid_equals(uuid, &imp_conn->oic_uuid))
+ continue;
+ LASSERT(imp_conn->oic_conn);
+
+ if (imp_conn == imp->imp_conn_current) {
+ LASSERT(imp_conn->oic_conn == imp->imp_connection);
+
+ if (imp->imp_state != LUSTRE_IMP_CLOSED &&
+ imp->imp_state != LUSTRE_IMP_DISCON) {
+ CERROR("can't remove current connection\n");
+ GOTO(out, rc = -EBUSY);
+ }
+
+ ptlrpc_connection_put(imp->imp_connection);
+ imp->imp_connection = NULL;
+
+ dlmexp = class_conn2export(&imp->imp_dlm_handle);
+ if (dlmexp && dlmexp->exp_connection) {
+ LASSERT(dlmexp->exp_connection ==
+ imp_conn->oic_conn);
+ ptlrpc_connection_put(dlmexp->exp_connection);
+ dlmexp->exp_connection = NULL;
+ }
+ }
+
+ list_del(&imp_conn->oic_item);
+ ptlrpc_connection_put(imp_conn->oic_conn);
+ OBD_FREE(imp_conn, sizeof(*imp_conn));
+ CDEBUG(D_HA, "imp %p@%s: remove connection %s\n",
+ imp, imp->imp_obd->obd_name, uuid->uuid);
+ rc = 0;
+ break;
+ }
+out:
+ spin_unlock(&imp->imp_lock);
+ if (rc == -ENOENT)
+ CERROR("connection %s not found\n", uuid->uuid);
+ return rc;
+}
+EXPORT_SYMBOL(client_import_del_conn);
+
+/**
+ * Find conn UUID by peer NID. \a peer is a server NID. This function is used
+ * to find a conn uuid of \a imp which can reach \a peer.
+ */
+int client_import_find_conn(struct obd_import *imp, lnet_nid_t peer,
+ struct obd_uuid *uuid)
+{
+ struct obd_import_conn *conn;
+ int rc = -ENOENT;
+
+ spin_lock(&imp->imp_lock);
+ list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
+ /* Check if conn UUID has this peer NID. */
+ if (class_check_uuid(&conn->oic_uuid, peer)) {
+ *uuid = conn->oic_uuid;
+ rc = 0;
+ break;
+ }
+ }
+ spin_unlock(&imp->imp_lock);
+ return rc;
+}
+EXPORT_SYMBOL(client_import_find_conn);
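+
+/*
+ * Illustrative sketch: look up which conn UUID of an import reaches a given
+ * peer NID.  "peer_nid" is a placeholder lnet_nid_t; the import is assumed
+ * valid.
+ *
+ *	struct obd_uuid conn_uuid;
+ *
+ *	if (client_import_find_conn(imp, peer_nid, &conn_uuid) == 0)
+ *		CDEBUG(D_HA, "peer is reachable via %s\n", conn_uuid.uuid);
+ */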
+
+void client_destroy_import(struct obd_import *imp)
+{
+ /* Drop security policy instance after all RPCs have finished/aborted
+ * to let all busy contexts be released. */
+ class_import_get(imp);
+ class_destroy_import(imp);
+ sptlrpc_import_sec_put(imp);
+ class_import_put(imp);
+}
+EXPORT_SYMBOL(client_destroy_import);
+
+/**
+ * Check whether or not the OSC is on MDT.
+ * In the config log,
+ * osc on MDT
+ * setup 0:{fsname}-OSTxxxx-osc[-MDTxxxx] 1:lustre-OST0000_UUID 2:NID
+ * osc on client
+ * setup 0:{fsname}-OSTxxxx-osc 1:lustre-OST0000_UUID 2:NID
+ */
+static int osc_on_mdt(char *obdname)
+{
+ char *ptr;
+
+ ptr = strrchr(obdname, '-');
+ if (ptr == NULL)
+ return 0;
+
+ if (strncmp(ptr + 1, "MDT", 3) == 0)
+ return 1;
+
+ return 0;
+}
+
+/* Configure an RPC client OBD device.
+ *
+ * lcfg parameters:
+ * 1 - client UUID
+ * 2 - server UUID
+ * 3 - inactive-on-startup
+ */
+int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
+{
+ struct client_obd *cli = &obddev->u.cli;
+ struct obd_import *imp;
+ struct obd_uuid server_uuid;
+ int rq_portal, rp_portal, connect_op;
+ char *name = obddev->obd_type->typ_name;
+ ldlm_ns_type_t ns_type = LDLM_NS_TYPE_UNKNOWN;
+ int rc;
+ char *cli_name = lustre_cfg_buf(lcfg, 0);
+
+ /* In a more perfect world, we would hang a ptlrpc_client off of
+ * obd_type and just use the values from there. */
+ if (!strcmp(name, LUSTRE_OSC_NAME) ||
+ (!(strcmp(name, LUSTRE_OSP_NAME)) &&
+ (is_osp_on_mdt(cli_name) &&
+ strstr(lustre_cfg_buf(lcfg, 1), "OST") != NULL))) {
+ /* OSC or OSP_on_MDT for OSTs */
+ rq_portal = OST_REQUEST_PORTAL;
+ rp_portal = OSC_REPLY_PORTAL;
+ connect_op = OST_CONNECT;
+ cli->cl_sp_me = LUSTRE_SP_CLI;
+ cli->cl_sp_to = LUSTRE_SP_OST;
+ ns_type = LDLM_NS_TYPE_OSC;
+ } else if (!strcmp(name, LUSTRE_MDC_NAME) ||
+ !strcmp(name, LUSTRE_LWP_NAME) ||
+ (!strcmp(name, LUSTRE_OSP_NAME) &&
+ (is_osp_on_mdt(cli_name) &&
+ strstr(lustre_cfg_buf(lcfg, 1), "OST") == NULL))) {
+ /* MDC or OSP_on_MDT for other MDTs */
+ rq_portal = MDS_REQUEST_PORTAL;
+ rp_portal = MDC_REPLY_PORTAL;
+ connect_op = MDS_CONNECT;
+ cli->cl_sp_me = LUSTRE_SP_CLI;
+ cli->cl_sp_to = LUSTRE_SP_MDT;
+ ns_type = LDLM_NS_TYPE_MDC;
+ } else if (!strcmp(name, LUSTRE_MGC_NAME)) {
+ rq_portal = MGS_REQUEST_PORTAL;
+ rp_portal = MGC_REPLY_PORTAL;
+ connect_op = MGS_CONNECT;
+ cli->cl_sp_me = LUSTRE_SP_MGC;
+ cli->cl_sp_to = LUSTRE_SP_MGS;
+ cli->cl_flvr_mgc.sf_rpc = SPTLRPC_FLVR_INVALID;
+ ns_type = LDLM_NS_TYPE_MGC;
+ } else {
+ CERROR("unknown client OBD type \"%s\", can't setup\n",
+ name);
+ return -EINVAL;
+ }
+
+ if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
+ CERROR("requires a TARGET UUID\n");
+ return -EINVAL;
+ }
+
+ if (LUSTRE_CFG_BUFLEN(lcfg, 1) > 37) {
+ CERROR("target UUID must be less than 38 characters\n");
+ return -EINVAL;
+ }
+
+ if (LUSTRE_CFG_BUFLEN(lcfg, 2) < 1) {
+ CERROR("setup requires a SERVER UUID\n");
+ return -EINVAL;
+ }
+
+ if (LUSTRE_CFG_BUFLEN(lcfg, 2) > 37) {
+ CERROR("server UUID must be less than 38 characters\n");
+ return -EINVAL;
+ }
+
+ init_rwsem(&cli->cl_sem);
+ sema_init(&cli->cl_mgc_sem, 1);
+ cli->cl_conn_count = 0;
+ memcpy(server_uuid.uuid, lustre_cfg_buf(lcfg, 2),
+ min_t(unsigned int, LUSTRE_CFG_BUFLEN(lcfg, 2),
+ sizeof(server_uuid)));
+
+ cli->cl_dirty = 0;
+ cli->cl_avail_grant = 0;
+ /* FIXME: Should limit this for the sum of all cl_dirty_max. */
+ cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024;
+ if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > totalram_pages / 8)
+ cli->cl_dirty_max = totalram_pages << (PAGE_CACHE_SHIFT - 3);
+ INIT_LIST_HEAD(&cli->cl_cache_waiters);
+ INIT_LIST_HEAD(&cli->cl_loi_ready_list);
+ INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
+ INIT_LIST_HEAD(&cli->cl_loi_write_list);
+ INIT_LIST_HEAD(&cli->cl_loi_read_list);
+ client_obd_list_lock_init(&cli->cl_loi_list_lock);
+ atomic_set(&cli->cl_pending_w_pages, 0);
+ atomic_set(&cli->cl_pending_r_pages, 0);
+ cli->cl_r_in_flight = 0;
+ cli->cl_w_in_flight = 0;
+
+ spin_lock_init(&cli->cl_read_rpc_hist.oh_lock);
+ spin_lock_init(&cli->cl_write_rpc_hist.oh_lock);
+ spin_lock_init(&cli->cl_read_page_hist.oh_lock);
+ spin_lock_init(&cli->cl_write_page_hist.oh_lock);
+ spin_lock_init(&cli->cl_read_offset_hist.oh_lock);
+ spin_lock_init(&cli->cl_write_offset_hist.oh_lock);
+
+ /* lru for osc. */
+ INIT_LIST_HEAD(&cli->cl_lru_osc);
+ atomic_set(&cli->cl_lru_shrinkers, 0);
+ atomic_set(&cli->cl_lru_busy, 0);
+ atomic_set(&cli->cl_lru_in_list, 0);
+ INIT_LIST_HEAD(&cli->cl_lru_list);
+ client_obd_list_lock_init(&cli->cl_lru_list_lock);
+
+ init_waitqueue_head(&cli->cl_destroy_waitq);
+ atomic_set(&cli->cl_destroy_in_flight, 0);
+ /* Turn on checksumming by default. */
+ cli->cl_checksum = 1;
+ /*
+ * The supported checksum types will be worked out at connect time.
+ * Set cl_cksum* to CRC32 for now to avoid returning screwed info
+ * through procfs.
+ */
+ cli->cl_cksum_type = cli->cl_supp_cksum_types = OBD_CKSUM_CRC32;
+ atomic_set(&cli->cl_resends, OSC_DEFAULT_RESENDS);
+
+ /* This value may be reduced at connect time in
+ * ptlrpc_connect_interpret(). We initialize it to only
+ * 1MB until we know what the performance looks like.
+ * In the future this should likely be increased. LU-1431 */
+ cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES,
+ LNET_MTU >> PAGE_CACHE_SHIFT);
+
+ if (!strcmp(name, LUSTRE_MDC_NAME)) {
+ cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT;
+ } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) {
+ cli->cl_max_rpcs_in_flight = 2;
+ } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) {
+ cli->cl_max_rpcs_in_flight = 3;
+ } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) {
+ cli->cl_max_rpcs_in_flight = 4;
+ } else {
+ if (osc_on_mdt(obddev->obd_name))
+ cli->cl_max_rpcs_in_flight = MDS_OSC_MAX_RIF_DEFAULT;
+ else
+ cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT;
+ }
+ rc = ldlm_get_ref();
+ if (rc) {
+ CERROR("ldlm_get_ref failed: %d\n", rc);
+ GOTO(err, rc);
+ }
+
+ ptlrpc_init_client(rq_portal, rp_portal, name,
+ &obddev->obd_ldlm_client);
+
+ imp = class_new_import(obddev);
+ if (imp == NULL)
+ GOTO(err_ldlm, rc = -ENOENT);
+ imp->imp_client = &obddev->obd_ldlm_client;
+ imp->imp_connect_op = connect_op;
+ memcpy(cli->cl_target_uuid.uuid, lustre_cfg_buf(lcfg, 1),
+ LUSTRE_CFG_BUFLEN(lcfg, 1));
+ class_import_put(imp);
+
+ rc = client_import_add_conn(imp, &server_uuid, 1);
+ if (rc) {
+ CERROR("can't add initial connection\n");
+ GOTO(err_import, rc);
+ }
+
+ cli->cl_import = imp;
+ /* cli->cl_max_mds_{easize,cookiesize} updated by mdc_init_ea_size() */
+ cli->cl_max_mds_easize = sizeof(struct lov_mds_md_v3);
+ cli->cl_max_mds_cookiesize = sizeof(struct llog_cookie);
+
+ if (LUSTRE_CFG_BUFLEN(lcfg, 3) > 0) {
+ if (!strcmp(lustre_cfg_string(lcfg, 3), "inactive")) {
+ CDEBUG(D_HA, "marking %s %s->%s as inactive\n",
+ name, obddev->obd_name,
+ cli->cl_target_uuid.uuid);
+ spin_lock(&imp->imp_lock);
+ imp->imp_deactive = 1;
+ spin_unlock(&imp->imp_lock);
+ }
+ }
+
+ obddev->obd_namespace = ldlm_namespace_new(obddev, obddev->obd_name,
+ LDLM_NAMESPACE_CLIENT,
+ LDLM_NAMESPACE_GREEDY,
+ ns_type);
+ if (obddev->obd_namespace == NULL) {
+ CERROR("Unable to create client namespace - %s\n",
+ obddev->obd_name);
+ GOTO(err_import, rc = -ENOMEM);
+ }
+
+ cli->cl_qchk_stat = CL_NOT_QUOTACHECKED;
+
+ return rc;
+
+err_import:
+ class_destroy_import(imp);
+err_ldlm:
+ ldlm_put_ref();
+err:
+ return rc;
+
+}
+EXPORT_SYMBOL(client_obd_setup);
+
+int client_obd_cleanup(struct obd_device *obddev)
+{
+ ldlm_namespace_free_post(obddev->obd_namespace);
+ obddev->obd_namespace = NULL;
+
+ LASSERT(obddev->u.cli.cl_import == NULL);
+
+ ldlm_put_ref();
+ return 0;
+}
+EXPORT_SYMBOL(client_obd_cleanup);
+
+/* ->o_connect() method for client side (OSC and MDC and MGC) */
+int client_connect_import(const struct lu_env *env,
+ struct obd_export **exp,
+ struct obd_device *obd, struct obd_uuid *cluuid,
+ struct obd_connect_data *data, void *localdata)
+{
+ struct client_obd *cli = &obd->u.cli;
+ struct obd_import *imp = cli->cl_import;
+ struct obd_connect_data *ocd;
+ struct lustre_handle conn = { 0 };
+ int rc;
+
+ *exp = NULL;
+ down_write(&cli->cl_sem);
+ if (cli->cl_conn_count > 0)
+ GOTO(out_sem, rc = -EALREADY);
+
+ rc = class_connect(&conn, obd, cluuid);
+ if (rc)
+ GOTO(out_sem, rc);
+
+ cli->cl_conn_count++;
+ *exp = class_conn2export(&conn);
+
+ LASSERT(obd->obd_namespace);
+
+ imp->imp_dlm_handle = conn;
+ rc = ptlrpc_init_import(imp);
+ if (rc != 0)
+ GOTO(out_ldlm, rc);
+
+ ocd = &imp->imp_connect_data;
+ if (data) {
+ *ocd = *data;
+ imp->imp_connect_flags_orig = data->ocd_connect_flags;
+ }
+
+ rc = ptlrpc_connect_import(imp);
+ if (rc != 0) {
+ LASSERT(imp->imp_state == LUSTRE_IMP_DISCON);
+ GOTO(out_ldlm, rc);
+ }
+ LASSERT((*exp)->exp_connection);
+
+ if (data) {
+ LASSERTF((ocd->ocd_connect_flags & data->ocd_connect_flags) ==
+ ocd->ocd_connect_flags, "old "LPX64", new "LPX64"\n",
+ data->ocd_connect_flags, ocd->ocd_connect_flags);
+ data->ocd_connect_flags = ocd->ocd_connect_flags;
+ }
+
+ ptlrpc_pinger_add_import(imp);
+
+ if (rc) {
+out_ldlm:
+ cli->cl_conn_count--;
+ class_disconnect(*exp);
+ *exp = NULL;
+ }
+out_sem:
+ up_write(&cli->cl_sem);
+
+ return rc;
+}
+EXPORT_SYMBOL(client_connect_import);
+
+int client_disconnect_export(struct obd_export *exp)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+ struct client_obd *cli;
+ struct obd_import *imp;
+ int rc = 0, err;
+
+ if (!obd) {
+ CERROR("invalid export for disconnect: exp %p cookie "LPX64"\n",
+ exp, exp ? exp->exp_handle.h_cookie : -1);
+ return -EINVAL;
+ }
+
+ cli = &obd->u.cli;
+ imp = cli->cl_import;
+
+ down_write(&cli->cl_sem);
+ CDEBUG(D_INFO, "disconnect %s - %d\n", obd->obd_name,
+ cli->cl_conn_count);
+
+ if (!cli->cl_conn_count) {
+ CERROR("disconnecting disconnected device (%s)\n",
+ obd->obd_name);
+ GOTO(out_disconnect, rc = -EINVAL);
+ }
+
+ cli->cl_conn_count--;
+ if (cli->cl_conn_count)
+ GOTO(out_disconnect, rc = 0);
+
+ /* Mark import deactivated now, so we don't try to reconnect if any
+ * of the cleanup RPCs fails (e.g. LDLM cancel, etc). We don't
+ * fully deactivate the import, or that would drop all requests. */
+ spin_lock(&imp->imp_lock);
+ imp->imp_deactive = 1;
+ spin_unlock(&imp->imp_lock);
+
+ /* Some non-replayable imports (MDS's OSCs) are pinged, so just
+ * delete it regardless. (It's safe to delete an import that was
+ * never added.) */
+ (void)ptlrpc_pinger_del_import(imp);
+
+ if (obd->obd_namespace != NULL) {
+ /* obd_force == local only */
+ ldlm_cli_cancel_unused(obd->obd_namespace, NULL,
+ obd->obd_force ? LCF_LOCAL : 0, NULL);
+ ldlm_namespace_free_prior(obd->obd_namespace, imp, obd->obd_force);
+ }
+
+ /* There's no need to hold sem while disconnecting an import,
+ * and it may actually cause deadlock in GSS. */
+ up_write(&cli->cl_sem);
+ rc = ptlrpc_disconnect_import(imp, 0);
+ down_write(&cli->cl_sem);
+
+ ptlrpc_invalidate_import(imp);
+
+out_disconnect:
+ /* Use server style - class_disconnect should always be called for
+ * o_disconnect. */
+ err = class_disconnect(exp);
+ if (!rc && err)
+ rc = err;
+
+ up_write(&cli->cl_sem);
+
+ return rc;
+}
+EXPORT_SYMBOL(client_disconnect_export);
+
+
+/**
+ * Packs current SLV and Limit into \a req.
+ */
+int target_pack_pool_reply(struct ptlrpc_request *req)
+{
+ struct obd_device *obd;
+
+ /* Check that we still have all structures alive as this may
+ * be some late RPC at shutdown time. */
+ if (unlikely(!req->rq_export || !req->rq_export->exp_obd ||
+ !exp_connect_lru_resize(req->rq_export))) {
+ lustre_msg_set_slv(req->rq_repmsg, 0);
+ lustre_msg_set_limit(req->rq_repmsg, 0);
+ return 0;
+ }
+
+ /* OBD is alive here as export is alive, which we checked above. */
+ obd = req->rq_export->exp_obd;
+
+ read_lock(&obd->obd_pool_lock);
+ lustre_msg_set_slv(req->rq_repmsg, obd->obd_pool_slv);
+ lustre_msg_set_limit(req->rq_repmsg, obd->obd_pool_limit);
+ read_unlock(&obd->obd_pool_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(target_pack_pool_reply);
+
+int target_send_reply_msg(struct ptlrpc_request *req, int rc, int fail_id)
+{
+ if (OBD_FAIL_CHECK_ORSET(fail_id & ~OBD_FAIL_ONCE, OBD_FAIL_ONCE)) {
+ DEBUG_REQ(D_ERROR, req, "dropping reply");
+ return (-ECOMM);
+ }
+
+ if (unlikely(rc)) {
+ DEBUG_REQ(D_NET, req, "processing error (%d)", rc);
+ req->rq_status = rc;
+ return (ptlrpc_send_error(req, 1));
+ } else {
+ DEBUG_REQ(D_NET, req, "sending reply");
+ }
+
+ return (ptlrpc_send_reply(req, PTLRPC_REPLY_MAYBE_DIFFICULT));
+}
+
+void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
+{
+ struct ptlrpc_service_part *svcpt;
+ int netrc;
+ struct ptlrpc_reply_state *rs;
+ struct obd_export *exp;
+
+ if (req->rq_no_reply) {
+ return;
+ }
+
+ svcpt = req->rq_rqbd->rqbd_svcpt;
+ rs = req->rq_reply_state;
+ if (rs == NULL || !rs->rs_difficult) {
+ /* no notifiers */
+ target_send_reply_msg(req, rc, fail_id);
+ return;
+ }
+
+ /* must be an export if locks saved */
+ LASSERT(req->rq_export != NULL);
+ /* req/reply consistent */
+ LASSERT(rs->rs_svcpt == svcpt);
+
+ /* "fresh" reply */
+ LASSERT(!rs->rs_scheduled);
+ LASSERT(!rs->rs_scheduled_ever);
+ LASSERT(!rs->rs_handled);
+ LASSERT(!rs->rs_on_net);
+ LASSERT(rs->rs_export == NULL);
+ LASSERT(list_empty(&rs->rs_obd_list));
+ LASSERT(list_empty(&rs->rs_exp_list));
+
+ exp = class_export_get(req->rq_export);
+
+ /* disable reply scheduling while I'm setting up */
+ rs->rs_scheduled = 1;
+ rs->rs_on_net = 1;
+ rs->rs_xid = req->rq_xid;
+ rs->rs_transno = req->rq_transno;
+ rs->rs_export = exp;
+ rs->rs_opc = lustre_msg_get_opc(req->rq_reqmsg);
+
+ spin_lock(&exp->exp_uncommitted_replies_lock);
+ CDEBUG(D_NET, "rs transno = "LPU64", last committed = "LPU64"\n",
+ rs->rs_transno, exp->exp_last_committed);
+ if (rs->rs_transno > exp->exp_last_committed) {
+ /* not committed already */
+ list_add_tail(&rs->rs_obd_list,
+ &exp->exp_uncommitted_replies);
+ }
+ spin_unlock(&exp->exp_uncommitted_replies_lock);
+
+ spin_lock(&exp->exp_lock);
+ list_add_tail(&rs->rs_exp_list, &exp->exp_outstanding_replies);
+ spin_unlock(&exp->exp_lock);
+
+ netrc = target_send_reply_msg(req, rc, fail_id);
+
+ spin_lock(&svcpt->scp_rep_lock);
+
+ atomic_inc(&svcpt->scp_nreps_difficult);
+
+ if (netrc != 0) {
+ /* error sending: reply is off the net. Also we need +1
+ * reply ref until ptlrpc_handle_rs() is done
+ * with the reply state (if the send was successful, there
+ * would have been +1 ref for the net, which
+ * reply_out_callback leaves alone) */
+ rs->rs_on_net = 0;
+ ptlrpc_rs_addref(rs);
+ }
+
+ spin_lock(&rs->rs_lock);
+ if (rs->rs_transno <= exp->exp_last_committed ||
+ (!rs->rs_on_net && !rs->rs_no_ack) ||
+ list_empty(&rs->rs_exp_list) || /* completed already */
+ list_empty(&rs->rs_obd_list)) {
+ CDEBUG(D_HA, "Schedule reply immediately\n");
+ ptlrpc_dispatch_difficult_reply(rs);
+ } else {
+ list_add(&rs->rs_list, &svcpt->scp_rep_active);
+ rs->rs_scheduled = 0; /* allow notifier to schedule */
+ }
+ spin_unlock(&rs->rs_lock);
+ spin_unlock(&svcpt->scp_rep_lock);
+}
+EXPORT_SYMBOL(target_send_reply);
+
+ldlm_mode_t lck_compat_array[] = {
+ [LCK_EX] = LCK_COMPAT_EX,
+ [LCK_PW] = LCK_COMPAT_PW,
+ [LCK_PR] = LCK_COMPAT_PR,
+ [LCK_CW] = LCK_COMPAT_CW,
+ [LCK_CR] = LCK_COMPAT_CR,
+ [LCK_NL] = LCK_COMPAT_NL,
+ [LCK_GROUP] = LCK_COMPAT_GROUP,
+ [LCK_COS] = LCK_COMPAT_COS,
+};
+
+/**
+ * Rather arbitrary mapping from LDLM error codes to errno values. This should
+ * not escape to the user level.
+ */
+int ldlm_error2errno(ldlm_error_t error)
+{
+ int result;
+
+ switch (error) {
+ case ELDLM_OK:
+ result = 0;
+ break;
+ case ELDLM_LOCK_CHANGED:
+ result = -ESTALE;
+ break;
+ case ELDLM_LOCK_ABORTED:
+ result = -ENAVAIL;
+ break;
+ case ELDLM_LOCK_REPLACED:
+ result = -ESRCH;
+ break;
+ case ELDLM_NO_LOCK_DATA:
+ result = -ENOENT;
+ break;
+ case ELDLM_NAMESPACE_EXISTS:
+ result = -EEXIST;
+ break;
+ case ELDLM_BAD_NAMESPACE:
+ result = -EBADF;
+ break;
+ default:
+ if (((int)error) < 0) /* cast to signed type */
+ result = error; /* as ldlm_error_t can be unsigned */
+ else {
+ CERROR("Invalid DLM result code: %d\n", error);
+ result = -EPROTO;
+ }
+ }
+ return result;
+}
+EXPORT_SYMBOL(ldlm_error2errno);
+
+/**
+ * Dual to ldlm_error2errno(): maps errno values back to ldlm_error_t.
+ */
+ldlm_error_t ldlm_errno2error(int err_no)
+{
+ int error;
+
+ switch (err_no) {
+ case 0:
+ error = ELDLM_OK;
+ break;
+ case -ESTALE:
+ error = ELDLM_LOCK_CHANGED;
+ break;
+ case -ENAVAIL:
+ error = ELDLM_LOCK_ABORTED;
+ break;
+ case -ESRCH:
+ error = ELDLM_LOCK_REPLACED;
+ break;
+ case -ENOENT:
+ error = ELDLM_NO_LOCK_DATA;
+ break;
+ case -EEXIST:
+ error = ELDLM_NAMESPACE_EXISTS;
+ break;
+ case -EBADF:
+ error = ELDLM_BAD_NAMESPACE;
+ break;
+ default:
+ error = err_no;
+ }
+ return error;
+}
+EXPORT_SYMBOL(ldlm_errno2error);
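+
+/*
+ * Illustrative sanity checks for the two mappings above; the expected values
+ * follow the switch statements directly.
+ *
+ *	LASSERT(ldlm_error2errno(ELDLM_OK) == 0);
+ *	LASSERT(ldlm_error2errno(ELDLM_LOCK_CHANGED) == -ESTALE);
+ *	LASSERT(ldlm_errno2error(-ESTALE) == ELDLM_LOCK_CHANGED);
+ */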
+
+#if LUSTRE_TRACKS_LOCK_EXP_REFS
+void ldlm_dump_export_locks(struct obd_export *exp)
+{
+ spin_lock(&exp->exp_locks_list_guard);
+ if (!list_empty(&exp->exp_locks_list)) {
+ struct ldlm_lock *lock;
+
+ CERROR("dumping locks for export %p, "
+ "ignore if the unmount doesn't hang\n", exp);
+ list_for_each_entry(lock, &exp->exp_locks_list,
+ l_exp_refs_link)
+ LDLM_ERROR(lock, "lock:");
+ }
+ spin_unlock(&exp->exp_locks_list_guard);
+}
+#endif
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
new file mode 100644
index 0000000..6133b3f
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -0,0 +1,2363 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2010, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ldlm/ldlm_lock.c
+ *
+ * Author: Peter Braam <braam@clusterfs.com>
+ * Author: Phil Schwan <phil@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LDLM
+
+# include <linux/libcfs/libcfs.h>
+# include <linux/lustre_intent.h>
+
+#include <obd_class.h>
+#include "ldlm_internal.h"
+
+/* lock types */
+char *ldlm_lockname[] = {
+ [0] = "--",
+ [LCK_EX] = "EX",
+ [LCK_PW] = "PW",
+ [LCK_PR] = "PR",
+ [LCK_CW] = "CW",
+ [LCK_CR] = "CR",
+ [LCK_NL] = "NL",
+ [LCK_GROUP] = "GROUP",
+ [LCK_COS] = "COS",
+};
+EXPORT_SYMBOL(ldlm_lockname);
+
+char *ldlm_typename[] = {
+ [LDLM_PLAIN] = "PLN",
+ [LDLM_EXTENT] = "EXT",
+ [LDLM_FLOCK] = "FLK",
+ [LDLM_IBITS] = "IBT",
+};
+EXPORT_SYMBOL(ldlm_typename);
+
+static ldlm_policy_wire_to_local_t ldlm_policy_wire18_to_local[] = {
+ [LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_wire_to_local,
+ [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_wire_to_local,
+ [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_wire18_to_local,
+ [LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_wire_to_local,
+};
+
+static ldlm_policy_wire_to_local_t ldlm_policy_wire21_to_local[] = {
+ [LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_wire_to_local,
+ [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_wire_to_local,
+ [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_wire21_to_local,
+ [LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_wire_to_local,
+};
+
+static ldlm_policy_local_to_wire_t ldlm_policy_local_to_wire[] = {
+ [LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_local_to_wire,
+ [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_local_to_wire,
+ [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_local_to_wire,
+ [LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_local_to_wire,
+};
+
+/**
+ * Converts lock policy from local format to on the wire lock_desc format
+ */
+void ldlm_convert_policy_to_wire(ldlm_type_t type,
+ const ldlm_policy_data_t *lpolicy,
+ ldlm_wire_policy_data_t *wpolicy)
+{
+ ldlm_policy_local_to_wire_t convert;
+
+ convert = ldlm_policy_local_to_wire[type - LDLM_MIN_TYPE];
+
+ convert(lpolicy, wpolicy);
+}
+
+/**
+ * Converts lock policy from on the wire lock_desc format to local format
+ */
+void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type,
+ const ldlm_wire_policy_data_t *wpolicy,
+ ldlm_policy_data_t *lpolicy)
+{
+ ldlm_policy_wire_to_local_t convert;
+ int new_client;
+
+ /** some badness for 2.0.0 clients, but 2.0.0 isn't supported */
+ new_client = (exp_connect_flags(exp) & OBD_CONNECT_FULL20) != 0;
+ if (new_client)
+ convert = ldlm_policy_wire21_to_local[type - LDLM_MIN_TYPE];
+ else
+ convert = ldlm_policy_wire18_to_local[type - LDLM_MIN_TYPE];
+
+ convert(wpolicy, lpolicy);
+}
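+
+/*
+ * Illustrative sketch of the conversion helpers above, using an extent lock
+ * as an example; exp, lpolicy and wpolicy are assumed to be set up by the
+ * caller.
+ *
+ *	ldlm_convert_policy_to_wire(LDLM_EXTENT, &lpolicy, &wpolicy);
+ *	ldlm_convert_policy_to_local(exp, LDLM_EXTENT, &wpolicy, &lpolicy);
+ */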
+
+char *ldlm_it2str(int it)
+{
+ switch (it) {
+ case IT_OPEN:
+ return "open";
+ case IT_CREAT:
+ return "creat";
+ case (IT_OPEN | IT_CREAT):
+ return "open|creat";
+ case IT_READDIR:
+ return "readdir";
+ case IT_GETATTR:
+ return "getattr";
+ case IT_LOOKUP:
+ return "lookup";
+ case IT_UNLINK:
+ return "unlink";
+ case IT_GETXATTR:
+ return "getxattr";
+ case IT_LAYOUT:
+ return "layout";
+ default:
+ CERROR("Unknown intent %d\n", it);
+ return "UNKNOWN";
+ }
+}
+EXPORT_SYMBOL(ldlm_it2str);
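+
+/*
+ * Illustrative use in a debug message; the intent value "it" comes from the
+ * caller.
+ *
+ *	CDEBUG(D_DLMTRACE, "intent: %s\n", ldlm_it2str(it));
+ */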
+
+extern struct kmem_cache *ldlm_lock_slab;
+
+
+void ldlm_register_intent(struct ldlm_namespace *ns, ldlm_res_policy arg)
+{
+ ns->ns_policy = arg;
+}
+EXPORT_SYMBOL(ldlm_register_intent);
+
+/*
+ * REFCOUNTED LOCK OBJECTS
+ */
+
+
+/**
+ * Get a reference on a lock.
+ *
+ * Lock refcounts, during creation:
+ * - one special one for allocation, dec'd only once in destroy
+ * - one for being a lock that's in-use
+ * - one for the addref associated with a new lock
+ */
+struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock)
+{
+ atomic_inc(&lock->l_refc);
+ return lock;
+}
+EXPORT_SYMBOL(ldlm_lock_get);
+
+/**
+ * Release lock reference.
+ *
+ * Also frees the lock if it was the last reference.
+ */
+void ldlm_lock_put(struct ldlm_lock *lock)
+{
+ LASSERT(lock->l_resource != LP_POISON);
+ LASSERT(atomic_read(&lock->l_refc) > 0);
+ if (atomic_dec_and_test(&lock->l_refc)) {
+ struct ldlm_resource *res;
+
+ LDLM_DEBUG(lock,
+ "final lock_put on destroyed lock, freeing it.");
+
+ res = lock->l_resource;
+ LASSERT(lock->l_flags & LDLM_FL_DESTROYED);
+ LASSERT(list_empty(&lock->l_res_link));
+ LASSERT(list_empty(&lock->l_pending_chain));
+
+ lprocfs_counter_decr(ldlm_res_to_ns(res)->ns_stats,
+ LDLM_NSS_LOCKS);
+ lu_ref_del(&res->lr_reference, "lock", lock);
+ ldlm_resource_putref(res);
+ lock->l_resource = NULL;
+ if (lock->l_export) {
+ class_export_lock_put(lock->l_export, lock);
+ lock->l_export = NULL;
+ }
+
+ if (lock->l_lvb_data != NULL)
+ OBD_FREE(lock->l_lvb_data, lock->l_lvb_len);
+
+ ldlm_interval_free(ldlm_interval_detach(lock));
+ lu_ref_fini(&lock->l_reference);
+ OBD_FREE_RCU(lock, sizeof(*lock), &lock->l_handle);
+ }
+}
+EXPORT_SYMBOL(ldlm_lock_put);
+
+/**
+ * Removes LDLM lock \a lock from LRU. Assumes LRU is already locked.
+ */
+int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
+{
+ int rc = 0;
+ if (!list_empty(&lock->l_lru)) {
+ struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
+
+ LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
+ list_del_init(&lock->l_lru);
+ if (lock->l_flags & LDLM_FL_SKIPPED)
+ lock->l_flags &= ~LDLM_FL_SKIPPED;
+ LASSERT(ns->ns_nr_unused > 0);
+ ns->ns_nr_unused--;
+ rc = 1;
+ }
+ return rc;
+}
+
+/**
+ * Removes LDLM lock \a lock from LRU. Obtains the LRU lock first.
+ */
+int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
+{
+ struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
+ int rc;
+
+ if (lock->l_flags & LDLM_FL_NS_SRV) {
+ LASSERT(list_empty(&lock->l_lru));
+ return 0;
+ }
+
+ spin_lock(&ns->ns_lock);
+ rc = ldlm_lock_remove_from_lru_nolock(lock);
+ spin_unlock(&ns->ns_lock);
+ return rc;
+}
+
+/**
+ * Adds LDLM lock \a lock to namespace LRU. Assumes LRU is already locked.
+ */
+void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
+{
+ struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
+
+ lock->l_last_used = cfs_time_current();
+ LASSERT(list_empty(&lock->l_lru));
+ LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
+ list_add_tail(&lock->l_lru, &ns->ns_unused_list);
+ LASSERT(ns->ns_nr_unused >= 0);
+ ns->ns_nr_unused++;
+}
+
+/**
+ * Adds LDLM lock \a lock to namespace LRU. Obtains necessary LRU locks
+ * first.
+ */
+void ldlm_lock_add_to_lru(struct ldlm_lock *lock)
+{
+ struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
+
+ spin_lock(&ns->ns_lock);
+ ldlm_lock_add_to_lru_nolock(lock);
+ spin_unlock(&ns->ns_lock);
+}
+
+/**
+ * Moves LDLM lock \a lock that is already in namespace LRU to the tail of
+ * the LRU. Performs necessary LRU locking.
+ */
+void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
+{
+ struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
+
+ if (lock->l_flags & LDLM_FL_NS_SRV) {
+ LASSERT(list_empty(&lock->l_lru));
+ return;
+ }
+
+ spin_lock(&ns->ns_lock);
+ if (!list_empty(&lock->l_lru)) {
+ ldlm_lock_remove_from_lru_nolock(lock);
+ ldlm_lock_add_to_lru_nolock(lock);
+ }
+ spin_unlock(&ns->ns_lock);
+}
+
+/**
+ * Helper to destroy a locked lock.
+ *
+ * Used by ldlm_lock_destroy and ldlm_lock_destroy_nolock
+ * Must be called with l_lock and lr_lock held.
+ *
+ * Does not actually free the lock data, but rather marks the lock as
+ * destroyed by setting the LDLM_FL_DESTROYED flag. Also destroys the
+ * handle->lock association so that the lock can no longer be found by
+ * handle, and removes the lock from the LRU list. Actual lock freeing
+ * occurs when the last lock reference goes away.
+ *
+ * Original comment (of some historical value):
+ * This used to have a 'strict' flag, which recovery would use to mark an
+ * in-use lock as needing-to-die. Lest I am ever tempted to put it back, I
+ * shall explain why it's gone: with the new hash table scheme, once you call
+ * ldlm_lock_destroy, you can never drop your final references on this lock.
+ * Because it's not in the hash table anymore. -phil
+ */
+int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
+{
+ if (lock->l_readers || lock->l_writers) {
+ LDLM_ERROR(lock, "lock still has references");
+ LBUG();
+ }
+
+ if (!list_empty(&lock->l_res_link)) {
+ LDLM_ERROR(lock, "lock still on resource");
+ LBUG();
+ }
+
+ if (lock->l_flags & LDLM_FL_DESTROYED) {
+ LASSERT(list_empty(&lock->l_lru));
+ return 0;
+ }
+ lock->l_flags |= LDLM_FL_DESTROYED;
+
+ if (lock->l_export && lock->l_export->exp_lock_hash) {
+ /* NB: it's safe to call cfs_hash_del() even if the lock isn't
+ * in exp_lock_hash. */
+ /* In the function below, .hs_keycmp resolves to
+ * ldlm_export_lock_keycmp() */
+ /* coverity[overrun-buffer-val] */
+ cfs_hash_del(lock->l_export->exp_lock_hash,
+ &lock->l_remote_handle, &lock->l_exp_hash);
+ }
+
+ ldlm_lock_remove_from_lru(lock);
+ class_handle_unhash(&lock->l_handle);
+
+#if 0
+ /* Wake anyone waiting for this lock */
+ /* FIXME: I should probably add yet another flag, instead of using
+ * l_export to only call this on clients */
+ if (lock->l_export)
+ class_export_put(lock->l_export);
+ lock->l_export = NULL;
+ if (lock->l_export && lock->l_completion_ast)
+ lock->l_completion_ast(lock, 0);
+#endif
+ return 1;
+}
+
+/**
+ * Destroys a LDLM lock \a lock. Performs necessary locking first.
+ */
+void ldlm_lock_destroy(struct ldlm_lock *lock)
+{
+ int first;
+
+ lock_res_and_lock(lock);
+ first = ldlm_lock_destroy_internal(lock);
+ unlock_res_and_lock(lock);
+
+ /* drop reference from hashtable only for first destroy */
+ if (first) {
+ lu_ref_del(&lock->l_reference, "hash", lock);
+ LDLM_LOCK_RELEASE(lock);
+ }
+}
+
+/**
+ * Destroys a LDLM lock \a lock that is already locked.
+ */
+void ldlm_lock_destroy_nolock(struct ldlm_lock *lock)
+{
+ int first;
+
+ first = ldlm_lock_destroy_internal(lock);
+ /* drop reference from hashtable only for first destroy */
+ if (first) {
+ lu_ref_del(&lock->l_reference, "hash", lock);
+ LDLM_LOCK_RELEASE(lock);
+ }
+}
+
+/* this is called by portals_handle2object with the handle lock taken */
+static void lock_handle_addref(void *lock)
+{
+ LDLM_LOCK_GET((struct ldlm_lock *)lock);
+}
+
+static void lock_handle_free(void *lock, int size)
+{
+ LASSERT(size == sizeof(struct ldlm_lock));
+ OBD_SLAB_FREE(lock, ldlm_lock_slab, size);
+}
+
+struct portals_handle_ops lock_handle_ops = {
+ .hop_addref = lock_handle_addref,
+ .hop_free = lock_handle_free,
+};
+
+/**
+ * Allocate and initialize a new lock structure.
+ *
+ * usage: pass in a resource on which you have done ldlm_resource_get();
+ *        the new lock will take over the refcount.
+ * returns: lock with refcount 2 - one for the current caller and one for remote
+ */
+static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
+{
+ struct ldlm_lock *lock;
+
+ if (resource == NULL)
+ LBUG();
+
+ OBD_SLAB_ALLOC_PTR_GFP(lock, ldlm_lock_slab, __GFP_IO);
+ if (lock == NULL)
+ return NULL;
+
+ spin_lock_init(&lock->l_lock);
+ lock->l_resource = resource;
+ lu_ref_add(&resource->lr_reference, "lock", lock);
+
+ atomic_set(&lock->l_refc, 2);
+ INIT_LIST_HEAD(&lock->l_res_link);
+ INIT_LIST_HEAD(&lock->l_lru);
+ INIT_LIST_HEAD(&lock->l_pending_chain);
+ INIT_LIST_HEAD(&lock->l_bl_ast);
+ INIT_LIST_HEAD(&lock->l_cp_ast);
+ INIT_LIST_HEAD(&lock->l_rk_ast);
+ init_waitqueue_head(&lock->l_waitq);
+ lock->l_blocking_lock = NULL;
+ INIT_LIST_HEAD(&lock->l_sl_mode);
+ INIT_LIST_HEAD(&lock->l_sl_policy);
+ INIT_HLIST_NODE(&lock->l_exp_hash);
+ INIT_HLIST_NODE(&lock->l_exp_flock_hash);
+
+ lprocfs_counter_incr(ldlm_res_to_ns(resource)->ns_stats,
+ LDLM_NSS_LOCKS);
+ INIT_LIST_HEAD(&lock->l_handle.h_link);
+ class_handle_hash(&lock->l_handle, &lock_handle_ops);
+
+ lu_ref_init(&lock->l_reference);
+ lu_ref_add(&lock->l_reference, "hash", lock);
+ lock->l_callback_timeout = 0;
+
+#if LUSTRE_TRACKS_LOCK_EXP_REFS
+ INIT_LIST_HEAD(&lock->l_exp_refs_link);
+ lock->l_exp_refs_nr = 0;
+ lock->l_exp_refs_target = NULL;
+#endif
+ INIT_LIST_HEAD(&lock->l_exp_list);
+
+ return lock;
+}
+
+/**
+ * Moves LDLM lock \a lock to another resource.
+ * This is used on the client when the server returns a lock other than the
+ * one requested (typically as a result of an intent operation).
+ */
+int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
+ const struct ldlm_res_id *new_resid)
+{
+ struct ldlm_resource *oldres = lock->l_resource;
+ struct ldlm_resource *newres;
+ int type;
+
+ LASSERT(ns_is_client(ns));
+
+ lock_res_and_lock(lock);
+ if (memcmp(new_resid, &lock->l_resource->lr_name,
+ sizeof(lock->l_resource->lr_name)) == 0) {
+ /* Nothing to do */
+ unlock_res_and_lock(lock);
+ return 0;
+ }
+
+ LASSERT(new_resid->name[0] != 0);
+
+ /* This function assumes that the lock isn't on any lists */
+ LASSERT(list_empty(&lock->l_res_link));
+
+ type = oldres->lr_type;
+ unlock_res_and_lock(lock);
+
+ newres = ldlm_resource_get(ns, NULL, new_resid, type, 1);
+ if (newres == NULL)
+ return -ENOMEM;
+
+ lu_ref_add(&newres->lr_reference, "lock", lock);
+ /*
+ * To flip the lock from the old to the new resource, lock, oldres and
+ * newres have to be locked. Resource spin-locks are nested within
+ * lock->l_lock, and are taken in the memory address order to avoid
+ * dead-locks.
+ */
+ spin_lock(&lock->l_lock);
+ oldres = lock->l_resource;
+ if (oldres < newres) {
+ lock_res(oldres);
+ lock_res_nested(newres, LRT_NEW);
+ } else {
+ lock_res(newres);
+ lock_res_nested(oldres, LRT_NEW);
+ }
+ LASSERT(memcmp(new_resid, &oldres->lr_name,
+ sizeof oldres->lr_name) != 0);
+ lock->l_resource = newres;
+ unlock_res(oldres);
+ unlock_res_and_lock(lock);
+
+ /* ...and the flowers are still standing! */
+ lu_ref_del(&oldres->lr_reference, "lock", lock);
+ ldlm_resource_putref(oldres);
+
+ return 0;
+}
+EXPORT_SYMBOL(ldlm_lock_change_resource);
+
+/** \defgroup ldlm_handles LDLM HANDLES
+ * Ways to get hold of locks without any addresses.
+ * @{
+ */
+
+/**
+ * Fills in handle for LDLM lock \a lock into supplied \a lockh
+ * Does not take any references.
+ */
+void ldlm_lock2handle(const struct ldlm_lock *lock, struct lustre_handle *lockh)
+{
+ lockh->cookie = lock->l_handle.h_cookie;
+}
+EXPORT_SYMBOL(ldlm_lock2handle);
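+
+/*
+ * Illustrative round trip through the handle layer; "lock" is assumed to be
+ * a valid, referenced lock held by the caller.
+ *
+ *	struct lustre_handle lockh;
+ *	struct ldlm_lock *lock2;
+ *
+ *	ldlm_lock2handle(lock, &lockh);
+ *	lock2 = ldlm_handle2lock(&lockh);
+ *	if (lock2 != NULL)
+ *		LDLM_LOCK_PUT(lock2);
+ */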
+
+/**
+ * Obtain a lock reference by handle.
+ *
+ * If \a flags is non-zero: atomically get the lock and set the flags.
+ * Return NULL if any of the flags is already set.
+ */
+struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
+ __u64 flags)
+{
+ struct ldlm_lock *lock;
+
+ LASSERT(handle);
+
+ lock = class_handle2object(handle->cookie);
+ if (lock == NULL)
+ return NULL;
+
+ /* It's unlikely but possible that someone marked the lock as
+ * destroyed after we did handle2object on it */
+ if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED) == 0)) {
+ lu_ref_add(&lock->l_reference, "handle", current);
+ return lock;
+ }
+
+ lock_res_and_lock(lock);
+
+ LASSERT(lock->l_resource != NULL);
+
+ lu_ref_add_atomic(&lock->l_reference, "handle", current);
+ if (unlikely(lock->l_flags & LDLM_FL_DESTROYED)) {
+ unlock_res_and_lock(lock);
+ CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
+ LDLM_LOCK_PUT(lock);
+ return NULL;
+ }
+
+ if (flags && (lock->l_flags & flags)) {
+ unlock_res_and_lock(lock);
+ LDLM_LOCK_PUT(lock);
+ return NULL;
+ }
+
+ if (flags)
+ lock->l_flags |= flags;
+
+ unlock_res_and_lock(lock);
+ return lock;
+}
+EXPORT_SYMBOL(__ldlm_handle2lock);
+/** @} ldlm_handles */
+
+/**
+ * Fill in "on the wire" representation for given LDLM lock into supplied
+ * lock descriptor \a desc structure.
+ */
+void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
+{
+ struct obd_export *exp = lock->l_export ?: lock->l_conn_export;
+
+ /* INODEBITS_INTEROP: If the other side does not support
+ * inodebits, reply with a plain lock descriptor. */
+ if ((lock->l_resource->lr_type == LDLM_IBITS) &&
+ (exp && !(exp_connect_flags(exp) & OBD_CONNECT_IBITS))) {
+ /* Make sure all the right bits are set in this lock we
+ are going to pass to client */
+ LASSERTF(lock->l_policy_data.l_inodebits.bits ==
+ (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE |
+ MDS_INODELOCK_LAYOUT),
+ "Inappropriate inode lock bits during "
+ "conversion " LPU64 "\n",
+ lock->l_policy_data.l_inodebits.bits);
+
+ ldlm_res2desc(lock->l_resource, &desc->l_resource);
+ desc->l_resource.lr_type = LDLM_PLAIN;
+
+ /* Convert "new" lock mode to something old client can
+ understand */
+ if ((lock->l_req_mode == LCK_CR) ||
+ (lock->l_req_mode == LCK_CW))
+ desc->l_req_mode = LCK_PR;
+ else
+ desc->l_req_mode = lock->l_req_mode;
+ if ((lock->l_granted_mode == LCK_CR) ||
+ (lock->l_granted_mode == LCK_CW)) {
+ desc->l_granted_mode = LCK_PR;
+ } else {
+ /* We never grant PW/EX locks to clients */
+ LASSERT((lock->l_granted_mode != LCK_PW) &&
+ (lock->l_granted_mode != LCK_EX));
+ desc->l_granted_mode = lock->l_granted_mode;
+ }
+
+ /* We do not copy policy here, because there is no
+ policy for plain locks */
+ } else {
+ ldlm_res2desc(lock->l_resource, &desc->l_resource);
+ desc->l_req_mode = lock->l_req_mode;
+ desc->l_granted_mode = lock->l_granted_mode;
+ ldlm_convert_policy_to_wire(lock->l_resource->lr_type,
+ &lock->l_policy_data,
+ &desc->l_policy_data);
+ }
+}
+EXPORT_SYMBOL(ldlm_lock2desc);
+
+/**
+ * Add a lock to list of conflicting locks to send AST to.
+ *
+ * Only add if we have not sent a blocking AST to the lock yet.
+ */
+void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
+ struct list_head *work_list)
+{
+ if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) {
+ LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
+ lock->l_flags |= LDLM_FL_AST_SENT;
+ /* If the enqueuing client said so, tell the AST recipient to
+ * discard dirty data, rather than writing back. */
+ if (new->l_flags & LDLM_FL_AST_DISCARD_DATA)
+ lock->l_flags |= LDLM_FL_DISCARD_DATA;
+ LASSERT(list_empty(&lock->l_bl_ast));
+ list_add(&lock->l_bl_ast, work_list);
+ LDLM_LOCK_GET(lock);
+ LASSERT(lock->l_blocking_lock == NULL);
+ lock->l_blocking_lock = LDLM_LOCK_GET(new);
+ }
+}
+
+/**
+ * Add a lock to list of just granted locks to send completion AST to.
+ */
+void ldlm_add_cp_work_item(struct ldlm_lock *lock, struct list_head *work_list)
+{
+ if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) {
+ lock->l_flags |= LDLM_FL_CP_REQD;
+ LDLM_DEBUG(lock, "lock granted; sending completion AST.");
+ LASSERT(list_empty(&lock->l_cp_ast));
+ list_add(&lock->l_cp_ast, work_list);
+ LDLM_LOCK_GET(lock);
+ }
+}
+
+/**
+ * Aggregator function to add AST work items into a list. Determines
+ * what sort of AST work needs to be done and calls the proper
+ * adding function.
+ * Must be called with lr_lock held.
+ */
+void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
+ struct list_head *work_list)
+{
+ check_res_locked(lock->l_resource);
+ if (new)
+ ldlm_add_bl_work_item(lock, new, work_list);
+ else
+ ldlm_add_cp_work_item(lock, work_list);
+}
+
+/**
+ * Add specified reader/writer reference to LDLM lock with handle \a lockh.
+ * r/w reference type is determined by \a mode
+ * Calls ldlm_lock_addref_internal.
+ */
+void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
+{
+ struct ldlm_lock *lock;
+
+ lock = ldlm_handle2lock(lockh);
+ LASSERT(lock != NULL);
+ ldlm_lock_addref_internal(lock, mode);
+ LDLM_LOCK_PUT(lock);
+}
+EXPORT_SYMBOL(ldlm_lock_addref);
+
+/**
+ * Helper function.
+ * Add specified reader/writer reference to LDLM lock \a lock.
+ * r/w reference type is determined by \a mode
+ * Removes lock from LRU if it is there.
+ * Assumes the LDLM lock is already locked.
+ */
+void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
+{
+ ldlm_lock_remove_from_lru(lock);
+ if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
+ lock->l_readers++;
+ lu_ref_add_atomic(&lock->l_reference, "reader", lock);
+ }
+ if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) {
+ lock->l_writers++;
+ lu_ref_add_atomic(&lock->l_reference, "writer", lock);
+ }
+ LDLM_LOCK_GET(lock);
+ lu_ref_add_atomic(&lock->l_reference, "user", lock);
+ LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
+}
+
+/**
+ * Attempts to add reader/writer reference to a lock with handle \a lockh, and
+ * fails if lock is already LDLM_FL_CBPENDING or destroyed.
+ *
+ * \retval 0 success, lock was addref-ed
+ *
+ * \retval -EAGAIN lock is being canceled.
+ */
+int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode)
+{
+ struct ldlm_lock *lock;
+ int result;
+
+ result = -EAGAIN;
+ lock = ldlm_handle2lock(lockh);
+ if (lock != NULL) {
+ lock_res_and_lock(lock);
+ if (lock->l_readers != 0 || lock->l_writers != 0 ||
+ !(lock->l_flags & LDLM_FL_CBPENDING)) {
+ ldlm_lock_addref_internal_nolock(lock, mode);
+ result = 0;
+ }
+ unlock_res_and_lock(lock);
+ LDLM_LOCK_PUT(lock);
+ }
+ return result;
+}
+EXPORT_SYMBOL(ldlm_lock_addref_try);
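+
+/*
+ * Illustrative pairing for the helper above; "lockh" is assumed to refer to
+ * a granted PR lock.
+ *
+ *	if (ldlm_lock_addref_try(lockh, LCK_PR) == 0) {
+ *		... use the data protected by the lock ...
+ *		ldlm_lock_decref(lockh, LCK_PR);
+ *	}
+ */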
+
+/**
+ * Add specified reader/writer reference to LDLM lock \a lock.
+ * Locks LDLM lock and calls ldlm_lock_addref_internal_nolock to do the work.
+ * Only called for local locks.
+ */
+void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
+{
+ lock_res_and_lock(lock);
+ ldlm_lock_addref_internal_nolock(lock, mode);
+ unlock_res_and_lock(lock);
+}
+
+/**
+ * Removes reader/writer reference for LDLM lock \a lock.
+ * Assumes LDLM lock is already locked.
+ * Only called in ldlm_flock_destroy and for local locks.
+ * Does NOT add lock to LRU if no r/w references left, to accommodate flock locks
+ * that cannot be placed in LRU.
+ */
+void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
+{
+ LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
+ if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
+ LASSERT(lock->l_readers > 0);
+ lu_ref_del(&lock->l_reference, "reader", lock);
+ lock->l_readers--;
+ }
+ if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP | LCK_COS)) {
+ LASSERT(lock->l_writers > 0);
+ lu_ref_del(&lock->l_reference, "writer", lock);
+ lock->l_writers--;
+ }
+
+ lu_ref_del(&lock->l_reference, "user", lock);
+ LDLM_LOCK_RELEASE(lock); /* matches the LDLM_LOCK_GET() in addref */
+}
+
+/**
+ * Removes reader/writer reference for LDLM lock \a lock.
+ * Locks LDLM lock first.
+ * If the lock is a client lock on a client-side namespace and its r/w refcount
+ * drops to zero and the lock is not blocked, the lock is added to the
+ * namespace LRU.
+ * For blocked LDLM locks if r/w count drops to zero, blocking_ast is called.
+ */
+void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
+{
+ struct ldlm_namespace *ns;
+
+ lock_res_and_lock(lock);
+
+ ns = ldlm_lock_to_ns(lock);
+
+ ldlm_lock_decref_internal_nolock(lock, mode);
+
+ if (lock->l_flags & LDLM_FL_LOCAL &&
+ !lock->l_readers && !lock->l_writers) {
+ /* If this is a local lock on a server namespace and this was
+ * the last reference, cancel the lock. */
+ CDEBUG(D_INFO, "forcing cancel of local lock\n");
+ lock->l_flags |= LDLM_FL_CBPENDING;
+ }
+
+ if (!lock->l_readers && !lock->l_writers &&
+ (lock->l_flags & LDLM_FL_CBPENDING)) {
+ /* If we received a blocked AST and this was the last reference,
+ * run the callback. */
+ if ((lock->l_flags & LDLM_FL_NS_SRV) && lock->l_export)
+ CERROR("FL_CBPENDING set on non-local lock--just a "
+ "warning\n");
+
+ LDLM_DEBUG(lock, "final decref done on cbpending lock");
+
+ LDLM_LOCK_GET(lock); /* dropped by bl thread */
+ ldlm_lock_remove_from_lru(lock);
+ unlock_res_and_lock(lock);
+
+ if (lock->l_flags & LDLM_FL_FAIL_LOC)
+ OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
+
+ if ((lock->l_flags & LDLM_FL_ATOMIC_CB) ||
+ ldlm_bl_to_thread_lock(ns, NULL, lock) != 0)
+ ldlm_handle_bl_callback(ns, NULL, lock);
+ } else if (ns_is_client(ns) &&
+ !lock->l_readers && !lock->l_writers &&
+ !(lock->l_flags & LDLM_FL_NO_LRU) &&
+ !(lock->l_flags & LDLM_FL_BL_AST)) {
+
+ LDLM_DEBUG(lock, "add lock into lru list");
+
+ /* If this is a client-side namespace and this was the last
+ * reference, put it on the LRU. */
+ ldlm_lock_add_to_lru(lock);
+ unlock_res_and_lock(lock);
+
+ if (lock->l_flags & LDLM_FL_FAIL_LOC)
+ OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
+
+ /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
+ * are not supported by the server, otherwise, it is done on
+ * enqueue. */
+ if (!exp_connect_cancelset(lock->l_conn_export) &&
+ !ns_connect_lru_resize(ns))
+ ldlm_cancel_lru(ns, 0, LCF_ASYNC, 0);
+ } else {
+ LDLM_DEBUG(lock, "do not add lock into lru list");
+ unlock_res_and_lock(lock);
+ }
+}
+
+/**
+ * Decrease reader/writer refcount for LDLM lock with handle \a lockh
+ */
+void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
+{
+ struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
+ LASSERTF(lock != NULL, "Non-existing lock: "LPX64"\n", lockh->cookie);
+ ldlm_lock_decref_internal(lock, mode);
+ LDLM_LOCK_PUT(lock);
+}
+EXPORT_SYMBOL(ldlm_lock_decref);
+
+/**
+ * Decrease reader/writer refcount for LDLM lock with handle
+ * \a lockh and mark it for subsequent cancellation once r/w refcount
+ * drops to zero instead of putting into LRU.
+ *
+ * Typical usage is for GROUP locks which we cannot allow to be cached.
+ */
+void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
+{
+ struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
+
+ LASSERT(lock != NULL);
+
+ LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
+ lock_res_and_lock(lock);
+ lock->l_flags |= LDLM_FL_CBPENDING;
+ unlock_res_and_lock(lock);
+ ldlm_lock_decref_internal(lock, mode);
+ LDLM_LOCK_PUT(lock);
+}
+EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
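+
+/*
+ * Illustrative use for a GROUP lock, which must not linger in the LRU (see
+ * the comment above); "lockh" is a placeholder handle.
+ *
+ *	ldlm_lock_decref_and_cancel(&lockh, LCK_GROUP);
+ */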
+
+struct sl_insert_point {
+ struct list_head *res_link;
+ struct list_head *mode_link;
+ struct list_head *policy_link;
+};
+
+/**
+ * Finds a position to insert the new lock into granted lock list.
+ *
+ * Used for locks eligible for skiplist optimization.
+ *
+ * Parameters:
+ * queue [input]: the granted list where search acts on;
+ * req [input]: the lock whose position to be located;
+ * prev [output]: positions within 3 lists to insert @req to
+ * Return Value:
+ * filled @prev
+ * NOTE: called by
+ * - ldlm_grant_lock_with_skiplist
+ */
+static void search_granted_lock(struct list_head *queue,
+ struct ldlm_lock *req,
+ struct sl_insert_point *prev)
+{
+ struct list_head *tmp;
+ struct ldlm_lock *lock, *mode_end, *policy_end;
+
+ list_for_each(tmp, queue) {
+ lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+
+ mode_end = list_entry(lock->l_sl_mode.prev,
+ struct ldlm_lock, l_sl_mode);
+
+ if (lock->l_req_mode != req->l_req_mode) {
+ /* jump to last lock of mode group */
+ tmp = &mode_end->l_res_link;
+ continue;
+ }
+
+ /* suitable mode group is found */
+ if (lock->l_resource->lr_type == LDLM_PLAIN) {
+ /* insert point is last lock of the mode group */
+ prev->res_link = &mode_end->l_res_link;
+ prev->mode_link = &mode_end->l_sl_mode;
+ prev->policy_link = &req->l_sl_policy;
+ return;
+ } else if (lock->l_resource->lr_type == LDLM_IBITS) {
+ for (;;) {
+ policy_end =
+ list_entry(lock->l_sl_policy.prev,
+ struct ldlm_lock,
+ l_sl_policy);
+
+ if (lock->l_policy_data.l_inodebits.bits ==
+ req->l_policy_data.l_inodebits.bits) {
+ /* insert point is last lock of
+ * the policy group */
+ prev->res_link =
+ &policy_end->l_res_link;
+ prev->mode_link =
+ &policy_end->l_sl_mode;
+ prev->policy_link =
+ &policy_end->l_sl_policy;
+ return;
+ }
+
+ if (policy_end == mode_end)
+ /* done with mode group */
+ break;
+
+ /* go to next policy group within mode group */
+ tmp = policy_end->l_res_link.next;
+ lock = list_entry(tmp, struct ldlm_lock,
+ l_res_link);
+ } /* loop over policy groups within the mode group */
+
+ /* insert point is last lock of the mode group,
+ * new policy group is started */
+ prev->res_link = &mode_end->l_res_link;
+ prev->mode_link = &mode_end->l_sl_mode;
+ prev->policy_link = &req->l_sl_policy;
+ return;
+ } else {
+ LDLM_ERROR(lock, "is not LDLM_PLAIN or LDLM_IBITS lock");
+ LBUG();
+ }
+ }
+
+ /* insert point is last lock on the queue,
+ * new mode group and new policy group are started */
+ prev->res_link = queue->prev;
+ prev->mode_link = &req->l_sl_mode;
+ prev->policy_link = &req->l_sl_policy;
+ return;
+}
+
+/**
+ * Add a lock into resource granted list after a position described by
+ * \a prev.
+ */
+static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
+ struct sl_insert_point *prev)
+{
+ struct ldlm_resource *res = lock->l_resource;
+
+ check_res_locked(res);
+
+ ldlm_resource_dump(D_INFO, res);
+ LDLM_DEBUG(lock, "About to add lock:");
+
+ if (lock->l_flags & LDLM_FL_DESTROYED) {
+ CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
+ return;
+ }
+
+ LASSERT(list_empty(&lock->l_res_link));
+ LASSERT(list_empty(&lock->l_sl_mode));
+ LASSERT(list_empty(&lock->l_sl_policy));
+
+ /*
+ * lock->link == prev->link means lock is first starting the group.
+ * Don't re-add to itself to suppress kernel warnings.
+ */
+ if (&lock->l_res_link != prev->res_link)
+ list_add(&lock->l_res_link, prev->res_link);
+ if (&lock->l_sl_mode != prev->mode_link)
+ list_add(&lock->l_sl_mode, prev->mode_link);
+ if (&lock->l_sl_policy != prev->policy_link)
+ list_add(&lock->l_sl_policy, prev->policy_link);
+}
+
+/**
+ * Add a lock to granted list on a resource maintaining skiplist
+ * correctness.
+ */
+static void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock)
+{
+ struct sl_insert_point prev;
+
+ LASSERT(lock->l_req_mode == lock->l_granted_mode);
+
+ search_granted_lock(&lock->l_resource->lr_granted, lock, &prev);
+ ldlm_granted_list_add_lock(lock, &prev);
+}
+
+/**
+ * Perform lock granting bookkeeping.
+ *
+ * Includes putting the lock into granted list and updating lock mode.
+ * NOTE: called by
+ * - ldlm_lock_enqueue
+ * - ldlm_reprocess_queue
+ * - ldlm_lock_convert
+ *
+ * must be called with lr_lock held
+ */
+void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
+{
+ struct ldlm_resource *res = lock->l_resource;
+
+ check_res_locked(res);
+
+ lock->l_granted_mode = lock->l_req_mode;
+ if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS)
+ ldlm_grant_lock_with_skiplist(lock);
+ else if (res->lr_type == LDLM_EXTENT)
+ ldlm_extent_add_lock(res, lock);
+ else
+ ldlm_resource_add_lock(res, &res->lr_granted, lock);
+
+ if (lock->l_granted_mode < res->lr_most_restr)
+ res->lr_most_restr = lock->l_granted_mode;
+
+ if (work_list && lock->l_completion_ast != NULL)
+ ldlm_add_ast_work_item(lock, NULL, work_list);
+
+ ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock);
+}
+
+/**
+ * Search for a lock with given properties in a queue.
+ *
+ * \retval a referenced lock or NULL. See the flag descriptions below, in the
+ * comment above ldlm_lock_match
+ */
+static struct ldlm_lock *search_queue(struct list_head *queue,
+ ldlm_mode_t *mode,
+ ldlm_policy_data_t *policy,
+ struct ldlm_lock *old_lock,
+ __u64 flags, int unref)
+{
+ struct ldlm_lock *lock;
+ struct list_head *tmp;
+
+ list_for_each(tmp, queue) {
+ ldlm_mode_t match;
+
+ lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+
+ if (lock == old_lock)
+ break;
+
+ /* llite sometimes wants to match locks that will be
+ * canceled when their users drop, but we allow it to match
+ * if it passes in CBPENDING and the lock still has users.
+ * This is generally only going to be used by children
+ * whose parents already hold a lock so forward progress
+ * can still happen. */
+ if (lock->l_flags & LDLM_FL_CBPENDING &&
+ !(flags & LDLM_FL_CBPENDING))
+ continue;
+ if (!unref && lock->l_flags & LDLM_FL_CBPENDING &&
+ lock->l_readers == 0 && lock->l_writers == 0)
+ continue;
+
+ if (!(lock->l_req_mode & *mode))
+ continue;
+ match = lock->l_req_mode;
+
+ if (lock->l_resource->lr_type == LDLM_EXTENT &&
+ (lock->l_policy_data.l_extent.start >
+ policy->l_extent.start ||
+ lock->l_policy_data.l_extent.end < policy->l_extent.end))
+ continue;
+
+ if (unlikely(match == LCK_GROUP) &&
+ lock->l_resource->lr_type == LDLM_EXTENT &&
+ lock->l_policy_data.l_extent.gid != policy->l_extent.gid)
+ continue;
+
+ /* We match if we have an existing lock with the same or a wider
+ set of bits. */
+ if (lock->l_resource->lr_type == LDLM_IBITS &&
+ ((lock->l_policy_data.l_inodebits.bits &
+ policy->l_inodebits.bits) !=
+ policy->l_inodebits.bits))
+ continue;
+
+ if (!unref && (lock->l_flags & LDLM_FL_GONE_MASK))
+ continue;
+
+ if ((flags & LDLM_FL_LOCAL_ONLY) &&
+ !(lock->l_flags & LDLM_FL_LOCAL))
+ continue;
+
+ if (flags & LDLM_FL_TEST_LOCK) {
+ LDLM_LOCK_GET(lock);
+ ldlm_lock_touch_in_lru(lock);
+ } else {
+ ldlm_lock_addref_internal_nolock(lock, match);
+ }
+ *mode = match;
+ return lock;
+ }
+
+ return NULL;
+}
+
+void ldlm_lock_fail_match_locked(struct ldlm_lock *lock)
+{
+ if ((lock->l_flags & LDLM_FL_FAIL_NOTIFIED) == 0) {
+ lock->l_flags |= LDLM_FL_FAIL_NOTIFIED;
+ wake_up_all(&lock->l_waitq);
+ }
+}
+EXPORT_SYMBOL(ldlm_lock_fail_match_locked);
+
+void ldlm_lock_fail_match(struct ldlm_lock *lock)
+{
+ lock_res_and_lock(lock);
+ ldlm_lock_fail_match_locked(lock);
+ unlock_res_and_lock(lock);
+}
+EXPORT_SYMBOL(ldlm_lock_fail_match);
+
+/**
+ * Mark lock as "matchable" by OST.
+ *
+ * Used to prevent certain races in LOV/OSC where the lock is granted, but LVB
+ * is not yet valid.
+ * Assumes LDLM lock is already locked.
+ */
+void ldlm_lock_allow_match_locked(struct ldlm_lock *lock)
+{
+ lock->l_flags |= LDLM_FL_LVB_READY;
+ wake_up_all(&lock->l_waitq);
+}
+EXPORT_SYMBOL(ldlm_lock_allow_match_locked);
+
+/**
+ * Mark lock as "matchable" by OST.
+ * Locks the lock and then \see ldlm_lock_allow_match_locked
+ */
+void ldlm_lock_allow_match(struct ldlm_lock *lock)
+{
+ lock_res_and_lock(lock);
+ ldlm_lock_allow_match_locked(lock);
+ unlock_res_and_lock(lock);
+}
+EXPORT_SYMBOL(ldlm_lock_allow_match);
+
+/**
+ * Attempt to find a lock with specified properties.
+ *
+ * Typically returns a reference to matched lock unless LDLM_FL_TEST_LOCK is
+ * set in \a flags
+ *
+ * Can be called in two ways:
+ *
+ * If 'ns' is NULL, then lockh describes an existing lock that we want to look
+ * for a duplicate of.
+ *
+ * Otherwise, all of the fields must be filled in, to match against.
+ *
+ * If 'flags' contains LDLM_FL_LOCAL_ONLY, then only match local locks on the
+ * server (ie, connh is NULL)
+ * If 'flags' contains LDLM_FL_BLOCK_GRANTED, then only locks on the granted
+ * list will be considered
+ * If 'flags' contains LDLM_FL_CBPENDING, then locks that have been marked
+ * to be canceled can still be matched as long as they still have reader
+ * or writer references
+ * If 'flags' contains LDLM_FL_TEST_LOCK, then don't actually reference a lock,
+ * just tell us if we would have matched.
+ *
+ * \retval 1 if it finds an already-existing lock that is compatible; in this
+ * case, lockh is filled in with an addref()ed lock
+ *
+ * We also check the security context; if that fails we simply return 0 (to
+ * keep caller code unchanged) and the context failure will be discovered by
+ * the caller sometime later.
+ */
+ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
+ const struct ldlm_res_id *res_id, ldlm_type_t type,
+ ldlm_policy_data_t *policy, ldlm_mode_t mode,
+ struct lustre_handle *lockh, int unref)
+{
+ struct ldlm_resource *res;
+ struct ldlm_lock *lock, *old_lock = NULL;
+ int rc = 0;
+
+ if (ns == NULL) {
+ old_lock = ldlm_handle2lock(lockh);
+ LASSERT(old_lock);
+
+ ns = ldlm_lock_to_ns(old_lock);
+ res_id = &old_lock->l_resource->lr_name;
+ type = old_lock->l_resource->lr_type;
+ mode = old_lock->l_req_mode;
+ }
+
+ res = ldlm_resource_get(ns, NULL, res_id, type, 0);
+ if (res == NULL) {
+ LASSERT(old_lock == NULL);
+ return 0;
+ }
+
+ LDLM_RESOURCE_ADDREF(res);
+ lock_res(res);
+
+ lock = search_queue(&res->lr_granted, &mode, policy, old_lock,
+ flags, unref);
+ if (lock != NULL)
+ GOTO(out, rc = 1);
+ if (flags & LDLM_FL_BLOCK_GRANTED)
+ GOTO(out, rc = 0);
+ lock = search_queue(&res->lr_converting, &mode, policy, old_lock,
+ flags, unref);
+ if (lock != NULL)
+ GOTO(out, rc = 1);
+ lock = search_queue(&res->lr_waiting, &mode, policy, old_lock,
+ flags, unref);
+ if (lock != NULL)
+ GOTO(out, rc = 1);
+
+ out:
+ unlock_res(res);
+ LDLM_RESOURCE_DELREF(res);
+ ldlm_resource_putref(res);
+
+ if (lock) {
+ ldlm_lock2handle(lock, lockh);
+ if ((flags & LDLM_FL_LVB_READY) &&
+ (!(lock->l_flags & LDLM_FL_LVB_READY))) {
+ __u64 wait_flags = LDLM_FL_LVB_READY |
+ LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;
+ struct l_wait_info lwi;
+ if (lock->l_completion_ast) {
+ int err = lock->l_completion_ast(lock,
+ LDLM_FL_WAIT_NOREPROC,
+ NULL);
+ if (err) {
+ if (flags & LDLM_FL_TEST_LOCK)
+ LDLM_LOCK_RELEASE(lock);
+ else
+ ldlm_lock_decref_internal(lock,
+ mode);
+ rc = 0;
+ goto out2;
+ }
+ }
+
+ lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout),
+ NULL, LWI_ON_SIGNAL_NOOP, NULL);
+
+ /* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
+ l_wait_event(lock->l_waitq,
+ lock->l_flags & wait_flags,
+ &lwi);
+ if (!(lock->l_flags & LDLM_FL_LVB_READY)) {
+ if (flags & LDLM_FL_TEST_LOCK)
+ LDLM_LOCK_RELEASE(lock);
+ else
+ ldlm_lock_decref_internal(lock, mode);
+ rc = 0;
+ }
+ }
+ }
+ out2:
+ if (rc) {
+ LDLM_DEBUG(lock, "matched ("LPU64" "LPU64")",
+ (type == LDLM_PLAIN || type == LDLM_IBITS) ?
+ res_id->name[2] : policy->l_extent.start,
+ (type == LDLM_PLAIN || type == LDLM_IBITS) ?
+ res_id->name[3] : policy->l_extent.end);
+
+ /* check user's security context */
+ if (lock->l_conn_export &&
+ sptlrpc_import_check_ctx(
+ class_exp2cliimp(lock->l_conn_export))) {
+ if (!(flags & LDLM_FL_TEST_LOCK))
+ ldlm_lock_decref_internal(lock, mode);
+ rc = 0;
+ }
+
+ if (flags & LDLM_FL_TEST_LOCK)
+ LDLM_LOCK_RELEASE(lock);
+
+ } else if (!(flags & LDLM_FL_TEST_LOCK)) {/*less verbose for test-only*/
+ LDLM_DEBUG_NOLOCK("not matched ns %p type %u mode %u res "
+ LPU64"/"LPU64" ("LPU64" "LPU64")", ns,
+ type, mode, res_id->name[0], res_id->name[1],
+ (type == LDLM_PLAIN || type == LDLM_IBITS) ?
+				  res_id->name[2] : policy->l_extent.start,
+ (type == LDLM_PLAIN || type == LDLM_IBITS) ?
+ res_id->name[3] : policy->l_extent.end);
+ }
+ if (old_lock)
+ LDLM_LOCK_PUT(old_lock);
+
+ return rc ? mode : 0;
+}
+EXPORT_SYMBOL(ldlm_lock_match);
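+
+/*
+ * Usage sketch (illustrative only, not taken from the original code): probe
+ * for a cached PR/PW extent lock covering [start, end].  With
+ * LDLM_FL_TEST_LOCK no reference is left behind on a match; without it the
+ * caller would have to drop the reference it receives.
+ */
+#if 0
+static int example_have_cached_lock(struct ldlm_namespace *ns,
+				    const struct ldlm_res_id *res_id,
+				    __u64 start, __u64 end)
+{
+	ldlm_policy_data_t policy = {
+		.l_extent = { .start = start, .end = end, .gid = 0 },
+	};
+	struct lustre_handle lockh;
+	ldlm_mode_t mode;
+
+	mode = ldlm_lock_match(ns, LDLM_FL_TEST_LOCK | LDLM_FL_LVB_READY,
+			       res_id, LDLM_EXTENT, &policy, LCK_PR | LCK_PW,
+			       &lockh, 0);
+	return mode != 0;	/* non-zero mode means a compatible lock exists */
+}
+#endif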
+
+ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
+ __u64 *bits)
+{
+ struct ldlm_lock *lock;
+ ldlm_mode_t mode = 0;
+
+ lock = ldlm_handle2lock(lockh);
+ if (lock != NULL) {
+ lock_res_and_lock(lock);
+ if (lock->l_flags & LDLM_FL_GONE_MASK)
+ GOTO(out, mode);
+
+ if (lock->l_flags & LDLM_FL_CBPENDING &&
+ lock->l_readers == 0 && lock->l_writers == 0)
+ GOTO(out, mode);
+
+ if (bits)
+ *bits = lock->l_policy_data.l_inodebits.bits;
+ mode = lock->l_granted_mode;
+ ldlm_lock_addref_internal_nolock(lock, mode);
+ }
+
+out:
+ if (lock != NULL) {
+ unlock_res_and_lock(lock);
+ LDLM_LOCK_PUT(lock);
+ }
+ return mode;
+}
+EXPORT_SYMBOL(ldlm_revalidate_lock_handle);
+
+/** The caller must guarantee that the buffer is large enough. */
+int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
+ enum req_location loc, void *data, int size)
+{
+ void *lvb;
+
+ LASSERT(data != NULL);
+ LASSERT(size >= 0);
+
+ switch (lock->l_lvb_type) {
+ case LVB_T_OST:
+ if (size == sizeof(struct ost_lvb)) {
+ if (loc == RCL_CLIENT)
+ lvb = req_capsule_client_swab_get(pill,
+ &RMF_DLM_LVB,
+ lustre_swab_ost_lvb);
+ else
+ lvb = req_capsule_server_swab_get(pill,
+ &RMF_DLM_LVB,
+ lustre_swab_ost_lvb);
+ if (unlikely(lvb == NULL)) {
+ LDLM_ERROR(lock, "no LVB");
+ return -EPROTO;
+ }
+
+ memcpy(data, lvb, size);
+ } else if (size == sizeof(struct ost_lvb_v1)) {
+ struct ost_lvb *olvb = data;
+
+ if (loc == RCL_CLIENT)
+ lvb = req_capsule_client_swab_get(pill,
+ &RMF_DLM_LVB,
+ lustre_swab_ost_lvb_v1);
+ else
+ lvb = req_capsule_server_sized_swab_get(pill,
+ &RMF_DLM_LVB, size,
+ lustre_swab_ost_lvb_v1);
+ if (unlikely(lvb == NULL)) {
+ LDLM_ERROR(lock, "no LVB");
+ return -EPROTO;
+ }
+
+ memcpy(data, lvb, size);
+ olvb->lvb_mtime_ns = 0;
+ olvb->lvb_atime_ns = 0;
+ olvb->lvb_ctime_ns = 0;
+ } else {
+ LDLM_ERROR(lock, "Replied unexpected ost LVB size %d",
+ size);
+ return -EINVAL;
+ }
+ break;
+ case LVB_T_LQUOTA:
+ if (size == sizeof(struct lquota_lvb)) {
+ if (loc == RCL_CLIENT)
+ lvb = req_capsule_client_swab_get(pill,
+ &RMF_DLM_LVB,
+ lustre_swab_lquota_lvb);
+ else
+ lvb = req_capsule_server_swab_get(pill,
+ &RMF_DLM_LVB,
+ lustre_swab_lquota_lvb);
+ if (unlikely(lvb == NULL)) {
+ LDLM_ERROR(lock, "no LVB");
+ return -EPROTO;
+ }
+
+ memcpy(data, lvb, size);
+ } else {
+ LDLM_ERROR(lock, "Replied unexpected lquota LVB size %d",
+ size);
+ return -EINVAL;
+ }
+ break;
+ case LVB_T_LAYOUT:
+ if (size == 0)
+ break;
+
+ if (loc == RCL_CLIENT)
+ lvb = req_capsule_client_get(pill, &RMF_DLM_LVB);
+ else
+ lvb = req_capsule_server_get(pill, &RMF_DLM_LVB);
+ if (unlikely(lvb == NULL)) {
+ LDLM_ERROR(lock, "no LVB");
+ return -EPROTO;
+ }
+
+ memcpy(data, lvb, size);
+ break;
+ default:
+ LDLM_ERROR(lock, "Unknown LVB type: %d\n", lock->l_lvb_type);
+ dump_stack();
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * Create and fill in new LDLM lock with specified properties.
+ * Returns a referenced lock
+ */
+struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
+ const struct ldlm_res_id *res_id,
+ ldlm_type_t type,
+ ldlm_mode_t mode,
+ const struct ldlm_callback_suite *cbs,
+ void *data, __u32 lvb_len,
+ enum lvb_type lvb_type)
+{
+ struct ldlm_lock *lock;
+ struct ldlm_resource *res;
+
+ res = ldlm_resource_get(ns, NULL, res_id, type, 1);
+ if (res == NULL)
+ return NULL;
+
+ lock = ldlm_lock_new(res);
+
+ if (lock == NULL)
+ return NULL;
+
+ lock->l_req_mode = mode;
+ lock->l_ast_data = data;
+ lock->l_pid = current_pid();
+ if (ns_is_server(ns))
+ lock->l_flags |= LDLM_FL_NS_SRV;
+ if (cbs) {
+ lock->l_blocking_ast = cbs->lcs_blocking;
+ lock->l_completion_ast = cbs->lcs_completion;
+ lock->l_glimpse_ast = cbs->lcs_glimpse;
+ }
+
+ lock->l_tree_node = NULL;
+ /* if this is the extent lock, allocate the interval tree node */
+ if (type == LDLM_EXTENT) {
+ if (ldlm_interval_alloc(lock) == NULL)
+ GOTO(out, 0);
+ }
+
+ if (lvb_len) {
+ lock->l_lvb_len = lvb_len;
+ OBD_ALLOC(lock->l_lvb_data, lvb_len);
+ if (lock->l_lvb_data == NULL)
+ GOTO(out, 0);
+ }
+
+ lock->l_lvb_type = lvb_type;
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK))
+ GOTO(out, 0);
+
+ return lock;
+
+out:
+ ldlm_lock_destroy(lock);
+ LDLM_LOCK_RELEASE(lock);
+ return NULL;
+}
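+
+/*
+ * Usage sketch (illustrative only): create a plain PR lock with caller
+ * supplied blocking/completion ASTs.  example_blocking_ast(),
+ * example_completion_ast() and LVB_T_NONE ("no LVB") are assumptions of
+ * this sketch, not definitions made in this file.
+ */
+#if 0
+static struct ldlm_lock *example_new_lock(struct ldlm_namespace *ns,
+					  const struct ldlm_res_id *res_id)
+{
+	struct ldlm_callback_suite cbs = {
+		.lcs_blocking	= example_blocking_ast,
+		.lcs_completion	= example_completion_ast,
+		.lcs_glimpse	= NULL,
+	};
+
+	/* returns a referenced lock, or NULL on allocation failure */
+	return ldlm_lock_create(ns, res_id, LDLM_PLAIN, LCK_PR, &cbs,
+				NULL, 0, LVB_T_NONE);
+}
+#endif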
+
+/**
+ * Enqueue (request) a lock.
+ *
+ * Does not block. As a result of the enqueue, the lock is placed on the
+ * granted or waiting list.
+ *
+ * If namespace has intent policy set and the lock has LDLM_FL_HAS_INTENT flag
+ * set, skip all the enqueueing and delegate lock processing to intent policy
+ * function.
+ */
+ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
+ struct ldlm_lock **lockp,
+ void *cookie, __u64 *flags)
+{
+ struct ldlm_lock *lock = *lockp;
+ struct ldlm_resource *res = lock->l_resource;
+ int local = ns_is_client(ldlm_res_to_ns(res));
+ ldlm_error_t rc = ELDLM_OK;
+ struct ldlm_interval *node = NULL;
+
+ lock->l_last_activity = cfs_time_current_sec();
+ /* policies are not executed on the client or during replay */
+ if ((*flags & (LDLM_FL_HAS_INTENT|LDLM_FL_REPLAY)) == LDLM_FL_HAS_INTENT
+ && !local && ns->ns_policy) {
+ rc = ns->ns_policy(ns, lockp, cookie, lock->l_req_mode, *flags,
+ NULL);
+ if (rc == ELDLM_LOCK_REPLACED) {
+ /* The lock that was returned has already been granted,
+ * and placed into lockp. If it's not the same as the
+ * one we passed in, then destroy the old one and our
+ * work here is done. */
+ if (lock != *lockp) {
+ ldlm_lock_destroy(lock);
+ LDLM_LOCK_RELEASE(lock);
+ }
+ *flags |= LDLM_FL_LOCK_CHANGED;
+ return 0;
+ } else if (rc != ELDLM_OK ||
+ (rc == ELDLM_OK && (*flags & LDLM_FL_INTENT_ONLY))) {
+ ldlm_lock_destroy(lock);
+ return rc;
+ }
+ }
+
+	/* For a replaying lock, it might already be in the granted list, so
+	 * unlinking the lock will cause the interval node to be freed. We
+	 * have to allocate the interval node early, otherwise we can't
+	 * regrant this lock in the future. - jay */
+ if (!local && (*flags & LDLM_FL_REPLAY) && res->lr_type == LDLM_EXTENT)
+ OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, __GFP_IO);
+
+ lock_res_and_lock(lock);
+ if (local && lock->l_req_mode == lock->l_granted_mode) {
+ /* The server returned a blocked lock, but it was granted
+ * before we got a chance to actually enqueue it. We don't
+ * need to do anything else. */
+ *flags &= ~(LDLM_FL_BLOCK_GRANTED |
+ LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
+ GOTO(out, ELDLM_OK);
+ }
+
+ ldlm_resource_unlink_lock(lock);
+ if (res->lr_type == LDLM_EXTENT && lock->l_tree_node == NULL) {
+ if (node == NULL) {
+ ldlm_lock_destroy_nolock(lock);
+ GOTO(out, rc = -ENOMEM);
+ }
+
+ INIT_LIST_HEAD(&node->li_group);
+ ldlm_interval_attach(node, lock);
+ node = NULL;
+ }
+
+ /* Some flags from the enqueue want to make it into the AST, via the
+ * lock's l_flags. */
+ lock->l_flags |= *flags & LDLM_FL_AST_DISCARD_DATA;
+
+ /* This distinction between local lock trees is very important; a client
+ * namespace only has information about locks taken by that client, and
+ * thus doesn't have enough information to decide for itself if it can
+ * be granted (below). In this case, we do exactly what the server
+ * tells us to do, as dictated by the 'flags'.
+ *
+ * We do exactly the same thing during recovery, when the server is
+ * more or less trusting the clients not to lie.
+ *
+ * FIXME (bug 268): Detect obvious lies by checking compatibility in
+ * granted/converting queues. */
+ if (local) {
+ if (*flags & LDLM_FL_BLOCK_CONV)
+ ldlm_resource_add_lock(res, &res->lr_converting, lock);
+ else if (*flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED))
+ ldlm_resource_add_lock(res, &res->lr_waiting, lock);
+ else
+ ldlm_grant_lock(lock, NULL);
+ GOTO(out, ELDLM_OK);
+ } else {
+ CERROR("This is client-side-only module, cannot handle "
+ "LDLM_NAMESPACE_SERVER resource type lock.\n");
+ LBUG();
+ }
+
+out:
+ unlock_res_and_lock(lock);
+ if (node)
+ OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
+ return rc;
+}
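+
+/*
+ * Usage sketch (illustrative only): a purely local enqueue.  With no
+ * blocking flags set the lock is granted immediately; the error mapping at
+ * the end is caller policy, not something this file defines.
+ */
+#if 0
+static int example_enqueue_local(struct ldlm_namespace *ns,
+				 struct ldlm_lock *lock)
+{
+	struct ldlm_lock *lockp = lock;
+	__u64 flags = 0;	/* e.g. LDLM_FL_BLOCK_GRANTED from a server reply */
+	ldlm_error_t err;
+
+	err = ldlm_lock_enqueue(ns, &lockp, NULL /* cookie */, &flags);
+	return err == ELDLM_OK ? 0 : -EIO;
+}
+#endif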
+
+
+/**
+ * Process a call to blocking AST callback for a lock in ast_work list
+ */
+static int
+ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
+{
+ struct ldlm_cb_set_arg *arg = opaq;
+ struct ldlm_lock_desc d;
+ int rc;
+ struct ldlm_lock *lock;
+
+ if (list_empty(arg->list))
+ return -ENOENT;
+
+ lock = list_entry(arg->list->next, struct ldlm_lock, l_bl_ast);
+
+ /* nobody should touch l_bl_ast */
+ lock_res_and_lock(lock);
+ list_del_init(&lock->l_bl_ast);
+
+ LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
+ LASSERT(lock->l_bl_ast_run == 0);
+ LASSERT(lock->l_blocking_lock);
+ lock->l_bl_ast_run++;
+ unlock_res_and_lock(lock);
+
+ ldlm_lock2desc(lock->l_blocking_lock, &d);
+
+ rc = lock->l_blocking_ast(lock, &d, (void *)arg, LDLM_CB_BLOCKING);
+ LDLM_LOCK_RELEASE(lock->l_blocking_lock);
+ lock->l_blocking_lock = NULL;
+ LDLM_LOCK_RELEASE(lock);
+
+ return rc;
+}
+
+/**
+ * Process a call to completion AST callback for a lock in ast_work list
+ */
+static int
+ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
+{
+ struct ldlm_cb_set_arg *arg = opaq;
+ int rc = 0;
+ struct ldlm_lock *lock;
+ ldlm_completion_callback completion_callback;
+
+ if (list_empty(arg->list))
+ return -ENOENT;
+
+ lock = list_entry(arg->list->next, struct ldlm_lock, l_cp_ast);
+
+ /* It's possible to receive a completion AST before we've set
+ * the l_completion_ast pointer: either because the AST arrived
+ * before the reply, or simply because there's a small race
+ * window between receiving the reply and finishing the local
+ * enqueue. (bug 842)
+ *
+ * This can't happen with the blocking_ast, however, because we
+ * will never call the local blocking_ast until we drop our
+ * reader/writer reference, which we won't do until we get the
+ * reply and finish enqueueing. */
+
+ /* nobody should touch l_cp_ast */
+ lock_res_and_lock(lock);
+ list_del_init(&lock->l_cp_ast);
+ LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
+ /* save l_completion_ast since it can be changed by
+ * mds_intent_policy(), see bug 14225 */
+ completion_callback = lock->l_completion_ast;
+ lock->l_flags &= ~LDLM_FL_CP_REQD;
+ unlock_res_and_lock(lock);
+
+ if (completion_callback != NULL)
+ rc = completion_callback(lock, 0, (void *)arg);
+ LDLM_LOCK_RELEASE(lock);
+
+ return rc;
+}
+
+/**
+ * Process a call to revocation AST callback for a lock in ast_work list
+ */
+static int
+ldlm_work_revoke_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
+{
+ struct ldlm_cb_set_arg *arg = opaq;
+ struct ldlm_lock_desc desc;
+ int rc;
+ struct ldlm_lock *lock;
+
+ if (list_empty(arg->list))
+ return -ENOENT;
+
+ lock = list_entry(arg->list->next, struct ldlm_lock, l_rk_ast);
+ list_del_init(&lock->l_rk_ast);
+
+	/* the desc just pretends to be exclusive */
+ ldlm_lock2desc(lock, &desc);
+ desc.l_req_mode = LCK_EX;
+ desc.l_granted_mode = 0;
+
+ rc = lock->l_blocking_ast(lock, &desc, (void*)arg, LDLM_CB_BLOCKING);
+ LDLM_LOCK_RELEASE(lock);
+
+ return rc;
+}
+
+/**
+ * Process a call to glimpse AST callback for a lock in ast_work list
+ */
+int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
+{
+ struct ldlm_cb_set_arg *arg = opaq;
+ struct ldlm_glimpse_work *gl_work;
+ struct ldlm_lock *lock;
+ int rc = 0;
+
+ if (list_empty(arg->list))
+ return -ENOENT;
+
+ gl_work = list_entry(arg->list->next, struct ldlm_glimpse_work,
+ gl_list);
+ list_del_init(&gl_work->gl_list);
+
+ lock = gl_work->gl_lock;
+
+ /* transfer the glimpse descriptor to ldlm_cb_set_arg */
+ arg->gl_desc = gl_work->gl_desc;
+
+ /* invoke the actual glimpse callback */
+ if (lock->l_glimpse_ast(lock, (void*)arg) == 0)
+ rc = 1;
+
+ LDLM_LOCK_RELEASE(lock);
+
+ if ((gl_work->gl_flags & LDLM_GL_WORK_NOFREE) == 0)
+ OBD_FREE_PTR(gl_work);
+
+ return rc;
+}
+
+/**
+ * Process list of locks in need of ASTs being sent.
+ *
+ * Used on server to send multiple ASTs together instead of sending one by
+ * one.
+ */
+int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
+ ldlm_desc_ast_t ast_type)
+{
+ struct ldlm_cb_set_arg *arg;
+ set_producer_func work_ast_lock;
+ int rc;
+
+ if (list_empty(rpc_list))
+ return 0;
+
+ OBD_ALLOC_PTR(arg);
+ if (arg == NULL)
+ return -ENOMEM;
+
+ atomic_set(&arg->restart, 0);
+ arg->list = rpc_list;
+
+ switch (ast_type) {
+ case LDLM_WORK_BL_AST:
+ arg->type = LDLM_BL_CALLBACK;
+ work_ast_lock = ldlm_work_bl_ast_lock;
+ break;
+ case LDLM_WORK_CP_AST:
+ arg->type = LDLM_CP_CALLBACK;
+ work_ast_lock = ldlm_work_cp_ast_lock;
+ break;
+ case LDLM_WORK_REVOKE_AST:
+ arg->type = LDLM_BL_CALLBACK;
+ work_ast_lock = ldlm_work_revoke_ast_lock;
+ break;
+ case LDLM_WORK_GL_AST:
+ arg->type = LDLM_GL_CALLBACK;
+ work_ast_lock = ldlm_work_gl_ast_lock;
+ break;
+ default:
+ LBUG();
+ }
+
+ /* We create a ptlrpc request set with flow control extension.
+ * This request set will use the work_ast_lock function to produce new
+ * requests and will send a new request each time one completes in order
+	 * to keep the number of requests in flight at ns_max_parallel_ast */
+ arg->set = ptlrpc_prep_fcset(ns->ns_max_parallel_ast ? : UINT_MAX,
+ work_ast_lock, arg);
+ if (arg->set == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ ptlrpc_set_wait(arg->set);
+ ptlrpc_set_destroy(arg->set);
+
+ rc = atomic_read(&arg->restart) ? -ERESTART : 0;
+ GOTO(out, rc);
+out:
+ OBD_FREE_PTR(arg);
+ return rc;
+}
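+
+/*
+ * Usage sketch (illustrative only): the common pattern is to collect
+ * completion ASTs while granting under the resource lock and then send them
+ * in one batch, as ldlm_handle_cp_callback() in ldlm_lockd.c does.  The
+ * lock is assumed to be unlinked from any queue already.
+ */
+#if 0
+static void example_grant_and_notify(struct ldlm_namespace *ns,
+				     struct ldlm_lock *lock)
+{
+	LIST_HEAD(ast_list);
+
+	lock_res_and_lock(lock);
+	ldlm_grant_lock(lock, &ast_list);
+	unlock_res_and_lock(lock);
+
+	ldlm_run_ast_work(ns, &ast_list, LDLM_WORK_CP_AST);
+}
+#endif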
+
+static int reprocess_one_queue(struct ldlm_resource *res, void *closure)
+{
+ ldlm_reprocess_all(res);
+ return LDLM_ITER_CONTINUE;
+}
+
+static int ldlm_reprocess_res(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode, void *arg)
+{
+ struct ldlm_resource *res = cfs_hash_object(hs, hnode);
+ int rc;
+
+ rc = reprocess_one_queue(res, arg);
+
+ return rc == LDLM_ITER_STOP;
+}
+
+/**
+ * Iterate through all resources on a namespace attempting to grant waiting
+ * locks.
+ */
+void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
+{
+ if (ns != NULL) {
+ cfs_hash_for_each_nolock(ns->ns_rs_hash,
+ ldlm_reprocess_res, NULL);
+ }
+}
+EXPORT_SYMBOL(ldlm_reprocess_all_ns);
+
+/**
+ * Try to grant all waiting locks on a resource.
+ *
+ * Calls ldlm_reprocess_queue on converting and waiting queues.
+ *
+ * Typically called after some resource locks are cancelled to see
+ * if anything could be granted as a result of the cancellation.
+ */
+void ldlm_reprocess_all(struct ldlm_resource *res)
+{
+ LIST_HEAD(rpc_list);
+
+ if (!ns_is_client(ldlm_res_to_ns(res))) {
+ CERROR("This is client-side-only module, cannot handle "
+ "LDLM_NAMESPACE_SERVER resource type lock.\n");
+ LBUG();
+ }
+}
+
+/**
+ * Helper function to call blocking AST for LDLM lock \a lock in a
+ * "cancelling" mode.
+ */
+void ldlm_cancel_callback(struct ldlm_lock *lock)
+{
+ check_res_locked(lock->l_resource);
+ if (!(lock->l_flags & LDLM_FL_CANCEL)) {
+ lock->l_flags |= LDLM_FL_CANCEL;
+ if (lock->l_blocking_ast) {
+ unlock_res_and_lock(lock);
+ lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
+ LDLM_CB_CANCELING);
+ lock_res_and_lock(lock);
+ } else {
+ LDLM_DEBUG(lock, "no blocking ast");
+ }
+ }
+ lock->l_flags |= LDLM_FL_BL_DONE;
+}
+
+/**
+ * Remove skiplist-enabled LDLM lock \a req from granted list
+ */
+void ldlm_unlink_lock_skiplist(struct ldlm_lock *req)
+{
+ if (req->l_resource->lr_type != LDLM_PLAIN &&
+ req->l_resource->lr_type != LDLM_IBITS)
+ return;
+
+ list_del_init(&req->l_sl_policy);
+ list_del_init(&req->l_sl_mode);
+}
+
+/**
+ * Attempts to cancel LDLM lock \a lock that has no reader/writer references.
+ */
+void ldlm_lock_cancel(struct ldlm_lock *lock)
+{
+ struct ldlm_resource *res;
+ struct ldlm_namespace *ns;
+
+ lock_res_and_lock(lock);
+
+ res = lock->l_resource;
+ ns = ldlm_res_to_ns(res);
+
+ /* Please do not, no matter how tempting, remove this LBUG without
+ * talking to me first. -phik */
+ if (lock->l_readers || lock->l_writers) {
+ LDLM_ERROR(lock, "lock still has references");
+ LBUG();
+ }
+
+ if (lock->l_flags & LDLM_FL_WAITED)
+ ldlm_del_waiting_lock(lock);
+
+	/* Call the cancel callback (blocking AST in cancelling mode). */
+ ldlm_cancel_callback(lock);
+
+ /* Yes, second time, just in case it was added again while we were
+ * running with no res lock in ldlm_cancel_callback */
+ if (lock->l_flags & LDLM_FL_WAITED)
+ ldlm_del_waiting_lock(lock);
+
+ ldlm_resource_unlink_lock(lock);
+ ldlm_lock_destroy_nolock(lock);
+
+ if (lock->l_granted_mode == lock->l_req_mode)
+ ldlm_pool_del(&ns->ns_pool, lock);
+
+	/* Make sure we will not be called again for the same lock, which is
+	 * possible if we do not zero out lock->l_granted_mode */
+ lock->l_granted_mode = LCK_MINMODE;
+ unlock_res_and_lock(lock);
+}
+EXPORT_SYMBOL(ldlm_lock_cancel);
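+
+/*
+ * Usage sketch (illustrative only): cancel a lock that has dropped its last
+ * reader/writer reference and re-evaluate waiters on the same resource,
+ * mirroring what ldlm_cancel_locks_for_export_cb() below does per lock.
+ */
+#if 0
+static void example_cancel_one(struct ldlm_lock *lock)
+{
+	struct ldlm_resource *res = ldlm_resource_getref(lock->l_resource);
+
+	ldlm_lock_cancel(lock);		/* LBUGs if references remain */
+	ldlm_reprocess_all(res);
+	ldlm_resource_putref(res);
+}
+#endif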
+
+/**
+ * Set opaque data into the lock that only makes sense to upper layer.
+ */
+int ldlm_lock_set_data(struct lustre_handle *lockh, void *data)
+{
+ struct ldlm_lock *lock = ldlm_handle2lock(lockh);
+ int rc = -EINVAL;
+
+ if (lock) {
+ if (lock->l_ast_data == NULL)
+ lock->l_ast_data = data;
+ if (lock->l_ast_data == data)
+ rc = 0;
+ LDLM_LOCK_PUT(lock);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(ldlm_lock_set_data);
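+
+/*
+ * Usage sketch (illustrative only): attach caller-private data to a lock by
+ * handle; a non-zero return means the lock is gone or already carries
+ * different ast_data.
+ */
+#if 0
+static int example_attach_data(struct lustre_handle *lockh, void *my_data)
+{
+	int rc = ldlm_lock_set_data(lockh, my_data);
+
+	if (rc != 0)
+		CDEBUG(D_DLMTRACE, "could not set ast_data: rc = %d\n", rc);
+	return rc;
+}
+#endif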
+
+struct export_cl_data {
+ struct obd_export *ecl_exp;
+ int ecl_loop;
+};
+
+/**
+ * Iterator function for ldlm_cancel_locks_for_export.
+ * Cancels passed locks.
+ */
+int ldlm_cancel_locks_for_export_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode, void *data)
+
+{
+ struct export_cl_data *ecl = (struct export_cl_data *)data;
+ struct obd_export *exp = ecl->ecl_exp;
+ struct ldlm_lock *lock = cfs_hash_object(hs, hnode);
+ struct ldlm_resource *res;
+
+ res = ldlm_resource_getref(lock->l_resource);
+ LDLM_LOCK_GET(lock);
+
+ LDLM_DEBUG(lock, "export %p", exp);
+ ldlm_res_lvbo_update(res, NULL, 1);
+ ldlm_lock_cancel(lock);
+ ldlm_reprocess_all(res);
+ ldlm_resource_putref(res);
+ LDLM_LOCK_RELEASE(lock);
+
+ ecl->ecl_loop++;
+ if ((ecl->ecl_loop & -ecl->ecl_loop) == ecl->ecl_loop) {
+ CDEBUG(D_INFO,
+ "Cancel lock %p for export %p (loop %d), still have "
+ "%d locks left on hash table.\n",
+ lock, exp, ecl->ecl_loop,
+ atomic_read(&hs->hs_count));
+ }
+
+ return 0;
+}
+
+/**
+ * Cancel all locks for given export.
+ *
+ * Typically called on client disconnection/eviction
+ */
+void ldlm_cancel_locks_for_export(struct obd_export *exp)
+{
+ struct export_cl_data ecl = {
+ .ecl_exp = exp,
+ .ecl_loop = 0,
+ };
+
+ cfs_hash_for_each_empty(exp->exp_lock_hash,
+ ldlm_cancel_locks_for_export_cb, &ecl);
+}
+
+/**
+ * Downgrade an exclusive lock.
+ *
+ * A fast variant of ldlm_lock_convert for conversion of exclusive
+ * locks. The conversion is always successful.
+ * Used by Commit on Sharing (COS) code.
+ *
+ * \param lock A lock to convert
+ * \param new_mode new lock mode
+ */
+void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode)
+{
+ LASSERT(lock->l_granted_mode & (LCK_PW | LCK_EX));
+ LASSERT(new_mode == LCK_COS);
+
+ lock_res_and_lock(lock);
+ ldlm_resource_unlink_lock(lock);
+ /*
+ * Remove the lock from pool as it will be added again in
+ * ldlm_grant_lock() called below.
+ */
+ ldlm_pool_del(&ldlm_lock_to_ns(lock)->ns_pool, lock);
+
+ lock->l_req_mode = new_mode;
+ ldlm_grant_lock(lock, NULL);
+ unlock_res_and_lock(lock);
+ ldlm_reprocess_all(lock->l_resource);
+}
+EXPORT_SYMBOL(ldlm_lock_downgrade);
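+
+/*
+ * Usage sketch (illustrative only): Commit-on-Sharing downgrades a granted
+ * PW/EX lock to COS mode; the guard mirrors the LASSERTs above.
+ */
+#if 0
+static void example_cos_downgrade(struct ldlm_lock *lock)
+{
+	if (lock->l_granted_mode & (LCK_PW | LCK_EX))
+		ldlm_lock_downgrade(lock, LCK_COS);
+}
+#endif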
+
+/**
+ * Attempt to convert already granted lock to a different mode.
+ *
+ * While lock conversion is not currently used, future client-side
+ * optimizations could take advantage of it to avoid discarding cached
+ * pages on a file.
+ */
+struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
+ __u32 *flags)
+{
+ LIST_HEAD(rpc_list);
+ struct ldlm_resource *res;
+ struct ldlm_namespace *ns;
+ int granted = 0;
+ struct ldlm_interval *node;
+
+ /* Just return if mode is unchanged. */
+ if (new_mode == lock->l_granted_mode) {
+ *flags |= LDLM_FL_BLOCK_GRANTED;
+ return lock->l_resource;
+ }
+
+	/* I can't check the type of lock here because the lock's bitlock
+	 * is not held here, so do the allocation blindly. -jay */
+ OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, __GFP_IO);
+ if (node == NULL)
+ /* Actually, this causes EDEADLOCK to be returned */
+ return NULL;
+
+ LASSERTF((new_mode == LCK_PW && lock->l_granted_mode == LCK_PR),
+ "new_mode %u, granted %u\n", new_mode, lock->l_granted_mode);
+
+ lock_res_and_lock(lock);
+
+ res = lock->l_resource;
+ ns = ldlm_res_to_ns(res);
+
+ lock->l_req_mode = new_mode;
+ if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS) {
+ ldlm_resource_unlink_lock(lock);
+ } else {
+ ldlm_resource_unlink_lock(lock);
+ if (res->lr_type == LDLM_EXTENT) {
+			/* FIXME: ugly code, I have to attach the lock to an
+			 * interval node again since perhaps it will be granted
+ * soon */
+ INIT_LIST_HEAD(&node->li_group);
+ ldlm_interval_attach(node, lock);
+ node = NULL;
+ }
+ }
+
+ /*
+ * Remove old lock from the pool before adding the lock with new
+ * mode below in ->policy()
+ */
+ ldlm_pool_del(&ns->ns_pool, lock);
+
+ /* If this is a local resource, put it on the appropriate list. */
+ if (ns_is_client(ldlm_res_to_ns(res))) {
+ if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED)) {
+ ldlm_resource_add_lock(res, &res->lr_converting, lock);
+ } else {
+ /* This should never happen, because of the way the
+ * server handles conversions. */
+ LDLM_ERROR(lock, "Erroneous flags %x on local lock\n",
+ *flags);
+ LBUG();
+
+ ldlm_grant_lock(lock, &rpc_list);
+ granted = 1;
+ /* FIXME: completion handling not with lr_lock held ! */
+ if (lock->l_completion_ast)
+ lock->l_completion_ast(lock, 0, NULL);
+ }
+ } else {
+ CERROR("This is client-side-only module, cannot handle "
+ "LDLM_NAMESPACE_SERVER resource type lock.\n");
+ LBUG();
+ }
+ unlock_res_and_lock(lock);
+
+ if (granted)
+ ldlm_run_ast_work(ns, &rpc_list, LDLM_WORK_CP_AST);
+ if (node)
+ OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
+ return res;
+}
+EXPORT_SYMBOL(ldlm_lock_convert);
+
+/**
+ * Print lock with lock handle \a lockh description into debug log.
+ *
+ * Used when printing all locks on a resource for debug purposes.
+ */
+void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh)
+{
+ struct ldlm_lock *lock;
+
+ if (!((libcfs_debug | D_ERROR) & level))
+ return;
+
+ lock = ldlm_handle2lock(lockh);
+ if (lock == NULL)
+ return;
+
+ LDLM_DEBUG_LIMIT(level, lock, "###");
+
+ LDLM_LOCK_PUT(lock);
+}
+EXPORT_SYMBOL(ldlm_lock_dump_handle);
+
+/**
+ * Print lock information with custom message into debug log.
+ * Helper function.
+ */
+void _ldlm_lock_debug(struct ldlm_lock *lock,
+ struct libcfs_debug_msg_data *msgdata,
+ const char *fmt, ...)
+{
+ va_list args;
+ struct obd_export *exp = lock->l_export;
+ struct ldlm_resource *resource = lock->l_resource;
+ char *nid = "local";
+
+ va_start(args, fmt);
+
+ if (exp && exp->exp_connection) {
+ nid = libcfs_nid2str(exp->exp_connection->c_peer.nid);
+ } else if (exp && exp->exp_obd != NULL) {
+ struct obd_import *imp = exp->exp_obd->u.cli.cl_import;
+ nid = libcfs_nid2str(imp->imp_connection->c_peer.nid);
+ }
+
+ if (resource == NULL) {
+ libcfs_debug_vmsg2(msgdata, fmt, args,
+ " ns: \?\? lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
+ "res: \?\? rrc=\?\? type: \?\?\? flags: "LPX64" nid: %s "
+ "remote: "LPX64" expref: %d pid: %u timeout: %lu "
+ "lvb_type: %d\n",
+ lock,
+ lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+ lock->l_readers, lock->l_writers,
+ ldlm_lockname[lock->l_granted_mode],
+ ldlm_lockname[lock->l_req_mode],
+ lock->l_flags, nid, lock->l_remote_handle.cookie,
+ exp ? atomic_read(&exp->exp_refcount) : -99,
+ lock->l_pid, lock->l_callback_timeout, lock->l_lvb_type);
+ va_end(args);
+ return;
+ }
+
+ switch (resource->lr_type) {
+ case LDLM_EXTENT:
+ libcfs_debug_vmsg2(msgdata, fmt, args,
+ " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
+ "res: "DLDLMRES" rrc: %d type: %s ["LPU64"->"LPU64"] "
+ "(req "LPU64"->"LPU64") flags: "LPX64" nid: %s remote: "
+ LPX64" expref: %d pid: %u timeout: %lu lvb_type: %d\n",
+ ldlm_lock_to_ns_name(lock), lock,
+ lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+ lock->l_readers, lock->l_writers,
+ ldlm_lockname[lock->l_granted_mode],
+ ldlm_lockname[lock->l_req_mode],
+ PLDLMRES(resource),
+ atomic_read(&resource->lr_refcount),
+ ldlm_typename[resource->lr_type],
+ lock->l_policy_data.l_extent.start,
+ lock->l_policy_data.l_extent.end,
+ lock->l_req_extent.start, lock->l_req_extent.end,
+ lock->l_flags, nid, lock->l_remote_handle.cookie,
+ exp ? atomic_read(&exp->exp_refcount) : -99,
+ lock->l_pid, lock->l_callback_timeout,
+ lock->l_lvb_type);
+ break;
+
+ case LDLM_FLOCK:
+ libcfs_debug_vmsg2(msgdata, fmt, args,
+ " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
+ "res: "DLDLMRES" rrc: %d type: %s pid: %d "
+ "["LPU64"->"LPU64"] flags: "LPX64" nid: %s "
+ "remote: "LPX64" expref: %d pid: %u timeout: %lu\n",
+ ldlm_lock_to_ns_name(lock), lock,
+ lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+ lock->l_readers, lock->l_writers,
+ ldlm_lockname[lock->l_granted_mode],
+ ldlm_lockname[lock->l_req_mode],
+ PLDLMRES(resource),
+ atomic_read(&resource->lr_refcount),
+ ldlm_typename[resource->lr_type],
+ lock->l_policy_data.l_flock.pid,
+ lock->l_policy_data.l_flock.start,
+ lock->l_policy_data.l_flock.end,
+ lock->l_flags, nid, lock->l_remote_handle.cookie,
+ exp ? atomic_read(&exp->exp_refcount) : -99,
+ lock->l_pid, lock->l_callback_timeout);
+ break;
+
+ case LDLM_IBITS:
+ libcfs_debug_vmsg2(msgdata, fmt, args,
+ " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
+ "res: "DLDLMRES" bits "LPX64" rrc: %d type: %s "
+ "flags: "LPX64" nid: %s remote: "LPX64" expref: %d "
+ "pid: %u timeout: %lu lvb_type: %d\n",
+ ldlm_lock_to_ns_name(lock),
+ lock, lock->l_handle.h_cookie,
+ atomic_read(&lock->l_refc),
+ lock->l_readers, lock->l_writers,
+ ldlm_lockname[lock->l_granted_mode],
+ ldlm_lockname[lock->l_req_mode],
+ PLDLMRES(resource),
+ lock->l_policy_data.l_inodebits.bits,
+ atomic_read(&resource->lr_refcount),
+ ldlm_typename[resource->lr_type],
+ lock->l_flags, nid, lock->l_remote_handle.cookie,
+ exp ? atomic_read(&exp->exp_refcount) : -99,
+ lock->l_pid, lock->l_callback_timeout,
+ lock->l_lvb_type);
+ break;
+
+ default:
+ libcfs_debug_vmsg2(msgdata, fmt, args,
+ " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
+ "res: "DLDLMRES" rrc: %d type: %s flags: "LPX64" "
+ "nid: %s remote: "LPX64" expref: %d pid: %u "
+ "timeout: %lu lvb_type: %d\n",
+ ldlm_lock_to_ns_name(lock),
+ lock, lock->l_handle.h_cookie,
+ atomic_read(&lock->l_refc),
+ lock->l_readers, lock->l_writers,
+ ldlm_lockname[lock->l_granted_mode],
+ ldlm_lockname[lock->l_req_mode],
+ PLDLMRES(resource),
+ atomic_read(&resource->lr_refcount),
+ ldlm_typename[resource->lr_type],
+ lock->l_flags, nid, lock->l_remote_handle.cookie,
+ exp ? atomic_read(&exp->exp_refcount) : -99,
+ lock->l_pid, lock->l_callback_timeout,
+ lock->l_lvb_type);
+ break;
+ }
+ va_end(args);
+}
+EXPORT_SYMBOL(_ldlm_lock_debug);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
new file mode 100644
index 0000000..a100a0b
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -0,0 +1,1218 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2010, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ldlm/ldlm_lockd.c
+ *
+ * Author: Peter Braam <braam@clusterfs.com>
+ * Author: Phil Schwan <phil@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LDLM
+
+# include <linux/libcfs/libcfs.h>
+
+#include <lustre_dlm.h>
+#include <obd_class.h>
+#include <linux/list.h>
+#include "ldlm_internal.h"
+
+static int ldlm_num_threads;
+CFS_MODULE_PARM(ldlm_num_threads, "i", int, 0444,
+ "number of DLM service threads to start");
+
+static char *ldlm_cpts;
+CFS_MODULE_PARM(ldlm_cpts, "s", charp, 0444,
+ "CPU partitions ldlm threads should run on");
+
+extern struct kmem_cache *ldlm_resource_slab;
+extern struct kmem_cache *ldlm_lock_slab;
+static struct mutex ldlm_ref_mutex;
+static int ldlm_refcount;
+
+struct ldlm_cb_async_args {
+ struct ldlm_cb_set_arg *ca_set_arg;
+ struct ldlm_lock *ca_lock;
+};
+
+/* LDLM state */
+
+static struct ldlm_state *ldlm_state;
+
+inline cfs_time_t round_timeout(cfs_time_t timeout)
+{
+ return cfs_time_seconds((int)cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1);
+}
+
+/* timeout for initial callback (AST) reply (bz10399) */
+static inline unsigned int ldlm_get_rq_timeout(void)
+{
+ /* Non-AT value */
+ unsigned int timeout = min(ldlm_timeout, obd_timeout / 3);
+
+ return timeout < 1 ? 1 : timeout;
+}
+
+#define ELT_STOPPED 0
+#define ELT_READY 1
+#define ELT_TERMINATE 2
+
+struct ldlm_bl_pool {
+ spinlock_t blp_lock;
+
+ /*
+ * blp_prio_list is used for callbacks that should be handled
+ * as a priority. It is used for LDLM_FL_DISCARD_DATA requests.
+ * see bug 13843
+ */
+ struct list_head blp_prio_list;
+
+ /*
+ * blp_list is used for all other callbacks which are likely
+ * to take longer to process.
+ */
+ struct list_head blp_list;
+
+ wait_queue_head_t blp_waitq;
+ struct completion blp_comp;
+ atomic_t blp_num_threads;
+ atomic_t blp_busy_threads;
+ int blp_min_threads;
+ int blp_max_threads;
+};
+
+struct ldlm_bl_work_item {
+ struct list_head blwi_entry;
+ struct ldlm_namespace *blwi_ns;
+ struct ldlm_lock_desc blwi_ld;
+ struct ldlm_lock *blwi_lock;
+ struct list_head blwi_head;
+ int blwi_count;
+ struct completion blwi_comp;
+ ldlm_cancel_flags_t blwi_flags;
+ int blwi_mem_pressure;
+};
+
+
+int ldlm_del_waiting_lock(struct ldlm_lock *lock)
+{
+ return 0;
+}
+
+int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout)
+{
+ return 0;
+}
+
+
+
+/**
+ * Callback handler for receiving incoming blocking ASTs.
+ *
+ * This can only happen on client side.
+ */
+void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
+ struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
+{
+ int do_ast;
+
+ LDLM_DEBUG(lock, "client blocking AST callback handler");
+
+ lock_res_and_lock(lock);
+ lock->l_flags |= LDLM_FL_CBPENDING;
+
+ if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
+ lock->l_flags |= LDLM_FL_CANCEL;
+
+ do_ast = (!lock->l_readers && !lock->l_writers);
+ unlock_res_and_lock(lock);
+
+ if (do_ast) {
+ CDEBUG(D_DLMTRACE, "Lock %p already unused, calling callback (%p)\n",
+ lock, lock->l_blocking_ast);
+ if (lock->l_blocking_ast != NULL)
+ lock->l_blocking_ast(lock, ld, lock->l_ast_data,
+ LDLM_CB_BLOCKING);
+ } else {
+ CDEBUG(D_DLMTRACE, "Lock %p is referenced, will be cancelled later\n",
+ lock);
+ }
+
+ LDLM_DEBUG(lock, "client blocking callback handler END");
+ LDLM_LOCK_RELEASE(lock);
+}
+
+/**
+ * Callback handler for receiving incoming completion ASTs.
+ *
+ * This can only happen on the client side.
+ */
+static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
+ struct ldlm_namespace *ns,
+ struct ldlm_request *dlm_req,
+ struct ldlm_lock *lock)
+{
+ int lvb_len;
+ LIST_HEAD(ast_list);
+ int rc = 0;
+
+ LDLM_DEBUG(lock, "client completion callback handler START");
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
+ int to = cfs_time_seconds(1);
+ while (to > 0) {
+ schedule_timeout_and_set_state(
+ TASK_INTERRUPTIBLE, to);
+ if (lock->l_granted_mode == lock->l_req_mode ||
+ lock->l_flags & LDLM_FL_DESTROYED)
+ break;
+ }
+ }
+
+ lvb_len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT);
+ if (lvb_len < 0) {
+ LDLM_ERROR(lock, "Fail to get lvb_len, rc = %d", lvb_len);
+ GOTO(out, rc = lvb_len);
+ } else if (lvb_len > 0) {
+ if (lock->l_lvb_len > 0) {
+ /* for extent lock, lvb contains ost_lvb{}. */
+ LASSERT(lock->l_lvb_data != NULL);
+
+ if (unlikely(lock->l_lvb_len < lvb_len)) {
+ LDLM_ERROR(lock, "Replied LVB is larger than "
+ "expectation, expected = %d, "
+ "replied = %d",
+ lock->l_lvb_len, lvb_len);
+ GOTO(out, rc = -EINVAL);
+ }
+ } else if (ldlm_has_layout(lock)) { /* for layout lock, lvb has
+ * variable length */
+ void *lvb_data;
+
+ OBD_ALLOC(lvb_data, lvb_len);
+ if (lvb_data == NULL) {
+ LDLM_ERROR(lock, "No memory: %d.\n", lvb_len);
+ GOTO(out, rc = -ENOMEM);
+ }
+
+ lock_res_and_lock(lock);
+ LASSERT(lock->l_lvb_data == NULL);
+ lock->l_lvb_data = lvb_data;
+ lock->l_lvb_len = lvb_len;
+ unlock_res_and_lock(lock);
+ }
+ }
+
+ lock_res_and_lock(lock);
+ if ((lock->l_flags & LDLM_FL_DESTROYED) ||
+ lock->l_granted_mode == lock->l_req_mode) {
+ /* bug 11300: the lock has already been granted */
+ unlock_res_and_lock(lock);
+ LDLM_DEBUG(lock, "Double grant race happened");
+ GOTO(out, rc = 0);
+ }
+
+ /* If we receive the completion AST before the actual enqueue returned,
+ * then we might need to switch lock modes, resources, or extents. */
+ if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
+ lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
+ LDLM_DEBUG(lock, "completion AST, new lock mode");
+ }
+
+ if (lock->l_resource->lr_type != LDLM_PLAIN) {
+ ldlm_convert_policy_to_local(req->rq_export,
+ dlm_req->lock_desc.l_resource.lr_type,
+ &dlm_req->lock_desc.l_policy_data,
+ &lock->l_policy_data);
+ LDLM_DEBUG(lock, "completion AST, new policy data");
+ }
+
+ ldlm_resource_unlink_lock(lock);
+ if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
+ &lock->l_resource->lr_name,
+ sizeof(lock->l_resource->lr_name)) != 0) {
+ unlock_res_and_lock(lock);
+ rc = ldlm_lock_change_resource(ns, lock,
+ &dlm_req->lock_desc.l_resource.lr_name);
+ if (rc < 0) {
+ LDLM_ERROR(lock, "Failed to allocate resource");
+ GOTO(out, rc);
+ }
+ LDLM_DEBUG(lock, "completion AST, new resource");
+ CERROR("change resource!\n");
+ lock_res_and_lock(lock);
+ }
+
+ if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
+ /* BL_AST locks are not needed in LRU.
+ * Let ldlm_cancel_lru() be fast. */
+ ldlm_lock_remove_from_lru(lock);
+ lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
+ LDLM_DEBUG(lock, "completion AST includes blocking AST");
+ }
+
+ if (lock->l_lvb_len > 0) {
+ rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_CLIENT,
+ lock->l_lvb_data, lvb_len);
+ if (rc < 0) {
+ unlock_res_and_lock(lock);
+ GOTO(out, rc);
+ }
+ }
+
+ ldlm_grant_lock(lock, &ast_list);
+ unlock_res_and_lock(lock);
+
+ LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
+
+	/* Let the enqueue path call osc_lock_upcall() and initialize
+ * l_ast_data */
+ OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 2);
+
+ ldlm_run_ast_work(ns, &ast_list, LDLM_WORK_CP_AST);
+
+ LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
+ lock);
+ GOTO(out, rc);
+
+out:
+ if (rc < 0) {
+ lock_res_and_lock(lock);
+ lock->l_flags |= LDLM_FL_FAILED;
+ unlock_res_and_lock(lock);
+ wake_up(&lock->l_waitq);
+ }
+ LDLM_LOCK_RELEASE(lock);
+}
+
+/**
+ * Callback handler for receiving incoming glimpse ASTs.
+ *
+ * This can only happen on the client side. After handling the glimpse AST
+ * we also consider dropping the lock here if it is unused locally for a
+ * long time.
+ */
+static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
+ struct ldlm_namespace *ns,
+ struct ldlm_request *dlm_req,
+ struct ldlm_lock *lock)
+{
+ int rc = -ENOSYS;
+
+ LDLM_DEBUG(lock, "client glimpse AST callback handler");
+
+ if (lock->l_glimpse_ast != NULL)
+ rc = lock->l_glimpse_ast(lock, req);
+
+ if (req->rq_repmsg != NULL) {
+ ptlrpc_reply(req);
+ } else {
+ req->rq_status = rc;
+ ptlrpc_error(req);
+ }
+
+ lock_res_and_lock(lock);
+ if (lock->l_granted_mode == LCK_PW &&
+ !lock->l_readers && !lock->l_writers &&
+ cfs_time_after(cfs_time_current(),
+ cfs_time_add(lock->l_last_used,
+ cfs_time_seconds(10)))) {
+ unlock_res_and_lock(lock);
+ if (ldlm_bl_to_thread_lock(ns, NULL, lock))
+ ldlm_handle_bl_callback(ns, NULL, lock);
+
+ return;
+ }
+ unlock_res_and_lock(lock);
+ LDLM_LOCK_RELEASE(lock);
+}
+
+static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
+{
+ if (req->rq_no_reply)
+ return 0;
+
+ req->rq_status = rc;
+ if (!req->rq_packed_final) {
+ rc = lustre_pack_reply(req, 1, NULL, NULL);
+ if (rc)
+ return rc;
+ }
+ return ptlrpc_reply(req);
+}
+
+static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi,
+ ldlm_cancel_flags_t cancel_flags)
+{
+ struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
+
+ spin_lock(&blp->blp_lock);
+ if (blwi->blwi_lock &&
+ blwi->blwi_lock->l_flags & LDLM_FL_DISCARD_DATA) {
+ /* add LDLM_FL_DISCARD_DATA requests to the priority list */
+ list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
+ } else {
+ /* other blocking callbacks are added to the regular list */
+ list_add_tail(&blwi->blwi_entry, &blp->blp_list);
+ }
+ spin_unlock(&blp->blp_lock);
+
+ wake_up(&blp->blp_waitq);
+
+	/* cannot check blwi->blwi_flags as blwi could already be freed in
+	 * LCF_ASYNC mode */
+ if (!(cancel_flags & LCF_ASYNC))
+ wait_for_completion(&blwi->blwi_comp);
+
+ return 0;
+}
+
+static inline void init_blwi(struct ldlm_bl_work_item *blwi,
+ struct ldlm_namespace *ns,
+ struct ldlm_lock_desc *ld,
+ struct list_head *cancels, int count,
+ struct ldlm_lock *lock,
+ ldlm_cancel_flags_t cancel_flags)
+{
+ init_completion(&blwi->blwi_comp);
+ INIT_LIST_HEAD(&blwi->blwi_head);
+
+ if (memory_pressure_get())
+ blwi->blwi_mem_pressure = 1;
+
+ blwi->blwi_ns = ns;
+ blwi->blwi_flags = cancel_flags;
+ if (ld != NULL)
+ blwi->blwi_ld = *ld;
+ if (count) {
+ list_add(&blwi->blwi_head, cancels);
+ list_del_init(cancels);
+ blwi->blwi_count = count;
+ } else {
+ blwi->blwi_lock = lock;
+ }
+}
+
+/**
+ * Queues a list of locks \a cancels containing \a count locks
+ * for later processing by a blocking thread. If \a count is zero,
+ * then the lock referenced as \a lock is queued instead.
+ *
+ * The blocking thread would then call ->l_blocking_ast callback in the lock.
+ * If list addition fails, an error is returned and the caller is supposed
+ * to call ->l_blocking_ast itself.
+ */
+static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
+ struct ldlm_lock_desc *ld,
+ struct ldlm_lock *lock,
+ struct list_head *cancels, int count,
+ ldlm_cancel_flags_t cancel_flags)
+{
+ if (cancels && count == 0)
+ return 0;
+
+ if (cancel_flags & LCF_ASYNC) {
+ struct ldlm_bl_work_item *blwi;
+
+ OBD_ALLOC(blwi, sizeof(*blwi));
+ if (blwi == NULL)
+ return -ENOMEM;
+ init_blwi(blwi, ns, ld, cancels, count, lock, cancel_flags);
+
+ return __ldlm_bl_to_thread(blwi, cancel_flags);
+ } else {
+		/* if it is a synchronous call, do minimal memory allocation,
+		 * as it could be triggered from the kernel shrinker
+ */
+ struct ldlm_bl_work_item blwi;
+
+ memset(&blwi, 0, sizeof(blwi));
+ init_blwi(&blwi, ns, ld, cancels, count, lock, cancel_flags);
+ return __ldlm_bl_to_thread(&blwi, cancel_flags);
+ }
+}
+
+
+int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
+ struct ldlm_lock *lock)
+{
+ return ldlm_bl_to_thread(ns, ld, lock, NULL, 0, LCF_ASYNC);
+}
+
+int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
+ struct list_head *cancels, int count,
+ ldlm_cancel_flags_t cancel_flags)
+{
+ return ldlm_bl_to_thread(ns, ld, NULL, cancels, count, cancel_flags);
+}
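+
+/*
+ * Usage sketch (illustrative only): queue a blocking callback to the
+ * ldlm_bl_* thread pool and fall back to handling it inline if queueing
+ * fails, exactly as the callback handler below does.
+ */
+#if 0
+static void example_queue_blocking(struct ldlm_namespace *ns,
+				   struct ldlm_lock_desc *ld,
+				   struct ldlm_lock *lock)
+{
+	if (ldlm_bl_to_thread_lock(ns, ld, lock))
+		ldlm_handle_bl_callback(ns, ld, lock);
+}
+#endif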
+
+/* Setinfo coming from Server (eg MDT) to Client (eg MDC)! */
+static int ldlm_handle_setinfo(struct ptlrpc_request *req)
+{
+ struct obd_device *obd = req->rq_export->exp_obd;
+ char *key;
+ void *val;
+ int keylen, vallen;
+ int rc = -ENOSYS;
+
+ DEBUG_REQ(D_HSM, req, "%s: handle setinfo\n", obd->obd_name);
+
+ req_capsule_set(&req->rq_pill, &RQF_OBD_SET_INFO);
+
+ key = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
+ if (key == NULL) {
+ DEBUG_REQ(D_IOCTL, req, "no set_info key");
+ return -EFAULT;
+ }
+ keylen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_KEY,
+ RCL_CLIENT);
+ val = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
+ if (val == NULL) {
+ DEBUG_REQ(D_IOCTL, req, "no set_info val");
+ return -EFAULT;
+ }
+ vallen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_VAL,
+ RCL_CLIENT);
+
+ /* We are responsible for swabbing contents of val */
+
+ if (KEY_IS(KEY_HSM_COPYTOOL_SEND))
+ /* Pass it on to mdc (the "export" in this case) */
+ rc = obd_set_info_async(req->rq_svc_thread->t_env,
+ req->rq_export,
+ sizeof(KEY_HSM_COPYTOOL_SEND),
+ KEY_HSM_COPYTOOL_SEND,
+ vallen, val, NULL);
+ else
+ DEBUG_REQ(D_WARNING, req, "ignoring unknown key %s", key);
+
+ return rc;
+}
+
+static inline void ldlm_callback_errmsg(struct ptlrpc_request *req,
+ const char *msg, int rc,
+ struct lustre_handle *handle)
+{
+ DEBUG_REQ((req->rq_no_reply || rc) ? D_WARNING : D_DLMTRACE, req,
+ "%s: [nid %s] [rc %d] [lock "LPX64"]",
+ msg, libcfs_id2str(req->rq_peer), rc,
+ handle ? handle->cookie : 0);
+ if (req->rq_no_reply)
+		CWARN("No reply was sent, possibly causing bug 21636.\n");
+ else if (rc)
+		CWARN("Send reply failed, possibly causing bug 21636.\n");
+}
+
+static int ldlm_handle_qc_callback(struct ptlrpc_request *req)
+{
+ struct obd_quotactl *oqctl;
+ struct client_obd *cli = &req->rq_export->exp_obd->u.cli;
+
+ oqctl = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
+ if (oqctl == NULL) {
+ CERROR("Can't unpack obd_quotactl\n");
+ return -EPROTO;
+ }
+
+ oqctl->qc_stat = ptlrpc_status_ntoh(oqctl->qc_stat);
+
+ cli->cl_qchk_stat = oqctl->qc_stat;
+ return 0;
+}
+
+/* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */
+static int ldlm_callback_handler(struct ptlrpc_request *req)
+{
+ struct ldlm_namespace *ns;
+ struct ldlm_request *dlm_req;
+ struct ldlm_lock *lock;
+ int rc;
+
+ /* Requests arrive in sender's byte order. The ptlrpc service
+ * handler has already checked and, if necessary, byte-swapped the
+ * incoming request message body, but I am responsible for the
+ * message buffers. */
+
+ /* do nothing for sec context finalize */
+ if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI)
+ return 0;
+
+ req_capsule_init(&req->rq_pill, req, RCL_SERVER);
+
+ if (req->rq_export == NULL) {
+ rc = ldlm_callback_reply(req, -ENOTCONN);
+ ldlm_callback_errmsg(req, "Operate on unconnected server",
+ rc, NULL);
+ return 0;
+ }
+
+ LASSERT(req->rq_export != NULL);
+ LASSERT(req->rq_export->exp_obd != NULL);
+
+ switch (lustre_msg_get_opc(req->rq_reqmsg)) {
+ case LDLM_BL_CALLBACK:
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET))
+ return 0;
+ break;
+ case LDLM_CP_CALLBACK:
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CP_CALLBACK_NET))
+ return 0;
+ break;
+ case LDLM_GL_CALLBACK:
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GL_CALLBACK_NET))
+ return 0;
+ break;
+ case LDLM_SET_INFO:
+ rc = ldlm_handle_setinfo(req);
+ ldlm_callback_reply(req, rc);
+ return 0;
+ case OBD_LOG_CANCEL: /* remove this eventually - for 1.4.0 compat */
+ CERROR("shouldn't be handling OBD_LOG_CANCEL on DLM thread\n");
+ req_capsule_set(&req->rq_pill, &RQF_LOG_CANCEL);
+ if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
+ return 0;
+ rc = llog_origin_handle_cancel(req);
+ if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_REP))
+ return 0;
+ ldlm_callback_reply(req, rc);
+ return 0;
+ case LLOG_ORIGIN_HANDLE_CREATE:
+ req_capsule_set(&req->rq_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
+ if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
+ return 0;
+ rc = llog_origin_handle_open(req);
+ ldlm_callback_reply(req, rc);
+ return 0;
+ case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
+ req_capsule_set(&req->rq_pill,
+ &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
+ if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
+ return 0;
+ rc = llog_origin_handle_next_block(req);
+ ldlm_callback_reply(req, rc);
+ return 0;
+ case LLOG_ORIGIN_HANDLE_READ_HEADER:
+ req_capsule_set(&req->rq_pill,
+ &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
+ if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
+ return 0;
+ rc = llog_origin_handle_read_header(req);
+ ldlm_callback_reply(req, rc);
+ return 0;
+ case LLOG_ORIGIN_HANDLE_CLOSE:
+ if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
+ return 0;
+ rc = llog_origin_handle_close(req);
+ ldlm_callback_reply(req, rc);
+ return 0;
+ case OBD_QC_CALLBACK:
+ req_capsule_set(&req->rq_pill, &RQF_QC_CALLBACK);
+ if (OBD_FAIL_CHECK(OBD_FAIL_OBD_QC_CALLBACK_NET))
+ return 0;
+ rc = ldlm_handle_qc_callback(req);
+ ldlm_callback_reply(req, rc);
+ return 0;
+ default:
+ CERROR("unknown opcode %u\n",
+ lustre_msg_get_opc(req->rq_reqmsg));
+ ldlm_callback_reply(req, -EPROTO);
+ return 0;
+ }
+
+ ns = req->rq_export->exp_obd->obd_namespace;
+ LASSERT(ns != NULL);
+
+ req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
+
+ dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
+ if (dlm_req == NULL) {
+ rc = ldlm_callback_reply(req, -EPROTO);
+ ldlm_callback_errmsg(req, "Operate without parameter", rc,
+ NULL);
+ return 0;
+ }
+
+ /* Force a known safe race, send a cancel to the server for a lock
+ * which the server has already started a blocking callback on. */
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) &&
+ lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
+ rc = ldlm_cli_cancel(&dlm_req->lock_handle[0], 0);
+ if (rc < 0)
+ CERROR("ldlm_cli_cancel: %d\n", rc);
+ }
+
+ lock = ldlm_handle2lock_long(&dlm_req->lock_handle[0], 0);
+ if (!lock) {
+ CDEBUG(D_DLMTRACE, "callback on lock "LPX64" - lock "
+ "disappeared\n", dlm_req->lock_handle[0].cookie);
+ rc = ldlm_callback_reply(req, -EINVAL);
+ ldlm_callback_errmsg(req, "Operate with invalid parameter", rc,
+ &dlm_req->lock_handle[0]);
+ return 0;
+ }
+
+ if ((lock->l_flags & LDLM_FL_FAIL_LOC) &&
+ lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
+ OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
+
+ /* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
+ lock_res_and_lock(lock);
+ lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
+ LDLM_AST_FLAGS);
+ if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
+ /* If somebody cancels lock and cache is already dropped,
+ * or lock is failed before cp_ast received on client,
+ * we can tell the server we have no lock. Otherwise, we
+ * should send cancel after dropping the cache. */
+ if (((lock->l_flags & LDLM_FL_CANCELING) &&
+ (lock->l_flags & LDLM_FL_BL_DONE)) ||
+ (lock->l_flags & LDLM_FL_FAILED)) {
+ LDLM_DEBUG(lock, "callback on lock "
+ LPX64" - lock disappeared\n",
+ dlm_req->lock_handle[0].cookie);
+ unlock_res_and_lock(lock);
+ LDLM_LOCK_RELEASE(lock);
+ rc = ldlm_callback_reply(req, -EINVAL);
+ ldlm_callback_errmsg(req, "Operate on stale lock", rc,
+ &dlm_req->lock_handle[0]);
+ return 0;
+ }
+ /* BL_AST locks are not needed in LRU.
+ * Let ldlm_cancel_lru() be fast. */
+ ldlm_lock_remove_from_lru(lock);
+ lock->l_flags |= LDLM_FL_BL_AST;
+ }
+ unlock_res_and_lock(lock);
+
+ /* We want the ost thread to get this reply so that it can respond
+ * to ost requests (write cache writeback) that might be triggered
+ * in the callback.
+ *
+ * But we'd also like to be able to indicate in the reply that we're
+ * cancelling right now, because it's unused, or have an intent result
+ * in the reply, so we might have to push the responsibility for sending
+ * the reply down into the AST handlers, alas. */
+
+ switch (lustre_msg_get_opc(req->rq_reqmsg)) {
+ case LDLM_BL_CALLBACK:
+ CDEBUG(D_INODE, "blocking ast\n");
+ req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
+ if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)) {
+ rc = ldlm_callback_reply(req, 0);
+ if (req->rq_no_reply || rc)
+ ldlm_callback_errmsg(req, "Normal process", rc,
+ &dlm_req->lock_handle[0]);
+ }
+ if (ldlm_bl_to_thread_lock(ns, &dlm_req->lock_desc, lock))
+ ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
+ break;
+ case LDLM_CP_CALLBACK:
+ CDEBUG(D_INODE, "completion ast\n");
+ req_capsule_extend(&req->rq_pill, &RQF_LDLM_CP_CALLBACK);
+ ldlm_callback_reply(req, 0);
+ ldlm_handle_cp_callback(req, ns, dlm_req, lock);
+ break;
+ case LDLM_GL_CALLBACK:
+ CDEBUG(D_INODE, "glimpse ast\n");
+ req_capsule_extend(&req->rq_pill, &RQF_LDLM_GL_CALLBACK);
+ ldlm_handle_gl_callback(req, ns, dlm_req, lock);
+ break;
+ default:
+ LBUG(); /* checked above */
+ }
+
+ return 0;
+}
+
+
+static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
+{
+ struct ldlm_bl_work_item *blwi = NULL;
+ static unsigned int num_bl = 0;
+
+ spin_lock(&blp->blp_lock);
+	/* process a request from the blp_list at least once every
+	 * blp_num_threads iterations */
+ if (!list_empty(&blp->blp_list) &&
+ (list_empty(&blp->blp_prio_list) || num_bl == 0))
+ blwi = list_entry(blp->blp_list.next,
+ struct ldlm_bl_work_item, blwi_entry);
+ else
+ if (!list_empty(&blp->blp_prio_list))
+ blwi = list_entry(blp->blp_prio_list.next,
+ struct ldlm_bl_work_item,
+ blwi_entry);
+
+ if (blwi) {
+ if (++num_bl >= atomic_read(&blp->blp_num_threads))
+ num_bl = 0;
+ list_del(&blwi->blwi_entry);
+ }
+ spin_unlock(&blp->blp_lock);
+
+ return blwi;
+}
+
+/* This only contains temporary data until the thread starts */
+struct ldlm_bl_thread_data {
+ char bltd_name[CFS_CURPROC_COMM_MAX];
+ struct ldlm_bl_pool *bltd_blp;
+ struct completion bltd_comp;
+ int bltd_num;
+};
+
+static int ldlm_bl_thread_main(void *arg);
+
+static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
+{
+ struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
+ struct task_struct *task;
+
+ init_completion(&bltd.bltd_comp);
+ bltd.bltd_num = atomic_read(&blp->blp_num_threads);
+ snprintf(bltd.bltd_name, sizeof(bltd.bltd_name),
+ "ldlm_bl_%02d", bltd.bltd_num);
+ task = kthread_run(ldlm_bl_thread_main, &bltd, "%s", bltd.bltd_name);
+ if (IS_ERR(task)) {
+ CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %ld\n",
+ atomic_read(&blp->blp_num_threads), PTR_ERR(task));
+ return PTR_ERR(task);
+ }
+ wait_for_completion(&bltd.bltd_comp);
+
+ return 0;
+}
+
+/**
+ * Main blocking requests processing thread.
+ *
+ * Callers put locks into its queue by calling ldlm_bl_to_thread.
+ * This thread in the end ends up doing actual call to ->l_blocking_ast
+ * for queued locks.
+ */
+static int ldlm_bl_thread_main(void *arg)
+{
+ struct ldlm_bl_pool *blp;
+
+ {
+ struct ldlm_bl_thread_data *bltd = arg;
+
+ blp = bltd->bltd_blp;
+
+ atomic_inc(&blp->blp_num_threads);
+ atomic_inc(&blp->blp_busy_threads);
+
+ complete(&bltd->bltd_comp);
+ /* cannot use bltd after this, it is only on caller's stack */
+ }
+
+ while (1) {
+ struct l_wait_info lwi = { 0 };
+ struct ldlm_bl_work_item *blwi = NULL;
+ int busy;
+
+ blwi = ldlm_bl_get_work(blp);
+
+ if (blwi == NULL) {
+ atomic_dec(&blp->blp_busy_threads);
+ l_wait_event_exclusive(blp->blp_waitq,
+ (blwi = ldlm_bl_get_work(blp)) != NULL,
+ &lwi);
+ busy = atomic_inc_return(&blp->blp_busy_threads);
+ } else {
+ busy = atomic_read(&blp->blp_busy_threads);
+ }
+
+ if (blwi->blwi_ns == NULL)
+ /* added by ldlm_cleanup() */
+ break;
+
+ /* Not fatal if racy and have a few too many threads */
+ if (unlikely(busy < blp->blp_max_threads &&
+ busy >= atomic_read(&blp->blp_num_threads) &&
+ !blwi->blwi_mem_pressure))
+ /* discard the return value, we tried */
+ ldlm_bl_thread_start(blp);
+
+ if (blwi->blwi_mem_pressure)
+ memory_pressure_set();
+
+ if (blwi->blwi_count) {
+ int count;
+ /* The special case when we cancel locks in LRU
+ * asynchronously, we pass the list of locks here.
+ * Thus locks are marked LDLM_FL_CANCELING, but NOT
+ * canceled locally yet. */
+ count = ldlm_cli_cancel_list_local(&blwi->blwi_head,
+ blwi->blwi_count,
+ LCF_BL_AST);
+ ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL,
+ blwi->blwi_flags);
+ } else {
+ ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
+ blwi->blwi_lock);
+ }
+ if (blwi->blwi_mem_pressure)
+ memory_pressure_clr();
+
+ if (blwi->blwi_flags & LCF_ASYNC)
+ OBD_FREE(blwi, sizeof(*blwi));
+ else
+ complete(&blwi->blwi_comp);
+ }
+
+ atomic_dec(&blp->blp_busy_threads);
+ atomic_dec(&blp->blp_num_threads);
+ complete(&blp->blp_comp);
+ return 0;
+}
+
+
+static int ldlm_setup(void);
+static int ldlm_cleanup(void);
+
+int ldlm_get_ref(void)
+{
+ int rc = 0;
+
+ mutex_lock(&ldlm_ref_mutex);
+ if (++ldlm_refcount == 1) {
+ rc = ldlm_setup();
+ if (rc)
+ ldlm_refcount--;
+ }
+ mutex_unlock(&ldlm_ref_mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL(ldlm_get_ref);
+
+void ldlm_put_ref(void)
+{
+ mutex_lock(&ldlm_ref_mutex);
+ if (ldlm_refcount == 1) {
+ int rc = ldlm_cleanup();
+ if (rc)
+ CERROR("ldlm_cleanup failed: %d\n", rc);
+ else
+ ldlm_refcount--;
+ } else {
+ ldlm_refcount--;
+ }
+ mutex_unlock(&ldlm_ref_mutex);
+}
+EXPORT_SYMBOL(ldlm_put_ref);
+
+/*
+ * Export handle<->lock hash operations.
+ */
+static unsigned
+ldlm_export_lock_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+{
+ return cfs_hash_u64_hash(((struct lustre_handle *)key)->cookie, mask);
+}
+
+static void *
+ldlm_export_lock_key(struct hlist_node *hnode)
+{
+ struct ldlm_lock *lock;
+
+ lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
+ return &lock->l_remote_handle;
+}
+
+static void
+ldlm_export_lock_keycpy(struct hlist_node *hnode, void *key)
+{
+ struct ldlm_lock *lock;
+
+ lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
+ lock->l_remote_handle = *(struct lustre_handle *)key;
+}
+
+static int
+ldlm_export_lock_keycmp(const void *key, struct hlist_node *hnode)
+{
+ return lustre_handle_equal(ldlm_export_lock_key(hnode), key);
+}
+
+static void *
+ldlm_export_lock_object(struct hlist_node *hnode)
+{
+ return hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
+}
+
+static void
+ldlm_export_lock_get(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+ struct ldlm_lock *lock;
+
+ lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
+ LDLM_LOCK_GET(lock);
+}
+
+static void
+ldlm_export_lock_put(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+ struct ldlm_lock *lock;
+
+ lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
+ LDLM_LOCK_RELEASE(lock);
+}
+
+static cfs_hash_ops_t ldlm_export_lock_ops = {
+ .hs_hash = ldlm_export_lock_hash,
+ .hs_key = ldlm_export_lock_key,
+ .hs_keycmp = ldlm_export_lock_keycmp,
+ .hs_keycpy = ldlm_export_lock_keycpy,
+ .hs_object = ldlm_export_lock_object,
+ .hs_get = ldlm_export_lock_get,
+ .hs_put = ldlm_export_lock_put,
+ .hs_put_locked = ldlm_export_lock_put,
+};
+
+int ldlm_init_export(struct obd_export *exp)
+{
+ exp->exp_lock_hash =
+ cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
+ HASH_EXP_LOCK_CUR_BITS,
+ HASH_EXP_LOCK_MAX_BITS,
+ HASH_EXP_LOCK_BKT_BITS, 0,
+ CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
+ &ldlm_export_lock_ops,
+ CFS_HASH_DEFAULT | CFS_HASH_REHASH_KEY |
+ CFS_HASH_NBLK_CHANGE);
+
+ if (!exp->exp_lock_hash)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL(ldlm_init_export);
+
+void ldlm_destroy_export(struct obd_export *exp)
+{
+ cfs_hash_putref(exp->exp_lock_hash);
+ exp->exp_lock_hash = NULL;
+
+ ldlm_destroy_flock_export(exp);
+}
+EXPORT_SYMBOL(ldlm_destroy_export);
+
+static int ldlm_setup(void)
+{
+ static struct ptlrpc_service_conf conf;
+ struct ldlm_bl_pool *blp = NULL;
+ int rc = 0;
+ int i;
+
+ if (ldlm_state != NULL)
+ return -EALREADY;
+
+ OBD_ALLOC(ldlm_state, sizeof(*ldlm_state));
+ if (ldlm_state == NULL)
+ return -ENOMEM;
+
+#ifdef LPROCFS
+ rc = ldlm_proc_setup();
+ if (rc != 0)
+ GOTO(out, rc);
+#endif
+
+ memset(&conf, 0, sizeof(conf));
+ conf = (typeof(conf)) {
+ .psc_name = "ldlm_cbd",
+ .psc_watchdog_factor = 2,
+ .psc_buf = {
+ .bc_nbufs = LDLM_CLIENT_NBUFS,
+ .bc_buf_size = LDLM_BUFSIZE,
+ .bc_req_max_size = LDLM_MAXREQSIZE,
+ .bc_rep_max_size = LDLM_MAXREPSIZE,
+ .bc_req_portal = LDLM_CB_REQUEST_PORTAL,
+ .bc_rep_portal = LDLM_CB_REPLY_PORTAL,
+ },
+ .psc_thr = {
+ .tc_thr_name = "ldlm_cb",
+ .tc_thr_factor = LDLM_THR_FACTOR,
+ .tc_nthrs_init = LDLM_NTHRS_INIT,
+ .tc_nthrs_base = LDLM_NTHRS_BASE,
+ .tc_nthrs_max = LDLM_NTHRS_MAX,
+ .tc_nthrs_user = ldlm_num_threads,
+ .tc_cpu_affinity = 1,
+ .tc_ctx_tags = LCT_MD_THREAD | LCT_DT_THREAD,
+ },
+ .psc_cpt = {
+ .cc_pattern = ldlm_cpts,
+ },
+ .psc_ops = {
+ .so_req_handler = ldlm_callback_handler,
+ },
+ };
+ ldlm_state->ldlm_cb_service =
+ ptlrpc_register_service(&conf, ldlm_svc_proc_dir);
+ if (IS_ERR(ldlm_state->ldlm_cb_service)) {
+ CERROR("failed to start service\n");
+ rc = PTR_ERR(ldlm_state->ldlm_cb_service);
+ ldlm_state->ldlm_cb_service = NULL;
+ GOTO(out, rc);
+ }
+
+
+ OBD_ALLOC(blp, sizeof(*blp));
+ if (blp == NULL)
+ GOTO(out, rc = -ENOMEM);
+ ldlm_state->ldlm_bl_pool = blp;
+
+ spin_lock_init(&blp->blp_lock);
+ INIT_LIST_HEAD(&blp->blp_list);
+ INIT_LIST_HEAD(&blp->blp_prio_list);
+ init_waitqueue_head(&blp->blp_waitq);
+ atomic_set(&blp->blp_num_threads, 0);
+ atomic_set(&blp->blp_busy_threads, 0);
+
+ if (ldlm_num_threads == 0) {
+ blp->blp_min_threads = LDLM_NTHRS_INIT;
+ blp->blp_max_threads = LDLM_NTHRS_MAX;
+ } else {
+ blp->blp_min_threads = blp->blp_max_threads =
+ min_t(int, LDLM_NTHRS_MAX, max_t(int, LDLM_NTHRS_INIT,
+ ldlm_num_threads));
+ }
+
+ for (i = 0; i < blp->blp_min_threads; i++) {
+ rc = ldlm_bl_thread_start(blp);
+ if (rc < 0)
+ GOTO(out, rc);
+ }
+
+
+ rc = ldlm_pools_init();
+ if (rc) {
+ CERROR("Failed to initialize LDLM pools: %d\n", rc);
+ GOTO(out, rc);
+ }
+ return 0;
+
+ out:
+ ldlm_cleanup();
+ return rc;
+}
+
+static int ldlm_cleanup(void)
+{
+ if (!list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
+ !list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
+ CERROR("ldlm still has namespaces; clean these up first.\n");
+ ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
+ ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
+ return -EBUSY;
+ }
+
+ ldlm_pools_fini();
+
+ if (ldlm_state->ldlm_bl_pool != NULL) {
+ struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
+
+ while (atomic_read(&blp->blp_num_threads) > 0) {
+ struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };
+
+ init_completion(&blp->blp_comp);
+
+ spin_lock(&blp->blp_lock);
+ list_add_tail(&blwi.blwi_entry, &blp->blp_list);
+ wake_up(&blp->blp_waitq);
+ spin_unlock(&blp->blp_lock);
+
+ wait_for_completion(&blp->blp_comp);
+ }
+
+ OBD_FREE(blp, sizeof(*blp));
+ }
+
+ if (ldlm_state->ldlm_cb_service != NULL)
+ ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
+
+ ldlm_proc_cleanup();
+
+
+ OBD_FREE(ldlm_state, sizeof(*ldlm_state));
+ ldlm_state = NULL;
+
+ return 0;
+}
+
+int ldlm_init(void)
+{
+ mutex_init(&ldlm_ref_mutex);
+ mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
+ mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
+ ldlm_resource_slab = kmem_cache_create("ldlm_resources",
+ sizeof(struct ldlm_resource), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (ldlm_resource_slab == NULL)
+ return -ENOMEM;
+
+ ldlm_lock_slab = kmem_cache_create("ldlm_locks",
+ sizeof(struct ldlm_lock), 0,
+ SLAB_HWCACHE_ALIGN | SLAB_DESTROY_BY_RCU, NULL);
+ if (ldlm_lock_slab == NULL) {
+ kmem_cache_destroy(ldlm_resource_slab);
+ return -ENOMEM;
+ }
+
+ ldlm_interval_slab = kmem_cache_create("interval_node",
+ sizeof(struct ldlm_interval),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (ldlm_interval_slab == NULL) {
+ kmem_cache_destroy(ldlm_resource_slab);
+ kmem_cache_destroy(ldlm_lock_slab);
+ return -ENOMEM;
+ }
+#if LUSTRE_TRACKS_LOCK_EXP_REFS
+ class_export_dump_hook = ldlm_dump_export_locks;
+#endif
+ return 0;
+}
+
+void ldlm_exit(void)
+{
+ if (ldlm_refcount)
+ CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
+ kmem_cache_destroy(ldlm_resource_slab);
+ /* ldlm_lock_put() uses RCU to call ldlm_lock_free(), so we need to
+ * call synchronize_rcu() to wait for a grace period to elapse, so
+ * that ldlm_lock_free() gets a chance to be called. */
+ synchronize_rcu();
+ kmem_cache_destroy(ldlm_lock_slab);
+ kmem_cache_destroy(ldlm_interval_slab);
+}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c b/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
new file mode 100644
index 0000000..ec29e28
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
@@ -0,0 +1,72 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ldlm/ldlm_plain.c
+ *
+ * Author: Peter Braam <braam@clusterfs.com>
+ * Author: Phil Schwan <phil@clusterfs.com>
+ */
+
+/**
+ * This file contains implementation of PLAIN lock type.
+ *
+ * PLAIN locks are the simplest form of LDLM locking, and are used when
+ * there only needs to be a single lock on a resource. This avoids some
+ * of the complexity of EXTENT and IBITS lock types, but doesn't allow
+ * different "parts" of a resource to be locked concurrently. Example
+ * use cases for PLAIN locks include locking of MGS configuration logs
+ * and (as of Lustre 2.4) quota records.
+ */
+
+#define DEBUG_SUBSYSTEM S_LDLM
+
+#include <lustre_dlm.h>
+#include <obd_support.h>
+#include <lustre_lib.h>
+
+#include "ldlm_internal.h"
+
+
+void ldlm_plain_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
+ ldlm_policy_data_t *lpolicy)
+{
+ /* No policy for plain locks */
+}
+
+void ldlm_plain_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
+ ldlm_wire_policy_data_t *wpolicy)
+{
+ /* No policy for plain locks */
+}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
new file mode 100644
index 0000000..0025ee6
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -0,0 +1,1425 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2010, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ldlm/ldlm_pool.c
+ *
+ * Author: Yury Umanets <umka@clusterfs.com>
+ */
+
+/*
+ * The idea of this code is rather simple. Each second, for each server
+ * namespace, we compute the SLV - the server lock volume - from the current
+ * number of granted locks, the grant speed over the past period, etc.; that
+ * is, the locking load. For simplicity, this SLV number may be thought of as
+ * a flow definition. It is sent to clients at every opportunity to let them
+ * know the current load situation on the server. By default, at startup, the
+ * SLV on the server is set to a maximum value calculated as follows: allow
+ * one client to hold all ->pl_limit locks for 10 hours.
+ *
+ * Next, on clients, the number of cached locks is no longer limited
+ * artificially as it was before. Instead, the client calculates the CLV, the
+ * client lock volume, for each lock and compares it with the last SLV
+ * received from the server. The CLV is calculated as the number of locks in
+ * the LRU * the lock's live time in seconds. If CLV > SLV, the lock is
+ * canceled.
+ *
+ * The client also has an LVF, a lock volume factor, which regulates how
+ * sensitive the client should be to the last SLV from the server. The higher
+ * the LVF, the more locks will be canceled on the client. Its default value
+ * is 1; setting LVF to 2 means the client will cancel locks twice as fast.
+ *
+ * Locks on a client will be canceled more intensively in these cases:
+ * (1) if the SLV is smaller, that is, the load is higher on the server;
+ * (2) the client has a lot of locks (the more locks a client holds, the
+ * higher the chance that some of them should be canceled);
+ * (3) the client has old locks (taken some time ago).
+ *
+ * Thus, in the flow paradigm we use to better understand the SLV, the CLV is
+ * the volume of a particle in the flow described by the SLV. Accordingly, as
+ * the flow gets thinner, more and more particles fall outside of it, and
+ * since particles are locks, they should be canceled.
+ *
+ * The general idea belongs to Vitaly Fertman (vitaly@clusterfs.com). Andreas
+ * Dilger (adilger@clusterfs.com) proposed several nice ideas, such as using
+ * the LVF, and many cleanups. The flow definition, which makes the logic
+ * easier to understand, belongs to Nikita Danilov (nikita@clusterfs.com), as
+ * do many cleanups and fixes. The design and implementation are by Yury
+ * Umanets (umka@clusterfs.com).
+ *
+ * Glossary for terms used:
+ *
+ * pl_limit - Number of allowed locks in pool. Applies to server and client
+ * side (tunable);
+ *
+ * pl_granted - Number of granted locks (calculated);
+ * pl_grant_rate - Number of granted locks for last T (calculated);
+ * pl_cancel_rate - Number of canceled locks for last T (calculated);
+ * pl_grant_speed - Grant speed (GR - CR) for last T (calculated);
+ * pl_grant_plan - Planned number of granted locks for next T (calculated);
+ * pl_server_lock_volume - Current server lock volume (calculated);
+ *
+ * As can be seen from the list above, we have a few tunables which may affect
+ * behavior significantly. They can all be modified via proc. They also make
+ * it possible to construct a few pre-defined behavior policies; if none of
+ * the predefined policies suits the working pattern in use, a new one may be
+ * "constructed" via the proc tunables.
+ */
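To make the CLV rule described above concrete, here is a minimal standalone sketch of the comparison it performs (illustrative only, not part of this patch; the toy_* names and structure are simplified stand-ins, not the real Lustre types):

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-ins; not the real Lustre structures. */
struct toy_client_pool {
	uint64_t slv;		/* last server lock volume received */
	uint32_t lvf;		/* lock volume factor, default 1 */
	unsigned int nr_lru;	/* locks currently sitting in the LRU */
};

/* CLV as described above: LRU length * lock age (s), scaled by the LVF. */
static uint64_t toy_clv(const struct toy_client_pool *p, unsigned int age_sec)
{
	return (uint64_t)p->nr_lru * age_sec * p->lvf;
}

/* Cancel a cached lock once its CLV outgrows the last SLV from the server. */
static bool toy_should_cancel(const struct toy_client_pool *p,
			      unsigned int age_sec)
{
	return toy_clv(p, age_sec) > p->slv;
}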
+
+#define DEBUG_SUBSYSTEM S_LDLM
+
+# include <lustre_dlm.h>
+
+#include <cl_object.h>
+
+#include <obd_class.h>
+#include <obd_support.h>
+#include "ldlm_internal.h"
+
+
+/*
+ * 50 ldlm locks for 1MB of RAM.
+ */
+#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50)
+
+/*
+ * Maximal possible grant step plan in %.
+ */
+#define LDLM_POOL_MAX_GSP (30)
+
+/*
+ * Minimal possible grant step plan in %.
+ */
+#define LDLM_POOL_MIN_GSP (1)
+
+/*
+ * This controls the speed of reaching LDLM_POOL_MAX_GSP
+ * with increasing thread period.
+ */
+#define LDLM_POOL_GSP_STEP_SHIFT (2)
+
+/*
+ * LDLM_POOL_GSP% of all locks is default GP.
+ */
+#define LDLM_POOL_GP(L) (((L) * LDLM_POOL_MAX_GSP) / 100)
+
+/*
+ * Max age for locks on clients.
+ */
+#define LDLM_POOL_MAX_AGE (36000)
+
+/*
+ * The granularity of SLV calculation.
+ */
+#define LDLM_POOL_SLV_SHIFT (10)
+
+extern struct proc_dir_entry *ldlm_ns_proc_dir;
+
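+/* Divide \a val by 2^shift, rounding up when \a round_up is non-zero. */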
+static inline __u64 dru(__u64 val, __u32 shift, int round_up)
+{
+ return (val + (round_up ? (1 << shift) - 1 : 0)) >> shift;
+}
+
+static inline __u64 ldlm_pool_slv_max(__u32 L)
+{
+ /*
+ * Allow one client to hold all locks for 10 hours.
+ * The formula is: limit * 10h / 1 client.
+ */
+ __u64 lim = (__u64)L * LDLM_POOL_MAX_AGE / 1;
+ return lim;
+}
+
+static inline __u64 ldlm_pool_slv_min(__u32 L)
+{
+ return 1;
+}
+
+enum {
+ LDLM_POOL_FIRST_STAT = 0,
+ LDLM_POOL_GRANTED_STAT = LDLM_POOL_FIRST_STAT,
+ LDLM_POOL_GRANT_STAT,
+ LDLM_POOL_CANCEL_STAT,
+ LDLM_POOL_GRANT_RATE_STAT,
+ LDLM_POOL_CANCEL_RATE_STAT,
+ LDLM_POOL_GRANT_PLAN_STAT,
+ LDLM_POOL_SLV_STAT,
+ LDLM_POOL_SHRINK_REQTD_STAT,
+ LDLM_POOL_SHRINK_FREED_STAT,
+ LDLM_POOL_RECALC_STAT,
+ LDLM_POOL_TIMING_STAT,
+ LDLM_POOL_LAST_STAT
+};
+
+static inline struct ldlm_namespace *ldlm_pl2ns(struct ldlm_pool *pl)
+{
+ return container_of(pl, struct ldlm_namespace, ns_pool);
+}
+
+/**
+ * Calculates the suggested grant_step as a % of available locks for the
+ * passed thread period \a t. This is later used in grant_plan calculations.
+ */
+static inline int ldlm_pool_t2gsp(unsigned int t)
+{
+ /*
+ * This yields a 1% grant step for anything below LDLM_POOL_GSP_STEP
+ * and up to 30% for anything higher than LDLM_POOL_GSP_STEP
+ * (see the worked example after this function).
+ *
+ * How this affects execution:
+ *
+ * - for a thread period of 1s we get a grant_step of 1%, which is good
+ * from the point of view of taking some load off the server and pushing
+ * it out to clients. A 1% grant_step means the server will not allow
+ * clients to take lots of locks in a short period of time while keeping
+ * all their old locks cached; clients will always have to give some
+ * locks back if they want to take new ones;
+ *
+ * - for a thread period of 10s (the default) we get 23%, which means
+ * clients have enough room to take new locks without giving any back.
+ * All locks from this 23% that were not taken by clients in the current
+ * period contribute to SLV growth. SLV growth means more locks are
+ * cached on clients, until the limit or the grant plan is reached.
+ */
+ return LDLM_POOL_MAX_GSP -
+ ((LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) >>
+ (t >> LDLM_POOL_GSP_STEP_SHIFT));
+}
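A quick check of the formula above, as a standalone sketch (not part of the patch; the TOY_* constants mirror the macros defined earlier in this file): a 1 s thread period yields a 1% grant step and the default 10 s period yields 23%, matching the comment in the function.

#include <stdio.h>

#define TOY_MAX_GSP		30	/* mirrors LDLM_POOL_MAX_GSP */
#define TOY_MIN_GSP		1	/* mirrors LDLM_POOL_MIN_GSP */
#define TOY_GSP_STEP_SHIFT	2	/* mirrors LDLM_POOL_GSP_STEP_SHIFT */

/* Same arithmetic as ldlm_pool_t2gsp(), kept standalone for illustration. */
static int toy_t2gsp(unsigned int t)
{
	return TOY_MAX_GSP -
	       ((TOY_MAX_GSP - TOY_MIN_GSP) >> (t >> TOY_GSP_STEP_SHIFT));
}

int main(void)
{
	printf("t=1s  -> %d%%\n", toy_t2gsp(1));	/* 30 - (29 >> 0) = 1  */
	printf("t=10s -> %d%%\n", toy_t2gsp(10));	/* 30 - (29 >> 2) = 23 */
	return 0;
}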
+
+/**
+ * Recalculates next grant limit on passed \a pl.
+ *
+ * \pre ->pl_lock is locked.
+ */
+static void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl)
+{
+ int granted, grant_step, limit;
+
+ limit = ldlm_pool_get_limit(pl);
+ granted = atomic_read(&pl->pl_granted);
+
+ grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
+ grant_step = ((limit - granted) * grant_step) / 100;
+ pl->pl_grant_plan = granted + grant_step;
+ limit = (limit * 5) >> 2;
+ if (pl->pl_grant_plan > limit)
+ pl->pl_grant_plan = limit;
+}
+
+/**
+ * Recalculates next SLV on passed \a pl.
+ *
+ * \pre ->pl_lock is locked.
+ */
+static void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
+{
+ int granted;
+ int grant_plan;
+ int round_up;
+ __u64 slv;
+ __u64 slv_factor;
+ __u64 grant_usage;
+ __u32 limit;
+
+ slv = pl->pl_server_lock_volume;
+ grant_plan = pl->pl_grant_plan;
+ limit = ldlm_pool_get_limit(pl);
+ granted = atomic_read(&pl->pl_granted);
+ round_up = granted < limit;
+
+ grant_usage = max_t(int, limit - (granted - grant_plan), 1);
+
+ /*
+ * Find out the SLV change factor, which is the ratio of grant usage
+ * to the limit. The SLV changes as fast as the ratio of grant plan
+ * consumption: the more of the grant plan is left unconsumed by
+ * clients in the last interval (idle time), the faster the SLV grows;
+ * conversely, the more the grant plan is over-consumed (load time),
+ * the faster the SLV drops. A numeric example follows this function.
+ */
+ slv_factor = (grant_usage << LDLM_POOL_SLV_SHIFT);
+ do_div(slv_factor, limit);
+ slv = slv * slv_factor;
+ slv = dru(slv, LDLM_POOL_SLV_SHIFT, round_up);
+
+ if (slv > ldlm_pool_slv_max(limit)) {
+ slv = ldlm_pool_slv_max(limit);
+ } else if (slv < ldlm_pool_slv_min(limit)) {
+ slv = ldlm_pool_slv_min(limit);
+ }
+
+ pl->pl_server_lock_volume = slv;
+}
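A rough numeric illustration of the recalculation above, as a standalone sketch with made-up numbers (not part of the patch): with limit = 100, granted = 80 and grant_plan = 90, grant_usage = 100 - (80 - 90) = 110, so the SLV grows by roughly 10% this period; had the plan been over-consumed instead, grant_usage would fall below the limit and the SLV would shrink.

#include <stdio.h>

/* The same arithmetic as ldlm_pool_recalc_slv(), run with toy numbers. */
int main(void)
{
	int limit = 100, granted = 80, grant_plan = 90;
	int shift = 10;				/* LDLM_POOL_SLV_SHIFT */
	unsigned long long slv = 1000;
	long long grant_usage;
	unsigned long long slv_factor;

	/* Clients left part of the grant plan unused -> usage above limit. */
	grant_usage = limit - (granted - grant_plan);	/* 110 */
	if (grant_usage < 1)
		grant_usage = 1;

	slv_factor = ((unsigned long long)grant_usage << shift) / limit;
	slv = (slv * slv_factor + (1 << shift) - 1) >> shift;	/* round up */

	printf("new slv = %llu\n", slv);	/* ~1100: idle period, SLV grew ~10% */
	return 0;
}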
+
+/**
+ * Recalculates next stats on passed \a pl.
+ *
+ * \pre ->pl_lock is locked.
+ */
+static void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
+{
+ int grant_plan = pl->pl_grant_plan;
+ __u64 slv = pl->pl_server_lock_volume;
+ int granted = atomic_read(&pl->pl_granted);
+ int grant_rate = atomic_read(&pl->pl_grant_rate);
+ int cancel_rate = atomic_read(&pl->pl_cancel_rate);
+
+ lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
+ slv);
+ lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
+ granted);
+ lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
+ grant_rate);
+ lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
+ grant_plan);
+ lprocfs_counter_add(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
+ cancel_rate);
+}
+
+/**
+ * Sets current SLV into obd accessible via ldlm_pl2ns(pl)->ns_obd.
+ */
+static void ldlm_srv_pool_push_slv(struct ldlm_pool *pl)
+{
+ struct obd_device *obd;
+
+ /*
+ * Set the new SLV in the obd field so it can be used later without
+ * accessing the pool. This is required to avoid a race between sending
+ * a reply with the new SLV to the client and server stack cleanup, in
+ * which we can't guarantee that the namespace is still alive. We only
+ * know that the obd is alive as long as a valid export is alive.
+ */
+ obd = ldlm_pl2ns(pl)->ns_obd;
+ LASSERT(obd != NULL);
+ write_lock(&obd->obd_pool_lock);
+ obd->obd_pool_slv = pl->pl_server_lock_volume;
+ write_unlock(&obd->obd_pool_lock);
+}
+
+/**
+ * Recalculates all pool fields on passed \a pl.
+ *
+ * \pre ->pl_lock is not locked.
+ */
+static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
+{
+ time_t recalc_interval_sec;
+
+ recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+ if (recalc_interval_sec < pl->pl_recalc_period)
+ return 0;
+
+ spin_lock(&pl->pl_lock);
+ recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+ if (recalc_interval_sec < pl->pl_recalc_period) {
+ spin_unlock(&pl->pl_lock);
+ return 0;
+ }
+ /*
+ * Recalc SLV after last period. This should be done
+ * _before_ recalculating new grant plan.
+ */
+ ldlm_pool_recalc_slv(pl);
+
+ /*
+ * Make sure that pool informed obd of last SLV changes.
+ */
+ ldlm_srv_pool_push_slv(pl);
+
+ /*
+ * Update grant_plan for new period.
+ */
+ ldlm_pool_recalc_grant_plan(pl);
+
+ pl->pl_recalc_time = cfs_time_current_sec();
+ lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
+ recalc_interval_sec);
+ spin_unlock(&pl->pl_lock);
+ return 0;
+}
+
+/**
+ * This function is used on server side as main entry point for memory
+ * pressure handling. It decreases SLV on \a pl according to passed
+ * \a nr and \a gfp_mask.
+ *
+ * Our goal here is to decrease the SLV in such a way that clients hold
+ * approximately \a nr fewer locks over the next 10h.
+ */
+static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
+ int nr, unsigned int gfp_mask)
+{
+ __u32 limit;
+
+ /*
+ * VM is asking how many entries may be potentially freed.
+ */
+ if (nr == 0)
+ return atomic_read(&pl->pl_granted);
+
+ /*
+ * The client has already canceled its locks while the server is
+ * already in the shrinker and can't cancel anything. Catch this race.
+ */
+ if (atomic_read(&pl->pl_granted) == 0)
+ return 0;
+
+ spin_lock(&pl->pl_lock);
+
+ /*
+ * We want the shrinker to cause cancellation of @nr locks on clients,
+ * or to grant approximately @nr fewer locks in the next intervals.
+ *
+ * This is why we decrease the SLV by @nr. The effect lasts only for one
+ * recalc interval (1s these days), which should be enough to pass the
+ * decreased SLV on to all clients. On the next recalc interval the pool
+ * will either increase the SLV if the lock load is not high, keep it at
+ * the same level, or decrease it again; thus the shrinker-decreased SLV
+ * affects the next recalc intervals and in this way lowers the locking
+ * load.
+ */
+ if (nr < pl->pl_server_lock_volume) {
+ pl->pl_server_lock_volume = pl->pl_server_lock_volume - nr;
+ } else {
+ limit = ldlm_pool_get_limit(pl);
+ pl->pl_server_lock_volume = ldlm_pool_slv_min(limit);
+ }
+
+ /*
+ * Make sure that pool informed obd of last SLV changes.
+ */
+ ldlm_srv_pool_push_slv(pl);
+ spin_unlock(&pl->pl_lock);
+
+ /*
+ * We have not really freed any memory here so far; it may only be
+ * freed later, so return 0 to avoid confusing the VM.
+ */
+ return 0;
+}
+
+/**
+ * Setup server side pool \a pl with passed \a limit.
+ */
+static int ldlm_srv_pool_setup(struct ldlm_pool *pl, int limit)
+{
+ struct obd_device *obd;
+
+ obd = ldlm_pl2ns(pl)->ns_obd;
+ LASSERT(obd != NULL && obd != LP_POISON);
+ LASSERT(obd->obd_type != LP_POISON);
+ write_lock(&obd->obd_pool_lock);
+ obd->obd_pool_limit = limit;
+ write_unlock(&obd->obd_pool_lock);
+
+ ldlm_pool_set_limit(pl, limit);
+ return 0;
+}
+
+/**
+ * Sets the SLV and Limit from ldlm_pl2ns(pl)->ns_obd into the passed \a pl.
+ */
+static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
+{
+ struct obd_device *obd;
+
+ /*
+ * Get the new SLV and Limit from the obd, which is updated by
+ * incoming RPCs.
+ */
+ obd = ldlm_pl2ns(pl)->ns_obd;
+ LASSERT(obd != NULL);
+ read_lock(&obd->obd_pool_lock);
+ pl->pl_server_lock_volume = obd->obd_pool_slv;
+ ldlm_pool_set_limit(pl, obd->obd_pool_limit);
+ read_unlock(&obd->obd_pool_lock);
+}
+
+/**
+ * Recalculates the client-side pool \a pl according to the current SLV and Limit.
+ */
+static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
+{
+ time_t recalc_interval_sec;
+
+ recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+ if (recalc_interval_sec < pl->pl_recalc_period)
+ return 0;
+
+ spin_lock(&pl->pl_lock);
+ /*
+ * Check if we need to recalc lists now.
+ */
+ recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+ if (recalc_interval_sec < pl->pl_recalc_period) {
+ spin_unlock(&pl->pl_lock);
+ return 0;
+ }
+
+ /*
+ * Make sure that pool knows last SLV and Limit from obd.
+ */
+ ldlm_cli_pool_pop_slv(pl);
+
+ pl->pl_recalc_time = cfs_time_current_sec();
+ lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
+ recalc_interval_sec);
+ spin_unlock(&pl->pl_lock);
+
+ /*
+ * Do not cancel locks in case lru resize is disabled for this ns.
+ */
+ if (!ns_connect_lru_resize(ldlm_pl2ns(pl)))
+ return 0;
+
+ /*
+ * When canceling locks on the client we do not need to maintain sharp
+ * timing; we only want to cancel locks as soon as possible according to
+ * the new SLV. This may be called when the SLV has changed a lot, which
+ * is why we do not take pl->pl_recalc_time into account here.
+ */
+ return ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LCF_ASYNC, LDLM_CANCEL_LRUR);
+}
+
+/**
+ * This function is main entry point for memory pressure handling on client
+ * side. Main goal of this function is to cancel some number of locks on
+ * passed \a pl according to \a nr and \a gfp_mask.
+ */
+static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
+ int nr, unsigned int gfp_mask)
+{
+ struct ldlm_namespace *ns;
+ int unused;
+
+ ns = ldlm_pl2ns(pl);
+
+ /*
+ * Do not cancel locks in case lru resize is disabled for this ns.
+ */
+ if (!ns_connect_lru_resize(ns))
+ return 0;
+
+ /*
+ * Make sure that pool knows last SLV and Limit from obd.
+ */
+ ldlm_cli_pool_pop_slv(pl);
+
+ spin_lock(&ns->ns_lock);
+ unused = ns->ns_nr_unused;
+ spin_unlock(&ns->ns_lock);
+
+ if (nr == 0)
+ return (unused / 100) * sysctl_vfs_cache_pressure;
+ else
+ return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_CANCEL_SHRINK);
+}
+
+struct ldlm_pool_ops ldlm_srv_pool_ops = {
+ .po_recalc = ldlm_srv_pool_recalc,
+ .po_shrink = ldlm_srv_pool_shrink,
+ .po_setup = ldlm_srv_pool_setup
+};
+
+struct ldlm_pool_ops ldlm_cli_pool_ops = {
+ .po_recalc = ldlm_cli_pool_recalc,
+ .po_shrink = ldlm_cli_pool_shrink
+};
+
+/**
+ * Pool recalc wrapper. Calls either the client or the server pool recalc
+ * callback depending on which kind of pool \a pl is.
+ */
+int ldlm_pool_recalc(struct ldlm_pool *pl)
+{
+ time_t recalc_interval_sec;
+ int count;
+
+ recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+ if (recalc_interval_sec <= 0)
+ goto recalc;
+
+ spin_lock(&pl->pl_lock);
+ if (recalc_interval_sec > 0) {
+ /*
+ * Update pool statistics every 1s.
+ */
+ ldlm_pool_recalc_stats(pl);
+
+ /*
+ * Zero out all rates and speed for the last period.
+ */
+ atomic_set(&pl->pl_grant_rate, 0);
+ atomic_set(&pl->pl_cancel_rate, 0);
+ }
+ spin_unlock(&pl->pl_lock);
+
+ recalc:
+ if (pl->pl_ops->po_recalc != NULL) {
+ count = pl->pl_ops->po_recalc(pl);
+ lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
+ count);
+ }
+ recalc_interval_sec = pl->pl_recalc_time - cfs_time_current_sec() +
+ pl->pl_recalc_period;
+
+ return recalc_interval_sec;
+}
+
+/*
+ * Pool shrink wrapper. Calls either the client or the server pool shrink
+ * callback depending on which kind of pool pl is. When nr == 0, just return
+ * the number of freeable locks. Otherwise, return the number of canceled locks.
+ */
+int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
+ unsigned int gfp_mask)
+{
+ int cancel = 0;
+
+ if (pl->pl_ops->po_shrink != NULL) {
+ cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask);
+ if (nr > 0) {
+ lprocfs_counter_add(pl->pl_stats,
+ LDLM_POOL_SHRINK_REQTD_STAT,
+ nr);
+ lprocfs_counter_add(pl->pl_stats,
+ LDLM_POOL_SHRINK_FREED_STAT,
+ cancel);
+ CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, "
+ "shrunk %d\n", pl->pl_name, nr, cancel);
+ }
+ }
+ return cancel;
+}
+EXPORT_SYMBOL(ldlm_pool_shrink);
+
+/**
+ * Pool setup wrapper. Calls either the client or the server pool setup
+ * callback depending on which kind of pool \a pl is.
+ *
+ * Sets passed \a limit into pool \a pl.
+ */
+int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
+{
+ if (pl->pl_ops->po_setup != NULL)
+ return(pl->pl_ops->po_setup(pl, limit));
+ return 0;
+}
+EXPORT_SYMBOL(ldlm_pool_setup);
+
+static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
+{
+ int granted, grant_rate, cancel_rate, grant_step;
+ int grant_speed, grant_plan, lvf;
+ struct ldlm_pool *pl = m->private;
+ __u64 slv, clv;
+ __u32 limit;
+
+ spin_lock(&pl->pl_lock);
+ slv = pl->pl_server_lock_volume;
+ clv = pl->pl_client_lock_volume;
+ limit = ldlm_pool_get_limit(pl);
+ grant_plan = pl->pl_grant_plan;
+ granted = atomic_read(&pl->pl_granted);
+ grant_rate = atomic_read(&pl->pl_grant_rate);
+ cancel_rate = atomic_read(&pl->pl_cancel_rate);
+ grant_speed = grant_rate - cancel_rate;
+ lvf = atomic_read(&pl->pl_lock_volume_factor);
+ grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
+ spin_unlock(&pl->pl_lock);
+
+ seq_printf(m, "LDLM pool state (%s):\n"
+ " SLV: "LPU64"\n"
+ " CLV: "LPU64"\n"
+ " LVF: %d\n",
+ pl->pl_name, slv, clv, lvf);
+
+ if (ns_is_server(ldlm_pl2ns(pl))) {
+ seq_printf(m, " GSP: %d%%\n"
+ " GP: %d\n",
+ grant_step, grant_plan);
+ }
+ seq_printf(m, " GR: %d\n" " CR: %d\n" " GS: %d\n"
+ " G: %d\n" " L: %d\n",
+ grant_rate, cancel_rate, grant_speed,
+ granted, limit);
+
+ return 0;
+}
+LPROC_SEQ_FOPS_RO(lprocfs_pool_state);
+
+static int lprocfs_grant_speed_seq_show(struct seq_file *m, void *unused)
+{
+ struct ldlm_pool *pl = m->private;
+ int grant_speed;
+
+ spin_lock(&pl->pl_lock);
+ /* serialize with ldlm_pool_recalc */
+ grant_speed = atomic_read(&pl->pl_grant_rate) -
+ atomic_read(&pl->pl_cancel_rate);
+ spin_unlock(&pl->pl_lock);
+ return lprocfs_rd_uint(m, &grant_speed);
+}
+
+LDLM_POOL_PROC_READER_SEQ_SHOW(grant_plan, int);
+LPROC_SEQ_FOPS_RO(lprocfs_grant_plan);
+
+LDLM_POOL_PROC_READER_SEQ_SHOW(recalc_period, int);
+LDLM_POOL_PROC_WRITER(recalc_period, int);
+static ssize_t lprocfs_recalc_period_seq_write(struct file *file, const char *buf,
+ size_t len, loff_t *off)
+{
+ struct seq_file *seq = file->private_data;
+
+ return lprocfs_wr_recalc_period(file, buf, len, seq->private);
+}
+LPROC_SEQ_FOPS(lprocfs_recalc_period);
+
+LPROC_SEQ_FOPS_RO_TYPE(ldlm_pool, u64);
+LPROC_SEQ_FOPS_RO_TYPE(ldlm_pool, atomic);
+LPROC_SEQ_FOPS_RW_TYPE(ldlm_pool_rw, atomic);
+
+LPROC_SEQ_FOPS_RO(lprocfs_grant_speed);
+
+#define LDLM_POOL_ADD_VAR(name, var, ops) \
+ do { \
+ snprintf(var_name, MAX_STRING_SIZE, #name); \
+ pool_vars[0].data = var; \
+ pool_vars[0].fops = ops; \
+ lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);\
+ } while (0)
+
+static int ldlm_pool_proc_init(struct ldlm_pool *pl)
+{
+ struct ldlm_namespace *ns = ldlm_pl2ns(pl);
+ struct proc_dir_entry *parent_ns_proc;
+ struct lprocfs_vars pool_vars[2];
+ char *var_name = NULL;
+ int rc = 0;
+
+ OBD_ALLOC(var_name, MAX_STRING_SIZE + 1);
+ if (!var_name)
+ return -ENOMEM;
+
+ parent_ns_proc = ns->ns_proc_dir_entry;
+ if (parent_ns_proc == NULL) {
+ CERROR("%s: proc entry is not initialized\n",
+ ldlm_ns_name(ns));
+ GOTO(out_free_name, rc = -EINVAL);
+ }
+ pl->pl_proc_dir = lprocfs_register("pool", parent_ns_proc,
+ NULL, NULL);
+ if (IS_ERR(pl->pl_proc_dir)) {
+ CERROR("LProcFS failed in ldlm-pool-init\n");
+ rc = PTR_ERR(pl->pl_proc_dir);
+ pl->pl_proc_dir = NULL;
+ GOTO(out_free_name, rc);
+ }
+
+ var_name[MAX_STRING_SIZE] = '\0';
+ memset(pool_vars, 0, sizeof(pool_vars));
+ pool_vars[0].name = var_name;
+
+ LDLM_POOL_ADD_VAR("server_lock_volume", &pl->pl_server_lock_volume,
+ &ldlm_pool_u64_fops);
+ LDLM_POOL_ADD_VAR("limit", &pl->pl_limit, &ldlm_pool_rw_atomic_fops);
+ LDLM_POOL_ADD_VAR("granted", &pl->pl_granted, &ldlm_pool_atomic_fops);
+ LDLM_POOL_ADD_VAR("grant_speed", pl, &lprocfs_grant_speed_fops);
+ LDLM_POOL_ADD_VAR("cancel_rate", &pl->pl_cancel_rate,
+ &ldlm_pool_atomic_fops);
+ LDLM_POOL_ADD_VAR("grant_rate", &pl->pl_grant_rate,
+ &ldlm_pool_atomic_fops);
+ LDLM_POOL_ADD_VAR("grant_plan", pl, &lprocfs_grant_plan_fops);
+ LDLM_POOL_ADD_VAR("recalc_period", pl, &lprocfs_recalc_period_fops);
+ LDLM_POOL_ADD_VAR("lock_volume_factor", &pl->pl_lock_volume_factor,
+ &ldlm_pool_rw_atomic_fops);
+ LDLM_POOL_ADD_VAR("state", pl, &lprocfs_pool_state_fops);
+
+ pl->pl_stats = lprocfs_alloc_stats(LDLM_POOL_LAST_STAT -
+ LDLM_POOL_FIRST_STAT, 0);
+ if (!pl->pl_stats)
+ GOTO(out_free_name, rc = -ENOMEM);
+
+ lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
+ LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
+ "granted", "locks");
+ lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_STAT,
+ LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
+ "grant", "locks");
+ lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_STAT,
+ LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
+ "cancel", "locks");
+ lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
+ LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
+ "grant_rate", "locks/s");
+ lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
+ LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
+ "cancel_rate", "locks/s");
+ lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
+ LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
+ "grant_plan", "locks/s");
+ lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SLV_STAT,
+ LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
+ "slv", "slv");
+ lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_REQTD_STAT,
+ LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
+ "shrink_request", "locks");
+ lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_FREED_STAT,
+ LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
+ "shrink_freed", "locks");
+ lprocfs_counter_init(pl->pl_stats, LDLM_POOL_RECALC_STAT,
+ LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
+ "recalc_freed", "locks");
+ lprocfs_counter_init(pl->pl_stats, LDLM_POOL_TIMING_STAT,
+ LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
+ "recalc_timing", "sec");
+ rc = lprocfs_register_stats(pl->pl_proc_dir, "stats", pl->pl_stats);
+
+out_free_name:
+ OBD_FREE(var_name, MAX_STRING_SIZE + 1);
+ return rc;
+}
+
+static void ldlm_pool_proc_fini(struct ldlm_pool *pl)
+{
+ if (pl->pl_stats != NULL) {
+ lprocfs_free_stats(&pl->pl_stats);
+ pl->pl_stats = NULL;
+ }
+ if (pl->pl_proc_dir != NULL) {
+ lprocfs_remove(&pl->pl_proc_dir);
+ pl->pl_proc_dir = NULL;
+ }
+}
+
+int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
+ int idx, ldlm_side_t client)
+{
+ int rc;
+
+ spin_lock_init(&pl->pl_lock);
+ atomic_set(&pl->pl_granted, 0);
+ pl->pl_recalc_time = cfs_time_current_sec();
+ atomic_set(&pl->pl_lock_volume_factor, 1);
+
+ atomic_set(&pl->pl_grant_rate, 0);
+ atomic_set(&pl->pl_cancel_rate, 0);
+ pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);
+
+ snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
+ ldlm_ns_name(ns), idx);
+
+ if (client == LDLM_NAMESPACE_SERVER) {
+ pl->pl_ops = &ldlm_srv_pool_ops;
+ ldlm_pool_set_limit(pl, LDLM_POOL_HOST_L);
+ pl->pl_recalc_period = LDLM_POOL_SRV_DEF_RECALC_PERIOD;
+ pl->pl_server_lock_volume = ldlm_pool_slv_max(LDLM_POOL_HOST_L);
+ } else {
+ ldlm_pool_set_limit(pl, 1);
+ pl->pl_server_lock_volume = 0;
+ pl->pl_ops = &ldlm_cli_pool_ops;
+ pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
+ }
+ pl->pl_client_lock_volume = 0;
+ rc = ldlm_pool_proc_init(pl);
+ if (rc)
+ return rc;
+
+ CDEBUG(D_DLMTRACE, "Lock pool %s is initialized\n", pl->pl_name);
+
+ return rc;
+}
+EXPORT_SYMBOL(ldlm_pool_init);
+
+void ldlm_pool_fini(struct ldlm_pool *pl)
+{
+ ldlm_pool_proc_fini(pl);
+
+ /*
+ * The pool should not be used after this point. We can't free it here
+ * as it lives in struct ldlm_namespace, but we are still interested in
+ * catching any abnormal use.
+ */
+ POISON(pl, 0x5a, sizeof(*pl));
+}
+EXPORT_SYMBOL(ldlm_pool_fini);
+
+/**
+ * Add new taken ldlm lock \a lock into pool \a pl accounting.
+ */
+void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
+{
+ /*
+ * FLOCK locks are special in the sense that they are almost never
+ * canceled; instead a special kind of lock is used to drop them.
+ * Also, there is no LRU for flock locks, so there is no point in
+ * tracking them anyway.
+ */
+ if (lock->l_resource->lr_type == LDLM_FLOCK)
+ return;
+
+ atomic_inc(&pl->pl_granted);
+ atomic_inc(&pl->pl_grant_rate);
+ lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);
+ /*
+ * Do not do pool recalc on the client side, as all locks which may
+ * potentially be canceled have already been packed into the
+ * enqueue/cancel RPC. Also, we do not want to run out of stack
+ * with too-long call paths.
+ */
+ if (ns_is_server(ldlm_pl2ns(pl)))
+ ldlm_pool_recalc(pl);
+}
+EXPORT_SYMBOL(ldlm_pool_add);
+
+/**
+ * Remove ldlm lock \a lock from pool \a pl accounting.
+ */
+void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
+{
+ /*
+ * Filter out FLOCK locks. Read above comment in ldlm_pool_add().
+ */
+ if (lock->l_resource->lr_type == LDLM_FLOCK)
+ return;
+
+ LASSERT(atomic_read(&pl->pl_granted) > 0);
+ atomic_dec(&pl->pl_granted);
+ atomic_inc(&pl->pl_cancel_rate);
+
+ lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
+
+ if (ns_is_server(ldlm_pl2ns(pl)))
+ ldlm_pool_recalc(pl);
+}
+EXPORT_SYMBOL(ldlm_pool_del);
+
+/**
+ * Returns current \a pl SLV.
+ *
+ * \pre ->pl_lock is not locked.
+ */
+__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
+{
+ __u64 slv;
+ spin_lock(&pl->pl_lock);
+ slv = pl->pl_server_lock_volume;
+ spin_unlock(&pl->pl_lock);
+ return slv;
+}
+EXPORT_SYMBOL(ldlm_pool_get_slv);
+
+/**
+ * Sets passed \a slv to \a pl.
+ *
+ * \pre ->pl_lock is not locked.
+ */
+void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
+{
+ spin_lock(&pl->pl_lock);
+ pl->pl_server_lock_volume = slv;
+ spin_unlock(&pl->pl_lock);
+}
+EXPORT_SYMBOL(ldlm_pool_set_slv);
+
+/**
+ * Returns current \a pl CLV.
+ *
+ * \pre ->pl_lock is not locked.
+ */
+__u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
+{
+ __u64 slv;
+ spin_lock(&pl->pl_lock);
+ slv = pl->pl_client_lock_volume;
+ spin_unlock(&pl->pl_lock);
+ return slv;
+}
+EXPORT_SYMBOL(ldlm_pool_get_clv);
+
+/**
+ * Sets passed \a clv to \a pl.
+ *
+ * \pre ->pl_lock is not locked.
+ */
+void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
+{
+ spin_lock(&pl->pl_lock);
+ pl->pl_client_lock_volume = clv;
+ spin_unlock(&pl->pl_lock);
+}
+EXPORT_SYMBOL(ldlm_pool_set_clv);
+
+/**
+ * Returns current \a pl limit.
+ */
+__u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
+{
+ return atomic_read(&pl->pl_limit);
+}
+EXPORT_SYMBOL(ldlm_pool_get_limit);
+
+/**
+ * Sets passed \a limit to \a pl.
+ */
+void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
+{
+ atomic_set(&pl->pl_limit, limit);
+}
+EXPORT_SYMBOL(ldlm_pool_set_limit);
+
+/**
+ * Returns current LVF from \a pl.
+ */
+__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
+{
+ return atomic_read(&pl->pl_lock_volume_factor);
+}
+EXPORT_SYMBOL(ldlm_pool_get_lvf);
+
+static int ldlm_pool_granted(struct ldlm_pool *pl)
+{
+ return atomic_read(&pl->pl_granted);
+}
+
+static struct ptlrpc_thread *ldlm_pools_thread;
+static struct completion ldlm_pools_comp;
+
+/*
+ * count locks from all namespaces (if possible). Returns number of
+ * cached locks.
+ */
+static unsigned long ldlm_pools_count(ldlm_side_t client, unsigned int gfp_mask)
+{
+ int total = 0, nr_ns;
+ struct ldlm_namespace *ns;
+ struct ldlm_namespace *ns_old = NULL; /* loop detection */
+ void *cookie;
+
+ if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
+ return 0;
+
+ CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n",
+ client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
+
+ cookie = cl_env_reenter();
+
+ /*
+ * Find out how many resources we may release.
+ */
+ for (nr_ns = ldlm_namespace_nr_read(client);
+ nr_ns > 0; nr_ns--) {
+ mutex_lock(ldlm_namespace_lock(client));
+ if (list_empty(ldlm_namespace_list(client))) {
+ mutex_unlock(ldlm_namespace_lock(client));
+ cl_env_reexit(cookie);
+ return 0;
+ }
+ ns = ldlm_namespace_first_locked(client);
+
+ if (ns == ns_old) {
+ mutex_unlock(ldlm_namespace_lock(client));
+ break;
+ }
+
+ if (ldlm_ns_empty(ns)) {
+ ldlm_namespace_move_to_inactive_locked(ns, client);
+ mutex_unlock(ldlm_namespace_lock(client));
+ continue;
+ }
+
+ if (ns_old == NULL)
+ ns_old = ns;
+
+ ldlm_namespace_get(ns);
+ ldlm_namespace_move_to_active_locked(ns, client);
+ mutex_unlock(ldlm_namespace_lock(client));
+ total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
+ ldlm_namespace_put(ns);
+ }
+
+ cl_env_reexit(cookie);
+ return total;
+}
+
+static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, unsigned int gfp_mask)
+{
+ unsigned long freed = 0;
+ int tmp, nr_ns;
+ struct ldlm_namespace *ns;
+ void *cookie;
+
+ if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
+ return -1;
+
+ cookie = cl_env_reenter();
+
+ /*
+ * Shrink at least ldlm_namespace_nr_read(client) namespaces.
+ */
+ for (tmp = nr_ns = ldlm_namespace_nr_read(client);
+ tmp > 0; tmp--) {
+ int cancel, nr_locks;
+
+ /*
+ * Do not call shrink under ldlm_namespace_lock(client)
+ */
+ mutex_lock(ldlm_namespace_lock(client));
+ if (list_empty(ldlm_namespace_list(client))) {
+ mutex_unlock(ldlm_namespace_lock(client));
+ break;
+ }
+ ns = ldlm_namespace_first_locked(client);
+ ldlm_namespace_get(ns);
+ ldlm_namespace_move_to_active_locked(ns, client);
+ mutex_unlock(ldlm_namespace_lock(client));
+
+ nr_locks = ldlm_pool_granted(&ns->ns_pool);
+ /*
+ * We used to shrink proportionally, but with the new shrinker API
+ * we lost the total number of freeable locks.
+ */
+ cancel = 1 + min_t(int, nr_locks, nr / nr_ns);
+ freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
+ ldlm_namespace_put(ns);
+ }
+ cl_env_reexit(cookie);
+ /*
+ * We only decrease the SLV in the server pools shrinker; return
+ * SHRINK_STOP to the kernel to avoid a needless loop. LU-1128
+ */
+ return (client == LDLM_NAMESPACE_SERVER) ? SHRINK_STOP : freed;
+}
+
+static unsigned long ldlm_pools_srv_count(struct shrinker *s, struct shrink_control *sc)
+{
+ return ldlm_pools_count(LDLM_NAMESPACE_SERVER, sc->gfp_mask);
+}
+
+static unsigned long ldlm_pools_srv_scan(struct shrinker *s, struct shrink_control *sc)
+{
+ return ldlm_pools_scan(LDLM_NAMESPACE_SERVER, sc->nr_to_scan,
+ sc->gfp_mask);
+}
+
+static unsigned long ldlm_pools_cli_count(struct shrinker *s, struct shrink_control *sc)
+{
+ return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask);
+}
+
+static unsigned long ldlm_pools_cli_scan(struct shrinker *s, struct shrink_control *sc)
+{
+ return ldlm_pools_scan(LDLM_NAMESPACE_CLIENT, sc->nr_to_scan,
+ sc->gfp_mask);
+}
+
+int ldlm_pools_recalc(ldlm_side_t client)
+{
+ __u32 nr_l = 0, nr_p = 0, l;
+ struct ldlm_namespace *ns;
+ struct ldlm_namespace *ns_old = NULL;
+ int nr, equal = 0;
+ int time = 50; /* seconds of sleep if no active namespaces */
+
+ /*
+ * No need to setup pool limit for client pools.
+ */
+ if (client == LDLM_NAMESPACE_SERVER) {
+ /*
+ * Check all modest namespaces first.
+ */
+ mutex_lock(ldlm_namespace_lock(client));
+ list_for_each_entry(ns, ldlm_namespace_list(client),
+ ns_list_chain)
+ {
+ if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
+ continue;
+
+ l = ldlm_pool_granted(&ns->ns_pool);
+ if (l == 0)
+ l = 1;
+
+ /*
+ * Set the modest pools limit equal to their avg granted
+ * locks + ~6%.
+ */
+ l += dru(l, LDLM_POOLS_MODEST_MARGIN_SHIFT, 0);
+ ldlm_pool_setup(&ns->ns_pool, l);
+ nr_l += l;
+ nr_p++;
+ }
+
+ /*
+ * Make sure that modest namespaces did not eat more than 2/3
+ * of the limit (a standalone sketch of this distribution follows
+ * ldlm_pools_recalc()).
+ */
+ if (nr_l >= 2 * (LDLM_POOL_HOST_L / 3)) {
+ CWARN("\"Modest\" pools eat up 2/3 of the server lock "
+ "limit (%d of %lu). This means you have too "
+ "many clients for this amount of server RAM. "
+ "Upgrade the server!\n", nr_l, LDLM_POOL_HOST_L);
+ equal = 1;
+ }
+
+ /*
+ * The rest is given to greedy namespaces.
+ */
+ list_for_each_entry(ns, ldlm_namespace_list(client),
+ ns_list_chain)
+ {
+ if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
+ continue;
+
+ if (equal) {
+ /*
+ * In the case where 2/3 of the locks are eaten up
+ * by modest pools, we re-set an equal limit
+ * for _all_ pools.
+ */
+ l = LDLM_POOL_HOST_L /
+ ldlm_namespace_nr_read(client);
+ } else {
+ /*
+ * The remaining greedy pools share the
+ * remaining locks in equal parts.
+ */
+ l = (LDLM_POOL_HOST_L - nr_l) /
+ (ldlm_namespace_nr_read(client) -
+ nr_p);
+ }
+ ldlm_pool_setup(&ns->ns_pool, l);
+ }
+ mutex_unlock(ldlm_namespace_lock(client));
+ }
+
+ /*
+ * Recalc at least ldlm_namespace_nr_read(client) namespaces.
+ */
+ for (nr = ldlm_namespace_nr_read(client); nr > 0; nr--) {
+ int skip;
+ /*
+ * Lock the list, get the first @ns in the list, take a reference, move
+ * it to the tail, unlock and call pool recalc. This way we avoid
+ * calling recalc under the @ns lock, which is really good as it gets
+ * rid of a potential deadlock on client nodes when canceling locks
+ * synchronously.
+ */
+ mutex_lock(ldlm_namespace_lock(client));
+ if (list_empty(ldlm_namespace_list(client))) {
+ mutex_unlock(ldlm_namespace_lock(client));
+ break;
+ }
+ ns = ldlm_namespace_first_locked(client);
+
+ if (ns_old == ns) { /* Full pass complete */
+ mutex_unlock(ldlm_namespace_lock(client));
+ break;
+ }
+
+ /* We got an empty namespace, need to move it back to inactive
+ * list.
+ * The race with parallel resource creation is fine:
+ * - If they do namespace_get before our check, we fail the
+ * check and they move this item to the end of the list anyway
+ * - If we do the check and then they do namespace_get, then
+ * we move the namespace to inactive and they will move
+ * it back to active (synchronised by the lock, so no clash
+ * there).
+ */
+ if (ldlm_ns_empty(ns)) {
+ ldlm_namespace_move_to_inactive_locked(ns, client);
+ mutex_unlock(ldlm_namespace_lock(client));
+ continue;
+ }
+
+ if (ns_old == NULL)
+ ns_old = ns;
+
+ spin_lock(&ns->ns_lock);
+ /*
+ * Skip an ns that is being freed; we don't want to increase
+ * its refcount again, not even temporarily. bz21519 & LU-499.
+ */
+ if (ns->ns_stopping) {
+ skip = 1;
+ } else {
+ skip = 0;
+ ldlm_namespace_get(ns);
+ }
+ spin_unlock(&ns->ns_lock);
+
+ ldlm_namespace_move_to_active_locked(ns, client);
+ mutex_unlock(ldlm_namespace_lock(client));
+
+ /*
+ * After setup is done - recalc the pool.
+ */
+ if (!skip) {
+ int ttime = ldlm_pool_recalc(&ns->ns_pool);
+
+ if (ttime < time)
+ time = ttime;
+
+ ldlm_namespace_put(ns);
+ }
+ }
+ return time;
+}
+EXPORT_SYMBOL(ldlm_pools_recalc);
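The server-side limit distribution implemented above can be summarized by this standalone sketch (illustrative only, not part of the patch; the namespace counts and granted numbers are made up, and the ~6% margin is approximated as l/16): modest namespaces are capped near their average granted count, and if that already consumes 2/3 of the host limit every namespace falls back to an equal share; otherwise the greedy namespaces split the remainder equally.

#include <stdio.h>

#define TOY_HOST_L 6000			/* stand-in for LDLM_POOL_HOST_L */

/* Distribute the host lock limit the way ldlm_pools_recalc() does. */
int main(void)
{
	int modest_granted[] = { 800, 1200 };	/* avg granted per modest ns */
	int nr_modest = 2, nr_total = 5;	/* 3 remaining greedy namespaces */
	int nr_l = 0, i;

	for (i = 0; i < nr_modest; i++) {
		int l = modest_granted[i] + modest_granted[i] / 16;	/* + ~6% */

		printf("modest ns %d limit = %d\n", i, l);
		nr_l += l;
	}

	if (nr_l >= 2 * (TOY_HOST_L / 3)) {
		/* modest pools ate 2/3 of the host limit: equal share for all */
		printf("equal limit = %d\n", TOY_HOST_L / nr_total);
	} else {
		/* greedy namespaces split the remainder equally */
		printf("greedy limit = %d\n",
		       (TOY_HOST_L - nr_l) / (nr_total - nr_modest));
	}
	return 0;
}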
+
+static int ldlm_pools_thread_main(void *arg)
+{
+ struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
+ int s_time, c_time;
+
+ thread_set_flags(thread, SVC_RUNNING);
+ wake_up(&thread->t_ctl_waitq);
+
+ CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
+ "ldlm_poold", current_pid());
+
+ while (1) {
+ struct l_wait_info lwi;
+
+ /*
+ * Recalc all pools on this tick.
+ */
+ s_time = ldlm_pools_recalc(LDLM_NAMESPACE_SERVER);
+ c_time = ldlm_pools_recalc(LDLM_NAMESPACE_CLIENT);
+
+ /*
+ * Wait until the next check time, or until we're
+ * stopped.
+ */
+ lwi = LWI_TIMEOUT(cfs_time_seconds(min(s_time, c_time)),
+ NULL, NULL);
+ l_wait_event(thread->t_ctl_waitq,
+ thread_is_stopping(thread) ||
+ thread_is_event(thread),
+ &lwi);
+
+ if (thread_test_and_clear_flags(thread, SVC_STOPPING))
+ break;
+ else
+ thread_test_and_clear_flags(thread, SVC_EVENT);
+ }
+
+ thread_set_flags(thread, SVC_STOPPED);
+ wake_up(&thread->t_ctl_waitq);
+
+ CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
+ "ldlm_poold", current_pid());
+
+ complete_and_exit(&ldlm_pools_comp, 0);
+}
+
+static int ldlm_pools_thread_start(void)
+{
+ struct l_wait_info lwi = { 0 };
+ struct task_struct *task;
+
+ if (ldlm_pools_thread != NULL)
+ return -EALREADY;
+
+ OBD_ALLOC_PTR(ldlm_pools_thread);
+ if (ldlm_pools_thread == NULL)
+ return -ENOMEM;
+
+ init_completion(&ldlm_pools_comp);
+ init_waitqueue_head(&ldlm_pools_thread->t_ctl_waitq);
+
+ task = kthread_run(ldlm_pools_thread_main, ldlm_pools_thread,
+ "ldlm_poold");
+ if (IS_ERR(task)) {
+ CERROR("Can't start pool thread, error %ld\n", PTR_ERR(task));
+ OBD_FREE(ldlm_pools_thread, sizeof(*ldlm_pools_thread));
+ ldlm_pools_thread = NULL;
+ return PTR_ERR(task);
+ }
+ l_wait_event(ldlm_pools_thread->t_ctl_waitq,
+ thread_is_running(ldlm_pools_thread), &lwi);
+ return 0;
+}
+
+static void ldlm_pools_thread_stop(void)
+{
+ if (ldlm_pools_thread == NULL) {
+ return;
+ }
+
+ thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
+ wake_up(&ldlm_pools_thread->t_ctl_waitq);
+
+ /*
+ * Make sure that pools thread is finished before freeing @thread.
+ * This fixes possible race and oops due to accessing freed memory
+ * in pools thread.
+ */
+ wait_for_completion(&ldlm_pools_comp);
+ OBD_FREE_PTR(ldlm_pools_thread);
+ ldlm_pools_thread = NULL;
+}
+
+static struct shrinker ldlm_pools_srv_shrinker = {
+ .count_objects = ldlm_pools_srv_count,
+ .scan_objects = ldlm_pools_srv_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
+static struct shrinker ldlm_pools_cli_shrinker = {
+ .count_objects = ldlm_pools_cli_count,
+ .scan_objects = ldlm_pools_cli_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
+int ldlm_pools_init(void)
+{
+ int rc;
+
+ rc = ldlm_pools_thread_start();
+ if (rc == 0) {
+ register_shrinker(&ldlm_pools_srv_shrinker);
+ register_shrinker(&ldlm_pools_cli_shrinker);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(ldlm_pools_init);
+
+void ldlm_pools_fini(void)
+{
+ unregister_shrinker(&ldlm_pools_srv_shrinker);
+ unregister_shrinker(&ldlm_pools_cli_shrinker);
+ ldlm_pools_thread_stop();
+}
+EXPORT_SYMBOL(ldlm_pools_fini);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
new file mode 100644
index 0000000..21cb523
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -0,0 +1,2298 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2010, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+/**
+ * This file contains Asynchronous System Trap (AST) handlers and related
+ * LDLM request-processing routines.
+ *
+ * An AST is a callback issued on a lock when its state is changed. There are
+ * several different types of ASTs (callbacks) registered for each lock:
+ *
+ * - completion AST: when a lock is enqueued by some process, but cannot be
+ * granted immediately due to other conflicting locks on the same resource,
+ * the completion AST is sent to notify the caller when the lock is
+ * eventually granted
+ *
+ * - blocking AST: when a lock is granted to some process, if another process
+ * enqueues a conflicting (blocking) lock on a resource, a blocking AST is
+ * sent to notify the holder(s) of the lock(s) of the conflicting lock
+ * request. The lock holder(s) must release their lock(s) on that resource in
+ * a timely manner or be evicted by the server.
+ *
+ * - glimpse AST: this is used when a process wants information about a lock
+ * (i.e. the lock value block (LVB)) but does not necessarily require holding
+ * the lock. If the resource is locked, the lock holder(s) are sent glimpse
+ * ASTs and the LVB is returned to the caller, and lock holder(s) may CANCEL
+ * their lock(s) if they are idle. If the resource is not locked, the server
+ * may grant the lock.
+ */
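As a mental model of the three callback roles described above, the sketch below shows a toy callback table a lock user might conceptually register. This is not the actual Lustre enqueue interface: the names are invented for illustration and the real AST signatures differ.

/* Toy model only: the real LDLM callback signatures are different. */
struct toy_lock;

struct toy_ast_table {
	/* called when a previously blocked enqueue is finally granted */
	void (*completion_ast)(struct toy_lock *lock);
	/* called when another client wants a conflicting lock: release ours
	 * in a timely manner or risk eviction */
	void (*blocking_ast)(struct toy_lock *lock);
	/* called when someone only wants the lock value block (LVB); the
	 * holder may voluntarily cancel if the lock is idle */
	void (*glimpse_ast)(struct toy_lock *lock);
};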
+
+#define DEBUG_SUBSYSTEM S_LDLM
+
+#include <lustre_dlm.h>
+#include <obd_class.h>
+#include <obd.h>
+
+#include "ldlm_internal.h"
+
+int ldlm_enqueue_min = OBD_TIMEOUT_DEFAULT;
+CFS_MODULE_PARM(ldlm_enqueue_min, "i", int, 0644,
+ "lock enqueue timeout minimum");
+
+/* On the client side, whether cached locks will be canceled before replay */
+unsigned int ldlm_cancel_unused_locks_before_replay = 1;
+
+static void interrupted_completion_wait(void *data)
+{
+}
+
+struct lock_wait_data {
+ struct ldlm_lock *lwd_lock;
+ __u32 lwd_conn_cnt;
+};
+
+struct ldlm_async_args {
+ struct lustre_handle lock_handle;
+};
+
+int ldlm_expired_completion_wait(void *data)
+{
+ struct lock_wait_data *lwd = data;
+ struct ldlm_lock *lock = lwd->lwd_lock;
+ struct obd_import *imp;
+ struct obd_device *obd;
+
+ if (lock->l_conn_export == NULL) {
+ static cfs_time_t next_dump = 0, last_dump = 0;
+
+ if (ptlrpc_check_suspend())
+ return 0;
+
+ LCONSOLE_WARN("lock timed out (enqueued at "CFS_TIME_T", "
+ CFS_DURATION_T"s ago)\n",
+ lock->l_last_activity,
+ cfs_time_sub(cfs_time_current_sec(),
+ lock->l_last_activity));
+ LDLM_DEBUG(lock, "lock timed out (enqueued at "CFS_TIME_T", "
+ CFS_DURATION_T"s ago); not entering recovery in "
+ "server code, just going back to sleep",
+ lock->l_last_activity,
+ cfs_time_sub(cfs_time_current_sec(),
+ lock->l_last_activity));
+ if (cfs_time_after(cfs_time_current(), next_dump)) {
+ last_dump = next_dump;
+ next_dump = cfs_time_shift(300);
+ ldlm_namespace_dump(D_DLMTRACE,
+ ldlm_lock_to_ns(lock));
+ if (last_dump == 0)
+ libcfs_debug_dumplog();
+ }
+ return 0;
+ }
+
+ obd = lock->l_conn_export->exp_obd;
+ imp = obd->u.cli.cl_import;
+ ptlrpc_fail_import(imp, lwd->lwd_conn_cnt);
+ LDLM_ERROR(lock, "lock timed out (enqueued at "CFS_TIME_T", "
+ CFS_DURATION_T"s ago), entering recovery for %s@%s",
+ lock->l_last_activity,
+ cfs_time_sub(cfs_time_current_sec(), lock->l_last_activity),
+ obd2cli_tgt(obd), imp->imp_connection->c_remote_uuid.uuid);
+
+ return 0;
+}
+EXPORT_SYMBOL(ldlm_expired_completion_wait);
+
+/* We use the same basis for both server side and client side functions
+ from a single node. */
+int ldlm_get_enq_timeout(struct ldlm_lock *lock)
+{
+ int timeout = at_get(ldlm_lock_to_ns_at(lock));
+ if (AT_OFF)
+ return obd_timeout / 2;
+ /* Since these are non-updating timeouts, we should be conservative.
+ It would be nice to have some kind of "early reply" mechanism for
+ lock callbacks too... */
+ timeout = min_t(int, at_max, timeout + (timeout >> 1)); /* 150% */
+ return max(timeout, ldlm_enqueue_min);
+}
+EXPORT_SYMBOL(ldlm_get_enq_timeout);
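To make the timeout arithmetic concrete, a standalone sketch (not part of the patch; the input numbers are hypothetical): the enqueue timeout is 150% of the adaptive-timeout estimate, clamped by at_max and floored by ldlm_enqueue_min, and ldlm_completion_ast() below then waits for twice that value.

#include <stdio.h>

/* Same shape as ldlm_get_enq_timeout(): 150% of the AT estimate, clamped. */
static int toy_enq_timeout(int at_estimate, int at_max, int enqueue_min)
{
	int timeout = at_estimate + (at_estimate >> 1);	/* 150% */

	if (timeout > at_max)
		timeout = at_max;
	return timeout > enqueue_min ? timeout : enqueue_min;
}

int main(void)
{
	/* hypothetical numbers: a 10 s estimate, generous at_max, 5 s floor */
	int t = toy_enq_timeout(10, 600, 5);

	printf("enqueue timeout = %d s, completion wait = %d s\n", t, 2 * t);
	return 0;
}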
+
+/**
+ * Helper function for ldlm_completion_ast(), updating timings when lock is
+ * actually granted.
+ */
+static int ldlm_completion_tail(struct ldlm_lock *lock)
+{
+ long delay;
+ int result;
+
+ if (lock->l_flags & (LDLM_FL_DESTROYED | LDLM_FL_FAILED)) {
+ LDLM_DEBUG(lock, "client-side enqueue: destroyed");
+ result = -EIO;
+ } else {
+ delay = cfs_time_sub(cfs_time_current_sec(),
+ lock->l_last_activity);
+ LDLM_DEBUG(lock, "client-side enqueue: granted after "
+ CFS_DURATION_T"s", delay);
+
+ /* Update our time estimate */
+ at_measured(ldlm_lock_to_ns_at(lock),
+ delay);
+ result = 0;
+ }
+ return result;
+}
+
+/**
+ * Implementation of ->l_completion_ast() for a client that doesn't wait
+ * until the lock is granted. Suitable for locks enqueued through ptlrpcd,
+ * or other threads that cannot block for long.
+ */
+int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data)
+{
+ if (flags == LDLM_FL_WAIT_NOREPROC) {
+ LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
+ return 0;
+ }
+
+ if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
+ LDLM_FL_BLOCK_CONV))) {
+ wake_up(&lock->l_waitq);
+ return ldlm_completion_tail(lock);
+ }
+
+ LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
+ "going forward");
+ ldlm_reprocess_all(lock->l_resource);
+ return 0;
+}
+EXPORT_SYMBOL(ldlm_completion_ast_async);
+
+/**
+ * Generic LDLM "completion" AST. This is called in several cases:
+ *
+ * - when a reply to an ENQUEUE RPC is received from the server
+ * (ldlm_cli_enqueue_fini()). Lock might be granted or not granted at
+ * this point (determined by flags);
+ *
+ * - when LDLM_CP_CALLBACK RPC comes to client to notify it that lock has
+ * been granted;
+ *
+ * - when ldlm_lock_match(LDLM_FL_LVB_READY) is about to wait until lock
+ * gets correct lvb;
+ *
+ * - to force all locks when resource is destroyed (cleanup_resource());
+ *
+ * - during lock conversion (not used currently).
+ *
+ * If the lock is not granted in the first case, this function waits until
+ * the second or penultimate case happens in some other thread.
+ *
+ */
+int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
+{
+ /* XXX ALLOCATE - 160 bytes */
+ struct lock_wait_data lwd;
+ struct obd_device *obd;
+ struct obd_import *imp = NULL;
+ struct l_wait_info lwi;
+ __u32 timeout;
+ int rc = 0;
+
+ if (flags == LDLM_FL_WAIT_NOREPROC) {
+ LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
+ goto noreproc;
+ }
+
+ if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
+ LDLM_FL_BLOCK_CONV))) {
+ wake_up(&lock->l_waitq);
+ return 0;
+ }
+
+ LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
+ "sleeping");
+
+noreproc:
+
+ obd = class_exp2obd(lock->l_conn_export);
+
+ /* if this is a local lock, then there is no import */
+ if (obd != NULL) {
+ imp = obd->u.cli.cl_import;
+ }
+
+ /* Wait a long time for enqueue - the server may have to call back a
+ lock from another client. The server will evict the other client if it
+ doesn't respond reasonably, and then give us the lock. */
+ timeout = ldlm_get_enq_timeout(lock) * 2;
+
+ lwd.lwd_lock = lock;
+
+ if (lock->l_flags & LDLM_FL_NO_TIMEOUT) {
+ LDLM_DEBUG(lock, "waiting indefinitely because of NO_TIMEOUT");
+ lwi = LWI_INTR(interrupted_completion_wait, &lwd);
+ } else {
+ lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
+ ldlm_expired_completion_wait,
+ interrupted_completion_wait, &lwd);
+ }
+
+ if (imp != NULL) {
+ spin_lock(&imp->imp_lock);
+ lwd.lwd_conn_cnt = imp->imp_conn_cnt;
+ spin_unlock(&imp->imp_lock);
+ }
+
+ if (ns_is_client(ldlm_lock_to_ns(lock)) &&
+ OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST,
+ OBD_FAIL_LDLM_CP_BL_RACE | OBD_FAIL_ONCE)) {
+ lock->l_flags |= LDLM_FL_FAIL_LOC;
+ rc = -EINTR;
+ } else {
+ /* Go to sleep until the lock is granted or cancelled. */
+ rc = l_wait_event(lock->l_waitq,
+ is_granted_or_cancelled(lock), &lwi);
+ }
+
+ if (rc) {
+ LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
+ rc);
+ return rc;
+ }
+
+ return ldlm_completion_tail(lock);
+}
+EXPORT_SYMBOL(ldlm_completion_ast);
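+
+/*
+ * Illustrative note (not part of the original patch): callers that only
+ * need to wait on an already-enqueued lock typically invoke this AST
+ * through the lock, as ldlm_cli_convert() does further down:
+ *
+ *   rc = lock->l_completion_ast(lock, LDLM_FL_WAIT_NOREPROC, NULL);
+ *
+ * which takes the "noreproc" path above and sleeps until the lock is
+ * granted or cancelled.
+ */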
+
+/**
+ * A helper to build a blocking AST function
+ *
+ * Perform a common operation for blocking ASTs:
+ * deferred lock cancellation.
+ *
+ * \param lock the lock blocking or canceling AST was called on
+ * \retval 0
+ * \see mdt_blocking_ast
+ * \see ldlm_blocking_ast
+ */
+int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock)
+{
+ int do_ast;
+
+ lock->l_flags |= LDLM_FL_CBPENDING;
+ do_ast = (!lock->l_readers && !lock->l_writers);
+ unlock_res_and_lock(lock);
+
+ if (do_ast) {
+ struct lustre_handle lockh;
+ int rc;
+
+ LDLM_DEBUG(lock, "already unused, calling ldlm_cli_cancel");
+ ldlm_lock2handle(lock, &lockh);
+ rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
+ if (rc < 0)
+ CERROR("ldlm_cli_cancel: %d\n", rc);
+ } else {
+ LDLM_DEBUG(lock, "Lock still has references, will be "
+ "cancelled later");
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ldlm_blocking_ast_nocheck);
+
+/**
+ * Server blocking AST
+ *
+ * ->l_blocking_ast() callback for LDLM locks acquired by server-side
+ * OBDs.
+ *
+ * \param lock the lock that blocks a request or is being cancelled
+ * \param desc unused
+ * \param data unused
+ * \param flag indicates whether this is a cancelling or blocking callback
+ * \retval 0
+ * \see ldlm_blocking_ast_nocheck
+ */
+int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
+ void *data, int flag)
+{
+ if (flag == LDLM_CB_CANCELING) {
+ /* Don't need to do anything here. */
+ return 0;
+ }
+
+ lock_res_and_lock(lock);
+ /* Get this: if ldlm_blocking_ast is racing with intent_policy, such
+ * that ldlm_blocking_ast is called just before intent_policy method
+ * takes the lr_lock, then by the time we get the lock, we might not
+ * be the correct blocking function anymore. So check, and return
+ * early, if so. */
+ if (lock->l_blocking_ast != ldlm_blocking_ast) {
+ unlock_res_and_lock(lock);
+ return 0;
+ }
+ return ldlm_blocking_ast_nocheck(lock);
+}
+EXPORT_SYMBOL(ldlm_blocking_ast);
+
+/**
+ * ->l_glimpse_ast() for DLM extent locks acquired on the server-side. See
+ * comment in filter_intent_policy() on why you may need this.
+ */
+int ldlm_glimpse_ast(struct ldlm_lock *lock, void *reqp)
+{
+ /*
+ * Returning -ELDLM_NO_LOCK_DATA actually works, but the reason for
+ * that is rather subtle: with OST-side locking, it may so happen that
+ * _all_ extent locks are held by the OST. If client wants to obtain
+ * current file size it calls ll{,u}_glimpse_size(), and (as locks are
+ * on the server), dummy glimpse callback fires and does
+ * nothing. Client still receives correct file size due to the
+ * following fragment in filter_intent_policy():
+ *
+ * rc = l->l_glimpse_ast(l, NULL); // this will update the LVB
+ * if (rc != 0 && res->lr_namespace->ns_lvbo &&
+ * res->lr_namespace->ns_lvbo->lvbo_update) {
+ * res->lr_namespace->ns_lvbo->lvbo_update(res, NULL, 0, 1);
+ * }
+ *
+ * that is, after glimpse_ast() fails, filter_lvbo_update() runs, and
+ * returns correct file size to the client.
+ */
+ return -ELDLM_NO_LOCK_DATA;
+}
+EXPORT_SYMBOL(ldlm_glimpse_ast);
+
+/**
+ * Enqueue a local lock (typically on a server).
+ */
+int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
+ const struct ldlm_res_id *res_id,
+ ldlm_type_t type, ldlm_policy_data_t *policy,
+ ldlm_mode_t mode, __u64 *flags,
+ ldlm_blocking_callback blocking,
+ ldlm_completion_callback completion,
+ ldlm_glimpse_callback glimpse,
+ void *data, __u32 lvb_len, enum lvb_type lvb_type,
+ const __u64 *client_cookie,
+ struct lustre_handle *lockh)
+{
+ struct ldlm_lock *lock;
+ int err;
+ const struct ldlm_callback_suite cbs = { .lcs_completion = completion,
+ .lcs_blocking = blocking,
+ .lcs_glimpse = glimpse,
+ };
+
+ LASSERT(!(*flags & LDLM_FL_REPLAY));
+ if (unlikely(ns_is_client(ns))) {
+ CERROR("Trying to enqueue local lock in a shadow namespace\n");
+ LBUG();
+ }
+
+ lock = ldlm_lock_create(ns, res_id, type, mode, &cbs, data, lvb_len,
+ lvb_type);
+ if (unlikely(!lock))
+ GOTO(out_nolock, err = -ENOMEM);
+
+ ldlm_lock2handle(lock, lockh);
+
+ /* NB: we don't have any lock now (lock_res_and_lock)
+ * because it's a new lock */
+ ldlm_lock_addref_internal_nolock(lock, mode);
+ lock->l_flags |= LDLM_FL_LOCAL;
+ if (*flags & LDLM_FL_ATOMIC_CB)
+ lock->l_flags |= LDLM_FL_ATOMIC_CB;
+
+ if (policy != NULL)
+ lock->l_policy_data = *policy;
+ if (client_cookie != NULL)
+ lock->l_client_cookie = *client_cookie;
+ if (type == LDLM_EXTENT)
+ lock->l_req_extent = policy->l_extent;
+
+ err = ldlm_lock_enqueue(ns, &lock, policy, flags);
+ if (unlikely(err != ELDLM_OK))
+ GOTO(out, err);
+
+ if (policy != NULL)
+ *policy = lock->l_policy_data;
+
+ if (lock->l_completion_ast)
+ lock->l_completion_ast(lock, *flags, NULL);
+
+ LDLM_DEBUG(lock, "client-side local enqueue handler, new lock created");
+ out:
+ LDLM_LOCK_RELEASE(lock);
+ out_nolock:
+ return err;
+}
+EXPORT_SYMBOL(ldlm_cli_enqueue_local);
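+
+/*
+ * Illustrative sketch (not from the original patch): a hypothetical
+ * server-side caller taking a local PR lock on a resource could look like
+ *
+ *   struct lustre_handle lockh;
+ *   __u64 flags = 0;
+ *   int rc;
+ *
+ *   rc = ldlm_cli_enqueue_local(ns, &res_id, LDLM_PLAIN, NULL, LCK_PR,
+ *                               &flags, ldlm_blocking_ast,
+ *                               ldlm_completion_ast, NULL, NULL, 0,
+ *                               LVB_T_NONE, NULL, &lockh);
+ *
+ * where "ns", "res_id" and the LVB type are assumptions of this example,
+ * and the callbacks are the generic ones defined in this file.
+ */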
+
+static void failed_lock_cleanup(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock, int mode)
+{
+ int need_cancel = 0;
+
+ /* Set a flag to prevent us from sending a CANCEL (bug 407) */
+ lock_res_and_lock(lock);
+ /* Check that lock is not granted or failed, we might race. */
+ if ((lock->l_req_mode != lock->l_granted_mode) &&
+ !(lock->l_flags & LDLM_FL_FAILED)) {
+ /* Make sure that this lock will not be found by a raced
+ * bl_ast and that an -EINVAL reply is sent to the server anyway.
+ * bug 17645 */
+ lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED |
+ LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING;
+ need_cancel = 1;
+ }
+ unlock_res_and_lock(lock);
+
+ if (need_cancel)
+ LDLM_DEBUG(lock,
+ "setting FL_LOCAL_ONLY | LDLM_FL_FAILED | "
+ "LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING");
+ else
+ LDLM_DEBUG(lock, "lock was granted or failed in race");
+
+ ldlm_lock_decref_internal(lock, mode);
+
+ /* XXX - HACK because we shouldn't call ldlm_lock_destroy()
+ * from llite/file.c/ll_file_flock(). */
+ /* This code makes up for the fact that we do not have a blocking
+ * handler on the client for flock locks. As such, this is the place
+ * where we must completely kill failed locks (those interrupted and
+ * those that were waiting to be granted when the server evicted us). */
+ if (lock->l_resource->lr_type == LDLM_FLOCK) {
+ lock_res_and_lock(lock);
+ ldlm_resource_unlink_lock(lock);
+ ldlm_lock_destroy_nolock(lock);
+ unlock_res_and_lock(lock);
+ }
+}
+
+/**
+ * Finishing portion of client lock enqueue code.
+ *
+ * Called after receiving reply from server.
+ */
+int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
+ ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
+ __u64 *flags, void *lvb, __u32 lvb_len,
+ struct lustre_handle *lockh, int rc)
+{
+ struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
+ int is_replay = *flags & LDLM_FL_REPLAY;
+ struct ldlm_lock *lock;
+ struct ldlm_reply *reply;
+ int cleanup_phase = 1;
+ int size = 0;
+
+ lock = ldlm_handle2lock(lockh);
+ /* ldlm_cli_enqueue is holding a reference on this lock. */
+ if (!lock) {
+ LASSERT(type == LDLM_FLOCK);
+ return -ENOLCK;
+ }
+
+ LASSERTF(ergo(lvb_len != 0, lvb_len == lock->l_lvb_len),
+ "lvb_len = %d, l_lvb_len = %d\n", lvb_len, lock->l_lvb_len);
+
+ if (rc != ELDLM_OK) {
+ LASSERT(!is_replay);
+ LDLM_DEBUG(lock, "client-side enqueue END (%s)",
+ rc == ELDLM_LOCK_ABORTED ? "ABORTED" : "FAILED");
+
+ if (rc != ELDLM_LOCK_ABORTED)
+ GOTO(cleanup, rc);
+ }
+
+ /* Before we return, swab the reply */
+ reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
+ if (reply == NULL)
+ GOTO(cleanup, rc = -EPROTO);
+
+ if (lvb_len != 0) {
+ LASSERT(lvb != NULL);
+
+ size = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB,
+ RCL_SERVER);
+ if (size < 0) {
+ LDLM_ERROR(lock, "Fail to get lvb_len, rc = %d", size);
+ GOTO(cleanup, rc = size);
+ } else if (unlikely(size > lvb_len)) {
+ LDLM_ERROR(lock, "Replied LVB is larger than "
+ "expectation, expected = %d, replied = %d",
+ lvb_len, size);
+ GOTO(cleanup, rc = -EINVAL);
+ }
+ }
+
+ if (rc == ELDLM_LOCK_ABORTED) {
+ if (lvb_len != 0)
+ rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
+ lvb, size);
+ GOTO(cleanup, rc = (rc != 0 ? rc : ELDLM_LOCK_ABORTED));
+ }
+
+ /* lock enqueued on the server */
+ cleanup_phase = 0;
+
+ lock_res_and_lock(lock);
+ /* Key change rehash lock in per-export hash with new key */
+ if (exp->exp_lock_hash) {
+ /* In the function below, .hs_keycmp resolves to
+ * ldlm_export_lock_keycmp() */
+ /* coverity[overrun-buffer-val] */
+ cfs_hash_rehash_key(exp->exp_lock_hash,
+ &lock->l_remote_handle,
+ &reply->lock_handle,
+ &lock->l_exp_hash);
+ } else {
+ lock->l_remote_handle = reply->lock_handle;
+ }
+
+ *flags = ldlm_flags_from_wire(reply->lock_flags);
+ lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
+ LDLM_INHERIT_FLAGS);
+ /* move NO_TIMEOUT flag to the lock to force ldlm_lock_match()
+ * to wait with no timeout as well */
+ lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
+ LDLM_FL_NO_TIMEOUT);
+ unlock_res_and_lock(lock);
+
+ CDEBUG(D_INFO, "local: %p, remote cookie: "LPX64", flags: 0x%llx\n",
+ lock, reply->lock_handle.cookie, *flags);
+
+ /* If enqueue returned a blocked lock but the completion handler has
+ * already run, then it fixed up the resource and we don't need to do it
+ * again. */
+ if ((*flags) & LDLM_FL_LOCK_CHANGED) {
+ int newmode = reply->lock_desc.l_req_mode;
+ LASSERT(!is_replay);
+ if (newmode && newmode != lock->l_req_mode) {
+ LDLM_DEBUG(lock, "server returned different mode %s",
+ ldlm_lockname[newmode]);
+ lock->l_req_mode = newmode;
+ }
+
+ if (memcmp(reply->lock_desc.l_resource.lr_name.name,
+ lock->l_resource->lr_name.name,
+ sizeof(struct ldlm_res_id))) {
+ CDEBUG(D_INFO, "remote intent success, locking "
+ "(%ld,%ld,%ld) instead of "
+ "(%ld,%ld,%ld)\n",
+ (long)reply->lock_desc.l_resource.lr_name.name[0],
+ (long)reply->lock_desc.l_resource.lr_name.name[1],
+ (long)reply->lock_desc.l_resource.lr_name.name[2],
+ (long)lock->l_resource->lr_name.name[0],
+ (long)lock->l_resource->lr_name.name[1],
+ (long)lock->l_resource->lr_name.name[2]);
+
+ rc = ldlm_lock_change_resource(ns, lock,
+ &reply->lock_desc.l_resource.lr_name);
+ if (rc || lock->l_resource == NULL)
+ GOTO(cleanup, rc = -ENOMEM);
+ LDLM_DEBUG(lock, "client-side enqueue, new resource");
+ }
+ if (with_policy)
+ if (!(type == LDLM_IBITS &&
+ !(exp_connect_flags(exp) & OBD_CONNECT_IBITS)))
+ /* We assume lock type cannot change on server */
+ ldlm_convert_policy_to_local(exp,
+ lock->l_resource->lr_type,
+ &reply->lock_desc.l_policy_data,
+ &lock->l_policy_data);
+ if (type != LDLM_PLAIN)
+ LDLM_DEBUG(lock,"client-side enqueue, new policy data");
+ }
+
+ if ((*flags) & LDLM_FL_AST_SENT ||
+ /* Cancel extent locks as soon as possible on a liblustre client,
+ * because it cannot handle asynchronous ASTs robustly (see
+ * bug 7311). */
+ (LIBLUSTRE_CLIENT && type == LDLM_EXTENT)) {
+ lock_res_and_lock(lock);
+ lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
+ unlock_res_and_lock(lock);
+ LDLM_DEBUG(lock, "enqueue reply includes blocking AST");
+ }
+
+ /* If the lock has already been granted by a completion AST, don't
+ * clobber the LVB with an older one. */
+ if (lvb_len != 0) {
+ /* We must lock or a racing completion might update lvb without
+ * letting us know and we'll clobber the correct value.
+ * Cannot unlock after the check either, as that still leaves
+ * a tiny window for completion to get in */
+ lock_res_and_lock(lock);
+ if (lock->l_req_mode != lock->l_granted_mode)
+ rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
+ lock->l_lvb_data, size);
+ unlock_res_and_lock(lock);
+ if (rc < 0) {
+ cleanup_phase = 1;
+ GOTO(cleanup, rc);
+ }
+ }
+
+ if (!is_replay) {
+ rc = ldlm_lock_enqueue(ns, &lock, NULL, flags);
+ if (lock->l_completion_ast != NULL) {
+ int err = lock->l_completion_ast(lock, *flags, NULL);
+ if (!rc)
+ rc = err;
+ if (rc)
+ cleanup_phase = 1;
+ }
+ }
+
+ if (lvb_len && lvb != NULL) {
+ /* Copy the LVB here, and not earlier, because the completion
+ * AST (if any) can override what we got in the reply */
+ memcpy(lvb, lock->l_lvb_data, lvb_len);
+ }
+
+ LDLM_DEBUG(lock, "client-side enqueue END");
+cleanup:
+ if (cleanup_phase == 1 && rc)
+ failed_lock_cleanup(ns, lock, mode);
+ /* Put lock 2 times, the second reference is held by ldlm_cli_enqueue */
+ LDLM_LOCK_PUT(lock);
+ LDLM_LOCK_RELEASE(lock);
+ return rc;
+}
+EXPORT_SYMBOL(ldlm_cli_enqueue_fini);
+
+/**
+ * Estimate number of lock handles that would fit into request of given
+ * size. PAGE_CACHE_SIZE-512 is to allow TCP/IP and LNET headers to fit
+ * into a single page on the send/receive side. XXX: 512 should be changed
+ * to a more adequate value.
+ */
+static inline int ldlm_req_handles_avail(int req_size, int off)
+{
+ int avail;
+
+ avail = min_t(int, LDLM_MAXREQSIZE, PAGE_CACHE_SIZE - 512) - req_size;
+ if (likely(avail >= 0))
+ avail /= (int)sizeof(struct lustre_handle);
+ else
+ avail = 0;
+ avail += LDLM_LOCKREQ_HANDLES - off;
+
+ return avail;
+}
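+
+/*
+ * Worked example (illustrative only, with made-up numbers): assuming
+ * LDLM_MAXREQSIZE is not the limiting factor and PAGE_CACHE_SIZE is 4096,
+ * the usable buffer is 4096 - 512 = 3584 bytes. If the request already
+ * occupies req_size = 1024 bytes, another (3584 - 1024) /
+ * sizeof(struct lustre_handle) handles fit, plus the
+ * LDLM_LOCKREQ_HANDLES - off handles that are part of the fixed
+ * ldlm_request itself.
+ */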
+
+static inline int ldlm_capsule_handles_avail(struct req_capsule *pill,
+ enum req_location loc,
+ int off)
+{
+ int size = req_capsule_msg_size(pill, loc);
+ return ldlm_req_handles_avail(size, off);
+}
+
+static inline int ldlm_format_handles_avail(struct obd_import *imp,
+ const struct req_format *fmt,
+ enum req_location loc, int off)
+{
+ int size = req_capsule_fmt_size(imp->imp_msg_magic, fmt, loc);
+ return ldlm_req_handles_avail(size, off);
+}
+
+/**
+ * Cancel LRU locks and pack them into the enqueue request. Also pack the
+ * given \a count locks from \a cancels into the request.
+ *
+ * This is to be called by functions preparing their own requests that
+ * might contain lists of locks to cancel in addition to actual operation
+ * that needs to be performed.
+ */
+int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
+ int version, int opc, int canceloff,
+ struct list_head *cancels, int count)
+{
+ struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
+ struct req_capsule *pill = &req->rq_pill;
+ struct ldlm_request *dlm = NULL;
+ int flags, avail, to_free, pack = 0;
+ LIST_HEAD(head);
+ int rc;
+
+ if (cancels == NULL)
+ cancels = &head;
+ if (ns_connect_cancelset(ns)) {
+ /* Estimate the amount of available space in the request. */
+ req_capsule_filled_sizes(pill, RCL_CLIENT);
+ avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff);
+
+ flags = ns_connect_lru_resize(ns) ?
+ LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
+ to_free = !ns_connect_lru_resize(ns) &&
+ opc == LDLM_ENQUEUE ? 1 : 0;
+
+ /* Cancel LRU locks here _only_ if the server supports
+ * EARLY_CANCEL. Otherwise we have to send extra CANCEL
+ * RPC, which will make us slower. */
+ if (avail > count)
+ count += ldlm_cancel_lru_local(ns, cancels, to_free,
+ avail - count, 0, flags);
+ if (avail > count)
+ pack = count;
+ else
+ pack = avail;
+ req_capsule_set_size(pill, &RMF_DLM_REQ, RCL_CLIENT,
+ ldlm_request_bufsize(pack, opc));
+ }
+
+ rc = ptlrpc_request_pack(req, version, opc);
+ if (rc) {
+ ldlm_lock_list_put(cancels, l_bl_ast, count);
+ return rc;
+ }
+
+ if (ns_connect_cancelset(ns)) {
+ if (canceloff) {
+ dlm = req_capsule_client_get(pill, &RMF_DLM_REQ);
+ LASSERT(dlm);
+ /* Skip the first lock handle in ldlm_request_pack();
+ * this method will increment @lock_count according
+ * to the number of lock handles actually written to
+ * the buffer. */
+ dlm->lock_count = canceloff;
+ }
+ /* Pack into the request @pack lock handles. */
+ ldlm_cli_cancel_list(cancels, pack, req, 0);
+ /* Prepare and send separate cancel RPC for others. */
+ ldlm_cli_cancel_list(cancels, count - pack, NULL, 0);
+ } else {
+ ldlm_lock_list_put(cancels, l_bl_ast, count);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ldlm_prep_elc_req);
+
+int ldlm_prep_enqueue_req(struct obd_export *exp, struct ptlrpc_request *req,
+ struct list_head *cancels, int count)
+{
+ return ldlm_prep_elc_req(exp, req, LUSTRE_DLM_VERSION, LDLM_ENQUEUE,
+ LDLM_ENQUEUE_CANCEL_OFF, cancels, count);
+}
+EXPORT_SYMBOL(ldlm_prep_enqueue_req);
+
+struct ptlrpc_request *ldlm_enqueue_pack(struct obd_export *exp, int lvb_len)
+{
+ struct ptlrpc_request *req;
+ int rc;
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE);
+ if (req == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return ERR_PTR(rc);
+ }
+
+ req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, lvb_len);
+ ptlrpc_request_set_replen(req);
+ return req;
+}
+EXPORT_SYMBOL(ldlm_enqueue_pack);
+
+/**
+ * Client-side lock enqueue.
+ *
+ * If a request has some specific initialisation it is passed in \a reqp,
+ * otherwise it is created in ldlm_cli_enqueue.
+ *
+ * Supports sync and async requests, pass \a async flag accordingly. If a
+ * request was created in ldlm_cli_enqueue and it is an async request,
+ * it is passed back to the caller in \a reqp.
+ */
+int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
+ struct ldlm_enqueue_info *einfo,
+ const struct ldlm_res_id *res_id,
+ ldlm_policy_data_t const *policy, __u64 *flags,
+ void *lvb, __u32 lvb_len, enum lvb_type lvb_type,
+ struct lustre_handle *lockh, int async)
+{
+ struct ldlm_namespace *ns;
+ struct ldlm_lock *lock;
+ struct ldlm_request *body;
+ int is_replay = *flags & LDLM_FL_REPLAY;
+ int req_passed_in = 1;
+ int rc, err;
+ struct ptlrpc_request *req;
+
+ LASSERT(exp != NULL);
+
+ ns = exp->exp_obd->obd_namespace;
+
+ /* If we're replaying this lock, just check some invariants.
+ * If we're creating a new lock, get everything all setup nice. */
+ if (is_replay) {
+ lock = ldlm_handle2lock_long(lockh, 0);
+ LASSERT(lock != NULL);
+ LDLM_DEBUG(lock, "client-side enqueue START");
+ LASSERT(exp == lock->l_conn_export);
+ } else {
+ const struct ldlm_callback_suite cbs = {
+ .lcs_completion = einfo->ei_cb_cp,
+ .lcs_blocking = einfo->ei_cb_bl,
+ .lcs_glimpse = einfo->ei_cb_gl
+ };
+ lock = ldlm_lock_create(ns, res_id, einfo->ei_type,
+ einfo->ei_mode, &cbs, einfo->ei_cbdata,
+ lvb_len, lvb_type);
+ if (lock == NULL)
+ return -ENOMEM;
+ /* for the local lock, add the reference */
+ ldlm_lock_addref_internal(lock, einfo->ei_mode);
+ ldlm_lock2handle(lock, lockh);
+ if (policy != NULL) {
+ /* INODEBITS_INTEROP: If the server does not support
+ * inodebits, we will request a plain lock in the
+ * descriptor (ldlm_lock2desc() below) but use an
+ * inodebits lock internally with both bits set.
+ */
+ if (einfo->ei_type == LDLM_IBITS &&
+ !(exp_connect_flags(exp) &
+ OBD_CONNECT_IBITS))
+ lock->l_policy_data.l_inodebits.bits =
+ MDS_INODELOCK_LOOKUP |
+ MDS_INODELOCK_UPDATE;
+ else
+ lock->l_policy_data = *policy;
+ }
+
+ if (einfo->ei_type == LDLM_EXTENT)
+ lock->l_req_extent = policy->l_extent;
+ LDLM_DEBUG(lock, "client-side enqueue START, flags %llx\n",
+ *flags);
+ }
+
+ lock->l_conn_export = exp;
+ lock->l_export = NULL;
+ lock->l_blocking_ast = einfo->ei_cb_bl;
+ lock->l_flags |= (*flags & LDLM_FL_NO_LRU);
+
+ /* lock not sent to server yet */
+
+ if (reqp == NULL || *reqp == NULL) {
+ req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
+ &RQF_LDLM_ENQUEUE,
+ LUSTRE_DLM_VERSION,
+ LDLM_ENQUEUE);
+ if (req == NULL) {
+ failed_lock_cleanup(ns, lock, einfo->ei_mode);
+ LDLM_LOCK_RELEASE(lock);
+ return -ENOMEM;
+ }
+ req_passed_in = 0;
+ if (reqp)
+ *reqp = req;
+ } else {
+ int len;
+
+ req = *reqp;
+ len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ,
+ RCL_CLIENT);
+ LASSERTF(len >= sizeof(*body), "buflen[%d] = %d, not %d\n",
+ DLM_LOCKREQ_OFF, len, (int)sizeof(*body));
+ }
+
+ /* Dump lock data into the request buffer */
+ body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
+ ldlm_lock2desc(lock, &body->lock_desc);
+ body->lock_flags = ldlm_flags_to_wire(*flags);
+ body->lock_handle[0] = *lockh;
+
+ /* Continue as normal. */
+ if (!req_passed_in) {
+ if (lvb_len > 0)
+ req_capsule_extend(&req->rq_pill,
+ &RQF_LDLM_ENQUEUE_LVB);
+ req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
+ lvb_len);
+ ptlrpc_request_set_replen(req);
+ }
+
+ /*
+ * Liblustre client doesn't get extent locks, except for O_APPEND case
+ * where [0, OBD_OBJECT_EOF] lock is taken, or truncate, where
+ * [i_size, OBD_OBJECT_EOF] lock is taken.
+ */
+ LASSERT(ergo(LIBLUSTRE_CLIENT, einfo->ei_type != LDLM_EXTENT ||
+ policy->l_extent.end == OBD_OBJECT_EOF));
+
+ if (async) {
+ LASSERT(reqp != NULL);
+ return 0;
+ }
+
+ LDLM_DEBUG(lock, "sending request");
+
+ rc = ptlrpc_queue_wait(req);
+
+ err = ldlm_cli_enqueue_fini(exp, req, einfo->ei_type, policy ? 1 : 0,
+ einfo->ei_mode, flags, lvb, lvb_len,
+ lockh, rc);
+
+ /* If ldlm_cli_enqueue_fini did not find the lock, we need to free
+ * one reference that we took */
+ if (err == -ENOLCK)
+ LDLM_LOCK_RELEASE(lock);
+ else
+ rc = err;
+
+ if (!req_passed_in && req != NULL) {
+ ptlrpc_req_finished(req);
+ if (reqp)
+ *reqp = NULL;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(ldlm_cli_enqueue);
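+
+/*
+ * Illustrative sketch (not from the original patch): a hypothetical
+ * synchronous client-side enqueue using the generic ASTs from this file:
+ *
+ *   struct ldlm_enqueue_info einfo = {
+ *           .ei_type  = LDLM_PLAIN,
+ *           .ei_mode  = LCK_PR,
+ *           .ei_cb_bl = ldlm_blocking_ast,
+ *           .ei_cb_cp = ldlm_completion_ast,
+ *   };
+ *   struct lustre_handle lockh;
+ *   __u64 flags = 0;
+ *   int rc;
+ *
+ *   rc = ldlm_cli_enqueue(exp, NULL, &einfo, &res_id, NULL, &flags,
+ *                         NULL, 0, LVB_T_NONE, &lockh, 0);
+ *
+ * "exp" and "res_id" are assumptions of this example. With async == 0 the
+ * request is allocated, sent and finished internally, and the call returns
+ * only after ldlm_cli_enqueue_fini() has processed the reply.
+ */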
+
+static int ldlm_cli_convert_local(struct ldlm_lock *lock, int new_mode,
+ __u32 *flags)
+{
+ struct ldlm_resource *res;
+ int rc;
+
+ if (ns_is_client(ldlm_lock_to_ns(lock))) {
+ CERROR("Trying to cancel local lock\n");
+ LBUG();
+ }
+ LDLM_DEBUG(lock, "client-side local convert");
+
+ res = ldlm_lock_convert(lock, new_mode, flags);
+ if (res) {
+ ldlm_reprocess_all(res);
+ rc = 0;
+ } else {
+ rc = LUSTRE_EDEADLK;
+ }
+ LDLM_DEBUG(lock, "client-side local convert handler END");
+ LDLM_LOCK_PUT(lock);
+ return rc;
+}
+
+/* FIXME: one of ldlm_cli_convert or the server side should reject attempted
+ * conversion of locks which are on the waiting or converting queue */
+/* Caller of this code is supposed to take care of lock readers/writers
+ accounting */
+int ldlm_cli_convert(struct lustre_handle *lockh, int new_mode, __u32 *flags)
+{
+ struct ldlm_request *body;
+ struct ldlm_reply *reply;
+ struct ldlm_lock *lock;
+ struct ldlm_resource *res;
+ struct ptlrpc_request *req;
+ int rc;
+
+ lock = ldlm_handle2lock(lockh);
+ if (!lock) {
+ LBUG();
+ return -EINVAL;
+ }
+ *flags = 0;
+
+ if (lock->l_conn_export == NULL)
+ return ldlm_cli_convert_local(lock, new_mode, flags);
+
+ LDLM_DEBUG(lock, "client-side convert");
+
+ req = ptlrpc_request_alloc_pack(class_exp2cliimp(lock->l_conn_export),
+ &RQF_LDLM_CONVERT, LUSTRE_DLM_VERSION,
+ LDLM_CONVERT);
+ if (req == NULL) {
+ LDLM_LOCK_PUT(lock);
+ return -ENOMEM;
+ }
+
+ body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
+ body->lock_handle[0] = lock->l_remote_handle;
+
+ body->lock_desc.l_req_mode = new_mode;
+ body->lock_flags = ldlm_flags_to_wire(*flags);
+
+
+ ptlrpc_request_set_replen(req);
+ rc = ptlrpc_queue_wait(req);
+ if (rc != ELDLM_OK)
+ GOTO(out, rc);
+
+ reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
+ if (reply == NULL)
+ GOTO(out, rc = -EPROTO);
+
+ if (req->rq_status)
+ GOTO(out, rc = req->rq_status);
+
+ res = ldlm_lock_convert(lock, new_mode, &reply->lock_flags);
+ if (res != NULL) {
+ ldlm_reprocess_all(res);
+ /* Go to sleep until the lock is granted. */
+ /* FIXME: or cancelled. */
+ if (lock->l_completion_ast) {
+ rc = lock->l_completion_ast(lock, LDLM_FL_WAIT_NOREPROC,
+ NULL);
+ if (rc)
+ GOTO(out, rc);
+ }
+ } else {
+ rc = LUSTRE_EDEADLK;
+ }
+ out:
+ LDLM_LOCK_PUT(lock);
+ ptlrpc_req_finished(req);
+ return rc;
+}
+EXPORT_SYMBOL(ldlm_cli_convert);
+
+/**
+ * Cancel locks locally.
+ * Returns:
+ * \retval LDLM_FL_LOCAL_ONLY if there is no need for a CANCEL RPC to the server
+ * \retval LDLM_FL_CANCELING otherwise;
+ * \retval LDLM_FL_BL_AST if there is a need for a separate CANCEL RPC.
+ */
+static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock)
+{
+ __u64 rc = LDLM_FL_LOCAL_ONLY;
+
+ if (lock->l_conn_export) {
+ bool local_only;
+
+ LDLM_DEBUG(lock, "client-side cancel");
+ /* Set this flag to prevent others from getting new references*/
+ lock_res_and_lock(lock);
+ lock->l_flags |= LDLM_FL_CBPENDING;
+ local_only = !!(lock->l_flags &
+ (LDLM_FL_LOCAL_ONLY|LDLM_FL_CANCEL_ON_BLOCK));
+ ldlm_cancel_callback(lock);
+ rc = (lock->l_flags & LDLM_FL_BL_AST) ?
+ LDLM_FL_BL_AST : LDLM_FL_CANCELING;
+ unlock_res_and_lock(lock);
+
+ if (local_only) {
+ CDEBUG(D_DLMTRACE, "not sending request (at caller's "
+ "instruction)\n");
+ rc = LDLM_FL_LOCAL_ONLY;
+ }
+ ldlm_lock_cancel(lock);
+ } else {
+ if (ns_is_client(ldlm_lock_to_ns(lock))) {
+ LDLM_ERROR(lock, "Trying to cancel local lock");
+ LBUG();
+ }
+ LDLM_DEBUG(lock, "server-side local cancel");
+ ldlm_lock_cancel(lock);
+ ldlm_reprocess_all(lock->l_resource);
+ }
+
+ return rc;
+}
+
+/**
+ * Pack \a count locks in \a head into ldlm_request buffer of request \a req.
+ */
+static void ldlm_cancel_pack(struct ptlrpc_request *req,
+ struct list_head *head, int count)
+{
+ struct ldlm_request *dlm;
+ struct ldlm_lock *lock;
+ int max, packed = 0;
+
+ dlm = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
+ LASSERT(dlm != NULL);
+
+ /* Check the room in the request buffer. */
+ max = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT) -
+ sizeof(struct ldlm_request);
+ max /= sizeof(struct lustre_handle);
+ max += LDLM_LOCKREQ_HANDLES;
+ LASSERT(max >= dlm->lock_count + count);
+
+ /* XXX: it would be better to pack lock handles grouped by resource.
+ * so that the server cancel would call filter_lvbo_update() less
+ * frequently. */
+ list_for_each_entry(lock, head, l_bl_ast) {
+ if (!count--)
+ break;
+ LASSERT(lock->l_conn_export);
+ /* Pack the lock handle to the given request buffer. */
+ LDLM_DEBUG(lock, "packing");
+ dlm->lock_handle[dlm->lock_count++] = lock->l_remote_handle;
+ packed++;
+ }
+ CDEBUG(D_DLMTRACE, "%d locks packed\n", packed);
+}
+
+/**
+ * Prepare and send a batched cancel RPC. It will include the handles of
+ * \a count locks from the \a cancels list. */
+int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *cancels,
+ int count, ldlm_cancel_flags_t flags)
+{
+ struct ptlrpc_request *req = NULL;
+ struct obd_import *imp;
+ int free, sent = 0;
+ int rc = 0;
+
+ LASSERT(exp != NULL);
+ LASSERT(count > 0);
+
+ CFS_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL, cfs_fail_val);
+
+ if (CFS_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_RACE))
+ return count;
+
+ free = ldlm_format_handles_avail(class_exp2cliimp(exp),
+ &RQF_LDLM_CANCEL, RCL_CLIENT, 0);
+ if (count > free)
+ count = free;
+
+ while (1) {
+ imp = class_exp2cliimp(exp);
+ if (imp == NULL || imp->imp_invalid) {
+ CDEBUG(D_DLMTRACE,
+ "skipping cancel on invalid import %p\n", imp);
+ return count;
+ }
+
+ req = ptlrpc_request_alloc(imp, &RQF_LDLM_CANCEL);
+ if (req == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ req_capsule_filled_sizes(&req->rq_pill, RCL_CLIENT);
+ req_capsule_set_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT,
+ ldlm_request_bufsize(count, LDLM_CANCEL));
+
+ rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_CANCEL);
+ if (rc) {
+ ptlrpc_request_free(req);
+ GOTO(out, rc);
+ }
+
+ req->rq_request_portal = LDLM_CANCEL_REQUEST_PORTAL;
+ req->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;
+ ptlrpc_at_set_req_timeout(req);
+
+ ldlm_cancel_pack(req, cancels, count);
+
+ ptlrpc_request_set_replen(req);
+ if (flags & LCF_ASYNC) {
+ ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
+ sent = count;
+ GOTO(out, 0);
+ } else {
+ rc = ptlrpc_queue_wait(req);
+ }
+ if (rc == LUSTRE_ESTALE) {
+ CDEBUG(D_DLMTRACE, "client/server (nid %s) "
+ "out of sync -- not fatal\n",
+ libcfs_nid2str(req->rq_import->
+ imp_connection->c_peer.nid));
+ rc = 0;
+ } else if (rc == -ETIMEDOUT && /* check there was no reconnect*/
+ req->rq_import_generation == imp->imp_generation) {
+ ptlrpc_req_finished(req);
+ continue;
+ } else if (rc != ELDLM_OK) {
+ /* -ESHUTDOWN is common on umount */
+ CDEBUG_LIMIT(rc == -ESHUTDOWN ? D_DLMTRACE : D_ERROR,
+ "Got rc %d from cancel RPC: "
+ "canceling anyway\n", rc);
+ break;
+ }
+ sent = count;
+ break;
+ }
+
+ ptlrpc_req_finished(req);
+out:
+ return sent ? sent : rc;
+}
+EXPORT_SYMBOL(ldlm_cli_cancel_req);
+
+static inline struct ldlm_pool *ldlm_imp2pl(struct obd_import *imp)
+{
+ LASSERT(imp != NULL);
+ return &imp->imp_obd->obd_namespace->ns_pool;
+}
+
+/**
+ * Update client's OBD pool related fields with new SLV and Limit from \a req.
+ */
+int ldlm_cli_update_pool(struct ptlrpc_request *req)
+{
+ struct obd_device *obd;
+ __u64 new_slv;
+ __u32 new_limit;
+
+ if (unlikely(!req->rq_import || !req->rq_import->imp_obd ||
+ !imp_connect_lru_resize(req->rq_import))) {
+ /* Do nothing for corner cases. */
+ return 0;
+ }
+
+ /* In some cases RPC may contain SLV and limit zeroed out. This
+ * is the case when server does not support LRU resize feature.
+ * This is also possible in some recovery cases when server-side
+ * reqs have no reference to the OBD export and thus access to
+ * server-side namespace is not possible. */
+ if (lustre_msg_get_slv(req->rq_repmsg) == 0 ||
+ lustre_msg_get_limit(req->rq_repmsg) == 0) {
+ DEBUG_REQ(D_HA, req, "Zero SLV or Limit found "
+ "(SLV: "LPU64", Limit: %u)",
+ lustre_msg_get_slv(req->rq_repmsg),
+ lustre_msg_get_limit(req->rq_repmsg));
+ return 0;
+ }
+
+ new_limit = lustre_msg_get_limit(req->rq_repmsg);
+ new_slv = lustre_msg_get_slv(req->rq_repmsg);
+ obd = req->rq_import->imp_obd;
+
+ /* Set new SLV and limit in OBD fields to make them accessible
+ * to the pool thread. We do not access obd_namespace and pool
+ * directly here as there is no reliable way to make sure that
+ * they are still alive at cleanup time. Evil races are possible
+ * which may cause Oops at that time. */
+ write_lock(&obd->obd_pool_lock);
+ obd->obd_pool_slv = new_slv;
+ obd->obd_pool_limit = new_limit;
+ write_unlock(&obd->obd_pool_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ldlm_cli_update_pool);
+
+/**
+ * Client side lock cancel.
+ *
+ * Lock must not have any readers or writers by this time.
+ */
+int ldlm_cli_cancel(struct lustre_handle *lockh,
+ ldlm_cancel_flags_t cancel_flags)
+{
+ struct obd_export *exp;
+ int avail, flags, count = 1;
+ __u64 rc = 0;
+ struct ldlm_namespace *ns;
+ struct ldlm_lock *lock;
+ LIST_HEAD(cancels);
+
+ /* concurrent cancels on the same handle can happen */
+ lock = ldlm_handle2lock_long(lockh, LDLM_FL_CANCELING);
+ if (lock == NULL) {
+ LDLM_DEBUG_NOLOCK("lock is already being destroyed\n");
+ return 0;
+ }
+
+ rc = ldlm_cli_cancel_local(lock);
+ if (rc == LDLM_FL_LOCAL_ONLY) {
+ LDLM_LOCK_RELEASE(lock);
+ return 0;
+ }
+ /* Even if the lock is marked as LDLM_FL_BL_AST, this is a LDLM_CANCEL
+ * RPC which goes to canceld portal, so we can cancel other LRU locks
+ * here and send them all as one LDLM_CANCEL RPC. */
+ LASSERT(list_empty(&lock->l_bl_ast));
+ list_add(&lock->l_bl_ast, &cancels);
+
+ exp = lock->l_conn_export;
+ if (exp_connect_cancelset(exp)) {
+ avail = ldlm_format_handles_avail(class_exp2cliimp(exp),
+ &RQF_LDLM_CANCEL,
+ RCL_CLIENT, 0);
+ LASSERT(avail > 0);
+
+ ns = ldlm_lock_to_ns(lock);
+ flags = ns_connect_lru_resize(ns) ?
+ LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
+ count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1,
+ LCF_BL_AST, flags);
+ }
+ ldlm_cli_cancel_list(&cancels, count, NULL, cancel_flags);
+ return 0;
+}
+EXPORT_SYMBOL(ldlm_cli_cancel);
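+
+/*
+ * Illustrative note (not part of the original patch): a typical caller
+ * converts the lock to a handle first, e.g.
+ *
+ *   struct lustre_handle lockh;
+ *
+ *   ldlm_lock2handle(lock, &lockh);
+ *   rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
+ *
+ * exactly as ldlm_blocking_ast_nocheck() does above; additional unused LRU
+ * locks may then be piggy-backed onto the same CANCEL RPC.
+ */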
+
+/**
+ * Locally cancel up to \a count locks in list \a cancels.
+ * Return the number of cancelled locks.
+ */
+int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
+ ldlm_cancel_flags_t flags)
+{
+ LIST_HEAD(head);
+ struct ldlm_lock *lock, *next;
+ int left = 0, bl_ast = 0;
+ __u64 rc;
+
+ left = count;
+ list_for_each_entry_safe(lock, next, cancels, l_bl_ast) {
+ if (left-- == 0)
+ break;
+
+ if (flags & LCF_LOCAL) {
+ rc = LDLM_FL_LOCAL_ONLY;
+ ldlm_lock_cancel(lock);
+ } else {
+ rc = ldlm_cli_cancel_local(lock);
+ }
+ /* Until we have compound requests and can send LDLM_CANCEL
+ * requests batched with generic RPCs, we need to send cancels
+ * with the LDLM_FL_BL_AST flag in a separate RPC from
+ * the one being generated now. */
+ if (!(flags & LCF_BL_AST) && (rc == LDLM_FL_BL_AST)) {
+ LDLM_DEBUG(lock, "Cancel lock separately");
+ list_del_init(&lock->l_bl_ast);
+ list_add(&lock->l_bl_ast, &head);
+ bl_ast++;
+ continue;
+ }
+ if (rc == LDLM_FL_LOCAL_ONLY) {
+ /* CANCEL RPC should not be sent to server. */
+ list_del_init(&lock->l_bl_ast);
+ LDLM_LOCK_RELEASE(lock);
+ count--;
+ }
+ }
+ if (bl_ast > 0) {
+ count -= bl_ast;
+ ldlm_cli_cancel_list(&head, bl_ast, NULL, 0);
+ }
+
+ return count;
+}
+EXPORT_SYMBOL(ldlm_cli_cancel_list_local);
+
+/**
+ * Cancel as many locks as possible w/o sending any RPCs (e.g. to write back
+ * dirty data, to close a file, ...) or waiting for any RPCs in-flight (e.g.
+ * readahead requests, ...)
+ */
+static ldlm_policy_res_t ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock,
+ int unused, int added,
+ int count)
+{
+ ldlm_policy_res_t result = LDLM_POLICY_CANCEL_LOCK;
+ ldlm_cancel_for_recovery cb = ns->ns_cancel_for_recovery;
+ lock_res_and_lock(lock);
+
+ /* don't check added & count since we want to process all locks
+ * from unused list */
+ switch (lock->l_resource->lr_type) {
+ case LDLM_EXTENT:
+ case LDLM_IBITS:
+ if (cb && cb(lock))
+ break;
+ default:
+ result = LDLM_POLICY_SKIP_LOCK;
+ lock->l_flags |= LDLM_FL_SKIPPED;
+ break;
+ }
+
+ unlock_res_and_lock(lock);
+ return result;
+}
+
+/**
+ * Callback function for the LRU-resize policy. Decides whether to keep
+ * \a lock in the LRU for the current LRU size \a unused, the number
+ * \a added so far in the current scan, and the number of locks to be
+ * preferably canceled \a count.
+ *
+ * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
+ *
+ * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
+ */
+static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock,
+ int unused, int added,
+ int count)
+{
+ cfs_time_t cur = cfs_time_current();
+ struct ldlm_pool *pl = &ns->ns_pool;
+ __u64 slv, lvf, lv;
+ cfs_time_t la;
+
+ /* Stop LRU processing when we reach past @count or have checked all
+ * locks in LRU. */
+ if (count && added >= count)
+ return LDLM_POLICY_KEEP_LOCK;
+
+ slv = ldlm_pool_get_slv(pl);
+ lvf = ldlm_pool_get_lvf(pl);
+ la = cfs_duration_sec(cfs_time_sub(cur,
+ lock->l_last_used));
+ lv = lvf * la * unused;
+
+ /* Inform pool about current CLV to see it via proc. */
+ ldlm_pool_set_clv(pl, lv);
+
+ /* Stop when the SLV has not yet come from the server or lv is
+ * smaller than it. */
+ return (slv == 0 || lv < slv) ?
+ LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
+}
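+
+/*
+ * Worked example (illustrative only, with made-up numbers): with lvf = 100,
+ * a lock last used la = 30 seconds ago and unused = 200 locks in the LRU,
+ * the lock volume is lv = 100 * 30 * 200 = 600000. If the server-provided
+ * SLV is 500000 (< lv), the lock is cancelled; if the SLV is 0 (not yet
+ * received) or larger than lv, the lock is kept and scanning stops.
+ */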
+
+/**
+ * Callback function for the "passed" policy (also used by the shrinker).
+ * Decides whether to keep \a lock in the LRU for the current LRU size
+ * \a unused, the number \a added so far in the current scan, and the number
+ * of locks to be preferably canceled \a count.
+ *
+ * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
+ *
+ * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
+ */
+static ldlm_policy_res_t ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock,
+ int unused, int added,
+ int count)
+{
+ /* Stop LRU processing when we reach past @count or have checked all
+ * locks in LRU. */
+ return (added >= count) ?
+ LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
+}
+
+/**
+ * Callback function for the aged policy. Decides whether to keep \a lock in
+ * the LRU for the current LRU size \a unused, the number \a added so far in
+ * the current scan, and the number of locks to be preferably canceled
+ * \a count.
+ *
+ * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
+ *
+ * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
+ */
+static ldlm_policy_res_t ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock,
+ int unused, int added,
+ int count)
+{
+ /* Stop LRU processing if a young lock is found and we have reached past count */
+ return ((added >= count) &&
+ cfs_time_before(cfs_time_current(),
+ cfs_time_add(lock->l_last_used,
+ ns->ns_max_age))) ?
+ LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
+}
+
+/**
+ * Callback function for the default policy. Decides whether to keep \a lock
+ * in the LRU for the current LRU size \a unused, the number \a added so far
+ * in the current scan, and the number of locks to be preferably canceled
+ * \a count.
+ *
+ * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
+ *
+ * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
+ */
+static ldlm_policy_res_t ldlm_cancel_default_policy(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock,
+ int unused, int added,
+ int count)
+{
+ /* Stop LRU processing when we reach past count or have checked all
+ * locks in LRU. */
+ return (added >= count) ?
+ LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
+}
+
+typedef ldlm_policy_res_t (*ldlm_cancel_lru_policy_t)(struct ldlm_namespace *,
+ struct ldlm_lock *, int,
+ int, int);
+
+static ldlm_cancel_lru_policy_t
+ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
+{
+ if (flags & LDLM_CANCEL_NO_WAIT)
+ return ldlm_cancel_no_wait_policy;
+
+ if (ns_connect_lru_resize(ns)) {
+ if (flags & LDLM_CANCEL_SHRINK)
+ /* We kill passed number of old locks. */
+ return ldlm_cancel_passed_policy;
+ else if (flags & LDLM_CANCEL_LRUR)
+ return ldlm_cancel_lrur_policy;
+ else if (flags & LDLM_CANCEL_PASSED)
+ return ldlm_cancel_passed_policy;
+ } else {
+ if (flags & LDLM_CANCEL_AGED)
+ return ldlm_cancel_aged_policy;
+ }
+
+ return ldlm_cancel_default_policy;
+}
+
+/**
+ * - Free space in LRU for \a count new locks,
+ * redundant unused locks are canceled locally;
+ * - also cancel locally unused aged locks;
+ * - do not cancel more than \a max locks;
+ * - GET the found locks and add them into the \a cancels list.
+ *
+ * A client lock can be added to the l_bl_ast list only when it is
+ * marked LDLM_FL_CANCELING. Otherwise, somebody is already doing
+ * CANCEL. There are the following use cases:
+ * ldlm_cancel_resource_local(), ldlm_cancel_lru_local() and
+ * ldlm_cli_cancel(), which check and set this flag properly. As any
+ * attempt to cancel a lock relies on this flag, the l_bl_ast list is
+ * accessed later without any special locking.
+ *
+ * Calling policies for enabled LRU resize:
+ * ----------------------------------------
+ * flags & LDLM_CANCEL_LRUR - use LRU resize policy (SLV from server) to
+ * cancel not more than \a count locks;
+ *
+ * flags & LDLM_CANCEL_PASSED - cancel \a count number of old locks (located at
+ * the beginning of LRU list);
+ *
+ * flags & LDLM_CANCEL_SHRINK - cancel not more than \a count locks according to
+ * the memory pressure policy function;
+ *
+ * flags & LDLM_CANCEL_AGED - cancel \a count locks according to "aged policy".
+ *
+ * flags & LDLM_CANCEL_NO_WAIT - cancel as many unused locks as possible
+ * (typically before replaying locks) w/o
+ * sending any RPCs or waiting for any
+ * outstanding RPC to complete.
+ */
+static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, struct list_head *cancels,
+ int count, int max, int flags)
+{
+ ldlm_cancel_lru_policy_t pf;
+ struct ldlm_lock *lock, *next;
+ int added = 0, unused, remained;
+
+ spin_lock(&ns->ns_lock);
+ unused = ns->ns_nr_unused;
+ remained = unused;
+
+ if (!ns_connect_lru_resize(ns))
+ count += unused - ns->ns_max_unused;
+
+ pf = ldlm_cancel_lru_policy(ns, flags);
+ LASSERT(pf != NULL);
+
+ while (!list_empty(&ns->ns_unused_list)) {
+ ldlm_policy_res_t result;
+
+ /* all unused locks */
+ if (remained-- <= 0)
+ break;
+
+ /* For any flags, stop scanning if @max is reached. */
+ if (max && added >= max)
+ break;
+
+ list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
+ l_lru) {
+ /* No locks which got blocking requests. */
+ LASSERT(!(lock->l_flags & LDLM_FL_BL_AST));
+
+ if (flags & LDLM_CANCEL_NO_WAIT &&
+ lock->l_flags & LDLM_FL_SKIPPED)
+ /* already processed */
+ continue;
+
+ /* Somebody is already doing CANCEL. No need for this
+ * lock in LRU, do not traverse it again. */
+ if (!(lock->l_flags & LDLM_FL_CANCELING))
+ break;
+
+ ldlm_lock_remove_from_lru_nolock(lock);
+ }
+ if (&lock->l_lru == &ns->ns_unused_list)
+ break;
+
+ LDLM_LOCK_GET(lock);
+ spin_unlock(&ns->ns_lock);
+ lu_ref_add(&lock->l_reference, __FUNCTION__, current);
+
+ /* Pass the lock through the policy filter and see if it
+ * should stay in LRU.
+ *
+ * Even for shrinker policy we stop scanning if
+ * we find a lock that should stay in the cache.
+ * We should take into account lock age anyway
+ * as a new lock is a valuable resource even if
+ * it has a low weight.
+ *
+ * That is, for shrinker policy we drop only
+ * old locks, but additionally choose them by
+ * their weight. Big extent locks will stay in
+ * the cache. */
+ result = pf(ns, lock, unused, added, count);
+ if (result == LDLM_POLICY_KEEP_LOCK) {
+ lu_ref_del(&lock->l_reference,
+ __FUNCTION__, current);
+ LDLM_LOCK_RELEASE(lock);
+ spin_lock(&ns->ns_lock);
+ break;
+ }
+ if (result == LDLM_POLICY_SKIP_LOCK) {
+ lu_ref_del(&lock->l_reference,
+ __func__, current);
+ LDLM_LOCK_RELEASE(lock);
+ spin_lock(&ns->ns_lock);
+ continue;
+ }
+
+ lock_res_and_lock(lock);
+ /* Check flags again under the lock. */
+ if ((lock->l_flags & LDLM_FL_CANCELING) ||
+ (ldlm_lock_remove_from_lru(lock) == 0)) {
+ /* Another thread is removing lock from LRU, or
+ * somebody is already doing CANCEL, or there
+ * is a blocking request which will send cancel
+ * by itself, or the lock is no longer unused. */
+ unlock_res_and_lock(lock);
+ lu_ref_del(&lock->l_reference,
+ __FUNCTION__, current);
+ LDLM_LOCK_RELEASE(lock);
+ spin_lock(&ns->ns_lock);
+ continue;
+ }
+ LASSERT(!lock->l_readers && !lock->l_writers);
+
+ /* If we have chosen to cancel this lock voluntarily, we
+ * better send cancel notification to server, so that it
+ * frees appropriate state. This might lead to a race
+ * where while we are doing cancel here, server is also
+ * silently cancelling this lock. */
+ lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
+
+ /* Setting the CBPENDING flag is a little misleading,
+ * but prevents an important race; namely, once
+ * CBPENDING is set, the lock can accumulate no more
+ * readers/writers. Since readers and writers are
+ * already zero here, ldlm_lock_decref() won't see
+ * this flag and call l_blocking_ast */
+ lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING;
+
+ /* We can't re-add to l_lru as it confuses the
+ * refcounting in ldlm_lock_remove_from_lru() if an AST
+ * arrives after we drop lr_lock below. We use l_bl_ast
+ * and can't use l_pending_chain as it is used both on
+ * server and client, even though bug 5666 says it is
+ * used only on the server. */
+ LASSERT(list_empty(&lock->l_bl_ast));
+ list_add(&lock->l_bl_ast, cancels);
+ unlock_res_and_lock(lock);
+ lu_ref_del(&lock->l_reference, __FUNCTION__, current);
+ spin_lock(&ns->ns_lock);
+ added++;
+ unused--;
+ }
+ spin_unlock(&ns->ns_lock);
+ return added;
+}
+
+int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
+ int count, int max, ldlm_cancel_flags_t cancel_flags,
+ int flags)
+{
+ int added;
+ added = ldlm_prepare_lru_list(ns, cancels, count, max, flags);
+ if (added <= 0)
+ return added;
+ return ldlm_cli_cancel_list_local(cancels, added, cancel_flags);
+}
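+
+/*
+ * Illustrative note (not part of the original patch): the early lock cancel
+ * path uses this helper to top up an enqueue request; ldlm_prep_elc_req()
+ * above calls
+ *
+ *   ldlm_cancel_lru_local(ns, cancels, to_free, avail - count, 0, flags);
+ *
+ * with flags chosen as LDLM_CANCEL_LRUR or LDLM_CANCEL_AGED depending on
+ * whether the server supports LRU resize.
+ */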
+
+/**
+ * Cancel at least \a nr locks from given namespace LRU.
+ *
+ * When called with LCF_ASYNC the blocking callback will be handled
+ * in a thread and this function will return after the thread has been
+ * asked to call the callback. When called without LCF_ASYNC the blocking
+ * callback will be performed in this function.
+ */
+int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
+ ldlm_cancel_flags_t cancel_flags,
+ int flags)
+{
+ LIST_HEAD(cancels);
+ int count, rc;
+
+ /* Just prepare the list of locks, do not actually cancel them yet.
+ * Locks are cancelled later in a separate thread. */
+ count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, flags);
+ rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, cancel_flags);
+ if (rc == 0)
+ return count;
+
+ return 0;
+}
+
+/**
+ * Find and cancel locally unused locks found on resource, matched to the
+ * given policy, mode. GET the found locks and add them into the \a cancels
+ * list.
+ */
+int ldlm_cancel_resource_local(struct ldlm_resource *res,
+ struct list_head *cancels,
+ ldlm_policy_data_t *policy,
+ ldlm_mode_t mode, int lock_flags,
+ ldlm_cancel_flags_t cancel_flags, void *opaque)
+{
+ struct ldlm_lock *lock;
+ int count = 0;
+
+ lock_res(res);
+ list_for_each_entry(lock, &res->lr_granted, l_res_link) {
+ if (opaque != NULL && lock->l_ast_data != opaque) {
+ LDLM_ERROR(lock, "data %p doesn't match opaque %p",
+ lock->l_ast_data, opaque);
+ //LBUG();
+ continue;
+ }
+
+ if (lock->l_readers || lock->l_writers)
+ continue;
+
+ /* If somebody is already doing CANCEL, or blocking AST came,
+ * skip this lock. */
+ if (lock->l_flags & LDLM_FL_BL_AST ||
+ lock->l_flags & LDLM_FL_CANCELING)
+ continue;
+
+ if (lockmode_compat(lock->l_granted_mode, mode))
+ continue;
+
+ /* If policy is given and this is IBITS lock, add to list only
+ * those locks that match by policy. */
+ if (policy && (lock->l_resource->lr_type == LDLM_IBITS) &&
+ !(lock->l_policy_data.l_inodebits.bits &
+ policy->l_inodebits.bits))
+ continue;
+
+ /* See CBPENDING comment in ldlm_cancel_lru */
+ lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING |
+ lock_flags;
+
+ LASSERT(list_empty(&lock->l_bl_ast));
+ list_add(&lock->l_bl_ast, cancels);
+ LDLM_LOCK_GET(lock);
+ count++;
+ }
+ unlock_res(res);
+
+ return ldlm_cli_cancel_list_local(cancels, count, cancel_flags);
+}
+EXPORT_SYMBOL(ldlm_cancel_resource_local);
+
+/**
+ * Cancel client-side locks from a list and send/prepare cancel RPCs to the
+ * server.
+ * If \a req is NULL, send CANCEL request to server with handles of locks
+ * in the \a cancels. If EARLY_CANCEL is not supported, send CANCEL requests
+ * separately per lock.
+ * If \a req is not NULL, put handles of locks in \a cancels into the request
+ * buffer at the offset \a off.
+ * Destroy \a cancels at the end.
+ */
+int ldlm_cli_cancel_list(struct list_head *cancels, int count,
+ struct ptlrpc_request *req, ldlm_cancel_flags_t flags)
+{
+ struct ldlm_lock *lock;
+ int res = 0;
+
+ if (list_empty(cancels) || count == 0)
+ return 0;
+
+ /* XXX: requests (both batched and not) could be sent in parallel.
+ * Usually it is enough to have just 1 RPC, but it is possible that
+ * there are too many locks to be cancelled in LRU or on a resource.
+ * It would also speed up the case when the server does not support
+ * the feature. */
+ while (count > 0) {
+ LASSERT(!list_empty(cancels));
+ lock = list_entry(cancels->next, struct ldlm_lock,
+ l_bl_ast);
+ LASSERT(lock->l_conn_export);
+
+ if (exp_connect_cancelset(lock->l_conn_export)) {
+ res = count;
+ if (req)
+ ldlm_cancel_pack(req, cancels, count);
+ else
+ res = ldlm_cli_cancel_req(lock->l_conn_export,
+ cancels, count,
+ flags);
+ } else {
+ res = ldlm_cli_cancel_req(lock->l_conn_export,
+ cancels, 1, flags);
+ }
+
+ if (res < 0) {
+ CDEBUG_LIMIT(res == -ESHUTDOWN ? D_DLMTRACE : D_ERROR,
+ "ldlm_cli_cancel_list: %d\n", res);
+ res = count;
+ }
+
+ count -= res;
+ ldlm_lock_list_put(cancels, l_bl_ast, res);
+ }
+ LASSERT(count == 0);
+ return 0;
+}
+EXPORT_SYMBOL(ldlm_cli_cancel_list);
+
+/**
+ * Cancel all locks on a resource that have 0 readers/writers.
+ *
+ * If flags & LCF_LOCAL, throw the locks away without trying
+ * to notify the server. */
+int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
+ const struct ldlm_res_id *res_id,
+ ldlm_policy_data_t *policy,
+ ldlm_mode_t mode,
+ ldlm_cancel_flags_t flags,
+ void *opaque)
+{
+ struct ldlm_resource *res;
+ LIST_HEAD(cancels);
+ int count;
+ int rc;
+
+ res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
+ if (res == NULL) {
+ /* This is not a problem. */
+ CDEBUG(D_INFO, "No resource "LPU64"\n", res_id->name[0]);
+ return 0;
+ }
+
+ LDLM_RESOURCE_ADDREF(res);
+ count = ldlm_cancel_resource_local(res, &cancels, policy, mode,
+ 0, flags | LCF_BL_AST, opaque);
+ rc = ldlm_cli_cancel_list(&cancels, count, NULL, flags);
+ if (rc != ELDLM_OK)
+ CERROR("ldlm_cli_cancel_unused_resource: %d\n", rc);
+
+ LDLM_RESOURCE_DELREF(res);
+ ldlm_resource_putref(res);
+ return 0;
+}
+EXPORT_SYMBOL(ldlm_cli_cancel_unused_resource);
+
+struct ldlm_cli_cancel_arg {
+ int lc_flags;
+ void *lc_opaque;
+};
+
+static int ldlm_cli_hash_cancel_unused(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode, void *arg)
+{
+ struct ldlm_resource *res = cfs_hash_object(hs, hnode);
+ struct ldlm_cli_cancel_arg *lc = arg;
+ int rc;
+
+ rc = ldlm_cli_cancel_unused_resource(ldlm_res_to_ns(res), &res->lr_name,
+ NULL, LCK_MINMODE,
+ lc->lc_flags, lc->lc_opaque);
+ if (rc != 0) {
+ CERROR("ldlm_cli_cancel_unused ("LPU64"): %d\n",
+ res->lr_name.name[0], rc);
+ }
+ /* must return 0 for hash iteration */
+ return 0;
+}
+
+/**
+ * Cancel all locks on a namespace (or a specific resource, if given)
+ * that have 0 readers/writers.
+ *
+ * If flags & LCF_LOCAL, throw the locks away without trying
+ * to notify the server. */
+int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
+ const struct ldlm_res_id *res_id,
+ ldlm_cancel_flags_t flags, void *opaque)
+{
+ struct ldlm_cli_cancel_arg arg = {
+ .lc_flags = flags,
+ .lc_opaque = opaque,
+ };
+
+ if (ns == NULL)
+ return ELDLM_OK;
+
+ if (res_id != NULL) {
+ return ldlm_cli_cancel_unused_resource(ns, res_id, NULL,
+ LCK_MINMODE, flags,
+ opaque);
+ } else {
+ cfs_hash_for_each_nolock(ns->ns_rs_hash,
+ ldlm_cli_hash_cancel_unused, &arg);
+ return ELDLM_OK;
+ }
+}
+EXPORT_SYMBOL(ldlm_cli_cancel_unused);
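+
+/*
+ * Illustrative sketch (not from the original patch): a hypothetical forced
+ * cleanup dropping every unused lock in a namespace without notifying the
+ * server would be
+ *
+ *   rc = ldlm_cli_cancel_unused(ns, NULL, LCF_LOCAL, NULL);
+ *
+ * while passing a specific \a res_id restricts the cancel to that resource.
+ */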
+
+/* Lock iterators. */
+
+int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
+ void *closure)
+{
+ struct list_head *tmp, *next;
+ struct ldlm_lock *lock;
+ int rc = LDLM_ITER_CONTINUE;
+
+ if (!res)
+ return LDLM_ITER_CONTINUE;
+
+ lock_res(res);
+ list_for_each_safe(tmp, next, &res->lr_granted) {
+ lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+
+ if (iter(lock, closure) == LDLM_ITER_STOP)
+ GOTO(out, rc = LDLM_ITER_STOP);
+ }
+
+ list_for_each_safe(tmp, next, &res->lr_converting) {
+ lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+
+ if (iter(lock, closure) == LDLM_ITER_STOP)
+ GOTO(out, rc = LDLM_ITER_STOP);
+ }
+
+ list_for_each_safe(tmp, next, &res->lr_waiting) {
+ lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+
+ if (iter(lock, closure) == LDLM_ITER_STOP)
+ GOTO(out, rc = LDLM_ITER_STOP);
+ }
+ out:
+ unlock_res(res);
+ return rc;
+}
+EXPORT_SYMBOL(ldlm_resource_foreach);
+
+struct iter_helper_data {
+ ldlm_iterator_t iter;
+ void *closure;
+};
+
+static int ldlm_iter_helper(struct ldlm_lock *lock, void *closure)
+{
+ struct iter_helper_data *helper = closure;
+ return helper->iter(lock, helper->closure);
+}
+
+static int ldlm_res_iter_helper(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode, void *arg)
+
+{
+ struct ldlm_resource *res = cfs_hash_object(hs, hnode);
+
+ return ldlm_resource_foreach(res, ldlm_iter_helper, arg) ==
+ LDLM_ITER_STOP;
+}
+
+void ldlm_namespace_foreach(struct ldlm_namespace *ns,
+ ldlm_iterator_t iter, void *closure)
+
+{
+ struct iter_helper_data helper = {
+ .iter = iter,
+ .closure = closure,
+ };
+
+ cfs_hash_for_each_nolock(ns->ns_rs_hash,
+ ldlm_res_iter_helper, &helper);
+
+}
+EXPORT_SYMBOL(ldlm_namespace_foreach);
+
+/* Non-blocking function to manipulate a lock whose cb_data is being put away.
+ * Returns 0 if no resource is found,
+ * > 0 (must be LDLM_ITER_STOP/LDLM_ITER_CONTINUE) otherwise, and
+ * < 0 on errors.
+ */
+int ldlm_resource_iterate(struct ldlm_namespace *ns,
+ const struct ldlm_res_id *res_id,
+ ldlm_iterator_t iter, void *data)
+{
+ struct ldlm_resource *res;
+ int rc;
+
+ if (ns == NULL) {
+ CERROR("must pass in namespace\n");
+ LBUG();
+ }
+
+ res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
+ if (res == NULL)
+ return 0;
+
+ LDLM_RESOURCE_ADDREF(res);
+ rc = ldlm_resource_foreach(res, iter, data);
+ LDLM_RESOURCE_DELREF(res);
+ ldlm_resource_putref(res);
+ return rc;
+}
+EXPORT_SYMBOL(ldlm_resource_iterate);
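+
+/*
+ * Illustrative sketch (not from the original patch): a hypothetical iterator
+ * callback matching ldlm_iterator_t that counts the locks on a resource:
+ *
+ *   static int count_lock_cb(struct ldlm_lock *lock, void *closure)
+ *   {
+ *           (*(int *)closure)++;
+ *           return LDLM_ITER_CONTINUE;
+ *   }
+ *
+ *   int count = 0;
+ *   ldlm_resource_iterate(ns, &res_id, count_lock_cb, &count);
+ *
+ * Returning LDLM_ITER_STOP from the callback ends the walk early, as
+ * ldlm_resource_foreach() above shows.
+ */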
+
+/* Lock replay */
+
+static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
+{
+ struct list_head *list = closure;
+
+ /* we use l_pending_chain here, because it's unused on clients. */
+ LASSERTF(list_empty(&lock->l_pending_chain),
+ "lock %p next %p prev %p\n",
+ lock, &lock->l_pending_chain.next,&lock->l_pending_chain.prev);
+ /* bug 9573: don't replay locks left after eviction, or
+ * bug 17614: locks being actively cancelled. Get a reference
+ * on a lock so that it does not disappear under us (e.g. due to cancel)
+ */
+ if (!(lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_CANCELING))) {
+ list_add(&lock->l_pending_chain, list);
+ LDLM_LOCK_GET(lock);
+ }
+
+ return LDLM_ITER_CONTINUE;
+}
+
+static int replay_lock_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
+ struct ldlm_async_args *aa, int rc)
+{
+ struct ldlm_lock *lock;
+ struct ldlm_reply *reply;
+ struct obd_export *exp;
+
+ atomic_dec(&req->rq_import->imp_replay_inflight);
+ if (rc != ELDLM_OK)
+ GOTO(out, rc);
+
+
+ reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
+ if (reply == NULL)
+ GOTO(out, rc = -EPROTO);
+
+ lock = ldlm_handle2lock(&aa->lock_handle);
+ if (!lock) {
+ CERROR("received replay ack for unknown local cookie "LPX64
+ " remote cookie "LPX64 " from server %s id %s\n",
+ aa->lock_handle.cookie, reply->lock_handle.cookie,
+ req->rq_export->exp_client_uuid.uuid,
+ libcfs_id2str(req->rq_peer));
+ GOTO(out, rc = -ESTALE);
+ }
+
+ /* Key change rehash lock in per-export hash with new key */
+ exp = req->rq_export;
+ if (exp && exp->exp_lock_hash) {
+ /* In the function below, .hs_keycmp resolves to
+ * ldlm_export_lock_keycmp() */
+ /* coverity[overrun-buffer-val] */
+ cfs_hash_rehash_key(exp->exp_lock_hash,
+ &lock->l_remote_handle,
+ &reply->lock_handle,
+ &lock->l_exp_hash);
+ } else {
+ lock->l_remote_handle = reply->lock_handle;
+ }
+
+ LDLM_DEBUG(lock, "replayed lock:");
+ ptlrpc_import_recovery_state_machine(req->rq_import);
+ LDLM_LOCK_PUT(lock);
+out:
+ if (rc != ELDLM_OK)
+ ptlrpc_connect_import(req->rq_import);
+
+ return rc;
+}
+
+static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
+{
+ struct ptlrpc_request *req;
+ struct ldlm_async_args *aa;
+ struct ldlm_request *body;
+ int flags;
+
+ /* Bug 11974: Do not replay a lock which is actively being canceled */
+ if (lock->l_flags & LDLM_FL_CANCELING) {
+ LDLM_DEBUG(lock, "Not replaying canceled lock:");
+ return 0;
+ }
+
+	/* If this is a reply-less callback lock, we cannot replay it, since the
+	 * server might have dropped it long ago while the notification of that
+	 * event was lost on the network (and the server may already have
+	 * granted a conflicting lock). */
+ if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
+ LDLM_DEBUG(lock, "Not replaying reply-less lock:");
+ ldlm_lock_cancel(lock);
+ return 0;
+ }
+
+ /*
+ * If granted mode matches the requested mode, this lock is granted.
+ *
+ * If they differ, but we have a granted mode, then we were granted
+ * one mode and now want another: ergo, converting.
+ *
+ * If we haven't been granted anything and are on a resource list,
+ * then we're blocked/waiting.
+ *
+ * If we haven't been granted anything and we're NOT on a resource list,
+ * then we haven't got a reply yet and don't have a known disposition.
+ * This happens whenever a lock enqueue is the request that triggers
+ * recovery.
+ */
+ if (lock->l_granted_mode == lock->l_req_mode)
+ flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_GRANTED;
+ else if (lock->l_granted_mode)
+ flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_CONV;
+ else if (!list_empty(&lock->l_res_link))
+ flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_WAIT;
+ else
+ flags = LDLM_FL_REPLAY;
+
+ req = ptlrpc_request_alloc_pack(imp, &RQF_LDLM_ENQUEUE,
+ LUSTRE_DLM_VERSION, LDLM_ENQUEUE);
+ if (req == NULL)
+ return -ENOMEM;
+
+ /* We're part of recovery, so don't wait for it. */
+ req->rq_send_state = LUSTRE_IMP_REPLAY_LOCKS;
+
+ body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
+ ldlm_lock2desc(lock, &body->lock_desc);
+ body->lock_flags = ldlm_flags_to_wire(flags);
+
+ ldlm_lock2handle(lock, &body->lock_handle[0]);
+ if (lock->l_lvb_len > 0)
+ req_capsule_extend(&req->rq_pill, &RQF_LDLM_ENQUEUE_LVB);
+ req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
+ lock->l_lvb_len);
+ ptlrpc_request_set_replen(req);
+	/* Notify the server we've replayed all requests.
+	 * Also, mark the request to be put on a dedicated
+	 * queue to be processed after all request replays.
+	 * bug 6063 */
+ lustre_msg_set_flags(req->rq_reqmsg, MSG_REQ_REPLAY_DONE);
+
+ LDLM_DEBUG(lock, "replaying lock:");
+
+ atomic_inc(&req->rq_import->imp_replay_inflight);
+ CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+ aa = ptlrpc_req_async_args(req);
+ aa->lock_handle = body->lock_handle[0];
+ req->rq_interpret_reply = (ptlrpc_interpterer_t)replay_lock_interpret;
+ ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
+
+ return 0;
+}
+
+/**
+ * Cancel as many unused locks as possible before replay. Since we are
+ * in recovery, we cannot wait for any outstanding RPCs or send new RPCs
+ * to the server.
+ *
+ * Called only in recovery, before replaying locks. There is no need to
+ * replay locks that are unused, and since clients may hold thousands of
+ * cached unused locks, dropping them can greatly reduce the load on the
+ * servers at recovery time.
+ */
+static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns)
+{
+ int canceled;
+ LIST_HEAD(cancels);
+
+ CDEBUG(D_DLMTRACE, "Dropping as many unused locks as possible before"
+ "replay for namespace %s (%d)\n",
+ ldlm_ns_name(ns), ns->ns_nr_unused);
+
+ /* We don't need to care whether or not LRU resize is enabled
+ * because the LDLM_CANCEL_NO_WAIT policy doesn't use the
+ * count parameter */
+ canceled = ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0,
+ LCF_LOCAL, LDLM_CANCEL_NO_WAIT);
+
+ CDEBUG(D_DLMTRACE, "Canceled %d unused locks from namespace %s\n",
+ canceled, ldlm_ns_name(ns));
+}
+
+int ldlm_replay_locks(struct obd_import *imp)
+{
+ struct ldlm_namespace *ns = imp->imp_obd->obd_namespace;
+ LIST_HEAD(list);
+ struct ldlm_lock *lock, *next;
+ int rc = 0;
+
+ LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
+
+ /* don't replay locks if import failed recovery */
+ if (imp->imp_vbr_failed)
+ return 0;
+
+ /* ensure this doesn't fall to 0 before all have been queued */
+ atomic_inc(&imp->imp_replay_inflight);
+
+ if (ldlm_cancel_unused_locks_before_replay)
+ ldlm_cancel_unused_locks_for_replay(ns);
+
+ ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);
+
+ list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
+ list_del_init(&lock->l_pending_chain);
+ if (rc) {
+ LDLM_LOCK_RELEASE(lock);
+ continue; /* or try to do the rest? */
+ }
+ rc = replay_one_lock(imp, lock);
+ LDLM_LOCK_RELEASE(lock);
+ }
+
+ atomic_dec(&imp->imp_replay_inflight);
+
+ return rc;
+}
+EXPORT_SYMBOL(ldlm_replay_locks);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
new file mode 100644
index 0000000..208751a
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -0,0 +1,1434 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2010, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ldlm/ldlm_resource.c
+ *
+ * Author: Phil Schwan <phil@clusterfs.com>
+ * Author: Peter Braam <braam@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LDLM
+# include <lustre_dlm.h>
+
+#include <lustre_fid.h>
+#include <obd_class.h>
+#include "ldlm_internal.h"
+
+struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab;
+
+int ldlm_srv_namespace_nr = 0;
+int ldlm_cli_namespace_nr = 0;
+
+struct mutex ldlm_srv_namespace_lock;
+LIST_HEAD(ldlm_srv_namespace_list);
+
+struct mutex ldlm_cli_namespace_lock;
+/* Client Namespaces that have active resources in them.
+ * Once all resources go away, ldlm_poold moves such namespaces to the
+ * inactive list */
+LIST_HEAD(ldlm_cli_active_namespace_list);
+/* Client namespaces that don't have any locks in them */
+LIST_HEAD(ldlm_cli_inactive_namespace_list);
+
+struct proc_dir_entry *ldlm_type_proc_dir = NULL;
+struct proc_dir_entry *ldlm_ns_proc_dir = NULL;
+struct proc_dir_entry *ldlm_svc_proc_dir = NULL;
+
+extern unsigned int ldlm_cancel_unused_locks_before_replay;
+
+/* During a debug dump, print at most this many granted locks for a single
+ * resource, to avoid a DDoS. */
+unsigned int ldlm_dump_granted_max = 256;
+
+#ifdef LPROCFS
+static ssize_t lprocfs_wr_dump_ns(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
+ ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
+ return count;
+}
+LPROC_SEQ_FOPS_WR_ONLY(ldlm, dump_ns);
+
+LPROC_SEQ_FOPS_RW_TYPE(ldlm_rw, uint);
+LPROC_SEQ_FOPS_RO_TYPE(ldlm, uint);
+
+int ldlm_proc_setup(void)
+{
+ int rc;
+ struct lprocfs_vars list[] = {
+ { "dump_namespaces", &ldlm_dump_ns_fops, 0, 0222 },
+ { "dump_granted_max", &ldlm_rw_uint_fops,
+ &ldlm_dump_granted_max },
+ { "cancel_unused_locks_before_replay", &ldlm_rw_uint_fops,
+ &ldlm_cancel_unused_locks_before_replay },
+ { NULL }};
+ LASSERT(ldlm_ns_proc_dir == NULL);
+
+ ldlm_type_proc_dir = lprocfs_register(OBD_LDLM_DEVICENAME,
+ proc_lustre_root,
+ NULL, NULL);
+ if (IS_ERR(ldlm_type_proc_dir)) {
+ CERROR("LProcFS failed in ldlm-init\n");
+ rc = PTR_ERR(ldlm_type_proc_dir);
+ GOTO(err, rc);
+ }
+
+ ldlm_ns_proc_dir = lprocfs_register("namespaces",
+ ldlm_type_proc_dir,
+ NULL, NULL);
+ if (IS_ERR(ldlm_ns_proc_dir)) {
+ CERROR("LProcFS failed in ldlm-init\n");
+ rc = PTR_ERR(ldlm_ns_proc_dir);
+ GOTO(err_type, rc);
+ }
+
+ ldlm_svc_proc_dir = lprocfs_register("services",
+ ldlm_type_proc_dir,
+ NULL, NULL);
+ if (IS_ERR(ldlm_svc_proc_dir)) {
+ CERROR("LProcFS failed in ldlm-init\n");
+ rc = PTR_ERR(ldlm_svc_proc_dir);
+ GOTO(err_ns, rc);
+ }
+
+ rc = lprocfs_add_vars(ldlm_type_proc_dir, list, NULL);
+
+ return 0;
+
+err_ns:
+ lprocfs_remove(&ldlm_ns_proc_dir);
+err_type:
+ lprocfs_remove(&ldlm_type_proc_dir);
+err:
+ ldlm_svc_proc_dir = NULL;
+ ldlm_type_proc_dir = NULL;
+ ldlm_ns_proc_dir = NULL;
+ return rc;
+}
+
+void ldlm_proc_cleanup(void)
+{
+ if (ldlm_svc_proc_dir)
+ lprocfs_remove(&ldlm_svc_proc_dir);
+
+ if (ldlm_ns_proc_dir)
+ lprocfs_remove(&ldlm_ns_proc_dir);
+
+ if (ldlm_type_proc_dir)
+ lprocfs_remove(&ldlm_type_proc_dir);
+
+ ldlm_svc_proc_dir = NULL;
+ ldlm_type_proc_dir = NULL;
+ ldlm_ns_proc_dir = NULL;
+}
+
+static int lprocfs_ns_resources_seq_show(struct seq_file *m, void *v)
+{
+ struct ldlm_namespace *ns = m->private;
+ __u64 res = 0;
+ cfs_hash_bd_t bd;
+ int i;
+
+	/* the result is not strictly consistent */
+ cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, i)
+ res += cfs_hash_bd_count_get(&bd);
+ return lprocfs_rd_u64(m, &res);
+}
+LPROC_SEQ_FOPS_RO(lprocfs_ns_resources);
+
+static int lprocfs_ns_locks_seq_show(struct seq_file *m, void *v)
+{
+ struct ldlm_namespace *ns = m->private;
+ __u64 locks;
+
+ locks = lprocfs_stats_collector(ns->ns_stats, LDLM_NSS_LOCKS,
+ LPROCFS_FIELDS_FLAGS_SUM);
+ return lprocfs_rd_u64(m, &locks);
+}
+LPROC_SEQ_FOPS_RO(lprocfs_ns_locks);
+
+static int lprocfs_lru_size_seq_show(struct seq_file *m, void *v)
+{
+ struct ldlm_namespace *ns = m->private;
+ __u32 *nr = &ns->ns_max_unused;
+
+ if (ns_connect_lru_resize(ns))
+ nr = &ns->ns_nr_unused;
+ return lprocfs_rd_uint(m, nr);
+}
+
+static ssize_t lprocfs_lru_size_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct ldlm_namespace *ns = ((struct seq_file *)file->private_data)->private;
+ char dummy[MAX_STRING_SIZE + 1], *end;
+ unsigned long tmp;
+ int lru_resize;
+
+ dummy[MAX_STRING_SIZE] = '\0';
+ if (copy_from_user(dummy, buffer, MAX_STRING_SIZE))
+ return -EFAULT;
+
+ if (strncmp(dummy, "clear", 5) == 0) {
+ CDEBUG(D_DLMTRACE,
+ "dropping all unused locks from namespace %s\n",
+ ldlm_ns_name(ns));
+ if (ns_connect_lru_resize(ns)) {
+ int canceled, unused = ns->ns_nr_unused;
+
+ /* Try to cancel all @ns_nr_unused locks. */
+ canceled = ldlm_cancel_lru(ns, unused, 0,
+ LDLM_CANCEL_PASSED);
+ if (canceled < unused) {
+ CDEBUG(D_DLMTRACE,
+ "not all requested locks are canceled, "
+ "requested: %d, canceled: %d\n", unused,
+ canceled);
+ return -EINVAL;
+ }
+ } else {
+ tmp = ns->ns_max_unused;
+ ns->ns_max_unused = 0;
+ ldlm_cancel_lru(ns, 0, 0, LDLM_CANCEL_PASSED);
+ ns->ns_max_unused = tmp;
+ }
+ return count;
+ }
+
+ tmp = simple_strtoul(dummy, &end, 0);
+ if (dummy == end) {
+ CERROR("invalid value written\n");
+ return -EINVAL;
+ }
+ lru_resize = (tmp == 0);
+
+ if (ns_connect_lru_resize(ns)) {
+ if (!lru_resize)
+ ns->ns_max_unused = (unsigned int)tmp;
+
+ if (tmp > ns->ns_nr_unused)
+ tmp = ns->ns_nr_unused;
+ tmp = ns->ns_nr_unused - tmp;
+
+ CDEBUG(D_DLMTRACE,
+ "changing namespace %s unused locks from %u to %u\n",
+ ldlm_ns_name(ns), ns->ns_nr_unused,
+ (unsigned int)tmp);
+ ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_CANCEL_PASSED);
+
+ if (!lru_resize) {
+ CDEBUG(D_DLMTRACE,
+ "disable lru_resize for namespace %s\n",
+ ldlm_ns_name(ns));
+ ns->ns_connect_flags &= ~OBD_CONNECT_LRU_RESIZE;
+ }
+ } else {
+ CDEBUG(D_DLMTRACE,
+ "changing namespace %s max_unused from %u to %u\n",
+ ldlm_ns_name(ns), ns->ns_max_unused,
+ (unsigned int)tmp);
+ ns->ns_max_unused = (unsigned int)tmp;
+ ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_CANCEL_PASSED);
+
+ /* Make sure that LRU resize was originally supported before
+ * turning it on here. */
+ if (lru_resize &&
+ (ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
+ CDEBUG(D_DLMTRACE,
+ "enable lru_resize for namespace %s\n",
+ ldlm_ns_name(ns));
+ ns->ns_connect_flags |= OBD_CONNECT_LRU_RESIZE;
+ }
+ }
+
+ return count;
+}
+LPROC_SEQ_FOPS(lprocfs_lru_size);
+
+static int lprocfs_elc_seq_show(struct seq_file *m, void *v)
+{
+ struct ldlm_namespace *ns = m->private;
+ unsigned int supp = ns_connect_cancelset(ns);
+
+ return lprocfs_rd_uint(m, &supp);
+}
+
+static ssize_t lprocfs_elc_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct ldlm_namespace *ns = ((struct seq_file *)file->private_data)->private;
+ unsigned int supp = -1;
+ int rc;
+
+ rc = lprocfs_wr_uint(file, buffer, count, &supp);
+ if (rc < 0)
+ return rc;
+
+ if (supp == 0)
+ ns->ns_connect_flags &= ~OBD_CONNECT_CANCELSET;
+ else if (ns->ns_orig_connect_flags & OBD_CONNECT_CANCELSET)
+ ns->ns_connect_flags |= OBD_CONNECT_CANCELSET;
+ return count;
+}
+LPROC_SEQ_FOPS(lprocfs_elc);
+
+void ldlm_namespace_proc_unregister(struct ldlm_namespace *ns)
+{
+ if (ns->ns_proc_dir_entry == NULL)
+ CERROR("dlm namespace %s has no procfs dir?\n",
+ ldlm_ns_name(ns));
+ else
+ lprocfs_remove(&ns->ns_proc_dir_entry);
+
+ if (ns->ns_stats != NULL)
+ lprocfs_free_stats(&ns->ns_stats);
+}
+
+#define LDLM_NS_ADD_VAR(name, var, ops) \
+ do { \
+ snprintf(lock_name, MAX_STRING_SIZE, name); \
+ lock_vars[0].data = var; \
+ lock_vars[0].fops = ops; \
+ lprocfs_add_vars(ns_pde, lock_vars, 0); \
+ } while (0)
+
+int ldlm_namespace_proc_register(struct ldlm_namespace *ns)
+{
+ struct lprocfs_vars lock_vars[2];
+ char lock_name[MAX_STRING_SIZE + 1];
+ struct proc_dir_entry *ns_pde;
+
+ LASSERT(ns != NULL);
+ LASSERT(ns->ns_rs_hash != NULL);
+
+ if (ns->ns_proc_dir_entry != NULL) {
+ ns_pde = ns->ns_proc_dir_entry;
+ } else {
+ ns_pde = proc_mkdir(ldlm_ns_name(ns), ldlm_ns_proc_dir);
+ if (ns_pde == NULL)
+ return -ENOMEM;
+ ns->ns_proc_dir_entry = ns_pde;
+ }
+
+ ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0);
+ if (ns->ns_stats == NULL)
+ return -ENOMEM;
+
+ lprocfs_counter_init(ns->ns_stats, LDLM_NSS_LOCKS,
+ LPROCFS_CNTR_AVGMINMAX, "locks", "locks");
+
+ lock_name[MAX_STRING_SIZE] = '\0';
+
+ memset(lock_vars, 0, sizeof(lock_vars));
+ lock_vars[0].name = lock_name;
+
+ LDLM_NS_ADD_VAR("resource_count", ns, &lprocfs_ns_resources_fops);
+ LDLM_NS_ADD_VAR("lock_count", ns, &lprocfs_ns_locks_fops);
+
+ if (ns_is_client(ns)) {
+ LDLM_NS_ADD_VAR("lock_unused_count", &ns->ns_nr_unused,
+ &ldlm_uint_fops);
+ LDLM_NS_ADD_VAR("lru_size", ns, &lprocfs_lru_size_fops);
+ LDLM_NS_ADD_VAR("lru_max_age", &ns->ns_max_age,
+ &ldlm_rw_uint_fops);
+ LDLM_NS_ADD_VAR("early_lock_cancel", ns, &lprocfs_elc_fops);
+ } else {
+ LDLM_NS_ADD_VAR("ctime_age_limit", &ns->ns_ctime_age_limit,
+ &ldlm_rw_uint_fops);
+ LDLM_NS_ADD_VAR("lock_timeouts", &ns->ns_timeouts,
+ &ldlm_uint_fops);
+ LDLM_NS_ADD_VAR("max_nolock_bytes", &ns->ns_max_nolock_size,
+ &ldlm_rw_uint_fops);
+ LDLM_NS_ADD_VAR("contention_seconds", &ns->ns_contention_time,
+ &ldlm_rw_uint_fops);
+ LDLM_NS_ADD_VAR("contended_locks", &ns->ns_contended_locks,
+ &ldlm_rw_uint_fops);
+ LDLM_NS_ADD_VAR("max_parallel_ast", &ns->ns_max_parallel_ast,
+ &ldlm_rw_uint_fops);
+ }
+ return 0;
+}
+#undef MAX_STRING_SIZE
+#else /* LPROCFS */
+
+#define ldlm_namespace_proc_unregister(ns) ({;})
+#define ldlm_namespace_proc_register(ns) ({0;})
+
+#endif /* LPROCFS */
+
+static unsigned ldlm_res_hop_hash(cfs_hash_t *hs,
+ const void *key, unsigned mask)
+{
+ const struct ldlm_res_id *id = key;
+ unsigned val = 0;
+ unsigned i;
+
+ for (i = 0; i < RES_NAME_SIZE; i++)
+ val += id->name[i];
+ return val & mask;
+}
+
+static unsigned ldlm_res_hop_fid_hash(cfs_hash_t *hs,
+ const void *key, unsigned mask)
+{
+ const struct ldlm_res_id *id = key;
+ struct lu_fid fid;
+ __u32 hash;
+ __u32 val;
+
+ fid.f_seq = id->name[LUSTRE_RES_ID_SEQ_OFF];
+ fid.f_oid = (__u32)id->name[LUSTRE_RES_ID_VER_OID_OFF];
+ fid.f_ver = (__u32)(id->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
+
+ hash = fid_flatten32(&fid);
+ hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
+ if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0) {
+ val = id->name[LUSTRE_RES_ID_HSH_OFF];
+ hash += (val >> 5) + (val << 11);
+ } else {
+ val = fid_oid(&fid);
+ }
+ hash = cfs_hash_long(hash, hs->hs_bkt_bits);
+ /* give me another random factor */
+ hash -= cfs_hash_long((unsigned long)hs, val % 11 + 3);
+
+ hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
+ hash |= ldlm_res_hop_hash(hs, key, CFS_HASH_NBKT(hs) - 1);
+
+ return hash & mask;
+}
+
+static void *ldlm_res_hop_key(struct hlist_node *hnode)
+{
+ struct ldlm_resource *res;
+
+ res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
+ return &res->lr_name;
+}
+
+static int ldlm_res_hop_keycmp(const void *key, struct hlist_node *hnode)
+{
+ struct ldlm_resource *res;
+
+ res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
+ return ldlm_res_eq((const struct ldlm_res_id *)key,
+ (const struct ldlm_res_id *)&res->lr_name);
+}
+
+static void *ldlm_res_hop_object(struct hlist_node *hnode)
+{
+ return hlist_entry(hnode, struct ldlm_resource, lr_hash);
+}
+
+static void ldlm_res_hop_get_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+ struct ldlm_resource *res;
+
+ res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
+ ldlm_resource_getref(res);
+}
+
+static void ldlm_res_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+ struct ldlm_resource *res;
+
+ res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
+	/* cfs_hash_for_each_nolock is the only path that calls this */
+ ldlm_resource_putref_locked(res);
+}
+
+static void ldlm_res_hop_put(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+ struct ldlm_resource *res;
+
+ res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
+ ldlm_resource_putref(res);
+}
+
+cfs_hash_ops_t ldlm_ns_hash_ops = {
+ .hs_hash = ldlm_res_hop_hash,
+ .hs_key = ldlm_res_hop_key,
+ .hs_keycmp = ldlm_res_hop_keycmp,
+ .hs_keycpy = NULL,
+ .hs_object = ldlm_res_hop_object,
+ .hs_get = ldlm_res_hop_get_locked,
+ .hs_put_locked = ldlm_res_hop_put_locked,
+ .hs_put = ldlm_res_hop_put
+};
+
+cfs_hash_ops_t ldlm_ns_fid_hash_ops = {
+ .hs_hash = ldlm_res_hop_fid_hash,
+ .hs_key = ldlm_res_hop_key,
+ .hs_keycmp = ldlm_res_hop_keycmp,
+ .hs_keycpy = NULL,
+ .hs_object = ldlm_res_hop_object,
+ .hs_get = ldlm_res_hop_get_locked,
+ .hs_put_locked = ldlm_res_hop_put_locked,
+ .hs_put = ldlm_res_hop_put
+};
+
+typedef struct {
+ ldlm_ns_type_t nsd_type;
+ /** hash bucket bits */
+ unsigned nsd_bkt_bits;
+ /** hash bits */
+ unsigned nsd_all_bits;
+ /** hash operations */
+ cfs_hash_ops_t *nsd_hops;
+} ldlm_ns_hash_def_t;
+
+ldlm_ns_hash_def_t ldlm_ns_hash_defs[] =
+{
+ {
+ .nsd_type = LDLM_NS_TYPE_MDC,
+ .nsd_bkt_bits = 11,
+ .nsd_all_bits = 16,
+ .nsd_hops = &ldlm_ns_fid_hash_ops,
+ },
+ {
+ .nsd_type = LDLM_NS_TYPE_MDT,
+ .nsd_bkt_bits = 14,
+ .nsd_all_bits = 21,
+ .nsd_hops = &ldlm_ns_fid_hash_ops,
+ },
+ {
+ .nsd_type = LDLM_NS_TYPE_OSC,
+ .nsd_bkt_bits = 8,
+ .nsd_all_bits = 12,
+ .nsd_hops = &ldlm_ns_hash_ops,
+ },
+ {
+ .nsd_type = LDLM_NS_TYPE_OST,
+ .nsd_bkt_bits = 11,
+ .nsd_all_bits = 17,
+ .nsd_hops = &ldlm_ns_hash_ops,
+ },
+ {
+ .nsd_type = LDLM_NS_TYPE_MGC,
+ .nsd_bkt_bits = 4,
+ .nsd_all_bits = 4,
+ .nsd_hops = &ldlm_ns_hash_ops,
+ },
+ {
+ .nsd_type = LDLM_NS_TYPE_MGT,
+ .nsd_bkt_bits = 4,
+ .nsd_all_bits = 4,
+ .nsd_hops = &ldlm_ns_hash_ops,
+ },
+ {
+ .nsd_type = LDLM_NS_TYPE_UNKNOWN,
+ },
+};
+
+/**
+ * Create and initialize new empty namespace.
+ */
+struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
+ ldlm_side_t client,
+ ldlm_appetite_t apt,
+ ldlm_ns_type_t ns_type)
+{
+ struct ldlm_namespace *ns = NULL;
+ struct ldlm_ns_bucket *nsb;
+ ldlm_ns_hash_def_t *nsd;
+ cfs_hash_bd_t bd;
+ int idx;
+ int rc;
+
+ LASSERT(obd != NULL);
+
+ rc = ldlm_get_ref();
+ if (rc) {
+ CERROR("ldlm_get_ref failed: %d\n", rc);
+ return NULL;
+ }
+
+	for (idx = 0; ; idx++) {
+ nsd = &ldlm_ns_hash_defs[idx];
+ if (nsd->nsd_type == LDLM_NS_TYPE_UNKNOWN) {
+ CERROR("Unknown type %d for ns %s\n", ns_type, name);
+ GOTO(out_ref, NULL);
+ }
+
+ if (nsd->nsd_type == ns_type)
+ break;
+ }
+
+ OBD_ALLOC_PTR(ns);
+ if (!ns)
+ GOTO(out_ref, NULL);
+
+ ns->ns_rs_hash = cfs_hash_create(name,
+ nsd->nsd_all_bits, nsd->nsd_all_bits,
+ nsd->nsd_bkt_bits, sizeof(*nsb),
+ CFS_HASH_MIN_THETA,
+ CFS_HASH_MAX_THETA,
+ nsd->nsd_hops,
+ CFS_HASH_DEPTH |
+ CFS_HASH_BIGNAME |
+ CFS_HASH_SPIN_BKTLOCK |
+ CFS_HASH_NO_ITEMREF);
+ if (ns->ns_rs_hash == NULL)
+ GOTO(out_ns, NULL);
+
+ cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, idx) {
+ nsb = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
+ at_init(&nsb->nsb_at_estimate, ldlm_enqueue_min, 0);
+ nsb->nsb_namespace = ns;
+ }
+
+ ns->ns_obd = obd;
+ ns->ns_appetite = apt;
+ ns->ns_client = client;
+
+ INIT_LIST_HEAD(&ns->ns_list_chain);
+ INIT_LIST_HEAD(&ns->ns_unused_list);
+ spin_lock_init(&ns->ns_lock);
+ atomic_set(&ns->ns_bref, 0);
+ init_waitqueue_head(&ns->ns_waitq);
+
+ ns->ns_max_nolock_size = NS_DEFAULT_MAX_NOLOCK_BYTES;
+ ns->ns_contention_time = NS_DEFAULT_CONTENTION_SECONDS;
+ ns->ns_contended_locks = NS_DEFAULT_CONTENDED_LOCKS;
+
+ ns->ns_max_parallel_ast = LDLM_DEFAULT_PARALLEL_AST_LIMIT;
+ ns->ns_nr_unused = 0;
+ ns->ns_max_unused = LDLM_DEFAULT_LRU_SIZE;
+ ns->ns_max_age = LDLM_DEFAULT_MAX_ALIVE;
+ ns->ns_ctime_age_limit = LDLM_CTIME_AGE_LIMIT;
+ ns->ns_timeouts = 0;
+ ns->ns_orig_connect_flags = 0;
+ ns->ns_connect_flags = 0;
+ ns->ns_stopping = 0;
+ rc = ldlm_namespace_proc_register(ns);
+ if (rc != 0) {
+ CERROR("Can't initialize ns proc, rc %d\n", rc);
+ GOTO(out_hash, rc);
+ }
+
+ idx = ldlm_namespace_nr_read(client);
+ rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
+ if (rc) {
+ CERROR("Can't initialize lock pool, rc %d\n", rc);
+ GOTO(out_proc, rc);
+ }
+
+ ldlm_namespace_register(ns, client);
+ return ns;
+out_proc:
+ ldlm_namespace_proc_unregister(ns);
+ ldlm_namespace_cleanup(ns, 0);
+out_hash:
+ cfs_hash_putref(ns->ns_rs_hash);
+out_ns:
+ OBD_FREE_PTR(ns);
+out_ref:
+ ldlm_put_ref();
+ return NULL;
+}
+EXPORT_SYMBOL(ldlm_namespace_new);
+
+extern struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
+
+/**
+ * Cancel and destroy all locks on a resource.
+ *
+ * If flags contains FL_LOCAL_ONLY, don't try to tell the server, just
+ * clean up. This is currently only used for recovery, and we make
+ * certain assumptions as a result--notably, that we shouldn't cancel
+ * locks with refs.
+ */
+static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
+ __u64 flags)
+{
+ struct list_head *tmp;
+ int rc = 0, client = ns_is_client(ldlm_res_to_ns(res));
+ bool local_only = !!(flags & LDLM_FL_LOCAL_ONLY);
+
+ do {
+ struct ldlm_lock *lock = NULL;
+
+		/* First, look for a lock that has not yet been cleaned up;
+		 * all cleaned locks are marked with the CLEANED flag. */
+ lock_res(res);
+ list_for_each(tmp, q) {
+ lock = list_entry(tmp, struct ldlm_lock,
+ l_res_link);
+ if (lock->l_flags & LDLM_FL_CLEANED) {
+ lock = NULL;
+ continue;
+ }
+ LDLM_LOCK_GET(lock);
+ lock->l_flags |= LDLM_FL_CLEANED;
+ break;
+ }
+
+ if (lock == NULL) {
+ unlock_res(res);
+ break;
+ }
+
+ /* Set CBPENDING so nothing in the cancellation path
+ * can match this lock. */
+ lock->l_flags |= LDLM_FL_CBPENDING;
+ lock->l_flags |= LDLM_FL_FAILED;
+ lock->l_flags |= flags;
+
+ /* ... without sending a CANCEL message for local_only. */
+ if (local_only)
+ lock->l_flags |= LDLM_FL_LOCAL_ONLY;
+
+ if (local_only && (lock->l_readers || lock->l_writers)) {
+ /* This is a little bit gross, but much better than the
+ * alternative: pretend that we got a blocking AST from
+ * the server, so that when the lock is decref'd, it
+ * will go away ... */
+ unlock_res(res);
+ LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
+ if (lock->l_completion_ast)
+ lock->l_completion_ast(lock, 0, NULL);
+ LDLM_LOCK_RELEASE(lock);
+ continue;
+ }
+
+ if (client) {
+ struct lustre_handle lockh;
+
+ unlock_res(res);
+ ldlm_lock2handle(lock, &lockh);
+ rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
+ if (rc)
+ CERROR("ldlm_cli_cancel: %d\n", rc);
+ } else {
+ ldlm_resource_unlink_lock(lock);
+ unlock_res(res);
+ LDLM_DEBUG(lock, "Freeing a lock still held by a "
+ "client node");
+ ldlm_lock_destroy(lock);
+ }
+ LDLM_LOCK_RELEASE(lock);
+ } while (1);
+}
+
+static int ldlm_resource_clean(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode, void *arg)
+{
+ struct ldlm_resource *res = cfs_hash_object(hs, hnode);
+ __u64 flags = *(__u64 *)arg;
+
+ cleanup_resource(res, &res->lr_granted, flags);
+ cleanup_resource(res, &res->lr_converting, flags);
+ cleanup_resource(res, &res->lr_waiting, flags);
+
+ return 0;
+}
+
+static int ldlm_resource_complain(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode, void *arg)
+{
+ struct ldlm_resource *res = cfs_hash_object(hs, hnode);
+
+ lock_res(res);
+ CERROR("Namespace %s resource refcount nonzero "
+ "(%d) after lock cleanup; forcing "
+ "cleanup.\n",
+ ldlm_ns_name(ldlm_res_to_ns(res)),
+ atomic_read(&res->lr_refcount) - 1);
+
+ CERROR("Resource: %p ("LPU64"/"LPU64"/"LPU64"/"
+ LPU64") (rc: %d)\n", res,
+ res->lr_name.name[0], res->lr_name.name[1],
+ res->lr_name.name[2], res->lr_name.name[3],
+ atomic_read(&res->lr_refcount) - 1);
+
+ ldlm_resource_dump(D_ERROR, res);
+ unlock_res(res);
+ return 0;
+}
+
+/**
+ * Cancel and destroy all locks in the namespace.
+ *
+ * Typically used during evictions, when the server has notified the client
+ * that it was evicted and all of its state needs to be destroyed.
+ * Also used during shutdown.
+ */
+int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
+{
+ if (ns == NULL) {
+ CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
+ return ELDLM_OK;
+ }
+
+ cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean, &flags);
+ cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain, NULL);
+ return ELDLM_OK;
+}
+EXPORT_SYMBOL(ldlm_namespace_cleanup);
+
+/**
+ * Attempts to free namespace.
+ *
+ * Only used when namespace goes away, like during an unmount.
+ */
+static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
+{
+ /* At shutdown time, don't call the cancellation callback */
+ ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);
+
+ if (atomic_read(&ns->ns_bref) > 0) {
+ struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+ int rc;
+ CDEBUG(D_DLMTRACE,
+ "dlm namespace %s free waiting on refcount %d\n",
+ ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
+force_wait:
+ if (force)
+ lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);
+
+ rc = l_wait_event(ns->ns_waitq,
+ atomic_read(&ns->ns_bref) == 0, &lwi);
+
+ /* Forced cleanups should be able to reclaim all references,
+ * so it's safe to wait forever... we can't leak locks... */
+ if (force && rc == -ETIMEDOUT) {
+ LCONSOLE_ERROR("Forced cleanup waiting for %s "
+ "namespace with %d resources in use, "
+ "(rc=%d)\n", ldlm_ns_name(ns),
+ atomic_read(&ns->ns_bref), rc);
+ GOTO(force_wait, rc);
+ }
+
+ if (atomic_read(&ns->ns_bref)) {
+ LCONSOLE_ERROR("Cleanup waiting for %s namespace "
+ "with %d resources in use, (rc=%d)\n",
+ ldlm_ns_name(ns),
+ atomic_read(&ns->ns_bref), rc);
+ return ELDLM_NAMESPACE_EXISTS;
+ }
+ CDEBUG(D_DLMTRACE, "dlm namespace %s free done waiting\n",
+ ldlm_ns_name(ns));
+ }
+
+ return ELDLM_OK;
+}
+
+/**
+ * Performs various cleanups on the passed \a ns to make it drop its
+ * refcount and be ready for freeing. Waits for the refcount to reach 0.
+ *
+ * The following is done:
+ * (0) Unregister \a ns from its list to make it inaccessible to potential
+ * users such as the pools thread;
+ * (1) Clear all locks in \a ns.
+ */
+void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
+ struct obd_import *imp,
+ int force)
+{
+ int rc;
+
+ if (!ns) {
+ return;
+ }
+
+ spin_lock(&ns->ns_lock);
+ ns->ns_stopping = 1;
+ spin_unlock(&ns->ns_lock);
+
+ /*
+ * Can fail with -EINTR when force == 0 in which case try harder.
+ */
+ rc = __ldlm_namespace_free(ns, force);
+ if (rc != ELDLM_OK) {
+ if (imp) {
+ ptlrpc_disconnect_import(imp, 0);
+ ptlrpc_invalidate_import(imp);
+ }
+
+ /*
+		 * With all requests dropped and the import inactive,
+		 * we are guaranteed that all references will be dropped.
+ */
+ rc = __ldlm_namespace_free(ns, 1);
+ LASSERT(rc == 0);
+ }
+}
+
+/**
+ * Frees memory structures related to \a ns. This is only done when
+ * ldlm_namespace_free_prior() has successfully removed all resources
+ * referencing \a ns and its refcount has reached 0.
+ */
+void ldlm_namespace_free_post(struct ldlm_namespace *ns)
+{
+ if (!ns) {
+ return;
+ }
+
+ /* Make sure that nobody can find this ns in its list. */
+ ldlm_namespace_unregister(ns, ns->ns_client);
+	/* Fini pool _before_ parent proc dir is removed. This is important as
+	 * ldlm_pool_fini() removes its own proc dir, which is a child of @dir.
+	 * Removing it after @dir may cause an oops. */
+ ldlm_pool_fini(&ns->ns_pool);
+
+ ldlm_namespace_proc_unregister(ns);
+ cfs_hash_putref(ns->ns_rs_hash);
+	/* Namespace \a ns should not be on any list at this time; otherwise
+	 * the pools thread could end up using a freed \a ns. */
+ LASSERT(list_empty(&ns->ns_list_chain));
+ OBD_FREE_PTR(ns);
+ ldlm_put_ref();
+}
+
+/**
+ * Clean up the resources and free the namespace.
+ * bug 12864:
+ * Deadlock issue:
+ * proc1: destroy import
+ * class_disconnect_export(grab cl_sem) ->
+ * -> ldlm_namespace_free ->
+ * -> lprocfs_remove(grab _lprocfs_lock).
+ * proc2: read proc info
+ * lprocfs_fops_read(grab _lprocfs_lock) ->
+ * -> osc_rd_active, etc(grab cl_sem).
+ *
+ * Therefore ldlm_namespace_free() is split into two parts: the first part,
+ * ldlm_namespace_free_prior(), cleans up the resources that are in use;
+ * the second part, ldlm_namespace_free_post(), unregisters the lprocfs
+ * entries and then frees the memory. The latter is called without
+ * cli->cl_sem held.
+ */
+void ldlm_namespace_free(struct ldlm_namespace *ns,
+ struct obd_import *imp,
+ int force)
+{
+ ldlm_namespace_free_prior(ns, imp, force);
+ ldlm_namespace_free_post(ns);
+}
+EXPORT_SYMBOL(ldlm_namespace_free);
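+
+/* Illustrative sketch only: a teardown path that cannot use the combined
+ * helper above (for example because a client-side semaphore is held around
+ * the first stage, as described in the bug 12864 comment) would perform the
+ * two stages separately, roughly as follows:
+ *
+ *	ldlm_namespace_free_prior(ns, imp, force);
+ *	... release the locks/semaphores that lprocfs readers may need ...
+ *	ldlm_namespace_free_post(ns);
+ */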
+
+void ldlm_namespace_get(struct ldlm_namespace *ns)
+{
+ atomic_inc(&ns->ns_bref);
+}
+EXPORT_SYMBOL(ldlm_namespace_get);
+
+/* This is only for callers that care about refcount */
+int ldlm_namespace_get_return(struct ldlm_namespace *ns)
+{
+ return atomic_inc_return(&ns->ns_bref);
+}
+
+void ldlm_namespace_put(struct ldlm_namespace *ns)
+{
+ if (atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
+ wake_up(&ns->ns_waitq);
+ spin_unlock(&ns->ns_lock);
+ }
+}
+EXPORT_SYMBOL(ldlm_namespace_put);
+
+/** Register \a ns in the list of namespaces */
+void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client)
+{
+ mutex_lock(ldlm_namespace_lock(client));
+ LASSERT(list_empty(&ns->ns_list_chain));
+ list_add(&ns->ns_list_chain, ldlm_namespace_inactive_list(client));
+ ldlm_namespace_nr_inc(client);
+ mutex_unlock(ldlm_namespace_lock(client));
+}
+
+/** Unregister \a ns from the list of namespaces. */
+void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client)
+{
+ mutex_lock(ldlm_namespace_lock(client));
+ LASSERT(!list_empty(&ns->ns_list_chain));
+ /* Some asserts and possibly other parts of the code are still
+ * using list_empty(&ns->ns_list_chain). This is why it is
+ * important to use list_del_init() here. */
+ list_del_init(&ns->ns_list_chain);
+ ldlm_namespace_nr_dec(client);
+ mutex_unlock(ldlm_namespace_lock(client));
+}
+
+/** Should be called with ldlm_namespace_lock(client) taken. */
+void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
+ ldlm_side_t client)
+{
+ LASSERT(!list_empty(&ns->ns_list_chain));
+ LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
+ list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
+}
+
+/** Should be called with ldlm_namespace_lock(client) taken. */
+void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
+ ldlm_side_t client)
+{
+ LASSERT(!list_empty(&ns->ns_list_chain));
+ LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
+ list_move_tail(&ns->ns_list_chain,
+ ldlm_namespace_inactive_list(client));
+}
+
+/** Should be called with ldlm_namespace_lock(client) taken. */
+struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
+{
+ LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
+ LASSERT(!list_empty(ldlm_namespace_list(client)));
+ return container_of(ldlm_namespace_list(client)->next,
+ struct ldlm_namespace, ns_list_chain);
+}
+
+/** Create and initialize new resource. */
+static struct ldlm_resource *ldlm_resource_new(void)
+{
+ struct ldlm_resource *res;
+ int idx;
+
+ OBD_SLAB_ALLOC_PTR_GFP(res, ldlm_resource_slab, __GFP_IO);
+ if (res == NULL)
+ return NULL;
+
+ INIT_LIST_HEAD(&res->lr_granted);
+ INIT_LIST_HEAD(&res->lr_converting);
+ INIT_LIST_HEAD(&res->lr_waiting);
+
+ /* Initialize interval trees for each lock mode. */
+ for (idx = 0; idx < LCK_MODE_NUM; idx++) {
+ res->lr_itree[idx].lit_size = 0;
+ res->lr_itree[idx].lit_mode = 1 << idx;
+ res->lr_itree[idx].lit_root = NULL;
+ }
+
+ atomic_set(&res->lr_refcount, 1);
+ spin_lock_init(&res->lr_lock);
+ lu_ref_init(&res->lr_reference);
+
+ /* The creator of the resource must unlock the mutex after LVB
+ * initialization. */
+ mutex_init(&res->lr_lvb_mutex);
+ mutex_lock(&res->lr_lvb_mutex);
+
+ return res;
+}
+
+/**
+ * Return a reference to resource with given name, creating it if necessary.
+ * Args: namespace with ns_lock unlocked
+ * Locks: takes and releases NS hash-lock and res->lr_lock
+ * Returns: referenced, unlocked ldlm_resource or NULL
+ */
+struct ldlm_resource *
+ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
+ const struct ldlm_res_id *name, ldlm_type_t type, int create)
+{
+ struct hlist_node *hnode;
+ struct ldlm_resource *res;
+ cfs_hash_bd_t bd;
+ __u64 version;
+ int ns_refcount = 0;
+
+ LASSERT(ns != NULL);
+ LASSERT(parent == NULL);
+ LASSERT(ns->ns_rs_hash != NULL);
+ LASSERT(name->name[0] != 0);
+
+ cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0);
+ hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
+ if (hnode != NULL) {
+ cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
+ res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
+ /* Synchronize with regard to resource creation. */
+ if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
+ mutex_lock(&res->lr_lvb_mutex);
+ mutex_unlock(&res->lr_lvb_mutex);
+ }
+
+ if (unlikely(res->lr_lvb_len < 0)) {
+ ldlm_resource_putref(res);
+ res = NULL;
+ }
+ return res;
+ }
+
+ version = cfs_hash_bd_version_get(&bd);
+ cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
+
+ if (create == 0)
+ return NULL;
+
+ LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
+ "type: %d\n", type);
+ res = ldlm_resource_new();
+ if (!res)
+ return NULL;
+
+ res->lr_ns_bucket = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
+ res->lr_name = *name;
+ res->lr_type = type;
+ res->lr_most_restr = LCK_NL;
+
+ cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
+ hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL :
+ cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
+
+ if (hnode != NULL) {
+ /* Someone won the race and already added the resource. */
+ cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
+ /* Clean lu_ref for failed resource. */
+ lu_ref_fini(&res->lr_reference);
+ /* We have taken lr_lvb_mutex. Drop it. */
+ mutex_unlock(&res->lr_lvb_mutex);
+ OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
+
+ res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
+ /* Synchronize with regard to resource creation. */
+ if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
+ mutex_lock(&res->lr_lvb_mutex);
+ mutex_unlock(&res->lr_lvb_mutex);
+ }
+
+ if (unlikely(res->lr_lvb_len < 0)) {
+ ldlm_resource_putref(res);
+ res = NULL;
+ }
+ return res;
+ }
+ /* We won! Let's add the resource. */
+ cfs_hash_bd_add_locked(ns->ns_rs_hash, &bd, &res->lr_hash);
+ if (cfs_hash_bd_count_get(&bd) == 1)
+ ns_refcount = ldlm_namespace_get_return(ns);
+
+ cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
+ if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
+ int rc;
+
+ OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
+ rc = ns->ns_lvbo->lvbo_init(res);
+ if (rc < 0) {
+ CERROR("%s: lvbo_init failed for resource "LPX64":"
+ LPX64": rc = %d\n", ns->ns_obd->obd_name,
+ name->name[0], name->name[1], rc);
+ if (res->lr_lvb_data) {
+ OBD_FREE(res->lr_lvb_data, res->lr_lvb_len);
+ res->lr_lvb_data = NULL;
+ }
+ res->lr_lvb_len = rc;
+ mutex_unlock(&res->lr_lvb_mutex);
+ ldlm_resource_putref(res);
+ return NULL;
+ }
+ }
+
+	/* The resource was created with lr_lvb_mutex held; drop it now. */
+ mutex_unlock(&res->lr_lvb_mutex);
+
+ /* Let's see if we happened to be the very first resource in this
+ * namespace. If so, and this is a client namespace, we need to move
+ * the namespace into the active namespaces list to be patrolled by
+ * the ldlm_poold. */
+ if (ns_is_client(ns) && ns_refcount == 1) {
+ mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
+ ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT);
+ mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
+ }
+
+ return res;
+}
+EXPORT_SYMBOL(ldlm_resource_get);
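+
+/* Illustrative sketch only: a typical lookup-and-release pattern for the
+ * reference returned above; res_id here stands for a caller-provided
+ * struct ldlm_res_id, and create == 0 makes this a pure lookup:
+ *
+ *	res = ldlm_resource_get(ns, NULL, &res_id, LDLM_PLAIN, 0);
+ *	if (res != NULL) {
+ *		... inspect or iterate the resource ...
+ *		ldlm_resource_putref(res);
+ *	}
+ */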
+
+struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
+{
+ LASSERT(res != NULL);
+ LASSERT(res != LP_POISON);
+ atomic_inc(&res->lr_refcount);
+ CDEBUG(D_INFO, "getref res: %p count: %d\n", res,
+ atomic_read(&res->lr_refcount));
+ return res;
+}
+
+static void __ldlm_resource_putref_final(cfs_hash_bd_t *bd,
+ struct ldlm_resource *res)
+{
+ struct ldlm_ns_bucket *nsb = res->lr_ns_bucket;
+
+ if (!list_empty(&res->lr_granted)) {
+ ldlm_resource_dump(D_ERROR, res);
+ LBUG();
+ }
+
+ if (!list_empty(&res->lr_converting)) {
+ ldlm_resource_dump(D_ERROR, res);
+ LBUG();
+ }
+
+ if (!list_empty(&res->lr_waiting)) {
+ ldlm_resource_dump(D_ERROR, res);
+ LBUG();
+ }
+
+ cfs_hash_bd_del_locked(nsb->nsb_namespace->ns_rs_hash,
+ bd, &res->lr_hash);
+ lu_ref_fini(&res->lr_reference);
+ if (cfs_hash_bd_count_get(bd) == 0)
+ ldlm_namespace_put(nsb->nsb_namespace);
+}
+
+/* Returns 1 if the resource was freed, 0 if it remains. */
+int ldlm_resource_putref(struct ldlm_resource *res)
+{
+ struct ldlm_namespace *ns = ldlm_res_to_ns(res);
+ cfs_hash_bd_t bd;
+
+ LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
+ CDEBUG(D_INFO, "putref res: %p count: %d\n",
+ res, atomic_read(&res->lr_refcount) - 1);
+
+ cfs_hash_bd_get(ns->ns_rs_hash, &res->lr_name, &bd);
+ if (cfs_hash_bd_dec_and_lock(ns->ns_rs_hash, &bd, &res->lr_refcount)) {
+ __ldlm_resource_putref_final(&bd, res);
+ cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
+ if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
+ ns->ns_lvbo->lvbo_free(res);
+ OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
+ return 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ldlm_resource_putref);
+
+/* Returns 1 if the resource was freed, 0 if it remains. */
+int ldlm_resource_putref_locked(struct ldlm_resource *res)
+{
+ struct ldlm_namespace *ns = ldlm_res_to_ns(res);
+
+ LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
+ CDEBUG(D_INFO, "putref res: %p count: %d\n",
+ res, atomic_read(&res->lr_refcount) - 1);
+
+ if (atomic_dec_and_test(&res->lr_refcount)) {
+ cfs_hash_bd_t bd;
+
+ cfs_hash_bd_get(ldlm_res_to_ns(res)->ns_rs_hash,
+ &res->lr_name, &bd);
+ __ldlm_resource_putref_final(&bd, res);
+ cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
+ /* NB: ns_rs_hash is created with CFS_HASH_NO_ITEMREF,
+		 * so we can never get here from cfs_hash_del();
+		 * cfs_hash_for_each_nolock() is the only path that reaches
+		 * this point, and there it is safe to release cfs_hash_bd_lock.
+ */
+ if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
+ ns->ns_lvbo->lvbo_free(res);
+ OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
+
+ cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * Add a lock into a given resource into specified lock list.
+ */
+void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
+ struct ldlm_lock *lock)
+{
+ check_res_locked(res);
+
+ LDLM_DEBUG(lock, "About to add this lock:\n");
+
+ if (lock->l_flags & LDLM_FL_DESTROYED) {
+ CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
+ return;
+ }
+
+ LASSERT(list_empty(&lock->l_res_link));
+
+ list_add_tail(&lock->l_res_link, head);
+}
+
+/**
+ * Insert a lock into resource after specified lock.
+ *
+ * Obtain resource description from the lock we are inserting after.
+ */
+void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
+ struct ldlm_lock *new)
+{
+ struct ldlm_resource *res = original->l_resource;
+
+ check_res_locked(res);
+
+ ldlm_resource_dump(D_INFO, res);
+ LDLM_DEBUG(new, "About to insert this lock after %p:\n", original);
+
+ if (new->l_flags & LDLM_FL_DESTROYED) {
+ CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
+ goto out;
+ }
+
+ LASSERT(list_empty(&new->l_res_link));
+
+ list_add(&new->l_res_link, &original->l_res_link);
+ out:;
+}
+
+void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
+{
+ int type = lock->l_resource->lr_type;
+
+ check_res_locked(lock->l_resource);
+ if (type == LDLM_IBITS || type == LDLM_PLAIN)
+ ldlm_unlink_lock_skiplist(lock);
+ else if (type == LDLM_EXTENT)
+ ldlm_extent_unlink_lock(lock);
+ list_del_init(&lock->l_res_link);
+}
+EXPORT_SYMBOL(ldlm_resource_unlink_lock);
+
+void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
+{
+ desc->lr_type = res->lr_type;
+ desc->lr_name = res->lr_name;
+}
+
+/**
+ * Print information about all locks in all namespaces on this node to debug
+ * log.
+ */
+void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
+{
+ struct list_head *tmp;
+
+ if (!((libcfs_debug | D_ERROR) & level))
+ return;
+
+ mutex_lock(ldlm_namespace_lock(client));
+
+ list_for_each(tmp, ldlm_namespace_list(client)) {
+ struct ldlm_namespace *ns;
+ ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
+ ldlm_namespace_dump(level, ns);
+ }
+
+ mutex_unlock(ldlm_namespace_lock(client));
+}
+EXPORT_SYMBOL(ldlm_dump_all_namespaces);
+
+static int ldlm_res_hash_dump(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode, void *arg)
+{
+ struct ldlm_resource *res = cfs_hash_object(hs, hnode);
+ int level = (int)(unsigned long)arg;
+
+ lock_res(res);
+ ldlm_resource_dump(level, res);
+ unlock_res(res);
+
+ return 0;
+}
+
+/**
+ * Print information about all locks in this namespace on this node to debug
+ * log.
+ */
+void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
+{
+ if (!((libcfs_debug | D_ERROR) & level))
+ return;
+
+ CDEBUG(level, "--- Namespace: %s (rc: %d, side: %s)\n",
+ ldlm_ns_name(ns), atomic_read(&ns->ns_bref),
+ ns_is_client(ns) ? "client" : "server");
+
+ if (cfs_time_before(cfs_time_current(), ns->ns_next_dump))
+ return;
+
+ cfs_hash_for_each_nolock(ns->ns_rs_hash,
+ ldlm_res_hash_dump,
+ (void *)(unsigned long)level);
+ spin_lock(&ns->ns_lock);
+ ns->ns_next_dump = cfs_time_shift(10);
+ spin_unlock(&ns->ns_lock);
+}
+EXPORT_SYMBOL(ldlm_namespace_dump);
+
+/**
+ * Print information about all locks in this resource to debug log.
+ */
+void ldlm_resource_dump(int level, struct ldlm_resource *res)
+{
+ struct ldlm_lock *lock;
+ unsigned int granted = 0;
+
+ CLASSERT(RES_NAME_SIZE == 4);
+
+ if (!((libcfs_debug | D_ERROR) & level))
+ return;
+
+ CDEBUG(level, "--- Resource: %p ("LPU64"/"LPU64"/"LPU64"/"LPU64
+ ") (rc: %d)\n", res, res->lr_name.name[0], res->lr_name.name[1],
+ res->lr_name.name[2], res->lr_name.name[3],
+ atomic_read(&res->lr_refcount));
+
+ if (!list_empty(&res->lr_granted)) {
+ CDEBUG(level, "Granted locks (in reverse order):\n");
+ list_for_each_entry_reverse(lock, &res->lr_granted,
+ l_res_link) {
+ LDLM_DEBUG_LIMIT(level, lock, "###");
+ if (!(level & D_CANTMASK) &&
+ ++granted > ldlm_dump_granted_max) {
+ CDEBUG(level, "only dump %d granted locks to "
+ "avoid DDOS.\n", granted);
+ break;
+ }
+ }
+ }
+ if (!list_empty(&res->lr_converting)) {
+ CDEBUG(level, "Converting locks:\n");
+ list_for_each_entry(lock, &res->lr_converting, l_res_link)
+ LDLM_DEBUG_LIMIT(level, lock, "###");
+ }
+ if (!list_empty(&res->lr_waiting)) {
+ CDEBUG(level, "Waiting locks:\n");
+ list_for_each_entry(lock, &res->lr_waiting, l_res_link)
+ LDLM_DEBUG_LIMIT(level, lock, "###");
+ }
+}
diff --git a/drivers/staging/lustre/lustre/libcfs/Makefile b/drivers/staging/lustre/lustre/libcfs/Makefile
new file mode 100644
index 0000000..6e489d7
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/Makefile
@@ -0,0 +1,21 @@
+obj-$(CONFIG_LUSTRE_FS) += libcfs.o
+
+libcfs-linux-objs := linux-tracefile.o linux-debug.o
+libcfs-linux-objs += linux-prim.o linux-cpu.o
+libcfs-linux-objs += linux-tcpip.o
+libcfs-linux-objs += linux-proc.o linux-curproc.o
+libcfs-linux-objs += linux-module.o
+libcfs-linux-objs += linux-crypto.o
+libcfs-linux-objs += linux-crypto-adler.o
+
+libcfs-linux-objs := $(addprefix linux/,$(libcfs-linux-objs))
+
+libcfs-all-objs := debug.o fail.o nidstrings.o module.o tracefile.o \
+ libcfs_string.o hash.o kernel_user_comm.o \
+ prng.o workitem.o upcall_cache.o libcfs_cpu.o \
+ libcfs_mem.o libcfs_lock.o
+
+libcfs-objs := $(libcfs-linux-objs) $(libcfs-all-objs)
+
+ccflags-y := -I$(src)/../include
+ccflags-y += -I$(src)/
diff --git a/drivers/staging/lustre/lustre/libcfs/debug.c b/drivers/staging/lustre/lustre/libcfs/debug.c
new file mode 100644
index 0000000..9b9c451
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/debug.c
@@ -0,0 +1,469 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/libcfs/debug.c
+ *
+ * Author: Phil Schwan <phil@clusterfs.com>
+ *
+ */
+
+# define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/libcfs/libcfs.h>
+#include "tracefile.h"
+
+static char debug_file_name[1024];
+
+unsigned int libcfs_subsystem_debug = ~0;
+CFS_MODULE_PARM(libcfs_subsystem_debug, "i", int, 0644,
+ "Lustre kernel debug subsystem mask");
+EXPORT_SYMBOL(libcfs_subsystem_debug);
+
+unsigned int libcfs_debug = (D_CANTMASK |
+ D_NETERROR | D_HA | D_CONFIG | D_IOCTL);
+CFS_MODULE_PARM(libcfs_debug, "i", int, 0644,
+ "Lustre kernel debug mask");
+EXPORT_SYMBOL(libcfs_debug);
+
+unsigned int libcfs_debug_mb = 0;
+CFS_MODULE_PARM(libcfs_debug_mb, "i", uint, 0644,
+ "Total debug buffer size.");
+EXPORT_SYMBOL(libcfs_debug_mb);
+
+unsigned int libcfs_printk = D_CANTMASK;
+CFS_MODULE_PARM(libcfs_printk, "i", uint, 0644,
+ "Lustre kernel debug console mask");
+EXPORT_SYMBOL(libcfs_printk);
+
+unsigned int libcfs_console_ratelimit = 1;
+CFS_MODULE_PARM(libcfs_console_ratelimit, "i", uint, 0644,
+ "Lustre kernel debug console ratelimit (0 to disable)");
+EXPORT_SYMBOL(libcfs_console_ratelimit);
+
+unsigned int libcfs_console_max_delay;
+CFS_MODULE_PARM(libcfs_console_max_delay, "l", uint, 0644,
+ "Lustre kernel debug console max delay (jiffies)");
+EXPORT_SYMBOL(libcfs_console_max_delay);
+
+unsigned int libcfs_console_min_delay;
+CFS_MODULE_PARM(libcfs_console_min_delay, "l", uint, 0644,
+ "Lustre kernel debug console min delay (jiffies)");
+EXPORT_SYMBOL(libcfs_console_min_delay);
+
+unsigned int libcfs_console_backoff = CDEBUG_DEFAULT_BACKOFF;
+CFS_MODULE_PARM(libcfs_console_backoff, "i", uint, 0644,
+ "Lustre kernel debug console backoff factor");
+EXPORT_SYMBOL(libcfs_console_backoff);
+
+unsigned int libcfs_debug_binary = 1;
+EXPORT_SYMBOL(libcfs_debug_binary);
+
+unsigned int libcfs_stack = 3 * THREAD_SIZE / 4;
+EXPORT_SYMBOL(libcfs_stack);
+
+unsigned int portal_enter_debugger;
+EXPORT_SYMBOL(portal_enter_debugger);
+
+unsigned int libcfs_catastrophe;
+EXPORT_SYMBOL(libcfs_catastrophe);
+
+unsigned int libcfs_watchdog_ratelimit = 300;
+EXPORT_SYMBOL(libcfs_watchdog_ratelimit);
+
+unsigned int libcfs_panic_on_lbug = 1;
+CFS_MODULE_PARM(libcfs_panic_on_lbug, "i", uint, 0644,
+ "Lustre kernel panic on LBUG");
+EXPORT_SYMBOL(libcfs_panic_on_lbug);
+
+atomic_t libcfs_kmemory = ATOMIC_INIT(0);
+EXPORT_SYMBOL(libcfs_kmemory);
+
+static wait_queue_head_t debug_ctlwq;
+
+char libcfs_debug_file_path_arr[PATH_MAX] = LIBCFS_DEBUG_FILE_PATH_DEFAULT;
+
+/* We need to pass a pointer here, but elsewhere this must be a const */
+char *libcfs_debug_file_path;
+CFS_MODULE_PARM(libcfs_debug_file_path, "s", charp, 0644,
+ "Path for dumping debug logs, "
+ "set 'NONE' to prevent log dumping");
+
+int libcfs_panic_in_progress;
+
+/* libcfs_debug_token2mask() expects the returned
+ * string in lower-case */
+const char *
+libcfs_debug_subsys2str(int subsys)
+{
+ switch (1 << subsys) {
+ default:
+ return NULL;
+ case S_UNDEFINED:
+ return "undefined";
+ case S_MDC:
+ return "mdc";
+ case S_MDS:
+ return "mds";
+ case S_OSC:
+ return "osc";
+ case S_OST:
+ return "ost";
+ case S_CLASS:
+ return "class";
+ case S_LOG:
+ return "log";
+ case S_LLITE:
+ return "llite";
+ case S_RPC:
+ return "rpc";
+ case S_LNET:
+ return "lnet";
+ case S_LND:
+ return "lnd";
+ case S_PINGER:
+ return "pinger";
+ case S_FILTER:
+ return "filter";
+ case S_ECHO:
+ return "echo";
+ case S_LDLM:
+ return "ldlm";
+ case S_LOV:
+ return "lov";
+ case S_LQUOTA:
+ return "lquota";
+ case S_OSD:
+ return "osd";
+ case S_LMV:
+ return "lmv";
+ case S_SEC:
+ return "sec";
+ case S_GSS:
+ return "gss";
+ case S_MGC:
+ return "mgc";
+ case S_MGS:
+ return "mgs";
+ case S_FID:
+ return "fid";
+ case S_FLD:
+ return "fld";
+ }
+}
+
+/* libcfs_debug_token2mask() expects the returned
+ * string in lower-case */
+const char *
+libcfs_debug_dbg2str(int debug)
+{
+ switch (1 << debug) {
+ default:
+ return NULL;
+ case D_TRACE:
+ return "trace";
+ case D_INODE:
+ return "inode";
+ case D_SUPER:
+ return "super";
+ case D_EXT2:
+ return "ext2";
+ case D_MALLOC:
+ return "malloc";
+ case D_CACHE:
+ return "cache";
+ case D_INFO:
+ return "info";
+ case D_IOCTL:
+ return "ioctl";
+ case D_NETERROR:
+ return "neterror";
+ case D_NET:
+ return "net";
+ case D_WARNING:
+ return "warning";
+ case D_BUFFS:
+ return "buffs";
+ case D_OTHER:
+ return "other";
+ case D_DENTRY:
+ return "dentry";
+ case D_NETTRACE:
+ return "nettrace";
+ case D_PAGE:
+ return "page";
+ case D_DLMTRACE:
+ return "dlmtrace";
+ case D_ERROR:
+ return "error";
+ case D_EMERG:
+ return "emerg";
+ case D_HA:
+ return "ha";
+ case D_RPCTRACE:
+ return "rpctrace";
+ case D_VFSTRACE:
+ return "vfstrace";
+ case D_READA:
+ return "reada";
+ case D_MMAP:
+ return "mmap";
+ case D_CONFIG:
+ return "config";
+ case D_CONSOLE:
+ return "console";
+ case D_QUOTA:
+ return "quota";
+ case D_SEC:
+ return "sec";
+ case D_LFSCK:
+ return "lfsck";
+ }
+}
+
+int
+libcfs_debug_mask2str(char *str, int size, int mask, int is_subsys)
+{
+ const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str :
+ libcfs_debug_dbg2str;
+ int len = 0;
+ const char *token;
+ int i;
+
+ if (mask == 0) { /* "0" */
+ if (size > 0)
+ str[0] = '0';
+ len = 1;
+ } else { /* space-separated tokens */
+ for (i = 0; i < 32; i++) {
+ if ((mask & (1 << i)) == 0)
+ continue;
+
+ token = fn(i);
+ if (token == NULL) /* unused bit */
+ continue;
+
+ if (len > 0) { /* separator? */
+ if (len < size)
+ str[len] = ' ';
+ len++;
+ }
+
+ while (*token != 0) {
+ if (len < size)
+ str[len] = *token;
+ token++;
+ len++;
+ }
+ }
+ }
+
+ /* terminate 'str' */
+ if (len < size)
+ str[len] = 0;
+ else
+ str[size - 1] = 0;
+
+ return len;
+}
+
+int
+libcfs_debug_str2mask(int *mask, const char *str, int is_subsys)
+{
+ const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str :
+ libcfs_debug_dbg2str;
+ int m = 0;
+ int matched;
+ int n;
+ int t;
+
+ /* Allow a number for backwards compatibility */
+
+ for (n = strlen(str); n > 0; n--)
+ if (!isspace(str[n-1]))
+ break;
+ matched = n;
+
+ if ((t = sscanf(str, "%i%n", &m, &matched)) >= 1 &&
+ matched == n) {
+ /* don't print warning for lctl set_param debug=0 or -1 */
+ if (m != 0 && m != -1)
+ CWARN("You are trying to use a numerical value for the "
+ "mask - this will be deprecated in a future "
+ "release.\n");
+ *mask = m;
+ return 0;
+ }
+
+ return cfs_str2mask(str, fn, mask, is_subsys ? 0 : D_CANTMASK,
+ 0xffffffff);
+}
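+
+/* Illustrative sketch only: converting between a textual mask and its
+ * numeric form with the two helpers above might look roughly like this
+ * (the "dlmtrace rpctrace" string and buf[] are arbitrary examples):
+ *
+ *	int mask = 0;
+ *	char buf[128];
+ *
+ *	if (libcfs_debug_str2mask(&mask, "dlmtrace rpctrace", 0) == 0)
+ *		libcfs_debug_mask2str(buf, sizeof(buf), mask, 0);
+ */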
+
+/**
+ * Dump Lustre log to ::debug_file_path by calling tracefile_dump_all_pages()
+ */
+void libcfs_debug_dumplog_internal(void *arg)
+{
+ void *journal_info;
+
+ journal_info = current->journal_info;
+ current->journal_info = NULL;
+
+ if (strncmp(libcfs_debug_file_path_arr, "NONE", 4) != 0) {
+ snprintf(debug_file_name, sizeof(debug_file_name) - 1,
+ "%s.%ld." LPLD, libcfs_debug_file_path_arr,
+ cfs_time_current_sec(), (long_ptr_t)arg);
+ printk(KERN_ALERT "LustreError: dumping log to %s\n",
+ debug_file_name);
+ cfs_tracefile_dump_all_pages(debug_file_name);
+ libcfs_run_debug_log_upcall(debug_file_name);
+ }
+
+ current->journal_info = journal_info;
+}
+
+int libcfs_debug_dumplog_thread(void *arg)
+{
+ libcfs_debug_dumplog_internal(arg);
+ wake_up(&debug_ctlwq);
+ return 0;
+}
+
+void libcfs_debug_dumplog(void)
+{
+ wait_queue_t wait;
+ struct task_struct *dumper;
+
+ /* we're being careful to ensure that the kernel thread is
+ * able to set our state to running as it exits before we
+ * get to schedule() */
+ init_waitqueue_entry_current(&wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&debug_ctlwq, &wait);
+
+ dumper = kthread_run(libcfs_debug_dumplog_thread,
+ (void *)(long)current_pid(),
+ "libcfs_debug_dumper");
+ if (IS_ERR(dumper))
+ printk(KERN_ERR "LustreError: cannot start log dump thread:"
+ " %ld\n", PTR_ERR(dumper));
+ else
+ waitq_wait(&wait, TASK_INTERRUPTIBLE);
+
+	/* be sure to tear down even if kthread_run() failed */
+ remove_wait_queue(&debug_ctlwq, &wait);
+ set_current_state(TASK_RUNNING);
+}
+EXPORT_SYMBOL(libcfs_debug_dumplog);
+
+int libcfs_debug_init(unsigned long bufsize)
+{
+ int rc = 0;
+ unsigned int max = libcfs_debug_mb;
+
+ init_waitqueue_head(&debug_ctlwq);
+
+ if (libcfs_console_max_delay <= 0 || /* not set by user or */
+ libcfs_console_min_delay <= 0 || /* set to invalid values */
+ libcfs_console_min_delay >= libcfs_console_max_delay) {
+ libcfs_console_max_delay = CDEBUG_DEFAULT_MAX_DELAY;
+ libcfs_console_min_delay = CDEBUG_DEFAULT_MIN_DELAY;
+ }
+
+ if (libcfs_debug_file_path != NULL) {
+ memset(libcfs_debug_file_path_arr, 0, PATH_MAX);
+ strncpy(libcfs_debug_file_path_arr,
+ libcfs_debug_file_path, PATH_MAX-1);
+ }
+
+	/* If libcfs_debug_mb is uninitialized or set to an invalid value,
+	 * just make the total buffer num_possible_cpus() * TCD_MAX_PAGES pages */
+ if (max > cfs_trace_max_debug_mb() || max < num_possible_cpus()) {
+ max = TCD_MAX_PAGES;
+ } else {
+ max = (max / num_possible_cpus());
+ max = (max << (20 - PAGE_CACHE_SHIFT));
+ }
+ rc = cfs_tracefile_init(max);
+
+ if (rc == 0)
+ libcfs_register_panic_notifier();
+
+ return rc;
+}
+
+int libcfs_debug_cleanup(void)
+{
+ libcfs_unregister_panic_notifier();
+ cfs_tracefile_exit();
+ return 0;
+}
+
+int libcfs_debug_clear_buffer(void)
+{
+ cfs_trace_flush_pages();
+ return 0;
+}
+
+/* Debug markers, although printed by S_LNET,
+ * should not be marked as such. */
+#undef DEBUG_SUBSYSTEM
+#define DEBUG_SUBSYSTEM S_UNDEFINED
+int libcfs_debug_mark_buffer(const char *text)
+{
+ CDEBUG(D_TRACE,"***************************************************\n");
+ LCONSOLE(D_WARNING, "DEBUG MARKER: %s\n", text);
+ CDEBUG(D_TRACE,"***************************************************\n");
+
+ return 0;
+}
+#undef DEBUG_SUBSYSTEM
+#define DEBUG_SUBSYSTEM S_LNET
+
+void libcfs_debug_set_level(unsigned int debug_level)
+{
+ printk(KERN_WARNING "Lustre: Setting portals debug level to %08x\n",
+ debug_level);
+ libcfs_debug = debug_level;
+}
+
+EXPORT_SYMBOL(libcfs_debug_set_level);
+
+void libcfs_log_goto(struct libcfs_debug_msg_data *msgdata, const char *label,
+ long_ptr_t rc)
+{
+ libcfs_debug_msg(msgdata, "Process leaving via %s (rc=" LPLU " : " LPLD
+ " : " LPLX ")\n", label, (ulong_ptr_t)rc, rc, rc);
+}
+EXPORT_SYMBOL(libcfs_log_goto);
diff --git a/drivers/staging/lustre/lustre/libcfs/fail.c b/drivers/staging/lustre/lustre/libcfs/fail.c
new file mode 100644
index 0000000..c54448d
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/fail.c
@@ -0,0 +1,137 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see http://www.gnu.org/licenses
+ *
+ * Please contact Oracle Corporation, Inc., 500 Oracle Parkway, Redwood Shores,
+ * CA 94065 USA or visit www.oracle.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Oracle Corporation, Inc.
+ */
+
+#include <linux/libcfs/libcfs.h>
+
+unsigned long cfs_fail_loc = 0;
+unsigned int cfs_fail_val = 0;
+wait_queue_head_t cfs_race_waitq;
+int cfs_race_state;
+
+EXPORT_SYMBOL(cfs_fail_loc);
+EXPORT_SYMBOL(cfs_fail_val);
+EXPORT_SYMBOL(cfs_race_waitq);
+EXPORT_SYMBOL(cfs_race_state);
+
+int __cfs_fail_check_set(__u32 id, __u32 value, int set)
+{
+ static atomic_t cfs_fail_count = ATOMIC_INIT(0);
+
+ LASSERT(!(id & CFS_FAIL_ONCE));
+
+ if ((cfs_fail_loc & (CFS_FAILED | CFS_FAIL_ONCE)) ==
+ (CFS_FAILED | CFS_FAIL_ONCE)) {
+ atomic_set(&cfs_fail_count, 0); /* paranoia */
+ return 0;
+ }
+
+ /* Fail 1/cfs_fail_val times */
+ if (cfs_fail_loc & CFS_FAIL_RAND) {
+ if (cfs_fail_val < 2 || cfs_rand() % cfs_fail_val > 0)
+ return 0;
+ }
+
+ /* Skip the first cfs_fail_val, then fail */
+ if (cfs_fail_loc & CFS_FAIL_SKIP) {
+ if (atomic_inc_return(&cfs_fail_count) <= cfs_fail_val)
+ return 0;
+ }
+
+ /* check cfs_fail_val... */
+ if (set == CFS_FAIL_LOC_VALUE) {
+ if (cfs_fail_val != -1 && cfs_fail_val != value)
+ return 0;
+ }
+
+ /* Fail cfs_fail_val times, overridden by FAIL_ONCE */
+ if (cfs_fail_loc & CFS_FAIL_SOME &&
+ (!(cfs_fail_loc & CFS_FAIL_ONCE) || cfs_fail_val <= 1)) {
+ int count = atomic_inc_return(&cfs_fail_count);
+
+ if (count >= cfs_fail_val) {
+ set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
+ atomic_set(&cfs_fail_count, 0);
+ /* we lost the race to increment */
+ if (count > cfs_fail_val)
+ return 0;
+ }
+ }
+
+ if ((set == CFS_FAIL_LOC_ORSET || set == CFS_FAIL_LOC_RESET) &&
+ (value & CFS_FAIL_ONCE))
+ set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
+ /* Lost race to set CFS_FAILED_BIT. */
+ if (test_and_set_bit(CFS_FAILED_BIT, &cfs_fail_loc)) {
+ /* If CFS_FAIL_ONCE is valid, only one process can fail,
+ * otherwise multiple processes can fail at the same time. */
+ if (cfs_fail_loc & CFS_FAIL_ONCE)
+ return 0;
+ }
+
+ switch (set) {
+ case CFS_FAIL_LOC_NOSET:
+ case CFS_FAIL_LOC_VALUE:
+ break;
+ case CFS_FAIL_LOC_ORSET:
+ cfs_fail_loc |= value & ~(CFS_FAILED | CFS_FAIL_ONCE);
+ break;
+ case CFS_FAIL_LOC_RESET:
+ cfs_fail_loc = value;
+ break;
+ default:
+ LASSERTF(0, "called with bad set %u\n", set);
+ break;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL(__cfs_fail_check_set);
+
+int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
+{
+ int ret = 0;
+
+ ret = __cfs_fail_check_set(id, value, set);
+ if (ret) {
+ CERROR("cfs_fail_timeout id %x sleeping for %dms\n",
+ id, ms);
+ schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
+ cfs_time_seconds(ms) / 1000);
+ set_current_state(TASK_RUNNING);
+ CERROR("cfs_fail_timeout id %x awake\n", id);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(__cfs_fail_timeout_set);
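+
+/*
+ * A minimal usage sketch; the CFS_FAIL_CHECK() wrapper and the 0x123
+ * fail-loc id are illustrative assumptions (the wrapper is expected to
+ * call __cfs_fail_check_set() when cfs_fail_loc matches the id).  A test
+ * harness arms the failure point, and the instrumented code path then
+ * simulates an error once:
+ *
+ *	cfs_fail_loc = 0x123 | CFS_FAIL_ONCE;
+ *
+ *	if (CFS_FAIL_CHECK(0x123))
+ *		return -EIO;
+ */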
diff --git a/drivers/staging/lustre/lustre/libcfs/hash.c b/drivers/staging/lustre/lustre/libcfs/hash.c
new file mode 100644
index 0000000..0dd12c8
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/hash.c
@@ -0,0 +1,2113 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/libcfs/hash.c
+ *
+ * Implements a hash class used throughout the Lustre system.
+ *
+ * Author: YuZhangyong <yzy@clusterfs.com>
+ *
+ * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
+ * - Simplified API and improved documentation
+ * - Added per-hash feature flags:
+ * * CFS_HASH_DEBUG additional validation
+ * * CFS_HASH_REHASH dynamic rehashing
+ * - Added per-hash statistics
+ * - General performance enhancements
+ *
+ * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
+ * - move all stuff to libcfs
+ * - don't allow cur_bits != max_bits without setting of CFS_HASH_REHASH
+ * - ignore hs_rwlock if without CFS_HASH_REHASH setting
+ * - buckets are allocated one by one (instead of as one contiguous
+ * region of memory), to avoid unnecessary cacheline conflicts
+ *
+ * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
+ * - "bucket" is a group of hlist_head now, user can speicify bucket size
+ * by bkt_bits of cfs_hash_create(), all hlist_heads in a bucket share
+ * one lock for reducing memory overhead.
+ *
+ * - support lockless hash, caller will take care of locks:
+ * avoid lock overhead for hash tables that are already protected
+ * by locking in the caller for another reason
+ *
+ * - support both spin_lock/rwlock for bucket:
+ * overhead of spinlock contention is lower than read/write
+ * contention of rwlock, so using spinlock to serialize operations on
+ * bucket is more reasonable for those frequently changed hash tables
+ *
+ * - support one-single lock mode:
+ * one lock to protect all hash operations to avoid overhead of
+ * multiple locks if hash table is always small
+ *
+ * - removed a lot of unnecessary addref & decref on hash element:
+ * addref & decref are atomic operations in many use-cases which
+ * are expensive.
+ *
+ * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
+ * some lustre use-cases require these functions to be strictly
+ * non-blocking, so any required rehash is scheduled on a
+ * different thread in those cases.
+ *
+ * - safer rehash on large hash table
+ * In the old implementation, the rehash function exclusively locked
+ * the hash table and finished the rehash in one batch; that is
+ * dangerous on an SMP system because rehashing millions of elements
+ * can take a long time. The new implementation can release the lock
+ * and relax the CPU in the middle of a rehash, so it is safe for
+ * another thread to search or change the hash table even while it is
+ * rehashing.
+ *
+ * - support two different refcount modes
+ * . hash table has refcount on element
+ * . hash table doesn't change refcount on adding/removing element
+ *
+ * - support long name hash table (for param-tree)
+ *
+ * - fix a bug for cfs_hash_rehash_key:
+ * in old implementation, cfs_hash_rehash_key could screw up the
+ * hash-table because @key is overwritten without any protection.
+ * Now we need user to define hs_keycpy for those rehash enabled
+ * hash tables, cfs_hash_rehash_key will overwrite hash-key
+ * inside lock by calling hs_keycpy.
+ *
+ * - better hash iteration:
+ * Now we support both locked iteration & lockless iteration of hash
+ * table. Also, the user can break the iteration by returning 1 from
+ * the callback. A minimal usage sketch follows this header comment.
+ */
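+
+/*
+ * A minimal usage sketch; "struct my_obj", "my_hash_ops" and the
+ * size/theta parameters below are placeholders supplied by the caller
+ * (CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA and CFS_HASH_DEFAULT are
+ * assumed to come from the libcfs hash header):
+ *
+ *	struct my_obj {
+ *		int			key;
+ *		struct hlist_node	hnode;
+ *	};
+ *
+ *	cfs_hash_t *hs;
+ *
+ *	hs = cfs_hash_create("my_hash", 5, 10, 3, 0,
+ *			     CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
+ *			     &my_hash_ops, CFS_HASH_DEFAULT);
+ *	cfs_hash_add(hs, &obj->key, &obj->hnode);
+ *	obj = cfs_hash_lookup(hs, &obj_key);
+ *	cfs_hash_putref(hs);
+ */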
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/seq_file.h>
+
+#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
+static unsigned int warn_on_depth = 8;
+CFS_MODULE_PARM(warn_on_depth, "i", uint, 0644,
+ "warning when hash depth is high.");
+#endif
+
+struct cfs_wi_sched *cfs_sched_rehash;
+
+static inline void
+cfs_hash_nl_lock(cfs_hash_lock_t *lock, int exclusive) {}
+
+static inline void
+cfs_hash_nl_unlock(cfs_hash_lock_t *lock, int exclusive) {}
+
+static inline void
+cfs_hash_spin_lock(cfs_hash_lock_t *lock, int exclusive)
+{
+ spin_lock(&lock->spin);
+}
+
+static inline void
+cfs_hash_spin_unlock(cfs_hash_lock_t *lock, int exclusive)
+{
+ spin_unlock(&lock->spin);
+}
+
+static inline void
+cfs_hash_rw_lock(cfs_hash_lock_t *lock, int exclusive)
+{
+ if (!exclusive)
+ read_lock(&lock->rw);
+ else
+ write_lock(&lock->rw);
+}
+
+static inline void
+cfs_hash_rw_unlock(cfs_hash_lock_t *lock, int exclusive)
+{
+ if (!exclusive)
+ read_unlock(&lock->rw);
+ else
+ write_unlock(&lock->rw);
+}
+
+/** No lock hash */
+static cfs_hash_lock_ops_t cfs_hash_nl_lops =
+{
+ .hs_lock = cfs_hash_nl_lock,
+ .hs_unlock = cfs_hash_nl_unlock,
+ .hs_bkt_lock = cfs_hash_nl_lock,
+ .hs_bkt_unlock = cfs_hash_nl_unlock,
+};
+
+/** no bucket lock, one spinlock to protect everything */
+static cfs_hash_lock_ops_t cfs_hash_nbl_lops =
+{
+ .hs_lock = cfs_hash_spin_lock,
+ .hs_unlock = cfs_hash_spin_unlock,
+ .hs_bkt_lock = cfs_hash_nl_lock,
+ .hs_bkt_unlock = cfs_hash_nl_unlock,
+};
+
+/** spin bucket lock, rehash is enabled */
+static cfs_hash_lock_ops_t cfs_hash_bkt_spin_lops =
+{
+ .hs_lock = cfs_hash_rw_lock,
+ .hs_unlock = cfs_hash_rw_unlock,
+ .hs_bkt_lock = cfs_hash_spin_lock,
+ .hs_bkt_unlock = cfs_hash_spin_unlock,
+};
+
+/** rw bucket lock, rehash is enabled */
+static cfs_hash_lock_ops_t cfs_hash_bkt_rw_lops =
+{
+ .hs_lock = cfs_hash_rw_lock,
+ .hs_unlock = cfs_hash_rw_unlock,
+ .hs_bkt_lock = cfs_hash_rw_lock,
+ .hs_bkt_unlock = cfs_hash_rw_unlock,
+};
+
+/** spin bucket lock, rehash is disabled */
+static cfs_hash_lock_ops_t cfs_hash_nr_bkt_spin_lops =
+{
+ .hs_lock = cfs_hash_nl_lock,
+ .hs_unlock = cfs_hash_nl_unlock,
+ .hs_bkt_lock = cfs_hash_spin_lock,
+ .hs_bkt_unlock = cfs_hash_spin_unlock,
+};
+
+/** rw bucket lock, rehash is disabled */
+static cfs_hash_lock_ops_t cfs_hash_nr_bkt_rw_lops =
+{
+ .hs_lock = cfs_hash_nl_lock,
+ .hs_unlock = cfs_hash_nl_unlock,
+ .hs_bkt_lock = cfs_hash_rw_lock,
+ .hs_bkt_unlock = cfs_hash_rw_unlock,
+};
+
+static void
+cfs_hash_lock_setup(cfs_hash_t *hs)
+{
+ if (cfs_hash_with_no_lock(hs)) {
+ hs->hs_lops = &cfs_hash_nl_lops;
+
+ } else if (cfs_hash_with_no_bktlock(hs)) {
+ hs->hs_lops = &cfs_hash_nbl_lops;
+ spin_lock_init(&hs->hs_lock.spin);
+
+ } else if (cfs_hash_with_rehash(hs)) {
+ rwlock_init(&hs->hs_lock.rw);
+
+ if (cfs_hash_with_rw_bktlock(hs))
+ hs->hs_lops = &cfs_hash_bkt_rw_lops;
+ else if (cfs_hash_with_spin_bktlock(hs))
+ hs->hs_lops = &cfs_hash_bkt_spin_lops;
+ else
+ LBUG();
+ } else {
+ if (cfs_hash_with_rw_bktlock(hs))
+ hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
+ else if (cfs_hash_with_spin_bktlock(hs))
+ hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
+ else
+ LBUG();
+ }
+}
+
+/**
+ * Simple hash head without depth tracking
+ * new element is always added to head of hlist
+ */
+typedef struct {
+ struct hlist_head hh_head; /**< entries list */
+} cfs_hash_head_t;
+
+static int
+cfs_hash_hh_hhead_size(cfs_hash_t *hs)
+{
+ return sizeof(cfs_hash_head_t);
+}
+
+static struct hlist_head *
+cfs_hash_hh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
+{
+ cfs_hash_head_t *head = (cfs_hash_head_t *)&bd->bd_bucket->hsb_head[0];
+
+ return &head[bd->bd_offset].hh_head;
+}
+
+static int
+cfs_hash_hh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode)
+{
+ hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
+ return -1; /* unknown depth */
+}
+
+static int
+cfs_hash_hh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode)
+{
+ hlist_del_init(hnode);
+ return -1; /* unknown depth */
+}
+
+/**
+ * Simple hash head with depth tracking
+ * new element is always added to head of hlist
+ */
+typedef struct {
+ struct hlist_head hd_head; /**< entries list */
+ unsigned int hd_depth; /**< list length */
+} cfs_hash_head_dep_t;
+
+static int
+cfs_hash_hd_hhead_size(cfs_hash_t *hs)
+{
+ return sizeof(cfs_hash_head_dep_t);
+}
+
+static struct hlist_head *
+cfs_hash_hd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
+{
+ cfs_hash_head_dep_t *head;
+
+ head = (cfs_hash_head_dep_t *)&bd->bd_bucket->hsb_head[0];
+ return &head[bd->bd_offset].hd_head;
+}
+
+static int
+cfs_hash_hd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode)
+{
+ cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
+ cfs_hash_head_dep_t, hd_head);
+ hlist_add_head(hnode, &hh->hd_head);
+ return ++hh->hd_depth;
+}
+
+static int
+cfs_hash_hd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode)
+{
+ cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
+ cfs_hash_head_dep_t, hd_head);
+ hlist_del_init(hnode);
+ return --hh->hd_depth;
+}
+
+/**
+ * double links hash head without depth tracking
+ * new element is always added to tail of hlist
+ */
+typedef struct {
+ struct hlist_head dh_head; /**< entries list */
+ struct hlist_node *dh_tail; /**< the last entry */
+} cfs_hash_dhead_t;
+
+static int
+cfs_hash_dh_hhead_size(cfs_hash_t *hs)
+{
+ return sizeof(cfs_hash_dhead_t);
+}
+
+static struct hlist_head *
+cfs_hash_dh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
+{
+ cfs_hash_dhead_t *head;
+
+ head = (cfs_hash_dhead_t *)&bd->bd_bucket->hsb_head[0];
+ return &head[bd->bd_offset].dh_head;
+}
+
+static int
+cfs_hash_dh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode)
+{
+ cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
+ cfs_hash_dhead_t, dh_head);
+
+ if (dh->dh_tail != NULL) /* not empty */
+ hlist_add_after(dh->dh_tail, hnode);
+ else /* empty list */
+ hlist_add_head(hnode, &dh->dh_head);
+ dh->dh_tail = hnode;
+ return -1; /* unknown depth */
+}
+
+static int
+cfs_hash_dh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnd)
+{
+ cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
+ cfs_hash_dhead_t, dh_head);
+
+ if (hnd->next == NULL) { /* it's the tail */
+ dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
+ container_of(hnd->pprev, struct hlist_node, next);
+ }
+ hlist_del_init(hnd);
+ return -1; /* unknown depth */
+}
+
+/**
+ * double links hash head with depth tracking
+ * new element is always added to tail of hlist
+ */
+typedef struct {
+ struct hlist_head dd_head; /**< entries list */
+ struct hlist_node *dd_tail; /**< the last entry */
+ unsigned int dd_depth; /**< list length */
+} cfs_hash_dhead_dep_t;
+
+static int
+cfs_hash_dd_hhead_size(cfs_hash_t *hs)
+{
+ return sizeof(cfs_hash_dhead_dep_t);
+}
+
+static struct hlist_head *
+cfs_hash_dd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
+{
+ cfs_hash_dhead_dep_t *head;
+
+ head = (cfs_hash_dhead_dep_t *)&bd->bd_bucket->hsb_head[0];
+ return &head[bd->bd_offset].dd_head;
+}
+
+static int
+cfs_hash_dd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode)
+{
+ cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
+ cfs_hash_dhead_dep_t, dd_head);
+
+ if (dh->dd_tail != NULL) /* not empty */
+ hlist_add_after(dh->dd_tail, hnode);
+ else /* empty list */
+ hlist_add_head(hnode, &dh->dd_head);
+ dh->dd_tail = hnode;
+ return ++dh->dd_depth;
+}
+
+static int
+cfs_hash_dd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnd)
+{
+ cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
+ cfs_hash_dhead_dep_t, dd_head);
+
+ if (hnd->next == NULL) { /* it's the tail */
+ dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
+ container_of(hnd->pprev, struct hlist_node, next);
+ }
+ hlist_del_init(hnd);
+ return --dh->dd_depth;
+}
+
+static cfs_hash_hlist_ops_t cfs_hash_hh_hops = {
+ .hop_hhead = cfs_hash_hh_hhead,
+ .hop_hhead_size = cfs_hash_hh_hhead_size,
+ .hop_hnode_add = cfs_hash_hh_hnode_add,
+ .hop_hnode_del = cfs_hash_hh_hnode_del,
+};
+
+static cfs_hash_hlist_ops_t cfs_hash_hd_hops = {
+ .hop_hhead = cfs_hash_hd_hhead,
+ .hop_hhead_size = cfs_hash_hd_hhead_size,
+ .hop_hnode_add = cfs_hash_hd_hnode_add,
+ .hop_hnode_del = cfs_hash_hd_hnode_del,
+};
+
+static cfs_hash_hlist_ops_t cfs_hash_dh_hops = {
+ .hop_hhead = cfs_hash_dh_hhead,
+ .hop_hhead_size = cfs_hash_dh_hhead_size,
+ .hop_hnode_add = cfs_hash_dh_hnode_add,
+ .hop_hnode_del = cfs_hash_dh_hnode_del,
+};
+
+static cfs_hash_hlist_ops_t cfs_hash_dd_hops = {
+ .hop_hhead = cfs_hash_dd_hhead,
+ .hop_hhead_size = cfs_hash_dd_hhead_size,
+ .hop_hnode_add = cfs_hash_dd_hnode_add,
+ .hop_hnode_del = cfs_hash_dd_hnode_del,
+};
+
+static void
+cfs_hash_hlist_setup(cfs_hash_t *hs)
+{
+ if (cfs_hash_with_add_tail(hs)) {
+ hs->hs_hops = cfs_hash_with_depth(hs) ?
+ &cfs_hash_dd_hops : &cfs_hash_dh_hops;
+ } else {
+ hs->hs_hops = cfs_hash_with_depth(hs) ?
+ &cfs_hash_hd_hops : &cfs_hash_hh_hops;
+ }
+}
+
+static void
+cfs_hash_bd_from_key(cfs_hash_t *hs, cfs_hash_bucket_t **bkts,
+ unsigned int bits, const void *key, cfs_hash_bd_t *bd)
+{
+ unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);
+
+ LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);
+
+ bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
+ bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
+}
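+
+/*
+ * Illustrative example of the split above: with bits = 10 and
+ * hs_bkt_bits = 3, the low 7 bits of the hashed index select one of
+ * 2^7 = 128 buckets and the top 3 bits select one of the 2^3 = 8
+ * hlist heads inside that bucket, all of which share one bucket lock.
+ */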
+
+void
+cfs_hash_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bd)
+{
+ /* NB: caller should hold hs->hs_rwlock if REHASH is set */
+ if (likely(hs->hs_rehash_buckets == NULL)) {
+ cfs_hash_bd_from_key(hs, hs->hs_buckets,
+ hs->hs_cur_bits, key, bd);
+ } else {
+ LASSERT(hs->hs_rehash_bits != 0);
+ cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
+ hs->hs_rehash_bits, key, bd);
+ }
+}
+EXPORT_SYMBOL(cfs_hash_bd_get);
+
+static inline void
+cfs_hash_bd_dep_record(cfs_hash_t *hs, cfs_hash_bd_t *bd, int dep_cur)
+{
+ if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
+ return;
+
+ bd->bd_bucket->hsb_depmax = dep_cur;
+# if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
+ if (likely(warn_on_depth == 0 ||
+ max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
+ return;
+
+ spin_lock(&hs->hs_dep_lock);
+ hs->hs_dep_max = dep_cur;
+ hs->hs_dep_bkt = bd->bd_bucket->hsb_index;
+ hs->hs_dep_off = bd->bd_offset;
+ hs->hs_dep_bits = hs->hs_cur_bits;
+ spin_unlock(&hs->hs_dep_lock);
+
+ cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
+# endif
+}
+
+void
+cfs_hash_bd_add_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode)
+{
+ int rc;
+
+ rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
+ cfs_hash_bd_dep_record(hs, bd, rc);
+ bd->bd_bucket->hsb_version++;
+ if (unlikely(bd->bd_bucket->hsb_version == 0))
+ bd->bd_bucket->hsb_version++;
+ bd->bd_bucket->hsb_count++;
+
+ if (cfs_hash_with_counter(hs))
+ atomic_inc(&hs->hs_count);
+ if (!cfs_hash_with_no_itemref(hs))
+ cfs_hash_get(hs, hnode);
+}
+EXPORT_SYMBOL(cfs_hash_bd_add_locked);
+
+void
+cfs_hash_bd_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode)
+{
+ hs->hs_hops->hop_hnode_del(hs, bd, hnode);
+
+ LASSERT(bd->bd_bucket->hsb_count > 0);
+ bd->bd_bucket->hsb_count--;
+ bd->bd_bucket->hsb_version++;
+ if (unlikely(bd->bd_bucket->hsb_version == 0))
+ bd->bd_bucket->hsb_version++;
+
+ if (cfs_hash_with_counter(hs)) {
+ LASSERT(atomic_read(&hs->hs_count) > 0);
+ atomic_dec(&hs->hs_count);
+ }
+ if (!cfs_hash_with_no_itemref(hs))
+ cfs_hash_put_locked(hs, hnode);
+}
+EXPORT_SYMBOL(cfs_hash_bd_del_locked);
+
+void
+cfs_hash_bd_move_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd_old,
+ cfs_hash_bd_t *bd_new, struct hlist_node *hnode)
+{
+ cfs_hash_bucket_t *obkt = bd_old->bd_bucket;
+ cfs_hash_bucket_t *nbkt = bd_new->bd_bucket;
+ int rc;
+
+ if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
+ return;
+
+ /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
+ * in cfs_hash_bd_del/add_locked */
+ hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
+ rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
+ cfs_hash_bd_dep_record(hs, bd_new, rc);
+
+ LASSERT(obkt->hsb_count > 0);
+ obkt->hsb_count--;
+ obkt->hsb_version++;
+ if (unlikely(obkt->hsb_version == 0))
+ obkt->hsb_version++;
+ nbkt->hsb_count++;
+ nbkt->hsb_version++;
+ if (unlikely(nbkt->hsb_version == 0))
+ nbkt->hsb_version++;
+}
+EXPORT_SYMBOL(cfs_hash_bd_move_locked);
+
+enum {
+ /** always set, for sanity (avoid ZERO intent) */
+ CFS_HS_LOOKUP_MASK_FIND = 1 << 0,
+ /** return entry with a ref */
+ CFS_HS_LOOKUP_MASK_REF = 1 << 1,
+ /** add entry if not existing */
+ CFS_HS_LOOKUP_MASK_ADD = 1 << 2,
+ /** delete entry, ignore other masks */
+ CFS_HS_LOOKUP_MASK_DEL = 1 << 3,
+};
+
+typedef enum cfs_hash_lookup_intent {
+ /** return item w/o refcount */
+ CFS_HS_LOOKUP_IT_PEEK = CFS_HS_LOOKUP_MASK_FIND,
+ /** return item with refcount */
+ CFS_HS_LOOKUP_IT_FIND = (CFS_HS_LOOKUP_MASK_FIND |
+ CFS_HS_LOOKUP_MASK_REF),
+ /** return item w/o refcount if existed, otherwise add */
+ CFS_HS_LOOKUP_IT_ADD = (CFS_HS_LOOKUP_MASK_FIND |
+ CFS_HS_LOOKUP_MASK_ADD),
+ /** return item with refcount if existed, otherwise add */
+ CFS_HS_LOOKUP_IT_FINDADD = (CFS_HS_LOOKUP_IT_FIND |
+ CFS_HS_LOOKUP_MASK_ADD),
+ /** delete if existed */
+ CFS_HS_LOOKUP_IT_FINDDEL = (CFS_HS_LOOKUP_MASK_FIND |
+ CFS_HS_LOOKUP_MASK_DEL)
+} cfs_hash_lookup_intent_t;
+
+static struct hlist_node *
+cfs_hash_bd_lookup_intent(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ const void *key, struct hlist_node *hnode,
+ cfs_hash_lookup_intent_t intent)
+
+{
+ struct hlist_head *hhead = cfs_hash_bd_hhead(hs, bd);
+ struct hlist_node *ehnode;
+ struct hlist_node *match;
+ int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;
+
+ /* with this function, we can avoid a lot of useless refcount ops,
+ * which are expensive atomic operations most of the time. */
+ match = intent_add ? NULL : hnode;
+ hlist_for_each(ehnode, hhead) {
+ if (!cfs_hash_keycmp(hs, key, ehnode))
+ continue;
+
+ if (match != NULL && match != ehnode) /* can't match */
+ continue;
+
+ /* match and ... */
+ if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
+ cfs_hash_bd_del_locked(hs, bd, ehnode);
+ return ehnode;
+ }
+
+ /* caller wants refcount? */
+ if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
+ cfs_hash_get(hs, ehnode);
+ return ehnode;
+ }
+ /* no match item */
+ if (!intent_add)
+ return NULL;
+
+ LASSERT(hnode != NULL);
+ cfs_hash_bd_add_locked(hs, bd, hnode);
+ return hnode;
+}
+
+struct hlist_node *
+cfs_hash_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
+{
+ return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
+ CFS_HS_LOOKUP_IT_FIND);
+}
+EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);
+
+struct hlist_node *
+cfs_hash_bd_peek_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
+{
+ return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
+ CFS_HS_LOOKUP_IT_PEEK);
+}
+EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
+
+struct hlist_node *
+cfs_hash_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ const void *key, struct hlist_node *hnode,
+ int noref)
+{
+ return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
+ CFS_HS_LOOKUP_IT_ADD |
+ (!noref * CFS_HS_LOOKUP_MASK_REF));
+}
+EXPORT_SYMBOL(cfs_hash_bd_findadd_locked);
+
+struct hlist_node *
+cfs_hash_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ const void *key, struct hlist_node *hnode)
+{
+ /* hnode can be NULL, we find the first item with @key */
+ return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
+ CFS_HS_LOOKUP_IT_FINDDEL);
+}
+EXPORT_SYMBOL(cfs_hash_bd_finddel_locked);
+
+static void
+cfs_hash_multi_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+ unsigned n, int excl)
+{
+ cfs_hash_bucket_t *prev = NULL;
+ int i;
+
+ /**
+ * bds must be ordered by ascending bd->bd_bucket->hsb_index.
+ * NB: it's possible that several bds point to the same bucket but
+ * have different bd::bd_offset, so care must be taken to avoid
+ * deadlock.
+ */
+ cfs_hash_for_each_bd(bds, n, i) {
+ if (prev == bds[i].bd_bucket)
+ continue;
+
+ LASSERT(prev == NULL ||
+ prev->hsb_index < bds[i].bd_bucket->hsb_index);
+ cfs_hash_bd_lock(hs, &bds[i], excl);
+ prev = bds[i].bd_bucket;
+ }
+}
+
+static void
+cfs_hash_multi_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+ unsigned n, int excl)
+{
+ cfs_hash_bucket_t *prev = NULL;
+ int i;
+
+ cfs_hash_for_each_bd(bds, n, i) {
+ if (prev != bds[i].bd_bucket) {
+ cfs_hash_bd_unlock(hs, &bds[i], excl);
+ prev = bds[i].bd_bucket;
+ }
+ }
+}
+
+static struct hlist_node *
+cfs_hash_multi_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+ unsigned n, const void *key)
+{
+ struct hlist_node *ehnode;
+ unsigned i;
+
+ cfs_hash_for_each_bd(bds, n, i) {
+ ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
+ CFS_HS_LOOKUP_IT_FIND);
+ if (ehnode != NULL)
+ return ehnode;
+ }
+ return NULL;
+}
+
+static struct hlist_node *
+cfs_hash_multi_bd_findadd_locked(cfs_hash_t *hs,
+ cfs_hash_bd_t *bds, unsigned n, const void *key,
+ struct hlist_node *hnode, int noref)
+{
+ struct hlist_node *ehnode;
+ int intent;
+ unsigned i;
+
+ LASSERT(hnode != NULL);
+ intent = CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF);
+
+ cfs_hash_for_each_bd(bds, n, i) {
+ ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
+ NULL, intent);
+ if (ehnode != NULL)
+ return ehnode;
+ }
+
+ if (i == 1) { /* only one bucket */
+ cfs_hash_bd_add_locked(hs, &bds[0], hnode);
+ } else {
+ cfs_hash_bd_t mybd;
+
+ cfs_hash_bd_get(hs, key, &mybd);
+ cfs_hash_bd_add_locked(hs, &mybd, hnode);
+ }
+
+ return hnode;
+}
+
+static struct hlist_node *
+cfs_hash_multi_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+ unsigned n, const void *key,
+ struct hlist_node *hnode)
+{
+ struct hlist_node *ehnode;
+ unsigned i;
+
+ cfs_hash_for_each_bd(bds, n, i) {
+ ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
+ CFS_HS_LOOKUP_IT_FINDDEL);
+ if (ehnode != NULL)
+ return ehnode;
+ }
+ return NULL;
+}
+
+static void
+cfs_hash_bd_order(cfs_hash_bd_t *bd1, cfs_hash_bd_t *bd2)
+{
+ int rc;
+
+ if (bd2->bd_bucket == NULL)
+ return;
+
+ if (bd1->bd_bucket == NULL) {
+ *bd1 = *bd2;
+ bd2->bd_bucket = NULL;
+ return;
+ }
+
+ rc = cfs_hash_bd_compare(bd1, bd2);
+ if (rc == 0) {
+ bd2->bd_bucket = NULL;
+
+ } else if (rc > 0) { /* swap bd1 and bd2 */
+ cfs_hash_bd_t tmp;
+
+ tmp = *bd2;
+ *bd2 = *bd1;
+ *bd1 = tmp;
+ }
+}
+
+void
+cfs_hash_dual_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bds)
+{
+ /* NB: caller should hold hs_lock.rw if REHASH is set */
+ cfs_hash_bd_from_key(hs, hs->hs_buckets,
+ hs->hs_cur_bits, key, &bds[0]);
+ if (likely(hs->hs_rehash_buckets == NULL)) {
+ /* no rehash or not rehashing */
+ bds[1].bd_bucket = NULL;
+ return;
+ }
+
+ LASSERT(hs->hs_rehash_bits != 0);
+ cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
+ hs->hs_rehash_bits, key, &bds[1]);
+
+ cfs_hash_bd_order(&bds[0], &bds[1]);
+}
+EXPORT_SYMBOL(cfs_hash_dual_bd_get);
+
+void
+cfs_hash_dual_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl)
+{
+ cfs_hash_multi_bd_lock(hs, bds, 2, excl);
+}
+EXPORT_SYMBOL(cfs_hash_dual_bd_lock);
+
+void
+cfs_hash_dual_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl)
+{
+ cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
+}
+EXPORT_SYMBOL(cfs_hash_dual_bd_unlock);
+
+struct hlist_node *
+cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+ const void *key)
+{
+ return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
+}
+EXPORT_SYMBOL(cfs_hash_dual_bd_lookup_locked);
+
+struct hlist_node *
+cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+ const void *key, struct hlist_node *hnode,
+ int noref)
+{
+ return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
+ hnode, noref);
+}
+EXPORT_SYMBOL(cfs_hash_dual_bd_findadd_locked);
+
+struct hlist_node *
+cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
+ const void *key, struct hlist_node *hnode)
+{
+ return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
+}
+EXPORT_SYMBOL(cfs_hash_dual_bd_finddel_locked);
+
+static void
+cfs_hash_buckets_free(cfs_hash_bucket_t **buckets,
+ int bkt_size, int prev_size, int size)
+{
+ int i;
+
+ for (i = prev_size; i < size; i++) {
+ if (buckets[i] != NULL)
+ LIBCFS_FREE(buckets[i], bkt_size);
+ }
+
+ LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
+}
+
+/*
+ * Create or grow bucket memory. Return old_buckets if no allocation was
+ * needed, the newly allocated buckets if allocation was needed and
+ * successful, and NULL on error.
+ */
+static cfs_hash_bucket_t **
+cfs_hash_buckets_realloc(cfs_hash_t *hs, cfs_hash_bucket_t **old_bkts,
+ unsigned int old_size, unsigned int new_size)
+{
+ cfs_hash_bucket_t **new_bkts;
+ int i;
+
+ LASSERT(old_size == 0 || old_bkts != NULL);
+
+ if (old_bkts != NULL && old_size == new_size)
+ return old_bkts;
+
+ LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
+ if (new_bkts == NULL)
+ return NULL;
+
+ if (old_bkts != NULL) {
+ memcpy(new_bkts, old_bkts,
+ min(old_size, new_size) * sizeof(*old_bkts));
+ }
+
+ for (i = old_size; i < new_size; i++) {
+ struct hlist_head *hhead;
+ cfs_hash_bd_t bd;
+
+ LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
+ if (new_bkts[i] == NULL) {
+ cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
+ old_size, new_size);
+ return NULL;
+ }
+
+ new_bkts[i]->hsb_index = i;
+ new_bkts[i]->hsb_version = 1; /* shouldn't be zero */
+ new_bkts[i]->hsb_depmax = -1; /* unknown */
+ bd.bd_bucket = new_bkts[i];
+ cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
+ INIT_HLIST_HEAD(hhead);
+
+ if (cfs_hash_with_no_lock(hs) ||
+ cfs_hash_with_no_bktlock(hs))
+ continue;
+
+ if (cfs_hash_with_rw_bktlock(hs))
+ rwlock_init(&new_bkts[i]->hsb_lock.rw);
+ else if (cfs_hash_with_spin_bktlock(hs))
+ spin_lock_init(&new_bkts[i]->hsb_lock.spin);
+ else
+ LBUG(); /* invalid use-case */
+ }
+ return new_bkts;
+}
+
+/**
+ * Initialize new libcfs hash, where:
+ * @name - Descriptive hash name
+ * @cur_bits - Initial hash table size, in bits
+ * @max_bits - Maximum allowed hash table resize, in bits
+ * @ops - Registered hash table operations
+ * @flags - CFS_HASH_REHASH enable dynamic hash resizing
+ * - CFS_HASH_SORT enable chained hash sort
+ */
+static int cfs_hash_rehash_worker(cfs_workitem_t *wi);
+
+#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
+static int cfs_hash_dep_print(cfs_workitem_t *wi)
+{
+ cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_dep_wi);
+ int dep;
+ int bkt;
+ int off;
+ int bits;
+
+ spin_lock(&hs->hs_dep_lock);
+ dep = hs->hs_dep_max;
+ bkt = hs->hs_dep_bkt;
+ off = hs->hs_dep_off;
+ bits = hs->hs_dep_bits;
+ spin_unlock(&hs->hs_dep_lock);
+
+ LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
+ hs->hs_name, bits, dep, bkt, off);
+ spin_lock(&hs->hs_dep_lock);
+ hs->hs_dep_bits = 0; /* mark as workitem done */
+ spin_unlock(&hs->hs_dep_lock);
+ return 0;
+}
+
+static void cfs_hash_depth_wi_init(cfs_hash_t *hs)
+{
+ spin_lock_init(&hs->hs_dep_lock);
+ cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
+}
+
+static void cfs_hash_depth_wi_cancel(cfs_hash_t *hs)
+{
+ if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
+ return;
+
+ spin_lock(&hs->hs_dep_lock);
+ while (hs->hs_dep_bits != 0) {
+ spin_unlock(&hs->hs_dep_lock);
+ cond_resched();
+ spin_lock(&hs->hs_dep_lock);
+ }
+ spin_unlock(&hs->hs_dep_lock);
+}
+
+#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
+
+static inline void cfs_hash_depth_wi_init(cfs_hash_t *hs) {}
+static inline void cfs_hash_depth_wi_cancel(cfs_hash_t *hs) {}
+
+#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
+
+cfs_hash_t *
+cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
+ unsigned bkt_bits, unsigned extra_bytes,
+ unsigned min_theta, unsigned max_theta,
+ cfs_hash_ops_t *ops, unsigned flags)
+{
+ cfs_hash_t *hs;
+ int len;
+
+ CLASSERT(CFS_HASH_THETA_BITS < 15);
+
+ LASSERT(name != NULL);
+ LASSERT(ops != NULL);
+ LASSERT(ops->hs_key);
+ LASSERT(ops->hs_hash);
+ LASSERT(ops->hs_object);
+ LASSERT(ops->hs_keycmp);
+ LASSERT(ops->hs_get != NULL);
+ LASSERT(ops->hs_put_locked != NULL);
+
+ if ((flags & CFS_HASH_REHASH) != 0)
+ flags |= CFS_HASH_COUNTER; /* must have counter */
+
+ LASSERT(cur_bits > 0);
+ LASSERT(cur_bits >= bkt_bits);
+ LASSERT(max_bits >= cur_bits && max_bits < 31);
+ LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
+ LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
+ (flags & CFS_HASH_NO_LOCK) == 0));
+ LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
+ ops->hs_keycpy != NULL));
+
+ len = (flags & CFS_HASH_BIGNAME) == 0 ?
+ CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
+ LIBCFS_ALLOC(hs, offsetof(cfs_hash_t, hs_name[len]));
+ if (hs == NULL)
+ return NULL;
+
+ strncpy(hs->hs_name, name, len);
+ hs->hs_name[len - 1] = '\0';
+ hs->hs_flags = flags;
+
+ atomic_set(&hs->hs_refcount, 1);
+ atomic_set(&hs->hs_count, 0);
+
+ cfs_hash_lock_setup(hs);
+ cfs_hash_hlist_setup(hs);
+
+ hs->hs_cur_bits = (__u8)cur_bits;
+ hs->hs_min_bits = (__u8)cur_bits;
+ hs->hs_max_bits = (__u8)max_bits;
+ hs->hs_bkt_bits = (__u8)bkt_bits;
+
+ hs->hs_ops = ops;
+ hs->hs_extra_bytes = extra_bytes;
+ hs->hs_rehash_bits = 0;
+ cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
+ cfs_hash_depth_wi_init(hs);
+
+ if (cfs_hash_with_rehash(hs))
+ __cfs_hash_set_theta(hs, min_theta, max_theta);
+
+ hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
+ CFS_HASH_NBKT(hs));
+ if (hs->hs_buckets != NULL)
+ return hs;
+
+ LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[len]));
+ return NULL;
+}
+EXPORT_SYMBOL(cfs_hash_create);
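+
+/*
+ * A sketch of the operations table that the assertions in
+ * cfs_hash_create() require; the callback names on the right-hand side
+ * are hypothetical:
+ *
+ *	static cfs_hash_ops_t my_hash_ops = {
+ *		.hs_hash	= my_obj_hash,
+ *		.hs_key		= my_obj_key,
+ *		.hs_keycmp	= my_obj_keycmp,
+ *		.hs_object	= my_obj_object,
+ *		.hs_get		= my_obj_get,
+ *		.hs_put_locked	= my_obj_put_locked,
+ *	};
+ */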
+
+/**
+ * Cleanup libcfs hash @hs.
+ */
+static void
+cfs_hash_destroy(cfs_hash_t *hs)
+{
+ struct hlist_node *hnode;
+ struct hlist_node *pos;
+ cfs_hash_bd_t bd;
+ int i;
+
+ LASSERT(hs != NULL);
+ LASSERT(!cfs_hash_is_exiting(hs) &&
+ !cfs_hash_is_iterating(hs));
+
+ /**
+ * prohibit further rehashes; we don't need any lock because
+ * this is the only (last) thread that can change it.
+ */
+ hs->hs_exiting = 1;
+ if (cfs_hash_with_rehash(hs))
+ cfs_hash_rehash_cancel(hs);
+
+ cfs_hash_depth_wi_cancel(hs);
+ /* rehash should be done/canceled */
+ LASSERT(hs->hs_buckets != NULL &&
+ hs->hs_rehash_buckets == NULL);
+
+ cfs_hash_for_each_bucket(hs, &bd, i) {
+ struct hlist_head *hhead;
+
+ LASSERT(bd.bd_bucket != NULL);
+ /* no need to take this lock, just for consistent code */
+ cfs_hash_bd_lock(hs, &bd, 1);
+
+ cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
+ hlist_for_each_safe(hnode, pos, hhead) {
+ LASSERTF(!cfs_hash_with_assert_empty(hs),
+ "hash %s bucket %u(%u) is not "
+ " empty: %u items left\n",
+ hs->hs_name, bd.bd_bucket->hsb_index,
+ bd.bd_offset, bd.bd_bucket->hsb_count);
+ /* can't assert key validity, because the
+ * rehash may have been interrupted */
+ cfs_hash_bd_del_locked(hs, &bd, hnode);
+ cfs_hash_exit(hs, hnode);
+ }
+ }
+ LASSERT(bd.bd_bucket->hsb_count == 0);
+ cfs_hash_bd_unlock(hs, &bd, 1);
+ cond_resched();
+ }
+
+ LASSERT(atomic_read(&hs->hs_count) == 0);
+
+ cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
+ 0, CFS_HASH_NBKT(hs));
+ i = cfs_hash_with_bigname(hs) ?
+ CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
+ LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[i]));
+}
+
+cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs)
+{
+ if (atomic_inc_not_zero(&hs->hs_refcount))
+ return hs;
+ return NULL;
+}
+EXPORT_SYMBOL(cfs_hash_getref);
+
+void cfs_hash_putref(cfs_hash_t *hs)
+{
+ if (atomic_dec_and_test(&hs->hs_refcount))
+ cfs_hash_destroy(hs);
+}
+EXPORT_SYMBOL(cfs_hash_putref);
+
+static inline int
+cfs_hash_rehash_bits(cfs_hash_t *hs)
+{
+ if (cfs_hash_with_no_lock(hs) ||
+ !cfs_hash_with_rehash(hs))
+ return -EOPNOTSUPP;
+
+ if (unlikely(cfs_hash_is_exiting(hs)))
+ return -ESRCH;
+
+ if (unlikely(cfs_hash_is_rehashing(hs)))
+ return -EALREADY;
+
+ if (unlikely(cfs_hash_is_iterating(hs)))
+ return -EAGAIN;
+
+ /* XXX: need to handle case with max_theta != 2.0
+ * and the case with min_theta != 0.5 */
+ if ((hs->hs_cur_bits < hs->hs_max_bits) &&
+ (__cfs_hash_theta(hs) > hs->hs_max_theta))
+ return hs->hs_cur_bits + 1;
+
+ if (!cfs_hash_with_shrink(hs))
+ return 0;
+
+ if ((hs->hs_cur_bits > hs->hs_min_bits) &&
+ (__cfs_hash_theta(hs) < hs->hs_min_theta))
+ return hs->hs_cur_bits - 1;
+
+ return 0;
+}
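+
+/*
+ * Here theta is (roughly) the load factor computed by __cfs_hash_theta():
+ * growing by one bit is considered once it rises above hs_max_theta,
+ * and shrinking by one bit once it falls below hs_min_theta, provided
+ * the hash was created with shrinking enabled.
+ */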
+
+/**
+ * don't allow inline rehash if:
+ * - user wants non-blocking change (add/del) on hash table
+ * - there are too many elements
+ */
+static inline int
+cfs_hash_rehash_inline(cfs_hash_t *hs)
+{
+ return !cfs_hash_with_nblk_change(hs) &&
+ atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
+}
+
+/**
+ * Add item @hnode to libcfs hash @hs using @key. The registered
+ * ops->hs_get function will be called when the item is added.
+ */
+void
+cfs_hash_add(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
+{
+ cfs_hash_bd_t bd;
+ int bits;
+
+ LASSERT(hlist_unhashed(hnode));
+
+ cfs_hash_lock(hs, 0);
+ cfs_hash_bd_get_and_lock(hs, key, &bd, 1);
+
+ cfs_hash_key_validate(hs, key, hnode);
+ cfs_hash_bd_add_locked(hs, &bd, hnode);
+
+ cfs_hash_bd_unlock(hs, &bd, 1);
+
+ bits = cfs_hash_rehash_bits(hs);
+ cfs_hash_unlock(hs, 0);
+ if (bits > 0)
+ cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
+}
+EXPORT_SYMBOL(cfs_hash_add);
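+
+/*
+ * A minimal usage sketch; "obj" and its fields are hypothetical:
+ *
+ *	INIT_HLIST_NODE(&obj->hnode);
+ *	cfs_hash_add(hs, &obj->key, &obj->hnode);
+ *
+ * Use cfs_hash_add_unique() below instead when a duplicate key must be
+ * rejected; it returns -EALREADY in that case.
+ */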
+
+static struct hlist_node *
+cfs_hash_find_or_add(cfs_hash_t *hs, const void *key,
+ struct hlist_node *hnode, int noref)
+{
+ struct hlist_node *ehnode;
+ cfs_hash_bd_t bds[2];
+ int bits = 0;
+
+ LASSERT(hlist_unhashed(hnode));
+
+ cfs_hash_lock(hs, 0);
+ cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
+
+ cfs_hash_key_validate(hs, key, hnode);
+ ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
+ hnode, noref);
+ cfs_hash_dual_bd_unlock(hs, bds, 1);
+
+ if (ehnode == hnode) /* new item added */
+ bits = cfs_hash_rehash_bits(hs);
+ cfs_hash_unlock(hs, 0);
+ if (bits > 0)
+ cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
+
+ return ehnode;
+}
+
+/**
+ * Add item @hnode to libcfs hash @hs using @key. The registered
+ * ops->hs_get function will be called if the item was added.
+ * Returns 0 on success or -EALREADY on key collisions.
+ */
+int
+cfs_hash_add_unique(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
+{
+ return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
+ -EALREADY : 0;
+}
+EXPORT_SYMBOL(cfs_hash_add_unique);
+
+/**
+ * Add item @hnode to libcfs hash @hs using @key. If this @key
+ * already exists in the hash then ops->hs_get will be called on the
+ * conflicting entry and that entry will be returned to the caller.
+ * Otherwise ops->hs_get is called on the item which was added.
+ */
+void *
+cfs_hash_findadd_unique(cfs_hash_t *hs, const void *key,
+ struct hlist_node *hnode)
+{
+ hnode = cfs_hash_find_or_add(hs, key, hnode, 0);
+
+ return cfs_hash_object(hs, hnode);
+}
+EXPORT_SYMBOL(cfs_hash_findadd_unique);
+
+/**
+ * Delete item @hnode from the libcfs hash @hs using @key. The @key
+ * is required to ensure the correct hash bucket is locked since there
+ * is no direct linkage from the item to the bucket. The object
+ * removed from the hash will be returned and ops->hs_put is called
+ * on the removed object.
+ */
+void *
+cfs_hash_del(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
+{
+ void *obj = NULL;
+ int bits = 0;
+ cfs_hash_bd_t bds[2];
+
+ cfs_hash_lock(hs, 0);
+ cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
+
+ /* NB: do nothing if @hnode is not in hash table */
+ if (hnode == NULL || !hlist_unhashed(hnode)) {
+ if (bds[1].bd_bucket == NULL && hnode != NULL) {
+ cfs_hash_bd_del_locked(hs, &bds[0], hnode);
+ } else {
+ hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
+ key, hnode);
+ }
+ }
+
+ if (hnode != NULL) {
+ obj = cfs_hash_object(hs, hnode);
+ bits = cfs_hash_rehash_bits(hs);
+ }
+
+ cfs_hash_dual_bd_unlock(hs, bds, 1);
+ cfs_hash_unlock(hs, 0);
+ if (bits > 0)
+ cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
+
+ return obj;
+}
+EXPORT_SYMBOL(cfs_hash_del);
+
+/**
+ * Delete item given @key in libcfs hash @hs. The first @key found in
+ * the hash will be removed; if the key exists multiple times in the hash
+ * @hs, this function must be called once per key. The removed object
+ * will be returned and ops->hs_put is called on the removed object.
+ */
+void *
+cfs_hash_del_key(cfs_hash_t *hs, const void *key)
+{
+ return cfs_hash_del(hs, key, NULL);
+}
+EXPORT_SYMBOL(cfs_hash_del_key);
+
+/**
+ * Lookup an item using @key in the libcfs hash @hs and return it.
+ * If the @key is found in the hash, hs->hs_get() is called and the
+ * matching object is returned. It is the caller's responsibility
+ * to call the counterpart ops->hs_put using the cfs_hash_put() macro
+ * when finished with the object. If the @key was not found
+ * in the hash @hs, NULL is returned.
+ */
+void *
+cfs_hash_lookup(cfs_hash_t *hs, const void *key)
+{
+ void *obj = NULL;
+ struct hlist_node *hnode;
+ cfs_hash_bd_t bds[2];
+
+ cfs_hash_lock(hs, 0);
+ cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
+
+ hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
+ if (hnode != NULL)
+ obj = cfs_hash_object(hs, hnode);
+
+ cfs_hash_dual_bd_unlock(hs, bds, 0);
+ cfs_hash_unlock(hs, 0);
+
+ return obj;
+}
+EXPORT_SYMBOL(cfs_hash_lookup);
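+
+/*
+ * A minimal usage sketch; "obj", "obj_key" and "use_object()" are
+ * hypothetical.  The returned object carries a reference taken by
+ * ops->hs_get, which must be dropped through the cfs_hash_put() macro:
+ *
+ *	obj = cfs_hash_lookup(hs, &obj_key);
+ *	if (obj != NULL) {
+ *		use_object(obj);
+ *		cfs_hash_put(hs, &obj->hnode);
+ *	}
+ */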
+
+static void
+cfs_hash_for_each_enter(cfs_hash_t *hs)
+{
+ LASSERT(!cfs_hash_is_exiting(hs));
+
+ if (!cfs_hash_with_rehash(hs))
+ return;
+ /*
+ * NB: there is a race on cfs_hash_t::hs_iterating, but it doesn't
+ * matter because it's just an unreliable signal to the rehash thread,
+ * which will try to finish the rehash ASAP when it sees this flag.
+ */
+ hs->hs_iterating = 1;
+
+ cfs_hash_lock(hs, 1);
+ hs->hs_iterators++;
+
+ /* NB: iteration is mostly called by a service thread, so rather
+ * than blocking the service thread we cancel any pending rehash
+ * request and relaunch it after the iteration */
+ if (cfs_hash_is_rehashing(hs))
+ cfs_hash_rehash_cancel_locked(hs);
+ cfs_hash_unlock(hs, 1);
+}
+
+static void
+cfs_hash_for_each_exit(cfs_hash_t *hs)
+{
+ int remained;
+ int bits;
+
+ if (!cfs_hash_with_rehash(hs))
+ return;
+ cfs_hash_lock(hs, 1);
+ remained = --hs->hs_iterators;
+ bits = cfs_hash_rehash_bits(hs);
+ cfs_hash_unlock(hs, 1);
+ /* NB: there is a race on cfs_hash_t::hs_iterating, see above */
+ if (remained == 0)
+ hs->hs_iterating = 0;
+ if (bits > 0) {
+ cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
+ CFS_HASH_LOOP_HOG);
+ }
+}
+
+/**
+ * For each item in the libcfs hash @hs call the passed callback @func
+ * and pass to it as an argument each hash item and the private @data.
+ *
+ * a) the function may sleep!
+ * b) during the callback:
+ * . the bucket lock is held so the callback must never sleep.
+ * . if @remove_safe is true, the user can remove the current item
+ * with cfs_hash_bd_del_locked
+ */
+static __u64
+cfs_hash_for_each_tight(cfs_hash_t *hs, cfs_hash_for_each_cb_t func,
+ void *data, int remove_safe)
+{
+ struct hlist_node *hnode;
+ struct hlist_node *pos;
+ cfs_hash_bd_t bd;
+ __u64 count = 0;
+ int excl = !!remove_safe;
+ int loop = 0;
+ int i;
+
+ cfs_hash_for_each_enter(hs);
+
+ cfs_hash_lock(hs, 0);
+ LASSERT(!cfs_hash_is_rehashing(hs));
+
+ cfs_hash_for_each_bucket(hs, &bd, i) {
+ struct hlist_head *hhead;
+
+ cfs_hash_bd_lock(hs, &bd, excl);
+ if (func == NULL) { /* only glimpse size */
+ count += bd.bd_bucket->hsb_count;
+ cfs_hash_bd_unlock(hs, &bd, excl);
+ continue;
+ }
+
+ cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
+ hlist_for_each_safe(hnode, pos, hhead) {
+ cfs_hash_bucket_validate(hs, &bd, hnode);
+ count++;
+ loop++;
+ if (func(hs, &bd, hnode, data)) {
+ cfs_hash_bd_unlock(hs, &bd, excl);
+ goto out;
+ }
+ }
+ }
+ cfs_hash_bd_unlock(hs, &bd, excl);
+ if (loop < CFS_HASH_LOOP_HOG)
+ continue;
+ loop = 0;
+ cfs_hash_unlock(hs, 0);
+ cond_resched();
+ cfs_hash_lock(hs, 0);
+ }
+ out:
+ cfs_hash_unlock(hs, 0);
+
+ cfs_hash_for_each_exit(hs);
+ return count;
+}
+
+typedef struct {
+ cfs_hash_cond_opt_cb_t func;
+ void *arg;
+} cfs_hash_cond_arg_t;
+
+static int
+cfs_hash_cond_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode, void *data)
+{
+ cfs_hash_cond_arg_t *cond = data;
+
+ if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
+ cfs_hash_bd_del_locked(hs, bd, hnode);
+ return 0;
+}
+
+/**
+ * Delete items from the libcfs hash @hs for which @func returns true.
+ * The write lock is held while looping over each bucket so that no
+ * object can be referenced during the deletion.
+ */
+void
+cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t func, void *data)
+{
+ cfs_hash_cond_arg_t arg = {
+ .func = func,
+ .arg = data,
+ };
+
+ cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
+}
+EXPORT_SYMBOL(cfs_hash_cond_del);
+
+void
+cfs_hash_for_each(cfs_hash_t *hs,
+ cfs_hash_for_each_cb_t func, void *data)
+{
+ cfs_hash_for_each_tight(hs, func, data, 0);
+}
+EXPORT_SYMBOL(cfs_hash_for_each);
+
+void
+cfs_hash_for_each_safe(cfs_hash_t *hs,
+ cfs_hash_for_each_cb_t func, void *data)
+{
+ cfs_hash_for_each_tight(hs, func, data, 1);
+}
+EXPORT_SYMBOL(cfs_hash_for_each_safe);
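+
+/*
+ * A sketch of an iteration callback for cfs_hash_for_each();
+ * "struct my_obj" and "inspect_object()" are hypothetical.  The bucket
+ * lock is held while the callback runs, so it must not sleep, and
+ * returning non-zero stops the iteration:
+ *
+ *	static int my_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ *			 struct hlist_node *hnode, void *data)
+ *	{
+ *		struct my_obj *obj = cfs_hash_object(hs, hnode);
+ *
+ *		inspect_object(obj);
+ *		return 0;
+ *	}
+ *
+ *	cfs_hash_for_each(hs, my_cb, NULL);
+ */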
+
+static int
+cfs_hash_peek(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode, void *data)
+{
+ *(int *)data = 0;
+ return 1; /* return 1 to break the loop */
+}
+
+int
+cfs_hash_is_empty(cfs_hash_t *hs)
+{
+ int empty = 1;
+
+ cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
+ return empty;
+}
+EXPORT_SYMBOL(cfs_hash_is_empty);
+
+__u64
+cfs_hash_size_get(cfs_hash_t *hs)
+{
+ return cfs_hash_with_counter(hs) ?
+ atomic_read(&hs->hs_count) :
+ cfs_hash_for_each_tight(hs, NULL, NULL, 0);
+}
+EXPORT_SYMBOL(cfs_hash_size_get);
+
+/*
+ * cfs_hash_for_each_relax:
+ * Iterate the hash table and call @func on each item without
+ * any lock. This function cannot guarantee that the iteration will
+ * finish if these features are enabled:
+ *
+ * a. if rehash_key is enabled, an item can be moved from
+ * one bucket to another bucket
+ * b. the user can remove a non-zero-ref item from the hash table,
+ * so the item can disappear from the hash table; even worse,
+ * it's possible that the user changed the key and inserted the
+ * item into another hash bucket.
+ * There is no way for us to finish the iteration correctly in the
+ * previous two cases, so the iteration has to be stopped on change.
+ */
+static int
+cfs_hash_for_each_relax(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, void *data)
+{
+ struct hlist_node *hnode;
+ struct hlist_node *tmp;
+ cfs_hash_bd_t bd;
+ __u32 version;
+ int count = 0;
+ int stop_on_change;
+ int rc;
+ int i;
+
+ stop_on_change = cfs_hash_with_rehash_key(hs) ||
+ !cfs_hash_with_no_itemref(hs) ||
+ CFS_HOP(hs, put_locked) == NULL;
+ cfs_hash_lock(hs, 0);
+ LASSERT(!cfs_hash_is_rehashing(hs));
+
+ cfs_hash_for_each_bucket(hs, &bd, i) {
+ struct hlist_head *hhead;
+
+ cfs_hash_bd_lock(hs, &bd, 0);
+ version = cfs_hash_bd_version_get(&bd);
+
+ cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
+ for (hnode = hhead->first; hnode != NULL;) {
+ cfs_hash_bucket_validate(hs, &bd, hnode);
+ cfs_hash_get(hs, hnode);
+ cfs_hash_bd_unlock(hs, &bd, 0);
+ cfs_hash_unlock(hs, 0);
+
+ rc = func(hs, &bd, hnode, data);
+ if (stop_on_change)
+ cfs_hash_put(hs, hnode);
+ cond_resched();
+ count++;
+
+ cfs_hash_lock(hs, 0);
+ cfs_hash_bd_lock(hs, &bd, 0);
+ if (!stop_on_change) {
+ tmp = hnode->next;
+ cfs_hash_put_locked(hs, hnode);
+ hnode = tmp;
+ } else { /* bucket changed? */
+ if (version !=
+ cfs_hash_bd_version_get(&bd))
+ break;
+ /* safe to continue because no change */
+ hnode = hnode->next;
+ }
+ if (rc) /* callback wants to break iteration */
+ break;
+ }
+ }
+ cfs_hash_bd_unlock(hs, &bd, 0);
+ }
+ cfs_hash_unlock(hs, 0);
+
+ return count;
+}
+
+int
+cfs_hash_for_each_nolock(cfs_hash_t *hs,
+ cfs_hash_for_each_cb_t func, void *data)
+{
+ if (cfs_hash_with_no_lock(hs) ||
+ cfs_hash_with_rehash_key(hs) ||
+ !cfs_hash_with_no_itemref(hs))
+ return -EOPNOTSUPP;
+
+ if (CFS_HOP(hs, get) == NULL ||
+ (CFS_HOP(hs, put) == NULL &&
+ CFS_HOP(hs, put_locked) == NULL))
+ return -EOPNOTSUPP;
+
+ cfs_hash_for_each_enter(hs);
+ cfs_hash_for_each_relax(hs, func, data);
+ cfs_hash_for_each_exit(hs);
+
+ return 0;
+}
+EXPORT_SYMBOL(cfs_hash_for_each_nolock);
+
+/**
+ * For each hash bucket in the libcfs hash @hs call the passed callback
+ * @func until all the hash buckets are empty. The passed callback @func
+ * or the previously registered callback hs->hs_put must remove the item
+ * from the hash. You may either use the cfs_hash_del() or hlist_del()
+ * functions. No rwlocks will be held during the callback @func, so it
+ * is safe to sleep if needed. This function will not terminate until
+ * the hash is empty. Note it is still possible to concurrently add new
+ * items into the hash. It is the caller's responsibility to ensure
+ * the required locking is in place to prevent concurrent insertions.
+ */
+int
+cfs_hash_for_each_empty(cfs_hash_t *hs,
+ cfs_hash_for_each_cb_t func, void *data)
+{
+ unsigned i = 0;
+
+ if (cfs_hash_with_no_lock(hs))
+ return -EOPNOTSUPP;
+
+ if (CFS_HOP(hs, get) == NULL ||
+ (CFS_HOP(hs, put) == NULL &&
+ CFS_HOP(hs, put_locked) == NULL))
+ return -EOPNOTSUPP;
+
+ cfs_hash_for_each_enter(hs);
+ while (cfs_hash_for_each_relax(hs, func, data)) {
+ CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
+ hs->hs_name, i++);
+ }
+ cfs_hash_for_each_exit(hs);
+ return 0;
+}
+EXPORT_SYMBOL(cfs_hash_for_each_empty);
+
+void
+cfs_hash_hlist_for_each(cfs_hash_t *hs, unsigned hindex,
+ cfs_hash_for_each_cb_t func, void *data)
+{
+ struct hlist_head *hhead;
+ struct hlist_node *hnode;
+ cfs_hash_bd_t bd;
+
+ cfs_hash_for_each_enter(hs);
+ cfs_hash_lock(hs, 0);
+ if (hindex >= CFS_HASH_NHLIST(hs))
+ goto out;
+
+ cfs_hash_bd_index_set(hs, hindex, &bd);
+
+ cfs_hash_bd_lock(hs, &bd, 0);
+ hhead = cfs_hash_bd_hhead(hs, &bd);
+ hlist_for_each(hnode, hhead) {
+ if (func(hs, &bd, hnode, data))
+ break;
+ }
+ cfs_hash_bd_unlock(hs, &bd, 0);
+ out:
+ cfs_hash_unlock(hs, 0);
+ cfs_hash_for_each_exit(hs);
+}
+
+EXPORT_SYMBOL(cfs_hash_hlist_for_each);
+
+/*
+ * For each item in the libcfs hash @hs which matches the @key call
+ * the passed callback @func and pass to it as an argument each hash
+ * item and the private @data. During the callback the bucket lock
+ * is held so the callback must never sleep.
+ */
+void
+cfs_hash_for_each_key(cfs_hash_t *hs, const void *key,
+ cfs_hash_for_each_cb_t func, void *data)
+{
+ struct hlist_node *hnode;
+ cfs_hash_bd_t bds[2];
+ unsigned i;
+
+ cfs_hash_lock(hs, 0);
+
+ cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
+
+ cfs_hash_for_each_bd(bds, 2, i) {
+ struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);
+
+ hlist_for_each(hnode, hlist) {
+ cfs_hash_bucket_validate(hs, &bds[i], hnode);
+
+ if (cfs_hash_keycmp(hs, key, hnode)) {
+ if (func(hs, &bds[i], hnode, data))
+ break;
+ }
+ }
+ }
+
+ cfs_hash_dual_bd_unlock(hs, bds, 0);
+ cfs_hash_unlock(hs, 0);
+}
+EXPORT_SYMBOL(cfs_hash_for_each_key);
+
+/**
+ * Rehash the libcfs hash @hs to the given @bits. This can be used
+ * to grow the hash size when excessive chaining is detected, or to
+ * shrink the hash when it is larger than needed. When the CFS_HASH_REHASH
+ * flag is set in @hs the libcfs hash may be dynamically rehashed
+ * during addition or removal if the hash's theta value falls outside
+ * the range defined by hs->hs_min_theta and hs->hs_max_theta. By default
+ * these values are tuned to keep the chained hash depth small, and
+ * this approach assumes a reasonably uniform hashing function. The
+ * theta thresholds for @hs are tunable via cfs_hash_set_theta().
+ */
+void
+cfs_hash_rehash_cancel_locked(cfs_hash_t *hs)
+{
+ int i;
+
+ /* need hold cfs_hash_lock(hs, 1) */
+ LASSERT(cfs_hash_with_rehash(hs) &&
+ !cfs_hash_with_no_lock(hs));
+
+ if (!cfs_hash_is_rehashing(hs))
+ return;
+
+ if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
+ hs->hs_rehash_bits = 0;
+ return;
+ }
+
+ for (i = 2; cfs_hash_is_rehashing(hs); i++) {
+ cfs_hash_unlock(hs, 1);
+ /* raise console warning while waiting too long */
+ CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
+ "hash %s is still rehashing, rescheded %d\n",
+ hs->hs_name, i - 1);
+ cond_resched();
+ cfs_hash_lock(hs, 1);
+ }
+}
+EXPORT_SYMBOL(cfs_hash_rehash_cancel_locked);
+
+void
+cfs_hash_rehash_cancel(cfs_hash_t *hs)
+{
+ cfs_hash_lock(hs, 1);
+ cfs_hash_rehash_cancel_locked(hs);
+ cfs_hash_unlock(hs, 1);
+}
+EXPORT_SYMBOL(cfs_hash_rehash_cancel);
+
+int
+cfs_hash_rehash(cfs_hash_t *hs, int do_rehash)
+{
+ int rc;
+
+ LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));
+
+ cfs_hash_lock(hs, 1);
+
+ rc = cfs_hash_rehash_bits(hs);
+ if (rc <= 0) {
+ cfs_hash_unlock(hs, 1);
+ return rc;
+ }
+
+ hs->hs_rehash_bits = rc;
+ if (!do_rehash) {
+ /* launch and return */
+ cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
+ cfs_hash_unlock(hs, 1);
+ return 0;
+ }
+
+ /* rehash right now */
+ cfs_hash_unlock(hs, 1);
+
+ return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
+}
+EXPORT_SYMBOL(cfs_hash_rehash);
+
+static int
+cfs_hash_rehash_bd(cfs_hash_t *hs, cfs_hash_bd_t *old)
+{
+ cfs_hash_bd_t new;
+ struct hlist_head *hhead;
+ struct hlist_node *hnode;
+ struct hlist_node *pos;
+ void *key;
+ int c = 0;
+
+ /* hold cfs_hash_lock(hs, 1), so don't need any bucket lock */
+ cfs_hash_bd_for_each_hlist(hs, old, hhead) {
+ hlist_for_each_safe(hnode, pos, hhead) {
+ key = cfs_hash_key(hs, hnode);
+ LASSERT(key != NULL);
+ /* Validate hnode is in the correct bucket. */
+ cfs_hash_bucket_validate(hs, old, hnode);
+ /*
+ * Delete from old hash bucket; move to new bucket.
+ * ops->hs_key must be defined.
+ */
+ cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
+ hs->hs_rehash_bits, key, &new);
+ cfs_hash_bd_move_locked(hs, old, &new, hnode);
+ c++;
+ }
+ }
+
+ return c;
+}
+
+static int
+cfs_hash_rehash_worker(cfs_workitem_t *wi)
+{
+ cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_rehash_wi);
+ cfs_hash_bucket_t **bkts;
+ cfs_hash_bd_t bd;
+ unsigned int old_size;
+ unsigned int new_size;
+ int bsize;
+ int count = 0;
+ int rc = 0;
+ int i;
+
+ LASSERT (hs != NULL && cfs_hash_with_rehash(hs));
+
+ cfs_hash_lock(hs, 0);
+ LASSERT(cfs_hash_is_rehashing(hs));
+
+ old_size = CFS_HASH_NBKT(hs);
+ new_size = CFS_HASH_RH_NBKT(hs);
+
+ cfs_hash_unlock(hs, 0);
+
+ /*
+ * don't need hs::hs_rwlock for hs::hs_buckets,
+ * because nobody can change bkt-table except me.
+ */
+ bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
+ old_size, new_size);
+ cfs_hash_lock(hs, 1);
+ if (bkts == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (bkts == hs->hs_buckets) {
+ bkts = NULL; /* do nothing */
+ goto out;
+ }
+
+ rc = __cfs_hash_theta(hs);
+ if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
+ /* free the new allocated bkt-table */
+ old_size = new_size;
+ new_size = CFS_HASH_NBKT(hs);
+ rc = -EALREADY;
+ goto out;
+ }
+
+ LASSERT(hs->hs_rehash_buckets == NULL);
+ hs->hs_rehash_buckets = bkts;
+
+ rc = 0;
+ cfs_hash_for_each_bucket(hs, &bd, i) {
+ if (cfs_hash_is_exiting(hs)) {
+ rc = -ESRCH;
+ /* someone wants to destroy the hash, abort now */
+ if (old_size < new_size) /* OK to free old bkt-table */
+ break;
+ /* it's shrinking, need to free the new bkt-table */
+ hs->hs_rehash_buckets = NULL;
+ old_size = new_size;
+ new_size = CFS_HASH_NBKT(hs);
+ goto out;
+ }
+
+ count += cfs_hash_rehash_bd(hs, &bd);
+ if (count < CFS_HASH_LOOP_HOG ||
+ cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
+ continue;
+ }
+
+ count = 0;
+ cfs_hash_unlock(hs, 1);
+ cond_resched();
+ cfs_hash_lock(hs, 1);
+ }
+
+ hs->hs_rehash_count++;
+
+ bkts = hs->hs_buckets;
+ hs->hs_buckets = hs->hs_rehash_buckets;
+ hs->hs_rehash_buckets = NULL;
+
+ hs->hs_cur_bits = hs->hs_rehash_bits;
+ out:
+ hs->hs_rehash_bits = 0;
+ if (rc == -ESRCH) /* never be scheduled again */
+ cfs_wi_exit(cfs_sched_rehash, wi);
+ bsize = cfs_hash_bkt_size(hs);
+ cfs_hash_unlock(hs, 1);
+ /* can't refer to @hs anymore because it could be destroyed */
+ if (bkts != NULL)
+ cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
+ if (rc != 0)
+ CDEBUG(D_INFO, "early quit of of rehashing: %d\n", rc);
+ /* return 1 only if cfs_wi_exit is called */
+ return rc == -ESRCH;
+}
+
+/**
+ * Rehash the object referenced by @hnode in the libcfs hash @hs. The
+ * @old_key must be provided to locate the object's previous location
+ * in the hash, and @new_key will be used to reinsert the object.
+ * Use this function instead of a cfs_hash_add() + cfs_hash_del()
+ * combo when it is critical that there is no window in time where the
+ * object is missing from the hash. When an object is being rehashed
+ * the registered cfs_hash_get() and cfs_hash_put() functions will
+ * not be called.
+ */
+void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key,
+ void *new_key, struct hlist_node *hnode)
+{
+ cfs_hash_bd_t bds[3];
+ cfs_hash_bd_t old_bds[2];
+ cfs_hash_bd_t new_bd;
+
+ LASSERT(!hlist_unhashed(hnode));
+
+ cfs_hash_lock(hs, 0);
+
+ cfs_hash_dual_bd_get(hs, old_key, old_bds);
+ cfs_hash_bd_get(hs, new_key, &new_bd);
+
+ bds[0] = old_bds[0];
+ bds[1] = old_bds[1];
+ bds[2] = new_bd;
+
+ /* NB: bds[0] and bds[1] are ordered already */
+ cfs_hash_bd_order(&bds[1], &bds[2]);
+ cfs_hash_bd_order(&bds[0], &bds[1]);
+
+ cfs_hash_multi_bd_lock(hs, bds, 3, 1);
+ if (likely(old_bds[1].bd_bucket == NULL)) {
+ cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
+ } else {
+ cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
+ cfs_hash_bd_add_locked(hs, &new_bd, hnode);
+ }
+ /* overwrite the key inside the locks, otherwise we may race with
+ * other operations, e.g. rehash */
+ cfs_hash_keycpy(hs, new_key, hnode);
+
+ cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
+ cfs_hash_unlock(hs, 0);
+}
+EXPORT_SYMBOL(cfs_hash_rehash_key);
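+
+/*
+ * Usage sketch for cfs_hash_rehash_key(): the struct obj below and its
+ * members are made up for illustration, only the call itself comes from
+ * this file.
+ *
+ *   struct obj {
+ *           __u64                   oh_id;
+ *           struct hlist_node       oh_hnode;
+ *   };
+ *
+ *   static void obj_change_id(cfs_hash_t *hs, struct obj *obj, __u64 new_id)
+ *   {
+ *           __u64 old_id = obj->oh_id;
+ *
+ *           // moves @obj to the bucket of @new_id without ever leaving a
+ *           // window where a lookup could miss it in @hs
+ *           cfs_hash_rehash_key(hs, &old_id, &new_id, &obj->oh_hnode);
+ *   }
+ */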
+
+int cfs_hash_debug_header(struct seq_file *m)
+{
+ return seq_printf(m, "%-*s%6s%6s%6s%6s%6s%6s%6s%7s%8s%8s%8s%s\n",
+ CFS_HASH_BIGNAME_LEN,
+ "name", "cur", "min", "max", "theta", "t-min", "t-max",
+ "flags", "rehash", "count", "maxdep", "maxdepb",
+ " distribution");
+}
+EXPORT_SYMBOL(cfs_hash_debug_header);
+
+static cfs_hash_bucket_t **
+cfs_hash_full_bkts(cfs_hash_t *hs)
+{
+ /* NB: caller should hold hs->hs_rwlock if REHASH is set */
+ if (hs->hs_rehash_buckets == NULL)
+ return hs->hs_buckets;
+
+ LASSERT(hs->hs_rehash_bits != 0);
+ return hs->hs_rehash_bits > hs->hs_cur_bits ?
+ hs->hs_rehash_buckets : hs->hs_buckets;
+}
+
+static unsigned int
+cfs_hash_full_nbkt(cfs_hash_t *hs)
+{
+ /* NB: caller should hold hs->hs_rwlock if REHASH is set */
+ if (hs->hs_rehash_buckets == NULL)
+ return CFS_HASH_NBKT(hs);
+
+ LASSERT(hs->hs_rehash_bits != 0);
+ return hs->hs_rehash_bits > hs->hs_cur_bits ?
+ CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
+}
+
+int cfs_hash_debug_str(cfs_hash_t *hs, struct seq_file *m)
+{
+ int dist[8] = { 0, };
+ int maxdep = -1;
+ int maxdepb = -1;
+ int total = 0;
+ int theta;
+ int i;
+
+ cfs_hash_lock(hs, 0);
+ theta = __cfs_hash_theta(hs);
+
+ seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d 0x%02x %6d ",
+ CFS_HASH_BIGNAME_LEN, hs->hs_name,
+ 1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
+ 1 << hs->hs_max_bits,
+ __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
+ __cfs_hash_theta_int(hs->hs_min_theta),
+ __cfs_hash_theta_frac(hs->hs_min_theta),
+ __cfs_hash_theta_int(hs->hs_max_theta),
+ __cfs_hash_theta_frac(hs->hs_max_theta),
+ hs->hs_flags, hs->hs_rehash_count);
+
+ /*
+ * The distribution is a summary of the chained hash depth in
+ * each of the libcfs hash buckets. Each bucket's hsb_count is
+ * divided by the hash theta value and used to generate a
+ * histogram of the hash distribution. A uniform hash will
+ * result in all hash buckets being close to the average, thus
+ * only the first few entries in the histogram will be non-zero.
+ * If your hash function results in a non-uniform hash, this will
+ * be observable as outlier buckets in the distribution histogram.
+ *
+ * Uniform hash distribution: 128/128/0/0/0/0/0/0
+ * Non-Uniform hash distribution: 128/125/0/0/0/0/2/1
+ */
+ for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
+ cfs_hash_bd_t bd;
+
+ bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
+ cfs_hash_bd_lock(hs, &bd, 0);
+ if (maxdep < bd.bd_bucket->hsb_depmax) {
+ maxdep = bd.bd_bucket->hsb_depmax;
+ maxdepb = ffz(~maxdep);
+ }
+ total += bd.bd_bucket->hsb_count;
+ dist[min(__cfs_fls(bd.bd_bucket->hsb_count/max(theta,1)),7)]++;
+ cfs_hash_bd_unlock(hs, &bd, 0);
+ }
+
+ seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb);
+ for (i = 0; i < 8; i++)
+ seq_printf(m, "%d%c", dist[i], (i == 7) ? '\n' : '/');
+
+ cfs_hash_unlock(hs, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL(cfs_hash_debug_str);
diff --git a/drivers/staging/lustre/lustre/libcfs/heap.c b/drivers/staging/lustre/lustre/libcfs/heap.c
new file mode 100644
index 0000000..147e4fe
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/heap.c
@@ -0,0 +1,475 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details. A copy is
+ * included in the COPYING file that accompanied this code.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2011 Intel Corporation
+ */
+/*
+ * libcfs/libcfs/heap.c
+ *
+ * Author: Eric Barton <eeb@whamcloud.com>
+ * Liang Zhen <liang@whamcloud.com>
+ */
+/** \addtogroup heap
+ *
+ * @{
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/libcfs/libcfs.h>
+
+#define CBH_ALLOC(ptr, h) \
+do { \
+ if ((h)->cbh_flags & CBH_FLAG_ATOMIC_GROW) \
+ LIBCFS_CPT_ALLOC_GFP((ptr), h->cbh_cptab, h->cbh_cptid, \
+ CBH_NOB, GFP_ATOMIC); \
+ else \
+ LIBCFS_CPT_ALLOC((ptr), h->cbh_cptab, h->cbh_cptid, \
+ CBH_NOB); \
+} while (0)
+
+#define CBH_FREE(ptr) LIBCFS_FREE(ptr, CBH_NOB)
+
+/**
+ * Grows the capacity of a binary heap so that it can handle a larger number of
+ * \e cfs_binheap_node_t objects.
+ *
+ * \param[in] h The binary heap
+ *
+ * \retval 0 Successfully grew the heap
+ * \retval -ENOMEM OOM error
+ */
+static int
+cfs_binheap_grow(cfs_binheap_t *h)
+{
+ cfs_binheap_node_t ***frag1 = NULL;
+ cfs_binheap_node_t **frag2;
+ int hwm = h->cbh_hwm;
+
+ /* need a whole new chunk of pointers */
+ LASSERT((h->cbh_hwm & CBH_MASK) == 0);
+
+ if (hwm == 0) {
+ /* first use of single indirect */
+ CBH_ALLOC(h->cbh_elements1, h);
+ if (h->cbh_elements1 == NULL)
+ return -ENOMEM;
+
+ goto out;
+ }
+
+ hwm -= CBH_SIZE;
+ if (hwm < CBH_SIZE * CBH_SIZE) {
+ /* not filled double indirect */
+ CBH_ALLOC(frag2, h);
+ if (frag2 == NULL)
+ return -ENOMEM;
+
+ if (hwm == 0) {
+ /* first use of double indirect */
+ CBH_ALLOC(h->cbh_elements2, h);
+ if (h->cbh_elements2 == NULL) {
+ CBH_FREE(frag2);
+ return -ENOMEM;
+ }
+ }
+
+ h->cbh_elements2[hwm >> CBH_SHIFT] = frag2;
+ goto out;
+ }
+
+ hwm -= CBH_SIZE * CBH_SIZE;
+#if (CBH_SHIFT * 3 < 32)
+ if (hwm >= CBH_SIZE * CBH_SIZE * CBH_SIZE) {
+ /* filled triple indirect */
+ return -ENOMEM;
+ }
+#endif
+ CBH_ALLOC(frag2, h);
+ if (frag2 == NULL)
+ return -ENOMEM;
+
+ if (((hwm >> CBH_SHIFT) & CBH_MASK) == 0) {
+ /* first use of this 2nd level index */
+ CBH_ALLOC(frag1, h);
+ if (frag1 == NULL) {
+ CBH_FREE(frag2);
+ return -ENOMEM;
+ }
+ }
+
+ if (hwm == 0) {
+ /* first use of triple indirect */
+ CBH_ALLOC(h->cbh_elements3, h);
+ if (h->cbh_elements3 == NULL) {
+ CBH_FREE(frag2);
+ CBH_FREE(frag1);
+ return -ENOMEM;
+ }
+ }
+
+ if (frag1 != NULL) {
+ LASSERT(h->cbh_elements3[hwm >> (2 * CBH_SHIFT)] == NULL);
+ h->cbh_elements3[hwm >> (2 * CBH_SHIFT)] = frag1;
+ } else {
+ frag1 = h->cbh_elements3[hwm >> (2 * CBH_SHIFT)];
+ LASSERT(frag1 != NULL);
+ }
+
+ frag1[(hwm >> CBH_SHIFT) & CBH_MASK] = frag2;
+
+ out:
+ h->cbh_hwm += CBH_SIZE;
+ return 0;
+}
+
+/**
+ * Creates and initializes a binary heap instance.
+ *
+ * \param[in] ops The operations to be used
+ * \param[in] flags The heap flags
+ * \param[in] count The initial heap capacity in # of elements
+ * \param[in] arg An optional private argument
+ * \param[in] cptab The CPT table this heap instance will operate over
+ * \param[in] cptid The CPT id of \a cptab this heap instance will operate over
+ *
+ * \retval valid-pointer A newly-created and initialized binary heap object
+ * \retval NULL error
+ */
+cfs_binheap_t *
+cfs_binheap_create(cfs_binheap_ops_t *ops, unsigned int flags,
+ unsigned count, void *arg, struct cfs_cpt_table *cptab,
+ int cptid)
+{
+ cfs_binheap_t *h;
+
+ LASSERT(ops != NULL);
+ LASSERT(ops->hop_compare != NULL);
+ LASSERT(cptab != NULL);
+ LASSERT(cptid == CFS_CPT_ANY ||
+ (cptid >= 0 && cptid < cptab->ctb_nparts));
+
+ LIBCFS_CPT_ALLOC(h, cptab, cptid, sizeof(*h));
+ if (h == NULL)
+ return NULL;
+
+ h->cbh_ops = ops;
+ h->cbh_nelements = 0;
+ h->cbh_hwm = 0;
+ h->cbh_private = arg;
+ h->cbh_flags = flags & (~CBH_FLAG_ATOMIC_GROW);
+ h->cbh_cptab = cptab;
+ h->cbh_cptid = cptid;
+
+ while (h->cbh_hwm < count) { /* preallocate */
+ if (cfs_binheap_grow(h) != 0) {
+ cfs_binheap_destroy(h);
+ return NULL;
+ }
+ }
+
+ h->cbh_flags |= flags & CBH_FLAG_ATOMIC_GROW;
+
+ return h;
+}
+EXPORT_SYMBOL(cfs_binheap_create);
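+
+/*
+ * Usage sketch: the "elem" structure, its fields and the ordering policy of
+ * elem_compare() are made up for illustration; only hop_compare is mandatory
+ * in the ops table.
+ *
+ *   static int elem_compare(cfs_binheap_node_t *a, cfs_binheap_node_t *b)
+ *   {
+ *           // non-zero means @a should sit closer to the root than @b
+ *           return container_of(a, struct elem, e_node)->e_key <=
+ *                  container_of(b, struct elem, e_node)->e_key;
+ *   }
+ *
+ *   static cfs_binheap_ops_t elem_heap_ops = {
+ *           .hop_enter      = NULL,
+ *           .hop_exit       = NULL,
+ *           .hop_compare    = elem_compare,
+ *   };
+ *
+ *   heap = cfs_binheap_create(&elem_heap_ops, CBH_FLAG_ATOMIC_GROW,
+ *                             128, NULL, cfs_cpt_table, CFS_CPT_ANY);
+ */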
+
+/**
+ * Releases all resources associated with a binary heap instance.
+ *
+ * Deallocates memory for all indirection levels and the binary heap object
+ * itself.
+ *
+ * \param[in] h The binary heap object
+ */
+void
+cfs_binheap_destroy(cfs_binheap_t *h)
+{
+ int idx0;
+ int idx1;
+ int n;
+
+ LASSERT(h != NULL);
+
+ n = h->cbh_hwm;
+
+ if (n > 0) {
+ CBH_FREE(h->cbh_elements1);
+ n -= CBH_SIZE;
+ }
+
+ if (n > 0) {
+ for (idx0 = 0; idx0 < CBH_SIZE && n > 0; idx0++) {
+ CBH_FREE(h->cbh_elements2[idx0]);
+ n -= CBH_SIZE;
+ }
+
+ CBH_FREE(h->cbh_elements2);
+ }
+
+ if (n > 0) {
+ for (idx0 = 0; idx0 < CBH_SIZE && n > 0; idx0++) {
+
+ for (idx1 = 0; idx1 < CBH_SIZE && n > 0; idx1++) {
+ CBH_FREE(h->cbh_elements3[idx0][idx1]);
+ n -= CBH_SIZE;
+ }
+
+ CBH_FREE(h->cbh_elements3[idx0]);
+ }
+
+ CBH_FREE(h->cbh_elements3);
+ }
+
+ LIBCFS_FREE(h, sizeof(*h));
+}
+EXPORT_SYMBOL(cfs_binheap_destroy);
+
+/**
+ * Obtains a double pointer to a heap element, given its index into the binary
+ * tree.
+ *
+ * \param[in] h The binary heap instance
+ * \param[in] idx The requested node's index
+ *
+ * \retval valid-pointer A double pointer to a heap pointer entry
+ */
+static cfs_binheap_node_t **
+cfs_binheap_pointer(cfs_binheap_t *h, unsigned int idx)
+{
+ if (idx < CBH_SIZE)
+ return &(h->cbh_elements1[idx]);
+
+ idx -= CBH_SIZE;
+ if (idx < CBH_SIZE * CBH_SIZE)
+ return &(h->cbh_elements2[idx >> CBH_SHIFT][idx & CBH_MASK]);
+
+ idx -= CBH_SIZE * CBH_SIZE;
+ return &(h->cbh_elements3[idx >> (2 * CBH_SHIFT)]
+ [(idx >> CBH_SHIFT) & CBH_MASK]
+ [idx & CBH_MASK]);
+}
+
+/**
+ * Obtains a pointer to a heap element, given its index into the binary tree.
+ *
+ * \param[in] h The binary heap
+ * \param[in] idx The requested node's index
+ *
+ * \retval valid-pointer The requested heap node
+ * \retval NULL Supplied index is out of bounds
+ */
+cfs_binheap_node_t *
+cfs_binheap_find(cfs_binheap_t *h, unsigned int idx)
+{
+ if (idx >= h->cbh_nelements)
+ return NULL;
+
+ return *cfs_binheap_pointer(h, idx);
+}
+EXPORT_SYMBOL(cfs_binheap_find);
+
+/**
+ * Moves a node upwards, towards the root of the binary tree.
+ *
+ * \param[in] h The heap
+ * \param[in] e The node
+ *
+ * \retval 1 The position of \a e in the tree was changed at least once
+ * \retval 0 The position of \a e in the tree was not changed
+ */
+static int
+cfs_binheap_bubble(cfs_binheap_t *h, cfs_binheap_node_t *e)
+{
+ unsigned int cur_idx = e->chn_index;
+ cfs_binheap_node_t **cur_ptr;
+ unsigned int parent_idx;
+ cfs_binheap_node_t **parent_ptr;
+ int did_sth = 0;
+
+ cur_ptr = cfs_binheap_pointer(h, cur_idx);
+ LASSERT(*cur_ptr == e);
+
+ while (cur_idx > 0) {
+ parent_idx = (cur_idx - 1) >> 1;
+
+ parent_ptr = cfs_binheap_pointer(h, parent_idx);
+ LASSERT((*parent_ptr)->chn_index == parent_idx);
+
+ if (h->cbh_ops->hop_compare(*parent_ptr, e))
+ break;
+
+ (*parent_ptr)->chn_index = cur_idx;
+ *cur_ptr = *parent_ptr;
+ cur_ptr = parent_ptr;
+ cur_idx = parent_idx;
+ did_sth = 1;
+ }
+
+ e->chn_index = cur_idx;
+ *cur_ptr = e;
+
+ return did_sth;
+}
+
+/**
+ * Moves a node downwards, towards the last level of the binary tree.
+ *
+ * \param[in] h The heap
+ * \param[in] e The node
+ *
+ * \retval 1 The position of \a e in the tree was changed at least once
+ * \retval 0 The position of \a e in the tree was not changed
+ */
+static int
+cfs_binheap_sink(cfs_binheap_t *h, cfs_binheap_node_t *e)
+{
+ unsigned int n = h->cbh_nelements;
+ unsigned int child_idx;
+ cfs_binheap_node_t **child_ptr;
+ cfs_binheap_node_t *child;
+ unsigned int child2_idx;
+ cfs_binheap_node_t **child2_ptr;
+ cfs_binheap_node_t *child2;
+ unsigned int cur_idx;
+ cfs_binheap_node_t **cur_ptr;
+ int did_sth = 0;
+
+ cur_idx = e->chn_index;
+ cur_ptr = cfs_binheap_pointer(h, cur_idx);
+ LASSERT(*cur_ptr == e);
+
+ while (cur_idx < n) {
+ child_idx = (cur_idx << 1) + 1;
+ if (child_idx >= n)
+ break;
+
+ child_ptr = cfs_binheap_pointer(h, child_idx);
+ child = *child_ptr;
+
+ child2_idx = child_idx + 1;
+ if (child2_idx < n) {
+ child2_ptr = cfs_binheap_pointer(h, child2_idx);
+ child2 = *child2_ptr;
+
+ if (h->cbh_ops->hop_compare(child2, child)) {
+ child_idx = child2_idx;
+ child_ptr = child2_ptr;
+ child = child2;
+ }
+ }
+
+ LASSERT(child->chn_index == child_idx);
+
+ if (h->cbh_ops->hop_compare(e, child))
+ break;
+
+ child->chn_index = cur_idx;
+ *cur_ptr = child;
+ cur_ptr = child_ptr;
+ cur_idx = child_idx;
+ did_sth = 1;
+ }
+
+ e->chn_index = cur_idx;
+ *cur_ptr = e;
+
+ return did_sth;
+}
+
+/**
+ * Sort-inserts a node into the binary heap.
+ *
+ * \param[in] h The heap
+ * \param[in] e The node
+ *
+ * \retval 0 Element inserted successfully
+ * \retval != 0 error
+ */
+int
+cfs_binheap_insert(cfs_binheap_t *h, cfs_binheap_node_t *e)
+{
+ cfs_binheap_node_t **new_ptr;
+ unsigned int new_idx = h->cbh_nelements;
+ int rc;
+
+ if (new_idx == h->cbh_hwm) {
+ rc = cfs_binheap_grow(h);
+ if (rc != 0)
+ return rc;
+ }
+
+ if (h->cbh_ops->hop_enter) {
+ rc = h->cbh_ops->hop_enter(h, e);
+ if (rc != 0)
+ return rc;
+ }
+
+ e->chn_index = new_idx;
+ new_ptr = cfs_binheap_pointer(h, new_idx);
+ h->cbh_nelements++;
+ *new_ptr = e;
+
+ cfs_binheap_bubble(h, e);
+
+ return 0;
+}
+EXPORT_SYMBOL(cfs_binheap_insert);
+
+/**
+ * Removes a node from the binary heap.
+ *
+ * \param[in] h The heap
+ * \param[in] e The node
+ */
+void
+cfs_binheap_remove(cfs_binheap_t *h, cfs_binheap_node_t *e)
+{
+ unsigned int n = h->cbh_nelements;
+ unsigned int cur_idx = e->chn_index;
+ cfs_binheap_node_t **cur_ptr;
+ cfs_binheap_node_t *last;
+
+ LASSERT(cur_idx != CBH_POISON);
+ LASSERT(cur_idx < n);
+
+ cur_ptr = cfs_binheap_pointer(h, cur_idx);
+ LASSERT(*cur_ptr == e);
+
+ n--;
+ last = *cfs_binheap_pointer(h, n);
+ h->cbh_nelements = n;
+ if (last == e)
+ return;
+
+ last->chn_index = cur_idx;
+ *cur_ptr = last;
+ if (!cfs_binheap_bubble(h, *cur_ptr))
+ cfs_binheap_sink(h, *cur_ptr);
+
+ e->chn_index = CBH_POISON;
+ if (h->cbh_ops->hop_exit)
+ h->cbh_ops->hop_exit(h, e);
+}
+EXPORT_SYMBOL(cfs_binheap_remove);
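+
+/*
+ * Usage sketch for insert/find/remove, continuing the hypothetical "elem"
+ * example after cfs_binheap_create() above (error handling trimmed):
+ *
+ *   rc = cfs_binheap_insert(heap, &elem->e_node);
+ *   ...
+ *   top = cfs_binheap_find(heap, 0);        // index 0 is the root
+ *   if (top != NULL)
+ *           cfs_binheap_remove(heap, top);
+ *   ...
+ *   cfs_binheap_destroy(heap);
+ */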
+
+/** @} heap */
diff --git a/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c b/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
new file mode 100644
index 0000000..74a0db5
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
@@ -0,0 +1,343 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Author: Nathan Rutman <nathan.rutman@sun.com>
+ *
+ * Kernel <-> userspace communication routines.
+ * Using pipes for all arches.
+ */
+
+#define DEBUG_SUBSYSTEM S_CLASS
+#define D_KUC D_OTHER
+
+#include <linux/libcfs/libcfs.h>
+
+#ifdef LUSTRE_UTILS
+/* This is the userspace side. */
+
+/** Start the userspace side of a KUC pipe.
+ * @param link Private descriptor for pipe/socket.
+ * @param group KUC broadcast group to listen to
+ * (can be 0 for unicast to this pid)
+ */
+int libcfs_ukuc_start(lustre_kernelcomm *link, int group)
+{
+ int pfd[2];
+
+ if (pipe(pfd) < 0)
+ return -errno;
+
+ memset(link, 0, sizeof(*link));
+ link->lk_rfd = pfd[0];
+ link->lk_wfd = pfd[1];
+ link->lk_group = group;
+ link->lk_uid = getpid();
+ return 0;
+}
+
+int libcfs_ukuc_stop(lustre_kernelcomm *link)
+{
+ if (link->lk_wfd > 0)
+ close(link->lk_wfd);
+ return close(link->lk_rfd);
+}
+
+#define lhsz sizeof(*kuch)
+
+/** Read a message from the link.
+ * Allocates memory, returns handle
+ *
+ * @param link Private descriptor for pipe/socket.
+ * @param buf Buffer to read into, must include size for kuc_hdr
+ * @param maxsize Maximum message size allowed
+ * @param transport Only listen to messages on this transport
+ * (and the generic transport)
+ */
+int libcfs_ukuc_msg_get(lustre_kernelcomm *link, char *buf, int maxsize,
+ int transport)
+{
+ struct kuc_hdr *kuch;
+ int rc = 0;
+
+ memset(buf, 0, maxsize);
+
+ CDEBUG(D_KUC, "Waiting for message from kernel on fd %d\n",
+ link->lk_rfd);
+
+ while (1) {
+ /* Read header first to get message size */
+ rc = read(link->lk_rfd, buf, lhsz);
+ if (rc <= 0) {
+ rc = -errno;
+ break;
+ }
+ kuch = (struct kuc_hdr *)buf;
+
+ CDEBUG(D_KUC, "Received message mg=%x t=%d m=%d l=%d\n",
+ kuch->kuc_magic, kuch->kuc_transport, kuch->kuc_msgtype,
+ kuch->kuc_msglen);
+
+ if (kuch->kuc_magic != KUC_MAGIC) {
+ CERROR("bad message magic %x != %x\n",
+ kuch->kuc_magic, KUC_MAGIC);
+ rc = -EPROTO;
+ break;
+ }
+
+ if (kuch->kuc_msglen > maxsize) {
+ rc = -EMSGSIZE;
+ break;
+ }
+
+ /* Read payload */
+ rc = read(link->lk_rfd, buf + lhsz, kuch->kuc_msglen - lhsz);
+ if (rc < 0) {
+ rc = -errno;
+ break;
+ }
+ if (rc < (kuch->kuc_msglen - lhsz)) {
+ CERROR("short read: got %d of %d bytes\n",
+ rc, kuch->kuc_msglen);
+ rc = -EPROTO;
+ break;
+ }
+
+ if (kuch->kuc_transport == transport ||
+ kuch->kuc_transport == KUC_TRANSPORT_GENERIC) {
+ return 0;
+ }
+ /* Drop messages for other transports */
+ }
+ return rc;
+}
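+
+/*
+ * Userspace usage sketch: the buffer size and the way lk_wfd reaches the
+ * kernel (typically via an ioctl) are illustrative assumptions.
+ *
+ *   lustre_kernelcomm kc;
+ *   char buf[8192];
+ *
+ *   rc = libcfs_ukuc_start(&kc, 0);         // 0: unicast to this pid
+ *   // ...hand kc.lk_wfd to the kernel side so it can write messages...
+ *   while (rc == 0) {
+ *           rc = libcfs_ukuc_msg_get(&kc, buf, sizeof(buf),
+ *                                    KUC_TRANSPORT_GENERIC);
+ *           // on success buf starts with a struct kuc_hdr
+ *   }
+ *   libcfs_ukuc_stop(&kc);
+ */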
+
+#else /* LUSTRE_UTILS */
+/* This is the kernel side (liblustre as well). */
+
+/**
+ * libcfs_kkuc_msg_put - send a message from kernel to userspace
+ * @param filp The file (pipe) to send the message to
+ * @param payload Payload data. First field of payload is always
+ * struct kuc_hdr
+ */
+int libcfs_kkuc_msg_put(struct file *filp, void *payload)
+{
+ struct kuc_hdr *kuch = (struct kuc_hdr *)payload;
+ ssize_t count = kuch->kuc_msglen;
+ loff_t offset = 0;
+ mm_segment_t fs;
+ int rc = -ENOSYS;
+
+ if (filp == NULL || IS_ERR(filp))
+ return -EBADF;
+
+ if (kuch->kuc_magic != KUC_MAGIC) {
+ CERROR("KernelComm: bad magic %x\n", kuch->kuc_magic);
+ return -ENOSYS;
+ }
+
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ while (count > 0) {
+ rc = vfs_write(filp, (void __force __user *)payload,
+ count, &offset);
+ if (rc < 0)
+ break;
+ count -= rc;
+ payload += rc;
+ rc = 0;
+ }
+ set_fs(fs);
+
+ if (rc < 0)
+ CWARN("message send failed (%d)\n", rc);
+ else
+ CDEBUG(D_KUC, "Sent message rc=%d, fp=%p\n", rc, filp);
+
+ return rc;
+}
+EXPORT_SYMBOL(libcfs_kkuc_msg_put);
+
+/* Broadcast groups are global across all mounted filesystems;
+ * i.e. registering for a group on one filesystem will get messages
+ * for that group from any filesystem */
+/** A single group registration has a uid and a file pointer */
+struct kkuc_reg {
+ struct list_head kr_chain;
+ int kr_uid;
+ struct file *kr_fp;
+ __u32 kr_data;
+};
+static struct list_head kkuc_groups[KUC_GRP_MAX+1] = {};
+/* Protect message sending against remove and adds */
+static DECLARE_RWSEM(kg_sem);
+
+/** Add a receiver to a broadcast group
+ * @param filp pipe to write into
+ * @param uid identifier for this receiver
+ * @param group group number
+ */
+int libcfs_kkuc_group_add(struct file *filp, int uid, int group, __u32 data)
+{
+ struct kkuc_reg *reg;
+
+ if (group > KUC_GRP_MAX) {
+ CDEBUG(D_WARNING, "Kernelcomm: bad group %d\n", group);
+ return -EINVAL;
+ }
+
+ /* fput in group_rem */
+ if (filp == NULL)
+ return -EBADF;
+
+ /* freed in group_rem */
+ reg = kmalloc(sizeof(*reg), GFP_KERNEL);
+ if (reg == NULL)
+ return -ENOMEM;
+
+ reg->kr_fp = filp;
+ reg->kr_uid = uid;
+ reg->kr_data = data;
+
+ down_write(&kg_sem);
+ if (kkuc_groups[group].next == NULL)
+ INIT_LIST_HEAD(&kkuc_groups[group]);
+ list_add(&reg->kr_chain, &kkuc_groups[group]);
+ up_write(&kg_sem);
+
+ CDEBUG(D_KUC, "Added uid=%d fp=%p to group %d\n", uid, filp, group);
+
+ return 0;
+}
+EXPORT_SYMBOL(libcfs_kkuc_group_add);
+
+int libcfs_kkuc_group_rem(int uid, int group)
+{
+ struct kkuc_reg *reg, *next;
+
+ if (kkuc_groups[group].next == NULL)
+ return 0;
+
+ if (uid == 0) {
+ /* Broadcast a shutdown message */
+ struct kuc_hdr lh;
+
+ lh.kuc_magic = KUC_MAGIC;
+ lh.kuc_transport = KUC_TRANSPORT_GENERIC;
+ lh.kuc_msgtype = KUC_MSG_SHUTDOWN;
+ lh.kuc_msglen = sizeof(lh);
+ libcfs_kkuc_group_put(group, &lh);
+ }
+
+ down_write(&kg_sem);
+ list_for_each_entry_safe(reg, next, &kkuc_groups[group], kr_chain) {
+ if ((uid == 0) || (uid == reg->kr_uid)) {
+ list_del(&reg->kr_chain);
+ CDEBUG(D_KUC, "Removed uid=%d fp=%p from group %d\n",
+ reg->kr_uid, reg->kr_fp, group);
+ if (reg->kr_fp != NULL)
+ fput(reg->kr_fp);
+ kfree(reg);
+ }
+ }
+ up_write(&kg_sem);
+
+ return 0;
+}
+EXPORT_SYMBOL(libcfs_kkuc_group_rem);
+
+int libcfs_kkuc_group_put(int group, void *payload)
+{
+ struct kkuc_reg *reg;
+ int rc = 0;
+ int one_success = 0;
+
+ down_read(&kg_sem);
+ list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
+ if (reg->kr_fp != NULL) {
+ rc = libcfs_kkuc_msg_put(reg->kr_fp, payload);
+ if (rc == 0)
+ one_success = 1;
+ else if (rc == -EPIPE) {
+ fput(reg->kr_fp);
+ reg->kr_fp = NULL;
+ }
+ }
+ }
+ up_read(&kg_sem);
+
+ /* don't return an error if the message has been delivered
+ * to at least one agent */
+ if (one_success)
+ rc = 0;
+
+ return rc;
+}
+EXPORT_SYMBOL(libcfs_kkuc_group_put);
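+
+/*
+ * Kernel-side usage sketch: broadcast a header-only message to every reader
+ * registered with libcfs_kkuc_group_add(); the MY_GROUP number is an
+ * illustrative placeholder.
+ *
+ *   struct kuc_hdr hdr;
+ *
+ *   hdr.kuc_magic     = KUC_MAGIC;
+ *   hdr.kuc_transport = KUC_TRANSPORT_GENERIC;
+ *   hdr.kuc_msgtype   = KUC_MSG_SHUTDOWN;
+ *   hdr.kuc_msglen    = sizeof(hdr);
+ *
+ *   // returns 0 as long as at least one registered reader got the message
+ *   rc = libcfs_kkuc_group_put(MY_GROUP, &hdr);
+ */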
+
+/**
+ * Calls a callback function for each link of the given kuc group.
+ * @param group the group to call the function on.
+ * @param cb_func the function to be called.
+ * @param cb_arg extra argument to be passed to the callback function.
+ */
+int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func,
+ void *cb_arg)
+{
+ struct kkuc_reg *reg;
+ int rc = 0;
+
+ if (group > KUC_GRP_MAX) {
+ CDEBUG(D_WARNING, "Kernelcomm: bad group %d\n", group);
+ return -EINVAL;
+ }
+
+ /* no link for this group */
+ if (kkuc_groups[group].next == NULL)
+ return 0;
+
+ down_read(&kg_sem);
+ list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
+ if (reg->kr_fp != NULL) {
+ rc = cb_func(reg->kr_data, cb_arg);
+ }
+ }
+ up_read(&kg_sem);
+
+ return rc;
+}
+EXPORT_SYMBOL(libcfs_kkuc_group_foreach);
+
+#endif /* LUSTRE_UTILS */
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c b/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c
new file mode 100644
index 0000000..1fb3700
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c
@@ -0,0 +1,201 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Please see comments in libcfs/include/libcfs/libcfs_cpu.h for introduction
+ *
+ * Author: liang@whamcloud.com
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/libcfs/libcfs.h>
+
+/** Global CPU partition table */
+struct cfs_cpt_table *cfs_cpt_table __read_mostly = NULL;
+EXPORT_SYMBOL(cfs_cpt_table);
+
+#ifndef HAVE_LIBCFS_CPT
+
+#define CFS_CPU_VERSION_MAGIC 0xbabecafe
+
+struct cfs_cpt_table *
+cfs_cpt_table_alloc(unsigned int ncpt)
+{
+ struct cfs_cpt_table *cptab;
+
+ if (ncpt != 1) {
+ CERROR("Can't support cpu partition number %d\n", ncpt);
+ return NULL;
+ }
+
+ LIBCFS_ALLOC(cptab, sizeof(*cptab));
+ if (cptab != NULL) {
+ cptab->ctb_version = CFS_CPU_VERSION_MAGIC;
+ cptab->ctb_nparts = ncpt;
+ }
+
+ return cptab;
+}
+EXPORT_SYMBOL(cfs_cpt_table_alloc);
+
+void
+cfs_cpt_table_free(struct cfs_cpt_table *cptab)
+{
+ LASSERT(cptab->ctb_version == CFS_CPU_VERSION_MAGIC);
+
+ LIBCFS_FREE(cptab, sizeof(*cptab));
+}
+EXPORT_SYMBOL(cfs_cpt_table_free);
+
+int
+cfs_cpt_number(struct cfs_cpt_table *cptab)
+{
+ return 1;
+}
+EXPORT_SYMBOL(cfs_cpt_number);
+
+int
+cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt)
+{
+ return 1;
+}
+EXPORT_SYMBOL(cfs_cpt_weight);
+
+int
+cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt)
+{
+ return 1;
+}
+EXPORT_SYMBOL(cfs_cpt_online);
+
+int
+cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
+{
+ return 1;
+}
+EXPORT_SYMBOL(cfs_cpt_set_cpu);
+
+void
+cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
+{
+}
+EXPORT_SYMBOL(cfs_cpt_unset_cpu);
+
+int
+cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
+{
+ return 1;
+}
+EXPORT_SYMBOL(cfs_cpt_set_cpumask);
+
+void
+cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
+{
+}
+EXPORT_SYMBOL(cfs_cpt_unset_cpumask);
+
+int
+cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node)
+{
+ return 1;
+}
+EXPORT_SYMBOL(cfs_cpt_set_node);
+
+void
+cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node)
+{
+}
+EXPORT_SYMBOL(cfs_cpt_unset_node);
+
+int
+cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
+{
+ return 1;
+}
+EXPORT_SYMBOL(cfs_cpt_set_nodemask);
+
+void
+cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
+{
+}
+EXPORT_SYMBOL(cfs_cpt_unset_nodemask);
+
+void
+cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt)
+{
+}
+EXPORT_SYMBOL(cfs_cpt_clear);
+
+int
+cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt)
+{
+ return 0;
+}
+EXPORT_SYMBOL(cfs_cpt_spread_node);
+
+int
+cfs_cpt_current(struct cfs_cpt_table *cptab, int remap)
+{
+ return 0;
+}
+EXPORT_SYMBOL(cfs_cpt_current);
+
+int
+cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu)
+{
+ return 0;
+}
+EXPORT_SYMBOL(cfs_cpt_of_cpu);
+
+int
+cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
+{
+ return 0;
+}
+EXPORT_SYMBOL(cfs_cpt_bind);
+
+void
+cfs_cpu_fini(void)
+{
+ if (cfs_cpt_table != NULL) {
+ cfs_cpt_table_free(cfs_cpt_table);
+ cfs_cpt_table = NULL;
+ }
+}
+
+int
+cfs_cpu_init(void)
+{
+ cfs_cpt_table = cfs_cpt_table_alloc(1);
+
+ return cfs_cpt_table != NULL ? 0 : -1;
+}
+
+#endif /* HAVE_LIBCFS_CPT */
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_lock.c b/drivers/staging/lustre/lustre/libcfs/libcfs_lock.c
new file mode 100644
index 0000000..a2ce4c0
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/libcfs_lock.c
@@ -0,0 +1,189 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA
+ *
+ * GPL HEADER END
+ */
+/* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Author: liang@whamcloud.com
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/libcfs/libcfs.h>
+
+
+/** destroy cpu-partition lock, see libcfs_private.h for more detail */
+void
+cfs_percpt_lock_free(struct cfs_percpt_lock *pcl)
+{
+ LASSERT(pcl->pcl_locks != NULL);
+ LASSERT(!pcl->pcl_locked);
+
+ cfs_percpt_free(pcl->pcl_locks);
+ LIBCFS_FREE(pcl, sizeof(*pcl));
+}
+EXPORT_SYMBOL(cfs_percpt_lock_free);
+
+/**
+ * create cpu-partition lock, see libcfs_private.h for more detail.
+ *
+ * The cpu-partition lock is designed for large-scale SMP systems, so we
+ * need to reduce cache-line contention as much as we can; that is the
+ * reason we always allocate cacheline-aligned memory blocks.
+ */
+struct cfs_percpt_lock *
+cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
+{
+ struct cfs_percpt_lock *pcl;
+ spinlock_t *lock;
+ int i;
+
+ /* NB: cptab can be NULL, pcl will be for HW CPUs on that case */
+ LIBCFS_ALLOC(pcl, sizeof(*pcl));
+ if (pcl == NULL)
+ return NULL;
+
+ pcl->pcl_cptab = cptab;
+ pcl->pcl_locks = cfs_percpt_alloc(cptab, sizeof(*lock));
+ if (pcl->pcl_locks == NULL) {
+ LIBCFS_FREE(pcl, sizeof(*pcl));
+ return NULL;
+ }
+
+ cfs_percpt_for_each(lock, i, pcl->pcl_locks)
+ spin_lock_init(lock);
+
+ return pcl;
+}
+EXPORT_SYMBOL(cfs_percpt_lock_alloc);
+
+/**
+ * lock a CPU partition
+ *
+ * \a index != CFS_PERCPT_LOCK_EX
+ * hold private lock indexed by \a index
+ *
+ * \a index == CFS_PERCPT_LOCK_EX
+ * exclusively lock @pcl and nobody can take private lock
+ */
+void
+cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
+{
+ int ncpt = cfs_cpt_number(pcl->pcl_cptab);
+ int i;
+
+ LASSERT(index >= CFS_PERCPT_LOCK_EX && index < ncpt);
+
+ if (ncpt == 1) {
+ index = 0;
+ } else { /* serialize with exclusive lock */
+ while (pcl->pcl_locked)
+ cpu_relax();
+ }
+
+ if (likely(index != CFS_PERCPT_LOCK_EX)) {
+ spin_lock(pcl->pcl_locks[index]);
+ return;
+ }
+
+ /* exclusive lock request */
+ for (i = 0; i < ncpt; i++) {
+ spin_lock(pcl->pcl_locks[i]);
+ if (i == 0) {
+ LASSERT(!pcl->pcl_locked);
+ /* nobody should take a private lock after this,
+ * so I won't be starved for too long */
+ pcl->pcl_locked = 1;
+ }
+ }
+}
+EXPORT_SYMBOL(cfs_percpt_lock);
+
+/** unlock a CPU partition */
+void
+cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
+{
+ int ncpt = cfs_cpt_number(pcl->pcl_cptab);
+ int i;
+
+ index = ncpt == 1 ? 0 : index;
+
+ if (likely(index != CFS_PERCPT_LOCK_EX)) {
+ spin_unlock(pcl->pcl_locks[index]);
+ return;
+ }
+
+ for (i = ncpt - 1; i >= 0; i--) {
+ if (i == 0) {
+ LASSERT(pcl->pcl_locked);
+ pcl->pcl_locked = 0;
+ }
+ spin_unlock(pcl->pcl_locks[i]);
+ }
+}
+EXPORT_SYMBOL(cfs_percpt_unlock);
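+
+/*
+ * Usage sketch: per-partition locking on the hot path, exclusive locking for
+ * the rare global path; "counters" (one per CPU partition) is hypothetical.
+ *
+ *   pcl = cfs_percpt_lock_alloc(cptab);
+ *
+ *   cpt = cfs_cpt_current(cptab, 0);
+ *   cfs_percpt_lock(pcl, cpt);              // private lock of one partition
+ *   counters[cpt]++;
+ *   cfs_percpt_unlock(pcl, cpt);
+ *
+ *   cfs_percpt_lock(pcl, CFS_PERCPT_LOCK_EX);       // all partitions
+ *   // ...walk or reset every counters[i]...
+ *   cfs_percpt_unlock(pcl, CFS_PERCPT_LOCK_EX);
+ */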
+
+
+/** free cpu-partition refcount */
+void
+cfs_percpt_atomic_free(atomic_t **refs)
+{
+ cfs_percpt_free(refs);
+}
+EXPORT_SYMBOL(cfs_percpt_atomic_free);
+
+/** allocate cpu-partition refcount with initial value @init_val */
+atomic_t **
+cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int init_val)
+{
+ atomic_t **refs;
+ atomic_t *ref;
+ int i;
+
+ refs = cfs_percpt_alloc(cptab, sizeof(*ref));
+ if (refs == NULL)
+ return NULL;
+
+ cfs_percpt_for_each(ref, i, refs)
+ atomic_set(ref, init_val);
+ return refs;
+}
+EXPORT_SYMBOL(cfs_percpt_atomic_alloc);
+
+/** return sum of cpu-partition refs */
+int
+cfs_percpt_atomic_summary(atomic_t **refs)
+{
+ atomic_t *ref;
+ int i;
+ int val = 0;
+
+ cfs_percpt_for_each(ref, i, refs)
+ val += atomic_read(ref);
+
+ return val;
+}
+EXPORT_SYMBOL(cfs_percpt_atomic_summary);
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_mem.c b/drivers/staging/lustre/lustre/libcfs/libcfs_mem.c
new file mode 100644
index 0000000..feab537
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/libcfs_mem.c
@@ -0,0 +1,202 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Author: liang@whamcloud.com
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/libcfs/libcfs.h>
+
+struct cfs_var_array {
+ unsigned int va_count; /* # of buffers */
+ unsigned int va_size; /* size of each var */
+ struct cfs_cpt_table *va_cptab; /* cpu partition table */
+ void *va_ptrs[0]; /* buffer addresses */
+};
+
+/*
+ * free per-cpu-partition data; see more detail in cfs_percpt_alloc
+ */
+void
+cfs_percpt_free(void *vars)
+{
+ struct cfs_var_array *arr;
+ int i;
+
+ arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
+
+ for (i = 0; i < arr->va_count; i++) {
+ if (arr->va_ptrs[i] != NULL)
+ LIBCFS_FREE(arr->va_ptrs[i], arr->va_size);
+ }
+
+ LIBCFS_FREE(arr, offsetof(struct cfs_var_array,
+ va_ptrs[arr->va_count]));
+}
+EXPORT_SYMBOL(cfs_percpt_free);
+
+/*
+ * allocate per-cpu-partition variables; the returned value is an array of
+ * pointers that can be indexed by CPU partition ID, e.g.:
+ *
+ * arr = cfs_percpt_alloc(cfs_cpu_pt, size);
+ * then caller can access memory block for CPU 0 by arr[0],
+ * memory block for CPU 1 by arr[1]...
+ * memory block for CPU N by arr[N]...
+ *
+ * cacheline aligned.
+ */
+void *
+cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size)
+{
+ struct cfs_var_array *arr;
+ int count;
+ int i;
+
+ count = cfs_cpt_number(cptab);
+
+ LIBCFS_ALLOC(arr, offsetof(struct cfs_var_array, va_ptrs[count]));
+ if (arr == NULL)
+ return NULL;
+
+ arr->va_size = size = L1_CACHE_ALIGN(size);
+ arr->va_count = count;
+ arr->va_cptab = cptab;
+
+ for (i = 0; i < count; i++) {
+ LIBCFS_CPT_ALLOC(arr->va_ptrs[i], cptab, i, size);
+ if (arr->va_ptrs[i] == NULL) {
+ cfs_percpt_free((void *)&arr->va_ptrs[0]);
+ return NULL;
+ }
+ }
+
+ return (void *)&arr->va_ptrs[0];
+}
+EXPORT_SYMBOL(cfs_percpt_alloc);
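+
+/*
+ * Usage sketch: "struct my_stats" and its ms_count field are hypothetical;
+ * one cacheline-aligned block is allocated per CPU partition of @cptab.
+ *
+ *   struct my_stats *stats;
+ *   struct my_stats **stats_arr;
+ *   int i;
+ *
+ *   stats_arr = cfs_percpt_alloc(cptab, sizeof(*stats));
+ *   if (stats_arr == NULL)
+ *           return -ENOMEM;
+ *
+ *   cfs_percpt_for_each(stats, i, stats_arr)
+ *           stats->ms_count = 0;
+ *   ...
+ *   cfs_percpt_free(stats_arr);
+ */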
+
+/*
+ * return number of CPUs (or number of elements in per-cpu data)
+ * according to cptab of @vars
+ */
+int
+cfs_percpt_number(void *vars)
+{
+ struct cfs_var_array *arr;
+
+ arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
+
+ return arr->va_count;
+}
+EXPORT_SYMBOL(cfs_percpt_number);
+
+/*
+ * return the memory block that belongs to the current CPU's partition
+ */
+void *
+cfs_percpt_current(void *vars)
+{
+ struct cfs_var_array *arr;
+ int cpt;
+
+ arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
+ cpt = cfs_cpt_current(arr->va_cptab, 0);
+ if (cpt < 0)
+ return NULL;
+
+ return arr->va_ptrs[cpt];
+}
+EXPORT_SYMBOL(cfs_percpt_current);
+
+void *
+cfs_percpt_index(void *vars, int idx)
+{
+ struct cfs_var_array *arr;
+
+ arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
+
+ LASSERT(idx >= 0 && idx < arr->va_count);
+ return arr->va_ptrs[idx];
+}
+EXPORT_SYMBOL(cfs_percpt_index);
+
+/*
+ * free variable array, see more detail in cfs_array_alloc
+ */
+void
+cfs_array_free(void *vars)
+{
+ struct cfs_var_array *arr;
+ int i;
+
+ arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
+
+ for (i = 0; i < arr->va_count; i++) {
+ if (arr->va_ptrs[i] == NULL)
+ continue;
+
+ LIBCFS_FREE(arr->va_ptrs[i], arr->va_size);
+ }
+ LIBCFS_FREE(arr, offsetof(struct cfs_var_array,
+ va_ptrs[arr->va_count]));
+}
+EXPORT_SYMBOL(cfs_array_free);
+
+/*
+ * allocate a variable array, returned value is an array of pointers.
+ * Caller can specify length of array by @count, @size is size of each
+ * memory block in array.
+ */
+void *
+cfs_array_alloc(int count, unsigned int size)
+{
+ struct cfs_var_array *arr;
+ int i;
+
+ LIBCFS_ALLOC(arr, offsetof(struct cfs_var_array, va_ptrs[count]));
+ if (arr == NULL)
+ return NULL;
+
+ arr->va_count = count;
+ arr->va_size = size;
+
+ for (i = 0; i < count; i++) {
+ LIBCFS_ALLOC(arr->va_ptrs[i], size);
+
+ if (arr->va_ptrs[i] == NULL) {
+ cfs_array_free((void *)&arr->va_ptrs[0]);
+ return NULL;
+ }
+ }
+
+ return (void *)&arr->va_ptrs[0];
+}
+EXPORT_SYMBOL(cfs_array_alloc);
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_string.c b/drivers/staging/lustre/lustre/libcfs/libcfs_string.c
new file mode 100644
index 0000000..922debd
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/libcfs_string.c
@@ -0,0 +1,598 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * String manipulation functions.
+ *
+ * libcfs/libcfs/libcfs_string.c
+ *
+ * Author: Nathan Rutman <nathan.rutman@sun.com>
+ */
+
+#include <linux/libcfs/libcfs.h>
+
+/* non-0 = don't match */
+int cfs_strncasecmp(const char *s1, const char *s2, size_t n)
+{
+ if (s1 == NULL || s2 == NULL)
+ return 1;
+
+ if (n == 0)
+ return 0;
+
+ while (n-- != 0 && tolower(*s1) == tolower(*s2)) {
+ if (n == 0 || *s1 == '\0' || *s2 == '\0')
+ break;
+ s1++;
+ s2++;
+ }
+
+ return tolower(*(unsigned char *)s1) - tolower(*(unsigned char *)s2);
+}
+EXPORT_SYMBOL(cfs_strncasecmp);
+
+/* Convert a text string to a bitmask */
+int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
+ int *oldmask, int minmask, int allmask)
+{
+ const char *debugstr;
+ char op = 0;
+ int newmask = minmask, i, len, found = 0;
+
+ /* <str> must be a list of tokens separated by whitespace
+ * and optionally an operator ('+' or '-'). If an operator
+ * appears first in <str>, '*oldmask' is used as the starting point
+ * (relative), otherwise minmask is used (absolute). An operator
+ * applies to all following tokens up to the next operator. */
+ while (*str != 0) {
+ while (isspace(*str))
+ str++;
+ if (*str == 0)
+ break;
+ if (*str == '+' || *str == '-') {
+ op = *str++;
+ if (!found)
+ /* only if first token is relative */
+ newmask = *oldmask;
+ while (isspace(*str))
+ str++;
+ if (*str == 0) /* trailing op */
+ return -EINVAL;
+ }
+
+ /* find token length */
+ for (len = 0; str[len] != 0 && !isspace(str[len]) &&
+ str[len] != '+' && str[len] != '-'; len++);
+
+ /* match token */
+ found = 0;
+ for (i = 0; i < 32; i++) {
+ debugstr = bit2str(i);
+ if (debugstr != NULL &&
+ strlen(debugstr) == len &&
+ cfs_strncasecmp(str, debugstr, len) == 0) {
+ if (op == '-')
+ newmask &= ~(1 << i);
+ else
+ newmask |= (1 << i);
+ found = 1;
+ break;
+ }
+ }
+ if (!found && len == 3 &&
+ (cfs_strncasecmp(str, "ALL", len) == 0)) {
+ if (op == '-')
+ newmask = minmask;
+ else
+ newmask = allmask;
+ found = 1;
+ }
+ if (!found) {
+ CWARN("unknown mask '%.*s'.\n"
+ "mask usage: [+|-]<all|type> ...\n", len, str);
+ return -EINVAL;
+ }
+ str += len;
+ }
+
+ *oldmask = newmask;
+ return 0;
+}
+EXPORT_SYMBOL(cfs_str2mask);
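+
+/*
+ * Usage sketch for cfs_str2mask(): libcfs_debug_subsys2str() stands in for a
+ * typical bit2str converter and is assumed, not defined in this file.
+ *
+ *   int mask = 0;
+ *
+ *   // "+lnet -libcfs" sets the lnet bit and clears the libcfs bit;
+ *   // because the string starts with an operator, *oldmask (here: mask)
+ *   // is used as the starting point
+ *   rc = cfs_str2mask("+lnet -libcfs", libcfs_debug_subsys2str,
+ *                     &mask, 0, ~0);
+ */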
+
+/* get the first string out of @str */
+char *cfs_firststr(char *str, size_t size)
+{
+ size_t i = 0;
+ char *end;
+
+ /* trim leading spaces */
+ while (i < size && *str && isspace(*str)) {
+ ++i;
+ ++str;
+ }
+
+ /* string with all spaces */
+ if (*str == '\0')
+ goto out;
+
+ end = str;
+ while (i < size && *end != '\0' && !isspace(*end)) {
+ ++i;
+ ++end;
+ }
+
+ *end = '\0';
+out:
+ return str;
+}
+EXPORT_SYMBOL(cfs_firststr);
+
+char *
+cfs_trimwhite(char *str)
+{
+ char *end;
+
+ while (cfs_iswhite(*str))
+ str++;
+
+ end = str + strlen(str);
+ while (end > str) {
+ if (!cfs_iswhite(end[-1]))
+ break;
+ end--;
+ }
+
+ *end = 0;
+ return str;
+}
+EXPORT_SYMBOL(cfs_trimwhite);
+
+/**
+ * Extracts tokens from strings.
+ *
+ * Looks for \a delim in string \a next, sets \a res to point to
+ * substring before the delimiter, sets \a next right after the found
+ * delimiter.
+ *
+ * \retval 1 if \a res points to a string of non-whitespace characters
+ * \retval 0 otherwise
+ */
+int
+cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res)
+{
+ char *end;
+
+ if (next->ls_str == NULL)
+ return 0;
+
+ /* skip leading white spaces */
+ while (next->ls_len) {
+ if (!cfs_iswhite(*next->ls_str))
+ break;
+ next->ls_str++;
+ next->ls_len--;
+ }
+
+ if (next->ls_len == 0) /* whitespaces only */
+ return 0;
+
+ if (*next->ls_str == delim) {
+ /* first non-whitespace character is the delimiter */
+ return 0;
+ }
+
+ res->ls_str = next->ls_str;
+ end = memchr(next->ls_str, delim, next->ls_len);
+ if (end == NULL) {
+ /* there is no delimiter in the string */
+ end = next->ls_str + next->ls_len;
+ next->ls_str = NULL;
+ } else {
+ next->ls_str = end + 1;
+ next->ls_len -= (end - res->ls_str + 1);
+ }
+
+ /* skip ending whitespaces */
+ while (--end != res->ls_str) {
+ if (!cfs_iswhite(*end))
+ break;
+ }
+
+ res->ls_len = end - res->ls_str + 1;
+ return 1;
+}
+EXPORT_SYMBOL(cfs_gettok);
+
+/**
+ * Converts string to integer.
+ *
+ * Accepts decimal and hexadecimal notation.
+ *
+ * \retval 1 if first \a nob chars of \a str convert to decimal or
+ * hexadecimal integer in the range [\a min, \a max]
+ * \retval 0 otherwise
+ */
+int
+cfs_str2num_check(char *str, int nob, unsigned *num,
+ unsigned min, unsigned max)
+{
+ char *endp;
+
+ str = cfs_trimwhite(str);
+ *num = strtoul(str, &endp, 0);
+ if (endp == str)
+ return 0;
+
+ for (; endp < str + nob; endp++) {
+ if (!cfs_iswhite(*endp))
+ return 0;
+ }
+
+ return (*num >= min && *num <= max);
+}
+EXPORT_SYMBOL(cfs_str2num_check);
+
+/**
+ * Parses \<range_expr\> token of the syntax. If \a bracketed is false,
+ * \a src should only have a single token which can be \<number\> or \*
+ *
+ * \retval 0 if \a src parses to
+ * \<number\> |
+ * \<number\> '-' \<number\> |
+ * \<number\> '-' \<number\> '/' \<number\>
+ * and \a *expr points to the allocated cfs_range_expr with
+ * range_expr::re_lo, range_expr::re_hi and range_expr::re_stride
+ * initialized accordingly
+ * \retval -EINVAL or -ENOMEM otherwise
+ */
+int
+cfs_range_expr_parse(struct cfs_lstr *src, unsigned min, unsigned max,
+ int bracketed, struct cfs_range_expr **expr)
+{
+ struct cfs_range_expr *re;
+ struct cfs_lstr tok;
+
+ LIBCFS_ALLOC(re, sizeof(*re));
+ if (re == NULL)
+ return -ENOMEM;
+
+ if (src->ls_len == 1 && src->ls_str[0] == '*') {
+ re->re_lo = min;
+ re->re_hi = max;
+ re->re_stride = 1;
+ goto out;
+ }
+
+ if (cfs_str2num_check(src->ls_str, src->ls_len,
+ &re->re_lo, min, max)) {
+ /* <number> is parsed */
+ re->re_hi = re->re_lo;
+ re->re_stride = 1;
+ goto out;
+ }
+
+ if (!bracketed || !cfs_gettok(src, '-', &tok))
+ goto failed;
+
+ if (!cfs_str2num_check(tok.ls_str, tok.ls_len,
+ &re->re_lo, min, max))
+ goto failed;
+
+ /* <number> - */
+ if (cfs_str2num_check(src->ls_str, src->ls_len,
+ &re->re_hi, min, max)) {
+ /* <number> - <number> is parsed */
+ re->re_stride = 1;
+ goto out;
+ }
+
+ /* go to check <number> '-' <number> '/' <number> */
+ if (cfs_gettok(src, '/', &tok)) {
+ if (!cfs_str2num_check(tok.ls_str, tok.ls_len,
+ &re->re_hi, min, max))
+ goto failed;
+
+ /* <number> - <number> / ... */
+ if (cfs_str2num_check(src->ls_str, src->ls_len,
+ &re->re_stride, min, max)) {
+ /* <number> - <number> / <number> is parsed */
+ goto out;
+ }
+ }
+
+ out:
+ *expr = re;
+ return 0;
+
+ failed:
+ LIBCFS_FREE(re, sizeof(*re));
+ return -EINVAL;
+}
+EXPORT_SYMBOL(cfs_range_expr_parse);
+
+/**
+ * Matches value (\a value) against ranges expression list \a expr_list.
+ *
+ * \retval 1 if \a value matches
+ * \retval 0 otherwise
+ */
+int
+cfs_expr_list_match(__u32 value, struct cfs_expr_list *expr_list)
+{
+ struct cfs_range_expr *expr;
+
+ list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
+ if (value >= expr->re_lo && value <= expr->re_hi &&
+ ((value - expr->re_lo) % expr->re_stride) == 0)
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(cfs_expr_list_match);
+
+/**
+ * Convert express list (\a expr_list) to an array of all matched values
+ *
+ * \retval N N is total number of all matched values
+ * \retval 0 if expression list is empty
+ * \retval < 0 for failure
+ */
+int
+cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, __u32 **valpp)
+{
+ struct cfs_range_expr *expr;
+ __u32 *val;
+ int count = 0;
+ int i;
+
+ list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
+ for (i = expr->re_lo; i <= expr->re_hi; i++) {
+ if (((i - expr->re_lo) % expr->re_stride) == 0)
+ count++;
+ }
+ }
+
+ if (count == 0) /* empty expression list */
+ return 0;
+
+ if (count > max) {
+ CERROR("Number of values %d exceeds max allowed %d\n",
+ count, max);
+ return -EINVAL;
+ }
+
+ LIBCFS_ALLOC(val, sizeof(val[0]) * count);
+ if (val == NULL)
+ return -ENOMEM;
+
+ count = 0;
+ list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
+ for (i = expr->re_lo; i <= expr->re_hi; i++) {
+ if (((i - expr->re_lo) % expr->re_stride) == 0)
+ val[count++] = i;
+ }
+ }
+
+ *valpp = val;
+ return count;
+}
+EXPORT_SYMBOL(cfs_expr_list_values);
+
+/**
+ * Frees cfs_range_expr structures of \a expr_list.
+ *
+ * \retval none
+ */
+void
+cfs_expr_list_free(struct cfs_expr_list *expr_list)
+{
+ while (!list_empty(&expr_list->el_exprs)) {
+ struct cfs_range_expr *expr;
+
+ expr = list_entry(expr_list->el_exprs.next,
+ struct cfs_range_expr, re_link);
+ list_del(&expr->re_link);
+ LIBCFS_FREE(expr, sizeof(*expr));
+ }
+
+ LIBCFS_FREE(expr_list, sizeof(*expr_list));
+}
+EXPORT_SYMBOL(cfs_expr_list_free);
+
+void
+cfs_expr_list_print(struct cfs_expr_list *expr_list)
+{
+ struct cfs_range_expr *expr;
+
+ list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
+ CDEBUG(D_WARNING, "%d-%d/%d\n",
+ expr->re_lo, expr->re_hi, expr->re_stride);
+ }
+}
+EXPORT_SYMBOL(cfs_expr_list_print);
+
+/**
+ * Parses \<cfs_expr_list\> token of the syntax.
+ *
+ * \retval 1 if \a str parses to \<number\> | \<expr_list\>
+ * \retval 0 otherwise
+ */
+int
+cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max,
+ struct cfs_expr_list **elpp)
+{
+ struct cfs_expr_list *expr_list;
+ struct cfs_range_expr *expr;
+ struct cfs_lstr src;
+ int rc;
+
+ LIBCFS_ALLOC(expr_list, sizeof(*expr_list));
+ if (expr_list == NULL)
+ return -ENOMEM;
+
+ src.ls_str = str;
+ src.ls_len = len;
+
+ INIT_LIST_HEAD(&expr_list->el_exprs);
+
+ if (src.ls_str[0] == '[' &&
+ src.ls_str[src.ls_len - 1] == ']') {
+ src.ls_str++;
+ src.ls_len -= 2;
+
+ rc = -EINVAL;
+ while (src.ls_str != NULL) {
+ struct cfs_lstr tok;
+
+ if (!cfs_gettok(&src, ',', &tok)) {
+ rc = -EINVAL;
+ break;
+ }
+
+ rc = cfs_range_expr_parse(&tok, min, max, 1, &expr);
+ if (rc != 0)
+ break;
+
+ list_add_tail(&expr->re_link,
+ &expr_list->el_exprs);
+ }
+ } else {
+ rc = cfs_range_expr_parse(&src, min, max, 0, &expr);
+ if (rc == 0) {
+ list_add_tail(&expr->re_link,
+ &expr_list->el_exprs);
+ }
+ }
+
+ if (rc != 0)
+ cfs_expr_list_free(expr_list);
+ else
+ *elpp = expr_list;
+
+ return rc;
+}
+EXPORT_SYMBOL(cfs_expr_list_parse);
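+
+/*
+ * Usage sketch: parse a bracketed expression and test values against it; the
+ * literal string and the [0, 1023] bounds are arbitrary.
+ *
+ *   struct cfs_expr_list *el;
+ *   char str[] = "[0-7/2,9]";       // matches 0, 2, 4, 6 and 9
+ *
+ *   rc = cfs_expr_list_parse(str, strlen(str), 0, 1023, &el);
+ *   if (rc == 0) {
+ *           matched = cfs_expr_list_match(4, el);   // 1
+ *           matched = cfs_expr_list_match(5, el);   // 0
+ *           cfs_expr_list_free(el);
+ *   }
+ */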
+
+/**
+ * Frees cfs_expr_list structures of \a list.
+ *
+ * For each struct cfs_expr_list structure found on \a list it frees
+ * range_expr list attached to it and frees the cfs_expr_list itself.
+ *
+ * \retval none
+ */
+void
+cfs_expr_list_free_list(struct list_head *list)
+{
+ struct cfs_expr_list *el;
+
+ while (!list_empty(list)) {
+ el = list_entry(list->next,
+ struct cfs_expr_list, el_link);
+ list_del(&el->el_link);
+ cfs_expr_list_free(el);
+ }
+}
+EXPORT_SYMBOL(cfs_expr_list_free_list);
+
+int
+cfs_ip_addr_parse(char *str, int len, struct list_head *list)
+{
+ struct cfs_expr_list *el;
+ struct cfs_lstr src;
+ int rc;
+ int i;
+
+ src.ls_str = str;
+ src.ls_len = len;
+ i = 0;
+
+ while (src.ls_str != NULL) {
+ struct cfs_lstr res;
+
+ if (!cfs_gettok(&src, '.', &res)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = cfs_expr_list_parse(res.ls_str, res.ls_len, 0, 255, &el);
+ if (rc != 0)
+ goto out;
+
+ list_add_tail(&el->el_link, list);
+ i++;
+ }
+
+ if (i == 4)
+ return 0;
+
+ rc = -EINVAL;
+ out:
+ cfs_expr_list_free_list(list);
+
+ return rc;
+}
+EXPORT_SYMBOL(cfs_ip_addr_parse);
+
+/**
+ * Matches address (\a addr) against address set encoded in \a list.
+ *
+ * \retval 1 if \a addr matches
+ * \retval 0 otherwise
+ */
+int
+cfs_ip_addr_match(__u32 addr, struct list_head *list)
+{
+ struct cfs_expr_list *el;
+ int i = 0;
+
+ list_for_each_entry_reverse(el, list, el_link) {
+ if (!cfs_expr_list_match(addr & 0xff, el))
+ return 0;
+ addr >>= 8;
+ i++;
+ }
+
+ return i == 4;
+}
+EXPORT_SYMBOL(cfs_ip_addr_match);
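+
+/*
+ * Usage sketch: match an IPv4 address against a pattern; the pattern string
+ * is arbitrary and the address is given with the first octet in the high
+ * byte, as cfs_ip_addr_match() expects.
+ *
+ *   LIST_HEAD(ip_list);
+ *   char pat[] = "192.168.[0-3].*";
+ *
+ *   rc = cfs_ip_addr_parse(pat, strlen(pat), &ip_list);
+ *   if (rc == 0) {
+ *           // 0xc0a80211 is 192.168.2.17 -> matches; 192.169.2.17 would not
+ *           matched = cfs_ip_addr_match(0xc0a80211, &ip_list);
+ *           cfs_ip_addr_free(&ip_list);
+ *   }
+ */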
+
+void
+cfs_ip_addr_free(struct list_head *list)
+{
+ cfs_expr_list_free_list(list);
+}
+EXPORT_SYMBOL(cfs_ip_addr_free);
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
new file mode 100644
index 0000000..00ab8fd
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
@@ -0,0 +1,1045 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Author: liang@whamcloud.com
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/cpu.h>
+#include <linux/sched.h>
+#include <linux/libcfs/libcfs.h>
+
+#ifdef CONFIG_SMP
+
+/**
+ * modparam for setting number of partitions
+ *
+ * 0 : estimate best value based on cores or NUMA nodes
+ * 1 : disable multiple partitions
+ * >1 : specify number of partitions
+ */
+static int cpu_npartitions;
+CFS_MODULE_PARM(cpu_npartitions, "i", int, 0444, "# of CPU partitions");
+
+/**
+ * modparam for setting CPU partitions patterns:
+ *
+ * i.e: "0[0,1,2,3] 1[4,5,6,7]", number before bracket is CPU partition ID,
+ * number in bracket is processor ID (core or HT)
+ *
+ * i.e: "N 0[0,1] 1[2,3]" the first character 'N' means numbers in bracket
+ * are NUMA node ID, number before bracket is CPU partition ID.
+ *
+ * NB: If user specified cpu_pattern, cpu_npartitions will be ignored
+ */
+static char *cpu_pattern = "";
+CFS_MODULE_PARM(cpu_pattern, "s", charp, 0444, "CPU partitions pattern");
+
+struct cfs_cpt_data {
+ /* serialize hotplug etc */
+ spinlock_t cpt_lock;
+ /* reserved for hotplug */
+ unsigned long cpt_version;
+ /* mutex to protect cpt_cpumask */
+ struct semaphore cpt_mutex;
+ /* scratch buffer for set/unset_node */
+ cpumask_t *cpt_cpumask;
+};
+
+static struct cfs_cpt_data cpt_data;
+
+static void cfs_cpu_core_siblings(int cpu, cpumask_t *mask)
+{
+ /* return cpumask of cores in the same socket */
+ cpumask_copy(mask, topology_core_cpumask(cpu));
+}
+
+/* return cpumask of HTs in the same core */
+static void cfs_cpu_ht_siblings(int cpu, cpumask_t *mask)
+{
+ cpumask_copy(mask, topology_thread_cpumask(cpu));
+}
+
+static void cfs_node_to_cpumask(int node, cpumask_t *mask)
+{
+ cpumask_copy(mask, cpumask_of_node(node));
+}
+
+void
+cfs_cpt_table_free(struct cfs_cpt_table *cptab)
+{
+ int i;
+
+ if (cptab->ctb_cpu2cpt != NULL) {
+ LIBCFS_FREE(cptab->ctb_cpu2cpt,
+ num_possible_cpus() *
+ sizeof(cptab->ctb_cpu2cpt[0]));
+ }
+
+ for (i = 0; cptab->ctb_parts != NULL && i < cptab->ctb_nparts; i++) {
+ struct cfs_cpu_partition *part = &cptab->ctb_parts[i];
+
+ if (part->cpt_nodemask != NULL) {
+ LIBCFS_FREE(part->cpt_nodemask,
+ sizeof(*part->cpt_nodemask));
+ }
+
+ if (part->cpt_cpumask != NULL)
+ LIBCFS_FREE(part->cpt_cpumask, cpumask_size());
+ }
+
+ if (cptab->ctb_parts != NULL) {
+ LIBCFS_FREE(cptab->ctb_parts,
+ cptab->ctb_nparts * sizeof(cptab->ctb_parts[0]));
+ }
+
+ if (cptab->ctb_nodemask != NULL)
+ LIBCFS_FREE(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
+ if (cptab->ctb_cpumask != NULL)
+ LIBCFS_FREE(cptab->ctb_cpumask, cpumask_size());
+
+ LIBCFS_FREE(cptab, sizeof(*cptab));
+}
+EXPORT_SYMBOL(cfs_cpt_table_free);
+
+struct cfs_cpt_table *
+cfs_cpt_table_alloc(unsigned int ncpt)
+{
+ struct cfs_cpt_table *cptab;
+ int i;
+
+ LIBCFS_ALLOC(cptab, sizeof(*cptab));
+ if (cptab == NULL)
+ return NULL;
+
+ cptab->ctb_nparts = ncpt;
+
+ LIBCFS_ALLOC(cptab->ctb_cpumask, cpumask_size());
+ LIBCFS_ALLOC(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
+
+ if (cptab->ctb_cpumask == NULL || cptab->ctb_nodemask == NULL)
+ goto failed;
+
+ LIBCFS_ALLOC(cptab->ctb_cpu2cpt,
+ num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0]));
+ if (cptab->ctb_cpu2cpt == NULL)
+ goto failed;
+
+ memset(cptab->ctb_cpu2cpt, -1,
+ num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0]));
+
+ LIBCFS_ALLOC(cptab->ctb_parts, ncpt * sizeof(cptab->ctb_parts[0]));
+ if (cptab->ctb_parts == NULL)
+ goto failed;
+
+ for (i = 0; i < ncpt; i++) {
+ struct cfs_cpu_partition *part = &cptab->ctb_parts[i];
+
+ LIBCFS_ALLOC(part->cpt_cpumask, cpumask_size());
+ LIBCFS_ALLOC(part->cpt_nodemask, sizeof(*part->cpt_nodemask));
+ if (part->cpt_cpumask == NULL || part->cpt_nodemask == NULL)
+ goto failed;
+ }
+
+ spin_lock(&cpt_data.cpt_lock);
+ /* Reserved for hotplug */
+ cptab->ctb_version = cpt_data.cpt_version;
+ spin_unlock(&cpt_data.cpt_lock);
+
+ return cptab;
+
+ failed:
+ cfs_cpt_table_free(cptab);
+ return NULL;
+}
+EXPORT_SYMBOL(cfs_cpt_table_alloc);
+
+int
+cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
+{
+ char *tmp = buf;
+ int rc = 0;
+ int i;
+ int j;
+
+ for (i = 0; i < cptab->ctb_nparts; i++) {
+ if (len > 0) {
+ rc = snprintf(tmp, len, "%d\t: ", i);
+ len -= rc;
+ }
+
+ if (len <= 0) {
+ rc = -EFBIG;
+ goto out;
+ }
+
+ tmp += rc;
+ for_each_cpu_mask(j, *cptab->ctb_parts[i].cpt_cpumask) {
+ rc = snprintf(tmp, len, "%d ", j);
+ len -= rc;
+ if (len <= 0) {
+ rc = -EFBIG;
+ goto out;
+ }
+ tmp += rc;
+ }
+
+ *tmp = '\n';
+ tmp++;
+ len--;
+ }
+
+ out:
+ if (rc < 0)
+ return rc;
+
+ return tmp - buf;
+}
+EXPORT_SYMBOL(cfs_cpt_table_print);
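+
+/*
+ * Illustrative output of cfs_cpt_table_print() for a table with two
+ * partitions of four CPUs each (CPU numbers are hypothetical):
+ *
+ *	0	: 0 1 2 3
+ *	1	: 4 5 6 7
+ */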
+
+int
+cfs_cpt_number(struct cfs_cpt_table *cptab)
+{
+ return cptab->ctb_nparts;
+}
+EXPORT_SYMBOL(cfs_cpt_number);
+
+int
+cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt)
+{
+ LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
+
+ return cpt == CFS_CPT_ANY ?
+ cpus_weight(*cptab->ctb_cpumask) :
+ cpus_weight(*cptab->ctb_parts[cpt].cpt_cpumask);
+}
+EXPORT_SYMBOL(cfs_cpt_weight);
+
+int
+cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt)
+{
+ LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
+
+ return cpt == CFS_CPT_ANY ?
+ any_online_cpu(*cptab->ctb_cpumask) != NR_CPUS :
+ any_online_cpu(*cptab->ctb_parts[cpt].cpt_cpumask) != NR_CPUS;
+}
+EXPORT_SYMBOL(cfs_cpt_online);
+
+cpumask_t *
+cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt)
+{
+ LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
+
+ return cpt == CFS_CPT_ANY ?
+ cptab->ctb_cpumask : cptab->ctb_parts[cpt].cpt_cpumask;
+}
+EXPORT_SYMBOL(cfs_cpt_cpumask);
+
+nodemask_t *
+cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt)
+{
+ LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
+
+ return cpt == CFS_CPT_ANY ?
+ cptab->ctb_nodemask : cptab->ctb_parts[cpt].cpt_nodemask;
+}
+EXPORT_SYMBOL(cfs_cpt_nodemask);
+
+int
+cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
+{
+ int node;
+
+ LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts);
+
+ if (cpu < 0 || cpu >= NR_CPUS || !cpu_online(cpu)) {
+ CDEBUG(D_INFO, "CPU %d is invalid or it's offline\n", cpu);
+ return 0;
+ }
+
+ if (cptab->ctb_cpu2cpt[cpu] != -1) {
+ CDEBUG(D_INFO, "CPU %d is already in partition %d\n",
+ cpu, cptab->ctb_cpu2cpt[cpu]);
+ return 0;
+ }
+
+ cptab->ctb_cpu2cpt[cpu] = cpt;
+
+ LASSERT(!cpu_isset(cpu, *cptab->ctb_cpumask));
+ LASSERT(!cpu_isset(cpu, *cptab->ctb_parts[cpt].cpt_cpumask));
+
+ cpu_set(cpu, *cptab->ctb_cpumask);
+ cpu_set(cpu, *cptab->ctb_parts[cpt].cpt_cpumask);
+
+ node = cpu_to_node(cpu);
+
+ /* first CPU of @node in this CPT table */
+ if (!node_isset(node, *cptab->ctb_nodemask))
+ node_set(node, *cptab->ctb_nodemask);
+
+ /* first CPU of @node in this partition */
+ if (!node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask))
+ node_set(node, *cptab->ctb_parts[cpt].cpt_nodemask);
+
+ return 1;
+}
+EXPORT_SYMBOL(cfs_cpt_set_cpu);
+
+void
+cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
+{
+ int node;
+ int i;
+
+ LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
+
+ if (cpu < 0 || cpu >= NR_CPUS) {
+ CDEBUG(D_INFO, "Invalid CPU id %d\n", cpu);
+ return;
+ }
+
+ if (cpt == CFS_CPT_ANY) {
+ /* caller doesn't know the partition ID */
+ cpt = cptab->ctb_cpu2cpt[cpu];
+ if (cpt < 0) { /* not set in this CPT-table */
+			CDEBUG(D_INFO, "Try to unset cpu %d which is "
+			       "not set in CPT-table %p\n", cpu, cptab);
+ return;
+ }
+
+ } else if (cpt != cptab->ctb_cpu2cpt[cpu]) {
+ CDEBUG(D_INFO,
+ "CPU %d is not in cpu-partition %d\n", cpu, cpt);
+ return;
+ }
+
+ LASSERT(cpu_isset(cpu, *cptab->ctb_parts[cpt].cpt_cpumask));
+ LASSERT(cpu_isset(cpu, *cptab->ctb_cpumask));
+
+ cpu_clear(cpu, *cptab->ctb_parts[cpt].cpt_cpumask);
+ cpu_clear(cpu, *cptab->ctb_cpumask);
+ cptab->ctb_cpu2cpt[cpu] = -1;
+
+ node = cpu_to_node(cpu);
+
+ LASSERT(node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask));
+ LASSERT(node_isset(node, *cptab->ctb_nodemask));
+
+ for_each_cpu_mask(i, *cptab->ctb_parts[cpt].cpt_cpumask) {
+		/* does this CPT have another CPU belonging to this node? */
+ if (cpu_to_node(i) == node)
+ break;
+ }
+
+ if (i == NR_CPUS)
+ node_clear(node, *cptab->ctb_parts[cpt].cpt_nodemask);
+
+ for_each_cpu_mask(i, *cptab->ctb_cpumask) {
+		/* does this CPT-table have another CPU belonging to this node? */
+ if (cpu_to_node(i) == node)
+ break;
+ }
+
+ if (i == NR_CPUS)
+ node_clear(node, *cptab->ctb_nodemask);
+
+ return;
+}
+EXPORT_SYMBOL(cfs_cpt_unset_cpu);
+
+int
+cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
+{
+ int i;
+
+ if (cpus_weight(*mask) == 0 || any_online_cpu(*mask) == NR_CPUS) {
+ CDEBUG(D_INFO, "No online CPU is found in the CPU mask "
+ "for CPU partition %d\n", cpt);
+ return 0;
+ }
+
+ for_each_cpu_mask(i, *mask) {
+ if (!cfs_cpt_set_cpu(cptab, cpt, i))
+ return 0;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL(cfs_cpt_set_cpumask);
+
+void
+cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
+{
+ int i;
+
+ for_each_cpu_mask(i, *mask)
+ cfs_cpt_unset_cpu(cptab, cpt, i);
+}
+EXPORT_SYMBOL(cfs_cpt_unset_cpumask);
+
+int
+cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node)
+{
+ cpumask_t *mask;
+ int rc;
+
+ if (node < 0 || node >= MAX_NUMNODES) {
+ CDEBUG(D_INFO,
+ "Invalid NUMA id %d for CPU partition %d\n", node, cpt);
+ return 0;
+ }
+
+ down(&cpt_data.cpt_mutex);
+
+ mask = cpt_data.cpt_cpumask;
+ cfs_node_to_cpumask(node, mask);
+
+ rc = cfs_cpt_set_cpumask(cptab, cpt, mask);
+
+ up(&cpt_data.cpt_mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL(cfs_cpt_set_node);
+
+void
+cfs_cpt_unset_node(struct cfs_cpt_table *cptab, int cpt, int node)
+{
+ cpumask_t *mask;
+
+ if (node < 0 || node >= MAX_NUMNODES) {
+ CDEBUG(D_INFO,
+ "Invalid NUMA id %d for CPU partition %d\n", node, cpt);
+ return;
+ }
+
+ down(&cpt_data.cpt_mutex);
+
+ mask = cpt_data.cpt_cpumask;
+ cfs_node_to_cpumask(node, mask);
+
+ cfs_cpt_unset_cpumask(cptab, cpt, mask);
+
+ up(&cpt_data.cpt_mutex);
+}
+EXPORT_SYMBOL(cfs_cpt_unset_node);
+
+int
+cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
+{
+ int i;
+
+ for_each_node_mask(i, *mask) {
+ if (!cfs_cpt_set_node(cptab, cpt, i))
+ return 0;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL(cfs_cpt_set_nodemask);
+
+void
+cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
+{
+ int i;
+
+ for_each_node_mask(i, *mask)
+ cfs_cpt_unset_node(cptab, cpt, i);
+}
+EXPORT_SYMBOL(cfs_cpt_unset_nodemask);
+
+void
+cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt)
+{
+ int last;
+ int i;
+
+ if (cpt == CFS_CPT_ANY) {
+ last = cptab->ctb_nparts - 1;
+ cpt = 0;
+ } else {
+ last = cpt;
+ }
+
+ for (; cpt <= last; cpt++) {
+ for_each_cpu_mask(i, *cptab->ctb_parts[cpt].cpt_cpumask)
+ cfs_cpt_unset_cpu(cptab, cpt, i);
+ }
+}
+EXPORT_SYMBOL(cfs_cpt_clear);
+
+int
+cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt)
+{
+ nodemask_t *mask;
+ int weight;
+ int rotor;
+ int node;
+
+ /* convert CPU partition ID to HW node id */
+
+ if (cpt < 0 || cpt >= cptab->ctb_nparts) {
+ mask = cptab->ctb_nodemask;
+ rotor = cptab->ctb_spread_rotor++;
+ } else {
+ mask = cptab->ctb_parts[cpt].cpt_nodemask;
+ rotor = cptab->ctb_parts[cpt].cpt_spread_rotor++;
+ }
+
+ weight = nodes_weight(*mask);
+ LASSERT(weight > 0);
+
+ rotor %= weight;
+
+ for_each_node_mask(node, *mask) {
+ if (rotor-- == 0)
+ return node;
+ }
+
+ LBUG();
+ return 0;
+}
+EXPORT_SYMBOL(cfs_cpt_spread_node);
+
+int
+cfs_cpt_current(struct cfs_cpt_table *cptab, int remap)
+{
+ int cpu = smp_processor_id();
+ int cpt = cptab->ctb_cpu2cpt[cpu];
+
+ if (cpt < 0) {
+ if (!remap)
+ return cpt;
+
+		/* don't return a negative value, for the safety of upper
+		 * layers; instead map the unknown CPU to a valid partition ID */
+ cpt = cpu % cptab->ctb_nparts;
+ }
+
+ return cpt;
+}
+EXPORT_SYMBOL(cfs_cpt_current);
+
+int
+cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu)
+{
+ LASSERT(cpu >= 0 && cpu < NR_CPUS);
+
+ return cptab->ctb_cpu2cpt[cpu];
+}
+EXPORT_SYMBOL(cfs_cpt_of_cpu);
+
+int
+cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
+{
+ cpumask_t *cpumask;
+ nodemask_t *nodemask;
+ int rc;
+ int i;
+
+ LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
+
+ if (cpt == CFS_CPT_ANY) {
+ cpumask = cptab->ctb_cpumask;
+ nodemask = cptab->ctb_nodemask;
+ } else {
+ cpumask = cptab->ctb_parts[cpt].cpt_cpumask;
+ nodemask = cptab->ctb_parts[cpt].cpt_nodemask;
+ }
+
+ if (any_online_cpu(*cpumask) == NR_CPUS) {
+		CERROR("No online CPU found in CPU partition %d, did someone "
+		       "do CPU hotplug on the system? You might need to "
+		       "reload Lustre modules to keep the system working well.\n",
+		       cpt);
+ return -EINVAL;
+ }
+
+ for_each_online_cpu(i) {
+ if (cpu_isset(i, *cpumask))
+ continue;
+
+ rc = set_cpus_allowed_ptr(current, cpumask);
+ set_mems_allowed(*nodemask);
+ if (rc == 0)
+ schedule(); /* switch to allowed CPU */
+
+ return rc;
+ }
+
+ /* don't need to set affinity because all online CPUs are covered */
+ return 0;
+}
+EXPORT_SYMBOL(cfs_cpt_bind);
+
+/**
+ * Choose at most \a number CPUs from \a node and set them in \a cpt.
+ * We always prefer to choose CPUs in the same core/socket.
+ */
+static int
+cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
+ cpumask_t *node, int number)
+{
+ cpumask_t *socket = NULL;
+ cpumask_t *core = NULL;
+ int rc = 0;
+ int cpu;
+
+ LASSERT(number > 0);
+
+ if (number >= cpus_weight(*node)) {
+ while (!cpus_empty(*node)) {
+ cpu = first_cpu(*node);
+
+ rc = cfs_cpt_set_cpu(cptab, cpt, cpu);
+ if (!rc)
+ return -EINVAL;
+ cpu_clear(cpu, *node);
+ }
+ return 0;
+ }
+
+ /* allocate scratch buffer */
+ LIBCFS_ALLOC(socket, cpumask_size());
+ LIBCFS_ALLOC(core, cpumask_size());
+ if (socket == NULL || core == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ while (!cpus_empty(*node)) {
+ cpu = first_cpu(*node);
+
+ /* get cpumask for cores in the same socket */
+ cfs_cpu_core_siblings(cpu, socket);
+ cpus_and(*socket, *socket, *node);
+
+ LASSERT(!cpus_empty(*socket));
+
+ while (!cpus_empty(*socket)) {
+ int i;
+
+ /* get cpumask for hts in the same core */
+ cfs_cpu_ht_siblings(cpu, core);
+ cpus_and(*core, *core, *node);
+
+ LASSERT(!cpus_empty(*core));
+
+ for_each_cpu_mask(i, *core) {
+ cpu_clear(i, *socket);
+ cpu_clear(i, *node);
+
+ rc = cfs_cpt_set_cpu(cptab, cpt, i);
+ if (!rc) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (--number == 0)
+ goto out;
+ }
+ cpu = first_cpu(*socket);
+ }
+ }
+
+ out:
+ if (socket != NULL)
+ LIBCFS_FREE(socket, cpumask_size());
+ if (core != NULL)
+ LIBCFS_FREE(core, cpumask_size());
+ return rc;
+}
+
+#define CPT_WEIGHT_MIN 4u
+
+static unsigned int
+cfs_cpt_num_estimate(void)
+{
+ unsigned nnode = num_online_nodes();
+ unsigned ncpu = num_online_cpus();
+ unsigned ncpt;
+
+ if (ncpu <= CPT_WEIGHT_MIN) {
+ ncpt = 1;
+ goto out;
+ }
+
+	/* generate a reasonable number of CPU partitions based on the total
+	 * number of CPUs; the preferred N should be a power of 2 and match
+	 * this condition: 2 * (N - 1)^2 < NCPUS <= 2 * N^2 */
+ for (ncpt = 2; ncpu > 2 * ncpt * ncpt; ncpt <<= 1) {}
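+	/*
+	 * e.g. (illustrative): ncpu = 24 stops the loop above at ncpt = 4,
+	 * since 2 * (4 - 1)^2 = 18 < 24 <= 2 * 4^2 = 32.
+	 */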
+
+ if (ncpt <= nnode) { /* fat numa system */
+ while (nnode > ncpt)
+ nnode >>= 1;
+
+ } else { /* ncpt > nnode */
+ while ((nnode << 1) <= ncpt)
+ nnode <<= 1;
+ }
+
+ ncpt = nnode;
+
+ out:
+#if (BITS_PER_LONG == 32)
+	/* configuring many CPU partitions on a 32-bit system could consume
+	 * too much memory */
+ ncpt = min(2U, ncpt);
+#endif
+ while (ncpu % ncpt != 0)
+ ncpt--; /* worst case is 1 */
+
+ return ncpt;
+}
+
+static struct cfs_cpt_table *
+cfs_cpt_table_create(int ncpt)
+{
+ struct cfs_cpt_table *cptab = NULL;
+ cpumask_t *mask = NULL;
+ int cpt = 0;
+ int num;
+ int rc;
+ int i;
+
+ rc = cfs_cpt_num_estimate();
+ if (ncpt <= 0)
+ ncpt = rc;
+
+ if (ncpt > num_online_cpus() || ncpt > 4 * rc) {
+		CWARN("CPU partition number %d is larger than the suggested "
+		      "value (%d), your system may have performance "
+		      "issues or run out of memory while under pressure\n",
+		      ncpt, rc);
+ }
+
+ if (num_online_cpus() % ncpt != 0) {
+		CERROR("CPU number %d is not a multiple of cpu_npartitions %d, "
+		       "please try a different cpu_npartitions value or "
+		       "set a pattern string with cpu_pattern=STRING\n",
+		       (int)num_online_cpus(), ncpt);
+ goto failed;
+ }
+
+ cptab = cfs_cpt_table_alloc(ncpt);
+ if (cptab == NULL) {
+ CERROR("Failed to allocate CPU map(%d)\n", ncpt);
+ goto failed;
+ }
+
+ num = num_online_cpus() / ncpt;
+ if (num == 0) {
+ CERROR("CPU changed while setting CPU partition\n");
+ goto failed;
+ }
+
+ LIBCFS_ALLOC(mask, cpumask_size());
+ if (mask == NULL) {
+ CERROR("Failed to allocate scratch cpumask\n");
+ goto failed;
+ }
+
+ for_each_online_node(i) {
+ cfs_node_to_cpumask(i, mask);
+
+ while (!cpus_empty(*mask)) {
+ struct cfs_cpu_partition *part;
+ int n;
+
+ if (cpt >= ncpt)
+ goto failed;
+
+ part = &cptab->ctb_parts[cpt];
+
+ n = num - cpus_weight(*part->cpt_cpumask);
+ LASSERT(n > 0);
+
+ rc = cfs_cpt_choose_ncpus(cptab, cpt, mask, n);
+ if (rc < 0)
+ goto failed;
+
+ LASSERT(num >= cpus_weight(*part->cpt_cpumask));
+ if (num == cpus_weight(*part->cpt_cpumask))
+ cpt++;
+ }
+ }
+
+ if (cpt != ncpt ||
+ num != cpus_weight(*cptab->ctb_parts[ncpt - 1].cpt_cpumask)) {
+		CERROR("Expected %d(%d) CPU partitions but got %d(%d), "
+		       "CPU hotplug/unplug while setting?\n",
+		       cptab->ctb_nparts, num, cpt,
+		       cpus_weight(*cptab->ctb_parts[ncpt - 1].cpt_cpumask));
+ goto failed;
+ }
+
+ LIBCFS_FREE(mask, cpumask_size());
+
+ return cptab;
+
+ failed:
+	CERROR("Failed to set up CPU partition table with %d "
+	       "CPU partitions, online HW nodes: %d, HW CPUs: %d\n",
+	       ncpt, num_online_nodes(), num_online_cpus());
+
+ if (mask != NULL)
+ LIBCFS_FREE(mask, cpumask_size());
+
+ if (cptab != NULL)
+ cfs_cpt_table_free(cptab);
+
+ return NULL;
+}
+
+static struct cfs_cpt_table *
+cfs_cpt_table_create_pattern(char *pattern)
+{
+ struct cfs_cpt_table *cptab;
+ char *str = pattern;
+ int node = 0;
+ int high;
+ int ncpt;
+ int c;
+
+ for (ncpt = 0;; ncpt++) { /* quick scan bracket */
+ str = strchr(str, '[');
+ if (str == NULL)
+ break;
+ str++;
+ }
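+
+	/*
+	 * e.g. (illustrative): the pattern "0[0,1] 1[2,3]" contains two
+	 * '[' characters, so the scan above yields ncpt = 2.
+	 */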
+
+ str = cfs_trimwhite(pattern);
+ if (*str == 'n' || *str == 'N') {
+ pattern = str + 1;
+ node = 1;
+ }
+
+ if (ncpt == 0 ||
+ (node && ncpt > num_online_nodes()) ||
+ (!node && ncpt > num_online_cpus())) {
+ CERROR("Invalid pattern %s, or too many partitions %d\n",
+ pattern, ncpt);
+ return NULL;
+ }
+
+ high = node ? MAX_NUMNODES - 1 : NR_CPUS - 1;
+
+ cptab = cfs_cpt_table_alloc(ncpt);
+ if (cptab == NULL) {
+ CERROR("Failed to allocate cpu partition table\n");
+ return NULL;
+ }
+
+ for (str = cfs_trimwhite(pattern), c = 0;; c++) {
+ struct cfs_range_expr *range;
+ struct cfs_expr_list *el;
+ char *bracket = strchr(str, '[');
+ int cpt;
+ int rc;
+ int i;
+ int n;
+
+ if (bracket == NULL) {
+ if (*str != 0) {
+ CERROR("Invalid pattern %s\n", str);
+ goto failed;
+ } else if (c != ncpt) {
+				CERROR("expected %d partitions but found %d\n",
+				       ncpt, c);
+ goto failed;
+ }
+ break;
+ }
+
+ if (sscanf(str, "%u%n", &cpt, &n) < 1) {
+ CERROR("Invalid cpu pattern %s\n", str);
+ goto failed;
+ }
+
+ if (cpt < 0 || cpt >= ncpt) {
+ CERROR("Invalid partition id %d, total partitions %d\n",
+ cpt, ncpt);
+ goto failed;
+ }
+
+ if (cfs_cpt_weight(cptab, cpt) != 0) {
+ CERROR("Partition %d has already been set.\n", cpt);
+ goto failed;
+ }
+
+ str = cfs_trimwhite(str + n);
+ if (str != bracket) {
+ CERROR("Invalid pattern %s\n", str);
+ goto failed;
+ }
+
+ bracket = strchr(str, ']');
+ if (bracket == NULL) {
+ CERROR("missing right bracket for cpt %d, %s\n",
+ cpt, str);
+ goto failed;
+ }
+
+ if (cfs_expr_list_parse(str, (bracket - str) + 1,
+ 0, high, &el) != 0) {
+ CERROR("Can't parse number range: %s\n", str);
+ goto failed;
+ }
+
+ list_for_each_entry(range, &el->el_exprs, re_link) {
+ for (i = range->re_lo; i <= range->re_hi; i++) {
+ if ((i - range->re_lo) % range->re_stride != 0)
+ continue;
+
+ rc = node ? cfs_cpt_set_node(cptab, cpt, i) :
+ cfs_cpt_set_cpu(cptab, cpt, i);
+ if (!rc) {
+ cfs_expr_list_free(el);
+ goto failed;
+ }
+ }
+ }
+
+ cfs_expr_list_free(el);
+
+ if (!cfs_cpt_online(cptab, cpt)) {
+ CERROR("No online CPU is found on partition %d\n", cpt);
+ goto failed;
+ }
+
+ str = cfs_trimwhite(bracket + 1);
+ }
+
+ return cptab;
+
+ failed:
+ cfs_cpt_table_free(cptab);
+ return NULL;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int
+cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ spin_lock(&cpt_data.cpt_lock);
+ cpt_data.cpt_version++;
+ spin_unlock(&cpt_data.cpt_lock);
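+		/* fall through */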
+ default:
+		CWARN("Lustre: can't support CPU hotplug well now, "
+		      "performance and stability could be impacted "
+		      "[CPU %u notify: %lx]\n", cpu, action);
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cfs_cpu_notifier = {
+ .notifier_call = cfs_cpu_notify,
+ .priority = 0
+};
+
+#endif
+
+void
+cfs_cpu_fini(void)
+{
+ if (cfs_cpt_table != NULL)
+ cfs_cpt_table_free(cfs_cpt_table);
+
+#ifdef CONFIG_HOTPLUG_CPU
+ unregister_hotcpu_notifier(&cfs_cpu_notifier);
+#endif
+ if (cpt_data.cpt_cpumask != NULL)
+ LIBCFS_FREE(cpt_data.cpt_cpumask, cpumask_size());
+}
+
+int
+cfs_cpu_init(void)
+{
+ LASSERT(cfs_cpt_table == NULL);
+
+ memset(&cpt_data, 0, sizeof(cpt_data));
+
+ LIBCFS_ALLOC(cpt_data.cpt_cpumask, cpumask_size());
+ if (cpt_data.cpt_cpumask == NULL) {
+ CERROR("Failed to allocate scratch buffer\n");
+ return -1;
+ }
+
+ spin_lock_init(&cpt_data.cpt_lock);
+ sema_init(&cpt_data.cpt_mutex, 1);
+
+#ifdef CONFIG_HOTPLUG_CPU
+ register_hotcpu_notifier(&cfs_cpu_notifier);
+#endif
+
+ if (*cpu_pattern != 0) {
+ cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern);
+ if (cfs_cpt_table == NULL) {
+ CERROR("Failed to create cptab from pattern %s\n",
+ cpu_pattern);
+ goto failed;
+ }
+
+ } else {
+ cfs_cpt_table = cfs_cpt_table_create(cpu_npartitions);
+ if (cfs_cpt_table == NULL) {
+			CERROR("Failed to create partition table with npartitions %d\n",
+			       cpu_npartitions);
+ goto failed;
+ }
+ }
+
+ spin_lock(&cpt_data.cpt_lock);
+ if (cfs_cpt_table->ctb_version != cpt_data.cpt_version) {
+ spin_unlock(&cpt_data.cpt_lock);
+ CERROR("CPU hotplug/unplug during setup\n");
+ goto failed;
+ }
+ spin_unlock(&cpt_data.cpt_lock);
+
+ LCONSOLE(0, "HW CPU cores: %d, npartitions: %d\n",
+ num_online_cpus(), cfs_cpt_number(cfs_cpt_table));
+ return 0;
+
+ failed:
+ cfs_cpu_fini();
+ return -1;
+}
+
+#endif
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto-adler.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto-adler.c
new file mode 100644
index 0000000..20b2d61
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto-adler.c
@@ -0,0 +1,144 @@
+/* GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see http://www.gnu.org/licenses
+ *
+ * Please visit http://www.xyratex.com/contact if you need additional
+ * information or have any questions.
+ *
+ * GPL HEADER END
+ */
+
+/*
+ * Copyright 2012 Xyratex Technology Limited
+ */
+
+/*
+ * These are crypto API shash wrappers around zlib_adler32.
+ */
+
+#include <linux/module.h>
+#include <linux/zutil.h>
+#include <crypto/internal/hash.h>
+
+
+#define CHKSUM_BLOCK_SIZE 1
+#define CHKSUM_DIGEST_SIZE 4
+
+
+static u32 __adler32(u32 cksum, unsigned char const *p, size_t len)
+{
+ return zlib_adler32(cksum, p, len);
+}
+
+static int adler32_cra_init(struct crypto_tfm *tfm)
+{
+ u32 *key = crypto_tfm_ctx(tfm);
+
+ *key = 1;
+
+ return 0;
+}
+
+static int adler32_setkey(struct crypto_shash *hash, const u8 *key,
+ unsigned int keylen)
+{
+ u32 *mctx = crypto_shash_ctx(hash);
+
+ if (keylen != sizeof(u32)) {
+ crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ *mctx = *(u32 *)key;
+ return 0;
+}
+
+static int adler32_init(struct shash_desc *desc)
+{
+ u32 *mctx = crypto_shash_ctx(desc->tfm);
+ u32 *cksump = shash_desc_ctx(desc);
+
+ *cksump = *mctx;
+
+ return 0;
+}
+
+static int adler32_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ u32 *cksump = shash_desc_ctx(desc);
+
+ *cksump = __adler32(*cksump, data, len);
+ return 0;
+}
+static int __adler32_finup(u32 *cksump, const u8 *data, unsigned int len,
+ u8 *out)
+{
+ *(u32 *)out = __adler32(*cksump, data, len);
+ return 0;
+}
+
+static int adler32_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ return __adler32_finup(shash_desc_ctx(desc), data, len, out);
+}
+
+static int adler32_final(struct shash_desc *desc, u8 *out)
+{
+ u32 *cksump = shash_desc_ctx(desc);
+
+ *(u32 *)out = *cksump;
+ return 0;
+}
+
+static int adler32_digest(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ return __adler32_finup(crypto_shash_ctx(desc->tfm), data, len,
+ out);
+}
+static struct shash_alg alg = {
+ .setkey = adler32_setkey,
+ .init = adler32_init,
+ .update = adler32_update,
+ .final = adler32_final,
+ .finup = adler32_finup,
+ .digest = adler32_digest,
+ .descsize = sizeof(u32),
+ .digestsize = CHKSUM_DIGEST_SIZE,
+ .base = {
+ .cra_name = "adler32",
+ .cra_driver_name = "adler32-zlib",
+ .cra_priority = 100,
+ .cra_blocksize = CHKSUM_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(u32),
+ .cra_module = THIS_MODULE,
+ .cra_init = adler32_cra_init,
+ }
+};
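+
+/*
+ * Once registered below, the wrapper is reachable through the generic
+ * shash API; e.g. (sketch only, error handling omitted):
+ *
+ *	struct crypto_shash *tfm = crypto_alloc_shash("adler32", 0, 0);
+ */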
+
+
+int cfs_crypto_adler32_register(void)
+{
+ return crypto_register_shash(&alg);
+}
+EXPORT_SYMBOL(cfs_crypto_adler32_register);
+
+void cfs_crypto_adler32_unregister(void)
+{
+ crypto_unregister_shash(&alg);
+}
+EXPORT_SYMBOL(cfs_crypto_adler32_unregister);
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c
new file mode 100644
index 0000000..b6c79bc
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c
@@ -0,0 +1,291 @@
+/* GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see http://www.gnu.org/licenses
+ *
+ * Please visit http://www.xyratex.com/contact if you need additional
+ * information or have any questions.
+ *
+ * GPL HEADER END
+ */
+
+/*
+ * Copyright 2012 Xyratex Technology Limited
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <linux/libcfs/libcfs.h>
+#include <linux/libcfs/linux/linux-crypto.h>
+/**
+ * Array of hash algorithm speed in MByte per second
+ */
+static int cfs_crypto_hash_speeds[CFS_HASH_ALG_MAX];
+
+
+
+static int cfs_crypto_hash_alloc(unsigned char alg_id,
+ const struct cfs_crypto_hash_type **type,
+ struct hash_desc *desc, unsigned char *key,
+ unsigned int key_len)
+{
+ int err = 0;
+
+ *type = cfs_crypto_hash_type(alg_id);
+
+ if (*type == NULL) {
+ CWARN("Unsupported hash algorithm id = %d, max id is %d\n",
+ alg_id, CFS_HASH_ALG_MAX);
+ return -EINVAL;
+ }
+ desc->tfm = crypto_alloc_hash((*type)->cht_name, 0, 0);
+
+ if (desc->tfm == NULL)
+ return -EINVAL;
+
+ if (IS_ERR(desc->tfm)) {
+ CDEBUG(D_INFO, "Failed to alloc crypto hash %s\n",
+ (*type)->cht_name);
+ return PTR_ERR(desc->tfm);
+ }
+
+ desc->flags = 0;
+
+	/* Shash has different initialization logic than digest:
+	 *   shash:  crypto_hash_setkey, crypto_hash_init
+	 *   digest: crypto_digest_init, crypto_digest_setkey
+	 * Skip this step for digest, because we use the shash logic in
+	 * cfs_crypto_hash_alloc.
+	 */
+ if (key != NULL) {
+ err = crypto_hash_setkey(desc->tfm, key, key_len);
+ } else if ((*type)->cht_key != 0) {
+ err = crypto_hash_setkey(desc->tfm,
+ (unsigned char *)&((*type)->cht_key),
+ (*type)->cht_size);
+ }
+
+ if (err != 0) {
+ crypto_free_hash(desc->tfm);
+ return err;
+ }
+
+ CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n",
+ (crypto_hash_tfm(desc->tfm))->__crt_alg->cra_name,
+ (crypto_hash_tfm(desc->tfm))->__crt_alg->cra_driver_name,
+ cfs_crypto_hash_speeds[alg_id]);
+
+ return crypto_hash_init(desc);
+}
+
+int cfs_crypto_hash_digest(unsigned char alg_id,
+ const void *buf, unsigned int buf_len,
+ unsigned char *key, unsigned int key_len,
+ unsigned char *hash, unsigned int *hash_len)
+{
+ struct scatterlist sl;
+ struct hash_desc hdesc;
+ int err;
+ const struct cfs_crypto_hash_type *type;
+
+ if (buf == NULL || buf_len == 0 || hash_len == NULL)
+ return -EINVAL;
+
+ err = cfs_crypto_hash_alloc(alg_id, &type, &hdesc, key, key_len);
+ if (err != 0)
+ return err;
+
+ if (hash == NULL || *hash_len < type->cht_size) {
+ *hash_len = type->cht_size;
+ crypto_free_hash(hdesc.tfm);
+ return -ENOSPC;
+ }
+ sg_init_one(&sl, (void *)buf, buf_len);
+
+ hdesc.flags = 0;
+ err = crypto_hash_digest(&hdesc, &sl, sl.length, hash);
+ crypto_free_hash(hdesc.tfm);
+
+ return err;
+}
+EXPORT_SYMBOL(cfs_crypto_hash_digest);
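+
+/*
+ * One-shot usage of the helper above (sketch only; alg_id, buf and
+ * buf_len are caller-provided):
+ *
+ *	unsigned char digest[64];
+ *	unsigned int len = sizeof(digest);
+ *	int rc = cfs_crypto_hash_digest(alg_id, buf, buf_len,
+ *					NULL, 0, digest, &len);
+ */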
+
+struct cfs_crypto_hash_desc *
+cfs_crypto_hash_init(unsigned char alg_id,
+		     unsigned char *key, unsigned int key_len)
+{
+ struct hash_desc *hdesc;
+ int err;
+ const struct cfs_crypto_hash_type *type;
+
+	hdesc = kmalloc(sizeof(*hdesc), GFP_KERNEL);
+ if (hdesc == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ err = cfs_crypto_hash_alloc(alg_id, &type, hdesc, key, key_len);
+
+ if (err) {
+ kfree(hdesc);
+ return ERR_PTR(err);
+ }
+ return (struct cfs_crypto_hash_desc *)hdesc;
+}
+EXPORT_SYMBOL(cfs_crypto_hash_init);
+
+int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *hdesc,
+ struct page *page, unsigned int offset,
+ unsigned int len)
+{
+ struct scatterlist sl;
+
+ sg_init_table(&sl, 1);
+ sg_set_page(&sl, page, len, offset & ~CFS_PAGE_MASK);
+
+ return crypto_hash_update((struct hash_desc *)hdesc, &sl, sl.length);
+}
+EXPORT_SYMBOL(cfs_crypto_hash_update_page);
+
+int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *hdesc,
+ const void *buf, unsigned int buf_len)
+{
+ struct scatterlist sl;
+
+ sg_init_one(&sl, (void *)buf, buf_len);
+
+ return crypto_hash_update((struct hash_desc *)hdesc, &sl, sl.length);
+}
+EXPORT_SYMBOL(cfs_crypto_hash_update);
+
+/* If the hash_len pointer is NULL, just destroy the descriptor. */
+int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc,
+ unsigned char *hash, unsigned int *hash_len)
+{
+ int err;
+ int size = crypto_hash_digestsize(((struct hash_desc *)hdesc)->tfm);
+
+ if (hash_len == NULL) {
+ crypto_free_hash(((struct hash_desc *)hdesc)->tfm);
+ kfree(hdesc);
+ return 0;
+ }
+ if (hash == NULL || *hash_len < size) {
+ *hash_len = size;
+ return -ENOSPC;
+ }
+ err = crypto_hash_final((struct hash_desc *) hdesc, hash);
+
+ if (err < 0) {
+		/* maybe the caller can fix the error */
+ return err;
+ }
+ crypto_free_hash(((struct hash_desc *)hdesc)->tfm);
+ kfree(hdesc);
+ return err;
+}
+EXPORT_SYMBOL(cfs_crypto_hash_final);
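+
+/*
+ * Typical use of the incremental interface above (sketch only, error
+ * handling omitted; alg_id, buf and buf_len are caller-provided):
+ *
+ *	struct cfs_crypto_hash_desc *desc;
+ *	unsigned char digest[64];
+ *	unsigned int len = sizeof(digest);
+ *
+ *	desc = cfs_crypto_hash_init(alg_id, NULL, 0);
+ *	cfs_crypto_hash_update(desc, buf, buf_len);
+ *	cfs_crypto_hash_final(desc, digest, &len);
+ */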
+
+static void cfs_crypto_performance_test(unsigned char alg_id,
+ const unsigned char *buf,
+ unsigned int buf_len)
+{
+ unsigned long start, end;
+ int bcount, err = 0;
+ int sec = 1; /* do test only 1 sec */
+ unsigned char hash[64];
+ unsigned int hash_len = 64;
+
+ for (start = jiffies, end = start + sec * HZ, bcount = 0;
+ time_before(jiffies, end); bcount++) {
+ err = cfs_crypto_hash_digest(alg_id, buf, buf_len, NULL, 0,
+ hash, &hash_len);
+ if (err)
+ break;
+
+ }
+ end = jiffies;
+
+ if (err) {
+ cfs_crypto_hash_speeds[alg_id] = -1;
+ CDEBUG(D_INFO, "Crypto hash algorithm %s, err = %d\n",
+ cfs_crypto_hash_name(alg_id), err);
+ } else {
+ unsigned long tmp;
+ tmp = ((bcount * buf_len / jiffies_to_msecs(end - start)) *
+ 1000) / (1024 * 1024);
+ cfs_crypto_hash_speeds[alg_id] = (int)tmp;
+ }
+ CDEBUG(D_INFO, "Crypto hash algorithm %s speed = %d MB/s\n",
+ cfs_crypto_hash_name(alg_id), cfs_crypto_hash_speeds[alg_id]);
+}
+
+int cfs_crypto_hash_speed(unsigned char hash_alg)
+{
+ if (hash_alg < CFS_HASH_ALG_MAX)
+ return cfs_crypto_hash_speeds[hash_alg];
+ else
+ return -1;
+}
+EXPORT_SYMBOL(cfs_crypto_hash_speed);
+
+/**
+ * Do performance test for all hash algorithms.
+ */
+static int cfs_crypto_test_hashes(void)
+{
+ unsigned char i;
+ unsigned char *data;
+ unsigned int j;
+	/* Data block size for testing the hashes. The maximum
+	 * kmalloc size for the 2.6.18 kernel is 128K. */
+ unsigned int data_len = 1 * 128 * 1024;
+
+	data = kmalloc(data_len, GFP_KERNEL);
+ if (data == NULL) {
+ CERROR("Failed to allocate mem\n");
+ return -ENOMEM;
+ }
+
+ for (j = 0; j < data_len; j++)
+ data[j] = j & 0xff;
+
+ for (i = 0; i < CFS_HASH_ALG_MAX; i++)
+ cfs_crypto_performance_test(i, data, data_len);
+
+ kfree(data);
+ return 0;
+}
+
+static int adler32;
+
+int cfs_crypto_register(void)
+{
+ request_module("crc32c");
+
+ adler32 = cfs_crypto_adler32_register();
+
+ /* check all algorithms and do performance test */
+ cfs_crypto_test_hashes();
+ return 0;
+}
+void cfs_crypto_unregister(void)
+{
+ if (adler32 == 0)
+ cfs_crypto_adler32_unregister();
+
+ return;
+}
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
new file mode 100644
index 0000000..ea9e949
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
@@ -0,0 +1,322 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/libcfs/linux/linux-curproc.c
+ *
+ * Lustre curproc API implementation for Linux kernel
+ *
+ * Author: Nikita Danilov <nikita@clusterfs.com>
+ */
+
+#include <linux/sched.h>
+#include <linux/fs_struct.h>
+
+#include <linux/compat.h>
+#include <linux/thread_info.h>
+
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/libcfs/libcfs.h>
+
+/*
+ * Implementation of cfs_curproc API (see portals/include/libcfs/curproc.h)
+ * for Linux kernel.
+ */
+
+int cfs_curproc_groups_nr(void)
+{
+ int nr;
+
+ task_lock(current);
+ nr = current_cred()->group_info->ngroups;
+ task_unlock(current);
+ return nr;
+}
+
+/* Currently all the CFS_CAP_* defines match CAP_* ones. */
+#define cfs_cap_pack(cap) (cap)
+#define cfs_cap_unpack(cap) (cap)
+
+void cfs_cap_raise(cfs_cap_t cap)
+{
+ struct cred *cred;
+ if ((cred = prepare_creds())) {
+ cap_raise(cred->cap_effective, cfs_cap_unpack(cap));
+ commit_creds(cred);
+ }
+}
+
+void cfs_cap_lower(cfs_cap_t cap)
+{
+ struct cred *cred;
+ if ((cred = prepare_creds())) {
+ cap_lower(cred->cap_effective, cfs_cap_unpack(cap));
+ commit_creds(cred);
+ }
+}
+
+int cfs_cap_raised(cfs_cap_t cap)
+{
+ return cap_raised(current_cap(), cfs_cap_unpack(cap));
+}
+
+void cfs_kernel_cap_pack(kernel_cap_t kcap, cfs_cap_t *cap)
+{
+#if defined (_LINUX_CAPABILITY_VERSION) && _LINUX_CAPABILITY_VERSION == 0x19980330
+ *cap = cfs_cap_pack(kcap);
+#elif defined (_LINUX_CAPABILITY_VERSION) && _LINUX_CAPABILITY_VERSION == 0x20071026
+ *cap = cfs_cap_pack(kcap[0]);
+#elif defined(_KERNEL_CAPABILITY_VERSION) && _KERNEL_CAPABILITY_VERSION == 0x20080522
+ /* XXX lost high byte */
+ *cap = cfs_cap_pack(kcap.cap[0]);
+#else
+ #error "need correct _KERNEL_CAPABILITY_VERSION "
+#endif
+}
+
+void cfs_kernel_cap_unpack(kernel_cap_t *kcap, cfs_cap_t cap)
+{
+#if defined (_LINUX_CAPABILITY_VERSION) && _LINUX_CAPABILITY_VERSION == 0x19980330
+ *kcap = cfs_cap_unpack(cap);
+#elif defined (_LINUX_CAPABILITY_VERSION) && _LINUX_CAPABILITY_VERSION == 0x20071026
+ (*kcap)[0] = cfs_cap_unpack(cap);
+#elif defined(_KERNEL_CAPABILITY_VERSION) && _KERNEL_CAPABILITY_VERSION == 0x20080522
+ kcap->cap[0] = cfs_cap_unpack(cap);
+#else
+ #error "need correct _KERNEL_CAPABILITY_VERSION "
+#endif
+}
+
+cfs_cap_t cfs_curproc_cap_pack(void)
+{
+ cfs_cap_t cap;
+ cfs_kernel_cap_pack(current_cap(), &cap);
+ return cap;
+}
+
+void cfs_curproc_cap_unpack(cfs_cap_t cap)
+{
+ struct cred *cred;
+ if ((cred = prepare_creds())) {
+ cfs_kernel_cap_unpack(&cred->cap_effective, cap);
+ commit_creds(cred);
+ }
+}
+
+int cfs_capable(cfs_cap_t cap)
+{
+ return capable(cfs_cap_unpack(cap));
+}
+
+/* Check if task is running in 32-bit API mode, for the purpose of
+ * userspace binary interfaces. On 32-bit Linux this is (unfortunately)
+ * always true, even if the application is using LARGEFILE64 and 64-bit
+ * APIs, because Linux provides no way for the filesystem to know if it
+ * is called via 32-bit or 64-bit APIs. Other clients may vary. On
+ * 64-bit systems, this will only be true if the binary is calling a
+ * 32-bit system call. */
+int current_is_32bit(void)
+{
+ return is_compat_task();
+}
+
+static int cfs_access_process_vm(struct task_struct *tsk, unsigned long addr,
+ void *buf, int len, int write)
+{
+	/* Just copied from the kernel, for kernels which don't
+	 * have access_process_vm() exported */
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ struct page *page;
+ void *old_buf = buf;
+
+ mm = get_task_mm(tsk);
+ if (!mm)
+ return 0;
+
+ down_read(&mm->mmap_sem);
+	/* ignore errors, just check how much was successfully transferred */
+ while (len) {
+ int bytes, rc, offset;
+ void *maddr;
+
+ rc = get_user_pages(tsk, mm, addr, 1,
+ write, 1, &page, &vma);
+ if (rc <= 0)
+ break;
+
+ bytes = len;
+ offset = addr & (PAGE_SIZE-1);
+ if (bytes > PAGE_SIZE-offset)
+ bytes = PAGE_SIZE-offset;
+
+ maddr = kmap(page);
+ if (write) {
+ copy_to_user_page(vma, page, addr,
+ maddr + offset, buf, bytes);
+ set_page_dirty_lock(page);
+ } else {
+ copy_from_user_page(vma, page, addr,
+ buf, maddr + offset, bytes);
+ }
+ kunmap(page);
+ page_cache_release(page);
+ len -= bytes;
+ buf += bytes;
+ addr += bytes;
+ }
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+
+ return buf - old_buf;
+}
+
+/* Read the environment variable of current process specified by @key. */
+int cfs_get_environ(const char *key, char *value, int *val_len)
+{
+ struct mm_struct *mm;
+	char *buffer;
+ int buf_len = PAGE_CACHE_SIZE;
+ int key_len = strlen(key);
+ unsigned long addr;
+ int rc;
+
+ buffer = kmalloc(buf_len, GFP_USER);
+ if (!buffer)
+ return -ENOMEM;
+
+ mm = get_task_mm(current);
+ if (!mm) {
+ kfree(buffer);
+ return -EINVAL;
+ }
+
+ /* Avoid deadlocks on mmap_sem if called from sys_mmap_pgoff(),
+ * which is already holding mmap_sem for writes. If some other
+ * thread gets the write lock in the meantime, this thread will
+ * block, but at least it won't deadlock on itself. LU-1735 */
+	if (down_read_trylock(&mm->mmap_sem) == 0) {
+		kfree(buffer);
+		mmput(mm);
+		return -EDEADLK;
+	}
+	up_read(&mm->mmap_sem);
+
+ addr = mm->env_start;
+ while (addr < mm->env_end) {
+ int this_len, retval, scan_len;
+ char *env_start, *env_end;
+
+ memset(buffer, 0, buf_len);
+
+ this_len = min_t(int, mm->env_end - addr, buf_len);
+ retval = cfs_access_process_vm(current, addr, buffer,
+ this_len, 0);
+ if (retval != this_len)
+ break;
+
+ addr += retval;
+
+		/* Parse the buffer to find the specified key/value pair.
+		 * The "key=value" entries are separated by '\0'. */
+ env_start = buffer;
+ scan_len = this_len;
+ while (scan_len) {
+ char *entry;
+ int entry_len;
+
+ env_end = memscan(env_start, '\0', scan_len);
+ LASSERT(env_end >= env_start &&
+ env_end <= env_start + scan_len);
+
+			/* The last entry of this buffer crosses the buffer
+			 * boundary; reread it in the next cycle. */
+ if (unlikely(env_end - env_start == scan_len)) {
+				/* This entry is too large to fit in the buffer */
+				if (unlikely(scan_len == this_len)) {
+					CERROR("Environment variable too long\n");
+ GOTO(out, rc = -EINVAL);
+ }
+ addr -= scan_len;
+ break;
+ }
+
+ entry = env_start;
+ entry_len = env_end - env_start;
+
+ /* Key length + length of '=' */
+ if (entry_len > key_len + 1 &&
+ !memcmp(entry, key, key_len)) {
+ entry += key_len + 1;
+ entry_len -= key_len + 1;
+				/* The 'value' buffer passed in is too small. */
+ if (entry_len >= *val_len)
+ GOTO(out, rc = -EOVERFLOW);
+
+ memcpy(value, entry, entry_len);
+ *val_len = entry_len;
+ GOTO(out, rc = 0);
+ }
+
+ scan_len -= (env_end - env_start + 1);
+ env_start = env_end + 1;
+ }
+ }
+ GOTO(out, rc = -ENOENT);
+
+out:
+	mmput(mm);
+	kfree(buffer);
+	return rc;
+}
+EXPORT_SYMBOL(cfs_get_environ);
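+
+/*
+ * Example caller (sketch only; the variable name and key are hypothetical).
+ * The value is copied without a terminating NUL and *val_len is set to its
+ * length:
+ *
+ *	char val[32];
+ *	int len = sizeof(val);
+ *
+ *	if (cfs_get_environ("LUSTRE_JOBID", val, &len) == 0)
+ *		val[len] = '\0';
+ */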
+
+EXPORT_SYMBOL(cfs_curproc_groups_nr);
+EXPORT_SYMBOL(cfs_cap_raise);
+EXPORT_SYMBOL(cfs_cap_lower);
+EXPORT_SYMBOL(cfs_cap_raised);
+EXPORT_SYMBOL(cfs_curproc_cap_pack);
+EXPORT_SYMBOL(cfs_curproc_cap_unpack);
+EXPORT_SYMBOL(cfs_capable);
+EXPORT_SYMBOL(current_is_32bit);
+
+/*
+ * Local variables:
+ * c-indentation-style: "K&R"
+ * c-basic-offset: 8
+ * tab-width: 8
+ * fill-column: 80
+ * scroll-step: 1
+ * End:
+ */
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c
new file mode 100644
index 0000000..ab1e731
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c
@@ -0,0 +1,203 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/libcfs/linux/linux-debug.c
+ *
+ * Author: Phil Schwan <phil@clusterfs.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/notifier.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/fs.h>
+#include <asm/uaccess.h>
+#include <linux/miscdevice.h>
+
+# define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/libcfs/linux/portals_compat25.h>
+
+#include "tracefile.h"
+
+#include <linux/kallsyms.h>
+
+char lnet_upcall[1024] = "/usr/lib/lustre/lnet_upcall";
+char lnet_debug_log_upcall[1024] = "/usr/lib/lustre/lnet_debug_log_upcall";
+
+/**
+ * Upcall function once a Lustre log has been dumped.
+ *
+ * \param file path of the dumped log
+ */
+void libcfs_run_debug_log_upcall(char *file)
+{
+ char *argv[3];
+ int rc;
+ char *envp[] = {
+ "HOME=/",
+ "PATH=/sbin:/bin:/usr/sbin:/usr/bin",
+ NULL};
+
+ argv[0] = lnet_debug_log_upcall;
+
+ LASSERTF(file != NULL, "called on a null filename\n");
+	argv[1] = file;	/* only need to pass the path of the file */
+
+ argv[2] = NULL;
+
+ rc = USERMODEHELPER(argv[0], argv, envp);
+ if (rc < 0 && rc != -ENOENT) {
+ CERROR("Error %d invoking LNET debug log upcall %s %s; "
+ "check /proc/sys/lnet/debug_log_upcall\n",
+ rc, argv[0], argv[1]);
+ } else {
+ CDEBUG(D_HA, "Invoked LNET debug log upcall %s %s\n",
+ argv[0], argv[1]);
+ }
+}
+
+void libcfs_run_upcall(char **argv)
+{
+ int rc;
+ int argc;
+ char *envp[] = {
+ "HOME=/",
+ "PATH=/sbin:/bin:/usr/sbin:/usr/bin",
+ NULL};
+
+ argv[0] = lnet_upcall;
+ argc = 1;
+ while (argv[argc] != NULL)
+ argc++;
+
+ LASSERT(argc >= 2);
+
+ rc = USERMODEHELPER(argv[0], argv, envp);
+ if (rc < 0 && rc != -ENOENT) {
+ CERROR("Error %d invoking LNET upcall %s %s%s%s%s%s%s%s%s; "
+ "check /proc/sys/lnet/upcall\n",
+ rc, argv[0], argv[1],
+ argc < 3 ? "" : ",", argc < 3 ? "" : argv[2],
+ argc < 4 ? "" : ",", argc < 4 ? "" : argv[3],
+ argc < 5 ? "" : ",", argc < 5 ? "" : argv[4],
+ argc < 6 ? "" : ",...");
+ } else {
+ CDEBUG(D_HA, "Invoked LNET upcall %s %s%s%s%s%s%s%s%s\n",
+ argv[0], argv[1],
+ argc < 3 ? "" : ",", argc < 3 ? "" : argv[2],
+ argc < 4 ? "" : ",", argc < 4 ? "" : argv[3],
+ argc < 5 ? "" : ",", argc < 5 ? "" : argv[4],
+ argc < 6 ? "" : ",...");
+ }
+}
+
+void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *msgdata)
+{
+ char *argv[6];
+ char buf[32];
+
+	snprintf(buf, sizeof(buf), "%d", msgdata->msg_line);
+
+ argv[1] = "LBUG";
+ argv[2] = (char *)msgdata->msg_file;
+ argv[3] = (char *)msgdata->msg_fn;
+ argv[4] = buf;
+ argv[5] = NULL;
+
+	libcfs_run_upcall(argv);
+}
+
+/* coverity[+kill] */
+void lbug_with_loc(struct libcfs_debug_msg_data *msgdata)
+{
+ libcfs_catastrophe = 1;
+ libcfs_debug_msg(msgdata, "LBUG\n");
+
+ if (in_interrupt()) {
+ panic("LBUG in interrupt.\n");
+ /* not reached */
+ }
+
+ dump_stack();
+ if (!libcfs_panic_on_lbug)
+ libcfs_debug_dumplog();
+ libcfs_run_lbug_upcall(msgdata);
+ if (libcfs_panic_on_lbug)
+ panic("LBUG");
+ set_task_state(current, TASK_UNINTERRUPTIBLE);
+ while (1)
+ schedule();
+}
+
+static int panic_notifier(struct notifier_block *self, unsigned long unused1,
+ void *unused2)
+{
+ if (libcfs_panic_in_progress)
+ return 0;
+
+ libcfs_panic_in_progress = 1;
+ mb();
+
+ return 0;
+}
+
+static struct notifier_block libcfs_panic_notifier = {
+ .notifier_call = panic_notifier,
+ .next = NULL,
+ .priority = 10000,
+};
+
+void libcfs_register_panic_notifier(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list, &libcfs_panic_notifier);
+}
+
+void libcfs_unregister_panic_notifier(void)
+{
+ atomic_notifier_chain_unregister(&panic_notifier_list, &libcfs_panic_notifier);
+}
+
+EXPORT_SYMBOL(libcfs_run_upcall);
+EXPORT_SYMBOL(libcfs_run_lbug_upcall);
+EXPORT_SYMBOL(lbug_with_loc);
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c
new file mode 100644
index 0000000..55296a3
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c
@@ -0,0 +1,182 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/libcfs/libcfs.h>
+
+#define LNET_MINOR 240
+
+int libcfs_ioctl_getdata(char *buf, char *end, void *arg)
+{
+ struct libcfs_ioctl_hdr *hdr;
+ struct libcfs_ioctl_data *data;
+ int err;
+
+ hdr = (struct libcfs_ioctl_hdr *)buf;
+ data = (struct libcfs_ioctl_data *)buf;
+
+ err = copy_from_user(buf, (void *)arg, sizeof(*hdr));
+ if (err)
+ return err;
+
+ if (hdr->ioc_version != LIBCFS_IOCTL_VERSION) {
+ CERROR("PORTALS: version mismatch kernel vs application\n");
+ return -EINVAL;
+ }
+
+ if (hdr->ioc_len + buf >= end) {
+ CERROR("PORTALS: user buffer exceeds kernel buffer\n");
+ return -EINVAL;
+ }
+
+
+ if (hdr->ioc_len < sizeof(struct libcfs_ioctl_data)) {
+ CERROR("PORTALS: user buffer too small for ioctl\n");
+ return -EINVAL;
+ }
+
+ err = copy_from_user(buf, (void *)arg, hdr->ioc_len);
+ if (err)
+ return err;
+
+ if (libcfs_ioctl_is_invalid(data)) {
+ CERROR("PORTALS: ioctl not correctly formatted\n");
+ return -EINVAL;
+ }
+
+ if (data->ioc_inllen1)
+ data->ioc_inlbuf1 = &data->ioc_bulk[0];
+
+ if (data->ioc_inllen2)
+ data->ioc_inlbuf2 = &data->ioc_bulk[0] +
+ cfs_size_round(data->ioc_inllen1);
+
+ return 0;
+}
+
+int libcfs_ioctl_popdata(void *arg, void *data, int size)
+{
+ if (copy_to_user((char *)arg, data, size))
+ return -EFAULT;
+ return 0;
+}
+
+extern struct cfs_psdev_ops libcfs_psdev_ops;
+
+static int
+libcfs_psdev_open(struct inode * inode, struct file * file)
+{
+ struct libcfs_device_userstate **pdu = NULL;
+ int rc = 0;
+
+ if (!inode)
+ return (-EINVAL);
+ pdu = (struct libcfs_device_userstate **)&file->private_data;
+ if (libcfs_psdev_ops.p_open != NULL)
+ rc = libcfs_psdev_ops.p_open(0, (void *)pdu);
+ else
+ return (-EPERM);
+ return rc;
+}
+
+/* called when closing the /dev/lnet device */
+static int
+libcfs_psdev_release(struct inode * inode, struct file * file)
+{
+ struct libcfs_device_userstate *pdu;
+ int rc = 0;
+
+ if (!inode)
+ return (-EINVAL);
+ pdu = file->private_data;
+ if (libcfs_psdev_ops.p_close != NULL)
+ rc = libcfs_psdev_ops.p_close(0, (void *)pdu);
+ else
+ rc = -EPERM;
+ return rc;
+}
+
+static long libcfs_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct cfs_psdev_file pfile;
+ int rc = 0;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if ( _IOC_TYPE(cmd) != IOC_LIBCFS_TYPE ||
+ _IOC_NR(cmd) < IOC_LIBCFS_MIN_NR ||
+ _IOC_NR(cmd) > IOC_LIBCFS_MAX_NR ) {
+ CDEBUG(D_IOCTL, "invalid ioctl ( type %d, nr %d, size %d )\n",
+ _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd));
+ return (-EINVAL);
+ }
+
+ /* Handle platform-dependent IOC requests */
+ switch (cmd) {
+ case IOC_LIBCFS_PANIC:
+ if (!cfs_capable(CFS_CAP_SYS_BOOT))
+ return (-EPERM);
+ panic("debugctl-invoked panic");
+ return (0);
+ case IOC_LIBCFS_MEMHOG:
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ return -EPERM;
+		/* fall through */
+ }
+
+ pfile.off = 0;
+ pfile.private_data = file->private_data;
+ if (libcfs_psdev_ops.p_ioctl != NULL)
+ rc = libcfs_psdev_ops.p_ioctl(&pfile, cmd, (void *)arg);
+ else
+ rc = -EPERM;
+ return (rc);
+}
+
+static struct file_operations libcfs_fops = {
+ .unlocked_ioctl = libcfs_ioctl,
+ .open = libcfs_psdev_open,
+ .release = libcfs_psdev_release,
+};
+
+struct miscdevice libcfs_dev = {
+ .minor = LNET_MINOR,
+ .name = "lnet",
+ .fops = &libcfs_fops,
+};
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c
new file mode 100644
index 0000000..cc9829f
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c
@@ -0,0 +1,258 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs_struct.h>
+#include <linux/sched.h>
+
+#include <linux/libcfs/libcfs.h>
+
+#if defined(CONFIG_KGDB)
+#include <asm/kgdb.h>
+#endif
+
+#define LINUX_WAITQ(w) ((wait_queue_t *) w)
+#define LINUX_WAITQ_HEAD(w) ((wait_queue_head_t *) w)
+
+void
+init_waitqueue_entry_current(wait_queue_t *link)
+{
+ init_waitqueue_entry(LINUX_WAITQ(link), current);
+}
+EXPORT_SYMBOL(init_waitqueue_entry_current);
+
+/**
+ * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
+ * waiting threads, which is not always desirable because all threads will
+ * be woken up again and again, even though the user may only need a few of
+ * them to be active most of the time. This is bad for performance because
+ * the cache can be polluted by different threads.
+ *
+ * A LIFO list can resolve this problem because we always wake up the most
+ * recently active thread by default.
+ *
+ * NB: please don't call non-exclusive & exclusive wait on the same
+ * waitq if add_wait_queue_exclusive_head is used.
+ */
+void
+add_wait_queue_exclusive_head(wait_queue_head_t *waitq, wait_queue_t *link)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
+ __add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
+ spin_unlock_irqrestore(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
+}
+EXPORT_SYMBOL(add_wait_queue_exclusive_head);
+
+void
+waitq_wait(wait_queue_t *link, long state)
+{
+ schedule();
+}
+EXPORT_SYMBOL(waitq_wait);
+
+int64_t
+waitq_timedwait(wait_queue_t *link, long state, int64_t timeout)
+{
+ return schedule_timeout(timeout);
+}
+EXPORT_SYMBOL(waitq_timedwait);
+
+void
+schedule_timeout_and_set_state(long state, int64_t timeout)
+{
+ set_current_state(state);
+ schedule_timeout(timeout);
+}
+EXPORT_SYMBOL(schedule_timeout_and_set_state);
+
+/* deschedule for a bit... */
+void
+cfs_pause(cfs_duration_t ticks)
+{
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(ticks);
+}
+EXPORT_SYMBOL(cfs_pause);
+
+void cfs_init_timer(struct timer_list *t)
+{
+ init_timer(t);
+}
+EXPORT_SYMBOL(cfs_init_timer);
+
+void cfs_timer_init(struct timer_list *t, cfs_timer_func_t *func, void *arg)
+{
+ init_timer(t);
+ t->function = func;
+ t->data = (unsigned long)arg;
+}
+EXPORT_SYMBOL(cfs_timer_init);
+
+void cfs_timer_done(struct timer_list *t)
+{
+ return;
+}
+EXPORT_SYMBOL(cfs_timer_done);
+
+void cfs_timer_arm(struct timer_list *t, cfs_time_t deadline)
+{
+ mod_timer(t, deadline);
+}
+EXPORT_SYMBOL(cfs_timer_arm);
+
+void cfs_timer_disarm(struct timer_list *t)
+{
+ del_timer(t);
+}
+EXPORT_SYMBOL(cfs_timer_disarm);
+
+int cfs_timer_is_armed(struct timer_list *t)
+{
+ return timer_pending(t);
+}
+EXPORT_SYMBOL(cfs_timer_is_armed);
+
+cfs_time_t cfs_timer_deadline(struct timer_list *t)
+{
+ return t->expires;
+}
+EXPORT_SYMBOL(cfs_timer_deadline);
+
+void cfs_enter_debugger(void)
+{
+#if defined(CONFIG_KGDB)
+// BREAKPOINT();
+#else
+ /* nothing */
+#endif
+}
+
+
+sigset_t
+cfs_block_allsigs(void)
+{
+ unsigned long flags;
+ sigset_t old;
+
+ SIGNAL_MASK_LOCK(current, flags);
+ old = current->blocked;
+ sigfillset(&current->blocked);
+ recalc_sigpending();
+ SIGNAL_MASK_UNLOCK(current, flags);
+
+ return old;
+}
+
+sigset_t cfs_block_sigs(unsigned long sigs)
+{
+ unsigned long flags;
+ sigset_t old;
+
+ SIGNAL_MASK_LOCK(current, flags);
+ old = current->blocked;
+ sigaddsetmask(&current->blocked, sigs);
+ recalc_sigpending();
+ SIGNAL_MASK_UNLOCK(current, flags);
+ return old;
+}
+
+/* Block all signals except for the @sigs */
+sigset_t cfs_block_sigsinv(unsigned long sigs)
+{
+ unsigned long flags;
+ sigset_t old;
+
+ SIGNAL_MASK_LOCK(current, flags);
+ old = current->blocked;
+ sigaddsetmask(&current->blocked, ~sigs);
+ recalc_sigpending();
+ SIGNAL_MASK_UNLOCK(current, flags);
+
+ return old;
+}
+
+void
+cfs_restore_sigs (sigset_t old)
+{
+ unsigned long flags;
+
+ SIGNAL_MASK_LOCK(current, flags);
+ current->blocked = old;
+ recalc_sigpending();
+ SIGNAL_MASK_UNLOCK(current, flags);
+}
+
+int
+cfs_signal_pending(void)
+{
+ return signal_pending(current);
+}
+
+void
+cfs_clear_sigpending(void)
+{
+ unsigned long flags;
+
+ SIGNAL_MASK_LOCK(current, flags);
+ clear_tsk_thread_flag(current, TIF_SIGPENDING);
+ SIGNAL_MASK_UNLOCK(current, flags);
+}
+
+int
+libcfs_arch_init(void)
+{
+ return 0;
+}
+
+void
+libcfs_arch_cleanup(void)
+{
+ return;
+}
+
+EXPORT_SYMBOL(libcfs_arch_init);
+EXPORT_SYMBOL(libcfs_arch_cleanup);
+EXPORT_SYMBOL(cfs_enter_debugger);
+EXPORT_SYMBOL(cfs_block_allsigs);
+EXPORT_SYMBOL(cfs_block_sigs);
+EXPORT_SYMBOL(cfs_block_sigsinv);
+EXPORT_SYMBOL(cfs_restore_sigs);
+EXPORT_SYMBOL(cfs_signal_pending);
+EXPORT_SYMBOL(cfs_clear_sigpending);
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
new file mode 100644
index 0000000..fc6c977
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
@@ -0,0 +1,578 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/libcfs/linux/linux-proc.c
+ *
+ * Author: Zach Brown <zab@zabbo.net>
+ * Author: Peter J. Braam <braam@clusterfs.com>
+ * Author: Phil Schwan <phil@clusterfs.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <net/sock.h>
+#include <linux/uio.h>
+
+#include <asm/uaccess.h>
+
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/list.h>
+
+#include <linux/proc_fs.h>
+#include <linux/sysctl.h>
+
+# define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/libcfs/libcfs.h>
+#include <asm/div64.h>
+#include "tracefile.h"
+
+#ifdef CONFIG_SYSCTL
+static ctl_table_header_t *lnet_table_header = NULL;
+#endif
+extern char lnet_upcall[1024];
+/**
+ * The path of debug log dump upcall script.
+ */
+extern char lnet_debug_log_upcall[1024];
+
+#define CTL_LNET (0x100)
+enum {
+ PSDEV_DEBUG = 1, /* control debugging */
+ PSDEV_SUBSYSTEM_DEBUG, /* control per-subsystem debugging */
+ PSDEV_PRINTK, /* force all messages to console */
+ PSDEV_CONSOLE_RATELIMIT, /* ratelimit console messages */
+ PSDEV_CONSOLE_MAX_DELAY_CS, /* maximum delay over which we skip messages */
+ PSDEV_CONSOLE_MIN_DELAY_CS, /* initial delay over which we skip messages */
+ PSDEV_CONSOLE_BACKOFF, /* delay increase factor */
+ PSDEV_DEBUG_PATH, /* crashdump log location */
+ PSDEV_DEBUG_DUMP_PATH, /* crashdump tracelog location */
+ PSDEV_CPT_TABLE, /* information about cpu partitions */
+ PSDEV_LNET_UPCALL, /* User mode upcall script */
+ PSDEV_LNET_MEMUSED, /* bytes currently PORTAL_ALLOCated */
+ PSDEV_LNET_CATASTROPHE, /* if we have LBUGged or panic'd */
+ PSDEV_LNET_PANIC_ON_LBUG, /* flag to panic on LBUG */
+ PSDEV_LNET_DUMP_KERNEL, /* snapshot kernel debug buffer to file */
+ PSDEV_LNET_DAEMON_FILE, /* spool kernel debug buffer to file */
+ PSDEV_LNET_DEBUG_MB, /* size of debug buffer */
+ PSDEV_LNET_DEBUG_LOG_UPCALL, /* debug log upcall script */
+ PSDEV_LNET_WATCHDOG_RATELIMIT, /* ratelimit watchdog messages */
+ PSDEV_LNET_FORCE_LBUG, /* hook to force an LBUG */
+ PSDEV_LNET_FAIL_LOC, /* control test failures instrumentation */
+ PSDEV_LNET_FAIL_VAL, /* userdata for fail loc */
+};
+
+int
+proc_call_handler(void *data, int write,
+ loff_t *ppos, void *buffer, size_t *lenp,
+ int (*handler)(void *data, int write,
+ loff_t pos, void *buffer, int len))
+{
+ int rc = handler(data, write, *ppos, buffer, *lenp);
+
+ if (rc < 0)
+ return rc;
+
+ if (write) {
+ *ppos += *lenp;
+ } else {
+ *lenp = rc;
+ *ppos += rc;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(proc_call_handler);
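+
+/*
+ * The inner handler receives an absolute position and the usable buffer
+ * length: on reads it returns the number of bytes produced, on writes it
+ * consumes the whole buffer.  proc_call_handler() then advances *ppos and
+ * updates *lenp.  Minimal sketch of such a handler (hypothetical; the
+ * wrapping into a sysctl handler is done by DECLARE_PROC_HANDLER, defined
+ * elsewhere):
+ *
+ *	static int __proc_example(void *data, int write,
+ *				  loff_t pos, void *buffer, int nob)
+ *	{
+ *		static const char str[] = "example\n";	-- hypothetical datum
+ *
+ *		if (write)
+ *			return 0;		-- accept and ignore writes
+ *		if (pos >= strlen(str))
+ *			return 0;
+ *		return cfs_trace_copyout_string(buffer, nob, str + pos, NULL);
+ *	}
+ */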
+
+static int __proc_dobitmasks(void *data, int write,
+ loff_t pos, void *buffer, int nob)
+{
+ const int tmpstrlen = 512;
+ char *tmpstr;
+ int rc;
+ unsigned int *mask = data;
+ int is_subsys = (mask == &libcfs_subsystem_debug) ? 1 : 0;
+ int is_printk = (mask == &libcfs_printk) ? 1 : 0;
+
+ rc = cfs_trace_allocate_string_buffer(&tmpstr, tmpstrlen);
+ if (rc < 0)
+ return rc;
+
+ if (!write) {
+ libcfs_debug_mask2str(tmpstr, tmpstrlen, *mask, is_subsys);
+ rc = strlen(tmpstr);
+
+ if (pos >= rc) {
+ rc = 0;
+ } else {
+ rc = cfs_trace_copyout_string(buffer, nob,
+ tmpstr + pos, "\n");
+ }
+ } else {
+ rc = cfs_trace_copyin_string(tmpstr, tmpstrlen, buffer, nob);
+ if (rc < 0) {
+ cfs_trace_free_string_buffer(tmpstr, tmpstrlen);
+ return rc;
+ }
+
+ rc = libcfs_debug_str2mask(mask, tmpstr, is_subsys);
+ /* Always print LBUG/LASSERT to console, so keep this mask */
+ if (is_printk)
+ *mask |= D_EMERG;
+ }
+
+ cfs_trace_free_string_buffer(tmpstr, tmpstrlen);
+ return rc;
+}
+
+DECLARE_PROC_HANDLER(proc_dobitmasks)
+
+static int min_watchdog_ratelimit = 0; /* disable ratelimiting */
+static int max_watchdog_ratelimit = (24*60*60); /* limit to once per day */
+
+static int __proc_dump_kernel(void *data, int write,
+ loff_t pos, void *buffer, int nob)
+{
+ if (!write)
+ return 0;
+
+ return cfs_trace_dump_debug_buffer_usrstr(buffer, nob);
+}
+
+DECLARE_PROC_HANDLER(proc_dump_kernel)
+
+static int __proc_daemon_file(void *data, int write,
+ loff_t pos, void *buffer, int nob)
+{
+ if (!write) {
+ int len = strlen(cfs_tracefile);
+
+ if (pos >= len)
+ return 0;
+
+ return cfs_trace_copyout_string(buffer, nob,
+ cfs_tracefile + pos, "\n");
+ }
+
+ return cfs_trace_daemon_command_usrstr(buffer, nob);
+}
+
+DECLARE_PROC_HANDLER(proc_daemon_file)
+
+static int __proc_debug_mb(void *data, int write,
+ loff_t pos, void *buffer, int nob)
+{
+ if (!write) {
+ char tmpstr[32];
+ int len = snprintf(tmpstr, sizeof(tmpstr), "%d",
+ cfs_trace_get_debug_mb());
+
+ if (pos >= len)
+ return 0;
+
+ return cfs_trace_copyout_string(buffer, nob, tmpstr + pos,
+ "\n");
+ }
+
+ return cfs_trace_set_debug_mb_usrstr(buffer, nob);
+}
+
+DECLARE_PROC_HANDLER(proc_debug_mb)
+
+int LL_PROC_PROTO(proc_console_max_delay_cs)
+{
+ int rc, max_delay_cs;
+ ctl_table_t dummy = *table;
+ cfs_duration_t d;
+
+ dummy.data = &max_delay_cs;
+ dummy.proc_handler = &proc_dointvec;
+
+ if (!write) { /* read */
+ max_delay_cs = cfs_duration_sec(libcfs_console_max_delay * 100);
+ rc = ll_proc_dointvec(&dummy, write, filp, buffer, lenp, ppos);
+ return rc;
+ }
+
+ /* write */
+ max_delay_cs = 0;
+ rc = ll_proc_dointvec(&dummy, write, filp, buffer, lenp, ppos);
+ if (rc < 0)
+ return rc;
+ if (max_delay_cs <= 0)
+ return -EINVAL;
+
+ d = cfs_time_seconds(max_delay_cs) / 100;
+ if (d == 0 || d < libcfs_console_min_delay)
+ return -EINVAL;
+ libcfs_console_max_delay = d;
+
+ return rc;
+}
+
+int LL_PROC_PROTO(proc_console_min_delay_cs)
+{
+ int rc, min_delay_cs;
+ ctl_table_t dummy = *table;
+ cfs_duration_t d;
+
+ dummy.data = &min_delay_cs;
+ dummy.proc_handler = &proc_dointvec;
+
+ if (!write) { /* read */
+ min_delay_cs = cfs_duration_sec(libcfs_console_min_delay * 100);
+ rc = ll_proc_dointvec(&dummy, write, filp, buffer, lenp, ppos);
+ return rc;
+ }
+
+ /* write */
+ min_delay_cs = 0;
+ rc = ll_proc_dointvec(&dummy, write, filp, buffer, lenp, ppos);
+ if (rc < 0)
+ return rc;
+ if (min_delay_cs <= 0)
+ return -EINVAL;
+
+ d = cfs_time_seconds(min_delay_cs) / 100;
+ if (d == 0 || d > libcfs_console_max_delay)
+ return -EINVAL;
+ libcfs_console_min_delay = d;
+
+ return rc;
+}
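+
+/*
+ * Both console delay knobs are exposed in centiseconds but stored
+ * internally in jiffies.  Worked example (assuming HZ = 100): writing 600
+ * to console_max_delay_centisecs yields d = cfs_time_seconds(600) / 100 =
+ * 600 * HZ / 100 = 600 jiffies, i.e. a 6 second maximum delay; reads apply
+ * the inverse conversion via cfs_duration_sec(value * 100).
+ */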
+
+int LL_PROC_PROTO(proc_console_backoff)
+{
+ int rc, backoff;
+ ctl_table_t dummy = *table;
+
+ dummy.data = &backoff;
+ dummy.proc_handler = &proc_dointvec;
+
+ if (!write) { /* read */
+ backoff = libcfs_console_backoff;
+ rc = ll_proc_dointvec(&dummy, write, filp, buffer, lenp, ppos);
+ return rc;
+ }
+
+ /* write */
+ backoff = 0;
+ rc = ll_proc_dointvec(&dummy, write, filp, buffer, lenp, ppos);
+ if (rc < 0)
+ return rc;
+ if (backoff <= 0)
+ return -EINVAL;
+
+ libcfs_console_backoff = backoff;
+
+ return rc;
+}
+
+int LL_PROC_PROTO(libcfs_force_lbug)
+{
+ if (write)
+ LBUG();
+ return 0;
+}
+
+int LL_PROC_PROTO(proc_fail_loc)
+{
+ int rc;
+ long old_fail_loc = cfs_fail_loc;
+
+ rc = ll_proc_dolongvec(table, write, filp, buffer, lenp, ppos);
+ if (old_fail_loc != cfs_fail_loc)
+ wake_up(&cfs_race_waitq);
+ return rc;
+}
+
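+/*
+ * __proc_cpt_table() below prints the CPU partition table into a kernel
+ * buffer whose size is not known up front: it starts at 4096 bytes and,
+ * whenever cfs_cpt_table_print() returns -EFBIG, frees the buffer and
+ * retries with double the size (4096 -> 8192 -> 16384 -> ...), so even a
+ * large table is handled without guessing a fixed maximum.
+ */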
+static int __proc_cpt_table(void *data, int write,
+ loff_t pos, void *buffer, int nob)
+{
+ char *buf = NULL;
+ int len = 4096;
+ int rc = 0;
+
+ if (write)
+ return -EPERM;
+
+ LASSERT(cfs_cpt_table != NULL);
+
+ while (1) {
+ LIBCFS_ALLOC(buf, len);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ rc = cfs_cpt_table_print(cfs_cpt_table, buf, len);
+ if (rc >= 0)
+ break;
+
+ LIBCFS_FREE(buf, len);
+ if (rc == -EFBIG) {
+ len <<= 1;
+ continue;
+ }
+ goto out;
+ }
+
+ if (pos >= rc) {
+ rc = 0;
+ goto out;
+ }
+
+ rc = cfs_trace_copyout_string(buffer, nob, buf + pos, NULL);
+ out:
+ if (buf != NULL)
+ LIBCFS_FREE(buf, len);
+ return rc;
+}
+DECLARE_PROC_HANDLER(proc_cpt_table)
+
+static ctl_table_t lnet_table[] = {
+ /*
+ * NB No .strategy entries have been provided since sysctl(8) prefers
+ * to go via /proc for portability.
+ */
+ {
+ INIT_CTL_NAME(PSDEV_DEBUG)
+ .procname = "debug",
+ .data = &libcfs_debug,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dobitmasks,
+ },
+ {
+ INIT_CTL_NAME(PSDEV_SUBSYSTEM_DEBUG)
+ .procname = "subsystem_debug",
+ .data = &libcfs_subsystem_debug,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dobitmasks,
+ },
+ {
+ INIT_CTL_NAME(PSDEV_PRINTK)
+ .procname = "printk",
+ .data = &libcfs_printk,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dobitmasks,
+ },
+ {
+ INIT_CTL_NAME(PSDEV_CONSOLE_RATELIMIT)
+ .procname = "console_ratelimit",
+ .data = &libcfs_console_ratelimit,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ INIT_CTL_NAME(PSDEV_CONSOLE_MAX_DELAY_CS)
+ .procname = "console_max_delay_centisecs",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_console_max_delay_cs
+ },
+ {
+ INIT_CTL_NAME(PSDEV_CONSOLE_MIN_DELAY_CS)
+ .procname = "console_min_delay_centisecs",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_console_min_delay_cs
+ },
+ {
+ INIT_CTL_NAME(PSDEV_CONSOLE_BACKOFF)
+ .procname = "console_backoff",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_console_backoff
+ },
+
+ {
+ INIT_CTL_NAME(PSDEV_DEBUG_PATH)
+ .procname = "debug_path",
+ .data = libcfs_debug_file_path_arr,
+ .maxlen = sizeof(libcfs_debug_file_path_arr),
+ .mode = 0644,
+ .proc_handler = &proc_dostring,
+ },
+
+ {
+ INIT_CTL_NAME(PSDEV_CPT_TABLE)
+ .procname = "cpu_partition_table",
+ .maxlen = 128,
+ .mode = 0444,
+ .proc_handler = &proc_cpt_table,
+ },
+
+ {
+ INIT_CTL_NAME(PSDEV_LNET_UPCALL)
+ .procname = "upcall",
+ .data = lnet_upcall,
+ .maxlen = sizeof(lnet_upcall),
+ .mode = 0644,
+ .proc_handler = &proc_dostring,
+ },
+ {
+ INIT_CTL_NAME(PSDEV_LNET_DEBUG_LOG_UPCALL)
+ .procname = "debug_log_upcall",
+ .data = lnet_debug_log_upcall,
+ .maxlen = sizeof(lnet_debug_log_upcall),
+ .mode = 0644,
+ .proc_handler = &proc_dostring,
+ },
+ {
+ INIT_CTL_NAME(PSDEV_LNET_MEMUSED)
+ .procname = "lnet_memused",
+ .data = (int *)&libcfs_kmemory.counter,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec,
+ INIT_STRATEGY(&sysctl_intvec)
+ },
+ {
+ INIT_CTL_NAME(PSDEV_LNET_CATASTROPHE)
+ .procname = "catastrophe",
+ .data = &libcfs_catastrophe,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec,
+ INIT_STRATEGY(&sysctl_intvec)
+ },
+ {
+ INIT_CTL_NAME(PSDEV_LNET_PANIC_ON_LBUG)
+ .procname = "panic_on_lbug",
+ .data = &libcfs_panic_on_lbug,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ INIT_STRATEGY(&sysctl_intvec)
+ },
+ {
+ INIT_CTL_NAME(PSDEV_LNET_DUMP_KERNEL)
+ .procname = "dump_kernel",
+ .maxlen = 256,
+ .mode = 0200,
+ .proc_handler = &proc_dump_kernel,
+ },
+ {
+ INIT_CTL_NAME(PSDEV_LNET_DAEMON_FILE)
+ .procname = "daemon_file",
+ .mode = 0644,
+ .maxlen = 256,
+ .proc_handler = &proc_daemon_file,
+ },
+ {
+ INIT_CTL_NAME(PSDEV_LNET_DEBUG_MB)
+ .procname = "debug_mb",
+ .mode = 0644,
+ .proc_handler = &proc_debug_mb,
+ },
+ {
+ INIT_CTL_NAME(PSDEV_LNET_WATCHDOG_RATELIMIT)
+ .procname = "watchdog_ratelimit",
+ .data = &libcfs_watchdog_ratelimit,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .extra1 = &min_watchdog_ratelimit,
+ .extra2 = &max_watchdog_ratelimit,
+ },
+ { INIT_CTL_NAME(PSDEV_LNET_FORCE_LBUG)
+ .procname = "force_lbug",
+ .data = NULL,
+ .maxlen = 0,
+ .mode = 0200,
+ .proc_handler = &libcfs_force_lbug
+ },
+ {
+ INIT_CTL_NAME(PSDEV_LNET_FAIL_LOC)
+ .procname = "fail_loc",
+ .data = &cfs_fail_loc,
+ .maxlen = sizeof(cfs_fail_loc),
+ .mode = 0644,
+ .proc_handler = &proc_fail_loc
+ },
+ {
+ INIT_CTL_NAME(PSDEV_LNET_FAIL_VAL)
+ .procname = "fail_val",
+ .data = &cfs_fail_val,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ INIT_CTL_NAME(0)
+ }
+};
+
+#ifdef CONFIG_SYSCTL
+static ctl_table_t top_table[] = {
+ {
+ INIT_CTL_NAME(CTL_LNET)
+ .procname = "lnet",
+ .mode = 0555,
+ .data = NULL,
+ .maxlen = 0,
+ .child = lnet_table,
+ },
+ {
+ INIT_CTL_NAME(0)
+ }
+};
+#endif
+
+int insert_proc(void)
+{
+#ifdef CONFIG_SYSCTL
+ if (lnet_table_header == NULL)
+ lnet_table_header = register_sysctl_table(top_table);
+#endif
+ return 0;
+}
+
+void remove_proc(void)
+{
+#ifdef CONFIG_SYSCTL
+ if (lnet_table_header != NULL)
+ unregister_sysctl_table(lnet_table_header);
+
+ lnet_table_header = NULL;
+#endif
+}
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c
new file mode 100644
index 0000000..e6069d7
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c
@@ -0,0 +1,658 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/libcfs/libcfs.h>
+
+#include <linux/if.h>
+#include <linux/in.h>
+#include <linux/file.h>
+/* For sys_open & sys_close */
+#include <linux/syscalls.h>
+
+int
+libcfs_sock_ioctl(int cmd, unsigned long arg)
+{
+ mm_segment_t oldmm = get_fs();
+ struct socket *sock;
+ int rc;
+ struct file *sock_filp;
+
+ rc = sock_create (PF_INET, SOCK_STREAM, 0, &sock);
+ if (rc != 0) {
+ CERROR ("Can't create socket: %d\n", rc);
+ return rc;
+ }
+
+ sock_filp = sock_alloc_file(sock, 0, NULL);
+ if (IS_ERR(sock_filp)) {
+ sock_release(sock);
+ rc = PTR_ERR(sock_filp);
+ goto out;
+ }
+
+ set_fs(KERNEL_DS);
+ if (sock_filp->f_op->unlocked_ioctl)
+ rc = sock_filp->f_op->unlocked_ioctl(sock_filp, cmd, arg);
+ set_fs(oldmm);
+
+ fput(sock_filp);
+out:
+ return rc;
+}
+
+int
+libcfs_ipif_query (char *name, int *up, __u32 *ip, __u32 *mask)
+{
+ struct ifreq ifr;
+ int nob;
+ int rc;
+ __u32 val;
+
+ nob = strnlen(name, IFNAMSIZ);
+ if (nob == IFNAMSIZ) {
+ CERROR("Interface name %s too long\n", name);
+ return -EINVAL;
+ }
+
+ CLASSERT (sizeof(ifr.ifr_name) >= IFNAMSIZ);
+
+ strcpy(ifr.ifr_name, name);
+ rc = libcfs_sock_ioctl(SIOCGIFFLAGS, (unsigned long)&ifr);
+
+ if (rc != 0) {
+ CERROR("Can't get flags for interface %s\n", name);
+ return rc;
+ }
+
+ if ((ifr.ifr_flags & IFF_UP) == 0) {
+ CDEBUG(D_NET, "Interface %s down\n", name);
+ *up = 0;
+ *ip = *mask = 0;
+ return 0;
+ }
+
+ *up = 1;
+
+ strcpy(ifr.ifr_name, name);
+ ifr.ifr_addr.sa_family = AF_INET;
+ rc = libcfs_sock_ioctl(SIOCGIFADDR, (unsigned long)&ifr);
+
+ if (rc != 0) {
+ CERROR("Can't get IP address for interface %s\n", name);
+ return rc;
+ }
+
+ val = ((struct sockaddr_in *)&ifr.ifr_addr)->sin_addr.s_addr;
+ *ip = ntohl(val);
+
+ strcpy(ifr.ifr_name, name);
+ ifr.ifr_addr.sa_family = AF_INET;
+ rc = libcfs_sock_ioctl(SIOCGIFNETMASK, (unsigned long)&ifr);
+
+ if (rc != 0) {
+ CERROR("Can't get netmask for interface %s\n", name);
+ return rc;
+ }
+
+ val = ((struct sockaddr_in *)&ifr.ifr_netmask)->sin_addr.s_addr;
+ *mask = ntohl(val);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(libcfs_ipif_query);
+
+int
+libcfs_ipif_enumerate (char ***namesp)
+{
+ /* Allocate and fill in 'names', returning # interfaces/error */
+ char **names;
+ int toobig;
+ int nalloc;
+ int nfound;
+ struct ifreq *ifr;
+ struct ifconf ifc;
+ int rc;
+ int nob;
+ int i;
+
+
+ nalloc = 16; /* first guess at max interfaces */
+ toobig = 0;
+ for (;;) {
+ if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) {
+ toobig = 1;
+ nalloc = PAGE_CACHE_SIZE/sizeof(*ifr);
+ CWARN("Too many interfaces: only enumerating first %d\n",
+ nalloc);
+ }
+
+ LIBCFS_ALLOC(ifr, nalloc * sizeof(*ifr));
+ if (ifr == NULL) {
+ CERROR ("ENOMEM enumerating up to %d interfaces\n", nalloc);
+ rc = -ENOMEM;
+ goto out0;
+ }
+
+ ifc.ifc_buf = (char *)ifr;
+ ifc.ifc_len = nalloc * sizeof(*ifr);
+
+ rc = libcfs_sock_ioctl(SIOCGIFCONF, (unsigned long)&ifc);
+
+ if (rc < 0) {
+ CERROR ("Error %d enumerating interfaces\n", rc);
+ goto out1;
+ }
+
+ LASSERT (rc == 0);
+
+ nfound = ifc.ifc_len/sizeof(*ifr);
+ LASSERT (nfound <= nalloc);
+
+ if (nfound < nalloc || toobig)
+ break;
+
+ LIBCFS_FREE(ifr, nalloc * sizeof(*ifr));
+ nalloc *= 2;
+ }
+
+ if (nfound == 0)
+ goto out1;
+
+ LIBCFS_ALLOC(names, nfound * sizeof(*names));
+ if (names == NULL) {
+ rc = -ENOMEM;
+ goto out1;
+ }
+ /* NULL out all names[i] */
+ memset (names, 0, nfound * sizeof(*names));
+
+ for (i = 0; i < nfound; i++) {
+
+ nob = strnlen (ifr[i].ifr_name, IFNAMSIZ);
+ if (nob == IFNAMSIZ) {
+ /* no space for terminating NULL */
+ CERROR("interface name %.*s too long (%d max)\n",
+ nob, ifr[i].ifr_name, IFNAMSIZ);
+ rc = -ENAMETOOLONG;
+ goto out2;
+ }
+
+ LIBCFS_ALLOC(names[i], IFNAMSIZ);
+ if (names[i] == NULL) {
+ rc = -ENOMEM;
+ goto out2;
+ }
+
+ memcpy(names[i], ifr[i].ifr_name, nob);
+ names[i][nob] = 0;
+ }
+
+ *namesp = names;
+ rc = nfound;
+
+ out2:
+ if (rc < 0)
+ libcfs_ipif_free_enumeration(names, nfound);
+ out1:
+ LIBCFS_FREE(ifr, nalloc * sizeof(*ifr));
+ out0:
+ return rc;
+}
+
+EXPORT_SYMBOL(libcfs_ipif_enumerate);
+
+void
+libcfs_ipif_free_enumeration (char **names, int n)
+{
+ int i;
+
+ LASSERT (n > 0);
+
+ for (i = 0; i < n && names[i] != NULL; i++)
+ LIBCFS_FREE(names[i], IFNAMSIZ);
+
+ LIBCFS_FREE(names, n * sizeof(*names));
+}
+
+EXPORT_SYMBOL(libcfs_ipif_free_enumeration);
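+
+/*
+ * Illustrative usage sketch (hypothetical caller): enumerate the
+ * interfaces, query each one, then release the name array:
+ *
+ *	char **names;
+ *	int i, n, up;
+ *	__u32 ip, mask;
+ *
+ *	n = libcfs_ipif_enumerate(&names);
+ *	for (i = 0; i < n; i++)
+ *		if (libcfs_ipif_query(names[i], &up, &ip, &mask) == 0 && up)
+ *			use_interface(names[i], ip, mask);  -- hypothetical
+ *	if (n > 0)
+ *		libcfs_ipif_free_enumeration(names, n);
+ */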
+
+int
+libcfs_sock_write (struct socket *sock, void *buffer, int nob, int timeout)
+{
+ int rc;
+ mm_segment_t oldmm = get_fs();
+ long ticks = timeout * HZ;
+ unsigned long then;
+ struct timeval tv;
+
+ LASSERT (nob > 0);
+ /* Caller may pass a zero timeout if she thinks the socket buffer is
+ * empty enough to take the whole message immediately */
+
+ for (;;) {
+ struct iovec iov = {
+ .iov_base = buffer,
+ .iov_len = nob
+ };
+ struct msghdr msg = {
+ .msg_name = NULL,
+ .msg_namelen = 0,
+ .msg_iov = &iov,
+ .msg_iovlen = 1,
+ .msg_control = NULL,
+ .msg_controllen = 0,
+ .msg_flags = (timeout == 0) ? MSG_DONTWAIT : 0
+ };
+
+ if (timeout != 0) {
+ /* Set send timeout to remaining time */
+ tv = (struct timeval) {
+ .tv_sec = ticks / HZ,
+ .tv_usec = ((ticks % HZ) * 1000000) / HZ
+ };
+ set_fs(KERNEL_DS);
+ rc = sock_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,
+ (char *)&tv, sizeof(tv));
+ set_fs(oldmm);
+ if (rc != 0) {
+ CERROR("Can't set socket send timeout "
+ "%ld.%06d: %d\n",
+ (long)tv.tv_sec, (int)tv.tv_usec, rc);
+ return rc;
+ }
+ }
+
+ set_fs (KERNEL_DS);
+ then = jiffies;
+ rc = sock_sendmsg (sock, &msg, iov.iov_len);
+ ticks -= jiffies - then;
+ set_fs (oldmm);
+
+ if (rc == nob)
+ return 0;
+
+ if (rc < 0)
+ return rc;
+
+ if (rc == 0) {
+ CERROR ("Unexpected zero rc\n");
+ return (-ECONNABORTED);
+ }
+
+ if (ticks <= 0)
+ return -EAGAIN;
+
+ buffer = ((char *)buffer) + rc;
+ nob -= rc;
+ }
+
+ return (0);
+}
+EXPORT_SYMBOL(libcfs_sock_write);
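+
+/*
+ * Note on the timeout handling in libcfs_sock_write(): ticks = timeout * HZ
+ * is a budget for the whole message, not per fragment.  Each pass deducts
+ * the jiffies spent in sock_sendmsg(), and the per-pass SO_SNDTIMEO is
+ * re-derived from the remaining ticks, so a partially sent message that
+ * exhausts the budget fails with -EAGAIN instead of blocking indefinitely.
+ */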
+
+int
+libcfs_sock_read (struct socket *sock, void *buffer, int nob, int timeout)
+{
+ int rc;
+ mm_segment_t oldmm = get_fs();
+ long ticks = timeout * HZ;
+ unsigned long then;
+ struct timeval tv;
+
+ LASSERT (nob > 0);
+ LASSERT (ticks > 0);
+
+ for (;;) {
+ struct iovec iov = {
+ .iov_base = buffer,
+ .iov_len = nob
+ };
+ struct msghdr msg = {
+ .msg_name = NULL,
+ .msg_namelen = 0,
+ .msg_iov = &iov,
+ .msg_iovlen = 1,
+ .msg_control = NULL,
+ .msg_controllen = 0,
+ .msg_flags = 0
+ };
+
+ /* Set receive timeout to remaining time */
+ tv = (struct timeval) {
+ .tv_sec = ticks / HZ,
+ .tv_usec = ((ticks % HZ) * 1000000) / HZ
+ };
+ set_fs(KERNEL_DS);
+ rc = sock_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
+ (char *)&tv, sizeof(tv));
+ set_fs(oldmm);
+ if (rc != 0) {
+ CERROR("Can't set socket recv timeout %ld.%06d: %d\n",
+ (long)tv.tv_sec, (int)tv.tv_usec, rc);
+ return rc;
+ }
+
+ set_fs(KERNEL_DS);
+ then = jiffies;
+ rc = sock_recvmsg(sock, &msg, iov.iov_len, 0);
+ ticks -= jiffies - then;
+ set_fs(oldmm);
+
+ if (rc < 0)
+ return rc;
+
+ if (rc == 0)
+ return -ECONNRESET;
+
+ buffer = ((char *)buffer) + rc;
+ nob -= rc;
+
+ if (nob == 0)
+ return 0;
+
+ if (ticks <= 0)
+ return -ETIMEDOUT;
+ }
+}
+
+EXPORT_SYMBOL(libcfs_sock_read);
+
+static int
+libcfs_sock_create (struct socket **sockp, int *fatal,
+ __u32 local_ip, int local_port)
+{
+ struct sockaddr_in locaddr;
+ struct socket *sock;
+ int rc;
+ int option;
+ mm_segment_t oldmm = get_fs();
+
+ /* All errors are fatal except bind failure if the port is in use */
+ *fatal = 1;
+
+ rc = sock_create (PF_INET, SOCK_STREAM, 0, &sock);
+ *sockp = sock;
+ if (rc != 0) {
+ CERROR ("Can't create socket: %d\n", rc);
+ return (rc);
+ }
+
+ set_fs (KERNEL_DS);
+ option = 1;
+ rc = sock_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
+ (char *)&option, sizeof (option));
+ set_fs (oldmm);
+ if (rc != 0) {
+ CERROR("Can't set SO_REUSEADDR for socket: %d\n", rc);
+ goto failed;
+ }
+
+ if (local_ip != 0 || local_port != 0) {
+ memset(&locaddr, 0, sizeof(locaddr));
+ locaddr.sin_family = AF_INET;
+ locaddr.sin_port = htons(local_port);
+ locaddr.sin_addr.s_addr = (local_ip == 0) ?
+ INADDR_ANY : htonl(local_ip);
+
+ rc = sock->ops->bind(sock, (struct sockaddr *)&locaddr,
+ sizeof(locaddr));
+ if (rc == -EADDRINUSE) {
+ CDEBUG(D_NET, "Port %d already in use\n", local_port);
+ *fatal = 0;
+ goto failed;
+ }
+ if (rc != 0) {
+ CERROR("Error trying to bind to port %d: %d\n",
+ local_port, rc);
+ goto failed;
+ }
+ }
+
+ return 0;
+
+ failed:
+ sock_release(sock);
+ return rc;
+}
+
+int
+libcfs_sock_setbuf (struct socket *sock, int txbufsize, int rxbufsize)
+{
+ mm_segment_t oldmm = get_fs();
+ int option;
+ int rc;
+
+ if (txbufsize != 0) {
+ option = txbufsize;
+ set_fs (KERNEL_DS);
+ rc = sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
+ (char *)&option, sizeof (option));
+ set_fs (oldmm);
+ if (rc != 0) {
+ CERROR ("Can't set send buffer %d: %d\n",
+ option, rc);
+ return (rc);
+ }
+ }
+
+ if (rxbufsize != 0) {
+ option = rxbufsize;
+ set_fs (KERNEL_DS);
+ rc = sock_setsockopt (sock, SOL_SOCKET, SO_RCVBUF,
+ (char *)&option, sizeof (option));
+ set_fs (oldmm);
+ if (rc != 0) {
+ CERROR ("Can't set receive buffer %d: %d\n",
+ option, rc);
+ return (rc);
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(libcfs_sock_setbuf);
+
+int
+libcfs_sock_getaddr (struct socket *sock, int remote, __u32 *ip, int *port)
+{
+ struct sockaddr_in sin;
+ int len = sizeof (sin);
+ int rc;
+
+ rc = sock->ops->getname (sock, (struct sockaddr *)&sin, &len,
+ remote ? 2 : 0);
+ if (rc != 0) {
+ CERROR ("Error %d getting sock %s IP/port\n",
+ rc, remote ? "peer" : "local");
+ return rc;
+ }
+
+ if (ip != NULL)
+ *ip = ntohl (sin.sin_addr.s_addr);
+
+ if (port != NULL)
+ *port = ntohs (sin.sin_port);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(libcfs_sock_getaddr);
+
+int
+libcfs_sock_getbuf (struct socket *sock, int *txbufsize, int *rxbufsize)
+{
+
+ if (txbufsize != NULL) {
+ *txbufsize = sock->sk->sk_sndbuf;
+ }
+
+ if (rxbufsize != NULL) {
+ *rxbufsize = sock->sk->sk_rcvbuf;
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(libcfs_sock_getbuf);
+
+int
+libcfs_sock_listen (struct socket **sockp,
+ __u32 local_ip, int local_port, int backlog)
+{
+ int fatal;
+ int rc;
+
+ rc = libcfs_sock_create(sockp, &fatal, local_ip, local_port);
+ if (rc != 0) {
+ if (!fatal)
+ CERROR("Can't create socket: port %d already in use\n",
+ local_port);
+ return rc;
+ }
+
+ rc = (*sockp)->ops->listen(*sockp, backlog);
+ if (rc == 0)
+ return 0;
+
+ CERROR("Can't set listen backlog %d: %d\n", backlog, rc);
+ sock_release(*sockp);
+ return rc;
+}
+
+EXPORT_SYMBOL(libcfs_sock_listen);
+
+int
+libcfs_sock_accept (struct socket **newsockp, struct socket *sock)
+{
+ wait_queue_t wait;
+ struct socket *newsock;
+ int rc;
+
+ init_waitqueue_entry(&wait, current);
+
+ /* XXX this should add a ref to sock->ops->owner, if
+ * TCP could be a module */
+ rc = sock_create_lite(PF_PACKET, sock->type, IPPROTO_TCP, &newsock);
+ if (rc) {
+ CERROR("Can't allocate socket\n");
+ return rc;
+ }
+
+ newsock->ops = sock->ops;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(cfs_sk_sleep(sock->sk), &wait);
+
+ rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
+ if (rc == -EAGAIN) {
+ /* Nothing ready, so wait for activity */
+ schedule();
+ rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
+ }
+
+ remove_wait_queue(cfs_sk_sleep(sock->sk), &wait);
+ set_current_state(TASK_RUNNING);
+
+ if (rc != 0)
+ goto failed;
+
+ *newsockp = newsock;
+ return 0;
+
+ failed:
+ sock_release(newsock);
+ return rc;
+}
+
+EXPORT_SYMBOL(libcfs_sock_accept);
+
+void
+libcfs_sock_abort_accept (struct socket *sock)
+{
+ wake_up_all(cfs_sk_sleep(sock->sk));
+}
+
+EXPORT_SYMBOL(libcfs_sock_abort_accept);
+
+int
+libcfs_sock_connect (struct socket **sockp, int *fatal,
+ __u32 local_ip, int local_port,
+ __u32 peer_ip, int peer_port)
+{
+ struct sockaddr_in srvaddr;
+ int rc;
+
+ rc = libcfs_sock_create(sockp, fatal, local_ip, local_port);
+ if (rc != 0)
+ return rc;
+
+ memset (&srvaddr, 0, sizeof (srvaddr));
+ srvaddr.sin_family = AF_INET;
+ srvaddr.sin_port = htons(peer_port);
+ srvaddr.sin_addr.s_addr = htonl(peer_ip);
+
+ rc = (*sockp)->ops->connect(*sockp,
+ (struct sockaddr *)&srvaddr, sizeof(srvaddr),
+ 0);
+ if (rc == 0)
+ return 0;
+
+ /* EADDRNOTAVAIL probably means we're already connected to the same
+ * peer/port on the same local port on a differently typed
+ * connection. Let our caller retry with a different local
+ * port... */
+ *fatal = !(rc == -EADDRNOTAVAIL);
+
+ CDEBUG_LIMIT(*fatal ? D_NETERROR : D_NET,
+ "Error %d connecting %pI4h/%d -> %pI4h/%d\n", rc,
+ &local_ip, local_port, &peer_ip, peer_port);
+
+ sock_release(*sockp);
+ return rc;
+}
+
+EXPORT_SYMBOL(libcfs_sock_connect);
+
+void
+libcfs_sock_release (struct socket *sock)
+{
+ sock_release(sock);
+}
+
+EXPORT_SYMBOL(libcfs_sock_release);
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c
new file mode 100644
index 0000000..162beee
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c
@@ -0,0 +1,275 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+#define LUSTRE_TRACEFILE_PRIVATE
+
+#include <linux/libcfs/libcfs.h>
+#include "tracefile.h"
+
+/* percentage of the total debug memory allotted to each type */
+static unsigned int pages_factor[CFS_TCD_TYPE_MAX] = {
+ 80, /* 80% pages for CFS_TCD_TYPE_PROC */
+ 10, /* 10% pages for CFS_TCD_TYPE_SOFTIRQ */
+ 10 /* 10% pages for CFS_TCD_TYPE_IRQ */
+};
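+
+/*
+ * Worked example: with a 100 MB debug buffer (debug_mb = 100, assumed for
+ * illustration), roughly 80 MB of trace pages go to process context, 10 MB
+ * to softirq context and 10 MB to hard-irq context, in the CFS_TCD_TYPE_*
+ * order above.
+ */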
+
+char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];
+
+struct rw_semaphore cfs_tracefile_sem;
+
+int cfs_tracefile_init_arch(void)
+{
+ int i;
+ int j;
+ struct cfs_trace_cpu_data *tcd;
+
+ init_rwsem(&cfs_tracefile_sem);
+
+ /* initialize trace_data */
+ memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
+ for (i = 0; i < CFS_TCD_TYPE_MAX; i++) {
+ cfs_trace_data[i] =
+ kmalloc(sizeof(union cfs_trace_data_union) *
+ num_possible_cpus(), GFP_KERNEL);
+ if (cfs_trace_data[i] == NULL)
+ goto out;
+
+ }
+
+ /* arch related info initialized */
+ cfs_tcd_for_each(tcd, i, j) {
+ spin_lock_init(&tcd->tcd_lock);
+ tcd->tcd_pages_factor = pages_factor[i];
+ tcd->tcd_type = i;
+ tcd->tcd_cpu = j;
+ }
+
+ for (i = 0; i < num_possible_cpus(); i++)
+ for (j = 0; j < 3; j++) {
+ cfs_trace_console_buffers[i][j] =
+ kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
+ GFP_KERNEL);
+
+ if (cfs_trace_console_buffers[i][j] == NULL)
+ goto out;
+ }
+
+ return 0;
+
+out:
+ cfs_tracefile_fini_arch();
+ printk(KERN_ERR "lnet: Not enough memory\n");
+ return -ENOMEM;
+}
+
+void cfs_tracefile_fini_arch(void)
+{
+ int i;
+ int j;
+
+ for (i = 0; i < num_possible_cpus(); i++)
+ for (j = 0; j < 3; j++)
+ if (cfs_trace_console_buffers[i][j] != NULL) {
+ kfree(cfs_trace_console_buffers[i][j]);
+ cfs_trace_console_buffers[i][j] = NULL;
+ }
+
+ for (i = 0; cfs_trace_data[i] != NULL; i++) {
+ kfree(cfs_trace_data[i]);
+ cfs_trace_data[i] = NULL;
+ }
+
+ fini_rwsem(&cfs_tracefile_sem);
+}
+
+void cfs_tracefile_read_lock(void)
+{
+ down_read(&cfs_tracefile_sem);
+}
+
+void cfs_tracefile_read_unlock(void)
+{
+ up_read(&cfs_tracefile_sem);
+}
+
+void cfs_tracefile_write_lock(void)
+{
+ down_write(&cfs_tracefile_sem);
+}
+
+void cfs_tracefile_write_unlock(void)
+{
+ up_write(&cfs_tracefile_sem);
+}
+
+cfs_trace_buf_type_t cfs_trace_buf_idx_get(void)
+{
+ if (in_irq())
+ return CFS_TCD_TYPE_IRQ;
+ else if (in_softirq())
+ return CFS_TCD_TYPE_SOFTIRQ;
+ else
+ return CFS_TCD_TYPE_PROC;
+}
+
+/*
+ * The walking argument indicates that the lock is being taken by the
+ * iterator over all tcd types, so we must also disable local irqs to
+ * avoid deadlocks with other interrupt locks that might be taken. See
+ * LU-1311 for details.
+ */
+int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
+{
+ __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
+ if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
+ spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
+ else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
+ spin_lock_bh(&tcd->tcd_lock);
+ else if (unlikely(walking))
+ spin_lock_irq(&tcd->tcd_lock);
+ else
+ spin_lock(&tcd->tcd_lock);
+ return 1;
+}
+
+void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
+{
+ __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
+ if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
+ spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
+ else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
+ spin_unlock_bh(&tcd->tcd_lock);
+ else if (unlikely(walking))
+ spin_unlock_irq(&tcd->tcd_lock);
+ else
+ spin_unlock(&tcd->tcd_lock);
+}
+
+int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd,
+ struct cfs_trace_page *tage)
+{
+ /*
+ * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
+ * from here: this will lead to infinite recursion.
+ */
+ return tcd->tcd_cpu == tage->cpu;
+}
+
+void
+cfs_set_ptldebug_header(struct ptldebug_header *header,
+ struct libcfs_debug_msg_data *msgdata,
+ unsigned long stack)
+{
+ struct timeval tv;
+
+ do_gettimeofday(&tv);
+
+ header->ph_subsys = msgdata->msg_subsys;
+ header->ph_mask = msgdata->msg_mask;
+ header->ph_cpu_id = smp_processor_id();
+ header->ph_type = cfs_trace_buf_idx_get();
+ header->ph_sec = (__u32)tv.tv_sec;
+ header->ph_usec = tv.tv_usec;
+ header->ph_stack = stack;
+ header->ph_pid = current->pid;
+ header->ph_line_num = msgdata->msg_line;
+ header->ph_extern_pid = 0;
+ return;
+}
+
+static char *
+dbghdr_to_err_string(struct ptldebug_header *hdr)
+{
+ switch (hdr->ph_subsys) {
+
+ case S_LND:
+ case S_LNET:
+ return "LNetError";
+ default:
+ return "LustreError";
+ }
+}
+
+static char *
+dbghdr_to_info_string(struct ptldebug_header *hdr)
+{
+ switch (hdr->ph_subsys) {
+
+ case S_LND:
+ case S_LNET:
+ return "LNet";
+ default:
+ return "Lustre";
+ }
+}
+
+void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
+ const char *buf, int len, const char *file,
+ const char *fn)
+{
+ char *prefix = "Lustre", *ptype = NULL;
+
+ if ((mask & D_EMERG) != 0) {
+ prefix = dbghdr_to_err_string(hdr);
+ ptype = KERN_EMERG;
+ } else if ((mask & D_ERROR) != 0) {
+ prefix = dbghdr_to_err_string(hdr);
+ ptype = KERN_ERR;
+ } else if ((mask & D_WARNING) != 0) {
+ prefix = dbghdr_to_info_string(hdr);
+ ptype = KERN_WARNING;
+ } else if ((mask & (D_CONSOLE | libcfs_printk)) != 0) {
+ prefix = dbghdr_to_info_string(hdr);
+ ptype = KERN_INFO;
+ }
+
+ if ((mask & D_CONSOLE) != 0) {
+ printk("%s%s: %.*s", ptype, prefix, len, buf);
+ } else {
+ printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix,
+ hdr->ph_pid, hdr->ph_extern_pid, file, hdr->ph_line_num,
+ fn, len, buf);
+ }
+ return;
+}
+
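+/*
+ * cfs_trace_max_debug_mb() caps the debug buffer at 80% of RAM, but never
+ * below 512 MB.  Worked example (assuming 4 KiB pages): with 2 GiB of RAM,
+ * totalram_pages >> (20 - PAGE_SHIFT) gives total_mb = 2048, so the cap is
+ * MAX(512, 2048 * 80 / 100) = 1638 MB.
+ */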
+int cfs_trace_max_debug_mb(void)
+{
+ int total_mb = (totalram_pages >> (20 - PAGE_SHIFT));
+
+ return MAX(512, (total_mb * 80)/100);
+}
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.h b/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.h
new file mode 100644
index 0000000..ba84e4f
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.h
@@ -0,0 +1,48 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __LIBCFS_LINUX_TRACEFILE_H__
+#define __LIBCFS_LINUX_TRACEFILE_H__
+
+/**
+ * three types of trace_data in linux
+ */
+typedef enum {
+ CFS_TCD_TYPE_PROC = 0,
+ CFS_TCD_TYPE_SOFTIRQ,
+ CFS_TCD_TYPE_IRQ,
+ CFS_TCD_TYPE_MAX
+} cfs_trace_buf_type_t;
+
+#endif
diff --git a/drivers/staging/lustre/lustre/libcfs/lwt.c b/drivers/staging/lustre/lustre/libcfs/lwt.c
new file mode 100644
index 0000000..b631f7d
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/lwt.c
@@ -0,0 +1,266 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/libcfs/lwt.c
+ *
+ * Author: Eric Barton <eeb@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/libcfs/libcfs.h>
+
+#if LWT_SUPPORT
+
+#if !KLWT_SUPPORT
+int lwt_enabled;
+lwt_cpu_t lwt_cpus[NR_CPUS];
+#endif
+
+int lwt_pages_per_cpu;
+
+/* NB only root is allowed to retrieve LWT info; it's an open door into the
+ * kernel... */
+
+int
+lwt_lookup_string (int *size, char *knl_ptr,
+ char *user_ptr, int user_size)
+{
+ int maxsize = 128;
+
+ /* knl_ptr was retrieved from an LWT snapshot and the caller wants to
+ * turn it into a string. NB we can crash with an access violation
+ * trying to determine the string length, so we're trusting our
+ * caller... */
+
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ return (-EPERM);
+
+ if (user_size > 0 &&
+ maxsize > user_size)
+ maxsize = user_size;
+
+ *size = strnlen (knl_ptr, maxsize - 1) + 1;
+
+ if (user_ptr != NULL) {
+ if (user_size < 4)
+ return (-EINVAL);
+
+ if (copy_to_user (user_ptr, knl_ptr, *size))
+ return (-EFAULT);
+
+ /* Did I truncate the string? */
+ if (knl_ptr[*size - 1] != 0)
+ copy_to_user (user_ptr + *size - 4, "...", 4);
+ }
+
+ return (0);
+}
+
+int
+lwt_control (int enable, int clear)
+{
+ lwt_page_t *p;
+ int i;
+ int j;
+
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ return (-EPERM);
+
+ if (!enable) {
+ LWT_EVENT(0,0,0,0);
+ lwt_enabled = 0;
+ mb();
+ /* give people some time to stop adding traces */
+ schedule_timeout(10);
+ }
+
+ for (i = 0; i < num_online_cpus(); i++) {
+ p = lwt_cpus[i].lwtc_current_page;
+
+ if (p == NULL)
+ return (-ENODATA);
+
+ if (!clear)
+ continue;
+
+ for (j = 0; j < lwt_pages_per_cpu; j++) {
+ memset (p->lwtp_events, 0, PAGE_CACHE_SIZE);
+
+ p = list_entry (p->lwtp_list.next,
+ lwt_page_t, lwtp_list);
+ }
+ }
+
+ if (enable) {
+ lwt_enabled = 1;
+ mb();
+ LWT_EVENT(0,0,0,0);
+ }
+
+ return (0);
+}
+
+int
+lwt_snapshot (cfs_cycles_t *now, int *ncpu, int *total_size,
+ void *user_ptr, int user_size)
+{
+ const int events_per_page = PAGE_CACHE_SIZE / sizeof(lwt_event_t);
+ const int bytes_per_page = events_per_page * sizeof(lwt_event_t);
+ lwt_page_t *p;
+ int i;
+ int j;
+
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ return (-EPERM);
+
+ *ncpu = num_online_cpus();
+ *total_size = num_online_cpus() * lwt_pages_per_cpu *
+ bytes_per_page;
+ *now = get_cycles();
+
+ if (user_ptr == NULL)
+ return (0);
+
+ for (i = 0; i < num_online_cpus(); i++) {
+ p = lwt_cpus[i].lwtc_current_page;
+
+ if (p == NULL)
+ return (-ENODATA);
+
+ for (j = 0; j < lwt_pages_per_cpu; j++) {
+ if (copy_to_user(user_ptr, p->lwtp_events,
+ bytes_per_page))
+ return (-EFAULT);
+
+ user_ptr = ((char *)user_ptr) + bytes_per_page;
+ p = list_entry(p->lwtp_list.next,
+ lwt_page_t, lwtp_list);
+ }
+ }
+
+ return (0);
+}
+
+int
+lwt_init ()
+{
+ int i;
+ int j;
+
+ for (i = 0; i < num_online_cpus(); i++)
+ if (lwt_cpus[i].lwtc_current_page != NULL)
+ return (-EALREADY);
+
+ LASSERT (!lwt_enabled);
+
+ /* NULL pointers, zero scalars */
+ memset (lwt_cpus, 0, sizeof (lwt_cpus));
+ lwt_pages_per_cpu =
+ LWT_MEMORY / (num_online_cpus() * PAGE_CACHE_SIZE);
+
+ for (i = 0; i < num_online_cpus(); i++)
+ for (j = 0; j < lwt_pages_per_cpu; j++) {
+ struct page *page = alloc_page (GFP_KERNEL);
+ lwt_page_t *lwtp;
+
+ if (page == NULL) {
+ CERROR ("Can't allocate page\n");
+ lwt_fini ();
+ return (-ENOMEM);
+ }
+
+ LIBCFS_ALLOC(lwtp, sizeof (*lwtp));
+ if (lwtp == NULL) {
+ CERROR ("Can't allocate lwtp\n");
+ __free_page(page);
+ lwt_fini ();
+ return (-ENOMEM);
+ }
+
+ lwtp->lwtp_page = page;
+ lwtp->lwtp_events = page_address(page);
+ memset (lwtp->lwtp_events, 0, PAGE_CACHE_SIZE);
+
+ if (j == 0) {
+ INIT_LIST_HEAD (&lwtp->lwtp_list);
+ lwt_cpus[i].lwtc_current_page = lwtp;
+ } else {
+ list_add (&lwtp->lwtp_list,
+ &lwt_cpus[i].lwtc_current_page->lwtp_list);
+ }
+ }
+
+ lwt_enabled = 1;
+ mb();
+
+ LWT_EVENT(0,0,0,0);
+
+ return (0);
+}
+
+void
+lwt_fini ()
+{
+ int i;
+
+ lwt_control(0, 0);
+
+ for (i = 0; i < num_online_cpus(); i++)
+ while (lwt_cpus[i].lwtc_current_page != NULL) {
+ lwt_page_t *lwtp = lwt_cpus[i].lwtc_current_page;
+
+ if (list_empty (&lwtp->lwtp_list)) {
+ lwt_cpus[i].lwtc_current_page = NULL;
+ } else {
+ lwt_cpus[i].lwtc_current_page =
+ list_entry (lwtp->lwtp_list.next,
+ lwt_page_t, lwtp_list);
+
+ list_del (&lwtp->lwtp_list);
+ }
+
+ __free_page (lwtp->lwtp_page);
+ LIBCFS_FREE (lwtp, sizeof (*lwtp));
+ }
+}
+
+EXPORT_SYMBOL(lwt_enabled);
+EXPORT_SYMBOL(lwt_cpus);
+
+EXPORT_SYMBOL(lwt_init);
+EXPORT_SYMBOL(lwt_fini);
+EXPORT_SYMBOL(lwt_lookup_string);
+EXPORT_SYMBOL(lwt_control);
+EXPORT_SYMBOL(lwt_snapshot);
+#endif
diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
new file mode 100644
index 0000000..f3108c7
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/module.c
@@ -0,0 +1,496 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/libcfs/libcfs_crypto.h>
+#include <linux/lnet/lib-lnet.h>
+#include <linux/lnet/lnet.h>
+#include "tracefile.h"
+
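+/*
+ * The memhog pages are tracked in a three-level tree of pages: a root page
+ * holds pointers to level-1 pages, each of which holds pointers to the
+ * leaf pages that are actually hogged.  Worked capacity example (assuming
+ * 4096-byte pages and 8-byte pointers): each directory page holds 512
+ * pointers, so the tree can pin up to 1 + 512 + 512 * 512 = 262,657 pages,
+ * roughly 1 GiB.
+ */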
+void
+kportal_memhog_free (struct libcfs_device_userstate *ldu)
+{
+ struct page **level0p = &ldu->ldu_memhog_root_page;
+ struct page **level1p;
+ struct page **level2p;
+ int count1;
+ int count2;
+
+ if (*level0p != NULL) {
+
+ level1p = (struct page **)page_address(*level0p);
+ count1 = 0;
+
+ while (count1 < PAGE_CACHE_SIZE/sizeof(struct page *) &&
+ *level1p != NULL) {
+
+ level2p = (struct page **)page_address(*level1p);
+ count2 = 0;
+
+ while (count2 < PAGE_CACHE_SIZE/sizeof(struct page *) &&
+ *level2p != NULL) {
+
+ __free_page(*level2p);
+ ldu->ldu_memhog_pages--;
+ level2p++;
+ count2++;
+ }
+
+ __free_page(*level1p);
+ ldu->ldu_memhog_pages--;
+ level1p++;
+ count1++;
+ }
+
+ __free_page(*level0p);
+ ldu->ldu_memhog_pages--;
+
+ *level0p = NULL;
+ }
+
+ LASSERT (ldu->ldu_memhog_pages == 0);
+}
+
+int
+kportal_memhog_alloc (struct libcfs_device_userstate *ldu, int npages, int flags)
+{
+ struct page **level0p;
+ struct page **level1p;
+ struct page **level2p;
+ int count1;
+ int count2;
+
+ LASSERT (ldu->ldu_memhog_pages == 0);
+ LASSERT (ldu->ldu_memhog_root_page == NULL);
+
+ if (npages < 0)
+ return -EINVAL;
+
+ if (npages == 0)
+ return 0;
+
+ level0p = &ldu->ldu_memhog_root_page;
+ *level0p = alloc_page(flags);
+ if (*level0p == NULL)
+ return -ENOMEM;
+ ldu->ldu_memhog_pages++;
+
+ level1p = (struct page **)page_address(*level0p);
+ count1 = 0;
+ memset(level1p, 0, PAGE_CACHE_SIZE);
+
+ while (ldu->ldu_memhog_pages < npages &&
+ count1 < PAGE_CACHE_SIZE/sizeof(struct page *)) {
+
+ if (cfs_signal_pending())
+ return (-EINTR);
+
+ *level1p = alloc_page(flags);
+ if (*level1p == NULL)
+ return -ENOMEM;
+ ldu->ldu_memhog_pages++;
+
+ level2p = (struct page **)page_address(*level1p);
+ count2 = 0;
+ memset(level2p, 0, PAGE_CACHE_SIZE);
+
+ while (ldu->ldu_memhog_pages < npages &&
+ count2 < PAGE_CACHE_SIZE/sizeof(struct page *)) {
+
+ if (cfs_signal_pending())
+ return (-EINTR);
+
+ *level2p = alloc_page(flags);
+ if (*level2p == NULL)
+ return (-ENOMEM);
+ ldu->ldu_memhog_pages++;
+
+ level2p++;
+ count2++;
+ }
+
+ level1p++;
+ count1++;
+ }
+
+ return 0;
+}
+
+/* called when opening /dev/device */
+static int libcfs_psdev_open(unsigned long flags, void *args)
+{
+ struct libcfs_device_userstate *ldu;
+
+ try_module_get(THIS_MODULE);
+
+ LIBCFS_ALLOC(ldu, sizeof(*ldu));
+ if (ldu != NULL) {
+ ldu->ldu_memhog_pages = 0;
+ ldu->ldu_memhog_root_page = NULL;
+ }
+ *(struct libcfs_device_userstate **)args = ldu;
+
+ return 0;
+}
+
+/* called when closing /dev/device */
+static int libcfs_psdev_release(unsigned long flags, void *args)
+{
+ struct libcfs_device_userstate *ldu;
+
+ ldu = (struct libcfs_device_userstate *)args;
+ if (ldu != NULL) {
+ kportal_memhog_free(ldu);
+ LIBCFS_FREE(ldu, sizeof(*ldu));
+ }
+
+ module_put(THIS_MODULE);
+ return 0;
+}
+
+static struct rw_semaphore ioctl_list_sem;
+static struct list_head ioctl_list;
+
+int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand)
+{
+ int rc = 0;
+
+ down_write(&ioctl_list_sem);
+ if (!list_empty(&hand->item))
+ rc = -EBUSY;
+ else
+ list_add_tail(&hand->item, &ioctl_list);
+ up_write(&ioctl_list_sem);
+
+ return rc;
+}
+EXPORT_SYMBOL(libcfs_register_ioctl);
+
+int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand)
+{
+ int rc = 0;
+
+ down_write(&ioctl_list_sem);
+ if (list_empty(&hand->item))
+ rc = -ENOENT;
+ else
+ list_del_init(&hand->item);
+ up_write(&ioctl_list_sem);
+
+ return rc;
+}
+EXPORT_SYMBOL(libcfs_deregister_ioctl);
+
+static int libcfs_ioctl_int(struct cfs_psdev_file *pfile,unsigned long cmd,
+ void *arg, struct libcfs_ioctl_data *data)
+{
+ int err = -EINVAL;
+
+ switch (cmd) {
+ case IOC_LIBCFS_CLEAR_DEBUG:
+ libcfs_debug_clear_buffer();
+ return 0;
+ /*
+ * case IOC_LIBCFS_PANIC:
+ * Handled in arch/cfs_module.c
+ */
+ case IOC_LIBCFS_MARK_DEBUG:
+ if (data->ioc_inlbuf1 == NULL ||
+ data->ioc_inlbuf1[data->ioc_inllen1 - 1] != '\0')
+ return -EINVAL;
+ libcfs_debug_mark_buffer(data->ioc_inlbuf1);
+ return 0;
+#if LWT_SUPPORT
+ case IOC_LIBCFS_LWT_CONTROL:
+ err = lwt_control ((data->ioc_flags & 1) != 0,
+ (data->ioc_flags & 2) != 0);
+ break;
+
+ case IOC_LIBCFS_LWT_SNAPSHOT: {
+ cfs_cycles_t now;
+ int ncpu;
+ int total_size;
+
+ err = lwt_snapshot (&now, &ncpu, &total_size,
+ data->ioc_pbuf1, data->ioc_plen1);
+ data->ioc_u64[0] = now;
+ data->ioc_u32[0] = ncpu;
+ data->ioc_u32[1] = total_size;
+
+ /* Hedge against broken user/kernel typedefs (e.g. cycles_t) */
+ data->ioc_u32[2] = sizeof(lwt_event_t);
+ data->ioc_u32[3] = offsetof(lwt_event_t, lwte_where);
+
+ if (err == 0 &&
+ libcfs_ioctl_popdata(arg, data, sizeof (*data)))
+ err = -EFAULT;
+ break;
+ }
+
+ case IOC_LIBCFS_LWT_LOOKUP_STRING:
+ err = lwt_lookup_string (&data->ioc_count, data->ioc_pbuf1,
+ data->ioc_pbuf2, data->ioc_plen2);
+ if (err == 0 &&
+ libcfs_ioctl_popdata(arg, data, sizeof (*data)))
+ err = -EFAULT;
+ break;
+#endif
+ case IOC_LIBCFS_MEMHOG:
+ if (pfile->private_data == NULL) {
+ err = -EINVAL;
+ } else {
+ kportal_memhog_free(pfile->private_data);
+ /* XXX ioc_flags is not a set of GFP flags yet; this needs to be fixed */
+ err = kportal_memhog_alloc(pfile->private_data,
+ data->ioc_count,
+ data->ioc_flags);
+ if (err != 0)
+ kportal_memhog_free(pfile->private_data);
+ }
+ break;
+
+ case IOC_LIBCFS_PING_TEST: {
+ extern void (kping_client)(struct libcfs_ioctl_data *);
+ void (*ping)(struct libcfs_ioctl_data *);
+
+ CDEBUG(D_IOCTL, "doing %d pings to nid %s (%s)\n",
+ data->ioc_count, libcfs_nid2str(data->ioc_nid),
+ libcfs_nid2str(data->ioc_nid));
+ ping = symbol_get(kping_client);
+ if (!ping)
+ CERROR("symbol_get failed\n");
+ else {
+ ping(data);
+ symbol_put(kping_client);
+ }
+ return 0;
+ }
+
+ default: {
+ struct libcfs_ioctl_handler *hand;
+ err = -EINVAL;
+ down_read(&ioctl_list_sem);
+ list_for_each_entry(hand, &ioctl_list, item) {
+ err = hand->handle_ioctl(cmd, data);
+ if (err != -EINVAL) {
+ if (err == 0)
+ err = libcfs_ioctl_popdata(arg,
+ data, sizeof (*data));
+ break;
+ }
+ }
+ up_read(&ioctl_list_sem);
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd, void *arg)
+{
+ char *buf;
+ struct libcfs_ioctl_data *data;
+ int err = 0;
+
+ LIBCFS_ALLOC_GFP(buf, 1024, GFP_IOFS);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ /* 'cmd' and permissions get checked in our arch-specific caller */
+ if (libcfs_ioctl_getdata(buf, buf + 800, (void *)arg)) {
+ CERROR("PORTALS ioctl: data error\n");
+ GOTO(out, err = -EINVAL);
+ }
+ data = (struct libcfs_ioctl_data *)buf;
+
+ err = libcfs_ioctl_int(pfile, cmd, arg, data);
+
+out:
+ LIBCFS_FREE(buf, 1024);
+ return err;
+}
+
+
+struct cfs_psdev_ops libcfs_psdev_ops = {
+ libcfs_psdev_open,
+ libcfs_psdev_release,
+ NULL,
+ NULL,
+ libcfs_ioctl
+};
+
+extern int insert_proc(void);
+extern void remove_proc(void);
+MODULE_AUTHOR("Peter J. Braam <braam@clusterfs.com>");
+MODULE_DESCRIPTION("Portals v3.1");
+MODULE_LICENSE("GPL");
+
+extern struct miscdevice libcfs_dev;
+extern struct rw_semaphore cfs_tracefile_sem;
+extern struct mutex cfs_trace_thread_mutex;
+extern struct cfs_wi_sched *cfs_sched_rehash;
+
+extern void libcfs_init_nidstrings(void);
+extern int libcfs_arch_init(void);
+extern void libcfs_arch_cleanup(void);
+
+static int init_libcfs_module(void)
+{
+ int rc;
+
+ libcfs_arch_init();
+ libcfs_init_nidstrings();
+ init_rwsem(&cfs_tracefile_sem);
+ mutex_init(&cfs_trace_thread_mutex);
+ init_rwsem(&ioctl_list_sem);
+ INIT_LIST_HEAD(&ioctl_list);
+ init_waitqueue_head(&cfs_race_waitq);
+
+ rc = libcfs_debug_init(5 * 1024 * 1024);
+ if (rc < 0) {
+ printk(KERN_ERR "LustreError: libcfs_debug_init: %d\n", rc);
+ return (rc);
+ }
+
+ rc = cfs_cpu_init();
+ if (rc != 0)
+ goto cleanup_debug;
+
+#if LWT_SUPPORT
+ rc = lwt_init();
+ if (rc != 0) {
+ CERROR("lwt_init: error %d\n", rc);
+ goto cleanup_debug;
+ }
+#endif
+ rc = misc_register(&libcfs_dev);
+ if (rc) {
+ CERROR("misc_register: error %d\n", rc);
+ goto cleanup_lwt;
+ }
+
+ rc = cfs_wi_startup();
+ if (rc) {
+ CERROR("initialize workitem: error %d\n", rc);
+ goto cleanup_deregister;
+ }
+
+ /* cap at 4 threads; should be enough for rehash */
+ rc = min(cfs_cpt_weight(cfs_cpt_table, CFS_CPT_ANY), 4);
+ rc = cfs_wi_sched_create("cfs_rh", cfs_cpt_table, CFS_CPT_ANY,
+ rc, &cfs_sched_rehash);
+ if (rc != 0) {
+ CERROR("Startup workitem scheduler: error: %d\n", rc);
+ goto cleanup_deregister;
+ }
+
+ rc = cfs_crypto_register();
+ if (rc) {
+ CERROR("cfs_crypto_regster: error %d\n", rc);
+ goto cleanup_wi;
+ }
+
+
+ rc = insert_proc();
+ if (rc) {
+ CERROR("insert_proc: error %d\n", rc);
+ goto cleanup_crypto;
+ }
+
+ CDEBUG (D_OTHER, "portals setup OK\n");
+ return 0;
+ cleanup_crypto:
+ cfs_crypto_unregister();
+ cleanup_wi:
+ cfs_wi_shutdown();
+ cleanup_deregister:
+ misc_deregister(&libcfs_dev);
+ cleanup_lwt:
+#if LWT_SUPPORT
+ lwt_fini();
+#endif
+ cleanup_debug:
+ libcfs_debug_cleanup();
+ return rc;
+}
+
+static void exit_libcfs_module(void)
+{
+ int rc;
+
+ remove_proc();
+
+ CDEBUG(D_MALLOC, "before Portals cleanup: kmem %d\n",
+ atomic_read(&libcfs_kmemory));
+
+ if (cfs_sched_rehash != NULL) {
+ cfs_wi_sched_destroy(cfs_sched_rehash);
+ cfs_sched_rehash = NULL;
+ }
+
+ cfs_crypto_unregister();
+ cfs_wi_shutdown();
+
+ rc = misc_deregister(&libcfs_dev);
+ if (rc)
+ CERROR("misc_deregister error %d\n", rc);
+
+#if LWT_SUPPORT
+ lwt_fini();
+#endif
+ cfs_cpu_fini();
+
+ if (atomic_read(&libcfs_kmemory) != 0)
+ CERROR("Portals memory leaked: %d bytes\n",
+ atomic_read(&libcfs_kmemory));
+
+ rc = libcfs_debug_cleanup();
+ if (rc)
+ printk(KERN_ERR "LustreError: libcfs_debug_cleanup: %d\n",
+ rc);
+
+ fini_rwsem(&ioctl_list_sem);
+ fini_rwsem(&cfs_tracefile_sem);
+
+ libcfs_arch_cleanup();
+}
+
+MODULE_VERSION("1.0.0");
+module_init(init_libcfs_module);
+module_exit(exit_libcfs_module);
diff --git a/drivers/staging/lustre/lustre/libcfs/nidstrings.c b/drivers/staging/lustre/lustre/libcfs/nidstrings.c
new file mode 100644
index 0000000..99c9e9d
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/nidstrings.c
@@ -0,0 +1,865 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/libcfs/nidstrings.c
+ *
+ * Author: Phil Schwan <phil@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/lnet/lnet.h>
+
+/* CAVEAT VENDITOR! Keep the canonical string representation of nets/nids
+ * consistent in all conversion functions. Some code fragments are copied
+ * around for the sake of clarity...
+ */
+
+/* CAVEAT EMPTOR! Racy temporary buffer allocation!
+ * Choose the number of nidstrings to support the MAXIMUM expected number of
+ * concurrent users. If there are more, the returned string will be volatile.
+ * NB this number must allow for a process to be descheduled for a timeslice
+ * between getting its string and using it.
+ */
+
+static char libcfs_nidstrings[LNET_NIDSTR_COUNT][LNET_NIDSTR_SIZE];
+static int libcfs_nidstring_idx;
+
+static spinlock_t libcfs_nidstring_lock;
+
+void libcfs_init_nidstrings(void)
+{
+ spin_lock_init(&libcfs_nidstring_lock);
+}
+
+# define NIDSTR_LOCK(f) spin_lock_irqsave(&libcfs_nidstring_lock, f)
+# define NIDSTR_UNLOCK(f) spin_unlock_irqrestore(&libcfs_nidstring_lock, f)
+
+static char *
+libcfs_next_nidstring(void)
+{
+ char *str;
+ unsigned long flags;
+
+ NIDSTR_LOCK(flags);
+
+ str = libcfs_nidstrings[libcfs_nidstring_idx++];
+ if (libcfs_nidstring_idx == ARRAY_SIZE(libcfs_nidstrings))
+ libcfs_nidstring_idx = 0;
+
+ NIDSTR_UNLOCK(flags);
+ return str;
+}
+
+static int libcfs_lo_str2addr(const char *str, int nob, __u32 *addr);
+static void libcfs_ip_addr2str(__u32 addr, char *str);
+static int libcfs_ip_str2addr(const char *str, int nob, __u32 *addr);
+static void libcfs_decnum_addr2str(__u32 addr, char *str);
+static void libcfs_hexnum_addr2str(__u32 addr, char *str);
+static int libcfs_num_str2addr(const char *str, int nob, __u32 *addr);
+static int libcfs_num_parse(char *str, int len, struct list_head *list);
+static int libcfs_num_match(__u32 addr, struct list_head *list);
+
+struct netstrfns {
+ int nf_type;
+ char *nf_name;
+ char *nf_modname;
+ void (*nf_addr2str)(__u32 addr, char *str);
+ int (*nf_str2addr)(const char *str, int nob, __u32 *addr);
+ int (*nf_parse_addrlist)(char *str, int len,
+ struct list_head *list);
+ int (*nf_match_addr)(__u32 addr, struct list_head *list);
+};
+
+static struct netstrfns libcfs_netstrfns[] = {
+ {/* .nf_type */ LOLND,
+ /* .nf_name */ "lo",
+ /* .nf_modname */ "klolnd",
+ /* .nf_addr2str */ libcfs_decnum_addr2str,
+ /* .nf_str2addr */ libcfs_lo_str2addr,
+ /* .nf_parse_addr*/ libcfs_num_parse,
+ /* .nf_match_addr*/ libcfs_num_match},
+ {/* .nf_type */ SOCKLND,
+ /* .nf_name */ "tcp",
+ /* .nf_modname */ "ksocklnd",
+ /* .nf_addr2str */ libcfs_ip_addr2str,
+ /* .nf_str2addr */ libcfs_ip_str2addr,
+ /* .nf_parse_addrlist*/ cfs_ip_addr_parse,
+ /* .nf_match_addr*/ cfs_ip_addr_match},
+ {/* .nf_type */ O2IBLND,
+ /* .nf_name */ "o2ib",
+ /* .nf_modname */ "ko2iblnd",
+ /* .nf_addr2str */ libcfs_ip_addr2str,
+ /* .nf_str2addr */ libcfs_ip_str2addr,
+ /* .nf_parse_addrlist*/ cfs_ip_addr_parse,
+ /* .nf_match_addr*/ cfs_ip_addr_match},
+ {/* .nf_type */ CIBLND,
+ /* .nf_name */ "cib",
+ /* .nf_modname */ "kciblnd",
+ /* .nf_addr2str */ libcfs_ip_addr2str,
+ /* .nf_str2addr */ libcfs_ip_str2addr,
+ /* .nf_parse_addrlist*/ cfs_ip_addr_parse,
+ /* .nf_match_addr*/ cfs_ip_addr_match},
+ {/* .nf_type */ OPENIBLND,
+ /* .nf_name */ "openib",
+ /* .nf_modname */ "kopeniblnd",
+ /* .nf_addr2str */ libcfs_ip_addr2str,
+ /* .nf_str2addr */ libcfs_ip_str2addr,
+ /* .nf_parse_addrlist*/ cfs_ip_addr_parse,
+ /* .nf_match_addr*/ cfs_ip_addr_match},
+ {/* .nf_type */ IIBLND,
+ /* .nf_name */ "iib",
+ /* .nf_modname */ "kiiblnd",
+ /* .nf_addr2str */ libcfs_ip_addr2str,
+ /* .nf_str2addr */ libcfs_ip_str2addr,
+ /* .nf_parse_addrlist*/ cfs_ip_addr_parse,
+ /* .nf_match_addr*/ cfs_ip_addr_match},
+ {/* .nf_type */ VIBLND,
+ /* .nf_name */ "vib",
+ /* .nf_modname */ "kviblnd",
+ /* .nf_addr2str */ libcfs_ip_addr2str,
+ /* .nf_str2addr */ libcfs_ip_str2addr,
+ /* .nf_parse_addrlist*/ cfs_ip_addr_parse,
+ /* .nf_match_addr*/ cfs_ip_addr_match},
+ {/* .nf_type */ RALND,
+ /* .nf_name */ "ra",
+ /* .nf_modname */ "kralnd",
+ /* .nf_addr2str */ libcfs_ip_addr2str,
+ /* .nf_str2addr */ libcfs_ip_str2addr,
+ /* .nf_parse_addrlist*/ cfs_ip_addr_parse,
+ /* .nf_match_addr*/ cfs_ip_addr_match},
+ {/* .nf_type */ QSWLND,
+ /* .nf_name */ "elan",
+ /* .nf_modname */ "kqswlnd",
+ /* .nf_addr2str */ libcfs_decnum_addr2str,
+ /* .nf_str2addr */ libcfs_num_str2addr,
+ /* .nf_parse_addrlist*/ libcfs_num_parse,
+ /* .nf_match_addr*/ libcfs_num_match},
+ {/* .nf_type */ GMLND,
+ /* .nf_name */ "gm",
+ /* .nf_modname */ "kgmlnd",
+ /* .nf_addr2str */ libcfs_hexnum_addr2str,
+ /* .nf_str2addr */ libcfs_num_str2addr,
+ /* .nf_parse_addrlist*/ libcfs_num_parse,
+ /* .nf_match_addr*/ libcfs_num_match},
+ {/* .nf_type */ MXLND,
+ /* .nf_name */ "mx",
+ /* .nf_modname */ "kmxlnd",
+ /* .nf_addr2str */ libcfs_ip_addr2str,
+ /* .nf_str2addr */ libcfs_ip_str2addr,
+ /* .nf_parse_addrlist*/ cfs_ip_addr_parse,
+ /* .nf_match_addr*/ cfs_ip_addr_match},
+ {/* .nf_type */ PTLLND,
+ /* .nf_name */ "ptl",
+ /* .nf_modname */ "kptllnd",
+ /* .nf_addr2str */ libcfs_decnum_addr2str,
+ /* .nf_str2addr */ libcfs_num_str2addr,
+ /* .nf_parse_addrlist*/ libcfs_num_parse,
+ /* .nf_match_addr*/ libcfs_num_match},
+ {/* .nf_type */ GNILND,
+ /* .nf_name */ "gni",
+ /* .nf_modname */ "kgnilnd",
+ /* .nf_addr2str */ libcfs_decnum_addr2str,
+ /* .nf_str2addr */ libcfs_num_str2addr,
+ /* .nf_parse_addrlist*/ libcfs_num_parse,
+ /* .nf_match_addr*/ libcfs_num_match},
+ /* placeholder for net0 alias. It MUST BE THE LAST ENTRY */
+ {/* .nf_type */ -1},
+};
+
+const int libcfs_nnetstrfns = ARRAY_SIZE(libcfs_netstrfns);
+
+int
+libcfs_lo_str2addr(const char *str, int nob, __u32 *addr)
+{
+ *addr = 0;
+ return 1;
+}
+
+void
+libcfs_ip_addr2str(__u32 addr, char *str)
+{
+#if 0 /* never lookup */
+#endif
+ snprintf(str, LNET_NIDSTR_SIZE, "%u.%u.%u.%u",
+ (addr >> 24) & 0xff, (addr >> 16) & 0xff,
+ (addr >> 8) & 0xff, addr & 0xff);
+}
+
+/* CAVEAT EMPTOR XscanfX
+ * I use "%n" at the end of a sscanf format to detect trailing junk. However
+ * sscanf may return immediately if it sees the terminating '0' in a string, so
+ * I initialise the %n variable to the expected length. If sscanf sets it;
+ * fine, if it doesn't, then the scan ended at the end of the string, which is
+ * fine too :) */
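+/*
+ * A worked example of the %n trick above (illustrative only): scanning
+ * "10.0.0.1" with nob == 8 leaves n == 8, whether %n is assigned or the scan
+ * simply stops at the NUL, so the address is accepted; scanning
+ * "10.0.0.1junk" with nob == 12 sets n == 8 != nob, so the trailing junk is
+ * rejected.
+ */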
+
+int
+libcfs_ip_str2addr(const char *str, int nob, __u32 *addr)
+{
+ int a;
+ int b;
+ int c;
+ int d;
+ int n = nob; /* XscanfX */
+
+ /* numeric IP? */
+ if (sscanf(str, "%u.%u.%u.%u%n", &a, &b, &c, &d, &n) >= 4 &&
+ n == nob &&
+ (a & ~0xff) == 0 && (b & ~0xff) == 0 &&
+ (c & ~0xff) == 0 && (d & ~0xff) == 0) {
+ *addr = ((a<<24)|(b<<16)|(c<<8)|d);
+ return 1;
+ }
+
+ return 0;
+}
+
+void
+libcfs_decnum_addr2str(__u32 addr, char *str)
+{
+ snprintf(str, LNET_NIDSTR_SIZE, "%u", addr);
+}
+
+void
+libcfs_hexnum_addr2str(__u32 addr, char *str)
+{
+ snprintf(str, LNET_NIDSTR_SIZE, "0x%x", addr);
+}
+
+int
+libcfs_num_str2addr(const char *str, int nob, __u32 *addr)
+{
+ int n;
+
+ n = nob;
+ if (sscanf(str, "0x%x%n", addr, &n) >= 1 && n == nob)
+ return 1;
+
+ n = nob;
+ if (sscanf(str, "0X%x%n", addr, &n) >= 1 && n == nob)
+ return 1;
+
+ n = nob;
+ if (sscanf(str, "%u%n", addr, &n) >= 1 && n == nob)
+ return 1;
+
+ return 0;
+}
+
+struct netstrfns *
+libcfs_lnd2netstrfns(int lnd)
+{
+ int i;
+
+ if (lnd >= 0)
+ for (i = 0; i < libcfs_nnetstrfns; i++)
+ if (lnd == libcfs_netstrfns[i].nf_type)
+ return &libcfs_netstrfns[i];
+
+ return NULL;
+}
+
+struct netstrfns *
+libcfs_namenum2netstrfns(const char *name)
+{
+ struct netstrfns *nf;
+ int i;
+
+ for (i = 0; i < libcfs_nnetstrfns; i++) {
+ nf = &libcfs_netstrfns[i];
+ if (nf->nf_type >= 0 &&
+ !strncmp(name, nf->nf_name, strlen(nf->nf_name)))
+ return nf;
+ }
+ return NULL;
+}
+
+struct netstrfns *
+libcfs_name2netstrfns(const char *name)
+{
+ int i;
+
+ for (i = 0; i < libcfs_nnetstrfns; i++)
+ if (libcfs_netstrfns[i].nf_type >= 0 &&
+ !strcmp(libcfs_netstrfns[i].nf_name, name))
+ return &libcfs_netstrfns[i];
+
+ return NULL;
+}
+
+int
+libcfs_isknown_lnd(int type)
+{
+ return libcfs_lnd2netstrfns(type) != NULL;
+}
+
+char *
+libcfs_lnd2modname(int lnd)
+{
+ struct netstrfns *nf = libcfs_lnd2netstrfns(lnd);
+
+ return (nf == NULL) ? NULL : nf->nf_modname;
+}
+
+char *
+libcfs_lnd2str(int lnd)
+{
+ char *str;
+ struct netstrfns *nf = libcfs_lnd2netstrfns(lnd);
+
+ if (nf != NULL)
+ return nf->nf_name;
+
+ str = libcfs_next_nidstring();
+ snprintf(str, LNET_NIDSTR_SIZE, "?%u?", lnd);
+ return str;
+}
+
+int
+libcfs_str2lnd(const char *str)
+{
+ struct netstrfns *nf = libcfs_name2netstrfns(str);
+
+ if (nf != NULL)
+ return nf->nf_type;
+
+ return -1;
+}
+
+char *
+libcfs_net2str(__u32 net)
+{
+ int lnd = LNET_NETTYP(net);
+ int num = LNET_NETNUM(net);
+ struct netstrfns *nf = libcfs_lnd2netstrfns(lnd);
+ char *str = libcfs_next_nidstring();
+
+ if (nf == NULL)
+ snprintf(str, LNET_NIDSTR_SIZE, "<%u:%u>", lnd, num);
+ else if (num == 0)
+ snprintf(str, LNET_NIDSTR_SIZE, "%s", nf->nf_name);
+ else
+ snprintf(str, LNET_NIDSTR_SIZE, "%s%u", nf->nf_name, num);
+
+ return str;
+}
+
+char *
+libcfs_nid2str(lnet_nid_t nid)
+{
+ __u32 addr = LNET_NIDADDR(nid);
+ __u32 net = LNET_NIDNET(nid);
+ int lnd = LNET_NETTYP(net);
+ int nnum = LNET_NETNUM(net);
+ struct netstrfns *nf;
+ char *str;
+ int nob;
+
+ if (nid == LNET_NID_ANY)
+ return "<?>";
+
+ nf = libcfs_lnd2netstrfns(lnd);
+ str = libcfs_next_nidstring();
+
+ if (nf == NULL)
+ snprintf(str, LNET_NIDSTR_SIZE, "%x@<%u:%u>", addr, lnd, nnum);
+ else {
+ nf->nf_addr2str(addr, str);
+ nob = strlen(str);
+ if (nnum == 0)
+ snprintf(str + nob, LNET_NIDSTR_SIZE - nob, "@%s",
+ nf->nf_name);
+ else
+ snprintf(str + nob, LNET_NIDSTR_SIZE - nob, "@%s%u",
+ nf->nf_name, nnum);
+ }
+
+ return str;
+}
+
+static struct netstrfns *
+libcfs_str2net_internal(const char *str, __u32 *net)
+{
+ struct netstrfns *uninitialized_var(nf);
+ int nob;
+ int netnum;
+ int i;
+
+ for (i = 0; i < libcfs_nnetstrfns; i++) {
+ nf = &libcfs_netstrfns[i];
+ if (nf->nf_type >= 0 &&
+ !strncmp(str, nf->nf_name, strlen(nf->nf_name)))
+ break;
+ }
+
+ if (i == libcfs_nnetstrfns)
+ return NULL;
+
+ nob = strlen(nf->nf_name);
+
+ if (strlen(str) == (unsigned int)nob) {
+ netnum = 0;
+ } else {
+ if (nf->nf_type == LOLND) /* net number not allowed */
+ return NULL;
+
+ str += nob;
+ i = strlen(str);
+ if (sscanf(str, "%u%n", &netnum, &i) < 1 ||
+ i != (int)strlen(str))
+ return NULL;
+ }
+
+ *net = LNET_MKNET(nf->nf_type, netnum);
+ return nf;
+}
+
+__u32
+libcfs_str2net(const char *str)
+{
+ __u32 net;
+
+ if (libcfs_str2net_internal(str, &net) != NULL)
+ return net;
+
+ return LNET_NIDNET(LNET_NID_ANY);
+}
+
+lnet_nid_t
+libcfs_str2nid(const char *str)
+{
+ const char *sep = strchr(str, '@');
+ struct netstrfns *nf;
+ __u32 net;
+ __u32 addr;
+
+ if (sep != NULL) {
+ nf = libcfs_str2net_internal(sep + 1, &net);
+ if (nf == NULL)
+ return LNET_NID_ANY;
+ } else {
+ sep = str + strlen(str);
+ net = LNET_MKNET(SOCKLND, 0);
+ nf = libcfs_lnd2netstrfns(SOCKLND);
+  LASSERT(nf != NULL);
+ }
+
+ if (!nf->nf_str2addr(str, (int)(sep - str), &addr))
+ return LNET_NID_ANY;
+
+ return LNET_MKNID(net, addr);
+}
+
+char *
+libcfs_id2str(lnet_process_id_t id)
+{
+ char *str = libcfs_next_nidstring();
+
+ if (id.pid == LNET_PID_ANY) {
+ snprintf(str, LNET_NIDSTR_SIZE,
+ "LNET_PID_ANY-%s", libcfs_nid2str(id.nid));
+ return str;
+ }
+
+ snprintf(str, LNET_NIDSTR_SIZE, "%s%u-%s",
+ ((id.pid & LNET_PID_USERFLAG) != 0) ? "U" : "",
+ (id.pid & ~LNET_PID_USERFLAG), libcfs_nid2str(id.nid));
+ return str;
+}
+
+int
+libcfs_str2anynid(lnet_nid_t *nidp, const char *str)
+{
+ if (!strcmp(str, "*")) {
+ *nidp = LNET_NID_ANY;
+ return 1;
+ }
+
+ *nidp = libcfs_str2nid(str);
+ return *nidp != LNET_NID_ANY;
+}
+
+/**
+ * Nid range list syntax.
+ * \verbatim
+ *
+ * <nidlist> :== <nidrange> [ ' ' <nidrange> ]
+ * <nidrange> :== <addrrange> '@' <net>
+ * <addrrange> :== '*' |
+ * <ipaddr_range> |
+ * <cfs_expr_list>
+ * <ipaddr_range> :== <cfs_expr_list>.<cfs_expr_list>.<cfs_expr_list>.
+ * <cfs_expr_list>
+ * <cfs_expr_list> :== <number> |
+ * <expr_list>
+ * <expr_list> :== '[' <range_expr> [ ',' <range_expr>] ']'
+ * <range_expr> :== <number> |
+ * <number> '-' <number> |
+ * <number> '-' <number> '/' <number>
+ * <net> :== <netname> | <netname><number>
+ * <netname> :== "lo" | "tcp" | "o2ib" | "cib" | "openib" | "iib" |
+ * "vib" | "ra" | "elan" | "mx" | "ptl"
+ * \endverbatim
+ */
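+/*
+ * A few illustrative examples of strings accepted by this grammar (assuming
+ * the named networks are configured):
+ *
+ *   "*@tcp"                      every NID on network tcp0
+ *   "192.168.0.[2-10/2]@tcp"     192.168.0.2@tcp, 192.168.0.4@tcp, ...,
+ *                                192.168.0.10@tcp
+ *   "[1-3]@elan5 10.0.0.1@o2ib"  NIDs 1-3 on elan5 plus 10.0.0.1@o2ib
+ */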
+
+/**
+ * Structure to represent \<nidrange\> token of the syntax.
+ *
+ * One of this is created for each \<net\> parsed.
+ */
+struct nidrange {
+ /**
+ * Link to list of this structures which is built on nid range
+ * list parsing.
+ */
+ struct list_head nr_link;
+ /**
+ * List head for addrrange::ar_link.
+ */
+ struct list_head nr_addrranges;
+ /**
+ * Flag indicating that *@<net> is found.
+ */
+ int nr_all;
+ /**
+ * Pointer to corresponding element of libcfs_netstrfns.
+ */
+ struct netstrfns *nr_netstrfns;
+ /**
+ * Number of network. E.g. 5 if \<net\> is "elan5".
+ */
+ int nr_netnum;
+};
+
+/**
+ * Structure to represent \<addrrange\> token of the syntax.
+ */
+struct addrrange {
+ /**
+ * Link to nidrange::nr_addrranges.
+ */
+ struct list_head ar_link;
+ /**
+ * List head for cfs_expr_list::el_list.
+ */
+ struct list_head ar_numaddr_ranges;
+};
+
+/**
+ * Nf_parse_addrlist method for networks using numeric addresses.
+ *
+ * Examples of such networks are gm and elan.
+ *
+ * \retval 0 if \a str parsed to numeric address
+ * \retval errno otherwise
+ */
+static int
+libcfs_num_parse(char *str, int len, struct list_head *list)
+{
+ struct cfs_expr_list *el;
+ int rc;
+
+ rc = cfs_expr_list_parse(str, len, 0, MAX_NUMERIC_VALUE, &el);
+ if (rc == 0)
+ list_add_tail(&el->el_link, list);
+
+ return rc;
+}
+
+/**
+ * Parses \<addrrange\> token of the syntax.
+ *
+ * Allocates struct addrrange and links to \a nidrange via
+ * (nidrange::nr_addrranges)
+ *
+ * \retval 1 if \a src parses to '*' | \<ipaddr_range\> | \<cfs_expr_list\>
+ * \retval 0 otherwise
+ */
+static int
+parse_addrange(const struct cfs_lstr *src, struct nidrange *nidrange)
+{
+ struct addrrange *addrrange;
+
+ if (src->ls_len == 1 && src->ls_str[0] == '*') {
+ nidrange->nr_all = 1;
+ return 1;
+ }
+
+ LIBCFS_ALLOC(addrrange, sizeof(struct addrrange));
+ if (addrrange == NULL)
+ return 0;
+ list_add_tail(&addrrange->ar_link, &nidrange->nr_addrranges);
+ INIT_LIST_HEAD(&addrrange->ar_numaddr_ranges);
+
+ return nidrange->nr_netstrfns->nf_parse_addrlist(src->ls_str,
+ src->ls_len,
+ &addrrange->ar_numaddr_ranges);
+}
+
+/**
+ * Finds or creates struct nidrange.
+ *
+ * Checks if \a src is a valid network name, looks for corresponding
+ * nidrange in the list of nidranges (\a nidlist), creates a new struct
+ * nidrange if it is not found.
+ *
+ * \retval pointer to struct nidrange matching network specified via \a src
+ * \retval NULL if \a src does not match any network
+ */
+static struct nidrange *
+add_nidrange(const struct cfs_lstr *src,
+ struct list_head *nidlist)
+{
+ struct netstrfns *nf;
+ struct nidrange *nr;
+ int endlen;
+ unsigned netnum;
+
+ if (src->ls_len >= LNET_NIDSTR_SIZE)
+ return NULL;
+
+ nf = libcfs_namenum2netstrfns(src->ls_str);
+ if (nf == NULL)
+ return NULL;
+ endlen = src->ls_len - strlen(nf->nf_name);
+ if (endlen == 0)
+ /* network name only, e.g. "elan" or "tcp" */
+ netnum = 0;
+ else {
+ /* e.g. "elan25" or "tcp23", refuse to parse if
+ * network name is not appended with decimal or
+ * hexadecimal number */
+ if (!cfs_str2num_check(src->ls_str + strlen(nf->nf_name),
+ endlen, &netnum, 0, MAX_NUMERIC_VALUE))
+ return NULL;
+ }
+
+ list_for_each_entry(nr, nidlist, nr_link) {
+ if (nr->nr_netstrfns != nf)
+ continue;
+ if (nr->nr_netnum != netnum)
+ continue;
+ return nr;
+ }
+
+ LIBCFS_ALLOC(nr, sizeof(struct nidrange));
+ if (nr == NULL)
+ return NULL;
+ list_add_tail(&nr->nr_link, nidlist);
+ INIT_LIST_HEAD(&nr->nr_addrranges);
+ nr->nr_netstrfns = nf;
+ nr->nr_all = 0;
+ nr->nr_netnum = netnum;
+
+ return nr;
+}
+
+/**
+ * Parses \<nidrange\> token of the syntax.
+ *
+ * \retval 1 if \a src parses to \<addrrange\> '@' \<net\>
+ * \retval 0 otherwise
+ */
+static int
+parse_nidrange(struct cfs_lstr *src, struct list_head *nidlist)
+{
+ struct cfs_lstr addrrange;
+ struct cfs_lstr net;
+ struct cfs_lstr tmp;
+ struct nidrange *nr;
+
+ tmp = *src;
+ if (cfs_gettok(src, '@', &addrrange) == 0)
+ goto failed;
+
+ if (cfs_gettok(src, '@', &net) == 0 || src->ls_str != NULL)
+ goto failed;
+
+ nr = add_nidrange(&net, nidlist);
+ if (nr == NULL)
+ goto failed;
+
+ if (parse_addrange(&addrrange, nr) != 0)
+ goto failed;
+
+ return 1;
+ failed:
+ CWARN("can't parse nidrange: \"%.*s\"\n", tmp.ls_len, tmp.ls_str);
+ return 0;
+}
+
+/**
+ * Frees addrrange structures of \a list.
+ *
+ * For each struct addrrange structure found on \a list it frees
+ * cfs_expr_list list attached to it and frees the addrrange itself.
+ *
+ * \retval none
+ */
+static void
+free_addrranges(struct list_head *list)
+{
+ while (!list_empty(list)) {
+ struct addrrange *ar;
+
+ ar = list_entry(list->next, struct addrrange, ar_link);
+
+ cfs_expr_list_free_list(&ar->ar_numaddr_ranges);
+ list_del(&ar->ar_link);
+ LIBCFS_FREE(ar, sizeof(struct addrrange));
+ }
+}
+
+/**
+ * Frees nidrange structures of \a list.
+ *
+ * For each struct nidrange structure found on \a list it frees
+ * addrrange list attached to it and frees the nidrange itself.
+ *
+ * \retval none
+ */
+void
+cfs_free_nidlist(struct list_head *list)
+{
+ struct list_head *pos, *next;
+ struct nidrange *nr;
+
+ list_for_each_safe(pos, next, list) {
+ nr = list_entry(pos, struct nidrange, nr_link);
+ free_addrranges(&nr->nr_addrranges);
+ list_del(pos);
+ LIBCFS_FREE(nr, sizeof(struct nidrange));
+ }
+}
+
+/**
+ * Parses nid range list.
+ *
+ * Parses with rigorous syntax and overflow checking \a str into
+ * \<nidrange\> [ ' ' \<nidrange\> ], compiles \a str into a set of
+ * structures and links them to \a nidlist. The resulting list can be
+ * used to match a NID against the set of NIDs defined by \a str.
+ * \see cfs_match_nid
+ *
+ * \retval 1 on success
+ * \retval 0 otherwise
+ */
+int
+cfs_parse_nidlist(char *str, int len, struct list_head *nidlist)
+{
+ struct cfs_lstr src;
+ struct cfs_lstr res;
+ int rc;
+
+ src.ls_str = str;
+ src.ls_len = len;
+ INIT_LIST_HEAD(nidlist);
+ while (src.ls_str) {
+ rc = cfs_gettok(&src, ' ', &res);
+ if (rc == 0) {
+ cfs_free_nidlist(nidlist);
+ return 0;
+ }
+ rc = parse_nidrange(&res, nidlist);
+ if (rc == 0) {
+ cfs_free_nidlist(nidlist);
+ return 0;
+ }
+ }
+ return 1;
+}
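+/*
+ * Illustrative usage sketch (not called from this file; the buffer contents
+ * and debug mask are only an example): compile a NID list once, match NIDs
+ * against it, then release it.
+ *
+ *	struct list_head nidlist;
+ *	char buf[] = "192.168.0.[1-100]@tcp";
+ *
+ *	if (cfs_parse_nidlist(buf, strlen(buf), &nidlist)) {
+ *		if (cfs_match_nid(libcfs_str2nid("192.168.0.7@tcp"),
+ *				  &nidlist))
+ *			CDEBUG(D_NET, "matched\n");
+ *		cfs_free_nidlist(&nidlist);
+ *	}
+ */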
+
+/**
+ * Nf_match_addr method for networks using numeric addresses
+ *
+ * \retval 1 on match
+ * \retval 0 otherwise
+ */
+static int
+libcfs_num_match(__u32 addr, struct list_head *numaddr)
+{
+ struct cfs_expr_list *el;
+
+ LASSERT(!list_empty(numaddr));
+ el = list_entry(numaddr->next, struct cfs_expr_list, el_link);
+
+ return cfs_expr_list_match(addr, el);
+}
+
+/**
+ * Matches a nid (\a nid) against the compiled list of nidranges (\a nidlist).
+ *
+ * \see cfs_parse_nidlist()
+ *
+ * \retval 1 on match
+ * \retval 0 otherwise
+ */
+int cfs_match_nid(lnet_nid_t nid, struct list_head *nidlist)
+{
+ struct nidrange *nr;
+ struct addrrange *ar;
+
+ list_for_each_entry(nr, nidlist, nr_link) {
+ if (nr->nr_netstrfns->nf_type != LNET_NETTYP(LNET_NIDNET(nid)))
+ continue;
+ if (nr->nr_netnum != LNET_NETNUM(LNET_NIDNET(nid)))
+ continue;
+ if (nr->nr_all)
+ return 1;
+ list_for_each_entry(ar, &nr->nr_addrranges, ar_link)
+ if (nr->nr_netstrfns->nf_match_addr(LNET_NIDADDR(nid),
+ &ar->ar_numaddr_ranges))
+ return 1;
+ }
+ return 0;
+}
+
+EXPORT_SYMBOL(libcfs_isknown_lnd);
+EXPORT_SYMBOL(libcfs_lnd2modname);
+EXPORT_SYMBOL(libcfs_lnd2str);
+EXPORT_SYMBOL(libcfs_str2lnd);
+EXPORT_SYMBOL(libcfs_net2str);
+EXPORT_SYMBOL(libcfs_nid2str);
+EXPORT_SYMBOL(libcfs_str2net);
+EXPORT_SYMBOL(libcfs_str2nid);
+EXPORT_SYMBOL(libcfs_id2str);
+EXPORT_SYMBOL(libcfs_str2anynid);
+EXPORT_SYMBOL(cfs_free_nidlist);
+EXPORT_SYMBOL(cfs_parse_nidlist);
+EXPORT_SYMBOL(cfs_match_nid);
diff --git a/drivers/staging/lustre/lustre/libcfs/prng.c b/drivers/staging/lustre/lustre/libcfs/prng.c
new file mode 100644
index 0000000..69224d8
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/prng.c
@@ -0,0 +1,139 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/libcfs/prng.c
+ *
+ * concatenation of the following two 16-bit multiply-with-carry generators:
+ * x(n)=a*x(n-1)+carry mod 2^16 and y(n)=b*y(n-1)+carry mod 2^16,
+ * number and carry packed within the same 32-bit integer.
+ * Algorithm recommended by Marsaglia.
+ */
+
+#include <linux/libcfs/libcfs.h>
+
+/*
+From: George Marsaglia <geo@stat.fsu.edu>
+Newsgroups: sci.math
+Subject: Re: A RANDOM NUMBER GENERATOR FOR C
+Date: Tue, 30 Sep 1997 05:29:35 -0700
+
+ * You may replace the two constants 36969 and 18000 by any
+ * pair of distinct constants from this list:
+ * 18000 18030 18273 18513 18879 19074 19098 19164 19215 19584
+ * 19599 19950 20088 20508 20544 20664 20814 20970 21153 21243
+ * 21423 21723 21954 22125 22188 22293 22860 22938 22965 22974
+ * 23109 23124 23163 23208 23508 23520 23553 23658 23865 24114
+ * 24219 24660 24699 24864 24948 25023 25308 25443 26004 26088
+ * 26154 26550 26679 26838 27183 27258 27753 27795 27810 27834
+ * 27960 28320 28380 28689 28710 28794 28854 28959 28980 29013
+ * 29379 29889 30135 30345 30459 30714 30903 30963 31059 31083
+ * (or any other 16-bit constants k for which both k*2^16-1
+ * and k*2^15-1 are prime) */
+
+#define RANDOM_CONST_A 18030
+#define RANDOM_CONST_B 29013
+
+static unsigned int seed_x = 521288629;
+static unsigned int seed_y = 362436069;
+
+/**
+ * cfs_rand - creates new seeds
+ *
+ * First it creates new seeds from the previous seeds. Then it generates a
+ * new pseudo-random number for use.
+ *
+ * Returns a pseudo-random 32-bit integer
+ */
+unsigned int cfs_rand(void)
+{
+ seed_x = RANDOM_CONST_A * (seed_x & 65535) + (seed_x >> 16);
+ seed_y = RANDOM_CONST_B * (seed_y & 65535) + (seed_y >> 16);
+
+ return ((seed_x << 16) + (seed_y & 65535));
+}
+EXPORT_SYMBOL(cfs_rand);
+
+/**
+ * cfs_srand - sets the initial seeds
+ * @seed1 : (seed_x) should have the most entropy in the low bits of the word
+ * @seed2 : (seed_y) should have the most entropy in the high bits of the word
+ *
+ * Replaces the original seeds with new values. Used to reseed the
+ * pseudo-random number generator.
+ */
+void cfs_srand(unsigned int seed1, unsigned int seed2)
+{
+ if (seed1)
+ seed_x = seed1; /* use default seeds if parameter is 0 */
+ if (seed2)
+ seed_y = seed2;
+}
+EXPORT_SYMBOL(cfs_srand);
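+/*
+ * Illustrative usage sketch (assumptions: the caller seeds once early on,
+ * e.g. from the low bits of jiffies, and only needs a non-cryptographic
+ * value; "backoff"/"max_backoff" are hypothetical names):
+ *
+ *	cfs_srand(jiffies, 0);		seed_y keeps its default
+ *	backoff = cfs_rand() % max_backoff;
+ */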
+
+/**
+ * cfs_get_random_bytes - generate a bunch of random numbers
+ * @buf : buffer to fill with random numbers
+ * @size: size of passed in buffer
+ *
+ * Fills a buffer with random bytes
+ */
+void cfs_get_random_bytes(void *buf, int size)
+{
+ int *p = buf;
+ int rem, tmp;
+
+ LASSERT(size >= 0);
+
+ rem = min((int)((unsigned long)buf & (sizeof(int) - 1)), size);
+ if (rem) {
+ get_random_bytes(&tmp, sizeof(tmp));
+ tmp ^= cfs_rand();
+ memcpy(buf, &tmp, rem);
+ p = buf + rem;
+ size -= rem;
+ }
+
+ while (size >= sizeof(int)) {
+ get_random_bytes(&tmp, sizeof(tmp));
+ *p = cfs_rand() ^ tmp;
+ size -= sizeof(int);
+ p++;
+ }
+ buf = p;
+ if (size) {
+ get_random_bytes(&tmp, sizeof(tmp));
+ tmp ^= cfs_rand();
+ memcpy(buf, &tmp, size);
+ }
+}
+EXPORT_SYMBOL(cfs_get_random_bytes);
diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.c b/drivers/staging/lustre/lustre/libcfs/tracefile.c
new file mode 100644
index 0000000..357f400
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/tracefile.c
@@ -0,0 +1,1192 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/libcfs/tracefile.c
+ *
+ * Author: Zach Brown <zab@clusterfs.com>
+ * Author: Phil Schwan <phil@clusterfs.com>
+ */
+
+
+#define DEBUG_SUBSYSTEM S_LNET
+#define LUSTRE_TRACEFILE_PRIVATE
+#include "tracefile.h"
+
+#include <linux/libcfs/libcfs.h>
+
+/* XXX move things up to the top, comment */
+union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned;
+
+char cfs_tracefile[TRACEFILE_NAME_SIZE];
+long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
+static struct tracefiled_ctl trace_tctl;
+struct mutex cfs_trace_thread_mutex;
+static int thread_running;
+
+atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
+
+static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
+ struct cfs_trace_cpu_data *tcd);
+
+static inline struct cfs_trace_page *
+cfs_tage_from_list(struct list_head *list)
+{
+ return list_entry(list, struct cfs_trace_page, linkage);
+}
+
+static struct cfs_trace_page *cfs_tage_alloc(int gfp)
+{
+ struct page *page;
+ struct cfs_trace_page *tage;
+
+ /* My caller is trying to free memory */
+ if (!in_interrupt() && memory_pressure_get())
+ return NULL;
+
+ /*
+ * Don't spam console with allocation failures: they will be reported
+ * by upper layer anyway.
+ */
+ gfp |= __GFP_NOWARN;
+ page = alloc_page(gfp);
+ if (page == NULL)
+ return NULL;
+
+ tage = kmalloc(sizeof(*tage), gfp);
+ if (tage == NULL) {
+ __free_page(page);
+ return NULL;
+ }
+
+ tage->page = page;
+ atomic_inc(&cfs_tage_allocated);
+ return tage;
+}
+
+static void cfs_tage_free(struct cfs_trace_page *tage)
+{
+ __LASSERT(tage != NULL);
+ __LASSERT(tage->page != NULL);
+
+ __free_page(tage->page);
+ kfree(tage);
+ atomic_dec(&cfs_tage_allocated);
+}
+
+static void cfs_tage_to_tail(struct cfs_trace_page *tage,
+ struct list_head *queue)
+{
+ __LASSERT(tage != NULL);
+ __LASSERT(queue != NULL);
+
+ list_move_tail(&tage->linkage, queue);
+}
+
+int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, int gfp,
+ struct list_head *stock)
+{
+ int i;
+
+ /*
+ * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
+ * from here: this will lead to infinite recursion.
+ */
+
+ for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES; ++i) {
+ struct cfs_trace_page *tage;
+
+ tage = cfs_tage_alloc(gfp);
+ if (tage == NULL)
+ break;
+ list_add_tail(&tage->linkage, stock);
+ }
+ return i;
+}
+
+/* return a page that has 'len' bytes left at the end */
+static struct cfs_trace_page *
+cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
+{
+ struct cfs_trace_page *tage;
+
+ if (tcd->tcd_cur_pages > 0) {
+ __LASSERT(!list_empty(&tcd->tcd_pages));
+ tage = cfs_tage_from_list(tcd->tcd_pages.prev);
+ if (tage->used + len <= PAGE_CACHE_SIZE)
+ return tage;
+ }
+
+ if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
+ if (tcd->tcd_cur_stock_pages > 0) {
+ tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev);
+ --tcd->tcd_cur_stock_pages;
+ list_del_init(&tage->linkage);
+ } else {
+ tage = cfs_tage_alloc(GFP_ATOMIC);
+ if (unlikely(tage == NULL)) {
+ if ((!memory_pressure_get() ||
+ in_interrupt()) && printk_ratelimit())
+ printk(KERN_WARNING
+ "cannot allocate a tage (%ld)\n",
+ tcd->tcd_cur_pages);
+ return NULL;
+ }
+ }
+
+ tage->used = 0;
+ tage->cpu = smp_processor_id();
+ tage->type = tcd->tcd_type;
+ list_add_tail(&tage->linkage, &tcd->tcd_pages);
+ tcd->tcd_cur_pages++;
+
+ if (tcd->tcd_cur_pages > 8 && thread_running) {
+ struct tracefiled_ctl *tctl = &trace_tctl;
+ /*
+ * wake up tracefiled to process some pages.
+ */
+ wake_up(&tctl->tctl_waitq);
+ }
+ return tage;
+ }
+ return NULL;
+}
+
+static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
+{
+ int pgcount = tcd->tcd_cur_pages / 10;
+ struct page_collection pc;
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
+
+ /*
+ * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
+ * from here: this will lead to infinite recursion.
+ */
+
+ if (printk_ratelimit())
+ printk(KERN_WARNING "debug daemon buffer overflowed; "
+ "discarding 10%% of pages (%d of %ld)\n",
+ pgcount + 1, tcd->tcd_cur_pages);
+
+ INIT_LIST_HEAD(&pc.pc_pages);
+ spin_lock_init(&pc.pc_lock);
+
+ list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
+ if (pgcount-- == 0)
+ break;
+
+ list_move_tail(&tage->linkage, &pc.pc_pages);
+ tcd->tcd_cur_pages--;
+ }
+ put_pages_on_tcd_daemon_list(&pc, tcd);
+}
+
+/* return a page that has 'len' bytes left at the end */
+static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
+ unsigned long len)
+{
+ struct cfs_trace_page *tage;
+
+ /*
+ * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
+ * from here: this will lead to infinite recursion.
+ */
+
+ if (len > PAGE_CACHE_SIZE) {
+ printk(KERN_ERR
+ "cowardly refusing to write %lu bytes in a page\n", len);
+ return NULL;
+ }
+
+ tage = cfs_trace_get_tage_try(tcd, len);
+ if (tage != NULL)
+ return tage;
+ if (thread_running)
+ cfs_tcd_shrink(tcd);
+ if (tcd->tcd_cur_pages > 0) {
+ tage = cfs_tage_from_list(tcd->tcd_pages.next);
+ tage->used = 0;
+ cfs_tage_to_tail(tage, &tcd->tcd_pages);
+ }
+ return tage;
+}
+
+int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
+ const char *format, ...)
+{
+ va_list args;
+ int rc;
+
+ va_start(args, format);
+ rc = libcfs_debug_vmsg2(msgdata, format, args, NULL);
+ va_end(args);
+
+ return rc;
+}
+EXPORT_SYMBOL(libcfs_debug_msg);
+
+int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
+ const char *format1, va_list args,
+ const char *format2, ...)
+{
+ struct cfs_trace_cpu_data *tcd = NULL;
+ struct ptldebug_header header = {0};
+ struct cfs_trace_page *tage;
+ /* string_buf is used only if tcd != NULL, and is always set then */
+ char *string_buf = NULL;
+ char *debug_buf;
+ int known_size;
+ int needed = 85; /* average message length */
+ int max_nob;
+ va_list ap;
+ int depth;
+ int i;
+ int remain;
+ int mask = msgdata->msg_mask;
+ const char *file = kbasename(msgdata->msg_file);
+ cfs_debug_limit_state_t *cdls = msgdata->msg_cdls;
+
+ tcd = cfs_trace_get_tcd();
+
+ /* cfs_trace_get_tcd() grabs a lock, which disables preemption and
+ * pins us to a particular CPU. This avoids an smp_processor_id()
+ * warning on Linux when debugging is enabled. */
+ cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK());
+
+ if (tcd == NULL) /* arch may not log in IRQ context */
+ goto console;
+
+ if (tcd->tcd_cur_pages == 0)
+ header.ph_flags |= PH_FLAG_FIRST_RECORD;
+
+ if (tcd->tcd_shutting_down) {
+ cfs_trace_put_tcd(tcd);
+ tcd = NULL;
+ goto console;
+ }
+
+ depth = __current_nesting_level();
+ known_size = strlen(file) + 1 + depth;
+ if (msgdata->msg_fn)
+ known_size += strlen(msgdata->msg_fn) + 1;
+
+ if (libcfs_debug_binary)
+ known_size += sizeof(header);
+
+ /*
+  * '2' is used because vsnprintf returns the real size required for the
+  * output _without_ the terminating NUL, so the loop below retries once
+  * with a larger buffer if the initial estimate of 'needed' is too small.
+  */
+ for (i = 0; i < 2; i++) {
+ tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
+ if (tage == NULL) {
+ if (needed + known_size > PAGE_CACHE_SIZE)
+ mask |= D_ERROR;
+
+ cfs_trace_put_tcd(tcd);
+ tcd = NULL;
+ goto console;
+ }
+
+ string_buf = (char *)page_address(tage->page) +
+ tage->used + known_size;
+
+ max_nob = PAGE_CACHE_SIZE - tage->used - known_size;
+ if (max_nob <= 0) {
+ printk(KERN_EMERG "negative max_nob: %d\n",
+ max_nob);
+ mask |= D_ERROR;
+ cfs_trace_put_tcd(tcd);
+ tcd = NULL;
+ goto console;
+ }
+
+ needed = 0;
+ if (format1) {
+ va_copy(ap, args);
+ needed = vsnprintf(string_buf, max_nob, format1, ap);
+ va_end(ap);
+ }
+
+ if (format2) {
+ remain = max_nob - needed;
+ if (remain < 0)
+ remain = 0;
+
+ va_start(ap, format2);
+ needed += vsnprintf(string_buf + needed, remain,
+ format2, ap);
+ va_end(ap);
+ }
+
+ if (needed < max_nob) /* well. printing ok.. */
+ break;
+ }
+
+ if (*(string_buf+needed-1) != '\n')
+ printk(KERN_INFO "format at %s:%d:%s doesn't end in "
+ "newline\n", file, msgdata->msg_line, msgdata->msg_fn);
+
+ header.ph_len = known_size + needed;
+ debug_buf = (char *)page_address(tage->page) + tage->used;
+
+ if (libcfs_debug_binary) {
+ memcpy(debug_buf, &header, sizeof(header));
+ tage->used += sizeof(header);
+ debug_buf += sizeof(header);
+ }
+
+ /* indent message according to the nesting level */
+ while (depth-- > 0) {
+ *(debug_buf++) = '.';
+  ++tage->used;
+ }
+
+ strcpy(debug_buf, file);
+ tage->used += strlen(file) + 1;
+ debug_buf += strlen(file) + 1;
+
+ if (msgdata->msg_fn) {
+ strcpy(debug_buf, msgdata->msg_fn);
+ tage->used += strlen(msgdata->msg_fn) + 1;
+ debug_buf += strlen(msgdata->msg_fn) + 1;
+ }
+
+ __LASSERT(debug_buf == string_buf);
+
+ tage->used += needed;
+ __LASSERT(tage->used <= PAGE_CACHE_SIZE);
+
+console:
+ if ((mask & libcfs_printk) == 0) {
+ /* no console output requested */
+ if (tcd != NULL)
+ cfs_trace_put_tcd(tcd);
+ return 1;
+ }
+
+ if (cdls != NULL) {
+ if (libcfs_console_ratelimit &&
+ cdls->cdls_next != 0 && /* not first time ever */
+ !cfs_time_after(cfs_time_current(), cdls->cdls_next)) {
+ /* skipping a console message */
+ cdls->cdls_count++;
+ if (tcd != NULL)
+ cfs_trace_put_tcd(tcd);
+ return 1;
+ }
+
+ if (cfs_time_after(cfs_time_current(), cdls->cdls_next +
+ libcfs_console_max_delay
+ + cfs_time_seconds(10))) {
+ /* last timeout was a long time ago */
+ cdls->cdls_delay /= libcfs_console_backoff * 4;
+ } else {
+ cdls->cdls_delay *= libcfs_console_backoff;
+
+ if (cdls->cdls_delay < libcfs_console_min_delay)
+ cdls->cdls_delay = libcfs_console_min_delay;
+ else if (cdls->cdls_delay > libcfs_console_max_delay)
+ cdls->cdls_delay = libcfs_console_max_delay;
+ }
+
+ /* ensure cdls_next is never zero after it's been seen */
+ cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1;
+ }
+
+ if (tcd != NULL) {
+ cfs_print_to_console(&header, mask, string_buf, needed, file,
+ msgdata->msg_fn);
+ cfs_trace_put_tcd(tcd);
+ } else {
+ string_buf = cfs_trace_get_console_buffer();
+
+ needed = 0;
+ if (format1 != NULL) {
+ va_copy(ap, args);
+ needed = vsnprintf(string_buf,
+ CFS_TRACE_CONSOLE_BUFFER_SIZE,
+ format1, ap);
+ va_end(ap);
+ }
+ if (format2 != NULL) {
+ remain = CFS_TRACE_CONSOLE_BUFFER_SIZE - needed;
+ if (remain > 0) {
+ va_start(ap, format2);
+ needed += vsnprintf(string_buf+needed, remain,
+ format2, ap);
+ va_end(ap);
+ }
+ }
+ cfs_print_to_console(&header, mask,
+ string_buf, needed, file, msgdata->msg_fn);
+
+ cfs_trace_put_console_buffer(string_buf);
+ }
+
+ if (cdls != NULL && cdls->cdls_count != 0) {
+ string_buf = cfs_trace_get_console_buffer();
+
+ needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
+ "Skipped %d previous similar message%s\n",
+ cdls->cdls_count,
+ (cdls->cdls_count > 1) ? "s" : "");
+
+ cfs_print_to_console(&header, mask,
+ string_buf, needed, file, msgdata->msg_fn);
+
+ cfs_trace_put_console_buffer(string_buf);
+ cdls->cdls_count = 0;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(libcfs_debug_vmsg2);
+
+void
+cfs_trace_assertion_failed(const char *str,
+ struct libcfs_debug_msg_data *msgdata)
+{
+ struct ptldebug_header hdr;
+
+ libcfs_panic_in_progress = 1;
+ libcfs_catastrophe = 1;
+ mb();
+
+ cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());
+
+ cfs_print_to_console(&hdr, D_EMERG, str, strlen(str),
+ msgdata->msg_file, msgdata->msg_fn);
+
+ panic("Lustre debug assertion failure\n");
+
+ /* not reached */
+}
+
+static void
+panic_collect_pages(struct page_collection *pc)
+{
+ /* Do the collect_pages job on a single CPU: assumes that all other
+ * CPUs have been stopped during a panic. If this isn't true for some
+ * arch, this will have to be implemented separately in each arch. */
+ int i;
+ int j;
+ struct cfs_trace_cpu_data *tcd;
+
+ INIT_LIST_HEAD(&pc->pc_pages);
+
+ cfs_tcd_for_each(tcd, i, j) {
+ list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
+ tcd->tcd_cur_pages = 0;
+
+ if (pc->pc_want_daemon_pages) {
+ list_splice_init(&tcd->tcd_daemon_pages,
+ &pc->pc_pages);
+ tcd->tcd_cur_daemon_pages = 0;
+ }
+ }
+}
+
+static void collect_pages_on_all_cpus(struct page_collection *pc)
+{
+ struct cfs_trace_cpu_data *tcd;
+ int i, cpu;
+
+ spin_lock(&pc->pc_lock);
+ for_each_possible_cpu(cpu) {
+ cfs_tcd_for_each_type_lock(tcd, i, cpu) {
+ list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
+ tcd->tcd_cur_pages = 0;
+ if (pc->pc_want_daemon_pages) {
+ list_splice_init(&tcd->tcd_daemon_pages,
+ &pc->pc_pages);
+ tcd->tcd_cur_daemon_pages = 0;
+ }
+ }
+ }
+ spin_unlock(&pc->pc_lock);
+}
+
+static void collect_pages(struct page_collection *pc)
+{
+ INIT_LIST_HEAD(&pc->pc_pages);
+
+ if (libcfs_panic_in_progress)
+ panic_collect_pages(pc);
+ else
+ collect_pages_on_all_cpus(pc);
+}
+
+static void put_pages_back_on_all_cpus(struct page_collection *pc)
+{
+ struct cfs_trace_cpu_data *tcd;
+ struct list_head *cur_head;
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
+ int i, cpu;
+
+ spin_lock(&pc->pc_lock);
+ for_each_possible_cpu(cpu) {
+ cfs_tcd_for_each_type_lock(tcd, i, cpu) {
+ cur_head = tcd->tcd_pages.next;
+
+ list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
+ linkage) {
+
+ __LASSERT_TAGE_INVARIANT(tage);
+
+ if (tage->cpu != cpu || tage->type != i)
+ continue;
+
+ cfs_tage_to_tail(tage, cur_head);
+ tcd->tcd_cur_pages++;
+ }
+ }
+ }
+ spin_unlock(&pc->pc_lock);
+}
+
+static void put_pages_back(struct page_collection *pc)
+{
+ if (!libcfs_panic_in_progress)
+ put_pages_back_on_all_cpus(pc);
+}
+
+/* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that
+ * we have a good amount of data at all times for dumping during an LBUG, even
+ * if we have been steadily writing (and otherwise discarding) pages via the
+ * debug daemon. */
+static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
+ struct cfs_trace_cpu_data *tcd)
+{
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
+
+ spin_lock(&pc->pc_lock);
+ list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
+
+ __LASSERT_TAGE_INVARIANT(tage);
+
+ if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
+ continue;
+
+ cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages);
+ tcd->tcd_cur_daemon_pages++;
+
+ if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
+ struct cfs_trace_page *victim;
+
+ __LASSERT(!list_empty(&tcd->tcd_daemon_pages));
+ victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next);
+
+ __LASSERT_TAGE_INVARIANT(victim);
+
+ list_del(&victim->linkage);
+ cfs_tage_free(victim);
+ tcd->tcd_cur_daemon_pages--;
+ }
+ }
+ spin_unlock(&pc->pc_lock);
+}
+
+static void put_pages_on_daemon_list(struct page_collection *pc)
+{
+ struct cfs_trace_cpu_data *tcd;
+ int i, cpu;
+
+ for_each_possible_cpu(cpu) {
+ cfs_tcd_for_each_type_lock(tcd, i, cpu)
+ put_pages_on_tcd_daemon_list(pc, tcd);
+ }
+}
+
+void cfs_trace_debug_print(void)
+{
+ struct page_collection pc;
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
+
+ spin_lock_init(&pc.pc_lock);
+
+ pc.pc_want_daemon_pages = 1;
+ collect_pages(&pc);
+ list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
+ char *p, *file, *fn;
+ struct page *page;
+
+ __LASSERT_TAGE_INVARIANT(tage);
+
+ page = tage->page;
+ p = page_address(page);
+ while (p < ((char *)page_address(page) + tage->used)) {
+ struct ptldebug_header *hdr;
+ int len;
+ hdr = (void *)p;
+ p += sizeof(*hdr);
+ file = p;
+ p += strlen(file) + 1;
+ fn = p;
+ p += strlen(fn) + 1;
+ len = hdr->ph_len - (int)(p - (char *)hdr);
+
+ cfs_print_to_console(hdr, D_EMERG, p, len, file, fn);
+
+ p += len;
+ }
+
+ list_del(&tage->linkage);
+ cfs_tage_free(tage);
+ }
+}
+
+int cfs_tracefile_dump_all_pages(char *filename)
+{
+ struct page_collection pc;
+ struct file *filp;
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
+ int rc;
+
+ DECL_MMSPACE;
+
+ cfs_tracefile_write_lock();
+
+ filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600);
+ if (IS_ERR(filp)) {
+ rc = PTR_ERR(filp);
+ filp = NULL;
+ printk(KERN_ERR "LustreError: can't open %s for dump: rc %d\n",
+ filename, rc);
+ goto out;
+ }
+
+ spin_lock_init(&pc.pc_lock);
+ pc.pc_want_daemon_pages = 1;
+ collect_pages(&pc);
+ if (list_empty(&pc.pc_pages)) {
+ rc = 0;
+ goto close;
+ }
+
+ /* ok, for now, just write the pages. in the future we'll be building
+ * iobufs with the pages and calling generic_direct_IO */
+ MMSPACE_OPEN;
+ list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
+
+ __LASSERT_TAGE_INVARIANT(tage);
+
+ rc = filp_write(filp, page_address(tage->page),
+ tage->used, filp_poff(filp));
+ if (rc != (int)tage->used) {
+ printk(KERN_WARNING "wanted to write %u but wrote "
+ "%d\n", tage->used, rc);
+ put_pages_back(&pc);
+ __LASSERT(list_empty(&pc.pc_pages));
+ break;
+ }
+ list_del(&tage->linkage);
+ cfs_tage_free(tage);
+ }
+ MMSPACE_CLOSE;
+ rc = filp_fsync(filp);
+ if (rc)
+ printk(KERN_ERR "sync returns %d\n", rc);
+close:
+ filp_close(filp, NULL);
+out:
+ cfs_tracefile_write_unlock();
+ return rc;
+}
+
+void cfs_trace_flush_pages(void)
+{
+ struct page_collection pc;
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
+
+ spin_lock_init(&pc.pc_lock);
+
+ pc.pc_want_daemon_pages = 1;
+ collect_pages(&pc);
+ list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
+
+ __LASSERT_TAGE_INVARIANT(tage);
+
+ list_del(&tage->linkage);
+ cfs_tage_free(tage);
+ }
+}
+
+int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
+ const char *usr_buffer, int usr_buffer_nob)
+{
+ int nob;
+
+ if (usr_buffer_nob > knl_buffer_nob)
+ return -EOVERFLOW;
+
+ if (copy_from_user((void *)knl_buffer,
+ (void *)usr_buffer, usr_buffer_nob))
+ return -EFAULT;
+
+ nob = strnlen(knl_buffer, usr_buffer_nob);
+ while (--nob >= 0) /* strip trailing whitespace */
+ if (!isspace(knl_buffer[nob]))
+ break;
+
+ if (nob < 0) /* empty string */
+ return -EINVAL;
+
+ if (nob == knl_buffer_nob) /* no space to terminate */
+ return -EOVERFLOW;
+
+ knl_buffer[nob + 1] = 0; /* terminate */
+ return 0;
+}
+EXPORT_SYMBOL(cfs_trace_copyin_string);
+
+int cfs_trace_copyout_string(char *usr_buffer, int usr_buffer_nob,
+ const char *knl_buffer, char *append)
+{
+ /* NB if 'append' != NULL, it's a single character to append to the
+ * copied out string - usually "\n", for /proc entries and "" (i.e. a
+ * terminating zero byte) for sysctl entries */
+ int nob = strlen(knl_buffer);
+
+ if (nob > usr_buffer_nob)
+ nob = usr_buffer_nob;
+
+ if (copy_to_user(usr_buffer, knl_buffer, nob))
+ return -EFAULT;
+
+ if (append != NULL && nob < usr_buffer_nob) {
+ if (copy_to_user(usr_buffer + nob, append, 1))
+ return -EFAULT;
+
+ nob++;
+ }
+
+ return nob;
+}
+EXPORT_SYMBOL(cfs_trace_copyout_string);
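+/*
+ * For example (illustrative only): copying the 3-byte string "foo" with
+ * append == "\n" into a sufficiently large user buffer copies 4 bytes and
+ * returns 4; with append == NULL it copies and returns 3.
+ */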
+
+int cfs_trace_allocate_string_buffer(char **str, int nob)
+{
+ if (nob > 2 * PAGE_CACHE_SIZE) /* string must be "sensible" */
+ return -EINVAL;
+
+ *str = kmalloc(nob, GFP_IOFS | __GFP_ZERO);
+ if (*str == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void cfs_trace_free_string_buffer(char *str, int nob)
+{
+ kfree(str);
+}
+
+int cfs_trace_dump_debug_buffer_usrstr(void *usr_str, int usr_str_nob)
+{
+ char *str;
+ int rc;
+
+ rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
+ if (rc != 0)
+ return rc;
+
+ rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
+ usr_str, usr_str_nob);
+ if (rc != 0)
+ goto out;
+
+ if (str[0] != '/') {
+ rc = -EINVAL;
+ goto out;
+ }
+ rc = cfs_tracefile_dump_all_pages(str);
+out:
+ cfs_trace_free_string_buffer(str, usr_str_nob + 1);
+ return rc;
+}
+
+int cfs_trace_daemon_command(char *str)
+{
+ int rc = 0;
+
+ cfs_tracefile_write_lock();
+
+ if (strcmp(str, "stop") == 0) {
+ cfs_tracefile_write_unlock();
+ cfs_trace_stop_thread();
+ cfs_tracefile_write_lock();
+ memset(cfs_tracefile, 0, sizeof(cfs_tracefile));
+
+ } else if (strncmp(str, "size=", 5) == 0) {
+ cfs_tracefile_size = simple_strtoul(str + 5, NULL, 0);
+ if (cfs_tracefile_size < 10 || cfs_tracefile_size > 20480)
+ cfs_tracefile_size = CFS_TRACEFILE_SIZE;
+ else
+ cfs_tracefile_size <<= 20;
+
+ } else if (strlen(str) >= sizeof(cfs_tracefile)) {
+ rc = -ENAMETOOLONG;
+ } else if (str[0] != '/') {
+ rc = -EINVAL;
+ } else {
+ strcpy(cfs_tracefile, str);
+
+ printk(KERN_INFO
+ "Lustre: debug daemon will attempt to start writing "
+ "to %s (%lukB max)\n", cfs_tracefile,
+ (long)(cfs_tracefile_size >> 10));
+
+ cfs_trace_start_thread();
+ }
+
+ cfs_tracefile_write_unlock();
+ return rc;
+}
+
+int cfs_trace_daemon_command_usrstr(void *usr_str, int usr_str_nob)
+{
+ char *str;
+ int rc;
+
+ rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
+ if (rc != 0)
+ return rc;
+
+ rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
+ usr_str, usr_str_nob);
+ if (rc == 0)
+ rc = cfs_trace_daemon_command(str);
+
+ cfs_trace_free_string_buffer(str, usr_str_nob + 1);
+ return rc;
+}
+
+int cfs_trace_set_debug_mb(int mb)
+{
+ int i;
+ int j;
+ int pages;
+ int limit = cfs_trace_max_debug_mb();
+ struct cfs_trace_cpu_data *tcd;
+
+ if (mb < num_possible_cpus()) {
+ printk(KERN_WARNING
+ "Lustre: %d MB is too small for debug buffer size, "
+ "setting it to %d MB.\n", mb, num_possible_cpus());
+ mb = num_possible_cpus();
+ }
+
+ if (mb > limit) {
+ printk(KERN_WARNING
+ "Lustre: %d MB is too large for debug buffer size, "
+ "setting it to %d MB.\n", mb, limit);
+ mb = limit;
+ }
+
+ mb /= num_possible_cpus();
+ pages = mb << (20 - PAGE_CACHE_SHIFT);
+
+ cfs_tracefile_write_lock();
+
+ cfs_tcd_for_each(tcd, i, j)
+ tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;
+
+ cfs_tracefile_write_unlock();
+
+ return 0;
+}
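+/*
+ * Worked example (assuming 4 possible CPUs and 4 KiB pages): a request for
+ * 64 MB becomes 16 MB per CPU, i.e. 16 << (20 - 12) = 4096 pages, which
+ * each trace type then scales by its tcd_pages_factor percentage.
+ */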
+
+int cfs_trace_set_debug_mb_usrstr(void *usr_str, int usr_str_nob)
+{
+ char str[32];
+ int rc;
+
+ rc = cfs_trace_copyin_string(str, sizeof(str), usr_str, usr_str_nob);
+ if (rc < 0)
+ return rc;
+
+ return cfs_trace_set_debug_mb(simple_strtoul(str, NULL, 0));
+}
+
+int cfs_trace_get_debug_mb(void)
+{
+ int i;
+ int j;
+ struct cfs_trace_cpu_data *tcd;
+ int total_pages = 0;
+
+ cfs_tracefile_read_lock();
+
+ cfs_tcd_for_each(tcd, i, j)
+ total_pages += tcd->tcd_max_pages;
+
+ cfs_tracefile_read_unlock();
+
+ return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1;
+}
+
+static int tracefiled(void *arg)
+{
+ struct page_collection pc;
+ struct tracefiled_ctl *tctl = arg;
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
+ struct file *filp;
+ int last_loop = 0;
+ int rc;
+
+ DECL_MMSPACE;
+
+ /* we're started late enough that we pick up init's fs context */
+ /* this is so broken in uml? what on earth is going on? */
+
+ spin_lock_init(&pc.pc_lock);
+ complete(&tctl->tctl_start);
+
+ while (1) {
+ wait_queue_t __wait;
+
+ pc.pc_want_daemon_pages = 0;
+ collect_pages(&pc);
+ if (list_empty(&pc.pc_pages))
+ goto end_loop;
+
+ filp = NULL;
+ cfs_tracefile_read_lock();
+ if (cfs_tracefile[0] != 0) {
+ filp = filp_open(cfs_tracefile,
+ O_CREAT | O_RDWR | O_LARGEFILE,
+ 0600);
+ if (IS_ERR(filp)) {
+ rc = PTR_ERR(filp);
+ filp = NULL;
+ printk(KERN_WARNING "couldn't open %s: "
+ "%d\n", cfs_tracefile, rc);
+ }
+ }
+ cfs_tracefile_read_unlock();
+ if (filp == NULL) {
+ put_pages_on_daemon_list(&pc);
+ __LASSERT(list_empty(&pc.pc_pages));
+ goto end_loop;
+ }
+
+ MMSPACE_OPEN;
+
+ list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
+ linkage) {
+ static loff_t f_pos;
+
+ __LASSERT_TAGE_INVARIANT(tage);
+
+ if (f_pos >= (off_t)cfs_tracefile_size)
+ f_pos = 0;
+ else if (f_pos > (off_t)filp_size(filp))
+ f_pos = filp_size(filp);
+
+ rc = filp_write(filp, page_address(tage->page),
+ tage->used, &f_pos);
+ if (rc != (int)tage->used) {
+ printk(KERN_WARNING "wanted to write %u "
+ "but wrote %d\n", tage->used, rc);
+ put_pages_back(&pc);
+ __LASSERT(list_empty(&pc.pc_pages));
+ }
+ }
+ MMSPACE_CLOSE;
+
+ filp_close(filp, NULL);
+ put_pages_on_daemon_list(&pc);
+ if (!list_empty(&pc.pc_pages)) {
+ int i;
+
+   printk(KERN_ALERT "Lustre: trace pages aren't empty\n");
+ printk(KERN_ERR "total cpus(%d): ",
+ num_possible_cpus());
+ for (i = 0; i < num_possible_cpus(); i++)
+ if (cpu_online(i))
+ printk(KERN_ERR "%d(on) ", i);
+ else
+ printk(KERN_ERR "%d(off) ", i);
+ printk(KERN_ERR "\n");
+
+ i = 0;
+ list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
+ linkage)
+ printk(KERN_ERR "page %d belongs to cpu "
+ "%d\n", ++i, tage->cpu);
+ printk(KERN_ERR "There are %d pages unwritten\n",
+ i);
+ }
+ __LASSERT(list_empty(&pc.pc_pages));
+end_loop:
+ if (atomic_read(&tctl->tctl_shutdown)) {
+ if (last_loop == 0) {
+ last_loop = 1;
+ continue;
+ } else {
+ break;
+ }
+ }
+ init_waitqueue_entry_current(&__wait);
+ add_wait_queue(&tctl->tctl_waitq, &__wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ waitq_timedwait(&__wait, TASK_INTERRUPTIBLE,
+ cfs_time_seconds(1));
+ remove_wait_queue(&tctl->tctl_waitq, &__wait);
+ }
+ complete(&tctl->tctl_stop);
+ return 0;
+}
+
+int cfs_trace_start_thread(void)
+{
+ struct tracefiled_ctl *tctl = &trace_tctl;
+ int rc = 0;
+
+ mutex_lock(&cfs_trace_thread_mutex);
+ if (thread_running)
+ goto out;
+
+ init_completion(&tctl->tctl_start);
+ init_completion(&tctl->tctl_stop);
+ init_waitqueue_head(&tctl->tctl_waitq);
+ atomic_set(&tctl->tctl_shutdown, 0);
+
+ if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) {
+ rc = -ECHILD;
+ goto out;
+ }
+
+ wait_for_completion(&tctl->tctl_start);
+ thread_running = 1;
+out:
+ mutex_unlock(&cfs_trace_thread_mutex);
+ return rc;
+}
+
+void cfs_trace_stop_thread(void)
+{
+ struct tracefiled_ctl *tctl = &trace_tctl;
+
+ mutex_lock(&cfs_trace_thread_mutex);
+ if (thread_running) {
+ printk(KERN_INFO
+ "Lustre: shutting down debug daemon thread...\n");
+ atomic_set(&tctl->tctl_shutdown, 1);
+ wait_for_completion(&tctl->tctl_stop);
+ thread_running = 0;
+ }
+ mutex_unlock(&cfs_trace_thread_mutex);
+}
+
+int cfs_tracefile_init(int max_pages)
+{
+ struct cfs_trace_cpu_data *tcd;
+ int i;
+ int j;
+ int rc;
+ int factor;
+
+ rc = cfs_tracefile_init_arch();
+ if (rc != 0)
+ return rc;
+
+ cfs_tcd_for_each(tcd, i, j) {
+  /* tcd_pages_factor is initialized in cfs_tracefile_init_arch(). */
+ factor = tcd->tcd_pages_factor;
+ INIT_LIST_HEAD(&tcd->tcd_pages);
+ INIT_LIST_HEAD(&tcd->tcd_stock_pages);
+ INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
+ tcd->tcd_cur_pages = 0;
+ tcd->tcd_cur_stock_pages = 0;
+ tcd->tcd_cur_daemon_pages = 0;
+ tcd->tcd_max_pages = (max_pages * factor) / 100;
+ LASSERT(tcd->tcd_max_pages > 0);
+ tcd->tcd_shutting_down = 0;
+ }
+
+ return 0;
+}
+
+static void trace_cleanup_on_all_cpus(void)
+{
+ struct cfs_trace_cpu_data *tcd;
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
+ int i, cpu;
+
+ for_each_possible_cpu(cpu) {
+ cfs_tcd_for_each_type_lock(tcd, i, cpu) {
+ tcd->tcd_shutting_down = 1;
+
+ list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
+ linkage) {
+ __LASSERT_TAGE_INVARIANT(tage);
+
+ list_del(&tage->linkage);
+ cfs_tage_free(tage);
+ }
+
+ tcd->tcd_cur_pages = 0;
+ }
+ }
+}
+
+static void cfs_trace_cleanup(void)
+{
+ struct page_collection pc;
+
+ INIT_LIST_HEAD(&pc.pc_pages);
+ spin_lock_init(&pc.pc_lock);
+
+ trace_cleanup_on_all_cpus();
+
+ cfs_tracefile_fini_arch();
+}
+
+void cfs_tracefile_exit(void)
+{
+ cfs_trace_stop_thread();
+ cfs_trace_cleanup();
+}
diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.h b/drivers/staging/lustre/lustre/libcfs/tracefile.h
new file mode 100644
index 0000000..7e8d17c
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/tracefile.h
@@ -0,0 +1,340 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __LIBCFS_TRACEFILE_H__
+#define __LIBCFS_TRACEFILE_H__
+
+#include <linux/libcfs/libcfs.h>
+
+#include "linux/linux-tracefile.h"
+
+/* trace file lock routines */
+
+#define TRACEFILE_NAME_SIZE 1024
+extern char cfs_tracefile[TRACEFILE_NAME_SIZE];
+extern long long cfs_tracefile_size;
+
+extern void libcfs_run_debug_log_upcall(char *file);
+
+int cfs_tracefile_init_arch(void);
+void cfs_tracefile_fini_arch(void);
+
+void cfs_tracefile_read_lock(void);
+void cfs_tracefile_read_unlock(void);
+void cfs_tracefile_write_lock(void);
+void cfs_tracefile_write_unlock(void);
+
+int cfs_tracefile_dump_all_pages(char *filename);
+void cfs_trace_debug_print(void);
+void cfs_trace_flush_pages(void);
+int cfs_trace_start_thread(void);
+void cfs_trace_stop_thread(void);
+int cfs_tracefile_init(int max_pages);
+void cfs_tracefile_exit(void);
+
+int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
+ const char *usr_buffer, int usr_buffer_nob);
+int cfs_trace_copyout_string(char *usr_buffer, int usr_buffer_nob,
+ const char *knl_str, char *append);
+int cfs_trace_allocate_string_buffer(char **str, int nob);
+void cfs_trace_free_string_buffer(char *str, int nob);
+int cfs_trace_dump_debug_buffer_usrstr(void *usr_str, int usr_str_nob);
+int cfs_trace_daemon_command(char *str);
+int cfs_trace_daemon_command_usrstr(void *usr_str, int usr_str_nob);
+int cfs_trace_set_debug_mb(int mb);
+int cfs_trace_set_debug_mb_usrstr(void *usr_str, int usr_str_nob);
+int cfs_trace_get_debug_mb(void);
+
+extern void libcfs_debug_dumplog_internal(void *arg);
+extern void libcfs_register_panic_notifier(void);
+extern void libcfs_unregister_panic_notifier(void);
+extern int libcfs_panic_in_progress;
+extern int cfs_trace_max_debug_mb(void);
+
+#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
+#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
+#define CFS_TRACEFILE_SIZE (500 << 20)
+
+#ifdef LUSTRE_TRACEFILE_PRIVATE
+
+/*
+ * Private declare for tracefile
+ */
+
+/* Size of a buffer for sprintf-ing console messages if we can't get a page
+ * from the system */
+#define CFS_TRACE_CONSOLE_BUFFER_SIZE 1024
+
+union cfs_trace_data_union {
+ struct cfs_trace_cpu_data {
+ /*
+ * Even though this structure is meant to be per-CPU, locking
+ * is needed because in some places the data may be accessed
+ * from other CPUs. This lock is directly used in trace_get_tcd
+ * and trace_put_tcd, which are called in libcfs_debug_vmsg2 and
+ * tcd_for_each_type_lock
+ */
+ spinlock_t tcd_lock;
+ unsigned long tcd_lock_flags;
+
+ /*
+ * pages with trace records not yet processed by tracefiled.
+ */
+ struct list_head tcd_pages;
+ /* number of pages on ->tcd_pages */
+ unsigned long tcd_cur_pages;
+
+ /*
+ * pages with trace records already processed by
+ * tracefiled. These pages are kept in memory, so that some
+ * portion of log can be written in the event of LBUG. This
+ * list is maintained in LRU order.
+ *
+ * Pages are moved to ->tcd_daemon_pages by tracefiled()
+ * (put_pages_on_daemon_list()). LRU pages from this list are
+ * discarded when list grows too large.
+ */
+ struct list_head tcd_daemon_pages;
+ /* number of pages on ->tcd_daemon_pages */
+ unsigned long tcd_cur_daemon_pages;
+
+ /*
+ * Maximal number of pages allowed on ->tcd_pages and
+ * ->tcd_daemon_pages each.
+ * Always TCD_MAX_PAGES * tcd_pages_factor / 100 in current
+ * implementation.
+ */
+ unsigned long tcd_max_pages;
+
+ /*
+ * preallocated pages to write trace records into. Pages from
+ * ->tcd_stock_pages are moved to ->tcd_pages by
+ * portals_debug_msg().
+ *
+ * This list is necessary, because on some platforms it's
+ * impossible to perform efficient atomic page allocation in a
+ * non-blockable context.
+ *
+ * Such platforms fill ->tcd_stock_pages "on occasion", when
+ * tracing code is entered in blockable context.
+ *
+ * trace_get_tage_try() tries to get a page from
+ * ->tcd_stock_pages first and resorts to atomic page
+ * allocation only if this queue is empty. ->tcd_stock_pages
+ * is replenished when tracing code is entered in blocking
+ * context (darwin-tracefile.c:trace_get_tcd()). We try to
+ * maintain TCD_STOCK_PAGES (40 by default) pages in this
+ * queue. Atomic allocation is only required if more than
+ * TCD_STOCK_PAGES pagesful are consumed by trace records all
+ * emitted in non-blocking contexts. Which is quite unlikely.
+ */
+ struct list_head tcd_stock_pages;
+ /* number of pages on ->tcd_stock_pages */
+ unsigned long tcd_cur_stock_pages;
+
+ unsigned short tcd_shutting_down;
+ unsigned short tcd_cpu;
+ unsigned short tcd_type;
+ /* The factors to share debug memory. */
+ unsigned short tcd_pages_factor;
+ } tcd;
+ char __pad[L1_CACHE_ALIGN(sizeof(struct cfs_trace_cpu_data))];
+};
+
+#define TCD_MAX_TYPES 8
+extern union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS];
+
+#define cfs_tcd_for_each(tcd, i, j) \
+ for (i = 0; cfs_trace_data[i] != NULL; i++) \
+ for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd); \
+ j < num_possible_cpus(); \
+ j++, (tcd) = &(*cfs_trace_data[i])[j].tcd)
+
+#define cfs_tcd_for_each_type_lock(tcd, i, cpu) \
+ for (i = 0; cfs_trace_data[i] && \
+ (tcd = &(*cfs_trace_data[i])[cpu].tcd) && \
+ cfs_trace_lock_tcd(tcd, 1); cfs_trace_unlock_tcd(tcd, 1), i++)
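+
+/*
+ * Illustrative traversal (a sketch, not code used by this patch): summing the
+ * number of queued pages over every trace CPU data slot.
+ *
+ *	struct cfs_trace_cpu_data *tcd;
+ *	unsigned long total = 0;
+ *	int i, j;
+ *
+ *	cfs_tcd_for_each(tcd, i, j)
+ *		total += tcd->tcd_cur_pages;
+ */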
+
+/* XXX nikita: this declaration is internal to tracefile.c and should probably
+ * be moved there */
+struct page_collection {
+ struct list_head pc_pages;
+ /*
+ * spin-lock protecting ->pc_pages. It is taken by smp_call_function()
+ * call-back functions. XXX nikita: Which is horrible: all processors
+ * receive NMI at the same time only to be serialized by this
+ * lock. Probably ->pc_pages should be replaced with an array of
+ * NR_CPUS elements accessed locklessly.
+ */
+ spinlock_t pc_lock;
+ /*
+ * if this flag is set, collect_pages() will spill both
+ * ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise,
+ * only ->tcd_pages are spilled.
+ */
+ int pc_want_daemon_pages;
+};
+
+/* XXX nikita: this declaration is internal to tracefile.c and should probably
+ * be moved there */
+struct tracefiled_ctl {
+ struct completion tctl_start;
+ struct completion tctl_stop;
+ wait_queue_head_t tctl_waitq;
+ pid_t tctl_pid;
+ atomic_t tctl_shutdown;
+};
+
+/*
+ * small data-structure for each page owned by tracefiled.
+ */
+/* XXX nikita: this declaration is internal to tracefile.c and should probably
+ * be moved there */
+struct cfs_trace_page {
+ /*
+ * page itself
+ */
+ struct page *page;
+ /*
+ * linkage into one of the lists in trace_data_union or
+ * page_collection
+ */
+ struct list_head linkage;
+ /*
+ * number of bytes used within this page
+ */
+ unsigned int used;
+ /*
+ * cpu that owns this page
+ */
+ unsigned short cpu;
+ /*
+ * type(context) of this page
+ */
+ unsigned short type;
+};
+
+extern void cfs_set_ptldebug_header(struct ptldebug_header *header,
+ struct libcfs_debug_msg_data *m,
+ unsigned long stack);
+extern void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
+ const char *buf, int len, const char *file,
+ const char *fn);
+
+extern int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking);
+extern void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking);
+
+/**
+ * trace_buf_type_t, trace_buf_idx_get() and trace_console_buffers[][]
+ * are not public libcfs API; they should be defined in
+ * platform-specific tracefile include files
+ * (see, for example, linux-tracefile.h).
+ */
+
+extern char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];
+extern cfs_trace_buf_type_t cfs_trace_buf_idx_get(void);
+
+static inline char *
+cfs_trace_get_console_buffer(void)
+{
+ unsigned int i = get_cpu();
+ unsigned int j = cfs_trace_buf_idx_get();
+
+ return cfs_trace_console_buffers[i][j];
+}
+
+static inline void
+cfs_trace_put_console_buffer(char *buffer)
+{
+ put_cpu();
+}
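+
+/*
+ * The two helpers above are meant to be used as a pair; preemption stays
+ * disabled in between because cfs_trace_get_console_buffer() calls get_cpu().
+ * Sketch of the expected use (the message formatting is only an example):
+ *
+ *	char *buf = cfs_trace_get_console_buffer();
+ *
+ *	snprintf(buf, CFS_TRACE_CONSOLE_BUFFER_SIZE, "...");
+ *	cfs_trace_put_console_buffer(buf);
+ */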
+
+static inline struct cfs_trace_cpu_data *
+cfs_trace_get_tcd(void)
+{
+ struct cfs_trace_cpu_data *tcd =
+ &(*cfs_trace_data[cfs_trace_buf_idx_get()])[get_cpu()].tcd;
+
+ cfs_trace_lock_tcd(tcd, 0);
+
+ return tcd;
+}
+
+static inline void
+cfs_trace_put_tcd (struct cfs_trace_cpu_data *tcd)
+{
+ cfs_trace_unlock_tcd(tcd, 0);
+
+ put_cpu();
+}
+
+int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, int gfp,
+ struct list_head *stock);
+
+
+int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd,
+ struct cfs_trace_page *tage);
+
+extern void cfs_trace_assertion_failed(const char *str,
+ struct libcfs_debug_msg_data *m);
+
+/* ASSERTION that is safe to use within the debug system */
+#define __LASSERT(cond) \
+do { \
+ if (unlikely(!(cond))) { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_EMERG, NULL); \
+ cfs_trace_assertion_failed("ASSERTION("#cond") failed", \
+ &msgdata); \
+ } \
+} while (0)
+
+#define __LASSERT_TAGE_INVARIANT(tage) \
+do { \
+ __LASSERT(tage != NULL); \
+ __LASSERT(tage->page != NULL); \
+ __LASSERT(tage->used <= PAGE_CACHE_SIZE); \
+ __LASSERT(page_count(tage->page) > 0); \
+} while (0)
+
+#endif /* LUSTRE_TRACEFILE_PRIVATE */
+
+#endif /* __LIBCFS_TRACEFILE_H__ */
diff --git a/drivers/staging/lustre/lustre/libcfs/upcall_cache.c b/drivers/staging/lustre/lustre/libcfs/upcall_cache.c
new file mode 100644
index 0000000..245b46f
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/upcall_cache.c
@@ -0,0 +1,452 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/libcfs/upcall_cache.c
+ *
+ * Supplementary groups cache.
+ */
+#define DEBUG_SUBSYSTEM S_SEC
+
+#include <linux/libcfs/lucache.h>
+
+static struct upcall_cache_entry *alloc_entry(struct upcall_cache *cache,
+ __u64 key, void *args)
+{
+ struct upcall_cache_entry *entry;
+
+ LIBCFS_ALLOC(entry, sizeof(*entry));
+ if (!entry)
+ return NULL;
+
+ UC_CACHE_SET_NEW(entry);
+ INIT_LIST_HEAD(&entry->ue_hash);
+ entry->ue_key = key;
+ atomic_set(&entry->ue_refcount, 0);
+ init_waitqueue_head(&entry->ue_waitq);
+ if (cache->uc_ops->init_entry)
+ cache->uc_ops->init_entry(entry, args);
+ return entry;
+}
+
+/* protected by cache lock */
+static void free_entry(struct upcall_cache *cache,
+ struct upcall_cache_entry *entry)
+{
+ if (cache->uc_ops->free_entry)
+ cache->uc_ops->free_entry(cache, entry);
+
+ list_del(&entry->ue_hash);
+ CDEBUG(D_OTHER, "destroy cache entry %p for key "LPU64"\n",
+ entry, entry->ue_key);
+ LIBCFS_FREE(entry, sizeof(*entry));
+}
+
+static inline int upcall_compare(struct upcall_cache *cache,
+ struct upcall_cache_entry *entry,
+ __u64 key, void *args)
+{
+ if (entry->ue_key != key)
+ return -1;
+
+ if (cache->uc_ops->upcall_compare)
+ return cache->uc_ops->upcall_compare(cache, entry, key, args);
+
+ return 0;
+}
+
+static inline int downcall_compare(struct upcall_cache *cache,
+ struct upcall_cache_entry *entry,
+ __u64 key, void *args)
+{
+ if (entry->ue_key != key)
+ return -1;
+
+ if (cache->uc_ops->downcall_compare)
+ return cache->uc_ops->downcall_compare(cache, entry, key, args);
+
+ return 0;
+}
+
+static inline void get_entry(struct upcall_cache_entry *entry)
+{
+ atomic_inc(&entry->ue_refcount);
+}
+
+static inline void put_entry(struct upcall_cache *cache,
+ struct upcall_cache_entry *entry)
+{
+ if (atomic_dec_and_test(&entry->ue_refcount) &&
+ (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry))) {
+ free_entry(cache, entry);
+ }
+}
+
+static int check_unlink_entry(struct upcall_cache *cache,
+ struct upcall_cache_entry *entry)
+{
+ if (UC_CACHE_IS_VALID(entry) &&
+ cfs_time_before(cfs_time_current(), entry->ue_expire))
+ return 0;
+
+ if (UC_CACHE_IS_ACQUIRING(entry)) {
+ if (entry->ue_acquire_expire == 0 ||
+ cfs_time_before(cfs_time_current(),
+ entry->ue_acquire_expire))
+ return 0;
+
+ UC_CACHE_SET_EXPIRED(entry);
+ wake_up_all(&entry->ue_waitq);
+ } else if (!UC_CACHE_IS_INVALID(entry)) {
+ UC_CACHE_SET_EXPIRED(entry);
+ }
+
+ list_del_init(&entry->ue_hash);
+ if (!atomic_read(&entry->ue_refcount))
+ free_entry(cache, entry);
+ return 1;
+}
+
+static inline int refresh_entry(struct upcall_cache *cache,
+ struct upcall_cache_entry *entry)
+{
+ LASSERT(cache->uc_ops->do_upcall);
+ return cache->uc_ops->do_upcall(cache, entry);
+}
+
+struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
+ __u64 key, void *args)
+{
+ struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
+ struct list_head *head;
+ wait_queue_t wait;
+ int rc, found;
+
+ LASSERT(cache);
+
+ head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
+find_again:
+ found = 0;
+ spin_lock(&cache->uc_lock);
+ list_for_each_entry_safe(entry, next, head, ue_hash) {
+ /* check invalid & expired items */
+ if (check_unlink_entry(cache, entry))
+ continue;
+ if (upcall_compare(cache, entry, key, args) == 0) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ if (!new) {
+ spin_unlock(&cache->uc_lock);
+ new = alloc_entry(cache, key, args);
+ if (!new) {
+ CERROR("fail to alloc entry\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ goto find_again;
+ } else {
+ list_add(&new->ue_hash, head);
+ entry = new;
+ }
+ } else {
+ if (new) {
+ free_entry(cache, new);
+ new = NULL;
+ }
+ list_move(&entry->ue_hash, head);
+ }
+ get_entry(entry);
+
+ /* acquire for new one */
+ if (UC_CACHE_IS_NEW(entry)) {
+ UC_CACHE_SET_ACQUIRING(entry);
+ UC_CACHE_CLEAR_NEW(entry);
+ spin_unlock(&cache->uc_lock);
+ rc = refresh_entry(cache, entry);
+ spin_lock(&cache->uc_lock);
+ entry->ue_acquire_expire =
+ cfs_time_shift(cache->uc_acquire_expire);
+ if (rc < 0) {
+ UC_CACHE_CLEAR_ACQUIRING(entry);
+ UC_CACHE_SET_INVALID(entry);
+ wake_up_all(&entry->ue_waitq);
+ if (unlikely(rc == -EREMCHG)) {
+ put_entry(cache, entry);
+ GOTO(out, entry = ERR_PTR(rc));
+ }
+ }
+ }
+ /* someone (and only one) is doing an upcall on this item,
+ * wait for it to complete */
+ if (UC_CACHE_IS_ACQUIRING(entry)) {
+ long expiry = (entry == new) ?
+ cfs_time_seconds(cache->uc_acquire_expire) :
+ MAX_SCHEDULE_TIMEOUT;
+ long left;
+
+ init_waitqueue_entry_current(&wait);
+ add_wait_queue(&entry->ue_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock(&cache->uc_lock);
+
+ left = waitq_timedwait(&wait, TASK_INTERRUPTIBLE,
+ expiry);
+
+ spin_lock(&cache->uc_lock);
+ remove_wait_queue(&entry->ue_waitq, &wait);
+ if (UC_CACHE_IS_ACQUIRING(entry)) {
+ /* we're interrupted or upcall failed in the middle */
+ rc = left > 0 ? -EINTR : -ETIMEDOUT;
+ CERROR("acquire for key "LPU64": error %d\n",
+ entry->ue_key, rc);
+ put_entry(cache, entry);
+ GOTO(out, entry = ERR_PTR(rc));
+ }
+ }
+
+ /* invalid means error, don't need to try again */
+ if (UC_CACHE_IS_INVALID(entry)) {
+ put_entry(cache, entry);
+ GOTO(out, entry = ERR_PTR(-EIDRM));
+ }
+
+ /* check expired
+ * We can't refresh the existing one because some
+ * memory might be shared by multiple processes.
+ */
+ if (check_unlink_entry(cache, entry)) {
+ /* if expired, try again. But if this entry was
+ * created by us and expired too quickly without
+ * any error, it should at least get a chance to
+ * be used once.
+ */
+ if (entry != new) {
+ put_entry(cache, entry);
+ spin_unlock(&cache->uc_lock);
+ new = NULL;
+ goto find_again;
+ }
+ }
+
+ /* Now we know it's good */
+out:
+ spin_unlock(&cache->uc_lock);
+ return entry;
+}
+EXPORT_SYMBOL(upcall_cache_get_entry);
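+
+/*
+ * Sketch of the expected caller pattern (the actual identity/group users of
+ * this cache live outside this file):
+ *
+ *	entry = upcall_cache_get_entry(cache, key, args);
+ *	if (IS_ERR(entry))
+ *		return PTR_ERR(entry);
+ *	... consume the data filled in by the ->parse_downcall() hook ...
+ *	upcall_cache_put_entry(cache, entry);
+ *
+ * The user-space helper completes a pending request by coming back through
+ * upcall_cache_downcall() with the same key.
+ */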
+
+void upcall_cache_put_entry(struct upcall_cache *cache,
+ struct upcall_cache_entry *entry)
+{
+ if (!entry) {
+ return;
+ }
+
+ LASSERT(atomic_read(&entry->ue_refcount) > 0);
+ spin_lock(&cache->uc_lock);
+ put_entry(cache, entry);
+ spin_unlock(&cache->uc_lock);
+}
+EXPORT_SYMBOL(upcall_cache_put_entry);
+
+int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
+ void *args)
+{
+ struct upcall_cache_entry *entry = NULL;
+ struct list_head *head;
+ int found = 0, rc = 0;
+
+ LASSERT(cache);
+
+ head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
+
+ spin_lock(&cache->uc_lock);
+ list_for_each_entry(entry, head, ue_hash) {
+ if (downcall_compare(cache, entry, key, args) == 0) {
+ found = 1;
+ get_entry(entry);
+ break;
+ }
+ }
+
+ if (!found) {
+ CDEBUG(D_OTHER, "%s: upcall for key "LPU64" not expected\n",
+ cache->uc_name, key);
+ /* haven't found, it's possible */
+ spin_unlock(&cache->uc_lock);
+ return -EINVAL;
+ }
+
+ if (err) {
+ CDEBUG(D_OTHER, "%s: upcall for key "LPU64" returned %d\n",
+ cache->uc_name, entry->ue_key, err);
+ GOTO(out, rc = -EINVAL);
+ }
+
+ if (!UC_CACHE_IS_ACQUIRING(entry)) {
+ CDEBUG(D_RPCTRACE,"%s: found uptodate entry %p (key "LPU64")\n",
+ cache->uc_name, entry, entry->ue_key);
+ GOTO(out, rc = 0);
+ }
+
+ if (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry)) {
+ CERROR("%s: found a stale entry %p (key "LPU64") in ioctl\n",
+ cache->uc_name, entry, entry->ue_key);
+ GOTO(out, rc = -EINVAL);
+ }
+
+ spin_unlock(&cache->uc_lock);
+ if (cache->uc_ops->parse_downcall)
+ rc = cache->uc_ops->parse_downcall(cache, entry, args);
+ spin_lock(&cache->uc_lock);
+ if (rc)
+ GOTO(out, rc);
+
+ entry->ue_expire = cfs_time_shift(cache->uc_entry_expire);
+ UC_CACHE_SET_VALID(entry);
+ CDEBUG(D_OTHER, "%s: created upcall cache entry %p for key "LPU64"\n",
+ cache->uc_name, entry, entry->ue_key);
+out:
+ if (rc) {
+ UC_CACHE_SET_INVALID(entry);
+ list_del_init(&entry->ue_hash);
+ }
+ UC_CACHE_CLEAR_ACQUIRING(entry);
+ spin_unlock(&cache->uc_lock);
+ wake_up_all(&entry->ue_waitq);
+ put_entry(cache, entry);
+
+ return rc;
+}
+EXPORT_SYMBOL(upcall_cache_downcall);
+
+static void cache_flush(struct upcall_cache *cache, int force)
+{
+ struct upcall_cache_entry *entry, *next;
+ int i;
+
+ spin_lock(&cache->uc_lock);
+ for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
+ list_for_each_entry_safe(entry, next,
+ &cache->uc_hashtable[i], ue_hash) {
+ if (!force && atomic_read(&entry->ue_refcount)) {
+ UC_CACHE_SET_EXPIRED(entry);
+ continue;
+ }
+ LASSERT(!atomic_read(&entry->ue_refcount));
+ free_entry(cache, entry);
+ }
+ }
+ spin_unlock(&cache->uc_lock);
+}
+
+void upcall_cache_flush_idle(struct upcall_cache *cache)
+{
+ cache_flush(cache, 0);
+}
+EXPORT_SYMBOL(upcall_cache_flush_idle);
+
+void upcall_cache_flush_all(struct upcall_cache *cache)
+{
+ cache_flush(cache, 1);
+}
+EXPORT_SYMBOL(upcall_cache_flush_all);
+
+void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
+{
+ struct list_head *head;
+ struct upcall_cache_entry *entry;
+ int found = 0;
+
+ head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
+
+ spin_lock(&cache->uc_lock);
+ list_for_each_entry(entry, head, ue_hash) {
+ if (upcall_compare(cache, entry, key, args) == 0) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found) {
+ CWARN("%s: flush entry %p: key "LPU64", ref %d, fl %x, "
+ "cur %lu, ex %ld/%ld\n",
+ cache->uc_name, entry, entry->ue_key,
+ atomic_read(&entry->ue_refcount), entry->ue_flags,
+ cfs_time_current_sec(), entry->ue_acquire_expire,
+ entry->ue_expire);
+ UC_CACHE_SET_EXPIRED(entry);
+ if (!atomic_read(&entry->ue_refcount))
+ free_entry(cache, entry);
+ }
+ spin_unlock(&cache->uc_lock);
+}
+EXPORT_SYMBOL(upcall_cache_flush_one);
+
+struct upcall_cache *upcall_cache_init(const char *name, const char *upcall,
+ struct upcall_cache_ops *ops)
+{
+ struct upcall_cache *cache;
+ int i;
+
+ LIBCFS_ALLOC(cache, sizeof(*cache));
+ if (!cache)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&cache->uc_lock);
+ rwlock_init(&cache->uc_upcall_rwlock);
+ for (i = 0; i < UC_CACHE_HASH_SIZE; i++)
+ INIT_LIST_HEAD(&cache->uc_hashtable[i]);
+ strncpy(cache->uc_name, name, sizeof(cache->uc_name) - 1);
+ /* upcall pathname proc tunable */
+ strncpy(cache->uc_upcall, upcall, sizeof(cache->uc_upcall) - 1);
+ cache->uc_entry_expire = 20 * 60;
+ cache->uc_acquire_expire = 30;
+ cache->uc_ops = ops;
+
+ return cache;
+}
+EXPORT_SYMBOL(upcall_cache_init);
+
+void upcall_cache_cleanup(struct upcall_cache *cache)
+{
+ if (!cache)
+ return;
+ upcall_cache_flush_all(cache);
+ LIBCFS_FREE(cache, sizeof(*cache));
+}
+EXPORT_SYMBOL(upcall_cache_cleanup);
diff --git a/drivers/staging/lustre/lustre/libcfs/workitem.c b/drivers/staging/lustre/lustre/libcfs/workitem.c
new file mode 100644
index 0000000..1a55c81
--- /dev/null
+++ b/drivers/staging/lustre/lustre/libcfs/workitem.c
@@ -0,0 +1,476 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * libcfs/libcfs/workitem.c
+ *
+ * Author: Isaac Huang <isaac@clusterfs.com>
+ * Liang Zhen <zhen.liang@sun.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/libcfs/libcfs.h>
+
+#define CFS_WS_NAME_LEN 16
+
+typedef struct cfs_wi_sched {
+ struct list_head ws_list; /* chain on global list */
+ /** serialised workitems */
+ spinlock_t ws_lock;
+ /** where schedulers sleep */
+ wait_queue_head_t ws_waitq;
+ /** concurrent workitems */
+ struct list_head ws_runq;
+ /** rescheduled running workitems: a workitem can be rescheduled
+ * while running in wi_action(), but we don't want to execute it
+ * again until it returns from wi_action(), so we put it on
+ * ws_rerunq while rescheduling, and move it back to the runq
+ * once it has returned from wi_action() */
+ struct list_head ws_rerunq;
+ /** CPT-table for this scheduler */
+ struct cfs_cpt_table *ws_cptab;
+ /** CPT id for affinity */
+ int ws_cpt;
+ /** number of scheduled workitems */
+ int ws_nscheduled;
+ /** started scheduler thread, protected by cfs_wi_data::wi_glock */
+ unsigned int ws_nthreads:30;
+ /** shutting down, protected by cfs_wi_data::wi_glock */
+ unsigned int ws_stopping:1;
+ /** serialize starting thread, protected by cfs_wi_data::wi_glock */
+ unsigned int ws_starting:1;
+ /** scheduler name */
+ char ws_name[CFS_WS_NAME_LEN];
+} cfs_wi_sched_t;
+
+struct cfs_workitem_data {
+ /** serialize */
+ spinlock_t wi_glock;
+ /** list of all schedulers */
+ struct list_head wi_scheds;
+ /** WI module is initialized */
+ int wi_init;
+ /** shutting down the whole WI module */
+ int wi_stopping;
+} cfs_wi_data;
+
+static inline void
+cfs_wi_sched_lock(cfs_wi_sched_t *sched)
+{
+ spin_lock(&sched->ws_lock);
+}
+
+static inline void
+cfs_wi_sched_unlock(cfs_wi_sched_t *sched)
+{
+ spin_unlock(&sched->ws_lock);
+}
+
+static inline int
+cfs_wi_sched_cansleep(cfs_wi_sched_t *sched)
+{
+ cfs_wi_sched_lock(sched);
+ if (sched->ws_stopping) {
+ cfs_wi_sched_unlock(sched);
+ return 0;
+ }
+
+ if (!list_empty(&sched->ws_runq)) {
+ cfs_wi_sched_unlock(sched);
+ return 0;
+ }
+ cfs_wi_sched_unlock(sched);
+ return 1;
+}
+
+
+/* XXX:
+ * 0. it only works when called from wi->wi_action.
+ * 1. when it returns no one shall try to schedule the workitem.
+ */
+void
+cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
+{
+ LASSERT(!in_interrupt()); /* because we use plain spinlock */
+ LASSERT(!sched->ws_stopping);
+
+ cfs_wi_sched_lock(sched);
+
+ LASSERT(wi->wi_running);
+ if (wi->wi_scheduled) { /* cancel pending schedules */
+ LASSERT(!list_empty(&wi->wi_list));
+ list_del_init(&wi->wi_list);
+
+ LASSERT(sched->ws_nscheduled > 0);
+ sched->ws_nscheduled--;
+ }
+
+ LASSERT(list_empty(&wi->wi_list));
+
+ wi->wi_scheduled = 1; /* LBUG on future schedule attempts */
+ cfs_wi_sched_unlock(sched);
+
+ return;
+}
+EXPORT_SYMBOL(cfs_wi_exit);
+
+/**
+ * cancel schedule request of workitem \a wi
+ */
+int
+cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
+{
+ int rc;
+
+ LASSERT(!in_interrupt()); /* because we use plain spinlock */
+ LASSERT(!sched->ws_stopping);
+
+ /*
+ * return 0 if it's running already, otherwise return 1, which
+ * means the workitem will not be scheduled and will not have
+ * any race with wi_action.
+ */
+ cfs_wi_sched_lock(sched);
+
+ rc = !(wi->wi_running);
+
+ if (wi->wi_scheduled) { /* cancel pending schedules */
+ LASSERT(!list_empty(&wi->wi_list));
+ list_del_init(&wi->wi_list);
+
+ LASSERT(sched->ws_nscheduled > 0);
+ sched->ws_nscheduled--;
+
+ wi->wi_scheduled = 0;
+ }
+
+ LASSERT (list_empty(&wi->wi_list));
+
+ cfs_wi_sched_unlock(sched);
+ return rc;
+}
+EXPORT_SYMBOL(cfs_wi_deschedule);
+
+/*
+ * Workitem scheduled with (serial == 1) is strictly serialised not only with
+ * itself, but also with others scheduled this way.
+ *
+ * Now there's only one static serialised queue, but in the future more might
+ * be added, and even dynamic creation of serialised queues might be supported.
+ */
+void
+cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
+{
+ LASSERT(!in_interrupt()); /* because we use plain spinlock */
+ LASSERT(!sched->ws_stopping);
+
+ cfs_wi_sched_lock(sched);
+
+ if (!wi->wi_scheduled) {
+ LASSERT (list_empty(&wi->wi_list));
+
+ wi->wi_scheduled = 1;
+ sched->ws_nscheduled++;
+ if (!wi->wi_running) {
+ list_add_tail(&wi->wi_list, &sched->ws_runq);
+ wake_up(&sched->ws_waitq);
+ } else {
+ list_add(&wi->wi_list, &sched->ws_rerunq);
+ }
+ }
+
+ LASSERT (!list_empty(&wi->wi_list));
+ cfs_wi_sched_unlock(sched);
+ return;
+}
+EXPORT_SYMBOL(cfs_wi_schedule);
+
+
+static int
+cfs_wi_scheduler (void *arg)
+{
+ struct cfs_wi_sched *sched = (cfs_wi_sched_t *)arg;
+
+ cfs_block_allsigs();
+
+ /* CPT affinity scheduler? */
+ if (sched->ws_cptab != NULL)
+ cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt);
+
+ spin_lock(&cfs_wi_data.wi_glock);
+
+ LASSERT(sched->ws_starting == 1);
+ sched->ws_starting--;
+ sched->ws_nthreads++;
+
+ spin_unlock(&cfs_wi_data.wi_glock);
+
+ cfs_wi_sched_lock(sched);
+
+ while (!sched->ws_stopping) {
+ int nloops = 0;
+ int rc;
+ cfs_workitem_t *wi;
+
+ while (!list_empty(&sched->ws_runq) &&
+ nloops < CFS_WI_RESCHED) {
+ wi = list_entry(sched->ws_runq.next,
+ cfs_workitem_t, wi_list);
+ LASSERT(wi->wi_scheduled && !wi->wi_running);
+
+ list_del_init(&wi->wi_list);
+
+ LASSERT(sched->ws_nscheduled > 0);
+ sched->ws_nscheduled--;
+
+ wi->wi_running = 1;
+ wi->wi_scheduled = 0;
+
+
+ cfs_wi_sched_unlock(sched);
+ nloops++;
+
+ rc = (*wi->wi_action) (wi);
+
+ cfs_wi_sched_lock(sched);
+ if (rc != 0) /* WI should be dead, even be freed! */
+ continue;
+
+ wi->wi_running = 0;
+ if (list_empty(&wi->wi_list))
+ continue;
+
+ LASSERT(wi->wi_scheduled);
+ /* wi is rescheduled, should be on rerunq now, we
+ * move it to runq so it can run action now */
+ list_move_tail(&wi->wi_list, &sched->ws_runq);
+ }
+
+ if (!list_empty(&sched->ws_runq)) {
+ cfs_wi_sched_unlock(sched);
+ /* don't sleep because some workitems still
+ * expect me to come back soon */
+ cond_resched();
+ cfs_wi_sched_lock(sched);
+ continue;
+ }
+
+ cfs_wi_sched_unlock(sched);
+ cfs_wait_event_interruptible_exclusive(sched->ws_waitq,
+ !cfs_wi_sched_cansleep(sched), rc);
+ cfs_wi_sched_lock(sched);
+ }
+
+ cfs_wi_sched_unlock(sched);
+
+ spin_lock(&cfs_wi_data.wi_glock);
+ sched->ws_nthreads--;
+ spin_unlock(&cfs_wi_data.wi_glock);
+
+ return 0;
+}
+
+
+void
+cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
+{
+ int i;
+
+ LASSERT(cfs_wi_data.wi_init);
+ LASSERT(!cfs_wi_data.wi_stopping);
+
+ spin_lock(&cfs_wi_data.wi_glock);
+ if (sched->ws_stopping) {
+ CDEBUG(D_INFO, "%s is in progress of stopping\n",
+ sched->ws_name);
+ spin_unlock(&cfs_wi_data.wi_glock);
+ return;
+ }
+
+ LASSERT(!list_empty(&sched->ws_list));
+ sched->ws_stopping = 1;
+
+ spin_unlock(&cfs_wi_data.wi_glock);
+
+ i = 2;
+ wake_up_all(&sched->ws_waitq);
+
+ spin_lock(&cfs_wi_data.wi_glock);
+ while (sched->ws_nthreads > 0) {
+ CDEBUG(IS_PO2(++i) ? D_WARNING : D_NET,
+ "waiting for %d threads of WI sched[%s] to terminate\n",
+ sched->ws_nthreads, sched->ws_name);
+
+ spin_unlock(&cfs_wi_data.wi_glock);
+ cfs_pause(cfs_time_seconds(1) / 20);
+ spin_lock(&cfs_wi_data.wi_glock);
+ }
+
+ list_del(&sched->ws_list);
+
+ spin_unlock(&cfs_wi_data.wi_glock);
+ LASSERT(sched->ws_nscheduled == 0);
+
+ LIBCFS_FREE(sched, sizeof(*sched));
+}
+EXPORT_SYMBOL(cfs_wi_sched_destroy);
+
+int
+cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
+ int cpt, int nthrs, struct cfs_wi_sched **sched_pp)
+{
+ struct cfs_wi_sched *sched;
+ int rc;
+
+ LASSERT(cfs_wi_data.wi_init);
+ LASSERT(!cfs_wi_data.wi_stopping);
+ LASSERT(cptab == NULL || cpt == CFS_CPT_ANY ||
+ (cpt >= 0 && cpt < cfs_cpt_number(cptab)));
+
+ LIBCFS_ALLOC(sched, sizeof(*sched));
+ if (sched == NULL)
+ return -ENOMEM;
+
+ strncpy(sched->ws_name, name, CFS_WS_NAME_LEN - 1);
+ sched->ws_cptab = cptab;
+ sched->ws_cpt = cpt;
+
+ spin_lock_init(&sched->ws_lock);
+ init_waitqueue_head(&sched->ws_waitq);
+ INIT_LIST_HEAD(&sched->ws_runq);
+ INIT_LIST_HEAD(&sched->ws_rerunq);
+ INIT_LIST_HEAD(&sched->ws_list);
+
+ rc = 0;
+ while (nthrs > 0) {
+ char name[16];
+ struct task_struct *task;
+
+ spin_lock(&cfs_wi_data.wi_glock);
+ while (sched->ws_starting > 0) {
+ spin_unlock(&cfs_wi_data.wi_glock);
+ schedule();
+ spin_lock(&cfs_wi_data.wi_glock);
+ }
+
+ sched->ws_starting++;
+ spin_unlock(&cfs_wi_data.wi_glock);
+
+ if (sched->ws_cptab != NULL && sched->ws_cpt >= 0) {
+ snprintf(name, sizeof(name), "%s_%02d_%02d",
+ sched->ws_name, sched->ws_cpt,
+ sched->ws_nthreads);
+ } else {
+ snprintf(name, sizeof(name), "%s_%02d",
+ sched->ws_name, sched->ws_nthreads);
+ }
+
+ task = kthread_run(cfs_wi_scheduler, sched, "%s", name);
+ if (!IS_ERR(task)) {
+ nthrs--;
+ continue;
+ }
+ rc = PTR_ERR(task);
+
+ CERROR("Failed to create thread for WI scheduler %s: %d\n",
+ name, rc);
+
+ spin_lock(&cfs_wi_data.wi_glock);
+
+ /* make up for cfs_wi_sched_destroy */
+ list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
+ sched->ws_starting--;
+
+ spin_unlock(&cfs_wi_data.wi_glock);
+
+ cfs_wi_sched_destroy(sched);
+ return rc;
+ }
+ spin_lock(&cfs_wi_data.wi_glock);
+ list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
+ spin_unlock(&cfs_wi_data.wi_glock);
+
+ *sched_pp = sched;
+ return 0;
+}
+EXPORT_SYMBOL(cfs_wi_sched_create);
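+
+/*
+ * Life-cycle sketch for the API above; "wi" stands for a caller-owned
+ * cfs_workitem_t whose wi_action() was set up elsewhere, and cptab/nthrs are
+ * caller-supplied:
+ *
+ *	struct cfs_wi_sched *sched;
+ *	int rc;
+ *
+ *	rc = cfs_wi_sched_create("test", cptab, CFS_CPT_ANY, nthrs, &sched);
+ *	if (rc == 0) {
+ *		cfs_wi_schedule(sched, wi);	- wi_action() runs in a scheduler thread
+ *		...
+ *		cfs_wi_exit(sched, wi);		- must be called from within wi_action()
+ *		cfs_wi_sched_destroy(sched);
+ *	}
+ */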
+
+int
+cfs_wi_startup(void)
+{
+ memset(&cfs_wi_data, 0, sizeof(cfs_wi_data));
+
+ spin_lock_init(&cfs_wi_data.wi_glock);
+ INIT_LIST_HEAD(&cfs_wi_data.wi_scheds);
+ cfs_wi_data.wi_init = 1;
+
+ return 0;
+}
+
+void
+cfs_wi_shutdown (void)
+{
+ struct cfs_wi_sched *sched;
+
+ spin_lock(&cfs_wi_data.wi_glock);
+ cfs_wi_data.wi_stopping = 1;
+ spin_unlock(&cfs_wi_data.wi_glock);
+
+ /* nobody should contend on this list */
+ list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
+ sched->ws_stopping = 1;
+ wake_up_all(&sched->ws_waitq);
+ }
+
+ list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
+ spin_lock(&cfs_wi_data.wi_glock);
+
+ while (sched->ws_nthreads != 0) {
+ spin_unlock(&cfs_wi_data.wi_glock);
+ cfs_pause(cfs_time_seconds(1) / 20);
+ spin_lock(&cfs_wi_data.wi_glock);
+ }
+ spin_unlock(&cfs_wi_data.wi_glock);
+ }
+ while (!list_empty(&cfs_wi_data.wi_scheds)) {
+ sched = list_entry(cfs_wi_data.wi_scheds.next,
+ struct cfs_wi_sched, ws_list);
+ list_del(&sched->ws_list);
+ LIBCFS_FREE(sched, sizeof(*sched));
+ }
+
+ cfs_wi_data.wi_stopping = 0;
+ cfs_wi_data.wi_init = 0;
+}
diff --git a/drivers/staging/lustre/lustre/llite/Makefile b/drivers/staging/lustre/lustre/llite/Makefile
new file mode 100644
index 0000000..f493e07
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/Makefile
@@ -0,0 +1,13 @@
+obj-$(CONFIG_LUSTRE_FS) += lustre.o
+obj-$(CONFIG_LUSTRE_LLITE_LLOOP) += llite_lloop.o
+lustre-y := dcache.o dir.o file.o llite_close.o llite_lib.o llite_nfs.o \
+ rw.o lproc_llite.o namei.o symlink.o llite_mmap.o \
+ xattr.o remote_perm.o llite_rmtacl.o llite_capa.o \
+ rw26.o super25.o statahead.o \
+ ../lclient/glimpse.o ../lclient/lcommon_cl.o ../lclient/lcommon_misc.o \
+ vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o
+
+llite_lloop-y := lloop.o
+
+
+ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
new file mode 100644
index 0000000..e7629be
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -0,0 +1,659 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/quotaops.h>
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+#include <obd_support.h>
+#include <lustre_lite.h>
+#include <lustre/lustre_idl.h>
+#include <lustre_dlm.h>
+
+#include "llite_internal.h"
+
+static void free_dentry_data(struct rcu_head *head)
+{
+ struct ll_dentry_data *lld;
+
+ lld = container_of(head, struct ll_dentry_data, lld_rcu_head);
+ OBD_FREE_PTR(lld);
+}
+
+/* should NOT be called with the dcache lock, see fs/dcache.c */
+static void ll_release(struct dentry *de)
+{
+ struct ll_dentry_data *lld;
+
+ LASSERT(de != NULL);
+ lld = ll_d2d(de);
+ if (lld == NULL) /* NFS copies the de->d_op methods (bug 4655) */
+ return;
+
+ if (lld->lld_it) {
+ ll_intent_release(lld->lld_it);
+ OBD_FREE(lld->lld_it, sizeof(*lld->lld_it));
+ }
+ LASSERT(lld->lld_cwd_count == 0);
+ LASSERT(lld->lld_mnt_count == 0);
+ de->d_fsdata = NULL;
+ call_rcu(&lld->lld_rcu_head, free_dentry_data);
+}
+
+/* Compare if two dentries are the same. Don't match if the existing dentry
+ * is marked invalid. Returns 1 if different, 0 if the same.
+ *
+ * This avoids a race where ll_lookup_it() instantiates a dentry, but we get
+ * an AST before calling d_revalidate_it(). The dentry still exists (marked
+ * INVALID) so d_lookup() matches it, but we have no lock on it (so
+ * lock_match() fails) and we spin around real_lookup(). */
+int ll_dcompare(const struct dentry *parent, const struct dentry *dentry,
+ unsigned int len, const char *str, const struct qstr *name)
+{
+ if (len != name->len)
+ return 1;
+
+ if (memcmp(str, name->name, len))
+ return 1;
+
+ CDEBUG(D_DENTRY, "found name %.*s(%p) flags %#x refc %d\n",
+ name->len, name->name, dentry, dentry->d_flags,
+ d_count(dentry));
+
+ /* mountpoint is always valid */
+ if (d_mountpoint((struct dentry *)dentry))
+ return 0;
+
+ if (d_lustre_invalid(dentry))
+ return 1;
+
+ return 0;
+}
+
+static inline int return_if_equal(struct ldlm_lock *lock, void *data)
+{
+ if ((lock->l_flags &
+ (LDLM_FL_CANCELING | LDLM_FL_DISCARD_DATA)) ==
+ (LDLM_FL_CANCELING | LDLM_FL_DISCARD_DATA))
+ return LDLM_ITER_CONTINUE;
+ return LDLM_ITER_STOP;
+}
+
+/* find any ldlm lock of the inode in mdc and lov
+ * return 0 if none is found
+ * 1 if one is found
+ * < 0 on error */
+static int find_cbdata(struct inode *inode)
+{
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct lov_stripe_md *lsm;
+ int rc = 0;
+
+ LASSERT(inode);
+ rc = md_find_cbdata(sbi->ll_md_exp, ll_inode2fid(inode),
+ return_if_equal, NULL);
+ if (rc != 0)
+ return rc;
+
+ lsm = ccc_inode_lsm_get(inode);
+ if (lsm == NULL)
+ return rc;
+
+ rc = obd_find_cbdata(sbi->ll_dt_exp, lsm, return_if_equal, NULL);
+ ccc_inode_lsm_put(inode, lsm);
+
+ return rc;
+}
+
+/**
+ * Called when last reference to a dentry is dropped and dcache wants to know
+ * whether or not it should cache it:
+ * - return 1 to delete the dentry immediately
+ * - return 0 to cache the dentry
+ * Should NOT be called with the dcache lock, see fs/dcache.c
+ */
+static int ll_ddelete(const struct dentry *de)
+{
+ LASSERT(de);
+
+ CDEBUG(D_DENTRY, "%s dentry %.*s (%p, parent %p, inode %p) %s%s\n",
+ d_lustre_invalid((struct dentry *)de) ? "deleting" : "keeping",
+ de->d_name.len, de->d_name.name, de, de->d_parent, de->d_inode,
+ d_unhashed((struct dentry *)de) ? "" : "hashed,",
+ list_empty(&de->d_subdirs) ? "" : "subdirs");
+
+ /* kernel >= 2.6.38 last refcount is decreased after this function. */
+ LASSERT(d_count(de) == 1);
+
+ /* Disable this piece of code temporarily because it is called
+ * inside dcache_lock, so it's not appropriate to do lots of work
+ * here. ATTENTION: before enabling this piece of code, LU-2487 must
+ * be resolved. */
+#if 0
+ /* if not ldlm lock for this inode, set i_nlink to 0 so that
+ * this inode can be recycled later b=20433 */
+ if (de->d_inode && !find_cbdata(de->d_inode))
+ clear_nlink(de->d_inode);
+#endif
+
+ if (d_lustre_invalid((struct dentry *)de))
+ return 1;
+ return 0;
+}
+
+static int ll_set_dd(struct dentry *de)
+{
+ LASSERT(de != NULL);
+
+ CDEBUG(D_DENTRY, "ldd on dentry %.*s (%p) parent %p inode %p refc %d\n",
+ de->d_name.len, de->d_name.name, de, de->d_parent, de->d_inode,
+ d_count(de));
+
+ if (de->d_fsdata == NULL) {
+ struct ll_dentry_data *lld;
+
+ OBD_ALLOC_PTR(lld);
+ if (likely(lld != NULL)) {
+ spin_lock(&de->d_lock);
+ if (likely(de->d_fsdata == NULL))
+ de->d_fsdata = lld;
+ else
+ OBD_FREE_PTR(lld);
+ spin_unlock(&de->d_lock);
+ } else {
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+int ll_dops_init(struct dentry *de, int block, int init_sa)
+{
+ struct ll_dentry_data *lld = ll_d2d(de);
+ int rc = 0;
+
+ if (lld == NULL && block != 0) {
+ rc = ll_set_dd(de);
+ if (rc)
+ return rc;
+
+ lld = ll_d2d(de);
+ }
+
+ if (lld != NULL && init_sa != 0)
+ lld->lld_sa_generation = 0;
+
+ /* kernel >= 2.6.38 d_op is set in d_alloc() */
+ LASSERT(de->d_op == &ll_d_ops);
+ return rc;
+}
+
+void ll_intent_drop_lock(struct lookup_intent *it)
+{
+ if (it->it_op && it->d.lustre.it_lock_mode) {
+ struct lustre_handle handle;
+
+ handle.cookie = it->d.lustre.it_lock_handle;
+
+ CDEBUG(D_DLMTRACE, "releasing lock with cookie "LPX64
+ " from it %p\n", handle.cookie, it);
+ ldlm_lock_decref(&handle, it->d.lustre.it_lock_mode);
+
+ /* bug 494: intent_release may be called multiple times, from
+ * this thread and we don't want to double-decref this lock */
+ it->d.lustre.it_lock_mode = 0;
+ if (it->d.lustre.it_remote_lock_mode != 0) {
+ handle.cookie = it->d.lustre.it_remote_lock_handle;
+
+ CDEBUG(D_DLMTRACE, "releasing remote lock with cookie"
+ LPX64" from it %p\n", handle.cookie, it);
+ ldlm_lock_decref(&handle,
+ it->d.lustre.it_remote_lock_mode);
+ it->d.lustre.it_remote_lock_mode = 0;
+ }
+ }
+}
+
+void ll_intent_release(struct lookup_intent *it)
+{
+ CDEBUG(D_INFO, "intent %p released\n", it);
+ ll_intent_drop_lock(it);
+ /* We are still holding extra reference on a request, need to free it */
+ if (it_disposition(it, DISP_ENQ_OPEN_REF))
+ ptlrpc_req_finished(it->d.lustre.it_data); /* ll_file_open */
+ if (it_disposition(it, DISP_ENQ_CREATE_REF)) /* create rec */
+ ptlrpc_req_finished(it->d.lustre.it_data);
+ if (it_disposition(it, DISP_ENQ_COMPLETE)) /* saved req from revalidate
+ * to lookup */
+ ptlrpc_req_finished(it->d.lustre.it_data);
+
+ it->d.lustre.it_disposition = 0;
+ it->d.lustre.it_data = NULL;
+}
+
+void ll_invalidate_aliases(struct inode *inode)
+{
+ struct dentry *dentry;
+ struct ll_d_hlist_node *p;
+
+ LASSERT(inode != NULL);
+
+ CDEBUG(D_INODE, "marking dentries for ino %lu/%u(%p) invalid\n",
+ inode->i_ino, inode->i_generation, inode);
+
+ ll_lock_dcache(inode);
+ ll_d_hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
+ CDEBUG(D_DENTRY, "dentry in drop %.*s (%p) parent %p "
+ "inode %p flags %d\n", dentry->d_name.len,
+ dentry->d_name.name, dentry, dentry->d_parent,
+ dentry->d_inode, dentry->d_flags);
+
+ if (unlikely(dentry == dentry->d_sb->s_root)) {
+ CERROR("%s: called on root dentry=%p, fid="DFID"\n",
+ ll_get_fsname(dentry->d_sb, NULL, 0),
+ dentry, PFID(ll_inode2fid(inode)));
+ lustre_dump_dentry(dentry, 1);
+ dump_stack();
+ }
+
+ d_lustre_invalidate(dentry, 0);
+ }
+ ll_unlock_dcache(inode);
+}
+
+int ll_revalidate_it_finish(struct ptlrpc_request *request,
+ struct lookup_intent *it,
+ struct dentry *de)
+{
+ int rc = 0;
+
+ if (!request)
+ return 0;
+
+ if (it_disposition(it, DISP_LOOKUP_NEG))
+ return -ENOENT;
+
+ rc = ll_prep_inode(&de->d_inode, request, NULL, it);
+
+ return rc;
+}
+
+void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry)
+{
+ LASSERT(it != NULL);
+ LASSERT(dentry != NULL);
+
+ if (it->d.lustre.it_lock_mode && dentry->d_inode != NULL) {
+ struct inode *inode = dentry->d_inode;
+ struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
+
+ CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
+ inode, inode->i_ino, inode->i_generation);
+ ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL);
+ }
+
+ /* drop lookup or getattr locks immediately */
+ if (it->it_op == IT_LOOKUP || it->it_op == IT_GETATTR) {
+ /* on 2.6 there are situations when several lookups and
+ * revalidations may be requested during a single operation.
+ * Therefore, we don't release the intent here -bzzz */
+ ll_intent_drop_lock(it);
+ }
+}
+
+void ll_frob_intent(struct lookup_intent **itp, struct lookup_intent *deft)
+{
+ struct lookup_intent *it = *itp;
+
+ if (!it || it->it_op == IT_GETXATTR)
+ it = *itp = deft;
+
+}
+
+int ll_revalidate_it(struct dentry *de, int lookup_flags,
+ struct lookup_intent *it)
+{
+ struct md_op_data *op_data;
+ struct ptlrpc_request *req = NULL;
+ struct lookup_intent lookup_it = { .it_op = IT_LOOKUP };
+ struct obd_export *exp;
+ struct inode *parent = de->d_parent->d_inode;
+ int rc;
+
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%s,intent=%s\n", de->d_name.name,
+ LL_IT2STR(it));
+
+ if (de->d_inode == NULL) {
+ __u64 ibits;
+
+ /* We can only use negative dentries if this is a stat or lookup;
+ * for opens and the like we do need to query the server. */
+ /* If IT_CREAT is in the intent op set, then we must throw
+ * away this negative dentry and actually issue the request
+ * to create whatever needs to be created (if possible). */
+ if (it && (it->it_op & IT_CREAT))
+ return 0;
+
+ if (d_lustre_invalid(de))
+ return 0;
+
+ ibits = MDS_INODELOCK_UPDATE;
+ rc = ll_have_md_lock(parent, &ibits, LCK_MINMODE);
+ GOTO(out_sa, rc);
+ }
+
+ /* Never execute intents for mount points.
+ * Attributes will be fixed up in ll_inode_revalidate_it */
+ if (d_mountpoint(de))
+ GOTO(out_sa, rc = 1);
+
+ /* need to get attributes in case root got changed from other client */
+ if (de == de->d_sb->s_root) {
+ rc = __ll_inode_revalidate_it(de, it, MDS_INODELOCK_LOOKUP);
+ if (rc == 0)
+ rc = 1;
+ GOTO(out_sa, rc);
+ }
+
+ exp = ll_i2mdexp(de->d_inode);
+
+ OBD_FAIL_TIMEOUT(OBD_FAIL_MDC_REVALIDATE_PAUSE, 5);
+ ll_frob_intent(&it, &lookup_it);
+ LASSERT(it);
+
+ if (it->it_op == IT_LOOKUP && !d_lustre_invalid(de))
+ return 1;
+
+ if (it->it_op == IT_OPEN) {
+ struct inode *inode = de->d_inode;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct obd_client_handle **och_p;
+ __u64 *och_usecount;
+ __u64 ibits;
+
+ /*
+ * We used to check for MDS_INODELOCK_OPEN here, but in fact
+ * just having the LOOKUP lock is enough to justify that the inode is
+ * the same. And if the inode is the same and we have a suitable
+ * openhandle, then there is no point in doing another OPEN RPC
+ * just to throw away the newly received openhandle. There are no
+ * security implications either: if the file owner or access mode
+ * changes, the LOOKUP lock is revoked.
+ */
+
+
+ if (it->it_flags & FMODE_WRITE) {
+ och_p = &lli->lli_mds_write_och;
+ och_usecount = &lli->lli_open_fd_write_count;
+ } else if (it->it_flags & FMODE_EXEC) {
+ och_p = &lli->lli_mds_exec_och;
+ och_usecount = &lli->lli_open_fd_exec_count;
+ } else {
+ och_p = &lli->lli_mds_read_och;
+ och_usecount = &lli->lli_open_fd_read_count;
+ }
+ /* Check for the proper lock. */
+ ibits = MDS_INODELOCK_LOOKUP;
+ if (!ll_have_md_lock(inode, &ibits, LCK_MINMODE))
+ goto do_lock;
+ mutex_lock(&lli->lli_och_mutex);
+ if (*och_p) { /* Everything is open already, do nothing */
+ /*(*och_usecount)++; Do not let them steal our open
+ handle from under us */
+ SET_BUT_UNUSED(och_usecount);
+ /* XXX The code above was my original idea, but in case
+ we have the handle, but we cannot use it due to later
+ checks (e.g. O_CREAT|O_EXCL flags set), nobody
+ would decrement counter increased here. So we just
+ hope the lock won't be invalidated in between. But
+ if it would be, we'll reopen the open request to
+ MDS later during file open path */
+ mutex_unlock(&lli->lli_och_mutex);
+ return 1;
+ } else {
+ mutex_unlock(&lli->lli_och_mutex);
+ }
+ }
+
+ if (it->it_op == IT_GETATTR) {
+ rc = ll_statahead_enter(parent, &de, 0);
+ if (rc == 1)
+ goto mark;
+ else if (rc != -EAGAIN && rc != 0)
+ GOTO(out, rc = 0);
+ }
+
+do_lock:
+ op_data = ll_prep_md_op_data(NULL, parent, de->d_inode,
+ de->d_name.name, de->d_name.len,
+ 0, LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+
+ if (!IS_POSIXACL(parent) || !exp_connect_umask(exp))
+ it->it_create_mode &= ~current_umask();
+ it->it_create_mode |= M_CHECK_STALE;
+ rc = md_intent_lock(exp, op_data, NULL, 0, it,
+ lookup_flags,
+ &req, ll_md_blocking_ast, 0);
+ it->it_create_mode &= ~M_CHECK_STALE;
+ ll_finish_md_op_data(op_data);
+
+ /* If req is NULL, then md_intent_lock only tried to do a lock match;
+ * if all was well, it will return 1 if it found locks, 0 otherwise. */
+ if (req == NULL && rc >= 0) {
+ if (!rc)
+ goto do_lookup;
+ GOTO(out, rc);
+ }
+
+ if (rc < 0) {
+ if (rc != -ESTALE) {
+ CDEBUG(D_INFO, "ll_intent_lock: rc %d : it->it_status "
+ "%d\n", rc, it->d.lustre.it_status);
+ }
+ GOTO(out, rc = 0);
+ }
+
+revalidate_finish:
+ rc = ll_revalidate_it_finish(req, it, de);
+ if (rc != 0) {
+ if (rc != -ESTALE && rc != -ENOENT)
+ ll_intent_release(it);
+ GOTO(out, rc = 0);
+ }
+
+ if ((it->it_op & IT_OPEN) && de->d_inode &&
+ !S_ISREG(de->d_inode->i_mode) &&
+ !S_ISDIR(de->d_inode->i_mode)) {
+ ll_release_openhandle(de, it);
+ }
+ rc = 1;
+
+out:
+ /* We do not free request as it may be reused during following lookup
+ * (see comment in mdc/mdc_locks.c::mdc_intent_lock()), request will
+ * be freed in ll_lookup_it or in ll_intent_release. But if
+ * request was not completed, we need to free it. (bug 5154, 9903) */
+ if (req != NULL && !it_disposition(it, DISP_ENQ_COMPLETE))
+ ptlrpc_req_finished(req);
+ if (rc == 0) {
+ /* mdt may grant layout lock for the newly created file, so
+ * release the lock to avoid leaking */
+ ll_intent_drop_lock(it);
+ ll_invalidate_aliases(de->d_inode);
+ } else {
+ __u64 bits = 0;
+ __u64 matched_bits = 0;
+
+ CDEBUG(D_DENTRY, "revalidated dentry %.*s (%p) parent %p "
+ "inode %p refc %d\n", de->d_name.len,
+ de->d_name.name, de, de->d_parent, de->d_inode,
+ d_count(de));
+
+ ll_set_lock_data(exp, de->d_inode, it, &bits);
+
+ /* Note: we have to match both the LOOKUP and the PERM lock
+ * here to make sure the dentry is valid and nobody is
+ * changing the permission.
+ * But a client connected to a server older than 2.4 is only
+ * granted the LOOKUP lock, so for such old servers we can
+ * only match the LOOKUP lock. */
+ if (exp_connect_flags(ll_i2mdexp(de->d_inode)) &
+ OBD_CONNECT_LVB_TYPE)
+ matched_bits =
+ MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM;
+ else
+ matched_bits = MDS_INODELOCK_LOOKUP;
+
+ if (((bits & matched_bits) == matched_bits) &&
+ d_lustre_invalid(de))
+ d_lustre_revalidate(de);
+ ll_lookup_finish_locks(it, de);
+ }
+
+mark:
+ if (it != NULL && it->it_op == IT_GETATTR && rc > 0)
+ ll_statahead_mark(parent, de);
+ return rc;
+
+ /*
+ * This part is here to combat evil-evil race in real_lookup on 2.6
+ * kernels. The race details are: We enter do_lookup() looking for some
+ * name, there is nothing in dcache for this name yet and d_lookup()
+ * returns NULL. We proceed to real_lookup(), and while we do this,
+ * another process opens the same file we are looking up (the simplest
+ * reproducer), the open succeeds and the dentry is added. Now back to
+ * us. In real_lookup() we do d_lookup() again and suddenly find the
+ * dentry, so we call d_revalidate on it, but there is no lock, so
+ * without this code we would return 0, but unpatched real_lookup just
+ * returns -ENOENT in such a case instead of retrying the lookup. Once
+ * this is dealt with in real_lookup(), all of this ugly mess can go and
+ * we can just check locks in ->d_revalidate without doing any RPCs
+ * ever.
+ */
+do_lookup:
+ if (it != &lookup_it) {
+ /* MDS_INODELOCK_UPDATE needed for IT_GETATTR case. */
+ if (it->it_op == IT_GETATTR)
+ lookup_it.it_op = IT_GETATTR;
+ ll_lookup_finish_locks(it, de);
+ it = &lookup_it;
+ }
+
+ /* Do real lookup here. */
+ op_data = ll_prep_md_op_data(NULL, parent, NULL, de->d_name.name,
+ de->d_name.len, 0, (it->it_op & IT_CREAT ?
+ LUSTRE_OPC_CREATE :
+ LUSTRE_OPC_ANY), NULL);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+
+ rc = md_intent_lock(exp, op_data, NULL, 0, it, 0, &req,
+ ll_md_blocking_ast, 0);
+ if (rc >= 0) {
+ struct mdt_body *mdt_body;
+ struct lu_fid fid = {.f_seq = 0, .f_oid = 0, .f_ver = 0};
+ mdt_body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+
+ if (de->d_inode)
+ fid = *ll_inode2fid(de->d_inode);
+
+ /* see if we got same inode, if not - return error */
+ if (lu_fid_eq(&fid, &mdt_body->fid1)) {
+ ll_finish_md_op_data(op_data);
+ op_data = NULL;
+ goto revalidate_finish;
+ }
+ ll_intent_release(it);
+ }
+ ll_finish_md_op_data(op_data);
+ GOTO(out, rc = 0);
+
+out_sa:
+ /*
+ * For rc == 1 case, should not return directly to prevent losing
+ * statahead windows; for rc == 0 case, the "lookup" will be done later.
+ */
+ if (it != NULL && it->it_op == IT_GETATTR && rc == 1)
+ ll_statahead_enter(parent, &de, 1);
+ goto mark;
+}
+
+/*
+ * Always trust cached dentries. Update statahead window if necessary.
+ */
+int ll_revalidate_nd(struct dentry *dentry, unsigned int flags)
+{
+ struct inode *parent = dentry->d_parent->d_inode;
+ int unplug = 0;
+
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%s,flags=%u\n",
+ dentry->d_name.name, flags);
+
+ if (!(flags & (LOOKUP_PARENT|LOOKUP_OPEN|LOOKUP_CREATE)) &&
+ ll_need_statahead(parent, dentry) > 0) {
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+
+ if (dentry->d_inode == NULL)
+ unplug = 1;
+ do_statahead_enter(parent, &dentry, unplug);
+ ll_statahead_mark(parent, dentry);
+ }
+
+ return 1;
+}
+
+
+void ll_d_iput(struct dentry *de, struct inode *inode)
+{
+ LASSERT(inode);
+ if (!find_cbdata(inode))
+ clear_nlink(inode);
+ iput(inode);
+}
+
+struct dentry_operations ll_d_ops = {
+ .d_revalidate = ll_revalidate_nd,
+ .d_release = ll_release,
+ .d_delete = ll_ddelete,
+ .d_iput = ll_d_iput,
+ .d_compare = ll_dcompare,
+};
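+
+/*
+ * These operations are expected to be attached at mount time by the llite
+ * super-block setup code (outside this file), most naturally via something
+ * like:
+ *
+ *	sb->s_d_op = &ll_d_ops;
+ *
+ * which is what lets the "d_op is set in d_alloc()" assumption in
+ * ll_dops_init() above hold for every new dentry.
+ */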
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
new file mode 100644
index 0000000..09844be
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -0,0 +1,1955 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/llite/dir.c
+ *
+ * Directory code for lustre client.
+ */
+
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/mm.h>
+#include <asm/uaccess.h>
+#include <linux/buffer_head.h> // for wait_on_buffer
+#include <linux/pagevec.h>
+#include <linux/prefetch.h>
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+#include <obd_support.h>
+#include <obd_class.h>
+#include <lustre_lib.h>
+#include <lustre/lustre_idl.h>
+#include <lustre_lite.h>
+#include <lustre_dlm.h>
+#include <lustre_fid.h>
+#include "llite_internal.h"
+
+/*
+ * (new) readdir implementation overview.
+ *
+ * The original lustre readdir implementation cached an exact copy of the raw
+ * directory pages on the client. These pages were indexed in the client page
+ * cache by the logical offset in the directory file. This design, while very
+ * simple and intuitive, had some inherent problems:
+ *
+ * . it implies that byte offset to the directory entry serves as a
+ * telldir(3)/seekdir(3) cookie, but that offset is not stable: in
+ * ext3/htree directory entries may move due to splits, and more
+ * importantly,
+ *
+ * . it is incompatible with the design of split directories for cmd3,
+ * that assumes that names are distributed across nodes based on their
+ * hash, and so readdir should be done in hash order.
+ *
+ * The new readdir implementation does readdir in hash order, and uses the
+ * hash of a file name as a telldir/seekdir cookie. This led to a number of
+ * complications:
+ *
+ * . hash is not unique, so it cannot be used to index cached directory
+ * pages on the client (note, that it requires a whole pageful of hash
+ * collided entries to cause two pages to have identical hashes);
+ *
+ * . hash is not unique, so it cannot, strictly speaking, be used as an
+ * entry cookie. ext3/htree has the same problem and lustre implementation
+ * mimics their solution: seekdir(hash) positions directory at the first
+ * entry with the given hash.
+ *
+ * Client side.
+ *
+ * 0. caching
+ *
+ * Client caches directory pages using hash of the first entry as an index. As
+ * noted above hash is not unique, so this solution doesn't work as is:
+ * special processing is needed for "page hash chains" (i.e., sequences of
+ * pages filled with entries all having the same hash value).
+ *
+ * First, such chains have to be detected. To this end, the server returns to
+ * the client the hash of the first entry on the page next to the one
+ * returned. When the client detects that this hash is the same as the hash of
+ * the first entry on the returned page, a page hash collision has to be
+ * handled. Pages in the hash chain, except the first one, are termed
+ * "overflow pages".
+ *
+ * The solution to the index uniqueness problem is not to cache overflow
+ * pages. Instead, when a page hash collision is detected, all overflow pages
+ * from the emerging chain are immediately requested from the server and
+ * placed in a special data structure (struct ll_dir_chain). This data
+ * structure is used by ll_readdir() to process entries from overflow pages.
+ * When the readdir invocation finishes, overflow pages are discarded. If the
+ * page hash collision chain wasn't completely processed, the next call to
+ * readdir will again detect the page hash collision, again read the overflow
+ * pages in, process the next portion of entries and again discard the pages.
+ * This is not as wasteful as it looks,
+ * because, given reasonable hash, page hash collisions are extremely rare.
+ *
+ * 1. directory positioning
+ *
+ * When seekdir(hash) is called, original
+ *
+ * Server.
+ *
+ * identification of and access to overflow pages
+ *
+ * page format
+ *
+ * A page in the MDS_READPAGE RPC is packed in units of LU_PAGE_SIZE, and each
+ * page contains an lu_dirpage header which describes the start/end hash, and
+ * whether this page is empty (contains no dir entry) or hash-collides with
+ * the next page. After the client receives the reply, several pages will be
+ * integrated into one dir page of PAGE_CACHE_SIZE (if PAGE_CACHE_SIZE is
+ * greater than LU_PAGE_SIZE), and the lu_dirpage for this integrated page
+ * will be adjusted. See lmv_adjust_dirpages().
+ *
+ */
+
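+/*
+ * Illustrative sketch only (not part of this driver): how the hash-based
+ * telldir()/seekdir() cookie described above can be turned into a page-cache
+ * index.  The helper name example_hash_to_index() is hypothetical and only
+ * mirrors what hash_x_index(), defined elsewhere in llite, is assumed to do;
+ * it is kept under "#if 0" so it is never compiled.
+ */
+#if 0
+static unsigned long example_hash_to_index(__u64 hash, int hash64)
+{
+ /* 32-bit clients exposed to 64-bit hashes keep only the top half. */
+ if (BITS_PER_LONG == 32 && hash64)
+ hash >>= 32;
+ /*
+ * Pages are stored at the complement of their starting hash, so a
+ * radix_tree_gang_lookup() started from ~hash returns the cached page
+ * whose starting hash is the largest one not exceeding the target.
+ */
+ return ~0UL - (unsigned long)hash;
+}
+#endif
+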
+/* returns the page unlocked, but with a reference */
+static int ll_dir_filler(void *_hash, struct page *page0)
+{
+ struct inode *inode = page0->mapping->host;
+ int hash64 = ll_i2sbi(inode)->ll_flags & LL_SBI_64BIT_HASH;
+ struct obd_export *exp = ll_i2sbi(inode)->ll_md_exp;
+ struct ptlrpc_request *request;
+ struct mdt_body *body;
+ struct md_op_data *op_data;
+ __u64 hash = *((__u64 *)_hash);
+ struct page **page_pool;
+ struct page *page;
+ struct lu_dirpage *dp;
+ int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_CACHE_SHIFT;
+ int nrdpgs = 0; /* number of pages actually read */
+ int npages;
+ int i;
+ int rc;
+
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) hash "LPU64"\n",
+ inode->i_ino, inode->i_generation, inode, hash);
+
+ LASSERT(max_pages > 0 && max_pages <= MD_MAX_BRW_PAGES);
+
+ OBD_ALLOC(page_pool, sizeof(page) * max_pages);
+ if (page_pool != NULL) {
+ page_pool[0] = page0;
+ } else {
+ page_pool = &page0;
+ max_pages = 1;
+ }
+ for (npages = 1; npages < max_pages; npages++) {
+ page = page_cache_alloc_cold(inode->i_mapping);
+ if (!page)
+ break;
+ page_pool[npages] = page;
+ }
+
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
+ LUSTRE_OPC_ANY, NULL);
+ op_data->op_npages = npages;
+ op_data->op_offset = hash;
+ rc = md_readpage(exp, op_data, page_pool, &request);
+ ll_finish_md_op_data(op_data);
+ if (rc == 0) {
+ body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
+ /* Checked by mdc_readpage() */
+ LASSERT(body != NULL);
+
+ if (body->valid & OBD_MD_FLSIZE)
+ cl_isize_write(inode, body->size);
+
+ nrdpgs = (request->rq_bulk->bd_nob_transferred + PAGE_CACHE_SIZE - 1)
+ >> PAGE_CACHE_SHIFT;
+ SetPageUptodate(page0);
+ }
+ unlock_page(page0);
+ ptlrpc_req_finished(request);
+
+ CDEBUG(D_VFSTRACE, "read %d/%d pages\n", nrdpgs, npages);
+
+ ll_pagevec_init(&lru_pvec, 0);
+ for (i = 1; i < npages; i++) {
+ unsigned long offset;
+ int ret;
+
+ page = page_pool[i];
+
+ if (rc < 0 || i >= nrdpgs) {
+ page_cache_release(page);
+ continue;
+ }
+
+ SetPageUptodate(page);
+
+ dp = kmap(page);
+ hash = le64_to_cpu(dp->ldp_hash_start);
+ kunmap(page);
+
+ offset = hash_x_index(hash, hash64);
+
+ prefetchw(&page->flags);
+ ret = add_to_page_cache_lru(page, inode->i_mapping, offset,
+ GFP_KERNEL);
+ if (ret == 0) {
+ unlock_page(page);
+ if (ll_pagevec_add(&lru_pvec, page) == 0)
+ ll_pagevec_lru_add_file(&lru_pvec);
+ } else {
+ CDEBUG(D_VFSTRACE, "page %lu add to page cache failed:"
+ " %d\n", offset, ret);
+ }
+ page_cache_release(page);
+ }
+ ll_pagevec_lru_add_file(&lru_pvec);
+
+ if (page_pool != &page0)
+ OBD_FREE(page_pool, sizeof(struct page *) * max_pages);
+ return rc;
+}
+
+static void ll_check_page(struct inode *dir, struct page *page)
+{
+ /* XXX: check page format later */
+ SetPageChecked(page);
+}
+
+void ll_release_page(struct page *page, int remove)
+{
+ kunmap(page);
+ if (remove) {
+ lock_page(page);
+ if (likely(page->mapping != NULL))
+ truncate_complete_page(page->mapping, page);
+ unlock_page(page);
+ }
+ page_cache_release(page);
+}
+
+/*
+ * Find, kmap and return page that contains given hash.
+ */
+static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
+ __u64 *start, __u64 *end)
+{
+ int hash64 = ll_i2sbi(dir)->ll_flags & LL_SBI_64BIT_HASH;
+ struct address_space *mapping = dir->i_mapping;
+ /*
+ * Complement of hash is used as an index so that
+ * radix_tree_gang_lookup() can be used to find a page with starting
+ * hash _smaller_ than one we are looking for.
+ */
+ unsigned long offset = hash_x_index(*hash, hash64);
+ struct page *page;
+ int found;
+
+ TREE_READ_LOCK_IRQ(mapping);
+ found = radix_tree_gang_lookup(&mapping->page_tree,
+ (void **)&page, offset, 1);
+ if (found > 0) {
+ struct lu_dirpage *dp;
+
+ page_cache_get(page);
+ TREE_READ_UNLOCK_IRQ(mapping);
+ /*
+ * In contrast to find_lock_page() we are sure that directory
+ * page cannot be truncated (while DLM lock is held) and,
+ * hence, can avoid restart.
+ *
+ * In fact, page cannot be locked here at all, because
+ * ll_dir_filler() does synchronous io.
+ */
+ wait_on_page_locked(page);
+ if (PageUptodate(page)) {
+ dp = kmap(page);
+ if (BITS_PER_LONG == 32 && hash64) {
+ *start = le64_to_cpu(dp->ldp_hash_start) >> 32;
+ *end = le64_to_cpu(dp->ldp_hash_end) >> 32;
+ *hash = *hash >> 32;
+ } else {
+ *start = le64_to_cpu(dp->ldp_hash_start);
+ *end = le64_to_cpu(dp->ldp_hash_end);
+ }
+ LASSERTF(*start <= *hash, "start = "LPX64",end = "
+ LPX64",hash = "LPX64"\n", *start, *end, *hash);
+ CDEBUG(D_VFSTRACE, "page %lu [%llu %llu], hash "LPU64"\n",
+ offset, *start, *end, *hash);
+ if (*hash > *end) {
+ ll_release_page(page, 0);
+ page = NULL;
+ } else if (*end != *start && *hash == *end) {
+ /*
+ * upon hash collision, remove this page,
+ * otherwise put page reference, and
+ * ll_get_dir_page() will issue RPC to fetch
+ * the page we want.
+ */
+ ll_release_page(page,
+ le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
+ page = NULL;
+ }
+ } else {
+ page_cache_release(page);
+ page = ERR_PTR(-EIO);
+ }
+
+ } else {
+ TREE_READ_UNLOCK_IRQ(mapping);
+ page = NULL;
+ }
+ return page;
+}
+
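+/*
+ * Worked example (illustrative numbers only) of the complemented-index lookup
+ * used by ll_dir_page_locate() above: with 16-bit hashes for brevity, cached
+ * pages starting at hashes 0x1000, 0x4000 and 0x9000 sit at indices
+ * 0xffff - 0x1000, 0xffff - 0x4000 and 0xffff - 0x9000.  A lookup for hash
+ * 0x5000 starts the gang lookup at index 0xffff - 0x5000 and first meets
+ * index 0xffff - 0x4000, i.e. the page whose starting hash (0x4000) is the
+ * largest one not exceeding 0x5000; whether 0x5000 really falls inside that
+ * page is then decided by the [ldp_hash_start, ldp_hash_end] checks above.
+ */
+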
+struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
+ struct ll_dir_chain *chain)
+{
+ ldlm_policy_data_t policy = {.l_inodebits = {MDS_INODELOCK_UPDATE} };
+ struct address_space *mapping = dir->i_mapping;
+ struct lustre_handle lockh;
+ struct lu_dirpage *dp;
+ struct page *page;
+ ldlm_mode_t mode;
+ int rc;
+ __u64 start = 0;
+ __u64 end = 0;
+ __u64 lhash = hash;
+ struct ll_inode_info *lli = ll_i2info(dir);
+ int hash64 = ll_i2sbi(dir)->ll_flags & LL_SBI_64BIT_HASH;
+
+ mode = LCK_PR;
+ rc = md_lock_match(ll_i2sbi(dir)->ll_md_exp, LDLM_FL_BLOCK_GRANTED,
+ ll_inode2fid(dir), LDLM_IBITS, &policy, mode, &lockh);
+ if (!rc) {
+ struct ldlm_enqueue_info einfo = {
+ .ei_type = LDLM_IBITS,
+ .ei_mode = mode,
+ .ei_cb_bl = ll_md_blocking_ast,
+ .ei_cb_cp = ldlm_completion_ast,
+ };
+ struct lookup_intent it = { .it_op = IT_READDIR };
+ struct ptlrpc_request *request;
+ struct md_op_data *op_data;
+
+ op_data = ll_prep_md_op_data(NULL, dir, NULL, NULL, 0, 0,
+ LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ return (void *)op_data;
+
+ rc = md_enqueue(ll_i2sbi(dir)->ll_md_exp, &einfo, &it,
+ op_data, &lockh, NULL, 0, NULL, 0);
+
+ ll_finish_md_op_data(op_data);
+
+ request = (struct ptlrpc_request *)it.d.lustre.it_data;
+ if (request)
+ ptlrpc_req_finished(request);
+ if (rc < 0) {
+ CERROR("lock enqueue: "DFID" at "LPU64": rc %d\n",
+ PFID(ll_inode2fid(dir)), hash, rc);
+ return ERR_PTR(rc);
+ }
+
+ CDEBUG(D_INODE, "setting lr_lvb_inode to inode %p (%lu/%u)\n",
+ dir, dir->i_ino, dir->i_generation);
+ md_set_lock_data(ll_i2sbi(dir)->ll_md_exp,
+ &it.d.lustre.it_lock_handle, dir, NULL);
+ } else {
+ /* for cross-ref object, l_ast_data of the lock may not be set,
+ * we reset it here */
+ md_set_lock_data(ll_i2sbi(dir)->ll_md_exp, &lockh.cookie,
+ dir, NULL);
+ }
+ ldlm_lock_dump_handle(D_OTHER, &lockh);
+
+ mutex_lock(&lli->lli_readdir_mutex);
+ page = ll_dir_page_locate(dir, &lhash, &start, &end);
+ if (IS_ERR(page)) {
+ CERROR("dir page locate: "DFID" at "LPU64": rc %ld\n",
+ PFID(ll_inode2fid(dir)), lhash, PTR_ERR(page));
+ GOTO(out_unlock, page);
+ } else if (page != NULL) {
+ /*
+ * XXX nikita: not entirely correct handling of a corner case:
+ * suppose hash chain of entries with hash value HASH crosses
+ * border between pages P0 and P1. First both P0 and P1 are
+ * cached, seekdir() is called for some entry from the P0 part
+ * of the chain. Later P0 goes out of cache. telldir(HASH)
+ * happens and finds P1, as it starts with matching hash
+ * value. Remaining entries from P0 part of the chain are
+ * skipped. (Is that really a bug?)
+ *
+ * Possible solutions: 0. don't cache P1 is such case, handle
+ * it as an "overflow" page. 1. invalidate all pages at
+ * once. 2. use HASH|1 as an index for P1.
+ */
+ GOTO(hash_collision, page);
+ }
+
+ page = read_cache_page(mapping, hash_x_index(hash, hash64),
+ ll_dir_filler, &lhash);
+ if (IS_ERR(page)) {
+ CERROR("read cache page: "DFID" at "LPU64": rc %ld\n",
+ PFID(ll_inode2fid(dir)), hash, PTR_ERR(page));
+ GOTO(out_unlock, page);
+ }
+
+ wait_on_page_locked(page);
+ (void)kmap(page);
+ if (!PageUptodate(page)) {
+ CERROR("page not updated: "DFID" at "LPU64": rc %d\n",
+ PFID(ll_inode2fid(dir)), hash, -5);
+ goto fail;
+ }
+ if (!PageChecked(page))
+ ll_check_page(dir, page);
+ if (PageError(page)) {
+ CERROR("page error: "DFID" at "LPU64": rc %d\n",
+ PFID(ll_inode2fid(dir)), hash, -5);
+ goto fail;
+ }
+hash_collision:
+ dp = page_address(page);
+ if (BITS_PER_LONG == 32 && hash64) {
+ start = le64_to_cpu(dp->ldp_hash_start) >> 32;
+ end = le64_to_cpu(dp->ldp_hash_end) >> 32;
+ lhash = hash >> 32;
+ } else {
+ start = le64_to_cpu(dp->ldp_hash_start);
+ end = le64_to_cpu(dp->ldp_hash_end);
+ lhash = hash;
+ }
+ if (end == start) {
+ LASSERT(start == lhash);
+ CWARN("Page-wide hash collision: "LPU64"\n", end);
+ if (BITS_PER_LONG == 32 && hash64)
+ CWARN("Real page-wide hash collision at ["LPU64" "LPU64
+ "] with hash "LPU64"\n",
+ le64_to_cpu(dp->ldp_hash_start),
+ le64_to_cpu(dp->ldp_hash_end), hash);
+ /*
+ * Fetch whole overflow chain...
+ *
+ * XXX not yet.
+ */
+ goto fail;
+ }
+out_unlock:
+ mutex_unlock(&lli->lli_readdir_mutex);
+ ldlm_lock_decref(&lockh, mode);
+ return page;
+
+fail:
+ ll_release_page(page, 1);
+ page = ERR_PTR(-EIO);
+ goto out_unlock;
+}
+
+int ll_dir_read(struct inode *inode, struct dir_context *ctx)
+{
+ struct ll_inode_info *info = ll_i2info(inode);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ __u64 pos = ctx->pos;
+ int api32 = ll_need_32bit_api(sbi);
+ int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH;
+ struct page *page;
+ struct ll_dir_chain chain;
+ int done = 0;
+ int rc = 0;
+
+ ll_dir_chain_init(&chain);
+
+ page = ll_get_dir_page(inode, pos, &chain);
+
+ while (rc == 0 && !done) {
+ struct lu_dirpage *dp;
+ struct lu_dirent *ent;
+
+ if (!IS_ERR(page)) {
+ /*
+ * If page is empty (end of directory is reached),
+ * use this value.
+ */
+ __u64 hash = MDS_DIR_END_OFF;
+ __u64 next;
+
+ dp = page_address(page);
+ for (ent = lu_dirent_start(dp); ent != NULL && !done;
+ ent = lu_dirent_next(ent)) {
+ __u16 type;
+ int namelen;
+ struct lu_fid fid;
+ __u64 lhash;
+ __u64 ino;
+
+ /*
+ * XXX: implement correct swabbing here.
+ */
+
+ hash = le64_to_cpu(ent->lde_hash);
+ if (hash < pos)
+ /*
+ * Skip until we find target hash
+ * value.
+ */
+ continue;
+
+ namelen = le16_to_cpu(ent->lde_namelen);
+ if (namelen == 0)
+ /*
+ * Skip dummy record.
+ */
+ continue;
+
+ if (api32 && hash64)
+ lhash = hash >> 32;
+ else
+ lhash = hash;
+ fid_le_to_cpu(&fid, &ent->lde_fid);
+ ino = cl_fid_build_ino(&fid, api32);
+ type = ll_dirent_type_get(ent);
+ ctx->pos = lhash;
+ /* For 'll_nfs_get_name_filldir()', it will try
+ * to access the 'ent' through its 'lde_name',
+ * so the parameter 'name' for 'ctx->actor()'
+ * must be part of the 'ent'.
+ */
+ done = !dir_emit(ctx, ent->lde_name,
+ namelen, ino, type);
+ }
+ next = le64_to_cpu(dp->ldp_hash_end);
+ if (!done) {
+ pos = next;
+ if (pos == MDS_DIR_END_OFF) {
+ /*
+ * End of directory reached.
+ */
+ done = 1;
+ ll_release_page(page, 0);
+ } else if (1 /* chain is exhausted*/) {
+ /*
+ * Normal case: continue to the next
+ * page.
+ */
+ ll_release_page(page,
+ le32_to_cpu(dp->ldp_flags) &
+ LDF_COLLIDE);
+ next = pos;
+ page = ll_get_dir_page(inode, pos,
+ &chain);
+ } else {
+ /*
+ * go into overflow page.
+ */
+ LASSERT(le32_to_cpu(dp->ldp_flags) &
+ LDF_COLLIDE);
+ ll_release_page(page, 1);
+ }
+ } else {
+ pos = hash;
+ ll_release_page(page, 0);
+ }
+ } else {
+ rc = PTR_ERR(page);
+ CERROR("error reading dir "DFID" at %lu: rc %d\n",
+ PFID(&info->lli_fid), (unsigned long)pos, rc);
+ }
+ }
+
+ ctx->pos = pos;
+ ll_dir_chain_fini(&chain);
+ return rc;
+}
+
+static int ll_readdir(struct file *filp, struct dir_context *ctx)
+{
+ struct inode *inode = filp->f_dentry->d_inode;
+ struct ll_file_data *lfd = LUSTRE_FPRIVATE(filp);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH;
+ int api32 = ll_need_32bit_api(sbi);
+ int rc;
+
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) pos %lu/%llu "
+ " 32bit_api %d\n", inode->i_ino, inode->i_generation,
+ inode, (unsigned long)lfd->lfd_pos, i_size_read(inode), api32);
+
+ if (lfd->lfd_pos == MDS_DIR_END_OFF)
+ /*
+ * end-of-file.
+ */
+ GOTO(out, rc = 0);
+
+ ctx->pos = lfd->lfd_pos;
+ rc = ll_dir_read(inode, ctx);
+ lfd->lfd_pos = ctx->pos;
+ if (ctx->pos == MDS_DIR_END_OFF) {
+ if (api32)
+ ctx->pos = LL_DIR_END_OFF_32BIT;
+ else
+ ctx->pos = LL_DIR_END_OFF;
+ } else {
+ if (api32 && hash64)
+ ctx->pos >>= 32;
+ }
+ filp->f_version = inode->i_version;
+
+out:
+ if (!rc)
+ ll_stats_ops_tally(sbi, LPROC_LL_READDIR, 1);
+
+ return rc;
+}
+
+int ll_send_mgc_param(struct obd_export *mgc, char *string)
+{
+ struct mgs_send_param *msp;
+ int rc = 0;
+
+ OBD_ALLOC_PTR(msp);
+ if (!msp)
+ return -ENOMEM;
+
+ strncpy(msp->mgs_param, string, MGS_PARAM_MAXLEN);
+ rc = obd_set_info_async(NULL, mgc, sizeof(KEY_SET_INFO), KEY_SET_INFO,
+ sizeof(struct mgs_send_param), msp, NULL);
+ if (rc)
+ CERROR("Failed to set parameter: %d\n", rc);
+ OBD_FREE_PTR(msp);
+
+ return rc;
+}
+
+int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
+ char *filename)
+{
+ struct ptlrpc_request *request = NULL;
+ struct md_op_data *op_data;
+ struct ll_sb_info *sbi = ll_i2sbi(dir);
+ int mode;
+ int err;
+
+ mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current->fs->umask) | S_IFDIR;
+ op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
+ strlen(filename), mode, LUSTRE_OPC_MKDIR,
+ lump);
+ if (IS_ERR(op_data))
+ GOTO(err_exit, err = PTR_ERR(op_data));
+
+ op_data->op_cli_flags |= CLI_SET_MEA;
+ err = md_create(sbi->ll_md_exp, op_data, lump, sizeof(*lump), mode,
+ from_kuid(&init_user_ns, current_fsuid()),
+ from_kgid(&init_user_ns, current_fsgid()),
+ cfs_curproc_cap_pack(), 0, &request);
+ ll_finish_md_op_data(op_data);
+ if (err)
+ GOTO(err_exit, err);
+err_exit:
+ ptlrpc_req_finished(request);
+ return err;
+}
+
+int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
+ int set_default)
+{
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct md_op_data *op_data;
+ struct ptlrpc_request *req = NULL;
+ int rc = 0;
+ struct lustre_sb_info *lsi = s2lsi(inode->i_sb);
+ struct obd_device *mgc = lsi->lsi_mgc;
+ int lum_size;
+
+ if (lump != NULL) {
+ /*
+ * This is coming from userspace, so should be in
+ * local endian. But the MDS would like it in little
+ * endian, so we swab it before we send it.
+ */
+ switch (lump->lmm_magic) {
+ case LOV_USER_MAGIC_V1: {
+ if (lump->lmm_magic != cpu_to_le32(LOV_USER_MAGIC_V1))
+ lustre_swab_lov_user_md_v1(lump);
+ lum_size = sizeof(struct lov_user_md_v1);
+ break;
+ }
+ case LOV_USER_MAGIC_V3: {
+ if (lump->lmm_magic != cpu_to_le32(LOV_USER_MAGIC_V3))
+ lustre_swab_lov_user_md_v3(
+ (struct lov_user_md_v3 *)lump);
+ lum_size = sizeof(struct lov_user_md_v3);
+ break;
+ }
+ default: {
+ CDEBUG(D_IOCTL, "bad userland LOV MAGIC:"
+ " %#08x != %#08x nor %#08x\n",
+ lump->lmm_magic, LOV_USER_MAGIC_V1,
+ LOV_USER_MAGIC_V3);
+ return -EINVAL;
+ }
+ }
+ } else {
+ lum_size = sizeof(struct lov_user_md_v1);
+ }
+
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
+ LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+
+ if (lump != NULL && lump->lmm_magic == cpu_to_le32(LMV_USER_MAGIC))
+ op_data->op_cli_flags |= CLI_SET_MEA;
+
+ /* swabbing is done in lov_setstripe() on server side */
+ rc = md_setattr(sbi->ll_md_exp, op_data, lump, lum_size,
+ NULL, 0, &req, NULL);
+ ll_finish_md_op_data(op_data);
+ ptlrpc_req_finished(req);
+ if (rc) {
+ if (rc != -EPERM && rc != -EACCES)
+ CERROR("mdc_setattr fails: rc = %d\n", rc);
+ }
+
+ /* In the following we use the fact that LOV_USER_MAGIC_V1 and
+ LOV_USER_MAGIC_V3 have the same initial fields so we do not
+ need to make the distinction between the two versions */
+ if (set_default && mgc->u.cli.cl_mgc_mgsexp) {
+ char *param = NULL;
+ char *buf;
+
+ OBD_ALLOC(param, MGS_PARAM_MAXLEN);
+ if (param == NULL)
+ GOTO(end, rc = -ENOMEM);
+
+ buf = param;
+ /* Get fsname and assume devname to be -MDT0000. */
+ ll_get_fsname(inode->i_sb, buf, MTI_NAME_MAXLEN);
+ strcat(buf, "-MDT0000.lov");
+ buf += strlen(buf);
+
+ /* Set root stripesize */
+ sprintf(buf, ".stripesize=%u",
+ lump ? le32_to_cpu(lump->lmm_stripe_size) : 0);
+ rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);
+ if (rc)
+ GOTO(end, rc);
+
+ /* Set root stripecount */
+ sprintf(buf, ".stripecount=%hd",
+ lump ? le16_to_cpu(lump->lmm_stripe_count) : 0);
+ rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);
+ if (rc)
+ GOTO(end, rc);
+
+ /* Set root stripeoffset */
+ sprintf(buf, ".stripeoffset=%hd",
+ lump ? le16_to_cpu(lump->lmm_stripe_offset) :
+ (typeof(lump->lmm_stripe_offset))(-1));
+ rc = ll_send_mgc_param(mgc->u.cli.cl_mgc_mgsexp, param);
+
+end:
+ if (param != NULL)
+ OBD_FREE(param, MGS_PARAM_MAXLEN);
+ }
+ return rc;
+}
+
+int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
+ int *lmm_size, struct ptlrpc_request **request)
+{
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct mdt_body *body;
+ struct lov_mds_md *lmm = NULL;
+ struct ptlrpc_request *req = NULL;
+ int rc, lmmsize;
+ struct md_op_data *op_data;
+
+ rc = ll_get_max_mdsize(sbi, &lmmsize);
+ if (rc)
+ return rc;
+
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
+ 0, lmmsize, LUSTRE_OPC_ANY,
+ NULL);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+
+ op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
+ rc = md_getattr(sbi->ll_md_exp, op_data, &req);
+ ll_finish_md_op_data(op_data);
+ if (rc < 0) {
+ CDEBUG(D_INFO, "md_getattr failed on inode "
+ "%lu/%u: rc %d\n", inode->i_ino,
+ inode->i_generation, rc);
+ GOTO(out, rc);
+ }
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ LASSERT(body != NULL);
+
+ lmmsize = body->eadatasize;
+
+ if (!(body->valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
+ lmmsize == 0) {
+ GOTO(out, rc = -ENODATA);
+ }
+
+ lmm = req_capsule_server_sized_get(&req->rq_pill,
+ &RMF_MDT_MD, lmmsize);
+ LASSERT(lmm != NULL);
+
+ /*
+ * This is coming from the MDS, so is probably in
+ * little endian. We convert it to host endian before
+ * passing it to userspace.
+ */
+ /* We don't swab objects for directories */
+ switch (le32_to_cpu(lmm->lmm_magic)) {
+ case LOV_MAGIC_V1:
+ if (LOV_MAGIC != cpu_to_le32(LOV_MAGIC))
+ lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
+ break;
+ case LOV_MAGIC_V3:
+ if (LOV_MAGIC != cpu_to_le32(LOV_MAGIC))
+ lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
+ break;
+ default:
+ CERROR("unknown magic: %lX\n", (unsigned long)lmm->lmm_magic);
+ rc = -EPROTO;
+ }
+out:
+ *lmmp = lmm;
+ *lmm_size = lmmsize;
+ *request = req;
+ return rc;
+}
+
+/*
+ * Get MDT index for the inode.
+ */
+int ll_get_mdt_idx(struct inode *inode)
+{
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct md_op_data *op_data;
+ int rc, mdtidx;
+
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0,
+ 0, LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+
+ op_data->op_flags |= MF_GET_MDT_IDX;
+ rc = md_getattr(sbi->ll_md_exp, op_data, NULL);
+ mdtidx = op_data->op_mds;
+ ll_finish_md_op_data(op_data);
+ if (rc < 0) {
+ CDEBUG(D_INFO, "md_getattr_name: %d\n", rc);
+ return rc;
+ }
+ return mdtidx;
+}
+
+/**
+ * Generic handler to do any pre-copy work.
+ *
+ * It sends an initial hsm_progress (with extent length == 0) to the
+ * coordinator as a first indication that the real work has started.
+ *
+ * Moreover, for an ARCHIVE request, it will sample the file data version and
+ * store it in \a copy.
+ *
+ * \return 0 on success.
+ */
+static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
+{
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ struct hsm_progress_kernel hpk;
+ int rc;
+
+ /* Forge a hsm_progress based on data from copy. */
+ hpk.hpk_fid = copy->hc_hai.hai_fid;
+ hpk.hpk_cookie = copy->hc_hai.hai_cookie;
+ hpk.hpk_extent.offset = copy->hc_hai.hai_extent.offset;
+ hpk.hpk_extent.length = 0;
+ hpk.hpk_flags = 0;
+ hpk.hpk_errval = 0;
+ hpk.hpk_data_version = 0;
+
+
+ /* For archive request, we need to read the current file version. */
+ if (copy->hc_hai.hai_action == HSMA_ARCHIVE) {
+ struct inode *inode;
+ __u64 data_version = 0;
+
+ /* Get inode for this fid */
+ inode = search_inode_for_lustre(sb, &copy->hc_hai.hai_fid);
+ if (IS_ERR(inode)) {
+ hpk.hpk_flags |= HP_FLAG_RETRY;
+ /* hpk_errval is >= 0 */
+ hpk.hpk_errval = -PTR_ERR(inode);
+ GOTO(progress, rc = PTR_ERR(inode));
+ }
+
+ /* Read current file data version */
+ rc = ll_data_version(inode, &data_version, 1);
+ iput(inode);
+ if (rc != 0) {
+ CDEBUG(D_HSM, "Could not read file data version of "
+ DFID" (rc = %d). Archive request ("
+ LPX64") could not be done.\n",
+ PFID(&copy->hc_hai.hai_fid), rc,
+ copy->hc_hai.hai_cookie);
+ hpk.hpk_flags |= HP_FLAG_RETRY;
+ /* hpk_errval must be >= 0 */
+ hpk.hpk_errval = -rc;
+ GOTO(progress, rc);
+ }
+
+ /* Store it in the hsm_copy for later copytool use.
+ * Always modified even if there is no lsm. */
+ copy->hc_data_version = data_version;
+ }
+
+progress:
+ rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
+ &hpk, NULL);
+
+ return rc;
+}
+
+/**
+ * Generic handler to do any post-copy work.
+ *
+ * It will send the last hsm_progress update to the coordinator to inform it
+ * that the copy is finished and whether it was successful or not.
+ *
+ * Moreover,
+ * - for an ARCHIVE request, it will sample the file data version and compare
+ * it with the version saved in ll_ioc_copy_start(). If they do not match,
+ * the copy is considered to have failed.
+ * - for a RESTORE request, it will sample the file data version and send it
+ * to the coordinator, which is useful if the file was imported as 'released'.
+ *
+ * \return 0 on success.
+ */
+static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
+{
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ struct hsm_progress_kernel hpk;
+ int rc;
+
+ /* If you modify the logic here, also check llapi_hsm_copy_end(). */
+ /* Take care: copy->hc_hai.hai_action, len, gid and data are not
+ * initialized if copy_end was called with copy == NULL.
+ */
+
+ /* Forge a hsm_progress based on data from copy. */
+ hpk.hpk_fid = copy->hc_hai.hai_fid;
+ hpk.hpk_cookie = copy->hc_hai.hai_cookie;
+ hpk.hpk_extent = copy->hc_hai.hai_extent;
+ hpk.hpk_flags = copy->hc_flags | HP_FLAG_COMPLETED;
+ hpk.hpk_errval = copy->hc_errval;
+ hpk.hpk_data_version = 0;
+
+ /* For archive request, we need to check the file data was not changed.
+ *
+ * For restore request, we need to send the file data version, this is
+ * useful when the file was created using hsm_import.
+ */
+ if (((copy->hc_hai.hai_action == HSMA_ARCHIVE) ||
+ (copy->hc_hai.hai_action == HSMA_RESTORE)) &&
+ (copy->hc_errval == 0)) {
+ struct inode *inode;
+ __u64 data_version = 0;
+
+ /* Get lsm for this fid */
+ inode = search_inode_for_lustre(sb, &copy->hc_hai.hai_fid);
+ if (IS_ERR(inode)) {
+ hpk.hpk_flags |= HP_FLAG_RETRY;
+ /* hpk_errval must be >= 0 */
+ hpk.hpk_errval = -PTR_ERR(inode);
+ GOTO(progress, rc = PTR_ERR(inode));
+ }
+
+ rc = ll_data_version(inode, &data_version,
+ copy->hc_hai.hai_action == HSMA_ARCHIVE);
+ iput(inode);
+ if (rc) {
+ CDEBUG(D_HSM, "Could not read file data version. "
+ "Request could not be confirmed.\n");
+ if (hpk.hpk_errval == 0)
+ hpk.hpk_errval = -rc;
+ GOTO(progress, rc);
+ }
+
+ /* Store the data version in the progress record for later
+ * coordinator use. Always modified even if there is no lsm. */
+ hpk.hpk_data_version = data_version;
+
+ /* File could have been stripped during archiving, so we need
+ * to check anyway. */
+ if ((copy->hc_hai.hai_action == HSMA_ARCHIVE) &&
+ (copy->hc_data_version != data_version)) {
+ CDEBUG(D_HSM, "File data version mismatched. "
+ "File content was changed during archiving. "
+ DFID", start:"LPX64" current:"LPX64"\n",
+ PFID(&copy->hc_hai.hai_fid),
+ copy->hc_data_version, data_version);
+ /* File was changed, send error to cdt. Do not ask for
+ * retry because if a file is modified frequently,
+ * the cdt will loop on retried archive requests.
+ * The policy engine will ask for a new archive later
+ * when the file will not be modified for some tunable
+ * time */
+ /* we do not notify caller */
+ hpk.hpk_flags &= ~HP_FLAG_RETRY;
+ /* hpk_errval must be >= 0 */
+ hpk.hpk_errval = EBUSY;
+ }
+
+ }
+
+progress:
+ rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
+ &hpk, NULL);
+
+ return rc;
+}
+
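+/*
+ * Hypothetical userspace sketch (not part of this kernel module) of the
+ * copytool sequence served by ll_ioc_copy_start() and ll_ioc_copy_end()
+ * above.  "mnt_fd", "hai" and "transfer_rc" are assumed names; real copytools
+ * would normally go through the llapi_hsm_*() wrappers rather than raw
+ * ioctls.
+ *
+ *   struct hsm_copy copy = { .hc_hai = hai };  // action item from coordinator
+ *
+ *   if (ioctl(mnt_fd, LL_IOC_HSM_COPY_START, &copy) < 0)
+ *           return -errno;
+ *   // ... move the file data to/from the archive here ...
+ *   copy.hc_errval = transfer_rc < 0 ? -transfer_rc : 0;
+ *   if (ioctl(mnt_fd, LL_IOC_HSM_COPY_END, &copy) < 0)
+ *           return -errno;
+ */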
+
+static int copy_and_ioctl(int cmd, struct obd_export *exp, void *data, int len)
+{
+ void *ptr;
+ int rc;
+
+ OBD_ALLOC(ptr, len);
+ if (ptr == NULL)
+ return -ENOMEM;
+ if (copy_from_user(ptr, data, len)) {
+ OBD_FREE(ptr, len);
+ return -EFAULT;
+ }
+ rc = obd_iocontrol(cmd, exp, len, ptr, NULL);
+ OBD_FREE(ptr, len);
+ return rc;
+}
+
+static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
+{
+ int cmd = qctl->qc_cmd;
+ int type = qctl->qc_type;
+ int id = qctl->qc_id;
+ int valid = qctl->qc_valid;
+ int rc = 0;
+
+ switch (cmd) {
+ case LUSTRE_Q_INVALIDATE:
+ case LUSTRE_Q_FINVALIDATE:
+ case Q_QUOTAON:
+ case Q_QUOTAOFF:
+ case Q_SETQUOTA:
+ case Q_SETINFO:
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
+ sbi->ll_flags & LL_SBI_RMT_CLIENT)
+ return -EPERM;
+ break;
+ case Q_GETQUOTA:
+ if (((type == USRQUOTA &&
+ !uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
+ (type == GRPQUOTA &&
+ !in_egroup_p(make_kgid(&init_user_ns, id)))) &&
+ (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
+ sbi->ll_flags & LL_SBI_RMT_CLIENT))
+ return -EPERM;
+ break;
+ case Q_GETINFO:
+ break;
+ default:
+ CERROR("unsupported quotactl op: %#x\n", cmd);
+ return -ENOTTY;
+ }
+
+ if (valid != QC_GENERAL) {
+ if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
+ return -EOPNOTSUPP;
+
+ if (cmd == Q_GETINFO)
+ qctl->qc_cmd = Q_GETOINFO;
+ else if (cmd == Q_GETQUOTA)
+ qctl->qc_cmd = Q_GETOQUOTA;
+ else
+ return -EINVAL;
+
+ switch (valid) {
+ case QC_MDTIDX:
+ rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_md_exp,
+ sizeof(*qctl), qctl, NULL);
+ break;
+ case QC_OSTIDX:
+ rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_dt_exp,
+ sizeof(*qctl), qctl, NULL);
+ break;
+ case QC_UUID:
+ rc = obd_iocontrol(OBD_IOC_QUOTACTL, sbi->ll_md_exp,
+ sizeof(*qctl), qctl, NULL);
+ if (rc == -EAGAIN)
+ rc = obd_iocontrol(OBD_IOC_QUOTACTL,
+ sbi->ll_dt_exp,
+ sizeof(*qctl), qctl, NULL);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc)
+ return rc;
+
+ qctl->qc_cmd = cmd;
+ } else {
+ struct obd_quotactl *oqctl;
+
+ OBD_ALLOC_PTR(oqctl);
+ if (oqctl == NULL)
+ return -ENOMEM;
+
+ QCTL_COPY(oqctl, qctl);
+ rc = obd_quotactl(sbi->ll_md_exp, oqctl);
+ if (rc) {
+ if (rc != -EALREADY && cmd == Q_QUOTAON) {
+ oqctl->qc_cmd = Q_QUOTAOFF;
+ obd_quotactl(sbi->ll_md_exp, oqctl);
+ }
+ OBD_FREE_PTR(oqctl);
+ return rc;
+ }
+ /* If QIF_SPACE is not set, client should collect the
+ * space usage from OSSs by itself */
+ if (cmd == Q_GETQUOTA &&
+ !(oqctl->qc_dqblk.dqb_valid & QIF_SPACE) &&
+ !oqctl->qc_dqblk.dqb_curspace) {
+ struct obd_quotactl *oqctl_tmp;
+
+ OBD_ALLOC_PTR(oqctl_tmp);
+ if (oqctl_tmp == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ oqctl_tmp->qc_cmd = Q_GETOQUOTA;
+ oqctl_tmp->qc_id = oqctl->qc_id;
+ oqctl_tmp->qc_type = oqctl->qc_type;
+
+ /* collect space usage from OSTs */
+ oqctl_tmp->qc_dqblk.dqb_curspace = 0;
+ rc = obd_quotactl(sbi->ll_dt_exp, oqctl_tmp);
+ if (!rc || rc == -EREMOTEIO) {
+ oqctl->qc_dqblk.dqb_curspace =
+ oqctl_tmp->qc_dqblk.dqb_curspace;
+ oqctl->qc_dqblk.dqb_valid |= QIF_SPACE;
+ }
+
+ /* collect space & inode usage from MDTs */
+ oqctl_tmp->qc_dqblk.dqb_curspace = 0;
+ oqctl_tmp->qc_dqblk.dqb_curinodes = 0;
+ rc = obd_quotactl(sbi->ll_md_exp, oqctl_tmp);
+ if (!rc || rc == -EREMOTEIO) {
+ oqctl->qc_dqblk.dqb_curspace +=
+ oqctl_tmp->qc_dqblk.dqb_curspace;
+ oqctl->qc_dqblk.dqb_curinodes =
+ oqctl_tmp->qc_dqblk.dqb_curinodes;
+ oqctl->qc_dqblk.dqb_valid |= QIF_INODES;
+ } else {
+ oqctl->qc_dqblk.dqb_valid &= ~QIF_SPACE;
+ }
+
+ OBD_FREE_PTR(oqctl_tmp);
+ }
+out:
+ QCTL_COPY(qctl, oqctl);
+ OBD_FREE_PTR(oqctl);
+ }
+
+ return rc;
+}
+
+static char *
+ll_getname(const char __user *filename)
+{
+ int ret = 0, len;
+ char *tmp = __getname();
+
+ if (!tmp)
+ return ERR_PTR(-ENOMEM);
+
+ len = strncpy_from_user(tmp, filename, PATH_MAX);
+ if (len == 0)
+ ret = -ENOENT;
+ else if (len > PATH_MAX)
+ ret = -ENAMETOOLONG;
+
+ if (ret) {
+ __putname(tmp);
+ tmp = ERR_PTR(ret);
+ }
+ return tmp;
+}
+
+#define ll_putname(filename) __putname(filename)
+
+static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct obd_ioctl_data *data;
+ int rc = 0;
+
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), cmd=%#x\n",
+ inode->i_ino, inode->i_generation, inode, cmd);
+
+ /* asm-ppc{,64} declares TCGETS, et al., as type 't' not 'T' */
+ if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
+ return -ENOTTY;
+
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
+ switch (cmd) {
+ case FSFILT_IOC_GETFLAGS:
+ case FSFILT_IOC_SETFLAGS:
+ return ll_iocontrol(inode, file, cmd, arg);
+ case FSFILT_IOC_GETVERSION_OLD:
+ case FSFILT_IOC_GETVERSION:
+ return put_user(inode->i_generation, (int *)arg);
+ /* We need to special case any other ioctls we want to handle,
+ * to send them to the MDS/OST as appropriate and to properly
+ * network encode the arg field.
+ case FSFILT_IOC_SETVERSION_OLD:
+ case FSFILT_IOC_SETVERSION:
+ */
+ case LL_IOC_GET_MDTIDX: {
+ int mdtidx;
+
+ mdtidx = ll_get_mdt_idx(inode);
+ if (mdtidx < 0)
+ return mdtidx;
+
+ if (put_user((int)mdtidx, (int *)arg))
+ return -EFAULT;
+
+ return 0;
+ }
+ case IOC_MDC_LOOKUP: {
+ struct ptlrpc_request *request = NULL;
+ int namelen, len = 0;
+ char *buf = NULL;
+ char *filename;
+ struct md_op_data *op_data;
+
+ rc = obd_ioctl_getdata(&buf, &len, (void *)arg);
+ if (rc)
+ return rc;
+ data = (void *)buf;
+
+ filename = data->ioc_inlbuf1;
+ namelen = strlen(filename);
+
+ if (namelen < 1) {
+ CDEBUG(D_INFO, "IOC_MDC_LOOKUP missing filename\n");
+ GOTO(out_free, rc = -EINVAL);
+ }
+
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, filename, namelen,
+ 0, LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ GOTO(out_free, rc = PTR_ERR(op_data));
+
+ op_data->op_valid = OBD_MD_FLID;
+ rc = md_getattr_name(sbi->ll_md_exp, op_data, &request);
+ ll_finish_md_op_data(op_data);
+ if (rc < 0) {
+ CDEBUG(D_INFO, "md_getattr_name: %d\n", rc);
+ GOTO(out_free, rc);
+ }
+ ptlrpc_req_finished(request);
+out_free:
+ obd_ioctl_freedata(buf, len);
+ return rc;
+ }
+ case LL_IOC_LMV_SETSTRIPE: {
+ struct lmv_user_md *lum;
+ char *buf = NULL;
+ char *filename;
+ int namelen = 0;
+ int lumlen = 0;
+ int len;
+ int rc;
+
+ rc = obd_ioctl_getdata(&buf, &len, (void *)arg);
+ if (rc)
+ return rc;
+
+ data = (void *)buf;
+ if (data->ioc_inlbuf1 == NULL || data->ioc_inlbuf2 == NULL ||
+ data->ioc_inllen1 == 0 || data->ioc_inllen2 == 0)
+ GOTO(lmv_out_free, rc = -EINVAL);
+
+ filename = data->ioc_inlbuf1;
+ namelen = data->ioc_inllen1;
+
+ if (namelen < 1) {
+ CDEBUG(D_INFO, "IOC_MDC_LOOKUP missing filename\n");
+ GOTO(lmv_out_free, rc = -EINVAL);
+ }
+ lum = (struct lmv_user_md *)data->ioc_inlbuf2;
+ lumlen = data->ioc_inllen2;
+
+ if (lum->lum_magic != LMV_USER_MAGIC ||
+ lumlen != sizeof(*lum)) {
+ CERROR("%s: wrong lum magic %x or size %d: rc = %d\n",
+ filename, lum->lum_magic, lumlen, -EFAULT);
+ GOTO(lmv_out_free, rc = -EINVAL);
+ }
+
+ /**
+ * ll_dir_setdirstripe will be used to set dir stripe
+ * mdc_create--->mdt_reint_create (with dirstripe)
+ */
+ rc = ll_dir_setdirstripe(inode, lum, filename);
+lmv_out_free:
+ obd_ioctl_freedata(buf, len);
+ return rc;
+
+ }
+ case LL_IOC_LOV_SETSTRIPE: {
+ struct lov_user_md_v3 lumv3;
+ struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3;
+ struct lov_user_md_v1 *lumv1p = (struct lov_user_md_v1 *)arg;
+ struct lov_user_md_v3 *lumv3p = (struct lov_user_md_v3 *)arg;
+
+ int set_default = 0;
+
+ LASSERT(sizeof(lumv3) == sizeof(*lumv3p));
+ LASSERT(sizeof(lumv3.lmm_objects[0]) ==
+ sizeof(lumv3p->lmm_objects[0]));
+ /* first try with v1 which is smaller than v3 */
+ if (copy_from_user(lumv1, lumv1p, sizeof(*lumv1)))
+ return -EFAULT;
+
+ if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) {
+ if (copy_from_user(&lumv3, lumv3p, sizeof(lumv3)))
+ return -EFAULT;
+ }
+
+ if (inode->i_sb->s_root == file->f_dentry)
+ set_default = 1;
+
+ /* in v1 and v3 cases lumv1 points to data */
+ rc = ll_dir_setstripe(inode, lumv1, set_default);
+
+ return rc;
+ }
+ case LL_IOC_LMV_GETSTRIPE: {
+ struct lmv_user_md *lump = (struct lmv_user_md *)arg;
+ struct lmv_user_md lum;
+ struct lmv_user_md *tmp;
+ int lum_size;
+ int rc = 0;
+ int mdtindex;
+
+ if (copy_from_user(&lum, lump, sizeof(struct lmv_user_md)))
+ return -EFAULT;
+
+ if (lum.lum_magic != LMV_MAGIC_V1)
+ return -EINVAL;
+
+ lum_size = lmv_user_md_size(1, LMV_MAGIC_V1);
+ OBD_ALLOC(tmp, lum_size);
+ if (tmp == NULL)
+ GOTO(free_lmv, rc = -ENOMEM);
+
+ memcpy(tmp, &lum, sizeof(lum));
+ tmp->lum_type = LMV_STRIPE_TYPE;
+ tmp->lum_stripe_count = 1;
+ mdtindex = ll_get_mdt_idx(inode);
+ if (mdtindex < 0)
+ GOTO(free_lmv, rc = mdtindex);
+
+ tmp->lum_stripe_offset = mdtindex;
+ tmp->lum_objects[0].lum_mds = mdtindex;
+ memcpy(&tmp->lum_objects[0].lum_fid, ll_inode2fid(inode),
+ sizeof(struct lu_fid));
+ if (copy_to_user((void *)arg, tmp, lum_size))
+ GOTO(free_lmv, rc = -EFAULT);
+free_lmv:
+ if (tmp)
+ OBD_FREE(tmp, lum_size);
+ return rc;
+ }
+ case LL_IOC_REMOVE_ENTRY: {
+ char *filename = NULL;
+ int namelen = 0;
+ int rc;
+
+ /* Here is a little hack to avoid sending REINT_RMENTRY to an
+ * unsupported server, which might crash the server (LU-2730).
+ * Because both LVB_TYPE and REINT_RMENTRY are supported
+ * since 2.4, we use OBD_CONNECT_LVB_TYPE to detect whether the
+ * server will support REINT_RMENTRY. XXX */
+ if (!(exp_connect_flags(sbi->ll_md_exp) & OBD_CONNECT_LVB_TYPE))
+ return -ENOTSUPP;
+
+ filename = ll_getname((const char *)arg);
+ if (IS_ERR(filename))
+ return PTR_ERR(filename);
+
+ namelen = strlen(filename);
+ if (namelen < 1)
+ GOTO(out_rmdir, rc = -EINVAL);
+
+ rc = ll_rmdir_entry(inode, filename, namelen);
+out_rmdir:
+ if (filename)
+ ll_putname(filename);
+ return rc;
+ }
+ case LL_IOC_LOV_SWAP_LAYOUTS:
+ return -EPERM;
+ case LL_IOC_OBD_STATFS:
+ return ll_obd_statfs(inode, (void *)arg);
+ case LL_IOC_LOV_GETSTRIPE:
+ case LL_IOC_MDC_GETINFO:
+ case IOC_MDC_GETFILEINFO:
+ case IOC_MDC_GETFILESTRIPE: {
+ struct ptlrpc_request *request = NULL;
+ struct lov_user_md *lump;
+ struct lov_mds_md *lmm = NULL;
+ struct mdt_body *body;
+ char *filename = NULL;
+ int lmmsize;
+
+ if (cmd == IOC_MDC_GETFILEINFO ||
+ cmd == IOC_MDC_GETFILESTRIPE) {
+ filename = ll_getname((const char *)arg);
+ if (IS_ERR(filename))
+ return PTR_ERR(filename);
+
+ rc = ll_lov_getstripe_ea_info(inode, filename, &lmm,
+ &lmmsize, &request);
+ } else {
+ rc = ll_dir_getstripe(inode, &lmm, &lmmsize, &request);
+ }
+
+ if (request) {
+ body = req_capsule_server_get(&request->rq_pill,
+ &RMF_MDT_BODY);
+ LASSERT(body != NULL);
+ } else {
+ GOTO(out_req, rc);
+ }
+
+ if (rc < 0) {
+ if (rc == -ENODATA && (cmd == IOC_MDC_GETFILEINFO ||
+ cmd == LL_IOC_MDC_GETINFO))
+ GOTO(skip_lmm, rc = 0);
+ else
+ GOTO(out_req, rc);
+ }
+
+ if (cmd == IOC_MDC_GETFILESTRIPE ||
+ cmd == LL_IOC_LOV_GETSTRIPE) {
+ lump = (struct lov_user_md *)arg;
+ } else {
+ struct lov_user_mds_data *lmdp;
+ lmdp = (struct lov_user_mds_data *)arg;
+ lump = &lmdp->lmd_lmm;
+ }
+ if (copy_to_user(lump, lmm, lmmsize)) {
+ if (copy_to_user(lump, lmm, sizeof(*lump)))
+ GOTO(out_req, rc = -EFAULT);
+ rc = -EOVERFLOW;
+ }
+ skip_lmm:
+ if (cmd == IOC_MDC_GETFILEINFO || cmd == LL_IOC_MDC_GETINFO) {
+ struct lov_user_mds_data *lmdp;
+ lstat_t st = { 0 };
+
+ st.st_dev = inode->i_sb->s_dev;
+ st.st_mode = body->mode;
+ st.st_nlink = body->nlink;
+ st.st_uid = body->uid;
+ st.st_gid = body->gid;
+ st.st_rdev = body->rdev;
+ st.st_size = body->size;
+ st.st_blksize = PAGE_CACHE_SIZE;
+ st.st_blocks = body->blocks;
+ st.st_atime = body->atime;
+ st.st_mtime = body->mtime;
+ st.st_ctime = body->ctime;
+ st.st_ino = inode->i_ino;
+
+ lmdp = (struct lov_user_mds_data *)arg;
+ if (copy_to_user(&lmdp->lmd_st, &st, sizeof(st)))
+ GOTO(out_req, rc = -EFAULT);
+ }
+
+ out_req:
+ ptlrpc_req_finished(request);
+ if (filename)
+ ll_putname(filename);
+ return rc;
+ }
+ case IOC_LOV_GETINFO: {
+ struct lov_user_mds_data *lumd;
+ struct lov_stripe_md *lsm;
+ struct lov_user_md *lum;
+ struct lov_mds_md *lmm;
+ int lmmsize;
+ lstat_t st;
+
+ lumd = (struct lov_user_mds_data *)arg;
+ lum = &lumd->lmd_lmm;
+
+ rc = ll_get_max_mdsize(sbi, &lmmsize);
+ if (rc)
+ return rc;
+
+ OBD_ALLOC_LARGE(lmm, lmmsize);
+ if (lmm == NULL)
+ return -ENOMEM;
+ if (copy_from_user(lmm, lum, lmmsize))
+ GOTO(free_lmm, rc = -EFAULT);
+
+ switch (lmm->lmm_magic) {
+ case LOV_USER_MAGIC_V1:
+ if (LOV_USER_MAGIC_V1 == cpu_to_le32(LOV_USER_MAGIC_V1))
+ break;
+ /* swab objects first so that stripes num will be sane */
+ lustre_swab_lov_user_md_objects(
+ ((struct lov_user_md_v1 *)lmm)->lmm_objects,
+ ((struct lov_user_md_v1 *)lmm)->lmm_stripe_count);
+ lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
+ break;
+ case LOV_USER_MAGIC_V3:
+ if (LOV_USER_MAGIC_V3 == cpu_to_le32(LOV_USER_MAGIC_V3))
+ break;
+ /* swab objects first so that stripes num will be sane */
+ lustre_swab_lov_user_md_objects(
+ ((struct lov_user_md_v3 *)lmm)->lmm_objects,
+ ((struct lov_user_md_v3 *)lmm)->lmm_stripe_count);
+ lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
+ break;
+ default:
+ GOTO(free_lmm, rc = -EINVAL);
+ }
+
+ rc = obd_unpackmd(sbi->ll_dt_exp, &lsm, lmm, lmmsize);
+ if (rc < 0)
+ GOTO(free_lmm, rc = -ENOMEM);
+
+ /* Perform glimpse_size operation. */
+ memset(&st, 0, sizeof(st));
+
+ rc = ll_glimpse_ioctl(sbi, lsm, &st);
+ if (rc)
+ GOTO(free_lsm, rc);
+
+ if (copy_to_user(&lumd->lmd_st, &st, sizeof(st)))
+ GOTO(free_lsm, rc = -EFAULT);
+
+ free_lsm:
+ obd_free_memmd(sbi->ll_dt_exp, &lsm);
+ free_lmm:
+ OBD_FREE_LARGE(lmm, lmmsize);
+ return rc;
+ }
+ case OBD_IOC_LLOG_CATINFO: {
+ return -EOPNOTSUPP;
+ }
+ case OBD_IOC_QUOTACHECK: {
+ struct obd_quotactl *oqctl;
+ int error = 0;
+
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
+ sbi->ll_flags & LL_SBI_RMT_CLIENT)
+ return -EPERM;
+
+ OBD_ALLOC_PTR(oqctl);
+ if (!oqctl)
+ return -ENOMEM;
+ oqctl->qc_type = arg;
+ rc = obd_quotacheck(sbi->ll_md_exp, oqctl);
+ if (rc < 0) {
+ CDEBUG(D_INFO, "md_quotacheck failed: rc %d\n", rc);
+ error = rc;
+ }
+
+ rc = obd_quotacheck(sbi->ll_dt_exp, oqctl);
+ if (rc < 0)
+ CDEBUG(D_INFO, "obd_quotacheck failed: rc %d\n", rc);
+
+ OBD_FREE_PTR(oqctl);
+ return error ?: rc;
+ }
+ case OBD_IOC_POLL_QUOTACHECK: {
+ struct if_quotacheck *check;
+
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
+ sbi->ll_flags & LL_SBI_RMT_CLIENT)
+ return -EPERM;
+
+ OBD_ALLOC_PTR(check);
+ if (!check)
+ return -ENOMEM;
+
+ rc = obd_iocontrol(cmd, sbi->ll_md_exp, 0, (void *)check,
+ NULL);
+ if (rc) {
+ CDEBUG(D_QUOTA, "mdc ioctl %d failed: %d\n", cmd, rc);
+ if (copy_to_user((void *)arg, check,
+ sizeof(*check)))
+ CDEBUG(D_QUOTA, "copy_to_user failed\n");
+ GOTO(out_poll, rc);
+ }
+
+ rc = obd_iocontrol(cmd, sbi->ll_dt_exp, 0, (void *)check,
+ NULL);
+ if (rc) {
+ CDEBUG(D_QUOTA, "osc ioctl %d failed: %d\n", cmd, rc);
+ if (copy_to_user((void *)arg, check,
+ sizeof(*check)))
+ CDEBUG(D_QUOTA, "copy_to_user failed\n");
+ GOTO(out_poll, rc);
+ }
+ out_poll:
+ OBD_FREE_PTR(check);
+ return rc;
+ }
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
+ case LL_IOC_QUOTACTL_18: {
+ /* copy the old 1.x quota struct for internal use, then copy
+ * back into old format struct. For 1.8 compatibility. */
+ struct if_quotactl_18 *qctl_18;
+ struct if_quotactl *qctl_20;
+
+ OBD_ALLOC_PTR(qctl_18);
+ if (!qctl_18)
+ return -ENOMEM;
+
+ OBD_ALLOC_PTR(qctl_20);
+ if (!qctl_20)
+ GOTO(out_quotactl_18, rc = -ENOMEM);
+
+ if (copy_from_user(qctl_18, (void *)arg, sizeof(*qctl_18)))
+ GOTO(out_quotactl_20, rc = -EFAULT);
+
+ QCTL_COPY(qctl_20, qctl_18);
+ qctl_20->qc_idx = 0;
+
+ /* XXX: dqb_valid was borrowed as a flag to mark that
+ * only mds quota is wanted */
+ if (qctl_18->qc_cmd == Q_GETQUOTA &&
+ qctl_18->qc_dqblk.dqb_valid) {
+ qctl_20->qc_valid = QC_MDTIDX;
+ qctl_20->qc_dqblk.dqb_valid = 0;
+ } else if (qctl_18->obd_uuid.uuid[0] != '\0') {
+ qctl_20->qc_valid = QC_UUID;
+ qctl_20->obd_uuid = qctl_18->obd_uuid;
+ } else {
+ qctl_20->qc_valid = QC_GENERAL;
+ }
+
+ rc = quotactl_ioctl(sbi, qctl_20);
+
+ if (rc == 0) {
+ QCTL_COPY(qctl_18, qctl_20);
+ qctl_18->obd_uuid = qctl_20->obd_uuid;
+
+ if (copy_to_user((void *)arg, qctl_18,
+ sizeof(*qctl_18)))
+ rc = -EFAULT;
+ }
+
+ out_quotactl_20:
+ OBD_FREE_PTR(qctl_20);
+ out_quotactl_18:
+ OBD_FREE_PTR(qctl_18);
+ return rc;
+ }
+#else
+#warning "remove old LL_IOC_QUOTACTL_18 compatibility code"
+#endif /* LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0) */
+ case LL_IOC_QUOTACTL: {
+ struct if_quotactl *qctl;
+
+ OBD_ALLOC_PTR(qctl);
+ if (!qctl)
+ return -ENOMEM;
+
+ if (copy_from_user(qctl, (void *)arg, sizeof(*qctl)))
+ GOTO(out_quotactl, rc = -EFAULT);
+
+ rc = quotactl_ioctl(sbi, qctl);
+
+ if (rc == 0 && copy_to_user((void *)arg, qctl, sizeof(*qctl)))
+ rc = -EFAULT;
+
+ out_quotactl:
+ OBD_FREE_PTR(qctl);
+ return rc;
+ }
+ case OBD_IOC_GETDTNAME:
+ case OBD_IOC_GETMDNAME:
+ return ll_get_obd_name(inode, cmd, arg);
+ case LL_IOC_FLUSHCTX:
+ return ll_flush_ctx(inode);
+#ifdef CONFIG_FS_POSIX_ACL
+ case LL_IOC_RMTACL: {
+ if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
+ inode == inode->i_sb->s_root->d_inode) {
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+
+ LASSERT(fd != NULL);
+ rc = rct_add(&sbi->ll_rct, current_pid(), arg);
+ if (!rc)
+ fd->fd_flags |= LL_FILE_RMTACL;
+ return rc;
+ } else
+ return 0;
+ }
+#endif
+ case LL_IOC_GETOBDCOUNT: {
+ int count, vallen;
+ struct obd_export *exp;
+
+ if (copy_from_user(&count, (int *)arg, sizeof(int)))
+ return -EFAULT;
+
+ /* get ost count when count is zero, get mdt count otherwise */
+ exp = count ? sbi->ll_md_exp : sbi->ll_dt_exp;
+ vallen = sizeof(count);
+ rc = obd_get_info(NULL, exp, sizeof(KEY_TGT_COUNT),
+ KEY_TGT_COUNT, &vallen, &count, NULL);
+ if (rc) {
+ CERROR("get target count failed: %d\n", rc);
+ return rc;
+ }
+
+ if (copy_to_user((int *)arg, &count, sizeof(int)))
+ return -EFAULT;
+
+ return 0;
+ }
+ case LL_IOC_PATH2FID:
+ if (copy_to_user((void *)arg, ll_inode2fid(inode),
+ sizeof(struct lu_fid)))
+ return -EFAULT;
+ return 0;
+ case LL_IOC_GET_CONNECT_FLAGS: {
+ return obd_iocontrol(cmd, sbi->ll_md_exp, 0, NULL, (void *)arg);
+ }
+ case OBD_IOC_CHANGELOG_SEND:
+ case OBD_IOC_CHANGELOG_CLEAR:
+ rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void *)arg,
+ sizeof(struct ioc_changelog));
+ return rc;
+ case OBD_IOC_FID2PATH:
+ return ll_fid2path(inode, (void *)arg);
+ case LL_IOC_HSM_REQUEST: {
+ struct hsm_user_request *hur;
+ int totalsize;
+
+ OBD_ALLOC_PTR(hur);
+ if (hur == NULL)
+ return -ENOMEM;
+
+ /* We don't know the true size yet; copy the fixed-size part */
+ if (copy_from_user(hur, (void *)arg, sizeof(*hur))) {
+ OBD_FREE_PTR(hur);
+ return -EFAULT;
+ }
+
+ /* Compute the whole struct size */
+ totalsize = hur_len(hur);
+ OBD_FREE_PTR(hur);
+ OBD_ALLOC_LARGE(hur, totalsize);
+ if (hur == NULL)
+ return -ENOMEM;
+
+ /* Copy the whole struct */
+ if (copy_from_user(hur, (void *)arg, totalsize)) {
+ OBD_FREE_LARGE(hur, totalsize);
+ return -EFAULT;
+ }
+
+ rc = obd_iocontrol(cmd, ll_i2mdexp(inode), totalsize,
+ hur, NULL);
+
+ OBD_FREE_LARGE(hur, totalsize);
+
+ return rc;
+ }
+ case LL_IOC_HSM_PROGRESS: {
+ struct hsm_progress_kernel hpk;
+ struct hsm_progress hp;
+
+ if (copy_from_user(&hp, (void *)arg, sizeof(hp)))
+ return -EFAULT;
+
+ hpk.hpk_fid = hp.hp_fid;
+ hpk.hpk_cookie = hp.hp_cookie;
+ hpk.hpk_extent = hp.hp_extent;
+ hpk.hpk_flags = hp.hp_flags;
+ hpk.hpk_errval = hp.hp_errval;
+ hpk.hpk_data_version = 0;
+
+ /* File may not exist in Lustre; all progress
+ * reported to Lustre root */
+ rc = obd_iocontrol(cmd, sbi->ll_md_exp, sizeof(hpk), &hpk,
+ NULL);
+ return rc;
+ }
+ case LL_IOC_HSM_CT_START:
+ rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void *)arg,
+ sizeof(struct lustre_kernelcomm));
+ return rc;
+
+ case LL_IOC_HSM_COPY_START: {
+ struct hsm_copy *copy;
+ int rc;
+
+ OBD_ALLOC_PTR(copy);
+ if (copy == NULL)
+ return -ENOMEM;
+ if (copy_from_user(copy, (char *)arg, sizeof(*copy))) {
+ OBD_FREE_PTR(copy);
+ return -EFAULT;
+ }
+
+ rc = ll_ioc_copy_start(inode->i_sb, copy);
+ if (copy_to_user((char *)arg, copy, sizeof(*copy)))
+ rc = -EFAULT;
+
+ OBD_FREE_PTR(copy);
+ return rc;
+ }
+ case LL_IOC_HSM_COPY_END: {
+ struct hsm_copy *copy;
+ int rc;
+
+ OBD_ALLOC_PTR(copy);
+ if (copy == NULL)
+ return -ENOMEM;
+ if (copy_from_user(copy, (char *)arg, sizeof(*copy))) {
+ OBD_FREE_PTR(copy);
+ return -EFAULT;
+ }
+
+ rc = ll_ioc_copy_end(inode->i_sb, copy);
+ if (copy_to_user((char *)arg, copy, sizeof(*copy)))
+ rc = -EFAULT;
+
+ OBD_FREE_PTR(copy);
+ return rc;
+ }
+ default:
+ return obd_iocontrol(cmd, sbi->ll_dt_exp, 0, NULL, (void *)arg);
+ }
+}
+
+static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
+{
+ struct inode *inode = file->f_mapping->host;
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ int api32 = ll_need_32bit_api(sbi);
+ loff_t ret = -EINVAL;
+
+ mutex_lock(&inode->i_mutex);
+ switch (origin) {
+ case SEEK_SET:
+ break;
+ case SEEK_CUR:
+ offset += file->f_pos;
+ break;
+ case SEEK_END:
+ if (offset > 0)
+ GOTO(out, ret);
+ if (api32)
+ offset += LL_DIR_END_OFF_32BIT;
+ else
+ offset += LL_DIR_END_OFF;
+ break;
+ default:
+ GOTO(out, ret);
+ }
+
+ if (offset >= 0 &&
+ ((api32 && offset <= LL_DIR_END_OFF_32BIT) ||
+ (!api32 && offset <= LL_DIR_END_OFF))) {
+ if (offset != file->f_pos) {
+ if ((api32 && offset == LL_DIR_END_OFF_32BIT) ||
+ (!api32 && offset == LL_DIR_END_OFF))
+ fd->lfd_pos = MDS_DIR_END_OFF;
+ else if (api32 && sbi->ll_flags & LL_SBI_64BIT_HASH)
+ fd->lfd_pos = offset << 32;
+ else
+ fd->lfd_pos = offset;
+ file->f_pos = offset;
+ file->f_version = 0;
+ }
+ ret = offset;
+ }
+ GOTO(out, ret);
+
+out:
+ mutex_unlock(&inode->i_mutex);
+ return ret;
+}
+
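+/*
+ * Worked example (illustrative numbers only) of the 32-bit API cookie
+ * handling in ll_readdir() and ll_dir_seek() above: on a 64-bit-hash mount a
+ * directory position of 0x1234abcd00550066 is reported to a 32-bit
+ * application as 0x1234abcd (ctx->pos >>= 32); a later seekdir(0x1234abcd)
+ * then sets lfd_pos to 0x1234abcd00000000 (offset << 32), so readdir resumes
+ * at the first entry whose full hash is not below that value - the low 32
+ * bits of the original position are lost.
+ */
+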
+int ll_dir_open(struct inode *inode, struct file *file)
+{
+ return ll_file_open(inode, file);
+}
+
+int ll_dir_release(struct inode *inode, struct file *file)
+{
+ return ll_file_release(inode, file);
+}
+
+struct file_operations ll_dir_operations = {
+ .llseek = ll_dir_seek,
+ .open = ll_dir_open,
+ .release = ll_dir_release,
+ .read = generic_read_dir,
+ .iterate = ll_readdir,
+ .unlocked_ioctl = ll_dir_ioctl,
+ .fsync = ll_fsync,
+};
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
new file mode 100644
index 0000000..bc534db
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -0,0 +1,3152 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/llite/file.c
+ *
+ * Author: Peter Braam <braam@clusterfs.com>
+ * Author: Phil Schwan <phil@clusterfs.com>
+ * Author: Andreas Dilger <adilger@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LLITE
+#include <lustre_dlm.h>
+#include <lustre_lite.h>
+#include <linux/pagemap.h>
+#include <linux/file.h>
+#include "llite_internal.h"
+#include <lustre/ll_fiemap.h>
+
+#include "cl_object.h"
+
+struct ll_file_data *ll_file_data_get(void)
+{
+ struct ll_file_data *fd;
+
+ OBD_SLAB_ALLOC_PTR_GFP(fd, ll_file_data_slab, __GFP_IO);
+ if (fd == NULL)
+ return NULL;
+ fd->fd_write_failed = false;
+ return fd;
+}
+
+static void ll_file_data_put(struct ll_file_data *fd)
+{
+ if (fd != NULL)
+ OBD_SLAB_FREE_PTR(fd, ll_file_data_slab);
+}
+
+void ll_pack_inode2opdata(struct inode *inode, struct md_op_data *op_data,
+ struct lustre_handle *fh)
+{
+ op_data->op_fid1 = ll_i2info(inode)->lli_fid;
+ op_data->op_attr.ia_mode = inode->i_mode;
+ op_data->op_attr.ia_atime = inode->i_atime;
+ op_data->op_attr.ia_mtime = inode->i_mtime;
+ op_data->op_attr.ia_ctime = inode->i_ctime;
+ op_data->op_attr.ia_size = i_size_read(inode);
+ op_data->op_attr_blocks = inode->i_blocks;
+ ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags =
+ ll_inode_to_ext_flags(inode->i_flags);
+ op_data->op_ioepoch = ll_i2info(inode)->lli_ioepoch;
+ if (fh)
+ op_data->op_handle = *fh;
+ op_data->op_capa1 = ll_mdscapa_get(inode);
+
+ if (LLIF_DATA_MODIFIED & ll_i2info(inode)->lli_flags)
+ op_data->op_bias |= MDS_DATA_MODIFIED;
+}
+
+/**
+ * Closes the IO epoch and packs all the attributes into @op_data for
+ * the CLOSE rpc.
+ */
+static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data,
+ struct obd_client_handle *och)
+{
+ op_data->op_attr.ia_valid = ATTR_MODE | ATTR_ATIME | ATTR_ATIME_SET |
+ ATTR_MTIME | ATTR_MTIME_SET |
+ ATTR_CTIME | ATTR_CTIME_SET;
+
+ if (!(och->och_flags & FMODE_WRITE))
+ goto out;
+
+ if (!exp_connect_som(ll_i2mdexp(inode)) || !S_ISREG(inode->i_mode))
+ op_data->op_attr.ia_valid |= ATTR_SIZE | ATTR_BLOCKS;
+ else
+ ll_ioepoch_close(inode, op_data, &och, 0);
+
+out:
+ ll_pack_inode2opdata(inode, op_data, &och->och_fh);
+ ll_prep_md_op_data(op_data, inode, NULL, NULL,
+ 0, 0, LUSTRE_OPC_ANY, NULL);
+}
+
+static int ll_close_inode_openhandle(struct obd_export *md_exp,
+ struct inode *inode,
+ struct obd_client_handle *och)
+{
+ struct obd_export *exp = ll_i2mdexp(inode);
+ struct md_op_data *op_data;
+ struct ptlrpc_request *req = NULL;
+ struct obd_device *obd = class_exp2obd(exp);
+ int epoch_close = 1;
+ int rc;
+
+ if (obd == NULL) {
+ /*
+ * XXX: in case of LMV, is this correct to access
+ * ->exp_handle?
+ */
+ CERROR("Invalid MDC connection handle "LPX64"\n",
+ ll_i2mdexp(inode)->exp_handle.h_cookie);
+ GOTO(out, rc = 0);
+ }
+
+ OBD_ALLOC_PTR(op_data);
+ if (op_data == NULL)
+ GOTO(out, rc = -ENOMEM); /* XXX We leak openhandle and request here. */
+
+ ll_prepare_close(inode, op_data, och);
+ epoch_close = (op_data->op_flags & MF_EPOCH_CLOSE);
+ rc = md_close(md_exp, op_data, och->och_mod, &req);
+ if (rc == -EAGAIN) {
+ /* This close must have the epoch closed. */
+ LASSERT(epoch_close);
+ /* MDS has instructed us to obtain Size-on-MDS attribute from
+ * OSTs and send a setattr back to the MDS. */
+ rc = ll_som_update(inode, op_data);
+ if (rc) {
+ CERROR("inode %lu mdc Size-on-MDS update failed: "
+ "rc = %d\n", inode->i_ino, rc);
+ rc = 0;
+ }
+ } else if (rc) {
+ CERROR("inode %lu mdc close failed: rc = %d\n",
+ inode->i_ino, rc);
+ }
+
+ /* The DATA_MODIFIED flag was successfully sent on close; cancel the
+ * data modification flag. */
+ if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) {
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ spin_lock(&lli->lli_lock);
+ lli->lli_flags &= ~LLIF_DATA_MODIFIED;
+ spin_unlock(&lli->lli_lock);
+ }
+
+ ll_finish_md_op_data(op_data);
+
+ if (rc == 0) {
+ rc = ll_objects_destroy(req, inode);
+ if (rc)
+ CERROR("inode %lu ll_objects destroy: rc = %d\n",
+ inode->i_ino, rc);
+ }
+
+out:
+ if (exp_connect_som(exp) && !epoch_close &&
+ S_ISREG(inode->i_mode) && (och->och_flags & FMODE_WRITE)) {
+ ll_queue_done_writing(inode, LLIF_DONE_WRITING);
+ } else {
+ md_clear_open_replay_data(md_exp, och);
+ /* Free @och if it is not waiting for DONE_WRITING. */
+ och->och_fh.cookie = DEAD_HANDLE_MAGIC;
+ OBD_FREE_PTR(och);
+ }
+ if (req) /* This is close request */
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+int ll_md_real_close(struct inode *inode, int flags)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct obd_client_handle **och_p;
+ struct obd_client_handle *och;
+ __u64 *och_usecount;
+ int rc = 0;
+
+ if (flags & FMODE_WRITE) {
+ och_p = &lli->lli_mds_write_och;
+ och_usecount = &lli->lli_open_fd_write_count;
+ } else if (flags & FMODE_EXEC) {
+ och_p = &lli->lli_mds_exec_och;
+ och_usecount = &lli->lli_open_fd_exec_count;
+ } else {
+ LASSERT(flags & FMODE_READ);
+ och_p = &lli->lli_mds_read_och;
+ och_usecount = &lli->lli_open_fd_read_count;
+ }
+
+ mutex_lock(&lli->lli_och_mutex);
+ if (*och_usecount) { /* There are still users of this handle, so
+ skip freeing it. */
+ mutex_unlock(&lli->lli_och_mutex);
+ return 0;
+ }
+ och = *och_p;
+ *och_p = NULL;
+ mutex_unlock(&lli->lli_och_mutex);
+
+ if (och) { /* There might be a race and somebody may have freed
+ this och already */
+ rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
+ inode, och);
+ }
+
+ return rc;
+}
+
+int ll_md_close(struct obd_export *md_exp, struct inode *inode,
+ struct file *file)
+{
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_inode_info *lli = ll_i2info(inode);
+ int rc = 0;
+
+ /* clear group lock, if present */
+ if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED))
+ ll_put_grouplock(inode, file, fd->fd_grouplock.cg_gid);
+
+ /* Let's see if we have a good enough OPEN lock on the file and
+ whether we can skip talking to the MDS */
+ if (file->f_dentry->d_inode) { /* Can this ever be false? */
+ int lockmode;
+ int flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
+ struct lustre_handle lockh;
+ struct inode *inode = file->f_dentry->d_inode;
+ ldlm_policy_data_t policy = {.l_inodebits={MDS_INODELOCK_OPEN}};
+
+ mutex_lock(&lli->lli_och_mutex);
+ if (fd->fd_omode & FMODE_WRITE) {
+ lockmode = LCK_CW;
+ LASSERT(lli->lli_open_fd_write_count);
+ lli->lli_open_fd_write_count--;
+ } else if (fd->fd_omode & FMODE_EXEC) {
+ lockmode = LCK_PR;
+ LASSERT(lli->lli_open_fd_exec_count);
+ lli->lli_open_fd_exec_count--;
+ } else {
+ lockmode = LCK_CR;
+ LASSERT(lli->lli_open_fd_read_count);
+ lli->lli_open_fd_read_count--;
+ }
+ mutex_unlock(&lli->lli_och_mutex);
+
+ if (!md_lock_match(md_exp, flags, ll_inode2fid(inode),
+ LDLM_IBITS, &policy, lockmode,
+ &lockh)) {
+ rc = ll_md_real_close(file->f_dentry->d_inode,
+ fd->fd_omode);
+ }
+ } else {
+ CERROR("Releasing a file %p with negative dentry %p. Name %s",
+ file, file->f_dentry, file->f_dentry->d_name.name);
+ }
+
+ LUSTRE_FPRIVATE(file) = NULL;
+ ll_file_data_put(fd);
+ ll_capa_close(inode);
+
+ return rc;
+}
+
+/* While this returns an error code, the caller, fput(), does not check it,
+ * so we need to make every effort to clean up all of our state here. Also,
+ * applications rarely check close errors and, even if an error is returned,
+ * they will not retry the close call.
+ */
+int ll_file_release(struct inode *inode, struct file *file)
+{
+ struct ll_file_data *fd;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ll_inode_info *lli = ll_i2info(inode);
+ int rc;
+
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
+ inode->i_generation, inode);
+
+#ifdef CONFIG_FS_POSIX_ACL
+ if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
+ inode == inode->i_sb->s_root->d_inode) {
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+
+ LASSERT(fd != NULL);
+ if (unlikely(fd->fd_flags & LL_FILE_RMTACL)) {
+ fd->fd_flags &= ~LL_FILE_RMTACL;
+ rct_del(&sbi->ll_rct, current_pid());
+ et_search_free(&sbi->ll_et, current_pid());
+ }
+ }
+#endif
+
+ if (inode->i_sb->s_root != file->f_dentry)
+ ll_stats_ops_tally(sbi, LPROC_LL_RELEASE, 1);
+ fd = LUSTRE_FPRIVATE(file);
+ LASSERT(fd != NULL);
+
+ /* The last ref on @file, maybe not the owner pid of statahead.
+ * Different processes can open the same dir, "ll_opendir_key" means:
+ * it is me that should stop the statahead thread. */
+ if (S_ISDIR(inode->i_mode) && lli->lli_opendir_key == fd &&
+ lli->lli_opendir_pid != 0)
+ ll_stop_statahead(inode, lli->lli_opendir_key);
+
+ if (inode->i_sb->s_root == file->f_dentry) {
+ LUSTRE_FPRIVATE(file) = NULL;
+ ll_file_data_put(fd);
+ return 0;
+ }
+
+ if (!S_ISDIR(inode->i_mode)) {
+ lov_read_and_clear_async_rc(lli->lli_clob);
+ lli->lli_async_rc = 0;
+ }
+
+ rc = ll_md_close(sbi->ll_md_exp, inode, file);
+
+ if (CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_DUMP_LOG, cfs_fail_val))
+ libcfs_debug_dumplog();
+
+ return rc;
+}
+
+static int ll_intent_file_open(struct file *file, void *lmm,
+ int lmmsize, struct lookup_intent *itp)
+{
+ struct ll_sb_info *sbi = ll_i2sbi(file->f_dentry->d_inode);
+ struct dentry *parent = file->f_dentry->d_parent;
+ const char *name = file->f_dentry->d_name.name;
+ const int len = file->f_dentry->d_name.len;
+ struct md_op_data *op_data;
+ struct ptlrpc_request *req;
+ __u32 opc = LUSTRE_OPC_ANY;
+ int rc;
+
+ if (!parent)
+ return -ENOENT;
+
+	/* Usually we come here only for NFSD, and we want the open lock.
+	   But we can also get here with pre 2.6.15 patchless kernels, and in
+	   that case that lock is also ok */
+	/* We can also get here if there was a cached open handle in
+	 * revalidate_it but it disappeared while we were getting from there
+	 * to ll_file_open.  That means the file was closed and immediately
+	 * reopened, which makes it a good candidate for using the OPEN lock */
+	/* If lmmsize & lmm are not 0, we are just setting stripe info
+	 * parameters.  No need for the open lock */
+ if (lmm == NULL && lmmsize == 0) {
+ itp->it_flags |= MDS_OPEN_LOCK;
+ if (itp->it_flags & FMODE_WRITE)
+ opc = LUSTRE_OPC_CREATE;
+ }
+
+ op_data = ll_prep_md_op_data(NULL, parent->d_inode,
+ file->f_dentry->d_inode, name, len,
+ O_RDWR, opc, NULL);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+
+ itp->it_flags |= MDS_OPEN_BY_FID;
+ rc = md_intent_lock(sbi->ll_md_exp, op_data, lmm, lmmsize, itp,
+ 0 /*unused */, &req, ll_md_blocking_ast, 0);
+ ll_finish_md_op_data(op_data);
+ if (rc == -ESTALE) {
+		/* keep a separate exit path so we don't flood the log
+		 * with -ESTALE error messages.
+		 */
+ if (!it_disposition(itp, DISP_OPEN_OPEN) ||
+ it_open_error(DISP_OPEN_OPEN, itp))
+ GOTO(out, rc);
+ ll_release_openhandle(file->f_dentry, itp);
+ GOTO(out, rc);
+ }
+
+ if (it_disposition(itp, DISP_LOOKUP_NEG))
+ GOTO(out, rc = -ENOENT);
+
+ if (rc != 0 || it_open_error(DISP_OPEN_OPEN, itp)) {
+ rc = rc ? rc : it_open_error(DISP_OPEN_OPEN, itp);
+ CDEBUG(D_VFSTRACE, "lock enqueue: err: %d\n", rc);
+ GOTO(out, rc);
+ }
+
+ rc = ll_prep_inode(&file->f_dentry->d_inode, req, NULL, itp);
+ if (!rc && itp->d.lustre.it_lock_mode)
+ ll_set_lock_data(sbi->ll_md_exp, file->f_dentry->d_inode,
+ itp, NULL);
+
+out:
+ ptlrpc_req_finished(itp->d.lustre.it_data);
+ it_clear_disposition(itp, DISP_ENQ_COMPLETE);
+ ll_intent_drop_lock(itp);
+
+ return rc;
+}
+
+/**
+ * Assign an obtained @ioepoch to the client's inode.  No lock is needed: the
+ * MDS does not trust attributes while several ioepoch holders exist, and it
+ * also skips attributes for a previous ioepoch once a new one is opened.
+ */
+void ll_ioepoch_open(struct ll_inode_info *lli, __u64 ioepoch)
+{
+ if (ioepoch && lli->lli_ioepoch != ioepoch) {
+ lli->lli_ioepoch = ioepoch;
+ CDEBUG(D_INODE, "Epoch "LPU64" opened on "DFID"\n",
+ ioepoch, PFID(&lli->lli_fid));
+ }
+}
+
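+/* Fill an obd_client_handle from the MDT reply carried by the intent: copy
+ * the open handle and flags, record the ioepoch and register the handle for
+ * open replay. */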
+static int ll_och_fill(struct obd_export *md_exp, struct ll_inode_info *lli,
+ struct lookup_intent *it, struct obd_client_handle *och)
+{
+ struct ptlrpc_request *req = it->d.lustre.it_data;
+ struct mdt_body *body;
+
+ LASSERT(och);
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ LASSERT(body != NULL); /* reply already checked out */
+
+ memcpy(&och->och_fh, &body->handle, sizeof(body->handle));
+ och->och_magic = OBD_CLIENT_HANDLE_MAGIC;
+ och->och_fid = lli->lli_fid;
+ och->och_flags = it->it_flags;
+ ll_ioepoch_open(lli, body->ioepoch);
+
+ return md_set_open_replay_data(md_exp, och, req);
+}
+
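+/* Finish the client-side part of an open: if an obd_client_handle was
+ * supplied, fill it from the intent reply first, then attach the
+ * ll_file_data to the file, initialise readahead state and remember the
+ * open mode. */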
+int ll_local_open(struct file *file, struct lookup_intent *it,
+ struct ll_file_data *fd, struct obd_client_handle *och)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ LASSERT(!LUSTRE_FPRIVATE(file));
+
+ LASSERT(fd != NULL);
+
+ if (och) {
+ struct ptlrpc_request *req = it->d.lustre.it_data;
+ struct mdt_body *body;
+ int rc;
+
+ rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, lli, it, och);
+ if (rc)
+ return rc;
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ if ((it->it_flags & FMODE_WRITE) &&
+ (body->valid & OBD_MD_FLSIZE))
+ CDEBUG(D_INODE, "Epoch "LPU64" opened on "DFID"\n",
+ lli->lli_ioepoch, PFID(&lli->lli_fid));
+ }
+
+ LUSTRE_FPRIVATE(file) = fd;
+ ll_readahead_init(inode, &fd->fd_ras);
+ fd->fd_omode = it->it_flags;
+ return 0;
+}
+
+/* Open a file, and (for the very first open) create objects on the OSTs at
+ * this time. If opened with O_LOV_DELAY_CREATE, then we don't do the object
+ * creation or open until ll_lov_setstripe() ioctl is called.
+ *
+ * If we already have the stripe MD locally then we don't request it in
+ * md_open(), by passing a lmm_size = 0.
+ *
+ * It is up to the application to ensure no other processes open this file
+ * in the O_LOV_DELAY_CREATE case, or the default striping pattern will be
+ * used. We might be able to avoid races of that sort by getting lli_open_sem
+ * before returning in the O_LOV_DELAY_CREATE case and dropping it here
+ * or in ll_file_release(), but I'm not sure that is desirable/necessary.
+ */
+int ll_file_open(struct inode *inode, struct file *file)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct lookup_intent *it, oit = { .it_op = IT_OPEN,
+ .it_flags = file->f_flags };
+ struct obd_client_handle **och_p = NULL;
+ __u64 *och_usecount = NULL;
+ struct ll_file_data *fd;
+ int rc = 0, opendir_set = 0;
+
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), flags %o\n", inode->i_ino,
+ inode->i_generation, inode, file->f_flags);
+
+ it = file->private_data; /* XXX: compat macro */
+ file->private_data = NULL; /* prevent ll_local_open assertion */
+
+ fd = ll_file_data_get();
+ if (fd == NULL)
+ GOTO(out_openerr, rc = -ENOMEM);
+
+ fd->fd_file = file;
+ if (S_ISDIR(inode->i_mode)) {
+ spin_lock(&lli->lli_sa_lock);
+ if (lli->lli_opendir_key == NULL && lli->lli_sai == NULL &&
+ lli->lli_opendir_pid == 0) {
+ lli->lli_opendir_key = fd;
+ lli->lli_opendir_pid = current_pid();
+ opendir_set = 1;
+ }
+ spin_unlock(&lli->lli_sa_lock);
+ }
+
+ if (inode->i_sb->s_root == file->f_dentry) {
+ LUSTRE_FPRIVATE(file) = fd;
+ return 0;
+ }
+
+ if (!it || !it->d.lustre.it_disposition) {
+		/* Convert f_flags into an access mode.  We cannot use
+		 * file->f_mode, because everything but the O_ACCMODE bits
+		 * has been stripped from it.  Adding 1 maps O_RDONLY/
+		 * O_WRONLY/O_RDWR onto the matching FMODE_READ/FMODE_WRITE
+		 * bits. */
+ if ((oit.it_flags + 1) & O_ACCMODE)
+ oit.it_flags++;
+ if (file->f_flags & O_TRUNC)
+ oit.it_flags |= FMODE_WRITE;
+
+		/* The kernel only calls f_op->open from dentry_open.
+		 * filp_open calls dentry_open after open_namei has checked
+		 * permissions.  Only nfsd_open calls dentry_open directly
+		 * without checking permissions, which is why the code below
+		 * is safe. */
+ if (oit.it_flags & (FMODE_WRITE | FMODE_READ))
+ oit.it_flags |= MDS_OPEN_OWNEROVERRIDE;
+
+ /* We do not want O_EXCL here, presumably we opened the file
+ * already? XXX - NFS implications? */
+ oit.it_flags &= ~O_EXCL;
+
+		/* bug 20584: if "it_flags" contains O_CREAT, the file will be
+		 * created if necessary, so set "IT_CREAT" to keep the intent
+		 * consistent with it */
+ if (oit.it_flags & O_CREAT)
+ oit.it_op |= IT_CREAT;
+
+ it = &oit;
+ }
+
+restart:
+ /* Let's see if we have file open on MDS already. */
+ if (it->it_flags & FMODE_WRITE) {
+ och_p = &lli->lli_mds_write_och;
+ och_usecount = &lli->lli_open_fd_write_count;
+ } else if (it->it_flags & FMODE_EXEC) {
+ och_p = &lli->lli_mds_exec_och;
+ och_usecount = &lli->lli_open_fd_exec_count;
+ } else {
+ och_p = &lli->lli_mds_read_och;
+ och_usecount = &lli->lli_open_fd_read_count;
+ }
+
+ mutex_lock(&lli->lli_och_mutex);
+ if (*och_p) { /* Open handle is present */
+ if (it_disposition(it, DISP_OPEN_OPEN)) {
+			/* Well, there's an extra open request that we do not
+			   need; close it.  This will decref the request. */
+ rc = it_open_error(DISP_OPEN_OPEN, it);
+ if (rc) {
+ mutex_unlock(&lli->lli_och_mutex);
+ GOTO(out_openerr, rc);
+ }
+
+ ll_release_openhandle(file->f_dentry, it);
+ }
+ (*och_usecount)++;
+
+ rc = ll_local_open(file, it, fd, NULL);
+ if (rc) {
+ (*och_usecount)--;
+ mutex_unlock(&lli->lli_och_mutex);
+ GOTO(out_openerr, rc);
+ }
+ } else {
+ LASSERT(*och_usecount == 0);
+ if (!it->d.lustre.it_disposition) {
+			/* We cannot just request a lock handle now: the new
+			   ELC code means that one of the other OPEN locks for
+			   this file could be cancelled, and since the blocking
+			   AST handler would attempt to grab och_mutex as well,
+			   that would result in a deadlock */
+ mutex_unlock(&lli->lli_och_mutex);
+ it->it_create_mode |= M_CHECK_STALE;
+ rc = ll_intent_file_open(file, NULL, 0, it);
+ it->it_create_mode &= ~M_CHECK_STALE;
+ if (rc)
+ GOTO(out_openerr, rc);
+
+ goto restart;
+ }
+		OBD_ALLOC(*och_p, sizeof(struct obd_client_handle));
+ if (!*och_p)
+ GOTO(out_och_free, rc = -ENOMEM);
+
+ (*och_usecount)++;
+
+ /* md_intent_lock() didn't get a request ref if there was an
+ * open error, so don't do cleanup on the request here
+ * (bug 3430) */
+ /* XXX (green): Should not we bail out on any error here, not
+ * just open error? */
+ rc = it_open_error(DISP_OPEN_OPEN, it);
+ if (rc)
+ GOTO(out_och_free, rc);
+
+ LASSERT(it_disposition(it, DISP_ENQ_OPEN_REF));
+
+ rc = ll_local_open(file, it, fd, *och_p);
+ if (rc)
+ GOTO(out_och_free, rc);
+ }
+ mutex_unlock(&lli->lli_och_mutex);
+ fd = NULL;
+
+	/* Must do this outside the lli_och_mutex lock to prevent a deadlock
+	   where a different kind of OPEN lock for this same inode gets
+	   cancelled by ldlm_cancel_lru */
+ if (!S_ISREG(inode->i_mode))
+ GOTO(out_och_free, rc);
+
+ ll_capa_open(inode);
+
+ if (!lli->lli_has_smd) {
+ if (file->f_flags & O_LOV_DELAY_CREATE ||
+ !(file->f_mode & FMODE_WRITE)) {
+ CDEBUG(D_INODE, "object creation was delayed\n");
+ GOTO(out_och_free, rc);
+ }
+ }
+ file->f_flags &= ~O_LOV_DELAY_CREATE;
+ GOTO(out_och_free, rc);
+
+out_och_free:
+ if (rc) {
+ if (och_p && *och_p) {
+			OBD_FREE(*och_p, sizeof(struct obd_client_handle));
+ *och_p = NULL; /* OBD_FREE writes some magic there */
+ (*och_usecount)--;
+ }
+ mutex_unlock(&lli->lli_och_mutex);
+
+out_openerr:
+ if (opendir_set != 0)
+ ll_stop_statahead(inode, lli->lli_opendir_key);
+ if (fd != NULL)
+ ll_file_data_put(fd);
+ } else {
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN, 1);
+ }
+
+ if (it && it_disposition(it, DISP_ENQ_OPEN_REF)) {
+ ptlrpc_req_finished(it->d.lustre.it_data);
+ it_clear_disposition(it, DISP_ENQ_OPEN_REF);
+ }
+
+ return rc;
+}
+
+/* Fills the obdo with the attributes for the lsm */
+static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
+ struct obd_capa *capa, struct obdo *obdo,
+ __u64 ioepoch, int sync)
+{
+ struct ptlrpc_request_set *set;
+ struct obd_info oinfo = { { { 0 } } };
+ int rc;
+
+ LASSERT(lsm != NULL);
+
+ oinfo.oi_md = lsm;
+ oinfo.oi_oa = obdo;
+ oinfo.oi_oa->o_oi = lsm->lsm_oi;
+ oinfo.oi_oa->o_mode = S_IFREG;
+ oinfo.oi_oa->o_ioepoch = ioepoch;
+ oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLTYPE |
+ OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
+ OBD_MD_FLBLKSZ | OBD_MD_FLATIME |
+ OBD_MD_FLMTIME | OBD_MD_FLCTIME |
+ OBD_MD_FLGROUP | OBD_MD_FLEPOCH |
+ OBD_MD_FLDATAVERSION;
+ oinfo.oi_capa = capa;
+ if (sync) {
+ oinfo.oi_oa->o_valid |= OBD_MD_FLFLAGS;
+ oinfo.oi_oa->o_flags |= OBD_FL_SRVLOCK;
+ }
+
+ set = ptlrpc_prep_set();
+ if (set == NULL) {
+ CERROR("can't allocate ptlrpc set\n");
+ rc = -ENOMEM;
+ } else {
+ rc = obd_getattr_async(exp, &oinfo, set);
+ if (rc == 0)
+ rc = ptlrpc_set_wait(set);
+ ptlrpc_set_destroy(set);
+ }
+ if (rc == 0)
+ oinfo.oi_oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ |
+ OBD_MD_FLATIME | OBD_MD_FLMTIME |
+ OBD_MD_FLCTIME | OBD_MD_FLSIZE |
+ OBD_MD_FLDATAVERSION);
+ return rc;
+}
+
+/**
+ * Performs the getattr on the inode and updates its fields.
+ * If @sync != 0, perform the getattr under the server-side lock.
+ */
+int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
+ __u64 ioepoch, int sync)
+{
+ struct obd_capa *capa = ll_mdscapa_get(inode);
+ struct lov_stripe_md *lsm;
+ int rc;
+
+ lsm = ccc_inode_lsm_get(inode);
+ rc = ll_lsm_getattr(lsm, ll_i2dtexp(inode),
+ capa, obdo, ioepoch, sync);
+ capa_put(capa);
+ if (rc == 0) {
+ struct ost_id *oi = lsm ? &lsm->lsm_oi : &obdo->o_oi;
+
+ obdo_refresh_inode(inode, obdo, obdo->o_valid);
+ CDEBUG(D_INODE, "objid "DOSTID" size %llu, blocks %llu,"
+ " blksize %lu\n", POSTID(oi), i_size_read(inode),
+ (unsigned long long)inode->i_blocks,
+ (unsigned long)ll_inode_blksize(inode));
+ }
+ ccc_inode_lsm_put(inode, lsm);
+ return rc;
+}
+
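+/* Merge the attributes cached from the MDS (lli_lvb) with those obtained
+ * from the OSTs through the cl_object: keep the newest a/m/ctime and update
+ * the client-visible size and block count under the inode size lock. */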
+int ll_merge_lvb(const struct lu_env *env, struct inode *inode)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct cl_object *obj = lli->lli_clob;
+ struct cl_attr *attr = ccc_env_thread_attr(env);
+ struct ost_lvb lvb;
+ int rc = 0;
+
+ ll_inode_size_lock(inode);
+ /* merge timestamps the most recently obtained from mds with
+ timestamps obtained from osts */
+ LTIME_S(inode->i_atime) = lli->lli_lvb.lvb_atime;
+ LTIME_S(inode->i_mtime) = lli->lli_lvb.lvb_mtime;
+ LTIME_S(inode->i_ctime) = lli->lli_lvb.lvb_ctime;
+ inode_init_lvb(inode, &lvb);
+
+ cl_object_attr_lock(obj);
+ rc = cl_object_attr_get(env, obj, attr);
+ cl_object_attr_unlock(obj);
+
+ if (rc == 0) {
+ if (lvb.lvb_atime < attr->cat_atime)
+ lvb.lvb_atime = attr->cat_atime;
+ if (lvb.lvb_ctime < attr->cat_ctime)
+ lvb.lvb_ctime = attr->cat_ctime;
+ if (lvb.lvb_mtime < attr->cat_mtime)
+ lvb.lvb_mtime = attr->cat_mtime;
+
+ CDEBUG(D_VFSTRACE, DFID" updating i_size "LPU64"\n",
+ PFID(&lli->lli_fid), attr->cat_size);
+ cl_isize_write_nolock(inode, attr->cat_size);
+
+ inode->i_blocks = attr->cat_blocks;
+
+ LTIME_S(inode->i_mtime) = lvb.lvb_mtime;
+ LTIME_S(inode->i_atime) = lvb.lvb_atime;
+ LTIME_S(inode->i_ctime) = lvb.lvb_ctime;
+ }
+ ll_inode_size_unlock(inode);
+
+ return rc;
+}
+
+int ll_glimpse_ioctl(struct ll_sb_info *sbi, struct lov_stripe_md *lsm,
+ lstat_t *st)
+{
+ struct obdo obdo = { 0 };
+ int rc;
+
+ rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, NULL, &obdo, 0, 0);
+ if (rc == 0) {
+ st->st_size = obdo.o_size;
+ st->st_blocks = obdo.o_blocks;
+ st->st_mtime = obdo.o_mtime;
+ st->st_atime = obdo.o_atime;
+ st->st_ctime = obdo.o_ctime;
+ }
+ return rc;
+}
+
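+/* Initialise a cl_io for a read or write: propagate O_NONBLOCK, O_APPEND and
+ * O_SYNC/O_DIRECT from the file flags and choose the DLM lock requirement
+ * (never for nolock files, mandatory for O_APPEND, "maybe" otherwise). */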
+void ll_io_init(struct cl_io *io, const struct file *file, int write)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+
+ io->u.ci_rw.crw_nonblock = file->f_flags & O_NONBLOCK;
+ if (write) {
+ io->u.ci_wr.wr_append = !!(file->f_flags & O_APPEND);
+ io->u.ci_wr.wr_sync = file->f_flags & O_SYNC ||
+ file->f_flags & O_DIRECT ||
+ IS_SYNC(inode);
+ }
+ io->ci_obj = ll_i2info(inode)->lli_clob;
+ io->ci_lockreq = CILR_MAYBE;
+ if (ll_file_nolock(file)) {
+ io->ci_lockreq = CILR_NEVER;
+ io->ci_no_srvlock = 1;
+ } else if (file->f_flags & O_APPEND) {
+ io->ci_lockreq = CILR_MANDATORY;
+ }
+}
+
+static ssize_t
+ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
+ struct file *file, enum cl_io_type iot,
+ loff_t *ppos, size_t count)
+{
+ struct ll_inode_info *lli = ll_i2info(file->f_dentry->d_inode);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct cl_io *io;
+ ssize_t result;
+
+restart:
+ io = ccc_env_thread_io(env);
+ ll_io_init(io, file, iot == CIT_WRITE);
+
+ if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) {
+ struct vvp_io *vio = vvp_env_io(env);
+ struct ccc_io *cio = ccc_env_io(env);
+ int write_mutex_locked = 0;
+
+ cio->cui_fd = LUSTRE_FPRIVATE(file);
+ vio->cui_io_subtype = args->via_io_subtype;
+
+ switch (vio->cui_io_subtype) {
+ case IO_NORMAL:
+ cio->cui_iov = args->u.normal.via_iov;
+ cio->cui_nrsegs = args->u.normal.via_nrsegs;
+ cio->cui_tot_nrsegs = cio->cui_nrsegs;
+ cio->cui_iocb = args->u.normal.via_iocb;
+ if ((iot == CIT_WRITE) &&
+ !(cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+			if (mutex_lock_interruptible(
+						&lli->lli_write_mutex))
+ GOTO(out, result = -ERESTARTSYS);
+ write_mutex_locked = 1;
+ } else if (iot == CIT_READ) {
+ down_read(&lli->lli_trunc_sem);
+ }
+ break;
+ case IO_SENDFILE:
+ vio->u.sendfile.cui_actor = args->u.sendfile.via_actor;
+ vio->u.sendfile.cui_target = args->u.sendfile.via_target;
+ break;
+ case IO_SPLICE:
+ vio->u.splice.cui_pipe = args->u.splice.via_pipe;
+ vio->u.splice.cui_flags = args->u.splice.via_flags;
+ break;
+ default:
+			CERROR("Unknown IO type - %u\n", vio->cui_io_subtype);
+ LBUG();
+ }
+ result = cl_io_loop(env, io);
+ if (write_mutex_locked)
+ mutex_unlock(&lli->lli_write_mutex);
+ else if (args->via_io_subtype == IO_NORMAL && iot == CIT_READ)
+ up_read(&lli->lli_trunc_sem);
+ } else {
+ /* cl_io_rw_init() handled IO */
+ result = io->ci_result;
+ }
+
+ if (io->ci_nob > 0) {
+ result = io->ci_nob;
+ *ppos = io->u.ci_wr.wr.crw_pos;
+ }
+ GOTO(out, result);
+out:
+ cl_io_fini(env, io);
+	/* If anything has been read/written (result != 0), we just return
+	 * the short read/write instead of restarting the io. */
+ if (result == 0 && io->ci_need_restart) {
+ CDEBUG(D_VFSTRACE, "Restart %s on %s from %lld, count:%zd\n",
+ iot == CIT_READ ? "read" : "write",
+ file->f_dentry->d_name.name, *ppos, count);
+ LASSERTF(io->ci_nob == 0, "%zd", io->ci_nob);
+ goto restart;
+ }
+
+ if (iot == CIT_READ) {
+ if (result >= 0)
+ ll_stats_ops_tally(ll_i2sbi(file->f_dentry->d_inode),
+ LPROC_LL_READ_BYTES, result);
+ } else if (iot == CIT_WRITE) {
+ if (result >= 0) {
+ ll_stats_ops_tally(ll_i2sbi(file->f_dentry->d_inode),
+ LPROC_LL_WRITE_BYTES, result);
+ fd->fd_write_failed = false;
+ } else if (result != -ERESTARTSYS) {
+ fd->fd_write_failed = true;
+ }
+ }
+
+ return result;
+}
+
+/*
+ * XXX: exact copy from kernel code (__generic_file_aio_write_nolock)
+ */
+static int ll_file_get_iov_count(const struct iovec *iov,
+ unsigned long *nr_segs, size_t *count)
+{
+ size_t cnt = 0;
+ unsigned long seg;
+
+ for (seg = 0; seg < *nr_segs; seg++) {
+ const struct iovec *iv = &iov[seg];
+
+ /*
+ * If any segment has a negative length, or the cumulative
+ * length ever wraps negative then return -EINVAL.
+ */
+ cnt += iv->iov_len;
+ if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
+ return -EINVAL;
+ if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
+ continue;
+ if (seg == 0)
+ return -EFAULT;
+ *nr_segs = seg;
+ cnt -= iv->iov_len; /* This segment is no good */
+ break;
+ }
+ *count = cnt;
+ return 0;
+}
+
+static ssize_t ll_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ struct lu_env *env;
+ struct vvp_io_args *args;
+ size_t count;
+ ssize_t result;
+ int refcheck;
+
+ result = ll_file_get_iov_count(iov, &nr_segs, &count);
+ if (result)
+ return result;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ args = vvp_env_args(env, IO_NORMAL);
+ args->u.normal.via_iov = (struct iovec *)iov;
+ args->u.normal.via_nrsegs = nr_segs;
+ args->u.normal.via_iocb = iocb;
+
+ result = ll_file_io_generic(env, args, iocb->ki_filp, CIT_READ,
+ &iocb->ki_pos, count);
+ cl_env_put(env, &refcheck);
+ return result;
+}
+
+static ssize_t ll_file_read(struct file *file, char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct lu_env *env;
+ struct iovec *local_iov;
+ struct kiocb *kiocb;
+ ssize_t result;
+ int refcheck;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ local_iov = &vvp_env_info(env)->vti_local_iov;
+ kiocb = &vvp_env_info(env)->vti_kiocb;
+ local_iov->iov_base = (void __user *)buf;
+ local_iov->iov_len = count;
+ init_sync_kiocb(kiocb, file);
+ kiocb->ki_pos = *ppos;
+ kiocb->ki_nbytes = count;
+
+ result = ll_file_aio_read(kiocb, local_iov, 1, kiocb->ki_pos);
+ *ppos = kiocb->ki_pos;
+
+ cl_env_put(env, &refcheck);
+ return result;
+}
+
+/*
+ * Write to a file (through the page cache).
+ */
+static ssize_t ll_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ struct lu_env *env;
+ struct vvp_io_args *args;
+ size_t count;
+ ssize_t result;
+ int refcheck;
+
+ result = ll_file_get_iov_count(iov, &nr_segs, &count);
+ if (result)
+ return result;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ args = vvp_env_args(env, IO_NORMAL);
+ args->u.normal.via_iov = (struct iovec *)iov;
+ args->u.normal.via_nrsegs = nr_segs;
+ args->u.normal.via_iocb = iocb;
+
+ result = ll_file_io_generic(env, args, iocb->ki_filp, CIT_WRITE,
+ &iocb->ki_pos, count);
+ cl_env_put(env, &refcheck);
+ return result;
+}
+
+static ssize_t ll_file_write(struct file *file, const char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct lu_env *env;
+ struct iovec *local_iov;
+ struct kiocb *kiocb;
+ ssize_t result;
+ int refcheck;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ local_iov = &vvp_env_info(env)->vti_local_iov;
+ kiocb = &vvp_env_info(env)->vti_kiocb;
+ local_iov->iov_base = (void __user *)buf;
+ local_iov->iov_len = count;
+ init_sync_kiocb(kiocb, file);
+ kiocb->ki_pos = *ppos;
+ kiocb->ki_nbytes = count;
+
+ result = ll_file_aio_write(kiocb, local_iov, 1, kiocb->ki_pos);
+ *ppos = kiocb->ki_pos;
+
+ cl_env_put(env, &refcheck);
+ return result;
+}
+
+/*
+ * Send file content (through the page cache) somewhere with a helper
+ */
+static ssize_t ll_file_splice_read(struct file *in_file, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t count,
+ unsigned int flags)
+{
+ struct lu_env *env;
+ struct vvp_io_args *args;
+ ssize_t result;
+ int refcheck;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ args = vvp_env_args(env, IO_SPLICE);
+ args->u.splice.via_pipe = pipe;
+ args->u.splice.via_flags = flags;
+
+ result = ll_file_io_generic(env, args, in_file, CIT_READ, ppos, count);
+ cl_env_put(env, &refcheck);
+ return result;
+}
+
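+/* Recreate the OST objects described by @oi for this inode: build a scratch
+ * copy of the current layout and issue obd_create() with the
+ * OBD_FL_RECREATE_OBJS flag set; @ost_idx is passed down in oa->o_nlink. */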
+static int ll_lov_recreate(struct inode *inode, struct ost_id *oi,
+ obd_count ost_idx)
+{
+ struct obd_export *exp = ll_i2dtexp(inode);
+ struct obd_trans_info oti = { 0 };
+ struct obdo *oa = NULL;
+ int lsm_size;
+ int rc = 0;
+ struct lov_stripe_md *lsm = NULL, *lsm2;
+
+ OBDO_ALLOC(oa);
+ if (oa == NULL)
+ return -ENOMEM;
+
+ lsm = ccc_inode_lsm_get(inode);
+ if (!lsm_has_objects(lsm))
+ GOTO(out, rc = -ENOENT);
+
+ lsm_size = sizeof(*lsm) + (sizeof(struct lov_oinfo) *
+ (lsm->lsm_stripe_count));
+
+ OBD_ALLOC_LARGE(lsm2, lsm_size);
+ if (lsm2 == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ oa->o_oi = *oi;
+ oa->o_nlink = ost_idx;
+ oa->o_flags |= OBD_FL_RECREATE_OBJS;
+ oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS | OBD_MD_FLGROUP;
+ obdo_from_inode(oa, inode, OBD_MD_FLTYPE | OBD_MD_FLATIME |
+ OBD_MD_FLMTIME | OBD_MD_FLCTIME);
+ obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid);
+ memcpy(lsm2, lsm, lsm_size);
+ ll_inode_size_lock(inode);
+ rc = obd_create(NULL, exp, oa, &lsm2, &oti);
+ ll_inode_size_unlock(inode);
+
+ OBD_FREE_LARGE(lsm2, lsm_size);
+ GOTO(out, rc);
+out:
+ ccc_inode_lsm_put(inode, lsm);
+ OBDO_FREE(oa);
+ return rc;
+}
+
+static int ll_lov_recreate_obj(struct inode *inode, unsigned long arg)
+{
+ struct ll_recreate_obj ucreat;
+ struct ost_id oi;
+
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&ucreat, (struct ll_recreate_obj *)arg,
+ sizeof(ucreat)))
+ return -EFAULT;
+
+ ostid_set_seq_mdt0(&oi);
+ ostid_set_id(&oi, ucreat.lrc_id);
+ return ll_lov_recreate(inode, &oi, ucreat.lrc_ost_idx);
+}
+
+static int ll_lov_recreate_fid(struct inode *inode, unsigned long arg)
+{
+ struct lu_fid fid;
+ struct ost_id oi;
+ obd_count ost_idx;
+
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(&fid, (struct lu_fid *)arg, sizeof(fid)))
+ return -EFAULT;
+
+ fid_to_ostid(&fid, &oi);
+ ost_idx = (fid_seq(&fid) >> 16) & 0xffff;
+ return ll_lov_recreate(inode, &oi, ost_idx);
+}
+
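+/* Set striping on a file that has no layout yet: return -EEXIST if a layout
+ * is already cached, otherwise re-open the file with an intent carrying the
+ * lov_user_md so the MDS creates the layout, then drop the extra open
+ * handle. */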
+int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file,
+ int flags, struct lov_user_md *lum, int lum_size)
+{
+ struct lov_stripe_md *lsm = NULL;
+ struct lookup_intent oit = {.it_op = IT_OPEN, .it_flags = flags};
+ int rc = 0;
+
+ lsm = ccc_inode_lsm_get(inode);
+ if (lsm != NULL) {
+ ccc_inode_lsm_put(inode, lsm);
+ CDEBUG(D_IOCTL, "stripe already exists for ino %lu\n",
+ inode->i_ino);
+ return -EEXIST;
+ }
+
+ ll_inode_size_lock(inode);
+ rc = ll_intent_file_open(file, lum, lum_size, &oit);
+ if (rc)
+ GOTO(out, rc);
+ rc = oit.d.lustre.it_status;
+ if (rc < 0)
+ GOTO(out_req_free, rc);
+
+ ll_release_openhandle(file->f_dentry, &oit);
+
+ out:
+ ll_inode_size_unlock(inode);
+ ll_intent_release(&oit);
+ ccc_inode_lsm_put(inode, lsm);
+ return rc;
+out_req_free:
+ ptlrpc_req_finished((struct ptlrpc_request *) oit.d.lustre.it_data);
+ goto out;
+}
+
+int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
+ struct lov_mds_md **lmmp, int *lmm_size,
+ struct ptlrpc_request **request)
+{
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct mdt_body *body;
+ struct lov_mds_md *lmm = NULL;
+ struct ptlrpc_request *req = NULL;
+ struct md_op_data *op_data;
+ int rc, lmmsize;
+
+ rc = ll_get_max_mdsize(sbi, &lmmsize);
+ if (rc)
+ return rc;
+
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, filename,
+ strlen(filename), lmmsize,
+ LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+
+ op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
+ rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
+ ll_finish_md_op_data(op_data);
+ if (rc < 0) {
+		CDEBUG(D_INFO, "md_getattr_name failed on %s: rc %d\n",
+		       filename, rc);
+ GOTO(out, rc);
+ }
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ LASSERT(body != NULL); /* checked by mdc_getattr_name */
+
+ lmmsize = body->eadatasize;
+
+ if (!(body->valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
+ lmmsize == 0) {
+ GOTO(out, rc = -ENODATA);
+ }
+
+ lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize);
+ LASSERT(lmm != NULL);
+
+ if ((lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V1)) &&
+ (lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3))) {
+ GOTO(out, rc = -EPROTO);
+ }
+
+ /*
+ * This is coming from the MDS, so is probably in
+ * little endian. We convert it to host endian before
+ * passing it to userspace.
+ */
+ if (LOV_MAGIC != cpu_to_le32(LOV_MAGIC)) {
+ int stripe_count;
+
+ stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
+ if (le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_RELEASED)
+ stripe_count = 0;
+
+		/* if the function was called for a directory, avoid
+		 * swabbing non-existent lsm objects */
+ if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) {
+ lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
+ if (S_ISREG(body->mode))
+ lustre_swab_lov_user_md_objects(
+ ((struct lov_user_md_v1 *)lmm)->lmm_objects,
+ stripe_count);
+ } else if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)) {
+ lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
+ if (S_ISREG(body->mode))
+ lustre_swab_lov_user_md_objects(
+ ((struct lov_user_md_v3 *)lmm)->lmm_objects,
+ stripe_count);
+ }
+ }
+
+out:
+ *lmmp = lmm;
+ *lmm_size = lmmsize;
+ *request = req;
+ return rc;
+}
+
+static int ll_lov_setea(struct inode *inode, struct file *file,
+ unsigned long arg)
+{
+ int flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
+ struct lov_user_md *lump;
+ int lum_size = sizeof(struct lov_user_md) +
+ sizeof(struct lov_user_ost_data);
+ int rc;
+
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ return -EPERM;
+
+ OBD_ALLOC_LARGE(lump, lum_size);
+ if (lump == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(lump, (struct lov_user_md *)arg, lum_size)) {
+ OBD_FREE_LARGE(lump, lum_size);
+ return -EFAULT;
+ }
+
+ rc = ll_lov_setstripe_ea_info(inode, file, flags, lump, lum_size);
+
+ OBD_FREE_LARGE(lump, lum_size);
+ return rc;
+}
+
+static int ll_lov_setstripe(struct inode *inode, struct file *file,
+ unsigned long arg)
+{
+ struct lov_user_md_v3 lumv3;
+ struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3;
+ struct lov_user_md_v1 *lumv1p = (struct lov_user_md_v1 *)arg;
+ struct lov_user_md_v3 *lumv3p = (struct lov_user_md_v3 *)arg;
+ int lum_size, rc;
+ int flags = FMODE_WRITE;
+
+ /* first try with v1 which is smaller than v3 */
+ lum_size = sizeof(struct lov_user_md_v1);
+ if (copy_from_user(lumv1, lumv1p, lum_size))
+ return -EFAULT;
+
+ if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) {
+ lum_size = sizeof(struct lov_user_md_v3);
+ if (copy_from_user(&lumv3, lumv3p, lum_size))
+ return -EFAULT;
+ }
+
+ rc = ll_lov_setstripe_ea_info(inode, file, flags, lumv1, lum_size);
+ if (rc == 0) {
+ struct lov_stripe_md *lsm;
+ __u32 gen;
+
+ put_user(0, &lumv1p->lmm_stripe_count);
+
+ ll_layout_refresh(inode, &gen);
+ lsm = ccc_inode_lsm_get(inode);
+ rc = obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode),
+ 0, lsm, (void *)arg);
+ ccc_inode_lsm_put(inode, lsm);
+ }
+ return rc;
+}
+
+static int ll_lov_getstripe(struct inode *inode, unsigned long arg)
+{
+ struct lov_stripe_md *lsm;
+ int rc = -ENODATA;
+
+ lsm = ccc_inode_lsm_get(inode);
+ if (lsm != NULL)
+ rc = obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode), 0,
+ lsm, (void *)arg);
+ ccc_inode_lsm_put(inode, lsm);
+ return rc;
+}
+
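+/* LL_IOC_GROUP_LOCK: take a client group lock with group id @arg.  Only one
+ * group lock per file descriptor is allowed; a second request, or losing
+ * the race against another thread, returns -EINVAL. */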
+int ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ccc_grouplock grouplock;
+ int rc;
+
+ if (ll_file_nolock(file))
+ return -EOPNOTSUPP;
+
+ spin_lock(&lli->lli_lock);
+ if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
+		CWARN("group lock already exists with gid %lu\n",
+ fd->fd_grouplock.cg_gid);
+ spin_unlock(&lli->lli_lock);
+ return -EINVAL;
+ }
+ LASSERT(fd->fd_grouplock.cg_lock == NULL);
+ spin_unlock(&lli->lli_lock);
+
+ rc = cl_get_grouplock(cl_i2info(inode)->lli_clob,
+ arg, (file->f_flags & O_NONBLOCK), &grouplock);
+ if (rc)
+ return rc;
+
+ spin_lock(&lli->lli_lock);
+ if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
+ spin_unlock(&lli->lli_lock);
+ CERROR("another thread just won the race\n");
+ cl_put_grouplock(&grouplock);
+ return -EINVAL;
+ }
+
+ fd->fd_flags |= LL_FILE_GROUP_LOCKED;
+ fd->fd_grouplock = grouplock;
+ spin_unlock(&lli->lli_lock);
+
+ CDEBUG(D_INFO, "group lock %lu obtained\n", arg);
+ return 0;
+}
+
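+/* LL_IOC_GROUP_UNLOCK: release the group lock held on this file descriptor;
+ * the group id @arg must match the one used to acquire it. */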
+int ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ccc_grouplock grouplock;
+
+ spin_lock(&lli->lli_lock);
+ if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+ spin_unlock(&lli->lli_lock);
+ CWARN("no group lock held\n");
+ return -EINVAL;
+ }
+ LASSERT(fd->fd_grouplock.cg_lock != NULL);
+
+ if (fd->fd_grouplock.cg_gid != arg) {
+ CWARN("group lock %lu doesn't match current id %lu\n",
+ arg, fd->fd_grouplock.cg_gid);
+ spin_unlock(&lli->lli_lock);
+ return -EINVAL;
+ }
+
+ grouplock = fd->fd_grouplock;
+ memset(&fd->fd_grouplock, 0, sizeof(fd->fd_grouplock));
+ fd->fd_flags &= ~LL_FILE_GROUP_LOCKED;
+ spin_unlock(&lli->lli_lock);
+
+ cl_put_grouplock(&grouplock);
+ CDEBUG(D_INFO, "group lock %lu released\n", arg);
+ return 0;
+}
+
+/**
+ * Close inode open handle
+ *
+ * \param dentry [in] dentry which contains the inode
+ * \param it [in,out] intent which contains open info and result
+ *
+ * \retval 0 success
+ * \retval <0 failure
+ */
+int ll_release_openhandle(struct dentry *dentry, struct lookup_intent *it)
+{
+ struct inode *inode = dentry->d_inode;
+ struct obd_client_handle *och;
+ int rc;
+
+ LASSERT(inode);
+
+ /* Root ? Do nothing. */
+ if (dentry->d_inode->i_sb->s_root == dentry)
+ return 0;
+
+ /* No open handle to close? Move away */
+ if (!it_disposition(it, DISP_OPEN_OPEN))
+ return 0;
+
+ LASSERT(it_open_error(DISP_OPEN_OPEN, it) == 0);
+
+ OBD_ALLOC(och, sizeof(*och));
+ if (!och)
+ GOTO(out, rc = -ENOMEM);
+
+ ll_och_fill(ll_i2sbi(inode)->ll_md_exp,
+ ll_i2info(inode), it, och);
+
+ rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
+ inode, och);
+ out:
+ /* this one is in place of ll_file_open */
+ if (it_disposition(it, DISP_ENQ_OPEN_REF)) {
+ ptlrpc_req_finished(it->d.lustre.it_data);
+ it_clear_disposition(it, DISP_ENQ_OPEN_REF);
+ }
+ return rc;
+}
+
+/**
+ * Get the size of the inode for which the FIEMAP mapping is requested,
+ * then make the FIEMAP get_info call and return the result.
+ */
+int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap,
+ int num_bytes)
+{
+ struct obd_export *exp = ll_i2dtexp(inode);
+ struct lov_stripe_md *lsm = NULL;
+ struct ll_fiemap_info_key fm_key = { .name = KEY_FIEMAP, };
+ int vallen = num_bytes;
+ int rc;
+
+ /* Checks for fiemap flags */
+ if (fiemap->fm_flags & ~LUSTRE_FIEMAP_FLAGS_COMPAT) {
+ fiemap->fm_flags &= ~LUSTRE_FIEMAP_FLAGS_COMPAT;
+ return -EBADR;
+ }
+
+ /* Check for FIEMAP_FLAG_SYNC */
+ if (fiemap->fm_flags & FIEMAP_FLAG_SYNC) {
+ rc = filemap_fdatawrite(inode->i_mapping);
+ if (rc)
+ return rc;
+ }
+
+ lsm = ccc_inode_lsm_get(inode);
+ if (lsm == NULL)
+ return -ENOENT;
+
+ /* If the stripe_count > 1 and the application does not understand
+ * DEVICE_ORDER flag, then it cannot interpret the extents correctly.
+ */
+ if (lsm->lsm_stripe_count > 1 &&
+ !(fiemap->fm_flags & FIEMAP_FLAG_DEVICE_ORDER))
+ GOTO(out, rc = -EOPNOTSUPP);
+
+ fm_key.oa.o_oi = lsm->lsm_oi;
+ fm_key.oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
+
+ obdo_from_inode(&fm_key.oa, inode, OBD_MD_FLSIZE);
+ obdo_set_parent_fid(&fm_key.oa, &ll_i2info(inode)->lli_fid);
+	/* If the file size is 0, there are no objects to map */
+ if (fm_key.oa.o_size == 0) {
+ fiemap->fm_mapped_extents = 0;
+ GOTO(out, rc = 0);
+ }
+
+ memcpy(&fm_key.fiemap, fiemap, sizeof(*fiemap));
+
+ rc = obd_get_info(NULL, exp, sizeof(fm_key), &fm_key, &vallen,
+ fiemap, lsm);
+ if (rc)
+ CERROR("obd_get_info failed: rc = %d\n", rc);
+
+out:
+ ccc_inode_lsm_put(inode, lsm);
+ return rc;
+}
+
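+/* OBD_IOC_FID2PATH handler: copy in the user's getinfo_fid2path request,
+ * allocate a buffer large enough for gf_pathlen bytes of path, let the MDC
+ * resolve the FID and copy the result back to user space. */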
+int ll_fid2path(struct inode *inode, void *arg)
+{
+ struct obd_export *exp = ll_i2mdexp(inode);
+ struct getinfo_fid2path *gfout, *gfin;
+ int outsize, rc;
+
+ if (!cfs_capable(CFS_CAP_DAC_READ_SEARCH) &&
+ !(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
+ return -EPERM;
+
+ /* Need to get the buflen */
+ OBD_ALLOC_PTR(gfin);
+ if (gfin == NULL)
+ return -ENOMEM;
+ if (copy_from_user(gfin, arg, sizeof(*gfin))) {
+ OBD_FREE_PTR(gfin);
+ return -EFAULT;
+ }
+
+ outsize = sizeof(*gfout) + gfin->gf_pathlen;
+ OBD_ALLOC(gfout, outsize);
+ if (gfout == NULL) {
+ OBD_FREE_PTR(gfin);
+ return -ENOMEM;
+ }
+ memcpy(gfout, gfin, sizeof(*gfout));
+ OBD_FREE_PTR(gfin);
+
+ /* Call mdc_iocontrol */
+ rc = obd_iocontrol(OBD_IOC_FID2PATH, exp, outsize, gfout, NULL);
+ if (rc)
+ GOTO(gf_free, rc);
+
+ if (copy_to_user(arg, gfout, outsize))
+ rc = -EFAULT;
+
+gf_free:
+ OBD_FREE(gfout, outsize);
+ return rc;
+}
+
+static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg)
+{
+ struct ll_user_fiemap *fiemap_s;
+ size_t num_bytes, ret_bytes;
+ unsigned int extent_count;
+ int rc = 0;
+
+	/* Get the extent count so we can calculate the size of the
+	 * required fiemap buffer */
+ if (get_user(extent_count,
+ &((struct ll_user_fiemap __user *)arg)->fm_extent_count))
+ return -EFAULT;
+ num_bytes = sizeof(*fiemap_s) + (extent_count *
+ sizeof(struct ll_fiemap_extent));
+
+ OBD_ALLOC_LARGE(fiemap_s, num_bytes);
+ if (fiemap_s == NULL)
+ return -ENOMEM;
+
+ /* get the fiemap value */
+ if (copy_from_user(fiemap_s, (struct ll_user_fiemap __user *)arg,
+ sizeof(*fiemap_s)))
+ GOTO(error, rc = -EFAULT);
+
+	/* If fm_extent_count is non-zero, read the first extent since
+	 * it is used to calculate the end_offset and device from the
+	 * previous fiemap call. */
+ if (extent_count) {
+ if (copy_from_user(&fiemap_s->fm_extents[0],
+ (char __user *)arg + sizeof(*fiemap_s),
+ sizeof(struct ll_fiemap_extent)))
+ GOTO(error, rc = -EFAULT);
+ }
+
+ rc = ll_do_fiemap(inode, fiemap_s, num_bytes);
+ if (rc)
+ GOTO(error, rc);
+
+ ret_bytes = sizeof(struct ll_user_fiemap);
+
+ if (extent_count != 0)
+ ret_bytes += (fiemap_s->fm_mapped_extents *
+ sizeof(struct ll_fiemap_extent));
+
+ if (copy_to_user((void *)arg, fiemap_s, ret_bytes))
+ rc = -EFAULT;
+
+error:
+ OBD_FREE_LARGE(fiemap_s, num_bytes);
+ return rc;
+}
+
+/*
+ * Read the data_version for an inode.
+ *
+ * This value is computed from the stripe object versions on the OSTs.
+ * The version is computed using server-side locking.
+ *
+ * @param extent_lock Take an extent lock.  Not needed if the process is
+ *		      already holding the OST object group locks.
+ */
+int ll_data_version(struct inode *inode, __u64 *data_version,
+ int extent_lock)
+{
+ struct lov_stripe_md *lsm = NULL;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct obdo *obdo = NULL;
+ int rc;
+
+	/* If there is no stripe, the version is considered to be 0. */
+ lsm = ccc_inode_lsm_get(inode);
+ if (!lsm_has_objects(lsm)) {
+ *data_version = 0;
+ CDEBUG(D_INODE, "No object for inode\n");
+ GOTO(out, rc = 0);
+ }
+
+ OBD_ALLOC_PTR(obdo);
+ if (obdo == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, NULL, obdo, 0, extent_lock);
+ if (rc == 0) {
+ if (!(obdo->o_valid & OBD_MD_FLDATAVERSION))
+ rc = -EOPNOTSUPP;
+ else
+ *data_version = obdo->o_data_version;
+ }
+
+ OBD_FREE_PTR(obdo);
+out:
+ ccc_inode_lsm_put(inode, lsm);
+ return rc;
+}
+
+struct ll_swap_stack {
+ struct iattr ia1, ia2;
+ __u64 dv1, dv2;
+ struct inode *inode1, *inode2;
+ bool check_dv1, check_dv2;
+};
+
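+/* LL_IOC_LOV_SWAP_LAYOUTS: exchange the layouts of two files on the same
+ * file system.  Both inodes must be writable; the files are ordered by FID,
+ * optionally group-locked to flush dirty cache, the data versions are
+ * verified if requested, and the swap itself is performed by the MDT via
+ * obd_iocontrol().  mtime/atime are restored afterwards when
+ * SWAP_LAYOUTS_KEEP_{MTIME,ATIME} is set. */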
+static int ll_swap_layouts(struct file *file1, struct file *file2,
+ struct lustre_swap_layouts *lsl)
+{
+ struct mdc_swap_layouts msl;
+ struct md_op_data *op_data;
+ __u32 gid;
+ __u64 dv;
+ struct ll_swap_stack *llss = NULL;
+ int rc;
+
+ OBD_ALLOC_PTR(llss);
+ if (llss == NULL)
+ return -ENOMEM;
+
+ llss->inode1 = file1->f_dentry->d_inode;
+ llss->inode2 = file2->f_dentry->d_inode;
+
+ if (!S_ISREG(llss->inode2->i_mode))
+ GOTO(free, rc = -EINVAL);
+
+ if (inode_permission(llss->inode1, MAY_WRITE) ||
+ inode_permission(llss->inode2, MAY_WRITE))
+ GOTO(free, rc = -EPERM);
+
+ if (llss->inode2->i_sb != llss->inode1->i_sb)
+ GOTO(free, rc = -EXDEV);
+
+	/* we use 2 bools because they are easier to swap than 2 bits */
+ if (lsl->sl_flags & SWAP_LAYOUTS_CHECK_DV1)
+ llss->check_dv1 = true;
+
+ if (lsl->sl_flags & SWAP_LAYOUTS_CHECK_DV2)
+ llss->check_dv2 = true;
+
+ /* we cannot use lsl->sl_dvX directly because we may swap them */
+ llss->dv1 = lsl->sl_dv1;
+ llss->dv2 = lsl->sl_dv2;
+
+ rc = lu_fid_cmp(ll_inode2fid(llss->inode1), ll_inode2fid(llss->inode2));
+ if (rc == 0) /* same file, done! */
+ GOTO(free, rc = 0);
+
+ if (rc < 0) { /* sequentialize it */
+ swap(llss->inode1, llss->inode2);
+ swap(file1, file2);
+ swap(llss->dv1, llss->dv2);
+ swap(llss->check_dv1, llss->check_dv2);
+ }
+
+ gid = lsl->sl_gid;
+ if (gid != 0) { /* application asks to flush dirty cache */
+ rc = ll_get_grouplock(llss->inode1, file1, gid);
+ if (rc < 0)
+ GOTO(free, rc);
+
+ rc = ll_get_grouplock(llss->inode2, file2, gid);
+ if (rc < 0) {
+ ll_put_grouplock(llss->inode1, file1, gid);
+ GOTO(free, rc);
+ }
+ }
+
+	/* to be able to restore mtime and atime after the swap
+	 * we first need to save them */
+ if (lsl->sl_flags &
+ (SWAP_LAYOUTS_KEEP_MTIME | SWAP_LAYOUTS_KEEP_ATIME)) {
+ llss->ia1.ia_mtime = llss->inode1->i_mtime;
+ llss->ia1.ia_atime = llss->inode1->i_atime;
+ llss->ia1.ia_valid = ATTR_MTIME | ATTR_ATIME;
+ llss->ia2.ia_mtime = llss->inode2->i_mtime;
+ llss->ia2.ia_atime = llss->inode2->i_atime;
+ llss->ia2.ia_valid = ATTR_MTIME | ATTR_ATIME;
+ }
+
+	/* final check: before swapping the layouts, verify that the
+	 * data version has not changed (if requested) */
+ if (llss->check_dv1) {
+ rc = ll_data_version(llss->inode1, &dv, 0);
+ if (rc)
+ GOTO(putgl, rc);
+ if (dv != llss->dv1)
+ GOTO(putgl, rc = -EAGAIN);
+ }
+
+ if (llss->check_dv2) {
+ rc = ll_data_version(llss->inode2, &dv, 0);
+ if (rc)
+ GOTO(putgl, rc);
+ if (dv != llss->dv2)
+ GOTO(putgl, rc = -EAGAIN);
+ }
+
+	/* struct md_op_data is used to send the swap args to the mdt;
+	 * only the flags are missing, so we pass struct mdc_swap_layouts
+	 * through md_op_data->op_data */
+	/* flags from user space have to be converted before they are sent to
+	 * the server; no flag is sent today, they are only used on the client */
+ msl.msl_flags = 0;
+ rc = -ENOMEM;
+ op_data = ll_prep_md_op_data(NULL, llss->inode1, llss->inode2, NULL, 0,
+ 0, LUSTRE_OPC_ANY, &msl);
+ if (IS_ERR(op_data))
+ GOTO(free, rc = PTR_ERR(op_data));
+
+ rc = obd_iocontrol(LL_IOC_LOV_SWAP_LAYOUTS, ll_i2mdexp(llss->inode1),
+ sizeof(*op_data), op_data, NULL);
+ ll_finish_md_op_data(op_data);
+
+putgl:
+ if (gid != 0) {
+ ll_put_grouplock(llss->inode2, file2, gid);
+ ll_put_grouplock(llss->inode1, file1, gid);
+ }
+
+ /* rc can be set from obd_iocontrol() or from a GOTO(putgl, ...) */
+ if (rc != 0)
+ GOTO(free, rc);
+
+ /* clear useless flags */
+ if (!(lsl->sl_flags & SWAP_LAYOUTS_KEEP_MTIME)) {
+ llss->ia1.ia_valid &= ~ATTR_MTIME;
+ llss->ia2.ia_valid &= ~ATTR_MTIME;
+ }
+
+ if (!(lsl->sl_flags & SWAP_LAYOUTS_KEEP_ATIME)) {
+ llss->ia1.ia_valid &= ~ATTR_ATIME;
+ llss->ia2.ia_valid &= ~ATTR_ATIME;
+ }
+
+ /* update time if requested */
+ rc = 0;
+ if (llss->ia2.ia_valid != 0) {
+ mutex_lock(&llss->inode1->i_mutex);
+ rc = ll_setattr(file1->f_dentry, &llss->ia2);
+ mutex_unlock(&llss->inode1->i_mutex);
+ }
+
+ if (llss->ia1.ia_valid != 0) {
+ int rc1;
+
+ mutex_lock(&llss->inode2->i_mutex);
+ rc1 = ll_setattr(file2->f_dentry, &llss->ia1);
+ mutex_unlock(&llss->inode2->i_mutex);
+ if (rc == 0)
+ rc = rc1;
+ }
+
+free:
+ if (llss != NULL)
+ OBD_FREE_PTR(llss);
+
+ return rc;
+}
+
+long ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ int flags, rc;
+
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),cmd=%x\n", inode->i_ino,
+ inode->i_generation, inode, cmd);
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
+
+	/* asm-ppc{,64} declares TCGETS, et al. as type 't' not 'T' */
+ if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
+ return -ENOTTY;
+
+	switch (cmd) {
+ case LL_IOC_GETFLAGS:
+ /* Get the current value of the file flags */
+ return put_user(fd->fd_flags, (int *)arg);
+ case LL_IOC_SETFLAGS:
+ case LL_IOC_CLRFLAGS:
+ /* Set or clear specific file flags */
+ /* XXX This probably needs checks to ensure the flags are
+ * not abused, and to handle any flag side effects.
+ */
+ if (get_user(flags, (int *) arg))
+ return -EFAULT;
+
+ if (cmd == LL_IOC_SETFLAGS) {
+ if ((flags & LL_FILE_IGNORE_LOCK) &&
+ !(file->f_flags & O_DIRECT)) {
+ CERROR("%s: unable to disable locking on "
+ "non-O_DIRECT file\n", current->comm);
+ return -EINVAL;
+ }
+
+ fd->fd_flags |= flags;
+ } else {
+ fd->fd_flags &= ~flags;
+ }
+ return 0;
+ case LL_IOC_LOV_SETSTRIPE:
+ return ll_lov_setstripe(inode, file, arg);
+ case LL_IOC_LOV_SETEA:
+ return ll_lov_setea(inode, file, arg);
+ case LL_IOC_LOV_SWAP_LAYOUTS: {
+ struct file *file2;
+ struct lustre_swap_layouts lsl;
+
+ if (copy_from_user(&lsl, (char *)arg,
+ sizeof(struct lustre_swap_layouts)))
+ return -EFAULT;
+
+ if ((file->f_flags & O_ACCMODE) == 0) /* O_RDONLY */
+ return -EPERM;
+
+ file2 = fget(lsl.sl_fd);
+ if (file2 == NULL)
+ return -EBADF;
+
+ rc = -EPERM;
+ if ((file2->f_flags & O_ACCMODE) != 0) /* O_WRONLY or O_RDWR */
+ rc = ll_swap_layouts(file, file2, &lsl);
+ fput(file2);
+ return rc;
+ }
+ case LL_IOC_LOV_GETSTRIPE:
+ return ll_lov_getstripe(inode, arg);
+ case LL_IOC_RECREATE_OBJ:
+ return ll_lov_recreate_obj(inode, arg);
+ case LL_IOC_RECREATE_FID:
+ return ll_lov_recreate_fid(inode, arg);
+ case FSFILT_IOC_FIEMAP:
+ return ll_ioctl_fiemap(inode, arg);
+ case FSFILT_IOC_GETFLAGS:
+ case FSFILT_IOC_SETFLAGS:
+ return ll_iocontrol(inode, file, cmd, arg);
+ case FSFILT_IOC_GETVERSION_OLD:
+ case FSFILT_IOC_GETVERSION:
+ return put_user(inode->i_generation, (int *)arg);
+ case LL_IOC_GROUP_LOCK:
+ return ll_get_grouplock(inode, file, arg);
+ case LL_IOC_GROUP_UNLOCK:
+ return ll_put_grouplock(inode, file, arg);
+ case IOC_OBD_STATFS:
+ return ll_obd_statfs(inode, (void *)arg);
+
+ /* We need to special case any other ioctls we want to handle,
+ * to send them to the MDS/OST as appropriate and to properly
+ * network encode the arg field.
+ case FSFILT_IOC_SETVERSION_OLD:
+ case FSFILT_IOC_SETVERSION:
+ */
+ case LL_IOC_FLUSHCTX:
+ return ll_flush_ctx(inode);
+ case LL_IOC_PATH2FID: {
+ if (copy_to_user((void *)arg, ll_inode2fid(inode),
+ sizeof(struct lu_fid)))
+ return -EFAULT;
+
+ return 0;
+ }
+ case OBD_IOC_FID2PATH:
+ return ll_fid2path(inode, (void *)arg);
+ case LL_IOC_DATA_VERSION: {
+ struct ioc_data_version idv;
+ int rc;
+
+ if (copy_from_user(&idv, (char *)arg, sizeof(idv)))
+ return -EFAULT;
+
+ rc = ll_data_version(inode, &idv.idv_version,
+ !(idv.idv_flags & LL_DV_NOFLUSH));
+
+ if (rc == 0 && copy_to_user((char *) arg, &idv, sizeof(idv)))
+ return -EFAULT;
+
+ return rc;
+ }
+
+ case LL_IOC_GET_MDTIDX: {
+ int mdtidx;
+
+ mdtidx = ll_get_mdt_idx(inode);
+ if (mdtidx < 0)
+ return mdtidx;
+
+		if (put_user((int)mdtidx, (int *)arg))
+ return -EFAULT;
+
+ return 0;
+ }
+ case OBD_IOC_GETDTNAME:
+ case OBD_IOC_GETMDNAME:
+ return ll_get_obd_name(inode, cmd, arg);
+ case LL_IOC_HSM_STATE_GET: {
+ struct md_op_data *op_data;
+ struct hsm_user_state *hus;
+ int rc;
+
+ OBD_ALLOC_PTR(hus);
+ if (hus == NULL)
+ return -ENOMEM;
+
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
+ LUSTRE_OPC_ANY, hus);
+ if (IS_ERR(op_data)) {
+ OBD_FREE_PTR(hus);
+ return PTR_ERR(op_data);
+ }
+
+ rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
+ op_data, NULL);
+
+ if (copy_to_user((void *)arg, hus, sizeof(*hus)))
+ rc = -EFAULT;
+
+ ll_finish_md_op_data(op_data);
+ OBD_FREE_PTR(hus);
+ return rc;
+ }
+ case LL_IOC_HSM_STATE_SET: {
+ struct md_op_data *op_data;
+ struct hsm_state_set *hss;
+ int rc;
+
+ OBD_ALLOC_PTR(hss);
+ if (hss == NULL)
+ return -ENOMEM;
+ if (copy_from_user(hss, (char *)arg, sizeof(*hss))) {
+ OBD_FREE_PTR(hss);
+ return -EFAULT;
+ }
+
+ /* Non-root users are forbidden to set or clear flags which are
+ * NOT defined in HSM_USER_MASK. */
+ if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK)
+ && !cfs_capable(CFS_CAP_SYS_ADMIN)) {
+ OBD_FREE_PTR(hss);
+ return -EPERM;
+ }
+
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
+ LUSTRE_OPC_ANY, hss);
+ if (IS_ERR(op_data)) {
+ OBD_FREE_PTR(hss);
+ return PTR_ERR(op_data);
+ }
+
+ rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
+ op_data, NULL);
+
+ ll_finish_md_op_data(op_data);
+
+ OBD_FREE_PTR(hss);
+ return rc;
+ }
+ case LL_IOC_HSM_ACTION: {
+ struct md_op_data *op_data;
+ struct hsm_current_action *hca;
+ int rc;
+
+ OBD_ALLOC_PTR(hca);
+ if (hca == NULL)
+ return -ENOMEM;
+
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
+ LUSTRE_OPC_ANY, hca);
+ if (IS_ERR(op_data)) {
+ OBD_FREE_PTR(hca);
+ return PTR_ERR(op_data);
+ }
+
+ rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
+ op_data, NULL);
+
+ if (copy_to_user((char *)arg, hca, sizeof(*hca)))
+ rc = -EFAULT;
+
+ ll_finish_md_op_data(op_data);
+ OBD_FREE_PTR(hca);
+ return rc;
+ }
+ default: {
+ int err;
+
+ if (LLIOC_STOP ==
+ ll_iocontrol_call(inode, file, cmd, arg, &err))
+ return err;
+
+ return obd_iocontrol(cmd, ll_i2dtexp(inode), 0, NULL,
+ (void *)arg);
+ }
+ }
+}
+
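+/* llseek: SEEK_END/SEEK_HOLE/SEEK_DATA need an up-to-date file size, so
+ * glimpse it from the OSTs first, then let generic_file_llseek_size() do
+ * the actual positioning within ll_file_maxbytes(). */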
+loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ loff_t retval, eof = 0;
+
+ retval = offset + ((origin == SEEK_END) ? i_size_read(inode) :
+ (origin == SEEK_CUR) ? file->f_pos : 0);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), to=%llu=%#llx(%d)\n",
+ inode->i_ino, inode->i_generation, inode, retval, retval,
+ origin);
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LLSEEK, 1);
+
+ if (origin == SEEK_END || origin == SEEK_HOLE || origin == SEEK_DATA) {
+ retval = ll_glimpse_size(inode);
+ if (retval != 0)
+ return retval;
+ eof = i_size_read(inode);
+ }
+
+ retval = generic_file_llseek_size(file, offset, origin,
+ ll_file_maxbytes(inode), eof);
+ return retval;
+}
+
+int ll_flush(struct file *file, fl_owner_t id)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ int rc, err;
+
+ LASSERT(!S_ISDIR(inode->i_mode));
+
+ /* catch async errors that were recorded back when async writeback
+ * failed for pages in this mapping. */
+ rc = lli->lli_async_rc;
+ lli->lli_async_rc = 0;
+ err = lov_read_and_clear_async_rc(lli->lli_clob);
+ if (rc == 0)
+ rc = err;
+
+	/* The application has already been told about the write failure.
+	 * Do not report the failure again. */
+ if (fd->fd_write_failed)
+ return 0;
+ return rc ? -EIO : 0;
+}
+
+/**
+ * Called to make sure a portion of the file has been written out.
+ * Depending on @mode it may also send OST_SYNC RPCs to the OSTs.
+ *
+ * Returns how many pages have been written.
+ */
+int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
+ enum cl_fsync_mode mode, int ignore_layout)
+{
+ struct cl_env_nest nest;
+ struct lu_env *env;
+ struct cl_io *io;
+ struct obd_capa *capa = NULL;
+ struct cl_fsync_io *fio;
+ int result;
+
+ if (mode != CL_FSYNC_NONE && mode != CL_FSYNC_LOCAL &&
+ mode != CL_FSYNC_DISCARD && mode != CL_FSYNC_ALL)
+ return -EINVAL;
+
+ env = cl_env_nested_get(&nest);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ capa = ll_osscapa_get(inode, CAPA_OPC_OSS_WRITE);
+
+ io = ccc_env_thread_io(env);
+ io->ci_obj = cl_i2info(inode)->lli_clob;
+ io->ci_ignore_layout = ignore_layout;
+
+ /* initialize parameters for sync */
+ fio = &io->u.ci_fsync;
+ fio->fi_capa = capa;
+ fio->fi_start = start;
+ fio->fi_end = end;
+ fio->fi_fid = ll_inode2fid(inode);
+ fio->fi_mode = mode;
+ fio->fi_nr_written = 0;
+
+ if (cl_io_init(env, io, CIT_FSYNC, io->ci_obj) == 0)
+ result = cl_io_loop(env, io);
+ else
+ result = io->ci_result;
+ if (result == 0)
+ result = fio->fi_nr_written;
+ cl_io_fini(env, io);
+ cl_env_nested_put(&nest, env);
+
+ capa_put(capa);
+
+ return result;
+}
+
+/*
+ * When dentry is provided (the 'else' case), *file->f_dentry may be
+ * null and dentry must be used directly rather than pulled from
+ * *file->f_dentry as is done otherwise.
+ */
+
+int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+{
+ struct dentry *dentry = file->f_dentry;
+ struct inode *inode = dentry->d_inode;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ptlrpc_request *req;
+ struct obd_capa *oc;
+ int rc, err;
+
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
+ inode->i_generation, inode);
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC, 1);
+
+ rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ mutex_lock(&inode->i_mutex);
+
+ /* catch async errors that were recorded back when async writeback
+ * failed for pages in this mapping. */
+ if (!S_ISDIR(inode->i_mode)) {
+ err = lli->lli_async_rc;
+ lli->lli_async_rc = 0;
+ if (rc == 0)
+ rc = err;
+ err = lov_read_and_clear_async_rc(lli->lli_clob);
+ if (rc == 0)
+ rc = err;
+ }
+
+ oc = ll_mdscapa_get(inode);
+ err = md_sync(ll_i2sbi(inode)->ll_md_exp, ll_inode2fid(inode), oc,
+ &req);
+ capa_put(oc);
+ if (!rc)
+ rc = err;
+ if (!err)
+ ptlrpc_req_finished(req);
+
+ if (datasync && S_ISREG(inode->i_mode)) {
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+
+ err = cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
+ CL_FSYNC_ALL, 0);
+ if (rc == 0 && err < 0)
+ rc = err;
+ if (rc < 0)
+ fd->fd_write_failed = true;
+ else
+ fd->fd_write_failed = false;
+ }
+
+ mutex_unlock(&inode->i_mutex);
+ return rc;
+}
+
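+/* flock()/fcntl() lock handler: translate the VFS file_lock into an LDLM
+ * flock enqueue on the MDS, then mirror the result in the local lock tables
+ * (flock_lock_file_wait()/posix_lock_file_wait()) so the VFS state stays
+ * consistent with the cluster-wide lock. */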
+int ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ldlm_enqueue_info einfo = {
+ .ei_type = LDLM_FLOCK,
+ .ei_cb_cp = ldlm_flock_completion_ast,
+ .ei_cbdata = file_lock,
+ };
+ struct md_op_data *op_data;
+ struct lustre_handle lockh = {0};
+ ldlm_policy_data_t flock = {{0}};
+ int flags = 0;
+ int rc;
+ int rc2 = 0;
+
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu file_lock=%p\n",
+ inode->i_ino, file_lock);
+
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FLOCK, 1);
+
+ if (file_lock->fl_flags & FL_FLOCK) {
+ LASSERT((cmd == F_SETLKW) || (cmd == F_SETLK));
+ /* flocks are whole-file locks */
+ flock.l_flock.end = OFFSET_MAX;
+		/* For flocks the owner is determined by the local file descriptor */
+ flock.l_flock.owner = (unsigned long)file_lock->fl_file;
+ } else if (file_lock->fl_flags & FL_POSIX) {
+ flock.l_flock.owner = (unsigned long)file_lock->fl_owner;
+ flock.l_flock.start = file_lock->fl_start;
+ flock.l_flock.end = file_lock->fl_end;
+ } else {
+ return -EINVAL;
+ }
+ flock.l_flock.pid = file_lock->fl_pid;
+
+	/* Somewhat ugly workaround for svc lockd.
+	 * lockd installs a custom fl_lmops->lm_compare_owner that checks
+	 * that the fl_owner is the same (which it always is between lockd
+	 * processes on the local node, I guess) and then compares the pid.
+	 * So we assign the pid to the owner field to make it all work;
+	 * a conflict with normal locks is unlikely since the pid space and
+	 * the pointer space for current->files do not intersect */
+ if (file_lock->fl_lmops && file_lock->fl_lmops->lm_compare_owner)
+ flock.l_flock.owner = (unsigned long)file_lock->fl_pid;
+
+ switch (file_lock->fl_type) {
+ case F_RDLCK:
+ einfo.ei_mode = LCK_PR;
+ break;
+ case F_UNLCK:
+ /* An unlock request may or may not have any relation to
+ * existing locks so we may not be able to pass a lock handle
+ * via a normal ldlm_lock_cancel() request. The request may even
+ * unlock a byte range in the middle of an existing lock. In
+ * order to process an unlock request we need all of the same
+ * information that is given with a normal read or write record
+ * lock request. To avoid creating another ldlm unlock (cancel)
+ * message we'll treat a LCK_NL flock request as an unlock. */
+ einfo.ei_mode = LCK_NL;
+ break;
+ case F_WRLCK:
+ einfo.ei_mode = LCK_PW;
+ break;
+ default:
+ CDEBUG(D_INFO, "Unknown fcntl lock type: %d\n",
+ file_lock->fl_type);
+ return -ENOTSUPP;
+ }
+
+ switch (cmd) {
+ case F_SETLKW:
+#ifdef F_SETLKW64
+ case F_SETLKW64:
+#endif
+ flags = 0;
+ break;
+ case F_SETLK:
+#ifdef F_SETLK64
+ case F_SETLK64:
+#endif
+ flags = LDLM_FL_BLOCK_NOWAIT;
+ break;
+ case F_GETLK:
+#ifdef F_GETLK64
+ case F_GETLK64:
+#endif
+ flags = LDLM_FL_TEST_LOCK;
+ /* Save the old mode so that if the mode in the lock changes we
+ * can decrement the appropriate reader or writer refcount. */
+ file_lock->fl_type = einfo.ei_mode;
+ break;
+ default:
+ CERROR("unknown fcntl lock command: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
+ LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+
+ CDEBUG(D_DLMTRACE, "inode=%lu, pid=%u, flags=%#x, mode=%u, "
+ "start="LPU64", end="LPU64"\n", inode->i_ino, flock.l_flock.pid,
+ flags, einfo.ei_mode, flock.l_flock.start, flock.l_flock.end);
+
+ rc = md_enqueue(sbi->ll_md_exp, &einfo, NULL,
+ op_data, &lockh, &flock, 0, NULL /* req */, flags);
+
+ if ((file_lock->fl_flags & FL_FLOCK) &&
+ (rc == 0 || file_lock->fl_type == F_UNLCK))
+ rc2 = flock_lock_file_wait(file, file_lock);
+ if ((file_lock->fl_flags & FL_POSIX) &&
+ (rc == 0 || file_lock->fl_type == F_UNLCK) &&
+ !(flags & LDLM_FL_TEST_LOCK))
+ rc2 = posix_lock_file_wait(file, file_lock);
+
+ if (rc2 && file_lock->fl_type != F_UNLCK) {
+ einfo.ei_mode = LCK_NL;
+ md_enqueue(sbi->ll_md_exp, &einfo, NULL,
+ op_data, &lockh, &flock, 0, NULL /* req */, flags);
+ rc = rc2;
+ }
+
+ ll_finish_md_op_data(op_data);
+
+ return rc;
+}
+
+int ll_file_noflock(struct file *file, int cmd, struct file_lock *file_lock)
+{
+ return -ENOSYS;
+}
+
+/**
+ * Test if some locks matching @bits and @l_req_mode are acquired:
+ * - the bits can be spread over different locks
+ * - the bits that are found are cleared in *bits
+ * - the bits that are not found are kept in *bits
+ * \param inode [IN]
+ * \param bits [IN] searched lock bits
+ * \param l_req_mode [IN] searched lock mode
+ * \retval boolean, true iff all bits are found
+ */
+int ll_have_md_lock(struct inode *inode, __u64 *bits, ldlm_mode_t l_req_mode)
+{
+ struct lustre_handle lockh;
+ ldlm_policy_data_t policy;
+ ldlm_mode_t mode = (l_req_mode == LCK_MINMODE) ?
+ (LCK_CR|LCK_CW|LCK_PR|LCK_PW) : l_req_mode;
+ struct lu_fid *fid;
+ __u64 flags;
+ int i;
+
+ if (!inode)
+ return 0;
+
+ fid = &ll_i2info(inode)->lli_fid;
+ CDEBUG(D_INFO, "trying to match res "DFID" mode %s\n", PFID(fid),
+ ldlm_lockname[mode]);
+
+ flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING | LDLM_FL_TEST_LOCK;
+ for (i = 0; i <= MDS_INODELOCK_MAXSHIFT && *bits != 0; i++) {
+ policy.l_inodebits.bits = *bits & (1 << i);
+ if (policy.l_inodebits.bits == 0)
+ continue;
+
+ if (md_lock_match(ll_i2mdexp(inode), flags, fid, LDLM_IBITS,
+ &policy, mode, &lockh)) {
+ struct ldlm_lock *lock;
+
+ lock = ldlm_handle2lock(&lockh);
+ if (lock) {
+ *bits &=
+ ~(lock->l_policy_data.l_inodebits.bits);
+ LDLM_LOCK_PUT(lock);
+ } else {
+ *bits &= ~policy.l_inodebits.bits;
+ }
+ }
+ }
+ return *bits == 0;
+}
+
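+/* Try to match a cached MDS inodebits lock covering @bits on @inode. On
+ * success the matched mode is returned and a reference is held via @lockh,
+ * which the caller must drop with ldlm_lock_decref(); 0 means no match. */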
+ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
+ struct lustre_handle *lockh, __u64 flags)
+{
+ ldlm_policy_data_t policy = { .l_inodebits = {bits}};
+ struct lu_fid *fid;
+ ldlm_mode_t rc;
+
+ fid = &ll_i2info(inode)->lli_fid;
+ CDEBUG(D_INFO, "trying to match res "DFID"\n", PFID(fid));
+
+ rc = md_lock_match(ll_i2mdexp(inode), LDLM_FL_BLOCK_GRANTED|flags,
+ fid, LDLM_IBITS, &policy,
+ LCK_CR|LCK_CW|LCK_PR|LCK_PW, lockh);
+ return rc;
+}
+
+static int ll_inode_revalidate_fini(struct inode *inode, int rc)
+{
+ /* Already unlinked. Just update nlink and return success */
+ if (rc == -ENOENT) {
+ clear_nlink(inode);
+ /* This path cannot be hit for regular files unless in
+		 * case of obscure races, so no need to validate
+ * size. */
+ if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
+ return 0;
+ } else if (rc != 0) {
+ CERROR("%s: revalidate FID "DFID" error: rc = %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), rc);
+ }
+
+ return rc;
+}
+
+int __ll_inode_revalidate_it(struct dentry *dentry, struct lookup_intent *it,
+ __u64 ibits)
+{
+ struct inode *inode = dentry->d_inode;
+ struct ptlrpc_request *req = NULL;
+ struct obd_export *exp;
+ int rc = 0;
+
+ LASSERT(inode != NULL);
+
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),name=%s\n",
+ inode->i_ino, inode->i_generation, inode, dentry->d_name.name);
+
+ exp = ll_i2mdexp(inode);
+
+ /* XXX: Enable OBD_CONNECT_ATTRFID to reduce unnecessary getattr RPC.
+ * But under CMD case, it caused some lock issues, should be fixed
+ * with new CMD ibits lock. See bug 12718 */
+ if (exp_connect_flags(exp) & OBD_CONNECT_ATTRFID) {
+ struct lookup_intent oit = { .it_op = IT_GETATTR };
+ struct md_op_data *op_data;
+
+ if (ibits == MDS_INODELOCK_LOOKUP)
+ oit.it_op = IT_LOOKUP;
+
+ /* Call getattr by fid, so do not provide name at all. */
+ op_data = ll_prep_md_op_data(NULL, dentry->d_parent->d_inode,
+ dentry->d_inode, NULL, 0, 0,
+ LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+
+ oit.it_create_mode |= M_CHECK_STALE;
+ rc = md_intent_lock(exp, op_data, NULL, 0,
+ /* we are not interested in name
+ based lookup */
+ &oit, 0, &req,
+ ll_md_blocking_ast, 0);
+ ll_finish_md_op_data(op_data);
+ oit.it_create_mode &= ~M_CHECK_STALE;
+ if (rc < 0) {
+ rc = ll_inode_revalidate_fini(inode, rc);
+ GOTO (out, rc);
+ }
+
+ rc = ll_revalidate_it_finish(req, &oit, dentry);
+ if (rc != 0) {
+ ll_intent_release(&oit);
+ GOTO(out, rc);
+ }
+
+ /* Unlinked? Unhash dentry, so it is not picked up later by
+ do_lookup() -> ll_revalidate_it(). We cannot use d_drop
+ here to preserve get_cwd functionality on 2.6.
+ Bug 10503 */
+ if (!dentry->d_inode->i_nlink)
+ d_lustre_invalidate(dentry, 0);
+
+ ll_lookup_finish_locks(&oit, dentry);
+ } else if (!ll_have_md_lock(dentry->d_inode, &ibits, LCK_MINMODE)) {
+ struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
+ obd_valid valid = OBD_MD_FLGETATTR;
+ struct md_op_data *op_data;
+ int ealen = 0;
+
+ if (S_ISREG(inode->i_mode)) {
+ rc = ll_get_max_mdsize(sbi, &ealen);
+ if (rc)
+ return rc;
+ valid |= OBD_MD_FLEASIZE | OBD_MD_FLMODEASIZE;
+ }
+
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
+ 0, ealen, LUSTRE_OPC_ANY,
+ NULL);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+
+ op_data->op_valid = valid;
+		/* If OBD_CONNECT_ATTRFID is not supported, we cannot find a
+		 * capa for this inode, because we only keep capas of dirs
+		 * fresh. */
+ rc = md_getattr(sbi->ll_md_exp, op_data, &req);
+ ll_finish_md_op_data(op_data);
+ if (rc) {
+ rc = ll_inode_revalidate_fini(inode, rc);
+ return rc;
+ }
+
+ rc = ll_prep_inode(&inode, req, NULL, NULL);
+ }
+out:
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+int ll_inode_revalidate_it(struct dentry *dentry, struct lookup_intent *it,
+ __u64 ibits)
+{
+ struct inode *inode = dentry->d_inode;
+ int rc;
+
+ rc = __ll_inode_revalidate_it(dentry, it, ibits);
+ if (rc != 0)
+ return rc;
+
+ /* if object isn't regular file, don't validate size */
+ if (!S_ISREG(inode->i_mode)) {
+ LTIME_S(inode->i_atime) = ll_i2info(inode)->lli_lvb.lvb_atime;
+ LTIME_S(inode->i_mtime) = ll_i2info(inode)->lli_lvb.lvb_mtime;
+ LTIME_S(inode->i_ctime) = ll_i2info(inode)->lli_lvb.lvb_ctime;
+ } else {
+ rc = ll_glimpse_size(inode);
+ }
+ return rc;
+}
+
+int ll_getattr_it(struct vfsmount *mnt, struct dentry *de,
+ struct lookup_intent *it, struct kstat *stat)
+{
+ struct inode *inode = de->d_inode;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ll_inode_info *lli = ll_i2info(inode);
+ int res = 0;
+
+ res = ll_inode_revalidate_it(de, it, MDS_INODELOCK_UPDATE |
+ MDS_INODELOCK_LOOKUP);
+ ll_stats_ops_tally(sbi, LPROC_LL_GETATTR, 1);
+
+ if (res)
+ return res;
+
+ stat->dev = inode->i_sb->s_dev;
+ if (ll_need_32bit_api(sbi))
+ stat->ino = cl_fid_build_ino(&lli->lli_fid, 1);
+ else
+ stat->ino = inode->i_ino;
+ stat->mode = inode->i_mode;
+ stat->nlink = inode->i_nlink;
+ stat->uid = inode->i_uid;
+ stat->gid = inode->i_gid;
+ stat->rdev = inode->i_rdev;
+ stat->atime = inode->i_atime;
+ stat->mtime = inode->i_mtime;
+ stat->ctime = inode->i_ctime;
+ stat->blksize = 1 << inode->i_blkbits;
+
+ stat->size = i_size_read(inode);
+ stat->blocks = inode->i_blocks;
+
+ return 0;
+}
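+
+/* Plain ->getattr(): revalidate the inode with an IT_GETATTR intent via
+ * ll_getattr_it() and fill in @stat. */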
+int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat)
+{
+ struct lookup_intent it = { .it_op = IT_GETATTR };
+
+ return ll_getattr_it(mnt, de, &it, stat);
+}
+
+struct posix_acl *ll_get_acl(struct inode *inode, int type)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct posix_acl *acl = NULL;
+
+ spin_lock(&lli->lli_lock);
+ /* VFS' acl_permission_check->check_acl will release the refcount */
+ acl = posix_acl_dup(lli->lli_posix_acl);
+ spin_unlock(&lli->lli_lock);
+
+ return acl;
+}
+
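+/* ->permission() for Lustre inodes; remote-client mounts are checked through
+ * lustre_check_remote_perm() instead of generic_permission(). */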
+int ll_inode_permission(struct inode *inode, int mask)
+{
+ int rc = 0;
+
+#ifdef MAY_NOT_BLOCK
+ if (mask & MAY_NOT_BLOCK)
+ return -ECHILD;
+#endif
+
+	/* As the root inode is NOT validated in the lookup operation,
+	 * we need to do it before the permission check. */
+
+ if (inode == inode->i_sb->s_root->d_inode) {
+ struct lookup_intent it = { .it_op = IT_LOOKUP };
+
+ rc = __ll_inode_revalidate_it(inode->i_sb->s_root, &it,
+ MDS_INODELOCK_LOOKUP);
+ if (rc)
+ return rc;
+ }
+
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), inode mode %x mask %o\n",
+ inode->i_ino, inode->i_generation, inode, inode->i_mode, mask);
+
+ if (ll_i2sbi(inode)->ll_flags & LL_SBI_RMT_CLIENT)
+ return lustre_check_remote_perm(inode, mask);
+
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_INODE_PERM, 1);
+ rc = generic_permission(inode, mask);
+
+ return rc;
+}
+
+#define READ_METHOD aio_read
+#define READ_FUNCTION ll_file_aio_read
+#define WRITE_METHOD aio_write
+#define WRITE_FUNCTION ll_file_aio_write
+
+/* -o localflock - only provides locally consistent flock locks */
+struct file_operations ll_file_operations = {
+ .read = ll_file_read,
+ .READ_METHOD = READ_FUNCTION,
+ .write = ll_file_write,
+ .WRITE_METHOD = WRITE_FUNCTION,
+ .unlocked_ioctl = ll_file_ioctl,
+ .open = ll_file_open,
+ .release = ll_file_release,
+ .mmap = ll_file_mmap,
+ .llseek = ll_file_seek,
+ .splice_read = ll_file_splice_read,
+ .fsync = ll_fsync,
+ .flush = ll_flush
+};
+
+struct file_operations ll_file_operations_flock = {
+ .read = ll_file_read,
+ .READ_METHOD = READ_FUNCTION,
+ .write = ll_file_write,
+ .WRITE_METHOD = WRITE_FUNCTION,
+ .unlocked_ioctl = ll_file_ioctl,
+ .open = ll_file_open,
+ .release = ll_file_release,
+ .mmap = ll_file_mmap,
+ .llseek = ll_file_seek,
+ .splice_read = ll_file_splice_read,
+ .fsync = ll_fsync,
+ .flush = ll_flush,
+ .flock = ll_file_flock,
+ .lock = ll_file_flock
+};
+
+/* These are for -o noflock - to return ENOSYS on flock calls */
+struct file_operations ll_file_operations_noflock = {
+ .read = ll_file_read,
+ .READ_METHOD = READ_FUNCTION,
+ .write = ll_file_write,
+ .WRITE_METHOD = WRITE_FUNCTION,
+ .unlocked_ioctl = ll_file_ioctl,
+ .open = ll_file_open,
+ .release = ll_file_release,
+ .mmap = ll_file_mmap,
+ .llseek = ll_file_seek,
+ .splice_read = ll_file_splice_read,
+ .fsync = ll_fsync,
+ .flush = ll_flush,
+ .flock = ll_file_noflock,
+ .lock = ll_file_noflock
+};
+
+struct inode_operations ll_file_inode_operations = {
+ .setattr = ll_setattr,
+ .getattr = ll_getattr,
+ .permission = ll_inode_permission,
+ .setxattr = ll_setxattr,
+ .getxattr = ll_getxattr,
+ .listxattr = ll_listxattr,
+ .removexattr = ll_removexattr,
+ .get_acl = ll_get_acl,
+};
+
+/* dynamic ioctl number support routines */
+static struct llioc_ctl_data {
+ struct rw_semaphore ioc_sem;
+ struct list_head ioc_head;
+} llioc = {
+ __RWSEM_INITIALIZER(llioc.ioc_sem),
+ LIST_HEAD_INIT(llioc.ioc_head)
+};
+
+struct llioc_data {
+ struct list_head iocd_list;
+ unsigned int iocd_size;
+ llioc_callback_t iocd_cb;
+ unsigned int iocd_count;
+ unsigned int iocd_cmd[0];
+};
+
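+/* Register a callback @cb handling @count dynamic ioctl commands listed in
+ * @cmd. Returns an opaque cookie to be passed to ll_iocontrol_unregister(),
+ * or NULL on invalid arguments or allocation failure. */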
+void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd)
+{
+ unsigned int size;
+ struct llioc_data *in_data = NULL;
+
+ if (cb == NULL || cmd == NULL ||
+ count > LLIOC_MAX_CMD || count < 0)
+ return NULL;
+
+ size = sizeof(*in_data) + count * sizeof(unsigned int);
+ OBD_ALLOC(in_data, size);
+ if (in_data == NULL)
+ return NULL;
+
+ memset(in_data, 0, sizeof(*in_data));
+ in_data->iocd_size = size;
+ in_data->iocd_cb = cb;
+ in_data->iocd_count = count;
+ memcpy(in_data->iocd_cmd, cmd, sizeof(unsigned int) * count);
+
+ down_write(&llioc.ioc_sem);
+ list_add_tail(&in_data->iocd_list, &llioc.ioc_head);
+ up_write(&llioc.ioc_sem);
+
+ return in_data;
+}
+
+void ll_iocontrol_unregister(void *magic)
+{
+ struct llioc_data *tmp;
+
+ if (magic == NULL)
+ return;
+
+ down_write(&llioc.ioc_sem);
+ list_for_each_entry(tmp, &llioc.ioc_head, iocd_list) {
+ if (tmp == magic) {
+ unsigned int size = tmp->iocd_size;
+
+ list_del(&tmp->iocd_list);
+ up_write(&llioc.ioc_sem);
+
+ OBD_FREE(tmp, size);
+ return;
+ }
+ }
+ up_write(&llioc.ioc_sem);
+
+ CWARN("didn't find iocontrol register block with magic: %p\n", magic);
+}
+
+EXPORT_SYMBOL(ll_iocontrol_register);
+EXPORT_SYMBOL(ll_iocontrol_unregister);
+
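+/* Walk the registered dynamic ioctl handlers and call those that claim @cmd
+ * until one returns LLIOC_STOP; the handler's return code is passed back
+ * through @rcp. */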
+enum llioc_iter ll_iocontrol_call(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg, int *rcp)
+{
+ enum llioc_iter ret = LLIOC_CONT;
+ struct llioc_data *data;
+ int rc = -EINVAL, i;
+
+ down_read(&llioc.ioc_sem);
+ list_for_each_entry(data, &llioc.ioc_head, iocd_list) {
+ for (i = 0; i < data->iocd_count; i++) {
+ if (cmd != data->iocd_cmd[i])
+ continue;
+
+ ret = data->iocd_cb(inode, file, cmd, arg, data, &rc);
+ break;
+ }
+
+ if (ret == LLIOC_STOP)
+ break;
+ }
+ up_read(&llioc.ioc_sem);
+
+ if (rcp)
+ *rcp = rc;
+ return ret;
+}
+
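+/* Push a layout configuration change down to the cl_object of @inode; for
+ * OBJECT_CONF_SET the layout lock is only allowed to match once the new
+ * layout has been applied. */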
+int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct cl_env_nest nest;
+ struct lu_env *env;
+ int result;
+
+ if (lli->lli_clob == NULL)
+ return 0;
+
+ env = cl_env_nested_get(&nest);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ result = cl_conf_set(env, lli->lli_clob, conf);
+ cl_env_nested_put(&nest, env);
+
+ if (conf->coc_opc == OBJECT_CONF_SET) {
+ struct ldlm_lock *lock = conf->coc_lock;
+
+ LASSERT(lock != NULL);
+ LASSERT(ldlm_has_layout(lock));
+ if (result == 0) {
+			/* The lock can only be allowed to match after the
+			 * layout is applied to the inode, otherwise a false
+			 * layout would be seen. Applying the layout should
+			 * happen before dropping the intent lock. */
+ ldlm_lock_allow_match(lock);
+ }
+ }
+ return result;
+}
+
+/* Fetch layout from MDT with getxattr request, if it's not ready yet */
+static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock)
+
+{
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct obd_capa *oc;
+ struct ptlrpc_request *req;
+ struct mdt_body *body;
+ void *lvbdata;
+ void *lmm;
+ int lmmsize;
+ int rc;
+
+ CDEBUG(D_INODE, DFID" LVB_READY=%d l_lvb_data=%p l_lvb_len=%d\n",
+ PFID(ll_inode2fid(inode)), !!(lock->l_flags & LDLM_FL_LVB_READY),
+ lock->l_lvb_data, lock->l_lvb_len);
+
+ if ((lock->l_lvb_data != NULL) && (lock->l_flags & LDLM_FL_LVB_READY))
+ return 0;
+
+ /* if layout lock was granted right away, the layout is returned
+ * within DLM_LVB of dlm reply; otherwise if the lock was ever
+ * blocked and then granted via completion ast, we have to fetch
+ * layout here. Please note that we can't use the LVB buffer in
+ * completion AST because it doesn't have a large enough buffer */
+ oc = ll_mdscapa_get(inode);
+ rc = ll_get_max_mdsize(sbi, &lmmsize);
+ if (rc == 0)
+ rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
+ OBD_MD_FLXATTR, XATTR_NAME_LOV, NULL, 0,
+ lmmsize, 0, &req);
+ capa_put(oc);
+ if (rc < 0)
+ return rc;
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ if (body == NULL || body->eadatasize > lmmsize)
+ GOTO(out, rc = -EPROTO);
+
+ lmmsize = body->eadatasize;
+ if (lmmsize == 0) /* empty layout */
+ GOTO(out, rc = 0);
+
+ lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA, lmmsize);
+ if (lmm == NULL)
+ GOTO(out, rc = -EFAULT);
+
+ OBD_ALLOC_LARGE(lvbdata, lmmsize);
+ if (lvbdata == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ memcpy(lvbdata, lmm, lmmsize);
+ lock_res_and_lock(lock);
+ if (lock->l_lvb_data != NULL)
+ OBD_FREE_LARGE(lock->l_lvb_data, lock->l_lvb_len);
+
+ lock->l_lvb_data = lvbdata;
+ lock->l_lvb_len = lmmsize;
+ unlock_res_and_lock(lock);
+
+out:
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+/**
+ * Apply the layout to the inode. Layout lock is held and will be released
+ * in this function.
+ */
+static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode,
+ struct inode *inode, __u32 *gen, bool reconf)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ldlm_lock *lock;
+ struct lustre_md md = { NULL };
+ struct cl_object_conf conf;
+ int rc = 0;
+ bool lvb_ready;
+ bool wait_layout = false;
+
+ LASSERT(lustre_handle_is_used(lockh));
+
+ lock = ldlm_handle2lock(lockh);
+ LASSERT(lock != NULL);
+ LASSERT(ldlm_has_layout(lock));
+
+ LDLM_DEBUG(lock, "File %p/"DFID" being reconfigured: %d.\n",
+ inode, PFID(&lli->lli_fid), reconf);
+
+ /* in case this is a caching lock and reinstate with new inode */
+ md_set_lock_data(sbi->ll_md_exp, &lockh->cookie, inode, NULL);
+
+ lock_res_and_lock(lock);
+ lvb_ready = !!(lock->l_flags & LDLM_FL_LVB_READY);
+ unlock_res_and_lock(lock);
+ /* checking lvb_ready is racy but this is okay. The worst case is
+	 * that multiple processes may configure the file at the same time. */
+ if (lvb_ready || !reconf) {
+ rc = -ENODATA;
+ if (lvb_ready) {
+ /* layout_gen must be valid if layout lock is not
+			 * cancelled and the stripe has already been set */
+ *gen = lli->lli_layout_gen;
+ rc = 0;
+ }
+ GOTO(out, rc);
+ }
+
+ rc = ll_layout_fetch(inode, lock);
+ if (rc < 0)
+ GOTO(out, rc);
+
+ /* for layout lock, lmm is returned in lock's lvb.
+ * lvb_data is immutable if the lock is held so it's safe to access it
+ * without res lock. See the description in ldlm_lock_decref_internal()
+ * for the condition to free lvb_data of layout lock */
+ if (lock->l_lvb_data != NULL) {
+ rc = obd_unpackmd(sbi->ll_dt_exp, &md.lsm,
+ lock->l_lvb_data, lock->l_lvb_len);
+ if (rc >= 0) {
+ *gen = LL_LAYOUT_GEN_EMPTY;
+ if (md.lsm != NULL)
+ *gen = md.lsm->lsm_layout_gen;
+ rc = 0;
+ } else {
+ CERROR("%s: file "DFID" unpackmd error: %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(&lli->lli_fid), rc);
+ }
+ }
+ if (rc < 0)
+ GOTO(out, rc);
+
+	/* Set the layout on the file. It is unlikely this will fail, as
+	 * the old layout was surely eliminated */
+ memset(&conf, 0, sizeof conf);
+ conf.coc_opc = OBJECT_CONF_SET;
+ conf.coc_inode = inode;
+ conf.coc_lock = lock;
+ conf.u.coc_md = &md;
+ rc = ll_layout_conf(inode, &conf);
+
+ if (md.lsm != NULL)
+ obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
+
+ /* refresh layout failed, need to wait */
+ wait_layout = rc == -EBUSY;
+
+out:
+ LDLM_LOCK_PUT(lock);
+ ldlm_lock_decref(lockh, mode);
+
+ /* wait for IO to complete if it's still being used. */
+ if (wait_layout) {
+ CDEBUG(D_INODE, "%s: %p/"DFID" wait for layout reconf.\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ inode, PFID(&lli->lli_fid));
+
+ memset(&conf, 0, sizeof conf);
+ conf.coc_opc = OBJECT_CONF_WAIT;
+ conf.coc_inode = inode;
+ rc = ll_layout_conf(inode, &conf);
+ if (rc == 0)
+ rc = -EAGAIN;
+
+ CDEBUG(D_INODE, "file: "DFID" waiting layout return: %d.\n",
+ PFID(&lli->lli_fid), rc);
+ }
+ return rc;
+}
+
+/**
+ * This function checks if there exists a LAYOUT lock on the client side,
+ * or enqueues it if it doesn't have one in cache.
+ *
+ * This function does not hold the layout lock, so the lock may be revoked
+ * any time after this function returns. Any operation that depends on the
+ * layout should be redone in that case.
+ *
+ * This function should be called before lov_io_init() to get an up-to-date
+ * layout version; the caller should save the version number, and after the
+ * IO is finished call this function again to verify that the layout was not
+ * changed during the IO.
+ */
+int ll_layout_refresh(struct inode *inode, __u32 *gen)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct md_op_data *op_data;
+ struct lookup_intent it;
+ struct lustre_handle lockh;
+ ldlm_mode_t mode;
+ struct ldlm_enqueue_info einfo = {
+ .ei_type = LDLM_IBITS,
+ .ei_mode = LCK_CR,
+ .ei_cb_bl = ll_md_blocking_ast,
+ .ei_cb_cp = ldlm_completion_ast,
+ };
+ int rc;
+
+ *gen = lli->lli_layout_gen;
+ if (!(sbi->ll_flags & LL_SBI_LAYOUT_LOCK))
+ return 0;
+
+ /* sanity checks */
+ LASSERT(fid_is_sane(ll_inode2fid(inode)));
+ LASSERT(S_ISREG(inode->i_mode));
+
+	/* The layout lock is mostly cached on the local side, so try to
+	 * match it before grabbing the layout lock mutex. */
+ mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0);
+ if (mode != 0) { /* hit cached lock */
+ rc = ll_layout_lock_set(&lockh, mode, inode, gen, false);
+ if (rc == 0)
+ return 0;
+
+		/* Better to hold lli_layout_mutex and try again, otherwise
+		 * we may hit a starvation problem. */
+ }
+
+ /* take layout lock mutex to enqueue layout lock exclusively. */
+ mutex_lock(&lli->lli_layout_mutex);
+
+again:
+ /* try again. Maybe somebody else has done this. */
+ mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0);
+ if (mode != 0) { /* hit cached lock */
+ rc = ll_layout_lock_set(&lockh, mode, inode, gen, true);
+ if (rc == -EAGAIN)
+ goto again;
+
+ mutex_unlock(&lli->lli_layout_mutex);
+ return rc;
+ }
+
+ op_data = ll_prep_md_op_data(NULL, inode, inode, NULL,
+ 0, 0, LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data)) {
+ mutex_unlock(&lli->lli_layout_mutex);
+ return PTR_ERR(op_data);
+ }
+
+ /* have to enqueue one */
+ memset(&it, 0, sizeof(it));
+ it.it_op = IT_LAYOUT;
+ lockh.cookie = 0ULL;
+
+ LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file %p/"DFID".\n",
+ ll_get_fsname(inode->i_sb, NULL, 0), inode,
+ PFID(&lli->lli_fid));
+
+ rc = md_enqueue(sbi->ll_md_exp, &einfo, &it, op_data, &lockh,
+ NULL, 0, NULL, 0);
+ if (it.d.lustre.it_data != NULL)
+ ptlrpc_req_finished(it.d.lustre.it_data);
+ it.d.lustre.it_data = NULL;
+
+ ll_finish_md_op_data(op_data);
+
+ mode = it.d.lustre.it_lock_mode;
+ it.d.lustre.it_lock_mode = 0;
+ ll_intent_drop_lock(&it);
+
+ if (rc == 0) {
+ /* set lock data in case this is a new lock */
+ ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
+ rc = ll_layout_lock_set(&lockh, mode, inode, gen, true);
+ if (rc == -EAGAIN)
+ goto again;
+ }
+ mutex_unlock(&lli->lli_layout_mutex);
+
+ return rc;
+}
diff --git a/drivers/staging/lustre/lustre/llite/llite_capa.c b/drivers/staging/lustre/lustre/llite/llite_capa.c
new file mode 100644
index 0000000..edd512b2
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/llite_capa.c
@@ -0,0 +1,653 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/llite/llite_capa.c
+ *
+ * Author: Lai Siyao <lsy@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+#include <linux/fs.h>
+#include <asm/uaccess.h>
+#include <linux/file.h>
+#include <linux/kmod.h>
+
+#include <lustre_lite.h>
+#include "llite_internal.h"
+
+/* for obd_capa.c_list, client capa might stay in three places:
+ * 1. ll_capa_list.
+ * 2. ll_idle_capas.
+ * 3. stand alone: just allocated.
+ */
+
+/* capas for OSS writeback and those that failed to renew */
+static LIST_HEAD(ll_idle_capas);
+static struct ptlrpc_thread ll_capa_thread;
+static struct list_head *ll_capa_list = &capa_list[CAPA_SITE_CLIENT];
+
+/* llite capa renewal timer */
+struct timer_list ll_capa_timer;
+/* for debug: indicate whether capa on llite is enabled or not */
+static atomic_t ll_capa_debug = ATOMIC_INIT(0);
+static unsigned long long ll_capa_renewed = 0;
+static unsigned long long ll_capa_renewal_noent = 0;
+static unsigned long long ll_capa_renewal_failed = 0;
+static unsigned long long ll_capa_renewal_retries = 0;
+
+static inline void update_capa_timer(struct obd_capa *ocapa, cfs_time_t expiry)
+{
+ if (cfs_time_before(expiry, ll_capa_timer.expires) ||
+ !timer_pending(&ll_capa_timer)) {
+ mod_timer(&ll_capa_timer, expiry);
+ DEBUG_CAPA(D_SEC, &ocapa->c_capa,
+ "ll_capa_timer update: %lu/%lu by", expiry, jiffies);
+ }
+}
+
+static inline cfs_time_t capa_renewal_time(struct obd_capa *ocapa)
+{
+ return cfs_time_sub(ocapa->c_expiry,
+ cfs_time_seconds(ocapa->c_capa.lc_timeout) / 2);
+}
+
+static inline int capa_is_to_expire(struct obd_capa *ocapa)
+{
+ return cfs_time_beforeq(capa_renewal_time(ocapa), cfs_time_current());
+}
+
+static inline int have_expired_capa(void)
+{
+ struct obd_capa *ocapa = NULL;
+ int expired = 0;
+
+ /* if ll_capa_list has client capa to expire or ll_idle_capas has
+ * expired capa, return 1.
+ */
+ spin_lock(&capa_lock);
+ if (!list_empty(ll_capa_list)) {
+ ocapa = list_entry(ll_capa_list->next, struct obd_capa,
+ c_list);
+ expired = capa_is_to_expire(ocapa);
+ if (!expired)
+ update_capa_timer(ocapa, capa_renewal_time(ocapa));
+ } else if (!list_empty(&ll_idle_capas)) {
+ ocapa = list_entry(ll_idle_capas.next, struct obd_capa,
+ c_list);
+ expired = capa_is_expired(ocapa);
+ if (!expired)
+ update_capa_timer(ocapa, ocapa->c_expiry);
+ }
+ spin_unlock(&capa_lock);
+
+ if (expired)
+ DEBUG_CAPA(D_SEC, &ocapa->c_capa, "expired");
+ return expired;
+}
+
+static void sort_add_capa(struct obd_capa *ocapa, struct list_head *head)
+{
+ struct obd_capa *tmp;
+ struct list_head *before = NULL;
+
+ /* TODO: client capa is sorted by expiry, this could be optimized */
+ list_for_each_entry_reverse(tmp, head, c_list) {
+ if (cfs_time_aftereq(ocapa->c_expiry, tmp->c_expiry)) {
+ before = &tmp->c_list;
+ break;
+ }
+ }
+
+ LASSERT(&ocapa->c_list != before);
+ list_add(&ocapa->c_list, before ?: head);
+}
+
+static inline int obd_capa_open_count(struct obd_capa *oc)
+{
+ struct ll_inode_info *lli = ll_i2info(oc->u.cli.inode);
+ return atomic_read(&lli->lli_open_count);
+}
+
+static void ll_delete_capa(struct obd_capa *ocapa)
+{
+ struct ll_inode_info *lli = ll_i2info(ocapa->u.cli.inode);
+
+ if (capa_for_mds(&ocapa->c_capa)) {
+ LASSERT(lli->lli_mds_capa == ocapa);
+ lli->lli_mds_capa = NULL;
+ } else if (capa_for_oss(&ocapa->c_capa)) {
+ list_del_init(&ocapa->u.cli.lli_list);
+ }
+
+ DEBUG_CAPA(D_SEC, &ocapa->c_capa, "free client");
+ list_del_init(&ocapa->c_list);
+ capa_count[CAPA_SITE_CLIENT]--;
+	/* release the ref taken at allocation */
+ capa_put(ocapa);
+}
+
+/* Three places where a client capa is deleted:
+ * 1. capa_thread_main(), the main place to delete expired capas.
+ * 2. ll_clear_inode_capas() in ll_clear_inode().
+ * 3. ll_truncate_free_capa(), which deletes the truncate capa explicitly in
+ *    ll_setattr_ost().
+ */
+static int capa_thread_main(void *unused)
+{
+ struct obd_capa *ocapa, *tmp, *next;
+ struct inode *inode = NULL;
+ struct l_wait_info lwi = { 0 };
+ int rc;
+
+ thread_set_flags(&ll_capa_thread, SVC_RUNNING);
+ wake_up(&ll_capa_thread.t_ctl_waitq);
+
+ while (1) {
+ l_wait_event(ll_capa_thread.t_ctl_waitq,
+ !thread_is_running(&ll_capa_thread) ||
+ have_expired_capa(),
+ &lwi);
+
+ if (!thread_is_running(&ll_capa_thread))
+ break;
+
+ next = NULL;
+
+ spin_lock(&capa_lock);
+ list_for_each_entry_safe(ocapa, tmp, ll_capa_list, c_list) {
+ __u64 ibits;
+
+ LASSERT(ocapa->c_capa.lc_opc != CAPA_OPC_OSS_TRUNC);
+
+ if (!capa_is_to_expire(ocapa)) {
+ next = ocapa;
+ break;
+ }
+
+ list_del_init(&ocapa->c_list);
+
+			/* For an MDS capability, only renew those which belong
+			 * to a dir, whose inode is open, or for which the
+			 * client holds a LOOKUP lock.
+ */
+ /* ibits may be changed by ll_have_md_lock() so we have
+ * to set it each time */
+ ibits = MDS_INODELOCK_LOOKUP;
+ if (capa_for_mds(&ocapa->c_capa) &&
+ !S_ISDIR(ocapa->u.cli.inode->i_mode) &&
+ obd_capa_open_count(ocapa) == 0 &&
+ !ll_have_md_lock(ocapa->u.cli.inode,
+ &ibits, LCK_MINMODE)) {
+ DEBUG_CAPA(D_SEC, &ocapa->c_capa,
+ "skip renewal for");
+ sort_add_capa(ocapa, &ll_idle_capas);
+ continue;
+ }
+
+ /* for OSS capability, only renew those whose inode is
+ * opened.
+ */
+ if (capa_for_oss(&ocapa->c_capa) &&
+ obd_capa_open_count(ocapa) == 0) {
+				/* An OSS capa with open count == 0 won't be
+				 * renewed; move it to the idle list */
+ sort_add_capa(ocapa, &ll_idle_capas);
+ continue;
+ }
+
+ /* NB iput() is in ll_update_capa() */
+ inode = igrab(ocapa->u.cli.inode);
+ if (inode == NULL) {
+ DEBUG_CAPA(D_ERROR, &ocapa->c_capa,
+ "igrab failed for");
+ continue;
+ }
+
+ capa_get(ocapa);
+ ll_capa_renewed++;
+ spin_unlock(&capa_lock);
+ rc = md_renew_capa(ll_i2mdexp(inode), ocapa,
+ ll_update_capa);
+ spin_lock(&capa_lock);
+ if (rc) {
+ DEBUG_CAPA(D_ERROR, &ocapa->c_capa,
+ "renew failed: %d", rc);
+ ll_capa_renewal_failed++;
+ }
+ }
+
+ if (next)
+ update_capa_timer(next, capa_renewal_time(next));
+
+ list_for_each_entry_safe(ocapa, tmp, &ll_idle_capas,
+ c_list) {
+ if (!capa_is_expired(ocapa)) {
+ if (!next)
+ update_capa_timer(ocapa,
+ ocapa->c_expiry);
+ break;
+ }
+
+ if (atomic_read(&ocapa->c_refc) > 1) {
+ DEBUG_CAPA(D_SEC, &ocapa->c_capa,
+ "expired(c_refc %d), don't release",
+ atomic_read(&ocapa->c_refc));
+ /* don't try to renew any more */
+ list_del_init(&ocapa->c_list);
+ continue;
+ }
+
+ /* expired capa is released. */
+ DEBUG_CAPA(D_SEC, &ocapa->c_capa, "release expired");
+ ll_delete_capa(ocapa);
+ }
+
+ spin_unlock(&capa_lock);
+ }
+
+ thread_set_flags(&ll_capa_thread, SVC_STOPPED);
+ wake_up(&ll_capa_thread.t_ctl_waitq);
+ return 0;
+}
+
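+/* Renewal timer callback: wake up the capa thread so it can process capas
+ * that are due for renewal or have expired. */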
+void ll_capa_timer_callback(unsigned long unused)
+{
+ wake_up(&ll_capa_thread.t_ctl_waitq);
+}
+
+int ll_capa_thread_start(void)
+{
+ struct task_struct *task;
+
+ init_waitqueue_head(&ll_capa_thread.t_ctl_waitq);
+
+ task = kthread_run(capa_thread_main, NULL, "ll_capa");
+ if (IS_ERR(task)) {
+ CERROR("cannot start expired capa thread: rc %ld\n",
+ PTR_ERR(task));
+ return PTR_ERR(task);
+ }
+ wait_event(ll_capa_thread.t_ctl_waitq,
+ thread_is_running(&ll_capa_thread));
+
+ return 0;
+}
+
+void ll_capa_thread_stop(void)
+{
+ thread_set_flags(&ll_capa_thread, SVC_STOPPING);
+ wake_up(&ll_capa_thread.t_ctl_waitq);
+ wait_event(ll_capa_thread.t_ctl_waitq,
+ thread_is_stopped(&ll_capa_thread));
+}
+
+struct obd_capa *ll_osscapa_get(struct inode *inode, __u64 opc)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct obd_capa *ocapa;
+ int found = 0;
+
+ if ((ll_i2sbi(inode)->ll_flags & LL_SBI_OSS_CAPA) == 0)
+ return NULL;
+
+ LASSERT(opc == CAPA_OPC_OSS_WRITE || opc == CAPA_OPC_OSS_RW ||
+ opc == CAPA_OPC_OSS_TRUNC);
+
+ spin_lock(&capa_lock);
+ list_for_each_entry(ocapa, &lli->lli_oss_capas, u.cli.lli_list) {
+ if (capa_is_expired(ocapa))
+ continue;
+ if ((opc & CAPA_OPC_OSS_WRITE) &&
+ capa_opc_supported(&ocapa->c_capa, CAPA_OPC_OSS_WRITE)) {
+ found = 1;
+ break;
+ } else if ((opc & CAPA_OPC_OSS_READ) &&
+ capa_opc_supported(&ocapa->c_capa,
+ CAPA_OPC_OSS_READ)) {
+ found = 1;
+ break;
+ } else if ((opc & CAPA_OPC_OSS_TRUNC) &&
+ capa_opc_supported(&ocapa->c_capa, opc)) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found) {
+ LASSERT(lu_fid_eq(capa_fid(&ocapa->c_capa),
+ ll_inode2fid(inode)));
+ LASSERT(ocapa->c_site == CAPA_SITE_CLIENT);
+
+ capa_get(ocapa);
+
+ DEBUG_CAPA(D_SEC, &ocapa->c_capa, "found client");
+ } else {
+ ocapa = NULL;
+
+ if (atomic_read(&ll_capa_debug)) {
+ CERROR("no capability for "DFID" opc "LPX64"\n",
+ PFID(&lli->lli_fid), opc);
+ atomic_set(&ll_capa_debug, 0);
+ }
+ }
+ spin_unlock(&capa_lock);
+
+ return ocapa;
+}
+EXPORT_SYMBOL(ll_osscapa_get);
+
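+/* Return a referenced MDS capa for @inode, or NULL if MDS capas are disabled
+ * for this mount or none is currently cached. */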
+struct obd_capa *ll_mdscapa_get(struct inode *inode)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct obd_capa *ocapa;
+
+ LASSERT(inode != NULL);
+
+ if ((ll_i2sbi(inode)->ll_flags & LL_SBI_MDS_CAPA) == 0)
+ return NULL;
+
+ spin_lock(&capa_lock);
+ ocapa = capa_get(lli->lli_mds_capa);
+ spin_unlock(&capa_lock);
+ if (!ocapa && atomic_read(&ll_capa_debug)) {
+ CERROR("no mds capability for "DFID"\n", PFID(&lli->lli_fid));
+ atomic_set(&ll_capa_debug, 0);
+ }
+
+ return ocapa;
+}
+
+static struct obd_capa *do_add_mds_capa(struct inode *inode,
+ struct obd_capa *ocapa)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct obd_capa *old = lli->lli_mds_capa;
+ struct lustre_capa *capa = &ocapa->c_capa;
+
+ if (!old) {
+ ocapa->u.cli.inode = inode;
+ lli->lli_mds_capa = ocapa;
+ capa_count[CAPA_SITE_CLIENT]++;
+
+ DEBUG_CAPA(D_SEC, capa, "add MDS");
+ } else {
+ spin_lock(&old->c_lock);
+ old->c_capa = *capa;
+ spin_unlock(&old->c_lock);
+
+ DEBUG_CAPA(D_SEC, capa, "update MDS");
+
+ capa_put(ocapa);
+ ocapa = old;
+ }
+ return ocapa;
+}
+
+static struct obd_capa *do_lookup_oss_capa(struct inode *inode, int opc)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct obd_capa *ocapa;
+
+ /* inside capa_lock */
+ list_for_each_entry(ocapa, &lli->lli_oss_capas, u.cli.lli_list) {
+ if ((capa_opc(&ocapa->c_capa) & opc) != opc)
+ continue;
+
+ LASSERT(lu_fid_eq(capa_fid(&ocapa->c_capa),
+ ll_inode2fid(inode)));
+ LASSERT(ocapa->c_site == CAPA_SITE_CLIENT);
+
+ DEBUG_CAPA(D_SEC, &ocapa->c_capa, "found client");
+ return ocapa;
+ }
+
+ return NULL;
+}
+
+static inline void inode_add_oss_capa(struct inode *inode,
+ struct obd_capa *ocapa)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct obd_capa *tmp;
+ struct list_head *next = NULL;
+
+ /* capa is sorted in lli_oss_capas so lookup can always find the
+ * latest one */
+ list_for_each_entry(tmp, &lli->lli_oss_capas, u.cli.lli_list) {
+ if (cfs_time_after(ocapa->c_expiry, tmp->c_expiry)) {
+ next = &tmp->u.cli.lli_list;
+ break;
+ }
+ }
+ LASSERT(&ocapa->u.cli.lli_list != next);
+ list_move_tail(&ocapa->u.cli.lli_list, next ?: &lli->lli_oss_capas);
+}
+
+static struct obd_capa *do_add_oss_capa(struct inode *inode,
+ struct obd_capa *ocapa)
+{
+ struct obd_capa *old;
+ struct lustre_capa *capa = &ocapa->c_capa;
+
+ LASSERTF(S_ISREG(inode->i_mode),
+ "inode has oss capa, but not regular file, mode: %d\n",
+ inode->i_mode);
+
+ /* FIXME: can't replace it so easily with fine-grained opc */
+ old = do_lookup_oss_capa(inode, capa_opc(capa) & CAPA_OPC_OSS_ONLY);
+ if (!old) {
+ ocapa->u.cli.inode = inode;
+ INIT_LIST_HEAD(&ocapa->u.cli.lli_list);
+ capa_count[CAPA_SITE_CLIENT]++;
+
+ DEBUG_CAPA(D_SEC, capa, "add OSS");
+ } else {
+ spin_lock(&old->c_lock);
+ old->c_capa = *capa;
+ spin_unlock(&old->c_lock);
+
+ DEBUG_CAPA(D_SEC, capa, "update OSS");
+
+ capa_put(ocapa);
+ ocapa = old;
+ }
+
+ inode_add_oss_capa(inode, ocapa);
+ return ocapa;
+}
+
+struct obd_capa *ll_add_capa(struct inode *inode, struct obd_capa *ocapa)
+{
+ spin_lock(&capa_lock);
+ ocapa = capa_for_mds(&ocapa->c_capa) ? do_add_mds_capa(inode, ocapa) :
+ do_add_oss_capa(inode, ocapa);
+
+	/* a truncate capa won't be renewed */
+ if (ocapa->c_capa.lc_opc != CAPA_OPC_OSS_TRUNC) {
+ set_capa_expiry(ocapa);
+ list_del_init(&ocapa->c_list);
+ sort_add_capa(ocapa, ll_capa_list);
+
+ update_capa_timer(ocapa, capa_renewal_time(ocapa));
+ }
+
+ spin_unlock(&capa_lock);
+
+ atomic_set(&ll_capa_debug, 1);
+ return ocapa;
+}
+
+static inline void delay_capa_renew(struct obd_capa *oc, cfs_time_t delay)
+{
+	/* NB: set a fake expiry for this capa to prevent it from being
+	 * renewed too soon */
+ oc->c_expiry = cfs_time_add(oc->c_expiry, cfs_time_seconds(delay));
+}
+
+int ll_update_capa(struct obd_capa *ocapa, struct lustre_capa *capa)
+{
+ struct inode *inode = ocapa->u.cli.inode;
+ int rc = 0;
+
+ LASSERT(ocapa);
+
+ if (IS_ERR(capa)) {
+ /* set error code */
+ rc = PTR_ERR(capa);
+ spin_lock(&capa_lock);
+ if (rc == -ENOENT) {
+ DEBUG_CAPA(D_SEC, &ocapa->c_capa,
+ "renewal canceled because object removed");
+ ll_capa_renewal_noent++;
+ } else {
+ ll_capa_renewal_failed++;
+
+			/* A failed capa won't be renewed any longer, but on
+			 * -EIO the client might be doing recovery, so retry
+			 * in 2 min. */
+ if (rc == -EIO && !capa_is_expired(ocapa)) {
+ delay_capa_renew(ocapa, 120);
+ DEBUG_CAPA(D_ERROR, &ocapa->c_capa,
+ "renewal failed: -EIO, "
+ "retry in 2 mins");
+ ll_capa_renewal_retries++;
+ GOTO(retry, rc);
+ } else {
+ DEBUG_CAPA(D_ERROR, &ocapa->c_capa,
+ "renewal failed(rc: %d) for", rc);
+ }
+ }
+
+ list_del_init(&ocapa->c_list);
+ sort_add_capa(ocapa, &ll_idle_capas);
+ spin_unlock(&capa_lock);
+
+ capa_put(ocapa);
+ iput(inode);
+ return rc;
+ }
+
+ spin_lock(&ocapa->c_lock);
+ LASSERT(!memcmp(&ocapa->c_capa, capa,
+ offsetof(struct lustre_capa, lc_opc)));
+ ocapa->c_capa = *capa;
+ set_capa_expiry(ocapa);
+ spin_unlock(&ocapa->c_lock);
+
+ spin_lock(&capa_lock);
+ if (capa_for_oss(capa))
+ inode_add_oss_capa(inode, ocapa);
+ DEBUG_CAPA(D_SEC, capa, "renew");
+retry:
+ list_del_init(&ocapa->c_list);
+ sort_add_capa(ocapa, ll_capa_list);
+ update_capa_timer(ocapa, capa_renewal_time(ocapa));
+ spin_unlock(&capa_lock);
+
+ capa_put(ocapa);
+ iput(inode);
+ return rc;
+}
+
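+/* Count opens of regular files so that capa_thread_main() knows whether a
+ * capa is still in use and worth renewing (see obd_capa_open_count()). */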
+void ll_capa_open(struct inode *inode)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ if ((ll_i2sbi(inode)->ll_flags & (LL_SBI_MDS_CAPA | LL_SBI_OSS_CAPA))
+ == 0)
+ return;
+
+ if (!S_ISREG(inode->i_mode))
+ return;
+
+ atomic_inc(&lli->lli_open_count);
+}
+
+void ll_capa_close(struct inode *inode)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ if ((ll_i2sbi(inode)->ll_flags & (LL_SBI_MDS_CAPA | LL_SBI_OSS_CAPA))
+ == 0)
+ return;
+
+ if (!S_ISREG(inode->i_mode))
+ return;
+
+ atomic_dec(&lli->lli_open_count);
+}
+
+/* delete CAPA_OPC_OSS_TRUNC only */
+void ll_truncate_free_capa(struct obd_capa *ocapa)
+{
+ if (!ocapa)
+ return;
+
+ LASSERT(ocapa->c_capa.lc_opc & CAPA_OPC_OSS_TRUNC);
+ DEBUG_CAPA(D_SEC, &ocapa->c_capa, "free truncate");
+
+	/* release the ref taken at lookup */
+ capa_put(ocapa);
+ if (likely(ocapa->c_capa.lc_opc == CAPA_OPC_OSS_TRUNC)) {
+ spin_lock(&capa_lock);
+ ll_delete_capa(ocapa);
+ spin_unlock(&capa_lock);
+ }
+}
+
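+/* Release the MDS capa and all OSS capas of @inode; called from
+ * ll_clear_inode() when the inode is being cleared. */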
+void ll_clear_inode_capas(struct inode *inode)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct obd_capa *ocapa, *tmp;
+
+ spin_lock(&capa_lock);
+ ocapa = lli->lli_mds_capa;
+ if (ocapa)
+ ll_delete_capa(ocapa);
+
+ list_for_each_entry_safe(ocapa, tmp, &lli->lli_oss_capas,
+ u.cli.lli_list)
+ ll_delete_capa(ocapa);
+ spin_unlock(&capa_lock);
+}
+
+void ll_print_capa_stat(struct ll_sb_info *sbi)
+{
+ if (sbi->ll_flags & (LL_SBI_MDS_CAPA | LL_SBI_OSS_CAPA))
+ LCONSOLE_INFO("Fid capabilities renewed: %llu\n"
+ "Fid capabilities renewal ENOENT: %llu\n"
+ "Fid capabilities failed to renew: %llu\n"
+ "Fid capabilities renewal retries: %llu\n",
+ ll_capa_renewed, ll_capa_renewal_noent,
+ ll_capa_renewal_failed, ll_capa_renewal_retries);
+}
diff --git a/drivers/staging/lustre/lustre/llite/llite_close.c b/drivers/staging/lustre/lustre/llite/llite_close.c
new file mode 100644
index 0000000..1f5825c8
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/llite_close.c
@@ -0,0 +1,397 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/llite/llite_close.c
+ *
+ * Lustre Lite routines to issue a secondary close after writeback
+ */
+
+#include <linux/module.h>
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+#include <lustre_lite.h>
+#include "llite_internal.h"
+
+/** records that a write is in flight */
+void vvp_write_pending(struct ccc_object *club, struct ccc_page *page)
+{
+ struct ll_inode_info *lli = ll_i2info(club->cob_inode);
+
+ spin_lock(&lli->lli_lock);
+ lli->lli_flags |= LLIF_SOM_DIRTY;
+ if (page != NULL && list_empty(&page->cpg_pending_linkage))
+ list_add(&page->cpg_pending_linkage,
+ &club->cob_pending_list);
+ spin_unlock(&lli->lli_lock);
+}
+
+/** records that a write has completed */
+void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
+{
+ struct ll_inode_info *lli = ll_i2info(club->cob_inode);
+ int rc = 0;
+
+ spin_lock(&lli->lli_lock);
+ if (page != NULL && !list_empty(&page->cpg_pending_linkage)) {
+ list_del_init(&page->cpg_pending_linkage);
+ rc = 1;
+ }
+ spin_unlock(&lli->lli_lock);
+ if (rc)
+ ll_queue_done_writing(club->cob_inode, 0);
+}
+
+/** Queues DONE_WRITING if
+ * - done writing is allowed;
+ * - inode has no dirty pages; */
+void ll_queue_done_writing(struct inode *inode, unsigned long flags)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
+
+ spin_lock(&lli->lli_lock);
+ lli->lli_flags |= flags;
+
+ if ((lli->lli_flags & LLIF_DONE_WRITING) &&
+ list_empty(&club->cob_pending_list)) {
+ struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;
+
+ if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
+			CWARN("ino %lu/%u(flags %u) SOM valid just after "
+ "recovery\n",
+ inode->i_ino, inode->i_generation,
+ lli->lli_flags);
+ /* DONE_WRITING is allowed and inode has no dirty page. */
+ spin_lock(&lcq->lcq_lock);
+
+ LASSERT(list_empty(&lli->lli_close_list));
+ CDEBUG(D_INODE, "adding inode %lu/%u to close list\n",
+ inode->i_ino, inode->i_generation);
+ list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
+
+		/* Avoid a concurrent insertion into the close thread queue:
+		 * the inode is already in the close thread; then open(),
+		 * write() and close() happen and the epoch is closed as the
+		 * inode is marked LLIF_EPOCH_PENDING. When the pages are
+		 * written, the inode should not be inserted into the queue
+		 * again; clear this flag to avoid it. */
+ lli->lli_flags &= ~LLIF_DONE_WRITING;
+
+ wake_up(&lcq->lcq_waitq);
+ spin_unlock(&lcq->lcq_lock);
+ }
+ spin_unlock(&lli->lli_lock);
+}
+
+/** Pack SOM attributes into @op_data for the CLOSE and DONE_WRITING RPCs. */
+void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ op_data->op_flags |= MF_SOM_CHANGE;
+ /* Check if Size-on-MDS attributes are valid. */
+ if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
+		CERROR("ino %lu/%u(flags %u) SOM valid just after "
+ "recovery\n", inode->i_ino, inode->i_generation,
+ lli->lli_flags);
+
+ if (!cl_local_size(inode)) {
+ /* Send Size-on-MDS Attributes if valid. */
+ op_data->op_attr.ia_valid |= ATTR_MTIME_SET | ATTR_CTIME_SET |
+ ATTR_ATIME_SET | ATTR_SIZE | ATTR_BLOCKS;
+ }
+}
+
+/** Closes ioepoch and packs Size-on-MDS attribute if needed into @op_data. */
+void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
+ struct obd_client_handle **och, unsigned long flags)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
+
+ spin_lock(&lli->lli_lock);
+ if (!(list_empty(&club->cob_pending_list))) {
+ if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
+ LASSERT(*och != NULL);
+ LASSERT(lli->lli_pending_och == NULL);
+ /* Inode is dirty and there is no pending write done
+ * request yet, DONE_WRITE is to be sent later. */
+ lli->lli_flags |= LLIF_EPOCH_PENDING;
+ lli->lli_pending_och = *och;
+ spin_unlock(&lli->lli_lock);
+
+ inode = igrab(inode);
+ LASSERT(inode);
+ GOTO(out, 0);
+ }
+ if (flags & LLIF_DONE_WRITING) {
+			/* Some pages are still dirty, so it is too early to
+			 * send DONE_WRITE. Wait until all pages have been
+			 * flushed and try DONE_WRITE again later. */
+ LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
+ lli->lli_flags |= LLIF_DONE_WRITING;
+ spin_unlock(&lli->lli_lock);
+
+ inode = igrab(inode);
+ LASSERT(inode);
+ GOTO(out, 0);
+ }
+ }
+ CDEBUG(D_INODE, "Epoch "LPU64" closed on "DFID"\n",
+ ll_i2info(inode)->lli_ioepoch, PFID(&lli->lli_fid));
+ op_data->op_flags |= MF_EPOCH_CLOSE;
+
+ if (flags & LLIF_DONE_WRITING) {
+ LASSERT(lli->lli_flags & LLIF_SOM_DIRTY);
+ LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
+ *och = lli->lli_pending_och;
+ lli->lli_pending_och = NULL;
+ lli->lli_flags &= ~LLIF_EPOCH_PENDING;
+ } else {
+		/* Pack Size-on-MDS inode attributes only if they have changed */
+ if (!(lli->lli_flags & LLIF_SOM_DIRTY)) {
+ spin_unlock(&lli->lli_lock);
+ GOTO(out, 0);
+ }
+
+ /* There is a pending DONE_WRITE -- close epoch with no
+ * attribute change. */
+ if (lli->lli_flags & LLIF_EPOCH_PENDING) {
+ spin_unlock(&lli->lli_lock);
+ GOTO(out, 0);
+ }
+ }
+
+ LASSERT(list_empty(&club->cob_pending_list));
+ lli->lli_flags &= ~LLIF_SOM_DIRTY;
+ spin_unlock(&lli->lli_lock);
+ ll_done_writing_attr(inode, op_data);
+
+out:
+ return;
+}
+
+/**
+ * Client updates SOM attributes on the MDS (including llog cookies):
+ * obd_getattr with no lock and md_setattr.
+ */
+int ll_som_update(struct inode *inode, struct md_op_data *op_data)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ptlrpc_request *request = NULL;
+ __u32 old_flags;
+ struct obdo *oa;
+ int rc;
+
+ LASSERT(op_data != NULL);
+ if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
+		CERROR("ino %lu/%u(flags %u) SOM valid just after "
+ "recovery\n", inode->i_ino, inode->i_generation,
+ lli->lli_flags);
+
+ OBDO_ALLOC(oa);
+ if (!oa) {
+ CERROR("can't allocate memory for Size-on-MDS update.\n");
+ return -ENOMEM;
+ }
+
+ old_flags = op_data->op_flags;
+ op_data->op_flags = MF_SOM_CHANGE;
+
+ /* If inode is already in another epoch, skip getattr from OSTs. */
+ if (lli->lli_ioepoch == op_data->op_ioepoch) {
+ rc = ll_inode_getattr(inode, oa, op_data->op_ioepoch,
+ old_flags & MF_GETATTR_LOCK);
+ if (rc) {
+ oa->o_valid = 0;
+ if (rc != -ENOENT)
+ CERROR("inode_getattr failed (%d): unable to "
+ "send a Size-on-MDS attribute update "
+ "for inode %lu/%u\n", rc, inode->i_ino,
+ inode->i_generation);
+ } else {
+ CDEBUG(D_INODE, "Size-on-MDS update on "DFID"\n",
+ PFID(&lli->lli_fid));
+ }
+ /* Install attributes into op_data. */
+ md_from_obdo(op_data, oa, oa->o_valid);
+ }
+
+ rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data,
+ NULL, 0, NULL, 0, &request, NULL);
+ ptlrpc_req_finished(request);
+
+ OBDO_FREE(oa);
+ return rc;
+}
+
+/**
+ * Closes the ioepoch and packs all the attributes into @op_data for
+ * DONE_WRITING rpc.
+ */
+static void ll_prepare_done_writing(struct inode *inode,
+ struct md_op_data *op_data,
+ struct obd_client_handle **och)
+{
+ ll_ioepoch_close(inode, op_data, och, LLIF_DONE_WRITING);
+ /* If there is no @och, we do not do D_W yet. */
+ if (*och == NULL)
+ return;
+
+ ll_pack_inode2opdata(inode, op_data, &(*och)->och_fh);
+ ll_prep_md_op_data(op_data, inode, NULL, NULL,
+ 0, 0, LUSTRE_OPC_ANY, NULL);
+}
+
+/** Send a DONE_WRITING rpc. */
+static void ll_done_writing(struct inode *inode)
+{
+ struct obd_client_handle *och = NULL;
+ struct md_op_data *op_data;
+ int rc;
+
+ LASSERT(exp_connect_som(ll_i2mdexp(inode)));
+
+ OBD_ALLOC_PTR(op_data);
+ if (op_data == NULL) {
+ CERROR("can't allocate op_data\n");
+ return;
+ }
+
+ ll_prepare_done_writing(inode, op_data, &och);
+ /* If there is no @och, we do not do D_W yet. */
+ if (och == NULL)
+ GOTO(out, 0);
+
+ rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, NULL);
+ if (rc == -EAGAIN) {
+ /* MDS has instructed us to obtain Size-on-MDS attribute from
+		 * OSTs and send setattr back to the MDS. */
+ rc = ll_som_update(inode, op_data);
+ } else if (rc) {
+ CERROR("inode %lu mdc done_writing failed: rc = %d\n",
+ inode->i_ino, rc);
+ }
+out:
+ ll_finish_md_op_data(op_data);
+ if (och) {
+ md_clear_open_replay_data(ll_i2sbi(inode)->ll_md_exp, och);
+ OBD_FREE_PTR(och);
+ }
+}
+
+static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
+{
+ struct ll_inode_info *lli = NULL;
+
+ spin_lock(&lcq->lcq_lock);
+
+ if (!list_empty(&lcq->lcq_head)) {
+ lli = list_entry(lcq->lcq_head.next, struct ll_inode_info,
+ lli_close_list);
+ list_del_init(&lli->lli_close_list);
+ } else if (atomic_read(&lcq->lcq_stop))
+ lli = ERR_PTR(-EALREADY);
+
+ spin_unlock(&lcq->lcq_lock);
+ return lli;
+}
+
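+/* Close thread main loop: pull inodes off the close queue and issue the
+ * deferred DONE_WRITING for each, until asked to stop. */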
+static int ll_close_thread(void *arg)
+{
+ struct ll_close_queue *lcq = arg;
+
+ complete(&lcq->lcq_comp);
+
+ while (1) {
+ struct l_wait_info lwi = { 0 };
+ struct ll_inode_info *lli;
+ struct inode *inode;
+
+ l_wait_event_exclusive(lcq->lcq_waitq,
+ (lli = ll_close_next_lli(lcq)) != NULL,
+ &lwi);
+ if (IS_ERR(lli))
+ break;
+
+ inode = ll_info2i(lli);
+		CDEBUG(D_INFO, "done_writing for inode %lu/%u\n",
+ inode->i_ino, inode->i_generation);
+ ll_done_writing(inode);
+ iput(inode);
+ }
+
+ CDEBUG(D_INFO, "ll_close exiting\n");
+ complete(&lcq->lcq_comp);
+ return 0;
+}
+
+int ll_close_thread_start(struct ll_close_queue **lcq_ret)
+{
+ struct ll_close_queue *lcq;
+ struct task_struct *task;
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CLOSE_THREAD))
+ return -EINTR;
+
+ OBD_ALLOC(lcq, sizeof(*lcq));
+ if (lcq == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&lcq->lcq_lock);
+ INIT_LIST_HEAD(&lcq->lcq_head);
+ init_waitqueue_head(&lcq->lcq_waitq);
+ init_completion(&lcq->lcq_comp);
+
+ task = kthread_run(ll_close_thread, lcq, "ll_close");
+ if (IS_ERR(task)) {
+ OBD_FREE(lcq, sizeof(*lcq));
+ return PTR_ERR(task);
+ }
+
+ wait_for_completion(&lcq->lcq_comp);
+ *lcq_ret = lcq;
+ return 0;
+}
+
+void ll_close_thread_shutdown(struct ll_close_queue *lcq)
+{
+ init_completion(&lcq->lcq_comp);
+ atomic_inc(&lcq->lcq_stop);
+ wake_up(&lcq->lcq_waitq);
+ wait_for_completion(&lcq->lcq_comp);
+ OBD_FREE(lcq, sizeof(*lcq));
+}
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
new file mode 100644
index 0000000..47e443d
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -0,0 +1,1582 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef LLITE_INTERNAL_H
+#define LLITE_INTERNAL_H
+#include <lustre_debug.h>
+#include <lustre_ver.h>
+#include <lustre_disk.h> /* for s2sbi */
+#include <lustre_eacl.h>
+
+/* for struct cl_lock_descr and struct cl_io */
+#include <cl_object.h>
+#include <lclient.h>
+#include <lustre_mdc.h>
+#include <linux/lustre_intent.h>
+
+#ifndef FMODE_EXEC
+#define FMODE_EXEC 0
+#endif
+
+#ifndef VM_FAULT_RETRY
+#define VM_FAULT_RETRY 0
+#endif
+
+/* Kernel 3.1 kills LOOKUP_CONTINUE; LOOKUP_PARENT is equivalent to it.
+ * See kernel commit 49084c3bb2055c401f3493c13edae14d49128ca0 */
+#ifndef LOOKUP_CONTINUE
+#define LOOKUP_CONTINUE LOOKUP_PARENT
+#endif
+
+/** Only used on client-side for indicating the tail of dir hash/offset. */
+#define LL_DIR_END_OFF 0x7fffffffffffffffULL
+#define LL_DIR_END_OFF_32BIT 0x7fffffffUL
+
+#define LL_IT2STR(it) ((it) ? ldlm_it2str((it)->it_op) : "0")
+#define LUSTRE_FPRIVATE(file) ((file)->private_data)
+
+struct ll_dentry_data {
+ int lld_cwd_count;
+ int lld_mnt_count;
+ struct obd_client_handle lld_cwd_och;
+ struct obd_client_handle lld_mnt_och;
+ struct lookup_intent *lld_it;
+ unsigned int lld_sa_generation;
+ unsigned int lld_invalid:1;
+ struct rcu_head lld_rcu_head;
+};
+
+#define ll_d2d(de) ((struct ll_dentry_data*)((de)->d_fsdata))
+
+extern struct file_operations ll_pgcache_seq_fops;
+
+#define LLI_INODE_MAGIC 0x111d0de5
+#define LLI_INODE_DEAD 0xdeadd00d
+
+/* remote client permission cache */
+#define REMOTE_PERM_HASHSIZE 16
+
+struct ll_getname_data {
+ struct dir_context ctx;
+ char *lgd_name; /* points to a buffer with NAME_MAX+1 size */
+ struct lu_fid lgd_fid; /* target fid we are looking for */
+ int lgd_found; /* inode matched? */
+};
+
+/* llite setxid/access permission for user on remote client */
+struct ll_remote_perm {
+ struct hlist_node lrp_list;
+ uid_t lrp_uid;
+ gid_t lrp_gid;
+ uid_t lrp_fsuid;
+ gid_t lrp_fsgid;
+ int lrp_access_perm; /* MAY_READ/WRITE/EXEC, this
+ is access permission with
+ lrp_fsuid/lrp_fsgid. */
+};
+
+enum lli_flags {
+	/* MDS has authority over the Size-on-MDS attributes. */
+ LLIF_MDS_SIZE_LOCK = (1 << 0),
+ /* Epoch close is postponed. */
+ LLIF_EPOCH_PENDING = (1 << 1),
+ /* DONE WRITING is allowed. */
+ LLIF_DONE_WRITING = (1 << 2),
+	/* Size-on-MDS attributes are changed. An attribute update needs to
+ * be sent to MDS. */
+ LLIF_SOM_DIRTY = (1 << 3),
+	/* File is contended */
+ LLIF_CONTENDED = (1 << 4),
+ /* Truncate uses server lock for this file */
+ LLIF_SRVLOCK = (1 << 5),
+ /* File data is modified. */
+ LLIF_DATA_MODIFIED = (1 << 6),
+};
+
+struct ll_inode_info {
+ __u32 lli_inode_magic;
+ __u32 lli_flags;
+ __u64 lli_ioepoch;
+
+ spinlock_t lli_lock;
+ struct posix_acl *lli_posix_acl;
+
+ struct hlist_head *lli_remote_perms;
+ struct mutex lli_rmtperm_mutex;
+
+ /* identifying fields for both metadata and data stacks. */
+ struct lu_fid lli_fid;
+ /* Parent fid for accessing default stripe data on parent directory
+ * for allocating OST objects after a mknod() and later open-by-FID. */
+ struct lu_fid lli_pfid;
+
+ struct list_head lli_close_list;
+ struct list_head lli_oss_capas;
+	/* open count, currently used by capabilities only; indicates
+	 * whether the capability needs renewal */
+ atomic_t lli_open_count;
+ struct obd_capa *lli_mds_capa;
+ cfs_time_t lli_rmtperm_time;
+
+ /* handle is to be sent to MDS later on done_writing and setattr.
+ * Open handle data are needed for the recovery to reconstruct
+ * the inode state on the MDS. XXX: recovery is not ready yet. */
+ struct obd_client_handle *lli_pending_och;
+
+ /* We need all three because every inode may be opened in different
+ * modes */
+ struct obd_client_handle *lli_mds_read_och;
+ struct obd_client_handle *lli_mds_write_och;
+ struct obd_client_handle *lli_mds_exec_och;
+ __u64 lli_open_fd_read_count;
+ __u64 lli_open_fd_write_count;
+ __u64 lli_open_fd_exec_count;
+ /* Protects access to och pointers and their usage counters */
+ struct mutex lli_och_mutex;
+
+ struct inode lli_vfs_inode;
+
+ /* the most recent timestamps obtained from mds */
+ struct ost_lvb lli_lvb;
+ spinlock_t lli_agl_lock;
+
+	/* Try to keep the d::member and f::member aligned. Before using
+	 * these members, make clear whether the inode is a directory or not. */
+ union {
+ /* for directory */
+ struct {
+ /* serialize normal readdir and statahead-readdir. */
+ struct mutex d_readdir_mutex;
+
+ /* metadata statahead */
+			/* Since parent and child threads can share the same
+			 * @file struct, "opendir_key" is the token used at dir
+			 * close to handle the case where the parent exits
+			 * before the child -- its holder should clean up the
+			 * dir readahead. */
+ void *d_opendir_key;
+ struct ll_statahead_info *d_sai;
+ struct posix_acl *d_def_acl;
+ /* protect statahead stuff. */
+ spinlock_t d_sa_lock;
+			/* "opendir_pid" is the token used at lookup/revalidate
+			 * -- it identifies the owner of the dir statahead. */
+ pid_t d_opendir_pid;
+ } d;
+
+#define lli_readdir_mutex u.d.d_readdir_mutex
+#define lli_opendir_key u.d.d_opendir_key
+#define lli_sai u.d.d_sai
+#define lli_def_acl u.d.d_def_acl
+#define lli_sa_lock u.d.d_sa_lock
+#define lli_opendir_pid u.d.d_opendir_pid
+
+ /* for non-directory */
+ struct {
+ struct semaphore f_size_sem;
+ void *f_size_sem_owner;
+ char *f_symlink_name;
+ __u64 f_maxbytes;
+ /*
+ * struct rw_semaphore {
+ * signed long count; // align d.d_def_acl
+ * spinlock_t wait_lock; // align d.d_sa_lock
+ * struct list_head wait_list;
+ * }
+ */
+ struct rw_semaphore f_trunc_sem;
+ struct mutex f_write_mutex;
+
+ struct rw_semaphore f_glimpse_sem;
+ cfs_time_t f_glimpse_time;
+ struct list_head f_agl_list;
+ __u64 f_agl_index;
+
+ /* for writepage() only to communicate to fsync */
+ int f_async_rc;
+
+			/* The volatile-file criterion is based on the file
+			 * name; this flag keeps the test result so that the
+			 * strcmp is done only once
+			 */
+ bool f_volatile;
+ /*
+			 * Whenever a process tries to read/write the file, the
+			 * jobid of the process is saved here, and it will be
+			 * packed into the write RPC when flushing later.
+			 *
+			 * So the read/write statistics for the jobid will not
+			 * be accurate if the file is shared by different jobs.
+ */
+ char f_jobid[JOBSTATS_JOBID_SIZE];
+ } f;
+
+#define lli_size_sem u.f.f_size_sem
+#define lli_size_sem_owner u.f.f_size_sem_owner
+#define lli_symlink_name u.f.f_symlink_name
+#define lli_maxbytes u.f.f_maxbytes
+#define lli_trunc_sem u.f.f_trunc_sem
+#define lli_write_mutex u.f.f_write_mutex
+#define lli_glimpse_sem u.f.f_glimpse_sem
+#define lli_glimpse_time u.f.f_glimpse_time
+#define lli_agl_list u.f.f_agl_list
+#define lli_agl_index u.f.f_agl_index
+#define lli_async_rc u.f.f_async_rc
+#define lli_jobid u.f.f_jobid
+#define lli_volatile u.f.f_volatile
+
+ } u;
+
+	/* XXX: For the following frequently used members, although they may
+	 * only be used for non-directory objects, it would be time-wasting to
+	 * check whether the object is a directory before using them. On the
+	 * other hand, since sizeof(f) > sizeof(d) currently, moving those
+	 * members into u.f would not reduce the "ll_inode_info" size anyway.
+	 * So keep them outside.
+	 *
+	 * In the future, if more members are added only for directories,
+	 * some of the following members can be moved into u.f.
+	 */
+ bool lli_has_smd;
+ struct cl_object *lli_clob;
+
+ /* mutex to request for layout lock exclusively. */
+ struct mutex lli_layout_mutex;
+ /* valid only inside LAYOUT ibits lock, protected by lli_layout_mutex */
+ __u32 lli_layout_gen;
+};
+
+/*
+ * Locking to guarantee consistency of non-atomic updates to long long i_size,
+ * consistency between file size and KMS.
+ *
+ * Implemented by ->lli_size_sem and ->lsm_lock, nested in that order.
+ */
+
+void ll_inode_size_lock(struct inode *inode);
+void ll_inode_size_unlock(struct inode *inode);
+
+// FIXME: replace the name of this with LL_I to conform to kernel stuff
+// static inline struct ll_inode_info *LL_I(struct inode *inode)
+static inline struct ll_inode_info *ll_i2info(struct inode *inode)
+{
+ return container_of(inode, struct ll_inode_info, lli_vfs_inode);
+}
+
+/* default to about 40meg of readahead on a given system. That much tied
+ * up in 512k readahead requests serviced at 40ms each is about 1GB/s. */
+#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_CACHE_SHIFT))
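+/* Illustrative arithmetic for the default above (not part of this patch):
+ * 40MB / 512KB = 80 readahead requests in flight; completing 80 x 512KB
+ * every 40ms is 40MB per 0.04s, i.e. roughly 1GB/s of sustained readahead. */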
+
+/* default to read-ahead full files smaller than 2MB on the second read */
+#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_CACHE_SHIFT))
+
+enum ra_stat {
+ RA_STAT_HIT = 0,
+ RA_STAT_MISS,
+ RA_STAT_DISTANT_READPAGE,
+ RA_STAT_MISS_IN_WINDOW,
+ RA_STAT_FAILED_GRAB_PAGE,
+ RA_STAT_FAILED_MATCH,
+ RA_STAT_DISCARDED,
+ RA_STAT_ZERO_LEN,
+ RA_STAT_ZERO_WINDOW,
+ RA_STAT_EOF,
+ RA_STAT_MAX_IN_FLIGHT,
+ RA_STAT_WRONG_GRAB_PAGE,
+ _NR_RA_STAT,
+};
+
+struct ll_ra_info {
+ atomic_t ra_cur_pages;
+ unsigned long ra_max_pages;
+ unsigned long ra_max_pages_per_file;
+ unsigned long ra_max_read_ahead_whole_pages;
+};
+
+/* ra_io_arg is filled at the beginning of ll_readahead while holding
+ * ras_lock; the subsequent ll_read_ahead_pages then reads RA pages
+ * according to this arg. All the items in this structure are counted
+ * in units of page index.
+ */
+struct ra_io_arg {
+ unsigned long ria_start; /* start offset of read-ahead*/
+ unsigned long ria_end; /* end offset of read-ahead*/
+ /* If a stride read pattern is detected, ria_stoff is where the
+ * stride read starts. Note: for normal read-ahead, the value
+ * here is meaningless and will not be accessed. */
+ pgoff_t ria_stoff;
+ /* ria_length and ria_pages are the stride length and the data length
+ * within one stride in stride I/O mode. They are also used to check
+ * whether a read-ahead page belongs to stride I/O read-ahead. */
+ unsigned long ria_length;
+ unsigned long ria_pages;
+};
+
+/* LL_HIST_MAX=32 causes an overflow */
+#define LL_HIST_MAX 28
+#define LL_HIST_START 12 /* buckets start at 2^12 = 4k */
+#define LL_PROCESS_HIST_MAX 10
+struct per_process_info {
+ pid_t pid;
+ struct obd_histogram pp_r_hist;
+ struct obd_histogram pp_w_hist;
+};
+
+/* pp_extents[LL_PROCESS_HIST_MAX] will hold the combined process info */
+struct ll_rw_extents_info {
+ struct per_process_info pp_extents[LL_PROCESS_HIST_MAX + 1];
+};
+
+#define LL_OFFSET_HIST_MAX 100
+struct ll_rw_process_info {
+ pid_t rw_pid;
+ int rw_op;
+ loff_t rw_range_start;
+ loff_t rw_range_end;
+ loff_t rw_last_file_pos;
+ loff_t rw_offset;
+ size_t rw_smallest_extent;
+ size_t rw_largest_extent;
+ struct ll_file_data *rw_last_file;
+};
+
+enum stats_track_type {
+ STATS_TRACK_ALL = 0, /* track all processes */
+ STATS_TRACK_PID, /* track process with this pid */
+ STATS_TRACK_PPID, /* track processes with this ppid */
+ STATS_TRACK_GID, /* track processes with this gid */
+ STATS_TRACK_LAST,
+};
+
+/* flags for sbi->ll_flags */
+#define LL_SBI_NOLCK 0x01 /* DLM locking disabled (directio-only) */
+#define LL_SBI_CHECKSUM 0x02 /* checksum each page as it's written */
+#define LL_SBI_FLOCK 0x04
+#define LL_SBI_USER_XATTR 0x08 /* support user xattr */
+#define LL_SBI_ACL 0x10 /* support ACL */
+#define LL_SBI_RMT_CLIENT 0x40 /* remote client */
+#define LL_SBI_MDS_CAPA 0x80 /* support mds capa */
+#define LL_SBI_OSS_CAPA 0x100 /* support oss capa */
+#define LL_SBI_LOCALFLOCK 0x200 /* Local flocks support by kernel */
+#define LL_SBI_LRU_RESIZE 0x400 /* lru resize support */
+#define LL_SBI_LAZYSTATFS 0x800 /* lazystatfs mount option */
+#define LL_SBI_SOM_PREVIEW 0x1000 /* SOM preview mount option */
+#define LL_SBI_32BIT_API 0x2000 /* generate 32 bit inodes. */
+#define LL_SBI_64BIT_HASH 0x4000 /* support 64-bits dir hash/offset */
+#define LL_SBI_AGL_ENABLED 0x8000 /* enable agl */
+#define LL_SBI_VERBOSE 0x10000 /* verbose mount/umount */
+#define LL_SBI_LAYOUT_LOCK 0x20000 /* layout lock support */
+#define LL_SBI_USER_FID2PATH 0x40000 /* allow fid2path by unprivileged users */
+
+#define LL_SBI_FLAGS { \
+ "nolck", \
+ "checksum", \
+ "flock", \
+ "xattr", \
+ "acl", \
+ "rmt_client", \
+ "mds_capa", \
+ "oss_capa", \
+ "flock", \
+ "lru_resize", \
+ "lazy_statfs", \
+ "som", \
+ "32bit_api", \
+ "64bit_hash", \
+ "agl", \
+ "verbose", \
+ "layout", \
+ "user_fid2path" }
+
+/* default value for ll_sb_info->contention_time */
+#define SBI_DEFAULT_CONTENTION_SECONDS 60
+/* default value for lockless_truncate_enable */
+#define SBI_DEFAULT_LOCKLESS_TRUNCATE_ENABLE 1
+#define RCE_HASHES 32
+
+struct rmtacl_ctl_entry {
+ struct list_head rce_list;
+ pid_t rce_key; /* hash key */
+ int rce_ops; /* acl operation type */
+};
+
+struct rmtacl_ctl_table {
+ spinlock_t rct_lock;
+ struct list_head rct_entries[RCE_HASHES];
+};
+
+#define EE_HASHES 32
+
+struct eacl_table {
+ spinlock_t et_lock;
+ struct list_head et_entries[EE_HASHES];
+};
+
+struct ll_sb_info {
+ struct list_head ll_list;
+ /* this protects pglist and ra_info. It isn't safe to
+ * grab from interrupt contexts */
+ spinlock_t ll_lock;
+ spinlock_t ll_pp_extent_lock; /* pp_extent entry*/
+ spinlock_t ll_process_lock; /* ll_rw_process_info */
+ struct obd_uuid ll_sb_uuid;
+ struct obd_export *ll_md_exp;
+ struct obd_export *ll_dt_exp;
+ struct proc_dir_entry* ll_proc_root;
+ struct lu_fid ll_root_fid; /* root object fid */
+
+ int ll_flags;
+ int ll_umounting:1;
+ struct list_head ll_conn_chain; /* per-conn chain of SBs */
+ struct lustre_client_ocd ll_lco;
+
+ struct list_head ll_orphan_dentry_list; /*please don't ask -p*/
+ struct ll_close_queue *ll_lcq;
+
+ struct lprocfs_stats *ll_stats; /* lprocfs stats counter */
+
+ struct cl_client_cache ll_cache;
+
+ struct lprocfs_stats *ll_ra_stats;
+
+ struct ll_ra_info ll_ra_info;
+ unsigned int ll_namelen;
+ struct file_operations *ll_fop;
+
+ /* =0 - hold lock over whole read/write
+ * >0 - max. chunk to be read/written w/o lock re-acquiring */
+ unsigned long ll_max_rw_chunk;
+ unsigned int ll_md_brw_size; /* used by readdir */
+
+ struct lu_site *ll_site;
+ struct cl_device *ll_cl;
+ /* Statistics */
+ struct ll_rw_extents_info ll_rw_extents_info;
+ int ll_extent_process_count;
+ struct ll_rw_process_info ll_rw_process_info[LL_PROCESS_HIST_MAX];
+ unsigned int ll_offset_process_count;
+ struct ll_rw_process_info ll_rw_offset_info[LL_OFFSET_HIST_MAX];
+ unsigned int ll_rw_offset_entry_count;
+ int ll_stats_track_id;
+ enum stats_track_type ll_stats_track_type;
+ int ll_rw_stats_on;
+
+ /* metadata stat-ahead */
+ unsigned int ll_sa_max; /* max statahead RPCs */
+ atomic_t ll_sa_total; /* statahead thread started
+ * count */
+ atomic_t ll_sa_wrong; /* statahead thread stopped for
+ * low hit ratio */
+ atomic_t ll_agl_total; /* AGL thread started count */
+
+ dev_t ll_sdev_orig; /* save s_dev before assign for
+ * clustered nfs */
+ struct rmtacl_ctl_table ll_rct;
+ struct eacl_table ll_et;
+ __kernel_fsid_t ll_fsid;
+};
+
+#define LL_DEFAULT_MAX_RW_CHUNK (32 * 1024 * 1024)
+
+struct ll_ra_read {
+ pgoff_t lrr_start;
+ pgoff_t lrr_count;
+ struct task_struct *lrr_reader;
+ struct list_head lrr_linkage;
+};
+
+/*
+ * per file-descriptor read-ahead data.
+ */
+struct ll_readahead_state {
+ spinlock_t ras_lock;
+ /*
+ * index of the last page that read(2) needed and that wasn't in the
+ * cache. Used by ras_update() to detect seeks.
+ *
+ * XXX nikita: if access seeks into cached region, Lustre doesn't see
+ * this.
+ */
+ unsigned long ras_last_readpage;
+ /*
+ * number of pages read after last read-ahead window reset. As window
+ * is reset on each seek, this is effectively a number of consecutive
+ * accesses. Maybe ->ras_accessed_in_window is better name.
+ *
+ * XXX nikita: window is also reset (by ras_update()) when Lustre
+ * believes that memory pressure evicts read-ahead pages. In that
+ * case, it probably doesn't make sense to expand window to
+ * PTLRPC_MAX_BRW_PAGES on the third access.
+ */
+ unsigned long ras_consecutive_pages;
+ /*
+ * number of read requests after the last read-ahead window reset.
+ * As the window is reset on each seek, this is effectively the number
+ * of consecutive read requests and is used to trigger read-ahead.
+ */
+ unsigned long ras_consecutive_requests;
+ /*
+ * Parameters of current read-ahead window. Handled by
+ * ras_update(). On the initial access to the file or after a seek,
+ * window is reset to 0. After 3 consecutive accesses, window is
+ * expanded to PTLRPC_MAX_BRW_PAGES. Afterwards, window is enlarged by
+ * PTLRPC_MAX_BRW_PAGES chunks up to ->ra_max_pages.
+ */
+ unsigned long ras_window_start, ras_window_len;
+ /*
+ * Where the next read-ahead should start. This lies within the read-ahead
+ * window. Read-ahead window is read in pieces rather than at once
+ * because: 1. lustre limits total number of pages under read-ahead by
+ * ->ra_max_pages (see ll_ra_count_get()), 2. client cannot read pages
+ * not covered by DLM lock.
+ */
+ unsigned long ras_next_readahead;
+ /*
+ * Total number of ll_file_read requests issued; reads originating
+ * from mmap are not counted in this total. This value is used to
+ * trigger full file read-ahead after multiple reads to a small file.
+ */
+ unsigned long ras_requests;
+ /*
+ * Page index with respect to the current request; this value
+ * will not be accurate when dealing with reads issued via mmap.
+ */
+ unsigned long ras_request_index;
+ /*
+ * list of struct ll_ra_read's, one per read(2) call currently in
+ * progress against this file descriptor. Used by read-ahead code,
+ * protected by ->ras_lock.
+ */
+ struct list_head ras_read_beads;
+ /*
+ * The following 3 items are used for detecting the stride I/O
+ * mode.
+ * In stride I/O mode,
+ * ...............|-----data-----|****gap*****|--------|******|....
+ * offset |-stride_pages-|-stride_gap-|
+ * ras_stride_offset = offset;
+ * ras_stride_length = stride_pages + stride_gap;
+ * ras_stride_pages = stride_pages;
+ * Note: all three items are counted in pages.
+ */
+ unsigned long ras_stride_length;
+ unsigned long ras_stride_pages;
+ pgoff_t ras_stride_offset;
+ /*
+ * number of consecutive stride requests; similar to
+ * ras_consecutive_requests, but used in stride I/O mode.
+ * Note: stride read-ahead is enabled only after more than 2
+ * consecutive stride requests have been detected.
+ */
+ unsigned long ras_consecutive_stride_requests;
+};
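+
+/* Illustrative example of the stride fields above (not part of this patch):
+ * an application that repeatedly reads 16 pages of data and then skips 48
+ * pages, starting at page index 100, would be described as
+ *     ras_stride_offset = 100
+ *     ras_stride_pages  = 16
+ *     ras_stride_length = 16 + 48 = 64
+ * following the ras_stride_* formulas documented inside the struct. */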
+
+extern struct kmem_cache *ll_file_data_slab;
+struct lustre_handle;
+struct ll_file_data {
+ struct ll_readahead_state fd_ras;
+ int fd_omode;
+ struct ccc_grouplock fd_grouplock;
+ __u64 lfd_pos;
+ __u32 fd_flags;
+ struct file *fd_file;
+ /* Indicates whether a failure needs to be reported on close.
+ * true: the failure is already known, do not report it again.
+ * false: unknown failure, should be reported. */
+ bool fd_write_failed;
+};
+
+struct lov_stripe_md;
+
+extern spinlock_t inode_lock;
+
+extern struct proc_dir_entry *proc_lustre_fs_root;
+
+static inline struct inode *ll_info2i(struct ll_inode_info *lli)
+{
+ return &lli->lli_vfs_inode;
+}
+
+struct it_cb_data {
+ struct inode *icbd_parent;
+ struct dentry **icbd_childp;
+ obd_id hash;
+};
+
+__u32 ll_i2suppgid(struct inode *i);
+void ll_i2gids(__u32 *suppgids, struct inode *i1,struct inode *i2);
+
+static inline int ll_need_32bit_api(struct ll_sb_info *sbi)
+{
+#if BITS_PER_LONG == 32
+ return 1;
+#else
+ return unlikely(current_is_32bit() || (sbi->ll_flags & LL_SBI_32BIT_API));
+#endif
+}
+
+#define LLAP_MAGIC 98764321
+
+extern struct kmem_cache *ll_async_page_slab;
+extern size_t ll_async_page_slab_size;
+
+void ll_ra_read_in(struct file *f, struct ll_ra_read *rar);
+void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar);
+struct ll_ra_read *ll_ra_read_get(struct file *f);
+
+/* llite/lproc_llite.c */
+#ifdef LPROCFS
+int lprocfs_register_mountpoint(struct proc_dir_entry *parent,
+ struct super_block *sb, char *osc, char *mdc);
+void lprocfs_unregister_mountpoint(struct ll_sb_info *sbi);
+void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count);
+void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars);
+#else
+static inline int lprocfs_register_mountpoint(struct proc_dir_entry *parent,
+ struct super_block *sb, char *osc, char *mdc){return 0;}
+static inline void lprocfs_unregister_mountpoint(struct ll_sb_info *sbi) {}
+static inline void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count) {}
+static inline void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars)
+{
+ memset(lvars, 0, sizeof(*lvars));
+}
+#endif
+
+
+/* llite/dir.c */
+void ll_release_page(struct page *page, int remove);
+extern struct file_operations ll_dir_operations;
+extern struct inode_operations ll_dir_inode_operations;
+struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
+ struct ll_dir_chain *chain);
+int ll_dir_read(struct inode *inode, struct dir_context *ctx);
+
+int ll_get_mdt_idx(struct inode *inode);
+/* llite/namei.c */
+int ll_objects_destroy(struct ptlrpc_request *request,
+ struct inode *dir);
+struct inode *ll_iget(struct super_block *sb, ino_t hash,
+ struct lustre_md *lic);
+int ll_md_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
+ void *data, int flag);
+struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de);
+int ll_rmdir_entry(struct inode *dir, char *name, int namelen);
+
+/* llite/rw.c */
+int ll_prepare_write(struct file *, struct page *, unsigned from, unsigned to);
+int ll_commit_write(struct file *, struct page *, unsigned from, unsigned to);
+int ll_writepage(struct page *page, struct writeback_control *wbc);
+int ll_writepages(struct address_space *, struct writeback_control *wbc);
+void ll_removepage(struct page *page);
+int ll_readpage(struct file *file, struct page *page);
+void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras);
+int ll_file_punch(struct inode *, loff_t, int);
+ssize_t ll_file_lockless_io(struct file *, char *, size_t, loff_t *, int);
+void ll_clear_file_contended(struct inode*);
+int ll_sync_page_range(struct inode *, struct address_space *, loff_t, size_t);
+int ll_readahead(const struct lu_env *env, struct cl_io *io,
+ struct ll_readahead_state *ras, struct address_space *mapping,
+ struct cl_page_list *queue, int flags);
+
+/* llite/file.c */
+extern struct file_operations ll_file_operations;
+extern struct file_operations ll_file_operations_flock;
+extern struct file_operations ll_file_operations_noflock;
+extern struct inode_operations ll_file_inode_operations;
+extern int ll_inode_revalidate_it(struct dentry *, struct lookup_intent *,
+ __u64);
+extern int ll_have_md_lock(struct inode *inode, __u64 *bits,
+ ldlm_mode_t l_req_mode);
+extern ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
+ struct lustre_handle *lockh, __u64 flags);
+int __ll_inode_revalidate_it(struct dentry *, struct lookup_intent *,
+ __u64 bits);
+int ll_revalidate_nd(struct dentry *dentry, unsigned int flags);
+int ll_file_open(struct inode *inode, struct file *file);
+int ll_file_release(struct inode *inode, struct file *file);
+int ll_glimpse_ioctl(struct ll_sb_info *sbi,
+ struct lov_stripe_md *lsm, lstat_t *st);
+void ll_ioepoch_open(struct ll_inode_info *lli, __u64 ioepoch);
+int ll_local_open(struct file *file,
+ struct lookup_intent *it, struct ll_file_data *fd,
+ struct obd_client_handle *och);
+int ll_release_openhandle(struct dentry *, struct lookup_intent *);
+int ll_md_close(struct obd_export *md_exp, struct inode *inode,
+ struct file *file);
+int ll_md_real_close(struct inode *inode, int flags);
+void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
+ struct obd_client_handle **och, unsigned long flags);
+void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data);
+int ll_som_update(struct inode *inode, struct md_op_data *op_data);
+int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
+ __u64 ioepoch, int sync);
+int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
+ struct md_open_data **mod);
+void ll_pack_inode2opdata(struct inode *inode, struct md_op_data *op_data,
+ struct lustre_handle *fh);
+extern void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
+ struct ll_file_data *file, loff_t pos,
+ size_t count, int rw);
+int ll_getattr_it(struct vfsmount *mnt, struct dentry *de,
+ struct lookup_intent *it, struct kstat *stat);
+int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat);
+struct ll_file_data *ll_file_data_get(void);
+struct posix_acl * ll_get_acl(struct inode *inode, int type);
+
+int ll_inode_permission(struct inode *inode, int mask);
+
+int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file,
+ int flags, struct lov_user_md *lum,
+ int lum_size);
+int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
+ struct lov_mds_md **lmm, int *lmm_size,
+ struct ptlrpc_request **request);
+int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
+ int set_default);
+int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
+ int *lmm_size, struct ptlrpc_request **request);
+int ll_fsync(struct file *file, loff_t start, loff_t end, int data);
+int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap,
+ int num_bytes);
+int ll_merge_lvb(const struct lu_env *env, struct inode *inode);
+int ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg);
+int ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg);
+int ll_fid2path(struct inode *inode, void *arg);
+int ll_data_version(struct inode *inode, __u64 *data_version, int extent_lock);
+
+/* llite/dcache.c */
+
+int ll_dops_init(struct dentry *de, int block, int init_sa);
+extern struct dentry_operations ll_d_ops;
+void ll_intent_drop_lock(struct lookup_intent *);
+void ll_intent_release(struct lookup_intent *);
+void ll_invalidate_aliases(struct inode *);
+void ll_frob_intent(struct lookup_intent **itp, struct lookup_intent *deft);
+void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry);
+int ll_dcompare(const struct dentry *parent, const struct dentry *dentry,
+ unsigned int len, const char *str, const struct qstr *d_name);
+int ll_revalidate_it_finish(struct ptlrpc_request *request,
+ struct lookup_intent *it, struct dentry *de);
+
+/* llite/llite_lib.c */
+extern struct super_operations lustre_super_operations;
+
+char *ll_read_opt(const char *opt, char *data);
+void ll_lli_init(struct ll_inode_info *lli);
+int ll_fill_super(struct super_block *sb, struct vfsmount *mnt);
+void ll_put_super(struct super_block *sb);
+void ll_kill_super(struct super_block *sb);
+struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock);
+struct inode *ll_inode_from_lock(struct ldlm_lock *lock);
+void ll_clear_inode(struct inode *inode);
+int ll_setattr_raw(struct dentry *dentry, struct iattr *attr);
+int ll_setattr(struct dentry *de, struct iattr *attr);
+int ll_statfs(struct dentry *de, struct kstatfs *sfs);
+int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
+ __u64 max_age, __u32 flags);
+void ll_update_inode(struct inode *inode, struct lustre_md *md);
+void ll_read_inode2(struct inode *inode, void *opaque);
+void ll_delete_inode(struct inode *inode);
+int ll_iocontrol(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg);
+int ll_flush_ctx(struct inode *inode);
+void ll_umount_begin(struct super_block *sb);
+int ll_remount_fs(struct super_block *sb, int *flags, char *data);
+int ll_show_options(struct seq_file *seq, struct dentry *dentry);
+void ll_dirty_page_discard_warn(struct page *page, int ioret);
+int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
+ struct super_block *, struct lookup_intent *);
+void lustre_dump_dentry(struct dentry *, int recur);
+void lustre_dump_inode(struct inode *);
+int ll_obd_statfs(struct inode *inode, void *arg);
+int ll_get_max_mdsize(struct ll_sb_info *sbi, int *max_mdsize);
+int ll_process_config(struct lustre_cfg *lcfg);
+struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
+ struct inode *i1, struct inode *i2,
+ const char *name, int namelen,
+ int mode, __u32 opc, void *data);
+void ll_finish_md_op_data(struct md_op_data *op_data);
+int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg);
+char *ll_get_fsname(struct super_block *sb, char *buf, int buflen);
+
+/* llite/llite_nfs.c */
+extern struct export_operations lustre_export_operations;
+__u32 get_uuid2int(const char *name, int len);
+void get_uuid2fsid(const char *name, int len, __kernel_fsid_t *fsid);
+struct inode *search_inode_for_lustre(struct super_block *sb,
+ const struct lu_fid *fid);
+
+/* llite/special.c */
+extern struct inode_operations ll_special_inode_operations;
+extern struct file_operations ll_special_chr_inode_fops;
+extern struct file_operations ll_special_chr_file_fops;
+extern struct file_operations ll_special_blk_inode_fops;
+extern struct file_operations ll_special_fifo_inode_fops;
+extern struct file_operations ll_special_fifo_file_fops;
+extern struct file_operations ll_special_sock_inode_fops;
+
+/* llite/symlink.c */
+extern struct inode_operations ll_fast_symlink_inode_operations;
+
+/* llite/llite_close.c */
+struct ll_close_queue {
+ spinlock_t lcq_lock;
+ struct list_head lcq_head;
+ wait_queue_head_t lcq_waitq;
+ struct completion lcq_comp;
+ atomic_t lcq_stop;
+};
+
+struct ccc_object *cl_inode2ccc(struct inode *inode);
+
+
+void vvp_write_pending (struct ccc_object *club, struct ccc_page *page);
+void vvp_write_complete(struct ccc_object *club, struct ccc_page *page);
+
+/* a specific architecture can implement only part of this list */
+enum vvp_io_subtype {
+ /** normal IO */
+ IO_NORMAL,
+ /** io called from .sendfile */
+ IO_SENDFILE,
+ /** io started from splice_{read|write} */
+ IO_SPLICE
+};
+
+/* IO subtypes */
+struct vvp_io {
+ /** io subtype */
+ enum vvp_io_subtype cui_io_subtype;
+
+ union {
+ struct {
+ read_actor_t cui_actor;
+ void *cui_target;
+ } sendfile;
+ struct {
+ struct pipe_inode_info *cui_pipe;
+ unsigned int cui_flags;
+ } splice;
+ struct vvp_fault_io {
+ /**
+ * Inode modification time that is checked across DLM
+ * lock request.
+ */
+ time_t ft_mtime;
+ struct vm_area_struct *ft_vma;
+ /**
+ * locked page returned from vvp_io
+ */
+ struct page *ft_vmpage;
+ struct vm_fault_api {
+ /**
+ * kernel fault info
+ */
+ struct vm_fault *ft_vmf;
+ /**
+ * fault API used bitflags for return code.
+ */
+ unsigned int ft_flags;
+ } fault;
+ } fault;
+ } u;
+ /**
+ * Read-ahead state used by read and page-fault IO contexts.
+ */
+ struct ll_ra_read cui_bead;
+ /**
+ * Set when cui_bead has been initialized.
+ */
+ int cui_ra_window_set;
+ /**
+ * Partially truncated page, that vvp_io_trunc_start() keeps locked
+ * across truncate.
+ */
+ struct cl_page *cui_partpage;
+};
+
+/**
+ * IO arguments for various VFS I/O interfaces.
+ */
+struct vvp_io_args {
+ /** normal/sendfile/splice */
+ enum vvp_io_subtype via_io_subtype;
+
+ union {
+ struct {
+ struct kiocb *via_iocb;
+ struct iovec *via_iov;
+ unsigned long via_nrsegs;
+ } normal;
+ struct {
+ read_actor_t via_actor;
+ void *via_target;
+ } sendfile;
+ struct {
+ struct pipe_inode_info *via_pipe;
+ unsigned int via_flags;
+ } splice;
+ } u;
+};
+
+struct ll_cl_context {
+ void *lcc_cookie;
+ struct cl_io *lcc_io;
+ struct cl_page *lcc_page;
+ struct lu_env *lcc_env;
+ int lcc_refcheck;
+ int lcc_created;
+};
+
+struct vvp_thread_info {
+ struct ost_lvb vti_lvb;
+ struct cl_2queue vti_queue;
+ struct iovec vti_local_iov;
+ struct vvp_io_args vti_args;
+ struct ra_io_arg vti_ria;
+ struct kiocb vti_kiocb;
+ struct ll_cl_context vti_io_ctx;
+};
+
+static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env)
+{
+ extern struct lu_context_key vvp_key;
+ struct vvp_thread_info *info;
+
+ info = lu_context_key_get(&env->le_ctx, &vvp_key);
+ LASSERT(info != NULL);
+ return info;
+}
+
+static inline struct vvp_io_args *vvp_env_args(const struct lu_env *env,
+ enum vvp_io_subtype type)
+{
+ struct vvp_io_args *ret = &vvp_env_info(env)->vti_args;
+
+ ret->via_io_subtype = type;
+
+ return ret;
+}
+
+struct vvp_session {
+ struct vvp_io vs_ios;
+};
+
+static inline struct vvp_session *vvp_env_session(const struct lu_env *env)
+{
+ extern struct lu_context_key vvp_session_key;
+ struct vvp_session *ses;
+
+ ses = lu_context_key_get(env->le_ses, &vvp_session_key);
+ LASSERT(ses != NULL);
+ return ses;
+}
+
+static inline struct vvp_io *vvp_env_io(const struct lu_env *env)
+{
+ return &vvp_env_session(env)->vs_ios;
+}
+
+void ll_queue_done_writing(struct inode *inode, unsigned long flags);
+void ll_close_thread_shutdown(struct ll_close_queue *lcq);
+int ll_close_thread_start(struct ll_close_queue **lcq_ret);
+
+/* llite/llite_mmap.c */
+typedef struct rb_root rb_root_t;
+typedef struct rb_node rb_node_t;
+
+struct ll_lock_tree_node;
+struct ll_lock_tree {
+ rb_root_t lt_root;
+ struct list_head lt_locked_list;
+ struct ll_file_data *lt_fd;
+};
+
+int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last);
+int ll_file_mmap(struct file * file, struct vm_area_struct * vma);
+struct ll_lock_tree_node * ll_node_from_inode(struct inode *inode, __u64 start,
+ __u64 end, ldlm_mode_t mode);
+void policy_from_vma(ldlm_policy_data_t *policy,
+ struct vm_area_struct *vma, unsigned long addr, size_t count);
+struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
+ size_t count);
+
+static inline void ll_invalidate_page(struct page *vmpage)
+{
+ struct address_space *mapping = vmpage->mapping;
+ loff_t offset = (loff_t)vmpage->index << PAGE_CACHE_SHIFT;
+
+ LASSERT(PageLocked(vmpage));
+ if (mapping == NULL)
+ return;
+
+ ll_teardown_mmaps(mapping, offset, offset + PAGE_CACHE_SIZE);
+ truncate_complete_page(mapping, vmpage);
+}
+
+#define ll_s2sbi(sb) (s2lsi(sb)->lsi_llsbi)
+
+/* don't need an addref as the sb_info should be holding one */
+static inline struct obd_export *ll_s2dtexp(struct super_block *sb)
+{
+ return ll_s2sbi(sb)->ll_dt_exp;
+}
+
+/* don't need an addref as the sb_info should be holding one */
+static inline struct obd_export *ll_s2mdexp(struct super_block *sb)
+{
+ return ll_s2sbi(sb)->ll_md_exp;
+}
+
+static inline struct client_obd *sbi2mdc(struct ll_sb_info *sbi)
+{
+ struct obd_device *obd = sbi->ll_md_exp->exp_obd;
+ if (obd == NULL)
+ LBUG();
+ return &obd->u.cli;
+}
+
+// FIXME: replace the name of this with LL_SB to conform to kernel stuff
+static inline struct ll_sb_info *ll_i2sbi(struct inode *inode)
+{
+ return ll_s2sbi(inode->i_sb);
+}
+
+static inline struct obd_export *ll_i2dtexp(struct inode *inode)
+{
+ return ll_s2dtexp(inode->i_sb);
+}
+
+static inline struct obd_export *ll_i2mdexp(struct inode *inode)
+{
+ return ll_s2mdexp(inode->i_sb);
+}
+
+static inline struct lu_fid *ll_inode2fid(struct inode *inode)
+{
+ struct lu_fid *fid;
+
+ LASSERT(inode != NULL);
+ fid = &ll_i2info(inode)->lli_fid;
+
+ return fid;
+}
+
+static inline int ll_mds_max_easize(struct super_block *sb)
+{
+ return sbi2mdc(ll_s2sbi(sb))->cl_max_mds_easize;
+}
+
+static inline __u64 ll_file_maxbytes(struct inode *inode)
+{
+ return ll_i2info(inode)->lli_maxbytes;
+}
+
+/* llite/xattr.c */
+int ll_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags);
+ssize_t ll_getxattr(struct dentry *dentry, const char *name,
+ void *buffer, size_t size);
+ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size);
+int ll_removexattr(struct dentry *dentry, const char *name);
+
+/* llite/remote_perm.c */
+extern struct kmem_cache *ll_remote_perm_cachep;
+extern struct kmem_cache *ll_rmtperm_hash_cachep;
+
+struct hlist_head *alloc_rmtperm_hash(void);
+void free_rmtperm_hash(struct hlist_head *hash);
+int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm);
+int lustre_check_remote_perm(struct inode *inode, int mask);
+
+/* llite/llite_capa.c */
+extern struct timer_list ll_capa_timer;
+
+int ll_capa_thread_start(void);
+void ll_capa_thread_stop(void);
+void ll_capa_timer_callback(unsigned long unused);
+
+struct obd_capa *ll_add_capa(struct inode *inode, struct obd_capa *ocapa);
+int ll_update_capa(struct obd_capa *ocapa, struct lustre_capa *capa);
+
+void ll_capa_open(struct inode *inode);
+void ll_capa_close(struct inode *inode);
+
+struct obd_capa *ll_mdscapa_get(struct inode *inode);
+struct obd_capa *ll_osscapa_get(struct inode *inode, __u64 opc);
+
+void ll_truncate_free_capa(struct obd_capa *ocapa);
+void ll_clear_inode_capas(struct inode *inode);
+void ll_print_capa_stat(struct ll_sb_info *sbi);
+
+/* llite/llite_cl.c */
+extern struct lu_device_type vvp_device_type;
+
+/**
+ * Common IO arguments for various VFS I/O interfaces.
+ */
+int cl_sb_init(struct super_block *sb);
+int cl_sb_fini(struct super_block *sb);
+enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma);
+void ll_io_init(struct cl_io *io, const struct file *file, int write);
+
+void ras_update(struct ll_sb_info *sbi, struct inode *inode,
+ struct ll_readahead_state *ras, unsigned long index,
+ unsigned hit);
+void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len);
+int ll_is_file_contended(struct file *file);
+void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which);
+
+/* llite/llite_rmtacl.c */
+#ifdef CONFIG_FS_POSIX_ACL
+struct eacl_entry {
+ struct list_head ee_list;
+ pid_t ee_key; /* hash key */
+ struct lu_fid ee_fid;
+ int ee_type; /* ACL type for ACCESS or DEFAULT */
+ ext_acl_xattr_header *ee_acl;
+};
+
+obd_valid rce_ops2valid(int ops);
+struct rmtacl_ctl_entry *rct_search(struct rmtacl_ctl_table *rct, pid_t key);
+int rct_add(struct rmtacl_ctl_table *rct, pid_t key, int ops);
+int rct_del(struct rmtacl_ctl_table *rct, pid_t key);
+void rct_init(struct rmtacl_ctl_table *rct);
+void rct_fini(struct rmtacl_ctl_table *rct);
+
+void ee_free(struct eacl_entry *ee);
+int ee_add(struct eacl_table *et, pid_t key, struct lu_fid *fid, int type,
+ ext_acl_xattr_header *header);
+struct eacl_entry *et_search_del(struct eacl_table *et, pid_t key,
+ struct lu_fid *fid, int type);
+void et_search_free(struct eacl_table *et, pid_t key);
+void et_init(struct eacl_table *et);
+void et_fini(struct eacl_table *et);
+#else
+static inline obd_valid rce_ops2valid(int ops)
+{
+ return 0;
+}
+#endif
+
+/* statahead.c */
+
+#define LL_SA_RPC_MIN 2
+#define LL_SA_RPC_DEF 32
+#define LL_SA_RPC_MAX 8192
+
+#define LL_SA_CACHE_BIT 5
+#define LL_SA_CACHE_SIZE (1 << LL_SA_CACHE_BIT)
+#define LL_SA_CACHE_MASK (LL_SA_CACHE_SIZE - 1)
+
+/* per inode struct, for dir only */
+struct ll_statahead_info {
+ struct inode *sai_inode;
+ atomic_t sai_refcount; /* hold a refcount when
+ * accessing this struct */
+ unsigned int sai_generation; /* generation for statahead */
+ unsigned int sai_max; /* max ahead of lookup */
+ __u64 sai_sent; /* stat requests sent count */
+ __u64 sai_replied; /* stat requests which received
+ * reply */
+ __u64 sai_index; /* index of statahead entry */
+ __u64 sai_index_wait; /* index of the entry which the
+ * caller is waiting for */
+ __u64 sai_hit; /* hit count */
+ __u64 sai_miss; /* miss count:
+ * for "ls -al" case, it includes
+ * hidden dentry miss;
+ * for "ls -l" case, it does not
+ * include hidden dentry miss.
+ * "sai_miss_hidden" is used for
+ * the latter case.
+ */
+ unsigned int sai_consecutive_miss; /* consecutive miss */
+ unsigned int sai_miss_hidden;/* "ls -al", but first dentry
+ * is not a hidden one */
+ unsigned int sai_skip_hidden;/* skipped hidden dentry count */
+ unsigned int sai_ls_all:1, /* "ls -al", do stat-ahead for
+ * hidden entries */
+ sai_in_readpage:1,/* statahead is in readdir()*/
+ sai_agl_valid:1;/* AGL is valid for the dir */
+ wait_queue_head_t sai_waitq; /* stat-ahead wait queue */
+ struct ptlrpc_thread sai_thread; /* stat-ahead thread */
+ struct ptlrpc_thread sai_agl_thread; /* AGL thread */
+ struct list_head sai_entries; /* entry list */
+ struct list_head sai_entries_received; /* entries returned */
+ struct list_head sai_entries_stated; /* entries stated */
+ struct list_head sai_entries_agl; /* AGL entries to be sent */
+ struct list_head sai_cache[LL_SA_CACHE_SIZE];
+ spinlock_t sai_cache_lock[LL_SA_CACHE_SIZE];
+ atomic_t sai_cache_count; /* entry count in cache */
+};
+
+int do_statahead_enter(struct inode *dir, struct dentry **dentry,
+ int only_unplug);
+void ll_stop_statahead(struct inode *dir, void *key);
+
+static inline int ll_glimpse_size(struct inode *inode)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ int rc;
+
+ down_read(&lli->lli_glimpse_sem);
+ rc = cl_glimpse_size(inode);
+ lli->lli_glimpse_time = cfs_time_current();
+ up_read(&lli->lli_glimpse_sem);
+ return rc;
+}
+
+static inline void
+ll_statahead_mark(struct inode *dir, struct dentry *dentry)
+{
+ struct ll_inode_info *lli = ll_i2info(dir);
+ struct ll_statahead_info *sai = lli->lli_sai;
+ struct ll_dentry_data *ldd = ll_d2d(dentry);
+
+ /* not the same process, don't mark */
+ if (lli->lli_opendir_pid != current_pid())
+ return;
+
+ if (sai != NULL && ldd != NULL)
+ ldd->lld_sa_generation = sai->sai_generation;
+}
+
+static inline int
+ll_need_statahead(struct inode *dir, struct dentry *dentryp)
+{
+ struct ll_inode_info *lli;
+ struct ll_dentry_data *ldd;
+
+ if (ll_i2sbi(dir)->ll_sa_max == 0)
+ return -EAGAIN;
+
+ lli = ll_i2info(dir);
+ /* not the same process, don't statahead */
+ if (lli->lli_opendir_pid != current_pid())
+ return -EAGAIN;
+
+ /* statahead has been stopped */
+ if (lli->lli_opendir_key == NULL)
+ return -EAGAIN;
+
+ ldd = ll_d2d(dentryp);
+ /*
+ * When stat'ing a dentry, the system triggers "revalidate" or "lookup"
+ * more than once, for "getattr", for "getxattr", and maybe for others.
+ * Under patchless client mode, the operation intent is not accurate,
+ * which may misguide the statahead thread. For example:
+ * the "revalidate" calls for "getattr" and "getxattr" of a dentry may
+ * carry the same operation intent -- "IT_GETATTR".
+ * In fact, one dentry should have only one chance to interact with the
+ * statahead thread, otherwise the statahead window would get confused.
+ * The solution is as follows:
+ * assign "lld_sa_generation" from "sai_generation" when a dentry is
+ * handled with "IT_GETATTR" for the first time; subsequent "IT_GETATTR"
+ * calls bypass the statahead thread by checking
+ * "lld_sa_generation == lli_sai->sai_generation"
+ */
+ if (ldd && lli->lli_sai &&
+ ldd->lld_sa_generation == lli->lli_sai->sai_generation)
+ return -EAGAIN;
+
+ return 1;
+}
+
+static inline int
+ll_statahead_enter(struct inode *dir, struct dentry **dentryp, int only_unplug)
+{
+ int ret;
+
+ ret = ll_need_statahead(dir, *dentryp);
+ if (ret <= 0)
+ return ret;
+
+ return do_statahead_enter(dir, dentryp, only_unplug);
+}
+
+/* llite ioctl register support routine */
+enum llioc_iter {
+ LLIOC_CONT = 0,
+ LLIOC_STOP
+};
+
+#define LLIOC_MAX_CMD 256
+
+/*
+ * Rules for writing a callback function:
+ *
+ * Parameters:
+ * @magic: The dynamic ioctl call routine feeds this value with the pointer
+ * returned by ll_iocontrol_register. Callback functions should use this
+ * data to check for a potential collision of ioctl cmds. If a collision
+ * is found, the callback function should return LLIOC_CONT.
+ * @rcp: The result of the ioctl command.
+ *
+ * Return values:
+ * If @magic matches the pointer returned by ll_iocontrol_register, the
+ * callback should return LLIOC_STOP; return LLIOC_CONT otherwise.
+ */
+typedef enum llioc_iter (*llioc_callback_t)(struct inode *inode,
+ struct file *file, unsigned int cmd, unsigned long arg,
+ void *magic, int *rcp);
+
+enum llioc_iter ll_iocontrol_call(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg, int *rcp);
+
+/* export functions */
+/* Register an ioctl command block dynamically for a regular file.
+ *
+ * @cmd: the array of ioctl commands
+ * @count: number of commands in @cmd
+ * @cb: callback function; it will be called if an ioctl command is found to
+ * belong to the command list @cmd.
+ *
+ * Return value:
+ * A magic pointer is returned on success;
+ * otherwise, NULL is returned.
+ */
+void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd);
+void ll_iocontrol_unregister(void *magic);
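+
+/* Illustrative usage sketch (not part of this patch): how a caller might
+ * hook a private ioctl range through this interface. The command macros,
+ * "my_magic" and my_handle_cmd() below are hypothetical.
+ *
+ *     static void *my_magic;
+ *
+ *     static enum llioc_iter my_cb(struct inode *inode, struct file *file,
+ *                                  unsigned int cmd, unsigned long arg,
+ *                                  void *magic, int *rcp)
+ *     {
+ *             if (magic != my_magic)
+ *                     return LLIOC_CONT;
+ *             *rcp = my_handle_cmd(inode, file, cmd, arg);
+ *             return LLIOC_STOP;
+ *     }
+ *
+ *     unsigned int my_cmds[] = { MY_IOC_CMD1, MY_IOC_CMD2 };
+ *
+ *     my_magic = ll_iocontrol_register(my_cb, 2, my_cmds);
+ *     ...
+ *     ll_iocontrol_unregister(my_magic);
+ */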
+
+
+/* lclient compat stuff */
+#define cl_inode_info ll_inode_info
+#define cl_i2info(info) ll_i2info(info)
+#define cl_inode_mode(inode) ((inode)->i_mode)
+#define cl_i2sbi ll_i2sbi
+
+static inline struct ll_file_data *cl_iattr2fd(struct inode *inode,
+ const struct iattr *attr)
+{
+ LASSERT(attr->ia_valid & ATTR_FILE);
+ return LUSTRE_FPRIVATE(attr->ia_file);
+}
+
+static inline void cl_isize_lock(struct inode *inode)
+{
+ ll_inode_size_lock(inode);
+}
+
+static inline void cl_isize_unlock(struct inode *inode)
+{
+ ll_inode_size_unlock(inode);
+}
+
+static inline void cl_isize_write_nolock(struct inode *inode, loff_t kms)
+{
+ LASSERT(down_trylock(&ll_i2info(inode)->lli_size_sem) != 0);
+ i_size_write(inode, kms);
+}
+
+static inline void cl_isize_write(struct inode *inode, loff_t kms)
+{
+ ll_inode_size_lock(inode);
+ i_size_write(inode, kms);
+ ll_inode_size_unlock(inode);
+}
+
+#define cl_isize_read(inode) i_size_read(inode)
+
+static inline int cl_merge_lvb(const struct lu_env *env, struct inode *inode)
+{
+ return ll_merge_lvb(env, inode);
+}
+
+#define cl_inode_atime(inode) LTIME_S((inode)->i_atime)
+#define cl_inode_ctime(inode) LTIME_S((inode)->i_ctime)
+#define cl_inode_mtime(inode) LTIME_S((inode)->i_mtime)
+
+struct obd_capa *cl_capa_lookup(struct inode *inode, enum cl_req_type crt);
+
+int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
+ enum cl_fsync_mode mode, int ignore_layout);
+
+/** direct write pages */
+struct ll_dio_pages {
+ /** page array to be written. we don't support
+ * partial pages except the last one. */
+ struct page **ldp_pages;
+ /* offset of each page */
+ loff_t *ldp_offsets;
+ /** if ldp_offsets is NULL, the pages to be written are
+ * sequential, and this is the file offset of the
+ * first page. */
+ loff_t ldp_start_offset;
+ /** how many bytes are to be written. */
+ size_t ldp_size;
+ /** # of pages in the array. */
+ int ldp_nr;
+};
+
+static inline void cl_stats_tally(struct cl_device *dev, enum cl_req_type crt,
+ int rc)
+{
+ int opc = (crt == CRT_READ) ? LPROC_LL_OSC_READ :
+ LPROC_LL_OSC_WRITE;
+
+ ll_stats_ops_tally(ll_s2sbi(cl2ccc_dev(dev)->cdv_sb), opc, rc);
+}
+
+extern ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
+ int rw, struct inode *inode,
+ struct ll_dio_pages *pv);
+
+static inline int ll_file_nolock(const struct file *file)
+{
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct inode *inode = file->f_dentry->d_inode;
+
+ LASSERT(fd != NULL);
+ return ((fd->fd_flags & LL_FILE_IGNORE_LOCK) ||
+ (ll_i2sbi(inode)->ll_flags & LL_SBI_NOLCK));
+}
+
+static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
+ struct lookup_intent *it, __u64 *bits)
+{
+ if (!it->d.lustre.it_lock_set) {
+ struct lustre_handle handle;
+
+ /* If this inode is a remote object, it will get two
+ * separate locks in different namespaces: the master MDT,
+ * where the name entry is, will grant a LOOKUP lock; the
+ * remote MDT, where the object is, will grant an
+ * UPDATE|PERM lock. The inode is attached to both the
+ * LOOKUP and PERM locks, so revoking either lock will
+ * cause the dcache to be cleared. */
+ if (it->d.lustre.it_remote_lock_mode) {
+ handle.cookie = it->d.lustre.it_remote_lock_handle;
+ CDEBUG(D_DLMTRACE, "setting l_data to inode %p"
+ "(%lu/%u) for remote lock "LPX64"\n", inode,
+ inode->i_ino, inode->i_generation,
+ handle.cookie);
+ md_set_lock_data(exp, &handle.cookie, inode, NULL);
+ }
+
+ handle.cookie = it->d.lustre.it_lock_handle;
+
+ CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)"
+ " for lock "LPX64"\n", inode, inode->i_ino,
+ inode->i_generation, handle.cookie);
+
+ md_set_lock_data(exp, &handle.cookie, inode,
+ &it->d.lustre.it_lock_bits);
+ it->d.lustre.it_lock_set = 1;
+ }
+
+ if (bits != NULL)
+ *bits = it->d.lustre.it_lock_bits;
+}
+
+static inline void ll_lock_dcache(struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+}
+
+static inline void ll_unlock_dcache(struct inode *inode)
+{
+ spin_unlock(&inode->i_lock);
+}
+
+static inline int d_lustre_invalid(const struct dentry *dentry)
+{
+ struct ll_dentry_data *lld = ll_d2d(dentry);
+
+ return (lld == NULL) || lld->lld_invalid;
+}
+
+static inline void __d_lustre_invalidate(struct dentry *dentry)
+{
+ struct ll_dentry_data *lld = ll_d2d(dentry);
+
+ if (lld != NULL)
+ lld->lld_invalid = 1;
+}
+
+/*
+ * Mark the dentry INVALID. If the dentry refcount is zero (the normal case
+ * for ll_md_blocking_ast), unhash the dentry and let the dcache reclaim it
+ * later; otherwise the dput() of the last refcount will unhash and kill it.
+ */
+static inline void d_lustre_invalidate(struct dentry *dentry, int nested)
+{
+ CDEBUG(D_DENTRY, "invalidate dentry %.*s (%p) parent %p inode %p "
+ "refc %d\n", dentry->d_name.len, dentry->d_name.name, dentry,
+ dentry->d_parent, dentry->d_inode, d_count(dentry));
+
+ spin_lock_nested(&dentry->d_lock,
+ nested ? DENTRY_D_LOCK_NESTED : DENTRY_D_LOCK_NORMAL);
+ __d_lustre_invalidate(dentry);
+ if (d_count(dentry) == 0)
+ __d_drop(dentry);
+ spin_unlock(&dentry->d_lock);
+}
+
+static inline void d_lustre_revalidate(struct dentry *dentry)
+{
+ spin_lock(&dentry->d_lock);
+ LASSERT(ll_d2d(dentry) != NULL);
+ ll_d2d(dentry)->lld_invalid = 0;
+ spin_unlock(&dentry->d_lock);
+}
+
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
+/* Compatibility for old (1.8) compiled userspace quota code */
+struct if_quotactl_18 {
+ __u32 qc_cmd;
+ __u32 qc_type;
+ __u32 qc_id;
+ __u32 qc_stat;
+ struct obd_dqinfo qc_dqinfo;
+ struct obd_dqblk qc_dqblk;
+ char obd_type[16];
+ struct obd_uuid obd_uuid;
+};
+#define LL_IOC_QUOTACTL_18 _IOWR('f', 162, struct if_quotactl_18 *)
+/* End compatibility for old (1.8) compiled userspace quota code */
+#else
+#warning "remove old LL_IOC_QUOTACTL_18 compatibility code"
+#endif /* LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0) */
+
+enum {
+ LL_LAYOUT_GEN_NONE = ((__u32)-2), /* layout lock was cancelled */
+ LL_LAYOUT_GEN_EMPTY = ((__u32)-1) /* for empty layout */
+};
+
+int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf);
+int ll_layout_refresh(struct inode *inode, __u32 *gen);
+
+#endif /* LLITE_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
new file mode 100644
index 0000000..b868c2b
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -0,0 +1,2374 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/llite/llite_lib.c
+ *
+ * Lustre Light Super operations
+ */
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+
+#include <lustre_lite.h>
+#include <lustre_ha.h>
+#include <lustre_dlm.h>
+#include <lprocfs_status.h>
+#include <lustre_disk.h>
+#include <lustre_param.h>
+#include <lustre_log.h>
+#include <cl_object.h>
+#include <obd_cksum.h>
+#include "llite_internal.h"
+
+struct kmem_cache *ll_file_data_slab;
+
+LIST_HEAD(ll_super_blocks);
+DEFINE_SPINLOCK(ll_sb_lock);
+
+#ifndef MS_HAS_NEW_AOPS
+extern struct address_space_operations ll_aops;
+#else
+extern struct address_space_operations_ext ll_aops;
+#endif
+
+#ifndef log2
+#define log2(n) ffz(~(n))
+#endif
+
+static struct ll_sb_info *ll_init_sbi(void)
+{
+ struct ll_sb_info *sbi = NULL;
+ unsigned long pages;
+ unsigned long lru_page_max;
+ struct sysinfo si;
+ class_uuid_t uuid;
+ int i;
+
+ OBD_ALLOC(sbi, sizeof(*sbi));
+ if (!sbi)
+ return NULL;
+
+ spin_lock_init(&sbi->ll_lock);
+ mutex_init(&sbi->ll_lco.lco_lock);
+ spin_lock_init(&sbi->ll_pp_extent_lock);
+ spin_lock_init(&sbi->ll_process_lock);
+ sbi->ll_rw_stats_on = 0;
+
+ si_meminfo(&si);
+ pages = si.totalram - si.totalhigh;
+ if (pages >> (20 - PAGE_CACHE_SHIFT) < 512) {
+ lru_page_max = pages / 2;
+ } else {
+ lru_page_max = (pages / 4) * 3;
+ }
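+ /* Illustrative numbers (not part of this patch), assuming 4KB pages
+ * (PAGE_CACHE_SHIFT == 12): with 1GB of low memory (262144 pages),
+ * 262144 >> 8 = 1024 >= 512, so lru_page_max = 196608 pages (~768MB);
+ * with 256MB (65536 pages) it would be 32768 pages (~128MB). */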
+
+ /* initialize lru data */
+ atomic_set(&sbi->ll_cache.ccc_users, 0);
+ sbi->ll_cache.ccc_lru_max = lru_page_max;
+ atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
+ spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
+ INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);
+
+ sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
+ SBI_DEFAULT_READAHEAD_MAX);
+ sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
+ sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
+ SBI_DEFAULT_READAHEAD_WHOLE_MAX;
+ INIT_LIST_HEAD(&sbi->ll_conn_chain);
+ INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list);
+
+ ll_generate_random_uuid(uuid);
+ class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
+ CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);
+
+ spin_lock(&ll_sb_lock);
+ list_add_tail(&sbi->ll_list, &ll_super_blocks);
+ spin_unlock(&ll_sb_lock);
+
+ sbi->ll_flags |= LL_SBI_VERBOSE;
+ sbi->ll_flags |= LL_SBI_CHECKSUM;
+
+ sbi->ll_flags |= LL_SBI_LRU_RESIZE;
+
+ for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
+ spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
+ pp_r_hist.oh_lock);
+ spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
+ pp_w_hist.oh_lock);
+ }
+
+ /* metadata statahead is enabled by default */
+ sbi->ll_sa_max = LL_SA_RPC_DEF;
+ atomic_set(&sbi->ll_sa_total, 0);
+ atomic_set(&sbi->ll_sa_wrong, 0);
+ atomic_set(&sbi->ll_agl_total, 0);
+ sbi->ll_flags |= LL_SBI_AGL_ENABLED;
+
+ return sbi;
+}
+
+void ll_free_sbi(struct super_block *sb)
+{
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+
+ if (sbi != NULL) {
+ spin_lock(&ll_sb_lock);
+ list_del(&sbi->ll_list);
+ spin_unlock(&ll_sb_lock);
+ OBD_FREE(sbi, sizeof(*sbi));
+ }
+}
+
+static struct dentry_operations ll_d_root_ops = {
+ .d_compare = ll_dcompare,
+ .d_revalidate = ll_revalidate_nd,
+};
+
+static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
+ struct vfsmount *mnt)
+{
+ struct inode *root = NULL;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ struct obd_device *obd;
+ struct obd_capa *oc = NULL;
+ struct obd_statfs *osfs = NULL;
+ struct ptlrpc_request *request = NULL;
+ struct obd_connect_data *data = NULL;
+ struct obd_uuid *uuid;
+ struct md_op_data *op_data;
+ struct lustre_md lmd;
+ obd_valid valid;
+ int size, err, checksum;
+
+ obd = class_name2obd(md);
+ if (!obd) {
+ CERROR("MD %s: not setup or attached\n", md);
+ return -EINVAL;
+ }
+
+ OBD_ALLOC_PTR(data);
+ if (data == NULL)
+ return -ENOMEM;
+
+ OBD_ALLOC_PTR(osfs);
+ if (osfs == NULL) {
+ OBD_FREE_PTR(data);
+ return -ENOMEM;
+ }
+
+ if (proc_lustre_fs_root) {
+ err = lprocfs_register_mountpoint(proc_lustre_fs_root, sb,
+ dt, md);
+ if (err < 0)
+ CERROR("could not register mount in /proc/fs/lustre\n");
+ }
+
+ /* indicate the features supported by this client */
+ data->ocd_connect_flags = OBD_CONNECT_IBITS | OBD_CONNECT_NODEVOH |
+ OBD_CONNECT_ATTRFID |
+ OBD_CONNECT_VERSION | OBD_CONNECT_BRW_SIZE |
+ OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA |
+ OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
+ OBD_CONNECT_AT | OBD_CONNECT_LOV_V3 |
+ OBD_CONNECT_RMT_CLIENT | OBD_CONNECT_VBR |
+ OBD_CONNECT_FULL20 | OBD_CONNECT_64BITHASH|
+ OBD_CONNECT_EINPROGRESS |
+ OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
+ OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS;
+
+ if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
+ data->ocd_connect_flags |= OBD_CONNECT_SOM;
+
+ if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
+ data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
+#ifdef CONFIG_FS_POSIX_ACL
+ data->ocd_connect_flags |= OBD_CONNECT_ACL | OBD_CONNECT_UMASK;
+#endif
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
+ /* flag mdc connection as lightweight, only used for test
+ * purpose, use with care */
+ data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
+
+ data->ocd_ibits_known = MDS_INODELOCK_FULL;
+ data->ocd_version = LUSTRE_VERSION_CODE;
+
+ if (sb->s_flags & MS_RDONLY)
+ data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
+ if (sbi->ll_flags & LL_SBI_USER_XATTR)
+ data->ocd_connect_flags |= OBD_CONNECT_XATTR;
+
+#ifdef HAVE_MS_FLOCK_LOCK
+ /* force vfs to use lustre handler for flock() calls - bug 10743 */
+ sb->s_flags |= MS_FLOCK_LOCK;
+#endif
+#ifdef MS_HAS_NEW_AOPS
+ sb->s_flags |= MS_HAS_NEW_AOPS;
+#endif
+
+ if (sbi->ll_flags & LL_SBI_FLOCK)
+ sbi->ll_fop = &ll_file_operations_flock;
+ else if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
+ sbi->ll_fop = &ll_file_operations;
+ else
+ sbi->ll_fop = &ll_file_operations_noflock;
+
+ /* real client */
+ data->ocd_connect_flags |= OBD_CONNECT_REAL;
+ if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
+ data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT_FORCE;
+
+ data->ocd_brw_size = MD_MAX_BRW_SIZE;
+
+ err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid, data, NULL);
+ if (err == -EBUSY) {
+ LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing "
+ "recovery, of which this client is not a "
+ "part. Please wait for recovery to complete,"
+ " abort, or time out.\n", md);
+ GOTO(out, err);
+ } else if (err) {
+ CERROR("cannot connect to %s: rc = %d\n", md, err);
+ GOTO(out, err);
+ }
+
+ sbi->ll_md_exp->exp_connect_data = *data;
+
+ err = obd_fid_init(sbi->ll_md_exp->exp_obd, sbi->ll_md_exp,
+ LUSTRE_SEQ_METADATA);
+ if (err) {
+ CERROR("%s: Can't init metadata layer FID infrastructure, "
+ "rc = %d\n", sbi->ll_md_exp->exp_obd->obd_name, err);
+ GOTO(out_md, err);
+ }
+
+ /* For mount, we only need fs info from MDT0; this also ensures
+ * that in DNE the client can be mounted as long as MDT0 is
+ * available */
+ err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
+ cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ OBD_STATFS_FOR_MDT0);
+ if (err)
+ GOTO(out_md_fid, err);
+
+ /* This needs to be after statfs to ensure connect has finished.
+ * Note that "data" does NOT contain the valid connect reply.
+ * If connecting to a 1.8 server there will be no LMV device, so
+ * we can access the MDC export directly and exp_connect_flags will
+ * be non-zero, but if accessing an upgraded 2.1 server it will
+ * have the correct flags filled in.
+ * XXX: fill in the LMV exp_connect_flags from MDC(s). */
+ valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
+ if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
+ valid != CLIENT_CONNECT_MDT_REQD) {
+ char *buf;
+
+ OBD_ALLOC_WAIT(buf, PAGE_CACHE_SIZE);
+ obd_connect_flags2str(buf, PAGE_CACHE_SIZE,
+ valid ^ CLIENT_CONNECT_MDT_REQD, ",");
+ LCONSOLE_ERROR_MSG(0x170, "Server %s does not support "
+ "feature(s) needed for correct operation "
+ "of this client (%s). Please upgrade "
+ "server or downgrade client.\n",
+ sbi->ll_md_exp->exp_obd->obd_name, buf);
+ OBD_FREE(buf, PAGE_CACHE_SIZE);
+ GOTO(out_md_fid, err = -EPROTO);
+ }
+
+ size = sizeof(*data);
+ err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
+ KEY_CONN_DATA, &size, data, NULL);
+ if (err) {
+ CERROR("%s: Get connect data failed: rc = %d\n",
+ sbi->ll_md_exp->exp_obd->obd_name, err);
+ GOTO(out_md_fid, err);
+ }
+
+ LASSERT(osfs->os_bsize);
+ sb->s_blocksize = osfs->os_bsize;
+ sb->s_blocksize_bits = log2(osfs->os_bsize);
+ sb->s_magic = LL_SUPER_MAGIC;
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sbi->ll_namelen = osfs->os_namelen;
+ sbi->ll_max_rw_chunk = LL_DEFAULT_MAX_RW_CHUNK;
+
+ if ((sbi->ll_flags & LL_SBI_USER_XATTR) &&
+ !(data->ocd_connect_flags & OBD_CONNECT_XATTR)) {
+ LCONSOLE_INFO("Disabling user_xattr feature because "
+ "it is not supported on the server\n");
+ sbi->ll_flags &= ~LL_SBI_USER_XATTR;
+ }
+
+ if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
+#ifdef MS_POSIXACL
+ sb->s_flags |= MS_POSIXACL;
+#endif
+ sbi->ll_flags |= LL_SBI_ACL;
+ } else {
+ LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
+#ifdef MS_POSIXACL
+ sb->s_flags &= ~MS_POSIXACL;
+#endif
+ sbi->ll_flags &= ~LL_SBI_ACL;
+ }
+
+ if (data->ocd_connect_flags & OBD_CONNECT_RMT_CLIENT) {
+ if (!(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {
+ sbi->ll_flags |= LL_SBI_RMT_CLIENT;
+ LCONSOLE_INFO("client is set as remote by default.\n");
+ }
+ } else {
+ if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
+ sbi->ll_flags &= ~LL_SBI_RMT_CLIENT;
+ LCONSOLE_INFO("client claims to be remote, but server "
+ "rejected, forced to be local.\n");
+ }
+ }
+
+ if (data->ocd_connect_flags & OBD_CONNECT_MDS_CAPA) {
+ LCONSOLE_INFO("client enabled MDS capability!\n");
+ sbi->ll_flags |= LL_SBI_MDS_CAPA;
+ }
+
+ if (data->ocd_connect_flags & OBD_CONNECT_OSS_CAPA) {
+ LCONSOLE_INFO("client enabled OSS capability!\n");
+ sbi->ll_flags |= LL_SBI_OSS_CAPA;
+ }
+
+ if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
+ sbi->ll_flags |= LL_SBI_64BIT_HASH;
+
+ if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
+ sbi->ll_md_brw_size = data->ocd_brw_size;
+ else
+ sbi->ll_md_brw_size = PAGE_CACHE_SIZE;
+
+ if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) {
+ LCONSOLE_INFO("Layout lock feature supported.\n");
+ sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
+ }
+
+ obd = class_name2obd(dt);
+ if (!obd) {
+ CERROR("DT %s: not setup or attached\n", dt);
+ GOTO(out_md_fid, err = -ENODEV);
+ }
+
+ data->ocd_connect_flags = OBD_CONNECT_GRANT | OBD_CONNECT_VERSION |
+ OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
+ OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
+ OBD_CONNECT_SRVLOCK | OBD_CONNECT_TRUNCLOCK|
+ OBD_CONNECT_AT | OBD_CONNECT_RMT_CLIENT |
+ OBD_CONNECT_OSS_CAPA | OBD_CONNECT_VBR|
+ OBD_CONNECT_FULL20 | OBD_CONNECT_64BITHASH |
+ OBD_CONNECT_MAXBYTES |
+ OBD_CONNECT_EINPROGRESS |
+ OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
+ OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS;
+
+ if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
+ data->ocd_connect_flags |= OBD_CONNECT_SOM;
+
+ if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) {
+ /* OBD_CONNECT_CKSUM should always be set, even if checksums are
+ * disabled by default, because it can still be enabled on the
+ * fly via /proc. As a consequence, we still need to come to an
+ * agreement on the supported algorithms at connect time */
+ data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
+ data->ocd_cksum_types = OBD_CKSUM_ADLER;
+ else
+ data->ocd_cksum_types = cksum_types_supported_client();
+ }
+
+ data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
+ if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
+ data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT_FORCE;
+
+ CDEBUG(D_RPCTRACE, "ocd_connect_flags: "LPX64" ocd_version: %d "
+ "ocd_grant: %d\n", data->ocd_connect_flags,
+ data->ocd_version, data->ocd_grant);
+
+ obd->obd_upcall.onu_owner = &sbi->ll_lco;
+ obd->obd_upcall.onu_upcall = cl_ocd_update;
+
+ data->ocd_brw_size = DT_MAX_BRW_SIZE;
+
+ err = obd_connect(NULL, &sbi->ll_dt_exp, obd, &sbi->ll_sb_uuid, data,
+ NULL);
+ if (err == -EBUSY) {
+ LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing "
+ "recovery, of which this client is not a "
+ "part. Please wait for recovery to "
+ "complete, abort, or time out.\n", dt);
+ GOTO(out_md, err);
+ } else if (err) {
+ CERROR("%s: Cannot connect to %s: rc = %d\n",
+ sbi->ll_dt_exp->exp_obd->obd_name, dt, err);
+ GOTO(out_md, err);
+ }
+
+ sbi->ll_dt_exp->exp_connect_data = *data;
+
+ err = obd_fid_init(sbi->ll_dt_exp->exp_obd, sbi->ll_dt_exp,
+ LUSTRE_SEQ_METADATA);
+ if (err) {
+ CERROR("%s: Can't init data layer FID infrastructure, "
+ "rc = %d\n", sbi->ll_dt_exp->exp_obd->obd_name, err);
+ GOTO(out_dt, err);
+ }
+
+ mutex_lock(&sbi->ll_lco.lco_lock);
+ sbi->ll_lco.lco_flags = data->ocd_connect_flags;
+ sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
+ sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
+ mutex_unlock(&sbi->ll_lco.lco_lock);
+
+ fid_zero(&sbi->ll_root_fid);
+ err = md_getstatus(sbi->ll_md_exp, &sbi->ll_root_fid, &oc);
+ if (err) {
+ CERROR("cannot mds_connect: rc = %d\n", err);
+ GOTO(out_lock_cn_cb, err);
+ }
+ if (!fid_is_sane(&sbi->ll_root_fid)) {
+ CERROR("%s: Invalid root fid "DFID" during mount\n",
+ sbi->ll_md_exp->exp_obd->obd_name,
+ PFID(&sbi->ll_root_fid));
+ GOTO(out_lock_cn_cb, err = -EINVAL);
+ }
+ CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
+
+ sb->s_op = &lustre_super_operations;
+#if THREAD_SIZE >= 8192 /*b=17630*/
+ sb->s_export_op = &lustre_export_operations;
+#endif
+
+ /* make root inode
+ * XXX: move this to after cbd setup? */
+ valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMDSCAPA;
+ if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
+ valid |= OBD_MD_FLRMTPERM;
+ else if (sbi->ll_flags & LL_SBI_ACL)
+ valid |= OBD_MD_FLACL;
+
+ OBD_ALLOC_PTR(op_data);
+ if (op_data == NULL)
+ GOTO(out_lock_cn_cb, err = -ENOMEM);
+
+ op_data->op_fid1 = sbi->ll_root_fid;
+ op_data->op_mode = 0;
+ op_data->op_capa1 = oc;
+ op_data->op_valid = valid;
+
+ err = md_getattr(sbi->ll_md_exp, op_data, &request);
+ if (oc)
+ capa_put(oc);
+ OBD_FREE_PTR(op_data);
+ if (err) {
+ CERROR("%s: md_getattr failed for root: rc = %d\n",
+ sbi->ll_md_exp->exp_obd->obd_name, err);
+ GOTO(out_lock_cn_cb, err);
+ }
+
+ err = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
+ sbi->ll_md_exp, &lmd);
+ if (err) {
+ CERROR("failed to understand root inode md: rc = %d\n", err);
+ ptlrpc_req_finished(request);
+ GOTO(out_lock_cn_cb, err);
+ }
+
+ LASSERT(fid_is_sane(&sbi->ll_root_fid));
+ root = ll_iget(sb, cl_fid_build_ino(&sbi->ll_root_fid,
+ sbi->ll_flags & LL_SBI_32BIT_API),
+ &lmd);
+ md_free_lustre_md(sbi->ll_md_exp, &lmd);
+ ptlrpc_req_finished(request);
+
+ if (root == NULL || IS_ERR(root)) {
+ if (lmd.lsm)
+ obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm);
+#ifdef CONFIG_FS_POSIX_ACL
+ if (lmd.posix_acl) {
+ posix_acl_release(lmd.posix_acl);
+ lmd.posix_acl = NULL;
+ }
+#endif
+ err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
+ root = NULL;
+ CERROR("lustre_lite: bad iget4 for root\n");
+ GOTO(out_root, err);
+ }
+
+ err = ll_close_thread_start(&sbi->ll_lcq);
+ if (err) {
+ CERROR("cannot start close thread: rc %d\n", err);
+ GOTO(out_root, err);
+ }
+
+#ifdef CONFIG_FS_POSIX_ACL
+ if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
+ rct_init(&sbi->ll_rct);
+ et_init(&sbi->ll_et);
+ }
+#endif
+
+ checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
+ err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
+ KEY_CHECKSUM, sizeof(checksum), &checksum,
+ NULL);
+ cl_sb_init(sb);
+
+ err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CACHE_SET),
+ KEY_CACHE_SET, sizeof(sbi->ll_cache),
+ &sbi->ll_cache, NULL);
+
+ sb->s_root = d_make_root(root);
+ if (sb->s_root == NULL) {
+ CERROR("%s: can't make root dentry\n",
+ ll_get_fsname(sb, NULL, 0));
+ GOTO(out_root, err = -ENOMEM);
+ }
+
+ /* kernels >= 2.6.38 store dentry operations in sb->s_d_op. */
+ d_set_d_op(sb->s_root, &ll_d_root_ops);
+ sb->s_d_op = &ll_d_ops;
+
+ sbi->ll_sdev_orig = sb->s_dev;
+
+ /* We set sb->s_dev equal on all lustre clients in order to support
+ * NFS export clustering. NFSD requires that the FSID be the same
+ * on all clients. */
+ /* s_dev is also used in lt_compare() to compare two fs, but that is
+ * only a node-local comparison. */
+ uuid = obd_get_uuid(sbi->ll_md_exp);
+ if (uuid != NULL) {
+ sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
+ get_uuid2fsid(uuid->uuid, strlen(uuid->uuid), &sbi->ll_fsid);
+ }
+
+ if (data != NULL)
+ OBD_FREE_PTR(data);
+ if (osfs != NULL)
+ OBD_FREE_PTR(osfs);
+
+ return err;
+out_root:
+ if (root)
+ iput(root);
+out_lock_cn_cb:
+ obd_fid_fini(sbi->ll_dt_exp->exp_obd);
+out_dt:
+ obd_disconnect(sbi->ll_dt_exp);
+ sbi->ll_dt_exp = NULL;
+ /* Make sure all OSCs are gone, since cl_cache is accessing sbi. */
+ obd_zombie_barrier();
+out_md_fid:
+ obd_fid_fini(sbi->ll_md_exp->exp_obd);
+out_md:
+ obd_disconnect(sbi->ll_md_exp);
+ sbi->ll_md_exp = NULL;
+out:
+ if (data != NULL)
+ OBD_FREE_PTR(data);
+ if (osfs != NULL)
+ OBD_FREE_PTR(osfs);
+ lprocfs_unregister_mountpoint(sbi);
+ return err;
+}
+
+int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
+{
+ int size, rc;
+
+ *lmmsize = obd_size_diskmd(sbi->ll_dt_exp, NULL);
+ size = sizeof(int);
+ rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
+ KEY_MAX_EASIZE, &size, lmmsize, NULL);
+ if (rc)
+ CERROR("Get max mdsize error rc %d \n", rc);
+
+ return rc;
+}
+
+void ll_dump_inode(struct inode *inode)
+{
+ struct ll_d_hlist_node *tmp;
+ int dentry_count = 0;
+
+ LASSERT(inode != NULL);
+
+ ll_d_hlist_for_each(tmp, &inode->i_dentry)
+ dentry_count++;
+
+ CERROR("inode %p dump: dev=%s ino=%lu mode=%o count=%u, %d dentries\n",
+ inode, ll_i2mdexp(inode)->exp_obd->obd_name, inode->i_ino,
+ inode->i_mode, atomic_read(&inode->i_count), dentry_count);
+}
+
+void lustre_dump_dentry(struct dentry *dentry, int recur)
+{
+ struct list_head *tmp;
+ int subdirs = 0;
+
+ LASSERT(dentry != NULL);
+
+ list_for_each(tmp, &dentry->d_subdirs)
+ subdirs++;
+
+ CERROR("dentry %p dump: name=%.*s parent=%.*s (%p), inode=%p, count=%u,"
+ " flags=0x%x, fsdata=%p, %d subdirs\n", dentry,
+ dentry->d_name.len, dentry->d_name.name,
+ dentry->d_parent->d_name.len, dentry->d_parent->d_name.name,
+ dentry->d_parent, dentry->d_inode, d_count(dentry),
+ dentry->d_flags, dentry->d_fsdata, subdirs);
+ if (dentry->d_inode != NULL)
+ ll_dump_inode(dentry->d_inode);
+
+ if (recur == 0)
+ return;
+
+ list_for_each(tmp, &dentry->d_subdirs) {
+ struct dentry *d = list_entry(tmp, struct dentry, d_u.d_child);
+ lustre_dump_dentry(d, recur - 1);
+ }
+}
+
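+/* Tear down the state set up by client_common_fill_super: the close thread,
+ * the cl layer, the FID infrastructure and the MD/DT connections. */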
+void client_common_put_super(struct super_block *sb)
+{
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+
+#ifdef CONFIG_FS_POSIX_ACL
+ if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
+ et_fini(&sbi->ll_et);
+ rct_fini(&sbi->ll_rct);
+ }
+#endif
+
+ ll_close_thread_shutdown(sbi->ll_lcq);
+
+ cl_sb_fini(sb);
+
+ list_del(&sbi->ll_conn_chain);
+
+ obd_fid_fini(sbi->ll_dt_exp->exp_obd);
+ obd_disconnect(sbi->ll_dt_exp);
+ sbi->ll_dt_exp = NULL;
+ /* wait till all OSCs are gone, since cl_cache is accessing sbi.
+ * see LU-2543. */
+ obd_zombie_barrier();
+
+ lprocfs_unregister_mountpoint(sbi);
+
+ obd_fid_fini(sbi->ll_md_exp->exp_obd);
+ obd_disconnect(sbi->ll_md_exp);
+ sbi->ll_md_exp = NULL;
+}
+
+void ll_kill_super(struct super_block *sb)
+{
+ struct ll_sb_info *sbi;
+
+ /* sb not initialized? */
+ if (!(sb->s_flags & MS_ACTIVE))
+ return;
+
+ sbi = ll_s2sbi(sb);
+ /* We need to restore s_dev (changed for clustered NFS) before put_super,
+ * because newer kernels cache s_dev and changing sb->s_dev in put_super
+ * does not affect the real device removal. */
+ if (sbi) {
+ sb->s_dev = sbi->ll_sdev_orig;
+ sbi->ll_umounting = 1;
+ }
+}
+
+char *ll_read_opt(const char *opt, char *data)
+{
+ char *value;
+ char *retval;
+
+ CDEBUG(D_SUPER, "option: %s, data %s\n", opt, data);
+ if (strncmp(opt, data, strlen(opt)))
+ return NULL;
+ value = strchr(data, '=');
+ if (value == NULL)
+ return NULL;
+
+ value++;
+ OBD_ALLOC(retval, strlen(value) + 1);
+ if (!retval) {
+ CERROR("out of memory!\n");
+ return NULL;
+ }
+
+ memcpy(retval, value, strlen(value)+1);
+ CDEBUG(D_SUPER, "Assigned option: %s, value %s\n", opt, retval);
+ return retval;
+}
+
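+/* Return @fl if @data starts with the option name @opt, 0 otherwise; the
+ * caller then sets or clears the corresponding LL_SBI_* flag. */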
+static inline int ll_set_opt(const char *opt, char *data, int fl)
+{
+ if (strncmp(opt, data, strlen(opt)) != 0)
+ return 0;
+ return fl;
+}
+
+/* non-client-specific mount options are parsed in lmd_parse */
+static int ll_options(char *options, int *flags)
+{
+ int tmp;
+ char *s1 = options, *s2;
+
+ if (!options)
+ return 0;
+
+ CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
+
+ while (*s1) {
+ CDEBUG(D_SUPER, "next opt=%s\n", s1);
+ tmp = ll_set_opt("nolock", s1, LL_SBI_NOLCK);
+ if (tmp) {
+ *flags |= tmp;
+ goto next;
+ }
+ tmp = ll_set_opt("flock", s1, LL_SBI_FLOCK);
+ if (tmp) {
+ *flags |= tmp;
+ goto next;
+ }
+ tmp = ll_set_opt("localflock", s1, LL_SBI_LOCALFLOCK);
+ if (tmp) {
+ *flags |= tmp;
+ goto next;
+ }
+ tmp = ll_set_opt("noflock", s1, LL_SBI_FLOCK|LL_SBI_LOCALFLOCK);
+ if (tmp) {
+ *flags &= ~tmp;
+ goto next;
+ }
+ tmp = ll_set_opt("user_xattr", s1, LL_SBI_USER_XATTR);
+ if (tmp) {
+ *flags |= tmp;
+ goto next;
+ }
+ tmp = ll_set_opt("nouser_xattr", s1, LL_SBI_USER_XATTR);
+ if (tmp) {
+ *flags &= ~tmp;
+ goto next;
+ }
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 5, 50, 0)
+ tmp = ll_set_opt("acl", s1, LL_SBI_ACL);
+ if (tmp) {
+ /* Ignore the deprecated mount option. The client will
+ * always try to mount with ACL support; whether it is
+ * used depends on whether the server supports it. */
+ LCONSOLE_ERROR_MSG(0x152, "Ignoring deprecated "
+ "mount option 'acl'.\n");
+ goto next;
+ }
+ tmp = ll_set_opt("noacl", s1, LL_SBI_ACL);
+ if (tmp) {
+ LCONSOLE_ERROR_MSG(0x152, "Ignoring deprecated "
+ "mount option 'noacl'.\n");
+ goto next;
+ }
+#else
+#warning "{no}acl options have been deprecated since 1.8, please remove them"
+#endif
+ tmp = ll_set_opt("remote_client", s1, LL_SBI_RMT_CLIENT);
+ if (tmp) {
+ *flags |= tmp;
+ goto next;
+ }
+ tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
+ if (tmp) {
+ *flags |= tmp;
+ goto next;
+ }
+ tmp = ll_set_opt("nouser_fid2path", s1, LL_SBI_USER_FID2PATH);
+ if (tmp) {
+ *flags &= ~tmp;
+ goto next;
+ }
+
+ tmp = ll_set_opt("checksum", s1, LL_SBI_CHECKSUM);
+ if (tmp) {
+ *flags |= tmp;
+ goto next;
+ }
+ tmp = ll_set_opt("nochecksum", s1, LL_SBI_CHECKSUM);
+ if (tmp) {
+ *flags &= ~tmp;
+ goto next;
+ }
+ tmp = ll_set_opt("lruresize", s1, LL_SBI_LRU_RESIZE);
+ if (tmp) {
+ *flags |= tmp;
+ goto next;
+ }
+ tmp = ll_set_opt("nolruresize", s1, LL_SBI_LRU_RESIZE);
+ if (tmp) {
+ *flags &= ~tmp;
+ goto next;
+ }
+ tmp = ll_set_opt("lazystatfs", s1, LL_SBI_LAZYSTATFS);
+ if (tmp) {
+ *flags |= tmp;
+ goto next;
+ }
+ tmp = ll_set_opt("nolazystatfs", s1, LL_SBI_LAZYSTATFS);
+ if (tmp) {
+ *flags &= ~tmp;
+ goto next;
+ }
+ tmp = ll_set_opt("som_preview", s1, LL_SBI_SOM_PREVIEW);
+ if (tmp) {
+ *flags |= tmp;
+ goto next;
+ }
+ tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
+ if (tmp) {
+ *flags |= tmp;
+ goto next;
+ }
+ tmp = ll_set_opt("verbose", s1, LL_SBI_VERBOSE);
+ if (tmp) {
+ *flags |= tmp;
+ goto next;
+ }
+ tmp = ll_set_opt("noverbose", s1, LL_SBI_VERBOSE);
+ if (tmp) {
+ *flags &= ~tmp;
+ goto next;
+ }
+ LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
+ s1);
+ return -EINVAL;
+
+next:
+ /* Find next opt */
+ s2 = strchr(s1, ',');
+ if (s2 == NULL)
+ break;
+ s1 = s2 + 1;
+ }
+ return 0;
+}
+
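+/* Initialize the llite-private part of a freshly allocated inode; directory
+ * and non-directory inodes get different statahead/IO fields below. */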
+void ll_lli_init(struct ll_inode_info *lli)
+{
+ lli->lli_inode_magic = LLI_INODE_MAGIC;
+ lli->lli_flags = 0;
+ lli->lli_ioepoch = 0;
+ lli->lli_maxbytes = MAX_LFS_FILESIZE;
+ spin_lock_init(&lli->lli_lock);
+ lli->lli_posix_acl = NULL;
+ lli->lli_remote_perms = NULL;
+ mutex_init(&lli->lli_rmtperm_mutex);
+ /* Do not set lli_fid, it has been initialized already. */
+ fid_zero(&lli->lli_pfid);
+ INIT_LIST_HEAD(&lli->lli_close_list);
+ INIT_LIST_HEAD(&lli->lli_oss_capas);
+ atomic_set(&lli->lli_open_count, 0);
+ lli->lli_mds_capa = NULL;
+ lli->lli_rmtperm_time = 0;
+ lli->lli_pending_och = NULL;
+ lli->lli_mds_read_och = NULL;
+ lli->lli_mds_write_och = NULL;
+ lli->lli_mds_exec_och = NULL;
+ lli->lli_open_fd_read_count = 0;
+ lli->lli_open_fd_write_count = 0;
+ lli->lli_open_fd_exec_count = 0;
+ mutex_init(&lli->lli_och_mutex);
+ spin_lock_init(&lli->lli_agl_lock);
+ lli->lli_has_smd = false;
+ lli->lli_layout_gen = LL_LAYOUT_GEN_NONE;
+ lli->lli_clob = NULL;
+
+ LASSERT(lli->lli_vfs_inode.i_mode != 0);
+ if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
+ mutex_init(&lli->lli_readdir_mutex);
+ lli->lli_opendir_key = NULL;
+ lli->lli_sai = NULL;
+ lli->lli_def_acl = NULL;
+ spin_lock_init(&lli->lli_sa_lock);
+ lli->lli_opendir_pid = 0;
+ } else {
+ sema_init(&lli->lli_size_sem, 1);
+ lli->lli_size_sem_owner = NULL;
+ lli->lli_symlink_name = NULL;
+ init_rwsem(&lli->lli_trunc_sem);
+ mutex_init(&lli->lli_write_mutex);
+ init_rwsem(&lli->lli_glimpse_sem);
+ lli->lli_glimpse_time = 0;
+ INIT_LIST_HEAD(&lli->lli_agl_list);
+ lli->lli_agl_index = 0;
+ lli->lli_async_rc = 0;
+ lli->lli_volatile = false;
+ }
+ mutex_init(&lli->lli_layout_mutex);
+}
+
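+/* Register the per-mount backing_dev_info under a unique "lustre-%d" name. */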
+static inline int ll_bdi_register(struct backing_dev_info *bdi)
+{
+ static atomic_t ll_bdi_num = ATOMIC_INIT(0);
+
+ bdi->name = "lustre";
+ return bdi_register(bdi, NULL, "lustre-%d",
+ atomic_inc_return(&ll_bdi_num));
+}
+
+int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
+{
+ struct lustre_profile *lprof = NULL;
+ struct lustre_sb_info *lsi = s2lsi(sb);
+ struct ll_sb_info *sbi;
+ char *dt = NULL, *md = NULL;
+ char *profilenm = get_profile_name(sb);
+ struct config_llog_instance *cfg;
+ /* %p for void* in printf needs 16+2 characters: 0xffffffffffffffff */
+ const int instlen = sizeof(cfg->cfg_instance) * 2 + 2;
+ int err;
+
+ CDEBUG(D_VFSTRACE, "VFS Op: sb %p\n", sb);
+
+ OBD_ALLOC_PTR(cfg);
+ if (cfg == NULL)
+ return -ENOMEM;
+
+ try_module_get(THIS_MODULE);
+
+ /* client additional sb info */
+ lsi->lsi_llsbi = sbi = ll_init_sbi();
+ if (!sbi) {
+ module_put(THIS_MODULE);
+ OBD_FREE_PTR(cfg);
+ return -ENOMEM;
+ }
+
+ err = ll_options(lsi->lsi_lmd->lmd_opts, &sbi->ll_flags);
+ if (err)
+ GOTO(out_free, err);
+
+ err = bdi_init(&lsi->lsi_bdi);
+ if (err)
+ GOTO(out_free, err);
+ lsi->lsi_flags |= LSI_BDI_INITIALIZED;
+ lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY;
+ err = ll_bdi_register(&lsi->lsi_bdi);
+ if (err)
+ GOTO(out_free, err);
+
+ sb->s_bdi = &lsi->lsi_bdi;
+
+ /* Generate a string unique to this super, in case some joker tries
+ to mount the same fs at two mount points.
+ Use the address of the super itself. */
+ cfg->cfg_instance = sb;
+ cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
+ cfg->cfg_callback = class_config_llog_handler;
+ /* set up client obds */
+ err = lustre_process_log(sb, profilenm, cfg);
+ if (err < 0) {
+ CERROR("Unable to process log: %d\n", err);
+ GOTO(out_free, err);
+ }
+
+ /* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
+ lprof = class_get_profile(profilenm);
+ if (lprof == NULL) {
+ LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be"
+ " read from the MGS. Does that filesystem "
+ "exist?\n", profilenm);
+ GOTO(out_free, err = -EINVAL);
+ }
+ CDEBUG(D_CONFIG, "Found profile %s: mdc=%s osc=%s\n", profilenm,
+ lprof->lp_md, lprof->lp_dt);
+
+ OBD_ALLOC(dt, strlen(lprof->lp_dt) + instlen + 2);
+ if (!dt)
+ GOTO(out_free, err = -ENOMEM);
+ sprintf(dt, "%s-%p", lprof->lp_dt, cfg->cfg_instance);
+
+ OBD_ALLOC(md, strlen(lprof->lp_md) + instlen + 2);
+ if (!md)
+ GOTO(out_free, err = -ENOMEM);
+ sprintf(md, "%s-%p", lprof->lp_md, cfg->cfg_instance);
+
+ /* connections, registrations, sb setup */
+ err = client_common_fill_super(sb, md, dt, mnt);
+
+out_free:
+ if (md)
+ OBD_FREE(md, strlen(lprof->lp_md) + instlen + 2);
+ if (dt)
+ OBD_FREE(dt, strlen(lprof->lp_dt) + instlen + 2);
+ if (err)
+ ll_put_super(sb);
+ else if (sbi->ll_flags & LL_SBI_VERBOSE)
+ LCONSOLE_WARN("Mounted %s\n", profilenm);
+
+ OBD_FREE_PTR(cfg);
+ return err;
+} /* ll_fill_super */
+
+void ll_put_super(struct super_block *sb)
+{
+ struct config_llog_instance cfg;
+ struct obd_device *obd;
+ struct lustre_sb_info *lsi = s2lsi(sb);
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ char *profilenm = get_profile_name(sb);
+ int next, force = 1;
+
+ CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);
+
+ ll_print_capa_stat(sbi);
+
+ cfg.cfg_instance = sb;
+ lustre_end_log(sb, profilenm, &cfg);
+
+ if (sbi->ll_md_exp) {
+ obd = class_exp2obd(sbi->ll_md_exp);
+ if (obd)
+ force = obd->obd_force;
+ }
+
+ /* We need to set force before the lov_disconnect in
+ lustre_common_put_super, since lov_disconnect cleans up the OSCs as well. */
+ if (force) {
+ next = 0;
+ while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
+ &next)) != NULL) {
+ obd->obd_force = force;
+ }
+ }
+
+ if (sbi->ll_lcq) {
+ /* Only if client_common_fill_super succeeded */
+ client_common_put_super(sb);
+ }
+
+ next = 0;
+ while ((obd = class_devices_in_group(&sbi->ll_sb_uuid, &next)) != NULL) {
+ class_manual_cleanup(obd);
+ }
+
+ if (sbi->ll_flags & LL_SBI_VERBOSE)
+ LCONSOLE_WARN("Unmounted %s\n", profilenm ? profilenm : "");
+
+ if (profilenm)
+ class_del_profile(profilenm);
+
+ if (lsi->lsi_flags & LSI_BDI_INITIALIZED) {
+ bdi_destroy(&lsi->lsi_bdi);
+ lsi->lsi_flags &= ~LSI_BDI_INITIALIZED;
+ }
+
+ ll_free_sbi(sb);
+ lsi->lsi_llsbi = NULL;
+
+ lustre_common_put_super(sb);
+
+ module_put(THIS_MODULE);
+} /* ll_put_super */
+
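+/* Return an igrab()'ed inode attached to the lock's resource LVB, or NULL if
+ * there is none or its magic is bogus. */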
+struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
+{
+ struct inode *inode = NULL;
+
+ /* NOTE: we depend on atomic igrab() -bzzz */
+ lock_res_and_lock(lock);
+ if (lock->l_resource->lr_lvb_inode) {
+ struct ll_inode_info *lli;
+ lli = ll_i2info(lock->l_resource->lr_lvb_inode);
+ if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
+ inode = igrab(lock->l_resource->lr_lvb_inode);
+ } else {
+ inode = lock->l_resource->lr_lvb_inode;
+ LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ? D_INFO :
+ D_WARNING, lock, "lr_lvb_inode %p is "
+ "bogus: magic %08x",
+ lock->l_resource->lr_lvb_inode,
+ lli->lli_inode_magic);
+ inode = NULL;
+ }
+ }
+ unlock_res_and_lock(lock);
+ return inode;
+}
+
+struct inode *ll_inode_from_lock(struct ldlm_lock *lock)
+{
+ struct inode *inode = NULL;
+ /* NOTE: we depend on atomic igrab() -bzzz */
+ lock_res_and_lock(lock);
+ if (lock->l_ast_data) {
+ struct ll_inode_info *lli = ll_i2info(lock->l_ast_data);
+ if (lli->lli_inode_magic == LLI_INODE_MAGIC) {
+ inode = igrab(lock->l_ast_data);
+ } else {
+ inode = lock->l_ast_data;
+ LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ? D_INFO :
+ D_WARNING, lock, "l_ast_data %p is "
+ "bogus: magic %08x", lock->l_ast_data,
+ lli->lli_inode_magic);
+ inode = NULL;
+ }
+ }
+ unlock_res_and_lock(lock);
+ return inode;
+}
+
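+/* Drop all llite state attached to an inode being evicted: open handles,
+ * symlink name, ACLs/remote perms, capas and the cl_object. */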
+void ll_clear_inode(struct inode *inode)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
+ inode->i_generation, inode);
+
+ if (S_ISDIR(inode->i_mode)) {
+ /* these should have been cleared in ll_file_release */
+ LASSERT(lli->lli_opendir_key == NULL);
+ LASSERT(lli->lli_sai == NULL);
+ LASSERT(lli->lli_opendir_pid == 0);
+ }
+
+ spin_lock(&lli->lli_lock);
+ ll_i2info(inode)->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
+ spin_unlock(&lli->lli_lock);
+ md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
+
+ LASSERT(!lli->lli_open_fd_write_count);
+ LASSERT(!lli->lli_open_fd_read_count);
+ LASSERT(!lli->lli_open_fd_exec_count);
+
+ if (lli->lli_mds_write_och)
+ ll_md_real_close(inode, FMODE_WRITE);
+ if (lli->lli_mds_exec_och)
+ ll_md_real_close(inode, FMODE_EXEC);
+ if (lli->lli_mds_read_och)
+ ll_md_real_close(inode, FMODE_READ);
+
+ if (S_ISLNK(inode->i_mode) && lli->lli_symlink_name) {
+ OBD_FREE(lli->lli_symlink_name,
+ strlen(lli->lli_symlink_name) + 1);
+ lli->lli_symlink_name = NULL;
+ }
+
+ if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
+ LASSERT(lli->lli_posix_acl == NULL);
+ if (lli->lli_remote_perms) {
+ free_rmtperm_hash(lli->lli_remote_perms);
+ lli->lli_remote_perms = NULL;
+ }
+ }
+#ifdef CONFIG_FS_POSIX_ACL
+ else if (lli->lli_posix_acl) {
+ LASSERT(atomic_read(&lli->lli_posix_acl->a_refcount) == 1);
+ LASSERT(lli->lli_remote_perms == NULL);
+ posix_acl_release(lli->lli_posix_acl);
+ lli->lli_posix_acl = NULL;
+ }
+#endif
+ lli->lli_inode_magic = LLI_INODE_DEAD;
+
+ ll_clear_inode_capas(inode);
+ if (!S_ISDIR(inode->i_mode))
+ LASSERT(list_empty(&lli->lli_agl_list));
+
+ /*
+ * XXX This has to be done before lsm is freed below, because
+ * cl_object still uses inode lsm.
+ */
+ cl_inode_fini(inode);
+ lli->lli_has_smd = false;
+}
+
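+/* Send the attribute update to the MDS and fold the returned metadata back
+ * into the local inode. */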
+int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
+ struct md_open_data **mod)
+{
+ struct lustre_md md;
+ struct inode *inode = dentry->d_inode;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ptlrpc_request *request = NULL;
+ int rc, ia_valid;
+
+ op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
+ LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+
+ rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, NULL, 0,
+ &request, mod);
+ if (rc) {
+ ptlrpc_req_finished(request);
+ if (rc == -ENOENT) {
+ clear_nlink(inode);
+ /* Unlinked special device node? Or just a race?
+ * Pretend we have done everything. */
+ if (!S_ISREG(inode->i_mode) &&
+ !S_ISDIR(inode->i_mode)) {
+ ia_valid = op_data->op_attr.ia_valid;
+ op_data->op_attr.ia_valid &= ~TIMES_SET_FLAGS;
+ rc = simple_setattr(dentry, &op_data->op_attr);
+ op_data->op_attr.ia_valid = ia_valid;
+ }
+ } else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
+ CERROR("md_setattr fails: rc = %d\n", rc);
+ }
+ return rc;
+ }
+
+ rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
+ sbi->ll_md_exp, &md);
+ if (rc) {
+ ptlrpc_req_finished(request);
+ return rc;
+ }
+
+ ia_valid = op_data->op_attr.ia_valid;
+ /* inode size will be set in ll_setattr_ost; we can't do it now since
+ * the dirty cache is not cleared yet. */
+ op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
+ rc = simple_setattr(dentry, &op_data->op_attr);
+ op_data->op_attr.ia_valid = ia_valid;
+
+ /* Extract epoch data if obtained. */
+ op_data->op_handle = md.body->handle;
+ op_data->op_ioepoch = md.body->ioepoch;
+
+ ll_update_inode(inode, &md);
+ ptlrpc_req_finished(request);
+
+ return rc;
+}
+
+/* Close IO epoch and send Size-on-MDS attribute update. */
+static int ll_setattr_done_writing(struct inode *inode,
+ struct md_op_data *op_data,
+ struct md_open_data *mod)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ int rc = 0;
+
+ LASSERT(op_data != NULL);
+ if (!S_ISREG(inode->i_mode))
+ return 0;
+
+ CDEBUG(D_INODE, "Epoch "LPU64" closed on "DFID" for truncate\n",
+ op_data->op_ioepoch, PFID(&lli->lli_fid));
+
+ op_data->op_flags = MF_EPOCH_CLOSE;
+ ll_done_writing_attr(inode, op_data);
+ ll_pack_inode2opdata(inode, op_data, NULL);
+
+ rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, mod);
+ if (rc == -EAGAIN) {
+ /* MDS has instructed us to obtain the Size-on-MDS attribute
+ * from the OSTs and send a setattr back to the MDS. */
+ rc = ll_som_update(inode, op_data);
+ } else if (rc) {
+ CERROR("inode %lu mdc truncate failed: rc = %d\n",
+ inode->i_ino, rc);
+ }
+ return rc;
+}
+
+static int ll_setattr_ost(struct inode *inode, struct iattr *attr)
+{
+ struct obd_capa *capa;
+ int rc;
+
+ if (attr->ia_valid & ATTR_SIZE)
+ capa = ll_osscapa_get(inode, CAPA_OPC_OSS_TRUNC);
+ else
+ capa = ll_mdscapa_get(inode);
+
+ rc = cl_setattr_ost(inode, attr, capa);
+
+ if (attr->ia_valid & ATTR_SIZE)
+ ll_truncate_free_capa(capa);
+ else
+ capa_put(capa);
+
+ return rc;
+}
+
+
+/* If this inode has objects allocated to it (lsm != NULL), then the OST
+ * object(s) determine the file size and mtime. Otherwise, the MDS will
+ * keep these values until such a time that objects are allocated for it.
+ * We do the MDS operations first, as it is checking permissions for us.
+ * We don't do the MDS RPC if there is nothing that we want to store there,
+ * otherwise there is no harm in updating mtime/atime on the MDS if we are
+ * going to do an RPC anyway.
+ *
+ * If we are doing a truncate, we will send the mtime and ctime updates
+ * to the OST with the punch RPC, otherwise we do an explicit setattr RPC.
+ * I don't believe it is possible to get e.g. ATTR_MTIME_SET and ATTR_SIZE
+ * at the same time.
+ */
+int ll_setattr_raw(struct dentry *dentry, struct iattr *attr)
+{
+ struct inode *inode = dentry->d_inode;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct md_op_data *op_data = NULL;
+ struct md_open_data *mod = NULL;
+ int rc = 0, rc1 = 0;
+
+ CDEBUG(D_VFSTRACE, "%s: setattr inode %p/fid:"DFID" from %llu to %llu, "
+ "valid %x\n", ll_get_fsname(inode->i_sb, NULL, 0), inode,
+ PFID(&lli->lli_fid), i_size_read(inode), attr->ia_size,
+ attr->ia_valid);
+
+ if (attr->ia_valid & ATTR_SIZE) {
+ /* Check new size against VFS/VM file size limit and rlimit */
+ rc = inode_newsize_ok(inode, attr->ia_size);
+ if (rc)
+ return rc;
+
+ /* The maximum Lustre file size is variable, based on the
+ * OST maximum object size and number of stripes. This
+ * needs another check in addition to the VFS check above. */
+ if (attr->ia_size > ll_file_maxbytes(inode)) {
+ CDEBUG(D_INODE,"file "DFID" too large %llu > "LPU64"\n",
+ PFID(&lli->lli_fid), attr->ia_size,
+ ll_file_maxbytes(inode));
+ return -EFBIG;
+ }
+
+ attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
+ }
+
+ /* POSIX: check before ATTR_*TIME_SET is set (from inode_change_ok) */
+ if (attr->ia_valid & TIMES_SET_FLAGS) {
+ if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
+ !cfs_capable(CFS_CAP_FOWNER))
+ return -EPERM;
+ }
+
+ /* We mark all of the fields "set" so MDS/OST does not re-set them */
+ if (attr->ia_valid & ATTR_CTIME) {
+ attr->ia_ctime = CURRENT_TIME;
+ attr->ia_valid |= ATTR_CTIME_SET;
+ }
+ if (!(attr->ia_valid & ATTR_ATIME_SET) &&
+ (attr->ia_valid & ATTR_ATIME)) {
+ attr->ia_atime = CURRENT_TIME;
+ attr->ia_valid |= ATTR_ATIME_SET;
+ }
+ if (!(attr->ia_valid & ATTR_MTIME_SET) &&
+ (attr->ia_valid & ATTR_MTIME)) {
+ attr->ia_mtime = CURRENT_TIME;
+ attr->ia_valid |= ATTR_MTIME_SET;
+ }
+
+ if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
+ CDEBUG(D_INODE, "setting mtime %lu, ctime %lu, now = %lu\n",
+ LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
+ cfs_time_current_sec());
+
+ /* If we are changing file size, file content is modified, flag it. */
+ if (attr->ia_valid & ATTR_SIZE) {
+ attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
+ spin_lock(&lli->lli_lock);
+ lli->lli_flags |= LLIF_DATA_MODIFIED;
+ spin_unlock(&lli->lli_lock);
+ }
+
+ /* We always do an MDS RPC, even if we're only changing the size;
+ * only the MDS knows whether truncate() should fail with -ETXTBSY */
+
+ OBD_ALLOC_PTR(op_data);
+ if (op_data == NULL)
+ return -ENOMEM;
+
+ if (!S_ISDIR(inode->i_mode)) {
+ if (attr->ia_valid & ATTR_SIZE)
+ inode_dio_write_done(inode);
+ mutex_unlock(&inode->i_mutex);
+ down_write(&lli->lli_trunc_sem);
+ }
+
+ memcpy(&op_data->op_attr, attr, sizeof(*attr));
+
+ /* Open epoch for truncate. */
+ if (exp_connect_som(ll_i2mdexp(inode)) &&
+ (attr->ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MTIME_SET)))
+ op_data->op_flags = MF_EPOCH_OPEN;
+
+ rc = ll_md_setattr(dentry, op_data, &mod);
+ if (rc)
+ GOTO(out, rc);
+
+ /* RPC to MDT is sent, cancel data modification flag */
+ if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) {
+ spin_lock(&lli->lli_lock);
+ lli->lli_flags &= ~LLIF_DATA_MODIFIED;
+ spin_unlock(&lli->lli_lock);
+ }
+
+ ll_ioepoch_open(lli, op_data->op_ioepoch);
+ if (!S_ISREG(inode->i_mode))
+ GOTO(out, rc = 0);
+
+ if (attr->ia_valid & (ATTR_SIZE |
+ ATTR_ATIME | ATTR_ATIME_SET |
+ ATTR_MTIME | ATTR_MTIME_SET))
+ /* For truncate and utimes sending attributes to OSTs, setting
+ * mtime/atime to the past will be performed under PW [0:EOF]
+ * extent lock (new_size:EOF for truncate). It may seem
+ * excessive to send mtime/atime updates to OSTs when not
+ * setting times to past, but it is necessary due to possible
+ * time de-synchronization between MDT inode and OST objects */
+ rc = ll_setattr_ost(inode, attr);
+out:
+ if (op_data) {
+ if (op_data->op_ioepoch) {
+ rc1 = ll_setattr_done_writing(inode, op_data, mod);
+ if (!rc)
+ rc = rc1;
+ }
+ ll_finish_md_op_data(op_data);
+ }
+ if (!S_ISDIR(inode->i_mode)) {
+ up_write(&lli->lli_trunc_sem);
+ mutex_lock(&inode->i_mutex);
+ if (attr->ia_valid & ATTR_SIZE)
+ inode_dio_wait(inode);
+ }
+
+ ll_stats_ops_tally(ll_i2sbi(inode), (attr->ia_valid & ATTR_SIZE) ?
+ LPROC_LL_TRUNC : LPROC_LL_SETATTR, 1);
+
+ return rc;
+}
+
+int ll_setattr(struct dentry *de, struct iattr *attr)
+{
+ int mode = de->d_inode->i_mode;
+
+ if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
+ (ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
+ attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
+
+ if (((attr->ia_valid & (ATTR_MODE|ATTR_FORCE|ATTR_SIZE)) ==
+ (ATTR_SIZE|ATTR_MODE)) &&
+ (((mode & S_ISUID) && !(attr->ia_mode & S_ISUID)) ||
+ (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
+ !(attr->ia_mode & S_ISGID))))
+ attr->ia_valid |= ATTR_FORCE;
+
+ if ((mode & S_ISUID) &&
+ !(attr->ia_mode & S_ISUID) &&
+ !(attr->ia_valid & ATTR_KILL_SUID))
+ attr->ia_valid |= ATTR_KILL_SUID;
+
+ if (((mode & (S_ISGID|S_IXGRP)) == (S_ISGID|S_IXGRP)) &&
+ !(attr->ia_mode & S_ISGID) &&
+ !(attr->ia_valid & ATTR_KILL_SGID))
+ attr->ia_valid |= ATTR_KILL_SGID;
+
+ return ll_setattr_raw(de, attr);
+}
+
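+/* Aggregate statfs data: inode counts come from the MDS, block counts from
+ * the OSTs; total inodes are reduced if the OSTs have fewer free objects. */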
+int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
+ __u64 max_age, __u32 flags)
+{
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ struct obd_statfs obd_osfs;
+ int rc;
+
+ rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
+ if (rc) {
+ CERROR("md_statfs fails: rc = %d\n", rc);
+ return rc;
+ }
+
+ osfs->os_type = sb->s_magic;
+
+ CDEBUG(D_SUPER, "MDC blocks "LPU64"/"LPU64" objects "LPU64"/"LPU64"\n",
+ osfs->os_bavail, osfs->os_blocks, osfs->os_ffree,osfs->os_files);
+
+ if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
+ flags |= OBD_STATFS_NODELAY;
+
+ rc = obd_statfs_rqset(sbi->ll_dt_exp, &obd_osfs, max_age, flags);
+ if (rc) {
+ CERROR("obd_statfs fails: rc = %d\n", rc);
+ return rc;
+ }
+
+ CDEBUG(D_SUPER, "OSC blocks "LPU64"/"LPU64" objects "LPU64"/"LPU64"\n",
+ obd_osfs.os_bavail, obd_osfs.os_blocks, obd_osfs.os_ffree,
+ obd_osfs.os_files);
+
+ osfs->os_bsize = obd_osfs.os_bsize;
+ osfs->os_blocks = obd_osfs.os_blocks;
+ osfs->os_bfree = obd_osfs.os_bfree;
+ osfs->os_bavail = obd_osfs.os_bavail;
+
+ /* If we don't have as many objects free on the OST as inodes
+ * on the MDS, we reduce the total number of inodes to
+ * compensate, so that the "inodes in use" number is correct.
+ */
+ if (obd_osfs.os_ffree < osfs->os_ffree) {
+ osfs->os_files = (osfs->os_files - osfs->os_ffree) +
+ obd_osfs.os_ffree;
+ osfs->os_ffree = obd_osfs.os_ffree;
+ }
+
+ return rc;
+}
+
+int ll_statfs(struct dentry *de, struct kstatfs *sfs)
+{
+ struct super_block *sb = de->d_sb;
+ struct obd_statfs osfs;
+ int rc;
+
+ CDEBUG(D_VFSTRACE, "VFS Op: at "LPU64" jiffies\n", get_jiffies_64());
+ ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_STAFS, 1);
+
+ /* Some amount of caching on the client is allowed */
+ rc = ll_statfs_internal(sb, &osfs,
+ cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ 0);
+ if (rc)
+ return rc;
+
+ statfs_unpack(sfs, &osfs);
+
+ /* We need to downshift for all 32-bit kernels, because we can't
+ * tell if the kernel is being called via sys_statfs64() or not.
+ * Stop before overflowing f_bsize - in which case it is better
+ * to just risk EOVERFLOW if caller is using old sys_statfs(). */
+ if (sizeof(long) < 8) {
+ while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
+ sfs->f_bsize <<= 1;
+
+ osfs.os_blocks >>= 1;
+ osfs.os_bfree >>= 1;
+ osfs.os_bavail >>= 1;
+ }
+ }
+
+ sfs->f_blocks = osfs.os_blocks;
+ sfs->f_bfree = osfs.os_bfree;
+ sfs->f_bavail = osfs.os_bavail;
+ sfs->f_fsid = ll_s2sbi(sb)->ll_fsid;
+ return 0;
+}
+
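+/* Serialize i_size updates for regular files; lli_size_sem_owner records the
+ * holder so recursive locking is caught by the LASSERTs. */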
+void ll_inode_size_lock(struct inode *inode)
+{
+ struct ll_inode_info *lli;
+
+ LASSERT(!S_ISDIR(inode->i_mode));
+
+ lli = ll_i2info(inode);
+ LASSERT(lli->lli_size_sem_owner != current);
+ down(&lli->lli_size_sem);
+ LASSERT(lli->lli_size_sem_owner == NULL);
+ lli->lli_size_sem_owner = current;
+}
+
+void ll_inode_size_unlock(struct inode *inode)
+{
+ struct ll_inode_info *lli;
+
+ lli = ll_i2info(inode);
+ LASSERT(lli->lli_size_sem_owner == current);
+ lli->lli_size_sem_owner = NULL;
+ up(&lli->lli_size_sem);
+}
+
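+/* Merge the MDS-supplied attributes in @md into @inode; size/blocks from the
+ * MDS are only trusted under the SOM conditions checked below. */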
+void ll_update_inode(struct inode *inode, struct lustre_md *md)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct mdt_body *body = md->body;
+ struct lov_stripe_md *lsm = md->lsm;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+
+ LASSERT ((lsm != NULL) == ((body->valid & OBD_MD_FLEASIZE) != 0));
+ if (lsm != NULL) {
+ if (!lli->lli_has_smd &&
+ !(sbi->ll_flags & LL_SBI_LAYOUT_LOCK))
+ cl_file_inode_init(inode, md);
+
+ lli->lli_maxbytes = lsm->lsm_maxbytes;
+ if (lli->lli_maxbytes > MAX_LFS_FILESIZE)
+ lli->lli_maxbytes = MAX_LFS_FILESIZE;
+ }
+
+ if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
+ if (body->valid & OBD_MD_FLRMTPERM)
+ ll_update_remote_perm(inode, md->remote_perm);
+ }
+#ifdef CONFIG_FS_POSIX_ACL
+ else if (body->valid & OBD_MD_FLACL) {
+ spin_lock(&lli->lli_lock);
+ if (lli->lli_posix_acl)
+ posix_acl_release(lli->lli_posix_acl);
+ lli->lli_posix_acl = md->posix_acl;
+ spin_unlock(&lli->lli_lock);
+ }
+#endif
+ inode->i_ino = cl_fid_build_ino(&body->fid1,
+ sbi->ll_flags & LL_SBI_32BIT_API);
+ inode->i_generation = cl_fid_build_gen(&body->fid1);
+
+ if (body->valid & OBD_MD_FLATIME) {
+ if (body->atime > LTIME_S(inode->i_atime))
+ LTIME_S(inode->i_atime) = body->atime;
+ lli->lli_lvb.lvb_atime = body->atime;
+ }
+ if (body->valid & OBD_MD_FLMTIME) {
+ if (body->mtime > LTIME_S(inode->i_mtime)) {
+ CDEBUG(D_INODE, "setting ino %lu mtime from %lu "
+ "to "LPU64"\n", inode->i_ino,
+ LTIME_S(inode->i_mtime), body->mtime);
+ LTIME_S(inode->i_mtime) = body->mtime;
+ }
+ lli->lli_lvb.lvb_mtime = body->mtime;
+ }
+ if (body->valid & OBD_MD_FLCTIME) {
+ if (body->ctime > LTIME_S(inode->i_ctime))
+ LTIME_S(inode->i_ctime) = body->ctime;
+ lli->lli_lvb.lvb_ctime = body->ctime;
+ }
+ if (body->valid & OBD_MD_FLMODE)
+ inode->i_mode = (inode->i_mode & S_IFMT)|(body->mode & ~S_IFMT);
+ if (body->valid & OBD_MD_FLTYPE)
+ inode->i_mode = (inode->i_mode & ~S_IFMT)|(body->mode & S_IFMT);
+ LASSERT(inode->i_mode != 0);
+ if (S_ISREG(inode->i_mode)) {
+ inode->i_blkbits = min(PTLRPC_MAX_BRW_BITS + 1, LL_MAX_BLKSIZE_BITS);
+ } else {
+ inode->i_blkbits = inode->i_sb->s_blocksize_bits;
+ }
+ if (body->valid & OBD_MD_FLUID)
+ inode->i_uid = make_kuid(&init_user_ns, body->uid);
+ if (body->valid & OBD_MD_FLGID)
+ inode->i_gid = make_kgid(&init_user_ns, body->gid);
+ if (body->valid & OBD_MD_FLFLAGS)
+ inode->i_flags = ll_ext_to_inode_flags(body->flags);
+ if (body->valid & OBD_MD_FLNLINK)
+ set_nlink(inode, body->nlink);
+ if (body->valid & OBD_MD_FLRDEV)
+ inode->i_rdev = old_decode_dev(body->rdev);
+
+ if (body->valid & OBD_MD_FLID) {
+ /* FID shouldn't be changed! */
+ if (fid_is_sane(&lli->lli_fid)) {
+ LASSERTF(lu_fid_eq(&lli->lli_fid, &body->fid1),
+ "Trying to change FID "DFID
+ " to the "DFID", inode %lu/%u(%p)\n",
+ PFID(&lli->lli_fid), PFID(&body->fid1),
+ inode->i_ino, inode->i_generation, inode);
+ } else
+ lli->lli_fid = body->fid1;
+ }
+
+ LASSERT(fid_seq(&lli->lli_fid) != 0);
+
+ if (body->valid & OBD_MD_FLSIZE) {
+ if (exp_connect_som(ll_i2mdexp(inode)) &&
+ S_ISREG(inode->i_mode)) {
+ struct lustre_handle lockh;
+ ldlm_mode_t mode;
+
+ /* As it is possible a blocking ast has been processed
+ * by this time, we need to check there is an UPDATE
+ * lock on the client and set LLIF_MDS_SIZE_LOCK while
+ * holding it. */
+ mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE,
+ &lockh, LDLM_FL_CBPENDING);
+ if (mode) {
+ if (lli->lli_flags & (LLIF_DONE_WRITING |
+ LLIF_EPOCH_PENDING |
+ LLIF_SOM_DIRTY)) {
+ CERROR("ino %lu flags %u still has "
+ "size authority! do not trust "
+ "the size got from MDS\n",
+ inode->i_ino, lli->lli_flags);
+ } else {
+ /* Use old size assignment to avoid
+ * deadlock bz14138 & bz14326 */
+ i_size_write(inode, body->size);
+ spin_lock(&lli->lli_lock);
+ lli->lli_flags |= LLIF_MDS_SIZE_LOCK;
+ spin_unlock(&lli->lli_lock);
+ }
+ ldlm_lock_decref(&lockh, mode);
+ }
+ } else {
+ /* Use old size assignment to avoid
+ * deadlock bz14138 & bz14326 */
+ i_size_write(inode, body->size);
+
+ CDEBUG(D_VFSTRACE, "inode=%lu, updating i_size %llu\n",
+ inode->i_ino, (unsigned long long)body->size);
+ }
+
+ if (body->valid & OBD_MD_FLBLOCKS)
+ inode->i_blocks = body->blocks;
+ }
+
+ if (body->valid & OBD_MD_FLMDSCAPA) {
+ LASSERT(md->mds_capa);
+ ll_add_capa(inode, md->mds_capa);
+ }
+ if (body->valid & OBD_MD_FLOSSCAPA) {
+ LASSERT(md->oss_capa);
+ ll_add_capa(inode, md->oss_capa);
+ }
+}
+
+void ll_read_inode2(struct inode *inode, void *opaque)
+{
+ struct lustre_md *md = opaque;
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
+ PFID(&lli->lli_fid), inode);
+
+ LASSERT(!lli->lli_has_smd);
+
+ /* Core attributes from the MDS first. This is a new inode, and
+ * the VFS doesn't zero times in the core inode so we have to do
+ * it ourselves. They will be overwritten by either MDS or OST
+ * attributes - we just need to make sure they aren't newer. */
+ LTIME_S(inode->i_mtime) = 0;
+ LTIME_S(inode->i_atime) = 0;
+ LTIME_S(inode->i_ctime) = 0;
+ inode->i_rdev = 0;
+ ll_update_inode(inode, md);
+
+ /* OIDEBUG(inode); */
+
+ /* initializing backing dev info. */
+ inode->i_mapping->backing_dev_info = &s2lsi(inode->i_sb)->lsi_bdi;
+
+ if (S_ISREG(inode->i_mode)) {
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ inode->i_op = &ll_file_inode_operations;
+ inode->i_fop = sbi->ll_fop;
+ inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
+ } else if (S_ISDIR(inode->i_mode)) {
+ inode->i_op = &ll_dir_inode_operations;
+ inode->i_fop = &ll_dir_operations;
+ } else if (S_ISLNK(inode->i_mode)) {
+ inode->i_op = &ll_fast_symlink_inode_operations;
+ } else {
+ inode->i_op = &ll_special_inode_operations;
+
+ init_special_inode(inode, inode->i_mode,
+ inode->i_rdev);
+ }
+}
+
+void ll_delete_inode(struct inode *inode)
+{
+ struct cl_inode_info *lli = cl_i2info(inode);
+
+ if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL)
+ /* discard all dirty pages before truncating them, required by
+ * osc_extent implementation at LU-1030. */
+ cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
+ CL_FSYNC_DISCARD, 1);
+
+ truncate_inode_pages(&inode->i_data, 0);
+
+ /* Workaround for LU-118 */
+ if (inode->i_data.nrpages) {
+ TREE_READ_LOCK_IRQ(&inode->i_data);
+ TREE_READ_UNLOCK_IRQ(&inode->i_data);
+ LASSERTF(inode->i_data.nrpages == 0,
+ "inode=%lu/%u(%p) nrpages=%lu, see "
+ "http://jira.whamcloud.com/browse/LU-118\n",
+ inode->i_ino, inode->i_generation, inode,
+ inode->i_data.nrpages);
+ }
+ /* Workaround end */
+
+ ll_clear_inode(inode);
+ clear_inode(inode);
+}
+
+int ll_iocontrol(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ptlrpc_request *req = NULL;
+ int rc, flags = 0;
+
+ switch (cmd) {
+ case FSFILT_IOC_GETFLAGS: {
+ struct mdt_body *body;
+ struct md_op_data *op_data;
+
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
+ 0, 0, LUSTRE_OPC_ANY,
+ NULL);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+
+ op_data->op_valid = OBD_MD_FLFLAGS;
+ rc = md_getattr(sbi->ll_md_exp, op_data, &req);
+ ll_finish_md_op_data(op_data);
+ if (rc) {
+ CERROR("failure %d inode %lu\n", rc, inode->i_ino);
+ return -abs(rc);
+ }
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+
+ flags = body->flags;
+
+ ptlrpc_req_finished(req);
+
+ return put_user(flags, (int *)arg);
+ }
+ case FSFILT_IOC_SETFLAGS: {
+ struct lov_stripe_md *lsm;
+ struct obd_info oinfo = { { { 0 } } };
+ struct md_op_data *op_data;
+
+ if (get_user(flags, (int *)arg))
+ return -EFAULT;
+
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
+ LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+
+ ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags = flags;
+ op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
+ rc = md_setattr(sbi->ll_md_exp, op_data,
+ NULL, 0, NULL, 0, &req, NULL);
+ ll_finish_md_op_data(op_data);
+ ptlrpc_req_finished(req);
+ if (rc)
+ return rc;
+
+ inode->i_flags = ll_ext_to_inode_flags(flags);
+
+ lsm = ccc_inode_lsm_get(inode);
+ if (!lsm_has_objects(lsm)) {
+ ccc_inode_lsm_put(inode, lsm);
+ return 0;
+ }
+
+ OBDO_ALLOC(oinfo.oi_oa);
+ if (!oinfo.oi_oa) {
+ ccc_inode_lsm_put(inode, lsm);
+ return -ENOMEM;
+ }
+ oinfo.oi_md = lsm;
+ oinfo.oi_oa->o_oi = lsm->lsm_oi;
+ oinfo.oi_oa->o_flags = flags;
+ oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS |
+ OBD_MD_FLGROUP;
+ oinfo.oi_capa = ll_mdscapa_get(inode);
+ obdo_set_parent_fid(oinfo.oi_oa, &ll_i2info(inode)->lli_fid);
+ rc = obd_setattr_rqset(sbi->ll_dt_exp, &oinfo, NULL);
+ capa_put(oinfo.oi_capa);
+ OBDO_FREE(oinfo.oi_oa);
+ ccc_inode_lsm_put(inode, lsm);
+
+ if (rc && rc != -EPERM && rc != -EACCES)
+ CERROR("osc_setattr_async fails: rc = %d\n", rc);
+
+ return rc;
+ }
+ default:
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
+int ll_flush_ctx(struct inode *inode)
+{
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+
+ CDEBUG(D_SEC, "flush context for user %d\n",
+ from_kuid(&init_user_ns, current_uid()));
+
+ obd_set_info_async(NULL, sbi->ll_md_exp,
+ sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
+ 0, NULL, NULL);
+ obd_set_info_async(NULL, sbi->ll_dt_exp,
+ sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
+ 0, NULL, NULL);
+ return 0;
+}
+
+/* umount -f client means force down, don't save state */
+void ll_umount_begin(struct super_block *sb)
+{
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ struct obd_device *obd;
+ struct obd_ioctl_data *ioc_data;
+
+ CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
+ sb->s_count, atomic_read(&sb->s_active));
+
+ obd = class_exp2obd(sbi->ll_md_exp);
+ if (obd == NULL) {
+ CERROR("Invalid MDC connection handle "LPX64"\n",
+ sbi->ll_md_exp->exp_handle.h_cookie);
+ return;
+ }
+ obd->obd_force = 1;
+
+ obd = class_exp2obd(sbi->ll_dt_exp);
+ if (obd == NULL) {
+ CERROR("Invalid LOV connection handle "LPX64"\n",
+ sbi->ll_dt_exp->exp_handle.h_cookie);
+ return;
+ }
+ obd->obd_force = 1;
+
+ OBD_ALLOC_PTR(ioc_data);
+ if (ioc_data) {
+ obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_md_exp,
+ sizeof *ioc_data, ioc_data, NULL);
+
+ obd_iocontrol(IOC_OSC_SET_ACTIVE, sbi->ll_dt_exp,
+ sizeof *ioc_data, ioc_data, NULL);
+
+ OBD_FREE_PTR(ioc_data);
+ }
+
+ /* Really, we'd like to wait until there are no requests outstanding,
+ * and then continue. For now, we just invalidate the requests,
+ * schedule() and sleep one second if needed, and hope.
+ */
+ schedule();
+}
+
+int ll_remount_fs(struct super_block *sb, int *flags, char *data)
+{
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ char *profilenm = get_profile_name(sb);
+ int err;
+ __u32 read_only;
+
+ if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
+ read_only = *flags & MS_RDONLY;
+ err = obd_set_info_async(NULL, sbi->ll_md_exp,
+ sizeof(KEY_READ_ONLY),
+ KEY_READ_ONLY, sizeof(read_only),
+ &read_only, NULL);
+ if (err) {
+ LCONSOLE_WARN("Failed to remount %s %s (%d)\n",
+ profilenm, read_only ?
+ "read-only" : "read-write", err);
+ return err;
+ }
+
+ if (read_only)
+ sb->s_flags |= MS_RDONLY;
+ else
+ sb->s_flags &= ~MS_RDONLY;
+
+ if (sbi->ll_flags & LL_SBI_VERBOSE)
+ LCONSOLE_WARN("Remounted %s %s\n", profilenm,
+ read_only ? "read-only" : "read-write");
+ }
+ return 0;
+}
+
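+/* Build a new inode or update an existing one from the metadata in @req; a
+ * piggybacked layout is only applied when a layout lock was granted. */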
+int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
+ struct super_block *sb, struct lookup_intent *it)
+{
+ struct ll_sb_info *sbi = NULL;
+ struct lustre_md md;
+ int rc;
+
+ LASSERT(*inode || sb);
+ sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
+ rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
+ sbi->ll_md_exp, &md);
+ if (rc)
+ return rc;
+
+ if (*inode) {
+ ll_update_inode(*inode, &md);
+ } else {
+ LASSERT(sb != NULL);
+
+ /*
+ * At this point the server returns the same fid that the client
+ * generated at create time, so using ->fid1 is okay here.
+ */
+ LASSERT(fid_is_sane(&md.body->fid1));
+
+ *inode = ll_iget(sb, cl_fid_build_ino(&md.body->fid1,
+ sbi->ll_flags & LL_SBI_32BIT_API),
+ &md);
+ if (*inode == NULL || IS_ERR(*inode)) {
+#ifdef CONFIG_FS_POSIX_ACL
+ if (md.posix_acl) {
+ posix_acl_release(md.posix_acl);
+ md.posix_acl = NULL;
+ }
+#endif
+ rc = IS_ERR(*inode) ? PTR_ERR(*inode) : -ENOMEM;
+ *inode = NULL;
+ CERROR("new_inode -fatal: rc %d\n", rc);
+ GOTO(out, rc);
+ }
+ }
+
+ /* Handle a piggybacked layout lock.
+ * A layout lock can be piggybacked on getattr and open requests.
+ * The lsm may be applied to the inode only if it comes with a layout lock,
+ * otherwise a correct layout may be overwritten, for example:
+ * 1. proc1: the MDT returns an lsm but does not grant a layout lock
+ * 2. the layout is changed by another client
+ * 3. proc2: refreshes the layout; the layout lock is granted
+ * 4. proc1: would apply the now-stale layout */
+ if (it != NULL && it->d.lustre.it_lock_mode != 0) {
+ struct lustre_handle lockh;
+ struct ldlm_lock *lock;
+
+ lockh.cookie = it->d.lustre.it_lock_handle;
+ lock = ldlm_handle2lock(&lockh);
+ LASSERT(lock != NULL);
+ if (ldlm_has_layout(lock)) {
+ struct cl_object_conf conf;
+
+ memset(&conf, 0, sizeof(conf));
+ conf.coc_opc = OBJECT_CONF_SET;
+ conf.coc_inode = *inode;
+ conf.coc_lock = lock;
+ conf.u.coc_md = &md;
+ (void)ll_layout_conf(*inode, &conf);
+ }
+ LDLM_LOCK_PUT(lock);
+ }
+
+out:
+ if (md.lsm != NULL)
+ obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
+ md_free_lustre_md(sbi->ll_md_exp, &md);
+ return rc;
+}
+
+int ll_obd_statfs(struct inode *inode, void *arg)
+{
+ struct ll_sb_info *sbi = NULL;
+ struct obd_export *exp;
+ char *buf = NULL;
+ struct obd_ioctl_data *data = NULL;
+ __u32 type;
+ __u32 flags;
+ int len = 0, rc;
+
+ if (!inode || !(sbi = ll_i2sbi(inode)))
+ GOTO(out_statfs, rc = -EINVAL);
+
+ rc = obd_ioctl_getdata(&buf, &len, arg);
+ if (rc)
+ GOTO(out_statfs, rc);
+
+ data = (void *)buf;
+ if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
+ !data->ioc_pbuf1 || !data->ioc_pbuf2)
+ GOTO(out_statfs, rc = -EINVAL);
+
+ if (data->ioc_inllen1 != sizeof(__u32) ||
+ data->ioc_inllen2 != sizeof(__u32) ||
+ data->ioc_plen1 != sizeof(struct obd_statfs) ||
+ data->ioc_plen2 != sizeof(struct obd_uuid))
+ GOTO(out_statfs, rc = -EINVAL);
+
+ memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
+ if (type & LL_STATFS_LMV)
+ exp = sbi->ll_md_exp;
+ else if (type & LL_STATFS_LOV)
+ exp = sbi->ll_dt_exp;
+ else
+ GOTO(out_statfs, rc = -ENODEV);
+
+ flags = (type & LL_STATFS_NODELAY) ? OBD_STATFS_NODELAY : 0;
+ rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, &flags);
+ if (rc)
+ GOTO(out_statfs, rc);
+out_statfs:
+ if (buf)
+ obd_ioctl_freedata(buf, len);
+ return rc;
+}
+
+int ll_process_config(struct lustre_cfg *lcfg)
+{
+ char *ptr;
+ void *sb;
+ struct lprocfs_static_vars lvars;
+ unsigned long x;
+ int rc = 0;
+
+ lprocfs_llite_init_vars(&lvars);
+
+ /* The instance name contains the sb: lustre-client-aacfe000 */
+ ptr = strrchr(lustre_cfg_string(lcfg, 0), '-');
+ if (!ptr || !*(++ptr))
+ return -EINVAL;
+ if (sscanf(ptr, "%lx", &x) != 1)
+ return -EINVAL;
+ sb = (void *)x;
+ /* This better be a real Lustre superblock! */
+ LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == LMD_MAGIC);
+
+ /* Note we have not called client_common_fill_super yet, so
+ proc fns must be able to handle that! */
+ rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars,
+ lcfg, sb);
+ if (rc > 0)
+ rc = 0;
+ return rc;
+}
+
+/* This function prepares the md_op_data hint for passing it down to the
+ * MD stack. */
+struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
+ struct inode *i1, struct inode *i2,
+ const char *name, int namelen,
+ int mode, __u32 opc, void *data)
+{
+ LASSERT(i1 != NULL);
+
+ if (namelen > ll_i2sbi(i1)->ll_namelen)
+ return ERR_PTR(-ENAMETOOLONG);
+
+ if (op_data == NULL)
+ OBD_ALLOC_PTR(op_data);
+
+ if (op_data == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ ll_i2gids(op_data->op_suppgids, i1, i2);
+ op_data->op_fid1 = *ll_inode2fid(i1);
+ op_data->op_capa1 = ll_mdscapa_get(i1);
+
+ if (i2) {
+ op_data->op_fid2 = *ll_inode2fid(i2);
+ op_data->op_capa2 = ll_mdscapa_get(i2);
+ } else {
+ fid_zero(&op_data->op_fid2);
+ op_data->op_capa2 = NULL;
+ }
+
+ op_data->op_name = name;
+ op_data->op_namelen = namelen;
+ op_data->op_mode = mode;
+ op_data->op_mod_time = cfs_time_current_sec();
+ op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
+ op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
+ op_data->op_cap = cfs_curproc_cap_pack();
+ op_data->op_bias = 0;
+ op_data->op_cli_flags = 0;
+ if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) &&
+ filename_is_volatile(name, namelen, NULL))
+ op_data->op_bias |= MDS_CREATE_VOLATILE;
+ op_data->op_opc = opc;
+ op_data->op_mds = 0;
+ op_data->op_data = data;
+
+ /* If the file is being opened after mknod() (normally due to NFS)
+ * try to use the default stripe data from parent directory for
+ * allocating OST objects. Try to pass the parent FID to MDS. */
+ if (opc == LUSTRE_OPC_CREATE && i1 == i2 && S_ISREG(i2->i_mode) &&
+ !ll_i2info(i2)->lli_has_smd) {
+ struct ll_inode_info *lli = ll_i2info(i2);
+
+ spin_lock(&lli->lli_lock);
+ if (likely(!lli->lli_has_smd && !fid_is_zero(&lli->lli_pfid)))
+ op_data->op_fid1 = lli->lli_pfid;
+ spin_unlock(&lli->lli_lock);
+ /* We ignore the parent's capability temporarily. */
+ }
+
+ /* When called by ll_setattr_raw, file is i1. */
+ if (LLIF_DATA_MODIFIED & ll_i2info(i1)->lli_flags)
+ op_data->op_bias |= MDS_DATA_MODIFIED;
+
+ return op_data;
+}
+
+void ll_finish_md_op_data(struct md_op_data *op_data)
+{
+ capa_put(op_data->op_capa1);
+ capa_put(op_data->op_capa2);
+ OBD_FREE_PTR(op_data);
+}
+
+int ll_show_options(struct seq_file *seq, struct dentry *dentry)
+{
+ struct ll_sb_info *sbi;
+
+ LASSERT((seq != NULL) && (dentry != NULL));
+ sbi = ll_s2sbi(dentry->d_sb);
+
+ if (sbi->ll_flags & LL_SBI_NOLCK)
+ seq_puts(seq, ",nolock");
+
+ if (sbi->ll_flags & LL_SBI_FLOCK)
+ seq_puts(seq, ",flock");
+
+ if (sbi->ll_flags & LL_SBI_LOCALFLOCK)
+ seq_puts(seq, ",localflock");
+
+ if (sbi->ll_flags & LL_SBI_USER_XATTR)
+ seq_puts(seq, ",user_xattr");
+
+ if (sbi->ll_flags & LL_SBI_LAZYSTATFS)
+ seq_puts(seq, ",lazystatfs");
+
+ if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
+ seq_puts(seq, ",user_fid2path");
+
+ return 0;
+}
+
+/**
+ * Get the obd name for the given cmd and copy it out to user space.
+ */
+int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
+{
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct obd_device *obd;
+
+ if (cmd == OBD_IOC_GETDTNAME)
+ obd = class_exp2obd(sbi->ll_dt_exp);
+ else if (cmd == OBD_IOC_GETMDNAME)
+ obd = class_exp2obd(sbi->ll_md_exp);
+ else
+ return -EINVAL;
+
+ if (!obd)
+ return -ENOENT;
+
+ if (copy_to_user((void *)arg, obd->obd_name,
+ strlen(obd->obd_name) + 1))
+ return -EFAULT;
+
+ return 0;
+}
+
+/**
+ * Get the Lustre file system name from \a sb. If \a buf is provided
+ * (non-NULL), the fsname will be returned in this buffer; otherwise, a static
+ * buffer will be used to store the fsname and returned to the caller.
+ */
+char *ll_get_fsname(struct super_block *sb, char *buf, int buflen)
+{
+ static char fsname_static[MTI_NAME_MAXLEN];
+ struct lustre_sb_info *lsi = s2lsi(sb);
+ char *ptr;
+ int len;
+
+ if (buf == NULL) {
+ /* this means the caller wants to use the static buffer
+ * and doesn't care about races. Usually this is in the
+ * error reporting path */
+ buf = fsname_static;
+ buflen = sizeof(fsname_static);
+ }
+
+ len = strlen(lsi->lsi_lmd->lmd_profile);
+ ptr = strrchr(lsi->lsi_lmd->lmd_profile, '-');
+ if (ptr && (strcmp(ptr, "-client") == 0))
+ len -= 7;
+
+ if (unlikely(len >= buflen))
+ len = buflen - 1;
+ strncpy(buf, lsi->lsi_lmd->lmd_profile, len);
+ buf[len] = '\0';
+
+ return buf;
+}
+
+static char *ll_d_path(struct dentry *dentry, char *buf, int bufsize)
+{
+ char *path = NULL;
+
+ struct path p;
+
+ p.dentry = dentry;
+ p.mnt = current->fs->root.mnt;
+ path_get(&p);
+ path = d_path(&p, buf, bufsize);
+ path_put(&p);
+
+ return path;
+}
+
+void ll_dirty_page_discard_warn(struct page *page, int ioret)
+{
+ char *buf, *path = NULL;
+ struct dentry *dentry = NULL;
+ struct ccc_object *obj = cl_inode2ccc(page->mapping->host);
+
+ /* this can be called inside a spinlock, so use GFP_ATOMIC. */
+ buf = (char *)__get_free_page(GFP_ATOMIC);
+ if (buf != NULL) {
+ dentry = d_find_alias(page->mapping->host);
+ if (dentry != NULL)
+ path = ll_d_path(dentry, buf, PAGE_SIZE);
+ }
+
+ CWARN("%s: dirty page discard: %s/fid: "DFID"/%s may get corrupted "
+ "(rc %d)\n", ll_get_fsname(page->mapping->host->i_sb, NULL, 0),
+ s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
+ PFID(&obj->cob_header.coh_lu.loh_fid),
+ (path && !IS_ERR(path)) ? path : "", ioret);
+
+ if (dentry != NULL)
+ dput(dentry);
+
+ if (buf != NULL)
+ free_page((unsigned long)buf);
+}
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
new file mode 100644
index 0000000..caed642
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -0,0 +1,498 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <asm/uaccess.h>
+
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+#include <lustre_lite.h>
+#include "llite_internal.h"
+#include <linux/lustre_compat25.h>
+
+struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
+ int *type);
+
+static struct vm_operations_struct ll_file_vm_ops;
+
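+/* Convert the byte range (vma, addr, count) of a mapping into a page-aligned
+ * ldlm extent policy [start, end] in file offsets. */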
+void policy_from_vma(ldlm_policy_data_t *policy,
+ struct vm_area_struct *vma, unsigned long addr,
+ size_t count)
+{
+ policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
+ (vma->vm_pgoff << PAGE_CACHE_SHIFT);
+ policy->l_extent.end = (policy->l_extent.start + count - 1) |
+ ~CFS_PAGE_MASK;
+}
+
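+/* Find a shared VMA backed by a Lustre file (ll_file_vm_ops) that overlaps
+ * [addr, addr + count) in @mm; the caller must hold mmap_sem. */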
+struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
+ size_t count)
+{
+ struct vm_area_struct *vma, *ret = NULL;
+
+ /* mmap_sem must have been held by caller. */
+ LASSERT(!down_write_trylock(&mm->mmap_sem));
+
+ for (vma = find_vma(mm, addr);
+ vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
+ if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
+ vma->vm_flags & VM_SHARED) {
+ ret = vma;
+ break;
+ }
+ }
+ return ret;
+}
+
+/**
+ * API-independent part of page fault initialization.
+ * \param vma - virtual memory area addressed by the page fault
+ * \param env_ret - corresponding lu_env for processing
+ * \param nest - nesting level
+ * \param index - page index corresponding to the fault
+ * \param ra_flags - vma readahead flags
+ *
+ * \return allocated and initialized env for the fault operation
+ * \retval EINVAL if the env can't be allocated
+ * \return other error codes from cl_io_init.
+ */
+struct cl_io *ll_fault_io_init(struct vm_area_struct *vma,
+ struct lu_env **env_ret,
+ struct cl_env_nest *nest,
+ pgoff_t index, unsigned long *ra_flags)
+{
+ struct file *file = vma->vm_file;
+ struct inode *inode = file->f_dentry->d_inode;
+ struct cl_io *io;
+ struct cl_fault_io *fio;
+ struct lu_env *env;
+ int rc;
+
+ *env_ret = NULL;
+ if (ll_file_nolock(file))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ /*
+ * A page fault can occur while Lustre IO is already active for the
+ * current thread, e.g. when doing a read/write against a user-level
+ * buffer that is itself mapped from a Lustre file. To avoid stomping
+ * on the existing context, optionally force the allocation of a new one.
+ */
+ env = cl_env_nested_get(nest);
+ if (IS_ERR(env))
+ return ERR_PTR(-EINVAL);
+
+ *env_ret = env;
+
+ io = ccc_env_thread_io(env);
+ io->ci_obj = ll_i2info(inode)->lli_clob;
+ LASSERT(io->ci_obj != NULL);
+
+ fio = &io->u.ci_fault;
+ fio->ft_index = index;
+ fio->ft_executable = vma->vm_flags & VM_EXEC;
+
+ /*
+ * Disable VM_SEQ_READ and use VM_RAND_READ to make sure that the
+ * kernel will not read pages that are not covered by an ldlm lock in
+ * filemap_nopage. We do our own readahead in ll_readpage.
+ */
+ if (ra_flags != NULL)
+ *ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ);
+ vma->vm_flags &= ~VM_SEQ_READ;
+ vma->vm_flags |= VM_RAND_READ;
+
+ CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
+ fio->ft_index, fio->ft_executable);
+
+ rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
+ if (rc == 0) {
+ struct ccc_io *cio = ccc_env_io(env);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+
+ LASSERT(cio->cui_cl.cis_io == io);
+
+ /* mmap lock must be MANDATORY because it has to cache
+ * pages. */
+ io->ci_lockreq = CILR_MANDATORY;
+ cio->cui_fd = fd;
+ } else {
+ LASSERT(rc < 0);
+ cl_io_fini(env, io);
+ cl_env_nested_put(nest, env);
+ io = ERR_PTR(rc);
+ }
+
+ return io;
+}
+
+/* Code shared by the page_mkwrite implementations for RHEL5 and RHEL6. */
+static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
+ bool *retry)
+{
+ struct lu_env *env;
+ struct cl_io *io;
+ struct vvp_io *vio;
+ struct cl_env_nest nest;
+ int result;
+ sigset_t set;
+ struct inode *inode;
+ struct ll_inode_info *lli;
+
+ LASSERT(vmpage != NULL);
+
+ io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL);
+ if (IS_ERR(io))
+ GOTO(out, result = PTR_ERR(io));
+
+ result = io->ci_result;
+ if (result < 0)
+ GOTO(out_io, result);
+
+ io->u.ci_fault.ft_mkwrite = 1;
+ io->u.ci_fault.ft_writable = 1;
+
+ vio = vvp_env_io(env);
+ vio->u.fault.ft_vma = vma;
+ vio->u.fault.ft_vmpage = vmpage;
+
+ set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
+
+ /* We grab lli_trunc_sem to exclude a concurrent truncate.
+ * Otherwise, we could add dirty pages into the osc cache
+ * while the truncate is ongoing. */
+ inode = ccc_object_inode(io->ci_obj);
+ lli = ll_i2info(inode);
+ down_read(&lli->lli_trunc_sem);
+
+ result = cl_io_loop(env, io);
+
+ up_read(&lli->lli_trunc_sem);
+
+ cfs_restore_sigs(set);
+
+ if (result == 0) {
+ struct inode *inode = vma->vm_file->f_dentry->d_inode;
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ lock_page(vmpage);
+ if (vmpage->mapping == NULL) {
+ unlock_page(vmpage);
+
+ /* page was truncated and lock was cancelled, return
+ * ENODATA so that VM_FAULT_NOPAGE will be returned
+ * to handle_mm_fault(). */
+ if (result == 0)
+ result = -ENODATA;
+ } else if (!PageDirty(vmpage)) {
+ /* Race: the page has been cleaned by ptlrpcd after it was
+ * unlocked. It has to be added to the dirty cache again,
+ * otherwise this soon-to-be-dirty page won't consume any
+ * grants; even worse, if the page is being transferred,
+ * it will break the RPC checksum.
+ */
+ unlock_page(vmpage);
+
+ CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has "
+ "been written out, retry.\n",
+ vmpage, vmpage->index);
+
+ *retry = true;
+ result = -EAGAIN;
+ }
+
+ if (result == 0) {
+ spin_lock(&lli->lli_lock);
+ lli->lli_flags |= LLIF_DATA_MODIFIED;
+ spin_unlock(&lli->lli_lock);
+ }
+ }
+
+out_io:
+ cl_io_fini(env, io);
+ cl_env_nested_put(&nest, env);
+out:
+ CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
+ LASSERT(ergo(result == 0, PageLocked(vmpage)));
+
+ return result;
+}
+
+static inline int to_fault_error(int result)
+{
+ switch (result) {
+ case 0:
+ result = VM_FAULT_LOCKED;
+ break;
+ case -EFAULT:
+ result = VM_FAULT_NOPAGE;
+ break;
+ case -ENOMEM:
+ result = VM_FAULT_OOM;
+ break;
+ default:
+ result = VM_FAULT_SIGBUS;
+ break;
+ }
+ return result;
+}
+
+/**
+ * Lustre implementation of the vm_operations_struct::fault() method, called
+ * by the VM to serve a page fault (in both kernel and user space).
+ *
+ * \param vma - virtual memory area struct related to the page fault
+ * \param vmf - structure describing the fault type and address
+ *
+ * \return allocated and filled _locked_ page for the address
+ * \retval VM_FAULT_ERROR on general error
+ * \retval NOPAGE_OOM if there is no memory to allocate a new page
+ */
+static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct lu_env *env;
+ struct cl_io *io;
+ struct vvp_io *vio = NULL;
+ struct page *vmpage;
+ unsigned long ra_flags;
+ struct cl_env_nest nest;
+ int result;
+ int fault_ret = 0;
+
+ io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags);
+ if (IS_ERR(io))
+ return to_fault_error(PTR_ERR(io));
+
+ result = io->ci_result;
+ if (result == 0) {
+ vio = vvp_env_io(env);
+ vio->u.fault.ft_vma = vma;
+ vio->u.fault.ft_vmpage = NULL;
+ vio->u.fault.fault.ft_vmf = vmf;
+
+ result = cl_io_loop(env, io);
+
+ fault_ret = vio->u.fault.fault.ft_flags;
+ vmpage = vio->u.fault.ft_vmpage;
+ if (result != 0 && vmpage != NULL) {
+ page_cache_release(vmpage);
+ vmf->page = NULL;
+ }
+ }
+ cl_io_fini(env, io);
+ cl_env_nested_put(&nest, env);
+
+ vma->vm_flags |= ra_flags;
+ if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
+ fault_ret |= to_fault_error(result);
+
+ CDEBUG(D_MMAP, "%s fault %d/%d\n",
+ current->comm, fault_ret, result);
+ return fault_ret;
+}
+
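+/* vm_operations_struct::fault handler: block all signals except SIGKILL and
+ * SIGTERM, run ll_fault0(), and retry if the page was truncated before it
+ * could be locked. */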
+static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ int count = 0;
+ bool printed = false;
+ int result;
+ sigset_t set;
+
+ /* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite,
+ * so that the process can be killed by the admin but other signals
+ * cannot cause a segfault. */
+ set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
+
+restart:
+ result = ll_fault0(vma, vmf);
+ LASSERT(!(result & VM_FAULT_LOCKED));
+ if (result == 0) {
+ struct page *vmpage = vmf->page;
+
+ /* check if this page has been truncated */
+ lock_page(vmpage);
+ if (unlikely(vmpage->mapping == NULL)) { /* unlucky */
+ unlock_page(vmpage);
+ page_cache_release(vmpage);
+ vmf->page = NULL;
+
+ if (!printed && ++count > 16) {
+ CWARN("the page is under heavy contention,"
+ "maybe your app(%s) needs revising :-)\n",
+ current->comm);
+ printed = true;
+ }
+
+ goto restart;
+ }
+
+ result |= VM_FAULT_LOCKED;
+ }
+ cfs_restore_sigs(set);
+ return result;
+}
+
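+/* vm_operations_struct::page_mkwrite handler: retry ll_page_mkwrite0() while
+ * it reports a race with writeback, then map its result to VM_FAULT_* codes. */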
+static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ int count = 0;
+ bool printed = false;
+ bool retry;
+ int result;
+
+ do {
+ retry = false;
+ result = ll_page_mkwrite0(vma, vmf->page, &retry);
+
+ if (!printed && ++count > 16) {
+ CWARN("app(%s): the page %lu of file %lu is under heavy"
+ " contention.\n",
+ current->comm, vmf->pgoff,
+ vma->vm_file->f_dentry->d_inode->i_ino);
+ printed = true;
+ }
+ } while (retry);
+
+ switch (result) {
+ case 0:
+ LASSERT(PageLocked(vmf->page));
+ result = VM_FAULT_LOCKED;
+ break;
+ case -ENODATA:
+ case -EFAULT:
+ result = VM_FAULT_NOPAGE;
+ break;
+ case -ENOMEM:
+ result = VM_FAULT_OOM;
+ break;
+ case -EAGAIN:
+ result = VM_FAULT_RETRY;
+ break;
+ default:
+ result = VM_FAULT_SIGBUS;
+ break;
+ }
+
+ return result;
+}
+
+/**
+ * To avoid cancel the locks covering mmapped region for lock cache pressure,
+ * we track the mapped vma count in ccc_object::cob_mmap_cnt.
+ */
+static void ll_vm_open(struct vm_area_struct * vma)
+{
+ struct inode *inode = vma->vm_file->f_dentry->d_inode;
+ struct ccc_object *vob = cl_inode2ccc(inode);
+
+ LASSERT(vma->vm_file);
+ LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
+ atomic_inc(&vob->cob_mmap_cnt);
+}
+
+/**
+ * Dual to ll_vm_open().
+ */
+static void ll_vm_close(struct vm_area_struct *vma)
+{
+ struct inode *inode = vma->vm_file->f_dentry->d_inode;
+ struct ccc_object *vob = cl_inode2ccc(inode);
+
+ LASSERT(vma->vm_file);
+ atomic_dec(&vob->cob_mmap_cnt);
+ LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
+}
+
+/* return the user space pointer that maps to a file offset via a vma */
+static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
+{
+ return vma->vm_start + (byte - ((__u64)vma->vm_pgoff << PAGE_CACHE_SHIFT));
+}
+
+/* XXX put nice comment here. talk about __free_pte -> dirty pages and
+ * nopage's reference passing to the pte */
+int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
+{
+ int rc = -ENOENT;
+
+ LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
+ if (mapping_mapped(mapping)) {
+ rc = 0;
+ unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1,
+ last - first + 1, 0);
+ }
+
+ return rc;
+}
+
+static struct vm_operations_struct ll_file_vm_ops = {
+ .fault = ll_fault,
+ .page_mkwrite = ll_page_mkwrite,
+ .open = ll_vm_open,
+ .close = ll_vm_close,
+};
+
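+/* mmap() entry point for Lustre files: install ll_file_vm_ops and refresh
+ * the inode size/mtime with a glimpse. */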
+int ll_file_mmap(struct file *file, struct vm_area_struct * vma)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ int rc;
+
+ if (ll_file_nolock(file))
+ return -EOPNOTSUPP;
+
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1);
+ rc = generic_file_mmap(file, vma);
+ if (rc == 0) {
+ vma->vm_ops = &ll_file_vm_ops;
+ vma->vm_ops->open(vma);
+ /* update the inode's size and mtime */
+ rc = ll_glimpse_size(inode);
+ }
+
+ return rc;
+}
diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c
new file mode 100644
index 0000000..1767c74
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c
@@ -0,0 +1,327 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/lustre/llite/llite_nfs.c
+ *
+ * NFS export of Lustre Light File System
+ *
+ * Author: Yury Umanets <umka@clusterfs.com>
+ * Author: Huang Hua <huanghua@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LLITE
+#include <lustre_lite.h>
+#include "llite_internal.h"
+#include <linux/exportfs.h>
+
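+/* Hash a name (e.g. a uuid string) into a 32-bit value. */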
+__u32 get_uuid2int(const char *name, int len)
+{
+ __u32 key0 = 0x12a3fe2d, key1 = 0x37abe8f9;
+
+ while (len--) {
+ __u32 key = key1 + (key0 ^ (*name++ * 7152373));
+
+ if (key & 0x80000000)
+ key -= 0x7fffffff;
+ key1 = key0;
+ key0 = key;
+ }
+ return (key0 << 1);
+}
+
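+/* Hash a name into a 64-bit value and split it across the two fsid words. */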
+void get_uuid2fsid(const char *name, int len, __kernel_fsid_t *fsid)
+{
+ __u64 key = 0, key0 = 0x12a3fe2d, key1 = 0x37abe8f9;
+
+ while (len--) {
+ key = key1 + (key0 ^ (*name++ * 7152373));
+ if (key & 0x8000000000000000ULL)
+ key -= 0x7fffffffffffffffULL;
+ key1 = key0;
+ key0 = key;
+ }
+
+ fsid->val[0] = key;
+ fsid->val[1] = key >> 32;
+}
+
+static int ll_nfs_test_inode(struct inode *inode, void *opaque)
+{
+ return lu_fid_eq(&ll_i2info(inode)->lli_fid,
+ (struct lu_fid *)opaque);
+}
+
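+/* Return the inode for 'fid': try the inode cache first, otherwise fetch the
+ * attributes from the MDS and instantiate a new inode. */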
+struct inode *search_inode_for_lustre(struct super_block *sb,
+ const struct lu_fid *fid)
+{
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ struct ptlrpc_request *req = NULL;
+ struct inode *inode = NULL;
+ int eadatalen = 0;
+ unsigned long hash = cl_fid_build_ino(fid,
+ ll_need_32bit_api(sbi));
+ struct md_op_data *op_data;
+ int rc;
+
+ CDEBUG(D_INFO, "searching inode for:(%lu,"DFID")\n", hash, PFID(fid));
+
+ inode = ilookup5(sb, hash, ll_nfs_test_inode, (void *)fid);
+ if (inode)
+ return inode;
+
+ rc = ll_get_max_mdsize(sbi, &eadatalen);
+ if (rc)
+ return ERR_PTR(rc);
+
+ /* Because inode is NULL, ll_prep_md_op_data can not
+ * be used here. So we allocate op_data ourselves */
+ OBD_ALLOC_PTR(op_data);
+ if (op_data == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ op_data->op_fid1 = *fid;
+ op_data->op_mode = eadatalen;
+ op_data->op_valid = OBD_MD_FLEASIZE;
+
+ /* mds_fid2dentry ignores f_type */
+ rc = md_getattr(sbi->ll_md_exp, op_data, &req);
+ OBD_FREE_PTR(op_data);
+ if (rc) {
+ CERROR("can't get object attrs, fid "DFID", rc %d\n",
+ PFID(fid), rc);
+ return ERR_PTR(rc);
+ }
+ rc = ll_prep_inode(&inode, req, sb, NULL);
+ ptlrpc_req_finished(req);
+ if (rc)
+ return ERR_PTR(rc);
+
+ return inode;
+}
+
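+/* Layout of the Lustre NFS file handle: the child fid followed by the
+ * parent fid. */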
+struct lustre_nfs_fid {
+ struct lu_fid lnf_child;
+ struct lu_fid lnf_parent;
+};
+
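+/* Build a dentry for 'fid'; if 'parent' is given it is cached in the inode
+ * so the MDS knows how to initialize OST objects for a regular file that has
+ * none yet. */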
+static struct dentry *
+ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *parent)
+{
+ struct inode *inode;
+ struct dentry *result;
+
+ CDEBUG(D_INFO, "Get dentry for fid: "DFID"\n", PFID(fid));
+ if (!fid_is_sane(fid))
+ return ERR_PTR(-ESTALE);
+
+ inode = search_inode_for_lustre(sb, fid);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+
+ if (is_bad_inode(inode)) {
+ /* we didn't find the right inode.. */
+ iput(inode);
+ return ERR_PTR(-ESTALE);
+ }
+
+ /* This is an anonymous dentry whose OST objects have not been created
+ * yet. We have to find the parent to tell the MDS how to initialize
+ * the lov objects. */
+ if (S_ISREG(inode->i_mode) && !ll_i2info(inode)->lli_has_smd &&
+ parent != NULL) {
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ spin_lock(&lli->lli_lock);
+ lli->lli_pfid = *parent;
+ spin_unlock(&lli->lli_lock);
+ }
+
+ result = d_obtain_alias(inode);
+ if (IS_ERR(result))
+ return result;
+
+ ll_dops_init(result, 1, 0);
+
+ return result;
+}
+
+#define LUSTRE_NFS_FID 0x97
+
+/**
+ * \a connectable - whether nfsd will reconnect the dentry itself or this
+ * should be done by Lustre.
+ *
+ * The return value is the file handle type:
+ * 1 -- contains the child file handle;
+ * 2 -- contains the child and the parent file handles;
+ * 255 -- error.
+ */
+static int ll_encode_fh(struct inode *inode, __u32 *fh, int *plen,
+ struct inode *parent)
+{
+ struct lustre_nfs_fid *nfs_fid = (void *)fh;
+
+ CDEBUG(D_INFO, "encoding for (%lu,"DFID") maxlen=%d minlen=%d\n",
+ inode->i_ino, PFID(ll_inode2fid(inode)), *plen,
+ (int)sizeof(struct lustre_nfs_fid));
+
+ if (*plen < sizeof(struct lustre_nfs_fid) / 4)
+ return 255;
+
+ nfs_fid->lnf_child = *ll_inode2fid(inode);
+ nfs_fid->lnf_parent = *ll_inode2fid(parent);
+ *plen = sizeof(struct lustre_nfs_fid) / 4;
+
+ return LUSTRE_NFS_FID;
+}
+
+static int ll_nfs_get_name_filldir(void *cookie, const char *name, int namelen,
+ loff_t hash, u64 ino, unsigned type)
+{
+ /* It is a hack to access lde_fid for comparison with lgd_fid.
+ * The input 'name' must therefore be part of the 'lu_dirent'. */
+ struct lu_dirent *lde = container_of0(name, struct lu_dirent, lde_name);
+ struct ll_getname_data *lgd = cookie;
+ struct lu_fid fid;
+
+ fid_le_to_cpu(&fid, &lde->lde_fid);
+ if (lu_fid_eq(&fid, &lgd->lgd_fid)) {
+ memcpy(lgd->lgd_name, name, namelen);
+ lgd->lgd_name[namelen] = 0;
+ lgd->lgd_found = 1;
+ }
+ return lgd->lgd_found;
+}
+
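+/* get_name export operation: read the directory and return the name of the
+ * entry whose fid matches 'child'. */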
+static int ll_get_name(struct dentry *dentry, char *name,
+ struct dentry *child)
+{
+ struct inode *dir = dentry->d_inode;
+ int rc;
+ struct ll_getname_data lgd = {
+ .lgd_name = name,
+ .lgd_fid = ll_i2info(child->d_inode)->lli_fid,
+ .ctx.actor = ll_nfs_get_name_filldir,
+ };
+
+ if (!dir || !S_ISDIR(dir->i_mode))
+ GOTO(out, rc = -ENOTDIR);
+
+ if (!dir->i_fop)
+ GOTO(out, rc = -EINVAL);
+
+ mutex_lock(&dir->i_mutex);
+ rc = ll_dir_read(dir, &lgd.ctx);
+ mutex_unlock(&dir->i_mutex);
+ if (!rc && !lgd.lgd_found)
+ rc = -ENOENT;
+out:
+ return rc;
+}
+
+static struct dentry *ll_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type)
+{
+ struct lustre_nfs_fid *nfs_fid = (struct lustre_nfs_fid *)fid;
+
+ if (fh_type != LUSTRE_NFS_FID)
+ return ERR_PTR(-EPROTO);
+
+ return ll_iget_for_nfs(sb, &nfs_fid->lnf_child, &nfs_fid->lnf_parent);
+}
+
+static struct dentry *ll_fh_to_parent(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type)
+{
+ struct lustre_nfs_fid *nfs_fid = (struct lustre_nfs_fid *)fid;
+
+ if (fh_type != LUSTRE_NFS_FID)
+ return ERR_PTR(-EPROTO);
+
+ return ll_iget_for_nfs(sb, &nfs_fid->lnf_parent, NULL);
+}
+
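+/* get_parent export operation: look up ".." on the MDS and build a dentry
+ * from the fid it returns. */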
+static struct dentry *ll_get_parent(struct dentry *dchild)
+{
+ struct ptlrpc_request *req = NULL;
+ struct inode *dir = dchild->d_inode;
+ struct ll_sb_info *sbi;
+ struct dentry *result = NULL;
+ struct mdt_body *body;
+ static char dotdot[] = "..";
+ struct md_op_data *op_data;
+ int rc;
+ int lmmsize;
+
+ LASSERT(dir && S_ISDIR(dir->i_mode));
+
+ sbi = ll_s2sbi(dir->i_sb);
+
+ CDEBUG(D_INFO, "getting parent for (%lu,"DFID")\n",
+ dir->i_ino, PFID(ll_inode2fid(dir)));
+
+ rc = ll_get_max_mdsize(sbi, &lmmsize);
+ if (rc != 0)
+ return ERR_PTR(rc);
+
+ op_data = ll_prep_md_op_data(NULL, dir, NULL, dotdot,
+ strlen(dotdot), lmmsize,
+ LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ return (void *)op_data;
+
+ rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
+ ll_finish_md_op_data(op_data);
+ if (rc) {
+ CERROR("failure %d inode %lu get parent\n", rc, dir->i_ino);
+ return ERR_PTR(rc);
+ }
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ LASSERT(body->valid & OBD_MD_FLID);
+
+ CDEBUG(D_INFO, "parent for "DFID" is "DFID"\n",
+ PFID(ll_inode2fid(dir)), PFID(&body->fid1));
+
+ result = ll_iget_for_nfs(dir->i_sb, &body->fid1, NULL);
+
+ ptlrpc_req_finished(req);
+ return result;
+}
+
+struct export_operations lustre_export_operations = {
+ .get_parent = ll_get_parent,
+ .encode_fh = ll_encode_fh,
+ .get_name = ll_get_name,
+ .fh_to_dentry = ll_fh_to_dentry,
+ .fh_to_parent = ll_fh_to_parent,
+};
diff --git a/drivers/staging/lustre/lustre/llite/llite_rmtacl.c b/drivers/staging/lustre/lustre/llite/llite_rmtacl.c
new file mode 100644
index 0000000..4c61036
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/llite_rmtacl.c
@@ -0,0 +1,301 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/llite/llite_rmtacl.c
+ *
+ * Lustre Remote User Access Control List.
+ *
+ * Author: Fan Yong <fanyong@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+#ifdef CONFIG_FS_POSIX_ACL
+
+#include <lustre_lite.h>
+#include <lustre_eacl.h>
+#include "llite_internal.h"
+
+static inline __u32 rce_hashfunc(uid_t id)
+{
+ return id & (RCE_HASHES - 1);
+}
+
+static inline __u32 ee_hashfunc(uid_t id)
+{
+ return id & (EE_HASHES - 1);
+}
+
+obd_valid rce_ops2valid(int ops)
+{
+ switch (ops) {
+ case RMT_LSETFACL:
+ return OBD_MD_FLRMTLSETFACL;
+ case RMT_LGETFACL:
+ return OBD_MD_FLRMTLGETFACL;
+ case RMT_RSETFACL:
+ return OBD_MD_FLRMTRSETFACL;
+ case RMT_RGETFACL:
+ return OBD_MD_FLRMTRGETFACL;
+ default:
+ return 0;
+ }
+}
+
+static struct rmtacl_ctl_entry *rce_alloc(pid_t key, int ops)
+{
+ struct rmtacl_ctl_entry *rce;
+
+ OBD_ALLOC_PTR(rce);
+ if (!rce)
+ return NULL;
+
+ INIT_LIST_HEAD(&rce->rce_list);
+ rce->rce_key = key;
+ rce->rce_ops = ops;
+
+ return rce;
+}
+
+static void rce_free(struct rmtacl_ctl_entry *rce)
+{
+ if (!list_empty(&rce->rce_list))
+ list_del(&rce->rce_list);
+
+ OBD_FREE_PTR(rce);
+}
+
+static struct rmtacl_ctl_entry *__rct_search(struct rmtacl_ctl_table *rct,
+ pid_t key)
+{
+ struct rmtacl_ctl_entry *rce;
+ struct list_head *head = &rct->rct_entries[rce_hashfunc(key)];
+
+ list_for_each_entry(rce, head, rce_list)
+ if (rce->rce_key == key)
+ return rce;
+
+ return NULL;
+}
+
+struct rmtacl_ctl_entry *rct_search(struct rmtacl_ctl_table *rct, pid_t key)
+{
+ struct rmtacl_ctl_entry *rce;
+
+ spin_lock(&rct->rct_lock);
+ rce = __rct_search(rct, key);
+ spin_unlock(&rct->rct_lock);
+ return rce;
+}
+
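+/* Add a remote-ACL control entry for 'key', replacing any stale entry that
+ * is already hashed under the same key. */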
+int rct_add(struct rmtacl_ctl_table *rct, pid_t key, int ops)
+{
+ struct rmtacl_ctl_entry *rce, *e;
+
+ rce = rce_alloc(key, ops);
+ if (rce == NULL)
+ return -ENOMEM;
+
+ spin_lock(&rct->rct_lock);
+ e = __rct_search(rct, key);
+ if (unlikely(e != NULL)) {
+ CWARN("Unexpected stale rmtacl_entry found: "
+ "[key: %d] [ops: %d]\n", (int)key, ops);
+ rce_free(e);
+ }
+ list_add_tail(&rce->rce_list, &rct->rct_entries[rce_hashfunc(key)]);
+ spin_unlock(&rct->rct_lock);
+
+ return 0;
+}
+
+int rct_del(struct rmtacl_ctl_table *rct, pid_t key)
+{
+ struct rmtacl_ctl_entry *rce;
+
+ spin_lock(&rct->rct_lock);
+ rce = __rct_search(rct, key);
+ if (rce)
+ rce_free(rce);
+ spin_unlock(&rct->rct_lock);
+
+ return rce ? 0 : -ENOENT;
+}
+
+void rct_init(struct rmtacl_ctl_table *rct)
+{
+ int i;
+
+ spin_lock_init(&rct->rct_lock);
+ for (i = 0; i < RCE_HASHES; i++)
+ INIT_LIST_HEAD(&rct->rct_entries[i]);
+}
+
+void rct_fini(struct rmtacl_ctl_table *rct)
+{
+ struct rmtacl_ctl_entry *rce;
+ int i;
+
+ spin_lock(&rct->rct_lock);
+ for (i = 0; i < RCE_HASHES; i++)
+ while (!list_empty(&rct->rct_entries[i])) {
+ rce = list_entry(rct->rct_entries[i].next,
+ struct rmtacl_ctl_entry, rce_list);
+ rce_free(rce);
+ }
+ spin_unlock(&rct->rct_lock);
+}
+
+static struct eacl_entry *ee_alloc(pid_t key, struct lu_fid *fid, int type,
+ ext_acl_xattr_header *header)
+{
+ struct eacl_entry *ee;
+
+ OBD_ALLOC_PTR(ee);
+ if (!ee)
+ return NULL;
+
+ INIT_LIST_HEAD(&ee->ee_list);
+ ee->ee_key = key;
+ ee->ee_fid = *fid;
+ ee->ee_type = type;
+ ee->ee_acl = header;
+
+ return ee;
+}
+
+void ee_free(struct eacl_entry *ee)
+{
+ if (!list_empty(&ee->ee_list))
+ list_del(&ee->ee_list);
+
+ if (ee->ee_acl)
+ lustre_ext_acl_xattr_free(ee->ee_acl);
+
+ OBD_FREE_PTR(ee);
+}
+
+static struct eacl_entry *__et_search_del(struct eacl_table *et, pid_t key,
+ struct lu_fid *fid, int type)
+{
+ struct eacl_entry *ee;
+ struct list_head *head = &et->et_entries[ee_hashfunc(key)];
+
+ LASSERT(fid != NULL);
+ list_for_each_entry(ee, head, ee_list)
+ if (ee->ee_key == key) {
+ if (lu_fid_eq(&ee->ee_fid, fid) &&
+ ee->ee_type == type) {
+ list_del_init(&ee->ee_list);
+ return ee;
+ }
+ }
+
+ return NULL;
+}
+
+struct eacl_entry *et_search_del(struct eacl_table *et, pid_t key,
+ struct lu_fid *fid, int type)
+{
+ struct eacl_entry *ee;
+
+ spin_lock(&et->et_lock);
+ ee = __et_search_del(et, key, fid, type);
+ spin_unlock(&et->et_lock);
+ return ee;
+}
+
+void et_search_free(struct eacl_table *et, pid_t key)
+{
+ struct eacl_entry *ee, *next;
+ struct list_head *head = &et->et_entries[ee_hashfunc(key)];
+
+ spin_lock(&et->et_lock);
+ list_for_each_entry_safe(ee, next, head, ee_list)
+ if (ee->ee_key == key)
+ ee_free(ee);
+
+ spin_unlock(&et->et_lock);
+}
+
+int ee_add(struct eacl_table *et, pid_t key, struct lu_fid *fid, int type,
+ ext_acl_xattr_header *header)
+{
+ struct eacl_entry *ee, *e;
+
+ ee = ee_alloc(key, fid, type, header);
+ if (ee == NULL)
+ return -ENOMEM;
+
+ spin_lock(&et->et_lock);
+ e = __et_search_del(et, key, fid, type);
+ if (unlikely(e != NULL)) {
+ CWARN("Unexpected stale eacl_entry found: "
+ "[key: %d] [fid: "DFID"] [type: %d]\n",
+ (int)key, PFID(fid), type);
+ ee_free(e);
+ }
+ list_add_tail(&ee->ee_list, &et->et_entries[ee_hashfunc(key)]);
+ spin_unlock(&et->et_lock);
+
+ return 0;
+}
+
+void et_init(struct eacl_table *et)
+{
+ int i;
+
+ spin_lock_init(&et->et_lock);
+ for (i = 0; i < EE_HASHES; i++)
+ INIT_LIST_HEAD(&et->et_entries[i]);
+}
+
+void et_fini(struct eacl_table *et)
+{
+ struct eacl_entry *ee;
+ int i;
+
+ spin_lock(&et->et_lock);
+ for (i = 0; i < EE_HASHES; i++)
+ while (!list_empty(&et->et_entries[i])) {
+ ee = list_entry(et->et_entries[i].next,
+ struct eacl_entry, ee_list);
+ ee_free(ee);
+ }
+ spin_unlock(&et->et_lock);
+}
+
+#endif
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
new file mode 100644
index 0000000..2340458
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -0,0 +1,864 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+/*
+ * linux/drivers/block/loop.c
+ *
+ * Written by Theodore Ts'o, 3/29/93
+ *
+ * Copyright 1993 by Theodore Ts'o. Redistribution of this file is
+ * permitted under the GNU General Public License.
+ *
+ * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
+ * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
+ *
+ * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
+ *
+ * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
+ *
+ * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
+ *
+ * Loadable modules and other fixes by AK, 1998
+ *
+ * Maximum number of loop devices now dynamic via max_loop module parameter.
+ * Russell Kroll <rkroll@exploits.org> 19990701
+ *
+ * Maximum number of loop devices when compiled-in now selectable by passing
+ * max_loop=<1-255> to the kernel on boot.
+ * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
+ *
+ * Completely rewrite request handling to be make_request_fn style and
+ * non blocking, pushing work to a helper thread. Lots of fixes from
+ * Al Viro too.
+ * Jens Axboe <axboe@suse.de>, Nov 2000
+ *
+ * Support up to 256 loop devices
+ * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
+ *
+ * Support for falling back on the write file operation when the address space
+ * operations prepare_write and/or commit_write are not available on the
+ * backing filesystem.
+ * Anton Altaparmakov, 16 Feb 2005
+ *
+ * Still To Fix:
+ * - Advisory locking is ignored here.
+ * - Should use an own CAP_* category instead of CAP_SYS_ADMIN
+ *
+ */
+
+#include <linux/module.h>
+
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/major.h>
+#include <linux/wait.h>
+#include <linux/blkdev.h>
+#include <linux/blkpg.h>
+#include <linux/init.h>
+#include <linux/swap.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/writeback.h>
+#include <linux/buffer_head.h> /* for invalidate_bdev() */
+#include <linux/completion.h>
+#include <linux/highmem.h>
+#include <linux/gfp.h>
+#include <linux/pagevec.h>
+
+#include <asm/uaccess.h>
+
+#include <lustre_lib.h>
+#include <lustre_lite.h>
+#include "llite_internal.h"
+
+#define LLOOP_MAX_SEGMENTS LNET_MAX_IOV
+
+/* Possible states of device */
+enum {
+ LLOOP_UNBOUND,
+ LLOOP_BOUND,
+ LLOOP_RUNDOWN,
+};
+
+struct lloop_device {
+ int lo_number;
+ int lo_refcnt;
+ loff_t lo_offset;
+ loff_t lo_sizelimit;
+ int lo_flags;
+ int (*ioctl)(struct lloop_device *, int cmd,
+ unsigned long arg);
+
+ struct file *lo_backing_file;
+ struct block_device *lo_device;
+ unsigned lo_blocksize;
+
+ int old_gfp_mask;
+
+ spinlock_t lo_lock;
+ struct bio *lo_bio;
+ struct bio *lo_biotail;
+ int lo_state;
+ struct semaphore lo_sem;
+ struct mutex lo_ctl_mutex;
+ atomic_t lo_pending;
+ wait_queue_head_t lo_bh_wait;
+
+ struct request_queue *lo_queue;
+
+ const struct lu_env *lo_env;
+ struct cl_io lo_io;
+ struct ll_dio_pages lo_pvec;
+
+ /* data to handle bio for lustre. */
+ struct lo_request_data {
+ struct page *lrd_pages[LLOOP_MAX_SEGMENTS];
+ loff_t lrd_offsets[LLOOP_MAX_SEGMENTS];
+ } lo_requests[1];
+};
+
+/*
+ * Loop flags
+ */
+enum {
+ LO_FLAGS_READ_ONLY = 1,
+};
+
+static int lloop_major;
+#define MAX_LOOP_DEFAULT 16
+static int max_loop = MAX_LOOP_DEFAULT;
+static struct lloop_device *loop_dev;
+static struct gendisk **disks;
+static struct mutex lloop_mutex;
+static void *ll_iocontrol_magic = NULL;
+
+static loff_t get_loop_size(struct lloop_device *lo, struct file *file)
+{
+ loff_t size, offset, loopsize;
+
+ /* Compute loopsize in bytes */
+ size = i_size_read(file->f_mapping->host);
+ offset = lo->lo_offset;
+ loopsize = size - offset;
+ if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
+ loopsize = lo->lo_sizelimit;
+
+ /*
+ * Unfortunately, if we want to do I/O on the device,
+ * the number of 512-byte sectors has to fit into a sector_t.
+ */
+ return loopsize >> 9;
+}
+
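+/* Serve a chain of bios with a single Lustre direct I/O: gather each
+ * segment's page and file offset into lo_pvec and pass the vector to
+ * ll_direct_rw_pages(). */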
+static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
+{
+ const struct lu_env *env = lo->lo_env;
+ struct cl_io *io = &lo->lo_io;
+ struct inode *inode = lo->lo_backing_file->f_dentry->d_inode;
+ struct cl_object *obj = ll_i2info(inode)->lli_clob;
+ pgoff_t offset;
+ int ret;
+ int i;
+ int rw;
+ obd_count page_count = 0;
+ struct bio_vec *bvec;
+ struct bio *bio;
+ ssize_t bytes;
+
+ struct ll_dio_pages *pvec = &lo->lo_pvec;
+ struct page **pages = pvec->ldp_pages;
+ loff_t *offsets = pvec->ldp_offsets;
+
+ truncate_inode_pages(inode->i_mapping, 0);
+
+ /* initialize the IO */
+ memset(io, 0, sizeof(*io));
+ io->ci_obj = obj;
+ ret = cl_io_init(env, io, CIT_MISC, obj);
+ if (ret)
+ return io->ci_result;
+ io->ci_lockreq = CILR_NEVER;
+
+ LASSERT(head != NULL);
+ rw = head->bi_rw;
+ for (bio = head; bio != NULL; bio = bio->bi_next) {
+ LASSERT(rw == bio->bi_rw);
+
+ offset = (pgoff_t)(bio->bi_sector << 9) + lo->lo_offset;
+ bio_for_each_segment(bvec, bio, i) {
+ BUG_ON(bvec->bv_offset != 0);
+ BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE);
+
+ pages[page_count] = bvec->bv_page;
+ offsets[page_count] = offset;
+ page_count++;
+ offset += bvec->bv_len;
+ }
+ LASSERT(page_count <= LLOOP_MAX_SEGMENTS);
+ }
+
+ ll_stats_ops_tally(ll_i2sbi(inode),
+ (rw == WRITE) ? LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ,
+ page_count);
+
+ pvec->ldp_size = page_count << PAGE_CACHE_SHIFT;
+ pvec->ldp_nr = page_count;
+
+ /* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to
+ * write those pages into the OST. An even worse case is that more
+ * pages would be asked to be written out to swap space, and then
+ * finally get here again.
+ * Unfortunately this is NOT easy to fix.
+ * Thoughts on a solution:
+ * 0. Define a reserved pool for cl_pages, which could be a list of
+ * pre-allocated cl_pages;
+ * 1. Define a new operation in cl_object_operations{}, say clo_depth,
+ * which measures how many layers this lustre object has. Generally
+ * speaking, the depth would be 2, one for llite and one for lovsub.
+ * However, for SNS there will be more, since we need an additional
+ * page to store parity;
+ * 2. Reserve (page_count * depth) cl_pages from the reserved pool.
+ * Afterwards, clio would allocate the pages from the reserved pool;
+ * this guarantees we needn't allocate the cl_pages from the
+ * generic cl_page slab cache.
+ * Of course, if there are NOT enough pages in the pool, we might be
+ * asked to write fewer pages at once; this purely depends on the
+ * implementation. Anyway, we should be careful to avoid deadlocking.
+ */
+ mutex_lock(&inode->i_mutex);
+ bytes = ll_direct_rw_pages(env, io, rw, inode, pvec);
+ mutex_unlock(&inode->i_mutex);
+ cl_io_fini(env, io);
+ return (bytes == pvec->ldp_size) ? 0 : (int)bytes;
+}
+
+/*
+ * Add bio to back of pending list
+ */
+static void loop_add_bio(struct lloop_device *lo, struct bio *bio)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&lo->lo_lock, flags);
+ if (lo->lo_biotail) {
+ lo->lo_biotail->bi_next = bio;
+ lo->lo_biotail = bio;
+ } else
+ lo->lo_bio = lo->lo_biotail = bio;
+ spin_unlock_irqrestore(&lo->lo_lock, flags);
+
+ atomic_inc(&lo->lo_pending);
+ if (waitqueue_active(&lo->lo_bh_wait))
+ wake_up(&lo->lo_bh_wait);
+}
+
+/*
+ * Grab first pending buffer
+ */
+static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
+{
+ struct bio *first;
+ struct bio **bio;
+ unsigned int count = 0;
+ unsigned int page_count = 0;
+ int rw;
+
+ spin_lock_irq(&lo->lo_lock);
+ first = lo->lo_bio;
+ if (unlikely(first == NULL)) {
+ spin_unlock_irq(&lo->lo_lock);
+ return 0;
+ }
+
+ /* TODO: need to split the bio, too bad. */
+ LASSERT(first->bi_vcnt <= LLOOP_MAX_SEGMENTS);
+
+ rw = first->bi_rw;
+ bio = &lo->lo_bio;
+ while (*bio && (*bio)->bi_rw == rw) {
+ CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u \n",
+ (unsigned long long)(*bio)->bi_sector, (*bio)->bi_size,
+ page_count, (*bio)->bi_vcnt);
+ if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS)
+ break;
+
+ page_count += (*bio)->bi_vcnt;
+ count++;
+ bio = &(*bio)->bi_next;
+ }
+ if (*bio) {
+ /* Some of the bios can't be merged. */
+ lo->lo_bio = *bio;
+ *bio = NULL;
+ } else {
+ /* Hit the end of queue */
+ lo->lo_biotail = NULL;
+ lo->lo_bio = NULL;
+ }
+ *req = first;
+ spin_unlock_irq(&lo->lo_lock);
+ return count;
+}
+
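+/* make_request function for the lloop queue: validate the request and queue
+ * the bio for the worker thread, since no blocking I/O may be done in this
+ * context. */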
+static ll_mrf_ret
+loop_make_request(struct request_queue *q, struct bio *old_bio)
+{
+ struct lloop_device *lo = q->queuedata;
+ int rw = bio_rw(old_bio);
+ int inactive;
+
+ if (!lo)
+ goto err;
+
+ CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
+ (unsigned long long)old_bio->bi_sector, old_bio->bi_size);
+
+ spin_lock_irq(&lo->lo_lock);
+ inactive = (lo->lo_state != LLOOP_BOUND);
+ spin_unlock_irq(&lo->lo_lock);
+ if (inactive)
+ goto err;
+
+ if (rw == WRITE) {
+ if (lo->lo_flags & LO_FLAGS_READ_ONLY)
+ goto err;
+ } else if (rw == READA) {
+ rw = READ;
+ } else if (rw != READ) {
+ CERROR("lloop: unknown command (%x)\n", rw);
+ goto err;
+ }
+ loop_add_bio(lo, old_bio);
+ LL_MRF_RETURN(0);
+err:
+ cfs_bio_io_error(old_bio, old_bio->bi_size);
+ LL_MRF_RETURN(0);
+}
+
+static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio)
+{
+ int ret;
+ ret = do_bio_lustrebacked(lo, bio);
+ while (bio) {
+ struct bio *tmp = bio->bi_next;
+ bio->bi_next = NULL;
+ cfs_bio_endio(bio, bio->bi_size, ret);
+ bio = tmp;
+ }
+}
+
+static inline int loop_active(struct lloop_device *lo)
+{
+ return atomic_read(&lo->lo_pending) ||
+ (lo->lo_state == LLOOP_RUNDOWN);
+}
+
+/*
+ * worker thread that handles reads/writes to file backed loop devices,
+ * to avoid blocking in our make_request_fn.
+ */
+static int loop_thread(void *data)
+{
+ struct lloop_device *lo = data;
+ struct bio *bio;
+ unsigned int count;
+ unsigned long times = 0;
+ unsigned long total_count = 0;
+
+ struct lu_env *env;
+ int refcheck;
+ int ret = 0;
+
+ set_user_nice(current, -20);
+
+ lo->lo_state = LLOOP_BOUND;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ GOTO(out, ret = PTR_ERR(env));
+
+ lo->lo_env = env;
+ memset(&lo->lo_pvec, 0, sizeof(lo->lo_pvec));
+ lo->lo_pvec.ldp_pages = lo->lo_requests[0].lrd_pages;
+ lo->lo_pvec.ldp_offsets = lo->lo_requests[0].lrd_offsets;
+
+ /*
+ * up sem, we are running
+ */
+ up(&lo->lo_sem);
+
+ for (;;) {
+ wait_event(lo->lo_bh_wait, loop_active(lo));
+ if (!atomic_read(&lo->lo_pending)) {
+ int exiting = 0;
+ spin_lock_irq(&lo->lo_lock);
+ exiting = (lo->lo_state == LLOOP_RUNDOWN);
+ spin_unlock_irq(&lo->lo_lock);
+ if (exiting)
+ break;
+ }
+
+ bio = NULL;
+ count = loop_get_bio(lo, &bio);
+ if (!count) {
+ CWARN("lloop(minor: %d): missing bio\n", lo->lo_number);
+ continue;
+ }
+
+ total_count += count;
+ if (total_count < count) { /* overflow */
+ total_count = count;
+ times = 1;
+ } else {
+ times++;
+ }
+ if ((times & 127) == 0) {
+ CDEBUG(D_INFO, "total: %lu, count: %lu, avg: %lu\n",
+ total_count, times, total_count / times);
+ }
+
+ LASSERT(bio != NULL);
+ LASSERT(count <= atomic_read(&lo->lo_pending));
+ loop_handle_bio(lo, bio);
+ atomic_sub(count, &lo->lo_pending);
+ }
+ cl_env_put(env, &refcheck);
+
+out:
+ up(&lo->lo_sem);
+ return ret;
+}
+
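+/* Bind a regular Lustre file to this lloop device: configure the request
+ * queue, block size and capacity, then start the worker thread. */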
+static int loop_set_fd(struct lloop_device *lo, struct file *unused,
+ struct block_device *bdev, struct file *file)
+{
+ struct inode *inode;
+ struct address_space *mapping;
+ int lo_flags = 0;
+ int error;
+ loff_t size;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ error = -EBUSY;
+ if (lo->lo_state != LLOOP_UNBOUND)
+ goto out;
+
+ mapping = file->f_mapping;
+ inode = mapping->host;
+
+ error = -EINVAL;
+ if (!S_ISREG(inode->i_mode) || inode->i_sb->s_magic != LL_SUPER_MAGIC)
+ goto out;
+
+ if (!(file->f_mode & FMODE_WRITE))
+ lo_flags |= LO_FLAGS_READ_ONLY;
+
+ size = get_loop_size(lo, file);
+
+ if ((loff_t)(sector_t)size != size) {
+ error = -EFBIG;
+ goto out;
+ }
+
+ /* Remove all pages from the page cache so that no dirty pages remain. */
+ truncate_inode_pages(mapping, 0);
+
+ set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
+
+ lo->lo_blocksize = PAGE_CACHE_SIZE;
+ lo->lo_device = bdev;
+ lo->lo_flags = lo_flags;
+ lo->lo_backing_file = file;
+ lo->ioctl = NULL;
+ lo->lo_sizelimit = 0;
+ lo->old_gfp_mask = mapping_gfp_mask(mapping);
+ mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
+
+ lo->lo_bio = lo->lo_biotail = NULL;
+
+ /*
+ * set queue make_request_fn, and add limits based on lower level
+ * device
+ */
+ blk_queue_make_request(lo->lo_queue, loop_make_request);
+ lo->lo_queue->queuedata = lo;
+
+ /* queue parameters */
+ CLASSERT(PAGE_CACHE_SIZE < (1 << (sizeof(unsigned short) * 8)));
+ blk_queue_logical_block_size(lo->lo_queue,
+ (unsigned short)PAGE_CACHE_SIZE);
+ blk_queue_max_hw_sectors(lo->lo_queue,
+ LLOOP_MAX_SEGMENTS << (PAGE_CACHE_SHIFT - 9));
+ blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
+
+ set_capacity(disks[lo->lo_number], size);
+ bd_set_size(bdev, size << 9);
+
+ set_blocksize(bdev, lo->lo_blocksize);
+
+ kthread_run(loop_thread, lo, "lloop%d", lo->lo_number);
+ down(&lo->lo_sem);
+ return 0;
+
+out:
+ /* This is safe: open() is still holding a reference. */
+ module_put(THIS_MODULE);
+ return error;
+}
+
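+/* Undo loop_set_fd(): ask the worker thread to exit, wait for it via lo_sem,
+ * and release the backing file. */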
+static int loop_clr_fd(struct lloop_device *lo, struct block_device *bdev,
+ int count)
+{
+ struct file *filp = lo->lo_backing_file;
+ int gfp = lo->old_gfp_mask;
+
+ if (lo->lo_state != LLOOP_BOUND)
+ return -ENXIO;
+
+ if (lo->lo_refcnt > count) /* we needed one fd for the ioctl */
+ return -EBUSY;
+
+ if (filp == NULL)
+ return -EINVAL;
+
+ spin_lock_irq(&lo->lo_lock);
+ lo->lo_state = LLOOP_RUNDOWN;
+ spin_unlock_irq(&lo->lo_lock);
+ wake_up(&lo->lo_bh_wait);
+
+ down(&lo->lo_sem);
+ lo->lo_backing_file = NULL;
+ lo->ioctl = NULL;
+ lo->lo_device = NULL;
+ lo->lo_offset = 0;
+ lo->lo_sizelimit = 0;
+ lo->lo_flags = 0;
+ invalidate_bdev(bdev);
+ set_capacity(disks[lo->lo_number], 0);
+ bd_set_size(bdev, 0);
+ mapping_set_gfp_mask(filp->f_mapping, gfp);
+ lo->lo_state = LLOOP_UNBOUND;
+ fput(filp);
+ /* This is safe: open() is still holding a reference. */
+ module_put(THIS_MODULE);
+ return 0;
+}
+
+static int lo_open(struct block_device *bdev, fmode_t mode)
+{
+ struct lloop_device *lo = bdev->bd_disk->private_data;
+
+ mutex_lock(&lo->lo_ctl_mutex);
+ lo->lo_refcnt++;
+ mutex_unlock(&lo->lo_ctl_mutex);
+
+ return 0;
+}
+
+static void lo_release(struct gendisk *disk, fmode_t mode)
+{
+ struct lloop_device *lo = disk->private_data;
+
+ mutex_lock(&lo->lo_ctl_mutex);
+ --lo->lo_refcnt;
+ mutex_unlock(&lo->lo_ctl_mutex);
+}
+
+/* lloop device node's ioctl function. */
+static int lo_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct lloop_device *lo = bdev->bd_disk->private_data;
+ struct inode *inode = NULL;
+ int err = 0;
+
+ mutex_lock(&lloop_mutex);
+ switch (cmd) {
+ case LL_IOC_LLOOP_DETACH: {
+ err = loop_clr_fd(lo, bdev, 2);
+ if (err == 0)
+ blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
+ break;
+ }
+
+ case LL_IOC_LLOOP_INFO: {
+ struct lu_fid fid;
+
+ LASSERT(lo->lo_backing_file != NULL);
+ if (inode == NULL)
+ inode = lo->lo_backing_file->f_dentry->d_inode;
+ if (lo->lo_state == LLOOP_BOUND)
+ fid = ll_i2info(inode)->lli_fid;
+ else
+ fid_zero(&fid);
+
+ if (copy_to_user((struct lu_fid *)arg, &fid, sizeof(fid)))
+ err = -EFAULT;
+ break;
+ }
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+ mutex_unlock(&lloop_mutex);
+
+ return err;
+}
+
+static struct block_device_operations lo_fops = {
+ .owner = THIS_MODULE,
+ .open = lo_open,
+ .release = lo_release,
+ .ioctl = lo_ioctl,
+};
+
+/* Dynamic iocontrol callback.
+ * This callback is registered in lloop_init and will be called by
+ * ll_iocontrol_call.
+ *
+ * This is a regular-file ioctl handler for llite. It is responsible for
+ * attaching a file to, or detaching it from, an lloop device by device number.
+ */
+static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file,
+ unsigned int cmd, unsigned long arg,
+ void *magic, int *rcp)
+{
+ struct lloop_device *lo = NULL;
+ struct block_device *bdev = NULL;
+ int err = 0;
+ dev_t dev;
+
+ if (magic != ll_iocontrol_magic)
+ return LLIOC_CONT;
+
+ if (disks == NULL)
+ GOTO(out1, err = -ENODEV);
+
+ CWARN("Enter llop_ioctl\n");
+
+ mutex_lock(&lloop_mutex);
+ switch (cmd) {
+ case LL_IOC_LLOOP_ATTACH: {
+ struct lloop_device *lo_free = NULL;
+ int i;
+
+ for (i = 0; i < max_loop; i++, lo = NULL) {
+ lo = &loop_dev[i];
+ if (lo->lo_state == LLOOP_UNBOUND) {
+ if (!lo_free)
+ lo_free = lo;
+ continue;
+ }
+ if (lo->lo_backing_file->f_dentry->d_inode ==
+ file->f_dentry->d_inode)
+ break;
+ }
+ if (lo || !lo_free)
+ GOTO(out, err = -EBUSY);
+
+ lo = lo_free;
+ dev = MKDEV(lloop_major, lo->lo_number);
+
+ /* bail out if the user pointer is not writable */
+ if (put_user((long)old_encode_dev(dev), (long*)arg))
+ GOTO(out, err = -EFAULT);
+
+ bdev = blkdev_get_by_dev(dev, file->f_mode, NULL);
+ if (IS_ERR(bdev))
+ GOTO(out, err = PTR_ERR(bdev));
+
+ get_file(file);
+ err = loop_set_fd(lo, NULL, bdev, file);
+ if (err) {
+ fput(file);
+ blkdev_put(bdev, 0);
+ }
+
+ break;
+ }
+
+ case LL_IOC_LLOOP_DETACH_BYDEV: {
+ int minor;
+
+ dev = old_decode_dev(arg);
+ if (MAJOR(dev) != lloop_major)
+ GOTO(out, err = -EINVAL);
+
+ minor = MINOR(dev);
+ if (minor > max_loop - 1)
+ GOTO(out, err = -EINVAL);
+
+ lo = &loop_dev[minor];
+ if (lo->lo_state != LLOOP_BOUND)
+ GOTO(out, err = -EINVAL);
+
+ bdev = lo->lo_device;
+ err = loop_clr_fd(lo, bdev, 1);
+ if (err == 0)
+ blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
+
+ break;
+ }
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+out:
+ mutex_unlock(&lloop_mutex);
+out1:
+ if (rcp)
+ *rcp = err;
+ return LLIOC_STOP;
+}
+
+static int __init lloop_init(void)
+{
+ int i;
+ unsigned int cmdlist[] = {
+ LL_IOC_LLOOP_ATTACH,
+ LL_IOC_LLOOP_DETACH_BYDEV,
+ };
+
+ if (max_loop < 1 || max_loop > 256) {
+ max_loop = MAX_LOOP_DEFAULT;
+ CWARN("lloop: invalid max_loop (must be between"
+ " 1 and 256), using default (%u)\n", max_loop);
+ }
+
+ lloop_major = register_blkdev(0, "lloop");
+ if (lloop_major < 0)
+ return -EIO;
+
+ CDEBUG(D_CONFIG, "registered lloop major %d with %u minors\n",
+ lloop_major, max_loop);
+
+ ll_iocontrol_magic = ll_iocontrol_register(lloop_ioctl, 2, cmdlist);
+ if (ll_iocontrol_magic == NULL)
+ goto out_mem1;
+
+ OBD_ALLOC_WAIT(loop_dev, max_loop * sizeof(*loop_dev));
+ if (!loop_dev)
+ goto out_mem1;
+
+ OBD_ALLOC_WAIT(disks, max_loop * sizeof(*disks));
+ if (!disks)
+ goto out_mem2;
+
+ for (i = 0; i < max_loop; i++) {
+ disks[i] = alloc_disk(1);
+ if (!disks[i])
+ goto out_mem3;
+ }
+
+ mutex_init(&lloop_mutex);
+
+ for (i = 0; i < max_loop; i++) {
+ struct lloop_device *lo = &loop_dev[i];
+ struct gendisk *disk = disks[i];
+
+ lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
+ if (!lo->lo_queue)
+ goto out_mem4;
+
+ mutex_init(&lo->lo_ctl_mutex);
+ sema_init(&lo->lo_sem, 0);
+ init_waitqueue_head(&lo->lo_bh_wait);
+ lo->lo_number = i;
+ spin_lock_init(&lo->lo_lock);
+ disk->major = lloop_major;
+ disk->first_minor = i;
+ disk->fops = &lo_fops;
+ sprintf(disk->disk_name, "lloop%d", i);
+ disk->private_data = lo;
+ disk->queue = lo->lo_queue;
+ }
+
+ /* We cannot fail after we call this, so another loop! */
+ for (i = 0; i < max_loop; i++)
+ add_disk(disks[i]);
+ return 0;
+
+out_mem4:
+ while (i--)
+ blk_cleanup_queue(loop_dev[i].lo_queue);
+ i = max_loop;
+out_mem3:
+ while (i--)
+ put_disk(disks[i]);
+ OBD_FREE(disks, max_loop * sizeof(*disks));
+out_mem2:
+ OBD_FREE(loop_dev, max_loop * sizeof(*loop_dev));
+out_mem1:
+ unregister_blkdev(lloop_major, "lloop");
+ ll_iocontrol_unregister(ll_iocontrol_magic);
+ CERROR("lloop: ran out of memory\n");
+ return -ENOMEM;
+}
+
+static void lloop_exit(void)
+{
+ int i;
+
+ ll_iocontrol_unregister(ll_iocontrol_magic);
+ for (i = 0; i < max_loop; i++) {
+ del_gendisk(disks[i]);
+ blk_cleanup_queue(loop_dev[i].lo_queue);
+ put_disk(disks[i]);
+ }
+
+ unregister_blkdev(lloop_major, "lloop");
+
+ OBD_FREE(disks, max_loop * sizeof(*disks));
+ OBD_FREE(loop_dev, max_loop * sizeof(*loop_dev));
+}
+
+module_init(lloop_init);
+module_exit(lloop_exit);
+
+CFS_MODULE_PARM(max_loop, "i", int, 0444, "maximum number of lloop devices");
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("Lustre virtual block device");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
new file mode 100644
index 0000000..d4d3c17
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -0,0 +1,1370 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+#define DEBUG_SUBSYSTEM S_LLITE
+
+#include <lustre_lite.h>
+#include <lprocfs_status.h>
+#include <linux/seq_file.h>
+#include <obd_support.h>
+
+#include "llite_internal.h"
+
+struct proc_dir_entry *proc_lustre_fs_root;
+
+#ifdef LPROCFS
+/* /proc/lustre/llite mount point registration */
+extern struct file_operations vvp_dump_pgcache_file_ops;
+struct file_operations ll_rw_extents_stats_fops;
+struct file_operations ll_rw_extents_stats_pp_fops;
+struct file_operations ll_rw_offset_stats_fops;
+
+static int ll_blksize_seq_show(struct seq_file *m, void *v)
+{
+ struct super_block *sb = (struct super_block *)m->private;
+ struct obd_statfs osfs;
+ int rc;
+
+ LASSERT(sb != NULL);
+ rc = ll_statfs_internal(sb, &osfs,
+ cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ OBD_STATFS_NODELAY);
+ if (!rc)
+ rc = seq_printf(m, "%u\n", osfs.os_bsize);
+
+ return rc;
+}
+LPROC_SEQ_FOPS_RO(ll_blksize);
+
+static int ll_kbytestotal_seq_show(struct seq_file *m, void *v)
+{
+ struct super_block *sb = (struct super_block *)m->private;
+ struct obd_statfs osfs;
+ int rc;
+
+ LASSERT(sb != NULL);
+ rc = ll_statfs_internal(sb, &osfs,
+ cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ OBD_STATFS_NODELAY);
+ if (!rc) {
+ __u32 blk_size = osfs.os_bsize >> 10;
+ __u64 result = osfs.os_blocks;
+
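+ /* scale the block count (in units of os_bsize) up to kilobytes */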
+ while (blk_size >>= 1)
+ result <<= 1;
+
+ rc = seq_printf(m, LPU64"\n", result);
+ }
+ return rc;
+}
+LPROC_SEQ_FOPS_RO(ll_kbytestotal);
+
+static int ll_kbytesfree_seq_show(struct seq_file *m, void *v)
+{
+ struct super_block *sb = (struct super_block *)m->private;
+ struct obd_statfs osfs;
+ int rc;
+
+ LASSERT(sb != NULL);
+ rc = ll_statfs_internal(sb, &osfs,
+ cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ OBD_STATFS_NODELAY);
+ if (!rc) {
+ __u32 blk_size = osfs.os_bsize >> 10;
+ __u64 result = osfs.os_bfree;
+
+ while (blk_size >>= 1)
+ result <<= 1;
+
+ rc = seq_printf(m, LPU64"\n", result);
+ }
+ return rc;
+}
+LPROC_SEQ_FOPS_RO(ll_kbytesfree);
+
+static int ll_kbytesavail_seq_show(struct seq_file *m, void *v)
+{
+ struct super_block *sb = (struct super_block *)m->private;
+ struct obd_statfs osfs;
+ int rc;
+
+ LASSERT(sb != NULL);
+ rc = ll_statfs_internal(sb, &osfs,
+ cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ OBD_STATFS_NODELAY);
+ if (!rc) {
+ __u32 blk_size = osfs.os_bsize >> 10;
+ __u64 result = osfs.os_bavail;
+
+ while (blk_size >>= 1)
+ result <<= 1;
+
+ rc = seq_printf(m, LPU64"\n", result);
+ }
+ return rc;
+}
+LPROC_SEQ_FOPS_RO(ll_kbytesavail);
+
+static int ll_filestotal_seq_show(struct seq_file *m, void *v)
+{
+ struct super_block *sb = (struct super_block *)m->private;
+ struct obd_statfs osfs;
+ int rc;
+
+ LASSERT(sb != NULL);
+ rc = ll_statfs_internal(sb, &osfs,
+ cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ OBD_STATFS_NODELAY);
+ if (!rc)
+ rc = seq_printf(m, LPU64"\n", osfs.os_files);
+ return rc;
+}
+LPROC_SEQ_FOPS_RO(ll_filestotal);
+
+static int ll_filesfree_seq_show(struct seq_file *m, void *v)
+{
+ struct super_block *sb = (struct super_block *)m->private;
+ struct obd_statfs osfs;
+ int rc;
+
+ LASSERT(sb != NULL);
+ rc = ll_statfs_internal(sb, &osfs,
+ cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ OBD_STATFS_NODELAY);
+ if (!rc)
+ rc = seq_printf(m, LPU64"\n", osfs.os_ffree);
+ return rc;
+}
+LPROC_SEQ_FOPS_RO(ll_filesfree);
+
+static int ll_client_type_seq_show(struct seq_file *m, void *v)
+{
+ struct ll_sb_info *sbi = ll_s2sbi((struct super_block *)m->private);
+ int rc;
+
+ LASSERT(sbi != NULL);
+
+ if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
+ rc = seq_printf(m, "remote client\n");
+ else
+ rc = seq_printf(m, "local client\n");
+
+ return rc;
+}
+LPROC_SEQ_FOPS_RO(ll_client_type);
+
+static int ll_fstype_seq_show(struct seq_file *m, void *v)
+{
+ struct super_block *sb = (struct super_block *)m->private;
+
+ LASSERT(sb != NULL);
+ return seq_printf(m, "%s\n", sb->s_type->name);
+}
+LPROC_SEQ_FOPS_RO(ll_fstype);
+
+static int ll_sb_uuid_seq_show(struct seq_file *m, void *v)
+{
+ struct super_block *sb = (struct super_block *)m->private;
+
+ LASSERT(sb != NULL);
+ return seq_printf(m, "%s\n", ll_s2sbi(sb)->ll_sb_uuid.uuid);
+}
+LPROC_SEQ_FOPS_RO(ll_sb_uuid);
+
+static int ll_site_stats_seq_show(struct seq_file *m, void *v)
+{
+ struct super_block *sb = m->private;
+
+ /*
+ * See description of statistical counters in struct cl_site, and
+ * struct lu_site.
+ */
+ return cl_site_stats_print(lu2cl_site(ll_s2sbi(sb)->ll_site), m);
+}
+LPROC_SEQ_FOPS_RO(ll_site_stats);
+
+static int ll_max_readahead_mb_seq_show(struct seq_file *m, void *v)
+{
+ struct super_block *sb = m->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ long pages_number;
+ int mult;
+
+ spin_lock(&sbi->ll_lock);
+ pages_number = sbi->ll_ra_info.ra_max_pages;
+ spin_unlock(&sbi->ll_lock);
+
+ mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ return lprocfs_seq_read_frac_helper(m, pages_number, mult);
+}
+
+static ssize_t ll_max_readahead_mb_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct super_block *sb = ((struct seq_file *)file->private_data)->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ int mult, rc, pages_number;
+
+ mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
+ if (rc)
+ return rc;
+
+ if (pages_number < 0 || pages_number > totalram_pages / 2) {
+ CERROR("can't set file readahead more than %lu MB\n",
+ totalram_pages >> (20 - PAGE_CACHE_SHIFT + 1)); /* 1/2 of RAM */
+ return -ERANGE;
+ }
+
+ spin_lock(&sbi->ll_lock);
+ sbi->ll_ra_info.ra_max_pages = pages_number;
+ spin_unlock(&sbi->ll_lock);
+
+ return count;
+}
+LPROC_SEQ_FOPS(ll_max_readahead_mb);
+
+static int ll_max_readahead_per_file_mb_seq_show(struct seq_file *m, void *v)
+{
+ struct super_block *sb = m->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ long pages_number;
+ int mult;
+
+ spin_lock(&sbi->ll_lock);
+ pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
+ spin_unlock(&sbi->ll_lock);
+
+ mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ return lprocfs_seq_read_frac_helper(m, pages_number, mult);
+}
+
+static ssize_t ll_max_readahead_per_file_mb_seq_write(struct file *file,
+ const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct super_block *sb = ((struct seq_file *)file->private_data)->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ int mult, rc, pages_number;
+
+ mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
+ if (rc)
+ return rc;
+
+ if (pages_number < 0 ||
+ pages_number > sbi->ll_ra_info.ra_max_pages) {
+ CERROR("can't set file readahead more than"
+ "max_read_ahead_mb %lu MB\n",
+ sbi->ll_ra_info.ra_max_pages);
+ return -ERANGE;
+ }
+
+ spin_lock(&sbi->ll_lock);
+ sbi->ll_ra_info.ra_max_pages_per_file = pages_number;
+ spin_unlock(&sbi->ll_lock);
+
+ return count;
+}
+LPROC_SEQ_FOPS(ll_max_readahead_per_file_mb);
+
+static int ll_max_read_ahead_whole_mb_seq_show(struct seq_file *m, void *unused)
+{
+ struct super_block *sb = m->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ long pages_number;
+ int mult;
+
+ spin_lock(&sbi->ll_lock);
+ pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
+ spin_unlock(&sbi->ll_lock);
+
+ mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ return lprocfs_seq_read_frac_helper(m, pages_number, mult);
+}
+
+static ssize_t ll_max_read_ahead_whole_mb_seq_write(struct file *file,
+ const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct super_block *sb = ((struct seq_file *)file->private_data)->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ int mult, rc, pages_number;
+
+ mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
+ if (rc)
+ return rc;
+
+ /* Cap this at the current max readahead window size; the readahead
+ * algorithm does this anyway, so it's pointless to set it larger. */
+ if (pages_number < 0 ||
+ pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
+ CERROR("can't set max_read_ahead_whole_mb more than "
+ "max_read_ahead_per_file_mb: %lu\n",
+ sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_CACHE_SHIFT));
+ return -ERANGE;
+ }
+
+ spin_lock(&sbi->ll_lock);
+ sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number;
+ spin_unlock(&sbi->ll_lock);
+
+ return count;
+}
+LPROC_SEQ_FOPS(ll_max_read_ahead_whole_mb);
+
+static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
+{
+ struct super_block *sb = m->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ struct cl_client_cache *cache = &sbi->ll_cache;
+ int shift = 20 - PAGE_CACHE_SHIFT;
+ int max_cached_mb;
+ int unused_mb;
+
+ max_cached_mb = cache->ccc_lru_max >> shift;
+ unused_mb = atomic_read(&cache->ccc_lru_left) >> shift;
+ return seq_printf(m,
+ "users: %d\n"
+ "max_cached_mb: %d\n"
+ "used_mb: %d\n"
+ "unused_mb: %d\n"
+ "reclaim_count: %u\n",
+ atomic_read(&cache->ccc_users),
+ max_cached_mb,
+ max_cached_mb - unused_mb,
+ unused_mb,
+ cache->ccc_lru_shrinkers);
+}
+
+static ssize_t ll_max_cached_mb_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct super_block *sb = ((struct seq_file *)file->private_data)->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ struct cl_client_cache *cache = &sbi->ll_cache;
+ int mult, rc, pages_number;
+ int diff = 0;
+ int nrpages = 0;
+
+ mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ buffer = lprocfs_find_named_value(buffer, "max_cached_mb:", &count);
+ rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
+ if (rc)
+ return rc;
+
+ if (pages_number < 0 || pages_number > totalram_pages) {
+ CERROR("%s: can't set max cache more than %lu MB\n",
+ ll_get_fsname(sb, NULL, 0),
+ totalram_pages >> (20 - PAGE_CACHE_SHIFT));
+ return -ERANGE;
+ }
+
+ if (sbi->ll_dt_exp == NULL)
+ return -ENODEV;
+
+ spin_lock(&sbi->ll_lock);
+ diff = pages_number - cache->ccc_lru_max;
+ spin_unlock(&sbi->ll_lock);
+
+ /* easy - add more LRU slots. */
+ if (diff >= 0) {
+ atomic_add(diff, &cache->ccc_lru_left);
+ GOTO(out, rc = 0);
+ }
+
+ diff = -diff;
+ while (diff > 0) {
+ int tmp;
+
+ /* reduce LRU budget from free slots. */
+ do {
+ int ov, nv;
+
+ ov = atomic_read(&cache->ccc_lru_left);
+ if (ov == 0)
+ break;
+
+ nv = ov > diff ? ov - diff : 0;
+ rc = atomic_cmpxchg(&cache->ccc_lru_left, ov, nv);
+ if (likely(ov == rc)) {
+ diff -= ov - nv;
+ nrpages += ov - nv;
+ break;
+ }
+ } while (1);
+
+ if (diff <= 0)
+ break;
+
+ /* difficult - have to ask OSCs to drop LRU slots. */
+ tmp = diff << 1;
+ rc = obd_set_info_async(NULL, sbi->ll_dt_exp,
+ sizeof(KEY_CACHE_LRU_SHRINK),
+ KEY_CACHE_LRU_SHRINK,
+ sizeof(tmp), &tmp, NULL);
+ if (rc < 0)
+ break;
+ }
+
+out:
+ if (rc >= 0) {
+ spin_lock(&sbi->ll_lock);
+ cache->ccc_lru_max = pages_number;
+ spin_unlock(&sbi->ll_lock);
+ rc = count;
+ } else {
+ atomic_add(nrpages, &cache->ccc_lru_left);
+ }
+ return rc;
+}
+LPROC_SEQ_FOPS(ll_max_cached_mb);
+
+static int ll_checksum_seq_show(struct seq_file *m, void *v)
+{
+ struct super_block *sb = m->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+
+ return seq_printf(m, "%u\n", (sbi->ll_flags & LL_SBI_CHECKSUM) ? 1 : 0);
+}
+
+static ssize_t ll_checksum_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct super_block *sb = ((struct seq_file *)file->private_data)->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ int val, rc;
+
+ if (!sbi->ll_dt_exp)
+ /* Not set up yet */
+ return -EAGAIN;
+
+ rc = lprocfs_write_helper(buffer, count, &val);
+ if (rc)
+ return rc;
+ if (val)
+ sbi->ll_flags |= LL_SBI_CHECKSUM;
+ else
+ sbi->ll_flags &= ~LL_SBI_CHECKSUM;
+
+ rc = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
+ KEY_CHECKSUM, sizeof(val), &val, NULL);
+ if (rc)
+ CWARN("Failed to set OSC checksum flags: %d\n", rc);
+
+ return count;
+}
+LPROC_SEQ_FOPS(ll_checksum);
+
+static int ll_max_rw_chunk_seq_show(struct seq_file *m, void *v)
+{
+ struct super_block *sb = m->private;
+
+ return seq_printf(m, "%lu\n", ll_s2sbi(sb)->ll_max_rw_chunk);
+}
+
+static ssize_t ll_max_rw_chunk_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct super_block *sb = ((struct seq_file *)file->private_data)->private;
+ int rc, val;
+
+ rc = lprocfs_write_helper(buffer, count, &val);
+ if (rc)
+ return rc;
+ ll_s2sbi(sb)->ll_max_rw_chunk = val;
+ return count;
+}
+LPROC_SEQ_FOPS(ll_max_rw_chunk);
+
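+/*
+ * Shared helpers for the stats_track_{pid,ppid,gid} files: show the id that
+ * is currently being tracked ("0 (all)" when tracking everything, or
+ * "untracked"), and on write store the new id, switch the tracking type and
+ * clear the accumulated stats.
+ */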
+static int ll_rd_track_id(struct seq_file *m, enum stats_track_type type)
+{
+ struct super_block *sb = m->private;
+
+ if (ll_s2sbi(sb)->ll_stats_track_type == type) {
+ return seq_printf(m, "%d\n",
+ ll_s2sbi(sb)->ll_stats_track_id);
+
+ } else if (ll_s2sbi(sb)->ll_stats_track_type == STATS_TRACK_ALL) {
+ return seq_printf(m, "0 (all)\n");
+ } else {
+ return seq_printf(m, "untracked\n");
+ }
+}
+
+static int ll_wr_track_id(const char *buffer, unsigned long count, void *data,
+ enum stats_track_type type)
+{
+ struct super_block *sb = data;
+ int rc, pid;
+
+ rc = lprocfs_write_helper(buffer, count, &pid);
+ if (rc)
+ return rc;
+ ll_s2sbi(sb)->ll_stats_track_id = pid;
+ if (pid == 0)
+ ll_s2sbi(sb)->ll_stats_track_type = STATS_TRACK_ALL;
+ else
+ ll_s2sbi(sb)->ll_stats_track_type = type;
+ lprocfs_clear_stats(ll_s2sbi(sb)->ll_stats);
+ return count;
+}
+
+static int ll_track_pid_seq_show(struct seq_file *m, void *v)
+{
+ return ll_rd_track_id(m, STATS_TRACK_PID);
+}
+
+static ssize_t ll_track_pid_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct seq_file *seq = file->private_data;
+ return ll_wr_track_id(buffer, count, seq->private, STATS_TRACK_PID);
+}
+LPROC_SEQ_FOPS(ll_track_pid);
+
+static int ll_track_ppid_seq_show(struct seq_file *m, void *v)
+{
+ return ll_rd_track_id(m, STATS_TRACK_PPID);
+}
+
+static ssize_t ll_track_ppid_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct seq_file *seq = file->private_data;
+ return ll_wr_track_id(buffer, count, seq->private, STATS_TRACK_PPID);
+}
+LPROC_SEQ_FOPS(ll_track_ppid);
+
+static int ll_track_gid_seq_show(struct seq_file *m, void *v)
+{
+ return ll_rd_track_id(m, STATS_TRACK_GID);
+}
+
+static ssize_t ll_track_gid_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct seq_file *seq = file->private_data;
+ return ll_wr_track_id(buffer, count, seq->private, STATS_TRACK_GID);
+}
+LPROC_SEQ_FOPS(ll_track_gid);
+
+static int ll_statahead_max_seq_show(struct seq_file *m, void *v)
+{
+ struct super_block *sb = m->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+
+ return seq_printf(m, "%u\n", sbi->ll_sa_max);
+}
+
+static ssize_t ll_statahead_max_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct super_block *sb = ((struct seq_file *)file->private_data)->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ int val, rc;
+
+ rc = lprocfs_write_helper(buffer, count, &val);
+ if (rc)
+ return rc;
+
+ if (val >= 0 && val <= LL_SA_RPC_MAX)
+ sbi->ll_sa_max = val;
+ else
+ CERROR("Bad statahead_max value %d. Valid values are in the "
+ "range [0, %d]\n", val, LL_SA_RPC_MAX);
+
+ return count;
+}
+LPROC_SEQ_FOPS(ll_statahead_max);
+
+static int ll_statahead_agl_seq_show(struct seq_file *m, void *v)
+{
+ struct super_block *sb = m->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+
+ return seq_printf(m, "%u\n",
+ sbi->ll_flags & LL_SBI_AGL_ENABLED ? 1 : 0);
+}
+
+static ssize_t ll_statahead_agl_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct super_block *sb = ((struct seq_file *)file->private_data)->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ int val, rc;
+
+ rc = lprocfs_write_helper(buffer, count, &val);
+ if (rc)
+ return rc;
+
+ if (val)
+ sbi->ll_flags |= LL_SBI_AGL_ENABLED;
+ else
+ sbi->ll_flags &= ~LL_SBI_AGL_ENABLED;
+
+ return count;
+}
+LPROC_SEQ_FOPS(ll_statahead_agl);
+
+static int ll_statahead_stats_seq_show(struct seq_file *m, void *v)
+{
+ struct super_block *sb = m->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+
+ return seq_printf(m,
+ "statahead total: %u\n"
+ "statahead wrong: %u\n"
+ "agl total: %u\n",
+ atomic_read(&sbi->ll_sa_total),
+ atomic_read(&sbi->ll_sa_wrong),
+ atomic_read(&sbi->ll_agl_total));
+}
+LPROC_SEQ_FOPS_RO(ll_statahead_stats);
+
+static int ll_lazystatfs_seq_show(struct seq_file *m, void *v)
+{
+ struct super_block *sb = m->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+
+ return seq_printf(m, "%u\n",
+ (sbi->ll_flags & LL_SBI_LAZYSTATFS) ? 1 : 0);
+}
+
+static ssize_t ll_lazystatfs_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct super_block *sb = ((struct seq_file *)file->private_data)->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ int val, rc;
+
+ rc = lprocfs_write_helper(buffer, count, &val);
+ if (rc)
+ return rc;
+
+ if (val)
+ sbi->ll_flags |= LL_SBI_LAZYSTATFS;
+ else
+ sbi->ll_flags &= ~LL_SBI_LAZYSTATFS;
+
+ return count;
+}
+LPROC_SEQ_FOPS(ll_lazystatfs);
+
+static int ll_maxea_size_seq_show(struct seq_file *m, void *v)
+{
+ struct super_block *sb = m->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ unsigned int ealen;
+ int rc;
+
+ rc = ll_get_max_mdsize(sbi, &ealen);
+ if (rc)
+ return rc;
+
+ return seq_printf(m, "%u\n", ealen);
+}
+LPROC_SEQ_FOPS_RO(ll_maxea_size);
+
+static int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
+{
+ const char *str[] = LL_SBI_FLAGS;
+ struct super_block *sb = m->private;
+ int flags = ll_s2sbi(sb)->ll_flags;
+ int i = 0;
+
+ while (flags != 0) {
+ if (ARRAY_SIZE(str) <= i) {
+			CERROR("%s: please revise the LL_SBI_FLAGS array to "
+			       "match sbi->ll_flags\n",
+			       ll_get_fsname(sb, NULL, 0));
+ return -EINVAL;
+ }
+
+ if (flags & 0x1)
+ seq_printf(m, "%s ", str[i]);
+ flags >>= 1;
+ ++i;
+ }
+ seq_printf(m, "\b\n");
+ return 0;
+}
+LPROC_SEQ_FOPS_RO(ll_sbi_flags);
+
+static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
+ { "uuid", &ll_sb_uuid_fops, 0, 0 },
+ //{ "mntpt_path", ll_rd_path, 0, 0 },
+ { "fstype", &ll_fstype_fops, 0, 0 },
+ { "site", &ll_site_stats_fops, 0, 0 },
+ { "blocksize", &ll_blksize_fops, 0, 0 },
+ { "kbytestotal", &ll_kbytestotal_fops, 0, 0 },
+ { "kbytesfree", &ll_kbytesfree_fops, 0, 0 },
+ { "kbytesavail", &ll_kbytesavail_fops, 0, 0 },
+ { "filestotal", &ll_filestotal_fops, 0, 0 },
+ { "filesfree", &ll_filesfree_fops, 0, 0 },
+ { "client_type", &ll_client_type_fops, 0, 0 },
+ //{ "filegroups", lprocfs_rd_filegroups, 0, 0 },
+ { "max_read_ahead_mb", &ll_max_readahead_mb_fops, 0 },
+ { "max_read_ahead_per_file_mb", &ll_max_readahead_per_file_mb_fops, 0 },
+ { "max_read_ahead_whole_mb", &ll_max_read_ahead_whole_mb_fops, 0 },
+ { "max_cached_mb", &ll_max_cached_mb_fops, 0 },
+ { "checksum_pages", &ll_checksum_fops, 0 },
+ { "max_rw_chunk", &ll_max_rw_chunk_fops, 0 },
+ { "stats_track_pid", &ll_track_pid_fops, 0 },
+ { "stats_track_ppid", &ll_track_ppid_fops, 0 },
+ { "stats_track_gid", &ll_track_gid_fops, 0 },
+ { "statahead_max", &ll_statahead_max_fops, 0 },
+ { "statahead_agl", &ll_statahead_agl_fops, 0 },
+ { "statahead_stats", &ll_statahead_stats_fops, 0, 0 },
+ { "lazystatfs", &ll_lazystatfs_fops, 0 },
+ { "max_easize", &ll_maxea_size_fops, 0, 0 },
+ { "sbi_flags", &ll_sbi_flags_fops, 0, 0 },
+ { 0 }
+};
+
+#define MAX_STRING_SIZE 128
+
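+/*
+ * Table of per-mount "stats" counters: one entry per LPROC_LL_* opcode with
+ * the counter type (plain count, bytes or pages, optionally with
+ * avg/min/max) and the name shown in the stats file.
+ */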
+struct llite_file_opcode {
+ __u32 opcode;
+ __u32 type;
+ const char *opname;
+} llite_opcode_table[LPROC_LL_FILE_OPCODES] = {
+ /* file operation */
+ { LPROC_LL_DIRTY_HITS, LPROCFS_TYPE_REGS, "dirty_pages_hits" },
+ { LPROC_LL_DIRTY_MISSES, LPROCFS_TYPE_REGS, "dirty_pages_misses" },
+ { LPROC_LL_READ_BYTES, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
+ "read_bytes" },
+ { LPROC_LL_WRITE_BYTES, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
+ "write_bytes" },
+ { LPROC_LL_BRW_READ, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_PAGES,
+ "brw_read" },
+ { LPROC_LL_BRW_WRITE, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_PAGES,
+ "brw_write" },
+ { LPROC_LL_OSC_READ, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
+ "osc_read" },
+ { LPROC_LL_OSC_WRITE, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
+ "osc_write" },
+ { LPROC_LL_IOCTL, LPROCFS_TYPE_REGS, "ioctl" },
+ { LPROC_LL_OPEN, LPROCFS_TYPE_REGS, "open" },
+ { LPROC_LL_RELEASE, LPROCFS_TYPE_REGS, "close" },
+ { LPROC_LL_MAP, LPROCFS_TYPE_REGS, "mmap" },
+ { LPROC_LL_LLSEEK, LPROCFS_TYPE_REGS, "seek" },
+ { LPROC_LL_FSYNC, LPROCFS_TYPE_REGS, "fsync" },
+ { LPROC_LL_READDIR, LPROCFS_TYPE_REGS, "readdir" },
+ /* inode operation */
+ { LPROC_LL_SETATTR, LPROCFS_TYPE_REGS, "setattr" },
+ { LPROC_LL_TRUNC, LPROCFS_TYPE_REGS, "truncate" },
+ { LPROC_LL_FLOCK, LPROCFS_TYPE_REGS, "flock" },
+ { LPROC_LL_GETATTR, LPROCFS_TYPE_REGS, "getattr" },
+ /* dir inode operation */
+ { LPROC_LL_CREATE, LPROCFS_TYPE_REGS, "create" },
+ { LPROC_LL_LINK, LPROCFS_TYPE_REGS, "link" },
+ { LPROC_LL_UNLINK, LPROCFS_TYPE_REGS, "unlink" },
+ { LPROC_LL_SYMLINK, LPROCFS_TYPE_REGS, "symlink" },
+ { LPROC_LL_MKDIR, LPROCFS_TYPE_REGS, "mkdir" },
+ { LPROC_LL_RMDIR, LPROCFS_TYPE_REGS, "rmdir" },
+ { LPROC_LL_MKNOD, LPROCFS_TYPE_REGS, "mknod" },
+ { LPROC_LL_RENAME, LPROCFS_TYPE_REGS, "rename" },
+ /* special inode operation */
+ { LPROC_LL_STAFS, LPROCFS_TYPE_REGS, "statfs" },
+ { LPROC_LL_ALLOC_INODE, LPROCFS_TYPE_REGS, "alloc_inode" },
+ { LPROC_LL_SETXATTR, LPROCFS_TYPE_REGS, "setxattr" },
+ { LPROC_LL_GETXATTR, LPROCFS_TYPE_REGS, "getxattr" },
+ { LPROC_LL_LISTXATTR, LPROCFS_TYPE_REGS, "listxattr" },
+ { LPROC_LL_REMOVEXATTR, LPROCFS_TYPE_REGS, "removexattr" },
+ { LPROC_LL_INODE_PERM, LPROCFS_TYPE_REGS, "inode_permission" },
+};
+
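+/*
+ * Add @count to the counter for @op, but only when the current process
+ * matches the configured tracking filter (all processes, or the pid, ppid
+ * or gid selected through the stats_track_* files).
+ */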
+void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count)
+{
+ if (!sbi->ll_stats)
+ return;
+ if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
+ lprocfs_counter_add(sbi->ll_stats, op, count);
+ else if (sbi->ll_stats_track_type == STATS_TRACK_PID &&
+ sbi->ll_stats_track_id == current->pid)
+ lprocfs_counter_add(sbi->ll_stats, op, count);
+ else if (sbi->ll_stats_track_type == STATS_TRACK_PPID &&
+ sbi->ll_stats_track_id == current->parent->pid)
+ lprocfs_counter_add(sbi->ll_stats, op, count);
+ else if (sbi->ll_stats_track_type == STATS_TRACK_GID &&
+ sbi->ll_stats_track_id ==
+ from_kgid(&init_user_ns, current_gid()))
+ lprocfs_counter_add(sbi->ll_stats, op, count);
+}
+EXPORT_SYMBOL(ll_stats_ops_tally);
+
+static const char *ra_stat_string[] = {
+ [RA_STAT_HIT] = "hits",
+ [RA_STAT_MISS] = "misses",
+ [RA_STAT_DISTANT_READPAGE] = "readpage not consecutive",
+ [RA_STAT_MISS_IN_WINDOW] = "miss inside window",
+ [RA_STAT_FAILED_GRAB_PAGE] = "failed grab_cache_page",
+ [RA_STAT_FAILED_MATCH] = "failed lock match",
+ [RA_STAT_DISCARDED] = "read but discarded",
+ [RA_STAT_ZERO_LEN] = "zero length file",
+ [RA_STAT_ZERO_WINDOW] = "zero size window",
+ [RA_STAT_EOF] = "read-ahead to EOF",
+ [RA_STAT_MAX_IN_FLIGHT] = "hit max r-a issue",
+ [RA_STAT_WRONG_GRAB_PAGE] = "wrong page from grab_cache_page",
+};
+
+LPROC_SEQ_FOPS_RO_TYPE(llite, name);
+LPROC_SEQ_FOPS_RO_TYPE(llite, uuid);
+
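+/*
+ * Create the per-mount proc directory ("<fsname>-<superblock>") and populate
+ * it with the llite tunables, the "stats" and "read_ahead_stats" counters,
+ * and common_name/uuid entries for the attached MDC and OSC devices.
+ */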
+int lprocfs_register_mountpoint(struct proc_dir_entry *parent,
+ struct super_block *sb, char *osc, char *mdc)
+{
+ struct lprocfs_vars lvars[2];
+ struct lustre_sb_info *lsi = s2lsi(sb);
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ struct obd_device *obd;
+ struct proc_dir_entry *dir;
+ char name[MAX_STRING_SIZE + 1], *ptr;
+ int err, id, len, rc;
+
+ memset(lvars, 0, sizeof(lvars));
+
+ name[MAX_STRING_SIZE] = '\0';
+ lvars[0].name = name;
+
+ LASSERT(sbi != NULL);
+ LASSERT(mdc != NULL);
+ LASSERT(osc != NULL);
+
+ /* Get fsname */
+ len = strlen(lsi->lsi_lmd->lmd_profile);
+ ptr = strrchr(lsi->lsi_lmd->lmd_profile, '-');
+ if (ptr && (strcmp(ptr, "-client") == 0))
+ len -= 7;
+
+ /* Mount info */
+ snprintf(name, MAX_STRING_SIZE, "%.*s-%p", len,
+ lsi->lsi_lmd->lmd_profile, sb);
+
+ sbi->ll_proc_root = lprocfs_register(name, parent, NULL, NULL);
+ if (IS_ERR(sbi->ll_proc_root)) {
+ err = PTR_ERR(sbi->ll_proc_root);
+ sbi->ll_proc_root = NULL;
+ return err;
+ }
+
+ rc = lprocfs_seq_create(sbi->ll_proc_root, "dump_page_cache", 0444,
+ &vvp_dump_pgcache_file_ops, sbi);
+ if (rc)
+ CWARN("Error adding the dump_page_cache file\n");
+
+ rc = lprocfs_seq_create(sbi->ll_proc_root, "extents_stats", 0644,
+ &ll_rw_extents_stats_fops, sbi);
+ if (rc)
+ CWARN("Error adding the extent_stats file\n");
+
+ rc = lprocfs_seq_create(sbi->ll_proc_root, "extents_stats_per_process",
+ 0644, &ll_rw_extents_stats_pp_fops, sbi);
+ if (rc)
+ CWARN("Error adding the extents_stats_per_process file\n");
+
+ rc = lprocfs_seq_create(sbi->ll_proc_root, "offset_stats", 0644,
+ &ll_rw_offset_stats_fops, sbi);
+ if (rc)
+ CWARN("Error adding the offset_stats file\n");
+
+ /* File operations stats */
+ sbi->ll_stats = lprocfs_alloc_stats(LPROC_LL_FILE_OPCODES,
+ LPROCFS_STATS_FLAG_NONE);
+ if (sbi->ll_stats == NULL)
+ GOTO(out, err = -ENOMEM);
+ /* do counter init */
+ for (id = 0; id < LPROC_LL_FILE_OPCODES; id++) {
+ __u32 type = llite_opcode_table[id].type;
+ void *ptr = NULL;
+ if (type & LPROCFS_TYPE_REGS)
+ ptr = "regs";
+ else if (type & LPROCFS_TYPE_BYTES)
+ ptr = "bytes";
+ else if (type & LPROCFS_TYPE_PAGES)
+ ptr = "pages";
+ lprocfs_counter_init(sbi->ll_stats,
+ llite_opcode_table[id].opcode,
+ (type & LPROCFS_CNTR_AVGMINMAX),
+ llite_opcode_table[id].opname, ptr);
+ }
+ err = lprocfs_register_stats(sbi->ll_proc_root, "stats", sbi->ll_stats);
+ if (err)
+ GOTO(out, err);
+
+ sbi->ll_ra_stats = lprocfs_alloc_stats(ARRAY_SIZE(ra_stat_string),
+ LPROCFS_STATS_FLAG_NONE);
+ if (sbi->ll_ra_stats == NULL)
+ GOTO(out, err = -ENOMEM);
+
+ for (id = 0; id < ARRAY_SIZE(ra_stat_string); id++)
+ lprocfs_counter_init(sbi->ll_ra_stats, id, 0,
+ ra_stat_string[id], "pages");
+ err = lprocfs_register_stats(sbi->ll_proc_root, "read_ahead_stats",
+ sbi->ll_ra_stats);
+ if (err)
+ GOTO(out, err);
+
+
+ err = lprocfs_add_vars(sbi->ll_proc_root, lprocfs_llite_obd_vars, sb);
+ if (err)
+ GOTO(out, err);
+
+ /* MDC info */
+ obd = class_name2obd(mdc);
+
+ LASSERT(obd != NULL);
+ LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
+ LASSERT(obd->obd_type->typ_name != NULL);
+
+ dir = proc_mkdir(obd->obd_type->typ_name, sbi->ll_proc_root);
+ if (dir == NULL)
+ GOTO(out, err = -ENOMEM);
+
+ snprintf(name, MAX_STRING_SIZE, "common_name");
+ lvars[0].fops = &llite_name_fops;
+ err = lprocfs_add_vars(dir, lvars, obd);
+ if (err)
+ GOTO(out, err);
+
+ snprintf(name, MAX_STRING_SIZE, "uuid");
+ lvars[0].fops = &llite_uuid_fops;
+ err = lprocfs_add_vars(dir, lvars, obd);
+ if (err)
+ GOTO(out, err);
+
+ /* OSC */
+ obd = class_name2obd(osc);
+
+ LASSERT(obd != NULL);
+ LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
+ LASSERT(obd->obd_type->typ_name != NULL);
+
+ dir = proc_mkdir(obd->obd_type->typ_name, sbi->ll_proc_root);
+ if (dir == NULL)
+ GOTO(out, err = -ENOMEM);
+
+ snprintf(name, MAX_STRING_SIZE, "common_name");
+ lvars[0].fops = &llite_name_fops;
+ err = lprocfs_add_vars(dir, lvars, obd);
+ if (err)
+ GOTO(out, err);
+
+ snprintf(name, MAX_STRING_SIZE, "uuid");
+ lvars[0].fops = &llite_uuid_fops;
+ err = lprocfs_add_vars(dir, lvars, obd);
+out:
+ if (err) {
+ lprocfs_remove(&sbi->ll_proc_root);
+ lprocfs_free_stats(&sbi->ll_ra_stats);
+ lprocfs_free_stats(&sbi->ll_stats);
+ }
+ return err;
+}
+
+void lprocfs_unregister_mountpoint(struct ll_sb_info *sbi)
+{
+ if (sbi->ll_proc_root) {
+ lprocfs_remove(&sbi->ll_proc_root);
+ lprocfs_free_stats(&sbi->ll_ra_stats);
+ lprocfs_free_stats(&sbi->ll_stats);
+ }
+}
+#undef MAX_STRING_SIZE
+
+#define pct(a, b) ((b) ? (a) * 100 / (b) : 0)
+
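+/*
+ * Print one process's read/write extent-size histogram: one row per
+ * power-of-two size bucket with the number of calls, the percentage and the
+ * cumulative percentage for reads and writes.
+ */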
+static void ll_display_extents_info(struct ll_rw_extents_info *io_extents,
+ struct seq_file *seq, int which)
+{
+ unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
+ unsigned long start, end, r, w;
+ char *unitp = "KMGTPEZY";
+ int i, units = 10;
+ struct per_process_info *pp_info = &io_extents->pp_extents[which];
+
+ read_cum = 0;
+ write_cum = 0;
+ start = 0;
+
+	for (i = 0; i < LL_HIST_MAX; i++) {
+		read_tot += pp_info->pp_r_hist.oh_buckets[i];
+		write_tot += pp_info->pp_w_hist.oh_buckets[i];
+	}
+
+	for (i = 0; i < LL_HIST_MAX; i++) {
+ r = pp_info->pp_r_hist.oh_buckets[i];
+ w = pp_info->pp_w_hist.oh_buckets[i];
+ read_cum += r;
+ write_cum += w;
+ end = 1 << (i + LL_HIST_START - units);
+ seq_printf(seq, "%4lu%c - %4lu%c%c: %14lu %4lu %4lu | "
+ "%14lu %4lu %4lu\n", start, *unitp, end, *unitp,
+ (i == LL_HIST_MAX - 1) ? '+' : ' ',
+ r, pct(r, read_tot), pct(read_cum, read_tot),
+ w, pct(w, write_tot), pct(write_cum, write_tot));
+ start = end;
+		if (start == 1 << 10) {
+ start = 1;
+ units += 10;
+ unitp++;
+ }
+ if (read_cum == read_tot && write_cum == write_tot)
+ break;
+ }
+}
+
+static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v)
+{
+ struct timeval now;
+ struct ll_sb_info *sbi = seq->private;
+ struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
+ int k;
+
+ do_gettimeofday(&now);
+
+ if (!sbi->ll_rw_stats_on) {
+ seq_printf(seq, "disabled\n"
+ "write anything in this file to activate, "
+ "then 0 or \"[D/d]isabled\" to deactivate\n");
+ return 0;
+ }
+ seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
+ now.tv_sec, now.tv_usec);
+ seq_printf(seq, "%15s %19s | %20s\n", " ", "read", "write");
+ seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n",
+ "extents", "calls", "%", "cum%",
+ "calls", "%", "cum%");
+ spin_lock(&sbi->ll_pp_extent_lock);
+ for (k = 0; k < LL_PROCESS_HIST_MAX; k++) {
+ if (io_extents->pp_extents[k].pid != 0) {
+ seq_printf(seq, "\nPID: %d\n",
+ io_extents->pp_extents[k].pid);
+ ll_display_extents_info(io_extents, seq, k);
+ }
+ }
+ spin_unlock(&sbi->ll_pp_extent_lock);
+ return 0;
+}
+
+static ssize_t ll_rw_extents_stats_pp_seq_write(struct file *file,
+ const char *buf, size_t len,
+ loff_t *off)
+{
+ struct seq_file *seq = file->private_data;
+ struct ll_sb_info *sbi = seq->private;
+ struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
+ int i;
+ int value = 1, rc = 0;
+
+ rc = lprocfs_write_helper(buf, len, &value);
+ if (rc < 0 && (strcmp(buf, "disabled") == 0 ||
+ strcmp(buf, "Disabled") == 0))
+ value = 0;
+
+ if (value == 0)
+ sbi->ll_rw_stats_on = 0;
+ else
+ sbi->ll_rw_stats_on = 1;
+
+ spin_lock(&sbi->ll_pp_extent_lock);
+ for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
+ io_extents->pp_extents[i].pid = 0;
+ lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
+ lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
+ }
+ spin_unlock(&sbi->ll_pp_extent_lock);
+ return len;
+}
+
+LPROC_SEQ_FOPS(ll_rw_extents_stats_pp);
+
+static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v)
+{
+ struct timeval now;
+ struct ll_sb_info *sbi = seq->private;
+ struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
+
+ do_gettimeofday(&now);
+
+ if (!sbi->ll_rw_stats_on) {
+ seq_printf(seq, "disabled\n"
+ "write anything in this file to activate, "
+ "then 0 or \"[D/d]isabled\" to deactivate\n");
+ return 0;
+ }
+ seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
+ now.tv_sec, now.tv_usec);
+
+ seq_printf(seq, "%15s %19s | %20s\n", " ", "read", "write");
+ seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n",
+ "extents", "calls", "%", "cum%",
+ "calls", "%", "cum%");
+ spin_lock(&sbi->ll_lock);
+ ll_display_extents_info(io_extents, seq, LL_PROCESS_HIST_MAX);
+ spin_unlock(&sbi->ll_lock);
+
+ return 0;
+}
+
+static ssize_t ll_rw_extents_stats_seq_write(struct file *file, const char *buf,
+ size_t len, loff_t *off)
+{
+ struct seq_file *seq = file->private_data;
+ struct ll_sb_info *sbi = seq->private;
+ struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
+ int i;
+ int value = 1, rc = 0;
+
+ rc = lprocfs_write_helper(buf, len, &value);
+ if (rc < 0 && (strcmp(buf, "disabled") == 0 ||
+ strcmp(buf, "Disabled") == 0))
+ value = 0;
+
+ if (value == 0)
+ sbi->ll_rw_stats_on = 0;
+ else
+ sbi->ll_rw_stats_on = 1;
+ spin_lock(&sbi->ll_pp_extent_lock);
+ for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
+ io_extents->pp_extents[i].pid = 0;
+ lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
+ lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
+ }
+ spin_unlock(&sbi->ll_pp_extent_lock);
+
+ return len;
+}
+
+LPROC_SEQ_FOPS(ll_rw_extents_stats);
+
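+/*
+ * Record a single read or write in the per-process extent histograms and in
+ * the offset statistics: consecutive I/O extends the current range for the
+ * pid, while a discontiguous offset pushes the finished range into the
+ * offset table and starts a new one.
+ */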
+void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
+ struct ll_file_data *file, loff_t pos,
+ size_t count, int rw)
+{
+ int i, cur = -1;
+ struct ll_rw_process_info *process;
+ struct ll_rw_process_info *offset;
+ int *off_count = &sbi->ll_rw_offset_entry_count;
+ int *process_count = &sbi->ll_offset_process_count;
+ struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
+
+	if (!sbi->ll_rw_stats_on)
+ return;
+ process = sbi->ll_rw_process_info;
+ offset = sbi->ll_rw_offset_info;
+
+ spin_lock(&sbi->ll_pp_extent_lock);
+ /* Extent statistics */
+	for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
+		if (io_extents->pp_extents[i].pid == pid) {
+ cur = i;
+ break;
+ }
+ }
+
+ if (cur == -1) {
+ /* new process */
+ sbi->ll_extent_process_count =
+ (sbi->ll_extent_process_count + 1) % LL_PROCESS_HIST_MAX;
+ cur = sbi->ll_extent_process_count;
+ io_extents->pp_extents[cur].pid = pid;
+ lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_r_hist);
+ lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_w_hist);
+ }
+
+	for (i = 0; (count >= (1 << LL_HIST_START << i)) &&
+ (i < (LL_HIST_MAX - 1)); i++);
+ if (rw == 0) {
+ io_extents->pp_extents[cur].pp_r_hist.oh_buckets[i]++;
+ io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_r_hist.oh_buckets[i]++;
+ } else {
+ io_extents->pp_extents[cur].pp_w_hist.oh_buckets[i]++;
+ io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_w_hist.oh_buckets[i]++;
+ }
+ spin_unlock(&sbi->ll_pp_extent_lock);
+
+ spin_lock(&sbi->ll_process_lock);
+ /* Offset statistics */
+ for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
+ if (process[i].rw_pid == pid) {
+ if (process[i].rw_last_file != file) {
+ process[i].rw_range_start = pos;
+ process[i].rw_last_file_pos = pos + count;
+ process[i].rw_smallest_extent = count;
+ process[i].rw_largest_extent = count;
+ process[i].rw_offset = 0;
+ process[i].rw_last_file = file;
+ spin_unlock(&sbi->ll_process_lock);
+ return;
+ }
+ if (process[i].rw_last_file_pos != pos) {
+ *off_count =
+ (*off_count + 1) % LL_OFFSET_HIST_MAX;
+ offset[*off_count].rw_op = process[i].rw_op;
+ offset[*off_count].rw_pid = pid;
+ offset[*off_count].rw_range_start =
+ process[i].rw_range_start;
+ offset[*off_count].rw_range_end =
+ process[i].rw_last_file_pos;
+ offset[*off_count].rw_smallest_extent =
+ process[i].rw_smallest_extent;
+ offset[*off_count].rw_largest_extent =
+ process[i].rw_largest_extent;
+ offset[*off_count].rw_offset =
+ process[i].rw_offset;
+ process[i].rw_op = rw;
+ process[i].rw_range_start = pos;
+ process[i].rw_smallest_extent = count;
+ process[i].rw_largest_extent = count;
+ process[i].rw_offset = pos -
+ process[i].rw_last_file_pos;
+ }
+			if (process[i].rw_smallest_extent > count)
+				process[i].rw_smallest_extent = count;
+			if (process[i].rw_largest_extent < count)
+ process[i].rw_largest_extent = count;
+ process[i].rw_last_file_pos = pos + count;
+ spin_unlock(&sbi->ll_process_lock);
+ return;
+ }
+ }
+ *process_count = (*process_count + 1) % LL_PROCESS_HIST_MAX;
+ process[*process_count].rw_pid = pid;
+ process[*process_count].rw_op = rw;
+ process[*process_count].rw_range_start = pos;
+ process[*process_count].rw_last_file_pos = pos + count;
+ process[*process_count].rw_smallest_extent = count;
+ process[*process_count].rw_largest_extent = count;
+ process[*process_count].rw_offset = 0;
+ process[*process_count].rw_last_file = file;
+ spin_unlock(&sbi->ll_process_lock);
+}
+
+static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
+{
+ struct timeval now;
+ struct ll_sb_info *sbi = seq->private;
+ struct ll_rw_process_info *offset = sbi->ll_rw_offset_info;
+ struct ll_rw_process_info *process = sbi->ll_rw_process_info;
+ int i;
+
+ do_gettimeofday(&now);
+
+ if (!sbi->ll_rw_stats_on) {
+ seq_printf(seq, "disabled\n"
+ "write anything in this file to activate, "
+ "then 0 or \"[D/d]isabled\" to deactivate\n");
+ return 0;
+ }
+ spin_lock(&sbi->ll_process_lock);
+
+ seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
+ now.tv_sec, now.tv_usec);
+ seq_printf(seq, "%3s %10s %14s %14s %17s %17s %14s\n",
+ "R/W", "PID", "RANGE START", "RANGE END",
+ "SMALLEST EXTENT", "LARGEST EXTENT", "OFFSET");
+ /* We stored the discontiguous offsets here; print them first */
+	for (i = 0; i < LL_OFFSET_HIST_MAX; i++) {
+ if (offset[i].rw_pid != 0)
+ seq_printf(seq,
+				   "%3c %10d %14Lu %14Lu %17lu %17lu %14Lu\n",
+ offset[i].rw_op == READ ? 'R' : 'W',
+ offset[i].rw_pid,
+ offset[i].rw_range_start,
+ offset[i].rw_range_end,
+ (unsigned long)offset[i].rw_smallest_extent,
+ (unsigned long)offset[i].rw_largest_extent,
+ offset[i].rw_offset);
+ }
+ /* Then print the current offsets for each process */
+	for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
+ if (process[i].rw_pid != 0)
+ seq_printf(seq,
+				   "%3c %10d %14Lu %14Lu %17lu %17lu %14Lu\n",
+ process[i].rw_op == READ ? 'R' : 'W',
+ process[i].rw_pid,
+ process[i].rw_range_start,
+ process[i].rw_last_file_pos,
+ (unsigned long)process[i].rw_smallest_extent,
+ (unsigned long)process[i].rw_largest_extent,
+ process[i].rw_offset);
+ }
+ spin_unlock(&sbi->ll_process_lock);
+
+ return 0;
+}
+
+static ssize_t ll_rw_offset_stats_seq_write(struct file *file, const char *buf,
+ size_t len, loff_t *off)
+{
+ struct seq_file *seq = file->private_data;
+ struct ll_sb_info *sbi = seq->private;
+ struct ll_rw_process_info *process_info = sbi->ll_rw_process_info;
+ struct ll_rw_process_info *offset_info = sbi->ll_rw_offset_info;
+ int value = 1, rc = 0;
+
+ rc = lprocfs_write_helper(buf, len, &value);
+
+ if (rc < 0 && (strcmp(buf, "disabled") == 0 ||
+ strcmp(buf, "Disabled") == 0))
+ value = 0;
+
+ if (value == 0)
+ sbi->ll_rw_stats_on = 0;
+ else
+ sbi->ll_rw_stats_on = 1;
+
+ spin_lock(&sbi->ll_process_lock);
+ sbi->ll_offset_process_count = 0;
+ sbi->ll_rw_offset_entry_count = 0;
+ memset(process_info, 0, sizeof(struct ll_rw_process_info) *
+ LL_PROCESS_HIST_MAX);
+ memset(offset_info, 0, sizeof(struct ll_rw_process_info) *
+ LL_OFFSET_HIST_MAX);
+ spin_unlock(&sbi->ll_process_lock);
+
+ return len;
+}
+
+LPROC_SEQ_FOPS(ll_rw_offset_stats);
+
+void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars)
+{
+ lvars->module_vars = NULL;
+ lvars->obd_vars = lprocfs_llite_obd_vars;
+}
+#endif /* LPROCFS */
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
new file mode 100644
index 0000000..34815b5
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -0,0 +1,1262 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/quotaops.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/security.h>
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+#include <obd_support.h>
+#include <lustre_fid.h>
+#include <lustre_lite.h>
+#include <lustre_dlm.h>
+#include <lustre_ver.h>
+#include "llite_internal.h"
+
+static int ll_create_it(struct inode *, struct dentry *,
+ int, struct lookup_intent *);
+
+/*
+ * Check if we have something mounted at the named dchild.
+ * In such a case a dentry would always be present.
+ */
+static int ll_d_mountpoint(struct dentry *dparent, struct dentry *dchild,
+ struct qstr *name)
+{
+ int mounted = 0;
+
+ if (unlikely(dchild)) {
+ mounted = d_mountpoint(dchild);
+ } else if (dparent) {
+ dchild = d_lookup(dparent, name);
+ if (dchild) {
+ mounted = d_mountpoint(dchild);
+ dput(dchild);
+ }
+ }
+ return mounted;
+}
+
+int ll_unlock(__u32 mode, struct lustre_handle *lockh)
+{
+ ldlm_lock_decref(lockh, mode);
+
+ return 0;
+}
+
+
+/* called from iget5_locked->find_inode() under inode_lock spinlock */
+static int ll_test_inode(struct inode *inode, void *opaque)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct lustre_md *md = opaque;
+
+ if (unlikely(!(md->body->valid & OBD_MD_FLID))) {
+ CERROR("MDS body missing FID\n");
+ return 0;
+ }
+
+ if (!lu_fid_eq(&lli->lli_fid, &md->body->fid1))
+ return 0;
+
+ return 1;
+}
+
+static int ll_set_inode(struct inode *inode, void *opaque)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct mdt_body *body = ((struct lustre_md *)opaque)->body;
+
+ if (unlikely(!(body->valid & OBD_MD_FLID))) {
+ CERROR("MDS body missing FID\n");
+ return -EINVAL;
+ }
+
+ lli->lli_fid = body->fid1;
+ if (unlikely(!(body->valid & OBD_MD_FLTYPE))) {
+ CERROR("Can not initialize inode "DFID" without object type: "
+ "valid = "LPX64"\n", PFID(&lli->lli_fid), body->valid);
+ return -EINVAL;
+ }
+
+ inode->i_mode = (inode->i_mode & ~S_IFMT) | (body->mode & S_IFMT);
+ if (unlikely(inode->i_mode == 0)) {
+ CERROR("Invalid inode "DFID" type\n", PFID(&lli->lli_fid));
+ return -EINVAL;
+ }
+
+ ll_lli_init(lli);
+
+ return 0;
+}
+
+
+/*
+ * Get an inode by inode number (already instantiated by the intent lookup).
+ * Returns inode or NULL
+ */
+struct inode *ll_iget(struct super_block *sb, ino_t hash,
+ struct lustre_md *md)
+{
+ struct inode *inode;
+
+ LASSERT(hash != 0);
+ inode = iget5_locked(sb, hash, ll_test_inode, ll_set_inode, md);
+
+ if (inode) {
+ if (inode->i_state & I_NEW) {
+ int rc = 0;
+
+ ll_read_inode2(inode, md);
+ if (S_ISREG(inode->i_mode) &&
+ ll_i2info(inode)->lli_clob == NULL) {
+ CDEBUG(D_INODE,
+ "%s: apply lsm %p to inode "DFID".\n",
+ ll_get_fsname(sb, NULL, 0), md->lsm,
+ PFID(ll_inode2fid(inode)));
+ rc = cl_file_inode_init(inode, md);
+ }
+ if (rc != 0) {
+ make_bad_inode(inode);
+ unlock_new_inode(inode);
+ iput(inode);
+ inode = ERR_PTR(rc);
+ } else
+ unlock_new_inode(inode);
+ } else if (!(inode->i_state & (I_FREEING | I_CLEAR)))
+ ll_update_inode(inode, md);
+ CDEBUG(D_VFSTRACE, "got inode: %p for "DFID"\n",
+ inode, PFID(&md->body->fid1));
+ }
+ return inode;
+}
+
+static void ll_invalidate_negative_children(struct inode *dir)
+{
+ struct dentry *dentry, *tmp_subdir;
+ struct ll_d_hlist_node *p;
+
+ ll_lock_dcache(dir);
+ ll_d_hlist_for_each_entry(dentry, p, &dir->i_dentry, d_alias) {
+ spin_lock(&dentry->d_lock);
+ if (!list_empty(&dentry->d_subdirs)) {
+ struct dentry *child;
+
+ list_for_each_entry_safe(child, tmp_subdir,
+ &dentry->d_subdirs,
+ d_u.d_child) {
+ if (child->d_inode == NULL)
+ d_lustre_invalidate(child, 1);
+ }
+ }
+ spin_unlock(&dentry->d_lock);
+ }
+ ll_unlock_dcache(dir);
+}
+
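+/*
+ * Blocking/cancel callback for MDS metadata locks.  On blocking, cancel the
+ * lock.  On cancel, depending on which inodebits are being dropped, close
+ * open file handles, invalidate the layout, clear the MDS size-lock flag and
+ * invalidate cached pages and dentries.
+ */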
+int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
+ void *data, int flag)
+{
+ int rc;
+ struct lustre_handle lockh;
+
+ switch (flag) {
+ case LDLM_CB_BLOCKING:
+ ldlm_lock2handle(lock, &lockh);
+ rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
+ if (rc < 0) {
+ CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
+ return rc;
+ }
+ break;
+ case LDLM_CB_CANCELING: {
+ struct inode *inode = ll_inode_from_resource_lock(lock);
+ struct ll_inode_info *lli;
+ __u64 bits = lock->l_policy_data.l_inodebits.bits;
+ struct lu_fid *fid;
+ ldlm_mode_t mode = lock->l_req_mode;
+
+ /* Inode is set to lock->l_resource->lr_lvb_inode
+ * for mdc - bug 24555 */
+ LASSERT(lock->l_ast_data == NULL);
+
+ /* Invalidate all dentries associated with this inode */
+ if (inode == NULL)
+ break;
+
+ LASSERT(lock->l_flags & LDLM_FL_CANCELING);
+ /* For OPEN locks we differentiate between lock modes
+ * LCK_CR, LCK_CW, LCK_PR - bug 22891 */
+ if (bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE |
+ MDS_INODELOCK_LAYOUT | MDS_INODELOCK_PERM))
+ ll_have_md_lock(inode, &bits, LCK_MINMODE);
+
+ if (bits & MDS_INODELOCK_OPEN)
+ ll_have_md_lock(inode, &bits, mode);
+
+ fid = ll_inode2fid(inode);
+ if (lock->l_resource->lr_name.name[0] != fid_seq(fid) ||
+ lock->l_resource->lr_name.name[1] != fid_oid(fid) ||
+ lock->l_resource->lr_name.name[2] != fid_ver(fid)) {
+ LDLM_ERROR(lock, "data mismatch with object "
+ DFID" (%p)", PFID(fid), inode);
+ }
+
+ if (bits & MDS_INODELOCK_OPEN) {
+ int flags = 0;
+ switch (lock->l_req_mode) {
+ case LCK_CW:
+ flags = FMODE_WRITE;
+ break;
+ case LCK_PR:
+ flags = FMODE_EXEC;
+ break;
+ case LCK_CR:
+ flags = FMODE_READ;
+ break;
+ default:
+ CERROR("Unexpected lock mode for OPEN lock "
+ "%d, inode %ld\n", lock->l_req_mode,
+ inode->i_ino);
+ }
+ ll_md_real_close(inode, flags);
+ }
+
+ lli = ll_i2info(inode);
+ if (bits & MDS_INODELOCK_LAYOUT) {
+ struct cl_object_conf conf = { { 0 } };
+
+ conf.coc_opc = OBJECT_CONF_INVALIDATE;
+ conf.coc_inode = inode;
+ rc = ll_layout_conf(inode, &conf);
+ if (rc)
+				CDEBUG(D_INODE, "invalidate layout error: %d\n", rc);
+ }
+
+ if (bits & MDS_INODELOCK_UPDATE) {
+ spin_lock(&lli->lli_lock);
+ lli->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
+ spin_unlock(&lli->lli_lock);
+ }
+
+ if (S_ISDIR(inode->i_mode) &&
+ (bits & MDS_INODELOCK_UPDATE)) {
+ CDEBUG(D_INODE, "invalidating inode %lu\n",
+ inode->i_ino);
+ truncate_inode_pages(inode->i_mapping, 0);
+ ll_invalidate_negative_children(inode);
+ }
+
+ if (inode->i_sb->s_root &&
+ inode != inode->i_sb->s_root->d_inode &&
+ (bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)))
+ ll_invalidate_aliases(inode);
+ iput(inode);
+ break;
+ }
+ default:
+ LBUG();
+ }
+
+ return 0;
+}
+
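+/* Return the inode's gid as a supplementary group if the caller is a member
+ * of that group, otherwise -1. */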
+__u32 ll_i2suppgid(struct inode *i)
+{
+ if (in_group_p(i->i_gid))
+ return (__u32)from_kgid(&init_user_ns, i->i_gid);
+ else
+ return (__u32)(-1);
+}
+
+/* Pack the required supplementary groups into the supplied groups array.
+ * If we don't need to use the groups from the target inode(s) then we
+ * instead pack one or more groups from the user's supplementary group
+ * array in case it might be useful. Not needed if doing an MDS-side upcall. */
+void ll_i2gids(__u32 *suppgids, struct inode *i1, struct inode *i2)
+{
+#if 0
+ int i;
+#endif
+
+ LASSERT(i1 != NULL);
+ LASSERT(suppgids != NULL);
+
+ suppgids[0] = ll_i2suppgid(i1);
+
+ if (i2)
+ suppgids[1] = ll_i2suppgid(i2);
+ else
+ suppgids[1] = -1;
+
+#if 0
+ for (i = 0; i < current_ngroups; i++) {
+ if (suppgids[0] == -1) {
+ if (current_groups[i] != suppgids[1])
+ suppgids[0] = current_groups[i];
+ continue;
+ }
+ if (suppgids[1] == -1) {
+ if (current_groups[i] != suppgids[0])
+ suppgids[1] = current_groups[i];
+ continue;
+ }
+ break;
+ }
+#endif
+}
+
+/*
+ * try to reuse three types of dentry:
+ * 1. unhashed alias, this one is unhashed by d_invalidate (but it may be valid
+ * by concurrent .revalidate).
+ * 2. INVALID alias (common case for no valid ldlm lock held, but this flag may
+ * be cleared by others calling d_lustre_revalidate).
+ * 3. DISCONNECTED alias.
+ */
+static struct dentry *ll_find_alias(struct inode *inode, struct dentry *dentry)
+{
+ struct dentry *alias, *discon_alias, *invalid_alias;
+ struct ll_d_hlist_node *p;
+
+ if (ll_d_hlist_empty(&inode->i_dentry))
+ return NULL;
+
+ discon_alias = invalid_alias = NULL;
+
+ ll_lock_dcache(inode);
+ ll_d_hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) {
+ LASSERT(alias != dentry);
+
+ spin_lock(&alias->d_lock);
+ if (alias->d_flags & DCACHE_DISCONNECTED)
+ /* LASSERT(last_discon == NULL); LU-405, bz 20055 */
+ discon_alias = alias;
+ else if (alias->d_parent == dentry->d_parent &&
+ alias->d_name.hash == dentry->d_name.hash &&
+ alias->d_name.len == dentry->d_name.len &&
+ memcmp(alias->d_name.name, dentry->d_name.name,
+ dentry->d_name.len) == 0)
+ invalid_alias = alias;
+ spin_unlock(&alias->d_lock);
+
+ if (invalid_alias)
+ break;
+ }
+ alias = invalid_alias ?: discon_alias ?: NULL;
+ if (alias) {
+ spin_lock(&alias->d_lock);
+ dget_dlock(alias);
+ spin_unlock(&alias->d_lock);
+ }
+ ll_unlock_dcache(inode);
+
+ return alias;
+}
+
+/*
+ * Similar to d_splice_alias(), but lustre treats invalid alias
+ * similar to DCACHE_DISCONNECTED, and tries to use it anyway.
+ */
+struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de)
+{
+ struct dentry *new;
+
+ if (inode) {
+ new = ll_find_alias(inode, de);
+ if (new) {
+ ll_dops_init(new, 1, 1);
+ d_move(new, de);
+ iput(inode);
+ CDEBUG(D_DENTRY,
+ "Reuse dentry %p inode %p refc %d flags %#x\n",
+ new, new->d_inode, d_count(new), new->d_flags);
+ return new;
+ }
+ }
+ ll_dops_init(de, 1, 1);
+ __d_lustre_invalidate(de);
+ d_add(de, inode);
+ CDEBUG(D_DENTRY, "Add dentry %p inode %p refc %d flags %#x\n",
+ de, de->d_inode, d_count(de), de->d_flags);
+ return de;
+}
+
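+/*
+ * Finish an intent lookup: instantiate the inode from the intent reply for a
+ * positive lookup, splice the dentry into the dcache, and mark it valid when
+ * the reply granted a LOOKUP lock or, for a negative dentry, when the parent
+ * directory still holds an UPDATE lock.
+ */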
+int ll_lookup_it_finish(struct ptlrpc_request *request,
+ struct lookup_intent *it, void *data)
+{
+ struct it_cb_data *icbd = data;
+ struct dentry **de = icbd->icbd_childp;
+ struct inode *parent = icbd->icbd_parent;
+ struct inode *inode = NULL;
+ __u64 bits = 0;
+ int rc;
+
+ /* NB 1 request reference will be taken away by ll_intent_lock()
+ * when I return */
+ CDEBUG(D_DENTRY, "it %p it_disposition %x\n", it,
+ it->d.lustre.it_disposition);
+ if (!it_disposition(it, DISP_LOOKUP_NEG)) {
+ rc = ll_prep_inode(&inode, request, (*de)->d_sb, it);
+ if (rc)
+ return rc;
+
+ ll_set_lock_data(ll_i2sbi(parent)->ll_md_exp, inode, it, &bits);
+
+		/* We used to query the real size from the OSTs here, but this
+		 * is not needed: for stat() calls the size is updated by the
+		 * subsequent do_revalidate()->ll_inode_revalidate_it() in 2.4
+		 * and vfs_getattr_it->ll_getattr()->ll_inode_revalidate_it()
+		 * in 2.6.  Everybody else who needs the correct file size
+		 * calls ll_glimpse_size() or some equivalent themselves
+		 * anyway.  Also see bug 7198. */
+ }
+
+ /* Only hash *de if it is unhashed (new dentry).
+	 * Atomic_open may pass in hashed dentries for open.
+ */
+ if (d_unhashed(*de))
+ *de = ll_splice_alias(inode, *de);
+
+ if (!it_disposition(it, DISP_LOOKUP_NEG)) {
+		/* we have the lookup lock - unhide dentry */
+ if (bits & MDS_INODELOCK_LOOKUP)
+ d_lustre_revalidate(*de);
+ } else if (!it_disposition(it, DISP_OPEN_CREATE)) {
+ /* If file created on server, don't depend on parent UPDATE
+ * lock to unhide it. It is left hidden and next lookup can
+ * find it in ll_splice_alias.
+ */
+ /* Check that parent has UPDATE lock. */
+ struct lookup_intent parent_it = {
+ .it_op = IT_GETATTR,
+ .d.lustre.it_lock_handle = 0 };
+
+ if (md_revalidate_lock(ll_i2mdexp(parent), &parent_it,
+ &ll_i2info(parent)->lli_fid, NULL)) {
+ d_lustre_revalidate(*de);
+ ll_intent_release(&parent_it);
+ }
+ }
+
+ return 0;
+}
+
+static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
+ struct lookup_intent *it, int lookup_flags)
+{
+ struct lookup_intent lookup_it = { .it_op = IT_LOOKUP };
+ struct dentry *save = dentry, *retval;
+ struct ptlrpc_request *req = NULL;
+ struct md_op_data *op_data;
+ struct it_cb_data icbd;
+ __u32 opc;
+ int rc;
+
+ if (dentry->d_name.len > ll_i2sbi(parent)->ll_namelen)
+ return ERR_PTR(-ENAMETOOLONG);
+
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p),intent=%s\n",
+ dentry->d_name.len, dentry->d_name.name, parent->i_ino,
+ parent->i_generation, parent, LL_IT2STR(it));
+
+ if (d_mountpoint(dentry))
+ CERROR("Tell Peter, lookup on mtpt, it %s\n", LL_IT2STR(it));
+
+ ll_frob_intent(&it, &lookup_it);
+
+ /* As do_lookup is called before follow_mount, root dentry may be left
+ * not valid, revalidate it here. */
+ if (parent->i_sb->s_root && (parent->i_sb->s_root->d_inode == parent) &&
+ (it->it_op & (IT_OPEN | IT_CREAT))) {
+ rc = ll_inode_revalidate_it(parent->i_sb->s_root, it,
+ MDS_INODELOCK_LOOKUP);
+ if (rc)
+ return ERR_PTR(rc);
+ }
+
+ if (it->it_op == IT_GETATTR) {
+ rc = ll_statahead_enter(parent, &dentry, 0);
+ if (rc == 1) {
+ if (dentry == save)
+ GOTO(out, retval = NULL);
+ GOTO(out, retval = dentry);
+ }
+ }
+
+ icbd.icbd_childp = &dentry;
+ icbd.icbd_parent = parent;
+
+ if (it->it_op & IT_CREAT ||
+ (it->it_op & IT_OPEN && it->it_create_mode & O_CREAT))
+ opc = LUSTRE_OPC_CREATE;
+ else
+ opc = LUSTRE_OPC_ANY;
+
+ op_data = ll_prep_md_op_data(NULL, parent, NULL, dentry->d_name.name,
+ dentry->d_name.len, lookup_flags, opc,
+ NULL);
+ if (IS_ERR(op_data))
+ return (void *)op_data;
+
+ /* enforce umask if acl disabled or MDS doesn't support umask */
+ if (!IS_POSIXACL(parent) || !exp_connect_umask(ll_i2mdexp(parent)))
+ it->it_create_mode &= ~current_umask();
+
+ rc = md_intent_lock(ll_i2mdexp(parent), op_data, NULL, 0, it,
+ lookup_flags, &req, ll_md_blocking_ast, 0);
+ ll_finish_md_op_data(op_data);
+ if (rc < 0)
+ GOTO(out, retval = ERR_PTR(rc));
+
+ rc = ll_lookup_it_finish(req, it, &icbd);
+ if (rc != 0) {
+ ll_intent_release(it);
+ GOTO(out, retval = ERR_PTR(rc));
+ }
+
+ if ((it->it_op & IT_OPEN) && dentry->d_inode &&
+ !S_ISREG(dentry->d_inode->i_mode) &&
+ !S_ISDIR(dentry->d_inode->i_mode)) {
+ ll_release_openhandle(dentry, it);
+ }
+ ll_lookup_finish_locks(it, dentry);
+
+ if (dentry == save)
+ GOTO(out, retval = NULL);
+ else
+ GOTO(out, retval = dentry);
+ out:
+ if (req)
+ ptlrpc_req_finished(req);
+ if (it->it_op == IT_GETATTR && (retval == NULL || retval == dentry))
+ ll_statahead_mark(parent, dentry);
+ return retval;
+}
+
+static struct dentry *ll_lookup_nd(struct inode *parent, struct dentry *dentry,
+ unsigned int flags)
+{
+ struct lookup_intent *itp, it = { .it_op = IT_GETATTR };
+ struct dentry *de;
+
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p),flags=%u\n",
+ dentry->d_name.len, dentry->d_name.name, parent->i_ino,
+ parent->i_generation, parent, flags);
+
+ /* Optimize away (CREATE && !OPEN). Let .create handle the race. */
+	if ((flags & LOOKUP_CREATE) && !(flags & LOOKUP_OPEN)) {
+ ll_dops_init(dentry, 1, 1);
+ __d_lustre_invalidate(dentry);
+ d_add(dentry, NULL);
+ return NULL;
+ }
+
+ if (flags & (LOOKUP_PARENT|LOOKUP_OPEN|LOOKUP_CREATE))
+ itp = NULL;
+ else
+ itp = &it;
+ de = ll_lookup_it(parent, dentry, itp, 0);
+
+ if (itp != NULL)
+ ll_intent_release(itp);
+
+ return de;
+}
+
+/*
+ * For cached negative dentries and new dentries, handle lookup/create/open
+ * together.
+ */
+static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
+ struct file *file, unsigned open_flags,
+ umode_t mode, int *opened)
+{
+ struct lookup_intent *it;
+ struct dentry *de;
+ long long lookup_flags = LOOKUP_OPEN;
+ int rc = 0;
+
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p),file %p,"
+ "open_flags %x,mode %x opened %d\n",
+ dentry->d_name.len, dentry->d_name.name, dir->i_ino,
+ dir->i_generation, dir, file, open_flags, mode, *opened);
+
+ OBD_ALLOC(it, sizeof(*it));
+ if (!it)
+ return -ENOMEM;
+
+ it->it_op = IT_OPEN;
+ if (mode) {
+ it->it_op |= IT_CREAT;
+ lookup_flags |= LOOKUP_CREATE;
+ }
+ it->it_create_mode = (mode & S_IALLUGO) | S_IFREG;
+ it->it_flags = (open_flags & ~O_ACCMODE) | OPEN_FMODE(open_flags);
+
+ /* Dentry added to dcache tree in ll_lookup_it */
+ de = ll_lookup_it(dir, dentry, it, lookup_flags);
+ if (IS_ERR(de))
+ rc = PTR_ERR(de);
+ else if (de != NULL)
+ dentry = de;
+
+ if (!rc) {
+ if (it_disposition(it, DISP_OPEN_CREATE)) {
+ /* Dentry instantiated in ll_create_it. */
+ rc = ll_create_it(dir, dentry, mode, it);
+ if (rc) {
+ /* We dget in ll_splice_alias. */
+ if (de != NULL)
+ dput(de);
+ goto out_release;
+ }
+
+ *opened |= FILE_CREATED;
+ }
+ if (dentry->d_inode && it_disposition(it, DISP_OPEN_OPEN)) {
+ /* Open dentry. */
+ if (S_ISFIFO(dentry->d_inode->i_mode)) {
+ /* We cannot call open here as it would
+ * deadlock.
+ */
+ if (it_disposition(it, DISP_ENQ_OPEN_REF))
+ ptlrpc_req_finished(
+ (struct ptlrpc_request *)
+ it->d.lustre.it_data);
+ rc = finish_no_open(file, de);
+ } else {
+ file->private_data = it;
+ rc = finish_open(file, dentry, NULL, opened);
+ /* We dget in ll_splice_alias. finish_open takes
+ * care of dget for fd open.
+ */
+ if (de != NULL)
+ dput(de);
+ }
+ } else {
+ rc = finish_no_open(file, de);
+ }
+ }
+
+out_release:
+ ll_intent_release(it);
+ OBD_FREE(it, sizeof(*it));
+
+ return rc;
+}
+
+
+/* We depend on "mode" being set with the proper file type/umask by now */
+static struct inode *ll_create_node(struct inode *dir, const char *name,
+ int namelen, const void *data, int datalen,
+ int mode, __u64 extra,
+ struct lookup_intent *it)
+{
+ struct inode *inode = NULL;
+ struct ptlrpc_request *request = NULL;
+ struct ll_sb_info *sbi = ll_i2sbi(dir);
+ int rc;
+
+ LASSERT(it && it->d.lustre.it_disposition);
+
+ LASSERT(it_disposition(it, DISP_ENQ_CREATE_REF));
+ request = it->d.lustre.it_data;
+ it_clear_disposition(it, DISP_ENQ_CREATE_REF);
+ rc = ll_prep_inode(&inode, request, dir->i_sb, it);
+ if (rc)
+ GOTO(out, inode = ERR_PTR(rc));
+
+ LASSERT(ll_d_hlist_empty(&inode->i_dentry));
+
+ /* We asked for a lock on the directory, but were granted a
+ * lock on the inode. Since we finally have an inode pointer,
+ * stuff it in the lock. */
+ CDEBUG(D_DLMTRACE, "setting l_ast_data to inode %p (%lu/%u)\n",
+ inode, inode->i_ino, inode->i_generation);
+ ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL);
+ out:
+ ptlrpc_req_finished(request);
+ return inode;
+}
+
+/*
+ * By the time this is called, we already have created the directory cache
+ * entry for the new file, but it is so far negative - it has no inode.
+ *
+ * We defer creating the OBD object(s) until open, to keep the intent and
+ * non-intent code paths similar, and also because we do not have the MDS
+ * inode number before calling ll_create_node() (which is needed for LOV),
+ * so we would need to do yet another RPC to the MDS to store the LOV EA
+ * data on the MDS. If needed, we would pass the PACKED lmm as data and
+ * lmm_size in datalen (the MDS still has code which will handle that).
+ *
+ * If the create succeeds, we fill in the inode information
+ * with d_instantiate().
+ */
+static int ll_create_it(struct inode *dir, struct dentry *dentry, int mode,
+ struct lookup_intent *it)
+{
+ struct inode *inode;
+ int rc = 0;
+
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p),intent=%s\n",
+ dentry->d_name.len, dentry->d_name.name, dir->i_ino,
+ dir->i_generation, dir, LL_IT2STR(it));
+
+ rc = it_open_error(DISP_OPEN_CREATE, it);
+ if (rc)
+ return rc;
+
+ inode = ll_create_node(dir, dentry->d_name.name, dentry->d_name.len,
+ NULL, 0, mode, 0, it);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+
+ if (filename_is_volatile(dentry->d_name.name, dentry->d_name.len, NULL))
+ ll_i2info(inode)->lli_volatile = true;
+
+ d_instantiate(dentry, inode);
+ return 0;
+}
+
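+/* Update the directory's mtime/ctime from the MDS reply, never backwards. */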
+static void ll_update_times(struct ptlrpc_request *request,
+ struct inode *inode)
+{
+ struct mdt_body *body = req_capsule_server_get(&request->rq_pill,
+ &RMF_MDT_BODY);
+
+ LASSERT(body);
+ if (body->valid & OBD_MD_FLMTIME &&
+ body->mtime > LTIME_S(inode->i_mtime)) {
+ CDEBUG(D_INODE, "setting ino %lu mtime from %lu to "LPU64"\n",
+ inode->i_ino, LTIME_S(inode->i_mtime), body->mtime);
+ LTIME_S(inode->i_mtime) = body->mtime;
+ }
+ if (body->valid & OBD_MD_FLCTIME &&
+ body->ctime > LTIME_S(inode->i_ctime))
+ LTIME_S(inode->i_ctime) = body->ctime;
+}
+
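+/*
+ * Common create path for mknod/mkdir/symlink: send an MDS create RPC for
+ * @name under @dir and, if a child dentry was supplied, instantiate it with
+ * the new inode.
+ */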
+static int ll_new_node(struct inode *dir, struct qstr *name,
+ const char *tgt, int mode, int rdev,
+ struct dentry *dchild, __u32 opc)
+{
+ struct ptlrpc_request *request = NULL;
+ struct md_op_data *op_data;
+ struct inode *inode = NULL;
+ struct ll_sb_info *sbi = ll_i2sbi(dir);
+ int tgt_len = 0;
+ int err;
+
+ if (unlikely(tgt != NULL))
+ tgt_len = strlen(tgt) + 1;
+
+ op_data = ll_prep_md_op_data(NULL, dir, NULL, name->name,
+ name->len, 0, opc, NULL);
+ if (IS_ERR(op_data))
+ GOTO(err_exit, err = PTR_ERR(op_data));
+
+ err = md_create(sbi->ll_md_exp, op_data, tgt, tgt_len, mode,
+ from_kuid(&init_user_ns, current_fsuid()),
+ from_kgid(&init_user_ns, current_fsgid()),
+ cfs_curproc_cap_pack(), rdev, &request);
+ ll_finish_md_op_data(op_data);
+ if (err)
+ GOTO(err_exit, err);
+
+ ll_update_times(request, dir);
+
+ if (dchild) {
+ err = ll_prep_inode(&inode, request, dchild->d_sb, NULL);
+ if (err)
+ GOTO(err_exit, err);
+
+ d_instantiate(dchild, inode);
+ }
+err_exit:
+ ptlrpc_req_finished(request);
+
+ return err;
+}
+
+static int ll_mknod_generic(struct inode *dir, struct qstr *name, int mode,
+ unsigned rdev, struct dentry *dchild)
+{
+ int err;
+
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p) mode %o dev %x\n",
+ name->len, name->name, dir->i_ino, dir->i_generation, dir,
+ mode, rdev);
+
+ if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
+ mode &= ~current_umask();
+
+ switch (mode & S_IFMT) {
+ case 0:
+ mode |= S_IFREG; /* for mode = 0 case, fallthrough */
+ case S_IFREG:
+ case S_IFCHR:
+ case S_IFBLK:
+ case S_IFIFO:
+ case S_IFSOCK:
+ err = ll_new_node(dir, name, NULL, mode, rdev, dchild,
+ LUSTRE_OPC_MKNOD);
+ break;
+ case S_IFDIR:
+ err = -EPERM;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ if (!err)
+ ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_MKNOD, 1);
+
+ return err;
+}
+
+/*
+ * Plain create. Intent create is handled in atomic_open.
+ */
+static int ll_create_nd(struct inode *dir, struct dentry *dentry,
+ umode_t mode, bool want_excl)
+{
+ int rc;
+
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p),"
+ "flags=%u, excl=%d\n",
+ dentry->d_name.len, dentry->d_name.name, dir->i_ino,
+ dir->i_generation, dir, mode, want_excl);
+
+ rc = ll_mknod_generic(dir, &dentry->d_name, mode, 0, dentry);
+
+ ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_CREATE, 1);
+
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s, unhashed %d\n",
+ dentry->d_name.len, dentry->d_name.name, d_unhashed(dentry));
+
+ return rc;
+}
+
+static int ll_symlink_generic(struct inode *dir, struct qstr *name,
+ const char *tgt, struct dentry *dchild)
+{
+ int err;
+
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p),target=%.*s\n",
+ name->len, name->name, dir->i_ino, dir->i_generation,
+ dir, 3000, tgt);
+
+ err = ll_new_node(dir, name, (char *)tgt, S_IFLNK | S_IRWXUGO,
+ 0, dchild, LUSTRE_OPC_SYMLINK);
+
+ if (!err)
+ ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_SYMLINK, 1);
+
+ return err;
+}
+
+static int ll_link_generic(struct inode *src, struct inode *dir,
+ struct qstr *name, struct dentry *dchild)
+{
+ struct ll_sb_info *sbi = ll_i2sbi(dir);
+ struct ptlrpc_request *request = NULL;
+ struct md_op_data *op_data;
+ int err;
+
+ CDEBUG(D_VFSTRACE,
+ "VFS Op: inode=%lu/%u(%p), dir=%lu/%u(%p), target=%.*s\n",
+ src->i_ino, src->i_generation, src, dir->i_ino,
+ dir->i_generation, dir, name->len, name->name);
+
+ op_data = ll_prep_md_op_data(NULL, src, dir, name->name, name->len,
+ 0, LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+
+ err = md_link(sbi->ll_md_exp, op_data, &request);
+ ll_finish_md_op_data(op_data);
+ if (err)
+ GOTO(out, err);
+
+ ll_update_times(request, dir);
+ ll_stats_ops_tally(sbi, LPROC_LL_LINK, 1);
+out:
+ ptlrpc_req_finished(request);
+ return err;
+}
+
+static int ll_mkdir_generic(struct inode *dir, struct qstr *name,
+ int mode, struct dentry *dchild)
+
+{
+ int err;
+
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p)\n",
+ name->len, name->name, dir->i_ino, dir->i_generation, dir);
+
+ if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
+ mode &= ~current_umask();
+ mode = (mode & (S_IRWXUGO|S_ISVTX)) | S_IFDIR;
+ err = ll_new_node(dir, name, NULL, mode, 0, dchild, LUSTRE_OPC_MKDIR);
+
+ if (!err)
+ ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_MKDIR, 1);
+
+ return err;
+}
+
+/* Try to find the child dentry by its name.
+ * If found, put the resulting fid into @fid. */
+static void ll_get_child_fid(struct inode *dir, struct qstr *name,
+ struct lu_fid *fid)
+{
+ struct dentry *parent, *child;
+
+ parent = ll_d_hlist_entry(dir->i_dentry, struct dentry, d_alias);
+ child = d_lookup(parent, name);
+ if (child) {
+ if (child->d_inode)
+ *fid = *ll_inode2fid(child->d_inode);
+ dput(child);
+ }
+}
+
+static int ll_rmdir_generic(struct inode *dir, struct dentry *dparent,
+ struct dentry *dchild, struct qstr *name)
+{
+ struct ptlrpc_request *request = NULL;
+ struct md_op_data *op_data;
+ int rc;
+
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p)\n",
+ name->len, name->name, dir->i_ino, dir->i_generation, dir);
+
+ if (unlikely(ll_d_mountpoint(dparent, dchild, name)))
+ return -EBUSY;
+
+ op_data = ll_prep_md_op_data(NULL, dir, NULL, name->name, name->len,
+ S_IFDIR, LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+
+ ll_get_child_fid(dir, name, &op_data->op_fid3);
+ op_data->op_fid2 = op_data->op_fid3;
+ rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
+ ll_finish_md_op_data(op_data);
+ if (rc == 0) {
+ ll_update_times(request, dir);
+ ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_RMDIR, 1);
+ }
+
+ ptlrpc_req_finished(request);
+ return rc;
+}
+
+/**
+ * Remove dir entry
+ */
+int ll_rmdir_entry(struct inode *dir, char *name, int namelen)
+{
+ struct ptlrpc_request *request = NULL;
+ struct md_op_data *op_data;
+ int rc;
+
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p)\n",
+ namelen, name, dir->i_ino, dir->i_generation, dir);
+
+ op_data = ll_prep_md_op_data(NULL, dir, NULL, name, strlen(name),
+ S_IFDIR, LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+ op_data->op_cli_flags |= CLI_RM_ENTRY;
+ rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
+ ll_finish_md_op_data(op_data);
+ if (rc == 0) {
+ ll_update_times(request, dir);
+ ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_RMDIR, 1);
+ }
+
+ ptlrpc_req_finished(request);
+ return rc;
+}
+
+int ll_objects_destroy(struct ptlrpc_request *request, struct inode *dir)
+{
+ struct mdt_body *body;
+ struct lov_mds_md *eadata;
+ struct lov_stripe_md *lsm = NULL;
+ struct obd_trans_info oti = { 0 };
+ struct obdo *oa;
+ struct obd_capa *oc = NULL;
+ int rc;
+
+ /* req is swabbed so this is safe */
+ body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
+ if (!(body->valid & OBD_MD_FLEASIZE))
+ return 0;
+
+ if (body->eadatasize == 0) {
+ CERROR("OBD_MD_FLEASIZE set but eadatasize zero\n");
+ GOTO(out, rc = -EPROTO);
+ }
+
+ /* The MDS sent back the EA because we unlinked the last reference
+ * to this file. Use this EA to unlink the objects on the OST.
+ * It's opaque so we don't swab here; we leave it to obd_unpackmd() to
+ * check it is complete and sensible. */
+ eadata = req_capsule_server_sized_get(&request->rq_pill, &RMF_MDT_MD,
+ body->eadatasize);
+ LASSERT(eadata != NULL);
+
+ rc = obd_unpackmd(ll_i2dtexp(dir), &lsm, eadata, body->eadatasize);
+ if (rc < 0) {
+ CERROR("obd_unpackmd: %d\n", rc);
+ GOTO(out, rc);
+ }
+ LASSERT(rc >= sizeof(*lsm));
+
+ OBDO_ALLOC(oa);
+ if (oa == NULL)
+ GOTO(out_free_memmd, rc = -ENOMEM);
+
+ oa->o_oi = lsm->lsm_oi;
+ oa->o_mode = body->mode & S_IFMT;
+ oa->o_valid = OBD_MD_FLID | OBD_MD_FLTYPE | OBD_MD_FLGROUP;
+
+ if (body->valid & OBD_MD_FLCOOKIE) {
+ oa->o_valid |= OBD_MD_FLCOOKIE;
+ oti.oti_logcookies =
+ req_capsule_server_sized_get(&request->rq_pill,
+ &RMF_LOGCOOKIES,
+ sizeof(struct llog_cookie) *
+ lsm->lsm_stripe_count);
+ if (oti.oti_logcookies == NULL) {
+ oa->o_valid &= ~OBD_MD_FLCOOKIE;
+ body->valid &= ~OBD_MD_FLCOOKIE;
+ }
+ }
+
+ if (body->valid & OBD_MD_FLOSSCAPA) {
+ rc = md_unpack_capa(ll_i2mdexp(dir), request, &RMF_CAPA2, &oc);
+ if (rc)
+ GOTO(out_free_memmd, rc);
+ }
+
+ rc = obd_destroy(NULL, ll_i2dtexp(dir), oa, lsm, &oti,
+ ll_i2mdexp(dir), oc);
+ capa_put(oc);
+ if (rc)
+ CERROR("obd destroy objid "DOSTID" error %d\n",
+ POSTID(&lsm->lsm_oi), rc);
+out_free_memmd:
+ obd_free_memmd(ll_i2dtexp(dir), &lsm);
+ OBDO_FREE(oa);
+out:
+ return rc;
+}
+
+/* ll_unlink_generic() doesn't update the inode with the new link count.
+ * Instead, ll_ddelete() and ll_d_iput() will update it based upon whether
+ * any lock exists. They will also recycle dentries and inodes based upon
+ * locks. b=20433 */
+static int ll_unlink_generic(struct inode *dir, struct dentry *dparent,
+ struct dentry *dchild, struct qstr *name)
+{
+ struct ptlrpc_request *request = NULL;
+ struct md_op_data *op_data;
+ int rc;
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p)\n",
+ name->len, name->name, dir->i_ino, dir->i_generation, dir);
+
+ /*
+ * XXX: unlinking a bind mountpoint may end up calling here;
+ * just check for it as vfs_unlink() does.
+ */
+ if (unlikely(ll_d_mountpoint(dparent, dchild, name)))
+ return -EBUSY;
+
+ op_data = ll_prep_md_op_data(NULL, dir, NULL, name->name,
+ name->len, 0, LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+
+ ll_get_child_fid(dir, name, &op_data->op_fid3);
+ op_data->op_fid2 = op_data->op_fid3;
+ rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
+ ll_finish_md_op_data(op_data);
+ if (rc)
+ GOTO(out, rc);
+
+ ll_update_times(request, dir);
+ ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_UNLINK, 1);
+
+ rc = ll_objects_destroy(request, dir);
+out:
+ ptlrpc_req_finished(request);
+ return rc;
+}
+
+static int ll_rename_generic(struct inode *src, struct dentry *src_dparent,
+ struct dentry *src_dchild, struct qstr *src_name,
+ struct inode *tgt, struct dentry *tgt_dparent,
+ struct dentry *tgt_dchild, struct qstr *tgt_name)
+{
+ struct ptlrpc_request *request = NULL;
+ struct ll_sb_info *sbi = ll_i2sbi(src);
+ struct md_op_data *op_data;
+ int err;
+
+ CDEBUG(D_VFSTRACE, "VFS Op:oldname=%.*s,src_dir=%lu/%u(%p),newname=%.*s,"
+ "tgt_dir=%lu/%u(%p)\n", src_name->len, src_name->name,
+ src->i_ino, src->i_generation, src, tgt_name->len,
+ tgt_name->name, tgt->i_ino, tgt->i_generation, tgt);
+
+ if (unlikely(ll_d_mountpoint(src_dparent, src_dchild, src_name) ||
+ ll_d_mountpoint(tgt_dparent, tgt_dchild, tgt_name)))
+ return -EBUSY;
+
+ op_data = ll_prep_md_op_data(NULL, src, tgt, NULL, 0, 0,
+ LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+
+ ll_get_child_fid(src, src_name, &op_data->op_fid3);
+ ll_get_child_fid(tgt, tgt_name, &op_data->op_fid4);
+ err = md_rename(sbi->ll_md_exp, op_data,
+ src_name->name, src_name->len,
+ tgt_name->name, tgt_name->len, &request);
+ ll_finish_md_op_data(op_data);
+ if (!err) {
+ ll_update_times(request, src);
+ ll_update_times(request, tgt);
+ ll_stats_ops_tally(sbi, LPROC_LL_RENAME, 1);
+ err = ll_objects_destroy(request, src);
+ }
+
+ ptlrpc_req_finished(request);
+
+ return err;
+}
+
+static int ll_mknod(struct inode *dir, struct dentry *dchild, ll_umode_t mode,
+ dev_t rdev)
+{
+ return ll_mknod_generic(dir, &dchild->d_name, mode,
+ old_encode_dev(rdev), dchild);
+}
+
+static int ll_unlink(struct inode *dir, struct dentry *dentry)
+{
+ return ll_unlink_generic(dir, NULL, dentry, &dentry->d_name);
+}
+
+static int ll_mkdir(struct inode *dir, struct dentry *dentry, ll_umode_t mode)
+{
+ return ll_mkdir_generic(dir, &dentry->d_name, mode, dentry);
+}
+
+static int ll_rmdir(struct inode *dir, struct dentry *dentry)
+{
+ return ll_rmdir_generic(dir, NULL, dentry, &dentry->d_name);
+}
+
+static int ll_symlink(struct inode *dir, struct dentry *dentry,
+ const char *oldname)
+{
+ return ll_symlink_generic(dir, &dentry->d_name, oldname, dentry);
+}
+
+static int ll_link(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *new_dentry)
+{
+ return ll_link_generic(old_dentry->d_inode, dir, &new_dentry->d_name,
+ new_dentry);
+}
+
+static int ll_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+{
+ int err;
+ err = ll_rename_generic(old_dir, NULL,
+ old_dentry, &old_dentry->d_name,
+ new_dir, NULL, new_dentry,
+ &new_dentry->d_name);
+ if (!err)
+ d_move(old_dentry, new_dentry);
+ return err;
+}
+
+struct inode_operations ll_dir_inode_operations = {
+ .mknod = ll_mknod,
+ .atomic_open = ll_atomic_open,
+ .lookup = ll_lookup_nd,
+ .create = ll_create_nd,
+ /* We need all these non-raw things for NFSD, to avoid patching it. */
+ .unlink = ll_unlink,
+ .mkdir = ll_mkdir,
+ .rmdir = ll_rmdir,
+ .symlink = ll_symlink,
+ .link = ll_link,
+ .rename = ll_rename,
+ .setattr = ll_setattr,
+ .getattr = ll_getattr,
+ .permission = ll_inode_permission,
+ .setxattr = ll_setxattr,
+ .getxattr = ll_getxattr,
+ .listxattr = ll_listxattr,
+ .removexattr = ll_removexattr,
+ .get_acl = ll_get_acl,
+};
+
+struct inode_operations ll_special_inode_operations = {
+ .setattr = ll_setattr,
+ .getattr = ll_getattr,
+ .permission = ll_inode_permission,
+ .setxattr = ll_setxattr,
+ .getxattr = ll_getxattr,
+ .listxattr = ll_listxattr,
+ .removexattr = ll_removexattr,
+ .get_acl = ll_get_acl,
+};
diff --git a/drivers/staging/lustre/lustre/llite/remote_perm.c b/drivers/staging/lustre/lustre/llite/remote_perm.c
new file mode 100644
index 0000000..dedd56a
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/remote_perm.c
@@ -0,0 +1,330 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/llite/remote_perm.c
+ *
+ * Lustre Permission Cache for Remote Client
+ *
+ * Author: Lai Siyao <lsy@clusterfs.com>
+ * Author: Fan Yong <fanyong@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <lustre_lite.h>
+#include <lustre_ha.h>
+#include <lustre_dlm.h>
+#include <lprocfs_status.h>
+#include <lustre_disk.h>
+#include <lustre_param.h>
+#include "llite_internal.h"
+
+struct kmem_cache *ll_remote_perm_cachep;
+struct kmem_cache *ll_rmtperm_hash_cachep;
+
+static inline struct ll_remote_perm *alloc_ll_remote_perm(void)
+{
+ struct ll_remote_perm *lrp;
+
+ OBD_SLAB_ALLOC_PTR_GFP(lrp, ll_remote_perm_cachep, GFP_KERNEL);
+ if (lrp)
+ INIT_HLIST_NODE(&lrp->lrp_list);
+ return lrp;
+}
+
+static inline void free_ll_remote_perm(struct ll_remote_perm *lrp)
+{
+ if (!lrp)
+ return;
+
+ if (!hlist_unhashed(&lrp->lrp_list))
+ hlist_del(&lrp->lrp_list);
+ OBD_SLAB_FREE(lrp, ll_remote_perm_cachep, sizeof(*lrp));
+}
+
+struct hlist_head *alloc_rmtperm_hash(void)
+{
+ struct hlist_head *hash;
+ int i;
+
+ OBD_SLAB_ALLOC_GFP(hash, ll_rmtperm_hash_cachep,
+ REMOTE_PERM_HASHSIZE * sizeof(*hash),
+ GFP_IOFS);
+ if (!hash)
+ return NULL;
+
+ for (i = 0; i < REMOTE_PERM_HASHSIZE; i++)
+ INIT_HLIST_HEAD(hash + i);
+
+ return hash;
+}
+
+void free_rmtperm_hash(struct hlist_head *hash)
+{
+ int i;
+ struct ll_remote_perm *lrp;
+ struct hlist_node *next;
+
+ if (!hash)
+ return;
+
+ for (i = 0; i < REMOTE_PERM_HASHSIZE; i++)
+ hlist_for_each_entry_safe(lrp, next, hash + i,
+ lrp_list)
+ free_ll_remote_perm(lrp);
+ OBD_SLAB_FREE(hash, ll_rmtperm_hash_cachep,
+ REMOTE_PERM_HASHSIZE * sizeof(*hash));
+}
+
+static inline int remote_perm_hashfunc(uid_t uid)
+{
+ return uid & (REMOTE_PERM_HASHSIZE - 1);
+}
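+
+/*
+ * Illustrative note: the mask above is only equivalent to
+ * "uid % REMOTE_PERM_HASHSIZE" when REMOTE_PERM_HASHSIZE is a power of two.
+ * Assuming, for example, a hash size of 16, uid 1000 lands in bucket
+ * 1000 & 15 == 8.
+ */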
+
+/* NB: setxid permission is not checked here; instead it is done on the
+ * MDT when the client gets the remote permission. */
+static int do_check_remote_perm(struct ll_inode_info *lli, int mask)
+{
+ struct hlist_head *head;
+ struct ll_remote_perm *lrp;
+ int found = 0, rc;
+
+ if (!lli->lli_remote_perms)
+ return -ENOENT;
+
+ head = lli->lli_remote_perms +
+ remote_perm_hashfunc(from_kuid(&init_user_ns, current_uid()));
+
+ spin_lock(&lli->lli_lock);
+ hlist_for_each_entry(lrp, head, lrp_list) {
+ if (lrp->lrp_uid != from_kuid(&init_user_ns, current_uid()))
+ continue;
+ if (lrp->lrp_gid != from_kgid(&init_user_ns, current_gid()))
+ continue;
+ if (lrp->lrp_fsuid != from_kuid(&init_user_ns, current_fsuid()))
+ continue;
+ if (lrp->lrp_fsgid != from_kgid(&init_user_ns, current_fsgid()))
+ continue;
+ found = 1;
+ break;
+ }
+
+ if (!found)
+ GOTO(out, rc = -ENOENT);
+
+ CDEBUG(D_SEC, "found remote perm: %u/%u/%u/%u - %#x\n",
+ lrp->lrp_uid, lrp->lrp_gid, lrp->lrp_fsuid, lrp->lrp_fsgid,
+ lrp->lrp_access_perm);
+ rc = ((lrp->lrp_access_perm & mask) == mask) ? 0 : -EACCES;
+
+out:
+ spin_unlock(&lli->lli_lock);
+ return rc;
+}
+
+int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_remote_perm *lrp = NULL, *tmp = NULL;
+ struct hlist_head *head, *perm_hash = NULL;
+
+ LASSERT(ll_i2sbi(inode)->ll_flags & LL_SBI_RMT_CLIENT);
+
+#if 0
+ if (perm->rp_uid != current->uid ||
+ perm->rp_gid != current->gid ||
+ perm->rp_fsuid != current->fsuid ||
+ perm->rp_fsgid != current->fsgid) {
+ /* user might setxid in this small period */
+ CDEBUG(D_SEC,
+ "remote perm user %u/%u/%u/%u != current %u/%u/%u/%u\n",
+ perm->rp_uid, perm->rp_gid, perm->rp_fsuid,
+ perm->rp_fsgid, current->uid, current->gid,
+ current->fsuid, current->fsgid);
+ return -EAGAIN;
+ }
+#endif
+
+ if (!lli->lli_remote_perms) {
+ perm_hash = alloc_rmtperm_hash();
+ if (perm_hash == NULL) {
+ CERROR("alloc lli_remote_perms failed!\n");
+ return -ENOMEM;
+ }
+ }
+
+ spin_lock(&lli->lli_lock);
+
+ if (!lli->lli_remote_perms)
+ lli->lli_remote_perms = perm_hash;
+ else if (perm_hash)
+ free_rmtperm_hash(perm_hash);
+
+ head = lli->lli_remote_perms + remote_perm_hashfunc(perm->rp_uid);
+
+again:
+ hlist_for_each_entry(tmp, head, lrp_list) {
+ if (tmp->lrp_uid != perm->rp_uid)
+ continue;
+ if (tmp->lrp_gid != perm->rp_gid)
+ continue;
+ if (tmp->lrp_fsuid != perm->rp_fsuid)
+ continue;
+ if (tmp->lrp_fsgid != perm->rp_fsgid)
+ continue;
+ if (lrp)
+ free_ll_remote_perm(lrp);
+ lrp = tmp;
+ break;
+ }
+
+ if (!lrp) {
+ spin_unlock(&lli->lli_lock);
+ lrp = alloc_ll_remote_perm();
+ if (!lrp) {
+ CERROR("alloc memory for ll_remote_perm failed!\n");
+ return -ENOMEM;
+ }
+ spin_lock(&lli->lli_lock);
+ goto again;
+ }
+
+ lrp->lrp_access_perm = perm->rp_access_perm;
+ if (lrp != tmp) {
+ lrp->lrp_uid = perm->rp_uid;
+ lrp->lrp_gid = perm->rp_gid;
+ lrp->lrp_fsuid = perm->rp_fsuid;
+ lrp->lrp_fsgid = perm->rp_fsgid;
+ hlist_add_head(&lrp->lrp_list, head);
+ }
+ lli->lli_rmtperm_time = cfs_time_current();
+ spin_unlock(&lli->lli_lock);
+
+ CDEBUG(D_SEC, "new remote perm@%p: %u/%u/%u/%u - %#x\n",
+ lrp, lrp->lrp_uid, lrp->lrp_gid, lrp->lrp_fsuid, lrp->lrp_fsgid,
+ lrp->lrp_access_perm);
+
+ return 0;
+}
+
+int lustre_check_remote_perm(struct inode *inode, int mask)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ptlrpc_request *req = NULL;
+ struct mdt_remote_perm *perm;
+ struct obd_capa *oc;
+ cfs_time_t save;
+ int i = 0, rc;
+
+ do {
+ save = lli->lli_rmtperm_time;
+ rc = do_check_remote_perm(lli, mask);
+ if (!rc || (rc != -ENOENT && i))
+ break;
+
+ might_sleep();
+
+ mutex_lock(&lli->lli_rmtperm_mutex);
+ /* check again */
+ if (save != lli->lli_rmtperm_time) {
+ rc = do_check_remote_perm(lli, mask);
+ if (!rc || (rc != -ENOENT && i)) {
+ mutex_unlock(&lli->lli_rmtperm_mutex);
+ break;
+ }
+ }
+
+ if (i++ > 5) {
+ CERROR("remote permission check fell into an infinite loop!\n");
+ LBUG();
+ }
+
+ oc = ll_mdscapa_get(inode);
+ rc = md_get_remote_perm(sbi->ll_md_exp, ll_inode2fid(inode), oc,
+ ll_i2suppgid(inode), &req);
+ capa_put(oc);
+ if (rc) {
+ mutex_unlock(&lli->lli_rmtperm_mutex);
+ break;
+ }
+
+ perm = req_capsule_server_swab_get(&req->rq_pill, &RMF_ACL,
+ lustre_swab_mdt_remote_perm);
+ if (unlikely(perm == NULL)) {
+ mutex_unlock(&lli->lli_rmtperm_mutex);
+ rc = -EPROTO;
+ break;
+ }
+
+ rc = ll_update_remote_perm(inode, perm);
+ mutex_unlock(&lli->lli_rmtperm_mutex);
+ if (rc == -ENOMEM)
+ break;
+
+ ptlrpc_req_finished(req);
+ req = NULL;
+ } while (1);
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+#if 0 /* NB: remote perms can't be freed in ll_mdc_blocking_ast of UPDATE lock,
+ * because it will fail sanity test 48.
+ */
+void ll_free_remote_perms(struct inode *inode)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct hlist_head *hash = lli->lli_remote_perms;
+ struct ll_remote_perm *lrp;
+ struct hlist_node *node, *next;
+ int i;
+
+ LASSERT(hash);
+
+ spin_lock(&lli->lli_lock);
+
+ for (i = 0; i < REMOTE_PERM_HASHSIZE; i++) {
+ hlist_for_each_entry_safe(lrp, node, next, hash + i,
+ lrp_list)
+ free_ll_remote_perm(lrp);
+ }
+
+ spin_unlock(&lli->lli_lock);
+}
+#endif
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
new file mode 100644
index 0000000..ae0dc44
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -0,0 +1,1298 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/llite/rw.c
+ *
+ * Lustre Lite I/O page cache routines shared by different kernel revs
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/writeback.h>
+#include <asm/uaccess.h>
+
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+/* current_is_kswapd() */
+#include <linux/swap.h>
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+#include <lustre_lite.h>
+#include <obd_cksum.h>
+#include "llite_internal.h"
+#include <linux/lustre_compat25.h>
+
+/**
+ * Finalizes cl-data before exiting typical address_space operation. Dual to
+ * ll_cl_init().
+ */
+static void ll_cl_fini(struct ll_cl_context *lcc)
+{
+ struct lu_env *env = lcc->lcc_env;
+ struct cl_io *io = lcc->lcc_io;
+ struct cl_page *page = lcc->lcc_page;
+
+ LASSERT(lcc->lcc_cookie == current);
+ LASSERT(env != NULL);
+
+ if (page != NULL) {
+ lu_ref_del(&page->cp_reference, "cl_io", io);
+ cl_page_put(env, page);
+ }
+
+ if (io && lcc->lcc_created) {
+ cl_io_end(env, io);
+ cl_io_unlock(env, io);
+ cl_io_iter_fini(env, io);
+ cl_io_fini(env, io);
+ }
+ cl_env_put(env, &lcc->lcc_refcheck);
+}
+
+/**
+ * Initializes common cl-data at the typical address_space operation entry
+ * point.
+ */
+static struct ll_cl_context *ll_cl_init(struct file *file,
+ struct page *vmpage, int create)
+{
+ struct ll_cl_context *lcc;
+ struct lu_env *env;
+ struct cl_io *io;
+ struct cl_object *clob;
+ struct ccc_io *cio;
+
+ int refcheck;
+ int result = 0;
+
+ clob = ll_i2info(vmpage->mapping->host)->lli_clob;
+ LASSERT(clob != NULL);
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return ERR_CAST(env);
+
+ lcc = &vvp_env_info(env)->vti_io_ctx;
+ memset(lcc, 0, sizeof(*lcc));
+ lcc->lcc_env = env;
+ lcc->lcc_refcheck = refcheck;
+ lcc->lcc_cookie = current;
+
+ cio = ccc_env_io(env);
+ io = cio->cui_cl.cis_io;
+ if (io == NULL && create) {
+ struct inode *inode = vmpage->mapping->host;
+ loff_t pos;
+
+ if (mutex_trylock(&inode->i_mutex)) {
+ mutex_unlock(&inode->i_mutex);
+
+ /* This is bad. Someone is trying to write the
+ * page without holding the inode mutex, which means we
+ * could add dirty pages into the cache during truncate. */
+ CERROR("Proc %s is dirtying page w/o inode lock, this "
+ "will break truncate.\n", current->comm);
+ dump_stack();
+ LBUG();
+ return ERR_PTR(-EIO);
+ }
+
+ /*
+ * Loop-back driver calls ->prepare_write() and ->sendfile()
+ * methods directly, bypassing file system ->write() operation,
+ * so cl_io has to be created here.
+ */
+ io = ccc_env_thread_io(env);
+ ll_io_init(io, file, 1);
+
+ /* No lock at all for this kind of IO - we can't take one because
+ * we already hold the page lock; it would cause a deadlock.
+ * XXX: This causes poor performance for the loop device - one page
+ * per RPC.
+ * In order to get better performance, users should use the
+ * lloop driver instead.
+ */
+ io->ci_lockreq = CILR_NEVER;
+
+ pos = (loff_t)vmpage->index << PAGE_CACHE_SHIFT;
+
+ /* Create a temp IO to serve write. */
+ result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_CACHE_SIZE);
+ if (result == 0) {
+ cio->cui_fd = LUSTRE_FPRIVATE(file);
+ cio->cui_iov = NULL;
+ cio->cui_nrsegs = 0;
+ result = cl_io_iter_init(env, io);
+ if (result == 0) {
+ result = cl_io_lock(env, io);
+ if (result == 0)
+ result = cl_io_start(env, io);
+ }
+ } else
+ result = io->ci_result;
+ lcc->lcc_created = 1;
+ }
+
+ lcc->lcc_io = io;
+ if (io == NULL)
+ result = -EIO;
+ if (result == 0) {
+ struct cl_page *page;
+
+ LASSERT(io != NULL);
+ LASSERT(io->ci_state == CIS_IO_GOING);
+ LASSERT(cio->cui_fd == LUSTRE_FPRIVATE(file));
+ page = cl_page_find(env, clob, vmpage->index, vmpage,
+ CPT_CACHEABLE);
+ if (!IS_ERR(page)) {
+ lcc->lcc_page = page;
+ lu_ref_add(&page->cp_reference, "cl_io", io);
+ result = 0;
+ } else
+ result = PTR_ERR(page);
+ }
+ if (result) {
+ ll_cl_fini(lcc);
+ lcc = ERR_PTR(result);
+ }
+
+ CDEBUG(D_VFSTRACE, "%lu@"DFID" -> %d %p %p\n",
+ vmpage->index, PFID(lu_object_fid(&clob->co_lu)), result,
+ env, io);
+ return lcc;
+}
+
+static struct ll_cl_context *ll_cl_get(void)
+{
+ struct ll_cl_context *lcc;
+ struct lu_env *env;
+ int refcheck;
+
+ env = cl_env_get(&refcheck);
+ LASSERT(!IS_ERR(env));
+ lcc = &vvp_env_info(env)->vti_io_ctx;
+ LASSERT(env == lcc->lcc_env);
+ LASSERT(current == lcc->lcc_cookie);
+ cl_env_put(env, &refcheck);
+
+ /* the env was obtained in ll_cl_init(), so it is still usable. */
+ return lcc;
+}
+
+/**
+ * ->prepare_write() address space operation called by generic_file_write()
+ * for every page during write.
+ */
+int ll_prepare_write(struct file *file, struct page *vmpage, unsigned from,
+ unsigned to)
+{
+ struct ll_cl_context *lcc;
+ int result;
+
+ lcc = ll_cl_init(file, vmpage, 1);
+ if (!IS_ERR(lcc)) {
+ struct lu_env *env = lcc->lcc_env;
+ struct cl_io *io = lcc->lcc_io;
+ struct cl_page *page = lcc->lcc_page;
+
+ cl_page_assume(env, io, page);
+
+ result = cl_io_prepare_write(env, io, page, from, to);
+ if (result == 0) {
+ /*
+ * Add a reference, so that page is not evicted from
+ * the cache until ->commit_write() is called.
+ */
+ cl_page_get(page);
+ lu_ref_add(&page->cp_reference, "prepare_write",
+ current);
+ } else {
+ cl_page_unassume(env, io, page);
+ ll_cl_fini(lcc);
+ }
+ /* returning 0 in prepare assumes commit must be called
+ * afterwards */
+ } else {
+ result = PTR_ERR(lcc);
+ }
+ return result;
+}
+
+int ll_commit_write(struct file *file, struct page *vmpage, unsigned from,
+ unsigned to)
+{
+ struct ll_cl_context *lcc;
+ struct lu_env *env;
+ struct cl_io *io;
+ struct cl_page *page;
+ int result = 0;
+
+ lcc = ll_cl_get();
+ env = lcc->lcc_env;
+ page = lcc->lcc_page;
+ io = lcc->lcc_io;
+
+ LASSERT(cl_page_is_owned(page, io));
+ LASSERT(from <= to);
+ if (from != to) /* handle short write case. */
+ result = cl_io_commit_write(env, io, page, from, to);
+ if (cl_page_is_owned(page, io))
+ cl_page_unassume(env, io, page);
+
+ /*
+ * Release reference acquired by ll_prepare_write().
+ */
+ lu_ref_del(&page->cp_reference, "prepare_write", current);
+ cl_page_put(env, page);
+ ll_cl_fini(lcc);
+ return result;
+}
+
+struct obd_capa *cl_capa_lookup(struct inode *inode, enum cl_req_type crt)
+{
+ __u64 opc;
+
+ opc = crt == CRT_WRITE ? CAPA_OPC_OSS_WRITE : CAPA_OPC_OSS_RW;
+ return ll_osscapa_get(inode, opc);
+}
+
+static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
+
+/**
+ * Get readahead pages from the filesystem readahead pool of the client for a
+ * thread.
+ *
+ * \param sbi superblock for filesystem readahead state ll_ra_info
+ * \param ria per-thread readahead state
+ * \param pages number of pages requested for readahead for the thread.
+ *
+ * WARNING: This algorithm is used to reduce contention on sbi->ll_lock.
+ * It should work well if ra_max_pages is much greater than the single
+ * file's read-ahead window, and there are not too many threads contending
+ * for these readahead pages.
+ *
+ * TODO: There may be a 'global sync problem' if many threads are trying
+ * to get an ra budget that is larger than the remaining readahead pages
+ * and reach here at exactly the same time. They will compute \a ret to
+ * consume the remaining pages, but will fail at atomic_add_return() and
+ * get a zero ra window, although there is still ra space remaining. - Jay */
+
+static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
+ struct ra_io_arg *ria,
+ unsigned long pages)
+{
+ struct ll_ra_info *ra = &sbi->ll_ra_info;
+ long ret;
+
+ /* If the remaining read-ahead budget is less than 1M, do not do
+ * read-ahead; otherwise it would form small read RPCs (< 1M), which
+ * hurt server performance a lot. */
+ ret = min(ra->ra_max_pages - atomic_read(&ra->ra_cur_pages), pages);
+ if (ret < 0 || ret < min_t(long, PTLRPC_MAX_BRW_PAGES, pages))
+ GOTO(out, ret = 0);
+
+ /* If the non-strided (ria_pages == 0) readahead window
+ * (ria_start + ret) has grown across an RPC boundary, then trim
+ * readahead size by the amount beyond the RPC so it ends on an
+ * RPC boundary. If the readahead window is already ending on
+ * an RPC boundary (beyond_rpc == 0), or smaller than a full
+ * RPC (beyond_rpc < ret) the readahead size is unchanged.
+ * The (beyond_rpc != 0) check is skipped since the conditional
+ * branch is more expensive than subtracting zero from the result.
+ *
+ * Strided read is left unaligned to avoid small fragments beyond
+ * the RPC boundary from needing an extra read RPC. */
+ if (ria->ria_pages == 0) {
+ long beyond_rpc = (ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES;
+ if (/* beyond_rpc != 0 && */ beyond_rpc < ret)
+ ret -= beyond_rpc;
+ }
+
+ if (atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
+ atomic_sub(ret, &ra->ra_cur_pages);
+ ret = 0;
+ }
+
+out:
+ return ret;
+}
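+
+/*
+ * Worked example of the RPC-boundary trimming above, assuming
+ * PTLRPC_MAX_BRW_PAGES is 256 (1MB RPCs with 4KB pages): with
+ * ria_start = 100 and ret = 200, (100 + 200) % 256 = 44 pages extend
+ * beyond the RPC boundary; since 44 < 200, ret is trimmed to 156 so the
+ * reserved window covers pages 100..255 and ends exactly on the boundary.
+ */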
+
+void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
+{
+ struct ll_ra_info *ra = &sbi->ll_ra_info;
+ atomic_sub(len, &ra->ra_cur_pages);
+}
+
+static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which)
+{
+ LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
+ lprocfs_counter_incr(sbi->ll_ra_stats, which);
+}
+
+void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
+{
+ struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
+ ll_ra_stats_inc_sbi(sbi, which);
+}
+
+#define RAS_CDEBUG(ras) \
+ CDEBUG(D_READA, \
+ "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu " \
+ "csr %lu sf %lu sp %lu sl %lu\n", \
+ ras->ras_last_readpage, ras->ras_consecutive_requests, \
+ ras->ras_consecutive_pages, ras->ras_window_start, \
+ ras->ras_window_len, ras->ras_next_readahead, \
+ ras->ras_requests, ras->ras_request_index, \
+ ras->ras_consecutive_stride_requests, ras->ras_stride_offset, \
+ ras->ras_stride_pages, ras->ras_stride_length)
+
+static int index_in_window(unsigned long index, unsigned long point,
+ unsigned long before, unsigned long after)
+{
+ unsigned long start = point - before, end = point + after;
+
+ if (start > point)
+ start = 0;
+ if (end < point)
+ end = ~0;
+
+ return start <= index && index <= end;
+}
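+
+/*
+ * Illustrative example: with point = 100, before = 8, after = 8 the window
+ * is [92, 108], so index 95 is inside and index 120 is not.  The clamping
+ * above handles wrap-around, e.g. point = 3, before = 8 would underflow,
+ * so start is clamped to 0.
+ */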
+
+static struct ll_readahead_state *ll_ras_get(struct file *f)
+{
+ struct ll_file_data *fd;
+
+ fd = LUSTRE_FPRIVATE(f);
+ return &fd->fd_ras;
+}
+
+void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
+{
+ struct ll_readahead_state *ras;
+
+ ras = ll_ras_get(f);
+
+ spin_lock(&ras->ras_lock);
+ ras->ras_requests++;
+ ras->ras_request_index = 0;
+ ras->ras_consecutive_requests++;
+ rar->lrr_reader = current;
+
+ list_add(&rar->lrr_linkage, &ras->ras_read_beads);
+ spin_unlock(&ras->ras_lock);
+}
+
+void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
+{
+ struct ll_readahead_state *ras;
+
+ ras = ll_ras_get(f);
+
+ spin_lock(&ras->ras_lock);
+ list_del_init(&rar->lrr_linkage);
+ spin_unlock(&ras->ras_lock);
+}
+
+static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
+{
+ struct ll_ra_read *scan;
+
+ list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
+ if (scan->lrr_reader == current)
+ return scan;
+ }
+ return NULL;
+}
+
+struct ll_ra_read *ll_ra_read_get(struct file *f)
+{
+ struct ll_readahead_state *ras;
+ struct ll_ra_read *bead;
+
+ ras = ll_ras_get(f);
+
+ spin_lock(&ras->ras_lock);
+ bead = ll_ra_read_get_locked(ras);
+ spin_unlock(&ras->ras_lock);
+ return bead;
+}
+
+static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *queue, struct cl_page *page,
+ struct page *vmpage)
+{
+ struct ccc_page *cp;
+ int rc;
+
+ rc = 0;
+ cl_page_assume(env, io, page);
+ lu_ref_add(&page->cp_reference, "ra", current);
+ cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
+ if (!cp->cpg_defer_uptodate && !PageUptodate(vmpage)) {
+ rc = cl_page_is_under_lock(env, io, page);
+ if (rc == -EBUSY) {
+ cp->cpg_defer_uptodate = 1;
+ cp->cpg_ra_used = 0;
+ cl_page_list_add(queue, page);
+ rc = 1;
+ } else {
+ cl_page_delete(env, page);
+ rc = -ENOLCK;
+ }
+ } else {
+ /* skip completed pages */
+ cl_page_unassume(env, io, page);
+ }
+ lu_ref_del(&page->cp_reference, "ra", current);
+ cl_page_put(env, page);
+ return rc;
+}
+
+/**
+ * Initiates read-ahead of a page with given index.
+ *
+ * \retval +ve: page was added to \a queue.
+ *
+ * \retval -ENOLCK: there is no extent lock for this part of a file, stop
+ * read-ahead.
+ *
+ * \retval -ve, 0: page wasn't added to \a queue for other reason.
+ */
+static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *queue,
+ pgoff_t index, struct address_space *mapping)
+{
+ struct page *vmpage;
+ struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
+ struct cl_page *page;
+ enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */
+ unsigned int gfp_mask;
+ int rc = 0;
+ const char *msg = NULL;
+
+ gfp_mask = GFP_HIGHUSER & ~__GFP_WAIT;
+#ifdef __GFP_NOWARN
+ gfp_mask |= __GFP_NOWARN;
+#endif
+ vmpage = grab_cache_page_nowait(mapping, index);
+ if (vmpage != NULL) {
+ /* Check if vmpage was truncated or reclaimed */
+ if (vmpage->mapping == mapping) {
+ page = cl_page_find(env, clob, vmpage->index,
+ vmpage, CPT_CACHEABLE);
+ if (!IS_ERR(page)) {
+ rc = cl_read_ahead_page(env, io, queue,
+ page, vmpage);
+ if (rc == -ENOLCK) {
+ which = RA_STAT_FAILED_MATCH;
+ msg = "lock match failed";
+ }
+ } else {
+ which = RA_STAT_FAILED_GRAB_PAGE;
+ msg = "cl_page_find failed";
+ }
+ } else {
+ which = RA_STAT_WRONG_GRAB_PAGE;
+ msg = "g_c_p_n returned invalid page";
+ }
+ if (rc != 1)
+ unlock_page(vmpage);
+ page_cache_release(vmpage);
+ } else {
+ which = RA_STAT_FAILED_GRAB_PAGE;
+ msg = "g_c_p_n failed";
+ }
+ if (msg != NULL) {
+ ll_ra_stats_inc(mapping, which);
+ CDEBUG(D_READA, "%s\n", msg);
+ }
+ return rc;
+}
+
+#define RIA_DEBUG(ria) \
+ CDEBUG(D_READA, "rs %lu re %lu ro %lu rl %lu rp %lu\n", \
+ ria->ria_start, ria->ria_end, ria->ria_stoff, ria->ria_length,\
+ ria->ria_pages)
+
+/* Limit this to the blocksize instead of PTLRPC_BRW_MAX_SIZE, since we don't
+ * know what the actual RPC size is. If this needs to change, it makes more
+ * sense to tune the i_blkbits value for the file based on the OSTs it is
+ * striped over, rather than having a constant value for all files here. */
+
+/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)).
+ * Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled
+ * by default, this should be adjusted together with max_read_ahead_mb
+ * and max_read_ahead_per_file_mb, otherwise the readahead budget can be used
+ * up quickly, which will affect read performance significantly. See LU-2816 */
+#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT)
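+
+/*
+ * Illustrative note: assuming 4KB pages (PAGE_CACHE_SHIFT == 12), the macro
+ * above evaluates to 256 pages, i.e. the read-ahead window always grows in
+ * 1MB steps; the inode argument is currently unused.
+ */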
+
+static inline int stride_io_mode(struct ll_readahead_state *ras)
+{
+ return ras->ras_consecutive_stride_requests > 1;
+}
+/* The function calculates how many pages will be read in
+ * [off, off + length], in such a stride IO area, where
+ * stride_offset = st_off, stride_length = st_len,
+ * stride_pages = st_pgs
+ *
+ * |------------------|*****|------------------|*****|------------|*****|....
+ * st_off
+ * |--- st_pgs ---|
+ * |----- st_len -----|
+ *
+ * How many pages it should read in such pattern
+ * |-------------------------------------------------------------|
+ * off
+ * |<------ length ------->|
+ *
+ * = |<----->| + |-------------------------------------| + |---|
+ * start_left st_pgs * i end_left
+ */
+static unsigned long
+stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs,
+ unsigned long off, unsigned long length)
+{
+ __u64 start = off > st_off ? off - st_off : 0;
+ __u64 end = off + length > st_off ? off + length - st_off : 0;
+ unsigned long start_left = 0;
+ unsigned long end_left = 0;
+ unsigned long pg_count;
+
+ if (st_len == 0 || length == 0 || end == 0)
+ return length;
+
+ start_left = do_div(start, st_len);
+ if (start_left < st_pgs)
+ start_left = st_pgs - start_left;
+ else
+ start_left = 0;
+
+ end_left = do_div(end, st_len);
+ if (end_left > st_pgs)
+ end_left = st_pgs;
+
+ CDEBUG(D_READA, "start "LPU64", end "LPU64" start_left %lu end_left %lu\n",
+ start, end, start_left, end_left);
+
+ if (start == end)
+ pg_count = end_left - (st_pgs - start_left);
+ else
+ pg_count = start_left + st_pgs * (end - start - 1) + end_left;
+
+ CDEBUG(D_READA, "st_off %lu, st_len %lu st_pgs %lu off %lu length %lu "
+ "pgcount %lu\n", st_off, st_len, st_pgs, off, length, pg_count);
+
+ return pg_count;
+}
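+
+/*
+ * Worked example for stride_pg_count(): with st_off = 0, st_len = 16,
+ * st_pgs = 4, off = 2 and length = 50 the range covers pages 2..51.
+ * start_left = 4 - (2 % 16) = 2, end_left = min(52 % 16, 4) = 4, and there
+ * are 52/16 - 2/16 - 1 = 2 full stride periods in between, so
+ * pg_count = 2 + 2 * 4 + 4 = 14 pages (2..3, 16..19, 32..35, 48..51).
+ */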
+
+static int ria_page_count(struct ra_io_arg *ria)
+{
+ __u64 length = ria->ria_end >= ria->ria_start ?
+ ria->ria_end - ria->ria_start + 1 : 0;
+
+ return stride_pg_count(ria->ria_stoff, ria->ria_length,
+ ria->ria_pages, ria->ria_start,
+ length);
+}
+
+/* Check whether the index is in the defined ra-window */
+static int ras_inside_ra_window(unsigned long idx, struct ra_io_arg *ria)
+{
+ /* If ria_length == ria_pages, it means non-stride I/O mode,
+ * and idx should always be inside the read-ahead window in this case.
+ * For stride I/O mode, just check whether the idx is inside
+ * the ria_pages. */
+ return ria->ria_length == 0 || ria->ria_length == ria->ria_pages ||
+ (idx >= ria->ria_stoff && (idx - ria->ria_stoff) %
+ ria->ria_length < ria->ria_pages);
+}
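+
+/*
+ * Illustrative example: for a stride window with ria_stoff = 0,
+ * ria_length = 16 and ria_pages = 4, index 18 is inside
+ * ((18 - 0) % 16 == 2 < 4) while index 22 falls in the stride gap
+ * (22 % 16 == 6 >= 4).  With ria_length == 0 or ria_length == ria_pages
+ * (non-stride I/O) every index in [ria_start, ria_end] counts as inside.
+ */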
+
+static int ll_read_ahead_pages(const struct lu_env *env,
+ struct cl_io *io, struct cl_page_list *queue,
+ struct ra_io_arg *ria,
+ unsigned long *reserved_pages,
+ struct address_space *mapping,
+ unsigned long *ra_end)
+{
+ int rc, count = 0, stride_ria;
+ unsigned long page_idx;
+
+ LASSERT(ria != NULL);
+ RIA_DEBUG(ria);
+
+ stride_ria = ria->ria_length > ria->ria_pages && ria->ria_pages > 0;
+ for (page_idx = ria->ria_start; page_idx <= ria->ria_end &&
+ *reserved_pages > 0; page_idx++) {
+ if (ras_inside_ra_window(page_idx, ria)) {
+ /* If the page is inside the read-ahead window */
+ rc = ll_read_ahead_page(env, io, queue,
+ page_idx, mapping);
+ if (rc == 1) {
+ (*reserved_pages)--;
+ count++;
+ } else if (rc == -ENOLCK)
+ break;
+ } else if (stride_ria) {
+ /* If it is not in the read-ahead window, and this is
+ * stride read-ahead mode, then check whether it should skip
+ * the stride gap */
+ pgoff_t offset;
+ /* FIXME: This assertion is only valid for
+ * forward read-ahead; it will be fixed when backward
+ * read-ahead is implemented */
+ LASSERTF(page_idx > ria->ria_stoff, "Invalid page_idx %lu "
+ "rs %lu re %lu ro %lu rl %lu rp %lu\n", page_idx,
+ ria->ria_start, ria->ria_end, ria->ria_stoff,
+ ria->ria_length, ria->ria_pages);
+ offset = page_idx - ria->ria_stoff;
+ offset = offset % (ria->ria_length);
+ if (offset > ria->ria_pages) {
+ page_idx += ria->ria_length - offset;
+ CDEBUG(D_READA, "i %lu skip %lu\n", page_idx,
+ ria->ria_length - offset);
+ continue;
+ }
+ }
+ }
+ *ra_end = page_idx;
+ return count;
+}
+
+int ll_readahead(const struct lu_env *env, struct cl_io *io,
+ struct ll_readahead_state *ras, struct address_space *mapping,
+ struct cl_page_list *queue, int flags)
+{
+ struct vvp_io *vio = vvp_env_io(env);
+ struct vvp_thread_info *vti = vvp_env_info(env);
+ struct cl_attr *attr = ccc_env_thread_attr(env);
+ unsigned long start = 0, end = 0, reserved;
+ unsigned long ra_end, len;
+ struct inode *inode;
+ struct ll_ra_read *bead;
+ struct ra_io_arg *ria = &vti->vti_ria;
+ struct ll_inode_info *lli;
+ struct cl_object *clob;
+ int ret = 0;
+ __u64 kms;
+
+ inode = mapping->host;
+ lli = ll_i2info(inode);
+ clob = lli->lli_clob;
+
+ memset(ria, 0, sizeof(*ria));
+
+ cl_object_attr_lock(clob);
+ ret = cl_object_attr_get(env, clob, attr);
+ cl_object_attr_unlock(clob);
+
+ if (ret != 0)
+ return ret;
+ kms = attr->cat_kms;
+ if (kms == 0) {
+ ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
+ return 0;
+ }
+
+ spin_lock(&ras->ras_lock);
+ if (vio->cui_ra_window_set)
+ bead = &vio->cui_bead;
+ else
+ bead = NULL;
+
+ /* Enlarge the RA window to encompass the full read */
+ if (bead != NULL && ras->ras_window_start + ras->ras_window_len <
+ bead->lrr_start + bead->lrr_count) {
+ ras->ras_window_len = bead->lrr_start + bead->lrr_count -
+ ras->ras_window_start;
+ }
+ /* Reserve a part of the read-ahead window that we'll be issuing */
+ if (ras->ras_window_len) {
+ start = ras->ras_next_readahead;
+ end = ras->ras_window_start + ras->ras_window_len - 1;
+ }
+ if (end != 0) {
+ unsigned long rpc_boundary;
+ /*
+ * Align RA window to an optimal boundary.
+ *
+ * XXX This would be better to align to cl_max_pages_per_rpc
+ * instead of PTLRPC_MAX_BRW_PAGES, because the RPC size may
+ * be aligned to the RAID stripe size in the future and that
+ * is more important than the RPC size.
+ */
+ /* Note: we only trim the RPC, instead of extending the RPC
+ * to the boundary, so as to avoid reading too many pages during
+ * random reads. */
+ rpc_boundary = ((end + 1) & (~(PTLRPC_MAX_BRW_PAGES - 1)));
+ if (rpc_boundary > 0)
+ rpc_boundary--;
+
+ if (rpc_boundary > start)
+ end = rpc_boundary;
+
+ /* Truncate RA window to end of file */
+ end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT));
+
+ ras->ras_next_readahead = max(end, end + 1);
+ RAS_CDEBUG(ras);
+ }
+ ria->ria_start = start;
+ ria->ria_end = end;
+ /* If stride I/O mode is detected, get stride window */
+ if (stride_io_mode(ras)) {
+ ria->ria_stoff = ras->ras_stride_offset;
+ ria->ria_length = ras->ras_stride_length;
+ ria->ria_pages = ras->ras_stride_pages;
+ }
+ spin_unlock(&ras->ras_lock);
+
+ if (end == 0) {
+ ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
+ return 0;
+ }
+ len = ria_page_count(ria);
+ if (len == 0)
+ return 0;
+
+ reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len);
+ if (reserved < len)
+ ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);
+
+ CDEBUG(D_READA, "reserved page %lu ra_cur %d ra_max %lu\n", reserved,
+ atomic_read(&ll_i2sbi(inode)->ll_ra_info.ra_cur_pages),
+ ll_i2sbi(inode)->ll_ra_info.ra_max_pages);
+
+ ret = ll_read_ahead_pages(env, io, queue,
+ ria, &reserved, mapping, &ra_end);
+
+ LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
+ if (reserved != 0)
+ ll_ra_count_put(ll_i2sbi(inode), reserved);
+
+ if (ra_end == end + 1 && ra_end == (kms >> PAGE_CACHE_SHIFT))
+ ll_ra_stats_inc(mapping, RA_STAT_EOF);
+
+ /* if we didn't get to the end of the region we reserved from
+ * the ras we need to go back and update the ras so that the
+ * next read-ahead tries from where we left off. we only do so
+ * if the region we failed to issue read-ahead on is still ahead
+ * of the app and behind the next index to start read-ahead from */
+ CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu\n",
+ ra_end, end, ria->ria_end);
+
+ if (ra_end != end + 1) {
+ spin_lock(&ras->ras_lock);
+ if (ra_end < ras->ras_next_readahead &&
+ index_in_window(ra_end, ras->ras_window_start, 0,
+ ras->ras_window_len)) {
+ ras->ras_next_readahead = ra_end;
+ RAS_CDEBUG(ras);
+ }
+ spin_unlock(&ras->ras_lock);
+ }
+
+ return ret;
+}
+
+static void ras_set_start(struct inode *inode, struct ll_readahead_state *ras,
+ unsigned long index)
+{
+ ras->ras_window_start = index & (~(RAS_INCREASE_STEP(inode) - 1));
+}
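+
+/*
+ * Illustrative note: the window start is rounded down to a multiple of
+ * RAS_INCREASE_STEP.  Assuming a step of 256 pages (1MB with 4KB pages),
+ * index 1000 gives a window start of 1000 & ~255 == 768.
+ */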
+
+/* called with the ras_lock held or from places where it doesn't matter */
+static void ras_reset(struct inode *inode, struct ll_readahead_state *ras,
+ unsigned long index)
+{
+ ras->ras_last_readpage = index;
+ ras->ras_consecutive_requests = 0;
+ ras->ras_consecutive_pages = 0;
+ ras->ras_window_len = 0;
+ ras_set_start(inode, ras, index);
+ ras->ras_next_readahead = max(ras->ras_window_start, index);
+
+ RAS_CDEBUG(ras);
+}
+
+/* called with the ras_lock held or from places where it doesn't matter */
+static void ras_stride_reset(struct ll_readahead_state *ras)
+{
+ ras->ras_consecutive_stride_requests = 0;
+ ras->ras_stride_length = 0;
+ ras->ras_stride_pages = 0;
+ RAS_CDEBUG(ras);
+}
+
+void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
+{
+ spin_lock_init(&ras->ras_lock);
+ ras_reset(inode, ras, 0);
+ ras->ras_requests = 0;
+ INIT_LIST_HEAD(&ras->ras_read_beads);
+}
+
+/*
+ * Check whether the read request is in the stride window.
+ * If it is in the stride window, return 1, otherwise return 0.
+ */
+static int index_in_stride_window(struct ll_readahead_state *ras,
+ unsigned long index)
+{
+ unsigned long stride_gap;
+
+ if (ras->ras_stride_length == 0 || ras->ras_stride_pages == 0 ||
+ ras->ras_stride_pages == ras->ras_stride_length)
+ return 0;
+
+ stride_gap = index - ras->ras_last_readpage - 1;
+
+ /* If it is contiguous read */
+ if (stride_gap == 0)
+ return ras->ras_consecutive_pages + 1 <= ras->ras_stride_pages;
+
+ /* Otherwise check the stride by itself */
+ return (ras->ras_stride_length - ras->ras_stride_pages) == stride_gap &&
+ ras->ras_consecutive_pages == ras->ras_stride_pages;
+}
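+
+/*
+ * Illustrative example: with ras_stride_length = 16, ras_stride_pages = 4,
+ * ras_last_readpage = 19 and ras_consecutive_pages = 4, a read at index 32
+ * has stride_gap = 12 == 16 - 4 and so matches the stride pattern
+ * (returns 1), while a contiguous read at index 20 returns 0 because the
+ * current stride segment already holds ras_stride_pages pages.
+ */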
+
+static void ras_update_stride_detector(struct ll_readahead_state *ras,
+ unsigned long index)
+{
+ unsigned long stride_gap = index - ras->ras_last_readpage - 1;
+
+ if (!stride_io_mode(ras) && (stride_gap != 0 ||
+ ras->ras_consecutive_stride_requests == 0)) {
+ ras->ras_stride_pages = ras->ras_consecutive_pages;
+ ras->ras_stride_length = stride_gap + ras->ras_consecutive_pages;
+ }
+ LASSERT(ras->ras_request_index == 0);
+ LASSERT(ras->ras_consecutive_stride_requests == 0);
+
+ if (index <= ras->ras_last_readpage) {
+ /* Reset stride window for forward read */
+ ras_stride_reset(ras);
+ return;
+ }
+
+ ras->ras_stride_pages = ras->ras_consecutive_pages;
+ ras->ras_stride_length = stride_gap + ras->ras_consecutive_pages;
+
+ RAS_CDEBUG(ras);
+ return;
+}
+
+static unsigned long
+stride_page_count(struct ll_readahead_state *ras, unsigned long len)
+{
+ return stride_pg_count(ras->ras_stride_offset, ras->ras_stride_length,
+ ras->ras_stride_pages, ras->ras_stride_offset,
+ len);
+}
+
+/* The stride read-ahead window will be increased by inc_len according to
+ * the stride I/O pattern */
+static void ras_stride_increase_window(struct ll_readahead_state *ras,
+ struct ll_ra_info *ra,
+ unsigned long inc_len)
+{
+ unsigned long left, step, window_len;
+ unsigned long stride_len;
+
+ LASSERT(ras->ras_stride_length > 0);
+ LASSERTF(ras->ras_window_start + ras->ras_window_len
+ >= ras->ras_stride_offset, "window_start %lu, window_len %lu"
+ " stride_offset %lu\n", ras->ras_window_start,
+ ras->ras_window_len, ras->ras_stride_offset);
+
+ stride_len = ras->ras_window_start + ras->ras_window_len -
+ ras->ras_stride_offset;
+
+ left = stride_len % ras->ras_stride_length;
+ window_len = ras->ras_window_len - left;
+
+ if (left < ras->ras_stride_pages)
+ left += inc_len;
+ else
+ left = ras->ras_stride_pages + inc_len;
+
+ LASSERT(ras->ras_stride_pages != 0);
+
+ step = left / ras->ras_stride_pages;
+ left %= ras->ras_stride_pages;
+
+ window_len += step * ras->ras_stride_length + left;
+
+ if (stride_page_count(ras, window_len) <= ra->ra_max_pages_per_file)
+ ras->ras_window_len = window_len;
+
+ RAS_CDEBUG(ras);
+}
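+
+/*
+ * Worked example: with ras_stride_offset = 0, ras_stride_length = 16,
+ * ras_stride_pages = 4, ras_window_start = 0, ras_window_len = 16 and
+ * inc_len = 8: left = 0, so left becomes 8, step = 8 / 4 = 2 and the
+ * window grows by 2 * 16 = 32 to 48 pages.  The new window [0, 47] covers
+ * three stride segments, i.e. 12 useful pages, exactly inc_len = 8 more
+ * than before, provided stride_page_count() stays within
+ * ra_max_pages_per_file.
+ */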
+
+static void ras_increase_window(struct inode *inode,
+ struct ll_readahead_state *ras,
+ struct ll_ra_info *ra)
+{
+ /* The stretch of the ra-window should be aligned with the max rpc_size,
+ * but the current clio architecture does not support retrieving such
+ * information from the lower layer. FIXME later
+ */
+ if (stride_io_mode(ras))
+ ras_stride_increase_window(ras, ra, RAS_INCREASE_STEP(inode));
+ else
+ ras->ras_window_len = min(ras->ras_window_len +
+ RAS_INCREASE_STEP(inode),
+ ra->ra_max_pages_per_file);
+}
+
+void ras_update(struct ll_sb_info *sbi, struct inode *inode,
+ struct ll_readahead_state *ras, unsigned long index,
+ unsigned hit)
+{
+ struct ll_ra_info *ra = &sbi->ll_ra_info;
+ int zero = 0, stride_detect = 0, ra_miss = 0;
+
+ spin_lock(&ras->ras_lock);
+
+ ll_ra_stats_inc_sbi(sbi, hit ? RA_STAT_HIT : RA_STAT_MISS);
+
+ /* reset the read-ahead window in two cases. First when the app seeks
+ * or reads to some other part of the file. Secondly if we get a
+ * read-ahead miss that we think we've previously issued. This can
+ * be a symptom of there being so many read-ahead pages that the VM is
+ * reclaiming them before we get to them. */
+ if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
+ zero = 1;
+ ll_ra_stats_inc_sbi(sbi, RA_STAT_DISTANT_READPAGE);
+ } else if (!hit && ras->ras_window_len &&
+ index < ras->ras_next_readahead &&
+ index_in_window(index, ras->ras_window_start, 0,
+ ras->ras_window_len)) {
+ ra_miss = 1;
+ ll_ra_stats_inc_sbi(sbi, RA_STAT_MISS_IN_WINDOW);
+ }
+
+ /* On the second access to a file smaller than the tunable
+ * ra_max_read_ahead_whole_pages, trigger RA on all pages in the
+ * file up to ra_max_pages_per_file. This is simply a best effort
+ * and only occurs once per open file. Normal RA behavior is restored
+ * for subsequent IO. The mmap case does not increment
+ * ras_requests and thus can never trigger this behavior. */
+ if (ras->ras_requests == 2 && !ras->ras_request_index) {
+ __u64 kms_pages;
+
+ kms_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT;
+
+ CDEBUG(D_READA, "kmsp "LPU64" mwp %lu mp %lu\n", kms_pages,
+ ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file);
+
+ if (kms_pages &&
+ kms_pages <= ra->ra_max_read_ahead_whole_pages) {
+ ras->ras_window_start = 0;
+ ras->ras_last_readpage = 0;
+ ras->ras_next_readahead = 0;
+ ras->ras_window_len = min(ra->ra_max_pages_per_file,
+ ra->ra_max_read_ahead_whole_pages);
+ GOTO(out_unlock, 0);
+ }
+ }
+ if (zero) {
+ /* check whether it is in stride I/O mode */
+ if (!index_in_stride_window(ras, index)) {
+ if (ras->ras_consecutive_stride_requests == 0 &&
+ ras->ras_request_index == 0) {
+ ras_update_stride_detector(ras, index);
+ ras->ras_consecutive_stride_requests++;
+ } else {
+ ras_stride_reset(ras);
+ }
+ ras_reset(inode, ras, index);
+ ras->ras_consecutive_pages++;
+ GOTO(out_unlock, 0);
+ } else {
+ ras->ras_consecutive_pages = 0;
+ ras->ras_consecutive_requests = 0;
+ if (++ras->ras_consecutive_stride_requests > 1)
+ stride_detect = 1;
+ RAS_CDEBUG(ras);
+ }
+ } else {
+ if (ra_miss) {
+ if (index_in_stride_window(ras, index) &&
+ stride_io_mode(ras)) {
+ /* If stride-RA hit a cache miss, the stride detector
+ * will not be reset, to avoid the overhead of
+ * redetecting the read-ahead mode */
+ if (index != ras->ras_last_readpage + 1)
+ ras->ras_consecutive_pages = 0;
+ ras_reset(inode, ras, index);
+ RAS_CDEBUG(ras);
+ } else {
+ /* Reset both stride window and normal RA
+ * window */
+ ras_reset(inode, ras, index);
+ ras->ras_consecutive_pages++;
+ ras_stride_reset(ras);
+ GOTO(out_unlock, 0);
+ }
+ } else if (stride_io_mode(ras)) {
+ /* If this is a contiguous read but we are currently in stride
+ * I/O mode, check whether the stride step is still valid;
+ * if invalid, reset the stride ra window */
+ if (!index_in_stride_window(ras, index)) {
+ /* Shrink stride read-ahead window to be zero */
+ ras_stride_reset(ras);
+ ras->ras_window_len = 0;
+ ras->ras_next_readahead = index;
+ }
+ }
+ }
+ ras->ras_consecutive_pages++;
+ ras->ras_last_readpage = index;
+ ras_set_start(inode, ras, index);
+
+ if (stride_io_mode(ras))
+ /* Since stride readahead is sensitive to the read-ahead
+ * offset, we use the original offset here,
+ * instead of ras_window_start, which is RPC aligned */
+ ras->ras_next_readahead = max(index, ras->ras_next_readahead);
+ else
+ ras->ras_next_readahead = max(ras->ras_window_start,
+ ras->ras_next_readahead);
+ RAS_CDEBUG(ras);
+
+ /* Trigger RA in the mmap case where ras_consecutive_requests
+ * is not incremented and thus can't be used to trigger RA */
+ if (!ras->ras_window_len && ras->ras_consecutive_pages == 4) {
+ ras->ras_window_len = RAS_INCREASE_STEP(inode);
+ GOTO(out_unlock, 0);
+ }
+
+ /* Initially reset the stride window offset to next_readahead */
+ if (ras->ras_consecutive_stride_requests == 2 && stride_detect) {
+ /**
+ * Once stride IO mode is detected, next_readahead should be
+ * reset to make sure next_readahead > stride offset
+ */
+ ras->ras_next_readahead = max(index, ras->ras_next_readahead);
+ ras->ras_stride_offset = index;
+ ras->ras_window_len = RAS_INCREASE_STEP(inode);
+ }
+
+ /* The initial ras_window_len is set to the request size. To avoid
+ * uselessly reading and discarding pages for random IO the window is
+ * only increased once per consecutive request received. */
+ if ((ras->ras_consecutive_requests > 1 || stride_detect) &&
+ !ras->ras_request_index)
+ ras_increase_window(inode, ras, ra);
+out_unlock:
+ RAS_CDEBUG(ras);
+ ras->ras_request_index++;
+ spin_unlock(&ras->ras_lock);
+ return;
+}
+
+int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
+{
+ struct inode *inode = vmpage->mapping->host;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct lu_env *env;
+ struct cl_io *io;
+ struct cl_page *page;
+ struct cl_object *clob;
+ struct cl_env_nest nest;
+ bool redirtied = false;
+ bool unlocked = false;
+ int result;
+
+ LASSERT(PageLocked(vmpage));
+ LASSERT(!PageWriteback(vmpage));
+
+ LASSERT(ll_i2dtexp(inode) != NULL);
+
+ env = cl_env_nested_get(&nest);
+ if (IS_ERR(env))
+ GOTO(out, result = PTR_ERR(env));
+
+ clob = ll_i2info(inode)->lli_clob;
+ LASSERT(clob != NULL);
+
+ io = ccc_env_thread_io(env);
+ io->ci_obj = clob;
+ io->ci_ignore_layout = 1;
+ result = cl_io_init(env, io, CIT_MISC, clob);
+ if (result == 0) {
+ page = cl_page_find(env, clob, vmpage->index,
+ vmpage, CPT_CACHEABLE);
+ if (!IS_ERR(page)) {
+ lu_ref_add(&page->cp_reference, "writepage",
+ current);
+ cl_page_assume(env, io, page);
+ result = cl_page_flush(env, io, page);
+ if (result != 0) {
+ /*
+ * Re-dirty page on error so it retries write,
+ * but not in case when IO has actually
+ * occurred and completed with an error.
+ */
+ if (!PageError(vmpage)) {
+ redirty_page_for_writepage(wbc, vmpage);
+ result = 0;
+ redirtied = true;
+ }
+ }
+ cl_page_disown(env, io, page);
+ unlocked = true;
+ lu_ref_del(&page->cp_reference,
+ "writepage", current);
+ cl_page_put(env, page);
+ } else {
+ result = PTR_ERR(page);
+ }
+ }
+ cl_io_fini(env, io);
+
+ if (redirtied && wbc->sync_mode == WB_SYNC_ALL) {
+ loff_t offset = cl_offset(clob, vmpage->index);
+
+ /* Flush page failed because the extent is being written out.
+ * Wait for the write of extent to be finished to avoid
+ * breaking kernel which assumes ->writepage should mark
+ * PageWriteback or clean the page. */
+ result = cl_sync_file_range(inode, offset,
+ offset + PAGE_CACHE_SIZE - 1,
+ CL_FSYNC_LOCAL, 1);
+ if (result > 0) {
+ /* actually we may have written more than one page;
+ * subtract the extra pages here because the caller will
+ * count this page itself. */
+ wbc->nr_to_write -= result - 1;
+ result = 0;
+ }
+ }
+
+ cl_env_nested_put(&nest, env);
+ GOTO(out, result);
+
+out:
+ if (result < 0) {
+ if (!lli->lli_async_rc)
+ lli->lli_async_rc = result;
+ SetPageError(vmpage);
+ if (!unlocked)
+ unlock_page(vmpage);
+ }
+ return result;
+}
+
+int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
+{
+ struct inode *inode = mapping->host;
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ loff_t start;
+ loff_t end;
+ enum cl_fsync_mode mode;
+ int range_whole = 0;
+ int result;
+ int ignore_layout = 0;
+
+ if (wbc->range_cyclic) {
+ start = mapping->writeback_index << PAGE_CACHE_SHIFT;
+ end = OBD_OBJECT_EOF;
+ } else {
+ start = wbc->range_start;
+ end = wbc->range_end;
+ if (end == LLONG_MAX) {
+ end = OBD_OBJECT_EOF;
+ range_whole = start == 0;
+ }
+ }
+
+ mode = CL_FSYNC_NONE;
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ mode = CL_FSYNC_LOCAL;
+
+ if (sbi->ll_umounting)
+ /* if the mountpoint is being umounted, all pages have to be
+ * evicted to avoid hitting LBUG when truncate_inode_pages()
+ * is called later on. */
+ ignore_layout = 1;
+ result = cl_sync_file_range(inode, start, end, mode, ignore_layout);
+ if (result > 0) {
+ wbc->nr_to_write -= result;
+ result = 0;
+ }
+
+ if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
+ if (end == OBD_OBJECT_EOF)
+ end = i_size_read(inode);
+ mapping->writeback_index = (end >> PAGE_CACHE_SHIFT) + 1;
+ }
+ return result;
+}
+
+int ll_readpage(struct file *file, struct page *vmpage)
+{
+ struct ll_cl_context *lcc;
+ int result;
+
+ lcc = ll_cl_init(file, vmpage, 0);
+ if (!IS_ERR(lcc)) {
+ struct lu_env *env = lcc->lcc_env;
+ struct cl_io *io = lcc->lcc_io;
+ struct cl_page *page = lcc->lcc_page;
+
+ LASSERT(page->cp_type == CPT_CACHEABLE);
+ if (likely(!PageUptodate(vmpage))) {
+ cl_page_assume(env, io, page);
+ result = cl_io_read_page(env, io, page);
+ } else {
+ /* Page from a non-object file. */
+ unlock_page(vmpage);
+ result = 0;
+ }
+ ll_cl_fini(lcc);
+ } else {
+ unlock_page(vmpage);
+ result = PTR_ERR(lcc);
+ }
+ return result;
+}
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
new file mode 100644
index 0000000..96c29ad
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -0,0 +1,581 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/lustre/llite/rw26.c
+ *
+ * Lustre Lite I/O page cache routines for the 2.5/2.6 kernel version
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <asm/uaccess.h>
+
+#include <linux/migrate.h>
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/mpage.h>
+#include <linux/writeback.h>
+#include <linux/pagemap.h>
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+#include <lustre_lite.h>
+#include "llite_internal.h"
+#include <linux/lustre_compat25.h>
+
+/**
+ * Implements Linux VM address_space::invalidatepage() method. This method is
+ * called when the page is truncated from a file, either as a result of an
+ * explicit truncate, or when the inode is removed from memory (as a result of
+ * final iput(), umount, or memory pressure induced icache shrinking).
+ *
+ * [0, offset] bytes of the page remain valid (this is for a case of not-page
+ * aligned truncate). Lustre leaves partially truncated page in the cache,
+ * relying on struct inode::i_size to limit further accesses.
+ */
+static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
+ unsigned int length)
+{
+ struct inode *inode;
+ struct lu_env *env;
+ struct cl_page *page;
+ struct cl_object *obj;
+
+ int refcheck;
+
+ LASSERT(PageLocked(vmpage));
+ LASSERT(!PageWriteback(vmpage));
+
+ /*
+ * It is safe not to check anything in invalidatepage/releasepage
+ * below because they are run with the page locked, and all our I/O
+ * happens with the page locked too.
+ */
+ if (offset == 0 && length == PAGE_CACHE_SIZE) {
+ env = cl_env_get(&refcheck);
+ if (!IS_ERR(env)) {
+ inode = vmpage->mapping->host;
+ obj = ll_i2info(inode)->lli_clob;
+ if (obj != NULL) {
+ page = cl_vmpage_page(vmpage, obj);
+ if (page != NULL) {
+ lu_ref_add(&page->cp_reference,
+ "delete", vmpage);
+ cl_page_delete(env, page);
+ lu_ref_del(&page->cp_reference,
+ "delete", vmpage);
+ cl_page_put(env, page);
+ }
+ } else
+ LASSERT(vmpage->private == 0);
+ cl_env_put(env, &refcheck);
+ }
+ }
+}
+
+#ifdef HAVE_RELEASEPAGE_WITH_INT
+#define RELEASEPAGE_ARG_TYPE int
+#else
+#define RELEASEPAGE_ARG_TYPE gfp_t
+#endif
+static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask)
+{
+ struct cl_env_nest nest;
+ struct lu_env *env;
+ struct cl_object *obj;
+ struct cl_page *page;
+ struct address_space *mapping;
+ int result;
+
+ LASSERT(PageLocked(vmpage));
+ if (PageWriteback(vmpage) || PageDirty(vmpage))
+ return 0;
+
+ mapping = vmpage->mapping;
+ if (mapping == NULL)
+ return 1;
+
+ obj = ll_i2info(mapping->host)->lli_clob;
+ if (obj == NULL)
+ return 1;
+
+ /* 1 for page allocator, 1 for cl_page and 1 for page cache */
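+ /* A count above 3 means someone else (for example, an in-flight I/O or
+ * another concurrent user) still holds a reference, so refuse to release
+ * the page. */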
+ if (page_count(vmpage) > 3)
+ return 0;
+
+ /* TODO: determine what gfp flags should be used from @gfp_mask. */
+ env = cl_env_nested_get(&nest);
+ if (IS_ERR(env))
+ /* If we can't allocate an env, we won't be able to call
+ * cl_page_put() later on, which means the page refcount held
+ * by cl_page cannot be dropped, so ask the kernel not to free
+ * this page. */
+ return 0;
+
+ page = cl_vmpage_page(vmpage, obj);
+ result = page == NULL;
+ if (page != NULL) {
+ if (!cl_page_in_use(page)) {
+ result = 1;
+ cl_page_delete(env, page);
+ }
+ cl_page_put(env, page);
+ }
+ cl_env_nested_put(&nest, env);
+ return result;
+}
+
+static int ll_set_page_dirty(struct page *vmpage)
+{
+#if 0
+ struct cl_page *page = vvp_vmpage_page_transient(vmpage);
+ struct vvp_object *obj = cl_inode2vvp(vmpage->mapping->host);
+ struct vvp_page *cpg;
+
+ /*
+ * XXX should page method be called here?
+ */
+ LASSERT(&obj->co_cl == page->cp_obj);
+ cpg = cl2vvp_page(cl_page_at(page, &vvp_device_type));
+ /*
+ * XXX cannot do much here, because page is possibly not locked:
+ * sys_munmap()->...
+ * ->unmap_page_range()->zap_pte_range()->set_page_dirty().
+ */
+ vvp_write_pending(obj, cpg);
+#endif
+ return __set_page_dirty_nobuffers(vmpage);
+}
+
+#define MAX_DIRECTIO_SIZE (2 * 1024 * 1024 * 1024UL)
+
+static inline int ll_get_user_pages(int rw, unsigned long user_addr,
+ size_t size, struct page ***pages,
+ int *max_pages)
+{
+ int result = -ENOMEM;
+
+ /* set an arbitrary limit to prevent arithmetic overflow */
+ if (size > MAX_DIRECTIO_SIZE) {
+ *pages = NULL;
+ return -EFBIG;
+ }
+
+ *max_pages = (user_addr + size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ *max_pages -= user_addr >> PAGE_CACHE_SHIFT;
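+ /*
+ * For example, with 4kB pages (illustrative values only): user_addr ==
+ * 0x1010 (16 bytes into page 1) and size == 8192 give
+ * ((0x1010 + 0x2000 + 0xfff) >> 12) - (0x1010 >> 12) == 4 - 1 == 3,
+ * i.e. the user buffer straddles three pages.
+ */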
+
+ OBD_ALLOC_LARGE(*pages, *max_pages * sizeof(**pages));
+ if (*pages) {
+ down_read(&current->mm->mmap_sem);
+ result = get_user_pages(current, current->mm, user_addr,
+ *max_pages, (rw == READ), 0, *pages,
+ NULL);
+ up_read(&current->mm->mmap_sem);
+ if (unlikely(result <= 0))
+ OBD_FREE_LARGE(*pages, *max_pages * sizeof(**pages));
+ }
+
+ return result;
+}
+
+/* ll_free_user_pages - tear down page struct array
+ * @pages: array of page struct pointers underlying target buffer */
+static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
+{
+ int i;
+
+ for (i = 0; i < npages; i++) {
+ if (pages[i] == NULL)
+ break;
+ if (do_dirty)
+ set_page_dirty_lock(pages[i]);
+ page_cache_release(pages[i]);
+ }
+
+ OBD_FREE_LARGE(pages, npages * sizeof(*pages));
+}
+
+ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
+ int rw, struct inode *inode,
+ struct ll_dio_pages *pv)
+{
+ struct cl_page *clp;
+ struct cl_2queue *queue;
+ struct cl_object *obj = io->ci_obj;
+ int i;
+ ssize_t rc = 0;
+ loff_t file_offset = pv->ldp_start_offset;
+ long size = pv->ldp_size;
+ int page_count = pv->ldp_nr;
+ struct page **pages = pv->ldp_pages;
+ long page_size = cl_page_size(obj);
+ bool do_io;
+ int io_pages = 0;
+
+ queue = &io->ci_queue;
+ cl_2queue_init(queue);
+ for (i = 0; i < page_count; i++) {
+ if (pv->ldp_offsets)
+ file_offset = pv->ldp_offsets[i];
+
+ LASSERT(!(file_offset & (page_size - 1)));
+ clp = cl_page_find(env, obj, cl_index(obj, file_offset),
+ pv->ldp_pages[i], CPT_TRANSIENT);
+ if (IS_ERR(clp)) {
+ rc = PTR_ERR(clp);
+ break;
+ }
+
+ rc = cl_page_own(env, io, clp);
+ if (rc) {
+ LASSERT(clp->cp_state == CPS_FREEING);
+ cl_page_put(env, clp);
+ break;
+ }
+
+ do_io = true;
+
+ /* check the page type: if the page is a cached (host) page, then
+ * copy the data to/from it directly */
+ if (clp->cp_type == CPT_CACHEABLE) {
+ struct page *vmpage = cl_page_vmpage(env, clp);
+ struct page *src_page;
+ struct page *dst_page;
+ void *src;
+ void *dst;
+
+ src_page = (rw == WRITE) ? pages[i] : vmpage;
+ dst_page = (rw == WRITE) ? vmpage : pages[i];
+
+ src = kmap_atomic(src_page);
+ dst = kmap_atomic(dst_page);
+ memcpy(dst, src, min(page_size, size));
+ kunmap_atomic(dst);
+ kunmap_atomic(src);
+
+ /* make sure page will be added to the transfer by
+ * cl_io_submit()->...->vvp_page_prep_write(). */
+ if (rw == WRITE)
+ set_page_dirty(vmpage);
+
+ if (rw == READ) {
+ /* do not issue the page for read, since it
+ * may re-read a readahead page whose uptodate
+ * bit is NOT set. */
+ cl_page_disown(env, io, clp);
+ do_io = false;
+ }
+ }
+
+ if (likely(do_io)) {
+ cl_2queue_add(queue, clp);
+
+ /*
+ * Set page clip to tell transfer formation engine
+ * that page has to be sent even if it is beyond KMS.
+ */
+ cl_page_clip(env, clp, 0, min(size, page_size));
+
+ ++io_pages;
+ }
+
+ /* drop the reference count for cl_page_find */
+ cl_page_put(env, clp);
+ size -= page_size;
+ file_offset += page_size;
+ }
+
+ if (rc == 0 && io_pages) {
+ rc = cl_io_submit_sync(env, io,
+ rw == READ ? CRT_READ : CRT_WRITE,
+ queue, 0);
+ }
+ if (rc == 0)
+ rc = pv->ldp_size;
+
+ cl_2queue_discard(env, io, queue);
+ cl_2queue_disown(env, io, queue);
+ cl_2queue_fini(env, queue);
+ return rc;
+}
+EXPORT_SYMBOL(ll_direct_rw_pages);
+
+static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
+ int rw, struct inode *inode,
+ struct address_space *mapping,
+ size_t size, loff_t file_offset,
+ struct page **pages, int page_count)
+{
+ struct ll_dio_pages pvec = { .ldp_pages = pages,
+ .ldp_nr = page_count,
+ .ldp_size = size,
+ .ldp_offsets = NULL,
+ .ldp_start_offset = file_offset
+ };
+
+ return ll_direct_rw_pages(env, io, rw, inode, &pvec);
+}
+
+#ifdef KMALLOC_MAX_SIZE
+#define MAX_MALLOC KMALLOC_MAX_SIZE
+#else
+#define MAX_MALLOC (128 * 1024)
+#endif
+
+/* This is the maximum size of a single O_DIRECT request, based on the
+ * kmalloc limit. We need to fit all of the brw_page structs, each one
+ * representing PAGE_SIZE worth of user data, into a single buffer, and
+ * then truncate this to be a full-sized RPC. For 4kB PAGE_SIZE this is
+ * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */
+#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \
+ ~(DT_MAX_BRW_SIZE - 1))
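+/* Illustrative arithmetic for the figures above, assuming (for the sake of
+ * example) that sizeof(struct brw_page) is about 24 bytes on a 64-bit build:
+ * 128kB / 24 * 4kB and 4MB / 24 * 4kB work out to roughly the 22MB and 682MB
+ * quoted above, before rounding down to a DT_MAX_BRW_SIZE multiple. */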
+static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
+ const struct iovec *iov, loff_t file_offset,
+ unsigned long nr_segs)
+{
+ struct lu_env *env;
+ struct cl_io *io;
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file->f_mapping->host;
+ struct ccc_object *obj = cl_inode2ccc(inode);
+ long count = iov_length(iov, nr_segs);
+ long tot_bytes = 0, result = 0;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ unsigned long seg = 0;
+ long size = MAX_DIO_SIZE;
+ int refcheck;
+
+ if (!lli->lli_has_smd)
+ return -EBADF;
+
+ /* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */
+ if ((file_offset & ~CFS_PAGE_MASK) || (count & ~CFS_PAGE_MASK))
+ return -EINVAL;
+
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), size=%lu (max %lu), "
+ "offset=%lld=%llx, pages %lu (max %lu)\n",
+ inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE,
+ file_offset, file_offset, count >> PAGE_CACHE_SHIFT,
+ MAX_DIO_SIZE >> PAGE_CACHE_SHIFT);
+
+ /* Check that all user buffers are aligned as well */
+ for (seg = 0; seg < nr_segs; seg++) {
+ if (((unsigned long)iov[seg].iov_base & ~CFS_PAGE_MASK) ||
+ (iov[seg].iov_len & ~CFS_PAGE_MASK))
+ return -EINVAL;
+ }
+
+ env = cl_env_get(&refcheck);
+ LASSERT(!IS_ERR(env));
+ io = ccc_env_io(env)->cui_cl.cis_io;
+ LASSERT(io != NULL);
+
+ /* 0. Need locking between buffered and direct access, and protection
+ * against races with size changes from concurrent truncates and writes.
+ * 1. Need the inode mutex to operate on transient pages.
+ */
+ if (rw == READ)
+ mutex_lock(&inode->i_mutex);
+
+ LASSERT(obj->cob_transient_pages == 0);
+ for (seg = 0; seg < nr_segs; seg++) {
+ long iov_left = iov[seg].iov_len;
+ unsigned long user_addr = (unsigned long)iov[seg].iov_base;
+
+ if (rw == READ) {
+ if (file_offset >= i_size_read(inode))
+ break;
+ if (file_offset + iov_left > i_size_read(inode))
+ iov_left = i_size_read(inode) - file_offset;
+ }
+
+ while (iov_left > 0) {
+ struct page **pages;
+ int page_count, max_pages = 0;
+ long bytes;
+
+ bytes = min(size, iov_left);
+ page_count = ll_get_user_pages(rw, user_addr, bytes,
+ &pages, &max_pages);
+ if (likely(page_count > 0)) {
+ if (unlikely(page_count < max_pages))
+ bytes = page_count << PAGE_CACHE_SHIFT;
+ result = ll_direct_IO_26_seg(env, io, rw, inode,
+ file->f_mapping,
+ bytes, file_offset,
+ pages, page_count);
+ ll_free_user_pages(pages, max_pages, rw == READ);
+ } else if (page_count == 0) {
+ GOTO(out, result = -EFAULT);
+ } else {
+ result = page_count;
+ }
+ if (unlikely(result <= 0)) {
+ /* If we can't allocate a large enough buffer
+ * for the request, shrink it to a smaller
+ * PAGE_SIZE multiple and try again.
+ * We should always be able to kmalloc for a
+ * page worth of page pointers = 4MB on i386. */
+ if (result == -ENOMEM &&
+ size > (PAGE_CACHE_SIZE / sizeof(*pages)) *
+ PAGE_CACHE_SIZE) {
+ size = ((((size / 2) - 1) |
+ ~CFS_PAGE_MASK) + 1) &
+ CFS_PAGE_MASK;
+ CDEBUG(D_VFSTRACE, "DIO size now %lu\n",
+ size);
+ continue;
+ }
+
+ GOTO(out, result);
+ }
+
+ tot_bytes += result;
+ file_offset += result;
+ iov_left -= result;
+ user_addr += result;
+ }
+ }
+out:
+ LASSERT(obj->cob_transient_pages == 0);
+ if (rw == READ)
+ mutex_unlock(&inode->i_mutex);
+
+ if (tot_bytes > 0) {
+ if (rw == WRITE) {
+ struct lov_stripe_md *lsm;
+
+ lsm = ccc_inode_lsm_get(inode);
+ LASSERT(lsm != NULL);
+ lov_stripe_lock(lsm);
+ obd_adjust_kms(ll_i2dtexp(inode), lsm, file_offset, 0);
+ lov_stripe_unlock(lsm);
+ ccc_inode_lsm_put(inode, lsm);
+ }
+ }
+
+ cl_env_put(env, &refcheck);
+ return tot_bytes ? : result;
+}
+
+static int ll_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata)
+{
+ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ struct page *page;
+ int rc;
+ unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+
+ page = grab_cache_page_write_begin(mapping, index, flags);
+ if (!page)
+ return -ENOMEM;
+
+ *pagep = page;
+
+ rc = ll_prepare_write(file, page, from, from + len);
+ if (rc) {
+ unlock_page(page);
+ page_cache_release(page);
+ }
+ return rc;
+}
+
+static int ll_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata)
+{
+ unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ int rc;
+
+ rc = ll_commit_write(file, page, from, from + copied);
+ unlock_page(page);
+ page_cache_release(page);
+
+ return rc ?: copied;
+}
+
+#ifdef CONFIG_MIGRATION
+int ll_migratepage(struct address_space *mapping,
+ struct page *newpage, struct page *page,
+ enum migrate_mode mode)
+{
+ /* Always fail page migration until we have a proper implementation */
+ return -EIO;
+}
+#endif
+
+#ifndef MS_HAS_NEW_AOPS
+struct address_space_operations ll_aops = {
+ .readpage = ll_readpage,
+// .readpages = ll_readpages,
+ .direct_IO = ll_direct_IO_26,
+ .writepage = ll_writepage,
+ .writepages = ll_writepages,
+ .set_page_dirty = ll_set_page_dirty,
+ .write_begin = ll_write_begin,
+ .write_end = ll_write_end,
+ .invalidatepage = ll_invalidatepage,
+ .releasepage = (void *)ll_releasepage,
+#ifdef CONFIG_MIGRATION
+ .migratepage = ll_migratepage,
+#endif
+ .bmap = NULL
+};
+#else
+struct address_space_operations_ext ll_aops = {
+ .orig_aops.readpage = ll_readpage,
+// .orig_aops.readpages = ll_readpages,
+ .orig_aops.direct_IO = ll_direct_IO_26,
+ .orig_aops.writepage = ll_writepage,
+ .orig_aops.writepages = ll_writepages,
+ .orig_aops.set_page_dirty = ll_set_page_dirty,
+ .orig_aops.prepare_write = ll_prepare_write,
+ .orig_aops.commit_write = ll_commit_write,
+ .orig_aops.invalidatepage = ll_invalidatepage,
+ .orig_aops.releasepage = ll_releasepage,
+#ifdef CONFIG_MIGRATION
+ .orig_aops.migratepage = ll_migratepage,
+#endif
+ .orig_aops.bmap = NULL,
+ .write_begin = ll_write_begin,
+ .write_end = ll_write_end
+};
+#endif
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
new file mode 100644
index 0000000..8eaa38e9
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -0,0 +1,1692 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+#include <obd_support.h>
+#include <lustre_lite.h>
+#include <lustre_dlm.h>
+#include "llite_internal.h"
+
+#define SA_OMITTED_ENTRY_MAX 8ULL
+
+typedef enum {
+ /** negative values are for error cases */
+ SA_ENTRY_INIT = 0, /** init entry */
+ SA_ENTRY_SUCC = 1, /** stat succeed */
+ SA_ENTRY_INVA = 2, /** invalid entry */
+ SA_ENTRY_DEST = 3, /** entry to be destroyed */
+} se_stat_t;
+
+struct ll_sa_entry {
+ /* link into sai->sai_entries */
+ struct list_head se_link;
+ /* link into sai->sai_entries_{received,stated} */
+ struct list_head se_list;
+ /* link into sai hash table locally */
+ struct list_head se_hash;
+ /* entry reference count */
+ atomic_t se_refcount;
+ /* entry index in the sai */
+ __u64 se_index;
+ /* low layer ldlm lock handle */
+ __u64 se_handle;
+ /* entry status */
+ se_stat_t se_stat;
+ /* entry size, contains name */
+ int se_size;
+ /* pointer to async getattr enqueue info */
+ struct md_enqueue_info *se_minfo;
+ /* pointer to the async getattr request */
+ struct ptlrpc_request *se_req;
+ /* pointer to the target inode */
+ struct inode *se_inode;
+ /* entry name */
+ struct qstr se_qstr;
+};
+
+static unsigned int sai_generation = 0;
+static DEFINE_SPINLOCK(sai_generation_lock);
+
+static inline int ll_sa_entry_unhashed(struct ll_sa_entry *entry)
+{
+ return list_empty(&entry->se_hash);
+}
+
+/*
+ * The entry can only be released by the caller; it is necessary to hold the lock.
+ */
+static inline int ll_sa_entry_stated(struct ll_sa_entry *entry)
+{
+ smp_rmb();
+ return (entry->se_stat != SA_ENTRY_INIT);
+}
+
+static inline int ll_sa_entry_hash(int val)
+{
+ return val & LL_SA_CACHE_MASK;
+}
+
+/*
+ * Insert the entry into the SA hash table.
+ */
+static inline void
+ll_sa_entry_enhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
+{
+ int i = ll_sa_entry_hash(entry->se_qstr.hash);
+
+ spin_lock(&sai->sai_cache_lock[i]);
+ list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
+ spin_unlock(&sai->sai_cache_lock[i]);
+}
+
+/*
+ * Remove entry from SA table.
+ */
+static inline void
+ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
+{
+ int i = ll_sa_entry_hash(entry->se_qstr.hash);
+
+ spin_lock(&sai->sai_cache_lock[i]);
+ list_del_init(&entry->se_hash);
+ spin_unlock(&sai->sai_cache_lock[i]);
+}
+
+static inline int agl_should_run(struct ll_statahead_info *sai,
+ struct inode *inode)
+{
+ return (inode != NULL && S_ISREG(inode->i_mode) && sai->sai_agl_valid);
+}
+
+static inline struct ll_sa_entry *
+sa_first_received_entry(struct ll_statahead_info *sai)
+{
+ return list_entry(sai->sai_entries_received.next,
+ struct ll_sa_entry, se_list);
+}
+
+static inline struct ll_inode_info *
+agl_first_entry(struct ll_statahead_info *sai)
+{
+ return list_entry(sai->sai_entries_agl.next,
+ struct ll_inode_info, lli_agl_list);
+}
+
+static inline int sa_sent_full(struct ll_statahead_info *sai)
+{
+ return atomic_read(&sai->sai_cache_count) >= sai->sai_max;
+}
+
+static inline int sa_received_empty(struct ll_statahead_info *sai)
+{
+ return list_empty(&sai->sai_entries_received);
+}
+
+static inline int agl_list_empty(struct ll_statahead_info *sai)
+{
+ return list_empty(&sai->sai_entries_agl);
+}
+
+/**
+ * The hit rate is considered low when either:
+ * (1) the hit ratio is less than 80%, or
+ * (2) there have been more than 8 consecutive misses.
+ */
+static inline int sa_low_hit(struct ll_statahead_info *sai)
+{
+ return ((sai->sai_hit > 7 && sai->sai_hit < 4 * sai->sai_miss) ||
+ (sai->sai_consecutive_miss > 8));
+}
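+
+/*
+ * Note: condition (1) above, "sai_hit < 4 * sai_miss" (with sai_hit > 7),
+ * is equivalent to hit / (hit + miss) < 4/5, i.e. a hit ratio below 80%.
+ */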
+
+/*
+ * If the given index is behind the statahead window by more than
+ * SA_OMITTED_ENTRY_MAX, then it is old.
+ */
+static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
+{
+ return ((__u64)sai->sai_max + index + SA_OMITTED_ENTRY_MAX <
+ sai->sai_index);
+}
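+
+/*
+ * For example (illustrative values only): with sai_max == 32,
+ * SA_OMITTED_ENTRY_MAX == 8 and sai_index == 100, any index below
+ * 100 - 32 - 8 == 60 is treated as old and will be dropped.
+ */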
+
+/*
+ * Insert the new entry at the tail of sai_entries at init time.
+ */
+static struct ll_sa_entry *
+ll_sa_entry_alloc(struct ll_statahead_info *sai, __u64 index,
+ const char *name, int len)
+{
+ struct ll_inode_info *lli;
+ struct ll_sa_entry *entry;
+ int entry_size;
+ char *dname;
+
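+ /* Round the name storage up to a 4-byte multiple, with room for the
+ * trailing NUL: e.g. len == 5 gives (5 & ~3) + 4 == 8 bytes. */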
+ entry_size = sizeof(struct ll_sa_entry) + (len & ~3) + 4;
+ OBD_ALLOC(entry, entry_size);
+ if (unlikely(entry == NULL))
+ return ERR_PTR(-ENOMEM);
+
+ CDEBUG(D_READA, "alloc sa entry %.*s(%p) index "LPU64"\n",
+ len, name, entry, index);
+
+ entry->se_index = index;
+
+ /*
+ * Statahead entry reference rules:
+ *
+ * 1) When a statahead entry is initialized, its reference count is set
+ * to 2. One reference is used by the directory scanner. When the scanner
+ * searches the statahead cache for the given name, it can perform a
+ * lockless hash lookup (only the scanner can remove an entry from the
+ * hash list), and once found, it needn't call "atomic_inc()" on the
+ * entry reference, so performance is improved. After using the statahead
+ * entry, the scanner calls "atomic_dec()" to drop the reference held
+ * since initialization. If it is the last reference, the statahead entry
+ * will be freed.
+ *
+ * 2) All other threads, including the statahead thread and the ptlrpcd
+ * thread, must hold a reference on the target entry while processing it,
+ * to guarantee that it will not be released by the directory scanner.
+ * After processing the entry, these threads drop the entry reference.
+ * If it is the last reference, the entry will be freed.
+ *
+ * The second reference taken when the statahead entry is initialized is
+ * used by the statahead thread, following rule 2).
+ */
+ atomic_set(&entry->se_refcount, 2);
+ entry->se_stat = SA_ENTRY_INIT;
+ entry->se_size = entry_size;
+ dname = (char *)entry + sizeof(struct ll_sa_entry);
+ memcpy(dname, name, len);
+ dname[len] = 0;
+ entry->se_qstr.hash = full_name_hash(name, len);
+ entry->se_qstr.len = len;
+ entry->se_qstr.name = dname;
+
+ lli = ll_i2info(sai->sai_inode);
+ spin_lock(&lli->lli_sa_lock);
+ list_add_tail(&entry->se_link, &sai->sai_entries);
+ INIT_LIST_HEAD(&entry->se_list);
+ ll_sa_entry_enhash(sai, entry);
+ spin_unlock(&lli->lli_sa_lock);
+
+ atomic_inc(&sai->sai_cache_count);
+
+ return entry;
+}
+
+/*
+ * Used by the directory scanner to search for an entry by name.
+ *
+ * Only the caller can remove the entry from the hash, so it is unnecessary to
+ * hold the hash lock. It is the caller's duty to release the init refcount on
+ * the entry, so it is also unnecessary to increase the refcount on the entry.
+ */
+static struct ll_sa_entry *
+ll_sa_entry_get_byname(struct ll_statahead_info *sai, const struct qstr *qstr)
+{
+ struct ll_sa_entry *entry;
+ int i = ll_sa_entry_hash(qstr->hash);
+
+ list_for_each_entry(entry, &sai->sai_cache[i], se_hash) {
+ if (entry->se_qstr.hash == qstr->hash &&
+ entry->se_qstr.len == qstr->len &&
+ memcmp(entry->se_qstr.name, qstr->name, qstr->len) == 0)
+ return entry;
+ }
+ return NULL;
+}
+
+/*
+ * Used by the async getattr request callback to find an entry by index.
+ *
+ * Called with lli_sa_lock held to prevent others from changing the list
+ * during the search. The entry refcount is increased before returning to
+ * guarantee that the entry cannot be freed by others.
+ */
+static struct ll_sa_entry *
+ll_sa_entry_get_byindex(struct ll_statahead_info *sai, __u64 index)
+{
+ struct ll_sa_entry *entry;
+
+ list_for_each_entry(entry, &sai->sai_entries, se_link) {
+ if (entry->se_index == index) {
+ LASSERT(atomic_read(&entry->se_refcount) > 0);
+ atomic_inc(&entry->se_refcount);
+ return entry;
+ }
+ if (entry->se_index > index)
+ break;
+ }
+ return NULL;
+}
+
+static void ll_sa_entry_cleanup(struct ll_statahead_info *sai,
+ struct ll_sa_entry *entry)
+{
+ struct md_enqueue_info *minfo = entry->se_minfo;
+ struct ptlrpc_request *req = entry->se_req;
+
+ if (minfo) {
+ entry->se_minfo = NULL;
+ ll_intent_release(&minfo->mi_it);
+ iput(minfo->mi_dir);
+ OBD_FREE_PTR(minfo);
+ }
+
+ if (req) {
+ entry->se_req = NULL;
+ ptlrpc_req_finished(req);
+ }
+}
+
+static void ll_sa_entry_put(struct ll_statahead_info *sai,
+ struct ll_sa_entry *entry)
+{
+ if (atomic_dec_and_test(&entry->se_refcount)) {
+ CDEBUG(D_READA, "free sa entry %.*s(%p) index "LPU64"\n",
+ entry->se_qstr.len, entry->se_qstr.name, entry,
+ entry->se_index);
+
+ LASSERT(list_empty(&entry->se_link));
+ LASSERT(list_empty(&entry->se_list));
+ LASSERT(ll_sa_entry_unhashed(entry));
+
+ ll_sa_entry_cleanup(sai, entry);
+ if (entry->se_inode)
+ iput(entry->se_inode);
+
+ OBD_FREE(entry, entry->se_size);
+ atomic_dec(&sai->sai_cache_count);
+ }
+}
+
+static inline void
+do_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
+{
+ struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
+
+ LASSERT(!ll_sa_entry_unhashed(entry));
+ LASSERT(!list_empty(&entry->se_link));
+
+ ll_sa_entry_unhash(sai, entry);
+
+ spin_lock(&lli->lli_sa_lock);
+ entry->se_stat = SA_ENTRY_DEST;
+ list_del_init(&entry->se_link);
+ if (likely(!list_empty(&entry->se_list)))
+ list_del_init(&entry->se_list);
+ spin_unlock(&lli->lli_sa_lock);
+
+ ll_sa_entry_put(sai, entry);
+}
+
+/*
+ * Delete the entry from the sai_entries_stated list at fini time.
+ */
+static void
+ll_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
+{
+ struct ll_sa_entry *pos, *next;
+
+ if (entry)
+ do_sa_entry_fini(sai, entry);
+
+ /* drop old entries; only the 'scanner' process does this, no need to lock */
+ list_for_each_entry_safe(pos, next, &sai->sai_entries, se_link) {
+ if (!is_omitted_entry(sai, pos->se_index))
+ break;
+ do_sa_entry_fini(sai, pos);
+ }
+}
+
+/*
+ * Called with lli_sa_lock held.
+ */
+static void
+do_sa_entry_to_stated(struct ll_statahead_info *sai,
+ struct ll_sa_entry *entry, se_stat_t stat)
+{
+ struct ll_sa_entry *se;
+ struct list_head *pos = &sai->sai_entries_stated;
+
+ if (!list_empty(&entry->se_list))
+ list_del_init(&entry->se_list);
+
+ list_for_each_entry_reverse(se, &sai->sai_entries_stated, se_list) {
+ if (se->se_index < entry->se_index) {
+ pos = &se->se_list;
+ break;
+ }
+ }
+
+ list_add(&entry->se_list, pos);
+ entry->se_stat = stat;
+}
+
+/*
+ * Move the entry to sai_entries_stated, keeping that list sorted by index.
+ * \retval 1 -- entry to be destroyed.
+ * \retval 0 -- entry is inserted into stated list.
+ */
+static int
+ll_sa_entry_to_stated(struct ll_statahead_info *sai,
+ struct ll_sa_entry *entry, se_stat_t stat)
+{
+ struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
+ int ret = 1;
+
+ ll_sa_entry_cleanup(sai, entry);
+
+ spin_lock(&lli->lli_sa_lock);
+ if (likely(entry->se_stat != SA_ENTRY_DEST)) {
+ do_sa_entry_to_stated(sai, entry, stat);
+ ret = 0;
+ }
+ spin_unlock(&lli->lli_sa_lock);
+
+ return ret;
+}
+
+/*
+ * Insert inode into the list of sai_entries_agl.
+ */
+static void ll_agl_add(struct ll_statahead_info *sai,
+ struct inode *inode, int index)
+{
+ struct ll_inode_info *child = ll_i2info(inode);
+ struct ll_inode_info *parent = ll_i2info(sai->sai_inode);
+ int added = 0;
+
+ spin_lock(&child->lli_agl_lock);
+ if (child->lli_agl_index == 0) {
+ child->lli_agl_index = index;
+ spin_unlock(&child->lli_agl_lock);
+
+ LASSERT(list_empty(&child->lli_agl_list));
+
+ igrab(inode);
+ spin_lock(&parent->lli_agl_lock);
+ if (agl_list_empty(sai))
+ added = 1;
+ list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl);
+ spin_unlock(&parent->lli_agl_lock);
+ } else {
+ spin_unlock(&child->lli_agl_lock);
+ }
+
+ if (added > 0)
+ wake_up(&sai->sai_agl_thread.t_ctl_waitq);
+}
+
+static struct ll_statahead_info *ll_sai_alloc(void)
+{
+ struct ll_statahead_info *sai;
+ int i;
+
+ OBD_ALLOC_PTR(sai);
+ if (!sai)
+ return NULL;
+
+ atomic_set(&sai->sai_refcount, 1);
+
+ spin_lock(&sai_generation_lock);
+ sai->sai_generation = ++sai_generation;
+ if (unlikely(sai_generation == 0))
+ sai->sai_generation = ++sai_generation;
+ spin_unlock(&sai_generation_lock);
+
+ sai->sai_max = LL_SA_RPC_MIN;
+ sai->sai_index = 1;
+ init_waitqueue_head(&sai->sai_waitq);
+ init_waitqueue_head(&sai->sai_thread.t_ctl_waitq);
+ init_waitqueue_head(&sai->sai_agl_thread.t_ctl_waitq);
+
+ INIT_LIST_HEAD(&sai->sai_entries);
+ INIT_LIST_HEAD(&sai->sai_entries_received);
+ INIT_LIST_HEAD(&sai->sai_entries_stated);
+ INIT_LIST_HEAD(&sai->sai_entries_agl);
+
+ for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
+ INIT_LIST_HEAD(&sai->sai_cache[i]);
+ spin_lock_init(&sai->sai_cache_lock[i]);
+ }
+ atomic_set(&sai->sai_cache_count, 0);
+
+ return sai;
+}
+
+static inline struct ll_statahead_info *
+ll_sai_get(struct ll_statahead_info *sai)
+{
+ atomic_inc(&sai->sai_refcount);
+ return sai;
+}
+
+static void ll_sai_put(struct ll_statahead_info *sai)
+{
+ struct inode *inode = sai->sai_inode;
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
+ struct ll_sa_entry *entry, *next;
+
+ if (unlikely(atomic_read(&sai->sai_refcount) > 0)) {
+ /* This is a race case: the interpret callback has just
+ * taken a reference count */
+ spin_unlock(&lli->lli_sa_lock);
+ return;
+ }
+
+ LASSERT(lli->lli_opendir_key == NULL);
+ LASSERT(thread_is_stopped(&sai->sai_thread));
+ LASSERT(thread_is_stopped(&sai->sai_agl_thread));
+
+ lli->lli_sai = NULL;
+ lli->lli_opendir_pid = 0;
+ spin_unlock(&lli->lli_sa_lock);
+
+ if (sai->sai_sent > sai->sai_replied)
+ CDEBUG(D_READA,"statahead for dir "DFID" does not "
+ "finish: [sent:"LPU64"] [replied:"LPU64"]\n",
+ PFID(&lli->lli_fid),
+ sai->sai_sent, sai->sai_replied);
+
+ list_for_each_entry_safe(entry, next,
+ &sai->sai_entries, se_link)
+ do_sa_entry_fini(sai, entry);
+
+ LASSERT(list_empty(&sai->sai_entries));
+ LASSERT(sa_received_empty(sai));
+ LASSERT(list_empty(&sai->sai_entries_stated));
+
+ LASSERT(atomic_read(&sai->sai_cache_count) == 0);
+ LASSERT(agl_list_empty(sai));
+
+ iput(inode);
+ OBD_FREE_PTR(sai);
+ }
+}
+
+/* Do NOT forget to drop the inode refcount taken when the inode was added to sai_entries_agl. */
+static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ __u64 index = lli->lli_agl_index;
+ int rc;
+
+ LASSERT(list_empty(&lli->lli_agl_list));
+
+ /* AGL may fall behind statahead by one entry */
+ if (is_omitted_entry(sai, index + 1)) {
+ lli->lli_agl_index = 0;
+ iput(inode);
+ return;
+ }
+
+ /* Someone is in glimpse (sync or async), do nothing. */
+ rc = down_write_trylock(&lli->lli_glimpse_sem);
+ if (rc == 0) {
+ lli->lli_agl_index = 0;
+ iput(inode);
+ return;
+ }
+
+ /*
+ * Someone triggered a glimpse within the last second.
+ * 1) The former glimpse succeeded with a glimpse lock granted by the OST;
+ * if the lock is still cached on the client, AGL needs to do nothing. If
+ * it has been cancelled by another client, AGL may not be able to obtain
+ * a new lock, since AGL does not trigger a glimpse callback.
+ * 2) The former glimpse succeeded, but the OST did not grant a glimpse
+ * lock. In that case, it is quite possible that the OST will not grant a
+ * glimpse lock for AGL either.
+ * 3) The former glimpse failed; compared with the other two cases, it is
+ * relatively rare. AGL can ignore such a case, as it will not much affect
+ * performance.
+ */
+ if (lli->lli_glimpse_time != 0 &&
+ cfs_time_before(cfs_time_shift(-1), lli->lli_glimpse_time)) {
+ up_write(&lli->lli_glimpse_sem);
+ lli->lli_agl_index = 0;
+ iput(inode);
+ return;
+ }
+
+ CDEBUG(D_READA, "Handling (init) async glimpse: inode = "
+ DFID", idx = "LPU64"\n", PFID(&lli->lli_fid), index);
+
+ cl_agl(inode);
+ lli->lli_agl_index = 0;
+ lli->lli_glimpse_time = cfs_time_current();
+ up_write(&lli->lli_glimpse_sem);
+
+ CDEBUG(D_READA, "Handled (init) async glimpse: inode= "
+ DFID", idx = "LPU64", rc = %d\n",
+ PFID(&lli->lli_fid), index, rc);
+
+ iput(inode);
+}
+
+static void ll_post_statahead(struct ll_statahead_info *sai)
+{
+ struct inode *dir = sai->sai_inode;
+ struct inode *child;
+ struct ll_inode_info *lli = ll_i2info(dir);
+ struct ll_sa_entry *entry;
+ struct md_enqueue_info *minfo;
+ struct lookup_intent *it;
+ struct ptlrpc_request *req;
+ struct mdt_body *body;
+ int rc = 0;
+
+ spin_lock(&lli->lli_sa_lock);
+ if (unlikely(sa_received_empty(sai))) {
+ spin_unlock(&lli->lli_sa_lock);
+ return;
+ }
+ entry = sa_first_received_entry(sai);
+ atomic_inc(&entry->se_refcount);
+ list_del_init(&entry->se_list);
+ spin_unlock(&lli->lli_sa_lock);
+
+ LASSERT(entry->se_handle != 0);
+
+ minfo = entry->se_minfo;
+ it = &minfo->mi_it;
+ req = entry->se_req;
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ if (body == NULL)
+ GOTO(out, rc = -EFAULT);
+
+ child = entry->se_inode;
+ if (child == NULL) {
+ /*
+ * lookup.
+ */
+ LASSERT(fid_is_zero(&minfo->mi_data.op_fid2));
+
+ /* XXX: No fid in reply, this is probably a cross-ref case.
+ * SA can't handle it yet. */
+ if (body->valid & OBD_MD_MDS)
+ GOTO(out, rc = -EAGAIN);
+ } else {
+ /*
+ * revalidate.
+ */
+ /* unlinked and re-created with the same name */
+ if (unlikely(!lu_fid_eq(&minfo->mi_data.op_fid2, &body->fid1))){
+ entry->se_inode = NULL;
+ iput(child);
+ child = NULL;
+ }
+ }
+
+ it->d.lustre.it_lock_handle = entry->se_handle;
+ rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL);
+ if (rc != 1)
+ GOTO(out, rc = -EAGAIN);
+
+ rc = ll_prep_inode(&child, req, dir->i_sb, it);
+ if (rc)
+ GOTO(out, rc);
+
+ CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
+ child, child->i_ino, child->i_generation);
+ ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);
+
+ entry->se_inode = child;
+
+ if (agl_should_run(sai, child))
+ ll_agl_add(sai, child, entry->se_index);
+
+out:
+ /* "ll_sa_entry_to_stated()" will drop the related ldlm ibits lock
+ * reference by calling "ll_intent_drop_lock()", regardless of whether
+ * the above operations failed or not. Do not worry about calling
+ * "ll_intent_drop_lock()" more than once. */
+ rc = ll_sa_entry_to_stated(sai, entry,
+ rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
+ if (rc == 0 && entry->se_index == sai->sai_index_wait)
+ wake_up(&sai->sai_waitq);
+ ll_sa_entry_put(sai, entry);
+}
+
+static int ll_statahead_interpret(struct ptlrpc_request *req,
+ struct md_enqueue_info *minfo, int rc)
+{
+ struct lookup_intent *it = &minfo->mi_it;
+ struct inode *dir = minfo->mi_dir;
+ struct ll_inode_info *lli = ll_i2info(dir);
+ struct ll_statahead_info *sai = NULL;
+ struct ll_sa_entry *entry;
+ int wakeup;
+
+ if (it_disposition(it, DISP_LOOKUP_NEG))
+ rc = -ENOENT;
+
+ spin_lock(&lli->lli_sa_lock);
+ /* stale entry */
+ if (unlikely(lli->lli_sai == NULL ||
+ lli->lli_sai->sai_generation != minfo->mi_generation)) {
+ spin_unlock(&lli->lli_sa_lock);
+ GOTO(out, rc = -ESTALE);
+ } else {
+ sai = ll_sai_get(lli->lli_sai);
+ if (unlikely(!thread_is_running(&sai->sai_thread))) {
+ sai->sai_replied++;
+ spin_unlock(&lli->lli_sa_lock);
+ GOTO(out, rc = -EBADFD);
+ }
+
+ entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata);
+ if (entry == NULL) {
+ sai->sai_replied++;
+ spin_unlock(&lli->lli_sa_lock);
+ GOTO(out, rc = -EIDRM);
+ }
+
+ if (rc != 0) {
+ do_sa_entry_to_stated(sai, entry, SA_ENTRY_INVA);
+ wakeup = (entry->se_index == sai->sai_index_wait);
+ } else {
+ entry->se_minfo = minfo;
+ entry->se_req = ptlrpc_request_addref(req);
+ /* Release the async ibits lock ASAP to avoid deadlock
+ * when the statahead thread tries to enqueue a lock on the
+ * parent for readpage while another thread tries to enqueue
+ * a lock on the child with the parent's lock held, for
+ * example: unlink. */
+ entry->se_handle = it->d.lustre.it_lock_handle;
+ ll_intent_drop_lock(it);
+ wakeup = sa_received_empty(sai);
+ list_add_tail(&entry->se_list,
+ &sai->sai_entries_received);
+ }
+ sai->sai_replied++;
+ spin_unlock(&lli->lli_sa_lock);
+
+ ll_sa_entry_put(sai, entry);
+ if (wakeup)
+ wake_up(&sai->sai_thread.t_ctl_waitq);
+ }
+
+out:
+ if (rc != 0) {
+ ll_intent_release(it);
+ iput(dir);
+ OBD_FREE_PTR(minfo);
+ }
+ if (sai != NULL)
+ ll_sai_put(sai);
+ return rc;
+}
+
+static void sa_args_fini(struct md_enqueue_info *minfo,
+ struct ldlm_enqueue_info *einfo)
+{
+ LASSERT(minfo && einfo);
+ iput(minfo->mi_dir);
+ capa_put(minfo->mi_data.op_capa1);
+ capa_put(minfo->mi_data.op_capa2);
+ OBD_FREE_PTR(minfo);
+ OBD_FREE_PTR(einfo);
+}
+
+/**
+ * There is a race condition between "capa_put" and "ll_statahead_interpret"
+ * when accessing "op_data.op_capa[1,2]", as follows:
+ * "capa_put" releases the "op_data.op_capa[1,2]" reference count after calling
+ * "md_intent_getattr_async". But "ll_statahead_interpret" may run first and
+ * fill "op_data.op_capa[1,2]" with POISON, causing "capa_put" to access an
+ * invalid "ocapa". So reserve "op_data.op_capa[1,2]" in "pcapa" before calling
+ * "md_intent_getattr_async".
+ */
+static int sa_args_init(struct inode *dir, struct inode *child,
+ struct ll_sa_entry *entry, struct md_enqueue_info **pmi,
+ struct ldlm_enqueue_info **pei,
+ struct obd_capa **pcapa)
+{
+ struct qstr *qstr = &entry->se_qstr;
+ struct ll_inode_info *lli = ll_i2info(dir);
+ struct md_enqueue_info *minfo;
+ struct ldlm_enqueue_info *einfo;
+ struct md_op_data *op_data;
+
+ OBD_ALLOC_PTR(einfo);
+ if (einfo == NULL)
+ return -ENOMEM;
+
+ OBD_ALLOC_PTR(minfo);
+ if (minfo == NULL) {
+ OBD_FREE_PTR(einfo);
+ return -ENOMEM;
+ }
+
+ op_data = ll_prep_md_op_data(&minfo->mi_data, dir, child, qstr->name,
+ qstr->len, 0, LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data)) {
+ OBD_FREE_PTR(einfo);
+ OBD_FREE_PTR(minfo);
+ return PTR_ERR(op_data);
+ }
+
+ minfo->mi_it.it_op = IT_GETATTR;
+ minfo->mi_dir = igrab(dir);
+ minfo->mi_cb = ll_statahead_interpret;
+ minfo->mi_generation = lli->lli_sai->sai_generation;
+ minfo->mi_cbdata = entry->se_index;
+
+ einfo->ei_type = LDLM_IBITS;
+ einfo->ei_mode = it_to_lock_mode(&minfo->mi_it);
+ einfo->ei_cb_bl = ll_md_blocking_ast;
+ einfo->ei_cb_cp = ldlm_completion_ast;
+ einfo->ei_cb_gl = NULL;
+ einfo->ei_cbdata = NULL;
+
+ *pmi = minfo;
+ *pei = einfo;
+ pcapa[0] = op_data->op_capa1;
+ pcapa[1] = op_data->op_capa2;
+
+ return 0;
+}
+
+static int do_sa_lookup(struct inode *dir, struct ll_sa_entry *entry)
+{
+ struct md_enqueue_info *minfo;
+ struct ldlm_enqueue_info *einfo;
+ struct obd_capa *capas[2];
+ int rc;
+
+ rc = sa_args_init(dir, NULL, entry, &minfo, &einfo, capas);
+ if (rc)
+ return rc;
+
+ rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
+ if (!rc) {
+ capa_put(capas[0]);
+ capa_put(capas[1]);
+ } else {
+ sa_args_fini(minfo, einfo);
+ }
+
+ return rc;
+}
+
+/**
+ * Similar to ll_revalidate_it().
+ * \retval 1 -- dentry is valid
+ * \retval 0 -- will send a stat-ahead request
+ * \retval others -- preparing the stat-ahead request failed
+ */
+static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
+ struct dentry *dentry)
+{
+ struct inode *inode = dentry->d_inode;
+ struct lookup_intent it = { .it_op = IT_GETATTR,
+ .d.lustre.it_lock_handle = 0 };
+ struct md_enqueue_info *minfo;
+ struct ldlm_enqueue_info *einfo;
+ struct obd_capa *capas[2];
+ int rc;
+
+ if (unlikely(inode == NULL))
+ return 1;
+
+ if (d_mountpoint(dentry))
+ return 1;
+
+ if (unlikely(dentry == dentry->d_sb->s_root))
+ return 1;
+
+ entry->se_inode = igrab(inode);
+ rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),NULL);
+ if (rc == 1) {
+ entry->se_handle = it.d.lustre.it_lock_handle;
+ ll_intent_release(&it);
+ return 1;
+ }
+
+ rc = sa_args_init(dir, inode, entry, &minfo, &einfo, capas);
+ if (rc) {
+ entry->se_inode = NULL;
+ iput(inode);
+ return rc;
+ }
+
+ rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
+ if (!rc) {
+ capa_put(capas[0]);
+ capa_put(capas[1]);
+ } else {
+ entry->se_inode = NULL;
+ iput(inode);
+ sa_args_fini(minfo, einfo);
+ }
+
+ return rc;
+}
+
+static void ll_statahead_one(struct dentry *parent, const char* entry_name,
+ int entry_name_len)
+{
+ struct inode *dir = parent->d_inode;
+ struct ll_inode_info *lli = ll_i2info(dir);
+ struct ll_statahead_info *sai = lli->lli_sai;
+ struct dentry *dentry = NULL;
+ struct ll_sa_entry *entry;
+ int rc;
+ int rc1;
+
+ entry = ll_sa_entry_alloc(sai, sai->sai_index, entry_name,
+ entry_name_len);
+ if (IS_ERR(entry))
+ return;
+
+ dentry = d_lookup(parent, &entry->se_qstr);
+ if (!dentry) {
+ rc = do_sa_lookup(dir, entry);
+ } else {
+ rc = do_sa_revalidate(dir, entry, dentry);
+ if (rc == 1 && agl_should_run(sai, dentry->d_inode))
+ ll_agl_add(sai, dentry->d_inode, entry->se_index);
+ }
+
+ if (dentry != NULL)
+ dput(dentry);
+
+ if (rc) {
+ rc1 = ll_sa_entry_to_stated(sai, entry,
+ rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
+ if (rc1 == 0 && entry->se_index == sai->sai_index_wait)
+ wake_up(&sai->sai_waitq);
+ } else {
+ sai->sai_sent++;
+ }
+
+ sai->sai_index++;
+ /* drop one refcount on entry by ll_sa_entry_alloc */
+ ll_sa_entry_put(sai, entry);
+}
+
+static int ll_agl_thread(void *arg)
+{
+ struct dentry *parent = (struct dentry *)arg;
+ struct inode *dir = parent->d_inode;
+ struct ll_inode_info *plli = ll_i2info(dir);
+ struct ll_inode_info *clli;
+ struct ll_sb_info *sbi = ll_i2sbi(dir);
+ struct ll_statahead_info *sai = ll_sai_get(plli->lli_sai);
+ struct ptlrpc_thread *thread = &sai->sai_agl_thread;
+ struct l_wait_info lwi = { 0 };
+
+ CDEBUG(D_READA, "agl thread started: [pid %d] [parent %.*s]\n",
+ current_pid(), parent->d_name.len, parent->d_name.name);
+
+ atomic_inc(&sbi->ll_agl_total);
+ spin_lock(&plli->lli_agl_lock);
+ sai->sai_agl_valid = 1;
+ thread_set_flags(thread, SVC_RUNNING);
+ spin_unlock(&plli->lli_agl_lock);
+ wake_up(&thread->t_ctl_waitq);
+
+ while (1) {
+ l_wait_event(thread->t_ctl_waitq,
+ !agl_list_empty(sai) ||
+ !thread_is_running(thread),
+ &lwi);
+
+ if (!thread_is_running(thread))
+ break;
+
+ spin_lock(&plli->lli_agl_lock);
+ /* The statahead thread may help to process AGL entries,
+ * so check whether the list is empty again. */
+ if (!agl_list_empty(sai)) {
+ clli = agl_first_entry(sai);
+ list_del_init(&clli->lli_agl_list);
+ spin_unlock(&plli->lli_agl_lock);
+ ll_agl_trigger(&clli->lli_vfs_inode, sai);
+ } else {
+ spin_unlock(&plli->lli_agl_lock);
+ }
+ }
+
+ spin_lock(&plli->lli_agl_lock);
+ sai->sai_agl_valid = 0;
+ while (!agl_list_empty(sai)) {
+ clli = agl_first_entry(sai);
+ list_del_init(&clli->lli_agl_list);
+ spin_unlock(&plli->lli_agl_lock);
+ clli->lli_agl_index = 0;
+ iput(&clli->lli_vfs_inode);
+ spin_lock(&plli->lli_agl_lock);
+ }
+ thread_set_flags(thread, SVC_STOPPED);
+ spin_unlock(&plli->lli_agl_lock);
+ wake_up(&thread->t_ctl_waitq);
+ ll_sai_put(sai);
+ CDEBUG(D_READA, "agl thread stopped: [pid %d] [parent %.*s]\n",
+ current_pid(), parent->d_name.len, parent->d_name.name);
+ return 0;
+}
+
+static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
+{
+ struct ptlrpc_thread *thread = &sai->sai_agl_thread;
+ struct l_wait_info lwi = { 0 };
+ struct ll_inode_info *plli;
+ struct task_struct *task;
+
+ CDEBUG(D_READA, "start agl thread: [pid %d] [parent %.*s]\n",
+ current_pid(), parent->d_name.len, parent->d_name.name);
+
+ plli = ll_i2info(parent->d_inode);
+ task = kthread_run(ll_agl_thread, parent,
+ "ll_agl_%u", plli->lli_opendir_pid);
+ if (IS_ERR(task)) {
+ CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task));
+ thread_set_flags(thread, SVC_STOPPED);
+ return;
+ }
+
+ l_wait_event(thread->t_ctl_waitq,
+ thread_is_running(thread) || thread_is_stopped(thread),
+ &lwi);
+}
+
+static int ll_statahead_thread(void *arg)
+{
+ struct dentry *parent = (struct dentry *)arg;
+ struct inode *dir = parent->d_inode;
+ struct ll_inode_info *plli = ll_i2info(dir);
+ struct ll_inode_info *clli;
+ struct ll_sb_info *sbi = ll_i2sbi(dir);
+ struct ll_statahead_info *sai = ll_sai_get(plli->lli_sai);
+ struct ptlrpc_thread *thread = &sai->sai_thread;
+ struct ptlrpc_thread *agl_thread = &sai->sai_agl_thread;
+ struct page *page;
+ __u64 pos = 0;
+ int first = 0;
+ int rc = 0;
+ struct ll_dir_chain chain;
+ struct l_wait_info lwi = { 0 };
+
+ CDEBUG(D_READA, "statahead thread started: [pid %d] [parent %.*s]\n",
+ current_pid(), parent->d_name.len, parent->d_name.name);
+
+ if (sbi->ll_flags & LL_SBI_AGL_ENABLED)
+ ll_start_agl(parent, sai);
+
+ atomic_inc(&sbi->ll_sa_total);
+ spin_lock(&plli->lli_sa_lock);
+ thread_set_flags(thread, SVC_RUNNING);
+ spin_unlock(&plli->lli_sa_lock);
+ wake_up(&thread->t_ctl_waitq);
+
+ ll_dir_chain_init(&chain);
+ page = ll_get_dir_page(dir, pos, &chain);
+
+ while (1) {
+ struct lu_dirpage *dp;
+ struct lu_dirent *ent;
+
+ if (IS_ERR(page)) {
+ rc = PTR_ERR(page);
+ CDEBUG(D_READA, "error reading dir "DFID" at "LPU64
+ "/"LPU64": [rc %d] [parent %u]\n",
+ PFID(ll_inode2fid(dir)), pos, sai->sai_index,
+ rc, plli->lli_opendir_pid);
+ GOTO(out, rc);
+ }
+
+ dp = page_address(page);
+ for (ent = lu_dirent_start(dp); ent != NULL;
+ ent = lu_dirent_next(ent)) {
+ __u64 hash;
+ int namelen;
+ char *name;
+
+ hash = le64_to_cpu(ent->lde_hash);
+ if (unlikely(hash < pos))
+ /*
+ * Skip until we find target hash value.
+ */
+ continue;
+
+ namelen = le16_to_cpu(ent->lde_namelen);
+ if (unlikely(namelen == 0))
+ /*
+ * Skip dummy record.
+ */
+ continue;
+
+ name = ent->lde_name;
+ if (name[0] == '.') {
+ if (namelen == 1) {
+ /*
+ * skip "."
+ */
+ continue;
+ } else if (name[1] == '.' && namelen == 2) {
+ /*
+ * skip ".."
+ */
+ continue;
+ } else if (!sai->sai_ls_all) {
+ /*
+ * skip hidden files.
+ */
+ sai->sai_skip_hidden++;
+ continue;
+ }
+ }
+
+ /*
+ * don't stat-ahead first entry.
+ */
+ if (unlikely(++first == 1))
+ continue;
+
+keep_it:
+ l_wait_event(thread->t_ctl_waitq,
+ !sa_sent_full(sai) ||
+ !sa_received_empty(sai) ||
+ !agl_list_empty(sai) ||
+ !thread_is_running(thread),
+ &lwi);
+
+interpret_it:
+ while (!sa_received_empty(sai))
+ ll_post_statahead(sai);
+
+ if (unlikely(!thread_is_running(thread))) {
+ ll_release_page(page, 0);
+ GOTO(out, rc = 0);
+ }
+
+ /* If there is no window for metadata statahead, but there
+ * are some AGL entries to be triggered, then try to help
+ * process the AGL entries. */
+ if (sa_sent_full(sai)) {
+ spin_lock(&plli->lli_agl_lock);
+ while (!agl_list_empty(sai)) {
+ clli = agl_first_entry(sai);
+ list_del_init(&clli->lli_agl_list);
+ spin_unlock(&plli->lli_agl_lock);
+ ll_agl_trigger(&clli->lli_vfs_inode,
+ sai);
+
+ if (!sa_received_empty(sai))
+ goto interpret_it;
+
+ if (unlikely(
+ !thread_is_running(thread))) {
+ ll_release_page(page, 0);
+ GOTO(out, rc = 0);
+ }
+
+ if (!sa_sent_full(sai))
+ goto do_it;
+
+ spin_lock(&plli->lli_agl_lock);
+ }
+ spin_unlock(&plli->lli_agl_lock);
+
+ goto keep_it;
+ }
+
+do_it:
+ ll_statahead_one(parent, name, namelen);
+ }
+ pos = le64_to_cpu(dp->ldp_hash_end);
+ if (pos == MDS_DIR_END_OFF) {
+ /*
+ * End of directory reached.
+ */
+ ll_release_page(page, 0);
+ while (1) {
+ l_wait_event(thread->t_ctl_waitq,
+ !sa_received_empty(sai) ||
+ sai->sai_sent == sai->sai_replied ||
+ !thread_is_running(thread),
+ &lwi);
+
+ while (!sa_received_empty(sai))
+ ll_post_statahead(sai);
+
+ if (unlikely(!thread_is_running(thread)))
+ GOTO(out, rc = 0);
+
+ if (sai->sai_sent == sai->sai_replied &&
+ sa_received_empty(sai))
+ break;
+ }
+
+ spin_lock(&plli->lli_agl_lock);
+ while (!agl_list_empty(sai) &&
+ thread_is_running(thread)) {
+ clli = agl_first_entry(sai);
+ list_del_init(&clli->lli_agl_list);
+ spin_unlock(&plli->lli_agl_lock);
+ ll_agl_trigger(&clli->lli_vfs_inode, sai);
+ spin_lock(&plli->lli_agl_lock);
+ }
+ spin_unlock(&plli->lli_agl_lock);
+
+ GOTO(out, rc = 0);
+ } else if (1) {
+ /*
+ * chain is exhausted.
+ * Normal case: continue to the next page.
+ */
+ ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
+ LDF_COLLIDE);
+ sai->sai_in_readpage = 1;
+ page = ll_get_dir_page(dir, pos, &chain);
+ sai->sai_in_readpage = 0;
+ } else {
+ LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
+ ll_release_page(page, 1);
+ /*
+ * go into overflow page.
+ */
+ }
+ }
+
+out:
+ if (sai->sai_agl_valid) {
+ spin_lock(&plli->lli_agl_lock);
+ thread_set_flags(agl_thread, SVC_STOPPING);
+ spin_unlock(&plli->lli_agl_lock);
+ wake_up(&agl_thread->t_ctl_waitq);
+
+ CDEBUG(D_READA, "stop agl thread: [pid %d]\n",
+ current_pid());
+ l_wait_event(agl_thread->t_ctl_waitq,
+ thread_is_stopped(agl_thread),
+ &lwi);
+ } else {
+ /* Set agl_thread flags anyway. */
+ thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
+ }
+ ll_dir_chain_fini(&chain);
+ spin_lock(&plli->lli_sa_lock);
+ if (!sa_received_empty(sai)) {
+ thread_set_flags(thread, SVC_STOPPING);
+ spin_unlock(&plli->lli_sa_lock);
+
+ /* To release the resources held by received entries. */
+ while (!sa_received_empty(sai))
+ ll_post_statahead(sai);
+
+ spin_lock(&plli->lli_sa_lock);
+ }
+ thread_set_flags(thread, SVC_STOPPED);
+ spin_unlock(&plli->lli_sa_lock);
+ wake_up(&sai->sai_waitq);
+ wake_up(&thread->t_ctl_waitq);
+ ll_sai_put(sai);
+ dput(parent);
+ CDEBUG(D_READA, "statahead thread stopped: [pid %d] [parent %.*s]\n",
+ current_pid(), parent->d_name.len, parent->d_name.name);
+ return rc;
+}
+
+/**
+ * called in ll_file_release().
+ */
+void ll_stop_statahead(struct inode *dir, void *key)
+{
+ struct ll_inode_info *lli = ll_i2info(dir);
+
+ if (unlikely(key == NULL))
+ return;
+
+ spin_lock(&lli->lli_sa_lock);
+ if (lli->lli_opendir_key != key || lli->lli_opendir_pid == 0) {
+ spin_unlock(&lli->lli_sa_lock);
+ return;
+ }
+
+ lli->lli_opendir_key = NULL;
+
+ if (lli->lli_sai) {
+ struct l_wait_info lwi = { 0 };
+ struct ptlrpc_thread *thread = &lli->lli_sai->sai_thread;
+
+ if (!thread_is_stopped(thread)) {
+ thread_set_flags(thread, SVC_STOPPING);
+ spin_unlock(&lli->lli_sa_lock);
+ wake_up(&thread->t_ctl_waitq);
+
+ CDEBUG(D_READA, "stop statahead thread: [pid %d]\n",
+ current_pid());
+ l_wait_event(thread->t_ctl_waitq,
+ thread_is_stopped(thread),
+ &lwi);
+ } else {
+ spin_unlock(&lli->lli_sa_lock);
+ }
+
+ /*
+ * Put the ref which was taken on the first statahead_enter.
+ * It may not be the last ref, since some statahead requests
+ * may still be in flight.
+ */
+ ll_sai_put(lli->lli_sai);
+ } else {
+ lli->lli_opendir_pid = 0;
+ spin_unlock(&lli->lli_sa_lock);
+ }
+}
+
+enum {
+ /**
+ * not first dirent, or is "."
+ */
+ LS_NONE_FIRST_DE = 0,
+ /**
+ * the first non-hidden dirent
+ */
+ LS_FIRST_DE,
+ /**
+ * the first hidden dirent, that is "."
+ */
+ LS_FIRST_DOT_DE
+};
+
+static int is_first_dirent(struct inode *dir, struct dentry *dentry)
+{
+ struct ll_dir_chain chain;
+ struct qstr *target = &dentry->d_name;
+ struct page *page;
+ __u64 pos = 0;
+ int dot_de;
+ int rc = LS_NONE_FIRST_DE;
+
+ ll_dir_chain_init(&chain);
+ page = ll_get_dir_page(dir, pos, &chain);
+
+ while (1) {
+ struct lu_dirpage *dp;
+ struct lu_dirent *ent;
+
+ if (IS_ERR(page)) {
+ struct ll_inode_info *lli = ll_i2info(dir);
+
+ rc = PTR_ERR(page);
+ CERROR("error reading dir "DFID" at "LPU64": "
+ "[rc %d] [parent %u]\n",
+ PFID(ll_inode2fid(dir)), pos,
+ rc, lli->lli_opendir_pid);
+ break;
+ }
+
+ dp = page_address(page);
+ for (ent = lu_dirent_start(dp); ent != NULL;
+ ent = lu_dirent_next(ent)) {
+ __u64 hash;
+ int namelen;
+ char *name;
+
+ hash = le64_to_cpu(ent->lde_hash);
+ /* ll_get_dir_page() can return any page containing
+ * the given hash, which may not be the start hash. */
+ if (unlikely(hash < pos))
+ continue;
+
+ namelen = le16_to_cpu(ent->lde_namelen);
+ if (unlikely(namelen == 0))
+ /*
+ * skip dummy record.
+ */
+ continue;
+
+ name = ent->lde_name;
+ if (name[0] == '.') {
+ if (namelen == 1)
+ /*
+ * skip "."
+ */
+ continue;
+ else if (name[1] == '.' && namelen == 2)
+ /*
+ * skip ".."
+ */
+ continue;
+ else
+ dot_de = 1;
+ } else {
+ dot_de = 0;
+ }
+
+ if (dot_de && target->name[0] != '.') {
+ CDEBUG(D_READA, "%.*s skip hidden file %.*s\n",
+ target->len, target->name,
+ namelen, name);
+ continue;
+ }
+
+ if (target->len != namelen ||
+ memcmp(target->name, name, namelen) != 0)
+ rc = LS_NONE_FIRST_DE;
+ else if (!dot_de)
+ rc = LS_FIRST_DE;
+ else
+ rc = LS_FIRST_DOT_DE;
+
+ ll_release_page(page, 0);
+ GOTO(out, rc);
+ }
+ pos = le64_to_cpu(dp->ldp_hash_end);
+ if (pos == MDS_DIR_END_OFF) {
+ /*
+ * End of directory reached.
+ */
+ ll_release_page(page, 0);
+ break;
+ } else if (1) {
+ /*
+ * chain is exhausted.
+ * Normal case: continue to the next page.
+ */
+ ll_release_page(page, le32_to_cpu(dp->ldp_flags) &
+ LDF_COLLIDE);
+ page = ll_get_dir_page(dir, pos, &chain);
+ } else {
+ /*
+ * go into overflow page.
+ */
+ LASSERT(le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
+ ll_release_page(page, 1);
+ }
+ }
+
+out:
+ ll_dir_chain_fini(&chain);
+ return rc;
+}
+
+static void
+ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
+{
+ struct ptlrpc_thread *thread = &sai->sai_thread;
+ struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode);
+ int hit;
+
+ if (entry != NULL && entry->se_stat == SA_ENTRY_SUCC)
+ hit = 1;
+ else
+ hit = 0;
+
+ ll_sa_entry_fini(sai, entry);
+ if (hit) {
+ sai->sai_hit++;
+ sai->sai_consecutive_miss = 0;
+ sai->sai_max = min(2 * sai->sai_max, sbi->ll_sa_max);
+ } else {
+ struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
+
+ sai->sai_miss++;
+ sai->sai_consecutive_miss++;
+ if (sa_low_hit(sai) && thread_is_running(thread)) {
+ atomic_inc(&sbi->ll_sa_wrong);
+ CDEBUG(D_READA, "Statahead for dir "DFID" hit "
+ "ratio too low: hit/miss "LPU64"/"LPU64
+ ", sent/replied "LPU64"/"LPU64", stopping "
+ "statahead thread: pid %d\n",
+ PFID(&lli->lli_fid), sai->sai_hit,
+ sai->sai_miss, sai->sai_sent,
+ sai->sai_replied, current_pid());
+ spin_lock(&lli->lli_sa_lock);
+ if (!thread_is_stopped(thread))
+ thread_set_flags(thread, SVC_STOPPING);
+ spin_unlock(&lli->lli_sa_lock);
+ }
+ }
+
+ if (!thread_is_stopped(thread))
+ wake_up(&thread->t_ctl_waitq);
+}
+
+/**
+ * Start the statahead thread if this is the first dir entry.
+ * Otherwise, if a thread has already been started, wait until it is ahead
+ * of me.
+ * \retval 1 -- entry with lock found in cache, the caller needs to do
+ * nothing.
+ * \retval 0 -- entry found in cache, but without lock, the caller needs to
+ * refresh it from the MDS.
+ * \retval others -- the caller needs to proceed as non-statahead.
+ */
+int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
+ int only_unplug)
+{
+ struct ll_inode_info *lli = ll_i2info(dir);
+ struct ll_statahead_info *sai = lli->lli_sai;
+ struct dentry *parent;
+ struct ll_sa_entry *entry;
+ struct ptlrpc_thread *thread;
+ struct l_wait_info lwi = { 0 };
+ int rc = 0;
+ struct ll_inode_info *plli;
+
+ LASSERT(lli->lli_opendir_pid == current_pid());
+
+ if (sai) {
+ thread = &sai->sai_thread;
+ if (unlikely(thread_is_stopped(thread) &&
+ list_empty(&sai->sai_entries_stated))) {
+ /* to release resource */
+ ll_stop_statahead(dir, lli->lli_opendir_key);
+ return -EAGAIN;
+ }
+
+ if ((*dentryp)->d_name.name[0] == '.') {
+ if (sai->sai_ls_all ||
+ sai->sai_miss_hidden >= sai->sai_skip_hidden) {
+ /*
+ * The hidden dentry is the first one, or the statahead
+ * thread has not skipped that many hidden dentries
+ * before "sai_ls_all" was enabled as below.
+ */
+ } else {
+ if (!sai->sai_ls_all)
+ /*
+ * It may be because the hidden dentry is not
+ * the first one, so "sai_ls_all" was not set
+ * and the "ls -al" entry was missed. Enable
+ * "sai_ls_all" for such a case.
+ */
+ sai->sai_ls_all = 1;
+
+ /*
+ * Such a "getattr" was skipped before
+ * "sai_ls_all" was enabled as above.
+ */
+ sai->sai_miss_hidden++;
+ return -EAGAIN;
+ }
+ }
+
+ entry = ll_sa_entry_get_byname(sai, &(*dentryp)->d_name);
+ if (entry == NULL || only_unplug) {
+ ll_sai_unplug(sai, entry);
+ return entry ? 1 : -EAGAIN;
+ }
+
+ /* if statahead is busy in readdir, help it do post-work */
+ while (!ll_sa_entry_stated(entry) &&
+ sai->sai_in_readpage &&
+ !sa_received_empty(sai))
+ ll_post_statahead(sai);
+
+ if (!ll_sa_entry_stated(entry)) {
+ sai->sai_index_wait = entry->se_index;
+ lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
+ LWI_ON_SIGNAL_NOOP, NULL);
+ rc = l_wait_event(sai->sai_waitq,
+ ll_sa_entry_stated(entry) ||
+ thread_is_stopped(thread),
+ &lwi);
+ if (rc < 0) {
+ ll_sai_unplug(sai, entry);
+ return -EAGAIN;
+ }
+ }
+
+ if (entry->se_stat == SA_ENTRY_SUCC &&
+ entry->se_inode != NULL) {
+ struct inode *inode = entry->se_inode;
+ struct lookup_intent it = { .it_op = IT_GETATTR,
+ .d.lustre.it_lock_handle =
+ entry->se_handle };
+ __u64 bits;
+
+ rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
+ ll_inode2fid(inode), &bits);
+ if (rc == 1) {
+ if ((*dentryp)->d_inode == NULL) {
+ *dentryp = ll_splice_alias(inode,
+ *dentryp);
+ } else if ((*dentryp)->d_inode != inode) {
+ /* revalidate, but inode is recreated */
+ CDEBUG(D_READA,
+ "stale dentry %.*s inode %lu/%u, "
+ "statahead inode %lu/%u\n",
+ (*dentryp)->d_name.len,
+ (*dentryp)->d_name.name,
+ (*dentryp)->d_inode->i_ino,
+ (*dentryp)->d_inode->i_generation,
+ inode->i_ino,
+ inode->i_generation);
+ ll_sai_unplug(sai, entry);
+ return -ESTALE;
+ } else {
+ iput(inode);
+ }
+ entry->se_inode = NULL;
+
+ if ((bits & MDS_INODELOCK_LOOKUP) &&
+ d_lustre_invalid(*dentryp))
+ d_lustre_revalidate(*dentryp);
+ ll_intent_release(&it);
+ }
+ }
+
+ ll_sai_unplug(sai, entry);
+ return rc;
+ }
+
+ /* I am the "lli_opendir_pid" owner; only I can set "lli_sai". */
+ rc = is_first_dirent(dir, *dentryp);
+ if (rc == LS_NONE_FIRST_DE)
+ /* It is not an "ls -{a}l" operation; no need for statahead. */
+ GOTO(out, rc = -EAGAIN);
+
+ sai = ll_sai_alloc();
+ if (sai == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ sai->sai_ls_all = (rc == LS_FIRST_DOT_DE);
+ sai->sai_inode = igrab(dir);
+ if (unlikely(sai->sai_inode == NULL)) {
+ CWARN("Do not start stat ahead on dying inode "DFID"\n",
+ PFID(&lli->lli_fid));
+ GOTO(out, rc = -ESTALE);
+ }
+
+ /* get parent reference count here, and put it in ll_statahead_thread */
+ parent = dget((*dentryp)->d_parent);
+ if (unlikely(sai->sai_inode != parent->d_inode)) {
+ struct ll_inode_info *nlli = ll_i2info(parent->d_inode);
+
+ CWARN("Race condition, someone changed %.*s just now: "
+ "old parent "DFID", new parent "DFID"\n",
+ (*dentryp)->d_name.len, (*dentryp)->d_name.name,
+ PFID(&lli->lli_fid), PFID(&nlli->lli_fid));
+ dput(parent);
+ iput(sai->sai_inode);
+ GOTO(out, rc = -EAGAIN);
+ }
+
+ CDEBUG(D_READA, "start statahead thread: [pid %d] [parent %.*s]\n",
+ current_pid(), parent->d_name.len, parent->d_name.name);
+
+ lli->lli_sai = sai;
+
+ plli = ll_i2info(parent->d_inode);
+ rc = PTR_ERR(kthread_run(ll_statahead_thread, parent,
+ "ll_sa_%u", plli->lli_opendir_pid));
+ thread = &sai->sai_thread;
+ if (IS_ERR_VALUE(rc)) {
+ CERROR("can't start ll_sa thread, rc: %d\n", rc);
+ dput(parent);
+ lli->lli_opendir_key = NULL;
+ thread_set_flags(thread, SVC_STOPPED);
+ thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
+ ll_sai_put(sai);
+ LASSERT(lli->lli_sai == NULL);
+ return -EAGAIN;
+ }
+
+ l_wait_event(thread->t_ctl_waitq,
+ thread_is_running(thread) || thread_is_stopped(thread),
+ &lwi);
+
+ /*
+ * We don't stat-ahead for the first dirent since we are already in
+ * lookup.
+ */
+ return -EAGAIN;
+
+out:
+ if (sai != NULL)
+ OBD_FREE_PTR(sai);
+ spin_lock(&lli->lli_sa_lock);
+ lli->lli_opendir_key = NULL;
+ lli->lli_opendir_pid = 0;
+ spin_unlock(&lli->lli_sa_lock);
+ return rc;
+}
diff --git a/drivers/staging/lustre/lustre/llite/super25.c b/drivers/staging/lustre/lustre/llite/super25.c
new file mode 100644
index 0000000..0beaf4e
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/super25.c
@@ -0,0 +1,225 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <lustre_lite.h>
+#include <lustre_ha.h>
+#include <lustre_dlm.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <lprocfs_status.h>
+#include "llite_internal.h"
+
+static struct kmem_cache *ll_inode_cachep;
+
+static struct inode *ll_alloc_inode(struct super_block *sb)
+{
+ struct ll_inode_info *lli;
+ ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_ALLOC_INODE, 1);
+ OBD_SLAB_ALLOC_PTR_GFP(lli, ll_inode_cachep, __GFP_IO);
+ if (lli == NULL)
+ return NULL;
+
+ inode_init_once(&lli->lli_vfs_inode);
+ return &lli->lli_vfs_inode;
+}
+
+static void ll_inode_destroy_callback(struct rcu_head *head)
+{
+ struct inode *inode = container_of(head, struct inode, i_rcu);
+ struct ll_inode_info *ptr = ll_i2info(inode);
+ OBD_SLAB_FREE_PTR(ptr, ll_inode_cachep);
+}
+
+static void ll_destroy_inode(struct inode *inode)
+{
+ call_rcu(&inode->i_rcu, ll_inode_destroy_callback);
+}
+
+int ll_init_inodecache(void)
+{
+ ll_inode_cachep = kmem_cache_create("lustre_inode_cache",
+ sizeof(struct ll_inode_info),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (ll_inode_cachep == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+void ll_destroy_inodecache(void)
+{
+ kmem_cache_destroy(ll_inode_cachep);
+}
+
+/* exported operations */
+struct super_operations lustre_super_operations =
+{
+ .alloc_inode = ll_alloc_inode,
+ .destroy_inode = ll_destroy_inode,
+ .evict_inode = ll_delete_inode,
+ .put_super = ll_put_super,
+ .statfs = ll_statfs,
+ .umount_begin = ll_umount_begin,
+ .remount_fs = ll_remount_fs,
+ .show_options = ll_show_options,
+};
+MODULE_ALIAS_FS("lustre");
+
+void lustre_register_client_process_config(int (*cpc)(struct lustre_cfg *lcfg));
+
+int vvp_global_init(void);
+void vvp_global_fini(void);
+
+static int __init init_lustre_lite(void)
+{
+ int i, rc, seed[2];
+ struct timeval tv;
+ lnet_process_id_t lnet_id;
+
+ CLASSERT(sizeof(LUSTRE_VOLATILE_HDR) == LUSTRE_VOLATILE_HDR_LEN + 1);
+
+ /* print an address of _any_ initialized kernel symbol from this
+ * module, to allow debugging with gdb that doesn't support data
+ * symbols from modules.*/
+ CDEBUG(D_INFO, "Lustre client module (%p).\n",
+ &lustre_super_operations);
+
+ rc = ll_init_inodecache();
+ if (rc)
+ return -ENOMEM;
+ ll_file_data_slab = kmem_cache_create("ll_file_data",
+ sizeof(struct ll_file_data), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (ll_file_data_slab == NULL) {
+ ll_destroy_inodecache();
+ return -ENOMEM;
+ }
+
+ ll_remote_perm_cachep = kmem_cache_create("ll_remote_perm_cache",
+ sizeof(struct ll_remote_perm),
+ 0, 0, NULL);
+ if (ll_remote_perm_cachep == NULL) {
+ kmem_cache_destroy(ll_file_data_slab);
+ ll_file_data_slab = NULL;
+ ll_destroy_inodecache();
+ return -ENOMEM;
+ }
+
+ ll_rmtperm_hash_cachep = kmem_cache_create("ll_rmtperm_hash_cache",
+ REMOTE_PERM_HASHSIZE *
+ sizeof(struct list_head),
+ 0, 0, NULL);
+ if (ll_rmtperm_hash_cachep == NULL) {
+ kmem_cache_destroy(ll_remote_perm_cachep);
+ ll_remote_perm_cachep = NULL;
+ kmem_cache_destroy(ll_file_data_slab);
+ ll_file_data_slab = NULL;
+ ll_destroy_inodecache();
+ return -ENOMEM;
+ }
+
+ proc_lustre_fs_root = proc_lustre_root ?
+ lprocfs_register("llite", proc_lustre_root, NULL, NULL) : NULL;
+
+ lustre_register_client_fill_super(ll_fill_super);
+ lustre_register_kill_super_cb(ll_kill_super);
+
+ lustre_register_client_process_config(ll_process_config);
+
+ cfs_get_random_bytes(seed, sizeof(seed));
+
+	/* Nodes with small feet have little entropy;
+	 * the NID for this node gives the most entropy in the low bits. */
+	for (i = 0; ; i++) {
+		if (LNetGetId(i, &lnet_id) == -ENOENT)
+			break;
+		if (LNET_NETTYP(LNET_NIDNET(lnet_id.nid)) != LOLND)
+			seed[0] ^= LNET_NIDADDR(lnet_id.nid);
+	}
+
+ do_gettimeofday(&tv);
+ cfs_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);
+
+ init_timer(&ll_capa_timer);
+ ll_capa_timer.function = ll_capa_timer_callback;
+ rc = ll_capa_thread_start();
+ /*
+ * XXX normal cleanup is needed here.
+ */
+ if (rc == 0)
+ rc = vvp_global_init();
+
+ return rc;
+}
+
+static void __exit exit_lustre_lite(void)
+{
+ vvp_global_fini();
+ del_timer(&ll_capa_timer);
+ ll_capa_thread_stop();
+ LASSERTF(capa_count[CAPA_SITE_CLIENT] == 0,
+ "client remaining capa count %d\n",
+ capa_count[CAPA_SITE_CLIENT]);
+
+ lustre_register_client_fill_super(NULL);
+ lustre_register_kill_super_cb(NULL);
+
+ lustre_register_client_process_config(NULL);
+
+ ll_destroy_inodecache();
+
+ kmem_cache_destroy(ll_rmtperm_hash_cachep);
+ ll_rmtperm_hash_cachep = NULL;
+
+ kmem_cache_destroy(ll_remote_perm_cachep);
+ ll_remote_perm_cachep = NULL;
+
+ kmem_cache_destroy(ll_file_data_slab);
+ if (proc_lustre_fs_root && !IS_ERR(proc_lustre_fs_root))
+ lprocfs_remove(&proc_lustre_fs_root);
+}
+
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("Lustre Lite Client File System");
+MODULE_LICENSE("GPL");
+
+module_init(init_lustre_lite);
+module_exit(exit_lustre_lite);
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
new file mode 100644
index 0000000..ab06891
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/symlink.c
@@ -0,0 +1,188 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+		while ((vma = our_vma(mm, addr, count)) != NULL) {
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/stat.h>
+#define DEBUG_SUBSYSTEM S_LLITE
+
+#include <lustre_lite.h>
+#include "llite_internal.h"
+
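+/*
+ * Fetch the symlink target for @inode.  The cached copy in
+ * lli_symlink_name is used when present; otherwise an MDS getattr with
+ * OBD_MD_LINKNAME is issued and the reply is cached for later lookups.
+ */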
+static int ll_readlink_internal(struct inode *inode,
+ struct ptlrpc_request **request, char **symname)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ int rc, symlen = i_size_read(inode) + 1;
+ struct mdt_body *body;
+ struct md_op_data *op_data;
+
+ *request = NULL;
+
+ if (lli->lli_symlink_name) {
+ int print_limit = min_t(int, PAGE_SIZE - 128, symlen);
+
+ *symname = lli->lli_symlink_name;
+		/* If the total CDEBUG() size is larger than a page, it
+		 * will print a warning to the console; avoid this by
+		 * printing just the last part of the symlink. */
+ CDEBUG(D_INODE, "using cached symlink %s%.*s, len = %d\n",
+ print_limit < symlen ? "..." : "", print_limit,
+ (*symname) + symlen - print_limit, symlen);
+ return 0;
+ }
+
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, symlen,
+ LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+
+ op_data->op_valid = OBD_MD_LINKNAME;
+ rc = md_getattr(sbi->ll_md_exp, op_data, request);
+ ll_finish_md_op_data(op_data);
+ if (rc) {
+ if (rc != -ENOENT)
+ CERROR("inode %lu: rc = %d\n", inode->i_ino, rc);
+		GOTO(failed, rc);
+ }
+
+ body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY);
+ LASSERT(body != NULL);
+ if ((body->valid & OBD_MD_LINKNAME) == 0) {
+ CERROR("OBD_MD_LINKNAME not set on reply\n");
+ GOTO(failed, rc = -EPROTO);
+ }
+
+ LASSERT(symlen != 0);
+ if (body->eadatasize != symlen) {
+		CERROR("inode %lu: symlink length %d does not match expected %d\n",
+ inode->i_ino, body->eadatasize - 1, symlen - 1);
+ GOTO(failed, rc = -EPROTO);
+ }
+
+ *symname = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_MD);
+ if (*symname == NULL ||
+ strnlen(*symname, symlen) != symlen - 1) {
+ /* not full/NULL terminated */
+		CERROR("inode %lu: symlink not NULL terminated string "
+ "of length %d\n", inode->i_ino, symlen - 1);
+ GOTO(failed, rc = -EPROTO);
+ }
+
+ OBD_ALLOC(lli->lli_symlink_name, symlen);
+ /* do not return an error if we cannot cache the symlink locally */
+ if (lli->lli_symlink_name) {
+ memcpy(lli->lli_symlink_name, *symname, symlen);
+ *symname = lli->lli_symlink_name;
+ }
+ return 0;
+
+failed:
+ return rc;
+}
+
+static int ll_readlink(struct dentry *dentry, char *buffer, int buflen)
+{
+ struct inode *inode = dentry->d_inode;
+ struct ptlrpc_request *request;
+ char *symname;
+ int rc;
+
+ CDEBUG(D_VFSTRACE, "VFS Op\n");
+
+ ll_inode_size_lock(inode);
+ rc = ll_readlink_internal(inode, &request, &symname);
+ if (rc)
+ GOTO(out, rc);
+
+ rc = vfs_readlink(dentry, buffer, buflen, symname);
+ out:
+ ptlrpc_req_finished(request);
+ ll_inode_size_unlock(inode);
+ return rc;
+}
+
+static void *ll_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+ struct inode *inode = dentry->d_inode;
+ struct ptlrpc_request *request = NULL;
+ int rc;
+ char *symname;
+
+ CDEBUG(D_VFSTRACE, "VFS Op\n");
+ /* Limit the recursive symlink depth to 5 instead of default
+ * 8 links when kernel has 4k stack to prevent stack overflow.
+ * For 8k stacks we need to limit it to 7 for local servers. */
+ if (THREAD_SIZE < 8192 && current->link_count >= 6) {
+ rc = -ELOOP;
+ } else if (THREAD_SIZE == 8192 && current->link_count >= 8) {
+ rc = -ELOOP;
+ } else {
+ ll_inode_size_lock(inode);
+ rc = ll_readlink_internal(inode, &request, &symname);
+ ll_inode_size_unlock(inode);
+ }
+ if (rc) {
+ ptlrpc_req_finished(request);
+ request = NULL;
+ symname = ERR_PTR(rc);
+ }
+
+ nd_set_link(nd, symname);
+	/* symname may contain a pointer into the request message buffer,
+	 * so we delay releasing the request until ll_put_link().
+	 */
+ return request;
+}
+
+static void ll_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
+{
+ ptlrpc_req_finished(cookie);
+}
+
+struct inode_operations ll_fast_symlink_inode_operations = {
+ .readlink = ll_readlink,
+ .setattr = ll_setattr,
+ .follow_link = ll_follow_link,
+ .put_link = ll_put_link,
+ .getattr = ll_getattr,
+ .permission = ll_inode_permission,
+ .setxattr = ll_setxattr,
+ .getxattr = ll_getxattr,
+ .listxattr = ll_listxattr,
+ .removexattr = ll_removexattr,
+};
diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c
new file mode 100644
index 0000000..be125b9
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c
@@ -0,0 +1,545 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * cl_device and cl_device_type implementation for VVP layer.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+
+#include <obd.h>
+#include <lustre_lite.h>
+
+#include "vvp_internal.h"
+
+/*****************************************************************************
+ *
+ * Vvp device and device type functions.
+ *
+ */
+
+/*
+ * vvp_ prefix stands for "Vfs Vm Posix". It corresponds to historical
+ * "llite_" (var. "ll_") prefix.
+ */
+
+struct kmem_cache *vvp_thread_kmem;
+static struct kmem_cache *vvp_session_kmem;
+static struct lu_kmem_descr vvp_caches[] = {
+ {
+ .ckd_cache = &vvp_thread_kmem,
+ .ckd_name = "vvp_thread_kmem",
+ .ckd_size = sizeof (struct vvp_thread_info),
+ },
+ {
+ .ckd_cache = &vvp_session_kmem,
+ .ckd_name = "vvp_session_kmem",
+ .ckd_size = sizeof (struct vvp_session)
+ },
+ {
+ .ckd_cache = NULL
+ }
+};
+
+static void *vvp_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key)
+{
+ struct vvp_thread_info *info;
+
+ OBD_SLAB_ALLOC_PTR_GFP(info, vvp_thread_kmem, __GFP_IO);
+ if (info == NULL)
+ info = ERR_PTR(-ENOMEM);
+ return info;
+}
+
+static void vvp_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
+{
+ struct vvp_thread_info *info = data;
+ OBD_SLAB_FREE_PTR(info, vvp_thread_kmem);
+}
+
+static void *vvp_session_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key)
+{
+ struct vvp_session *session;
+
+ OBD_SLAB_ALLOC_PTR_GFP(session, vvp_session_kmem, __GFP_IO);
+ if (session == NULL)
+ session = ERR_PTR(-ENOMEM);
+ return session;
+}
+
+static void vvp_session_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
+{
+ struct vvp_session *session = data;
+ OBD_SLAB_FREE_PTR(session, vvp_session_kmem);
+}
+
+
+struct lu_context_key vvp_key = {
+ .lct_tags = LCT_CL_THREAD,
+ .lct_init = vvp_key_init,
+ .lct_fini = vvp_key_fini
+};
+
+struct lu_context_key vvp_session_key = {
+ .lct_tags = LCT_SESSION,
+ .lct_init = vvp_session_key_init,
+ .lct_fini = vvp_session_key_fini
+};
+
+/* type constructor/destructor: vvp_type_{init,fini,start,stop}(). */
+LU_TYPE_INIT_FINI(vvp, &ccc_key, &ccc_session_key, &vvp_key, &vvp_session_key);
+
+static const struct lu_device_operations vvp_lu_ops = {
+ .ldo_object_alloc = vvp_object_alloc
+};
+
+static const struct cl_device_operations vvp_cl_ops = {
+ .cdo_req_init = ccc_req_init
+};
+
+static struct lu_device *vvp_device_alloc(const struct lu_env *env,
+ struct lu_device_type *t,
+ struct lustre_cfg *cfg)
+{
+ return ccc_device_alloc(env, t, cfg, &vvp_lu_ops, &vvp_cl_ops);
+}
+
+static const struct lu_device_type_operations vvp_device_type_ops = {
+ .ldto_init = vvp_type_init,
+ .ldto_fini = vvp_type_fini,
+
+ .ldto_start = vvp_type_start,
+ .ldto_stop = vvp_type_stop,
+
+ .ldto_device_alloc = vvp_device_alloc,
+ .ldto_device_free = ccc_device_free,
+ .ldto_device_init = ccc_device_init,
+ .ldto_device_fini = ccc_device_fini
+};
+
+struct lu_device_type vvp_device_type = {
+ .ldt_tags = LU_DEVICE_CL,
+ .ldt_name = LUSTRE_VVP_NAME,
+ .ldt_ops = &vvp_device_type_ops,
+ .ldt_ctx_tags = LCT_CL_THREAD
+};
+
+/**
+ * Initialize the vvp kmem caches and register the vvp device type with
+ * the client common code.
+ */
+int vvp_global_init(void)
+{
+ int result;
+
+ result = lu_kmem_init(vvp_caches);
+ if (result == 0) {
+ result = ccc_global_init(&vvp_device_type);
+ if (result != 0)
+ lu_kmem_fini(vvp_caches);
+ }
+ return result;
+}
+
+void vvp_global_fini(void)
+{
+ ccc_global_fini(&vvp_device_type);
+ lu_kmem_fini(vvp_caches);
+}
+
+
+/*****************************************************************************
+ *
+ * mirror obd-devices into cl devices.
+ *
+ */
+
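+/*
+ * Set up the cl-stack for a freshly mounted super block: build the vvp
+ * device on top of the data export's lu_device and remember the
+ * resulting cl_device and lu_site in ll_sb_info.
+ */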
+int cl_sb_init(struct super_block *sb)
+{
+ struct ll_sb_info *sbi;
+ struct cl_device *cl;
+ struct lu_env *env;
+ int rc = 0;
+ int refcheck;
+
+ sbi = ll_s2sbi(sb);
+ env = cl_env_get(&refcheck);
+ if (!IS_ERR(env)) {
+ cl = cl_type_setup(env, NULL, &vvp_device_type,
+ sbi->ll_dt_exp->exp_obd->obd_lu_dev);
+ if (!IS_ERR(cl)) {
+ cl2ccc_dev(cl)->cdv_sb = sb;
+ sbi->ll_cl = cl;
+ sbi->ll_site = cl2lu_dev(cl)->ld_site;
+ }
+ cl_env_put(env, &refcheck);
+ } else
+ rc = PTR_ERR(env);
+ return rc;
+}
+
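+/*
+ * Tear down the cl-stack built by cl_sb_init() for this super block and
+ * drop the cached cl_device and lu_site references.
+ */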
+int cl_sb_fini(struct super_block *sb)
+{
+ struct ll_sb_info *sbi;
+ struct lu_env *env;
+ struct cl_device *cld;
+ int refcheck;
+ int result;
+
+ sbi = ll_s2sbi(sb);
+ env = cl_env_get(&refcheck);
+ if (!IS_ERR(env)) {
+ cld = sbi->ll_cl;
+
+ if (cld != NULL) {
+ cl_stack_fini(env, cld);
+ sbi->ll_cl = NULL;
+ sbi->ll_site = NULL;
+ }
+ cl_env_put(env, &refcheck);
+ result = 0;
+ } else {
+ CERROR("Cannot cleanup cl-stack due to memory shortage.\n");
+ result = PTR_ERR(env);
+ }
+ /*
+	 * If the mount failed (sbi->ll_cl == NULL) and there are no other
+	 * mounts, stop the device types manually (this usually happens
+	 * automatically when the last device is destroyed).
+ */
+ lu_types_stop();
+ return result;
+}
+
+/****************************************************************************
+ *
+ * /proc/fs/lustre/llite/$MNT/dump_page_cache
+ *
+ ****************************************************************************/
+
+/*
+ * To represent the contents of the page cache as a byte stream, the
+ * following information is encoded in a 64-bit offset:
+ *
+ *       - file hash bucket in lu_site::ls_hash[]       28 bits
+ *
+ *       - how far the file is from the bucket head      4 bits
+ *
+ *       - page index                                   32 bits
+ *
+ * The first two fields identify a file in the cache uniquely.
+ */
+
+#define PGC_OBJ_SHIFT (32 + 4)
+#define PGC_DEPTH_SHIFT (32)
+
+struct vvp_pgcache_id {
+ unsigned vpi_bucket;
+ unsigned vpi_depth;
+ uint32_t vpi_index;
+
+ unsigned vpi_curdep;
+ struct lu_object_header *vpi_obj;
+};
+
+static void vvp_pgcache_id_unpack(loff_t pos, struct vvp_pgcache_id *id)
+{
+ CLASSERT(sizeof(pos) == sizeof(__u64));
+
+ id->vpi_index = pos & 0xffffffff;
+ id->vpi_depth = (pos >> PGC_DEPTH_SHIFT) & 0xf;
+ id->vpi_bucket = ((unsigned long long)pos >> PGC_OBJ_SHIFT);
+}
+
+static loff_t vvp_pgcache_id_pack(struct vvp_pgcache_id *id)
+{
+ return
+ ((__u64)id->vpi_index) |
+ ((__u64)id->vpi_depth << PGC_DEPTH_SHIFT) |
+ ((__u64)id->vpi_bucket << PGC_OBJ_SHIFT);
+}
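+/*
+ * For example (illustrative values only): bucket 0x5, depth 0x3 and page
+ * index 0x10 pack to (0x5ULL << 36) | (0x3ULL << 32) | 0x10, i.e. the
+ * position 0x5300000010; unpacking that position recovers the triple.
+ */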
+
+static int vvp_pgcache_obj_get(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode, void *data)
+{
+ struct vvp_pgcache_id *id = data;
+ struct lu_object_header *hdr = cfs_hash_object(hs, hnode);
+
+ if (id->vpi_curdep-- > 0)
+ return 0; /* continue */
+
+ if (lu_object_is_dying(hdr))
+ return 1;
+
+ cfs_hash_get(hs, hnode);
+ id->vpi_obj = hdr;
+ return 1;
+}
+
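+/*
+ * Return the cl_object that sits id->vpi_depth entries from the head of
+ * hash bucket id->vpi_bucket, with a "dump" reference added, or NULL if
+ * the bucket is exhausted or the object is dying.
+ */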
+static struct cl_object *vvp_pgcache_obj(const struct lu_env *env,
+ struct lu_device *dev,
+ struct vvp_pgcache_id *id)
+{
+ LASSERT(lu_device_is_cl(dev));
+
+ id->vpi_depth &= 0xf;
+ id->vpi_obj = NULL;
+ id->vpi_curdep = id->vpi_depth;
+
+ cfs_hash_hlist_for_each(dev->ld_site->ls_obj_hash, id->vpi_bucket,
+ vvp_pgcache_obj_get, id);
+ if (id->vpi_obj != NULL) {
+ struct lu_object *lu_obj;
+
+ lu_obj = lu_object_locate(id->vpi_obj, dev->ld_type);
+ if (lu_obj != NULL) {
+ lu_object_ref_add(lu_obj, "dump", current);
+ return lu2cl(lu_obj);
+ }
+ lu_object_put(env, lu_object_top(id->vpi_obj));
+
+ } else if (id->vpi_curdep > 0) {
+ id->vpi_depth = 0xf;
+ }
+ return NULL;
+}
+
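+/*
+ * Starting from the position encoded in @pos, scan forward for the next
+ * cached page and return its packed id, or ~0ULL once the whole site
+ * has been walked.
+ */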
+static loff_t vvp_pgcache_find(const struct lu_env *env,
+ struct lu_device *dev, loff_t pos)
+{
+ struct cl_object *clob;
+ struct lu_site *site;
+ struct vvp_pgcache_id id;
+
+ site = dev->ld_site;
+ vvp_pgcache_id_unpack(pos, &id);
+
+ while (1) {
+ if (id.vpi_bucket >= CFS_HASH_NHLIST(site->ls_obj_hash))
+ return ~0ULL;
+ clob = vvp_pgcache_obj(env, dev, &id);
+ if (clob != NULL) {
+ struct cl_object_header *hdr;
+ int nr;
+ struct cl_page *pg;
+
+ /* got an object. Find next page. */
+ hdr = cl_object_header(clob);
+
+ spin_lock(&hdr->coh_page_guard);
+ nr = radix_tree_gang_lookup(&hdr->coh_tree,
+ (void **)&pg,
+ id.vpi_index, 1);
+ if (nr > 0) {
+ id.vpi_index = pg->cp_index;
+				/* Can't support files over 16TB */
+ nr = !(pg->cp_index > 0xffffffff);
+ }
+ spin_unlock(&hdr->coh_page_guard);
+
+ lu_object_ref_del(&clob->co_lu, "dump", current);
+ cl_object_put(env, clob);
+ if (nr > 0)
+ return vvp_pgcache_id_pack(&id);
+ }
+ /* to the next object. */
+ ++id.vpi_depth;
+ id.vpi_depth &= 0xf;
+ if (id.vpi_depth == 0 && ++id.vpi_bucket == 0)
+ return ~0ULL;
+ id.vpi_index = 0;
+ }
+}
+
+#define seq_page_flag(seq, page, flag, has_flags) do { \
+ if (test_bit(PG_##flag, &(page)->flags)) { \
+ seq_printf(seq, "%s"#flag, has_flags ? "|" : ""); \
+ has_flags = 1; \
+ } \
+} while (0)
+
+static void vvp_pgcache_page_show(const struct lu_env *env,
+ struct seq_file *seq, struct cl_page *page)
+{
+ struct ccc_page *cpg;
+ struct page *vmpage;
+ int has_flags;
+
+ cpg = cl2ccc_page(cl_page_at(page, &vvp_device_type));
+ vmpage = cpg->cpg_page;
+	seq_printf(seq, " %5i | %p %p %s %s %s %s | %p %lu/%u(%p) %lu %u [",
+ 0 /* gen */,
+ cpg, page,
+ "none",
+ cpg->cpg_write_queued ? "wq" : "- ",
+ cpg->cpg_defer_uptodate ? "du" : "- ",
+ PageWriteback(vmpage) ? "wb" : "-",
+ vmpage, vmpage->mapping->host->i_ino,
+ vmpage->mapping->host->i_generation,
+ vmpage->mapping->host, vmpage->index,
+ page_count(vmpage));
+ has_flags = 0;
+ seq_page_flag(seq, vmpage, locked, has_flags);
+ seq_page_flag(seq, vmpage, error, has_flags);
+ seq_page_flag(seq, vmpage, referenced, has_flags);
+ seq_page_flag(seq, vmpage, uptodate, has_flags);
+ seq_page_flag(seq, vmpage, dirty, has_flags);
+ seq_page_flag(seq, vmpage, writeback, has_flags);
+ seq_printf(seq, "%s]\n", has_flags ? "" : "-");
+}
+
+static int vvp_pgcache_show(struct seq_file *f, void *v)
+{
+ loff_t pos;
+ struct ll_sb_info *sbi;
+ struct cl_object *clob;
+ struct lu_env *env;
+ struct cl_page *page;
+ struct cl_object_header *hdr;
+ struct vvp_pgcache_id id;
+ int refcheck;
+ int result;
+
+ env = cl_env_get(&refcheck);
+ if (!IS_ERR(env)) {
+ pos = *(loff_t *) v;
+ vvp_pgcache_id_unpack(pos, &id);
+ sbi = f->private;
+ clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id);
+ if (clob != NULL) {
+ hdr = cl_object_header(clob);
+
+ spin_lock(&hdr->coh_page_guard);
+ page = cl_page_lookup(hdr, id.vpi_index);
+ spin_unlock(&hdr->coh_page_guard);
+
+ seq_printf(f, "%8x@"DFID": ",
+ id.vpi_index, PFID(&hdr->coh_lu.loh_fid));
+ if (page != NULL) {
+ vvp_pgcache_page_show(env, f, page);
+ cl_page_put(env, page);
+ } else
+ seq_puts(f, "missing\n");
+ lu_object_ref_del(&clob->co_lu, "dump", current);
+ cl_object_put(env, clob);
+ } else
+ seq_printf(f, "%llx missing\n", pos);
+ cl_env_put(env, &refcheck);
+ result = 0;
+ } else
+ result = PTR_ERR(env);
+ return result;
+}
+
+static void *vvp_pgcache_start(struct seq_file *f, loff_t *pos)
+{
+ struct ll_sb_info *sbi;
+ struct lu_env *env;
+ int refcheck;
+
+ sbi = f->private;
+
+ env = cl_env_get(&refcheck);
+ if (!IS_ERR(env)) {
+ sbi = f->private;
+ if (sbi->ll_site->ls_obj_hash->hs_cur_bits > 64 - PGC_OBJ_SHIFT)
+ pos = ERR_PTR(-EFBIG);
+ else {
+ *pos = vvp_pgcache_find(env, &sbi->ll_cl->cd_lu_dev,
+ *pos);
+ if (*pos == ~0ULL)
+ pos = NULL;
+ }
+ cl_env_put(env, &refcheck);
+ }
+ return pos;
+}
+
+static void *vvp_pgcache_next(struct seq_file *f, void *v, loff_t *pos)
+{
+ struct ll_sb_info *sbi;
+ struct lu_env *env;
+ int refcheck;
+
+ env = cl_env_get(&refcheck);
+ if (!IS_ERR(env)) {
+ sbi = f->private;
+ *pos = vvp_pgcache_find(env, &sbi->ll_cl->cd_lu_dev, *pos + 1);
+ if (*pos == ~0ULL)
+ pos = NULL;
+ cl_env_put(env, &refcheck);
+ }
+ return pos;
+}
+
+static void vvp_pgcache_stop(struct seq_file *f, void *v)
+{
+ /* Nothing to do */
+}
+
+static struct seq_operations vvp_pgcache_ops = {
+ .start = vvp_pgcache_start,
+ .next = vvp_pgcache_next,
+ .stop = vvp_pgcache_stop,
+ .show = vvp_pgcache_show
+};
+
+static int vvp_dump_pgcache_seq_open(struct inode *inode, struct file *filp)
+{
+ struct ll_sb_info *sbi = PDE_DATA(inode);
+ struct seq_file *seq;
+ int result;
+
+ result = seq_open(filp, &vvp_pgcache_ops);
+ if (result == 0) {
+ seq = filp->private_data;
+ seq->private = sbi;
+ }
+ return result;
+}
+
+struct file_operations vvp_dump_pgcache_file_ops = {
+ .owner = THIS_MODULE,
+ .open = vvp_dump_pgcache_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
diff --git a/drivers/staging/lustre/lustre/llite/vvp_internal.h b/drivers/staging/lustre/lustre/llite/vvp_internal.h
new file mode 100644
index 0000000..c82bf17
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/vvp_internal.h
@@ -0,0 +1,62 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Internal definitions for VVP layer.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+#ifndef VVP_INTERNAL_H
+#define VVP_INTERNAL_H
+
+
+#include <cl_object.h>
+#include "llite_internal.h"
+
+int vvp_io_init (const struct lu_env *env,
+ struct cl_object *obj, struct cl_io *io);
+int vvp_lock_init (const struct lu_env *env,
+ struct cl_object *obj, struct cl_lock *lock,
+ const struct cl_io *io);
+int vvp_page_init (const struct lu_env *env,
+ struct cl_object *obj,
+ struct cl_page *page, struct page *vmpage);
+struct lu_object *vvp_object_alloc(const struct lu_env *env,
+ const struct lu_object_header *hdr,
+ struct lu_device *dev);
+
+struct ccc_object *cl_inode2ccc(struct inode *inode);
+
+extern struct kmem_cache *vvp_thread_kmem;
+
+#endif /* VVP_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
new file mode 100644
index 0000000..3ff664c
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -0,0 +1,1175 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Implementation of cl_io for VVP layer.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+
+#include <obd.h>
+#include <lustre_lite.h>
+
+#include "vvp_internal.h"
+
+static struct vvp_io *cl2vvp_io(const struct lu_env *env,
+ const struct cl_io_slice *slice);
+
+/**
+ * True, if \a io is a normal io, False for sendfile() / splice_{read|write}
+ */
+int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
+{
+ struct vvp_io *vio = vvp_env_io(env);
+
+ LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
+
+ return vio->cui_io_subtype == IO_NORMAL;
+}
+
+/**
+ * For layout swapping: the file's layout may have changed.
+ * To avoid populating pages on a wrong stripe, we have to verify the
+ * correctness of the layout.  This works because a process swapping the
+ * layout has to acquire the group lock.
+ */
+static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
+ struct inode *inode)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ccc_io *cio = ccc_env_io(env);
+ bool rc = true;
+
+ switch (io->ci_type) {
+ case CIT_READ:
+ case CIT_WRITE:
+		/* No lock is needed here to check lli_layout_gen: we hold
+		 * the extent lock, and the GROUP lock must be held to swap
+		 * the layout. */
+ if (lli->lli_layout_gen != cio->cui_layout_gen) {
+ io->ci_need_restart = 1;
+ /* this will return application a short read/write */
+ io->ci_continue = 0;
+ rc = false;
+ }
+ case CIT_FAULT:
+		/* fault is okay because we already have the page. */
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+/*****************************************************************************
+ *
+ * io operations.
+ *
+ */
+
+static int vvp_io_fault_iter_init(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct inode *inode = ccc_object_inode(ios->cis_obj);
+
+ LASSERT(inode ==
+ cl2ccc_io(env, ios)->cui_fd->fd_file->f_dentry->d_inode);
+ vio->u.fault.ft_mtime = LTIME_S(inode->i_mtime);
+ return 0;
+}
+
+static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
+{
+ struct cl_io *io = ios->cis_io;
+ struct cl_object *obj = io->ci_obj;
+ struct ccc_io *cio = cl2ccc_io(env, ios);
+
+ CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+
+ CDEBUG(D_VFSTRACE, "ignore/verify layout %d/%d, layout version %d.\n",
+ io->ci_ignore_layout, io->ci_verify_layout, cio->cui_layout_gen);
+
+ if (!io->ci_ignore_layout && io->ci_verify_layout) {
+ __u32 gen = 0;
+
+ /* check layout version */
+ ll_layout_refresh(ccc_object_inode(obj), &gen);
+ io->ci_need_restart = cio->cui_layout_gen != gen;
+ if (io->ci_need_restart)
+ CDEBUG(D_VFSTRACE, "layout changed from %d to %d.\n",
+ cio->cui_layout_gen, gen);
+ }
+}
+
+static void vvp_io_fault_fini(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct cl_io *io = ios->cis_io;
+ struct cl_page *page = io->u.ci_fault.ft_page;
+
+ CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));
+
+ if (page != NULL) {
+ lu_ref_del(&page->cp_reference, "fault", io);
+ cl_page_put(env, page);
+ io->u.ci_fault.ft_page = NULL;
+ }
+ vvp_io_fini(env, ios);
+}
+
+enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
+{
+ /*
+ * we only want to hold PW locks if the mmap() can generate
+ * writes back to the file and that only happens in shared
+ * writable vmas
+ */
+ if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
+ return CLM_WRITE;
+ return CLM_READ;
+}
+
+static int vvp_mmap_locks(const struct lu_env *env,
+ struct ccc_io *vio, struct cl_io *io)
+{
+ struct ccc_thread_info *cti = ccc_env_info(env);
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ struct cl_lock_descr *descr = &cti->cti_descr;
+ ldlm_policy_data_t policy;
+ unsigned long addr;
+ unsigned long seg;
+ ssize_t count;
+ int result;
+
+ LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
+
+ if (!cl_is_normalio(env, io))
+ return 0;
+
+ if (vio->cui_iov == NULL) /* nfs or loop back device write */
+ return 0;
+
+ /* No MM (e.g. NFS)? No vmas too. */
+ if (mm == NULL)
+ return 0;
+
+ for (seg = 0; seg < vio->cui_nrsegs; seg++) {
+ const struct iovec *iv = &vio->cui_iov[seg];
+
+ addr = (unsigned long)iv->iov_base;
+ count = iv->iov_len;
+ if (count == 0)
+ continue;
+
+ count += addr & (~CFS_PAGE_MASK);
+ addr &= CFS_PAGE_MASK;
+
+ down_read(&mm->mmap_sem);
+ while((vma = our_vma(mm, addr, count)) != NULL) {
+ struct inode *inode = vma->vm_file->f_dentry->d_inode;
+ int flags = CEF_MUST;
+
+ if (ll_file_nolock(vma->vm_file)) {
+ /*
+ * For no lock case, a lockless lock will be
+ * generated.
+ */
+ flags = CEF_NEVER;
+ }
+
+ /*
+ * XXX: Required lock mode can be weakened: CIT_WRITE
+ * io only ever reads user level buffer, and CIT_READ
+ * only writes on it.
+ */
+ policy_from_vma(&policy, vma, addr, count);
+ descr->cld_mode = vvp_mode_from_vma(vma);
+ descr->cld_obj = ll_i2info(inode)->lli_clob;
+ descr->cld_start = cl_index(descr->cld_obj,
+ policy.l_extent.start);
+ descr->cld_end = cl_index(descr->cld_obj,
+ policy.l_extent.end);
+ descr->cld_enq_flags = flags;
+ result = cl_io_lock_alloc_add(env, io, descr);
+
+ CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
+ descr->cld_mode, descr->cld_start,
+ descr->cld_end);
+
+ if (result < 0)
+ return result;
+
+ if (vma->vm_end - addr >= count)
+ break;
+
+ count -= vma->vm_end - addr;
+ addr = vma->vm_end;
+ }
+ up_read(&mm->mmap_sem);
+ }
+ return 0;
+}
+
+static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
+ enum cl_lock_mode mode, loff_t start, loff_t end)
+{
+ struct ccc_io *cio = ccc_env_io(env);
+ int result;
+ int ast_flags = 0;
+
+ LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
+
+ ccc_io_update_iov(env, cio, io);
+
+ if (io->u.ci_rw.crw_nonblock)
+ ast_flags |= CEF_NONBLOCK;
+ result = vvp_mmap_locks(env, cio, io);
+ if (result == 0)
+ result = ccc_io_one_lock(env, io, ast_flags, mode, start, end);
+ return result;
+}
+
+static int vvp_io_read_lock(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct cl_io *io = ios->cis_io;
+ struct ll_inode_info *lli = ll_i2info(ccc_object_inode(io->ci_obj));
+ int result;
+
+ /* XXX: Layer violation, we shouldn't see lsm at llite level. */
+ if (lli->lli_has_smd) /* lsm-less file doesn't need to lock */
+ result = vvp_io_rw_lock(env, io, CLM_READ,
+ io->u.ci_rd.rd.crw_pos,
+ io->u.ci_rd.rd.crw_pos +
+ io->u.ci_rd.rd.crw_count - 1);
+ else
+ result = 0;
+ return result;
+}
+
+static int vvp_io_fault_lock(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct cl_io *io = ios->cis_io;
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ /*
+ * XXX LDLM_FL_CBPENDING
+ */
+ return ccc_io_one_lock_index
+ (env, io, 0, vvp_mode_from_vma(vio->u.fault.ft_vma),
+ io->u.ci_fault.ft_index, io->u.ci_fault.ft_index);
+}
+
+static int vvp_io_write_lock(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct cl_io *io = ios->cis_io;
+ loff_t start;
+ loff_t end;
+
+ if (io->u.ci_wr.wr_append) {
+ start = 0;
+ end = OBD_OBJECT_EOF;
+ } else {
+ start = io->u.ci_wr.wr.crw_pos;
+ end = start + io->u.ci_wr.wr.crw_count - 1;
+ }
+ return vvp_io_rw_lock(env, io, CLM_WRITE, start, end);
+}
+
+static int vvp_io_setattr_iter_init(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ return 0;
+}
+
+/**
+ * Implementation of cl_io_operations::cio_lock() method for CIT_SETATTR io.
+ *
+ * Handles "lockless io" mode when extent locking is done by server.
+ */
+static int vvp_io_setattr_lock(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct ccc_io *cio = ccc_env_io(env);
+ struct cl_io *io = ios->cis_io;
+ __u64 new_size;
+ __u32 enqflags = 0;
+
+ if (cl_io_is_trunc(io)) {
+ new_size = io->u.ci_setattr.sa_attr.lvb_size;
+ if (new_size == 0)
+ enqflags = CEF_DISCARD_DATA;
+ } else {
+ if ((io->u.ci_setattr.sa_attr.lvb_mtime >=
+ io->u.ci_setattr.sa_attr.lvb_ctime) ||
+ (io->u.ci_setattr.sa_attr.lvb_atime >=
+ io->u.ci_setattr.sa_attr.lvb_ctime))
+ return 0;
+ new_size = 0;
+ }
+ cio->u.setattr.cui_local_lock = SETATTR_EXTENT_LOCK;
+ return ccc_io_one_lock(env, io, enqflags, CLM_WRITE,
+ new_size, OBD_OBJECT_EOF);
+}
+
+static int vvp_do_vmtruncate(struct inode *inode, size_t size)
+{
+ int result;
+ /*
+ * Only ll_inode_size_lock is taken at this level.
+ */
+ ll_inode_size_lock(inode);
+ result = inode_newsize_ok(inode, size);
+ if (result < 0) {
+ ll_inode_size_unlock(inode);
+ return result;
+ }
+ truncate_setsize(inode, size);
+ ll_inode_size_unlock(inode);
+ return result;
+}
+
+static int vvp_io_setattr_trunc(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ struct inode *inode, loff_t size)
+{
+ inode_dio_wait(inode);
+ return 0;
+}
+
+static int vvp_io_setattr_time(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct cl_io *io = ios->cis_io;
+ struct cl_object *obj = io->ci_obj;
+ struct cl_attr *attr = ccc_env_thread_attr(env);
+ int result;
+ unsigned valid = CAT_CTIME;
+
+ cl_object_attr_lock(obj);
+ attr->cat_ctime = io->u.ci_setattr.sa_attr.lvb_ctime;
+ if (io->u.ci_setattr.sa_valid & ATTR_ATIME_SET) {
+ attr->cat_atime = io->u.ci_setattr.sa_attr.lvb_atime;
+ valid |= CAT_ATIME;
+ }
+ if (io->u.ci_setattr.sa_valid & ATTR_MTIME_SET) {
+ attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime;
+ valid |= CAT_MTIME;
+ }
+ result = cl_object_attr_set(env, obj, attr, valid);
+ cl_object_attr_unlock(obj);
+
+ return result;
+}
+
+static int vvp_io_setattr_start(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct cl_io *io = ios->cis_io;
+ struct inode *inode = ccc_object_inode(io->ci_obj);
+ int result = 0;
+
+ mutex_lock(&inode->i_mutex);
+ if (cl_io_is_trunc(io))
+ result = vvp_io_setattr_trunc(env, ios, inode,
+ io->u.ci_setattr.sa_attr.lvb_size);
+ if (result == 0)
+ result = vvp_io_setattr_time(env, ios);
+ return result;
+}
+
+static void vvp_io_setattr_end(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct cl_io *io = ios->cis_io;
+ struct inode *inode = ccc_object_inode(io->ci_obj);
+
+ if (cl_io_is_trunc(io)) {
+		/* Truncate in-memory pages; they must be clean because
+		 * osc has already been notified to destroy the osc_extents. */
+ vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
+ inode_dio_write_done(inode);
+ }
+ mutex_unlock(&inode->i_mutex);
+}
+
+static void vvp_io_setattr_fini(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ vvp_io_fini(env, ios);
+}
+
+static ssize_t lustre_generic_file_read(struct file *file,
+ struct ccc_io *vio, loff_t *ppos)
+{
+ return generic_file_aio_read(vio->cui_iocb, vio->cui_iov,
+ vio->cui_nrsegs, *ppos);
+}
+
+static ssize_t lustre_generic_file_write(struct file *file,
+ struct ccc_io *vio, loff_t *ppos)
+{
+ return generic_file_aio_write(vio->cui_iocb, vio->cui_iov,
+ vio->cui_nrsegs, *ppos);
+}
+
+static int vvp_io_read_start(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct ccc_io *cio = cl2ccc_io(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct cl_object *obj = io->ci_obj;
+ struct inode *inode = ccc_object_inode(obj);
+ struct ll_ra_read *bead = &vio->cui_bead;
+ struct file *file = cio->cui_fd->fd_file;
+
+ int result;
+ loff_t pos = io->u.ci_rd.rd.crw_pos;
+ long cnt = io->u.ci_rd.rd.crw_count;
+ long tot = cio->cui_tot_count;
+ int exceed = 0;
+
+ CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+
+ CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);
+
+ if (!can_populate_pages(env, io, inode))
+ return 0;
+
+ result = ccc_prep_size(env, obj, io, pos, tot, &exceed);
+ if (result != 0)
+ return result;
+ else if (exceed != 0)
+ goto out;
+
+ LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
+ "Read ino %lu, %lu bytes, offset %lld, size %llu\n",
+ inode->i_ino, cnt, pos, i_size_read(inode));
+
+ /* turn off the kernel's read-ahead */
+ cio->cui_fd->fd_file->f_ra.ra_pages = 0;
+
+ /* initialize read-ahead window once per syscall */
+ if (!vio->cui_ra_window_set) {
+ vio->cui_ra_window_set = 1;
+ bead->lrr_start = cl_index(obj, pos);
+ /*
+ * XXX: explicit PAGE_CACHE_SIZE
+ */
+ bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
+ ll_ra_read_in(file, bead);
+ }
+
+ /* BUG: 5972 */
+ file_accessed(file);
+ switch (vio->cui_io_subtype) {
+ case IO_NORMAL:
+ result = lustre_generic_file_read(file, cio, &pos);
+ break;
+ case IO_SPLICE:
+ result = generic_file_splice_read(file, &pos,
+ vio->u.splice.cui_pipe, cnt,
+ vio->u.splice.cui_flags);
+		/* LU-1109: do the splice read stripe by stripe, otherwise
+		 * nfsd may get stuck if this read occupies all the internal
+		 * pipe buffers. */
+ io->ci_continue = 0;
+ break;
+ default:
+ CERROR("Wrong IO type %u\n", vio->cui_io_subtype);
+ LBUG();
+ }
+
+out:
+ if (result >= 0) {
+ if (result < cnt)
+ io->ci_continue = 0;
+ io->ci_nob += result;
+ ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
+ cio->cui_fd, pos, result, READ);
+ result = 0;
+ }
+ return result;
+}
+
+static void vvp_io_read_fini(const struct lu_env *env, const struct cl_io_slice *ios)
+{
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct ccc_io *cio = cl2ccc_io(env, ios);
+
+ if (vio->cui_ra_window_set)
+ ll_ra_read_ex(cio->cui_fd->fd_file, &vio->cui_bead);
+
+ vvp_io_fini(env, ios);
+}
+
+static int vvp_io_write_start(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct ccc_io *cio = cl2ccc_io(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct cl_object *obj = io->ci_obj;
+ struct inode *inode = ccc_object_inode(obj);
+ struct file *file = cio->cui_fd->fd_file;
+ ssize_t result = 0;
+ loff_t pos = io->u.ci_wr.wr.crw_pos;
+ size_t cnt = io->u.ci_wr.wr.crw_count;
+
+ if (!can_populate_pages(env, io, inode))
+ return 0;
+
+ if (cl_io_is_append(io)) {
+ /*
+		 * PARALLEL IO: this has to be changed for parallel IO doing
+ * out-of-order writes.
+ */
+ pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
+ cio->cui_iocb->ki_pos = pos;
+ }
+
+ CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
+
+ if (cio->cui_iov == NULL) /* from a temp io in ll_cl_init(). */
+ result = 0;
+ else
+ result = lustre_generic_file_write(file, cio, &pos);
+
+ if (result > 0) {
+ if (result < cnt)
+ io->ci_continue = 0;
+ io->ci_nob += result;
+ ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
+ cio->cui_fd, pos, result, WRITE);
+ result = 0;
+ }
+ return result;
+}
+
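+/*
+ * Let the kernel service the fault through filemap_fault() and translate
+ * the returned VM_FAULT_* flags into an errno; on success the locked
+ * vmpage is stored in cfio->ft_vmpage.
+ */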
+static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
+{
+ struct vm_fault *vmf = cfio->fault.ft_vmf;
+
+ cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf);
+
+ if (vmf->page) {
+ LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n",
+ vmf->virtual_address);
+ if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) {
+ lock_page(vmf->page);
+			cfio->fault.ft_flags |= VM_FAULT_LOCKED;
+ }
+
+ cfio->ft_vmpage = vmf->page;
+ return 0;
+ }
+
+ if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
+ CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
+ return -EFAULT;
+ }
+
+ if (cfio->fault.ft_flags & VM_FAULT_OOM) {
+ CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);
+ return -ENOMEM;
+ }
+
+ if (cfio->fault.ft_flags & VM_FAULT_RETRY)
+ return -EAGAIN;
+
+	CERROR("unknown error in page fault %d!\n", cfio->fault.ft_flags);
+ return -EINVAL;
+}
+
+
+static int vvp_io_fault_start(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct cl_object *obj = io->ci_obj;
+ struct inode *inode = ccc_object_inode(obj);
+ struct cl_fault_io *fio = &io->u.ci_fault;
+ struct vvp_fault_io *cfio = &vio->u.fault;
+ loff_t offset;
+ int result = 0;
+ struct page *vmpage = NULL;
+ struct cl_page *page;
+ loff_t size;
+ pgoff_t last; /* last page in a file data region */
+
+ if (fio->ft_executable &&
+ LTIME_S(inode->i_mtime) != vio->u.fault.ft_mtime)
+ CWARN("binary "DFID
+ " changed while waiting for the page fault lock\n",
+ PFID(lu_object_fid(&obj->co_lu)));
+
+ /* offset of the last byte on the page */
+ offset = cl_offset(obj, fio->ft_index + 1) - 1;
+ LASSERT(cl_index(obj, offset) == fio->ft_index);
+ result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
+ if (result != 0)
+ return result;
+
+ /* must return locked page */
+ if (fio->ft_mkwrite) {
+ LASSERT(cfio->ft_vmpage != NULL);
+ lock_page(cfio->ft_vmpage);
+ } else {
+ result = vvp_io_kernel_fault(cfio);
+ if (result != 0)
+ return result;
+ }
+
+ vmpage = cfio->ft_vmpage;
+ LASSERT(PageLocked(vmpage));
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_FAULT_TRUNC_RACE))
+ ll_invalidate_page(vmpage);
+
+ size = i_size_read(inode);
+	/* Though we already hold a cl_lock on this page, it can
+	 * still be truncated locally. */
+ if (unlikely((vmpage->mapping != inode->i_mapping) ||
+ (page_offset(vmpage) > size))) {
+ CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");
+
+		/* return +1 to stop cl_io_loop(); ll_fault() will catch
+		 * it and retry. */
+ GOTO(out, result = +1);
+ }
+
+
+	if (fio->ft_mkwrite) {
+ pgoff_t last_index;
+ /*
+		 * Capture the size while holding the lli_trunc_sem taken
+		 * above; we want to complete the mkwrite action while
+		 * holding this lock, and we must make sure that we are
+		 * not past the end of the file.
+ */
+ last_index = cl_index(obj, size - 1);
+ if (last_index < fio->ft_index) {
+ CDEBUG(D_PAGE,
+ "llite: mkwrite and truncate race happened: "
+ "%p: 0x%lx 0x%lx\n",
+				vmpage->mapping, fio->ft_index, last_index);
+ /*
+			 * We need to return if we are past the end of the
+			 * file.  This will propagate up the call stack to
+			 * ll_page_mkwrite, where we will return
+			 * VM_FAULT_NOPAGE.  Any non-negative value returned
+			 * here is silently converted to 0.  If
+			 * vmpage->mapping is NULL, the error code is
+			 * converted back to ENODATA in ll_page_mkwrite0.
+			 * Thus we return -ENODATA to handle both cases.
+ */
+ GOTO(out, result = -ENODATA);
+ }
+ }
+
+ page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
+ if (IS_ERR(page))
+ GOTO(out, result = PTR_ERR(page));
+
+ /* if page is going to be written, we should add this page into cache
+ * earlier. */
+ if (fio->ft_mkwrite) {
+ wait_on_page_writeback(vmpage);
+ if (set_page_dirty(vmpage)) {
+ struct ccc_page *cp;
+
+ /* vvp_page_assume() calls wait_on_page_writeback(). */
+ cl_page_assume(env, io, page);
+
+ cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
+ vvp_write_pending(cl2ccc(obj), cp);
+
+			/* Do not set the Dirty bit here so that in case IO
+			 * is started before the page is really made dirty,
+			 * we still have a chance to detect it. */
+ result = cl_page_cache_add(env, io, page, CRT_WRITE);
+ LASSERT(cl_page_is_owned(page, io));
+
+ vmpage = NULL;
+ if (result < 0) {
+ cl_page_unmap(env, io, page);
+ cl_page_discard(env, io, page);
+ cl_page_disown(env, io, page);
+
+ cl_page_put(env, page);
+
+ /* we're in big trouble, what can we do now? */
+ if (result == -EDQUOT)
+ result = -ENOSPC;
+ GOTO(out, result);
+ } else
+ cl_page_disown(env, io, page);
+ }
+ }
+
+ last = cl_index(obj, size - 1);
+ /*
+ * The ft_index is only used in the case of
+ * a mkwrite action. We need to check
+ * our assertions are correct, since
+ * we should have caught this above
+ */
+ LASSERT(!fio->ft_mkwrite || fio->ft_index <= last);
+ if (fio->ft_index == last)
+ /*
+ * Last page is mapped partially.
+ */
+ fio->ft_nob = size - cl_offset(obj, fio->ft_index);
+ else
+ fio->ft_nob = cl_page_size(obj);
+
+ lu_ref_add(&page->cp_reference, "fault", io);
+ fio->ft_page = page;
+
+out:
+ /* return unlocked vmpage to avoid deadlocking */
+ if (vmpage != NULL)
+ unlock_page(vmpage);
+ cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
+ return result;
+}
+
+static int vvp_io_fsync_start(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+	/* we should set the TOWRITE bit on each dirty page in the radix
+	 * tree to verify that pages have been written out, but this is
+	 * difficult because of races. */
+ return 0;
+}
+
+static int vvp_io_read_page(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ const struct cl_page_slice *slice)
+{
+ struct cl_io *io = ios->cis_io;
+ struct cl_object *obj = slice->cpl_obj;
+ struct ccc_page *cp = cl2ccc_page(slice);
+ struct cl_page *page = slice->cpl_page;
+ struct inode *inode = ccc_object_inode(obj);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ll_file_data *fd = cl2ccc_io(env, ios)->cui_fd;
+ struct ll_readahead_state *ras = &fd->fd_ras;
+ struct page *vmpage = cp->cpg_page;
+ struct cl_2queue *queue = &io->ci_queue;
+ int rc;
+
+ CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+ LASSERT(slice->cpl_obj == obj);
+
+ if (sbi->ll_ra_info.ra_max_pages_per_file &&
+ sbi->ll_ra_info.ra_max_pages)
+ ras_update(sbi, inode, ras, page->cp_index,
+ cp->cpg_defer_uptodate);
+
+ /* Sanity check whether the page is protected by a lock. */
+ rc = cl_page_is_under_lock(env, io, page);
+ if (rc != -EBUSY) {
+ CL_PAGE_HEADER(D_WARNING, env, page, "%s: %d\n",
+ rc == -ENODATA ? "without a lock" :
+ "match failed", rc);
+ if (rc != -ENODATA)
+ return rc;
+ }
+
+ if (cp->cpg_defer_uptodate) {
+ cp->cpg_ra_used = 1;
+ cl_page_export(env, page, 1);
+ }
+ /*
+	 * Add the page into the queue even when it is marked uptodate above;
+	 * this will unlock it automatically as part of cl_page_list_disown().
+ */
+ cl_2queue_add(queue, page);
+ if (sbi->ll_ra_info.ra_max_pages_per_file &&
+ sbi->ll_ra_info.ra_max_pages)
+ ll_readahead(env, io, ras,
+ vmpage->mapping, &queue->c2_qin, fd->fd_flags);
+
+ return 0;
+}
+
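+/*
+ * Submit a single page for synchronous read or write and wait for the
+ * transfer to complete before returning.
+ */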
+static int vvp_page_sync_io(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page, struct ccc_page *cp,
+ enum cl_req_type crt)
+{
+ struct cl_2queue *queue;
+ int result;
+
+ LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
+
+ queue = &io->ci_queue;
+ cl_2queue_init_page(queue, page);
+
+ result = cl_io_submit_sync(env, io, crt, queue, 0);
+ LASSERT(cl_page_is_owned(page, io));
+
+ if (crt == CRT_READ)
+ /*
+		 * in the CRT_WRITE case the page is left locked even in
+		 * case of error.
+ */
+ cl_page_list_disown(env, io, &queue->c2_qin);
+ cl_2queue_fini(env, queue);
+
+ return result;
+}
+
+/**
+ * Prepare partially written-to page for a write.
+ */
+static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io,
+ struct cl_object *obj, struct cl_page *pg,
+ struct ccc_page *cp,
+ unsigned from, unsigned to)
+{
+ struct cl_attr *attr = ccc_env_thread_attr(env);
+ loff_t offset = cl_offset(obj, pg->cp_index);
+ int result;
+
+ cl_object_attr_lock(obj);
+ result = cl_object_attr_get(env, obj, attr);
+ cl_object_attr_unlock(obj);
+ if (result == 0) {
+ /*
+		 * If we are writing to a new page, no need to read old data.
+ * The extent locking will have updated the KMS, and for our
+ * purposes here we can treat it like i_size.
+ */
+ if (attr->cat_kms <= offset) {
+ char *kaddr = kmap_atomic(cp->cpg_page);
+
+ memset(kaddr, 0, cl_page_size(obj));
+ kunmap_atomic(kaddr);
+ } else if (cp->cpg_defer_uptodate)
+ cp->cpg_ra_used = 1;
+ else
+ result = vvp_page_sync_io(env, io, pg, cp, CRT_READ);
+ /*
+ * In older implementations, obdo_refresh_inode is called here
+ * to update the inode because the write might modify the
+ * object info at OST. However, this has been proven useless,
+ * since LVB functions will be called when user space program
+ * tries to retrieve inode attribute. Also, see bug 15909 for
+ * details. -jay
+ */
+ if (result == 0)
+ cl_page_export(env, pg, 1);
+ }
+ return result;
+}
+
+static int vvp_io_prepare_write(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ const struct cl_page_slice *slice,
+ unsigned from, unsigned to)
+{
+ struct cl_object *obj = slice->cpl_obj;
+ struct ccc_page *cp = cl2ccc_page(slice);
+ struct cl_page *pg = slice->cpl_page;
+ struct page *vmpage = cp->cpg_page;
+
+ int result;
+
+ LINVRNT(cl_page_is_vmlocked(env, pg));
+ LASSERT(vmpage->mapping->host == ccc_object_inode(obj));
+
+ result = 0;
+
+ CL_PAGE_HEADER(D_PAGE, env, pg, "preparing: [%d, %d]\n", from, to);
+ if (!PageUptodate(vmpage)) {
+ /*
+ * We're completely overwriting an existing page, so _don't_
+ * set it up to date until commit_write
+ */
+ if (from == 0 && to == PAGE_CACHE_SIZE) {
+ CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
+ POISON_PAGE(page, 0x11);
+ } else
+ result = vvp_io_prepare_partial(env, ios->cis_io, obj,
+ pg, cp, from, to);
+ } else
+ CL_PAGE_HEADER(D_PAGE, env, pg, "uptodate\n");
+ return result;
+}
+
+static int vvp_io_commit_write(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ const struct cl_page_slice *slice,
+ unsigned from, unsigned to)
+{
+ struct cl_object *obj = slice->cpl_obj;
+ struct cl_io *io = ios->cis_io;
+ struct ccc_page *cp = cl2ccc_page(slice);
+ struct cl_page *pg = slice->cpl_page;
+ struct inode *inode = ccc_object_inode(obj);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct page *vmpage = cp->cpg_page;
+
+ int result;
+ int tallyop;
+ loff_t size;
+
+ LINVRNT(cl_page_is_vmlocked(env, pg));
+ LASSERT(vmpage->mapping->host == inode);
+
+ LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, "committing page write\n");
+ CL_PAGE_HEADER(D_PAGE, env, pg, "committing: [%d, %d]\n", from, to);
+
+ /*
+ * queue a write for some time in the future the first time we
+ * dirty the page.
+ *
+ * This is different from what other file systems do: they usually
+ * just mark page (and some of its buffers) dirty and rely on
+ * balance_dirty_pages() to start a write-back. Lustre wants write-back
+ * to be started earlier for the following reasons:
+ *
+ * (1) with a large number of clients we need to limit the amount
+ * of cached data on the clients a lot;
+ *
+ * (2) large compute jobs generally want compute-only then io-only
+ * and the IO should complete as quickly as possible;
+ *
+ * (3) IO is batched up to the RPC size and is async until the
+ * client max cache is hit
+ * (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
+ *
+ */
+ if (!PageDirty(vmpage)) {
+ tallyop = LPROC_LL_DIRTY_MISSES;
+ result = cl_page_cache_add(env, io, pg, CRT_WRITE);
+ if (result == 0) {
+ /* page was added into cache successfully. */
+ set_page_dirty(vmpage);
+ vvp_write_pending(cl2ccc(obj), cp);
+ } else if (result == -EDQUOT) {
+ pgoff_t last_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
+ bool need_clip = true;
+
+ /*
+ * Client ran out of disk space grant. Possible
+ * strategies are:
+ *
+ * (a) do a sync write, renewing grant;
+ *
+ * (b) stop writing on this stripe, switch to the
+ * next one.
+ *
+ * (b) is part of the "parallel io" design that is the
+ * ultimate goal. (a) is what the "old" client did, and
+ * what the new code continues to do for the time
+ * being.
+ */
+ if (last_index > pg->cp_index) {
+ to = PAGE_CACHE_SIZE;
+ need_clip = false;
+ } else if (last_index == pg->cp_index) {
+ int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
+ if (to < size_to)
+ to = size_to;
+ }
+ if (need_clip)
+ cl_page_clip(env, pg, 0, to);
+ result = vvp_page_sync_io(env, io, pg, cp, CRT_WRITE);
+ if (result)
+ CERROR("Write page %lu of inode %p failed %d\n",
+ pg->cp_index, inode, result);
+ }
+ } else {
+ tallyop = LPROC_LL_DIRTY_HITS;
+ result = 0;
+ }
+ ll_stats_ops_tally(sbi, tallyop, 1);
+
+ /* The inode should be marked DIRTY even if no new page was marked DIRTY,
+ * because the page might not have been flushed between two modifications.
+ * It is important that the file is marked DIRTY as soon as the I/O is done:
+ * when the cache is flushed, the file could already be closed and it would
+ * be too late to warn the MDT.
+ * It is acceptable for the file to be marked DIRTY even if the I/O is
+ * dropped for some reason before being flushed to the OST.
+ */
+ if (result == 0) {
+ spin_lock(&lli->lli_lock);
+ lli->lli_flags |= LLIF_DATA_MODIFIED;
+ spin_unlock(&lli->lli_lock);
+ }
+
+ size = cl_offset(obj, pg->cp_index) + to;
+
+ ll_inode_size_lock(inode);
+ if (result == 0) {
+ if (size > i_size_read(inode)) {
+ cl_isize_write_nolock(inode, size);
+ CDEBUG(D_VFSTRACE, DFID" updating i_size %lu\n",
+ PFID(lu_object_fid(&obj->co_lu)),
+ (unsigned long)size);
+ }
+ cl_page_export(env, pg, 1);
+ } else {
+ if (size > i_size_read(inode))
+ cl_page_discard(env, io, pg);
+ }
+ ll_inode_size_unlock(inode);
+ return result;
+}
+
+static const struct cl_io_operations vvp_io_ops = {
+ .op = {
+ [CIT_READ] = {
+ .cio_fini = vvp_io_read_fini,
+ .cio_lock = vvp_io_read_lock,
+ .cio_start = vvp_io_read_start,
+ .cio_advance = ccc_io_advance
+ },
+ [CIT_WRITE] = {
+ .cio_fini = vvp_io_fini,
+ .cio_lock = vvp_io_write_lock,
+ .cio_start = vvp_io_write_start,
+ .cio_advance = ccc_io_advance
+ },
+ [CIT_SETATTR] = {
+ .cio_fini = vvp_io_setattr_fini,
+ .cio_iter_init = vvp_io_setattr_iter_init,
+ .cio_lock = vvp_io_setattr_lock,
+ .cio_start = vvp_io_setattr_start,
+ .cio_end = vvp_io_setattr_end
+ },
+ [CIT_FAULT] = {
+ .cio_fini = vvp_io_fault_fini,
+ .cio_iter_init = vvp_io_fault_iter_init,
+ .cio_lock = vvp_io_fault_lock,
+ .cio_start = vvp_io_fault_start,
+ .cio_end = ccc_io_end
+ },
+ [CIT_FSYNC] = {
+ .cio_start = vvp_io_fsync_start,
+ .cio_fini = vvp_io_fini
+ },
+ [CIT_MISC] = {
+ .cio_fini = vvp_io_fini
+ }
+ },
+ .cio_read_page = vvp_io_read_page,
+ .cio_prepare_write = vvp_io_prepare_write,
+ .cio_commit_write = vvp_io_commit_write
+};
+
+int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io)
+{
+ struct vvp_io *vio = vvp_env_io(env);
+ struct ccc_io *cio = ccc_env_io(env);
+ struct inode *inode = ccc_object_inode(obj);
+ int result;
+
+ CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+
+ CL_IO_SLICE_CLEAN(cio, cui_cl);
+ cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops);
+ vio->cui_ra_window_set = 0;
+ result = 0;
+ if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
+ size_t count;
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ count = io->u.ci_rw.crw_count;
+ /* "If nbyte is 0, read() will return 0 and have no other
+ * results." -- Single Unix Spec */
+ if (count == 0)
+ result = 1;
+ else {
+ cio->cui_tot_count = count;
+ cio->cui_tot_nrsegs = 0;
+ }
+ /* For read/write, we store the jobid in the inode, and
+ * it will be fetched by osc when building the RPC.
+ *
+ * It is not accurate if the file is shared by different
+ * jobs.
+ */
+ lustre_get_jobid(lli->lli_jobid);
+ } else if (io->ci_type == CIT_SETATTR) {
+ if (!cl_io_is_trunc(io))
+ io->ci_lockreq = CILR_MANDATORY;
+ }
+
+ /* Ignore layout changes for generic CIT_MISC, but not for glimpse.
+ * The io context for glimpse must set ci_verify_layout to true;
+ * see cl_glimpse_size0() for details. */
+ if (io->ci_type == CIT_MISC && !io->ci_verify_layout)
+ io->ci_ignore_layout = 1;
+
+ /* Enqueue the layout lock and get the layout version. We need to do this
+ * even for operations that require opening the file, such as read and
+ * write, because the layout lock might not be granted in IT_OPEN. */
+ if (result == 0 && !io->ci_ignore_layout) {
+ result = ll_layout_refresh(inode, &cio->cui_layout_gen);
+ if (result == -ENOENT)
+ /* If the inode on the MDS has been removed, but the objects
+ * on the OSTs haven't been destroyed (async unlink), layout
+ * fetch will return -ENOENT; we ignore this error
+ * and continue with the dirty flush. LU-3230. */
+ result = 0;
+ if (result < 0)
+ CERROR("%s: refresh file layout " DFID " error %d.\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(lu_object_fid(&obj->co_lu)), result);
+ }
+
+ return result;
+}
+
+static struct vvp_io *cl2vvp_io(const struct lu_env *env,
+ const struct cl_io_slice *slice)
+{
+ /* Calling just for the assertion */
+ cl2ccc_io(env, slice);
+ return vvp_env_io(env);
+}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_lock.c b/drivers/staging/lustre/lustre/llite/vvp_lock.c
new file mode 100644
index 0000000..e16b31e
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/vvp_lock.c
@@ -0,0 +1,84 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Implementation of cl_lock for VVP layer.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+
+#include <obd.h>
+#include <lustre_lite.h>
+
+#include "vvp_internal.h"
+
+/*****************************************************************************
+ *
+ * Vvp lock functions.
+ *
+ */
+
+/**
+ * Estimates lock value for the purpose of managing the lock cache during
+ * memory shortages.
+ *
+ * Locks for memory mapped files are almost infinitely precious, others are
+ * junk. "Mapped locks" are heavy, but not infinitely heavy, so that they are
+ * ordered within themselves by weights assigned from other layers.
+ */
+static unsigned long vvp_lock_weigh(const struct lu_env *env,
+ const struct cl_lock_slice *slice)
+{
+ struct ccc_object *cob = cl2ccc(slice->cls_obj);
+
+ return atomic_read(&cob->cob_mmap_cnt) > 0 ? ~0UL >> 2 : 0;
+}
+
+static const struct cl_lock_operations vvp_lock_ops = {
+ .clo_delete = ccc_lock_delete,
+ .clo_fini = ccc_lock_fini,
+ .clo_enqueue = ccc_lock_enqueue,
+ .clo_wait = ccc_lock_wait,
+ .clo_unuse = ccc_lock_unuse,
+ .clo_fits_into = ccc_lock_fits_into,
+ .clo_state = ccc_lock_state,
+ .clo_weigh = vvp_lock_weigh
+};
+
+int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_lock *lock, const struct cl_io *io)
+{
+ return ccc_lock_init(env, obj, lock, io, &vvp_lock_ops);
+}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c
new file mode 100644
index 0000000..33173fc
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/vvp_object.c
@@ -0,0 +1,186 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * cl_object implementation for VVP layer.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+
+#include <linux/libcfs/libcfs.h>
+
+#include <obd.h>
+#include <lustre_lite.h>
+
+#include "vvp_internal.h"
+
+/*****************************************************************************
+ *
+ * Object operations.
+ *
+ */
+
+static int vvp_object_print(const struct lu_env *env, void *cookie,
+ lu_printer_t p, const struct lu_object *o)
+{
+ struct ccc_object *obj = lu2ccc(o);
+ struct inode *inode = obj->cob_inode;
+ struct ll_inode_info *lli;
+
+ (*p)(env, cookie, "(%s %d %d) inode: %p ",
+ list_empty(&obj->cob_pending_list) ? "-" : "+",
+ obj->cob_transient_pages, atomic_read(&obj->cob_mmap_cnt),
+ inode);
+ if (inode) {
+ lli = ll_i2info(inode);
+ (*p)(env, cookie, "%lu/%u %o %u %d %p "DFID,
+ inode->i_ino, inode->i_generation, inode->i_mode,
+ inode->i_nlink, atomic_read(&inode->i_count),
+ lli->lli_clob, PFID(&lli->lli_fid));
+ }
+ return 0;
+}
+
+static int vvp_attr_get(const struct lu_env *env, struct cl_object *obj,
+ struct cl_attr *attr)
+{
+ struct inode *inode = ccc_object_inode(obj);
+
+ /*
+ * lov overwrites most of these fields in
+ * lov_attr_get()->...lov_merge_lvb_kms(), except when inode
+ * attributes are newer.
+ */
+
+ attr->cat_size = i_size_read(inode);
+ attr->cat_mtime = LTIME_S(inode->i_mtime);
+ attr->cat_atime = LTIME_S(inode->i_atime);
+ attr->cat_ctime = LTIME_S(inode->i_ctime);
+ attr->cat_blocks = inode->i_blocks;
+ attr->cat_uid = from_kuid(&init_user_ns, inode->i_uid);
+ attr->cat_gid = from_kgid(&init_user_ns, inode->i_gid);
+ /* KMS is not known by this layer */
+ return 0; /* layers below have to fill in the rest */
+}
+
+static int vvp_attr_set(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_attr *attr, unsigned valid)
+{
+ struct inode *inode = ccc_object_inode(obj);
+
+ if (valid & CAT_UID)
+ inode->i_uid = make_kuid(&init_user_ns, attr->cat_uid);
+ if (valid & CAT_GID)
+ inode->i_gid = make_kgid(&init_user_ns, attr->cat_gid);
+ if (valid & CAT_ATIME)
+ LTIME_S(inode->i_atime) = attr->cat_atime;
+ if (valid & CAT_MTIME)
+ LTIME_S(inode->i_mtime) = attr->cat_mtime;
+ if (valid & CAT_CTIME)
+ LTIME_S(inode->i_ctime) = attr->cat_ctime;
+ if (0 && valid & CAT_SIZE)
+ cl_isize_write_nolock(inode, attr->cat_size);
+ /* not currently necessary */
+ if (0 && valid & (CAT_UID|CAT_GID|CAT_SIZE))
+ mark_inode_dirty(inode);
+ return 0;
+}
+
+int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_object_conf *conf)
+{
+ struct ll_inode_info *lli = ll_i2info(conf->coc_inode);
+
+ if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
+ lli->lli_layout_gen = LL_LAYOUT_GEN_NONE;
+ return 0;
+ }
+
+ if (conf->coc_opc != OBJECT_CONF_SET)
+ return 0;
+
+ if (conf->u.coc_md != NULL && conf->u.coc_md->lsm != NULL) {
+ CDEBUG(D_VFSTRACE, "layout lock change: %u -> %u\n",
+ lli->lli_layout_gen,
+ conf->u.coc_md->lsm->lsm_layout_gen);
+
+ lli->lli_has_smd = true;
+ lli->lli_layout_gen = conf->u.coc_md->lsm->lsm_layout_gen;
+ } else {
+ CDEBUG(D_VFSTRACE, "layout lock destroyed: %u.\n",
+ lli->lli_layout_gen);
+
+ lli->lli_has_smd = false;
+ lli->lli_layout_gen = LL_LAYOUT_GEN_EMPTY;
+ }
+ return 0;
+}
+
+static const struct cl_object_operations vvp_ops = {
+ .coo_page_init = vvp_page_init,
+ .coo_lock_init = vvp_lock_init,
+ .coo_io_init = vvp_io_init,
+ .coo_attr_get = vvp_attr_get,
+ .coo_attr_set = vvp_attr_set,
+ .coo_conf_set = vvp_conf_set,
+ .coo_glimpse = ccc_object_glimpse
+};
+
+static const struct lu_object_operations vvp_lu_obj_ops = {
+ .loo_object_init = ccc_object_init,
+ .loo_object_free = ccc_object_free,
+ .loo_object_print = vvp_object_print
+};
+
+struct ccc_object *cl_inode2ccc(struct inode *inode)
+{
+ struct cl_inode_info *lli = cl_i2info(inode);
+ struct cl_object *obj = lli->lli_clob;
+ struct lu_object *lu;
+
+ LASSERT(obj != NULL);
+ lu = lu_object_locate(obj->co_lu.lo_header, &vvp_device_type);
+ LASSERT(lu != NULL);
+ return lu2ccc(lu);
+}
+
+struct lu_object *vvp_object_alloc(const struct lu_env *env,
+ const struct lu_object_header *hdr,
+ struct lu_device *dev)
+{
+ return ccc_object_alloc(env, hdr, dev, &vvp_ops, &vvp_lu_obj_ops);
+}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
new file mode 100644
index 0000000..1c02c12
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -0,0 +1,552 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Implementation of cl_page for VVP layer.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+
+#include <obd.h>
+#include <lustre_lite.h>
+
+#include "vvp_internal.h"
+
+/*****************************************************************************
+ *
+ * Page operations.
+ *
+ */
+
+static void vvp_page_fini_common(struct ccc_page *cp)
+{
+ struct page *vmpage = cp->cpg_page;
+
+ LASSERT(vmpage != NULL);
+ page_cache_release(vmpage);
+}
+
+static void vvp_page_fini(const struct lu_env *env,
+ struct cl_page_slice *slice)
+{
+ struct ccc_page *cp = cl2ccc_page(slice);
+ struct page *vmpage = cp->cpg_page;
+
+ /*
+ * vmpage->private was already cleared when page was moved into
+ * VPG_FREEING state.
+ */
+ LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
+ vvp_page_fini_common(cp);
+}
+
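+/**
+ * Take ownership of a page for an IO: lock the underlying VM page and wait
+ * for any write-back in progress. In non-blocking mode, return -EAGAIN
+ * instead of sleeping.
+ */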
+static int vvp_page_own(const struct lu_env *env,
+ const struct cl_page_slice *slice, struct cl_io *io,
+ int nonblock)
+{
+ struct ccc_page *vpg = cl2ccc_page(slice);
+ struct page *vmpage = vpg->cpg_page;
+
+ LASSERT(vmpage != NULL);
+ if (nonblock) {
+ if (!trylock_page(vmpage))
+ return -EAGAIN;
+
+ if (unlikely(PageWriteback(vmpage))) {
+ unlock_page(vmpage);
+ return -EAGAIN;
+ }
+
+ return 0;
+ }
+
+ lock_page(vmpage);
+ wait_on_page_writeback(vmpage);
+ return 0;
+}
+
+static void vvp_page_assume(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
+{
+ struct page *vmpage = cl2vm_page(slice);
+
+ LASSERT(vmpage != NULL);
+ LASSERT(PageLocked(vmpage));
+ wait_on_page_writeback(vmpage);
+}
+
+static void vvp_page_unassume(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
+{
+ struct page *vmpage = cl2vm_page(slice);
+
+ LASSERT(vmpage != NULL);
+ LASSERT(PageLocked(vmpage));
+}
+
+static void vvp_page_disown(const struct lu_env *env,
+ const struct cl_page_slice *slice, struct cl_io *io)
+{
+ struct page *vmpage = cl2vm_page(slice);
+
+ LASSERT(vmpage != NULL);
+ LASSERT(PageLocked(vmpage));
+
+ unlock_page(cl2vm_page(slice));
+}
+
+static void vvp_page_discard(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
+{
+ struct page *vmpage = cl2vm_page(slice);
+ struct address_space *mapping;
+ struct ccc_page *cpg = cl2ccc_page(slice);
+
+ LASSERT(vmpage != NULL);
+ LASSERT(PageLocked(vmpage));
+
+ mapping = vmpage->mapping;
+
+ if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used)
+ ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
+
+ /*
+ * truncate_complete_page() calls
+ * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
+ */
+ truncate_complete_page(mapping, vmpage);
+}
+
+static int vvp_page_unmap(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
+{
+ struct page *vmpage = cl2vm_page(slice);
+ __u64 offset;
+
+ LASSERT(vmpage != NULL);
+ LASSERT(PageLocked(vmpage));
+
+ offset = vmpage->index << PAGE_CACHE_SHIFT;
+
+ /*
+ * XXX is it safe to call this with the page lock held?
+ */
+ ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_CACHE_SIZE);
+ return 0;
+}
+
+static void vvp_page_delete(const struct lu_env *env,
+ const struct cl_page_slice *slice)
+{
+ struct page *vmpage = cl2vm_page(slice);
+ struct inode *inode = vmpage->mapping->host;
+ struct cl_object *obj = slice->cpl_obj;
+
+ LASSERT(PageLocked(vmpage));
+ LASSERT((struct cl_page *)vmpage->private == slice->cpl_page);
+ LASSERT(inode == ccc_object_inode(obj));
+
+ vvp_write_complete(cl2ccc(obj), cl2ccc_page(slice));
+ ClearPagePrivate(vmpage);
+ vmpage->private = 0;
+ /*
+ * Reference from vmpage to cl_page is removed, but the reference back
+ * is still here. It is removed later in vvp_page_fini().
+ */
+}
+
+static void vvp_page_export(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ int uptodate)
+{
+ struct page *vmpage = cl2vm_page(slice);
+
+ LASSERT(vmpage != NULL);
+ LASSERT(PageLocked(vmpage));
+ if (uptodate)
+ SetPageUptodate(vmpage);
+ else
+ ClearPageUptodate(vmpage);
+}
+
+static int vvp_page_is_vmlocked(const struct lu_env *env,
+ const struct cl_page_slice *slice)
+{
+ return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
+}
+
+static int vvp_page_prep_read(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
+{
+ /* Skip the page already marked as PG_uptodate. */
+ return PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0;
+}
+
+static int vvp_page_prep_write(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
+{
+ struct page *vmpage = cl2vm_page(slice);
+
+ LASSERT(PageLocked(vmpage));
+ LASSERT(!PageDirty(vmpage));
+
+ set_page_writeback(vmpage);
+ vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
+
+ return 0;
+}
+
+/**
+ * Handles page transfer errors at VM level.
+ *
+ * This takes the inode as a separate argument, because the inode on which the
+ * error is to be set can be different from the \a vmpage inode in the case of
+ * direct-io.
+ */
+static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret)
+{
+ struct ccc_object *obj = cl_inode2ccc(inode);
+
+ if (ioret == 0) {
+ ClearPageError(vmpage);
+ obj->cob_discard_page_warned = 0;
+ } else {
+ SetPageError(vmpage);
+ if (ioret == -ENOSPC)
+ set_bit(AS_ENOSPC, &inode->i_mapping->flags);
+ else
+ set_bit(AS_EIO, &inode->i_mapping->flags);
+
+ if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
+ obj->cob_discard_page_warned == 0) {
+ obj->cob_discard_page_warned = 1;
+ ll_dirty_page_discard_warn(vmpage, ioret);
+ }
+ }
+}
+
+static void vvp_page_completion_read(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ int ioret)
+{
+ struct ccc_page *cp = cl2ccc_page(slice);
+ struct page *vmpage = cp->cpg_page;
+ struct cl_page *page = cl_page_top(slice->cpl_page);
+ struct inode *inode = ccc_object_inode(page->cp_obj);
+
+ LASSERT(PageLocked(vmpage));
+ CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);
+
+ if (cp->cpg_defer_uptodate)
+ ll_ra_count_put(ll_i2sbi(inode), 1);
+
+ if (ioret == 0) {
+ if (!cp->cpg_defer_uptodate)
+ cl_page_export(env, page, 1);
+ } else
+ cp->cpg_defer_uptodate = 0;
+
+ if (page->cp_sync_io == NULL)
+ unlock_page(vmpage);
+}
+
+static void vvp_page_completion_write(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ int ioret)
+{
+ struct ccc_page *cp = cl2ccc_page(slice);
+ struct cl_page *pg = slice->cpl_page;
+ struct page *vmpage = cp->cpg_page;
+
+ LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage)));
+ LASSERT(PageWriteback(vmpage));
+
+ CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
+
+ /*
+ * TODO: It actually makes sense to add the page back into the oap
+ * pending list, so that we don't need to take the page off the SoM
+ * write pending list when we only hit a recoverable error such as
+ * -ENOMEM.
+ * To implement this, we just need to return a non-zero value from the
+ * ->cpo_completion method. The underlying transfer would then be
+ * notified and re-add the page into the pending transfer queue. -jay
+ */
+
+ cp->cpg_write_queued = 0;
+ vvp_write_complete(cl2ccc(slice->cpl_obj), cp);
+
+ /*
+ * Only mark the page error when it is an async write, because
+ * applications won't wait for the IO to finish.
+ */
+ if (pg->cp_sync_io == NULL)
+ vvp_vmpage_error(ccc_object_inode(pg->cp_obj), vmpage, ioret);
+
+ end_page_writeback(vmpage);
+}
+
+/**
+ * Implements cl_page_operations::cpo_make_ready() method.
+ *
+ * This is called to yank a page from the transfer cache and to send it out as
+ * a part of a transfer. This function try-locks the page. If the try-lock
+ * fails, the page is owned by some concurrent IO and should be skipped (this
+ * is bad, but hopefully a rare situation, as it usually results in the
+ * transfer being shorter than possible).
+ *
+ * \retval 0 success, page can be placed into transfer
+ *
+ * \retval -EAGAIN page is either used by concurrent IO or has been
+ * truncated. Skip it.
+ */
+static int vvp_page_make_ready(const struct lu_env *env,
+ const struct cl_page_slice *slice)
+{
+ struct page *vmpage = cl2vm_page(slice);
+ struct cl_page *pg = slice->cpl_page;
+ int result = 0;
+
+ lock_page(vmpage);
+ if (clear_page_dirty_for_io(vmpage)) {
+ LASSERT(pg->cp_state == CPS_CACHED);
+ /* This actually clears the dirty bit in the radix
+ * tree. */
+ set_page_writeback(vmpage);
+ vvp_write_pending(cl2ccc(slice->cpl_obj),
+ cl2ccc_page(slice));
+ CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
+ } else if (pg->cp_state == CPS_PAGEOUT) {
+ /* is it possible for osc_flush_async_page() to already
+ * make it ready? */
+ result = -EALREADY;
+ } else {
+ CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpected page state %d.\n",
+ pg->cp_state);
+ LBUG();
+ }
+ unlock_page(vmpage);
+ return result;
+}
+
+static int vvp_page_print(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ void *cookie, lu_printer_t printer)
+{
+ struct ccc_page *vp = cl2ccc_page(slice);
+ struct page *vmpage = vp->cpg_page;
+
+ (*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d:%d) "
+ "vm@%p ",
+ vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
+ vp->cpg_write_queued, vmpage);
+ if (vmpage != NULL) {
+ (*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
+ (long)vmpage->flags, page_count(vmpage),
+ page_mapcount(vmpage), vmpage->private,
+ page_index(vmpage),
+ list_empty(&vmpage->lru) ? "not-" : "");
+ }
+ (*printer)(env, cookie, "\n");
+ return 0;
+}
+
+static const struct cl_page_operations vvp_page_ops = {
+ .cpo_own = vvp_page_own,
+ .cpo_assume = vvp_page_assume,
+ .cpo_unassume = vvp_page_unassume,
+ .cpo_disown = vvp_page_disown,
+ .cpo_vmpage = ccc_page_vmpage,
+ .cpo_discard = vvp_page_discard,
+ .cpo_delete = vvp_page_delete,
+ .cpo_unmap = vvp_page_unmap,
+ .cpo_export = vvp_page_export,
+ .cpo_is_vmlocked = vvp_page_is_vmlocked,
+ .cpo_fini = vvp_page_fini,
+ .cpo_print = vvp_page_print,
+ .cpo_is_under_lock = ccc_page_is_under_lock,
+ .io = {
+ [CRT_READ] = {
+ .cpo_prep = vvp_page_prep_read,
+ .cpo_completion = vvp_page_completion_read,
+ .cpo_make_ready = ccc_fail,
+ },
+ [CRT_WRITE] = {
+ .cpo_prep = vvp_page_prep_write,
+ .cpo_completion = vvp_page_completion_write,
+ .cpo_make_ready = vvp_page_make_ready,
+ }
+ }
+};
+
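+/*
+ * Transient (direct IO) pages are serialized by the inode's i_mutex rather
+ * than by the VM page lock; assert that the mutex is held.
+ */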
+static void vvp_transient_page_verify(const struct cl_page *page)
+{
+ struct inode *inode = ccc_object_inode(page->cp_obj);
+
+ LASSERT(!mutex_trylock(&inode->i_mutex));
+}
+
+static int vvp_transient_page_own(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused, int nonblock)
+{
+ vvp_transient_page_verify(slice->cpl_page);
+ return 0;
+}
+
+static void vvp_transient_page_assume(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
+{
+ vvp_transient_page_verify(slice->cpl_page);
+}
+
+static void vvp_transient_page_unassume(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
+{
+ vvp_transient_page_verify(slice->cpl_page);
+}
+
+static void vvp_transient_page_disown(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
+{
+ vvp_transient_page_verify(slice->cpl_page);
+}
+
+static void vvp_transient_page_discard(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
+{
+ struct cl_page *page = slice->cpl_page;
+
+ vvp_transient_page_verify(slice->cpl_page);
+
+ /*
+ * For transient pages, remove the page from the radix tree.
+ */
+ cl_page_delete(env, page);
+}
+
+static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
+ const struct cl_page_slice *slice)
+{
+ struct inode *inode = ccc_object_inode(slice->cpl_obj);
+ int locked;
+
+ locked = !mutex_trylock(&inode->i_mutex);
+ if (!locked)
+ mutex_unlock(&inode->i_mutex);
+ return locked ? -EBUSY : -ENODATA;
+}
+
+static void
+vvp_transient_page_completion(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ int ioret)
+{
+ vvp_transient_page_verify(slice->cpl_page);
+}
+
+static void vvp_transient_page_fini(const struct lu_env *env,
+ struct cl_page_slice *slice)
+{
+ struct ccc_page *cp = cl2ccc_page(slice);
+ struct cl_page *clp = slice->cpl_page;
+ struct ccc_object *clobj = cl2ccc(clp->cp_obj);
+
+ vvp_page_fini_common(cp);
+ LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
+ clobj->cob_transient_pages--;
+}
+
+static const struct cl_page_operations vvp_transient_page_ops = {
+ .cpo_own = vvp_transient_page_own,
+ .cpo_assume = vvp_transient_page_assume,
+ .cpo_unassume = vvp_transient_page_unassume,
+ .cpo_disown = vvp_transient_page_disown,
+ .cpo_discard = vvp_transient_page_discard,
+ .cpo_vmpage = ccc_page_vmpage,
+ .cpo_fini = vvp_transient_page_fini,
+ .cpo_is_vmlocked = vvp_transient_page_is_vmlocked,
+ .cpo_print = vvp_page_print,
+ .cpo_is_under_lock = ccc_page_is_under_lock,
+ .io = {
+ [CRT_READ] = {
+ .cpo_prep = ccc_transient_page_prep,
+ .cpo_completion = vvp_transient_page_completion,
+ },
+ [CRT_WRITE] = {
+ .cpo_prep = ccc_transient_page_prep,
+ .cpo_completion = vvp_transient_page_completion,
+ }
+ }
+};
+
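+/*
+ * Initialize the VVP slice of a cl_page. Cacheable pages are linked to the
+ * VM page via vmpage->private and use vvp_page_ops; transient (direct IO)
+ * pages use vvp_transient_page_ops and are counted per object.
+ */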
+int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, struct page *vmpage)
+{
+ struct ccc_page *cpg = cl_object_page_slice(obj, page);
+
+ CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+
+ cpg->cpg_page = vmpage;
+ page_cache_get(vmpage);
+
+ INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
+ if (page->cp_type == CPT_CACHEABLE) {
+ SetPagePrivate(vmpage);
+ vmpage->private = (unsigned long)page;
+ cl_page_slice_add(page, &cpg->cpg_cl, obj,
+ &vvp_page_ops);
+ } else {
+ struct ccc_object *clobj = cl2ccc(obj);
+
+ LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
+ cl_page_slice_add(page, &cpg->cpg_cl, obj,
+ &vvp_transient_page_ops);
+ clobj->cob_transient_pages++;
+ }
+ return 0;
+}
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
new file mode 100644
index 0000000..bcf86ba
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -0,0 +1,582 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/selinux.h>
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+#include <obd_support.h>
+#include <lustre_lite.h>
+#include <lustre_dlm.h>
+#include <lustre_ver.h>
+#include <lustre_eacl.h>
+
+#include "llite_internal.h"
+
+#define XATTR_USER_T (1)
+#define XATTR_TRUSTED_T (2)
+#define XATTR_SECURITY_T (3)
+#define XATTR_ACL_ACCESS_T (4)
+#define XATTR_ACL_DEFAULT_T (5)
+#define XATTR_LUSTRE_T (6)
+#define XATTR_OTHER_T (7)
+
+static
+int get_xattr_type(const char *name)
+{
+ if (!strcmp(name, POSIX_ACL_XATTR_ACCESS))
+ return XATTR_ACL_ACCESS_T;
+
+ if (!strcmp(name, POSIX_ACL_XATTR_DEFAULT))
+ return XATTR_ACL_DEFAULT_T;
+
+ if (!strncmp(name, XATTR_USER_PREFIX,
+ sizeof(XATTR_USER_PREFIX) - 1))
+ return XATTR_USER_T;
+
+ if (!strncmp(name, XATTR_TRUSTED_PREFIX,
+ sizeof(XATTR_TRUSTED_PREFIX) - 1))
+ return XATTR_TRUSTED_T;
+
+ if (!strncmp(name, XATTR_SECURITY_PREFIX,
+ sizeof(XATTR_SECURITY_PREFIX) - 1))
+ return XATTR_SECURITY_T;
+
+ if (!strncmp(name, XATTR_LUSTRE_PREFIX,
+ sizeof(XATTR_LUSTRE_PREFIX) - 1))
+ return XATTR_LUSTRE_T;
+
+ return XATTR_OTHER_T;
+}
+
+static
+int xattr_type_filter(struct ll_sb_info *sbi, int xattr_type)
+{
+ if ((xattr_type == XATTR_ACL_ACCESS_T ||
+ xattr_type == XATTR_ACL_DEFAULT_T) &&
+ !(sbi->ll_flags & LL_SBI_ACL))
+ return -EOPNOTSUPP;
+
+ if (xattr_type == XATTR_USER_T && !(sbi->ll_flags & LL_SBI_USER_XATTR))
+ return -EOPNOTSUPP;
+ if (xattr_type == XATTR_TRUSTED_T && !cfs_capable(CFS_CAP_SYS_ADMIN))
+ return -EPERM;
+ if (xattr_type == XATTR_OTHER_T)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static
+int ll_setxattr_common(struct inode *inode, const char *name,
+ const void *value, size_t size,
+ int flags, __u64 valid)
+{
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ptlrpc_request *req;
+ int xattr_type, rc;
+ struct obd_capa *oc;
+#ifdef CONFIG_FS_POSIX_ACL
+ posix_acl_xattr_header *new_value = NULL;
+ struct rmtacl_ctl_entry *rce = NULL;
+ ext_acl_xattr_header *acl = NULL;
+#endif
+ const char *pv = value;
+
+ xattr_type = get_xattr_type(name);
+ rc = xattr_type_filter(sbi, xattr_type);
+ if (rc)
+ return rc;
+
+ /* b10667: ignore lustre special xattr for now */
+ if ((xattr_type == XATTR_TRUSTED_T && strcmp(name, "trusted.lov") == 0) ||
+ (xattr_type == XATTR_LUSTRE_T && strcmp(name, "lustre.lov") == 0))
+ return 0;
+
+ /* b15587: ignore security.capability xattr for now */
+ if ((xattr_type == XATTR_SECURITY_T &&
+ strcmp(name, "security.capability") == 0))
+ return 0;
+
+ /* LU-549: Disable security.selinux when selinux is disabled */
+ if (xattr_type == XATTR_SECURITY_T && !selinux_is_enabled() &&
+ strcmp(name, "security.selinux") == 0)
+ return -EOPNOTSUPP;
+
+#ifdef CONFIG_FS_POSIX_ACL
+ if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
+ (xattr_type == XATTR_ACL_ACCESS_T ||
+ xattr_type == XATTR_ACL_DEFAULT_T)) {
+ rce = rct_search(&sbi->ll_rct, current_pid());
+ if (rce == NULL ||
+ (rce->rce_ops != RMT_LSETFACL &&
+ rce->rce_ops != RMT_RSETFACL))
+ return -EOPNOTSUPP;
+
+ if (rce->rce_ops == RMT_LSETFACL) {
+ struct eacl_entry *ee;
+
+ ee = et_search_del(&sbi->ll_et, current_pid(),
+ ll_inode2fid(inode), xattr_type);
+ LASSERT(ee != NULL);
+ if (valid & OBD_MD_FLXATTR) {
+ acl = lustre_acl_xattr_merge2ext(
+ (posix_acl_xattr_header *)value,
+ size, ee->ee_acl);
+ if (IS_ERR(acl)) {
+ ee_free(ee);
+ return PTR_ERR(acl);
+ }
+ size = CFS_ACL_XATTR_SIZE(\
+ le32_to_cpu(acl->a_count), \
+ ext_acl_xattr);
+ pv = (const char *)acl;
+ }
+ ee_free(ee);
+ } else if (rce->rce_ops == RMT_RSETFACL) {
+ size = lustre_posix_acl_xattr_filter(
+ (posix_acl_xattr_header *)value,
+ size, &new_value);
+ if (unlikely(size < 0))
+ return size;
+
+ pv = (const char *)new_value;
+ } else
+ return -EOPNOTSUPP;
+
+ valid |= rce_ops2valid(rce->rce_ops);
+ }
+#endif
+ oc = ll_mdscapa_get(inode);
+ rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
+ valid, name, pv, size, 0, flags, ll_i2suppgid(inode),
+ &req);
+ capa_put(oc);
+#ifdef CONFIG_FS_POSIX_ACL
+ if (new_value != NULL)
+ lustre_posix_acl_xattr_free(new_value, size);
+ if (acl != NULL)
+ lustre_ext_acl_xattr_free(acl);
+#endif
+ if (rc) {
+ if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) {
+ LCONSOLE_INFO("Disabling user_xattr feature because "
+ "it is not supported on the server\n");
+ sbi->ll_flags &= ~LL_SBI_USER_XATTR;
+ }
+ return rc;
+ }
+
+ ptlrpc_req_finished(req);
+ return 0;
+}
+
+int ll_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags)
+{
+ struct inode *inode = dentry->d_inode;
+
+ LASSERT(inode);
+ LASSERT(name);
+
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), xattr %s\n",
+ inode->i_ino, inode->i_generation, inode, name);
+
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_SETXATTR, 1);
+
+ if ((strncmp(name, XATTR_TRUSTED_PREFIX,
+ sizeof(XATTR_TRUSTED_PREFIX) - 1) == 0 &&
+ strcmp(name + sizeof(XATTR_TRUSTED_PREFIX) - 1, "lov") == 0) ||
+ (strncmp(name, XATTR_LUSTRE_PREFIX,
+ sizeof(XATTR_LUSTRE_PREFIX) - 1) == 0 &&
+ strcmp(name + sizeof(XATTR_LUSTRE_PREFIX) - 1, "lov") == 0)) {
+ struct lov_user_md *lump = (struct lov_user_md *)value;
+ int rc = 0;
+
+ /* Attributes that are saved via getxattr will always have
+ * the stripe_offset as 0. Instead, the MDS should be
+ * allowed to pick the starting OST index. b=17846 */
+ if (lump != NULL && lump->lmm_stripe_offset == 0)
+ lump->lmm_stripe_offset = -1;
+
+ if (lump != NULL && S_ISREG(inode->i_mode)) {
+ struct file f;
+ int flags = FMODE_WRITE;
+ int lum_size = (lump->lmm_magic == LOV_USER_MAGIC_V1) ?
+ sizeof(*lump) : sizeof(struct lov_user_md_v3);
+
+ f.f_dentry = dentry;
+ rc = ll_lov_setstripe_ea_info(inode, &f, flags, lump,
+ lum_size);
+ /* b10667: rc is always 0 here for now */
+ rc = 0;
+ } else if (S_ISDIR(inode->i_mode)) {
+ rc = ll_dir_setstripe(inode, lump, 0);
+ }
+
+ return rc;
+
+ } else if (strcmp(name, XATTR_NAME_LMA) == 0 ||
+ strcmp(name, XATTR_NAME_LINK) == 0)
+ return 0;
+
+ return ll_setxattr_common(inode, name, value, size, flags,
+ OBD_MD_FLXATTR);
+}
+
+int ll_removexattr(struct dentry *dentry, const char *name)
+{
+ struct inode *inode = dentry->d_inode;
+
+ LASSERT(inode);
+ LASSERT(name);
+
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), xattr %s\n",
+ inode->i_ino, inode->i_generation, inode, name);
+
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_REMOVEXATTR, 1);
+ return ll_setxattr_common(inode, name, NULL, 0, 0,
+ OBD_MD_FLXATTRRM);
+}
+
+static
+int ll_getxattr_common(struct inode *inode, const char *name,
+ void *buffer, size_t size, __u64 valid)
+{
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ptlrpc_request *req = NULL;
+ struct mdt_body *body;
+ int xattr_type, rc;
+ void *xdata;
+ struct obd_capa *oc;
+ struct rmtacl_ctl_entry *rce = NULL;
+
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n",
+ inode->i_ino, inode->i_generation, inode);
+
+ /* listxattr has slightly different behavior from that of ext3:
+ * without 'user_xattr', ext3 lists all xattr names but
+ * filters out "^user..*"; we list them all for simplicity.
+ */
+ if (!name) {
+ xattr_type = XATTR_OTHER_T;
+ goto do_getxattr;
+ }
+
+ xattr_type = get_xattr_type(name);
+ rc = xattr_type_filter(sbi, xattr_type);
+ if (rc)
+ return rc;
+
+ /* b15587: ignore security.capability xattr for now */
+ if ((xattr_type == XATTR_SECURITY_T &&
+ strcmp(name, "security.capability") == 0))
+ return -ENODATA;
+
+ /* LU-549: Disable security.selinux when selinux is disabled */
+ if (xattr_type == XATTR_SECURITY_T && !selinux_is_enabled() &&
+ strcmp(name, "security.selinux") == 0)
+ return -EOPNOTSUPP;
+
+#ifdef CONFIG_FS_POSIX_ACL
+ if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
+ (xattr_type == XATTR_ACL_ACCESS_T ||
+ xattr_type == XATTR_ACL_DEFAULT_T)) {
+ rce = rct_search(&sbi->ll_rct, current_pid());
+ if (rce == NULL ||
+ (rce->rce_ops != RMT_LSETFACL &&
+ rce->rce_ops != RMT_LGETFACL &&
+ rce->rce_ops != RMT_RSETFACL &&
+ rce->rce_ops != RMT_RGETFACL))
+ return -EOPNOTSUPP;
+ }
+
+ /* The posix acl is under the protection of the LOOKUP lock. By the time
+ * we get here, path resolution to the target inode has been done, so
+ * there is a good chance that the cached ACL is uptodate.
+ */
+ if (xattr_type == XATTR_ACL_ACCESS_T &&
+ !(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct posix_acl *acl;
+
+ spin_lock(&lli->lli_lock);
+ acl = posix_acl_dup(lli->lli_posix_acl);
+ spin_unlock(&lli->lli_lock);
+
+ if (!acl)
+ return -ENODATA;
+
+ rc = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
+ posix_acl_release(acl);
+ return rc;
+ }
+ if (xattr_type == XATTR_ACL_DEFAULT_T && !S_ISDIR(inode->i_mode))
+ return -ENODATA;
+#endif
+
+do_getxattr:
+ oc = ll_mdscapa_get(inode);
+ rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode), oc,
+ valid | (rce ? rce_ops2valid(rce->rce_ops) : 0),
+ name, NULL, 0, size, 0, &req);
+ capa_put(oc);
+ if (rc) {
+ if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) {
+ LCONSOLE_INFO("Disabling user_xattr feature because "
+ "it is not supported on the server\n");
+ sbi->ll_flags &= ~LL_SBI_USER_XATTR;
+ }
+ return rc;
+ }
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ LASSERT(body);
+
+ /* only detect the xattr size */
+ if (size == 0)
+ GOTO(out, rc = body->eadatasize);
+
+ if (size < body->eadatasize) {
+ CERROR("server bug: replied size %u > %u\n",
+ body->eadatasize, (int)size);
+ GOTO(out, rc = -ERANGE);
+ }
+
+ if (body->eadatasize == 0)
+ GOTO(out, rc = -ENODATA);
+
+ /* no need to swab the xattr data */
+ xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
+ body->eadatasize);
+ if (!xdata)
+ GOTO(out, rc = -EFAULT);
+
+#ifdef CONFIG_FS_POSIX_ACL
+ if (body->eadatasize >= 0 && rce && rce->rce_ops == RMT_LSETFACL) {
+ ext_acl_xattr_header *acl;
+
+ acl = lustre_posix_acl_xattr_2ext((posix_acl_xattr_header *)xdata,
+ body->eadatasize);
+ if (IS_ERR(acl))
+ GOTO(out, rc = PTR_ERR(acl));
+
+ rc = ee_add(&sbi->ll_et, current_pid(), ll_inode2fid(inode),
+ xattr_type, acl);
+ if (unlikely(rc < 0)) {
+ lustre_ext_acl_xattr_free(acl);
+ GOTO(out, rc);
+ }
+ }
+#endif
+
+ if (body->eadatasize == 0) {
+ rc = -ENODATA;
+ } else {
+ LASSERT(buffer);
+ memcpy(buffer, xdata, body->eadatasize);
+ rc = body->eadatasize;
+ }
+out:
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+ssize_t ll_getxattr(struct dentry *dentry, const char *name,
+ void *buffer, size_t size)
+{
+ struct inode *inode = dentry->d_inode;
+
+ LASSERT(inode);
+ LASSERT(name);
+
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), xattr %s\n",
+ inode->i_ino, inode->i_generation, inode, name);
+
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1);
+
+ if ((strncmp(name, XATTR_TRUSTED_PREFIX,
+ sizeof(XATTR_TRUSTED_PREFIX) - 1) == 0 &&
+ strcmp(name + sizeof(XATTR_TRUSTED_PREFIX) - 1, "lov") == 0) ||
+ (strncmp(name, XATTR_LUSTRE_PREFIX,
+ sizeof(XATTR_LUSTRE_PREFIX) - 1) == 0 &&
+ strcmp(name + sizeof(XATTR_LUSTRE_PREFIX) - 1, "lov") == 0)) {
+ struct lov_stripe_md *lsm;
+ struct lov_user_md *lump;
+ struct lov_mds_md *lmm = NULL;
+ struct ptlrpc_request *request = NULL;
+ int rc = 0, lmmsize = 0;
+
+ if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
+ return -ENODATA;
+
+ if (size == 0 && S_ISDIR(inode->i_mode)) {
+ /* XXX directory EA size is fixed for now; optimize to save
+ * an RPC transfer */
+ GOTO(out, rc = sizeof(struct lov_user_md));
+ }
+
+ lsm = ccc_inode_lsm_get(inode);
+ if (lsm == NULL) {
+ if (S_ISDIR(inode->i_mode)) {
+ rc = ll_dir_getstripe(inode, &lmm,
+ &lmmsize, &request);
+ } else {
+ rc = -ENODATA;
+ }
+ } else {
+ /* The LSM is already present after the lookup/getattr call;
+ * we need to grab the layout lock once it is implemented */
+ rc = obd_packmd(ll_i2dtexp(inode), &lmm, lsm);
+ lmmsize = rc;
+ }
+ ccc_inode_lsm_put(inode, lsm);
+
+ if (rc < 0)
+ GOTO(out, rc);
+
+ if (size == 0) {
+ /* We used to call ll_get_max_mdsize() here to get
+ * the maximum buffer size, but some apps (such as
+ * rsync 3.0.x) care about the exact xattr value
+ * size */
+ rc = lmmsize;
+ GOTO(out, rc);
+ }
+
+ if (size < lmmsize) {
+ CERROR("server bug: replied size %d > %d for %s (%s)\n",
+ lmmsize, (int)size, dentry->d_name.name, name);
+ GOTO(out, rc = -ERANGE);
+ }
+
+ lump = (struct lov_user_md *)buffer;
+ memcpy(lump, lmm, lmmsize);
+ /* Do not return the layout gen for getxattr; otherwise it would
+ * confuse tar --xattr by recognizing the layout gen as the stripe
+ * offset when the file is restored. See LU-2809. */
+ lump->lmm_layout_gen = 0;
+
+ rc = lmmsize;
+out:
+ if (request)
+ ptlrpc_req_finished(request);
+ else if (lmm)
+ obd_free_diskmd(ll_i2dtexp(inode), &lmm);
+ return rc;
+ }
+
+ return ll_getxattr_common(inode, name, buffer, size, OBD_MD_FLXATTR);
+}
+
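+/*
+ * List xattr names: fetch the list from the MDS, filter out namespaces that
+ * are not exposed on this mount, and append the virtual "lustre.lov" name
+ * for striped regular files and directories.
+ */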
+ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
+{
+ struct inode *inode = dentry->d_inode;
+ int rc = 0, rc2 = 0;
+ struct lov_mds_md *lmm = NULL;
+ struct ptlrpc_request *request = NULL;
+ int lmmsize;
+
+ LASSERT(inode);
+
+ CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n",
+ inode->i_ino, inode->i_generation, inode);
+
+ ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LISTXATTR, 1);
+
+ rc = ll_getxattr_common(inode, NULL, buffer, size, OBD_MD_FLXATTRLS);
+ if (rc < 0)
+ GOTO(out, rc);
+
+ if (buffer != NULL) {
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+ char *xattr_name = buffer;
+ int xlen, rem = rc;
+
+ while (rem > 0) {
+ xlen = strnlen(xattr_name, rem - 1) + 1;
+ rem -= xlen;
+ if (xattr_type_filter(sbi,
+ get_xattr_type(xattr_name)) == 0) {
+ /* skip OK xattr type
+ * leave it in buffer
+ */
+ xattr_name += xlen;
+ continue;
+ }
+ /* move up remaining xattrs in buffer
+ * removing the xattr that is not OK
+ */
+ memmove(xattr_name, xattr_name + xlen, rem);
+ rc -= xlen;
+ }
+ }
+ if (S_ISREG(inode->i_mode)) {
+ if (!ll_i2info(inode)->lli_has_smd)
+ rc2 = -1;
+ } else if (S_ISDIR(inode->i_mode)) {
+ rc2 = ll_dir_getstripe(inode, &lmm, &lmmsize, &request);
+ }
+
+ if (rc2 < 0) {
+ GOTO(out, rc2 = 0);
+ } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)) {
+ const int prefix_len = sizeof(XATTR_LUSTRE_PREFIX) - 1;
+ const size_t name_len = sizeof("lov") - 1;
+ const size_t total_len = prefix_len + name_len + 1;
+
+ if (((rc + total_len) > size) && (buffer != NULL)) {
+ ptlrpc_req_finished(request);
+ return -ERANGE;
+ }
+
+ if (buffer != NULL) {
+ buffer += rc;
+ memcpy(buffer, XATTR_LUSTRE_PREFIX, prefix_len);
+ memcpy(buffer + prefix_len, "lov", name_len);
+ buffer[prefix_len + name_len] = '\0';
+ }
+ rc2 = total_len;
+ }
+out:
+ ptlrpc_req_finished(request);
+ rc = rc + rc2;
+
+ return rc;
+}
diff --git a/drivers/staging/lustre/lustre/lmv/Makefile b/drivers/staging/lustre/lustre/lmv/Makefile
new file mode 100644
index 0000000..8cc81ad
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lmv/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_LUSTRE_FS) += lmv.o
+lmv-y := lmv_obd.o lmv_intent.o lmv_fld.o lproc_lmv.o
+
+
+ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_fld.c b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
new file mode 100644
index 0000000..0b2d38d
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
@@ -0,0 +1,85 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2013, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_LMV
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <asm/div64.h>
+#include <linux/seq_file.h>
+
+#include <obd_support.h>
+#include <lustre/lustre_idl.h>
+#include <lustre_fid.h>
+#include <lustre_lib.h>
+#include <lustre_net.h>
+#include <lustre_dlm.h>
+#include <obd_class.h>
+#include <lprocfs_status.h>
+#include "lmv_internal.h"
+
+int lmv_fld_lookup(struct lmv_obd *lmv,
+ const struct lu_fid *fid,
+ mdsno_t *mds)
+{
+ int rc;
+
+ /* FIXME: Currently ZFS unfortunately still uses a local seq for ROOT, and
+ * this fid_is_local check should be removed once LU-2240 is fixed */
+ LASSERTF((fid_seq_in_fldb(fid_seq(fid)) ||
+ fid_seq_is_local_file(fid_seq(fid))) &&
+ fid_is_sane(fid), DFID" is insane!\n", PFID(fid));
+
+ rc = fld_client_lookup(&lmv->lmv_fld, fid_seq(fid), mds,
+ LU_SEQ_RANGE_MDT, NULL);
+ if (rc) {
+ CERROR("Error while looking for mds number. Seq "LPX64
+ ", err = %d\n", fid_seq(fid), rc);
+ return rc;
+ }
+
+ CDEBUG(D_INODE, "FLD lookup got mds #%x for fid="DFID"\n",
+ *mds, PFID(fid));
+
+ if (*mds >= lmv->desc.ld_tgt_count) {
+ CERROR("FLD lookup got invalid mds #%x (max: %x) "
+ "for fid="DFID"\n", *mds, lmv->desc.ld_tgt_count,
+ PFID(fid));
+ rc = -EINVAL;
+ }
+ return rc;
+}
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
new file mode 100644
index 0000000..511b3b4
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
@@ -0,0 +1,322 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_LMV
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <asm/div64.h>
+#include <linux/seq_file.h>
+#include <linux/namei.h>
+#include <linux/lustre_intent.h>
+
+#include <obd_support.h>
+#include <lustre/lustre_idl.h>
+#include <lustre_lib.h>
+#include <lustre_net.h>
+#include <lustre_dlm.h>
+#include <obd_class.h>
+#include <lprocfs_status.h>
+#include "lmv_internal.h"
+
+static int lmv_intent_remote(struct obd_export *exp, void *lmm,
+ int lmmsize, struct lookup_intent *it,
+ const struct lu_fid *parent_fid, int flags,
+ struct ptlrpc_request **reqp,
+ ldlm_blocking_callback cb_blocking,
+ __u64 extra_lock_flags)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct ptlrpc_request *req = NULL;
+ struct lustre_handle plock;
+ struct md_op_data *op_data;
+ struct lmv_tgt_desc *tgt;
+ struct mdt_body *body;
+ int pmode;
+ int rc = 0;
+
+ body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
+ if (body == NULL)
+ return -EPROTO;
+
+ LASSERT((body->valid & OBD_MD_MDS));
+
+ /*
+ * Unfortunately, we have to lie to MDC/MDS to retrieve the
+ * attributes llite needs and provide proper locking.
+ */
+ if (it->it_op & IT_LOOKUP)
+ it->it_op = IT_GETATTR;
+
+ /*
+ * We got LOOKUP lock, but we really need attrs.
+ */
+ pmode = it->d.lustre.it_lock_mode;
+ if (pmode) {
+ plock.cookie = it->d.lustre.it_lock_handle;
+ it->d.lustre.it_lock_mode = 0;
+ it->d.lustre.it_data = NULL;
+ }
+
+ LASSERT(fid_is_sane(&body->fid1));
+
+ tgt = lmv_find_target(lmv, &body->fid1);
+ if (IS_ERR(tgt))
+ GOTO(out, rc = PTR_ERR(tgt));
+
+ OBD_ALLOC_PTR(op_data);
+ if (op_data == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ op_data->op_fid1 = body->fid1;
+ /* Send the parent FID to the remote MDT */
+ if (parent_fid != NULL) {
+ /* The parent fid is only used by remote open to
+ * check whether the open comes from the OBF;
+ * see mdt_cross_open */
+ LASSERT(it->it_op & IT_OPEN);
+ op_data->op_fid2 = *parent_fid;
+ /* Add object FID to op_fid3, in case it needs to check stale
+ * (M_CHECK_STALE), see mdc_finish_intent_lock */
+ op_data->op_fid3 = body->fid1;
+ }
+
+ op_data->op_bias = MDS_CROSS_REF;
+ CDEBUG(D_INODE, "REMOTE_INTENT with fid="DFID" -> mds #%d\n",
+ PFID(&body->fid1), tgt->ltd_idx);
+
+ it->d.lustre.it_disposition &= ~DISP_ENQ_COMPLETE;
+ rc = md_intent_lock(tgt->ltd_exp, op_data, lmm, lmmsize, it,
+ flags, &req, cb_blocking, extra_lock_flags);
+ if (rc)
+ GOTO(out_free_op_data, rc);
+
+ /*
+ * LLite needs LOOKUP lock to track dentry revocation in order to
+ * maintain dcache consistency. Thus drop UPDATE|PERM lock here
+ * and put LOOKUP in request.
+ */
+ if (it->d.lustre.it_lock_mode != 0) {
+ it->d.lustre.it_remote_lock_handle =
+ it->d.lustre.it_lock_handle;
+ it->d.lustre.it_remote_lock_mode = it->d.lustre.it_lock_mode;
+ }
+
+ it->d.lustre.it_lock_handle = plock.cookie;
+ it->d.lustre.it_lock_mode = pmode;
+
+out_free_op_data:
+ OBD_FREE_PTR(op_data);
+out:
+ if (rc && pmode)
+ ldlm_lock_decref(&plock, pmode);
+
+ ptlrpc_req_finished(*reqp);
+ *reqp = req;
+ return rc;
+}
+
+/*
+ * IT_OPEN is intended to open (and possibly create) an object. The parent
+ * (pid) may be a split dir.
+ */
+int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data,
+ void *lmm, int lmmsize, struct lookup_intent *it,
+ int flags, struct ptlrpc_request **reqp,
+ ldlm_blocking_callback cb_blocking,
+ __u64 extra_lock_flags)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt;
+ struct mdt_body *body;
+ int rc;
+
+ tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ /* If the file is to be opened by FID, there is no need to
+ * allocate a FID at all; otherwise it will confuse the MDT */
+ if ((it->it_op & IT_CREAT) &&
+ !(it->it_flags & MDS_OPEN_BY_FID)) {
+ /*
+ * For open with IT_CREAT, allocate a new
+ * fid and set up FLD for it.
+ */
+ op_data->op_fid3 = op_data->op_fid2;
+ rc = lmv_fid_alloc(exp, &op_data->op_fid2, op_data);
+ if (rc != 0)
+ return rc;
+ }
+
+ CDEBUG(D_INODE, "OPEN_INTENT with fid1="DFID", fid2="DFID","
+ " name='%s' -> mds #%d\n", PFID(&op_data->op_fid1),
+ PFID(&op_data->op_fid2), op_data->op_name, tgt->ltd_idx);
+
+ rc = md_intent_lock(tgt->ltd_exp, op_data, lmm, lmmsize, it, flags,
+ reqp, cb_blocking, extra_lock_flags);
+ if (rc != 0)
+ return rc;
+ /*
+ * Nothing is found, do not access body->fid1 as it is zero and thus
+ * pointless.
+ */
+ if ((it->d.lustre.it_disposition & DISP_LOOKUP_NEG) &&
+ !(it->d.lustre.it_disposition & DISP_OPEN_CREATE) &&
+ !(it->d.lustre.it_disposition & DISP_OPEN_OPEN))
+ return rc;
+
+ body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
+ if (body == NULL)
+ return -EPROTO;
+ /*
+ * Not cross-ref case, just get out of here.
+ */
+ if (likely(!(body->valid & OBD_MD_MDS)))
+ return 0;
+
+ /*
+ * Okay, the MDS has returned success. The name has probably been
+ * resolved to a remote inode.
+ */
+ rc = lmv_intent_remote(exp, lmm, lmmsize, it, &op_data->op_fid1, flags,
+ reqp, cb_blocking, extra_lock_flags);
+ if (rc != 0) {
+ LASSERT(rc < 0);
+ /*
+ * It is possible that some userspace application will try to
+ * open a file as a directory and we will get -ENOTDIR here. As
+ * this is a normal situation, we should not print an error here,
+ * only debug info.
+ */
+ CDEBUG(D_INODE, "Can't handle remote %s: dir "DFID"("DFID"):"
+ "%*s: %d\n", LL_IT2STR(it), PFID(&op_data->op_fid2),
+ PFID(&op_data->op_fid1), op_data->op_namelen,
+ op_data->op_name, rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+/*
+ * Handler for: getattr, lookup and revalidate cases.
+ */
+int lmv_intent_lookup(struct obd_export *exp, struct md_op_data *op_data,
+ void *lmm, int lmmsize, struct lookup_intent *it,
+ int flags, struct ptlrpc_request **reqp,
+ ldlm_blocking_callback cb_blocking,
+ __u64 extra_lock_flags)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt = NULL;
+ struct mdt_body *body;
+ int rc = 0;
+
+ tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ if (!fid_is_sane(&op_data->op_fid2))
+ fid_zero(&op_data->op_fid2);
+
+ CDEBUG(D_INODE, "LOOKUP_INTENT with fid1="DFID", fid2="DFID
+ ", name='%s' -> mds #%d\n", PFID(&op_data->op_fid1),
+ PFID(&op_data->op_fid2),
+ op_data->op_name ? op_data->op_name : "<NULL>",
+ tgt->ltd_idx);
+
+ op_data->op_bias &= ~MDS_CROSS_REF;
+
+ rc = md_intent_lock(tgt->ltd_exp, op_data, lmm, lmmsize, it,
+ flags, reqp, cb_blocking, extra_lock_flags);
+
+ if (rc < 0 || *reqp == NULL)
+ return rc;
+
+	/*
+	 * MDS has returned success. The name has probably been resolved in a
+	 * remote inode. Let's check this.
+	 */
+ body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
+ if (body == NULL)
+ return -EPROTO;
+ /* Not cross-ref case, just get out of here. */
+ if (likely(!(body->valid & OBD_MD_MDS)))
+ return 0;
+
+ rc = lmv_intent_remote(exp, lmm, lmmsize, it, NULL, flags, reqp,
+ cb_blocking, extra_lock_flags);
+
+ return rc;
+}
+
+int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
+ void *lmm, int lmmsize, struct lookup_intent *it,
+ int flags, struct ptlrpc_request **reqp,
+ ldlm_blocking_callback cb_blocking,
+ __u64 extra_lock_flags)
+{
+ struct obd_device *obd = exp->exp_obd;
+ int rc;
+
+ LASSERT(it != NULL);
+ LASSERT(fid_is_sane(&op_data->op_fid1));
+
+ CDEBUG(D_INODE, "INTENT LOCK '%s' for '%*s' on "DFID"\n",
+ LL_IT2STR(it), op_data->op_namelen, op_data->op_name,
+ PFID(&op_data->op_fid1));
+
+ rc = lmv_check_connect(obd);
+ if (rc)
+ return rc;
+
+ if (it->it_op & (IT_LOOKUP | IT_GETATTR | IT_LAYOUT))
+ rc = lmv_intent_lookup(exp, op_data, lmm, lmmsize, it,
+ flags, reqp, cb_blocking,
+ extra_lock_flags);
+ else if (it->it_op & IT_OPEN)
+ rc = lmv_intent_open(exp, op_data, lmm, lmmsize, it,
+ flags, reqp, cb_blocking,
+ extra_lock_flags);
+ else
+ LBUG();
+ return rc;
+}
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_internal.h b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
new file mode 100644
index 0000000..f75b0a9
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
@@ -0,0 +1,159 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef _LMV_INTERNAL_H_
+#define _LMV_INTERNAL_H_
+
+#include <lustre/lustre_idl.h>
+#include <obd.h>
+
+#define LMV_MAX_TGT_COUNT 128
+
+#define lmv_init_lock(lmv)	mutex_lock(&lmv->init_mutex)
+#define lmv_init_unlock(lmv)	mutex_unlock(&lmv->init_mutex)
+
+#define LL_IT2STR(it) \
+ ((it) ? ldlm_it2str((it)->it_op) : "0")
+
+int lmv_check_connect(struct obd_device *obd);
+
+int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
+ void *lmm, int lmmsize, struct lookup_intent *it,
+ int flags, struct ptlrpc_request **reqp,
+ ldlm_blocking_callback cb_blocking,
+ __u64 extra_lock_flags);
+
+int lmv_intent_lookup(struct obd_export *exp, struct md_op_data *op_data,
+ void *lmm, int lmmsize, struct lookup_intent *it,
+ int flags, struct ptlrpc_request **reqp,
+ ldlm_blocking_callback cb_blocking,
+ __u64 extra_lock_flags);
+
+int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data,
+ void *lmm, int lmmsize, struct lookup_intent *it,
+ int flags, struct ptlrpc_request **reqp,
+ ldlm_blocking_callback cb_blocking,
+ __u64 extra_lock_flags);
+
+int lmv_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
+ void *, int);
+int lmv_fld_lookup(struct lmv_obd *lmv, const struct lu_fid *fid,
+ mdsno_t *mds);
+int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid,
+ mdsno_t mds);
+int lmv_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
+ struct md_op_data *op_data);
+
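+/*
+ * Extract the LMV stripe descriptor (mea) from an RPC reply. Returns NULL if
+ * the reply does not describe a striped directory with a valid magic.
+ */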
+static inline struct lmv_stripe_md *lmv_get_mea(struct ptlrpc_request *req)
+{
+ struct mdt_body *body;
+ struct lmv_stripe_md *mea;
+
+ LASSERT(req != NULL);
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+
+ if (!body || !S_ISDIR(body->mode) || !body->eadatasize)
+ return NULL;
+
+ mea = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD,
+ body->eadatasize);
+ LASSERT(mea != NULL);
+
+ if (mea->mea_count == 0)
+ return NULL;
+	if (mea->mea_magic != MEA_MAGIC_LAST_CHAR &&
+	    mea->mea_magic != MEA_MAGIC_ALL_CHARS &&
+	    mea->mea_magic != MEA_MAGIC_HASH_SEGMENT)
+ return NULL;
+
+ return mea;
+}
+
+static inline int lmv_get_easize(struct lmv_obd *lmv)
+{
+ return sizeof(struct lmv_stripe_md) +
+ lmv->desc.ld_tgt_count *
+ sizeof(struct lu_fid);
+}
+
+static inline struct lmv_tgt_desc *
+lmv_get_target(struct lmv_obd *lmv, mdsno_t mds)
+{
+ int count = lmv->desc.ld_tgt_count;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (lmv->tgts[i] == NULL)
+ continue;
+
+ if (lmv->tgts[i]->ltd_idx == mds)
+ return lmv->tgts[i];
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
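+/*
+ * Map a fid to its MDT. The FLD is consulted only when more than one target
+ * is configured; otherwise everything lives on the single target.
+ */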
+static inline struct lmv_tgt_desc *
+lmv_find_target(struct lmv_obd *lmv, const struct lu_fid *fid)
+{
+ mdsno_t mds = 0;
+ int rc;
+
+ if (lmv->desc.ld_tgt_count > 1) {
+ rc = lmv_fld_lookup(lmv, fid, &mds);
+ if (rc)
+ return ERR_PTR(rc);
+ }
+
+ return lmv_get_target(lmv, mds);
+}
+
+struct lmv_tgt_desc
+*lmv_locate_mds(struct lmv_obd *lmv, struct md_op_data *op_data,
+ struct lu_fid *fid);
+/* lproc_lmv.c */
+#ifdef LPROCFS
+void lprocfs_lmv_init_vars(struct lprocfs_static_vars *lvars);
+#else
+static inline void lprocfs_lmv_init_vars(struct lprocfs_static_vars *lvars)
+{
+ memset(lvars, 0, sizeof(*lvars));
+}
+#endif
+extern struct file_operations lmv_proc_target_fops;
+
+#endif
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
new file mode 100644
index 0000000..c286604
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -0,0 +1,2867 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_LMV
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/mm.h>
+#include <asm/div64.h>
+#include <linux/seq_file.h>
+#include <linux/namei.h>
+#include <asm/uaccess.h>
+
+#include <lustre/lustre_idl.h>
+#include <obd_support.h>
+#include <lustre_lib.h>
+#include <lustre_net.h>
+#include <obd_class.h>
+#include <lprocfs_status.h>
+#include <lustre_lite.h>
+#include <lustre_fid.h>
+#include "lmv_internal.h"
+
+static void lmv_activate_target(struct lmv_obd *lmv,
+ struct lmv_tgt_desc *tgt,
+ int activate)
+{
+ if (tgt->ltd_active == activate)
+ return;
+
+ tgt->ltd_active = activate;
+ lmv->desc.ld_active_tgt_count += (activate ? 1 : -1);
+}
+
+/**
+ * Error codes:
+ *
+ * -EINVAL : UUID can't be found in the LMV's target list
+ * -ENOTCONN: The UUID is found, but the target connection is bad (!)
+ * -EBADF   : The UUID is found, but the OBD is of the wrong type (!)
+ */
+static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid,
+ int activate)
+{
+ struct lmv_tgt_desc *uninitialized_var(tgt);
+ struct obd_device *obd;
+ int i;
+ int rc = 0;
+
+ CDEBUG(D_INFO, "Searching in lmv %p for uuid %s (activate=%d)\n",
+ lmv, uuid->uuid, activate);
+
+ spin_lock(&lmv->lmv_lock);
+ for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ tgt = lmv->tgts[i];
+ if (tgt == NULL || tgt->ltd_exp == NULL)
+ continue;
+
+ CDEBUG(D_INFO, "Target idx %d is %s conn "LPX64"\n", i,
+ tgt->ltd_uuid.uuid, tgt->ltd_exp->exp_handle.h_cookie);
+
+ if (obd_uuid_equals(uuid, &tgt->ltd_uuid))
+ break;
+ }
+
+ if (i == lmv->desc.ld_tgt_count)
+ GOTO(out_lmv_lock, rc = -EINVAL);
+
+ obd = class_exp2obd(tgt->ltd_exp);
+ if (obd == NULL)
+ GOTO(out_lmv_lock, rc = -ENOTCONN);
+
+ CDEBUG(D_INFO, "Found OBD %s=%s device %d (%p) type %s at LMV idx %d\n",
+ obd->obd_name, obd->obd_uuid.uuid, obd->obd_minor, obd,
+ obd->obd_type->typ_name, i);
+ LASSERT(strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) == 0);
+
+ if (tgt->ltd_active == activate) {
+ CDEBUG(D_INFO, "OBD %p already %sactive!\n", obd,
+ activate ? "" : "in");
+ GOTO(out_lmv_lock, rc);
+ }
+
+ CDEBUG(D_INFO, "Marking OBD %p %sactive\n", obd,
+ activate ? "" : "in");
+ lmv_activate_target(lmv, tgt, activate);
+
+ out_lmv_lock:
+ spin_unlock(&lmv->lmv_lock);
+ return rc;
+}
+
+struct obd_uuid *lmv_get_uuid(struct obd_export *exp)
+{
+ struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
+
+ return obd_get_uuid(lmv->tgts[0]->ltd_exp);
+}
+
+static int lmv_notify(struct obd_device *obd, struct obd_device *watched,
+ enum obd_notify_event ev, void *data)
+{
+ struct obd_connect_data *conn_data;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct obd_uuid *uuid;
+ int rc = 0;
+
+ if (strcmp(watched->obd_type->typ_name, LUSTRE_MDC_NAME)) {
+ CERROR("unexpected notification of %s %s!\n",
+ watched->obd_type->typ_name,
+ watched->obd_name);
+ return -EINVAL;
+ }
+
+ uuid = &watched->u.cli.cl_target_uuid;
+ if (ev == OBD_NOTIFY_ACTIVE || ev == OBD_NOTIFY_INACTIVE) {
+ /*
+ * Set MDC as active before notifying the observer, so the
+ * observer can use the MDC normally.
+ */
+ rc = lmv_set_mdc_active(lmv, uuid,
+ ev == OBD_NOTIFY_ACTIVE);
+ if (rc) {
+ CERROR("%sactivation of %s failed: %d\n",
+ ev == OBD_NOTIFY_ACTIVE ? "" : "de",
+ uuid->uuid, rc);
+ return rc;
+ }
+ } else if (ev == OBD_NOTIFY_OCD) {
+ conn_data = &watched->u.cli.cl_import->imp_connect_data;
+ /*
+ * XXX: Make sure that ocd_connect_flags from all targets are
+ * the same. Otherwise one of MDTs runs wrong version or
+ * something like this. --umka
+ */
+ obd->obd_self_export->exp_connect_data = *conn_data;
+ }
+#if 0
+ else if (ev == OBD_NOTIFY_DISCON) {
+ /*
+ * For disconnect event, flush fld cache for failout MDS case.
+ */
+ fld_client_flush(&lmv->lmv_fld);
+ }
+#endif
+ /*
+ * Pass the notification up the chain.
+ */
+ if (obd->obd_observer)
+ rc = obd_notify(obd->obd_observer, watched, ev, data);
+
+ return rc;
+}
+
+/**
+ * This is a fake connect function. Its purpose is to initialize the lmv and
+ * tell the caller that everything is okay. The real connection will be
+ * performed later.
+ */
+static int lmv_connect(const struct lu_env *env,
+ struct obd_export **exp, struct obd_device *obd,
+ struct obd_uuid *cluuid, struct obd_connect_data *data,
+ void *localdata)
+{
+ struct proc_dir_entry *lmv_proc_dir;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lustre_handle conn = { 0 };
+ int rc = 0;
+
+ /*
+ * We don't want to actually do the underlying connections more than
+ * once, so keep track.
+ */
+ lmv->refcount++;
+ if (lmv->refcount > 1) {
+ *exp = NULL;
+ return 0;
+ }
+
+ rc = class_connect(&conn, obd, cluuid);
+ if (rc) {
+		CERROR("class_connect() returned %d\n", rc);
+ return rc;
+ }
+
+ *exp = class_conn2export(&conn);
+ class_export_get(*exp);
+
+ lmv->exp = *exp;
+ lmv->connected = 0;
+ lmv->cluuid = *cluuid;
+
+ if (data)
+ lmv->conn_data = *data;
+
+ if (obd->obd_proc_private != NULL) {
+ lmv_proc_dir = obd->obd_proc_private;
+ } else {
+ lmv_proc_dir = lprocfs_register("target_obds", obd->obd_proc_entry,
+ NULL, NULL);
+ if (IS_ERR(lmv_proc_dir)) {
+ CERROR("could not register /proc/fs/lustre/%s/%s/target_obds.",
+ obd->obd_type->typ_name, obd->obd_name);
+ lmv_proc_dir = NULL;
+ }
+ obd->obd_proc_private = lmv_proc_dir;
+ }
+
+	/*
+	 * All real clients should perform the actual connection right away,
+	 * because it is possible that LMV will not have an opportunity to
+	 * connect targets and the MDC code will be called directly, for
+	 * instance while reading the ../mdc/../kbytesfree procfs file, etc.
+	 */
+ if (data->ocd_connect_flags & OBD_CONNECT_REAL)
+ rc = lmv_check_connect(obd);
+
+ if (rc && lmv_proc_dir) {
+ lprocfs_remove(&lmv_proc_dir);
+ obd->obd_proc_private = NULL;
+ }
+
+ return rc;
+}
+
+static void lmv_set_timeouts(struct obd_device *obd)
+{
+ struct lmv_tgt_desc *tgt;
+ struct lmv_obd *lmv;
+ int i;
+
+ lmv = &obd->u.lmv;
+ if (lmv->server_timeout == 0)
+ return;
+
+ if (lmv->connected == 0)
+ return;
+
+ for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ tgt = lmv->tgts[i];
+ if (tgt == NULL || tgt->ltd_exp == NULL || tgt->ltd_active == 0)
+ continue;
+
+ obd_set_info_async(NULL, tgt->ltd_exp, sizeof(KEY_INTERMDS),
+ KEY_INTERMDS, 0, NULL, NULL);
+ }
+}
+
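+/*
+ * Track the largest EA/cookie sizes seen so far and propagate any increase
+ * to every connected MDC target.
+ */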
+static int lmv_init_ea_size(struct obd_export *exp, int easize,
+ int def_easize, int cookiesize)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ int i;
+ int rc = 0;
+ int change = 0;
+
+ if (lmv->max_easize < easize) {
+ lmv->max_easize = easize;
+ change = 1;
+ }
+ if (lmv->max_def_easize < def_easize) {
+ lmv->max_def_easize = def_easize;
+ change = 1;
+ }
+ if (lmv->max_cookiesize < cookiesize) {
+ lmv->max_cookiesize = cookiesize;
+ change = 1;
+ }
+ if (change == 0)
+ return 0;
+
+ if (lmv->connected == 0)
+ return 0;
+
+ for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ if (lmv->tgts[i] == NULL ||
+ lmv->tgts[i]->ltd_exp == NULL ||
+ lmv->tgts[i]->ltd_active == 0) {
+ CWARN("%s: NULL export for %d\n", obd->obd_name, i);
+ continue;
+ }
+
+ rc = md_init_ea_size(lmv->tgts[i]->ltd_exp, easize, def_easize,
+ cookiesize);
+ if (rc) {
+ CERROR("%s: obd_init_ea_size() failed on MDT target %d:"
+ " rc = %d.\n", obd->obd_name, i, rc);
+ break;
+ }
+ }
+ return rc;
+}
+
+#define MAX_STRING_SIZE 128
+
+int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
+{
+ struct proc_dir_entry *lmv_proc_dir;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct obd_uuid *cluuid = &lmv->cluuid;
+ struct obd_uuid lmv_mdc_uuid = { "LMV_MDC_UUID" };
+ struct obd_device *mdc_obd;
+ struct obd_export *mdc_exp;
+ struct lu_fld_target target;
+ int rc;
+
+ mdc_obd = class_find_client_obd(&tgt->ltd_uuid, LUSTRE_MDC_NAME,
+ &obd->obd_uuid);
+ if (!mdc_obd) {
+ CERROR("target %s not attached\n", tgt->ltd_uuid.uuid);
+ return -EINVAL;
+ }
+
+ CDEBUG(D_CONFIG, "connect to %s(%s) - %s, %s FOR %s\n",
+ mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
+ tgt->ltd_uuid.uuid, obd->obd_uuid.uuid,
+ cluuid->uuid);
+
+ if (!mdc_obd->obd_set_up) {
+ CERROR("target %s is not set up\n", tgt->ltd_uuid.uuid);
+ return -EINVAL;
+ }
+
+ rc = obd_connect(NULL, &mdc_exp, mdc_obd, &lmv_mdc_uuid,
+ &lmv->conn_data, NULL);
+ if (rc) {
+ CERROR("target %s connect error %d\n", tgt->ltd_uuid.uuid, rc);
+ return rc;
+ }
+
+ /*
+ * Init fid sequence client for this mdc and add new fld target.
+ */
+ rc = obd_fid_init(mdc_obd, mdc_exp, LUSTRE_SEQ_METADATA);
+ if (rc)
+ return rc;
+
+ target.ft_srv = NULL;
+ target.ft_exp = mdc_exp;
+ target.ft_idx = tgt->ltd_idx;
+
+ fld_client_add_target(&lmv->lmv_fld, &target);
+
+ rc = obd_register_observer(mdc_obd, obd);
+ if (rc) {
+ obd_disconnect(mdc_exp);
+ CERROR("target %s register_observer error %d\n",
+ tgt->ltd_uuid.uuid, rc);
+ return rc;
+ }
+
+ if (obd->obd_observer) {
+ /*
+ * Tell the observer about the new target.
+ */
+ rc = obd_notify(obd->obd_observer, mdc_exp->exp_obd,
+ OBD_NOTIFY_ACTIVE,
+ (void *)(tgt - lmv->tgts[0]));
+ if (rc) {
+ obd_disconnect(mdc_exp);
+ return rc;
+ }
+ }
+
+ tgt->ltd_active = 1;
+ tgt->ltd_exp = mdc_exp;
+ lmv->desc.ld_active_tgt_count++;
+
+ md_init_ea_size(tgt->ltd_exp, lmv->max_easize,
+ lmv->max_def_easize, lmv->max_cookiesize);
+
+ CDEBUG(D_CONFIG, "Connected to %s(%s) successfully (%d)\n",
+ mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
+ atomic_read(&obd->obd_refcount));
+
+ lmv_proc_dir = obd->obd_proc_private;
+ if (lmv_proc_dir) {
+ struct proc_dir_entry *mdc_symlink;
+
+ LASSERT(mdc_obd->obd_type != NULL);
+ LASSERT(mdc_obd->obd_type->typ_name != NULL);
+ mdc_symlink = lprocfs_add_symlink(mdc_obd->obd_name,
+ lmv_proc_dir,
+ "../../../%s/%s",
+ mdc_obd->obd_type->typ_name,
+ mdc_obd->obd_name);
+ if (mdc_symlink == NULL) {
+ CERROR("Could not register LMV target "
+ "/proc/fs/lustre/%s/%s/target_obds/%s.",
+ obd->obd_type->typ_name, obd->obd_name,
+ mdc_obd->obd_name);
+ lprocfs_remove(&lmv_proc_dir);
+ obd->obd_proc_private = NULL;
+ }
+ }
+ return 0;
+}
+
+static void lmv_del_target(struct lmv_obd *lmv, int index)
+{
+ if (lmv->tgts[index] == NULL)
+ return;
+
+ OBD_FREE_PTR(lmv->tgts[index]);
+ lmv->tgts[index] = NULL;
+ return;
+}
+
+static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
+ __u32 index, int gen)
+{
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt;
+ int rc = 0;
+
+ CDEBUG(D_CONFIG, "Target uuid: %s. index %d\n", uuidp->uuid, index);
+
+ lmv_init_lock(lmv);
+
+ if (lmv->desc.ld_tgt_count == 0) {
+ struct obd_device *mdc_obd;
+
+ mdc_obd = class_find_client_obd(uuidp, LUSTRE_MDC_NAME,
+ &obd->obd_uuid);
+ if (!mdc_obd) {
+ lmv_init_unlock(lmv);
+ CERROR("%s: Target %s not attached: rc = %d\n",
+ obd->obd_name, uuidp->uuid, -EINVAL);
+ return -EINVAL;
+ }
+ }
+
+ if ((index < lmv->tgts_size) && (lmv->tgts[index] != NULL)) {
+ tgt = lmv->tgts[index];
+ CERROR("%s: UUID %s already assigned at LOV target index %d:"
+ " rc = %d\n", obd->obd_name,
+ obd_uuid2str(&tgt->ltd_uuid), index, -EEXIST);
+ lmv_init_unlock(lmv);
+ return -EEXIST;
+ }
+
+ if (index >= lmv->tgts_size) {
+ /* We need to reallocate the lmv target array. */
+ struct lmv_tgt_desc **newtgts, **old = NULL;
+ __u32 newsize = 1;
+ __u32 oldsize = 0;
+
+ while (newsize < index + 1)
+ newsize = newsize << 1;
+ OBD_ALLOC(newtgts, sizeof(*newtgts) * newsize);
+ if (newtgts == NULL) {
+ lmv_init_unlock(lmv);
+ return -ENOMEM;
+ }
+
+ if (lmv->tgts_size) {
+ memcpy(newtgts, lmv->tgts,
+ sizeof(*newtgts) * lmv->tgts_size);
+ old = lmv->tgts;
+ oldsize = lmv->tgts_size;
+ }
+
+ lmv->tgts = newtgts;
+ lmv->tgts_size = newsize;
+ smp_rmb();
+ if (old)
+ OBD_FREE(old, sizeof(*old) * oldsize);
+
+ CDEBUG(D_CONFIG, "tgts: %p size: %d\n", lmv->tgts,
+ lmv->tgts_size);
+ }
+
+ OBD_ALLOC_PTR(tgt);
+ if (!tgt) {
+ lmv_init_unlock(lmv);
+ return -ENOMEM;
+ }
+
+ mutex_init(&tgt->ltd_fid_mutex);
+ tgt->ltd_idx = index;
+ tgt->ltd_uuid = *uuidp;
+ tgt->ltd_active = 0;
+ lmv->tgts[index] = tgt;
+ if (index >= lmv->desc.ld_tgt_count)
+ lmv->desc.ld_tgt_count = index + 1;
+
+ if (lmv->connected) {
+ rc = lmv_connect_mdc(obd, tgt);
+ if (rc) {
+ spin_lock(&lmv->lmv_lock);
+ lmv->desc.ld_tgt_count--;
+ memset(tgt, 0, sizeof(*tgt));
+ spin_unlock(&lmv->lmv_lock);
+ } else {
+ int easize = sizeof(struct lmv_stripe_md) +
+ lmv->desc.ld_tgt_count *
+ sizeof(struct lu_fid);
+ lmv_init_ea_size(obd->obd_self_export, easize, 0, 0);
+ }
+ }
+
+ lmv_init_unlock(lmv);
+ return rc;
+}
+
+int lmv_check_connect(struct obd_device *obd)
+{
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt;
+ int i;
+ int rc;
+ int easize;
+
+ if (lmv->connected)
+ return 0;
+
+ lmv_init_lock(lmv);
+ if (lmv->connected) {
+ lmv_init_unlock(lmv);
+ return 0;
+ }
+
+ if (lmv->desc.ld_tgt_count == 0) {
+ lmv_init_unlock(lmv);
+ CERROR("%s: no targets configured.\n", obd->obd_name);
+ return -EINVAL;
+ }
+
+ CDEBUG(D_CONFIG, "Time to connect %s to %s\n",
+ lmv->cluuid.uuid, obd->obd_name);
+
+ LASSERT(lmv->tgts != NULL);
+
+ for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ tgt = lmv->tgts[i];
+ if (tgt == NULL)
+ continue;
+ rc = lmv_connect_mdc(obd, tgt);
+ if (rc)
+ GOTO(out_disc, rc);
+ }
+
+ lmv_set_timeouts(obd);
+ class_export_put(lmv->exp);
+ lmv->connected = 1;
+ easize = lmv_get_easize(lmv);
+ lmv_init_ea_size(obd->obd_self_export, easize, 0, 0);
+ lmv_init_unlock(lmv);
+ return 0;
+
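+	/*
+	 * Roll back on failure: disconnect every target that was connected
+	 * before the error.
+	 */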
+ out_disc:
+ while (i-- > 0) {
+ int rc2;
+ tgt = lmv->tgts[i];
+ if (tgt == NULL)
+ continue;
+ tgt->ltd_active = 0;
+ if (tgt->ltd_exp) {
+ --lmv->desc.ld_active_tgt_count;
+ rc2 = obd_disconnect(tgt->ltd_exp);
+ if (rc2) {
+ CERROR("LMV target %s disconnect on "
+ "MDC idx %d: error %d\n",
+ tgt->ltd_uuid.uuid, i, rc2);
+ }
+ }
+ }
+ class_disconnect(lmv->exp);
+ lmv_init_unlock(lmv);
+ return rc;
+}
+
+static int lmv_disconnect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
+{
+ struct proc_dir_entry *lmv_proc_dir;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct obd_device *mdc_obd;
+ int rc;
+
+ LASSERT(tgt != NULL);
+ LASSERT(obd != NULL);
+
+ mdc_obd = class_exp2obd(tgt->ltd_exp);
+
+ if (mdc_obd) {
+ mdc_obd->obd_force = obd->obd_force;
+ mdc_obd->obd_fail = obd->obd_fail;
+ mdc_obd->obd_no_recov = obd->obd_no_recov;
+ }
+
+ lmv_proc_dir = obd->obd_proc_private;
+ if (lmv_proc_dir)
+ lprocfs_remove_proc_entry(mdc_obd->obd_name, lmv_proc_dir);
+
+ rc = obd_fid_fini(tgt->ltd_exp->exp_obd);
+ if (rc)
+		CERROR("Can't finalize fids factory\n");
+
+ CDEBUG(D_INFO, "Disconnected from %s(%s) successfully\n",
+ tgt->ltd_exp->exp_obd->obd_name,
+ tgt->ltd_exp->exp_obd->obd_uuid.uuid);
+
+ obd_register_observer(tgt->ltd_exp->exp_obd, NULL);
+ rc = obd_disconnect(tgt->ltd_exp);
+ if (rc) {
+ if (tgt->ltd_active) {
+ CERROR("Target %s disconnect error %d\n",
+ tgt->ltd_uuid.uuid, rc);
+ }
+ }
+
+ lmv_activate_target(lmv, tgt, 0);
+ tgt->ltd_exp = NULL;
+ return 0;
+}
+
+static int lmv_disconnect(struct obd_export *exp)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+ struct lmv_obd *lmv = &obd->u.lmv;
+ int rc;
+ int i;
+
+ if (!lmv->tgts)
+ goto out_local;
+
+ /*
+ * Only disconnect the underlying layers on the final disconnect.
+ */
+ lmv->refcount--;
+ if (lmv->refcount != 0)
+ goto out_local;
+
+ for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL)
+ continue;
+
+ lmv_disconnect_mdc(obd, lmv->tgts[i]);
+ }
+
+ if (obd->obd_proc_private)
+ lprocfs_remove((struct proc_dir_entry **)&obd->obd_proc_private);
+ else
+ CERROR("/proc/fs/lustre/%s/%s/target_obds missing\n",
+ obd->obd_type->typ_name, obd->obd_name);
+
+out_local:
+ /*
+ * This is the case when no real connection is established by
+ * lmv_check_connect().
+ */
+ if (!lmv->connected)
+ class_export_put(exp);
+ rc = class_disconnect(exp);
+ if (lmv->refcount == 0)
+ lmv->connected = 0;
+ return rc;
+}
+
+static int lmv_fid2path(struct obd_export *exp, int len, void *karg, void *uarg)
+{
+ struct obd_device *obddev = class_exp2obd(exp);
+ struct lmv_obd *lmv = &obddev->u.lmv;
+ struct getinfo_fid2path *gf;
+ struct lmv_tgt_desc *tgt;
+ struct getinfo_fid2path *remote_gf = NULL;
+ int remote_gf_size = 0;
+ int rc;
+
+ gf = (struct getinfo_fid2path *)karg;
+ tgt = lmv_find_target(lmv, &gf->gf_fid);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+repeat_fid2path:
+ rc = obd_iocontrol(OBD_IOC_FID2PATH, tgt->ltd_exp, len, gf, uarg);
+ if (rc != 0 && rc != -EREMOTE)
+ GOTO(out_fid2path, rc);
+
+	/* If remote_gf != NULL, we are just building the path on the
+	 * remote MDT; copy this path segment to gf */
+ if (remote_gf != NULL) {
+ struct getinfo_fid2path *ori_gf;
+ char *ptr;
+
+ ori_gf = (struct getinfo_fid2path *)karg;
+ if (strlen(ori_gf->gf_path) +
+ strlen(gf->gf_path) > ori_gf->gf_pathlen)
+ GOTO(out_fid2path, rc = -EOVERFLOW);
+
+ ptr = ori_gf->gf_path;
+
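+		/*
+		 * Prepend the remote segment: shift the path built so far to
+		 * the right and copy the new segment in front of it,
+		 * separated by '/'.
+		 */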
+ memmove(ptr + strlen(gf->gf_path) + 1, ptr,
+ strlen(ori_gf->gf_path));
+
+ strncpy(ptr, gf->gf_path, strlen(gf->gf_path));
+ ptr += strlen(gf->gf_path);
+ *ptr = '/';
+ }
+
+ CDEBUG(D_INFO, "%s: get path %s "DFID" rec: "LPU64" ln: %u\n",
+ tgt->ltd_exp->exp_obd->obd_name,
+ gf->gf_path, PFID(&gf->gf_fid), gf->gf_recno,
+ gf->gf_linkno);
+
+ if (rc == 0)
+ GOTO(out_fid2path, rc);
+
+	/* Sigh, we have to go to another MDT to continue building the path */
+ if (remote_gf == NULL) {
+ remote_gf_size = sizeof(*remote_gf) + PATH_MAX;
+ OBD_ALLOC(remote_gf, remote_gf_size);
+ if (remote_gf == NULL)
+ GOTO(out_fid2path, rc = -ENOMEM);
+ remote_gf->gf_pathlen = PATH_MAX;
+ }
+
+ if (!fid_is_sane(&gf->gf_fid)) {
+ CERROR("%s: invalid FID "DFID": rc = %d\n",
+ tgt->ltd_exp->exp_obd->obd_name,
+ PFID(&gf->gf_fid), -EINVAL);
+ GOTO(out_fid2path, rc = -EINVAL);
+ }
+
+ tgt = lmv_find_target(lmv, &gf->gf_fid);
+ if (IS_ERR(tgt))
+ GOTO(out_fid2path, rc = -EINVAL);
+
+ remote_gf->gf_fid = gf->gf_fid;
+ remote_gf->gf_recno = -1;
+ remote_gf->gf_linkno = -1;
+ memset(remote_gf->gf_path, 0, remote_gf->gf_pathlen);
+ gf = remote_gf;
+ goto repeat_fid2path;
+
+out_fid2path:
+ if (remote_gf != NULL)
+ OBD_FREE(remote_gf, remote_gf_size);
+ return rc;
+}
+
+static int lmv_hsm_req_count(struct lmv_obd *lmv,
+ const struct hsm_user_request *hur,
+ const struct lmv_tgt_desc *tgt_mds)
+{
+ int i, nr = 0;
+ struct lmv_tgt_desc *curr_tgt;
+
+ /* count how many requests must be sent to the given target */
+ for (i = 0; i < hur->hur_request.hr_itemcount; i++) {
+ curr_tgt = lmv_find_target(lmv, &hur->hur_user_item[i].hui_fid);
+ if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid))
+ nr++;
+ }
+ return nr;
+}
+
+static void lmv_hsm_req_build(struct lmv_obd *lmv,
+ struct hsm_user_request *hur_in,
+ const struct lmv_tgt_desc *tgt_mds,
+ struct hsm_user_request *hur_out)
+{
+ int i, nr_out;
+ struct lmv_tgt_desc *curr_tgt;
+
+ /* build the hsm_user_request for the given target */
+ hur_out->hur_request = hur_in->hur_request;
+ nr_out = 0;
+ for (i = 0; i < hur_in->hur_request.hr_itemcount; i++) {
+ curr_tgt = lmv_find_target(lmv,
+ &hur_in->hur_user_item[i].hui_fid);
+ if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid)) {
+ hur_out->hur_user_item[nr_out] =
+ hur_in->hur_user_item[i];
+ nr_out++;
+ }
+ }
+ hur_out->hur_request.hr_itemcount = nr_out;
+ memcpy(hur_data(hur_out), hur_data(hur_in),
+ hur_in->hur_request.hr_data_len);
+}
+
+static int lmv_hsm_ct_unregister(struct lmv_obd *lmv, unsigned int cmd, int len,
+ struct lustre_kernelcomm *lk, void *uarg)
+{
+ int i, rc = 0;
+
+ /* unregister request (call from llapi_hsm_copytool_fini) */
+ for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ /* best effort: try to clean as much as possible
+ * (continue on error) */
+ obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len, lk, uarg);
+ }
+
+ /* Whatever the result, remove copytool from kuc groups.
+ * Unreached coordinators will get EPIPE on next requests
+ * and will unregister automatically.
+ */
+ rc = libcfs_kkuc_group_rem(lk->lk_uid, lk->lk_group);
+ return rc;
+}
+
+static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len,
+ struct lustre_kernelcomm *lk, void *uarg)
+{
+ struct file *filp;
+ int i, j, err;
+ int rc = 0;
+ bool any_set = false;
+
+	/* All or nothing: try to register to all MDS.
+	 * In case of failure, unregister from the previous MDS,
+	 * unless the failure is due to an inactive target. */
+ for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp,
+ len, lk, uarg);
+ if (err) {
+ if (lmv->tgts[i]->ltd_active) {
+ /* permanent error */
+				CERROR("error: iocontrol MDC %s on MDT "
+				       "idx %d cmd %x: err = %d\n",
+ lmv->tgts[i]->ltd_uuid.uuid,
+ i, cmd, err);
+ rc = err;
+ lk->lk_flags |= LK_FLG_STOP;
+ /* unregister from previous MDS */
+ for (j = 0; j < i; j++)
+ obd_iocontrol(cmd,
+ lmv->tgts[j]->ltd_exp,
+ len, lk, uarg);
+ return rc;
+ }
+ /* else: transient error.
+ * kuc will register to the missing MDT
+ * when it is back */
+ } else {
+ any_set = true;
+ }
+ }
+
+ if (!any_set)
+ /* no registration done: return error */
+ return -ENOTCONN;
+
+ /* at least one registration done, with no failure */
+	filp = fget(lk->lk_wfd);
+	if (filp == NULL)
+		return -EBADF;
+
+	rc = libcfs_kkuc_group_add(filp, lk->lk_uid, lk->lk_group, lk->lk_data);
+	if (rc != 0)
+		fput(filp);
+ return rc;
+}
+
+
+static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
+ int len, void *karg, void *uarg)
+{
+ struct obd_device *obddev = class_exp2obd(exp);
+ struct lmv_obd *lmv = &obddev->u.lmv;
+ int i = 0;
+ int rc = 0;
+ int set = 0;
+ int count = lmv->desc.ld_tgt_count;
+
+ if (count == 0)
+ return -ENOTTY;
+
+ switch (cmd) {
+ case IOC_OBD_STATFS: {
+ struct obd_ioctl_data *data = karg;
+ struct obd_device *mdc_obd;
+ struct obd_statfs stat_buf = {0};
+ __u32 index;
+
+ memcpy(&index, data->ioc_inlbuf2, sizeof(__u32));
+		if (index >= count)
+ return -ENODEV;
+
+ if (lmv->tgts[index] == NULL ||
+ lmv->tgts[index]->ltd_active == 0)
+ return -ENODATA;
+
+ mdc_obd = class_exp2obd(lmv->tgts[index]->ltd_exp);
+ if (!mdc_obd)
+ return -EINVAL;
+
+ /* copy UUID */
+ if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd),
+ min((int) data->ioc_plen2,
+ (int) sizeof(struct obd_uuid))))
+ return -EFAULT;
+
+ rc = obd_statfs(NULL, lmv->tgts[index]->ltd_exp, &stat_buf,
+ cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ 0);
+ if (rc)
+ return rc;
+ if (copy_to_user(data->ioc_pbuf1, &stat_buf,
+ min((int) data->ioc_plen1,
+ (int) sizeof(stat_buf))))
+ return -EFAULT;
+ break;
+ }
+ case OBD_IOC_QUOTACTL: {
+ struct if_quotactl *qctl = karg;
+ struct lmv_tgt_desc *tgt = NULL;
+ struct obd_quotactl *oqctl;
+
+ if (qctl->qc_valid == QC_MDTIDX) {
+ if (qctl->qc_idx < 0 || count <= qctl->qc_idx)
+ return -EINVAL;
+
+ tgt = lmv->tgts[qctl->qc_idx];
+ if (tgt == NULL || tgt->ltd_exp == NULL)
+ return -EINVAL;
+ } else if (qctl->qc_valid == QC_UUID) {
+ for (i = 0; i < count; i++) {
+ tgt = lmv->tgts[i];
+ if (tgt == NULL)
+ continue;
+ if (!obd_uuid_equals(&tgt->ltd_uuid,
+ &qctl->obd_uuid))
+ continue;
+
+ if (tgt->ltd_exp == NULL)
+ return -EINVAL;
+
+ break;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ if (i >= count)
+ return -EAGAIN;
+
+ LASSERT(tgt && tgt->ltd_exp);
+ OBD_ALLOC_PTR(oqctl);
+ if (!oqctl)
+ return -ENOMEM;
+
+ QCTL_COPY(oqctl, qctl);
+ rc = obd_quotactl(tgt->ltd_exp, oqctl);
+ if (rc == 0) {
+ QCTL_COPY(qctl, oqctl);
+ qctl->qc_valid = QC_MDTIDX;
+ qctl->obd_uuid = tgt->ltd_uuid;
+ }
+ OBD_FREE_PTR(oqctl);
+ break;
+ }
+ case OBD_IOC_CHANGELOG_SEND:
+ case OBD_IOC_CHANGELOG_CLEAR: {
+ struct ioc_changelog *icc = karg;
+
+ if (icc->icc_mdtindex >= count)
+ return -ENODEV;
+
+ if (lmv->tgts[icc->icc_mdtindex] == NULL ||
+ lmv->tgts[icc->icc_mdtindex]->ltd_exp == NULL ||
+ lmv->tgts[icc->icc_mdtindex]->ltd_active == 0)
+ return -ENODEV;
+ rc = obd_iocontrol(cmd, lmv->tgts[icc->icc_mdtindex]->ltd_exp,
+ sizeof(*icc), icc, NULL);
+ break;
+ }
+ case LL_IOC_GET_CONNECT_FLAGS: {
+ if (lmv->tgts[0] == NULL)
+ return -ENODATA;
+ rc = obd_iocontrol(cmd, lmv->tgts[0]->ltd_exp, len, karg, uarg);
+ break;
+ }
+ case OBD_IOC_FID2PATH: {
+ rc = lmv_fid2path(exp, len, karg, uarg);
+ break;
+ }
+ case LL_IOC_HSM_STATE_GET:
+ case LL_IOC_HSM_STATE_SET:
+ case LL_IOC_HSM_ACTION: {
+ struct md_op_data *op_data = karg;
+ struct lmv_tgt_desc *tgt;
+
+ tgt = lmv_find_target(lmv, &op_data->op_fid1);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ if (tgt->ltd_exp == NULL)
+ return -EINVAL;
+
+ rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
+ break;
+ }
+ case LL_IOC_HSM_PROGRESS: {
+ const struct hsm_progress_kernel *hpk = karg;
+ struct lmv_tgt_desc *tgt;
+
+ tgt = lmv_find_target(lmv, &hpk->hpk_fid);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+ rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
+ break;
+ }
+ case LL_IOC_HSM_REQUEST: {
+ struct hsm_user_request *hur = karg;
+ struct lmv_tgt_desc *tgt;
+ unsigned int reqcount = hur->hur_request.hr_itemcount;
+
+ if (reqcount == 0)
+ return 0;
+
+ /* if the request is about a single fid
+ * or if there is a single MDS, no need to split
+ * the request. */
+ if (reqcount == 1 || count == 1) {
+ tgt = lmv_find_target(lmv,
+ &hur->hur_user_item[0].hui_fid);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+ rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
+ } else {
+ /* split fid list to their respective MDS */
+ for (i = 0; i < count; i++) {
+ unsigned int nr, reqlen;
+ int rc1;
+ struct hsm_user_request *req;
+
+ nr = lmv_hsm_req_count(lmv, hur, lmv->tgts[i]);
+ if (nr == 0) /* nothing for this MDS */
+ continue;
+
+ /* build a request with fids for this MDS */
+ reqlen = offsetof(typeof(*hur),
+ hur_user_item[nr])
+ + hur->hur_request.hr_data_len;
+ OBD_ALLOC_LARGE(req, reqlen);
+ if (req == NULL)
+ return -ENOMEM;
+
+ lmv_hsm_req_build(lmv, hur, lmv->tgts[i], req);
+
+ rc1 = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp,
+ reqlen, req, uarg);
+ if (rc1 != 0 && rc == 0)
+ rc = rc1;
+ OBD_FREE_LARGE(req, reqlen);
+ }
+ }
+ break;
+ }
+ case LL_IOC_LOV_SWAP_LAYOUTS: {
+ struct md_op_data *op_data = karg;
+ struct lmv_tgt_desc *tgt1, *tgt2;
+
+ tgt1 = lmv_find_target(lmv, &op_data->op_fid1);
+ if (IS_ERR(tgt1))
+ return PTR_ERR(tgt1);
+
+ tgt2 = lmv_find_target(lmv, &op_data->op_fid2);
+ if (IS_ERR(tgt2))
+ return PTR_ERR(tgt2);
+
+ if ((tgt1->ltd_exp == NULL) || (tgt2->ltd_exp == NULL))
+ return -EINVAL;
+
+ /* only files on same MDT can have their layouts swapped */
+ if (tgt1->ltd_idx != tgt2->ltd_idx)
+ return -EPERM;
+
+ rc = obd_iocontrol(cmd, tgt1->ltd_exp, len, karg, uarg);
+ break;
+ }
+ case LL_IOC_HSM_CT_START: {
+ struct lustre_kernelcomm *lk = karg;
+ if (lk->lk_flags & LK_FLG_STOP)
+ rc = lmv_hsm_ct_unregister(lmv, cmd, len, lk, uarg);
+ else
+ rc = lmv_hsm_ct_register(lmv, cmd, len, lk, uarg);
+ break;
+ }
+ default:
+ for (i = 0; i < count; i++) {
+ struct obd_device *mdc_obd;
+ int err;
+
+ if (lmv->tgts[i] == NULL ||
+ lmv->tgts[i]->ltd_exp == NULL)
+ continue;
+			/* ll_umount_begin() sets the force flag on the lmv,
+			 * not the mdc. Let's pass it through */
+ mdc_obd = class_exp2obd(lmv->tgts[i]->ltd_exp);
+ mdc_obd->obd_force = obddev->obd_force;
+ err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len,
+ karg, uarg);
+ if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) {
+ return err;
+ } else if (err) {
+ if (lmv->tgts[i]->ltd_active) {
+					CERROR("error: iocontrol MDC %s on MDT "
+					       "idx %d cmd %x: err = %d\n",
+ lmv->tgts[i]->ltd_uuid.uuid,
+ i, cmd, err);
+ if (!rc)
+ rc = err;
+ }
+ } else
+ set = 1;
+ }
+ if (!set && !rc)
+ rc = -EIO;
+ }
+ return rc;
+}
+
+#if 0
+static int lmv_all_chars_policy(int count, const char *name,
+ int len)
+{
+ unsigned int c = 0;
+
+ while (len > 0)
+ c += name[--len];
+ c = c % count;
+ return c;
+}
+
+static int lmv_nid_policy(struct lmv_obd *lmv)
+{
+ struct obd_import *imp;
+ __u32 id;
+
+ /*
+ * XXX: To get nid we assume that underlying obd device is mdc.
+ */
+ imp = class_exp2cliimp(lmv->tgts[0].ltd_exp);
+ id = imp->imp_connection->c_self ^ (imp->imp_connection->c_self >> 32);
+ return id % lmv->desc.ld_tgt_count;
+}
+
+static int lmv_choose_mds(struct lmv_obd *lmv, struct md_op_data *op_data,
+ placement_policy_t placement)
+{
+ switch (placement) {
+ case PLACEMENT_CHAR_POLICY:
+ return lmv_all_chars_policy(lmv->desc.ld_tgt_count,
+ op_data->op_name,
+ op_data->op_namelen);
+ case PLACEMENT_NID_POLICY:
+ return lmv_nid_policy(lmv);
+
+ default:
+ break;
+ }
+
+ CERROR("Unsupported placement policy %x\n", placement);
+ return -EINVAL;
+}
+#endif
+
+/**
+ * This is the _inode_ placement policy function (not name-based placement).
+ */
+static int lmv_placement_policy(struct obd_device *obd,
+ struct md_op_data *op_data,
+ mdsno_t *mds)
+{
+ struct lmv_obd *lmv = &obd->u.lmv;
+
+ LASSERT(mds != NULL);
+
+ if (lmv->desc.ld_tgt_count == 1) {
+ *mds = 0;
+ return 0;
+ }
+
+	/**
+	 * If stripe_offset is provided during setdirstripe
+	 * (setdirstripe -i xx), MDS xx will be chosen.
+	 */
+ if (op_data->op_cli_flags & CLI_SET_MEA) {
+ struct lmv_user_md *lum;
+
+ lum = (struct lmv_user_md *)op_data->op_data;
+ if (lum->lum_type == LMV_STRIPE_TYPE &&
+ lum->lum_stripe_offset != -1) {
+ if (lum->lum_stripe_offset >= lmv->desc.ld_tgt_count) {
+ CERROR("%s: Stripe_offset %d > MDT count %d:"
+ " rc = %d\n", obd->obd_name,
+ lum->lum_stripe_offset,
+ lmv->desc.ld_tgt_count, -ERANGE);
+ return -ERANGE;
+ }
+ *mds = lum->lum_stripe_offset;
+ return 0;
+ }
+ }
+
+ /* Allocate new fid on target according to operation type and parent
+ * home mds. */
+ *mds = op_data->op_mds;
+ return 0;
+}
+
+int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid,
+ mdsno_t mds)
+{
+ struct lmv_tgt_desc *tgt;
+ int rc;
+
+ tgt = lmv_get_target(lmv, mds);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+	/*
+	 * New seq alloc and FLD setup should be atomic. Otherwise the server
+	 * may find that the seq in a newly allocated fid is not yet known.
+	 */
+ mutex_lock(&tgt->ltd_fid_mutex);
+
+ if (tgt->ltd_active == 0 || tgt->ltd_exp == NULL)
+ GOTO(out, rc = -ENODEV);
+
+	/*
+	 * Ask the underlying tgt layer to allocate a new fid.
+	 */
+ rc = obd_fid_alloc(tgt->ltd_exp, fid, NULL);
+ if (rc > 0) {
+ LASSERT(fid_is_sane(fid));
+ rc = 0;
+ }
+
+out:
+ mutex_unlock(&tgt->ltd_fid_mutex);
+ return rc;
+}
+
+int lmv_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
+ struct md_op_data *op_data)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+ struct lmv_obd *lmv = &obd->u.lmv;
+ mdsno_t mds = 0;
+ int rc;
+
+ LASSERT(op_data != NULL);
+ LASSERT(fid != NULL);
+
+ rc = lmv_placement_policy(obd, op_data, &mds);
+ if (rc) {
+ CERROR("Can't get target for allocating fid, "
+ "rc %d\n", rc);
+ return rc;
+ }
+
+ rc = __lmv_fid_alloc(lmv, fid, mds);
+ if (rc) {
+ CERROR("Can't alloc new fid, rc %d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
+{
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lprocfs_static_vars lvars;
+ struct lmv_desc *desc;
+ int rc;
+
+ if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
+ CERROR("LMV setup requires a descriptor\n");
+ return -EINVAL;
+ }
+
+ desc = (struct lmv_desc *)lustre_cfg_buf(lcfg, 1);
+ if (sizeof(*desc) > LUSTRE_CFG_BUFLEN(lcfg, 1)) {
+ CERROR("Lmv descriptor size wrong: %d > %d\n",
+ (int)sizeof(*desc), LUSTRE_CFG_BUFLEN(lcfg, 1));
+ return -EINVAL;
+ }
+
+ OBD_ALLOC(lmv->tgts, sizeof(*lmv->tgts) * 32);
+ if (lmv->tgts == NULL)
+ return -ENOMEM;
+ lmv->tgts_size = 32;
+
+ obd_str2uuid(&lmv->desc.ld_uuid, desc->ld_uuid.uuid);
+ lmv->desc.ld_tgt_count = 0;
+ lmv->desc.ld_active_tgt_count = 0;
+ lmv->max_cookiesize = 0;
+ lmv->max_def_easize = 0;
+ lmv->max_easize = 0;
+ lmv->lmv_placement = PLACEMENT_CHAR_POLICY;
+
+ spin_lock_init(&lmv->lmv_lock);
+ mutex_init(&lmv->init_mutex);
+
+ lprocfs_lmv_init_vars(&lvars);
+
+ lprocfs_obd_setup(obd, lvars.obd_vars);
+#ifdef LPROCFS
+ {
+ rc = lprocfs_seq_create(obd->obd_proc_entry, "target_obd",
+ 0444, &lmv_proc_target_fops, obd);
+ if (rc)
+ CWARN("%s: error adding LMV target_obd file: rc = %d\n",
+ obd->obd_name, rc);
+ }
+#endif
+ rc = fld_client_init(&lmv->lmv_fld, obd->obd_name,
+ LUSTRE_CLI_FLD_HASH_DHT);
+ if (rc) {
+ CERROR("Can't init FLD, err %d\n", rc);
+ GOTO(out, rc);
+ }
+
+ return 0;
+
+out:
+ return rc;
+}
+
+static int lmv_cleanup(struct obd_device *obd)
+{
+ struct lmv_obd *lmv = &obd->u.lmv;
+
+ fld_client_fini(&lmv->lmv_fld);
+ if (lmv->tgts != NULL) {
+ int i;
+ for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ if (lmv->tgts[i] == NULL)
+ continue;
+ lmv_del_target(lmv, i);
+ }
+ OBD_FREE(lmv->tgts, sizeof(*lmv->tgts) * lmv->tgts_size);
+ lmv->tgts_size = 0;
+ }
+ return 0;
+}
+
+static int lmv_process_config(struct obd_device *obd, obd_count len, void *buf)
+{
+ struct lustre_cfg *lcfg = buf;
+ struct obd_uuid obd_uuid;
+ int gen;
+ __u32 index;
+ int rc;
+
+ switch (lcfg->lcfg_command) {
+ case LCFG_ADD_MDC:
+ /* modify_mdc_tgts add 0:lustre-clilmv 1:lustre-MDT0000_UUID
+ * 2:0 3:1 4:lustre-MDT0000-mdc_UUID */
+ if (LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(obd_uuid.uuid))
+ GOTO(out, rc = -EINVAL);
+
+ obd_str2uuid(&obd_uuid, lustre_cfg_buf(lcfg, 1));
+
+		if (sscanf(lustre_cfg_buf(lcfg, 2), "%u", &index) != 1)
+ GOTO(out, rc = -EINVAL);
+ if (sscanf(lustre_cfg_buf(lcfg, 3), "%d", &gen) != 1)
+ GOTO(out, rc = -EINVAL);
+ rc = lmv_add_target(obd, &obd_uuid, index, gen);
+ GOTO(out, rc);
+ default:
+ CERROR("Unknown command: %d\n", lcfg->lcfg_command);
+ GOTO(out, rc = -EINVAL);
+ }
+out:
+ return rc;
+}
+
+static int lmv_statfs(const struct lu_env *env, struct obd_export *exp,
+ struct obd_statfs *osfs, __u64 max_age, __u32 flags)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct obd_statfs *temp;
+ int rc = 0;
+ int i;
+
+ rc = lmv_check_connect(obd);
+ if (rc)
+ return rc;
+
+ OBD_ALLOC(temp, sizeof(*temp));
+ if (temp == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL)
+ continue;
+
+ rc = obd_statfs(env, lmv->tgts[i]->ltd_exp, temp,
+ max_age, flags);
+ if (rc) {
+ CERROR("can't stat MDS #%d (%s), error %d\n", i,
+ lmv->tgts[i]->ltd_exp->exp_obd->obd_name,
+ rc);
+ GOTO(out_free_temp, rc);
+ }
+
+ if (i == 0) {
+ *osfs = *temp;
+			/* If the statfs is from mount, it only needs to
+			 * retrieve the necessary information from MDT0,
+			 * i.e. mount does not need the merged osfs from
+			 * all of the MDTs. Also, clients can be mounted
+			 * as long as MDT0 is in service. */
+ if (flags & OBD_STATFS_FOR_MDT0)
+ GOTO(out_free_temp, rc);
+ } else {
+ osfs->os_bavail += temp->os_bavail;
+ osfs->os_blocks += temp->os_blocks;
+ osfs->os_ffree += temp->os_ffree;
+ osfs->os_files += temp->os_files;
+ }
+ }
+
+out_free_temp:
+ OBD_FREE(temp, sizeof(*temp));
+ return rc;
+}
+
+static int lmv_getstatus(struct obd_export *exp,
+ struct lu_fid *fid,
+ struct obd_capa **pc)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ int rc;
+
+ rc = lmv_check_connect(obd);
+ if (rc)
+ return rc;
+
+ rc = md_getstatus(lmv->tgts[0]->ltd_exp, fid, pc);
+ return rc;
+}
+
+static int lmv_getxattr(struct obd_export *exp, const struct lu_fid *fid,
+ struct obd_capa *oc, obd_valid valid, const char *name,
+ const char *input, int input_size, int output_size,
+ int flags, struct ptlrpc_request **request)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt;
+ int rc;
+
+ rc = lmv_check_connect(obd);
+ if (rc)
+ return rc;
+
+ tgt = lmv_find_target(lmv, fid);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ rc = md_getxattr(tgt->ltd_exp, fid, oc, valid, name, input,
+ input_size, output_size, flags, request);
+
+ return rc;
+}
+
+static int lmv_setxattr(struct obd_export *exp, const struct lu_fid *fid,
+ struct obd_capa *oc, obd_valid valid, const char *name,
+ const char *input, int input_size, int output_size,
+ int flags, __u32 suppgid,
+ struct ptlrpc_request **request)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt;
+ int rc;
+
+ rc = lmv_check_connect(obd);
+ if (rc)
+ return rc;
+
+ tgt = lmv_find_target(lmv, fid);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ rc = md_setxattr(tgt->ltd_exp, fid, oc, valid, name, input,
+ input_size, output_size, flags, suppgid,
+ request);
+
+ return rc;
+}
+
+static int lmv_getattr(struct obd_export *exp, struct md_op_data *op_data,
+ struct ptlrpc_request **request)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt;
+ int rc;
+
+ rc = lmv_check_connect(obd);
+ if (rc)
+ return rc;
+
+ tgt = lmv_find_target(lmv, &op_data->op_fid1);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ if (op_data->op_flags & MF_GET_MDT_IDX) {
+ op_data->op_mds = tgt->ltd_idx;
+ return 0;
+ }
+
+ rc = md_getattr(tgt->ltd_exp, op_data, request);
+
+ return rc;
+}
+
+static int lmv_null_inode(struct obd_export *exp, const struct lu_fid *fid)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ int i;
+ int rc;
+
+ rc = lmv_check_connect(obd);
+ if (rc)
+ return rc;
+
+ CDEBUG(D_INODE, "CBDATA for "DFID"\n", PFID(fid));
+
+	/*
+	 * With DNE every object can have two locks in different namespaces:
+	 * a lookup lock in the namespace of the MDT storing the direntry and
+	 * an update/open lock in the namespace of the MDT storing the inode.
+	 */
+ for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL)
+ continue;
+ md_null_inode(lmv->tgts[i]->ltd_exp, fid);
+ }
+
+ return 0;
+}
+
+static int lmv_find_cbdata(struct obd_export *exp, const struct lu_fid *fid,
+ ldlm_iterator_t it, void *data)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ int i;
+ int rc;
+
+ rc = lmv_check_connect(obd);
+ if (rc)
+ return rc;
+
+ CDEBUG(D_INODE, "CBDATA for "DFID"\n", PFID(fid));
+
+	/*
+	 * With DNE every object can have two locks in different namespaces:
+	 * a lookup lock in the namespace of the MDT storing the direntry and
+	 * an update/open lock in the namespace of the MDT storing the inode.
+	 */
+ for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL)
+ continue;
+ rc = md_find_cbdata(lmv->tgts[i]->ltd_exp, fid, it, data);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+
+static int lmv_close(struct obd_export *exp, struct md_op_data *op_data,
+ struct md_open_data *mod, struct ptlrpc_request **request)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt;
+ int rc;
+
+ rc = lmv_check_connect(obd);
+ if (rc)
+ return rc;
+
+ tgt = lmv_find_target(lmv, &op_data->op_fid1);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ CDEBUG(D_INODE, "CLOSE "DFID"\n", PFID(&op_data->op_fid1));
+ rc = md_close(tgt->ltd_exp, op_data, mod, request);
+ return rc;
+}
+
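+/*
+ * Locate the MDT that holds @fid and record its index in op_data->op_mds for
+ * the subsequent request.
+ */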
+struct lmv_tgt_desc
+*lmv_locate_mds(struct lmv_obd *lmv, struct md_op_data *op_data,
+ struct lu_fid *fid)
+{
+ struct lmv_tgt_desc *tgt;
+
+ tgt = lmv_find_target(lmv, fid);
+ if (IS_ERR(tgt))
+ return tgt;
+
+ op_data->op_mds = tgt->ltd_idx;
+
+ return tgt;
+}
+
+int lmv_create(struct obd_export *exp, struct md_op_data *op_data,
+ const void *data, int datalen, int mode, __u32 uid,
+ __u32 gid, cfs_cap_t cap_effective, __u64 rdev,
+ struct ptlrpc_request **request)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt;
+ int rc;
+
+ rc = lmv_check_connect(obd);
+ if (rc)
+ return rc;
+
+ if (!lmv->desc.ld_active_tgt_count)
+ return -EIO;
+
+ tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ rc = lmv_fid_alloc(exp, &op_data->op_fid2, op_data);
+ if (rc)
+ return rc;
+
+ CDEBUG(D_INODE, "CREATE '%*s' on "DFID" -> mds #%x\n",
+ op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
+ op_data->op_mds);
+
+ op_data->op_flags |= MF_MDC_CANCEL_FID1;
+ rc = md_create(tgt->ltd_exp, op_data, data, datalen, mode, uid, gid,
+ cap_effective, rdev, request);
+
+ if (rc == 0) {
+ if (*request == NULL)
+ return rc;
+ CDEBUG(D_INODE, "Created - "DFID"\n", PFID(&op_data->op_fid2));
+ }
+ return rc;
+}
+
+static int lmv_done_writing(struct obd_export *exp,
+ struct md_op_data *op_data,
+ struct md_open_data *mod)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt;
+ int rc;
+
+ rc = lmv_check_connect(obd);
+ if (rc)
+ return rc;
+
+ tgt = lmv_find_target(lmv, &op_data->op_fid1);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ rc = md_done_writing(tgt->ltd_exp, op_data, mod);
+ return rc;
+}
+
+static int
+lmv_enqueue_remote(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
+ struct lookup_intent *it, struct md_op_data *op_data,
+ struct lustre_handle *lockh, void *lmm, int lmmsize,
+ int extra_lock_flags)
+{
+ struct ptlrpc_request *req = it->d.lustre.it_data;
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lustre_handle plock;
+ struct lmv_tgt_desc *tgt;
+ struct md_op_data *rdata;
+ struct lu_fid fid1;
+ struct mdt_body *body;
+ int rc = 0;
+ int pmode;
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ LASSERT(body != NULL);
+
+ if (!(body->valid & OBD_MD_MDS))
+ return 0;
+
+ CDEBUG(D_INODE, "REMOTE_ENQUEUE '%s' on "DFID" -> "DFID"\n",
+ LL_IT2STR(it), PFID(&op_data->op_fid1), PFID(&body->fid1));
+
+ /*
+ * We got LOOKUP lock, but we really need attrs.
+ */
+ pmode = it->d.lustre.it_lock_mode;
+ LASSERT(pmode != 0);
+ memcpy(&plock, lockh, sizeof(plock));
+ it->d.lustre.it_lock_mode = 0;
+ it->d.lustre.it_data = NULL;
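+	/* body points into req, so save the remote fid before the request is
+	 * released below. */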
+ fid1 = body->fid1;
+
+ it->d.lustre.it_disposition &= ~DISP_ENQ_COMPLETE;
+ ptlrpc_req_finished(req);
+
+ tgt = lmv_find_target(lmv, &fid1);
+ if (IS_ERR(tgt))
+ GOTO(out, rc = PTR_ERR(tgt));
+
+ OBD_ALLOC_PTR(rdata);
+ if (rdata == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ rdata->op_fid1 = fid1;
+ rdata->op_bias = MDS_CROSS_REF;
+
+ rc = md_enqueue(tgt->ltd_exp, einfo, it, rdata, lockh,
+ lmm, lmmsize, NULL, extra_lock_flags);
+ OBD_FREE_PTR(rdata);
+out:
+ ldlm_lock_decref(&plock, pmode);
+ return rc;
+}
+
+static int
+lmv_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
+ struct lookup_intent *it, struct md_op_data *op_data,
+ struct lustre_handle *lockh, void *lmm, int lmmsize,
+ struct ptlrpc_request **req, __u64 extra_lock_flags)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt;
+ int rc;
+
+ rc = lmv_check_connect(obd);
+ if (rc)
+ return rc;
+
+ CDEBUG(D_INODE, "ENQUEUE '%s' on "DFID"\n",
+ LL_IT2STR(it), PFID(&op_data->op_fid1));
+
+ tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ CDEBUG(D_INODE, "ENQUEUE '%s' on "DFID" -> mds #%d\n",
+ LL_IT2STR(it), PFID(&op_data->op_fid1), tgt->ltd_idx);
+
+ rc = md_enqueue(tgt->ltd_exp, einfo, it, op_data, lockh,
+ lmm, lmmsize, req, extra_lock_flags);
+
+ if (rc == 0 && it && it->it_op == IT_OPEN) {
+ rc = lmv_enqueue_remote(exp, einfo, it, op_data, lockh,
+ lmm, lmmsize, extra_lock_flags);
+ }
+ return rc;
+}
+
+static int
+lmv_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
+ struct ptlrpc_request **request)
+{
+ struct ptlrpc_request *req = NULL;
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt;
+ struct mdt_body *body;
+ int rc;
+
+ rc = lmv_check_connect(obd);
+ if (rc)
+ return rc;
+
+ tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ CDEBUG(D_INODE, "GETATTR_NAME for %*s on "DFID" -> mds #%d\n",
+ op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
+ tgt->ltd_idx);
+
+ rc = md_getattr_name(tgt->ltd_exp, op_data, request);
+ if (rc != 0)
+ return rc;
+
+ body = req_capsule_server_get(&(*request)->rq_pill,
+ &RMF_MDT_BODY);
+ LASSERT(body != NULL);
+
+ if (body->valid & OBD_MD_MDS) {
+ struct lu_fid rid = body->fid1;
+ CDEBUG(D_INODE, "Request attrs for "DFID"\n",
+ PFID(&rid));
+
+ tgt = lmv_find_target(lmv, &rid);
+ if (IS_ERR(tgt)) {
+ ptlrpc_req_finished(*request);
+ return PTR_ERR(tgt);
+ }
+
+ op_data->op_fid1 = rid;
+ op_data->op_valid |= OBD_MD_FLCROSSREF;
+ op_data->op_namelen = 0;
+ op_data->op_name = NULL;
+ rc = md_getattr_name(tgt->ltd_exp, op_data, &req);
+ ptlrpc_req_finished(*request);
+ *request = req;
+ }
+
+ return rc;
+}
+
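+/*
+ * Map an MF_MDC_CANCEL_FIDx flag to the corresponding fid in md_op_data.
+ */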
+#define md_op_data_fid(op_data, fl) \
+ (fl == MF_MDC_CANCEL_FID1 ? &op_data->op_fid1 : \
+ fl == MF_MDC_CANCEL_FID2 ? &op_data->op_fid2 : \
+ fl == MF_MDC_CANCEL_FID3 ? &op_data->op_fid3 : \
+ fl == MF_MDC_CANCEL_FID4 ? &op_data->op_fid4 : \
+ NULL)
+
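+/*
+ * If the fid lives on a different MDT than the one performing the operation
+ * (op_tgt), cancel its unused locks now; otherwise defer the cancel to the
+ * operation itself by setting the flag in op_data.
+ */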
+static int lmv_early_cancel(struct obd_export *exp, struct md_op_data *op_data,
+ int op_tgt, ldlm_mode_t mode, int bits, int flag)
+{
+ struct lu_fid *fid = md_op_data_fid(op_data, flag);
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt;
+ ldlm_policy_data_t policy = {{0}};
+ int rc = 0;
+
+ if (!fid_is_sane(fid))
+ return 0;
+
+ tgt = lmv_find_target(lmv, fid);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ if (tgt->ltd_idx != op_tgt) {
+ CDEBUG(D_INODE, "EARLY_CANCEL on "DFID"\n", PFID(fid));
+ policy.l_inodebits.bits = bits;
+ rc = md_cancel_unused(tgt->ltd_exp, fid, &policy,
+ mode, LCF_ASYNC, NULL);
+ } else {
+ CDEBUG(D_INODE,
+ "EARLY_CANCEL skip operation target %d on "DFID"\n",
+ op_tgt, PFID(fid));
+ op_data->op_flags |= flag;
+ rc = 0;
+ }
+
+ return rc;
+}
+
+/*
+ * llite passes the fid of the target inode in op_data->op_fid1 and the fid
+ * of the directory in op_data->op_fid2.
+ */
+static int lmv_link(struct obd_export *exp, struct md_op_data *op_data,
+ struct ptlrpc_request **request)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt;
+ int rc;
+
+ rc = lmv_check_connect(obd);
+ if (rc)
+ return rc;
+
+ LASSERT(op_data->op_namelen != 0);
+
+ CDEBUG(D_INODE, "LINK "DFID":%*s to "DFID"\n",
+ PFID(&op_data->op_fid2), op_data->op_namelen,
+ op_data->op_name, PFID(&op_data->op_fid1));
+
+ op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
+ op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
+ op_data->op_cap = cfs_curproc_cap_pack();
+ tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid2);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ /*
+ * Cancel UPDATE lock on child (fid1).
+ */
+ op_data->op_flags |= MF_MDC_CANCEL_FID2;
+ rc = lmv_early_cancel(exp, op_data, tgt->ltd_idx, LCK_EX,
+ MDS_INODELOCK_UPDATE, MF_MDC_CANCEL_FID1);
+ if (rc != 0)
+ return rc;
+
+ rc = md_link(tgt->ltd_exp, op_data, request);
+
+ return rc;
+}
+
+static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
+ const char *old, int oldlen, const char *new, int newlen,
+ struct ptlrpc_request **request)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *src_tgt;
+ struct lmv_tgt_desc *tgt_tgt;
+ int rc;
+
+ LASSERT(oldlen != 0);
+
+ CDEBUG(D_INODE, "RENAME %*s in "DFID" to %*s in "DFID"\n",
+ oldlen, old, PFID(&op_data->op_fid1),
+ newlen, new, PFID(&op_data->op_fid2));
+
+ rc = lmv_check_connect(obd);
+ if (rc)
+ return rc;
+
+ op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
+ op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
+ op_data->op_cap = cfs_curproc_cap_pack();
+ src_tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
+ if (IS_ERR(src_tgt))
+ return PTR_ERR(src_tgt);
+
+ tgt_tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid2);
+ if (IS_ERR(tgt_tgt))
+ return PTR_ERR(tgt_tgt);
+ /*
+ * LOOKUP lock on src child (fid3) should also be cancelled for
+ * src_tgt in mdc_rename.
+ */
+ op_data->op_flags |= MF_MDC_CANCEL_FID1 | MF_MDC_CANCEL_FID3;
+
+ /*
+ * Cancel UPDATE locks on tgt parent (fid2), tgt_tgt is its
+ * own target.
+ */
+ rc = lmv_early_cancel(exp, op_data, src_tgt->ltd_idx,
+ LCK_EX, MDS_INODELOCK_UPDATE,
+ MF_MDC_CANCEL_FID2);
+
+ /*
+ * Cancel LOOKUP locks on tgt child (fid4) for parent tgt_tgt.
+ */
+ if (rc == 0) {
+ rc = lmv_early_cancel(exp, op_data, src_tgt->ltd_idx,
+ LCK_EX, MDS_INODELOCK_LOOKUP,
+ MF_MDC_CANCEL_FID4);
+ }
+
+ /*
+ * Cancel all the locks on tgt child (fid4).
+ */
+ if (rc == 0)
+ rc = lmv_early_cancel(exp, op_data, src_tgt->ltd_idx,
+ LCK_EX, MDS_INODELOCK_FULL,
+ MF_MDC_CANCEL_FID4);
+
+ if (rc == 0)
+ rc = md_rename(src_tgt->ltd_exp, op_data, old, oldlen,
+ new, newlen, request);
+ return rc;
+}
+
+static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data,
+ void *ea, int ealen, void *ea2, int ea2len,
+ struct ptlrpc_request **request,
+ struct md_open_data **mod)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt;
+ int rc = 0;
+
+ rc = lmv_check_connect(obd);
+ if (rc)
+ return rc;
+
+ CDEBUG(D_INODE, "SETATTR for "DFID", valid 0x%x\n",
+ PFID(&op_data->op_fid1), op_data->op_attr.ia_valid);
+
+ op_data->op_flags |= MF_MDC_CANCEL_FID1;
+ tgt = lmv_find_target(lmv, &op_data->op_fid1);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ rc = md_setattr(tgt->ltd_exp, op_data, ea, ealen, ea2,
+ ea2len, request, mod);
+
+ return rc;
+}
+
+static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
+ struct obd_capa *oc, struct ptlrpc_request **request)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt;
+ int rc;
+
+ rc = lmv_check_connect(obd);
+ if (rc)
+ return rc;
+
+ tgt = lmv_find_target(lmv, fid);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ rc = md_sync(tgt->ltd_exp, fid, oc, request);
+ return rc;
+}
+
+/*
+ * Adjust a set of pages, each page containing an array of lu_dirpages,
+ * so that each page can be used as a single logical lu_dirpage.
+ *
+ * A lu_dirpage is laid out as follows, where s = ldp_hash_start,
+ * e = ldp_hash_end, f = ldp_flags, p = padding, and each "ent" is a
+ * struct lu_dirent. It has size up to LU_PAGE_SIZE. The ldp_hash_end
+ * value is used as a cookie to request the next lu_dirpage in a
+ * directory listing that spans multiple pages (two in this example):
+ * ________
+ * | |
+ * .|--------v------- -----.
+ * |s|e|f|p|ent|ent| ... |ent|
+ * '--|-------------- -----' Each CFS_PAGE contains a single
+ * '------. lu_dirpage.
+ * .---------v------- -----.
+ * |s|e|f|p|ent| 0 | ... | 0 |
+ * '----------------- -----'
+ *
+ * However, on hosts where the native VM page size (PAGE_CACHE_SIZE) is
+ * larger than LU_PAGE_SIZE, a single host page may contain multiple
+ * lu_dirpages. After reading the lu_dirpages from the MDS, the
+ * ldp_hash_end of the first lu_dirpage refers to the one immediately
+ * after it in the same CFS_PAGE (arrows simplified for brevity, but
+ * in general e0==s1, e1==s2, etc.):
+ *
+ * .-------------------- -----.
+ * |s0|e0|f0|p|ent|ent| ... |ent|
+ * |---v---------------- -----|
+ * |s1|e1|f1|p|ent|ent| ... |ent|
+ * |---v---------------- -----| Here, each CFS_PAGE contains
+ * ... multiple lu_dirpages.
+ * |---v---------------- -----|
+ * |s'|e'|f'|p|ent|ent| ... |ent|
+ * '---|---------------- -----'
+ * v
+ * .----------------------------.
+ * | next CFS_PAGE |
+ *
+ * This structure is transformed into a single logical lu_dirpage as follows:
+ *
+ * - Replace e0 with e' so the request for the next lu_dirpage gets the page
+ * labeled 'next CFS_PAGE'.
+ *
+ * - Copy the LDF_COLLIDE flag from f' to f0 to correctly reflect whether
+ * a hash collision with the next page exists.
+ *
+ * - Adjust the lde_reclen of the ending entry of each lu_dirpage to span
+ * to the first entry of the next lu_dirpage.
+ */
+#if PAGE_CACHE_SIZE > LU_PAGE_SIZE
+static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
+{
+ int i;
+
+ for (i = 0; i < ncfspgs; i++) {
+ struct lu_dirpage *dp = kmap(pages[i]);
+ struct lu_dirpage *first = dp;
+ struct lu_dirent *end_dirent = NULL;
+ struct lu_dirent *ent;
+ __u64 hash_end = dp->ldp_hash_end;
+ __u32 flags = dp->ldp_flags;
+
+ while (--nlupgs > 0) {
+ ent = lu_dirent_start(dp);
+ for (end_dirent = ent; ent != NULL;
+ end_dirent = ent, ent = lu_dirent_next(ent));
+
+ /* Advance dp to next lu_dirpage. */
+ dp = (struct lu_dirpage *)((char *)dp + LU_PAGE_SIZE);
+
+ /* Check if we've reached the end of the CFS_PAGE. */
+ if (!((unsigned long)dp & ~CFS_PAGE_MASK))
+ break;
+
+ /* Save the hash and flags of this lu_dirpage. */
+ hash_end = dp->ldp_hash_end;
+ flags = dp->ldp_flags;
+
+ /* Check if lu_dirpage contains no entries. */
+ if (!end_dirent)
+ break;
+
+ /* Enlarge the end entry lde_reclen from 0 to
+ * first entry of next lu_dirpage. */
+ LASSERT(le16_to_cpu(end_dirent->lde_reclen) == 0);
+ end_dirent->lde_reclen =
+ cpu_to_le16((char *)(dp->ldp_entries) -
+ (char *)end_dirent);
+ }
+
+ first->ldp_hash_end = hash_end;
+ first->ldp_flags &= ~cpu_to_le32(LDF_COLLIDE);
+ first->ldp_flags |= flags & cpu_to_le32(LDF_COLLIDE);
+
+ kunmap(pages[i]);
+ }
+ LASSERTF(nlupgs == 0, "left = %d", nlupgs);
+}
+#else
+#define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0)
+#endif /* PAGE_CACHE_SIZE > LU_PAGE_SIZE */
+
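+/*
+ * A rough worked example of the page accounting in lmv_readpage() below,
+ * assuming LU_PAGE_SHIFT is 12 (4KB lu_dirpages): on a host with 64KB pages,
+ * a bulk reply of 131072 bytes gives
+ *
+ *	ncfspgs = (131072 + 65536 - 1) >> 16 = 2
+ *	nlupgs  = 131072 >> 12 = 32
+ *
+ * i.e. 16 lu_dirpages per CFS_PAGE, which lmv_adjust_dirpages() collapses
+ * into one logical lu_dirpage per host page.
+ */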
+static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
+ struct page **pages, struct ptlrpc_request **request)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ __u64 offset = op_data->op_offset;
+ int rc;
+ int ncfspgs; /* pages read, in PAGE_CACHE_SIZE units */
+ int nlupgs; /* pages read, in LU_PAGE_SIZE units */
+ struct lmv_tgt_desc *tgt;
+
+ rc = lmv_check_connect(obd);
+ if (rc)
+ return rc;
+
+ CDEBUG(D_INODE, "READPAGE at "LPX64" from "DFID"\n",
+ offset, PFID(&op_data->op_fid1));
+
+ tgt = lmv_find_target(lmv, &op_data->op_fid1);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ rc = md_readpage(tgt->ltd_exp, op_data, pages, request);
+ if (rc != 0)
+ return rc;
+
+ ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_CACHE_SIZE - 1)
+ >> PAGE_CACHE_SHIFT;
+ nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT;
+ LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));
+ LASSERT(ncfspgs > 0 && ncfspgs <= op_data->op_npages);
+
+ CDEBUG(D_INODE, "read %d(%d)/%d pages\n", ncfspgs, nlupgs,
+ op_data->op_npages);
+
+ lmv_adjust_dirpages(pages, ncfspgs, nlupgs);
+
+ return rc;
+}
+
+static int lmv_unlink(struct obd_export *exp, struct md_op_data *op_data,
+ struct ptlrpc_request **request)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt = NULL;
+ struct mdt_body *body;
+ int rc;
+
+ rc = lmv_check_connect(obd);
+ if (rc)
+ return rc;
+retry:
+ /* Send unlink requests to the MDT where the child is located */
+ if (likely(!fid_is_zero(&op_data->op_fid2)))
+ tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid2);
+ else
+ tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
+ op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
+ op_data->op_cap = cfs_curproc_cap_pack();
+
+ /*
+ * If child's fid is given, cancel unused locks for it if it is from
+ * another export than parent.
+ *
+ * LOOKUP lock for child (fid3) should also be cancelled on parent
+ * tgt_tgt in mdc_unlink().
+ */
+ op_data->op_flags |= MF_MDC_CANCEL_FID1 | MF_MDC_CANCEL_FID3;
+
+ /*
+ * Cancel FULL locks on child (fid3).
+ */
+ rc = lmv_early_cancel(exp, op_data, tgt->ltd_idx, LCK_EX,
+ MDS_INODELOCK_FULL, MF_MDC_CANCEL_FID3);
+
+ if (rc != 0)
+ return rc;
+
+ CDEBUG(D_INODE, "unlink with fid="DFID"/"DFID" -> mds #%d\n",
+ PFID(&op_data->op_fid1), PFID(&op_data->op_fid2), tgt->ltd_idx);
+
+ rc = md_unlink(tgt->ltd_exp, op_data, request);
+ if (rc != 0 && rc != -EREMOTE)
+ return rc;
+
+ body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY);
+ if (body == NULL)
+ return -EPROTO;
+
+ /* Not cross-ref case, just get out of here. */
+ if (likely(!(body->valid & OBD_MD_MDS)))
+ return 0;
+
+ CDEBUG(D_INODE, "%s: try unlink to another MDT for "DFID"\n",
+ exp->exp_obd->obd_name, PFID(&body->fid1));
+
+ /* This is a remote object; try the remote MDT. Note that it may
+ * retry more than once here. Consider the following case:
+ * /mnt/lustre is the root on MDT0, remote1 is on MDT1.
+ * 1. Initially client A does not know where remote1 is; it sends
+ * the unlink RPC to MDT0, MDT0 returns -EREMOTE, and A
+ * resends the unlink RPC to MDT1 (1st retry).
+ *
+ * 2. While that unlink RPC is in flight, client B does
+ * mv /mnt/lustre/remote1 /mnt/lustre/remote2
+ * and creates a new remote1, but on MDT0.
+ *
+ * 3. MDT1 gets the unlink RPC (from A), takes a remote lock on
+ * /mnt/lustre, looks up the fid of remote1, finds that it is
+ * a remote dir again, and replies -EREMOTE again.
+ *
+ * 4. A then resends the unlink RPC to MDT0 (2nd retry).
+ *
+ * In theory this could retry indefinitely, but that should be a
+ * very rare case. */
+ op_data->op_fid2 = body->fid1;
+ ptlrpc_req_finished(*request);
+ *request = NULL;
+
+ goto retry;
+}
+
+static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
+{
+ struct lmv_obd *lmv = &obd->u.lmv;
+ int rc = 0;
+
+ switch (stage) {
+ case OBD_CLEANUP_EARLY:
+ /* XXX: here should be calling obd_precleanup() down to
+ * stack. */
+ break;
+ case OBD_CLEANUP_EXPORTS:
+ fld_client_proc_fini(&lmv->lmv_fld);
+ lprocfs_obd_cleanup(obd);
+ break;
+ default:
+ break;
+ }
+ return rc;
+}
+
+static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
+ __u32 keylen, void *key, __u32 *vallen, void *val,
+ struct lov_stripe_md *lsm)
+{
+ struct obd_device *obd;
+ struct lmv_obd *lmv;
+ int rc = 0;
+
+ obd = class_exp2obd(exp);
+ if (obd == NULL) {
+ CDEBUG(D_IOCTL, "Invalid client cookie "LPX64"\n",
+ exp->exp_handle.h_cookie);
+ return -EINVAL;
+ }
+
+ lmv = &obd->u.lmv;
+ if (keylen >= strlen("remote_flag") && !strcmp(key, "remote_flag")) {
+ struct lmv_tgt_desc *tgt;
+ int i;
+
+ rc = lmv_check_connect(obd);
+ if (rc)
+ return rc;
+
+ LASSERT(*vallen == sizeof(__u32));
+ for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ tgt = lmv->tgts[i];
+ /*
+ * All tgts should be connected when this gets called.
+ */
+ if (tgt == NULL || tgt->ltd_exp == NULL)
+ continue;
+
+ if (!obd_get_info(env, tgt->ltd_exp, keylen, key,
+ vallen, val, NULL))
+ return 0;
+ }
+ return -EINVAL;
+ } else if (KEY_IS(KEY_MAX_EASIZE) || KEY_IS(KEY_CONN_DATA)) {
+ rc = lmv_check_connect(obd);
+ if (rc)
+ return rc;
+
+ /*
+ * Forward this request to the first MDS; it should know the
+ * LOV desc.
+ */
+ rc = obd_get_info(env, lmv->tgts[0]->ltd_exp, keylen, key,
+ vallen, val, NULL);
+ if (!rc && KEY_IS(KEY_CONN_DATA))
+ exp->exp_connect_data = *(struct obd_connect_data *)val;
+ return rc;
+ } else if (KEY_IS(KEY_TGT_COUNT)) {
+ *((int *)val) = lmv->desc.ld_tgt_count;
+ return 0;
+ }
+
+ CDEBUG(D_IOCTL, "Invalid key\n");
+ return -EINVAL;
+}
+
+int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp,
+ obd_count keylen, void *key, obd_count vallen,
+ void *val, struct ptlrpc_request_set *set)
+{
+ struct lmv_tgt_desc *tgt;
+ struct obd_device *obd;
+ struct lmv_obd *lmv;
+ int rc = 0;
+
+ obd = class_exp2obd(exp);
+ if (obd == NULL) {
+ CDEBUG(D_IOCTL, "Invalid client cookie "LPX64"\n",
+ exp->exp_handle.h_cookie);
+ return -EINVAL;
+ }
+ lmv = &obd->u.lmv;
+
+ if (KEY_IS(KEY_READ_ONLY) || KEY_IS(KEY_FLUSH_CTX)) {
+ int i, err = 0;
+
+ for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ tgt = lmv->tgts[i];
+
+ if (tgt == NULL || tgt->ltd_exp == NULL)
+ continue;
+
+ err = obd_set_info_async(env, tgt->ltd_exp,
+ keylen, key, vallen, val, set);
+ if (err && rc == 0)
+ rc = err;
+ }
+
+ return rc;
+ }
+
+ return -EINVAL;
+}
+
+int lmv_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
+ struct lov_stripe_md *lsm)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_stripe_md *meap;
+ struct lmv_stripe_md *lsmp;
+ int mea_size;
+ int i;
+
+ mea_size = lmv_get_easize(lmv);
+ if (!lmmp)
+ return mea_size;
+
+ if (*lmmp && !lsm) {
+ OBD_FREE_LARGE(*lmmp, mea_size);
+ *lmmp = NULL;
+ return 0;
+ }
+
+ if (*lmmp == NULL) {
+ OBD_ALLOC_LARGE(*lmmp, mea_size);
+ if (*lmmp == NULL)
+ return -ENOMEM;
+ }
+
+ if (!lsm)
+ return mea_size;
+
+ lsmp = (struct lmv_stripe_md *)lsm;
+ meap = (struct lmv_stripe_md *)*lmmp;
+
+ if (lsmp->mea_magic != MEA_MAGIC_LAST_CHAR &&
+ lsmp->mea_magic != MEA_MAGIC_ALL_CHARS)
+ return -EINVAL;
+
+ meap->mea_magic = cpu_to_le32(lsmp->mea_magic);
+ meap->mea_count = cpu_to_le32(lsmp->mea_count);
+ meap->mea_master = cpu_to_le32(lsmp->mea_master);
+
+ for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ meap->mea_ids[i] = lsmp->mea_ids[i];
+ fid_cpu_to_le(&meap->mea_ids[i], &lsmp->mea_ids[i]);
+ }
+
+ return mea_size;
+}
+
+int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
+ struct lov_mds_md *lmm, int lmm_size)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+ struct lmv_stripe_md **tmea = (struct lmv_stripe_md **)lsmp;
+ struct lmv_stripe_md *mea = (struct lmv_stripe_md *)lmm;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ int mea_size;
+ int i;
+ __u32 magic;
+
+ mea_size = lmv_get_easize(lmv);
+ if (lsmp == NULL)
+ return mea_size;
+
+ if (*lsmp != NULL && lmm == NULL) {
+ OBD_FREE_LARGE(*tmea, mea_size);
+ *lsmp = NULL;
+ return 0;
+ }
+
+ LASSERT(mea_size == lmm_size);
+
+ OBD_ALLOC_LARGE(*tmea, mea_size);
+ if (*tmea == NULL)
+ return -ENOMEM;
+
+ if (!lmm)
+ return mea_size;
+
+ if (mea->mea_magic == MEA_MAGIC_LAST_CHAR ||
+ mea->mea_magic == MEA_MAGIC_ALL_CHARS ||
+ mea->mea_magic == MEA_MAGIC_HASH_SEGMENT) {
+ magic = le32_to_cpu(mea->mea_magic);
+ } else {
+ /*
+ * Old mea format is not handled here.
+ */
+ CERROR("Unsupported old EA format found\n");
+ LBUG();
+ }
+
+ (*tmea)->mea_magic = magic;
+ (*tmea)->mea_count = le32_to_cpu(mea->mea_count);
+ (*tmea)->mea_master = le32_to_cpu(mea->mea_master);
+
+ for (i = 0; i < (*tmea)->mea_count; i++) {
+ (*tmea)->mea_ids[i] = mea->mea_ids[i];
+ fid_le_to_cpu(&(*tmea)->mea_ids[i], &(*tmea)->mea_ids[i]);
+ }
+ return mea_size;
+}
+
+static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
+ ldlm_policy_data_t *policy, ldlm_mode_t mode,
+ ldlm_cancel_flags_t flags, void *opaque)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ int rc = 0;
+ int err;
+ int i;
+
+ LASSERT(fid != NULL);
+
+ for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL ||
+ lmv->tgts[i]->ltd_active == 0)
+ continue;
+
+ err = md_cancel_unused(lmv->tgts[i]->ltd_exp, fid,
+ policy, mode, flags, opaque);
+ if (!rc)
+ rc = err;
+ }
+ return rc;
+}
+
+int lmv_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
+ __u64 *bits)
+{
+ struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
+ int rc;
+
+ rc = md_set_lock_data(lmv->tgts[0]->ltd_exp, lockh, data, bits);
+ return rc;
+}
+
+ldlm_mode_t lmv_lock_match(struct obd_export *exp, __u64 flags,
+ const struct lu_fid *fid, ldlm_type_t type,
+ ldlm_policy_data_t *policy, ldlm_mode_t mode,
+ struct lustre_handle *lockh)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ ldlm_mode_t rc;
+ int i;
+
+ CDEBUG(D_INODE, "Lock match for "DFID"\n", PFID(fid));
+
+ /*
+ * With CMD every object can have two locks in different namespaces:
+ * a LOOKUP lock in the namespace of the MDS storing the direntry, and
+ * an UPDATE/OPEN lock in the namespace of the MDS storing the inode.
+ * Thus we check all targets, not only the one the fid was created on.
+ */
+ for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ if (lmv->tgts[i] == NULL ||
+ lmv->tgts[i]->ltd_exp == NULL ||
+ lmv->tgts[i]->ltd_active == 0)
+ continue;
+
+ rc = md_lock_match(lmv->tgts[i]->ltd_exp, flags, fid,
+ type, policy, mode, lockh);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+int lmv_get_lustre_md(struct obd_export *exp, struct ptlrpc_request *req,
+ struct obd_export *dt_exp, struct obd_export *md_exp,
+ struct lustre_md *md)
+{
+ struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
+
+ return md_get_lustre_md(lmv->tgts[0]->ltd_exp, req, dt_exp, md_exp, md);
+}
+
+int lmv_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+
+ if (md->mea)
+ obd_free_memmd(exp, (void *)&md->mea);
+ return md_free_lustre_md(lmv->tgts[0]->ltd_exp, md);
+}
+
+int lmv_set_open_replay_data(struct obd_export *exp,
+ struct obd_client_handle *och,
+ struct ptlrpc_request *open_req)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt;
+
+ tgt = lmv_find_target(lmv, &och->och_fid);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ return md_set_open_replay_data(tgt->ltd_exp, och, open_req);
+}
+
+int lmv_clear_open_replay_data(struct obd_export *exp,
+ struct obd_client_handle *och)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt;
+
+ tgt = lmv_find_target(lmv, &och->och_fid);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ return md_clear_open_replay_data(tgt->ltd_exp, och);
+}
+
+static int lmv_get_remote_perm(struct obd_export *exp,
+ const struct lu_fid *fid,
+ struct obd_capa *oc, __u32 suppgid,
+ struct ptlrpc_request **request)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt;
+ int rc;
+
+ rc = lmv_check_connect(obd);
+ if (rc)
+ return rc;
+
+ tgt = lmv_find_target(lmv, fid);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ rc = md_get_remote_perm(tgt->ltd_exp, fid, oc, suppgid, request);
+ return rc;
+}
+
+static int lmv_renew_capa(struct obd_export *exp, struct obd_capa *oc,
+ renew_capa_cb_t cb)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt;
+ int rc;
+
+ rc = lmv_check_connect(obd);
+ if (rc)
+ return rc;
+
+ tgt = lmv_find_target(lmv, &oc->c_capa.lc_fid);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ rc = md_renew_capa(tgt->ltd_exp, oc, cb);
+ return rc;
+}
+
+int lmv_unpack_capa(struct obd_export *exp, struct ptlrpc_request *req,
+ const struct req_msg_field *field, struct obd_capa **oc)
+{
+ struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
+
+ return md_unpack_capa(lmv->tgts[0]->ltd_exp, req, field, oc);
+}
+
+int lmv_intent_getattr_async(struct obd_export *exp,
+ struct md_enqueue_info *minfo,
+ struct ldlm_enqueue_info *einfo)
+{
+ struct md_op_data *op_data = &minfo->mi_data;
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt = NULL;
+ int rc;
+
+ rc = lmv_check_connect(obd);
+ if (rc)
+ return rc;
+
+ tgt = lmv_find_target(lmv, &op_data->op_fid1);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ rc = md_intent_getattr_async(tgt->ltd_exp, minfo, einfo);
+ return rc;
+}
+
+int lmv_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
+ struct lu_fid *fid, __u64 *bits)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt;
+ int rc;
+
+ rc = lmv_check_connect(obd);
+ if (rc)
+ return rc;
+
+ tgt = lmv_find_target(lmv, fid);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ rc = md_revalidate_lock(tgt->ltd_exp, it, fid, bits);
+ return rc;
+}
+
+/**
+ * For lmv we only need to send the request to the master MDT, which then
+ * handles coordination with the other slave MDTs. The only exception is
+ * Q_GETOQUOTA, for which we fetch data directly from the slave MDTs.
+ */
+int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
+ struct obd_quotactl *oqctl)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt = lmv->tgts[0];
+ int rc = 0, i;
+ __u64 curspace, curinodes;
+
+ if (!lmv->desc.ld_tgt_count || !tgt->ltd_active) {
+ CERROR("master lmv inactive\n");
+ return -EIO;
+ }
+
+ if (oqctl->qc_cmd != Q_GETOQUOTA) {
+ rc = obd_quotactl(tgt->ltd_exp, oqctl);
+ return rc;
+ }
+
+ curspace = curinodes = 0;
+ for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ int err;
+ tgt = lmv->tgts[i];
+
+ if (tgt == NULL || tgt->ltd_exp == NULL || tgt->ltd_active == 0)
+ continue;
+ if (!tgt->ltd_active) {
+ CDEBUG(D_HA, "mdt %d is inactive.\n", i);
+ continue;
+ }
+
+ err = obd_quotactl(tgt->ltd_exp, oqctl);
+ if (err) {
+ CERROR("getquota on mdt %d failed. %d\n", i, err);
+ if (!rc)
+ rc = err;
+ } else {
+ curspace += oqctl->qc_dqblk.dqb_curspace;
+ curinodes += oqctl->qc_dqblk.dqb_curinodes;
+ }
+ }
+ oqctl->qc_dqblk.dqb_curspace = curspace;
+ oqctl->qc_dqblk.dqb_curinodes = curinodes;
+
+ return rc;
+}
+
+int lmv_quotacheck(struct obd_device *unused, struct obd_export *exp,
+ struct obd_quotactl *oqctl)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt;
+ int i, rc = 0;
+
+ for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ int err;
+ tgt = lmv->tgts[i];
+ if (tgt == NULL || tgt->ltd_exp == NULL || !tgt->ltd_active) {
+ CERROR("lmv idx %d inactive\n", i);
+ return -EIO;
+ }
+
+ err = obd_quotacheck(tgt->ltd_exp, oqctl);
+ if (err && !rc)
+ rc = err;
+ }
+
+ return rc;
+}
+
+struct obd_ops lmv_obd_ops = {
+ .o_owner = THIS_MODULE,
+ .o_setup = lmv_setup,
+ .o_cleanup = lmv_cleanup,
+ .o_precleanup = lmv_precleanup,
+ .o_process_config = lmv_process_config,
+ .o_connect = lmv_connect,
+ .o_disconnect = lmv_disconnect,
+ .o_statfs = lmv_statfs,
+ .o_get_info = lmv_get_info,
+ .o_set_info_async = lmv_set_info_async,
+ .o_packmd = lmv_packmd,
+ .o_unpackmd = lmv_unpackmd,
+ .o_notify = lmv_notify,
+ .o_get_uuid = lmv_get_uuid,
+ .o_iocontrol = lmv_iocontrol,
+ .o_quotacheck = lmv_quotacheck,
+ .o_quotactl = lmv_quotactl
+};
+
+struct md_ops lmv_md_ops = {
+ .m_getstatus = lmv_getstatus,
+ .m_null_inode = lmv_null_inode,
+ .m_find_cbdata = lmv_find_cbdata,
+ .m_close = lmv_close,
+ .m_create = lmv_create,
+ .m_done_writing = lmv_done_writing,
+ .m_enqueue = lmv_enqueue,
+ .m_getattr = lmv_getattr,
+ .m_getxattr = lmv_getxattr,
+ .m_getattr_name = lmv_getattr_name,
+ .m_intent_lock = lmv_intent_lock,
+ .m_link = lmv_link,
+ .m_rename = lmv_rename,
+ .m_setattr = lmv_setattr,
+ .m_setxattr = lmv_setxattr,
+ .m_sync = lmv_sync,
+ .m_readpage = lmv_readpage,
+ .m_unlink = lmv_unlink,
+ .m_init_ea_size = lmv_init_ea_size,
+ .m_cancel_unused = lmv_cancel_unused,
+ .m_set_lock_data = lmv_set_lock_data,
+ .m_lock_match = lmv_lock_match,
+ .m_get_lustre_md = lmv_get_lustre_md,
+ .m_free_lustre_md = lmv_free_lustre_md,
+ .m_set_open_replay_data = lmv_set_open_replay_data,
+ .m_clear_open_replay_data = lmv_clear_open_replay_data,
+ .m_renew_capa = lmv_renew_capa,
+ .m_unpack_capa = lmv_unpack_capa,
+ .m_get_remote_perm = lmv_get_remote_perm,
+ .m_intent_getattr_async = lmv_intent_getattr_async,
+ .m_revalidate_lock = lmv_revalidate_lock
+};
+
+int __init lmv_init(void)
+{
+ struct lprocfs_static_vars lvars;
+ int rc;
+
+ lprocfs_lmv_init_vars(&lvars);
+
+ rc = class_register_type(&lmv_obd_ops, &lmv_md_ops,
+ lvars.module_vars, LUSTRE_LMV_NAME, NULL);
+ return rc;
+}
+
+static void lmv_exit(void)
+{
+ class_unregister_type(LUSTRE_LMV_NAME);
+}
+
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("Lustre Logical Metadata Volume OBD driver");
+MODULE_LICENSE("GPL");
+
+module_init(lmv_init);
+module_exit(lmv_exit);
diff --git a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
new file mode 100644
index 0000000..edb5a3a
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
@@ -0,0 +1,234 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_CLASS
+
+#include <linux/seq_file.h>
+#include <asm/statfs.h>
+#include <lprocfs_status.h>
+#include <obd_class.h>
+
+#ifndef LPROCFS
+static struct lprocfs_vars lprocfs_module_vars[] = { {0} };
+static struct lprocfs_vars lprocfs_obd_vars[] = { {0} };
+#else
+static int lmv_numobd_seq_show(struct seq_file *m, void *v)
+{
+ struct obd_device *dev = (struct obd_device *)m->private;
+ struct lmv_desc *desc;
+
+ LASSERT(dev != NULL);
+ desc = &dev->u.lmv.desc;
+ return seq_printf(m, "%u\n", desc->ld_tgt_count);
+}
+LPROC_SEQ_FOPS_RO(lmv_numobd);
+
+static const char *placement_name[] = {
+ [PLACEMENT_CHAR_POLICY] = "CHAR",
+ [PLACEMENT_NID_POLICY] = "NID",
+ [PLACEMENT_INVAL_POLICY] = "INVAL"
+};
+
+static placement_policy_t placement_name2policy(char *name, int len)
+{
+ int i;
+
+ for (i = 0; i < PLACEMENT_MAX_POLICY; i++) {
+ if (!strncmp(placement_name[i], name, len))
+ return i;
+ }
+ return PLACEMENT_INVAL_POLICY;
+}
+
+static const char *placement_policy2name(placement_policy_t placement)
+{
+ LASSERT(placement < PLACEMENT_MAX_POLICY);
+ return placement_name[placement];
+}
+
+static int lmv_placement_seq_show(struct seq_file *m, void *v)
+{
+ struct obd_device *dev = (struct obd_device *)m->private;
+ struct lmv_obd *lmv;
+
+ LASSERT(dev != NULL);
+ lmv = &dev->u.lmv;
+ return seq_printf(m, "%s\n", placement_policy2name(lmv->lmv_placement));
+}
+
+#define MAX_POLICY_STRING_SIZE 64
+
+static ssize_t lmv_placement_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
+ char dummy[MAX_POLICY_STRING_SIZE + 1];
+ int len = count;
+ placement_policy_t policy;
+ struct lmv_obd *lmv;
+
+ if (count == 0)
+ return -EINVAL;
+
+ if (len > MAX_POLICY_STRING_SIZE)
+ len = MAX_POLICY_STRING_SIZE;
+
+ /* Copy only what the user actually wrote, not the full buffer size. */
+ if (copy_from_user(dummy, buffer, len))
+ return -EFAULT;
+
+ LASSERT(dev != NULL);
+ lmv = &dev->u.lmv;
+
+ if (dummy[len - 1] == '\n')
+ len--;
+ dummy[len] = '\0';
+
+ policy = placement_name2policy(dummy, len);
+ if (policy != PLACEMENT_INVAL_POLICY) {
+ spin_lock(&lmv->lmv_lock);
+ lmv->lmv_placement = policy;
+ spin_unlock(&lmv->lmv_lock);
+ } else {
+ CERROR("Invalid placement policy \"%s\"!\n", dummy);
+ return -EINVAL;
+ }
+ return count;
+}
+LPROC_SEQ_FOPS(lmv_placement);
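+
+/*
+ * Usage sketch; the exact proc path depends on the obd name, but lprocfs
+ * normally exposes obd vars under /proc/fs/lustre/<obd type>/<obd name>/:
+ *
+ *	# cat /proc/fs/lustre/lmv/<lmv obd name>/placement
+ *	CHAR
+ *	# echo NID > /proc/fs/lustre/lmv/<lmv obd name>/placement
+ *
+ * Writes are parsed by lmv_placement_seq_write() above; any string not in
+ * placement_name[] is rejected with -EINVAL.
+ */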
+
+static int lmv_activeobd_seq_show(struct seq_file *m, void *v)
+{
+ struct obd_device *dev = (struct obd_device *)m->private;
+ struct lmv_desc *desc;
+
+ LASSERT(dev != NULL);
+ desc = &dev->u.lmv.desc;
+ return seq_printf(m, "%u\n", desc->ld_active_tgt_count);
+}
+LPROC_SEQ_FOPS_RO(lmv_activeobd);
+
+static int lmv_desc_uuid_seq_show(struct seq_file *m, void *v)
+{
+ struct obd_device *dev = (struct obd_device *)m->private;
+ struct lmv_obd *lmv;
+
+ LASSERT(dev != NULL);
+ lmv = &dev->u.lmv;
+ return seq_printf(m, "%s\n", lmv->desc.ld_uuid.uuid);
+}
+LPROC_SEQ_FOPS_RO(lmv_desc_uuid);
+
+static void *lmv_tgt_seq_start(struct seq_file *p, loff_t *pos)
+{
+ struct obd_device *dev = p->private;
+ struct lmv_obd *lmv = &dev->u.lmv;
+ return (*pos >= lmv->desc.ld_tgt_count) ? NULL : lmv->tgts[*pos];
+}
+
+static void lmv_tgt_seq_stop(struct seq_file *p, void *v)
+{
+ return;
+}
+
+static void *lmv_tgt_seq_next(struct seq_file *p, void *v, loff_t *pos)
+{
+ struct obd_device *dev = p->private;
+ struct lmv_obd *lmv = &dev->u.lmv;
+ ++*pos;
+ return (*pos >= lmv->desc.ld_tgt_count) ? NULL : lmv->tgts[*pos];
+}
+
+static int lmv_tgt_seq_show(struct seq_file *p, void *v)
+{
+ struct lmv_tgt_desc *tgt = v;
+
+ if (tgt == NULL)
+ return 0;
+ return seq_printf(p, "%d: %s %sACTIVE\n", tgt->ltd_idx,
+ tgt->ltd_uuid.uuid, tgt->ltd_active ? "" : "IN");
+}
+
+struct seq_operations lmv_tgt_sops = {
+ .start = lmv_tgt_seq_start,
+ .stop = lmv_tgt_seq_stop,
+ .next = lmv_tgt_seq_next,
+ .show = lmv_tgt_seq_show,
+};
+
+static int lmv_target_seq_open(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq;
+ int rc;
+
+ rc = seq_open(file, &lmv_tgt_sops);
+ if (rc)
+ return rc;
+
+ seq = file->private_data;
+ seq->private = PDE_DATA(inode);
+
+ return 0;
+}
+
+LPROC_SEQ_FOPS_RO_TYPE(lmv, uuid);
+
+struct lprocfs_vars lprocfs_lmv_obd_vars[] = {
+ { "numobd", &lmv_numobd_fops, 0, 0 },
+ { "placement", &lmv_placement_fops, 0, 0 },
+ { "activeobd", &lmv_activeobd_fops, 0, 0 },
+ { "uuid", &lmv_uuid_fops, 0, 0 },
+ { "desc_uuid", &lmv_desc_uuid_fops, 0, 0 },
+ { 0 }
+};
+
+LPROC_SEQ_FOPS_RO_TYPE(lmv, numrefs);
+
+static struct lprocfs_vars lprocfs_lmv_module_vars[] = {
+ { "num_refs", &lmv_numrefs_fops, 0, 0 },
+ { 0 }
+};
+
+struct file_operations lmv_proc_target_fops = {
+ .owner = THIS_MODULE,
+ .open = lmv_target_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+#endif /* LPROCFS */
+void lprocfs_lmv_init_vars(struct lprocfs_static_vars *lvars)
+{
+ lvars->module_vars = lprocfs_lmv_module_vars;
+ lvars->obd_vars = lprocfs_lmv_obd_vars;
+}
diff --git a/drivers/staging/lustre/lustre/lov/Makefile b/drivers/staging/lustre/lustre/lov/Makefile
new file mode 100644
index 0000000..67eaec2
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lov/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_LUSTRE_FS) += lov.o
+lov-y := lov_log.o lov_obd.o lov_pack.o lproc_lov.o lov_offset.o lov_merge.o \
+ lov_request.o lov_ea.o lov_dev.o lov_object.o lov_page.o \
+ lov_lock.o lov_io.o lovsub_dev.o lovsub_object.o lovsub_page.o \
+ lovsub_lock.o lovsub_io.o lov_pool.o
+
+
+
+ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
new file mode 100644
index 0000000..33d9ce6
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
@@ -0,0 +1,823 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Internal interfaces of LOV layer.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
+ */
+
+#ifndef LOV_CL_INTERNAL_H
+#define LOV_CL_INTERNAL_H
+
+# include <linux/libcfs/libcfs.h>
+
+#include <obd.h>
+#include <cl_object.h>
+#include "lov_internal.h"
+
+/** \defgroup lov lov
+ * Logical object volume layer. This layer implements data striping (raid0).
+ *
+ * At the lov layer top-entity (object, page, lock, io) is connected to one or
+ * more sub-entities: top-object, representing a file is connected to a set of
+ * sub-objects, each representing a stripe, file-level top-lock is connected
+ * to a set of per-stripe sub-locks, top-page is connected to a (single)
+ * sub-page, and a top-level IO is connected to a set of (potentially
+ * concurrent) sub-IO's.
+ *
+ * Sub-object, sub-page, and sub-io each have a well-defined top-object,
+ * top-page, and top-io respectively, while a single sub-lock can be part of
+ * multiple top-locks.
+ *
+ * Reference counting models are different for different types of entities:
+ *
+ * - top-object keeps a reference to its sub-objects, and destroys them
+ * when it is destroyed.
+ *
+ * - top-page keeps a reference to its sub-page, and destroys it when it
+ * is destroyed.
+ *
+ * - sub-lock keeps a reference to its top-locks. Top-lock keeps a
+ * reference (and a hold, see cl_lock_hold()) on its sub-locks when it
+ * is actively using them (that is, in cl_lock_state::CLS_QUEUING,
+ * cl_lock_state::CLS_ENQUEUED, cl_lock_state::CLS_HELD states). When
+ * moving into cl_lock_state::CLS_CACHED state, top-lock releases a
+ * hold. From this moment top-lock has only a 'weak' reference to its
+ * sub-locks. This reference is protected by top-lock
+ * cl_lock::cll_guard, and will be automatically cleared by the sub-lock
+ * when the latter is destroyed. When a sub-lock is canceled, a
+ * reference to it is removed from the top-lock array, and top-lock is
+ * moved into CLS_NEW state. It is guaranteed that all sub-locks exist
+ * while their top-lock is in CLS_HELD or CLS_CACHED states.
+ *
+ * - IO's are not reference counted.
+ *
+ * To implement a connection between top and sub entities, lov layer is split
+ * into two pieces: lov ("upper half"), and lovsub ("bottom half"), both
+ * implementing full set of cl-interfaces. For example, top-object has vvp and
+ * lov layers, and its sub-object has lovsub and osc layers. The lovsub layer
+ * is used to track the child-parent relationship.
+ *
+ * @{
+ */
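+
+/*
+ * A minimal sketch of the resulting structure for a raid0 layout, using the
+ * types declared further down in this header (and assuming that
+ * lovsub_object::lso_index holds the stripe number):
+ *
+ *	struct lov_layout_raid0 *r0 = lov_r0(lov);
+ *	int i;
+ *
+ *	for (i = 0; i < r0->lo_nr; i++)
+ *		LASSERT(ergo(r0->lo_sub[i] != NULL,
+ *			     r0->lo_sub[i]->lso_super == lov &&
+ *			     r0->lo_sub[i]->lso_index == i));
+ */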
+
+struct lovsub_device;
+struct lovsub_object;
+struct lovsub_lock;
+
+enum lov_device_flags {
+ LOV_DEV_INITIALIZED = 1 << 0
+};
+
+/*
+ * Upper half.
+ */
+
+/**
+ * Resources that are used in the memory-cleaning path, and whose allocation
+ * cannot fail even when memory is tight. They are preallocated in sufficient
+ * quantities in lov_device::ld_emrg[], and access to them is serialized by
+ * lov_device::ld_mutex.
+ */
+struct lov_device_emerg {
+ /**
+ * Page list used to submit IO when memory is under pressure.
+ */
+ struct cl_page_list emrg_page_list;
+ /**
+ * sub-io's shared by all threads accessing this device when memory is
+ * too low to allocate sub-io's dynamically.
+ */
+ struct cl_io emrg_subio;
+ /**
+ * Environments used by sub-io's in
+ * lov_device_emerg::emrg_subio.
+ */
+ struct lu_env *emrg_env;
+ /**
+ * Refchecks for lov_device_emerg::emrg_env.
+ *
+ * \see cl_env_get()
+ */
+ int emrg_refcheck;
+};
+
+struct lov_device {
+ /*
+ * XXX Locking of lov-private data is missing.
+ */
+ struct cl_device ld_cl;
+ struct lov_obd *ld_lov;
+ /** size of lov_device::ld_target[] array */
+ __u32 ld_target_nr;
+ struct lovsub_device **ld_target;
+ __u32 ld_flags;
+
+ /** Emergency resources used in memory-cleansing paths. */
+ struct lov_device_emerg **ld_emrg;
+ /**
+ * Serializes access to lov_device::ld_emrg in low-memory
+ * conditions.
+ */
+ struct mutex ld_mutex;
+};
+
+/**
+ * Layout type.
+ */
+enum lov_layout_type {
+ LLT_EMPTY, /** empty file without body (mknod + truncate) */
+ LLT_RAID0, /** striped file */
+ LLT_RELEASED, /** file with no objects (data in HSM) */
+ LLT_NR
+};
+
+/**
+ * lov-specific file state.
+ *
+ * lov object has particular layout type, determining how top-object is built
+ * on top of sub-objects. Layout type can change dynamically. When this
+ * happens, lov_object::lo_type_guard semaphore is taken in exclusive mode,
+ * all state pertaining to the old layout type is destroyed, and new state is
+ * constructed. All object methods take said semaphore in the shared mode,
+ * providing serialization against transition between layout types.
+ *
+ * To avoid multiple `if' or `switch' statements, selecting behavior for the
+ * current layout type, object methods perform double-dispatch, invoking
+ * function corresponding to the current layout type.
+ */
+struct lov_object {
+ struct cl_object lo_cl;
+ /**
+ * Serializes object operations with transitions between layout types.
+ *
+ * This semaphore is taken in shared mode by all object methods, and
+ * is taken in exclusive mode when object type is changed.
+ *
+ * \see lov_object::lo_type
+ */
+ struct rw_semaphore lo_type_guard;
+ /**
+ * Type of an object. Protected by lov_object::lo_type_guard.
+ */
+ enum lov_layout_type lo_type;
+ /**
+ * True if layout is invalid. This bit is cleared when layout lock
+ * is lost.
+ */
+ bool lo_layout_invalid;
+ /**
+ * How many IOs are on going on this object. Layout can be changed
+ * only if there is no active IO.
+ */
+ atomic_t lo_active_ios;
+ /**
+ * Wait queue used to wait until no one else is using lo_lsm.
+ */
+ wait_queue_head_t lo_waitq;
+ /**
+ * Layout metadata. NULL if empty layout.
+ */
+ struct lov_stripe_md *lo_lsm;
+
+ union lov_layout_state {
+ struct lov_layout_raid0 {
+ unsigned lo_nr;
+ /**
+ * When this is true, lov_object::lo_attr contains
+ * valid up to date attributes for a top-level
+ * object. This field is reset to 0 when attributes of
+ * any sub-object change.
+ */
+ int lo_attr_valid;
+ /**
+ * Array of sub-objects. Allocated when top-object is
+ * created (lov_init_raid0()).
+ *
+ * Top-object is a strict master of its sub-objects:
+ * it is created before them, and outlives its
+ * children (the latter is necessary so that basic
+ * functions like cl_object_top() always
+ * work). Top-object keeps a reference on every
+ * sub-object.
+ *
+ * When top-object is destroyed (lov_delete_raid0())
+ * it releases its reference to a sub-object and waits
+ * until the latter is finally destroyed.
+ */
+ struct lovsub_object **lo_sub;
+ /**
+ * protect lo_sub
+ */
+ spinlock_t lo_sub_lock;
+ /**
+ * Cached object attribute, built from sub-object
+ * attributes.
+ */
+ struct cl_attr lo_attr;
+ } raid0;
+ struct lov_layout_state_empty {
+ } empty;
+ struct lov_layout_state_released {
+ } released;
+ } u;
+ /**
+ * Thread that acquired lov_object::lo_type_guard in an exclusive
+ * mode.
+ */
+ struct task_struct *lo_owner;
+};
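+
+/*
+ * Conceptually, the double-dispatch described above amounts to an ops table
+ * indexed by lov_object::lo_type, roughly (the real table and method names
+ * live in lov_object.c and may differ):
+ *
+ *	result = layout_ops[lov->lo_type]->llo_method(env, lov, ...);
+ *
+ * with lov_object::lo_type_guard taken in shared mode around the call, and
+ * in exclusive mode while lo_type itself is being changed.
+ */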
+
+/**
+ * Flags that top-lock can set on each of its sub-locks.
+ */
+enum lov_sub_flags {
+ /** Top-lock acquired a hold (cl_lock_hold()) on a sub-lock. */
+ LSF_HELD = 1 << 0
+};
+
+/**
+ * State lov_lock keeps for each sub-lock.
+ */
+struct lov_lock_sub {
+ /** sub-lock itself */
+ struct lovsub_lock *sub_lock;
+ /** An array of per-sub-lock flags, taken from enum lov_sub_flags */
+ unsigned sub_flags;
+ int sub_stripe;
+ struct cl_lock_descr sub_descr;
+ struct cl_lock_descr sub_got;
+};
+
+/**
+ * lov-specific lock state.
+ */
+struct lov_lock {
+ struct cl_lock_slice lls_cl;
+ /** Number of sub-locks in this lock */
+ int lls_nr;
+ /**
+ * Number of existing sub-locks.
+ */
+ unsigned lls_nr_filled;
+ /**
+ * Set when a sub-lock was canceled while the top-lock was being
+ * used or unused.
+ */
+ unsigned int lls_cancel_race:1;
+ /**
+ * An array of sub-locks
+ *
+ * There are two issues with managing sub-locks:
+ *
+ * - sub-locks are concurrently canceled, and
+ *
+ * - sub-locks are shared with other top-locks.
+ *
+ * To manage cancellation, top-lock acquires a hold on a sublock
+ * (lov_sublock_adopt()) when the latter is inserted into
+ * lov_lock::lls_sub[]. This hold is released (lov_sublock_release())
+ * when top-lock is going into CLS_CACHED state or destroyed. Hold
+ * prevents sub-lock from cancellation.
+ *
+ * Sub-lock sharing means, among other things, that a top-lock that is
+ * in the process of creation (i.e., not yet inserted into the lock list)
+ * is already accessible to other threads once at least one of its
+ * sub-locks is created; see lov_lock_sub_init().
+ *
+ * Sub-lock can be in one of the following states:
+ *
+ * - doesn't exist, lov_lock::lls_sub[]::sub_lock == NULL. Such
+ * sub-lock was either never created (top-lock is in CLS_NEW
+ * state), or it was created, then canceled, then destroyed
+ * (lov_lock_unlink() cleared sub-lock pointer in the top-lock).
+ *
+ * - sub-lock exists and is on
+ * hold. (lov_lock::lls_sub[]::sub_flags & LSF_HELD). This is a
+ * normal state of a sub-lock in CLS_HELD and CLS_CACHED states
+ * of a top-lock.
+ *
+ * - sub-lock exists, but is not held by the top-lock. This
+ * happens after top-lock released a hold on sub-locks before
+ * going into cache (lov_lock_unuse()).
+ *
+ * \todo To support wide-striping, array has to be replaced with a set
+ * of queues to avoid scanning.
+ */
+ struct lov_lock_sub *lls_sub;
+ /**
+ * Original description with which lock was enqueued.
+ */
+ struct cl_lock_descr lls_orig;
+};
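+
+/*
+ * For illustration, code walking lov_lock::lls_sub[] can tell the three
+ * sub-lock states described above apart roughly as follows (sketch only):
+ *
+ *	for (i = 0; i < lck->lls_nr; i++) {
+ *		struct lov_lock_sub *lls = &lck->lls_sub[i];
+ *
+ *		if (lls->sub_lock == NULL)
+ *			continue;	(never created, or already destroyed)
+ *		else if (lls->sub_flags & LSF_HELD)
+ *			...		(held: top-lock in CLS_HELD/CLS_CACHED)
+ *		else
+ *			...		(exists, but the hold was released)
+ *	}
+ */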
+
+struct lov_page {
+ struct cl_page_slice lps_cl;
+ int lps_invalid;
+};
+
+/*
+ * Bottom half.
+ */
+
+struct lovsub_device {
+ struct cl_device acid_cl;
+ struct lov_device *acid_super;
+ int acid_idx;
+ struct cl_device *acid_next;
+};
+
+struct lovsub_object {
+ struct cl_object_header lso_header;
+ struct cl_object lso_cl;
+ struct lov_object *lso_super;
+ int lso_index;
+};
+
+/**
+ * A link between a top-lock and a sub-lock. A separate data structure is
+ * necessary because top-locks and sub-locks are in an M:N relationship.
+ *
+ * \todo This can be optimized for the (by far) most frequent case of a single
+ * top-lock per sub-lock.
+ */
+struct lov_lock_link {
+ struct lov_lock *lll_super;
+ /** An index within parent lock. */
+ int lll_idx;
+ /**
+ * A linkage into per sub-lock list of all corresponding top-locks,
+ * hanging off lovsub_lock::lss_parents.
+ */
+ struct list_head lll_list;
+};
+
+/**
+ * Lock state at lovsub layer.
+ */
+struct lovsub_lock {
+ struct cl_lock_slice lss_cl;
+ /**
+ * List of top-locks that have this sub-lock as their part. Protected
+ * by cl_lock::cll_guard mutex.
+ */
+ struct list_head lss_parents;
+ /**
+ * Top-lock that initiated the current operation on this sub-lock. This is
+ * only set during top-to-bottom lock operations like enqueue, and is
+ * used to optimize state change notification. Protected by
+ * cl_lock::cll_guard mutex.
+ *
+ * \see lovsub_lock_state_one().
+ */
+ struct cl_lock *lss_active;
+};
+
+/**
+ * Describe the environment settings for sublocks.
+ */
+struct lov_sublock_env {
+ const struct lu_env *lse_env;
+ struct cl_io *lse_io;
+ struct lov_io_sub *lse_sub;
+};
+
+struct lovsub_page {
+ struct cl_page_slice lsb_cl;
+};
+
+
+struct lov_thread_info {
+ struct cl_object_conf lti_stripe_conf;
+ struct lu_fid lti_fid;
+ struct cl_lock_descr lti_ldescr;
+ struct ost_lvb lti_lvb;
+ struct cl_2queue lti_cl2q;
+ struct cl_lock_closure lti_closure;
+ wait_queue_t lti_waiter;
+};
+
+/**
+ * State that lov_io maintains for every sub-io.
+ */
+struct lov_io_sub {
+ int sub_stripe;
+ /**
+ * sub-io for a stripe. Ideally sub-io's can be stopped and resumed
+ * independently, with lov acting as a scheduler to maximize overall
+ * throughput.
+ */
+ struct cl_io *sub_io;
+ /**
+ * Linkage into a list (hanging off lov_io::lis_active) of all
+ * sub-io's active for the current IO iteration.
+ */
+ struct list_head sub_linkage;
+ /**
+ * true, iff cl_io_init() was successfully executed against
+ * lov_io_sub::sub_io.
+ */
+ int sub_io_initialized;
+ /**
+ * True, iff lov_io_sub::sub_io and lov_io_sub::sub_env weren't
+ * allocated, but borrowed from a per-device emergency pool.
+ */
+ int sub_borrowed;
+ /**
+ * environment, in which sub-io executes.
+ */
+ struct lu_env *sub_env;
+ /**
+ * environment's refcheck.
+ *
+ * \see cl_env_get()
+ */
+ int sub_refcheck;
+ int sub_refcheck2;
+ int sub_reenter;
+ void *sub_cookie;
+};
+
+/**
+ * IO state private for LOV.
+ */
+struct lov_io {
+ /** super-class */
+ struct cl_io_slice lis_cl;
+ /**
+ * Pointer to the object slice. This is a duplicate of
+ * lov_io::lis_cl::cis_object.
+ */
+ struct lov_object *lis_object;
+ /**
+ * Original end-of-io position for this IO, set by the upper layer as
+ * cl_io::u::ci_rw::pos + cl_io::u::ci_rw::count. lov remembers this,
+ * changes pos and count to fit IO into a single stripe and uses saved
+ * value to determine when IO iterations have to stop.
+ *
+ * This is used only for CIT_READ and CIT_WRITE io's.
+ */
+ loff_t lis_io_endpos;
+
+ /**
+ * starting position within a file, for the current io loop iteration
+ * (stripe), used by ci_io_loop().
+ */
+ obd_off lis_pos;
+ /**
+ * end position within a file, for the current stripe io. This is
+ * exclusive (i.e., next offset after last byte affected by io).
+ */
+ obd_off lis_endpos;
+
+ int lis_mem_frozen;
+ int lis_stripe_count;
+ int lis_active_subios;
+
+ /**
+ * index of lis_single_subio in the lis_subs array
+ */
+ int lis_single_subio_index;
+ struct cl_io lis_single_subio;
+
+ /**
+ * size of the lis_subs array, actually the highest stripe #
+ */
+ int lis_nr_subios;
+ struct lov_io_sub *lis_subs;
+ /**
+ * List of active sub-io's.
+ */
+ struct list_head lis_active;
+};
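+
+/*
+ * Rough worked example of the positions above, assuming a 1MB stripe size
+ * over two stripes: a 3MB write starting at file offset 0 has
+ * lis_io_endpos == 3MB, and the per-iteration windows are roughly
+ * (lis_pos, lis_endpos) = (0, 1MB) on stripe 0, (1MB, 2MB) on stripe 1,
+ * then (2MB, 3MB) on stripe 0 again; iteration stops once lis_pos reaches
+ * lis_io_endpos.
+ */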
+
+struct lov_session {
+ struct lov_io ls_io;
+ struct lov_sublock_env ls_subenv;
+};
+
+/**
+ * State of transfer for lov.
+ */
+struct lov_req {
+ struct cl_req_slice lr_cl;
+};
+
+/**
+ * State of transfer for lovsub.
+ */
+struct lovsub_req {
+ struct cl_req_slice lsrq_cl;
+};
+
+extern struct lu_device_type lov_device_type;
+extern struct lu_device_type lovsub_device_type;
+
+extern struct lu_context_key lov_key;
+extern struct lu_context_key lov_session_key;
+
+extern struct kmem_cache *lov_lock_kmem;
+extern struct kmem_cache *lov_object_kmem;
+extern struct kmem_cache *lov_thread_kmem;
+extern struct kmem_cache *lov_session_kmem;
+extern struct kmem_cache *lov_req_kmem;
+
+extern struct kmem_cache *lovsub_lock_kmem;
+extern struct kmem_cache *lovsub_object_kmem;
+extern struct kmem_cache *lovsub_req_kmem;
+
+extern struct kmem_cache *lov_lock_link_kmem;
+
+int lov_object_init (const struct lu_env *env, struct lu_object *obj,
+ const struct lu_object_conf *conf);
+int lovsub_object_init (const struct lu_env *env, struct lu_object *obj,
+ const struct lu_object_conf *conf);
+int lov_lock_init (const struct lu_env *env, struct cl_object *obj,
+ struct cl_lock *lock, const struct cl_io *io);
+int lov_io_init (const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io);
+int lovsub_lock_init (const struct lu_env *env, struct cl_object *obj,
+ struct cl_lock *lock, const struct cl_io *io);
+
+int lov_lock_init_raid0 (const struct lu_env *env, struct cl_object *obj,
+ struct cl_lock *lock, const struct cl_io *io);
+int lov_lock_init_empty (const struct lu_env *env, struct cl_object *obj,
+ struct cl_lock *lock, const struct cl_io *io);
+int lov_io_init_raid0 (const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io);
+int lov_io_init_empty (const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io);
+int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io);
+void lov_lock_unlink (const struct lu_env *env, struct lov_lock_link *link,
+ struct lovsub_lock *sub);
+
+struct lov_io_sub *lov_sub_get(const struct lu_env *env, struct lov_io *lio,
+ int stripe);
+void lov_sub_put (struct lov_io_sub *sub);
+int lov_sublock_modify (const struct lu_env *env, struct lov_lock *lov,
+ struct lovsub_lock *sublock,
+ const struct cl_lock_descr *d, int idx);
+
+
+int lov_page_init (const struct lu_env *env, struct cl_object *ob,
+ struct cl_page *page, struct page *vmpage);
+int lovsub_page_init (const struct lu_env *env, struct cl_object *ob,
+ struct cl_page *page, struct page *vmpage);
+
+int lov_page_init_empty (const struct lu_env *env,
+ struct cl_object *obj,
+ struct cl_page *page, struct page *vmpage);
+int lov_page_init_raid0 (const struct lu_env *env,
+ struct cl_object *obj,
+ struct cl_page *page, struct page *vmpage);
+struct lu_object *lov_object_alloc (const struct lu_env *env,
+ const struct lu_object_header *hdr,
+ struct lu_device *dev);
+struct lu_object *lovsub_object_alloc(const struct lu_env *env,
+ const struct lu_object_header *hdr,
+ struct lu_device *dev);
+
+struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
+ struct lov_lock *lck,
+ struct lovsub_lock *sub);
+struct lov_io_sub *lov_page_subio (const struct lu_env *env,
+ struct lov_io *lio,
+ const struct cl_page_slice *slice);
+
+void lov_lsm_decref(struct lov_object *lov, struct lov_stripe_md *lsm);
+struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov);
+
+#define lov_foreach_target(lov, var) \
+ for (var = 0; var < lov_targets_nr(lov); ++var)
+
+/*****************************************************************************
+ *
+ * Type conversions.
+ *
+ * Accessors.
+ *
+ */
+
+static inline struct lov_session *lov_env_session(const struct lu_env *env)
+{
+ struct lov_session *ses;
+
+ ses = lu_context_key_get(env->le_ses, &lov_session_key);
+ LASSERT(ses != NULL);
+ return ses;
+}
+
+static inline struct lov_io *lov_env_io(const struct lu_env *env)
+{
+ return &lov_env_session(env)->ls_io;
+}
+
+static inline int lov_is_object(const struct lu_object *obj)
+{
+ return obj->lo_dev->ld_type == &lov_device_type;
+}
+
+static inline int lovsub_is_object(const struct lu_object *obj)
+{
+ return obj->lo_dev->ld_type == &lovsub_device_type;
+}
+
+static inline struct lu_device *lov2lu_dev(struct lov_device *lov)
+{
+ return &lov->ld_cl.cd_lu_dev;
+}
+
+static inline struct lov_device *lu2lov_dev(const struct lu_device *d)
+{
+ LINVRNT(d->ld_type == &lov_device_type);
+ return container_of0(d, struct lov_device, ld_cl.cd_lu_dev);
+}
+
+static inline struct cl_device *lovsub2cl_dev(struct lovsub_device *lovsub)
+{
+ return &lovsub->acid_cl;
+}
+
+static inline struct lu_device *lovsub2lu_dev(struct lovsub_device *lovsub)
+{
+ return &lovsub2cl_dev(lovsub)->cd_lu_dev;
+}
+
+static inline struct lovsub_device *lu2lovsub_dev(const struct lu_device *d)
+{
+ LINVRNT(d->ld_type == &lovsub_device_type);
+ return container_of0(d, struct lovsub_device, acid_cl.cd_lu_dev);
+}
+
+static inline struct lovsub_device *cl2lovsub_dev(const struct cl_device *d)
+{
+ LINVRNT(d->cd_lu_dev.ld_type == &lovsub_device_type);
+ return container_of0(d, struct lovsub_device, acid_cl);
+}
+
+static inline struct lu_object *lov2lu(struct lov_object *lov)
+{
+ return &lov->lo_cl.co_lu;
+}
+
+static inline struct cl_object *lov2cl(struct lov_object *lov)
+{
+ return &lov->lo_cl;
+}
+
+static inline struct lov_object *lu2lov(const struct lu_object *obj)
+{
+ LINVRNT(lov_is_object(obj));
+ return container_of0(obj, struct lov_object, lo_cl.co_lu);
+}
+
+static inline struct lov_object *cl2lov(const struct cl_object *obj)
+{
+ LINVRNT(lov_is_object(&obj->co_lu));
+ return container_of0(obj, struct lov_object, lo_cl);
+}
+
+static inline struct lu_object *lovsub2lu(struct lovsub_object *los)
+{
+ return &los->lso_cl.co_lu;
+}
+
+static inline struct cl_object *lovsub2cl(struct lovsub_object *los)
+{
+ return &los->lso_cl;
+}
+
+static inline struct lovsub_object *cl2lovsub(const struct cl_object *obj)
+{
+ LINVRNT(lovsub_is_object(&obj->co_lu));
+ return container_of0(obj, struct lovsub_object, lso_cl);
+}
+
+static inline struct lovsub_object *lu2lovsub(const struct lu_object *obj)
+{
+ LINVRNT(lovsub_is_object(obj));
+ return container_of0(obj, struct lovsub_object, lso_cl.co_lu);
+}
+
+static inline struct lovsub_lock *
+cl2lovsub_lock(const struct cl_lock_slice *slice)
+{
+ LINVRNT(lovsub_is_object(&slice->cls_obj->co_lu));
+ return container_of(slice, struct lovsub_lock, lss_cl);
+}
+
+static inline struct lovsub_lock *cl2sub_lock(const struct cl_lock *lock)
+{
+ const struct cl_lock_slice *slice;
+
+ slice = cl_lock_at(lock, &lovsub_device_type);
+ LASSERT(slice != NULL);
+ return cl2lovsub_lock(slice);
+}
+
+static inline struct lov_lock *cl2lov_lock(const struct cl_lock_slice *slice)
+{
+ LINVRNT(lov_is_object(&slice->cls_obj->co_lu));
+ return container_of(slice, struct lov_lock, lls_cl);
+}
+
+static inline struct lov_page *cl2lov_page(const struct cl_page_slice *slice)
+{
+ LINVRNT(lov_is_object(&slice->cpl_obj->co_lu));
+ return container_of0(slice, struct lov_page, lps_cl);
+}
+
+static inline struct lov_req *cl2lov_req(const struct cl_req_slice *slice)
+{
+ return container_of0(slice, struct lov_req, lr_cl);
+}
+
+static inline struct lovsub_page *
+cl2lovsub_page(const struct cl_page_slice *slice)
+{
+ LINVRNT(lovsub_is_object(&slice->cpl_obj->co_lu));
+ return container_of0(slice, struct lovsub_page, lsb_cl);
+}
+
+static inline struct lovsub_req *cl2lovsub_req(const struct cl_req_slice *slice)
+{
+ return container_of0(slice, struct lovsub_req, lsrq_cl);
+}
+
+static inline struct cl_page *lov_sub_page(const struct cl_page_slice *slice)
+{
+ return slice->cpl_page->cp_child;
+}
+
+static inline struct lov_io *cl2lov_io(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct lov_io *lio;
+
+ lio = container_of(ios, struct lov_io, lis_cl);
+ LASSERT(lio == lov_env_io(env));
+ return lio;
+}
+
+static inline int lov_targets_nr(const struct lov_device *lov)
+{
+ return lov->ld_lov->desc.ld_tgt_count;
+}
+
+static inline struct lov_thread_info *lov_env_info(const struct lu_env *env)
+{
+ struct lov_thread_info *info;
+
+ info = lu_context_key_get(&env->le_ctx, &lov_key);
+ LASSERT(info != NULL);
+ return info;
+}
+
+static inline struct lov_layout_raid0 *lov_r0(struct lov_object *lov)
+{
+ LASSERT(lov->lo_type == LLT_RAID0);
+ LASSERT(lov->lo_lsm->lsm_wire.lw_magic == LOV_MAGIC ||
+ lov->lo_lsm->lsm_wire.lw_magic == LOV_MAGIC_V3);
+ return &lov->u.raid0;
+}
+
+/** @} lov */
+
+#endif
diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c
new file mode 100644
index 0000000..a4006ef
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lov/lov_dev.c
@@ -0,0 +1,526 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Implementation of cl_device and cl_device_type for LOV layer.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LOV
+
+/* class_name2obd() */
+#include <obd_class.h>
+
+#include "lov_cl_internal.h"
+
+struct kmem_cache *lov_lock_kmem;
+struct kmem_cache *lov_object_kmem;
+struct kmem_cache *lov_thread_kmem;
+struct kmem_cache *lov_session_kmem;
+struct kmem_cache *lov_req_kmem;
+
+struct kmem_cache *lovsub_lock_kmem;
+struct kmem_cache *lovsub_object_kmem;
+struct kmem_cache *lovsub_req_kmem;
+
+struct kmem_cache *lov_lock_link_kmem;
+
+/** Lock class of lov_device::ld_mutex. */
+struct lock_class_key cl_lov_device_mutex_class;
+
+struct lu_kmem_descr lov_caches[] = {
+ {
+ .ckd_cache = &lov_lock_kmem,
+ .ckd_name = "lov_lock_kmem",
+ .ckd_size = sizeof (struct lov_lock)
+ },
+ {
+ .ckd_cache = &lov_object_kmem,
+ .ckd_name = "lov_object_kmem",
+ .ckd_size = sizeof (struct lov_object)
+ },
+ {
+ .ckd_cache = &lov_thread_kmem,
+ .ckd_name = "lov_thread_kmem",
+ .ckd_size = sizeof (struct lov_thread_info)
+ },
+ {
+ .ckd_cache = &lov_session_kmem,
+ .ckd_name = "lov_session_kmem",
+ .ckd_size = sizeof (struct lov_session)
+ },
+ {
+ .ckd_cache = &lov_req_kmem,
+ .ckd_name = "lov_req_kmem",
+ .ckd_size = sizeof (struct lov_req)
+ },
+ {
+ .ckd_cache = &lovsub_lock_kmem,
+ .ckd_name = "lovsub_lock_kmem",
+ .ckd_size = sizeof (struct lovsub_lock)
+ },
+ {
+ .ckd_cache = &lovsub_object_kmem,
+ .ckd_name = "lovsub_object_kmem",
+ .ckd_size = sizeof (struct lovsub_object)
+ },
+ {
+ .ckd_cache = &lovsub_req_kmem,
+ .ckd_name = "lovsub_req_kmem",
+ .ckd_size = sizeof (struct lovsub_req)
+ },
+ {
+ .ckd_cache = &lov_lock_link_kmem,
+ .ckd_name = "lov_lock_link_kmem",
+ .ckd_size = sizeof (struct lov_lock_link)
+ },
+ {
+ .ckd_cache = NULL
+ }
+};
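+/*
+ * Usage sketch (not part of this patch; assumes the generic lu_kmem helpers
+ * from obdclass): the NULL-terminated descriptor array above is meant to be
+ * handed to lu_kmem_init() from the module init code, which creates every
+ * ckd_cache slab in order, and to lu_kmem_fini() on module exit, which
+ * destroys them again:
+ *
+ *	rc = lu_kmem_init(lov_caches);
+ *	if (rc)
+ *		return rc;
+ *	...
+ *	lu_kmem_fini(lov_caches);
+ */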
+
+/*****************************************************************************
+ *
+ * Lov transfer operations.
+ *
+ */
+
+static void lov_req_completion(const struct lu_env *env,
+ const struct cl_req_slice *slice, int ioret)
+{
+ struct lov_req *lr;
+
+ lr = cl2lov_req(slice);
+ OBD_SLAB_FREE_PTR(lr, lov_req_kmem);
+}
+
+static const struct cl_req_operations lov_req_ops = {
+ .cro_completion = lov_req_completion
+};
+
+/*****************************************************************************
+ *
+ * Lov device and device type functions.
+ *
+ */
+
+static void *lov_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key)
+{
+ struct lov_thread_info *info;
+
+ OBD_SLAB_ALLOC_PTR_GFP(info, lov_thread_kmem, __GFP_IO);
+ if (info != NULL)
+ INIT_LIST_HEAD(&info->lti_closure.clc_list);
+ else
+ info = ERR_PTR(-ENOMEM);
+ return info;
+}
+
+static void lov_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
+{
+ struct lov_thread_info *info = data;
+ LINVRNT(list_empty(&info->lti_closure.clc_list));
+ OBD_SLAB_FREE_PTR(info, lov_thread_kmem);
+}
+
+struct lu_context_key lov_key = {
+ .lct_tags = LCT_CL_THREAD,
+ .lct_init = lov_key_init,
+ .lct_fini = lov_key_fini
+};
+
+static void *lov_session_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key)
+{
+ struct lov_session *info;
+
+ OBD_SLAB_ALLOC_PTR_GFP(info, lov_session_kmem, __GFP_IO);
+ if (info == NULL)
+ info = ERR_PTR(-ENOMEM);
+ return info;
+}
+
+static void lov_session_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
+{
+ struct lov_session *info = data;
+ OBD_SLAB_FREE_PTR(info, lov_session_kmem);
+}
+
+struct lu_context_key lov_session_key = {
+ .lct_tags = LCT_SESSION,
+ .lct_init = lov_session_key_init,
+ .lct_fini = lov_session_key_fini
+};
+
+/* type constructor/destructor: lov_type_{init,fini,start,stop}() */
+LU_TYPE_INIT_FINI(lov, &lov_key, &lov_session_key);
+
+static struct lu_device *lov_device_fini(const struct lu_env *env,
+ struct lu_device *d)
+{
+ int i;
+ struct lov_device *ld = lu2lov_dev(d);
+
+ LASSERT(ld->ld_lov != NULL);
+ if (ld->ld_target == NULL)
+ return NULL;
+
+ lov_foreach_target(ld, i) {
+ struct lovsub_device *lsd;
+
+ lsd = ld->ld_target[i];
+ if (lsd != NULL) {
+ cl_stack_fini(env, lovsub2cl_dev(lsd));
+ ld->ld_target[i] = NULL;
+ }
+ }
+ return NULL;
+}
+
+static int lov_device_init(const struct lu_env *env, struct lu_device *d,
+ const char *name, struct lu_device *next)
+{
+ struct lov_device *ld = lu2lov_dev(d);
+ int i;
+ int rc = 0;
+
+ LASSERT(d->ld_site != NULL);
+ if (ld->ld_target == NULL)
+ return rc;
+
+ lov_foreach_target(ld, i) {
+ struct lovsub_device *lsd;
+ struct cl_device *cl;
+ struct lov_tgt_desc *desc;
+
+ desc = ld->ld_lov->lov_tgts[i];
+ if (desc == NULL)
+ continue;
+
+ cl = cl_type_setup(env, d->ld_site, &lovsub_device_type,
+ desc->ltd_obd->obd_lu_dev);
+ if (IS_ERR(cl)) {
+ rc = PTR_ERR(cl);
+ break;
+ }
+ lsd = cl2lovsub_dev(cl);
+ lsd->acid_idx = i;
+ lsd->acid_super = ld;
+ ld->ld_target[i] = lsd;
+ }
+
+ if (rc)
+ lov_device_fini(env, d);
+ else
+ ld->ld_flags |= LOV_DEV_INITIALIZED;
+
+ return rc;
+}
+
+static int lov_req_init(const struct lu_env *env, struct cl_device *dev,
+ struct cl_req *req)
+{
+ struct lov_req *lr;
+ int result;
+
+ OBD_SLAB_ALLOC_PTR_GFP(lr, lov_req_kmem, __GFP_IO);
+ if (lr != NULL) {
+ cl_req_slice_add(req, &lr->lr_cl, dev, &lov_req_ops);
+ result = 0;
+ } else
+ result = -ENOMEM;
+ return result;
+}
+
+static const struct cl_device_operations lov_cl_ops = {
+ .cdo_req_init = lov_req_init
+};
+
+static void lov_emerg_free(struct lov_device_emerg **emrg, int nr)
+{
+ int i;
+
+ for (i = 0; i < nr; ++i) {
+ struct lov_device_emerg *em;
+
+ em = emrg[i];
+ if (em != NULL) {
+ LASSERT(em->emrg_page_list.pl_nr == 0);
+ if (em->emrg_env != NULL)
+ cl_env_put(em->emrg_env, &em->emrg_refcheck);
+ OBD_FREE_PTR(em);
+ }
+ }
+ OBD_FREE(emrg, nr * sizeof emrg[0]);
+}
+
+static struct lu_device *lov_device_free(const struct lu_env *env,
+ struct lu_device *d)
+{
+ struct lov_device *ld = lu2lov_dev(d);
+ const int nr = ld->ld_target_nr;
+
+ cl_device_fini(lu2cl_dev(d));
+ if (ld->ld_target != NULL)
+ OBD_FREE(ld->ld_target, nr * sizeof ld->ld_target[0]);
+ if (ld->ld_emrg != NULL)
+ lov_emerg_free(ld->ld_emrg, nr);
+ OBD_FREE_PTR(ld);
+ return NULL;
+}
+
+static void lov_cl_del_target(const struct lu_env *env, struct lu_device *dev,
+ __u32 index)
+{
+ struct lov_device *ld = lu2lov_dev(dev);
+
+ if (ld->ld_target[index] != NULL) {
+ cl_stack_fini(env, lovsub2cl_dev(ld->ld_target[index]));
+ ld->ld_target[index] = NULL;
+ }
+}
+
+static struct lov_device_emerg **lov_emerg_alloc(int nr)
+{
+ struct lov_device_emerg **emerg;
+ int i;
+ int result;
+
+ OBD_ALLOC(emerg, nr * sizeof emerg[0]);
+ if (emerg == NULL)
+ return ERR_PTR(-ENOMEM);
+ for (result = i = 0; i < nr && result == 0; i++) {
+ struct lov_device_emerg *em;
+
+ OBD_ALLOC_PTR(em);
+ if (em != NULL) {
+ emerg[i] = em;
+ cl_page_list_init(&em->emrg_page_list);
+ em->emrg_env = cl_env_alloc(&em->emrg_refcheck,
+ LCT_REMEMBER|LCT_NOREF);
+ if (!IS_ERR(em->emrg_env))
+ em->emrg_env->le_ctx.lc_cookie = 0x2;
+ else {
+ result = PTR_ERR(em->emrg_env);
+ em->emrg_env = NULL;
+ }
+ } else
+ result = -ENOMEM;
+ }
+ if (result != 0) {
+ lov_emerg_free(emerg, nr);
+ emerg = ERR_PTR(result);
+ }
+ return emerg;
+}
+
+static int lov_expand_targets(const struct lu_env *env, struct lov_device *dev)
+{
+ int result;
+ __u32 tgt_size;
+ __u32 sub_size;
+
+ result = 0;
+ tgt_size = dev->ld_lov->lov_tgt_size;
+ sub_size = dev->ld_target_nr;
+ if (sub_size < tgt_size) {
+ struct lovsub_device **newd;
+ struct lov_device_emerg **emerg;
+ const size_t sz = sizeof newd[0];
+
+ emerg = lov_emerg_alloc(tgt_size);
+ if (IS_ERR(emerg))
+ return PTR_ERR(emerg);
+
+ OBD_ALLOC(newd, tgt_size * sz);
+ if (newd != NULL) {
+ mutex_lock(&dev->ld_mutex);
+ if (sub_size > 0) {
+ memcpy(newd, dev->ld_target, sub_size * sz);
+ OBD_FREE(dev->ld_target, sub_size * sz);
+ }
+ dev->ld_target = newd;
+ dev->ld_target_nr = tgt_size;
+
+ if (dev->ld_emrg != NULL)
+ lov_emerg_free(dev->ld_emrg, sub_size);
+ dev->ld_emrg = emerg;
+ mutex_unlock(&dev->ld_mutex);
+ } else {
+ lov_emerg_free(emerg, tgt_size);
+ result = -ENOMEM;
+ }
+ }
+ return result;
+}
+
+static int lov_cl_add_target(const struct lu_env *env, struct lu_device *dev,
+ __u32 index)
+{
+ struct obd_device *obd = dev->ld_obd;
+ struct lov_device *ld = lu2lov_dev(dev);
+ struct lov_tgt_desc *tgt;
+ struct lovsub_device *lsd;
+ struct cl_device *cl;
+ int rc;
+
+ obd_getref(obd);
+
+ tgt = obd->u.lov.lov_tgts[index];
+ LASSERT(tgt != NULL);
+ LASSERT(tgt->ltd_obd != NULL);
+
+ if (!tgt->ltd_obd->obd_set_up) {
+ CERROR("Target %s not set up\n", obd_uuid2str(&tgt->ltd_uuid));
+ obd_putref(obd);
+ return -EINVAL;
+ }
+
+ rc = lov_expand_targets(env, ld);
+ if (rc == 0 && (ld->ld_flags & LOV_DEV_INITIALIZED)) {
+ LASSERT(dev->ld_site != NULL);
+
+ cl = cl_type_setup(env, dev->ld_site, &lovsub_device_type,
+ tgt->ltd_obd->obd_lu_dev);
+ if (!IS_ERR(cl)) {
+ lsd = cl2lovsub_dev(cl);
+ lsd->acid_idx = index;
+ lsd->acid_super = ld;
+ ld->ld_target[index] = lsd;
+ } else {
+ rc = PTR_ERR(cl);
+ CERROR("add failed (%d), deleting %s\n", rc,
+ obd_uuid2str(&tgt->ltd_uuid));
+ lov_cl_del_target(env, dev, index);
+ }
+ }
+ obd_putref(obd);
+ return rc;
+}
+
+static int lov_process_config(const struct lu_env *env,
+ struct lu_device *d, struct lustre_cfg *cfg)
+{
+ struct obd_device *obd = d->ld_obd;
+ int cmd;
+ int rc;
+ int gen;
+ __u32 index;
+
+ obd_getref(obd);
+
+ cmd = cfg->lcfg_command;
+ rc = lov_process_config_base(d->ld_obd, cfg, &index, &gen);
+ if (rc == 0) {
+ switch (cmd) {
+ case LCFG_LOV_ADD_OBD:
+ case LCFG_LOV_ADD_INA:
+ rc = lov_cl_add_target(env, d, index);
+ if (rc != 0)
+ lov_del_target(d->ld_obd, index, 0, 0);
+ break;
+ case LCFG_LOV_DEL_OBD:
+ lov_cl_del_target(env, d, index);
+ break;
+ }
+ }
+ obd_putref(obd);
+ return rc;
+}
+
+static const struct lu_device_operations lov_lu_ops = {
+ .ldo_object_alloc = lov_object_alloc,
+ .ldo_process_config = lov_process_config,
+};
+
+static struct lu_device *lov_device_alloc(const struct lu_env *env,
+ struct lu_device_type *t,
+ struct lustre_cfg *cfg)
+{
+ struct lu_device *d;
+ struct lov_device *ld;
+ struct obd_device *obd;
+ int rc;
+
+ OBD_ALLOC_PTR(ld);
+ if (ld == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ cl_device_init(&ld->ld_cl, t);
+ d = lov2lu_dev(ld);
+ d->ld_ops = &lov_lu_ops;
+ ld->ld_cl.cd_ops = &lov_cl_ops;
+
+ mutex_init(&ld->ld_mutex);
+ lockdep_set_class(&ld->ld_mutex, &cl_lov_device_mutex_class);
+
+ /* setup the LOV OBD */
+ obd = class_name2obd(lustre_cfg_string(cfg, 0));
+ LASSERT(obd != NULL);
+ rc = lov_setup(obd, cfg);
+ if (rc) {
+ lov_device_free(env, d);
+ return ERR_PTR(rc);
+ }
+
+ ld->ld_lov = &obd->u.lov;
+ return d;
+}
+
+static const struct lu_device_type_operations lov_device_type_ops = {
+ .ldto_init = lov_type_init,
+ .ldto_fini = lov_type_fini,
+
+ .ldto_start = lov_type_start,
+ .ldto_stop = lov_type_stop,
+
+ .ldto_device_alloc = lov_device_alloc,
+ .ldto_device_free = lov_device_free,
+
+ .ldto_device_init = lov_device_init,
+ .ldto_device_fini = lov_device_fini
+};
+
+struct lu_device_type lov_device_type = {
+ .ldt_tags = LU_DEVICE_CL,
+ .ldt_name = LUSTRE_LOV_NAME,
+ .ldt_ops = &lov_device_type_ops,
+ .ldt_ctx_tags = LCT_CL_THREAD
+};
+EXPORT_SYMBOL(lov_device_type);
+
+/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c
new file mode 100644
index 0000000..e6c6015
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lov/lov_ea.c
@@ -0,0 +1,348 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/lov/lov_ea.c
+ *
+ * Author: Wang Di <wangdi@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LOV
+
+#include <asm/div64.h>
+#include <linux/libcfs/libcfs.h>
+
+#include <obd_class.h>
+#include <obd_lov.h>
+#include <lustre/lustre_idl.h>
+
+#include "lov_internal.h"
+
+struct lovea_unpack_args {
+ struct lov_stripe_md *lsm;
+ int cursor;
+};
+
+static int lsm_lmm_verify_common(struct lov_mds_md *lmm, int lmm_bytes,
+ __u16 stripe_count)
+{
+ if (stripe_count > LOV_V1_INSANE_STRIPE_COUNT) {
+ CERROR("bad stripe count %d\n", stripe_count);
+ lov_dump_lmm_common(D_WARNING, lmm);
+ return -EINVAL;
+ }
+
+ if (lmm_oi_id(&lmm->lmm_oi) == 0) {
+ CERROR("zero object id\n");
+ lov_dump_lmm_common(D_WARNING, lmm);
+ return -EINVAL;
+ }
+
+ if (lov_pattern(le32_to_cpu(lmm->lmm_pattern)) != LOV_PATTERN_RAID0) {
+ CERROR("bad striping pattern\n");
+ lov_dump_lmm_common(D_WARNING, lmm);
+ return -EINVAL;
+ }
+
+ if (lmm->lmm_stripe_size == 0 ||
+ (le32_to_cpu(lmm->lmm_stripe_size) & (LOV_MIN_STRIPE_SIZE - 1)) != 0) {
+ CERROR("bad stripe size %u\n",
+ le32_to_cpu(lmm->lmm_stripe_size));
+ lov_dump_lmm_common(D_WARNING, lmm);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+struct lov_stripe_md *lsm_alloc_plain(__u16 stripe_count, int *size)
+{
+ struct lov_stripe_md *lsm;
+ struct lov_oinfo *loi;
+ int i, oinfo_ptrs_size;
+
+ LASSERT(stripe_count <= LOV_MAX_STRIPE_COUNT);
+
+ oinfo_ptrs_size = sizeof(struct lov_oinfo *) * stripe_count;
+ *size = sizeof(struct lov_stripe_md) + oinfo_ptrs_size;
+
+ OBD_ALLOC_LARGE(lsm, *size);
+ if (!lsm)
+ return NULL;
+
+ for (i = 0; i < stripe_count; i++) {
+ OBD_SLAB_ALLOC_PTR_GFP(loi, lov_oinfo_slab, __GFP_IO);
+ if (loi == NULL)
+ goto err;
+ lsm->lsm_oinfo[i] = loi;
+ }
+ lsm->lsm_stripe_count = stripe_count;
+ return lsm;
+
+err:
+ while (--i >= 0)
+ OBD_SLAB_FREE(lsm->lsm_oinfo[i], lov_oinfo_slab, sizeof(*loi));
+ OBD_FREE_LARGE(lsm, *size);
+ return NULL;
+}
+
+void lsm_free_plain(struct lov_stripe_md *lsm)
+{
+ __u16 stripe_count = lsm->lsm_stripe_count;
+ int i;
+
+ for (i = 0; i < stripe_count; i++)
+ OBD_SLAB_FREE(lsm->lsm_oinfo[i], lov_oinfo_slab,
+ sizeof(struct lov_oinfo));
+ OBD_FREE_LARGE(lsm, sizeof(struct lov_stripe_md) +
+ stripe_count * sizeof(struct lov_oinfo *));
+}
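+/*
+ * For illustration (a sketch, not part of the patch): the allocation above
+ * places the lov_oinfo pointer array directly behind struct lov_stripe_md,
+ * so for stripe_count == 4 on a 64-bit kernel the single OBD_ALLOC_LARGE()
+ * covers sizeof(struct lov_stripe_md) + 4 * sizeof(struct lov_oinfo *)
+ * bytes, while the four struct lov_oinfo objects themselves come from
+ * lov_oinfo_slab.  A caller would pair the two helpers roughly like this:
+ *
+ *	int lsm_size;
+ *	struct lov_stripe_md *lsm = lsm_alloc_plain(4, &lsm_size);
+ *
+ *	if (lsm == NULL)
+ *		return -ENOMEM;
+ *	...
+ *	lsm_free_plain(lsm);
+ */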
+
+static void lsm_unpackmd_common(struct lov_stripe_md *lsm,
+ struct lov_mds_md *lmm)
+{
+ /*
+ * This assumes that the first fields of lov_mds_md_v1 and
+ * lov_mds_md_v3 are the same.
+ */
+ lmm_oi_le_to_cpu(&lsm->lsm_oi, &lmm->lmm_oi);
+ lsm->lsm_stripe_size = le32_to_cpu(lmm->lmm_stripe_size);
+ lsm->lsm_pattern = le32_to_cpu(lmm->lmm_pattern);
+ lsm->lsm_layout_gen = le16_to_cpu(lmm->lmm_layout_gen);
+ lsm->lsm_pool_name[0] = '\0';
+}
+
+static void
+lsm_stripe_by_index_plain(struct lov_stripe_md *lsm, int *stripeno,
+ obd_off *lov_off, obd_off *swidth)
+{
+ if (swidth)
+ *swidth = (obd_off)lsm->lsm_stripe_size * lsm->lsm_stripe_count;
+}
+
+static void
+lsm_stripe_by_offset_plain(struct lov_stripe_md *lsm, int *stripeno,
+ obd_off *lov_off, obd_off *swidth)
+{
+ if (swidth)
+ *swidth = (obd_off)lsm->lsm_stripe_size * lsm->lsm_stripe_count;
+}
+
+static int lsm_destroy_plain(struct lov_stripe_md *lsm, struct obdo *oa,
+ struct obd_export *md_exp)
+{
+ return 0;
+}
+
+/* Find minimum stripe maxbytes value. For inactive or
+ * reconnecting targets use LUSTRE_STRIPE_MAXBYTES. */
+static void lov_tgt_maxbytes(struct lov_tgt_desc *tgt, __u64 *stripe_maxbytes)
+{
+ struct obd_import *imp = tgt->ltd_obd->u.cli.cl_import;
+
+ if (imp == NULL || !tgt->ltd_active) {
+ *stripe_maxbytes = LUSTRE_STRIPE_MAXBYTES;
+ return;
+ }
+
+ spin_lock(&imp->imp_lock);
+ if (imp->imp_state == LUSTRE_IMP_FULL &&
+ (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES) &&
+ imp->imp_connect_data.ocd_maxbytes > 0) {
+ if (*stripe_maxbytes > imp->imp_connect_data.ocd_maxbytes)
+ *stripe_maxbytes = imp->imp_connect_data.ocd_maxbytes;
+ } else {
+ *stripe_maxbytes = LUSTRE_STRIPE_MAXBYTES;
+ }
+ spin_unlock(&imp->imp_lock);
+}
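+/*
+ * Worked example (illustrative numbers, not part of the patch): for a file
+ * striped over two active, fully connected OSTs that advertise ocd_maxbytes
+ * of 4T and 16T, the per-stripe loop in lsm_unpackmd_v1()/_v3() below leaves
+ * stripe_maxbytes at 4T (the smaller of the two), and the file size limit
+ * becomes lsm_maxbytes = 4T * lsm_stripe_count = 8T.  Targets that are
+ * inactive or still reconnecting fall back to LUSTRE_STRIPE_MAXBYTES
+ * instead.
+ */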
+
+static int lsm_lmm_verify_v1(struct lov_mds_md_v1 *lmm, int lmm_bytes,
+ __u16 *stripe_count)
+{
+ if (lmm_bytes < sizeof(*lmm)) {
+ CERROR("lov_mds_md_v1 too small: %d, need at least %d\n",
+ lmm_bytes, (int)sizeof(*lmm));
+ return -EINVAL;
+ }
+
+ *stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
+ if (le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_RELEASED)
+ *stripe_count = 0;
+
+ if (lmm_bytes < lov_mds_md_size(*stripe_count, LOV_MAGIC_V1)) {
+ CERROR("LOV EA V1 too small: %d, need %d\n",
+ lmm_bytes, lov_mds_md_size(*stripe_count, LOV_MAGIC_V1));
+ lov_dump_lmm_common(D_WARNING, lmm);
+ return -EINVAL;
+ }
+
+ return lsm_lmm_verify_common(lmm, lmm_bytes, *stripe_count);
+}
+
+int lsm_unpackmd_v1(struct lov_obd *lov, struct lov_stripe_md *lsm,
+ struct lov_mds_md_v1 *lmm)
+{
+ struct lov_oinfo *loi;
+ int i;
+ int stripe_count;
+ __u64 stripe_maxbytes = OBD_OBJECT_EOF;
+
+ lsm_unpackmd_common(lsm, lmm);
+
+ stripe_count = lsm_is_released(lsm) ? 0 : lsm->lsm_stripe_count;
+
+ for (i = 0; i < stripe_count; i++) {
+ /* XXX LOV STACKING call down to osc_unpackmd() */
+ loi = lsm->lsm_oinfo[i];
+ ostid_le_to_cpu(&lmm->lmm_objects[i].l_ost_oi, &loi->loi_oi);
+ loi->loi_ost_idx = le32_to_cpu(lmm->lmm_objects[i].l_ost_idx);
+ loi->loi_ost_gen = le32_to_cpu(lmm->lmm_objects[i].l_ost_gen);
+ if (loi->loi_ost_idx >= lov->desc.ld_tgt_count) {
+ CERROR("OST index %d more than OST count %d\n",
+ loi->loi_ost_idx, lov->desc.ld_tgt_count);
+ lov_dump_lmm_v1(D_WARNING, lmm);
+ return -EINVAL;
+ }
+ if (!lov->lov_tgts[loi->loi_ost_idx]) {
+ CERROR("OST index %d missing\n", loi->loi_ost_idx);
+ lov_dump_lmm_v1(D_WARNING, lmm);
+ return -EINVAL;
+ }
+ /* calculate the minimum stripe max bytes */
+ lov_tgt_maxbytes(lov->lov_tgts[loi->loi_ost_idx],
+ &stripe_maxbytes);
+ }
+
+ lsm->lsm_maxbytes = stripe_maxbytes * lsm->lsm_stripe_count;
+ if (lsm->lsm_stripe_count == 0)
+ lsm->lsm_maxbytes = stripe_maxbytes * lov->desc.ld_tgt_count;
+
+ return 0;
+}
+
+const struct lsm_operations lsm_v1_ops = {
+ .lsm_free = lsm_free_plain,
+ .lsm_destroy = lsm_destroy_plain,
+ .lsm_stripe_by_index = lsm_stripe_by_index_plain,
+ .lsm_stripe_by_offset = lsm_stripe_by_offset_plain,
+ .lsm_lmm_verify = lsm_lmm_verify_v1,
+ .lsm_unpackmd = lsm_unpackmd_v1,
+};
+
+static int lsm_lmm_verify_v3(struct lov_mds_md *lmmv1, int lmm_bytes,
+ __u16 *stripe_count)
+{
+ struct lov_mds_md_v3 *lmm;
+
+ lmm = (struct lov_mds_md_v3 *)lmmv1;
+
+ if (lmm_bytes < sizeof(*lmm)) {
+ CERROR("lov_mds_md_v3 too small: %d, need at least %d\n",
+ lmm_bytes, (int)sizeof(*lmm));
+ return -EINVAL;
+ }
+
+ *stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
+ if (le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_RELEASED)
+ *stripe_count = 0;
+
+ if (lmm_bytes < lov_mds_md_size(*stripe_count, LOV_MAGIC_V3)) {
+ CERROR("LOV EA V3 too small: %d, need %d\n",
+ lmm_bytes, lov_mds_md_size(*stripe_count, LOV_MAGIC_V3));
+ lov_dump_lmm_common(D_WARNING, lmm);
+ return -EINVAL;
+ }
+
+ return lsm_lmm_verify_common((struct lov_mds_md_v1 *)lmm, lmm_bytes,
+ *stripe_count);
+}
+
+int lsm_unpackmd_v3(struct lov_obd *lov, struct lov_stripe_md *lsm,
+ struct lov_mds_md *lmmv1)
+{
+ struct lov_mds_md_v3 *lmm;
+ struct lov_oinfo *loi;
+ int i;
+ int stripe_count;
+ __u64 stripe_maxbytes = OBD_OBJECT_EOF;
+ int cplen = 0;
+
+ lmm = (struct lov_mds_md_v3 *)lmmv1;
+
+ lsm_unpackmd_common(lsm, (struct lov_mds_md_v1 *)lmm);
+
+ stripe_count = lsm_is_released(lsm) ? 0 : lsm->lsm_stripe_count;
+
+ cplen = strlcpy(lsm->lsm_pool_name, lmm->lmm_pool_name,
+ sizeof(lsm->lsm_pool_name));
+ if (cplen >= sizeof(lsm->lsm_pool_name))
+ return -E2BIG;
+
+ for (i = 0; i < stripe_count; i++) {
+ /* XXX LOV STACKING call down to osc_unpackmd() */
+ loi = lsm->lsm_oinfo[i];
+ ostid_le_to_cpu(&lmm->lmm_objects[i].l_ost_oi, &loi->loi_oi);
+ loi->loi_ost_idx = le32_to_cpu(lmm->lmm_objects[i].l_ost_idx);
+ loi->loi_ost_gen = le32_to_cpu(lmm->lmm_objects[i].l_ost_gen);
+ if (loi->loi_ost_idx >= lov->desc.ld_tgt_count) {
+ CERROR("OST index %d more than OST count %d\n",
+ loi->loi_ost_idx, lov->desc.ld_tgt_count);
+ lov_dump_lmm_v3(D_WARNING, lmm);
+ return -EINVAL;
+ }
+ if (!lov->lov_tgts[loi->loi_ost_idx]) {
+ CERROR("OST index %d missing\n", loi->loi_ost_idx);
+ lov_dump_lmm_v3(D_WARNING, lmm);
+ return -EINVAL;
+ }
+ /* calculate the minimum stripe max bytes */
+ lov_tgt_maxbytes(lov->lov_tgts[loi->loi_ost_idx],
+ &stripe_maxbytes);
+ }
+
+ lsm->lsm_maxbytes = stripe_maxbytes * lsm->lsm_stripe_count;
+ if (lsm->lsm_stripe_count == 0)
+ lsm->lsm_maxbytes = stripe_maxbytes * lov->desc.ld_tgt_count;
+
+ return 0;
+}
+
+const struct lsm_operations lsm_v3_ops = {
+ .lsm_free = lsm_free_plain,
+ .lsm_destroy = lsm_destroy_plain,
+ .lsm_stripe_by_index = lsm_stripe_by_index_plain,
+ .lsm_stripe_by_offset = lsm_stripe_by_offset_plain,
+ .lsm_lmm_verify = lsm_lmm_verify_v3,
+ .lsm_unpackmd = lsm_unpackmd_v3,
+};
diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h
new file mode 100644
index 0000000..16770d1
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lov/lov_internal.h
@@ -0,0 +1,323 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef LOV_INTERNAL_H
+#define LOV_INTERNAL_H
+
+#include <obd_class.h>
+#include <obd_lov.h>
+#include <lustre/lustre_user.h>
+
+struct lov_lock_handles {
+ struct portals_handle llh_handle;
+ atomic_t llh_refcount;
+ int llh_stripe_count;
+ struct lustre_handle llh_handles[0];
+};
+
+struct lov_request {
+ struct obd_info rq_oi;
+ struct lov_request_set *rq_rqset;
+
+ struct list_head rq_link;
+
+ int rq_idx; /* index in lov->tgts array */
+ int rq_stripe; /* stripe number */
+ int rq_complete;
+ int rq_rc;
+ int rq_buflen; /* length of sub_md */
+
+ obd_count rq_oabufs;
+ obd_count rq_pgaidx;
+};
+
+struct lov_request_set {
+ struct ldlm_enqueue_info *set_ei;
+ struct obd_info *set_oi;
+ atomic_t set_refcount;
+ struct obd_export *set_exp;
+ /* XXX: @set_exp is already here, but obd_statfs() only takes an
+ * obd_device. */
+ struct obd_device *set_obd;
+ int set_count;
+ atomic_t set_completes;
+ atomic_t set_success;
+ atomic_t set_finish_checked;
+ struct llog_cookie *set_cookies;
+ int set_cookie_sent;
+ struct obd_trans_info *set_oti;
+ obd_count set_oabufs;
+ struct brw_page *set_pga;
+ struct lov_lock_handles *set_lockh;
+ struct list_head set_list;
+ wait_queue_head_t set_waitq;
+ spinlock_t set_lock;
+};
+
+extern struct kmem_cache *lov_oinfo_slab;
+
+void lov_finish_set(struct lov_request_set *set);
+
+static inline void lov_get_reqset(struct lov_request_set *set)
+{
+ LASSERT(set != NULL);
+ LASSERT(atomic_read(&set->set_refcount) > 0);
+ atomic_inc(&set->set_refcount);
+}
+
+static inline void lov_put_reqset(struct lov_request_set *set)
+{
+ if (atomic_dec_and_test(&set->set_refcount))
+ lov_finish_set(set);
+}
+
+static inline struct lov_lock_handles *
+lov_handle2llh(struct lustre_handle *handle)
+{
+ LASSERT(handle != NULL);
+ return(class_handle2object(handle->cookie));
+}
+
+static inline void lov_llh_put(struct lov_lock_handles *llh)
+{
+ CDEBUG(D_INFO, "PUTting llh %p : new refcount %d\n", llh,
+ atomic_read(&llh->llh_refcount) - 1);
+ LASSERT(atomic_read(&llh->llh_refcount) > 0 &&
+ atomic_read(&llh->llh_refcount) < 0x5a5a);
+ if (atomic_dec_and_test(&llh->llh_refcount)) {
+ class_handle_unhash(&llh->llh_handle);
+ /* The structure may still be held by other threads because of RCU.
+ * -jxiong */
+ if (atomic_read(&llh->llh_refcount))
+ return;
+
+ OBD_FREE_RCU(llh, sizeof *llh +
+ sizeof(*llh->llh_handles) * llh->llh_stripe_count,
+ &llh->llh_handle);
+ }
+}
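+/*
+ * A minimal usage sketch (hypothetical caller, not part of this header;
+ * assumes lov_handle2llh() returns a referenced structure): callers
+ * translate a wire handle into the refcounted lov_lock_handles and drop
+ * that reference with lov_llh_put() when done:
+ *
+ *	struct lov_lock_handles *llh = lov_handle2llh(lockh);
+ *
+ *	if (llh != NULL) {
+ *		... use llh->llh_handles[stripe] ...
+ *		lov_llh_put(llh);
+ *	}
+ */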
+
+#define lov_uuid2str(lv, index) \
+ (char *)((lv)->lov_tgts[index]->ltd_uuid.uuid)
+
+/* lov_merge.c */
+void lov_merge_attrs(struct obdo *tgt, struct obdo *src, obd_valid valid,
+ struct lov_stripe_md *lsm, int stripeno, int *set);
+int lov_merge_lvb(struct obd_export *exp, struct lov_stripe_md *lsm,
+ struct ost_lvb *lvb, int kms_only);
+int lov_adjust_kms(struct obd_export *exp, struct lov_stripe_md *lsm,
+ obd_off size, int shrink);
+int lov_merge_lvb_kms(struct lov_stripe_md *lsm,
+ struct ost_lvb *lvb, __u64 *kms_place);
+
+/* lov_offset.c */
+obd_size lov_stripe_size(struct lov_stripe_md *lsm, obd_size ost_size,
+ int stripeno);
+int lov_stripe_offset(struct lov_stripe_md *lsm, obd_off lov_off,
+ int stripeno, obd_off *obd_off);
+obd_off lov_size_to_stripe(struct lov_stripe_md *lsm, obd_off file_size,
+ int stripeno);
+int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno,
+ obd_off start, obd_off end,
+ obd_off *obd_start, obd_off *obd_end);
+int lov_stripe_number(struct lov_stripe_md *lsm, obd_off lov_off);
+
+/* lov_qos.c */
+#define LOV_USES_ASSIGNED_STRIPE 0
+#define LOV_USES_DEFAULT_STRIPE 1
+int qos_add_tgt(struct obd_device *obd, __u32 index);
+int qos_del_tgt(struct obd_device *obd, struct lov_tgt_desc *tgt);
+void qos_shrink_lsm(struct lov_request_set *set);
+int qos_prep_create(struct obd_export *exp, struct lov_request_set *set);
+void qos_update(struct lov_obd *lov);
+void qos_statfs_done(struct lov_obd *lov);
+void qos_statfs_update(struct obd_device *obd, __u64 max_age, int wait);
+int qos_remedy_create(struct lov_request_set *set, struct lov_request *req);
+
+/* lov_request.c */
+void lov_set_add_req(struct lov_request *req, struct lov_request_set *set);
+int lov_set_finished(struct lov_request_set *set, int idempotent);
+void lov_update_set(struct lov_request_set *set,
+ struct lov_request *req, int rc);
+int lov_update_common_set(struct lov_request_set *set,
+ struct lov_request *req, int rc);
+int lov_check_and_wait_active(struct lov_obd *lov, int ost_idx);
+int lov_prep_create_set(struct obd_export *exp, struct obd_info *oifo,
+ struct lov_stripe_md **ea, struct obdo *src_oa,
+ struct obd_trans_info *oti,
+ struct lov_request_set **reqset);
+int cb_create_update(void *cookie, int rc);
+int lov_fini_create_set(struct lov_request_set *set, struct lov_stripe_md **ea);
+int lov_prep_brw_set(struct obd_export *exp, struct obd_info *oinfo,
+ obd_count oa_bufs, struct brw_page *pga,
+ struct obd_trans_info *oti,
+ struct lov_request_set **reqset);
+int lov_fini_brw_set(struct lov_request_set *set);
+int lov_prep_getattr_set(struct obd_export *exp, struct obd_info *oinfo,
+ struct lov_request_set **reqset);
+int lov_fini_getattr_set(struct lov_request_set *set);
+int lov_prep_destroy_set(struct obd_export *exp, struct obd_info *oinfo,
+ struct obdo *src_oa, struct lov_stripe_md *lsm,
+ struct obd_trans_info *oti,
+ struct lov_request_set **reqset);
+int lov_update_destroy_set(struct lov_request_set *set,
+ struct lov_request *req, int rc);
+int lov_fini_destroy_set(struct lov_request_set *set);
+int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo,
+ struct obd_trans_info *oti,
+ struct lov_request_set **reqset);
+int lov_update_setattr_set(struct lov_request_set *set,
+ struct lov_request *req, int rc);
+int lov_fini_setattr_set(struct lov_request_set *set);
+int lov_prep_punch_set(struct obd_export *exp, struct obd_info *oinfo,
+ struct obd_trans_info *oti,
+ struct lov_request_set **reqset);
+int lov_fini_punch_set(struct lov_request_set *set);
+int lov_prep_sync_set(struct obd_export *exp, struct obd_info *obd_info,
+ obd_off start, obd_off end,
+ struct lov_request_set **reqset);
+int lov_fini_sync_set(struct lov_request_set *set);
+int lov_prep_enqueue_set(struct obd_export *exp, struct obd_info *oinfo,
+ struct ldlm_enqueue_info *einfo,
+ struct lov_request_set **reqset);
+int lov_fini_enqueue_set(struct lov_request_set *set, __u32 mode, int rc,
+ struct ptlrpc_request_set *rqset);
+int lov_prep_match_set(struct obd_export *exp, struct obd_info *oinfo,
+ struct lov_stripe_md *lsm,
+ ldlm_policy_data_t *policy, __u32 mode,
+ struct lustre_handle *lockh,
+ struct lov_request_set **reqset);
+int lov_fini_match_set(struct lov_request_set *set, __u32 mode, int flags);
+int lov_prep_cancel_set(struct obd_export *exp, struct obd_info *oinfo,
+ struct lov_stripe_md *lsm,
+ __u32 mode, struct lustre_handle *lockh,
+ struct lov_request_set **reqset);
+int lov_fini_cancel_set(struct lov_request_set *set);
+int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo,
+ struct lov_request_set **reqset);
+void lov_update_statfs(struct obd_statfs *osfs, struct obd_statfs *lov_sfs,
+ int success);
+int lov_fini_statfs(struct obd_device *obd, struct obd_statfs *osfs,
+ int success);
+int lov_fini_statfs_set(struct lov_request_set *set);
+int lov_statfs_interpret(struct ptlrpc_request_set *rqset, void *data, int rc);
+
+/* lov_obd.c */
+void lov_fix_desc(struct lov_desc *desc);
+void lov_fix_desc_stripe_size(__u64 *val);
+void lov_fix_desc_stripe_count(__u32 *val);
+void lov_fix_desc_pattern(__u32 *val);
+void lov_fix_desc_qos_maxage(__u32 *val);
+__u16 lov_get_stripecnt(struct lov_obd *lov, __u32 magic, __u16 stripe_count);
+int lov_connect_obd(struct obd_device *obd, __u32 index, int activate,
+ struct obd_connect_data *data);
+int lov_setup(struct obd_device *obd, struct lustre_cfg *lcfg);
+int lov_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg,
+ __u32 *indexp, int *genp);
+int lov_del_target(struct obd_device *obd, __u32 index,
+ struct obd_uuid *uuidp, int gen);
+/* lov_log.c */
+int lov_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
+ struct obd_device *tgt, int *idx);
+int lov_llog_finish(struct obd_device *obd, int count);
+
+/* lov_pack.c */
+int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmm,
+ struct lov_stripe_md *lsm);
+int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
+ struct lov_mds_md *lmm, int lmm_bytes);
+int lov_setstripe(struct obd_export *exp, int max_lmm_size,
+ struct lov_stripe_md **lsmp, struct lov_user_md *lump);
+int lov_setea(struct obd_export *exp, struct lov_stripe_md **lsmp,
+ struct lov_user_md *lump);
+int lov_getstripe(struct obd_export *exp,
+ struct lov_stripe_md *lsm, struct lov_user_md *lump);
+int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count,
+ int pattern, int magic);
+int lov_free_memmd(struct lov_stripe_md **lsmp);
+
+void lov_dump_lmm_v1(int level, struct lov_mds_md_v1 *lmm);
+void lov_dump_lmm_v3(int level, struct lov_mds_md_v3 *lmm);
+void lov_dump_lmm_common(int level, void *lmmp);
+void lov_dump_lmm(int level, void *lmm);
+
+/* lov_ea.c */
+struct lov_stripe_md *lsm_alloc_plain(__u16 stripe_count, int *size);
+void lsm_free_plain(struct lov_stripe_md *lsm);
+
+int lovea_destroy_object(struct lov_obd *lov, struct lov_stripe_md *lsm,
+ struct obdo *oa, void *data);
+/* lproc_lov.c */
+extern struct file_operations lov_proc_target_fops;
+#ifdef LPROCFS
+void lprocfs_lov_init_vars(struct lprocfs_static_vars *lvars);
+#else
+static inline void lprocfs_lov_init_vars(struct lprocfs_static_vars *lvars)
+{
+ memset(lvars, 0, sizeof(*lvars));
+}
+#endif
+
+/* lov_cl.c */
+extern struct lu_device_type lov_device_type;
+
+/* pools */
+extern cfs_hash_ops_t pool_hash_operations;
+/* ost_pool methods */
+int lov_ost_pool_init(struct ost_pool *op, unsigned int count);
+int lov_ost_pool_extend(struct ost_pool *op, unsigned int min_count);
+int lov_ost_pool_add(struct ost_pool *op, __u32 idx, unsigned int min_count);
+int lov_ost_pool_remove(struct ost_pool *op, __u32 idx);
+int lov_ost_pool_free(struct ost_pool *op);
+
+/* high level pool methods */
+int lov_pool_new(struct obd_device *obd, char *poolname);
+int lov_pool_del(struct obd_device *obd, char *poolname);
+int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname);
+int lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname);
+void lov_dump_pool(int level, struct pool_desc *pool);
+struct pool_desc *lov_find_pool(struct lov_obd *lov, char *poolname);
+int lov_check_index_in_pool(__u32 idx, struct pool_desc *pool);
+void lov_pool_putref(struct pool_desc *pool);
+
+static inline struct lov_stripe_md *lsm_addref(struct lov_stripe_md *lsm)
+{
+ LASSERT(atomic_read(&lsm->lsm_refc) > 0);
+ atomic_inc(&lsm->lsm_refc);
+ return lsm;
+}
+
+#endif
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
new file mode 100644
index 0000000..b611aa4
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -0,0 +1,968 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Implementation of cl_io for LOV layer.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LOV
+
+#include "lov_cl_internal.h"
+
+/** \addtogroup lov
+ * @{
+ */
+
+static inline void lov_sub_enter(struct lov_io_sub *sub)
+{
+ sub->sub_reenter++;
+}
+static inline void lov_sub_exit(struct lov_io_sub *sub)
+{
+ sub->sub_reenter--;
+}
+
+static void lov_io_sub_fini(const struct lu_env *env, struct lov_io *lio,
+ struct lov_io_sub *sub)
+{
+ if (sub->sub_io != NULL) {
+ if (sub->sub_io_initialized) {
+ lov_sub_enter(sub);
+ cl_io_fini(sub->sub_env, sub->sub_io);
+ lov_sub_exit(sub);
+ sub->sub_io_initialized = 0;
+ lio->lis_active_subios--;
+ }
+ if (sub->sub_stripe == lio->lis_single_subio_index)
+ lio->lis_single_subio_index = -1;
+ else if (!sub->sub_borrowed)
+ OBD_FREE_PTR(sub->sub_io);
+ sub->sub_io = NULL;
+ }
+ if (sub->sub_env != NULL && !IS_ERR(sub->sub_env)) {
+ if (!sub->sub_borrowed)
+ cl_env_put(sub->sub_env, &sub->sub_refcheck);
+ sub->sub_env = NULL;
+ }
+}
+
+static void lov_io_sub_inherit(struct cl_io *io, struct lov_io *lio,
+ int stripe, loff_t start, loff_t end)
+{
+ struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
+ struct cl_io *parent = lio->lis_cl.cis_io;
+
+ switch (io->ci_type) {
+ case CIT_SETATTR: {
+ io->u.ci_setattr.sa_attr = parent->u.ci_setattr.sa_attr;
+ io->u.ci_setattr.sa_valid = parent->u.ci_setattr.sa_valid;
+ io->u.ci_setattr.sa_capa = parent->u.ci_setattr.sa_capa;
+ if (cl_io_is_trunc(io)) {
+ loff_t new_size = parent->u.ci_setattr.sa_attr.lvb_size;
+
+ new_size = lov_size_to_stripe(lsm, new_size, stripe);
+ io->u.ci_setattr.sa_attr.lvb_size = new_size;
+ }
+ break;
+ }
+ case CIT_FAULT: {
+ struct cl_object *obj = parent->ci_obj;
+ loff_t off = cl_offset(obj, parent->u.ci_fault.ft_index);
+
+ io->u.ci_fault = parent->u.ci_fault;
+ off = lov_size_to_stripe(lsm, off, stripe);
+ io->u.ci_fault.ft_index = cl_index(obj, off);
+ break;
+ }
+ case CIT_FSYNC: {
+ io->u.ci_fsync.fi_start = start;
+ io->u.ci_fsync.fi_end = end;
+ io->u.ci_fsync.fi_capa = parent->u.ci_fsync.fi_capa;
+ io->u.ci_fsync.fi_fid = parent->u.ci_fsync.fi_fid;
+ io->u.ci_fsync.fi_mode = parent->u.ci_fsync.fi_mode;
+ break;
+ }
+ case CIT_READ:
+ case CIT_WRITE: {
+ io->u.ci_wr.wr_sync = cl_io_is_sync_write(parent);
+ if (cl_io_is_append(parent)) {
+ io->u.ci_wr.wr_append = 1;
+ } else {
+ io->u.ci_rw.crw_pos = start;
+ io->u.ci_rw.crw_count = end - start;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+}
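+/*
+ * Worked example (a sketch assuming the usual RAID0 mapping performed by
+ * lov_size_to_stripe(), with illustrative numbers): truncating a file with
+ * two 1 MiB stripes down to 2.5 MiB makes the CIT_SETATTR case above hand
+ * sub-io 0 an lvb_size of 1.5 MiB (its first file chunk plus half of the
+ * third) and sub-io 1 an lvb_size of 1 MiB (its single full chunk); each
+ * sub-layer then truncates only its own object.
+ */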
+
+static int lov_io_sub_init(const struct lu_env *env, struct lov_io *lio,
+ struct lov_io_sub *sub)
+{
+ struct lov_object *lov = lio->lis_object;
+ struct lov_device *ld = lu2lov_dev(lov2cl(lov)->co_lu.lo_dev);
+ struct cl_io *sub_io;
+ struct cl_object *sub_obj;
+ struct cl_io *io = lio->lis_cl.cis_io;
+
+ int stripe = sub->sub_stripe;
+ int result;
+
+ LASSERT(sub->sub_io == NULL);
+ LASSERT(sub->sub_env == NULL);
+ LASSERT(sub->sub_stripe < lio->lis_stripe_count);
+
+ result = 0;
+ sub->sub_io_initialized = 0;
+ sub->sub_borrowed = 0;
+
+ if (lio->lis_mem_frozen) {
+ LASSERT(mutex_is_locked(&ld->ld_mutex));
+ sub->sub_io = &ld->ld_emrg[stripe]->emrg_subio;
+ sub->sub_env = ld->ld_emrg[stripe]->emrg_env;
+ sub->sub_borrowed = 1;
+ } else {
+ void *cookie;
+
+ /* obtain new environment */
+ cookie = cl_env_reenter();
+ sub->sub_env = cl_env_get(&sub->sub_refcheck);
+ cl_env_reexit(cookie);
+ if (IS_ERR(sub->sub_env))
+ result = PTR_ERR(sub->sub_env);
+
+ if (result == 0) {
+ /*
+ * First sub-io. Use ->lis_single_subio to
+ * avoid dynamic allocation.
+ */
+ if (lio->lis_active_subios == 0) {
+ sub->sub_io = &lio->lis_single_subio;
+ lio->lis_single_subio_index = stripe;
+ } else {
+ OBD_ALLOC_PTR(sub->sub_io);
+ if (sub->sub_io == NULL)
+ result = -ENOMEM;
+ }
+ }
+ }
+
+ if (result == 0) {
+ sub_obj = lovsub2cl(lov_r0(lov)->lo_sub[stripe]);
+ sub_io = sub->sub_io;
+
+ sub_io->ci_obj = sub_obj;
+ sub_io->ci_result = 0;
+
+ sub_io->ci_parent = io;
+ sub_io->ci_lockreq = io->ci_lockreq;
+ sub_io->ci_type = io->ci_type;
+ sub_io->ci_no_srvlock = io->ci_no_srvlock;
+
+ lov_sub_enter(sub);
+ result = cl_io_sub_init(sub->sub_env, sub_io,
+ io->ci_type, sub_obj);
+ lov_sub_exit(sub);
+ if (result >= 0) {
+ lio->lis_active_subios++;
+ sub->sub_io_initialized = 1;
+ result = 0;
+ }
+ }
+ if (result != 0)
+ lov_io_sub_fini(env, lio, sub);
+ return result;
+}
+
+struct lov_io_sub *lov_sub_get(const struct lu_env *env,
+ struct lov_io *lio, int stripe)
+{
+ int rc;
+ struct lov_io_sub *sub = &lio->lis_subs[stripe];
+
+ LASSERT(stripe < lio->lis_stripe_count);
+
+ if (!sub->sub_io_initialized) {
+ sub->sub_stripe = stripe;
+ rc = lov_io_sub_init(env, lio, sub);
+ } else
+ rc = 0;
+ if (rc == 0)
+ lov_sub_enter(sub);
+ else
+ sub = ERR_PTR(rc);
+ return sub;
+}
+
+void lov_sub_put(struct lov_io_sub *sub)
+{
+ lov_sub_exit(sub);
+}
+
+/*****************************************************************************
+ *
+ * Lov io operations.
+ *
+ */
+
+static int lov_page_stripe(const struct cl_page *page)
+{
+ struct lovsub_object *subobj;
+
+ subobj = lu2lovsub(
+ lu_object_locate(page->cp_child->cp_obj->co_lu.lo_header,
+ &lovsub_device_type));
+ LASSERT(subobj != NULL);
+ return subobj->lso_index;
+}
+
+struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio,
+ const struct cl_page_slice *slice)
+{
+ struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
+ struct cl_page *page = slice->cpl_page;
+ int stripe;
+
+ LASSERT(lio->lis_cl.cis_io != NULL);
+ LASSERT(cl2lov(slice->cpl_obj) == lio->lis_object);
+ LASSERT(lsm != NULL);
+ LASSERT(lio->lis_nr_subios > 0);
+
+ stripe = lov_page_stripe(page);
+ return lov_sub_get(env, lio, stripe);
+}
+
+static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
+ struct cl_io *io)
+{
+ struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
+ int result;
+
+ LASSERT(lio->lis_object != NULL);
+
+ /*
+ * This needs to be optimized: we cannot afford to allocate memory
+ * while writing out a page. -jay
+ */
+ OBD_ALLOC_LARGE(lio->lis_subs,
+ lsm->lsm_stripe_count * sizeof lio->lis_subs[0]);
+ if (lio->lis_subs != NULL) {
+ lio->lis_nr_subios = lio->lis_stripe_count;
+ lio->lis_single_subio_index = -1;
+ lio->lis_active_subios = 0;
+ result = 0;
+ } else
+ result = -ENOMEM;
+ return result;
+}
+
+static void lov_io_slice_init(struct lov_io *lio,
+ struct lov_object *obj, struct cl_io *io)
+{
+ io->ci_result = 0;
+ lio->lis_object = obj;
+
+ LASSERT(obj->lo_lsm != NULL);
+ lio->lis_stripe_count = obj->lo_lsm->lsm_stripe_count;
+
+ switch (io->ci_type) {
+ case CIT_READ:
+ case CIT_WRITE:
+ lio->lis_pos = io->u.ci_rw.crw_pos;
+ lio->lis_endpos = io->u.ci_rw.crw_pos + io->u.ci_rw.crw_count;
+ lio->lis_io_endpos = lio->lis_endpos;
+ if (cl_io_is_append(io)) {
+ LASSERT(io->ci_type == CIT_WRITE);
+ lio->lis_pos = 0;
+ lio->lis_endpos = OBD_OBJECT_EOF;
+ }
+ break;
+
+ case CIT_SETATTR:
+ if (cl_io_is_trunc(io))
+ lio->lis_pos = io->u.ci_setattr.sa_attr.lvb_size;
+ else
+ lio->lis_pos = 0;
+ lio->lis_endpos = OBD_OBJECT_EOF;
+ break;
+
+ case CIT_FAULT: {
+ pgoff_t index = io->u.ci_fault.ft_index;
+ lio->lis_pos = cl_offset(io->ci_obj, index);
+ lio->lis_endpos = cl_offset(io->ci_obj, index + 1);
+ break;
+ }
+
+ case CIT_FSYNC: {
+ lio->lis_pos = io->u.ci_fsync.fi_start;
+ lio->lis_endpos = io->u.ci_fsync.fi_end;
+ break;
+ }
+
+ case CIT_MISC:
+ lio->lis_pos = 0;
+ lio->lis_endpos = OBD_OBJECT_EOF;
+ break;
+
+ default:
+ LBUG();
+ }
+}
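+/*
+ * For illustration (numbers are examples only): a 1 MiB read at offset
+ * 3 MiB yields lis_pos = 3 MiB and lis_endpos = 4 MiB; an O_APPEND write
+ * or a CIT_MISC io covers [0, OBD_OBJECT_EOF); and a truncate to 5 MiB
+ * covers [5 MiB, OBD_OBJECT_EOF).  These windows drive which stripes get
+ * sub-ios in lov_io_iter_init() below.
+ */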
+
+static void lov_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
+{
+ struct lov_io *lio = cl2lov_io(env, ios);
+ struct lov_object *lov = cl2lov(ios->cis_obj);
+ int i;
+
+ if (lio->lis_subs != NULL) {
+ for (i = 0; i < lio->lis_nr_subios; i++)
+ lov_io_sub_fini(env, lio, &lio->lis_subs[i]);
+ OBD_FREE_LARGE(lio->lis_subs,
+ lio->lis_nr_subios * sizeof lio->lis_subs[0]);
+ lio->lis_nr_subios = 0;
+ }
+
+ LASSERT(atomic_read(&lov->lo_active_ios) > 0);
+ if (atomic_dec_and_test(&lov->lo_active_ios))
+ wake_up_all(&lov->lo_waitq);
+}
+
+static obd_off lov_offset_mod(obd_off val, int delta)
+{
+ if (val != OBD_OBJECT_EOF)
+ val += delta;
+ return val;
+}
+
+static int lov_io_iter_init(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct lov_io *lio = cl2lov_io(env, ios);
+ struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
+ struct lov_io_sub *sub;
+ obd_off endpos;
+ obd_off start;
+ obd_off end;
+ int stripe;
+ int rc = 0;
+
+ endpos = lov_offset_mod(lio->lis_endpos, -1);
+ for (stripe = 0; stripe < lio->lis_stripe_count; stripe++) {
+ if (!lov_stripe_intersects(lsm, stripe, lio->lis_pos,
+ endpos, &start, &end))
+ continue;
+
+ end = lov_offset_mod(end, +1);
+ sub = lov_sub_get(env, lio, stripe);
+ if (!IS_ERR(sub)) {
+ lov_io_sub_inherit(sub->sub_io, lio, stripe,
+ start, end);
+ rc = cl_io_iter_init(sub->sub_env, sub->sub_io);
+ lov_sub_put(sub);
+ CDEBUG(D_VFSTRACE, "shrink: %d ["LPU64", "LPU64")\n",
+ stripe, start, end);
+ } else
+ rc = PTR_ERR(sub);
+
+ if (!rc)
+ list_add_tail(&sub->sub_linkage, &lio->lis_active);
+ else
+ break;
+ }
+ return rc;
+}
+
+static int lov_io_rw_iter_init(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct lov_io *lio = cl2lov_io(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
+ __u64 start = io->u.ci_rw.crw_pos;
+ loff_t next;
+ unsigned long ssize = lsm->lsm_stripe_size;
+
+ LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
+
+ /* fast path for common case. */
+ if (lio->lis_nr_subios != 1 && !cl_io_is_append(io)) {
+
+ lov_do_div64(start, ssize);
+ next = (start + 1) * ssize;
+ if (next <= start * ssize)
+ next = ~0ull;
+
+ io->ci_continue = next < lio->lis_io_endpos;
+ io->u.ci_rw.crw_count = min_t(loff_t, lio->lis_io_endpos,
+ next) - io->u.ci_rw.crw_pos;
+ lio->lis_pos = io->u.ci_rw.crw_pos;
+ lio->lis_endpos = io->u.ci_rw.crw_pos + io->u.ci_rw.crw_count;
+ CDEBUG(D_VFSTRACE, "stripe: "LPU64" chunk: ["LPU64", "LPU64") "
+ LPU64"\n", (__u64)start, lio->lis_pos, lio->lis_endpos,
+ (__u64)lio->lis_io_endpos);
+ }
+ /*
+ * XXX The following call should be optimized: we know, that
+ * [lio->lis_pos, lio->lis_endpos) intersects with exactly one stripe.
+ */
+ return lov_io_iter_init(env, ios);
+}
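+/*
+ * Worked example (illustrative numbers only): with lsm_stripe_size = 1 MiB,
+ * crw_pos = 1.5 MiB and lis_io_endpos = 10 MiB, the arithmetic above rounds
+ * start down to stripe chunk 1, sets next = 2 MiB, clips crw_count to
+ * 0.5 MiB so this iteration stays inside [1.5 MiB, 2 MiB), and sets
+ * ci_continue because 2 MiB < 10 MiB; the generic cl_io loop can then
+ * re-enter with crw_pos advanced to 2 MiB for the next chunk.
+ */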
+
+static int lov_io_call(const struct lu_env *env, struct lov_io *lio,
+ int (*iofunc)(const struct lu_env *, struct cl_io *))
+{
+ struct cl_io *parent = lio->lis_cl.cis_io;
+ struct lov_io_sub *sub;
+ int rc = 0;
+
+ list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
+ lov_sub_enter(sub);
+ rc = iofunc(sub->sub_env, sub->sub_io);
+ lov_sub_exit(sub);
+ if (rc)
+ break;
+
+ if (parent->ci_result == 0)
+ parent->ci_result = sub->sub_io->ci_result;
+ }
+ return rc;
+}
+
+static int lov_io_lock(const struct lu_env *env, const struct cl_io_slice *ios)
+{
+ return lov_io_call(env, cl2lov_io(env, ios), cl_io_lock);
+}
+
+static int lov_io_start(const struct lu_env *env, const struct cl_io_slice *ios)
+{
+ return lov_io_call(env, cl2lov_io(env, ios), cl_io_start);
+}
+
+static int lov_io_end_wrapper(const struct lu_env *env, struct cl_io *io)
+{
+ /*
+ * It's possible that lov_io_start() wasn't called for this
+ * sub-io, either because a previous sub-io failed or because
+ * the upper layer completed the IO.
+ */
+ if (io->ci_state == CIS_IO_GOING)
+ cl_io_end(env, io);
+ else
+ io->ci_state = CIS_IO_FINISHED;
+ return 0;
+}
+
+static int lov_io_iter_fini_wrapper(const struct lu_env *env, struct cl_io *io)
+{
+ cl_io_iter_fini(env, io);
+ return 0;
+}
+
+static int lov_io_unlock_wrapper(const struct lu_env *env, struct cl_io *io)
+{
+ cl_io_unlock(env, io);
+ return 0;
+}
+
+static void lov_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
+{
+ int rc;
+
+ rc = lov_io_call(env, cl2lov_io(env, ios), lov_io_end_wrapper);
+ LASSERT(rc == 0);
+}
+
+static void lov_io_iter_fini(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct lov_io *lio = cl2lov_io(env, ios);
+ int rc;
+
+ rc = lov_io_call(env, lio, lov_io_iter_fini_wrapper);
+ LASSERT(rc == 0);
+ while (!list_empty(&lio->lis_active))
+ list_del_init(lio->lis_active.next);
+}
+
+static void lov_io_unlock(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ int rc;
+
+ rc = lov_io_call(env, cl2lov_io(env, ios), lov_io_unlock_wrapper);
+ LASSERT(rc == 0);
+}
+
+static struct cl_page_list *lov_io_submit_qin(struct lov_device *ld,
+ struct cl_page_list *qin,
+ int idx, int alloc)
+{
+ return alloc ? &qin[idx] : &ld->ld_emrg[idx]->emrg_page_list;
+}
+
+/**
+ * lov implementation of cl_operations::cio_submit() method. It takes a list
+ * of pages in \a queue, splits it into per-stripe sub-lists, invokes
+ * cl_io_submit() on underlying devices to submit sub-lists, and then splices
+ * everything back.
+ *
+ * The major complication in this function is the need to handle memory
+ * cleansing: cl_io_submit() is called to write out pages as part of VM
+ * memory reclamation, and hence it must not fail due to memory shortage
+ * (the system would deadlock otherwise). To deal with this, some resources
+ * (sub-lists, sub-environments, etc.) are allocated per-device at "startup"
+ * (i.e., in a context that is not doing memory cleansing), and in case of
+ * memory shortage these pre-allocated resources are used by lov_io_submit()
+ * under the lov_device::ld_mutex mutex.
+ */
+static int lov_io_submit(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ enum cl_req_type crt, struct cl_2queue *queue)
+{
+ struct lov_io *lio = cl2lov_io(env, ios);
+ struct lov_object *obj = lio->lis_object;
+ struct lov_device *ld = lu2lov_dev(lov2cl(obj)->co_lu.lo_dev);
+ struct cl_page_list *qin = &queue->c2_qin;
+ struct cl_2queue *cl2q = &lov_env_info(env)->lti_cl2q;
+ struct cl_page_list *stripes_qin = NULL;
+ struct cl_page *page;
+ struct cl_page *tmp;
+ int stripe;
+
+#define QIN(stripe) lov_io_submit_qin(ld, stripes_qin, stripe, alloc)
+
+ int rc = 0;
+ int alloc = !(current->flags & PF_MEMALLOC);
+
+ if (lio->lis_active_subios == 1) {
+ int idx = lio->lis_single_subio_index;
+ struct lov_io_sub *sub;
+
+ LASSERT(idx < lio->lis_nr_subios);
+ sub = lov_sub_get(env, lio, idx);
+ LASSERT(!IS_ERR(sub));
+ LASSERT(sub->sub_io == &lio->lis_single_subio);
+ rc = cl_io_submit_rw(sub->sub_env, sub->sub_io,
+ crt, queue);
+ lov_sub_put(sub);
+ return rc;
+ }
+
+ LASSERT(lio->lis_subs != NULL);
+ if (alloc) {
+ OBD_ALLOC_LARGE(stripes_qin,
+ sizeof(*stripes_qin) * lio->lis_nr_subios);
+ if (stripes_qin == NULL)
+ return -ENOMEM;
+
+ for (stripe = 0; stripe < lio->lis_nr_subios; stripe++)
+ cl_page_list_init(&stripes_qin[stripe]);
+ } else {
+ /*
+ * If we get here, pageout & swap are not helping. To avoid
+ * making things worse, do not even try to allocate memory,
+ * not even with __GFP_NOWARN. -jay
+ */
+ mutex_lock(&ld->ld_mutex);
+ lio->lis_mem_frozen = 1;
+ }
+
+ cl_2queue_init(cl2q);
+ cl_page_list_for_each_safe(page, tmp, qin) {
+ stripe = lov_page_stripe(page);
+ cl_page_list_move(QIN(stripe), qin, page);
+ }
+
+ for (stripe = 0; stripe < lio->lis_nr_subios; stripe++) {
+ struct lov_io_sub *sub;
+ struct cl_page_list *sub_qin = QIN(stripe);
+
+ if (list_empty(&sub_qin->pl_pages))
+ continue;
+
+ cl_page_list_splice(sub_qin, &cl2q->c2_qin);
+ sub = lov_sub_get(env, lio, stripe);
+ if (!IS_ERR(sub)) {
+ rc = cl_io_submit_rw(sub->sub_env, sub->sub_io,
+ crt, cl2q);
+ lov_sub_put(sub);
+ } else
+ rc = PTR_ERR(sub);
+ cl_page_list_splice(&cl2q->c2_qin, &queue->c2_qin);
+ cl_page_list_splice(&cl2q->c2_qout, &queue->c2_qout);
+ if (rc != 0)
+ break;
+ }
+
+ for (stripe = 0; stripe < lio->lis_nr_subios; stripe++) {
+ struct cl_page_list *sub_qin = QIN(stripe);
+
+ if (list_empty(&sub_qin->pl_pages))
+ continue;
+
+ cl_page_list_splice(sub_qin, qin);
+ }
+
+ if (alloc) {
+ OBD_FREE_LARGE(stripes_qin,
+ sizeof(*stripes_qin) * lio->lis_nr_subios);
+ } else {
+ int i;
+
+ for (i = 0; i < lio->lis_nr_subios; i++) {
+ struct cl_io *cio = lio->lis_subs[i].sub_io;
+
+ if (cio && cio == &ld->ld_emrg[i]->emrg_subio)
+ lov_io_sub_fini(env, lio, &lio->lis_subs[i]);
+ }
+ lio->lis_mem_frozen = 0;
+ mutex_unlock(&ld->ld_mutex);
+ }
+
+ return rc;
+#undef QIN
+}
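+/*
+ * The submit path above, condensed (a sketch, not a substitute for the
+ * code):
+ *
+ *	alloc = !(current->flags & PF_MEMALLOC);
+ *	if (alloc)
+ *		per-stripe queues come from OBD_ALLOC_LARGE();
+ *	else
+ *		take ld->ld_mutex and reuse the per-device emergency
+ *		queues/environments set up by lov_emerg_alloc();
+ *	for each page in qin:
+ *		move it to QIN(lov_page_stripe(page));
+ *	for each stripe with pages:
+ *		splice into cl2q, call cl_io_submit_rw() on the sub-io,
+ *		splice results back into queue->c2_qin/c2_qout;
+ *
+ * so a memory-reclaim writeout never has to allocate in order to make
+ * forward progress.
+ */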
+
+static int lov_io_prepare_write(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ const struct cl_page_slice *slice,
+ unsigned from, unsigned to)
+{
+ struct lov_io *lio = cl2lov_io(env, ios);
+ struct cl_page *sub_page = lov_sub_page(slice);
+ struct lov_io_sub *sub;
+ int result;
+
+ sub = lov_page_subio(env, lio, slice);
+ if (!IS_ERR(sub)) {
+ result = cl_io_prepare_write(sub->sub_env, sub->sub_io,
+ sub_page, from, to);
+ lov_sub_put(sub);
+ } else
+ result = PTR_ERR(sub);
+ return result;
+}
+
+static int lov_io_commit_write(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ const struct cl_page_slice *slice,
+ unsigned from, unsigned to)
+{
+ struct lov_io *lio = cl2lov_io(env, ios);
+ struct cl_page *sub_page = lov_sub_page(slice);
+ struct lov_io_sub *sub;
+ int result;
+
+ sub = lov_page_subio(env, lio, slice);
+ if (!IS_ERR(sub)) {
+ result = cl_io_commit_write(sub->sub_env, sub->sub_io,
+ sub_page, from, to);
+ lov_sub_put(sub);
+ } else
+ result = PTR_ERR(sub);
+ return result;
+}
+
+static int lov_io_fault_start(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct cl_fault_io *fio;
+ struct lov_io *lio;
+ struct lov_io_sub *sub;
+
+ fio = &ios->cis_io->u.ci_fault;
+ lio = cl2lov_io(env, ios);
+ sub = lov_sub_get(env, lio, lov_page_stripe(fio->ft_page));
+ sub->sub_io->u.ci_fault.ft_nob = fio->ft_nob;
+ lov_sub_put(sub);
+ return lov_io_start(env, ios);
+}
+
+static void lov_io_fsync_end(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct lov_io *lio = cl2lov_io(env, ios);
+ struct lov_io_sub *sub;
+ unsigned int *written = &ios->cis_io->u.ci_fsync.fi_nr_written;
+
+ *written = 0;
+ list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
+ struct cl_io *subio = sub->sub_io;
+
+ lov_sub_enter(sub);
+ lov_io_end_wrapper(sub->sub_env, subio);
+ lov_sub_exit(sub);
+
+ if (subio->ci_result == 0)
+ *written += subio->u.ci_fsync.fi_nr_written;
+ }
+}
+
+static const struct cl_io_operations lov_io_ops = {
+ .op = {
+ [CIT_READ] = {
+ .cio_fini = lov_io_fini,
+ .cio_iter_init = lov_io_rw_iter_init,
+ .cio_iter_fini = lov_io_iter_fini,
+ .cio_lock = lov_io_lock,
+ .cio_unlock = lov_io_unlock,
+ .cio_start = lov_io_start,
+ .cio_end = lov_io_end
+ },
+ [CIT_WRITE] = {
+ .cio_fini = lov_io_fini,
+ .cio_iter_init = lov_io_rw_iter_init,
+ .cio_iter_fini = lov_io_iter_fini,
+ .cio_lock = lov_io_lock,
+ .cio_unlock = lov_io_unlock,
+ .cio_start = lov_io_start,
+ .cio_end = lov_io_end
+ },
+ [CIT_SETATTR] = {
+ .cio_fini = lov_io_fini,
+ .cio_iter_init = lov_io_iter_init,
+ .cio_iter_fini = lov_io_iter_fini,
+ .cio_lock = lov_io_lock,
+ .cio_unlock = lov_io_unlock,
+ .cio_start = lov_io_start,
+ .cio_end = lov_io_end
+ },
+ [CIT_FAULT] = {
+ .cio_fini = lov_io_fini,
+ .cio_iter_init = lov_io_iter_init,
+ .cio_iter_fini = lov_io_iter_fini,
+ .cio_lock = lov_io_lock,
+ .cio_unlock = lov_io_unlock,
+ .cio_start = lov_io_fault_start,
+ .cio_end = lov_io_end
+ },
+ [CIT_FSYNC] = {
+ .cio_fini = lov_io_fini,
+ .cio_iter_init = lov_io_iter_init,
+ .cio_iter_fini = lov_io_iter_fini,
+ .cio_lock = lov_io_lock,
+ .cio_unlock = lov_io_unlock,
+ .cio_start = lov_io_start,
+ .cio_end = lov_io_fsync_end
+ },
+ [CIT_MISC] = {
+ .cio_fini = lov_io_fini
+ }
+ },
+ .req_op = {
+ [CRT_READ] = {
+ .cio_submit = lov_io_submit
+ },
+ [CRT_WRITE] = {
+ .cio_submit = lov_io_submit
+ }
+ },
+ .cio_prepare_write = lov_io_prepare_write,
+ .cio_commit_write = lov_io_commit_write
+};
+
+/*****************************************************************************
+ *
+ * Empty lov io operations.
+ *
+ */
+
+static void lov_empty_io_fini(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct lov_object *lov = cl2lov(ios->cis_obj);
+
+ if (atomic_dec_and_test(&lov->lo_active_ios))
+ wake_up_all(&lov->lo_waitq);
+}
+
+static void lov_empty_impossible(const struct lu_env *env,
+ struct cl_io_slice *ios)
+{
+ LBUG();
+}
+
+#define LOV_EMPTY_IMPOSSIBLE ((void *)lov_empty_impossible)
+
+/**
+ * An io operation vector for files without stripes.
+ */
+static const struct cl_io_operations lov_empty_io_ops = {
+ .op = {
+ [CIT_READ] = {
+ .cio_fini = lov_empty_io_fini,
+#if 0
+ .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
+ .cio_lock = LOV_EMPTY_IMPOSSIBLE,
+ .cio_start = LOV_EMPTY_IMPOSSIBLE,
+ .cio_end = LOV_EMPTY_IMPOSSIBLE
+#endif
+ },
+ [CIT_WRITE] = {
+ .cio_fini = lov_empty_io_fini,
+ .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
+ .cio_lock = LOV_EMPTY_IMPOSSIBLE,
+ .cio_start = LOV_EMPTY_IMPOSSIBLE,
+ .cio_end = LOV_EMPTY_IMPOSSIBLE
+ },
+ [CIT_SETATTR] = {
+ .cio_fini = lov_empty_io_fini,
+ .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
+ .cio_lock = LOV_EMPTY_IMPOSSIBLE,
+ .cio_start = LOV_EMPTY_IMPOSSIBLE,
+ .cio_end = LOV_EMPTY_IMPOSSIBLE
+ },
+ [CIT_FAULT] = {
+ .cio_fini = lov_empty_io_fini,
+ .cio_iter_init = LOV_EMPTY_IMPOSSIBLE,
+ .cio_lock = LOV_EMPTY_IMPOSSIBLE,
+ .cio_start = LOV_EMPTY_IMPOSSIBLE,
+ .cio_end = LOV_EMPTY_IMPOSSIBLE
+ },
+ [CIT_FSYNC] = {
+ .cio_fini = lov_empty_io_fini
+ },
+ [CIT_MISC] = {
+ .cio_fini = lov_empty_io_fini
+ }
+ },
+ .req_op = {
+ [CRT_READ] = {
+ .cio_submit = LOV_EMPTY_IMPOSSIBLE
+ },
+ [CRT_WRITE] = {
+ .cio_submit = LOV_EMPTY_IMPOSSIBLE
+ }
+ },
+ .cio_commit_write = LOV_EMPTY_IMPOSSIBLE
+};
+
+int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io)
+{
+ struct lov_io *lio = lov_env_io(env);
+ struct lov_object *lov = cl2lov(obj);
+
+ INIT_LIST_HEAD(&lio->lis_active);
+ lov_io_slice_init(lio, lov, io);
+ if (io->ci_result == 0) {
+ io->ci_result = lov_io_subio_init(env, lio, io);
+ if (io->ci_result == 0) {
+ cl_io_slice_add(io, &lio->lis_cl, obj, &lov_io_ops);
+ atomic_inc(&lov->lo_active_ios);
+ }
+ }
+ return io->ci_result;
+}
+
+int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io)
+{
+ struct lov_object *lov = cl2lov(obj);
+ struct lov_io *lio = lov_env_io(env);
+ int result;
+
+ lio->lis_object = lov;
+ switch (io->ci_type) {
+ default:
+ LBUG();
+ case CIT_MISC:
+ case CIT_READ:
+ result = 0;
+ break;
+ case CIT_FSYNC:
+ case CIT_SETATTR:
+ result = +1;
+ break;
+ case CIT_WRITE:
+ result = -EBADF;
+ break;
+ case CIT_FAULT:
+ result = -EFAULT;
+ CERROR("Page fault on a file without stripes: "DFID"\n",
+ PFID(lu_object_fid(&obj->co_lu)));
+ break;
+ }
+ if (result == 0) {
+ cl_io_slice_add(io, &lio->lis_cl, obj, &lov_empty_io_ops);
+ atomic_inc(&lov->lo_active_ios);
+ }
+
+ io->ci_result = result < 0 ? result : 0;
+ return result != 0;
+}
+
+int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io)
+{
+ struct lov_object *lov = cl2lov(obj);
+ struct lov_io *lio = lov_env_io(env);
+ int result;
+
+ LASSERT(lov->lo_lsm != NULL);
+ lio->lis_object = lov;
+
+ switch (io->ci_type) {
+ default:
+ LASSERTF(0, "invalid type %d\n", io->ci_type);
+ case CIT_MISC:
+ case CIT_FSYNC:
+ result = +1;
+ break;
+ case CIT_SETATTR:
+ case CIT_READ:
+ case CIT_WRITE:
+ case CIT_FAULT:
+ /* TODO: need to restore the file. */
+ result = -EBADF;
+ break;
+ }
+ if (result == 0) {
+ cl_io_slice_add(io, &lio->lis_cl, obj, &lov_empty_io_ops);
+ atomic_inc(&lov->lo_active_ios);
+ }
+
+ io->ci_result = result < 0 ? result : 0;
+ return result != 0;
+}
+/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lov_lock.c b/drivers/staging/lustre/lustre/lov/lov_lock.c
new file mode 100644
index 0000000..ec297e8
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lov/lov_lock.c
@@ -0,0 +1,1218 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Implementation of cl_lock for LOV layer.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LOV
+
+#include "lov_cl_internal.h"
+
+/** \addtogroup lov
+ * @{
+ */
+
+static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
+ struct cl_lock *parent);
+
+static int lov_lock_unuse(const struct lu_env *env,
+ const struct cl_lock_slice *slice);
+/*****************************************************************************
+ *
+ * Lov lock operations.
+ *
+ */
+
+static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
+ struct cl_lock *parent,
+ struct lov_lock_sub *lls)
+{
+ struct lov_sublock_env *subenv;
+ struct lov_io *lio = lov_env_io(env);
+ struct cl_io *io = lio->lis_cl.cis_io;
+ struct lov_io_sub *sub;
+
+ subenv = &lov_env_session(env)->ls_subenv;
+
+ /*
+	 * FIXME: We tend to use the subio's env & io to call the sublock
+	 * lock operations because an osc lock sometimes stores some control
+	 * variables in the thread's IO information (currently only lockless
+	 * information). However, if the lock's host (object) is different
+	 * from the object of the current IO, we have no way to get the subenv
+	 * and subio because they are not initialized at all. As a temporary
+	 * fix, in this case we still borrow the parent's env to call sublock
+	 * operations.
+ */
+ if (!io || !cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
+ subenv->lse_env = env;
+ subenv->lse_io = io;
+ subenv->lse_sub = NULL;
+ } else {
+ sub = lov_sub_get(env, lio, lls->sub_stripe);
+ if (!IS_ERR(sub)) {
+ subenv->lse_env = sub->sub_env;
+ subenv->lse_io = sub->sub_io;
+ subenv->lse_sub = sub;
+ } else {
+			subenv = (void *)sub;
+ }
+ }
+ return subenv;
+}
+
+static void lov_sublock_env_put(struct lov_sublock_env *subenv)
+{
+ if (subenv && subenv->lse_sub)
+ lov_sub_put(subenv->lse_sub);
+}
+
+static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck,
+ struct cl_lock *sublock, int idx,
+ struct lov_lock_link *link)
+{
+ struct lovsub_lock *lsl;
+ struct cl_lock *parent = lck->lls_cl.cls_lock;
+ int rc;
+
+ LASSERT(cl_lock_is_mutexed(parent));
+ LASSERT(cl_lock_is_mutexed(sublock));
+
+ lsl = cl2sub_lock(sublock);
+ /*
+ * check that sub-lock doesn't have lock link to this top-lock.
+ */
+ LASSERT(lov_lock_link_find(env, lck, lsl) == NULL);
+ LASSERT(idx < lck->lls_nr);
+
+ lck->lls_sub[idx].sub_lock = lsl;
+ lck->lls_nr_filled++;
+ LASSERT(lck->lls_nr_filled <= lck->lls_nr);
+ list_add_tail(&link->lll_list, &lsl->lss_parents);
+ link->lll_idx = idx;
+ link->lll_super = lck;
+ cl_lock_get(parent);
+ lu_ref_add(&parent->cll_reference, "lov-child", sublock);
+ lck->lls_sub[idx].sub_flags |= LSF_HELD;
+ cl_lock_user_add(env, sublock);
+
+ rc = lov_sublock_modify(env, lck, lsl, &sublock->cll_descr, idx);
+ LASSERT(rc == 0); /* there is no way this can fail, currently */
+}
+
+static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
+ const struct cl_io *io,
+ struct lov_lock *lck,
+ int idx, struct lov_lock_link **out)
+{
+ struct cl_lock *sublock;
+ struct cl_lock *parent;
+ struct lov_lock_link *link;
+
+ LASSERT(idx < lck->lls_nr);
+
+ OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, __GFP_IO);
+ if (link != NULL) {
+ struct lov_sublock_env *subenv;
+ struct lov_lock_sub *lls;
+ struct cl_lock_descr *descr;
+
+ parent = lck->lls_cl.cls_lock;
+ lls = &lck->lls_sub[idx];
+ descr = &lls->sub_got;
+
+ subenv = lov_sublock_env_get(env, parent, lls);
+ if (!IS_ERR(subenv)) {
+			/* CAVEAT: Don't try to add a field in lov_lock_sub
+			 * to remember the subio. This is because a lock can
+			 * be cached, but this is not true for IO. This
+			 * further means a sublock might be referenced in a
+			 * different IO context. -jay */
+
+ sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
+ descr, "lov-parent", parent);
+ lov_sublock_env_put(subenv);
+ } else {
+			/* an error occurred */
+			sublock = (void *)subenv;
+ }
+
+ if (!IS_ERR(sublock))
+ *out = link;
+ else
+ OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
+ } else
+ sublock = ERR_PTR(-ENOMEM);
+ return sublock;
+}
+
+static void lov_sublock_unlock(const struct lu_env *env,
+ struct lovsub_lock *lsl,
+ struct cl_lock_closure *closure,
+ struct lov_sublock_env *subenv)
+{
+ lov_sublock_env_put(subenv);
+ lsl->lss_active = NULL;
+ cl_lock_disclosure(env, closure);
+}
+
+static int lov_sublock_lock(const struct lu_env *env,
+ struct lov_lock *lck,
+ struct lov_lock_sub *lls,
+ struct cl_lock_closure *closure,
+ struct lov_sublock_env **lsep)
+{
+ struct lovsub_lock *sublock;
+ struct cl_lock *child;
+ int result = 0;
+
+ LASSERT(list_empty(&closure->clc_list));
+
+ sublock = lls->sub_lock;
+ child = sublock->lss_cl.cls_lock;
+ result = cl_lock_closure_build(env, child, closure);
+ if (result == 0) {
+ struct cl_lock *parent = closure->clc_origin;
+
+ LASSERT(cl_lock_is_mutexed(child));
+ sublock->lss_active = parent;
+
+ if (unlikely((child->cll_state == CLS_FREEING) ||
+ (child->cll_flags & CLF_CANCELLED))) {
+ struct lov_lock_link *link;
+ /*
+ * we could race with lock deletion which temporarily
+ * put the lock in freeing state, bug 19080.
+ */
+ LASSERT(!(lls->sub_flags & LSF_HELD));
+
+ link = lov_lock_link_find(env, lck, sublock);
+ LASSERT(link != NULL);
+ lov_lock_unlink(env, link, sublock);
+ lov_sublock_unlock(env, sublock, closure, NULL);
+ lck->lls_cancel_race = 1;
+ result = CLO_REPEAT;
+ } else if (lsep) {
+ struct lov_sublock_env *subenv;
+ subenv = lov_sublock_env_get(env, parent, lls);
+ if (IS_ERR(subenv)) {
+ lov_sublock_unlock(env, sublock,
+ closure, NULL);
+ result = PTR_ERR(subenv);
+ } else {
+ *lsep = subenv;
+ }
+ }
+ }
+ return result;
+}
+
+/**
+ * Updates the result of a top-lock operation from a result of sub-lock
+ * sub-operations. Top-operations like lov_lock_{enqueue,use,unuse}() iterate
+ * over sub-locks and lov_subresult() is used to calculate return value of a
+ * top-operation. To this end, possible return values of sub-operations are
+ * ordered as
+ *
+ * - 0 success
+ * - CLO_WAIT wait for event
+ * - CLO_REPEAT repeat top-operation
+ * - -ve (a negative errno) fundamental error
+ *
+ * Top-level return code can only go down through this list. CLO_REPEAT
+ * overwrites CLO_WAIT, because lock mutex was released and sleeping condition
+ * has to be rechecked by the upper layer.
+ */
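+/*
+ * A purely illustrative walk through the ranking above (only the ordering
+ * CLO_WAIT < CLO_REPEAT asserted below matters, not the numeric values):
+ *
+ *   lov_subresult(0, CLO_WAIT)          == CLO_WAIT
+ *   lov_subresult(CLO_WAIT, CLO_REPEAT) == CLO_REPEAT
+ *   lov_subresult(CLO_REPEAT, -ENOMEM)  == -ENOMEM
+ *   lov_subresult(-ENOMEM, CLO_WAIT)    == -ENOMEM  (an error is never lost)
+ */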
+static int lov_subresult(int result, int rc)
+{
+ int result_rank;
+ int rc_rank;
+
+ LASSERTF(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT,
+		 "result = %d\n", result);
+ LASSERTF(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT,
+ "rc = %d\n", rc);
+ CLASSERT(CLO_WAIT < CLO_REPEAT);
+
+ /* calculate ranks in the ordering above */
+ result_rank = result < 0 ? 1 + CLO_REPEAT : result;
+ rc_rank = rc < 0 ? 1 + CLO_REPEAT : rc;
+
+ if (result_rank < rc_rank)
+ result = rc;
+ return result;
+}
+
+/**
+ * Creates sub-locks for a given lov_lock for the first time.
+ *
+ * Goes through all sub-objects of top-object, and creates sub-locks on every
+ * sub-object intersecting with top-lock extent. This is complicated by the
+ * fact that top-lock (that is being created) can be accessed concurrently
+ * through already created sub-locks (possibly shared with other top-locks).
+ */
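+/*
+ * For example (stripe geometry assumed purely for illustration): with two
+ * stripes of 1 MiB each, a top-lock covering file extent [0, 3 MiB)
+ * intersects stripe 0 at stripe-local [0, 2 MiB) and stripe 1 at
+ * stripe-local [0, 1 MiB), so two sub-lock descriptions are filled in.
+ */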
+static int lov_lock_sub_init(const struct lu_env *env,
+ struct lov_lock *lck, const struct cl_io *io)
+{
+ int result = 0;
+ int i;
+ int nr;
+ obd_off start;
+ obd_off end;
+ obd_off file_start;
+ obd_off file_end;
+
+ struct lov_object *loo = cl2lov(lck->lls_cl.cls_obj);
+ struct lov_layout_raid0 *r0 = lov_r0(loo);
+ struct cl_lock *parent = lck->lls_cl.cls_lock;
+
+ lck->lls_orig = parent->cll_descr;
+ file_start = cl_offset(lov2cl(loo), parent->cll_descr.cld_start);
+ file_end = cl_offset(lov2cl(loo), parent->cll_descr.cld_end + 1) - 1;
+
+ for (i = 0, nr = 0; i < r0->lo_nr; i++) {
+ /*
+		 * XXX for wide striping a smarter algorithm is desirable,
+		 * breaking out of the loop early.
+ */
+ if (lov_stripe_intersects(loo->lo_lsm, i,
+ file_start, file_end, &start, &end))
+ nr++;
+ }
+ LASSERT(nr > 0);
+	OBD_ALLOC_LARGE(lck->lls_sub, nr * sizeof(lck->lls_sub[0]));
+ if (lck->lls_sub == NULL)
+ return -ENOMEM;
+
+ lck->lls_nr = nr;
+ /*
+ * First, fill in sub-lock descriptions in
+ * lck->lls_sub[].sub_descr. They are used by lov_sublock_alloc()
+ * (called below in this function, and by lov_lock_enqueue()) to
+ * create sub-locks. At this moment, no other thread can access
+ * top-lock.
+ */
+ for (i = 0, nr = 0; i < r0->lo_nr; ++i) {
+ if (lov_stripe_intersects(loo->lo_lsm, i,
+ file_start, file_end, &start, &end)) {
+ struct cl_lock_descr *descr;
+
+ descr = &lck->lls_sub[nr].sub_descr;
+
+ LASSERT(descr->cld_obj == NULL);
+ descr->cld_obj = lovsub2cl(r0->lo_sub[i]);
+ descr->cld_start = cl_index(descr->cld_obj, start);
+ descr->cld_end = cl_index(descr->cld_obj, end);
+ descr->cld_mode = parent->cll_descr.cld_mode;
+ descr->cld_gid = parent->cll_descr.cld_gid;
+ descr->cld_enq_flags = parent->cll_descr.cld_enq_flags;
+ /* XXX has no effect */
+ lck->lls_sub[nr].sub_got = *descr;
+ lck->lls_sub[nr].sub_stripe = i;
+ nr++;
+ }
+ }
+ LASSERT(nr == lck->lls_nr);
+ /*
+ * Then, create sub-locks. Once at least one sub-lock was created,
+ * top-lock can be reached by other threads.
+ */
+ for (i = 0; i < lck->lls_nr; ++i) {
+ struct cl_lock *sublock;
+ struct lov_lock_link *link;
+
+ if (lck->lls_sub[i].sub_lock == NULL) {
+ sublock = lov_sublock_alloc(env, io, lck, i, &link);
+ if (IS_ERR(sublock)) {
+ result = PTR_ERR(sublock);
+ break;
+ }
+ cl_lock_get_trust(sublock);
+ cl_lock_mutex_get(env, sublock);
+ cl_lock_mutex_get(env, parent);
+ /*
+ * recheck under mutex that sub-lock wasn't created
+ * concurrently, and that top-lock is still alive.
+ */
+ if (lck->lls_sub[i].sub_lock == NULL &&
+ parent->cll_state < CLS_FREEING) {
+ lov_sublock_adopt(env, lck, sublock, i, link);
+ cl_lock_mutex_put(env, parent);
+ } else {
+ OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
+ cl_lock_mutex_put(env, parent);
+ cl_lock_unhold(env, sublock,
+ "lov-parent", parent);
+ }
+ cl_lock_mutex_put(env, sublock);
+ cl_lock_put(env, sublock);
+ }
+ }
+ /*
+ * Some sub-locks can be missing at this point. This is not a problem,
+	 * because enqueue will create them anyway. The main duty of this
+	 * function is to fill in sub-lock descriptions in a race-free manner.
+ */
+ return result;
+}
+
+static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck,
+ int i, int deluser, int rc)
+{
+ struct cl_lock *parent = lck->lls_cl.cls_lock;
+
+ LASSERT(cl_lock_is_mutexed(parent));
+
+ if (lck->lls_sub[i].sub_flags & LSF_HELD) {
+ struct cl_lock *sublock;
+ int dying;
+
+ LASSERT(lck->lls_sub[i].sub_lock != NULL);
+ sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
+ LASSERT(cl_lock_is_mutexed(sublock));
+
+ lck->lls_sub[i].sub_flags &= ~LSF_HELD;
+ if (deluser)
+ cl_lock_user_del(env, sublock);
+ /*
+ * If the last hold is released, and cancellation is pending
+ * for a sub-lock, release parent mutex, to avoid keeping it
+ * while sub-lock is being paged out.
+ */
+ dying = (sublock->cll_descr.cld_mode == CLM_PHANTOM ||
+ sublock->cll_descr.cld_mode == CLM_GROUP ||
+ (sublock->cll_flags & (CLF_CANCELPEND|CLF_DOOMED))) &&
+ sublock->cll_holds == 1;
+ if (dying)
+ cl_lock_mutex_put(env, parent);
+ cl_lock_unhold(env, sublock, "lov-parent", parent);
+ if (dying) {
+ cl_lock_mutex_get(env, parent);
+ rc = lov_subresult(rc, CLO_REPEAT);
+ }
+ /*
+ * From now on lck->lls_sub[i].sub_lock is a "weak" pointer,
+ * not backed by a reference on a
+ * sub-lock. lovsub_lock_delete() will clear
+ * lck->lls_sub[i].sub_lock under semaphores, just before
+ * sub-lock is destroyed.
+ */
+ }
+ return rc;
+}
+
+static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck,
+ int i)
+{
+ struct cl_lock *parent = lck->lls_cl.cls_lock;
+
+ LASSERT(cl_lock_is_mutexed(parent));
+
+ if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) {
+ struct cl_lock *sublock;
+
+ LASSERT(lck->lls_sub[i].sub_lock != NULL);
+ sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
+ LASSERT(cl_lock_is_mutexed(sublock));
+ LASSERT(sublock->cll_state != CLS_FREEING);
+
+ lck->lls_sub[i].sub_flags |= LSF_HELD;
+
+ cl_lock_get_trust(sublock);
+ cl_lock_hold_add(env, sublock, "lov-parent", parent);
+ cl_lock_user_add(env, sublock);
+ cl_lock_put(env, sublock);
+ }
+}
+
+static void lov_lock_fini(const struct lu_env *env,
+ struct cl_lock_slice *slice)
+{
+ struct lov_lock *lck;
+ int i;
+
+ lck = cl2lov_lock(slice);
+ LASSERT(lck->lls_nr_filled == 0);
+ if (lck->lls_sub != NULL) {
+ for (i = 0; i < lck->lls_nr; ++i)
+ /*
+			 * No sub-locks exist at this point, as a sub-lock has
+			 * a reference on its parent.
+ */
+ LASSERT(lck->lls_sub[i].sub_lock == NULL);
+ OBD_FREE_LARGE(lck->lls_sub,
+			       lck->lls_nr * sizeof(lck->lls_sub[0]));
+ }
+ OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
+}
+
+static int lov_lock_enqueue_wait(const struct lu_env *env,
+ struct lov_lock *lck,
+ struct cl_lock *sublock)
+{
+ struct cl_lock *lock = lck->lls_cl.cls_lock;
+ int result;
+
+ LASSERT(cl_lock_is_mutexed(lock));
+
+ cl_lock_mutex_put(env, lock);
+ result = cl_lock_enqueue_wait(env, sublock, 0);
+ cl_lock_mutex_get(env, lock);
+ return result ?: CLO_REPEAT;
+}
+
+/**
+ * Tries to advance a state machine of a given sub-lock toward enqueuing of
+ * the top-lock.
+ *
+ * \retval 0 if state-transition can proceed
+ * \retval -ve otherwise.
+ */
+static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck,
+ struct cl_lock *sublock,
+ struct cl_io *io, __u32 enqflags, int last)
+{
+ int result;
+
+ /* first, try to enqueue a sub-lock ... */
+ result = cl_enqueue_try(env, sublock, io, enqflags);
+ if ((sublock->cll_state == CLS_ENQUEUED) && !(enqflags & CEF_AGL)) {
+ /* if it is enqueued, try to `wait' on it---maybe it's already
+ * granted */
+ result = cl_wait_try(env, sublock);
+ if (result == CLO_REENQUEUED)
+ result = CLO_WAIT;
+ }
+ /*
+ * If CEF_ASYNC flag is set, then all sub-locks can be enqueued in
+ * parallel, otherwise---enqueue has to wait until sub-lock is granted
+ * before proceeding to the next one.
+ */
+ if ((result == CLO_WAIT) && (sublock->cll_state <= CLS_HELD) &&
+ (enqflags & CEF_ASYNC) && (!last || (enqflags & CEF_AGL)))
+ result = 0;
+ return result;
+}
+
+/**
+ * Helper function for lov_lock_enqueue() that creates missing sub-lock.
+ */
+static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
+ struct cl_io *io, struct lov_lock *lck, int idx)
+{
+ struct lov_lock_link *link;
+ struct cl_lock *sublock;
+ int result;
+
+ LASSERT(parent->cll_depth == 1);
+ cl_lock_mutex_put(env, parent);
+ sublock = lov_sublock_alloc(env, io, lck, idx, &link);
+ if (!IS_ERR(sublock))
+ cl_lock_mutex_get(env, sublock);
+ cl_lock_mutex_get(env, parent);
+
+ if (!IS_ERR(sublock)) {
+ cl_lock_get_trust(sublock);
+ if (parent->cll_state == CLS_QUEUING &&
+ lck->lls_sub[idx].sub_lock == NULL) {
+ lov_sublock_adopt(env, lck, sublock, idx, link);
+ } else {
+ OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
+			/* another thread allocated the sub-lock, or the
+			 * enqueue is no longer going on */
+ cl_lock_mutex_put(env, parent);
+ cl_lock_unhold(env, sublock, "lov-parent", parent);
+ cl_lock_mutex_get(env, parent);
+ }
+ cl_lock_mutex_put(env, sublock);
+ cl_lock_put(env, sublock);
+ result = CLO_REPEAT;
+ } else
+ result = PTR_ERR(sublock);
+ return result;
+}
+
+/**
+ * Implementation of cl_lock_operations::clo_enqueue() for lov layer. This
+ * function is rather subtle, as it enqueues top-lock (i.e., advances top-lock
+ * state machine from CLS_QUEUING to CLS_ENQUEUED states) by juggling sub-lock
+ * state machines in the face of sub-locks sharing (by multiple top-locks),
+ * and concurrent sub-lock cancellations.
+ */
+static int lov_lock_enqueue(const struct lu_env *env,
+ const struct cl_lock_slice *slice,
+ struct cl_io *io, __u32 enqflags)
+{
+ struct cl_lock *lock = slice->cls_lock;
+ struct lov_lock *lck = cl2lov_lock(slice);
+ struct cl_lock_closure *closure = lov_closure_get(env, lock);
+ int i;
+ int result;
+ enum cl_lock_state minstate;
+
+ for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
+ int rc;
+ struct lovsub_lock *sub;
+ struct lov_lock_sub *lls;
+ struct cl_lock *sublock;
+ struct lov_sublock_env *subenv;
+
+ if (lock->cll_state != CLS_QUEUING) {
+ /*
+			 * Lock might have left QUEUING state if the previous
+			 * iteration released its mutex. Stop enqueuing in this
+			 * case and let the upper layer decide what to do.
+ */
+ LASSERT(i > 0 && result != 0);
+ break;
+ }
+
+ lls = &lck->lls_sub[i];
+ sub = lls->sub_lock;
+ /*
+ * Sub-lock might have been canceled, while top-lock was
+ * cached.
+ */
+ if (sub == NULL) {
+ result = lov_sublock_fill(env, lock, io, lck, i);
+ /* lov_sublock_fill() released @lock mutex,
+ * restart. */
+ break;
+ }
+ sublock = sub->lss_cl.cls_lock;
+ rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
+ if (rc == 0) {
+ lov_sublock_hold(env, lck, i);
+ rc = lov_lock_enqueue_one(subenv->lse_env, lck, sublock,
+ subenv->lse_io, enqflags,
+ i == lck->lls_nr - 1);
+ minstate = min(minstate, sublock->cll_state);
+ if (rc == CLO_WAIT) {
+ switch (sublock->cll_state) {
+ case CLS_QUEUING:
+ /* take recursive mutex, the lock is
+ * released in lov_lock_enqueue_wait.
+ */
+ cl_lock_mutex_get(env, sublock);
+ lov_sublock_unlock(env, sub, closure,
+ subenv);
+ rc = lov_lock_enqueue_wait(env, lck,
+ sublock);
+ break;
+ case CLS_CACHED:
+ cl_lock_get(sublock);
+ /* take recursive mutex of sublock */
+ cl_lock_mutex_get(env, sublock);
+				/* need to release all locks in the closure,
+				 * otherwise it may deadlock. LU-2683. */
+ lov_sublock_unlock(env, sub, closure,
+ subenv);
+ /* sublock and parent are held. */
+ rc = lov_sublock_release(env, lck, i,
+ 1, rc);
+ cl_lock_mutex_put(env, sublock);
+ cl_lock_put(env, sublock);
+ break;
+ default:
+ lov_sublock_unlock(env, sub, closure,
+ subenv);
+ break;
+ }
+ } else {
+ LASSERT(sublock->cll_conflict == NULL);
+ lov_sublock_unlock(env, sub, closure, subenv);
+ }
+ }
+ result = lov_subresult(result, rc);
+ if (result != 0)
+ break;
+ }
+ cl_lock_closure_fini(closure);
+ return result ?: minstate >= CLS_ENQUEUED ? 0 : CLO_WAIT;
+}
+
+static int lov_lock_unuse(const struct lu_env *env,
+ const struct cl_lock_slice *slice)
+{
+ struct lov_lock *lck = cl2lov_lock(slice);
+ struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
+ int i;
+ int result;
+
+ for (result = 0, i = 0; i < lck->lls_nr; ++i) {
+ int rc;
+ struct lovsub_lock *sub;
+ struct cl_lock *sublock;
+ struct lov_lock_sub *lls;
+ struct lov_sublock_env *subenv;
+
+		/* top-lock state cannot change concurrently, because a single
+		 * thread (the one that released the last hold) carries
+		 * unlocking through to completion. */
+ LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
+ lls = &lck->lls_sub[i];
+ sub = lls->sub_lock;
+ if (sub == NULL)
+ continue;
+
+ sublock = sub->lss_cl.cls_lock;
+ rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
+ if (rc == 0) {
+ if (lls->sub_flags & LSF_HELD) {
+ LASSERT(sublock->cll_state == CLS_HELD ||
+ sublock->cll_state == CLS_ENQUEUED);
+ rc = cl_unuse_try(subenv->lse_env, sublock);
+ rc = lov_sublock_release(env, lck, i, 0, rc);
+ }
+ lov_sublock_unlock(env, sub, closure, subenv);
+ }
+ result = lov_subresult(result, rc);
+ }
+
+ if (result == 0 && lck->lls_cancel_race) {
+ lck->lls_cancel_race = 0;
+ result = -ESTALE;
+ }
+ cl_lock_closure_fini(closure);
+ return result;
+}
+
+
+static void lov_lock_cancel(const struct lu_env *env,
+ const struct cl_lock_slice *slice)
+{
+ struct lov_lock *lck = cl2lov_lock(slice);
+ struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
+ int i;
+ int result;
+
+ for (result = 0, i = 0; i < lck->lls_nr; ++i) {
+ int rc;
+ struct lovsub_lock *sub;
+ struct cl_lock *sublock;
+ struct lov_lock_sub *lls;
+ struct lov_sublock_env *subenv;
+
+		/* top-lock state cannot change concurrently, because a single
+		 * thread (the one that released the last hold) carries
+		 * unlocking through to completion. */
+ lls = &lck->lls_sub[i];
+ sub = lls->sub_lock;
+ if (sub == NULL)
+ continue;
+
+ sublock = sub->lss_cl.cls_lock;
+ rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
+ if (rc == 0) {
+ if (!(lls->sub_flags & LSF_HELD)) {
+ lov_sublock_unlock(env, sub, closure, subenv);
+ continue;
+ }
+
+			switch (sublock->cll_state) {
+ case CLS_HELD:
+ rc = cl_unuse_try(subenv->lse_env, sublock);
+ lov_sublock_release(env, lck, i, 0, 0);
+ break;
+ default:
+ lov_sublock_release(env, lck, i, 1, 0);
+ break;
+ }
+ lov_sublock_unlock(env, sub, closure, subenv);
+ }
+
+ if (rc == CLO_REPEAT) {
+ --i;
+ continue;
+ }
+
+ result = lov_subresult(result, rc);
+ }
+
+ if (result)
+ CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
+ "lov_lock_cancel fails with %d.\n", result);
+
+ cl_lock_closure_fini(closure);
+}
+
+static int lov_lock_wait(const struct lu_env *env,
+ const struct cl_lock_slice *slice)
+{
+ struct lov_lock *lck = cl2lov_lock(slice);
+ struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
+ enum cl_lock_state minstate;
+ int reenqueued;
+ int result;
+ int i;
+
+again:
+ for (result = 0, minstate = CLS_FREEING, i = 0, reenqueued = 0;
+ i < lck->lls_nr; ++i) {
+ int rc;
+ struct lovsub_lock *sub;
+ struct cl_lock *sublock;
+ struct lov_lock_sub *lls;
+ struct lov_sublock_env *subenv;
+
+ lls = &lck->lls_sub[i];
+ sub = lls->sub_lock;
+ LASSERT(sub != NULL);
+ sublock = sub->lss_cl.cls_lock;
+ rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
+ if (rc == 0) {
+ LASSERT(sublock->cll_state >= CLS_ENQUEUED);
+ if (sublock->cll_state < CLS_HELD)
+ rc = cl_wait_try(env, sublock);
+
+ minstate = min(minstate, sublock->cll_state);
+ lov_sublock_unlock(env, sub, closure, subenv);
+ }
+ if (rc == CLO_REENQUEUED) {
+ reenqueued++;
+ rc = 0;
+ }
+ result = lov_subresult(result, rc);
+ if (result != 0)
+ break;
+ }
+	/* Each sublock can only be re-enqueued once, so this will not loop
+	 * forever. */
+ if (result == 0 && reenqueued != 0)
+ goto again;
+ cl_lock_closure_fini(closure);
+ return result ?: minstate >= CLS_HELD ? 0 : CLO_WAIT;
+}
+
+static int lov_lock_use(const struct lu_env *env,
+ const struct cl_lock_slice *slice)
+{
+ struct lov_lock *lck = cl2lov_lock(slice);
+ struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
+ int result;
+ int i;
+
+ LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
+
+ for (result = 0, i = 0; i < lck->lls_nr; ++i) {
+ int rc;
+ struct lovsub_lock *sub;
+ struct cl_lock *sublock;
+ struct lov_lock_sub *lls;
+ struct lov_sublock_env *subenv;
+
+ LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
+
+ lls = &lck->lls_sub[i];
+ sub = lls->sub_lock;
+ if (sub == NULL) {
+ /*
+ * Sub-lock might have been canceled, while top-lock was
+ * cached.
+ */
+ result = -ESTALE;
+ break;
+ }
+
+ sublock = sub->lss_cl.cls_lock;
+ rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
+ if (rc == 0) {
+ LASSERT(sublock->cll_state != CLS_FREEING);
+ lov_sublock_hold(env, lck, i);
+ if (sublock->cll_state == CLS_CACHED) {
+ rc = cl_use_try(subenv->lse_env, sublock, 0);
+ if (rc != 0)
+ rc = lov_sublock_release(env, lck,
+ i, 1, rc);
+ } else if (sublock->cll_state == CLS_NEW) {
+ /* Sub-lock might have been canceled, while
+ * top-lock was cached. */
+ result = -ESTALE;
+ lov_sublock_release(env, lck, i, 1, result);
+ }
+ lov_sublock_unlock(env, sub, closure, subenv);
+ }
+ result = lov_subresult(result, rc);
+ if (result != 0)
+ break;
+ }
+
+ if (lck->lls_cancel_race) {
+ /*
+		 * If unlocking happened at the same time, the sublock state
+		 * should be FREEING, and lov_sublock_lock() should return
+		 * CLO_REPEAT. In this case, this function returns -ESTALE so
+		 * that the upper layer resets the lock state to NEW.
+ */
+ lck->lls_cancel_race = 0;
+ LASSERT(result != 0);
+ result = -ESTALE;
+ }
+ cl_lock_closure_fini(closure);
+ return result;
+}
+
+#if 0
+static int lock_lock_multi_match()
+{
+ struct cl_lock *lock = slice->cls_lock;
+ struct cl_lock_descr *subneed = &lov_env_info(env)->lti_ldescr;
+ struct lov_object *loo = cl2lov(lov->lls_cl.cls_obj);
+ struct lov_layout_raid0 *r0 = lov_r0(loo);
+ struct lov_lock_sub *sub;
+ struct cl_object *subobj;
+ obd_off fstart;
+ obd_off fend;
+ obd_off start;
+ obd_off end;
+ int i;
+
+ fstart = cl_offset(need->cld_obj, need->cld_start);
+ fend = cl_offset(need->cld_obj, need->cld_end + 1) - 1;
+ subneed->cld_mode = need->cld_mode;
+ cl_lock_mutex_get(env, lock);
+ for (i = 0; i < lov->lls_nr; ++i) {
+ sub = &lov->lls_sub[i];
+ if (sub->sub_lock == NULL)
+ continue;
+ subobj = sub->sub_descr.cld_obj;
+ if (!lov_stripe_intersects(loo->lo_lsm, sub->sub_stripe,
+ fstart, fend, &start, &end))
+ continue;
+ subneed->cld_start = cl_index(subobj, start);
+ subneed->cld_end = cl_index(subobj, end);
+ subneed->cld_obj = subobj;
+ if (!cl_lock_ext_match(&sub->sub_got, subneed)) {
+ result = 0;
+ break;
+ }
+ }
+ cl_lock_mutex_put(env, lock);
+}
+#endif
+
+/**
+ * Check if the extent region \a descr is covered by \a child against the
+ * specific \a stripe.
+ */
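+/*
+ * E.g. (geometry assumed purely for illustration): with two stripes of
+ * 1 MiB each, a descr covering file extent [2 MiB, 2.5 MiB) lies entirely
+ * within stripe 0 (stripe-local [1 MiB, 1.5 MiB)), so for stripe == 0 it is
+ * converted into the child's index space and compared against \a child; an
+ * extent spanning more than one stripe can never match a single stripe.
+ */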
+static int lov_lock_stripe_is_matching(const struct lu_env *env,
+ struct lov_object *lov, int stripe,
+ const struct cl_lock_descr *child,
+ const struct cl_lock_descr *descr)
+{
+ struct lov_stripe_md *lsm = lov->lo_lsm;
+ obd_off start;
+ obd_off end;
+ int result;
+
+ if (lov_r0(lov)->lo_nr == 1)
+ return cl_lock_ext_match(child, descr);
+
+ /*
+ * For a multi-stripes object:
+ * - make sure the descr only covers child's stripe, and
+ * - check if extent is matching.
+ */
+ start = cl_offset(&lov->lo_cl, descr->cld_start);
+ end = cl_offset(&lov->lo_cl, descr->cld_end + 1) - 1;
+ result = end - start <= lsm->lsm_stripe_size &&
+ stripe == lov_stripe_number(lsm, start) &&
+ stripe == lov_stripe_number(lsm, end);
+ if (result) {
+ struct cl_lock_descr *subd = &lov_env_info(env)->lti_ldescr;
+ obd_off sub_start;
+ obd_off sub_end;
+
+ subd->cld_obj = NULL; /* don't need sub object at all */
+ subd->cld_mode = descr->cld_mode;
+ subd->cld_gid = descr->cld_gid;
+ result = lov_stripe_intersects(lsm, stripe, start, end,
+ &sub_start, &sub_end);
+ LASSERT(result);
+ subd->cld_start = cl_index(child->cld_obj, sub_start);
+ subd->cld_end = cl_index(child->cld_obj, sub_end);
+ result = cl_lock_ext_match(child, subd);
+ }
+ return result;
+}
+
+/**
+ * An implementation of cl_lock_operations::clo_fits_into() method.
+ *
+ * Checks whether a lock (given by \a slice) is suitable for \a
+ * io. Multi-stripe locks can be used only for "quick" io, like truncate, or
+ * O_APPEND write.
+ *
+ * \see ccc_lock_fits_into().
+ */
+static int lov_lock_fits_into(const struct lu_env *env,
+ const struct cl_lock_slice *slice,
+ const struct cl_lock_descr *need,
+ const struct cl_io *io)
+{
+ struct lov_lock *lov = cl2lov_lock(slice);
+ struct lov_object *obj = cl2lov(slice->cls_obj);
+ int result;
+
+ LASSERT(cl_object_same(need->cld_obj, slice->cls_obj));
+ LASSERT(lov->lls_nr > 0);
+
+	/* for the top lock, it is necessary to match the enqueue flags,
+	 * otherwise it will run into problems if a sublock is missing and
+	 * must be re-enqueued. */
+ if (need->cld_enq_flags != lov->lls_orig.cld_enq_flags)
+ return 0;
+
+ if (need->cld_mode == CLM_GROUP)
+ /*
+		 * always allow matching a group lock.
+ */
+ result = cl_lock_ext_match(&lov->lls_orig, need);
+ else if (lov->lls_nr == 1) {
+ struct cl_lock_descr *got = &lov->lls_sub[0].sub_got;
+ result = lov_lock_stripe_is_matching(env,
+ cl2lov(slice->cls_obj),
+ lov->lls_sub[0].sub_stripe,
+ got, need);
+ } else if (io->ci_type != CIT_SETATTR && io->ci_type != CIT_MISC &&
+ !cl_io_is_append(io) && need->cld_mode != CLM_PHANTOM)
+ /*
+ * Multi-stripe locks are only suitable for `quick' IO and for
+ * glimpse.
+ */
+ result = 0;
+ else
+ /*
+ * Most general case: multi-stripe existing lock, and
+ * (potentially) multi-stripe @need lock. Check that @need is
+ * covered by @lov's sub-locks.
+ *
+ * For now, ignore lock expansions made by the server, and
+ * match against original lock extent.
+ */
+ result = cl_lock_ext_match(&lov->lls_orig, need);
+ CDEBUG(D_DLMTRACE, DDESCR"/"DDESCR" %d %d/%d: %d\n",
+ PDESCR(&lov->lls_orig), PDESCR(&lov->lls_sub[0].sub_got),
+ lov->lls_sub[0].sub_stripe, lov->lls_nr, lov_r0(obj)->lo_nr,
+ result);
+ return result;
+}
+
+void lov_lock_unlink(const struct lu_env *env,
+ struct lov_lock_link *link, struct lovsub_lock *sub)
+{
+ struct lov_lock *lck = link->lll_super;
+ struct cl_lock *parent = lck->lls_cl.cls_lock;
+
+ LASSERT(cl_lock_is_mutexed(parent));
+ LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
+
+ list_del_init(&link->lll_list);
+ LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
+ /* yank this sub-lock from parent's array */
+ lck->lls_sub[link->lll_idx].sub_lock = NULL;
+ LASSERT(lck->lls_nr_filled > 0);
+ lck->lls_nr_filled--;
+ lu_ref_del(&parent->cll_reference, "lov-child", sub->lss_cl.cls_lock);
+ cl_lock_put(env, parent);
+ OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
+}
+
+struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
+ struct lov_lock *lck,
+ struct lovsub_lock *sub)
+{
+ struct lov_lock_link *scan;
+
+ LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
+
+ list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+ if (scan->lll_super == lck)
+ return scan;
+ }
+ return NULL;
+}
+
+/**
+ * An implementation of cl_lock_operations::clo_delete() method. This is
+ * invoked for "top-to-bottom" delete, when lock destruction starts from the
+ * top-lock, e.g., as a result of inode destruction.
+ *
+ * Unlinks top-lock from all its sub-locks. Sub-locks are not deleted there:
+ * this is done separately elsewhere:
+ *
+ * - for inode destruction, lov_object_delete() calls cl_object_kill() for
+ * each sub-object, purging its locks;
+ *
+ * - in other cases (e.g., a fatal error with a top-lock) sub-locks are
+ * left in the cache.
+ */
+static void lov_lock_delete(const struct lu_env *env,
+ const struct cl_lock_slice *slice)
+{
+ struct lov_lock *lck = cl2lov_lock(slice);
+ struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
+ struct lov_lock_link *link;
+ int rc;
+ int i;
+
+ LASSERT(slice->cls_lock->cll_state == CLS_FREEING);
+
+ for (i = 0; i < lck->lls_nr; ++i) {
+ struct lov_lock_sub *lls = &lck->lls_sub[i];
+ struct lovsub_lock *lsl = lls->sub_lock;
+
+ if (lsl == NULL) /* already removed */
+ continue;
+
+ rc = lov_sublock_lock(env, lck, lls, closure, NULL);
+ if (rc == CLO_REPEAT) {
+ --i;
+ continue;
+ }
+
+ LASSERT(rc == 0);
+ LASSERT(lsl->lss_cl.cls_lock->cll_state < CLS_FREEING);
+
+ if (lls->sub_flags & LSF_HELD)
+ lov_sublock_release(env, lck, i, 1, 0);
+
+ link = lov_lock_link_find(env, lck, lsl);
+ LASSERT(link != NULL);
+ lov_lock_unlink(env, link, lsl);
+ LASSERT(lck->lls_sub[i].sub_lock == NULL);
+
+ lov_sublock_unlock(env, lsl, closure, NULL);
+ }
+
+ cl_lock_closure_fini(closure);
+}
+
+static int lov_lock_print(const struct lu_env *env, void *cookie,
+ lu_printer_t p, const struct cl_lock_slice *slice)
+{
+ struct lov_lock *lck = cl2lov_lock(slice);
+ int i;
+
+ (*p)(env, cookie, "%d\n", lck->lls_nr);
+ for (i = 0; i < lck->lls_nr; ++i) {
+ struct lov_lock_sub *sub;
+
+ sub = &lck->lls_sub[i];
+ (*p)(env, cookie, " %d %x: ", i, sub->sub_flags);
+ if (sub->sub_lock != NULL)
+ cl_lock_print(env, cookie, p,
+ sub->sub_lock->lss_cl.cls_lock);
+ else
+ (*p)(env, cookie, "---\n");
+ }
+ return 0;
+}
+
+static const struct cl_lock_operations lov_lock_ops = {
+ .clo_fini = lov_lock_fini,
+ .clo_enqueue = lov_lock_enqueue,
+ .clo_wait = lov_lock_wait,
+ .clo_use = lov_lock_use,
+ .clo_unuse = lov_lock_unuse,
+ .clo_cancel = lov_lock_cancel,
+ .clo_fits_into = lov_lock_fits_into,
+ .clo_delete = lov_lock_delete,
+ .clo_print = lov_lock_print
+};
+
+int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
+ struct cl_lock *lock, const struct cl_io *io)
+{
+ struct lov_lock *lck;
+ int result;
+
+ OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, __GFP_IO);
+ if (lck != NULL) {
+ cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
+ result = lov_lock_sub_init(env, lck, io);
+ } else
+ result = -ENOMEM;
+ return result;
+}
+
+static void lov_empty_lock_fini(const struct lu_env *env,
+ struct cl_lock_slice *slice)
+{
+ struct lov_lock *lck = cl2lov_lock(slice);
+ OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
+}
+
+static int lov_empty_lock_print(const struct lu_env *env, void *cookie,
+ lu_printer_t p, const struct cl_lock_slice *slice)
+{
+ (*p)(env, cookie, "empty\n");
+ return 0;
+}
+
+/* XXX: more methods will be added later. */
+static const struct cl_lock_operations lov_empty_lock_ops = {
+ .clo_fini = lov_empty_lock_fini,
+ .clo_print = lov_empty_lock_print
+};
+
+int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
+ struct cl_lock *lock, const struct cl_io *io)
+{
+ struct lov_lock *lck;
+ int result = -ENOMEM;
+
+ OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, __GFP_IO);
+ if (lck != NULL) {
+ cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);
+ lck->lls_orig = lock->cll_descr;
+ result = 0;
+ }
+ return result;
+}
+
+static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
+ struct cl_lock *parent)
+{
+ struct cl_lock_closure *closure;
+
+ closure = &lov_env_info(env)->lti_closure;
+ LASSERT(list_empty(&closure->clc_list));
+ cl_lock_closure_init(env, closure, parent, 1);
+ return closure;
+}
+
+
+/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lov_log.c b/drivers/staging/lustre/lustre/lov/lov_log.c
new file mode 100644
index 0000000..3eedd93
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lov/lov_log.c
@@ -0,0 +1,272 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/lov/lov_log.c
+ *
+ * Author: Phil Schwan <phil@clusterfs.com>
+ * Author: Peter Braam <braam@clusterfs.com>
+ * Author: Mike Shaver <shaver@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LOV
+#include <linux/libcfs/libcfs.h>
+
+#include <obd_support.h>
+#include <lustre_lib.h>
+#include <lustre_net.h>
+#include <lustre/lustre_idl.h>
+#include <lustre_dlm.h>
+#include <lustre_mds.h>
+#include <obd_class.h>
+#include <obd_lov.h>
+#include <obd_ost.h>
+#include <lprocfs_status.h>
+#include <lustre_log.h>
+
+#include "lov_internal.h"
+
+/* Add log records for each OSC that this object is striped over, and return
+ * cookies for each one. We _would_ have a nice abstraction here, except that
+ * we need to keep cookies in stripe order, even if some are NULL, so that
+ * the right cookies are passed back to the right OSTs at the client side.
+ * Unset cookies should be all-zero (which will never occur naturally). */
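+/*
+ * For instance (purely illustrative): for a 3-stripe file where the add for
+ * stripe 1 fails, the returned array is { cookie0, <zeroed>, cookie2 }, so
+ * each OST still finds its cookie at its own stripe index.
+ */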
+static int lov_llog_origin_add(const struct lu_env *env,
+ struct llog_ctxt *ctxt,
+ struct llog_rec_hdr *rec,
+ struct lov_stripe_md *lsm,
+ struct llog_cookie *logcookies, int numcookies)
+{
+ struct obd_device *obd = ctxt->loc_obd;
+ struct lov_obd *lov = &obd->u.lov;
+ int i, rc = 0, cookies = 0;
+
+ LASSERTF(logcookies && numcookies >= lsm->lsm_stripe_count,
+		 "logcookies %p, numcookies %d lsm->lsm_stripe_count %d\n",
+ logcookies, numcookies, lsm->lsm_stripe_count);
+
+ for (i = 0; i < lsm->lsm_stripe_count; i++) {
+ struct lov_oinfo *loi = lsm->lsm_oinfo[i];
+ struct obd_device *child =
+ lov->lov_tgts[loi->loi_ost_idx]->ltd_exp->exp_obd;
+ struct llog_ctxt *cctxt = llog_get_context(child, ctxt->loc_idx);
+
+ /* fill mds unlink/setattr log record */
+ switch (rec->lrh_type) {
+ case MDS_UNLINK_REC: {
+ struct llog_unlink_rec *lur = (struct llog_unlink_rec *)rec;
+ lur->lur_oid = ostid_id(&loi->loi_oi);
+ lur->lur_oseq = (__u32)ostid_seq(&loi->loi_oi);
+ break;
+ }
+ case MDS_SETATTR64_REC: {
+ struct llog_setattr64_rec *lsr = (struct llog_setattr64_rec *)rec;
+ lsr->lsr_oi = loi->loi_oi;
+ break;
+ }
+ default:
+ break;
+ }
+
+ /* inject error in llog_obd_add() below */
+ if (OBD_FAIL_CHECK(OBD_FAIL_MDS_FAIL_LOV_LOG_ADD)) {
+ llog_ctxt_put(cctxt);
+ cctxt = NULL;
+ }
+ rc = llog_obd_add(env, cctxt, rec, NULL, logcookies + cookies,
+ numcookies - cookies);
+ llog_ctxt_put(cctxt);
+ if (rc < 0) {
+ CERROR("Can't add llog (rc = %d) for stripe %d\n",
+ rc, cookies);
+ memset(logcookies + cookies, 0,
+ sizeof(struct llog_cookie));
+ rc = 1; /* skip this cookie */
+ }
+ /* Note that rc is always 1 if llog_obd_add was successful */
+ cookies += rc;
+ }
+ return cookies;
+}
+
+static int lov_llog_origin_connect(struct llog_ctxt *ctxt,
+ struct llog_logid *logid,
+ struct llog_gen *gen,
+ struct obd_uuid *uuid)
+{
+ struct obd_device *obd = ctxt->loc_obd;
+ struct lov_obd *lov = &obd->u.lov;
+ int i, rc = 0, err = 0;
+
+ obd_getref(obd);
+ for (i = 0; i < lov->desc.ld_tgt_count; i++) {
+ struct obd_device *child;
+ struct llog_ctxt *cctxt;
+
+ if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
+ continue;
+ if (uuid && !obd_uuid_equals(uuid, &lov->lov_tgts[i]->ltd_uuid))
+ continue;
+ CDEBUG(D_CONFIG, "connect %d/%d\n", i, lov->desc.ld_tgt_count);
+ child = lov->lov_tgts[i]->ltd_exp->exp_obd;
+ cctxt = llog_get_context(child, ctxt->loc_idx);
+ rc = llog_connect(cctxt, logid, gen, uuid);
+ llog_ctxt_put(cctxt);
+
+ if (rc) {
+ CERROR("error osc_llog_connect tgt %d (%d)\n", i, rc);
+ if (!err)
+ err = rc;
+ }
+ }
+ obd_putref(obd);
+
+ return err;
+}
+
+/* the replicators commit callback */
+static int lov_llog_repl_cancel(const struct lu_env *env,
+ struct llog_ctxt *ctxt,
+ struct lov_stripe_md *lsm,
+ int count, struct llog_cookie *cookies,
+ int flags)
+{
+ struct lov_obd *lov;
+ struct obd_device *obd = ctxt->loc_obd;
+ int rc = 0, i;
+
+ LASSERT(lsm != NULL);
+ LASSERT(count == lsm->lsm_stripe_count);
+
+ lov = &obd->u.lov;
+ obd_getref(obd);
+ for (i = 0; i < count; i++, cookies++) {
+ struct lov_oinfo *loi = lsm->lsm_oinfo[i];
+ struct obd_device *child =
+ lov->lov_tgts[loi->loi_ost_idx]->ltd_exp->exp_obd;
+ struct llog_ctxt *cctxt =
+ llog_get_context(child, ctxt->loc_idx);
+ int err;
+
+ err = llog_cancel(env, cctxt, NULL, 1, cookies, flags);
+ llog_ctxt_put(cctxt);
+ if (err && lov->lov_tgts[loi->loi_ost_idx]->ltd_active) {
+ CERROR("%s: objid "DOSTID" subobj "DOSTID
+ " on OST idx %d: rc = %d\n",
+ obd->obd_name, POSTID(&lsm->lsm_oi),
+ POSTID(&loi->loi_oi), loi->loi_ost_idx, err);
+ if (!rc)
+ rc = err;
+ }
+ }
+ obd_putref(obd);
+ return rc;
+}
+
+static struct llog_operations lov_mds_ost_orig_logops = {
+ .lop_obd_add = lov_llog_origin_add,
+ .lop_connect = lov_llog_origin_connect,
+};
+
+static struct llog_operations lov_size_repl_logops = {
+ .lop_cancel = lov_llog_repl_cancel,
+};
+
+int lov_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
+ struct obd_device *disk_obd, int *index)
+{
+ struct lov_obd *lov = &obd->u.lov;
+ struct obd_device *child;
+ int i, rc = 0;
+
+ LASSERT(olg == &obd->obd_olg);
+ rc = llog_setup(NULL, obd, olg, LLOG_MDS_OST_ORIG_CTXT, disk_obd,
+ &lov_mds_ost_orig_logops);
+ if (rc)
+ return rc;
+
+ rc = llog_setup(NULL, obd, olg, LLOG_SIZE_REPL_CTXT, disk_obd,
+ &lov_size_repl_logops);
+ if (rc)
+ GOTO(err_cleanup, rc);
+
+ obd_getref(obd);
+ /* count may not match lov->desc.ld_tgt_count during dynamic ost add */
+ for (i = 0; i < lov->desc.ld_tgt_count; i++) {
+ if (!lov->lov_tgts[i])
+ continue;
+
+ if (index && i != *index)
+ continue;
+
+ child = lov->lov_tgts[i]->ltd_obd;
+ rc = obd_llog_init(child, &child->obd_olg, disk_obd, &i);
+ if (rc)
+ CERROR("error osc_llog_init idx %d osc '%s' tgt '%s' "
+ "(rc=%d)\n", i, child->obd_name,
+ disk_obd->obd_name, rc);
+ rc = 0;
+ }
+ obd_putref(obd);
+ GOTO(err_cleanup, rc);
+err_cleanup:
+ if (rc) {
+ struct llog_ctxt *ctxt =
+ llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
+ if (ctxt)
+ llog_cleanup(NULL, ctxt);
+ ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
+ if (ctxt)
+ llog_cleanup(NULL, ctxt);
+ }
+ return rc;
+}
+
+int lov_llog_finish(struct obd_device *obd, int count)
+{
+ struct llog_ctxt *ctxt;
+
+	/* clean up our llogs only if the contexts have been set up
+	 * (the client LOV doesn't set them up, the MDS LOV does). */
+ ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
+ if (ctxt)
+ llog_cleanup(NULL, ctxt);
+
+ ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
+ if (ctxt)
+ llog_cleanup(NULL, ctxt);
+
+ /* lov->tgt llogs are cleaned during osc_cleanup. */
+ return 0;
+}
diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c
new file mode 100644
index 0000000..d204fed
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lov/lov_merge.c
@@ -0,0 +1,216 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_LOV
+
+#include <linux/libcfs/libcfs.h>
+
+#include <obd_class.h>
+#include <obd_lov.h>
+
+#include "lov_internal.h"
+
+/** Merge the lock value block (lvb) attributes and KMS from each of the
+ * stripes in a file into a single lvb. It is expected that the caller
+ * initializes the current atime, mtime and ctime to avoid regressing a more
+ * up-to-date time on the local client.
+ */
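+/*
+ * E.g. (sizes assumed purely for illustration): with two stripes of 1 MiB
+ * each, where stripe 0 holds 1 MiB and stripe 1 holds 512 KiB of data, the
+ * merged size is 1.5 MiB (stripe 1's last byte maps to file offset
+ * 1 MiB + 512 KiB - 1), blocks are summed across stripes, and each of
+ * mtime/atime/ctime takes the maximum seen on any stripe or the caller's
+ * initial value.
+ */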
+int lov_merge_lvb_kms(struct lov_stripe_md *lsm,
+ struct ost_lvb *lvb, __u64 *kms_place)
+{
+ __u64 size = 0;
+ __u64 kms = 0;
+ __u64 blocks = 0;
+ obd_time current_mtime = lvb->lvb_mtime;
+ obd_time current_atime = lvb->lvb_atime;
+ obd_time current_ctime = lvb->lvb_ctime;
+ int i;
+ int rc = 0;
+
+ LASSERT(spin_is_locked(&lsm->lsm_lock));
+ LASSERT(lsm->lsm_lock_owner == current_pid());
+
+ CDEBUG(D_INODE, "MDT ID "DOSTID" initial value: s="LPU64" m="LPU64
+ " a="LPU64" c="LPU64" b="LPU64"\n", POSTID(&lsm->lsm_oi),
+ lvb->lvb_size, lvb->lvb_mtime, lvb->lvb_atime, lvb->lvb_ctime,
+ lvb->lvb_blocks);
+ for (i = 0; i < lsm->lsm_stripe_count; i++) {
+ struct lov_oinfo *loi = lsm->lsm_oinfo[i];
+ obd_size lov_size, tmpsize;
+
+ if (OST_LVB_IS_ERR(loi->loi_lvb.lvb_blocks)) {
+ rc = OST_LVB_GET_ERR(loi->loi_lvb.lvb_blocks);
+ continue;
+ }
+
+ tmpsize = loi->loi_kms;
+ lov_size = lov_stripe_size(lsm, tmpsize, i);
+ if (lov_size > kms)
+ kms = lov_size;
+
+ if (loi->loi_lvb.lvb_size > tmpsize)
+ tmpsize = loi->loi_lvb.lvb_size;
+
+ lov_size = lov_stripe_size(lsm, tmpsize, i);
+ if (lov_size > size)
+ size = lov_size;
+ /* merge blocks, mtime, atime */
+ blocks += loi->loi_lvb.lvb_blocks;
+ if (loi->loi_lvb.lvb_mtime > current_mtime)
+ current_mtime = loi->loi_lvb.lvb_mtime;
+ if (loi->loi_lvb.lvb_atime > current_atime)
+ current_atime = loi->loi_lvb.lvb_atime;
+ if (loi->loi_lvb.lvb_ctime > current_ctime)
+ current_ctime = loi->loi_lvb.lvb_ctime;
+
+ CDEBUG(D_INODE, "MDT ID "DOSTID" on OST[%u]: s="LPU64" m="LPU64
+ " a="LPU64" c="LPU64" b="LPU64"\n", POSTID(&lsm->lsm_oi),
+ loi->loi_ost_idx, loi->loi_lvb.lvb_size,
+ loi->loi_lvb.lvb_mtime, loi->loi_lvb.lvb_atime,
+ loi->loi_lvb.lvb_ctime, loi->loi_lvb.lvb_blocks);
+ }
+
+ *kms_place = kms;
+ lvb->lvb_size = size;
+ lvb->lvb_blocks = blocks;
+ lvb->lvb_mtime = current_mtime;
+ lvb->lvb_atime = current_atime;
+ lvb->lvb_ctime = current_ctime;
+ return rc;
+}
+
+/** Merge the lock value block (lvb) attributes from each of the stripes in a
+ * file into a single lvb. It is expected that the caller initializes the
+ * current atime, mtime and ctime to avoid regressing a more up-to-date time on
+ * the local client.
+ *
+ * If \a kms_only is set then we do not consider the recently seen size (rss)
+ * when updating the known minimum size (kms). Even when merging RSS, we will
+ * take the KMS value if it's larger. This prevents getattr from stomping on
+ * dirty cached pages which extend the file size. */
+int lov_merge_lvb(struct obd_export *exp,
+ struct lov_stripe_md *lsm, struct ost_lvb *lvb, int kms_only)
+{
+ int rc;
+ __u64 kms;
+
+ lov_stripe_lock(lsm);
+ rc = lov_merge_lvb_kms(lsm, lvb, &kms);
+ lov_stripe_unlock(lsm);
+ if (kms_only)
+ lvb->lvb_size = kms;
+
+ CDEBUG(D_INODE, "merged for ID "DOSTID" s="LPU64" m="LPU64" a="LPU64
+ " c="LPU64" b="LPU64"\n", POSTID(&lsm->lsm_oi), lvb->lvb_size,
+ lvb->lvb_mtime, lvb->lvb_atime, lvb->lvb_ctime, lvb->lvb_blocks);
+ return rc;
+}
+
+/* Must be called under the lov_stripe_lock() */
+int lov_adjust_kms(struct obd_export *exp, struct lov_stripe_md *lsm,
+ obd_off size, int shrink)
+{
+ struct lov_oinfo *loi;
+ int stripe = 0;
+ __u64 kms;
+
+ LASSERT(spin_is_locked(&lsm->lsm_lock));
+ LASSERT(lsm->lsm_lock_owner == current_pid());
+
+ if (shrink) {
+ for (; stripe < lsm->lsm_stripe_count; stripe++) {
+ struct lov_oinfo *loi = lsm->lsm_oinfo[stripe];
+ kms = lov_size_to_stripe(lsm, size, stripe);
+ CDEBUG(D_INODE,
+ "stripe %d KMS %sing "LPU64"->"LPU64"\n",
+ stripe, kms > loi->loi_kms ? "increas":"shrink",
+ loi->loi_kms, kms);
+ loi_kms_set(loi, loi->loi_lvb.lvb_size = kms);
+ }
+ return 0;
+ }
+
+ if (size > 0)
+ stripe = lov_stripe_number(lsm, size - 1);
+ kms = lov_size_to_stripe(lsm, size, stripe);
+ loi = lsm->lsm_oinfo[stripe];
+
+ CDEBUG(D_INODE, "stripe %d KMS %sincreasing "LPU64"->"LPU64"\n",
+ stripe, kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms);
+ if (kms > loi->loi_kms)
+ loi_kms_set(loi, kms);
+
+ return 0;
+}
+
+void lov_merge_attrs(struct obdo *tgt, struct obdo *src, obd_valid valid,
+ struct lov_stripe_md *lsm, int stripeno, int *set)
+{
+ valid &= src->o_valid;
+
+ if (*set) {
+ if (valid & OBD_MD_FLSIZE) {
+ /* this handles sparse files properly */
+ obd_size lov_size;
+
+ lov_size = lov_stripe_size(lsm, src->o_size, stripeno);
+ if (lov_size > tgt->o_size)
+ tgt->o_size = lov_size;
+ }
+ if (valid & OBD_MD_FLBLOCKS)
+ tgt->o_blocks += src->o_blocks;
+ if (valid & OBD_MD_FLBLKSZ)
+ tgt->o_blksize += src->o_blksize;
+ if (valid & OBD_MD_FLCTIME && tgt->o_ctime < src->o_ctime)
+ tgt->o_ctime = src->o_ctime;
+ if (valid & OBD_MD_FLMTIME && tgt->o_mtime < src->o_mtime)
+ tgt->o_mtime = src->o_mtime;
+ if (valid & OBD_MD_FLDATAVERSION)
+ tgt->o_data_version += src->o_data_version;
+ } else {
+ memcpy(tgt, src, sizeof(*tgt));
+ tgt->o_oi = lsm->lsm_oi;
+ if (valid & OBD_MD_FLSIZE)
+ tgt->o_size = lov_stripe_size(lsm, src->o_size,
+ stripeno);
+ }
+
+ /* data_version needs to be valid on all stripes to be correct! */
+ if (!(valid & OBD_MD_FLDATAVERSION))
+ tgt->o_valid &= ~OBD_MD_FLDATAVERSION;
+
+ *set += 1;
+}
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
new file mode 100644
index 0000000..0b47aba
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -0,0 +1,2876 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/lov/lov_obd.c
+ *
+ * Author: Phil Schwan <phil@clusterfs.com>
+ * Author: Peter Braam <braam@clusterfs.com>
+ * Author: Mike Shaver <shaver@clusterfs.com>
+ * Author: Nathan Rutman <nathan@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LOV
+#include <linux/libcfs/libcfs.h>
+
+#include <obd_support.h>
+#include <lustre_lib.h>
+#include <lustre_net.h>
+#include <lustre/lustre_idl.h>
+#include <lustre_dlm.h>
+#include <lustre_mds.h>
+#include <lustre_debug.h>
+#include <obd_class.h>
+#include <obd_lov.h>
+#include <obd_ost.h>
+#include <lprocfs_status.h>
+#include <lustre_param.h>
+#include <cl_object.h>
+#include <lclient.h> /* for cl_client_lru */
+#include <lustre/ll_fiemap.h>
+#include <lustre_log.h>
+#include <lustre_fid.h>
+
+#include "lov_internal.h"
+
+/* Keep a refcount of lov->tgt usage to prevent racing with addition/deletion.
+ Any function that expects lov_tgts to remain stationary must take a ref. */
+static void lov_getref(struct obd_device *obd)
+{
+ struct lov_obd *lov = &obd->u.lov;
+
+ /* nobody gets through here until lov_putref is done */
+ mutex_lock(&lov->lov_lock);
+ atomic_inc(&lov->lov_refcount);
+ mutex_unlock(&lov->lov_lock);
+ return;
+}
+
+static void __lov_del_obd(struct obd_device *obd, struct lov_tgt_desc *tgt);
+
+static void lov_putref(struct obd_device *obd)
+{
+ struct lov_obd *lov = &obd->u.lov;
+
+ mutex_lock(&lov->lov_lock);
+ /* ok to dec to 0 more than once -- ltd_exp's will be null */
+ if (atomic_dec_and_test(&lov->lov_refcount) && lov->lov_death_row) {
+ LIST_HEAD(kill);
+ int i;
+ struct lov_tgt_desc *tgt, *n;
+ CDEBUG(D_CONFIG, "destroying %d lov targets\n",
+ lov->lov_death_row);
+ for (i = 0; i < lov->desc.ld_tgt_count; i++) {
+ tgt = lov->lov_tgts[i];
+
+ if (!tgt || !tgt->ltd_reap)
+ continue;
+ list_add(&tgt->ltd_kill, &kill);
+ /* XXX - right now there is a dependency on ld_tgt_count
+ * being the maximum tgt index for computing the
+ * mds_max_easize. So we can't shrink it. */
+ lov_ost_pool_remove(&lov->lov_packed, i);
+ lov->lov_tgts[i] = NULL;
+ lov->lov_death_row--;
+ }
+ mutex_unlock(&lov->lov_lock);
+
+ list_for_each_entry_safe(tgt, n, &kill, ltd_kill) {
+ list_del(&tgt->ltd_kill);
+ /* Disconnect */
+ __lov_del_obd(obd, tgt);
+ }
+ } else {
+ mutex_unlock(&lov->lov_lock);
+ }
+}
+
+static int lov_set_osc_active(struct obd_device *obd, struct obd_uuid *uuid,
+ enum obd_notify_event ev);
+static int lov_notify(struct obd_device *obd, struct obd_device *watched,
+ enum obd_notify_event ev, void *data);
+
+
+#define MAX_STRING_SIZE 128
+int lov_connect_obd(struct obd_device *obd, __u32 index, int activate,
+ struct obd_connect_data *data)
+{
+ struct lov_obd *lov = &obd->u.lov;
+ struct obd_uuid *tgt_uuid;
+ struct obd_device *tgt_obd;
+ static struct obd_uuid lov_osc_uuid = { "LOV_OSC_UUID" };
+ struct obd_import *imp;
+ struct proc_dir_entry *lov_proc_dir;
+ int rc;
+
+ if (!lov->lov_tgts[index])
+ return -EINVAL;
+
+ tgt_uuid = &lov->lov_tgts[index]->ltd_uuid;
+ tgt_obd = lov->lov_tgts[index]->ltd_obd;
+
+ if (!tgt_obd->obd_set_up) {
+ CERROR("Target %s not set up\n", obd_uuid2str(tgt_uuid));
+ return -EINVAL;
+ }
+
+ /* override the sp_me from lov */
+ tgt_obd->u.cli.cl_sp_me = lov->lov_sp_me;
+
+ if (data && (data->ocd_connect_flags & OBD_CONNECT_INDEX))
+ data->ocd_index = index;
+
+ /*
+ * Divine LOV knows that OBDs under it are OSCs.
+ */
+ imp = tgt_obd->u.cli.cl_import;
+
+ if (activate) {
+ tgt_obd->obd_no_recov = 0;
+ /* FIXME this is probably supposed to be
+ ptlrpc_set_import_active. Horrible naming. */
+ ptlrpc_activate_import(imp);
+ }
+
+ rc = obd_register_observer(tgt_obd, obd);
+ if (rc) {
+ CERROR("Target %s register_observer error %d\n",
+ obd_uuid2str(tgt_uuid), rc);
+ return rc;
+ }
+
+
+ if (imp->imp_invalid) {
+ CDEBUG(D_CONFIG, "not connecting OSC %s; administratively "
+ "disabled\n", obd_uuid2str(tgt_uuid));
+ return 0;
+ }
+
+ rc = obd_connect(NULL, &lov->lov_tgts[index]->ltd_exp, tgt_obd,
+ &lov_osc_uuid, data, NULL);
+ if (rc || !lov->lov_tgts[index]->ltd_exp) {
+ CERROR("Target %s connect error %d\n",
+ obd_uuid2str(tgt_uuid), rc);
+ return -ENODEV;
+ }
+
+ lov->lov_tgts[index]->ltd_reap = 0;
+
+ CDEBUG(D_CONFIG, "Connected tgt idx %d %s (%s) %sactive\n", index,
+ obd_uuid2str(tgt_uuid), tgt_obd->obd_name, activate ? "":"in");
+
+ lov_proc_dir = obd->obd_proc_private;
+ if (lov_proc_dir) {
+ struct obd_device *osc_obd = lov->lov_tgts[index]->ltd_exp->exp_obd;
+ struct proc_dir_entry *osc_symlink;
+
+ LASSERT(osc_obd != NULL);
+ LASSERT(osc_obd->obd_magic == OBD_DEVICE_MAGIC);
+ LASSERT(osc_obd->obd_type->typ_name != NULL);
+
+ osc_symlink = lprocfs_add_symlink(osc_obd->obd_name,
+ lov_proc_dir,
+ "../../../%s/%s",
+ osc_obd->obd_type->typ_name,
+ osc_obd->obd_name);
+ if (osc_symlink == NULL) {
+ CERROR("could not register LOV target "
+ "/proc/fs/lustre/%s/%s/target_obds/%s.",
+ obd->obd_type->typ_name, obd->obd_name,
+ osc_obd->obd_name);
+ lprocfs_remove(&lov_proc_dir);
+ obd->obd_proc_private = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static int lov_connect(const struct lu_env *env,
+ struct obd_export **exp, struct obd_device *obd,
+ struct obd_uuid *cluuid, struct obd_connect_data *data,
+ void *localdata)
+{
+ struct lov_obd *lov = &obd->u.lov;
+ struct lov_tgt_desc *tgt;
+ struct lustre_handle conn;
+ int i, rc;
+
+ CDEBUG(D_CONFIG, "connect #%d\n", lov->lov_connects);
+
+ rc = class_connect(&conn, obd, cluuid);
+ if (rc)
+ return rc;
+
+ *exp = class_conn2export(&conn);
+
+ /* Why should there ever be more than 1 connect? */
+ lov->lov_connects++;
+ LASSERT(lov->lov_connects == 1);
+
+ memset(&lov->lov_ocd, 0, sizeof(lov->lov_ocd));
+ if (data)
+ lov->lov_ocd = *data;
+
+ obd_getref(obd);
+ for (i = 0; i < lov->desc.ld_tgt_count; i++) {
+ tgt = lov->lov_tgts[i];
+ if (!tgt || obd_uuid_empty(&tgt->ltd_uuid))
+ continue;
+ /* Flags will be lowest common denominator */
+ rc = lov_connect_obd(obd, i, tgt->ltd_activate, &lov->lov_ocd);
+ if (rc) {
+ CERROR("%s: lov connect tgt %d failed: %d\n",
+ obd->obd_name, i, rc);
+ continue;
+ }
+ /* an administratively disabled OST has no export */
+ if (!lov->lov_tgts[i]->ltd_exp)
+ continue;
+
+ rc = lov_notify(obd, lov->lov_tgts[i]->ltd_exp->exp_obd,
+ OBD_NOTIFY_CONNECT, (void *)&i);
+ if (rc) {
+ CERROR("%s error sending notify %d\n",
+ obd->obd_name, rc);
+ }
+ }
+ obd_putref(obd);
+
+ return 0;
+}
+
+static int lov_disconnect_obd(struct obd_device *obd, struct lov_tgt_desc *tgt)
+{
+ struct proc_dir_entry *lov_proc_dir;
+ struct lov_obd *lov = &obd->u.lov;
+ struct obd_device *osc_obd;
+ int rc;
+
+ osc_obd = class_exp2obd(tgt->ltd_exp);
+ CDEBUG(D_CONFIG, "%s: disconnecting target %s\n",
+ obd->obd_name, osc_obd ? osc_obd->obd_name : "<no obd>");
+
+ if (tgt->ltd_active) {
+ tgt->ltd_active = 0;
+ lov->desc.ld_active_tgt_count--;
+ tgt->ltd_exp->exp_obd->obd_inactive = 1;
+ }
+
+ lov_proc_dir = obd->obd_proc_private;
+ if (lov_proc_dir)
+ lprocfs_remove_proc_entry(osc_obd->obd_name, lov_proc_dir);
+
+ if (osc_obd) {
+ /* Pass it on to our clients.
+ * XXX This should be an argument to disconnect,
+ * XXX not a back-door flag on the OBD. Ah well.
+ */
+ osc_obd->obd_force = obd->obd_force;
+ osc_obd->obd_fail = obd->obd_fail;
+ osc_obd->obd_no_recov = obd->obd_no_recov;
+ }
+
+ obd_register_observer(osc_obd, NULL);
+
+ rc = obd_disconnect(tgt->ltd_exp);
+ if (rc) {
+ CERROR("Target %s disconnect error %d\n",
+ tgt->ltd_uuid.uuid, rc);
+ rc = 0;
+ }
+
+ tgt->ltd_exp = NULL;
+ return 0;
+}
+
+static int lov_disconnect(struct obd_export *exp)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+ struct lov_obd *lov = &obd->u.lov;
+ int i, rc;
+
+ if (!lov->lov_tgts)
+ goto out;
+
+ /* Only disconnect the underlying layers on the final disconnect. */
+ lov->lov_connects--;
+ if (lov->lov_connects != 0) {
+ /* why should there be more than 1 connect? */
+ CERROR("disconnect #%d\n", lov->lov_connects);
+ goto out;
+ }
+
+ /* Let's hold another reference so lov_del_obd doesn't spin through
+ putref every time */
+ obd_getref(obd);
+
+ for (i = 0; i < lov->desc.ld_tgt_count; i++) {
+ if (lov->lov_tgts[i] && lov->lov_tgts[i]->ltd_exp) {
+ /* Disconnection is the last thing we know about an obd */
+ lov_del_target(obd, i, NULL, lov->lov_tgts[i]->ltd_gen);
+ }
+ }
+ obd_putref(obd);
+
+out:
+ rc = class_disconnect(exp); /* bz 9811 */
+ return rc;
+}
+
+/* Error codes:
+ *
+ * -EINVAL : UUID can't be found in the LOV's target list
+ * -ENOTCONN: The UUID is found, but the target connection is bad (!)
+ * -EBADF : The UUID is found, but the OBD is the wrong type (!)
+ * any >= 0 : is the LOV target index
+ */
+static int lov_set_osc_active(struct obd_device *obd, struct obd_uuid *uuid,
+ enum obd_notify_event ev)
+{
+ struct lov_obd *lov = &obd->u.lov;
+ struct lov_tgt_desc *tgt;
+ int index, activate, active;
+
+ CDEBUG(D_INFO, "Searching in lov %p for uuid %s event(%d)\n",
+ lov, uuid->uuid, ev);
+
+ obd_getref(obd);
+ for (index = 0; index < lov->desc.ld_tgt_count; index++) {
+ tgt = lov->lov_tgts[index];
+ if (!tgt)
+ continue;
+ /*
+ * LU-642, initially inactive OSC could miss the obd_connect,
+ * we make up for it here.
+ */
+ if (ev == OBD_NOTIFY_ACTIVATE && tgt->ltd_exp == NULL &&
+ obd_uuid_equals(uuid, &tgt->ltd_uuid)) {
+ struct obd_uuid lov_osc_uuid = {"LOV_OSC_UUID"};
+
+ obd_connect(NULL, &tgt->ltd_exp, tgt->ltd_obd,
+ &lov_osc_uuid, &lov->lov_ocd, NULL);
+ }
+ if (!tgt->ltd_exp)
+ continue;
+
+ CDEBUG(D_INFO, "lov idx %d is %s conn "LPX64"\n",
+ index, obd_uuid2str(&tgt->ltd_uuid),
+ tgt->ltd_exp->exp_handle.h_cookie);
+ if (obd_uuid_equals(uuid, &tgt->ltd_uuid))
+ break;
+ }
+
+ if (index == lov->desc.ld_tgt_count)
+ GOTO(out, index = -EINVAL);
+
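+ /* ACTIVATE/DEACTIVATE toggle the administrative state (ltd_activate);
+ * ACTIVE/INACTIVE track the connection state (ltd_active) and adjust
+ * the active target count. */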
+ if (ev == OBD_NOTIFY_DEACTIVATE || ev == OBD_NOTIFY_ACTIVATE) {
+ activate = (ev == OBD_NOTIFY_ACTIVATE) ? 1 : 0;
+
+ if (lov->lov_tgts[index]->ltd_activate == activate) {
+ CDEBUG(D_INFO, "OSC %s already %sactivate!\n",
+ uuid->uuid, activate ? "" : "de");
+ } else {
+ lov->lov_tgts[index]->ltd_activate = activate;
+ CDEBUG(D_CONFIG, "%sactivate OSC %s\n",
+ activate ? "" : "de", obd_uuid2str(uuid));
+ }
+
+ } else if (ev == OBD_NOTIFY_INACTIVE || ev == OBD_NOTIFY_ACTIVE) {
+ active = (ev == OBD_NOTIFY_ACTIVE) ? 1 : 0;
+
+ if (lov->lov_tgts[index]->ltd_active == active) {
+ CDEBUG(D_INFO, "OSC %s already %sactive!\n",
+ uuid->uuid, active ? "" : "in");
+ GOTO(out, index);
+ } else {
+ CDEBUG(D_CONFIG, "Marking OSC %s %sactive\n",
+ obd_uuid2str(uuid), active ? "" : "in");
+ }
+
+ lov->lov_tgts[index]->ltd_active = active;
+ if (active) {
+ lov->desc.ld_active_tgt_count++;
+ lov->lov_tgts[index]->ltd_exp->exp_obd->obd_inactive = 0;
+ } else {
+ lov->desc.ld_active_tgt_count--;
+ lov->lov_tgts[index]->ltd_exp->exp_obd->obd_inactive = 1;
+ }
+ } else {
+ CERROR("Unknown event(%d) for uuid %s", ev, uuid->uuid);
+ }
+
+ out:
+ obd_putref(obd);
+ return index;
+}
+
+static int lov_notify(struct obd_device *obd, struct obd_device *watched,
+ enum obd_notify_event ev, void *data)
+{
+ int rc = 0;
+ struct lov_obd *lov = &obd->u.lov;
+
+ down_read(&lov->lov_notify_lock);
+ if (!lov->lov_connects) {
+ up_read(&lov->lov_notify_lock);
+ return rc;
+ }
+
+ if (ev == OBD_NOTIFY_ACTIVE || ev == OBD_NOTIFY_INACTIVE ||
+ ev == OBD_NOTIFY_ACTIVATE || ev == OBD_NOTIFY_DEACTIVATE) {
+ struct obd_uuid *uuid;
+
+ LASSERT(watched);
+
+ if (strcmp(watched->obd_type->typ_name, LUSTRE_OSC_NAME)) {
+ up_read(&lov->lov_notify_lock);
+ CERROR("unexpected notification of %s %s!\n",
+ watched->obd_type->typ_name,
+ watched->obd_name);
+ return -EINVAL;
+ }
+ uuid = &watched->u.cli.cl_target_uuid;
+
+ /* Set OSC as active before notifying the observer, so the
+ * observer can use the OSC normally.
+ */
+ rc = lov_set_osc_active(obd, uuid, ev);
+ if (rc < 0) {
+ up_read(&lov->lov_notify_lock);
+ CERROR("event(%d) of %s failed: %d\n", ev,
+ obd_uuid2str(uuid), rc);
+ return rc;
+ }
+ /* pass the lov target index as data for these events */
+ data = &rc;
+ }
+
+ /* Pass the notification up the chain. */
+ if (watched) {
+ rc = obd_notify_observer(obd, watched, ev, data);
+ } else {
+ /* A NULL watched means all OSCs in the lov (only for syncs) */
+ /* sync events should send the lov target index as data */
+ struct lov_obd *lov = &obd->u.lov;
+ int i, is_sync;
+
+ data = &i;
+ is_sync = (ev == OBD_NOTIFY_SYNC) ||
+ (ev == OBD_NOTIFY_SYNC_NONBLOCK);
+
+ obd_getref(obd);
+ for (i = 0; i < lov->desc.ld_tgt_count; i++) {
+ if (!lov->lov_tgts[i])
+ continue;
+
+ /* don't send sync event if target not
+ * connected/activated */
+ if (is_sync && !lov->lov_tgts[i]->ltd_active)
+ continue;
+
+ rc = obd_notify_observer(obd, lov->lov_tgts[i]->ltd_obd,
+ ev, data);
+ if (rc) {
+ CERROR("%s: notify %s of %s failed %d\n",
+ obd->obd_name,
+ obd->obd_observer->obd_name,
+ lov->lov_tgts[i]->ltd_obd->obd_name,
+ rc);
+ }
+ }
+ obd_putref(obd);
+ }
+
+ up_read(&lov->lov_notify_lock);
+ return rc;
+}
+
+static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
+ __u32 index, int gen, int active)
+{
+ struct lov_obd *lov = &obd->u.lov;
+ struct lov_tgt_desc *tgt;
+ struct obd_device *tgt_obd;
+ int rc;
+
+ CDEBUG(D_CONFIG, "uuid:%s idx:%d gen:%d active:%d\n",
+ uuidp->uuid, index, gen, active);
+
+ if (gen <= 0) {
+ CERROR("request to add OBD %s with invalid generation: %d\n",
+ uuidp->uuid, gen);
+ return -EINVAL;
+ }
+
+ tgt_obd = class_find_client_obd(uuidp, LUSTRE_OSC_NAME,
+ &obd->obd_uuid);
+ if (tgt_obd == NULL)
+ return -EINVAL;
+
+ mutex_lock(&lov->lov_lock);
+
+ if ((index < lov->lov_tgt_size) && (lov->lov_tgts[index] != NULL)) {
+ tgt = lov->lov_tgts[index];
+ CERROR("UUID %s already assigned at LOV target index %d\n",
+ obd_uuid2str(&tgt->ltd_uuid), index);
+ mutex_unlock(&lov->lov_lock);
+ return -EEXIST;
+ }
+
+ if (index >= lov->lov_tgt_size) {
+ /* We need to reallocate the lov target array. */
+ struct lov_tgt_desc **newtgts, **old = NULL;
+ __u32 newsize, oldsize = 0;
+
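+ /* Grow the array geometrically: start at a minimum of 2 slots and
+ * double until the requested index fits. */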
+ newsize = max(lov->lov_tgt_size, (__u32)2);
+ while (newsize < index + 1)
+ newsize = newsize << 1;
+ OBD_ALLOC(newtgts, sizeof(*newtgts) * newsize);
+ if (newtgts == NULL) {
+ mutex_unlock(&lov->lov_lock);
+ return -ENOMEM;
+ }
+
+ if (lov->lov_tgt_size) {
+ memcpy(newtgts, lov->lov_tgts, sizeof(*newtgts) *
+ lov->lov_tgt_size);
+ old = lov->lov_tgts;
+ oldsize = lov->lov_tgt_size;
+ }
+
+ lov->lov_tgts = newtgts;
+ lov->lov_tgt_size = newsize;
+ smp_rmb();
+ if (old)
+ OBD_FREE(old, sizeof(*old) * oldsize);
+
+ CDEBUG(D_CONFIG, "tgts: %p size: %d\n",
+ lov->lov_tgts, lov->lov_tgt_size);
+ }
+
+ OBD_ALLOC_PTR(tgt);
+ if (!tgt) {
+ mutex_unlock(&lov->lov_lock);
+ return -ENOMEM;
+ }
+
+ rc = lov_ost_pool_add(&lov->lov_packed, index, lov->lov_tgt_size);
+ if (rc) {
+ mutex_unlock(&lov->lov_lock);
+ OBD_FREE_PTR(tgt);
+ return rc;
+ }
+
+ tgt->ltd_uuid = *uuidp;
+ tgt->ltd_obd = tgt_obd;
+ /* XXX - add a sanity check on the generation number. */
+ tgt->ltd_gen = gen;
+ tgt->ltd_index = index;
+ tgt->ltd_activate = active;
+ lov->lov_tgts[index] = tgt;
+ if (index >= lov->desc.ld_tgt_count)
+ lov->desc.ld_tgt_count = index + 1;
+
+ mutex_unlock(&lov->lov_lock);
+
+ CDEBUG(D_CONFIG, "idx=%d ltd_gen=%d ld_tgt_count=%d\n",
+ index, tgt->ltd_gen, lov->desc.ld_tgt_count);
+
+ rc = obd_notify(obd, tgt_obd, OBD_NOTIFY_CREATE, &index);
+
+ if (lov->lov_connects == 0) {
+ /* lov_connect hasn't been called yet. We'll do the
+ lov_connect_obd on this target when that fn first runs,
+ because we don't know the connect flags yet. */
+ return 0;
+ }
+
+ obd_getref(obd);
+
+ rc = lov_connect_obd(obd, index, active, &lov->lov_ocd);
+ if (rc)
+ GOTO(out, rc);
+
+ /* an administratively disabled OST has no export */
+ if (!tgt->ltd_exp)
+ GOTO(out, rc = 0);
+
+ if (lov->lov_cache != NULL) {
+ rc = obd_set_info_async(NULL, tgt->ltd_exp,
+ sizeof(KEY_CACHE_SET), KEY_CACHE_SET,
+ sizeof(struct cl_client_cache), lov->lov_cache,
+ NULL);
+ if (rc < 0)
+ GOTO(out, rc);
+ }
+
+ rc = lov_notify(obd, tgt->ltd_exp->exp_obd,
+ active ? OBD_NOTIFY_CONNECT : OBD_NOTIFY_INACTIVE,
+ (void *)&index);
+
+out:
+ if (rc) {
+ CERROR("add failed (%d), deleting %s\n", rc,
+ obd_uuid2str(&tgt->ltd_uuid));
+ lov_del_target(obd, index, NULL, 0);
+ }
+ obd_putref(obd);
+ return rc;
+}
+
+/* Schedule a target for deletion */
+int lov_del_target(struct obd_device *obd, __u32 index,
+ struct obd_uuid *uuidp, int gen)
+{
+ struct lov_obd *lov = &obd->u.lov;
+ int count = lov->desc.ld_tgt_count;
+ int rc = 0;
+
+ if (index >= count) {
+ CERROR("LOV target index %d >= number of LOV OBDs %d.\n",
+ index, count);
+ return -EINVAL;
+ }
+
+ /* to make sure there's no ongoing lov_notify() now */
+ down_write(&lov->lov_notify_lock);
+ obd_getref(obd);
+
+ if (!lov->lov_tgts[index]) {
+ CERROR("LOV target at index %d is not setup.\n", index);
+ GOTO(out, rc = -EINVAL);
+ }
+
+ if (uuidp && !obd_uuid_equals(uuidp, &lov->lov_tgts[index]->ltd_uuid)) {
+ CERROR("LOV target UUID %s at index %d doesn't match %s.\n",
+ lov_uuid2str(lov, index), index,
+ obd_uuid2str(uuidp));
+ GOTO(out, rc = -EINVAL);
+ }
+
+ CDEBUG(D_CONFIG, "uuid: %s idx: %d gen: %d exp: %p active: %d\n",
+ lov_uuid2str(lov, index), index,
+ lov->lov_tgts[index]->ltd_gen, lov->lov_tgts[index]->ltd_exp,
+ lov->lov_tgts[index]->ltd_active);
+
+ lov->lov_tgts[index]->ltd_reap = 1;
+ lov->lov_death_row++;
+ /* the target is actually deleted and freed from obd_putref() */
+out:
+ obd_putref(obd);
+ up_write(&lov->lov_notify_lock);
+
+ return rc;
+}
+
+static void __lov_del_obd(struct obd_device *obd, struct lov_tgt_desc *tgt)
+{
+ struct obd_device *osc_obd;
+
+ LASSERT(tgt);
+ LASSERT(tgt->ltd_reap);
+
+ osc_obd = class_exp2obd(tgt->ltd_exp);
+
+ CDEBUG(D_CONFIG, "Removing tgt %s : %s\n",
+ tgt->ltd_uuid.uuid,
+ osc_obd ? osc_obd->obd_name : "<no obd>");
+
+ if (tgt->ltd_exp)
+ lov_disconnect_obd(obd, tgt);
+
+ OBD_FREE_PTR(tgt);
+
+ /* Manual cleanup - no cleanup logs to clean up the osc's. We must
+ do it ourselves. And we can't do it from lov_cleanup,
+ because we just lost our only reference to it. */
+ if (osc_obd)
+ class_manual_cleanup(osc_obd);
+}
+
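+/* The lov_fix_desc_*() helpers below normalize a lov_desc before use:
+ * they clamp the default stripe size to a sane, aligned value, force a
+ * non-zero default stripe count, reset unknown stripe patterns and supply
+ * a default qos_maxage. */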
+void lov_fix_desc_stripe_size(__u64 *val)
+{
+ if (*val < LOV_MIN_STRIPE_SIZE) {
+ if (*val != 0)
+ LCONSOLE_INFO("Increasing default stripe size to "
+ "minimum %u\n",
+ LOV_DEFAULT_STRIPE_SIZE);
+ *val = LOV_DEFAULT_STRIPE_SIZE;
+ } else if (*val & (LOV_MIN_STRIPE_SIZE - 1)) {
+ *val &= ~(LOV_MIN_STRIPE_SIZE - 1);
+ LCONSOLE_WARN("Changing default stripe size to "LPU64" (a "
+ "multiple of %u)\n",
+ *val, LOV_MIN_STRIPE_SIZE);
+ }
+}
+
+void lov_fix_desc_stripe_count(__u32 *val)
+{
+ if (*val == 0)
+ *val = 1;
+}
+
+void lov_fix_desc_pattern(__u32 *val)
+{
+ /* from lov_setstripe */
+ if ((*val != 0) && (*val != LOV_PATTERN_RAID0)) {
+ LCONSOLE_WARN("Unknown stripe pattern: %#x\n", *val);
+ *val = 0;
+ }
+}
+
+void lov_fix_desc_qos_maxage(__u32 *val)
+{
+ /* fix qos_maxage */
+ if (*val == 0)
+ *val = QOS_DEFAULT_MAXAGE;
+}
+
+void lov_fix_desc(struct lov_desc *desc)
+{
+ lov_fix_desc_stripe_size(&desc->ld_default_stripe_size);
+ lov_fix_desc_stripe_count(&desc->ld_default_stripe_count);
+ lov_fix_desc_pattern(&desc->ld_pattern);
+ lov_fix_desc_qos_maxage(&desc->ld_qos_maxage);
+}
+
+int lov_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
+{
+ struct lprocfs_static_vars lvars = { 0 };
+ struct lov_desc *desc;
+ struct lov_obd *lov = &obd->u.lov;
+ int rc;
+
+ if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
+ CERROR("LOV setup requires a descriptor\n");
+ return -EINVAL;
+ }
+
+ desc = (struct lov_desc *)lustre_cfg_buf(lcfg, 1);
+
+ if (sizeof(*desc) > LUSTRE_CFG_BUFLEN(lcfg, 1)) {
+ CERROR("descriptor size wrong: %d > %d\n",
+ (int)sizeof(*desc), LUSTRE_CFG_BUFLEN(lcfg, 1));
+ return -EINVAL;
+ }
+
+ if (desc->ld_magic != LOV_DESC_MAGIC) {
+ if (desc->ld_magic == __swab32(LOV_DESC_MAGIC)) {
+ CDEBUG(D_OTHER, "%s: Swabbing lov desc %p\n",
+ obd->obd_name, desc);
+ lustre_swab_lov_desc(desc);
+ } else {
+ CERROR("%s: Bad lov desc magic: %#x\n",
+ obd->obd_name, desc->ld_magic);
+ return -EINVAL;
+ }
+ }
+
+ lov_fix_desc(desc);
+
+ desc->ld_active_tgt_count = 0;
+ lov->desc = *desc;
+ lov->lov_tgt_size = 0;
+
+ mutex_init(&lov->lov_lock);
+ atomic_set(&lov->lov_refcount, 0);
+ lov->lov_sp_me = LUSTRE_SP_CLI;
+
+ init_rwsem(&lov->lov_notify_lock);
+
+ lov->lov_pools_hash_body = cfs_hash_create("POOLS", HASH_POOLS_CUR_BITS,
+ HASH_POOLS_MAX_BITS,
+ HASH_POOLS_BKT_BITS, 0,
+ CFS_HASH_MIN_THETA,
+ CFS_HASH_MAX_THETA,
+ &pool_hash_operations,
+ CFS_HASH_DEFAULT);
+ INIT_LIST_HEAD(&lov->lov_pool_list);
+ lov->lov_pool_count = 0;
+ rc = lov_ost_pool_init(&lov->lov_packed, 0);
+ if (rc)
+ GOTO(out, rc);
+
+ lprocfs_lov_init_vars(&lvars);
+ lprocfs_obd_setup(obd, lvars.obd_vars);
+#ifdef LPROCFS
+ {
+ int rc1;
+
+ rc1 = lprocfs_seq_create(obd->obd_proc_entry, "target_obd",
+ 0444, &lov_proc_target_fops, obd);
+ if (rc1)
+ CWARN("Error adding the target_obd file\n");
+ }
+#endif
+ lov->lov_pool_proc_entry = lprocfs_register("pools",
+ obd->obd_proc_entry,
+ NULL, NULL);
+
+ return 0;
+
+out:
+ return rc;
+}
+
+static int lov_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
+{
+ int rc = 0;
+ struct lov_obd *lov = &obd->u.lov;
+
+ switch (stage) {
+ case OBD_CLEANUP_EARLY: {
+ int i;
+ for (i = 0; i < lov->desc.ld_tgt_count; i++) {
+ if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
+ continue;
+ obd_precleanup(class_exp2obd(lov->lov_tgts[i]->ltd_exp),
+ OBD_CLEANUP_EARLY);
+ }
+ break;
+ }
+ case OBD_CLEANUP_EXPORTS:
+ rc = obd_llog_finish(obd, 0);
+ if (rc != 0)
+ CERROR("failed to cleanup llogging subsystems\n");
+ break;
+ }
+ return rc;
+}
+
+static int lov_cleanup(struct obd_device *obd)
+{
+ struct lov_obd *lov = &obd->u.lov;
+ struct list_head *pos, *tmp;
+ struct pool_desc *pool;
+
+ list_for_each_safe(pos, tmp, &lov->lov_pool_list) {
+ pool = list_entry(pos, struct pool_desc, pool_list);
+ /* free pool structs */
+ CDEBUG(D_INFO, "delete pool %p\n", pool);
+ /* In the function below, .hs_keycmp resolves to
+ * pool_hashkey_keycmp() */
+ /* coverity[overrun-buffer-val] */
+ lov_pool_del(obd, pool->pool_name);
+ }
+ cfs_hash_putref(lov->lov_pools_hash_body);
+ lov_ost_pool_free(&lov->lov_packed);
+
+ lprocfs_obd_cleanup(obd);
+ if (lov->lov_tgts) {
+ int i;
+ obd_getref(obd);
+ for (i = 0; i < lov->desc.ld_tgt_count; i++) {
+ if (!lov->lov_tgts[i])
+ continue;
+
+ /* Inactive targets may never have connected */
+ if (lov->lov_tgts[i]->ltd_active ||
+ atomic_read(&lov->lov_refcount))
+ /* We should never get here - these
+ should have been removed in the
+ disconnect. */
+ CERROR("lov tgt %d not cleaned!"
+ " deathrow=%d, lovrc=%d\n",
+ i, lov->lov_death_row,
+ atomic_read(&lov->lov_refcount));
+ lov_del_target(obd, i, NULL, 0);
+ }
+ obd_putref(obd);
+ OBD_FREE(lov->lov_tgts, sizeof(*lov->lov_tgts) *
+ lov->lov_tgt_size);
+ lov->lov_tgt_size = 0;
+ }
+ return 0;
+}
+
+int lov_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg,
+ __u32 *indexp, int *genp)
+{
+ struct obd_uuid obd_uuid;
+ int cmd;
+ int rc = 0;
+
+ switch (cmd = lcfg->lcfg_command) {
+ case LCFG_LOV_ADD_OBD:
+ case LCFG_LOV_ADD_INA:
+ case LCFG_LOV_DEL_OBD: {
+ __u32 index;
+ int gen;
+ /* lov_modify_tgts add 0:lov_mdsA 1:ost1_UUID 2:0 3:1 */
+ if (LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(obd_uuid.uuid))
+ GOTO(out, rc = -EINVAL);
+
+ obd_str2uuid(&obd_uuid, lustre_cfg_buf(lcfg, 1));
+
+ if (sscanf(lustre_cfg_buf(lcfg, 2), "%d", indexp) != 1)
+ GOTO(out, rc = -EINVAL);
+ if (sscanf(lustre_cfg_buf(lcfg, 3), "%d", genp) != 1)
+ GOTO(out, rc = -EINVAL);
+ index = *indexp;
+ gen = *genp;
+ if (cmd == LCFG_LOV_ADD_OBD)
+ rc = lov_add_target(obd, &obd_uuid, index, gen, 1);
+ else if (cmd == LCFG_LOV_ADD_INA)
+ rc = lov_add_target(obd, &obd_uuid, index, gen, 0);
+ else
+ rc = lov_del_target(obd, index, &obd_uuid, gen);
+ GOTO(out, rc);
+ }
+ case LCFG_PARAM: {
+ struct lprocfs_static_vars lvars = { 0 };
+ struct lov_desc *desc = &(obd->u.lov.desc);
+
+ if (!desc)
+ GOTO(out, rc = -EINVAL);
+
+ lprocfs_lov_init_vars(&lvars);
+
+ rc = class_process_proc_param(PARAM_LOV, lvars.obd_vars,
+ lcfg, obd);
+ if (rc > 0)
+ rc = 0;
+ GOTO(out, rc);
+ }
+ case LCFG_POOL_NEW:
+ case LCFG_POOL_ADD:
+ case LCFG_POOL_DEL:
+ case LCFG_POOL_REM:
+ GOTO(out, rc);
+
+ default: {
+ CERROR("Unknown command: %d\n", lcfg->lcfg_command);
+ GOTO(out, rc = -EINVAL);
+
+ }
+ }
+out:
+ return rc;
+}
+
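+/* Re-create a specific object on the OST given by src_oa->o_nlink, after
+ * verifying that this OST actually holds one of the file's stripes and
+ * that the object id matches. */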
+static int lov_recreate(struct obd_export *exp, struct obdo *src_oa,
+ struct lov_stripe_md **ea, struct obd_trans_info *oti)
+{
+ struct lov_stripe_md *obj_mdp, *lsm;
+ struct lov_obd *lov = &exp->exp_obd->u.lov;
+ unsigned ost_idx;
+ int rc, i;
+
+ LASSERT(src_oa->o_valid & OBD_MD_FLFLAGS &&
+ src_oa->o_flags & OBD_FL_RECREATE_OBJS);
+
+ OBD_ALLOC(obj_mdp, sizeof(*obj_mdp));
+ if (obj_mdp == NULL)
+ return -ENOMEM;
+
+ ost_idx = src_oa->o_nlink;
+ lsm = *ea;
+ if (lsm == NULL)
+ GOTO(out, rc = -EINVAL);
+ if (ost_idx >= lov->desc.ld_tgt_count ||
+ !lov->lov_tgts[ost_idx])
+ GOTO(out, rc = -EINVAL);
+
+ for (i = 0; i < lsm->lsm_stripe_count; i++) {
+ if (lsm->lsm_oinfo[i]->loi_ost_idx == ost_idx) {
+ if (ostid_id(&lsm->lsm_oinfo[i]->loi_oi) !=
+ ostid_id(&src_oa->o_oi))
+ GOTO(out, rc = -EINVAL);
+ break;
+ }
+ }
+ if (i == lsm->lsm_stripe_count)
+ GOTO(out, rc = -EINVAL);
+
+ rc = obd_create(NULL, lov->lov_tgts[ost_idx]->ltd_exp,
+ src_oa, &obj_mdp, oti);
+out:
+ OBD_FREE(obj_mdp, sizeof(*obj_mdp));
+ return rc;
+}
+
+/* the LOV expects oa->o_id to be set to the LOV object id */
+static int lov_create(const struct lu_env *env, struct obd_export *exp,
+ struct obdo *src_oa, struct lov_stripe_md **ea,
+ struct obd_trans_info *oti)
+{
+ struct lov_obd *lov;
+ int rc = 0;
+
+ LASSERT(ea != NULL);
+ if (exp == NULL)
+ return -EINVAL;
+
+ if ((src_oa->o_valid & OBD_MD_FLFLAGS) &&
+ src_oa->o_flags == OBD_FL_DELORPHAN) {
+ /* should not be used with LOV anymore */
+ LBUG();
+ }
+
+ lov = &exp->exp_obd->u.lov;
+ if (!lov->desc.ld_active_tgt_count)
+ return -EIO;
+
+ obd_getref(exp->exp_obd);
+ /* Recreate a specific object id at the given OST index */
+ if ((src_oa->o_valid & OBD_MD_FLFLAGS) &&
+ (src_oa->o_flags & OBD_FL_RECREATE_OBJS)) {
+ rc = lov_recreate(exp, src_oa, ea, oti);
+ }
+
+ obd_putref(exp->exp_obd);
+ return rc;
+}
+
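+/* Sanity check that a striping descriptor is non-NULL and carries one of
+ * the known magics (LOV_MAGIC_V1 or LOV_MAGIC_V3). */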
+#define ASSERT_LSM_MAGIC(lsmp) \
+do { \
+ LASSERT((lsmp) != NULL); \
+ LASSERTF(((lsmp)->lsm_magic == LOV_MAGIC_V1 || \
+ (lsmp)->lsm_magic == LOV_MAGIC_V3), \
+ "%p->lsm_magic=%x\n", (lsmp), (lsmp)->lsm_magic); \
+} while (0)
+
+static int lov_destroy(const struct lu_env *env, struct obd_export *exp,
+ struct obdo *oa, struct lov_stripe_md *lsm,
+ struct obd_trans_info *oti, struct obd_export *md_exp,
+ void *capa)
+{
+ struct lov_request_set *set;
+ struct obd_info oinfo;
+ struct lov_request *req;
+ struct list_head *pos;
+ struct lov_obd *lov;
+ int rc = 0, err = 0;
+
+ ASSERT_LSM_MAGIC(lsm);
+
+ if (!exp || !exp->exp_obd)
+ return -ENODEV;
+
+ if (oa->o_valid & OBD_MD_FLCOOKIE) {
+ LASSERT(oti);
+ LASSERT(oti->oti_logcookies);
+ }
+
+ lov = &exp->exp_obd->u.lov;
+ obd_getref(exp->exp_obd);
+ rc = lov_prep_destroy_set(exp, &oinfo, oa, lsm, oti, &set);
+ if (rc)
+ GOTO(out, rc);
+
+ list_for_each (pos, &set->set_list) {
+ req = list_entry(pos, struct lov_request, rq_link);
+
+ if (oa->o_valid & OBD_MD_FLCOOKIE)
+ oti->oti_logcookies = set->set_cookies + req->rq_stripe;
+
+ err = obd_destroy(env, lov->lov_tgts[req->rq_idx]->ltd_exp,
+ req->rq_oi.oi_oa, NULL, oti, NULL, capa);
+ err = lov_update_common_set(set, req, err);
+ if (err) {
+ CERROR("%s: destroying objid "DOSTID" subobj "
+ DOSTID" on OST idx %d: rc = %d\n",
+ exp->exp_obd->obd_name, POSTID(&oa->o_oi),
+ POSTID(&req->rq_oi.oi_oa->o_oi),
+ req->rq_idx, err);
+ if (!rc)
+ rc = err;
+ }
+ }
+
+ if (rc == 0) {
+ LASSERT(lsm_op_find(lsm->lsm_magic) != NULL);
+ rc = lsm_op_find(lsm->lsm_magic)->lsm_destroy(lsm, oa, md_exp);
+ }
+ err = lov_fini_destroy_set(set);
+out:
+ obd_putref(exp->exp_obd);
+ return rc ? rc : err;
+}
+
+static int lov_getattr(const struct lu_env *env, struct obd_export *exp,
+ struct obd_info *oinfo)
+{
+ struct lov_request_set *set;
+ struct lov_request *req;
+ struct list_head *pos;
+ struct lov_obd *lov;
+ int err = 0, rc = 0;
+
+ LASSERT(oinfo);
+ ASSERT_LSM_MAGIC(oinfo->oi_md);
+
+ if (!exp || !exp->exp_obd)
+ return -ENODEV;
+
+ lov = &exp->exp_obd->u.lov;
+
+ rc = lov_prep_getattr_set(exp, oinfo, &set);
+ if (rc)
+ return rc;
+
+ list_for_each (pos, &set->set_list) {
+ req = list_entry(pos, struct lov_request, rq_link);
+
+ CDEBUG(D_INFO, "objid "DOSTID"[%d] has subobj "DOSTID" at idx"
+ " %u\n", POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe,
+ POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx);
+
+ rc = obd_getattr(env, lov->lov_tgts[req->rq_idx]->ltd_exp,
+ &req->rq_oi);
+ err = lov_update_common_set(set, req, rc);
+ if (err) {
+ CERROR("%s: getattr objid "DOSTID" subobj "
+ DOSTID" on OST idx %d: rc = %d\n",
+ exp->exp_obd->obd_name,
+ POSTID(&oinfo->oi_oa->o_oi),
+ POSTID(&req->rq_oi.oi_oa->o_oi),
+ req->rq_idx, err);
+ break;
+ }
+ }
+
+ rc = lov_fini_getattr_set(set);
+ if (err)
+ rc = err;
+ return rc;
+}
+
+static int lov_getattr_interpret(struct ptlrpc_request_set *rqset,
+ void *data, int rc)
+{
+ struct lov_request_set *lovset = (struct lov_request_set *)data;
+ int err;
+
+ /* don't do attribute merge if this async op failed */
+ if (rc)
+ atomic_set(&lovset->set_completes, 0);
+ err = lov_fini_getattr_set(lovset);
+ return rc ? rc : err;
+}
+
+static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
+ struct ptlrpc_request_set *rqset)
+{
+ struct lov_request_set *lovset;
+ struct lov_obd *lov;
+ struct list_head *pos;
+ struct lov_request *req;
+ int rc = 0, err;
+
+ LASSERT(oinfo);
+ ASSERT_LSM_MAGIC(oinfo->oi_md);
+
+ if (!exp || !exp->exp_obd)
+ return -ENODEV;
+
+ lov = &exp->exp_obd->u.lov;
+
+ rc = lov_prep_getattr_set(exp, oinfo, &lovset);
+ if (rc)
+ return rc;
+
+ CDEBUG(D_INFO, "objid "DOSTID": %ux%u byte stripes\n",
+ POSTID(&oinfo->oi_md->lsm_oi), oinfo->oi_md->lsm_stripe_count,
+ oinfo->oi_md->lsm_stripe_size);
+
+ list_for_each(pos, &lovset->set_list) {
+ req = list_entry(pos, struct lov_request, rq_link);
+
+ CDEBUG(D_INFO, "objid "DOSTID"[%d] has subobj "DOSTID" at idx"
+ "%u\n", POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe,
+ POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx);
+ rc = obd_getattr_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
+ &req->rq_oi, rqset);
+ if (rc) {
+ CERROR("%s: getattr objid "DOSTID" subobj"
+ DOSTID" on OST idx %d: rc = %d\n",
+ exp->exp_obd->obd_name,
+ POSTID(&oinfo->oi_oa->o_oi),
+ POSTID(&req->rq_oi.oi_oa->o_oi),
+ req->rq_idx, rc);
+ GOTO(out, rc);
+ }
+ }
+
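+ /* If requests were queued on rqset, let the interpret callback
+ * finalize the set once all replies have arrived; otherwise fall
+ * through and finalize it right away. */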
+ if (!list_empty(&rqset->set_requests)) {
+ LASSERT(rc == 0);
+ LASSERT(rqset->set_interpret == NULL);
+ rqset->set_interpret = lov_getattr_interpret;
+ rqset->set_arg = (void *)lovset;
+ return rc;
+ }
+out:
+ if (rc)
+ atomic_set(&lovset->set_completes, 0);
+ err = lov_fini_getattr_set(lovset);
+ return rc ? rc : err;
+}
+
+static int lov_setattr(const struct lu_env *env, struct obd_export *exp,
+ struct obd_info *oinfo, struct obd_trans_info *oti)
+{
+ struct lov_request_set *set;
+ struct lov_obd *lov;
+ struct list_head *pos;
+ struct lov_request *req;
+ int err = 0, rc = 0;
+
+ LASSERT(oinfo);
+ ASSERT_LSM_MAGIC(oinfo->oi_md);
+
+ if (!exp || !exp->exp_obd)
+ return -ENODEV;
+
+ /* for now, we only expect the following updates here */
+ LASSERT(!(oinfo->oi_oa->o_valid & ~(OBD_MD_FLID | OBD_MD_FLTYPE |
+ OBD_MD_FLMODE | OBD_MD_FLATIME |
+ OBD_MD_FLMTIME | OBD_MD_FLCTIME |
+ OBD_MD_FLFLAGS | OBD_MD_FLSIZE |
+ OBD_MD_FLGROUP | OBD_MD_FLUID |
+ OBD_MD_FLGID | OBD_MD_FLFID |
+ OBD_MD_FLGENER)));
+ lov = &exp->exp_obd->u.lov;
+ rc = lov_prep_setattr_set(exp, oinfo, oti, &set);
+ if (rc)
+ return rc;
+
+ list_for_each (pos, &set->set_list) {
+ req = list_entry(pos, struct lov_request, rq_link);
+
+ rc = obd_setattr(env, lov->lov_tgts[req->rq_idx]->ltd_exp,
+ &req->rq_oi, NULL);
+ err = lov_update_setattr_set(set, req, rc);
+ if (err) {
+ CERROR("%s: setattr objid "DOSTID" subobj "
+ DOSTID" on OST idx %d: rc = %d\n",
+ exp->exp_obd->obd_name,
+ POSTID(&set->set_oi->oi_oa->o_oi),
+ POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx,
+ err);
+ if (!rc)
+ rc = err;
+ }
+ }
+ err = lov_fini_setattr_set(set);
+ if (!rc)
+ rc = err;
+ return rc;
+}
+
+static int lov_setattr_interpret(struct ptlrpc_request_set *rqset,
+ void *data, int rc)
+{
+ struct lov_request_set *lovset = (struct lov_request_set *)data;
+ int err;
+
+ if (rc)
+ atomic_set(&lovset->set_completes, 0);
+ err = lov_fini_setattr_set(lovset);
+ return rc ? rc : err;
+}
+
+/* If @oti is given, the request comes from the MDS and responses from the
+ * OSTs are not needed. Otherwise, a client is waiting for the responses. */
+static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
+ struct obd_trans_info *oti,
+ struct ptlrpc_request_set *rqset)
+{
+ struct lov_request_set *set;
+ struct lov_request *req;
+ struct list_head *pos;
+ struct lov_obd *lov;
+ int rc = 0;
+
+ LASSERT(oinfo);
+ ASSERT_LSM_MAGIC(oinfo->oi_md);
+ if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) {
+ LASSERT(oti);
+ LASSERT(oti->oti_logcookies);
+ }
+
+ if (!exp || !exp->exp_obd)
+ return -ENODEV;
+
+ lov = &exp->exp_obd->u.lov;
+ rc = lov_prep_setattr_set(exp, oinfo, oti, &set);
+ if (rc)
+ return rc;
+
+ CDEBUG(D_INFO, "objid "DOSTID": %ux%u byte stripes\n",
+ POSTID(&oinfo->oi_md->lsm_oi),
+ oinfo->oi_md->lsm_stripe_count,
+ oinfo->oi_md->lsm_stripe_size);
+
+ list_for_each(pos, &set->set_list) {
+ req = list_entry(pos, struct lov_request, rq_link);
+
+ if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
+ oti->oti_logcookies = set->set_cookies + req->rq_stripe;
+
+ CDEBUG(D_INFO, "objid "DOSTID"[%d] has subobj "DOSTID" at idx"
+ "%u\n", POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe,
+ POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx);
+
+ rc = obd_setattr_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
+ &req->rq_oi, oti, rqset);
+ if (rc) {
+ CERROR("error: setattr objid "DOSTID" subobj"
+ DOSTID" on OST idx %d: rc = %d\n",
+ POSTID(&set->set_oi->oi_oa->o_oi),
+ POSTID(&req->rq_oi.oi_oa->o_oi),
+ req->rq_idx, rc);
+ break;
+ }
+ }
+
+ /* If we are not waiting for responses on async requests, return. */
+ if (rc || !rqset || list_empty(&rqset->set_requests)) {
+ int err;
+ if (rc)
+ atomic_set(&set->set_completes, 0);
+ err = lov_fini_setattr_set(set);
+ return rc ? rc : err;
+ }
+
+ LASSERT(rqset->set_interpret == NULL);
+ rqset->set_interpret = lov_setattr_interpret;
+ rqset->set_arg = (void *)set;
+
+ return 0;
+}
+
+static int lov_punch_interpret(struct ptlrpc_request_set *rqset,
+ void *data, int rc)
+{
+ struct lov_request_set *lovset = (struct lov_request_set *)data;
+ int err;
+
+ if (rc)
+ atomic_set(&lovset->set_completes, 0);
+ err = lov_fini_punch_set(lovset);
+ return rc ? rc : err;
+}
+
+/* FIXME: maybe we'll just make one node the authoritative attribute node, then
+ * we can send this 'punch' to just the authoritative node and the nodes
+ * that the punch will affect. */
+static int lov_punch(const struct lu_env *env, struct obd_export *exp,
+ struct obd_info *oinfo, struct obd_trans_info *oti,
+ struct ptlrpc_request_set *rqset)
+{
+ struct lov_request_set *set;
+ struct lov_obd *lov;
+ struct list_head *pos;
+ struct lov_request *req;
+ int rc = 0;
+
+ LASSERT(oinfo);
+ ASSERT_LSM_MAGIC(oinfo->oi_md);
+
+ if (!exp || !exp->exp_obd)
+ return -ENODEV;
+
+ lov = &exp->exp_obd->u.lov;
+ rc = lov_prep_punch_set(exp, oinfo, oti, &set);
+ if (rc)
+ return rc;
+
+ list_for_each (pos, &set->set_list) {
+ req = list_entry(pos, struct lov_request, rq_link);
+
+ rc = obd_punch(env, lov->lov_tgts[req->rq_idx]->ltd_exp,
+ &req->rq_oi, NULL, rqset);
+ if (rc) {
+ CERROR("%s: punch objid "DOSTID" subobj "DOSTID
+ " on OST idx %d: rc = %d\n",
+ exp->exp_obd->obd_name,
+ POSTID(&set->set_oi->oi_oa->o_oi),
+ POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx, rc);
+ break;
+ }
+ }
+
+ if (rc || list_empty(&rqset->set_requests)) {
+ int err;
+ err = lov_fini_punch_set(set);
+ return rc ? rc : err;
+ }
+
+ LASSERT(rqset->set_interpret == NULL);
+ rqset->set_interpret = lov_punch_interpret;
+ rqset->set_arg = (void *)set;
+
+ return 0;
+}
+
+static int lov_sync_interpret(struct ptlrpc_request_set *rqset,
+ void *data, int rc)
+{
+ struct lov_request_set *lovset = data;
+ int err;
+
+ if (rc)
+ atomic_set(&lovset->set_completes, 0);
+ err = lov_fini_sync_set(lovset);
+ return rc ?: err;
+}
+
+static int lov_sync(const struct lu_env *env, struct obd_export *exp,
+ struct obd_info *oinfo, obd_off start, obd_off end,
+ struct ptlrpc_request_set *rqset)
+{
+ struct lov_request_set *set = NULL;
+ struct lov_obd *lov;
+ struct list_head *pos;
+ struct lov_request *req;
+ int rc = 0;
+
+ ASSERT_LSM_MAGIC(oinfo->oi_md);
+ LASSERT(rqset != NULL);
+
+ if (!exp->exp_obd)
+ return -ENODEV;
+
+ lov = &exp->exp_obd->u.lov;
+ rc = lov_prep_sync_set(exp, oinfo, start, end, &set);
+ if (rc)
+ return rc;
+
+ CDEBUG(D_INFO, "fsync objid "DOSTID" ["LPX64", "LPX64"]\n",
+ POSTID(&set->set_oi->oi_oa->o_oi), start, end);
+
+ list_for_each (pos, &set->set_list) {
+ req = list_entry(pos, struct lov_request, rq_link);
+
+ rc = obd_sync(env, lov->lov_tgts[req->rq_idx]->ltd_exp,
+ &req->rq_oi, req->rq_oi.oi_policy.l_extent.start,
+ req->rq_oi.oi_policy.l_extent.end, rqset);
+ if (rc) {
+ CERROR("%s: fsync objid "DOSTID" subobj "DOSTID
+ " on OST idx %d: rc = %d\n",
+ exp->exp_obd->obd_name,
+ POSTID(&set->set_oi->oi_oa->o_oi),
+ POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx,
+ rc);
+ break;
+ }
+ }
+
+ /* If we are not waiting for responses on async requests, return. */
+ if (rc || list_empty(&rqset->set_requests)) {
+ int err = lov_fini_sync_set(set);
+
+ return rc ?: err;
+ }
+
+ LASSERT(rqset->set_interpret == NULL);
+ rqset->set_interpret = lov_sync_interpret;
+ rqset->set_arg = (void *)set;
+
+ return 0;
+}
+
+static int lov_brw_check(struct lov_obd *lov, struct obd_info *lov_oinfo,
+ obd_count oa_bufs, struct brw_page *pga)
+{
+ struct obd_info oinfo = { { { 0 } } };
+ int i, rc = 0;
+
+ oinfo.oi_oa = lov_oinfo->oi_oa;
+
+ /* The caller just wants to know if there's a chance that this
+ * I/O can succeed */
+ for (i = 0; i < oa_bufs; i++) {
+ int stripe = lov_stripe_number(lov_oinfo->oi_md, pga[i].off);
+ int ost = lov_oinfo->oi_md->lsm_oinfo[stripe]->loi_ost_idx;
+ obd_off start, end;
+
+ if (!lov_stripe_intersects(lov_oinfo->oi_md, i, pga[i].off,
+ pga[i].off + pga[i].count - 1,
+ &start, &end))
+ continue;
+
+ if (!lov->lov_tgts[ost] || !lov->lov_tgts[ost]->ltd_active) {
+ CDEBUG(D_HA, "lov idx %d inactive\n", ost);
+ return -EIO;
+ }
+
+ rc = obd_brw(OBD_BRW_CHECK, lov->lov_tgts[ost]->ltd_exp, &oinfo,
+ 1, &pga[i], NULL);
+ if (rc)
+ break;
+ }
+ return rc;
+}
+
+static int lov_brw(int cmd, struct obd_export *exp, struct obd_info *oinfo,
+ obd_count oa_bufs, struct brw_page *pga,
+ struct obd_trans_info *oti)
+{
+ struct lov_request_set *set;
+ struct lov_request *req;
+ struct list_head *pos;
+ struct lov_obd *lov = &exp->exp_obd->u.lov;
+ int err, rc = 0;
+
+ ASSERT_LSM_MAGIC(oinfo->oi_md);
+
+ if (cmd == OBD_BRW_CHECK) {
+ rc = lov_brw_check(lov, oinfo, oa_bufs, pga);
+ return rc;
+ }
+
+ rc = lov_prep_brw_set(exp, oinfo, oa_bufs, pga, oti, &set);
+ if (rc)
+ return rc;
+
+ list_for_each (pos, &set->set_list) {
+ struct obd_export *sub_exp;
+ struct brw_page *sub_pga;
+ req = list_entry(pos, struct lov_request, rq_link);
+
+ sub_exp = lov->lov_tgts[req->rq_idx]->ltd_exp;
+ sub_pga = set->set_pga + req->rq_pgaidx;
+ rc = obd_brw(cmd, sub_exp, &req->rq_oi, req->rq_oabufs,
+ sub_pga, oti);
+ if (rc)
+ break;
+ lov_update_common_set(set, req, rc);
+ }
+
+ err = lov_fini_brw_set(set);
+ if (!rc)
+ rc = err;
+ return rc;
+}
+
+static int lov_enqueue_interpret(struct ptlrpc_request_set *rqset,
+ void *data, int rc)
+{
+ struct lov_request_set *lovset = (struct lov_request_set *)data;
+
+ rc = lov_fini_enqueue_set(lovset, lovset->set_ei->ei_mode, rc, rqset);
+ return rc;
+}
+
+static int lov_enqueue(struct obd_export *exp, struct obd_info *oinfo,
+ struct ldlm_enqueue_info *einfo,
+ struct ptlrpc_request_set *rqset)
+{
+ ldlm_mode_t mode = einfo->ei_mode;
+ struct lov_request_set *set;
+ struct lov_request *req;
+ struct list_head *pos;
+ struct lov_obd *lov;
+ ldlm_error_t rc;
+
+ LASSERT(oinfo);
+ ASSERT_LSM_MAGIC(oinfo->oi_md);
+ LASSERT(mode == (mode & -mode));
+
+ /* we should never be asked to replay a lock this way. */
+ LASSERT((oinfo->oi_flags & LDLM_FL_REPLAY) == 0);
+
+ if (!exp || !exp->exp_obd)
+ return -ENODEV;
+
+ lov = &exp->exp_obd->u.lov;
+ rc = lov_prep_enqueue_set(exp, oinfo, einfo, &set);
+ if (rc)
+ return rc;
+
+ list_for_each (pos, &set->set_list) {
+ req = list_entry(pos, struct lov_request, rq_link);
+
+ rc = obd_enqueue(lov->lov_tgts[req->rq_idx]->ltd_exp,
+ &req->rq_oi, einfo, rqset);
+ if (rc != ELDLM_OK)
+ GOTO(out, rc);
+ }
+
+ if (rqset && !list_empty(&rqset->set_requests)) {
+ LASSERT(rc == 0);
+ LASSERT(rqset->set_interpret == NULL);
+ rqset->set_interpret = lov_enqueue_interpret;
+ rqset->set_arg = (void *)set;
+ return rc;
+ }
+out:
+ rc = lov_fini_enqueue_set(set, mode, rc, rqset);
+ return rc;
+}
+
+static int lov_change_cbdata(struct obd_export *exp,
+ struct lov_stripe_md *lsm, ldlm_iterator_t it,
+ void *data)
+{
+ struct lov_obd *lov;
+ int rc = 0, i;
+
+ ASSERT_LSM_MAGIC(lsm);
+
+ if (!exp || !exp->exp_obd)
+ return -ENODEV;
+
+ lov = &exp->exp_obd->u.lov;
+ for (i = 0; i < lsm->lsm_stripe_count; i++) {
+ struct lov_stripe_md submd;
+ struct lov_oinfo *loi = lsm->lsm_oinfo[i];
+
+ if (!lov->lov_tgts[loi->loi_ost_idx]) {
+ CDEBUG(D_HA, "lov idx %d NULL \n", loi->loi_ost_idx);
+ continue;
+ }
+
+ submd.lsm_oi = loi->loi_oi;
+ submd.lsm_stripe_count = 0;
+ rc = obd_change_cbdata(lov->lov_tgts[loi->loi_ost_idx]->ltd_exp,
+ &submd, it, data);
+ }
+ return rc;
+}
+
+/* Find any ldlm lock of the inode in the lov.
+ * Returns 0 if no lock is found, 1 if one is found, and < 0 on error. */
+static int lov_find_cbdata(struct obd_export *exp,
+ struct lov_stripe_md *lsm, ldlm_iterator_t it,
+ void *data)
+{
+ struct lov_obd *lov;
+ int rc = 0, i;
+
+ ASSERT_LSM_MAGIC(lsm);
+
+ if (!exp || !exp->exp_obd)
+ return -ENODEV;
+
+ lov = &exp->exp_obd->u.lov;
+ for (i = 0; i < lsm->lsm_stripe_count; i++) {
+ struct lov_stripe_md submd;
+ struct lov_oinfo *loi = lsm->lsm_oinfo[i];
+
+ if (!lov->lov_tgts[loi->loi_ost_idx]) {
+ CDEBUG(D_HA, "lov idx %d NULL \n", loi->loi_ost_idx);
+ continue;
+ }
+ submd.lsm_oi = loi->loi_oi;
+ submd.lsm_stripe_count = 0;
+ rc = obd_find_cbdata(lov->lov_tgts[loi->loi_ost_idx]->ltd_exp,
+ &submd, it, data);
+ if (rc != 0)
+ return rc;
+ }
+ return rc;
+}
+
+static int lov_cancel(struct obd_export *exp, struct lov_stripe_md *lsm,
+ __u32 mode, struct lustre_handle *lockh)
+{
+ struct lov_request_set *set;
+ struct obd_info oinfo;
+ struct lov_request *req;
+ struct list_head *pos;
+ struct lov_obd *lov;
+ struct lustre_handle *lov_lockhp;
+ int err = 0, rc = 0;
+
+ ASSERT_LSM_MAGIC(lsm);
+
+ if (!exp || !exp->exp_obd)
+ return -ENODEV;
+
+ LASSERT(lockh);
+ lov = &exp->exp_obd->u.lov;
+ rc = lov_prep_cancel_set(exp, &oinfo, lsm, mode, lockh, &set);
+ if (rc)
+ return rc;
+
+ list_for_each(pos, &set->set_list) {
+ req = list_entry(pos, struct lov_request, rq_link);
+ lov_lockhp = set->set_lockh->llh_handles + req->rq_stripe;
+
+ rc = obd_cancel(lov->lov_tgts[req->rq_idx]->ltd_exp,
+ req->rq_oi.oi_md, mode, lov_lockhp);
+ rc = lov_update_common_set(set, req, rc);
+ if (rc) {
+ CERROR("%s: cancel objid "DOSTID" subobj "
+ DOSTID" on OST idx %d: rc = %d\n",
+ exp->exp_obd->obd_name, POSTID(&lsm->lsm_oi),
+ POSTID(&req->rq_oi.oi_md->lsm_oi),
+ req->rq_idx, rc);
+ err = rc;
+ }
+
+ }
+ lov_fini_cancel_set(set);
+ return err;
+}
+
+static int lov_cancel_unused(struct obd_export *exp,
+ struct lov_stripe_md *lsm,
+ ldlm_cancel_flags_t flags, void *opaque)
+{
+ struct lov_obd *lov;
+ int rc = 0, i;
+
+ if (!exp || !exp->exp_obd)
+ return -ENODEV;
+
+ lov = &exp->exp_obd->u.lov;
+ if (lsm == NULL) {
+ for (i = 0; i < lov->desc.ld_tgt_count; i++) {
+ int err;
+ if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_exp)
+ continue;
+
+ err = obd_cancel_unused(lov->lov_tgts[i]->ltd_exp, NULL,
+ flags, opaque);
+ if (!rc)
+ rc = err;
+ }
+ return rc;
+ }
+
+ ASSERT_LSM_MAGIC(lsm);
+
+ for (i = 0; i < lsm->lsm_stripe_count; i++) {
+ struct lov_stripe_md submd;
+ struct lov_oinfo *loi = lsm->lsm_oinfo[i];
+ int idx = loi->loi_ost_idx;
+ int err;
+
+ if (!lov->lov_tgts[idx]) {
+ CDEBUG(D_HA, "lov idx %d NULL\n", idx);
+ continue;
+ }
+
+ if (!lov->lov_tgts[idx]->ltd_active)
+ CDEBUG(D_HA, "lov idx %d inactive\n", idx);
+
+ submd.lsm_oi = loi->loi_oi;
+ submd.lsm_stripe_count = 0;
+ err = obd_cancel_unused(lov->lov_tgts[idx]->ltd_exp,
+ &submd, flags, opaque);
+ if (err && lov->lov_tgts[idx]->ltd_active) {
+ CERROR("%s: cancel unused objid "DOSTID
+ " subobj "DOSTID" on OST idx %d: rc = %d\n",
+ exp->exp_obd->obd_name, POSTID(&lsm->lsm_oi),
+ POSTID(&loi->loi_oi), idx, err);
+ if (!rc)
+ rc = err;
+ }
+ }
+ return rc;
+}
+
+int lov_statfs_interpret(struct ptlrpc_request_set *rqset, void *data, int rc)
+{
+ struct lov_request_set *lovset = (struct lov_request_set *)data;
+ int err;
+
+ if (rc)
+ atomic_set(&lovset->set_completes, 0);
+
+ err = lov_fini_statfs_set(lovset);
+ return rc ? rc : err;
+}
+
+static int lov_statfs_async(struct obd_export *exp, struct obd_info *oinfo,
+ __u64 max_age, struct ptlrpc_request_set *rqset)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+ struct lov_request_set *set;
+ struct lov_request *req;
+ struct list_head *pos;
+ struct lov_obd *lov;
+ int rc = 0;
+
+ LASSERT(oinfo != NULL);
+ LASSERT(oinfo->oi_osfs != NULL);
+
+ lov = &obd->u.lov;
+ rc = lov_prep_statfs_set(obd, oinfo, &set);
+ if (rc)
+ return rc;
+
+ list_for_each (pos, &set->set_list) {
+ req = list_entry(pos, struct lov_request, rq_link);
+ rc = obd_statfs_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
+ &req->rq_oi, max_age, rqset);
+ if (rc)
+ break;
+ }
+
+ if (rc || list_empty(&rqset->set_requests)) {
+ int err;
+ if (rc)
+ atomic_set(&set->set_completes, 0);
+ err = lov_fini_statfs_set(set);
+ return rc ? rc : err;
+ }
+
+ LASSERT(rqset->set_interpret == NULL);
+ rqset->set_interpret = lov_statfs_interpret;
+ rqset->set_arg = (void *)set;
+ return 0;
+}
+
+static int lov_statfs(const struct lu_env *env, struct obd_export *exp,
+ struct obd_statfs *osfs, __u64 max_age, __u32 flags)
+{
+ struct ptlrpc_request_set *set = NULL;
+ struct obd_info oinfo = { { { 0 } } };
+ int rc = 0;
+
+ /* for obdclass we forbid using obd_statfs_rqset; issue async statfs
+ * requests instead */
+ set = ptlrpc_prep_set();
+ if (set == NULL)
+ return -ENOMEM;
+
+ oinfo.oi_osfs = osfs;
+ oinfo.oi_flags = flags;
+ rc = lov_statfs_async(exp, &oinfo, max_age, set);
+ if (rc == 0)
+ rc = ptlrpc_set_wait(set);
+ ptlrpc_set_destroy(set);
+
+ return rc;
+}
+
+static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
+ void *karg, void *uarg)
+{
+ struct obd_device *obddev = class_exp2obd(exp);
+ struct lov_obd *lov = &obddev->u.lov;
+ int i = 0, rc = 0, count = lov->desc.ld_tgt_count;
+ struct obd_uuid *uuidp;
+
+ switch (cmd) {
+ case IOC_OBD_STATFS: {
+ struct obd_ioctl_data *data = karg;
+ struct obd_device *osc_obd;
+ struct obd_statfs stat_buf = {0};
+ __u32 index;
+ __u32 flags;
+
+ memcpy(&index, data->ioc_inlbuf2, sizeof(__u32));
+ if (index >= count)
+ return -ENODEV;
+
+ if (!lov->lov_tgts[index])
+ /* Try again with the next index */
+ return -EAGAIN;
+ if (!lov->lov_tgts[index]->ltd_active)
+ return -ENODATA;
+
+ osc_obd = class_exp2obd(lov->lov_tgts[index]->ltd_exp);
+ if (!osc_obd)
+ return -EINVAL;
+
+ /* copy UUID */
+ if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(osc_obd),
+ min((int) data->ioc_plen2,
+ (int) sizeof(struct obd_uuid))))
+ return -EFAULT;
+
+ flags = uarg ? *(__u32*)uarg : 0;
+ /* got statfs data */
+ rc = obd_statfs(NULL, lov->lov_tgts[index]->ltd_exp, &stat_buf,
+ cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ flags);
+ if (rc)
+ return rc;
+ if (copy_to_user(data->ioc_pbuf1, &stat_buf,
+ min((int) data->ioc_plen1,
+ (int) sizeof(stat_buf))))
+ return -EFAULT;
+ break;
+ }
+ case OBD_IOC_LOV_GET_CONFIG: {
+ struct obd_ioctl_data *data;
+ struct lov_desc *desc;
+ char *buf = NULL;
+ __u32 *genp;
+
+ len = 0;
+ if (obd_ioctl_getdata(&buf, &len, (void *)uarg))
+ return -EINVAL;
+
+ data = (struct obd_ioctl_data *)buf;
+
+ if (sizeof(*desc) > data->ioc_inllen1) {
+ obd_ioctl_freedata(buf, len);
+ return -EINVAL;
+ }
+
+ if (sizeof(uuidp->uuid) * count > data->ioc_inllen2) {
+ obd_ioctl_freedata(buf, len);
+ return -EINVAL;
+ }
+
+ if (sizeof(__u32) * count > data->ioc_inllen3) {
+ obd_ioctl_freedata(buf, len);
+ return -EINVAL;
+ }
+
+ desc = (struct lov_desc *)data->ioc_inlbuf1;
+ memcpy(desc, &(lov->desc), sizeof(*desc));
+
+ uuidp = (struct obd_uuid *)data->ioc_inlbuf2;
+ genp = (__u32 *)data->ioc_inlbuf3;
+ /* the uuid will be empty for deleted OSTs */
+ for (i = 0; i < count; i++, uuidp++, genp++) {
+ if (!lov->lov_tgts[i])
+ continue;
+ *uuidp = lov->lov_tgts[i]->ltd_uuid;
+ *genp = lov->lov_tgts[i]->ltd_gen;
+ }
+
+ if (copy_to_user((void *)uarg, buf, len))
+ rc = -EFAULT;
+ obd_ioctl_freedata(buf, len);
+ break;
+ }
+ case LL_IOC_LOV_SETSTRIPE:
+ rc = lov_setstripe(exp, len, karg, uarg);
+ break;
+ case LL_IOC_LOV_GETSTRIPE:
+ rc = lov_getstripe(exp, karg, uarg);
+ break;
+ case LL_IOC_LOV_SETEA:
+ rc = lov_setea(exp, karg, uarg);
+ break;
+ case OBD_IOC_QUOTACTL: {
+ struct if_quotactl *qctl = karg;
+ struct lov_tgt_desc *tgt = NULL;
+ struct obd_quotactl *oqctl;
+
+ if (qctl->qc_valid == QC_OSTIDX) {
+ if (qctl->qc_idx < 0 || count <= qctl->qc_idx)
+ return -EINVAL;
+
+ tgt = lov->lov_tgts[qctl->qc_idx];
+ if (!tgt || !tgt->ltd_exp)
+ return -EINVAL;
+ } else if (qctl->qc_valid == QC_UUID) {
+ for (i = 0; i < count; i++) {
+ tgt = lov->lov_tgts[i];
+ if (!tgt ||
+ !obd_uuid_equals(&tgt->ltd_uuid,
+ &qctl->obd_uuid))
+ continue;
+
+ if (tgt->ltd_exp == NULL)
+ return -EINVAL;
+
+ break;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ if (i >= count)
+ return -EAGAIN;
+
+ LASSERT(tgt && tgt->ltd_exp);
+ OBD_ALLOC_PTR(oqctl);
+ if (!oqctl)
+ return -ENOMEM;
+
+ QCTL_COPY(oqctl, qctl);
+ rc = obd_quotactl(tgt->ltd_exp, oqctl);
+ if (rc == 0) {
+ QCTL_COPY(qctl, oqctl);
+ qctl->qc_valid = QC_OSTIDX;
+ qctl->obd_uuid = tgt->ltd_uuid;
+ }
+ OBD_FREE_PTR(oqctl);
+ break;
+ }
+ default: {
+ int set = 0;
+
+ if (count == 0)
+ return -ENOTTY;
+
+ for (i = 0; i < count; i++) {
+ int err;
+ struct obd_device *osc_obd;
+
+ /* OST was disconnected */
+ if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_exp)
+ continue;
+
+ /* ll_umount_begin() sets the force flag on the lov, not on the
+ * osc. Pass it through to the osc. */
+ osc_obd = class_exp2obd(lov->lov_tgts[i]->ltd_exp);
+ osc_obd->obd_force = obddev->obd_force;
+ err = obd_iocontrol(cmd, lov->lov_tgts[i]->ltd_exp,
+ len, karg, uarg);
+ if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) {
+ return err;
+ } else if (err) {
+ if (lov->lov_tgts[i]->ltd_active) {
+ CDEBUG(err == -ENOTTY ?
+ D_IOCTL : D_WARNING,
+ "iocontrol OSC %s on OST "
+ "idx %d cmd %x: err = %d\n",
+ lov_uuid2str(lov, i),
+ i, cmd, err);
+ if (!rc)
+ rc = err;
+ }
+ } else {
+ set = 1;
+ }
+ }
+ if (!set && !rc)
+ rc = -EIO;
+ }
+ }
+
+ return rc;
+}
+
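+/* Upper bound on the size of the per-OST scratch buffer used to collect
+ * fiemap extents before they are copied into the caller's buffer. */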
+#define FIEMAP_BUFFER_SIZE 4096
+
+/**
+ * A non-zero fe_logical indicates that this is a continuation FIEMAP call.
+ * The local (in-OST) end offset and the device are sent in the first
+ * fm_extent; this function maps that device back to a stripe number and
+ * decides where mapping should be restarted.
+ *
+ * It returns fm_end_offset, the in-OST offset at which mapping should be
+ * restarted, and sets *start_stripe to the stripe to restart on. If the
+ * returned fm_end_offset is 0, the caller will recalculate the proper
+ * offset in the next stripe.
+ * Note that the first extent is passed to lov_get_info via the value field.
+ *
+ * \param fiemap fiemap request header
+ * \param lsm striping information for the file
+ * \param fm_start logical start of mapping
+ * \param fm_end logical end of mapping
+ * \param start_stripe the stripe on which mapping restarts is returned here
+ */
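+/* Illustrative example (hypothetical values): if the previous call stopped
+ * on the OST backing stripe 1 at in-OST offset 0x80000, the continuation
+ * call carries fe_device = that OST's index and fe_logical = 0x80000; when
+ * the requested range still maps onto stripe 1 beyond that offset, this
+ * returns 0x80000 with *start_stripe = 1, otherwise it returns 0 with
+ * *start_stripe = 2 % stripe_count. */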
+obd_size fiemap_calc_fm_end_offset(struct ll_user_fiemap *fiemap,
+ struct lov_stripe_md *lsm, obd_size fm_start,
+ obd_size fm_end, int *start_stripe)
+{
+ obd_size local_end = fiemap->fm_extents[0].fe_logical;
+ obd_off lun_start, lun_end;
+ obd_size fm_end_offset;
+ int stripe_no = -1, i;
+
+ if (fiemap->fm_extent_count == 0 ||
+ fiemap->fm_extents[0].fe_logical == 0)
+ return 0;
+
+ /* Find out stripe_no from ost_index saved in the fe_device */
+ for (i = 0; i < lsm->lsm_stripe_count; i++) {
+ if (lsm->lsm_oinfo[i]->loi_ost_idx ==
+ fiemap->fm_extents[0].fe_device) {
+ stripe_no = i;
+ break;
+ }
+ }
+ if (stripe_no == -1)
+ return -EINVAL;
+
+ /* If we have finished mapping on previous device, shift logical
+ * offset to start of next device */
+ if ((lov_stripe_intersects(lsm, stripe_no, fm_start, fm_end,
+ &lun_start, &lun_end)) != 0 &&
+ local_end < lun_end) {
+ fm_end_offset = local_end;
+ *start_stripe = stripe_no;
+ } else {
+ /* This is a special value to indicate that the caller should
+ * calculate the offset in the next stripe. */
+ fm_end_offset = 0;
+ *start_stripe = (stripe_no + 1) % lsm->lsm_stripe_count;
+ }
+
+ return fm_end_offset;
+}
+
+/**
+ * We calculate on which OST the mapping will end. If the length of mapping
+ * is greater than (stripe_size * stripe_count) then last_stripe will be
+ * the one just before start_stripe. Otherwise we check whether the mapping
+ * intersects each OST and find last_stripe that way.
+ * This function returns the last_stripe and also sets the stripe_count
+ * over which the mapping is spread
+ *
+ * \param lsm striping information for the file
+ * \param fm_start logical start of mapping
+ * \param fm_end logical end of mapping
+ * \param start_stripe starting stripe of the mapping
+ * \param stripe_count the number of stripes across which to map is returned
+ *
+ * \retval last_stripe return the last stripe of the mapping
+ */
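+/* Illustrative example (hypothetical values): with stripe_count = 3 and a
+ * mapping longer than 3 * stripe_size starting on stripe 1, every stripe
+ * is covered, so *stripe_count is set to 3 and last_stripe is 0, the
+ * stripe just before start_stripe. */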
+int fiemap_calc_last_stripe(struct lov_stripe_md *lsm, obd_size fm_start,
+ obd_size fm_end, int start_stripe,
+ int *stripe_count)
+{
+ int last_stripe;
+ obd_off obd_start, obd_end;
+ int i, j;
+
+ if (fm_end - fm_start > lsm->lsm_stripe_size * lsm->lsm_stripe_count) {
+ last_stripe = (start_stripe < 1 ? lsm->lsm_stripe_count - 1 :
+ start_stripe - 1);
+ *stripe_count = lsm->lsm_stripe_count;
+ } else {
+ for (j = 0, i = start_stripe; j < lsm->lsm_stripe_count;
+ i = (i + 1) % lsm->lsm_stripe_count, j++) {
+ if ((lov_stripe_intersects(lsm, i, fm_start, fm_end,
+ &obd_start, &obd_end)) == 0)
+ break;
+ }
+ *stripe_count = j;
+ last_stripe = (start_stripe + j - 1) % lsm->lsm_stripe_count;
+ }
+
+ return last_stripe;
+}
+
+/**
+ * Set fe_device and copy extents from local buffer into main return buffer.
+ *
+ * \param fiemap fiemap request header
+ * \param lcl_fm_ext array of local fiemap extents to be copied
+ * \param ost_index OST index to be written into the fe_device field of
+ * each copied extent
+ * \param ext_count number of extents to be copied
+ * \param current_extent where to start copying in main extent array
+ */
+void fiemap_prepare_and_copy_exts(struct ll_user_fiemap *fiemap,
+ struct ll_fiemap_extent *lcl_fm_ext,
+ int ost_index, unsigned int ext_count,
+ int current_extent)
+{
+ char *to;
+ int ext;
+
+ for (ext = 0; ext < ext_count; ext++) {
+ lcl_fm_ext[ext].fe_device = ost_index;
+ lcl_fm_ext[ext].fe_flags |= FIEMAP_EXTENT_NET;
+ }
+
+ /* Copy fm_extent's from fm_local to return buffer */
+ to = (char *)fiemap + fiemap_count_to_size(current_extent);
+ memcpy(to, lcl_fm_ext, ext_count * sizeof(struct ll_fiemap_extent));
+}
+
+/**
+ * Break down the FIEMAP request and send appropriate calls to individual OSTs.
+ * This also handles the restarting of FIEMAP calls in case mapping overflows
+ * the available number of extents in a single call.
+ */
+static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key,
+ __u32 *vallen, void *val, struct lov_stripe_md *lsm)
+{
+ struct ll_fiemap_info_key *fm_key = key;
+ struct ll_user_fiemap *fiemap = val;
+ struct ll_user_fiemap *fm_local = NULL;
+ struct ll_fiemap_extent *lcl_fm_ext;
+ int count_local;
+ unsigned int get_num_extents = 0;
+ int ost_index = 0, actual_start_stripe, start_stripe;
+ obd_size fm_start, fm_end, fm_length, fm_end_offset;
+ obd_size curr_loc;
+ int current_extent = 0, rc = 0, i;
+ int ost_eof = 0; /* EOF for object */
+ int ost_done = 0; /* done with required mapping for this OST? */
+ int last_stripe;
+ int cur_stripe = 0, cur_stripe_wrap = 0, stripe_count;
+ unsigned int buffer_size = FIEMAP_BUFFER_SIZE;
+
+ if (!lsm_has_objects(lsm))
+ GOTO(out, rc = 0);
+
+ if (fiemap_count_to_size(fm_key->fiemap.fm_extent_count) < buffer_size)
+ buffer_size = fiemap_count_to_size(fm_key->fiemap.fm_extent_count);
+
+ OBD_ALLOC_LARGE(fm_local, buffer_size);
+ if (fm_local == NULL)
+ GOTO(out, rc = -ENOMEM);
+ lcl_fm_ext = &fm_local->fm_extents[0];
+
+ count_local = fiemap_size_to_count(buffer_size);
+
+ memcpy(fiemap, &fm_key->fiemap, sizeof(*fiemap));
+ fm_start = fiemap->fm_start;
+ fm_length = fiemap->fm_length;
+ /* Calculate start stripe, last stripe and length of mapping */
+ actual_start_stripe = start_stripe = lov_stripe_number(lsm, fm_start);
+ fm_end = (fm_length == ~0ULL ? fm_key->oa.o_size :
+ fm_start + fm_length - 1);
+ /* If fm_length != ~0ULL but fm_start+fm_length-1 exceeds file size */
+ if (fm_end > fm_key->oa.o_size)
+ fm_end = fm_key->oa.o_size;
+
+ last_stripe = fiemap_calc_last_stripe(lsm, fm_start, fm_end,
+ actual_start_stripe, &stripe_count);
+
+ fm_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, fm_start,
+ fm_end, &start_stripe);
+ if (fm_end_offset == -EINVAL)
+ GOTO(out, rc = -EINVAL);
+
+ if (fiemap->fm_extent_count == 0) {
+ get_num_extents = 1;
+ count_local = 0;
+ }
+
+ /* Check each stripe */
+ for (cur_stripe = start_stripe, i = 0; i < stripe_count;
+ i++, cur_stripe = (cur_stripe + 1) % lsm->lsm_stripe_count) {
+ obd_size req_fm_len; /* Stores length of required mapping */
+ obd_size len_mapped_single_call;
+ obd_off lun_start, lun_end, obd_object_end;
+ unsigned int ext_count;
+
+ cur_stripe_wrap = cur_stripe;
+
+ /* Find out range of mapping on this stripe */
+ if ((lov_stripe_intersects(lsm, cur_stripe, fm_start, fm_end,
+ &lun_start, &obd_object_end)) == 0)
+ continue;
+
+ /* If this is a continuation FIEMAP call and we are on
+ * starting stripe then lun_start needs to be set to
+ * fm_end_offset */
+ if (fm_end_offset != 0 && cur_stripe == start_stripe)
+ lun_start = fm_end_offset;
+
+ if (fm_length != ~0ULL) {
+ /* Handle fm_start + fm_length overflow */
+ if (fm_start + fm_length < fm_start)
+ fm_length = ~0ULL - fm_start;
+ lun_end = lov_size_to_stripe(lsm, fm_start + fm_length,
+ cur_stripe);
+ } else {
+ lun_end = ~0ULL;
+ }
+
+ if (lun_start == lun_end)
+ continue;
+
+ req_fm_len = obd_object_end - lun_start;
+ fm_local->fm_length = 0;
+ len_mapped_single_call = 0;
+
+ /* If the output buffer is very large and the objects have many
+ * extents we may need to loop on a single OST repeatedly */
+ ost_eof = 0;
+ ost_done = 0;
+ do {
+ if (get_num_extents == 0) {
+ /* Don't get too many extents. */
+ if (current_extent + count_local >
+ fiemap->fm_extent_count)
+ count_local = fiemap->fm_extent_count -
+ current_extent;
+ }
+
+ lun_start += len_mapped_single_call;
+ fm_local->fm_length = req_fm_len - len_mapped_single_call;
+ req_fm_len = fm_local->fm_length;
+ fm_local->fm_extent_count = count_local;
+ fm_local->fm_mapped_extents = 0;
+ fm_local->fm_flags = fiemap->fm_flags;
+
+ fm_key->oa.o_oi = lsm->lsm_oinfo[cur_stripe]->loi_oi;
+ ost_index = lsm->lsm_oinfo[cur_stripe]->loi_ost_idx;
+
+ if (ost_index < 0 || ost_index >= lov->desc.ld_tgt_count)
+ GOTO(out, rc = -EINVAL);
+
+ /* If OST is inactive, return extent with UNKNOWN flag */
+ if (!lov->lov_tgts[ost_index]->ltd_active) {
+ fm_local->fm_flags |= FIEMAP_EXTENT_LAST;
+ fm_local->fm_mapped_extents = 1;
+
+ lcl_fm_ext[0].fe_logical = lun_start;
+ lcl_fm_ext[0].fe_length = obd_object_end -
+ lun_start;
+ lcl_fm_ext[0].fe_flags |= FIEMAP_EXTENT_UNKNOWN;
+
+ goto inactive_tgt;
+ }
+
+ fm_local->fm_start = lun_start;
+ fm_local->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER;
+ memcpy(&fm_key->fiemap, fm_local, sizeof(*fm_local));
+ *vallen = fiemap_count_to_size(fm_local->fm_extent_count);
+ rc = obd_get_info(NULL,
+ lov->lov_tgts[ost_index]->ltd_exp,
+ keylen, key, vallen, fm_local, lsm);
+ if (rc != 0)
+ GOTO(out, rc);
+
+inactive_tgt:
+ ext_count = fm_local->fm_mapped_extents;
+ if (ext_count == 0) {
+ ost_done = 1;
+ /* If the last stripe has a hole at the end,
+ * we need to return */
+ if (cur_stripe_wrap == last_stripe) {
+ fiemap->fm_mapped_extents = 0;
+ goto finish;
+ }
+ break;
+ }
+
+ /* If we only need the number of extents, go to the next device */
+ if (get_num_extents) {
+ current_extent += ext_count;
+ break;
+ }
+
+ len_mapped_single_call = lcl_fm_ext[ext_count-1].fe_logical -
+ lun_start + lcl_fm_ext[ext_count - 1].fe_length;
+
+ /* Have we finished mapping on this device? */
+ if (req_fm_len <= len_mapped_single_call)
+ ost_done = 1;
+
+ /* Clear the EXTENT_LAST flag which can be present on
+ * last extent */
+ if (lcl_fm_ext[ext_count-1].fe_flags & FIEMAP_EXTENT_LAST)
+ lcl_fm_ext[ext_count - 1].fe_flags &=
+ ~FIEMAP_EXTENT_LAST;
+
+ curr_loc = lov_stripe_size(lsm,
+ lcl_fm_ext[ext_count - 1].fe_logical+
+ lcl_fm_ext[ext_count - 1].fe_length,
+ cur_stripe);
+ if (curr_loc >= fm_key->oa.o_size)
+ ost_eof = 1;
+
+ fiemap_prepare_and_copy_exts(fiemap, lcl_fm_ext,
+ ost_index, ext_count,
+ current_extent);
+
+ current_extent += ext_count;
+
+ /* Ran out of available extents? */
+ if (current_extent >= fiemap->fm_extent_count)
+ goto finish;
+ } while (ost_done == 0 && ost_eof == 0);
+
+ if (cur_stripe_wrap == last_stripe)
+ goto finish;
+ }
+
+finish:
+	/* Indicate that we are returning device offsets unless the file has
+	 * just a single stripe */
+ if (lsm->lsm_stripe_count > 1)
+ fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER;
+
+ if (get_num_extents)
+ goto skip_last_device_calc;
+
+ /* Check if we have reached the last stripe and whether mapping for that
+ * stripe is done. */
+ if (cur_stripe_wrap == last_stripe) {
+ if (ost_done || ost_eof)
+ fiemap->fm_extents[current_extent - 1].fe_flags |=
+ FIEMAP_EXTENT_LAST;
+ }
+
+skip_last_device_calc:
+ fiemap->fm_mapped_extents = current_extent;
+
+out:
+ OBD_FREE_LARGE(fm_local, buffer_size);
+ return rc;
+}
+
+static int lov_get_info(const struct lu_env *env, struct obd_export *exp,
+ __u32 keylen, void *key, __u32 *vallen, void *val,
+ struct lov_stripe_md *lsm)
+{
+ struct obd_device *obddev = class_exp2obd(exp);
+ struct lov_obd *lov = &obddev->u.lov;
+ int i, rc;
+
+ if (!vallen || !val)
+ return -EFAULT;
+
+ obd_getref(obddev);
+
+ if (KEY_IS(KEY_LOCK_TO_STRIPE)) {
+ struct {
+ char name[16];
+ struct ldlm_lock *lock;
+ } *data = key;
+ struct ldlm_res_id *res_id = &data->lock->l_resource->lr_name;
+ struct lov_oinfo *loi;
+ __u32 *stripe = val;
+
+ if (*vallen < sizeof(*stripe))
+ GOTO(out, rc = -EFAULT);
+ *vallen = sizeof(*stripe);
+
+ /* XXX This is another one of those bits that will need to
+ * change if we ever actually support nested LOVs. It uses
+ * the lock's export to find out which stripe it is. */
+ /* XXX - it's assumed all the locks for deleted OSTs have
+ * been cancelled. Also, the export for deleted OSTs will
+ * be NULL and won't match the lock's export. */
+ for (i = 0; i < lsm->lsm_stripe_count; i++) {
+ loi = lsm->lsm_oinfo[i];
+ if (!lov->lov_tgts[loi->loi_ost_idx])
+ continue;
+ if (lov->lov_tgts[loi->loi_ost_idx]->ltd_exp ==
+ data->lock->l_conn_export &&
+ ostid_res_name_eq(&loi->loi_oi, res_id)) {
+ *stripe = i;
+ GOTO(out, rc = 0);
+ }
+ }
+ LDLM_ERROR(data->lock, "lock on inode without such object");
+ dump_lsm(D_ERROR, lsm);
+ GOTO(out, rc = -ENXIO);
+ } else if (KEY_IS(KEY_LAST_ID)) {
+ struct obd_id_info *info = val;
+ __u32 size = sizeof(obd_id);
+ struct lov_tgt_desc *tgt;
+
+ LASSERT(*vallen == sizeof(struct obd_id_info));
+ tgt = lov->lov_tgts[info->idx];
+
+ if (!tgt || !tgt->ltd_active)
+ GOTO(out, rc = -ESRCH);
+
+ rc = obd_get_info(env, tgt->ltd_exp, keylen, key,
+ &size, info->data, NULL);
+ GOTO(out, rc = 0);
+ } else if (KEY_IS(KEY_LOVDESC)) {
+ struct lov_desc *desc_ret = val;
+ *desc_ret = lov->desc;
+
+ GOTO(out, rc = 0);
+ } else if (KEY_IS(KEY_FIEMAP)) {
+ rc = lov_fiemap(lov, keylen, key, vallen, val, lsm);
+ GOTO(out, rc);
+ } else if (KEY_IS(KEY_CONNECT_FLAG)) {
+ struct lov_tgt_desc *tgt;
+		__u64 ost_idx = *((__u64 *)val);
+
+ LASSERT(*vallen == sizeof(__u64));
+ LASSERT(ost_idx < lov->desc.ld_tgt_count);
+ tgt = lov->lov_tgts[ost_idx];
+
+ if (!tgt || !tgt->ltd_exp)
+ GOTO(out, rc = -ESRCH);
+
+ *((__u64 *)val) = exp_connect_flags(tgt->ltd_exp);
+ GOTO(out, rc = 0);
+ } else if (KEY_IS(KEY_TGT_COUNT)) {
+ *((int *)val) = lov->desc.ld_tgt_count;
+ GOTO(out, rc = 0);
+ }
+
+ rc = -EINVAL;
+
+out:
+ obd_putref(obddev);
+ return rc;
+}
+
+static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp,
+ obd_count keylen, void *key, obd_count vallen,
+ void *val, struct ptlrpc_request_set *set)
+{
+ struct obd_device *obddev = class_exp2obd(exp);
+ struct lov_obd *lov = &obddev->u.lov;
+ obd_count count;
+ int i, rc = 0, err;
+ struct lov_tgt_desc *tgt;
+ unsigned incr, check_uuid,
+ do_inactive, no_set;
+ unsigned next_id = 0, mds_con = 0, capa = 0;
+
+ incr = check_uuid = do_inactive = no_set = 0;
+ if (set == NULL) {
+ no_set = 1;
+ set = ptlrpc_prep_set();
+ if (!set)
+ return -ENOMEM;
+ }
+
+ obd_getref(obddev);
+ count = lov->desc.ld_tgt_count;
+
+ if (KEY_IS(KEY_NEXT_ID)) {
+ count = vallen / sizeof(struct obd_id_info);
+ vallen = sizeof(obd_id);
+ incr = sizeof(struct obd_id_info);
+ do_inactive = 1;
+ next_id = 1;
+ } else if (KEY_IS(KEY_CHECKSUM)) {
+ do_inactive = 1;
+ } else if (KEY_IS(KEY_EVICT_BY_NID)) {
+ /* use defaults: do_inactive = incr = 0; */
+ } else if (KEY_IS(KEY_MDS_CONN)) {
+ mds_con = 1;
+ } else if (KEY_IS(KEY_CAPA_KEY)) {
+ capa = 1;
+ } else if (KEY_IS(KEY_CACHE_SET)) {
+ LASSERT(lov->lov_cache == NULL);
+ lov->lov_cache = val;
+ do_inactive = 1;
+ }
+
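+	/* The flags set up above steer the loop below: "incr" advances "val"
+	 * once per target, "do_inactive" keeps inactive OSCs in the walk,
+	 * and next_id/mds_con/capa select the per-key payload that is passed
+	 * down to each target. */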
+ for (i = 0; i < count; i++, val = (char *)val + incr) {
+ if (next_id) {
+			tgt = lov->lov_tgts[((struct obd_id_info *)val)->idx];
+ } else {
+ tgt = lov->lov_tgts[i];
+ }
+ /* OST was disconnected */
+ if (!tgt || !tgt->ltd_exp)
+ continue;
+
+ /* OST is inactive and we don't want inactive OSCs */
+ if (!tgt->ltd_active && !do_inactive)
+ continue;
+
+ if (mds_con) {
+ struct mds_group_info *mgi;
+
+ LASSERT(vallen == sizeof(*mgi));
+ mgi = (struct mds_group_info *)val;
+
+ /* Only want a specific OSC */
+ if (mgi->uuid && !obd_uuid_equals(mgi->uuid,
+ &tgt->ltd_uuid))
+ continue;
+
+ err = obd_set_info_async(env, tgt->ltd_exp,
+ keylen, key, sizeof(int),
+ &mgi->group, set);
+ } else if (next_id) {
+ err = obd_set_info_async(env, tgt->ltd_exp,
+ keylen, key, vallen,
+					((struct obd_id_info *)val)->data, set);
+ } else if (capa) {
+			struct mds_capa_info *info = (struct mds_capa_info *)val;
+
+ LASSERT(vallen == sizeof(*info));
+
+ /* Only want a specific OSC */
+ if (info->uuid &&
+ !obd_uuid_equals(info->uuid, &tgt->ltd_uuid))
+ continue;
+
+ err = obd_set_info_async(env, tgt->ltd_exp, keylen,
+ key, sizeof(*info->capa),
+ info->capa, set);
+ } else {
+ /* Only want a specific OSC */
+ if (check_uuid &&
+ !obd_uuid_equals(val, &tgt->ltd_uuid))
+ continue;
+
+ err = obd_set_info_async(env, tgt->ltd_exp,
+ keylen, key, vallen, val, set);
+ }
+
+ if (!rc)
+ rc = err;
+ }
+
+ obd_putref(obddev);
+ if (no_set) {
+ err = ptlrpc_set_wait(set);
+ if (!rc)
+ rc = err;
+ ptlrpc_set_destroy(set);
+ }
+ return rc;
+}
+
+static int lov_extent_calc(struct obd_export *exp, struct lov_stripe_md *lsm,
+ int cmd, __u64 *offset)
+{
+ __u32 ssize = lsm->lsm_stripe_size;
+ __u64 start;
+
+ start = *offset;
+ lov_do_div64(start, ssize);
+ start = start * ssize;
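+	/* start is now *offset rounded down to a stripe-size boundary, e.g.
+	 * with a 1 MiB stripe size and *offset at 2.5 MiB, start becomes
+	 * 2 MiB, so OBD_CALC_STRIPE_START yields 2 MiB and
+	 * OBD_CALC_STRIPE_END yields 3 MiB - 1. */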
+
+ CDEBUG(D_DLMTRACE, "offset "LPU64", stripe %u, start "LPU64
+ ", end "LPU64"\n", *offset, ssize, start,
+ start + ssize - 1);
+ if (cmd == OBD_CALC_STRIPE_END) {
+ *offset = start + ssize - 1;
+ } else if (cmd == OBD_CALC_STRIPE_START) {
+ *offset = start;
+ } else {
+ LBUG();
+ }
+
+ return 0;
+}
+
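+/*
+ * Serialize updates to the lsm.  lsm_lock_owner records the pid of the
+ * current holder, so the assertions below catch recursive locking by the
+ * same task as well as unlocks from a task that does not hold the lock.
+ */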
+void lov_stripe_lock(struct lov_stripe_md *md)
+{
+ LASSERT(md->lsm_lock_owner != current_pid());
+ spin_lock(&md->lsm_lock);
+ LASSERT(md->lsm_lock_owner == 0);
+ md->lsm_lock_owner = current_pid();
+}
+EXPORT_SYMBOL(lov_stripe_lock);
+
+void lov_stripe_unlock(struct lov_stripe_md *md)
+{
+ LASSERT(md->lsm_lock_owner == current_pid());
+ md->lsm_lock_owner = 0;
+ spin_unlock(&md->lsm_lock);
+}
+EXPORT_SYMBOL(lov_stripe_unlock);
+
+static int lov_quotactl(struct obd_device *obd, struct obd_export *exp,
+ struct obd_quotactl *oqctl)
+{
+ struct lov_obd *lov = &obd->u.lov;
+ struct lov_tgt_desc *tgt;
+ __u64 curspace = 0;
+ __u64 bhardlimit = 0;
+ int i, rc = 0;
+
+ if (oqctl->qc_cmd != LUSTRE_Q_QUOTAON &&
+ oqctl->qc_cmd != LUSTRE_Q_QUOTAOFF &&
+ oqctl->qc_cmd != Q_GETOQUOTA &&
+ oqctl->qc_cmd != Q_INITQUOTA &&
+ oqctl->qc_cmd != LUSTRE_Q_SETQUOTA &&
+ oqctl->qc_cmd != Q_FINVALIDATE) {
+		CERROR("bad quota opc %x for lov obd\n", oqctl->qc_cmd);
+ return -EFAULT;
+ }
+
+ /* for lov tgt */
+ obd_getref(obd);
+ for (i = 0; i < lov->desc.ld_tgt_count; i++) {
+ int err;
+
+ tgt = lov->lov_tgts[i];
+
+ if (!tgt)
+ continue;
+
+ if (!tgt->ltd_active || tgt->ltd_reap) {
+ if (oqctl->qc_cmd == Q_GETOQUOTA &&
+ lov->lov_tgts[i]->ltd_activate) {
+ rc = -EREMOTEIO;
+ CERROR("ost %d is inactive\n", i);
+ } else {
+ CDEBUG(D_HA, "ost %d is inactive\n", i);
+ }
+ continue;
+ }
+
+ err = obd_quotactl(tgt->ltd_exp, oqctl);
+ if (err) {
+ if (tgt->ltd_active && !rc)
+ rc = err;
+ continue;
+ }
+
+ if (oqctl->qc_cmd == Q_GETOQUOTA) {
+ curspace += oqctl->qc_dqblk.dqb_curspace;
+ bhardlimit += oqctl->qc_dqblk.dqb_bhardlimit;
+ }
+ }
+ obd_putref(obd);
+
+ if (oqctl->qc_cmd == Q_GETOQUOTA) {
+ oqctl->qc_dqblk.dqb_curspace = curspace;
+ oqctl->qc_dqblk.dqb_bhardlimit = bhardlimit;
+ }
+ return rc;
+}
+
+static int lov_quotacheck(struct obd_device *obd, struct obd_export *exp,
+ struct obd_quotactl *oqctl)
+{
+ struct lov_obd *lov = &obd->u.lov;
+ int i, rc = 0;
+
+ obd_getref(obd);
+
+ for (i = 0; i < lov->desc.ld_tgt_count; i++) {
+ if (!lov->lov_tgts[i])
+ continue;
+
+ /* Skip quota check on the administratively disabled OSTs. */
+ if (!lov->lov_tgts[i]->ltd_activate) {
+			CWARN("lov idx %d was administratively disabled, skipping quotacheck on it\n",
+			      i);
+ continue;
+ }
+
+ if (!lov->lov_tgts[i]->ltd_active) {
+ CERROR("lov idx %d inactive\n", i);
+ rc = -EIO;
+ goto out;
+ }
+ }
+
+ for (i = 0; i < lov->desc.ld_tgt_count; i++) {
+ int err;
+
+ if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_activate)
+ continue;
+
+ err = obd_quotacheck(lov->lov_tgts[i]->ltd_exp, oqctl);
+ if (err && !rc)
+ rc = err;
+ }
+
+out:
+ obd_putref(obd);
+
+ return rc;
+}
+
+struct obd_ops lov_obd_ops = {
+ .o_owner = THIS_MODULE,
+ .o_setup = lov_setup,
+ .o_precleanup = lov_precleanup,
+ .o_cleanup = lov_cleanup,
+	/* .o_process_config = lov_process_config, */
+ .o_connect = lov_connect,
+ .o_disconnect = lov_disconnect,
+ .o_statfs = lov_statfs,
+ .o_statfs_async = lov_statfs_async,
+ .o_packmd = lov_packmd,
+ .o_unpackmd = lov_unpackmd,
+ .o_create = lov_create,
+ .o_destroy = lov_destroy,
+ .o_getattr = lov_getattr,
+ .o_getattr_async = lov_getattr_async,
+ .o_setattr = lov_setattr,
+ .o_setattr_async = lov_setattr_async,
+ .o_brw = lov_brw,
+ .o_merge_lvb = lov_merge_lvb,
+ .o_adjust_kms = lov_adjust_kms,
+ .o_punch = lov_punch,
+ .o_sync = lov_sync,
+ .o_enqueue = lov_enqueue,
+ .o_change_cbdata = lov_change_cbdata,
+ .o_find_cbdata = lov_find_cbdata,
+ .o_cancel = lov_cancel,
+ .o_cancel_unused = lov_cancel_unused,
+ .o_iocontrol = lov_iocontrol,
+ .o_get_info = lov_get_info,
+ .o_set_info_async = lov_set_info_async,
+ .o_extent_calc = lov_extent_calc,
+ .o_llog_init = lov_llog_init,
+ .o_llog_finish = lov_llog_finish,
+ .o_notify = lov_notify,
+ .o_pool_new = lov_pool_new,
+ .o_pool_rem = lov_pool_remove,
+ .o_pool_add = lov_pool_add,
+ .o_pool_del = lov_pool_del,
+ .o_getref = lov_getref,
+ .o_putref = lov_putref,
+ .o_quotactl = lov_quotactl,
+ .o_quotacheck = lov_quotacheck,
+};
+
+struct kmem_cache *lov_oinfo_slab;
+
+extern struct lu_kmem_descr lov_caches[];
+
+int __init lov_init(void)
+{
+ struct lprocfs_static_vars lvars = { 0 };
+ int rc;
+
+	/* print an address of _any_ initialized kernel symbol from this
+	 * module, to allow debugging with gdb that doesn't support data
+	 * symbols from modules. */
+ CDEBUG(D_INFO, "Lustre LOV module (%p).\n", &lov_caches);
+
+ rc = lu_kmem_init(lov_caches);
+ if (rc)
+ return rc;
+
+ lov_oinfo_slab = kmem_cache_create("lov_oinfo",
+ sizeof(struct lov_oinfo),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (lov_oinfo_slab == NULL) {
+ lu_kmem_fini(lov_caches);
+ return -ENOMEM;
+ }
+ lprocfs_lov_init_vars(&lvars);
+
+ rc = class_register_type(&lov_obd_ops, NULL, lvars.module_vars,
+ LUSTRE_LOV_NAME, &lov_device_type);
+
+ if (rc) {
+ kmem_cache_destroy(lov_oinfo_slab);
+ lu_kmem_fini(lov_caches);
+ }
+
+ return rc;
+}
+
+static void /*__exit*/ lov_exit(void)
+{
+ class_unregister_type(LUSTRE_LOV_NAME);
+ kmem_cache_destroy(lov_oinfo_slab);
+
+ lu_kmem_fini(lov_caches);
+}
+
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("Lustre Logical Object Volume OBD driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
+
+module_init(lov_init);
+module_exit(lov_exit);
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
new file mode 100644
index 0000000..84e55ce
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -0,0 +1,972 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Implementation of cl_object for LOV layer.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LOV
+
+#include "lov_cl_internal.h"
+#include <lustre_debug.h>
+
+/** \addtogroup lov
+ * @{
+ */
+
+/*****************************************************************************
+ *
+ * Layout operations.
+ *
+ */
+
+struct lov_layout_operations {
+ int (*llo_init)(const struct lu_env *env, struct lov_device *dev,
+ struct lov_object *lov,
+ const struct cl_object_conf *conf,
+ union lov_layout_state *state);
+ int (*llo_delete)(const struct lu_env *env, struct lov_object *lov,
+ union lov_layout_state *state);
+ void (*llo_fini)(const struct lu_env *env, struct lov_object *lov,
+ union lov_layout_state *state);
+ void (*llo_install)(const struct lu_env *env, struct lov_object *lov,
+ union lov_layout_state *state);
+ int (*llo_print)(const struct lu_env *env, void *cookie,
+ lu_printer_t p, const struct lu_object *o);
+ int (*llo_page_init)(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, struct page *vmpage);
+ int (*llo_lock_init)(const struct lu_env *env,
+ struct cl_object *obj, struct cl_lock *lock,
+ const struct cl_io *io);
+ int (*llo_io_init)(const struct lu_env *env,
+ struct cl_object *obj, struct cl_io *io);
+ int (*llo_getattr)(const struct lu_env *env, struct cl_object *obj,
+ struct cl_attr *attr);
+};
+
+static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov);
+
+/*****************************************************************************
+ *
+ * Lov object layout operations.
+ *
+ */
+
+static void lov_install_empty(const struct lu_env *env,
+ struct lov_object *lov,
+ union lov_layout_state *state)
+{
+ /*
+ * File without objects.
+ */
+}
+
+static int lov_init_empty(const struct lu_env *env,
+ struct lov_device *dev, struct lov_object *lov,
+ const struct cl_object_conf *conf,
+ union lov_layout_state *state)
+{
+ return 0;
+}
+
+static void lov_install_raid0(const struct lu_env *env,
+ struct lov_object *lov,
+ union lov_layout_state *state)
+{
+}
+
+static struct cl_object *lov_sub_find(const struct lu_env *env,
+ struct cl_device *dev,
+ const struct lu_fid *fid,
+ const struct cl_object_conf *conf)
+{
+ struct lu_object *o;
+
+ o = lu_object_find_at(env, cl2lu_dev(dev), fid, &conf->coc_lu);
+ LASSERT(ergo(!IS_ERR(o), o->lo_dev->ld_type == &lovsub_device_type));
+ return lu2cl(o);
+}
+
+static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
+ struct cl_object *stripe,
+ struct lov_layout_raid0 *r0, int idx)
+{
+ struct cl_object_header *hdr;
+ struct cl_object_header *subhdr;
+ struct cl_object_header *parent;
+ struct lov_oinfo *oinfo;
+ int result;
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_LOV_INIT)) {
+ /* For sanity:test_206.
+ * Do not leave the object in cache to avoid accessing
+ * freed memory. This is because osc_object is referring to
+ * lov_oinfo of lsm_stripe_data which will be freed due to
+ * this failure. */
+ cl_object_kill(env, stripe);
+ cl_object_put(env, stripe);
+ return -EIO;
+ }
+
+ hdr = cl_object_header(lov2cl(lov));
+ subhdr = cl_object_header(stripe);
+ parent = subhdr->coh_parent;
+
+ oinfo = lov->lo_lsm->lsm_oinfo[idx];
+ CDEBUG(D_INODE, DFID"@%p[%d] -> "DFID"@%p: ostid: "DOSTID
+ " idx: %d gen: %d\n",
+ PFID(&subhdr->coh_lu.loh_fid), subhdr, idx,
+ PFID(&hdr->coh_lu.loh_fid), hdr, POSTID(&oinfo->loi_oi),
+ oinfo->loi_ost_idx, oinfo->loi_ost_gen);
+
+ if (parent == NULL) {
+ subhdr->coh_parent = hdr;
+ subhdr->coh_nesting = hdr->coh_nesting + 1;
+ lu_object_ref_add(&stripe->co_lu, "lov-parent", lov);
+ r0->lo_sub[idx] = cl2lovsub(stripe);
+ r0->lo_sub[idx]->lso_super = lov;
+ r0->lo_sub[idx]->lso_index = idx;
+ result = 0;
+ } else {
+ struct lu_object *old_obj;
+ struct lov_object *old_lov;
+ unsigned int mask = D_INODE;
+
+ old_obj = lu_object_locate(&parent->coh_lu, &lov_device_type);
+ LASSERT(old_obj != NULL);
+ old_lov = cl2lov(lu2cl(old_obj));
+ if (old_lov->lo_layout_invalid) {
+ /* the object's layout has already changed but isn't
+ * refreshed */
+ lu_object_unhash(env, &stripe->co_lu);
+ result = -EAGAIN;
+ } else {
+ mask = D_ERROR;
+ result = -EIO;
+ }
+
+ LU_OBJECT_DEBUG(mask, env, &stripe->co_lu,
+ "stripe %d is already owned.\n", idx);
+ LU_OBJECT_DEBUG(mask, env, old_obj, "owned.\n");
+ LU_OBJECT_HEADER(mask, env, lov2lu(lov), "try to own.\n");
+ cl_object_put(env, stripe);
+ }
+ return result;
+}
+
+static int lov_init_raid0(const struct lu_env *env,
+ struct lov_device *dev, struct lov_object *lov,
+ const struct cl_object_conf *conf,
+ union lov_layout_state *state)
+{
+ int result;
+ int i;
+
+ struct cl_object *stripe;
+ struct lov_thread_info *lti = lov_env_info(env);
+ struct cl_object_conf *subconf = &lti->lti_stripe_conf;
+ struct lov_stripe_md *lsm = conf->u.coc_md->lsm;
+ struct lu_fid *ofid = &lti->lti_fid;
+ struct lov_layout_raid0 *r0 = &state->raid0;
+
+ if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) {
+ dump_lsm(D_ERROR, lsm);
+ LASSERTF(0, "magic mismatch, expected %d/%d, actual %d.\n",
+ LOV_MAGIC_V1, LOV_MAGIC_V3, lsm->lsm_magic);
+ }
+
+ LASSERT(lov->lo_lsm == NULL);
+ lov->lo_lsm = lsm_addref(lsm);
+ r0->lo_nr = lsm->lsm_stripe_count;
+ LASSERT(r0->lo_nr <= lov_targets_nr(dev));
+
+	OBD_ALLOC_LARGE(r0->lo_sub, r0->lo_nr * sizeof(r0->lo_sub[0]));
+ if (r0->lo_sub != NULL) {
+ result = 0;
+ subconf->coc_inode = conf->coc_inode;
+ spin_lock_init(&r0->lo_sub_lock);
+ /*
+ * Create stripe cl_objects.
+ */
+ for (i = 0; i < r0->lo_nr && result == 0; ++i) {
+ struct cl_device *subdev;
+ struct lov_oinfo *oinfo = lsm->lsm_oinfo[i];
+ int ost_idx = oinfo->loi_ost_idx;
+
+ result = ostid_to_fid(ofid, &oinfo->loi_oi,
+ oinfo->loi_ost_idx);
+ if (result != 0)
+ GOTO(out, result);
+
+ subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
+ subconf->u.coc_oinfo = oinfo;
+ LASSERTF(subdev != NULL, "not init ost %d\n", ost_idx);
+ /* In the function below, .hs_keycmp resolves to
+ * lu_obj_hop_keycmp() */
+ /* coverity[overrun-buffer-val] */
+ stripe = lov_sub_find(env, subdev, ofid, subconf);
+ if (!IS_ERR(stripe)) {
+ result = lov_init_sub(env, lov, stripe, r0, i);
+ if (result == -EAGAIN) { /* try again */
+ --i;
+ result = 0;
+ }
+ } else {
+ result = PTR_ERR(stripe);
+ }
+ }
+	} else {
+		result = -ENOMEM;
+	}
+out:
+ return result;
+}
+
+static int lov_init_released(const struct lu_env *env,
+ struct lov_device *dev, struct lov_object *lov,
+ const struct cl_object_conf *conf,
+ union lov_layout_state *state)
+{
+ struct lov_stripe_md *lsm = conf->u.coc_md->lsm;
+
+ LASSERT(lsm != NULL);
+ LASSERT(lsm_is_released(lsm));
+ LASSERT(lov->lo_lsm == NULL);
+
+ lov->lo_lsm = lsm_addref(lsm);
+ return 0;
+}
+
+static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
+ union lov_layout_state *state)
+{
+ LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
+
+ lov_layout_wait(env, lov);
+
+ cl_object_prune(env, &lov->lo_cl);
+ return 0;
+}
+
+static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
+ struct lovsub_object *los, int idx)
+{
+ struct cl_object *sub;
+ struct lov_layout_raid0 *r0;
+ struct lu_site *site;
+ struct lu_site_bkt_data *bkt;
+ wait_queue_t *waiter;
+
+ r0 = &lov->u.raid0;
+ LASSERT(r0->lo_sub[idx] == los);
+
+ sub = lovsub2cl(los);
+ site = sub->co_lu.lo_dev->ld_site;
+ bkt = lu_site_bkt_from_fid(site, &sub->co_lu.lo_header->loh_fid);
+
+ cl_object_kill(env, sub);
+ /* release a reference to the sub-object and ... */
+ lu_object_ref_del(&sub->co_lu, "lov-parent", lov);
+ cl_object_put(env, sub);
+
+ /* ... wait until it is actually destroyed---sub-object clears its
+ * ->lo_sub[] slot in lovsub_object_fini() */
+ if (r0->lo_sub[idx] == los) {
+ waiter = &lov_env_info(env)->lti_waiter;
+ init_waitqueue_entry_current(waiter);
+ add_wait_queue(&bkt->lsb_marche_funebre, waiter);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ while (1) {
+ /* this wait-queue is signaled at the end of
+ * lu_object_free(). */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ spin_lock(&r0->lo_sub_lock);
+ if (r0->lo_sub[idx] == los) {
+ spin_unlock(&r0->lo_sub_lock);
+ waitq_wait(waiter, TASK_UNINTERRUPTIBLE);
+ } else {
+ spin_unlock(&r0->lo_sub_lock);
+ set_current_state(TASK_RUNNING);
+ break;
+ }
+ }
+ remove_wait_queue(&bkt->lsb_marche_funebre, waiter);
+ }
+ LASSERT(r0->lo_sub[idx] == NULL);
+}
+
+static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
+ union lov_layout_state *state)
+{
+ struct lov_layout_raid0 *r0 = &state->raid0;
+ struct lov_stripe_md *lsm = lov->lo_lsm;
+ int i;
+
+ dump_lsm(D_INODE, lsm);
+
+ lov_layout_wait(env, lov);
+ if (r0->lo_sub != NULL) {
+ for (i = 0; i < r0->lo_nr; ++i) {
+ struct lovsub_object *los = r0->lo_sub[i];
+
+ if (los != NULL) {
+ cl_locks_prune(env, &los->lso_cl, 1);
+ /*
+ * If top-level object is to be evicted from
+ * the cache, so are its sub-objects.
+ */
+ lov_subobject_kill(env, lov, los, i);
+ }
+ }
+ }
+ cl_object_prune(env, &lov->lo_cl);
+ return 0;
+}
+
+static void lov_fini_empty(const struct lu_env *env, struct lov_object *lov,
+ union lov_layout_state *state)
+{
+ LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
+}
+
+static void lov_fini_raid0(const struct lu_env *env, struct lov_object *lov,
+ union lov_layout_state *state)
+{
+ struct lov_layout_raid0 *r0 = &state->raid0;
+
+ if (r0->lo_sub != NULL) {
+		OBD_FREE_LARGE(r0->lo_sub, r0->lo_nr * sizeof(r0->lo_sub[0]));
+ r0->lo_sub = NULL;
+ }
+
+ dump_lsm(D_INODE, lov->lo_lsm);
+ lov_free_memmd(&lov->lo_lsm);
+}
+
+static void lov_fini_released(const struct lu_env *env, struct lov_object *lov,
+ union lov_layout_state *state)
+{
+ dump_lsm(D_INODE, lov->lo_lsm);
+ lov_free_memmd(&lov->lo_lsm);
+}
+
+static int lov_print_empty(const struct lu_env *env, void *cookie,
+ lu_printer_t p, const struct lu_object *o)
+{
+ (*p)(env, cookie, "empty %d\n", lu2lov(o)->lo_layout_invalid);
+ return 0;
+}
+
+static int lov_print_raid0(const struct lu_env *env, void *cookie,
+ lu_printer_t p, const struct lu_object *o)
+{
+ struct lov_object *lov = lu2lov(o);
+ struct lov_layout_raid0 *r0 = lov_r0(lov);
+ struct lov_stripe_md *lsm = lov->lo_lsm;
+ int i;
+
+	(*p)(env, cookie, "stripes: %d, %svalid, lsm{%p 0x%08X %d %u %u}:\n",
+ r0->lo_nr, lov->lo_layout_invalid ? "in" : "", lsm,
+ lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
+ lsm->lsm_stripe_count, lsm->lsm_layout_gen);
+ for (i = 0; i < r0->lo_nr; ++i) {
+ struct lu_object *sub;
+
+ if (r0->lo_sub[i] != NULL) {
+ sub = lovsub2lu(r0->lo_sub[i]);
+ lu_object_print(env, cookie, p, sub);
+ } else
+ (*p)(env, cookie, "sub %d absent\n", i);
+ }
+ return 0;
+}
+
+static int lov_print_released(const struct lu_env *env, void *cookie,
+ lu_printer_t p, const struct lu_object *o)
+{
+ (*p)(env, cookie, "released\n");
+ return 0;
+}
+
+/**
+ * Implements cl_object_operations::coo_attr_get() method for an object
+ * without stripes (LLT_EMPTY layout type).
+ *
+ * The only attribute this layer is authoritative for in this case is
+ * cl_attr::cat_blocks, which is 0.
+ */
+static int lov_attr_get_empty(const struct lu_env *env, struct cl_object *obj,
+ struct cl_attr *attr)
+{
+ attr->cat_blocks = 0;
+ return 0;
+}
+
+static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj,
+ struct cl_attr *attr)
+{
+ struct lov_object *lov = cl2lov(obj);
+ struct lov_layout_raid0 *r0 = lov_r0(lov);
+ struct cl_attr *lov_attr = &r0->lo_attr;
+ int result = 0;
+
+	/* This is called without holding the type guard mutex, so it must be
+	 * inside an ongoing IO, otherwise the lsm may be replaced.
+	 * LU-2117: it turns out there is one exception.  For mmapped files,
+	 * a lock on those files may be requested in another file's IO
+	 * context, and since this function is then called in
+	 * ccc_lock_state() it would hit this assertion.
+	 * Anyway, it's still okay to call attr_get without the type guard,
+	 * as the layout can't go away while locks exist. */
+ /* LASSERT(atomic_read(&lsm->lsm_refc) > 1); */
+
+ if (!r0->lo_attr_valid) {
+ struct lov_stripe_md *lsm = lov->lo_lsm;
+ struct ost_lvb *lvb = &lov_env_info(env)->lti_lvb;
+ __u64 kms = 0;
+
+ memset(lvb, 0, sizeof(*lvb));
+ /* XXX: timestamps can be negative by sanity:test_39m,
+ * how can it be? */
+ lvb->lvb_atime = LLONG_MIN;
+ lvb->lvb_ctime = LLONG_MIN;
+ lvb->lvb_mtime = LLONG_MIN;
+
+ /*
+ * XXX that should be replaced with a loop over sub-objects,
+ * doing cl_object_attr_get() on them. But for now, let's
+ * reuse old lov code.
+ */
+
+ /*
+ * XXX take lsm spin-lock to keep lov_merge_lvb_kms()
+ * happy. It's not needed, because new code uses
+ * ->coh_attr_guard spin-lock to protect consistency of
+ * sub-object attributes.
+ */
+ lov_stripe_lock(lsm);
+ result = lov_merge_lvb_kms(lsm, lvb, &kms);
+ lov_stripe_unlock(lsm);
+ if (result == 0) {
+ cl_lvb2attr(lov_attr, lvb);
+ lov_attr->cat_kms = kms;
+ r0->lo_attr_valid = 1;
+ }
+ }
+ if (result == 0) { /* merge results */
+ attr->cat_blocks = lov_attr->cat_blocks;
+ attr->cat_size = lov_attr->cat_size;
+ attr->cat_kms = lov_attr->cat_kms;
+ if (attr->cat_atime < lov_attr->cat_atime)
+ attr->cat_atime = lov_attr->cat_atime;
+ if (attr->cat_ctime < lov_attr->cat_ctime)
+ attr->cat_ctime = lov_attr->cat_ctime;
+ if (attr->cat_mtime < lov_attr->cat_mtime)
+ attr->cat_mtime = lov_attr->cat_mtime;
+ }
+ return result;
+}
+
+static const struct lov_layout_operations lov_dispatch[] = {
+ [LLT_EMPTY] = {
+ .llo_init = lov_init_empty,
+ .llo_delete = lov_delete_empty,
+ .llo_fini = lov_fini_empty,
+ .llo_install = lov_install_empty,
+ .llo_print = lov_print_empty,
+ .llo_page_init = lov_page_init_empty,
+ .llo_lock_init = lov_lock_init_empty,
+ .llo_io_init = lov_io_init_empty,
+ .llo_getattr = lov_attr_get_empty
+ },
+ [LLT_RAID0] = {
+ .llo_init = lov_init_raid0,
+ .llo_delete = lov_delete_raid0,
+ .llo_fini = lov_fini_raid0,
+ .llo_install = lov_install_raid0,
+ .llo_print = lov_print_raid0,
+ .llo_page_init = lov_page_init_raid0,
+ .llo_lock_init = lov_lock_init_raid0,
+ .llo_io_init = lov_io_init_raid0,
+ .llo_getattr = lov_attr_get_raid0
+ },
+ [LLT_RELEASED] = {
+ .llo_init = lov_init_released,
+ .llo_delete = lov_delete_empty,
+ .llo_fini = lov_fini_released,
+ .llo_install = lov_install_empty,
+ .llo_print = lov_print_released,
+ .llo_page_init = lov_page_init_empty,
+ .llo_lock_init = lov_lock_init_empty,
+ .llo_io_init = lov_io_init_released,
+ .llo_getattr = lov_attr_get_empty
+ }
+};
+
+/**
+ * Performs a double-dispatch based on the layout type of an object.
+ */
+#define LOV_2DISPATCH_NOLOCK(obj, op, ...) \
+({ \
+ struct lov_object *__obj = (obj); \
+ enum lov_layout_type __llt; \
+ \
+ __llt = __obj->lo_type; \
+ LASSERT(0 <= __llt && __llt < ARRAY_SIZE(lov_dispatch)); \
+ lov_dispatch[__llt].op(__VA_ARGS__); \
+})
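+
+/*
+ * A typical call site looks like the one in lov_attr_get() below, roughly:
+ *
+ *	return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr);
+ */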
+
+/**
+ * Return lov_layout_type associated with a given lsm
+ */
+enum lov_layout_type lov_type(struct lov_stripe_md *lsm)
+{
+ if (lsm == NULL)
+ return LLT_EMPTY;
+ if (lsm_is_released(lsm))
+ return LLT_RELEASED;
+ return LLT_RAID0;
+}
+
+static inline void lov_conf_freeze(struct lov_object *lov)
+{
+ if (lov->lo_owner != current)
+ down_read(&lov->lo_type_guard);
+}
+
+static inline void lov_conf_thaw(struct lov_object *lov)
+{
+ if (lov->lo_owner != current)
+ up_read(&lov->lo_type_guard);
+}
+
+#define LOV_2DISPATCH_MAYLOCK(obj, op, lock, ...) \
+({ \
+ struct lov_object *__obj = (obj); \
+ int __lock = !!(lock); \
+ typeof(lov_dispatch[0].op(__VA_ARGS__)) __result; \
+ \
+ if (__lock) \
+ lov_conf_freeze(__obj); \
+ __result = LOV_2DISPATCH_NOLOCK(obj, op, __VA_ARGS__); \
+ if (__lock) \
+ lov_conf_thaw(__obj); \
+ __result; \
+})
+
+/**
+ * Performs a locked double-dispatch based on the layout type of an object.
+ */
+#define LOV_2DISPATCH(obj, op, ...) \
+ LOV_2DISPATCH_MAYLOCK(obj, op, 1, __VA_ARGS__)
+
+#define LOV_2DISPATCH_VOID(obj, op, ...) \
+do { \
+ struct lov_object *__obj = (obj); \
+ enum lov_layout_type __llt; \
+ \
+ lov_conf_freeze(__obj); \
+ __llt = __obj->lo_type; \
+ LASSERT(0 <= __llt && __llt < ARRAY_SIZE(lov_dispatch)); \
+ lov_dispatch[__llt].op(__VA_ARGS__); \
+ lov_conf_thaw(__obj); \
+} while (0)
+
+static void lov_conf_lock(struct lov_object *lov)
+{
+ LASSERT(lov->lo_owner != current);
+ down_write(&lov->lo_type_guard);
+ LASSERT(lov->lo_owner == NULL);
+ lov->lo_owner = current;
+}
+
+static void lov_conf_unlock(struct lov_object *lov)
+{
+ lov->lo_owner = NULL;
+ up_write(&lov->lo_type_guard);
+}
+
+static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
+{
+ struct l_wait_info lwi = { 0 };
+
+ while (atomic_read(&lov->lo_active_ios) > 0) {
+ CDEBUG(D_INODE, "file:"DFID" wait for active IO, now: %d.\n",
+ PFID(lu_object_fid(lov2lu(lov))),
+ atomic_read(&lov->lo_active_ios));
+
+ l_wait_event(lov->lo_waitq,
+ atomic_read(&lov->lo_active_ios) == 0, &lwi);
+ }
+ return 0;
+}
+
+static int lov_layout_change(const struct lu_env *unused,
+ struct lov_object *lov,
+ const struct cl_object_conf *conf)
+{
+ int result;
+ enum lov_layout_type llt = LLT_EMPTY;
+ union lov_layout_state *state = &lov->u;
+ const struct lov_layout_operations *old_ops;
+ const struct lov_layout_operations *new_ops;
+
+ struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
+ void *cookie;
+ struct lu_env *env;
+ int refcheck;
+
+ LASSERT(0 <= lov->lo_type && lov->lo_type < ARRAY_SIZE(lov_dispatch));
+
+ if (conf->u.coc_md != NULL)
+ llt = lov_type(conf->u.coc_md->lsm);
+ LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch));
+
+ cookie = cl_env_reenter();
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env)) {
+ cl_env_reexit(cookie);
+ return PTR_ERR(env);
+ }
+
+ old_ops = &lov_dispatch[lov->lo_type];
+ new_ops = &lov_dispatch[llt];
+
+ result = old_ops->llo_delete(env, lov, &lov->u);
+ if (result == 0) {
+ old_ops->llo_fini(env, lov, &lov->u);
+
+ LASSERT(atomic_read(&lov->lo_active_ios) == 0);
+ LASSERT(hdr->coh_tree.rnode == NULL);
+ LASSERT(hdr->coh_pages == 0);
+
+ lov->lo_type = LLT_EMPTY;
+ result = new_ops->llo_init(env,
+ lu2lov_dev(lov->lo_cl.co_lu.lo_dev),
+ lov, conf, state);
+ if (result == 0) {
+ new_ops->llo_install(env, lov, state);
+ lov->lo_type = llt;
+ } else {
+ new_ops->llo_delete(env, lov, state);
+ new_ops->llo_fini(env, lov, state);
+ /* this file becomes an EMPTY file. */
+ }
+ }
+
+ cl_env_put(env, &refcheck);
+ cl_env_reexit(cookie);
+ return result;
+}
+
+/*****************************************************************************
+ *
+ * Lov object operations.
+ *
+ */
+int lov_object_init(const struct lu_env *env, struct lu_object *obj,
+ const struct lu_object_conf *conf)
+{
+ struct lov_device *dev = lu2lov_dev(obj->lo_dev);
+ struct lov_object *lov = lu2lov(obj);
+ const struct cl_object_conf *cconf = lu2cl_conf(conf);
+ union lov_layout_state *set = &lov->u;
+ const struct lov_layout_operations *ops;
+ int result;
+
+ init_rwsem(&lov->lo_type_guard);
+ atomic_set(&lov->lo_active_ios, 0);
+ init_waitqueue_head(&lov->lo_waitq);
+
+ cl_object_page_init(lu2cl(obj), sizeof(struct lov_page));
+
+ /* no locking is necessary, as object is being created */
+ lov->lo_type = lov_type(cconf->u.coc_md->lsm);
+ ops = &lov_dispatch[lov->lo_type];
+ result = ops->llo_init(env, dev, lov, cconf, set);
+ if (result == 0)
+ ops->llo_install(env, lov, set);
+ return result;
+}
+
+static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_object_conf *conf)
+{
+ struct lov_stripe_md *lsm = NULL;
+ struct lov_object *lov = cl2lov(obj);
+ int result = 0;
+
+ lov_conf_lock(lov);
+ if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
+ lov->lo_layout_invalid = true;
+ GOTO(out, result = 0);
+ }
+
+ if (conf->coc_opc == OBJECT_CONF_WAIT) {
+ if (lov->lo_layout_invalid &&
+ atomic_read(&lov->lo_active_ios) > 0) {
+ lov_conf_unlock(lov);
+ result = lov_layout_wait(env, lov);
+ lov_conf_lock(lov);
+ }
+ GOTO(out, result);
+ }
+
+ LASSERT(conf->coc_opc == OBJECT_CONF_SET);
+
+ if (conf->u.coc_md != NULL)
+ lsm = conf->u.coc_md->lsm;
+ if ((lsm == NULL && lov->lo_lsm == NULL) ||
+ (lsm != NULL && lov->lo_lsm != NULL &&
+ lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen)) {
+ /* same version of layout */
+ lov->lo_layout_invalid = false;
+ GOTO(out, result = 0);
+ }
+
+ /* will change layout - check if there still exists active IO. */
+ if (atomic_read(&lov->lo_active_ios) > 0) {
+ lov->lo_layout_invalid = true;
+ GOTO(out, result = -EBUSY);
+ }
+
+ lov->lo_layout_invalid = lov_layout_change(env, lov, conf);
+
+out:
+ lov_conf_unlock(lov);
+ return result;
+}
+
+static void lov_object_delete(const struct lu_env *env, struct lu_object *obj)
+{
+ struct lov_object *lov = lu2lov(obj);
+
+ LOV_2DISPATCH_VOID(lov, llo_delete, env, lov, &lov->u);
+}
+
+static void lov_object_free(const struct lu_env *env, struct lu_object *obj)
+{
+ struct lov_object *lov = lu2lov(obj);
+
+ LOV_2DISPATCH_VOID(lov, llo_fini, env, lov, &lov->u);
+ lu_object_fini(obj);
+ OBD_SLAB_FREE_PTR(lov, lov_object_kmem);
+}
+
+static int lov_object_print(const struct lu_env *env, void *cookie,
+ lu_printer_t p, const struct lu_object *o)
+{
+ return LOV_2DISPATCH_NOLOCK(lu2lov(o), llo_print, env, cookie, p, o);
+}
+
+int lov_page_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, struct page *vmpage)
+{
+ return LOV_2DISPATCH_NOLOCK(cl2lov(obj),
+ llo_page_init, env, obj, page, vmpage);
+}
+
+/**
+ * Implements cl_object_operations::clo_io_init() method for lov
+ * layer. Dispatches to the appropriate layout io initialization method.
+ */
+int lov_io_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io)
+{
+ CL_IO_SLICE_CLEAN(lov_env_io(env), lis_cl);
+ return LOV_2DISPATCH_MAYLOCK(cl2lov(obj), llo_io_init,
+ !io->ci_ignore_layout, env, obj, io);
+}
+
+/**
+ * An implementation of cl_object_operations::clo_attr_get() method for lov
+ * layer. For raid0 layout this collects and merges attributes of all
+ * sub-objects.
+ */
+static int lov_attr_get(const struct lu_env *env, struct cl_object *obj,
+ struct cl_attr *attr)
+{
+ /* do not take lock, as this function is called under a
+ * spin-lock. Layout is protected from changing by ongoing IO. */
+ return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr);
+}
+
+static int lov_attr_set(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_attr *attr, unsigned valid)
+{
+ /*
+ * No dispatch is required here, as no layout implements this.
+ */
+ return 0;
+}
+
+int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_lock *lock, const struct cl_io *io)
+{
+	/* No need to lock because we hold a reference on the layout. */
+ return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_lock_init, env, obj, lock,
+ io);
+}
+
+static const struct cl_object_operations lov_ops = {
+ .coo_page_init = lov_page_init,
+ .coo_lock_init = lov_lock_init,
+ .coo_io_init = lov_io_init,
+ .coo_attr_get = lov_attr_get,
+ .coo_attr_set = lov_attr_set,
+ .coo_conf_set = lov_conf_set
+};
+
+static const struct lu_object_operations lov_lu_obj_ops = {
+ .loo_object_init = lov_object_init,
+ .loo_object_delete = lov_object_delete,
+ .loo_object_release = NULL,
+ .loo_object_free = lov_object_free,
+ .loo_object_print = lov_object_print,
+ .loo_object_invariant = NULL
+};
+
+struct lu_object *lov_object_alloc(const struct lu_env *env,
+ const struct lu_object_header *unused,
+ struct lu_device *dev)
+{
+ struct lov_object *lov;
+ struct lu_object *obj;
+
+ OBD_SLAB_ALLOC_PTR_GFP(lov, lov_object_kmem, __GFP_IO);
+ if (lov != NULL) {
+ obj = lov2lu(lov);
+ lu_object_init(obj, NULL, dev);
+ lov->lo_cl.co_ops = &lov_ops;
+ lov->lo_type = -1; /* invalid, to catch uninitialized type */
+ /*
+ * object io operation vector (cl_object::co_iop) is installed
+ * later in lov_object_init(), as different vectors are used
+ * for object with different layouts.
+ */
+ obj->lo_ops = &lov_lu_obj_ops;
+ } else
+ obj = NULL;
+ return obj;
+}
+
+struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
+{
+ struct lov_stripe_md *lsm = NULL;
+
+ lov_conf_freeze(lov);
+ if (lov->lo_lsm != NULL) {
+ lsm = lsm_addref(lov->lo_lsm);
+ CDEBUG(D_INODE, "lsm %p addref %d/%d by %p.\n",
+ lsm, atomic_read(&lsm->lsm_refc),
+ lov->lo_layout_invalid, current);
+ }
+ lov_conf_thaw(lov);
+ return lsm;
+}
+
+void lov_lsm_decref(struct lov_object *lov, struct lov_stripe_md *lsm)
+{
+ if (lsm == NULL)
+ return;
+
+ CDEBUG(D_INODE, "lsm %p decref %d by %p.\n",
+ lsm, atomic_read(&lsm->lsm_refc), current);
+
+ lov_free_memmd(&lsm);
+}
+
+struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj)
+{
+ struct lu_object *luobj;
+ struct lov_stripe_md *lsm = NULL;
+
+ if (clobj == NULL)
+ return NULL;
+
+ luobj = lu_object_locate(&cl_object_header(clobj)->coh_lu,
+ &lov_device_type);
+ if (luobj != NULL)
+ lsm = lov_lsm_addref(lu2lov(luobj));
+ return lsm;
+}
+EXPORT_SYMBOL(lov_lsm_get);
+
+void lov_lsm_put(struct cl_object *unused, struct lov_stripe_md *lsm)
+{
+ if (lsm != NULL)
+ lov_free_memmd(&lsm);
+}
+EXPORT_SYMBOL(lov_lsm_put);
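+
+/*
+ * A minimal usage sketch for the pair of exports above (assuming the caller
+ * already holds a valid cl_object pointer "clobj"):
+ *
+ *	struct lov_stripe_md *lsm = lov_lsm_get(clobj);
+ *
+ *	if (lsm != NULL) {
+ *		... inspect e.g. lsm->lsm_stripe_count ...
+ *		lov_lsm_put(clobj, lsm);
+ *	}
+ */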
+
+int lov_read_and_clear_async_rc(struct cl_object *clob)
+{
+ struct lu_object *luobj;
+ int rc = 0;
+
+ luobj = lu_object_locate(&cl_object_header(clob)->coh_lu,
+ &lov_device_type);
+ if (luobj != NULL) {
+ struct lov_object *lov = lu2lov(luobj);
+
+ lov_conf_freeze(lov);
+ switch (lov->lo_type) {
+ case LLT_RAID0: {
+ struct lov_stripe_md *lsm;
+ int i;
+
+ lsm = lov->lo_lsm;
+ LASSERT(lsm != NULL);
+ for (i = 0; i < lsm->lsm_stripe_count; i++) {
+ struct lov_oinfo *loi = lsm->lsm_oinfo[i];
+ if (loi->loi_ar.ar_rc && !rc)
+ rc = loi->loi_ar.ar_rc;
+ loi->loi_ar.ar_rc = 0;
+ }
+ }
+ case LLT_RELEASED:
+ case LLT_EMPTY:
+ break;
+ default:
+ LBUG();
+ }
+ lov_conf_thaw(lov);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(lov_read_and_clear_async_rc);
+
+/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lov_offset.c b/drivers/staging/lustre/lustre/lov/lov_offset.c
new file mode 100644
index 0000000..04863a7
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lov/lov_offset.c
@@ -0,0 +1,266 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_LOV
+
+#include <linux/libcfs/libcfs.h>
+
+#include <obd_class.h>
+#include <obd_lov.h>
+
+#include "lov_internal.h"
+
+/* compute object size given "stripeno" and the ost size */
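+/*
+ * A worked example, assuming a file with 3 stripes of 1 MiB each (i.e. a
+ * 3 MiB stripe width) and stripeno = 1: an object size of 2.5 MiB (two full
+ * chunks plus half of a third) maps to a file size of
+ * 2 * 3 MiB + 1 * 1 MiB + 0.5 MiB = 7.5 MiB, while an object size of exactly
+ * 2 MiB maps to (2 - 1) * 3 MiB + (1 + 1) * 1 MiB = 5 MiB, the end of that
+ * stripe's second chunk.
+ */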
+obd_size lov_stripe_size(struct lov_stripe_md *lsm, obd_size ost_size,
+ int stripeno)
+{
+ unsigned long ssize = lsm->lsm_stripe_size;
+ unsigned long stripe_size;
+ obd_off swidth;
+ obd_size lov_size;
+ int magic = lsm->lsm_magic;
+
+ if (ost_size == 0)
+ return 0;
+
+ LASSERT(lsm_op_find(magic) != NULL);
+ lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, NULL, &swidth);
+
+ /* lov_do_div64(a, b) returns a % b, and a = a / b */
+ stripe_size = lov_do_div64(ost_size, ssize);
+ if (stripe_size)
+ lov_size = ost_size * swidth + stripeno * ssize + stripe_size;
+ else
+ lov_size = (ost_size - 1) * swidth + (stripeno + 1) * ssize;
+
+ return lov_size;
+}
+
+/* we have an offset in file backed by an lov and want to find out where
+ * that offset lands in our given stripe of the file. for the easy
+ * case where the offset is within the stripe, we just have to scale the
+ * offset down to make it relative to the stripe instead of the lov.
+ *
+ * the harder case is what to do when the offset doesn't intersect the
+ * stripe. callers will want start offsets clamped ahead to the start
+ * of the nearest stripe in the file. end offsets similarly clamped to the
+ * nearest ending byte of a stripe in the file:
+ *
+ * all this function does is move offsets to the nearest region of the
+ * stripe, and it does its work "mod" the full length of all the stripes.
+ * consider a file with 3 stripes:
+ *
+ * S E
+ * ---------------------------------------------------------------------
+ * | 0 | 1 | 2 | 0 | 1 | 2 |
+ * ---------------------------------------------------------------------
+ *
+ * to find stripe 1's offsets for S and E, it divides by the full stripe
+ * width and does its math in the context of a single set of stripes:
+ *
+ * S E
+ * -----------------------------------
+ * | 0 | 1 | 2 |
+ * -----------------------------------
+ *
+ * it'll notice that E is outside stripe 1 and clamp it to the end of the
+ * stripe, then multiply it back out by lov_off to give the real offsets in
+ * the stripe:
+ *
+ * S E
+ * ---------------------------------------------------------------------
+ * | 1 | 1 | 1 | 1 | 1 | 1 |
+ * ---------------------------------------------------------------------
+ *
+ * it would have done similarly and pulled S forward to the start of a 1
+ * stripe if, say, S had landed in a 0 stripe.
+ *
+ * this rounding isn't always correct. consider an E lov offset that lands
+ * on a 0 stripe, the "mod stripe width" math will pull it forward to the
+ * start of a 1 stripe, when in fact it wanted to be rounded back to the end
+ * of a previous 1 stripe. this logic is handled by callers and this is why:
+ *
+ * this function returns < 0 when the offset was "before" the stripe and
+ * was moved forward to the start of the stripe in question; 0 when it
+ * falls in the stripe and no shifting was done; > 0 when the offset
+ * was outside the stripe and was pulled back to its final byte. */
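+/*
+ * A worked example, assuming 3 stripes of 1 MiB (3 MiB stripe width) and
+ * stripeno = 1:
+ *
+ *   - lov_off = 4.5 MiB falls inside stripe 1, so *obdoff = 1.5 MiB and 0
+ *     is returned;
+ *   - lov_off = 0.5 MiB falls in a 0 stripe, so it is pushed forward to
+ *     *obdoff = 0 and a negative value is returned;
+ *   - lov_off = 2.5 MiB falls in a 2 stripe, so it is pulled back to
+ *     *obdoff = 1 MiB (the end of stripe 1's first chunk) and a positive
+ *     value is returned.
+ */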
+int lov_stripe_offset(struct lov_stripe_md *lsm, obd_off lov_off,
+ int stripeno, obd_off *obdoff)
+{
+ unsigned long ssize = lsm->lsm_stripe_size;
+ obd_off stripe_off, this_stripe, swidth;
+ int magic = lsm->lsm_magic;
+ int ret = 0;
+
+ if (lov_off == OBD_OBJECT_EOF) {
+ *obdoff = OBD_OBJECT_EOF;
+ return 0;
+ }
+
+ LASSERT(lsm_op_find(magic) != NULL);
+
+ lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, &lov_off,
+ &swidth);
+
+ /* lov_do_div64(a, b) returns a % b, and a = a / b */
+ stripe_off = lov_do_div64(lov_off, swidth);
+
+ this_stripe = (obd_off)stripeno * ssize;
+ if (stripe_off < this_stripe) {
+ stripe_off = 0;
+ ret = -1;
+ } else {
+ stripe_off -= this_stripe;
+
+ if (stripe_off >= ssize) {
+ stripe_off = ssize;
+ ret = 1;
+ }
+ }
+
+ *obdoff = lov_off * ssize + stripe_off;
+ return ret;
+}
+
+/* Given a whole-file size and a stripe number, give the file size which
+ * corresponds to the individual object of that stripe.
+ *
+ * This behaves basically in the same way as lov_stripe_offset, except that
+ * file sizes falling before the beginning of a stripe are clamped to the end
+ * of the previous stripe, not the beginning of the next:
+ *
+ * S
+ * ---------------------------------------------------------------------
+ * | 0 | 1 | 2 | 0 | 1 | 2 |
+ * ---------------------------------------------------------------------
+ *
+ * if clamped to stripe 2 becomes:
+ *
+ * S
+ * ---------------------------------------------------------------------
+ * | 0 | 1 | 2 | 0 | 1 | 2 |
+ * ---------------------------------------------------------------------
+ */
+obd_off lov_size_to_stripe(struct lov_stripe_md *lsm, obd_off file_size,
+ int stripeno)
+{
+ unsigned long ssize = lsm->lsm_stripe_size;
+ obd_off stripe_off, this_stripe, swidth;
+ int magic = lsm->lsm_magic;
+
+ if (file_size == OBD_OBJECT_EOF)
+ return OBD_OBJECT_EOF;
+
+ LASSERT(lsm_op_find(magic) != NULL);
+ lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, &file_size,
+ &swidth);
+
+ /* lov_do_div64(a, b) returns a % b, and a = a / b */
+ stripe_off = lov_do_div64(file_size, swidth);
+
+ this_stripe = (obd_off)stripeno * ssize;
+ if (stripe_off < this_stripe) {
+ /* Move to end of previous stripe, or zero */
+ if (file_size > 0) {
+ file_size--;
+ stripe_off = ssize;
+ } else {
+ stripe_off = 0;
+ }
+ } else {
+ stripe_off -= this_stripe;
+
+ if (stripe_off >= ssize) {
+ /* Clamp to end of this stripe */
+ stripe_off = ssize;
+ }
+ }
+
+ return (file_size * ssize + stripe_off);
+}
+
+/* given an extent in an lov and a stripe, calculate the extent of the stripe
+ * that is contained within the lov extent. this returns true if the given
+ * stripe does intersect with the lov extent. */
+int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno,
+ obd_off start, obd_off end,
+ obd_off *obd_start, obd_off *obd_end)
+{
+ int start_side, end_side;
+
+ start_side = lov_stripe_offset(lsm, start, stripeno, obd_start);
+ end_side = lov_stripe_offset(lsm, end, stripeno, obd_end);
+
+ CDEBUG(D_INODE, "["LPU64"->"LPU64"] -> [(%d) "LPU64"->"LPU64" (%d)]\n",
+ start, end, start_side, *obd_start, *obd_end, end_side);
+
+	/* this stripe doesn't intersect the file extent when neither
+	 * the start nor the end intersected the stripe and obd_start and
+	 * obd_end got rounded up to the same value. */
+ if (start_side != 0 && end_side != 0 && *obd_start == *obd_end)
+ return 0;
+
+ /* as mentioned in the lov_stripe_offset commentary, end
+ * might have been shifted in the wrong direction. This
+ * happens when an end offset is before the stripe when viewed
+ * through the "mod stripe size" math. we detect it being shifted
+ * in the wrong direction and touch it up.
+ * interestingly, this can't underflow since end must be > start
+ * if we passed through the previous check.
+ * (should we assert for that somewhere?) */
+ if (end_side != 0)
+ (*obd_end)--;
+
+ return 1;
+}
+
+/* compute which stripe number "lov_off" will be written into */
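+/* For example, with 3 stripes of 1 MiB each (3 MiB stripe width),
+ * lov_off = 4.5 MiB lands in stripe 1: 4.5 MiB mod 3 MiB = 1.5 MiB, and
+ * 1.5 MiB / 1 MiB rounds down to stripe index 1. */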
+int lov_stripe_number(struct lov_stripe_md *lsm, obd_off lov_off)
+{
+ unsigned long ssize = lsm->lsm_stripe_size;
+ obd_off stripe_off, swidth;
+ int magic = lsm->lsm_magic;
+
+ LASSERT(lsm_op_find(magic) != NULL);
+ lsm_op_find(magic)->lsm_stripe_by_offset(lsm, NULL, &lov_off, &swidth);
+
+ stripe_off = lov_do_div64(lov_off, swidth);
+
+ /* Puts stripe_off/ssize result into stripe_off */
+ lov_do_div64(stripe_off, ssize);
+
+ return stripe_off;
+}
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
new file mode 100644
index 0000000..55ec267
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -0,0 +1,678 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/lov/lov_pack.c
+ *
+ * (Un)packing of OST/MDS requests
+ *
+ * Author: Andreas Dilger <adilger@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LOV
+
+#include <lustre_net.h>
+#include <obd.h>
+#include <obd_lov.h>
+#include <obd_class.h>
+#include <obd_support.h>
+#include <lustre/lustre_user.h>
+
+#include "lov_internal.h"
+
+void lov_dump_lmm_common(int level, void *lmmp)
+{
+ struct lov_mds_md *lmm = lmmp;
+ struct ost_id oi;
+
+ lmm_oi_le_to_cpu(&oi, &lmm->lmm_oi);
+ CDEBUG(level, "objid "DOSTID", magic 0x%08x, pattern %#x\n",
+ POSTID(&oi), le32_to_cpu(lmm->lmm_magic),
+ le32_to_cpu(lmm->lmm_pattern));
+ CDEBUG(level, "stripe_size %u, stripe_count %u, layout_gen %u\n",
+ le32_to_cpu(lmm->lmm_stripe_size),
+ le16_to_cpu(lmm->lmm_stripe_count),
+ le16_to_cpu(lmm->lmm_layout_gen));
+}
+
+static void lov_dump_lmm_objects(int level, struct lov_ost_data *lod,
+ int stripe_count)
+{
+ int i;
+
+ if (stripe_count > LOV_V1_INSANE_STRIPE_COUNT) {
+ CDEBUG(level, "bad stripe_count %u > max_stripe_count %u\n",
+ stripe_count, LOV_V1_INSANE_STRIPE_COUNT);
+ return;
+ }
+
+ for (i = 0; i < stripe_count; ++i, ++lod) {
+ struct ost_id oi;
+
+ ostid_le_to_cpu(&lod->l_ost_oi, &oi);
+ CDEBUG(level, "stripe %u idx %u subobj "DOSTID"\n", i,
+ le32_to_cpu(lod->l_ost_idx), POSTID(&oi));
+ }
+}
+
+void lov_dump_lmm_v1(int level, struct lov_mds_md_v1 *lmm)
+{
+ lov_dump_lmm_common(level, lmm);
+ lov_dump_lmm_objects(level, lmm->lmm_objects,
+ le16_to_cpu(lmm->lmm_stripe_count));
+}
+
+void lov_dump_lmm_v3(int level, struct lov_mds_md_v3 *lmm)
+{
+ lov_dump_lmm_common(level, lmm);
+	CDEBUG(level, "pool_name "LOV_POOLNAMEF"\n", lmm->lmm_pool_name);
+ lov_dump_lmm_objects(level, lmm->lmm_objects,
+ le16_to_cpu(lmm->lmm_stripe_count));
+}
+
+void lov_dump_lmm(int level, void *lmm)
+{
+ int magic;
+
+ magic = ((struct lov_mds_md_v1 *)(lmm))->lmm_magic;
+ switch (magic) {
+ case LOV_MAGIC_V1:
+ return lov_dump_lmm_v1(level, (struct lov_mds_md_v1 *)(lmm));
+ case LOV_MAGIC_V3:
+ return lov_dump_lmm_v3(level, (struct lov_mds_md_v3 *)(lmm));
+ default:
+		CERROR("Cannot recognize lmm_magic %x\n", magic);
+ }
+ return;
+}
+
+#define LMM_ASSERT(test)						\
+do {									\
+	if (!(test))							\
+		lov_dump_lmm(D_ERROR, lmm);				\
+	LASSERT(test); /* so we know what assertion failed */		\
+} while (0)
+
+/* Pack LOV object metadata for disk storage. It is packed in LE byte
+ * order and is opaque to the networking layer.
+ *
+ * XXX In the future, this will be enhanced to get the EA size from the
+ * underlying OSC device(s) to get their EA sizes so we can stack
+ * LOVs properly. For now lov_mds_md_size() just assumes one obd_id
+ * per stripe.
+ */
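+/*
+ * Calling conventions, as implemented below: with lmmp == NULL only the
+ * required buffer size is returned; with *lmmp set but lsm == NULL the
+ * previously packed buffer is freed; otherwise the lsm is packed into
+ * *lmmp, which is allocated here if the caller passed it in as NULL.
+ */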
+int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
+ struct lov_stripe_md *lsm)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+ struct lov_obd *lov = &obd->u.lov;
+ struct lov_mds_md_v1 *lmmv1;
+ struct lov_mds_md_v3 *lmmv3;
+ __u16 stripe_count;
+ struct lov_ost_data_v1 *lmm_objects;
+ int lmm_size, lmm_magic;
+ int i;
+ int cplen = 0;
+
+ if (lsm) {
+ lmm_magic = lsm->lsm_magic;
+ } else {
+ if (lmmp && *lmmp)
+ lmm_magic = le32_to_cpu((*lmmp)->lmm_magic);
+ else
+ /* lsm == NULL and lmmp == NULL */
+ lmm_magic = LOV_MAGIC;
+ }
+
+ if ((lmm_magic != LOV_MAGIC_V1) &&
+ (lmm_magic != LOV_MAGIC_V3)) {
+ CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
+ lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
+ return -EINVAL;
+
+ }
+
+ if (lsm) {
+ /* If we are just sizing the EA, limit the stripe count
+ * to the actual number of OSTs in this filesystem. */
+ if (!lmmp) {
+ stripe_count = lov_get_stripecnt(lov, lmm_magic,
+ lsm->lsm_stripe_count);
+ lsm->lsm_stripe_count = stripe_count;
+ } else if (!lsm_is_released(lsm)) {
+ stripe_count = lsm->lsm_stripe_count;
+ } else {
+ stripe_count = 0;
+ }
+ } else {
+ /* No need to allocate more than maximum supported stripes.
+ * Anyway, this is pretty inaccurate since ld_tgt_count now
+ * represents max index and we should rely on the actual number
+ * of OSTs instead */
+ stripe_count = lov_mds_md_stripecnt(lov->lov_ocd.ocd_max_easize,
+ lmm_magic);
+ if (stripe_count > lov->desc.ld_tgt_count)
+ stripe_count = lov->desc.ld_tgt_count;
+ }
+
+ /* XXX LOV STACKING call into osc for sizes */
+ lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
+
+ if (!lmmp)
+ return lmm_size;
+
+ if (*lmmp && !lsm) {
+ stripe_count = le16_to_cpu((*lmmp)->lmm_stripe_count);
+ lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
+ OBD_FREE_LARGE(*lmmp, lmm_size);
+ *lmmp = NULL;
+ return 0;
+ }
+
+ if (!*lmmp) {
+ OBD_ALLOC_LARGE(*lmmp, lmm_size);
+ if (!*lmmp)
+ return -ENOMEM;
+ }
+
+	CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d\n",
+ lmm_magic, lmm_size);
+
+ lmmv1 = *lmmp;
+ lmmv3 = (struct lov_mds_md_v3 *)*lmmp;
+ if (lmm_magic == LOV_MAGIC_V3)
+ lmmv3->lmm_magic = cpu_to_le32(LOV_MAGIC_V3);
+ else
+ lmmv1->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);
+
+ if (!lsm)
+ return lmm_size;
+
+ /* lmmv1 and lmmv3 point to the same struct and have the
+ * same first fields
+ */
+ lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi);
+ lmmv1->lmm_stripe_size = cpu_to_le32(lsm->lsm_stripe_size);
+ lmmv1->lmm_stripe_count = cpu_to_le16(stripe_count);
+ lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_pattern);
+ lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen);
+ if (lsm->lsm_magic == LOV_MAGIC_V3) {
+ cplen = strlcpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name,
+ sizeof(lmmv3->lmm_pool_name));
+ if (cplen >= sizeof(lmmv3->lmm_pool_name))
+ return -E2BIG;
+ lmm_objects = lmmv3->lmm_objects;
+ } else {
+ lmm_objects = lmmv1->lmm_objects;
+ }
+
+ for (i = 0; i < stripe_count; i++) {
+ struct lov_oinfo *loi = lsm->lsm_oinfo[i];
+ /* XXX LOV STACKING call down to osc_packmd() to do packing */
+ LASSERTF(ostid_id(&loi->loi_oi) != 0, "lmm_oi "DOSTID
+ " stripe %u/%u idx %u\n", POSTID(&lmmv1->lmm_oi),
+ i, stripe_count, loi->loi_ost_idx);
+ ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
+ lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
+ lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
+ }
+
+ return lmm_size;
+}
+
+/* Find the max stripecount we should use */
+__u16 lov_get_stripecnt(struct lov_obd *lov, __u32 magic, __u16 stripe_count)
+{
+ __u32 max_stripes = LOV_MAX_STRIPE_COUNT_OLD;
+
+ if (!stripe_count)
+ stripe_count = lov->desc.ld_default_stripe_count;
+ if (stripe_count > lov->desc.ld_active_tgt_count)
+ stripe_count = lov->desc.ld_active_tgt_count;
+ if (!stripe_count)
+ stripe_count = 1;
+
+ /* stripe count is based on whether ldiskfs can handle
+ * larger EA sizes */
+ if (lov->lov_ocd.ocd_connect_flags & OBD_CONNECT_MAX_EASIZE &&
+ lov->lov_ocd.ocd_max_easize)
+ max_stripes = lov_mds_md_stripecnt(lov->lov_ocd.ocd_max_easize,
+ magic);
+
+ if (stripe_count > max_stripes)
+ stripe_count = max_stripes;
+
+ return stripe_count;
+}
+
+
+static int lov_verify_lmm(void *lmm, int lmm_bytes, __u16 *stripe_count)
+{
+ int rc;
+
+ if (lsm_op_find(le32_to_cpu(*(__u32 *)lmm)) == NULL) {
+ char *buffer;
+ int sz;
+
+ CERROR("bad disk LOV MAGIC: 0x%08X; dumping LMM (size=%d):\n",
+ le32_to_cpu(*(__u32 *)lmm), lmm_bytes);
+ sz = lmm_bytes * 2 + 1;
+ OBD_ALLOC_LARGE(buffer, sz);
+ if (buffer != NULL) {
+ int i;
+
+ for (i = 0; i < lmm_bytes; i++)
+ sprintf(buffer+2*i, "%.2X", ((char *)lmm)[i]);
+ buffer[sz - 1] = '\0';
+ CERROR("%s\n", buffer);
+ OBD_FREE_LARGE(buffer, sz);
+ }
+ return -EINVAL;
+ }
+ rc = lsm_op_find(le32_to_cpu(*(__u32 *)lmm))->lsm_lmm_verify(lmm,
+ lmm_bytes, stripe_count);
+ return rc;
+}
+
+int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count,
+ int pattern, int magic)
+{
+ int i, lsm_size;
+
+ CDEBUG(D_INFO, "alloc lsm, stripe_count %d\n", stripe_count);
+
+ *lsmp = lsm_alloc_plain(stripe_count, &lsm_size);
+ if (!*lsmp) {
+ CERROR("can't allocate lsmp stripe_count %d\n", stripe_count);
+ return -ENOMEM;
+ }
+
+ atomic_set(&(*lsmp)->lsm_refc, 1);
+ spin_lock_init(&(*lsmp)->lsm_lock);
+ (*lsmp)->lsm_magic = magic;
+ (*lsmp)->lsm_stripe_count = stripe_count;
+ (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES * stripe_count;
+ (*lsmp)->lsm_pattern = pattern;
+ (*lsmp)->lsm_pool_name[0] = '\0';
+ (*lsmp)->lsm_layout_gen = 0;
+ if (stripe_count > 0)
+ (*lsmp)->lsm_oinfo[0]->loi_ost_idx = ~0;
+
+ for (i = 0; i < stripe_count; i++)
+ loi_init((*lsmp)->lsm_oinfo[i]);
+
+ return lsm_size;
+}
+
+int lov_free_memmd(struct lov_stripe_md **lsmp)
+{
+ struct lov_stripe_md *lsm = *lsmp;
+ int refc;
+
+ *lsmp = NULL;
+ LASSERT(atomic_read(&lsm->lsm_refc) > 0);
+ if ((refc = atomic_dec_return(&lsm->lsm_refc)) == 0) {
+ LASSERT(lsm_op_find(lsm->lsm_magic) != NULL);
+ lsm_op_find(lsm->lsm_magic)->lsm_free(lsm);
+ }
+ return refc;
+}
+
+
+/* Unpack LOV object metadata from disk storage. It is packed in LE byte
+ * order and is opaque to the networking layer.
+ */
+int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
+ struct lov_mds_md *lmm, int lmm_bytes)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+ struct lov_obd *lov = &obd->u.lov;
+ int rc = 0, lsm_size;
+ __u16 stripe_count;
+ __u32 magic;
+ __u32 pattern;
+
+ /* If passed an MDS struct use values from there, otherwise defaults */
+ if (lmm) {
+ rc = lov_verify_lmm(lmm, lmm_bytes, &stripe_count);
+ if (rc)
+ return rc;
+ magic = le32_to_cpu(lmm->lmm_magic);
+ } else {
+ magic = LOV_MAGIC;
+ stripe_count = lov_get_stripecnt(lov, magic, 0);
+ }
+
+ /* If we aren't passed an lsmp struct, we just want the size */
+ if (!lsmp) {
+ /* XXX LOV STACKING call into osc for sizes */
+ LBUG();
+ return lov_stripe_md_size(stripe_count);
+ }
+ /* If we are passed an allocated struct but nothing to unpack, free */
+ if (*lsmp && !lmm) {
+ lov_free_memmd(lsmp);
+ return 0;
+ }
+
+ pattern = le32_to_cpu(lmm->lmm_pattern);
+ lsm_size = lov_alloc_memmd(lsmp, stripe_count, pattern, magic);
+ if (lsm_size < 0)
+ return lsm_size;
+
+ /* If we are passed a pointer but nothing to unpack, we only alloc */
+ if (!lmm)
+ return lsm_size;
+
+ LASSERT(lsm_op_find(magic) != NULL);
+ rc = lsm_op_find(magic)->lsm_unpackmd(lov, *lsmp, lmm);
+ if (rc) {
+ lov_free_memmd(lsmp);
+ return rc;
+ }
+
+ return lsm_size;
+}
+
+static int __lov_setstripe(struct obd_export *exp, int max_lmm_size,
+ struct lov_stripe_md **lsmp,
+ struct lov_user_md *lump)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+ struct lov_obd *lov = &obd->u.lov;
+ char buffer[sizeof(struct lov_user_md_v3)];
+ struct lov_user_md_v3 *lumv3 = (struct lov_user_md_v3 *)&buffer[0];
+ struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&buffer[0];
+ int lmm_magic;
+ __u16 stripe_count;
+ int rc;
+ int cplen = 0;
+
+ rc = lov_lum_swab_if_needed(lumv3, &lmm_magic, lump);
+ if (rc)
+ return rc;
+
+ /* in the rest of the checks, since lumv1 and lumv3 share the same
+ * initial fields, we use lumv1 to avoid code duplication */
+
+ if (lumv1->lmm_pattern == 0) {
+ lumv1->lmm_pattern = lov->desc.ld_pattern ?
+ lov->desc.ld_pattern : LOV_PATTERN_RAID0;
+ }
+
+ if (lov_pattern(lumv1->lmm_pattern) != LOV_PATTERN_RAID0) {
+ CDEBUG(D_IOCTL, "bad userland stripe pattern: %#x\n",
+ lumv1->lmm_pattern);
+ return -EINVAL;
+ }
+
+ /* 64kB is the largest common page size we see (ia64), and matches the
+ * check in lfs */
+ if (lumv1->lmm_stripe_size & (LOV_MIN_STRIPE_SIZE - 1)) {
+ CDEBUG(D_IOCTL, "stripe size %u not multiple of %u, fixing\n",
+ lumv1->lmm_stripe_size, LOV_MIN_STRIPE_SIZE);
+ lumv1->lmm_stripe_size = LOV_MIN_STRIPE_SIZE;
+ }
+
+ if ((lumv1->lmm_stripe_offset >= lov->desc.ld_tgt_count) &&
+ (lumv1->lmm_stripe_offset !=
+ (typeof(lumv1->lmm_stripe_offset))(-1))) {
+ CDEBUG(D_IOCTL, "stripe offset %u > number of OSTs %u\n",
+ lumv1->lmm_stripe_offset, lov->desc.ld_tgt_count);
+ return -EINVAL;
+ }
+ stripe_count = lov_get_stripecnt(lov, lmm_magic,
+ lumv1->lmm_stripe_count);
+
+ if (max_lmm_size) {
+ int max_stripes = (max_lmm_size -
+ lov_mds_md_size(0, lmm_magic)) /
+ sizeof(struct lov_ost_data_v1);
+ if (unlikely(max_stripes < stripe_count)) {
+ CDEBUG(D_IOCTL, "stripe count reset from %d to %d\n",
+ stripe_count, max_stripes);
+ stripe_count = max_stripes;
+ }
+ }
+
+ if (lmm_magic == LOV_USER_MAGIC_V3) {
+ struct pool_desc *pool;
+
+ /* In the function below, .hs_keycmp resolves to
+ * pool_hashkey_keycmp() */
+ /* coverity[overrun-buffer-val] */
+ pool = lov_find_pool(lov, lumv3->lmm_pool_name);
+ if (pool != NULL) {
+ if (lumv3->lmm_stripe_offset !=
+ (typeof(lumv3->lmm_stripe_offset))(-1)) {
+ rc = lov_check_index_in_pool(
+ lumv3->lmm_stripe_offset, pool);
+ if (rc < 0) {
+ lov_pool_putref(pool);
+ return -EINVAL;
+ }
+ }
+
+ if (stripe_count > pool_tgt_count(pool))
+ stripe_count = pool_tgt_count(pool);
+
+ lov_pool_putref(pool);
+ }
+ }
+
+ if (lumv1->lmm_pattern & LOV_PATTERN_F_RELEASED)
+ stripe_count = 0;
+
+ rc = lov_alloc_memmd(lsmp, stripe_count, lumv1->lmm_pattern, lmm_magic);
+
+ if (rc >= 0) {
+ (*lsmp)->lsm_oinfo[0]->loi_ost_idx = lumv1->lmm_stripe_offset;
+ (*lsmp)->lsm_stripe_size = lumv1->lmm_stripe_size;
+ if (lmm_magic == LOV_USER_MAGIC_V3) {
+ cplen = strlcpy((*lsmp)->lsm_pool_name,
+ lumv3->lmm_pool_name,
+ sizeof((*lsmp)->lsm_pool_name));
+ if (cplen >= sizeof((*lsmp)->lsm_pool_name))
+ rc = -E2BIG;
+ }
+ rc = 0;
+ }
+
+ return rc;
+}
+
+/* Configure object striping information on a new file.
+ *
+ * @lmmu is a pointer to a user struct with one or more of the fields set to
+ * indicate the application preference: lmm_stripe_count, lmm_stripe_size,
+ * lmm_stripe_offset, and lmm_stripe_pattern. lmm_magic must be LOV_MAGIC.
+ * @lsmp is a pointer to an in-core stripe MD that needs to be filled in.
+ */
+int lov_setstripe(struct obd_export *exp, int max_lmm_size,
+ struct lov_stripe_md **lsmp, struct lov_user_md *lump)
+{
+ int rc;
+ mm_segment_t seg;
+
+ seg = get_fs();
+ set_fs(KERNEL_DS);
+
+ rc = __lov_setstripe(exp, max_lmm_size, lsmp, lump);
+ set_fs(seg);
+ return rc;
+}
+
+int lov_setea(struct obd_export *exp, struct lov_stripe_md **lsmp,
+ struct lov_user_md *lump)
+{
+ int i;
+ int rc;
+ struct obd_export *oexp;
+ struct lov_obd *lov = &exp->exp_obd->u.lov;
+ obd_id last_id = 0;
+ struct lov_user_ost_data_v1 *lmm_objects;
+
+ if (lump->lmm_magic == LOV_USER_MAGIC_V3)
+ lmm_objects = ((struct lov_user_md_v3 *)lump)->lmm_objects;
+ else
+ lmm_objects = lump->lmm_objects;
+
+ for (i = 0; i < lump->lmm_stripe_count; i++) {
+ __u32 len = sizeof(last_id);
+ oexp = lov->lov_tgts[lmm_objects[i].l_ost_idx]->ltd_exp;
+ rc = obd_get_info(NULL, oexp, sizeof(KEY_LAST_ID), KEY_LAST_ID,
+ &len, &last_id, NULL);
+ if (rc)
+ return rc;
+ if (ostid_id(&lmm_objects[i].l_ost_oi) > last_id) {
+ CERROR("Setting EA for object > than last id on"
+ " ost idx %d "DOSTID" > "LPD64" \n",
+ lmm_objects[i].l_ost_idx,
+ POSTID(&lmm_objects[i].l_ost_oi), last_id);
+ return -EINVAL;
+ }
+ }
+
+ rc = lov_setstripe(exp, 0, lsmp, lump);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < lump->lmm_stripe_count; i++) {
+ (*lsmp)->lsm_oinfo[i]->loi_ost_idx =
+ lmm_objects[i].l_ost_idx;
+ (*lsmp)->lsm_oinfo[i]->loi_oi = lmm_objects[i].l_ost_oi;
+ }
+ return 0;
+}
+
+
+/* Retrieve object striping information.
+ *
+ * @lump is a pointer to an in-core struct with lmm_ost_count indicating
+ * the maximum number of OST indices which will fit in the user buffer.
+ * lmm_magic must be LOV_USER_MAGIC.
+ */
+int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
+ struct lov_user_md *lump)
+{
+ /*
+ * XXX huge struct allocated on stack.
+ */
+ /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
+ struct lov_user_md_v3 lum;
+ struct lov_mds_md *lmmk = NULL;
+ int rc, lmm_size;
+ int lum_size;
+ mm_segment_t seg;
+
+ if (!lsm)
+ return -ENODATA;
+
+ /*
+ * "Switch to kernel segment" to allow copying from kernel space by
+ * copy_{to,from}_user().
+ */
+ seg = get_fs();
+ set_fs(KERNEL_DS);
+
+ /* we only need the header part from user space to get lmm_magic and
+ * lmm_stripe_count (the header part is common to v1 and v3) */
+ lum_size = sizeof(struct lov_user_md_v1);
+ if (copy_from_user(&lum, lump, lum_size))
+ GOTO(out_set, rc = -EFAULT);
+ else if ((lum.lmm_magic != LOV_USER_MAGIC) &&
+ (lum.lmm_magic != LOV_USER_MAGIC_V3))
+ GOTO(out_set, rc = -EINVAL);
+
+ if (lum.lmm_stripe_count &&
+ (lum.lmm_stripe_count < lsm->lsm_stripe_count)) {
+ /* Return the actual stripe count to the user */
+ lum.lmm_stripe_count = lsm->lsm_stripe_count;
+ rc = copy_to_user(lump, &lum, lum_size);
+ GOTO(out_set, rc = -EOVERFLOW);
+ }
+ rc = lov_packmd(exp, &lmmk, lsm);
+ if (rc < 0)
+ GOTO(out_set, rc);
+ lmm_size = rc;
+ rc = 0;
+
+ /* FIXME: Bug 1185 - copy fields properly when structs change */
+ /* struct lov_user_md_v3 and struct lov_mds_md_v3 must be the same */
+ CLASSERT(sizeof(lum) == sizeof(struct lov_mds_md_v3));
+ CLASSERT(sizeof lum.lmm_objects[0] == sizeof lmmk->lmm_objects[0]);
+
+ if ((cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) &&
+ ((lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) ||
+ (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)))) {
+ lustre_swab_lov_mds_md(lmmk);
+ lustre_swab_lov_user_md_objects(
+ (struct lov_user_ost_data*)lmmk->lmm_objects,
+ lmmk->lmm_stripe_count);
+ }
+ if (lum.lmm_magic == LOV_USER_MAGIC) {
+ /* User requested v1, so we need to skip lmm_pool_name */
+ if (lmmk->lmm_magic == LOV_MAGIC_V3) {
+ memmove((char*)(&lmmk->lmm_stripe_count) +
+ sizeof(lmmk->lmm_stripe_count),
+ ((struct lov_mds_md_v3*)lmmk)->lmm_objects,
+ lmmk->lmm_stripe_count *
+ sizeof(struct lov_ost_data_v1));
+ lmm_size -= LOV_MAXPOOLNAME;
+ }
+ } else {
+ /* if v3 we just have to update the lum_size */
+ lum_size = sizeof(struct lov_user_md_v3);
+ }
+
+ /* User wasn't expecting this many OST entries */
+ if (lum.lmm_stripe_count == 0)
+ lmm_size = lum_size;
+ else if (lum.lmm_stripe_count < lmmk->lmm_stripe_count)
+ GOTO(out_set, rc = -EOVERFLOW);
+ /*
+ * lov_mds_md and lov_user_md differ, so the data has to be
+ * re-ordered before copying it to user space.
+ */
+ lum.lmm_stripe_count = lmmk->lmm_stripe_count;
+ lum.lmm_layout_gen = lmmk->lmm_layout_gen;
+ ((struct lov_user_md *)lmmk)->lmm_layout_gen = lum.lmm_layout_gen;
+ ((struct lov_user_md *)lmmk)->lmm_stripe_count = lum.lmm_stripe_count;
+ if (copy_to_user(lump, lmmk, lmm_size))
+ rc = -EFAULT;
+
+ obd_free_diskmd(exp, &lmmk);
+out_set:
+ set_fs(seg);
+ return rc;
+}
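+
+/*
+ * Editorial illustration (not part of this patch): the v3-to-v1
+ * down-conversion above shifts the per-stripe objects down over the
+ * fixed-size pool name and shrinks the reply accordingly. The general
+ * pattern, with hypothetical names:
+ */
+static size_t demo_drop_middle_field(char *buf, size_t head_len,
+				     size_t field_len, size_t tail_len)
+{
+	/* move the tail down over the field being dropped */
+	memmove(buf + head_len, buf + head_len + field_len, tail_len);
+	/* the packed length no longer includes the dropped field */
+	return head_len + tail_len;
+}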
diff --git a/drivers/staging/lustre/lustre/lov/lov_page.c b/drivers/staging/lustre/lustre/lov/lov_page.c
new file mode 100644
index 0000000..674e617
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lov/lov_page.c
@@ -0,0 +1,228 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Implementation of cl_page for LOV layer.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LOV
+
+#include "lov_cl_internal.h"
+
+/** \addtogroup lov
+ * @{
+ */
+
+/*****************************************************************************
+ *
+ * Lov page operations.
+ *
+ */
+
+static int lov_page_invariant(const struct cl_page_slice *slice)
+{
+ const struct cl_page *page = slice->cpl_page;
+ const struct cl_page *sub = lov_sub_page(slice);
+
+ return ergo(sub != NULL,
+ page->cp_child == sub &&
+ sub->cp_parent == page &&
+ page->cp_state == sub->cp_state);
+}
+
+static void lov_page_fini(const struct lu_env *env,
+ struct cl_page_slice *slice)
+{
+ struct cl_page *sub = lov_sub_page(slice);
+
+ LINVRNT(lov_page_invariant(slice));
+
+ if (sub != NULL) {
+ LASSERT(sub->cp_state == CPS_FREEING);
+ lu_ref_del(&sub->cp_reference, "lov", sub->cp_parent);
+ sub->cp_parent = NULL;
+ slice->cpl_page->cp_child = NULL;
+ cl_page_put(env, sub);
+ }
+}
+
+static int lov_page_own(const struct lu_env *env,
+ const struct cl_page_slice *slice, struct cl_io *io,
+ int nonblock)
+{
+ struct lov_io *lio = lov_env_io(env);
+ struct lov_io_sub *sub;
+
+ LINVRNT(lov_page_invariant(slice));
+ LINVRNT(!cl2lov_page(slice)->lps_invalid);
+
+ sub = lov_page_subio(env, lio, slice);
+ if (!IS_ERR(sub)) {
+ lov_sub_page(slice)->cp_owner = sub->sub_io;
+ lov_sub_put(sub);
+ } else
+ LBUG(); /* Arrgh */
+ return 0;
+}
+
+static void lov_page_assume(const struct lu_env *env,
+ const struct cl_page_slice *slice, struct cl_io *io)
+{
+ lov_page_own(env, slice, io, 0);
+}
+
+static int lov_page_cache_add(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io)
+{
+ struct lov_io *lio = lov_env_io(env);
+ struct lov_io_sub *sub;
+ int rc = 0;
+
+ LINVRNT(lov_page_invariant(slice));
+ LINVRNT(!cl2lov_page(slice)->lps_invalid);
+
+ sub = lov_page_subio(env, lio, slice);
+ if (!IS_ERR(sub)) {
+ rc = cl_page_cache_add(sub->sub_env, sub->sub_io,
+ slice->cpl_page->cp_child, CRT_WRITE);
+ lov_sub_put(sub);
+ } else {
+ rc = PTR_ERR(sub);
+ CL_PAGE_DEBUG(D_ERROR, env, slice->cpl_page, "rc = %d\n", rc);
+ }
+ return rc;
+}
+
+static int lov_page_print(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ void *cookie, lu_printer_t printer)
+{
+ struct lov_page *lp = cl2lov_page(slice);
+
+ return (*printer)(env, cookie, LUSTRE_LOV_NAME"-page@%p\n", lp);
+}
+
+static const struct cl_page_operations lov_page_ops = {
+ .cpo_fini = lov_page_fini,
+ .cpo_own = lov_page_own,
+ .cpo_assume = lov_page_assume,
+ .io = {
+ [CRT_WRITE] = {
+ .cpo_cache_add = lov_page_cache_add
+ }
+ },
+ .cpo_print = lov_page_print
+};
+
+static void lov_empty_page_fini(const struct lu_env *env,
+ struct cl_page_slice *slice)
+{
+ LASSERT(slice->cpl_page->cp_child == NULL);
+}
+
+int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, struct page *vmpage)
+{
+ struct lov_object *loo = cl2lov(obj);
+ struct lov_layout_raid0 *r0 = lov_r0(loo);
+ struct lov_io *lio = lov_env_io(env);
+ struct cl_page *subpage;
+ struct cl_object *subobj;
+ struct lov_io_sub *sub;
+ struct lov_page *lpg = cl_object_page_slice(obj, page);
+ loff_t offset;
+ obd_off suboff;
+ int stripe;
+ int rc;
+
+ offset = cl_offset(obj, page->cp_index);
+ stripe = lov_stripe_number(loo->lo_lsm, offset);
+ LASSERT(stripe < r0->lo_nr);
+ rc = lov_stripe_offset(loo->lo_lsm, offset, stripe,
+ &suboff);
+ LASSERT(rc == 0);
+
+ lpg->lps_invalid = 1;
+ cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_page_ops);
+
+ sub = lov_sub_get(env, lio, stripe);
+ if (IS_ERR(sub))
+ GOTO(out, rc = PTR_ERR(sub));
+
+ subobj = lovsub2cl(r0->lo_sub[stripe]);
+ subpage = cl_page_find_sub(sub->sub_env, subobj,
+ cl_index(subobj, suboff), vmpage, page);
+ lov_sub_put(sub);
+ if (IS_ERR(subpage))
+ GOTO(out, rc = PTR_ERR(subpage));
+
+ if (likely(subpage->cp_parent == page)) {
+ lu_ref_add(&subpage->cp_reference, "lov", page);
+ lpg->lps_invalid = 0;
+ rc = 0;
+ } else {
+ CL_PAGE_DEBUG(D_ERROR, env, page, "parent page\n");
+ CL_PAGE_DEBUG(D_ERROR, env, subpage, "child page\n");
+ LASSERT(0);
+ }
+
+out:
+ return rc;
+}
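+
+/*
+ * Editorial illustration (not part of this patch): for the plain striped
+ * (RAID0) case, lov_stripe_number() and lov_stripe_offset() used above
+ * implement the usual round-robin mapping from a file offset to a
+ * (stripe index, sub-object offset) pair. A hypothetical standalone
+ * sketch of that mapping (64-bit division written plainly for clarity):
+ */
+static void demo_raid0_map(obd_off off, obd_off stripe_size,
+			   unsigned int stripe_count,
+			   unsigned int *stripe, obd_off *suboff)
+{
+	obd_off chunk = off / stripe_size;
+
+	*stripe = chunk % stripe_count;
+	/* full stripe-width rows below us, plus the remainder inside
+	 * the current stripe unit */
+	*suboff = (chunk / stripe_count) * stripe_size + off % stripe_size;
+}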
+
+
+static const struct cl_page_operations lov_empty_page_ops = {
+ .cpo_fini = lov_empty_page_fini,
+ .cpo_print = lov_page_print
+};
+
+int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, struct page *vmpage)
+{
+ struct lov_page *lpg = cl_object_page_slice(obj, page);
+ void *addr;
+
+ cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_empty_page_ops);
+ addr = kmap(vmpage);
+ memset(addr, 0, cl_page_size(obj));
+ kunmap(vmpage);
+ cl_page_export(env, page, 1);
+ return 0;
+}
+
+
+/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c
new file mode 100644
index 0000000..dd3c07d
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lov/lov_pool.c
@@ -0,0 +1,663 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see [sun.com URL with a
+ * copy of GPLv2].
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/lov/lov_pool.c
+ *
+ * OST pool methods
+ *
+ * Author: Jacques-Charles LAFOUCRIERE <jc.lafoucriere@cea.fr>
+ * Author: Alex Lyashkov <Alexey.Lyashkov@Sun.COM>
+ * Author: Nathaniel Rutman <Nathan.Rutman@Sun.COM>
+ */
+
+#define DEBUG_SUBSYSTEM S_LOV
+
+#include <linux/libcfs/libcfs.h>
+
+#include <obd.h>
+#include "lov_internal.h"
+
+#define pool_tgt(_p, _i) \
+ _p->pool_lobd->u.lov.lov_tgts[_p->pool_obds.op_array[_i]]
+
+static void lov_pool_getref(struct pool_desc *pool)
+{
+ CDEBUG(D_INFO, "pool %p\n", pool);
+ atomic_inc(&pool->pool_refcount);
+}
+
+void lov_pool_putref(struct pool_desc *pool)
+{
+ CDEBUG(D_INFO, "pool %p\n", pool);
+ if (atomic_dec_and_test(&pool->pool_refcount)) {
+ LASSERT(hlist_unhashed(&pool->pool_hash));
+ LASSERT(list_empty(&pool->pool_list));
+ LASSERT(pool->pool_proc_entry == NULL);
+ lov_ost_pool_free(&(pool->pool_rr.lqr_pool));
+ lov_ost_pool_free(&(pool->pool_obds));
+ OBD_FREE_PTR(pool);
+ }
+}
+
+void lov_pool_putref_locked(struct pool_desc *pool)
+{
+ CDEBUG(D_INFO, "pool %p\n", pool);
+ LASSERT(atomic_read(&pool->pool_refcount) > 1);
+
+ atomic_dec(&pool->pool_refcount);
+}
+
+/*
+ * hash function using a Rotating Hash algorithm
+ * Knuth, D. The Art of Computer Programming,
+ * Volume 3: Sorting and Searching,
+ * Chapter 6.4.
+ * Addison Wesley, 1973
+ */
+static __u32 pool_hashfn(cfs_hash_t *hash_body, const void *key, unsigned mask)
+{
+ int i;
+ __u32 result;
+ char *poolname;
+
+ result = 0;
+ poolname = (char *)key;
+ for (i = 0; i < LOV_MAXPOOLNAME; i++) {
+ if (poolname[i] == '\0')
+ break;
+ result = (result << 4)^(result >> 28) ^ poolname[i];
+ }
+ return (result % mask);
+}
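+
+/*
+ * Editorial illustration (not part of this patch): the rotating hash above,
+ * applied to a NUL-terminated pool name and reduced to a bucket index.
+ * The helper name and the reduction by a plain bucket count are
+ * hypothetical; pool_hashfn() itself reduces with the mask supplied by
+ * cfs_hash.
+ */
+static __u32 demo_pool_name_hash(const char *name, __u32 nbuckets)
+{
+	__u32 result = 0;
+	int i;
+
+	for (i = 0; i < LOV_MAXPOOLNAME && name[i] != '\0'; i++)
+		result = (result << 4) ^ (result >> 28) ^ name[i];
+	return result % nbuckets;
+}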
+
+static void *pool_key(struct hlist_node *hnode)
+{
+ struct pool_desc *pool;
+
+ pool = hlist_entry(hnode, struct pool_desc, pool_hash);
+ return (pool->pool_name);
+}
+
+static int pool_hashkey_keycmp(const void *key, struct hlist_node *compared_hnode)
+{
+ char *pool_name;
+ struct pool_desc *pool;
+
+ pool_name = (char *)key;
+ pool = hlist_entry(compared_hnode, struct pool_desc, pool_hash);
+ return !strncmp(pool_name, pool->pool_name, LOV_MAXPOOLNAME);
+}
+
+static void *pool_hashobject(struct hlist_node *hnode)
+{
+ return hlist_entry(hnode, struct pool_desc, pool_hash);
+}
+
+static void pool_hashrefcount_get(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+ struct pool_desc *pool;
+
+ pool = hlist_entry(hnode, struct pool_desc, pool_hash);
+ lov_pool_getref(pool);
+}
+
+static void pool_hashrefcount_put_locked(cfs_hash_t *hs,
+ struct hlist_node *hnode)
+{
+ struct pool_desc *pool;
+
+ pool = hlist_entry(hnode, struct pool_desc, pool_hash);
+ lov_pool_putref_locked(pool);
+}
+
+cfs_hash_ops_t pool_hash_operations = {
+ .hs_hash = pool_hashfn,
+ .hs_key = pool_key,
+ .hs_keycmp = pool_hashkey_keycmp,
+ .hs_object = pool_hashobject,
+ .hs_get = pool_hashrefcount_get,
+ .hs_put_locked = pool_hashrefcount_put_locked,
+
+};
+
+#ifdef LPROCFS
+/* ifdef needed for liblustre support */
+/*
+ * pool /proc seq_file methods
+ */
+/*
+ * iterator is used to go through the target pool entries
+ * index is the current entry index in the lp_array[] array
+ * index >= pos returned to the seq_file interface
+ * pos is from 0 to (pool->pool_obds.op_count - 1)
+ */
+#define POOL_IT_MAGIC 0xB001CEA0
+struct pool_iterator {
+ int magic;
+ struct pool_desc *pool;
+ int idx; /* from 0 to pool_tgt_size - 1 */
+};
+
+static void *pool_proc_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct pool_iterator *iter = (struct pool_iterator *)s->private;
+ int prev_idx;
+
+ LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X", iter->magic);
+
+ /* test if end of file */
+ if (*pos >= pool_tgt_count(iter->pool))
+ return NULL;
+
+ /* iterate to find a non-empty entry */
+ prev_idx = iter->idx;
+ down_read(&pool_tgt_rw_sem(iter->pool));
+ iter->idx++;
+ if (iter->idx == pool_tgt_count(iter->pool)) {
+ iter->idx = prev_idx; /* we stay on the last entry */
+ up_read(&pool_tgt_rw_sem(iter->pool));
+ return NULL;
+ }
+ up_read(&pool_tgt_rw_sem(iter->pool));
+ (*pos)++;
+ /* return != NULL to continue */
+ return iter;
+}
+
+static void *pool_proc_start(struct seq_file *s, loff_t *pos)
+{
+ struct pool_desc *pool = (struct pool_desc *)s->private;
+ struct pool_iterator *iter;
+
+ lov_pool_getref(pool);
+ if ((pool_tgt_count(pool) == 0) ||
+ (*pos >= pool_tgt_count(pool))) {
+ /* iter is not created, so stop() has no way to
+ * find the pool and drop its reference */
+ lov_pool_putref(pool);
+ return NULL;
+ }
+
+ OBD_ALLOC_PTR(iter);
+ if (!iter)
+ return ERR_PTR(-ENOMEM);
+ iter->magic = POOL_IT_MAGIC;
+ iter->pool = pool;
+ iter->idx = 0;
+
+ /* we use the seq_file private field to store the iterator so
+ * we can free it in stop() */
+ /* /!\ do not forget to restore it to the pool before freeing it */
+ s->private = iter;
+ if (*pos > 0) {
+ loff_t i;
+ void *ptr;
+
+ i = 0;
+ do {
+ ptr = pool_proc_next(s, &iter, &i);
+ } while ((i < *pos) && (ptr != NULL));
+ return ptr;
+ }
+ return iter;
+}
+
+static void pool_proc_stop(struct seq_file *s, void *v)
+{
+ struct pool_iterator *iter = (struct pool_iterator *)s->private;
+
+ /* in some cases the stop() method is called twice, without
+ * start() being called in between (see seq_read() in fs/seq_file.c),
+ * so we must free only if s->private is an iterator */
+ if ((iter) && (iter->magic == POOL_IT_MAGIC)) {
+ /* restore s->private so the next call to pool_proc_start()
+ * will work */
+ s->private = iter->pool;
+ lov_pool_putref(iter->pool);
+ OBD_FREE_PTR(iter);
+ }
+ return;
+}
+
+static int pool_proc_show(struct seq_file *s, void *v)
+{
+ struct pool_iterator *iter = (struct pool_iterator *)v;
+ struct lov_tgt_desc *tgt;
+
+ LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X", iter->magic);
+ LASSERT(iter->pool != NULL);
+ LASSERT(iter->idx <= pool_tgt_count(iter->pool));
+
+ down_read(&pool_tgt_rw_sem(iter->pool));
+ tgt = pool_tgt(iter->pool, iter->idx);
+ up_read(&pool_tgt_rw_sem(iter->pool));
+ if (tgt)
+ seq_printf(s, "%s\n", obd_uuid2str(&(tgt->ltd_uuid)));
+
+ return 0;
+}
+
+static struct seq_operations pool_proc_ops = {
+ .start = pool_proc_start,
+ .next = pool_proc_next,
+ .stop = pool_proc_stop,
+ .show = pool_proc_show,
+};
+
+static int pool_proc_open(struct inode *inode, struct file *file)
+{
+ int rc;
+
+ rc = seq_open(file, &pool_proc_ops);
+ if (!rc) {
+ struct seq_file *s = file->private_data;
+ s->private = PDE_DATA(inode);
+ }
+ return rc;
+}
+
+static struct file_operations pool_proc_operations = {
+ .open = pool_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+#endif /* LPROCFS */
+
+void lov_dump_pool(int level, struct pool_desc *pool)
+{
+ int i;
+
+ lov_pool_getref(pool);
+
+ CDEBUG(level, "pool "LOV_POOLNAMEF" has %d members\n",
+ pool->pool_name, pool->pool_obds.op_count);
+ down_read(&pool_tgt_rw_sem(pool));
+
+ for (i = 0; i < pool_tgt_count(pool) ; i++) {
+ if (!pool_tgt(pool, i) || !(pool_tgt(pool, i))->ltd_exp)
+ continue;
+ CDEBUG(level, "pool "LOV_POOLNAMEF"[%d] = %s\n",
+ pool->pool_name, i,
+ obd_uuid2str(&((pool_tgt(pool, i))->ltd_uuid)));
+ }
+
+ up_read(&pool_tgt_rw_sem(pool));
+ lov_pool_putref(pool);
+}
+
+#define LOV_POOL_INIT_COUNT 2
+int lov_ost_pool_init(struct ost_pool *op, unsigned int count)
+{
+ if (count == 0)
+ count = LOV_POOL_INIT_COUNT;
+ op->op_array = NULL;
+ op->op_count = 0;
+ init_rwsem(&op->op_rw_sem);
+ op->op_size = count;
+ OBD_ALLOC(op->op_array, op->op_size * sizeof(op->op_array[0]));
+ if (op->op_array == NULL) {
+ op->op_size = 0;
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+ /* Caller must hold op_rw_sem for write */
+int lov_ost_pool_extend(struct ost_pool *op, unsigned int min_count)
+{
+ __u32 *new;
+ int new_size;
+
+ LASSERT(min_count != 0);
+
+ if (op->op_count < op->op_size)
+ return 0;
+
+ new_size = max(min_count, 2 * op->op_size);
+ OBD_ALLOC(new, new_size * sizeof(op->op_array[0]));
+ if (new == NULL)
+ return -ENOMEM;
+
+ /* copy old array to new one */
+ memcpy(new, op->op_array, op->op_size * sizeof(op->op_array[0]));
+ OBD_FREE(op->op_array, op->op_size * sizeof(op->op_array[0]));
+ op->op_array = new;
+ op->op_size = new_size;
+ return 0;
+}
+
+int lov_ost_pool_add(struct ost_pool *op, __u32 idx, unsigned int min_count)
+{
+ int rc = 0, i;
+
+ down_write(&op->op_rw_sem);
+
+ rc = lov_ost_pool_extend(op, min_count);
+ if (rc)
+ GOTO(out, rc);
+
+ /* search ost in pool array */
+ for (i = 0; i < op->op_count; i++) {
+ if (op->op_array[i] == idx)
+ GOTO(out, rc = -EEXIST);
+ }
+ /* OST not found, so add it */
+ op->op_array[op->op_count] = idx;
+ op->op_count++;
+out:
+ up_write(&op->op_rw_sem);
+ return rc;
+}
+
+int lov_ost_pool_remove(struct ost_pool *op, __u32 idx)
+{
+ int i;
+
+ down_write(&op->op_rw_sem);
+
+ for (i = 0; i < op->op_count; i++) {
+ if (op->op_array[i] == idx) {
+ memmove(&op->op_array[i], &op->op_array[i + 1],
+ (op->op_count - i - 1) * sizeof(op->op_array[0]));
+ op->op_count--;
+ up_write(&op->op_rw_sem);
+ return 0;
+ }
+ }
+
+ up_write(&op->op_rw_sem);
+ return -EINVAL;
+}
+
+int lov_ost_pool_free(struct ost_pool *op)
+{
+ if (op->op_size == 0)
+ return 0;
+
+ down_write(&op->op_rw_sem);
+
+ OBD_FREE(op->op_array, op->op_size * sizeof(op->op_array[0]));
+ op->op_array = NULL;
+ op->op_count = 0;
+ op->op_size = 0;
+
+ up_write(&op->op_rw_sem);
+ return 0;
+}
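+
+/*
+ * Editorial illustration (not part of this patch): a typical lifecycle of
+ * an ost_pool built from the helpers above. The function name is
+ * hypothetical and error handling is abbreviated; note that the min_count
+ * passed to lov_ost_pool_add() must be non-zero, since
+ * lov_ost_pool_extend() asserts on it.
+ */
+static int demo_ost_pool_usage(void)
+{
+	struct ost_pool op;
+	int rc;
+
+	rc = lov_ost_pool_init(&op, 0);		/* 0 means the default initial size */
+	if (rc)
+		return rc;
+	rc = lov_ost_pool_add(&op, 3, 4);	/* add OST index 3, grow to >= 4 slots if needed */
+	if (rc == 0)
+		rc = lov_ost_pool_remove(&op, 3);
+	lov_ost_pool_free(&op);
+	return rc;
+}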
+
+
+int lov_pool_new(struct obd_device *obd, char *poolname)
+{
+ struct lov_obd *lov;
+ struct pool_desc *new_pool;
+ int rc;
+
+ lov = &(obd->u.lov);
+
+ if (strlen(poolname) > LOV_MAXPOOLNAME)
+ return -ENAMETOOLONG;
+
+ OBD_ALLOC_PTR(new_pool);
+ if (new_pool == NULL)
+ return -ENOMEM;
+
+ strncpy(new_pool->pool_name, poolname, LOV_MAXPOOLNAME);
+ new_pool->pool_name[LOV_MAXPOOLNAME] = '\0';
+ new_pool->pool_lobd = obd;
+ /* refcount is initialized to 1 because a pool is always in use
+ * from creation until deletion
+ */
+ atomic_set(&new_pool->pool_refcount, 1);
+ rc = lov_ost_pool_init(&new_pool->pool_obds, 0);
+ if (rc)
+ GOTO(out_err, rc);
+
+ memset(&(new_pool->pool_rr), 0, sizeof(struct lov_qos_rr));
+ rc = lov_ost_pool_init(&new_pool->pool_rr.lqr_pool, 0);
+ if (rc)
+ GOTO(out_free_pool_obds, rc);
+
+ INIT_HLIST_NODE(&new_pool->pool_hash);
+
+#ifdef LPROCFS
+ /* we need this since seq_file is not implemented for liblustre */
+ /* get ref for /proc file */
+ lov_pool_getref(new_pool);
+ new_pool->pool_proc_entry = lprocfs_add_simple(lov->lov_pool_proc_entry,
+ poolname, new_pool,
+ &pool_proc_operations);
+ if (IS_ERR(new_pool->pool_proc_entry)) {
+ CWARN("Cannot add proc pool entry "LOV_POOLNAMEF"\n", poolname);
+ new_pool->pool_proc_entry = NULL;
+ lov_pool_putref(new_pool);
+ }
+ CDEBUG(D_INFO, "pool %p - proc %p\n", new_pool, new_pool->pool_proc_entry);
+#endif
+
+ spin_lock(&obd->obd_dev_lock);
+ list_add_tail(&new_pool->pool_list, &lov->lov_pool_list);
+ lov->lov_pool_count++;
+ spin_unlock(&obd->obd_dev_lock);
+
+ /* make the pool findable in the hash only when it is fully ready */
+ rc = cfs_hash_add_unique(lov->lov_pools_hash_body, poolname,
+ &new_pool->pool_hash);
+ if (rc)
+ GOTO(out_err, rc = -EEXIST);
+
+ CDEBUG(D_CONFIG, LOV_POOLNAMEF" is pool #%d\n",
+ poolname, lov->lov_pool_count);
+
+ return 0;
+
+out_err:
+ spin_lock(&obd->obd_dev_lock);
+ list_del_init(&new_pool->pool_list);
+ lov->lov_pool_count--;
+ spin_unlock(&obd->obd_dev_lock);
+
+ lprocfs_remove(&new_pool->pool_proc_entry);
+
+ lov_ost_pool_free(&new_pool->pool_rr.lqr_pool);
+out_free_pool_obds:
+ lov_ost_pool_free(&new_pool->pool_obds);
+ OBD_FREE_PTR(new_pool);
+ return rc;
+}
+
+int lov_pool_del(struct obd_device *obd, char *poolname)
+{
+ struct lov_obd *lov;
+ struct pool_desc *pool;
+
+ lov = &(obd->u.lov);
+
+ /* lookup and kill hash reference */
+ pool = cfs_hash_del_key(lov->lov_pools_hash_body, poolname);
+ if (pool == NULL)
+ return -ENOENT;
+
+ if (pool->pool_proc_entry != NULL) {
+ CDEBUG(D_INFO, "proc entry %p\n", pool->pool_proc_entry);
+ lprocfs_remove(&pool->pool_proc_entry);
+ lov_pool_putref(pool);
+ }
+
+ spin_lock(&obd->obd_dev_lock);
+ list_del_init(&pool->pool_list);
+ lov->lov_pool_count--;
+ spin_unlock(&obd->obd_dev_lock);
+
+ /* release last reference */
+ lov_pool_putref(pool);
+
+ return 0;
+}
+
+
+int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname)
+{
+ struct obd_uuid ost_uuid;
+ struct lov_obd *lov;
+ struct pool_desc *pool;
+ unsigned int lov_idx;
+ int rc;
+
+ lov = &(obd->u.lov);
+
+ pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
+ if (pool == NULL)
+ return -ENOENT;
+
+ obd_str2uuid(&ost_uuid, ostname);
+
+
+ /* search ost in lov array */
+ obd_getref(obd);
+ for (lov_idx = 0; lov_idx < lov->desc.ld_tgt_count; lov_idx++) {
+ if (!lov->lov_tgts[lov_idx])
+ continue;
+ if (obd_uuid_equals(&ost_uuid,
+ &(lov->lov_tgts[lov_idx]->ltd_uuid)))
+ break;
+ }
+ /* test if ost found in lov */
+ if (lov_idx == lov->desc.ld_tgt_count)
+ GOTO(out, rc = -EINVAL);
+
+ rc = lov_ost_pool_add(&pool->pool_obds, lov_idx, lov->lov_tgt_size);
+ if (rc)
+ GOTO(out, rc);
+
+ pool->pool_rr.lqr_dirty = 1;
+
+ CDEBUG(D_CONFIG, "Added %s to "LOV_POOLNAMEF" as member %d\n",
+ ostname, poolname, pool_tgt_count(pool));
+
+out:
+ obd_putref(obd);
+ lov_pool_putref(pool);
+ return rc;
+}
+
+int lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname)
+{
+ struct obd_uuid ost_uuid;
+ struct lov_obd *lov;
+ struct pool_desc *pool;
+ unsigned int lov_idx;
+ int rc = 0;
+
+ lov = &(obd->u.lov);
+
+ pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
+ if (pool == NULL)
+ return -ENOENT;
+
+ obd_str2uuid(&ost_uuid, ostname);
+
+ obd_getref(obd);
+ /* search ost in lov array, to get index */
+ for (lov_idx = 0; lov_idx < lov->desc.ld_tgt_count; lov_idx++) {
+ if (!lov->lov_tgts[lov_idx])
+ continue;
+
+ if (obd_uuid_equals(&ost_uuid,
+ &(lov->lov_tgts[lov_idx]->ltd_uuid)))
+ break;
+ }
+
+ /* test if ost found in lov */
+ if (lov_idx == lov->desc.ld_tgt_count)
+ GOTO(out, rc = -EINVAL);
+
+ lov_ost_pool_remove(&pool->pool_obds, lov_idx);
+
+ pool->pool_rr.lqr_dirty = 1;
+
+ CDEBUG(D_CONFIG, "%s removed from "LOV_POOLNAMEF"\n", ostname,
+ poolname);
+
+out:
+ obd_putref(obd);
+ lov_pool_putref(pool);
+ return rc;
+}
+
+int lov_check_index_in_pool(__u32 idx, struct pool_desc *pool)
+{
+ int i, rc;
+
+ /* the caller may not have a ref on the pool if it got the pool
+ * without calling lov_find_pool() (e.g. by walking the lov pool
+ * list)
+ */
+ lov_pool_getref(pool);
+
+ down_read(&pool_tgt_rw_sem(pool));
+
+ for (i = 0; i < pool_tgt_count(pool); i++) {
+ if (pool_tgt_array(pool)[i] == idx)
+ GOTO(out, rc = 0);
+ }
+ rc = -ENOENT;
+out:
+ up_read(&pool_tgt_rw_sem(pool));
+
+ lov_pool_putref(pool);
+ return rc;
+}
+
+struct pool_desc *lov_find_pool(struct lov_obd *lov, char *poolname)
+{
+ struct pool_desc *pool;
+
+ pool = NULL;
+ if (poolname[0] != '\0') {
+ pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
+ if (pool == NULL)
+ CWARN("Request for an unknown pool ("LOV_POOLNAMEF")\n",
+ poolname);
+ if ((pool != NULL) && (pool_tgt_count(pool) == 0)) {
+ CWARN("Request for an empty pool ("LOV_POOLNAMEF")\n",
+ poolname);
+ /* pool is ignored, so we remove ref on it */
+ lov_pool_putref(pool);
+ pool = NULL;
+ }
+ }
+ return pool;
+}
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
new file mode 100644
index 0000000..61e6d0b
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lov/lov_request.c
@@ -0,0 +1,1518 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_LOV
+
+#include <linux/libcfs/libcfs.h>
+
+#include <obd_class.h>
+#include <obd_lov.h>
+#include <lustre/lustre_idl.h>
+
+#include "lov_internal.h"
+
+static void lov_init_set(struct lov_request_set *set)
+{
+ set->set_count = 0;
+ atomic_set(&set->set_completes, 0);
+ atomic_set(&set->set_success, 0);
+ atomic_set(&set->set_finish_checked, 0);
+ set->set_cookies = 0;
+ INIT_LIST_HEAD(&set->set_list);
+ atomic_set(&set->set_refcount, 1);
+ init_waitqueue_head(&set->set_waitq);
+ spin_lock_init(&set->set_lock);
+}
+
+void lov_finish_set(struct lov_request_set *set)
+{
+ struct list_head *pos, *n;
+
+ LASSERT(set);
+ list_for_each_safe(pos, n, &set->set_list) {
+ struct lov_request *req = list_entry(pos,
+ struct lov_request,
+ rq_link);
+ list_del_init(&req->rq_link);
+
+ if (req->rq_oi.oi_oa)
+ OBDO_FREE(req->rq_oi.oi_oa);
+ if (req->rq_oi.oi_md)
+ OBD_FREE_LARGE(req->rq_oi.oi_md, req->rq_buflen);
+ if (req->rq_oi.oi_osfs)
+ OBD_FREE(req->rq_oi.oi_osfs,
+ sizeof(*req->rq_oi.oi_osfs));
+ OBD_FREE(req, sizeof(*req));
+ }
+
+ if (set->set_pga) {
+ int len = set->set_oabufs * sizeof(*set->set_pga);
+ OBD_FREE_LARGE(set->set_pga, len);
+ }
+ if (set->set_lockh)
+ lov_llh_put(set->set_lockh);
+
+ OBD_FREE(set, sizeof(*set));
+}
+
+int lov_set_finished(struct lov_request_set *set, int idempotent)
+{
+ int completes = atomic_read(&set->set_completes);
+
+ CDEBUG(D_INFO, "check set %d/%d\n", completes, set->set_count);
+
+ if (completes == set->set_count) {
+ if (idempotent)
+ return 1;
+ if (atomic_inc_return(&set->set_finish_checked) == 1)
+ return 1;
+ }
+ return 0;
+}
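+
+/*
+ * Editorial illustration (not part of this patch): for non-idempotent
+ * completion work, the set_finish_checked counter above guarantees that
+ * only the first caller to observe a fully completed set reports it as
+ * finished. A hypothetical standalone sketch of that pattern:
+ */
+static int demo_first_finisher(atomic_t *finish_checked)
+{
+	/* atomic_inc_return() hands every caller a distinct value;
+	 * only the caller that receives 1 performs the one-shot work */
+	return atomic_inc_return(finish_checked) == 1;
+}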
+
+void lov_update_set(struct lov_request_set *set,
+ struct lov_request *req, int rc)
+{
+ req->rq_complete = 1;
+ req->rq_rc = rc;
+
+ atomic_inc(&set->set_completes);
+ if (rc == 0)
+ atomic_inc(&set->set_success);
+
+ wake_up(&set->set_waitq);
+}
+
+int lov_update_common_set(struct lov_request_set *set,
+ struct lov_request *req, int rc)
+{
+ struct lov_obd *lov = &set->set_exp->exp_obd->u.lov;
+
+ lov_update_set(set, req, rc);
+
+ /* grace error on inactive ost */
+ if (rc && !(lov->lov_tgts[req->rq_idx] &&
+ lov->lov_tgts[req->rq_idx]->ltd_active))
+ rc = 0;
+
+ /* FIXME in raid1 regime, should return 0 */
+ return rc;
+}
+
+void lov_set_add_req(struct lov_request *req, struct lov_request_set *set)
+{
+ list_add_tail(&req->rq_link, &set->set_list);
+ set->set_count++;
+ req->rq_rqset = set;
+}
+
+static int lov_check_set(struct lov_obd *lov, int idx)
+{
+ int rc = 0;
+ mutex_lock(&lov->lov_lock);
+
+ if (lov->lov_tgts[idx] == NULL ||
+ lov->lov_tgts[idx]->ltd_active ||
+ (lov->lov_tgts[idx]->ltd_exp != NULL &&
+ class_exp2cliimp(lov->lov_tgts[idx]->ltd_exp)->imp_connect_tried))
+ rc = 1;
+
+ mutex_unlock(&lov->lov_lock);
+ return rc;
+}
+
+/* Check if the OSC connection exists and is active.
+ * If the OSC has not yet had a chance to connect to the OST the first time,
+ * wait once for it to connect instead of returning an error.
+ */
+int lov_check_and_wait_active(struct lov_obd *lov, int ost_idx)
+{
+ wait_queue_head_t waitq;
+ struct l_wait_info lwi;
+ struct lov_tgt_desc *tgt;
+ int rc = 0;
+
+ mutex_lock(&lov->lov_lock);
+
+ tgt = lov->lov_tgts[ost_idx];
+
+ if (unlikely(tgt == NULL))
+ GOTO(out, rc = 0);
+
+ if (likely(tgt->ltd_active))
+ GOTO(out, rc = 1);
+
+ if (tgt->ltd_exp && class_exp2cliimp(tgt->ltd_exp)->imp_connect_tried)
+ GOTO(out, rc = 0);
+
+ mutex_unlock(&lov->lov_lock);
+
+ init_waitqueue_head(&waitq);
+ lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(obd_timeout),
+ cfs_time_seconds(1), NULL, NULL);
+
+ rc = l_wait_event(waitq, lov_check_set(lov, ost_idx), &lwi);
+ if (tgt != NULL && tgt->ltd_active)
+ return 1;
+
+ return 0;
+
+out:
+ mutex_unlock(&lov->lov_lock);
+ return rc;
+}
+
+extern void osc_update_enqueue(struct lustre_handle *lov_lockhp,
+ struct lov_oinfo *loi, int flags,
+ struct ost_lvb *lvb, __u32 mode, int rc);
+
+static int lov_update_enqueue_lov(struct obd_export *exp,
+ struct lustre_handle *lov_lockhp,
+ struct lov_oinfo *loi, int flags, int idx,
+ struct ost_id *oi, int rc)
+{
+ struct lov_obd *lov = &exp->exp_obd->u.lov;
+
+ if (rc != ELDLM_OK &&
+ !(rc == ELDLM_LOCK_ABORTED && (flags & LDLM_FL_HAS_INTENT))) {
+ memset(lov_lockhp, 0, sizeof(*lov_lockhp));
+ if (lov->lov_tgts[idx] && lov->lov_tgts[idx]->ltd_active) {
+ /* -EUSERS used by OST to report file contention */
+ if (rc != -EINTR && rc != -EUSERS)
+ CERROR("%s: enqueue objid "DOSTID" subobj"
+ DOSTID" on OST idx %d: rc %d\n",
+ exp->exp_obd->obd_name,
+ POSTID(oi), POSTID(&loi->loi_oi),
+ loi->loi_ost_idx, rc);
+ } else
+ rc = ELDLM_OK;
+ }
+ return rc;
+}
+
+int lov_update_enqueue_set(struct lov_request *req, __u32 mode, int rc)
+{
+ struct lov_request_set *set = req->rq_rqset;
+ struct lustre_handle *lov_lockhp;
+ struct obd_info *oi = set->set_oi;
+ struct lov_oinfo *loi;
+
+ LASSERT(oi != NULL);
+
+ lov_lockhp = set->set_lockh->llh_handles + req->rq_stripe;
+ loi = oi->oi_md->lsm_oinfo[req->rq_stripe];
+
+ /* XXX LOV STACKING: OSC gets a copy, created in lov_prep_enqueue_set
+ * and that copy can be arbitrarily out of date.
+ *
+ * The LOV API is due for a serious rewriting anyways, and this
+ * can be addressed then. */
+
+ lov_stripe_lock(oi->oi_md);
+ osc_update_enqueue(lov_lockhp, loi, oi->oi_flags,
+ &req->rq_oi.oi_md->lsm_oinfo[0]->loi_lvb, mode, rc);
+ if (rc == ELDLM_LOCK_ABORTED && (oi->oi_flags & LDLM_FL_HAS_INTENT))
+ memset(lov_lockhp, 0, sizeof *lov_lockhp);
+ rc = lov_update_enqueue_lov(set->set_exp, lov_lockhp, loi, oi->oi_flags,
+ req->rq_idx, &oi->oi_md->lsm_oi, rc);
+ lov_stripe_unlock(oi->oi_md);
+ lov_update_set(set, req, rc);
+ return rc;
+}
+
+/* The callback for osc_enqueue that updates lov info for every OSC request. */
+static int cb_update_enqueue(void *cookie, int rc)
+{
+ struct obd_info *oinfo = cookie;
+ struct ldlm_enqueue_info *einfo;
+ struct lov_request *lovreq;
+
+ lovreq = container_of(oinfo, struct lov_request, rq_oi);
+ einfo = lovreq->rq_rqset->set_ei;
+ return lov_update_enqueue_set(lovreq, einfo->ei_mode, rc);
+}
+
+static int enqueue_done(struct lov_request_set *set, __u32 mode)
+{
+ struct lov_request *req;
+ struct lov_obd *lov = &set->set_exp->exp_obd->u.lov;
+ int completes = atomic_read(&set->set_completes);
+ int rc = 0;
+
+ /* enqueue/match success, just return */
+ if (completes && completes == atomic_read(&set->set_success))
+ return 0;
+
+ /* cancel enqueued/matched locks */
+ list_for_each_entry(req, &set->set_list, rq_link) {
+ struct lustre_handle *lov_lockhp;
+
+ if (!req->rq_complete || req->rq_rc)
+ continue;
+
+ lov_lockhp = set->set_lockh->llh_handles + req->rq_stripe;
+ LASSERT(lov_lockhp);
+ if (!lustre_handle_is_used(lov_lockhp))
+ continue;
+
+ rc = obd_cancel(lov->lov_tgts[req->rq_idx]->ltd_exp,
+ req->rq_oi.oi_md, mode, lov_lockhp);
+ if (rc && lov->lov_tgts[req->rq_idx] &&
+ lov->lov_tgts[req->rq_idx]->ltd_active)
+ CERROR("%s: cancelling obdjid "DOSTID" on OST"
+ "idx %d error: rc = %d\n",
+ set->set_exp->exp_obd->obd_name,
+ POSTID(&req->rq_oi.oi_md->lsm_oi),
+ req->rq_idx, rc);
+ }
+ if (set->set_lockh)
+ lov_llh_put(set->set_lockh);
+ return rc;
+}
+
+int lov_fini_enqueue_set(struct lov_request_set *set, __u32 mode, int rc,
+ struct ptlrpc_request_set *rqset)
+{
+ int ret = 0;
+
+ if (set == NULL)
+ return 0;
+ LASSERT(set->set_exp);
+ /* Do enqueue_done only for sync requests and if any request
+ * succeeded. */
+ if (!rqset) {
+ if (rc)
+ atomic_set(&set->set_completes, 0);
+ ret = enqueue_done(set, mode);
+ } else if (set->set_lockh)
+ lov_llh_put(set->set_lockh);
+
+ lov_put_reqset(set);
+
+ return rc ? rc : ret;
+}
+
+static void lov_llh_addref(void *llhp)
+{
+ struct lov_lock_handles *llh = llhp;
+
+ atomic_inc(&llh->llh_refcount);
+ CDEBUG(D_INFO, "GETting llh %p : new refcount %d\n", llh,
+ atomic_read(&llh->llh_refcount));
+}
+
+static struct portals_handle_ops lov_handle_ops = {
+ .hop_addref = lov_llh_addref,
+ .hop_free = NULL,
+};
+
+static struct lov_lock_handles *lov_llh_new(struct lov_stripe_md *lsm)
+{
+ struct lov_lock_handles *llh;
+
+ OBD_ALLOC(llh, sizeof *llh +
+ sizeof(*llh->llh_handles) * lsm->lsm_stripe_count);
+ if (llh == NULL)
+ return NULL;
+
+ atomic_set(&llh->llh_refcount, 2);
+ llh->llh_stripe_count = lsm->lsm_stripe_count;
+ INIT_LIST_HEAD(&llh->llh_handle.h_link);
+ class_handle_hash(&llh->llh_handle, &lov_handle_ops);
+
+ return llh;
+}
+
+int lov_prep_enqueue_set(struct obd_export *exp, struct obd_info *oinfo,
+ struct ldlm_enqueue_info *einfo,
+ struct lov_request_set **reqset)
+{
+ struct lov_obd *lov = &exp->exp_obd->u.lov;
+ struct lov_request_set *set;
+ int i, rc = 0;
+
+ OBD_ALLOC(set, sizeof(*set));
+ if (set == NULL)
+ return -ENOMEM;
+ lov_init_set(set);
+
+ set->set_exp = exp;
+ set->set_oi = oinfo;
+ set->set_ei = einfo;
+ set->set_lockh = lov_llh_new(oinfo->oi_md);
+ if (set->set_lockh == NULL)
+ GOTO(out_set, rc = -ENOMEM);
+ oinfo->oi_lockh->cookie = set->set_lockh->llh_handle.h_cookie;
+
+ for (i = 0; i < oinfo->oi_md->lsm_stripe_count; i++) {
+ struct lov_oinfo *loi;
+ struct lov_request *req;
+ obd_off start, end;
+
+ loi = oinfo->oi_md->lsm_oinfo[i];
+ if (!lov_stripe_intersects(oinfo->oi_md, i,
+ oinfo->oi_policy.l_extent.start,
+ oinfo->oi_policy.l_extent.end,
+ &start, &end))
+ continue;
+
+ if (!lov_check_and_wait_active(lov, loi->loi_ost_idx)) {
+ CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx);
+ continue;
+ }
+
+ OBD_ALLOC(req, sizeof(*req));
+ if (req == NULL)
+ GOTO(out_set, rc = -ENOMEM);
+
+ req->rq_buflen = sizeof(*req->rq_oi.oi_md) +
+ sizeof(struct lov_oinfo *) +
+ sizeof(struct lov_oinfo);
+ OBD_ALLOC_LARGE(req->rq_oi.oi_md, req->rq_buflen);
+ if (req->rq_oi.oi_md == NULL) {
+ OBD_FREE(req, sizeof(*req));
+ GOTO(out_set, rc = -ENOMEM);
+ }
+ req->rq_oi.oi_md->lsm_oinfo[0] =
+ ((void *)req->rq_oi.oi_md) + sizeof(*req->rq_oi.oi_md) +
+ sizeof(struct lov_oinfo *);
+
+ /* Set lov request specific parameters. */
+ req->rq_oi.oi_lockh = set->set_lockh->llh_handles + i;
+ req->rq_oi.oi_cb_up = cb_update_enqueue;
+ req->rq_oi.oi_flags = oinfo->oi_flags;
+
+ LASSERT(req->rq_oi.oi_lockh);
+
+ req->rq_oi.oi_policy.l_extent.gid =
+ oinfo->oi_policy.l_extent.gid;
+ req->rq_oi.oi_policy.l_extent.start = start;
+ req->rq_oi.oi_policy.l_extent.end = end;
+
+ req->rq_idx = loi->loi_ost_idx;
+ req->rq_stripe = i;
+
+ /* XXX LOV STACKING: submd should be from the subobj */
+ req->rq_oi.oi_md->lsm_oi = loi->loi_oi;
+ req->rq_oi.oi_md->lsm_stripe_count = 0;
+ req->rq_oi.oi_md->lsm_oinfo[0]->loi_kms_valid =
+ loi->loi_kms_valid;
+ req->rq_oi.oi_md->lsm_oinfo[0]->loi_kms = loi->loi_kms;
+ req->rq_oi.oi_md->lsm_oinfo[0]->loi_lvb = loi->loi_lvb;
+
+ lov_set_add_req(req, set);
+ }
+ if (!set->set_count)
+ GOTO(out_set, rc = -EIO);
+ *reqset = set;
+ return 0;
+out_set:
+ lov_fini_enqueue_set(set, einfo->ei_mode, rc, NULL);
+ return rc;
+}
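+
+/*
+ * Editorial illustration (not part of this patch): the single-stripe sub-md
+ * above is carved out of one allocation -- the lov_stripe_md header, one
+ * slot of its pointer array and the lov_oinfo it points to sit back to
+ * back. A hypothetical standalone sketch of that layout trick, assuming the
+ * usual kernel allocation helpers:
+ */
+struct demo_oi { int idx; };
+struct demo_md { int nr; struct demo_oi *oi[0]; };
+
+static struct demo_md *demo_alloc_single_stripe_md(void)
+{
+	size_t len = sizeof(struct demo_md) + sizeof(struct demo_oi *) +
+		     sizeof(struct demo_oi);
+	struct demo_md *md = kzalloc(len, GFP_KERNEL);
+
+	if (md == NULL)
+		return NULL;
+	/* the pointer slot points just past itself, into the same buffer */
+	md->oi[0] = (void *)md + sizeof(*md) + sizeof(struct demo_oi *);
+	md->nr = 1;
+	return md;
+}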
+
+int lov_fini_match_set(struct lov_request_set *set, __u32 mode, int flags)
+{
+ int rc = 0;
+
+ if (set == NULL)
+ return 0;
+ LASSERT(set->set_exp);
+ rc = enqueue_done(set, mode);
+ if ((set->set_count == atomic_read(&set->set_success)) &&
+ (flags & LDLM_FL_TEST_LOCK))
+ lov_llh_put(set->set_lockh);
+
+ lov_put_reqset(set);
+
+ return rc;
+}
+
+int lov_prep_match_set(struct obd_export *exp, struct obd_info *oinfo,
+ struct lov_stripe_md *lsm, ldlm_policy_data_t *policy,
+ __u32 mode, struct lustre_handle *lockh,
+ struct lov_request_set **reqset)
+{
+ struct lov_obd *lov = &exp->exp_obd->u.lov;
+ struct lov_request_set *set;
+ int i, rc = 0;
+
+ OBD_ALLOC(set, sizeof(*set));
+ if (set == NULL)
+ return -ENOMEM;
+ lov_init_set(set);
+
+ set->set_exp = exp;
+ set->set_oi = oinfo;
+ set->set_oi->oi_md = lsm;
+ set->set_lockh = lov_llh_new(lsm);
+ if (set->set_lockh == NULL)
+ GOTO(out_set, rc = -ENOMEM);
+ lockh->cookie = set->set_lockh->llh_handle.h_cookie;
+
+ for (i = 0; i < lsm->lsm_stripe_count; i++){
+ struct lov_oinfo *loi;
+ struct lov_request *req;
+ obd_off start, end;
+
+ loi = lsm->lsm_oinfo[i];
+ if (!lov_stripe_intersects(lsm, i, policy->l_extent.start,
+ policy->l_extent.end, &start, &end))
+ continue;
+
+ /* FIXME raid1 should grace this error */
+ if (!lov_check_and_wait_active(lov, loi->loi_ost_idx)) {
+ CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx);
+ GOTO(out_set, rc = -EIO);
+ }
+
+ OBD_ALLOC(req, sizeof(*req));
+ if (req == NULL)
+ GOTO(out_set, rc = -ENOMEM);
+
+ req->rq_buflen = sizeof(*req->rq_oi.oi_md);
+ OBD_ALLOC_LARGE(req->rq_oi.oi_md, req->rq_buflen);
+ if (req->rq_oi.oi_md == NULL) {
+ OBD_FREE(req, sizeof(*req));
+ GOTO(out_set, rc = -ENOMEM);
+ }
+
+ req->rq_oi.oi_policy.l_extent.start = start;
+ req->rq_oi.oi_policy.l_extent.end = end;
+ req->rq_oi.oi_policy.l_extent.gid = policy->l_extent.gid;
+
+ req->rq_idx = loi->loi_ost_idx;
+ req->rq_stripe = i;
+
+ /* XXX LOV STACKING: submd should be from the subobj */
+ req->rq_oi.oi_md->lsm_oi = loi->loi_oi;
+ req->rq_oi.oi_md->lsm_stripe_count = 0;
+
+ lov_set_add_req(req, set);
+ }
+ if (!set->set_count)
+ GOTO(out_set, rc = -EIO);
+ *reqset = set;
+ return rc;
+out_set:
+ lov_fini_match_set(set, mode, 0);
+ return rc;
+}
+
+int lov_fini_cancel_set(struct lov_request_set *set)
+{
+ int rc = 0;
+
+ if (set == NULL)
+ return 0;
+
+ LASSERT(set->set_exp);
+ if (set->set_lockh)
+ lov_llh_put(set->set_lockh);
+
+ lov_put_reqset(set);
+
+ return rc;
+}
+
+int lov_prep_cancel_set(struct obd_export *exp, struct obd_info *oinfo,
+ struct lov_stripe_md *lsm, __u32 mode,
+ struct lustre_handle *lockh,
+ struct lov_request_set **reqset)
+{
+ struct lov_request_set *set;
+ int i, rc = 0;
+
+ OBD_ALLOC(set, sizeof(*set));
+ if (set == NULL)
+ return -ENOMEM;
+ lov_init_set(set);
+
+ set->set_exp = exp;
+ set->set_oi = oinfo;
+ set->set_oi->oi_md = lsm;
+ set->set_lockh = lov_handle2llh(lockh);
+ if (set->set_lockh == NULL) {
+ CERROR("LOV: invalid lov lock handle %p\n", lockh);
+ GOTO(out_set, rc = -EINVAL);
+ }
+ lockh->cookie = set->set_lockh->llh_handle.h_cookie;
+
+ for (i = 0; i < lsm->lsm_stripe_count; i++){
+ struct lov_request *req;
+ struct lustre_handle *lov_lockhp;
+ struct lov_oinfo *loi = lsm->lsm_oinfo[i];
+
+ lov_lockhp = set->set_lockh->llh_handles + i;
+ if (!lustre_handle_is_used(lov_lockhp)) {
+ CDEBUG(D_INFO, "lov idx %d subobj "DOSTID" no lock\n",
+ loi->loi_ost_idx, POSTID(&loi->loi_oi));
+ continue;
+ }
+
+ OBD_ALLOC(req, sizeof(*req));
+ if (req == NULL)
+ GOTO(out_set, rc = -ENOMEM);
+
+ req->rq_buflen = sizeof(*req->rq_oi.oi_md);
+ OBD_ALLOC_LARGE(req->rq_oi.oi_md, req->rq_buflen);
+ if (req->rq_oi.oi_md == NULL) {
+ OBD_FREE(req, sizeof(*req));
+ GOTO(out_set, rc = -ENOMEM);
+ }
+
+ req->rq_idx = loi->loi_ost_idx;
+ req->rq_stripe = i;
+
+ /* XXX LOV STACKING: submd should be from the subobj */
+ req->rq_oi.oi_md->lsm_oi = loi->loi_oi;
+ req->rq_oi.oi_md->lsm_stripe_count = 0;
+
+ lov_set_add_req(req, set);
+ }
+ if (!set->set_count)
+ GOTO(out_set, rc = -EIO);
+ *reqset = set;
+ return rc;
+out_set:
+ lov_fini_cancel_set(set);
+ return rc;
+}
+static int common_attr_done(struct lov_request_set *set)
+{
+ struct list_head *pos;
+ struct lov_request *req;
+ struct obdo *tmp_oa;
+ int rc = 0, attrset = 0;
+
+ LASSERT(set->set_oi != NULL);
+
+ if (set->set_oi->oi_oa == NULL)
+ return 0;
+
+ if (!atomic_read(&set->set_success))
+ return -EIO;
+
+ OBDO_ALLOC(tmp_oa);
+ if (tmp_oa == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ list_for_each (pos, &set->set_list) {
+ req = list_entry(pos, struct lov_request, rq_link);
+
+ if (!req->rq_complete || req->rq_rc)
+ continue;
+ if (req->rq_oi.oi_oa->o_valid == 0) /* inactive stripe */
+ continue;
+ lov_merge_attrs(tmp_oa, req->rq_oi.oi_oa,
+ req->rq_oi.oi_oa->o_valid,
+ set->set_oi->oi_md, req->rq_stripe, &attrset);
+ }
+ if (!attrset) {
+ CERROR("No stripes had valid attrs\n");
+ rc = -EIO;
+ }
+ if ((set->set_oi->oi_oa->o_valid & OBD_MD_FLEPOCH) &&
+ (set->set_oi->oi_md->lsm_stripe_count != attrset)) {
+ /* When we take attributes of some epoch, we require all the
+ * OSTs to be active. */
+ CERROR("Not all the stripes had valid attrs\n");
+ GOTO(out, rc = -EIO);
+ }
+
+ tmp_oa->o_oi = set->set_oi->oi_oa->o_oi;
+ memcpy(set->set_oi->oi_oa, tmp_oa, sizeof(*set->set_oi->oi_oa));
+out:
+ if (tmp_oa)
+ OBDO_FREE(tmp_oa);
+ return rc;
+
+}
+
+static int brw_done(struct lov_request_set *set)
+{
+ struct lov_stripe_md *lsm = set->set_oi->oi_md;
+ struct lov_oinfo *loi = NULL;
+ struct list_head *pos;
+ struct lov_request *req;
+
+ list_for_each (pos, &set->set_list) {
+ req = list_entry(pos, struct lov_request, rq_link);
+
+ if (!req->rq_complete || req->rq_rc)
+ continue;
+
+ loi = lsm->lsm_oinfo[req->rq_stripe];
+
+ if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLBLOCKS)
+ loi->loi_lvb.lvb_blocks = req->rq_oi.oi_oa->o_blocks;
+ }
+
+ return 0;
+}
+
+int lov_fini_brw_set(struct lov_request_set *set)
+{
+ int rc = 0;
+
+ if (set == NULL)
+ return 0;
+ LASSERT(set->set_exp);
+ if (atomic_read(&set->set_completes)) {
+ rc = brw_done(set);
+ /* FIXME update qos data here */
+ }
+ lov_put_reqset(set);
+
+ return rc;
+}
+
+int lov_prep_brw_set(struct obd_export *exp, struct obd_info *oinfo,
+ obd_count oa_bufs, struct brw_page *pga,
+ struct obd_trans_info *oti,
+ struct lov_request_set **reqset)
+{
+ struct {
+ obd_count index;
+ obd_count count;
+ obd_count off;
+ } *info = NULL;
+ struct lov_request_set *set;
+ struct lov_obd *lov = &exp->exp_obd->u.lov;
+ int rc = 0, i, shift;
+
+ OBD_ALLOC(set, sizeof(*set));
+ if (set == NULL)
+ return -ENOMEM;
+ lov_init_set(set);
+
+ set->set_exp = exp;
+ set->set_oti = oti;
+ set->set_oi = oinfo;
+ set->set_oabufs = oa_bufs;
+ OBD_ALLOC_LARGE(set->set_pga, oa_bufs * sizeof(*set->set_pga));
+ if (!set->set_pga)
+ GOTO(out, rc = -ENOMEM);
+
+ OBD_ALLOC_LARGE(info, sizeof(*info) * oinfo->oi_md->lsm_stripe_count);
+ if (!info)
+ GOTO(out, rc = -ENOMEM);
+
+ /* calculate the page count for each stripe */
+ for (i = 0; i < oa_bufs; i++) {
+ int stripe = lov_stripe_number(oinfo->oi_md, pga[i].off);
+ info[stripe].count++;
+ }
+
+ /* alloc and initialize lov request */
+ shift = 0;
+ for (i = 0; i < oinfo->oi_md->lsm_stripe_count; i++){
+ struct lov_oinfo *loi = NULL;
+ struct lov_request *req;
+
+ if (info[i].count == 0)
+ continue;
+
+ loi = oinfo->oi_md->lsm_oinfo[i];
+ if (!lov_check_and_wait_active(lov, loi->loi_ost_idx)) {
+ CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx);
+ GOTO(out, rc = -EIO);
+ }
+
+ OBD_ALLOC(req, sizeof(*req));
+ if (req == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ OBDO_ALLOC(req->rq_oi.oi_oa);
+ if (req->rq_oi.oi_oa == NULL) {
+ OBD_FREE(req, sizeof(*req));
+ GOTO(out, rc = -ENOMEM);
+ }
+
+ if (oinfo->oi_oa) {
+ memcpy(req->rq_oi.oi_oa, oinfo->oi_oa,
+ sizeof(*req->rq_oi.oi_oa));
+ }
+ req->rq_oi.oi_oa->o_oi = loi->loi_oi;
+ req->rq_oi.oi_oa->o_stripe_idx = i;
+
+ req->rq_buflen = sizeof(*req->rq_oi.oi_md);
+ OBD_ALLOC_LARGE(req->rq_oi.oi_md, req->rq_buflen);
+ if (req->rq_oi.oi_md == NULL) {
+ OBDO_FREE(req->rq_oi.oi_oa);
+ OBD_FREE(req, sizeof(*req));
+ GOTO(out, rc = -ENOMEM);
+ }
+
+ req->rq_idx = loi->loi_ost_idx;
+ req->rq_stripe = i;
+
+ /* XXX LOV STACKING */
+ req->rq_oi.oi_md->lsm_oi = loi->loi_oi;
+ req->rq_oabufs = info[i].count;
+ req->rq_pgaidx = shift;
+ shift += req->rq_oabufs;
+
+ /* remember the index for sorting the brw_page array */
+ info[i].index = req->rq_pgaidx;
+
+ req->rq_oi.oi_capa = oinfo->oi_capa;
+
+ lov_set_add_req(req, set);
+ }
+ if (!set->set_count)
+ GOTO(out, rc = -EIO);
+
+ /* rotate & sort the brw_page array */
+ for (i = 0; i < oa_bufs; i++) {
+ int stripe = lov_stripe_number(oinfo->oi_md, pga[i].off);
+
+ shift = info[stripe].index + info[stripe].off;
+ LASSERT(shift < oa_bufs);
+ set->set_pga[shift] = pga[i];
+ lov_stripe_offset(oinfo->oi_md, pga[i].off, stripe,
+ &set->set_pga[shift].off);
+ info[stripe].off++;
+ }
+out:
+ if (info)
+ OBD_FREE_LARGE(info,
+ sizeof(*info) * oinfo->oi_md->lsm_stripe_count);
+
+ if (rc == 0)
+ *reqset = set;
+ else
+ lov_fini_brw_set(set);
+
+ return rc;
+}
+
+int lov_fini_getattr_set(struct lov_request_set *set)
+{
+ int rc = 0;
+
+ if (set == NULL)
+ return 0;
+ LASSERT(set->set_exp);
+ if (atomic_read(&set->set_completes))
+ rc = common_attr_done(set);
+
+ lov_put_reqset(set);
+
+ return rc;
+}
+
+/* The callback for osc_getattr_async that finalizes the request info when a
+ * response is received. */
+static int cb_getattr_update(void *cookie, int rc)
+{
+ struct obd_info *oinfo = cookie;
+ struct lov_request *lovreq;
+ lovreq = container_of(oinfo, struct lov_request, rq_oi);
+ return lov_update_common_set(lovreq->rq_rqset, lovreq, rc);
+}
+
+int lov_prep_getattr_set(struct obd_export *exp, struct obd_info *oinfo,
+ struct lov_request_set **reqset)
+{
+ struct lov_request_set *set;
+ struct lov_obd *lov = &exp->exp_obd->u.lov;
+ int rc = 0, i;
+
+ OBD_ALLOC(set, sizeof(*set));
+ if (set == NULL)
+ return -ENOMEM;
+ lov_init_set(set);
+
+ set->set_exp = exp;
+ set->set_oi = oinfo;
+
+ for (i = 0; i < oinfo->oi_md->lsm_stripe_count; i++) {
+ struct lov_oinfo *loi;
+ struct lov_request *req;
+
+ loi = oinfo->oi_md->lsm_oinfo[i];
+ if (!lov_check_and_wait_active(lov, loi->loi_ost_idx)) {
+ CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx);
+ if (oinfo->oi_oa->o_valid & OBD_MD_FLEPOCH)
+ /* SOM requires all the OSTs to be active. */
+ GOTO(out_set, rc = -EIO);
+ continue;
+ }
+
+ OBD_ALLOC(req, sizeof(*req));
+ if (req == NULL)
+ GOTO(out_set, rc = -ENOMEM);
+
+ req->rq_stripe = i;
+ req->rq_idx = loi->loi_ost_idx;
+
+ OBDO_ALLOC(req->rq_oi.oi_oa);
+ if (req->rq_oi.oi_oa == NULL) {
+ OBD_FREE(req, sizeof(*req));
+ GOTO(out_set, rc = -ENOMEM);
+ }
+ memcpy(req->rq_oi.oi_oa, oinfo->oi_oa,
+ sizeof(*req->rq_oi.oi_oa));
+ req->rq_oi.oi_oa->o_oi = loi->loi_oi;
+ req->rq_oi.oi_cb_up = cb_getattr_update;
+ req->rq_oi.oi_capa = oinfo->oi_capa;
+
+ lov_set_add_req(req, set);
+ }
+ if (!set->set_count)
+ GOTO(out_set, rc = -EIO);
+ *reqset = set;
+ return rc;
+out_set:
+ lov_fini_getattr_set(set);
+ return rc;
+}
+
+int lov_fini_destroy_set(struct lov_request_set *set)
+{
+ if (set == NULL)
+ return 0;
+ LASSERT(set->set_exp);
+ if (atomic_read(&set->set_completes)) {
+ /* FIXME update qos data here */
+ }
+
+ lov_put_reqset(set);
+
+ return 0;
+}
+
+int lov_prep_destroy_set(struct obd_export *exp, struct obd_info *oinfo,
+ struct obdo *src_oa, struct lov_stripe_md *lsm,
+ struct obd_trans_info *oti,
+ struct lov_request_set **reqset)
+{
+ struct lov_request_set *set;
+ struct lov_obd *lov = &exp->exp_obd->u.lov;
+ int rc = 0, i;
+
+ OBD_ALLOC(set, sizeof(*set));
+ if (set == NULL)
+ return -ENOMEM;
+ lov_init_set(set);
+
+ set->set_exp = exp;
+ set->set_oi = oinfo;
+ set->set_oi->oi_md = lsm;
+ set->set_oi->oi_oa = src_oa;
+ set->set_oti = oti;
+ if (oti != NULL && src_oa->o_valid & OBD_MD_FLCOOKIE)
+ set->set_cookies = oti->oti_logcookies;
+
+ for (i = 0; i < lsm->lsm_stripe_count; i++) {
+ struct lov_oinfo *loi;
+ struct lov_request *req;
+
+ loi = lsm->lsm_oinfo[i];
+ if (!lov_check_and_wait_active(lov, loi->loi_ost_idx)) {
+ CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx);
+ continue;
+ }
+
+ OBD_ALLOC(req, sizeof(*req));
+ if (req == NULL)
+ GOTO(out_set, rc = -ENOMEM);
+
+ req->rq_stripe = i;
+ req->rq_idx = loi->loi_ost_idx;
+
+ OBDO_ALLOC(req->rq_oi.oi_oa);
+ if (req->rq_oi.oi_oa == NULL) {
+ OBD_FREE(req, sizeof(*req));
+ GOTO(out_set, rc = -ENOMEM);
+ }
+ memcpy(req->rq_oi.oi_oa, src_oa, sizeof(*req->rq_oi.oi_oa));
+ req->rq_oi.oi_oa->o_oi = loi->loi_oi;
+ lov_set_add_req(req, set);
+ }
+ if (!set->set_count)
+ GOTO(out_set, rc = -EIO);
+ *reqset = set;
+ return rc;
+out_set:
+ lov_fini_destroy_set(set);
+ return rc;
+}
+
+int lov_fini_setattr_set(struct lov_request_set *set)
+{
+ int rc = 0;
+
+ if (set == NULL)
+ return 0;
+ LASSERT(set->set_exp);
+ if (atomic_read(&set->set_completes)) {
+ rc = common_attr_done(set);
+ /* FIXME update qos data here */
+ }
+
+ lov_put_reqset(set);
+ return rc;
+}
+
+int lov_update_setattr_set(struct lov_request_set *set,
+ struct lov_request *req, int rc)
+{
+ struct lov_obd *lov = &req->rq_rqset->set_exp->exp_obd->u.lov;
+ struct lov_stripe_md *lsm = req->rq_rqset->set_oi->oi_md;
+
+ lov_update_set(set, req, rc);
+
+ /* tolerate the error on an inactive OST */
+ if (rc && !(lov->lov_tgts[req->rq_idx] &&
+ lov->lov_tgts[req->rq_idx]->ltd_active))
+ rc = 0;
+
+ if (rc == 0) {
+ if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLCTIME)
+ lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_ctime =
+ req->rq_oi.oi_oa->o_ctime;
+ if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLMTIME)
+ lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_mtime =
+ req->rq_oi.oi_oa->o_mtime;
+ if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLATIME)
+ lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_atime =
+ req->rq_oi.oi_oa->o_atime;
+ }
+
+ return rc;
+}
+
+/* The callback for osc_setattr_async that finalizes a request info when a
+ * response is received. */
+static int cb_setattr_update(void *cookie, int rc)
+{
+ struct obd_info *oinfo = cookie;
+ struct lov_request *lovreq;
+ lovreq = container_of(oinfo, struct lov_request, rq_oi);
+ return lov_update_setattr_set(lovreq->rq_rqset, lovreq, rc);
+}
+
+int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo,
+ struct obd_trans_info *oti,
+ struct lov_request_set **reqset)
+{
+ struct lov_request_set *set;
+ struct lov_obd *lov = &exp->exp_obd->u.lov;
+ int rc = 0, i;
+
+ OBD_ALLOC(set, sizeof(*set));
+ if (set == NULL)
+ return -ENOMEM;
+ lov_init_set(set);
+
+ set->set_exp = exp;
+ set->set_oti = oti;
+ set->set_oi = oinfo;
+ if (oti != NULL && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
+ set->set_cookies = oti->oti_logcookies;
+
+ for (i = 0; i < oinfo->oi_md->lsm_stripe_count; i++) {
+ struct lov_oinfo *loi = oinfo->oi_md->lsm_oinfo[i];
+ struct lov_request *req;
+
+ if (!lov_check_and_wait_active(lov, loi->loi_ost_idx)) {
+ CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx);
+ continue;
+ }
+
+ OBD_ALLOC(req, sizeof(*req));
+ if (req == NULL)
+ GOTO(out_set, rc = -ENOMEM);
+ req->rq_stripe = i;
+ req->rq_idx = loi->loi_ost_idx;
+
+ OBDO_ALLOC(req->rq_oi.oi_oa);
+ if (req->rq_oi.oi_oa == NULL) {
+ OBD_FREE(req, sizeof(*req));
+ GOTO(out_set, rc = -ENOMEM);
+ }
+ memcpy(req->rq_oi.oi_oa, oinfo->oi_oa,
+ sizeof(*req->rq_oi.oi_oa));
+ req->rq_oi.oi_oa->o_oi = loi->loi_oi;
+ req->rq_oi.oi_oa->o_stripe_idx = i;
+ req->rq_oi.oi_cb_up = cb_setattr_update;
+ req->rq_oi.oi_capa = oinfo->oi_capa;
+
+ if (oinfo->oi_oa->o_valid & OBD_MD_FLSIZE) {
+ int off = lov_stripe_offset(oinfo->oi_md,
+ oinfo->oi_oa->o_size, i,
+ &req->rq_oi.oi_oa->o_size);
+
+ if (off < 0 && req->rq_oi.oi_oa->o_size)
+ req->rq_oi.oi_oa->o_size--;
+
+ CDEBUG(D_INODE, "stripe %d has size "LPU64"/"LPU64"\n",
+ i, req->rq_oi.oi_oa->o_size,
+ oinfo->oi_oa->o_size);
+ }
+ lov_set_add_req(req, set);
+ }
+ if (!set->set_count)
+ GOTO(out_set, rc = -EIO);
+ *reqset = set;
+ return rc;
+out_set:
+ lov_fini_setattr_set(set);
+ return rc;
+}
+
+int lov_fini_punch_set(struct lov_request_set *set)
+{
+ int rc = 0;
+
+ if (set == NULL)
+ return 0;
+ LASSERT(set->set_exp);
+ if (atomic_read(&set->set_completes)) {
+ rc = -EIO;
+ /* FIXME update qos data here */
+ if (atomic_read(&set->set_success))
+ rc = common_attr_done(set);
+ }
+
+ lov_put_reqset(set);
+
+ return rc;
+}
+
+int lov_update_punch_set(struct lov_request_set *set,
+ struct lov_request *req, int rc)
+{
+ struct lov_obd *lov = &req->rq_rqset->set_exp->exp_obd->u.lov;
+ struct lov_stripe_md *lsm = req->rq_rqset->set_oi->oi_md;
+
+ lov_update_set(set, req, rc);
+
+ /* tolerate the error on an inactive OST */
+ if (rc && !lov->lov_tgts[req->rq_idx]->ltd_active)
+ rc = 0;
+
+ if (rc == 0) {
+ lov_stripe_lock(lsm);
+ if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLBLOCKS) {
+ lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_blocks =
+ req->rq_oi.oi_oa->o_blocks;
+ }
+
+ lov_stripe_unlock(lsm);
+ }
+
+ return rc;
+}
+
+/* The callback for osc_punch that finalizes a request info when a response
+ * is received. */
+static int cb_update_punch(void *cookie, int rc)
+{
+ struct obd_info *oinfo = cookie;
+ struct lov_request *lovreq;
+ lovreq = container_of(oinfo, struct lov_request, rq_oi);
+ return lov_update_punch_set(lovreq->rq_rqset, lovreq, rc);
+}
+
+int lov_prep_punch_set(struct obd_export *exp, struct obd_info *oinfo,
+ struct obd_trans_info *oti,
+ struct lov_request_set **reqset)
+{
+ struct lov_request_set *set;
+ struct lov_obd *lov = &exp->exp_obd->u.lov;
+ int rc = 0, i;
+
+ OBD_ALLOC(set, sizeof(*set));
+ if (set == NULL)
+ return -ENOMEM;
+ lov_init_set(set);
+
+ set->set_oi = oinfo;
+ set->set_exp = exp;
+
+ for (i = 0; i < oinfo->oi_md->lsm_stripe_count; i++) {
+ struct lov_oinfo *loi = oinfo->oi_md->lsm_oinfo[i];
+ struct lov_request *req;
+ obd_off rs, re;
+
+ if (!lov_stripe_intersects(oinfo->oi_md, i,
+ oinfo->oi_policy.l_extent.start,
+ oinfo->oi_policy.l_extent.end,
+ &rs, &re))
+ continue;
+
+ if (!lov_check_and_wait_active(lov, loi->loi_ost_idx)) {
+ CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx);
+ GOTO(out_set, rc = -EIO);
+ }
+
+ OBD_ALLOC(req, sizeof(*req));
+ if (req == NULL)
+ GOTO(out_set, rc = -ENOMEM);
+ req->rq_stripe = i;
+ req->rq_idx = loi->loi_ost_idx;
+
+ OBDO_ALLOC(req->rq_oi.oi_oa);
+ if (req->rq_oi.oi_oa == NULL) {
+ OBD_FREE(req, sizeof(*req));
+ GOTO(out_set, rc = -ENOMEM);
+ }
+ memcpy(req->rq_oi.oi_oa, oinfo->oi_oa,
+ sizeof(*req->rq_oi.oi_oa));
+ req->rq_oi.oi_oa->o_oi = loi->loi_oi;
+ req->rq_oi.oi_oa->o_valid |= OBD_MD_FLGROUP;
+
+ req->rq_oi.oi_oa->o_stripe_idx = i;
+ req->rq_oi.oi_cb_up = cb_update_punch;
+
+ req->rq_oi.oi_policy.l_extent.start = rs;
+ req->rq_oi.oi_policy.l_extent.end = re;
+ req->rq_oi.oi_policy.l_extent.gid = -1;
+
+ req->rq_oi.oi_capa = oinfo->oi_capa;
+
+ lov_set_add_req(req, set);
+ }
+ if (!set->set_count)
+ GOTO(out_set, rc = -EIO);
+ *reqset = set;
+ return rc;
+out_set:
+ lov_fini_punch_set(set);
+ return rc;
+}
+
+int lov_fini_sync_set(struct lov_request_set *set)
+{
+ int rc = 0;
+
+ if (set == NULL)
+ return 0;
+ LASSERT(set->set_exp);
+ if (atomic_read(&set->set_completes)) {
+ if (!atomic_read(&set->set_success))
+ rc = -EIO;
+ /* FIXME update qos data here */
+ }
+
+ lov_put_reqset(set);
+
+ return rc;
+}
+
+/* The callback for osc_sync that finalizes a request info when a
+ * response is received. */
+static int cb_sync_update(void *cookie, int rc)
+{
+ struct obd_info *oinfo = cookie;
+ struct lov_request *lovreq;
+
+ lovreq = container_of(oinfo, struct lov_request, rq_oi);
+ return lov_update_common_set(lovreq->rq_rqset, lovreq, rc);
+}
+
+int lov_prep_sync_set(struct obd_export *exp, struct obd_info *oinfo,
+ obd_off start, obd_off end,
+ struct lov_request_set **reqset)
+{
+ struct lov_request_set *set;
+ struct lov_obd *lov = &exp->exp_obd->u.lov;
+ int rc = 0, i;
+
+ OBD_ALLOC_PTR(set);
+ if (set == NULL)
+ return -ENOMEM;
+ lov_init_set(set);
+
+ set->set_exp = exp;
+ set->set_oi = oinfo;
+
+ for (i = 0; i < oinfo->oi_md->lsm_stripe_count; i++) {
+ struct lov_oinfo *loi = oinfo->oi_md->lsm_oinfo[i];
+ struct lov_request *req;
+ obd_off rs, re;
+
+ if (!lov_check_and_wait_active(lov, loi->loi_ost_idx)) {
+ CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx);
+ continue;
+ }
+
+ if (!lov_stripe_intersects(oinfo->oi_md, i, start, end, &rs,
+ &re))
+ continue;
+
+ OBD_ALLOC_PTR(req);
+ if (req == NULL)
+ GOTO(out_set, rc = -ENOMEM);
+ req->rq_stripe = i;
+ req->rq_idx = loi->loi_ost_idx;
+
+ OBDO_ALLOC(req->rq_oi.oi_oa);
+ if (req->rq_oi.oi_oa == NULL) {
+ OBD_FREE(req, sizeof(*req));
+ GOTO(out_set, rc = -ENOMEM);
+ }
+ *req->rq_oi.oi_oa = *oinfo->oi_oa;
+ req->rq_oi.oi_oa->o_oi = loi->loi_oi;
+ req->rq_oi.oi_oa->o_stripe_idx = i;
+
+ req->rq_oi.oi_policy.l_extent.start = rs;
+ req->rq_oi.oi_policy.l_extent.end = re;
+ req->rq_oi.oi_policy.l_extent.gid = -1;
+ req->rq_oi.oi_cb_up = cb_sync_update;
+
+ lov_set_add_req(req, set);
+ }
+ if (!set->set_count)
+ GOTO(out_set, rc = -EIO);
+ *reqset = set;
+ return rc;
+out_set:
+ lov_fini_sync_set(set);
+ return rc;
+}
+
+#define LOV_U64_MAX ((__u64)~0ULL)
+#define LOV_SUM_MAX(tot, add) \
+ do { \
+ if ((tot) + (add) < (tot)) \
+ (tot) = LOV_U64_MAX; \
+ else \
+ (tot) += (add); \
+ } while (0)
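+/*
+ * LOV_SUM_MAX() saturates at LOV_U64_MAX instead of wrapping on overflow.
+ * For example (illustrative values), with tot == LOV_U64_MAX - 1 and
+ * add == 10, plain addition would wrap around to 8, whereas the macro
+ * leaves tot at LOV_U64_MAX.
+ */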
+
+int lov_fini_statfs(struct obd_device *obd, struct obd_statfs *osfs, int success)
+{
+ if (success) {
+ __u32 expected_stripes = lov_get_stripecnt(&obd->u.lov,
+ LOV_MAGIC, 0);
+ if (osfs->os_files != LOV_U64_MAX)
+ lov_do_div64(osfs->os_files, expected_stripes);
+ if (osfs->os_ffree != LOV_U64_MAX)
+ lov_do_div64(osfs->os_ffree, expected_stripes);
+
+ spin_lock(&obd->obd_osfs_lock);
+ memcpy(&obd->obd_osfs, osfs, sizeof(*osfs));
+ obd->obd_osfs_age = cfs_time_current_64();
+ spin_unlock(&obd->obd_osfs_lock);
+ return 0;
+ }
+
+ return -EIO;
+}
+
+int lov_fini_statfs_set(struct lov_request_set *set)
+{
+ int rc = 0;
+
+ if (set == NULL)
+ return 0;
+
+ if (atomic_read(&set->set_completes)) {
+ rc = lov_fini_statfs(set->set_obd, set->set_oi->oi_osfs,
+ atomic_read(&set->set_success));
+ }
+ lov_put_reqset(set);
+ return rc;
+}
+
+void lov_update_statfs(struct obd_statfs *osfs, struct obd_statfs *lov_sfs,
+ int success)
+{
+ int shift = 0, quit = 0;
+ __u64 tmp;
+
+ if (success == 0) {
+ memcpy(osfs, lov_sfs, sizeof(*lov_sfs));
+ } else {
+ if (osfs->os_bsize != lov_sfs->os_bsize) {
+ /* assume all block sizes are always powers of 2 */
+ /* get the bits difference */
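+ /* the loop below computes the difference between the two
+ * block-size exponents, e.g. (illustrative values) 4096 vs
+ * 65536 gives shift = 4, and the counters expressed in the
+ * smaller block size are then scaled down by 4 bits before
+ * being summed */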
+ tmp = osfs->os_bsize | lov_sfs->os_bsize;
+ for (shift = 0; shift <= 64; ++shift) {
+ if (tmp & 1) {
+ if (quit)
+ break;
+ else
+ quit = 1;
+ shift = 0;
+ }
+ tmp >>= 1;
+ }
+ }
+
+ if (osfs->os_bsize < lov_sfs->os_bsize) {
+ osfs->os_bsize = lov_sfs->os_bsize;
+
+ osfs->os_bfree >>= shift;
+ osfs->os_bavail >>= shift;
+ osfs->os_blocks >>= shift;
+ } else if (shift != 0) {
+ lov_sfs->os_bfree >>= shift;
+ lov_sfs->os_bavail >>= shift;
+ lov_sfs->os_blocks >>= shift;
+ }
+ osfs->os_bfree += lov_sfs->os_bfree;
+ osfs->os_bavail += lov_sfs->os_bavail;
+ osfs->os_blocks += lov_sfs->os_blocks;
+ /* XXX not sure about this one - depends on policy.
+ * - could be minimum if we always stripe on all OBDs
+ * (but that would be wrong for any other policy,
+ * if one of the OBDs has no more objects left)
+ * - could be sum if we stripe whole objects
+ * - could be average, just to give a nice number
+ *
+ * To give a "reasonable" (if not wholly accurate)
+ * number, we divide the total number of free objects
+ * by expected stripe count (watch out for overflow).
+ */
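+ /* the saturated totals accumulated here are later divided by
+ * the expected stripe count in lov_fini_statfs() */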
+ LOV_SUM_MAX(osfs->os_files, lov_sfs->os_files);
+ LOV_SUM_MAX(osfs->os_ffree, lov_sfs->os_ffree);
+ }
+}
+
+/* The callback for osc_statfs_async that finalizes a request info when a
+ * response is received. */
+static int cb_statfs_update(void *cookie, int rc)
+{
+ struct obd_info *oinfo = cookie;
+ struct lov_request *lovreq;
+ struct lov_request_set *set;
+ struct obd_statfs *osfs, *lov_sfs;
+ struct lov_obd *lov;
+ struct lov_tgt_desc *tgt;
+ struct obd_device *lovobd, *tgtobd;
+ int success;
+
+ lovreq = container_of(oinfo, struct lov_request, rq_oi);
+ set = lovreq->rq_rqset;
+ lovobd = set->set_obd;
+ lov = &lovobd->u.lov;
+ osfs = set->set_oi->oi_osfs;
+ lov_sfs = oinfo->oi_osfs;
+ success = atomic_read(&set->set_success);
+ /* XXX: the same is done in lov_update_common_set, however
+ lovset->set_exp is not initialized. */
+ lov_update_set(set, lovreq, rc);
+ if (rc)
+ GOTO(out, rc);
+
+ obd_getref(lovobd);
+ tgt = lov->lov_tgts[lovreq->rq_idx];
+ if (!tgt || !tgt->ltd_active)
+ GOTO(out_update, rc);
+
+ tgtobd = class_exp2obd(tgt->ltd_exp);
+ spin_lock(&tgtobd->obd_osfs_lock);
+ memcpy(&tgtobd->obd_osfs, lov_sfs, sizeof(*lov_sfs));
+ if ((oinfo->oi_flags & OBD_STATFS_FROM_CACHE) == 0)
+ tgtobd->obd_osfs_age = cfs_time_current_64();
+ spin_unlock(&tgtobd->obd_osfs_lock);
+
+out_update:
+ lov_update_statfs(osfs, lov_sfs, success);
+ obd_putref(lovobd);
+
+out:
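+ /* for an asynchronous (OBD_STATFS_PTLRPCD) statfs, invoke
+ * lov_statfs_interpret() once the last reply has been processed */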
+ if (set->set_oi->oi_flags & OBD_STATFS_PTLRPCD &&
+ lov_set_finished(set, 0)) {
+ lov_statfs_interpret(NULL, set, set->set_count !=
+ atomic_read(&set->set_success));
+ }
+
+ return 0;
+}
+
+int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo,
+ struct lov_request_set **reqset)
+{
+ struct lov_request_set *set;
+ struct lov_obd *lov = &obd->u.lov;
+ int rc = 0, i;
+
+ OBD_ALLOC(set, sizeof(*set));
+ if (set == NULL)
+ return -ENOMEM;
+ lov_init_set(set);
+
+ set->set_obd = obd;
+ set->set_oi = oinfo;
+
+ /* We only get block data from the OBD */
+ for (i = 0; i < lov->desc.ld_tgt_count; i++) {
+ struct lov_request *req;
+
+ if (lov->lov_tgts[i] == NULL ||
+ (!lov_check_and_wait_active(lov, i) &&
+ (oinfo->oi_flags & OBD_STATFS_NODELAY))) {
+ CDEBUG(D_HA, "lov idx %d inactive\n", i);
+ continue;
+ }
+
+ /* skip targets that have been explicitly disabled by the
+ * administrator */
+ if (!lov->lov_tgts[i]->ltd_exp) {
+ CDEBUG(D_HA, "lov idx %d administratively disabled\n", i);
+ continue;
+ }
+
+ OBD_ALLOC(req, sizeof(*req));
+ if (req == NULL)
+ GOTO(out_set, rc = -ENOMEM);
+
+ OBD_ALLOC(req->rq_oi.oi_osfs, sizeof(*req->rq_oi.oi_osfs));
+ if (req->rq_oi.oi_osfs == NULL) {
+ OBD_FREE(req, sizeof(*req));
+ GOTO(out_set, rc = -ENOMEM);
+ }
+
+ req->rq_idx = i;
+ req->rq_oi.oi_cb_up = cb_statfs_update;
+ req->rq_oi.oi_flags = oinfo->oi_flags;
+
+ lov_set_add_req(req, set);
+ }
+ if (!set->set_count)
+ GOTO(out_set, rc = -EIO);
+ *reqset = set;
+ return rc;
+out_set:
+ lov_fini_statfs_set(set);
+ return rc;
+}
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
new file mode 100644
index 0000000..998ea1c
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
@@ -0,0 +1,205 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Implementation of cl_device and cl_device_type for LOVSUB layer.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LOV
+
+#include "lov_cl_internal.h"
+
+/** \addtogroup lov
+ * @{
+ */
+
+/*****************************************************************************
+ *
+ * Lovsub transfer operations.
+ *
+ */
+
+static void lovsub_req_completion(const struct lu_env *env,
+ const struct cl_req_slice *slice, int ioret)
+{
+ struct lovsub_req *lsr;
+
+ lsr = cl2lovsub_req(slice);
+ OBD_SLAB_FREE_PTR(lsr, lovsub_req_kmem);
+}
+
+/**
+ * Implementation of struct cl_req_operations::cro_attr_set() for lovsub
+ * layer. Lov and lovsub are responsible only for struct obdo::o_stripe_idx
+ * field, which is filled there.
+ */
+static void lovsub_req_attr_set(const struct lu_env *env,
+ const struct cl_req_slice *slice,
+ const struct cl_object *obj,
+ struct cl_req_attr *attr, obd_valid flags)
+{
+ struct lovsub_object *subobj;
+
+ subobj = cl2lovsub(obj);
+ /*
+ * There is no OBD_MD_* flag for obdo::o_stripe_idx, so set it
+ * unconditionally. It never changes anyway.
+ */
+ attr->cra_oa->o_stripe_idx = subobj->lso_index;
+}
+
+static const struct cl_req_operations lovsub_req_ops = {
+ .cro_attr_set = lovsub_req_attr_set,
+ .cro_completion = lovsub_req_completion
+};
+
+/*****************************************************************************
+ *
+ * Lov-sub device and device type functions.
+ *
+ */
+
+static int lovsub_device_init(const struct lu_env *env, struct lu_device *d,
+ const char *name, struct lu_device *next)
+{
+ struct lovsub_device *lsd = lu2lovsub_dev(d);
+ struct lu_device_type *ldt;
+ int rc;
+
+ next->ld_site = d->ld_site;
+ ldt = next->ld_type;
+ LASSERT(ldt != NULL);
+ rc = ldt->ldt_ops->ldto_device_init(env, next, ldt->ldt_name, NULL);
+ if (rc) {
+ next->ld_site = NULL;
+ return rc;
+ }
+
+ lu_device_get(next);
+ lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
+ lsd->acid_next = lu2cl_dev(next);
+ return rc;
+}
+
+static struct lu_device *lovsub_device_fini(const struct lu_env *env,
+ struct lu_device *d)
+{
+ struct lu_device *next;
+ struct lovsub_device *lsd;
+
+ lsd = lu2lovsub_dev(d);
+ next = cl2lu_dev(lsd->acid_next);
+ lsd->acid_super = NULL;
+ lsd->acid_next = NULL;
+ return next;
+}
+
+static struct lu_device *lovsub_device_free(const struct lu_env *env,
+ struct lu_device *d)
+{
+ struct lovsub_device *lsd = lu2lovsub_dev(d);
+ struct lu_device *next = cl2lu_dev(lsd->acid_next);
+
+ cl_device_fini(lu2cl_dev(d));
+ OBD_FREE_PTR(lsd);
+ return next;
+}
+
+static int lovsub_req_init(const struct lu_env *env, struct cl_device *dev,
+ struct cl_req *req)
+{
+ struct lovsub_req *lsr;
+ int result;
+
+ OBD_SLAB_ALLOC_PTR_GFP(lsr, lovsub_req_kmem, __GFP_IO);
+ if (lsr != NULL) {
+ cl_req_slice_add(req, &lsr->lsrq_cl, dev, &lovsub_req_ops);
+ result = 0;
+ } else
+ result = -ENOMEM;
+ return result;
+}
+
+static const struct lu_device_operations lovsub_lu_ops = {
+ .ldo_object_alloc = lovsub_object_alloc,
+ .ldo_process_config = NULL,
+ .ldo_recovery_complete = NULL
+};
+
+static const struct cl_device_operations lovsub_cl_ops = {
+ .cdo_req_init = lovsub_req_init
+};
+
+static struct lu_device *lovsub_device_alloc(const struct lu_env *env,
+ struct lu_device_type *t,
+ struct lustre_cfg *cfg)
+{
+ struct lu_device *d;
+ struct lovsub_device *lsd;
+
+ OBD_ALLOC_PTR(lsd);
+ if (lsd != NULL) {
+ int result;
+
+ result = cl_device_init(&lsd->acid_cl, t);
+ if (result == 0) {
+ d = lovsub2lu_dev(lsd);
+ d->ld_ops = &lovsub_lu_ops;
+ lsd->acid_cl.cd_ops = &lovsub_cl_ops;
+ } else
+ d = ERR_PTR(result);
+ } else
+ d = ERR_PTR(-ENOMEM);
+ return d;
+}
+
+static const struct lu_device_type_operations lovsub_device_type_ops = {
+ .ldto_device_alloc = lovsub_device_alloc,
+ .ldto_device_free = lovsub_device_free,
+
+ .ldto_device_init = lovsub_device_init,
+ .ldto_device_fini = lovsub_device_fini
+};
+
+#define LUSTRE_LOVSUB_NAME "lovsub"
+
+struct lu_device_type lovsub_device_type = {
+ .ldt_tags = LU_DEVICE_CL,
+ .ldt_name = LUSTRE_LOVSUB_NAME,
+ .ldt_ops = &lovsub_device_type_ops,
+ .ldt_ctx_tags = LCT_CL_THREAD
+};
+
+
+/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_io.c b/drivers/staging/lustre/lustre/lov/lovsub_io.c
new file mode 100644
index 0000000..783ec68
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lov/lovsub_io.c
@@ -0,0 +1,55 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Implementation of cl_io for LOVSUB layer.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LOV
+
+#include "lov_cl_internal.h"
+
+/** \addtogroup lov
+ * @{
+ */
+
+/*****************************************************************************
+ *
+ * Lovsub io operations.
+ *
+ */
+
+/* All trivial */
+
+/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_lock.c b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
new file mode 100644
index 0000000..80305aa
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
@@ -0,0 +1,466 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Implementation of cl_lock for LOVSUB layer.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LOV
+
+#include "lov_cl_internal.h"
+
+/** \addtogroup lov
+ * @{
+ */
+
+/*****************************************************************************
+ *
+ * Lovsub lock operations.
+ *
+ */
+
+static void lovsub_lock_fini(const struct lu_env *env,
+ struct cl_lock_slice *slice)
+{
+ struct lovsub_lock *lsl;
+
+ lsl = cl2lovsub_lock(slice);
+ LASSERT(list_empty(&lsl->lss_parents));
+ OBD_SLAB_FREE_PTR(lsl, lovsub_lock_kmem);
+}
+
+static void lovsub_parent_lock(const struct lu_env *env, struct lov_lock *lov)
+{
+ struct cl_lock *parent;
+
+ parent = lov->lls_cl.cls_lock;
+ cl_lock_get(parent);
+ lu_ref_add(&parent->cll_reference, "lovsub-parent", current);
+ cl_lock_mutex_get(env, parent);
+}
+
+static void lovsub_parent_unlock(const struct lu_env *env, struct lov_lock *lov)
+{
+ struct cl_lock *parent;
+
+ parent = lov->lls_cl.cls_lock;
+ cl_lock_mutex_put(env, lov->lls_cl.cls_lock);
+ lu_ref_del(&parent->cll_reference, "lovsub-parent", current);
+ cl_lock_put(env, parent);
+}
+
+/**
+ * Implements the cl_lock_operations::clo_state() method for the lovsub
+ * layer; it is called whenever the sub-lock state changes and propagates
+ * the state change to the top-locks.
+ */
+static void lovsub_lock_state(const struct lu_env *env,
+ const struct cl_lock_slice *slice,
+ enum cl_lock_state state)
+{
+ struct lovsub_lock *sub = cl2lovsub_lock(slice);
+ struct lov_lock_link *scan;
+
+ LASSERT(cl_lock_is_mutexed(slice->cls_lock));
+
+ list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+ struct lov_lock *lov = scan->lll_super;
+ struct cl_lock *parent = lov->lls_cl.cls_lock;
+
+ if (sub->lss_active != parent) {
+ lovsub_parent_lock(env, lov);
+ cl_lock_signal(env, parent);
+ lovsub_parent_unlock(env, lov);
+ }
+ }
+}
+
+/**
+ * Implementation of cl_lock_operations::clo_weigh() estimating the lock
+ * weight by asking the parent lock.
+ */
+static unsigned long lovsub_lock_weigh(const struct lu_env *env,
+ const struct cl_lock_slice *slice)
+{
+ struct lovsub_lock *lock = cl2lovsub_lock(slice);
+ struct lov_lock *lov;
+ unsigned long dumbbell;
+
+ LASSERT(cl_lock_is_mutexed(slice->cls_lock));
+
+ if (!list_empty(&lock->lss_parents)) {
+ /*
+ * It is not clear whether all parents have to be asked and
+ * their estimates summed, or whether it is enough to ask
+ * one. For the current usage, one is always enough.
+ */
+ lov = container_of(lock->lss_parents.next,
+ struct lov_lock_link, lll_list)->lll_super;
+
+ lovsub_parent_lock(env, lov);
+ dumbbell = cl_lock_weigh(env, lov->lls_cl.cls_lock);
+ lovsub_parent_unlock(env, lov);
+ } else
+ dumbbell = 0;
+
+ return dumbbell;
+}
+
+/**
+ * Maps start/end offsets within a stripe to offsets within a file.
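+ *
+ * For an in-stripe page index p on stripe s the file page index is
+ * p + (p / size) * skip + s * size, where size is the stripe size in
+ * pages and skip = (stripe_count - 1) * size.  For example (illustrative
+ * values), with size = 256 pages and stripe_count = 4 (so skip = 768),
+ * in-stripe page 300 of stripe 1 maps to file page 300 + 768 + 256 = 1324.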
+ */
+static void lovsub_lock_descr_map(const struct cl_lock_descr *in,
+ struct lov_object *lov,
+ int stripe, struct cl_lock_descr *out)
+{
+ pgoff_t size; /* stripe size in pages */
+ pgoff_t skip; /* how many pages in every stripe are occupied by
+ * "other" stripes */
+ pgoff_t start;
+ pgoff_t end;
+
+ start = in->cld_start;
+ end = in->cld_end;
+
+ if (lov->lo_lsm->lsm_stripe_count > 1) {
+ size = cl_index(lov2cl(lov), lov->lo_lsm->lsm_stripe_size);
+ skip = (lov->lo_lsm->lsm_stripe_count - 1) * size;
+
+ /* XXX overflow check here? */
+ start += start/size * skip + stripe * size;
+
+ if (end != CL_PAGE_EOF) {
+ end += end/size * skip + stripe * size;
+ /*
+ * And check for overflow...
+ */
+ if (end < in->cld_end)
+ end = CL_PAGE_EOF;
+ }
+ }
+ out->cld_start = start;
+ out->cld_end = end;
+}
+
+/**
+ * Adjusts parent lock extent when a sub-lock is attached to a parent. This is
+ * called in two ways:
+ *
+ * - as part of receive call-back, when server returns granted extent to
+ * the client, and
+ *
+ * - when top-lock finds existing sub-lock in the cache.
+ *
+ * Note that the lock mode is not propagated to the parent: i.e., if a
+ * CLM_READ top-lock matches a CLM_WRITE sub-lock, the top-lock stays CLM_READ.
+ */
+int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov,
+ struct lovsub_lock *sublock,
+ const struct cl_lock_descr *d, int idx)
+{
+ struct cl_lock *parent;
+ struct lovsub_object *subobj;
+ struct cl_lock_descr *pd;
+ struct cl_lock_descr *parent_descr;
+ int result;
+
+ parent = lov->lls_cl.cls_lock;
+ parent_descr = &parent->cll_descr;
+ LASSERT(cl_lock_mode_match(d->cld_mode, parent_descr->cld_mode));
+
+ subobj = cl2lovsub(sublock->lss_cl.cls_obj);
+ pd = &lov_env_info(env)->lti_ldescr;
+
+ pd->cld_obj = parent_descr->cld_obj;
+ pd->cld_mode = parent_descr->cld_mode;
+ pd->cld_gid = parent_descr->cld_gid;
+ lovsub_lock_descr_map(d, subobj->lso_super, subobj->lso_index, pd);
+ lov->lls_sub[idx].sub_got = *d;
+ /*
+ * Notify top-lock about modification, if lock description changes
+ * materially.
+ */
+ if (!cl_lock_ext_match(parent_descr, pd))
+ result = cl_lock_modify(env, parent, pd);
+ else
+ result = 0;
+ return result;
+}
+
+static int lovsub_lock_modify(const struct lu_env *env,
+ const struct cl_lock_slice *s,
+ const struct cl_lock_descr *d)
+{
+ struct lovsub_lock *lock = cl2lovsub_lock(s);
+ struct lov_lock_link *scan;
+ struct lov_lock *lov;
+ int result = 0;
+
+ LASSERT(cl_lock_mode_match(d->cld_mode,
+ s->cls_lock->cll_descr.cld_mode));
+ list_for_each_entry(scan, &lock->lss_parents, lll_list) {
+ int rc;
+
+ lov = scan->lll_super;
+ lovsub_parent_lock(env, lov);
+ rc = lov_sublock_modify(env, lov, lock, d, scan->lll_idx);
+ lovsub_parent_unlock(env, lov);
+ result = result ?: rc;
+ }
+ return result;
+}
+
+static int lovsub_lock_closure(const struct lu_env *env,
+ const struct cl_lock_slice *slice,
+ struct cl_lock_closure *closure)
+{
+ struct lovsub_lock *sub;
+ struct cl_lock *parent;
+ struct lov_lock_link *scan;
+ int result;
+
+ LASSERT(cl_lock_is_mutexed(slice->cls_lock));
+
+ sub = cl2lovsub_lock(slice);
+ result = 0;
+
+ list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+ parent = scan->lll_super->lls_cl.cls_lock;
+ result = cl_lock_closure_build(env, parent, closure);
+ if (result != 0)
+ break;
+ }
+ return result;
+}
+
+/**
+ * A helper function for lovsub_lock_delete() that deals with a given parent
+ * top-lock.
+ */
+static int lovsub_lock_delete_one(const struct lu_env *env,
+ struct cl_lock *child, struct lov_lock *lov)
+{
+ struct cl_lock *parent;
+ int result;
+
+ parent = lov->lls_cl.cls_lock;
+ if (parent->cll_error)
+ return 0;
+
+ result = 0;
+ switch (parent->cll_state) {
+ case CLS_ENQUEUED:
+ /* See LU-1355 for the case where a glimpse lock is
+ * interrupted by a signal */
+ LASSERT(parent->cll_flags & CLF_CANCELLED);
+ break;
+ case CLS_QUEUING:
+ case CLS_FREEING:
+ cl_lock_signal(env, parent);
+ break;
+ case CLS_INTRANSIT:
+ /*
+ * Here lies a problem: a sub-lock is canceled while top-lock
+ * is being unlocked. Top-lock cannot be moved into CLS_NEW
+ * state, because unlocking has to succeed eventually by
+ * placing lock into CLS_CACHED (or failing it), see
+ * cl_unuse_try(). Nor can top-lock be left in CLS_CACHED
+ * state, because lov maintains an invariant that all
+ * sub-locks exist in CLS_CACHED (this allows cached top-lock
+ * to be reused immediately). Nor can we wait for top-lock
+ * state to change, because this can be synchronous to the
+ * current thread.
+ *
+ * We know for sure that lov_lock_unuse() will be called at
+ * least one more time to finish un-using, so leave a mark on
+ * the top-lock, that will be seen by the next call to
+ * lov_lock_unuse().
+ */
+ if (cl_lock_is_intransit(parent))
+ lov->lls_cancel_race = 1;
+ break;
+ case CLS_CACHED:
+ /*
+ * if a sub-lock is canceled move its top-lock into CLS_NEW
+ * state to preserve an invariant that a top-lock in
+ * CLS_CACHED is immediately ready for re-use (i.e., has all
+ * sub-locks), and so that next attempt to re-use the top-lock
+ * enqueues missing sub-lock.
+ */
+ cl_lock_state_set(env, parent, CLS_NEW);
+ /* fall through */
+ case CLS_NEW:
+ /*
+ * if last sub-lock is canceled, destroy the top-lock (which
+ * is now `empty') proactively.
+ */
+ if (lov->lls_nr_filled == 0) {
+ /* ... but unfortunately, this cannot be done easily,
+ * as cancellation of a top-lock might acquire mutexes
+ * of its other sub-locks, violating lock ordering,
+ * see cl_lock_{cancel,delete}() preconditions.
+ *
+ * To work around this, the mutex of this sub-lock is
+ * released, top-lock is destroyed, and sub-lock mutex
+ * acquired again. The list of parents has to be
+ * re-scanned from the beginning after this.
+ *
+ * Only do this if no mutexes other than on @child and
+ * @parent are held by the current thread.
+ *
+ * TODO: The locking model here is too complex, because
+ * the lock may be canceled and deleted voluntarily:
+ * cl_lock_request
+ * -> osc_lock_enqueue_wait
+ * -> osc_lock_cancel_wait
+ * -> cl_lock_delete
+ * -> lovsub_lock_delete
+ * -> cl_lock_cancel/delete
+ * -> ...
+ *
+ * The better choice is to spawn a kernel thread for
+ * this purpose. -jay
+ */
+ if (cl_lock_nr_mutexed(env) == 2) {
+ cl_lock_mutex_put(env, child);
+ cl_lock_cancel(env, parent);
+ cl_lock_delete(env, parent);
+ result = 1;
+ }
+ }
+ break;
+ case CLS_HELD:
+ CL_LOCK_DEBUG(D_ERROR, env, parent, "Delete CLS_HELD lock\n");
+ default:
+ CERROR("Impossible state: %d\n", parent->cll_state);
+ LBUG();
+ break;
+ }
+
+ return result;
+}
+
+/**
+ * An implementation of cl_lock_operations::clo_delete() method. This is
+ * invoked in "bottom-to-top" delete, when lock destruction starts from the
+ * sub-lock (e.g, as a result of ldlm lock LRU policy).
+ */
+static void lovsub_lock_delete(const struct lu_env *env,
+ const struct cl_lock_slice *slice)
+{
+ struct cl_lock *child = slice->cls_lock;
+ struct lovsub_lock *sub = cl2lovsub_lock(slice);
+ int restart;
+
+ LASSERT(cl_lock_is_mutexed(child));
+
+ /*
+ * Destruction of a sub-lock might take multiple iterations, because
+ * when the last sub-lock of a given top-lock is deleted, top-lock is
+ * canceled proactively, and this requires to release sub-lock
+ * mutex. Once sub-lock mutex has been released, list of its parents
+ * has to be re-scanned from the beginning.
+ */
+ do {
+ struct lov_lock *lov;
+ struct lov_lock_link *scan;
+ struct lov_lock_link *temp;
+ struct lov_lock_sub *subdata;
+
+ restart = 0;
+ list_for_each_entry_safe(scan, temp,
+ &sub->lss_parents, lll_list) {
+ lov = scan->lll_super;
+ subdata = &lov->lls_sub[scan->lll_idx];
+ lovsub_parent_lock(env, lov);
+ subdata->sub_got = subdata->sub_descr;
+ lov_lock_unlink(env, scan, sub);
+ restart = lovsub_lock_delete_one(env, child, lov);
+ lovsub_parent_unlock(env, lov);
+
+ if (restart) {
+ cl_lock_mutex_get(env, child);
+ break;
+ }
+ }
+ } while (restart);
+}
+
+static int lovsub_lock_print(const struct lu_env *env, void *cookie,
+ lu_printer_t p, const struct cl_lock_slice *slice)
+{
+ struct lovsub_lock *sub = cl2lovsub_lock(slice);
+ struct lov_lock *lov;
+ struct lov_lock_link *scan;
+
+ list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+ lov = scan->lll_super;
+ (*p)(env, cookie, "[%d %p ", scan->lll_idx, lov);
+ if (lov != NULL)
+ cl_lock_descr_print(env, cookie, p,
+ &lov->lls_cl.cls_lock->cll_descr);
+ (*p)(env, cookie, "] ");
+ }
+ return 0;
+}
+
+static const struct cl_lock_operations lovsub_lock_ops = {
+ .clo_fini = lovsub_lock_fini,
+ .clo_state = lovsub_lock_state,
+ .clo_delete = lovsub_lock_delete,
+ .clo_modify = lovsub_lock_modify,
+ .clo_closure = lovsub_lock_closure,
+ .clo_weigh = lovsub_lock_weigh,
+ .clo_print = lovsub_lock_print
+};
+
+int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_lock *lock, const struct cl_io *io)
+{
+ struct lovsub_lock *lsk;
+ int result;
+
+ OBD_SLAB_ALLOC_PTR_GFP(lsk, lovsub_lock_kmem, __GFP_IO);
+ if (lsk != NULL) {
+ INIT_LIST_HEAD(&lsk->lss_parents);
+ cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops);
+ result = 0;
+ } else
+ result = -ENOMEM;
+ return result;
+}
+
+/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_object.c b/drivers/staging/lustre/lustre/lov/lovsub_object.c
new file mode 100644
index 0000000..89760b3
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lov/lovsub_object.c
@@ -0,0 +1,164 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Implementation of cl_object for LOVSUB layer.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LOV
+
+#include "lov_cl_internal.h"
+
+/** \addtogroup lov
+ * @{
+ */
+
+/*****************************************************************************
+ *
+ * Lovsub object operations.
+ *
+ */
+
+int lovsub_object_init(const struct lu_env *env, struct lu_object *obj,
+ const struct lu_object_conf *conf)
+{
+ struct lovsub_device *dev = lu2lovsub_dev(obj->lo_dev);
+ struct lu_object *below;
+ struct lu_device *under;
+
+ int result;
+
+ under = &dev->acid_next->cd_lu_dev;
+ below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
+ if (below != NULL) {
+ lu_object_add(obj, below);
+ cl_object_page_init(lu2cl(obj), sizeof(struct lovsub_page));
+ result = 0;
+ } else
+ result = -ENOMEM;
+ return result;
+
+}
+
+static void lovsub_object_free(const struct lu_env *env, struct lu_object *obj)
+{
+ struct lovsub_object *los = lu2lovsub(obj);
+ struct lov_object *lov = los->lso_super;
+
+ /* We can't assume lov was assigned here, because of the shadow
+ * object handling in lu_object_find.
+ */
+ if (lov) {
+ LASSERT(lov->lo_type == LLT_RAID0);
+ LASSERT(lov->u.raid0.lo_sub[los->lso_index] == los);
+ spin_lock(&lov->u.raid0.lo_sub_lock);
+ lov->u.raid0.lo_sub[los->lso_index] = NULL;
+ spin_unlock(&lov->u.raid0.lo_sub_lock);
+ }
+
+ lu_object_fini(obj);
+ lu_object_header_fini(&los->lso_header.coh_lu);
+ OBD_SLAB_FREE_PTR(los, lovsub_object_kmem);
+}
+
+static int lovsub_object_print(const struct lu_env *env, void *cookie,
+ lu_printer_t p, const struct lu_object *obj)
+{
+ struct lovsub_object *los = lu2lovsub(obj);
+
+ return (*p)(env, cookie, "[%d]", los->lso_index);
+}
+
+static int lovsub_attr_set(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_attr *attr, unsigned valid)
+{
+ struct lov_object *lov = cl2lovsub(obj)->lso_super;
+
+ lov_r0(lov)->lo_attr_valid = 0;
+ return 0;
+}
+
+static int lovsub_object_glimpse(const struct lu_env *env,
+ const struct cl_object *obj,
+ struct ost_lvb *lvb)
+{
+ struct lovsub_object *los = cl2lovsub(obj);
+
+ return cl_object_glimpse(env, &los->lso_super->lo_cl, lvb);
+}
+
+
+
+static const struct cl_object_operations lovsub_ops = {
+ .coo_page_init = lovsub_page_init,
+ .coo_lock_init = lovsub_lock_init,
+ .coo_attr_set = lovsub_attr_set,
+ .coo_glimpse = lovsub_object_glimpse
+};
+
+static const struct lu_object_operations lovsub_lu_obj_ops = {
+ .loo_object_init = lovsub_object_init,
+ .loo_object_delete = NULL,
+ .loo_object_release = NULL,
+ .loo_object_free = lovsub_object_free,
+ .loo_object_print = lovsub_object_print,
+ .loo_object_invariant = NULL
+};
+
+struct lu_object *lovsub_object_alloc(const struct lu_env *env,
+ const struct lu_object_header *unused,
+ struct lu_device *dev)
+{
+ struct lovsub_object *los;
+ struct lu_object *obj;
+
+ OBD_SLAB_ALLOC_PTR_GFP(los, lovsub_object_kmem, __GFP_IO);
+ if (los != NULL) {
+ struct cl_object_header *hdr;
+
+ obj = lovsub2lu(los);
+ hdr = &los->lso_header;
+ cl_object_header_init(hdr);
+ lu_object_init(obj, &hdr->coh_lu, dev);
+ lu_object_add_top(&hdr->coh_lu, obj);
+ los->lso_cl.co_ops = &lovsub_ops;
+ obj->lo_ops = &lovsub_lu_obj_ops;
+ } else
+ obj = NULL;
+ return obj;
+}
+
+/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_page.c b/drivers/staging/lustre/lustre/lov/lovsub_page.c
new file mode 100644
index 0000000..3f00ce9
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lov/lovsub_page.c
@@ -0,0 +1,71 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Implementation of cl_page for LOVSUB layer.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LOV
+
+#include "lov_cl_internal.h"
+
+/** \addtogroup lov
+ * @{
+ */
+
+/*****************************************************************************
+ *
+ * Lovsub page operations.
+ *
+ */
+
+static void lovsub_page_fini(const struct lu_env *env,
+ struct cl_page_slice *slice)
+{
+}
+
+static const struct cl_page_operations lovsub_page_ops = {
+ .cpo_fini = lovsub_page_fini
+};
+
+int lovsub_page_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, struct page *unused)
+{
+ struct lovsub_page *lsb = cl_object_page_slice(obj, page);
+
+ cl_page_slice_add(page, &lsb->lsb_cl, obj, &lovsub_page_ops);
+ return 0;
+}
+
+/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lproc_lov.c b/drivers/staging/lustre/lustre/lov/lproc_lov.c
new file mode 100644
index 0000000..15744e1
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lov/lproc_lov.c
@@ -0,0 +1,301 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+#define DEBUG_SUBSYSTEM S_CLASS
+
+#include <asm/statfs.h>
+#include <lprocfs_status.h>
+#include <obd_class.h>
+#include <linux/seq_file.h>
+#include "lov_internal.h"
+
+#ifdef LPROCFS
+static int lov_stripesize_seq_show(struct seq_file *m, void *v)
+{
+ struct obd_device *dev = (struct obd_device *)m->private;
+ struct lov_desc *desc;
+
+ LASSERT(dev != NULL);
+ desc = &dev->u.lov.desc;
+ return seq_printf(m, LPU64"\n", desc->ld_default_stripe_size);
+}
+
+static ssize_t lov_stripesize_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
+ struct lov_desc *desc;
+ __u64 val;
+ int rc;
+
+ LASSERT(dev != NULL);
+ desc = &dev->u.lov.desc;
+ rc = lprocfs_write_u64_helper(buffer, count, &val);
+ if (rc)
+ return rc;
+
+ lov_fix_desc_stripe_size(&val);
+ desc->ld_default_stripe_size = val;
+ return count;
+}
+LPROC_SEQ_FOPS(lov_stripesize);
+
+static int lov_stripeoffset_seq_show(struct seq_file *m, void *v)
+{
+ struct obd_device *dev = (struct obd_device *)m->private;
+ struct lov_desc *desc;
+
+ LASSERT(dev != NULL);
+ desc = &dev->u.lov.desc;
+ return seq_printf(m, LPU64"\n", desc->ld_default_stripe_offset);
+}
+
+static ssize_t lov_stripeoffset_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
+ struct lov_desc *desc;
+ __u64 val;
+ int rc;
+
+ LASSERT(dev != NULL);
+ desc = &dev->u.lov.desc;
+ rc = lprocfs_write_u64_helper(buffer, count, &val);
+ if (rc)
+ return rc;
+
+ desc->ld_default_stripe_offset = val;
+ return count;
+}
+LPROC_SEQ_FOPS(lov_stripeoffset);
+
+static int lov_stripetype_seq_show(struct seq_file *m, void *v)
+{
+ struct obd_device *dev = (struct obd_device *)m->private;
+ struct lov_desc *desc;
+
+ LASSERT(dev != NULL);
+ desc = &dev->u.lov.desc;
+ return seq_printf(m, "%u\n", desc->ld_pattern);
+}
+
+static ssize_t lov_stripetype_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
+ struct lov_desc *desc;
+ int val, rc;
+
+ LASSERT(dev != NULL);
+ desc = &dev->u.lov.desc;
+ rc = lprocfs_write_helper(buffer, count, &val);
+ if (rc)
+ return rc;
+
+ lov_fix_desc_pattern(&val);
+ desc->ld_pattern = val;
+ return count;
+}
+LPROC_SEQ_FOPS(lov_stripetype);
+
+static int lov_stripecount_seq_show(struct seq_file *m, void *v)
+{
+ struct obd_device *dev = (struct obd_device *)m->private;
+ struct lov_desc *desc;
+
+ LASSERT(dev != NULL);
+ desc = &dev->u.lov.desc;
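+ /* the cast below presumably maps the special "stripe over all
+ * available OSTs" value of (__u16)-1 to -1 for display instead
+ * of 65535 */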
+ return seq_printf(m, "%d\n",
+ (__s16)(desc->ld_default_stripe_count + 1) - 1);
+}
+
+static ssize_t lov_stripecount_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
+ struct lov_desc *desc;
+ int val, rc;
+
+ LASSERT(dev != NULL);
+ desc = &dev->u.lov.desc;
+ rc = lprocfs_write_helper(buffer, count, &val);
+ if (rc)
+ return rc;
+
+ lov_fix_desc_stripe_count(&val);
+ desc->ld_default_stripe_count = val;
+ return count;
+}
+LPROC_SEQ_FOPS(lov_stripecount);
+
+static int lov_numobd_seq_show(struct seq_file *m, void *v)
+{
+ struct obd_device *dev = (struct obd_device *)m->private;
+ struct lov_desc *desc;
+
+ LASSERT(dev != NULL);
+ desc = &dev->u.lov.desc;
+ return seq_printf(m, "%u\n", desc->ld_tgt_count);
+}
+LPROC_SEQ_FOPS_RO(lov_numobd);
+
+static int lov_activeobd_seq_show(struct seq_file *m, void *v)
+{
+ struct obd_device *dev = (struct obd_device *)m->private;
+ struct lov_desc *desc;
+
+ LASSERT(dev != NULL);
+ desc = &dev->u.lov.desc;
+ return seq_printf(m, "%u\n", desc->ld_active_tgt_count);
+}
+LPROC_SEQ_FOPS_RO(lov_activeobd);
+
+static int lov_desc_uuid_seq_show(struct seq_file *m, void *v)
+{
+ struct obd_device *dev = (struct obd_device *)m->private;
+ struct lov_obd *lov;
+
+ LASSERT(dev != NULL);
+ lov = &dev->u.lov;
+ return seq_printf(m, "%s\n", lov->desc.ld_uuid.uuid);
+}
+LPROC_SEQ_FOPS_RO(lov_desc_uuid);
+
+static void *lov_tgt_seq_start(struct seq_file *p, loff_t *pos)
+{
+ struct obd_device *dev = p->private;
+ struct lov_obd *lov = &dev->u.lov;
+
+ while (*pos < lov->desc.ld_tgt_count) {
+ if (lov->lov_tgts[*pos])
+ return lov->lov_tgts[*pos];
+ ++*pos;
+ }
+ return NULL;
+}
+
+static void lov_tgt_seq_stop(struct seq_file *p, void *v)
+{
+}
+
+static void *lov_tgt_seq_next(struct seq_file *p, void *v, loff_t *pos)
+{
+ struct obd_device *dev = p->private;
+ struct lov_obd *lov = &dev->u.lov;
+
+ while (++*pos < lov->desc.ld_tgt_count) {
+ if (lov->lov_tgts[*pos])
+ return lov->lov_tgts[*pos];
+ }
+ return NULL;
+}
+
+static int lov_tgt_seq_show(struct seq_file *p, void *v)
+{
+ struct lov_tgt_desc *tgt = v;
+ return seq_printf(p, "%d: %s %sACTIVE\n", tgt->ltd_index,
+ obd_uuid2str(&tgt->ltd_uuid),
+ tgt->ltd_active ? "" : "IN");
+}
+
+struct seq_operations lov_tgt_sops = {
+ .start = lov_tgt_seq_start,
+ .stop = lov_tgt_seq_stop,
+ .next = lov_tgt_seq_next,
+ .show = lov_tgt_seq_show,
+};
+
+static int lov_target_seq_open(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq;
+ int rc;
+
+ rc = seq_open(file, &lov_tgt_sops);
+ if (rc)
+ return rc;
+
+ seq = file->private_data;
+ seq->private = PDE_DATA(inode);
+ return 0;
+}
+
+LPROC_SEQ_FOPS_RO_TYPE(lov, uuid);
+LPROC_SEQ_FOPS_RO_TYPE(lov, filestotal);
+LPROC_SEQ_FOPS_RO_TYPE(lov, filesfree);
+LPROC_SEQ_FOPS_RO_TYPE(lov, blksize);
+LPROC_SEQ_FOPS_RO_TYPE(lov, kbytestotal);
+LPROC_SEQ_FOPS_RO_TYPE(lov, kbytesfree);
+LPROC_SEQ_FOPS_RO_TYPE(lov, kbytesavail);
+
+struct lprocfs_vars lprocfs_lov_obd_vars[] = {
+ { "uuid", &lov_uuid_fops, 0, 0 },
+ { "stripesize", &lov_stripesize_fops, 0 },
+ { "stripeoffset", &lov_stripeoffset_fops, 0 },
+ { "stripecount", &lov_stripecount_fops, 0 },
+ { "stripetype", &lov_stripetype_fops, 0 },
+ { "numobd", &lov_numobd_fops, 0, 0 },
+ { "activeobd", &lov_activeobd_fops, 0, 0 },
+ { "filestotal", &lov_filestotal_fops, 0, 0 },
+ { "filesfree", &lov_filesfree_fops, 0, 0 },
+ /*{ "filegroups", lprocfs_rd_filegroups, 0, 0 },*/
+ { "blocksize", &lov_blksize_fops, 0, 0 },
+ { "kbytestotal", &lov_kbytestotal_fops, 0, 0 },
+ { "kbytesfree", &lov_kbytesfree_fops, 0, 0 },
+ { "kbytesavail", &lov_kbytesavail_fops, 0, 0 },
+ { "desc_uuid", &lov_desc_uuid_fops, 0, 0 },
+ { 0 }
+};
+
+LPROC_SEQ_FOPS_RO_TYPE(lov, numrefs);
+
+static struct lprocfs_vars lprocfs_lov_module_vars[] = {
+ { "num_refs", &lov_numrefs_fops, 0, 0 },
+ { 0 }
+};
+
+void lprocfs_lov_init_vars(struct lprocfs_static_vars *lvars)
+{
+ lvars->module_vars = lprocfs_lov_module_vars;
+ lvars->obd_vars = lprocfs_lov_obd_vars;
+}
+
+struct file_operations lov_proc_target_fops = {
+ .owner = THIS_MODULE,
+ .open = lov_target_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = lprocfs_seq_release,
+};
+#endif /* LPROCFS */
diff --git a/drivers/staging/lustre/lustre/lvfs/Makefile b/drivers/staging/lustre/lustre/lvfs/Makefile
new file mode 100644
index 0000000..f50b1c5
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lvfs/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_LUSTRE_FS) += lvfs.o
+
+lvfs-y := lvfs_linux.o fsfilt.o lvfs_lib.o
+
+
+ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/lvfs/fsfilt.c b/drivers/staging/lustre/lustre/lvfs/fsfilt.c
new file mode 100644
index 0000000..e86df73
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lvfs/fsfilt.c
@@ -0,0 +1,137 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_FILTER
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/slab.h>
+#include <linux/libcfs/libcfs.h>
+#include <lustre_fsfilt.h>
+
+LIST_HEAD(fsfilt_types);
+
+static struct fsfilt_operations *fsfilt_search_type(const char *type)
+{
+ struct fsfilt_operations *found;
+ struct list_head *p;
+
+ list_for_each(p, &fsfilt_types) {
+ found = list_entry(p, struct fsfilt_operations, fs_list);
+ if (!strcmp(found->fs_type, type)) {
+ return found;
+ }
+ }
+ return NULL;
+}
+
+int fsfilt_register_ops(struct fsfilt_operations *fs_ops)
+{
+ struct fsfilt_operations *found;
+
+ /* lock fsfilt_types list */
+ if ((found = fsfilt_search_type(fs_ops->fs_type))) {
+ if (found != fs_ops) {
+ CERROR("different operations for type %s\n",
+ fs_ops->fs_type);
+ /* unlock fsfilt_types list */
+ return -EEXIST;
+ }
+ } else {
+ try_module_get(THIS_MODULE);
+ list_add(&fs_ops->fs_list, &fsfilt_types);
+ }
+
+ /* unlock fsfilt_types list */
+ return 0;
+}
+EXPORT_SYMBOL(fsfilt_register_ops);
+
+void fsfilt_unregister_ops(struct fsfilt_operations *fs_ops)
+{
+ struct list_head *p;
+
+ /* lock fsfilt_types list */
+ list_for_each(p, &fsfilt_types) {
+ struct fsfilt_operations *found;
+
+ found = list_entry(p, typeof(*found), fs_list);
+ if (found == fs_ops) {
+ list_del(p);
+ module_put(THIS_MODULE);
+ break;
+ }
+ }
+ /* unlock fsfilt_types list */
+}
+EXPORT_SYMBOL(fsfilt_unregister_ops);
+
+struct fsfilt_operations *fsfilt_get_ops(const char *type)
+{
+ struct fsfilt_operations *fs_ops;
+
+ /* lock fsfilt_types list */
+ if (!(fs_ops = fsfilt_search_type(type))) {
+ char name[32];
+ int rc;
+
+ snprintf(name, sizeof(name) - 1, "fsfilt_%s", type);
+ name[sizeof(name) - 1] = '\0';
+
+ if (!(rc = request_module("%s", name))) {
+ fs_ops = fsfilt_search_type(type);
+ CDEBUG(D_INFO, "Loaded module '%s'\n", name);
+ if (!fs_ops)
+ rc = -ENOENT;
+ }
+
+ if (rc) {
+ CERROR("Can't find %s interface\n", name);
+ return ERR_PTR(rc < 0 ? rc : -rc);
+ /* unlock fsfilt_types list */
+ }
+ }
+ try_module_get(fs_ops->fs_owner);
+ /* unlock fsfilt_types list */
+
+ return fs_ops;
+}
+EXPORT_SYMBOL(fsfilt_get_ops);
+
+void fsfilt_put_ops(struct fsfilt_operations *fs_ops)
+{
+ module_put(fs_ops->fs_owner);
+}
+EXPORT_SYMBOL(fsfilt_put_ops);
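For illustration only (this sketch is not part of the patch): the list above is a small registry keyed by fs_type. A backend registers its fsfilt_operations at module init and unregisters at exit, which also drops the module reference taken by fsfilt_register_ops(). A minimal hypothetical backend, assuming only the fields this file actually uses (fs_type, fs_owner, fs_list); the "demo" name is invented:

    #include <linux/module.h>
    #include <lustre_fsfilt.h>

    /* Hypothetical backend; everything except the fsfilt_* API is invented. */
    static struct fsfilt_operations fsfilt_demo_ops = {
            .fs_type  = "demo",
            .fs_owner = THIS_MODULE,
    };

    static int __init fsfilt_demo_init(void)
    {
            /* Returns -EEXIST if different ops are already registered for
             * "demo"; otherwise pins this module and links fs_list. */
            return fsfilt_register_ops(&fsfilt_demo_ops);
    }

    static void __exit fsfilt_demo_exit(void)
    {
            /* Unlinks fs_list and drops the reference taken at register time. */
            fsfilt_unregister_ops(&fsfilt_demo_ops);
    }

    module_init(fsfilt_demo_init);
    module_exit(fsfilt_demo_exit);
    MODULE_LICENSE("GPL");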
diff --git a/drivers/staging/lustre/lustre/lvfs/fsfilt_ext3.c b/drivers/staging/lustre/lustre/lvfs/fsfilt_ext3.c
new file mode 100644
index 0000000..ee75994
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lvfs/fsfilt_ext3.c
@@ -0,0 +1,760 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/lvfs/fsfilt_ext3.c
+ *
+ * Author: Andreas Dilger <adilger@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_FILTER
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <ldiskfs/ldiskfs_config.h>
+#include <ext4/ext4.h>
+#include <ext4/ext4_jbd2.h>
+#include <linux/bitops.h>
+#include <linux/quota.h>
+
+#include <linux/libcfs/libcfs.h>
+#include <lustre_fsfilt.h>
+#include <obd.h>
+#include <linux/lustre_compat25.h>
+#include <linux/lprocfs_status.h>
+
+#include <ext4/ext4_extents.h>
+
+#ifdef HAVE_EXT_PBLOCK /* Name changed to ext4_ext_pblock for kernel 2.6.35 */
+#define ext3_ext_pblock(ex) ext_pblock((ex))
+#endif
+
+/* for kernels 2.6.18 and later */
+#define FSFILT_SINGLEDATA_TRANS_BLOCKS(sb) EXT3_SINGLEDATA_TRANS_BLOCKS(sb)
+
+#define fsfilt_ext3_ext_insert_extent(handle, inode, path, newext, flag) \
+ ext3_ext_insert_extent(handle, inode, path, newext, flag)
+
+#define ext3_mb_discard_inode_preallocations(inode) \
+ ext3_discard_preallocations(inode)
+
+#define fsfilt_log_start_commit(journal, tid) jbd2_log_start_commit(journal, tid)
+#define fsfilt_log_wait_commit(journal, tid) jbd2_log_wait_commit(journal, tid)
+
+static struct kmem_cache *fcb_cache;
+
+struct fsfilt_cb_data {
+ struct ext4_journal_cb_entry cb_jcb; /* private data - MUST BE FIRST */
+ fsfilt_cb_t cb_func; /* MDS/OBD completion function */
+ struct obd_device *cb_obd; /* MDS/OBD completion device */
+ __u64 cb_last_rcvd; /* MDS/OST last committed operation */
+ void *cb_data; /* MDS/OST completion function data */
+};
+
+static char *fsfilt_ext3_get_label(struct super_block *sb)
+{
+ return EXT3_SB(sb)->s_es->s_volume_name;
+}
+
+/* kernel has ext4_blocks_for_truncate since linux-3.1.1 */
+# include <ext4/truncate.h>
+
+/*
+ * We don't currently need any additional blocks for rmdir and
+ * unlink transactions because we are storing the OST oa_id inside
+ * the inode (which we will be changing anyway as part of this
+ * transaction).
+ */
+static void *fsfilt_ext3_start(struct inode *inode, int op, void *desc_private,
+ int logs)
+{
+ /* For updates to the last received file */
+ int nblocks = FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb);
+ journal_t *journal;
+ void *handle;
+
+ if (current->journal_info) {
+ CDEBUG(D_INODE, "increasing refcount on %p\n",
+ current->journal_info);
+ goto journal_start;
+ }
+
+ switch(op) {
+ case FSFILT_OP_UNLINK:
+ /* delete one file + create/update logs for each stripe */
+ nblocks += EXT3_DELETE_TRANS_BLOCKS(inode->i_sb);
+ nblocks += (EXT3_INDEX_EXTRA_TRANS_BLOCKS +
+ FSFILT_SINGLEDATA_TRANS_BLOCKS(inode->i_sb)) * logs;
+ break;
+ case FSFILT_OP_CANCEL_UNLINK:
+ LASSERT(logs == 1);
+
+ /* blocks for the log header bitmap update, OR
+ * blocks for the catalog header bitmap update + unlink of logs +
+ * blocks for deleting the inode (including blocks for truncation). */
+ nblocks = (LLOG_CHUNK_SIZE >> inode->i_blkbits) +
+ EXT3_DELETE_TRANS_BLOCKS(inode->i_sb) +
+ ext4_blocks_for_truncate(inode) + 3;
+ break;
+ default: CERROR("unknown transaction start op %d\n", op);
+ LBUG();
+ }
+
+ LASSERT(current->journal_info == desc_private);
+ journal = EXT3_SB(inode->i_sb)->s_journal;
+ if (nblocks > journal->j_max_transaction_buffers) {
+ CWARN("too many credits %d for op %ux%u using %d instead\n",
+ nblocks, op, logs, journal->j_max_transaction_buffers);
+ nblocks = journal->j_max_transaction_buffers;
+ }
+
+ journal_start:
+ LASSERTF(nblocks > 0, "can't start %d credit transaction\n", nblocks);
+ handle = ext3_journal_start(inode, nblocks);
+
+ if (!IS_ERR(handle))
+ LASSERT(current->journal_info == handle);
+ else
+ CERROR("error starting handle for op %u (%u credits): rc %ld\n",
+ op, nblocks, PTR_ERR(handle));
+ return handle;
+}
+
+static int fsfilt_ext3_commit(struct inode *inode, void *h, int force_sync)
+{
+ int rc;
+ handle_t *handle = h;
+
+ LASSERT(current->journal_info == handle);
+ if (force_sync)
+ handle->h_sync = 1; /* recovery likes this */
+
+ rc = ext3_journal_stop(handle);
+
+ return rc;
+}
+
+#ifndef EXT3_EXTENTS_FL
+#define EXT3_EXTENTS_FL 0x00080000 /* Inode uses extents */
+#endif
+
+#ifndef EXT_ASSERT
+#define EXT_ASSERT(cond) BUG_ON(!(cond))
+#endif
+
+#define EXT_GENERATION(inode) (EXT4_I(inode)->i_ext_generation)
+#define ext3_ext_base inode
+#define ext3_ext_base2inode(inode) (inode)
+#define EXT_DEPTH(inode) ext_depth(inode)
+#define fsfilt_ext3_ext_walk_space(inode, block, num, cb, cbdata) \
+ ext3_ext_walk_space(inode, block, num, cb, cbdata);
+
+struct bpointers {
+ unsigned long *blocks;
+ unsigned long start;
+ int num;
+ int init_num;
+ int create;
+};
+
+static long ext3_ext_find_goal(struct inode *inode, struct ext3_ext_path *path,
+ unsigned long block, int *aflags)
+{
+ struct ext3_inode_info *ei = EXT3_I(inode);
+ unsigned long bg_start;
+ unsigned long colour;
+ int depth;
+
+ if (path) {
+ struct ext3_extent *ex;
+ depth = path->p_depth;
+
+ /* try to predict block placement */
+ if ((ex = path[depth].p_ext))
+ return ext4_ext_pblock(ex) + (block - le32_to_cpu(ex->ee_block));
+
+ /* it looks like the index is empty;
+ * try to find a goal starting from the index block itself */
+ if (path[depth].p_bh)
+ return path[depth].p_bh->b_blocknr;
+ }
+
+ /* OK. use inode's group */
+ bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
+ le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
+ colour = (current->pid % 16) *
+ (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
+ return bg_start + colour + block;
+}
+
+#define ll_unmap_underlying_metadata(sb, blocknr) \
+ unmap_underlying_metadata((sb)->s_bdev, blocknr)
+
+#ifndef EXT3_MB_HINT_GROUP_ALLOC
+static unsigned long new_blocks(handle_t *handle, struct ext3_ext_base *base,
+ struct ext3_ext_path *path, unsigned long block,
+ unsigned long *count, int *err)
+{
+ unsigned long pblock, goal;
+ int aflags = 0;
+ struct inode *inode = ext3_ext_base2inode(base);
+
+ goal = ext3_ext_find_goal(inode, path, block, &aflags);
+ aflags |= 2; /* blocks have already been reserved */
+ pblock = ext3_mb_new_blocks(handle, inode, goal, count, aflags, err);
+ return pblock;
+
+}
+#else
+static unsigned long new_blocks(handle_t *handle, struct ext3_ext_base *base,
+ struct ext3_ext_path *path, unsigned long block,
+ unsigned long *count, int *err)
+{
+ struct inode *inode = ext3_ext_base2inode(base);
+ struct ext3_allocation_request ar;
+ unsigned long pblock;
+ int aflags;
+
+ /* find neighbour allocated blocks */
+ ar.lleft = block;
+ *err = ext3_ext_search_left(base, path, &ar.lleft, &ar.pleft);
+ if (*err)
+ return 0;
+ ar.lright = block;
+ *err = ext3_ext_search_right(base, path, &ar.lright, &ar.pright);
+ if (*err)
+ return 0;
+
+ /* allocate new block */
+ ar.goal = ext3_ext_find_goal(inode, path, block, &aflags);
+ ar.inode = inode;
+ ar.logical = block;
+ ar.len = *count;
+ ar.flags = EXT3_MB_HINT_DATA;
+ pblock = ext3_mb_new_blocks(handle, &ar, err);
+ *count = ar.len;
+ return pblock;
+}
+#endif
+
+static int ext3_ext_new_extent_cb(struct ext3_ext_base *base,
+ struct ext3_ext_path *path,
+ struct ext3_ext_cache *cex,
+#ifdef HAVE_EXT_PREPARE_CB_EXTENT
+ struct ext3_extent *ex,
+#endif
+ void *cbdata)
+{
+ struct bpointers *bp = cbdata;
+ struct inode *inode = ext3_ext_base2inode(base);
+ struct ext3_extent nex;
+ unsigned long pblock;
+ unsigned long tgen;
+ int err, i;
+ unsigned long count;
+ handle_t *handle;
+
+#ifdef EXT3_EXT_CACHE_EXTENT
+ if (cex->ec_type == EXT3_EXT_CACHE_EXTENT)
+#else
+ if ((cex->ec_len != 0) && (cex->ec_start != 0))
+#endif
+ {
+ err = EXT_CONTINUE;
+ goto map;
+ }
+
+ if (bp->create == 0) {
+ i = 0;
+ if (cex->ec_block < bp->start)
+ i = bp->start - cex->ec_block;
+ if (i >= cex->ec_len)
+ CERROR("nothing to do?! i = %d, e_num = %u\n",
+ i, cex->ec_len);
+ for (; i < cex->ec_len && bp->num; i++) {
+ *(bp->blocks) = 0;
+ bp->blocks++;
+ bp->num--;
+ bp->start++;
+ }
+
+ return EXT_CONTINUE;
+ }
+
+ tgen = EXT_GENERATION(base);
+ count = ext3_ext_calc_credits_for_insert(base, path);
+
+ handle = ext3_journal_start(inode, count+EXT3_ALLOC_NEEDED+1);
+ if (IS_ERR(handle)) {
+ return PTR_ERR(handle);
+ }
+
+ if (tgen != EXT_GENERATION(base)) {
+ /* the tree has changed, so the path may be invalid at the moment */
+ ext3_journal_stop(handle);
+ return EXT_REPEAT;
+ }
+
+ /* In the 2.6.32 kernel, ext4_ext_walk_space()'s callback func is not
+ * protected by i_data_sem as a whole, so we patch it to store the
+ * generation in the path and verify here that the tree hasn't changed */
+ down_write((&EXT4_I(inode)->i_data_sem));
+
+ /* validate the extent: make sure the extent tree has not changed */
+ if (EXT_GENERATION(base) != path[0].p_generation) {
+ /* cex is invalid, try again */
+ up_write(&EXT4_I(inode)->i_data_sem);
+ ext3_journal_stop(handle);
+ return EXT_REPEAT;
+ }
+
+ count = cex->ec_len;
+ pblock = new_blocks(handle, base, path, cex->ec_block, &count, &err);
+ if (!pblock)
+ goto out;
+ EXT_ASSERT(count <= cex->ec_len);
+
+ /* insert new extent */
+ nex.ee_block = cpu_to_le32(cex->ec_block);
+ ext3_ext_store_pblock(&nex, pblock);
+ nex.ee_len = cpu_to_le16(count);
+ err = fsfilt_ext3_ext_insert_extent(handle, base, path, &nex, 0);
+ if (err) {
+ /* free data blocks we just allocated */
+ /* not a good idea to call discard here directly,
+ * but otherwise we'd need to call it on every free() */
+#ifdef EXT3_MB_HINT_GROUP_ALLOC
+ ext3_mb_discard_inode_preallocations(inode);
+#endif
+#ifdef HAVE_EXT_FREE_BLOCK_WITH_BUFFER_HEAD /* Introduced in 2.6.32-rc7 */
+ ext3_free_blocks(handle, inode, NULL, ext4_ext_pblock(&nex),
+ cpu_to_le16(nex.ee_len), 0);
+#else
+ ext3_free_blocks(handle, inode, ext4_ext_pblock(&nex),
+ cpu_to_le16(nex.ee_len), 0);
+#endif
+ goto out;
+ }
+
+ /*
+ * By recording the length of the extent we just inserted,
+ * we ask ext3_ext_walk_space() to continue
+ * scanning after that block
+ */
+ cex->ec_len = le16_to_cpu(nex.ee_len);
+ cex->ec_start = ext4_ext_pblock(&nex);
+ BUG_ON(le16_to_cpu(nex.ee_len) == 0);
+ BUG_ON(le32_to_cpu(nex.ee_block) != cex->ec_block);
+
+out:
+ up_write((&EXT4_I(inode)->i_data_sem));
+ ext3_journal_stop(handle);
+map:
+ if (err >= 0) {
+ /* map blocks */
+ if (bp->num == 0) {
+ CERROR("hmm. why do we find this extent?\n");
+ CERROR("initial space: %lu:%u\n",
+ bp->start, bp->init_num);
+#ifdef EXT3_EXT_CACHE_EXTENT
+ CERROR("current extent: %u/%u/%llu %d\n",
+ cex->ec_block, cex->ec_len,
+ (unsigned long long)cex->ec_start,
+ cex->ec_type);
+#else
+ CERROR("current extent: %u/%u/%llu\n",
+ cex->ec_block, cex->ec_len,
+ (unsigned long long)cex->ec_start);
+#endif
+ }
+ i = 0;
+ if (cex->ec_block < bp->start)
+ i = bp->start - cex->ec_block;
+ if (i >= cex->ec_len)
+ CERROR("nothing to do?! i = %d, e_num = %u\n",
+ i, cex->ec_len);
+ for (; i < cex->ec_len && bp->num; i++) {
+ *(bp->blocks) = cex->ec_start + i;
+#ifdef EXT3_EXT_CACHE_EXTENT
+ if (cex->ec_type != EXT3_EXT_CACHE_EXTENT)
+#else
+ if ((cex->ec_len == 0) || (cex->ec_start == 0))
+#endif
+ {
+ /* unmap any possible underlying metadata from
+ * the block device mapping. bug 6998. */
+ ll_unmap_underlying_metadata(inode->i_sb,
+ *(bp->blocks));
+ }
+ bp->blocks++;
+ bp->num--;
+ bp->start++;
+ }
+ }
+ return err;
+}
+
+int fsfilt_map_nblocks(struct inode *inode, unsigned long block,
+ unsigned long num, unsigned long *blocks,
+ int create)
+{
+ struct ext3_ext_base *base = inode;
+ struct bpointers bp;
+ int err;
+
+ CDEBUG(D_OTHER, "blocks %lu-%lu requested for inode %u\n",
+ block, block + num - 1, (unsigned) inode->i_ino);
+
+ bp.blocks = blocks;
+ bp.start = block;
+ bp.init_num = bp.num = num;
+ bp.create = create;
+
+ err = fsfilt_ext3_ext_walk_space(base, block, num,
+ ext3_ext_new_extent_cb, &bp);
+ ext3_ext_invalidate_cache(base);
+
+ return err;
+}
+
+int fsfilt_ext3_map_ext_inode_pages(struct inode *inode, struct page **page,
+ int pages, unsigned long *blocks,
+ int create)
+{
+ int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
+ int rc = 0, i = 0;
+ struct page *fp = NULL;
+ int clen = 0;
+
+ CDEBUG(D_OTHER, "inode %lu: map %d pages from %lu\n",
+ inode->i_ino, pages, (*page)->index);
+
+ /* pages are already sorted, so we just have to find
+ * contiguous runs and process them properly */
+ while (i < pages) {
+ if (fp == NULL) {
+ /* start new extent */
+ fp = *page++;
+ clen = 1;
+ i++;
+ continue;
+ } else if (fp->index + clen == (*page)->index) {
+ /* continue the extent */
+ page++;
+ clen++;
+ i++;
+ continue;
+ }
+
+ /* process found extent */
+ rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
+ clen * blocks_per_page, blocks,
+ create);
+ if (rc)
+ GOTO(cleanup, rc);
+
+ /* look for next extent */
+ fp = NULL;
+ blocks += blocks_per_page * clen;
+ }
+
+ if (fp)
+ rc = fsfilt_map_nblocks(inode, fp->index * blocks_per_page,
+ clen * blocks_per_page, blocks,
+ create);
+cleanup:
+ return rc;
+}
+
+int fsfilt_ext3_map_bm_inode_pages(struct inode *inode, struct page **page,
+ int pages, unsigned long *blocks,
+ int create)
+{
+ int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
+ unsigned long *b;
+ int rc = 0, i;
+
+ for (i = 0, b = blocks; i < pages; i++, page++) {
+ rc = ext3_map_inode_page(inode, *page, b, create);
+ if (rc) {
+ CERROR("ino %lu, blk %lu create %d: rc %d\n",
+ inode->i_ino, *b, create, rc);
+ break;
+ }
+
+ b += blocks_per_page;
+ }
+ return rc;
+}
+
+int fsfilt_ext3_map_inode_pages(struct inode *inode, struct page **page,
+ int pages, unsigned long *blocks,
+ int create, struct mutex *optional_mutex)
+{
+ int rc;
+
+ if (EXT3_I(inode)->i_flags & EXT3_EXTENTS_FL) {
+ rc = fsfilt_ext3_map_ext_inode_pages(inode, page, pages,
+ blocks, create);
+ return rc;
+ }
+ if (optional_mutex != NULL)
+ mutex_lock(optional_mutex);
+ rc = fsfilt_ext3_map_bm_inode_pages(inode, page, pages, blocks, create);
+ if (optional_mutex != NULL)
+ mutex_unlock(optional_mutex);
+
+ return rc;
+}
+
+int fsfilt_ext3_read(struct inode *inode, void *buf, int size, loff_t *offs)
+{
+ unsigned long block;
+ struct buffer_head *bh;
+ int err, blocksize, csize, boffs, osize = size;
+
+ /* prevent reading after eof */
+ spin_lock(&inode->i_lock);
+ if (i_size_read(inode) < *offs + size) {
+ size = i_size_read(inode) - *offs;
+ spin_unlock(&inode->i_lock);
+ if (size < 0) {
+ CDEBUG(D_EXT2, "size %llu is too short for read @%llu\n",
+ i_size_read(inode), *offs);
+ return -EBADR;
+ } else if (size == 0) {
+ return 0;
+ }
+ } else {
+ spin_unlock(&inode->i_lock);
+ }
+
+ blocksize = 1 << inode->i_blkbits;
+
+ while (size > 0) {
+ block = *offs >> inode->i_blkbits;
+ boffs = *offs & (blocksize - 1);
+ csize = min(blocksize - boffs, size);
+ bh = ext3_bread(NULL, inode, block, 0, &err);
+ if (!bh) {
+ CERROR("can't read block: %d\n", err);
+ return err;
+ }
+
+ memcpy(buf, bh->b_data + boffs, csize);
+ brelse(bh);
+
+ *offs += csize;
+ buf += csize;
+ size -= csize;
+ }
+ return osize;
+}
+EXPORT_SYMBOL(fsfilt_ext3_read);
+
+static int fsfilt_ext3_read_record(struct file * file, void *buf,
+ int size, loff_t *offs)
+{
+ int rc;
+ rc = fsfilt_ext3_read(file->f_dentry->d_inode, buf, size, offs);
+ if (rc > 0)
+ rc = 0;
+ return rc;
+}
+
+int fsfilt_ext3_write_handle(struct inode *inode, void *buf, int bufsize,
+ loff_t *offs, handle_t *handle)
+{
+ struct buffer_head *bh = NULL;
+ loff_t old_size = i_size_read(inode), offset = *offs;
+ loff_t new_size = i_size_read(inode);
+ unsigned long block;
+ int err = 0, blocksize = 1 << inode->i_blkbits, size, boffs;
+
+ while (bufsize > 0) {
+ if (bh != NULL)
+ brelse(bh);
+
+ block = offset >> inode->i_blkbits;
+ boffs = offset & (blocksize - 1);
+ size = min(blocksize - boffs, bufsize);
+ bh = ext3_bread(handle, inode, block, 1, &err);
+ if (!bh) {
+ CERROR("can't read/create block: %d\n", err);
+ break;
+ }
+
+ err = ext3_journal_get_write_access(handle, bh);
+ if (err) {
+ CERROR("journal_get_write_access() returned error %d\n",
+ err);
+ break;
+ }
+ LASSERT(bh->b_data + boffs + size <= bh->b_data + bh->b_size);
+ memcpy(bh->b_data + boffs, buf, size);
+ err = ext3_journal_dirty_metadata(handle, bh);
+ if (err) {
+ CERROR("journal_dirty_metadata() returned error %d\n",
+ err);
+ break;
+ }
+ if (offset + size > new_size)
+ new_size = offset + size;
+ offset += size;
+ bufsize -= size;
+ buf += size;
+ }
+ if (bh)
+ brelse(bh);
+
+ /* correct in-core and on-disk sizes */
+ if (new_size > i_size_read(inode)) {
+ spin_lock(&inode->i_lock);
+ if (new_size > i_size_read(inode))
+ i_size_write(inode, new_size);
+ if (i_size_read(inode) > EXT3_I(inode)->i_disksize)
+ EXT3_I(inode)->i_disksize = i_size_read(inode);
+ if (i_size_read(inode) > old_size) {
+ spin_unlock(&inode->i_lock);
+ mark_inode_dirty(inode);
+ } else {
+ spin_unlock(&inode->i_lock);
+ }
+ }
+
+ if (err == 0)
+ *offs = offset;
+ return err;
+}
+EXPORT_SYMBOL(fsfilt_ext3_write_handle);
+
+static int fsfilt_ext3_write_record(struct file *file, void *buf, int bufsize,
+ loff_t *offs, int force_sync)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ handle_t *handle;
+ int err, block_count = 0, blocksize;
+
+ /* Determine how many transaction credits are needed */
+ blocksize = 1 << inode->i_blkbits;
+ block_count = (*offs & (blocksize - 1)) + bufsize;
+ block_count = (block_count + blocksize - 1) >> inode->i_blkbits;
+
+ handle = ext3_journal_start(inode,
+ block_count * EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + 2);
+ if (IS_ERR(handle)) {
+ CERROR("can't start transaction for %d blocks (%d bytes)\n",
+ block_count * EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + 2,
+ bufsize);
+ return PTR_ERR(handle);
+ }
+
+ err = fsfilt_ext3_write_handle(inode, buf, bufsize, offs, handle);
+
+ if (!err && force_sync)
+ handle->h_sync = 1; /* recovery likes this */
+
+ ext3_journal_stop(handle);
+
+ return err;
+}
+
+static int fsfilt_ext3_setup(struct super_block *sb)
+{
+ if (!EXT3_HAS_COMPAT_FEATURE(sb,
+ EXT3_FEATURE_COMPAT_HAS_JOURNAL)) {
+ CERROR("ext3 mounted without journal\n");
+ return -EINVAL;
+ }
+
+#ifdef S_PDIROPS
+ CWARN("Enabling PDIROPS\n");
+ set_opt(EXT3_SB(sb)->s_mount_opt, PDIROPS);
+ sb->s_flags |= S_PDIROPS;
+#endif
+ if (!EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_DIR_INDEX))
+ CWARN("filesystem doesn't have dir_index feature enabled\n");
+ return 0;
+}
+static struct fsfilt_operations fsfilt_ext3_ops = {
+ .fs_type = "ext3",
+ .fs_owner = THIS_MODULE,
+ .fs_getlabel = fsfilt_ext3_get_label,
+ .fs_start = fsfilt_ext3_start,
+ .fs_commit = fsfilt_ext3_commit,
+ .fs_map_inode_pages = fsfilt_ext3_map_inode_pages,
+ .fs_write_record = fsfilt_ext3_write_record,
+ .fs_read_record = fsfilt_ext3_read_record,
+ .fs_setup = fsfilt_ext3_setup,
+};
+
+static int __init fsfilt_ext3_init(void)
+{
+ int rc;
+
+ fcb_cache = kmem_cache_create("fsfilt_ext3_fcb",
+ sizeof(struct fsfilt_cb_data), 0, 0);
+ if (!fcb_cache) {
+ CERROR("error allocating fsfilt journal callback cache\n");
+ GOTO(out, rc = -ENOMEM);
+ }
+
+ rc = fsfilt_register_ops(&fsfilt_ext3_ops);
+
+ if (rc) {
+ int err = kmem_cache_destroy(fcb_cache);
+ LASSERTF(err == 0, "error destroying new cache: rc %d\n", err);
+ }
+out:
+ return rc;
+}
+
+static void __exit fsfilt_ext3_exit(void)
+{
+ int rc;
+
+ fsfilt_unregister_ops(&fsfilt_ext3_ops);
+ rc = kmem_cache_destroy(fcb_cache);
+ LASSERTF(rc == 0, "couldn't destroy fcb_cache slab\n");
+}
+
+module_init(fsfilt_ext3_init);
+module_exit(fsfilt_ext3_exit);
+
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("Lustre ext3 Filesystem Helper v0.1");
+MODULE_LICENSE("GPL");
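For illustration only (not part of the patch): a consumer resolves the backend by name through fsfilt_get_ops(), which may request_module() "fsfilt_ext3" on demand, and then drives the journal through the ops table. The exact member prototypes live in lustre_fsfilt.h, which is not in this hunk; the sketch below assumes fs_start/fs_commit match fsfilt_ext3_start()/fsfilt_ext3_commit() above, and the caller name is invented:

    /* Hypothetical caller; assumes ops->fs_start/fs_commit take the same
     * arguments as fsfilt_ext3_start()/fsfilt_ext3_commit() shown above. */
    static int demo_journaled_unlink(struct inode *inode)
    {
            struct fsfilt_operations *ops;
            void *handle;
            int rc;

            ops = fsfilt_get_ops("ext3");           /* may load fsfilt_ext3 */
            if (IS_ERR(ops))
                    return PTR_ERR(ops);

            /* one log per stripe is assumed here, hence logs = 1 */
            handle = ops->fs_start(inode, FSFILT_OP_UNLINK, NULL, 1);
            if (IS_ERR(handle)) {
                    rc = PTR_ERR(handle);
                    goto out;
            }

            /* ... journaled updates against the inode would go here ... */

            rc = ops->fs_commit(inode, handle, 1 /* force_sync */);
    out:
            fsfilt_put_ops(ops);
            return rc;
    }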
diff --git a/drivers/staging/lustre/lustre/lvfs/lvfs_lib.c b/drivers/staging/lustre/lustre/lvfs/lvfs_lib.c
new file mode 100644
index 0000000..97a8be2
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lvfs/lvfs_lib.c
@@ -0,0 +1,173 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/lvfs/lvfs_lib.c
+ *
+ * Lustre filesystem abstraction routines
+ *
+ * Author: Andreas Dilger <adilger@clusterfs.com>
+ */
+#include <linux/module.h>
+#include <lustre_lib.h>
+#include <lprocfs_status.h>
+
+#ifdef LPROCFS
+void lprocfs_counter_add(struct lprocfs_stats *stats, int idx, long amount)
+{
+ struct lprocfs_counter *percpu_cntr;
+ struct lprocfs_counter_header *header;
+ int smp_id;
+ unsigned long flags = 0;
+
+ if (stats == NULL)
+ return;
+
+ /* With per-client stats, statistics are allocated only for a
+ * single CPU area, so smp_id should always be 0. */
+ smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags);
+ if (smp_id < 0)
+ return;
+
+ header = &stats->ls_cnt_header[idx];
+ percpu_cntr = lprocfs_stats_counter_get(stats, smp_id, idx);
+ percpu_cntr->lc_count++;
+
+ if (header->lc_config & LPROCFS_CNTR_AVGMINMAX) {
+ /*
+ * lprocfs_counter_add() can be called in interrupt context,
+ * as a memory allocation could trigger the memory shrinker
+ * ldlm_pool_shrink(), which calls lprocfs_counter_add().
+ * LU-1727.
+ *
+ * Only obd_memory uses the LPROCFS_STATS_FLAG_IRQ_SAFE
+ * flag, because it needs accurate counting lest the memory
+ * leak check report an error.
+ */
+ if (in_interrupt() &&
+ (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
+ percpu_cntr->lc_sum_irq += amount;
+ else
+ percpu_cntr->lc_sum += amount;
+
+ if (header->lc_config & LPROCFS_CNTR_STDDEV)
+ percpu_cntr->lc_sumsquare += (__s64)amount * amount;
+ if (amount < percpu_cntr->lc_min)
+ percpu_cntr->lc_min = amount;
+ if (amount > percpu_cntr->lc_max)
+ percpu_cntr->lc_max = amount;
+ }
+ lprocfs_stats_unlock(stats, LPROCFS_GET_SMP_ID, &flags);
+}
+EXPORT_SYMBOL(lprocfs_counter_add);
+
+void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx, long amount)
+{
+ struct lprocfs_counter *percpu_cntr;
+ struct lprocfs_counter_header *header;
+ int smp_id;
+ unsigned long flags = 0;
+
+ if (stats == NULL)
+ return;
+
+ /* With per-client stats, statistics are allocated only for a
+ * single CPU area, so smp_id should always be 0. */
+ smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags);
+ if (smp_id < 0)
+ return;
+
+ header = &stats->ls_cnt_header[idx];
+ percpu_cntr = lprocfs_stats_counter_get(stats, smp_id, idx);
+ if (header->lc_config & LPROCFS_CNTR_AVGMINMAX) {
+ /*
+ * Sometimes we use RCU callbacks to free memory, which calls
+ * lprocfs_counter_sub(), and RCU callbacks may execute in
+ * softirq context - right now that is the only case in which
+ * we run here in softirq context, so use a separate counter
+ * for it. bz20650.
+ *
+ * Only obd_memory uses the LPROCFS_STATS_FLAG_IRQ_SAFE
+ * flag, because it needs accurate counting lest the memory
+ * leak check report an error.
+ */
+ if (in_interrupt() &&
+ (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
+ percpu_cntr->lc_sum_irq -= amount;
+ else
+ percpu_cntr->lc_sum -= amount;
+ }
+ lprocfs_stats_unlock(stats, LPROCFS_GET_SMP_ID, &flags);
+}
+EXPORT_SYMBOL(lprocfs_counter_sub);
+
+int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid)
+{
+ struct lprocfs_counter *cntr;
+ unsigned int percpusize;
+ int rc = -ENOMEM;
+ unsigned long flags = 0;
+ int i;
+
+ LASSERT(stats->ls_percpu[cpuid] == NULL);
+ LASSERT((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0);
+
+ percpusize = lprocfs_stats_counter_size(stats);
+ LIBCFS_ALLOC_ATOMIC(stats->ls_percpu[cpuid], percpusize);
+ if (stats->ls_percpu[cpuid] != NULL) {
+ rc = 0;
+ if (unlikely(stats->ls_biggest_alloc_num <= cpuid)) {
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
+ spin_lock_irqsave(&stats->ls_lock, flags);
+ else
+ spin_lock(&stats->ls_lock);
+ if (stats->ls_biggest_alloc_num <= cpuid)
+ stats->ls_biggest_alloc_num = cpuid + 1;
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) {
+ spin_unlock_irqrestore(&stats->ls_lock, flags);
+ } else {
+ spin_unlock(&stats->ls_lock);
+ }
+ }
+ /* initialize the non-zero counter fields of ls_percpu[cpuid] */
+ for (i = 0; i < stats->ls_num; ++i) {
+ cntr = lprocfs_stats_counter_get(stats, cpuid, i);
+ cntr->lc_min = LC_MIN_INIT;
+ }
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_stats_alloc_one);
+#endif /* LPROCFS */
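For illustration only (not part of the patch): lprocfs_counter_add()/lprocfs_counter_sub() above are the primitives a subsystem calls on each instrumented event; with LPROCFS_CNTR_AVGMINMAX set in the counter header they also maintain sum, min and max (and lc_sum_irq when called in interrupt context on IRQ-safe stats). A small hypothetical wrapper, where demo_track_* and DEMO_ALLOC_IDX are invented and the stats object is assumed to have been set up elsewhere:

    enum { DEMO_ALLOC_IDX = 0 };    /* invented counter index */

    /* Track an allocation/free pair in a pre-initialised lprocfs_stats. */
    static void demo_track_alloc(struct lprocfs_stats *demo_stats, size_t bytes)
    {
            /* bumps lc_count and, for AVGMINMAX counters, lc_sum/min/max */
            lprocfs_counter_add(demo_stats, DEMO_ALLOC_IDX, (long)bytes);
    }

    static void demo_track_free(struct lprocfs_stats *demo_stats, size_t bytes)
    {
            lprocfs_counter_sub(demo_stats, DEMO_ALLOC_IDX, (long)bytes);
    }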
diff --git a/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c b/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c
new file mode 100644
index 0000000..18e1b47
--- /dev/null
+++ b/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c
@@ -0,0 +1,290 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/lvfs/lvfs_linux.c
+ *
+ * Author: Andreas Dilger <adilger@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_FILTER
+
+#include <linux/fs.h>
+#include <asm/unistd.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/quotaops.h>
+#include <linux/libcfs/libcfs.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/lustre_compat25.h>
+#include <lvfs.h>
+
+#include <obd.h>
+#include <lustre_lib.h>
+
+struct lprocfs_stats *obd_memory = NULL;
+EXPORT_SYMBOL(obd_memory);
+/* refine later and change to a seqlock or similar from libcfs */
+
+/* Debugging check only needed during development */
+#ifdef OBD_CTXT_DEBUG
+# define ASSERT_CTXT_MAGIC(magic) LASSERT((magic) == OBD_RUN_CTXT_MAGIC)
+# define ASSERT_NOT_KERNEL_CTXT(msg) LASSERTF(!segment_eq(get_fs(), get_ds()),\
+ msg)
+# define ASSERT_KERNEL_CTXT(msg) LASSERTF(segment_eq(get_fs(), get_ds()), msg)
+#else
+# define ASSERT_CTXT_MAGIC(magic) do {} while(0)
+# define ASSERT_NOT_KERNEL_CTXT(msg) do {} while(0)
+# define ASSERT_KERNEL_CTXT(msg) do {} while(0)
+#endif
+
+static void push_group_info(struct lvfs_run_ctxt *save,
+ struct group_info *ginfo)
+{
+ if (!ginfo) {
+ save->ngroups = current_ngroups;
+ current_ngroups = 0;
+ } else {
+ struct cred *cred;
+ task_lock(current);
+ save->group_info = current_cred()->group_info;
+ if ((cred = prepare_creds())) {
+ cred->group_info = ginfo;
+ commit_creds(cred);
+ }
+ task_unlock(current);
+ }
+}
+
+static void pop_group_info(struct lvfs_run_ctxt *save,
+ struct group_info *ginfo)
+{
+ if (!ginfo) {
+ current_ngroups = save->ngroups;
+ } else {
+ struct cred *cred;
+ task_lock(current);
+ if ((cred = prepare_creds())) {
+ cred->group_info = save->group_info;
+ commit_creds(cred);
+ }
+ task_unlock(current);
+ }
+}
+
+/* push / pop to root of obd store */
+void push_ctxt(struct lvfs_run_ctxt *save, struct lvfs_run_ctxt *new_ctx,
+ struct lvfs_ucred *uc)
+{
+ /* if there is an underlying dt_device then push_ctxt is not needed */
+ if (new_ctx->dt != NULL)
+ return;
+
+ //ASSERT_NOT_KERNEL_CTXT("already in kernel context!\n");
+ ASSERT_CTXT_MAGIC(new_ctx->magic);
+ OBD_SET_CTXT_MAGIC(save);
+
+ save->fs = get_fs();
+ LASSERT(d_count(cfs_fs_pwd(current->fs)));
+ LASSERT(d_count(new_ctx->pwd));
+ save->pwd = dget(cfs_fs_pwd(current->fs));
+ save->pwdmnt = mntget(cfs_fs_mnt(current->fs));
+ save->luc.luc_umask = current_umask();
+ save->ngroups = current_cred()->group_info->ngroups;
+
+ LASSERT(save->pwd);
+ LASSERT(save->pwdmnt);
+ LASSERT(new_ctx->pwd);
+ LASSERT(new_ctx->pwdmnt);
+
+ if (uc) {
+ struct cred *cred;
+ save->luc.luc_uid = current_uid();
+ save->luc.luc_gid = current_gid();
+ save->luc.luc_fsuid = current_fsuid();
+ save->luc.luc_fsgid = current_fsgid();
+ save->luc.luc_cap = current_cap();
+
+ if ((cred = prepare_creds())) {
+ cred->uid = uc->luc_uid;
+ cred->gid = uc->luc_gid;
+ cred->fsuid = uc->luc_fsuid;
+ cred->fsgid = uc->luc_fsgid;
+ cred->cap_effective = uc->luc_cap;
+ commit_creds(cred);
+ }
+
+ push_group_info(save,
+ uc->luc_ginfo ?:
+ uc->luc_identity ? uc->luc_identity->mi_ginfo :
+ NULL);
+ }
+ current->fs->umask = 0; /* umask already applied on client */
+ set_fs(new_ctx->fs);
+ ll_set_fs_pwd(current->fs, new_ctx->pwdmnt, new_ctx->pwd);
+}
+EXPORT_SYMBOL(push_ctxt);
+
+void pop_ctxt(struct lvfs_run_ctxt *saved, struct lvfs_run_ctxt *new_ctx,
+ struct lvfs_ucred *uc)
+{
+ /* if there is an underlying dt_device then pop_ctxt is not needed */
+ if (new_ctx->dt != NULL)
+ return;
+
+ ASSERT_CTXT_MAGIC(saved->magic);
+ ASSERT_KERNEL_CTXT("popping non-kernel context!\n");
+
+ LASSERTF(cfs_fs_pwd(current->fs) == new_ctx->pwd, "%p != %p\n",
+ cfs_fs_pwd(current->fs), new_ctx->pwd);
+ LASSERTF(cfs_fs_mnt(current->fs) == new_ctx->pwdmnt, "%p != %p\n",
+ cfs_fs_mnt(current->fs), new_ctx->pwdmnt);
+
+ set_fs(saved->fs);
+ ll_set_fs_pwd(current->fs, saved->pwdmnt, saved->pwd);
+
+ dput(saved->pwd);
+ mntput(saved->pwdmnt);
+ current->fs->umask = saved->luc.luc_umask;
+ if (uc) {
+ struct cred *cred;
+ if ((cred = prepare_creds())) {
+ cred->uid = saved->luc.luc_uid;
+ cred->gid = saved->luc.luc_gid;
+ cred->fsuid = saved->luc.luc_fsuid;
+ cred->fsgid = saved->luc.luc_fsgid;
+ cred->cap_effective = saved->luc.luc_cap;
+ commit_creds(cred);
+ }
+
+ pop_group_info(saved,
+ uc->luc_ginfo ?:
+ uc->luc_identity ? uc->luc_identity->mi_ginfo :
+ NULL);
+ }
+}
+EXPORT_SYMBOL(pop_ctxt);
+
+/* utility to rename a file */
+int lustre_rename(struct dentry *dir, struct vfsmount *mnt,
+ char *oldname, char *newname)
+{
+ struct dentry *dchild_old, *dchild_new;
+ int err = 0;
+
+ ASSERT_KERNEL_CTXT("kernel doing rename outside kernel context\n");
+ CDEBUG(D_INODE, "renaming file %.*s to %.*s\n",
+ (int)strlen(oldname), oldname, (int)strlen(newname), newname);
+
+ dchild_old = ll_lookup_one_len(oldname, dir, strlen(oldname));
+ if (IS_ERR(dchild_old))
+ return PTR_ERR(dchild_old);
+
+ if (!dchild_old->d_inode)
+ GOTO(put_old, err = -ENOENT);
+
+ dchild_new = ll_lookup_one_len(newname, dir, strlen(newname));
+ if (IS_ERR(dchild_new))
+ GOTO(put_old, err = PTR_ERR(dchild_new));
+
+ err = ll_vfs_rename(dir->d_inode, dchild_old, mnt,
+ dir->d_inode, dchild_new, mnt);
+
+ dput(dchild_new);
+put_old:
+ dput(dchild_old);
+ return err;
+}
+EXPORT_SYMBOL(lustre_rename);
+
+/* Note: dput(dchild) will *not* be called if there is an error */
+struct l_file *l_dentry_open(struct lvfs_run_ctxt *ctxt, struct l_dentry *de,
+ int flags)
+{
+ struct path path = {
+ .dentry = de,
+ .mnt = ctxt->pwdmnt,
+ };
+ return dentry_open(&path, flags, current_cred());
+}
+EXPORT_SYMBOL(l_dentry_open);
+
+#ifdef LPROCFS
+__s64 lprocfs_read_helper(struct lprocfs_counter *lc,
+ struct lprocfs_counter_header *header,
+ enum lprocfs_stats_flags flags,
+ enum lprocfs_fields_flags field)
+{
+ __s64 ret = 0;
+
+ if (lc == NULL || header == NULL)
+ return 0;
+
+ switch (field) {
+ case LPROCFS_FIELDS_FLAGS_CONFIG:
+ ret = header->lc_config;
+ break;
+ case LPROCFS_FIELDS_FLAGS_SUM:
+ ret = lc->lc_sum;
+ if ((flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
+ ret += lc->lc_sum_irq;
+ break;
+ case LPROCFS_FIELDS_FLAGS_MIN:
+ ret = lc->lc_min;
+ break;
+ case LPROCFS_FIELDS_FLAGS_MAX:
+ ret = lc->lc_max;
+ break;
+ case LPROCFS_FIELDS_FLAGS_AVG:
+ ret = (lc->lc_max - lc->lc_min) / 2;
+ break;
+ case LPROCFS_FIELDS_FLAGS_SUMSQUARE:
+ ret = lc->lc_sumsquare;
+ break;
+ case LPROCFS_FIELDS_FLAGS_COUNT:
+ ret = lc->lc_count;
+ break;
+ default:
+ break;
+ };
+
+ return ret;
+}
+EXPORT_SYMBOL(lprocfs_read_helper);
+#endif /* LPROCFS */
+
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("Lustre VFS Filesystem Helper v0.1");
+MODULE_LICENSE("GPL");
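For illustration only (not part of the patch): push_ctxt()/pop_ctxt() bracket filesystem work done against the server's local object store, temporarily switching the address limit, cwd/root and credentials, and lustre_rename() expects to run inside such a context (ASSERT_KERNEL_CTXT). A hypothetical caller; obtaining the lvfs_run_ctxt from the owning obd_device is outside this file, so it is simply passed in here:

    /* Hypothetical wrapper around lustre_rename(); ctxt setup is assumed. */
    static int demo_rename_in_objstore(struct lvfs_run_ctxt *ctxt,
                                       struct dentry *dir, struct vfsmount *mnt,
                                       char *oldname, char *newname)
    {
            struct lvfs_run_ctxt saved;
            int rc;

            push_ctxt(&saved, ctxt, NULL);  /* enter the obd store context */
            rc = lustre_rename(dir, mnt, oldname, newname);
            pop_ctxt(&saved, ctxt, NULL);   /* restore the caller's context */

            return rc;
    }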
diff --git a/drivers/staging/lustre/lustre/mdc/Makefile b/drivers/staging/lustre/lustre/mdc/Makefile
new file mode 100644
index 0000000..93bae24
--- /dev/null
+++ b/drivers/staging/lustre/lustre/mdc/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_LUSTRE_FS) += mdc.o
+mdc-y := mdc_request.o mdc_reint.o lproc_mdc.o mdc_lib.o mdc_locks.o
+
+
+ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
new file mode 100644
index 0000000..e0b8f18
--- /dev/null
+++ b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
@@ -0,0 +1,217 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+#define DEBUG_SUBSYSTEM S_CLASS
+
+#include <linux/vfs.h>
+#include <obd_class.h>
+#include <lprocfs_status.h>
+
+#ifdef LPROCFS
+
+static int mdc_max_rpcs_in_flight_seq_show(struct seq_file *m, void *v)
+{
+ struct obd_device *dev = m->private;
+ struct client_obd *cli = &dev->u.cli;
+ int rc;
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ rc = seq_printf(m, "%u\n", cli->cl_max_rpcs_in_flight);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ return rc;
+}
+
+static ssize_t mdc_max_rpcs_in_flight_seq_write(struct file *file,
+ const char *buffer,
+ size_t count,
+ loff_t *off)
+{
+ struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
+ struct client_obd *cli = &dev->u.cli;
+ int val, rc;
+
+ rc = lprocfs_write_helper(buffer, count, &val);
+ if (rc)
+ return rc;
+
+ if (val < 1 || val > MDC_MAX_RIF_MAX)
+ return -ERANGE;
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ cli->cl_max_rpcs_in_flight = val;
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+
+ return count;
+}
+LPROC_SEQ_FOPS(mdc_max_rpcs_in_flight);
+
+static int mdc_kuc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, NULL, PDE_DATA(inode));
+}
+
+/* temporary for testing */
+static ssize_t mdc_kuc_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
+ struct kuc_hdr *lh;
+ struct hsm_action_list *hal;
+ struct hsm_action_item *hai;
+ int len;
+ int fd, rc;
+
+ rc = lprocfs_write_helper(buffer, count, &fd);
+ if (rc)
+ return rc;
+
+ if (fd < 0)
+ return -ERANGE;
+ CWARN("message to fd %d\n", fd);
+
+ len = sizeof(*lh) + sizeof(*hal) + MTI_NAME_MAXLEN +
+ /* for mockup below */ 2 * cfs_size_round(sizeof(*hai));
+
+ OBD_ALLOC(lh, len);
+
+ lh->kuc_magic = KUC_MAGIC;
+ lh->kuc_transport = KUC_TRANSPORT_HSM;
+ lh->kuc_msgtype = HMT_ACTION_LIST;
+ lh->kuc_msglen = len;
+
+ hal = (struct hsm_action_list *)(lh + 1);
+ hal->hal_version = HAL_VERSION;
+ hal->hal_archive_id = 1;
+ hal->hal_flags = 0;
+ obd_uuid2fsname(hal->hal_fsname, obd->obd_name, MTI_NAME_MAXLEN);
+
+ /* mock up an action list */
+ hal->hal_count = 2;
+ hai = hai_zero(hal);
+ hai->hai_action = HSMA_ARCHIVE;
+ hai->hai_fid.f_oid = 5;
+ hai->hai_len = sizeof(*hai);
+ hai = hai_next(hai);
+ hai->hai_action = HSMA_RESTORE;
+ hai->hai_fid.f_oid = 10;
+ hai->hai_len = sizeof(*hai);
+
+ /* This works for either broadcast or unicast to a single fd */
+ if (fd == 0) {
+ rc = libcfs_kkuc_group_put(KUC_GRP_HSM, lh);
+ } else {
+ struct file *fp = fget(fd);
+
+ rc = libcfs_kkuc_msg_put(fp, lh);
+ fput(fp);
+ }
+ OBD_FREE(lh, len);
+ if (rc < 0)
+ return rc;
+ return count;
+}
+
+struct file_operations mdc_kuc_fops = {
+ .open = mdc_kuc_open,
+ .write = mdc_kuc_write,
+ .release = single_release,
+};
+
+LPROC_SEQ_FOPS_WR_ONLY(mdc, ping);
+
+LPROC_SEQ_FOPS_RO_TYPE(mdc, uuid);
+LPROC_SEQ_FOPS_RO_TYPE(mdc, connect_flags);
+LPROC_SEQ_FOPS_RO_TYPE(mdc, blksize);
+LPROC_SEQ_FOPS_RO_TYPE(mdc, kbytestotal);
+LPROC_SEQ_FOPS_RO_TYPE(mdc, kbytesfree);
+LPROC_SEQ_FOPS_RO_TYPE(mdc, kbytesavail);
+LPROC_SEQ_FOPS_RO_TYPE(mdc, filestotal);
+LPROC_SEQ_FOPS_RO_TYPE(mdc, filesfree);
+LPROC_SEQ_FOPS_RO_TYPE(mdc, server_uuid);
+LPROC_SEQ_FOPS_RO_TYPE(mdc, conn_uuid);
+LPROC_SEQ_FOPS_RO_TYPE(mdc, timeouts);
+LPROC_SEQ_FOPS_RO_TYPE(mdc, state);
+
+static int mdc_obd_max_pages_per_rpc_seq_show(struct seq_file *m, void *v)
+{
+ return lprocfs_obd_rd_max_pages_per_rpc(m, m->private);
+}
+LPROC_SEQ_FOPS_RO(mdc_obd_max_pages_per_rpc);
+
+LPROC_SEQ_FOPS_RW_TYPE(mdc, import);
+LPROC_SEQ_FOPS_RW_TYPE(mdc, pinger_recov);
+
+static struct lprocfs_vars lprocfs_mdc_obd_vars[] = {
+ { "uuid", &mdc_uuid_fops, 0, 0 },
+ { "ping", &mdc_ping_fops, 0, 0222 },
+ { "connect_flags", &mdc_connect_flags_fops, 0, 0 },
+ { "blocksize", &mdc_blksize_fops, 0, 0 },
+ { "kbytestotal", &mdc_kbytestotal_fops, 0, 0 },
+ { "kbytesfree", &mdc_kbytesfree_fops, 0, 0 },
+ { "kbytesavail", &mdc_kbytesavail_fops, 0, 0 },
+ { "filestotal", &mdc_filestotal_fops, 0, 0 },
+ { "filesfree", &mdc_filesfree_fops, 0, 0 },
+ /*{ "filegroups", lprocfs_rd_filegroups, 0, 0 },*/
+ { "mds_server_uuid", &mdc_server_uuid_fops, 0, 0 },
+ { "mds_conn_uuid", &mdc_conn_uuid_fops, 0, 0 },
+ /*
+ * FIXME: the proc entry below is provided but not used; instead,
+ * sbi->sb_md_brw_size is used. The per-obd variable should be used
+ * once CMD is enabled and dir pages are managed in the MDC layer.
+ * Remember to enable the proc write function.
+ */
+ { "max_pages_per_rpc", &mdc_obd_max_pages_per_rpc_fops, 0, 0 },
+ { "max_rpcs_in_flight", &mdc_max_rpcs_in_flight_fops, 0, 0 },
+ { "timeouts", &mdc_timeouts_fops, 0, 0 },
+ { "import", &mdc_import_fops, 0 },
+ { "state", &mdc_state_fops, 0, 0 },
+ { "hsm_nl", &mdc_kuc_fops, 0, 0200 },
+ { "pinger_recov", &mdc_pinger_recov_fops, 0, 0 },
+ { 0 }
+};
+
+LPROC_SEQ_FOPS_RO_TYPE(mdc, numrefs);
+
+static struct lprocfs_vars lprocfs_mdc_module_vars[] = {
+ { "num_refs", &mdc_numrefs_fops, 0, 0 },
+ { 0 }
+};
+
+void lprocfs_mdc_init_vars(struct lprocfs_static_vars *lvars)
+{
+ lvars->module_vars = lprocfs_mdc_module_vars;
+ lvars->obd_vars = lprocfs_mdc_obd_vars;
+}
+#endif /* LPROCFS */
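For illustration only (not part of the patch): every tunable in lprocfs_mdc_obd_vars[] follows the pattern of max_rpcs_in_flight above: a <name>_seq_show reader, an optional <name>_seq_write writer parsed with lprocfs_write_helper(), and an LPROC_SEQ_FOPS(<name>) line generating the matching <name>_fops. A hypothetical extra tunable in that style; cl_demo_limit is an invented field used only to show the shape:

    static int mdc_demo_limit_seq_show(struct seq_file *m, void *v)
    {
            struct obd_device *dev = m->private;

            /* cl_demo_limit is invented; a real tunable reads client_obd state */
            return seq_printf(m, "%u\n", dev->u.cli.cl_demo_limit);
    }

    static ssize_t mdc_demo_limit_seq_write(struct file *file, const char *buffer,
                                            size_t count, loff_t *off)
    {
            struct obd_device *dev =
                    ((struct seq_file *)file->private_data)->private;
            int val, rc;

            rc = lprocfs_write_helper(buffer, count, &val);
            if (rc)
                    return rc;

            dev->u.cli.cl_demo_limit = val;
            return count;
    }
    LPROC_SEQ_FOPS(mdc_demo_limit);

    /* ...plus a { "demo_limit", &mdc_demo_limit_fops, 0, 0 } entry in
     * lprocfs_mdc_obd_vars[]. */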
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_internal.h b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
new file mode 100644
index 0000000..2aeff0e
--- /dev/null
+++ b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
@@ -0,0 +1,180 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef _MDC_INTERNAL_H
+#define _MDC_INTERNAL_H
+
+#include <lustre_mdc.h>
+#include <lustre_mds.h>
+
+#ifdef LPROCFS
+void lprocfs_mdc_init_vars(struct lprocfs_static_vars *lvars);
+#else
+static inline void lprocfs_mdc_init_vars(struct lprocfs_static_vars *lvars)
+{
+ memset(lvars, 0, sizeof(*lvars));
+}
+#endif
+
+void mdc_pack_body(struct ptlrpc_request *req, const struct lu_fid *fid,
+ struct obd_capa *oc, __u64 valid, int ea_size,
+ __u32 suppgid, int flags);
+void mdc_pack_capa(struct ptlrpc_request *req,
+ const struct req_msg_field *field, struct obd_capa *oc);
+int mdc_pack_req(struct ptlrpc_request *req, int version, int opc);
+void mdc_is_subdir_pack(struct ptlrpc_request *req, const struct lu_fid *pfid,
+ const struct lu_fid *cfid, int flags);
+void mdc_swap_layouts_pack(struct ptlrpc_request *req,
+ struct md_op_data *op_data);
+void mdc_readdir_pack(struct ptlrpc_request *req, __u64 pgoff, __u32 size,
+ const struct lu_fid *fid, struct obd_capa *oc);
+void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, int flags,
+ struct md_op_data *data, int ea_size);
+void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
+ void *ea, int ealen, void *ea2, int ea2len);
+void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
+ const void *data, int datalen, __u32 mode, __u32 uid,
+ __u32 gid, cfs_cap_t capability, __u64 rdev);
+void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
+ __u32 mode, __u64 rdev, __u32 flags, const void *data,
+ int datalen);
+void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data);
+void mdc_link_pack(struct ptlrpc_request *req, struct md_op_data *op_data);
+void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
+ const char *old, int oldlen, const char *new, int newlen);
+void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data);
+int mdc_enter_request(struct client_obd *cli);
+void mdc_exit_request(struct client_obd *cli);
+
+/* mdc/mdc_locks.c */
+int mdc_set_lock_data(struct obd_export *exp,
+ __u64 *lockh, void *data, __u64 *bits);
+
+int mdc_null_inode(struct obd_export *exp, const struct lu_fid *fid);
+
+int mdc_find_cbdata(struct obd_export *exp, const struct lu_fid *fid,
+ ldlm_iterator_t it, void *data);
+
+int mdc_intent_lock(struct obd_export *exp,
+ struct md_op_data *,
+ void *lmm, int lmmsize,
+ struct lookup_intent *, int,
+ struct ptlrpc_request **reqp,
+ ldlm_blocking_callback cb_blocking,
+ __u64 extra_lock_flags);
+int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
+ struct lookup_intent *it, struct md_op_data *op_data,
+ struct lustre_handle *lockh, void *lmm, int lmmsize,
+ struct ptlrpc_request **req, __u64 extra_lock_flags);
+
+int mdc_resource_get_unused(struct obd_export *exp, struct lu_fid *fid,
+ struct list_head *cancels, ldlm_mode_t mode,
+ __u64 bits);
+/* mdc/mdc_request.c */
+int mdc_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
+ struct md_op_data *op_data);
+
+int mdc_open(struct obd_export *exp, obd_id ino, int type, int flags,
+ struct lov_mds_md *lmm, int lmm_size, struct lustre_handle *fh,
+ struct ptlrpc_request **);
+
+struct obd_client_handle;
+
+int mdc_get_lustre_md(struct obd_export *md_exp, struct ptlrpc_request *req,
+ struct obd_export *dt_exp, struct obd_export *lmv_exp,
+ struct lustre_md *md);
+
+int mdc_free_lustre_md(struct obd_export *exp, struct lustre_md *md);
+
+int mdc_set_open_replay_data(struct obd_export *exp,
+ struct obd_client_handle *och,
+ struct ptlrpc_request *open_req);
+
+int mdc_clear_open_replay_data(struct obd_export *exp,
+ struct obd_client_handle *och);
+void mdc_commit_open(struct ptlrpc_request *req);
+void mdc_replay_open(struct ptlrpc_request *req);
+
+int mdc_create(struct obd_export *exp, struct md_op_data *op_data,
+ const void *data, int datalen, int mode, __u32 uid, __u32 gid,
+ cfs_cap_t capability, __u64 rdev,
+ struct ptlrpc_request **request);
+int mdc_link(struct obd_export *exp, struct md_op_data *op_data,
+ struct ptlrpc_request **request);
+int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
+ const char *old, int oldlen, const char *new, int newlen,
+ struct ptlrpc_request **request);
+int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
+ void *ea, int ealen, void *ea2, int ea2len,
+ struct ptlrpc_request **request, struct md_open_data **mod);
+int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
+ struct ptlrpc_request **request);
+int mdc_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
+ ldlm_policy_data_t *policy, ldlm_mode_t mode,
+ ldlm_cancel_flags_t flags, void *opaque);
+
+static inline void mdc_set_capa_size(struct ptlrpc_request *req,
+ const struct req_msg_field *field,
+ struct obd_capa *oc)
+{
+ if (oc == NULL)
+ req_capsule_set_size(&req->rq_pill, field, RCL_CLIENT, 0);
+ else
+ /* it is already calculated as sizeof struct obd_capa */
+ ;
+}
+
+int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
+ struct lu_fid *fid, __u64 *bits);
+
+int mdc_intent_getattr_async(struct obd_export *exp,
+ struct md_enqueue_info *minfo,
+ struct ldlm_enqueue_info *einfo);
+
+ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags,
+ const struct lu_fid *fid, ldlm_type_t type,
+ ldlm_policy_data_t *policy, ldlm_mode_t mode,
+ struct lustre_handle *lockh);
+
+static inline int mdc_prep_elc_req(struct obd_export *exp,
+ struct ptlrpc_request *req, int opc,
+ struct list_head *cancels, int count)
+{
+ return ldlm_prep_elc_req(exp, req, LUSTRE_MDS_VERSION, opc, 0, cancels,
+ count);
+}
+
+#endif
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
new file mode 100644
index 0000000..b2de478
--- /dev/null
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -0,0 +1,565 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_MDC
+#include <lustre_net.h>
+#include <lustre/lustre_idl.h>
+#include "mdc_internal.h"
+
+
+static void __mdc_pack_body(struct mdt_body *b, __u32 suppgid)
+{
+ LASSERT (b != NULL);
+
+ b->suppgid = suppgid;
+ b->uid = from_kuid(&init_user_ns, current_uid());
+ b->gid = from_kgid(&init_user_ns, current_gid());
+ b->fsuid = from_kuid(&init_user_ns, current_fsuid());
+ b->fsgid = from_kgid(&init_user_ns, current_fsgid());
+ b->capability = cfs_curproc_cap_pack();
+}
+
+void mdc_pack_capa(struct ptlrpc_request *req, const struct req_msg_field *field,
+ struct obd_capa *oc)
+{
+ struct req_capsule *pill = &req->rq_pill;
+ struct lustre_capa *c;
+
+ if (oc == NULL) {
+ LASSERT(req_capsule_get_size(pill, field, RCL_CLIENT) == 0);
+ return;
+ }
+
+ c = req_capsule_client_get(pill, field);
+ LASSERT(c != NULL);
+ capa_cpy(c, oc);
+ DEBUG_CAPA(D_SEC, c, "pack");
+}
+
+void mdc_is_subdir_pack(struct ptlrpc_request *req, const struct lu_fid *pfid,
+ const struct lu_fid *cfid, int flags)
+{
+ struct mdt_body *b = req_capsule_client_get(&req->rq_pill,
+ &RMF_MDT_BODY);
+
+ if (pfid) {
+ b->fid1 = *pfid;
+ b->valid = OBD_MD_FLID;
+ }
+ if (cfid)
+ b->fid2 = *cfid;
+ b->flags = flags;
+}
+
+void mdc_swap_layouts_pack(struct ptlrpc_request *req,
+ struct md_op_data *op_data)
+{
+ struct mdt_body *b = req_capsule_client_get(&req->rq_pill,
+ &RMF_MDT_BODY);
+
+ __mdc_pack_body(b, op_data->op_suppgids[0]);
+ b->fid1 = op_data->op_fid1;
+ b->fid2 = op_data->op_fid2;
+ b->valid |= OBD_MD_FLID;
+
+ mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1);
+ mdc_pack_capa(req, &RMF_CAPA2, op_data->op_capa2);
+}
+
+void mdc_pack_body(struct ptlrpc_request *req,
+ const struct lu_fid *fid, struct obd_capa *oc,
+ __u64 valid, int ea_size, __u32 suppgid, int flags)
+{
+ struct mdt_body *b = req_capsule_client_get(&req->rq_pill,
+ &RMF_MDT_BODY);
+ LASSERT(b != NULL);
+ b->valid = valid;
+ b->eadatasize = ea_size;
+ b->flags = flags;
+ __mdc_pack_body(b, suppgid);
+ if (fid) {
+ b->fid1 = *fid;
+ b->valid |= OBD_MD_FLID;
+ mdc_pack_capa(req, &RMF_CAPA1, oc);
+ }
+}
+
+void mdc_readdir_pack(struct ptlrpc_request *req, __u64 pgoff,
+ __u32 size, const struct lu_fid *fid, struct obd_capa *oc)
+{
+ struct mdt_body *b = req_capsule_client_get(&req->rq_pill,
+ &RMF_MDT_BODY);
+ b->fid1 = *fid;
+ b->valid |= OBD_MD_FLID;
+ b->size = pgoff; /* !! */
+ b->nlink = size; /* !! */
+ __mdc_pack_body(b, -1);
+ b->mode = LUDA_FID | LUDA_TYPE;
+
+ mdc_pack_capa(req, &RMF_CAPA1, oc);
+}
+
+/* packing of MDS records */
+void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
+ const void *data, int datalen, __u32 mode,
+ __u32 uid, __u32 gid, cfs_cap_t cap_effective, __u64 rdev)
+{
+ struct mdt_rec_create *rec;
+ char *tmp;
+ __u64 flags;
+
+ CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_create));
+ rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
+
+
+ rec->cr_opcode = REINT_CREATE;
+ rec->cr_fsuid = uid;
+ rec->cr_fsgid = gid;
+ rec->cr_cap = cap_effective;
+ rec->cr_fid1 = op_data->op_fid1;
+ rec->cr_fid2 = op_data->op_fid2;
+ rec->cr_mode = mode;
+ rec->cr_rdev = rdev;
+ rec->cr_time = op_data->op_mod_time;
+ rec->cr_suppgid1 = op_data->op_suppgids[0];
+ rec->cr_suppgid2 = op_data->op_suppgids[1];
+ flags = op_data->op_flags & MF_SOM_LOCAL_FLAGS;
+ if (op_data->op_bias & MDS_CREATE_VOLATILE)
+ flags |= MDS_OPEN_VOLATILE;
+ set_mrc_cr_flags(rec, flags);
+ rec->cr_bias = op_data->op_bias;
+ rec->cr_umask = current_umask();
+
+ mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1);
+
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
+ LOGL0(op_data->op_name, op_data->op_namelen, tmp);
+
+ if (data) {
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
+ memcpy(tmp, data, datalen);
+ }
+}
+
+static __u64 mds_pack_open_flags(__u32 flags, __u32 mode)
+{
+ __u64 cr_flags = (flags & (FMODE_READ | FMODE_WRITE |
+ MDS_OPEN_HAS_EA | MDS_OPEN_HAS_OBJS |
+ MDS_OPEN_OWNEROVERRIDE | MDS_OPEN_LOCK |
+ MDS_OPEN_BY_FID));
+ if (flags & O_CREAT)
+ cr_flags |= MDS_OPEN_CREAT;
+ if (flags & O_EXCL)
+ cr_flags |= MDS_OPEN_EXCL;
+ if (flags & O_TRUNC)
+ cr_flags |= MDS_OPEN_TRUNC;
+ if (flags & O_APPEND)
+ cr_flags |= MDS_OPEN_APPEND;
+ if (flags & O_SYNC)
+ cr_flags |= MDS_OPEN_SYNC;
+ if (flags & O_DIRECTORY)
+ cr_flags |= MDS_OPEN_DIRECTORY;
+#ifdef FMODE_EXEC
+ if (flags & FMODE_EXEC)
+ cr_flags |= MDS_FMODE_EXEC;
+#endif
+ if (flags & O_LOV_DELAY_CREATE)
+ cr_flags |= MDS_OPEN_DELAY_CREATE;
+
+ if (flags & O_NONBLOCK)
+ cr_flags |= MDS_OPEN_NORESTORE;
+
+ return cr_flags;
+}
+
+/* packing of MDS records */
+void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
+ __u32 mode, __u64 rdev, __u32 flags, const void *lmm,
+ int lmmlen)
+{
+ struct mdt_rec_create *rec;
+ char *tmp;
+ __u64 cr_flags;
+
+ CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_create));
+ rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
+
+ /* XXX do something about time, uid, gid */
+ rec->cr_opcode = REINT_OPEN;
+ rec->cr_fsuid = from_kuid(&init_user_ns, current_fsuid());
+ rec->cr_fsgid = from_kgid(&init_user_ns, current_fsgid());
+ rec->cr_cap = cfs_curproc_cap_pack();
+ if (op_data != NULL) {
+ rec->cr_fid1 = op_data->op_fid1;
+ rec->cr_fid2 = op_data->op_fid2;
+ }
+ rec->cr_mode = mode;
+ cr_flags = mds_pack_open_flags(flags, mode);
+ rec->cr_rdev = rdev;
+ rec->cr_time = op_data->op_mod_time;
+ rec->cr_suppgid1 = op_data->op_suppgids[0];
+ rec->cr_suppgid2 = op_data->op_suppgids[1];
+ rec->cr_bias = op_data->op_bias;
+ rec->cr_umask = current_umask();
+
+ mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1);
+ /* the next buffer is the child capa, which is used for replay
+ * and will be packed from the data in the reply message. */
+
+ if (op_data->op_name) {
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
+ LOGL0(op_data->op_name, op_data->op_namelen, tmp);
+ if (op_data->op_bias & MDS_CREATE_VOLATILE)
+ cr_flags |= MDS_OPEN_VOLATILE;
+ }
+
+ if (lmm) {
+ cr_flags |= MDS_OPEN_HAS_EA;
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
+ memcpy(tmp, lmm, lmmlen);
+ }
+ set_mrc_cr_flags(rec, cr_flags);
+}
+
+static inline __u64 attr_pack(unsigned int ia_valid)
+{
+ __u64 sa_valid = 0;
+
+ if (ia_valid & ATTR_MODE)
+ sa_valid |= MDS_ATTR_MODE;
+ if (ia_valid & ATTR_UID)
+ sa_valid |= MDS_ATTR_UID;
+ if (ia_valid & ATTR_GID)
+ sa_valid |= MDS_ATTR_GID;
+ if (ia_valid & ATTR_SIZE)
+ sa_valid |= MDS_ATTR_SIZE;
+ if (ia_valid & ATTR_ATIME)
+ sa_valid |= MDS_ATTR_ATIME;
+ if (ia_valid & ATTR_MTIME)
+ sa_valid |= MDS_ATTR_MTIME;
+ if (ia_valid & ATTR_CTIME)
+ sa_valid |= MDS_ATTR_CTIME;
+ if (ia_valid & ATTR_ATIME_SET)
+ sa_valid |= MDS_ATTR_ATIME_SET;
+ if (ia_valid & ATTR_MTIME_SET)
+ sa_valid |= MDS_ATTR_MTIME_SET;
+ if (ia_valid & ATTR_FORCE)
+ sa_valid |= MDS_ATTR_FORCE;
+ if (ia_valid & ATTR_ATTR_FLAG)
+ sa_valid |= MDS_ATTR_ATTR_FLAG;
+ if (ia_valid & ATTR_KILL_SUID)
+ sa_valid |= MDS_ATTR_KILL_SUID;
+ if (ia_valid & ATTR_KILL_SGID)
+ sa_valid |= MDS_ATTR_KILL_SGID;
+ if (ia_valid & ATTR_CTIME_SET)
+ sa_valid |= MDS_ATTR_CTIME_SET;
+ if (ia_valid & ATTR_FROM_OPEN)
+ sa_valid |= MDS_ATTR_FROM_OPEN;
+ if (ia_valid & ATTR_BLOCKS)
+ sa_valid |= MDS_ATTR_BLOCKS;
+ if (ia_valid & MDS_OPEN_OWNEROVERRIDE)
+ /* NFSD hack (see bug 5781) */
+ sa_valid |= MDS_OPEN_OWNEROVERRIDE;
+ return sa_valid;
+}
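+
+/*
+ * Illustrative example: a chmod-style setattr whose ia_valid is
+ * ATTR_MODE | ATTR_CTIME is translated by attr_pack() into
+ * MDS_ATTR_MODE | MDS_ATTR_CTIME for the on-wire sa_valid field.
+ */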
+
+static void mdc_setattr_pack_rec(struct mdt_rec_setattr *rec,
+ struct md_op_data *op_data)
+{
+ rec->sa_opcode = REINT_SETATTR;
+ rec->sa_fsuid = from_kuid(&init_user_ns, current_fsuid());
+ rec->sa_fsgid = from_kgid(&init_user_ns, current_fsgid());
+ rec->sa_cap = cfs_curproc_cap_pack();
+ rec->sa_suppgid = -1;
+
+ rec->sa_fid = op_data->op_fid1;
+ rec->sa_valid = attr_pack(op_data->op_attr.ia_valid);
+ rec->sa_mode = op_data->op_attr.ia_mode;
+ rec->sa_uid = from_kuid(&init_user_ns, op_data->op_attr.ia_uid);
+ rec->sa_gid = from_kgid(&init_user_ns, op_data->op_attr.ia_gid);
+ rec->sa_size = op_data->op_attr.ia_size;
+ rec->sa_blocks = op_data->op_attr_blocks;
+ rec->sa_atime = LTIME_S(op_data->op_attr.ia_atime);
+ rec->sa_mtime = LTIME_S(op_data->op_attr.ia_mtime);
+ rec->sa_ctime = LTIME_S(op_data->op_attr.ia_ctime);
+ rec->sa_attr_flags = ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags;
+ if ((op_data->op_attr.ia_valid & ATTR_GID) &&
+ in_group_p(op_data->op_attr.ia_gid))
+ rec->sa_suppgid =
+ from_kgid(&init_user_ns, op_data->op_attr.ia_gid);
+ else
+ rec->sa_suppgid = op_data->op_suppgids[0];
+
+ rec->sa_bias = op_data->op_bias;
+}
+
+static void mdc_ioepoch_pack(struct mdt_ioepoch *epoch,
+ struct md_op_data *op_data)
+{
+ memcpy(&epoch->handle, &op_data->op_handle, sizeof(epoch->handle));
+ epoch->ioepoch = op_data->op_ioepoch;
+ epoch->flags = op_data->op_flags & MF_SOM_LOCAL_FLAGS;
+}
+
+void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
+ void *ea, int ealen, void *ea2, int ea2len)
+{
+ struct mdt_rec_setattr *rec;
+ struct mdt_ioepoch *epoch;
+ struct lov_user_md *lum = NULL;
+
+ CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_setattr));
+ rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
+ mdc_setattr_pack_rec(rec, op_data);
+
+ mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1);
+
+ if (op_data->op_flags & (MF_SOM_CHANGE | MF_EPOCH_OPEN)) {
+ epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
+ mdc_ioepoch_pack(epoch, op_data);
+ }
+
+ if (ealen == 0)
+ return;
+
+ lum = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
+ if (ea == NULL) { /* Remove LOV EA */
+ lum->lmm_magic = LOV_USER_MAGIC_V1;
+ lum->lmm_stripe_size = 0;
+ lum->lmm_stripe_count = 0;
+ lum->lmm_stripe_offset = (typeof(lum->lmm_stripe_offset))(-1);
+ } else {
+ memcpy(lum, ea, ealen);
+ }
+
+ if (ea2len == 0)
+ return;
+
+ memcpy(req_capsule_client_get(&req->rq_pill, &RMF_LOGCOOKIES), ea2,
+ ea2len);
+}
+
+void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
+{
+ struct mdt_rec_unlink *rec;
+ char *tmp;
+
+ CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_unlink));
+ rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
+ LASSERT(rec != NULL);
+
+ rec->ul_opcode = op_data->op_cli_flags & CLI_RM_ENTRY ?
+ REINT_RMENTRY : REINT_UNLINK;
+ rec->ul_fsuid = op_data->op_fsuid;
+ rec->ul_fsgid = op_data->op_fsgid;
+ rec->ul_cap = op_data->op_cap;
+ rec->ul_mode = op_data->op_mode;
+ rec->ul_suppgid1 = op_data->op_suppgids[0];
+ rec->ul_suppgid2 = -1;
+ rec->ul_fid1 = op_data->op_fid1;
+ rec->ul_fid2 = op_data->op_fid2;
+ rec->ul_time = op_data->op_mod_time;
+ rec->ul_bias = op_data->op_bias;
+
+ mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1);
+
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
+ LASSERT(tmp != NULL);
+ LOGL0(op_data->op_name, op_data->op_namelen, tmp);
+}
+
+void mdc_link_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
+{
+ struct mdt_rec_link *rec;
+ char *tmp;
+
+ CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_link));
+ rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
+ LASSERT(rec != NULL);
+
+ rec->lk_opcode = REINT_LINK;
+ rec->lk_fsuid = op_data->op_fsuid; /* current->fsuid */
+ rec->lk_fsgid = op_data->op_fsgid; /* current->fsgid */
+ rec->lk_cap = op_data->op_cap; /* current->cap_effective */
+ rec->lk_suppgid1 = op_data->op_suppgids[0];
+ rec->lk_suppgid2 = op_data->op_suppgids[1];
+ rec->lk_fid1 = op_data->op_fid1;
+ rec->lk_fid2 = op_data->op_fid2;
+ rec->lk_time = op_data->op_mod_time;
+ rec->lk_bias = op_data->op_bias;
+
+ mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1);
+ mdc_pack_capa(req, &RMF_CAPA2, op_data->op_capa2);
+
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
+ LOGL0(op_data->op_name, op_data->op_namelen, tmp);
+}
+
+void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
+ const char *old, int oldlen, const char *new, int newlen)
+{
+ struct mdt_rec_rename *rec;
+ char *tmp;
+
+ CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_rename));
+ rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
+
+ /* XXX do something about time, uid, gid */
+ rec->rn_opcode = REINT_RENAME;
+ rec->rn_fsuid = op_data->op_fsuid;
+ rec->rn_fsgid = op_data->op_fsgid;
+ rec->rn_cap = op_data->op_cap;
+ rec->rn_suppgid1 = op_data->op_suppgids[0];
+ rec->rn_suppgid2 = op_data->op_suppgids[1];
+ rec->rn_fid1 = op_data->op_fid1;
+ rec->rn_fid2 = op_data->op_fid2;
+ rec->rn_time = op_data->op_mod_time;
+ rec->rn_mode = op_data->op_mode;
+ rec->rn_bias = op_data->op_bias;
+
+ mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1);
+ mdc_pack_capa(req, &RMF_CAPA2, op_data->op_capa2);
+
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
+ LOGL0(old, oldlen, tmp);
+
+ if (new) {
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_SYMTGT);
+ LOGL0(new, newlen, tmp);
+ }
+}
+
+void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, int flags,
+ struct md_op_data *op_data, int ea_size)
+{
+ struct mdt_body *b = req_capsule_client_get(&req->rq_pill,
+ &RMF_MDT_BODY);
+
+ b->valid = valid;
+ if (op_data->op_bias & MDS_CHECK_SPLIT)
+ b->valid |= OBD_MD_FLCKSPLIT;
+ if (op_data->op_bias & MDS_CROSS_REF)
+ b->valid |= OBD_MD_FLCROSSREF;
+ b->eadatasize = ea_size;
+ b->flags = flags;
+ __mdc_pack_body(b, op_data->op_suppgids[0]);
+
+ b->fid1 = op_data->op_fid1;
+ b->fid2 = op_data->op_fid2;
+ b->valid |= OBD_MD_FLID;
+
+ mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1);
+
+ if (op_data->op_name) {
+ char *tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
+ LOGL0(op_data->op_name, op_data->op_namelen, tmp);
+ }
+}
+
+void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
+{
+ struct mdt_ioepoch *epoch;
+ struct mdt_rec_setattr *rec;
+
+ epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
+ rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
+
+ mdc_setattr_pack_rec(rec, op_data);
+ mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1);
+ mdc_ioepoch_pack(epoch, op_data);
+}
+
+static int mdc_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
+{
+ int rc;
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ rc = list_empty(&mcw->mcw_entry);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ return rc;
+}
+
+/* We record requests in flight in cli->cl_r_in_flight here.
+ * There is only one write RPC possible in mdc anyway. If this is to change
+ * in the future, the code may need to be revisited. */
+int mdc_enter_request(struct client_obd *cli)
+{
+ int rc = 0;
+ struct mdc_cache_waiter mcw;
+ struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
+ list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
+ init_waitqueue_head(&mcw.mcw_waitq);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ rc = l_wait_event(mcw.mcw_waitq, mdc_req_avail(cli, &mcw), &lwi);
+ if (rc) {
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ if (list_empty(&mcw.mcw_entry))
+ cli->cl_r_in_flight--;
+ list_del_init(&mcw.mcw_entry);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ }
+ } else {
+ cli->cl_r_in_flight++;
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ }
+ return rc;
+}
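+
+/*
+ * Usage sketch (this mirrors what mdc_enqueue() and
+ * mdc_intent_getattr_async() in mdc_locks.c do): callers bracket a
+ * metadata RPC with this pair so that at most cl_max_rpcs_in_flight
+ * requests are outstanding:
+ *
+ *	rc = mdc_enter_request(cli);
+ *	if (rc != 0)
+ *		return rc;
+ *	... issue the RPC ...
+ *	mdc_exit_request(cli);
+ */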
+
+void mdc_exit_request(struct client_obd *cli)
+{
+ struct list_head *l, *tmp;
+ struct mdc_cache_waiter *mcw;
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ cli->cl_r_in_flight--;
+ list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
+ if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
+ /* No free request slots anymore */
+ break;
+ }
+
+ mcw = list_entry(l, struct mdc_cache_waiter, mcw_entry);
+ list_del_init(&mcw->mcw_entry);
+ cli->cl_r_in_flight++;
+ wake_up(&mcw->mcw_waitq);
+ }
+ /* Empty waiting list? Decrease reqs in-flight number */
+
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+}
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
new file mode 100644
index 0000000..fb5a995
--- /dev/null
+++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
@@ -0,0 +1,1230 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_MDC
+
+# include <linux/module.h>
+# include <linux/pagemap.h>
+# include <linux/miscdevice.h>
+# include <linux/init.h>
+
+#include <lustre_acl.h>
+#include <obd_class.h>
+#include <lustre_dlm.h>
+/* fid_res_name_eq() */
+#include <lustre_fid.h>
+#include <lprocfs_status.h>
+#include "mdc_internal.h"
+
+struct mdc_getattr_args {
+ struct obd_export *ga_exp;
+ struct md_enqueue_info *ga_minfo;
+ struct ldlm_enqueue_info *ga_einfo;
+};
+
+int it_disposition(struct lookup_intent *it, int flag)
+{
+ return it->d.lustre.it_disposition & flag;
+}
+EXPORT_SYMBOL(it_disposition);
+
+void it_set_disposition(struct lookup_intent *it, int flag)
+{
+ it->d.lustre.it_disposition |= flag;
+}
+EXPORT_SYMBOL(it_set_disposition);
+
+void it_clear_disposition(struct lookup_intent *it, int flag)
+{
+ it->d.lustre.it_disposition &= ~flag;
+}
+EXPORT_SYMBOL(it_clear_disposition);
+
+int it_open_error(int phase, struct lookup_intent *it)
+{
+ if (it_disposition(it, DISP_OPEN_OPEN)) {
+ if (phase >= DISP_OPEN_OPEN)
+ return it->d.lustre.it_status;
+ else
+ return 0;
+ }
+
+ if (it_disposition(it, DISP_OPEN_CREATE)) {
+ if (phase >= DISP_OPEN_CREATE)
+ return it->d.lustre.it_status;
+ else
+ return 0;
+ }
+
+ if (it_disposition(it, DISP_LOOKUP_EXECD)) {
+ if (phase >= DISP_LOOKUP_EXECD)
+ return it->d.lustre.it_status;
+ else
+ return 0;
+ }
+
+ if (it_disposition(it, DISP_IT_EXECD)) {
+ if (phase >= DISP_IT_EXECD)
+ return it->d.lustre.it_status;
+ else
+ return 0;
+ }
+ CERROR("it disp: %X, status: %d\n", it->d.lustre.it_disposition,
+ it->d.lustre.it_status);
+ LBUG();
+ return 0;
+}
+EXPORT_SYMBOL(it_open_error);
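+
+/*
+ * Example (illustrative, assuming the usual ordering of the DISP_* phase
+ * values): if the server got as far as the open phase (DISP_OPEN_OPEN is
+ * set) and that phase failed, then it_open_error(DISP_OPEN_CREATE, it)
+ * returns 0 because the earlier create phase succeeded, while
+ * it_open_error(DISP_OPEN_OPEN, it) returns the stored it_status.
+ */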
+
+/* this must be called on a lockh that is known to have a referenced lock */
+int mdc_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
+ __u64 *bits)
+{
+ struct ldlm_lock *lock;
+ struct inode *new_inode = data;
+
+ if (bits)
+ *bits = 0;
+
+ if (!*lockh)
+ return 0;
+
+ lock = ldlm_handle2lock((struct lustre_handle *)lockh);
+
+ LASSERT(lock != NULL);
+ lock_res_and_lock(lock);
+ if (lock->l_resource->lr_lvb_inode &&
+ lock->l_resource->lr_lvb_inode != data) {
+ struct inode *old_inode = lock->l_resource->lr_lvb_inode;
+ LASSERTF(old_inode->i_state & I_FREEING,
+ "Found existing inode %p/%lu/%u state %lu in lock: "
+ "setting data to %p/%lu/%u\n", old_inode,
+ old_inode->i_ino, old_inode->i_generation,
+ old_inode->i_state,
+ new_inode, new_inode->i_ino, new_inode->i_generation);
+ }
+ lock->l_resource->lr_lvb_inode = new_inode;
+ if (bits)
+ *bits = lock->l_policy_data.l_inodebits.bits;
+
+ unlock_res_and_lock(lock);
+ LDLM_LOCK_PUT(lock);
+
+ return 0;
+}
+
+ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags,
+ const struct lu_fid *fid, ldlm_type_t type,
+ ldlm_policy_data_t *policy, ldlm_mode_t mode,
+ struct lustre_handle *lockh)
+{
+ struct ldlm_res_id res_id;
+ ldlm_mode_t rc;
+
+ fid_build_reg_res_name(fid, &res_id);
+ rc = ldlm_lock_match(class_exp2obd(exp)->obd_namespace, flags,
+ &res_id, type, policy, mode, lockh, 0);
+ return rc;
+}
+
+int mdc_cancel_unused(struct obd_export *exp,
+ const struct lu_fid *fid,
+ ldlm_policy_data_t *policy,
+ ldlm_mode_t mode,
+ ldlm_cancel_flags_t flags,
+ void *opaque)
+{
+ struct ldlm_res_id res_id;
+ struct obd_device *obd = class_exp2obd(exp);
+ int rc;
+
+ fid_build_reg_res_name(fid, &res_id);
+ rc = ldlm_cli_cancel_unused_resource(obd->obd_namespace, &res_id,
+ policy, mode, flags, opaque);
+ return rc;
+}
+
+int mdc_null_inode(struct obd_export *exp,
+ const struct lu_fid *fid)
+{
+ struct ldlm_res_id res_id;
+ struct ldlm_resource *res;
+ struct ldlm_namespace *ns = class_exp2obd(exp)->obd_namespace;
+
+ LASSERTF(ns != NULL, "no namespace passed\n");
+
+ fid_build_reg_res_name(fid, &res_id);
+
+ res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
+ if (res == NULL)
+ return 0;
+
+ lock_res(res);
+ res->lr_lvb_inode = NULL;
+ unlock_res(res);
+
+ ldlm_resource_putref(res);
+ return 0;
+}
+
+/* Find any ldlm lock of the inode in mdc.
+ * Returns 0 if none is found,
+ * 1 if one is found,
+ * < 0 on error. */
+int mdc_find_cbdata(struct obd_export *exp,
+ const struct lu_fid *fid,
+ ldlm_iterator_t it, void *data)
+{
+ struct ldlm_res_id res_id;
+ int rc = 0;
+
+ fid_build_reg_res_name((struct lu_fid *)fid, &res_id);
+ rc = ldlm_resource_iterate(class_exp2obd(exp)->obd_namespace, &res_id,
+ it, data);
+ if (rc == LDLM_ITER_STOP)
+ return 1;
+ else if (rc == LDLM_ITER_CONTINUE)
+ return 0;
+ return rc;
+}
+
+static inline void mdc_clear_replay_flag(struct ptlrpc_request *req, int rc)
+{
+ /* Don't hold error requests for replay. */
+ if (req->rq_replay) {
+ spin_lock(&req->rq_lock);
+ req->rq_replay = 0;
+ spin_unlock(&req->rq_lock);
+ }
+ if (rc && req->rq_transno != 0) {
+ DEBUG_REQ(D_ERROR, req, "transno returned on error rc %d", rc);
+ LBUG();
+ }
+}
+
+/* Save a large LOV EA into the request buffer so that it is available
+ * for replay. We don't do this in the initial request because the
+ * original request doesn't need this buffer (at most it sends just the
+ * lov_mds_md), it is a waste of RAM/bandwidth to send an empty buffer,
+ * and it may also be difficult to allocate and save a very large
+ * request buffer for each open. (bug 5707)
+ *
+ * OOM here may cause recovery failure if the lmm is needed (only for the
+ * original open if the MDS crashed just when this client also OOM'd),
+ * but this is incredibly unlikely, and it is questionable whether the
+ * client could do MDS recovery under OOM anyway... */
+static void mdc_realloc_openmsg(struct ptlrpc_request *req,
+ struct mdt_body *body)
+{
+ int rc;
+
+ /* FIXME: remove this explicit offset. */
+ rc = sptlrpc_cli_enlarge_reqbuf(req, DLM_INTENT_REC_OFF + 4,
+ body->eadatasize);
+ if (rc) {
+ CERROR("Can't enlarge segment %d size to %d\n",
+ DLM_INTENT_REC_OFF + 4, body->eadatasize);
+ body->valid &= ~OBD_MD_FLEASIZE;
+ body->eadatasize = 0;
+ }
+}
+
+static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp,
+ struct lookup_intent *it,
+ struct md_op_data *op_data,
+ void *lmm, int lmmsize,
+ void *cb_data)
+{
+ struct ptlrpc_request *req;
+ struct obd_device *obddev = class_exp2obd(exp);
+ struct ldlm_intent *lit;
+ LIST_HEAD(cancels);
+ int count = 0;
+ int mode;
+ int rc;
+
+ it->it_create_mode = (it->it_create_mode & ~S_IFMT) | S_IFREG;
+
+ /* XXX: openlock is not cancelled for cross-refs. */
+ /* If inode is known, cancel conflicting OPEN locks. */
+ if (fid_is_sane(&op_data->op_fid2)) {
+ if (it->it_flags & (FMODE_WRITE|MDS_OPEN_TRUNC))
+ mode = LCK_CW;
+#ifdef FMODE_EXEC
+ else if (it->it_flags & FMODE_EXEC)
+ mode = LCK_PR;
+#endif
+ else
+ mode = LCK_CR;
+ count = mdc_resource_get_unused(exp, &op_data->op_fid2,
+ &cancels, mode,
+ MDS_INODELOCK_OPEN);
+ }
+
+ /* If CREATE, cancel parent's UPDATE lock. */
+ if (it->it_op & IT_CREAT)
+ mode = LCK_EX;
+ else
+ mode = LCK_CR;
+ count += mdc_resource_get_unused(exp, &op_data->op_fid1,
+ &cancels, mode,
+ MDS_INODELOCK_UPDATE);
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+ &RQF_LDLM_INTENT_OPEN);
+ if (req == NULL) {
+ ldlm_lock_list_put(&cancels, l_bl_ast, count);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* parent capability */
+ mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
+ /* child capability: reserve the size according to the parent capa; it
+ * will be filled in after we get the reply */
+ mdc_set_capa_size(req, &RMF_CAPA2, op_data->op_capa1);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
+ op_data->op_namelen + 1);
+ req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT,
+ max(lmmsize, obddev->u.cli.cl_default_mds_easize));
+
+ rc = ldlm_prep_enqueue_req(exp, req, &cancels, count);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return ERR_PTR(rc);
+ }
+
+ spin_lock(&req->rq_lock);
+ req->rq_replay = req->rq_import->imp_replayable;
+ spin_unlock(&req->rq_lock);
+
+ /* pack the intent */
+ lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
+ lit->opc = (__u64)it->it_op;
+
+ /* pack the intended request */
+ mdc_open_pack(req, op_data, it->it_create_mode, 0, it->it_flags, lmm,
+ lmmsize);
+
+ /* for remote client, fetch remote perm for current user */
+ if (client_is_remote(exp))
+ req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
+ sizeof(struct mdt_remote_perm));
+ ptlrpc_request_set_replen(req);
+ return req;
+}
+
+static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export *exp,
+ struct lookup_intent *it,
+ struct md_op_data *op_data)
+{
+ struct ptlrpc_request *req;
+ struct obd_device *obddev = class_exp2obd(exp);
+ struct ldlm_intent *lit;
+ int rc;
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+ &RQF_LDLM_INTENT_UNLINK);
+ if (req == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
+ req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
+ op_data->op_namelen + 1);
+
+ rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return ERR_PTR(rc);
+ }
+
+ /* pack the intent */
+ lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
+ lit->opc = (__u64)it->it_op;
+
+ /* pack the intended request */
+ mdc_unlink_pack(req, op_data);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
+ obddev->u.cli.cl_max_mds_easize);
+ req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
+ obddev->u.cli.cl_max_mds_cookiesize);
+ ptlrpc_request_set_replen(req);
+ return req;
+}
+
+static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp,
+ struct lookup_intent *it,
+ struct md_op_data *op_data)
+{
+ struct ptlrpc_request *req;
+ struct obd_device *obddev = class_exp2obd(exp);
+ obd_valid valid = OBD_MD_FLGETATTR | OBD_MD_FLEASIZE |
+ OBD_MD_FLMODEASIZE | OBD_MD_FLDIREA |
+ OBD_MD_FLMDSCAPA | OBD_MD_MEA |
+ (client_is_remote(exp) ?
+ OBD_MD_FLRMTPERM : OBD_MD_FLACL);
+ struct ldlm_intent *lit;
+ int rc;
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+ &RQF_LDLM_INTENT_GETATTR);
+ if (req == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
+ req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
+ op_data->op_namelen + 1);
+
+ rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return ERR_PTR(rc);
+ }
+
+ /* pack the intent */
+ lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
+ lit->opc = (__u64)it->it_op;
+
+ /* pack the intended request */
+ mdc_getattr_pack(req, valid, it->it_flags, op_data,
+ obddev->u.cli.cl_max_mds_easize);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
+ obddev->u.cli.cl_max_mds_easize);
+ if (client_is_remote(exp))
+ req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
+ sizeof(struct mdt_remote_perm));
+ ptlrpc_request_set_replen(req);
+ return req;
+}
+
+static struct ptlrpc_request *mdc_intent_layout_pack(struct obd_export *exp,
+ struct lookup_intent *it,
+ struct md_op_data *unused)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+ struct ptlrpc_request *req;
+ struct ldlm_intent *lit;
+ struct layout_intent *layout;
+ int rc;
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+ &RQF_LDLM_INTENT_LAYOUT);
+ if (req == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, 0);
+ rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return ERR_PTR(rc);
+ }
+
+ /* pack the intent */
+ lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
+ lit->opc = (__u64)it->it_op;
+
+ /* pack the layout intent request */
+ layout = req_capsule_client_get(&req->rq_pill, &RMF_LAYOUT_INTENT);
+ /* LAYOUT_INTENT_ACCESS is generic; the specific operation will be
+ * set for replication */
+ layout->li_opc = LAYOUT_INTENT_ACCESS;
+
+ req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
+ obd->u.cli.cl_max_mds_easize);
+ ptlrpc_request_set_replen(req);
+ return req;
+}
+
+static struct ptlrpc_request *
+mdc_enqueue_pack(struct obd_export *exp, int lvb_len)
+{
+ struct ptlrpc_request *req;
+ int rc;
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE);
+ if (req == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return ERR_PTR(rc);
+ }
+
+ req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, lvb_len);
+ ptlrpc_request_set_replen(req);
+ return req;
+}
+
+static int mdc_finish_enqueue(struct obd_export *exp,
+ struct ptlrpc_request *req,
+ struct ldlm_enqueue_info *einfo,
+ struct lookup_intent *it,
+ struct lustre_handle *lockh,
+ int rc)
+{
+ struct req_capsule *pill = &req->rq_pill;
+ struct ldlm_request *lockreq;
+ struct ldlm_reply *lockrep;
+ struct lustre_intent_data *intent = &it->d.lustre;
+ struct ldlm_lock *lock;
+ void *lvb_data = NULL;
+ int lvb_len = 0;
+
+ LASSERT(rc >= 0);
+ /* Similarly, if we're going to replay this request, we don't want to
+ * actually get a lock, just perform the intent. */
+ if (req->rq_transno || req->rq_replay) {
+ lockreq = req_capsule_client_get(pill, &RMF_DLM_REQ);
+ lockreq->lock_flags |= ldlm_flags_to_wire(LDLM_FL_INTENT_ONLY);
+ }
+
+ if (rc == ELDLM_LOCK_ABORTED) {
+ einfo->ei_mode = 0;
+ memset(lockh, 0, sizeof(*lockh));
+ rc = 0;
+ } else { /* rc = 0 */
+ lock = ldlm_handle2lock(lockh);
+ LASSERT(lock != NULL);
+
+ /* If the server gave us back a different lock mode, we should
+ * fix up our variables. */
+ if (lock->l_req_mode != einfo->ei_mode) {
+ ldlm_lock_addref(lockh, lock->l_req_mode);
+ ldlm_lock_decref(lockh, einfo->ei_mode);
+ einfo->ei_mode = lock->l_req_mode;
+ }
+ LDLM_LOCK_PUT(lock);
+ }
+
+ lockrep = req_capsule_server_get(pill, &RMF_DLM_REP);
+ LASSERT(lockrep != NULL); /* checked by ldlm_cli_enqueue() */
+
+ intent->it_disposition = (int)lockrep->lock_policy_res1;
+ intent->it_status = (int)lockrep->lock_policy_res2;
+ intent->it_lock_mode = einfo->ei_mode;
+ intent->it_lock_handle = lockh->cookie;
+ intent->it_data = req;
+
+ /* Technically speaking rq_transno must already be zero if
+ * it_status is in error, so the check is a bit redundant */
+ if ((!req->rq_transno || intent->it_status < 0) && req->rq_replay)
+ mdc_clear_replay_flag(req, intent->it_status);
+
+ /* If we're doing an IT_OPEN which did not result in an actual
+ * successful open, then we need to remove the bit which saves
+ * this request for unconditional replay.
+ *
+ * It's important that we do this first! Otherwise we might exit the
+ * function without doing so, and try to replay a failed create
+ * (bug 3440) */
+ if (it->it_op & IT_OPEN && req->rq_replay &&
+ (!it_disposition(it, DISP_OPEN_OPEN) || intent->it_status != 0))
+ mdc_clear_replay_flag(req, intent->it_status);
+
+ DEBUG_REQ(D_RPCTRACE, req, "op: %d disposition: %x, status: %d",
+ it->it_op, intent->it_disposition, intent->it_status);
+
+ /* We know what to expect, so we do any byte flipping required here */
+ if (it->it_op & (IT_OPEN | IT_UNLINK | IT_LOOKUP | IT_GETATTR)) {
+ struct mdt_body *body;
+
+ body = req_capsule_server_get(pill, &RMF_MDT_BODY);
+ if (body == NULL) {
+ CERROR("Can't swab mdt_body\n");
+ return -EPROTO;
+ }
+
+ if (it_disposition(it, DISP_OPEN_OPEN) &&
+ !it_open_error(DISP_OPEN_OPEN, it)) {
+ /*
+ * If this is a successful OPEN request, we need to set
+ * replay handler and data early, so that if replay
+ * happens immediately after swabbing below, new reply
+ * is swabbed by that handler correctly.
+ */
+ mdc_set_open_replay_data(NULL, NULL, req);
+ }
+
+ if ((body->valid & (OBD_MD_FLDIREA | OBD_MD_FLEASIZE)) != 0) {
+ void *eadata;
+
+ mdc_update_max_ea_from_body(exp, body);
+
+ /*
+ * The eadata is opaque; just check that it is there.
+ * Eventually, obd_unpackmd() will check the contents.
+ */
+ eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
+ body->eadatasize);
+ if (eadata == NULL)
+ return -EPROTO;
+
+ /* save lvb data and length in case this is for layout
+ * lock */
+ lvb_data = eadata;
+ lvb_len = body->eadatasize;
+
+ /*
+ * We save the reply LOV EA in case we have to replay a
+ * create for recovery. If we didn't allocate a large
+ * enough request buffer above we need to reallocate it
+ * here to hold the actual LOV EA.
+ *
+ * Do not save the LOV EA if the request is not going to be
+ * replayed (for example, a failed one).
+ */
+ if ((it->it_op & IT_OPEN) && req->rq_replay) {
+ void *lmm;
+ if (req_capsule_get_size(pill, &RMF_EADATA,
+ RCL_CLIENT) <
+ body->eadatasize)
+ mdc_realloc_openmsg(req, body);
+ else
+ req_capsule_shrink(pill, &RMF_EADATA,
+ body->eadatasize,
+ RCL_CLIENT);
+
+ req_capsule_set_size(pill, &RMF_EADATA,
+ RCL_CLIENT,
+ body->eadatasize);
+
+ lmm = req_capsule_client_get(pill, &RMF_EADATA);
+ if (lmm)
+ memcpy(lmm, eadata, body->eadatasize);
+ }
+ }
+
+ if (body->valid & OBD_MD_FLRMTPERM) {
+ struct mdt_remote_perm *perm;
+
+ LASSERT(client_is_remote(exp));
+ perm = req_capsule_server_swab_get(pill, &RMF_ACL,
+ lustre_swab_mdt_remote_perm);
+ if (perm == NULL)
+ return -EPROTO;
+ }
+ if (body->valid & OBD_MD_FLMDSCAPA) {
+ struct lustre_capa *capa, *p;
+
+ capa = req_capsule_server_get(pill, &RMF_CAPA1);
+ if (capa == NULL)
+ return -EPROTO;
+
+ if (it->it_op & IT_OPEN) {
+ /* client fid capa will be checked in replay */
+ p = req_capsule_client_get(pill, &RMF_CAPA2);
+ LASSERT(p);
+ *p = *capa;
+ }
+ }
+ if (body->valid & OBD_MD_FLOSSCAPA) {
+ struct lustre_capa *capa;
+
+ capa = req_capsule_server_get(pill, &RMF_CAPA2);
+ if (capa == NULL)
+ return -EPROTO;
+ }
+ } else if (it->it_op & IT_LAYOUT) {
+ /* maybe the lock was granted right away and layout
+ * is packed into RMF_DLM_LVB of req */
+ lvb_len = req_capsule_get_size(pill, &RMF_DLM_LVB, RCL_SERVER);
+ if (lvb_len > 0) {
+ lvb_data = req_capsule_server_sized_get(pill,
+ &RMF_DLM_LVB, lvb_len);
+ if (lvb_data == NULL)
+ return -EPROTO;
+ }
+ }
+
+ /* fill in stripe data for layout lock */
+ lock = ldlm_handle2lock(lockh);
+ if (lock != NULL && ldlm_has_layout(lock) && lvb_data != NULL) {
+ void *lmm;
+
+ LDLM_DEBUG(lock, "layout lock returned by: %s, lvb_len: %d\n",
+ ldlm_it2str(it->it_op), lvb_len);
+
+ OBD_ALLOC_LARGE(lmm, lvb_len);
+ if (lmm == NULL) {
+ LDLM_LOCK_PUT(lock);
+ return -ENOMEM;
+ }
+ memcpy(lmm, lvb_data, lvb_len);
+
+ /* install lvb_data */
+ lock_res_and_lock(lock);
+ if (lock->l_lvb_data == NULL) {
+ lock->l_lvb_data = lmm;
+ lock->l_lvb_len = lvb_len;
+ lmm = NULL;
+ }
+ unlock_res_and_lock(lock);
+ if (lmm != NULL)
+ OBD_FREE_LARGE(lmm, lvb_len);
+ }
+ if (lock != NULL)
+ LDLM_LOCK_PUT(lock);
+
+ return rc;
+}
+
+/* We always reserve enough space in the reply packet for a stripe MD, because
+ * we don't know in advance the file type. */
+int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
+ struct lookup_intent *it, struct md_op_data *op_data,
+ struct lustre_handle *lockh, void *lmm, int lmmsize,
+ struct ptlrpc_request **reqp, __u64 extra_lock_flags)
+{
+ struct obd_device *obddev = class_exp2obd(exp);
+ struct ptlrpc_request *req = NULL;
+ __u64 flags, saved_flags = extra_lock_flags;
+ int rc;
+ struct ldlm_res_id res_id;
+ static const ldlm_policy_data_t lookup_policy =
+ { .l_inodebits = { MDS_INODELOCK_LOOKUP } };
+ static const ldlm_policy_data_t update_policy =
+ { .l_inodebits = { MDS_INODELOCK_UPDATE } };
+ static const ldlm_policy_data_t layout_policy =
+ { .l_inodebits = { MDS_INODELOCK_LAYOUT } };
+ ldlm_policy_data_t const *policy = &lookup_policy;
+ int generation, resends = 0;
+ struct ldlm_reply *lockrep;
+ enum lvb_type lvb_type = 0;
+
+ LASSERTF(!it || einfo->ei_type == LDLM_IBITS, "lock type %d\n",
+ einfo->ei_type);
+
+ fid_build_reg_res_name(&op_data->op_fid1, &res_id);
+
+ if (it) {
+ saved_flags |= LDLM_FL_HAS_INTENT;
+ if (it->it_op & (IT_UNLINK | IT_GETATTR | IT_READDIR))
+ policy = &update_policy;
+ else if (it->it_op & IT_LAYOUT)
+ policy = &layout_policy;
+ }
+
+ LASSERT(reqp == NULL);
+
+ generation = obddev->u.cli.cl_import->imp_generation;
+resend:
+ flags = saved_flags;
+ if (!it) {
+ /* The only way right now is FLOCK; in this case we hide the flock
+ * policy as lmm, but lmmsize is 0 */
+ LASSERT(lmm && lmmsize == 0);
+ LASSERTF(einfo->ei_type == LDLM_FLOCK, "lock type %d\n",
+ einfo->ei_type);
+ policy = (ldlm_policy_data_t *)lmm;
+ res_id.name[3] = LDLM_FLOCK;
+ } else if (it->it_op & IT_OPEN) {
+ req = mdc_intent_open_pack(exp, it, op_data, lmm, lmmsize,
+ einfo->ei_cbdata);
+ policy = &update_policy;
+ einfo->ei_cbdata = NULL;
+ lmm = NULL;
+ } else if (it->it_op & IT_UNLINK) {
+ req = mdc_intent_unlink_pack(exp, it, op_data);
+ } else if (it->it_op & (IT_GETATTR | IT_LOOKUP)) {
+ req = mdc_intent_getattr_pack(exp, it, op_data);
+ } else if (it->it_op & IT_READDIR) {
+ req = mdc_enqueue_pack(exp, 0);
+ } else if (it->it_op & IT_LAYOUT) {
+ if (!imp_connect_lvb_type(class_exp2cliimp(exp)))
+ return -EOPNOTSUPP;
+
+ req = mdc_intent_layout_pack(exp, it, op_data);
+ lvb_type = LVB_T_LAYOUT;
+ } else {
+ LBUG();
+ return -EINVAL;
+ }
+
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ if (req != NULL && it && it->it_op & IT_CREAT)
+ /* ask ptlrpc not to resend on EINPROGRESS since we have our own
+ * retry logic */
+ req->rq_no_retry_einprogress = 1;
+
+ if (resends) {
+ req->rq_generation_set = 1;
+ req->rq_import_generation = generation;
+ req->rq_sent = cfs_time_current_sec() + resends;
+ }
+
+ /* It is important to obtain rpc_lock first (if applicable), so that
+ * threads that are serialised with rpc_lock are not polluting our
+ * rpcs-in-flight counter. We do not do flock request limiting, though. */
+ if (it) {
+ mdc_get_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
+ rc = mdc_enter_request(&obddev->u.cli);
+ if (rc != 0) {
+ mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
+ mdc_clear_replay_flag(req, 0);
+ ptlrpc_req_finished(req);
+ return rc;
+ }
+ }
+
+ rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id, policy, &flags, NULL,
+ 0, lvb_type, lockh, 0);
+ if (!it) {
+ /* For flock requests we immediately return without further
+ delay and let the caller deal with the rest, since the rest of
+ this function's metadata processing makes no sense for flock
+ requests anyway. But if there is a problem during comms with
+ the server (ETIMEDOUT) or any signal/kill attempt (EINTR), we
+ cannot rely on the caller; this mainly matters for F_UNLCKs
+ (explicit or automatically generated by the kernel to clean up
+ current flocks upon exit) that can't be trashed. */
+ if ((rc == -EINTR) || (rc == -ETIMEDOUT))
+ goto resend;
+ return rc;
+ }
+
+ mdc_exit_request(&obddev->u.cli);
+ mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
+
+ if (rc < 0) {
+ CERROR("ldlm_cli_enqueue: %d\n", rc);
+ mdc_clear_replay_flag(req, rc);
+ ptlrpc_req_finished(req);
+ return rc;
+ }
+
+ lockrep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
+ LASSERT(lockrep != NULL);
+
+ lockrep->lock_policy_res2 =
+ ptlrpc_status_ntoh(lockrep->lock_policy_res2);
+
+ /* Retry the create infinitely when we get -EINPROGRESS from
+ * server. This is required by the new quota design. */
+ if (it && it->it_op & IT_CREAT &&
+ (int)lockrep->lock_policy_res2 == -EINPROGRESS) {
+ mdc_clear_replay_flag(req, rc);
+ ptlrpc_req_finished(req);
+ resends++;
+
+ CDEBUG(D_HA, "%s: resend:%d op:%d "DFID"/"DFID"\n",
+ obddev->obd_name, resends, it->it_op,
+ PFID(&op_data->op_fid1), PFID(&op_data->op_fid2));
+
+ if (generation == obddev->u.cli.cl_import->imp_generation) {
+ goto resend;
+ } else {
+ CDEBUG(D_HA, "resend cross eviction\n");
+ return -EIO;
+ }
+ }
+
+ rc = mdc_finish_enqueue(exp, req, einfo, it, lockh, rc);
+ if (rc < 0) {
+ if (lustre_handle_is_used(lockh)) {
+ ldlm_lock_decref(lockh, einfo->ei_mode);
+ memset(lockh, 0, sizeof(*lockh));
+ }
+ ptlrpc_req_finished(req);
+ }
+ return rc;
+}
+
+static int mdc_finish_intent_lock(struct obd_export *exp,
+ struct ptlrpc_request *request,
+ struct md_op_data *op_data,
+ struct lookup_intent *it,
+ struct lustre_handle *lockh)
+{
+ struct lustre_handle old_lock;
+ struct mdt_body *mdt_body;
+ struct ldlm_lock *lock;
+ int rc;
+
+ LASSERT(request != NULL);
+ LASSERT(request != LP_POISON);
+ LASSERT(request->rq_repmsg != LP_POISON);
+
+ if (!it_disposition(it, DISP_IT_EXECD)) {
+ /* The server failed before it even started executing the
+ * intent, i.e. because it couldn't unpack the request. */
+ LASSERT(it->d.lustre.it_status != 0);
+ return it->d.lustre.it_status;
+ }
+ rc = it_open_error(DISP_IT_EXECD, it);
+ if (rc)
+ return rc;
+
+ mdt_body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
+ LASSERT(mdt_body != NULL); /* mdc_enqueue checked */
+
+ /* If we were revalidating a fid/name pair, mark the intent in
+ * case we fail and get called again from lookup */
+ if (fid_is_sane(&op_data->op_fid2) &&
+ it->it_create_mode & M_CHECK_STALE &&
+ it->it_op != IT_GETATTR) {
+ it_set_disposition(it, DISP_ENQ_COMPLETE);
+
+ /* Also: did we find the same inode? */
+ /* The server can return one of two fids:
+ * op_fid2 - newly allocated fid - if the file is created.
+ * op_fid3 - existing fid - if the file is only opened.
+ * op_fid3 is saved in lmv_intent_open */
+ if ((!lu_fid_eq(&op_data->op_fid2, &mdt_body->fid1)) &&
+ (!lu_fid_eq(&op_data->op_fid3, &mdt_body->fid1))) {
+ CDEBUG(D_DENTRY, "Found stale data "DFID"("DFID")/"DFID
+ "\n", PFID(&op_data->op_fid2),
+ PFID(&op_data->op_fid2), PFID(&mdt_body->fid1));
+ return -ESTALE;
+ }
+ }
+
+ rc = it_open_error(DISP_LOOKUP_EXECD, it);
+ if (rc)
+ return rc;
+
+ /* Keep requests around for the multiple phases of the call;
+ * this shows that the DISP_XX must guarantee we make it into the call.
+ */
+ if (!it_disposition(it, DISP_ENQ_CREATE_REF) &&
+ it_disposition(it, DISP_OPEN_CREATE) &&
+ !it_open_error(DISP_OPEN_CREATE, it)) {
+ it_set_disposition(it, DISP_ENQ_CREATE_REF);
+ ptlrpc_request_addref(request); /* balanced in ll_create_node */
+ }
+ if (!it_disposition(it, DISP_ENQ_OPEN_REF) &&
+ it_disposition(it, DISP_OPEN_OPEN) &&
+ !it_open_error(DISP_OPEN_OPEN, it)) {
+ it_set_disposition(it, DISP_ENQ_OPEN_REF);
+ ptlrpc_request_addref(request); /* balanced in ll_file_open */
+ /* BUG 11546 - eviction in the middle of open rpc processing */
+ OBD_FAIL_TIMEOUT(OBD_FAIL_MDC_ENQUEUE_PAUSE, obd_timeout);
+ }
+
+ if (it->it_op & IT_CREAT) {
+ /* XXX this belongs in ll_create_it */
+ } else if (it->it_op == IT_OPEN) {
+ LASSERT(!it_disposition(it, DISP_OPEN_CREATE));
+ } else {
+ LASSERT(it->it_op & (IT_GETATTR | IT_LOOKUP | IT_LAYOUT));
+ }
+
+ /* If we already have a matching lock, then cancel the new
+ * one. We have to set the data here instead of in
+ * mdc_enqueue, because we need to use the child's inode as
+ * the l_ast_data to match, and that's not available until
+ * intent_finish has performed the iget(). */
+ lock = ldlm_handle2lock(lockh);
+ if (lock) {
+ ldlm_policy_data_t policy = lock->l_policy_data;
+ LDLM_DEBUG(lock, "matching against this");
+
+ LASSERTF(fid_res_name_eq(&mdt_body->fid1,
+ &lock->l_resource->lr_name),
+ "Lock res_id: %lu/%lu/%lu, fid: %lu/%lu/%lu.\n",
+ (unsigned long)lock->l_resource->lr_name.name[0],
+ (unsigned long)lock->l_resource->lr_name.name[1],
+ (unsigned long)lock->l_resource->lr_name.name[2],
+ (unsigned long)fid_seq(&mdt_body->fid1),
+ (unsigned long)fid_oid(&mdt_body->fid1),
+ (unsigned long)fid_ver(&mdt_body->fid1));
+ LDLM_LOCK_PUT(lock);
+
+ memcpy(&old_lock, lockh, sizeof(*lockh));
+ if (ldlm_lock_match(NULL, LDLM_FL_BLOCK_GRANTED, NULL,
+ LDLM_IBITS, &policy, LCK_NL, &old_lock, 0)) {
+ ldlm_lock_decref_and_cancel(lockh,
+ it->d.lustre.it_lock_mode);
+ memcpy(lockh, &old_lock, sizeof(old_lock));
+ it->d.lustre.it_lock_handle = lockh->cookie;
+ }
+ }
+ CDEBUG(D_DENTRY, "D_IT dentry %.*s intent: %s status %d disp %x rc %d\n",
+ op_data->op_namelen, op_data->op_name, ldlm_it2str(it->it_op),
+ it->d.lustre.it_status, it->d.lustre.it_disposition, rc);
+ return rc;
+}
+
+int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
+ struct lu_fid *fid, __u64 *bits)
+{
+ /* We could just return 1 immediately, but since we should only
+ * be called in revalidate_it if we already have a lock, let's
+ * verify that. */
+ struct ldlm_res_id res_id;
+ struct lustre_handle lockh;
+ ldlm_policy_data_t policy;
+ ldlm_mode_t mode;
+
+ if (it->d.lustre.it_lock_handle) {
+ lockh.cookie = it->d.lustre.it_lock_handle;
+ mode = ldlm_revalidate_lock_handle(&lockh, bits);
+ } else {
+ fid_build_reg_res_name(fid, &res_id);
+ switch (it->it_op) {
+ case IT_GETATTR:
+ policy.l_inodebits.bits = MDS_INODELOCK_UPDATE;
+ break;
+ case IT_LAYOUT:
+ policy.l_inodebits.bits = MDS_INODELOCK_LAYOUT;
+ break;
+ default:
+ policy.l_inodebits.bits = MDS_INODELOCK_LOOKUP;
+ break;
+ }
+ mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
+ LDLM_FL_BLOCK_GRANTED, &res_id,
+ LDLM_IBITS, &policy,
+ LCK_CR|LCK_CW|LCK_PR|LCK_PW, &lockh, 0);
+ }
+
+ if (mode) {
+ it->d.lustre.it_lock_handle = lockh.cookie;
+ it->d.lustre.it_lock_mode = mode;
+ } else {
+ it->d.lustre.it_lock_handle = 0;
+ it->d.lustre.it_lock_mode = 0;
+ }
+
+ return !!mode;
+}
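+
+/*
+ * Usage note (sketch): mdc_intent_lock() below calls this for
+ * IT_LOOKUP/IT_GETATTR revalidation; a non-zero return means a compatible
+ * granted lock was found and it_lock_handle/it_lock_mode have been filled
+ * in, so no new enqueue is needed for the revalidation.
+ */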
+
+/*
+ * This long block is all about fixing up the lock and request state
+ * so that it is correct as of the moment _before_ the operation was
+ * applied; that way, the VFS will think that everything is normal and
+ * call Lustre's regular VFS methods.
+ *
+ * If we're performing a creation, that means that unless the creation
+ * failed with EEXIST, we should fake up a negative dentry.
+ *
+ * For everything else, we want to lookup to succeed.
+ *
+ * One additional note: if CREATE or OPEN succeeded, we add an extra
+ * reference to the request because we need to keep it around until
+ * ll_create/ll_open gets called.
+ *
+ * The server will return to us, in it_disposition, an indication of
+ * exactly what d.lustre.it_status refers to.
+ *
+ * If DISP_OPEN_OPEN is set, then d.lustre.it_status refers to the open() call,
+ * otherwise if DISP_OPEN_CREATE is set, then it_status is the
+ * creation failure mode. In either case, one of DISP_LOOKUP_NEG or
+ * DISP_LOOKUP_POS will be set, indicating whether the child lookup
+ * was successful.
+ *
+ * Else, if DISP_LOOKUP_EXECD then d.lustre.it_status is the rc of the
+ * child lookup.
+ */
+int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
+ void *lmm, int lmmsize, struct lookup_intent *it,
+ int lookup_flags, struct ptlrpc_request **reqp,
+ ldlm_blocking_callback cb_blocking,
+ __u64 extra_lock_flags)
+{
+ struct lustre_handle lockh;
+ int rc = 0;
+
+ LASSERT(it);
+
+ CDEBUG(D_DLMTRACE, "(name: %.*s,"DFID") in obj "DFID
+ ", intent: %s flags %#o\n", op_data->op_namelen,
+ op_data->op_name, PFID(&op_data->op_fid2),
+ PFID(&op_data->op_fid1), ldlm_it2str(it->it_op),
+ it->it_flags);
+
+ lockh.cookie = 0;
+ if (fid_is_sane(&op_data->op_fid2) &&
+ (it->it_op & (IT_LOOKUP | IT_GETATTR))) {
+ /* We could just return 1 immediately, but since we should only
+ * be called in revalidate_it if we already have a lock, let's
+ * verify that. */
+ it->d.lustre.it_lock_handle = 0;
+ rc = mdc_revalidate_lock(exp, it, &op_data->op_fid2, NULL);
+ /* Only return failure if it was not GETATTR by cfid
+ (from inode_revalidate) */
+ if (rc || op_data->op_namelen != 0)
+ return rc;
+ }
+
+ /* lookup_it may be called only after revalidate_it has run, because
+ * revalidate_it cannot return errors, only zero. Returning zero causes
+ * this call to lookup, which *can* return an error.
+ *
+ * We only want to execute the request associated with the intent one
+ * time, however, so don't send the request again. Instead, skip past
+ * this and use the request from revalidate. In this case, revalidate
+ * never dropped its reference, so the refcounts are all OK */
+ if (!it_disposition(it, DISP_ENQ_COMPLETE)) {
+ struct ldlm_enqueue_info einfo = {
+ .ei_type = LDLM_IBITS,
+ .ei_mode = it_to_lock_mode(it),
+ .ei_cb_bl = cb_blocking,
+ .ei_cb_cp = ldlm_completion_ast,
+ };
+
+ /* In case the upper layer did not allocate the fid, do it now. */
+ if (!fid_is_sane(&op_data->op_fid2) && it->it_op & IT_CREAT) {
+ rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
+ if (rc < 0) {
+ CERROR("Can't alloc new fid, rc %d\n", rc);
+ return rc;
+ }
+ }
+ rc = mdc_enqueue(exp, &einfo, it, op_data, &lockh,
+ lmm, lmmsize, NULL, extra_lock_flags);
+ if (rc < 0)
+ return rc;
+ } else if (!fid_is_sane(&op_data->op_fid2) ||
+ !(it->it_create_mode & M_CHECK_STALE)) {
+ /* DISP_ENQ_COMPLETE set means there is extra reference on
+ * request referenced from this intent, saved for subsequent
+ * lookup. This path is executed when we proceed to this
+ * lookup, so we clear DISP_ENQ_COMPLETE */
+ it_clear_disposition(it, DISP_ENQ_COMPLETE);
+ }
+ *reqp = it->d.lustre.it_data;
+ rc = mdc_finish_intent_lock(exp, *reqp, op_data, it, &lockh);
+ return rc;
+}
+
+static int mdc_intent_getattr_async_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
+ void *args, int rc)
+{
+ struct mdc_getattr_args *ga = args;
+ struct obd_export *exp = ga->ga_exp;
+ struct md_enqueue_info *minfo = ga->ga_minfo;
+ struct ldlm_enqueue_info *einfo = ga->ga_einfo;
+ struct lookup_intent *it;
+ struct lustre_handle *lockh;
+ struct obd_device *obddev;
+ struct ldlm_reply *lockrep;
+ __u64 flags = LDLM_FL_HAS_INTENT;
+
+ it = &minfo->mi_it;
+ lockh = &minfo->mi_lockh;
+
+ obddev = class_exp2obd(exp);
+
+ mdc_exit_request(&obddev->u.cli);
+ if (OBD_FAIL_CHECK(OBD_FAIL_MDC_GETATTR_ENQUEUE))
+ rc = -ETIMEDOUT;
+
+ rc = ldlm_cli_enqueue_fini(exp, req, einfo->ei_type, 1, einfo->ei_mode,
+ &flags, NULL, 0, lockh, rc);
+ if (rc < 0) {
+ CERROR("ldlm_cli_enqueue_fini: %d\n", rc);
+ mdc_clear_replay_flag(req, rc);
+ GOTO(out, rc);
+ }
+
+ lockrep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
+ LASSERT(lockrep != NULL);
+
+ lockrep->lock_policy_res2 =
+ ptlrpc_status_ntoh(lockrep->lock_policy_res2);
+
+ rc = mdc_finish_enqueue(exp, req, einfo, it, lockh, rc);
+ if (rc)
+ GOTO(out, rc);
+
+ rc = mdc_finish_intent_lock(exp, req, &minfo->mi_data, it, lockh);
+
+out:
+ OBD_FREE_PTR(einfo);
+ minfo->mi_cb(req, minfo, rc);
+ return 0;
+}
+
+int mdc_intent_getattr_async(struct obd_export *exp,
+ struct md_enqueue_info *minfo,
+ struct ldlm_enqueue_info *einfo)
+{
+ struct md_op_data *op_data = &minfo->mi_data;
+ struct lookup_intent *it = &minfo->mi_it;
+ struct ptlrpc_request *req;
+ struct mdc_getattr_args *ga;
+ struct obd_device *obddev = class_exp2obd(exp);
+ struct ldlm_res_id res_id;
+ /* XXX: Both MDS_INODELOCK_LOOKUP and MDS_INODELOCK_UPDATE are needed
+ * for statahead currently. Consider CMD in the future: these two bits
+ * may be managed by different MDSes, so this should be adjusted then. */
+ ldlm_policy_data_t policy = {
+ .l_inodebits = { MDS_INODELOCK_LOOKUP |
+ MDS_INODELOCK_UPDATE }
+ };
+ int rc = 0;
+ __u64 flags = LDLM_FL_HAS_INTENT;
+
+ CDEBUG(D_DLMTRACE, "name: %.*s in inode "DFID", intent: %s flags %#o\n",
+ op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
+ ldlm_it2str(it->it_op), it->it_flags);
+
+ fid_build_reg_res_name(&op_data->op_fid1, &res_id);
+ req = mdc_intent_getattr_pack(exp, it, op_data);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ rc = mdc_enter_request(&obddev->u.cli);
+ if (rc != 0) {
+ ptlrpc_req_finished(req);
+ return rc;
+ }
+
+ rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id, &policy, &flags, NULL,
+ 0, LVB_T_NONE, &minfo->mi_lockh, 1);
+ if (rc < 0) {
+ mdc_exit_request(&obddev->u.cli);
+ ptlrpc_req_finished(req);
+ return rc;
+ }
+
+ CLASSERT(sizeof(*ga) <= sizeof(req->rq_async_args));
+ ga = ptlrpc_req_async_args(req);
+ ga->ga_exp = exp;
+ ga->ga_minfo = minfo;
+ ga->ga_einfo = einfo;
+
+ req->rq_interpret_reply = mdc_intent_getattr_async_interpret;
+ ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
+
+ return 0;
+}
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_reint.c b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
new file mode 100644
index 0000000..9f3a345
--- /dev/null
+++ b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
@@ -0,0 +1,483 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_MDC
+
+# include <linux/module.h>
+# include <linux/kernel.h>
+
+#include <obd_class.h>
+#include "mdc_internal.h"
+#include <lustre_fid.h>
+
+/* mdc_setattr does its own semaphore handling */
+static int mdc_reint(struct ptlrpc_request *request,
+ struct mdc_rpc_lock *rpc_lock,
+ int level)
+{
+ int rc;
+
+ request->rq_send_state = level;
+
+ mdc_get_rpc_lock(rpc_lock, NULL);
+ rc = ptlrpc_queue_wait(request);
+ mdc_put_rpc_lock(rpc_lock, NULL);
+ if (rc)
+ CDEBUG(D_INFO, "error in handling %d\n", rc);
+ else if (!req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY)) {
+ rc = -EPROTO;
+ }
+ return rc;
+}
+
+/* Find and cancel locally the locks matched by inode @bits & @mode in the
+ * resource found by @fid. Found locks are added to the @cancels list. Returns
+ * the number of locks added to the @cancels list. */
+int mdc_resource_get_unused(struct obd_export *exp, struct lu_fid *fid,
+ struct list_head *cancels, ldlm_mode_t mode,
+ __u64 bits)
+{
+ struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
+ ldlm_policy_data_t policy = {{0}};
+ struct ldlm_res_id res_id;
+ struct ldlm_resource *res;
+ int count;
+
+ /* Return, i.e. cancel nothing, only if ELC is supported (flag in
+ * export) but disabled through procfs (flag in NS).
+ *
+ * This is distinct from the case when ELC is not supported at all,
+ * in which we still want to cancel locks in advance and just cancel
+ * them locally, without sending any RPC. */
+ if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
+ return 0;
+
+ fid_build_reg_res_name(fid, &res_id);
+ res = ldlm_resource_get(exp->exp_obd->obd_namespace,
+ NULL, &res_id, 0, 0);
+ if (res == NULL)
+ return 0;
+ LDLM_RESOURCE_ADDREF(res);
+ /* Initialize ibits lock policy. */
+ policy.l_inodebits.bits = bits;
+ count = ldlm_cancel_resource_local(res, cancels, &policy,
+ mode, 0, 0, NULL);
+ LDLM_RESOURCE_DELREF(res);
+ ldlm_resource_putref(res);
+ return count;
+}
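+
+/*
+ * Typical caller pattern (sketch, as used by mdc_setattr()/mdc_unlink()
+ * below): collect conflicting local locks first and then hand the list to
+ * the ELC-aware request constructor:
+ *
+ *	count = mdc_resource_get_unused(exp, &op_data->op_fid1, &cancels,
+ *					LCK_EX, MDS_INODELOCK_UPDATE);
+ *	rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
+ */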
+
+int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
+ void *ea, int ealen, void *ea2, int ea2len,
+ struct ptlrpc_request **request, struct md_open_data **mod)
+{
+ LIST_HEAD(cancels);
+ struct ptlrpc_request *req;
+ struct mdc_rpc_lock *rpc_lock;
+ struct obd_device *obd = exp->exp_obd;
+ int count = 0, rc;
+ __u64 bits;
+
+ LASSERT(op_data != NULL);
+
+ bits = MDS_INODELOCK_UPDATE;
+ if (op_data->op_attr.ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID))
+ bits |= MDS_INODELOCK_LOOKUP;
+ if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
+ (fid_is_sane(&op_data->op_fid1)) &&
+ !OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET))
+ count = mdc_resource_get_unused(exp, &op_data->op_fid1,
+ &cancels, LCK_EX, bits);
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+ &RQF_MDS_REINT_SETATTR);
+ if (req == NULL) {
+ ldlm_lock_list_put(&cancels, l_bl_ast, count);
+ return -ENOMEM;
+ }
+ mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
+ if ((op_data->op_flags & (MF_SOM_CHANGE | MF_EPOCH_OPEN)) == 0)
+ req_capsule_set_size(&req->rq_pill, &RMF_MDT_EPOCH, RCL_CLIENT,
+ 0);
+ req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, ealen);
+ req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_CLIENT,
+ ea2len);
+
+ rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ rpc_lock = obd->u.cli.cl_rpc_lock;
+
+ if (op_data->op_attr.ia_valid & (ATTR_MTIME | ATTR_CTIME))
+ CDEBUG(D_INODE, "setting mtime "CFS_TIME_T
+ ", ctime "CFS_TIME_T"\n",
+ LTIME_S(op_data->op_attr.ia_mtime),
+ LTIME_S(op_data->op_attr.ia_ctime));
+ mdc_setattr_pack(req, op_data, ea, ealen, ea2, ea2len);
+
+ ptlrpc_request_set_replen(req);
+ if (mod && (op_data->op_flags & MF_EPOCH_OPEN) &&
+ req->rq_import->imp_replayable) {
+ LASSERT(*mod == NULL);
+
+ *mod = obd_mod_alloc();
+ if (*mod == NULL) {
+ DEBUG_REQ(D_ERROR, req,
+ "Can't allocate md_open_data");
+ } else {
+ req->rq_replay = 1;
+ req->rq_cb_data = *mod;
+ (*mod)->mod_open_req = req;
+ req->rq_commit_cb = mdc_commit_open;
+ /**
+ * Take an extra reference on \var mod; it protects \var
+ * mod from being freed on eviction (the commit callback is
+ * called despite the rq_replay flag).
+ * It will be put in mdc_done_writing().
+ */
+ obd_mod_get(*mod);
+ }
+ }
+
+ rc = mdc_reint(req, rpc_lock, LUSTRE_IMP_FULL);
+
+ /* Save the obtained info in the original RPC for the replay case. */
+ if (rc == 0 && (op_data->op_flags & MF_EPOCH_OPEN)) {
+ struct mdt_ioepoch *epoch;
+ struct mdt_body *body;
+
+ epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ LASSERT(epoch != NULL);
+ LASSERT(body != NULL);
+ epoch->handle = body->handle;
+ epoch->ioepoch = body->ioepoch;
+ req->rq_replay_cb = mdc_replay_open;
+ /** bug 3633: the open may already be committed, and an ESTALE answer is not an error */
+ } else if (rc == -ESTALE && (op_data->op_flags & MF_SOM_CHANGE)) {
+ rc = 0;
+ } else if (rc == -ERESTARTSYS) {
+ rc = 0;
+ }
+ *request = req;
+ if (rc && req->rq_commit_cb) {
+ /* Put the extra reference on \var mod in the error case. */
+ obd_mod_put(*mod);
+ req->rq_commit_cb(req);
+ }
+ return rc;
+}
+
+int mdc_create(struct obd_export *exp, struct md_op_data *op_data,
+ const void *data, int datalen, int mode, __u32 uid, __u32 gid,
+ cfs_cap_t cap_effective, __u64 rdev,
+ struct ptlrpc_request **request)
+{
+ struct ptlrpc_request *req;
+ int level, rc;
+ int count, resends = 0;
+ struct obd_import *import = exp->exp_obd->u.cli.cl_import;
+ int generation = import->imp_generation;
+ LIST_HEAD(cancels);
+
+ /* In case the upper layer did not allocate the fid, do it now. */
+ if (!fid_is_sane(&op_data->op_fid2)) {
+ /*
+ * mdc_fid_alloc() may return errno 1 in case of a switch to a new
+ * sequence; handle this.
+ */
+ rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
+ if (rc < 0) {
+ CERROR("Can't alloc new fid, rc %d\n", rc);
+ return rc;
+ }
+ }
+
+rebuild:
+ count = 0;
+ if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
+ (fid_is_sane(&op_data->op_fid1)))
+ count = mdc_resource_get_unused(exp, &op_data->op_fid1,
+ &cancels, LCK_EX,
+ MDS_INODELOCK_UPDATE);
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+ &RQF_MDS_REINT_CREATE_RMT_ACL);
+ if (req == NULL) {
+ ldlm_lock_list_put(&cancels, l_bl_ast, count);
+ return -ENOMEM;
+ }
+ mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
+ req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
+ op_data->op_namelen + 1);
+ req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT,
+ data && datalen ? datalen : 0);
+
+ rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ /*
+ * mdc_create_pack() fills msg->bufs[1] with name and msg->bufs[2] with
+ * tgt, for symlinks or lov MD data.
+ */
+ mdc_create_pack(req, op_data, data, datalen, mode, uid,
+ gid, cap_effective, rdev);
+
+ ptlrpc_request_set_replen(req);
+
+ /* ask ptlrpc not to resend on EINPROGRESS since we have our own retry
+ * logic here */
+ req->rq_no_retry_einprogress = 1;
+
+ if (resends) {
+ req->rq_generation_set = 1;
+ req->rq_import_generation = generation;
+ req->rq_sent = cfs_time_current_sec() + resends;
+ }
+ level = LUSTRE_IMP_FULL;
+ resend:
+ rc = mdc_reint(req, exp->exp_obd->u.cli.cl_rpc_lock, level);
+
+ /* Resend if we were told to. */
+ if (rc == -ERESTARTSYS) {
+ level = LUSTRE_IMP_RECOVER;
+ goto resend;
+ } else if (rc == -EINPROGRESS) {
+ /* Retry the create infinitely until it succeeds or we get another
+ * error code. */
+ ptlrpc_req_finished(req);
+ resends++;
+
+ CDEBUG(D_HA, "%s: resend:%d create on "DFID"/"DFID"\n",
+ exp->exp_obd->obd_name, resends,
+ PFID(&op_data->op_fid1), PFID(&op_data->op_fid2));
+
+ if (generation == import->imp_generation) {
+ goto rebuild;
+ } else {
+ CDEBUG(D_HA, "resend cross eviction\n");
+ return -EIO;
+ }
+ } else if (rc == 0) {
+ struct mdt_body *body;
+ struct lustre_capa *capa;
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ LASSERT(body);
+ if (body->valid & OBD_MD_FLMDSCAPA) {
+ capa = req_capsule_server_get(&req->rq_pill,
+ &RMF_CAPA1);
+ if (capa == NULL)
+ rc = -EPROTO;
+ }
+ }
+
+ *request = req;
+ return rc;
+}
+
+int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
+ struct ptlrpc_request **request)
+{
+ LIST_HEAD(cancels);
+ struct obd_device *obd = class_exp2obd(exp);
+ struct ptlrpc_request *req = *request;
+ int count = 0, rc;
+
+ LASSERT(req == NULL);
+
+ if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
+ (fid_is_sane(&op_data->op_fid1)) &&
+ !OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET))
+ count = mdc_resource_get_unused(exp, &op_data->op_fid1,
+ &cancels, LCK_EX,
+ MDS_INODELOCK_UPDATE);
+ if ((op_data->op_flags & MF_MDC_CANCEL_FID3) &&
+ (fid_is_sane(&op_data->op_fid3)) &&
+ !OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET))
+ count += mdc_resource_get_unused(exp, &op_data->op_fid3,
+ &cancels, LCK_EX,
+ MDS_INODELOCK_FULL);
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+ &RQF_MDS_REINT_UNLINK);
+ if (req == NULL) {
+ ldlm_lock_list_put(&cancels, l_bl_ast, count);
+ return -ENOMEM;
+ }
+ mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
+ req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
+ op_data->op_namelen + 1);
+
+ rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ mdc_unlink_pack(req, op_data);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
+ obd->u.cli.cl_max_mds_easize);
+ req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER,
+ obd->u.cli.cl_max_mds_cookiesize);
+ ptlrpc_request_set_replen(req);
+
+ *request = req;
+
+ rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL);
+ if (rc == -ERESTARTSYS)
+ rc = 0;
+ return rc;
+}
+
+int mdc_link(struct obd_export *exp, struct md_op_data *op_data,
+ struct ptlrpc_request **request)
+{
+ LIST_HEAD(cancels);
+ struct obd_device *obd = exp->exp_obd;
+ struct ptlrpc_request *req;
+ int count = 0, rc;
+
+ if ((op_data->op_flags & MF_MDC_CANCEL_FID2) &&
+ (fid_is_sane(&op_data->op_fid2)))
+ count = mdc_resource_get_unused(exp, &op_data->op_fid2,
+ &cancels, LCK_EX,
+ MDS_INODELOCK_UPDATE);
+ if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
+ (fid_is_sane(&op_data->op_fid1)))
+ count += mdc_resource_get_unused(exp, &op_data->op_fid1,
+ &cancels, LCK_EX,
+ MDS_INODELOCK_UPDATE);
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_LINK);
+ if (req == NULL) {
+ ldlm_lock_list_put(&cancels, l_bl_ast, count);
+ return -ENOMEM;
+ }
+ mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
+ mdc_set_capa_size(req, &RMF_CAPA2, op_data->op_capa2);
+ req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
+ op_data->op_namelen + 1);
+
+ rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ mdc_link_pack(req, op_data);
+ ptlrpc_request_set_replen(req);
+
+ rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL);
+ *request = req;
+ if (rc == -ERESTARTSYS)
+ rc = 0;
+
+ return rc;
+}
+
+int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
+ const char *old, int oldlen, const char *new, int newlen,
+ struct ptlrpc_request **request)
+{
+ LIST_HEAD(cancels);
+ struct obd_device *obd = exp->exp_obd;
+ struct ptlrpc_request *req;
+ int count = 0, rc;
+
+ if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
+ (fid_is_sane(&op_data->op_fid1)))
+ count = mdc_resource_get_unused(exp, &op_data->op_fid1,
+ &cancels, LCK_EX,
+ MDS_INODELOCK_UPDATE);
+ if ((op_data->op_flags & MF_MDC_CANCEL_FID2) &&
+ (fid_is_sane(&op_data->op_fid2)))
+ count += mdc_resource_get_unused(exp, &op_data->op_fid2,
+ &cancels, LCK_EX,
+ MDS_INODELOCK_UPDATE);
+ if ((op_data->op_flags & MF_MDC_CANCEL_FID3) &&
+ (fid_is_sane(&op_data->op_fid3)))
+ count += mdc_resource_get_unused(exp, &op_data->op_fid3,
+ &cancels, LCK_EX,
+ MDS_INODELOCK_LOOKUP);
+ if ((op_data->op_flags & MF_MDC_CANCEL_FID4) &&
+ (fid_is_sane(&op_data->op_fid4)))
+ count += mdc_resource_get_unused(exp, &op_data->op_fid4,
+ &cancels, LCK_EX,
+ MDS_INODELOCK_FULL);
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+ &RQF_MDS_REINT_RENAME);
+ if (req == NULL) {
+ ldlm_lock_list_put(&cancels, l_bl_ast, count);
+ return -ENOMEM;
+ }
+
+ mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
+ mdc_set_capa_size(req, &RMF_CAPA2, op_data->op_capa2);
+ req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, oldlen + 1);
+ req_capsule_set_size(&req->rq_pill, &RMF_SYMTGT, RCL_CLIENT, newlen + 1);
+
+ rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ if (exp_connect_cancelset(exp) && req)
+ ldlm_cli_cancel_list(&cancels, count, req, 0);
+
+ mdc_rename_pack(req, op_data, old, oldlen, new, newlen);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
+ obd->u.cli.cl_max_mds_easize);
+ req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER,
+ obd->u.cli.cl_max_mds_cookiesize);
+ ptlrpc_request_set_replen(req);
+
+ rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL);
+ *request = req;
+ if (rc == -ERESTARTSYS)
+ rc = 0;
+
+ return rc;
+}
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
new file mode 100644
index 0000000..ed3a7a0
--- /dev/null
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -0,0 +1,2687 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_MDC
+
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/miscdevice.h>
+#include <linux/init.h>
+#include <linux/utsname.h>
+
+#include <lustre_acl.h>
+#include <obd_class.h>
+#include <lustre_fid.h>
+#include <lprocfs_status.h>
+#include <lustre_param.h>
+#include <lustre_log.h>
+
+#include "mdc_internal.h"
+
+#define REQUEST_MINOR 244
+
+struct mdc_renew_capa_args {
+ struct obd_capa *ra_oc;
+ renew_capa_cb_t ra_cb;
+};
+
+static int mdc_cleanup(struct obd_device *obd);
+
+int mdc_unpack_capa(struct obd_export *exp, struct ptlrpc_request *req,
+ const struct req_msg_field *field, struct obd_capa **oc)
+{
+ struct lustre_capa *capa;
+ struct obd_capa *c;
+
+ /* swabbed already in mdc_enqueue */
+ capa = req_capsule_server_get(&req->rq_pill, field);
+ if (capa == NULL)
+ return -EPROTO;
+
+ c = alloc_capa(CAPA_SITE_CLIENT);
+ if (IS_ERR(c)) {
+ CDEBUG(D_INFO, "alloc capa failed!\n");
+ return PTR_ERR(c);
+ }
+
+ c->c_capa = *capa;
+ *oc = c;
+ return 0;
+}
+
+static inline int mdc_queue_wait(struct ptlrpc_request *req)
+{
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+ int rc;
+
+ /* mdc_enter_request() ensures that this client has no more
+ * than cl_max_rpcs_in_flight RPCs simultaneously in flight
+ * against an MDT. */
+ rc = mdc_enter_request(cli);
+ if (rc != 0)
+ return rc;
+
+ rc = ptlrpc_queue_wait(req);
+ mdc_exit_request(cli);
+
+ return rc;
+}
+
+/* Helper that implements most of mdc_getstatus and signal_completed_replay. */
+/* XXX this should become mdc_get_info("key"), sending MDS_GET_INFO RPC */
+static int send_getstatus(struct obd_import *imp, struct lu_fid *rootfid,
+ struct obd_capa **pc, int level, int msg_flags)
+{
+ struct ptlrpc_request *req;
+ struct mdt_body *body;
+ int rc;
+
+ req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_GETSTATUS,
+ LUSTRE_MDS_VERSION, MDS_GETSTATUS);
+ if (req == NULL)
+ return -ENOMEM;
+
+ mdc_pack_body(req, NULL, NULL, 0, 0, -1, 0);
+ lustre_msg_add_flags(req->rq_reqmsg, msg_flags);
+ req->rq_send_state = level;
+
+ ptlrpc_request_set_replen(req);
+
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ GOTO(out, rc);
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ if (body == NULL)
+ GOTO(out, rc = -EPROTO);
+
+ if (body->valid & OBD_MD_FLMDSCAPA) {
+ rc = mdc_unpack_capa(NULL, req, &RMF_CAPA1, pc);
+ if (rc)
+ GOTO(out, rc);
+ }
+
+ *rootfid = body->fid1;
+ CDEBUG(D_NET,
+ "root fid="DFID", last_committed="LPU64"\n",
+ PFID(rootfid),
+ lustre_msg_get_last_committed(req->rq_repmsg));
+out:
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+/* This should be mdc_get_info("rootfid") */
+int mdc_getstatus(struct obd_export *exp, struct lu_fid *rootfid,
+ struct obd_capa **pc)
+{
+ return send_getstatus(class_exp2cliimp(exp), rootfid, pc,
+ LUSTRE_IMP_FULL, 0);
+}
+
+/*
+ * This function is currently known to always claim that it will receive 4
+ * buffers from the server. Even when acl_size and md_size are zero, the RPC
+ * header still contains 4 fields and the RPC itself carries zero-sized
+ * fields. This is because mdt_getattr*() _always_ returns 4 fields; if the
+ * ACL is not needed, and thus zero, the field is shrunk to zero size, and
+ * the same goes for md_size. This becomes a problem when the client expects
+ * a smaller number of fields. The issue will be fixed later, once the client
+ * becomes aware of RPC layouts. --umka
+ */
+static int mdc_getattr_common(struct obd_export *exp,
+ struct ptlrpc_request *req)
+{
+ struct req_capsule *pill = &req->rq_pill;
+ struct mdt_body *body;
+ void *eadata;
+ int rc;
+
+ /* Request message already built. */
+ rc = ptlrpc_queue_wait(req);
+ if (rc != 0)
+ return rc;
+
+ /* sanity check for the reply */
+ body = req_capsule_server_get(pill, &RMF_MDT_BODY);
+ if (body == NULL)
+ return -EPROTO;
+
+ CDEBUG(D_NET, "mode: %o\n", body->mode);
+
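+ /* If the reply carries EA data, update the cached maximum EA size
+ * and verify the reply buffer really contains that many bytes. */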
+ if (body->eadatasize != 0) {
+ mdc_update_max_ea_from_body(exp, body);
+
+ eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
+ body->eadatasize);
+ if (eadata == NULL)
+ return -EPROTO;
+ }
+
+ if (body->valid & OBD_MD_FLRMTPERM) {
+ struct mdt_remote_perm *perm;
+
+ LASSERT(client_is_remote(exp));
+ perm = req_capsule_server_swab_get(pill, &RMF_ACL,
+ lustre_swab_mdt_remote_perm);
+ if (perm == NULL)
+ return -EPROTO;
+ }
+
+ if (body->valid & OBD_MD_FLMDSCAPA) {
+ struct lustre_capa *capa;
+ capa = req_capsule_server_get(pill, &RMF_CAPA1);
+ if (capa == NULL)
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data,
+ struct ptlrpc_request **request)
+{
+ struct ptlrpc_request *req;
+ int rc;
+
+ /* Single MDS without an LMV case */
+ if (op_data->op_flags & MF_GET_MDT_IDX) {
+ op_data->op_mds = 0;
+ return 0;
+ }
+ *request = NULL;
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR);
+ if (req == NULL)
+ return -ENOMEM;
+
+ mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
+
+ rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ mdc_pack_body(req, &op_data->op_fid1, op_data->op_capa1,
+ op_data->op_valid, op_data->op_mode, -1, 0);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
+ op_data->op_mode);
+ if (op_data->op_valid & OBD_MD_FLRMTPERM) {
+ LASSERT(client_is_remote(exp));
+ req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
+ sizeof(struct mdt_remote_perm));
+ }
+ ptlrpc_request_set_replen(req);
+
+ rc = mdc_getattr_common(exp, req);
+ if (rc)
+ ptlrpc_req_finished(req);
+ else
+ *request = req;
+ return rc;
+}
+
+int mdc_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
+ struct ptlrpc_request **request)
+{
+ struct ptlrpc_request *req;
+ int rc;
+
+ *request = NULL;
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+ &RQF_MDS_GETATTR_NAME);
+ if (req == NULL)
+ return -ENOMEM;
+
+ mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
+ req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
+ op_data->op_namelen + 1);
+
+ rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR_NAME);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ mdc_pack_body(req, &op_data->op_fid1, op_data->op_capa1,
+ op_data->op_valid, op_data->op_mode,
+ op_data->op_suppgids[0], 0);
+
+ if (op_data->op_name) {
+ char *name = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
+ LASSERT(strnlen(op_data->op_name, op_data->op_namelen) ==
+ op_data->op_namelen);
+ memcpy(name, op_data->op_name, op_data->op_namelen);
+ }
+
+ req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
+ op_data->op_mode);
+ ptlrpc_request_set_replen(req);
+
+ rc = mdc_getattr_common(exp, req);
+ if (rc)
+ ptlrpc_req_finished(req);
+ else
+ *request = req;
+ return rc;
+}
+
+static int mdc_is_subdir(struct obd_export *exp,
+ const struct lu_fid *pfid,
+ const struct lu_fid *cfid,
+ struct ptlrpc_request **request)
+{
+ struct ptlrpc_request *req;
+ int rc;
+
+ *request = NULL;
+ req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
+ &RQF_MDS_IS_SUBDIR, LUSTRE_MDS_VERSION,
+ MDS_IS_SUBDIR);
+ if (req == NULL)
+ return -ENOMEM;
+
+ mdc_is_subdir_pack(req, pfid, cfid, 0);
+ ptlrpc_request_set_replen(req);
+
+ rc = ptlrpc_queue_wait(req);
+ if (rc && rc != -EREMOTE)
+ ptlrpc_req_finished(req);
+ else
+ *request = req;
+ return rc;
+}
+
+static int mdc_xattr_common(struct obd_export *exp, const struct req_format *fmt,
+ const struct lu_fid *fid,
+ struct obd_capa *oc, int opcode, obd_valid valid,
+ const char *xattr_name, const char *input,
+ int input_size, int output_size, int flags,
+ __u32 suppgid, struct ptlrpc_request **request)
+{
+ struct ptlrpc_request *req;
+ int xattr_namelen = 0;
+ char *tmp;
+ int rc;
+
+ *request = NULL;
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), fmt);
+ if (req == NULL)
+ return -ENOMEM;
+
+ mdc_set_capa_size(req, &RMF_CAPA1, oc);
+ if (xattr_name) {
+ xattr_namelen = strlen(xattr_name) + 1;
+ req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
+ xattr_namelen);
+ }
+ if (input_size) {
+ LASSERT(input);
+ req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT,
+ input_size);
+ }
+
+ rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, opcode);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
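+ /* Setxattr goes through the MDS_REINT path and is packed as an
+ * mdt_rec_setxattr record; getxattr only needs a plain mdt_body. */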
+ if (opcode == MDS_REINT) {
+ struct mdt_rec_setxattr *rec;
+
+ CLASSERT(sizeof(struct mdt_rec_setxattr) ==
+ sizeof(struct mdt_rec_reint));
+ rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
+ rec->sx_opcode = REINT_SETXATTR;
+ rec->sx_fsuid = from_kuid(&init_user_ns, current_fsuid());
+ rec->sx_fsgid = from_kgid(&init_user_ns, current_fsgid());
+ rec->sx_cap = cfs_curproc_cap_pack();
+ rec->sx_suppgid1 = suppgid;
+ rec->sx_suppgid2 = -1;
+ rec->sx_fid = *fid;
+ rec->sx_valid = valid | OBD_MD_FLCTIME;
+ rec->sx_time = cfs_time_current_sec();
+ rec->sx_size = output_size;
+ rec->sx_flags = flags;
+
+ mdc_pack_capa(req, &RMF_CAPA1, oc);
+ } else {
+ mdc_pack_body(req, fid, oc, valid, output_size, suppgid, flags);
+ }
+
+ if (xattr_name) {
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
+ memcpy(tmp, xattr_name, xattr_namelen);
+ }
+ if (input_size) {
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
+ memcpy(tmp, input, input_size);
+ }
+
+ if (req_capsule_has_field(&req->rq_pill, &RMF_EADATA, RCL_SERVER))
+ req_capsule_set_size(&req->rq_pill, &RMF_EADATA,
+ RCL_SERVER, output_size);
+ ptlrpc_request_set_replen(req);
+
+ /* make rpc */
+ if (opcode == MDS_REINT)
+ mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
+
+ rc = ptlrpc_queue_wait(req);
+
+ if (opcode == MDS_REINT)
+ mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
+
+ if (rc)
+ ptlrpc_req_finished(req);
+ else
+ *request = req;
+ return rc;
+}
+
+int mdc_setxattr(struct obd_export *exp, const struct lu_fid *fid,
+ struct obd_capa *oc, obd_valid valid, const char *xattr_name,
+ const char *input, int input_size, int output_size,
+ int flags, __u32 suppgid, struct ptlrpc_request **request)
+{
+ return mdc_xattr_common(exp, &RQF_MDS_REINT_SETXATTR,
+ fid, oc, MDS_REINT, valid, xattr_name,
+ input, input_size, output_size, flags,
+ suppgid, request);
+}
+
+int mdc_getxattr(struct obd_export *exp, const struct lu_fid *fid,
+ struct obd_capa *oc, obd_valid valid, const char *xattr_name,
+ const char *input, int input_size, int output_size,
+ int flags, struct ptlrpc_request **request)
+{
+ return mdc_xattr_common(exp, &RQF_MDS_GETXATTR,
+ fid, oc, MDS_GETXATTR, valid, xattr_name,
+ input, input_size, output_size, flags,
+ -1, request);
+}
+
+#ifdef CONFIG_FS_POSIX_ACL
+static int mdc_unpack_acl(struct ptlrpc_request *req, struct lustre_md *md)
+{
+ struct req_capsule *pill = &req->rq_pill;
+ struct mdt_body *body = md->body;
+ struct posix_acl *acl;
+ void *buf;
+ int rc;
+
+ if (!body->aclsize)
+ return 0;
+
+ buf = req_capsule_server_sized_get(pill, &RMF_ACL, body->aclsize);
+
+ if (!buf)
+ return -EPROTO;
+
+ acl = posix_acl_from_xattr(&init_user_ns, buf, body->aclsize);
+ if (IS_ERR(acl)) {
+ rc = PTR_ERR(acl);
+ CERROR("convert xattr to acl: %d\n", rc);
+ return rc;
+ }
+
+ rc = posix_acl_valid(acl);
+ if (rc) {
+ CERROR("validate acl: %d\n", rc);
+ posix_acl_release(acl);
+ return rc;
+ }
+
+ md->posix_acl = acl;
+ return 0;
+}
+#else
+#define mdc_unpack_acl(req, md) 0
+#endif
+
+int mdc_get_lustre_md(struct obd_export *exp, struct ptlrpc_request *req,
+ struct obd_export *dt_exp, struct obd_export *md_exp,
+ struct lustre_md *md)
+{
+ struct req_capsule *pill = &req->rq_pill;
+ int rc;
+
+ LASSERT(md);
+ memset(md, 0, sizeof(*md));
+
+ md->body = req_capsule_server_get(pill, &RMF_MDT_BODY);
+ LASSERT(md->body != NULL);
+
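+ /* Unpack striping metadata: OBD_MD_FLEASIZE carries the LOV EA of
+ * a regular file, OBD_MD_FLDIREA the LMV/MEA data of a directory. */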
+ if (md->body->valid & OBD_MD_FLEASIZE) {
+ int lmmsize;
+ struct lov_mds_md *lmm;
+
+ if (!S_ISREG(md->body->mode)) {
+ CDEBUG(D_INFO, "OBD_MD_FLEASIZE set, should be a "
+ "regular file, but is not\n");
+ GOTO(out, rc = -EPROTO);
+ }
+
+ if (md->body->eadatasize == 0) {
+ CDEBUG(D_INFO, "OBD_MD_FLEASIZE set, "
+ "but eadatasize 0\n");
+ GOTO(out, rc = -EPROTO);
+ }
+ lmmsize = md->body->eadatasize;
+ lmm = req_capsule_server_sized_get(pill, &RMF_MDT_MD, lmmsize);
+ if (!lmm)
+ GOTO(out, rc = -EPROTO);
+
+ rc = obd_unpackmd(dt_exp, &md->lsm, lmm, lmmsize);
+ if (rc < 0)
+ GOTO(out, rc);
+
+ if (rc < sizeof(*md->lsm)) {
+ CDEBUG(D_INFO, "lsm size too small: "
+ "rc < sizeof (*md->lsm) (%d < %d)\n",
+ rc, (int)sizeof(*md->lsm));
+ GOTO(out, rc = -EPROTO);
+ }
+
+ } else if (md->body->valid & OBD_MD_FLDIREA) {
+ int lmvsize;
+ struct lov_mds_md *lmv;
+
+ if (!S_ISDIR(md->body->mode)) {
+ CDEBUG(D_INFO, "OBD_MD_FLDIREA set, should be a "
+ "directory, but is not\n");
+ GOTO(out, rc = -EPROTO);
+ }
+
+ if (md->body->eadatasize == 0) {
+ CDEBUG(D_INFO, "OBD_MD_FLDIREA is set, "
+ "but eadatasize 0\n");
+ return -EPROTO;
+ }
+ if (md->body->valid & OBD_MD_MEA) {
+ lmvsize = md->body->eadatasize;
+ lmv = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
+ lmvsize);
+ if (!lmv)
+ GOTO(out, rc = -EPROTO);
+
+ rc = obd_unpackmd(md_exp, (void *)&md->mea, lmv,
+ lmvsize);
+ if (rc < 0)
+ GOTO(out, rc);
+
+ if (rc < sizeof(*md->mea)) {
+ CDEBUG(D_INFO, "size too small: "
+ "rc < sizeof(*md->mea) (%d < %d)\n",
+ rc, (int)sizeof(*md->mea));
+ GOTO(out, rc = -EPROTO);
+ }
+ }
+ }
+ rc = 0;
+
+ if (md->body->valid & OBD_MD_FLRMTPERM) {
+ /* remote permission */
+ LASSERT(client_is_remote(exp));
+ md->remote_perm = req_capsule_server_swab_get(pill, &RMF_ACL,
+ lustre_swab_mdt_remote_perm);
+ if (!md->remote_perm)
+ GOTO(out, rc = -EPROTO);
+ } else if (md->body->valid & OBD_MD_FLACL) {
+ /* For ACLs it is possible that FLACL is set but aclsize is zero;
+ * only when aclsize != 0 is there an actual ACL segment in the
+ * reply buffer.
+ */
+ if (md->body->aclsize) {
+ rc = mdc_unpack_acl(req, md);
+ if (rc)
+ GOTO(out, rc);
+#ifdef CONFIG_FS_POSIX_ACL
+ } else {
+ md->posix_acl = NULL;
+#endif
+ }
+ }
+ if (md->body->valid & OBD_MD_FLMDSCAPA) {
+ struct obd_capa *oc = NULL;
+
+ rc = mdc_unpack_capa(NULL, req, &RMF_CAPA1, &oc);
+ if (rc)
+ GOTO(out, rc);
+ md->mds_capa = oc;
+ }
+
+ if (md->body->valid & OBD_MD_FLOSSCAPA) {
+ struct obd_capa *oc = NULL;
+
+ rc = mdc_unpack_capa(NULL, req, &RMF_CAPA2, &oc);
+ if (rc)
+ GOTO(out, rc);
+ md->oss_capa = oc;
+ }
+
+out:
+ if (rc) {
+ if (md->oss_capa) {
+ capa_put(md->oss_capa);
+ md->oss_capa = NULL;
+ }
+ if (md->mds_capa) {
+ capa_put(md->mds_capa);
+ md->mds_capa = NULL;
+ }
+#ifdef CONFIG_FS_POSIX_ACL
+ posix_acl_release(md->posix_acl);
+#endif
+ if (md->lsm)
+ obd_free_memmd(dt_exp, &md->lsm);
+ }
+ return rc;
+}
+
+int mdc_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
+{
+ return 0;
+}
+
+/**
+ * Handles both OPEN and SETATTR RPCs for OPEN-CLOSE and SETATTR-DONE_WRITING
+ * RPC chains.
+ */
+void mdc_replay_open(struct ptlrpc_request *req)
+{
+ struct md_open_data *mod = req->rq_cb_data;
+ struct ptlrpc_request *close_req;
+ struct obd_client_handle *och;
+ struct lustre_handle old;
+ struct mdt_body *body;
+
+ if (mod == NULL) {
+ DEBUG_REQ(D_ERROR, req,
+ "Can't properly replay without open data.");
+ return;
+ }
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ LASSERT(body != NULL);
+
+ och = mod->mod_och;
+ if (och != NULL) {
+ struct lustre_handle *file_fh;
+
+ LASSERT(och->och_magic == OBD_CLIENT_HANDLE_MAGIC);
+
+ file_fh = &och->och_fh;
+ CDEBUG(D_HA, "updating handle from "LPX64" to "LPX64"\n",
+ file_fh->cookie, body->handle.cookie);
+ old = *file_fh;
+ *file_fh = body->handle;
+ }
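+ /* If a close or DONE_WRITING request is already queued for this
+ * open, patch its ioepoch handle with the replayed open handle. */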
+ close_req = mod->mod_close_req;
+ if (close_req != NULL) {
+ __u32 opc = lustre_msg_get_opc(close_req->rq_reqmsg);
+ struct mdt_ioepoch *epoch;
+
+ LASSERT(opc == MDS_CLOSE || opc == MDS_DONE_WRITING);
+ epoch = req_capsule_client_get(&close_req->rq_pill,
+ &RMF_MDT_EPOCH);
+ LASSERT(epoch);
+
+ if (och != NULL)
+ LASSERT(!memcmp(&old, &epoch->handle, sizeof(old)));
+ DEBUG_REQ(D_HA, close_req, "updating close body with new fh");
+ epoch->handle = body->handle;
+ }
+}
+
+void mdc_commit_open(struct ptlrpc_request *req)
+{
+ struct md_open_data *mod = req->rq_cb_data;
+ if (mod == NULL)
+ return;
+
+ /**
+ * No need to touch md_open_data::mod_och: it holds a reference on
+ * \var mod, the two will drop their references to each other, and
+ * \var mod is freed once md_open_data::mod_och puts its reference.
+ */
+
+ /**
+ * Do not let the open request disappear, as it may still be needed
+ * for the close RPC to happen (that can occur on eviction only;
+ * otherwise ptlrpc_request::rq_replay prevents mdc_commit_open()
+ * from being called). Just mark this RPC as committed to
+ * distinguish the two cases, see mdc_close() for details. The open
+ * request reference will be put along with freeing \var mod.
+ */
+ ptlrpc_request_addref(req);
+ spin_lock(&req->rq_lock);
+ req->rq_committed = 1;
+ spin_unlock(&req->rq_lock);
+ req->rq_cb_data = NULL;
+ obd_mod_put(mod);
+}
+
+int mdc_set_open_replay_data(struct obd_export *exp,
+ struct obd_client_handle *och,
+ struct ptlrpc_request *open_req)
+{
+ struct md_open_data *mod;
+ struct mdt_rec_create *rec;
+ struct mdt_body *body;
+ struct obd_import *imp = open_req->rq_import;
+
+ if (!open_req->rq_replay)
+ return 0;
+
+ rec = req_capsule_client_get(&open_req->rq_pill, &RMF_REC_REINT);
+ body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
+ LASSERT(rec != NULL);
+ /* Incoming message in my byte order (it's been swabbed). */
+ /* Outgoing messages always in my byte order. */
+ LASSERT(body != NULL);
+
+ /* Only if the import is replayable do we set the open replay data */
+ if (och && imp->imp_replayable) {
+ mod = obd_mod_alloc();
+ if (mod == NULL) {
+ DEBUG_REQ(D_ERROR, open_req,
+ "Can't allocate md_open_data");
+ return 0;
+ }
+
+ /**
+ * Take a reference on \var mod, to be freed on mdc_close().
+ * It protects \var mod from being freed on eviction (commit
+ * callback is called despite rq_replay flag).
+ * Another reference for \var och.
+ */
+ obd_mod_get(mod);
+ obd_mod_get(mod);
+
+ spin_lock(&open_req->rq_lock);
+ och->och_mod = mod;
+ mod->mod_och = och;
+ mod->mod_open_req = open_req;
+ open_req->rq_cb_data = mod;
+ open_req->rq_commit_cb = mdc_commit_open;
+ spin_unlock(&open_req->rq_lock);
+ }
+
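+ /* Stash the server-assigned fid, ioepoch and open handle in the
+ * request record so a replayed open can be matched up again. */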
+ rec->cr_fid2 = body->fid1;
+ rec->cr_ioepoch = body->ioepoch;
+ rec->cr_old_handle.cookie = body->handle.cookie;
+ open_req->rq_replay_cb = mdc_replay_open;
+ if (!fid_is_sane(&body->fid1)) {
+ DEBUG_REQ(D_ERROR, open_req, "Saving replay request with "
+ "insane fid");
+ LBUG();
+ }
+
+ DEBUG_REQ(D_RPCTRACE, open_req, "Set up open replay data");
+ return 0;
+}
+
+int mdc_clear_open_replay_data(struct obd_export *exp,
+ struct obd_client_handle *och)
+{
+ struct md_open_data *mod = och->och_mod;
+
+ /**
+ * It is possible not to have \var mod in case of an eviction between
+ * lookup and ll_file_open().
+ */
+ if (mod == NULL)
+ return 0;
+
+ LASSERT(mod != LP_POISON);
+
+ mod->mod_och = NULL;
+ och->och_mod = NULL;
+ obd_mod_put(mod);
+
+ return 0;
+}
+
+/* Prepare the request for replay according to the reply received */
+static void mdc_close_handle_reply(struct ptlrpc_request *req,
+ struct md_op_data *op_data, int rc)
+{
+ struct mdt_body *repbody;
+ struct mdt_ioepoch *epoch;
+
+ if (req && rc == -EAGAIN) {
+ repbody = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
+
+ epoch->flags |= MF_SOM_AU;
+ if (repbody->valid & OBD_MD_FLGETATTRLOCK)
+ op_data->op_flags |= MF_GETATTR_LOCK;
+ }
+}
+
+int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
+ struct md_open_data *mod, struct ptlrpc_request **request)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+ struct ptlrpc_request *req;
+ int rc;
+
+ *request = NULL;
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_CLOSE);
+ if (req == NULL)
+ return -ENOMEM;
+
+ mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
+
+ rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_CLOSE);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ /* To avoid a livelock (bug 7034), we need to send CLOSE RPCs to a
+ * portal whose threads are not taking any DLM locks and are therefore
+ * always progressing */
+ req->rq_request_portal = MDS_READPAGE_PORTAL;
+ ptlrpc_at_set_req_timeout(req);
+
+ /* Ensure that this close's handle is fixed up during replay. */
+ if (likely(mod != NULL)) {
+ LASSERTF(mod->mod_open_req != NULL &&
+ mod->mod_open_req->rq_type != LI_POISON,
+ "POISONED open %p!\n", mod->mod_open_req);
+
+ mod->mod_close_req = req;
+
+ DEBUG_REQ(D_HA, mod->mod_open_req, "matched open");
+ /* We no longer want to preserve this open for replay even
+ * though the open was committed. b=3632, b=3633 */
+ spin_lock(&mod->mod_open_req->rq_lock);
+ mod->mod_open_req->rq_replay = 0;
+ spin_unlock(&mod->mod_open_req->rq_lock);
+ } else {
+ CDEBUG(D_HA, "couldn't find open req; expecting close error\n");
+ }
+
+ mdc_close_pack(req, op_data);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
+ obd->u.cli.cl_max_mds_easize);
+ req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER,
+ obd->u.cli.cl_max_mds_cookiesize);
+
+ ptlrpc_request_set_replen(req);
+
+ mdc_get_rpc_lock(obd->u.cli.cl_close_lock, NULL);
+ rc = ptlrpc_queue_wait(req);
+ mdc_put_rpc_lock(obd->u.cli.cl_close_lock, NULL);
+
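+ /* Three reply cases follow: no reply buffer at all, a normal (or
+ * -EAGAIN) reply that must carry an mdt_body, or -ESTALE which may
+ * be cleared if the open request is already committed. */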
+ if (req->rq_repmsg == NULL) {
+ CDEBUG(D_RPCTRACE, "request failed to send: %p, %d\n", req,
+ req->rq_status);
+ if (rc == 0)
+ rc = req->rq_status ?: -EIO;
+ } else if (rc == 0 || rc == -EAGAIN) {
+ struct mdt_body *body;
+
+ rc = lustre_msg_get_status(req->rq_repmsg);
+ if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
+ DEBUG_REQ(D_ERROR, req, "type == PTL_RPC_MSG_ERR, err "
+ "= %d", rc);
+ if (rc > 0)
+ rc = -rc;
+ }
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ if (body == NULL)
+ rc = -EPROTO;
+ } else if (rc == -ESTALE) {
+ /**
+ * ESTALE can be an allowed error after bug 3633 if the open was
+ * committed and the server failed before the close was sent.
+ * Check whether mod exists and return no error in that case.
+ */
+ if (mod) {
+ DEBUG_REQ(D_HA, req, "Reset ESTALE = %d", rc);
+ LASSERT(mod->mod_open_req != NULL);
+ if (mod->mod_open_req->rq_committed)
+ rc = 0;
+ }
+ }
+
+ if (mod) {
+ if (rc != 0)
+ mod->mod_close_req = NULL;
+ /* From now on mod is accessed through open_req only,
+ * so the close request no longer keeps a reference on mod. */
+ obd_mod_put(mod);
+ }
+ *request = req;
+ mdc_close_handle_reply(req, op_data, rc);
+ return rc;
+}
+
+int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data,
+ struct md_open_data *mod)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+ struct ptlrpc_request *req;
+ int rc;
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+ &RQF_MDS_DONE_WRITING);
+ if (req == NULL)
+ return -ENOMEM;
+
+ mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
+ rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_DONE_WRITING);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ if (mod != NULL) {
+ LASSERTF(mod->mod_open_req != NULL &&
+ mod->mod_open_req->rq_type != LI_POISON,
+ "POISONED setattr %p!\n", mod->mod_open_req);
+
+ mod->mod_close_req = req;
+ DEBUG_REQ(D_HA, mod->mod_open_req, "matched setattr");
+ /* We no longer want to preserve this setattr for replay even
+ * though the open was committed. b=3632, b=3633 */
+ spin_lock(&mod->mod_open_req->rq_lock);
+ mod->mod_open_req->rq_replay = 0;
+ spin_unlock(&mod->mod_open_req->rq_lock);
+ }
+
+ mdc_close_pack(req, op_data);
+ ptlrpc_request_set_replen(req);
+
+ mdc_get_rpc_lock(obd->u.cli.cl_close_lock, NULL);
+ rc = ptlrpc_queue_wait(req);
+ mdc_put_rpc_lock(obd->u.cli.cl_close_lock, NULL);
+
+ if (rc == -ESTALE) {
+ /**
+ * ESTALE can be an allowed error after bug 3633 if the open or
+ * setattr was committed and the server failed before the close
+ * was sent. Check whether mod exists and return no error in
+ * that case.
+ */
+ if (mod) {
+ LASSERT(mod->mod_open_req != NULL);
+ if (mod->mod_open_req->rq_committed)
+ rc = 0;
+ }
+ }
+
+ if (mod) {
+ if (rc != 0)
+ mod->mod_close_req = NULL;
+ /* From now on mod is accessed through the setattr request only,
+ * so the DONE_WRITING request no longer keeps a reference on mod. */
+ obd_mod_put(mod);
+ }
+
+ mdc_close_handle_reply(req, op_data, rc);
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+int mdc_readpage(struct obd_export *exp, struct md_op_data *op_data,
+ struct page **pages, struct ptlrpc_request **request)
+{
+ struct ptlrpc_request *req;
+ struct ptlrpc_bulk_desc *desc;
+ int i;
+ wait_queue_head_t waitq;
+ int resends = 0;
+ struct l_wait_info lwi;
+ int rc;
+
+ *request = NULL;
+ init_waitqueue_head(&waitq);
+
+restart_bulk:
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_READPAGE);
+ if (req == NULL)
+ return -ENOMEM;
+
+ mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
+
+ rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_READPAGE);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ req->rq_request_portal = MDS_READPAGE_PORTAL;
+ ptlrpc_at_set_req_timeout(req);
+
+ desc = ptlrpc_prep_bulk_imp(req, op_data->op_npages, 1, BULK_PUT_SINK,
+ MDS_BULK_PORTAL);
+ if (desc == NULL) {
+ ptlrpc_request_free(req);
+ return -ENOMEM;
+ }
+
+ /* NB req now owns desc and will free it when it gets freed */
+ for (i = 0; i < op_data->op_npages; i++)
+ ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
+
+ mdc_readdir_pack(req, op_data->op_offset,
+ PAGE_CACHE_SIZE * op_data->op_npages,
+ &op_data->op_fid1, op_data->op_capa1);
+
+ ptlrpc_request_set_replen(req);
+ rc = ptlrpc_queue_wait(req);
+ if (rc) {
+ ptlrpc_req_finished(req);
+ if (rc != -ETIMEDOUT)
+ return rc;
+
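+ /* The bulk transfer timed out: back off for 'resends' seconds
+ * and rebuild the request, up to the client resend limit. */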
+ resends++;
+ if (!client_should_resend(resends, &exp->exp_obd->u.cli)) {
+ CERROR("too many resend retries, returning error\n");
+ return -EIO;
+ }
+ lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(resends), NULL, NULL, NULL);
+ l_wait_event(waitq, 0, &lwi);
+
+ goto restart_bulk;
+ }
+
+ rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk,
+ req->rq_bulk->bd_nob_transferred);
+ if (rc < 0) {
+ ptlrpc_req_finished(req);
+ return rc;
+ }
+
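+ /* Readdir replies must be a whole multiple of LU_PAGE_SIZE bytes. */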
+ if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) {
+ CERROR("Unexpected # bytes transferred: %d (%ld expected)\n",
+ req->rq_bulk->bd_nob_transferred,
+ PAGE_CACHE_SIZE * op_data->op_npages);
+ ptlrpc_req_finished(req);
+ return -EPROTO;
+ }
+
+ *request = req;
+ return 0;
+}
+
+static int mdc_statfs(const struct lu_env *env,
+ struct obd_export *exp, struct obd_statfs *osfs,
+ __u64 max_age, __u32 flags)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+ struct ptlrpc_request *req;
+ struct obd_statfs *msfs;
+ struct obd_import *imp = NULL;
+ int rc;
+
+ /*
+ * Since the request might also come from lprocfs, we need to
+ * sync this with client_disconnect_export() (bug 15684).
+ */
+ down_read(&obd->u.cli.cl_sem);
+ if (obd->u.cli.cl_import)
+ imp = class_import_get(obd->u.cli.cl_import);
+ up_read(&obd->u.cli.cl_sem);
+ if (!imp)
+ return -ENODEV;
+
+ req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_STATFS,
+ LUSTRE_MDS_VERSION, MDS_STATFS);
+ if (req == NULL)
+ GOTO(output, rc = -ENOMEM);
+
+ ptlrpc_request_set_replen(req);
+
+ if (flags & OBD_STATFS_NODELAY) {
+ /* procfs requests should not wait, to avoid deadlocks */
+ req->rq_no_resend = 1;
+ req->rq_no_delay = 1;
+ }
+
+ rc = ptlrpc_queue_wait(req);
+ if (rc) {
+ /* check connection error first */
+ if (imp->imp_connect_error)
+ rc = imp->imp_connect_error;
+ GOTO(out, rc);
+ }
+
+ msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
+ if (msfs == NULL)
+ GOTO(out, rc = -EPROTO);
+
+ *osfs = *msfs;
+out:
+ ptlrpc_req_finished(req);
+output:
+ class_import_put(imp);
+ return rc;
+}
+
+static int mdc_ioc_fid2path(struct obd_export *exp, struct getinfo_fid2path *gf)
+{
+ __u32 keylen, vallen;
+ void *key;
+ int rc;
+
+ if (gf->gf_pathlen > PATH_MAX)
+ return -ENAMETOOLONG;
+ if (gf->gf_pathlen < 2)
+ return -EOVERFLOW;
+
+ /* Key is KEY_FID2PATH + getinfo_fid2path description */
+ keylen = cfs_size_round(sizeof(KEY_FID2PATH)) + sizeof(*gf);
+ OBD_ALLOC(key, keylen);
+ if (key == NULL)
+ return -ENOMEM;
+ memcpy(key, KEY_FID2PATH, sizeof(KEY_FID2PATH));
+ memcpy(key + cfs_size_round(sizeof(KEY_FID2PATH)), gf, sizeof(*gf));
+
+ CDEBUG(D_IOCTL, "path get "DFID" from "LPU64" #%d\n",
+ PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno);
+
+ if (!fid_is_sane(&gf->gf_fid))
+ GOTO(out, rc = -EINVAL);
+
+ /* Val is struct getinfo_fid2path result plus path */
+ vallen = sizeof(*gf) + gf->gf_pathlen;
+
+ rc = obd_get_info(NULL, exp, keylen, key, &vallen, gf, NULL);
+ if (rc != 0 && rc != -EREMOTE)
+ GOTO(out, rc);
+
+ if (vallen <= sizeof(*gf))
+ GOTO(out, rc = -EPROTO);
+ else if (vallen > sizeof(*gf) + gf->gf_pathlen)
+ GOTO(out, rc = -EOVERFLOW);
+
+ CDEBUG(D_IOCTL, "path get "DFID" from "LPU64" #%d\n%s\n",
+ PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno, gf->gf_path);
+
+out:
+ OBD_FREE(key, keylen);
+ return rc;
+}
+
+static int mdc_ioc_hsm_progress(struct obd_export *exp,
+ struct hsm_progress_kernel *hpk)
+{
+ struct obd_import *imp = class_exp2cliimp(exp);
+ struct hsm_progress_kernel *req_hpk;
+ struct ptlrpc_request *req;
+ int rc;
+
+ req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_PROGRESS,
+ LUSTRE_MDS_VERSION, MDS_HSM_PROGRESS);
+ if (req == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ mdc_pack_body(req, NULL, NULL, OBD_MD_FLRMTPERM, 0, 0, 0);
+
+ /* Copy hsm_progress struct */
+ req_hpk = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_PROGRESS);
+ if (req_hpk == NULL)
+ GOTO(out, rc = -EPROTO);
+
+ *req_hpk = *hpk;
+ req_hpk->hpk_errval = lustre_errno_hton(hpk->hpk_errval);
+
+ ptlrpc_request_set_replen(req);
+
+ rc = mdc_queue_wait(req);
+ GOTO(out, rc);
+out:
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+static int mdc_ioc_hsm_ct_register(struct obd_import *imp, __u32 archives)
+{
+ __u32 *archive_mask;
+ struct ptlrpc_request *req;
+ int rc;
+
+ req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_REGISTER,
+ LUSTRE_MDS_VERSION,
+ MDS_HSM_CT_REGISTER);
+ if (req == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ mdc_pack_body(req, NULL, NULL, OBD_MD_FLRMTPERM, 0, 0, 0);
+
+ /* Copy the archive mask */
+ archive_mask = req_capsule_client_get(&req->rq_pill,
+ &RMF_MDS_HSM_ARCHIVE);
+ if (archive_mask == NULL)
+ GOTO(out, rc = -EPROTO);
+
+ *archive_mask = archives;
+
+ ptlrpc_request_set_replen(req);
+
+ rc = mdc_queue_wait(req);
+ GOTO(out, rc);
+out:
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+static int mdc_ioc_hsm_current_action(struct obd_export *exp,
+ struct md_op_data *op_data)
+{
+ struct hsm_current_action *hca = op_data->op_data;
+ struct hsm_current_action *req_hca;
+ struct ptlrpc_request *req;
+ int rc;
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+ &RQF_MDS_HSM_ACTION);
+ if (req == NULL)
+ return -ENOMEM;
+
+ mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
+
+ rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_ACTION);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ mdc_pack_body(req, &op_data->op_fid1, op_data->op_capa1,
+ OBD_MD_FLRMTPERM, 0, op_data->op_suppgids[0], 0);
+
+ ptlrpc_request_set_replen(req);
+
+ rc = mdc_queue_wait(req);
+ if (rc)
+ GOTO(out, rc);
+
+ req_hca = req_capsule_server_get(&req->rq_pill,
+ &RMF_MDS_HSM_CURRENT_ACTION);
+ if (req_hca == NULL)
+ GOTO(out, rc = -EPROTO);
+
+ *hca = *req_hca;
+
+out:
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+static int mdc_ioc_hsm_ct_unregister(struct obd_import *imp)
+{
+ struct ptlrpc_request *req;
+ int rc;
+
+ req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_UNREGISTER,
+ LUSTRE_MDS_VERSION,
+ MDS_HSM_CT_UNREGISTER);
+ if (req == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ mdc_pack_body(req, NULL, NULL, OBD_MD_FLRMTPERM, 0, 0, 0);
+
+ ptlrpc_request_set_replen(req);
+
+ rc = mdc_queue_wait(req);
+ GOTO(out, rc);
+out:
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+static int mdc_ioc_hsm_state_get(struct obd_export *exp,
+ struct md_op_data *op_data)
+{
+ struct hsm_user_state *hus = op_data->op_data;
+ struct hsm_user_state *req_hus;
+ struct ptlrpc_request *req;
+ int rc;
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+ &RQF_MDS_HSM_STATE_GET);
+ if (req == NULL)
+ return -ENOMEM;
+
+ mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
+
+ rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_GET);
+ if (rc != 0) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ mdc_pack_body(req, &op_data->op_fid1, op_data->op_capa1,
+ OBD_MD_FLRMTPERM, 0, op_data->op_suppgids[0], 0);
+
+ ptlrpc_request_set_replen(req);
+
+ rc = mdc_queue_wait(req);
+ if (rc)
+ GOTO(out, rc);
+
+ req_hus = req_capsule_server_get(&req->rq_pill, &RMF_HSM_USER_STATE);
+ if (req_hus == NULL)
+ GOTO(out, rc = -EPROTO);
+
+ *hus = *req_hus;
+
+out:
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+static int mdc_ioc_hsm_state_set(struct obd_export *exp,
+ struct md_op_data *op_data)
+{
+ struct hsm_state_set *hss = op_data->op_data;
+ struct hsm_state_set *req_hss;
+ struct ptlrpc_request *req;
+ int rc;
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+ &RQF_MDS_HSM_STATE_SET);
+ if (req == NULL)
+ return -ENOMEM;
+
+ mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
+
+ rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_SET);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ mdc_pack_body(req, &op_data->op_fid1, op_data->op_capa1,
+ OBD_MD_FLRMTPERM, 0, op_data->op_suppgids[0], 0);
+
+ /* Copy states */
+ req_hss = req_capsule_client_get(&req->rq_pill, &RMF_HSM_STATE_SET);
+ if (req_hss == NULL)
+ GOTO(out, rc = -EPROTO);
+ *req_hss = *hss;
+
+ ptlrpc_request_set_replen(req);
+
+ rc = mdc_queue_wait(req);
+ GOTO(out, rc);
+
+out:
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+static int mdc_ioc_hsm_request(struct obd_export *exp,
+ struct hsm_user_request *hur)
+{
+ struct obd_import *imp = class_exp2cliimp(exp);
+ struct ptlrpc_request *req;
+ struct hsm_request *req_hr;
+ struct hsm_user_item *req_hui;
+ char *req_opaque;
+ int rc;
+
+ req = ptlrpc_request_alloc(imp, &RQF_MDS_HSM_REQUEST);
+ if (req == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_MDS_HSM_USER_ITEM, RCL_CLIENT,
+ hur->hur_request.hr_itemcount
+ * sizeof(struct hsm_user_item));
+ req_capsule_set_size(&req->rq_pill, &RMF_GENERIC_DATA, RCL_CLIENT,
+ hur->hur_request.hr_data_len);
+
+ rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_REQUEST);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ mdc_pack_body(req, NULL, NULL, OBD_MD_FLRMTPERM, 0, 0, 0);
+
+ /* Copy hsm_request struct */
+ req_hr = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_REQUEST);
+ if (req_hr == NULL)
+ GOTO(out, rc = -EPROTO);
+ *req_hr = hur->hur_request;
+
+ /* Copy hsm_user_item structs */
+ req_hui = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_USER_ITEM);
+ if (req_hui == NULL)
+ GOTO(out, rc = -EPROTO);
+ memcpy(req_hui, hur->hur_user_item,
+ hur->hur_request.hr_itemcount * sizeof(struct hsm_user_item));
+
+ /* Copy opaque field */
+ req_opaque = req_capsule_client_get(&req->rq_pill, &RMF_GENERIC_DATA);
+ if (req_opaque == NULL)
+ GOTO(out, rc = -EPROTO);
+ memcpy(req_opaque, hur_data(hur), hur->hur_request.hr_data_len);
+
+ ptlrpc_request_set_replen(req);
+
+ rc = mdc_queue_wait(req);
+ GOTO(out, rc);
+
+out:
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+static struct kuc_hdr *changelog_kuc_hdr(char *buf, int len, int flags)
+{
+ struct kuc_hdr *lh = (struct kuc_hdr *)buf;
+
+ LASSERT(len <= CR_MAXSIZE);
+
+ lh->kuc_magic = KUC_MAGIC;
+ lh->kuc_transport = KUC_TRANSPORT_CHANGELOG;
+ lh->kuc_flags = flags;
+ lh->kuc_msgtype = CL_RECORD;
+ lh->kuc_msglen = len;
+ return lh;
+}
+
+#define D_CHANGELOG 0
+
+struct changelog_show {
+ __u64 cs_startrec;
+ __u32 cs_flags;
+ struct file *cs_fp;
+ char *cs_buf;
+ struct obd_device *cs_obd;
+};
+
+static int changelog_kkuc_cb(const struct lu_env *env, struct llog_handle *llh,
+ struct llog_rec_hdr *hdr, void *data)
+{
+ struct changelog_show *cs = data;
+ struct llog_changelog_rec *rec = (struct llog_changelog_rec *)hdr;
+ struct kuc_hdr *lh;
+ int len, rc;
+
+ if (rec->cr_hdr.lrh_type != CHANGELOG_REC) {
+ rc = -EINVAL;
+ CERROR("%s: not a changelog rec %x/%d: rc = %d\n",
+ cs->cs_obd->obd_name, rec->cr_hdr.lrh_type,
+ rec->cr.cr_type, rc);
+ return rc;
+ }
+
+ if (rec->cr.cr_index < cs->cs_startrec) {
+ /* Skip entries earlier than what we are interested in */
+ CDEBUG(D_CHANGELOG, "rec="LPU64" start="LPU64"\n",
+ rec->cr.cr_index, cs->cs_startrec);
+ return 0;
+ }
+
+ CDEBUG(D_CHANGELOG, LPU64" %02d%-5s "LPU64" 0x%x t="DFID" p="DFID
+ " %.*s\n", rec->cr.cr_index, rec->cr.cr_type,
+ changelog_type2str(rec->cr.cr_type), rec->cr.cr_time,
+ rec->cr.cr_flags & CLF_FLAGMASK,
+ PFID(&rec->cr.cr_tfid), PFID(&rec->cr.cr_pfid),
+ rec->cr.cr_namelen, changelog_rec_name(&rec->cr));
+
+ len = sizeof(*lh) + changelog_rec_size(&rec->cr) + rec->cr.cr_namelen;
+
+ /* Set up the message */
+ lh = changelog_kuc_hdr(cs->cs_buf, len, cs->cs_flags);
+ memcpy(lh + 1, &rec->cr, len - sizeof(*lh));
+
+ rc = libcfs_kkuc_msg_put(cs->cs_fp, lh);
+ CDEBUG(D_CHANGELOG, "kucmsg fp %p len %d rc %d\n", cs->cs_fp, len, rc);
+
+ return rc;
+}
+
+static int mdc_changelog_send_thread(void *csdata)
+{
+ struct changelog_show *cs = csdata;
+ struct llog_ctxt *ctxt = NULL;
+ struct llog_handle *llh = NULL;
+ struct kuc_hdr *kuch;
+ int rc;
+
+ CDEBUG(D_CHANGELOG, "changelog to fp=%p start "LPU64"\n",
+ cs->cs_fp, cs->cs_startrec);
+
+ OBD_ALLOC(cs->cs_buf, CR_MAXSIZE);
+ if (cs->cs_buf == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ /* Set up the remote catalog handle */
+ ctxt = llog_get_context(cs->cs_obd, LLOG_CHANGELOG_REPL_CTXT);
+ if (ctxt == NULL)
+ GOTO(out, rc = -ENOENT);
+ rc = llog_open(NULL, ctxt, &llh, NULL, CHANGELOG_CATALOG,
+ LLOG_OPEN_EXISTS);
+ if (rc) {
+ CERROR("%s: fail to open changelog catalog: rc = %d\n",
+ cs->cs_obd->obd_name, rc);
+ GOTO(out, rc);
+ }
+ rc = llog_init_handle(NULL, llh, LLOG_F_IS_CAT, NULL);
+ if (rc) {
+ CERROR("llog_init_handle failed %d\n", rc);
+ GOTO(out, rc);
+ }
+
+ rc = llog_cat_process(NULL, llh, changelog_kkuc_cb, cs, 0, 0);
+
+ /* Send EOF no matter what our result */
+ kuch = changelog_kuc_hdr(cs->cs_buf, sizeof(*kuch), cs->cs_flags);
+ if (kuch) {
+ kuch->kuc_msgtype = CL_EOF;
+ libcfs_kkuc_msg_put(cs->cs_fp, kuch);
+ }
+
+out:
+ fput(cs->cs_fp);
+ if (llh)
+ llog_cat_close(NULL, llh);
+ if (ctxt)
+ llog_ctxt_put(ctxt);
+ if (cs->cs_buf)
+ OBD_FREE(cs->cs_buf, CR_MAXSIZE);
+ OBD_FREE_PTR(cs);
+ return rc;
+}
+
+static int mdc_ioc_changelog_send(struct obd_device *obd,
+ struct ioc_changelog *icc)
+{
+ struct changelog_show *cs;
+ int rc;
+
+ /* Freed in mdc_changelog_send_thread */
+ OBD_ALLOC_PTR(cs);
+ if (!cs)
+ return -ENOMEM;
+
+ cs->cs_obd = obd;
+ cs->cs_startrec = icc->icc_recno;
+ /* matching fput in mdc_changelog_send_thread */
+ cs->cs_fp = fget(icc->icc_id);
+ cs->cs_flags = icc->icc_flags;
+
+ /*
+ * Use a new thread because we should return to the user
+ * application before writing into our pipe.
+ */
+ rc = PTR_ERR(kthread_run(mdc_changelog_send_thread, cs,
+ "mdc_clg_send_thread"));
+ if (!IS_ERR_VALUE(rc)) {
+ CDEBUG(D_CHANGELOG, "start changelog thread\n");
+ return 0;
+ }
+
+ CERROR("Failed to start changelog thread: %d\n", rc);
+ OBD_FREE_PTR(cs);
+ return rc;
+}
+
+static int mdc_ioc_hsm_ct_start(struct obd_export *exp,
+ struct lustre_kernelcomm *lk);
+
+static int mdc_quotacheck(struct obd_device *unused, struct obd_export *exp,
+ struct obd_quotactl *oqctl)
+{
+ struct client_obd *cli = &exp->exp_obd->u.cli;
+ struct ptlrpc_request *req;
+ struct obd_quotactl *body;
+ int rc;
+
+ req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
+ &RQF_MDS_QUOTACHECK, LUSTRE_MDS_VERSION,
+ MDS_QUOTACHECK);
+ if (req == NULL)
+ return -ENOMEM;
+
+ body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
+ *body = *oqctl;
+
+ ptlrpc_request_set_replen(req);
+
+ /* the next poll will find -ENODATA, which means a quotacheck is
+ * in progress */
+ cli->cl_qchk_stat = -ENODATA;
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ cli->cl_qchk_stat = rc;
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+static int mdc_quota_poll_check(struct obd_export *exp,
+ struct if_quotacheck *qchk)
+{
+ struct client_obd *cli = &exp->exp_obd->u.cli;
+ int rc;
+
+ qchk->obd_uuid = cli->cl_target_uuid;
+ memcpy(qchk->obd_type, LUSTRE_MDS_NAME, strlen(LUSTRE_MDS_NAME));
+
+ rc = cli->cl_qchk_stat;
+ /* the client is not the previous one */
+ if (rc == CL_NOT_QUOTACHECKED)
+ rc = -EINTR;
+ return rc;
+}
+
+static int mdc_quotactl(struct obd_device *unused, struct obd_export *exp,
+ struct obd_quotactl *oqctl)
+{
+ struct ptlrpc_request *req;
+ struct obd_quotactl *oqc;
+ int rc;
+
+ req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
+ &RQF_MDS_QUOTACTL, LUSTRE_MDS_VERSION,
+ MDS_QUOTACTL);
+ if (req == NULL)
+ return -ENOMEM;
+
+ oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
+ *oqc = *oqctl;
+
+ ptlrpc_request_set_replen(req);
+ ptlrpc_at_set_req_timeout(req);
+ req->rq_no_resend = 1;
+
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ CERROR("ptlrpc_queue_wait failed, rc: %d\n", rc);
+
+ if (req->rq_repmsg &&
+ (oqc = req_capsule_server_get(&req->rq_pill, &RMF_OBD_QUOTACTL))) {
+ *oqctl = *oqc;
+ } else if (!rc) {
+ CERROR("Can't unpack obd_quotactl\n");
+ rc = -EPROTO;
+ }
+ ptlrpc_req_finished(req);
+
+ return rc;
+}
+
+static int mdc_ioc_swap_layouts(struct obd_export *exp,
+ struct md_op_data *op_data)
+{
+ LIST_HEAD(cancels);
+ struct ptlrpc_request *req;
+ int rc, count;
+ struct mdc_swap_layouts *msl, *payload;
+
+ msl = op_data->op_data;
+
+ /* When the MDT receives the MDS_SWAP_LAYOUTS RPC, the
+ * first thing it does is cancel the two layout locks
+ * held by this client.
+ * So the client must cancel its layout locks on the two FIDs
+ * along with the request RPC, to avoid extra RPC round trips.
+ */
+ count = mdc_resource_get_unused(exp, &op_data->op_fid1, &cancels,
+ LCK_CR, MDS_INODELOCK_LAYOUT);
+ count += mdc_resource_get_unused(exp, &op_data->op_fid2, &cancels,
+ LCK_CR, MDS_INODELOCK_LAYOUT);
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+ &RQF_MDS_SWAP_LAYOUTS);
+ if (req == NULL) {
+ ldlm_lock_list_put(&cancels, l_bl_ast, count);
+ return -ENOMEM;
+ }
+
+ mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
+ mdc_set_capa_size(req, &RMF_CAPA2, op_data->op_capa2);
+
+ rc = mdc_prep_elc_req(exp, req, MDS_SWAP_LAYOUTS, &cancels, count);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ mdc_swap_layouts_pack(req, op_data);
+
+ payload = req_capsule_client_get(&req->rq_pill, &RMF_SWAP_LAYOUTS);
+ LASSERT(payload);
+
+ *payload = *msl;
+
+ ptlrpc_request_set_replen(req);
+
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ GOTO(out, rc);
+
+out:
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
+ void *karg, void *uarg)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct obd_ioctl_data *data = karg;
+ struct obd_import *imp = obd->u.cli.cl_import;
+ struct llog_ctxt *ctxt;
+ int rc;
+
+ if (!try_module_get(THIS_MODULE)) {
+ CERROR("Can't get module. Is it alive?\n");
+ return -EINVAL;
+ }
+ switch (cmd) {
+ case OBD_IOC_CHANGELOG_SEND:
+ rc = mdc_ioc_changelog_send(obd, karg);
+ GOTO(out, rc);
+ case OBD_IOC_CHANGELOG_CLEAR: {
+ struct ioc_changelog *icc = karg;
+ struct changelog_setinfo cs =
+ {.cs_recno = icc->icc_recno, .cs_id = icc->icc_id};
+ rc = obd_set_info_async(NULL, exp, strlen(KEY_CHANGELOG_CLEAR),
+ KEY_CHANGELOG_CLEAR, sizeof(cs), &cs,
+ NULL);
+ GOTO(out, rc);
+ }
+ case OBD_IOC_FID2PATH:
+ rc = mdc_ioc_fid2path(exp, karg);
+ GOTO(out, rc);
+ case LL_IOC_HSM_CT_START:
+ rc = mdc_ioc_hsm_ct_start(exp, karg);
+ /* ignore if it was already registered on this MDS. */
+ if (rc == -EEXIST)
+ rc = 0;
+ GOTO(out, rc);
+ case LL_IOC_HSM_PROGRESS:
+ rc = mdc_ioc_hsm_progress(exp, karg);
+ GOTO(out, rc);
+ case LL_IOC_HSM_STATE_GET:
+ rc = mdc_ioc_hsm_state_get(exp, karg);
+ GOTO(out, rc);
+ case LL_IOC_HSM_STATE_SET:
+ rc = mdc_ioc_hsm_state_set(exp, karg);
+ GOTO(out, rc);
+ case LL_IOC_HSM_ACTION:
+ rc = mdc_ioc_hsm_current_action(exp, karg);
+ GOTO(out, rc);
+ case LL_IOC_HSM_REQUEST:
+ rc = mdc_ioc_hsm_request(exp, karg);
+ GOTO(out, rc);
+ case OBD_IOC_CLIENT_RECOVER:
+ rc = ptlrpc_recover_import(imp, data->ioc_inlbuf1, 0);
+ if (rc < 0)
+ GOTO(out, rc);
+ GOTO(out, rc = 0);
+ case IOC_OSC_SET_ACTIVE:
+ rc = ptlrpc_set_import_active(imp, data->ioc_offset);
+ GOTO(out, rc);
+ case OBD_IOC_PARSE: {
+ ctxt = llog_get_context(exp->exp_obd, LLOG_CONFIG_REPL_CTXT);
+ rc = class_config_parse_llog(NULL, ctxt, data->ioc_inlbuf1,
+ NULL);
+ llog_ctxt_put(ctxt);
+ GOTO(out, rc);
+ }
+ case OBD_IOC_LLOG_INFO:
+ case OBD_IOC_LLOG_PRINT: {
+ ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT);
+ rc = llog_ioctl(NULL, ctxt, cmd, data);
+ llog_ctxt_put(ctxt);
+ GOTO(out, rc);
+ }
+ case OBD_IOC_POLL_QUOTACHECK:
+ rc = mdc_quota_poll_check(exp, (struct if_quotacheck *)karg);
+ GOTO(out, rc);
+ case OBD_IOC_PING_TARGET:
+ rc = ptlrpc_obd_ping(obd);
+ GOTO(out, rc);
+ /*
+ * Normally IOC_OBD_STATFS, OBD_IOC_QUOTACTL iocontrol are handled by
+ * LMV instead of MDC. But when the cluster is upgraded from 1.8,
+ * there'd be no LMV layer thus we might be called here. Eventually
+ * this code should be removed.
+ * bz20731, LU-592.
+ */
+ case IOC_OBD_STATFS: {
+ struct obd_statfs stat_buf = {0};
+
+ if (*((__u32 *) data->ioc_inlbuf2) != 0)
+ GOTO(out, rc = -ENODEV);
+
+ /* copy UUID */
+ if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(obd),
+ min((int) data->ioc_plen2,
+ (int) sizeof(struct obd_uuid))))
+ GOTO(out, rc = -EFAULT);
+
+ rc = mdc_statfs(NULL, obd->obd_self_export, &stat_buf,
+ cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ 0);
+ if (rc != 0)
+ GOTO(out, rc);
+
+ if (copy_to_user(data->ioc_pbuf1, &stat_buf,
+ min((int) data->ioc_plen1,
+ (int) sizeof(stat_buf))))
+ GOTO(out, rc = -EFAULT);
+
+ GOTO(out, rc = 0);
+ }
+ case OBD_IOC_QUOTACTL: {
+ struct if_quotactl *qctl = karg;
+ struct obd_quotactl *oqctl;
+
+ OBD_ALLOC_PTR(oqctl);
+ if (!oqctl)
+ return -ENOMEM;
+
+ QCTL_COPY(oqctl, qctl);
+ rc = obd_quotactl(exp, oqctl);
+ if (rc == 0) {
+ QCTL_COPY(qctl, oqctl);
+ qctl->qc_valid = QC_MDTIDX;
+ qctl->obd_uuid = obd->u.cli.cl_target_uuid;
+ }
+ OBD_FREE_PTR(oqctl);
+ break;
+ }
+ case LL_IOC_GET_CONNECT_FLAGS: {
+ if (copy_to_user(uarg,
+ exp_connect_flags_ptr(exp),
+ sizeof(__u64)))
+ GOTO(out, rc = -EFAULT);
+ else
+ GOTO(out, rc = 0);
+ }
+ case LL_IOC_LOV_SWAP_LAYOUTS: {
+ rc = mdc_ioc_swap_layouts(exp, karg);
+ break;
+ }
+ default:
+ CERROR("mdc_ioctl(): unrecognised ioctl %#x\n", cmd);
+ GOTO(out, rc = -ENOTTY);
+ }
+out:
+ module_put(THIS_MODULE);
+
+ return rc;
+}
+
+int mdc_get_info_rpc(struct obd_export *exp,
+ obd_count keylen, void *key,
+ int vallen, void *val)
+{
+ struct obd_import *imp = class_exp2cliimp(exp);
+ struct ptlrpc_request *req;
+ char *tmp;
+ int rc = -EINVAL;
+
+ req = ptlrpc_request_alloc(imp, &RQF_MDS_GET_INFO);
+ if (req == NULL)
+ return -ENOMEM;
+
+ req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_KEY,
+ RCL_CLIENT, keylen);
+ req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_VALLEN,
+ RCL_CLIENT, sizeof(__u32));
+
+ rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GET_INFO);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_KEY);
+ memcpy(tmp, key, keylen);
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_VALLEN);
+ memcpy(tmp, &vallen, sizeof(__u32));
+
+ req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_VAL,
+ RCL_SERVER, vallen);
+ ptlrpc_request_set_replen(req);
+
+ rc = ptlrpc_queue_wait(req);
+ /* -EREMOTE means the get_info result is partial, and it needs to
+ * continue on another MDT, see fid2path part in lmv_iocontrol */
+ if (rc == 0 || rc == -EREMOTE) {
+ tmp = req_capsule_server_get(&req->rq_pill, &RMF_GETINFO_VAL);
+ memcpy(val, tmp, vallen);
+ if (ptlrpc_rep_need_swab(req)) {
+ if (KEY_IS(KEY_FID2PATH))
+ lustre_swab_fid2path(val);
+ }
+ }
+ ptlrpc_req_finished(req);
+
+ return rc;
+}
+
+static void lustre_swab_hai(struct hsm_action_item *h)
+{
+ __swab32s(&h->hai_len);
+ __swab32s(&h->hai_action);
+ lustre_swab_lu_fid(&h->hai_fid);
+ lustre_swab_lu_fid(&h->hai_dfid);
+ __swab64s(&h->hai_cookie);
+ __swab64s(&h->hai_extent.offset);
+ __swab64s(&h->hai_extent.length);
+ __swab64s(&h->hai_gid);
+}
+
+static void lustre_swab_hal(struct hsm_action_list *h)
+{
+ struct hsm_action_item *hai;
+ int i;
+
+ __swab32s(&h->hal_version);
+ __swab32s(&h->hal_count);
+ __swab32s(&h->hal_archive_id);
+ __swab64s(&h->hal_flags);
+ hai = hai_zero(h);
+ for (i = 0; i < h->hal_count; i++) {
+ lustre_swab_hai(hai);
+ hai = hai_next(hai);
+ }
+}
+
+static void lustre_swab_kuch(struct kuc_hdr *l)
+{
+ __swab16s(&l->kuc_magic);
+ /* __u8 l->kuc_transport */
+ __swab16s(&l->kuc_msgtype);
+ __swab16s(&l->kuc_msglen);
+}
+
+static int mdc_ioc_hsm_ct_start(struct obd_export *exp,
+ struct lustre_kernelcomm *lk)
+{
+ struct obd_import *imp = class_exp2cliimp(exp);
+ __u32 archive = lk->lk_data;
+ int rc = 0;
+
+ if (lk->lk_group != KUC_GRP_HSM) {
+ CERROR("Bad copytool group %d\n", lk->lk_group);
+ return -EINVAL;
+ }
+
+ CDEBUG(D_HSM, "CT start r%d w%d u%d g%d f%#x\n", lk->lk_rfd, lk->lk_wfd,
+ lk->lk_uid, lk->lk_group, lk->lk_flags);
+
+ if (lk->lk_flags & LK_FLG_STOP) {
+ /* Unregister with the coordinator */
+ rc = mdc_ioc_hsm_ct_unregister(imp);
+ } else {
+ rc = mdc_ioc_hsm_ct_register(imp, archive);
+ }
+
+ return rc;
+}
+
+/**
+ * Send a message to any listening copytools
+ * @param val KUC message (kuc_hdr + hsm_action_list)
+ * @param len total length of message
+ */
+static int mdc_hsm_copytool_send(int len, void *val)
+{
+ struct kuc_hdr *lh = (struct kuc_hdr *)val;
+ struct hsm_action_list *hal = (struct hsm_action_list *)(lh + 1);
+ int rc;
+
+ if (len < sizeof(*lh) + sizeof(*hal)) {
+ CERROR("Short HSM message %d < %d\n", len,
+ (int) (sizeof(*lh) + sizeof(*hal)));
+ return -EPROTO;
+ }
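+ /* The message may arrive in the peer's byte order: swab it if the
+ * magic is byte-swapped, otherwise require an exact match. */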
+ if (lh->kuc_magic == __swab16(KUC_MAGIC)) {
+ lustre_swab_kuch(lh);
+ lustre_swab_hal(hal);
+ } else if (lh->kuc_magic != KUC_MAGIC) {
+ CERROR("Bad magic %x!=%x\n", lh->kuc_magic, KUC_MAGIC);
+ return -EPROTO;
+ }
+
+ CDEBUG(D_HSM, " Received message mg=%x t=%d m=%d l=%d actions=%d "
+ "on %s\n",
+ lh->kuc_magic, lh->kuc_transport, lh->kuc_msgtype,
+ lh->kuc_msglen, hal->hal_count, hal->hal_fsname);
+
+ /* Broadcast to HSM listeners */
+ rc = libcfs_kkuc_group_put(KUC_GRP_HSM, lh);
+
+ return rc;
+}
+
+/**
+ * Callback function passed to kuc for re-registering each HSM copytool
+ * running on the MDC after MDT shutdown/recovery.
+ * @param data archive id served by the copytool
+ * @param cb_arg callback argument (obd_import)
+ */
+static int mdc_hsm_ct_reregister(__u32 data, void *cb_arg)
+{
+ struct obd_import *imp = (struct obd_import *)cb_arg;
+ __u32 archive = data;
+ int rc;
+
+ CDEBUG(D_HA, "recover copytool registration to MDT (archive=%#x)\n",
+ archive);
+ rc = mdc_ioc_hsm_ct_register(imp, archive);
+
+ /* ignore error if the copytool is already registered */
+ return ((rc != 0) && (rc != -EEXIST)) ? rc : 0;
+}
+
+/**
+ * Re-establish all kuc contexts with MDT
+ * after MDT shutdown/recovery.
+ */
+static int mdc_kuc_reregister(struct obd_import *imp)
+{
+ /* re-register HSM agents */
+ return libcfs_kkuc_group_foreach(KUC_GRP_HSM, mdc_hsm_ct_reregister,
+ (void *)imp);
+}
+
+int mdc_set_info_async(const struct lu_env *env,
+ struct obd_export *exp,
+ obd_count keylen, void *key,
+ obd_count vallen, void *val,
+ struct ptlrpc_request_set *set)
+{
+ struct obd_import *imp = class_exp2cliimp(exp);
+ int rc;
+
+ if (KEY_IS(KEY_READ_ONLY)) {
+ if (vallen != sizeof(int))
+ return -EINVAL;
+
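+ /* Record the read-only setting in both the original and the current
+ * connect flags so that it is renegotiated on reconnect */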
+ spin_lock(&imp->imp_lock);
+ if (*((int *)val)) {
+ imp->imp_connect_flags_orig |= OBD_CONNECT_RDONLY;
+ imp->imp_connect_data.ocd_connect_flags |=
+ OBD_CONNECT_RDONLY;
+ } else {
+ imp->imp_connect_flags_orig &= ~OBD_CONNECT_RDONLY;
+ imp->imp_connect_data.ocd_connect_flags &=
+ ~OBD_CONNECT_RDONLY;
+ }
+ spin_unlock(&imp->imp_lock);
+
+ rc = do_set_info_async(imp, MDS_SET_INFO, LUSTRE_MDS_VERSION,
+ keylen, key, vallen, val, set);
+ return rc;
+ }
+ if (KEY_IS(KEY_SPTLRPC_CONF)) {
+ sptlrpc_conf_client_adapt(exp->exp_obd);
+ return 0;
+ }
+ if (KEY_IS(KEY_FLUSH_CTX)) {
+ sptlrpc_import_flush_my_ctx(imp);
+ return 0;
+ }
+ if (KEY_IS(KEY_MDS_CONN)) {
+ /* mds-mds import */
+ spin_lock(&imp->imp_lock);
+ imp->imp_server_timeout = 1;
+ spin_unlock(&imp->imp_lock);
+ imp->imp_client->cli_request_portal = MDS_MDS_PORTAL;
+ CDEBUG(D_OTHER, "%s: timeout / 2\n", exp->exp_obd->obd_name);
+ return 0;
+ }
+ if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
+ rc = do_set_info_async(imp, MDS_SET_INFO, LUSTRE_MDS_VERSION,
+ keylen, key, vallen, val, set);
+ return rc;
+ }
+ if (KEY_IS(KEY_HSM_COPYTOOL_SEND)) {
+ rc = mdc_hsm_copytool_send(vallen, val);
+ return rc;
+ }
+
+ CERROR("Unknown key %s\n", (char *)key);
+ return -EINVAL;
+}
+
+int mdc_get_info(const struct lu_env *env, struct obd_export *exp,
+ __u32 keylen, void *key, __u32 *vallen, void *val,
+ struct lov_stripe_md *lsm)
+{
+ int rc = -EINVAL;
+
+ if (KEY_IS(KEY_MAX_EASIZE)) {
+ int mdsize, *max_easize;
+
+ if (*vallen != sizeof(int))
+ return -EINVAL;
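+ /* Grow the cached maximum MDS EA size if needed and report it back */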
+ mdsize = *(int *)val;
+ if (mdsize > exp->exp_obd->u.cli.cl_max_mds_easize)
+ exp->exp_obd->u.cli.cl_max_mds_easize = mdsize;
+ max_easize = val;
+ *max_easize = exp->exp_obd->u.cli.cl_max_mds_easize;
+ return 0;
+ } else if (KEY_IS(KEY_CONN_DATA)) {
+ struct obd_import *imp = class_exp2cliimp(exp);
+ struct obd_connect_data *data = val;
+
+ if (*vallen != sizeof(*data))
+ return -EINVAL;
+
+ *data = imp->imp_connect_data;
+ return 0;
+ } else if (KEY_IS(KEY_TGT_COUNT)) {
+ *((int *)val) = 1;
+ return 0;
+ }
+
+ rc = mdc_get_info_rpc(exp, keylen, key, *vallen, val);
+
+ return rc;
+}
+
+static int mdc_pin(struct obd_export *exp, const struct lu_fid *fid,
+ struct obd_capa *oc, struct obd_client_handle *handle,
+ int flags)
+{
+ struct ptlrpc_request *req;
+ struct mdt_body *body;
+ int rc;
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_PIN);
+ if (req == NULL)
+ return -ENOMEM;
+
+ mdc_set_capa_size(req, &RMF_CAPA1, oc);
+
+ rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_PIN);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ mdc_pack_body(req, fid, oc, 0, 0, -1, flags);
+
+ ptlrpc_request_set_replen(req);
+
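+ /* Hold the MDC RPC lock while the pin request is in flight */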
+ mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
+ rc = ptlrpc_queue_wait(req);
+ mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
+ if (rc) {
+ CERROR("Pin failed: %d\n", rc);
+ GOTO(err_out, rc);
+ }
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ if (body == NULL)
+ GOTO(err_out, rc = -EPROTO);
+
+ handle->och_fh = body->handle;
+ handle->och_magic = OBD_CLIENT_HANDLE_MAGIC;
+
+ handle->och_mod = obd_mod_alloc();
+ if (handle->och_mod == NULL) {
+ DEBUG_REQ(D_ERROR, req, "can't allocate md_open_data");
+ GOTO(err_out, rc = -ENOMEM);
+ }
+ handle->och_mod->mod_open_req = req; /* will be dropped by unpin */
+
+ return 0;
+
+err_out:
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+static int mdc_unpin(struct obd_export *exp, struct obd_client_handle *handle,
+ int flag)
+{
+ struct ptlrpc_request *req;
+ struct mdt_body *body;
+ int rc;
+
+ req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MDS_UNPIN,
+ LUSTRE_MDS_VERSION, MDS_UNPIN);
+ if (req == NULL)
+ return -ENOMEM;
+
+ body = req_capsule_client_get(&req->rq_pill, &RMF_MDT_BODY);
+ body->handle = handle->och_fh;
+ body->flags = flag;
+
+ ptlrpc_request_set_replen(req);
+
+ mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
+ rc = ptlrpc_queue_wait(req);
+ mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
+
+ if (rc != 0)
+ CERROR("Unpin failed: %d\n", rc);
+
+ ptlrpc_req_finished(req);
+ ptlrpc_req_finished(handle->och_mod->mod_open_req);
+
+ obd_mod_put(handle->och_mod);
+ return rc;
+}
+
+int mdc_sync(struct obd_export *exp, const struct lu_fid *fid,
+ struct obd_capa *oc, struct ptlrpc_request **request)
+{
+ struct ptlrpc_request *req;
+ int rc;
+
+ *request = NULL;
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_SYNC);
+ if (req == NULL)
+ return -ENOMEM;
+
+ mdc_set_capa_size(req, &RMF_CAPA1, oc);
+
+ rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_SYNC);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ mdc_pack_body(req, fid, oc, 0, 0, -1, 0);
+
+ ptlrpc_request_set_replen(req);
+
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ ptlrpc_req_finished(req);
+ else
+ *request = req;
+ return rc;
+}
+
+static int mdc_import_event(struct obd_device *obd, struct obd_import *imp,
+ enum obd_import_event event)
+{
+ int rc = 0;
+
+ LASSERT(imp->imp_obd == obd);
+
+ switch (event) {
+ case IMP_EVENT_DISCON: {
+#if 0
+ /* XXX Pass the event up the OBD stack; used only for FLD now */
+ rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DISCON, NULL);
+#endif
+ break;
+ }
+ case IMP_EVENT_INACTIVE: {
+ struct client_obd *cli = &obd->u.cli;
+ /*
+ * Flush current sequence to make client obtain new one
+ * from server in case of disconnect/reconnect.
+ */
+ if (cli->cl_seq != NULL)
+ seq_client_flush(cli->cl_seq);
+
+ rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL);
+ break;
+ }
+ case IMP_EVENT_INVALIDATE: {
+ struct ldlm_namespace *ns = obd->obd_namespace;
+
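+ /* Drop all namespace locks locally; no cancel RPCs are sent */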
+ ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
+
+ break;
+ }
+ case IMP_EVENT_ACTIVE:
+ rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL);
+ /* redo the kuc registration after reconnecting */
+ if (rc == 0)
+ rc = mdc_kuc_reregister(imp);
+ break;
+ case IMP_EVENT_OCD:
+ rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
+ break;
+ case IMP_EVENT_DEACTIVATE:
+ case IMP_EVENT_ACTIVATE:
+ break;
+ default:
+ CERROR("Unknown import event %x\n", event);
+ LBUG();
+ }
+ return rc;
+}
+
+int mdc_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
+ struct md_op_data *op_data)
+{
+ struct client_obd *cli = &exp->exp_obd->u.cli;
+ struct lu_client_seq *seq = cli->cl_seq;
+
+ return seq_client_alloc_fid(NULL, seq, fid);
+}
+
+struct obd_uuid *mdc_get_uuid(struct obd_export *exp)
+{
+ struct client_obd *cli = &exp->exp_obd->u.cli;
+
+ return &cli->cl_target_uuid;
+}
+
+/**
+ * Determine whether the lock can be canceled before replaying it during
+ * recovery. A non-zero value is returned if the lock can be canceled;
+ * zero is returned otherwise.
+ */
+static int mdc_cancel_for_recovery(struct ldlm_lock *lock)
+{
+ if (lock->l_resource->lr_type != LDLM_IBITS)
+ return 0;
+
+ /* FIXME: if we ever get into a situation where there are too many
+ * open files with open locks on a single node, then we really
+ * should replay these open locks to regain them */
+ if (lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_OPEN)
+ return 0;
+
+ return 1;
+}
+
+static int mdc_resource_inode_free(struct ldlm_resource *res)
+{
+ if (res->lr_lvb_inode)
+ res->lr_lvb_inode = NULL;
+
+ return 0;
+}
+
+struct ldlm_valblock_ops inode_lvbo = {
+ .lvbo_free = mdc_resource_inode_free,
+};
+
+static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
+{
+ struct client_obd *cli = &obd->u.cli;
+ struct lprocfs_static_vars lvars = { 0 };
+ int rc;
+
+ OBD_ALLOC(cli->cl_rpc_lock, sizeof (*cli->cl_rpc_lock));
+ if (!cli->cl_rpc_lock)
+ return -ENOMEM;
+ mdc_init_rpc_lock(cli->cl_rpc_lock);
+
+ ptlrpcd_addref();
+
+ OBD_ALLOC(cli->cl_close_lock, sizeof (*cli->cl_close_lock));
+ if (!cli->cl_close_lock)
+ GOTO(err_rpc_lock, rc = -ENOMEM);
+ mdc_init_rpc_lock(cli->cl_close_lock);
+
+ rc = client_obd_setup(obd, cfg);
+ if (rc)
+ GOTO(err_close_lock, rc);
+ lprocfs_mdc_init_vars(&lvars);
+ lprocfs_obd_setup(obd, lvars.obd_vars);
+ sptlrpc_lprocfs_cliobd_attach(obd);
+ ptlrpc_lprocfs_register_obd(obd);
+
+ ns_register_cancel(obd->obd_namespace, mdc_cancel_for_recovery);
+
+ obd->obd_namespace->ns_lvbo = &inode_lvbo;
+
+ rc = obd_llog_init(obd, &obd->obd_olg, obd, NULL);
+ if (rc) {
+ mdc_cleanup(obd);
+ CERROR("failed to setup llogging subsystems\n");
+ }
+
+ return rc;
+
+err_close_lock:
+ OBD_FREE(cli->cl_close_lock, sizeof (*cli->cl_close_lock));
+err_rpc_lock:
+ OBD_FREE(cli->cl_rpc_lock, sizeof (*cli->cl_rpc_lock));
+ ptlrpcd_decref();
+ return rc;
+}
+
+/* Initialize the default and maximum LOV EA and cookie sizes. This allows
+ * us to make MDS RPCs with large enough reply buffers to hold the
+ * maximum-sized (= maximum striped) EA and cookie without having to
+ * calculate this (via a call into the LOV + OSCs) each time we make an RPC. */
+static int mdc_init_ea_size(struct obd_export *exp, int easize,
+ int def_easize, int cookiesize)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct client_obd *cli = &obd->u.cli;
+
+ if (cli->cl_max_mds_easize < easize)
+ cli->cl_max_mds_easize = easize;
+
+ if (cli->cl_default_mds_easize < def_easize)
+ cli->cl_default_mds_easize = def_easize;
+
+ if (cli->cl_max_mds_cookiesize < cookiesize)
+ cli->cl_max_mds_cookiesize = cookiesize;
+
+ return 0;
+}
+
+static int mdc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
+{
+ int rc = 0;
+
+ switch (stage) {
+ case OBD_CLEANUP_EARLY:
+ break;
+ case OBD_CLEANUP_EXPORTS:
+ /* Failsafe, ok if racy */
+ if (obd->obd_type->typ_refcnt <= 1)
+ libcfs_kkuc_group_rem(0, KUC_GRP_HSM);
+
+ obd_cleanup_client_import(obd);
+ ptlrpc_lprocfs_unregister_obd(obd);
+ lprocfs_obd_cleanup(obd);
+
+ rc = obd_llog_finish(obd, 0);
+ if (rc != 0)
+ CERROR("failed to cleanup llogging subsystems\n");
+ break;
+ }
+ return rc;
+}
+
+static int mdc_cleanup(struct obd_device *obd)
+{
+ struct client_obd *cli = &obd->u.cli;
+
+ OBD_FREE(cli->cl_rpc_lock, sizeof (*cli->cl_rpc_lock));
+ OBD_FREE(cli->cl_close_lock, sizeof (*cli->cl_close_lock));
+
+ ptlrpcd_decref();
+
+ return client_obd_cleanup(obd);
+}
+
+
+static int mdc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
+ struct obd_device *tgt, int *index)
+{
+ struct llog_ctxt *ctxt;
+ int rc;
+
+ LASSERT(olg == &obd->obd_olg);
+
+ rc = llog_setup(NULL, obd, olg, LLOG_CHANGELOG_REPL_CTXT, tgt,
+ &llog_client_ops);
+ if (rc)
+ return rc;
+
+ ctxt = llog_group_get_ctxt(olg, LLOG_CHANGELOG_REPL_CTXT);
+ llog_initiator_connect(ctxt);
+ llog_ctxt_put(ctxt);
+
+ return 0;
+}
+
+static int mdc_llog_finish(struct obd_device *obd, int count)
+{
+ struct llog_ctxt *ctxt;
+
+ ctxt = llog_get_context(obd, LLOG_CHANGELOG_REPL_CTXT);
+ if (ctxt)
+ llog_cleanup(NULL, ctxt);
+
+ return 0;
+}
+
+static int mdc_process_config(struct obd_device *obd, obd_count len, void *buf)
+{
+ struct lustre_cfg *lcfg = buf;
+ struct lprocfs_static_vars lvars = { 0 };
+ int rc = 0;
+
+ lprocfs_mdc_init_vars(&lvars);
+ switch (lcfg->lcfg_command) {
+ default:
+ rc = class_process_proc_param(PARAM_MDC, lvars.obd_vars,
+ lcfg, obd);
+ if (rc > 0)
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+
+/* get remote permission for current user on fid */
+int mdc_get_remote_perm(struct obd_export *exp, const struct lu_fid *fid,
+ struct obd_capa *oc, __u32 suppgid,
+ struct ptlrpc_request **request)
+{
+ struct ptlrpc_request *req;
+ int rc;
+
+ LASSERT(client_is_remote(exp));
+
+ *request = NULL;
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR);
+ if (req == NULL)
+ return -ENOMEM;
+
+ mdc_set_capa_size(req, &RMF_CAPA1, oc);
+
+ rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ mdc_pack_body(req, fid, oc, OBD_MD_FLRMTPERM, 0, suppgid, 0);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
+ sizeof(struct mdt_remote_perm));
+
+ ptlrpc_request_set_replen(req);
+
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ ptlrpc_req_finished(req);
+ else
+ *request = req;
+ return rc;
+}
+
+static int mdc_interpret_renew_capa(const struct lu_env *env,
+ struct ptlrpc_request *req, void *args,
+ int status)
+{
+ struct mdc_renew_capa_args *ra = args;
+ struct mdt_body *body = NULL;
+ struct lustre_capa *capa;
+
+ if (status)
+ GOTO(out, capa = ERR_PTR(status));
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ if (body == NULL)
+ GOTO(out, capa = ERR_PTR(-EFAULT));
+
+ if ((body->valid & OBD_MD_FLOSSCAPA) == 0)
+ GOTO(out, capa = ERR_PTR(-ENOENT));
+
+ capa = req_capsule_server_get(&req->rq_pill, &RMF_CAPA2);
+ if (!capa)
+ GOTO(out, capa = ERR_PTR(-EFAULT));
+out:
+ ra->ra_cb(ra->ra_oc, capa);
+ return 0;
+}
+
+static int mdc_renew_capa(struct obd_export *exp, struct obd_capa *oc,
+ renew_capa_cb_t cb)
+{
+ struct ptlrpc_request *req;
+ struct mdc_renew_capa_args *ra;
+
+ req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MDS_GETATTR,
+ LUSTRE_MDS_VERSION, MDS_GETATTR);
+ if (req == NULL)
+ return -ENOMEM;
+
+ /* NB, OBD_MD_FLOSSCAPA is set here, but it doesn't necessarily mean
+ * that the capa to renew is an OSS capa.
+ */
+ mdc_pack_body(req, &oc->c_capa.lc_fid, oc, OBD_MD_FLOSSCAPA, 0, -1, 0);
+ ptlrpc_request_set_replen(req);
+
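+ /* Stash the capa and the completion callback in the request's async
+ * args so that mdc_interpret_renew_capa can find them */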
+ CLASSERT(sizeof(*ra) <= sizeof(req->rq_async_args));
+ ra = ptlrpc_req_async_args(req);
+ ra->ra_oc = oc;
+ ra->ra_cb = cb;
+ req->rq_interpret_reply = mdc_interpret_renew_capa;
+ ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
+ return 0;
+}
+
+static int mdc_connect(const struct lu_env *env,
+ struct obd_export **exp,
+ struct obd_device *obd, struct obd_uuid *cluuid,
+ struct obd_connect_data *data,
+ void *localdata)
+{
+ struct obd_import *imp = obd->u.cli.cl_import;
+
+ /* mds-mds import features */
+ if (data && (data->ocd_connect_flags & OBD_CONNECT_MDS_MDS)) {
+ spin_lock(&imp->imp_lock);
+ imp->imp_server_timeout = 1;
+ spin_unlock(&imp->imp_lock);
+ imp->imp_client->cli_request_portal = MDS_MDS_PORTAL;
+ CDEBUG(D_OTHER, "%s: Set 'mds' portal and timeout\n",
+ obd->obd_name);
+ }
+
+ return client_connect_import(env, exp, obd, cluuid, data, NULL);
+}
+
+struct obd_ops mdc_obd_ops = {
+ .o_owner = THIS_MODULE,
+ .o_setup = mdc_setup,
+ .o_precleanup = mdc_precleanup,
+ .o_cleanup = mdc_cleanup,
+ .o_add_conn = client_import_add_conn,
+ .o_del_conn = client_import_del_conn,
+ .o_connect = mdc_connect,
+ .o_disconnect = client_disconnect_export,
+ .o_iocontrol = mdc_iocontrol,
+ .o_set_info_async = mdc_set_info_async,
+ .o_statfs = mdc_statfs,
+ .o_pin = mdc_pin,
+ .o_unpin = mdc_unpin,
+ .o_fid_init = client_fid_init,
+ .o_fid_fini = client_fid_fini,
+ .o_fid_alloc = mdc_fid_alloc,
+ .o_import_event = mdc_import_event,
+ .o_llog_init = mdc_llog_init,
+ .o_llog_finish = mdc_llog_finish,
+ .o_get_info = mdc_get_info,
+ .o_process_config = mdc_process_config,
+ .o_get_uuid = mdc_get_uuid,
+ .o_quotactl = mdc_quotactl,
+ .o_quotacheck = mdc_quotacheck
+};
+
+struct md_ops mdc_md_ops = {
+ .m_getstatus = mdc_getstatus,
+ .m_null_inode = mdc_null_inode,
+ .m_find_cbdata = mdc_find_cbdata,
+ .m_close = mdc_close,
+ .m_create = mdc_create,
+ .m_done_writing = mdc_done_writing,
+ .m_enqueue = mdc_enqueue,
+ .m_getattr = mdc_getattr,
+ .m_getattr_name = mdc_getattr_name,
+ .m_intent_lock = mdc_intent_lock,
+ .m_link = mdc_link,
+ .m_is_subdir = mdc_is_subdir,
+ .m_rename = mdc_rename,
+ .m_setattr = mdc_setattr,
+ .m_setxattr = mdc_setxattr,
+ .m_getxattr = mdc_getxattr,
+ .m_sync = mdc_sync,
+ .m_readpage = mdc_readpage,
+ .m_unlink = mdc_unlink,
+ .m_cancel_unused = mdc_cancel_unused,
+ .m_init_ea_size = mdc_init_ea_size,
+ .m_set_lock_data = mdc_set_lock_data,
+ .m_lock_match = mdc_lock_match,
+ .m_get_lustre_md = mdc_get_lustre_md,
+ .m_free_lustre_md = mdc_free_lustre_md,
+ .m_set_open_replay_data = mdc_set_open_replay_data,
+ .m_clear_open_replay_data = mdc_clear_open_replay_data,
+ .m_renew_capa = mdc_renew_capa,
+ .m_unpack_capa = mdc_unpack_capa,
+ .m_get_remote_perm = mdc_get_remote_perm,
+ .m_intent_getattr_async = mdc_intent_getattr_async,
+ .m_revalidate_lock = mdc_revalidate_lock
+};
+
+int __init mdc_init(void)
+{
+ int rc;
+ struct lprocfs_static_vars lvars = { 0 };
+ lprocfs_mdc_init_vars(&lvars);
+
+ rc = class_register_type(&mdc_obd_ops, &mdc_md_ops, lvars.module_vars,
+ LUSTRE_MDC_NAME, NULL);
+ return rc;
+}
+
+static void /*__exit*/ mdc_exit(void)
+{
+ class_unregister_type(LUSTRE_MDC_NAME);
+}
+
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("Lustre Metadata Client");
+MODULE_LICENSE("GPL");
+
+module_init(mdc_init);
+module_exit(mdc_exit);
diff --git a/drivers/staging/lustre/lustre/mgc/Makefile b/drivers/staging/lustre/lustre/mgc/Makefile
new file mode 100644
index 0000000..2672463
--- /dev/null
+++ b/drivers/staging/lustre/lustre/mgc/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_LUSTRE_FS) += mgc.o
+mgc-y := mgc_request.o lproc_mgc.o
+
+
+ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/mgc/libmgc.c b/drivers/staging/lustre/lustre/mgc/libmgc.c
new file mode 100644
index 0000000..7b4947c
--- /dev/null
+++ b/drivers/staging/lustre/lustre/mgc/libmgc.c
@@ -0,0 +1,161 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/mgc/libmgc.c
+ *
+ * Lustre Management Client
+ * Author: Nathan Rutman <nathan@clusterfs.com>
+ */
+
+/* Minimal MGC for liblustre: only used to read the config log from the MGS
+ at setup time, no updates. */
+
+#define DEBUG_SUBSYSTEM S_MGC
+
+#include <liblustre.h>
+
+#include <obd_class.h>
+#include <lustre_dlm.h>
+#include <lustre_log.h>
+#include <lustre_fsfilt.h>
+#include <lustre_disk.h>
+
+
+static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
+{
+ int rc;
+
+ ptlrpcd_addref();
+
+ rc = client_obd_setup(obd, lcfg);
+ if (rc)
+ GOTO(err_decref, rc);
+
+ /* liblustre only support null flavor to MGS */
+ obd->u.cli.cl_flvr_mgc.sf_rpc = SPTLRPC_FLVR_NULL;
+
+ rc = obd_llog_init(obd, &obd->obd_olg, obd, NULL);
+ if (rc) {
+ CERROR("failed to setup llogging subsystems\n");
+ GOTO(err_cleanup, rc);
+ }
+
+ return rc;
+
+err_cleanup:
+ client_obd_cleanup(obd);
+err_decref:
+ ptlrpcd_decref();
+ return rc;
+}
+
+static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
+{
+ int rc = 0;
+
+ switch (stage) {
+ case OBD_CLEANUP_EARLY:
+ case OBD_CLEANUP_EXPORTS:
+ obd_cleanup_client_import(obd);
+ rc = obd_llog_finish(obd, 0);
+ if (rc != 0)
+ CERROR("failed to cleanup llogging subsystems\n");
+ break;
+ }
+ return rc;
+}
+
+static int mgc_cleanup(struct obd_device *obd)
+{
+ struct client_obd *cli = &obd->u.cli;
+ int rc;
+
+ LASSERT(cli->cl_mgc_vfsmnt == NULL);
+
+ ptlrpcd_decref();
+
+ rc = client_obd_cleanup(obd);
+ return rc;
+}
+
+static int mgc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
+ struct obd_device *tgt, int *index)
+{
+ struct llog_ctxt *ctxt;
+ int rc;
+
+ LASSERT(olg == &obd->obd_olg);
+ rc = llog_setup(NULL, obd, olg, LLOG_CONFIG_REPL_CTXT, tgt,
+ &llog_client_ops);
+ if (rc < 0)
+ return rc;
+
+ ctxt = llog_group_get_ctxt(olg, LLOG_CONFIG_REPL_CTXT);
+ llog_initiator_connect(ctxt);
+ llog_ctxt_put(ctxt);
+
+ return rc;
+}
+
+static int mgc_llog_finish(struct obd_device *obd, int count)
+{
+ struct llog_ctxt *ctxt;
+
+
+ ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT);
+ if (ctxt)
+ llog_cleanup(NULL, ctxt);
+
+ return 0;
+}
+
+struct obd_ops mgc_obd_ops = {
+ .o_owner = THIS_MODULE,
+ .o_setup = mgc_setup,
+ .o_precleanup = mgc_precleanup,
+ .o_cleanup = mgc_cleanup,
+ .o_add_conn = client_import_add_conn,
+ .o_del_conn = client_import_del_conn,
+ .o_connect = client_connect_import,
+ .o_disconnect = client_disconnect_export,
+ .o_llog_init = mgc_llog_init,
+ .o_llog_finish = mgc_llog_finish,
+};
+
+int __init mgc_init(void)
+{
+ return class_register_type(&mgc_obd_ops, NULL,
+ NULL, LUSTRE_MGC_NAME, NULL);
+}
diff --git a/drivers/staging/lustre/lustre/mgc/lproc_mgc.c b/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
new file mode 100644
index 0000000..ebecec2
--- /dev/null
+++ b/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
@@ -0,0 +1,83 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+#define DEBUG_SUBSYSTEM S_CLASS
+
+#include <linux/vfs.h>
+#include <obd_class.h>
+#include <lprocfs_status.h>
+#include "mgc_internal.h"
+
+#ifdef LPROCFS
+
+LPROC_SEQ_FOPS_RO_TYPE(mgc, uuid);
+LPROC_SEQ_FOPS_RO_TYPE(mgc, connect_flags);
+LPROC_SEQ_FOPS_RO_TYPE(mgc, server_uuid);
+LPROC_SEQ_FOPS_RO_TYPE(mgc, conn_uuid);
+LPROC_SEQ_FOPS_RO_TYPE(mgc, import);
+LPROC_SEQ_FOPS_RO_TYPE(mgc, state);
+
+LPROC_SEQ_FOPS_WR_ONLY(mgc, ping);
+
+static int mgc_ir_state_seq_show(struct seq_file *m, void *v)
+{
+ return lprocfs_mgc_rd_ir_state(m, m->private);
+}
+LPROC_SEQ_FOPS_RO(mgc_ir_state);
+
+static struct lprocfs_vars lprocfs_mgc_obd_vars[] = {
+ { "uuid", &mgc_uuid_fops, 0, 0 },
+ { "ping", &mgc_ping_fops, 0, 0222 },
+ { "connect_flags", &mgc_connect_flags_fops, 0, 0 },
+ { "mgs_server_uuid", &mgc_server_uuid_fops, 0, 0 },
+ { "mgs_conn_uuid", &mgc_conn_uuid_fops, 0, 0 },
+ { "import", &mgc_import_fops, 0, 0 },
+ { "state", &mgc_state_fops, 0, 0 },
+ { "ir_state", &mgc_ir_state_fops, 0, 0 },
+ { 0 }
+};
+
+LPROC_SEQ_FOPS_RO_TYPE(mgc, numrefs);
+static struct lprocfs_vars lprocfs_mgc_module_vars[] = {
+ { "num_refs", &mgc_numrefs_fops, 0, 0 },
+ { 0 }
+};
+
+void lprocfs_mgc_init_vars(struct lprocfs_static_vars *lvars)
+{
+ lvars->module_vars = lprocfs_mgc_module_vars;
+ lvars->obd_vars = lprocfs_mgc_obd_vars;
+}
+#endif /* LPROCFS */
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_internal.h b/drivers/staging/lustre/lustre/mgc/mgc_internal.h
new file mode 100644
index 0000000..dbd6982
--- /dev/null
+++ b/drivers/staging/lustre/lustre/mgc/mgc_internal.h
@@ -0,0 +1,73 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef _MGC_INTERNAL_H
+#define _MGC_INTERNAL_H
+
+#include <linux/libcfs/libcfs.h>
+#include <lustre/lustre_idl.h>
+#include <lustre_lib.h>
+#include <lustre_dlm.h>
+#include <lustre_log.h>
+#include <lustre_export.h>
+
+#ifdef LPROCFS
+void lprocfs_mgc_init_vars(struct lprocfs_static_vars *lvars);
+int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data);
+#else
+static inline void lprocfs_mgc_init_vars(struct lprocfs_static_vars *lvars)
+{
+ memset(lvars, 0, sizeof(*lvars));
+}
+static inline int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data)
+{
+ return 0;
+}
+#endif /* LPROCFS */
+
+int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld);
+
+static inline int cld_is_sptlrpc(struct config_llog_data *cld)
+{
+ return cld->cld_type == CONFIG_T_SPTLRPC;
+}
+
+static inline int cld_is_recover(struct config_llog_data *cld)
+{
+ return cld->cld_type == CONFIG_T_RECOVER;
+}
+
+#endif /* _MGC_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
new file mode 100644
index 0000000..12a9ede
--- /dev/null
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -0,0 +1,1825 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/mgc/mgc_request.c
+ *
+ * Author: Nathan Rutman <nathan@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_MGC
+#define D_MGC D_CONFIG /*|D_WARNING*/
+
+# include <linux/module.h>
+# include <linux/pagemap.h>
+# include <linux/miscdevice.h>
+# include <linux/init.h>
+
+#include <obd_class.h>
+#include <lustre_dlm.h>
+#include <lprocfs_status.h>
+#include <lustre_log.h>
+#include <lustre_fsfilt.h>
+#include <lustre_disk.h>
+#include "mgc_internal.h"
+
+static int mgc_name2resid(char *name, int len, struct ldlm_res_id *res_id,
+ int type)
+{
+ __u64 resname = 0;
+
+ if (len > 8) {
+ CERROR("name too long: %s\n", name);
+ return -EINVAL;
+ }
+ if (len <= 0) {
+ CERROR("missing name: %s\n", name);
+ return -EINVAL;
+ }
+ memcpy(&resname, name, len);
+
+ /* Always use the same endianness for the resid */
+ memset(res_id, 0, sizeof(*res_id));
+ res_id->name[0] = cpu_to_le64(resname);
+ /* XXX: unfortunately, sptlrpc and config llog share one lock */
+ switch (type) {
+ case CONFIG_T_CONFIG:
+ case CONFIG_T_SPTLRPC:
+ resname = 0;
+ break;
+ case CONFIG_T_RECOVER:
+ resname = type;
+ break;
+ default:
+ LBUG();
+ }
+ res_id->name[1] = cpu_to_le64(resname);
+ CDEBUG(D_MGC, "log %s to resid "LPX64"/"LPX64" (%.8s)\n", name,
+ res_id->name[0], res_id->name[1], (char *)&res_id->name[0]);
+ return 0;
+}
+
+int mgc_fsname2resid(char *fsname, struct ldlm_res_id *res_id, int type)
+{
+ /* fsname is at most 8 chars long and may contain "-",
+ * e.g. "lustre", "SUN-000" */
+ return mgc_name2resid(fsname, strlen(fsname), res_id, type);
+}
+EXPORT_SYMBOL(mgc_fsname2resid);
+
+int mgc_logname2resid(char *logname, struct ldlm_res_id *res_id, int type)
+{
+ char *name_end;
+ int len;
+
+ /* logname consists of "fsname-nodetype".
+ * e.g. "lustre-MDT0001", "SUN-000-client" */
+ name_end = strrchr(logname, '-');
+ LASSERT(name_end);
+ len = name_end - logname;
+ return mgc_name2resid(logname, len, res_id, type);
+}
+
+/********************** config llog list **********************/
+static LIST_HEAD(config_llog_list);
+static DEFINE_SPINLOCK(config_list_lock);
+
+/* Take a reference to a config log */
+static int config_log_get(struct config_llog_data *cld)
+{
+ atomic_inc(&cld->cld_refcount);
+ CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname,
+ atomic_read(&cld->cld_refcount));
+ return 0;
+}
+
+/* Drop a reference to a config log. When no longer referenced,
+ we can free the config log data */
+static void config_log_put(struct config_llog_data *cld)
+{
+ CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname,
+ atomic_read(&cld->cld_refcount));
+ LASSERT(atomic_read(&cld->cld_refcount) > 0);
+
+ /* spinlock to make sure no item with 0 refcount in the list */
+ if (atomic_dec_and_lock(&cld->cld_refcount, &config_list_lock)) {
+ list_del(&cld->cld_list_chain);
+ spin_unlock(&config_list_lock);
+
+ CDEBUG(D_MGC, "dropping config log %s\n", cld->cld_logname);
+
+ if (cld->cld_recover)
+ config_log_put(cld->cld_recover);
+ if (cld->cld_sptlrpc)
+ config_log_put(cld->cld_sptlrpc);
+ if (cld_is_sptlrpc(cld))
+ sptlrpc_conf_log_stop(cld->cld_logname);
+
+ class_export_put(cld->cld_mgcexp);
+ OBD_FREE(cld, sizeof(*cld) + strlen(cld->cld_logname) + 1);
+ }
+}
+
+/* Find a config log by name */
+static
+struct config_llog_data *config_log_find(char *logname,
+ struct config_llog_instance *cfg)
+{
+ struct config_llog_data *cld;
+ struct config_llog_data *found = NULL;
+ void *instance;
+
+ LASSERT(logname != NULL);
+
+ instance = cfg ? cfg->cfg_instance : NULL;
+ spin_lock(&config_list_lock);
+ list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
+ /* check if instance equals */
+ if (instance != cld->cld_cfg.cfg_instance)
+ continue;
+
+ /* instance may be NULL, should check name */
+ if (strcmp(logname, cld->cld_logname) == 0) {
+ found = cld;
+ break;
+ }
+ }
+ if (found) {
+ atomic_inc(&found->cld_refcount);
+ LASSERT(found->cld_stopping == 0 || cld_is_sptlrpc(found) == 0);
+ }
+ spin_unlock(&config_list_lock);
+ return found;
+}
+
+static
+struct config_llog_data *do_config_log_add(struct obd_device *obd,
+ char *logname,
+ int type,
+ struct config_llog_instance *cfg,
+ struct super_block *sb)
+{
+ struct config_llog_data *cld;
+ int rc;
+
+ CDEBUG(D_MGC, "do adding config log %s:%p\n", logname,
+ cfg ? cfg->cfg_instance : 0);
+
+ OBD_ALLOC(cld, sizeof(*cld) + strlen(logname) + 1);
+ if (!cld)
+ return ERR_PTR(-ENOMEM);
+
+ strcpy(cld->cld_logname, logname);
+ if (cfg)
+ cld->cld_cfg = *cfg;
+ else
+ cld->cld_cfg.cfg_callback = class_config_llog_handler;
+ mutex_init(&cld->cld_lock);
+ cld->cld_cfg.cfg_last_idx = 0;
+ cld->cld_cfg.cfg_flags = 0;
+ cld->cld_cfg.cfg_sb = sb;
+ cld->cld_type = type;
+ atomic_set(&cld->cld_refcount, 1);
+
+ /* Keep the mgc around until we are done */
+ cld->cld_mgcexp = class_export_get(obd->obd_self_export);
+
+ if (cld_is_sptlrpc(cld)) {
+ sptlrpc_conf_log_start(logname);
+ cld->cld_cfg.cfg_obdname = obd->obd_name;
+ }
+
+ rc = mgc_logname2resid(logname, &cld->cld_resid, type);
+
+ spin_lock(&config_list_lock);
+ list_add(&cld->cld_list_chain, &config_llog_list);
+ spin_unlock(&config_list_lock);
+
+ if (rc) {
+ config_log_put(cld);
+ return ERR_PTR(rc);
+ }
+
+ if (cld_is_sptlrpc(cld)) {
+ rc = mgc_process_log(obd, cld);
+ if (rc && rc != -ENOENT)
+ CERROR("failed processing sptlrpc log: %d\n", rc);
+ }
+
+ return cld;
+}
+
+static struct config_llog_data *config_recover_log_add(struct obd_device *obd,
+ char *fsname,
+ struct config_llog_instance *cfg,
+ struct super_block *sb)
+{
+ struct config_llog_instance lcfg = *cfg;
+ struct lustre_sb_info *lsi = s2lsi(sb);
+ struct config_llog_data *cld;
+ char logname[32];
+
+ if (IS_OST(lsi))
+ return NULL;
+
+ /* for osp-on-ost, see lustre_start_osp() */
+ if (IS_MDT(lsi) && lcfg.cfg_instance)
+ return NULL;
+
+ /* we have to use a different llog for clients and MDTs for CMD,
+ * where only clients are notified if one of the CMD servers restarts */
+ LASSERT(strlen(fsname) < sizeof(logname) / 2);
+ strcpy(logname, fsname);
+ if (IS_SERVER(lsi)) { /* mdt */
+ LASSERT(lcfg.cfg_instance == NULL);
+ lcfg.cfg_instance = sb;
+ strcat(logname, "-mdtir");
+ } else {
+ LASSERT(lcfg.cfg_instance != NULL);
+ strcat(logname, "-cliir");
+ }
+
+ cld = do_config_log_add(obd, logname, CONFIG_T_RECOVER, &lcfg, sb);
+ return cld;
+}
+
+
+/** Add this log to the list of active logs watched by an MGC.
+ * Active means we're watching for updates.
+ * We have one active log per "mount" - client instance or servername.
+ * Each instance may be at a different point in the log.
+ */
+static int config_log_add(struct obd_device *obd, char *logname,
+ struct config_llog_instance *cfg,
+ struct super_block *sb)
+{
+ struct lustre_sb_info *lsi = s2lsi(sb);
+ struct config_llog_data *cld;
+ struct config_llog_data *sptlrpc_cld;
+ char seclogname[32];
+ char *ptr;
+
+ CDEBUG(D_MGC, "adding config log %s:%p\n", logname, cfg->cfg_instance);
+
+ /*
+ * for each regular log, the depended sptlrpc log name is
+ * <fsname>-sptlrpc. multiple regular logs may share one sptlrpc log.
+ */
+ ptr = strrchr(logname, '-');
+ if (ptr == NULL || ptr - logname > 8) {
+ CERROR("logname %s is too long\n", logname);
+ return -EINVAL;
+ }
+
+ memcpy(seclogname, logname, ptr - logname);
+ strcpy(seclogname + (ptr - logname), "-sptlrpc");
+
+ sptlrpc_cld = config_log_find(seclogname, NULL);
+ if (sptlrpc_cld == NULL) {
+ sptlrpc_cld = do_config_log_add(obd, seclogname,
+ CONFIG_T_SPTLRPC, NULL, NULL);
+ if (IS_ERR(sptlrpc_cld)) {
+ CERROR("can't create sptlrpc log: %s\n", seclogname);
+ return PTR_ERR(sptlrpc_cld);
+ }
+ }
+
+ cld = do_config_log_add(obd, logname, CONFIG_T_CONFIG, cfg, sb);
+ if (IS_ERR(cld)) {
+ CERROR("can't create log: %s\n", logname);
+ config_log_put(sptlrpc_cld);
+ return PTR_ERR(cld);
+ }
+
+ cld->cld_sptlrpc = sptlrpc_cld;
+
+ LASSERT(lsi->lsi_lmd);
+ if (!(lsi->lsi_lmd->lmd_flags & LMD_FLG_NOIR)) {
+ struct config_llog_data *recover_cld;
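+ /* strip the "-sptlrpc" suffix so seclogname holds the bare fsname */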
+ *strrchr(seclogname, '-') = 0;
+ recover_cld = config_recover_log_add(obd, seclogname, cfg, sb);
+ if (IS_ERR(recover_cld)) {
+ config_log_put(cld);
+ return PTR_ERR(recover_cld);
+ }
+ cld->cld_recover = recover_cld;
+ }
+
+ return 0;
+}
+
+DEFINE_MUTEX(llog_process_lock);
+
+/** Stop watching for updates on this log.
+ */
+static int config_log_end(char *logname, struct config_llog_instance *cfg)
+{
+ struct config_llog_data *cld;
+ struct config_llog_data *cld_sptlrpc = NULL;
+ struct config_llog_data *cld_recover = NULL;
+ int rc = 0;
+
+ cld = config_log_find(logname, cfg);
+ if (cld == NULL)
+ return -ENOENT;
+
+ mutex_lock(&cld->cld_lock);
+ /*
+ * If cld_stopping is set, we did not start the log and thus do not
+ * own the start ref. This can happen after a previous umount: the
+ * cld is still hanging there waiting for lock cancellation, and we
+ * remount again but fail in the middle and call log_end without
+ * calling start_log.
+ */
+ if (unlikely(cld->cld_stopping)) {
+ mutex_unlock(&cld->cld_lock);
+ /* drop the ref from the find */
+ config_log_put(cld);
+ return rc;
+ }
+
+ cld->cld_stopping = 1;
+
+ cld_recover = cld->cld_recover;
+ cld->cld_recover = NULL;
+ mutex_unlock(&cld->cld_lock);
+
+ if (cld_recover) {
+ mutex_lock(&cld_recover->cld_lock);
+ cld_recover->cld_stopping = 1;
+ mutex_unlock(&cld_recover->cld_lock);
+ config_log_put(cld_recover);
+ }
+
+ spin_lock(&config_list_lock);
+ cld_sptlrpc = cld->cld_sptlrpc;
+ cld->cld_sptlrpc = NULL;
+ spin_unlock(&config_list_lock);
+
+ if (cld_sptlrpc)
+ config_log_put(cld_sptlrpc);
+
+ /* drop the ref from the find */
+ config_log_put(cld);
+ /* drop the start ref */
+ config_log_put(cld);
+
+ CDEBUG(D_MGC, "end config log %s (%d)\n", logname ? logname : "client",
+ rc);
+ return rc;
+}
+
+int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data)
+{
+ struct obd_device *obd = data;
+ struct obd_import *imp = obd->u.cli.cl_import;
+ struct obd_connect_data *ocd = &imp->imp_connect_data;
+ struct config_llog_data *cld;
+
+ seq_printf(m, "imperative_recovery: %s\n",
+ OCD_HAS_FLAG(ocd, IMP_RECOV) ? "ENABLED" : "DISABLED");
+ seq_printf(m, "client_state:\n");
+
+ spin_lock(&config_list_lock);
+ list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
+ if (cld->cld_recover == NULL)
+ continue;
+ seq_printf(m, " - { client: %s, nidtbl_version: %u }\n",
+ cld->cld_logname,
+ cld->cld_recover->cld_cfg.cfg_last_idx);
+ }
+ spin_unlock(&config_list_lock);
+
+ return 0;
+}
+
+/* reenqueue any lost locks */
+#define RQ_RUNNING 0x1
+#define RQ_NOW 0x2
+#define RQ_LATER 0x4
+#define RQ_STOP 0x8
+static int rq_state = 0;
+static wait_queue_head_t rq_waitq;
+static DECLARE_COMPLETION(rq_exit);
+
+static void do_requeue(struct config_llog_data *cld)
+{
+ LASSERT(atomic_read(&cld->cld_refcount) > 0);
+
+ /* Do not run mgc_process_log on a disconnected export or an
+ export which is being disconnected. Take the client
+ semaphore to make the check non-racy. */
+ down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
+ if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) {
+ CDEBUG(D_MGC, "updating log %s\n", cld->cld_logname);
+ mgc_process_log(cld->cld_mgcexp->exp_obd, cld);
+ } else {
+ CDEBUG(D_MGC, "disconnecting, won't update log %s\n",
+ cld->cld_logname);
+ }
+ up_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
+}
+
+/* this timeout represents how many seconds the MGC should wait before
+ * requeueing the config and recovery locks to the MGS. We randomize it
+ * so that we do not flood the MGS.
+ */
+#define MGC_TIMEOUT_MIN_SECONDS 5
+#define MGC_TIMEOUT_RAND_CENTISEC 0x1ff /* ~500 */
+
+static int mgc_requeue_thread(void *data)
+{
+ int rc = 0;
+
+ CDEBUG(D_MGC, "Starting requeue thread\n");
+
+ /* Keep trying failed locks periodically */
+ spin_lock(&config_list_lock);
+ rq_state |= RQ_RUNNING;
+ while (1) {
+ struct l_wait_info lwi;
+ struct config_llog_data *cld, *cld_prev;
+ int rand = cfs_rand() & MGC_TIMEOUT_RAND_CENTISEC;
+ int stopped = !!(rq_state & RQ_STOP);
+ int to;
+
+ /* Any new or requeued lostlocks will change the state */
+ rq_state &= ~(RQ_NOW | RQ_LATER);
+ spin_unlock(&config_list_lock);
+
+ /* Always wait a few seconds to allow the server that
+ caused the lock revocation to finish its setup, plus a
+ random delay so everyone doesn't try to reconnect at once. */
+ to = MGC_TIMEOUT_MIN_SECONDS * HZ;
+ to += rand * HZ / 100; /* rand is centi-seconds */
+ lwi = LWI_TIMEOUT(to, NULL, NULL);
+ l_wait_event(rq_waitq, rq_state & RQ_STOP, &lwi);
+
+ /*
+ * Iterate through the list. For each cld, process its dependent
+ * sptlrpc cld first (if any) and then the cld itself.
+ *
+ * Every item in the list is guaranteed to hold a
+ * reference > 0; and if cld_lostlock is set, at
+ * least one reference is held by the previous enqueue.
+ */
+ cld_prev = NULL;
+
+ spin_lock(&config_list_lock);
+ list_for_each_entry(cld, &config_llog_list,
+ cld_list_chain) {
+ if (!cld->cld_lostlock)
+ continue;
+
+ spin_unlock(&config_list_lock);
+
+ LASSERT(atomic_read(&cld->cld_refcount) > 0);
+
+ /* Whether we enqueued again or not in mgc_process_log,
+ * we're done with the ref from the old enqueue */
+ if (cld_prev)
+ config_log_put(cld_prev);
+ cld_prev = cld;
+
+ cld->cld_lostlock = 0;
+ if (likely(!stopped))
+ do_requeue(cld);
+
+ spin_lock(&config_list_lock);
+ }
+ spin_unlock(&config_list_lock);
+ if (cld_prev)
+ config_log_put(cld_prev);
+
+ /* break after scanning the list so that we can drop
+ * the refcount on clds that lost their lock */
+ if (unlikely(stopped)) {
+ spin_lock(&config_list_lock);
+ break;
+ }
+
+ /* Wait a bit to see if anyone else needs a requeue */
+ lwi = (struct l_wait_info) { 0 };
+ l_wait_event(rq_waitq, rq_state & (RQ_NOW | RQ_STOP),
+ &lwi);
+ spin_lock(&config_list_lock);
+ }
+ /* the spinlock and the while loop guarantee RQ_NOW and RQ_LATER are not set */
+ rq_state &= ~RQ_RUNNING;
+ spin_unlock(&config_list_lock);
+
+ complete(&rq_exit);
+
+ CDEBUG(D_MGC, "Ending requeue thread\n");
+ return rc;
+}
+
+/* Add a cld to the list to requeue. Start the requeue thread if needed.
+ We are responsible for dropping the config log reference from here on out. */
+static void mgc_requeue_add(struct config_llog_data *cld)
+{
+ CDEBUG(D_INFO, "log %s: requeue (r=%d sp=%d st=%x)\n",
+ cld->cld_logname, atomic_read(&cld->cld_refcount),
+ cld->cld_stopping, rq_state);
+ LASSERT(atomic_read(&cld->cld_refcount) > 0);
+
+ mutex_lock(&cld->cld_lock);
+ if (cld->cld_stopping || cld->cld_lostlock) {
+ mutex_unlock(&cld->cld_lock);
+ return;
+ }
+ /* this refcount will be released in mgc_requeue_thread. */
+ config_log_get(cld);
+ cld->cld_lostlock = 1;
+ mutex_unlock(&cld->cld_lock);
+
+ /* Hold lock for rq_state */
+ spin_lock(&config_list_lock);
+ if (rq_state & RQ_STOP) {
+ spin_unlock(&config_list_lock);
+ cld->cld_lostlock = 0;
+ config_log_put(cld);
+ } else {
+ rq_state |= RQ_NOW;
+ spin_unlock(&config_list_lock);
+ wake_up(&rq_waitq);
+ }
+}
+
+/********************** class fns **********************/
+
+static int mgc_fs_setup(struct obd_device *obd, struct super_block *sb,
+ struct vfsmount *mnt)
+{
+ struct lvfs_run_ctxt saved;
+ struct lustre_sb_info *lsi = s2lsi(sb);
+ struct client_obd *cli = &obd->u.cli;
+ struct dentry *dentry;
+ char *label;
+ int err = 0;
+
+ LASSERT(lsi);
+ LASSERT(lsi->lsi_srv_mnt == mnt);
+
+ /* The mgc fs exclusion sem. Only one fs can be set up at a time. */
+ down(&cli->cl_mgc_sem);
+
+ cfs_cleanup_group_info();
+
+ obd->obd_fsops = fsfilt_get_ops(lsi->lsi_fstype);
+ if (IS_ERR(obd->obd_fsops)) {
+ up(&cli->cl_mgc_sem);
+ CERROR("%s: No fstype %s: rc = %ld\n", lsi->lsi_fstype,
+ obd->obd_name, PTR_ERR(obd->obd_fsops));
+ return PTR_ERR(obd->obd_fsops);
+ }
+
+ cli->cl_mgc_vfsmnt = mnt;
+ err = fsfilt_setup(obd, mnt->mnt_sb);
+ if (err)
+ GOTO(err_ops, err);
+
+ OBD_SET_CTXT_MAGIC(&obd->obd_lvfs_ctxt);
+ obd->obd_lvfs_ctxt.pwdmnt = mnt;
+ obd->obd_lvfs_ctxt.pwd = mnt->mnt_root;
+ obd->obd_lvfs_ctxt.fs = get_ds();
+
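+ /* Look up the CONFIGS directory on the server mount within the
+ * MGC's filesystem context */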
+ push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
+ dentry = ll_lookup_one_len(MOUNT_CONFIGS_DIR, cfs_fs_pwd(current->fs),
+ strlen(MOUNT_CONFIGS_DIR));
+ pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
+ if (IS_ERR(dentry)) {
+ err = PTR_ERR(dentry);
+ CERROR("cannot lookup %s directory: rc = %d\n",
+ MOUNT_CONFIGS_DIR, err);
+ GOTO(err_ops, err);
+ }
+ cli->cl_mgc_configs_dir = dentry;
+
+ /* We take an obd ref to ensure that we can't get to mgc_cleanup
+ without calling mgc_fs_cleanup first. */
+ class_incref(obd, "mgc_fs", obd);
+
+ label = fsfilt_get_label(obd, mnt->mnt_sb);
+ if (label)
+ CDEBUG(D_MGC, "MGC using disk labelled=%s\n", label);
+
+ /* We keep the cl_mgc_sem until mgc_fs_cleanup */
+ return 0;
+
+err_ops:
+ fsfilt_put_ops(obd->obd_fsops);
+ obd->obd_fsops = NULL;
+ cli->cl_mgc_vfsmnt = NULL;
+ up(&cli->cl_mgc_sem);
+ return err;
+}
+
+static int mgc_fs_cleanup(struct obd_device *obd)
+{
+ struct client_obd *cli = &obd->u.cli;
+ int rc = 0;
+
+ LASSERT(cli->cl_mgc_vfsmnt != NULL);
+
+ if (cli->cl_mgc_configs_dir != NULL) {
+ struct lvfs_run_ctxt saved;
+ push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
+ l_dput(cli->cl_mgc_configs_dir);
+ cli->cl_mgc_configs_dir = NULL;
+ pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
+ class_decref(obd, "mgc_fs", obd);
+ }
+
+ cli->cl_mgc_vfsmnt = NULL;
+ if (obd->obd_fsops)
+ fsfilt_put_ops(obd->obd_fsops);
+
+ up(&cli->cl_mgc_sem);
+
+ return rc;
+}
+
+static atomic_t mgc_count = ATOMIC_INIT(0);
+static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
+{
+ int rc = 0;
+
+ switch (stage) {
+ case OBD_CLEANUP_EARLY:
+ break;
+ case OBD_CLEANUP_EXPORTS:
+ if (atomic_dec_and_test(&mgc_count)) {
+ int running;
+ /* stop requeue thread */
+ spin_lock(&config_list_lock);
+ running = rq_state & RQ_RUNNING;
+ if (running)
+ rq_state |= RQ_STOP;
+ spin_unlock(&config_list_lock);
+ if (running) {
+ wake_up(&rq_waitq);
+ wait_for_completion(&rq_exit);
+ }
+ }
+ obd_cleanup_client_import(obd);
+ rc = obd_llog_finish(obd, 0);
+ if (rc != 0)
+ CERROR("failed to cleanup llogging subsystems\n");
+ break;
+ }
+ return rc;
+}
+
+static int mgc_cleanup(struct obd_device *obd)
+{
+ struct client_obd *cli = &obd->u.cli;
+ int rc;
+
+ LASSERT(cli->cl_mgc_vfsmnt == NULL);
+
+ /* COMPAT_146 - old config logs may have added profiles we don't
+ know about */
+ if (obd->obd_type->typ_refcnt <= 1)
+ /* Only for the last mgc */
+ class_del_profiles();
+
+ lprocfs_obd_cleanup(obd);
+ ptlrpcd_decref();
+
+ rc = client_obd_cleanup(obd);
+ return rc;
+}
+
+static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
+{
+ struct lprocfs_static_vars lvars;
+ int rc;
+
+ ptlrpcd_addref();
+
+ rc = client_obd_setup(obd, lcfg);
+ if (rc)
+ GOTO(err_decref, rc);
+
+ rc = obd_llog_init(obd, &obd->obd_olg, obd, NULL);
+ if (rc) {
+ CERROR("failed to setup llogging subsystems\n");
+ GOTO(err_cleanup, rc);
+ }
+
+ lprocfs_mgc_init_vars(&lvars);
+ lprocfs_obd_setup(obd, lvars.obd_vars);
+ sptlrpc_lprocfs_cliobd_attach(obd);
+
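+ /* Only the first MGC instance starts the shared requeue thread */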
+ if (atomic_inc_return(&mgc_count) == 1) {
+ rq_state = 0;
+ init_waitqueue_head(&rq_waitq);
+
+ /* start requeue thread */
+ rc = PTR_ERR(kthread_run(mgc_requeue_thread, NULL,
+ "ll_cfg_requeue"));
+ if (IS_ERR_VALUE(rc)) {
+ CERROR("%s: Cannot start requeue thread (%d),"
+ "no more log updates!\n",
+ obd->obd_name, rc);
+ GOTO(err_cleanup, rc);
+ }
+ /* rc is the task_struct pointer of mgc_requeue_thread. */
+ rc = 0;
+ }
+
+ return rc;
+
+err_cleanup:
+ client_obd_cleanup(obd);
+err_decref:
+ ptlrpcd_decref();
+ return rc;
+}
+
+/* based on ll_mdc_blocking_ast */
+static int mgc_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
+ void *data, int flag)
+{
+ struct lustre_handle lockh;
+ struct config_llog_data *cld = (struct config_llog_data *)data;
+ int rc = 0;
+
+ switch (flag) {
+ case LDLM_CB_BLOCKING:
+ /* mgs wants the lock, give it up... */
+ LDLM_DEBUG(lock, "MGC blocking CB");
+ ldlm_lock2handle(lock, &lockh);
+ rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
+ break;
+ case LDLM_CB_CANCELING:
+ /* We've given up the lock, prepare ourselves to update. */
+ LDLM_DEBUG(lock, "MGC cancel CB");
+
+ CDEBUG(D_MGC, "Lock res "LPX64" (%.8s)\n",
+ lock->l_resource->lr_name.name[0],
+ (char *)&lock->l_resource->lr_name.name[0]);
+
+ if (!cld) {
+ CDEBUG(D_INFO, "missing data, won't requeue\n");
+ break;
+ }
+
+ /* held at mgc_process_log(). */
+ LASSERT(atomic_read(&cld->cld_refcount) > 0);
+ /* Are we done with this log? */
+ if (cld->cld_stopping) {
+ CDEBUG(D_MGC, "log %s: stopping, won't requeue\n",
+ cld->cld_logname);
+ config_log_put(cld);
+ break;
+ }
+ /* Make sure not to re-enqueue when the mgc is stopping
+ (we get called from client_disconnect_export) */
+ if (!lock->l_conn_export ||
+ !lock->l_conn_export->exp_obd->u.cli.cl_conn_count) {
+ CDEBUG(D_MGC, "log %.8s: disconnecting, won't requeue\n",
+ cld->cld_logname);
+ config_log_put(cld);
+ break;
+ }
+
+ /* Re-enqueue now */
+ mgc_requeue_add(cld);
+ config_log_put(cld);
+ break;
+ default:
+ LBUG();
+ }
+
+ return rc;
+}
+
+/* Not sure where this should go... */
+#define MGC_ENQUEUE_LIMIT 50
+#define MGC_TARGET_REG_LIMIT 10
+#define MGC_SEND_PARAM_LIMIT 10
+
+/* Send parameter to MGS */
+static int mgc_set_mgs_param(struct obd_export *exp,
+ struct mgs_send_param *msp)
+{
+ struct ptlrpc_request *req;
+ struct mgs_send_param *req_msp, *rep_msp;
+ int rc;
+
+ req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
+ &RQF_MGS_SET_INFO, LUSTRE_MGS_VERSION,
+ MGS_SET_INFO);
+ if (!req)
+ return -ENOMEM;
+
+ req_msp = req_capsule_client_get(&req->rq_pill, &RMF_MGS_SEND_PARAM);
+ if (!req_msp) {
+ ptlrpc_req_finished(req);
+ return -ENOMEM;
+ }
+
+ memcpy(req_msp, msp, sizeof(*req_msp));
+ ptlrpc_request_set_replen(req);
+
+ /* Limit how long we will wait for the enqueue to complete */
+ req->rq_delay_limit = MGC_SEND_PARAM_LIMIT;
+ rc = ptlrpc_queue_wait(req);
+ if (!rc) {
+ rep_msp = req_capsule_server_get(&req->rq_pill, &RMF_MGS_SEND_PARAM);
+ memcpy(msp, rep_msp, sizeof(*rep_msp));
+ }
+
+ ptlrpc_req_finished(req);
+
+ return rc;
+}
+
+/* Take a config lock so we can get cancel notifications */
+static int mgc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm,
+ __u32 type, ldlm_policy_data_t *policy, __u32 mode,
+ __u64 *flags, void *bl_cb, void *cp_cb, void *gl_cb,
+ void *data, __u32 lvb_len, void *lvb_swabber,
+ struct lustre_handle *lockh)
+{
+ struct config_llog_data *cld = (struct config_llog_data *)data;
+ struct ldlm_enqueue_info einfo = {
+ .ei_type = type,
+ .ei_mode = mode,
+ .ei_cb_bl = mgc_blocking_ast,
+ .ei_cb_cp = ldlm_completion_ast,
+ };
+ struct ptlrpc_request *req;
+ int short_limit = cld_is_sptlrpc(cld);
+ int rc;
+
+ CDEBUG(D_MGC, "Enqueue for %s (res "LPX64")\n", cld->cld_logname,
+ cld->cld_resid.name[0]);
+
+ /* We need a callback for every lockholder, so don't try to
+ ldlm_lock_match (see rev 1.1.2.11.2.47) */
+ req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
+ &RQF_LDLM_ENQUEUE, LUSTRE_DLM_VERSION,
+ LDLM_ENQUEUE);
+ if (req == NULL)
+ return -ENOMEM;
+
+ req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, 0);
+ ptlrpc_request_set_replen(req);
+
+ /* check if this is server or client */
+ if (cld->cld_cfg.cfg_sb) {
+ struct lustre_sb_info *lsi = s2lsi(cld->cld_cfg.cfg_sb);
+ if (lsi && IS_SERVER(lsi))
+ short_limit = 1;
+ }
+ /* Limit how long we will wait for the enqueue to complete */
+ req->rq_delay_limit = short_limit ? 5 : MGC_ENQUEUE_LIMIT;
+ rc = ldlm_cli_enqueue(exp, &req, &einfo, &cld->cld_resid, NULL, flags,
+ NULL, 0, LVB_T_NONE, lockh, 0);
+ /* A failed enqueue should still call the mgc_blocking_ast,
+ where it will be requeued if needed ("grant failed"). */
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+static int mgc_cancel(struct obd_export *exp, struct lov_stripe_md *md,
+ __u32 mode, struct lustre_handle *lockh)
+{
+ ldlm_lock_decref(lockh, mode);
+
+ return 0;
+}
+
+static void mgc_notify_active(struct obd_device *unused)
+{
+ /* wakeup mgc_requeue_thread to requeue mgc lock */
+ spin_lock(&config_list_lock);
+ rq_state |= RQ_NOW;
+ spin_unlock(&config_list_lock);
+ wake_up(&rq_waitq);
+
+ /* TODO: Help the MGS rebuild nidtbl. -jay */
+}
+
+/* Send target_reg message to MGS */
+static int mgc_target_register(struct obd_export *exp,
+ struct mgs_target_info *mti)
+{
+ struct ptlrpc_request *req;
+ struct mgs_target_info *req_mti, *rep_mti;
+ int rc;
+
+ req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
+ &RQF_MGS_TARGET_REG, LUSTRE_MGS_VERSION,
+ MGS_TARGET_REG);
+ if (req == NULL)
+ return -ENOMEM;
+
+ req_mti = req_capsule_client_get(&req->rq_pill, &RMF_MGS_TARGET_INFO);
+ if (!req_mti) {
+ ptlrpc_req_finished(req);
+ return -ENOMEM;
+ }
+
+ memcpy(req_mti, mti, sizeof(*req_mti));
+ ptlrpc_request_set_replen(req);
+ CDEBUG(D_MGC, "register %s\n", mti->mti_svname);
+ /* Limit how long we will wait for the enqueue to complete */
+ req->rq_delay_limit = MGC_TARGET_REG_LIMIT;
+
+ rc = ptlrpc_queue_wait(req);
+ if (!rc) {
+ rep_mti = req_capsule_server_get(&req->rq_pill,
+ &RMF_MGS_TARGET_INFO);
+ memcpy(mti, rep_mti, sizeof(*rep_mti));
+ CDEBUG(D_MGC, "register %s got index = %d\n",
+ mti->mti_svname, mti->mti_stripe_index);
+ }
+ ptlrpc_req_finished(req);
+
+ return rc;
+}
+
+int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
+ obd_count keylen, void *key, obd_count vallen,
+ void *val, struct ptlrpc_request_set *set)
+{
+ int rc = -EINVAL;
+
+ /* Turn off initial_recov after we try all backup servers once */
+ if (KEY_IS(KEY_INIT_RECOV_BACKUP)) {
+ struct obd_import *imp = class_exp2cliimp(exp);
+ int value;
+ if (vallen != sizeof(int))
+ return -EINVAL;
+ value = *(int *)val;
+ CDEBUG(D_MGC, "InitRecov %s %d/d%d:i%d:r%d:or%d:%s\n",
+ imp->imp_obd->obd_name, value,
+ imp->imp_deactive, imp->imp_invalid,
+ imp->imp_replayable, imp->imp_obd->obd_replayable,
+ ptlrpc_import_state_name(imp->imp_state));
+ /* Resurrect if we previously died */
+ if ((imp->imp_state != LUSTRE_IMP_FULL &&
+ imp->imp_state != LUSTRE_IMP_NEW) || value > 1)
+ ptlrpc_reconnect_import(imp);
+ return 0;
+ }
+ /* FIXME move this to mgc_process_config */
+ if (KEY_IS(KEY_REGISTER_TARGET)) {
+ struct mgs_target_info *mti;
+ if (vallen != sizeof(struct mgs_target_info))
+ return -EINVAL;
+ mti = (struct mgs_target_info *)val;
+ CDEBUG(D_MGC, "register_target %s %#x\n",
+ mti->mti_svname, mti->mti_flags);
+ rc = mgc_target_register(exp, mti);
+ return rc;
+ }
+ if (KEY_IS(KEY_SET_FS)) {
+ struct super_block *sb = (struct super_block *)val;
+ struct lustre_sb_info *lsi;
+ if (vallen != sizeof(struct super_block))
+ return -EINVAL;
+ lsi = s2lsi(sb);
+ rc = mgc_fs_setup(exp->exp_obd, sb, lsi->lsi_srv_mnt);
+ if (rc) {
+ CERROR("set_fs got %d\n", rc);
+ }
+ return rc;
+ }
+ if (KEY_IS(KEY_CLEAR_FS)) {
+ if (vallen != 0)
+ return -EINVAL;
+ rc = mgc_fs_cleanup(exp->exp_obd);
+ if (rc) {
+ CERROR("clear_fs got %d\n", rc);
+ }
+ return rc;
+ }
+ if (KEY_IS(KEY_SET_INFO)) {
+ struct mgs_send_param *msp;
+
+ msp = (struct mgs_send_param *)val;
+ rc = mgc_set_mgs_param(exp, msp);
+ return rc;
+ }
+ if (KEY_IS(KEY_MGSSEC)) {
+ struct client_obd *cli = &exp->exp_obd->u.cli;
+ struct sptlrpc_flavor flvr;
+
+ /*
+ * An empty string means using the current flavor; if it has not
+ * been set yet, set it to null.
+ *
+ * If the flavor has been set previously, check that the requested
+ * flavor matches the existing one.
+ */
+ if (vallen == 0) {
+ if (cli->cl_flvr_mgc.sf_rpc != SPTLRPC_FLVR_INVALID)
+ return 0;
+ val = "null";
+ vallen = 4;
+ }
+
+ rc = sptlrpc_parse_flavor(val, &flvr);
+ if (rc) {
+ CERROR("invalid sptlrpc flavor %s to MGS\n",
+ (char *) val);
+ return rc;
+ }
+
+		/*
+		 * the caller already holds a mutex
+		 */
+ if (cli->cl_flvr_mgc.sf_rpc == SPTLRPC_FLVR_INVALID) {
+ cli->cl_flvr_mgc = flvr;
+ } else if (memcmp(&cli->cl_flvr_mgc, &flvr,
+ sizeof(flvr)) != 0) {
+ char str[20];
+
+ sptlrpc_flavor2name(&cli->cl_flvr_mgc,
+ str, sizeof(str));
+ LCONSOLE_ERROR("asking sptlrpc flavor %s to MGS but "
+ "currently %s is in use\n",
+ (char *) val, str);
+ rc = -EPERM;
+ }
+ return rc;
+ }
+
+ return rc;
+}
+
+static int mgc_get_info(const struct lu_env *env, struct obd_export *exp,
+ __u32 keylen, void *key, __u32 *vallen, void *val,
+ struct lov_stripe_md *unused)
+{
+ int rc = -EINVAL;
+
+ if (KEY_IS(KEY_CONN_DATA)) {
+ struct obd_import *imp = class_exp2cliimp(exp);
+ struct obd_connect_data *data = val;
+
+ if (*vallen == sizeof(*data)) {
+ *data = imp->imp_connect_data;
+ rc = 0;
+ }
+ }
+
+ return rc;
+}
+
+static int mgc_import_event(struct obd_device *obd,
+ struct obd_import *imp,
+ enum obd_import_event event)
+{
+ int rc = 0;
+
+ LASSERT(imp->imp_obd == obd);
+ CDEBUG(D_MGC, "import event %#x\n", event);
+
+ switch (event) {
+ case IMP_EVENT_DISCON:
+ /* MGC imports should not wait for recovery */
+ if (OCD_HAS_FLAG(&imp->imp_connect_data, IMP_RECOV))
+ ptlrpc_pinger_ir_down();
+ break;
+ case IMP_EVENT_INACTIVE:
+ break;
+ case IMP_EVENT_INVALIDATE: {
+ struct ldlm_namespace *ns = obd->obd_namespace;
+ ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
+ break;
+ }
+ case IMP_EVENT_ACTIVE:
+ CDEBUG(D_INFO, "%s: Reactivating import\n", obd->obd_name);
+ /* Clearing obd_no_recov allows us to continue pinging */
+ obd->obd_no_recov = 0;
+ mgc_notify_active(obd);
+ if (OCD_HAS_FLAG(&imp->imp_connect_data, IMP_RECOV))
+ ptlrpc_pinger_ir_up();
+ break;
+ case IMP_EVENT_OCD:
+ break;
+ case IMP_EVENT_DEACTIVATE:
+ case IMP_EVENT_ACTIVATE:
+ break;
+ default:
+ CERROR("Unknown import event %#x\n", event);
+ LBUG();
+ }
+ return rc;
+}
+
+static int mgc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
+ struct obd_device *tgt, int *index)
+{
+ struct llog_ctxt *ctxt;
+ int rc;
+
+ LASSERT(olg == &obd->obd_olg);
+
+
+ rc = llog_setup(NULL, obd, olg, LLOG_CONFIG_REPL_CTXT, tgt,
+ &llog_client_ops);
+ if (rc)
+ GOTO(out, rc);
+
+ ctxt = llog_group_get_ctxt(olg, LLOG_CONFIG_REPL_CTXT);
+ if (!ctxt)
+ GOTO(out, rc = -ENODEV);
+
+ llog_initiator_connect(ctxt);
+ llog_ctxt_put(ctxt);
+
+ return 0;
+out:
+ ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
+ if (ctxt)
+ llog_cleanup(NULL, ctxt);
+ return rc;
+}
+
+static int mgc_llog_finish(struct obd_device *obd, int count)
+{
+ struct llog_ctxt *ctxt;
+
+ ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT);
+ if (ctxt)
+ llog_cleanup(NULL, ctxt);
+
+ ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
+ if (ctxt)
+ llog_cleanup(NULL, ctxt);
+ return 0;
+}
+
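+/* Number of pages used for the recovery-log bulk transfer: about 1MB worth
+ * of pages for the first read of a log, a few pages for incremental reads. */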
+enum {
+ CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT),
+ CONFIG_READ_NRPAGES = 4
+};
+
+static int mgc_apply_recover_logs(struct obd_device *mgc,
+ struct config_llog_data *cld,
+ __u64 max_version,
+ void *data, int datalen, bool mne_swab)
+{
+ struct config_llog_instance *cfg = &cld->cld_cfg;
+ struct lustre_sb_info *lsi = s2lsi(cfg->cfg_sb);
+ struct mgs_nidtbl_entry *entry;
+ struct lustre_cfg *lcfg;
+ struct lustre_cfg_bufs bufs;
+ u64 prev_version = 0;
+ char *inst;
+ char *buf;
+ int bufsz;
+ int pos;
+ int rc = 0;
+ int off = 0;
+
+ LASSERT(cfg->cfg_instance != NULL);
+ LASSERT(cfg->cfg_sb == cfg->cfg_instance);
+
+ OBD_ALLOC(inst, PAGE_CACHE_SIZE);
+ if (inst == NULL)
+ return -ENOMEM;
+
+ if (!IS_SERVER(lsi)) {
+ pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance);
+ if (pos >= PAGE_CACHE_SIZE) {
+ OBD_FREE(inst, PAGE_CACHE_SIZE);
+ return -E2BIG;
+ }
+ } else {
+ LASSERT(IS_MDT(lsi));
+ rc = server_name2svname(lsi->lsi_svname, inst, NULL,
+ PAGE_CACHE_SIZE);
+ if (rc) {
+ OBD_FREE(inst, PAGE_CACHE_SIZE);
+ return -EINVAL;
+ }
+ pos = strlen(inst);
+ }
+
+ ++pos;
+ buf = inst + pos;
+ bufsz = PAGE_CACHE_SIZE - pos;
+
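+	/*
+	 * Walk the mgs_nidtbl_entry records in the reply buffer and, for
+	 * each restarted target, point the matching osc/mdc import at its
+	 * new connection via an LCFG_PARAM "<cname>.import" command.
+	 */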
+ while (datalen > 0) {
+ int entry_len = sizeof(*entry);
+ int is_ost;
+ struct obd_device *obd;
+ char *obdname;
+ char *cname;
+ char *params;
+ char *uuid;
+
+ rc = -EINVAL;
+ if (datalen < sizeof(*entry))
+ break;
+
+ entry = (typeof(entry))(data + off);
+
+ /* sanity check */
+ if (entry->mne_nid_type != 0) /* only support type 0 for ipv4 */
+ break;
+ if (entry->mne_nid_count == 0) /* at least one nid entry */
+ break;
+ if (entry->mne_nid_size != sizeof(lnet_nid_t))
+ break;
+
+ entry_len += entry->mne_nid_count * entry->mne_nid_size;
+ if (datalen < entry_len) /* must have entry_len at least */
+ break;
+
+ /* Keep this swab for normal mixed endian handling. LU-1644 */
+ if (mne_swab)
+ lustre_swab_mgs_nidtbl_entry(entry);
+ if (entry->mne_length > PAGE_CACHE_SIZE) {
+ CERROR("MNE too large (%u)\n", entry->mne_length);
+ break;
+ }
+
+ if (entry->mne_length < entry_len)
+ break;
+
+ off += entry->mne_length;
+ datalen -= entry->mne_length;
+ if (datalen < 0)
+ break;
+
+ if (entry->mne_version > max_version) {
+ CERROR("entry index(%lld) is over max_index(%lld)\n",
+ entry->mne_version, max_version);
+ break;
+ }
+
+ if (prev_version >= entry->mne_version) {
+ CERROR("index unsorted, prev %lld, now %lld\n",
+ prev_version, entry->mne_version);
+ break;
+ }
+ prev_version = entry->mne_version;
+
+ /*
+ * Write a string with format "nid::instance" to
+ * lustre/<osc|mdc>/<target>-<osc|mdc>-<instance>/import.
+ */
+
+ is_ost = entry->mne_type == LDD_F_SV_TYPE_OST;
+ memset(buf, 0, bufsz);
+ obdname = buf;
+ pos = 0;
+
+ /* lustre-OST0001-osc-<instance #> */
+ strcpy(obdname, cld->cld_logname);
+ cname = strrchr(obdname, '-');
+ if (cname == NULL) {
+ CERROR("mgc %s: invalid logname %s\n",
+ mgc->obd_name, obdname);
+ break;
+ }
+
+ pos = cname - obdname;
+ obdname[pos] = 0;
+ pos += sprintf(obdname + pos, "-%s%04x",
+ is_ost ? "OST" : "MDT", entry->mne_index);
+
+		cname = is_ost ? "osc" : "mdc";
+ pos += sprintf(obdname + pos, "-%s-%s", cname, inst);
+ lustre_cfg_bufs_reset(&bufs, obdname);
+
+ /* find the obd by obdname */
+ obd = class_name2obd(obdname);
+ if (obd == NULL) {
+ CDEBUG(D_INFO, "mgc %s: cannot find obdname %s\n",
+ mgc->obd_name, obdname);
+ rc = 0;
+			/* this is a safe race when the OST is starting up... */
+ continue;
+ }
+
+ /* osc.import = "connection=<Conn UUID>::<target instance>" */
+ ++pos;
+ params = buf + pos;
+ pos += sprintf(params, "%s.import=%s", cname, "connection=");
+ uuid = buf + pos;
+
+ down_read(&obd->u.cli.cl_sem);
+ if (obd->u.cli.cl_import == NULL) {
+			/* the client has not connected to the OST yet */
+ up_read(&obd->u.cli.cl_sem);
+ rc = 0;
+ continue;
+ }
+
+ /* TODO: iterate all nids to find one */
+ /* find uuid by nid */
+ rc = client_import_find_conn(obd->u.cli.cl_import,
+ entry->u.nids[0],
+ (struct obd_uuid *)uuid);
+ up_read(&obd->u.cli.cl_sem);
+ if (rc < 0) {
+ CERROR("mgc: cannot find uuid by nid %s\n",
+ libcfs_nid2str(entry->u.nids[0]));
+ break;
+ }
+
+ CDEBUG(D_INFO, "Find uuid %s by nid %s\n",
+ uuid, libcfs_nid2str(entry->u.nids[0]));
+
+ pos += strlen(uuid);
+ pos += sprintf(buf + pos, "::%u", entry->mne_instance);
+ LASSERT(pos < bufsz);
+
+ lustre_cfg_bufs_set_string(&bufs, 1, params);
+
+ rc = -ENOMEM;
+ lcfg = lustre_cfg_new(LCFG_PARAM, &bufs);
+ if (lcfg == NULL) {
+ CERROR("mgc: cannot allocate memory\n");
+ break;
+ }
+
+ CDEBUG(D_INFO, "ir apply logs "LPD64"/"LPD64" for %s -> %s\n",
+ prev_version, max_version, obdname, params);
+
+ rc = class_process_config(lcfg);
+ lustre_cfg_free(lcfg);
+ if (rc)
+ CDEBUG(D_INFO, "process config for %s error %d\n",
+ obdname, rc);
+
+ /* continue, even one with error */
+ }
+
+ OBD_FREE(inst, PAGE_CACHE_SIZE);
+ return rc;
+}
+
+/**
+ * This function is called when the MGS notifies this client that a target
+ * has restarted.  A CONFIG_READ RPC is sent to fetch the recovery logs.
+ */
+static int mgc_process_recover_log(struct obd_device *obd,
+ struct config_llog_data *cld)
+{
+ struct ptlrpc_request *req = NULL;
+ struct config_llog_instance *cfg = &cld->cld_cfg;
+ struct mgs_config_body *body;
+ struct mgs_config_res *res;
+ struct ptlrpc_bulk_desc *desc;
+ struct page **pages;
+ int nrpages;
+ bool eof = true;
+ bool mne_swab = false;
+ int i;
+ int ealen;
+ int rc;
+
+	/* Allocate the buffer for the bulk transfer.
+	 * If this is the first read of these logs, they are fetched in full,
+	 * so CONFIG_READ_NRPAGES_INIT is used; otherwise only the increment
+	 * of the logs is read, which should be small, and CONFIG_READ_NRPAGES
+	 * is used.
+	 */
+ nrpages = CONFIG_READ_NRPAGES;
+ if (cfg->cfg_last_idx == 0) /* the first time */
+ nrpages = CONFIG_READ_NRPAGES_INIT;
+
+ OBD_ALLOC(pages, sizeof(*pages) * nrpages);
+ if (pages == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ for (i = 0; i < nrpages; i++) {
+ pages[i] = alloc_page(GFP_IOFS);
+ if (pages[i] == NULL)
+ GOTO(out, rc = -ENOMEM);
+ }
+
+again:
+ LASSERT(cld_is_recover(cld));
+ LASSERT(mutex_is_locked(&cld->cld_lock));
+ req = ptlrpc_request_alloc(class_exp2cliimp(cld->cld_mgcexp),
+ &RQF_MGS_CONFIG_READ);
+ if (req == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ rc = ptlrpc_request_pack(req, LUSTRE_MGS_VERSION, MGS_CONFIG_READ);
+ if (rc)
+ GOTO(out, rc);
+
+ /* pack request */
+ body = req_capsule_client_get(&req->rq_pill, &RMF_MGS_CONFIG_BODY);
+ LASSERT(body != NULL);
+ LASSERT(sizeof(body->mcb_name) > strlen(cld->cld_logname));
+ if (strlcpy(body->mcb_name, cld->cld_logname, sizeof(body->mcb_name))
+ >= sizeof(body->mcb_name))
+ GOTO(out, rc = -E2BIG);
+ body->mcb_offset = cfg->cfg_last_idx + 1;
+ body->mcb_type = cld->cld_type;
+ body->mcb_bits = PAGE_CACHE_SHIFT;
+ body->mcb_units = nrpages;
+
+ /* allocate bulk transfer descriptor */
+ desc = ptlrpc_prep_bulk_imp(req, nrpages, 1, BULK_PUT_SINK,
+ MGS_BULK_PORTAL);
+ if (desc == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ for (i = 0; i < nrpages; i++)
+ ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
+
+ ptlrpc_request_set_replen(req);
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ GOTO(out, rc);
+
+ res = req_capsule_server_get(&req->rq_pill, &RMF_MGS_CONFIG_RES);
+ if (res->mcr_size < res->mcr_offset)
+ GOTO(out, rc = -EINVAL);
+
+	/* always update the index, even if errors occurred while handling
+	 * the recovery logs */
+ cfg->cfg_last_idx = res->mcr_offset;
+ eof = res->mcr_offset == res->mcr_size;
+
+ CDEBUG(D_INFO, "Latest version "LPD64", more %d.\n",
+ res->mcr_offset, eof == false);
+
+ ealen = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, 0);
+ if (ealen < 0)
+ GOTO(out, rc = ealen);
+
+ if (ealen > nrpages << PAGE_CACHE_SHIFT)
+ GOTO(out, rc = -EINVAL);
+
+ if (ealen == 0) { /* no logs transferred */
+ if (!eof)
+ rc = -EINVAL;
+ GOTO(out, rc);
+ }
+
+ mne_swab = !!ptlrpc_rep_need_swab(req);
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0)
+ /* This import flag means the server did an extra swab of IR MNE
+ * records (fixed in LU-1252), reverse it here if needed. LU-1644 */
+ if (unlikely(req->rq_import->imp_need_mne_swab))
+ mne_swab = !mne_swab;
+#else
+#warning "LU-1644: Remove old OBD_CONNECT_MNE_SWAB fixup and imp_need_mne_swab"
+#endif
+
+ for (i = 0; i < nrpages && ealen > 0; i++) {
+ int rc2;
+ void *ptr;
+
+ ptr = kmap(pages[i]);
+ rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr,
+ min_t(int, ealen, PAGE_CACHE_SIZE),
+ mne_swab);
+ kunmap(pages[i]);
+ if (rc2 < 0) {
+ CWARN("Process recover log %s error %d\n",
+ cld->cld_logname, rc2);
+ break;
+ }
+
+ ealen -= PAGE_CACHE_SIZE;
+ }
+
+out:
+ if (req)
+ ptlrpc_req_finished(req);
+
+ if (rc == 0 && !eof)
+ goto again;
+
+ if (pages) {
+ for (i = 0; i < nrpages; i++) {
+ if (pages[i] == NULL)
+ break;
+ __free_page(pages[i]);
+ }
+ OBD_FREE(pages, sizeof(*pages) * nrpages);
+ }
+ return rc;
+}
+
+
+/* local_only means it cannot get remote llogs */
+static int mgc_process_cfg_log(struct obd_device *mgc,
+ struct config_llog_data *cld,
+ int local_only)
+{
+ struct llog_ctxt *ctxt, *lctxt = NULL;
+ struct lvfs_run_ctxt *saved_ctxt;
+ struct lustre_sb_info *lsi = NULL;
+ int rc = 0, must_pop = 0;
+ bool sptlrpc_started = false;
+
+ LASSERT(cld);
+ LASSERT(mutex_is_locked(&cld->cld_lock));
+
+	/*
+	 * The local copy of the sptlrpc log is controlled elsewhere; don't
+	 * try to read it here.
+	 */
+ if (cld_is_sptlrpc(cld) && local_only)
+ return 0;
+
+ if (cld->cld_cfg.cfg_sb)
+ lsi = s2lsi(cld->cld_cfg.cfg_sb);
+
+ ctxt = llog_get_context(mgc, LLOG_CONFIG_REPL_CTXT);
+ if (!ctxt) {
+ CERROR("missing llog context\n");
+ return -EINVAL;
+ }
+
+ OBD_ALLOC_PTR(saved_ctxt);
+ if (saved_ctxt == NULL)
+ return -ENOMEM;
+
+ lctxt = llog_get_context(mgc, LLOG_CONFIG_ORIG_CTXT);
+
+ if (local_only) { /* no local log at client side */
+ GOTO(out_pop, rc = -EIO);
+ }
+
+ if (cld_is_sptlrpc(cld)) {
+ sptlrpc_conf_log_update_begin(cld->cld_logname);
+ sptlrpc_started = true;
+ }
+
+	/* The logname and instance info should be the same, so use our
+	 * copy of the instance for the update.  The cfg_last_idx will
+	 * be updated here. */
+ rc = class_config_parse_llog(NULL, ctxt, cld->cld_logname,
+ &cld->cld_cfg);
+
+out_pop:
+ llog_ctxt_put(ctxt);
+ if (lctxt)
+ llog_ctxt_put(lctxt);
+ if (must_pop)
+ pop_ctxt(saved_ctxt, &mgc->obd_lvfs_ctxt, NULL);
+
+ OBD_FREE_PTR(saved_ctxt);
+	/*
+	 * Update settings on existing OBDs.  This is done inside
+	 * llog_process_lock so that no device is attaching/detaching
+	 * in parallel.
+	 * The logname must be <fsname>-sptlrpc.
+	 */
+ if (sptlrpc_started) {
+ LASSERT(cld_is_sptlrpc(cld));
+ sptlrpc_conf_log_update_end(cld->cld_logname);
+ class_notify_sptlrpc_conf(cld->cld_logname,
+ strlen(cld->cld_logname) -
+ strlen("-sptlrpc"));
+ }
+
+ return rc;
+}
+
+/** Get a config log from the MGS and process it.
+ * This function is called for both clients and servers.
+ * The log is copied locally before parsing when appropriate (non-MGS server).
+ */
+int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
+{
+ struct lustre_handle lockh = { 0 };
+ __u64 flags = LDLM_FL_NO_LRU;
+ int rc = 0, rcl;
+
+ LASSERT(cld);
+
+ /* I don't want multiple processes running process_log at once --
+ sounds like badness. It actually might be fine, as long as
+ we're not trying to update from the same log
+ simultaneously (in which case we should use a per-log sem.) */
+ mutex_lock(&cld->cld_lock);
+ if (cld->cld_stopping) {
+ mutex_unlock(&cld->cld_lock);
+ return 0;
+ }
+
+ OBD_FAIL_TIMEOUT(OBD_FAIL_MGC_PAUSE_PROCESS_LOG, 20);
+
+ CDEBUG(D_MGC, "Process log %s:%p from %d\n", cld->cld_logname,
+ cld->cld_cfg.cfg_instance, cld->cld_cfg.cfg_last_idx + 1);
+
+ /* Get the cfg lock on the llog */
+ rcl = mgc_enqueue(mgc->u.cli.cl_mgc_mgsexp, NULL, LDLM_PLAIN, NULL,
+ LCK_CR, &flags, NULL, NULL, NULL,
+ cld, 0, NULL, &lockh);
+ if (rcl == 0) {
+ /* Get the cld, it will be released in mgc_blocking_ast. */
+ config_log_get(cld);
+ rc = ldlm_lock_set_data(&lockh, (void *)cld);
+ LASSERT(rc == 0);
+ } else {
+ CDEBUG(D_MGC, "Can't get cfg lock: %d\n", rcl);
+
+ /* mark cld_lostlock so that it will requeue
+ * after MGC becomes available. */
+ cld->cld_lostlock = 1;
+ /* Get extra reference, it will be put in requeue thread */
+ config_log_get(cld);
+ }
+
+
+ if (cld_is_recover(cld)) {
+ rc = 0; /* this is not a fatal error for recover log */
+ if (rcl == 0)
+ rc = mgc_process_recover_log(mgc, cld);
+ } else {
+ rc = mgc_process_cfg_log(mgc, cld, rcl != 0);
+ }
+
+ CDEBUG(D_MGC, "%s: configuration from log '%s' %sed (%d).\n",
+ mgc->obd_name, cld->cld_logname, rc ? "fail" : "succeed", rc);
+
+ mutex_unlock(&cld->cld_lock);
+
+ /* Now drop the lock so MGS can revoke it */
+ if (!rcl) {
+ rcl = mgc_cancel(mgc->u.cli.cl_mgc_mgsexp, NULL,
+ LCK_CR, &lockh);
+ if (rcl)
+ CERROR("Can't drop cfg lock: %d\n", rcl);
+ }
+
+ return rc;
+}
+
+
+/** Called from lustre_process_log.
+ * LCFG_LOG_START gets the config log from the MGS, processes it to start
+ * any services, and adds it to the list of logs to watch (follow).
+ */
+static int mgc_process_config(struct obd_device *obd, obd_count len, void *buf)
+{
+ struct lustre_cfg *lcfg = buf;
+ struct config_llog_instance *cfg = NULL;
+ char *logname;
+ int rc = 0;
+
+ switch(lcfg->lcfg_command) {
+ case LCFG_LOV_ADD_OBD: {
+ /* Overloading this cfg command: register a new target */
+ struct mgs_target_info *mti;
+
+ if (LUSTRE_CFG_BUFLEN(lcfg, 1) !=
+ sizeof(struct mgs_target_info))
+ GOTO(out, rc = -EINVAL);
+
+ mti = (struct mgs_target_info *)lustre_cfg_buf(lcfg, 1);
+ CDEBUG(D_MGC, "add_target %s %#x\n",
+ mti->mti_svname, mti->mti_flags);
+ rc = mgc_target_register(obd->u.cli.cl_mgc_mgsexp, mti);
+ break;
+ }
+ case LCFG_LOV_DEL_OBD:
+ /* Unregister has no meaning at the moment. */
+ CERROR("lov_del_obd unimplemented\n");
+ rc = -ENOSYS;
+ break;
+ case LCFG_SPTLRPC_CONF: {
+ rc = sptlrpc_process_config(lcfg);
+ break;
+ }
+ case LCFG_LOG_START: {
+ struct config_llog_data *cld;
+ struct super_block *sb;
+
+ logname = lustre_cfg_string(lcfg, 1);
+ cfg = (struct config_llog_instance *)lustre_cfg_buf(lcfg, 2);
+ sb = *(struct super_block **)lustre_cfg_buf(lcfg, 3);
+
+ CDEBUG(D_MGC, "parse_log %s from %d\n", logname,
+ cfg->cfg_last_idx);
+
+ /* We're only called through here on the initial mount */
+ rc = config_log_add(obd, logname, cfg, sb);
+ if (rc)
+ break;
+ cld = config_log_find(logname, cfg);
+ if (cld == NULL) {
+ rc = -ENOENT;
+ break;
+ }
+
+ /* COMPAT_146 */
+ /* FIXME only set this for old logs! Right now this forces
+ us to always skip the "inside markers" check */
+ cld->cld_cfg.cfg_flags |= CFG_F_COMPAT146;
+
+ rc = mgc_process_log(obd, cld);
+ if (rc == 0 && cld->cld_recover != NULL) {
+ if (OCD_HAS_FLAG(&obd->u.cli.cl_import->
+ imp_connect_data, IMP_RECOV)) {
+ rc = mgc_process_log(obd, cld->cld_recover);
+ } else {
+ struct config_llog_data *cir = cld->cld_recover;
+ cld->cld_recover = NULL;
+ config_log_put(cir);
+ }
+ if (rc)
+ CERROR("Cannot process recover llog %d\n", rc);
+ }
+ config_log_put(cld);
+
+ break;
+ }
+ case LCFG_LOG_END: {
+ logname = lustre_cfg_string(lcfg, 1);
+
+ if (lcfg->lcfg_bufcount >= 2)
+ cfg = (struct config_llog_instance *)lustre_cfg_buf(
+ lcfg, 2);
+ rc = config_log_end(logname, cfg);
+ break;
+ }
+ default: {
+ CERROR("Unknown command: %d\n", lcfg->lcfg_command);
+ GOTO(out, rc = -EINVAL);
+
+ }
+ }
+out:
+ return rc;
+}
+
+struct obd_ops mgc_obd_ops = {
+ .o_owner = THIS_MODULE,
+ .o_setup = mgc_setup,
+ .o_precleanup = mgc_precleanup,
+ .o_cleanup = mgc_cleanup,
+ .o_add_conn = client_import_add_conn,
+ .o_del_conn = client_import_del_conn,
+ .o_connect = client_connect_import,
+ .o_disconnect = client_disconnect_export,
+ //.o_enqueue = mgc_enqueue,
+ .o_cancel = mgc_cancel,
+ //.o_iocontrol = mgc_iocontrol,
+ .o_set_info_async = mgc_set_info_async,
+ .o_get_info = mgc_get_info,
+ .o_import_event = mgc_import_event,
+ .o_llog_init = mgc_llog_init,
+ .o_llog_finish = mgc_llog_finish,
+ .o_process_config = mgc_process_config,
+};
+
+int __init mgc_init(void)
+{
+ return class_register_type(&mgc_obd_ops, NULL, NULL,
+ LUSTRE_MGC_NAME, NULL);
+}
+
+static void /*__exit*/ mgc_exit(void)
+{
+ class_unregister_type(LUSTRE_MGC_NAME);
+}
+
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("Lustre Management Client");
+MODULE_LICENSE("GPL");
+
+module_init(mgc_init);
+module_exit(mgc_exit);
diff --git a/drivers/staging/lustre/lustre/obdclass/Makefile b/drivers/staging/lustre/lustre/obdclass/Makefile
new file mode 100644
index 0000000..8a0e08c
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/Makefile
@@ -0,0 +1,13 @@
+obj-$(CONFIG_LUSTRE_FS) += obdclass.o llog_test.o
+
+obdclass-y := linux/linux-module.o linux/linux-obdo.o linux/linux-sysctl.o \
+ llog.o llog_cat.o llog_obd.o llog_swab.o class_obd.o debug.o \
+ genops.o uuid.o llog_ioctl.o lprocfs_status.o \
+ lustre_handles.o lustre_peer.o llog_osd.o \
+ local_storage.o statfs_pack.o obdo.o obd_config.o obd_mount.o\
+ mea.o lu_object.o dt_object.o capa.o cl_object.o \
+ cl_page.o cl_lock.o cl_io.o lu_ref.o acl.o idmap.o \
+ lu_ucred.o
+
+
+ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/obdclass/acl.c b/drivers/staging/lustre/lustre/obdclass/acl.c
new file mode 100644
index 0000000..f0bb632
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/acl.c
@@ -0,0 +1,539 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdclass/acl.c
+ *
+ * Lustre Access Control List.
+ *
+ * Author: Fan Yong <fanyong@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_SEC
+#include <lu_object.h>
+#include <lustre_acl.h>
+#include <lustre_eacl.h>
+#include <obd_support.h>
+
+#ifdef CONFIG_FS_POSIX_ACL
+
+#define CFS_ACL_XATTR_VERSION POSIX_ACL_XATTR_VERSION
+
+enum {
+ ES_UNK = 0, /* unknown stat */
+ ES_UNC = 1, /* ACL entry is not changed */
+ ES_MOD = 2, /* ACL entry is modified */
+ ES_ADD = 3, /* ACL entry is added */
+ ES_DEL = 4 /* ACL entry is deleted */
+};
+
+static inline void lustre_ext_acl_le_to_cpu(ext_acl_xattr_entry *d,
+ ext_acl_xattr_entry *s)
+{
+ d->e_tag = le16_to_cpu(s->e_tag);
+ d->e_perm = le16_to_cpu(s->e_perm);
+ d->e_id = le32_to_cpu(s->e_id);
+ d->e_stat = le32_to_cpu(s->e_stat);
+}
+
+static inline void lustre_ext_acl_cpu_to_le(ext_acl_xattr_entry *d,
+ ext_acl_xattr_entry *s)
+{
+ d->e_tag = cpu_to_le16(s->e_tag);
+ d->e_perm = cpu_to_le16(s->e_perm);
+ d->e_id = cpu_to_le32(s->e_id);
+ d->e_stat = cpu_to_le32(s->e_stat);
+}
+
+static inline void lustre_posix_acl_le_to_cpu(posix_acl_xattr_entry *d,
+ posix_acl_xattr_entry *s)
+{
+ d->e_tag = le16_to_cpu(s->e_tag);
+ d->e_perm = le16_to_cpu(s->e_perm);
+ d->e_id = le32_to_cpu(s->e_id);
+}
+
+static inline void lustre_posix_acl_cpu_to_le(posix_acl_xattr_entry *d,
+ posix_acl_xattr_entry *s)
+{
+ d->e_tag = cpu_to_le16(s->e_tag);
+ d->e_perm = cpu_to_le16(s->e_perm);
+ d->e_id = cpu_to_le32(s->e_id);
+}
+
+
+/* if "new_count == 0", then "new = {a_version, NULL}", NOT NULL. */
+static int lustre_posix_acl_xattr_reduce_space(posix_acl_xattr_header **header,
+ int old_count, int new_count)
+{
+ int old_size = CFS_ACL_XATTR_SIZE(old_count, posix_acl_xattr);
+ int new_size = CFS_ACL_XATTR_SIZE(new_count, posix_acl_xattr);
+ posix_acl_xattr_header *new;
+
+ if (unlikely(old_count <= new_count))
+ return old_size;
+
+ OBD_ALLOC(new, new_size);
+ if (unlikely(new == NULL))
+ return -ENOMEM;
+
+ memcpy(new, *header, new_size);
+ OBD_FREE(*header, old_size);
+ *header = new;
+ return new_size;
+}
+
+/* if "new_count == 0", then "new = {0, NULL}", NOT NULL. */
+static int lustre_ext_acl_xattr_reduce_space(ext_acl_xattr_header **header,
+ int old_count)
+{
+ int ext_count = le32_to_cpu((*header)->a_count);
+ int ext_size = CFS_ACL_XATTR_SIZE(ext_count, ext_acl_xattr);
+ int old_size = CFS_ACL_XATTR_SIZE(old_count, ext_acl_xattr);
+ ext_acl_xattr_header *new;
+
+ if (unlikely(old_count <= ext_count))
+ return 0;
+
+ OBD_ALLOC(new, ext_size);
+ if (unlikely(new == NULL))
+ return -ENOMEM;
+
+ memcpy(new, *header, ext_size);
+ OBD_FREE(*header, old_size);
+ *header = new;
+ return 0;
+}
+
+/*
+ * Generate new extended ACL based on the posix ACL.
+ */
+ext_acl_xattr_header *
+lustre_posix_acl_xattr_2ext(posix_acl_xattr_header *header, int size)
+{
+ int count, i, esize;
+ ext_acl_xattr_header *new;
+
+ if (unlikely(size < 0))
+ return ERR_PTR(-EINVAL);
+ else if (!size)
+ count = 0;
+ else
+ count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr);
+ esize = CFS_ACL_XATTR_SIZE(count, ext_acl_xattr);
+ OBD_ALLOC(new, esize);
+ if (unlikely(new == NULL))
+ return ERR_PTR(-ENOMEM);
+
+ new->a_count = cpu_to_le32(count);
+ for (i = 0; i < count; i++) {
+ new->a_entries[i].e_tag = header->a_entries[i].e_tag;
+ new->a_entries[i].e_perm = header->a_entries[i].e_perm;
+ new->a_entries[i].e_id = header->a_entries[i].e_id;
+ new->a_entries[i].e_stat = cpu_to_le32(ES_UNK);
+ }
+
+ return new;
+}
+EXPORT_SYMBOL(lustre_posix_acl_xattr_2ext);
+
+/*
+ * Filter out the "nobody" entries in the posix ACL.
+ */
+int lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, int size,
+ posix_acl_xattr_header **out)
+{
+ int count, i, j, rc = 0;
+ __u32 id;
+ posix_acl_xattr_header *new;
+
+ if (unlikely(size < 0))
+ return -EINVAL;
+ else if (!size)
+ return 0;
+
+ OBD_ALLOC(new, size);
+ if (unlikely(new == NULL))
+ return -ENOMEM;
+
+ new->a_version = cpu_to_le32(CFS_ACL_XATTR_VERSION);
+ count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr);
+ for (i = 0, j = 0; i < count; i++) {
+ id = le32_to_cpu(header->a_entries[i].e_id);
+ switch (le16_to_cpu(header->a_entries[i].e_tag)) {
+ case ACL_USER_OBJ:
+ case ACL_GROUP_OBJ:
+ case ACL_MASK:
+ case ACL_OTHER:
+ if (id != ACL_UNDEFINED_ID)
+ GOTO(_out, rc = -EIO);
+
+ memcpy(&new->a_entries[j++], &header->a_entries[i],
+ sizeof(posix_acl_xattr_entry));
+ break;
+ case ACL_USER:
+ if (id != NOBODY_UID)
+ memcpy(&new->a_entries[j++],
+ &header->a_entries[i],
+ sizeof(posix_acl_xattr_entry));
+ break;
+ case ACL_GROUP:
+ if (id != NOBODY_GID)
+ memcpy(&new->a_entries[j++],
+ &header->a_entries[i],
+ sizeof(posix_acl_xattr_entry));
+ break;
+ default:
+ GOTO(_out, rc = -EIO);
+ }
+ }
+
+ /* free unused space. */
+ rc = lustre_posix_acl_xattr_reduce_space(&new, count, j);
+ if (rc >= 0) {
+ size = rc;
+ *out = new;
+ rc = 0;
+ }
+
+_out:
+ if (rc) {
+ OBD_FREE(new, size);
+ size = rc;
+ }
+ return size;
+}
+EXPORT_SYMBOL(lustre_posix_acl_xattr_filter);
+
+/*
+ * Release the posix ACL space.
+ */
+void lustre_posix_acl_xattr_free(posix_acl_xattr_header *header, int size)
+{
+ OBD_FREE(header, size);
+}
+EXPORT_SYMBOL(lustre_posix_acl_xattr_free);
+
+/*
+ * Release the extended ACL space.
+ */
+void lustre_ext_acl_xattr_free(ext_acl_xattr_header *header)
+{
+ OBD_FREE(header, CFS_ACL_XATTR_SIZE(le32_to_cpu(header->a_count), \
+ ext_acl_xattr));
+}
+EXPORT_SYMBOL(lustre_ext_acl_xattr_free);
+
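+/*
+ * Find an entry with the same e_tag and e_id in the extended ACL, starting
+ * the search at *pos and wrapping around once; on a hit, *pos is advanced
+ * so that repeated lookups continue from the next slot.
+ */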
+static ext_acl_xattr_entry *
+lustre_ext_acl_xattr_search(ext_acl_xattr_header *header,
+ posix_acl_xattr_entry *entry, int *pos)
+{
+ int once, start, end, i, j, count = le32_to_cpu(header->a_count);
+
+ once = 0;
+ start = *pos;
+ end = count;
+
+again:
+ for (i = start; i < end; i++) {
+ if (header->a_entries[i].e_tag == entry->e_tag &&
+ header->a_entries[i].e_id == entry->e_id) {
+ j = i;
+ if (++i >= count)
+ i = 0;
+ *pos = i;
+ return &header->a_entries[j];
+ }
+ }
+
+ if (!once) {
+ once = 1;
+ start = 0;
+ end = *pos;
+ goto again;
+ }
+
+ return NULL;
+}
+
+/*
+ * Merge the posix ACL and the extended ACL into new posix ACL.
+ */
+int lustre_acl_xattr_merge2posix(posix_acl_xattr_header *posix_header, int size,
+ ext_acl_xattr_header *ext_header,
+ posix_acl_xattr_header **out)
+{
+ int posix_count, posix_size, i, j;
+ int ext_count = le32_to_cpu(ext_header->a_count), pos = 0, rc = 0;
+ posix_acl_xattr_entry pe = {ACL_MASK, 0, ACL_UNDEFINED_ID};
+ posix_acl_xattr_header *new;
+ ext_acl_xattr_entry *ee, ae;
+
+ lustre_posix_acl_cpu_to_le(&pe, &pe);
+ ee = lustre_ext_acl_xattr_search(ext_header, &pe, &pos);
+ if (ee == NULL || le32_to_cpu(ee->e_stat) == ES_DEL) {
+ /* there are only base ACL entries at most. */
+ posix_count = 3;
+ posix_size = CFS_ACL_XATTR_SIZE(posix_count, posix_acl_xattr);
+ OBD_ALLOC(new, posix_size);
+ if (unlikely(new == NULL))
+ return -ENOMEM;
+
+ new->a_version = cpu_to_le32(CFS_ACL_XATTR_VERSION);
+ for (i = 0, j = 0; i < ext_count; i++) {
+ lustre_ext_acl_le_to_cpu(&ae,
+ &ext_header->a_entries[i]);
+ switch (ae.e_tag) {
+ case ACL_USER_OBJ:
+ case ACL_GROUP_OBJ:
+ case ACL_OTHER:
+ if (ae.e_id != ACL_UNDEFINED_ID)
+ GOTO(_out, rc = -EIO);
+
+ if (ae.e_stat != ES_DEL) {
+ new->a_entries[j].e_tag =
+ ext_header->a_entries[i].e_tag;
+ new->a_entries[j].e_perm =
+ ext_header->a_entries[i].e_perm;
+ new->a_entries[j++].e_id =
+ ext_header->a_entries[i].e_id;
+ }
+ break;
+ case ACL_MASK:
+ case ACL_USER:
+ case ACL_GROUP:
+ if (ae.e_stat == ES_DEL)
+ break;
+ default:
+ GOTO(_out, rc = -EIO);
+ }
+ }
+ } else {
+		/* there may be valid ACL_USER or ACL_GROUP entries in the
+		 * original server-side ACL; they are regarded as ES_UNC state. */
+ int ori_posix_count;
+
+ if (unlikely(size < 0))
+ return -EINVAL;
+ else if (!size)
+ ori_posix_count = 0;
+ else
+ ori_posix_count =
+ CFS_ACL_XATTR_COUNT(size, posix_acl_xattr);
+ posix_count = ori_posix_count + ext_count;
+ posix_size =
+ CFS_ACL_XATTR_SIZE(posix_count, posix_acl_xattr);
+ OBD_ALLOC(new, posix_size);
+ if (unlikely(new == NULL))
+ return -ENOMEM;
+
+ new->a_version = cpu_to_le32(CFS_ACL_XATTR_VERSION);
+ /* 1. process the unchanged ACL entries
+ * in the original server-side ACL. */
+ pos = 0;
+ for (i = 0, j = 0; i < ori_posix_count; i++) {
+ ee = lustre_ext_acl_xattr_search(ext_header,
+ &posix_header->a_entries[i], &pos);
+ if (ee == NULL)
+ memcpy(&new->a_entries[j++],
+ &posix_header->a_entries[i],
+ sizeof(posix_acl_xattr_entry));
+ }
+
+ /* 2. process the non-deleted entries
+ * from client-side extended ACL. */
+ for (i = 0; i < ext_count; i++) {
+ if (le16_to_cpu(ext_header->a_entries[i].e_stat) !=
+ ES_DEL) {
+ new->a_entries[j].e_tag =
+ ext_header->a_entries[i].e_tag;
+ new->a_entries[j].e_perm =
+ ext_header->a_entries[i].e_perm;
+ new->a_entries[j++].e_id =
+ ext_header->a_entries[i].e_id;
+ }
+ }
+ }
+
+ /* free unused space. */
+ rc = lustre_posix_acl_xattr_reduce_space(&new, posix_count, j);
+ if (rc >= 0) {
+ posix_size = rc;
+ *out = new;
+ rc = 0;
+ }
+
+_out:
+ if (rc) {
+ OBD_FREE(new, posix_size);
+ posix_size = rc;
+ }
+ return posix_size;
+}
+EXPORT_SYMBOL(lustre_acl_xattr_merge2posix);
+
+/*
+ * Merge the posix ACL and the extended ACL into new extended ACL.
+ */
+ext_acl_xattr_header *
+lustre_acl_xattr_merge2ext(posix_acl_xattr_header *posix_header, int size,
+ ext_acl_xattr_header *ext_header)
+{
+ int ori_ext_count, posix_count, ext_count, ext_size;
+ int i, j, pos = 0, rc = 0;
+ posix_acl_xattr_entry pae;
+ ext_acl_xattr_header *new;
+ ext_acl_xattr_entry *ee, eae;
+
+ if (unlikely(size < 0))
+ return ERR_PTR(-EINVAL);
+ else if (!size)
+ posix_count = 0;
+ else
+ posix_count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr);
+ ori_ext_count = le32_to_cpu(ext_header->a_count);
+ ext_count = posix_count + ori_ext_count;
+ ext_size = CFS_ACL_XATTR_SIZE(ext_count, ext_acl_xattr);
+
+ OBD_ALLOC(new, ext_size);
+ if (unlikely(new == NULL))
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0, j = 0; i < posix_count; i++) {
+ lustre_posix_acl_le_to_cpu(&pae, &posix_header->a_entries[i]);
+ switch (pae.e_tag) {
+ case ACL_USER_OBJ:
+ case ACL_GROUP_OBJ:
+ case ACL_MASK:
+ case ACL_OTHER:
+ if (pae.e_id != ACL_UNDEFINED_ID)
+ GOTO(out, rc = -EIO);
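+			/* fall through: base entries are handled like the
+			 * ACL_USER entries below */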
+ case ACL_USER:
+ /* ignore "nobody" entry. */
+ if (pae.e_id == NOBODY_UID)
+ break;
+
+ new->a_entries[j].e_tag =
+ posix_header->a_entries[i].e_tag;
+ new->a_entries[j].e_perm =
+ posix_header->a_entries[i].e_perm;
+ new->a_entries[j].e_id =
+ posix_header->a_entries[i].e_id;
+ ee = lustre_ext_acl_xattr_search(ext_header,
+ &posix_header->a_entries[i], &pos);
+ if (ee) {
+ if (posix_header->a_entries[i].e_perm !=
+ ee->e_perm)
+ /* entry modified. */
+ ee->e_stat =
+ new->a_entries[j++].e_stat =
+ cpu_to_le32(ES_MOD);
+ else
+ /* entry unchanged. */
+ ee->e_stat =
+ new->a_entries[j++].e_stat =
+ cpu_to_le32(ES_UNC);
+ } else {
+ /* new entry. */
+ new->a_entries[j++].e_stat =
+ cpu_to_le32(ES_ADD);
+ }
+ break;
+ case ACL_GROUP:
+ /* ignore "nobody" entry. */
+ if (pae.e_id == NOBODY_GID)
+ break;
+ new->a_entries[j].e_tag =
+ posix_header->a_entries[i].e_tag;
+ new->a_entries[j].e_perm =
+ posix_header->a_entries[i].e_perm;
+ new->a_entries[j].e_id =
+ posix_header->a_entries[i].e_id;
+ ee = lustre_ext_acl_xattr_search(ext_header,
+ &posix_header->a_entries[i], &pos);
+ if (ee) {
+ if (posix_header->a_entries[i].e_perm !=
+ ee->e_perm)
+ /* entry modified. */
+ ee->e_stat =
+ new->a_entries[j++].e_stat =
+ cpu_to_le32(ES_MOD);
+ else
+ /* entry unchanged. */
+ ee->e_stat =
+ new->a_entries[j++].e_stat =
+ cpu_to_le32(ES_UNC);
+ } else {
+ /* new entry. */
+ new->a_entries[j++].e_stat =
+ cpu_to_le32(ES_ADD);
+ }
+ break;
+ default:
+ GOTO(out, rc = -EIO);
+ }
+ }
+
+ /* process deleted entries. */
+ for (i = 0; i < ori_ext_count; i++) {
+ lustre_ext_acl_le_to_cpu(&eae, &ext_header->a_entries[i]);
+ if (eae.e_stat == ES_UNK) {
+ /* ignore "nobody" entry. */
+ if ((eae.e_tag == ACL_USER && eae.e_id == NOBODY_UID) ||
+ (eae.e_tag == ACL_GROUP && eae.e_id == NOBODY_GID))
+ continue;
+
+ new->a_entries[j].e_tag =
+ ext_header->a_entries[i].e_tag;
+ new->a_entries[j].e_perm =
+ ext_header->a_entries[i].e_perm;
+ new->a_entries[j].e_id = ext_header->a_entries[i].e_id;
+ new->a_entries[j++].e_stat = cpu_to_le32(ES_DEL);
+ }
+ }
+
+ new->a_count = cpu_to_le32(j);
+ /* free unused space. */
+ rc = lustre_ext_acl_xattr_reduce_space(&new, ext_count);
+
+out:
+ if (rc) {
+ OBD_FREE(new, ext_size);
+ new = ERR_PTR(rc);
+ }
+ return new;
+}
+EXPORT_SYMBOL(lustre_acl_xattr_merge2ext);
+
+#endif
diff --git a/drivers/staging/lustre/lustre/obdclass/capa.c b/drivers/staging/lustre/lustre/obdclass/capa.c
new file mode 100644
index 0000000..68d797b
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/capa.c
@@ -0,0 +1,418 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdclass/capa.c
+ *
+ * Lustre Capability Hash Management
+ *
+ * Author: Lai Siyao <lsy@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_SEC
+
+#include <linux/fs.h>
+#include <asm/unistd.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/crypto.h>
+
+#include <obd_class.h>
+#include <lustre_debug.h>
+#include <lustre/lustre_idl.h>
+
+#include <linux/list.h>
+#include <lustre_capa.h>
+
+#define NR_CAPAHASH 32
+#define CAPA_HASH_SIZE 3000 /* for MDS & OSS */
+
+struct kmem_cache *capa_cachep = NULL;
+
+/* lock for capa hash/capa_list/fo_capa_keys */
+DEFINE_SPINLOCK(capa_lock);
+
+struct list_head capa_list[CAPA_SITE_MAX];
+
+static struct capa_hmac_alg capa_hmac_algs[] = {
+ DEF_CAPA_HMAC_ALG("sha1", SHA1, 20, 20),
+};
+/* capa count */
+int capa_count[CAPA_SITE_MAX] = { 0, };
+
+EXPORT_SYMBOL(capa_cachep);
+EXPORT_SYMBOL(capa_list);
+EXPORT_SYMBOL(capa_lock);
+EXPORT_SYMBOL(capa_count);
+
+static inline
+unsigned int ll_crypto_tfm_alg_min_keysize(struct crypto_blkcipher *tfm)
+{
+ return crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher.min_keysize;
+}
+
+struct hlist_head *init_capa_hash(void)
+{
+ struct hlist_head *hash;
+ int nr_hash, i;
+
+ OBD_ALLOC(hash, PAGE_CACHE_SIZE);
+ if (!hash)
+ return NULL;
+
+ nr_hash = PAGE_CACHE_SIZE / sizeof(struct hlist_head);
+ LASSERT(nr_hash > NR_CAPAHASH);
+
+ for (i = 0; i < NR_CAPAHASH; i++)
+ INIT_HLIST_HEAD(hash + i);
+ return hash;
+}
+EXPORT_SYMBOL(init_capa_hash);
+
+static inline int capa_on_server(struct obd_capa *ocapa)
+{
+ return ocapa->c_site == CAPA_SITE_SERVER;
+}
+
+static inline void capa_delete(struct obd_capa *ocapa)
+{
+ LASSERT(capa_on_server(ocapa));
+ hlist_del_init(&ocapa->u.tgt.c_hash);
+ list_del_init(&ocapa->c_list);
+ capa_count[ocapa->c_site]--;
+	/* release the ref taken at allocation time */
+ capa_put(ocapa);
+}
+
+void cleanup_capa_hash(struct hlist_head *hash)
+{
+ int i;
+ struct hlist_node *next;
+ struct obd_capa *oc;
+
+ spin_lock(&capa_lock);
+ for (i = 0; i < NR_CAPAHASH; i++) {
+ hlist_for_each_entry_safe(oc, next, hash + i,
+ u.tgt.c_hash)
+ capa_delete(oc);
+ }
+ spin_unlock(&capa_lock);
+
+ OBD_FREE(hash, PAGE_CACHE_SIZE);
+}
+EXPORT_SYMBOL(cleanup_capa_hash);
+
+static inline int capa_hashfn(struct lu_fid *fid)
+{
+ return (fid_oid(fid) ^ fid_ver(fid)) *
+ (unsigned long)(fid_seq(fid) + 1) % NR_CAPAHASH;
+}
+
+/* The capa renewal time check here is earlier than the one on the client, to
+ * prevent the client from renewing right after obtaining the capability. */
+static inline int capa_is_to_expire(struct obd_capa *oc)
+{
+ return cfs_time_before(cfs_time_sub(oc->c_expiry,
+ cfs_time_seconds(oc->c_capa.lc_timeout)*2/3),
+ cfs_time_current());
+}
+
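+/* Look up a capability in one hash chain.  With @alive set, only the fields
+ * before lc_keyid are compared, and entries about to expire are skipped. */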
+static struct obd_capa *find_capa(struct lustre_capa *capa,
+ struct hlist_head *head, int alive)
+{
+ struct obd_capa *ocapa;
+	int len = alive ? offsetof(struct lustre_capa, lc_keyid) : sizeof(*capa);
+
+ hlist_for_each_entry(ocapa, head, u.tgt.c_hash) {
+ if (memcmp(&ocapa->c_capa, capa, len))
+ continue;
+ /* don't return one that will expire soon in this case */
+ if (alive && capa_is_to_expire(ocapa))
+ continue;
+
+ LASSERT(capa_on_server(ocapa));
+
+ DEBUG_CAPA(D_SEC, &ocapa->c_capa, "found");
+ return ocapa;
+ }
+
+ return NULL;
+}
+
+#define LRU_CAPA_DELETE_COUNT 12
+static inline void capa_delete_lru(struct list_head *head)
+{
+ struct obd_capa *ocapa;
+ struct list_head *node = head->next;
+ int count = 0;
+
+ /* free LRU_CAPA_DELETE_COUNT unused capa from head */
+ while (count++ < LRU_CAPA_DELETE_COUNT) {
+ ocapa = list_entry(node, struct obd_capa, c_list);
+ node = node->next;
+ if (atomic_read(&ocapa->c_refc))
+ continue;
+
+ DEBUG_CAPA(D_SEC, &ocapa->c_capa, "free lru");
+ capa_delete(ocapa);
+ }
+}
+
+/* add or update */
+struct obd_capa *capa_add(struct hlist_head *hash, struct lustre_capa *capa)
+{
+ struct hlist_head *head = hash + capa_hashfn(&capa->lc_fid);
+ struct obd_capa *ocapa, *old = NULL;
+ struct list_head *list = &capa_list[CAPA_SITE_SERVER];
+
+ ocapa = alloc_capa(CAPA_SITE_SERVER);
+ if (IS_ERR(ocapa))
+ return NULL;
+
+ spin_lock(&capa_lock);
+ old = find_capa(capa, head, 0);
+ if (!old) {
+ ocapa->c_capa = *capa;
+ set_capa_expiry(ocapa);
+ hlist_add_head(&ocapa->u.tgt.c_hash, head);
+ list_add_tail(&ocapa->c_list, list);
+ capa_get(ocapa);
+ capa_count[CAPA_SITE_SERVER]++;
+ if (capa_count[CAPA_SITE_SERVER] > CAPA_HASH_SIZE)
+ capa_delete_lru(list);
+ spin_unlock(&capa_lock);
+ return ocapa;
+ } else {
+ capa_get(old);
+ spin_unlock(&capa_lock);
+ capa_put(ocapa);
+ return old;
+ }
+}
+EXPORT_SYMBOL(capa_add);
+
+struct obd_capa *capa_lookup(struct hlist_head *hash, struct lustre_capa *capa,
+ int alive)
+{
+ struct obd_capa *ocapa;
+
+ spin_lock(&capa_lock);
+ ocapa = find_capa(capa, hash + capa_hashfn(&capa->lc_fid), alive);
+ if (ocapa) {
+ list_move_tail(&ocapa->c_list,
+ &capa_list[CAPA_SITE_SERVER]);
+ capa_get(ocapa);
+ }
+ spin_unlock(&capa_lock);
+
+ return ocapa;
+}
+EXPORT_SYMBOL(capa_lookup);
+
+static inline int ll_crypto_hmac(struct crypto_hash *tfm,
+ u8 *key, unsigned int *keylen,
+ struct scatterlist *sg,
+ unsigned int size, u8 *result)
+{
+ struct hash_desc desc;
+ int rv;
+ desc.tfm = tfm;
+ desc.flags = 0;
+ rv = crypto_hash_setkey(desc.tfm, key, *keylen);
+ if (rv) {
+ CERROR("failed to hash setkey: %d\n", rv);
+ return rv;
+ }
+ return crypto_hash_digest(&desc, sg, size, result);
+}
+
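+/* Compute the SHA1 HMAC over a capability (everything before lc_hmac) with
+ * the given key and store it in @hmac. */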
+int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key)
+{
+ struct crypto_hash *tfm;
+ struct capa_hmac_alg *alg;
+ int keylen;
+ struct scatterlist sl;
+
+ if (capa_alg(capa) != CAPA_HMAC_ALG_SHA1) {
+ CERROR("unknown capability hmac algorithm!\n");
+ return -EFAULT;
+ }
+
+ alg = &capa_hmac_algs[capa_alg(capa)];
+
+ tfm = crypto_alloc_hash(alg->ha_name, 0, 0);
+ if (!tfm) {
+		CERROR("crypto_alloc_tfm failed, check whether your kernel "
+		       "has crypto support!\n");
+ return -ENOMEM;
+ }
+ keylen = alg->ha_keylen;
+
+ sg_set_page(&sl, virt_to_page(capa),
+ offsetof(struct lustre_capa, lc_hmac),
+ (unsigned long)(capa) % PAGE_CACHE_SIZE);
+
+ ll_crypto_hmac(tfm, key, &keylen, &sl, sl.length, hmac);
+ crypto_free_hash(tfm);
+
+ return 0;
+}
+EXPORT_SYMBOL(capa_hmac);
+
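+/* Encrypt the 16-byte capability id in @s into @d with AES, using the first
+ * min_keysize bytes of @key. */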
+int capa_encrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
+{
+ struct crypto_blkcipher *tfm;
+ struct scatterlist sd;
+ struct scatterlist ss;
+ struct blkcipher_desc desc;
+ unsigned int min;
+ int rc;
+ char alg[CRYPTO_MAX_ALG_NAME+1] = "aes";
+
+ /* passing "aes" in a variable instead of a constant string keeps gcc
+ * 4.3.2 happy */
+ tfm = crypto_alloc_blkcipher(alg, 0, 0 );
+ if (IS_ERR(tfm)) {
+ CERROR("failed to load transform for aes\n");
+ return PTR_ERR(tfm);
+ }
+
+ min = ll_crypto_tfm_alg_min_keysize(tfm);
+ if (keylen < min) {
+		CERROR("keylen must be at least %d bits for aes\n", min * 8);
+ GOTO(out, rc = -EINVAL);
+ }
+
+ rc = crypto_blkcipher_setkey(tfm, key, min);
+ if (rc) {
+		CERROR("failed to set key for aes\n");
+ GOTO(out, rc);
+ }
+
+ sg_set_page(&sd, virt_to_page(d), 16,
+ (unsigned long)(d) % PAGE_CACHE_SIZE);
+
+ sg_set_page(&ss, virt_to_page(s), 16,
+ (unsigned long)(s) % PAGE_CACHE_SIZE);
+ desc.tfm = tfm;
+ desc.info = NULL;
+ desc.flags = 0;
+ rc = crypto_blkcipher_encrypt(&desc, &sd, &ss, 16);
+ if (rc) {
+ CERROR("failed to encrypt for aes\n");
+ GOTO(out, rc);
+ }
+
+out:
+ crypto_free_blkcipher(tfm);
+ return rc;
+}
+EXPORT_SYMBOL(capa_encrypt_id);
+
+int capa_decrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
+{
+ struct crypto_blkcipher *tfm;
+ struct scatterlist sd;
+ struct scatterlist ss;
+ struct blkcipher_desc desc;
+ unsigned int min;
+ int rc;
+ char alg[CRYPTO_MAX_ALG_NAME+1] = "aes";
+
+ /* passing "aes" in a variable instead of a constant string keeps gcc
+ * 4.3.2 happy */
+ tfm = crypto_alloc_blkcipher(alg, 0, 0 );
+ if (IS_ERR(tfm)) {
+ CERROR("failed to load transform for aes\n");
+ return PTR_ERR(tfm);
+ }
+
+ min = ll_crypto_tfm_alg_min_keysize(tfm);
+ if (keylen < min) {
+		CERROR("keylen must be at least %d bits for aes\n", min * 8);
+ GOTO(out, rc = -EINVAL);
+ }
+
+ rc = crypto_blkcipher_setkey(tfm, key, min);
+ if (rc) {
+		CERROR("failed to set key for aes\n");
+ GOTO(out, rc);
+ }
+
+ sg_set_page(&sd, virt_to_page(d), 16,
+ (unsigned long)(d) % PAGE_CACHE_SIZE);
+
+ sg_set_page(&ss, virt_to_page(s), 16,
+ (unsigned long)(s) % PAGE_CACHE_SIZE);
+
+ desc.tfm = tfm;
+ desc.info = NULL;
+ desc.flags = 0;
+ rc = crypto_blkcipher_decrypt(&desc, &sd, &ss, 16);
+ if (rc) {
+ CERROR("failed to decrypt for aes\n");
+ GOTO(out, rc);
+ }
+
+out:
+ crypto_free_blkcipher(tfm);
+ return rc;
+}
+EXPORT_SYMBOL(capa_decrypt_id);
+
+void capa_cpy(void *capa, struct obd_capa *ocapa)
+{
+ spin_lock(&ocapa->c_lock);
+ *(struct lustre_capa *)capa = ocapa->c_capa;
+ spin_unlock(&ocapa->c_lock);
+}
+EXPORT_SYMBOL(capa_cpy);
+
+void _debug_capa(struct lustre_capa *c,
+ struct libcfs_debug_msg_data *msgdata,
+ const char *fmt, ... )
+{
+ va_list args;
+ va_start(args, fmt);
+ libcfs_debug_vmsg2(msgdata, fmt, args,
+ " capability@%p fid "DFID" opc "LPX64" uid "LPU64
+ " gid "LPU64" flags %u alg %d keyid %u timeout %u "
+ "expiry %u\n", c, PFID(capa_fid(c)), capa_opc(c),
+ capa_uid(c), capa_gid(c), capa_flags(c),
+ capa_alg(c), capa_keyid(c), capa_timeout(c),
+ capa_expiry(c));
+ va_end(args);
+}
+EXPORT_SYMBOL(_debug_capa);
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_internal.h b/drivers/staging/lustre/lustre/obdclass/cl_internal.h
new file mode 100644
index 0000000..7eb0ad7
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/cl_internal.h
@@ -0,0 +1,121 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Internal cl interfaces.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+#ifndef _CL_INTERNAL_H
+#define _CL_INTERNAL_H
+
+#define CLT_PVEC_SIZE (14)
+
+/**
+ * Possible levels of the nesting. Currently this is 2: there are "top"
+ * entities (files, extent locks), and "sub" entities (stripes and stripe
+ * locks). This is used only for debugging counters right now.
+ */
+enum clt_nesting_level {
+ CNL_TOP,
+ CNL_SUB,
+ CNL_NR
+};
+
+/**
+ * Counters used to check correctness of cl_lock interface usage.
+ */
+struct cl_thread_counters {
+ /**
+ * Number of outstanding calls to cl_lock_mutex_get() made by the
+ * current thread. For debugging.
+ */
+ int ctc_nr_locks_locked;
+ /** List of locked locks. */
+ struct lu_ref ctc_locks_locked;
+ /** Number of outstanding holds on locks. */
+ int ctc_nr_held;
+ /** Number of outstanding uses on locks. */
+ int ctc_nr_used;
+ /** Number of held extent locks. */
+ int ctc_nr_locks_acquired;
+};
+
+/**
+ * Thread local state internal for generic cl-code.
+ */
+struct cl_thread_info {
+ /*
+ * Common fields.
+ */
+ struct cl_io clt_io;
+ struct cl_2queue clt_queue;
+
+ /*
+ * Fields used by cl_lock.c
+ */
+ struct cl_lock_descr clt_descr;
+ struct cl_page_list clt_list;
+ /**
+ * Counters for every level of lock nesting.
+ */
+ struct cl_thread_counters clt_counters[CNL_NR];
+ /** @} debugging */
+
+ /*
+ * Fields used by cl_page.c
+ */
+ struct cl_page *clt_pvec[CLT_PVEC_SIZE];
+
+ /*
+ * Fields used by cl_io.c
+ */
+ /**
+ * Pointer to the topmost ongoing IO in this thread.
+ */
+ struct cl_io *clt_current_io;
+ /**
+ * Used for submitting a sync io.
+ */
+ struct cl_sync_io clt_anchor;
+ /**
+ * Fields used by cl_lock_discard_pages().
+ */
+ pgoff_t clt_next_index;
+ pgoff_t clt_fn_index; /* first non-overlapped index */
+};
+
+struct cl_thread_info *cl_env_info(const struct lu_env *env);
+
+#endif /* _CL_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c
new file mode 100644
index 0000000..4269793
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c
@@ -0,0 +1,1669 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Client IO.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_CLASS
+
+#include <obd_class.h>
+#include <obd_support.h>
+#include <lustre_fid.h>
+#include <linux/list.h>
+#include <cl_object.h>
+#include "cl_internal.h"
+
+/*****************************************************************************
+ *
+ * cl_io interface.
+ *
+ */
+
+#define cl_io_for_each(slice, io) \
+ list_for_each_entry((slice), &io->ci_layers, cis_linkage)
+#define cl_io_for_each_reverse(slice, io) \
+ list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)
+
+static inline int cl_io_type_is_valid(enum cl_io_type type)
+{
+ return CIT_READ <= type && type < CIT_OP_NR;
+}
+
+static inline int cl_io_is_loopable(const struct cl_io *io)
+{
+ return cl_io_type_is_valid(io->ci_type) && io->ci_type != CIT_MISC;
+}
+
+/**
+ * Returns true iff there is an IO ongoing in the given environment.
+ */
+int cl_io_is_going(const struct lu_env *env)
+{
+ return cl_env_info(env)->clt_current_io != NULL;
+}
+EXPORT_SYMBOL(cl_io_is_going);
+
+/**
+ * cl_io invariant that holds at all times when exported cl_io_*() functions
+ * are entered and left.
+ */
+static int cl_io_invariant(const struct cl_io *io)
+{
+ struct cl_io *up;
+
+ up = io->ci_parent;
+ return
+ /*
+ * io can own pages only when it is ongoing. Sub-io might
+ * still be in CIS_LOCKED state when top-io is in
+ * CIS_IO_GOING.
+ */
+ ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING ||
+ (io->ci_state == CIS_LOCKED && up != NULL));
+}
+
+/**
+ * Finalize \a io, by calling cl_io_operations::cio_fini() bottom-to-top.
+ */
+void cl_io_fini(const struct lu_env *env, struct cl_io *io)
+{
+ struct cl_io_slice *slice;
+ struct cl_thread_info *info;
+
+ LINVRNT(cl_io_type_is_valid(io->ci_type));
+ LINVRNT(cl_io_invariant(io));
+
+ while (!list_empty(&io->ci_layers)) {
+ slice = container_of(io->ci_layers.prev, struct cl_io_slice,
+ cis_linkage);
+ list_del_init(&slice->cis_linkage);
+ if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
+ slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
+ /*
+ * Invalidate slice to catch use after free. This assumes that
+ * slices are allocated within session and can be touched
+ * after ->cio_fini() returns.
+ */
+ slice->cis_io = NULL;
+ }
+ io->ci_state = CIS_FINI;
+ info = cl_env_info(env);
+ if (info->clt_current_io == io)
+ info->clt_current_io = NULL;
+
+ /* sanity check for layout change */
+ switch(io->ci_type) {
+ case CIT_READ:
+ case CIT_WRITE:
+ break;
+ case CIT_FAULT:
+ case CIT_FSYNC:
+ LASSERT(!io->ci_need_restart);
+ break;
+ case CIT_SETATTR:
+ case CIT_MISC:
+ /* Check ignore layout change conf */
+ LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout,
+ !io->ci_need_restart));
+ break;
+ default:
+ LBUG();
+ }
+}
+EXPORT_SYMBOL(cl_io_fini);
+
+static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
+ enum cl_io_type iot, struct cl_object *obj)
+{
+ struct cl_object *scan;
+ int result;
+
+ LINVRNT(io->ci_state == CIS_ZERO || io->ci_state == CIS_FINI);
+ LINVRNT(cl_io_type_is_valid(iot));
+ LINVRNT(cl_io_invariant(io));
+
+ io->ci_type = iot;
+ INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
+ INIT_LIST_HEAD(&io->ci_lockset.cls_curr);
+ INIT_LIST_HEAD(&io->ci_lockset.cls_done);
+ INIT_LIST_HEAD(&io->ci_layers);
+
+ result = 0;
+ cl_object_for_each(scan, obj) {
+ if (scan->co_ops->coo_io_init != NULL) {
+ result = scan->co_ops->coo_io_init(env, scan, io);
+ if (result != 0)
+ break;
+ }
+ }
+ if (result == 0)
+ io->ci_state = CIS_INIT;
+ return result;
+}
+
+/**
+ * Initialize sub-io, by calling cl_io_operations::cio_init() top-to-bottom.
+ *
+ * \pre obj != cl_object_top(obj)
+ */
+int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
+ enum cl_io_type iot, struct cl_object *obj)
+{
+ struct cl_thread_info *info = cl_env_info(env);
+
+ LASSERT(obj != cl_object_top(obj));
+ if (info->clt_current_io == NULL)
+ info->clt_current_io = io;
+ return cl_io_init0(env, io, iot, obj);
+}
+EXPORT_SYMBOL(cl_io_sub_init);
+
+/**
+ * Initialize \a io, by calling cl_io_operations::cio_init() top-to-bottom.
+ *
+ * Caller has to call cl_io_fini() after a call to cl_io_init(), no matter
+ * what the latter returned.
+ *
+ * \pre obj == cl_object_top(obj)
+ * \pre cl_io_type_is_valid(iot)
+ * \post cl_io_type_is_valid(io->ci_type) && io->ci_type == iot
+ */
+int cl_io_init(const struct lu_env *env, struct cl_io *io,
+ enum cl_io_type iot, struct cl_object *obj)
+{
+ struct cl_thread_info *info = cl_env_info(env);
+
+ LASSERT(obj == cl_object_top(obj));
+ LASSERT(info->clt_current_io == NULL);
+
+ info->clt_current_io = io;
+ return cl_io_init0(env, io, iot, obj);
+}
+EXPORT_SYMBOL(cl_io_init);
+
+/**
+ * Initialize read or write io.
+ *
+ * \pre iot == CIT_READ || iot == CIT_WRITE
+ */
+int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
+ enum cl_io_type iot, loff_t pos, size_t count)
+{
+ LINVRNT(iot == CIT_READ || iot == CIT_WRITE);
+ LINVRNT(io->ci_obj != NULL);
+
+ LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
+ "io range: %u ["LPU64", "LPU64") %u %u\n",
+ iot, (__u64)pos, (__u64)pos + count,
+ io->u.ci_rw.crw_nonblock, io->u.ci_wr.wr_append);
+ io->u.ci_rw.crw_pos = pos;
+ io->u.ci_rw.crw_count = count;
+ return cl_io_init(env, io, iot, io->ci_obj);
+}
+EXPORT_SYMBOL(cl_io_rw_init);
+
+static inline const struct lu_fid *
+cl_lock_descr_fid(const struct cl_lock_descr *descr)
+{
+ return lu_object_fid(&descr->cld_obj->co_lu);
+}
+
+static int cl_lock_descr_sort(const struct cl_lock_descr *d0,
+ const struct cl_lock_descr *d1)
+{
+ return lu_fid_cmp(cl_lock_descr_fid(d0), cl_lock_descr_fid(d1)) ?:
+ __diff_normalize(d0->cld_start, d1->cld_start);
+}
+
+static int cl_lock_descr_cmp(const struct cl_lock_descr *d0,
+ const struct cl_lock_descr *d1)
+{
+ int ret;
+
+ ret = lu_fid_cmp(cl_lock_descr_fid(d0), cl_lock_descr_fid(d1));
+ if (ret)
+ return ret;
+ if (d0->cld_end < d1->cld_start)
+ return -1;
+	if (d0->cld_start > d1->cld_end)
+ return 1;
+ return 0;
+}
+
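+/* Expand @d0 so that it also covers @d1, upgrading the lock mode if needed. */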
+static void cl_lock_descr_merge(struct cl_lock_descr *d0,
+ const struct cl_lock_descr *d1)
+{
+ d0->cld_start = min(d0->cld_start, d1->cld_start);
+ d0->cld_end = max(d0->cld_end, d1->cld_end);
+
+ if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
+ d0->cld_mode = CLM_WRITE;
+
+ if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
+ d0->cld_mode = CLM_GROUP;
+}
+
+/*
+ * Sort locks in lexicographical order of their (fid, start-offset) pairs.
+ */
+static void cl_io_locks_sort(struct cl_io *io)
+{
+ int done = 0;
+
+ /* hidden treasure: bubble sort for now. */
+ do {
+ struct cl_io_lock_link *curr;
+ struct cl_io_lock_link *prev;
+ struct cl_io_lock_link *temp;
+
+ done = 1;
+ prev = NULL;
+
+ list_for_each_entry_safe(curr, temp,
+ &io->ci_lockset.cls_todo,
+ cill_linkage) {
+ if (prev != NULL) {
+ switch (cl_lock_descr_sort(&prev->cill_descr,
+ &curr->cill_descr)) {
+ case 0:
+ /*
+ * IMPOSSIBLE: Identical locks are
+ * already removed at
+ * this point.
+ */
+ default:
+ LBUG();
+ case +1:
+ list_move_tail(&curr->cill_linkage,
+ &prev->cill_linkage);
+ done = 0;
+ continue; /* don't change prev: it's
+ * still "previous" */
+ case -1: /* already in order */
+ break;
+ }
+ }
+ prev = curr;
+ }
+ } while (!done);
+}
+
+/**
+ * Check whether \a queue contains locks matching \a need.
+ *
+ * \retval +ve there is a matching lock in the \a queue
+ * \retval 0 there are no matching locks in the \a queue
+ */
+int cl_queue_match(const struct list_head *queue,
+ const struct cl_lock_descr *need)
+{
+ struct cl_io_lock_link *scan;
+
+ list_for_each_entry(scan, queue, cill_linkage) {
+ if (cl_lock_descr_match(&scan->cill_descr, need))
+ return +1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(cl_queue_match);
+
+static int cl_queue_merge(const struct list_head *queue,
+ const struct cl_lock_descr *need)
+{
+ struct cl_io_lock_link *scan;
+
+ list_for_each_entry(scan, queue, cill_linkage) {
+ if (cl_lock_descr_cmp(&scan->cill_descr, need))
+ continue;
+ cl_lock_descr_merge(&scan->cill_descr, need);
+ CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
+ scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
+ scan->cill_descr.cld_end);
+ return +1;
+ }
+ return 0;
+}
+
+static int cl_lockset_match(const struct cl_lockset *set,
+ const struct cl_lock_descr *need)
+{
+ return cl_queue_match(&set->cls_curr, need) ||
+ cl_queue_match(&set->cls_done, need);
+}
+
+static int cl_lockset_merge(const struct cl_lockset *set,
+ const struct cl_lock_descr *need)
+{
+ return cl_queue_merge(&set->cls_todo, need) ||
+ cl_lockset_match(set, need);
+}
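+
+/*
+ * Lockset flow, summarizing the helpers above and below: cl_io_lock_add()
+ * queues a link on cls_todo (or widens an existing todo descriptor via
+ * cl_lockset_merge()); cl_lockset_lock_one() requests the lock and moves
+ * the link to cls_curr; once cl_wait() confirms that the lock is held, the
+ * link ends up on cls_done.
+ */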
+
+static int cl_lockset_lock_one(const struct lu_env *env,
+ struct cl_io *io, struct cl_lockset *set,
+ struct cl_io_lock_link *link)
+{
+ struct cl_lock *lock;
+ int result;
+
+ lock = cl_lock_request(env, io, &link->cill_descr, "io", io);
+
+ if (!IS_ERR(lock)) {
+ link->cill_lock = lock;
+ list_move(&link->cill_linkage, &set->cls_curr);
+ if (!(link->cill_descr.cld_enq_flags & CEF_ASYNC)) {
+ result = cl_wait(env, lock);
+ if (result == 0)
+ list_move(&link->cill_linkage,
+ &set->cls_done);
+ } else
+ result = 0;
+ } else
+ result = PTR_ERR(lock);
+ return result;
+}
+
+static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io,
+ struct cl_io_lock_link *link)
+{
+ struct cl_lock *lock = link->cill_lock;
+
+ list_del_init(&link->cill_linkage);
+ if (lock != NULL) {
+ cl_lock_release(env, lock, "io", io);
+ link->cill_lock = NULL;
+ }
+ if (link->cill_fini != NULL)
+ link->cill_fini(env, link);
+}
+
+static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
+ struct cl_lockset *set)
+{
+ struct cl_io_lock_link *link;
+ struct cl_io_lock_link *temp;
+ struct cl_lock *lock;
+ int result;
+
+ result = 0;
+ list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
+ if (!cl_lockset_match(set, &link->cill_descr)) {
+ /* XXX some locking to guarantee that locks aren't
+ * expanded in between. */
+ result = cl_lockset_lock_one(env, io, set, link);
+ if (result != 0)
+ break;
+ } else
+ cl_lock_link_fini(env, io, link);
+ }
+ if (result == 0) {
+ list_for_each_entry_safe(link, temp,
+ &set->cls_curr, cill_linkage) {
+ lock = link->cill_lock;
+ result = cl_wait(env, lock);
+ if (result == 0)
+ list_move(&link->cill_linkage,
+ &set->cls_done);
+ else
+ break;
+ }
+ }
+ return result;
+}
+
+/**
+ * Takes locks necessary for the current iteration of io.
+ *
+ * Calls cl_io_operations::cio_lock() top-to-bottom to collect locks required
+ * by layers for the current iteration, then sorts the locks (to avoid
+ * deadlocks) and acquires them.
+ */
+int cl_io_lock(const struct lu_env *env, struct cl_io *io)
+{
+ const struct cl_io_slice *scan;
+ int result = 0;
+
+ LINVRNT(cl_io_is_loopable(io));
+ LINVRNT(io->ci_state == CIS_IT_STARTED);
+ LINVRNT(cl_io_invariant(io));
+
+ cl_io_for_each(scan, io) {
+ if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
+ continue;
+ result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
+ if (result != 0)
+ break;
+ }
+ if (result == 0) {
+ cl_io_locks_sort(io);
+ result = cl_lockset_lock(env, io, &io->ci_lockset);
+ }
+ if (result != 0)
+ cl_io_unlock(env, io);
+ else
+ io->ci_state = CIS_LOCKED;
+ return result;
+}
+EXPORT_SYMBOL(cl_io_lock);
+
+/**
+ * Release locks taken by io.
+ */
+void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
+{
+ struct cl_lockset *set;
+ struct cl_io_lock_link *link;
+ struct cl_io_lock_link *temp;
+ const struct cl_io_slice *scan;
+
+ LASSERT(cl_io_is_loopable(io));
+ LASSERT(CIS_IT_STARTED <= io->ci_state && io->ci_state < CIS_UNLOCKED);
+ LINVRNT(cl_io_invariant(io));
+
+ set = &io->ci_lockset;
+
+ list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage)
+ cl_lock_link_fini(env, io, link);
+
+ list_for_each_entry_safe(link, temp, &set->cls_curr, cill_linkage)
+ cl_lock_link_fini(env, io, link);
+
+ list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
+ cl_unuse(env, link->cill_lock);
+ cl_lock_link_fini(env, io, link);
+ }
+ cl_io_for_each_reverse(scan, io) {
+ if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
+ scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
+ }
+ io->ci_state = CIS_UNLOCKED;
+ LASSERT(!cl_env_info(env)->clt_counters[CNL_TOP].ctc_nr_locks_acquired);
+}
+EXPORT_SYMBOL(cl_io_unlock);
+
+/**
+ * Prepares next iteration of io.
+ *
+ * Calls cl_io_operations::cio_iter_init() top-to-bottom. This exists to give
+ * layers a chance to modify io parameters, e.g., so that lov can restrict io
+ * to a single stripe.
+ */
+int cl_io_iter_init(const struct lu_env *env, struct cl_io *io)
+{
+ const struct cl_io_slice *scan;
+ int result;
+
+ LINVRNT(cl_io_is_loopable(io));
+ LINVRNT(io->ci_state == CIS_INIT || io->ci_state == CIS_IT_ENDED);
+ LINVRNT(cl_io_invariant(io));
+
+ result = 0;
+ cl_io_for_each(scan, io) {
+ if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
+ continue;
+ result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
+ scan);
+ if (result != 0)
+ break;
+ }
+ if (result == 0)
+ io->ci_state = CIS_IT_STARTED;
+ return result;
+}
+EXPORT_SYMBOL(cl_io_iter_init);
+
+/**
+ * Finalizes io iteration.
+ *
+ * Calls cl_io_operations::cio_iter_fini() bottom-to-top.
+ */
+void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io)
+{
+ const struct cl_io_slice *scan;
+
+ LINVRNT(cl_io_is_loopable(io));
+ LINVRNT(io->ci_state == CIS_UNLOCKED);
+ LINVRNT(cl_io_invariant(io));
+
+ cl_io_for_each_reverse(scan, io) {
+ if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
+ scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
+ }
+ io->ci_state = CIS_IT_ENDED;
+}
+EXPORT_SYMBOL(cl_io_iter_fini);
+
+/**
+ * Records that read or write io progressed \a nob bytes forward.
+ */
+void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, size_t nob)
+{
+ const struct cl_io_slice *scan;
+
+ LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
+ nob == 0);
+ LINVRNT(cl_io_is_loopable(io));
+ LINVRNT(cl_io_invariant(io));
+
+ io->u.ci_rw.crw_pos += nob;
+ io->u.ci_rw.crw_count -= nob;
+
+ /* layers have to be notified. */
+ cl_io_for_each_reverse(scan, io) {
+ if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
+ scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
+ nob);
+ }
+}
+EXPORT_SYMBOL(cl_io_rw_advance);
+
+/**
+ * Adds a lock to a lockset.
+ */
+int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
+ struct cl_io_lock_link *link)
+{
+ int result;
+
+ if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr))
+ result = +1;
+ else {
+ list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
+ result = 0;
+ }
+ return result;
+}
+EXPORT_SYMBOL(cl_io_lock_add);
+
+static void cl_free_io_lock_link(const struct lu_env *env,
+ struct cl_io_lock_link *link)
+{
+ OBD_FREE_PTR(link);
+}
+
+/**
+ * Allocates new lock link, and uses it to add a lock to a lockset.
+ */
+int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
+ struct cl_lock_descr *descr)
+{
+ struct cl_io_lock_link *link;
+ int result;
+
+ OBD_ALLOC_PTR(link);
+ if (link != NULL) {
+ link->cill_descr = *descr;
+ link->cill_fini = cl_free_io_lock_link;
+ result = cl_io_lock_add(env, io, link);
+ if (result) /* lock match */
+ link->cill_fini(env, link);
+ } else
+ result = -ENOMEM;
+
+ return result;
+}
+EXPORT_SYMBOL(cl_io_lock_alloc_add);
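+
+/*
+ * Illustrative sketch of how a layer's ->cio_lock() method might feed the
+ * lockset (simplified; "start_index" and "end_index" are hypothetical
+ * page-index bounds computed by the layer):
+ *
+ *	struct cl_lock_descr descr = {
+ *		.cld_obj   = io->ci_obj,
+ *		.cld_mode  = io->ci_type == CIT_WRITE ? CLM_WRITE : CLM_READ,
+ *		.cld_start = start_index,
+ *		.cld_end   = end_index,
+ *	};
+ *
+ *	return cl_io_lock_alloc_add(env, io, &descr);
+ */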
+
+/**
+ * Starts io by calling cl_io_operations::cio_start() top-to-bottom.
+ */
+int cl_io_start(const struct lu_env *env, struct cl_io *io)
+{
+ const struct cl_io_slice *scan;
+ int result = 0;
+
+ LINVRNT(cl_io_is_loopable(io));
+ LINVRNT(io->ci_state == CIS_LOCKED);
+ LINVRNT(cl_io_invariant(io));
+
+ io->ci_state = CIS_IO_GOING;
+ cl_io_for_each(scan, io) {
+ if (scan->cis_iop->op[io->ci_type].cio_start == NULL)
+ continue;
+ result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
+ if (result != 0)
+ break;
+ }
+ if (result >= 0)
+ result = 0;
+ return result;
+}
+EXPORT_SYMBOL(cl_io_start);
+
+/**
+ * Wait until current io iteration is finished by calling
+ * cl_io_operations::cio_end() bottom-to-top.
+ */
+void cl_io_end(const struct lu_env *env, struct cl_io *io)
+{
+ const struct cl_io_slice *scan;
+
+ LINVRNT(cl_io_is_loopable(io));
+ LINVRNT(io->ci_state == CIS_IO_GOING);
+ LINVRNT(cl_io_invariant(io));
+
+ cl_io_for_each_reverse(scan, io) {
+ if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
+ scan->cis_iop->op[io->ci_type].cio_end(env, scan);
+ /* TODO: error handling. */
+ }
+ io->ci_state = CIS_IO_FINISHED;
+}
+EXPORT_SYMBOL(cl_io_end);
+
+static const struct cl_page_slice *
+cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
+{
+ const struct cl_page_slice *slice;
+
+ slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type);
+ LINVRNT(slice != NULL);
+ return slice;
+}
+
+/**
+ * True iff \a page is within \a io range.
+ */
+static int cl_page_in_io(const struct cl_page *page, const struct cl_io *io)
+{
+ int result = 1;
+ loff_t start;
+ loff_t end;
+ pgoff_t idx;
+
+ idx = page->cp_index;
+ switch (io->ci_type) {
+ case CIT_READ:
+ case CIT_WRITE:
+ /*
+ * check that [start, end) and [pos, pos + count) extents
+ * overlap.
+ */
+ if (!cl_io_is_append(io)) {
+ const struct cl_io_rw_common *crw = &(io->u.ci_rw);
+ start = cl_offset(page->cp_obj, idx);
+ end = cl_offset(page->cp_obj, idx + 1);
+ result = crw->crw_pos < end &&
+ start < crw->crw_pos + crw->crw_count;
+ }
+ break;
+ case CIT_FAULT:
+ result = io->u.ci_fault.ft_index == idx;
+ break;
+ default:
+ LBUG();
+ }
+ return result;
+}
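+
+/*
+ * Worked example for the overlap check above, assuming 4096-byte pages:
+ * a page with cp_index == 3 spans [12288, 16384), and a CIT_WRITE io with
+ * crw_pos == 10000, crw_count == 3000 covers [10000, 13000).  Both
+ * crw_pos < end (10000 < 16384) and start < crw_pos + crw_count
+ * (12288 < 13000) hold, so the page is within the io range.
+ */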
+
+/**
+ * Called by read io, when page has to be read from the server.
+ *
+ * \see cl_io_operations::cio_read_page()
+ */
+int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page)
+{
+ const struct cl_io_slice *scan;
+ struct cl_2queue *queue;
+ int result = 0;
+
+ LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
+ LINVRNT(cl_page_is_owned(page, io));
+ LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
+ LINVRNT(cl_page_in_io(page, io));
+ LINVRNT(cl_io_invariant(io));
+
+ queue = &io->ci_queue;
+
+ cl_2queue_init(queue);
+ /*
+ * ->cio_read_page() methods called in the loop below are supposed to
+ * never block waiting for network (the only subtle point is the
+ * creation of new pages for read-ahead that might result in cache
+ * shrinking, but currently only clean pages are shrunk and this
+ * requires no network io).
+ *
+ * Should this ever start blocking, a retry loop would be needed for
+ * "parallel io" (see CLO_REPEAT loops in cl_lock.c).
+ */
+ cl_io_for_each(scan, io) {
+ if (scan->cis_iop->cio_read_page != NULL) {
+ const struct cl_page_slice *slice;
+
+ slice = cl_io_slice_page(scan, page);
+ LINVRNT(slice != NULL);
+ result = scan->cis_iop->cio_read_page(env, scan, slice);
+ if (result != 0)
+ break;
+ }
+ }
+ if (result == 0)
+ result = cl_io_submit_rw(env, io, CRT_READ, queue);
+ /*
+ * Unlock unsent pages in case of error.
+ */
+ cl_page_list_disown(env, io, &queue->c2_qin);
+ cl_2queue_fini(env, queue);
+ return result;
+}
+EXPORT_SYMBOL(cl_io_read_page);
+
+/**
+ * Called by write io to prepare page to receive data from user buffer.
+ *
+ * \see cl_io_operations::cio_prepare_write()
+ */
+int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page, unsigned from, unsigned to)
+{
+ const struct cl_io_slice *scan;
+ int result = 0;
+
+ LINVRNT(io->ci_type == CIT_WRITE);
+ LINVRNT(cl_page_is_owned(page, io));
+ LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
+ LINVRNT(cl_io_invariant(io));
+ LASSERT(cl_page_in_io(page, io));
+
+ cl_io_for_each_reverse(scan, io) {
+ if (scan->cis_iop->cio_prepare_write != NULL) {
+ const struct cl_page_slice *slice;
+
+ slice = cl_io_slice_page(scan, page);
+ result = scan->cis_iop->cio_prepare_write(env, scan,
+ slice,
+ from, to);
+ if (result != 0)
+ break;
+ }
+ }
+ return result;
+}
+EXPORT_SYMBOL(cl_io_prepare_write);
+
+/**
+ * Called by write io after user data were copied into a page.
+ *
+ * \see cl_io_operations::cio_commit_write()
+ */
+int cl_io_commit_write(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page, unsigned from, unsigned to)
+{
+ const struct cl_io_slice *scan;
+ int result = 0;
+
+ LINVRNT(io->ci_type == CIT_WRITE);
+ LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
+ LINVRNT(cl_io_invariant(io));
+ /*
+ * XXX Uh... not nice. Top level cl_io_commit_write() call (vvp->lov)
+ * already called cl_page_cache_add(), moving page into CPS_CACHED
+ * state. Better (and more general) way of dealing with such situation
+ * is needed.
+ */
+ LASSERT(cl_page_is_owned(page, io) || page->cp_parent != NULL);
+ LASSERT(cl_page_in_io(page, io));
+
+ cl_io_for_each(scan, io) {
+ if (scan->cis_iop->cio_commit_write != NULL) {
+ const struct cl_page_slice *slice;
+
+ slice = cl_io_slice_page(scan, page);
+ result = scan->cis_iop->cio_commit_write(env, scan,
+ slice,
+ from, to);
+ if (result != 0)
+ break;
+ }
+ }
+ LINVRNT(result <= 0);
+ return result;
+}
+EXPORT_SYMBOL(cl_io_commit_write);
+
+/**
+ * Submits a list of pages for immediate io.
+ *
+ * After the function returns, the submitted pages are moved to the
+ * queue->c2_qout queue, while queue->c2_qin contains both the pages that did
+ * not need to be submitted and the pages that failed to be submitted.
+ *
+ * \returns 0 if at least one page was submitted, error code otherwise.
+ * \see cl_io_operations::cio_submit()
+ */
+int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
+ enum cl_req_type crt, struct cl_2queue *queue)
+{
+ const struct cl_io_slice *scan;
+ int result = 0;
+
+ LINVRNT(crt < ARRAY_SIZE(scan->cis_iop->req_op));
+
+ cl_io_for_each(scan, io) {
+ if (scan->cis_iop->req_op[crt].cio_submit == NULL)
+ continue;
+ result = scan->cis_iop->req_op[crt].cio_submit(env, scan, crt,
+ queue);
+ if (result != 0)
+ break;
+ }
+ /*
+ * If ->cio_submit() failed, no pages were sent.
+ */
+ LASSERT(ergo(result != 0, list_empty(&queue->c2_qout.pl_pages)));
+ return result;
+}
+EXPORT_SYMBOL(cl_io_submit_rw);
+
+/**
+ * Submit a sync_io and wait for the IO to finish or an error to occur.
+ * If \a timeout is zero, it means to wait for the IO unconditionally.
+ */
+int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
+ enum cl_req_type iot, struct cl_2queue *queue,
+ long timeout)
+{
+ struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor;
+ struct cl_page *pg;
+ int rc;
+
+ cl_page_list_for_each(pg, &queue->c2_qin) {
+ LASSERT(pg->cp_sync_io == NULL);
+ pg->cp_sync_io = anchor;
+ }
+
+ cl_sync_io_init(anchor, queue->c2_qin.pl_nr);
+ rc = cl_io_submit_rw(env, io, iot, queue);
+ if (rc == 0) {
+ /*
+ * If some pages weren't sent for any reason (e.g.,
+ * read found up-to-date pages in the cache, or write found
+ * clean pages), count them as completed to avoid infinite
+ * wait.
+ */
+ cl_page_list_for_each(pg, &queue->c2_qin) {
+ pg->cp_sync_io = NULL;
+ cl_sync_io_note(anchor, +1);
+ }
+
+ /* wait for the IO to be finished. */
+ rc = cl_sync_io_wait(env, io, &queue->c2_qout,
+ anchor, timeout);
+ } else {
+ LASSERT(list_empty(&queue->c2_qout.pl_pages));
+ cl_page_list_for_each(pg, &queue->c2_qin)
+ pg->cp_sync_io = NULL;
+ }
+ return rc;
+}
+EXPORT_SYMBOL(cl_io_submit_sync);
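+
+/*
+ * Illustrative sketch of synchronous submission of a single page that the
+ * caller already owns (simplified; "pg" and "rc" are hypothetical, and the
+ * zero timeout means "wait unconditionally" as documented above):
+ *
+ *	struct cl_2queue *queue = &io->ci_queue;
+ *
+ *	cl_2queue_init_page(queue, pg);
+ *	rc = cl_io_submit_sync(env, io, CRT_READ, queue, 0);
+ *	cl_2queue_disown(env, io, queue);
+ *	cl_2queue_fini(env, queue);
+ */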
+
+/**
+ * Cancel an IO which has been submitted by cl_io_submit_rw.
+ */
+int cl_io_cancel(const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *queue)
+{
+ struct cl_page *page;
+ int result = 0;
+
+ CERROR("Canceling ongoing page transmission\n");
+ cl_page_list_for_each(page, queue) {
+ int rc;
+
+ LINVRNT(cl_page_in_io(page, io));
+ rc = cl_page_cancel(env, page);
+ result = result ?: rc;
+ }
+ return result;
+}
+EXPORT_SYMBOL(cl_io_cancel);
+
+/**
+ * Main io loop.
+ *
+ * Pumps io through iterations calling
+ *
+ * - cl_io_iter_init()
+ *
+ * - cl_io_lock()
+ *
+ * - cl_io_start()
+ *
+ * - cl_io_end()
+ *
+ * - cl_io_unlock()
+ *
+ * - cl_io_iter_fini()
+ *
+ * repeatedly until there is no more io to do.
+ */
+int cl_io_loop(const struct lu_env *env, struct cl_io *io)
+{
+ int result = 0;
+
+ LINVRNT(cl_io_is_loopable(io));
+
+ do {
+ size_t nob;
+
+ io->ci_continue = 0;
+ result = cl_io_iter_init(env, io);
+ if (result == 0) {
+ nob = io->ci_nob;
+ result = cl_io_lock(env, io);
+ if (result == 0) {
+ /*
+ * Notify layers that locks have been taken,
+ * and do actual i/o.
+ *
+ * - llite: kms, short read;
+ * - llite: generic_file_read();
+ */
+ result = cl_io_start(env, io);
+ /*
+ * Send any remaining pending
+ * io, etc.
+ *
+ * - llite: ll_rw_stats_tally.
+ */
+ cl_io_end(env, io);
+ cl_io_unlock(env, io);
+ cl_io_rw_advance(env, io, io->ci_nob - nob);
+ }
+ }
+ cl_io_iter_fini(env, io);
+ } while (result == 0 && io->ci_continue);
+ if (result == 0)
+ result = io->ci_result;
+ return result < 0 ? result : 0;
+}
+EXPORT_SYMBOL(cl_io_loop);
+
+/**
+ * Adds io slice to the cl_io.
+ *
+ * This is called by cl_object_operations::coo_io_init() methods to add a
+ * per-layer state to the io. New state is added at the end of
+ * cl_io::ci_layers list, that is, it is at the bottom of the stack.
+ *
+ * \see cl_lock_slice_add(), cl_req_slice_add(), cl_page_slice_add()
+ */
+void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
+ struct cl_object *obj,
+ const struct cl_io_operations *ops)
+{
+ struct list_head *linkage = &slice->cis_linkage;
+
+ LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
+ list_empty(linkage));
+
+ list_add_tail(linkage, &io->ci_layers);
+ slice->cis_io = io;
+ slice->cis_obj = obj;
+ slice->cis_iop = ops;
+}
+EXPORT_SYMBOL(cl_io_slice_add);
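+
+/*
+ * Illustrative sketch of a layer's ->coo_io_init() method registering its
+ * slice (the "foo_io" structure, foo_io_ops and cl2foo_io() helper are
+ * hypothetical):
+ *
+ *	static int foo_io_init(const struct lu_env *env,
+ *			       struct cl_object *obj, struct cl_io *io)
+ *	{
+ *		struct foo_io *fio = cl2foo_io(env, io);
+ *
+ *		cl_io_slice_add(io, &fio->fi_cl, obj, &foo_io_ops);
+ *		return 0;
+ *	}
+ */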
+
+/**
+ * Initializes page list.
+ */
+void cl_page_list_init(struct cl_page_list *plist)
+{
+ plist->pl_nr = 0;
+ INIT_LIST_HEAD(&plist->pl_pages);
+ plist->pl_owner = current;
+}
+EXPORT_SYMBOL(cl_page_list_init);
+
+/**
+ * Adds a page to a page list.
+ */
+void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
+{
+ /* it would be better to check that page is owned by "current" io, but
+ * it is not passed here. */
+ LASSERT(page->cp_owner != NULL);
+ LINVRNT(plist->pl_owner == current);
+
+ lockdep_off();
+ mutex_lock(&page->cp_mutex);
+ lockdep_on();
+ LASSERT(list_empty(&page->cp_batch));
+ list_add_tail(&page->cp_batch, &plist->pl_pages);
+ ++plist->pl_nr;
+ lu_ref_add_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
+ cl_page_get(page);
+}
+EXPORT_SYMBOL(cl_page_list_add);
+
+/**
+ * Removes a page from a page list.
+ */
+void cl_page_list_del(const struct lu_env *env,
+ struct cl_page_list *plist, struct cl_page *page)
+{
+ LASSERT(plist->pl_nr > 0);
+ LINVRNT(plist->pl_owner == current);
+
+ list_del_init(&page->cp_batch);
+ lockdep_off();
+ mutex_unlock(&page->cp_mutex);
+ lockdep_on();
+ --plist->pl_nr;
+ lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
+ cl_page_put(env, page);
+}
+EXPORT_SYMBOL(cl_page_list_del);
+
+/**
+ * Moves a page from one page list to another.
+ */
+void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
+ struct cl_page *page)
+{
+ LASSERT(src->pl_nr > 0);
+ LINVRNT(dst->pl_owner == current);
+ LINVRNT(src->pl_owner == current);
+
+ list_move_tail(&page->cp_batch, &dst->pl_pages);
+ --src->pl_nr;
+ ++dst->pl_nr;
+ lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
+ src, dst);
+}
+EXPORT_SYMBOL(cl_page_list_move);
+
+/**
+ * Splice all pages from \a list onto the tail of \a head, leaving \a list empty.
+ */
+void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
+{
+ struct cl_page *page;
+ struct cl_page *tmp;
+
+ LINVRNT(list->pl_owner == current);
+ LINVRNT(head->pl_owner == current);
+
+ cl_page_list_for_each_safe(page, tmp, list)
+ cl_page_list_move(head, list, page);
+}
+EXPORT_SYMBOL(cl_page_list_splice);
+
+void cl_page_disown0(const struct lu_env *env,
+ struct cl_io *io, struct cl_page *pg);
+
+/**
+ * Disowns pages in a queue.
+ */
+void cl_page_list_disown(const struct lu_env *env,
+ struct cl_io *io, struct cl_page_list *plist)
+{
+ struct cl_page *page;
+ struct cl_page *temp;
+
+ LINVRNT(plist->pl_owner == current);
+
+ cl_page_list_for_each_safe(page, temp, plist) {
+ LASSERT(plist->pl_nr > 0);
+
+ list_del_init(&page->cp_batch);
+ lockdep_off();
+ mutex_unlock(&page->cp_mutex);
+ lockdep_on();
+ --plist->pl_nr;
+ /*
+ * cl_page_disown0 rather than usual cl_page_disown() is used,
+ * because pages are possibly in CPS_FREEING state already due
+ * to the call to cl_page_list_discard().
+ */
+ /*
+ * XXX cl_page_disown0() will fail if page is not locked.
+ */
+ cl_page_disown0(env, io, page);
+ lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue",
+ plist);
+ cl_page_put(env, page);
+ }
+}
+EXPORT_SYMBOL(cl_page_list_disown);
+
+/**
+ * Releases pages from queue.
+ */
+void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
+{
+ struct cl_page *page;
+ struct cl_page *temp;
+
+ LINVRNT(plist->pl_owner == current);
+
+ cl_page_list_for_each_safe(page, temp, plist)
+ cl_page_list_del(env, plist, page);
+ LASSERT(plist->pl_nr == 0);
+}
+EXPORT_SYMBOL(cl_page_list_fini);
+
+/**
+ * Owns all pages in a queue.
+ */
+int cl_page_list_own(const struct lu_env *env,
+ struct cl_io *io, struct cl_page_list *plist)
+{
+ struct cl_page *page;
+ struct cl_page *temp;
+ pgoff_t index = 0;
+ int result;
+
+ LINVRNT(plist->pl_owner == current);
+
+ result = 0;
+ cl_page_list_for_each_safe(page, temp, plist) {
+ LASSERT(index <= page->cp_index);
+ index = page->cp_index;
+ if (cl_page_own(env, io, page) == 0)
+ result = result ?: page->cp_error;
+ else
+ cl_page_list_del(env, plist, page);
+ }
+ return result;
+}
+EXPORT_SYMBOL(cl_page_list_own);
+
+/**
+ * Assumes all pages in a queue.
+ */
+void cl_page_list_assume(const struct lu_env *env,
+ struct cl_io *io, struct cl_page_list *plist)
+{
+ struct cl_page *page;
+
+ LINVRNT(plist->pl_owner == current);
+
+ cl_page_list_for_each(page, plist)
+ cl_page_assume(env, io, page);
+}
+EXPORT_SYMBOL(cl_page_list_assume);
+
+/**
+ * Discards all pages in a queue.
+ */
+void cl_page_list_discard(const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *plist)
+{
+ struct cl_page *page;
+
+ LINVRNT(plist->pl_owner == current);
+ cl_page_list_for_each(page, plist)
+ cl_page_discard(env, io, page);
+}
+EXPORT_SYMBOL(cl_page_list_discard);
+
+/**
+ * Unmaps all pages in a queue from user virtual memory.
+ */
+int cl_page_list_unmap(const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *plist)
+{
+ struct cl_page *page;
+ int result;
+
+ LINVRNT(plist->pl_owner == current);
+ result = 0;
+ cl_page_list_for_each(page, plist) {
+ result = cl_page_unmap(env, io, page);
+ if (result != 0)
+ break;
+ }
+ return result;
+}
+EXPORT_SYMBOL(cl_page_list_unmap);
+
+/**
+ * Initialize dual page queue.
+ */
+void cl_2queue_init(struct cl_2queue *queue)
+{
+ cl_page_list_init(&queue->c2_qin);
+ cl_page_list_init(&queue->c2_qout);
+}
+EXPORT_SYMBOL(cl_2queue_init);
+
+/**
+ * Add a page to the incoming page list of 2-queue.
+ */
+void cl_2queue_add(struct cl_2queue *queue, struct cl_page *page)
+{
+ cl_page_list_add(&queue->c2_qin, page);
+}
+EXPORT_SYMBOL(cl_2queue_add);
+
+/**
+ * Disown pages in both lists of a 2-queue.
+ */
+void cl_2queue_disown(const struct lu_env *env,
+ struct cl_io *io, struct cl_2queue *queue)
+{
+ cl_page_list_disown(env, io, &queue->c2_qin);
+ cl_page_list_disown(env, io, &queue->c2_qout);
+}
+EXPORT_SYMBOL(cl_2queue_disown);
+
+/**
+ * Discard (truncate) pages in both lists of a 2-queue.
+ */
+void cl_2queue_discard(const struct lu_env *env,
+ struct cl_io *io, struct cl_2queue *queue)
+{
+ cl_page_list_discard(env, io, &queue->c2_qin);
+ cl_page_list_discard(env, io, &queue->c2_qout);
+}
+EXPORT_SYMBOL(cl_2queue_discard);
+
+/**
+ * Assume to own the pages in cl_2queue
+ */
+void cl_2queue_assume(const struct lu_env *env,
+ struct cl_io *io, struct cl_2queue *queue)
+{
+ cl_page_list_assume(env, io, &queue->c2_qin);
+ cl_page_list_assume(env, io, &queue->c2_qout);
+}
+EXPORT_SYMBOL(cl_2queue_assume);
+
+/**
+ * Finalize both page lists of a 2-queue.
+ */
+void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue)
+{
+ cl_page_list_fini(env, &queue->c2_qout);
+ cl_page_list_fini(env, &queue->c2_qin);
+}
+EXPORT_SYMBOL(cl_2queue_fini);
+
+/**
+ * Initialize a 2-queue to contain \a page in its incoming page list.
+ */
+void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page)
+{
+ cl_2queue_init(queue);
+ cl_2queue_add(queue, page);
+}
+EXPORT_SYMBOL(cl_2queue_init_page);
+
+/**
+ * Returns top-level io.
+ *
+ * \see cl_object_top(), cl_page_top().
+ */
+struct cl_io *cl_io_top(struct cl_io *io)
+{
+ while (io->ci_parent != NULL)
+ io = io->ci_parent;
+ return io;
+}
+EXPORT_SYMBOL(cl_io_top);
+
+/**
+ * Prints human readable representation of \a io through \a printer.
+ */
+void cl_io_print(const struct lu_env *env, void *cookie,
+ lu_printer_t printer, const struct cl_io *io)
+{
+}
+
+/**
+ * Adds request slice to the compound request.
+ *
+ * This is called by cl_device_operations::cdo_req_init() methods to add a
+ * per-layer state to the request. New state is added at the end of
+ * cl_req::crq_layers list, that is, it is at the bottom of the stack.
+ *
+ * \see cl_lock_slice_add(), cl_page_slice_add(), cl_io_slice_add()
+ */
+void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
+ struct cl_device *dev,
+ const struct cl_req_operations *ops)
+{
+ list_add_tail(&slice->crs_linkage, &req->crq_layers);
+ slice->crs_dev = dev;
+ slice->crs_ops = ops;
+ slice->crs_req = req;
+}
+EXPORT_SYMBOL(cl_req_slice_add);
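+
+/*
+ * Illustrative sketch of a device layer's ->cdo_req_init() method (the
+ * "foo_req" structure and foo_req_ops are hypothetical):
+ *
+ *	static int foo_req_init(const struct lu_env *env,
+ *				struct cl_device *dev, struct cl_req *req)
+ *	{
+ *		struct foo_req *fr;
+ *
+ *		OBD_ALLOC_PTR(fr);
+ *		if (fr == NULL)
+ *			return -ENOMEM;
+ *		cl_req_slice_add(req, &fr->fr_cl, dev, &foo_req_ops);
+ *		return 0;
+ *	}
+ */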
+
+static void cl_req_free(const struct lu_env *env, struct cl_req *req)
+{
+ unsigned i;
+
+ LASSERT(list_empty(&req->crq_pages));
+ LASSERT(req->crq_nrpages == 0);
+ LINVRNT(list_empty(&req->crq_layers));
+ LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o != NULL));
+
+ if (req->crq_o != NULL) {
+ for (i = 0; i < req->crq_nrobjs; ++i) {
+ struct cl_object *obj = req->crq_o[i].ro_obj;
+ if (obj != NULL) {
+ lu_object_ref_del_at(&obj->co_lu,
+ &req->crq_o[i].ro_obj_ref,
+ "cl_req", req);
+ cl_object_put(env, obj);
+ }
+ }
+ OBD_FREE(req->crq_o, req->crq_nrobjs * sizeof req->crq_o[0]);
+ }
+ OBD_FREE_PTR(req);
+}
+
+static int cl_req_init(const struct lu_env *env, struct cl_req *req,
+ struct cl_page *page)
+{
+ struct cl_device *dev;
+ struct cl_page_slice *slice;
+ int result;
+
+ result = 0;
+ page = cl_page_top(page);
+ do {
+ list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+ dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
+ if (dev->cd_ops->cdo_req_init != NULL) {
+ result = dev->cd_ops->cdo_req_init(env,
+ dev, req);
+ if (result != 0)
+ break;
+ }
+ }
+ page = page->cp_child;
+ } while (page != NULL && result == 0);
+ return result;
+}
+
+/**
+ * Invokes per-request transfer completion call-backs
+ * (cl_req_operations::cro_completion()) bottom-to-top.
+ */
+void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc)
+{
+ struct cl_req_slice *slice;
+
+ /*
+ * for the lack of list_for_each_entry_reverse_safe()...
+ */
+ while (!list_empty(&req->crq_layers)) {
+ slice = list_entry(req->crq_layers.prev,
+ struct cl_req_slice, crs_linkage);
+ list_del_init(&slice->crs_linkage);
+ if (slice->crs_ops->cro_completion != NULL)
+ slice->crs_ops->cro_completion(env, slice, rc);
+ }
+ cl_req_free(env, req);
+}
+EXPORT_SYMBOL(cl_req_completion);
+
+/**
+ * Allocates new transfer request.
+ */
+struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
+ enum cl_req_type crt, int nr_objects)
+{
+ struct cl_req *req;
+
+ LINVRNT(nr_objects > 0);
+
+ OBD_ALLOC_PTR(req);
+ if (req != NULL) {
+ int result;
+
+ OBD_ALLOC(req->crq_o, nr_objects * sizeof req->crq_o[0]);
+ if (req->crq_o != NULL) {
+ req->crq_nrobjs = nr_objects;
+ req->crq_type = crt;
+ INIT_LIST_HEAD(&req->crq_pages);
+ INIT_LIST_HEAD(&req->crq_layers);
+ result = cl_req_init(env, req, page);
+ } else
+ result = -ENOMEM;
+ if (result != 0) {
+ cl_req_completion(env, req, result);
+ req = ERR_PTR(result);
+ }
+ } else
+ req = ERR_PTR(-ENOMEM);
+ return req;
+}
+EXPORT_SYMBOL(cl_req_alloc);
+
+/**
+ * Adds a page to a request.
+ */
+void cl_req_page_add(const struct lu_env *env,
+ struct cl_req *req, struct cl_page *page)
+{
+ struct cl_object *obj;
+ struct cl_req_obj *rqo;
+ int i;
+
+ page = cl_page_top(page);
+
+ LASSERT(list_empty(&page->cp_flight));
+ LASSERT(page->cp_req == NULL);
+
+ CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n",
+ req, req->crq_type, req->crq_nrpages);
+
+ list_add_tail(&page->cp_flight, &req->crq_pages);
+ ++req->crq_nrpages;
+ page->cp_req = req;
+ obj = cl_object_top(page->cp_obj);
+ for (i = 0, rqo = req->crq_o; obj != rqo->ro_obj; ++i, ++rqo) {
+ if (rqo->ro_obj == NULL) {
+ rqo->ro_obj = obj;
+ cl_object_get(obj);
+ lu_object_ref_add_at(&obj->co_lu, &rqo->ro_obj_ref,
+ "cl_req", req);
+ break;
+ }
+ }
+ LASSERT(i < req->crq_nrobjs);
+}
+EXPORT_SYMBOL(cl_req_page_add);
+
+/**
+ * Removes a page from a request.
+ */
+void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
+{
+ struct cl_req *req = page->cp_req;
+
+ page = cl_page_top(page);
+
+ LASSERT(!list_empty(&page->cp_flight));
+ LASSERT(req->crq_nrpages > 0);
+
+ list_del_init(&page->cp_flight);
+ --req->crq_nrpages;
+ page->cp_req = NULL;
+}
+EXPORT_SYMBOL(cl_req_page_done);
+
+/**
+ * Notifies layers that request is about to depart by calling
+ * cl_req_operations::cro_prep() top-to-bottom.
+ */
+int cl_req_prep(const struct lu_env *env, struct cl_req *req)
+{
+ int i;
+ int result;
+ const struct cl_req_slice *slice;
+
+ /*
+ * Check that the caller of cl_req_alloc() didn't lie about the number
+ * of objects.
+ */
+ for (i = 0; i < req->crq_nrobjs; ++i)
+ LASSERT(req->crq_o[i].ro_obj != NULL);
+
+ result = 0;
+ list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
+ if (slice->crs_ops->cro_prep != NULL) {
+ result = slice->crs_ops->cro_prep(env, slice);
+ if (result != 0)
+ break;
+ }
+ }
+ return result;
+}
+EXPORT_SYMBOL(cl_req_prep);
+
+/**
+ * Fills in attributes that are passed to server together with transfer. Only
+ * attributes from \a flags may be touched. This can be called multiple times
+ * for the same request.
+ */
+void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
+ struct cl_req_attr *attr, obd_valid flags)
+{
+ const struct cl_req_slice *slice;
+ struct cl_page *page;
+ int i;
+
+ LASSERT(!list_empty(&req->crq_pages));
+
+ /* Take any page to use as a model. */
+ page = list_entry(req->crq_pages.next, struct cl_page, cp_flight);
+
+ for (i = 0; i < req->crq_nrobjs; ++i) {
+ list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
+ const struct cl_page_slice *scan;
+ const struct cl_object *obj;
+
+ scan = cl_page_at(page,
+ slice->crs_dev->cd_lu_dev.ld_type);
+ LASSERT(scan != NULL);
+ obj = scan->cpl_obj;
+ if (slice->crs_ops->cro_attr_set != NULL)
+ slice->crs_ops->cro_attr_set(env, slice, obj,
+ attr + i, flags);
+ }
+ }
+}
+EXPORT_SYMBOL(cl_req_attr_set);
+
+/* XXX complete(), init_completion(), and wait_for_completion(), until they are
+ * implemented in libcfs. */
+# include <linux/sched.h>
+
+/**
+ * Initialize synchronous io wait anchor, for transfer of \a nrpages pages.
+ */
+void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages)
+{
+ init_waitqueue_head(&anchor->csi_waitq);
+ atomic_set(&anchor->csi_sync_nr, nrpages);
+ atomic_set(&anchor->csi_barrier, nrpages > 0);
+ anchor->csi_sync_rc = 0;
+}
+EXPORT_SYMBOL(cl_sync_io_init);
+
+/**
+ * Wait until all transfers complete. The transfer completion routine has to call
+ * cl_sync_io_note() for every page.
+ */
+int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *queue, struct cl_sync_io *anchor,
+ long timeout)
+{
+ struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
+ NULL, NULL, NULL);
+ int rc;
+
+ LASSERT(timeout >= 0);
+
+ rc = l_wait_event(anchor->csi_waitq,
+ atomic_read(&anchor->csi_sync_nr) == 0,
+ &lwi);
+ if (rc < 0) {
+ CERROR("SYNC IO failed with error: %d, try to cancel "
+ "%d remaining pages\n",
+ rc, atomic_read(&anchor->csi_sync_nr));
+
+ (void)cl_io_cancel(env, io, queue);
+
+ lwi = (struct l_wait_info) { 0 };
+ (void)l_wait_event(anchor->csi_waitq,
+ atomic_read(&anchor->csi_sync_nr) == 0,
+ &lwi);
+ } else {
+ rc = anchor->csi_sync_rc;
+ }
+ LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
+ cl_page_list_assume(env, io, queue);
+
+ /* wait until cl_sync_io_note() has done wakeup */
+ while (unlikely(atomic_read(&anchor->csi_barrier) != 0)) {
+ cpu_relax();
+ }
+
+ POISON(anchor, 0x5a, sizeof *anchor);
+ return rc;
+}
+EXPORT_SYMBOL(cl_sync_io_wait);
+
+/**
+ * Indicate that transfer of a single page completed.
+ */
+void cl_sync_io_note(struct cl_sync_io *anchor, int ioret)
+{
+ if (anchor->csi_sync_rc == 0 && ioret < 0)
+ anchor->csi_sync_rc = ioret;
+ /*
+ * Synchronous IO done without releasing page lock (e.g., as a part of
+ * ->{prepare,commit}_write()). Completion is used to signal the end of
+ * IO.
+ */
+ LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
+ if (atomic_dec_and_test(&anchor->csi_sync_nr)) {
+ wake_up_all(&anchor->csi_waitq);
+ /* it's safe to nuke or reuse anchor now */
+ atomic_set(&anchor->csi_barrier, 0);
+ }
+}
+EXPORT_SYMBOL(cl_sync_io_note);
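+
+/*
+ * Typical cl_sync_io life cycle, as exercised by cl_io_submit_sync() above:
+ * the submitter calls cl_sync_io_init() with the number of pages, points
+ * each page's cp_sync_io at the anchor, submits the queue and blocks in
+ * cl_sync_io_wait(); the transfer completion path calls cl_sync_io_note()
+ * once per page, and the final call wakes the waiter and clears the
+ * csi_barrier the waiter spins on before poisoning the anchor.
+ */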
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_lock.c b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
new file mode 100644
index 0000000..749eb08
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
@@ -0,0 +1,2232 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Client Extent Lock.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_CLASS
+
+#include <obd_class.h>
+#include <obd_support.h>
+#include <lustre_fid.h>
+#include <linux/list.h>
+#include <cl_object.h>
+#include "cl_internal.h"
+
+/** Lock class of cl_lock::cll_guard */
+static struct lock_class_key cl_lock_guard_class;
+static struct kmem_cache *cl_lock_kmem;
+
+static struct lu_kmem_descr cl_lock_caches[] = {
+ {
+ .ckd_cache = &cl_lock_kmem,
+ .ckd_name = "cl_lock_kmem",
+ .ckd_size = sizeof (struct cl_lock)
+ },
+ {
+ .ckd_cache = NULL
+ }
+};
+
+#define CS_LOCK_INC(o, item)
+#define CS_LOCK_DEC(o, item)
+#define CS_LOCKSTATE_INC(o, state)
+#define CS_LOCKSTATE_DEC(o, state)
+
+/**
+ * Basic lock invariant that is maintained at all times. Caller either has a
+ * reference to \a lock, or somehow assures that \a lock cannot be freed.
+ *
+ * \see cl_lock_invariant()
+ */
+static int cl_lock_invariant_trusted(const struct lu_env *env,
+ const struct cl_lock *lock)
+{
+ return ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
+ atomic_read(&lock->cll_ref) >= lock->cll_holds &&
+ lock->cll_holds >= lock->cll_users &&
+ lock->cll_holds >= 0 &&
+ lock->cll_users >= 0 &&
+ lock->cll_depth >= 0;
+}
+
+/**
+ * Stronger lock invariant, checking that caller has a reference on a lock.
+ *
+ * \see cl_lock_invariant_trusted()
+ */
+static int cl_lock_invariant(const struct lu_env *env,
+ const struct cl_lock *lock)
+{
+ int result;
+
+ result = atomic_read(&lock->cll_ref) > 0 &&
+ cl_lock_invariant_trusted(env, lock);
+ if (!result && env != NULL)
+ CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
+ return result;
+}
+
+/**
+ * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
+ */
+static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
+{
+ return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
+}
+
+/**
+ * Returns a set of counters for this lock, depending on a lock nesting.
+ */
+static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
+ const struct cl_lock *lock)
+{
+ struct cl_thread_info *info;
+ enum clt_nesting_level nesting;
+
+ info = cl_env_info(env);
+ nesting = cl_lock_nesting(lock);
+ LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
+ return &info->clt_counters[nesting];
+}
+
+static void cl_lock_trace0(int level, const struct lu_env *env,
+ const char *prefix, const struct cl_lock *lock,
+ const char *func, const int line)
+{
+ struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
+ CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)"
+ "(%p/%d/%d) at %s():%d\n",
+ prefix, lock, atomic_read(&lock->cll_ref),
+ lock->cll_guarder, lock->cll_depth,
+ lock->cll_state, lock->cll_error, lock->cll_holds,
+ lock->cll_users, lock->cll_flags,
+ env, h->coh_nesting, cl_lock_nr_mutexed(env),
+ func, line);
+}
+#define cl_lock_trace(level, env, prefix, lock) \
+ cl_lock_trace0(level, env, prefix, lock, __FUNCTION__, __LINE__)
+
+#define RETIP ((unsigned long)__builtin_return_address(0))
+
+#ifdef CONFIG_LOCKDEP
+static struct lock_class_key cl_lock_key;
+
+static void cl_lock_lockdep_init(struct cl_lock *lock)
+{
+ lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
+}
+
+static void cl_lock_lockdep_acquire(const struct lu_env *env,
+ struct cl_lock *lock, __u32 enqflags)
+{
+ cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
+ lock_map_acquire(&lock->dep_map);
+}
+
+static void cl_lock_lockdep_release(const struct lu_env *env,
+ struct cl_lock *lock)
+{
+ cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
+ lock_release(&lock->dep_map, 0, RETIP);
+}
+
+#else /* !CONFIG_LOCKDEP */
+
+static void cl_lock_lockdep_init(struct cl_lock *lock)
+{}
+static void cl_lock_lockdep_acquire(const struct lu_env *env,
+ struct cl_lock *lock, __u32 enqflags)
+{}
+static void cl_lock_lockdep_release(const struct lu_env *env,
+ struct cl_lock *lock)
+{}
+
+#endif /* !CONFIG_LOCKDEP */
+
+/**
+ * Adds lock slice to the compound lock.
+ *
+ * This is called by cl_object_operations::coo_lock_init() methods to add a
+ * per-layer state to the lock. New state is added at the end of
+ * cl_lock::cll_layers list, that is, it is at the bottom of the stack.
+ *
+ * \see cl_req_slice_add(), cl_page_slice_add(), cl_io_slice_add()
+ */
+void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
+ struct cl_object *obj,
+ const struct cl_lock_operations *ops)
+{
+ slice->cls_lock = lock;
+ list_add_tail(&slice->cls_linkage, &lock->cll_layers);
+ slice->cls_obj = obj;
+ slice->cls_ops = ops;
+}
+EXPORT_SYMBOL(cl_lock_slice_add);
+
+/**
+ * Returns true iff a lock with the mode \a has provides at least the same
+ * guarantees as a lock with the mode \a need.
+ */
+int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
+{
+ LINVRNT(need == CLM_READ || need == CLM_WRITE ||
+ need == CLM_PHANTOM || need == CLM_GROUP);
+ LINVRNT(has == CLM_READ || has == CLM_WRITE ||
+ has == CLM_PHANTOM || has == CLM_GROUP);
+ CLASSERT(CLM_PHANTOM < CLM_READ);
+ CLASSERT(CLM_READ < CLM_WRITE);
+ CLASSERT(CLM_WRITE < CLM_GROUP);
+
+ if (has != CLM_GROUP)
+ return need <= has;
+ else
+ return need == has;
+}
+EXPORT_SYMBOL(cl_lock_mode_match);
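+
+/*
+ * Examples of the mode ordering used above: a CLM_WRITE lock satisfies a
+ * CLM_READ or CLM_PHANTOM request (need <= has), a CLM_READ lock does not
+ * satisfy a CLM_WRITE request, and a CLM_GROUP lock matches only another
+ * CLM_GROUP request.
+ */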
+
+/**
+ * Returns true iff extent portions of lock descriptions match.
+ */
+int cl_lock_ext_match(const struct cl_lock_descr *has,
+ const struct cl_lock_descr *need)
+{
+ return
+ has->cld_start <= need->cld_start &&
+ has->cld_end >= need->cld_end &&
+ cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
+ (has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
+}
+EXPORT_SYMBOL(cl_lock_ext_match);
+
+/**
+ * Returns true iff a lock with the description \a has provides at least the
+ * same guarantees as a lock with the description \a need.
+ */
+int cl_lock_descr_match(const struct cl_lock_descr *has,
+ const struct cl_lock_descr *need)
+{
+ return
+ cl_object_same(has->cld_obj, need->cld_obj) &&
+ cl_lock_ext_match(has, need);
+}
+EXPORT_SYMBOL(cl_lock_descr_match);
+
+static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
+{
+ struct cl_object *obj = lock->cll_descr.cld_obj;
+
+ LINVRNT(!cl_lock_is_mutexed(lock));
+
+ cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
+ might_sleep();
+ while (!list_empty(&lock->cll_layers)) {
+ struct cl_lock_slice *slice;
+
+ slice = list_entry(lock->cll_layers.next,
+ struct cl_lock_slice, cls_linkage);
+ list_del_init(lock->cll_layers.next);
+ slice->cls_ops->clo_fini(env, slice);
+ }
+ CS_LOCK_DEC(obj, total);
+ CS_LOCKSTATE_DEC(obj, lock->cll_state);
+ lu_object_ref_del_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock", lock);
+ cl_object_put(env, obj);
+ lu_ref_fini(&lock->cll_reference);
+ lu_ref_fini(&lock->cll_holders);
+ mutex_destroy(&lock->cll_guard);
+ OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
+}
+
+/**
+ * Releases a reference on a lock.
+ *
+ * When last reference is released, lock is returned to the cache, unless it
+ * is in cl_lock_state::CLS_FREEING state, in which case it is destroyed
+ * immediately.
+ *
+ * \see cl_object_put(), cl_page_put()
+ */
+void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
+{
+ struct cl_object *obj;
+
+ LINVRNT(cl_lock_invariant(env, lock));
+ obj = lock->cll_descr.cld_obj;
+ LINVRNT(obj != NULL);
+
+ CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
+ atomic_read(&lock->cll_ref), lock, RETIP);
+
+ if (atomic_dec_and_test(&lock->cll_ref)) {
+ if (lock->cll_state == CLS_FREEING) {
+ LASSERT(list_empty(&lock->cll_linkage));
+ cl_lock_free(env, lock);
+ }
+ CS_LOCK_DEC(obj, busy);
+ }
+}
+EXPORT_SYMBOL(cl_lock_put);
+
+/**
+ * Acquires an additional reference to a lock.
+ *
+ * This can be called only by caller already possessing a reference to \a
+ * lock.
+ *
+ * \see cl_object_get(), cl_page_get()
+ */
+void cl_lock_get(struct cl_lock *lock)
+{
+ LINVRNT(cl_lock_invariant(NULL, lock));
+ CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
+ atomic_read(&lock->cll_ref), lock, RETIP);
+ atomic_inc(&lock->cll_ref);
+}
+EXPORT_SYMBOL(cl_lock_get);
+
+/**
+ * Acquires a reference to a lock.
+ *
+ * This is much like cl_lock_get(), except that this function can be used to
+ * acquire initial reference to the cached lock. Caller has to deal with all
+ * possible races. Use with care!
+ *
+ * \see cl_page_get_trust()
+ */
+void cl_lock_get_trust(struct cl_lock *lock)
+{
+ CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
+ atomic_read(&lock->cll_ref), lock, RETIP);
+ if (atomic_inc_return(&lock->cll_ref) == 1)
+ CS_LOCK_INC(lock->cll_descr.cld_obj, busy);
+}
+EXPORT_SYMBOL(cl_lock_get_trust);
+
+/**
+ * Helper function destroying the lock that wasn't completely initialized.
+ *
+ * Other threads can acquire references to the top-lock through its
+ * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately.
+ */
+static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
+{
+ cl_lock_mutex_get(env, lock);
+ cl_lock_cancel(env, lock);
+ cl_lock_delete(env, lock);
+ cl_lock_mutex_put(env, lock);
+ cl_lock_put(env, lock);
+}
+
+static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
+ struct cl_object *obj,
+ const struct cl_io *io,
+ const struct cl_lock_descr *descr)
+{
+ struct cl_lock *lock;
+ struct lu_object_header *head;
+
+ OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, __GFP_IO);
+ if (lock != NULL) {
+ atomic_set(&lock->cll_ref, 1);
+ lock->cll_descr = *descr;
+ lock->cll_state = CLS_NEW;
+ cl_object_get(obj);
+ lu_object_ref_add_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock",
+ lock);
+ INIT_LIST_HEAD(&lock->cll_layers);
+ INIT_LIST_HEAD(&lock->cll_linkage);
+ INIT_LIST_HEAD(&lock->cll_inclosure);
+ lu_ref_init(&lock->cll_reference);
+ lu_ref_init(&lock->cll_holders);
+ mutex_init(&lock->cll_guard);
+ lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
+ init_waitqueue_head(&lock->cll_wq);
+ head = obj->co_lu.lo_header;
+ CS_LOCKSTATE_INC(obj, CLS_NEW);
+ CS_LOCK_INC(obj, total);
+ CS_LOCK_INC(obj, create);
+ cl_lock_lockdep_init(lock);
+ list_for_each_entry(obj, &head->loh_layers,
+ co_lu.lo_linkage) {
+ int err;
+
+ err = obj->co_ops->coo_lock_init(env, obj, lock, io);
+ if (err != 0) {
+ cl_lock_finish(env, lock);
+ lock = ERR_PTR(err);
+ break;
+ }
+ }
+ } else
+ lock = ERR_PTR(-ENOMEM);
+ return lock;
+}
+
+/**
+ * Transfer the lock into INTRANSIT state and return the original state.
+ *
+ * \pre state: CLS_CACHED, CLS_HELD or CLS_ENQUEUED
+ * \post state: CLS_INTRANSIT
+ * \see CLS_INTRANSIT
+ */
+enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
+ struct cl_lock *lock)
+{
+ enum cl_lock_state state = lock->cll_state;
+
+ LASSERT(cl_lock_is_mutexed(lock));
+ LASSERT(state != CLS_INTRANSIT);
+ LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
+ "Malformed lock state %d.\n", state);
+
+ cl_lock_state_set(env, lock, CLS_INTRANSIT);
+ lock->cll_intransit_owner = current;
+ cl_lock_hold_add(env, lock, "intransit", current);
+ return state;
+}
+EXPORT_SYMBOL(cl_lock_intransit);
+
+/**
+ * Exit the intransit state and restore the lock state to the original state
+ */
+void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
+ enum cl_lock_state state)
+{
+ LASSERT(cl_lock_is_mutexed(lock));
+ LASSERT(lock->cll_state == CLS_INTRANSIT);
+ LASSERT(state != CLS_INTRANSIT);
+ LASSERT(lock->cll_intransit_owner == current);
+
+ lock->cll_intransit_owner = NULL;
+ cl_lock_state_set(env, lock, state);
+ cl_lock_unhold(env, lock, "intransit", current);
+}
+EXPORT_SYMBOL(cl_lock_extransit);
+
+/**
+ * Checks whether the lock is in INTRANSIT state.
+ */
+int cl_lock_is_intransit(struct cl_lock *lock)
+{
+ LASSERT(cl_lock_is_mutexed(lock));
+ return lock->cll_state == CLS_INTRANSIT &&
+ lock->cll_intransit_owner != current;
+}
+EXPORT_SYMBOL(cl_lock_is_intransit);
+
+/**
+ * Returns true iff lock is "suitable" for given io. E.g., locks acquired by
+ * truncate and O_APPEND cannot be reused for read/non-append-write, as they
+ * cover multiple stripes and can trigger cascading timeouts.
+ */
+static int cl_lock_fits_into(const struct lu_env *env,
+ const struct cl_lock *lock,
+ const struct cl_lock_descr *need,
+ const struct cl_io *io)
+{
+ const struct cl_lock_slice *slice;
+
+ LINVRNT(cl_lock_invariant_trusted(env, lock));
+ list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ if (slice->cls_ops->clo_fits_into != NULL &&
+ !slice->cls_ops->clo_fits_into(env, slice, need, io))
+ return 0;
+ }
+ return 1;
+}
+
+static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
+ struct cl_object *obj,
+ const struct cl_io *io,
+ const struct cl_lock_descr *need)
+{
+ struct cl_lock *lock;
+ struct cl_object_header *head;
+
+ head = cl_object_header(obj);
+ LINVRNT(spin_is_locked(&head->coh_lock_guard));
+ CS_LOCK_INC(obj, lookup);
+ list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
+ int matched;
+
+ matched = cl_lock_ext_match(&lock->cll_descr, need) &&
+ lock->cll_state < CLS_FREEING &&
+ lock->cll_error == 0 &&
+ !(lock->cll_flags & CLF_CANCELLED) &&
+ cl_lock_fits_into(env, lock, need, io);
+ CDEBUG(D_DLMTRACE, "has: "DDESCR"(%d) need: "DDESCR": %d\n",
+ PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
+ matched);
+ if (matched) {
+ cl_lock_get_trust(lock);
+ CS_LOCK_INC(obj, hit);
+ return lock;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * Returns a lock matching description \a need.
+ *
+ * This is the main entry point into the cl_lock caching interface. First, a
+ * cache (implemented as a per-object linked list) is consulted. If lock is
+ * found there, it is returned immediately. Otherwise new lock is allocated
+ * and returned. In any case, additional reference to lock is acquired.
+ *
+ * \see cl_object_find(), cl_page_find()
+ */
+static struct cl_lock *cl_lock_find(const struct lu_env *env,
+ const struct cl_io *io,
+ const struct cl_lock_descr *need)
+{
+ struct cl_object_header *head;
+ struct cl_object *obj;
+ struct cl_lock *lock;
+
+ obj = need->cld_obj;
+ head = cl_object_header(obj);
+
+ spin_lock(&head->coh_lock_guard);
+ lock = cl_lock_lookup(env, obj, io, need);
+ spin_unlock(&head->coh_lock_guard);
+
+ if (lock == NULL) {
+ lock = cl_lock_alloc(env, obj, io, need);
+ if (!IS_ERR(lock)) {
+ struct cl_lock *ghost;
+
+ spin_lock(&head->coh_lock_guard);
+ ghost = cl_lock_lookup(env, obj, io, need);
+ if (ghost == NULL) {
+ list_add_tail(&lock->cll_linkage,
+ &head->coh_locks);
+ spin_unlock(&head->coh_lock_guard);
+ CS_LOCK_INC(obj, busy);
+ } else {
+ spin_unlock(&head->coh_lock_guard);
+ /*
+ * Other threads can acquire references to the
+ * top-lock through its sub-locks. Hence, it
+ * cannot be cl_lock_free()-ed immediately.
+ */
+ cl_lock_finish(env, lock);
+ lock = ghost;
+ }
+ }
+ }
+ return lock;
+}
+
+/**
+ * Returns existing lock matching given description. This is similar to
+ * cl_lock_find() except that no new lock is created, and returned lock is
+ * guaranteed to be in enum cl_lock_state::CLS_HELD state.
+ */
+struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
+ const struct cl_lock_descr *need,
+ const char *scope, const void *source)
+{
+ struct cl_object_header *head;
+ struct cl_object *obj;
+ struct cl_lock *lock;
+
+ obj = need->cld_obj;
+ head = cl_object_header(obj);
+
+ do {
+ spin_lock(&head->coh_lock_guard);
+ lock = cl_lock_lookup(env, obj, io, need);
+ spin_unlock(&head->coh_lock_guard);
+ if (lock == NULL)
+ return NULL;
+
+ cl_lock_mutex_get(env, lock);
+ if (lock->cll_state == CLS_INTRANSIT)
+ /* The return value is ignored. */
+ cl_lock_state_wait(env, lock);
+ if (lock->cll_state == CLS_FREEING) {
+ cl_lock_mutex_put(env, lock);
+ cl_lock_put(env, lock);
+ lock = NULL;
+ }
+ } while (lock == NULL);
+
+ cl_lock_hold_add(env, lock, scope, source);
+ cl_lock_user_add(env, lock);
+ if (lock->cll_state == CLS_CACHED)
+ cl_use_try(env, lock, 1);
+ if (lock->cll_state == CLS_HELD) {
+ cl_lock_mutex_put(env, lock);
+ cl_lock_lockdep_acquire(env, lock, 0);
+ cl_lock_put(env, lock);
+ } else {
+ cl_unuse_try(env, lock);
+ cl_lock_unhold(env, lock, scope, source);
+ cl_lock_mutex_put(env, lock);
+ cl_lock_put(env, lock);
+ lock = NULL;
+ }
+
+ return lock;
+}
+EXPORT_SYMBOL(cl_lock_peek);
+
+/**
+ * Returns a slice within a lock, corresponding to the given layer in the
+ * device stack.
+ *
+ * \see cl_page_at()
+ */
+const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
+ const struct lu_device_type *dtype)
+{
+ const struct cl_lock_slice *slice;
+
+ LINVRNT(cl_lock_invariant_trusted(NULL, lock));
+
+ list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
+ return slice;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(cl_lock_at);
+
+static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
+{
+ struct cl_thread_counters *counters;
+
+ counters = cl_lock_counters(env, lock);
+ lock->cll_depth++;
+ counters->ctc_nr_locks_locked++;
+ lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
+ cl_lock_trace(D_TRACE, env, "got mutex", lock);
+}
+
+/**
+ * Locks cl_lock object.
+ *
+ * This is used to manipulate cl_lock fields, and to serialize state
+ * transitions in the lock state machine.
+ *
+ * \post cl_lock_is_mutexed(lock)
+ *
+ * \see cl_lock_mutex_put()
+ */
+void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
+{
+ LINVRNT(cl_lock_invariant(env, lock));
+
+ if (lock->cll_guarder == current) {
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(lock->cll_depth > 0);
+ } else {
+ struct cl_object_header *hdr;
+ struct cl_thread_info *info;
+ int i;
+
+ LINVRNT(lock->cll_guarder != current);
+ hdr = cl_object_header(lock->cll_descr.cld_obj);
+ /*
+ * Check that mutexes are taken in the bottom-to-top order.
+ */
+ info = cl_env_info(env);
+ for (i = 0; i < hdr->coh_nesting; ++i)
+ LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
+ mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
+ lock->cll_guarder = current;
+ LINVRNT(lock->cll_depth == 0);
+ }
+ cl_lock_mutex_tail(env, lock);
+}
+EXPORT_SYMBOL(cl_lock_mutex_get);
+
+/**
+ * Try-locks cl_lock object.
+ *
+ * \retval 0 \a lock was successfully locked
+ *
+ * \retval -EBUSY \a lock cannot be locked right now
+ *
+ * \post ergo(result == 0, cl_lock_is_mutexed(lock))
+ *
+ * \see cl_lock_mutex_get()
+ */
+int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
+{
+ int result;
+
+ LINVRNT(cl_lock_invariant_trusted(env, lock));
+
+ result = 0;
+ if (lock->cll_guarder == current) {
+ LINVRNT(lock->cll_depth > 0);
+ cl_lock_mutex_tail(env, lock);
+ } else if (mutex_trylock(&lock->cll_guard)) {
+ LINVRNT(lock->cll_depth == 0);
+ lock->cll_guarder = current;
+ cl_lock_mutex_tail(env, lock);
+ } else
+ result = -EBUSY;
+ return result;
+}
+EXPORT_SYMBOL(cl_lock_mutex_try);
+
+/**
+ * Unlocks cl_lock object.
+ *
+ * \pre cl_lock_is_mutexed(lock)
+ *
+ * \see cl_lock_mutex_get()
+ */
+void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
+{
+ struct cl_thread_counters *counters;
+
+ LINVRNT(cl_lock_invariant(env, lock));
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(lock->cll_guarder == current);
+ LINVRNT(lock->cll_depth > 0);
+
+ counters = cl_lock_counters(env, lock);
+ LINVRNT(counters->ctc_nr_locks_locked > 0);
+
+ cl_lock_trace(D_TRACE, env, "put mutex", lock);
+ lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
+ counters->ctc_nr_locks_locked--;
+ if (--lock->cll_depth == 0) {
+ lock->cll_guarder = NULL;
+ mutex_unlock(&lock->cll_guard);
+ }
+}
+EXPORT_SYMBOL(cl_lock_mutex_put);
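+
+/*
+ * The lock mutex is recursive for its owner: a sketch of nested use (with
+ * a hypothetical caller), where cll_depth tracks the nesting:
+ *
+ *	cl_lock_mutex_get(env, lock);	// cll_depth == 1
+ *	cl_lock_mutex_get(env, lock);	// same thread, cll_depth == 2
+ *	cl_lock_mutex_put(env, lock);	// cll_depth back to 1
+ *	cl_lock_mutex_put(env, lock);	// released, cll_guarder cleared
+ */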
+
+/**
+ * Returns true iff lock's mutex is owned by the current thread.
+ */
+int cl_lock_is_mutexed(struct cl_lock *lock)
+{
+ return lock->cll_guarder == current;
+}
+EXPORT_SYMBOL(cl_lock_is_mutexed);
+
+/**
+ * Returns number of cl_lock mutexes held by the current thread (environment).
+ */
+int cl_lock_nr_mutexed(const struct lu_env *env)
+{
+ struct cl_thread_info *info;
+ int i;
+ int locked;
+
+ /*
+ * NOTE: if summation across all nesting levels (currently 2) proves
+ * too expensive, a summary counter can be added to
+ * struct cl_thread_info.
+ */
+ info = cl_env_info(env);
+ for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
+ locked += info->clt_counters[i].ctc_nr_locks_locked;
+ return locked;
+}
+EXPORT_SYMBOL(cl_lock_nr_mutexed);
+
+static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
+{
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(cl_lock_invariant(env, lock));
+ if (!(lock->cll_flags & CLF_CANCELLED)) {
+ const struct cl_lock_slice *slice;
+
+ lock->cll_flags |= CLF_CANCELLED;
+ list_for_each_entry_reverse(slice, &lock->cll_layers,
+ cls_linkage) {
+ if (slice->cls_ops->clo_cancel != NULL)
+ slice->cls_ops->clo_cancel(env, slice);
+ }
+ }
+}
+
+static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
+{
+ struct cl_object_header *head;
+ const struct cl_lock_slice *slice;
+
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(cl_lock_invariant(env, lock));
+
+ if (lock->cll_state < CLS_FREEING) {
+ LASSERT(lock->cll_state != CLS_INTRANSIT);
+ cl_lock_state_set(env, lock, CLS_FREEING);
+
+ head = cl_object_header(lock->cll_descr.cld_obj);
+
+ spin_lock(&head->coh_lock_guard);
+ list_del_init(&lock->cll_linkage);
+ spin_unlock(&head->coh_lock_guard);
+
+ /*
+ * From now on, no new references to this lock can be acquired
+ * by cl_lock_lookup().
+ */
+ list_for_each_entry_reverse(slice, &lock->cll_layers,
+ cls_linkage) {
+ if (slice->cls_ops->clo_delete != NULL)
+ slice->cls_ops->clo_delete(env, slice);
+ }
+ /*
+ * From now on, no new references to this lock can be acquired
+ * by layer-specific means (like a pointer from struct
+ * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
+ * lov).
+ *
+ * Lock will be finally freed in cl_lock_put() when last of
+ * existing references goes away.
+ */
+ }
+}
+
+/**
+ * Mod(ifie)s cl_lock::cll_holds counter for a given lock. Also, for a
+ * top-lock (nesting == 0) accounts for this modification in the per-thread
+ * debugging counters. Sub-lock holds can be released by a thread different
+ * from one that acquired it.
+ */
+static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
+ int delta)
+{
+ struct cl_thread_counters *counters;
+ enum clt_nesting_level nesting;
+
+ lock->cll_holds += delta;
+ nesting = cl_lock_nesting(lock);
+ if (nesting == CNL_TOP) {
+ counters = &cl_env_info(env)->clt_counters[CNL_TOP];
+ counters->ctc_nr_held += delta;
+ LASSERT(counters->ctc_nr_held >= 0);
+ }
+}
+
+/**
+ * Mod(ifie)s cl_lock::cll_users counter for a given lock. See
+ * cl_lock_hold_mod() for the explanation of the debugging code.
+ */
+static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
+ int delta)
+{
+ struct cl_thread_counters *counters;
+ enum clt_nesting_level nesting;
+
+ lock->cll_users += delta;
+ nesting = cl_lock_nesting(lock);
+ if (nesting == CNL_TOP) {
+ counters = &cl_env_info(env)->clt_counters[CNL_TOP];
+ counters->ctc_nr_used += delta;
+ LASSERT(counters->ctc_nr_used >= 0);
+ }
+}
+
+void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
+ const char *scope, const void *source)
+{
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(cl_lock_invariant(env, lock));
+ LASSERT(lock->cll_holds > 0);
+
+ cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
+ lu_ref_del(&lock->cll_holders, scope, source);
+ cl_lock_hold_mod(env, lock, -1);
+ if (lock->cll_holds == 0) {
+ CL_LOCK_ASSERT(lock->cll_state != CLS_HELD, env, lock);
+ if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
+ lock->cll_descr.cld_mode == CLM_GROUP ||
+ lock->cll_state != CLS_CACHED)
+ /*
+ * If lock is still phantom or grouplock when user is
+ * done with it---destroy the lock.
+ */
+ lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
+ if (lock->cll_flags & CLF_CANCELPEND) {
+ lock->cll_flags &= ~CLF_CANCELPEND;
+ cl_lock_cancel0(env, lock);
+ }
+ if (lock->cll_flags & CLF_DOOMED) {
+ /* no longer doomed: it's dead... Jim. */
+ lock->cll_flags &= ~CLF_DOOMED;
+ cl_lock_delete0(env, lock);
+ }
+ }
+}
+EXPORT_SYMBOL(cl_lock_hold_release);
+
+/**
+ * Waits until lock state is changed.
+ *
+ * This function is called with cl_lock mutex locked, atomically releases
+ * mutex and goes to sleep, waiting for a lock state change (signaled by
+ * cl_lock_signal()), and re-acquires the mutex before return.
+ *
+ * This function is used to wait until lock state machine makes some progress
+ * and to emulate synchronous operations on top of asynchronous lock
+ * interface.
+ *
+ * \retval -EINTR wait was interrupted
+ *
+ * \retval 0 wait wasn't interrupted
+ *
+ * \pre cl_lock_is_mutexed(lock)
+ *
+ * \see cl_lock_signal()
+ */
+int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
+{
+ wait_queue_t waiter;
+ sigset_t blocked;
+ int result;
+
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(cl_lock_invariant(env, lock));
+ LASSERT(lock->cll_depth == 1);
+ LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
+
+ cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
+ result = lock->cll_error;
+ if (result == 0) {
+ /* To avoid being interrupted by the 'non-fatal' signals
+ * (SIGCHLD, for instance), we'd block them temporarily.
+ * LU-305 */
+ blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
+
+ init_waitqueue_entry_current(&waiter);
+ add_wait_queue(&lock->cll_wq, &waiter);
+ set_current_state(TASK_INTERRUPTIBLE);
+ cl_lock_mutex_put(env, lock);
+
+ LASSERT(cl_lock_nr_mutexed(env) == 0);
+
+ /* Returning ERESTARTSYS instead of EINTR so syscalls
+ * can be restarted if signals are pending here */
+ result = -ERESTARTSYS;
+ if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
+ waitq_wait(&waiter, TASK_INTERRUPTIBLE);
+ if (!cfs_signal_pending())
+ result = 0;
+ }
+
+ cl_lock_mutex_get(env, lock);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&lock->cll_wq, &waiter);
+
+ /* Restore old blocked signals */
+ cfs_restore_sigs(blocked);
+ }
+ return result;
+}
+EXPORT_SYMBOL(cl_lock_state_wait);
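+
+/*
+ * A sketch of the synchronous retry loop this function is meant for (the
+ * same pattern is used by cl_enqueue_locked() and cl_wait() below); the
+ * lock mutex is assumed to be held on entry and all other names are
+ * hypothetical:
+ *
+ *	do {
+ *		result = cl_enqueue_try(env, lock, io, flags);
+ *		if (result == CLO_WAIT) {
+ *			result = cl_lock_state_wait(env, lock);
+ *			if (result == 0)
+ *				continue;
+ *		}
+ *		break;
+ *	} while (1);
+ */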
+
+static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
+ enum cl_lock_state state)
+{
+ const struct cl_lock_slice *slice;
+
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(cl_lock_invariant(env, lock));
+
+ list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
+ if (slice->cls_ops->clo_state != NULL)
+ slice->cls_ops->clo_state(env, slice, state);
+ wake_up_all(&lock->cll_wq);
+}
+
+/**
+ * Notifies waiters that lock state changed.
+ *
+ * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all
+ * layers about state change by calling cl_lock_operations::clo_state()
+ * top-to-bottom.
+ */
+void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
+{
+ cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock);
+ cl_lock_state_signal(env, lock, lock->cll_state);
+}
+EXPORT_SYMBOL(cl_lock_signal);
+
+/**
+ * Changes lock state.
+ *
+ * This function is invoked to notify layers that lock state changed, possibly
+ * as a result of an asynchronous event such as call-back reception.
+ *
+ * \post lock->cll_state == state
+ *
+ * \see cl_lock_operations::clo_state()
+ */
+void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
+ enum cl_lock_state state)
+{
+ LASSERT(lock->cll_state <= state ||
+ (lock->cll_state == CLS_CACHED &&
+ (state == CLS_HELD || /* lock found in cache */
+ state == CLS_NEW || /* sub-lock canceled */
+ state == CLS_INTRANSIT)) ||
+ /* lock is in transit state */
+ lock->cll_state == CLS_INTRANSIT);
+
+ if (lock->cll_state != state) {
+ CS_LOCKSTATE_DEC(lock->cll_descr.cld_obj, lock->cll_state);
+ CS_LOCKSTATE_INC(lock->cll_descr.cld_obj, state);
+
+ cl_lock_state_signal(env, lock, state);
+ lock->cll_state = state;
+ }
+}
+EXPORT_SYMBOL(cl_lock_state_set);
+
+static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
+{
+ const struct cl_lock_slice *slice;
+ int result;
+
+ do {
+ result = 0;
+
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(cl_lock_invariant(env, lock));
+ LASSERT(lock->cll_state == CLS_INTRANSIT);
+
+ result = -ENOSYS;
+ list_for_each_entry_reverse(slice, &lock->cll_layers,
+ cls_linkage) {
+ if (slice->cls_ops->clo_unuse != NULL) {
+ result = slice->cls_ops->clo_unuse(env, slice);
+ if (result != 0)
+ break;
+ }
+ }
+ LASSERT(result != -ENOSYS);
+ } while (result == CLO_REPEAT);
+
+ return result;
+}
+
+/**
+ * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
+ * cl_lock_operations::clo_use() top-to-bottom to notify layers.
+ * If @atomic is 1, the lock must be unused on failure to recover it, so that
+ * the whole use operation stays atomic.
+ */
+int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
+{
+ const struct cl_lock_slice *slice;
+ int result;
+ enum cl_lock_state state;
+
+ cl_lock_trace(D_DLMTRACE, env, "use lock", lock);
+
+ LASSERT(lock->cll_state == CLS_CACHED);
+ if (lock->cll_error)
+ return lock->cll_error;
+
+ result = -ENOSYS;
+ state = cl_lock_intransit(env, lock);
+ list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ if (slice->cls_ops->clo_use != NULL) {
+ result = slice->cls_ops->clo_use(env, slice);
+ if (result != 0)
+ break;
+ }
+ }
+ LASSERT(result != -ENOSYS);
+
+ LASSERTF(lock->cll_state == CLS_INTRANSIT, "Wrong state %d.\n",
+ lock->cll_state);
+
+ if (result == 0) {
+ state = CLS_HELD;
+ } else {
+ if (result == -ESTALE) {
+ /*
+ * -ESTALE means the sublock is being
+ * cancelled at this time; set the lock
+ * state to NEW and ask the caller to
+ * repeat.
+ */
+ state = CLS_NEW;
+ result = CLO_REPEAT;
+ }
+
+ /* @atomic means back-off-on-failure. */
+ if (atomic) {
+ int rc;
+ rc = cl_unuse_try_internal(env, lock);
+ /* Vet the results. */
+ if (rc < 0 && result > 0)
+ result = rc;
+ }
+
+ }
+ cl_lock_extransit(env, lock, state);
+ return result;
+}
+EXPORT_SYMBOL(cl_use_try);
+
+/**
+ * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers
+ * top-to-bottom.
+ */
+static int cl_enqueue_kick(const struct lu_env *env,
+ struct cl_lock *lock,
+ struct cl_io *io, __u32 flags)
+{
+ int result;
+ const struct cl_lock_slice *slice;
+
+ result = -ENOSYS;
+ list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ if (slice->cls_ops->clo_enqueue != NULL) {
+ result = slice->cls_ops->clo_enqueue(env,
+ slice, io, flags);
+ if (result != 0)
+ break;
+ }
+ }
+ LASSERT(result != -ENOSYS);
+ return result;
+}
+
+/**
+ * Tries to enqueue a lock.
+ *
+ * This function is called repeatedly by cl_enqueue() until either lock is
+ * enqueued, or error occurs. This function does not block waiting for
+ * networking communication to complete.
+ *
+ * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
+ * lock->cll_state == CLS_HELD)
+ *
+ * \see cl_enqueue() cl_lock_operations::clo_enqueue()
+ * \see cl_lock_state::CLS_ENQUEUED
+ */
+int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
+ struct cl_io *io, __u32 flags)
+{
+ int result;
+
+ cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
+ do {
+ LINVRNT(cl_lock_is_mutexed(lock));
+
+ result = lock->cll_error;
+ if (result != 0)
+ break;
+
+ switch (lock->cll_state) {
+ case CLS_NEW:
+ cl_lock_state_set(env, lock, CLS_QUEUING);
+ /* fall-through */
+ case CLS_QUEUING:
+ /* kick layers. */
+ result = cl_enqueue_kick(env, lock, io, flags);
+ /* In the AGL case, cl_lock::cll_state may
+ * already have become CLS_HELD. */
+ if (result == 0 && lock->cll_state == CLS_QUEUING)
+ cl_lock_state_set(env, lock, CLS_ENQUEUED);
+ break;
+ case CLS_INTRANSIT:
+ LASSERT(cl_lock_is_intransit(lock));
+ result = CLO_WAIT;
+ break;
+ case CLS_CACHED:
+ /* yank lock from the cache. */
+ result = cl_use_try(env, lock, 0);
+ break;
+ case CLS_ENQUEUED:
+ case CLS_HELD:
+ result = 0;
+ break;
+ default:
+ case CLS_FREEING:
+ /*
+ * impossible, only held locks with increased
+ * ->cll_holds can be enqueued, and they cannot be
+ * freed.
+ */
+ LBUG();
+ }
+ } while (result == CLO_REPEAT);
+ return result;
+}
+EXPORT_SYMBOL(cl_enqueue_try);
+
+/**
+ * Cancel the conflicting lock found during previous enqueue.
+ *
+ * \retval 0 conflicting lock has been canceled.
+ * \retval -ve error code.
+ */
+int cl_lock_enqueue_wait(const struct lu_env *env,
+ struct cl_lock *lock,
+ int keep_mutex)
+{
+ struct cl_lock *conflict;
+ int rc = 0;
+
+ LASSERT(cl_lock_is_mutexed(lock));
+ LASSERT(lock->cll_state == CLS_QUEUING);
+ LASSERT(lock->cll_conflict != NULL);
+
+ conflict = lock->cll_conflict;
+ lock->cll_conflict = NULL;
+
+ cl_lock_mutex_put(env, lock);
+ LASSERT(cl_lock_nr_mutexed(env) == 0);
+
+ cl_lock_mutex_get(env, conflict);
+ cl_lock_trace(D_DLMTRACE, env, "enqueue wait", conflict);
+ cl_lock_cancel(env, conflict);
+ cl_lock_delete(env, conflict);
+
+ while (conflict->cll_state != CLS_FREEING) {
+ rc = cl_lock_state_wait(env, conflict);
+ if (rc != 0)
+ break;
+ }
+ cl_lock_mutex_put(env, conflict);
+ lu_ref_del(&conflict->cll_reference, "cancel-wait", lock);
+ cl_lock_put(env, conflict);
+
+ if (keep_mutex)
+ cl_lock_mutex_get(env, lock);
+
+ LASSERT(rc <= 0);
+ return rc;
+}
+EXPORT_SYMBOL(cl_lock_enqueue_wait);
+
+static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
+ struct cl_io *io, __u32 enqflags)
+{
+ int result;
+
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(cl_lock_invariant(env, lock));
+ LASSERT(lock->cll_holds > 0);
+
+ cl_lock_user_add(env, lock);
+ do {
+ result = cl_enqueue_try(env, lock, io, enqflags);
+ if (result == CLO_WAIT) {
+ if (lock->cll_conflict != NULL)
+ result = cl_lock_enqueue_wait(env, lock, 1);
+ else
+ result = cl_lock_state_wait(env, lock);
+ if (result == 0)
+ continue;
+ }
+ break;
+ } while (1);
+ if (result != 0)
+ cl_unuse_try(env, lock);
+ LASSERT(ergo(result == 0 && !(enqflags & CEF_AGL),
+ lock->cll_state == CLS_ENQUEUED ||
+ lock->cll_state == CLS_HELD));
+ return result;
+}
+
+/**
+ * Enqueues a lock.
+ *
+ * \pre current thread or io owns a hold on lock.
+ *
+ * \post ergo(result == 0, lock->users increased)
+ * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
+ * lock->cll_state == CLS_HELD)
+ */
+int cl_enqueue(const struct lu_env *env, struct cl_lock *lock,
+ struct cl_io *io, __u32 enqflags)
+{
+ int result;
+
+ cl_lock_lockdep_acquire(env, lock, enqflags);
+ cl_lock_mutex_get(env, lock);
+ result = cl_enqueue_locked(env, lock, io, enqflags);
+ cl_lock_mutex_put(env, lock);
+ if (result != 0)
+ cl_lock_lockdep_release(env, lock);
+ LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
+ lock->cll_state == CLS_HELD));
+ return result;
+}
+EXPORT_SYMBOL(cl_enqueue);
+
+/**
+ * Tries to unlock a lock.
+ *
+ * This function is called to release the underlying resource:
+ * 1. for a top lock, the resource is the sublocks it holds;
+ * 2. for a sublock, the resource is the reference to its dlmlock.
+ *
+ * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT.
+ *
+ * \see cl_unuse() cl_lock_operations::clo_unuse()
+ * \see cl_lock_state::CLS_CACHED
+ */
+int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
+{
+ int result;
+ enum cl_lock_state state = CLS_NEW;
+
+ cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);
+
+ if (lock->cll_users > 1) {
+ cl_lock_user_del(env, lock);
+ return 0;
+ }
+
+ /* Only if the lock is in CLS_HELD or CLS_ENQUEUED state, it can hold
+ * underlying resources. */
+ if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) {
+ cl_lock_user_del(env, lock);
+ return 0;
+ }
+
+ /*
+ * New lock users (->cll_users) no longer prevent unlocking from
+ * proceeding. From this point on, the lock eventually reaches
+ * CLS_CACHED, is reinitialized to CLS_NEW, or falls into
+ * CLS_FREEING.
+ */
+ state = cl_lock_intransit(env, lock);
+
+ result = cl_unuse_try_internal(env, lock);
+ LASSERT(lock->cll_state == CLS_INTRANSIT);
+ LASSERT(result != CLO_WAIT);
+ cl_lock_user_del(env, lock);
+ if (result == 0 || result == -ESTALE) {
+ /*
+ * Return lock back to the cache. This is the only
+ * place where lock is moved into CLS_CACHED state.
+ *
+ * If one of ->clo_unuse() methods returned -ESTALE, lock
+ * cannot be placed into cache and has to be
+ * re-initialized. This happens e.g., when a sub-lock was
+ * canceled while unlocking was in progress.
+ */
+ if (state == CLS_HELD && result == 0)
+ state = CLS_CACHED;
+ else
+ state = CLS_NEW;
+ cl_lock_extransit(env, lock, state);
+
+ /*
+ * Hide the -ESTALE error.
+ * Consider a glimpse lock with multiple stripes, where one of
+ * its sublocks returned -ENAVAIL and the other sublocks are
+ * matched write locks. In this case we cannot mark the lock as
+ * failed, because otherwise some of its sublocks may not be
+ * cancelled, and some dirty pages would never be written to
+ * the OSTs. -jay
+ */
+ result = 0;
+ } else {
+ CERROR("result = %d, this is unlikely!\n", result);
+ state = CLS_NEW;
+ cl_lock_extransit(env, lock, state);
+ }
+ return result ?: lock->cll_error;
+}
+EXPORT_SYMBOL(cl_unuse_try);
+
+static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
+{
+ int result;
+
+ result = cl_unuse_try(env, lock);
+ if (result)
+ CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result);
+}
+
+/**
+ * Unlocks a lock.
+ */
+void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
+{
+ cl_lock_mutex_get(env, lock);
+ cl_unuse_locked(env, lock);
+ cl_lock_mutex_put(env, lock);
+ cl_lock_lockdep_release(env, lock);
+}
+EXPORT_SYMBOL(cl_unuse);
+
+/**
+ * Tries to wait for a lock.
+ *
+ * This function is called repeatedly by cl_wait() until either lock is
+ * granted, or error occurs. This function does not block waiting for network
+ * communication to complete.
+ *
+ * \see cl_wait() cl_lock_operations::clo_wait()
+ * \see cl_lock_state::CLS_HELD
+ */
+int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
+{
+ const struct cl_lock_slice *slice;
+ int result;
+
+ cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock);
+ do {
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(cl_lock_invariant(env, lock));
+ LASSERTF(lock->cll_state == CLS_QUEUING ||
+ lock->cll_state == CLS_ENQUEUED ||
+ lock->cll_state == CLS_HELD ||
+ lock->cll_state == CLS_INTRANSIT,
+ "lock state: %d\n", lock->cll_state);
+ LASSERT(lock->cll_users > 0);
+ LASSERT(lock->cll_holds > 0);
+
+ result = lock->cll_error;
+ if (result != 0)
+ break;
+
+ if (cl_lock_is_intransit(lock)) {
+ result = CLO_WAIT;
+ break;
+ }
+
+ if (lock->cll_state == CLS_HELD)
+ /* nothing to do */
+ break;
+
+ result = -ENOSYS;
+ list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ if (slice->cls_ops->clo_wait != NULL) {
+ result = slice->cls_ops->clo_wait(env, slice);
+ if (result != 0)
+ break;
+ }
+ }
+ LASSERT(result != -ENOSYS);
+ if (result == 0) {
+ LASSERT(lock->cll_state != CLS_INTRANSIT);
+ cl_lock_state_set(env, lock, CLS_HELD);
+ }
+ } while (result == CLO_REPEAT);
+ return result;
+}
+EXPORT_SYMBOL(cl_wait_try);
+
+/**
+ * Waits until enqueued lock is granted.
+ *
+ * \pre current thread or io owns a hold on the lock
+ * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
+ * lock->cll_state == CLS_HELD)
+ *
+ * \post ergo(result == 0, lock->cll_state == CLS_HELD)
+ */
+int cl_wait(const struct lu_env *env, struct cl_lock *lock)
+{
+ int result;
+
+ cl_lock_mutex_get(env, lock);
+
+ LINVRNT(cl_lock_invariant(env, lock));
+ LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
+ "Wrong state %d \n", lock->cll_state);
+ LASSERT(lock->cll_holds > 0);
+
+ do {
+ result = cl_wait_try(env, lock);
+ if (result == CLO_WAIT) {
+ result = cl_lock_state_wait(env, lock);
+ if (result == 0)
+ continue;
+ }
+ break;
+ } while (1);
+ if (result < 0) {
+ cl_unuse_try(env, lock);
+ cl_lock_lockdep_release(env, lock);
+ }
+ cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
+ cl_lock_mutex_put(env, lock);
+ LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
+ return result;
+}
+EXPORT_SYMBOL(cl_wait);
+
+/**
+ * Executes cl_lock_operations::clo_weigh(), and sums results to estimate lock
+ * value.
+ */
+unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
+{
+ const struct cl_lock_slice *slice;
+ unsigned long pound;
+ unsigned long ounce;
+
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(cl_lock_invariant(env, lock));
+
+ pound = 0;
+ list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
+ if (slice->cls_ops->clo_weigh != NULL) {
+ ounce = slice->cls_ops->clo_weigh(env, slice);
+ pound += ounce;
+ if (pound < ounce) /* over-weight^Wflow */
+ pound = ~0UL;
+ }
+ }
+ return pound;
+}
+EXPORT_SYMBOL(cl_lock_weigh);
+
+/**
+ * Notifies layers that lock description changed.
+ *
+ * The server can grant the client a lock different from the one that was
+ * requested (e.g., larger in extent). This method is called once the actually
+ * granted lock description becomes known, to let layers accommodate the
+ * changed lock description.
+ *
+ * \see cl_lock_operations::clo_modify()
+ */
+int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
+ const struct cl_lock_descr *desc)
+{
+ const struct cl_lock_slice *slice;
+ struct cl_object *obj = lock->cll_descr.cld_obj;
+ struct cl_object_header *hdr = cl_object_header(obj);
+ int result;
+
+ cl_lock_trace(D_DLMTRACE, env, "modify lock", lock);
+ /* don't allow object to change */
+ LASSERT(obj == desc->cld_obj);
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(cl_lock_invariant(env, lock));
+
+ list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
+ if (slice->cls_ops->clo_modify != NULL) {
+ result = slice->cls_ops->clo_modify(env, slice, desc);
+ if (result != 0)
+ return result;
+ }
+ }
+ CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
+ PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu)));
+ /*
+ * Just replace description in place. Nothing more is needed for
+ * now. If locks were indexed according to their extent and/or mode,
+ * that index would have to be updated here.
+ */
+ spin_lock(&hdr->coh_lock_guard);
+ lock->cll_descr = *desc;
+ spin_unlock(&hdr->coh_lock_guard);
+ return 0;
+}
+EXPORT_SYMBOL(cl_lock_modify);
+
+/**
+ * Initializes lock closure with a given origin.
+ *
+ * \see cl_lock_closure
+ */
+void cl_lock_closure_init(const struct lu_env *env,
+ struct cl_lock_closure *closure,
+ struct cl_lock *origin, int wait)
+{
+ LINVRNT(cl_lock_is_mutexed(origin));
+ LINVRNT(cl_lock_invariant(env, origin));
+
+ INIT_LIST_HEAD(&closure->clc_list);
+ closure->clc_origin = origin;
+ closure->clc_wait = wait;
+ closure->clc_nr = 0;
+}
+EXPORT_SYMBOL(cl_lock_closure_init);
+
+/**
+ * Builds a closure of \a lock.
+ *
+ * Building of a closure consists of adding initial lock (\a lock) into it,
+ * and calling cl_lock_operations::clo_closure() methods of \a lock. These
+ * methods might call cl_lock_closure_build() recursively again, adding more
+ * locks to the closure, etc.
+ *
+ * \see cl_lock_closure
+ */
+int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
+ struct cl_lock_closure *closure)
+{
+ const struct cl_lock_slice *slice;
+ int result;
+
+ LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
+ LINVRNT(cl_lock_invariant(env, closure->clc_origin));
+
+ result = cl_lock_enclosure(env, lock, closure);
+ if (result == 0) {
+ list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ if (slice->cls_ops->clo_closure != NULL) {
+ result = slice->cls_ops->clo_closure(env, slice,
+ closure);
+ if (result != 0)
+ break;
+ }
+ }
+ }
+ if (result != 0)
+ cl_lock_disclosure(env, closure);
+ return result;
+}
+EXPORT_SYMBOL(cl_lock_closure_build);
+
+/**
+ * Adds new lock to a closure.
+ *
+ * Try-locks \a lock and if succeeded, adds it to the closure (never more than
+ * once). If try-lock failed, returns CLO_REPEAT, after optionally waiting
+ * until next try-lock is likely to succeed.
+ */
+int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
+ struct cl_lock_closure *closure)
+{
+ int result = 0;
+
+ cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock);
+ if (!cl_lock_mutex_try(env, lock)) {
+ /*
+ * If lock->cll_inclosure is not empty, lock is already in
+ * this closure.
+ */
+ if (list_empty(&lock->cll_inclosure)) {
+ cl_lock_get_trust(lock);
+ lu_ref_add(&lock->cll_reference, "closure", closure);
+ list_add(&lock->cll_inclosure, &closure->clc_list);
+ closure->clc_nr++;
+ } else
+ cl_lock_mutex_put(env, lock);
+ result = 0;
+ } else {
+ cl_lock_disclosure(env, closure);
+ if (closure->clc_wait) {
+ cl_lock_get_trust(lock);
+ lu_ref_add(&lock->cll_reference, "closure-w", closure);
+ cl_lock_mutex_put(env, closure->clc_origin);
+
+ LASSERT(cl_lock_nr_mutexed(env) == 0);
+ cl_lock_mutex_get(env, lock);
+ cl_lock_mutex_put(env, lock);
+
+ cl_lock_mutex_get(env, closure->clc_origin);
+ lu_ref_del(&lock->cll_reference, "closure-w", closure);
+ cl_lock_put(env, lock);
+ }
+ result = CLO_REPEAT;
+ }
+ return result;
+}
+EXPORT_SYMBOL(cl_lock_enclosure);
+
+/** Releases mutices of enclosed locks. */
+void cl_lock_disclosure(const struct lu_env *env,
+ struct cl_lock_closure *closure)
+{
+ struct cl_lock *scan;
+ struct cl_lock *temp;
+
+ cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
+ list_for_each_entry_safe(scan, temp, &closure->clc_list,
+ cll_inclosure) {
+ list_del_init(&scan->cll_inclosure);
+ cl_lock_mutex_put(env, scan);
+ lu_ref_del(&scan->cll_reference, "closure", closure);
+ cl_lock_put(env, scan);
+ closure->clc_nr--;
+ }
+ LASSERT(closure->clc_nr == 0);
+}
+EXPORT_SYMBOL(cl_lock_disclosure);
+
+/** Finalizes a closure. */
+void cl_lock_closure_fini(struct cl_lock_closure *closure)
+{
+ LASSERT(closure->clc_nr == 0);
+ LASSERT(list_empty(&closure->clc_list));
+}
+EXPORT_SYMBOL(cl_lock_closure_fini);
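+
+/*
+ * Closure life-cycle, sketched in terms of the helpers above; the origin
+ * lock is assumed to be mutexed by the caller and all other names are
+ * hypothetical:
+ *
+ *	struct cl_lock_closure closure;
+ *
+ *	cl_lock_closure_init(env, &closure, origin, 1);
+ *	rc = cl_lock_closure_build(env, lock, &closure);
+ *	if (rc == 0) {
+ *		... all enclosed locks are mutexed here ...
+ *		cl_lock_disclosure(env, &closure);
+ *	}
+ *	cl_lock_closure_fini(&closure);
+ *
+ * If cl_lock_closure_build() fails, the closure has already been disclosed;
+ * CLO_REPEAT asks the caller to retry.
+ */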
+
+/**
+ * Destroys this lock. Notifies layers (bottom-to-top) that the lock is being
+ * destroyed, then destroys the lock. If there are holds on the lock,
+ * destruction is postponed until all holds are released. This is called when
+ * a decision is made to destroy the lock in the future, e.g., when a blocking
+ * AST is received on it, or a fatal communication error happens.
+ *
+ * The caller must have a reference on this lock to prevent a situation where
+ * the deleted lock lingers in memory indefinitely because nobody calls
+ * cl_lock_put() to finish it.
+ *
+ * \pre atomic_read(&lock->cll_ref) > 0
+ * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
+ * cl_lock_nr_mutexed(env) == 1)
+ * [i.e., if a top-lock is deleted, mutices of no other locks can be
+ * held, as deletion of sub-locks might require releasing a top-lock
+ * mutex]
+ *
+ * \see cl_lock_operations::clo_delete()
+ * \see cl_lock::cll_holds
+ */
+void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
+{
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(cl_lock_invariant(env, lock));
+ LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
+ cl_lock_nr_mutexed(env) == 1));
+
+ cl_lock_trace(D_DLMTRACE, env, "delete lock", lock);
+ if (lock->cll_holds == 0)
+ cl_lock_delete0(env, lock);
+ else
+ lock->cll_flags |= CLF_DOOMED;
+}
+EXPORT_SYMBOL(cl_lock_delete);
+
+/**
+ * Mark lock as irrecoverably failed, and mark it for destruction. This
+ * happens when, e.g., server fails to grant a lock to us, or networking
+ * time-out happens.
+ *
+ * \pre atomic_read(&lock->cll_ref) > 0
+ *
+ * \see clo_lock_delete()
+ * \see cl_lock::cll_holds
+ */
+void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
+{
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(cl_lock_invariant(env, lock));
+
+ if (lock->cll_error == 0 && error != 0) {
+ cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
+ lock->cll_error = error;
+ cl_lock_signal(env, lock);
+ cl_lock_cancel(env, lock);
+ cl_lock_delete(env, lock);
+ }
+}
+EXPORT_SYMBOL(cl_lock_error);
+
+/**
+ * Cancels this lock. Notifies layers (bottom-to-top) that the lock is being
+ * cancelled, then cancels the lock. If there are holds on the lock,
+ * cancellation is postponed until all holds are released.
+ *
+ * Cancellation notification is delivered to layers at most once.
+ *
+ * \see cl_lock_operations::clo_cancel()
+ * \see cl_lock::cll_holds
+ */
+void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
+{
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(cl_lock_invariant(env, lock));
+
+ cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
+ if (lock->cll_holds == 0)
+ cl_lock_cancel0(env, lock);
+ else
+ lock->cll_flags |= CLF_CANCELPEND;
+}
+EXPORT_SYMBOL(cl_lock_cancel);
+
+/**
+ * Finds an existing lock covering given index and optionally different from a
+ * given \a except lock.
+ */
+struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
+ struct cl_object *obj, pgoff_t index,
+ struct cl_lock *except,
+ int pending, int canceld)
+{
+ struct cl_object_header *head;
+ struct cl_lock *scan;
+ struct cl_lock *lock;
+ struct cl_lock_descr *need;
+
+ head = cl_object_header(obj);
+ need = &cl_env_info(env)->clt_descr;
+ lock = NULL;
+
+ need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
+ * not PHANTOM */
+ need->cld_start = need->cld_end = index;
+ need->cld_enq_flags = 0;
+
+ spin_lock(&head->coh_lock_guard);
+ /* It is fine to match any group lock since there could be only one
+ * with a unique gid, and it conflicts with all other lock modes too */
+ list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
+ if (scan != except &&
+ (scan->cll_descr.cld_mode == CLM_GROUP ||
+ cl_lock_ext_match(&scan->cll_descr, need)) &&
+ scan->cll_state >= CLS_HELD &&
+ scan->cll_state < CLS_FREEING &&
+ /*
+ * This check is racy as the lock can be canceled right
+ * after it is done, but this is fine, because page exists
+ * already.
+ */
+ (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
+ (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
+ /* Don't increase cs_hit here since this
+ * is just a helper function. */
+ cl_lock_get_trust(scan);
+ lock = scan;
+ break;
+ }
+ }
+ spin_unlock(&head->coh_lock_guard);
+ return lock;
+}
+EXPORT_SYMBOL(cl_lock_at_pgoff);
+
+/**
+ * Calculate the page offset at the layer of @lock.
+ * At the time of this writing, @page is the top page and @lock is a sub-lock.
+ */
+static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock)
+{
+ struct lu_device_type *dtype;
+ const struct cl_page_slice *slice;
+
+ dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type;
+ slice = cl_page_at(page, dtype);
+ LASSERT(slice != NULL);
+ return slice->cpl_page->cp_index;
+}
+
+/**
+ * Check if page @page is covered by an extra lock or discard it.
+ */
+static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page, void *cbdata)
+{
+ struct cl_thread_info *info = cl_env_info(env);
+ struct cl_lock *lock = cbdata;
+ pgoff_t index = pgoff_at_lock(page, lock);
+
+ if (index >= info->clt_fn_index) {
+ struct cl_lock *tmp;
+
+ /* refresh non-overlapped index */
+ tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index,
+ lock, 1, 0);
+ if (tmp != NULL) {
+ /* Cache the first-non-overlapped index so as to skip
+ * all pages within [index, clt_fn_index). This
+ * is safe because if tmp lock is canceled, it will
+ * discard these pages. */
+ info->clt_fn_index = tmp->cll_descr.cld_end + 1;
+ if (tmp->cll_descr.cld_end == CL_PAGE_EOF)
+ info->clt_fn_index = CL_PAGE_EOF;
+ cl_lock_put(env, tmp);
+ } else if (cl_page_own(env, io, page) == 0) {
+ /* discard the page */
+ cl_page_unmap(env, io, page);
+ cl_page_discard(env, io, page);
+ cl_page_disown(env, io, page);
+ } else {
+ LASSERT(page->cp_state == CPS_FREEING);
+ }
+ }
+
+ info->clt_next_index = index + 1;
+ return CLP_GANG_OKAY;
+}
+
+static int discard_cb(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page, void *cbdata)
+{
+ struct cl_thread_info *info = cl_env_info(env);
+ struct cl_lock *lock = cbdata;
+
+ LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
+ KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
+ !PageWriteback(cl_page_vmpage(env, page))));
+ KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
+ !PageDirty(cl_page_vmpage(env, page))));
+
+ info->clt_next_index = pgoff_at_lock(page, lock) + 1;
+ if (cl_page_own(env, io, page) == 0) {
+ /* discard the page */
+ cl_page_unmap(env, io, page);
+ cl_page_discard(env, io, page);
+ cl_page_disown(env, io, page);
+ } else {
+ LASSERT(page->cp_state == CPS_FREEING);
+ }
+
+ return CLP_GANG_OKAY;
+}
+
+/**
+ * Discard pages protected by the given lock. This function traverses the
+ * radix tree to find all covering pages and discards them. If a page is
+ * covered by other locks, it remains in the cache.
+ *
+ * If error happens on any step, the process continues anyway (the reasoning
+ * behind this being that lock cancellation cannot be delayed indefinitely).
+ */
+int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock)
+{
+ struct cl_thread_info *info = cl_env_info(env);
+ struct cl_io *io = &info->clt_io;
+ struct cl_lock_descr *descr = &lock->cll_descr;
+ cl_page_gang_cb_t cb;
+ int res;
+ int result;
+
+ LINVRNT(cl_lock_invariant(env, lock));
+
+ io->ci_obj = cl_object_top(descr->cld_obj);
+ io->ci_ignore_layout = 1;
+ result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
+ if (result != 0)
+ GOTO(out, result);
+
+ cb = descr->cld_mode == CLM_READ ? check_and_discard_cb : discard_cb;
+ info->clt_fn_index = info->clt_next_index = descr->cld_start;
+ do {
+ res = cl_page_gang_lookup(env, descr->cld_obj, io,
+ info->clt_next_index, descr->cld_end,
+ cb, (void *)lock);
+ if (info->clt_next_index > descr->cld_end)
+ break;
+
+ if (res == CLP_GANG_RESCHED)
+ cond_resched();
+ } while (res != CLP_GANG_OKAY);
+out:
+ cl_io_fini(env, io);
+ return result;
+}
+EXPORT_SYMBOL(cl_lock_discard_pages);
+
+/**
+ * Eliminate all locks for a given object.
+ *
+ * Caller has to guarantee that no lock is in active use.
+ *
+ * \param cancel when this is set, cl_locks_prune() cancels locks before
+ * destroying.
+ */
+void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
+{
+ struct cl_object_header *head;
+ struct cl_lock *lock;
+
+ head = cl_object_header(obj);
+ /*
+ * If locks are destroyed without cancellation, all pages must be
+ * already destroyed (as otherwise they will be left unprotected).
+ */
+ LASSERT(ergo(!cancel,
+ head->coh_tree.rnode == NULL && head->coh_pages == 0));
+
+ spin_lock(&head->coh_lock_guard);
+ while (!list_empty(&head->coh_locks)) {
+ lock = container_of(head->coh_locks.next,
+ struct cl_lock, cll_linkage);
+ cl_lock_get_trust(lock);
+ spin_unlock(&head->coh_lock_guard);
+ lu_ref_add(&lock->cll_reference, "prune", current);
+
+again:
+ cl_lock_mutex_get(env, lock);
+ if (lock->cll_state < CLS_FREEING) {
+ LASSERT(lock->cll_users <= 1);
+ if (unlikely(lock->cll_users == 1)) {
+ struct l_wait_info lwi = { 0 };
+
+ cl_lock_mutex_put(env, lock);
+ l_wait_event(lock->cll_wq,
+ lock->cll_users == 0,
+ &lwi);
+ goto again;
+ }
+
+ if (cancel)
+ cl_lock_cancel(env, lock);
+ cl_lock_delete(env, lock);
+ }
+ cl_lock_mutex_put(env, lock);
+ lu_ref_del(&lock->cll_reference, "prune", current);
+ cl_lock_put(env, lock);
+ spin_lock(&head->coh_lock_guard);
+ }
+ spin_unlock(&head->coh_lock_guard);
+}
+EXPORT_SYMBOL(cl_locks_prune);
+
+static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
+ const struct cl_io *io,
+ const struct cl_lock_descr *need,
+ const char *scope, const void *source)
+{
+ struct cl_lock *lock;
+
+ while (1) {
+ lock = cl_lock_find(env, io, need);
+ if (IS_ERR(lock))
+ break;
+ cl_lock_mutex_get(env, lock);
+ if (lock->cll_state < CLS_FREEING &&
+ !(lock->cll_flags & CLF_CANCELLED)) {
+ cl_lock_hold_mod(env, lock, +1);
+ lu_ref_add(&lock->cll_holders, scope, source);
+ lu_ref_add(&lock->cll_reference, scope, source);
+ break;
+ }
+ cl_lock_mutex_put(env, lock);
+ cl_lock_put(env, lock);
+ }
+ return lock;
+}
+
+/**
+ * Returns a lock matching \a need description with a reference and a hold on
+ * it.
+ *
+ * This is much like cl_lock_find(), except that cl_lock_hold() additionally
+ * guarantees that lock is not in the CLS_FREEING state on return.
+ */
+struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
+ const struct cl_lock_descr *need,
+ const char *scope, const void *source)
+{
+ struct cl_lock *lock;
+
+ lock = cl_lock_hold_mutex(env, io, need, scope, source);
+ if (!IS_ERR(lock))
+ cl_lock_mutex_put(env, lock);
+ return lock;
+}
+EXPORT_SYMBOL(cl_lock_hold);
+
+/**
+ * Main high-level entry point of cl_lock interface that finds existing or
+ * enqueues new lock matching given description.
+ */
+struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
+ const struct cl_lock_descr *need,
+ const char *scope, const void *source)
+{
+ struct cl_lock *lock;
+ int rc;
+ __u32 enqflags = need->cld_enq_flags;
+
+ do {
+ lock = cl_lock_hold_mutex(env, io, need, scope, source);
+ if (IS_ERR(lock))
+ break;
+
+ rc = cl_enqueue_locked(env, lock, io, enqflags);
+ if (rc == 0) {
+ if (cl_lock_fits_into(env, lock, need, io)) {
+ if (!(enqflags & CEF_AGL)) {
+ cl_lock_mutex_put(env, lock);
+ cl_lock_lockdep_acquire(env, lock,
+ enqflags);
+ break;
+ }
+ rc = 1;
+ }
+ cl_unuse_locked(env, lock);
+ }
+ cl_lock_trace(D_DLMTRACE, env,
+ rc <= 0 ? "enqueue failed" : "agl succeeded", lock);
+ cl_lock_hold_release(env, lock, scope, source);
+ cl_lock_mutex_put(env, lock);
+ lu_ref_del(&lock->cll_reference, scope, source);
+ cl_lock_put(env, lock);
+ if (rc > 0) {
+ LASSERT(enqflags & CEF_AGL);
+ lock = NULL;
+ } else if (rc != 0) {
+ lock = ERR_PTR(rc);
+ }
+ } while (rc == 0);
+ return lock;
+}
+EXPORT_SYMBOL(cl_lock_request);
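+
+/*
+ * A sketch of how a top-level consumer pairs cl_lock_request() with
+ * cl_lock_release(); io, need, scope and source are supplied by the
+ * (hypothetical) caller:
+ *
+ *	lock = cl_lock_request(env, io, need, scope, source);
+ *	if (IS_ERR(lock))
+ *		return PTR_ERR(lock);
+ *	if (lock != NULL) {
+ *		rc = cl_wait(env, lock);
+ *		if (rc == 0) {
+ *			... I/O under the granted lock ...
+ *			cl_unuse(env, lock);
+ *		}
+ *		cl_lock_release(env, lock, scope, source);
+ *	}
+ *
+ * A NULL return is only possible with CEF_AGL, as can be seen above.
+ */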
+
+/**
+ * Adds a hold to a known lock.
+ */
+void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
+ const char *scope, const void *source)
+{
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(cl_lock_invariant(env, lock));
+ LASSERT(lock->cll_state != CLS_FREEING);
+
+ cl_lock_hold_mod(env, lock, +1);
+ cl_lock_get(lock);
+ lu_ref_add(&lock->cll_holders, scope, source);
+ lu_ref_add(&lock->cll_reference, scope, source);
+}
+EXPORT_SYMBOL(cl_lock_hold_add);
+
+/**
+ * Releases a hold and a reference on a lock, on which caller acquired a
+ * mutex.
+ */
+void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
+ const char *scope, const void *source)
+{
+ LINVRNT(cl_lock_invariant(env, lock));
+ cl_lock_hold_release(env, lock, scope, source);
+ lu_ref_del(&lock->cll_reference, scope, source);
+ cl_lock_put(env, lock);
+}
+EXPORT_SYMBOL(cl_lock_unhold);
+
+/**
+ * Releases a hold and a reference on a lock, obtained by cl_lock_hold().
+ */
+void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
+ const char *scope, const void *source)
+{
+ LINVRNT(cl_lock_invariant(env, lock));
+ cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
+ cl_lock_mutex_get(env, lock);
+ cl_lock_hold_release(env, lock, scope, source);
+ cl_lock_mutex_put(env, lock);
+ lu_ref_del(&lock->cll_reference, scope, source);
+ cl_lock_put(env, lock);
+}
+EXPORT_SYMBOL(cl_lock_release);
+
+void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
+{
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(cl_lock_invariant(env, lock));
+
+ cl_lock_used_mod(env, lock, +1);
+}
+EXPORT_SYMBOL(cl_lock_user_add);
+
+void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
+{
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(cl_lock_invariant(env, lock));
+ LASSERT(lock->cll_users > 0);
+
+ cl_lock_used_mod(env, lock, -1);
+ if (lock->cll_users == 0)
+ wake_up_all(&lock->cll_wq);
+}
+EXPORT_SYMBOL(cl_lock_user_del);
+
+const char *cl_lock_mode_name(const enum cl_lock_mode mode)
+{
+ static const char *names[] = {
+ [CLM_PHANTOM] = "P",
+ [CLM_READ] = "R",
+ [CLM_WRITE] = "W",
+ [CLM_GROUP] = "G"
+ };
+ if (0 <= mode && mode < ARRAY_SIZE(names))
+ return names[mode];
+ else
+ return "U";
+}
+EXPORT_SYMBOL(cl_lock_mode_name);
+
+/**
+ * Prints human readable representation of a lock description.
+ */
+void cl_lock_descr_print(const struct lu_env *env, void *cookie,
+ lu_printer_t printer,
+ const struct cl_lock_descr *descr)
+{
+ const struct lu_fid *fid;
+
+ fid = lu_object_fid(&descr->cld_obj->co_lu);
+ (*printer)(env, cookie, DDESCR"@"DFID, PDESCR(descr), PFID(fid));
+}
+EXPORT_SYMBOL(cl_lock_descr_print);
+
+/**
+ * Prints human readable representation of \a lock to the \a f.
+ */
+void cl_lock_print(const struct lu_env *env, void *cookie,
+ lu_printer_t printer, const struct cl_lock *lock)
+{
+ const struct cl_lock_slice *slice;
+ (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
+ lock, atomic_read(&lock->cll_ref),
+ lock->cll_state, lock->cll_error, lock->cll_holds,
+ lock->cll_users, lock->cll_flags);
+ cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
+ (*printer)(env, cookie, " {\n");
+
+ list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ (*printer)(env, cookie, " %s@%p: ",
+ slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
+ slice);
+ if (slice->cls_ops->clo_print != NULL)
+ slice->cls_ops->clo_print(env, cookie, printer, slice);
+ (*printer)(env, cookie, "\n");
+ }
+ (*printer)(env, cookie, "} lock@%p\n", lock);
+}
+EXPORT_SYMBOL(cl_lock_print);
+
+int cl_lock_init(void)
+{
+ return lu_kmem_init(cl_lock_caches);
+}
+
+void cl_lock_fini(void)
+{
+ lu_kmem_fini(cl_lock_caches);
+}
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
new file mode 100644
index 0000000..7b0e9d2
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c
@@ -0,0 +1,1137 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Client Lustre Object.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+/*
+ * Locking.
+ *
+ * i_mutex
+ * PG_locked
+ * ->coh_page_guard
+ * ->coh_lock_guard
+ * ->coh_attr_guard
+ * ->ls_guard
+ */
+
+#define DEBUG_SUBSYSTEM S_CLASS
+
+#include <linux/libcfs/libcfs.h>
+/* class_put_type() */
+#include <obd_class.h>
+#include <obd_support.h>
+#include <lustre_fid.h>
+#include <linux/list.h>
+#include <linux/libcfs/libcfs_hash.h> /* for cfs_hash stuff */
+#include <cl_object.h>
+#include "cl_internal.h"
+
+static struct kmem_cache *cl_env_kmem;
+
+/** Lock class of cl_object_header::coh_page_guard */
+static struct lock_class_key cl_page_guard_class;
+/** Lock class of cl_object_header::coh_lock_guard */
+static struct lock_class_key cl_lock_guard_class;
+/** Lock class of cl_object_header::coh_attr_guard */
+static struct lock_class_key cl_attr_guard_class;
+
+extern __u32 lu_context_tags_default;
+extern __u32 lu_session_tags_default;
+/**
+ * Initialize cl_object_header.
+ */
+int cl_object_header_init(struct cl_object_header *h)
+{
+ int result;
+
+ result = lu_object_header_init(&h->coh_lu);
+ if (result == 0) {
+ spin_lock_init(&h->coh_page_guard);
+ spin_lock_init(&h->coh_lock_guard);
+ spin_lock_init(&h->coh_attr_guard);
+ lockdep_set_class(&h->coh_page_guard, &cl_page_guard_class);
+ lockdep_set_class(&h->coh_lock_guard, &cl_lock_guard_class);
+ lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
+ h->coh_pages = 0;
+ /* XXX hard coded GFP_* mask. */
+ INIT_RADIX_TREE(&h->coh_tree, GFP_ATOMIC);
+ INIT_LIST_HEAD(&h->coh_locks);
+ h->coh_page_bufsize = ALIGN(sizeof(struct cl_page), 8);
+ }
+ return result;
+}
+EXPORT_SYMBOL(cl_object_header_init);
+
+/**
+ * Finalize cl_object_header.
+ */
+void cl_object_header_fini(struct cl_object_header *h)
+{
+ LASSERT(list_empty(&h->coh_locks));
+ lu_object_header_fini(&h->coh_lu);
+}
+EXPORT_SYMBOL(cl_object_header_fini);
+
+/**
+ * Returns a cl_object with a given \a fid.
+ *
+ * Returns either cached or newly created object. Additional reference on the
+ * returned object is acquired.
+ *
+ * \see lu_object_find(), cl_page_find(), cl_lock_find()
+ */
+struct cl_object *cl_object_find(const struct lu_env *env,
+ struct cl_device *cd, const struct lu_fid *fid,
+ const struct cl_object_conf *c)
+{
+ might_sleep();
+ return lu2cl(lu_object_find_slice(env, cl2lu_dev(cd), fid, &c->coc_lu));
+}
+EXPORT_SYMBOL(cl_object_find);
+
+/**
+ * Releases a reference on \a o.
+ *
+ * When last reference is released object is returned to the cache, unless
+ * lu_object_header_flags::LU_OBJECT_HEARD_BANSHEE bit is set in its header.
+ *
+ * \see cl_page_put(), cl_lock_put().
+ */
+void cl_object_put(const struct lu_env *env, struct cl_object *o)
+{
+ lu_object_put(env, &o->co_lu);
+}
+EXPORT_SYMBOL(cl_object_put);
+
+/**
+ * Acquire an additional reference to the object \a o.
+ *
+ * This can only be used to acquire _additional_ reference, i.e., caller
+ * already has to possess at least one reference to \a o before calling this.
+ *
+ * \see cl_page_get(), cl_lock_get().
+ */
+void cl_object_get(struct cl_object *o)
+{
+ lu_object_get(&o->co_lu);
+}
+EXPORT_SYMBOL(cl_object_get);
+
+/**
+ * Returns the top-object for a given \a o.
+ *
+ * \see cl_page_top(), cl_io_top()
+ */
+struct cl_object *cl_object_top(struct cl_object *o)
+{
+ struct cl_object_header *hdr = cl_object_header(o);
+ struct cl_object *top;
+
+ while (hdr->coh_parent != NULL)
+ hdr = hdr->coh_parent;
+
+ top = lu2cl(lu_object_top(&hdr->coh_lu));
+ CDEBUG(D_TRACE, "%p -> %p\n", o, top);
+ return top;
+}
+EXPORT_SYMBOL(cl_object_top);
+
+/**
+ * Returns pointer to the lock protecting data-attributes for the given object
+ * \a o.
+ *
+ * Data-attributes are protected by the cl_object_header::coh_attr_guard
+ * spin-lock in the top-object.
+ *
+ * \see cl_attr, cl_object_attr_lock(), cl_object_operations::coo_attr_get().
+ */
+static spinlock_t *cl_object_attr_guard(struct cl_object *o)
+{
+ return &cl_object_header(cl_object_top(o))->coh_attr_guard;
+}
+
+/**
+ * Locks data-attributes.
+ *
+ * Prevents data-attributes from changing, until lock is released by
+ * cl_object_attr_unlock(). This has to be called before calls to
+ * cl_object_attr_get(), cl_object_attr_set().
+ */
+void cl_object_attr_lock(struct cl_object *o)
+{
+ spin_lock(cl_object_attr_guard(o));
+}
+EXPORT_SYMBOL(cl_object_attr_lock);
+
+/**
+ * Releases data-attributes lock, acquired by cl_object_attr_lock().
+ */
+void cl_object_attr_unlock(struct cl_object *o)
+{
+ spin_unlock(cl_object_attr_guard(o));
+}
+EXPORT_SYMBOL(cl_object_attr_unlock);
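+
+/*
+ * Attribute access is always bracketed by the attribute lock; a sketch, with
+ * attr and valid supplied by the (hypothetical) caller:
+ *
+ *	cl_object_attr_lock(obj);
+ *	rc = cl_object_attr_get(env, obj, attr);
+ *	if (rc == 0)
+ *		rc = cl_object_attr_set(env, obj, attr, valid);
+ *	cl_object_attr_unlock(obj);
+ */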
+
+/**
+ * Returns data-attributes of an object \a obj.
+ *
+ * Every layer is asked (by calling cl_object_operations::coo_attr_get())
+ * top-to-bottom to fill in parts of \a attr that this layer is responsible
+ * for.
+ */
+int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
+ struct cl_attr *attr)
+{
+ struct lu_object_header *top;
+ int result;
+
+ LASSERT(spin_is_locked(cl_object_attr_guard(obj)));
+
+ top = obj->co_lu.lo_header;
+ result = 0;
+ list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
+ if (obj->co_ops->coo_attr_get != NULL) {
+ result = obj->co_ops->coo_attr_get(env, obj, attr);
+ if (result != 0) {
+ if (result > 0)
+ result = 0;
+ break;
+ }
+ }
+ }
+ return result;
+}
+EXPORT_SYMBOL(cl_object_attr_get);
+
+/**
+ * Updates data-attributes of an object \a obj.
+ *
+ * Only attributes, mentioned in a validness bit-mask \a v are
+ * updated. Calls cl_object_operations::coo_attr_set() on every layer, bottom
+ * to top.
+ */
+int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_attr *attr, unsigned v)
+{
+ struct lu_object_header *top;
+ int result;
+
+ LASSERT(spin_is_locked(cl_object_attr_guard(obj)));
+
+ top = obj->co_lu.lo_header;
+ result = 0;
+ list_for_each_entry_reverse(obj, &top->loh_layers,
+ co_lu.lo_linkage) {
+ if (obj->co_ops->coo_attr_set != NULL) {
+ result = obj->co_ops->coo_attr_set(env, obj, attr, v);
+ if (result != 0) {
+ if (result > 0)
+ result = 0;
+ break;
+ }
+ }
+ }
+ return result;
+}
+EXPORT_SYMBOL(cl_object_attr_set);
+
+/**
+ * Notifies layers (bottom-to-top) that glimpse AST was received.
+ *
+ * Layers have to fill \a lvb fields with information that will be shipped
+ * back to glimpse issuer.
+ *
+ * \see cl_lock_operations::clo_glimpse()
+ */
+int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
+ struct ost_lvb *lvb)
+{
+ struct lu_object_header *top;
+ int result;
+
+ top = obj->co_lu.lo_header;
+ result = 0;
+ list_for_each_entry_reverse(obj, &top->loh_layers,
+ co_lu.lo_linkage) {
+ if (obj->co_ops->coo_glimpse != NULL) {
+ result = obj->co_ops->coo_glimpse(env, obj, lvb);
+ if (result != 0)
+ break;
+ }
+ }
+ LU_OBJECT_HEADER(D_DLMTRACE, env, lu_object_top(top),
+ "size: "LPU64" mtime: "LPU64" atime: "LPU64" "
+ "ctime: "LPU64" blocks: "LPU64"\n",
+ lvb->lvb_size, lvb->lvb_mtime, lvb->lvb_atime,
+ lvb->lvb_ctime, lvb->lvb_blocks);
+ return result;
+}
+EXPORT_SYMBOL(cl_object_glimpse);
+
+/**
+ * Updates a configuration of an object \a obj.
+ */
+int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_object_conf *conf)
+{
+ struct lu_object_header *top;
+ int result;
+
+ top = obj->co_lu.lo_header;
+ result = 0;
+ list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
+ if (obj->co_ops->coo_conf_set != NULL) {
+ result = obj->co_ops->coo_conf_set(env, obj, conf);
+ if (result != 0)
+ break;
+ }
+ }
+ return result;
+}
+EXPORT_SYMBOL(cl_conf_set);
+
+/**
+ * Helper function removing all object locks, and marking object for
+ * deletion. All object pages must have been deleted at this point.
+ *
+ * This is called by cl_inode_fini() and lov_object_delete() to destroy top-
+ * and sub- objects respectively.
+ */
+void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
+{
+ struct cl_object_header *hdr;
+
+ hdr = cl_object_header(obj);
+ LASSERT(hdr->coh_tree.rnode == NULL);
+ LASSERT(hdr->coh_pages == 0);
+
+ set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
+ /*
+ * Destroy all locks. Object destruction (including cl_inode_fini())
+ * cannot cancel the locks, because in the case of a local client,
+ * where client and server share the same thread running
+ * prune_icache(), this can dead-lock with ldlm_cancel_handler()
+ * waiting on __wait_on_freeing_inode().
+ */
+ cl_locks_prune(env, obj, 0);
+}
+EXPORT_SYMBOL(cl_object_kill);
+
+/**
+ * Prunes caches of pages and locks for this object.
+ */
+void cl_object_prune(const struct lu_env *env, struct cl_object *obj)
+{
+ cl_pages_prune(env, obj);
+ cl_locks_prune(env, obj, 1);
+}
+EXPORT_SYMBOL(cl_object_prune);
+
+/**
+ * Check if the object has locks.
+ */
+int cl_object_has_locks(struct cl_object *obj)
+{
+ struct cl_object_header *head = cl_object_header(obj);
+ int has;
+
+ spin_lock(&head->coh_lock_guard);
+ has = list_empty(&head->coh_locks);
+ spin_unlock(&head->coh_lock_guard);
+
+ return (has == 0);
+}
+EXPORT_SYMBOL(cl_object_has_locks);
+
+void cache_stats_init(struct cache_stats *cs, const char *name)
+{
+ int i;
+
+ cs->cs_name = name;
+ for (i = 0; i < CS_NR; i++)
+ atomic_set(&cs->cs_stats[i], 0);
+}
+
+int cache_stats_print(const struct cache_stats *cs, struct seq_file *m, int h)
+{
+ int i;
+ /*
+ * lookup hit total cached create
+ * env: ...... ...... ...... ...... ......
+ */
+ if (h) {
+ const char *names[CS_NR] = CS_NAMES;
+
+ seq_printf(m, "%6s", " ");
+ for (i = 0; i < CS_NR; i++)
+ seq_printf(m, "%8s", names[i]);
+ seq_printf(m, "\n");
+ }
+
+ seq_printf(m, "%5.5s:", cs->cs_name);
+ for (i = 0; i < CS_NR; i++)
+ seq_printf(m, "%8u", atomic_read(&cs->cs_stats[i]));
+ return 0;
+}
+
+/**
+ * Initialize client site.
+ *
+ * Perform common initialization (lu_site_init()), and initialize statistical
+ * counters. Also perform global initializations on the first call.
+ */
+int cl_site_init(struct cl_site *s, struct cl_device *d)
+{
+ int i;
+ int result;
+
+ result = lu_site_init(&s->cs_lu, &d->cd_lu_dev);
+ if (result == 0) {
+ cache_stats_init(&s->cs_pages, "pages");
+ cache_stats_init(&s->cs_locks, "locks");
+ for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i)
+ atomic_set(&s->cs_pages_state[i], 0);
+ for (i = 0; i < ARRAY_SIZE(s->cs_locks_state); ++i)
+ atomic_set(&s->cs_locks_state[i], 0);
+ }
+ return result;
+}
+EXPORT_SYMBOL(cl_site_init);
+
+/**
+ * Finalize client site. Dual to cl_site_init().
+ */
+void cl_site_fini(struct cl_site *s)
+{
+ lu_site_fini(&s->cs_lu);
+}
+EXPORT_SYMBOL(cl_site_fini);
+
+static struct cache_stats cl_env_stats = {
+ .cs_name = "envs",
+ .cs_stats = { ATOMIC_INIT(0), }
+};
+
+/**
+ * Outputs client site statistical counters into a buffer. Suitable for
+ * ll_rd_*()-style functions.
+ */
+int cl_site_stats_print(const struct cl_site *site, struct seq_file *m)
+{
+ int i;
+ static const char *pstate[] = {
+ [CPS_CACHED] = "c",
+ [CPS_OWNED] = "o",
+ [CPS_PAGEOUT] = "w",
+ [CPS_PAGEIN] = "r",
+ [CPS_FREEING] = "f"
+ };
+ static const char *lstate[] = {
+ [CLS_NEW] = "n",
+ [CLS_QUEUING] = "q",
+ [CLS_ENQUEUED] = "e",
+ [CLS_HELD] = "h",
+ [CLS_INTRANSIT] = "t",
+ [CLS_CACHED] = "c",
+ [CLS_FREEING] = "f"
+ };
+/*
+ lookup hit total busy create
+pages: ...... ...... ...... ...... ...... [...... ...... ...... ......]
+locks: ...... ...... ...... ...... ...... [...... ...... ...... ...... ......]
+ env: ...... ...... ...... ...... ......
+ */
+ lu_site_stats_print(&site->cs_lu, m);
+ cache_stats_print(&site->cs_pages, m, 1);
+ seq_printf(m, " [");
+ for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i)
+ seq_printf(m, "%s: %u ", pstate[i],
+ atomic_read(&site->cs_pages_state[i]));
+ seq_printf(m, "]\n");
+ cache_stats_print(&site->cs_locks, m, 0);
+ seq_printf(m, " [");
+ for (i = 0; i < ARRAY_SIZE(site->cs_locks_state); ++i)
+ seq_printf(m, "%s: %u ", lstate[i],
+ atomic_read(&site->cs_locks_state[i]));
+ seq_printf(m, "]\n");
+ cache_stats_print(&cl_env_stats, m, 0);
+ seq_printf(m, "\n");
+ return 0;
+}
+EXPORT_SYMBOL(cl_site_stats_print);
+
+/*****************************************************************************
+ *
+ * lu_env handling on client.
+ *
+ */
+
+/**
+ * The most efficient way is to store the cl_env pointer in task-specific
+ * structures. On Linux, it won't be easy to use task_struct->journal_info,
+ * because Lustre code may call into another fs which has certain assumptions
+ * about journal_info. Currently the following fields in task_struct have been
+ * identified as usable for this purpose:
+ * - cl_env: for liblustre.
+ * - tux_info: only on the RedHat kernel.
+ * - ...
+ * \note As long as we use task_struct to store cl_env, we assume that once
+ * we have been called into Lustre, we never call into other parts of the
+ * kernel that use those fields in task_struct without explicitly exiting
+ * Lustre.
+ *
+ * If no space in task_struct is available, a hash will be used.
+ * bz20044, bz22683.
+ */
+
+struct cl_env {
+ void *ce_magic;
+ struct lu_env ce_lu;
+ struct lu_context ce_ses;
+
+ /**
+ * This allows cl_env to be entered into cl_env_hash which implements
+ * the current thread -> client environment lookup.
+ */
+ struct hlist_node ce_node;
+ /**
+ * Owner for the current cl_env.
+ *
+ * If LL_TASK_CL_ENV is defined, this points to the owning current,
+ * and is used only for debugging purposes;
+ * otherwise a hash is used, and this is the key for cfs_hash.
+ * Currently the thread pid is stored. Note that using the thread
+ * pointer would lead to an unbalanced hash because of its specific
+ * allocation locality, which could vary across platforms and OSes,
+ * and even across OS versions.
+ */
+ void *ce_owner;
+
+ /*
+ * Linkage into global list of all client environments. Used for
+ * garbage collection.
+ */
+ struct list_head ce_linkage;
+ /*
+ * Reference counter for this environment.
+ */
+ int ce_ref;
+ /*
+ * Debugging field: address of the caller who made original
+ * allocation.
+ */
+ void *ce_debug;
+};
+
+#define CL_ENV_INC(counter)
+#define CL_ENV_DEC(counter)
+
+static void cl_env_init0(struct cl_env *cle, void *debug)
+{
+ LASSERT(cle->ce_ref == 0);
+ LASSERT(cle->ce_magic == &cl_env_init0);
+ LASSERT(cle->ce_debug == NULL && cle->ce_owner == NULL);
+
+ cle->ce_ref = 1;
+ cle->ce_debug = debug;
+ CL_ENV_INC(busy);
+}
+
+
+/*
+ * The implementation of using hash table to connect cl_env and thread
+ */
+
+static cfs_hash_t *cl_env_hash;
+
+static unsigned cl_env_hops_hash(cfs_hash_t *lh,
+ const void *key, unsigned mask)
+{
+#if BITS_PER_LONG == 64
+ return cfs_hash_u64_hash((__u64)key, mask);
+#else
+ return cfs_hash_u32_hash((__u32)key, mask);
+#endif
+}
+
+static void *cl_env_hops_obj(struct hlist_node *hn)
+{
+ struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);
+ LASSERT(cle->ce_magic == &cl_env_init0);
+ return (void *)cle;
+}
+
+static int cl_env_hops_keycmp(const void *key, struct hlist_node *hn)
+{
+ struct cl_env *cle = cl_env_hops_obj(hn);
+
+ LASSERT(cle->ce_owner != NULL);
+ return (key == cle->ce_owner);
+}
+
+static void cl_env_hops_noop(cfs_hash_t *hs, struct hlist_node *hn)
+{
+ struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);
+ LASSERT(cle->ce_magic == &cl_env_init0);
+}
+
+static cfs_hash_ops_t cl_env_hops = {
+ .hs_hash = cl_env_hops_hash,
+ .hs_key = cl_env_hops_obj,
+ .hs_keycmp = cl_env_hops_keycmp,
+ .hs_object = cl_env_hops_obj,
+ .hs_get = cl_env_hops_noop,
+ .hs_put_locked = cl_env_hops_noop,
+};
+
+static inline struct cl_env *cl_env_fetch(void)
+{
+ struct cl_env *cle;
+
+ cle = cfs_hash_lookup(cl_env_hash, (void *) (long) current->pid);
+ LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
+ return cle;
+}
+
+static inline void cl_env_attach(struct cl_env *cle)
+{
+ if (cle) {
+ int rc;
+
+ LASSERT(cle->ce_owner == NULL);
+ cle->ce_owner = (void *) (long) current->pid;
+ rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
+ &cle->ce_node);
+ LASSERT(rc == 0);
+ }
+}
+
+static inline void cl_env_do_detach(struct cl_env *cle)
+{
+ void *cookie;
+
+ LASSERT(cle->ce_owner == (void *) (long) current->pid);
+ cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
+ &cle->ce_node);
+ LASSERT(cookie == cle);
+ cle->ce_owner = NULL;
+}
+
+static int cl_env_store_init(void)
+{
+ cl_env_hash = cfs_hash_create("cl_env",
+ HASH_CL_ENV_BITS, HASH_CL_ENV_BITS,
+ HASH_CL_ENV_BKT_BITS, 0,
+ CFS_HASH_MIN_THETA,
+ CFS_HASH_MAX_THETA,
+ &cl_env_hops,
+ CFS_HASH_RW_BKTLOCK);
+ return cl_env_hash != NULL ? 0 : -ENOMEM;
+}
+
+static void cl_env_store_fini(void)
+{
+ cfs_hash_putref(cl_env_hash);
+}
+
+
+static inline struct cl_env *cl_env_detach(struct cl_env *cle)
+{
+ if (cle == NULL)
+ cle = cl_env_fetch();
+
+ if (cle && cle->ce_owner)
+ cl_env_do_detach(cle);
+
+ return cle;
+}
+
+static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
+{
+ struct lu_env *env;
+ struct cl_env *cle;
+
+ OBD_SLAB_ALLOC_PTR_GFP(cle, cl_env_kmem, __GFP_IO);
+ if (cle != NULL) {
+ int rc;
+
+ INIT_LIST_HEAD(&cle->ce_linkage);
+ cle->ce_magic = &cl_env_init0;
+ env = &cle->ce_lu;
+ rc = lu_env_init(env, LCT_CL_THREAD|ctx_tags);
+ if (rc == 0) {
+ rc = lu_context_init(&cle->ce_ses,
+ LCT_SESSION | ses_tags);
+ if (rc == 0) {
+ lu_context_enter(&cle->ce_ses);
+ env->le_ses = &cle->ce_ses;
+ cl_env_init0(cle, debug);
+ } else
+ lu_env_fini(env);
+ }
+ if (rc != 0) {
+ OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
+ env = ERR_PTR(rc);
+ } else {
+ CL_ENV_INC(create);
+ CL_ENV_INC(total);
+ }
+ } else
+ env = ERR_PTR(-ENOMEM);
+ return env;
+}
+
+static void cl_env_fini(struct cl_env *cle)
+{
+ CL_ENV_DEC(total);
+ lu_context_fini(&cle->ce_lu.le_ctx);
+ lu_context_fini(&cle->ce_ses);
+ OBD_SLAB_FREE_PTR(cle, cl_env_kmem);
+}
+
+static inline struct cl_env *cl_env_container(struct lu_env *env)
+{
+ return container_of(env, struct cl_env, ce_lu);
+}
+
+struct lu_env *cl_env_peek(int *refcheck)
+{
+ struct lu_env *env;
+ struct cl_env *cle;
+
+ CL_ENV_INC(lookup);
+
+ /* check that we don't go far from untrusted pointer */
+ CLASSERT(offsetof(struct cl_env, ce_magic) == 0);
+
+ env = NULL;
+ cle = cl_env_fetch();
+ if (cle != NULL) {
+ CL_ENV_INC(hit);
+ env = &cle->ce_lu;
+ *refcheck = ++cle->ce_ref;
+ }
+ CDEBUG(D_OTHER, "%d@%p\n", cle ? cle->ce_ref : 0, cle);
+ return env;
+}
+EXPORT_SYMBOL(cl_env_peek);
+
+/**
+ * Returns lu_env: if there already is an environment associated with the
+ * current thread, it is returned; otherwise a new environment is allocated.
+ *
+ * \param refcheck pointer to a counter used to detect environment leaks. In
+ * the usual case cl_env_get() and cl_env_put() are called in the same lexical
+ * scope and a pointer to the same integer is passed as \a refcheck. This is
+ * used to detect a missed cl_env_put().
+ *
+ * \see cl_env_put()
+ */
+struct lu_env *cl_env_get(int *refcheck)
+{
+ struct lu_env *env;
+
+ env = cl_env_peek(refcheck);
+ if (env == NULL) {
+ env = cl_env_new(lu_context_tags_default,
+ lu_session_tags_default,
+ __builtin_return_address(0));
+
+ if (!IS_ERR(env)) {
+ struct cl_env *cle;
+
+ cle = cl_env_container(env);
+ cl_env_attach(cle);
+ *refcheck = cle->ce_ref;
+ CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
+ }
+ }
+ return env;
+}
+EXPORT_SYMBOL(cl_env_get);
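+/*
+ * Minimal usage sketch (illustration only, not part of the original code):
+ * cl_env_get() and cl_env_put() are normally paired in the same lexical
+ * scope, with a pointer to the same refcheck integer passed to both:
+ *
+ *	int refcheck;
+ *	struct lu_env *env;
+ *
+ *	env = cl_env_get(&refcheck);
+ *	if (IS_ERR(env))
+ *		return PTR_ERR(env);
+ *	... use env ...
+ *	cl_env_put(env, &refcheck);
+ */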
+
+/**
+ * Forces an allocation of a fresh environment with given tags.
+ *
+ * \see cl_env_get()
+ */
+struct lu_env *cl_env_alloc(int *refcheck, __u32 tags)
+{
+ struct lu_env *env;
+
+ LASSERT(cl_env_peek(refcheck) == NULL);
+ env = cl_env_new(tags, tags, __builtin_return_address(0));
+ if (!IS_ERR(env)) {
+ struct cl_env *cle;
+
+ cle = cl_env_container(env);
+ *refcheck = cle->ce_ref;
+ CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
+ }
+ return env;
+}
+EXPORT_SYMBOL(cl_env_alloc);
+
+static void cl_env_exit(struct cl_env *cle)
+{
+ LASSERT(cle->ce_owner == NULL);
+ lu_context_exit(&cle->ce_lu.le_ctx);
+ lu_context_exit(&cle->ce_ses);
+}
+
+/**
+ * Release an environment.
+ *
+ * Decrement the \a env reference counter. When the counter drops to 0, nothing
+ * in this thread is using the environment and it is returned to the allocation
+ * cache, or freed straight away if the cache is already large enough.
+ */
+void cl_env_put(struct lu_env *env, int *refcheck)
+{
+ struct cl_env *cle;
+
+ cle = cl_env_container(env);
+
+ LASSERT(cle->ce_ref > 0);
+ LASSERT(ergo(refcheck != NULL, cle->ce_ref == *refcheck));
+
+ CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
+ if (--cle->ce_ref == 0) {
+ CL_ENV_DEC(busy);
+ cl_env_detach(cle);
+ cle->ce_debug = NULL;
+ cl_env_exit(cle);
+ cl_env_fini(cle);
+ }
+}
+EXPORT_SYMBOL(cl_env_put);
+
+/**
+ * Declares a point of re-entrancy.
+ *
+ * \see cl_env_reexit()
+ */
+void *cl_env_reenter(void)
+{
+ return cl_env_detach(NULL);
+}
+EXPORT_SYMBOL(cl_env_reenter);
+
+/**
+ * Exits re-entrancy.
+ */
+void cl_env_reexit(void *cookie)
+{
+ cl_env_detach(NULL);
+ cl_env_attach(cookie);
+}
+EXPORT_SYMBOL(cl_env_reexit);
+
+/**
+ * Set up a user-supplied \a env as the current environment. This is used to
+ * guarantee that an environment exists even when cl_env_get() fails. It is up
+ * to the user to ensure proper concurrency control.
+ *
+ * \see cl_env_unplant()
+ */
+void cl_env_implant(struct lu_env *env, int *refcheck)
+{
+ struct cl_env *cle = cl_env_container(env);
+
+ LASSERT(cle->ce_ref > 0);
+
+ cl_env_attach(cle);
+ cl_env_get(refcheck);
+ CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
+}
+EXPORT_SYMBOL(cl_env_implant);
+
+/**
+ * Detach environment installed earlier by cl_env_implant().
+ */
+void cl_env_unplant(struct lu_env *env, int *refcheck)
+{
+ struct cl_env *cle = cl_env_container(env);
+
+ LASSERT(cle->ce_ref > 1);
+
+ CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
+
+ cl_env_detach(cle);
+ cl_env_put(env, refcheck);
+}
+EXPORT_SYMBOL(cl_env_unplant);
+
+struct lu_env *cl_env_nested_get(struct cl_env_nest *nest)
+{
+ struct lu_env *env;
+
+ nest->cen_cookie = NULL;
+ env = cl_env_peek(&nest->cen_refcheck);
+ if (env != NULL) {
+ if (!cl_io_is_going(env))
+ return env;
+
+ cl_env_put(env, &nest->cen_refcheck);
+ nest->cen_cookie = cl_env_reenter();
+ }
+ env = cl_env_get(&nest->cen_refcheck);
+ if (IS_ERR(env)) {
+ cl_env_reexit(nest->cen_cookie);
+ return env;
+ }
+
+ LASSERT(!cl_io_is_going(env));
+ return env;
+}
+EXPORT_SYMBOL(cl_env_nested_get);
+
+void cl_env_nested_put(struct cl_env_nest *nest, struct lu_env *env)
+{
+ cl_env_put(env, &nest->cen_refcheck);
+ cl_env_reexit(nest->cen_cookie);
+}
+EXPORT_SYMBOL(cl_env_nested_put);
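+/*
+ * Minimal usage sketch (illustration only, not part of the original code):
+ * the nested helpers bracket a re-entrant use of the client environment:
+ *
+ *	struct cl_env_nest nest;
+ *	struct lu_env *env;
+ *
+ *	env = cl_env_nested_get(&nest);
+ *	if (IS_ERR(env))
+ *		return PTR_ERR(env);
+ *	... nested work using env ...
+ *	cl_env_nested_put(&nest, env);
+ */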
+
+/**
+ * Converts struct cl_attr to struct ost_lvb.
+ *
+ * \see cl_lvb2attr
+ */
+void cl_attr2lvb(struct ost_lvb *lvb, const struct cl_attr *attr)
+{
+ lvb->lvb_size = attr->cat_size;
+ lvb->lvb_mtime = attr->cat_mtime;
+ lvb->lvb_atime = attr->cat_atime;
+ lvb->lvb_ctime = attr->cat_ctime;
+ lvb->lvb_blocks = attr->cat_blocks;
+}
+EXPORT_SYMBOL(cl_attr2lvb);
+
+/**
+ * Converts struct ost_lvb to struct cl_attr.
+ *
+ * \see cl_attr2lvb
+ */
+void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb)
+{
+ attr->cat_size = lvb->lvb_size;
+ attr->cat_mtime = lvb->lvb_mtime;
+ attr->cat_atime = lvb->lvb_atime;
+ attr->cat_ctime = lvb->lvb_ctime;
+ attr->cat_blocks = lvb->lvb_blocks;
+}
+EXPORT_SYMBOL(cl_lvb2attr);
+
+/*****************************************************************************
+ *
+ * Temporary prototype thing: mirror obd-devices into cl devices.
+ *
+ */
+
+struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
+ struct lu_device_type *ldt,
+ struct lu_device *next)
+{
+ const char *typename;
+ struct lu_device *d;
+
+ LASSERT(ldt != NULL);
+
+ typename = ldt->ldt_name;
+ d = ldt->ldt_ops->ldto_device_alloc(env, ldt, NULL);
+ if (!IS_ERR(d)) {
+ int rc;
+
+ if (site != NULL)
+ d->ld_site = site;
+ rc = ldt->ldt_ops->ldto_device_init(env, d, typename, next);
+ if (rc == 0) {
+ lu_device_get(d);
+ lu_ref_add(&d->ld_reference,
+ "lu-stack", &lu_site_init);
+ } else {
+ ldt->ldt_ops->ldto_device_free(env, d);
+ CERROR("can't init device '%s', %d\n", typename, rc);
+ d = ERR_PTR(rc);
+ }
+ } else
+ CERROR("Cannot allocate device: '%s'\n", typename);
+ return lu2cl_dev(d);
+}
+EXPORT_SYMBOL(cl_type_setup);
+
+/**
+ * Finalize device stack by calling lu_stack_fini().
+ */
+void cl_stack_fini(const struct lu_env *env, struct cl_device *cl)
+{
+ lu_stack_fini(env, cl2lu_dev(cl));
+}
+EXPORT_SYMBOL(cl_stack_fini);
+
+int cl_lock_init(void);
+void cl_lock_fini(void);
+
+int cl_page_init(void);
+void cl_page_fini(void);
+
+static struct lu_context_key cl_key;
+
+struct cl_thread_info *cl_env_info(const struct lu_env *env)
+{
+ return lu_context_key_get(&env->le_ctx, &cl_key);
+}
+
+/* defines cl0_key_{init,fini}() */
+LU_KEY_INIT_FINI(cl0, struct cl_thread_info);
+
+static void *cl_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key)
+{
+ struct cl_thread_info *info;
+
+ info = cl0_key_init(ctx, key);
+ if (!IS_ERR(info)) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
+ lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
+ }
+ return info;
+}
+
+static void cl_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
+{
+ struct cl_thread_info *info;
+ int i;
+
+ info = data;
+ for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
+ lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
+ cl0_key_fini(ctx, key, data);
+}
+
+static void cl_key_exit(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
+{
+ struct cl_thread_info *info = data;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i) {
+ LASSERT(info->clt_counters[i].ctc_nr_held == 0);
+ LASSERT(info->clt_counters[i].ctc_nr_used == 0);
+ LASSERT(info->clt_counters[i].ctc_nr_locks_acquired == 0);
+ LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
+ lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
+ lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
+ }
+}
+
+static struct lu_context_key cl_key = {
+ .lct_tags = LCT_CL_THREAD,
+ .lct_init = cl_key_init,
+ .lct_fini = cl_key_fini,
+ .lct_exit = cl_key_exit
+};
+
+static struct lu_kmem_descr cl_object_caches[] = {
+ {
+ .ckd_cache = &cl_env_kmem,
+ .ckd_name = "cl_env_kmem",
+ .ckd_size = sizeof (struct cl_env)
+ },
+ {
+ .ckd_cache = NULL
+ }
+};
+
+/**
+ * Global initialization of cl-data. Create kmem caches, register
+ * lu_context_key's, etc.
+ *
+ * \see cl_global_fini()
+ */
+int cl_global_init(void)
+{
+ int result;
+
+ result = cl_env_store_init();
+ if (result)
+ return result;
+
+ result = lu_kmem_init(cl_object_caches);
+ if (result)
+ goto out_store;
+
+ LU_CONTEXT_KEY_INIT(&cl_key);
+ result = lu_context_key_register(&cl_key);
+ if (result)
+ goto out_kmem;
+
+ result = cl_lock_init();
+ if (result)
+ goto out_context;
+
+ result = cl_page_init();
+ if (result)
+ goto out_lock;
+
+ return 0;
+out_lock:
+ cl_lock_fini();
+out_context:
+ lu_context_key_degister(&cl_key);
+out_kmem:
+ lu_kmem_fini(cl_object_caches);
+out_store:
+ cl_env_store_fini();
+ return result;
+}
+
+/**
+ * Finalization of global cl-data. Dual to cl_global_init().
+ */
+void cl_global_fini(void)
+{
+ cl_lock_fini();
+ cl_page_fini();
+ lu_context_key_degister(&cl_key);
+ lu_kmem_fini(cl_object_caches);
+ cl_env_store_fini();
+}
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
new file mode 100644
index 0000000..2a5ce37
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -0,0 +1,1553 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Client Lustre Page.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_CLASS
+
+#include <linux/libcfs/libcfs.h>
+#include <obd_class.h>
+#include <obd_support.h>
+#include <linux/list.h>
+
+#include <cl_object.h>
+#include "cl_internal.h"
+
+static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
+ int radix);
+
+# define PASSERT(env, page, expr) \
+ do { \
+ if (unlikely(!(expr))) { \
+ CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n"); \
+ LASSERT(0); \
+ } \
+ } while (0)
+
+# define PINVRNT(env, page, exp) \
+ ((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
+
+/* Disable page statistics by default due to the huge performance penalty. */
+#define CS_PAGE_INC(o, item)
+#define CS_PAGE_DEC(o, item)
+#define CS_PAGESTATE_INC(o, state)
+#define CS_PAGESTATE_DEC(o, state)
+
+/**
+ * Internal version of cl_page_top(); it should only be called if the page is
+ * known not to be freed, e.g., with the page referenced, the radix tree lock
+ * held, or the page owned.
+ */
+static struct cl_page *cl_page_top_trusted(struct cl_page *page)
+{
+ while (page->cp_parent != NULL)
+ page = page->cp_parent;
+ return page;
+}
+
+/**
+ * Internal version of cl_page_get().
+ *
+ * This function can be used to obtain an initial reference to a previously
+ * unreferenced cached object. It can be called only if concurrent page
+ * reclamation is somehow prevented, e.g., by locking the page radix-tree
+ * (cl_object_header::coh_page_guard), or by keeping a lock on the VM page
+ * associated with \a page.
+ *
+ * Use with care! Not exported.
+ */
+static void cl_page_get_trust(struct cl_page *page)
+{
+ LASSERT(atomic_read(&page->cp_ref) > 0);
+ atomic_inc(&page->cp_ref);
+}
+
+/**
+ * Returns a slice within a page, corresponding to the given layer in the
+ * device stack.
+ *
+ * \see cl_lock_at()
+ */
+static const struct cl_page_slice *
+cl_page_at_trusted(const struct cl_page *page,
+ const struct lu_device_type *dtype)
+{
+ const struct cl_page_slice *slice;
+
+ page = cl_page_top_trusted((struct cl_page *)page);
+ do {
+ list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+ if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
+ return slice;
+ }
+ page = page->cp_child;
+ } while (page != NULL);
+ return NULL;
+}
+
+/**
+ * Returns a page with given index in the given object, or NULL if no page is
+ * found. Acquires a reference on \a page.
+ *
+ * Locking: called under cl_object_header::coh_page_guard spin-lock.
+ */
+struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index)
+{
+ struct cl_page *page;
+
+ LASSERT(spin_is_locked(&hdr->coh_page_guard));
+
+ page = radix_tree_lookup(&hdr->coh_tree, index);
+ if (page != NULL)
+ cl_page_get_trust(page);
+ return page;
+}
+EXPORT_SYMBOL(cl_page_lookup);
+
+/**
+ * Invokes \a cb on the pages of \a obj within the given [start, end] index
+ * range.
+ *
+ * The lookup gives up before hogging the CPU for too long and returns
+ * CLP_GANG_RESCHED; in that case the caller should implement retry logic
+ * (see cl_pages_prune()).
+ *
+ * Gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely
+ * crucial in the face of [offset, EOF] locks.
+ *
+ * At least one page is processed unless the range covers no pages.
+ */
+int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io, pgoff_t start, pgoff_t end,
+ cl_page_gang_cb_t cb, void *cbdata)
+{
+ struct cl_object_header *hdr;
+ struct cl_page *page;
+ struct cl_page **pvec;
+ const struct cl_page_slice *slice;
+ const struct lu_device_type *dtype;
+ pgoff_t idx;
+ unsigned int nr;
+ unsigned int i;
+ unsigned int j;
+ int res = CLP_GANG_OKAY;
+ int tree_lock = 1;
+
+ idx = start;
+ hdr = cl_object_header(obj);
+ pvec = cl_env_info(env)->clt_pvec;
+ dtype = cl_object_top(obj)->co_lu.lo_dev->ld_type;
+ spin_lock(&hdr->coh_page_guard);
+ while ((nr = radix_tree_gang_lookup(&hdr->coh_tree, (void **)pvec,
+ idx, CLT_PVEC_SIZE)) > 0) {
+ int end_of_region = 0;
+ idx = pvec[nr - 1]->cp_index + 1;
+ for (i = 0, j = 0; i < nr; ++i) {
+ page = pvec[i];
+ pvec[i] = NULL;
+
+ LASSERT(page->cp_type == CPT_CACHEABLE);
+ if (page->cp_index > end) {
+ end_of_region = 1;
+ break;
+ }
+ if (page->cp_state == CPS_FREEING)
+ continue;
+
+ slice = cl_page_at_trusted(page, dtype);
+ /*
+ * Pages of an lsm-less file have no underlying sub-page
+ * for osc, in case of ...
+ */
+ PASSERT(env, page, slice != NULL);
+
+ page = slice->cpl_page;
+ /*
+ * Can safely call cl_page_get_trust() under
+ * radix-tree spin-lock.
+ *
+ * XXX not true, because @page is from object another
+ * than @hdr and protected by different tree lock.
+ */
+ cl_page_get_trust(page);
+ lu_ref_add_atomic(&page->cp_reference,
+ "gang_lookup", current);
+ pvec[j++] = page;
+ }
+
+ /*
+ * Here a delicate locking dance is performed. Current thread
+ * holds a reference to a page, but has to own it before it
+ * can be placed into queue. Owning implies waiting, so
+ * radix-tree lock is to be released. After a wait one has to
+ * check that pages weren't truncated (cl_page_own() returns
+ * error in the latter case).
+ */
+ spin_unlock(&hdr->coh_page_guard);
+ tree_lock = 0;
+
+ for (i = 0; i < j; ++i) {
+ page = pvec[i];
+ if (res == CLP_GANG_OKAY)
+ res = (*cb)(env, io, page, cbdata);
+ lu_ref_del(&page->cp_reference,
+ "gang_lookup", current);
+ cl_page_put(env, page);
+ }
+ if (nr < CLT_PVEC_SIZE || end_of_region)
+ break;
+
+ if (res == CLP_GANG_OKAY && need_resched())
+ res = CLP_GANG_RESCHED;
+ if (res != CLP_GANG_OKAY)
+ break;
+
+ spin_lock(&hdr->coh_page_guard);
+ tree_lock = 1;
+ }
+ if (tree_lock)
+ spin_unlock(&hdr->coh_page_guard);
+ return res;
+}
+EXPORT_SYMBOL(cl_page_gang_lookup);
+
+static void cl_page_free(const struct lu_env *env, struct cl_page *page)
+{
+ struct cl_object *obj = page->cp_obj;
+ int pagesize = cl_object_header(obj)->coh_page_bufsize;
+
+ PASSERT(env, page, list_empty(&page->cp_batch));
+ PASSERT(env, page, page->cp_owner == NULL);
+ PASSERT(env, page, page->cp_req == NULL);
+ PASSERT(env, page, page->cp_parent == NULL);
+ PASSERT(env, page, page->cp_state == CPS_FREEING);
+
+ might_sleep();
+ while (!list_empty(&page->cp_layers)) {
+ struct cl_page_slice *slice;
+
+ slice = list_entry(page->cp_layers.next,
+ struct cl_page_slice, cpl_linkage);
+ list_del_init(page->cp_layers.next);
+ slice->cpl_ops->cpo_fini(env, slice);
+ }
+ CS_PAGE_DEC(obj, total);
+ CS_PAGESTATE_DEC(obj, page->cp_state);
+ lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page);
+ cl_object_put(env, obj);
+ lu_ref_fini(&page->cp_reference);
+ OBD_FREE(page, pagesize);
+}
+
+/**
+ * Helper function updating page state. This is the only place in the code
+ * where cl_page::cp_state field is mutated.
+ */
+static inline void cl_page_state_set_trust(struct cl_page *page,
+ enum cl_page_state state)
+{
+ /* bypass const. */
+ *(enum cl_page_state *)&page->cp_state = state;
+}
+
+static struct cl_page *cl_page_alloc(const struct lu_env *env,
+ struct cl_object *o, pgoff_t ind, struct page *vmpage,
+ enum cl_page_type type)
+{
+ struct cl_page *page;
+ struct lu_object_header *head;
+
+ OBD_ALLOC_GFP(page, cl_object_header(o)->coh_page_bufsize,
+ __GFP_IO);
+ if (page != NULL) {
+ int result = 0;
+ atomic_set(&page->cp_ref, 1);
+ if (type == CPT_CACHEABLE) /* for radix tree */
+ atomic_inc(&page->cp_ref);
+ page->cp_obj = o;
+ cl_object_get(o);
+ lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page",
+ page);
+ page->cp_index = ind;
+ cl_page_state_set_trust(page, CPS_CACHED);
+ page->cp_type = type;
+ INIT_LIST_HEAD(&page->cp_layers);
+ INIT_LIST_HEAD(&page->cp_batch);
+ INIT_LIST_HEAD(&page->cp_flight);
+ mutex_init(&page->cp_mutex);
+ lu_ref_init(&page->cp_reference);
+ head = o->co_lu.lo_header;
+ list_for_each_entry(o, &head->loh_layers,
+ co_lu.lo_linkage) {
+ if (o->co_ops->coo_page_init != NULL) {
+ result = o->co_ops->coo_page_init(env, o,
+ page, vmpage);
+ if (result != 0) {
+ cl_page_delete0(env, page, 0);
+ cl_page_free(env, page);
+ page = ERR_PTR(result);
+ break;
+ }
+ }
+ }
+ if (result == 0) {
+ CS_PAGE_INC(o, total);
+ CS_PAGE_INC(o, create);
+ CS_PAGESTATE_DEC(o, CPS_CACHED);
+ }
+ } else {
+ page = ERR_PTR(-ENOMEM);
+ }
+ return page;
+}
+
+/**
+ * Returns a cl_page with index \a idx at the object \a o, and associated with
+ * the VM page \a vmpage.
+ *
+ * This is the main entry point into the cl_page caching interface. First, a
+ * cache (implemented as a per-object radix tree) is consulted. If page is
+ * found there, it is returned immediately. Otherwise new page is allocated
+ * and returned. In any case, additional reference to page is acquired.
+ *
+ * \see cl_object_find(), cl_lock_find()
+ */
+static struct cl_page *cl_page_find0(const struct lu_env *env,
+ struct cl_object *o,
+ pgoff_t idx, struct page *vmpage,
+ enum cl_page_type type,
+ struct cl_page *parent)
+{
+ struct cl_page *page = NULL;
+ struct cl_page *ghost = NULL;
+ struct cl_object_header *hdr;
+ int err;
+
+ LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
+ might_sleep();
+
+ hdr = cl_object_header(o);
+ CS_PAGE_INC(o, lookup);
+
+ CDEBUG(D_PAGE, "%lu@"DFID" %p %lx %d\n",
+ idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
+ /* fast path. */
+ if (type == CPT_CACHEABLE) {
+ /* vmpage lock is used to protect the child/parent
+ * relationship */
+ KLASSERT(PageLocked(vmpage));
+ /*
+ * cl_vmpage_page() can be called here without any locks as
+ *
+ * - "vmpage" is locked (which prevents ->private from
+ * concurrent updates), and
+ *
+ * - "o" cannot be destroyed while current thread holds a
+ * reference on it.
+ */
+ page = cl_vmpage_page(vmpage, o);
+ PINVRNT(env, page,
+ ergo(page != NULL,
+ cl_page_vmpage(env, page) == vmpage &&
+ (void *)radix_tree_lookup(&hdr->coh_tree,
+ idx) == page));
+ }
+
+ if (page != NULL) {
+ CS_PAGE_INC(o, hit);
+ return page;
+ }
+
+ /* allocate and initialize cl_page */
+ page = cl_page_alloc(env, o, idx, vmpage, type);
+ if (IS_ERR(page))
+ return page;
+
+ if (type == CPT_TRANSIENT) {
+ if (parent) {
+ LASSERT(page->cp_parent == NULL);
+ page->cp_parent = parent;
+ parent->cp_child = page;
+ }
+ return page;
+ }
+
+ /*
+ * XXX optimization: use radix_tree_preload() here, and change tree
+ * gfp mask to GFP_KERNEL in cl_object_header_init().
+ */
+ spin_lock(&hdr->coh_page_guard);
+ err = radix_tree_insert(&hdr->coh_tree, idx, page);
+ if (err != 0) {
+ ghost = page;
+ /*
+ * Noted by Jay: a lock on \a vmpage protects cl_page_find()
+ * from this race, but
+ *
+ * 0. it's better to have cl_page interface "locally
+ * consistent" so that its correctness can be reasoned
+ * about without appealing to the (obscure world of) VM
+ * locking.
+ *
+ * 1. handling this race allows ->coh_tree to remain
+ * consistent even when VM locking is somehow busted,
+ * which is very useful during diagnosing and debugging.
+ */
+ page = ERR_PTR(err);
+ CL_PAGE_DEBUG(D_ERROR, env, ghost,
+ "fail to insert into radix tree: %d\n", err);
+ } else {
+ if (parent) {
+ LASSERT(page->cp_parent == NULL);
+ page->cp_parent = parent;
+ parent->cp_child = page;
+ }
+ hdr->coh_pages++;
+ }
+ spin_unlock(&hdr->coh_page_guard);
+
+ if (unlikely(ghost != NULL)) {
+ cl_page_delete0(env, ghost, 0);
+ cl_page_free(env, ghost);
+ }
+ return page;
+}
+
+struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *o,
+ pgoff_t idx, struct page *vmpage,
+ enum cl_page_type type)
+{
+ return cl_page_find0(env, o, idx, vmpage, type, NULL);
+}
+EXPORT_SYMBOL(cl_page_find);
+
+
+struct cl_page *cl_page_find_sub(const struct lu_env *env, struct cl_object *o,
+ pgoff_t idx, struct page *vmpage,
+ struct cl_page *parent)
+{
+ return cl_page_find0(env, o, idx, vmpage, parent->cp_type, parent);
+}
+EXPORT_SYMBOL(cl_page_find_sub);
+
+static inline int cl_page_invariant(const struct cl_page *pg)
+{
+ struct cl_object_header *header;
+ struct cl_page *parent;
+ struct cl_page *child;
+ struct cl_io *owner;
+
+ /*
+ * Page invariant is protected by a VM lock.
+ */
+ LINVRNT(cl_page_is_vmlocked(NULL, pg));
+
+ header = cl_object_header(pg->cp_obj);
+ parent = pg->cp_parent;
+ child = pg->cp_child;
+ owner = pg->cp_owner;
+
+ return cl_page_in_use(pg) &&
+ ergo(parent != NULL, parent->cp_child == pg) &&
+ ergo(child != NULL, child->cp_parent == pg) &&
+ ergo(child != NULL, pg->cp_obj != child->cp_obj) &&
+ ergo(parent != NULL, pg->cp_obj != parent->cp_obj) &&
+ ergo(owner != NULL && parent != NULL,
+ parent->cp_owner == pg->cp_owner->ci_parent) &&
+ ergo(owner != NULL && child != NULL,
+ child->cp_owner->ci_parent == owner) &&
+ /*
+ * Either page is early in initialization (has neither child
+ * nor parent yet), or it is in the object radix tree.
+ */
+ ergo(pg->cp_state < CPS_FREEING && pg->cp_type == CPT_CACHEABLE,
+ (void *)radix_tree_lookup(&header->coh_tree,
+ pg->cp_index) == pg ||
+ (child == NULL && parent == NULL));
+}
+
+static void cl_page_state_set0(const struct lu_env *env,
+ struct cl_page *page, enum cl_page_state state)
+{
+ enum cl_page_state old;
+
+ /*
+ * Matrix of allowed state transitions [old][new], for sanity
+ * checking.
+ */
+ static const int allowed_transitions[CPS_NR][CPS_NR] = {
+ [CPS_CACHED] = {
+ [CPS_CACHED] = 0,
+ [CPS_OWNED] = 1, /* io finds existing cached page */
+ [CPS_PAGEIN] = 0,
+ [CPS_PAGEOUT] = 1, /* write-out from the cache */
+ [CPS_FREEING] = 1, /* eviction on the memory pressure */
+ },
+ [CPS_OWNED] = {
+ [CPS_CACHED] = 1, /* release to the cache */
+ [CPS_OWNED] = 0,
+ [CPS_PAGEIN] = 1, /* start read immediately */
+ [CPS_PAGEOUT] = 1, /* start write immediately */
+ [CPS_FREEING] = 1, /* lock invalidation or truncate */
+ },
+ [CPS_PAGEIN] = {
+ [CPS_CACHED] = 1, /* io completion */
+ [CPS_OWNED] = 0,
+ [CPS_PAGEIN] = 0,
+ [CPS_PAGEOUT] = 0,
+ [CPS_FREEING] = 0,
+ },
+ [CPS_PAGEOUT] = {
+ [CPS_CACHED] = 1, /* io completion */
+ [CPS_OWNED] = 0,
+ [CPS_PAGEIN] = 0,
+ [CPS_PAGEOUT] = 0,
+ [CPS_FREEING] = 0,
+ },
+ [CPS_FREEING] = {
+ [CPS_CACHED] = 0,
+ [CPS_OWNED] = 0,
+ [CPS_PAGEIN] = 0,
+ [CPS_PAGEOUT] = 0,
+ [CPS_FREEING] = 0,
+ }
+ };
+
+ old = page->cp_state;
+ PASSERT(env, page, allowed_transitions[old][state]);
+ CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
+ for (; page != NULL; page = page->cp_child) {
+ PASSERT(env, page, page->cp_state == old);
+ PASSERT(env, page,
+ equi(state == CPS_OWNED, page->cp_owner != NULL));
+
+ CS_PAGESTATE_DEC(page->cp_obj, page->cp_state);
+ CS_PAGESTATE_INC(page->cp_obj, state);
+ cl_page_state_set_trust(page, state);
+ }
+}
+
+static void cl_page_state_set(const struct lu_env *env,
+ struct cl_page *page, enum cl_page_state state)
+{
+ cl_page_state_set0(env, page, state);
+}
+
+/**
+ * Acquires an additional reference to a page.
+ *
+ * This can be called only by caller already possessing a reference to \a
+ * page.
+ *
+ * \see cl_object_get(), cl_lock_get().
+ */
+void cl_page_get(struct cl_page *page)
+{
+ cl_page_get_trust(page);
+}
+EXPORT_SYMBOL(cl_page_get);
+
+/**
+ * Releases a reference to a page.
+ *
+ * When last reference is released, page is returned to the cache, unless it
+ * is in cl_page_state::CPS_FREEING state, in which case it is immediately
+ * destroyed.
+ *
+ * \see cl_object_put(), cl_lock_put().
+ */
+void cl_page_put(const struct lu_env *env, struct cl_page *page)
+{
+ PASSERT(env, page, atomic_read(&page->cp_ref) > !!page->cp_parent);
+
+ CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
+ atomic_read(&page->cp_ref));
+
+ if (atomic_dec_and_test(&page->cp_ref)) {
+ LASSERT(page->cp_state == CPS_FREEING);
+
+ LASSERT(atomic_read(&page->cp_ref) == 0);
+ PASSERT(env, page, page->cp_owner == NULL);
+ PASSERT(env, page, list_empty(&page->cp_batch));
+ /*
+ * Page is no longer reachable by other threads. Tear
+ * it down.
+ */
+ cl_page_free(env, page);
+ }
+}
+EXPORT_SYMBOL(cl_page_put);
+
+/**
+ * Returns a VM page associated with a given cl_page.
+ */
+struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page)
+{
+ const struct cl_page_slice *slice;
+
+ /*
+ * Find uppermost layer with ->cpo_vmpage() method, and return its
+ * result.
+ */
+ page = cl_page_top(page);
+ do {
+ list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+ if (slice->cpl_ops->cpo_vmpage != NULL)
+ return slice->cpl_ops->cpo_vmpage(env, slice);
+ }
+ page = page->cp_child;
+ } while (page != NULL);
+ LBUG(); /* ->cpo_vmpage() has to be defined somewhere in the stack */
+}
+EXPORT_SYMBOL(cl_page_vmpage);
+
+/**
+ * Returns a cl_page associated with a VM page, and given cl_object.
+ */
+struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
+{
+ struct cl_page *top;
+ struct cl_page *page;
+
+ KLASSERT(PageLocked(vmpage));
+
+ /*
+ * NOTE: absence of races and liveness of data are guaranteed by the page
+ * lock on the "vmpage". That works because object destruction proceeds
+ * in a bottom-to-top pass.
+ */
+
+ /*
+ * This loop assumes that ->private points to the top-most page. This
+ * can be rectified easily.
+ */
+ top = (struct cl_page *)vmpage->private;
+ if (top == NULL)
+ return NULL;
+
+ for (page = top; page != NULL; page = page->cp_child) {
+ if (cl_object_same(page->cp_obj, obj)) {
+ cl_page_get_trust(page);
+ break;
+ }
+ }
+ LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE));
+ return page;
+}
+EXPORT_SYMBOL(cl_vmpage_page);
+
+/**
+ * Returns the top-page for a given page.
+ *
+ * \see cl_object_top(), cl_io_top()
+ */
+struct cl_page *cl_page_top(struct cl_page *page)
+{
+ return cl_page_top_trusted(page);
+}
+EXPORT_SYMBOL(cl_page_top);
+
+const struct cl_page_slice *cl_page_at(const struct cl_page *page,
+ const struct lu_device_type *dtype)
+{
+ return cl_page_at_trusted(page, dtype);
+}
+EXPORT_SYMBOL(cl_page_at);
+
+#define CL_PAGE_OP(opname) offsetof(struct cl_page_operations, opname)
+
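+/*
+ * Note (added for clarity; not part of the original source): CL_PAGE_OP()
+ * turns a method name into its byte offset within struct cl_page_operations.
+ * The CL_PAGE_INVOKE()/CL_PAGE_INVOID() macros below walk the page stack from
+ * the top page down through its children and call the method found at that
+ * offset on every layer that defines it; CL_PAGE_INVOKE() stops at the first
+ * non-zero result and maps positive results back to 0. For example,
+ * cl_page_flush() below boils down to
+ *
+ *	cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_flush));
+ */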
+#define CL_PAGE_INVOKE(_env, _page, _op, _proto, ...) \
+({ \
+ const struct lu_env *__env = (_env); \
+ struct cl_page *__page = (_page); \
+ const struct cl_page_slice *__scan; \
+ int __result; \
+ ptrdiff_t __op = (_op); \
+ int (*__method)_proto; \
+ \
+ __result = 0; \
+ __page = cl_page_top(__page); \
+ do { \
+ list_for_each_entry(__scan, &__page->cp_layers, \
+ cpl_linkage) { \
+ __method = *(void **)((char *)__scan->cpl_ops + \
+ __op); \
+ if (__method != NULL) { \
+ __result = (*__method)(__env, __scan, \
+ ## __VA_ARGS__); \
+ if (__result != 0) \
+ break; \
+ } \
+ } \
+ __page = __page->cp_child; \
+ } while (__page != NULL && __result == 0); \
+ if (__result > 0) \
+ __result = 0; \
+ __result; \
+})
+
+#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...) \
+do { \
+ const struct lu_env *__env = (_env); \
+ struct cl_page *__page = (_page); \
+ const struct cl_page_slice *__scan; \
+ ptrdiff_t __op = (_op); \
+ void (*__method)_proto; \
+ \
+ __page = cl_page_top(__page); \
+ do { \
+ list_for_each_entry(__scan, &__page->cp_layers, \
+ cpl_linkage) { \
+ __method = *(void **)((char *)__scan->cpl_ops + \
+ __op); \
+ if (__method != NULL) \
+ (*__method)(__env, __scan, \
+ ## __VA_ARGS__); \
+ } \
+ __page = __page->cp_child; \
+ } while (__page != NULL); \
+} while (0)
+
+#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...) \
+do { \
+ const struct lu_env *__env = (_env); \
+ struct cl_page *__page = (_page); \
+ const struct cl_page_slice *__scan; \
+ ptrdiff_t __op = (_op); \
+ void (*__method)_proto; \
+ \
+ /* get to the bottom page. */ \
+ while (__page->cp_child != NULL) \
+ __page = __page->cp_child; \
+ do { \
+ list_for_each_entry_reverse(__scan, &__page->cp_layers, \
+ cpl_linkage) { \
+ __method = *(void **)((char *)__scan->cpl_ops + \
+ __op); \
+ if (__method != NULL) \
+ (*__method)(__env, __scan, \
+ ## __VA_ARGS__); \
+ } \
+ __page = __page->cp_parent; \
+ } while (__page != NULL); \
+} while (0)
+
+static int cl_page_invoke(const struct lu_env *env,
+ struct cl_io *io, struct cl_page *page, ptrdiff_t op)
+
+{
+ PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
+ return CL_PAGE_INVOKE(env, page, op,
+ (const struct lu_env *,
+ const struct cl_page_slice *, struct cl_io *),
+ io);
+}
+
+static void cl_page_invoid(const struct lu_env *env,
+ struct cl_io *io, struct cl_page *page, ptrdiff_t op)
+
+{
+ PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
+ CL_PAGE_INVOID(env, page, op,
+ (const struct lu_env *,
+ const struct cl_page_slice *, struct cl_io *), io);
+}
+
+static void cl_page_owner_clear(struct cl_page *page)
+{
+ for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
+ if (page->cp_owner != NULL) {
+ LASSERT(page->cp_owner->ci_owned_nr > 0);
+ page->cp_owner->ci_owned_nr--;
+ page->cp_owner = NULL;
+ page->cp_task = NULL;
+ }
+ }
+}
+
+static void cl_page_owner_set(struct cl_page *page)
+{
+ for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
+ LASSERT(page->cp_owner != NULL);
+ page->cp_owner->ci_owned_nr++;
+ }
+}
+
+void cl_page_disown0(const struct lu_env *env,
+ struct cl_io *io, struct cl_page *pg)
+{
+ enum cl_page_state state;
+
+ state = pg->cp_state;
+ PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
+ PINVRNT(env, pg, cl_page_invariant(pg));
+ cl_page_owner_clear(pg);
+
+ if (state == CPS_OWNED)
+ cl_page_state_set(env, pg, CPS_CACHED);
+ /*
+ * Completion call-backs are executed in bottom-up order, so that the
+ * uppermost layer (llite), responsible for VFS/VM interaction, runs
+ * last and can release locks safely.
+ */
+ CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_disown),
+ (const struct lu_env *,
+ const struct cl_page_slice *, struct cl_io *),
+ io);
+}
+
+/**
+ * Returns true iff the page is owned by the given io.
+ */
+int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
+{
+ LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
+ return pg->cp_state == CPS_OWNED && pg->cp_owner == io;
+}
+EXPORT_SYMBOL(cl_page_is_owned);
+
+/**
+ * Try to own a page by IO.
+ *
+ * Waits until the page is in cl_page_state::CPS_CACHED state, and then
+ * switches it into cl_page_state::CPS_OWNED state.
+ *
+ * \pre !cl_page_is_owned(pg, io)
+ * \post result == 0 iff cl_page_is_owned(pg, io)
+ *
+ * \retval 0 success
+ *
+ * \retval -ve failure, e.g., the page was destroyed (and landed in
+ * cl_page_state::CPS_FREEING instead of cl_page_state::CPS_CACHED),
+ * or the page was owned by another thread, or is part of an IO.
+ *
+ * \see cl_page_disown()
+ * \see cl_page_operations::cpo_own()
+ * \see cl_page_own_try()
+ * \see cl_page_own
+ */
+static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg, int nonblock)
+{
+ int result;
+
+ PINVRNT(env, pg, !cl_page_is_owned(pg, io));
+
+ pg = cl_page_top(pg);
+ io = cl_io_top(io);
+
+ if (pg->cp_state == CPS_FREEING) {
+ result = -ENOENT;
+ } else {
+ result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
+ (const struct lu_env *,
+ const struct cl_page_slice *,
+ struct cl_io *, int),
+ io, nonblock);
+ if (result == 0) {
+ PASSERT(env, pg, pg->cp_owner == NULL);
+ PASSERT(env, pg, pg->cp_req == NULL);
+ pg->cp_owner = io;
+ pg->cp_task = current;
+ cl_page_owner_set(pg);
+ if (pg->cp_state != CPS_FREEING) {
+ cl_page_state_set(env, pg, CPS_OWNED);
+ } else {
+ cl_page_disown0(env, io, pg);
+ result = -ENOENT;
+ }
+ }
+ }
+ PINVRNT(env, pg, ergo(result == 0, cl_page_invariant(pg)));
+ return result;
+}
+
+/**
+ * Own a page, might be blocked.
+ *
+ * \see cl_page_own0()
+ */
+int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
+{
+ return cl_page_own0(env, io, pg, 0);
+}
+EXPORT_SYMBOL(cl_page_own);
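+/*
+ * Minimal usage sketch (illustration only, not part of the original code):
+ * a caller that needs temporary ownership of a page pairs cl_page_own()
+ * with cl_page_disown(), similar to page_prune_cb() later in this file:
+ *
+ *	if (cl_page_own(env, io, pg) == 0) {
+ *		... operate on the owned page ...
+ *		cl_page_disown(env, io, pg);
+ *	}
+ */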
+
+/**
+ * Nonblock version of cl_page_own().
+ *
+ * \see cl_page_own0()
+ */
+int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg)
+{
+ return cl_page_own0(env, io, pg, 1);
+}
+EXPORT_SYMBOL(cl_page_own_try);
+
+
+/**
+ * Assume page ownership.
+ *
+ * Called when page is already locked by the hosting VM.
+ *
+ * \pre !cl_page_is_owned(pg, io)
+ * \post cl_page_is_owned(pg, io)
+ *
+ * \see cl_page_operations::cpo_assume()
+ */
+void cl_page_assume(const struct lu_env *env,
+ struct cl_io *io, struct cl_page *pg)
+{
+ PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));
+
+ pg = cl_page_top(pg);
+ io = cl_io_top(io);
+
+ cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
+ PASSERT(env, pg, pg->cp_owner == NULL);
+ pg->cp_owner = io;
+ pg->cp_task = current;
+ cl_page_owner_set(pg);
+ cl_page_state_set(env, pg, CPS_OWNED);
+}
+EXPORT_SYMBOL(cl_page_assume);
+
+/**
+ * Releases page ownership without unlocking the page.
+ *
+ * Moves page into cl_page_state::CPS_CACHED without releasing a lock on the
+ * underlying VM page (as VM is supposed to do this itself).
+ *
+ * \pre cl_page_is_owned(pg, io)
+ * \post !cl_page_is_owned(pg, io)
+ *
+ * \see cl_page_assume()
+ */
+void cl_page_unassume(const struct lu_env *env,
+ struct cl_io *io, struct cl_page *pg)
+{
+ PINVRNT(env, pg, cl_page_is_owned(pg, io));
+ PINVRNT(env, pg, cl_page_invariant(pg));
+
+ pg = cl_page_top(pg);
+ io = cl_io_top(io);
+ cl_page_owner_clear(pg);
+ cl_page_state_set(env, pg, CPS_CACHED);
+ CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_unassume),
+ (const struct lu_env *,
+ const struct cl_page_slice *, struct cl_io *),
+ io);
+}
+EXPORT_SYMBOL(cl_page_unassume);
+
+/**
+ * Releases page ownership.
+ *
+ * Moves page into cl_page_state::CPS_CACHED.
+ *
+ * \pre cl_page_is_owned(pg, io)
+ * \post !cl_page_is_owned(pg, io)
+ *
+ * \see cl_page_own()
+ * \see cl_page_operations::cpo_disown()
+ */
+void cl_page_disown(const struct lu_env *env,
+ struct cl_io *io, struct cl_page *pg)
+{
+ PINVRNT(env, pg, cl_page_is_owned(pg, io));
+
+ pg = cl_page_top(pg);
+ io = cl_io_top(io);
+ cl_page_disown0(env, io, pg);
+}
+EXPORT_SYMBOL(cl_page_disown);
+
+/**
+ * Called when page is to be removed from the object, e.g., as a result of
+ * truncate.
+ *
+ * Calls cl_page_operations::cpo_discard() top-to-bottom.
+ *
+ * \pre cl_page_is_owned(pg, io)
+ *
+ * \see cl_page_operations::cpo_discard()
+ */
+void cl_page_discard(const struct lu_env *env,
+ struct cl_io *io, struct cl_page *pg)
+{
+ PINVRNT(env, pg, cl_page_is_owned(pg, io));
+ PINVRNT(env, pg, cl_page_invariant(pg));
+
+ cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_discard));
+}
+EXPORT_SYMBOL(cl_page_discard);
+
+/**
+ * Version of cl_page_delete() that can be called for not fully constructed
+ * pages, e.g., in an error-handling cl_page_find()->cl_page_delete0()
+ * path. Doesn't check the page invariant.
+ */
+static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
+ int radix)
+{
+ struct cl_page *tmp = pg;
+
+ PASSERT(env, pg, pg == cl_page_top(pg));
+ PASSERT(env, pg, pg->cp_state != CPS_FREEING);
+
+ /*
+ * Sever all ways to obtain new pointers to @pg.
+ */
+ cl_page_owner_clear(pg);
+
+ /*
+ * Unexport the page before freeing it so that
+ * its content is considered to be invalid.
+ * We have to do this because a CPS_FREEING cl_page may
+ * NOT be under the protection of a cl_lock.
+ * Afterwards, if this page is found by other threads, it
+ * will be forced to be re-read.
+ */
+ cl_page_export(env, pg, 0);
+ cl_page_state_set0(env, pg, CPS_FREEING);
+
+ CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_delete),
+ (const struct lu_env *, const struct cl_page_slice *));
+
+ if (tmp->cp_type == CPT_CACHEABLE) {
+ if (!radix)
+ /* !radix means that @pg is not yet in the radix tree,
+ * skip removing it.
+ */
+ tmp = pg->cp_child;
+ for (; tmp != NULL; tmp = tmp->cp_child) {
+ void *value;
+ struct cl_object_header *hdr;
+
+ hdr = cl_object_header(tmp->cp_obj);
+ spin_lock(&hdr->coh_page_guard);
+ value = radix_tree_delete(&hdr->coh_tree,
+ tmp->cp_index);
+ PASSERT(env, tmp, value == tmp);
+ PASSERT(env, tmp, hdr->coh_pages > 0);
+ hdr->coh_pages--;
+ spin_unlock(&hdr->coh_page_guard);
+ cl_page_put(env, tmp);
+ }
+ }
+}
+
+/**
+ * Called when a decision is made to throw page out of memory.
+ *
+ * Notifies all layers about page destruction by calling
+ * cl_page_operations::cpo_delete() method top-to-bottom.
+ *
+ * Moves page into cl_page_state::CPS_FREEING state (this is the only place
+ * where transition to this state happens).
+ *
+ * Eliminates all avenues through which new references to the page can be
+ * obtained:
+ *
+ * - removes page from the radix trees,
+ *
+ * - breaks linkage from VM page to cl_page.
+ *
+ * Once page reaches cl_page_state::CPS_FREEING, all remaining references will
+ * drain after some time, at which point page will be recycled.
+ *
+ * \pre pg == cl_page_top(pg)
+ * \pre VM page is locked
+ * \post pg->cp_state == CPS_FREEING
+ *
+ * \see cl_page_operations::cpo_delete()
+ */
+void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
+{
+ PINVRNT(env, pg, cl_page_invariant(pg));
+ cl_page_delete0(env, pg, 1);
+}
+EXPORT_SYMBOL(cl_page_delete);
+
+/**
+ * Unmaps page from user virtual memory.
+ *
+ * Calls cl_page_operations::cpo_unmap() through all layers top-to-bottom. The
+ * layer responsible for VM interaction has to unmap page from user space
+ * virtual memory.
+ *
+ * \see cl_page_operations::cpo_unmap()
+ */
+int cl_page_unmap(const struct lu_env *env,
+ struct cl_io *io, struct cl_page *pg)
+{
+ PINVRNT(env, pg, cl_page_is_owned(pg, io));
+ PINVRNT(env, pg, cl_page_invariant(pg));
+
+ return cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_unmap));
+}
+EXPORT_SYMBOL(cl_page_unmap);
+
+/**
+ * Marks page up-to-date.
+ *
+ * Calls cl_page_operations::cpo_export() through all layers top-to-bottom. The
+ * layer responsible for VM interaction has to mark/clear the page as
+ * up-to-date according to the \a uptodate argument.
+ *
+ * \see cl_page_operations::cpo_export()
+ */
+void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate)
+{
+ PINVRNT(env, pg, cl_page_invariant(pg));
+ CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_export),
+ (const struct lu_env *,
+ const struct cl_page_slice *, int), uptodate);
+}
+EXPORT_SYMBOL(cl_page_export);
+
+/**
+ * Returns true iff \a pg is VM-locked in a suitable sense by the calling
+ * thread.
+ */
+int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
+{
+ int result;
+ const struct cl_page_slice *slice;
+
+ pg = cl_page_top_trusted((struct cl_page *)pg);
+ slice = container_of(pg->cp_layers.next,
+ const struct cl_page_slice, cpl_linkage);
+ PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked != NULL);
+ /*
+ * Call ->cpo_is_vmlocked() directly instead of going through
+ * CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by
+ * cl_page_invariant().
+ */
+ result = slice->cpl_ops->cpo_is_vmlocked(env, slice);
+ PASSERT(env, pg, result == -EBUSY || result == -ENODATA);
+ return result == -EBUSY;
+}
+EXPORT_SYMBOL(cl_page_is_vmlocked);
+
+static enum cl_page_state cl_req_type_state(enum cl_req_type crt)
+{
+ return crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN;
+}
+
+static void cl_page_io_start(const struct lu_env *env,
+ struct cl_page *pg, enum cl_req_type crt)
+{
+ /*
+ * Page is queued for IO, change its state.
+ */
+ cl_page_owner_clear(pg);
+ cl_page_state_set(env, pg, cl_req_type_state(crt));
+}
+
+/**
+ * Prepares page for immediate transfer. cl_page_operations::cpo_prep() is
+ * called top-to-bottom. Every layer either agrees to submit this page (by
+ * returning 0), or requests to omit this page (by returning -EALREADY). The
+ * layer handling interactions with the VM also has to inform the VM that the
+ * page is now under transfer.
+ */
+int cl_page_prep(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg, enum cl_req_type crt)
+{
+ int result;
+
+ PINVRNT(env, pg, cl_page_is_owned(pg, io));
+ PINVRNT(env, pg, cl_page_invariant(pg));
+ PINVRNT(env, pg, crt < CRT_NR);
+
+ /*
+ * XXX this has to be called bottom-to-top, so that llite can set up
+ * PG_writeback without risking other layers deciding to skip this
+ * page.
+ */
+ if (crt >= CRT_NR)
+ return -EINVAL;
+ result = cl_page_invoke(env, io, pg, CL_PAGE_OP(io[crt].cpo_prep));
+ if (result == 0)
+ cl_page_io_start(env, pg, crt);
+
+ KLASSERT(ergo(crt == CRT_WRITE && pg->cp_type == CPT_CACHEABLE,
+ equi(result == 0,
+ PageWriteback(cl_page_vmpage(env, pg)))));
+ CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
+ return result;
+}
+EXPORT_SYMBOL(cl_page_prep);
+
+/**
+ * Notify layers about transfer completion.
+ *
+ * Invoked by the transfer sub-system (which is a part of osc) to notify
+ * layers that a transfer, of which this page is a part, has completed.
+ *
+ * Completion call-backs are executed in the bottom-up order, so that
+ * uppermost layer (llite), responsible for the VFS/VM interaction runs last
+ * and can release locks safely.
+ *
+ * \pre pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
+ * \post pg->cp_state == CPS_CACHED
+ *
+ * \see cl_page_operations::cpo_completion()
+ */
+void cl_page_completion(const struct lu_env *env,
+ struct cl_page *pg, enum cl_req_type crt, int ioret)
+{
+ struct cl_sync_io *anchor = pg->cp_sync_io;
+
+ PASSERT(env, pg, crt < CRT_NR);
+ /* cl_page::cp_req already cleared by the caller (osc_completion()) */
+ PASSERT(env, pg, pg->cp_req == NULL);
+ PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));
+
+ CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
+ if (crt == CRT_READ && ioret == 0) {
+ PASSERT(env, pg, !(pg->cp_flags & CPF_READ_COMPLETED));
+ pg->cp_flags |= CPF_READ_COMPLETED;
+ }
+
+ cl_page_state_set(env, pg, CPS_CACHED);
+ if (crt >= CRT_NR)
+ return;
+ CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
+ (const struct lu_env *,
+ const struct cl_page_slice *, int), ioret);
+ if (anchor) {
+ LASSERT(cl_page_is_vmlocked(env, pg));
+ LASSERT(pg->cp_sync_io == anchor);
+ pg->cp_sync_io = NULL;
+ }
+ /*
+ * As page->cp_obj is pinned by a reference from page->cp_req, it is
+ * safe to call cl_page_put() without risking object destruction in a
+ * non-blocking context.
+ */
+ cl_page_put(env, pg);
+
+ if (anchor)
+ cl_sync_io_note(anchor, ioret);
+}
+EXPORT_SYMBOL(cl_page_completion);
+
+/**
+ * Notify layers that transfer formation engine decided to yank this page from
+ * the cache and to make it a part of a transfer.
+ *
+ * \pre pg->cp_state == CPS_CACHED
+ * \post pg->cp_state == CPS_PAGEIN || pg->cp_state == CPS_PAGEOUT
+ *
+ * \see cl_page_operations::cpo_make_ready()
+ */
+int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
+ enum cl_req_type crt)
+{
+ int result;
+
+ PINVRNT(env, pg, crt < CRT_NR);
+
+ if (crt >= CRT_NR)
+ return -EINVAL;
+ result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready),
+ (const struct lu_env *,
+ const struct cl_page_slice *));
+ if (result == 0) {
+ PASSERT(env, pg, pg->cp_state == CPS_CACHED);
+ cl_page_io_start(env, pg, crt);
+ }
+ CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
+ return result;
+}
+EXPORT_SYMBOL(cl_page_make_ready);
+
+/**
+ * Notify layers that high-level IO decided to place this page into a cache
+ * for future transfer.
+ *
+ * The layer implementing the transfer engine (osc) has to register this page
+ * in its queues.
+ *
+ * \pre cl_page_is_owned(pg, io)
+ * \post cl_page_is_owned(pg, io)
+ *
+ * \see cl_page_operations::cpo_cache_add()
+ */
+int cl_page_cache_add(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg, enum cl_req_type crt)
+{
+ const struct cl_page_slice *scan;
+ int result = 0;
+
+ PINVRNT(env, pg, crt < CRT_NR);
+ PINVRNT(env, pg, cl_page_is_owned(pg, io));
+ PINVRNT(env, pg, cl_page_invariant(pg));
+
+ if (crt >= CRT_NR)
+ return -EINVAL;
+
+ list_for_each_entry(scan, &pg->cp_layers, cpl_linkage) {
+ if (scan->cpl_ops->io[crt].cpo_cache_add == NULL)
+ continue;
+
+ result = scan->cpl_ops->io[crt].cpo_cache_add(env, scan, io);
+ if (result != 0)
+ break;
+ }
+ CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
+ return result;
+}
+EXPORT_SYMBOL(cl_page_cache_add);
+
+/**
+ * Called if a page is being written back at the kernel's initiative.
+ *
+ * \pre cl_page_is_owned(pg, io)
+ * \post ergo(result == 0, pg->cp_state == CPS_PAGEOUT)
+ *
+ * \see cl_page_operations::cpo_flush()
+ */
+int cl_page_flush(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg)
+{
+ int result;
+
+ PINVRNT(env, pg, cl_page_is_owned(pg, io));
+ PINVRNT(env, pg, cl_page_invariant(pg));
+
+ result = cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_flush));
+
+ CL_PAGE_HEADER(D_TRACE, env, pg, "%d\n", result);
+ return result;
+}
+EXPORT_SYMBOL(cl_page_flush);
+
+/**
+ * Checks whether the page is protected by any extent lock in at least the
+ * required mode.
+ *
+ * \return the same as in cl_page_operations::cpo_is_under_lock() method.
+ * \see cl_page_operations::cpo_is_under_lock()
+ */
+int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page)
+{
+ int rc;
+
+ PINVRNT(env, page, cl_page_invariant(page));
+
+ rc = CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_is_under_lock),
+ (const struct lu_env *,
+ const struct cl_page_slice *, struct cl_io *),
+ io);
+ PASSERT(env, page, rc != 0);
+ return rc;
+}
+EXPORT_SYMBOL(cl_page_is_under_lock);
+
+static int page_prune_cb(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page, void *cbdata)
+{
+ cl_page_own(env, io, page);
+ cl_page_unmap(env, io, page);
+ cl_page_discard(env, io, page);
+ cl_page_disown(env, io, page);
+ return CLP_GANG_OKAY;
+}
+
+/**
+ * Purges all cached pages belonging to the object \a obj.
+ */
+int cl_pages_prune(const struct lu_env *env, struct cl_object *clobj)
+{
+ struct cl_thread_info *info;
+ struct cl_object *obj = cl_object_top(clobj);
+ struct cl_io *io;
+ int result;
+
+ info = cl_env_info(env);
+ io = &info->clt_io;
+
+ /*
+ * initialize the io. This is ugly since we never do IO in this
+ * function, we just make cl_page_list functions happy. -jay
+ */
+ io->ci_obj = obj;
+ io->ci_ignore_layout = 1;
+ result = cl_io_init(env, io, CIT_MISC, obj);
+ if (result != 0) {
+ cl_io_fini(env, io);
+ return io->ci_result;
+ }
+
+ do {
+ result = cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF,
+ page_prune_cb, NULL);
+ if (result == CLP_GANG_RESCHED)
+ cond_resched();
+ } while (result != CLP_GANG_OKAY);
+
+ cl_io_fini(env, io);
+ return result;
+}
+EXPORT_SYMBOL(cl_pages_prune);
+
+/**
+ * Tells transfer engine that only part of a page is to be transmitted.
+ *
+ * \see cl_page_operations::cpo_clip()
+ */
+void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
+ int from, int to)
+{
+ PINVRNT(env, pg, cl_page_invariant(pg));
+
+ CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", from, to);
+ CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_clip),
+ (const struct lu_env *,
+ const struct cl_page_slice *, int, int),
+ from, to);
+}
+EXPORT_SYMBOL(cl_page_clip);
+
+/**
+ * Prints a human-readable representation of \a pg through \a printer.
+ */
+void cl_page_header_print(const struct lu_env *env, void *cookie,
+ lu_printer_t printer, const struct cl_page *pg)
+{
+ (*printer)(env, cookie,
+ "page@%p[%d %p:%lu ^%p_%p %d %d %d %p %p %#x]\n",
+ pg, atomic_read(&pg->cp_ref), pg->cp_obj,
+ pg->cp_index, pg->cp_parent, pg->cp_child,
+ pg->cp_state, pg->cp_error, pg->cp_type,
+ pg->cp_owner, pg->cp_req, pg->cp_flags);
+}
+EXPORT_SYMBOL(cl_page_header_print);
+
+/**
+ * Prints a human-readable representation of \a pg through \a printer.
+ */
+void cl_page_print(const struct lu_env *env, void *cookie,
+ lu_printer_t printer, const struct cl_page *pg)
+{
+ struct cl_page *scan;
+
+ for (scan = cl_page_top((struct cl_page *)pg);
+ scan != NULL; scan = scan->cp_child)
+ cl_page_header_print(env, cookie, printer, scan);
+ CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
+ (const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ void *cookie, lu_printer_t p), cookie, printer);
+ (*printer)(env, cookie, "end page@%p\n", pg);
+}
+EXPORT_SYMBOL(cl_page_print);
+
+/**
+ * Cancel a page which is still in a transfer.
+ */
+int cl_page_cancel(const struct lu_env *env, struct cl_page *page)
+{
+ return CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_cancel),
+ (const struct lu_env *,
+ const struct cl_page_slice *));
+}
+EXPORT_SYMBOL(cl_page_cancel);
+
+/**
+ * Converts a page index within object \a obj into a byte offset.
+ */
+loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
+{
+ /*
+ * XXX for now.
+ */
+ return (loff_t)idx << PAGE_CACHE_SHIFT;
+}
+EXPORT_SYMBOL(cl_offset);
+
+/**
+ * Converts a byte offset within object \a obj into a page index.
+ */
+pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
+{
+ /*
+ * XXX for now.
+ */
+ return offset >> PAGE_CACHE_SHIFT;
+}
+EXPORT_SYMBOL(cl_index);
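+/*
+ * Worked example (assuming the common case of 4096-byte pages, i.e.
+ * PAGE_CACHE_SHIFT == 12): cl_offset(obj, 3) == 3 << 12 == 12288 and
+ * cl_index(obj, 12288) == 3, so the two helpers are inverses of each
+ * other for page-aligned offsets.
+ */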
+
+int cl_page_size(const struct cl_object *obj)
+{
+ return 1 << PAGE_CACHE_SHIFT;
+}
+EXPORT_SYMBOL(cl_page_size);
+
+/**
+ * Adds page slice to the compound page.
+ *
+ * This is called by cl_object_operations::coo_page_init() methods to add a
+ * per-layer state to the page. New state is added at the end of
+ * cl_page::cp_layers list, that is, it is at the bottom of the stack.
+ *
+ * \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
+ */
+void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
+ struct cl_object *obj,
+ const struct cl_page_operations *ops)
+{
+ list_add_tail(&slice->cpl_linkage, &page->cp_layers);
+ slice->cpl_obj = obj;
+ slice->cpl_ops = ops;
+ slice->cpl_page = page;
+}
+EXPORT_SYMBOL(cl_page_slice_add);
+
+int cl_page_init(void)
+{
+ return 0;
+}
+
+void cl_page_fini(void)
+{
+}
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
new file mode 100644
index 0000000..b1024a6
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -0,0 +1,686 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_CLASS
+# include <asm/atomic.h>
+
+#include <obd_support.h>
+#include <obd_class.h>
+#include <linux/lnet/lnetctl.h>
+#include <lustre_debug.h>
+#include <lprocfs_status.h>
+#include <lustre/lustre_build_version.h>
+#include <linux/list.h>
+#include <cl_object.h>
+#include "llog_internal.h"
+
+
+struct obd_device *obd_devs[MAX_OBD_DEVICES];
+EXPORT_SYMBOL(obd_devs);
+struct list_head obd_types;
+DEFINE_RWLOCK(obd_dev_lock);
+
+__u64 obd_max_pages = 0;
+__u64 obd_max_alloc = 0;
+DEFINE_SPINLOCK(obd_updatemax_lock);
+
+/* The following are visible and mutable through /proc/sys/lustre/. */
+unsigned int obd_alloc_fail_rate = 0;
+EXPORT_SYMBOL(obd_alloc_fail_rate);
+unsigned int obd_debug_peer_on_timeout;
+EXPORT_SYMBOL(obd_debug_peer_on_timeout);
+unsigned int obd_dump_on_timeout;
+EXPORT_SYMBOL(obd_dump_on_timeout);
+unsigned int obd_dump_on_eviction;
+EXPORT_SYMBOL(obd_dump_on_eviction);
+unsigned int obd_max_dirty_pages = 256;
+EXPORT_SYMBOL(obd_max_dirty_pages);
+atomic_t obd_dirty_pages;
+EXPORT_SYMBOL(obd_dirty_pages);
+unsigned int obd_timeout = OBD_TIMEOUT_DEFAULT; /* seconds */
+EXPORT_SYMBOL(obd_timeout);
+unsigned int ldlm_timeout = LDLM_TIMEOUT_DEFAULT; /* seconds */
+EXPORT_SYMBOL(ldlm_timeout);
+unsigned int obd_timeout_set;
+EXPORT_SYMBOL(obd_timeout_set);
+unsigned int ldlm_timeout_set;
+EXPORT_SYMBOL(ldlm_timeout_set);
+/* Adaptive timeout defs here instead of ptlrpc module for /proc/sys/ access */
+unsigned int at_min = 0;
+EXPORT_SYMBOL(at_min);
+unsigned int at_max = 600;
+EXPORT_SYMBOL(at_max);
+unsigned int at_history = 600;
+EXPORT_SYMBOL(at_history);
+int at_early_margin = 5;
+EXPORT_SYMBOL(at_early_margin);
+int at_extra = 30;
+EXPORT_SYMBOL(at_extra);
+
+atomic_t obd_dirty_transit_pages;
+EXPORT_SYMBOL(obd_dirty_transit_pages);
+
+char obd_jobid_var[JOBSTATS_JOBID_VAR_MAX_LEN + 1] = JOBSTATS_DISABLE;
+EXPORT_SYMBOL(obd_jobid_var);
+
+/* Get the jobid of the current process by reading the environment variable
+ * stored between "env_start" and "env_end" of the task struct.
+ *
+ * TODO:
+ * It's better to cache the jobid for later use if there is any
+ * efficient way, the cl_env code probably could be reused for this
+ * purpose.
+ *
+ * If some job scheduler doesn't store jobid in the "env_start/end",
+ * then an upcall could be issued here to get the jobid by utilizing
+ * the userspace tools/api. Then, the jobid must be cached.
+ */
+int lustre_get_jobid(char *jobid)
+{
+ int jobid_len = JOBSTATS_JOBID_SIZE;
+ int rc = 0;
+
+ memset(jobid, 0, JOBSTATS_JOBID_SIZE);
+ /* Jobstats isn't enabled */
+ if (strcmp(obd_jobid_var, JOBSTATS_DISABLE) == 0)
+ return 0;
+
+ /* Use process name + fsuid as jobid */
+ if (strcmp(obd_jobid_var, JOBSTATS_PROCNAME_UID) == 0) {
+ snprintf(jobid, JOBSTATS_JOBID_SIZE, "%s.%u",
+ current_comm(),
+ from_kuid(&init_user_ns, current_fsuid()));
+ return 0;
+ }
+
+ rc = cfs_get_environ(obd_jobid_var, jobid, &jobid_len);
+ if (rc) {
+ if (rc == -EOVERFLOW) {
+ /* For the PBS_JOBID and LOADL_STEP_ID keys (which are
+ * variable length strings instead of just numbers), it
+ * might make sense to keep the unique parts for JobID,
+ * instead of just returning an error. That means a
+ * larger temp buffer for cfs_get_environ(), then
+ * truncating the string at some separator to fit into
+ * the specified jobid_len. Fix later if needed. */
+ static bool printed;
+ if (unlikely(!printed)) {
+ LCONSOLE_ERROR_MSG(0x16b, "%s value too large "
+ "for JobID buffer (%d)\n",
+ obd_jobid_var, jobid_len);
+ printed = true;
+ }
+ } else {
+ CDEBUG((rc == -ENOENT || rc == -EINVAL ||
+ rc == -EDEADLK) ? D_INFO : D_ERROR,
+ "Get jobid for (%s) failed: rc = %d\n",
+ obd_jobid_var, rc);
+ }
+ }
+ return rc;
+}
+EXPORT_SYMBOL(lustre_get_jobid);
+
+int obd_alloc_fail(const void *ptr, const char *name, const char *type,
+ size_t size, const char *file, int line)
+{
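+ /* Fail if the allocation really failed (ptr == NULL), or simulate a
+ * failure when allocation-failure injection via obd_alloc_fail_rate
+ * triggers. */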
+ if (ptr == NULL ||
+ (cfs_rand() & OBD_ALLOC_FAIL_MASK) < obd_alloc_fail_rate) {
+ CERROR("%s%salloc of %s ("LPU64" bytes) failed at %s:%d\n",
+ ptr ? "force " :"", type, name, (__u64)size, file,
+ line);
+ CERROR(LPU64" total bytes and "LPU64" total pages "
+ "("LPU64" bytes) allocated by Lustre, "
+ "%d total bytes by LNET\n",
+ obd_memory_sum(),
+ obd_pages_sum() << PAGE_CACHE_SHIFT,
+ obd_pages_sum(),
+ atomic_read(&libcfs_kmemory));
+ return 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(obd_alloc_fail);
+
+static inline void obd_data2conn(struct lustre_handle *conn,
+ struct obd_ioctl_data *data)
+{
+ memset(conn, 0, sizeof *conn);
+ conn->cookie = data->ioc_cookie;
+}
+
+static inline void obd_conn2data(struct obd_ioctl_data *data,
+ struct lustre_handle *conn)
+{
+ data->ioc_cookie = conn->cookie;
+}
+
+int class_resolve_dev_name(__u32 len, const char *name)
+{
+ int rc;
+ int dev;
+
+ if (!len || !name) {
+ CERROR("No name passed,!\n");
+ GOTO(out, rc = -EINVAL);
+ }
+ if (name[len - 1] != 0) {
+ CERROR("Name not nul terminated!\n");
+ GOTO(out, rc = -EINVAL);
+ }
+
+ CDEBUG(D_IOCTL, "device name %s\n", name);
+ dev = class_name2dev(name);
+ if (dev == -1) {
+ CDEBUG(D_IOCTL, "No device for name %s!\n", name);
+ GOTO(out, rc = -EINVAL);
+ }
+
+ CDEBUG(D_IOCTL, "device name %s, dev %d\n", name, dev);
+ rc = dev;
+
+out:
+ return rc;
+}
+
+int class_handle_ioctl(unsigned int cmd, unsigned long arg)
+{
+ char *buf = NULL;
+ struct obd_ioctl_data *data;
+ struct libcfs_debug_ioctl_data *debug_data;
+ struct obd_device *obd = NULL;
+ int err = 0, len = 0;
+
+ /* only for debugging */
+ if (cmd == LIBCFS_IOC_DEBUG_MASK) {
+ debug_data = (struct libcfs_debug_ioctl_data *)arg;
+ libcfs_subsystem_debug = debug_data->subs;
+ libcfs_debug = debug_data->debug;
+ return 0;
+ }
+
+ CDEBUG(D_IOCTL, "cmd = %x\n", cmd);
+ if (obd_ioctl_getdata(&buf, &len, (void *)arg)) {
+ CERROR("OBD ioctl: data error\n");
+ return -EINVAL;
+ }
+ data = (struct obd_ioctl_data *)buf;
+
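+ /* First handle ioctls that do not need a specific obd device. */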
+ switch (cmd) {
+ case OBD_IOC_PROCESS_CFG: {
+ struct lustre_cfg *lcfg;
+
+ if (!data->ioc_plen1 || !data->ioc_pbuf1) {
+ CERROR("No config buffer passed!\n");
+ GOTO(out, err = -EINVAL);
+ }
+ OBD_ALLOC(lcfg, data->ioc_plen1);
+ if (lcfg == NULL)
+ GOTO(out, err = -ENOMEM);
+ err = copy_from_user(lcfg, data->ioc_pbuf1,
+ data->ioc_plen1);
+ if (!err)
+ err = lustre_cfg_sanity_check(lcfg, data->ioc_plen1);
+ if (!err)
+ err = class_process_config(lcfg);
+
+ OBD_FREE(lcfg, data->ioc_plen1);
+ GOTO(out, err);
+ }
+
+ case OBD_GET_VERSION:
+ if (!data->ioc_inlbuf1) {
+ CERROR("No buffer passed in ioctl\n");
+ GOTO(out, err = -EINVAL);
+ }
+
+ if (strlen(BUILD_VERSION) + 1 > data->ioc_inllen1) {
+ CERROR("ioctl buffer too small to hold version\n");
+ GOTO(out, err = -EINVAL);
+ }
+
+ memcpy(data->ioc_bulk, BUILD_VERSION,
+ strlen(BUILD_VERSION) + 1);
+
+ err = obd_ioctl_popdata((void *)arg, data, len);
+ if (err)
+ err = -EFAULT;
+ GOTO(out, err);
+
+ case OBD_IOC_NAME2DEV: {
+ /* Resolve a device name. This does not change the
+ * currently selected device.
+ */
+ int dev;
+
+ dev = class_resolve_dev_name(data->ioc_inllen1,
+ data->ioc_inlbuf1);
+ data->ioc_dev = dev;
+ if (dev < 0)
+ GOTO(out, err = -EINVAL);
+
+ err = obd_ioctl_popdata((void *)arg, data, sizeof(*data));
+ if (err)
+ err = -EFAULT;
+ GOTO(out, err);
+ }
+
+ case OBD_IOC_UUID2DEV: {
+ /* Resolve a device uuid. This does not change the
+ * currently selected device.
+ */
+ int dev;
+ struct obd_uuid uuid;
+
+ if (!data->ioc_inllen1 || !data->ioc_inlbuf1) {
+ CERROR("No UUID passed!\n");
+ GOTO(out, err = -EINVAL);
+ }
+ if (data->ioc_inlbuf1[data->ioc_inllen1 - 1] != 0) {
+ CERROR("UUID not NUL terminated!\n");
+ GOTO(out, err = -EINVAL);
+ }
+
+ CDEBUG(D_IOCTL, "device name %s\n", data->ioc_inlbuf1);
+ obd_str2uuid(&uuid, data->ioc_inlbuf1);
+ dev = class_uuid2dev(&uuid);
+ data->ioc_dev = dev;
+ if (dev == -1) {
+ CDEBUG(D_IOCTL, "No device for UUID %s!\n",
+ data->ioc_inlbuf1);
+ GOTO(out, err = -EINVAL);
+ }
+
+ CDEBUG(D_IOCTL, "device name %s, dev %d\n", data->ioc_inlbuf1,
+ dev);
+ err = obd_ioctl_popdata((void *)arg, data, sizeof(*data));
+ if (err)
+ err = -EFAULT;
+ GOTO(out, err);
+ }
+
+ case OBD_IOC_CLOSE_UUID: {
+ CDEBUG(D_IOCTL, "closing all connections to uuid %s (NOOP)\n",
+ data->ioc_inlbuf1);
+ GOTO(out, err = 0);
+ }
+
+ case OBD_IOC_GETDEVICE: {
+ int index = data->ioc_count;
+ char *status, *str;
+
+ if (!data->ioc_inlbuf1) {
+ CERROR("No buffer passed in ioctl\n");
+ GOTO(out, err = -EINVAL);
+ }
+ if (data->ioc_inllen1 < 128) {
+ CERROR("ioctl buffer too small to hold version\n");
+ GOTO(out, err = -EINVAL);
+ }
+
+ obd = class_num2obd(index);
+ if (!obd)
+ GOTO(out, err = -ENOENT);
+
+ if (obd->obd_stopping)
+ status = "ST";
+ else if (obd->obd_set_up)
+ status = "UP";
+ else if (obd->obd_attached)
+ status = "AT";
+ else
+ status = "--";
+ str = (char *)data->ioc_bulk;
+ snprintf(str, len - sizeof(*data), "%3d %s %s %s %s %d",
+ (int)index, status, obd->obd_type->typ_name,
+ obd->obd_name, obd->obd_uuid.uuid,
+ atomic_read(&obd->obd_refcount));
+ err = obd_ioctl_popdata((void *)arg, data, len);
+
+ GOTO(out, err = 0);
+ }
+
+ }
+
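+ /* The remaining ioctls operate on a specific device; resolve it either
+ * by name (OBD_DEV_BY_DEVNAME) or by device number. */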
+ if (data->ioc_dev == OBD_DEV_BY_DEVNAME) {
+ if (data->ioc_inllen4 <= 0 || data->ioc_inlbuf4 == NULL)
+ GOTO(out, err = -EINVAL);
+ if (strnlen(data->ioc_inlbuf4, MAX_OBD_NAME) >= MAX_OBD_NAME)
+ GOTO(out, err = -EINVAL);
+ obd = class_name2obd(data->ioc_inlbuf4);
+ } else if (data->ioc_dev < class_devno_max()) {
+ obd = class_num2obd(data->ioc_dev);
+ } else {
+ CERROR("OBD ioctl: No device\n");
+ GOTO(out, err = -EINVAL);
+ }
+
+ if (obd == NULL) {
+ CERROR("OBD ioctl : No Device %d\n", data->ioc_dev);
+ GOTO(out, err = -EINVAL);
+ }
+ LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
+
+ if (!obd->obd_set_up || obd->obd_stopping) {
+ CERROR("OBD ioctl: device not setup %d \n", data->ioc_dev);
+ GOTO(out, err = -EINVAL);
+ }
+
+ switch (cmd) {
+ case OBD_IOC_NO_TRANSNO: {
+ if (!obd->obd_attached) {
+ CERROR("Device %d not attached\n", obd->obd_minor);
+ GOTO(out, err = -ENODEV);
+ }
+ CDEBUG(D_HA, "%s: disabling committed-transno notification\n",
+ obd->obd_name);
+ obd->obd_no_transno = 1;
+ GOTO(out, err = 0);
+ }
+
+ default: {
+ err = obd_iocontrol(cmd, obd->obd_self_export, len, data, NULL);
+ if (err)
+ GOTO(out, err);
+
+ err = obd_ioctl_popdata((void *)arg, data, len);
+ if (err)
+ err = -EFAULT;
+ GOTO(out, err);
+ }
+ }
+
+ out:
+ if (buf)
+ obd_ioctl_freedata(buf, len);
+ return err;
+} /* class_handle_ioctl */
+
+extern struct miscdevice obd_psdev;
+
+#define OBD_INIT_CHECK
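+/* Sanity-check the 64-bit printf format macros (LPU64/LPD64/LPX64) and
+ * do_div() behaviour on this architecture before continuing with init. */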
+int obd_init_checks(void)
+{
+ __u64 u64val, div64val;
+ char buf[64];
+ int len, ret = 0;
+
+ CDEBUG(D_INFO, "LPU64=%s, LPD64=%s, LPX64=%s\n", LPU64, LPD64, LPX64);
+
+ CDEBUG(D_INFO, "OBD_OBJECT_EOF = "LPX64"\n", (__u64)OBD_OBJECT_EOF);
+
+ u64val = OBD_OBJECT_EOF;
+ CDEBUG(D_INFO, "u64val OBD_OBJECT_EOF = "LPX64"\n", u64val);
+ if (u64val != OBD_OBJECT_EOF) {
+ CERROR("__u64 "LPX64"(%d) != 0xffffffffffffffff\n",
+ u64val, (int)sizeof(u64val));
+ ret = -EINVAL;
+ }
+ len = snprintf(buf, sizeof(buf), LPX64, u64val);
+ if (len != 18) {
+ CWARN("LPX64 wrong length! strlen(%s)=%d != 18\n", buf, len);
+ ret = -EINVAL;
+ }
+
+ div64val = OBD_OBJECT_EOF;
+ CDEBUG(D_INFO, "u64val OBD_OBJECT_EOF = "LPX64"\n", u64val);
+ if (u64val != OBD_OBJECT_EOF) {
+ CERROR("__u64 "LPX64"(%d) != 0xffffffffffffffff\n",
+ u64val, (int)sizeof(u64val));
+ ret = -EOVERFLOW;
+ }
+ if (u64val >> 8 != OBD_OBJECT_EOF >> 8) {
+ CERROR("__u64 "LPX64"(%d) != 0xffffffffffffffff\n",
+ u64val, (int)sizeof(u64val));
+ return -EOVERFLOW;
+ }
+ if (do_div(div64val, 256) != (u64val & 255)) {
+ CERROR("do_div("LPX64",256) != "LPU64"\n", u64val, u64val &255);
+ return -EOVERFLOW;
+ }
+ if (u64val >> 8 != div64val) {
+ CERROR("do_div("LPX64",256) "LPU64" != "LPU64"\n",
+ u64val, div64val, u64val >> 8);
+ return -EOVERFLOW;
+ }
+ len = snprintf(buf, sizeof(buf), LPX64, u64val);
+ if (len != 18) {
+ CWARN("LPX64 wrong length! strlen(%s)=%d != 18\n", buf, len);
+ ret = -EINVAL;
+ }
+ len = snprintf(buf, sizeof(buf), LPU64, u64val);
+ if (len != 20) {
+ CWARN("LPU64 wrong length! strlen(%s)=%d != 20\n", buf, len);
+ ret = -EINVAL;
+ }
+ len = snprintf(buf, sizeof(buf), LPD64, u64val);
+ if (len != 2) {
+ CWARN("LPD64 wrong length! strlen(%s)=%d != 2\n", buf, len);
+ ret = -EINVAL;
+ }
+ if ((u64val & ~CFS_PAGE_MASK) >= PAGE_CACHE_SIZE) {
+ CWARN("mask failed: u64val "LPU64" >= "LPU64"\n", u64val,
+ (__u64)PAGE_CACHE_SIZE);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+extern spinlock_t obd_types_lock;
+extern int class_procfs_init(void);
+extern int class_procfs_clean(void);
+
+static int __init init_obdclass(void)
+{
+ int i, err;
+ int lustre_register_fs(void);
+
+ for (i = CAPA_SITE_CLIENT; i < CAPA_SITE_MAX; i++)
+ INIT_LIST_HEAD(&capa_list[i]);
+
+ LCONSOLE_INFO("Lustre: Build Version: "BUILD_VERSION"\n");
+
+ spin_lock_init(&obd_types_lock);
+ obd_zombie_impexp_init();
+#ifdef LPROCFS
+ obd_memory = lprocfs_alloc_stats(OBD_STATS_NUM,
+ LPROCFS_STATS_FLAG_NONE |
+ LPROCFS_STATS_FLAG_IRQ_SAFE);
+ if (obd_memory == NULL) {
+ CERROR("kmalloc of 'obd_memory' failed\n");
+ return -ENOMEM;
+ }
+
+ lprocfs_counter_init(obd_memory, OBD_MEMORY_STAT,
+ LPROCFS_CNTR_AVGMINMAX,
+ "memused", "bytes");
+ lprocfs_counter_init(obd_memory, OBD_MEMORY_PAGES_STAT,
+ LPROCFS_CNTR_AVGMINMAX,
+ "pagesused", "pages");
+#endif
+ err = obd_init_checks();
+ if (err == -EOVERFLOW)
+ return err;
+
+ class_init_uuidlist();
+ err = class_handle_init();
+ if (err)
+ return err;
+
+ INIT_LIST_HEAD(&obd_types);
+
+ err = misc_register(&obd_psdev);
+ if (err) {
+ CERROR("cannot register %d err %d\n", OBD_DEV_MINOR, err);
+ return err;
+ }
+
+ /* This array is static and already zeroed; clear it explicitly anyway. */
+ for (i = 0; i < class_devno_max(); i++)
+ obd_devs[i] = NULL;
+
+ /* Default the dirty page cache cap to 1/2 of system memory.
+ * For clients with less memory, a larger fraction is needed
+ * for other purposes (mostly for BGL). */
+ if (totalram_pages <= 512 << (20 - PAGE_CACHE_SHIFT))
+ obd_max_dirty_pages = totalram_pages / 4;
+ else
+ obd_max_dirty_pages = totalram_pages / 2;
+
+ err = obd_init_caches();
+ if (err)
+ return err;
+ err = class_procfs_init();
+ if (err)
+ return err;
+
+ err = lu_global_init();
+ if (err)
+ return err;
+
+ err = cl_global_init();
+ if (err != 0)
+ return err;
+
+
+ err = llog_info_init();
+ if (err)
+ return err;
+
+ err = lustre_register_fs();
+
+ return err;
+}
+
+void obd_update_maxusage(void)
+{
+ __u64 max1, max2;
+
+ max1 = obd_pages_sum();
+ max2 = obd_memory_sum();
+
+ spin_lock(&obd_updatemax_lock);
+ if (max1 > obd_max_pages)
+ obd_max_pages = max1;
+ if (max2 > obd_max_alloc)
+ obd_max_alloc = max2;
+ spin_unlock(&obd_updatemax_lock);
+}
+EXPORT_SYMBOL(obd_update_maxusage);
+
+#ifdef LPROCFS
+__u64 obd_memory_max(void)
+{
+ __u64 ret;
+
+ spin_lock(&obd_updatemax_lock);
+ ret = obd_max_alloc;
+ spin_unlock(&obd_updatemax_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(obd_memory_max);
+
+__u64 obd_pages_max(void)
+{
+ __u64 ret;
+
+ spin_lock(&obd_updatemax_lock);
+ ret = obd_max_pages;
+ spin_unlock(&obd_updatemax_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(obd_pages_max);
+#endif
+
+/* Note: liblustre does not call cleanup_obdclass(). Everything from here to
+ * the end of the file covers module init/exit and versioning. */
+static void cleanup_obdclass(void)
+{
+ int i;
+ int lustre_unregister_fs(void);
+ __u64 memory_leaked, pages_leaked;
+ __u64 memory_max, pages_max;
+
+ lustre_unregister_fs();
+
+ misc_deregister(&obd_psdev);
+ for (i = 0; i < class_devno_max(); i++) {
+ struct obd_device *obd = class_num2obd(i);
+ if (obd && obd->obd_set_up &&
+ OBT(obd) && OBP(obd, detach)) {
+ /* XXX should this call generic detach otherwise? */
+ LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
+ OBP(obd, detach)(obd);
+ }
+ }
+ llog_info_fini();
+ cl_global_fini();
+ lu_global_fini();
+
+ obd_cleanup_caches();
+ obd_sysctl_clean();
+
+ class_procfs_clean();
+
+ class_handle_cleanup();
+ class_exit_uuidlist();
+ obd_zombie_impexp_stop();
+
+ memory_leaked = obd_memory_sum();
+ pages_leaked = obd_pages_sum();
+
+ memory_max = obd_memory_max();
+ pages_max = obd_pages_max();
+
+ lprocfs_free_stats(&obd_memory);
+ CDEBUG((memory_leaked) ? D_ERROR : D_INFO,
+ "obd_memory max: "LPU64", leaked: "LPU64"\n",
+ memory_max, memory_leaked);
+ CDEBUG((pages_leaked) ? D_ERROR : D_INFO,
+ "obd_memory_pages max: "LPU64", leaked: "LPU64"\n",
+ pages_max, pages_leaked);
+}
+
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("Lustre Class Driver Build Version: " BUILD_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
+
+module_init(init_obdclass);
+module_exit(cleanup_obdclass);
diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c
new file mode 100644
index 0000000..15f71bb
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/debug.c
@@ -0,0 +1,124 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdclass/debug.c
+ *
+ * Helper routines for dumping data structs for debugging.
+ */
+
+#define DEBUG_SUBSYSTEM D_OTHER
+
+
+#include <obd_ost.h>
+#include <obd_support.h>
+#include <lustre_debug.h>
+#include <lustre_net.h>
+
+void dump_lniobuf(struct niobuf_local *nb)
+{
+ CDEBUG(D_RPCTRACE,
+ "niobuf_local: file_offset="LPD64", len=%d, page=%p, rc=%d\n",
+ nb->lnb_file_offset, nb->len, nb->page, nb->rc);
+ CDEBUG(D_RPCTRACE, "nb->page: index = %ld\n",
+ nb->page ? page_index(nb->page) : -1);
+}
+EXPORT_SYMBOL(dump_lniobuf);
+
+void dump_lsm(int level, struct lov_stripe_md *lsm)
+{
+ CDEBUG(level, "lsm %p, objid "DOSTID", maxbytes "LPX64", magic 0x%08X,"
+ " stripe_size %u, stripe_count %u, refc: %d,"
+ " layout_gen %u, pool ["LOV_POOLNAMEF"]\n", lsm,
+ POSTID(&lsm->lsm_oi), lsm->lsm_maxbytes, lsm->lsm_magic,
+ lsm->lsm_stripe_size, lsm->lsm_stripe_count,
+ atomic_read(&lsm->lsm_refc), lsm->lsm_layout_gen,
+ lsm->lsm_pool_name);
+}
+EXPORT_SYMBOL(dump_lsm);
+
+#define LPDS sizeof(__u64)
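+/* Stamp (offset, id) at both the head and the tail of the buffer so that
+ * block_debug_check() can detect corruption at either end. */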
+int block_debug_setup(void *addr, int len, __u64 off, __u64 id)
+{
+ LASSERT(addr);
+
+ off = cpu_to_le64(off);
+ id = cpu_to_le64(id);
+ memcpy(addr, (char *)&off, LPDS);
+ memcpy(addr + LPDS, (char *)&id, LPDS);
+
+ addr += len - LPDS - LPDS;
+ memcpy(addr, (char *)&off, LPDS);
+ memcpy(addr + LPDS, (char *)&id, LPDS);
+
+ return 0;
+}
+EXPORT_SYMBOL(block_debug_setup);
+
+int block_debug_check(char *who, void *addr, int end, __u64 off, __u64 id)
+{
+ __u64 ne_off;
+ int err = 0;
+
+ LASSERT(addr);
+
+ ne_off = le64_to_cpu(off);
+ id = le64_to_cpu(id);
+ if (memcmp(addr, (char *)&ne_off, LPDS)) {
+ CDEBUG(D_ERROR, "%s: id "LPX64" offset "LPU64" off: "LPX64" != "
+ LPX64"\n", who, id, off, *(__u64 *)addr, ne_off);
+ err = -EINVAL;
+ }
+ if (memcmp(addr + LPDS, (char *)&id, LPDS)) {
+ CDEBUG(D_ERROR, "%s: id "LPX64" offset "LPU64" id: "LPX64" != "LPX64"\n",
+ who, id, off, *(__u64 *)(addr + LPDS), id);
+ err = -EINVAL;
+ }
+
+ addr += end - LPDS - LPDS;
+ if (memcmp(addr, (char *)&ne_off, LPDS)) {
+ CDEBUG(D_ERROR, "%s: id "LPX64" offset "LPU64" end off: "LPX64" != "
+ LPX64"\n", who, id, off, *(__u64 *)addr, ne_off);
+ err = -EINVAL;
+ }
+ if (memcmp(addr + LPDS, (char *)&id, LPDS)) {
+ CDEBUG(D_ERROR, "%s: id "LPX64" offset "LPU64" end id: "LPX64" != "
+ LPX64"\n", who, id, off, *(__u64 *)(addr + LPDS), id);
+ err = -EINVAL;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(block_debug_check);
+#undef LPDS
diff --git a/drivers/staging/lustre/lustre/obdclass/dt_object.c b/drivers/staging/lustre/lustre/obdclass/dt_object.c
new file mode 100644
index 0000000..1b164c7
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/dt_object.c
@@ -0,0 +1,1049 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdclass/dt_object.c
+ *
+ * Dt Object.
+ * Generic functions from dt_object.h
+ *
+ * Author: Nikita Danilov <nikita@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_CLASS
+
+#include <obd.h>
+#include <dt_object.h>
+#include <linux/list.h>
+/* fid_be_to_cpu() */
+#include <lustre_fid.h>
+
+#include <lustre_quota.h>
+
+/* context key constructor/destructor: dt_global_key_init, dt_global_key_fini */
+LU_KEY_INIT(dt_global, struct dt_thread_info);
+LU_KEY_FINI(dt_global, struct dt_thread_info);
+
+struct lu_context_key dt_key = {
+ .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD | LCT_MG_THREAD | LCT_LOCAL,
+ .lct_init = dt_global_key_init,
+ .lct_fini = dt_global_key_fini
+};
+EXPORT_SYMBOL(dt_key);
+
+/* No lock is necessary to protect the list, because callbacks are added
+ * during system startup. Please refer to "struct dt_device".
+ */
+void dt_txn_callback_add(struct dt_device *dev, struct dt_txn_callback *cb)
+{
+ list_add(&cb->dtc_linkage, &dev->dd_txn_callbacks);
+}
+EXPORT_SYMBOL(dt_txn_callback_add);
+
+void dt_txn_callback_del(struct dt_device *dev, struct dt_txn_callback *cb)
+{
+ list_del_init(&cb->dtc_linkage);
+}
+EXPORT_SYMBOL(dt_txn_callback_del);
+
+int dt_txn_hook_start(const struct lu_env *env,
+ struct dt_device *dev, struct thandle *th)
+{
+ int rc = 0;
+ struct dt_txn_callback *cb;
+
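+ /* Transaction hooks are not run for local transactions. */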
+ if (th->th_local)
+ return 0;
+
+ list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
+ if (cb->dtc_txn_start == NULL ||
+ !(cb->dtc_tag & env->le_ctx.lc_tags))
+ continue;
+ rc = cb->dtc_txn_start(env, th, cb->dtc_cookie);
+ if (rc < 0)
+ break;
+ }
+ return rc;
+}
+EXPORT_SYMBOL(dt_txn_hook_start);
+
+int dt_txn_hook_stop(const struct lu_env *env, struct thandle *txn)
+{
+ struct dt_device *dev = txn->th_dev;
+ struct dt_txn_callback *cb;
+ int rc = 0;
+
+ if (txn->th_local)
+ return 0;
+
+ list_for_each_entry(cb, &dev->dd_txn_callbacks, dtc_linkage) {
+ if (cb->dtc_txn_stop == NULL ||
+ !(cb->dtc_tag & env->le_ctx.lc_tags))
+ continue;
+ rc = cb->dtc_txn_stop(env, txn, cb->dtc_cookie);
+ if (rc < 0)
+ break;
+ }
+ return rc;
+}
+EXPORT_SYMBOL(dt_txn_hook_stop);
+
+void dt_txn_hook_commit(struct thandle *txn)
+{
+ struct dt_txn_callback *cb;
+
+ if (txn->th_local)
+ return;
+
+ list_for_each_entry(cb, &txn->th_dev->dd_txn_callbacks,
+ dtc_linkage) {
+ if (cb->dtc_txn_commit)
+ cb->dtc_txn_commit(txn, cb->dtc_cookie);
+ }
+}
+EXPORT_SYMBOL(dt_txn_hook_commit);
+
+int dt_device_init(struct dt_device *dev, struct lu_device_type *t)
+{
+
+ INIT_LIST_HEAD(&dev->dd_txn_callbacks);
+ return lu_device_init(&dev->dd_lu_dev, t);
+}
+EXPORT_SYMBOL(dt_device_init);
+
+void dt_device_fini(struct dt_device *dev)
+{
+ lu_device_fini(&dev->dd_lu_dev);
+}
+EXPORT_SYMBOL(dt_device_fini);
+
+int dt_object_init(struct dt_object *obj,
+ struct lu_object_header *h, struct lu_device *d)
+
+{
+ return lu_object_init(&obj->do_lu, h, d);
+}
+EXPORT_SYMBOL(dt_object_init);
+
+void dt_object_fini(struct dt_object *obj)
+{
+ lu_object_fini(&obj->do_lu);
+}
+EXPORT_SYMBOL(dt_object_fini);
+
+int dt_try_as_dir(const struct lu_env *env, struct dt_object *obj)
+{
+ if (obj->do_index_ops == NULL)
+ obj->do_ops->do_index_try(env, obj, &dt_directory_features);
+ return obj->do_index_ops != NULL;
+}
+EXPORT_SYMBOL(dt_try_as_dir);
+
+enum dt_format_type dt_mode_to_dft(__u32 mode)
+{
+ enum dt_format_type result;
+
+ switch (mode & S_IFMT) {
+ case S_IFDIR:
+ result = DFT_DIR;
+ break;
+ case S_IFREG:
+ result = DFT_REGULAR;
+ break;
+ case S_IFLNK:
+ result = DFT_SYM;
+ break;
+ case S_IFCHR:
+ case S_IFBLK:
+ case S_IFIFO:
+ case S_IFSOCK:
+ result = DFT_NODE;
+ break;
+ default:
+ LBUG();
+ break;
+ }
+ return result;
+}
+EXPORT_SYMBOL(dt_mode_to_dft);
+
+/**
+ * Lookup the fid of the object named \a name in directory \a dir.
+ */
+
+int dt_lookup_dir(const struct lu_env *env, struct dt_object *dir,
+ const char *name, struct lu_fid *fid)
+{
+ if (dt_try_as_dir(env, dir))
+ return dt_lookup(env, dir, (struct dt_rec *)fid,
+ (const struct dt_key *)name, BYPASS_CAPA);
+ return -ENOTDIR;
+}
+EXPORT_SYMBOL(dt_lookup_dir);
+
+/* Like dt_locate(), except that the top-level device is passed in explicitly
+ * rather than taken from the lu_site. */
+struct dt_object *dt_locate_at(const struct lu_env *env,
+ struct dt_device *dev, const struct lu_fid *fid,
+ struct lu_device *top_dev)
+{
+ struct lu_object *lo, *n;
+
+ lo = lu_object_find_at(env, top_dev, fid, NULL);
+ if (IS_ERR(lo))
+ return (void *)lo;
+
+ LASSERT(lo != NULL);
+
+ list_for_each_entry(n, &lo->lo_header->loh_layers, lo_linkage) {
+ if (n->lo_dev == &dev->dd_lu_dev)
+ return container_of0(n, struct dt_object, do_lu);
+ }
+ return ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL(dt_locate_at);
+
+/**
+ * Find an object named \a entry in the directory given by \a dfh->dfh_o.
+ */
+static int dt_find_entry(const struct lu_env *env, const char *entry, void *data)
+{
+ struct dt_find_hint *dfh = data;
+ struct dt_device *dt = dfh->dfh_dt;
+ struct lu_fid *fid = dfh->dfh_fid;
+ struct dt_object *obj = dfh->dfh_o;
+ int result;
+
+ result = dt_lookup_dir(env, obj, entry, fid);
+ lu_object_put(env, &obj->do_lu);
+ if (result == 0) {
+ obj = dt_locate(env, dt, fid);
+ if (IS_ERR(obj))
+ result = PTR_ERR(obj);
+ }
+ dfh->dfh_o = obj;
+ return result;
+}
+
+/**
+ * Generic path parser: splits \a path on '/' and feeds each non-empty
+ * component to \a entry_func.
+ */
+int dt_path_parser(const struct lu_env *env,
+ char *path, dt_entry_func_t entry_func,
+ void *data)
+{
+ char *e;
+ int rc = 0;
+
+ while (1) {
+ e = strsep(&path, "/");
+ if (e == NULL)
+ break;
+
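+ /* Skip empty components produced by duplicate or trailing slashes. */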
+ if (e[0] == 0) {
+ if (!path || path[0] == '\0')
+ break;
+ continue;
+ }
+ rc = entry_func(env, e, data);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+struct dt_object *
+dt_store_resolve(const struct lu_env *env, struct dt_device *dt,
+ const char *path, struct lu_fid *fid)
+{
+ struct dt_thread_info *info = dt_info(env);
+ struct dt_find_hint *dfh = &info->dti_dfh;
+ struct dt_object *obj;
+ char *local = info->dti_buf;
+ int result;
+
+
+ dfh->dfh_dt = dt;
+ dfh->dfh_fid = fid;
+
+ strncpy(local, path, DT_MAX_PATH);
+ local[DT_MAX_PATH - 1] = '\0';
+
+ result = dt->dd_ops->dt_root_get(env, dt, fid);
+ if (result == 0) {
+ obj = dt_locate(env, dt, fid);
+ if (!IS_ERR(obj)) {
+ dfh->dfh_o = obj;
+ result = dt_path_parser(env, local, dt_find_entry, dfh);
+ if (result != 0)
+ obj = ERR_PTR(result);
+ else
+ obj = dfh->dfh_o;
+ }
+ } else {
+ obj = ERR_PTR(result);
+ }
+ return obj;
+}
+EXPORT_SYMBOL(dt_store_resolve);
+
+static struct dt_object *dt_reg_open(const struct lu_env *env,
+ struct dt_device *dt,
+ struct dt_object *p,
+ const char *name,
+ struct lu_fid *fid)
+{
+ struct dt_object *o;
+ int result;
+
+ result = dt_lookup_dir(env, p, name, fid);
+ if (result == 0)
+ o = dt_locate(env, dt, fid);
+ else
+ o = ERR_PTR(result);
+
+ return o;
+}
+
+/**
+ * Open dt object named \a filename from \a dirname directory.
+ * \param dt dt device
+ * \param fid on success, object fid is stored in *fid
+ */
+struct dt_object *dt_store_open(const struct lu_env *env,
+ struct dt_device *dt,
+ const char *dirname,
+ const char *filename,
+ struct lu_fid *fid)
+{
+ struct dt_object *file;
+ struct dt_object *dir;
+
+ dir = dt_store_resolve(env, dt, dirname, fid);
+ if (!IS_ERR(dir)) {
+ file = dt_reg_open(env, dt, dir,
+ filename, fid);
+ lu_object_put(env, &dir->do_lu);
+ } else {
+ file = dir;
+ }
+ return file;
+}
+EXPORT_SYMBOL(dt_store_open);
+
+struct dt_object *dt_find_or_create(const struct lu_env *env,
+ struct dt_device *dt,
+ const struct lu_fid *fid,
+ struct dt_object_format *dof,
+ struct lu_attr *at)
+{
+ struct dt_object *dto;
+ struct thandle *th;
+ int rc;
+
+ dto = dt_locate(env, dt, fid);
+ if (IS_ERR(dto))
+ return dto;
+
+ LASSERT(dto != NULL);
+ if (dt_object_exists(dto))
+ return dto;
+
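+ /* The object does not exist yet: create it in a local transaction,
+ * re-checking existence under the write lock in case of a race. */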
+ th = dt_trans_create(env, dt);
+ if (IS_ERR(th))
+ GOTO(out, rc = PTR_ERR(th));
+
+ rc = dt_declare_create(env, dto, at, NULL, dof, th);
+ if (rc)
+ GOTO(trans_stop, rc);
+
+ rc = dt_trans_start_local(env, dt, th);
+ if (rc)
+ GOTO(trans_stop, rc);
+
+ dt_write_lock(env, dto, 0);
+ if (dt_object_exists(dto))
+ GOTO(unlock, rc = 0);
+
+ CDEBUG(D_OTHER, "create new object "DFID"\n", PFID(fid));
+
+ rc = dt_create(env, dto, at, NULL, dof, th);
+ if (rc)
+ GOTO(unlock, rc);
+ LASSERT(dt_object_exists(dto));
+unlock:
+ dt_write_unlock(env, dto);
+trans_stop:
+ dt_trans_stop(env, dt, th);
+out:
+ if (rc) {
+ lu_object_put(env, &dto->do_lu);
+ return ERR_PTR(rc);
+ }
+ return dto;
+}
+EXPORT_SYMBOL(dt_find_or_create);
+
+/* dt class init function. */
+int dt_global_init(void)
+{
+ int result;
+
+ LU_CONTEXT_KEY_INIT(&dt_key);
+ result = lu_context_key_register(&dt_key);
+ return result;
+}
+
+void dt_global_fini(void)
+{
+ lu_context_key_degister(&dt_key);
+}
+
+/**
+ * Generic read helper. Returns the number of bytes actually read, which may
+ * be less than requested; see dt_record_read() for a strict variant.
+ *
+ * \param env lustre environment
+ * \param dt object to be read
+ * \param buf lu_buf to be filled, with buffer pointer and length
+ * \param pos position to start reading, updated as data is read
+ *
+ * \retval real size of data read
+ * \retval -ve errno on failure
+ */
+int dt_read(const struct lu_env *env, struct dt_object *dt,
+ struct lu_buf *buf, loff_t *pos)
+{
+ LASSERTF(dt != NULL, "dt is NULL when we want to read record\n");
+ return dt->do_body_ops->dbo_read(env, dt, buf, pos, BYPASS_CAPA);
+}
+EXPORT_SYMBOL(dt_read);
+
+/**
+ * Read structures of fixed size from storage. Unlike dt_read(), using
+ * dt_record_read() will return an error for partial reads.
+ *
+ * \param env lustre environment
+ * \param dt object to be read
+ * \param buf lu_buf to be filled, with buffer pointer and length
+ * \param pos position to start reading, updated as data is read
+ *
+ * \retval 0 on successfully reading full buffer
+ * \retval -EFAULT on short read
+ * \retval -ve errno on failure
+ */
+int dt_record_read(const struct lu_env *env, struct dt_object *dt,
+ struct lu_buf *buf, loff_t *pos)
+{
+ int rc;
+
+ LASSERTF(dt != NULL, "dt is NULL when we want to read record\n");
+
+ rc = dt->do_body_ops->dbo_read(env, dt, buf, pos, BYPASS_CAPA);
+
+ if (rc == buf->lb_len)
+ rc = 0;
+ else if (rc >= 0)
+ rc = -EFAULT;
+ return rc;
+}
+EXPORT_SYMBOL(dt_record_read);
+
+int dt_record_write(const struct lu_env *env, struct dt_object *dt,
+ const struct lu_buf *buf, loff_t *pos, struct thandle *th)
+{
+ int rc;
+
+ LASSERTF(dt != NULL, "dt is NULL when we want to write record\n");
+ LASSERT(th != NULL);
+ LASSERT(dt->do_body_ops);
+ LASSERT(dt->do_body_ops->dbo_write);
+ rc = dt->do_body_ops->dbo_write(env, dt, buf, pos, th, BYPASS_CAPA, 1);
+ if (rc == buf->lb_len)
+ rc = 0;
+ else if (rc >= 0)
+ rc = -EFAULT;
+ return rc;
+}
+EXPORT_SYMBOL(dt_record_write);
+
+int dt_declare_version_set(const struct lu_env *env, struct dt_object *o,
+ struct thandle *th)
+{
+ struct lu_buf vbuf;
+ char *xname = XATTR_NAME_VERSION;
+
+ LASSERT(o);
+ vbuf.lb_buf = NULL;
+ vbuf.lb_len = sizeof(dt_obj_version_t);
+ return dt_declare_xattr_set(env, o, &vbuf, xname, 0, th);
+
+}
+EXPORT_SYMBOL(dt_declare_version_set);
+
+void dt_version_set(const struct lu_env *env, struct dt_object *o,
+ dt_obj_version_t version, struct thandle *th)
+{
+ struct lu_buf vbuf;
+ char *xname = XATTR_NAME_VERSION;
+ int rc;
+
+ LASSERT(o);
+ vbuf.lb_buf = &version;
+ vbuf.lb_len = sizeof(version);
+
+ rc = dt_xattr_set(env, o, &vbuf, xname, 0, th, BYPASS_CAPA);
+ if (rc < 0)
+ CDEBUG(D_INODE, "Can't set version, rc %d\n", rc);
+ return;
+}
+EXPORT_SYMBOL(dt_version_set);
+
+dt_obj_version_t dt_version_get(const struct lu_env *env, struct dt_object *o)
+{
+ struct lu_buf vbuf;
+ char *xname = XATTR_NAME_VERSION;
+ dt_obj_version_t version;
+ int rc;
+
+ LASSERT(o);
+ vbuf.lb_buf = &version;
+ vbuf.lb_len = sizeof(version);
+ rc = dt_xattr_get(env, o, &vbuf, xname, BYPASS_CAPA);
+ if (rc != sizeof(version)) {
+ CDEBUG(D_INODE, "Can't get version, rc %d\n", rc);
+ version = 0;
+ }
+ return version;
+}
+EXPORT_SYMBOL(dt_version_get);
+
+/* list of all supported index types */
+
+/* directories */
+const struct dt_index_features dt_directory_features;
+EXPORT_SYMBOL(dt_directory_features);
+
+/* scrub iterator */
+const struct dt_index_features dt_otable_features;
+EXPORT_SYMBOL(dt_otable_features);
+
+/* lfsck */
+const struct dt_index_features dt_lfsck_features = {
+ .dif_flags = DT_IND_UPDATE,
+ .dif_keysize_min = sizeof(struct lu_fid),
+ .dif_keysize_max = sizeof(struct lu_fid),
+ .dif_recsize_min = sizeof(__u8),
+ .dif_recsize_max = sizeof(__u8),
+ .dif_ptrsize = 4
+};
+EXPORT_SYMBOL(dt_lfsck_features);
+
+/* accounting indexes */
+const struct dt_index_features dt_acct_features = {
+ .dif_flags = DT_IND_UPDATE,
+ .dif_keysize_min = sizeof(__u64), /* 64-bit uid/gid */
+ .dif_keysize_max = sizeof(__u64), /* 64-bit uid/gid */
+ .dif_recsize_min = sizeof(struct lquota_acct_rec), /* 16 bytes */
+ .dif_recsize_max = sizeof(struct lquota_acct_rec), /* 16 bytes */
+ .dif_ptrsize = 4
+};
+EXPORT_SYMBOL(dt_acct_features);
+
+/* global quota files */
+const struct dt_index_features dt_quota_glb_features = {
+ .dif_flags = DT_IND_UPDATE,
+ /* a different key would have to be used for per-directory quota */
+ .dif_keysize_min = sizeof(__u64), /* 64-bit uid/gid */
+ .dif_keysize_max = sizeof(__u64), /* 64-bit uid/gid */
+ .dif_recsize_min = sizeof(struct lquota_glb_rec), /* 32 bytes */
+ .dif_recsize_max = sizeof(struct lquota_glb_rec), /* 32 bytes */
+ .dif_ptrsize = 4
+};
+EXPORT_SYMBOL(dt_quota_glb_features);
+
+/* slave quota files */
+const struct dt_index_features dt_quota_slv_features = {
+ .dif_flags = DT_IND_UPDATE,
+ /* a different key would have to be used for per-directory quota */
+ .dif_keysize_min = sizeof(__u64), /* 64-bit uid/gid */
+ .dif_keysize_max = sizeof(__u64), /* 64-bit uid/gid */
+ .dif_recsize_min = sizeof(struct lquota_slv_rec), /* 8 bytes */
+ .dif_recsize_max = sizeof(struct lquota_slv_rec), /* 8 bytes */
+ .dif_ptrsize = 4
+};
+EXPORT_SYMBOL(dt_quota_slv_features);
+
+/* Helper function returning which dt_index_features structure should be used,
+ * based on the FID sequence. This is used by the OBD_IDX_READ RPC. */
+static inline const struct dt_index_features *dt_index_feat_select(__u64 seq,
+ __u32 mode)
+{
+ if (seq == FID_SEQ_QUOTA_GLB) {
+ /* global quota index */
+ if (!S_ISREG(mode))
+ /* global quota index should be a regular file */
+ return ERR_PTR(-ENOENT);
+ return &dt_quota_glb_features;
+ } else if (seq == FID_SEQ_QUOTA) {
+ /* quota slave index */
+ if (!S_ISREG(mode))
+ /* slave index should be a regular file */
+ return ERR_PTR(-ENOENT);
+ return &dt_quota_slv_features;
+ } else if (seq >= FID_SEQ_NORMAL) {
+ /* object is part of the namespace, verify that it is a
+ * directory */
+ if (!S_ISDIR(mode))
+ /* sorry, we can only deal with directories */
+ return ERR_PTR(-ENOTDIR);
+ return &dt_directory_features;
+ }
+
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+/*
+ * Fill a lu_idxpage with key/record pairs read for transfer via OBD_IDX_READ
+ * RPC
+ *
+ * \param env - is the environment passed by the caller
+ * \param lp - is a pointer to the lu_page to fill
+ * \param nob - is the maximum number of bytes that should be copied
+ * \param iops - is the index operation vector associated with the index object
+ * \param it - is a pointer to the current iterator
+ * \param attr - is the index attribute to pass to iops->rec()
+ * \param arg - is a pointer to the idx_info structure
+ */
+static int dt_index_page_build(const struct lu_env *env, union lu_page *lp,
+ int nob, const struct dt_it_ops *iops,
+ struct dt_it *it, __u32 attr, void *arg)
+{
+ struct idx_info *ii = (struct idx_info *)arg;
+ struct lu_idxpage *lip = &lp->lp_idx;
+ char *entry;
+ int rc, size;
+
+ /* no support for variable key & record size for now */
+ LASSERT((ii->ii_flags & II_FL_VARKEY) == 0);
+ LASSERT((ii->ii_flags & II_FL_VARREC) == 0);
+
+ /* initialize the header of the new container */
+ memset(lip, 0, LIP_HDR_SIZE);
+ lip->lip_magic = LIP_MAGIC;
+ nob -= LIP_HDR_SIZE;
+
+ /* compute size needed to store a key/record pair */
+ size = ii->ii_recsize + ii->ii_keysize;
+ if ((ii->ii_flags & II_FL_NOHASH) == 0)
+ /* add hash if the client wants it */
+ size += sizeof(__u64);
+
+ entry = lip->lip_entries;
+ do {
+ char *tmp_entry = entry;
+ struct dt_key *key;
+ __u64 hash;
+
+ /* fetch 64-bit hash value */
+ hash = iops->store(env, it);
+ ii->ii_hash_end = hash;
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_OBD_IDX_READ_BREAK)) {
+ if (lip->lip_nr != 0)
+ GOTO(out, rc = 0);
+ }
+
+ if (nob < size) {
+ if (lip->lip_nr == 0)
+ GOTO(out, rc = -EINVAL);
+ GOTO(out, rc = 0);
+ }
+
+ if ((ii->ii_flags & II_FL_NOHASH) == 0) {
+ /* client wants the 64-bit hash value associated with
+ * each record */
+ memcpy(tmp_entry, &hash, sizeof(hash));
+ tmp_entry += sizeof(hash);
+ }
+
+ /* then the key value */
+ LASSERT(iops->key_size(env, it) == ii->ii_keysize);
+ key = iops->key(env, it);
+ memcpy(tmp_entry, key, ii->ii_keysize);
+ tmp_entry += ii->ii_keysize;
+
+ /* and finally the record */
+ rc = iops->rec(env, it, (struct dt_rec *)tmp_entry, attr);
+ if (rc != -ESTALE) {
+ if (rc != 0)
+ GOTO(out, rc);
+
+ /* hash/key/record successfully copied! */
+ lip->lip_nr++;
+ if (unlikely(lip->lip_nr == 1 && ii->ii_count == 0))
+ ii->ii_hash_start = hash;
+ entry = tmp_entry + ii->ii_recsize;
+ nob -= size;
+ }
+
+ /* move on to the next record */
+ do {
+ rc = iops->next(env, it);
+ } while (rc == -ESTALE);
+
+ } while (rc == 0);
+
+ GOTO(out, rc);
+out:
+ if (rc >= 0 && lip->lip_nr > 0)
+ /* one more container */
+ ii->ii_count++;
+ if (rc > 0)
+ /* no more entries */
+ ii->ii_hash_end = II_END_OFF;
+ return rc;
+}
+
+/*
+ * Walk index and fill lu_page containers with key/record pairs
+ *
+ * \param env - is the environment passed by the caller
+ * \param obj - is the index object to parse
+ * \param rdpg - is the lu_rdpg descriptor associated with the transfer
+ * \param filler - is the callback function responsible for filling a lu_page
+ * with key/record pairs in the format wanted by the caller
+ * \param arg - is an opaque argument passed to the filler function
+ *
+ * \retval sum (in bytes) of all filled lu_pages
+ * \retval -ve errno on failure
+ */
+int dt_index_walk(const struct lu_env *env, struct dt_object *obj,
+ const struct lu_rdpg *rdpg, dt_index_page_build_t filler,
+ void *arg)
+{
+ struct dt_it *it;
+ const struct dt_it_ops *iops;
+ unsigned int pageidx, nob, nlupgs = 0;
+ int rc;
+
+ LASSERT(rdpg->rp_pages != NULL);
+ LASSERT(obj->do_index_ops != NULL);
+
+ nob = rdpg->rp_count;
+ if (nob <= 0)
+ return -EFAULT;
+
+ /* Iterate through index and fill containers from @rdpg */
+ iops = &obj->do_index_ops->dio_it;
+ LASSERT(iops != NULL);
+ it = iops->init(env, obj, rdpg->rp_attrs, BYPASS_CAPA);
+ if (IS_ERR(it))
+ return PTR_ERR(it);
+
+ rc = iops->load(env, it, rdpg->rp_hash);
+ if (rc == 0) {
+ /*
+ * Iterator didn't find record with exactly the key requested.
+ *
+ * It is currently either
+ *
+ * - positioned above record with key less than
+ * requested---skip it.
+ * - or not positioned at all (is in IAM_IT_SKEWED
+ * state)---position it on the next item.
+ */
+ rc = iops->next(env, it);
+ } else if (rc > 0) {
+ rc = 0;
+ }
+
+ /* Fill containers one after the other. There might be multiple
+ * containers per physical page.
+ *
+ * At this point and across for-loop:
+ * rc == 0 -> ok, proceed.
+ * rc > 0 -> end of index.
+ * rc < 0 -> error. */
+ for (pageidx = 0; rc == 0 && nob > 0; pageidx++) {
+ union lu_page *lp;
+ int i;
+
+ LASSERT(pageidx < rdpg->rp_npages);
+ lp = kmap(rdpg->rp_pages[pageidx]);
+
+ /* fill lu pages */
+ for (i = 0; i < LU_PAGE_COUNT; i++, lp++, nob -= LU_PAGE_SIZE) {
+ rc = filler(env, lp, min_t(int, nob, LU_PAGE_SIZE),
+ iops, it, rdpg->rp_attrs, arg);
+ if (rc < 0)
+ break;
+ /* one more lu_page */
+ nlupgs++;
+ if (rc > 0)
+ /* end of index */
+ break;
+ }
+ kunmap(rdpg->rp_pages[pageidx]);
+ }
+
+ iops->put(env, it);
+ iops->fini(env, it);
+
+ if (rc >= 0)
+ rc = min_t(unsigned int, nlupgs * LU_PAGE_SIZE, rdpg->rp_count);
+
+ return rc;
+}
+EXPORT_SYMBOL(dt_index_walk);
+
+/**
+ * Walk key/record pairs of an index and copy them into 4KB containers to be
+ * transferred over the network. This is the common handler for OBD_IDX_READ
+ * RPC processing.
+ *
+ * \param env - is the environment passed by the caller
+ * \param dev - is the dt_device storing the index
+ * \param ii - is the idx_info structure packed by the client in the
+ * OBD_IDX_READ request
+ * \param rdpg - is the lu_rdpg descriptor
+ *
+ * \retval on success, return sum (in bytes) of all filled containers
+ * \retval appropriate error otherwise.
+ */
+int dt_index_read(const struct lu_env *env, struct dt_device *dev,
+ struct idx_info *ii, const struct lu_rdpg *rdpg)
+{
+ const struct dt_index_features *feat;
+ struct dt_object *obj;
+ int rc;
+
+ /* rp_count shouldn't be zero and should be a multiple of the container
+ * size */
+ if (rdpg->rp_count <= 0 || (rdpg->rp_count & (LU_PAGE_SIZE - 1)) != 0)
+ return -EFAULT;
+
+ if (fid_seq(&ii->ii_fid) >= FID_SEQ_NORMAL)
+ /* we don't support directory transfer via OBD_IDX_READ for the
+ * time being */
+ return -EOPNOTSUPP;
+
+ if (!fid_is_quota(&ii->ii_fid))
+ /* block access to all local files except quota files */
+ return -EPERM;
+
+ /* lookup index object subject to the transfer */
+ obj = dt_locate(env, dev, &ii->ii_fid);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+ if (dt_object_exists(obj) == 0)
+ GOTO(out, rc = -ENOENT);
+
+ /* fetch index features associated with index object */
+ feat = dt_index_feat_select(fid_seq(&ii->ii_fid),
+ lu_object_attr(&obj->do_lu));
+ if (IS_ERR(feat))
+ GOTO(out, rc = PTR_ERR(feat));
+
+ /* load index feature if not done already */
+ if (obj->do_index_ops == NULL) {
+ rc = obj->do_ops->do_index_try(env, obj, feat);
+ if (rc)
+ GOTO(out, rc);
+ }
+
+ /* fill ii_flags with supported index features */
+ ii->ii_flags &= II_FL_NOHASH;
+
+ ii->ii_keysize = feat->dif_keysize_max;
+ if ((feat->dif_flags & DT_IND_VARKEY) != 0) {
+ /* key size is variable */
+ ii->ii_flags |= II_FL_VARKEY;
+ /* we don't support variable key size for the time being */
+ GOTO(out, rc = -EOPNOTSUPP);
+ }
+
+ ii->ii_recsize = feat->dif_recsize_max;
+ if ((feat->dif_flags & DT_IND_VARREC) != 0) {
+ /* record size is variable */
+ ii->ii_flags |= II_FL_VARREC;
+ /* we don't support variable record size for the time being */
+ GOTO(out, rc = -EOPNOTSUPP);
+ }
+
+ if ((feat->dif_flags & DT_IND_NONUNQ) != 0)
+ /* key isn't necessarily unique */
+ ii->ii_flags |= II_FL_NONUNQ;
+
+ dt_read_lock(env, obj, 0);
+ /* fetch object version before walking the index */
+ ii->ii_version = dt_version_get(env, obj);
+
+ /* walk the index and fill lu_idxpages with key/record pairs */
+ rc = dt_index_walk(env, obj, rdpg, dt_index_page_build, ii);
+ dt_read_unlock(env, obj);
+
+ if (rc == 0) {
+ /* index is empty */
+ LASSERT(ii->ii_count == 0);
+ ii->ii_hash_end = II_END_OFF;
+ }
+
+ GOTO(out, rc);
+out:
+ lu_object_put(env, &obj->do_lu);
+ return rc;
+}
+EXPORT_SYMBOL(dt_index_read);
+
+#ifdef LPROCFS
+
+int lprocfs_dt_rd_blksize(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct dt_device *dt = data;
+ struct obd_statfs osfs;
+
+ int rc = dt_statfs(NULL, dt, &osfs);
+ if (rc == 0) {
+ *eof = 1;
+ rc = snprintf(page, count, "%u\n",
+ (unsigned) osfs.os_bsize);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_dt_rd_blksize);
+
+int lprocfs_dt_rd_kbytestotal(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct dt_device *dt = data;
+ struct obd_statfs osfs;
+
+ int rc = dt_statfs(NULL, dt, &osfs);
+ if (rc == 0) {
+ __u32 blk_size = osfs.os_bsize >> 10;
+ __u64 result = osfs.os_blocks;
+
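+ /* Scale the block count to KiB with shifts (os_bsize is a
+ * power of two), avoiding a 64-bit division. */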
+ while (blk_size >>= 1)
+ result <<= 1;
+
+ *eof = 1;
+ rc = snprintf(page, count, LPU64"\n", result);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_dt_rd_kbytestotal);
+
+int lprocfs_dt_rd_kbytesfree(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct dt_device *dt = data;
+ struct obd_statfs osfs;
+
+ int rc = dt_statfs(NULL, dt, &osfs);
+ if (rc == 0) {
+ __u32 blk_size = osfs.os_bsize >> 10;
+ __u64 result = osfs.os_bfree;
+
+ while (blk_size >>= 1)
+ result <<= 1;
+
+ *eof = 1;
+ rc = snprintf(page, count, LPU64"\n", result);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_dt_rd_kbytesfree);
+
+int lprocfs_dt_rd_kbytesavail(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct dt_device *dt = data;
+ struct obd_statfs osfs;
+
+ int rc = dt_statfs(NULL, dt, &osfs);
+ if (rc == 0) {
+ __u32 blk_size = osfs.os_bsize >> 10;
+ __u64 result = osfs.os_bavail;
+
+ while (blk_size >>= 1)
+ result <<= 1;
+
+ *eof = 1;
+ rc = snprintf(page, count, LPU64"\n", result);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_dt_rd_kbytesavail);
+
+int lprocfs_dt_rd_filestotal(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct dt_device *dt = data;
+ struct obd_statfs osfs;
+
+ int rc = dt_statfs(NULL, dt, &osfs);
+ if (rc == 0) {
+ *eof = 1;
+ rc = snprintf(page, count, LPU64"\n", osfs.os_files);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_dt_rd_filestotal);
+
+int lprocfs_dt_rd_filesfree(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct dt_device *dt = data;
+ struct obd_statfs osfs;
+
+ int rc = dt_statfs(NULL, dt, &osfs);
+ if (rc == 0) {
+ *eof = 1;
+ rc = snprintf(page, count, LPU64"\n", osfs.os_ffree);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_dt_rd_filesfree);
+
+#endif /* LPROCFS */
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
new file mode 100644
index 0000000..68fe71c
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -0,0 +1,1826 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdclass/genops.c
+ *
+ * These are the only exported functions; they provide some generic
+ * infrastructure for managing object devices.
+ */
+
+#define DEBUG_SUBSYSTEM S_CLASS
+#include <obd_ost.h>
+#include <obd_class.h>
+#include <lprocfs_status.h>
+
+extern struct list_head obd_types;
+spinlock_t obd_types_lock;
+
+struct kmem_cache *obd_device_cachep;
+struct kmem_cache *obdo_cachep;
+EXPORT_SYMBOL(obdo_cachep);
+struct kmem_cache *import_cachep;
+
+struct list_head obd_zombie_imports;
+struct list_head obd_zombie_exports;
+spinlock_t obd_zombie_impexp_lock;
+static void obd_zombie_impexp_notify(void);
+static void obd_zombie_export_add(struct obd_export *exp);
+static void obd_zombie_import_add(struct obd_import *imp);
+static void print_export_data(struct obd_export *exp,
+ const char *status, int locks);
+
+int (*ptlrpc_put_connection_superhack)(struct ptlrpc_connection *c);
+EXPORT_SYMBOL(ptlrpc_put_connection_superhack);
+
+/*
+ * support functions: we could use inter-module communication, but this
+ * is more portable to other OS's
+ */
+static struct obd_device *obd_device_alloc(void)
+{
+ struct obd_device *obd;
+
+ OBD_SLAB_ALLOC_PTR_GFP(obd, obd_device_cachep, __GFP_IO);
+ if (obd != NULL) {
+ obd->obd_magic = OBD_DEVICE_MAGIC;
+ }
+ return obd;
+}
+
+static void obd_device_free(struct obd_device *obd)
+{
+ LASSERT(obd != NULL);
+ LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC, "obd %p obd_magic %08x != %08x\n",
+ obd, obd->obd_magic, OBD_DEVICE_MAGIC);
+ if (obd->obd_namespace != NULL) {
+ CERROR("obd %p: namespace %p was not properly cleaned up (obd_force=%d)!\n",
+ obd, obd->obd_namespace, obd->obd_force);
+ LBUG();
+ }
+ lu_ref_fini(&obd->obd_reference);
+ OBD_SLAB_FREE_PTR(obd, obd_device_cachep);
+}
+
+struct obd_type *class_search_type(const char *name)
+{
+ struct list_head *tmp;
+ struct obd_type *type;
+
+ spin_lock(&obd_types_lock);
+ list_for_each(tmp, &obd_types) {
+ type = list_entry(tmp, struct obd_type, typ_chain);
+ if (strcmp(type->typ_name, name) == 0) {
+ spin_unlock(&obd_types_lock);
+ return type;
+ }
+ }
+ spin_unlock(&obd_types_lock);
+ return NULL;
+}
+EXPORT_SYMBOL(class_search_type);
+
+struct obd_type *class_get_type(const char *name)
+{
+ struct obd_type *type = class_search_type(name);
+
+ if (!type) {
+ const char *modname = name;
+
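+ /* Map legacy/alias type names onto the module that actually
+ * provides them before trying request_module(). */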
+ if (strcmp(modname, "obdfilter") == 0)
+ modname = "ofd";
+
+ if (strcmp(modname, LUSTRE_LWP_NAME) == 0)
+ modname = LUSTRE_OSP_NAME;
+
+ if (!strncmp(modname, LUSTRE_MDS_NAME, strlen(LUSTRE_MDS_NAME)))
+ modname = LUSTRE_MDT_NAME;
+
+ if (!request_module("%s", modname)) {
+ CDEBUG(D_INFO, "Loaded module '%s'\n", modname);
+ type = class_search_type(name);
+ } else {
+ LCONSOLE_ERROR_MSG(0x158, "Can't load module '%s'\n",
+ modname);
+ }
+ }
+ if (type) {
+ spin_lock(&type->obd_type_lock);
+ type->typ_refcnt++;
+ try_module_get(type->typ_dt_ops->o_owner);
+ spin_unlock(&type->obd_type_lock);
+ }
+ return type;
+}
+EXPORT_SYMBOL(class_get_type);
+
+void class_put_type(struct obd_type *type)
+{
+ LASSERT(type);
+ spin_lock(&type->obd_type_lock);
+ type->typ_refcnt--;
+ module_put(type->typ_dt_ops->o_owner);
+ spin_unlock(&type->obd_type_lock);
+}
+EXPORT_SYMBOL(class_put_type);
+
+#define CLASS_MAX_NAME 1024
+
+int class_register_type(struct obd_ops *dt_ops, struct md_ops *md_ops,
+ struct lprocfs_vars *vars, const char *name,
+ struct lu_device_type *ldt)
+{
+ struct obd_type *type;
+ int rc = 0;
+
+ /* sanity check */
+ LASSERT(strnlen(name, CLASS_MAX_NAME) < CLASS_MAX_NAME);
+
+ if (class_search_type(name)) {
+ CDEBUG(D_IOCTL, "Type %s already registered\n", name);
+ return -EEXIST;
+ }
+
+ rc = -ENOMEM;
+ OBD_ALLOC(type, sizeof(*type));
+ if (type == NULL)
+ return rc;
+
+ OBD_ALLOC_PTR(type->typ_dt_ops);
+ OBD_ALLOC_PTR(type->typ_md_ops);
+ OBD_ALLOC(type->typ_name, strlen(name) + 1);
+
+ if (type->typ_dt_ops == NULL ||
+ type->typ_md_ops == NULL ||
+ type->typ_name == NULL)
+ GOTO (failed, rc);
+
+ *(type->typ_dt_ops) = *dt_ops;
+ /* md_ops is optional */
+ if (md_ops)
+ *(type->typ_md_ops) = *md_ops;
+ strcpy(type->typ_name, name);
+ spin_lock_init(&type->obd_type_lock);
+
+#ifdef LPROCFS
+ type->typ_procroot = lprocfs_register(type->typ_name, proc_lustre_root,
+ vars, type);
+ if (IS_ERR(type->typ_procroot)) {
+ rc = PTR_ERR(type->typ_procroot);
+ type->typ_procroot = NULL;
+ GOTO (failed, rc);
+ }
+#endif
+ if (ldt != NULL) {
+ type->typ_lu = ldt;
+ rc = lu_device_type_init(ldt);
+ if (rc != 0)
+ GOTO (failed, rc);
+ }
+
+ spin_lock(&obd_types_lock);
+ list_add(&type->typ_chain, &obd_types);
+ spin_unlock(&obd_types_lock);
+
+ return 0;
+
+ failed:
+ if (type->typ_name != NULL)
+ OBD_FREE(type->typ_name, strlen(name) + 1);
+ if (type->typ_md_ops != NULL)
+ OBD_FREE_PTR(type->typ_md_ops);
+ if (type->typ_dt_ops != NULL)
+ OBD_FREE_PTR(type->typ_dt_ops);
+ OBD_FREE(type, sizeof(*type));
+ return rc;
+}
+EXPORT_SYMBOL(class_register_type);
+
+int class_unregister_type(const char *name)
+{
+ struct obd_type *type = class_search_type(name);
+
+ if (!type) {
+ CERROR("unknown obd type\n");
+ return -EINVAL;
+ }
+
+ if (type->typ_refcnt) {
+ CERROR("type %s has refcount (%d)\n", name, type->typ_refcnt);
+ /* This is a bad situation, let's make the best of it */
+ /* Remove ops, but leave the name for debugging */
+ OBD_FREE_PTR(type->typ_dt_ops);
+ OBD_FREE_PTR(type->typ_md_ops);
+ return -EBUSY;
+ }
+
+ if (type->typ_procroot) {
+ lprocfs_remove(&type->typ_procroot);
+ }
+
+ if (type->typ_lu)
+ lu_device_type_fini(type->typ_lu);
+
+ spin_lock(&obd_types_lock);
+ list_del(&type->typ_chain);
+ spin_unlock(&obd_types_lock);
+ OBD_FREE(type->typ_name, strlen(name) + 1);
+ if (type->typ_dt_ops != NULL)
+ OBD_FREE_PTR(type->typ_dt_ops);
+ if (type->typ_md_ops != NULL)
+ OBD_FREE_PTR(type->typ_md_ops);
+ OBD_FREE(type, sizeof(*type));
+ return 0;
+} /* class_unregister_type */
+EXPORT_SYMBOL(class_unregister_type);
+
+/**
+ * Create a new obd device.
+ *
+ * Find an empty slot in ::obd_devs[], create a new obd device in it.
+ *
+ * \param[in] type_name obd device type string.
+ * \param[in] name obd device name.
+ *
+ * \retval ERR_PTR(-errno) if creation fails, otherwise the newly created
+ * obd device pointer.
+ */
+struct obd_device *class_newdev(const char *type_name, const char *name)
+{
+ struct obd_device *result = NULL;
+ struct obd_device *newdev;
+ struct obd_type *type = NULL;
+ int i;
+ int new_obd_minor = 0;
+
+ if (strlen(name) >= MAX_OBD_NAME) {
+ CERROR("name/uuid must be < %u bytes long\n", MAX_OBD_NAME);
+ return ERR_PTR(-EINVAL);
+ }
+
+ type = class_get_type(type_name);
+ if (type == NULL) {
+ CERROR("OBD: unknown type: %s\n", type_name);
+ return ERR_PTR(-ENODEV);
+ }
+
+ newdev = obd_device_alloc();
+ if (newdev == NULL)
+ GOTO(out_type, result = ERR_PTR(-ENOMEM));
+
+ LASSERT(newdev->obd_magic == OBD_DEVICE_MAGIC);
+
+ write_lock(&obd_dev_lock);
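+ /* One pass over obd_devs[]: claim the first free slot for the new
+ * device while verifying no existing device already uses this name. */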
+ for (i = 0; i < class_devno_max(); i++) {
+ struct obd_device *obd = class_num2obd(i);
+
+ if (obd && (strcmp(name, obd->obd_name) == 0)) {
+ CERROR("Device %s already exists at %d, won't add\n",
+ name, i);
+ if (result) {
+ LASSERTF(result->obd_magic == OBD_DEVICE_MAGIC,
+ "%p obd_magic %08x != %08x\n", result,
+ result->obd_magic, OBD_DEVICE_MAGIC);
+ LASSERTF(result->obd_minor == new_obd_minor,
+ "%p obd_minor %d != %d\n", result,
+ result->obd_minor, new_obd_minor);
+
+ obd_devs[result->obd_minor] = NULL;
+ result->obd_name[0] = '\0';
+ }
+ result = ERR_PTR(-EEXIST);
+ break;
+ }
+ if (!result && !obd) {
+ result = newdev;
+ result->obd_minor = i;
+ new_obd_minor = i;
+ result->obd_type = type;
+ strncpy(result->obd_name, name,
+ sizeof(result->obd_name) - 1);
+ obd_devs[i] = result;
+ }
+ }
+ write_unlock(&obd_dev_lock);
+
+ if (result == NULL && i >= class_devno_max()) {
+ CERROR("all %u OBD devices used, increase MAX_OBD_DEVICES\n",
+ class_devno_max());
+ GOTO(out, result = ERR_PTR(-EOVERFLOW));
+ }
+
+ if (IS_ERR(result))
+ GOTO(out, result);
+
+ CDEBUG(D_IOCTL, "Adding new device %s (%p)\n",
+ result->obd_name, result);
+
+ return result;
+out:
+ obd_device_free(newdev);
+out_type:
+ class_put_type(type);
+ return result;
+}
+
+void class_release_dev(struct obd_device *obd)
+{
+ struct obd_type *obd_type = obd->obd_type;
+
+ LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC, "%p obd_magic %08x != %08x\n",
+ obd, obd->obd_magic, OBD_DEVICE_MAGIC);
+ LASSERTF(obd == obd_devs[obd->obd_minor], "obd %p != obd_devs[%d] %p\n",
+ obd, obd->obd_minor, obd_devs[obd->obd_minor]);
+ LASSERT(obd_type != NULL);
+
+ CDEBUG(D_INFO, "Release obd device %s at %d obd_type name =%s\n",
+ obd->obd_name, obd->obd_minor, obd->obd_type->typ_name);
+
+ write_lock(&obd_dev_lock);
+ obd_devs[obd->obd_minor] = NULL;
+ write_unlock(&obd_dev_lock);
+ obd_device_free(obd);
+
+ class_put_type(obd_type);
+}
+
+int class_name2dev(const char *name)
+{
+ int i;
+
+ if (!name)
+ return -1;
+
+ read_lock(&obd_dev_lock);
+ for (i = 0; i < class_devno_max(); i++) {
+ struct obd_device *obd = class_num2obd(i);
+
+ if (obd && strcmp(name, obd->obd_name) == 0) {
+ /* Make sure we finished attaching before we give
+ out any references */
+ LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
+ if (obd->obd_attached) {
+ read_unlock(&obd_dev_lock);
+ return i;
+ }
+ break;
+ }
+ }
+ read_unlock(&obd_dev_lock);
+
+ return -1;
+}
+EXPORT_SYMBOL(class_name2dev);
+
+struct obd_device *class_name2obd(const char *name)
+{
+ int dev = class_name2dev(name);
+
+ if (dev < 0 || dev > class_devno_max())
+ return NULL;
+ return class_num2obd(dev);
+}
+EXPORT_SYMBOL(class_name2obd);
+
+int class_uuid2dev(struct obd_uuid *uuid)
+{
+ int i;
+
+ read_lock(&obd_dev_lock);
+ for (i = 0; i < class_devno_max(); i++) {
+ struct obd_device *obd = class_num2obd(i);
+
+ if (obd && obd_uuid_equals(uuid, &obd->obd_uuid)) {
+ LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
+ read_unlock(&obd_dev_lock);
+ return i;
+ }
+ }
+ read_unlock(&obd_dev_lock);
+
+ return -1;
+}
+EXPORT_SYMBOL(class_uuid2dev);
+
+struct obd_device *class_uuid2obd(struct obd_uuid *uuid)
+{
+ int dev = class_uuid2dev(uuid);
+ if (dev < 0)
+ return NULL;
+ return class_num2obd(dev);
+}
+EXPORT_SYMBOL(class_uuid2obd);
+
+/**
+ * Get obd device from ::obd_devs[]
+ *
+ * \param num [in] array index
+ *
+ * \retval NULL if ::obd_devs[\a num] does not contain an obd device,
+ * otherwise return the obd device there.
+ */
+struct obd_device *class_num2obd(int num)
+{
+ struct obd_device *obd = NULL;
+
+ if (num < class_devno_max()) {
+ obd = obd_devs[num];
+ if (obd == NULL)
+ return NULL;
+
+ LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC,
+ "%p obd_magic %08x != %08x\n",
+ obd, obd->obd_magic, OBD_DEVICE_MAGIC);
+ LASSERTF(obd->obd_minor == num,
+ "%p obd_minor %0d != %0d\n",
+ obd, obd->obd_minor, num);
+ }
+
+ return obd;
+}
+EXPORT_SYMBOL(class_num2obd);
+
+/**
+ * Get the obd device count. Devices in any
+ * state are counted.
+ * \retval obd device count
+ */
+int get_devices_count(void)
+{
+ int index, max_index = class_devno_max(), dev_count = 0;
+
+ read_lock(&obd_dev_lock);
+ for (index = 0; index < max_index; index++) {
+ struct obd_device *obd = class_num2obd(index);
+ if (obd != NULL)
+ dev_count++;
+ }
+ read_unlock(&obd_dev_lock);
+
+ return dev_count;
+}
+EXPORT_SYMBOL(get_devices_count);
+
+void class_obd_list(void)
+{
+ char *status;
+ int i;
+
+ read_lock(&obd_dev_lock);
+ for (i = 0; i < class_devno_max(); i++) {
+ struct obd_device *obd = class_num2obd(i);
+
+ if (obd == NULL)
+ continue;
+ if (obd->obd_stopping)
+ status = "ST";
+ else if (obd->obd_set_up)
+ status = "UP";
+ else if (obd->obd_attached)
+ status = "AT";
+ else
+ status = "--";
+ LCONSOLE(D_CONFIG, "%3d %s %s %s %s %d\n",
+ i, status, obd->obd_type->typ_name,
+ obd->obd_name, obd->obd_uuid.uuid,
+ atomic_read(&obd->obd_refcount));
+ }
+ read_unlock(&obd_dev_lock);
+ return;
+}
+
+/* Search for a client OBD connected to tgt_uuid. If grp_uuid is
+ specified, then only the client with that uuid is returned,
+ otherwise any client connected to the tgt is returned. */
+struct obd_device *class_find_client_obd(struct obd_uuid *tgt_uuid,
+ const char *typ_name,
+ struct obd_uuid *grp_uuid)
+{
+ int i;
+
+ read_lock(&obd_dev_lock);
+ for (i = 0; i < class_devno_max(); i++) {
+ struct obd_device *obd = class_num2obd(i);
+
+ if (obd == NULL)
+ continue;
+ if ((strncmp(obd->obd_type->typ_name, typ_name,
+ strlen(typ_name)) == 0)) {
+ if (obd_uuid_equals(tgt_uuid,
+ &obd->u.cli.cl_target_uuid) &&
+ ((grp_uuid) ? obd_uuid_equals(grp_uuid,
+ &obd->obd_uuid) : 1)) {
+ read_unlock(&obd_dev_lock);
+ return obd;
+ }
+ }
+ }
+ read_unlock(&obd_dev_lock);
+
+ return NULL;
+}
+EXPORT_SYMBOL(class_find_client_obd);
+
+/* Iterate the obd_device list looking for devices that have grp_uuid. Start
+ searching at *next, and if a device is found, the next index to look
+ at is saved in *next. If next is NULL, then the first matching device
+ will always be returned. */
+struct obd_device *class_devices_in_group(struct obd_uuid *grp_uuid, int *next)
+{
+ int i;
+
+ if (next == NULL)
+ i = 0;
+ else if (*next >= 0 && *next < class_devno_max())
+ i = *next;
+ else
+ return NULL;
+
+ read_lock(&obd_dev_lock);
+ for (; i < class_devno_max(); i++) {
+ struct obd_device *obd = class_num2obd(i);
+
+ if (obd == NULL)
+ continue;
+ if (obd_uuid_equals(grp_uuid, &obd->obd_uuid)) {
+ if (next != NULL)
+ *next = i + 1;
+ read_unlock(&obd_dev_lock);
+ return obd;
+ }
+ }
+ read_unlock(&obd_dev_lock);
+
+ return NULL;
+}
+EXPORT_SYMBOL(class_devices_in_group);
+
+/**
+ * Notify every relevant OBD that the sptlrpc configuration log for \a fsname
+ * has changed, so each can adjust its sptlrpc settings accordingly.
+ */
+int class_notify_sptlrpc_conf(const char *fsname, int namelen)
+{
+ struct obd_device *obd;
+ const char *type;
+ int i, rc = 0, rc2;
+
+ LASSERT(namelen > 0);
+
+ read_lock(&obd_dev_lock);
+ for (i = 0; i < class_devno_max(); i++) {
+ obd = class_num2obd(i);
+
+ if (obd == NULL || obd->obd_set_up == 0 || obd->obd_stopping)
+ continue;
+
+ /* only notify mdc, osc, mdt, ost */
+ type = obd->obd_type->typ_name;
+ if (strcmp(type, LUSTRE_MDC_NAME) != 0 &&
+ strcmp(type, LUSTRE_OSC_NAME) != 0 &&
+ strcmp(type, LUSTRE_MDT_NAME) != 0 &&
+ strcmp(type, LUSTRE_OST_NAME) != 0)
+ continue;
+
+ if (strncmp(obd->obd_name, fsname, namelen))
+ continue;
+
+ class_incref(obd, __func__, obd);
+ read_unlock(&obd_dev_lock);
+ rc2 = obd_set_info_async(NULL, obd->obd_self_export,
+ sizeof(KEY_SPTLRPC_CONF),
+ KEY_SPTLRPC_CONF, 0, NULL, NULL);
+ rc = rc ? rc : rc2;
+ class_decref(obd, __func__, obd);
+ read_lock(&obd_dev_lock);
+ }
+ read_unlock(&obd_dev_lock);
+ return rc;
+}
+EXPORT_SYMBOL(class_notify_sptlrpc_conf);
+
+void obd_cleanup_caches(void)
+{
+ if (obd_device_cachep) {
+ kmem_cache_destroy(obd_device_cachep);
+ obd_device_cachep = NULL;
+ }
+ if (obdo_cachep) {
+ kmem_cache_destroy(obdo_cachep);
+ obdo_cachep = NULL;
+ }
+ if (import_cachep) {
+ kmem_cache_destroy(import_cachep);
+ import_cachep = NULL;
+ }
+ if (capa_cachep) {
+ kmem_cache_destroy(capa_cachep);
+ capa_cachep = NULL;
+ }
+}
+
+int obd_init_caches(void)
+{
+ LASSERT(obd_device_cachep == NULL);
+ obd_device_cachep = kmem_cache_create("ll_obd_dev_cache",
+ sizeof(struct obd_device),
+ 0, 0, NULL);
+ if (!obd_device_cachep)
+ GOTO(out, -ENOMEM);
+
+ LASSERT(obdo_cachep == NULL);
+ obdo_cachep = kmem_cache_create("ll_obdo_cache", sizeof(struct obdo),
+ 0, 0, NULL);
+ if (!obdo_cachep)
+ GOTO(out, -ENOMEM);
+
+ LASSERT(import_cachep == NULL);
+ import_cachep = kmem_cache_create("ll_import_cache",
+ sizeof(struct obd_import),
+ 0, 0, NULL);
+ if (!import_cachep)
+ GOTO(out, -ENOMEM);
+
+ LASSERT(capa_cachep == NULL);
+ capa_cachep = kmem_cache_create("capa_cache",
+ sizeof(struct obd_capa), 0, 0, NULL);
+ if (!capa_cachep)
+ GOTO(out, -ENOMEM);
+
+ return 0;
+ out:
+ obd_cleanup_caches();
+ return -ENOMEM;
+
+}
+
+/* map a connection handle to its export */
+struct obd_export *class_conn2export(struct lustre_handle *conn)
+{
+ struct obd_export *export;
+
+ if (!conn) {
+ CDEBUG(D_CACHE, "looking for null handle\n");
+ return NULL;
+ }
+
+ if (conn->cookie == -1) { /* this means assign a new connection */
+ CDEBUG(D_CACHE, "want a new connection\n");
+ return NULL;
+ }
+
+ CDEBUG(D_INFO, "looking for export cookie "LPX64"\n", conn->cookie);
+ export = class_handle2object(conn->cookie);
+ return export;
+}
+EXPORT_SYMBOL(class_conn2export);
+
+struct obd_device *class_exp2obd(struct obd_export *exp)
+{
+ if (exp)
+ return exp->exp_obd;
+ return NULL;
+}
+EXPORT_SYMBOL(class_exp2obd);
+
+struct obd_device *class_conn2obd(struct lustre_handle *conn)
+{
+ struct obd_export *export;
+ export = class_conn2export(conn);
+ if (export) {
+ struct obd_device *obd = export->exp_obd;
+ class_export_put(export);
+ return obd;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(class_conn2obd);
+
+struct obd_import *class_exp2cliimp(struct obd_export *exp)
+{
+ struct obd_device *obd = exp->exp_obd;
+ if (obd == NULL)
+ return NULL;
+ return obd->u.cli.cl_import;
+}
+EXPORT_SYMBOL(class_exp2cliimp);
+
+struct obd_import *class_conn2cliimp(struct lustre_handle *conn)
+{
+ struct obd_device *obd = class_conn2obd(conn);
+ if (obd == NULL)
+ return NULL;
+ return obd->u.cli.cl_import;
+}
+EXPORT_SYMBOL(class_conn2cliimp);
+
+/* Export management functions */
+static void class_export_destroy(struct obd_export *exp)
+{
+ struct obd_device *obd = exp->exp_obd;
+
+ LASSERT_ATOMIC_ZERO(&exp->exp_refcount);
+ LASSERT(obd != NULL);
+
+ CDEBUG(D_IOCTL, "destroying export %p/%s for %s\n", exp,
+ exp->exp_client_uuid.uuid, obd->obd_name);
+
+ /* "Local" exports (lctl, LOV->{mdc,osc}) have no connection. */
+ if (exp->exp_connection)
+ ptlrpc_put_connection_superhack(exp->exp_connection);
+
+ LASSERT(list_empty(&exp->exp_outstanding_replies));
+ LASSERT(list_empty(&exp->exp_uncommitted_replies));
+ LASSERT(list_empty(&exp->exp_req_replay_queue));
+ LASSERT(list_empty(&exp->exp_hp_rpcs));
+ obd_destroy_export(exp);
+ class_decref(obd, "export", exp);
+
+ OBD_FREE_RCU(exp, sizeof(*exp), &exp->exp_handle);
+}
+
+static void export_handle_addref(void *export)
+{
+ class_export_get(export);
+}
+
+static struct portals_handle_ops export_handle_ops = {
+ .hop_addref = export_handle_addref,
+ .hop_free = NULL,
+};
+
+struct obd_export *class_export_get(struct obd_export *exp)
+{
+ atomic_inc(&exp->exp_refcount);
+ CDEBUG(D_INFO, "GETting export %p : new refcount %d\n", exp,
+ atomic_read(&exp->exp_refcount));
+ return exp;
+}
+EXPORT_SYMBOL(class_export_get);
+
+void class_export_put(struct obd_export *exp)
+{
+ LASSERT(exp != NULL);
+ LASSERT_ATOMIC_GT_LT(&exp->exp_refcount, 0, LI_POISON);
+ CDEBUG(D_INFO, "PUTting export %p : new refcount %d\n", exp,
+ atomic_read(&exp->exp_refcount) - 1);
+
+ if (atomic_dec_and_test(&exp->exp_refcount)) {
+ LASSERT(!list_empty(&exp->exp_obd_chain));
+ CDEBUG(D_IOCTL, "final put %p/%s\n",
+ exp, exp->exp_client_uuid.uuid);
+
+ /* release nid stat reference */
+ lprocfs_exp_cleanup(exp);
+
+ obd_zombie_export_add(exp);
+ }
+}
+EXPORT_SYMBOL(class_export_put);
+
+/* Creates a new export, adds it to the hash table, and returns a
+ * pointer to it. The refcount is 2: one for the hash reference, and
+ * one for the pointer returned by this function. */
+struct obd_export *class_new_export(struct obd_device *obd,
+ struct obd_uuid *cluuid)
+{
+ struct obd_export *export;
+ cfs_hash_t *hash = NULL;
+ int rc = 0;
+
+ OBD_ALLOC_PTR(export);
+ if (!export)
+ return ERR_PTR(-ENOMEM);
+
+ export->exp_conn_cnt = 0;
+ export->exp_lock_hash = NULL;
+ export->exp_flock_hash = NULL;
+ atomic_set(&export->exp_refcount, 2);
+ atomic_set(&export->exp_rpc_count, 0);
+ atomic_set(&export->exp_cb_count, 0);
+ atomic_set(&export->exp_locks_count, 0);
+#if LUSTRE_TRACKS_LOCK_EXP_REFS
+ INIT_LIST_HEAD(&export->exp_locks_list);
+ spin_lock_init(&export->exp_locks_list_guard);
+#endif
+ atomic_set(&export->exp_replay_count, 0);
+ export->exp_obd = obd;
+ INIT_LIST_HEAD(&export->exp_outstanding_replies);
+ spin_lock_init(&export->exp_uncommitted_replies_lock);
+ INIT_LIST_HEAD(&export->exp_uncommitted_replies);
+ INIT_LIST_HEAD(&export->exp_req_replay_queue);
+ INIT_LIST_HEAD(&export->exp_handle.h_link);
+ INIT_LIST_HEAD(&export->exp_hp_rpcs);
+ class_handle_hash(&export->exp_handle, &export_handle_ops);
+ export->exp_last_request_time = cfs_time_current_sec();
+ spin_lock_init(&export->exp_lock);
+ spin_lock_init(&export->exp_rpc_lock);
+ INIT_HLIST_NODE(&export->exp_uuid_hash);
+ INIT_HLIST_NODE(&export->exp_nid_hash);
+ spin_lock_init(&export->exp_bl_list_lock);
+ INIT_LIST_HEAD(&export->exp_bl_list);
+
+ export->exp_sp_peer = LUSTRE_SP_ANY;
+ export->exp_flvr.sf_rpc = SPTLRPC_FLVR_INVALID;
+ export->exp_client_uuid = *cluuid;
+ obd_init_export(export);
+
+ spin_lock(&obd->obd_dev_lock);
+ /* shouldn't happen, but might race */
+ if (obd->obd_stopping)
+ GOTO(exit_unlock, rc = -ENODEV);
+
+ hash = cfs_hash_getref(obd->obd_uuid_hash);
+ if (hash == NULL)
+ GOTO(exit_unlock, rc = -ENODEV);
+ spin_unlock(&obd->obd_dev_lock);
+
+ if (!obd_uuid_equals(cluuid, &obd->obd_uuid)) {
+ rc = cfs_hash_add_unique(hash, cluuid, &export->exp_uuid_hash);
+ if (rc != 0) {
+ LCONSOLE_WARN("%s: denying duplicate export for %s, %d\n",
+ obd->obd_name, cluuid->uuid, rc);
+ GOTO(exit_err, rc = -EALREADY);
+ }
+ }
+
+ spin_lock(&obd->obd_dev_lock);
+ if (obd->obd_stopping) {
+ cfs_hash_del(hash, cluuid, &export->exp_uuid_hash);
+ GOTO(exit_unlock, rc = -ENODEV);
+ }
+
+ class_incref(obd, "export", export);
+ list_add(&export->exp_obd_chain, &export->exp_obd->obd_exports);
+ list_add_tail(&export->exp_obd_chain_timed,
+ &export->exp_obd->obd_exports_timed);
+ export->exp_obd->obd_num_exports++;
+ spin_unlock(&obd->obd_dev_lock);
+ cfs_hash_putref(hash);
+ return export;
+
+exit_unlock:
+ spin_unlock(&obd->obd_dev_lock);
+exit_err:
+ if (hash)
+ cfs_hash_putref(hash);
+ class_handle_unhash(&export->exp_handle);
+ LASSERT(hlist_unhashed(&export->exp_uuid_hash));
+ obd_destroy_export(export);
+ OBD_FREE_PTR(export);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL(class_new_export);
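+
+/*
+ * Illustrative note (not part of the original source): callers such as
+ * class_connect() below typically drop the pointer reference returned here
+ * with class_export_put() once the handle cookie has been recorded; the
+ * second (hash) reference is dropped later by class_unlink_export().
+ */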
+
+void class_unlink_export(struct obd_export *exp)
+{
+ class_handle_unhash(&exp->exp_handle);
+
+ spin_lock(&exp->exp_obd->obd_dev_lock);
+ /* delete the uuid-export hash item from the hash table */
+ if (!hlist_unhashed(&exp->exp_uuid_hash))
+ cfs_hash_del(exp->exp_obd->obd_uuid_hash,
+ &exp->exp_client_uuid,
+ &exp->exp_uuid_hash);
+
+ list_move(&exp->exp_obd_chain, &exp->exp_obd->obd_unlinked_exports);
+ list_del_init(&exp->exp_obd_chain_timed);
+ exp->exp_obd->obd_num_exports--;
+ spin_unlock(&exp->exp_obd->obd_dev_lock);
+ class_export_put(exp);
+}
+EXPORT_SYMBOL(class_unlink_export);
+
+/* Import management functions */
+void class_import_destroy(struct obd_import *imp)
+{
+ CDEBUG(D_IOCTL, "destroying import %p for %s\n", imp,
+ imp->imp_obd->obd_name);
+
+ LASSERT_ATOMIC_ZERO(&imp->imp_refcount);
+
+ ptlrpc_put_connection_superhack(imp->imp_connection);
+
+ while (!list_empty(&imp->imp_conn_list)) {
+ struct obd_import_conn *imp_conn;
+
+ imp_conn = list_entry(imp->imp_conn_list.next,
+ struct obd_import_conn, oic_item);
+ list_del_init(&imp_conn->oic_item);
+ ptlrpc_put_connection_superhack(imp_conn->oic_conn);
+ OBD_FREE(imp_conn, sizeof(*imp_conn));
+ }
+
+ LASSERT(imp->imp_sec == NULL);
+ class_decref(imp->imp_obd, "import", imp);
+ OBD_FREE_RCU(imp, sizeof(*imp), &imp->imp_handle);
+}
+
+static void import_handle_addref(void *import)
+{
+ class_import_get(import);
+}
+
+static struct portals_handle_ops import_handle_ops = {
+ .hop_addref = import_handle_addref,
+ .hop_free = NULL,
+};
+
+struct obd_import *class_import_get(struct obd_import *import)
+{
+ atomic_inc(&import->imp_refcount);
+ CDEBUG(D_INFO, "import %p refcount=%d obd=%s\n", import,
+ atomic_read(&import->imp_refcount),
+ import->imp_obd->obd_name);
+ return import;
+}
+EXPORT_SYMBOL(class_import_get);
+
+void class_import_put(struct obd_import *imp)
+{
+ LASSERT(list_empty(&imp->imp_zombie_chain));
+ LASSERT_ATOMIC_GT_LT(&imp->imp_refcount, 0, LI_POISON);
+
+ CDEBUG(D_INFO, "import %p refcount=%d obd=%s\n", imp,
+ atomic_read(&imp->imp_refcount) - 1,
+ imp->imp_obd->obd_name);
+
+ if (atomic_dec_and_test(&imp->imp_refcount)) {
+ CDEBUG(D_INFO, "final put import %p\n", imp);
+ obd_zombie_import_add(imp);
+ }
+
+ /* catch possible import put race */
+ LASSERT_ATOMIC_GE_LT(&imp->imp_refcount, 0, LI_POISON);
+}
+EXPORT_SYMBOL(class_import_put);
+
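+/* Initialize the adaptive-timeout state of a newly allocated import. */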
+static void init_imp_at(struct imp_at *at)
+{
+ int i;
+
+ at_init(&at->iat_net_latency, 0, 0);
+ for (i = 0; i < IMP_AT_MAX_PORTALS; i++) {
+ /* Max service estimates are tracked on the server side, so
+ don't use the AT history here, just use the last reported
+ value. (But keep the history for the proc histogram and worst_ever.) */
+ at_init(&at->iat_service_estimate[i], INITIAL_CONNECT_TIMEOUT,
+ AT_FLG_NOHIST);
+ }
+}
+
+struct obd_import *class_new_import(struct obd_device *obd)
+{
+ struct obd_import *imp;
+
+ OBD_ALLOC(imp, sizeof(*imp));
+ if (imp == NULL)
+ return NULL;
+
+ INIT_LIST_HEAD(&imp->imp_pinger_chain);
+ INIT_LIST_HEAD(&imp->imp_zombie_chain);
+ INIT_LIST_HEAD(&imp->imp_replay_list);
+ INIT_LIST_HEAD(&imp->imp_sending_list);
+ INIT_LIST_HEAD(&imp->imp_delayed_list);
+ spin_lock_init(&imp->imp_lock);
+ imp->imp_last_success_conn = 0;
+ imp->imp_state = LUSTRE_IMP_NEW;
+ imp->imp_obd = class_incref(obd, "import", imp);
+ mutex_init(&imp->imp_sec_mutex);
+ init_waitqueue_head(&imp->imp_recovery_waitq);
+
+ atomic_set(&imp->imp_refcount, 2);
+ atomic_set(&imp->imp_unregistering, 0);
+ atomic_set(&imp->imp_inflight, 0);
+ atomic_set(&imp->imp_replay_inflight, 0);
+ atomic_set(&imp->imp_inval_count, 0);
+ INIT_LIST_HEAD(&imp->imp_conn_list);
+ INIT_LIST_HEAD(&imp->imp_handle.h_link);
+ class_handle_hash(&imp->imp_handle, &import_handle_ops);
+ init_imp_at(&imp->imp_at);
+
+ /* the default magic is V2, will be used in connect RPC, and
+ * then adjusted according to the flags in request/reply. */
+ imp->imp_msg_magic = LUSTRE_MSG_MAGIC_V2;
+
+ return imp;
+}
+EXPORT_SYMBOL(class_new_import);
+
+void class_destroy_import(struct obd_import *import)
+{
+ LASSERT(import != NULL);
+ LASSERT(import != LP_POISON);
+
+ class_handle_unhash(&import->imp_handle);
+
+ spin_lock(&import->imp_lock);
+ import->imp_generation++;
+ spin_unlock(&import->imp_lock);
+ class_import_put(import);
+}
+EXPORT_SYMBOL(class_destroy_import);
+
+#if LUSTRE_TRACKS_LOCK_EXP_REFS
+
+void __class_export_add_lock_ref(struct obd_export *exp, struct ldlm_lock *lock)
+{
+ spin_lock(&exp->exp_locks_list_guard);
+
+ LASSERT(lock->l_exp_refs_nr >= 0);
+
+ if (lock->l_exp_refs_target != NULL &&
+ lock->l_exp_refs_target != exp) {
+ LCONSOLE_WARN("setting export %p for lock %p which already has export %p\n",
+ exp, lock, lock->l_exp_refs_target);
+ }
+ if ((lock->l_exp_refs_nr++) == 0) {
+ list_add(&lock->l_exp_refs_link, &exp->exp_locks_list);
+ lock->l_exp_refs_target = exp;
+ }
+ CDEBUG(D_INFO, "lock = %p, export = %p, refs = %u\n",
+ lock, exp, lock->l_exp_refs_nr);
+ spin_unlock(&exp->exp_locks_list_guard);
+}
+EXPORT_SYMBOL(__class_export_add_lock_ref);
+
+void __class_export_del_lock_ref(struct obd_export *exp, struct ldlm_lock *lock)
+{
+ spin_lock(&exp->exp_locks_list_guard);
+ LASSERT(lock->l_exp_refs_nr > 0);
+ if (lock->l_exp_refs_target != exp) {
+ LCONSOLE_WARN("lock %p, mismatching export pointers: %p, %p\n",
+ lock, lock->l_exp_refs_target, exp);
+ }
+ if (--lock->l_exp_refs_nr == 0) {
+ list_del_init(&lock->l_exp_refs_link);
+ lock->l_exp_refs_target = NULL;
+ }
+ CDEBUG(D_INFO, "lock = %p, export = %p, refs = %u\n",
+ lock, exp, lock->l_exp_refs_nr);
+ spin_unlock(&exp->exp_locks_list_guard);
+}
+EXPORT_SYMBOL(__class_export_del_lock_ref);
+#endif
+
+/* A connection defines an export context in which preallocation can
+ be managed. This releases the export pointer reference, and returns
+ the export handle, so the export refcount is 1 when this function
+ returns. */
+int class_connect(struct lustre_handle *conn, struct obd_device *obd,
+ struct obd_uuid *cluuid)
+{
+ struct obd_export *export;
+ LASSERT(conn != NULL);
+ LASSERT(obd != NULL);
+ LASSERT(cluuid != NULL);
+
+ export = class_new_export(obd, cluuid);
+ if (IS_ERR(export))
+ return PTR_ERR(export);
+
+ conn->cookie = export->exp_handle.h_cookie;
+ class_export_put(export);
+
+ CDEBUG(D_IOCTL, "connect: client %s, cookie "LPX64"\n",
+ cluuid->uuid, conn->cookie);
+ return 0;
+}
+EXPORT_SYMBOL(class_connect);
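+
+/*
+ * Minimal usage sketch (illustrative only, not taken from the original
+ * source): obtain a handle, temporarily resolve it to an export, and
+ * balance the reference taken by class_conn2export():
+ *
+ *   struct lustre_handle conn;
+ *   int rc = class_connect(&conn, obd, &cluuid);
+ *
+ *   if (rc == 0) {
+ *           struct obd_export *exp = class_conn2export(&conn);
+ *
+ *           ... use exp ...
+ *           class_export_put(exp);
+ *   }
+ *
+ * Teardown of the export itself goes through class_disconnect().
+ */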
+
+/* if export is involved in recovery then clean up related things */
+void class_export_recovery_cleanup(struct obd_export *exp)
+{
+ struct obd_device *obd = exp->exp_obd;
+
+ spin_lock(&obd->obd_recovery_task_lock);
+ if (exp->exp_delayed)
+ obd->obd_delayed_clients--;
+ if (obd->obd_recovering) {
+ if (exp->exp_in_recovery) {
+ spin_lock(&exp->exp_lock);
+ exp->exp_in_recovery = 0;
+ spin_unlock(&exp->exp_lock);
+ LASSERT_ATOMIC_POS(&obd->obd_connected_clients);
+ atomic_dec(&obd->obd_connected_clients);
+ }
+
+ /* if called during recovery, update the
+ * obd_stale_clients counter;
+ * lightweight exports are not counted */
+ if (exp->exp_failed &&
+ (exp_connect_flags(exp) & OBD_CONNECT_LIGHTWEIGHT) == 0)
+ exp->exp_obd->obd_stale_clients++;
+ }
+ spin_unlock(&obd->obd_recovery_task_lock);
+ /** Cleanup req replay fields */
+ if (exp->exp_req_replay_needed) {
+ spin_lock(&exp->exp_lock);
+ exp->exp_req_replay_needed = 0;
+ spin_unlock(&exp->exp_lock);
+ LASSERT(atomic_read(&obd->obd_req_replay_clients));
+ atomic_dec(&obd->obd_req_replay_clients);
+ }
+ /** Cleanup lock replay data */
+ if (exp->exp_lock_replay_needed) {
+ spin_lock(&exp->exp_lock);
+ exp->exp_lock_replay_needed = 0;
+ spin_unlock(&exp->exp_lock);
+ LASSERT(atomic_read(&obd->obd_lock_replay_clients));
+ atomic_dec(&obd->obd_lock_replay_clients);
+ }
+}
+
+/* This function removes 1-3 references from the export:
+ * 1 - for the export pointer passed in
+ * and, if the disconnect is really needed:
+ * 2 - for the removal from the hash table
+ * 3 - in class_unlink_export
+ * The export pointer passed to this function may be destroyed. */
+int class_disconnect(struct obd_export *export)
+{
+ int already_disconnected;
+
+ if (export == NULL) {
+ CWARN("attempting to free NULL export %p\n", export);
+ return -EINVAL;
+ }
+
+ spin_lock(&export->exp_lock);
+ already_disconnected = export->exp_disconnected;
+ export->exp_disconnected = 1;
+ spin_unlock(&export->exp_lock);
+
+ /* class_cleanup(), abort_recovery(), and class_fail_export()
+ * all end up in here, and if any of them race we shouldn't
+ * call extra class_export_puts(). */
+ if (already_disconnected) {
+ LASSERT(hlist_unhashed(&export->exp_nid_hash));
+ GOTO(no_disconn, already_disconnected);
+ }
+
+ CDEBUG(D_IOCTL, "disconnect: cookie "LPX64"\n",
+ export->exp_handle.h_cookie);
+
+ if (!hlist_unhashed(&export->exp_nid_hash))
+ cfs_hash_del(export->exp_obd->obd_nid_hash,
+ &export->exp_connection->c_peer.nid,
+ &export->exp_nid_hash);
+
+ class_export_recovery_cleanup(export);
+ class_unlink_export(export);
+no_disconn:
+ class_export_put(export);
+ return 0;
+}
+EXPORT_SYMBOL(class_disconnect);
+
+/* Return non-zero for a fully connected export */
+int class_connected_export(struct obd_export *exp)
+{
+ if (exp) {
+ int connected;
+ spin_lock(&exp->exp_lock);
+ connected = (exp->exp_conn_cnt > 0);
+ spin_unlock(&exp->exp_lock);
+ return connected;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(class_connected_export);
+
+static void class_disconnect_export_list(struct list_head *list,
+ enum obd_option flags)
+{
+ int rc;
+ struct obd_export *exp;
+
+ /* It's possible that an export may disconnect itself, but
+ * nothing else will be added to this list. */
+ while (!list_empty(list)) {
+ exp = list_entry(list->next, struct obd_export,
+ exp_obd_chain);
+ /* needed so CDEBUG can safely be called after obd_disconnect */
+ class_export_get(exp);
+
+ spin_lock(&exp->exp_lock);
+ exp->exp_flags = flags;
+ spin_unlock(&exp->exp_lock);
+
+ if (obd_uuid_equals(&exp->exp_client_uuid,
+ &exp->exp_obd->obd_uuid)) {
+ CDEBUG(D_HA,
+ "exp %p export uuid == obd uuid, don't discon\n",
+ exp);
+ /* Need to delete this now so we don't end up pointing
+ * to work_list later when this export is cleaned up. */
+ list_del_init(&exp->exp_obd_chain);
+ class_export_put(exp);
+ continue;
+ }
+
+ class_export_get(exp);
+ CDEBUG(D_HA, "%s: disconnecting export at %s (%p), "
+ "last request at "CFS_TIME_T"\n",
+ exp->exp_obd->obd_name, obd_export_nid2str(exp),
+ exp, exp->exp_last_request_time);
+ /* release one export reference anyway */
+ rc = obd_disconnect(exp);
+
+ CDEBUG(D_HA, "disconnected export at %s (%p): rc %d\n",
+ obd_export_nid2str(exp), exp, rc);
+ class_export_put(exp);
+ }
+}
+
+void class_disconnect_exports(struct obd_device *obd)
+{
+ struct list_head work_list;
+
+ /* Move all of the exports from obd_exports to a work list, en masse. */
+ INIT_LIST_HEAD(&work_list);
+ spin_lock(&obd->obd_dev_lock);
+ list_splice_init(&obd->obd_exports, &work_list);
+ list_splice_init(&obd->obd_delayed_exports, &work_list);
+ spin_unlock(&obd->obd_dev_lock);
+
+ if (!list_empty(&work_list)) {
+ CDEBUG(D_HA, "OBD device %d (%p) has exports, "
+ "disconnecting them\n", obd->obd_minor, obd);
+ class_disconnect_export_list(&work_list,
+ exp_flags_from_obd(obd));
+ } else {
+ CDEBUG(D_HA, "OBD device %d (%p) has no exports\n",
+ obd->obd_minor, obd);
+ }
+}
+EXPORT_SYMBOL(class_disconnect_exports);
+
+/* Remove exports that have not completed recovery.
+ */
+void class_disconnect_stale_exports(struct obd_device *obd,
+ int (*test_export)(struct obd_export *))
+{
+ struct list_head work_list;
+ struct obd_export *exp, *n;
+ int evicted = 0;
+
+ INIT_LIST_HEAD(&work_list);
+ spin_lock(&obd->obd_dev_lock);
+ list_for_each_entry_safe(exp, n, &obd->obd_exports,
+ exp_obd_chain) {
+ /* don't count self-export as client */
+ if (obd_uuid_equals(&exp->exp_client_uuid,
+ &exp->exp_obd->obd_uuid))
+ continue;
+
+ /* don't evict clients which have no slot in last_rcvd
+ * (e.g. lightweight connection) */
+ if (exp->exp_target_data.ted_lr_idx == -1)
+ continue;
+
+ spin_lock(&exp->exp_lock);
+ if (exp->exp_failed || test_export(exp)) {
+ spin_unlock(&exp->exp_lock);
+ continue;
+ }
+ exp->exp_failed = 1;
+ spin_unlock(&exp->exp_lock);
+
+ list_move(&exp->exp_obd_chain, &work_list);
+ evicted++;
+ CDEBUG(D_HA, "%s: disconnect stale client %s@%s\n",
+ obd->obd_name, exp->exp_client_uuid.uuid,
+ exp->exp_connection == NULL ? "<unknown>" :
+ libcfs_nid2str(exp->exp_connection->c_peer.nid));
+ print_export_data(exp, "EVICTING", 0);
+ }
+ spin_unlock(&obd->obd_dev_lock);
+
+ if (evicted)
+ LCONSOLE_WARN("%s: disconnecting %d stale clients\n",
+ obd->obd_name, evicted);
+
+ class_disconnect_export_list(&work_list, exp_flags_from_obd(obd) |
+ OBD_OPT_ABORT_RECOV);
+}
+EXPORT_SYMBOL(class_disconnect_stale_exports);
+
+void class_fail_export(struct obd_export *exp)
+{
+ int rc, already_failed;
+
+ spin_lock(&exp->exp_lock);
+ already_failed = exp->exp_failed;
+ exp->exp_failed = 1;
+ spin_unlock(&exp->exp_lock);
+
+ if (already_failed) {
+ CDEBUG(D_HA, "disconnecting dead export %p/%s; skipping\n",
+ exp, exp->exp_client_uuid.uuid);
+ return;
+ }
+
+ CDEBUG(D_HA, "disconnecting export %p/%s\n",
+ exp, exp->exp_client_uuid.uuid);
+
+ if (obd_dump_on_timeout)
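+/* Release the obd_devs[] slot of a device created by class_newdev() and
+ * drop the type reference taken there. */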
+ libcfs_debug_dumplog();
+
+ /* needed so CDEBUG can safely be called after obd_disconnect */
+ class_export_get(exp);
+
+ /* Most callers into obd_disconnect are removing their own reference
+ * (request, for example) in addition to the one from the hash table.
+ * We don't have such a reference here, so make one. */
+ class_export_get(exp);
+ rc = obd_disconnect(exp);
+ if (rc)
+ CERROR("disconnecting export %p failed: %d\n", exp, rc);
+ else
+ CDEBUG(D_HA, "disconnected export %p/%s\n",
+ exp, exp->exp_client_uuid.uuid);
+ class_export_put(exp);
+}
+EXPORT_SYMBOL(class_fail_export);
+
+char *obd_export_nid2str(struct obd_export *exp)
+{
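+/* Return the minor number of the attached obd device with the given name,
+ * or -1 if no attached device matches. */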
+ if (exp->exp_connection != NULL)
+ return libcfs_nid2str(exp->exp_connection->c_peer.nid);
+
+ return "(no nid)";
+}
+EXPORT_SYMBOL(obd_export_nid2str);
+
+int obd_export_evict_by_nid(struct obd_device *obd, const char *nid)
+{
+ cfs_hash_t *nid_hash;
+ struct obd_export *doomed_exp = NULL;
+ int exports_evicted = 0;
+
+ lnet_nid_t nid_key = libcfs_str2nid((char *)nid);
+
+ spin_lock(&obd->obd_dev_lock);
+ /* umount is already running, so the evict thread should leave
+ * its task to the umount thread now */
+ if (obd->obd_stopping) {
+ spin_unlock(&obd->obd_dev_lock);
+ return exports_evicted;
+ }
+ nid_hash = obd->obd_nid_hash;
+ cfs_hash_getref(nid_hash);
+ spin_unlock(&obd->obd_dev_lock);
+
+ do {
+ doomed_exp = cfs_hash_lookup(nid_hash, &nid_key);
+ if (doomed_exp == NULL)
+ break;
+
+ LASSERTF(doomed_exp->exp_connection->c_peer.nid == nid_key,
+ "nid %s found, wanted nid %s, requested nid %s\n",
+ obd_export_nid2str(doomed_exp),
+ libcfs_nid2str(nid_key), nid);
+ LASSERTF(doomed_exp != obd->obd_self_export,
+ "self-export is hashed by NID?\n");
+ exports_evicted++;
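+/* Return the minor number of the obd device with the given UUID, or -1 if
+ * no device matches. */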
+ LCONSOLE_WARN("%s: evicting %s (at %s) by administrative "
+ "request\n", obd->obd_name,
+ obd_uuid2str(&doomed_exp->exp_client_uuid),
+ obd_export_nid2str(doomed_exp));
+ class_fail_export(doomed_exp);
+ class_export_put(doomed_exp);
+ } while (1);
+
+ cfs_hash_putref(nid_hash);
+
+ if (!exports_evicted)
+ CDEBUG(D_HA, "%s: can't disconnect NID '%s': no exports found\n",
+ obd->obd_name, nid);
+ return exports_evicted;
+}
+EXPORT_SYMBOL(obd_export_evict_by_nid);
+
+int obd_export_evict_by_uuid(struct obd_device *obd, const char *uuid)
+{
+ cfs_hash_t *uuid_hash;
+ struct obd_export *doomed_exp = NULL;
+ struct obd_uuid doomed_uuid;
+ int exports_evicted = 0;
+
+ spin_lock(&obd->obd_dev_lock);
+ if (obd->obd_stopping) {
+ spin_unlock(&obd->obd_dev_lock);
+ return exports_evicted;
+ }
+ uuid_hash = obd->obd_uuid_hash;
+ cfs_hash_getref(uuid_hash);
+ spin_unlock(&obd->obd_dev_lock);
+
+ obd_str2uuid(&doomed_uuid, uuid);
+ if (obd_uuid_equals(&doomed_uuid, &obd->obd_uuid)) {
+ CERROR("%s: can't evict myself\n", obd->obd_name);
+ cfs_hash_putref(uuid_hash);
+ return exports_evicted;
+ }
+
+ doomed_exp = cfs_hash_lookup(uuid_hash, &doomed_uuid);
+
+ if (doomed_exp == NULL) {
+ CERROR("%s: can't disconnect %s: no exports found\n",
+ obd->obd_name, uuid);
+ } else {
+ CWARN("%s: evicting %s at administrative request\n",
+ obd->obd_name, doomed_exp->exp_client_uuid.uuid);
+ class_fail_export(doomed_exp);
+ class_export_put(doomed_exp);
+ exports_evicted++;
+ }
+ cfs_hash_putref(uuid_hash);
+
+ return exports_evicted;
+}
+EXPORT_SYMBOL(obd_export_evict_by_uuid);
+
+#if LUSTRE_TRACKS_LOCK_EXP_REFS
+void (*class_export_dump_hook)(struct obd_export*) = NULL;
+EXPORT_SYMBOL(class_export_dump_hook);
+#endif
+
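+/* Log one export: device name, status, export pointer, uuid and nid,
+ * followed by the refcount/rpc/callback/lock counters, the
+ * disconnected/delayed/failed flags, the outstanding reply count and the
+ * last committed transno. */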
+static void print_export_data(struct obd_export *exp, const char *status,
+ int locks)
+{
+ struct ptlrpc_reply_state *rs;
+ struct ptlrpc_reply_state *first_reply = NULL;
+ int nreplies = 0;
+
+ spin_lock(&exp->exp_lock);
+ list_for_each_entry(rs, &exp->exp_outstanding_replies,
+ rs_exp_list) {
+ if (nreplies == 0)
+ first_reply = rs;
+ nreplies++;
+ }
+ spin_unlock(&exp->exp_lock);
+
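+/* Log a one-line summary (minor, status, type, name, uuid, refcount) for
+ * every obd device. */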
+ CDEBUG(D_HA, "%s: %s %p %s %s %d (%d %d %d) %d %d %d %d: %p %s "LPU64"\n",
+ exp->exp_obd->obd_name, status, exp, exp->exp_client_uuid.uuid,
+ obd_export_nid2str(exp), atomic_read(&exp->exp_refcount),
+ atomic_read(&exp->exp_rpc_count),
+ atomic_read(&exp->exp_cb_count),
+ atomic_read(&exp->exp_locks_count),
+ exp->exp_disconnected, exp->exp_delayed, exp->exp_failed,
+ nreplies, first_reply, nreplies > 3 ? "..." : "",
+ exp->exp_last_committed);
+#if LUSTRE_TRACKS_LOCK_EXP_REFS
+ if (locks && class_export_dump_hook != NULL)
+ class_export_dump_hook(exp);
+#endif
+}
+
+void dump_exports(struct obd_device *obd, int locks)
+{
+ struct obd_export *exp;
+
+ spin_lock(&obd->obd_dev_lock);
+ list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain)
+ print_export_data(exp, "ACTIVE", locks);
+ list_for_each_entry(exp, &obd->obd_unlinked_exports, exp_obd_chain)
+ print_export_data(exp, "UNLINKED", locks);
+ list_for_each_entry(exp, &obd->obd_delayed_exports, exp_obd_chain)
+ print_export_data(exp, "DELAYED", locks);
+ spin_unlock(&obd->obd_dev_lock);
+ spin_lock(&obd_zombie_impexp_lock);
+ list_for_each_entry(exp, &obd_zombie_exports, exp_obd_chain)
+ print_export_data(exp, "ZOMBIE", locks);
+ spin_unlock(&obd_zombie_impexp_lock);
+}
+EXPORT_SYMBOL(dump_exports);
+
+void obd_exports_barrier(struct obd_device *obd)
+{
+ int waited = 2;
+ LASSERT(list_empty(&obd->obd_exports));
+ spin_lock(&obd->obd_dev_lock);
+ while (!list_empty(&obd->obd_unlinked_exports)) {
+ spin_unlock(&obd->obd_dev_lock);
+ schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
+ cfs_time_seconds(waited));
+ if (waited > 5 && IS_PO2(waited)) {
+ LCONSOLE_WARN("%s is waiting for obd_unlinked_exports "
+ "more than %d seconds. "
+ "The obd refcount = %d. Is it stuck?\n",
+ obd->obd_name, waited,
+ atomic_read(&obd->obd_refcount));
+ dump_exports(obd, 1);
+ }
+ waited *= 2;
+ spin_lock(&obd->obd_dev_lock);
+ }
+ spin_unlock(&obd->obd_dev_lock);
+}
+EXPORT_SYMBOL(obd_exports_barrier);
+
+/* Total number of zombies to be destroyed */
+static int zombies_count;
+
+/**
+ * kill zombie imports and exports
+ */
+void obd_zombie_impexp_cull(void)
+{
+ struct obd_import *import;
+ struct obd_export *export;
+
+ do {
+ spin_lock(&obd_zombie_impexp_lock);
+
+ import = NULL;
+ if (!list_empty(&obd_zombie_imports)) {
+ import = list_entry(obd_zombie_imports.next,
+ struct obd_import,
+ imp_zombie_chain);
+ list_del_init(&import->imp_zombie_chain);
+ }
+
+ export = NULL;
+ if (!list_empty(&obd_zombie_exports)) {
+ export = list_entry(obd_zombie_exports.next,
+ struct obd_export,
+ exp_obd_chain);
+ list_del_init(&export->exp_obd_chain);
+ }
+
+ spin_unlock(&obd_zombie_impexp_lock);
+
+ if (import != NULL) {
+ class_import_destroy(import);
+ spin_lock(&obd_zombie_impexp_lock);
+ zombies_count--;
+ spin_unlock(&obd_zombie_impexp_lock);
+ }
+
+ if (export != NULL) {
+ class_export_destroy(export);
+ spin_lock(&obd_zombie_impexp_lock);
+ zombies_count--;
+ spin_unlock(&obd_zombie_impexp_lock);
+ }
+
+ cond_resched();
+ } while (import != NULL || export != NULL);
+}
+
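+/* State shared with the zombie import/export destroy thread below. */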
+static struct completion obd_zombie_start;
+static struct completion obd_zombie_stop;
+static unsigned long obd_zombie_flags;
+static wait_queue_head_t obd_zombie_waitq;
+static pid_t obd_zombie_pid;
+
+enum {
+ OBD_ZOMBIE_STOP = 0x0001,
+};
+
+/**
+ * Check whether there is work for the zombie import/export destroy thread.
+ */
+static int obd_zombie_impexp_check(void *arg)
+{
+ int rc;
+
+ spin_lock(&obd_zombie_impexp_lock);
+ rc = (zombies_count == 0) &&
+ !test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags);
+ spin_unlock(&obd_zombie_impexp_lock);
+
+ return rc;
+}
+
+/**
+ * Add an export to the obd_zombie thread and notify it.
+ */
+static void obd_zombie_export_add(struct obd_export *exp)
+{
+ spin_lock(&exp->exp_obd->obd_dev_lock);
+ LASSERT(!list_empty(&exp->exp_obd_chain));
+ list_del_init(&exp->exp_obd_chain);
+ spin_unlock(&exp->exp_obd->obd_dev_lock);
+ spin_lock(&obd_zombie_impexp_lock);
+ zombies_count++;
+ list_add(&exp->exp_obd_chain, &obd_zombie_exports);
+ spin_unlock(&obd_zombie_impexp_lock);
+
+ obd_zombie_impexp_notify();
+}
+
+/**
+ * Add an import to the obd_zombie thread and notify it.
+ */
+static void obd_zombie_import_add(struct obd_import *imp)
+{
+ LASSERT(imp->imp_sec == NULL);
+ LASSERT(imp->imp_rq_pool == NULL);
+ spin_lock(&obd_zombie_impexp_lock);
+ LASSERT(list_empty(&imp->imp_zombie_chain));
+ zombies_count++;
+ list_add(&imp->imp_zombie_chain, &obd_zombie_imports);
+ spin_unlock(&obd_zombie_impexp_lock);
+
+ obd_zombie_impexp_notify();
+}
+
+/**
+ * notify import/export destroy thread about new zombie.
+ */
+static void obd_zombie_impexp_notify(void)
+{
+ /*
+ * Make sure obd_zombie_impexp_thread gets this notification.
+ * It is possible that only obd_zombie_barrier sees this signal;
+ * the barrier then gulps the notification, goes back to sleep,
+ * and a hang ensues.
+ */
+ wake_up_all(&obd_zombie_waitq);
+}
+
+/**
+ * check whether obd_zombie is idle
+ */
+static int obd_zombie_is_idle(void)
+{
+ int rc;
+
+ LASSERT(!test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags));
+ spin_lock(&obd_zombie_impexp_lock);
+ rc = (zombies_count == 0);
+ spin_unlock(&obd_zombie_impexp_lock);
+ return rc;
+}
+
+/**
+ * wait until the obd_zombie import/export queues become empty
+ */
+void obd_zombie_barrier(void)
+{
+ struct l_wait_info lwi = { 0 };
+
+ if (obd_zombie_pid == current_pid())
+ /* don't wait for myself */
+ return;
+ l_wait_event(obd_zombie_waitq, obd_zombie_is_idle(), &lwi);
+}
+EXPORT_SYMBOL(obd_zombie_barrier);
+
+
+/**
+ * Main loop of the thread that destroys zombie exports/imports.
+ */
+static int obd_zombie_impexp_thread(void *unused)
+{
+ unshare_fs_struct();
+ complete(&obd_zombie_start);
+
+ obd_zombie_pid = current_pid();
+
+ while (!test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags)) {
+ struct l_wait_info lwi = { 0 };
+
+ l_wait_event(obd_zombie_waitq,
+ !obd_zombie_impexp_check(NULL), &lwi);
+ obd_zombie_impexp_cull();
+
+ /*
+ * Notify obd_zombie_barrier callers that queues
+ * may be empty.
+ */
+ wake_up(&obd_zombie_waitq);
+ }
+
+ complete(&obd_zombie_stop);
+
+ return 0;
+}
+
+
+/**
+ * Start the zombie import/export destroy thread.
+ */
+int obd_zombie_impexp_init(void)
+{
+ struct task_struct *task;
+
+ INIT_LIST_HEAD(&obd_zombie_imports);
+ INIT_LIST_HEAD(&obd_zombie_exports);
+ spin_lock_init(&obd_zombie_impexp_lock);
+ init_completion(&obd_zombie_start);
+ init_completion(&obd_zombie_stop);
+ init_waitqueue_head(&obd_zombie_waitq);
+ obd_zombie_pid = 0;
+
+ task = kthread_run(obd_zombie_impexp_thread, NULL, "obd_zombid");
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+
+ wait_for_completion(&obd_zombie_start);
+ return 0;
+}
+
+/**
+ * Stop the zombie import/export destroy thread.
+ */
+void obd_zombie_impexp_stop(void)
+{
+ set_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags);
+ obd_zombie_impexp_notify();
+ wait_for_completion(&obd_zombie_stop);
+}
+
+/***** Kernel-userspace comm helpers *******/
+
+/* Get length of entire message, including header */
+int kuc_len(int payload_len)
+{
+ return sizeof(struct kuc_hdr) + payload_len;
+}
+EXPORT_SYMBOL(kuc_len);
+
+/* Get a pointer to kuc header, given a ptr to the payload
+ * @param p Pointer to payload area
+ * @returns Pointer to kuc header
+ */
+struct kuc_hdr *kuc_ptr(void *p)
+{
+ struct kuc_hdr *lh = ((struct kuc_hdr *)p) - 1;
+ LASSERT(lh->kuc_magic == KUC_MAGIC);
+ return lh;
+}
+EXPORT_SYMBOL(kuc_ptr);
+
+/* Test if payload is part of kuc message
+ * @param p Pointer to payload area
+ * @returns boolean
+ */
+int kuc_ispayload(void *p)
+{
+ struct kuc_hdr *kh = ((struct kuc_hdr *)p) - 1;
+
+ return (kh->kuc_magic == KUC_MAGIC);
+}
+EXPORT_SYMBOL(kuc_ispayload);
+
+/* Alloc space for a message, and fill in header
+ * @return Pointer to payload area
+ */
+void *kuc_alloc(int payload_len, int transport, int type)
+{
+ struct kuc_hdr *lh;
+ int len = kuc_len(payload_len);
+
+ OBD_ALLOC(lh, len);
+ if (lh == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ lh->kuc_magic = KUC_MAGIC;
+ lh->kuc_transport = transport;
+ lh->kuc_msgtype = type;
+ lh->kuc_msglen = len;
+
+ return (void *)(lh + 1);
+}
+EXPORT_SYMBOL(kuc_alloc);
+
+/* Takes pointer to payload area */
+inline void kuc_free(void *p, int payload_len)
+{
+ struct kuc_hdr *lh = kuc_ptr(p);
+ OBD_FREE(lh, kuc_len(payload_len));
+}
+EXPORT_SYMBOL(kuc_free);
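+
+/*
+ * Illustrative round trip (not part of the original source; "struct my_msg"
+ * and MY_TRANSPORT/MY_MSGTYPE are hypothetical placeholders):
+ *
+ *   void *payload = kuc_alloc(sizeof(struct my_msg), MY_TRANSPORT,
+ *                             MY_MSGTYPE);
+ *
+ *   if (!IS_ERR(payload)) {
+ *           struct kuc_hdr *hdr = kuc_ptr(payload);
+ *
+ *           ... fill the payload; hdr->kuc_msglen covers header + payload ...
+ *           kuc_free(payload, sizeof(struct my_msg));
+ *   }
+ */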
diff --git a/drivers/staging/lustre/lustre/obdclass/idmap.c b/drivers/staging/lustre/lustre/obdclass/idmap.c
new file mode 100644
index 0000000..ec2590f
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/idmap.c
@@ -0,0 +1,477 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdclass/idmap.c
+ *
+ * Lustre user identity mapping.
+ *
+ * Author: Fan Yong <fanyong@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_SEC
+
+#include <lustre_idmap.h>
+#include <md_object.h>
+#include <obd_support.h>
+
+#define lustre_get_group_info(group_info) do { \
+ atomic_inc(&(group_info)->usage); \
+} while (0)
+
+#define lustre_put_group_info(group_info) do { \
+ if (atomic_dec_and_test(&(group_info)->usage)) \
+ groups_free(group_info); \
+} while (0)
+
+/*
+ * groups_search() is copied from the Linux kernel.
+ * A simple bsearch.
+ */
+static int lustre_groups_search(const struct group_info *group_info, gid_t grp)
+{
+ int left, right;
+
+ if (!group_info)
+ return 0;
+
+ left = 0;
+ right = group_info->ngroups;
+ while (left < right) {
+ int mid = (left + right) / 2;
+ int cmp = grp -
+ from_kgid(&init_user_ns, CFS_GROUP_AT(group_info, mid));
+
+ if (cmp > 0)
+ left = mid + 1;
+ else if (cmp < 0)
+ right = mid;
+ else
+ return 1;
+ }
+ return 0;
+}
+
+void lustre_groups_from_list(struct group_info *ginfo, gid_t *glist)
+{
+ int i;
+ int count = ginfo->ngroups;
+
+ /* fill group_info from gid array */
+ for (i = 0; i < ginfo->nblocks && count > 0; i++) {
+ int cp_count = min(CFS_NGROUPS_PER_BLOCK, count);
+ int off = i * CFS_NGROUPS_PER_BLOCK;
+ int len = cp_count * sizeof(*glist);
+
+ memcpy(ginfo->blocks[i], glist + off, len);
+ count -= cp_count;
+ }
+}
+EXPORT_SYMBOL(lustre_groups_from_list);
+
+/* groups_sort() is copied from the Linux kernel. */
+/* a simple Shell-Metzner sort */
+void lustre_groups_sort(struct group_info *group_info)
+{
+ int base, max, stride;
+ int gidsetsize = group_info->ngroups;
+
+ for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
+ ; /* nothing */
+ stride /= 3;
+
+ while (stride) {
+ max = gidsetsize - stride;
+ for (base = 0; base < max; base++) {
+ int left = base;
+ int right = left + stride;
+ gid_t tmp = from_kgid(&init_user_ns,
+ CFS_GROUP_AT(group_info, right));
+
+ while (left >= 0 &&
+ tmp < from_kgid(&init_user_ns,
+ CFS_GROUP_AT(group_info, left))) {
+ CFS_GROUP_AT(group_info, right) =
+ CFS_GROUP_AT(group_info, left);
+ right = left;
+ left -= stride;
+ }
+ CFS_GROUP_AT(group_info, right) =
+ make_kgid(&init_user_ns, tmp);
+ }
+ stride /= 3;
+ }
+}
+EXPORT_SYMBOL(lustre_groups_sort);
+
+int lustre_in_group_p(struct lu_ucred *mu, gid_t grp)
+{
+ int rc = 1;
+
+ if (grp != mu->uc_fsgid) {
+ struct group_info *group_info = NULL;
+
+ if (mu->uc_ginfo || !mu->uc_identity ||
+ mu->uc_valid == UCRED_OLD)
+ if (grp == mu->uc_suppgids[0] ||
+ grp == mu->uc_suppgids[1])
+ return 1;
+
+ if (mu->uc_ginfo)
+ group_info = mu->uc_ginfo;
+ else if (mu->uc_identity)
+ group_info = mu->uc_identity->mi_ginfo;
+
+ if (!group_info)
+ return 0;
+
+ lustre_get_group_info(group_info);
+ rc = lustre_groups_search(group_info, grp);
+ lustre_put_group_info(group_info);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(lustre_in_group_p);
+
+struct lustre_idmap_entry {
+ struct list_head lie_rmt_uid_hash; /* hashed as lie_rmt_uid; */
+ struct list_head lie_lcl_uid_hash; /* hashed as lie_lcl_uid; */
+ struct list_head lie_rmt_gid_hash; /* hashed as lie_rmt_gid; */
+ struct list_head lie_lcl_gid_hash; /* hashed as lie_lcl_gid; */
+ uid_t lie_rmt_uid; /* remote uid */
+ uid_t lie_lcl_uid; /* local uid */
+ gid_t lie_rmt_gid; /* remote gid */
+ gid_t lie_lcl_gid; /* local gid */
+};
+
+static inline __u32 lustre_idmap_hashfunc(__u32 id)
+{
+ return id & (CFS_IDMAP_HASHSIZE - 1);
+}
+
+static
+struct lustre_idmap_entry *idmap_entry_alloc(uid_t rmt_uid, uid_t lcl_uid,
+ gid_t rmt_gid, gid_t lcl_gid)
+{
+ struct lustre_idmap_entry *e;
+
+ OBD_ALLOC_PTR(e);
+ if (e == NULL)
+ return NULL;
+
+ INIT_LIST_HEAD(&e->lie_rmt_uid_hash);
+ INIT_LIST_HEAD(&e->lie_lcl_uid_hash);
+ INIT_LIST_HEAD(&e->lie_rmt_gid_hash);
+ INIT_LIST_HEAD(&e->lie_lcl_gid_hash);
+ e->lie_rmt_uid = rmt_uid;
+ e->lie_lcl_uid = lcl_uid;
+ e->lie_rmt_gid = rmt_gid;
+ e->lie_lcl_gid = lcl_gid;
+
+ return e;
+}
+
+static void idmap_entry_free(struct lustre_idmap_entry *e)
+{
+ if (!list_empty(&e->lie_rmt_uid_hash))
+ list_del(&e->lie_rmt_uid_hash);
+ if (!list_empty(&e->lie_lcl_uid_hash))
+ list_del(&e->lie_lcl_uid_hash);
+ if (!list_empty(&e->lie_rmt_gid_hash))
+ list_del(&e->lie_rmt_gid_hash);
+ if (!list_empty(&e->lie_lcl_gid_hash))
+ list_del(&e->lie_lcl_gid_hash);
+ OBD_FREE_PTR(e);
+}
+
+/*
+ * Return value:
+ * NULL: no matching entry found
+ * ERR_PTR(-EACCES): a 1(remote):N(local) mapped entry was found
+ * others: a normal entry was found
+ */
+static
+struct lustre_idmap_entry *idmap_search_entry(struct lustre_idmap_table *t,
+ uid_t rmt_uid, uid_t lcl_uid,
+ gid_t rmt_gid, gid_t lcl_gid)
+{
+ struct list_head *head;
+ struct lustre_idmap_entry *e;
+
+ head = &t->lit_idmaps[RMT_UIDMAP_IDX][lustre_idmap_hashfunc(rmt_uid)];
+ list_for_each_entry(e, head, lie_rmt_uid_hash)
+ if (e->lie_rmt_uid == rmt_uid) {
+ if (e->lie_lcl_uid == lcl_uid) {
+ if (e->lie_rmt_gid == rmt_gid &&
+ e->lie_lcl_gid == lcl_gid)
+ /* the full uid/gid quadruple matches */
+ return e;
+ } else {
+ /* 1:N uid mapping */
+ CERROR("rmt uid %u already mapped to %u (new %u)\n",
+ e->lie_rmt_uid, e->lie_lcl_uid,
+ lcl_uid);
+ return ERR_PTR(-EACCES);
+ }
+ }
+
+ head = &t->lit_idmaps[RMT_GIDMAP_IDX][lustre_idmap_hashfunc(rmt_gid)];
+ list_for_each_entry(e, head, lie_rmt_gid_hash)
+ if (e->lie_rmt_gid == rmt_gid) {
+ if (e->lie_lcl_gid == lcl_gid) {
+ if (unlikely(e->lie_rmt_uid == rmt_uid &&
+ e->lie_lcl_uid == lcl_uid))
+ /* after uid mapping search above,
+ * we should never come here */
+ LBUG();
+ } else {
+ /* 1:N gid mapping */
+ CERROR("rmt gid %u already mapped to %u (new %u)\n",
+ e->lie_rmt_gid, e->lie_lcl_gid,
+ lcl_gid);
+ return ERR_PTR(-EACCES);
+ }
+ }
+
+ return NULL;
+}
+
+static __u32 idmap_lookup_uid(struct list_head *hash, int reverse,
+ __u32 uid)
+{
+ struct list_head *head = &hash[lustre_idmap_hashfunc(uid)];
+ struct lustre_idmap_entry *e;
+
+ if (!reverse) {
+ list_for_each_entry(e, head, lie_rmt_uid_hash)
+ if (e->lie_rmt_uid == uid)
+ return e->lie_lcl_uid;
+ } else {
+ list_for_each_entry(e, head, lie_lcl_uid_hash)
+ if (e->lie_lcl_uid == uid)
+ return e->lie_rmt_uid;
+ }
+
+ return CFS_IDMAP_NOTFOUND;
+}
+
+static __u32 idmap_lookup_gid(struct list_head *hash, int reverse, __u32 gid)
+{
+ struct list_head *head = &hash[lustre_idmap_hashfunc(gid)];
+ struct lustre_idmap_entry *e;
+
+ if (!reverse) {
+ list_for_each_entry(e, head, lie_rmt_gid_hash)
+ if (e->lie_rmt_gid == gid)
+ return e->lie_lcl_gid;
+ } else {
+ list_for_each_entry(e, head, lie_lcl_gid_hash)
+ if (e->lie_lcl_gid == gid)
+ return e->lie_rmt_gid;
+ }
+
+ return CFS_IDMAP_NOTFOUND;
+}
+
+int lustre_idmap_add(struct lustre_idmap_table *t,
+ uid_t ruid, uid_t luid,
+ gid_t rgid, gid_t lgid)
+{
+ struct lustre_idmap_entry *e0, *e1;
+
+ LASSERT(t);
+
+ spin_lock(&t->lit_lock);
+ e0 = idmap_search_entry(t, ruid, luid, rgid, lgid);
+ spin_unlock(&t->lit_lock);
+ if (!e0) {
+ e0 = idmap_entry_alloc(ruid, luid, rgid, lgid);
+ if (!e0)
+ return -ENOMEM;
+
+ spin_lock(&t->lit_lock);
+ e1 = idmap_search_entry(t, ruid, luid, rgid, lgid);
+ if (e1 == NULL) {
+ list_add_tail(&e0->lie_rmt_uid_hash,
+ &t->lit_idmaps[RMT_UIDMAP_IDX]
+ [lustre_idmap_hashfunc(ruid)]);
+ list_add_tail(&e0->lie_lcl_uid_hash,
+ &t->lit_idmaps[LCL_UIDMAP_IDX]
+ [lustre_idmap_hashfunc(luid)]);
+ list_add_tail(&e0->lie_rmt_gid_hash,
+ &t->lit_idmaps[RMT_GIDMAP_IDX]
+ [lustre_idmap_hashfunc(rgid)]);
+ list_add_tail(&e0->lie_lcl_gid_hash,
+ &t->lit_idmaps[LCL_GIDMAP_IDX]
+ [lustre_idmap_hashfunc(lgid)]);
+ }
+ spin_unlock(&t->lit_lock);
+ if (e1 != NULL) {
+ idmap_entry_free(e0);
+ if (IS_ERR(e1))
+ return PTR_ERR(e1);
+ }
+ } else if (IS_ERR(e0)) {
+ return PTR_ERR(e0);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(lustre_idmap_add);
+
+int lustre_idmap_del(struct lustre_idmap_table *t,
+ uid_t ruid, uid_t luid,
+ gid_t rgid, gid_t lgid)
+{
+ struct lustre_idmap_entry *e;
+ int rc = 0;
+
+ LASSERT(t);
+
+ spin_lock(&t->lit_lock);
+ e = idmap_search_entry(t, ruid, luid, rgid, lgid);
+ if (IS_ERR(e))
+ rc = PTR_ERR(e);
+ else if (e)
+ idmap_entry_free(e);
+ spin_unlock(&t->lit_lock);
+
+ return rc;
+}
+EXPORT_SYMBOL(lustre_idmap_del);
+
+int lustre_idmap_lookup_uid(struct lu_ucred *mu,
+ struct lustre_idmap_table *t,
+ int reverse, uid_t uid)
+{
+ struct list_head *hash;
+
+ if (mu && (mu->uc_valid == UCRED_OLD || mu->uc_valid == UCRED_NEW)) {
+ if (!reverse) {
+ if (uid == mu->uc_o_uid)
+ return mu->uc_uid;
+ else if (uid == mu->uc_o_fsuid)
+ return mu->uc_fsuid;
+ } else {
+ if (uid == mu->uc_uid)
+ return mu->uc_o_uid;
+ else if (uid == mu->uc_fsuid)
+ return mu->uc_o_fsuid;
+ }
+ }
+
+ if (t == NULL)
+ return CFS_IDMAP_NOTFOUND;
+
+ hash = t->lit_idmaps[reverse ? LCL_UIDMAP_IDX : RMT_UIDMAP_IDX];
+
+ spin_lock(&t->lit_lock);
+ uid = idmap_lookup_uid(hash, reverse, uid);
+ spin_unlock(&t->lit_lock);
+
+ return uid;
+}
+EXPORT_SYMBOL(lustre_idmap_lookup_uid);
+
+int lustre_idmap_lookup_gid(struct lu_ucred *mu, struct lustre_idmap_table *t,
+ int reverse, gid_t gid)
+{
+ struct list_head *hash;
+
+ if (mu && (mu->uc_valid == UCRED_OLD || mu->uc_valid == UCRED_NEW)) {
+ if (!reverse) {
+ if (gid == mu->uc_o_gid)
+ return mu->uc_gid;
+ else if (gid == mu->uc_o_fsgid)
+ return mu->uc_fsgid;
+ } else {
+ if (gid == mu->uc_gid)
+ return mu->uc_o_gid;
+ else if (gid == mu->uc_fsgid)
+ return mu->uc_o_fsgid;
+ }
+ }
+
+ if (t == NULL)
+ return CFS_IDMAP_NOTFOUND;
+
+ hash = t->lit_idmaps[reverse ? LCL_GIDMAP_IDX : RMT_GIDMAP_IDX];
+
+ spin_lock(&t->lit_lock);
+ gid = idmap_lookup_gid(hash, reverse, gid);
+ spin_unlock(&t->lit_lock);
+
+ return gid;
+}
+EXPORT_SYMBOL(lustre_idmap_lookup_gid);
+
+struct lustre_idmap_table *lustre_idmap_init(void)
+{
+ struct lustre_idmap_table *t;
+ int i, j;
+
+ OBD_ALLOC_PTR(t);
+ if (unlikely(t == NULL))
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&t->lit_lock);
+ for (i = 0; i < ARRAY_SIZE(t->lit_idmaps); i++)
+ for (j = 0; j < ARRAY_SIZE(t->lit_idmaps[i]); j++)
+ INIT_LIST_HEAD(&t->lit_idmaps[i][j]);
+
+ return t;
+}
+EXPORT_SYMBOL(lustre_idmap_init);
+
+void lustre_idmap_fini(struct lustre_idmap_table *t)
+{
+ struct list_head *list;
+ struct lustre_idmap_entry *e;
+ int i;
+ LASSERT(t);
+
+ list = t->lit_idmaps[RMT_UIDMAP_IDX];
+ spin_lock(&t->lit_lock);
+ for (i = 0; i < CFS_IDMAP_HASHSIZE; i++)
+ while (!list_empty(&list[i])) {
+ e = list_entry(list[i].next,
+ struct lustre_idmap_entry,
+ lie_rmt_uid_hash);
+ idmap_entry_free(e);
+ }
+ spin_unlock(&t->lit_lock);
+
+ OBD_FREE_PTR(t);
+}
+EXPORT_SYMBOL(lustre_idmap_fini);
diff --git a/drivers/staging/lustre/lustre/obdclass/linkea.c b/drivers/staging/lustre/lustre/obdclass/linkea.c
new file mode 100644
index 0000000..b5c19ac
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/linkea.c
@@ -0,0 +1,194 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2013, Intel Corporation.
+ * Use is subject to license terms.
+ *
+ * Author: Di Wang <di.wang@intel.com>
+ */
+
+#include <lustre/lustre_idl.h>
+#include <obd.h>
+#include <lustre_linkea.h>
+
+int linkea_data_new(struct linkea_data *ldata, struct lu_buf *buf)
+{
+ ldata->ld_buf = lu_buf_check_and_alloc(buf, PAGE_CACHE_SIZE);
+ if (ldata->ld_buf->lb_buf == NULL)
+ return -ENOMEM;
+ ldata->ld_leh = ldata->ld_buf->lb_buf;
+ ldata->ld_leh->leh_magic = LINK_EA_MAGIC;
+ ldata->ld_leh->leh_len = sizeof(struct link_ea_header);
+ ldata->ld_leh->leh_reccount = 0;
+ return 0;
+}
+EXPORT_SYMBOL(linkea_data_new);
+
+int linkea_init(struct linkea_data *ldata)
+{
+ struct link_ea_header *leh;
+
+ LASSERT(ldata->ld_buf != NULL);
+ leh = ldata->ld_buf->lb_buf;
+ if (leh->leh_magic == __swab32(LINK_EA_MAGIC)) {
+ leh->leh_magic = LINK_EA_MAGIC;
+ leh->leh_reccount = __swab32(leh->leh_reccount);
+ leh->leh_len = __swab64(leh->leh_len);
+ /* entries are swabbed by linkea_entry_unpack */
+ }
+ if (leh->leh_magic != LINK_EA_MAGIC)
+ return -EINVAL;
+ if (leh->leh_reccount == 0)
+ return -ENODATA;
+
+ ldata->ld_leh = leh;
+ return 0;
+}
+EXPORT_SYMBOL(linkea_init);
+
+/**
+ * Pack a link_ea_entry.
+ * All elements are stored as chars to avoid alignment issues.
+ * Numbers are always big-endian
+ * \retval record length
+ */
+static int linkea_entry_pack(struct link_ea_entry *lee,
+ const struct lu_name *lname,
+ const struct lu_fid *pfid)
+{
+ struct lu_fid tmpfid;
+ int reclen;
+
+ fid_cpu_to_be(&tmpfid, pfid);
+ if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_LINKEA_CRASH))
+ tmpfid.f_ver = ~0;
+ memcpy(&lee->lee_parent_fid, &tmpfid, sizeof(tmpfid));
+ memcpy(lee->lee_name, lname->ln_name, lname->ln_namelen);
+ reclen = sizeof(struct link_ea_entry) + lname->ln_namelen;
+
+ lee->lee_reclen[0] = (reclen >> 8) & 0xff;
+ lee->lee_reclen[1] = reclen & 0xff;
+ return reclen;
+}
+
+void linkea_entry_unpack(const struct link_ea_entry *lee, int *reclen,
+ struct lu_name *lname, struct lu_fid *pfid)
+{
+ *reclen = (lee->lee_reclen[0] << 8) | lee->lee_reclen[1];
+ memcpy(pfid, &lee->lee_parent_fid, sizeof(*pfid));
+ fid_be_to_cpu(pfid, pfid);
+ lname->ln_name = lee->lee_name;
+ lname->ln_namelen = *reclen - sizeof(struct link_ea_entry);
+}
+EXPORT_SYMBOL(linkea_entry_unpack);
+
+/**
+ * Add a record to the end of link ea buf
+ **/
+int linkea_add_buf(struct linkea_data *ldata, const struct lu_name *lname,
+ const struct lu_fid *pfid)
+{
+ LASSERT(ldata->ld_leh != NULL);
+
+ if (lname == NULL || pfid == NULL)
+ return -EINVAL;
+
+ ldata->ld_reclen = lname->ln_namelen + sizeof(struct link_ea_entry);
+ if (ldata->ld_leh->leh_len + ldata->ld_reclen >
+ ldata->ld_buf->lb_len) {
+ if (lu_buf_check_and_grow(ldata->ld_buf,
+ ldata->ld_leh->leh_len +
+ ldata->ld_reclen) < 0)
+ return -ENOMEM;
+ }
+
+ ldata->ld_leh = ldata->ld_buf->lb_buf;
+ ldata->ld_lee = ldata->ld_buf->lb_buf + ldata->ld_leh->leh_len;
+ ldata->ld_reclen = linkea_entry_pack(ldata->ld_lee, lname, pfid);
+ ldata->ld_leh->leh_len += ldata->ld_reclen;
+ ldata->ld_leh->leh_reccount++;
+ CDEBUG(D_INODE, "New link_ea name '%.*s' is added\n",
+ lname->ln_namelen, lname->ln_name);
+ return 0;
+}
+EXPORT_SYMBOL(linkea_add_buf);
+
+/** Del the current record from the link ea buf */
+void linkea_del_buf(struct linkea_data *ldata, const struct lu_name *lname)
+{
+ LASSERT(ldata->ld_leh != NULL && ldata->ld_lee != NULL);
+
+ ldata->ld_leh->leh_reccount--;
+ ldata->ld_leh->leh_len -= ldata->ld_reclen;
+ memmove(ldata->ld_lee, (char *)ldata->ld_lee + ldata->ld_reclen,
+ (char *)ldata->ld_leh + ldata->ld_leh->leh_len -
+ (char *)ldata->ld_lee);
+ CDEBUG(D_INODE, "Old link_ea name '%.*s' is removed\n",
+ lname->ln_namelen, lname->ln_name);
+}
+EXPORT_SYMBOL(linkea_del_buf);
+
+/**
+ * Check if such a link exists in linkEA.
+ *
+ * \param ldata link data the search to be done on
+ * \param lname name in the parent's directory entry pointing to this object
+ * \param pfid parent fid for which the link is to be found
+ *
+ * \retval 0 success
+ * \retval -ENOENT link does not exist
+ * \retval -ve on error
+ */
+int linkea_links_find(struct linkea_data *ldata, const struct lu_name *lname,
+ const struct lu_fid *pfid)
+{
+ struct lu_name tmpname;
+ struct lu_fid tmpfid;
+ int count;
+
+ LASSERT(ldata->ld_leh != NULL);
+
+ /* link #0 */
+ ldata->ld_lee = (struct link_ea_entry *)(ldata->ld_leh + 1);
+
+ for (count = 0; count < ldata->ld_leh->leh_reccount; count++) {
+ linkea_entry_unpack(ldata->ld_lee, &ldata->ld_reclen,
+ &tmpname, &tmpfid);
+ if (tmpname.ln_namelen == lname->ln_namelen &&
+ lu_fid_eq(&tmpfid, pfid) &&
+ (strncmp(tmpname.ln_name, lname->ln_name,
+ tmpname.ln_namelen) == 0))
+ break;
+ ldata->ld_lee = (struct link_ea_entry *)((char *)ldata->ld_lee +
+ ldata->ld_reclen);
+ }
+
+ if (count == ldata->ld_leh->leh_reccount) {
+ CDEBUG(D_INODE, "Old link_ea name '%.*s' not found\n",
+ lname->ln_namelen, lname->ln_name);
+ ldata->ld_lee = NULL;
+ return -ENOENT;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(linkea_links_find);
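+
+#if 0
+/*
+ * Usage sketch, hypothetical and not part of the Lustre sources: how a
+ * caller might combine the helpers above to replace one parent link.  The
+ * function name and the pre-initialized ldata->ld_buf are assumptions.
+ */
+static int example_linkea_rename(struct linkea_data *ldata,
+				 const struct lu_name *oldname,
+				 const struct lu_name *newname,
+				 const struct lu_fid *pfid)
+{
+	int rc;
+
+	rc = linkea_init(ldata);	/* validate header, swab if needed */
+	if (rc)
+		return rc;
+
+	rc = linkea_links_find(ldata, oldname, pfid);	/* locate old entry */
+	if (rc)
+		return rc;
+
+	linkea_del_buf(ldata, oldname);			/* drop the old record */
+	return linkea_add_buf(ldata, newname, pfid);	/* append a new one */
+}
+#endif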
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
new file mode 100644
index 0000000..d1a57eb
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -0,0 +1,406 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdclass/linux/linux-module.c
+ *
+ * Object Devices Class Driver
+ * These are the only exported functions, they provide some generic
+ * infrastructure for managing object devices
+ */
+
+#define DEBUG_SUBSYSTEM S_CLASS
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/sched.h>
+#include <linux/lp.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/fcntl.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <linux/proc_fs.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/highmem.h>
+#include <asm/io.h>
+#include <asm/ioctls.h>
+#include <asm/poll.h>
+#include <asm/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/seq_file.h>
+
+#include <linux/libcfs/libcfs.h>
+#include <obd_support.h>
+#include <obd_class.h>
+#include <linux/lnet/lnetctl.h>
+#include <lprocfs_status.h>
+#include <lustre_ver.h>
+#include <lustre/lustre_build_version.h>
+
+int proc_version;
+
+/* buffer MUST be at least the size of obd_ioctl_hdr */
+int obd_ioctl_getdata(char **buf, int *len, void *arg)
+{
+ struct obd_ioctl_hdr hdr;
+ struct obd_ioctl_data *data;
+ int err;
+ int offset = 0;
+
+	err = copy_from_user(&hdr, (void *)arg, sizeof(hdr));
+	if (err)
+		return -EFAULT;
+
+ if (hdr.ioc_version != OBD_IOCTL_VERSION) {
+ CERROR("Version mismatch kernel (%x) vs application (%x)\n",
+ OBD_IOCTL_VERSION, hdr.ioc_version);
+ return -EINVAL;
+ }
+
+ if (hdr.ioc_len > OBD_MAX_IOCTL_BUFFER) {
+ CERROR("User buffer len %d exceeds %d max buffer\n",
+ hdr.ioc_len, OBD_MAX_IOCTL_BUFFER);
+ return -EINVAL;
+ }
+
+ if (hdr.ioc_len < sizeof(struct obd_ioctl_data)) {
+ CERROR("User buffer too small for ioctl (%d)\n", hdr.ioc_len);
+ return -EINVAL;
+ }
+
+ /* When there are lots of processes calling vmalloc on multi-core
+ * system, the high lock contention will hurt performance badly,
+ * obdfilter-survey is an example, which relies on ioctl. So we'd
+ * better avoid vmalloc on ioctl path. LU-66 */
+ OBD_ALLOC_LARGE(*buf, hdr.ioc_len);
+ if (*buf == NULL) {
+ CERROR("Cannot allocate control buffer of len %d\n",
+ hdr.ioc_len);
+ return -EINVAL;
+ }
+ *len = hdr.ioc_len;
+ data = (struct obd_ioctl_data *)*buf;
+
+	err = copy_from_user(*buf, (void *)arg, hdr.ioc_len);
+	if (err) {
+		OBD_FREE_LARGE(*buf, hdr.ioc_len);
+		return -EFAULT;
+	}
+
+ if (obd_ioctl_is_invalid(data)) {
+ CERROR("ioctl not correctly formatted\n");
+ OBD_FREE_LARGE(*buf, hdr.ioc_len);
+ return -EINVAL;
+ }
+
+ if (data->ioc_inllen1) {
+ data->ioc_inlbuf1 = &data->ioc_bulk[0];
+ offset += cfs_size_round(data->ioc_inllen1);
+ }
+
+ if (data->ioc_inllen2) {
+ data->ioc_inlbuf2 = &data->ioc_bulk[0] + offset;
+ offset += cfs_size_round(data->ioc_inllen2);
+ }
+
+ if (data->ioc_inllen3) {
+ data->ioc_inlbuf3 = &data->ioc_bulk[0] + offset;
+ offset += cfs_size_round(data->ioc_inllen3);
+ }
+
+ if (data->ioc_inllen4) {
+ data->ioc_inlbuf4 = &data->ioc_bulk[0] + offset;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(obd_ioctl_getdata);
+
+int obd_ioctl_popdata(void *arg, void *data, int len)
+{
+ int err;
+
+ err = copy_to_user(arg, data, len);
+ if (err)
+ err = -EFAULT;
+ return err;
+}
+EXPORT_SYMBOL(obd_ioctl_popdata);
+
+/* opening /dev/obd */
+static int obd_class_open(struct inode *inode, struct file *file)
+{
+ try_module_get(THIS_MODULE);
+ return 0;
+}
+
+/* closing /dev/obd */
+static int obd_class_release(struct inode *inode, struct file *file)
+{
+ module_put(THIS_MODULE);
+ return 0;
+}
+
+/* to control /dev/obd */
+static long obd_class_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int err = 0;
+
+ /* Allow non-root access for OBD_IOC_PING_TARGET - used by lfs check */
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN) && (cmd != OBD_IOC_PING_TARGET))
+		return -EACCES;
+ if ((cmd & 0xffffff00) == ((int)'T') << 8) /* ignore all tty ioctls */
+		return -ENOTTY;
+
+ err = class_handle_ioctl(cmd, (unsigned long)arg);
+
+ return err;
+}
+
+/* declare character device */
+static struct file_operations obd_psdev_fops = {
+	.owner		= THIS_MODULE,
+	.unlocked_ioctl	= obd_class_ioctl,
+	.open		= obd_class_open,
+	.release	= obd_class_release,
+};
+
+/* modules setup */
+struct miscdevice obd_psdev = {
+ .minor = OBD_DEV_MINOR,
+ .name = OBD_DEV_NAME,
+ .fops = &obd_psdev_fops,
+};
+
+
+#ifdef LPROCFS
+int obd_proc_version_seq_show(struct seq_file *m, void *v)
+{
+ return seq_printf(m, "lustre: %s\nkernel: %s\nbuild: %s\n",
+ LUSTRE_VERSION_STRING, "patchless_client",
+ BUILD_VERSION);
+}
+LPROC_SEQ_FOPS_RO(obd_proc_version);
+
+int obd_proc_pinger_seq_show(struct seq_file *m, void *v)
+{
+ return seq_printf(m, "%s\n", "on");
+}
+LPROC_SEQ_FOPS_RO(obd_proc_pinger);
+
+static int obd_proc_health_seq_show(struct seq_file *m, void *v)
+{
+ int rc = 0, i;
+
+ if (libcfs_catastrophe)
+ seq_printf(m, "LBUG\n");
+
+ read_lock(&obd_dev_lock);
+ for (i = 0; i < class_devno_max(); i++) {
+ struct obd_device *obd;
+
+ obd = class_num2obd(i);
+ if (obd == NULL || !obd->obd_attached || !obd->obd_set_up)
+ continue;
+
+ LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
+ if (obd->obd_stopping)
+ continue;
+
+		class_incref(obd, __func__, current);
+ read_unlock(&obd_dev_lock);
+
+ if (obd_health_check(NULL, obd)) {
+ seq_printf(m, "device %s reported unhealthy\n",
+ obd->obd_name);
+ rc++;
+ }
+		class_decref(obd, __func__, current);
+ read_lock(&obd_dev_lock);
+ }
+ read_unlock(&obd_dev_lock);
+
+ if (rc == 0)
+ return seq_printf(m, "healthy\n");
+
+ seq_printf(m, "NOT HEALTHY\n");
+ return 0;
+}
+LPROC_SEQ_FOPS_RO(obd_proc_health);
+
+static int obd_proc_jobid_var_seq_show(struct seq_file *m, void *v)
+{
+ return seq_printf(m, "%s\n", obd_jobid_var);
+}
+
+static ssize_t obd_proc_jobid_var_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ if (!count || count > JOBSTATS_JOBID_VAR_MAX_LEN)
+ return -EINVAL;
+
+	memset(obd_jobid_var, 0, JOBSTATS_JOBID_VAR_MAX_LEN + 1);
+	/* the buffer comes from userspace, so it must be copied, not read */
+	if (copy_from_user(obd_jobid_var, buffer, count))
+		return -EFAULT;
+	/* Trim the trailing '\n' if any */
+	if (obd_jobid_var[count - 1] == '\n')
+		obd_jobid_var[count - 1] = '\0';
+	return count;
+}
+LPROC_SEQ_FOPS(obd_proc_jobid_var);
+
+/* Root for /proc/fs/lustre */
+struct proc_dir_entry *proc_lustre_root = NULL;
+EXPORT_SYMBOL(proc_lustre_root);
+
+struct lprocfs_vars lprocfs_base[] = {
+ { "version", &obd_proc_version_fops },
+ { "pinger", &obd_proc_pinger_fops },
+ { "health_check", &obd_proc_health_fops },
+ { "jobid_var", &obd_proc_jobid_var_fops },
+ { 0 }
+};
+#else
+#define lprocfs_base NULL
+#endif /* LPROCFS */
+
+static void *obd_device_list_seq_start(struct seq_file *p, loff_t *pos)
+{
+ if (*pos >= class_devno_max())
+ return NULL;
+
+ return pos;
+}
+
+static void obd_device_list_seq_stop(struct seq_file *p, void *v)
+{
+}
+
+static void *obd_device_list_seq_next(struct seq_file *p, void *v, loff_t *pos)
+{
+ ++*pos;
+ if (*pos >= class_devno_max())
+ return NULL;
+
+ return pos;
+}
+
+static int obd_device_list_seq_show(struct seq_file *p, void *v)
+{
+ loff_t index = *(loff_t *)v;
+ struct obd_device *obd = class_num2obd((int)index);
+ char *status;
+
+ if (obd == NULL)
+ return 0;
+
+ LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
+ if (obd->obd_stopping)
+ status = "ST";
+ else if (obd->obd_inactive)
+ status = "IN";
+ else if (obd->obd_set_up)
+ status = "UP";
+ else if (obd->obd_attached)
+ status = "AT";
+ else
+ status = "--";
+
+ return seq_printf(p, "%3d %s %s %s %s %d\n",
+ (int)index, status, obd->obd_type->typ_name,
+ obd->obd_name, obd->obd_uuid.uuid,
+ atomic_read(&obd->obd_refcount));
+}
+
+struct seq_operations obd_device_list_sops = {
+ .start = obd_device_list_seq_start,
+ .stop = obd_device_list_seq_stop,
+ .next = obd_device_list_seq_next,
+ .show = obd_device_list_seq_show,
+};
+
+static int obd_device_list_open(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq;
+ int rc = seq_open(file, &obd_device_list_sops);
+
+ if (rc)
+ return rc;
+
+ seq = file->private_data;
+ seq->private = PDE_DATA(inode);
+
+ return 0;
+}
+
+struct file_operations obd_device_list_fops = {
+ .owner = THIS_MODULE,
+ .open = obd_device_list_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+int class_procfs_init(void)
+{
+ int rc = 0;
+
+ obd_sysctl_init();
+ proc_lustre_root = lprocfs_register("fs/lustre", NULL,
+ lprocfs_base, NULL);
+ if (IS_ERR(proc_lustre_root)) {
+ rc = PTR_ERR(proc_lustre_root);
+ proc_lustre_root = NULL;
+ goto out;
+ }
+
+ rc = lprocfs_seq_create(proc_lustre_root, "devices", 0444,
+ &obd_device_list_fops, NULL);
+out:
+ if (rc)
+ CERROR("error adding /proc/fs/lustre/devices file\n");
+ return 0;
+}
+
+int class_procfs_clean(void)
+{
+	if (proc_lustre_root)
+		lprocfs_remove(&proc_lustre_root);
+ return 0;
+}
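+
+#if 0
+/*
+ * Usage sketch, hypothetical: how an ioctl handler might pair
+ * obd_ioctl_getdata() with obd_ioctl_popdata().  The handler name and the
+ * elided command handling are assumptions, not part of this file.
+ */
+static int example_ioctl_handler(unsigned int cmd, unsigned long arg)
+{
+	struct obd_ioctl_data *data;
+	char *buf = NULL;
+	int len = 0, rc;
+
+	rc = obd_ioctl_getdata(&buf, &len, (void *)arg); /* copy in, validate */
+	if (rc)
+		return rc;
+	data = (struct obd_ioctl_data *)buf;
+
+	/* ... act on cmd and data here ... */
+
+	rc = obd_ioctl_popdata((void *)arg, data, len);	/* copy result back */
+	OBD_FREE_LARGE(buf, len);
+	return rc;
+}
+#endif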
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
new file mode 100644
index 0000000..d3bb5ff
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
@@ -0,0 +1,222 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdclass/linux/linux-obdo.c
+ *
+ * Object Devices Class Driver
+ * These are the only exported functions, they provide some generic
+ * infrastructure for managing object devices
+ */
+
+#define DEBUG_SUBSYSTEM S_CLASS
+
+#include <linux/module.h>
+#include <obd_class.h>
+#include <lustre/lustre_idl.h>
+
+#include <linux/fs.h>
+#include <linux/pagemap.h> /* for PAGE_CACHE_SIZE */
+
+/* FIXME: just copied from obdo_from_inode */
+void obdo_from_la(struct obdo *dst, struct lu_attr *la, __u64 valid)
+{
+ obd_flag newvalid = 0;
+
+ if (valid & LA_ATIME) {
+ dst->o_atime = la->la_atime;
+ newvalid |= OBD_MD_FLATIME;
+ }
+ if (valid & LA_MTIME) {
+ dst->o_mtime = la->la_mtime;
+ newvalid |= OBD_MD_FLMTIME;
+ }
+ if (valid & LA_CTIME) {
+ dst->o_ctime = la->la_ctime;
+ newvalid |= OBD_MD_FLCTIME;
+ }
+ if (valid & LA_SIZE) {
+ dst->o_size = la->la_size;
+ newvalid |= OBD_MD_FLSIZE;
+ }
+ if (valid & LA_BLOCKS) { /* allocation of space (x512 bytes) */
+ dst->o_blocks = la->la_blocks;
+ newvalid |= OBD_MD_FLBLOCKS;
+ }
+ if (valid & LA_TYPE) {
+ dst->o_mode = (dst->o_mode & S_IALLUGO) |
+ (la->la_mode & S_IFMT);
+ newvalid |= OBD_MD_FLTYPE;
+ }
+ if (valid & LA_MODE) {
+ dst->o_mode = (dst->o_mode & S_IFMT) |
+ (la->la_mode & S_IALLUGO);
+ newvalid |= OBD_MD_FLMODE;
+ }
+ if (valid & LA_UID) {
+ dst->o_uid = la->la_uid;
+ newvalid |= OBD_MD_FLUID;
+ }
+ if (valid & LA_GID) {
+ dst->o_gid = la->la_gid;
+ newvalid |= OBD_MD_FLGID;
+ }
+ dst->o_valid |= newvalid;
+}
+EXPORT_SYMBOL(obdo_from_la);
+
+/* FIXME: just copied from obdo_from_inode */
+void la_from_obdo(struct lu_attr *dst, struct obdo *obdo, obd_flag valid)
+{
+ __u64 newvalid = 0;
+
+ valid &= obdo->o_valid;
+
+ if (valid & OBD_MD_FLATIME) {
+ dst->la_atime = obdo->o_atime;
+ newvalid |= LA_ATIME;
+ }
+ if (valid & OBD_MD_FLMTIME) {
+ dst->la_mtime = obdo->o_mtime;
+ newvalid |= LA_MTIME;
+ }
+ if (valid & OBD_MD_FLCTIME) {
+ dst->la_ctime = obdo->o_ctime;
+ newvalid |= LA_CTIME;
+ }
+ if (valid & OBD_MD_FLSIZE) {
+ dst->la_size = obdo->o_size;
+ newvalid |= LA_SIZE;
+ }
+ if (valid & OBD_MD_FLBLOCKS) {
+ dst->la_blocks = obdo->o_blocks;
+ newvalid |= LA_BLOCKS;
+ }
+ if (valid & OBD_MD_FLTYPE) {
+ dst->la_mode = (dst->la_mode & S_IALLUGO) |
+ (obdo->o_mode & S_IFMT);
+ newvalid |= LA_TYPE;
+ }
+ if (valid & OBD_MD_FLMODE) {
+ dst->la_mode = (dst->la_mode & S_IFMT) |
+ (obdo->o_mode & S_IALLUGO);
+ newvalid |= LA_MODE;
+ }
+ if (valid & OBD_MD_FLUID) {
+ dst->la_uid = obdo->o_uid;
+ newvalid |= LA_UID;
+ }
+ if (valid & OBD_MD_FLGID) {
+ dst->la_gid = obdo->o_gid;
+ newvalid |= LA_GID;
+ }
+ dst->la_valid = newvalid;
+}
+EXPORT_SYMBOL(la_from_obdo);
+
+void obdo_refresh_inode(struct inode *dst, struct obdo *src, obd_flag valid)
+{
+ valid &= src->o_valid;
+
+ if (valid & (OBD_MD_FLCTIME | OBD_MD_FLMTIME))
+ CDEBUG(D_INODE,
+ "valid "LPX64", cur time %lu/%lu, new "LPU64"/"LPU64"\n",
+ src->o_valid, LTIME_S(dst->i_mtime),
+ LTIME_S(dst->i_ctime), src->o_mtime, src->o_ctime);
+
+ if (valid & OBD_MD_FLATIME && src->o_atime > LTIME_S(dst->i_atime))
+ LTIME_S(dst->i_atime) = src->o_atime;
+ if (valid & OBD_MD_FLMTIME && src->o_mtime > LTIME_S(dst->i_mtime))
+ LTIME_S(dst->i_mtime) = src->o_mtime;
+ if (valid & OBD_MD_FLCTIME && src->o_ctime > LTIME_S(dst->i_ctime))
+ LTIME_S(dst->i_ctime) = src->o_ctime;
+ if (valid & OBD_MD_FLSIZE)
+ i_size_write(dst, src->o_size);
+ /* optimum IO size */
+ if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits))
+ dst->i_blkbits = ffs(src->o_blksize) - 1;
+
+ if (dst->i_blkbits < PAGE_CACHE_SHIFT)
+ dst->i_blkbits = PAGE_CACHE_SHIFT;
+
+ /* allocation of space */
+ if (valid & OBD_MD_FLBLOCKS && src->o_blocks > dst->i_blocks)
+ /*
+ * XXX shouldn't overflow be checked here like in
+		 * obdo_to_inode()?
+ */
+ dst->i_blocks = src->o_blocks;
+}
+EXPORT_SYMBOL(obdo_refresh_inode);
+
+void obdo_to_inode(struct inode *dst, struct obdo *src, obd_flag valid)
+{
+ valid &= src->o_valid;
+
+ LASSERTF(!(valid & (OBD_MD_FLTYPE | OBD_MD_FLGENER | OBD_MD_FLFID |
+ OBD_MD_FLID | OBD_MD_FLGROUP)),
+ "object "DOSTID", valid %x\n", POSTID(&src->o_oi), valid);
+
+ if (valid & (OBD_MD_FLCTIME | OBD_MD_FLMTIME))
+ CDEBUG(D_INODE,
+ "valid "LPX64", cur time %lu/%lu, new "LPU64"/"LPU64"\n",
+ src->o_valid, LTIME_S(dst->i_mtime),
+ LTIME_S(dst->i_ctime), src->o_mtime, src->o_ctime);
+
+ if (valid & OBD_MD_FLATIME)
+ LTIME_S(dst->i_atime) = src->o_atime;
+ if (valid & OBD_MD_FLMTIME)
+ LTIME_S(dst->i_mtime) = src->o_mtime;
+ if (valid & OBD_MD_FLCTIME && src->o_ctime > LTIME_S(dst->i_ctime))
+ LTIME_S(dst->i_ctime) = src->o_ctime;
+ if (valid & OBD_MD_FLSIZE)
+ i_size_write(dst, src->o_size);
+ if (valid & OBD_MD_FLBLOCKS) { /* allocation of space */
+ dst->i_blocks = src->o_blocks;
+ if (dst->i_blocks < src->o_blocks) /* overflow */
+ dst->i_blocks = -1;
+	}
+ if (valid & OBD_MD_FLBLKSZ)
+		dst->i_blkbits = ffs(src->o_blksize) - 1;
+ if (valid & OBD_MD_FLMODE)
+ dst->i_mode = (dst->i_mode & S_IFMT) | (src->o_mode & ~S_IFMT);
+ if (valid & OBD_MD_FLUID)
+ dst->i_uid = make_kuid(&init_user_ns, src->o_uid);
+ if (valid & OBD_MD_FLGID)
+ dst->i_gid = make_kgid(&init_user_ns, src->o_gid);
+ if (valid & OBD_MD_FLFLAGS)
+ dst->i_flags = src->o_flags;
+}
+EXPORT_SYMBOL(obdo_to_inode);
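+
+#if 0
+/*
+ * Usage sketch, hypothetical: a round trip through the converters above.
+ * Only the attributes named in the valid mask survive each direction.
+ */
+static void example_obdo_roundtrip(struct lu_attr *la, struct obdo *oa)
+{
+	/* copy size and times from the lu_attr into the obdo ... */
+	obdo_from_la(oa, la, LA_SIZE | LA_MTIME | LA_CTIME);
+	/* ... and back, honouring only the bits marked valid in the obdo */
+	la_from_obdo(la, oa, oa->o_valid);
+}
+#endif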
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
new file mode 100644
index 0000000..acd2619
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
@@ -0,0 +1,443 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/sysctl.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/ctype.h>
+#include <asm/bitops.h>
+#include <asm/uaccess.h>
+#include <linux/utsname.h>
+
+#define DEBUG_SUBSYSTEM S_CLASS
+
+#include <obd_support.h>
+#include <lprocfs_status.h>
+
+#ifdef CONFIG_SYSCTL
+ctl_table_header_t *obd_table_header = NULL;
+#endif
+
+
+#define OBD_SYSCTL 300
+
+enum {
+ OBD_TIMEOUT = 3, /* RPC timeout before recovery/intr */
+ OBD_DUMP_ON_TIMEOUT, /* dump kernel debug log upon eviction */
+ OBD_MEMUSED, /* bytes currently OBD_ALLOCated */
+ OBD_PAGESUSED, /* pages currently OBD_PAGE_ALLOCated */
+ OBD_MAXMEMUSED, /* maximum bytes OBD_ALLOCated concurrently */
+ OBD_MAXPAGESUSED, /* maximum pages OBD_PAGE_ALLOCated concurrently */
+ OBD_SYNCFILTER, /* XXX temporary, as we play with sync osts.. */
+ OBD_LDLM_TIMEOUT, /* LDLM timeout for ASTs before client eviction */
+ OBD_DUMP_ON_EVICTION, /* dump kernel debug log upon eviction */
+ OBD_DEBUG_PEER_ON_TIMEOUT, /* dump peer debug when RPC times out */
+ OBD_ALLOC_FAIL_RATE, /* memory allocation random failure rate */
+ OBD_MAX_DIRTY_PAGES, /* maximum dirty pages */
+ OBD_AT_MIN, /* Adaptive timeouts params */
+ OBD_AT_MAX,
+ OBD_AT_EXTRA,
+ OBD_AT_EARLY_MARGIN,
+ OBD_AT_HISTORY,
+};
+
+
+int LL_PROC_PROTO(proc_set_timeout)
+{
+ int rc;
+
+ rc = ll_proc_dointvec(table, write, filp, buffer, lenp, ppos);
+ if (ldlm_timeout >= obd_timeout)
+ ldlm_timeout = max(obd_timeout / 3, 1U);
+ return rc;
+}
+
+int LL_PROC_PROTO(proc_memory_alloc)
+{
+ char buf[22];
+ int len;
+ DECLARE_LL_PROC_PPOS_DECL;
+
+ if (!*lenp || (*ppos && !write)) {
+ *lenp = 0;
+ return 0;
+ }
+ if (write)
+ return -EINVAL;
+
+ len = snprintf(buf, sizeof(buf), LPU64"\n", obd_memory_sum());
+ if (len > *lenp)
+ len = *lenp;
+ buf[len] = '\0';
+ if (copy_to_user(buffer, buf, len))
+ return -EFAULT;
+ *lenp = len;
+ *ppos += *lenp;
+ return 0;
+}
+
+int LL_PROC_PROTO(proc_pages_alloc)
+{
+ char buf[22];
+ int len;
+ DECLARE_LL_PROC_PPOS_DECL;
+
+ if (!*lenp || (*ppos && !write)) {
+ *lenp = 0;
+ return 0;
+ }
+ if (write)
+ return -EINVAL;
+
+ len = snprintf(buf, sizeof(buf), LPU64"\n", obd_pages_sum());
+ if (len > *lenp)
+ len = *lenp;
+ buf[len] = '\0';
+ if (copy_to_user(buffer, buf, len))
+ return -EFAULT;
+ *lenp = len;
+ *ppos += *lenp;
+ return 0;
+}
+
+int LL_PROC_PROTO(proc_mem_max)
+{
+ char buf[22];
+ int len;
+ DECLARE_LL_PROC_PPOS_DECL;
+
+ if (!*lenp || (*ppos && !write)) {
+ *lenp = 0;
+ return 0;
+ }
+ if (write)
+ return -EINVAL;
+
+ len = snprintf(buf, sizeof(buf), LPU64"\n", obd_memory_max());
+ if (len > *lenp)
+ len = *lenp;
+ buf[len] = '\0';
+ if (copy_to_user(buffer, buf, len))
+ return -EFAULT;
+ *lenp = len;
+ *ppos += *lenp;
+ return 0;
+}
+
+int LL_PROC_PROTO(proc_pages_max)
+{
+ char buf[22];
+ int len;
+ DECLARE_LL_PROC_PPOS_DECL;
+
+ if (!*lenp || (*ppos && !write)) {
+ *lenp = 0;
+ return 0;
+ }
+ if (write)
+ return -EINVAL;
+
+ len = snprintf(buf, sizeof(buf), LPU64"\n", obd_pages_max());
+ if (len > *lenp)
+ len = *lenp;
+ buf[len] = '\0';
+ if (copy_to_user(buffer, buf, len))
+ return -EFAULT;
+ *lenp = len;
+ *ppos += *lenp;
+ return 0;
+}
+
+int LL_PROC_PROTO(proc_max_dirty_pages_in_mb)
+{
+ int rc = 0;
+ DECLARE_LL_PROC_PPOS_DECL;
+
+ if (!table->data || !table->maxlen || !*lenp || (*ppos && !write)) {
+ *lenp = 0;
+ return 0;
+ }
+ if (write) {
+ rc = lprocfs_write_frac_helper(buffer, *lenp,
+ (unsigned int*)table->data,
+ 1 << (20 - PAGE_CACHE_SHIFT));
+ /* Don't allow them to let dirty pages exceed 90% of system
+ * memory and set a hard minimum of 4MB. */
+ if (obd_max_dirty_pages > ((totalram_pages / 10) * 9)) {
+ CERROR("Refusing to set max dirty pages to %u, which "
+ "is more than 90%% of available RAM; setting "
+ "to %lu\n", obd_max_dirty_pages,
+ ((totalram_pages / 10) * 9));
+ obd_max_dirty_pages = ((totalram_pages / 10) * 9);
+ } else if (obd_max_dirty_pages < 4 << (20 - PAGE_CACHE_SHIFT)) {
+ obd_max_dirty_pages = 4 << (20 - PAGE_CACHE_SHIFT);
+ }
+ } else {
+ char buf[21];
+ int len;
+
+ len = lprocfs_read_frac_helper(buf, sizeof(buf),
+ *(unsigned int*)table->data,
+ 1 << (20 - PAGE_CACHE_SHIFT));
+ if (len > *lenp)
+ len = *lenp;
+ buf[len] = '\0';
+ if (copy_to_user(buffer, buf, len))
+ return -EFAULT;
+ *lenp = len;
+ }
+ *ppos += *lenp;
+ return rc;
+}
+
+int LL_PROC_PROTO(proc_alloc_fail_rate)
+{
+ int rc = 0;
+ DECLARE_LL_PROC_PPOS_DECL;
+
+ if (!table->data || !table->maxlen || !*lenp || (*ppos && !write)) {
+ *lenp = 0;
+ return 0;
+ }
+ if (write) {
+ rc = lprocfs_write_frac_helper(buffer, *lenp,
+ (unsigned int*)table->data,
+ OBD_ALLOC_FAIL_MULT);
+ } else {
+ char buf[21];
+ int len;
+
+ len = lprocfs_read_frac_helper(buf, 21,
+ *(unsigned int*)table->data,
+ OBD_ALLOC_FAIL_MULT);
+ if (len > *lenp)
+ len = *lenp;
+ buf[len] = '\0';
+ if (copy_to_user(buffer, buf, len))
+ return -EFAULT;
+ *lenp = len;
+ }
+ *ppos += *lenp;
+ return rc;
+}
+
+int LL_PROC_PROTO(proc_at_min)
+{
+ return ll_proc_dointvec(table, write, filp, buffer, lenp, ppos);
+}
+int LL_PROC_PROTO(proc_at_max)
+{
+ return ll_proc_dointvec(table, write, filp, buffer, lenp, ppos);
+}
+int LL_PROC_PROTO(proc_at_extra)
+{
+ return ll_proc_dointvec(table, write, filp, buffer, lenp, ppos);
+}
+int LL_PROC_PROTO(proc_at_early_margin)
+{
+ return ll_proc_dointvec(table, write, filp, buffer, lenp, ppos);
+}
+int LL_PROC_PROTO(proc_at_history)
+{
+ return ll_proc_dointvec(table, write, filp, buffer, lenp, ppos);
+}
+
+#ifdef CONFIG_SYSCTL
+static ctl_table_t obd_table[] = {
+ {
+ INIT_CTL_NAME(OBD_TIMEOUT)
+ .procname = "timeout",
+ .data = &obd_timeout,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_set_timeout
+ },
+ {
+ INIT_CTL_NAME(OBD_DEBUG_PEER_ON_TIMEOUT)
+ .procname = "debug_peer_on_timeout",
+ .data = &obd_debug_peer_on_timeout,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ INIT_CTL_NAME(OBD_DUMP_ON_TIMEOUT)
+ .procname = "dump_on_timeout",
+ .data = &obd_dump_on_timeout,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ INIT_CTL_NAME(OBD_DUMP_ON_EVICTION)
+ .procname = "dump_on_eviction",
+ .data = &obd_dump_on_eviction,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec
+ },
+ {
+ INIT_CTL_NAME(OBD_MEMUSED)
+ .procname = "memused",
+ .data = NULL,
+ .maxlen = 0,
+ .mode = 0444,
+ .proc_handler = &proc_memory_alloc
+ },
+ {
+ INIT_CTL_NAME(OBD_PAGESUSED)
+ .procname = "pagesused",
+ .data = NULL,
+ .maxlen = 0,
+ .mode = 0444,
+ .proc_handler = &proc_pages_alloc
+ },
+ {
+ INIT_CTL_NAME(OBD_MAXMEMUSED)
+ .procname = "memused_max",
+ .data = NULL,
+ .maxlen = 0,
+ .mode = 0444,
+ .proc_handler = &proc_mem_max
+ },
+ {
+ INIT_CTL_NAME(OBD_MAXPAGESUSED)
+ .procname = "pagesused_max",
+ .data = NULL,
+ .maxlen = 0,
+ .mode = 0444,
+ .proc_handler = &proc_pages_max
+ },
+ {
+ INIT_CTL_NAME(OBD_LDLM_TIMEOUT)
+ .procname = "ldlm_timeout",
+ .data = &ldlm_timeout,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_set_timeout
+ },
+ {
+ INIT_CTL_NAME(OBD_ALLOC_FAIL_RATE)
+ .procname = "alloc_fail_rate",
+ .data = &obd_alloc_fail_rate,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_alloc_fail_rate
+ },
+ {
+ INIT_CTL_NAME(OBD_MAX_DIRTY_PAGES)
+ .procname = "max_dirty_mb",
+ .data = &obd_max_dirty_pages,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_max_dirty_pages_in_mb
+ },
+ {
+ INIT_CTL_NAME(OBD_AT_MIN)
+ .procname = "at_min",
+ .data = &at_min,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_at_min
+ },
+ {
+ INIT_CTL_NAME(OBD_AT_MAX)
+ .procname = "at_max",
+ .data = &at_max,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_at_max
+ },
+ {
+ INIT_CTL_NAME(OBD_AT_EXTRA)
+ .procname = "at_extra",
+ .data = &at_extra,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_at_extra
+ },
+ {
+ INIT_CTL_NAME(OBD_AT_EARLY_MARGIN)
+ .procname = "at_early_margin",
+ .data = &at_early_margin,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_at_early_margin
+ },
+ {
+ INIT_CTL_NAME(OBD_AT_HISTORY)
+ .procname = "at_history",
+ .data = &at_history,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_at_history
+ },
+ { INIT_CTL_NAME(0) }
+};
+
+static ctl_table_t parent_table[] = {
+ {
+ INIT_CTL_NAME(OBD_SYSCTL)
+ .procname = "lustre",
+ .data = NULL,
+ .maxlen = 0,
+ .mode = 0555,
+ .child = obd_table
+ },
+ { INIT_CTL_NAME(0) }
+};
+#endif
+
+void obd_sysctl_init(void)
+{
+#ifdef CONFIG_SYSCTL
+	if (!obd_table_header)
+		obd_table_header = register_sysctl_table(parent_table);
+#endif
+}
+
+void obd_sysctl_clean(void)
+{
+#ifdef CONFIG_SYSCTL
+	if (obd_table_header)
+		unregister_sysctl_table(obd_table_header);
+	obd_table_header = NULL;
+#endif
+}
diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c
new file mode 100644
index 0000000..0cb4428
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/llog.c
@@ -0,0 +1,934 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdclass/llog.c
+ *
+ * OST<->MDS recovery logging infrastructure.
+ * Invariants in implementation:
+ * - we do not share logs among different OST<->MDS connections, so that
+ * if an OST or MDS fails it need only look at log(s) relevant to itself
+ *
+ * Author: Andreas Dilger <adilger@clusterfs.com>
+ * Author: Alex Zhuravlev <bzzz@whamcloud.com>
+ * Author: Mikhail Pershin <tappro@whamcloud.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LOG
+
+
+#include <obd_class.h>
+#include <lustre_log.h>
+#include "llog_internal.h"
+
+/*
+ * Allocate a new log or catalog handle
+ * Used inside llog_open().
+ */
+struct llog_handle *llog_alloc_handle(void)
+{
+ struct llog_handle *loghandle;
+
+ OBD_ALLOC_PTR(loghandle);
+ if (loghandle == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ init_rwsem(&loghandle->lgh_lock);
+ spin_lock_init(&loghandle->lgh_hdr_lock);
+ INIT_LIST_HEAD(&loghandle->u.phd.phd_entry);
+ atomic_set(&loghandle->lgh_refcount, 1);
+
+ return loghandle;
+}
+
+/*
+ * Free the llog handle and its header data, if any. Used only in llog_close().
+ */
+void llog_free_handle(struct llog_handle *loghandle)
+{
+ LASSERT(loghandle != NULL);
+
+ /* failed llog_init_handle */
+ if (!loghandle->lgh_hdr)
+ goto out;
+
+ if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_PLAIN)
+ LASSERT(list_empty(&loghandle->u.phd.phd_entry));
+ else if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_CAT)
+ LASSERT(list_empty(&loghandle->u.chd.chd_head));
+ LASSERT(sizeof(*(loghandle->lgh_hdr)) == LLOG_CHUNK_SIZE);
+ OBD_FREE(loghandle->lgh_hdr, LLOG_CHUNK_SIZE);
+out:
+ OBD_FREE_PTR(loghandle);
+}
+
+void llog_handle_get(struct llog_handle *loghandle)
+{
+ atomic_inc(&loghandle->lgh_refcount);
+}
+
+void llog_handle_put(struct llog_handle *loghandle)
+{
+ LASSERT(atomic_read(&loghandle->lgh_refcount) > 0);
+ if (atomic_dec_and_test(&loghandle->lgh_refcount))
+ llog_free_handle(loghandle);
+}
+
+/* returns negative on error; 0 if success; 1 if success & log destroyed */
+int llog_cancel_rec(const struct lu_env *env, struct llog_handle *loghandle,
+ int index)
+{
+ struct llog_log_hdr *llh = loghandle->lgh_hdr;
+ int rc = 0;
+
+ CDEBUG(D_RPCTRACE, "Canceling %d in log "DOSTID"\n",
+ index, POSTID(&loghandle->lgh_id.lgl_oi));
+
+ if (index == 0) {
+ CERROR("Can't cancel index 0 which is header\n");
+ return -EINVAL;
+ }
+
+ spin_lock(&loghandle->lgh_hdr_lock);
+ if (!ext2_clear_bit(index, llh->llh_bitmap)) {
+ spin_unlock(&loghandle->lgh_hdr_lock);
+ CDEBUG(D_RPCTRACE, "Catalog index %u already clear?\n", index);
+ return -ENOENT;
+ }
+
+ llh->llh_count--;
+
+ if ((llh->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
+ (llh->llh_count == 1) &&
+ (loghandle->lgh_last_idx == (LLOG_BITMAP_BYTES * 8) - 1)) {
+ spin_unlock(&loghandle->lgh_hdr_lock);
+ rc = llog_destroy(env, loghandle);
+ if (rc < 0) {
+ CERROR("%s: can't destroy empty llog #"DOSTID
+ "#%08x: rc = %d\n",
+ loghandle->lgh_ctxt->loc_obd->obd_name,
+ POSTID(&loghandle->lgh_id.lgl_oi),
+ loghandle->lgh_id.lgl_ogen, rc);
+ GOTO(out_err, rc);
+ }
+ return 1;
+ }
+ spin_unlock(&loghandle->lgh_hdr_lock);
+
+ rc = llog_write(env, loghandle, &llh->llh_hdr, NULL, 0, NULL, 0);
+ if (rc < 0) {
+ CERROR("%s: fail to write header for llog #"DOSTID
+ "#%08x: rc = %d\n",
+ loghandle->lgh_ctxt->loc_obd->obd_name,
+ POSTID(&loghandle->lgh_id.lgl_oi),
+ loghandle->lgh_id.lgl_ogen, rc);
+ GOTO(out_err, rc);
+ }
+ return 0;
+out_err:
+ spin_lock(&loghandle->lgh_hdr_lock);
+ ext2_set_bit(index, llh->llh_bitmap);
+ llh->llh_count++;
+ spin_unlock(&loghandle->lgh_hdr_lock);
+ return rc;
+}
+EXPORT_SYMBOL(llog_cancel_rec);
+
+static int llog_read_header(const struct lu_env *env,
+ struct llog_handle *handle,
+ struct obd_uuid *uuid)
+{
+ struct llog_operations *lop;
+ int rc;
+
+ rc = llog_handle2ops(handle, &lop);
+ if (rc)
+ return rc;
+
+ if (lop->lop_read_header == NULL)
+ return -EOPNOTSUPP;
+
+ rc = lop->lop_read_header(env, handle);
+ if (rc == LLOG_EEMPTY) {
+ struct llog_log_hdr *llh = handle->lgh_hdr;
+
+ handle->lgh_last_idx = 0; /* header is record with index 0 */
+ llh->llh_count = 1; /* for the header record */
+ llh->llh_hdr.lrh_type = LLOG_HDR_MAGIC;
+ llh->llh_hdr.lrh_len = llh->llh_tail.lrt_len = LLOG_CHUNK_SIZE;
+ llh->llh_hdr.lrh_index = llh->llh_tail.lrt_index = 0;
+ llh->llh_timestamp = cfs_time_current_sec();
+ if (uuid)
+ memcpy(&llh->llh_tgtuuid, uuid,
+ sizeof(llh->llh_tgtuuid));
+ llh->llh_bitmap_offset = offsetof(typeof(*llh), llh_bitmap);
+ ext2_set_bit(0, llh->llh_bitmap);
+ rc = 0;
+ }
+ return rc;
+}
+
+int llog_init_handle(const struct lu_env *env, struct llog_handle *handle,
+ int flags, struct obd_uuid *uuid)
+{
+ struct llog_log_hdr *llh;
+ int rc;
+
+ LASSERT(handle->lgh_hdr == NULL);
+
+ OBD_ALLOC_PTR(llh);
+ if (llh == NULL)
+ return -ENOMEM;
+ handle->lgh_hdr = llh;
+ /* first assign flags to use llog_client_ops */
+ llh->llh_flags = flags;
+ rc = llog_read_header(env, handle, uuid);
+ if (rc == 0) {
+ if (unlikely((llh->llh_flags & LLOG_F_IS_PLAIN &&
+ flags & LLOG_F_IS_CAT) ||
+ (llh->llh_flags & LLOG_F_IS_CAT &&
+ flags & LLOG_F_IS_PLAIN))) {
+ CERROR("%s: llog type is %s but initializing %s\n",
+ handle->lgh_ctxt->loc_obd->obd_name,
+ llh->llh_flags & LLOG_F_IS_CAT ?
+ "catalog" : "plain",
+ flags & LLOG_F_IS_CAT ? "catalog" : "plain");
+ GOTO(out, rc = -EINVAL);
+ } else if (llh->llh_flags &
+ (LLOG_F_IS_PLAIN | LLOG_F_IS_CAT)) {
+			/*
+			 * it is possible to open an llog without specifying
+			 * its type, so the type is taken from llh_flags
+			 */
+ flags = llh->llh_flags;
+ } else {
+ /* for some reason the llh_flags has no type set */
+ CERROR("llog type is not specified!\n");
+ GOTO(out, rc = -EINVAL);
+ }
+ if (unlikely(uuid &&
+ !obd_uuid_equals(uuid, &llh->llh_tgtuuid))) {
+ CERROR("%s: llog uuid mismatch: %s/%s\n",
+ handle->lgh_ctxt->loc_obd->obd_name,
+ (char *)uuid->uuid,
+ (char *)llh->llh_tgtuuid.uuid);
+ GOTO(out, rc = -EEXIST);
+ }
+ }
+ if (flags & LLOG_F_IS_CAT) {
+ LASSERT(list_empty(&handle->u.chd.chd_head));
+ INIT_LIST_HEAD(&handle->u.chd.chd_head);
+ llh->llh_size = sizeof(struct llog_logid_rec);
+ } else if (!(flags & LLOG_F_IS_PLAIN)) {
+ CERROR("%s: unknown flags: %#x (expected %#x or %#x)\n",
+ handle->lgh_ctxt->loc_obd->obd_name,
+ flags, LLOG_F_IS_CAT, LLOG_F_IS_PLAIN);
+ rc = -EINVAL;
+ }
+out:
+ if (rc) {
+ OBD_FREE_PTR(llh);
+ handle->lgh_hdr = NULL;
+ }
+ return rc;
+}
+EXPORT_SYMBOL(llog_init_handle);
+
+int llog_copy_handler(const struct lu_env *env,
+ struct llog_handle *llh,
+ struct llog_rec_hdr *rec,
+ void *data)
+{
+ struct llog_rec_hdr local_rec = *rec;
+ struct llog_handle *local_llh = (struct llog_handle *)data;
+	char *cfg_buf = (char *)(rec + 1);
+ struct lustre_cfg *lcfg;
+ int rc = 0;
+
+ /* Append all records */
+ local_rec.lrh_len -= sizeof(*rec) + sizeof(struct llog_rec_tail);
+ rc = llog_write(env, local_llh, &local_rec, NULL, 0,
+ (void *)cfg_buf, -1);
+
+ lcfg = (struct lustre_cfg *)cfg_buf;
+ CDEBUG(D_INFO, "idx=%d, rc=%d, len=%d, cmd %x %s %s\n",
+ rec->lrh_index, rc, rec->lrh_len, lcfg->lcfg_command,
+ lustre_cfg_string(lcfg, 0), lustre_cfg_string(lcfg, 1));
+
+ return rc;
+}
+EXPORT_SYMBOL(llog_copy_handler);
+
+static int llog_process_thread(void *arg)
+{
+ struct llog_process_info *lpi = arg;
+ struct llog_handle *loghandle = lpi->lpi_loghandle;
+ struct llog_log_hdr *llh = loghandle->lgh_hdr;
+ struct llog_process_cat_data *cd = lpi->lpi_catdata;
+ char *buf;
+ __u64 cur_offset = LLOG_CHUNK_SIZE;
+ __u64 last_offset;
+ int rc = 0, index = 1, last_index;
+ int saved_index = 0;
+ int last_called_index = 0;
+
+ LASSERT(llh);
+
+ OBD_ALLOC(buf, LLOG_CHUNK_SIZE);
+ if (!buf) {
+ lpi->lpi_rc = -ENOMEM;
+ return 0;
+ }
+
+ if (cd != NULL) {
+ last_called_index = cd->lpcd_first_idx;
+ index = cd->lpcd_first_idx + 1;
+ }
+ if (cd != NULL && cd->lpcd_last_idx)
+ last_index = cd->lpcd_last_idx;
+ else
+ last_index = LLOG_BITMAP_BYTES * 8 - 1;
+
+ while (rc == 0) {
+ struct llog_rec_hdr *rec;
+
+ /* skip records not set in bitmap */
+ while (index <= last_index &&
+ !ext2_test_bit(index, llh->llh_bitmap))
+ ++index;
+
+ LASSERT(index <= last_index + 1);
+ if (index == last_index + 1)
+ break;
+repeat:
+ CDEBUG(D_OTHER, "index: %d last_index %d\n",
+ index, last_index);
+
+ /* get the buf with our target record; avoid old garbage */
+ memset(buf, 0, LLOG_CHUNK_SIZE);
+ last_offset = cur_offset;
+ rc = llog_next_block(lpi->lpi_env, loghandle, &saved_index,
+ index, &cur_offset, buf, LLOG_CHUNK_SIZE);
+ if (rc)
+ GOTO(out, rc);
+
+ /* NB: when rec->lrh_len is accessed it is already swabbed
+ * since it is used at the "end" of the loop and the rec
+ * swabbing is done at the beginning of the loop. */
+ for (rec = (struct llog_rec_hdr *)buf;
+ (char *)rec < buf + LLOG_CHUNK_SIZE;
+		     rec = (struct llog_rec_hdr *)((char *)rec + rec->lrh_len)) {
+
+ CDEBUG(D_OTHER, "processing rec 0x%p type %#x\n",
+ rec, rec->lrh_type);
+
+ if (LLOG_REC_HDR_NEEDS_SWABBING(rec))
+ lustre_swab_llog_rec(rec);
+
+ CDEBUG(D_OTHER, "after swabbing, type=%#x idx=%d\n",
+ rec->lrh_type, rec->lrh_index);
+
+ if (rec->lrh_index == 0) {
+ /* probably another rec just got added? */
+ if (index <= loghandle->lgh_last_idx)
+ GOTO(repeat, rc = 0);
+ GOTO(out, rc = 0); /* no more records */
+ }
+ if (rec->lrh_len == 0 ||
+ rec->lrh_len > LLOG_CHUNK_SIZE) {
+ CWARN("invalid length %d in llog record for "
+ "index %d/%d\n", rec->lrh_len,
+ rec->lrh_index, index);
+ GOTO(out, rc = -EINVAL);
+ }
+
+ if (rec->lrh_index < index) {
+ CDEBUG(D_OTHER, "skipping lrh_index %d\n",
+ rec->lrh_index);
+ continue;
+ }
+
+ CDEBUG(D_OTHER,
+ "lrh_index: %d lrh_len: %d (%d remains)\n",
+ rec->lrh_index, rec->lrh_len,
+ (int)(buf + LLOG_CHUNK_SIZE - (char *)rec));
+
+ loghandle->lgh_cur_idx = rec->lrh_index;
+ loghandle->lgh_cur_offset = (char *)rec - (char *)buf +
+ last_offset;
+
+ /* if set, process the callback on this record */
+ if (ext2_test_bit(index, llh->llh_bitmap)) {
+ rc = lpi->lpi_cb(lpi->lpi_env, loghandle, rec,
+ lpi->lpi_cbdata);
+ last_called_index = index;
+ if (rc == LLOG_PROC_BREAK) {
+ GOTO(out, rc);
+ } else if (rc == LLOG_DEL_RECORD) {
+ llog_cancel_rec(lpi->lpi_env,
+ loghandle,
+ rec->lrh_index);
+ rc = 0;
+ }
+ if (rc)
+ GOTO(out, rc);
+ } else {
+ CDEBUG(D_OTHER, "Skipped index %d\n", index);
+ }
+
+ /* next record, still in buffer? */
+ ++index;
+ if (index > last_index)
+ GOTO(out, rc = 0);
+ }
+ }
+
+out:
+ if (cd != NULL)
+ cd->lpcd_last_idx = last_called_index;
+
+ OBD_FREE(buf, LLOG_CHUNK_SIZE);
+ lpi->lpi_rc = rc;
+ return 0;
+}
+
+static int llog_process_thread_daemonize(void *arg)
+{
+ struct llog_process_info *lpi = arg;
+ struct lu_env env;
+ int rc;
+
+ unshare_fs_struct();
+
+ /* client env has no keys, tags is just 0 */
+ rc = lu_env_init(&env, LCT_LOCAL | LCT_MG_THREAD);
+ if (rc)
+ goto out;
+ lpi->lpi_env = &env;
+
+ rc = llog_process_thread(arg);
+
+ lu_env_fini(&env);
+out:
+ complete(&lpi->lpi_completion);
+ return rc;
+}
+
+int llog_process_or_fork(const struct lu_env *env,
+ struct llog_handle *loghandle,
+ llog_cb_t cb, void *data, void *catdata, bool fork)
+{
+ struct llog_process_info *lpi;
+ int rc;
+
+ OBD_ALLOC_PTR(lpi);
+ if (lpi == NULL) {
+ CERROR("cannot alloc pointer\n");
+ return -ENOMEM;
+ }
+ lpi->lpi_loghandle = loghandle;
+ lpi->lpi_cb = cb;
+ lpi->lpi_cbdata = data;
+ lpi->lpi_catdata = catdata;
+
+ if (fork) {
+		/* The new thread can't use the parent's env, so a new one
+		 * is initialized in llog_process_thread_daemonize(). */
+ lpi->lpi_env = NULL;
+ init_completion(&lpi->lpi_completion);
+ rc = PTR_ERR(kthread_run(llog_process_thread_daemonize, lpi,
+ "llog_process_thread"));
+ if (IS_ERR_VALUE(rc)) {
+ CERROR("%s: cannot start thread: rc = %d\n",
+ loghandle->lgh_ctxt->loc_obd->obd_name, rc);
+ OBD_FREE_PTR(lpi);
+ return rc;
+ }
+ wait_for_completion(&lpi->lpi_completion);
+ } else {
+ lpi->lpi_env = env;
+ llog_process_thread(lpi);
+ }
+ rc = lpi->lpi_rc;
+ OBD_FREE_PTR(lpi);
+ return rc;
+}
+EXPORT_SYMBOL(llog_process_or_fork);
+
+int llog_process(const struct lu_env *env, struct llog_handle *loghandle,
+ llog_cb_t cb, void *data, void *catdata)
+{
+ return llog_process_or_fork(env, loghandle, cb, data, catdata, true);
+}
+EXPORT_SYMBOL(llog_process);
+
+inline int llog_get_size(struct llog_handle *loghandle)
+{
+ if (loghandle && loghandle->lgh_hdr)
+ return loghandle->lgh_hdr->llh_count;
+ return 0;
+}
+EXPORT_SYMBOL(llog_get_size);
+
+int llog_reverse_process(const struct lu_env *env,
+ struct llog_handle *loghandle, llog_cb_t cb,
+ void *data, void *catdata)
+{
+ struct llog_log_hdr *llh = loghandle->lgh_hdr;
+ struct llog_process_cat_data *cd = catdata;
+ void *buf;
+ int rc = 0, first_index = 1, index, idx;
+
+ OBD_ALLOC(buf, LLOG_CHUNK_SIZE);
+ if (!buf)
+ return -ENOMEM;
+
+ if (cd != NULL)
+ first_index = cd->lpcd_first_idx + 1;
+ if (cd != NULL && cd->lpcd_last_idx)
+ index = cd->lpcd_last_idx;
+ else
+ index = LLOG_BITMAP_BYTES * 8 - 1;
+
+ while (rc == 0) {
+ struct llog_rec_hdr *rec;
+ struct llog_rec_tail *tail;
+
+ /* skip records not set in bitmap */
+ while (index >= first_index &&
+ !ext2_test_bit(index, llh->llh_bitmap))
+ --index;
+
+ LASSERT(index >= first_index - 1);
+ if (index == first_index - 1)
+ break;
+
+ /* get the buf with our target record; avoid old garbage */
+ memset(buf, 0, LLOG_CHUNK_SIZE);
+ rc = llog_prev_block(env, loghandle, index, buf,
+ LLOG_CHUNK_SIZE);
+ if (rc)
+ GOTO(out, rc);
+
+ rec = buf;
+ idx = rec->lrh_index;
+ CDEBUG(D_RPCTRACE, "index %u : idx %u\n", index, idx);
+ while (idx < index) {
+ rec = (void *)rec + rec->lrh_len;
+ if (LLOG_REC_HDR_NEEDS_SWABBING(rec))
+ lustre_swab_llog_rec(rec);
+			idx++;
+ }
+ LASSERT(idx == index);
+ tail = (void *)rec + rec->lrh_len - sizeof(*tail);
+
+ /* process records in buffer, starting where we found one */
+ while ((void *)tail > buf) {
+ if (tail->lrt_index == 0)
+ GOTO(out, rc = 0); /* no more records */
+
+ /* if set, process the callback on this record */
+ if (ext2_test_bit(index, llh->llh_bitmap)) {
+ rec = (void *)tail - tail->lrt_len +
+ sizeof(*tail);
+
+ rc = cb(env, loghandle, rec, data);
+ if (rc == LLOG_PROC_BREAK) {
+ GOTO(out, rc);
+ } else if (rc == LLOG_DEL_RECORD) {
+ llog_cancel_rec(env, loghandle,
+ tail->lrt_index);
+ rc = 0;
+ }
+ if (rc)
+ GOTO(out, rc);
+ }
+
+ /* previous record, still in buffer? */
+ --index;
+ if (index < first_index)
+ GOTO(out, rc = 0);
+ tail = (void *)tail - tail->lrt_len;
+ }
+ }
+
+out:
+ if (buf)
+ OBD_FREE(buf, LLOG_CHUNK_SIZE);
+ return rc;
+}
+EXPORT_SYMBOL(llog_reverse_process);
+
+/**
+ * new llog API
+ *
+ * API functions:
+ * llog_open - open llog, may not exist
+ * llog_exist - check if llog exists
+ * llog_close - close opened llog, pair for open, frees llog_handle
+ * llog_declare_create - declare llog creation
+ * llog_create - create new llog on disk, need transaction handle
+ * llog_declare_write_rec - declaration of llog write
+ * llog_write_rec - write llog record on disk, need transaction handle
+ * llog_declare_add - declare llog catalog record addition
+ * llog_add - add llog record in catalog, need transaction handle
+ */
+int llog_exist(struct llog_handle *loghandle)
+{
+ struct llog_operations *lop;
+ int rc;
+
+ rc = llog_handle2ops(loghandle, &lop);
+ if (rc)
+ return rc;
+ if (lop->lop_exist == NULL)
+ return -EOPNOTSUPP;
+
+ rc = lop->lop_exist(loghandle);
+ return rc;
+}
+EXPORT_SYMBOL(llog_exist);
+
+int llog_declare_create(const struct lu_env *env,
+ struct llog_handle *loghandle, struct thandle *th)
+{
+ struct llog_operations *lop;
+ int raised, rc;
+
+ rc = llog_handle2ops(loghandle, &lop);
+ if (rc)
+ return rc;
+ if (lop->lop_declare_create == NULL)
+ return -EOPNOTSUPP;
+
+ raised = cfs_cap_raised(CFS_CAP_SYS_RESOURCE);
+ if (!raised)
+ cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
+ rc = lop->lop_declare_create(env, loghandle, th);
+ if (!raised)
+ cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
+ return rc;
+}
+EXPORT_SYMBOL(llog_declare_create);
+
+int llog_create(const struct lu_env *env, struct llog_handle *handle,
+ struct thandle *th)
+{
+ struct llog_operations *lop;
+ int raised, rc;
+
+ rc = llog_handle2ops(handle, &lop);
+ if (rc)
+ return rc;
+ if (lop->lop_create == NULL)
+ return -EOPNOTSUPP;
+
+ raised = cfs_cap_raised(CFS_CAP_SYS_RESOURCE);
+ if (!raised)
+ cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
+ rc = lop->lop_create(env, handle, th);
+ if (!raised)
+ cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
+ return rc;
+}
+EXPORT_SYMBOL(llog_create);
+
+int llog_declare_write_rec(const struct lu_env *env,
+ struct llog_handle *handle,
+ struct llog_rec_hdr *rec, int idx,
+ struct thandle *th)
+{
+ struct llog_operations *lop;
+ int raised, rc;
+
+ rc = llog_handle2ops(handle, &lop);
+ if (rc)
+ return rc;
+ LASSERT(lop);
+ if (lop->lop_declare_write_rec == NULL)
+ return -EOPNOTSUPP;
+
+ raised = cfs_cap_raised(CFS_CAP_SYS_RESOURCE);
+ if (!raised)
+ cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
+ rc = lop->lop_declare_write_rec(env, handle, rec, idx, th);
+ if (!raised)
+ cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
+ return rc;
+}
+EXPORT_SYMBOL(llog_declare_write_rec);
+
+int llog_write_rec(const struct lu_env *env, struct llog_handle *handle,
+ struct llog_rec_hdr *rec, struct llog_cookie *logcookies,
+ int numcookies, void *buf, int idx, struct thandle *th)
+{
+ struct llog_operations *lop;
+ int raised, rc, buflen;
+
+ rc = llog_handle2ops(handle, &lop);
+ if (rc)
+ return rc;
+
+ LASSERT(lop);
+ if (lop->lop_write_rec == NULL)
+ return -EOPNOTSUPP;
+
+ if (buf)
+ buflen = rec->lrh_len + sizeof(struct llog_rec_hdr) +
+ sizeof(struct llog_rec_tail);
+ else
+ buflen = rec->lrh_len;
+ LASSERT(cfs_size_round(buflen) == buflen);
+
+ raised = cfs_cap_raised(CFS_CAP_SYS_RESOURCE);
+ if (!raised)
+ cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
+ rc = lop->lop_write_rec(env, handle, rec, logcookies, numcookies,
+ buf, idx, th);
+ if (!raised)
+ cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
+ return rc;
+}
+EXPORT_SYMBOL(llog_write_rec);
+
+int llog_add(const struct lu_env *env, struct llog_handle *lgh,
+ struct llog_rec_hdr *rec, struct llog_cookie *logcookies,
+ void *buf, struct thandle *th)
+{
+ int raised, rc;
+
+ if (lgh->lgh_logops->lop_add == NULL)
+ return -EOPNOTSUPP;
+
+ raised = cfs_cap_raised(CFS_CAP_SYS_RESOURCE);
+ if (!raised)
+ cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
+ rc = lgh->lgh_logops->lop_add(env, lgh, rec, logcookies, buf, th);
+ if (!raised)
+ cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
+ return rc;
+}
+EXPORT_SYMBOL(llog_add);
+
+int llog_declare_add(const struct lu_env *env, struct llog_handle *lgh,
+ struct llog_rec_hdr *rec, struct thandle *th)
+{
+ int raised, rc;
+
+ if (lgh->lgh_logops->lop_declare_add == NULL)
+ return -EOPNOTSUPP;
+
+ raised = cfs_cap_raised(CFS_CAP_SYS_RESOURCE);
+ if (!raised)
+ cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
+ rc = lgh->lgh_logops->lop_declare_add(env, lgh, rec, th);
+ if (!raised)
+ cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
+ return rc;
+}
+EXPORT_SYMBOL(llog_declare_add);
+
+/**
+ * Helper function to open an llog, creating it if it doesn't exist.
+ * It hides all transaction handling from the caller.
+ */
+int llog_open_create(const struct lu_env *env, struct llog_ctxt *ctxt,
+ struct llog_handle **res, struct llog_logid *logid,
+ char *name)
+{
+ struct thandle *th;
+ int rc;
+
+ rc = llog_open(env, ctxt, res, logid, name, LLOG_OPEN_NEW);
+ if (rc)
+ return rc;
+
+ if (llog_exist(*res))
+ return 0;
+
+ if ((*res)->lgh_obj != NULL) {
+ struct dt_device *d;
+
+ d = lu2dt_dev((*res)->lgh_obj->do_lu.lo_dev);
+
+ th = dt_trans_create(env, d);
+ if (IS_ERR(th))
+ GOTO(out, rc = PTR_ERR(th));
+
+ rc = llog_declare_create(env, *res, th);
+ if (rc == 0) {
+ rc = dt_trans_start_local(env, d, th);
+ if (rc == 0)
+ rc = llog_create(env, *res, th);
+ }
+ dt_trans_stop(env, d, th);
+ } else {
+ /* lvfs compat code */
+ LASSERT((*res)->lgh_file == NULL);
+ rc = llog_create(env, *res, NULL);
+ }
+out:
+ if (rc)
+ llog_close(env, *res);
+ return rc;
+}
+EXPORT_SYMBOL(llog_open_create);
+
+/**
+ * Helper function to delete an existing llog.
+ */
+int llog_erase(const struct lu_env *env, struct llog_ctxt *ctxt,
+ struct llog_logid *logid, char *name)
+{
+ struct llog_handle *handle;
+ int rc = 0, rc2;
+
+ /* nothing to erase */
+ if (name == NULL && logid == NULL)
+ return 0;
+
+ rc = llog_open(env, ctxt, &handle, logid, name, LLOG_OPEN_EXISTS);
+ if (rc < 0)
+ return rc;
+
+ rc = llog_init_handle(env, handle, LLOG_F_IS_PLAIN, NULL);
+ if (rc == 0)
+ rc = llog_destroy(env, handle);
+
+ rc2 = llog_close(env, handle);
+ if (rc == 0)
+ rc = rc2;
+ return rc;
+}
+EXPORT_SYMBOL(llog_erase);
+
+/*
+ * Helper function to write a record to an llog.
+ * It hides all transaction handling from the caller.
+ * Valid only for local llogs.
+ */
+int llog_write(const struct lu_env *env, struct llog_handle *loghandle,
+ struct llog_rec_hdr *rec, struct llog_cookie *reccookie,
+ int cookiecount, void *buf, int idx)
+{
+ int rc;
+
+ LASSERT(loghandle);
+ LASSERT(loghandle->lgh_ctxt);
+
+ if (loghandle->lgh_obj != NULL) {
+ struct dt_device *dt;
+ struct thandle *th;
+
+ dt = lu2dt_dev(loghandle->lgh_obj->do_lu.lo_dev);
+
+ th = dt_trans_create(env, dt);
+ if (IS_ERR(th))
+ return PTR_ERR(th);
+
+ rc = llog_declare_write_rec(env, loghandle, rec, idx, th);
+ if (rc)
+ GOTO(out_trans, rc);
+
+ rc = dt_trans_start_local(env, dt, th);
+ if (rc)
+ GOTO(out_trans, rc);
+
+ down_write(&loghandle->lgh_lock);
+ rc = llog_write_rec(env, loghandle, rec, reccookie,
+ cookiecount, buf, idx, th);
+ up_write(&loghandle->lgh_lock);
+out_trans:
+ dt_trans_stop(env, dt, th);
+ } else { /* lvfs compatibility */
+ down_write(&loghandle->lgh_lock);
+ rc = llog_write_rec(env, loghandle, rec, reccookie,
+ cookiecount, buf, idx, NULL);
+ up_write(&loghandle->lgh_lock);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(llog_write);
+
+int llog_open(const struct lu_env *env, struct llog_ctxt *ctxt,
+ struct llog_handle **lgh, struct llog_logid *logid,
+ char *name, enum llog_open_param open_param)
+{
+ int raised;
+ int rc;
+
+ LASSERT(ctxt);
+ LASSERT(ctxt->loc_logops);
+
+ if (ctxt->loc_logops->lop_open == NULL) {
+ *lgh = NULL;
+ return -EOPNOTSUPP;
+ }
+
+ *lgh = llog_alloc_handle();
+ if (*lgh == NULL)
+ return -ENOMEM;
+ (*lgh)->lgh_ctxt = ctxt;
+ (*lgh)->lgh_logops = ctxt->loc_logops;
+
+ raised = cfs_cap_raised(CFS_CAP_SYS_RESOURCE);
+ if (!raised)
+ cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
+ rc = ctxt->loc_logops->lop_open(env, *lgh, logid, name, open_param);
+ if (!raised)
+ cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
+ if (rc) {
+ llog_free_handle(*lgh);
+ *lgh = NULL;
+ }
+ return rc;
+}
+EXPORT_SYMBOL(llog_open);
+
+int llog_close(const struct lu_env *env, struct llog_handle *loghandle)
+{
+ struct llog_operations *lop;
+ int rc;
+
+ rc = llog_handle2ops(loghandle, &lop);
+ if (rc)
+ GOTO(out, rc);
+ if (lop->lop_close == NULL)
+ GOTO(out, rc = -EOPNOTSUPP);
+ rc = lop->lop_close(env, loghandle);
+out:
+ llog_handle_put(loghandle);
+ return rc;
+}
+EXPORT_SYMBOL(llog_close);
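+
+#if 0
+/*
+ * Usage sketch, hypothetical: walking an existing plain llog with the API
+ * above, mirroring the open/init/process/close pattern of llog_erase().
+ * The function name and the callback are assumptions.
+ */
+static int example_llog_walk(const struct lu_env *env, struct llog_ctxt *ctxt,
+			     char *name, llog_cb_t cb, void *cbdata)
+{
+	struct llog_handle *llh;
+	int rc, rc2;
+
+	rc = llog_open(env, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS);
+	if (rc)
+		return rc;
+
+	rc = llog_init_handle(env, llh, LLOG_F_IS_PLAIN, NULL);
+	if (rc == 0)
+		rc = llog_process(env, llh, cb, cbdata, NULL);
+
+	rc2 = llog_close(env, llh);	/* drops the reference from llog_open */
+	return rc ? rc : rc2;
+}
+#endif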
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_cat.c b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
new file mode 100644
index 0000000..c0f3af7
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
@@ -0,0 +1,815 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdclass/llog_cat.c
+ *
+ * OST<->MDS recovery logging infrastructure.
+ *
+ * Invariants in implementation:
+ * - we do not share logs among different OST<->MDS connections, so that
+ * if an OST or MDS fails it need only look at log(s) relevant to itself
+ *
+ * Author: Andreas Dilger <adilger@clusterfs.com>
+ * Author: Alexey Zhuravlev <alexey.zhuravlev@intel.com>
+ * Author: Mikhail Pershin <mike.pershin@intel.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LOG
+
+
+#include <obd_class.h>
+
+#include "llog_internal.h"
+
+/* Create a new log handle and add it to the open list.
+ * This log handle will be closed when all of the records in it are removed.
+ *
+ * Assumes caller has already pushed us into the kernel context and is locking.
+ */
+static int llog_cat_new_log(const struct lu_env *env,
+ struct llog_handle *cathandle,
+ struct llog_handle *loghandle,
+ struct thandle *th)
+{
+ struct llog_log_hdr *llh;
+ struct llog_logid_rec rec = { { 0 }, };
+ int rc, index, bitmap_size;
+
+ llh = cathandle->lgh_hdr;
+ bitmap_size = LLOG_BITMAP_SIZE(llh);
+
+ index = (cathandle->lgh_last_idx + 1) % bitmap_size;
+
+	/* maximum number of available slots in the catalog is bitmap_size - 2 */
+ if (llh->llh_cat_idx == index) {
+ CERROR("no free catalog slots for log...\n");
+ return -ENOSPC;
+ }
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_MDS_LLOG_CREATE_FAILED))
+ return -ENOSPC;
+
+ rc = llog_create(env, loghandle, th);
+	/* if the llog already exists, there is no need to initialize it */
+ if (rc == -EEXIST) {
+ return 0;
+ } else if (rc != 0) {
+ CERROR("%s: can't create new plain llog in catalog: rc = %d\n",
+ loghandle->lgh_ctxt->loc_obd->obd_name, rc);
+ return rc;
+ }
+
+ rc = llog_init_handle(env, loghandle,
+ LLOG_F_IS_PLAIN | LLOG_F_ZAP_WHEN_EMPTY,
+ &cathandle->lgh_hdr->llh_tgtuuid);
+ if (rc)
+ GOTO(out_destroy, rc);
+
+ if (index == 0)
+ index = 1;
+
+ spin_lock(&loghandle->lgh_hdr_lock);
+ llh->llh_count++;
+ if (ext2_set_bit(index, llh->llh_bitmap)) {
+ CERROR("argh, index %u already set in log bitmap?\n",
+ index);
+ spin_unlock(&loghandle->lgh_hdr_lock);
+ LBUG(); /* should never happen */
+ }
+ spin_unlock(&loghandle->lgh_hdr_lock);
+
+ cathandle->lgh_last_idx = index;
+ llh->llh_tail.lrt_index = index;
+
+ CDEBUG(D_RPCTRACE,"new recovery log "DOSTID":%x for index %u of catalog"
+ DOSTID"\n", POSTID(&loghandle->lgh_id.lgl_oi),
+ loghandle->lgh_id.lgl_ogen, index,
+ POSTID(&cathandle->lgh_id.lgl_oi));
+ /* build the record for this log in the catalog */
+ rec.lid_hdr.lrh_len = sizeof(rec);
+ rec.lid_hdr.lrh_index = index;
+ rec.lid_hdr.lrh_type = LLOG_LOGID_MAGIC;
+ rec.lid_id = loghandle->lgh_id;
+ rec.lid_tail.lrt_len = sizeof(rec);
+ rec.lid_tail.lrt_index = index;
+
+ /* update the catalog: header and record */
+ rc = llog_write_rec(env, cathandle, &rec.lid_hdr,
+ &loghandle->u.phd.phd_cookie, 1, NULL, index, th);
+ if (rc < 0)
+ GOTO(out_destroy, rc);
+
+ loghandle->lgh_hdr->llh_cat_idx = index;
+ return 0;
+out_destroy:
+ llog_destroy(env, loghandle);
+ return rc;
+}
+
+/* Open an existing log handle and add it to the open list.
+ * This log handle will be closed when all of the records in it are removed.
+ *
+ * Assumes caller has already pushed us into the kernel context and is locking.
+ * We return a lock on the handle to ensure nobody yanks it from us.
+ *
+ * This takes an extra reference on the llog_handle via llog_handle_get() and
+ * requires the caller to drop it with llog_handle_put().
+ */
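+/*
+ * Typical use (sketch, mirroring llog_cat_cancel_records() below):
+ *
+ *	rc = llog_cat_id2handle(env, cathandle, &loghandle, logid);
+ *	if (rc == 0) {
+ *		... operate on loghandle ...
+ *		llog_handle_put(loghandle);
+ *	}
+ */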
+int llog_cat_id2handle(const struct lu_env *env, struct llog_handle *cathandle,
+ struct llog_handle **res, struct llog_logid *logid)
+{
+ struct llog_handle *loghandle;
+ int rc = 0;
+
+ if (cathandle == NULL)
+ return -EBADF;
+
+ down_write(&cathandle->lgh_lock);
+ list_for_each_entry(loghandle, &cathandle->u.chd.chd_head,
+ u.phd.phd_entry) {
+ struct llog_logid *cgl = &loghandle->lgh_id;
+
+ if (ostid_id(&cgl->lgl_oi) == ostid_id(&logid->lgl_oi) &&
+ ostid_seq(&cgl->lgl_oi) == ostid_seq(&logid->lgl_oi)) {
+ if (cgl->lgl_ogen != logid->lgl_ogen) {
+ CERROR("%s: log "DOSTID" generation %x != %x\n",
+ loghandle->lgh_ctxt->loc_obd->obd_name,
+ POSTID(&logid->lgl_oi), cgl->lgl_ogen,
+ logid->lgl_ogen);
+ continue;
+ }
+ loghandle->u.phd.phd_cat_handle = cathandle;
+ up_write(&cathandle->lgh_lock);
+ GOTO(out, rc = 0);
+ }
+ }
+ up_write(&cathandle->lgh_lock);
+
+ rc = llog_open(env, cathandle->lgh_ctxt, &loghandle, logid, NULL,
+ LLOG_OPEN_EXISTS);
+ if (rc < 0) {
+ CERROR("%s: error opening log id "DOSTID":%x: rc = %d\n",
+ cathandle->lgh_ctxt->loc_obd->obd_name,
+ POSTID(&logid->lgl_oi), logid->lgl_ogen, rc);
+ return rc;
+ }
+
+ rc = llog_init_handle(env, loghandle, LLOG_F_IS_PLAIN, NULL);
+ if (rc < 0) {
+ llog_close(env, loghandle);
+ loghandle = NULL;
+ return rc;
+ }
+
+ down_write(&cathandle->lgh_lock);
+ list_add(&loghandle->u.phd.phd_entry, &cathandle->u.chd.chd_head);
+ up_write(&cathandle->lgh_lock);
+
+ loghandle->u.phd.phd_cat_handle = cathandle;
+ loghandle->u.phd.phd_cookie.lgc_lgl = cathandle->lgh_id;
+ loghandle->u.phd.phd_cookie.lgc_index =
+ loghandle->lgh_hdr->llh_cat_idx;
+out:
+ llog_handle_get(loghandle);
+ *res = loghandle;
+ return 0;
+}
+
+int llog_cat_close(const struct lu_env *env, struct llog_handle *cathandle)
+{
+ struct llog_handle *loghandle, *n;
+ int rc;
+
+ list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head,
+ u.phd.phd_entry) {
+ struct llog_log_hdr *llh = loghandle->lgh_hdr;
+ int index;
+
+ /* unlink open-not-created llogs */
+ list_del_init(&loghandle->u.phd.phd_entry);
+ llh = loghandle->lgh_hdr;
+ if (loghandle->lgh_obj != NULL && llh != NULL &&
+ (llh->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
+ (llh->llh_count == 1)) {
+ rc = llog_destroy(env, loghandle);
+ if (rc)
+ CERROR("%s: failure destroying log during "
+ "cleanup: rc = %d\n",
+ loghandle->lgh_ctxt->loc_obd->obd_name,
+ rc);
+
+ index = loghandle->u.phd.phd_cookie.lgc_index;
+ llog_cat_cleanup(env, cathandle, NULL, index);
+ }
+ llog_close(env, loghandle);
+ }
+ /* if handle was stored in ctxt, remove it too */
+ if (cathandle->lgh_ctxt->loc_handle == cathandle)
+ cathandle->lgh_ctxt->loc_handle = NULL;
+ rc = llog_close(env, cathandle);
+ return rc;
+}
+EXPORT_SYMBOL(llog_cat_close);
+
+/**
+ * lockdep markers for nested struct llog_handle::lgh_lock locking.
+ */
+enum {
+ LLOGH_CAT,
+ LLOGH_LOG
+};
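+/*
+ * When both locks are held, the catalog lock (LLOGH_CAT) is taken before the
+ * plain-log lock (LLOGH_LOG), as in llog_cat_current_log() below.
+ */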
+
+/** Return the currently active log handle. If the current log handle doesn't
+ * have enough space left for the current record, start a new one.
+ *
+ * If reclen is 0, we only want to know what the currently active log is,
+ * otherwise we get a lock on this log so nobody can steal our space.
+ *
+ * Assumes caller has already pushed us into the kernel context and is locking.
+ *
+ * NOTE: loghandle is write-locked upon successful return
+ */
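+/*
+ * Flow (informal): the fast path checks chd_current_log under the catalog
+ * read lock; if that log is full, the slow path retakes the catalog write
+ * lock, re-checks, and then switches to chd_next_log, which is prepared in
+ * advance by llog_cat_declare_add_rec().
+ */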
+static struct llog_handle *llog_cat_current_log(struct llog_handle *cathandle,
+ struct thandle *th)
+{
+ struct llog_handle *loghandle = NULL;
+
+ down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
+ loghandle = cathandle->u.chd.chd_current_log;
+ if (loghandle) {
+ struct llog_log_hdr *llh;
+
+ down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
+ llh = loghandle->lgh_hdr;
+ if (llh == NULL ||
+ loghandle->lgh_last_idx < LLOG_BITMAP_SIZE(llh) - 1) {
+ up_read(&cathandle->lgh_lock);
+ return loghandle;
+ } else {
+ up_write(&loghandle->lgh_lock);
+ }
+ }
+ up_read(&cathandle->lgh_lock);
+
+ /* time to use next log */
+
+ /* first, we have to make sure the state hasn't changed */
+ down_write_nested(&cathandle->lgh_lock, LLOGH_CAT);
+ loghandle = cathandle->u.chd.chd_current_log;
+ if (loghandle) {
+ struct llog_log_hdr *llh;
+
+ down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
+ llh = loghandle->lgh_hdr;
+ LASSERT(llh);
+ if (loghandle->lgh_last_idx < LLOG_BITMAP_SIZE(llh) - 1) {
+ up_write(&cathandle->lgh_lock);
+ return loghandle;
+ } else {
+ up_write(&loghandle->lgh_lock);
+ }
+ }
+
+ CDEBUG(D_INODE, "use next log\n");
+
+ loghandle = cathandle->u.chd.chd_next_log;
+ cathandle->u.chd.chd_current_log = loghandle;
+ cathandle->u.chd.chd_next_log = NULL;
+ down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
+ up_write(&cathandle->lgh_lock);
+ LASSERT(loghandle);
+ return loghandle;
+}
+
+/* Add a single record to the recovery log(s) using a catalog.
+ * Returns the same values as llog_write_rec().
+ *
+ * Assumes caller has already pushed us into the kernel context.
+ */
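+/*
+ * Note: if the write into the current plain llog fails with -ENOSPC, the
+ * record is retried once against the next log handed out by
+ * llog_cat_current_log().
+ */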
+int llog_cat_add_rec(const struct lu_env *env, struct llog_handle *cathandle,
+ struct llog_rec_hdr *rec, struct llog_cookie *reccookie,
+ void *buf, struct thandle *th)
+{
+ struct llog_handle *loghandle;
+ int rc;
+
+ LASSERT(rec->lrh_len <= LLOG_CHUNK_SIZE);
+ loghandle = llog_cat_current_log(cathandle, th);
+ LASSERT(!IS_ERR(loghandle));
+
+ /* loghandle is already locked by llog_cat_current_log() for us */
+ if (!llog_exist(loghandle)) {
+ rc = llog_cat_new_log(env, cathandle, loghandle, th);
+ if (rc < 0) {
+ up_write(&loghandle->lgh_lock);
+ return rc;
+ }
+ }
+ /* now let's try to add the record */
+ rc = llog_write_rec(env, loghandle, rec, reccookie, 1, buf, -1, th);
+ if (rc < 0)
+ CDEBUG_LIMIT(rc == -ENOSPC ? D_HA : D_ERROR,
+ "llog_write_rec %d: lh=%p\n", rc, loghandle);
+ up_write(&loghandle->lgh_lock);
+ if (rc == -ENOSPC) {
+ /* try to use next log */
+ loghandle = llog_cat_current_log(cathandle, th);
+ LASSERT(!IS_ERR(loghandle));
+ /* new llog can be created concurrently */
+ if (!llog_exist(loghandle)) {
+ rc = llog_cat_new_log(env, cathandle, loghandle, th);
+ if (rc < 0) {
+ up_write(&loghandle->lgh_lock);
+ return rc;
+ }
+ }
+ /* now let's try to add the record */
+ rc = llog_write_rec(env, loghandle, rec, reccookie, 1, buf,
+ -1, th);
+ if (rc < 0)
+ CERROR("llog_write_rec %d: lh=%p\n", rc, loghandle);
+ up_write(&loghandle->lgh_lock);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(llog_cat_add_rec);
+
+int llog_cat_declare_add_rec(const struct lu_env *env,
+ struct llog_handle *cathandle,
+ struct llog_rec_hdr *rec, struct thandle *th)
+{
+ struct llog_handle *loghandle, *next;
+ int rc = 0;
+
+ if (cathandle->u.chd.chd_current_log == NULL) {
+ /* declare new plain llog */
+ down_write(&cathandle->lgh_lock);
+ if (cathandle->u.chd.chd_current_log == NULL) {
+ rc = llog_open(env, cathandle->lgh_ctxt, &loghandle,
+ NULL, NULL, LLOG_OPEN_NEW);
+ if (rc == 0) {
+ cathandle->u.chd.chd_current_log = loghandle;
+ list_add_tail(&loghandle->u.phd.phd_entry,
+ &cathandle->u.chd.chd_head);
+ }
+ }
+ up_write(&cathandle->lgh_lock);
+ } else if (cathandle->u.chd.chd_next_log == NULL) {
+ /* declare next plain llog */
+ down_write(&cathandle->lgh_lock);
+ if (cathandle->u.chd.chd_next_log == NULL) {
+ rc = llog_open(env, cathandle->lgh_ctxt, &loghandle,
+ NULL, NULL, LLOG_OPEN_NEW);
+ if (rc == 0) {
+ cathandle->u.chd.chd_next_log = loghandle;
+ list_add_tail(&loghandle->u.phd.phd_entry,
+ &cathandle->u.chd.chd_head);
+ }
+ }
+ up_write(&cathandle->lgh_lock);
+ }
+ if (rc)
+ GOTO(out, rc);
+
+ if (!llog_exist(cathandle->u.chd.chd_current_log)) {
+ rc = llog_declare_create(env, cathandle->u.chd.chd_current_log,
+ th);
+ if (rc)
+ GOTO(out, rc);
+ llog_declare_write_rec(env, cathandle, NULL, -1, th);
+ }
+ /* declare records in the llogs */
+ rc = llog_declare_write_rec(env, cathandle->u.chd.chd_current_log,
+ rec, -1, th);
+ if (rc)
+ GOTO(out, rc);
+
+ next = cathandle->u.chd.chd_next_log;
+ if (next) {
+ if (!llog_exist(next)) {
+ rc = llog_declare_create(env, next, th);
+ llog_declare_write_rec(env, cathandle, NULL, -1, th);
+ }
+ llog_declare_write_rec(env, next, rec, -1, th);
+ }
+out:
+ return rc;
+}
+EXPORT_SYMBOL(llog_cat_declare_add_rec);
+
+int llog_cat_add(const struct lu_env *env, struct llog_handle *cathandle,
+ struct llog_rec_hdr *rec, struct llog_cookie *reccookie,
+ void *buf)
+{
+ struct llog_ctxt *ctxt;
+ struct dt_device *dt;
+ struct thandle *th = NULL;
+ int rc;
+
+ ctxt = cathandle->lgh_ctxt;
+ LASSERT(ctxt);
+ LASSERT(ctxt->loc_exp);
+
+ if (cathandle->lgh_obj != NULL) {
+ dt = ctxt->loc_exp->exp_obd->obd_lvfs_ctxt.dt;
+ LASSERT(dt);
+
+ th = dt_trans_create(env, dt);
+ if (IS_ERR(th))
+ return PTR_ERR(th);
+
+ rc = llog_cat_declare_add_rec(env, cathandle, rec, th);
+ if (rc)
+ GOTO(out_trans, rc);
+
+ rc = dt_trans_start_local(env, dt, th);
+ if (rc)
+ GOTO(out_trans, rc);
+ rc = llog_cat_add_rec(env, cathandle, rec, reccookie, buf, th);
+out_trans:
+ dt_trans_stop(env, dt, th);
+ } else { /* lvfs compat code */
+ LASSERT(cathandle->lgh_file != NULL);
+ rc = llog_cat_declare_add_rec(env, cathandle, rec, th);
+ if (rc == 0)
+ rc = llog_cat_add_rec(env, cathandle, rec, reccookie,
+ buf, th);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(llog_cat_add);
+
+/* For each cookie in the cookie array, we clear the log in-use bit and either:
+ * - if the log is now empty, mark it free in the catalog header and delete it
+ * - if the log is not empty, just write out the log header
+ *
+ * The cookies may be in different log files, so we need to get new logs
+ * each time.
+ *
+ * Assumes caller has already pushed us into the kernel context.
+ */
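+/*
+ * The cookies are typically those returned through the reccookie argument of
+ * llog_cat_add_rec().  A return value of 1 from llog_cancel_rec() means the
+ * plain log itself was destroyed, in which case its catalog entry is removed
+ * via llog_cat_cleanup().
+ */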
+int llog_cat_cancel_records(const struct lu_env *env,
+ struct llog_handle *cathandle, int count,
+ struct llog_cookie *cookies)
+{
+ int i, index, rc = 0, failed = 0;
+
+ for (i = 0; i < count; i++, cookies++) {
+ struct llog_handle *loghandle;
+ struct llog_logid *lgl = &cookies->lgc_lgl;
+ int lrc;
+
+ rc = llog_cat_id2handle(env, cathandle, &loghandle, lgl);
+ if (rc) {
+ CERROR("%s: cannot find handle for llog "DOSTID": %d\n",
+ cathandle->lgh_ctxt->loc_obd->obd_name,
+ POSTID(&lgl->lgl_oi), rc);
+ failed++;
+ continue;
+ }
+
+ lrc = llog_cancel_rec(env, loghandle, cookies->lgc_index);
+ if (lrc == 1) { /* log has been destroyed */
+ index = loghandle->u.phd.phd_cookie.lgc_index;
+ rc = llog_cat_cleanup(env, cathandle, loghandle,
+ index);
+ } else if (lrc == -ENOENT) {
+ if (rc == 0) /* ENOENT shouldn't rewrite any error */
+ rc = lrc;
+ } else if (lrc < 0) {
+ failed++;
+ rc = lrc;
+ }
+ llog_handle_put(loghandle);
+ }
+ if (rc)
+ CERROR("%s: fail to cancel %d of %d llog-records: rc = %d\n",
+ cathandle->lgh_ctxt->loc_obd->obd_name, failed, count,
+ rc);
+
+ return rc;
+}
+EXPORT_SYMBOL(llog_cat_cancel_records);
+
+int llog_cat_process_cb(const struct lu_env *env, struct llog_handle *cat_llh,
+ struct llog_rec_hdr *rec, void *data)
+{
+ struct llog_process_data *d = data;
+ struct llog_logid_rec *lir = (struct llog_logid_rec *)rec;
+ struct llog_handle *llh;
+ int rc;
+
+ if (rec->lrh_type != LLOG_LOGID_MAGIC) {
+ CERROR("invalid record in catalog\n");
+ return -EINVAL;
+ }
+ CDEBUG(D_HA, "processing log "DOSTID":%x at index %u of catalog "
+ DOSTID"\n", POSTID(&lir->lid_id.lgl_oi), lir->lid_id.lgl_ogen,
+ rec->lrh_index, POSTID(&cat_llh->lgh_id.lgl_oi));
+
+ rc = llog_cat_id2handle(env, cat_llh, &llh, &lir->lid_id);
+ if (rc) {
+ CERROR("%s: cannot find handle for llog "DOSTID": %d\n",
+ cat_llh->lgh_ctxt->loc_obd->obd_name,
+ POSTID(&lir->lid_id.lgl_oi), rc);
+ return rc;
+ }
+
+ if (rec->lrh_index < d->lpd_startcat)
+ /* Skip processing of the logs until startcat */
+ return 0;
+
+ if (d->lpd_startidx > 0) {
+ struct llog_process_cat_data cd;
+
+ cd.lpcd_first_idx = d->lpd_startidx;
+ cd.lpcd_last_idx = 0;
+ rc = llog_process_or_fork(env, llh, d->lpd_cb, d->lpd_data,
+ &cd, false);
+ /* Continue processing the next log from idx 0 */
+ d->lpd_startidx = 0;
+ } else {
+ rc = llog_process_or_fork(env, llh, d->lpd_cb, d->lpd_data,
+ NULL, false);
+ }
+ llog_handle_put(llh);
+
+ return rc;
+}
+
+int llog_cat_process_or_fork(const struct lu_env *env,
+ struct llog_handle *cat_llh,
+ llog_cb_t cb, void *data, int startcat,
+ int startidx, bool fork)
+{
+ struct llog_process_data d;
+ struct llog_log_hdr *llh = cat_llh->lgh_hdr;
+ int rc;
+
+ LASSERT(llh->llh_flags & LLOG_F_IS_CAT);
+ d.lpd_data = data;
+ d.lpd_cb = cb;
+ d.lpd_startcat = startcat;
+ d.lpd_startidx = startidx;
+
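+	/*
+	 * If llh_cat_idx is greater than lgh_last_idx the catalog has wrapped
+	 * past index zero, so it is processed in two passes: from llh_cat_idx
+	 * to the end of the bitmap, then from index zero to lgh_last_idx.
+	 */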
+ if (llh->llh_cat_idx > cat_llh->lgh_last_idx) {
+ struct llog_process_cat_data cd;
+
+ CWARN("catlog "DOSTID" crosses index zero\n",
+ POSTID(&cat_llh->lgh_id.lgl_oi));
+
+ cd.lpcd_first_idx = llh->llh_cat_idx;
+ cd.lpcd_last_idx = 0;
+ rc = llog_process_or_fork(env, cat_llh, llog_cat_process_cb,
+ &d, &cd, fork);
+ if (rc != 0)
+ return rc;
+
+ cd.lpcd_first_idx = 0;
+ cd.lpcd_last_idx = cat_llh->lgh_last_idx;
+ rc = llog_process_or_fork(env, cat_llh, llog_cat_process_cb,
+ &d, &cd, fork);
+ } else {
+ rc = llog_process_or_fork(env, cat_llh, llog_cat_process_cb,
+ &d, NULL, fork);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(llog_cat_process_or_fork);
+
+int llog_cat_process(const struct lu_env *env, struct llog_handle *cat_llh,
+ llog_cb_t cb, void *data, int startcat, int startidx)
+{
+ return llog_cat_process_or_fork(env, cat_llh, cb, data, startcat,
+ startidx, false);
+}
+EXPORT_SYMBOL(llog_cat_process);
+
+static int llog_cat_reverse_process_cb(const struct lu_env *env,
+ struct llog_handle *cat_llh,
+ struct llog_rec_hdr *rec, void *data)
+{
+ struct llog_process_data *d = data;
+ struct llog_logid_rec *lir = (struct llog_logid_rec *)rec;
+ struct llog_handle *llh;
+ int rc;
+
+ if (le32_to_cpu(rec->lrh_type) != LLOG_LOGID_MAGIC) {
+ CERROR("invalid record in catalog\n");
+ return -EINVAL;
+ }
+ CDEBUG(D_HA, "processing log "DOSTID":%x at index %u of catalog "
+ DOSTID"\n", POSTID(&lir->lid_id.lgl_oi), lir->lid_id.lgl_ogen,
+ le32_to_cpu(rec->lrh_index), POSTID(&cat_llh->lgh_id.lgl_oi));
+
+ rc = llog_cat_id2handle(env, cat_llh, &llh, &lir->lid_id);
+ if (rc) {
+ CERROR("%s: cannot find handle for llog "DOSTID": %d\n",
+ cat_llh->lgh_ctxt->loc_obd->obd_name,
+ POSTID(&lir->lid_id.lgl_oi), rc);
+ return rc;
+ }
+
+ rc = llog_reverse_process(env, llh, d->lpd_cb, d->lpd_data, NULL);
+ llog_handle_put(llh);
+ return rc;
+}
+
+int llog_cat_reverse_process(const struct lu_env *env,
+ struct llog_handle *cat_llh,
+ llog_cb_t cb, void *data)
+{
+ struct llog_process_data d;
+ struct llog_process_cat_data cd;
+ struct llog_log_hdr *llh = cat_llh->lgh_hdr;
+ int rc;
+
+ LASSERT(llh->llh_flags & LLOG_F_IS_CAT);
+ d.lpd_data = data;
+ d.lpd_cb = cb;
+
+ if (llh->llh_cat_idx > cat_llh->lgh_last_idx) {
+ CWARN("catalog "DOSTID" crosses index zero\n",
+ POSTID(&cat_llh->lgh_id.lgl_oi));
+
+ cd.lpcd_first_idx = 0;
+ cd.lpcd_last_idx = cat_llh->lgh_last_idx;
+ rc = llog_reverse_process(env, cat_llh,
+ llog_cat_reverse_process_cb,
+ &d, &cd);
+ if (rc != 0)
+ return rc;
+
+ cd.lpcd_first_idx = le32_to_cpu(llh->llh_cat_idx);
+ cd.lpcd_last_idx = 0;
+ rc = llog_reverse_process(env, cat_llh,
+ llog_cat_reverse_process_cb,
+ &d, &cd);
+ } else {
+ rc = llog_reverse_process(env, cat_llh,
+ llog_cat_reverse_process_cb,
+ &d, NULL);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(llog_cat_reverse_process);
+
+int llog_cat_set_first_idx(struct llog_handle *cathandle, int index)
+{
+ struct llog_log_hdr *llh = cathandle->lgh_hdr;
+ int i, bitmap_size, idx;
+
+ bitmap_size = LLOG_BITMAP_SIZE(llh);
+ if (llh->llh_cat_idx == (index - 1)) {
+ idx = llh->llh_cat_idx + 1;
+ llh->llh_cat_idx = idx;
+ if (idx == cathandle->lgh_last_idx)
+ goto out;
+ for (i = (index + 1) % bitmap_size;
+ i != cathandle->lgh_last_idx;
+ i = (i + 1) % bitmap_size) {
+ if (!ext2_test_bit(i, llh->llh_bitmap)) {
+ idx = llh->llh_cat_idx + 1;
+ llh->llh_cat_idx = idx;
+ } else if (i == 0) {
+ llh->llh_cat_idx = 0;
+ } else {
+ break;
+ }
+ }
+out:
+ CDEBUG(D_RPCTRACE, "set catlog "DOSTID" first idx %u\n",
+ POSTID(&cathandle->lgh_id.lgl_oi), llh->llh_cat_idx);
+ }
+
+ return 0;
+}
+
+/* Cleanup deleted plain llog traces from catalog */
+int llog_cat_cleanup(const struct lu_env *env, struct llog_handle *cathandle,
+ struct llog_handle *loghandle, int index)
+{
+ int rc;
+
+ LASSERT(index);
+ if (loghandle != NULL) {
+		/* remove the destroyed llog from the catalog list and
+		 * reset chd_current_log if it points at it */
+ down_write(&cathandle->lgh_lock);
+ if (cathandle->u.chd.chd_current_log == loghandle)
+ cathandle->u.chd.chd_current_log = NULL;
+ list_del_init(&loghandle->u.phd.phd_entry);
+ up_write(&cathandle->lgh_lock);
+ LASSERT(index == loghandle->u.phd.phd_cookie.lgc_index);
+		/* the llog was opened and kept on the list, close it now */
+ llog_close(env, loghandle);
+ }
+ /* remove plain llog entry from catalog by index */
+ llog_cat_set_first_idx(cathandle, index);
+ rc = llog_cancel_rec(env, cathandle, index);
+ if (rc == 0)
+ CDEBUG(D_HA, "cancel plain log at index"
+ " %u of catalog "DOSTID"\n",
+ index, POSTID(&cathandle->lgh_id.lgl_oi));
+ return rc;
+}
+
+int cat_cancel_cb(const struct lu_env *env, struct llog_handle *cathandle,
+ struct llog_rec_hdr *rec, void *data)
+{
+ struct llog_logid_rec *lir = (struct llog_logid_rec *)rec;
+ struct llog_handle *loghandle;
+ struct llog_log_hdr *llh;
+ int rc;
+
+ if (rec->lrh_type != LLOG_LOGID_MAGIC) {
+ CERROR("invalid record in catalog\n");
+ return -EINVAL;
+ }
+
+ CDEBUG(D_HA, "processing log "DOSTID":%x at index %u of catalog "
+ DOSTID"\n", POSTID(&lir->lid_id.lgl_oi), lir->lid_id.lgl_ogen,
+ rec->lrh_index, POSTID(&cathandle->lgh_id.lgl_oi));
+
+ rc = llog_cat_id2handle(env, cathandle, &loghandle, &lir->lid_id);
+ if (rc) {
+ CERROR("%s: cannot find handle for llog "DOSTID": %d\n",
+ cathandle->lgh_ctxt->loc_obd->obd_name,
+ POSTID(&lir->lid_id.lgl_oi), rc);
+ if (rc == -ENOENT || rc == -ESTALE) {
+ /* remove index from catalog */
+ llog_cat_cleanup(env, cathandle, NULL, rec->lrh_index);
+ }
+ return rc;
+ }
+
+ llh = loghandle->lgh_hdr;
+ if ((llh->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
+ (llh->llh_count == 1)) {
+ rc = llog_destroy(env, loghandle);
+ if (rc)
+ CERROR("%s: fail to destroy empty log: rc = %d\n",
+ loghandle->lgh_ctxt->loc_obd->obd_name, rc);
+
+ llog_cat_cleanup(env, cathandle, loghandle,
+ loghandle->u.phd.phd_cookie.lgc_index);
+ }
+ llog_handle_put(loghandle);
+
+ return rc;
+}
+EXPORT_SYMBOL(cat_cancel_cb);
+
+/* helper to initialize a catalog llog and process it to destroy empty plain llogs */
+int llog_cat_init_and_process(const struct lu_env *env,
+ struct llog_handle *llh)
+{
+ int rc;
+
+ rc = llog_init_handle(env, llh, LLOG_F_IS_CAT, NULL);
+ if (rc)
+ return rc;
+
+ rc = llog_process_or_fork(env, llh, cat_cancel_cb, NULL, NULL, false);
+ if (rc)
+ CERROR("%s: llog_process() with cat_cancel_cb failed: rc = "
+ "%d\n", llh->lgh_ctxt->loc_obd->obd_name, rc);
+ return 0;
+}
+EXPORT_SYMBOL(llog_cat_init_and_process);
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_internal.h b/drivers/staging/lustre/lustre/obdclass/llog_internal.h
new file mode 100644
index 0000000..539e1d4
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/llog_internal.h
@@ -0,0 +1,98 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef __LLOG_INTERNAL_H__
+#define __LLOG_INTERNAL_H__
+
+#include <lustre_log.h>
+
+struct llog_process_info {
+ struct llog_handle *lpi_loghandle;
+ llog_cb_t lpi_cb;
+ void *lpi_cbdata;
+ void *lpi_catdata;
+ int lpi_rc;
+ struct completion lpi_completion;
+ const struct lu_env *lpi_env;
+};
+
+struct llog_thread_info {
+ struct lu_attr lgi_attr;
+ struct lu_fid lgi_fid;
+ struct dt_object_format lgi_dof;
+ struct lu_buf lgi_buf;
+ loff_t lgi_off;
+ struct llog_rec_hdr lgi_lrh;
+ struct llog_rec_tail lgi_tail;
+};
+
+extern struct lu_context_key llog_thread_key;
+
+static inline struct llog_thread_info *llog_info(const struct lu_env *env)
+{
+ struct llog_thread_info *lgi;
+
+ lgi = lu_context_key_get(&env->le_ctx, &llog_thread_key);
+ LASSERT(lgi);
+ return lgi;
+}
+
+static inline void
+lustre_build_llog_lvfs_oid(struct llog_logid *logid, __u64 ino, __u32 gen)
+{
+ ostid_set_seq_llog(&logid->lgl_oi);
+ ostid_set_id(&logid->lgl_oi, ino);
+ logid->lgl_ogen = gen;
+}
+
+int llog_info_init(void);
+void llog_info_fini(void);
+
+void llog_handle_get(struct llog_handle *loghandle);
+void llog_handle_put(struct llog_handle *loghandle);
+int llog_cat_id2handle(const struct lu_env *env, struct llog_handle *cathandle,
+ struct llog_handle **res, struct llog_logid *logid);
+int class_config_dump_handler(const struct lu_env *env,
+ struct llog_handle *handle,
+ struct llog_rec_hdr *rec, void *data);
+int class_config_parse_rec(struct llog_rec_hdr *rec, char *buf, int size);
+int llog_process_or_fork(const struct lu_env *env,
+ struct llog_handle *loghandle,
+ llog_cb_t cb, void *data, void *catdata, bool fork);
+int llog_cat_cleanup(const struct lu_env *env, struct llog_handle *cathandle,
+ struct llog_handle *loghandle, int index);
+#endif
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_ioctl.c b/drivers/staging/lustre/lustre/obdclass/llog_ioctl.c
new file mode 100644
index 0000000..da558a5
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/llog_ioctl.c
@@ -0,0 +1,418 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_LOG
+
+#include <obd_class.h>
+#include <lustre_log.h>
+#include "llog_internal.h"
+
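+/*
+ * Parse a textual log id of the form "#<id>#<seq>#<generation>", where <id>
+ * and <seq> accept any base understood by simple_strtoull() and <generation>
+ * is parsed as hexadecimal.  Example (illustrative only): "#0x5#0x2#00000001".
+ */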
+static int str2logid(struct llog_logid *logid, char *str, int len)
+{
+ char *start, *end, *endp;
+ __u64 id, seq;
+
+ start = str;
+ if (*start != '#')
+ return -EINVAL;
+
+ start++;
+ if (start - str >= len - 1)
+ return -EINVAL;
+ end = strchr(start, '#');
+ if (end == NULL || end == start)
+ return -EINVAL;
+
+ *end = '\0';
+ id = simple_strtoull(start, &endp, 0);
+ if (endp != end)
+ return -EINVAL;
+
+ start = ++end;
+ if (start - str >= len - 1)
+ return -EINVAL;
+ end = strchr(start, '#');
+ if (end == NULL || end == start)
+ return -EINVAL;
+
+ *end = '\0';
+ seq = simple_strtoull(start, &endp, 0);
+ if (endp != end)
+ return -EINVAL;
+
+ ostid_set_seq(&logid->lgl_oi, seq);
+ ostid_set_id(&logid->lgl_oi, id);
+
+ start = ++end;
+ if (start - str >= len - 1)
+ return -EINVAL;
+ logid->lgl_ogen = simple_strtoul(start, &endp, 16);
+ if (*endp != '\0')
+ return -EINVAL;
+
+ return 0;
+}
+
+static int llog_check_cb(const struct lu_env *env, struct llog_handle *handle,
+ struct llog_rec_hdr *rec, void *data)
+{
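+	/*
+	 * The static locals below carry the output pointer, the remaining
+	 * space and the requested from/to index range across successive
+	 * callback invocations of a single llog_process() run; they are
+	 * reinitialized whenever ioc_data->ioc_inllen1 is non-zero, i.e. on
+	 * the first record.  llog_print_cb() below follows the same pattern.
+	 */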
+ struct obd_ioctl_data *ioc_data = (struct obd_ioctl_data *)data;
+ static int l, remains, from, to;
+ static char *out;
+ char *endp;
+ int cur_index, rc = 0;
+
+ if (ioc_data && ioc_data->ioc_inllen1 > 0) {
+ l = 0;
+ remains = ioc_data->ioc_inllen4 +
+ cfs_size_round(ioc_data->ioc_inllen1) +
+ cfs_size_round(ioc_data->ioc_inllen2) +
+ cfs_size_round(ioc_data->ioc_inllen3);
+ from = simple_strtol(ioc_data->ioc_inlbuf2, &endp, 0);
+ if (*endp != '\0')
+ return -EINVAL;
+ to = simple_strtol(ioc_data->ioc_inlbuf3, &endp, 0);
+ if (*endp != '\0')
+ return -EINVAL;
+ ioc_data->ioc_inllen1 = 0;
+ out = ioc_data->ioc_bulk;
+ }
+
+ cur_index = rec->lrh_index;
+ if (cur_index < from)
+ return 0;
+ if (to > 0 && cur_index > to)
+ return -LLOG_EEMPTY;
+
+ if (handle->lgh_hdr->llh_flags & LLOG_F_IS_CAT) {
+ struct llog_logid_rec *lir = (struct llog_logid_rec *)rec;
+ struct llog_handle *loghandle;
+
+ if (rec->lrh_type != LLOG_LOGID_MAGIC) {
+ l = snprintf(out, remains, "[index]: %05d [type]: "
+ "%02x [len]: %04d failed\n",
+ cur_index, rec->lrh_type,
+ rec->lrh_len);
+ }
+ if (handle->lgh_ctxt == NULL)
+ return -EOPNOTSUPP;
+ rc = llog_cat_id2handle(env, handle, &loghandle, &lir->lid_id);
+ if (rc) {
+ CDEBUG(D_IOCTL, "cannot find log #"DOSTID"#%08x\n",
+ POSTID(&lir->lid_id.lgl_oi),
+ lir->lid_id.lgl_ogen);
+ return rc;
+ }
+ rc = llog_process(env, loghandle, llog_check_cb, NULL, NULL);
+ llog_handle_put(loghandle);
+ } else {
+ bool ok;
+
+ switch (rec->lrh_type) {
+ case OST_SZ_REC:
+ case MDS_UNLINK_REC:
+ case MDS_UNLINK64_REC:
+ case MDS_SETATTR64_REC:
+ case OBD_CFG_REC:
+ case LLOG_GEN_REC:
+ case LLOG_HDR_MAGIC:
+ ok = true;
+ break;
+ default:
+ ok = false;
+ }
+
+ l = snprintf(out, remains, "[index]: %05d [type]: "
+ "%02x [len]: %04d %s\n",
+ cur_index, rec->lrh_type, rec->lrh_len,
+ ok ? "ok" : "failed");
+ out += l;
+ remains -= l;
+ if (remains <= 0) {
+ CERROR("%s: no space to print log records\n",
+ handle->lgh_ctxt->loc_obd->obd_name);
+ return -LLOG_EEMPTY;
+ }
+ }
+ return rc;
+}
+
+static int llog_print_cb(const struct lu_env *env, struct llog_handle *handle,
+ struct llog_rec_hdr *rec, void *data)
+{
+ struct obd_ioctl_data *ioc_data = (struct obd_ioctl_data *)data;
+ static int l, remains, from, to;
+ static char *out;
+ char *endp;
+ int cur_index;
+
+ if (ioc_data != NULL && ioc_data->ioc_inllen1 > 0) {
+ l = 0;
+ remains = ioc_data->ioc_inllen4 +
+ cfs_size_round(ioc_data->ioc_inllen1) +
+ cfs_size_round(ioc_data->ioc_inllen2) +
+ cfs_size_round(ioc_data->ioc_inllen3);
+ from = simple_strtol(ioc_data->ioc_inlbuf2, &endp, 0);
+ if (*endp != '\0')
+ return -EINVAL;
+ to = simple_strtol(ioc_data->ioc_inlbuf3, &endp, 0);
+ if (*endp != '\0')
+ return -EINVAL;
+ out = ioc_data->ioc_bulk;
+ ioc_data->ioc_inllen1 = 0;
+ }
+
+ cur_index = rec->lrh_index;
+ if (cur_index < from)
+ return 0;
+ if (to > 0 && cur_index > to)
+ return -LLOG_EEMPTY;
+
+ if (handle->lgh_hdr->llh_flags & LLOG_F_IS_CAT) {
+ struct llog_logid_rec *lir = (struct llog_logid_rec *)rec;
+
+ if (rec->lrh_type != LLOG_LOGID_MAGIC) {
+ CERROR("invalid record in catalog\n");
+ return -EINVAL;
+ }
+
+ l = snprintf(out, remains,
+ "[index]: %05d [logid]: #"DOSTID"#%08x\n",
+ cur_index, POSTID(&lir->lid_id.lgl_oi),
+ lir->lid_id.lgl_ogen);
+ } else if (rec->lrh_type == OBD_CFG_REC) {
+ int rc;
+
+ rc = class_config_parse_rec(rec, out, remains);
+ if (rc < 0)
+ return rc;
+ l = rc;
+ } else {
+ l = snprintf(out, remains,
+ "[index]: %05d [type]: %02x [len]: %04d\n",
+ cur_index, rec->lrh_type, rec->lrh_len);
+ }
+ out += l;
+ remains -= l;
+ if (remains <= 0) {
+ CERROR("not enough space for print log records\n");
+ return -LLOG_EEMPTY;
+ }
+
+ return 0;
+}
+
+static int llog_remove_log(const struct lu_env *env, struct llog_handle *cat,
+ struct llog_logid *logid)
+{
+ struct llog_handle *log;
+ int rc;
+
+ rc = llog_cat_id2handle(env, cat, &log, logid);
+ if (rc) {
+ CDEBUG(D_IOCTL, "cannot find log #"DOSTID"#%08x\n",
+ POSTID(&logid->lgl_oi), logid->lgl_ogen);
+ return -ENOENT;
+ }
+
+ rc = llog_destroy(env, log);
+ if (rc) {
+ CDEBUG(D_IOCTL, "cannot destroy log\n");
+ GOTO(out, rc);
+ }
+ llog_cat_cleanup(env, cat, log, log->u.phd.phd_cookie.lgc_index);
+out:
+ llog_handle_put(log);
+ return rc;
+}
+
+static int llog_delete_cb(const struct lu_env *env, struct llog_handle *handle,
+ struct llog_rec_hdr *rec, void *data)
+{
+ struct llog_logid_rec *lir = (struct llog_logid_rec *)rec;
+ int rc;
+
+ if (rec->lrh_type != LLOG_LOGID_MAGIC)
+ return -EINVAL;
+ rc = llog_remove_log(env, handle, &lir->lid_id);
+
+ return rc;
+}
+
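+/*
+ * The llog to operate on is selected by data->ioc_inlbuf1: a leading '#'
+ * denotes a logid string (see str2logid() above), a leading '$' denotes a
+ * log name.
+ */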
+int llog_ioctl(const struct lu_env *env, struct llog_ctxt *ctxt, int cmd,
+ struct obd_ioctl_data *data)
+{
+ struct llog_logid logid;
+ int rc = 0;
+ struct llog_handle *handle = NULL;
+
+ if (*data->ioc_inlbuf1 == '#') {
+ rc = str2logid(&logid, data->ioc_inlbuf1, data->ioc_inllen1);
+ if (rc)
+ return rc;
+ rc = llog_open(env, ctxt, &handle, &logid, NULL,
+ LLOG_OPEN_EXISTS);
+ if (rc)
+ return rc;
+ } else if (*data->ioc_inlbuf1 == '$') {
+ char *name = data->ioc_inlbuf1 + 1;
+
+ rc = llog_open(env, ctxt, &handle, NULL, name,
+ LLOG_OPEN_EXISTS);
+ if (rc)
+ return rc;
+ } else {
+ return -EINVAL;
+ }
+
+ rc = llog_init_handle(env, handle, 0, NULL);
+ if (rc)
+ GOTO(out_close, rc = -ENOENT);
+
+ switch (cmd) {
+ case OBD_IOC_LLOG_INFO: {
+ int l;
+ int remains = data->ioc_inllen2 +
+ cfs_size_round(data->ioc_inllen1);
+ char *out = data->ioc_bulk;
+
+ l = snprintf(out, remains,
+ "logid: #"DOSTID"#%08x\n"
+ "flags: %x (%s)\n"
+ "records count: %d\n"
+ "last index: %d\n",
+ POSTID(&handle->lgh_id.lgl_oi),
+ handle->lgh_id.lgl_ogen,
+ handle->lgh_hdr->llh_flags,
+ handle->lgh_hdr->llh_flags &
+ LLOG_F_IS_CAT ? "cat" : "plain",
+ handle->lgh_hdr->llh_count,
+ handle->lgh_last_idx);
+ out += l;
+ remains -= l;
+ if (remains <= 0) {
+ CERROR("%s: not enough space for log header info\n",
+ ctxt->loc_obd->obd_name);
+ rc = -ENOSPC;
+ }
+ break;
+ }
+ case OBD_IOC_LLOG_CHECK:
+ LASSERT(data->ioc_inllen1 > 0);
+ rc = llog_process(env, handle, llog_check_cb, data, NULL);
+ if (rc == -LLOG_EEMPTY)
+ rc = 0;
+ else if (rc)
+ GOTO(out_close, rc);
+ break;
+ case OBD_IOC_LLOG_PRINT:
+ LASSERT(data->ioc_inllen1 > 0);
+ rc = llog_process(env, handle, llog_print_cb, data, NULL);
+ if (rc == -LLOG_EEMPTY)
+ rc = 0;
+ else if (rc)
+ GOTO(out_close, rc);
+ break;
+ case OBD_IOC_LLOG_CANCEL: {
+ struct llog_cookie cookie;
+ struct llog_logid plain;
+ char *endp;
+
+ cookie.lgc_index = simple_strtoul(data->ioc_inlbuf3, &endp, 0);
+ if (*endp != '\0')
+ GOTO(out_close, rc = -EINVAL);
+
+ if (handle->lgh_hdr->llh_flags & LLOG_F_IS_PLAIN) {
+ rc = llog_cancel_rec(NULL, handle, cookie.lgc_index);
+ GOTO(out_close, rc);
+ } else if (!(handle->lgh_hdr->llh_flags & LLOG_F_IS_CAT)) {
+ GOTO(out_close, rc = -EINVAL);
+ }
+
+ if (data->ioc_inlbuf2 == NULL) /* catalog but no logid */
+ GOTO(out_close, rc = -ENOTTY);
+
+ rc = str2logid(&plain, data->ioc_inlbuf2, data->ioc_inllen2);
+ if (rc)
+ GOTO(out_close, rc);
+ cookie.lgc_lgl = plain;
+ rc = llog_cat_cancel_records(env, handle, 1, &cookie);
+ if (rc)
+ GOTO(out_close, rc);
+ break;
+ }
+ case OBD_IOC_LLOG_REMOVE: {
+ struct llog_logid plain;
+
+ if (handle->lgh_hdr->llh_flags & LLOG_F_IS_PLAIN) {
+ rc = llog_destroy(env, handle);
+ GOTO(out_close, rc);
+ } else if (!(handle->lgh_hdr->llh_flags & LLOG_F_IS_CAT)) {
+ GOTO(out_close, rc = -EINVAL);
+ }
+
+ if (data->ioc_inlbuf2 > 0) {
+			/* remove the indicated log from the catalog */
+ rc = str2logid(&plain, data->ioc_inlbuf2,
+ data->ioc_inllen2);
+ if (rc)
+ GOTO(out_close, rc);
+ rc = llog_remove_log(env, handle, &plain);
+ } else {
+			/* remove all logs from the catalog */
+ rc = llog_process(env, handle, llog_delete_cb, NULL,
+ NULL);
+ if (rc)
+ GOTO(out_close, rc);
+ }
+ break;
+ }
+ default:
+ CERROR("%s: Unknown ioctl cmd %#x\n",
+ ctxt->loc_obd->obd_name, cmd);
+ GOTO(out_close, rc = -ENOTTY);
+ }
+
+out_close:
+ if (handle->lgh_hdr &&
+ handle->lgh_hdr->llh_flags & LLOG_F_IS_CAT)
+ llog_cat_close(env, handle);
+ else
+ llog_close(env, handle);
+ return rc;
+}
+EXPORT_SYMBOL(llog_ioctl);
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_lvfs.c b/drivers/staging/lustre/lustre/obdclass/llog_lvfs.c
new file mode 100644
index 0000000..5385d8e
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/llog_lvfs.c
@@ -0,0 +1,847 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdclass/llog_lvfs.c
+ *
+ * OST<->MDS recovery logging infrastructure.
+ * Invariants in implementation:
+ * - we do not share logs among different OST<->MDS connections, so that
+ * if an OST or MDS fails it need only look at log(s) relevant to itself
+ *
+ * Author: Andreas Dilger <adilger@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LOG
+
+#include <obd.h>
+#include <obd_class.h>
+#include <lustre_log.h>
+#include <obd_ost.h>
+#include <linux/list.h>
+#include <lvfs.h>
+#include <lustre_fsfilt.h>
+#include <lustre_disk.h>
+#include "llog_internal.h"
+
+#if defined(LLOG_LVFS)
+
+static int llog_lvfs_pad(struct obd_device *obd, struct l_file *file,
+ int len, int index)
+{
+ struct llog_rec_hdr rec = { 0 };
+ struct llog_rec_tail tail;
+ int rc;
+
+ LASSERT(len >= LLOG_MIN_REC_SIZE && (len & 0x7) == 0);
+
+ tail.lrt_len = rec.lrh_len = len;
+ tail.lrt_index = rec.lrh_index = index;
+ rec.lrh_type = LLOG_PAD_MAGIC;
+
+ rc = fsfilt_write_record(obd, file, &rec, sizeof(rec), &file->f_pos, 0);
+ if (rc) {
+ CERROR("error writing padding record: rc %d\n", rc);
+ goto out;
+ }
+
+ file->f_pos += len - sizeof(rec) - sizeof(tail);
+ rc = fsfilt_write_record(obd, file, &tail, sizeof(tail),&file->f_pos,0);
+ if (rc) {
+ CERROR("error writing padding record: rc %d\n", rc);
+ goto out;
+ }
+
+ out:
+ return rc;
+}
+
+static int llog_lvfs_write_blob(struct obd_device *obd, struct l_file *file,
+ struct llog_rec_hdr *rec, void *buf, loff_t off)
+{
+ int rc;
+ struct llog_rec_tail end;
+ loff_t saved_off = file->f_pos;
+ int buflen = rec->lrh_len;
+
+ file->f_pos = off;
+
+ if (buflen == 0)
+ CWARN("0-length record\n");
+
+ if (!buf) {
+ rc = fsfilt_write_record(obd, file, rec, buflen,&file->f_pos,0);
+ if (rc) {
+ CERROR("error writing log record: rc %d\n", rc);
+ goto out;
+ }
+ GOTO(out, rc = 0);
+ }
+
+ /* the buf case */
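+	/*
+	 * On-disk layout written below: [llog_rec_hdr][payload][llog_rec_tail],
+	 * with both lrh_len and lrt_len covering the whole record so it can be
+	 * walked forwards and backwards.
+	 */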
+ rec->lrh_len = sizeof(*rec) + buflen + sizeof(end);
+ rc = fsfilt_write_record(obd, file, rec, sizeof(*rec), &file->f_pos, 0);
+ if (rc) {
+ CERROR("error writing log hdr: rc %d\n", rc);
+ goto out;
+ }
+
+ rc = fsfilt_write_record(obd, file, buf, buflen, &file->f_pos, 0);
+ if (rc) {
+ CERROR("error writing log buffer: rc %d\n", rc);
+ goto out;
+ }
+
+ end.lrt_len = rec->lrh_len;
+ end.lrt_index = rec->lrh_index;
+ rc = fsfilt_write_record(obd, file, &end, sizeof(end), &file->f_pos, 0);
+ if (rc) {
+ CERROR("error writing log tail: rc %d\n", rc);
+ goto out;
+ }
+
+ rc = 0;
+ out:
+ if (saved_off > file->f_pos)
+ file->f_pos = saved_off;
+ LASSERT(rc <= 0);
+ return rc;
+}
+
+static int llog_lvfs_read_blob(struct obd_device *obd, struct l_file *file,
+ void *buf, int size, loff_t off)
+{
+ loff_t offset = off;
+ int rc;
+
+ rc = fsfilt_read_record(obd, file, buf, size, &offset);
+ if (rc) {
+ CERROR("error reading log record: rc %d\n", rc);
+ return rc;
+ }
+ return 0;
+}
+
+static int llog_lvfs_read_header(const struct lu_env *env,
+ struct llog_handle *handle)
+{
+ struct obd_device *obd;
+ int rc;
+
+ LASSERT(sizeof(*handle->lgh_hdr) == LLOG_CHUNK_SIZE);
+
+ obd = handle->lgh_ctxt->loc_exp->exp_obd;
+
+ if (i_size_read(handle->lgh_file->f_dentry->d_inode) == 0) {
+ CDEBUG(D_HA, "not reading header from 0-byte log\n");
+ return LLOG_EEMPTY;
+ }
+
+ rc = llog_lvfs_read_blob(obd, handle->lgh_file, handle->lgh_hdr,
+ LLOG_CHUNK_SIZE, 0);
+ if (rc) {
+ CERROR("error reading log header from %.*s\n",
+ handle->lgh_file->f_dentry->d_name.len,
+ handle->lgh_file->f_dentry->d_name.name);
+ } else {
+ struct llog_rec_hdr *llh_hdr = &handle->lgh_hdr->llh_hdr;
+
+ if (LLOG_REC_HDR_NEEDS_SWABBING(llh_hdr))
+ lustre_swab_llog_hdr(handle->lgh_hdr);
+
+ if (llh_hdr->lrh_type != LLOG_HDR_MAGIC) {
+ CERROR("bad log %.*s header magic: %#x (expected %#x)\n",
+ handle->lgh_file->f_dentry->d_name.len,
+ handle->lgh_file->f_dentry->d_name.name,
+ llh_hdr->lrh_type, LLOG_HDR_MAGIC);
+ rc = -EIO;
+ } else if (llh_hdr->lrh_len != LLOG_CHUNK_SIZE) {
+ CERROR("incorrectly sized log %.*s header: %#x "
+ "(expected %#x)\n",
+ handle->lgh_file->f_dentry->d_name.len,
+ handle->lgh_file->f_dentry->d_name.name,
+ llh_hdr->lrh_len, LLOG_CHUNK_SIZE);
+ CERROR("you may need to re-run lconf --write_conf.\n");
+ rc = -EIO;
+ }
+ }
+
+ handle->lgh_last_idx = handle->lgh_hdr->llh_tail.lrt_index;
+ handle->lgh_file->f_pos = i_size_read(handle->lgh_file->f_dentry->d_inode);
+
+ return rc;
+}
+
+/* returns negative on error; 0 on success when reccookie == NULL; 1 otherwise */
+/* appends if idx == -1, otherwise overwrites record idx. */
+static int llog_lvfs_write_rec(const struct lu_env *env,
+ struct llog_handle *loghandle,
+ struct llog_rec_hdr *rec,
+ struct llog_cookie *reccookie, int cookiecount,
+ void *buf, int idx, struct thandle *th)
+{
+ struct llog_log_hdr *llh;
+ int reclen = rec->lrh_len, index, rc;
+ struct llog_rec_tail *lrt;
+ struct obd_device *obd;
+ struct file *file;
+ size_t left;
+
+ llh = loghandle->lgh_hdr;
+ file = loghandle->lgh_file;
+ obd = loghandle->lgh_ctxt->loc_exp->exp_obd;
+
+	/* record length should not be bigger than LLOG_CHUNK_SIZE */
+ if (buf)
+ rc = (reclen > LLOG_CHUNK_SIZE - sizeof(struct llog_rec_hdr) -
+ sizeof(struct llog_rec_tail)) ? -E2BIG : 0;
+ else
+ rc = (reclen > LLOG_CHUNK_SIZE) ? -E2BIG : 0;
+ if (rc)
+ return rc;
+
+ if (buf)
+ /* write_blob adds header and tail to lrh_len. */
+ reclen = sizeof(*rec) + rec->lrh_len +
+ sizeof(struct llog_rec_tail);
+
+ if (idx != -1) {
+ loff_t saved_offset;
+
+ /* no header: only allowed to insert record 1 */
+ if (idx != 1 && !i_size_read(file->f_dentry->d_inode)) {
+ CERROR("idx != -1 in empty log\n");
+ LBUG();
+ }
+
+ if (idx && llh->llh_size && llh->llh_size != rec->lrh_len)
+ return -EINVAL;
+
+ if (!ext2_test_bit(idx, llh->llh_bitmap))
+ CERROR("Modify unset record %u\n", idx);
+ if (idx != rec->lrh_index)
+ CERROR("Index mismatch %d %u\n", idx, rec->lrh_index);
+
+ rc = llog_lvfs_write_blob(obd, file, &llh->llh_hdr, NULL, 0);
+ /* we are done if we only write the header or on error */
+ if (rc || idx == 0)
+ return rc;
+
+ if (buf) {
+ /* We assume that caller has set lgh_cur_* */
+ saved_offset = loghandle->lgh_cur_offset;
+ CDEBUG(D_OTHER,
+ "modify record "DOSTID": idx:%d/%u/%d, len:%u "
+ "offset %llu\n",
+ POSTID(&loghandle->lgh_id.lgl_oi), idx, rec->lrh_index,
+ loghandle->lgh_cur_idx, rec->lrh_len,
+ (long long)(saved_offset - sizeof(*llh)));
+ if (rec->lrh_index != loghandle->lgh_cur_idx) {
+ CERROR("modify idx mismatch %u/%d\n",
+ idx, loghandle->lgh_cur_idx);
+ return -EFAULT;
+ }
+ } else {
+ /* Assumes constant lrh_len */
+ saved_offset = sizeof(*llh) + (idx - 1) * reclen;
+ }
+
+ rc = llog_lvfs_write_blob(obd, file, rec, buf, saved_offset);
+ if (rc == 0 && reccookie) {
+ reccookie->lgc_lgl = loghandle->lgh_id;
+ reccookie->lgc_index = idx;
+ rc = 1;
+ }
+ return rc;
+ }
+
+ /* Make sure that records don't cross a chunk boundary, so we can
+ * process them page-at-a-time if needed. If it will cross a chunk
+ * boundary, write in a fake (but referenced) entry to pad the chunk.
+ *
+ * We know that llog_current_log() will return a loghandle that is
+ * big enough to hold reclen, so all we care about is padding here.
+ */
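+	/*
+	 * Worked example (constants are illustrative, assuming an 8192-byte
+	 * chunk and LLOG_MIN_REC_SIZE of 32): if only 304 bytes remain in the
+	 * current chunk and reclen is 288, then 304 < 288 + 32, so a 304-byte
+	 * pad record is written first and the real record starts at the next
+	 * chunk boundary.
+	 */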
+ left = LLOG_CHUNK_SIZE - (file->f_pos & (LLOG_CHUNK_SIZE - 1));
+
+ /* NOTE: padding is a record, but no bit is set */
+ if (left != 0 && left != reclen &&
+ left < (reclen + LLOG_MIN_REC_SIZE)) {
+ index = loghandle->lgh_last_idx + 1;
+ rc = llog_lvfs_pad(obd, file, left, index);
+ if (rc)
+ return rc;
+ loghandle->lgh_last_idx++; /*for pad rec*/
+ }
+ /* if it's the last idx in log file, then return -ENOSPC */
+ if (loghandle->lgh_last_idx >= LLOG_BITMAP_SIZE(llh) - 1)
+ return -ENOSPC;
+ loghandle->lgh_last_idx++;
+ index = loghandle->lgh_last_idx;
+ LASSERT(index < LLOG_BITMAP_SIZE(llh));
+ rec->lrh_index = index;
+ if (buf == NULL) {
+ lrt = (struct llog_rec_tail *)
+ ((char *)rec + rec->lrh_len - sizeof(*lrt));
+ lrt->lrt_len = rec->lrh_len;
+ lrt->lrt_index = rec->lrh_index;
+ }
+	/* The caller should make sure only one process accesses lgh_last_idx,
+	 * otherwise it might hit the assertion. */
+ LASSERT(index < LLOG_BITMAP_SIZE(llh));
+ spin_lock(&loghandle->lgh_hdr_lock);
+ if (ext2_set_bit(index, llh->llh_bitmap)) {
+ CERROR("argh, index %u already set in log bitmap?\n", index);
+ spin_unlock(&loghandle->lgh_hdr_lock);
+ LBUG(); /* should never happen */
+ }
+ llh->llh_count++;
+ spin_unlock(&loghandle->lgh_hdr_lock);
+ llh->llh_tail.lrt_index = index;
+
+ rc = llog_lvfs_write_blob(obd, file, &llh->llh_hdr, NULL, 0);
+ if (rc)
+ return rc;
+
+ rc = llog_lvfs_write_blob(obd, file, rec, buf, file->f_pos);
+ if (rc)
+ return rc;
+
+ CDEBUG(D_RPCTRACE, "added record "DOSTID": idx: %u, %u \n",
+ POSTID(&loghandle->lgh_id.lgl_oi), index, rec->lrh_len);
+ if (rc == 0 && reccookie) {
+ reccookie->lgc_lgl = loghandle->lgh_id;
+ reccookie->lgc_index = index;
+ if ((rec->lrh_type == MDS_UNLINK_REC) ||
+ (rec->lrh_type == MDS_SETATTR64_REC))
+ reccookie->lgc_subsys = LLOG_MDS_OST_ORIG_CTXT;
+ else if (rec->lrh_type == OST_SZ_REC)
+ reccookie->lgc_subsys = LLOG_SIZE_ORIG_CTXT;
+ else
+ reccookie->lgc_subsys = -1;
+ rc = 1;
+ }
+ if (rc == 0 && rec->lrh_type == LLOG_GEN_REC)
+ rc = 1;
+
+ return rc;
+}
+
+/* We can skip reading at least as many log blocks as the number of
+ * minimum-sized log records we are skipping.  If it turns out that we are
+ * not far enough along the log (because the actual records are larger than
+ * the minimum size), we just skip some more records.
+ */
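+/*
+ * Worked example (constants are illustrative, assuming LLOG_MIN_REC_SIZE of
+ * 32 and an 8192-byte chunk): skipping from record 0 towards record 600
+ * advances the offset by 599 * 32 = 19168 bytes, rounded down to the chunk
+ * boundary at 16384, so the first two chunks are not read at all.
+ */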
+
+static void llog_skip_over(__u64 *off, int curr, int goal)
+{
+ if (goal <= curr)
+ return;
+ *off = (*off + (goal-curr-1) * LLOG_MIN_REC_SIZE) &
+ ~(LLOG_CHUNK_SIZE - 1);
+}
+
+/* sets:
+ * - cur_offset to the furthest point read in the log file
+ * - cur_idx to the log index preceding cur_offset
+ * returns -EIO/-EINVAL on error
+ */
+static int llog_lvfs_next_block(const struct lu_env *env,
+ struct llog_handle *loghandle, int *cur_idx,
+ int next_idx, __u64 *cur_offset, void *buf,
+ int len)
+{
+ int rc;
+
+ if (len == 0 || len & (LLOG_CHUNK_SIZE - 1))
+ return -EINVAL;
+
+ CDEBUG(D_OTHER, "looking for log index %u (cur idx %u off "LPU64")\n",
+ next_idx, *cur_idx, *cur_offset);
+
+ while (*cur_offset < i_size_read(loghandle->lgh_file->f_dentry->d_inode)) {
+ struct llog_rec_hdr *rec, *last_rec;
+ struct llog_rec_tail *tail;
+ loff_t ppos;
+ int llen;
+
+ llog_skip_over(cur_offset, *cur_idx, next_idx);
+
+ /* read up to next LLOG_CHUNK_SIZE block */
+ ppos = *cur_offset;
+ llen = LLOG_CHUNK_SIZE - (*cur_offset & (LLOG_CHUNK_SIZE - 1));
+ rc = fsfilt_read_record(loghandle->lgh_ctxt->loc_exp->exp_obd,
+ loghandle->lgh_file, buf, llen,
+ cur_offset);
+ if (rc < 0) {
+ CERROR("Cant read llog block at log id "DOSTID
+ "/%u offset "LPU64"\n",
+ POSTID(&loghandle->lgh_id.lgl_oi),
+ loghandle->lgh_id.lgl_ogen,
+ *cur_offset);
+ return rc;
+ }
+
+ /* put number of bytes read into rc to make code simpler */
+ rc = *cur_offset - ppos;
+ if (rc < len) {
+ /* signal the end of the valid buffer to llog_process */
+ memset(buf + rc, 0, len - rc);
+ }
+
+ if (rc == 0) /* end of file, nothing to do */
+ return 0;
+
+ if (rc < sizeof(*tail)) {
+ CERROR("Invalid llog block at log id "DOSTID"/%u offset"
+ LPU64"\n", POSTID(&loghandle->lgh_id.lgl_oi),
+ loghandle->lgh_id.lgl_ogen, *cur_offset);
+ return -EINVAL;
+ }
+
+ rec = buf;
+ if (LLOG_REC_HDR_NEEDS_SWABBING(rec))
+ lustre_swab_llog_rec(rec);
+
+ tail = (struct llog_rec_tail *)(buf + rc -
+ sizeof(struct llog_rec_tail));
+
+ /* get the last record in block */
+ last_rec = (struct llog_rec_hdr *)(buf + rc -
+ le32_to_cpu(tail->lrt_len));
+
+ if (LLOG_REC_HDR_NEEDS_SWABBING(last_rec))
+ lustre_swab_llog_rec(last_rec);
+ LASSERT(last_rec->lrh_index == tail->lrt_index);
+
+ *cur_idx = tail->lrt_index;
+
+ /* this shouldn't happen */
+ if (tail->lrt_index == 0) {
+ CERROR("Invalid llog tail at log id "DOSTID"/%u offset "
+ LPU64"\n", POSTID(&loghandle->lgh_id.lgl_oi),
+ loghandle->lgh_id.lgl_ogen, *cur_offset);
+ return -EINVAL;
+ }
+ if (tail->lrt_index < next_idx)
+ continue;
+
+ /* sanity check that the start of the new buffer is no farther
+ * than the record that we wanted. This shouldn't happen. */
+ if (rec->lrh_index > next_idx) {
+ CERROR("missed desired record? %u > %u\n",
+ rec->lrh_index, next_idx);
+ return -ENOENT;
+ }
+ return 0;
+ }
+ return -EIO;
+}
+
+static int llog_lvfs_prev_block(const struct lu_env *env,
+ struct llog_handle *loghandle,
+ int prev_idx, void *buf, int len)
+{
+ __u64 cur_offset;
+ int rc;
+
+ if (len == 0 || len & (LLOG_CHUNK_SIZE - 1))
+ return -EINVAL;
+
+ CDEBUG(D_OTHER, "looking for log index %u\n", prev_idx);
+
+ cur_offset = LLOG_CHUNK_SIZE;
+ llog_skip_over(&cur_offset, 0, prev_idx);
+
+ while (cur_offset < i_size_read(loghandle->lgh_file->f_dentry->d_inode)) {
+ struct llog_rec_hdr *rec, *last_rec;
+ struct llog_rec_tail *tail;
+ loff_t ppos = cur_offset;
+
+ rc = fsfilt_read_record(loghandle->lgh_ctxt->loc_exp->exp_obd,
+ loghandle->lgh_file, buf, len,
+ &cur_offset);
+ if (rc < 0) {
+ CERROR("Cant read llog block at log id "DOSTID
+ "/%u offset "LPU64"\n",
+ POSTID(&loghandle->lgh_id.lgl_oi),
+ loghandle->lgh_id.lgl_ogen,
+ cur_offset);
+ return rc;
+ }
+
+ /* put number of bytes read into rc to make code simpler */
+ rc = cur_offset - ppos;
+
+ if (rc == 0) /* end of file, nothing to do */
+ return 0;
+
+ if (rc < sizeof(*tail)) {
+ CERROR("Invalid llog block at log id "DOSTID"/%u offset"
+ LPU64"\n", POSTID(&loghandle->lgh_id.lgl_oi),
+ loghandle->lgh_id.lgl_ogen, cur_offset);
+ return -EINVAL;
+ }
+
+ rec = buf;
+ if (LLOG_REC_HDR_NEEDS_SWABBING(rec))
+ lustre_swab_llog_rec(rec);
+
+ tail = (struct llog_rec_tail *)(buf + rc -
+ sizeof(struct llog_rec_tail));
+
+ /* get the last record in block */
+ last_rec = (struct llog_rec_hdr *)(buf + rc -
+ le32_to_cpu(tail->lrt_len));
+
+ if (LLOG_REC_HDR_NEEDS_SWABBING(last_rec))
+ lustre_swab_llog_rec(last_rec);
+ LASSERT(last_rec->lrh_index == tail->lrt_index);
+
+ /* this shouldn't happen */
+ if (tail->lrt_index == 0) {
+ CERROR("Invalid llog tail at log id "DOSTID"/%u offset"
+ LPU64"\n", POSTID(&loghandle->lgh_id.lgl_oi),
+ loghandle->lgh_id.lgl_ogen, cur_offset);
+ return -EINVAL;
+ }
+ if (tail->lrt_index < prev_idx)
+ continue;
+
+ /* sanity check that the start of the new buffer is no farther
+ * than the record that we wanted. This shouldn't happen. */
+ if (rec->lrh_index > prev_idx) {
+ CERROR("missed desired record? %u > %u\n",
+ rec->lrh_index, prev_idx);
+ return -ENOENT;
+ }
+ return 0;
+ }
+ return -EIO;
+}
+
+static struct file *llog_filp_open(char *dir, char *name, int flags, int mode)
+{
+ char *logname;
+ struct file *filp;
+ int len;
+
+ OBD_ALLOC(logname, PATH_MAX);
+ if (logname == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ len = snprintf(logname, PATH_MAX, "%s/%s", dir, name);
+ if (len >= PATH_MAX - 1) {
+ filp = ERR_PTR(-ENAMETOOLONG);
+ } else {
+ filp = l_filp_open(logname, flags, mode);
+ if (IS_ERR(filp) && PTR_ERR(filp) != -ENOENT)
+ CERROR("logfile creation %s: %ld\n", logname,
+ PTR_ERR(filp));
+ }
+ OBD_FREE(logname, PATH_MAX);
+ return filp;
+}
+
+static int llog_lvfs_open(const struct lu_env *env, struct llog_handle *handle,
+ struct llog_logid *logid, char *name,
+ enum llog_open_param open_param)
+{
+ struct llog_ctxt *ctxt = handle->lgh_ctxt;
+ struct l_dentry *dchild = NULL;
+ struct obd_device *obd;
+ int rc = 0;
+
+ LASSERT(ctxt);
+ LASSERT(ctxt->loc_exp);
+ LASSERT(ctxt->loc_exp->exp_obd);
+ obd = ctxt->loc_exp->exp_obd;
+
+ LASSERT(handle);
+ if (logid != NULL) {
+ dchild = obd_lvfs_fid2dentry(ctxt->loc_exp, &logid->lgl_oi,
+ logid->lgl_ogen);
+ if (IS_ERR(dchild)) {
+ rc = PTR_ERR(dchild);
+ CERROR("%s: error looking up logfile #"DOSTID "#%08x:"
+ " rc = %d\n", ctxt->loc_obd->obd_name,
+ POSTID(&logid->lgl_oi), logid->lgl_ogen, rc);
+ GOTO(out, rc);
+ }
+ if (dchild->d_inode == NULL) {
+ l_dput(dchild);
+ rc = -ENOENT;
+ CERROR("%s: nonexistent llog #"DOSTID"#%08x:"
+ "rc = %d\n", ctxt->loc_obd->obd_name,
+ POSTID(&logid->lgl_oi), logid->lgl_ogen, rc);
+ GOTO(out, rc);
+ }
+ handle->lgh_file = l_dentry_open(&obd->obd_lvfs_ctxt, dchild,
+ O_RDWR | O_LARGEFILE);
+ l_dput(dchild);
+ if (IS_ERR(handle->lgh_file)) {
+ rc = PTR_ERR(handle->lgh_file);
+ handle->lgh_file = NULL;
+ CERROR("%s: error opening llog #"DOSTID"#%08x:"
+ "rc = %d\n", ctxt->loc_obd->obd_name,
+ POSTID(&logid->lgl_oi), logid->lgl_ogen, rc);
+ GOTO(out, rc);
+ }
+ handle->lgh_id = *logid;
+ } else if (name) {
+ handle->lgh_file = llog_filp_open(MOUNT_CONFIGS_DIR, name,
+ O_RDWR | O_LARGEFILE, 0644);
+ if (IS_ERR(handle->lgh_file)) {
+ rc = PTR_ERR(handle->lgh_file);
+ handle->lgh_file = NULL;
+ if (rc == -ENOENT && open_param == LLOG_OPEN_NEW) {
+ OBD_ALLOC(handle->lgh_name, strlen(name) + 1);
+ if (handle->lgh_name)
+ strcpy(handle->lgh_name, name);
+ else
+ GOTO(out, rc = -ENOMEM);
+ rc = 0;
+ } else {
+ GOTO(out, rc);
+ }
+ } else {
+ lustre_build_llog_lvfs_oid(&handle->lgh_id,
+ handle->lgh_file->f_dentry->d_inode->i_ino,
+ handle->lgh_file->f_dentry->d_inode->i_generation);
+ }
+ } else {
+ LASSERTF(open_param == LLOG_OPEN_NEW, "%#x\n", open_param);
+ handle->lgh_file = NULL;
+ }
+
+	/* the caller expected an existing llog but it does not exist */
+ if (open_param != LLOG_OPEN_NEW && handle->lgh_file == NULL)
+ GOTO(out_name, rc = -ENOENT);
+
+ return 0;
+out_name:
+ if (handle->lgh_name != NULL)
+ OBD_FREE(handle->lgh_name, strlen(name) + 1);
+out:
+ return rc;
+}
+
+static int llog_lvfs_exist(struct llog_handle *handle)
+{
+ return (handle->lgh_file != NULL);
+}
+
+/* This is a callback from the llog_* functions.
+ * Assumes caller has already pushed us into the kernel context. */
+static int llog_lvfs_create(const struct lu_env *env,
+ struct llog_handle *handle,
+ struct thandle *th)
+{
+ struct llog_ctxt *ctxt = handle->lgh_ctxt;
+ struct obd_device *obd;
+ struct l_dentry *dchild = NULL;
+ struct file *file;
+ struct obdo *oa = NULL;
+ int rc = 0;
+ int open_flags = O_RDWR | O_CREAT | O_LARGEFILE;
+
+ LASSERT(ctxt);
+ LASSERT(ctxt->loc_exp);
+ obd = ctxt->loc_exp->exp_obd;
+ LASSERT(handle->lgh_file == NULL);
+
+ if (handle->lgh_name) {
+ file = llog_filp_open(MOUNT_CONFIGS_DIR, handle->lgh_name,
+ open_flags, 0644);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ lustre_build_llog_lvfs_oid(&handle->lgh_id,
+ file->f_dentry->d_inode->i_ino,
+ file->f_dentry->d_inode->i_generation);
+ handle->lgh_file = file;
+ } else {
+ OBDO_ALLOC(oa);
+ if (oa == NULL)
+ return -ENOMEM;
+
+ ostid_set_seq_llog(&oa->o_oi);
+ oa->o_valid = OBD_MD_FLGENER | OBD_MD_FLGROUP;
+
+ rc = obd_create(NULL, ctxt->loc_exp, oa, NULL, NULL);
+ if (rc)
+ GOTO(out, rc);
+
+ /* FIXME: rationalize the misuse of o_generation in
+ * this API along with mds_obd_{create,destroy}.
+ * Hopefully it is only an internal API issue. */
+#define o_generation o_parent_oid
+ dchild = obd_lvfs_fid2dentry(ctxt->loc_exp, &oa->o_oi,
+ oa->o_generation);
+ if (IS_ERR(dchild))
+ GOTO(out, rc = PTR_ERR(dchild));
+
+ file = l_dentry_open(&obd->obd_lvfs_ctxt, dchild, open_flags);
+ l_dput(dchild);
+ if (IS_ERR(file))
+ GOTO(out, rc = PTR_ERR(file));
+ handle->lgh_id.lgl_oi = oa->o_oi;
+ handle->lgh_id.lgl_ogen = oa->o_generation;
+ handle->lgh_file = file;
+out:
+ OBDO_FREE(oa);
+ }
+ return rc;
+}
+
+static int llog_lvfs_close(const struct lu_env *env,
+ struct llog_handle *handle)
+{
+ int rc;
+
+ if (handle->lgh_file == NULL)
+ return 0;
+ rc = filp_close(handle->lgh_file, 0);
+ if (rc)
+ CERROR("%s: error closing llog #"DOSTID"#%08x: "
+ "rc = %d\n", handle->lgh_ctxt->loc_obd->obd_name,
+ POSTID(&handle->lgh_id.lgl_oi),
+ handle->lgh_id.lgl_ogen, rc);
+ handle->lgh_file = NULL;
+ if (handle->lgh_name) {
+ OBD_FREE(handle->lgh_name, strlen(handle->lgh_name) + 1);
+ handle->lgh_name = NULL;
+ }
+ return rc;
+}
+
+static int llog_lvfs_destroy(const struct lu_env *env,
+ struct llog_handle *handle)
+{
+ struct dentry *fdentry;
+ struct obdo *oa;
+ struct obd_device *obd = handle->lgh_ctxt->loc_exp->exp_obd;
+ char *dir;
+ void *th;
+ struct inode *inode;
+ int rc, rc1;
+
+ dir = MOUNT_CONFIGS_DIR;
+
+ LASSERT(handle->lgh_file);
+ fdentry = handle->lgh_file->f_dentry;
+ inode = fdentry->d_parent->d_inode;
+ if (strcmp(fdentry->d_parent->d_name.name, dir) == 0) {
+ struct lvfs_run_ctxt saved;
+ struct vfsmount *mnt = mntget(handle->lgh_file->f_vfsmnt);
+
+ push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
+ dget(fdentry);
+ rc = llog_lvfs_close(env, handle);
+ if (rc == 0) {
+ mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
+ rc = ll_vfs_unlink(inode, fdentry, mnt);
+ mutex_unlock(&inode->i_mutex);
+ }
+ mntput(mnt);
+
+ dput(fdentry);
+ pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
+ return rc;
+ }
+
+ OBDO_ALLOC(oa);
+ if (oa == NULL)
+ return -ENOMEM;
+
+ oa->o_oi = handle->lgh_id.lgl_oi;
+ oa->o_generation = handle->lgh_id.lgl_ogen;
+#undef o_generation
+ oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLGENER;
+
+ rc = llog_lvfs_close(env, handle);
+ if (rc)
+ GOTO(out, rc);
+
+ th = fsfilt_start_log(obd, inode, FSFILT_OP_UNLINK, NULL, 1);
+ if (IS_ERR(th)) {
+ CERROR("fsfilt_start failed: %ld\n", PTR_ERR(th));
+ GOTO(out, rc = PTR_ERR(th));
+ }
+
+ rc = obd_destroy(NULL, handle->lgh_ctxt->loc_exp, oa,
+ NULL, NULL, NULL, NULL);
+
+ rc1 = fsfilt_commit(obd, inode, th, 0);
+ if (rc == 0 && rc1 != 0)
+ rc = rc1;
+ out:
+ OBDO_FREE(oa);
+ return rc;
+}
+
+static int llog_lvfs_declare_create(const struct lu_env *env,
+ struct llog_handle *res,
+ struct thandle *th)
+{
+ return 0;
+}
+
+static int llog_lvfs_declare_write_rec(const struct lu_env *env,
+ struct llog_handle *loghandle,
+ struct llog_rec_hdr *rec,
+ int idx, struct thandle *th)
+{
+ return 0;
+}
+
+struct llog_operations llog_lvfs_ops = {
+ .lop_write_rec = llog_lvfs_write_rec,
+ .lop_next_block = llog_lvfs_next_block,
+ .lop_prev_block = llog_lvfs_prev_block,
+ .lop_read_header = llog_lvfs_read_header,
+ .lop_create = llog_lvfs_create,
+ .lop_destroy = llog_lvfs_destroy,
+ .lop_close = llog_lvfs_close,
+ .lop_open = llog_lvfs_open,
+ .lop_exist = llog_lvfs_exist,
+ .lop_declare_create = llog_lvfs_declare_create,
+ .lop_declare_write_rec = llog_lvfs_declare_write_rec,
+};
+EXPORT_SYMBOL(llog_lvfs_ops);
+#else /* !__KERNEL__ */
+struct llog_operations llog_lvfs_ops = {};
+#endif
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_obd.c b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
new file mode 100644
index 0000000..71817af
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
@@ -0,0 +1,314 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_LOG
+
+
+#include <obd_class.h>
+#include <lustre_log.h>
+#include "llog_internal.h"
+
+/* helper functions for calling the llog obd methods */
+static struct llog_ctxt* llog_new_ctxt(struct obd_device *obd)
+{
+ struct llog_ctxt *ctxt;
+
+ OBD_ALLOC_PTR(ctxt);
+ if (!ctxt)
+ return NULL;
+
+ ctxt->loc_obd = obd;
+ atomic_set(&ctxt->loc_refcount, 1);
+
+ return ctxt;
+}
+
+static void llog_ctxt_destroy(struct llog_ctxt *ctxt)
+{
+ if (ctxt->loc_exp) {
+ class_export_put(ctxt->loc_exp);
+ ctxt->loc_exp = NULL;
+ }
+ if (ctxt->loc_imp) {
+ class_import_put(ctxt->loc_imp);
+ ctxt->loc_imp = NULL;
+ }
+ OBD_FREE_PTR(ctxt);
+}
+
+int __llog_ctxt_put(const struct lu_env *env, struct llog_ctxt *ctxt)
+{
+ struct obd_llog_group *olg = ctxt->loc_olg;
+ struct obd_device *obd;
+ int rc = 0;
+
+ spin_lock(&olg->olg_lock);
+ if (!atomic_dec_and_test(&ctxt->loc_refcount)) {
+ spin_unlock(&olg->olg_lock);
+ return rc;
+ }
+ olg->olg_ctxts[ctxt->loc_idx] = NULL;
+ spin_unlock(&olg->olg_lock);
+
+ obd = ctxt->loc_obd;
+ spin_lock(&obd->obd_dev_lock);
+ /* sync with llog ctxt user thread */
+ spin_unlock(&obd->obd_dev_lock);
+
+ /* obd->obd_starting is needed to handle cleanup in the
+ * error case while the obd is still starting up. */
+ LASSERTF(obd->obd_starting == 1 ||
+ obd->obd_stopping == 1 || obd->obd_set_up == 0,
+ "wrong obd state: %d/%d/%d\n", !!obd->obd_starting,
+ !!obd->obd_stopping, !!obd->obd_set_up);
+
+ /* cleanup the llog ctxt here */
+ if (CTXTP(ctxt, cleanup))
+ rc = CTXTP(ctxt, cleanup)(env, ctxt);
+
+ llog_ctxt_destroy(ctxt);
+ wake_up(&olg->olg_waitq);
+ return rc;
+}
+EXPORT_SYMBOL(__llog_ctxt_put);
+
+int llog_cleanup(const struct lu_env *env, struct llog_ctxt *ctxt)
+{
+ struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+ struct obd_llog_group *olg;
+ int rc, idx;
+
+ LASSERT(ctxt != NULL);
+ LASSERT(ctxt != LP_POISON);
+
+ olg = ctxt->loc_olg;
+ LASSERT(olg != NULL);
+ LASSERT(olg != LP_POISON);
+
+ idx = ctxt->loc_idx;
+
+ /*
+ * Balance the ctxt get taken when calling llog_cleanup()
+ */
+ LASSERT(atomic_read(&ctxt->loc_refcount) < LI_POISON);
+ LASSERT(atomic_read(&ctxt->loc_refcount) > 1);
+ llog_ctxt_put(ctxt);
+
+ /*
+ * Try to free the ctxt.
+ */
+ rc = __llog_ctxt_put(env, ctxt);
+ if (rc)
+ CERROR("Error %d while cleaning up ctxt %p\n",
+ rc, ctxt);
+
+ l_wait_event(olg->olg_waitq,
+ llog_group_ctxt_null(olg, idx), &lwi);
+
+ return rc;
+}
+EXPORT_SYMBOL(llog_cleanup);
+
+int llog_setup(const struct lu_env *env, struct obd_device *obd,
+ struct obd_llog_group *olg, int index,
+ struct obd_device *disk_obd, struct llog_operations *op)
+{
+ struct llog_ctxt *ctxt;
+ int rc = 0;
+
+ if (index < 0 || index >= LLOG_MAX_CTXTS)
+ return -EINVAL;
+
+ LASSERT(olg != NULL);
+
+ ctxt = llog_new_ctxt(obd);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->loc_obd = obd;
+ ctxt->loc_olg = olg;
+ ctxt->loc_idx = index;
+ ctxt->loc_logops = op;
+ mutex_init(&ctxt->loc_mutex);
+ ctxt->loc_exp = class_export_get(disk_obd->obd_self_export);
+ ctxt->loc_flags = LLOG_CTXT_FLAG_UNINITIALIZED;
+
+ rc = llog_group_set_ctxt(olg, ctxt, index);
+ if (rc) {
+ llog_ctxt_destroy(ctxt);
+ if (rc == -EEXIST) {
+ ctxt = llog_group_get_ctxt(olg, index);
+ if (ctxt) {
+ /*
+ * mds_lov_update_desc() might call this multiple
+ * times, so if the llog is already set up then
+ * don't do it again.
+ */
+ CDEBUG(D_CONFIG, "obd %s ctxt %d already set up\n",
+ obd->obd_name, index);
+ LASSERT(ctxt->loc_olg == olg);
+ LASSERT(ctxt->loc_obd == obd);
+ LASSERT(ctxt->loc_exp == disk_obd->obd_self_export);
+ LASSERT(ctxt->loc_logops == op);
+ llog_ctxt_put(ctxt);
+ }
+ rc = 0;
+ }
+ return rc;
+ }
+
+ if (op->lop_setup) {
+ if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LLOG_SETUP))
+ rc = -EOPNOTSUPP;
+ else
+ rc = op->lop_setup(env, obd, olg, index, disk_obd);
+ }
+
+ if (rc) {
+ CERROR("%s: ctxt %d lop_setup=%p failed: rc = %d\n",
+ obd->obd_name, index, op->lop_setup, rc);
+ llog_group_clear_ctxt(olg, index);
+ llog_ctxt_destroy(ctxt);
+ } else {
+ CDEBUG(D_CONFIG, "obd %s ctxt %d is initialized\n",
+ obd->obd_name, index);
+ ctxt->loc_flags &= ~LLOG_CTXT_FLAG_UNINITIALIZED;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(llog_setup);
+
+int llog_sync(struct llog_ctxt *ctxt, struct obd_export *exp, int flags)
+{
+ int rc = 0;
+
+ if (!ctxt)
+ return 0;
+
+ if (CTXTP(ctxt, sync))
+ rc = CTXTP(ctxt, sync)(ctxt, exp, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL(llog_sync);
+
+int llog_obd_add(const struct lu_env *env, struct llog_ctxt *ctxt,
+ struct llog_rec_hdr *rec, struct lov_stripe_md *lsm,
+ struct llog_cookie *logcookies, int numcookies)
+{
+ int raised, rc;
+
+ if (!ctxt) {
+ CERROR("No ctxt\n");
+ return -ENODEV;
+ }
+
+ if (ctxt->loc_flags & LLOG_CTXT_FLAG_UNINITIALIZED)
+ return -ENXIO;
+
+ CTXT_CHECK_OP(ctxt, obd_add, -EOPNOTSUPP);
+ raised = cfs_cap_raised(CFS_CAP_SYS_RESOURCE);
+ if (!raised)
+ cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
+ rc = CTXTP(ctxt, obd_add)(env, ctxt, rec, lsm, logcookies,
+ numcookies);
+ if (!raised)
+ cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
+ return rc;
+}
+EXPORT_SYMBOL(llog_obd_add);
+
+int llog_cancel(const struct lu_env *env, struct llog_ctxt *ctxt,
+ struct lov_stripe_md *lsm, int count,
+ struct llog_cookie *cookies, int flags)
+{
+ int rc;
+
+ if (!ctxt) {
+ CERROR("No ctxt\n");
+ return -ENODEV;
+ }
+
+ CTXT_CHECK_OP(ctxt, cancel, -EOPNOTSUPP);
+ rc = CTXTP(ctxt, cancel)(env, ctxt, lsm, count, cookies, flags);
+ return rc;
+}
+EXPORT_SYMBOL(llog_cancel);
+
+int obd_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
+ struct obd_device *disk_obd, int *index)
+{
+ int rc;
+
+ OBD_CHECK_DT_OP(obd, llog_init, 0);
+ OBD_COUNTER_INCREMENT(obd, llog_init);
+
+ rc = OBP(obd, llog_init)(obd, olg, disk_obd, index);
+ return rc;
+}
+EXPORT_SYMBOL(obd_llog_init);
+
+int obd_llog_finish(struct obd_device *obd, int count)
+{
+ int rc;
+
+ OBD_CHECK_DT_OP(obd, llog_finish, 0);
+ OBD_COUNTER_INCREMENT(obd, llog_finish);
+
+ rc = OBP(obd, llog_finish)(obd, count);
+ return rc;
+}
+EXPORT_SYMBOL(obd_llog_finish);
+
+/* context key constructor/destructor: llog_key_init, llog_key_fini */
+LU_KEY_INIT_FINI(llog, struct llog_thread_info);
+/* context key: llog_thread_key */
+LU_CONTEXT_KEY_DEFINE(llog, LCT_MD_THREAD | LCT_MG_THREAD | LCT_LOCAL);
+LU_KEY_INIT_GENERIC(llog);
+EXPORT_SYMBOL(llog_thread_key);
+
+int llog_info_init(void)
+{
+ llog_key_init_generic(&llog_thread_key, NULL);
+ lu_context_key_register(&llog_thread_key);
+ return 0;
+}
+
+void llog_info_fini(void)
+{
+ lu_context_key_degister(&llog_thread_key);
+}
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_osd.c b/drivers/staging/lustre/lustre/obdclass/llog_osd.c
new file mode 100644
index 0000000..654c8e1
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/llog_osd.c
@@ -0,0 +1,1290 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdclass/llog_osd.c - low level llog routines on top of OSD API
+ *
+ * Author: Alexey Zhuravlev <alexey.zhuravlev@intel.com>
+ * Author: Mikhail Pershin <mike.pershin@intel.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LOG
+
+#include <obd.h>
+#include <obd_class.h>
+#include <lustre_fid.h>
+#include <dt_object.h>
+
+#include "llog_internal.h"
+#include "local_storage.h"
+
+/*
+ * - multi-chunks or big-declaration approach
+ * - use unique sequence instead of llog sb tracking unique ids
+ * - re-use existing environment
+ * - named llog support (at present usable for testing only)
+ * - make llog_origin_connect() work with the OSD API
+ */
+
+static int llog_osd_declare_new_object(const struct lu_env *env,
+ struct local_oid_storage *los,
+ struct dt_object *o,
+ struct thandle *th)
+{
+ struct llog_thread_info *lgi = llog_info(env);
+
+ lgi->lgi_attr.la_valid = LA_MODE;
+ lgi->lgi_attr.la_mode = S_IFREG | S_IRUGO | S_IWUSR;
+ lgi->lgi_dof.dof_type = dt_mode_to_dft(S_IFREG);
+
+ return local_object_declare_create(env, los, o, &lgi->lgi_attr,
+ &lgi->lgi_dof, th);
+}
+
+static int llog_osd_create_new_object(const struct lu_env *env,
+ struct local_oid_storage *los,
+ struct dt_object *o,
+ struct thandle *th)
+{
+ struct llog_thread_info *lgi = llog_info(env);
+
+ lgi->lgi_attr.la_valid = LA_MODE;
+ lgi->lgi_attr.la_mode = S_IFREG | S_IRUGO | S_IWUSR;
+ lgi->lgi_dof.dof_type = dt_mode_to_dft(S_IFREG);
+
+ return local_object_create(env, los, o, &lgi->lgi_attr,
+ &lgi->lgi_dof, th);
+}
+
+static int llog_osd_pad(const struct lu_env *env, struct dt_object *o,
+ loff_t *off, int len, int index, struct thandle *th)
+{
+ struct llog_thread_info *lgi = llog_info(env);
+ int rc;
+
+ LASSERT(th);
+ LASSERT(off);
+ LASSERT(len >= LLOG_MIN_REC_SIZE && (len & 0x7) == 0);
+
+ lgi->lgi_tail.lrt_len = lgi->lgi_lrh.lrh_len = len;
+ lgi->lgi_tail.lrt_index = lgi->lgi_lrh.lrh_index = index;
+ lgi->lgi_lrh.lrh_type = LLOG_PAD_MAGIC;
+
+ lgi->lgi_buf.lb_buf = &lgi->lgi_lrh;
+ lgi->lgi_buf.lb_len = sizeof(lgi->lgi_lrh);
+ dt_write_lock(env, o, 0);
+ rc = dt_record_write(env, o, &lgi->lgi_buf, off, th);
+ if (rc) {
+ CERROR("%s: error writing padding record: rc = %d\n",
+ o->do_lu.lo_dev->ld_obd->obd_name, rc);
+ GOTO(out, rc);
+ }
+
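+ /* skip ahead so that the tail lands at the very end of the pad
+ * record, then write the tail there */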
+ lgi->lgi_buf.lb_buf = &lgi->lgi_tail;
+ lgi->lgi_buf.lb_len = sizeof(lgi->lgi_tail);
+ *off += len - sizeof(lgi->lgi_lrh) - sizeof(lgi->lgi_tail);
+ rc = dt_record_write(env, o, &lgi->lgi_buf, off, th);
+ if (rc)
+ CERROR("%s: error writing padding record: rc = %d\n",
+ o->do_lu.lo_dev->ld_obd->obd_name, rc);
+out:
+ dt_write_unlock(env, o);
+ return rc;
+}
+
+static int llog_osd_write_blob(const struct lu_env *env, struct dt_object *o,
+ struct llog_rec_hdr *rec, void *buf,
+ loff_t *off, struct thandle *th)
+{
+ struct llog_thread_info *lgi = llog_info(env);
+ int buflen = rec->lrh_len;
+ int rc;
+
+ LASSERT(env);
+ LASSERT(o);
+
+ if (buflen == 0)
+ CWARN("0-length record\n");
+
+ CDEBUG(D_OTHER, "write blob with type %x, buf %p/%u at off %llu\n",
+ rec->lrh_type, buf, buflen, *off);
+
+ lgi->lgi_attr.la_valid = LA_SIZE;
+ lgi->lgi_attr.la_size = *off;
+
+ if (!buf) {
+ lgi->lgi_buf.lb_len = buflen;
+ lgi->lgi_buf.lb_buf = rec;
+ rc = dt_record_write(env, o, &lgi->lgi_buf, off, th);
+ if (rc)
+ CERROR("%s: error writing log record: rc = %d\n",
+ o->do_lu.lo_dev->ld_obd->obd_name, rc);
+ GOTO(out, rc);
+ }
+
+ /* the buf case */
+ /* protect the following 3 writes from concurrent read */
+ dt_write_lock(env, o, 0);
+ rec->lrh_len = sizeof(*rec) + buflen + sizeof(lgi->lgi_tail);
+ lgi->lgi_buf.lb_len = sizeof(*rec);
+ lgi->lgi_buf.lb_buf = rec;
+ rc = dt_record_write(env, o, &lgi->lgi_buf, off, th);
+ if (rc) {
+ CERROR("%s: error writing log hdr: rc = %d\n",
+ o->do_lu.lo_dev->ld_obd->obd_name, rc);
+ GOTO(out_unlock, rc);
+ }
+
+ lgi->lgi_buf.lb_len = buflen;
+ lgi->lgi_buf.lb_buf = buf;
+ rc = dt_record_write(env, o, &lgi->lgi_buf, off, th);
+ if (rc) {
+ CERROR("%s: error writing log buffer: rc = %d\n",
+ o->do_lu.lo_dev->ld_obd->obd_name, rc);
+ GOTO(out_unlock, rc);
+ }
+
+ lgi->lgi_tail.lrt_len = rec->lrh_len;
+ lgi->lgi_tail.lrt_index = rec->lrh_index;
+ lgi->lgi_buf.lb_len = sizeof(lgi->lgi_tail);
+ lgi->lgi_buf.lb_buf = &lgi->lgi_tail;
+ rc = dt_record_write(env, o, &lgi->lgi_buf, off, th);
+ if (rc)
+ CERROR("%s: error writing log tail: rc = %d\n",
+ o->do_lu.lo_dev->ld_obd->obd_name, rc);
+
+out_unlock:
+ dt_write_unlock(env, o);
+
+out:
+ /* cleanup the content written above */
+ if (rc) {
+ dt_punch(env, o, lgi->lgi_attr.la_size, OBD_OBJECT_EOF, th,
+ BYPASS_CAPA);
+ dt_attr_set(env, o, &lgi->lgi_attr, th, BYPASS_CAPA);
+ }
+
+ return rc;
+}
+
+static int llog_osd_read_header(const struct lu_env *env,
+ struct llog_handle *handle)
+{
+ struct llog_rec_hdr *llh_hdr;
+ struct dt_object *o;
+ struct llog_thread_info *lgi;
+ int rc;
+
+ LASSERT(sizeof(*handle->lgh_hdr) == LLOG_CHUNK_SIZE);
+
+ o = handle->lgh_obj;
+ LASSERT(o);
+
+ lgi = llog_info(env);
+
+ rc = dt_attr_get(env, o, &lgi->lgi_attr, NULL);
+ if (rc)
+ return rc;
+
+ LASSERT(lgi->lgi_attr.la_valid & LA_SIZE);
+
+ if (lgi->lgi_attr.la_size == 0) {
+ CDEBUG(D_HA, "not reading header from 0-byte log\n");
+ return LLOG_EEMPTY;
+ }
+
+ lgi->lgi_off = 0;
+ lgi->lgi_buf.lb_buf = handle->lgh_hdr;
+ lgi->lgi_buf.lb_len = LLOG_CHUNK_SIZE;
+
+ rc = dt_record_read(env, o, &lgi->lgi_buf, &lgi->lgi_off);
+ if (rc) {
+ CERROR("%s: error reading log header from "DFID": rc = %d\n",
+ o->do_lu.lo_dev->ld_obd->obd_name,
+ PFID(lu_object_fid(&o->do_lu)), rc);
+ return rc;
+ }
+
+ llh_hdr = &handle->lgh_hdr->llh_hdr;
+ if (LLOG_REC_HDR_NEEDS_SWABBING(llh_hdr))
+ lustre_swab_llog_hdr(handle->lgh_hdr);
+
+ if (llh_hdr->lrh_type != LLOG_HDR_MAGIC) {
+ CERROR("%s: bad log %s "DFID" header magic: %#x "
+ "(expected %#x)\n", o->do_lu.lo_dev->ld_obd->obd_name,
+ handle->lgh_name ? handle->lgh_name : "",
+ PFID(lu_object_fid(&o->do_lu)),
+ llh_hdr->lrh_type, LLOG_HDR_MAGIC);
+ return -EIO;
+ } else if (llh_hdr->lrh_len != LLOG_CHUNK_SIZE) {
+ CERROR("%s: incorrectly sized log %s "DFID" header: "
+ "%#x (expected %#x)\n"
+ "you may need to re-run lconf --write_conf.\n",
+ o->do_lu.lo_dev->ld_obd->obd_name,
+ handle->lgh_name ? handle->lgh_name : "",
+ PFID(lu_object_fid(&o->do_lu)),
+ llh_hdr->lrh_len, LLOG_CHUNK_SIZE);
+ return -EIO;
+ }
+
+ handle->lgh_last_idx = handle->lgh_hdr->llh_tail.lrt_index;
+
+ return 0;
+}
+
+static int llog_osd_declare_write_rec(const struct lu_env *env,
+ struct llog_handle *loghandle,
+ struct llog_rec_hdr *rec,
+ int idx, struct thandle *th)
+{
+ struct llog_thread_info *lgi = llog_info(env);
+ struct dt_object *o;
+ int rc;
+
+ LASSERT(env);
+ LASSERT(th);
+ LASSERT(loghandle);
+
+ o = loghandle->lgh_obj;
+ LASSERT(o);
+
+ /* the llog header is rewritten on each update, so declare that write */
+ rc = dt_declare_record_write(env, o, sizeof(struct llog_log_hdr), 0,
+ th);
+ if (rc || idx == 0) /* if error or just header */
+ return rc;
+
+ if (dt_object_exists(o)) {
+ rc = dt_attr_get(env, o, &lgi->lgi_attr, BYPASS_CAPA);
+ lgi->lgi_off = lgi->lgi_attr.la_size;
+ LASSERT(ergo(rc == 0, lgi->lgi_attr.la_valid & LA_SIZE));
+ if (rc)
+ return rc;
+
+ rc = dt_declare_punch(env, o, lgi->lgi_off, OBD_OBJECT_EOF, th);
+ if (rc)
+ return rc;
+ } else {
+ lgi->lgi_off = 0;
+ }
+
+ /* XXX: implement declared window or multi-chunks approach */
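+ /* declare a write of up to 32KB at the current end of the llog as an
+ * upper bound for this record plus possible padding */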
+ rc = dt_declare_record_write(env, o, 32 * 1024, lgi->lgi_off, th);
+
+ return rc;
+}
+
+/* returns negative on error; 0 on success when reccookie == NULL; 1 otherwise */
+/* appends if idx == -1, otherwise overwrites record idx. */
+static int llog_osd_write_rec(const struct lu_env *env,
+ struct llog_handle *loghandle,
+ struct llog_rec_hdr *rec,
+ struct llog_cookie *reccookie, int cookiecount,
+ void *buf, int idx, struct thandle *th)
+{
+ struct llog_thread_info *lgi = llog_info(env);
+ struct llog_log_hdr *llh;
+ int reclen = rec->lrh_len;
+ int index, rc, old_tail_idx;
+ struct llog_rec_tail *lrt;
+ struct dt_object *o;
+ size_t left;
+
+ LASSERT(env);
+ llh = loghandle->lgh_hdr;
+ LASSERT(llh);
+ o = loghandle->lgh_obj;
+ LASSERT(o);
+ LASSERT(th);
+
+ CDEBUG(D_OTHER, "new record %x to "DFID"\n",
+ rec->lrh_type, PFID(lu_object_fid(&o->do_lu)));
+
+ /* the record length should not be bigger than LLOG_CHUNK_SIZE */
+ if (buf)
+ rc = (reclen > LLOG_CHUNK_SIZE - sizeof(struct llog_rec_hdr) -
+ sizeof(struct llog_rec_tail)) ? -E2BIG : 0;
+ else
+ rc = (reclen > LLOG_CHUNK_SIZE) ? -E2BIG : 0;
+ if (rc)
+ return rc;
+
+ rc = dt_attr_get(env, o, &lgi->lgi_attr, NULL);
+ if (rc)
+ return rc;
+
+ if (buf)
+ /* write_blob adds header and tail to lrh_len. */
+ reclen = sizeof(*rec) + rec->lrh_len +
+ sizeof(struct llog_rec_tail);
+
+ if (idx != -1) {
+ /* no header: only allowed to insert record 1 */
+ if (idx != 1 && lgi->lgi_attr.la_size == 0)
+ LBUG();
+
+ if (idx && llh->llh_size && llh->llh_size != rec->lrh_len)
+ return -EINVAL;
+
+ if (!ext2_test_bit(idx, llh->llh_bitmap))
+ CERROR("%s: modify unset record %u\n",
+ o->do_lu.lo_dev->ld_obd->obd_name, idx);
+ if (idx != rec->lrh_index)
+ CERROR("%s: index mismatch %d %u\n",
+ o->do_lu.lo_dev->ld_obd->obd_name, idx,
+ rec->lrh_index);
+
+ lgi->lgi_off = 0;
+ rc = llog_osd_write_blob(env, o, &llh->llh_hdr, NULL,
+ &lgi->lgi_off, th);
+ /* we are done if we only write the header or on error */
+ if (rc || idx == 0)
+ return rc;
+
+ if (buf) {
+ /* We assume that caller has set lgh_cur_* */
+ lgi->lgi_off = loghandle->lgh_cur_offset;
+ CDEBUG(D_OTHER,
+ "modify record "DOSTID": idx:%d/%u/%d, len:%u "
+ "offset %llu\n",
+ POSTID(&loghandle->lgh_id.lgl_oi), idx,
+ rec->lrh_index,
+ loghandle->lgh_cur_idx, rec->lrh_len,
+ (long long)(lgi->lgi_off - sizeof(*llh)));
+ if (rec->lrh_index != loghandle->lgh_cur_idx) {
+ CERROR("%s: modify idx mismatch %u/%d\n",
+ o->do_lu.lo_dev->ld_obd->obd_name, idx,
+ loghandle->lgh_cur_idx);
+ return -EFAULT;
+ }
+ } else {
+ /* Assumes constant lrh_len */
+ lgi->lgi_off = sizeof(*llh) + (idx - 1) * reclen;
+ }
+
+ rc = llog_osd_write_blob(env, o, rec, buf, &lgi->lgi_off, th);
+ if (rc == 0 && reccookie) {
+ reccookie->lgc_lgl = loghandle->lgh_id;
+ reccookie->lgc_index = idx;
+ rc = 1;
+ }
+ return rc;
+ }
+
+ /* Make sure that records don't cross a chunk boundary, so we can
+ * process them page-at-a-time if needed. If a record would cross a
+ * chunk boundary, write a fake (but referenced) pad entry to fill the chunk.
+ *
+ * We know that llog_current_log() will return a loghandle that is
+ * big enough to hold reclen, so all we care about is padding here.
+ */
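+ /* e.g. when the space left in the current chunk is neither zero nor
+ * an exact fit and is smaller than reclen + LLOG_MIN_REC_SIZE, the
+ * whole remainder is consumed by one LLOG_PAD_MAGIC record so the
+ * real record starts on the next chunk boundary. */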
+ LASSERT(lgi->lgi_attr.la_valid & LA_SIZE);
+ lgi->lgi_off = lgi->lgi_attr.la_size;
+ left = LLOG_CHUNK_SIZE - (lgi->lgi_off & (LLOG_CHUNK_SIZE - 1));
+ /* NOTE: padding is a record, but no bit is set */
+ if (left != 0 && left != reclen &&
+ left < (reclen + LLOG_MIN_REC_SIZE)) {
+ index = loghandle->lgh_last_idx + 1;
+ rc = llog_osd_pad(env, o, &lgi->lgi_off, left, index, th);
+ if (rc)
+ return rc;
+ loghandle->lgh_last_idx++; /* for the pad record */
+ }
+ /* if it's the last idx in log file, then return -ENOSPC */
+ if (loghandle->lgh_last_idx >= LLOG_BITMAP_SIZE(llh) - 1)
+ return -ENOSPC;
+
+ loghandle->lgh_last_idx++;
+ index = loghandle->lgh_last_idx;
+ LASSERT(index < LLOG_BITMAP_SIZE(llh));
+ rec->lrh_index = index;
+ if (buf == NULL) {
+ lrt = (struct llog_rec_tail *)((char *)rec + rec->lrh_len -
+ sizeof(*lrt));
+ lrt->lrt_len = rec->lrh_len;
+ lrt->lrt_index = rec->lrh_index;
+ }
+ /* The caller should make sure only one process accesses lgh_last_idx,
+ * otherwise it might hit the assertion. */
+ LASSERT(index < LLOG_BITMAP_SIZE(llh));
+ spin_lock(&loghandle->lgh_hdr_lock);
+ if (ext2_set_bit(index, llh->llh_bitmap)) {
+ CERROR("%s: index %u already set in log bitmap\n",
+ o->do_lu.lo_dev->ld_obd->obd_name, index);
+ spin_unlock(&loghandle->lgh_hdr_lock);
+ LBUG(); /* should never happen */
+ }
+ llh->llh_count++;
+ spin_unlock(&loghandle->lgh_hdr_lock);
+ old_tail_idx = llh->llh_tail.lrt_index;
+ llh->llh_tail.lrt_index = index;
+
+ lgi->lgi_off = 0;
+ rc = llog_osd_write_blob(env, o, &llh->llh_hdr, NULL, &lgi->lgi_off,
+ th);
+ if (rc)
+ GOTO(out, rc);
+
+ rc = dt_attr_get(env, o, &lgi->lgi_attr, NULL);
+ if (rc)
+ GOTO(out, rc);
+
+ LASSERT(lgi->lgi_attr.la_valid & LA_SIZE);
+ lgi->lgi_off = lgi->lgi_attr.la_size;
+
+ rc = llog_osd_write_blob(env, o, rec, buf, &lgi->lgi_off, th);
+
+out:
+ /* cleanup llog for error case */
+ if (rc) {
+ spin_lock(&loghandle->lgh_hdr_lock);
+ ext2_clear_bit(index, llh->llh_bitmap);
+ llh->llh_count--;
+ spin_unlock(&loghandle->lgh_hdr_lock);
+
+ /* restore the header */
+ loghandle->lgh_last_idx--;
+ llh->llh_tail.lrt_index = old_tail_idx;
+ lgi->lgi_off = 0;
+ llog_osd_write_blob(env, o, &llh->llh_hdr, NULL,
+ &lgi->lgi_off, th);
+ }
+
+ CDEBUG(D_RPCTRACE, "added record "DOSTID": idx: %u, %u\n",
+ POSTID(&loghandle->lgh_id.lgl_oi), index, rec->lrh_len);
+ if (rc == 0 && reccookie) {
+ reccookie->lgc_lgl = loghandle->lgh_id;
+ reccookie->lgc_index = index;
+ if ((rec->lrh_type == MDS_UNLINK_REC) ||
+ (rec->lrh_type == MDS_SETATTR64_REC))
+ reccookie->lgc_subsys = LLOG_MDS_OST_ORIG_CTXT;
+ else if (rec->lrh_type == OST_SZ_REC)
+ reccookie->lgc_subsys = LLOG_SIZE_ORIG_CTXT;
+ else
+ reccookie->lgc_subsys = -1;
+ rc = 1;
+ }
+ return rc;
+}
+
+/* We can skip reading at least as many log blocks as the number of
+ * minimum sized log records we are skipping. If it turns out
+ * that we are not far enough along the log (because the
+ * actual records are larger than minimum size) we just skip
+ * some more records.
+ */
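+/* e.g. skipping from record curr to record goal advances the offset by
+ * (goal - curr - 1) * LLOG_MIN_REC_SIZE bytes and then rounds it down to a
+ * chunk boundary, so the result can only undershoot the goal record. */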
+static void llog_skip_over(__u64 *off, int curr, int goal)
+{
+ if (goal <= curr)
+ return;
+ *off = (*off + (goal - curr - 1) * LLOG_MIN_REC_SIZE) &
+ ~(LLOG_CHUNK_SIZE - 1);
+}
+
+/* sets:
+ * - cur_offset to the furthest point read in the log file
+ * - cur_idx to the log index preceding cur_offset
+ * returns -EIO/-EINVAL on error
+ */
+static int llog_osd_next_block(const struct lu_env *env,
+ struct llog_handle *loghandle, int *cur_idx,
+ int next_idx, __u64 *cur_offset, void *buf,
+ int len)
+{
+ struct llog_thread_info *lgi = llog_info(env);
+ struct dt_object *o;
+ struct dt_device *dt;
+ int rc;
+
+ LASSERT(env);
+ LASSERT(lgi);
+
+ if (len == 0 || len & (LLOG_CHUNK_SIZE - 1))
+ return -EINVAL;
+
+ CDEBUG(D_OTHER, "looking for log index %u (cur idx %u off "LPU64")\n",
+ next_idx, *cur_idx, *cur_offset);
+
+ LASSERT(loghandle);
+ LASSERT(loghandle->lgh_ctxt);
+
+ o = loghandle->lgh_obj;
+ LASSERT(o);
+ LASSERT(dt_object_exists(o));
+ dt = lu2dt_dev(o->do_lu.lo_dev);
+ LASSERT(dt);
+
+ rc = dt_attr_get(env, o, &lgi->lgi_attr, BYPASS_CAPA);
+ if (rc)
+ GOTO(out, rc);
+
+ while (*cur_offset < lgi->lgi_attr.la_size) {
+ struct llog_rec_hdr *rec, *last_rec;
+ struct llog_rec_tail *tail;
+
+ llog_skip_over(cur_offset, *cur_idx, next_idx);
+
+ /* read up to next LLOG_CHUNK_SIZE block */
+ lgi->lgi_buf.lb_len = LLOG_CHUNK_SIZE -
+ (*cur_offset & (LLOG_CHUNK_SIZE - 1));
+ lgi->lgi_buf.lb_buf = buf;
+
+ /* Note: a read lock is not needed around the la_size read in
+ * dt_attr_get() above. There are only two cases that
+ * matter. Either la_size == cur_offset, in which case the
+ * entire read is skipped, or la_size > cur_offset and the loop
+ * is entered and this thread is blocked at dt_read_lock()
+ * until the write is completed. When the write completes, then
+ * the dt_read() will be done with the full length, and will
+ * get the full data.
+ */
+ dt_read_lock(env, o, 0);
+ rc = dt_read(env, o, &lgi->lgi_buf, cur_offset);
+ dt_read_unlock(env, o);
+ if (rc < 0) {
+ CERROR("%s: can't read llog block from log "DFID
+ " offset "LPU64": rc = %d\n",
+ o->do_lu.lo_dev->ld_obd->obd_name,
+ PFID(lu_object_fid(&o->do_lu)), *cur_offset,
+ rc);
+ GOTO(out, rc);
+ }
+
+ if (rc < len) {
+ /* signal the end of the valid buffer to
+ * llog_process */
+ memset(buf + rc, 0, len - rc);
+ }
+
+ if (rc == 0) /* end of file, nothing to do */
+ GOTO(out, rc);
+
+ if (rc < sizeof(*tail)) {
+ CERROR("%s: invalid llog block at log id "DOSTID"/%u "
+ "offset "LPU64"\n",
+ o->do_lu.lo_dev->ld_obd->obd_name,
+ POSTID(&loghandle->lgh_id.lgl_oi),
+ loghandle->lgh_id.lgl_ogen, *cur_offset);
+ GOTO(out, rc = -EINVAL);
+ }
+
+ rec = buf;
+ if (LLOG_REC_HDR_NEEDS_SWABBING(rec))
+ lustre_swab_llog_rec(rec);
+
+ tail = (struct llog_rec_tail *)((char *)buf + rc -
+ sizeof(struct llog_rec_tail));
+ /* get the last record in block */
+ last_rec = (struct llog_rec_hdr *)((char *)buf + rc -
+ le32_to_cpu(tail->lrt_len));
+
+ if (LLOG_REC_HDR_NEEDS_SWABBING(last_rec))
+ lustre_swab_llog_rec(last_rec);
+ LASSERT(last_rec->lrh_index == tail->lrt_index);
+
+ *cur_idx = tail->lrt_index;
+
+ /* this shouldn't happen */
+ if (tail->lrt_index == 0) {
+ CERROR("%s: invalid llog tail at log id "DOSTID"/%u "
+ "offset "LPU64"\n",
+ o->do_lu.lo_dev->ld_obd->obd_name,
+ POSTID(&loghandle->lgh_id.lgl_oi),
+ loghandle->lgh_id.lgl_ogen, *cur_offset);
+ GOTO(out, rc = -EINVAL);
+ }
+ if (tail->lrt_index < next_idx)
+ continue;
+
+ /* sanity check that the start of the new buffer is no farther
+ * than the record that we wanted. This shouldn't happen. */
+ if (rec->lrh_index > next_idx) {
+ CERROR("%s: missed desired record? %u > %u\n",
+ o->do_lu.lo_dev->ld_obd->obd_name,
+ rec->lrh_index, next_idx);
+ GOTO(out, rc = -ENOENT);
+ }
+ GOTO(out, rc = 0);
+ }
+ GOTO(out, rc = -EIO);
+out:
+ return rc;
+}
+
+static int llog_osd_prev_block(const struct lu_env *env,
+ struct llog_handle *loghandle,
+ int prev_idx, void *buf, int len)
+{
+ struct llog_thread_info *lgi = llog_info(env);
+ struct dt_object *o;
+ struct dt_device *dt;
+ loff_t cur_offset;
+ int rc;
+
+ if (len == 0 || len & (LLOG_CHUNK_SIZE - 1))
+ return -EINVAL;
+
+ CDEBUG(D_OTHER, "looking for log index %u\n", prev_idx);
+
+ LASSERT(loghandle);
+ LASSERT(loghandle->lgh_ctxt);
+
+ o = loghandle->lgh_obj;
+ LASSERT(o);
+ LASSERT(dt_object_exists(o));
+ dt = lu2dt_dev(o->do_lu.lo_dev);
+ LASSERT(dt);
+
+ cur_offset = LLOG_CHUNK_SIZE;
+ llog_skip_over(&cur_offset, 0, prev_idx);
+
+ rc = dt_attr_get(env, o, &lgi->lgi_attr, BYPASS_CAPA);
+ if (rc)
+ GOTO(out, rc);
+
+ while (cur_offset < lgi->lgi_attr.la_size) {
+ struct llog_rec_hdr *rec, *last_rec;
+ struct llog_rec_tail *tail;
+
+ lgi->lgi_buf.lb_len = len;
+ lgi->lgi_buf.lb_buf = buf;
+ /* It is OK to have locking around dt_read() only, see
+ * comment in llog_osd_next_block for details
+ */
+ dt_read_lock(env, o, 0);
+ rc = dt_read(env, o, &lgi->lgi_buf, &cur_offset);
+ dt_read_unlock(env, o);
+ if (rc < 0) {
+ CERROR("%s: can't read llog block from log "DFID
+ " offset "LPU64": rc = %d\n",
+ o->do_lu.lo_dev->ld_obd->obd_name,
+ PFID(lu_object_fid(&o->do_lu)), cur_offset, rc);
+ GOTO(out, rc);
+ }
+
+ if (rc == 0) /* end of file, nothing to do */
+ GOTO(out, rc);
+
+ if (rc < sizeof(*tail)) {
+ CERROR("%s: invalid llog block at log id "DOSTID"/%u "
+ "offset "LPU64"\n",
+ o->do_lu.lo_dev->ld_obd->obd_name,
+ POSTID(&loghandle->lgh_id.lgl_oi),
+ loghandle->lgh_id.lgl_ogen, cur_offset);
+ GOTO(out, rc = -EINVAL);
+ }
+
+ rec = buf;
+ if (LLOG_REC_HDR_NEEDS_SWABBING(rec))
+ lustre_swab_llog_rec(rec);
+
+ tail = (struct llog_rec_tail *)((char *)buf + rc -
+ sizeof(struct llog_rec_tail));
+ /* get the last record in block */
+ last_rec = (struct llog_rec_hdr *)((char *)buf + rc -
+ le32_to_cpu(tail->lrt_len));
+
+ if (LLOG_REC_HDR_NEEDS_SWABBING(last_rec))
+ lustre_swab_llog_rec(last_rec);
+ LASSERT(last_rec->lrh_index == tail->lrt_index);
+
+ /* this shouldn't happen */
+ if (tail->lrt_index == 0) {
+ CERROR("%s: invalid llog tail at log id "DOSTID"/%u "
+ "offset "LPU64"\n",
+ o->do_lu.lo_dev->ld_obd->obd_name,
+ POSTID(&loghandle->lgh_id.lgl_oi),
+ loghandle->lgh_id.lgl_ogen, cur_offset);
+ GOTO(out, rc = -EINVAL);
+ }
+ if (tail->lrt_index < prev_idx)
+ continue;
+
+ /* sanity check that the start of the new buffer is no farther
+ * than the record that we wanted. This shouldn't happen. */
+ if (rec->lrh_index > prev_idx) {
+ CERROR("%s: missed desired record? %u > %u\n",
+ o->do_lu.lo_dev->ld_obd->obd_name,
+ rec->lrh_index, prev_idx);
+ GOTO(out, rc = -ENOENT);
+ }
+ GOTO(out, rc = 0);
+ }
+ GOTO(out, rc = -EIO);
+out:
+ return rc;
+}
+
+struct dt_object *llog_osd_dir_get(const struct lu_env *env,
+ struct llog_ctxt *ctxt)
+{
+ struct dt_device *dt;
+ struct dt_thread_info *dti = dt_info(env);
+ struct dt_object *dir;
+ int rc;
+
+ dt = ctxt->loc_exp->exp_obd->obd_lvfs_ctxt.dt;
+ if (ctxt->loc_dir == NULL) {
+ rc = dt_root_get(env, dt, &dti->dti_fid);
+ if (rc)
+ return ERR_PTR(rc);
+ dir = dt_locate(env, dt, &dti->dti_fid);
+ } else {
+ lu_object_get(&ctxt->loc_dir->do_lu);
+ dir = ctxt->loc_dir;
+ }
+
+ return dir;
+}
+
+static int llog_osd_open(const struct lu_env *env, struct llog_handle *handle,
+ struct llog_logid *logid, char *name,
+ enum llog_open_param open_param)
+{
+ struct llog_thread_info *lgi = llog_info(env);
+ struct llog_ctxt *ctxt = handle->lgh_ctxt;
+ struct dt_object *o;
+ struct dt_device *dt;
+ struct ls_device *ls;
+ struct local_oid_storage *los;
+ int rc = 0;
+
+ LASSERT(env);
+ LASSERT(ctxt);
+ LASSERT(ctxt->loc_exp);
+ LASSERT(ctxt->loc_exp->exp_obd);
+ dt = ctxt->loc_exp->exp_obd->obd_lvfs_ctxt.dt;
+ LASSERT(dt);
+
+ ls = ls_device_get(dt);
+ if (IS_ERR(ls))
+ return PTR_ERR(ls);
+
+ mutex_lock(&ls->ls_los_mutex);
+ los = dt_los_find(ls, name != NULL ? FID_SEQ_LLOG_NAME : FID_SEQ_LLOG);
+ mutex_unlock(&ls->ls_los_mutex);
+ LASSERT(los);
+ ls_device_put(env, ls);
+
+ LASSERT(handle);
+
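+ /* resolve the llog FID: directly from the logid, by a name lookup in
+ * the llog directory, or by generating a fresh local FID for a new
+ * anonymous llog */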
+ if (logid != NULL) {
+ logid_to_fid(logid, &lgi->lgi_fid);
+ } else if (name) {
+ struct dt_object *llog_dir;
+
+ llog_dir = llog_osd_dir_get(env, ctxt);
+ if (IS_ERR(llog_dir))
+ GOTO(out, rc = PTR_ERR(llog_dir));
+ dt_read_lock(env, llog_dir, 0);
+ rc = dt_lookup_dir(env, llog_dir, name, &lgi->lgi_fid);
+ dt_read_unlock(env, llog_dir);
+ lu_object_put(env, &llog_dir->do_lu);
+ if (rc == -ENOENT && open_param == LLOG_OPEN_NEW) {
+ /* generate fid for new llog */
+ rc = local_object_fid_generate(env, los,
+ &lgi->lgi_fid);
+ }
+ if (rc < 0)
+ GOTO(out, rc);
+ OBD_ALLOC(handle->lgh_name, strlen(name) + 1);
+ if (handle->lgh_name)
+ strcpy(handle->lgh_name, name);
+ else
+ GOTO(out, rc = -ENOMEM);
+ } else {
+ LASSERTF(open_param & LLOG_OPEN_NEW, "%#x\n", open_param);
+ /* generate fid for new llog */
+ rc = local_object_fid_generate(env, los, &lgi->lgi_fid);
+ if (rc < 0)
+ GOTO(out, rc);
+ }
+
+ o = ls_locate(env, ls, &lgi->lgi_fid);
+ if (IS_ERR(o))
+ GOTO(out_name, rc = PTR_ERR(o));
+
+ /* the llog was not expected to be new, but the object does not exist */
+ if (open_param != LLOG_OPEN_NEW && !dt_object_exists(o))
+ GOTO(out_put, rc = -ENOENT);
+
+ fid_to_logid(&lgi->lgi_fid, &handle->lgh_id);
+ handle->lgh_obj = o;
+ handle->private_data = los;
+ LASSERT(handle->lgh_ctxt);
+
+ return rc;
+
+out_put:
+ lu_object_put(env, &o->do_lu);
+out_name:
+ if (handle->lgh_name != NULL)
+ OBD_FREE(handle->lgh_name, strlen(name) + 1);
+out:
+ dt_los_put(los);
+ return rc;
+}
+
+static int llog_osd_exist(struct llog_handle *handle)
+{
+ LASSERT(handle->lgh_obj);
+ return (dt_object_exists(handle->lgh_obj) &&
+ !lu_object_is_dying(handle->lgh_obj->do_lu.lo_header));
+}
+
+static int llog_osd_declare_create(const struct lu_env *env,
+ struct llog_handle *res, struct thandle *th)
+{
+ struct llog_thread_info *lgi = llog_info(env);
+ struct local_oid_storage *los;
+ struct dt_object *o;
+ int rc;
+
+ LASSERT(res->lgh_obj);
+ LASSERT(th);
+
+ /* object can be created by another thread */
+ o = res->lgh_obj;
+ if (dt_object_exists(o))
+ return 0;
+
+ los = res->private_data;
+ LASSERT(los);
+
+ rc = llog_osd_declare_new_object(env, los, o, th);
+ if (rc)
+ return rc;
+
+ rc = dt_declare_record_write(env, o, LLOG_CHUNK_SIZE, 0, th);
+ if (rc)
+ return rc;
+
+ if (res->lgh_name) {
+ struct dt_object *llog_dir;
+
+ llog_dir = llog_osd_dir_get(env, res->lgh_ctxt);
+ if (IS_ERR(llog_dir))
+ return PTR_ERR(llog_dir);
+ logid_to_fid(&res->lgh_id, &lgi->lgi_fid);
+ rc = dt_declare_insert(env, llog_dir,
+ (struct dt_rec *)&lgi->lgi_fid,
+ (struct dt_key *)res->lgh_name, th);
+ lu_object_put(env, &llog_dir->do_lu);
+ if (rc)
+ CERROR("%s: can't declare named llog %s: rc = %d\n",
+ o->do_lu.lo_dev->ld_obd->obd_name,
+ res->lgh_name, rc);
+ }
+ return rc;
+}
+
+/* This is a callback from the llog_* functions.
+ * Assumes caller has already pushed us into the kernel context. */
+static int llog_osd_create(const struct lu_env *env, struct llog_handle *res,
+ struct thandle *th)
+{
+ struct llog_thread_info *lgi = llog_info(env);
+ struct local_oid_storage *los;
+ struct dt_object *o;
+ int rc = 0;
+
+ LASSERT(env);
+ o = res->lgh_obj;
+ LASSERT(o);
+
+ /* llog can be already created */
+ if (dt_object_exists(o))
+ return -EEXIST;
+
+ los = res->private_data;
+ LASSERT(los);
+
+ dt_write_lock(env, o, 0);
+ if (!dt_object_exists(o))
+ rc = llog_osd_create_new_object(env, los, o, th);
+ else
+ rc = -EEXIST;
+
+ dt_write_unlock(env, o);
+ if (rc)
+ return rc;
+
+ if (res->lgh_name) {
+ struct dt_object *llog_dir;
+
+ llog_dir = llog_osd_dir_get(env, res->lgh_ctxt);
+ if (IS_ERR(llog_dir))
+ return PTR_ERR(llog_dir);
+
+ logid_to_fid(&res->lgh_id, &lgi->lgi_fid);
+ dt_read_lock(env, llog_dir, 0);
+ rc = dt_insert(env, llog_dir,
+ (struct dt_rec *)&lgi->lgi_fid,
+ (struct dt_key *)res->lgh_name,
+ th, BYPASS_CAPA, 1);
+ dt_read_unlock(env, llog_dir);
+ lu_object_put(env, &llog_dir->do_lu);
+ if (rc)
+ CERROR("%s: can't create named llog %s: rc = %d\n",
+ o->do_lu.lo_dev->ld_obd->obd_name,
+ res->lgh_name, rc);
+ }
+ return rc;
+}
+
+static int llog_osd_close(const struct lu_env *env, struct llog_handle *handle)
+{
+ struct local_oid_storage *los;
+ int rc = 0;
+
+ LASSERT(handle->lgh_obj);
+
+ lu_object_put(env, &handle->lgh_obj->do_lu);
+
+ los = handle->private_data;
+ LASSERT(los);
+ dt_los_put(los);
+
+ if (handle->lgh_name)
+ OBD_FREE(handle->lgh_name, strlen(handle->lgh_name) + 1);
+
+ return rc;
+}
+
+static int llog_osd_destroy(const struct lu_env *env,
+ struct llog_handle *loghandle)
+{
+ struct llog_ctxt *ctxt;
+ struct dt_object *o, *llog_dir = NULL;
+ struct dt_device *d;
+ struct thandle *th;
+ char *name = NULL;
+ int rc;
+
+ ctxt = loghandle->lgh_ctxt;
+ LASSERT(ctxt);
+
+ o = loghandle->lgh_obj;
+ LASSERT(o);
+
+ d = lu2dt_dev(o->do_lu.lo_dev);
+ LASSERT(d);
+ LASSERT(d == ctxt->loc_exp->exp_obd->obd_lvfs_ctxt.dt);
+
+ th = dt_trans_create(env, d);
+ if (IS_ERR(th))
+ return PTR_ERR(th);
+
+ if (loghandle->lgh_name) {
+ llog_dir = llog_osd_dir_get(env, ctxt);
+ if (IS_ERR(llog_dir))
+ GOTO(out_trans, rc = PTR_ERR(llog_dir));
+
+ name = loghandle->lgh_name;
+ rc = dt_declare_delete(env, llog_dir,
+ (struct dt_key *)name, th);
+ if (rc)
+ GOTO(out_trans, rc);
+ }
+
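+ /* the directory entry removal (for named llogs), the nlink drop and
+ * the object destroy are declared and executed in one transaction */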
+ dt_declare_ref_del(env, o, th);
+
+ rc = dt_declare_destroy(env, o, th);
+ if (rc)
+ GOTO(out_trans, rc);
+
+ rc = dt_trans_start_local(env, d, th);
+ if (rc)
+ GOTO(out_trans, rc);
+
+ dt_write_lock(env, o, 0);
+ if (dt_object_exists(o)) {
+ if (name) {
+ dt_read_lock(env, llog_dir, 0);
+ rc = dt_delete(env, llog_dir,
+ (struct dt_key *) name,
+ th, BYPASS_CAPA);
+ dt_read_unlock(env, llog_dir);
+ if (rc) {
+ CERROR("%s: can't remove llog %s: rc = %d\n",
+ o->do_lu.lo_dev->ld_obd->obd_name,
+ name, rc);
+ GOTO(out_unlock, rc);
+ }
+ }
+ dt_ref_del(env, o, th);
+ rc = dt_destroy(env, o, th);
+ if (rc)
+ GOTO(out_unlock, rc);
+ }
+out_unlock:
+ dt_write_unlock(env, o);
+out_trans:
+ dt_trans_stop(env, d, th);
+ if (llog_dir != NULL)
+ lu_object_put(env, &llog_dir->do_lu);
+ return rc;
+}
+
+static int llog_osd_setup(const struct lu_env *env, struct obd_device *obd,
+ struct obd_llog_group *olg, int ctxt_idx,
+ struct obd_device *disk_obd)
+{
+ struct local_oid_storage *los;
+ struct llog_thread_info *lgi = llog_info(env);
+ struct llog_ctxt *ctxt;
+ int rc = 0;
+
+ LASSERT(obd);
+ LASSERT(olg->olg_ctxts[ctxt_idx]);
+
+ ctxt = llog_ctxt_get(olg->olg_ctxts[ctxt_idx]);
+ LASSERT(ctxt);
+
+ /* initialize the data needed to generate new FIDs;
+ * essentially we need a sequence */
+ lgi->lgi_fid.f_seq = FID_SEQ_LLOG;
+ lgi->lgi_fid.f_oid = 1;
+ lgi->lgi_fid.f_ver = 0;
+ rc = local_oid_storage_init(env, disk_obd->obd_lvfs_ctxt.dt,
+ &lgi->lgi_fid, &los);
+ if (rc < 0)
+ return rc;
+
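+ /* a second sequence is used to generate FIDs for named llogs */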
+ lgi->lgi_fid.f_seq = FID_SEQ_LLOG_NAME;
+ lgi->lgi_fid.f_oid = 1;
+ lgi->lgi_fid.f_ver = 0;
+ rc = local_oid_storage_init(env, disk_obd->obd_lvfs_ctxt.dt,
+ &lgi->lgi_fid, &los);
+ llog_ctxt_put(ctxt);
+ return rc;
+}
+
+static int llog_osd_cleanup(const struct lu_env *env, struct llog_ctxt *ctxt)
+{
+ struct dt_device *dt;
+ struct ls_device *ls;
+ struct local_oid_storage *los, *nlos;
+
+ LASSERT(ctxt->loc_exp->exp_obd);
+ dt = ctxt->loc_exp->exp_obd->obd_lvfs_ctxt.dt;
+ ls = ls_device_get(dt);
+ if (IS_ERR(ls))
+ return PTR_ERR(ls);
+
+ mutex_lock(&ls->ls_los_mutex);
+ los = dt_los_find(ls, FID_SEQ_LLOG);
+ nlos = dt_los_find(ls, FID_SEQ_LLOG_NAME);
+ mutex_unlock(&ls->ls_los_mutex);
+ if (los != NULL) {
+ dt_los_put(los);
+ local_oid_storage_fini(env, los);
+ }
+ if (nlos != NULL) {
+ dt_los_put(nlos);
+ local_oid_storage_fini(env, nlos);
+ }
+ ls_device_put(env, ls);
+ return 0;
+}
+
+struct llog_operations llog_osd_ops = {
+ .lop_next_block = llog_osd_next_block,
+ .lop_prev_block = llog_osd_prev_block,
+ .lop_read_header = llog_osd_read_header,
+ .lop_destroy = llog_osd_destroy,
+ .lop_setup = llog_osd_setup,
+ .lop_cleanup = llog_osd_cleanup,
+ .lop_open = llog_osd_open,
+ .lop_exist = llog_osd_exist,
+ .lop_declare_create = llog_osd_declare_create,
+ .lop_create = llog_osd_create,
+ .lop_declare_write_rec = llog_osd_declare_write_rec,
+ .lop_write_rec = llog_osd_write_rec,
+ .lop_close = llog_osd_close,
+};
+EXPORT_SYMBOL(llog_osd_ops);
+
+/* reads the catalog list */
+int llog_osd_get_cat_list(const struct lu_env *env, struct dt_device *d,
+ int idx, int count, struct llog_catid *idarray)
+{
+ struct llog_thread_info *lgi = llog_info(env);
+ struct dt_object *o = NULL;
+ struct thandle *th;
+ int rc, size;
+
+ LASSERT(d);
+
+ size = sizeof(*idarray) * count;
+ lgi->lgi_off = idx * sizeof(*idarray);
+
+ lu_local_obj_fid(&lgi->lgi_fid, LLOG_CATALOGS_OID);
+
+ o = dt_locate(env, d, &lgi->lgi_fid);
+ if (IS_ERR(o))
+ return PTR_ERR(o);
+
+ if (!dt_object_exists(o)) {
+ th = dt_trans_create(env, d);
+ if (IS_ERR(th))
+ GOTO(out, rc = PTR_ERR(th));
+
+ lgi->lgi_attr.la_valid = LA_MODE;
+ lgi->lgi_attr.la_mode = S_IFREG | S_IRUGO | S_IWUSR;
+ lgi->lgi_dof.dof_type = dt_mode_to_dft(S_IFREG);
+
+ rc = dt_declare_create(env, o, &lgi->lgi_attr, NULL,
+ &lgi->lgi_dof, th);
+ if (rc)
+ GOTO(out_trans, rc);
+
+ rc = dt_trans_start_local(env, d, th);
+ if (rc)
+ GOTO(out_trans, rc);
+
+ dt_write_lock(env, o, 0);
+ if (!dt_object_exists(o))
+ rc = dt_create(env, o, &lgi->lgi_attr, NULL,
+ &lgi->lgi_dof, th);
+ dt_write_unlock(env, o);
+out_trans:
+ dt_trans_stop(env, d, th);
+ if (rc)
+ GOTO(out, rc);
+ }
+
+ rc = dt_attr_get(env, o, &lgi->lgi_attr, BYPASS_CAPA);
+ if (rc)
+ GOTO(out, rc);
+
+ if (!S_ISREG(lgi->lgi_attr.la_mode)) {
+ CERROR("%s: CATALOGS is not a regular file!: mode = %o\n",
+ o->do_lu.lo_dev->ld_obd->obd_name,
+ lgi->lgi_attr.la_mode);
+ GOTO(out, rc = -ENOENT);
+ }
+
+ CDEBUG(D_CONFIG, "cat list: disk size=%d, read=%d\n",
+ (int)lgi->lgi_attr.la_size, size);
+
+ /* return just number of llogs */
+ if (idarray == NULL) {
+ rc = lgi->lgi_attr.la_size / sizeof(*idarray);
+ GOTO(out, rc);
+ }
+
+ /* read for new ost index or for empty file */
+ memset(idarray, 0, size);
+ if (lgi->lgi_attr.la_size < lgi->lgi_off + size)
+ GOTO(out, rc = 0);
+
+ lgi->lgi_buf.lb_buf = idarray;
+ lgi->lgi_buf.lb_len = size;
+ rc = dt_record_read(env, o, &lgi->lgi_buf, &lgi->lgi_off);
+ if (rc) {
+ CERROR("%s: error reading CATALOGS: rc = %d\n",
+ o->do_lu.lo_dev->ld_obd->obd_name, rc);
+ GOTO(out, rc);
+ }
+
+out:
+ lu_object_put(env, &o->do_lu);
+ return rc;
+}
+EXPORT_SYMBOL(llog_osd_get_cat_list);
+
+/* writes the cat list */
+int llog_osd_put_cat_list(const struct lu_env *env, struct dt_device *d,
+ int idx, int count, struct llog_catid *idarray)
+{
+ struct llog_thread_info *lgi = llog_info(env);
+ struct dt_object *o = NULL;
+ struct thandle *th;
+ int rc, size;
+
+ if (!count)
+ return 0;
+
+ LASSERT(d);
+
+ size = sizeof(*idarray) * count;
+ lgi->lgi_off = idx * sizeof(*idarray);
+
+ lu_local_obj_fid(&lgi->lgi_fid, LLOG_CATALOGS_OID);
+
+ o = dt_locate(env, d, &lgi->lgi_fid);
+ if (IS_ERR(o))
+ return PTR_ERR(o);
+
+ if (!dt_object_exists(o))
+ GOTO(out, rc = -ENOENT);
+
+ rc = dt_attr_get(env, o, &lgi->lgi_attr, BYPASS_CAPA);
+ if (rc)
+ GOTO(out, rc);
+
+ if (!S_ISREG(lgi->lgi_attr.la_mode)) {
+ CERROR("%s: CATALOGS is not a regular file!: mode = %o\n",
+ o->do_lu.lo_dev->ld_obd->obd_name,
+ lgi->lgi_attr.la_mode);
+ GOTO(out, rc = -ENOENT);
+ }
+
+ th = dt_trans_create(env, d);
+ if (IS_ERR(th))
+ GOTO(out, rc = PTR_ERR(th));
+
+ rc = dt_declare_record_write(env, o, size, lgi->lgi_off, th);
+ if (rc)
+ GOTO(out, rc);
+
+ rc = dt_trans_start_local(env, d, th);
+ if (rc)
+ GOTO(out_trans, rc);
+
+ lgi->lgi_buf.lb_buf = idarray;
+ lgi->lgi_buf.lb_len = size;
+ rc = dt_record_write(env, o, &lgi->lgi_buf, &lgi->lgi_off, th);
+ if (rc)
+ CDEBUG(D_INODE, "error writeing CATALOGS: rc = %d\n", rc);
+out_trans:
+ dt_trans_stop(env, d, th);
+out:
+ lu_object_put(env, &o->do_lu);
+ return rc;
+}
+EXPORT_SYMBOL(llog_osd_put_cat_list);
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_swab.c b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
new file mode 100644
index 0000000..24ca099
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
@@ -0,0 +1,413 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdclass/llog_swab.c
+ *
+ * Swabbing of llog datatypes (from disk or over the wire).
+ *
+ * Author: jacob berkman <jacob@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LOG
+
+
+#include <lustre_log.h>
+
+static void print_llogd_body(struct llogd_body *d)
+{
+ CDEBUG(D_OTHER, "llogd body: %p\n", d);
+ CDEBUG(D_OTHER, "\tlgd_logid.lgl_oi: "DOSTID"\n",
+ POSTID(&d->lgd_logid.lgl_oi));
+ CDEBUG(D_OTHER, "\tlgd_logid.lgl_ogen: %#x\n", d->lgd_logid.lgl_ogen);
+ CDEBUG(D_OTHER, "\tlgd_ctxt_idx: %#x\n", d->lgd_ctxt_idx);
+ CDEBUG(D_OTHER, "\tlgd_llh_flags: %#x\n", d->lgd_llh_flags);
+ CDEBUG(D_OTHER, "\tlgd_index: %#x\n", d->lgd_index);
+ CDEBUG(D_OTHER, "\tlgd_saved_index: %#x\n", d->lgd_saved_index);
+ CDEBUG(D_OTHER, "\tlgd_len: %#x\n", d->lgd_len);
+ CDEBUG(D_OTHER, "\tlgd_cur_offset: "LPX64"\n", d->lgd_cur_offset);
+}
+
+void lustre_swab_lu_fid(struct lu_fid *fid)
+{
+ __swab64s (&fid->f_seq);
+ __swab32s (&fid->f_oid);
+ __swab32s (&fid->f_ver);
+}
+EXPORT_SYMBOL(lustre_swab_lu_fid);
+
+void lustre_swab_ost_id(struct ost_id *oid)
+{
+ if (fid_seq_is_mdt0(oid->oi.oi_seq)) {
+ __swab64s(&oid->oi.oi_id);
+ __swab64s(&oid->oi.oi_seq);
+ } else {
+ lustre_swab_lu_fid(&oid->oi_fid);
+ }
+}
+EXPORT_SYMBOL(lustre_swab_ost_id);
+
+void lustre_swab_llog_id(struct llog_logid *log_id)
+{
+ __swab64s(&log_id->lgl_oi.oi.oi_id);
+ __swab64s(&log_id->lgl_oi.oi.oi_seq);
+ __swab32s(&log_id->lgl_ogen);
+}
+EXPORT_SYMBOL(lustre_swab_llog_id);
+
+void lustre_swab_llogd_body (struct llogd_body *d)
+{
+ print_llogd_body(d);
+ lustre_swab_llog_id(&d->lgd_logid);
+ __swab32s (&d->lgd_ctxt_idx);
+ __swab32s (&d->lgd_llh_flags);
+ __swab32s (&d->lgd_index);
+ __swab32s (&d->lgd_saved_index);
+ __swab32s (&d->lgd_len);
+ __swab64s (&d->lgd_cur_offset);
+ print_llogd_body(d);
+}
+EXPORT_SYMBOL(lustre_swab_llogd_body);
+
+void lustre_swab_llogd_conn_body (struct llogd_conn_body *d)
+{
+ __swab64s (&d->lgdc_gen.mnt_cnt);
+ __swab64s (&d->lgdc_gen.conn_cnt);
+ lustre_swab_llog_id(&d->lgdc_logid);
+ __swab32s (&d->lgdc_ctxt_idx);
+}
+EXPORT_SYMBOL(lustre_swab_llogd_conn_body);
+
+void lustre_swab_ll_fid(struct ll_fid *fid)
+{
+ __swab64s (&fid->id);
+ __swab32s (&fid->generation);
+ __swab32s (&fid->f_type);
+}
+EXPORT_SYMBOL(lustre_swab_ll_fid);
+
+void lustre_swab_lu_seq_range(struct lu_seq_range *range)
+{
+ __swab64s (&range->lsr_start);
+ __swab64s (&range->lsr_end);
+ __swab32s (&range->lsr_index);
+ __swab32s (&range->lsr_flags);
+}
+EXPORT_SYMBOL(lustre_swab_lu_seq_range);
+
+void lustre_swab_llog_rec(struct llog_rec_hdr *rec)
+{
+ struct llog_rec_tail *tail = NULL;
+
+ __swab32s(&rec->lrh_len);
+ __swab32s(&rec->lrh_index);
+ __swab32s(&rec->lrh_type);
+ __swab32s(&rec->lrh_id);
+
+ switch (rec->lrh_type) {
+ case OST_SZ_REC:
+ {
+ struct llog_size_change_rec *lsc =
+ (struct llog_size_change_rec *)rec;
+
+ lustre_swab_ll_fid(&lsc->lsc_fid);
+ __swab32s(&lsc->lsc_ioepoch);
+ tail = &lsc->lsc_tail;
+ break;
+ }
+ case MDS_UNLINK_REC:
+ {
+ struct llog_unlink_rec *lur = (struct llog_unlink_rec *)rec;
+
+ __swab64s(&lur->lur_oid);
+ __swab32s(&lur->lur_oseq);
+ __swab32s(&lur->lur_count);
+ tail = &lur->lur_tail;
+ break;
+ }
+ case MDS_UNLINK64_REC:
+ {
+ struct llog_unlink64_rec *lur =
+ (struct llog_unlink64_rec *)rec;
+
+ lustre_swab_lu_fid(&lur->lur_fid);
+ __swab32s(&lur->lur_count);
+ tail = &lur->lur_tail;
+ break;
+ }
+ case CHANGELOG_REC:
+ {
+ struct llog_changelog_rec *cr = (struct llog_changelog_rec*)rec;
+
+ __swab16s(&cr->cr.cr_namelen);
+ __swab16s(&cr->cr.cr_flags);
+ __swab32s(&cr->cr.cr_type);
+ __swab64s(&cr->cr.cr_index);
+ __swab64s(&cr->cr.cr_prev);
+ __swab64s(&cr->cr.cr_time);
+ lustre_swab_lu_fid(&cr->cr.cr_tfid);
+ lustre_swab_lu_fid(&cr->cr.cr_pfid);
+ if (CHANGELOG_REC_EXTENDED(&cr->cr)) {
+ struct llog_changelog_ext_rec *ext =
+ (struct llog_changelog_ext_rec *)rec;
+
+ lustre_swab_lu_fid(&ext->cr.cr_sfid);
+ lustre_swab_lu_fid(&ext->cr.cr_spfid);
+ tail = &ext->cr_tail;
+ } else {
+ tail = &cr->cr_tail;
+ }
+ break;
+ }
+ case CHANGELOG_USER_REC:
+ {
+ struct llog_changelog_user_rec *cur =
+ (struct llog_changelog_user_rec*)rec;
+
+ __swab32s(&cur->cur_id);
+ __swab64s(&cur->cur_endrec);
+ tail = &cur->cur_tail;
+ break;
+ }
+
+ case HSM_AGENT_REC: {
+ struct llog_agent_req_rec *arr =
+ (struct llog_agent_req_rec *)rec;
+
+ __swab32s(&arr->arr_hai.hai_len);
+ __swab32s(&arr->arr_hai.hai_action);
+ lustre_swab_lu_fid(&arr->arr_hai.hai_fid);
+ lustre_swab_lu_fid(&arr->arr_hai.hai_dfid);
+ __swab64s(&arr->arr_hai.hai_cookie);
+ __swab64s(&arr->arr_hai.hai_extent.offset);
+ __swab64s(&arr->arr_hai.hai_extent.length);
+ __swab64s(&arr->arr_hai.hai_gid);
+ /* no swabbing for opaque data */
+ /* hai_data[0]; */
+ break;
+ }
+
+ case MDS_SETATTR64_REC:
+ {
+ struct llog_setattr64_rec *lsr =
+ (struct llog_setattr64_rec *)rec;
+
+ lustre_swab_ost_id(&lsr->lsr_oi);
+ __swab32s(&lsr->lsr_uid);
+ __swab32s(&lsr->lsr_uid_h);
+ __swab32s(&lsr->lsr_gid);
+ __swab32s(&lsr->lsr_gid_h);
+ tail = &lsr->lsr_tail;
+ break;
+ }
+ case OBD_CFG_REC:
+ /* these are swabbed as they are consumed */
+ break;
+ case LLOG_HDR_MAGIC:
+ {
+ struct llog_log_hdr *llh = (struct llog_log_hdr *)rec;
+
+ __swab64s(&llh->llh_timestamp);
+ __swab32s(&llh->llh_count);
+ __swab32s(&llh->llh_bitmap_offset);
+ __swab32s(&llh->llh_flags);
+ __swab32s(&llh->llh_size);
+ __swab32s(&llh->llh_cat_idx);
+ tail = &llh->llh_tail;
+ break;
+ }
+ case LLOG_LOGID_MAGIC:
+ {
+ struct llog_logid_rec *lid = (struct llog_logid_rec *)rec;
+
+ lustre_swab_llog_id(&lid->lid_id);
+ tail = &lid->lid_tail;
+ break;
+ }
+ case LLOG_GEN_REC:
+ {
+ struct llog_gen_rec *lgr = (struct llog_gen_rec *)rec;
+
+ __swab64s(&lgr->lgr_gen.mnt_cnt);
+ __swab64s(&lgr->lgr_gen.conn_cnt);
+ tail = &lgr->lgr_tail;
+ break;
+ }
+ case LLOG_PAD_MAGIC:
+ break;
+ default:
+ CERROR("Unknown llog rec type %#x swabbing rec %p\n",
+ rec->lrh_type, rec);
+ }
+
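+ /* the tail is swabbed only for record types handled above, i.e. those
+ * that set the tail pointer */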
+ if (tail) {
+ __swab32s(&tail->lrt_len);
+ __swab32s(&tail->lrt_index);
+ }
+}
+EXPORT_SYMBOL(lustre_swab_llog_rec);
+
+static void print_llog_hdr(struct llog_log_hdr *h)
+{
+ CDEBUG(D_OTHER, "llog header: %p\n", h);
+ CDEBUG(D_OTHER, "\tllh_hdr.lrh_index: %#x\n", h->llh_hdr.lrh_index);
+ CDEBUG(D_OTHER, "\tllh_hdr.lrh_len: %#x\n", h->llh_hdr.lrh_len);
+ CDEBUG(D_OTHER, "\tllh_hdr.lrh_type: %#x\n", h->llh_hdr.lrh_type);
+ CDEBUG(D_OTHER, "\tllh_timestamp: "LPX64"\n", h->llh_timestamp);
+ CDEBUG(D_OTHER, "\tllh_count: %#x\n", h->llh_count);
+ CDEBUG(D_OTHER, "\tllh_bitmap_offset: %#x\n", h->llh_bitmap_offset);
+ CDEBUG(D_OTHER, "\tllh_flags: %#x\n", h->llh_flags);
+ CDEBUG(D_OTHER, "\tllh_size: %#x\n", h->llh_size);
+ CDEBUG(D_OTHER, "\tllh_cat_idx: %#x\n", h->llh_cat_idx);
+ CDEBUG(D_OTHER, "\tllh_tail.lrt_index: %#x\n", h->llh_tail.lrt_index);
+ CDEBUG(D_OTHER, "\tllh_tail.lrt_len: %#x\n", h->llh_tail.lrt_len);
+}
+
+void lustre_swab_llog_hdr (struct llog_log_hdr *h)
+{
+ print_llog_hdr(h);
+
+ lustre_swab_llog_rec(&h->llh_hdr);
+
+ print_llog_hdr(h);
+}
+EXPORT_SYMBOL(lustre_swab_llog_hdr);
+
+static void print_lustre_cfg(struct lustre_cfg *lcfg)
+{
+ int i;
+
+ if (!(libcfs_debug & D_OTHER)) /* don't loop on nothing */
+ return;
+ CDEBUG(D_OTHER, "lustre_cfg: %p\n", lcfg);
+ CDEBUG(D_OTHER, "\tlcfg->lcfg_version: %#x\n", lcfg->lcfg_version);
+
+ CDEBUG(D_OTHER, "\tlcfg->lcfg_command: %#x\n", lcfg->lcfg_command);
+ CDEBUG(D_OTHER, "\tlcfg->lcfg_num: %#x\n", lcfg->lcfg_num);
+ CDEBUG(D_OTHER, "\tlcfg->lcfg_flags: %#x\n", lcfg->lcfg_flags);
+ CDEBUG(D_OTHER, "\tlcfg->lcfg_nid: %s\n", libcfs_nid2str(lcfg->lcfg_nid));
+
+ CDEBUG(D_OTHER, "\tlcfg->lcfg_bufcount: %d\n", lcfg->lcfg_bufcount);
+ if (lcfg->lcfg_bufcount < LUSTRE_CFG_MAX_BUFCOUNT)
+ for (i = 0; i < lcfg->lcfg_bufcount; i++)
+ CDEBUG(D_OTHER, "\tlcfg->lcfg_buflens[%d]: %d\n",
+ i, lcfg->lcfg_buflens[i]);
+}
+
+void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg)
+{
+ int i;
+
+ __swab32s(&lcfg->lcfg_version);
+
+ if (lcfg->lcfg_version != LUSTRE_CFG_VERSION) {
+ CERROR("not swabbing lustre_cfg version %#x (expecting %#x)\n",
+ lcfg->lcfg_version, LUSTRE_CFG_VERSION);
+ return;
+ }
+
+ __swab32s(&lcfg->lcfg_command);
+ __swab32s(&lcfg->lcfg_num);
+ __swab32s(&lcfg->lcfg_flags);
+ __swab64s(&lcfg->lcfg_nid);
+ __swab32s(&lcfg->lcfg_bufcount);
+ for (i = 0; i < lcfg->lcfg_bufcount && i < LUSTRE_CFG_MAX_BUFCOUNT; i++)
+ __swab32s(&lcfg->lcfg_buflens[i]);
+
+ print_lustre_cfg(lcfg);
+ return;
+}
+EXPORT_SYMBOL(lustre_swab_lustre_cfg);
+
+/* used only for compatibility with old on-disk cfg_marker data */
+struct cfg_marker32 {
+ __u32 cm_step;
+ __u32 cm_flags;
+ __u32 cm_vers;
+ __u32 padding;
+ __u32 cm_createtime;
+ __u32 cm_canceltime;
+ char cm_tgtname[MTI_NAME_MAXLEN];
+ char cm_comment[MTI_NAME_MAXLEN];
+};
+
+#define MTI_NAMELEN32 (MTI_NAME_MAXLEN - \
+ (sizeof(struct cfg_marker) - sizeof(struct cfg_marker32)))
+
+void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size)
+{
+ struct cfg_marker32 *cm32 = (struct cfg_marker32*)marker;
+
+ if (swab) {
+ __swab32s(&marker->cm_step);
+ __swab32s(&marker->cm_flags);
+ __swab32s(&marker->cm_vers);
+ }
+ if (size == sizeof(*cm32)) {
+ __u32 createtime, canceltime;
+ /* There was a problem with the original declaration of
+ * cfg_marker on 32-bit systems because it used time_t as
+ * a wire protocol structure, and didn't verify this in
+ * wirecheck. We now have to convert the offsets of the
+ * later fields in order to work on 32- and 64-bit systems.
+ *
+ * Fortunately, the cm_comment field has no functional use
+ * so can be sacrificed when converting the timestamp size.
+ *
+ * Overwrite fields from the end first, so they are not
+ * clobbered, and use memmove() instead of memcpy() because
+ * the source and target buffers overlap. bug 16771 */
+ createtime = cm32->cm_createtime;
+ canceltime = cm32->cm_canceltime;
+ memmove(marker->cm_comment, cm32->cm_comment, MTI_NAMELEN32);
+ marker->cm_comment[MTI_NAMELEN32 - 1] = '\0';
+ memmove(marker->cm_tgtname, cm32->cm_tgtname,
+ sizeof(marker->cm_tgtname));
+ if (swab) {
+ __swab32s(&createtime);
+ __swab32s(&canceltime);
+ }
+ marker->cm_createtime = createtime;
+ marker->cm_canceltime = canceltime;
+ CDEBUG(D_CONFIG, "Find old cfg_marker(Srv32b,Clt64b) "
+ "for target %s, converting\n",
+ marker->cm_tgtname);
+ } else if (swab) {
+ __swab64s(&marker->cm_createtime);
+ __swab64s(&marker->cm_canceltime);
+ }
+
+ return;
+}
+EXPORT_SYMBOL(lustre_swab_cfg_marker);
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_test.c b/drivers/staging/lustre/lustre/obdclass/llog_test.c
new file mode 100644
index 0000000..d9e6d12
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/llog_test.c
@@ -0,0 +1,1068 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdclass/llog_test.c
+ *
+ * Author: Phil Schwan <phil@clusterfs.com>
+ * Author: Mikhail Pershin <mike.pershin@intel.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_CLASS
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <obd_class.h>
+#include <lustre_fid.h>
+#include <lustre_log.h>
+
+/* This is slightly more than the number of records that can fit into a
+ * single llog file, because the llog_log_header takes up some of the
+ * space in the first block that cannot be used for the bitmap. */
+#define LLOG_TEST_RECNUM (LLOG_CHUNK_SIZE * 8)
+
+static int llog_test_rand;
+static struct obd_uuid uuid = { .uuid = "test_uuid" };
+static struct llog_logid cat_logid;
+
+struct llog_mini_rec {
+ struct llog_rec_hdr lmr_hdr;
+ struct llog_rec_tail lmr_tail;
+} __attribute__((packed));
+
+static int verify_handle(char *test, struct llog_handle *llh, int num_recs)
+{
+ int i;
+ int last_idx = 0;
+ int active_recs = 0;
+
+ for (i = 0; i < LLOG_BITMAP_BYTES * 8; i++) {
+ if (ext2_test_bit(i, llh->lgh_hdr->llh_bitmap)) {
+ last_idx = i;
+ active_recs++;
+ }
+ }
+
+ if (active_recs != num_recs) {
+ CERROR("%s: expected %d active recs after write, found %d\n",
+ test, num_recs, active_recs);
+ return -ERANGE;
+ }
+
+ if (llh->lgh_hdr->llh_count != num_recs) {
+ CERROR("%s: handle->count is %d, expected %d after write\n",
+ test, llh->lgh_hdr->llh_count, num_recs);
+ return -ERANGE;
+ }
+
+ if (llh->lgh_last_idx < last_idx) {
+ CERROR("%s: handle->last_idx is %d, expected %d after write\n",
+ test, llh->lgh_last_idx, last_idx);
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+/* Test named-log create/open, close */
+static int llog_test_1(const struct lu_env *env,
+ struct obd_device *obd, char *name)
+{
+ struct llog_handle *llh;
+ struct llog_ctxt *ctxt;
+ int rc;
+ int rc2;
+
+ CWARN("1a: create a log with name: %s\n", name);
+ ctxt = llog_get_context(obd, LLOG_TEST_ORIG_CTXT);
+ LASSERT(ctxt);
+
+ rc = llog_open_create(env, ctxt, &llh, NULL, name);
+ if (rc) {
+ CERROR("1a: llog_create with name %s failed: %d\n", name, rc);
+ GOTO(out, rc);
+ }
+ rc = llog_init_handle(env, llh, LLOG_F_IS_PLAIN, &uuid);
+ if (rc) {
+ CERROR("1a: can't init llog handle: %d\n", rc);
+ GOTO(out_close, rc);
+ }
+
+ rc = verify_handle("1", llh, 1);
+
+ CWARN("1b: close newly-created log\n");
+out_close:
+ rc2 = llog_close(env, llh);
+ if (rc2) {
+ CERROR("1b: close log %s failed: %d\n", name, rc2);
+ if (rc == 0)
+ rc = rc2;
+ }
+out:
+ llog_ctxt_put(ctxt);
+ return rc;
+}
+
+/* Test named-log reopen; returns opened log on success */
+static int llog_test_2(const struct lu_env *env, struct obd_device *obd,
+ char *name, struct llog_handle **llh)
+{
+ struct llog_ctxt *ctxt;
+ struct llog_handle *loghandle;
+ struct llog_logid logid;
+ int rc;
+
+ CWARN("2a: re-open a log with name: %s\n", name);
+ ctxt = llog_get_context(obd, LLOG_TEST_ORIG_CTXT);
+ LASSERT(ctxt);
+
+ rc = llog_open(env, ctxt, llh, NULL, name, LLOG_OPEN_EXISTS);
+ if (rc) {
+ CERROR("2a: re-open log with name %s failed: %d\n", name, rc);
+ GOTO(out_put, rc);
+ }
+
+ rc = llog_init_handle(env, *llh, LLOG_F_IS_PLAIN, &uuid);
+ if (rc) {
+ CERROR("2a: can't init llog handle: %d\n", rc);
+ GOTO(out_close_llh, rc);
+ }
+
+ rc = verify_handle("2", *llh, 1);
+ if (rc)
+ GOTO(out_close_llh, rc);
+
+ /* XXX: there is a known issue with test 2b: the MGS is not able to
+ * create an anonymous llog, so exit now to let the following tests
+ * run. This is fixed in the upcoming llog-over-OSD code. */
+ GOTO(out_put, rc);
+
+ CWARN("2b: create a log without specified NAME & LOGID\n");
+ rc = llog_open_create(env, ctxt, &loghandle, NULL, NULL);
+ if (rc) {
+ CERROR("2b: create log failed\n");
+ GOTO(out_close_llh, rc);
+ }
+ rc = llog_init_handle(env, loghandle, LLOG_F_IS_PLAIN, &uuid);
+ if (rc) {
+ CERROR("2b: can't init llog handle: %d\n", rc);
+ GOTO(out_close, rc);
+ }
+
+ logid = loghandle->lgh_id;
+ llog_close(env, loghandle);
+
+ CWARN("2c: re-open the log by LOGID\n");
+ rc = llog_open(env, ctxt, &loghandle, &logid, NULL, LLOG_OPEN_EXISTS);
+ if (rc) {
+ CERROR("2c: re-open log by LOGID failed\n");
+ GOTO(out_close_llh, rc);
+ }
+
+ rc = llog_init_handle(env, loghandle, LLOG_F_IS_PLAIN, &uuid);
+ if (rc) {
+ CERROR("2c: can't init llog handle: %d\n", rc);
+ GOTO(out_close, rc);
+ }
+
+ CWARN("2d: destroy this log\n");
+ rc = llog_destroy(env, loghandle);
+ if (rc)
+ CERROR("2d: destroy log failed\n");
+out_close:
+ llog_close(env, loghandle);
+out_close_llh:
+ if (rc)
+ llog_close(env, *llh);
+out_put:
+ llog_ctxt_put(ctxt);
+
+ return rc;
+}
+
+/* Test record writing, single and in bulk */
+static int llog_test_3(const struct lu_env *env, struct obd_device *obd,
+ struct llog_handle *llh)
+{
+ struct llog_gen_rec lgr;
+ int rc, i;
+ int num_recs = 1; /* 1 for the header */
+
+ lgr.lgr_hdr.lrh_len = lgr.lgr_tail.lrt_len = sizeof(lgr);
+ lgr.lgr_hdr.lrh_type = LLOG_GEN_REC;
+
+ CWARN("3a: write one create_rec\n");
+ rc = llog_write(env, llh, &lgr.lgr_hdr, NULL, 0, NULL, -1);
+ num_recs++;
+ if (rc < 0) {
+ CERROR("3a: write one log record failed: %d\n", rc);
+ return rc;
+ }
+
+ rc = verify_handle("3a", llh, num_recs);
+ if (rc)
+ return rc;
+
+ CWARN("3b: write 10 cfg log records with 8 bytes bufs\n");
+ for (i = 0; i < 10; i++) {
+ struct llog_rec_hdr hdr;
+ char buf[8];
+
+ hdr.lrh_len = 8;
+ hdr.lrh_type = OBD_CFG_REC;
+ memset(buf, 0, sizeof(buf));
+ rc = llog_write(env, llh, &hdr, NULL, 0, buf, -1);
+ if (rc < 0) {
+ CERROR("3b: write 10 records failed at #%d: %d\n",
+ i + 1, rc);
+ return rc;
+ }
+ num_recs++;
+ }
+
+ rc = verify_handle("3b", llh, num_recs);
+ if (rc)
+ return rc;
+
+ CWARN("3c: write 1000 more log records\n");
+ for (i = 0; i < 1000; i++) {
+ rc = llog_write(env, llh, &lgr.lgr_hdr, NULL, 0, NULL, -1);
+ if (rc < 0) {
+ CERROR("3c: write 1000 records failed at #%d: %d\n",
+ i + 1, rc);
+ return rc;
+ }
+ num_recs++;
+ }
+
+ rc = verify_handle("3c", llh, num_recs);
+ if (rc)
+ return rc;
+
+ CWARN("3d: write records until the llog bitmap is full, expect -ENOSPC\n");
+ for (i = 0; i < LLOG_BITMAP_SIZE(llh->lgh_hdr) + 1; i++) {
+ struct llog_rec_hdr hdr;
+ char buf_even[24];
+ char buf_odd[32];
+
+ memset(buf_odd, 0, sizeof(buf_odd));
+ memset(buf_even, 0, sizeof(buf_even));
+ if ((i % 2) == 0) {
+ hdr.lrh_len = 24;
+ hdr.lrh_type = OBD_CFG_REC;
+ rc = llog_write(env, llh, &hdr, NULL, 0, buf_even, -1);
+ } else {
+ hdr.lrh_len = 32;
+ hdr.lrh_type = OBD_CFG_REC;
+ rc = llog_write(env, llh, &hdr, NULL, 0, buf_odd, -1);
+ }
+ if (rc == -ENOSPC) {
+ break;
+ } else if (rc < 0) {
+ CERROR("3d: write recs failed at #%d: %d\n",
+ i + 1, rc);
+ return rc;
+ }
+ num_recs++;
+ }
+ if (rc != -ENOSPC) {
+ CWARN("3d: wrote more records than the bitmap size without hitting -ENOSPC!\n");
+ return -EINVAL;
+ }
+ CWARN("3d: wrote %d records before reaching the end of the llog\n",
+ num_recs);
+
+ rc = verify_handle("3d", llh, num_recs);
+
+ return rc;
+}
+
+/* Test catalogue additions */
+static int llog_test_4(const struct lu_env *env, struct obd_device *obd)
+{
+ struct llog_handle *cath;
+ char name[10];
+ int rc, rc2, i, buflen;
+ struct llog_mini_rec lmr;
+ struct llog_cookie cookie;
+ struct llog_ctxt *ctxt;
+ int num_recs = 0;
+ char *buf;
+ struct llog_rec_hdr rec;
+
+ ctxt = llog_get_context(obd, LLOG_TEST_ORIG_CTXT);
+ LASSERT(ctxt);
+
+ lmr.lmr_hdr.lrh_len = lmr.lmr_tail.lrt_len = LLOG_MIN_REC_SIZE;
+ lmr.lmr_hdr.lrh_type = 0xf00f00;
+
+ sprintf(name, "%x", llog_test_rand + 1);
+ CWARN("4a: create a catalog log with name: %s\n", name);
+ rc = llog_open_create(env, ctxt, &cath, NULL, name);
+ if (rc) {
+ CERROR("4a: llog_create with name %s failed: %d\n", name, rc);
+ GOTO(ctxt_release, rc);
+ }
+ rc = llog_init_handle(env, cath, LLOG_F_IS_CAT, &uuid);
+ if (rc) {
+ CERROR("4a: can't init llog handle: %d\n", rc);
+ GOTO(out, rc);
+ }
+
+ num_recs++;
+ cat_logid = cath->lgh_id;
+
+ CWARN("4b: write 1 record into the catalog\n");
+ rc = llog_cat_add(env, cath, &lmr.lmr_hdr, &cookie, NULL);
+ if (rc != 1) {
+ CERROR("4b: write 1 catalog record failed at: %d\n", rc);
+ GOTO(out, rc);
+ }
+ num_recs++;
+ rc = verify_handle("4b", cath, 2);
+ if (rc)
+ GOTO(out, rc);
+
+ rc = verify_handle("4b", cath->u.chd.chd_current_log, num_recs);
+ if (rc)
+ GOTO(out, rc);
+
+ CWARN("4c: cancel 1 log record\n");
+ rc = llog_cat_cancel_records(env, cath, 1, &cookie);
+ if (rc) {
+ CERROR("4c: cancel 1 catalog based record failed: %d\n", rc);
+ GOTO(out, rc);
+ }
+ num_recs--;
+
+ rc = verify_handle("4c", cath->u.chd.chd_current_log, num_recs);
+ if (rc)
+ GOTO(out, rc);
+
+ CWARN("4d: write %d more log records\n", LLOG_TEST_RECNUM);
+ for (i = 0; i < LLOG_TEST_RECNUM; i++) {
+ rc = llog_cat_add(env, cath, &lmr.lmr_hdr, NULL, NULL);
+ if (rc) {
+ CERROR("4d: write %d records failed at #%d: %d\n",
+ LLOG_TEST_RECNUM, i + 1, rc);
+ GOTO(out, rc);
+ }
+ num_recs++;
+ }
+
+ /* make sure new plain llog appears */
+ rc = verify_handle("4d", cath, 3);
+ if (rc)
+ GOTO(out, rc);
+
+ CWARN("4e: add 5 large records, one record per block\n");
+ buflen = LLOG_CHUNK_SIZE - sizeof(struct llog_rec_hdr) -
+ sizeof(struct llog_rec_tail);
+ OBD_ALLOC(buf, buflen);
+ if (buf == NULL)
+ GOTO(out, rc = -ENOMEM);
+ for (i = 0; i < 5; i++) {
+ rec.lrh_len = buflen;
+ rec.lrh_type = OBD_CFG_REC;
+ rc = llog_cat_add(env, cath, &rec, NULL, buf);
+ if (rc) {
+ CERROR("4e: write 5 records failed at #%d: %d\n",
+ i + 1, rc);
+ GOTO(out_free, rc);
+ }
+ num_recs++;
+ }
+out_free:
+ OBD_FREE(buf, buflen);
+out:
+ CWARN("4f: put newly-created catalog\n");
+ rc2 = llog_cat_close(env, cath);
+ if (rc2) {
+ CERROR("4: close log %s failed: %d\n", name, rc2);
+ if (rc == 0)
+ rc = rc2;
+ }
+ctxt_release:
+ llog_ctxt_put(ctxt);
+ return rc;
+}
+
+static int cat_counter;
+
+static int cat_print_cb(const struct lu_env *env, struct llog_handle *llh,
+ struct llog_rec_hdr *rec, void *data)
+{
+ struct llog_logid_rec *lir = (struct llog_logid_rec *)rec;
+ struct lu_fid fid = {0};
+
+ if (rec->lrh_type != LLOG_LOGID_MAGIC) {
+ CERROR("invalid record in catalog\n");
+ return -EINVAL;
+ }
+
+ logid_to_fid(&lir->lid_id, &fid);
+
+ CWARN("seeing record at index %d - "DFID" in log "DFID"\n",
+ rec->lrh_index, PFID(&fid),
+ PFID(lu_object_fid(&llh->lgh_obj->do_lu)));
+
+ cat_counter++;
+
+ return 0;
+}
+
+static int plain_counter;
+
+static int plain_print_cb(const struct lu_env *env, struct llog_handle *llh,
+ struct llog_rec_hdr *rec, void *data)
+{
+ struct lu_fid fid = {0};
+
+ if (!(llh->lgh_hdr->llh_flags & LLOG_F_IS_PLAIN)) {
+ CERROR("log is not plain\n");
+ return -EINVAL;
+ }
+
+ logid_to_fid(&llh->lgh_id, &fid);
+
+ CDEBUG(D_INFO, "seeing record at index %d in log "DFID"\n",
+ rec->lrh_index, PFID(&fid));
+
+ plain_counter++;
+
+ return 0;
+}
+
+static int cancel_count;
+
+static int llog_cancel_rec_cb(const struct lu_env *env,
+ struct llog_handle *llh,
+ struct llog_rec_hdr *rec, void *data)
+{
+ struct llog_cookie cookie;
+
+ if (!(llh->lgh_hdr->llh_flags & LLOG_F_IS_PLAIN)) {
+ CERROR("log is not plain\n");
+ return -EINVAL;
+ }
+
+ cookie.lgc_lgl = llh->lgh_id;
+ cookie.lgc_index = rec->lrh_index;
+
+ llog_cat_cancel_records(env, llh->u.phd.phd_cat_handle, 1, &cookie);
+ cancel_count++;
+ if (cancel_count == LLOG_TEST_RECNUM)
+ return -LLOG_EEMPTY;
+ return 0;
+}
+
+/* Test log and catalogue processing */
+static int llog_test_5(const struct lu_env *env, struct obd_device *obd)
+{
+ struct llog_handle *llh = NULL;
+ char name[10];
+ int rc, rc2;
+ struct llog_mini_rec lmr;
+ struct llog_ctxt *ctxt;
+
+ ctxt = llog_get_context(obd, LLOG_TEST_ORIG_CTXT);
+ LASSERT(ctxt);
+
+ lmr.lmr_hdr.lrh_len = lmr.lmr_tail.lrt_len = LLOG_MIN_REC_SIZE;
+ lmr.lmr_hdr.lrh_type = 0xf00f00;
+
+ CWARN("5a: re-open catalog by id\n");
+ rc = llog_open(env, ctxt, &llh, &cat_logid, NULL, LLOG_OPEN_EXISTS);
+ if (rc) {
+ CERROR("5a: llog_create with logid failed: %d\n", rc);
+ GOTO(out_put, rc);
+ }
+
+ rc = llog_init_handle(env, llh, LLOG_F_IS_CAT, &uuid);
+ if (rc) {
+ CERROR("5a: can't init llog handle: %d\n", rc);
+ GOTO(out, rc);
+ }
+
+ CWARN("5b: print the catalog entries.. we expect 2\n");
+ cat_counter = 0;
+ rc = llog_process(env, llh, cat_print_cb, "test 5", NULL);
+ if (rc) {
+ CERROR("5b: process with cat_print_cb failed: %d\n", rc);
+ GOTO(out, rc);
+ }
+ if (cat_counter != 2) {
+ CERROR("5b: %d entries in catalog\n", cat_counter);
+ GOTO(out, rc = -EINVAL);
+ }
+
+ CWARN("5c: Cancel %d records, see one log zapped\n", LLOG_TEST_RECNUM);
+ cancel_count = 0;
+ rc = llog_cat_process(env, llh, llog_cancel_rec_cb, "foobar", 0, 0);
+ if (rc != -LLOG_EEMPTY) {
+ CERROR("5c: process with cat_cancel_cb failed: %d\n", rc);
+ GOTO(out, rc);
+ }
+
+ CWARN("5c: print the catalog entries.. we expect 1\n");
+ cat_counter = 0;
+ rc = llog_process(env, llh, cat_print_cb, "test 5", NULL);
+ if (rc) {
+ CERROR("5c: process with cat_print_cb failed: %d\n", rc);
+ GOTO(out, rc);
+ }
+ if (cat_counter != 1) {
+ CERROR("5c: %d entries in catalog\n", cat_counter);
+ GOTO(out, rc = -EINVAL);
+ }
+
+ CWARN("5d: add 1 record to the log with many canceled empty pages\n");
+ rc = llog_cat_add(env, llh, &lmr.lmr_hdr, NULL, NULL);
+ if (rc) {
+ CERROR("5d: add record to the log with many canceled empty "
+ "pages failed\n");
+ GOTO(out, rc);
+ }
+
+ CWARN("5e: print plain log entries.. expect 6\n");
+ plain_counter = 0;
+ rc = llog_cat_process(env, llh, plain_print_cb, "foobar", 0, 0);
+ if (rc) {
+ CERROR("5e: process with plain_print_cb failed: %d\n", rc);
+ GOTO(out, rc);
+ }
+ if (plain_counter != 6) {
+ CERROR("5e: found %d records\n", plain_counter);
+ GOTO(out, rc = -EINVAL);
+ }
+
+ CWARN("5f: print plain log entries reversely.. expect 6\n");
+ plain_counter = 0;
+ rc = llog_cat_reverse_process(env, llh, plain_print_cb, "foobar");
+ if (rc) {
+ CERROR("5f: reverse process with plain_print_cb failed: %d\n", rc);
+ GOTO(out, rc);
+ }
+ if (plain_counter != 6) {
+ CERROR("5f: found %d records\n", plain_counter);
+ GOTO(out, rc = -EINVAL);
+ }
+
+out:
+ CWARN("5g: close re-opened catalog\n");
+ rc2 = llog_cat_close(env, llh);
+ if (rc2) {
+ CERROR("5g: close log %s failed: %d\n", name, rc2);
+ if (rc == 0)
+ rc = rc2;
+ }
+out_put:
+ llog_ctxt_put(ctxt);
+
+ return rc;
+}
+
+/* Test client api; open log by name and process */
+static int llog_test_6(const struct lu_env *env, struct obd_device *obd,
+ char *name)
+{
+ struct obd_device *mgc_obd;
+ struct llog_ctxt *ctxt;
+ struct obd_uuid *mgs_uuid;
+ struct obd_export *exp;
+ struct obd_uuid uuid = { "LLOG_TEST6_UUID" };
+ struct llog_handle *llh = NULL;
+ struct llog_ctxt *nctxt;
+ int rc, rc2;
+
+ ctxt = llog_get_context(obd, LLOG_TEST_ORIG_CTXT);
+ LASSERT(ctxt);
+ mgs_uuid = &ctxt->loc_exp->exp_obd->obd_uuid;
+
+ CWARN("6a: re-open log %s using client API\n", name);
+ mgc_obd = class_find_client_obd(mgs_uuid, LUSTRE_MGC_NAME, NULL);
+ if (mgc_obd == NULL) {
+ CERROR("6a: no MGC devices connected to %s found.\n",
+ mgs_uuid->uuid);
+ GOTO(ctxt_release, rc = -ENOENT);
+ }
+
+ rc = obd_connect(NULL, &exp, mgc_obd, &uuid,
+ NULL /* obd_connect_data */, NULL);
+ if (rc != -EALREADY) {
+ CERROR("6a: connect on connected MGC (%s) failed to return -EALREADY\n",
+ mgc_obd->obd_name);
+ if (rc == 0)
+ obd_disconnect(exp);
+ GOTO(ctxt_release, rc = -EINVAL);
+ }
+
+ nctxt = llog_get_context(mgc_obd, LLOG_CONFIG_REPL_CTXT);
+ rc = llog_open(env, nctxt, &llh, NULL, name, LLOG_OPEN_EXISTS);
+ if (rc) {
+ CERROR("6a: llog_open failed %d\n", rc);
+ GOTO(nctxt_put, rc);
+ }
+
+ rc = llog_init_handle(env, llh, LLOG_F_IS_PLAIN, NULL);
+ if (rc) {
+ CERROR("6a: llog_init_handle failed %d\n", rc);
+ GOTO(parse_out, rc);
+ }
+
+ plain_counter = 1; /* llog header is first record */
+ CWARN("6b: process log %s using client API\n", name);
+ rc = llog_process(env, llh, plain_print_cb, NULL, NULL);
+ if (rc)
+ CERROR("6b: llog_process failed %d\n", rc);
+ CWARN("6b: processed %d records\n", plain_counter);
+
+ rc = verify_handle("6b", llh, plain_counter);
+ if (rc)
+ GOTO(parse_out, rc);
+
+ plain_counter = 1; /* llog header is first record */
+ CWARN("6c: process log %s reversely using client API\n", name);
+ rc = llog_reverse_process(env, llh, plain_print_cb, NULL, NULL);
+ if (rc)
+ CERROR("6c: llog_reverse_process failed %d\n", rc);
+ CWARN("6c: processed %d records\n", plain_counter);
+
+ rc = verify_handle("6c", llh, plain_counter);
+ if (rc)
+ GOTO(parse_out, rc);
+
+parse_out:
+ rc2 = llog_close(env, llh);
+ if (rc2) {
+ CERROR("6: llog_close failed: rc = %d\n", rc2);
+ if (rc == 0)
+ rc = rc2;
+ }
+nctxt_put:
+ llog_ctxt_put(nctxt);
+ctxt_release:
+ llog_ctxt_put(ctxt);
+ return rc;
+}
+
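+/* Scratch record shared by the test 7 sub-tests: each sub-test below fills
+ * in one union member (header/tail lengths and record type) and then calls
+ * llog_test_7_sub(), which writes that record until the llog bitmap fills
+ * up and then processes the log forward and in reverse. */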
+static union {
+ struct llog_rec_hdr lrh; /* common header */
+ struct llog_logid_rec llr; /* LLOG_LOGID_MAGIC */
+ struct llog_unlink64_rec lur; /* MDS_UNLINK64_REC */
+ struct llog_setattr64_rec lsr64; /* MDS_SETATTR64_REC */
+ struct llog_size_change_rec lscr; /* OST_SZ_REC */
+ struct llog_changelog_rec lcr; /* CHANGELOG_REC */
+ struct llog_changelog_user_rec lcur; /* CHANGELOG_USER_REC */
+ struct llog_gen_rec lgr; /* LLOG_GEN_REC */
+} llog_records;
+
+static int test_7_print_cb(const struct lu_env *env, struct llog_handle *llh,
+ struct llog_rec_hdr *rec, void *data)
+{
+ struct lu_fid fid = {0};
+
+ logid_to_fid(&llh->lgh_id, &fid);
+
+ CDEBUG(D_OTHER, "record type %#x at index %d in log "DFID"\n",
+ rec->lrh_type, rec->lrh_index, PFID(&fid));
+
+ plain_counter++;
+ return 0;
+}
+
+static int test_7_cancel_cb(const struct lu_env *env, struct llog_handle *llh,
+ struct llog_rec_hdr *rec, void *data)
+{
+ plain_counter++;
+ /* test LLOG_DEL_RECORD is working */
+ return LLOG_DEL_RECORD;
+}
+
+static int llog_test_7_sub(const struct lu_env *env, struct llog_ctxt *ctxt)
+{
+ struct llog_handle *llh;
+ int rc = 0, i, process_count;
+ int num_recs = 0;
+
+ rc = llog_open_create(env, ctxt, &llh, NULL, NULL);
+ if (rc) {
+ CERROR("7_sub: create log failed\n");
+ return rc;
+ }
+
+ rc = llog_init_handle(env, llh,
+ LLOG_F_IS_PLAIN | LLOG_F_ZAP_WHEN_EMPTY,
+ &uuid);
+ if (rc) {
+ CERROR("7_sub: can't init llog handle: %d\n", rc);
+ GOTO(out_close, rc);
+ }
+ for (i = 0; i < LLOG_BITMAP_SIZE(llh->lgh_hdr); i++) {
+ rc = llog_write(env, llh, &llog_records.lrh, NULL, 0,
+ NULL, -1);
+ if (rc == -ENOSPC) {
+ break;
+ } else if (rc < 0) {
+ CERROR("7_sub: write recs failed at #%d: %d\n",
+ i + 1, rc);
+ GOTO(out_close, rc);
+ }
+ num_recs++;
+ }
+ if (rc != -ENOSPC) {
+ CWARN("7_sub: wrote more records than the bitmap size without hitting -ENOSPC!\n");
+ GOTO(out_close, rc = -EINVAL);
+ }
+
+ rc = verify_handle("7_sub", llh, num_recs + 1);
+ if (rc) {
+ CERROR("7_sub: verify handle failed: %d\n", rc);
+ GOTO(out_close, rc);
+ }
+ if (num_recs < LLOG_BITMAP_SIZE(llh->lgh_hdr) - 1)
+ CWARN("7_sub: records are not aligned, wrote %d of %u\n",
+ num_recs, LLOG_BITMAP_SIZE(llh->lgh_hdr) - 1);
+
+ plain_counter = 0;
+ rc = llog_process(env, llh, test_7_print_cb, "test 7", NULL);
+ if (rc) {
+ CERROR("7_sub: llog process failed: %d\n", rc);
+ GOTO(out_close, rc);
+ }
+ process_count = plain_counter;
+ if (process_count != num_recs) {
+ CERROR("7_sub: processed %d records from %d total\n",
+ process_count, num_recs);
+ GOTO(out_close, rc = -EINVAL);
+ }
+
+ plain_counter = 0;
+ rc = llog_reverse_process(env, llh, test_7_cancel_cb, "test 7", NULL);
+ if (rc) {
+ CERROR("7_sub: reverse llog process failed: %d\n", rc);
+ GOTO(out_close, rc);
+ }
+ if (process_count != plain_counter) {
+ CERROR("7_sub: reverse/direct processing found different number of records: %d/%d\n",
+ plain_counter, process_count);
+ GOTO(out_close, rc = -EINVAL);
+ }
+ if (llog_exist(llh)) {
+ CERROR("7_sub: llog exists but should be zapped\n");
+ GOTO(out_close, rc = -EEXIST);
+ }
+
+ rc = verify_handle("7_sub", llh, 1);
+out_close:
+ if (rc)
+ llog_destroy(env, llh);
+ llog_close(env, llh);
+ return rc;
+}
+
+/* Test all llog records writing and processing */
+static int llog_test_7(const struct lu_env *env, struct obd_device *obd)
+{
+ struct llog_ctxt *ctxt;
+ int rc;
+
+ ctxt = llog_get_context(obd, LLOG_TEST_ORIG_CTXT);
+
+ CWARN("7a: test llog_logid_rec\n");
+ llog_records.llr.lid_hdr.lrh_len = sizeof(llog_records.llr);
+ llog_records.llr.lid_tail.lrt_len = sizeof(llog_records.llr);
+ llog_records.llr.lid_hdr.lrh_type = LLOG_LOGID_MAGIC;
+
+ rc = llog_test_7_sub(env, ctxt);
+ if (rc) {
+ CERROR("7a: llog_logid_rec test failed\n");
+ GOTO(out, rc);
+ }
+
+ CWARN("7b: test llog_unlink64_rec\n");
+ llog_records.lur.lur_hdr.lrh_len = sizeof(llog_records.lur);
+ llog_records.lur.lur_tail.lrt_len = sizeof(llog_records.lur);
+ llog_records.lur.lur_hdr.lrh_type = MDS_UNLINK64_REC;
+
+ rc = llog_test_7_sub(env, ctxt);
+ if (rc) {
+ CERROR("7b: llog_unlink_rec test failed\n");
+ GOTO(out, rc);
+ }
+
+ CWARN("7c: test llog_setattr64_rec\n");
+ llog_records.lsr64.lsr_hdr.lrh_len = sizeof(llog_records.lsr64);
+ llog_records.lsr64.lsr_tail.lrt_len = sizeof(llog_records.lsr64);
+ llog_records.lsr64.lsr_hdr.lrh_type = MDS_SETATTR64_REC;
+
+ rc = llog_test_7_sub(env, ctxt);
+ if (rc) {
+ CERROR("7c: llog_setattr64_rec test failed\n");
+ GOTO(out, rc);
+ }
+
+ CWARN("7d: test llog_size_change_rec\n");
+ llog_records.lscr.lsc_hdr.lrh_len = sizeof(llog_records.lscr);
+ llog_records.lscr.lsc_tail.lrt_len = sizeof(llog_records.lscr);
+ llog_records.lscr.lsc_hdr.lrh_type = OST_SZ_REC;
+
+ rc = llog_test_7_sub(env, ctxt);
+ if (rc) {
+ CERROR("7d: llog_size_change_rec test failed\n");
+ GOTO(out, rc);
+ }
+
+ CWARN("7e: test llog_changelog_rec\n");
+ llog_records.lcr.cr_hdr.lrh_len = sizeof(llog_records.lcr);
+ llog_records.lcr.cr_tail.lrt_len = sizeof(llog_records.lcr);
+ llog_records.lcr.cr_hdr.lrh_type = CHANGELOG_REC;
+
+ rc = llog_test_7_sub(env, ctxt);
+ if (rc) {
+ CERROR("7e: llog_changelog_rec test failed\n");
+ GOTO(out, rc);
+ }
+
+ CWARN("7f: test llog_changelog_user_rec\n");
+ llog_records.lcur.cur_hdr.lrh_len = sizeof(llog_records.lcur);
+ llog_records.lcur.cur_tail.lrt_len = sizeof(llog_records.lcur);
+ llog_records.lcur.cur_hdr.lrh_type = CHANGELOG_USER_REC;
+
+ rc = llog_test_7_sub(env, ctxt);
+ if (rc) {
+ CERROR("7f: llog_changelog_user_rec test failed\n");
+ GOTO(out, rc);
+ }
+
+ CWARN("7g: test llog_gen_rec\n");
+ llog_records.lgr.lgr_hdr.lrh_len = sizeof(llog_records.lgr);
+ llog_records.lgr.lgr_tail.lrt_len = sizeof(llog_records.lgr);
+ llog_records.lgr.lgr_hdr.lrh_type = LLOG_GEN_REC;
+
+ rc = llog_test_7_sub(env, ctxt);
+ if (rc) {
+ CERROR("7g: llog_gen_rec test failed\n");
+ GOTO(out, rc);
+ }
+out:
+ llog_ctxt_put(ctxt);
+ return rc;
+}
+
+/* -------------------------------------------------------------------------
+ * Tests above, boring obd functions below
+ * ------------------------------------------------------------------------- */
+static int llog_run_tests(const struct lu_env *env, struct obd_device *obd)
+{
+ struct llog_handle *llh = NULL;
+ struct llog_ctxt *ctxt;
+ int rc, err;
+ char name[10];
+
+ ctxt = llog_get_context(obd, LLOG_TEST_ORIG_CTXT);
+ LASSERT(ctxt);
+
+ sprintf(name, "%x", llog_test_rand);
+
+ rc = llog_test_1(env, obd, name);
+ if (rc)
+ GOTO(cleanup_ctxt, rc);
+
+ rc = llog_test_2(env, obd, name, &llh);
+ if (rc)
+ GOTO(cleanup_ctxt, rc);
+
+ rc = llog_test_3(env, obd, llh);
+ if (rc)
+ GOTO(cleanup, rc);
+
+ rc = llog_test_4(env, obd);
+ if (rc)
+ GOTO(cleanup, rc);
+
+ rc = llog_test_5(env, obd);
+ if (rc)
+ GOTO(cleanup, rc);
+
+ rc = llog_test_6(env, obd, name);
+ if (rc)
+ GOTO(cleanup, rc);
+
+ rc = llog_test_7(env, obd);
+ if (rc)
+ GOTO(cleanup, rc);
+
+cleanup:
+ err = llog_destroy(env, llh);
+ if (err)
+ CERROR("cleanup: llog_destroy failed: %d\n", err);
+ llog_close(env, llh);
+ if (rc == 0)
+ rc = err;
+cleanup_ctxt:
+ llog_ctxt_put(ctxt);
+ return rc;
+}
+
+#ifdef LPROCFS
+static struct lprocfs_vars lprocfs_llog_test_obd_vars[] = { {0} };
+static struct lprocfs_vars lprocfs_llog_test_module_vars[] = { {0} };
+static void lprocfs_llog_test_init_vars(struct lprocfs_static_vars *lvars)
+{
+ lvars->module_vars = lprocfs_llog_test_module_vars;
+ lvars->obd_vars = lprocfs_llog_test_obd_vars;
+}
+#endif
+
+static int llog_test_cleanup(struct obd_device *obd)
+{
+ struct obd_device *tgt;
+ struct lu_env env;
+ int rc;
+
+ rc = lu_env_init(&env, LCT_LOCAL | LCT_MG_THREAD);
+ if (rc)
+ return rc;
+
+ tgt = obd->obd_lvfs_ctxt.dt->dd_lu_dev.ld_obd;
+ rc = llog_cleanup(&env, llog_get_context(tgt, LLOG_TEST_ORIG_CTXT));
+ if (rc)
+ CERROR("llog_test cleanup failed: %d\n", rc);
+ lu_env_fini(&env);
+ return rc;
+}
+
+static int llog_test_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
+{
+ struct obd_device *tgt;
+ struct llog_ctxt *ctxt;
+ struct dt_object *o;
+ struct lu_env env;
+ struct lu_context test_session;
+ int rc;
+
+ if (lcfg->lcfg_bufcount < 2) {
+ CERROR("requires a TARGET OBD name\n");
+ return -EINVAL;
+ }
+
+ if (lcfg->lcfg_buflens[1] < 1) {
+ CERROR("requires a TARGET OBD name\n");
+ return -EINVAL;
+ }
+
+ /* disk obd */
+ tgt = class_name2obd(lustre_cfg_string(lcfg, 1));
+ if (!tgt || !tgt->obd_attached || !tgt->obd_set_up) {
+ CERROR("target device not attached or not set up (%s)\n",
+ lustre_cfg_string(lcfg, 1));
+ return -EINVAL;
+ }
+
+ rc = lu_env_init(&env, LCT_LOCAL | LCT_MG_THREAD);
+ if (rc)
+ return rc;
+
+ rc = lu_context_init(&test_session, LCT_SESSION);
+ if (rc)
+ GOTO(cleanup_env, rc);
+ test_session.lc_thread = (struct ptlrpc_thread *)current;
+ lu_context_enter(&test_session);
+ env.le_ses = &test_session;
+
+ CWARN("Setup llog-test device over %s device\n",
+ lustre_cfg_string(lcfg, 1));
+
+ OBD_SET_CTXT_MAGIC(&obd->obd_lvfs_ctxt);
+ obd->obd_lvfs_ctxt.dt = lu2dt_dev(tgt->obd_lu_dev);
+
+ rc = llog_setup(&env, tgt, &tgt->obd_olg, LLOG_TEST_ORIG_CTXT, tgt,
+ &llog_osd_ops);
+ if (rc)
+ GOTO(cleanup_session, rc);
+
+ /* use MGS llog dir for tests */
+ ctxt = llog_get_context(tgt, LLOG_CONFIG_ORIG_CTXT);
+ LASSERT(ctxt);
+ o = ctxt->loc_dir;
+ llog_ctxt_put(ctxt);
+
+ ctxt = llog_get_context(tgt, LLOG_TEST_ORIG_CTXT);
+ LASSERT(ctxt);
+ ctxt->loc_dir = o;
+ llog_ctxt_put(ctxt);
+
+ llog_test_rand = cfs_rand();
+
+ rc = llog_run_tests(&env, tgt);
+ if (rc)
+ llog_test_cleanup(obd);
+cleanup_session:
+ lu_context_exit(&test_session);
+ lu_context_fini(&test_session);
+cleanup_env:
+ lu_env_fini(&env);
+ return rc;
+}
+
+static struct obd_ops llog_obd_ops = {
+ .o_owner = THIS_MODULE,
+ .o_setup = llog_test_setup,
+ .o_cleanup = llog_test_cleanup,
+};
+
+static int __init llog_test_init(void)
+{
+ struct lprocfs_static_vars lvars;
+
+ lprocfs_llog_test_init_vars(&lvars);
+ return class_register_type(&llog_obd_ops, NULL,
+ lvars.module_vars, "llog_test", NULL);
+}
+
+static void __exit llog_test_exit(void)
+{
+ class_unregister_type("llog_test");
+}
+
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("llog test module");
+MODULE_LICENSE("GPL");
+
+module_init(llog_test_init);
+module_exit(llog_test_exit);
diff --git a/drivers/staging/lustre/lustre/obdclass/local_storage.c b/drivers/staging/lustre/lustre/obdclass/local_storage.c
new file mode 100644
index 0000000..cc19fba
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/local_storage.c
@@ -0,0 +1,891 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details. A copy is
+ * included in the COPYING file that accompanied this code.
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * lustre/obdclass/local_storage.c
+ *
+ * Local storage for file/objects with fid generation. Works on top of OSD.
+ *
+ * Author: Mikhail Pershin <mike.pershin@intel.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_CLASS
+
+#include "local_storage.h"
+
+/* all initialized local storages on this node are linked on this */
+static LIST_HEAD(ls_list_head);
+static DEFINE_MUTEX(ls_list_mutex);
+
+static int ls_object_init(const struct lu_env *env, struct lu_object *o,
+ const struct lu_object_conf *unused)
+{
+ struct ls_device *ls;
+ struct lu_object *below;
+ struct lu_device *under;
+
+ ls = container_of0(o->lo_dev, struct ls_device, ls_top_dev.dd_lu_dev);
+ under = &ls->ls_osd->dd_lu_dev;
+ below = under->ld_ops->ldo_object_alloc(env, o->lo_header, under);
+ if (below == NULL)
+ return -ENOMEM;
+
+ lu_object_add(o, below);
+
+ return 0;
+}
+
+static void ls_object_free(const struct lu_env *env, struct lu_object *o)
+{
+ struct ls_object *obj = lu2ls_obj(o);
+ struct lu_object_header *h = o->lo_header;
+
+ dt_object_fini(&obj->ls_obj);
+ lu_object_header_fini(h);
+ OBD_FREE_PTR(obj);
+}
+
+struct lu_object_operations ls_lu_obj_ops = {
+ .loo_object_init = ls_object_init,
+ .loo_object_free = ls_object_free,
+};
+
+struct lu_object *ls_object_alloc(const struct lu_env *env,
+ const struct lu_object_header *_h,
+ struct lu_device *d)
+{
+ struct lu_object_header *h;
+ struct ls_object *o;
+ struct lu_object *l;
+
+ LASSERT(_h == NULL);
+
+ OBD_ALLOC_PTR(o);
+ if (o != NULL) {
+ l = &o->ls_obj.do_lu;
+ h = &o->ls_header;
+
+ lu_object_header_init(h);
+ dt_object_init(&o->ls_obj, h, d);
+ lu_object_add_top(h, l);
+
+ l->lo_ops = &ls_lu_obj_ops;
+
+ return l;
+ } else {
+ return NULL;
+ }
+}
+
+static struct lu_device_operations ls_lu_dev_ops = {
+ .ldo_object_alloc = ls_object_alloc
+};
+
+static struct ls_device *__ls_find_dev(struct dt_device *dev)
+{
+ struct ls_device *ls, *ret = NULL;
+
+ list_for_each_entry(ls, &ls_list_head, ls_linkage) {
+ if (ls->ls_osd == dev) {
+ atomic_inc(&ls->ls_refcount);
+ ret = ls;
+ break;
+ }
+ }
+ return ret;
+}
+
+struct ls_device *ls_find_dev(struct dt_device *dev)
+{
+ struct ls_device *ls;
+
+ mutex_lock(&ls_list_mutex);
+ ls = __ls_find_dev(dev);
+ mutex_unlock(&ls_list_mutex);
+
+ return ls;
+}
+
+static struct lu_device_type_operations ls_device_type_ops = {
+ .ldto_start = NULL,
+ .ldto_stop = NULL,
+};
+
+static struct lu_device_type ls_lu_type = {
+ .ldt_name = "local_storage",
+ .ldt_ops = &ls_device_type_ops,
+};
+
+struct ls_device *ls_device_get(struct dt_device *dev)
+{
+ struct ls_device *ls;
+
+ mutex_lock(&ls_list_mutex);
+ ls = __ls_find_dev(dev);
+ if (ls)
+ GOTO(out_ls, ls);
+
+ /* not found, then create */
+ OBD_ALLOC_PTR(ls);
+ if (ls == NULL)
+ GOTO(out_ls, ls = ERR_PTR(-ENOMEM));
+
+ atomic_set(&ls->ls_refcount, 1);
+ INIT_LIST_HEAD(&ls->ls_los_list);
+ mutex_init(&ls->ls_los_mutex);
+
+ ls->ls_osd = dev;
+
+ LASSERT(dev->dd_lu_dev.ld_site);
+ lu_device_init(&ls->ls_top_dev.dd_lu_dev, &ls_lu_type);
+ ls->ls_top_dev.dd_lu_dev.ld_ops = &ls_lu_dev_ops;
+ ls->ls_top_dev.dd_lu_dev.ld_site = dev->dd_lu_dev.ld_site;
+
+ /* finally add ls to the list */
+ list_add(&ls->ls_linkage, &ls_list_head);
+out_ls:
+ mutex_unlock(&ls_list_mutex);
+ return ls;
+}
+
+void ls_device_put(const struct lu_env *env, struct ls_device *ls)
+{
+ LASSERT(env);
+ if (!atomic_dec_and_test(&ls->ls_refcount))
+ return;
+
+ mutex_lock(&ls_list_mutex);
+ if (atomic_read(&ls->ls_refcount) == 0) {
+ LASSERT(list_empty(&ls->ls_los_list));
+ list_del(&ls->ls_linkage);
+ lu_site_purge(env, ls->ls_top_dev.dd_lu_dev.ld_site, ~0);
+ lu_device_fini(&ls->ls_top_dev.dd_lu_dev);
+ OBD_FREE_PTR(ls);
+ }
+ mutex_unlock(&ls_list_mutex);
+}
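+
+/* Reference-counting sketch (illustrative only): every successful
+ * ls_device_get() is balanced by ls_device_put() once the caller is done,
+ * as the *_with_fid() helpers and local_oid_storage_init() below do:
+ *
+ *    ls = ls_device_get(dt);
+ *    if (IS_ERR(ls))
+ *            return PTR_ERR(ls);
+ *    ...
+ *    ls_device_put(env, ls);
+ */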
+
+/**
+ * local file fid generation
+ */
+int local_object_fid_generate(const struct lu_env *env,
+ struct local_oid_storage *los,
+ struct lu_fid *fid)
+{
+ LASSERT(los->los_dev);
+ LASSERT(los->los_obj);
+
+ /* take next OID */
+
+ /* to keep OIDs unique after a reboot we store the latest
+ * generated fid atomically with object creation,
+ * see local_object_create() */
+
+ mutex_lock(&los->los_id_lock);
+ fid->f_seq = los->los_seq;
+ fid->f_oid = ++los->los_last_oid;
+ fid->f_ver = 0;
+ mutex_unlock(&los->los_id_lock);
+
+ return 0;
+}
+
+int local_object_declare_create(const struct lu_env *env,
+ struct local_oid_storage *los,
+ struct dt_object *o, struct lu_attr *attr,
+ struct dt_object_format *dof,
+ struct thandle *th)
+{
+ struct dt_thread_info *dti = dt_info(env);
+ int rc;
+
+ /* update fid generation file */
+ if (los != NULL) {
+ LASSERT(dt_object_exists(los->los_obj));
+ rc = dt_declare_record_write(env, los->los_obj,
+ sizeof(struct los_ondisk), 0, th);
+ if (rc)
+ return rc;
+ }
+
+ rc = dt_declare_create(env, o, attr, NULL, dof, th);
+ if (rc)
+ return rc;
+
+ dti->dti_lb.lb_buf = NULL;
+ dti->dti_lb.lb_len = sizeof(dti->dti_lma);
+ rc = dt_declare_xattr_set(env, o, &dti->dti_lb, XATTR_NAME_LMA, 0, th);
+
+ return rc;
+}
+
+int local_object_create(const struct lu_env *env,
+ struct local_oid_storage *los,
+ struct dt_object *o, struct lu_attr *attr,
+ struct dt_object_format *dof, struct thandle *th)
+{
+ struct dt_thread_info *dti = dt_info(env);
+ obd_id lastid;
+ int rc;
+
+ rc = dt_create(env, o, attr, NULL, dof, th);
+ if (rc)
+ return rc;
+
+ if (los == NULL)
+ return rc;
+
+ LASSERT(los->los_obj);
+ LASSERT(dt_object_exists(los->los_obj));
+
+ /* many threads can update this concurrently, serialize
+ * them here to avoid the race where one thread
+ * takes the value first, but writes it last */
+ mutex_lock(&los->los_id_lock);
+
+ /* update local oid number on disk so that
+ * we know the last one used after reboot */
+ lastid = cpu_to_le64(los->los_last_oid);
+
+ dti->dti_off = 0;
+ dti->dti_lb.lb_buf = &lastid;
+ dti->dti_lb.lb_len = sizeof(lastid);
+ rc = dt_record_write(env, los->los_obj, &dti->dti_lb, &dti->dti_off,
+ th);
+ mutex_unlock(&los->los_id_lock);
+
+ return rc;
+}
+
+/*
+ * Create local named object (file, directory or index) in parent directory.
+ */
+struct dt_object *__local_file_create(const struct lu_env *env,
+ const struct lu_fid *fid,
+ struct local_oid_storage *los,
+ struct ls_device *ls,
+ struct dt_object *parent,
+ const char *name, struct lu_attr *attr,
+ struct dt_object_format *dof)
+{
+ struct dt_thread_info *dti = dt_info(env);
+ struct dt_object *dto;
+ struct thandle *th;
+ int rc;
+
+ dto = ls_locate(env, ls, fid);
+ if (unlikely(IS_ERR(dto)))
+ return dto;
+
+ LASSERT(dto != NULL);
+ if (dt_object_exists(dto))
+ GOTO(out, rc = -EEXIST);
+
+ th = dt_trans_create(env, ls->ls_osd);
+ if (IS_ERR(th))
+ GOTO(out, rc = PTR_ERR(th));
+
+ rc = local_object_declare_create(env, los, dto, attr, dof, th);
+ if (rc)
+ GOTO(trans_stop, rc);
+
+ if (dti->dti_dof.dof_type == DFT_DIR) {
+ dt_declare_ref_add(env, dto, th);
+ dt_declare_ref_add(env, parent, th);
+ }
+
+ rc = dt_declare_insert(env, parent, (void *)fid, (void *)name, th);
+ if (rc)
+ GOTO(trans_stop, rc);
+
+ rc = dt_trans_start_local(env, ls->ls_osd, th);
+ if (rc)
+ GOTO(trans_stop, rc);
+
+ dt_write_lock(env, dto, 0);
+ if (dt_object_exists(dto))
+ GOTO(unlock, rc = 0);
+
+ CDEBUG(D_OTHER, "create new object "DFID"\n",
+ PFID(lu_object_fid(&dto->do_lu)));
+ rc = local_object_create(env, los, dto, attr, dof, th);
+ if (rc)
+ GOTO(unlock, rc);
+ LASSERT(dt_object_exists(dto));
+
+ if (dti->dti_dof.dof_type == DFT_DIR) {
+ if (!dt_try_as_dir(env, dto))
+ GOTO(destroy, rc = -ENOTDIR);
+ /* Add "." and ".." for newly created dir */
+ rc = dt_insert(env, dto, (void *)fid, (void *)".", th,
+ BYPASS_CAPA, 1);
+ if (rc)
+ GOTO(destroy, rc);
+ dt_ref_add(env, dto, th);
+ rc = dt_insert(env, dto, (void *)lu_object_fid(&parent->do_lu),
+ (void *)"..", th, BYPASS_CAPA, 1);
+ if (rc)
+ GOTO(destroy, rc);
+ }
+
+ dt_write_lock(env, parent, 0);
+ rc = dt_insert(env, parent, (const struct dt_rec *)fid,
+ (const struct dt_key *)name, th, BYPASS_CAPA, 1);
+ if (dti->dti_dof.dof_type == DFT_DIR)
+ dt_ref_add(env, parent, th);
+ dt_write_unlock(env, parent);
+ if (rc)
+ GOTO(destroy, rc);
+destroy:
+ if (rc)
+ dt_destroy(env, dto, th);
+unlock:
+ dt_write_unlock(env, dto);
+trans_stop:
+ dt_trans_stop(env, ls->ls_osd, th);
+out:
+ if (rc) {
+ lu_object_put_nocache(env, &dto->do_lu);
+ dto = ERR_PTR(rc);
+ }
+ return dto;
+}
+
+/*
+ * Look up and create (if it does not exist) a local named file or directory in
+ * parent directory.
+ */
+struct dt_object *local_file_find_or_create(const struct lu_env *env,
+ struct local_oid_storage *los,
+ struct dt_object *parent,
+ const char *name, __u32 mode)
+{
+ struct dt_thread_info *dti = dt_info(env);
+ struct dt_object *dto;
+ int rc;
+
+ LASSERT(parent);
+
+ rc = dt_lookup_dir(env, parent, name, &dti->dti_fid);
+ if (rc == 0)
+ /* name is found, get the object */
+ dto = ls_locate(env, dt2ls_dev(los->los_dev), &dti->dti_fid);
+ else if (rc != -ENOENT)
+ dto = ERR_PTR(rc);
+ else {
+ rc = local_object_fid_generate(env, los, &dti->dti_fid);
+ if (rc < 0) {
+ dto = ERR_PTR(rc);
+ } else {
+ /* create the object */
+ dti->dti_attr.la_valid = LA_MODE;
+ dti->dti_attr.la_mode = mode;
+ dti->dti_dof.dof_type = dt_mode_to_dft(mode & S_IFMT);
+ dto = __local_file_create(env, &dti->dti_fid, los,
+ dt2ls_dev(los->los_dev),
+ parent, name, &dti->dti_attr,
+ &dti->dti_dof);
+ }
+ }
+ return dto;
+}
+EXPORT_SYMBOL(local_file_find_or_create);
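+
+/* Usage sketch (the name and mode below are illustrative, not part of this
+ * patch): a caller holding a valid env, los and parent directory looks up
+ * or creates a local directory and drops the reference when done:
+ *
+ *    o = local_file_find_or_create(env, los, parent, "CONFIGS",
+ *                                  S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO);
+ *    if (IS_ERR(o))
+ *            return PTR_ERR(o);
+ *    ...
+ *    lu_object_put_nocache(env, &o->do_lu);
+ */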
+
+struct dt_object *local_file_find_or_create_with_fid(const struct lu_env *env,
+ struct dt_device *dt,
+ const struct lu_fid *fid,
+ struct dt_object *parent,
+ const char *name,
+ __u32 mode)
+{
+ struct dt_thread_info *dti = dt_info(env);
+ struct dt_object *dto;
+ int rc;
+
+ LASSERT(parent);
+
+ rc = dt_lookup_dir(env, parent, name, &dti->dti_fid);
+ if (rc == 0) {
+ dto = dt_locate(env, dt, &dti->dti_fid);
+ } else if (rc != -ENOENT) {
+ dto = ERR_PTR(rc);
+ } else {
+ struct ls_device *ls;
+
+ ls = ls_device_get(dt);
+ if (IS_ERR(ls)) {
+ dto = ERR_CAST(ls);
+ } else {
+ /* create the object */
+ dti->dti_attr.la_valid = LA_MODE;
+ dti->dti_attr.la_mode = mode;
+ dti->dti_dof.dof_type = dt_mode_to_dft(mode & S_IFMT);
+ dto = __local_file_create(env, fid, NULL, ls, parent,
+ name, &dti->dti_attr,
+ &dti->dti_dof);
+ /* ls_device_put() will finalize the ls device, so we
+ * have to re-open the object in the other device stack */
+ if (!IS_ERR(dto)) {
+ dti->dti_fid = dto->do_lu.lo_header->loh_fid;
+ lu_object_put_nocache(env, &dto->do_lu);
+ dto = dt_locate(env, dt, &dti->dti_fid);
+ }
+ ls_device_put(env, ls);
+ }
+ }
+ return dto;
+}
+EXPORT_SYMBOL(local_file_find_or_create_with_fid);
+
+/*
+ * Look up and create (if it does not exist) a local named index file in parent
+ * directory.
+ */
+struct dt_object *local_index_find_or_create(const struct lu_env *env,
+ struct local_oid_storage *los,
+ struct dt_object *parent,
+ const char *name, __u32 mode,
+ const struct dt_index_features *ft)
+{
+ struct dt_thread_info *dti = dt_info(env);
+ struct dt_object *dto;
+ int rc;
+
+ LASSERT(parent);
+
+ rc = dt_lookup_dir(env, parent, name, &dti->dti_fid);
+ if (rc == 0) {
+ /* name is found, get the object */
+ dto = ls_locate(env, dt2ls_dev(los->los_dev), &dti->dti_fid);
+ } else if (rc != -ENOENT) {
+ dto = ERR_PTR(rc);
+ } else {
+ rc = local_object_fid_generate(env, los, &dti->dti_fid);
+ if (rc < 0) {
+ dto = ERR_PTR(rc);
+ } else {
+ /* create the object */
+ dti->dti_attr.la_valid = LA_MODE;
+ dti->dti_attr.la_mode = mode;
+ dti->dti_dof.dof_type = DFT_INDEX;
+ dti->dti_dof.u.dof_idx.di_feat = ft;
+ dto = __local_file_create(env, &dti->dti_fid, los,
+ dt2ls_dev(los->los_dev),
+ parent, name, &dti->dti_attr,
+ &dti->dti_dof);
+ }
+ }
+ return dto;
+
+}
+EXPORT_SYMBOL(local_index_find_or_create);
+
+struct dt_object *
+local_index_find_or_create_with_fid(const struct lu_env *env,
+ struct dt_device *dt,
+ const struct lu_fid *fid,
+ struct dt_object *parent,
+ const char *name, __u32 mode,
+ const struct dt_index_features *ft)
+{
+ struct dt_thread_info *dti = dt_info(env);
+ struct dt_object *dto;
+ int rc;
+
+ LASSERT(parent);
+
+ rc = dt_lookup_dir(env, parent, name, &dti->dti_fid);
+ if (rc == 0) {
+ /* name is found, get the object */
+ if (!lu_fid_eq(fid, &dti->dti_fid))
+ dto = ERR_PTR(-EINVAL);
+ else
+ dto = dt_locate(env, dt, fid);
+ } else if (rc != -ENOENT) {
+ dto = ERR_PTR(rc);
+ } else {
+ struct ls_device *ls;
+
+ ls = ls_device_get(dt);
+ if (IS_ERR(ls)) {
+ dto = ERR_CAST(ls);
+ } else {
+ /* create the object */
+ dti->dti_attr.la_valid = LA_MODE;
+ dti->dti_attr.la_mode = mode;
+ dti->dti_dof.dof_type = DFT_INDEX;
+ dti->dti_dof.u.dof_idx.di_feat = ft;
+ dto = __local_file_create(env, fid, NULL, ls, parent,
+ name, &dti->dti_attr,
+ &dti->dti_dof);
+ /* ls_device_put() will finalize the ls device, so we
+ * have to re-open the object in the other device stack */
+ if (!IS_ERR(dto)) {
+ dti->dti_fid = dto->do_lu.lo_header->loh_fid;
+ lu_object_put_nocache(env, &dto->do_lu);
+ dto = dt_locate(env, dt, &dti->dti_fid);
+ }
+ ls_device_put(env, ls);
+ }
+ }
+ return dto;
+}
+EXPORT_SYMBOL(local_index_find_or_create_with_fid);
+
+static int local_object_declare_unlink(const struct lu_env *env,
+ struct dt_device *dt,
+ struct dt_object *p,
+ struct dt_object *c, const char *name,
+ struct thandle *th)
+{
+ int rc;
+
+ rc = dt_declare_delete(env, p, (const struct dt_key *)name, th);
+ if (rc < 0)
+ return rc;
+
+ rc = dt_declare_ref_del(env, c, th);
+ if (rc < 0)
+ return rc;
+
+ return dt_declare_destroy(env, c, th);
+}
+
+int local_object_unlink(const struct lu_env *env, struct dt_device *dt,
+ struct dt_object *parent, const char *name)
+{
+ struct dt_thread_info *dti = dt_info(env);
+ struct dt_object *dto;
+ struct thandle *th;
+ int rc;
+
+ rc = dt_lookup_dir(env, parent, name, &dti->dti_fid);
+ if (rc == -ENOENT)
+ return 0;
+ else if (rc < 0)
+ return rc;
+
+ dto = dt_locate(env, dt, &dti->dti_fid);
+ if (unlikely(IS_ERR(dto)))
+ return PTR_ERR(dto);
+
+ th = dt_trans_create(env, dt);
+ if (IS_ERR(th))
+ GOTO(out, rc = PTR_ERR(th));
+
+ rc = local_object_declare_unlink(env, dt, parent, dto, name, th);
+ if (rc < 0)
+ GOTO(stop, rc);
+
+ rc = dt_trans_start_local(env, dt, th);
+ if (rc < 0)
+ GOTO(stop, rc);
+
+ dt_write_lock(env, dto, 0);
+ rc = dt_delete(env, parent, (struct dt_key *)name, th, BYPASS_CAPA);
+ if (rc < 0)
+ GOTO(unlock, rc);
+
+ rc = dt_ref_del(env, dto, th);
+ if (rc < 0) {
+ rc = dt_insert(env, parent,
+ (const struct dt_rec *)&dti->dti_fid,
+ (const struct dt_key *)name, th, BYPASS_CAPA, 1);
+ GOTO(unlock, rc);
+ }
+
+ rc = dt_destroy(env, dto, th);
+unlock:
+ dt_write_unlock(env, dto);
+stop:
+ dt_trans_stop(env, dt, th);
+out:
+ lu_object_put_nocache(env, &dto->do_lu);
+ return rc;
+}
+EXPORT_SYMBOL(local_object_unlink);
+
+struct local_oid_storage *dt_los_find(struct ls_device *ls, __u64 seq)
+{
+ struct local_oid_storage *los, *ret = NULL;
+
+ list_for_each_entry(los, &ls->ls_los_list, los_list) {
+ if (los->los_seq == seq) {
+ atomic_inc(&los->los_refcount);
+ ret = los;
+ break;
+ }
+ }
+ return ret;
+}
+
+void dt_los_put(struct local_oid_storage *los)
+{
+ if (atomic_dec_and_test(&los->los_refcount))
+ /* should never happen, only local_oid_storage_fini should
+ * drop refcount to zero */
+ LBUG();
+ return;
+}
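+
+/* Lookup/put pairing sketch (illustrative): dt_los_find() takes a reference
+ * that the caller drops with dt_los_put(); the final reference is only ever
+ * released through local_oid_storage_fini():
+ *
+ *    los = dt_los_find(ls, seq);
+ *    if (los != NULL) {
+ *            ...
+ *            dt_los_put(los);
+ *    }
+ */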
+
+/* After the Lustre 2.3 release there may be an old file storing the last
+ * generated FID. If such a file exists then we have to read its content.
+ */
+int lastid_compat_check(const struct lu_env *env, struct dt_device *dev,
+ __u64 lastid_seq, __u32 *first_oid, struct ls_device *ls)
+{
+ struct dt_thread_info *dti = dt_info(env);
+ struct dt_object *root = NULL;
+ struct los_ondisk losd;
+ struct dt_object *o = NULL;
+ int rc = 0;
+
+ rc = dt_root_get(env, dev, &dti->dti_fid);
+ if (rc)
+ return rc;
+
+ root = ls_locate(env, ls, &dti->dti_fid);
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+
+ /* find old last_id file */
+ snprintf(dti->dti_buf, sizeof(dti->dti_buf), "seq-"LPX64"-lastid",
+ lastid_seq);
+ rc = dt_lookup_dir(env, root, dti->dti_buf, &dti->dti_fid);
+ lu_object_put_nocache(env, &root->do_lu);
+ if (rc == -ENOENT) {
+ /* old llog lastid accessed by FID only */
+ if (lastid_seq != FID_SEQ_LLOG)
+ return 0;
+ dti->dti_fid.f_seq = FID_SEQ_LLOG;
+ dti->dti_fid.f_oid = 1;
+ dti->dti_fid.f_ver = 0;
+ o = ls_locate(env, ls, &dti->dti_fid);
+ if (IS_ERR(o))
+ return PTR_ERR(o);
+
+ if (!dt_object_exists(o)) {
+ lu_object_put_nocache(env, &o->do_lu);
+ return 0;
+ }
+ CDEBUG(D_INFO, "Found old llog lastid file\n");
+ } else if (rc < 0) {
+ return rc;
+ } else {
+ CDEBUG(D_INFO, "Found old lastid file for sequence "LPX64"\n",
+ lastid_seq);
+ o = ls_locate(env, ls, &dti->dti_fid);
+ if (IS_ERR(o))
+ return PTR_ERR(o);
+ }
+ /* let's read seq-NNNNNN-lastid file value */
+ LASSERT(dt_object_exists(o));
+ dti->dti_off = 0;
+ dti->dti_lb.lb_buf = &losd;
+ dti->dti_lb.lb_len = sizeof(losd);
+ dt_read_lock(env, o, 0);
+ rc = dt_record_read(env, o, &dti->dti_lb, &dti->dti_off);
+ dt_read_unlock(env, o);
+ lu_object_put_nocache(env, &o->do_lu);
+ if (rc == 0 && le32_to_cpu(losd.lso_magic) != LOS_MAGIC) {
+ CERROR("%s: wrong content of seq-"LPX64"-lastid file, magic %x\n",
+ o->do_lu.lo_dev->ld_obd->obd_name, lastid_seq,
+ le32_to_cpu(losd.lso_magic));
+ return -EINVAL;
+ } else if (rc < 0) {
+ CERROR("%s: failed to read seq-"LPX64"-lastid: rc = %d\n",
+ o->do_lu.lo_dev->ld_obd->obd_name, lastid_seq, rc);
+ return rc;
+ }
+ *first_oid = le32_to_cpu(losd.lso_next_oid);
+ return rc;
+}
+
+/**
+ * Initialize local OID storage for the required sequence.
+ * That may be needed for services that use local files and require
+ * dynamic OID allocation for them.
+ *
+ * For each sequence we have an object with the 'first_fid' identifier
+ * containing the counter for OIDs of locally created files with that
+ * sequence.
+ *
+ * It is currently used by the llog subsystem and by the MGS for NID tables.
+ *
+ * The function takes first_fid to create the counter object.
+ * All dynamic fids will be generated with the same sequence and incremented
+ * OIDs.
+ *
+ * The returned local_oid_storage is the in-memory representation of the
+ * OID storage.
+ */
+int local_oid_storage_init(const struct lu_env *env, struct dt_device *dev,
+ const struct lu_fid *first_fid,
+ struct local_oid_storage **los)
+{
+ struct dt_thread_info *dti = dt_info(env);
+ struct ls_device *ls;
+ obd_id lastid;
+ struct dt_object *o = NULL;
+ struct thandle *th;
+ __u32 first_oid = fid_oid(first_fid);
+ int rc = 0;
+
+ ls = ls_device_get(dev);
+ if (IS_ERR(ls))
+ return PTR_ERR(ls);
+
+ mutex_lock(&ls->ls_los_mutex);
+ *los = dt_los_find(ls, fid_seq(first_fid));
+ if (*los != NULL)
+ GOTO(out, rc = 0);
+
+ /* not found, then create */
+ OBD_ALLOC_PTR(*los);
+ if (*los == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ atomic_set(&(*los)->los_refcount, 1);
+ mutex_init(&(*los)->los_id_lock);
+ (*los)->los_dev = &ls->ls_top_dev;
+ atomic_inc(&ls->ls_refcount);
+ list_add(&(*los)->los_list, &ls->ls_los_list);
+
+ /* Use {seq, 0, 0} to create the LAST_ID file for every
+ * sequence. OIDs start at LUSTRE_FID_INIT_OID.
+ */
+ dti->dti_fid.f_seq = fid_seq(first_fid);
+ dti->dti_fid.f_oid = LUSTRE_FID_LASTID_OID;
+ dti->dti_fid.f_ver = 0;
+ o = ls_locate(env, ls, &dti->dti_fid);
+ if (IS_ERR(o))
+ GOTO(out_los, rc = PTR_ERR(o));
+
+ if (!dt_object_exists(o)) {
+ rc = lastid_compat_check(env, dev, fid_seq(first_fid),
+ &first_oid, ls);
+ if (rc < 0)
+ GOTO(out_los, rc);
+
+ th = dt_trans_create(env, dev);
+ if (IS_ERR(th))
+ GOTO(out_los, rc = PTR_ERR(th));
+
+ dti->dti_attr.la_valid = LA_MODE | LA_TYPE;
+ dti->dti_attr.la_mode = S_IFREG | S_IRUGO | S_IWUSR;
+ dti->dti_dof.dof_type = dt_mode_to_dft(S_IFREG);
+
+ rc = dt_declare_create(env, o, &dti->dti_attr, NULL,
+ &dti->dti_dof, th);
+ if (rc)
+ GOTO(out_trans, rc);
+
+ rc = dt_declare_record_write(env, o, sizeof(lastid), 0, th);
+ if (rc)
+ GOTO(out_trans, rc);
+
+ rc = dt_trans_start_local(env, dev, th);
+ if (rc)
+ GOTO(out_trans, rc);
+
+ dt_write_lock(env, o, 0);
+ if (dt_object_exists(o))
+ GOTO(out_lock, rc = 0);
+
+ rc = dt_create(env, o, &dti->dti_attr, NULL, &dti->dti_dof,
+ th);
+ if (rc)
+ GOTO(out_lock, rc);
+
+ lastid = cpu_to_le64(first_oid);
+
+ dti->dti_off = 0;
+ dti->dti_lb.lb_buf = &lastid;
+ dti->dti_lb.lb_len = sizeof(lastid);
+ rc = dt_record_write(env, o, &dti->dti_lb, &dti->dti_off, th);
+ if (rc)
+ GOTO(out_lock, rc);
+out_lock:
+ dt_write_unlock(env, o);
+out_trans:
+ dt_trans_stop(env, dev, th);
+ } else {
+ dti->dti_off = 0;
+ dti->dti_lb.lb_buf = &lastid;
+ dti->dti_lb.lb_len = sizeof(lastid);
+ dt_read_lock(env, o, 0);
+ rc = dt_record_read(env, o, &dti->dti_lb, &dti->dti_off);
+ dt_read_unlock(env, o);
+ if (rc == 0 && le64_to_cpu(lastid) > OBIF_MAX_OID) {
+ CERROR("%s: bad oid "LPU64" is read from LAST_ID\n",
+ o->do_lu.lo_dev->ld_obd->obd_name,
+ le64_to_cpu(lastid));
+ rc = -EINVAL;
+ }
+ }
+out_los:
+ if (rc != 0) {
+ list_del(&(*los)->los_list);
+ atomic_dec(&ls->ls_refcount);
+ OBD_FREE_PTR(*los);
+ *los = NULL;
+ if (o != NULL && !IS_ERR(o))
+ lu_object_put_nocache(env, &o->do_lu);
+ } else {
+ (*los)->los_seq = fid_seq(first_fid);
+ (*los)->los_last_oid = le64_to_cpu(lastid);
+ (*los)->los_obj = o;
+ /* read value should not be less than initial one */
+ LASSERTF((*los)->los_last_oid >= first_oid, "%u < %u\n",
+ (*los)->los_last_oid, first_oid);
+ }
+out:
+ mutex_unlock(&ls->ls_los_mutex);
+ ls_device_put(env, ls);
+ return rc;
+}
+EXPORT_SYMBOL(local_oid_storage_init);
+
+void local_oid_storage_fini(const struct lu_env *env,
+ struct local_oid_storage *los)
+{
+ struct ls_device *ls;
+
+ if (!atomic_dec_and_test(&los->los_refcount))
+ return;
+
+ LASSERT(env);
+ LASSERT(los->los_dev);
+ ls = dt2ls_dev(los->los_dev);
+
+ mutex_lock(&ls->ls_los_mutex);
+ if (atomic_read(&los->los_refcount) == 0) {
+ if (los->los_obj)
+ lu_object_put_nocache(env, &los->los_obj->do_lu);
+ list_del(&los->los_list);
+ OBD_FREE_PTR(los);
+ }
+ mutex_unlock(&ls->ls_los_mutex);
+ ls_device_put(env, ls);
+}
+EXPORT_SYMBOL(local_oid_storage_fini);
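+
+/* Lifecycle sketch (illustrative; first_fid is any fid carrying the sequence
+ * reserved for the caller's local files): initialize the OID storage once,
+ * create local objects through it, then release it:
+ *
+ *    rc = local_oid_storage_init(env, dev, &first_fid, &los);
+ *    if (rc)
+ *            return rc;
+ *    obj = local_file_find_or_create(env, los, parent, "somefile",
+ *                                    S_IFREG | S_IRUGO | S_IWUSR);
+ *    ...
+ *    local_oid_storage_fini(env, los);
+ */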
diff --git a/drivers/staging/lustre/lustre/obdclass/local_storage.h b/drivers/staging/lustre/lustre/obdclass/local_storage.h
new file mode 100644
index 0000000..d553c37
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/local_storage.h
@@ -0,0 +1,88 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details. A copy is
+ * included in the COPYING file that accompanied this code.
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * lustre/obdclass/local_storage.h
+ *
+ * Local storage for file/objects with fid generation. Works on top of OSD.
+ *
+ * Author: Mikhail Pershin <mike.pershin@intel.com>
+ */
+
+#include <dt_object.h>
+#include <obd.h>
+#include <lustre_fid.h>
+#include <lustre_disk.h>
+
+struct ls_device {
+ struct dt_device ls_top_dev;
+ /* all initialized ls_devices on this node linked by this */
+ struct list_head ls_linkage;
+ /* how many handles reference this local storage */
+ atomic_t ls_refcount;
+ /* underlying OSD device */
+ struct dt_device *ls_osd;
+ /* list of all local OID storages */
+ struct list_head ls_los_list;
+ struct mutex ls_los_mutex;
+};
+
+static inline struct ls_device *dt2ls_dev(struct dt_device *d)
+{
+ return container_of0(d, struct ls_device, ls_top_dev);
+}
+
+struct ls_object {
+ struct lu_object_header ls_header;
+ struct dt_object ls_obj;
+};
+
+static inline struct ls_object *lu2ls_obj(struct lu_object *o)
+{
+ return container_of0(o, struct ls_object, ls_obj.do_lu);
+}
+
+static inline struct dt_object *ls_locate(const struct lu_env *env,
+ struct ls_device *ls,
+ const struct lu_fid *fid)
+{
+ return dt_locate_at(env, ls->ls_osd, fid, &ls->ls_top_dev.dd_lu_dev);
+}
+
+struct ls_device *ls_device_get(struct dt_device *dev);
+void ls_device_put(const struct lu_env *env, struct ls_device *ls);
+struct local_oid_storage *dt_los_find(struct ls_device *ls, __u64 seq);
+void dt_los_put(struct local_oid_storage *los);
+
+/* Lustre 2.3 on-disk structure describing the local object OID storage;
+ * the structure is used with any sequence managed by the
+ * local object library.
+ * Obsolete since 2.4 but kept for compatibility reasons,
+ * see lastid_compat_check() in obdclass/local_storage.c */
+struct los_ondisk {
+ __u32 lso_magic;
+ __u32 lso_next_oid;
+};
+
+#define LOS_MAGIC 0xdecafbee
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
new file mode 100644
index 0000000..a95f60a
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -0,0 +1,1986 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdclass/lprocfs_status.c
+ *
+ * Author: Hariharan Thantry <thantry@users.sourceforge.net>
+ */
+
+#define DEBUG_SUBSYSTEM S_CLASS
+
+
+#include <obd_class.h>
+#include <lprocfs_status.h>
+#include <lustre/lustre_idl.h>
+#include <linux/seq_file.h>
+
+#if defined(LPROCFS)
+
+static int lprocfs_no_percpu_stats = 0;
+CFS_MODULE_PARM(lprocfs_no_percpu_stats, "i", int, 0644,
+ "Do not alloc percpu data for lprocfs stats");
+
+#define MAX_STRING_SIZE 128
+
+int lprocfs_single_release(struct inode *inode, struct file *file)
+{
+ return single_release(inode, file);
+}
+EXPORT_SYMBOL(lprocfs_single_release);
+
+int lprocfs_seq_release(struct inode *inode, struct file *file)
+{
+ return seq_release(inode, file);
+}
+EXPORT_SYMBOL(lprocfs_seq_release);
+
+/* lprocfs API calls */
+
+struct proc_dir_entry *lprocfs_add_simple(struct proc_dir_entry *root,
+ char *name, void *data,
+ struct file_operations *fops)
+{
+ struct proc_dir_entry *proc;
+ umode_t mode = 0;
+
+ if (root == NULL || name == NULL || fops == NULL)
+ return ERR_PTR(-EINVAL);
+
+ if (fops->read)
+ mode = 0444;
+ if (fops->write)
+ mode |= 0200;
+ proc = proc_create_data(name, mode, root, fops, data);
+ if (!proc) {
+ CERROR("LprocFS: No memory to create /proc entry %s\n", name);
+ return ERR_PTR(-ENOMEM);
+ }
+ return proc;
+}
+EXPORT_SYMBOL(lprocfs_add_simple);
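+
+/* Usage sketch (the entry name and fops are illustrative): create a /proc
+ * file under an existing obd proc directory, passing the obd as the private
+ * data handed to the fops:
+ *
+ *    proc = lprocfs_add_simple(obd->obd_proc_entry, "dump", obd,
+ *                              &my_dump_fops);
+ *    if (IS_ERR(proc))
+ *            return PTR_ERR(proc);
+ */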
+
+struct proc_dir_entry *lprocfs_add_symlink(const char *name,
+ struct proc_dir_entry *parent, const char *format, ...)
+{
+ struct proc_dir_entry *entry;
+ char *dest;
+ va_list ap;
+
+ if (parent == NULL || format == NULL)
+ return NULL;
+
+ OBD_ALLOC_WAIT(dest, MAX_STRING_SIZE + 1);
+ if (dest == NULL)
+ return NULL;
+
+ va_start(ap, format);
+ vsnprintf(dest, MAX_STRING_SIZE, format, ap);
+ va_end(ap);
+
+ entry = proc_symlink(name, parent, dest);
+ if (entry == NULL)
+ CERROR("LprocFS: Could not create symbolic link from %s to %s",
+ name, dest);
+
+ OBD_FREE(dest, MAX_STRING_SIZE + 1);
+ return entry;
+}
+EXPORT_SYMBOL(lprocfs_add_symlink);
+
+static struct file_operations lprocfs_generic_fops = { };
+
+/**
+ * Add /proc entries.
+ *
+ * \param root [in] The parent proc entry on which new entry will be added.
+ * \param list [in] Array of proc entries to be added.
+ * \param data [in] The argument to be passed when entries read/write routines
+ * are called through /proc file.
+ *
+ * \retval 0 on success
+ * < 0 on error
+ */
+int lprocfs_add_vars(struct proc_dir_entry *root, struct lprocfs_vars *list,
+ void *data)
+{
+ if (root == NULL || list == NULL)
+ return -EINVAL;
+
+ while (list->name != NULL) {
+ struct proc_dir_entry *proc;
+ umode_t mode = 0;
+
+ if (list->proc_mode != 0000) {
+ mode = list->proc_mode;
+ } else if (list->fops) {
+ if (list->fops->read)
+ mode = 0444;
+ if (list->fops->write)
+ mode |= 0200;
+ }
+ proc = proc_create_data(list->name, mode, root,
+ list->fops ?: &lprocfs_generic_fops,
+ list->data ?: data);
+ if (proc == NULL)
+ return -ENOMEM;
+ list++;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(lprocfs_add_vars);
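+
+/*
+ * Illustrative use only (hypothetical names, not part of this file):
+ * callers build a NULL-terminated lprocfs_vars table and hand it to
+ * lprocfs_add_vars(), e.g.
+ *
+ *   static struct lprocfs_vars foo_vars[] = {
+ *           { .name = "uuid",  .fops = &foo_uuid_fops },
+ *           { .name = "stats", .fops = &foo_stats_fops, .proc_mode = 0444 },
+ *           { NULL }
+ *   };
+ *   rc = lprocfs_add_vars(root, foo_vars, obd);
+ *
+ * The file mode is taken from ->proc_mode when non-zero, otherwise it is
+ * derived from the presence of ->fops->read and ->fops->write (see above).
+ */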
+
+void lprocfs_remove(struct proc_dir_entry **rooth)
+{
+ proc_remove(*rooth);
+ *rooth = NULL;
+}
+EXPORT_SYMBOL(lprocfs_remove);
+
+void lprocfs_remove_proc_entry(const char *name, struct proc_dir_entry *parent)
+{
+ LASSERT(parent != NULL);
+ remove_proc_entry(name, parent);
+}
+EXPORT_SYMBOL(lprocfs_remove_proc_entry);
+
+struct proc_dir_entry *lprocfs_register(const char *name,
+ struct proc_dir_entry *parent,
+ struct lprocfs_vars *list, void *data)
+{
+ struct proc_dir_entry *entry;
+
+ entry = proc_mkdir(name, parent);
+ if (entry == NULL)
+ GOTO(out, entry = ERR_PTR(-ENOMEM));
+
+ if (list != NULL) {
+ int rc = lprocfs_add_vars(entry, list, data);
+ if (rc != 0) {
+ lprocfs_remove(&entry);
+ entry = ERR_PTR(rc);
+ }
+ }
+out:
+ return entry;
+}
+EXPORT_SYMBOL(lprocfs_register);
+
+/* Generic callbacks */
+int lprocfs_rd_uint(struct seq_file *m, void *data)
+{
+ return seq_printf(m, "%u\n", *(unsigned int *)data);
+}
+EXPORT_SYMBOL(lprocfs_rd_uint);
+
+int lprocfs_wr_uint(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ unsigned *p = data;
+ char dummy[MAX_STRING_SIZE + 1], *end;
+ unsigned long tmp;
+
+ dummy[MAX_STRING_SIZE] = '\0';
+ if (copy_from_user(dummy, buffer, MAX_STRING_SIZE))
+ return -EFAULT;
+
+ tmp = simple_strtoul(dummy, &end, 0);
+ if (dummy == end)
+ return -EINVAL;
+
+ *p = (unsigned int)tmp;
+ return count;
+}
+EXPORT_SYMBOL(lprocfs_wr_uint);
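+
+/*
+ * Example (base 0 is passed to simple_strtoul() above): writing "16",
+ * "0x10" or "020" to such a file all store 16 in the backing unsigned int.
+ */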
+
+int lprocfs_rd_u64(struct seq_file *m, void *data)
+{
+ return seq_printf(m, LPU64"\n", *(__u64 *)data);
+}
+EXPORT_SYMBOL(lprocfs_rd_u64);
+
+int lprocfs_rd_atomic(struct seq_file *m, void *data)
+{
+ atomic_t *atom = data;
+ LASSERT(atom != NULL);
+ return seq_printf(m, "%d\n", atomic_read(atom));
+}
+EXPORT_SYMBOL(lprocfs_rd_atomic);
+
+int lprocfs_wr_atomic(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ atomic_t *atm = data;
+ int val = 0;
+ int rc;
+
+ rc = lprocfs_write_helper(buffer, count, &val);
+ if (rc < 0)
+ return rc;
+
+ if (val <= 0)
+ return -ERANGE;
+
+ atomic_set(atm, val);
+ return count;
+}
+EXPORT_SYMBOL(lprocfs_wr_atomic);
+
+int lprocfs_rd_uuid(struct seq_file *m, void *data)
+{
+ struct obd_device *obd = data;
+
+ LASSERT(obd != NULL);
+ return seq_printf(m, "%s\n", obd->obd_uuid.uuid);
+}
+EXPORT_SYMBOL(lprocfs_rd_uuid);
+
+int lprocfs_rd_name(struct seq_file *m, void *data)
+{
+ struct obd_device *dev = data;
+
+ LASSERT(dev != NULL);
+ return seq_printf(m, "%s\n", dev->obd_name);
+}
+EXPORT_SYMBOL(lprocfs_rd_name);
+
+int lprocfs_rd_blksize(struct seq_file *m, void *data)
+{
+ struct obd_device *obd = data;
+ struct obd_statfs osfs;
+ int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
+ cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ OBD_STATFS_NODELAY);
+ if (!rc)
+ rc = seq_printf(m, "%u\n", osfs.os_bsize);
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_rd_blksize);
+
+int lprocfs_rd_kbytestotal(struct seq_file *m, void *data)
+{
+ struct obd_device *obd = data;
+ struct obd_statfs osfs;
+ int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
+ cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ OBD_STATFS_NODELAY);
+ if (!rc) {
+ __u32 blk_size = osfs.os_bsize >> 10;
+ __u64 result = osfs.os_blocks;
+
+ while (blk_size >>= 1)
+ result <<= 1;
+
+ rc = seq_printf(m, LPU64"\n", result);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_rd_kbytestotal);
+
+int lprocfs_rd_kbytesfree(struct seq_file *m, void *data)
+{
+ struct obd_device *obd = data;
+ struct obd_statfs osfs;
+ int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
+ cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ OBD_STATFS_NODELAY);
+ if (!rc) {
+ __u32 blk_size = osfs.os_bsize >> 10;
+ __u64 result = osfs.os_bfree;
+
+ while (blk_size >>= 1)
+ result <<= 1;
+
+ rc = seq_printf(m, LPU64"\n", result);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_rd_kbytesfree);
+
+int lprocfs_rd_kbytesavail(struct seq_file *m, void *data)
+{
+ struct obd_device *obd = data;
+ struct obd_statfs osfs;
+ int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
+ cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ OBD_STATFS_NODELAY);
+ if (!rc) {
+ __u32 blk_size = osfs.os_bsize >> 10;
+ __u64 result = osfs.os_bavail;
+
+ while (blk_size >>= 1)
+ result <<= 1;
+
+ rc = seq_printf(m, LPU64"\n", result);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_rd_kbytesavail);
+
+int lprocfs_rd_filestotal(struct seq_file *m, void *data)
+{
+ struct obd_device *obd = data;
+ struct obd_statfs osfs;
+ int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
+ cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ OBD_STATFS_NODELAY);
+ if (!rc)
+ rc = seq_printf(m, LPU64"\n", osfs.os_files);
+
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_rd_filestotal);
+
+int lprocfs_rd_filesfree(struct seq_file *m, void *data)
+{
+ struct obd_device *obd = data;
+ struct obd_statfs osfs;
+ int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
+ cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ OBD_STATFS_NODELAY);
+ if (!rc)
+ rc = seq_printf(m, LPU64"\n", osfs.os_ffree);
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_rd_filesfree);
+
+int lprocfs_rd_server_uuid(struct seq_file *m, void *data)
+{
+ struct obd_device *obd = data;
+ struct obd_import *imp;
+ char *imp_state_name = NULL;
+ int rc = 0;
+
+ LASSERT(obd != NULL);
+ LPROCFS_CLIMP_CHECK(obd);
+ imp = obd->u.cli.cl_import;
+ imp_state_name = ptlrpc_import_state_name(imp->imp_state);
+ rc = seq_printf(m, "%s\t%s%s\n", obd2cli_tgt(obd), imp_state_name,
+ imp->imp_deactive ? "\tDEACTIVATED" : "");
+
+ LPROCFS_CLIMP_EXIT(obd);
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_rd_server_uuid);
+
+int lprocfs_rd_conn_uuid(struct seq_file *m, void *data)
+{
+ struct obd_device *obd = data;
+ struct ptlrpc_connection *conn;
+ int rc = 0;
+
+ LASSERT(obd != NULL);
+
+ LPROCFS_CLIMP_CHECK(obd);
+ conn = obd->u.cli.cl_import->imp_connection;
+ if (conn != NULL)
+ rc = seq_printf(m, "%s\n", conn->c_remote_uuid.uuid);
+ else
+ rc = seq_printf(m, "%s\n", "<none>");
+
+ LPROCFS_CLIMP_EXIT(obd);
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_rd_conn_uuid);
+
+/** add up per-cpu counters */
+void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx,
+ struct lprocfs_counter *cnt)
+{
+ unsigned int num_entry;
+ struct lprocfs_counter *percpu_cntr;
+ struct lprocfs_counter_header *cntr_header;
+ int i;
+ unsigned long flags = 0;
+
+ memset(cnt, 0, sizeof(*cnt));
+
+ if (stats == NULL) {
+ /* set count to 1 to avoid divide-by-zero errs in callers */
+ cnt->lc_count = 1;
+ return;
+ }
+
+ cnt->lc_min = LC_MIN_INIT;
+
+ num_entry = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
+
+ for (i = 0; i < num_entry; i++) {
+ if (stats->ls_percpu[i] == NULL)
+ continue;
+ cntr_header = &stats->ls_cnt_header[idx];
+ percpu_cntr = lprocfs_stats_counter_get(stats, i, idx);
+
+ cnt->lc_count += percpu_cntr->lc_count;
+ cnt->lc_sum += percpu_cntr->lc_sum;
+ if (percpu_cntr->lc_min < cnt->lc_min)
+ cnt->lc_min = percpu_cntr->lc_min;
+ if (percpu_cntr->lc_max > cnt->lc_max)
+ cnt->lc_max = percpu_cntr->lc_max;
+ cnt->lc_sumsquare += percpu_cntr->lc_sumsquare;
+ }
+
+ lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags);
+}
+EXPORT_SYMBOL(lprocfs_stats_collect);
+
+/**
+ * Append a space separated list of current set flags to str.
+ */
+#define flag2str(flag, first) \
+ do { \
+ if (imp->imp_##flag) \
+ seq_printf(m, "%s" #flag, first ? "" : ", "); \
+ } while (0)
+static int obd_import_flags2str(struct obd_import *imp, struct seq_file *m)
+{
+ bool first = true;
+
+ if (imp->imp_obd->obd_no_recov) {
+ seq_printf(m, "no_recov");
+ first = false;
+ }
+
+ flag2str(invalid, first);
+ first = false;
+ flag2str(deactive, first);
+ flag2str(replayable, first);
+ flag2str(pingable, first);
+ return 0;
+}
+#undef flag2str
+
+static const char *obd_connect_names[] = {
+ "read_only",
+ "lov_index",
+ "unused",
+ "write_grant",
+ "server_lock",
+ "version",
+ "request_portal",
+ "acl",
+ "xattr",
+ "create_on_write",
+ "truncate_lock",
+ "initial_transno",
+ "inode_bit_locks",
+ "join_file(obsolete)",
+ "getattr_by_fid",
+ "no_oh_for_devices",
+ "remote_client",
+ "remote_client_by_force",
+ "max_byte_per_rpc",
+ "64bit_qdata",
+ "mds_capability",
+ "oss_capability",
+ "early_lock_cancel",
+ "som",
+ "adaptive_timeouts",
+ "lru_resize",
+ "mds_mds_connection",
+ "real_conn",
+ "change_qunit_size",
+ "alt_checksum_algorithm",
+ "fid_is_enabled",
+ "version_recovery",
+ "pools",
+ "grant_shrink",
+ "skip_orphan",
+ "large_ea",
+ "full20",
+ "layout_lock",
+ "64bithash",
+ "object_max_bytes",
+ "imp_recov",
+ "jobstats",
+ "umask",
+ "einprogress",
+ "grant_param",
+ "flock_owner",
+ "lvb_type",
+ "nanoseconds_times",
+ "lightweight_conn",
+ "short_io",
+ "pingless",
+ "unknown",
+ NULL
+};
+
+static void obd_connect_seq_flags2str(struct seq_file *m, __u64 flags, char *sep)
+{
+ __u64 mask = 1;
+ int i;
+ bool first = true;
+
+ for (i = 0; obd_connect_names[i] != NULL; i++, mask <<= 1) {
+ if (flags & mask) {
+ seq_printf(m, "%s%s",
+ first ? sep : "", obd_connect_names[i]);
+ first = false;
+ }
+ }
+ if (flags & ~(mask - 1))
+ seq_printf(m, "%sunknown flags "LPX64,
+ first ? sep : "", flags & ~(mask - 1));
+}
+
+int obd_connect_flags2str(char *page, int count, __u64 flags, char *sep)
+{
+ __u64 mask = 1;
+ int i, ret = 0;
+
+ for (i = 0; obd_connect_names[i] != NULL; i++, mask <<= 1) {
+ if (flags & mask)
+ ret += snprintf(page + ret, count - ret, "%s%s",
+ ret ? sep : "", obd_connect_names[i]);
+ }
+ if (flags & ~(mask - 1))
+ ret += snprintf(page + ret, count - ret,
+ "%sunknown flags "LPX64,
+ ret ? sep : "", flags & ~(mask - 1));
+ return ret;
+}
+EXPORT_SYMBOL(obd_connect_flags2str);
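+
+/*
+ * Example (worked from the table above): a flags value of 0x81
+ * (bits 0 and 7) renders as "read_only, acl" with sep == ", ";
+ * bits beyond the table are reported as "unknown flags <hex>".
+ */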
+
+int lprocfs_rd_import(struct seq_file *m, void *data)
+{
+ struct lprocfs_counter ret;
+ struct lprocfs_counter_header *header;
+ struct obd_device *obd = (struct obd_device *)data;
+ struct obd_import *imp;
+ struct obd_import_conn *conn;
+ int j;
+ int k;
+ int rw = 0;
+
+ LASSERT(obd != NULL);
+ LPROCFS_CLIMP_CHECK(obd);
+ imp = obd->u.cli.cl_import;
+
+ seq_printf(m,
+ "import:\n"
+ " name: %s\n"
+ " target: %s\n"
+ " state: %s\n"
+ " instance: %u\n"
+ " connect_flags: [",
+ obd->obd_name,
+ obd2cli_tgt(obd),
+ ptlrpc_import_state_name(imp->imp_state),
+ imp->imp_connect_data.ocd_instance);
+ obd_connect_seq_flags2str(m, imp->imp_connect_data.ocd_connect_flags, ", ");
+ seq_printf(m,
+ "]\n"
+ " import_flags: [");
+ obd_import_flags2str(imp, m);
+
+ seq_printf(m,
+ "]\n"
+ " connection:\n"
+ " failover_nids: [");
+ spin_lock(&imp->imp_lock);
+ j = 0;
+ list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
+ seq_printf(m, "%s%s", j ? ", " : "",
+ libcfs_nid2str(conn->oic_conn->c_peer.nid));
+ j++;
+ }
+ seq_printf(m,
+ "]\n"
+ " current_connection: %s\n"
+ " connection_attempts: %u\n"
+ " generation: %u\n"
+ " in-progress_invalidations: %u\n",
+ imp->imp_connection == NULL ? "<none>" :
+ libcfs_nid2str(imp->imp_connection->c_peer.nid),
+ imp->imp_conn_cnt,
+ imp->imp_generation,
+ atomic_read(&imp->imp_inval_count));
+ spin_unlock(&imp->imp_lock);
+
+ if (obd->obd_svc_stats == NULL)
+ goto out_climp;
+
+ header = &obd->obd_svc_stats->ls_cnt_header[PTLRPC_REQWAIT_CNTR];
+ lprocfs_stats_collect(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR, &ret);
+ if (ret.lc_count != 0) {
+ /* first argument to do_div MUST be __u64 */
+ __u64 sum = ret.lc_sum;
+ do_div(sum, ret.lc_count);
+ ret.lc_sum = sum;
+ } else
+ ret.lc_sum = 0;
+ seq_printf(m,
+ " rpcs:\n"
+ " inflight: %u\n"
+ " unregistering: %u\n"
+ " timeouts: %u\n"
+ " avg_waittime: "LPU64" %s\n",
+ atomic_read(&imp->imp_inflight),
+ atomic_read(&imp->imp_unregistering),
+ atomic_read(&imp->imp_timeouts),
+ ret.lc_sum, header->lc_units);
+
+ k = 0;
+ for (j = 0; j < IMP_AT_MAX_PORTALS; j++) {
+ if (imp->imp_at.iat_portal[j] == 0)
+ break;
+ k = max_t(unsigned int, k,
+ at_get(&imp->imp_at.iat_service_estimate[j]));
+ }
+ seq_printf(m,
+ " service_estimates:\n"
+ " services: %u sec\n"
+ " network: %u sec\n",
+ k,
+ at_get(&imp->imp_at.iat_net_latency));
+
+ seq_printf(m,
+ " transactions:\n"
+ " last_replay: "LPU64"\n"
+ " peer_committed: "LPU64"\n"
+ " last_checked: "LPU64"\n",
+ imp->imp_last_replay_transno,
+ imp->imp_peer_committed_transno,
+ imp->imp_last_transno_checked);
+
+ /* avg data rates */
+ for (rw = 0; rw <= 1; rw++) {
+ lprocfs_stats_collect(obd->obd_svc_stats,
+ PTLRPC_LAST_CNTR + BRW_READ_BYTES + rw,
+ &ret);
+ if (ret.lc_sum > 0 && ret.lc_count > 0) {
+ /* first argument to do_div MUST be __u64 */
+ __u64 sum = ret.lc_sum;
+ do_div(sum, ret.lc_count);
+ ret.lc_sum = sum;
+ seq_printf(m,
+ " %s_data_averages:\n"
+ " bytes_per_rpc: "LPU64"\n",
+ rw ? "write" : "read",
+ ret.lc_sum);
+ }
+ k = (int)ret.lc_sum;
+ j = opcode_offset(OST_READ + rw) + EXTRA_MAX_OPCODES;
+ header = &obd->obd_svc_stats->ls_cnt_header[j];
+ lprocfs_stats_collect(obd->obd_svc_stats, j, &ret);
+ if (ret.lc_sum > 0 && ret.lc_count != 0) {
+ /* first argument to do_div MUST be __u64 */
+ __u64 sum = ret.lc_sum;
+ do_div(sum, ret.lc_count);
+ ret.lc_sum = sum;
+ seq_printf(m,
+ " %s_per_rpc: "LPU64"\n",
+ header->lc_units, ret.lc_sum);
+ j = (int)ret.lc_sum;
+ if (j > 0)
+ seq_printf(m,
+ " MB_per_sec: %u.%.02u\n",
+ k / j, (100 * k / j) % 100);
+ }
+ }
+
+out_climp:
+ LPROCFS_CLIMP_EXIT(obd);
+ return 0;
+}
+EXPORT_SYMBOL(lprocfs_rd_import);
+
+int lprocfs_rd_state(struct seq_file *m, void *data)
+{
+ struct obd_device *obd = (struct obd_device *)data;
+ struct obd_import *imp;
+ int j, k;
+
+ LASSERT(obd != NULL);
+ LPROCFS_CLIMP_CHECK(obd);
+ imp = obd->u.cli.cl_import;
+
+ seq_printf(m, "current_state: %s\n",
+ ptlrpc_import_state_name(imp->imp_state));
+ seq_printf(m, "state_history:\n");
+ k = imp->imp_state_hist_idx;
+ for (j = 0; j < IMP_STATE_HIST_LEN; j++) {
+ struct import_state_hist *ish =
+ &imp->imp_state_hist[(k + j) % IMP_STATE_HIST_LEN];
+ if (ish->ish_state == 0)
+ continue;
+ seq_printf(m, " - ["CFS_TIME_T", %s]\n",
+ ish->ish_time,
+ ptlrpc_import_state_name(ish->ish_state));
+ }
+
+ LPROCFS_CLIMP_EXIT(obd);
+ return 0;
+}
+EXPORT_SYMBOL(lprocfs_rd_state);
+
+int lprocfs_at_hist_helper(struct seq_file *m, struct adaptive_timeout *at)
+{
+ int i;
+ for (i = 0; i < AT_BINS; i++)
+ seq_printf(m, "%3u ", at->at_hist[i]);
+ seq_printf(m, "\n");
+ return 0;
+}
+EXPORT_SYMBOL(lprocfs_at_hist_helper);
+
+/* See also ptlrpc_lprocfs_rd_timeouts */
+int lprocfs_rd_timeouts(struct seq_file *m, void *data)
+{
+ struct obd_device *obd = (struct obd_device *)data;
+ struct obd_import *imp;
+ unsigned int cur, worst;
+ time_t now, worstt;
+ struct dhms ts;
+ int i;
+
+ LASSERT(obd != NULL);
+ LPROCFS_CLIMP_CHECK(obd);
+ imp = obd->u.cli.cl_import;
+
+ now = cfs_time_current_sec();
+
+ /* Some network health info for kicks */
+ s2dhms(&ts, now - imp->imp_last_reply_time);
+ seq_printf(m, "%-10s : %ld, "DHMS_FMT" ago\n",
+ "last reply", imp->imp_last_reply_time, DHMS_VARS(&ts));
+
+ cur = at_get(&imp->imp_at.iat_net_latency);
+ worst = imp->imp_at.iat_net_latency.at_worst_ever;
+ worstt = imp->imp_at.iat_net_latency.at_worst_time;
+ s2dhms(&ts, now - worstt);
+ seq_printf(m, "%-10s : cur %3u worst %3u (at %ld, "DHMS_FMT" ago) ",
+ "network", cur, worst, worstt, DHMS_VARS(&ts));
+ lprocfs_at_hist_helper(m, &imp->imp_at.iat_net_latency);
+
+ for (i = 0; i < IMP_AT_MAX_PORTALS; i++) {
+ if (imp->imp_at.iat_portal[i] == 0)
+ break;
+ cur = at_get(&imp->imp_at.iat_service_estimate[i]);
+ worst = imp->imp_at.iat_service_estimate[i].at_worst_ever;
+ worstt = imp->imp_at.iat_service_estimate[i].at_worst_time;
+ s2dhms(&ts, now - worstt);
+ seq_printf(m, "portal %-2d : cur %3u worst %3u (at %ld, "
+ DHMS_FMT" ago) ", imp->imp_at.iat_portal[i],
+ cur, worst, worstt, DHMS_VARS(&ts));
+ lprocfs_at_hist_helper(m, &imp->imp_at.iat_service_estimate[i]);
+ }
+
+ LPROCFS_CLIMP_EXIT(obd);
+ return 0;
+}
+EXPORT_SYMBOL(lprocfs_rd_timeouts);
+
+int lprocfs_rd_connect_flags(struct seq_file *m, void *data)
+{
+ struct obd_device *obd = data;
+ __u64 flags;
+
+ LPROCFS_CLIMP_CHECK(obd);
+ flags = obd->u.cli.cl_import->imp_connect_data.ocd_connect_flags;
+ seq_printf(m, "flags="LPX64"\n", flags);
+ obd_connect_seq_flags2str(m, flags, "\n");
+ seq_printf(m, "\n");
+ LPROCFS_CLIMP_EXIT(obd);
+ return 0;
+}
+EXPORT_SYMBOL(lprocfs_rd_connect_flags);
+
+int lprocfs_rd_num_exports(struct seq_file *m, void *data)
+{
+ struct obd_device *obd = data;
+
+ LASSERT(obd != NULL);
+ return seq_printf(m, "%u\n", obd->obd_num_exports);
+}
+EXPORT_SYMBOL(lprocfs_rd_num_exports);
+
+int lprocfs_rd_numrefs(struct seq_file *m, void *data)
+{
+ struct obd_type *class = (struct obd_type*) data;
+
+ LASSERT(class != NULL);
+ return seq_printf(m, "%d\n", class->typ_refcnt);
+}
+EXPORT_SYMBOL(lprocfs_rd_numrefs);
+
+int lprocfs_obd_setup(struct obd_device *obd, struct lprocfs_vars *list)
+{
+ int rc = 0;
+
+ LASSERT(obd != NULL);
+ LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
+ LASSERT(obd->obd_type->typ_procroot != NULL);
+
+ obd->obd_proc_entry = lprocfs_register(obd->obd_name,
+ obd->obd_type->typ_procroot,
+ list, obd);
+ if (IS_ERR(obd->obd_proc_entry)) {
+ rc = PTR_ERR(obd->obd_proc_entry);
+ CERROR("error %d setting up lprocfs for %s\n",rc,obd->obd_name);
+ obd->obd_proc_entry = NULL;
+ }
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_obd_setup);
+
+int lprocfs_obd_cleanup(struct obd_device *obd)
+{
+ if (!obd)
+ return -EINVAL;
+ if (obd->obd_proc_exports_entry) {
+ /* Should be no exports left */
+ lprocfs_remove(&obd->obd_proc_exports_entry);
+ obd->obd_proc_exports_entry = NULL;
+ }
+ if (obd->obd_proc_entry) {
+ lprocfs_remove(&obd->obd_proc_entry);
+ obd->obd_proc_entry = NULL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(lprocfs_obd_cleanup);
+
+static void lprocfs_free_client_stats(struct nid_stat *client_stat)
+{
+ CDEBUG(D_CONFIG, "stat %p - data %p/%p\n", client_stat,
+ client_stat->nid_proc, client_stat->nid_stats);
+
+ LASSERTF(atomic_read(&client_stat->nid_exp_ref_count) == 0,
+ "nid %s:count %d\n", libcfs_nid2str(client_stat->nid),
+ atomic_read(&client_stat->nid_exp_ref_count));
+
+ if (client_stat->nid_proc)
+ lprocfs_remove(&client_stat->nid_proc);
+
+ if (client_stat->nid_stats)
+ lprocfs_free_stats(&client_stat->nid_stats);
+
+ if (client_stat->nid_ldlm_stats)
+ lprocfs_free_stats(&client_stat->nid_ldlm_stats);
+
+ OBD_FREE_PTR(client_stat);
+}
+
+void lprocfs_free_per_client_stats(struct obd_device *obd)
+{
+ cfs_hash_t *hash = obd->obd_nid_stats_hash;
+ struct nid_stat *stat;
+
+ /* we need an extra list because hash exit is called too early */
+ /* no locking needed because all the clients have gone */
+ while (!list_empty(&obd->obd_nid_stats)) {
+ stat = list_entry(obd->obd_nid_stats.next,
+ struct nid_stat, nid_list);
+ list_del_init(&stat->nid_list);
+ cfs_hash_del(hash, &stat->nid, &stat->nid_hash);
+ lprocfs_free_client_stats(stat);
+ }
+}
+EXPORT_SYMBOL(lprocfs_free_per_client_stats);
+
+struct lprocfs_stats *lprocfs_alloc_stats(unsigned int num,
+ enum lprocfs_stats_flags flags)
+{
+ struct lprocfs_stats *stats;
+ unsigned int num_entry;
+ unsigned int percpusize = 0;
+ int i;
+
+ if (num == 0)
+ return NULL;
+
+ if (lprocfs_no_percpu_stats != 0)
+ flags |= LPROCFS_STATS_FLAG_NOPERCPU;
+
+ if (flags & LPROCFS_STATS_FLAG_NOPERCPU)
+ num_entry = 1;
+ else
+ num_entry = num_possible_cpus();
+
+ /* alloc percpu pointers for all possible cpu slots */
+ LIBCFS_ALLOC(stats, offsetof(typeof(*stats), ls_percpu[num_entry]));
+ if (stats == NULL)
+ return NULL;
+
+ stats->ls_num = num;
+ stats->ls_flags = flags;
+ spin_lock_init(&stats->ls_lock);
+
+ /* alloc num of counter headers */
+ LIBCFS_ALLOC(stats->ls_cnt_header,
+ stats->ls_num * sizeof(struct lprocfs_counter_header));
+ if (stats->ls_cnt_header == NULL)
+ goto fail;
+
+ if ((flags & LPROCFS_STATS_FLAG_NOPERCPU) != 0) {
+ /* contains only one set of counters */
+ percpusize = lprocfs_stats_counter_size(stats);
+ LIBCFS_ALLOC_ATOMIC(stats->ls_percpu[0], percpusize);
+ if (stats->ls_percpu[0] == NULL)
+ goto fail;
+ stats->ls_biggest_alloc_num = 1;
+ } else if ((flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0) {
+ /* alloc all percpu data; currently only obd_memory uses this */
+ for (i = 0; i < num_entry; ++i)
+ if (lprocfs_stats_alloc_one(stats, i) < 0)
+ goto fail;
+ }
+
+ return stats;
+
+fail:
+ lprocfs_free_stats(&stats);
+ return NULL;
+}
+EXPORT_SYMBOL(lprocfs_alloc_stats);
+
+void lprocfs_free_stats(struct lprocfs_stats **statsh)
+{
+ struct lprocfs_stats *stats = *statsh;
+ unsigned int num_entry;
+ unsigned int percpusize;
+ unsigned int i;
+
+ if (stats == NULL || stats->ls_num == 0)
+ return;
+ *statsh = NULL;
+
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU)
+ num_entry = 1;
+ else
+ num_entry = num_possible_cpus();
+
+ percpusize = lprocfs_stats_counter_size(stats);
+ for (i = 0; i < num_entry; i++)
+ if (stats->ls_percpu[i] != NULL)
+ LIBCFS_FREE(stats->ls_percpu[i], percpusize);
+ if (stats->ls_cnt_header != NULL)
+ LIBCFS_FREE(stats->ls_cnt_header, stats->ls_num *
+ sizeof(struct lprocfs_counter_header));
+ LIBCFS_FREE(stats, offsetof(typeof(*stats), ls_percpu[num_entry]));
+}
+EXPORT_SYMBOL(lprocfs_free_stats);
+
+void lprocfs_clear_stats(struct lprocfs_stats *stats)
+{
+ struct lprocfs_counter *percpu_cntr;
+ struct lprocfs_counter_header *header;
+ int i;
+ int j;
+ unsigned int num_entry;
+ unsigned long flags = 0;
+
+ num_entry = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
+
+ for (i = 0; i < num_entry; i++) {
+ if (stats->ls_percpu[i] == NULL)
+ continue;
+ for (j = 0; j < stats->ls_num; j++) {
+ header = &stats->ls_cnt_header[j];
+ percpu_cntr = lprocfs_stats_counter_get(stats, i, j);
+ percpu_cntr->lc_count = 0;
+ percpu_cntr->lc_min = LC_MIN_INIT;
+ percpu_cntr->lc_max = 0;
+ percpu_cntr->lc_sumsquare = 0;
+ percpu_cntr->lc_sum = 0;
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
+ percpu_cntr->lc_sum_irq = 0;
+ }
+ }
+
+ lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags);
+}
+EXPORT_SYMBOL(lprocfs_clear_stats);
+
+static ssize_t lprocfs_stats_seq_write(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *off)
+{
+ struct seq_file *seq = file->private_data;
+ struct lprocfs_stats *stats = seq->private;
+
+ lprocfs_clear_stats(stats);
+
+ return len;
+}
+
+static void *lprocfs_stats_seq_start(struct seq_file *p, loff_t *pos)
+{
+ struct lprocfs_stats *stats = p->private;
+
+ return (*pos < stats->ls_num) ? pos : NULL;
+}
+
+static void lprocfs_stats_seq_stop(struct seq_file *p, void *v)
+{
+}
+
+static void *lprocfs_stats_seq_next(struct seq_file *p, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return lprocfs_stats_seq_start(p, pos);
+}
+
+/* seq file export of one lprocfs counter */
+static int lprocfs_stats_seq_show(struct seq_file *p, void *v)
+{
+ struct lprocfs_stats *stats = p->private;
+ struct lprocfs_counter_header *hdr;
+ struct lprocfs_counter ctr;
+ int idx = *(loff_t *)v;
+ int rc = 0;
+
+ if (idx == 0) {
+ struct timeval now;
+ do_gettimeofday(&now);
+ rc = seq_printf(p, "%-25s %lu.%lu secs.usecs\n",
+ "snapshot_time", now.tv_sec, now.tv_usec);
+ if (rc < 0)
+ return rc;
+ }
+ hdr = &stats->ls_cnt_header[idx];
+ lprocfs_stats_collect(stats, idx, &ctr);
+
+ if (ctr.lc_count == 0)
+ goto out;
+
+ rc = seq_printf(p, "%-25s "LPD64" samples [%s]", hdr->lc_name,
+ ctr.lc_count, hdr->lc_units);
+
+ if (rc < 0)
+ goto out;
+
+ if ((hdr->lc_config & LPROCFS_CNTR_AVGMINMAX) && (ctr.lc_count > 0)) {
+ rc = seq_printf(p, " "LPD64" "LPD64" "LPD64,
+ ctr.lc_min, ctr.lc_max, ctr.lc_sum);
+ if (rc < 0)
+ goto out;
+ if (hdr->lc_config & LPROCFS_CNTR_STDDEV)
+ rc = seq_printf(p, " "LPD64, ctr.lc_sumsquare);
+ if (rc < 0)
+ goto out;
+ }
+ rc = seq_printf(p, "\n");
+out:
+ return (rc < 0) ? rc : 0;
+}
+
+struct seq_operations lprocfs_stats_seq_sops = {
+ .start = lprocfs_stats_seq_start,
+ .stop = lprocfs_stats_seq_stop,
+ .next = lprocfs_stats_seq_next,
+ .show = lprocfs_stats_seq_show,
+};
+
+static int lprocfs_stats_seq_open(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq;
+ int rc;
+
+ rc = seq_open(file, &lprocfs_stats_seq_sops);
+ if (rc)
+ return rc;
+ seq = file->private_data;
+ seq->private = PDE_DATA(inode);
+ return 0;
+}
+
+struct file_operations lprocfs_stats_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = lprocfs_stats_seq_open,
+ .read = seq_read,
+ .write = lprocfs_stats_seq_write,
+ .llseek = seq_lseek,
+ .release = lprocfs_seq_release,
+};
+
+int lprocfs_register_stats(struct proc_dir_entry *root, const char *name,
+ struct lprocfs_stats *stats)
+{
+ struct proc_dir_entry *entry;
+ LASSERT(root != NULL);
+
+ entry = proc_create_data(name, 0644, root,
+ &lprocfs_stats_seq_fops, stats);
+ if (entry == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL(lprocfs_register_stats);
+
+void lprocfs_counter_init(struct lprocfs_stats *stats, int index,
+ unsigned conf, const char *name, const char *units)
+{
+ struct lprocfs_counter_header *header;
+ struct lprocfs_counter *percpu_cntr;
+ unsigned long flags = 0;
+ unsigned int i;
+ unsigned int num_cpu;
+
+ LASSERT(stats != NULL);
+
+ header = &stats->ls_cnt_header[index];
+ LASSERTF(header != NULL, "Failed to allocate stats header:[%d]%s/%s\n",
+ index, name, units);
+
+ header->lc_config = conf;
+ header->lc_name = name;
+ header->lc_units = units;
+
+ num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
+ for (i = 0; i < num_cpu; ++i) {
+ if (stats->ls_percpu[i] == NULL)
+ continue;
+ percpu_cntr = lprocfs_stats_counter_get(stats, i, index);
+ percpu_cntr->lc_count = 0;
+ percpu_cntr->lc_min = LC_MIN_INIT;
+ percpu_cntr->lc_max = 0;
+ percpu_cntr->lc_sumsquare = 0;
+ percpu_cntr->lc_sum = 0;
+ if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
+ percpu_cntr->lc_sum_irq = 0;
+ }
+ lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags);
+}
+EXPORT_SYMBOL(lprocfs_counter_init);
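+
+/*
+ * Typical lifecycle, as a sketch only (names are hypothetical; the same
+ * pattern is used by lprocfs_alloc_obd_stats() below):
+ *
+ *   stats = lprocfs_alloc_stats(nr, 0);
+ *   lprocfs_counter_init(stats, 0, 0, "foo_reqs", "reqs");
+ *   rc = lprocfs_register_stats(proc_root, "stats", stats);
+ *   ...
+ *   lprocfs_free_stats(&stats);
+ */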
+
+#define LPROCFS_OBD_OP_INIT(base, stats, op) \
+do { \
+ unsigned int coffset = base + OBD_COUNTER_OFFSET(op); \
+ LASSERT(coffset < stats->ls_num); \
+ lprocfs_counter_init(stats, coffset, 0, #op, "reqs"); \
+} while (0)
+
+void lprocfs_init_ops_stats(int num_private_stats, struct lprocfs_stats *stats)
+{
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, iocontrol);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, get_info);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, set_info_async);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, attach);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, detach);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, setup);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, precleanup);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, cleanup);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, process_config);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, postrecov);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, add_conn);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, del_conn);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, connect);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, reconnect);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, disconnect);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, fid_init);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, fid_fini);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, fid_alloc);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, statfs);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, statfs_async);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, packmd);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, unpackmd);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, preallocate);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, precreate);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, create);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, create_async);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, destroy);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, setattr);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, setattr_async);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, getattr);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, getattr_async);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, brw);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, merge_lvb);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, adjust_kms);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, punch);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, sync);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, migrate);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, copy);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, iterate);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, preprw);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, commitrw);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, enqueue);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, change_cbdata);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, find_cbdata);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, cancel);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, cancel_unused);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, init_export);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, destroy_export);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, extent_calc);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, llog_init);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, llog_connect);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, llog_finish);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, pin);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, unpin);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, import_event);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, notify);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, health_check);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, get_uuid);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, quotacheck);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, quotactl);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, ping);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, pool_new);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, pool_rem);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, pool_add);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, pool_del);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, getref);
+ LPROCFS_OBD_OP_INIT(num_private_stats, stats, putref);
+}
+EXPORT_SYMBOL(lprocfs_init_ops_stats);
+
+int lprocfs_alloc_obd_stats(struct obd_device *obd, unsigned num_private_stats)
+{
+ struct lprocfs_stats *stats;
+ unsigned int num_stats;
+ int rc, i;
+
+ LASSERT(obd->obd_stats == NULL);
+ LASSERT(obd->obd_proc_entry != NULL);
+ LASSERT(obd->obd_cntr_base == 0);
+
+ num_stats = ((int)sizeof(*obd->obd_type->typ_dt_ops) / sizeof(void *)) +
+ num_private_stats - 1 /* o_owner */;
+ stats = lprocfs_alloc_stats(num_stats, 0);
+ if (stats == NULL)
+ return -ENOMEM;
+
+ lprocfs_init_ops_stats(num_private_stats, stats);
+
+ for (i = num_private_stats; i < num_stats; i++) {
+ /* If this LBUGs, it is likely that an obd
+ * operation was added to struct obd_ops in
+ * <obd.h>, and that the corresponding line item
+ * LPROCFS_OBD_OP_INIT(.., .., opname)
+ * is missing from the list above. */
+ LASSERTF(stats->ls_cnt_header[i].lc_name != NULL,
+ "Missing obd_stat initializer obd_op "
+ "operation at offset %d.\n", i - num_private_stats);
+ }
+ rc = lprocfs_register_stats(obd->obd_proc_entry, "stats", stats);
+ if (rc < 0) {
+ lprocfs_free_stats(&stats);
+ } else {
+ obd->obd_stats = stats;
+ obd->obd_cntr_base = num_private_stats;
+ }
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_alloc_obd_stats);
+
+void lprocfs_free_obd_stats(struct obd_device *obd)
+{
+ if (obd->obd_stats)
+ lprocfs_free_stats(&obd->obd_stats);
+}
+EXPORT_SYMBOL(lprocfs_free_obd_stats);
+
+#define LPROCFS_MD_OP_INIT(base, stats, op) \
+do { \
+ unsigned int coffset = base + MD_COUNTER_OFFSET(op); \
+ LASSERT(coffset < stats->ls_num); \
+ lprocfs_counter_init(stats, coffset, 0, #op, "reqs"); \
+} while (0)
+
+void lprocfs_init_mps_stats(int num_private_stats, struct lprocfs_stats *stats)
+{
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, getstatus);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, null_inode);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, find_cbdata);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, close);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, create);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, done_writing);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, enqueue);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, getattr);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, getattr_name);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, intent_lock);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, link);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, rename);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, is_subdir);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, setattr);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, sync);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, readpage);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, unlink);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, setxattr);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, getxattr);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, init_ea_size);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, get_lustre_md);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, free_lustre_md);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, set_open_replay_data);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, clear_open_replay_data);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, set_lock_data);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, lock_match);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, cancel_unused);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, renew_capa);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, unpack_capa);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, get_remote_perm);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, intent_getattr_async);
+ LPROCFS_MD_OP_INIT(num_private_stats, stats, revalidate_lock);
+}
+EXPORT_SYMBOL(lprocfs_init_mps_stats);
+
+int lprocfs_alloc_md_stats(struct obd_device *obd,
+ unsigned num_private_stats)
+{
+ struct lprocfs_stats *stats;
+ unsigned int num_stats;
+ int rc, i;
+
+ LASSERT(obd->md_stats == NULL);
+ LASSERT(obd->obd_proc_entry != NULL);
+ LASSERT(obd->md_cntr_base == 0);
+
+ num_stats = 1 + MD_COUNTER_OFFSET(revalidate_lock) +
+ num_private_stats;
+ stats = lprocfs_alloc_stats(num_stats, 0);
+ if (stats == NULL)
+ return -ENOMEM;
+
+ lprocfs_init_mps_stats(num_private_stats, stats);
+
+ for (i = num_private_stats; i < num_stats; i++) {
+ if (stats->ls_cnt_header[i].lc_name == NULL) {
+ CERROR("Missing md_stat initializer md_op "
+ "operation at offset %d. Aborting.\n",
+ i - num_private_stats);
+ LBUG();
+ }
+ }
+ rc = lprocfs_register_stats(obd->obd_proc_entry, "md_stats", stats);
+ if (rc < 0) {
+ lprocfs_free_stats(&stats);
+ } else {
+ obd->md_stats = stats;
+ obd->md_cntr_base = num_private_stats;
+ }
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_alloc_md_stats);
+
+void lprocfs_free_md_stats(struct obd_device *obd)
+{
+ struct lprocfs_stats *stats = obd->md_stats;
+
+ if (stats != NULL) {
+ obd->md_stats = NULL;
+ obd->md_cntr_base = 0;
+ lprocfs_free_stats(&stats);
+ }
+}
+EXPORT_SYMBOL(lprocfs_free_md_stats);
+
+void lprocfs_init_ldlm_stats(struct lprocfs_stats *ldlm_stats)
+{
+ lprocfs_counter_init(ldlm_stats,
+ LDLM_ENQUEUE - LDLM_FIRST_OPC,
+ 0, "ldlm_enqueue", "reqs");
+ lprocfs_counter_init(ldlm_stats,
+ LDLM_CONVERT - LDLM_FIRST_OPC,
+ 0, "ldlm_convert", "reqs");
+ lprocfs_counter_init(ldlm_stats,
+ LDLM_CANCEL - LDLM_FIRST_OPC,
+ 0, "ldlm_cancel", "reqs");
+ lprocfs_counter_init(ldlm_stats,
+ LDLM_BL_CALLBACK - LDLM_FIRST_OPC,
+ 0, "ldlm_bl_callback", "reqs");
+ lprocfs_counter_init(ldlm_stats,
+ LDLM_CP_CALLBACK - LDLM_FIRST_OPC,
+ 0, "ldlm_cp_callback", "reqs");
+ lprocfs_counter_init(ldlm_stats,
+ LDLM_GL_CALLBACK - LDLM_FIRST_OPC,
+ 0, "ldlm_gl_callback", "reqs");
+}
+EXPORT_SYMBOL(lprocfs_init_ldlm_stats);
+
+int lprocfs_exp_print_uuid(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode, void *data)
+
+{
+ struct obd_export *exp = cfs_hash_object(hs, hnode);
+ struct seq_file *m = (struct seq_file *)data;
+
+ if (exp->exp_nid_stats)
+ seq_printf(m, "%s\n", obd_uuid2str(&exp->exp_client_uuid));
+
+ return 0;
+}
+
+static int
+lproc_exp_uuid_seq_show(struct seq_file *m, void *unused)
+{
+ struct nid_stat *stats = (struct nid_stat *)m->private;
+ struct obd_device *obd = stats->nid_obd;
+
+ cfs_hash_for_each_key(obd->obd_nid_hash, &stats->nid,
+ lprocfs_exp_print_uuid, m);
+ return 0;
+}
+
+LPROC_SEQ_FOPS_RO(lproc_exp_uuid);
+
+struct exp_hash_cb_data {
+ struct seq_file *m;
+ bool first;
+};
+
+int lprocfs_exp_print_hash(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode, void *cb_data)
+
+{
+ struct exp_hash_cb_data *data = (struct exp_hash_cb_data *)cb_data;
+ struct obd_export *exp = cfs_hash_object(hs, hnode);
+
+ if (exp->exp_lock_hash != NULL) {
+ if (data->first) {
+ cfs_hash_debug_header(data->m);
+ data->first = false;
+ }
+ cfs_hash_debug_str(hs, data->m);
+ }
+
+ return 0;
+}
+
+static int
+lproc_exp_hash_seq_show(struct seq_file *m, void *unused)
+{
+ struct nid_stat *stats = (struct nid_stat *)m->private;
+ struct obd_device *obd = stats->nid_obd;
+ struct exp_hash_cb_data cb_data = {m, true};
+
+ cfs_hash_for_each_key(obd->obd_nid_hash, &stats->nid,
+ lprocfs_exp_print_hash, &cb_data);
+ return 0;
+}
+
+LPROC_SEQ_FOPS_RO(lproc_exp_hash);
+
+int lprocfs_nid_stats_clear_read(struct seq_file *m, void *data)
+{
+ return seq_printf(m, "%s\n",
+ "Write into this file to clear all nid stats and "
+ "stale nid entries");
+}
+EXPORT_SYMBOL(lprocfs_nid_stats_clear_read);
+
+static int lprocfs_nid_stats_clear_write_cb(void *obj, void *data)
+{
+ struct nid_stat *stat = obj;
+
+ CDEBUG(D_INFO,"refcnt %d\n", atomic_read(&stat->nid_exp_ref_count));
+ if (atomic_read(&stat->nid_exp_ref_count) == 1) {
+ /* object has only hash references. */
+ spin_lock(&stat->nid_obd->obd_nid_lock);
+ list_move(&stat->nid_list, data);
+ spin_unlock(&stat->nid_obd->obd_nid_lock);
+ return 1;
+ }
+ /* we still hold a reference to the object - only clear its data */
+ if (stat->nid_stats)
+ lprocfs_clear_stats(stat->nid_stats);
+
+ return 0;
+}
+
+int lprocfs_nid_stats_clear_write(struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ struct obd_device *obd = (struct obd_device *)data;
+ struct nid_stat *client_stat;
+ LIST_HEAD(free_list);
+
+ cfs_hash_cond_del(obd->obd_nid_stats_hash,
+ lprocfs_nid_stats_clear_write_cb, &free_list);
+
+ while (!list_empty(&free_list)) {
+ client_stat = list_entry(free_list.next, struct nid_stat,
+ nid_list);
+ list_del_init(&client_stat->nid_list);
+ lprocfs_free_client_stats(client_stat);
+ }
+
+ return count;
+}
+EXPORT_SYMBOL(lprocfs_nid_stats_clear_write);
+
+int lprocfs_exp_setup(struct obd_export *exp, lnet_nid_t *nid, int *newnid)
+{
+ struct nid_stat *new_stat, *old_stat;
+ struct obd_device *obd = NULL;
+ struct proc_dir_entry *entry;
+ char *buffer = NULL;
+ int rc = 0;
+
+ *newnid = 0;
+
+ if (!exp || !exp->exp_obd || !exp->exp_obd->obd_proc_exports_entry ||
+ !exp->exp_obd->obd_nid_stats_hash)
+ return -EINVAL;
+
+ /* do not test against zero, because, as Eric says,
+ * you may only test a nid against another nid or LNET_NID_ANY;
+ * anything else is nonsense. */
+ if (!nid || *nid == LNET_NID_ANY)
+ return 0;
+
+ obd = exp->exp_obd;
+
+ CDEBUG(D_CONFIG, "using hash %p\n", obd->obd_nid_stats_hash);
+
+ OBD_ALLOC_PTR(new_stat);
+ if (new_stat == NULL)
+ return -ENOMEM;
+
+ new_stat->nid = *nid;
+ new_stat->nid_obd = exp->exp_obd;
+ /* set the default refcount to 1 to balance obd_disconnect */
+ atomic_set(&new_stat->nid_exp_ref_count, 1);
+
+ old_stat = cfs_hash_findadd_unique(obd->obd_nid_stats_hash,
+ nid, &new_stat->nid_hash);
+ CDEBUG(D_INFO, "Found stats %p for nid %s - ref %d\n",
+ old_stat, libcfs_nid2str(*nid),
+ atomic_read(&new_stat->nid_exp_ref_count));
+
+ /* We need to release the old stats because lprocfs_exp_cleanup() has
+ * not been and will never be called. */
+ if (exp->exp_nid_stats) {
+ nidstat_putref(exp->exp_nid_stats);
+ exp->exp_nid_stats = NULL;
+ }
+
+ /* Return -EALREADY here so that we know that the /proc
+ * entry has already been created. */
+ if (old_stat != new_stat) {
+ exp->exp_nid_stats = old_stat;
+ GOTO(destroy_new, rc = -EALREADY);
+ }
+ /* not found - create */
+ OBD_ALLOC(buffer, LNET_NIDSTR_SIZE);
+ if (buffer == NULL)
+ GOTO(destroy_new, rc = -ENOMEM);
+
+ memcpy(buffer, libcfs_nid2str(*nid), LNET_NIDSTR_SIZE);
+ new_stat->nid_proc = lprocfs_register(buffer,
+ obd->obd_proc_exports_entry,
+ NULL, NULL);
+ OBD_FREE(buffer, LNET_NIDSTR_SIZE);
+
+ if (IS_ERR(new_stat->nid_proc)) {
+ CERROR("Error making export directory for nid %s\n",
+ libcfs_nid2str(*nid));
+ rc = PTR_ERR(new_stat->nid_proc);
+ new_stat->nid_proc = NULL;
+ GOTO(destroy_new_ns, rc);
+ }
+
+ entry = lprocfs_add_simple(new_stat->nid_proc, "uuid",
+ new_stat, &lproc_exp_uuid_fops);
+ if (IS_ERR(entry)) {
+ CWARN("Error adding the NID stats file\n");
+ rc = PTR_ERR(entry);
+ GOTO(destroy_new_ns, rc);
+ }
+
+ entry = lprocfs_add_simple(new_stat->nid_proc, "hash",
+ new_stat, &lproc_exp_hash_fops);
+ if (IS_ERR(entry)) {
+ CWARN("Error adding the hash file\n");
+ rc = PTR_ERR(entry);
+ GOTO(destroy_new_ns, rc);
+ }
+
+ exp->exp_nid_stats = new_stat;
+ *newnid = 1;
+ /* protect against concurrent additions to the list; no locking needed on destroy */
+ spin_lock(&obd->obd_nid_lock);
+ list_add(&new_stat->nid_list, &obd->obd_nid_stats);
+ spin_unlock(&obd->obd_nid_lock);
+
+ return rc;
+
+destroy_new_ns:
+ if (new_stat->nid_proc != NULL)
+ lprocfs_remove(&new_stat->nid_proc);
+ cfs_hash_del(obd->obd_nid_stats_hash, nid, &new_stat->nid_hash);
+
+destroy_new:
+ nidstat_putref(new_stat);
+ OBD_FREE_PTR(new_stat);
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_exp_setup);
+
+int lprocfs_exp_cleanup(struct obd_export *exp)
+{
+ struct nid_stat *stat = exp->exp_nid_stats;
+
+ if (!stat || !exp->exp_obd)
+ return 0;
+
+ nidstat_putref(exp->exp_nid_stats);
+ exp->exp_nid_stats = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL(lprocfs_exp_cleanup);
+
+int lprocfs_write_helper(const char *buffer, unsigned long count,
+ int *val)
+{
+ return lprocfs_write_frac_helper(buffer, count, val, 1);
+}
+EXPORT_SYMBOL(lprocfs_write_helper);
+
+int lprocfs_write_frac_helper(const char *buffer, unsigned long count,
+ int *val, int mult)
+{
+ char kernbuf[20], *end, *pbuf;
+
+ if (count > (sizeof(kernbuf) - 1))
+ return -EINVAL;
+
+ if (copy_from_user(kernbuf, buffer, count))
+ return -EFAULT;
+
+ kernbuf[count] = '\0';
+ pbuf = kernbuf;
+ if (*pbuf == '-') {
+ mult = -mult;
+ pbuf++;
+ }
+
+ *val = (int)simple_strtoul(pbuf, &end, 10) * mult;
+ if (pbuf == end)
+ return -EINVAL;
+
+ if (end != NULL && *end == '.') {
+ int temp_val, pow = 1;
+ int i;
+
+ pbuf = end + 1;
+ if (strlen(pbuf) > 5)
+ pbuf[5] = '\0'; /* only allow 5 fractional digits */
+
+ temp_val = (int)simple_strtoul(pbuf, &end, 10) * mult;
+
+ if (pbuf < end) {
+ for (i = 0; i < (end - pbuf); i++)
+ pow *= 10;
+
+ *val += temp_val / pow;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(lprocfs_write_frac_helper);
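+
+/*
+ * Worked example (values follow directly from the code above): with
+ * mult == 100, "3.14" parses to *val == 314 and "-0.5" to *val == -50;
+ * at most five fractional digits are considered.
+ */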
+
+int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val,
+ int mult)
+{
+ long decimal_val, frac_val;
+ int prtn;
+
+ if (count < 10)
+ return -EINVAL;
+
+ decimal_val = val / mult;
+ prtn = snprintf(buffer, count, "%ld", decimal_val);
+ frac_val = val % mult;
+
+ if (prtn < (count - 4) && frac_val > 0) {
+ long temp_frac;
+ int i, temp_mult = 1, frac_bits = 0;
+
+ temp_frac = frac_val * 10;
+ buffer[prtn++] = '.';
+ while (frac_bits < 2 && (temp_frac / mult) < 1) {
+ /* only two fractional digits are kept */
+ buffer[prtn++] = '0';
+ temp_frac *= 10;
+ frac_bits++;
+ }
+ /*
+ * Cases to consider (only two fractional digits are kept):
+ * 1. #echo x.00 > /proc/xxx output result : x
+ * 2. #echo x.0x > /proc/xxx output result : x.0x
+ * 3. #echo x.x0 > /proc/xxx output result : x.x
+ * 4. #echo x.xx > /proc/xxx output result : x.xx
+ */
+ for (i = 0; i < (5 - prtn); i++)
+ temp_mult *= 10;
+
+ frac_bits = min((int)count - prtn, 3 - frac_bits);
+ prtn += snprintf(buffer + prtn, frac_bits, "%ld",
+ frac_val * temp_mult / mult);
+
+ prtn--;
+ while (buffer[prtn] < '1' || buffer[prtn] > '9') {
+ prtn--;
+ if (buffer[prtn] == '.') {
+ prtn--;
+ break;
+ }
+ }
+ prtn++;
+ }
+ buffer[prtn++] = '\n';
+ return prtn;
+}
+EXPORT_SYMBOL(lprocfs_read_frac_helper);
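+
+/*
+ * Worked example (follows from the code above): with val == 314 and
+ * mult == 100 the buffer becomes "3.14\n"; with val == 350 it becomes
+ * "3.5\n" and with val == 300 just "3\n", since at most two fractional
+ * digits are kept and trailing zeroes are stripped.
+ */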
+
+int lprocfs_seq_read_frac_helper(struct seq_file *m, long val, int mult)
+{
+ long decimal_val, frac_val;
+
+ decimal_val = val / mult;
+ seq_printf(m, "%ld", decimal_val);
+ frac_val = val % mult;
+
+ if (frac_val > 0) {
+ frac_val *= 100;
+ frac_val /= mult;
+ }
+ if (frac_val > 0) {
+ /* Three cases: x0, xx, 0x */
+ if ((frac_val % 10) != 0)
+ seq_printf(m, ".%ld", frac_val);
+ else
+ seq_printf(m, ".%ld", frac_val / 10);
+ }
+
+ seq_printf(m, "\n");
+ return 0;
+}
+EXPORT_SYMBOL(lprocfs_seq_read_frac_helper);
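+
+/*
+ * Worked example (follows from the code above): with mult == 100,
+ * val == 314 prints "3.14", val == 350 prints "3.5" and val == 300
+ * prints just "3".
+ */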
+
+int lprocfs_write_u64_helper(const char *buffer, unsigned long count,__u64 *val)
+{
+ return lprocfs_write_frac_u64_helper(buffer, count, val, 1);
+}
+EXPORT_SYMBOL(lprocfs_write_u64_helper);
+
+int lprocfs_write_frac_u64_helper(const char *buffer, unsigned long count,
+ __u64 *val, int mult)
+{
+ char kernbuf[22], *end, *pbuf;
+ __u64 whole, frac = 0, units;
+ unsigned frac_d = 1;
+
+ if (count > (sizeof(kernbuf) - 1))
+ return -EINVAL;
+
+ if (copy_from_user(kernbuf, buffer, count))
+ return -EFAULT;
+
+ kernbuf[count] = '\0';
+ pbuf = kernbuf;
+ if (*pbuf == '-') {
+ mult = -mult;
+ pbuf++;
+ }
+
+ whole = simple_strtoull(pbuf, &end, 10);
+ if (pbuf == end)
+ return -EINVAL;
+
+ if (end != NULL && *end == '.') {
+ int i;
+ pbuf = end + 1;
+
+ /* need to limit frac_d to a __u32 */
+ if (strlen(pbuf) > 10)
+ pbuf[10] = '\0';
+
+ frac = simple_strtoull(pbuf, &end, 10);
+ /* count decimal places */
+ for (i = 0; i < (end - pbuf); i++)
+ frac_d *= 10;
+ }
+
+ units = 1;
+ switch (*end) {
+ case 'p': case 'P':
+ units <<= 10;
+ case 't': case 'T':
+ units <<= 10;
+ case 'g': case 'G':
+ units <<= 10;
+ case 'm': case 'M':
+ units <<= 10;
+ case 'k': case 'K':
+ units <<= 10;
+ }
+ /* Specified units override the multiplier */
+ if (units)
+ mult = mult < 0 ? -units : units;
+
+ frac *= mult;
+ do_div(frac, frac_d);
+ *val = whole * mult + frac;
+ return 0;
+}
+EXPORT_SYMBOL(lprocfs_write_frac_u64_helper);
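+
+/*
+ * Worked example (values follow from the parsing above, with mult == 1):
+ * "16m" yields 16 << 20 == 16777216 and "1.5k" yields 1024 + 512 == 1536;
+ * the k/m/g/t/p suffixes cascade through the fall-through cases, each
+ * contributing a further << 10.
+ */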
+
+static char *lprocfs_strnstr(const char *s1, const char *s2, size_t len)
+{
+ size_t l2;
+
+ l2 = strlen(s2);
+ if (!l2)
+ return (char *)s1;
+ while (len >= l2) {
+ len--;
+ if (!memcmp(s1, s2, l2))
+ return (char *)s1;
+ s1++;
+ }
+ return NULL;
+}
+
+/**
+ * Find the string \a name in the input \a buffer, and return a pointer to the
+ * value immediately following \a name, reducing \a count appropriately.
+ * If \a name is not found the original \a buffer is returned.
+ */
+char *lprocfs_find_named_value(const char *buffer, const char *name,
+ size_t *count)
+{
+ char *val;
+ size_t buflen = *count;
+
+ /* there is no strnstr() in RHEL5 and Ubuntu kernels */
+ val = lprocfs_strnstr(buffer, name, buflen);
+ if (val == NULL)
+ return (char *)buffer;
+
+ val += strlen(name); /* skip prefix */
+ while (val < buffer + buflen && isspace(*val)) /* skip separator */
+ val++;
+
+ *count = 0;
+ while (val < buffer + buflen && isalnum(*val)) {
+ ++*count;
+ ++val;
+ }
+
+ return val - *count;
+}
+EXPORT_SYMBOL(lprocfs_find_named_value);
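+
+/*
+ * Example (illustrative only): for a buffer containing "max_dirty_mb: 512\n"
+ * and name "max_dirty_mb:", the returned pointer addresses "512" and *count
+ * is set to 3; if the name is not found, the original buffer is returned and
+ * *count is left unchanged.
+ */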
+
+int lprocfs_seq_create(struct proc_dir_entry *parent,
+ const char *name,
+ umode_t mode,
+ const struct file_operations *seq_fops,
+ void *data)
+{
+ struct proc_dir_entry *entry;
+
+ /* Disallow secretly (un)writable entries. */
+ LASSERT((seq_fops->write == NULL) == ((mode & 0222) == 0));
+ entry = proc_create_data(name, mode, parent, seq_fops, data);
+
+ if (entry == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL(lprocfs_seq_create);
+
+int lprocfs_obd_seq_create(struct obd_device *dev,
+ const char *name,
+ umode_t mode,
+ const struct file_operations *seq_fops,
+ void *data)
+{
+ return (lprocfs_seq_create(dev->obd_proc_entry, name,
+ mode, seq_fops, data));
+}
+EXPORT_SYMBOL(lprocfs_obd_seq_create);
+
+void lprocfs_oh_tally(struct obd_histogram *oh, unsigned int value)
+{
+ if (value >= OBD_HIST_MAX)
+ value = OBD_HIST_MAX - 1;
+
+ spin_lock(&oh->oh_lock);
+ oh->oh_buckets[value]++;
+ spin_unlock(&oh->oh_lock);
+}
+EXPORT_SYMBOL(lprocfs_oh_tally);
+
+void lprocfs_oh_tally_log2(struct obd_histogram *oh, unsigned int value)
+{
+ unsigned int val;
+
+ for (val = 0; ((1 << val) < value) && (val <= OBD_HIST_MAX); val++)
+ ;
+
+ lprocfs_oh_tally(oh, val);
+}
+EXPORT_SYMBOL(lprocfs_oh_tally_log2);
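+
+/*
+ * Example (follows from the loop above): lprocfs_oh_tally_log2(oh, 4096)
+ * increments bucket 12, a value of 5000 lands in bucket 13, and values of
+ * 0 or 1 land in bucket 0; lprocfs_oh_tally() clamps at OBD_HIST_MAX - 1.
+ */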
+
+unsigned long lprocfs_oh_sum(struct obd_histogram *oh)
+{
+ unsigned long ret = 0;
+ int i;
+
+ for (i = 0; i < OBD_HIST_MAX; i++)
+ ret += oh->oh_buckets[i];
+ return ret;
+}
+EXPORT_SYMBOL(lprocfs_oh_sum);
+
+void lprocfs_oh_clear(struct obd_histogram *oh)
+{
+ spin_lock(&oh->oh_lock);
+ memset(oh->oh_buckets, 0, sizeof(oh->oh_buckets));
+ spin_unlock(&oh->oh_lock);
+}
+EXPORT_SYMBOL(lprocfs_oh_clear);
+
+int lprocfs_obd_rd_max_pages_per_rpc(struct seq_file *m, void *data)
+{
+ struct obd_device *dev = data;
+ struct client_obd *cli = &dev->u.cli;
+ int rc;
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ rc = seq_printf(m, "%d\n", cli->cl_max_pages_per_rpc);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_obd_rd_max_pages_per_rpc);
+
+#endif /* LPROCFS*/
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
new file mode 100644
index 0000000..3a3d5bc
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -0,0 +1,2187 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdclass/lu_object.c
+ *
+ * Lustre Object.
+ * These are the only exported functions; they provide some generic
+ * infrastructure for managing object devices.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_CLASS
+
+#include <linux/libcfs/libcfs.h>
+
+#include <linux/module.h>
+
+/* hash_long() */
+#include <linux/libcfs/libcfs_hash.h>
+#include <obd_class.h>
+#include <obd_support.h>
+#include <lustre_disk.h>
+#include <lustre_fid.h>
+#include <lu_object.h>
+#include <lu_ref.h>
+#include <linux/list.h>
+
+static void lu_object_free(const struct lu_env *env, struct lu_object *o);
+
+/**
+ * Decrease reference counter on object. If last reference is freed, return
+ * object to the cache, unless lu_object_is_dying(o) holds. In the latter
+ * case, free object immediately.
+ */
+void lu_object_put(const struct lu_env *env, struct lu_object *o)
+{
+ struct lu_site_bkt_data *bkt;
+ struct lu_object_header *top;
+ struct lu_site *site;
+ struct lu_object *orig;
+ cfs_hash_bd_t bd;
+ const struct lu_fid *fid;
+
+ top = o->lo_header;
+ site = o->lo_dev->ld_site;
+ orig = o;
+
+ /*
+ * Until fids-on-OST is fully implemented, anonymous objects are
+ * possible in OSP. Such an object is not listed in the site, so
+ * we should not remove it from the site.
+ */
+ fid = lu_object_fid(o);
+ if (fid_is_zero(fid)) {
+ LASSERT(top->loh_hash.next == NULL
+ && top->loh_hash.pprev == NULL);
+ LASSERT(list_empty(&top->loh_lru));
+ if (!atomic_dec_and_test(&top->loh_ref))
+ return;
+ list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
+ if (o->lo_ops->loo_object_release != NULL)
+ o->lo_ops->loo_object_release(env, o);
+ }
+ lu_object_free(env, orig);
+ return;
+ }
+
+ cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
+ bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);
+
+ if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
+ if (lu_object_is_dying(top)) {
+
+ /*
+ * somebody may be waiting for this, currently only
+ * used for cl_object, see cl_object_put_last().
+ */
+ wake_up_all(&bkt->lsb_marche_funebre);
+ }
+ return;
+ }
+
+ LASSERT(bkt->lsb_busy > 0);
+ bkt->lsb_busy--;
+ /*
+ * When last reference is released, iterate over object
+ * layers, and notify them that object is no longer busy.
+ */
+ list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
+ if (o->lo_ops->loo_object_release != NULL)
+ o->lo_ops->loo_object_release(env, o);
+ }
+
+ if (!lu_object_is_dying(top)) {
+ LASSERT(list_empty(&top->loh_lru));
+ list_add_tail(&top->loh_lru, &bkt->lsb_lru);
+ cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
+ return;
+ }
+
+ /*
+ * If the object is dying (i.e. it will not be cached), remove it
+ * from the hash table and the LRU.
+ *
+ * This is done with the hash table and LRU lists locked. As the only
+ * ways to acquire the first reference to a previously unreferenced
+ * object are a hash-table lookup (lu_object_find()) or LRU scanning
+ * (lu_site_purge()), both done under the hash-table and LRU locks,
+ * no race with a concurrent lookup is possible and we can safely
+ * destroy the object below.
+ */
+ if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags))
+ cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash);
+ cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
+ /*
+ * The object was already removed from the hash and the LRU above,
+ * so we can kill it now.
+ */
+ lu_object_free(env, orig);
+}
+EXPORT_SYMBOL(lu_object_put);
+
+/**
+ * Put an object and do not keep it in the cache. This is a temporary
+ * solution for multi-site objects whose layering is not constant.
+ */
+void lu_object_put_nocache(const struct lu_env *env, struct lu_object *o)
+{
+ set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
+ return lu_object_put(env, o);
+}
+EXPORT_SYMBOL(lu_object_put_nocache);
+
+/**
+ * Kill the object and take it out of LRU cache.
+ * Currently used by client code for layout change.
+ */
+void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
+{
+ struct lu_object_header *top;
+
+ top = o->lo_header;
+ set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags);
+ if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) {
+ cfs_hash_t *obj_hash = o->lo_dev->ld_site->ls_obj_hash;
+ cfs_hash_bd_t bd;
+
+ cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
+ list_del_init(&top->loh_lru);
+ cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash);
+ cfs_hash_bd_unlock(obj_hash, &bd, 1);
+ }
+}
+EXPORT_SYMBOL(lu_object_unhash);
+
+/**
+ * Allocate new object.
+ *
+ * This follows object creation protocol, described in the comment within
+ * struct lu_device_operations definition.
+ */
+static struct lu_object *lu_object_alloc(const struct lu_env *env,
+ struct lu_device *dev,
+ const struct lu_fid *f,
+ const struct lu_object_conf *conf)
+{
+ struct lu_object *scan;
+ struct lu_object *top;
+ struct list_head *layers;
+ int clean;
+ int result;
+
+ /*
+ * Create top-level object slice. This will also create
+ * lu_object_header.
+ */
+ top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
+ if (top == NULL)
+ return ERR_PTR(-ENOMEM);
+ if (IS_ERR(top))
+ return top;
+ /*
+ * This is the only place where object fid is assigned. It's constant
+ * after this point.
+ */
+ top->lo_header->loh_fid = *f;
+ layers = &top->lo_header->loh_layers;
+ do {
+ /*
+ * Call ->loo_object_init() repeatedly, until no more new
+ * object slices are created.
+ */
+ clean = 1;
+ list_for_each_entry(scan, layers, lo_linkage) {
+ if (scan->lo_flags & LU_OBJECT_ALLOCATED)
+ continue;
+ clean = 0;
+ scan->lo_header = top->lo_header;
+ result = scan->lo_ops->loo_object_init(env, scan, conf);
+ if (result != 0) {
+ lu_object_free(env, top);
+ return ERR_PTR(result);
+ }
+ scan->lo_flags |= LU_OBJECT_ALLOCATED;
+ }
+ } while (!clean);
+
+ list_for_each_entry_reverse(scan, layers, lo_linkage) {
+ if (scan->lo_ops->loo_object_start != NULL) {
+ result = scan->lo_ops->loo_object_start(env, scan);
+ if (result != 0) {
+ lu_object_free(env, top);
+ return ERR_PTR(result);
+ }
+ }
+ }
+
+ lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED);
+ return top;
+}
+
+/**
+ * Free an object.
+ */
+static void lu_object_free(const struct lu_env *env, struct lu_object *o)
+{
+ struct lu_site_bkt_data *bkt;
+ struct lu_site *site;
+ struct lu_object *scan;
+ struct list_head *layers;
+ struct list_head splice;
+
+ site = o->lo_dev->ld_site;
+ layers = &o->lo_header->loh_layers;
+ bkt = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid);
+ /*
+ * First call ->loo_object_delete() method to release all resources.
+ */
+ list_for_each_entry_reverse(scan, layers, lo_linkage) {
+ if (scan->lo_ops->loo_object_delete != NULL)
+ scan->lo_ops->loo_object_delete(env, scan);
+ }
+
+ /*
+ * Then, splice object layers into stand-alone list, and call
+ * ->loo_object_free() on all layers to free memory. Splice is
+ * necessary, because lu_object_header is freed together with the
+ * top-level slice.
+ */
+ INIT_LIST_HEAD(&splice);
+ list_splice_init(layers, &splice);
+ while (!list_empty(&splice)) {
+ /*
+ * Free layers in bottom-to-top order, so that object header
+ * lives as long as possible and ->loo_object_free() methods
+ * can look at its contents.
+ */
+ o = container_of0(splice.prev, struct lu_object, lo_linkage);
+ list_del_init(&o->lo_linkage);
+ LASSERT(o->lo_ops->loo_object_free != NULL);
+ o->lo_ops->loo_object_free(env, o);
+ }
+
+ if (waitqueue_active(&bkt->lsb_marche_funebre))
+ wake_up_all(&bkt->lsb_marche_funebre);
+}
+
+/**
+ * Free \a nr objects from the cold end of the site LRU list.
+ */
+int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
+{
+ struct lu_object_header *h;
+ struct lu_object_header *temp;
+ struct lu_site_bkt_data *bkt;
+ cfs_hash_bd_t bd;
+ cfs_hash_bd_t bd2;
+ struct list_head dispose;
+ int did_sth;
+ int start;
+ int count;
+ int bnr;
+ int i;
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU))
+ return 0;
+
+ INIT_LIST_HEAD(&dispose);
+ /*
+ * Under LRU list lock, scan LRU list and move unreferenced objects to
+ * the dispose list, removing them from LRU and hash table.
+ */
+ start = s->ls_purge_start;
+ bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1;
+ again:
+ did_sth = 0;
+ cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
+ if (i < start)
+ continue;
+ count = bnr;
+ cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
+ bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
+
+ list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
+ LASSERT(atomic_read(&h->loh_ref) == 0);
+
+ cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
+ LASSERT(bd.bd_bucket == bd2.bd_bucket);
+
+ cfs_hash_bd_del_locked(s->ls_obj_hash,
+ &bd2, &h->loh_hash);
+ list_move(&h->loh_lru, &dispose);
+ if (did_sth == 0)
+ did_sth = 1;
+
+ if (nr != ~0 && --nr == 0)
+ break;
+
+ if (count > 0 && --count == 0)
+ break;
+
+ }
+ cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
+ cond_resched();
+ /*
+ * Free everything on the dispose list. This is safe against
+ * races due to the reasons described in lu_object_put().
+ */
+ while (!list_empty(&dispose)) {
+ h = container_of0(dispose.next,
+ struct lu_object_header, loh_lru);
+ list_del_init(&h->loh_lru);
+ lu_object_free(env, lu_object_top(h));
+ lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
+ }
+
+ if (nr == 0)
+ break;
+ }
+
+ if (nr != 0 && did_sth && start != 0) {
+ start = 0; /* restart from the first bucket */
+ goto again;
+ }
+ /* race on s->ls_purge_start, but nobody cares */
+ s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash);
+
+ return nr;
+}
+EXPORT_SYMBOL(lu_site_purge);
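+
+/*
+ * Usage sketch: passing ~0 purges every unreferenced object, as done by
+ * lu_stack_fini() later in this file; a finite \a nr frees at most that
+ * many objects. "env" and "site" are assumed to be supplied by the caller:
+ *
+ *   lu_site_purge(env, site, ~0);    drop the whole cache
+ *   lu_site_purge(env, site, 128);   trim up to 128 cold objects
+ */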
+
+/*
+ * Object printing.
+ *
+ * Code below has to jump through certain loops to output object description
+ * into libcfs_debug_msg-based log. The problem is that lu_object_print()
+ * composes object description from strings that are parts of _lines_ of
+ * output (i.e., strings that are not terminated by newline). This doesn't fit
+ * very well into the libcfs_debug_msg() interface, which assumes that each message
+ * supplied to it is a self-contained output line.
+ *
+ * To work around this, strings are collected in a temporary buffer
+ * (implemented as a value of lu_cdebug_key key), until terminating newline
+ * character is detected.
+ *
+ */
+
+enum {
+ /**
+ * Maximal line size.
+ *
+ * XXX overflow is not handled correctly.
+ */
+ LU_CDEBUG_LINE = 512
+};
+
+struct lu_cdebug_data {
+ /**
+ * Temporary buffer.
+ */
+ char lck_area[LU_CDEBUG_LINE];
+};
+
+/* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
+LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);
+
+/**
+ * Key, holding temporary buffer. This key is registered very early by
+ * lu_global_init().
+ */
+struct lu_context_key lu_global_key = {
+ .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD |
+ LCT_MG_THREAD | LCT_CL_THREAD,
+ .lct_init = lu_global_key_init,
+ .lct_fini = lu_global_key_fini
+};
+
+/**
+ * Printer function emitting messages through libcfs_debug_msg().
+ */
+int lu_cdebug_printer(const struct lu_env *env,
+ void *cookie, const char *format, ...)
+{
+ struct libcfs_debug_msg_data *msgdata = cookie;
+ struct lu_cdebug_data *key;
+ int used;
+ int complete;
+ va_list args;
+
+ va_start(args, format);
+
+ key = lu_context_key_get(&env->le_ctx, &lu_global_key);
+ LASSERT(key != NULL);
+
+ used = strlen(key->lck_area);
+ complete = format[strlen(format) - 1] == '\n';
+ /*
+ * Append new chunk to the buffer.
+ */
+ vsnprintf(key->lck_area + used,
+ ARRAY_SIZE(key->lck_area) - used, format, args);
+ if (complete) {
+ if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
+ libcfs_debug_msg(msgdata, "%s", key->lck_area);
+ key->lck_area[0] = 0;
+ }
+ va_end(args);
+ return 0;
+}
+EXPORT_SYMBOL(lu_cdebug_printer);
+
+/**
+ * Print object header.
+ */
+void lu_object_header_print(const struct lu_env *env, void *cookie,
+ lu_printer_t printer,
+ const struct lu_object_header *hdr)
+{
+ (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
+ hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
+ PFID(&hdr->loh_fid),
+ hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
+ list_empty((struct list_head *)&hdr->loh_lru) ?
+ "" : " lru",
+ hdr->loh_attr & LOHA_EXISTS ? " exist":"");
+}
+EXPORT_SYMBOL(lu_object_header_print);
+
+/**
+ * Print human readable representation of the \a o to the \a printer.
+ */
+void lu_object_print(const struct lu_env *env, void *cookie,
+ lu_printer_t printer, const struct lu_object *o)
+{
+ static const char ruler[] = "........................................";
+ struct lu_object_header *top;
+ int depth;
+
+ top = o->lo_header;
+ lu_object_header_print(env, cookie, printer, top);
+ (*printer)(env, cookie, "{ \n");
+ list_for_each_entry(o, &top->loh_layers, lo_linkage) {
+ depth = o->lo_depth + 4;
+
+ /*
+ * print `.' \a depth times followed by type name and address
+ */
+ (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
+ o->lo_dev->ld_type->ldt_name, o);
+ if (o->lo_ops->loo_object_print != NULL)
+ o->lo_ops->loo_object_print(env, cookie, printer, o);
+ (*printer)(env, cookie, "\n");
+ }
+ (*printer)(env, cookie, "} header@%p\n", top);
+}
+EXPORT_SYMBOL(lu_object_print);
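+
+/*
+ * Usage sketch: dumping a whole object stack through the printk()-based
+ * printer defined later in this file; "env" and "obj" are assumed to be
+ * supplied by the caller, and the cookie is ignored by that printer:
+ *
+ *   lu_object_print(env, NULL, lu_printk_printer, obj);
+ */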
+
+/**
+ * Check object consistency.
+ */
+int lu_object_invariant(const struct lu_object *o)
+{
+ struct lu_object_header *top;
+
+ top = o->lo_header;
+ list_for_each_entry(o, &top->loh_layers, lo_linkage) {
+ if (o->lo_ops->loo_object_invariant != NULL &&
+ !o->lo_ops->loo_object_invariant(o))
+ return 0;
+ }
+ return 1;
+}
+EXPORT_SYMBOL(lu_object_invariant);
+
+static struct lu_object *htable_lookup(struct lu_site *s,
+ cfs_hash_bd_t *bd,
+ const struct lu_fid *f,
+ wait_queue_t *waiter,
+ __u64 *version)
+{
+ struct lu_site_bkt_data *bkt;
+ struct lu_object_header *h;
+ struct hlist_node *hnode;
+ __u64 ver = cfs_hash_bd_version_get(bd);
+
+ if (*version == ver)
+ return ERR_PTR(-ENOENT);
+
+ *version = ver;
+ bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
+ /* cfs_hash_bd_peek_locked() is somewhat of an "internal" function
+ * of cfs_hash; it doesn't add a refcount on the object. */
+ hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f);
+ if (hnode == NULL) {
+ lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
+ return ERR_PTR(-ENOENT);
+ }
+
+ h = container_of0(hnode, struct lu_object_header, loh_hash);
+ if (likely(!lu_object_is_dying(h))) {
+ cfs_hash_get(s->ls_obj_hash, hnode);
+ lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
+ list_del_init(&h->loh_lru);
+ return lu_object_top(h);
+ }
+
+ /*
+ * Lookup found an object being destroyed; this object cannot be
+ * returned (to assure that references to dying objects are eventually
+ * drained), and moreover, lookup has to wait until object is freed.
+ */
+
+ init_waitqueue_entry_current(waiter);
+ add_wait_queue(&bkt->lsb_marche_funebre, waiter);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
+ return ERR_PTR(-EAGAIN);
+}
+
+/**
+ * Search cache for an object with the fid \a f. If such object is found,
+ * return it. Otherwise, create new object, insert it into cache and return
+ * it. In any case, additional reference is acquired on the returned object.
+ */
+struct lu_object *lu_object_find(const struct lu_env *env,
+ struct lu_device *dev, const struct lu_fid *f,
+ const struct lu_object_conf *conf)
+{
+ return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
+}
+EXPORT_SYMBOL(lu_object_find);
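+
+/*
+ * A minimal lookup sketch; "env", "dev" and "fid" are assumed to be set up
+ * by the caller and are not defined here:
+ *
+ *   struct lu_object *o;
+ *
+ *   o = lu_object_find(env, dev, fid, NULL);
+ *   if (IS_ERR(o))
+ *           return PTR_ERR(o);
+ *   ... use the object ...
+ *   lu_object_put(env, o);
+ */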
+
+static struct lu_object *lu_object_new(const struct lu_env *env,
+ struct lu_device *dev,
+ const struct lu_fid *f,
+ const struct lu_object_conf *conf)
+{
+ struct lu_object *o;
+ cfs_hash_t *hs;
+ cfs_hash_bd_t bd;
+ struct lu_site_bkt_data *bkt;
+
+ o = lu_object_alloc(env, dev, f, conf);
+ if (unlikely(IS_ERR(o)))
+ return o;
+
+ hs = dev->ld_site->ls_obj_hash;
+ cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
+ bkt = cfs_hash_bd_extra_get(hs, &bd);
+ cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
+ bkt->lsb_busy++;
+ cfs_hash_bd_unlock(hs, &bd, 1);
+ return o;
+}
+
+/**
+ * Core logic of lu_object_find*() functions.
+ */
+static struct lu_object *lu_object_find_try(const struct lu_env *env,
+ struct lu_device *dev,
+ const struct lu_fid *f,
+ const struct lu_object_conf *conf,
+ wait_queue_t *waiter)
+{
+ struct lu_object *o;
+ struct lu_object *shadow;
+ struct lu_site *s;
+ cfs_hash_t *hs;
+ cfs_hash_bd_t bd;
+ __u64 version = 0;
+
+ /*
+ * This uses standard index maintenance protocol:
+ *
+ * - search index under lock, and return object if found;
+ * - otherwise, unlock index, allocate new object;
+ * - lock index and search again;
+ * - if nothing is found (usual case), insert newly created
+ * object into index;
+ * - otherwise (race: other thread inserted object), free
+ * object just allocated.
+ * - unlock index;
+ * - return object.
+ *
+ * In the "LOC_F_NEW" case, we are sure the object is newly established,
+ * so it is unnecessary to perform lookup-alloc-lookup-insert; instead,
+ * just alloc and insert it directly.
+ *
+ * If dying object is found during index search, add @waiter to the
+ * site wait-queue and return ERR_PTR(-EAGAIN).
+ */
+ if (conf != NULL && conf->loc_flags & LOC_F_NEW)
+ return lu_object_new(env, dev, f, conf);
+
+ s = dev->ld_site;
+ hs = s->ls_obj_hash;
+ cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
+ o = htable_lookup(s, &bd, f, waiter, &version);
+ cfs_hash_bd_unlock(hs, &bd, 1);
+ if (!IS_ERR(o) || PTR_ERR(o) != -ENOENT)
+ return o;
+
+ /*
+ * Allocate new object. This may result in rather complicated
+ * operations, including fld queries, inode loading, etc.
+ */
+ o = lu_object_alloc(env, dev, f, conf);
+ if (unlikely(IS_ERR(o)))
+ return o;
+
+ LASSERT(lu_fid_eq(lu_object_fid(o), f));
+
+ cfs_hash_bd_lock(hs, &bd, 1);
+
+ shadow = htable_lookup(s, &bd, f, waiter, &version);
+ if (likely(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT)) {
+ struct lu_site_bkt_data *bkt;
+
+ bkt = cfs_hash_bd_extra_get(hs, &bd);
+ cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
+ bkt->lsb_busy++;
+ cfs_hash_bd_unlock(hs, &bd, 1);
+ return o;
+ }
+
+ lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
+ cfs_hash_bd_unlock(hs, &bd, 1);
+ lu_object_free(env, o);
+ return shadow;
+}
+
+/**
+ * Much like lu_object_find(), but the top level device of the object is
+ * specifically \a dev rather than the top level device of the site. This
+ * interface allows objects of different "stacking" to be created within the
+ * same site.
+ */
+struct lu_object *lu_object_find_at(const struct lu_env *env,
+ struct lu_device *dev,
+ const struct lu_fid *f,
+ const struct lu_object_conf *conf)
+{
+ struct lu_site_bkt_data *bkt;
+ struct lu_object *obj;
+ wait_queue_t wait;
+
+ while (1) {
+ obj = lu_object_find_try(env, dev, f, conf, &wait);
+ if (obj != ERR_PTR(-EAGAIN))
+ return obj;
+ /*
+ * lu_object_find_try() already added waiter into the
+ * wait queue.
+ */
+ waitq_wait(&wait, TASK_UNINTERRUPTIBLE);
+ bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
+ remove_wait_queue(&bkt->lsb_marche_funebre, &wait);
+ }
+}
+EXPORT_SYMBOL(lu_object_find_at);
+
+/**
+ * Find object with given fid, and return its slice belonging to given device.
+ */
+struct lu_object *lu_object_find_slice(const struct lu_env *env,
+ struct lu_device *dev,
+ const struct lu_fid *f,
+ const struct lu_object_conf *conf)
+{
+ struct lu_object *top;
+ struct lu_object *obj;
+
+ top = lu_object_find(env, dev, f, conf);
+ if (!IS_ERR(top)) {
+ obj = lu_object_locate(top->lo_header, dev->ld_type);
+ if (obj == NULL)
+ lu_object_put(env, top);
+ } else
+ obj = top;
+ return obj;
+}
+EXPORT_SYMBOL(lu_object_find_slice);
+
+/**
+ * Global list of all device types.
+ */
+static LIST_HEAD(lu_device_types);
+
+int lu_device_type_init(struct lu_device_type *ldt)
+{
+ int result = 0;
+
+ INIT_LIST_HEAD(&ldt->ldt_linkage);
+ if (ldt->ldt_ops->ldto_init)
+ result = ldt->ldt_ops->ldto_init(ldt);
+ if (result == 0)
+ list_add(&ldt->ldt_linkage, &lu_device_types);
+ return result;
+}
+EXPORT_SYMBOL(lu_device_type_init);
+
+void lu_device_type_fini(struct lu_device_type *ldt)
+{
+ list_del_init(&ldt->ldt_linkage);
+ if (ldt->ldt_ops->ldto_fini)
+ ldt->ldt_ops->ldto_fini(ldt);
+}
+EXPORT_SYMBOL(lu_device_type_fini);
+
+void lu_types_stop(void)
+{
+ struct lu_device_type *ldt;
+
+ list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
+ if (ldt->ldt_device_nr == 0 && ldt->ldt_ops->ldto_stop)
+ ldt->ldt_ops->ldto_stop(ldt);
+ }
+}
+EXPORT_SYMBOL(lu_types_stop);
+
+/**
+ * Global list of all sites on this node
+ */
+static LIST_HEAD(lu_sites);
+static DEFINE_MUTEX(lu_sites_guard);
+
+/**
+ * Global environment used by site shrinker.
+ */
+static struct lu_env lu_shrink_env;
+
+struct lu_site_print_arg {
+ struct lu_env *lsp_env;
+ void *lsp_cookie;
+ lu_printer_t lsp_printer;
+};
+
+static int
+lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+ struct hlist_node *hnode, void *data)
+{
+ struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
+ struct lu_object_header *h;
+
+ h = hlist_entry(hnode, struct lu_object_header, loh_hash);
+ if (!list_empty(&h->loh_layers)) {
+ const struct lu_object *o;
+
+ o = lu_object_top(h);
+ lu_object_print(arg->lsp_env, arg->lsp_cookie,
+ arg->lsp_printer, o);
+ } else {
+ lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
+ arg->lsp_printer, h);
+ }
+ return 0;
+}
+
+/**
+ * Print all objects in \a s.
+ */
+void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
+ lu_printer_t printer)
+{
+ struct lu_site_print_arg arg = {
+ .lsp_env = (struct lu_env *)env,
+ .lsp_cookie = cookie,
+ .lsp_printer = printer,
+ };
+
+ cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg);
+}
+EXPORT_SYMBOL(lu_site_print);
+
+enum {
+ LU_CACHE_PERCENT_MAX = 50,
+ LU_CACHE_PERCENT_DEFAULT = 20
+};
+
+static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
+CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644,
+ "Percentage of memory to be used as lu_object cache");
+
+/**
+ * Return desired hash table order.
+ */
+static int lu_htable_order(void)
+{
+ unsigned long cache_size;
+ int bits;
+
+ /*
+ * Calculate hash table size, assuming that we want reasonable
+ * performance when 20% of total memory is occupied by cache of
+ * lu_objects.
+ *
+ * The size of an lu_object is (arbitrarily) taken as 1K (together with inode).
+ */
+ cache_size = totalram_pages;
+
+#if BITS_PER_LONG == 32
+ /* limit hashtable size for lowmem systems to low RAM */
+ if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT))
+ cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4;
+#endif
+
+ /* Reject an unreasonable cache setting. */
+ if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
+ CWARN("obdclass: invalid lu_cache_percent: %u, it must be in"
+ " the range of (0, %u]. Will use default value: %u.\n",
+ lu_cache_percent, LU_CACHE_PERCENT_MAX,
+ LU_CACHE_PERCENT_DEFAULT);
+
+ lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
+ }
+ cache_size = cache_size / 100 * lu_cache_percent *
+ (PAGE_CACHE_SIZE / 1024);
+
+ for (bits = 1; (1 << bits) < cache_size; ++bits)
+ ;
+ return bits;
+}
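+
+/*
+ * Worked example of the sizing above, assuming a 64-bit node with 4 GiB of
+ * RAM, 4 KiB pages and the default lu_cache_percent of 20:
+ *
+ *   totalram_pages          = 1048576
+ *   1048576 / 100 * 20 * 4  = 838800 entries worth of cache
+ *   smallest bits with (1 << bits) >= 838800  ->  bits = 20
+ *
+ * lu_site_init() below then clamps the result to the range
+ * [LU_SITE_BITS_MIN, LU_SITE_BITS_MAX].
+ */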
+
+static unsigned lu_obj_hop_hash(cfs_hash_t *hs,
+ const void *key, unsigned mask)
+{
+ struct lu_fid *fid = (struct lu_fid *)key;
+ __u32 hash;
+
+ hash = fid_flatten32(fid);
+ hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
+ hash = cfs_hash_long(hash, hs->hs_bkt_bits);
+
+ /* give me another random factor */
+ hash -= cfs_hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);
+
+ hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
+ hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);
+
+ return hash & mask;
+}
+
+static void *lu_obj_hop_object(struct hlist_node *hnode)
+{
+ return hlist_entry(hnode, struct lu_object_header, loh_hash);
+}
+
+static void *lu_obj_hop_key(struct hlist_node *hnode)
+{
+ struct lu_object_header *h;
+
+ h = hlist_entry(hnode, struct lu_object_header, loh_hash);
+ return &h->loh_fid;
+}
+
+static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode)
+{
+ struct lu_object_header *h;
+
+ h = hlist_entry(hnode, struct lu_object_header, loh_hash);
+ return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
+}
+
+static void lu_obj_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+ struct lu_object_header *h;
+
+ h = hlist_entry(hnode, struct lu_object_header, loh_hash);
+ if (atomic_add_return(1, &h->loh_ref) == 1) {
+ struct lu_site_bkt_data *bkt;
+ cfs_hash_bd_t bd;
+
+ cfs_hash_bd_get(hs, &h->loh_fid, &bd);
+ bkt = cfs_hash_bd_extra_get(hs, &bd);
+ bkt->lsb_busy++;
+ }
+}
+
+static void lu_obj_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+ LBUG(); /* this should never be called */
+}
+
+cfs_hash_ops_t lu_site_hash_ops = {
+ .hs_hash = lu_obj_hop_hash,
+ .hs_key = lu_obj_hop_key,
+ .hs_keycmp = lu_obj_hop_keycmp,
+ .hs_object = lu_obj_hop_object,
+ .hs_get = lu_obj_hop_get,
+ .hs_put_locked = lu_obj_hop_put_locked,
+};
+
+void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
+{
+ spin_lock(&s->ls_ld_lock);
+ if (list_empty(&d->ld_linkage))
+ list_add(&d->ld_linkage, &s->ls_ld_linkage);
+ spin_unlock(&s->ls_ld_lock);
+}
+EXPORT_SYMBOL(lu_dev_add_linkage);
+
+void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d)
+{
+ spin_lock(&s->ls_ld_lock);
+ list_del_init(&d->ld_linkage);
+ spin_unlock(&s->ls_ld_lock);
+}
+EXPORT_SYMBOL(lu_dev_del_linkage);
+
+/**
+ * Initialize site \a s, with \a d as the top level device.
+ */
+#define LU_SITE_BITS_MIN 12
+#define LU_SITE_BITS_MAX 24
+/**
+ * 256 buckets in total; we don't want too many buckets because:
+ * - they consume too much memory
+ * - they lead to unbalanced LRU lists
+ */
+#define LU_SITE_BKT_BITS 8
+
+int lu_site_init(struct lu_site *s, struct lu_device *top)
+{
+ struct lu_site_bkt_data *bkt;
+ cfs_hash_bd_t bd;
+ char name[16];
+ int bits;
+ int i;
+
+ memset(s, 0, sizeof *s);
+ bits = lu_htable_order();
+ snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name);
+ for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
+ bits >= LU_SITE_BITS_MIN; bits--) {
+ s->ls_obj_hash = cfs_hash_create(name, bits, bits,
+ bits - LU_SITE_BKT_BITS,
+ sizeof(*bkt), 0, 0,
+ &lu_site_hash_ops,
+ CFS_HASH_SPIN_BKTLOCK |
+ CFS_HASH_NO_ITEMREF |
+ CFS_HASH_DEPTH |
+ CFS_HASH_ASSERT_EMPTY);
+ if (s->ls_obj_hash != NULL)
+ break;
+ }
+
+ if (s->ls_obj_hash == NULL) {
+ CERROR("failed to create lu_site hash with bits: %d\n", bits);
+ return -ENOMEM;
+ }
+
+ cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
+ bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
+ INIT_LIST_HEAD(&bkt->lsb_lru);
+ init_waitqueue_head(&bkt->lsb_marche_funebre);
+ }
+
+ s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
+ if (s->ls_stats == NULL) {
+ cfs_hash_putref(s->ls_obj_hash);
+ s->ls_obj_hash = NULL;
+ return -ENOMEM;
+ }
+
+ lprocfs_counter_init(s->ls_stats, LU_SS_CREATED,
+ 0, "created", "created");
+ lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT,
+ 0, "cache_hit", "cache_hit");
+ lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS,
+ 0, "cache_miss", "cache_miss");
+ lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE,
+ 0, "cache_race", "cache_race");
+ lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE,
+ 0, "cache_death_race", "cache_death_race");
+ lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
+ 0, "lru_purged", "lru_purged");
+
+ INIT_LIST_HEAD(&s->ls_linkage);
+ s->ls_top_dev = top;
+ top->ld_site = s;
+ lu_device_get(top);
+ lu_ref_add(&top->ld_reference, "site-top", s);
+
+ INIT_LIST_HEAD(&s->ls_ld_linkage);
+ spin_lock_init(&s->ls_ld_lock);
+
+ lu_dev_add_linkage(s, top);
+
+ return 0;
+}
+EXPORT_SYMBOL(lu_site_init);
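+
+/*
+ * Typical site lifecycle as sketched from the functions in this file;
+ * "s" is assumed to point at caller-allocated storage and "top" at an
+ * already initialized top-level device:
+ *
+ *   rc = lu_site_init(s, top);
+ *   if (rc == 0)
+ *           rc = lu_site_init_finish(s);
+ *   ...
+ *   lu_site_fini(s);
+ */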
+
+/**
+ * Finalize \a s and release its resources.
+ */
+void lu_site_fini(struct lu_site *s)
+{
+ mutex_lock(&lu_sites_guard);
+ list_del_init(&s->ls_linkage);
+ mutex_unlock(&lu_sites_guard);
+
+ if (s->ls_obj_hash != NULL) {
+ cfs_hash_putref(s->ls_obj_hash);
+ s->ls_obj_hash = NULL;
+ }
+
+ if (s->ls_top_dev != NULL) {
+ s->ls_top_dev->ld_site = NULL;
+ lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
+ lu_device_put(s->ls_top_dev);
+ s->ls_top_dev = NULL;
+ }
+
+ if (s->ls_stats != NULL)
+ lprocfs_free_stats(&s->ls_stats);
+}
+EXPORT_SYMBOL(lu_site_fini);
+
+/**
+ * Called when initialization of stack for this site is completed.
+ */
+int lu_site_init_finish(struct lu_site *s)
+{
+ int result;
+ mutex_lock(&lu_sites_guard);
+ result = lu_context_refill(&lu_shrink_env.le_ctx);
+ if (result == 0)
+ list_add(&s->ls_linkage, &lu_sites);
+ mutex_unlock(&lu_sites_guard);
+ return result;
+}
+EXPORT_SYMBOL(lu_site_init_finish);
+
+/**
+ * Acquire additional reference on device \a d
+ */
+void lu_device_get(struct lu_device *d)
+{
+ atomic_inc(&d->ld_ref);
+}
+EXPORT_SYMBOL(lu_device_get);
+
+/**
+ * Release reference on device \a d.
+ */
+void lu_device_put(struct lu_device *d)
+{
+ LASSERT(atomic_read(&d->ld_ref) > 0);
+ atomic_dec(&d->ld_ref);
+}
+EXPORT_SYMBOL(lu_device_put);
+
+/**
+ * Initialize device \a d of type \a t.
+ */
+int lu_device_init(struct lu_device *d, struct lu_device_type *t)
+{
+ if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
+ t->ldt_ops->ldto_start(t);
+ memset(d, 0, sizeof *d);
+ atomic_set(&d->ld_ref, 0);
+ d->ld_type = t;
+ lu_ref_init(&d->ld_reference);
+ INIT_LIST_HEAD(&d->ld_linkage);
+ return 0;
+}
+EXPORT_SYMBOL(lu_device_init);
+
+/**
+ * Finalize device \a d.
+ */
+void lu_device_fini(struct lu_device *d)
+{
+ struct lu_device_type *t;
+
+ t = d->ld_type;
+ if (d->ld_obd != NULL) {
+ d->ld_obd->obd_lu_dev = NULL;
+ d->ld_obd = NULL;
+ }
+
+ lu_ref_fini(&d->ld_reference);
+ LASSERTF(atomic_read(&d->ld_ref) == 0,
+ "Refcount is %u\n", atomic_read(&d->ld_ref));
+ LASSERT(t->ldt_device_nr > 0);
+ if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
+ t->ldt_ops->ldto_stop(t);
+}
+EXPORT_SYMBOL(lu_device_fini);
+
+/**
+ * Initialize object \a o that is part of compound object \a h and was created
+ * by device \a d.
+ */
+int lu_object_init(struct lu_object *o, struct lu_object_header *h,
+ struct lu_device *d)
+{
+ memset(o, 0, sizeof(*o));
+ o->lo_header = h;
+ o->lo_dev = d;
+ lu_device_get(d);
+ lu_ref_add_at(&d->ld_reference, &o->lo_dev_ref, "lu_object", o);
+ INIT_LIST_HEAD(&o->lo_linkage);
+
+ return 0;
+}
+EXPORT_SYMBOL(lu_object_init);
+
+/**
+ * Finalize object and release its resources.
+ */
+void lu_object_fini(struct lu_object *o)
+{
+ struct lu_device *dev = o->lo_dev;
+
+ LASSERT(list_empty(&o->lo_linkage));
+
+ if (dev != NULL) {
+ lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref,
+ "lu_object", o);
+ lu_device_put(dev);
+ o->lo_dev = NULL;
+ }
+}
+EXPORT_SYMBOL(lu_object_fini);
+
+/**
+ * Add object \a o as first layer of compound object \a h
+ *
+ * This is typically called by the ->ldo_object_alloc() method of top-level
+ * device.
+ */
+void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
+{
+ list_move(&o->lo_linkage, &h->loh_layers);
+}
+EXPORT_SYMBOL(lu_object_add_top);
+
+/**
+ * Add object \a o as a layer of compound object, going after \a before.
+ *
+ * This is typically called by the ->ldo_object_alloc() method of \a
+ * before->lo_dev.
+ */
+void lu_object_add(struct lu_object *before, struct lu_object *o)
+{
+ list_move(&o->lo_linkage, &before->lo_linkage);
+}
+EXPORT_SYMBOL(lu_object_add);
+
+/**
+ * Initialize compound object.
+ */
+int lu_object_header_init(struct lu_object_header *h)
+{
+ memset(h, 0, sizeof *h);
+ atomic_set(&h->loh_ref, 1);
+ INIT_HLIST_NODE(&h->loh_hash);
+ INIT_LIST_HEAD(&h->loh_lru);
+ INIT_LIST_HEAD(&h->loh_layers);
+ lu_ref_init(&h->loh_reference);
+ return 0;
+}
+EXPORT_SYMBOL(lu_object_header_init);
+
+/**
+ * Finalize compound object.
+ */
+void lu_object_header_fini(struct lu_object_header *h)
+{
+ LASSERT(list_empty(&h->loh_layers));
+ LASSERT(list_empty(&h->loh_lru));
+ LASSERT(hlist_unhashed(&h->loh_hash));
+ lu_ref_fini(&h->loh_reference);
+}
+EXPORT_SYMBOL(lu_object_header_fini);
+
+/**
+ * Given a compound object, find its slice, corresponding to the device type
+ * \a dtype.
+ */
+struct lu_object *lu_object_locate(struct lu_object_header *h,
+ const struct lu_device_type *dtype)
+{
+ struct lu_object *o;
+
+ list_for_each_entry(o, &h->loh_layers, lo_linkage) {
+ if (o->lo_dev->ld_type == dtype)
+ return o;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(lu_object_locate);
+
+
+
+/**
+ * Finalize and free devices in the device stack.
+ *
+ * Finalize device stack by purging object cache, and calling
+ * lu_device_type_operations::ldto_device_fini() and
+ * lu_device_type_operations::ldto_device_free() on all devices in the stack.
+ */
+void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
+{
+ struct lu_site *site = top->ld_site;
+ struct lu_device *scan;
+ struct lu_device *next;
+
+ lu_site_purge(env, site, ~0);
+ for (scan = top; scan != NULL; scan = next) {
+ next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
+ lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
+ lu_device_put(scan);
+ }
+
+ /* purge again. */
+ lu_site_purge(env, site, ~0);
+
+ for (scan = top; scan != NULL; scan = next) {
+ const struct lu_device_type *ldt = scan->ld_type;
+ struct obd_type *type;
+
+ next = ldt->ldt_ops->ldto_device_free(env, scan);
+ type = ldt->ldt_obd_type;
+ if (type != NULL) {
+ type->typ_refcnt--;
+ class_put_type(type);
+ }
+ }
+}
+EXPORT_SYMBOL(lu_stack_fini);
+
+enum {
+ /**
+ * Maximal number of tld slots.
+ */
+ LU_CONTEXT_KEY_NR = 40
+};
+
+static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
+
+static DEFINE_SPINLOCK(lu_keys_guard);
+
+/**
+ * Global counter incremented whenever key is registered, unregistered,
+ * revived or quiesced. This is used to avoid unnecessary calls to
+ * lu_context_refill(). No locking is provided, as initialization and shutdown
+ * are supposed to be externally serialized.
+ */
+static unsigned key_set_version = 0;
+
+/**
+ * Register new key.
+ */
+int lu_context_key_register(struct lu_context_key *key)
+{
+ int result;
+ int i;
+
+ LASSERT(key->lct_init != NULL);
+ LASSERT(key->lct_fini != NULL);
+ LASSERT(key->lct_tags != 0);
+
+ result = -ENFILE;
+ spin_lock(&lu_keys_guard);
+ for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
+ if (lu_keys[i] == NULL) {
+ key->lct_index = i;
+ atomic_set(&key->lct_used, 1);
+ lu_keys[i] = key;
+ lu_ref_init(&key->lct_reference);
+ result = 0;
+ ++key_set_version;
+ break;
+ }
+ }
+ spin_unlock(&lu_keys_guard);
+ return result;
+}
+EXPORT_SYMBOL(lu_context_key_register);
+
+static void key_fini(struct lu_context *ctx, int index)
+{
+ if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) {
+ struct lu_context_key *key;
+
+ key = lu_keys[index];
+ LASSERT(key != NULL);
+ LASSERT(key->lct_fini != NULL);
+ LASSERT(atomic_read(&key->lct_used) > 1);
+
+ key->lct_fini(ctx, key, ctx->lc_value[index]);
+ lu_ref_del(&key->lct_reference, "ctx", ctx);
+ atomic_dec(&key->lct_used);
+
+ if ((ctx->lc_tags & LCT_NOREF) == 0) {
+#ifdef CONFIG_MODULE_UNLOAD
+ LINVRNT(module_refcount(key->lct_owner) > 0);
+#endif
+ module_put(key->lct_owner);
+ }
+ ctx->lc_value[index] = NULL;
+ }
+}
+
+/**
+ * Deregister key.
+ */
+void lu_context_key_degister(struct lu_context_key *key)
+{
+ LASSERT(atomic_read(&key->lct_used) >= 1);
+ LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
+
+ lu_context_key_quiesce(key);
+
+ ++key_set_version;
+ spin_lock(&lu_keys_guard);
+ key_fini(&lu_shrink_env.le_ctx, key->lct_index);
+ if (lu_keys[key->lct_index]) {
+ lu_keys[key->lct_index] = NULL;
+ lu_ref_fini(&key->lct_reference);
+ }
+ spin_unlock(&lu_keys_guard);
+
+ LASSERTF(atomic_read(&key->lct_used) == 1,
+ "key has instances: %d\n",
+ atomic_read(&key->lct_used));
+}
+EXPORT_SYMBOL(lu_context_key_degister);
+
+/**
+ * Register a number of keys. This has to be called after all keys have been
+ * initialized by a call to LU_CONTEXT_KEY_INIT().
+ */
+int lu_context_key_register_many(struct lu_context_key *k, ...)
+{
+ struct lu_context_key *key = k;
+ va_list args;
+ int result;
+
+ va_start(args, k);
+ do {
+ result = lu_context_key_register(key);
+ if (result)
+ break;
+ key = va_arg(args, struct lu_context_key *);
+ } while (key != NULL);
+ va_end(args);
+
+ if (result != 0) {
+ va_start(args, k);
+ while (k != key) {
+ lu_context_key_degister(k);
+ k = va_arg(args, struct lu_context_key *);
+ }
+ va_end(args);
+ }
+
+ return result;
+}
+EXPORT_SYMBOL(lu_context_key_register_many);
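+
+/*
+ * Usage sketch: the argument list must be NULL terminated, and every key
+ * must have been prepared with LU_CONTEXT_KEY_INIT() first (see
+ * lu_global_init() below). "foo_key" and "bar_key" are hypothetical keys
+ * used only for illustration:
+ *
+ *   LU_CONTEXT_KEY_INIT(&foo_key);
+ *   LU_CONTEXT_KEY_INIT(&bar_key);
+ *   rc = lu_context_key_register_many(&foo_key, &bar_key, NULL);
+ */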
+
+/**
+ * De-register a number of keys. This is a dual to
+ * lu_context_key_register_many().
+ */
+void lu_context_key_degister_many(struct lu_context_key *k, ...)
+{
+ va_list args;
+
+ va_start(args, k);
+ do {
+ lu_context_key_degister(k);
+ k = va_arg(args, struct lu_context_key*);
+ } while (k != NULL);
+ va_end(args);
+}
+EXPORT_SYMBOL(lu_context_key_degister_many);
+
+/**
+ * Revive a number of keys.
+ */
+void lu_context_key_revive_many(struct lu_context_key *k, ...)
+{
+ va_list args;
+
+ va_start(args, k);
+ do {
+ lu_context_key_revive(k);
+ k = va_arg(args, struct lu_context_key*);
+ } while (k != NULL);
+ va_end(args);
+}
+EXPORT_SYMBOL(lu_context_key_revive_many);
+
+/**
+ * Quiesce a number of keys.
+ */
+void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
+{
+ va_list args;
+
+ va_start(args, k);
+ do {
+ lu_context_key_quiesce(k);
+ k = va_arg(args, struct lu_context_key*);
+ } while (k != NULL);
+ va_end(args);
+}
+EXPORT_SYMBOL(lu_context_key_quiesce_many);
+
+/**
+ * Return value associated with key \a key in context \a ctx.
+ */
+void *lu_context_key_get(const struct lu_context *ctx,
+ const struct lu_context_key *key)
+{
+ LINVRNT(ctx->lc_state == LCS_ENTERED);
+ LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
+ LASSERT(lu_keys[key->lct_index] == key);
+ return ctx->lc_value[key->lct_index];
+}
+EXPORT_SYMBOL(lu_context_key_get);
+
+/**
+ * List of remembered contexts. XXX document me.
+ */
+static LIST_HEAD(lu_context_remembered);
+
+/**
+ * Destroy \a key in all remembered contexts. This is used to destroy key
+ * values in "shared" contexts (like service threads), when a module owning
+ * the key is about to be unloaded.
+ */
+void lu_context_key_quiesce(struct lu_context_key *key)
+{
+ struct lu_context *ctx;
+
+ if (!(key->lct_tags & LCT_QUIESCENT)) {
+ /*
+ * XXX layering violation.
+ */
+ key->lct_tags |= LCT_QUIESCENT;
+ /*
+ * XXX memory barrier has to go here.
+ */
+ spin_lock(&lu_keys_guard);
+ list_for_each_entry(ctx, &lu_context_remembered,
+ lc_remember)
+ key_fini(ctx, key->lct_index);
+ spin_unlock(&lu_keys_guard);
+ ++key_set_version;
+ }
+}
+EXPORT_SYMBOL(lu_context_key_quiesce);
+
+void lu_context_key_revive(struct lu_context_key *key)
+{
+ key->lct_tags &= ~LCT_QUIESCENT;
+ ++key_set_version;
+}
+EXPORT_SYMBOL(lu_context_key_revive);
+
+static void keys_fini(struct lu_context *ctx)
+{
+ int i;
+
+ if (ctx->lc_value == NULL)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
+ key_fini(ctx, i);
+
+ OBD_FREE(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
+ ctx->lc_value = NULL;
+}
+
+static int keys_fill(struct lu_context *ctx)
+{
+ int i;
+
+ LINVRNT(ctx->lc_value != NULL);
+ for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
+ struct lu_context_key *key;
+
+ key = lu_keys[i];
+ if (ctx->lc_value[i] == NULL && key != NULL &&
+ (key->lct_tags & ctx->lc_tags) &&
+ /*
+ * Don't create values for an LCT_QUIESCENT key, as this
+ * would pin the module owning the key.
+ */
+ !(key->lct_tags & LCT_QUIESCENT)) {
+ void *value;
+
+ LINVRNT(key->lct_init != NULL);
+ LINVRNT(key->lct_index == i);
+
+ value = key->lct_init(ctx, key);
+ if (unlikely(IS_ERR(value)))
+ return PTR_ERR(value);
+
+ if (!(ctx->lc_tags & LCT_NOREF))
+ try_module_get(key->lct_owner);
+ lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
+ atomic_inc(&key->lct_used);
+ /*
+ * This is the only place in the code, where an
+ * element of ctx->lc_value[] array is set to non-NULL
+ * value.
+ */
+ ctx->lc_value[i] = value;
+ if (key->lct_exit != NULL)
+ ctx->lc_tags |= LCT_HAS_EXIT;
+ }
+ ctx->lc_version = key_set_version;
+ }
+ return 0;
+}
+
+static int keys_init(struct lu_context *ctx)
+{
+ OBD_ALLOC(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
+ if (likely(ctx->lc_value != NULL))
+ return keys_fill(ctx);
+
+ return -ENOMEM;
+}
+
+/**
+ * Initialize context data-structure. Create values for all keys.
+ */
+int lu_context_init(struct lu_context *ctx, __u32 tags)
+{
+ int rc;
+
+ memset(ctx, 0, sizeof *ctx);
+ ctx->lc_state = LCS_INITIALIZED;
+ ctx->lc_tags = tags;
+ if (tags & LCT_REMEMBER) {
+ spin_lock(&lu_keys_guard);
+ list_add(&ctx->lc_remember, &lu_context_remembered);
+ spin_unlock(&lu_keys_guard);
+ } else {
+ INIT_LIST_HEAD(&ctx->lc_remember);
+ }
+
+ rc = keys_init(ctx);
+ if (rc != 0)
+ lu_context_fini(ctx);
+
+ return rc;
+}
+EXPORT_SYMBOL(lu_context_init);
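+
+/*
+ * Typical context lifecycle; "ctx" is assumed to point at caller-owned
+ * storage and LCT_MD_THREAD is just one possible tag set:
+ *
+ *   rc = lu_context_init(ctx, LCT_MD_THREAD);
+ *   if (rc != 0)
+ *           return rc;
+ *   lu_context_enter(ctx);
+ *   ... key values are now reachable via lu_context_key_get() ...
+ *   lu_context_exit(ctx);
+ *   lu_context_fini(ctx);
+ */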
+
+/**
+ * Finalize context data-structure. Destroy key values.
+ */
+void lu_context_fini(struct lu_context *ctx)
+{
+ LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
+ ctx->lc_state = LCS_FINALIZED;
+
+ if ((ctx->lc_tags & LCT_REMEMBER) == 0) {
+ LASSERT(list_empty(&ctx->lc_remember));
+ keys_fini(ctx);
+
+ } else { /* could race with key degister */
+ spin_lock(&lu_keys_guard);
+ keys_fini(ctx);
+ list_del_init(&ctx->lc_remember);
+ spin_unlock(&lu_keys_guard);
+ }
+}
+EXPORT_SYMBOL(lu_context_fini);
+
+/**
+ * Called before entering context.
+ */
+void lu_context_enter(struct lu_context *ctx)
+{
+ LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
+ ctx->lc_state = LCS_ENTERED;
+}
+EXPORT_SYMBOL(lu_context_enter);
+
+/**
+ * Called after exiting from \a ctx
+ */
+void lu_context_exit(struct lu_context *ctx)
+{
+ int i;
+
+ LINVRNT(ctx->lc_state == LCS_ENTERED);
+ ctx->lc_state = LCS_LEFT;
+ if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) {
+ for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
+ if (ctx->lc_value[i] != NULL) {
+ struct lu_context_key *key;
+
+ key = lu_keys[i];
+ LASSERT(key != NULL);
+ if (key->lct_exit != NULL)
+ key->lct_exit(ctx,
+ key, ctx->lc_value[i]);
+ }
+ }
+ }
+}
+EXPORT_SYMBOL(lu_context_exit);
+
+/**
+ * Allocate for context all missing keys that were registered after context
+ * creation. key_set_version is only changed in rare cases when modules
+ * are loaded and removed.
+ */
+int lu_context_refill(struct lu_context *ctx)
+{
+ return likely(ctx->lc_version == key_set_version) ? 0 : keys_fill(ctx);
+}
+EXPORT_SYMBOL(lu_context_refill);
+
+/**
+ * lu_ctx_tags/lu_ses_tags are updated when new types of obd are added.
+ * Currently, this is only used on the client side, specifically for the echo
+ * device client. For other stacks (like ptlrpc threads), contexts are
+ * predefined when the lu_device type is registered, during the module probe
+ * phase.
+ */
+__u32 lu_context_tags_default = 0;
+__u32 lu_session_tags_default = 0;
+
+void lu_context_tags_update(__u32 tags)
+{
+ spin_lock(&lu_keys_guard);
+ lu_context_tags_default |= tags;
+ key_set_version++;
+ spin_unlock(&lu_keys_guard);
+}
+EXPORT_SYMBOL(lu_context_tags_update);
+
+void lu_context_tags_clear(__u32 tags)
+{
+ spin_lock(&lu_keys_guard);
+ lu_context_tags_default &= ~tags;
+ key_set_version++;
+ spin_unlock(&lu_keys_guard);
+}
+EXPORT_SYMBOL(lu_context_tags_clear);
+
+void lu_session_tags_update(__u32 tags)
+{
+ spin_lock(&lu_keys_guard);
+ lu_session_tags_default |= tags;
+ key_set_version++;
+ spin_unlock(&lu_keys_guard);
+}
+EXPORT_SYMBOL(lu_session_tags_update);
+
+void lu_session_tags_clear(__u32 tags)
+{
+ spin_lock(&lu_keys_guard);
+ lu_session_tags_default &= ~tags;
+ key_set_version++;
+ spin_unlock(&lu_keys_guard);
+}
+EXPORT_SYMBOL(lu_session_tags_clear);
+
+int lu_env_init(struct lu_env *env, __u32 tags)
+{
+ int result;
+
+ env->le_ses = NULL;
+ result = lu_context_init(&env->le_ctx, tags);
+ if (likely(result == 0))
+ lu_context_enter(&env->le_ctx);
+ return result;
+}
+EXPORT_SYMBOL(lu_env_init);
+
+void lu_env_fini(struct lu_env *env)
+{
+ lu_context_exit(&env->le_ctx);
+ lu_context_fini(&env->le_ctx);
+ env->le_ses = NULL;
+}
+EXPORT_SYMBOL(lu_env_fini);
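+
+/*
+ * The lu_env pairing mirrors the context one; lu_global_init() below uses
+ * exactly this pattern on lu_shrink_env. "env" is assumed to be
+ * caller-owned storage:
+ *
+ *   rc = lu_env_init(env, LCT_SHRINKER);
+ *   if (rc != 0)
+ *           return rc;
+ *   ...
+ *   lu_env_fini(env);
+ */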
+
+int lu_env_refill(struct lu_env *env)
+{
+ int result;
+
+ result = lu_context_refill(&env->le_ctx);
+ if (result == 0 && env->le_ses != NULL)
+ result = lu_context_refill(env->le_ses);
+ return result;
+}
+EXPORT_SYMBOL(lu_env_refill);
+
+/**
+ * Currently, this API is only used by the echo client, because the echo
+ * client and the normal Lustre client share the same cl_env cache. The echo
+ * client therefore needs to refresh the env context after it gets one from
+ * the cache, especially when a normal client and an echo client co-exist on
+ * the same node.
+ */
+int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags,
+ __u32 stags)
+{
+ int result;
+
+ if ((env->le_ctx.lc_tags & ctags) != ctags) {
+ env->le_ctx.lc_version = 0;
+ env->le_ctx.lc_tags |= ctags;
+ }
+
+ if (env->le_ses && (env->le_ses->lc_tags & stags) != stags) {
+ env->le_ses->lc_version = 0;
+ env->le_ses->lc_tags |= stags;
+ }
+
+ result = lu_env_refill(env);
+
+ return result;
+}
+EXPORT_SYMBOL(lu_env_refill_by_tags);
+
+
+typedef struct lu_site_stats {
+ unsigned lss_populated;
+ unsigned lss_max_search;
+ unsigned lss_total;
+ unsigned lss_busy;
+} lu_site_stats_t;
+
+static void lu_site_stats_get(cfs_hash_t *hs,
+ lu_site_stats_t *stats, int populated)
+{
+ cfs_hash_bd_t bd;
+ int i;
+
+ cfs_hash_for_each_bucket(hs, &bd, i) {
+ struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
+ struct hlist_head *hhead;
+
+ cfs_hash_bd_lock(hs, &bd, 1);
+ stats->lss_busy += bkt->lsb_busy;
+ stats->lss_total += cfs_hash_bd_count_get(&bd);
+ stats->lss_max_search = max((int)stats->lss_max_search,
+ cfs_hash_bd_depmax_get(&bd));
+ if (!populated) {
+ cfs_hash_bd_unlock(hs, &bd, 1);
+ continue;
+ }
+
+ cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
+ if (!hlist_empty(hhead))
+ stats->lss_populated++;
+ }
+ cfs_hash_bd_unlock(hs, &bd, 1);
+ }
+}
+
+
+/*
+ * There exists a potential lock inversion deadlock scenario when using
+ * Lustre on top of ZFS. This occurs between one of ZFS's
+ * buf_hash_table.ht_lock's, and Lustre's lu_sites_guard lock. Essentially,
+ * thread A will take the lu_sites_guard lock and sleep on the ht_lock,
+ * while thread B will take the ht_lock and sleep on the lu_sites_guard
+ * lock. Obviously neither thread will wake and drop their respective hold
+ * on their lock.
+ *
+ * To prevent this from happening we must ensure the lu_sites_guard lock is
+ * not taken while down this code path. ZFS reliably does not set the
+ * __GFP_FS bit in its code paths, so this can be used to determine if it
+ * is safe to take the lu_sites_guard lock.
+ *
+ * Ideally we should accurately return the remaining number of cached
+ * objects without taking the lu_sites_guard lock, but this is not
+ * possible in the current implementation.
+ */
+static unsigned long lu_cache_shrink_count(struct shrinker *sk,
+ struct shrink_control *sc)
+{
+ lu_site_stats_t stats;
+ struct lu_site *s;
+ struct lu_site *tmp;
+ unsigned long cached = 0;
+
+ if (!(sc->gfp_mask & __GFP_FS))
+ return 0;
+
+ mutex_lock(&lu_sites_guard);
+ list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
+ memset(&stats, 0, sizeof(stats));
+ lu_site_stats_get(s->ls_obj_hash, &stats, 0);
+ cached += stats.lss_total - stats.lss_busy;
+ }
+ mutex_unlock(&lu_sites_guard);
+
+ cached = (cached / 100) * sysctl_vfs_cache_pressure;
+ CDEBUG(D_INODE, "%ld objects cached\n", cached);
+ return cached;
+}
+
+static unsigned long lu_cache_shrink_scan(struct shrinker *sk,
+ struct shrink_control *sc)
+{
+ struct lu_site *s;
+ struct lu_site *tmp;
+ unsigned long remain = sc->nr_to_scan, freed = 0;
+ LIST_HEAD(splice);
+
+ if (!(sc->gfp_mask & __GFP_FS))
+ /* We must not take the lu_sites_guard lock when
+ * __GFP_FS is *not* set because of the deadlock
+ * possibility detailed above. Additionally,
+ * since we cannot determine the number of
+ * objects in the cache without taking this
+ * lock, we're in a particularly tough spot. As
+ * a result, we'll just lie and say our cache is
+ * empty. This _should_ be ok, as we can't
+ * reclaim objects when __GFP_FS is *not* set
+ * anyway.
+ */
+ return SHRINK_STOP;
+
+ mutex_lock(&lu_sites_guard);
+ list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
+ freed = lu_site_purge(&lu_shrink_env, s, remain);
+ remain -= freed;
+ /*
+ * Move just shrunk site to the tail of site list to
+ * assure shrinking fairness.
+ */
+ list_move_tail(&s->ls_linkage, &splice);
+ }
+ list_splice(&splice, lu_sites.prev);
+ mutex_unlock(&lu_sites_guard);
+
+ return sc->nr_to_scan - remain;
+}
+
+/*
+ * Debugging stuff.
+ */
+
+/**
+ * Environment to be used in debugger, contains all tags.
+ */
+struct lu_env lu_debugging_env;
+
+/**
+ * Debugging printer function using printk().
+ */
+int lu_printk_printer(const struct lu_env *env,
+ void *unused, const char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ vprintk(format, args);
+ va_end(args);
+ return 0;
+}
+
+static struct shrinker lu_site_shrinker = {
+ .count_objects = lu_cache_shrink_count,
+ .scan_objects = lu_cache_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
+/**
+ * Initialization of global lu_* data.
+ */
+int lu_global_init(void)
+{
+ int result;
+
+ CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);
+
+ result = lu_ref_global_init();
+ if (result != 0)
+ return result;
+
+ LU_CONTEXT_KEY_INIT(&lu_global_key);
+ result = lu_context_key_register(&lu_global_key);
+ if (result != 0)
+ return result;
+
+ /*
+ * At this level, we don't know what tags are needed, so allocate them
+ * conservatively. This should not be too bad, because this
+ * environment is global.
+ */
+ mutex_lock(&lu_sites_guard);
+ result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
+ mutex_unlock(&lu_sites_guard);
+ if (result != 0)
+ return result;
+
+ /*
+ * Seeks estimation: 3 seeks to read a record from the OI, one to read the
+ * inode, one for the EA. Unfortunately, setting this high a value results
+ * in the lu_object/inode cache consuming all the memory.
+ */
+ register_shrinker(&lu_site_shrinker);
+
+ return result;
+}
+
+/**
+ * Dual to lu_global_init().
+ */
+void lu_global_fini(void)
+{
+ unregister_shrinker(&lu_site_shrinker);
+ lu_context_key_degister(&lu_global_key);
+
+ /*
+ * Tear shrinker environment down _after_ de-registering
+ * lu_global_key, because the latter has a value in the former.
+ */
+ mutex_lock(&lu_sites_guard);
+ lu_env_fini(&lu_shrink_env);
+ mutex_unlock(&lu_sites_guard);
+
+ lu_ref_global_fini();
+}
+
+static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
+{
+#ifdef LPROCFS
+ struct lprocfs_counter ret;
+
+ lprocfs_stats_collect(stats, idx, &ret);
+ return (__u32)ret.lc_count;
+#else
+ return 0;
+#endif
+}
+
+/**
+ * Output site statistical counters into a buffer. Suitable for
+ * lprocfs_rd_*()-style functions.
+ */
+int lu_site_stats_print(const struct lu_site *s, struct seq_file *m)
+{
+ lu_site_stats_t stats;
+
+ memset(&stats, 0, sizeof(stats));
+ lu_site_stats_get(s->ls_obj_hash, &stats, 1);
+
+ return seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
+ stats.lss_busy,
+ stats.lss_total,
+ stats.lss_populated,
+ CFS_HASH_NHLIST(s->ls_obj_hash),
+ stats.lss_max_search,
+ ls_stats_read(s->ls_stats, LU_SS_CREATED),
+ ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
+ ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
+ ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
+ ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
+ ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
+}
+EXPORT_SYMBOL(lu_site_stats_print);
+
+/**
+ * Helper function to initialize a number of kmem slab caches at once.
+ */
+int lu_kmem_init(struct lu_kmem_descr *caches)
+{
+ int result;
+ struct lu_kmem_descr *iter = caches;
+
+ for (result = 0; iter->ckd_cache != NULL; ++iter) {
+ *iter->ckd_cache = kmem_cache_create(iter->ckd_name,
+ iter->ckd_size,
+ 0, 0, NULL);
+ if (*iter->ckd_cache == NULL) {
+ result = -ENOMEM;
+ /* free all previously allocated caches */
+ lu_kmem_fini(caches);
+ break;
+ }
+ }
+ return result;
+}
+EXPORT_SYMBOL(lu_kmem_init);
+
+/**
+ * Helper function to finalize a number of kmem slab caches at once. Dual to
+ * lu_kmem_init().
+ */
+void lu_kmem_fini(struct lu_kmem_descr *caches)
+{
+ for (; caches->ckd_cache != NULL; ++caches) {
+ if (*caches->ckd_cache != NULL) {
+ kmem_cache_destroy(*caches->ckd_cache);
+ *caches->ckd_cache = NULL;
+ }
+ }
+}
+EXPORT_SYMBOL(lu_kmem_fini);
+
+/**
+ * Temporary solution to be able to assign fid in ->do_create()
+ * till we have fully-functional OST fids
+ */
+void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
+ const struct lu_fid *fid)
+{
+ struct lu_site *s = o->lo_dev->ld_site;
+ struct lu_fid *old = &o->lo_header->loh_fid;
+ struct lu_site_bkt_data *bkt;
+ struct lu_object *shadow;
+ wait_queue_t waiter;
+ cfs_hash_t *hs;
+ cfs_hash_bd_t bd;
+ __u64 version = 0;
+
+ LASSERT(fid_is_zero(old));
+
+ hs = s->ls_obj_hash;
+ cfs_hash_bd_get_and_lock(hs, (void *)fid, &bd, 1);
+ shadow = htable_lookup(s, &bd, fid, &waiter, &version);
+ /* supposed to be unique */
+ LASSERT(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT);
+ *old = *fid;
+ bkt = cfs_hash_bd_extra_get(hs, &bd);
+ cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
+ bkt->lsb_busy++;
+ cfs_hash_bd_unlock(hs, &bd, 1);
+}
+EXPORT_SYMBOL(lu_object_assign_fid);
+
+/**
+ * Allocates an object with a zero (non-assigned) fid.
+ * XXX: temporary solution to be able to assign fid in ->do_create()
+ * till we have fully-functional OST fids
+ */
+struct lu_object *lu_object_anon(const struct lu_env *env,
+ struct lu_device *dev,
+ const struct lu_object_conf *conf)
+{
+ struct lu_fid fid;
+ struct lu_object *o;
+
+ fid_zero(&fid);
+ o = lu_object_alloc(env, dev, &fid, conf);
+
+ return o;
+}
+EXPORT_SYMBOL(lu_object_anon);
+
+struct lu_buf LU_BUF_NULL = {
+ .lb_buf = NULL,
+ .lb_len = 0
+};
+EXPORT_SYMBOL(LU_BUF_NULL);
+
+void lu_buf_free(struct lu_buf *buf)
+{
+ LASSERT(buf);
+ if (buf->lb_buf) {
+ LASSERT(buf->lb_len > 0);
+ OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
+ buf->lb_buf = NULL;
+ buf->lb_len = 0;
+ }
+}
+EXPORT_SYMBOL(lu_buf_free);
+
+void lu_buf_alloc(struct lu_buf *buf, int size)
+{
+ LASSERT(buf);
+ LASSERT(buf->lb_buf == NULL);
+ LASSERT(buf->lb_len == 0);
+ OBD_ALLOC_LARGE(buf->lb_buf, size);
+ if (likely(buf->lb_buf))
+ buf->lb_len = size;
+}
+EXPORT_SYMBOL(lu_buf_alloc);
+
+void lu_buf_realloc(struct lu_buf *buf, int size)
+{
+ lu_buf_free(buf);
+ lu_buf_alloc(buf, size);
+}
+EXPORT_SYMBOL(lu_buf_realloc);
+
+struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, int len)
+{
+ if (buf->lb_buf == NULL && buf->lb_len == 0)
+ lu_buf_alloc(buf, len);
+
+ if ((len > buf->lb_len) && (buf->lb_buf != NULL))
+ lu_buf_realloc(buf, len);
+
+ return buf;
+}
+EXPORT_SYMBOL(lu_buf_check_and_alloc);
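+
+/*
+ * Buffer helper usage sketch; "buf" is caller-owned and starts out zeroed
+ * (for instance assigned from LU_BUF_NULL above), and the sizes are only
+ * illustrative:
+ *
+ *   struct lu_buf buf = LU_BUF_NULL;
+ *
+ *   lu_buf_alloc(&buf, 4096);
+ *   if (buf.lb_buf == NULL)
+ *           return -ENOMEM;
+ *   ...
+ *   lu_buf_check_and_alloc(&buf, 8192);   grow if the need arises
+ *   lu_buf_free(&buf);
+ */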
+
+/**
+ * Increase the size of the \a buf.
+ * Preserves the old data in the buffer;
+ * the old buffer remains unchanged on error.
+ * \retval 0 or -ENOMEM
+ */
+int lu_buf_check_and_grow(struct lu_buf *buf, int len)
+{
+ char *ptr;
+
+ if (len <= buf->lb_len)
+ return 0;
+
+ OBD_ALLOC_LARGE(ptr, len);
+ if (ptr == NULL)
+ return -ENOMEM;
+
+ /* Free the old buf */
+ if (buf->lb_buf != NULL) {
+ memcpy(ptr, buf->lb_buf, buf->lb_len);
+ OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
+ }
+
+ buf->lb_buf = ptr;
+ buf->lb_len = len;
+ return 0;
+}
+EXPORT_SYMBOL(lu_buf_check_and_grow);
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_ref.c b/drivers/staging/lustre/lustre/obdclass/lu_ref.c
new file mode 100644
index 0000000..23a76f1
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/lu_ref.c
@@ -0,0 +1,50 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdclass/lu_ref.c
+ *
+ * Lustre reference.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_CLASS
+
+# include <linux/libcfs/libcfs.h>
+
+#include <obd.h>
+#include <obd_class.h>
+#include <obd_support.h>
+#include <lu_ref.h>
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_ucred.c b/drivers/staging/lustre/lustre/obdclass/lu_ucred.c
new file mode 100644
index 0000000..e23e545
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/lu_ucred.c
@@ -0,0 +1,107 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdclass/lu_ucred.c
+ *
+ * Lustre user credentials context infrastructure.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Fan Yong <fan.yong@intel.com>
+ * Author: Vitaly Fertman <vitaly_fertman@xyratex.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_CLASS
+
+#include <linux/libcfs/libcfs.h>
+#include <obd_support.h>
+#include <lu_object.h>
+#include <md_object.h>
+
+/* context key constructor/destructor: lu_ucred_key_init, lu_ucred_key_fini */
+LU_KEY_INIT_FINI(lu_ucred, struct lu_ucred);
+
+static struct lu_context_key lu_ucred_key = {
+ .lct_tags = LCT_SESSION,
+ .lct_init = lu_ucred_key_init,
+ .lct_fini = lu_ucred_key_fini
+};
+
+/**
+ * Get ucred key if session exists and ucred key is allocated on it.
+ * Return NULL otherwise.
+ */
+struct lu_ucred *lu_ucred(const struct lu_env *env)
+{
+ if (!env->le_ses)
+ return NULL;
+ return lu_context_key_get(env->le_ses, &lu_ucred_key);
+}
+EXPORT_SYMBOL(lu_ucred);
+
+/**
+ * Get ucred key and check if it is properly initialized.
+ * Return NULL otherwise.
+ */
+struct lu_ucred *lu_ucred_check(const struct lu_env *env)
+{
+ struct lu_ucred *uc = lu_ucred(env);
+ if (uc && uc->uc_valid != UCRED_OLD && uc->uc_valid != UCRED_NEW)
+ return NULL;
+ return uc;
+}
+EXPORT_SYMBOL(lu_ucred_check);
+
+/**
+ * Get ucred key, which must exist and must be properly initialized.
+ * Assert otherwise.
+ */
+struct lu_ucred *lu_ucred_assert(const struct lu_env *env)
+{
+ struct lu_ucred *uc = lu_ucred_check(env);
+ LASSERT(uc != NULL);
+ return uc;
+}
+EXPORT_SYMBOL(lu_ucred_assert);
+
+int lu_ucred_global_init(void)
+{
+ LU_CONTEXT_KEY_INIT(&lu_ucred_key);
+ return lu_context_key_register(&lu_ucred_key);
+}
+
+void lu_ucred_global_fini(void)
+{
+ lu_context_key_degister(&lu_ucred_key);
+}
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
new file mode 100644
index 0000000..be31d32
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
@@ -0,0 +1,257 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdclass/lustre_handles.c
+ *
+ * Author: Phil Schwan <phil@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_CLASS
+
+#include <obd_support.h>
+#include <lustre_handles.h>
+#include <lustre_lib.h>
+
+
+static __u64 handle_base;
+#define HANDLE_INCR 7
+static spinlock_t handle_base_lock;
+
+static struct handle_bucket {
+ spinlock_t lock;
+ struct list_head head;
+} *handle_hash;
+
+#define HANDLE_HASH_SIZE (1 << 16)
+#define HANDLE_HASH_MASK (HANDLE_HASH_SIZE - 1)
+
+/*
+ * Generate a unique 64-bit cookie (hash) for a handle and insert it into the
+ * global (per-node) hash table.
+ */
+void class_handle_hash(struct portals_handle *h,
+ struct portals_handle_ops *ops)
+{
+ struct handle_bucket *bucket;
+
+ LASSERT(h != NULL);
+ LASSERT(list_empty(&h->h_link));
+
+ /*
+	 * This is a fast but simplistic cookie generation algorithm; it will
+	 * need a re-do at some point in the future for security.
+ */
+ spin_lock(&handle_base_lock);
+ handle_base += HANDLE_INCR;
+
+ if (unlikely(handle_base == 0)) {
+ /*
+ * Cookie of zero is "dangerous", because in many places it's
+ * assumed that 0 means "unassigned" handle, not bound to any
+ * object.
+ */
+ CWARN("The universe has been exhausted: cookie wrap-around.\n");
+ handle_base += HANDLE_INCR;
+ }
+ h->h_cookie = handle_base;
+ spin_unlock(&handle_base_lock);
+
+ h->h_ops = ops;
+ spin_lock_init(&h->h_lock);
+
+ bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];
+ spin_lock(&bucket->lock);
+ list_add_rcu(&h->h_link, &bucket->head);
+ h->h_in = 1;
+ spin_unlock(&bucket->lock);
+
+ CDEBUG(D_INFO, "added object %p with handle "LPX64" to hash\n",
+ h, h->h_cookie);
+}
+EXPORT_SYMBOL(class_handle_hash);
+
+static void class_handle_unhash_nolock(struct portals_handle *h)
+{
+ if (list_empty(&h->h_link)) {
+ CERROR("removing an already-removed handle ("LPX64")\n",
+ h->h_cookie);
+ return;
+ }
+
+ CDEBUG(D_INFO, "removing object %p with handle "LPX64" from hash\n",
+ h, h->h_cookie);
+
+ spin_lock(&h->h_lock);
+ if (h->h_in == 0) {
+ spin_unlock(&h->h_lock);
+ return;
+ }
+ h->h_in = 0;
+ spin_unlock(&h->h_lock);
+ list_del_rcu(&h->h_link);
+}
+
+void class_handle_unhash(struct portals_handle *h)
+{
+ struct handle_bucket *bucket;
+ bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
+
+ spin_lock(&bucket->lock);
+ class_handle_unhash_nolock(h);
+ spin_unlock(&bucket->lock);
+}
+EXPORT_SYMBOL(class_handle_unhash);
+
+void class_handle_hash_back(struct portals_handle *h)
+{
+ struct handle_bucket *bucket;
+
+ bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
+
+ spin_lock(&bucket->lock);
+ list_add_rcu(&h->h_link, &bucket->head);
+ h->h_in = 1;
+ spin_unlock(&bucket->lock);
+}
+EXPORT_SYMBOL(class_handle_hash_back);
+
+void *class_handle2object(__u64 cookie)
+{
+ struct handle_bucket *bucket;
+ struct portals_handle *h;
+ void *retval = NULL;
+
+ LASSERT(handle_hash != NULL);
+
+	/* Be careful when you want to change this code. See the
+	 * rcu_read_lock() definition at the top of this file. - jxiong */
+ bucket = handle_hash + (cookie & HANDLE_HASH_MASK);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(h, &bucket->head, h_link) {
+ if (h->h_cookie != cookie)
+ continue;
+
+ spin_lock(&h->h_lock);
+ if (likely(h->h_in != 0)) {
+ h->h_ops->hop_addref(h);
+ retval = h;
+ }
+ spin_unlock(&h->h_lock);
+ break;
+ }
+ rcu_read_unlock();
+
+ return retval;
+}
+EXPORT_SYMBOL(class_handle2object);
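+
+/*
+ * Usage sketch (illustrative only; "my_obj", "my_obj_ops" and the embedded
+ * field name are hypothetical): a subsystem embeds a struct portals_handle
+ * in its object, hashes it to obtain a cookie that can be passed over the
+ * wire, and later resolves the cookie back to the handle (which takes a
+ * reference through hop_addref()):
+ *
+ *	class_handle_hash(&my_obj->mo_handle, &my_obj_ops);
+ *	cookie = my_obj->mo_handle.h_cookie;
+ *	...
+ *	h = class_handle2object(cookie);
+ *	my_obj = container_of(h, struct my_obj, mo_handle);
+ *	...
+ *	class_handle_unhash(&my_obj->mo_handle);
+ */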
+
+void class_handle_free_cb(cfs_rcu_head_t *rcu)
+{
+ struct portals_handle *h = RCU2HANDLE(rcu);
+ void *ptr = (void *)(unsigned long)h->h_cookie;
+
+ if (h->h_ops->hop_free != NULL)
+ h->h_ops->hop_free(ptr, h->h_size);
+ else
+ OBD_FREE(ptr, h->h_size);
+}
+EXPORT_SYMBOL(class_handle_free_cb);
+
+int class_handle_init(void)
+{
+ struct handle_bucket *bucket;
+ struct timeval tv;
+ int seed[2];
+
+ LASSERT(handle_hash == NULL);
+
+ OBD_ALLOC_LARGE(handle_hash, sizeof(*bucket) * HANDLE_HASH_SIZE);
+ if (handle_hash == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&handle_base_lock);
+ for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
+ bucket--) {
+ INIT_LIST_HEAD(&bucket->head);
+ spin_lock_init(&bucket->lock);
+ }
+
+ /** bug 21430: add randomness to the initial base */
+ cfs_get_random_bytes(seed, sizeof(seed));
+ do_gettimeofday(&tv);
+ cfs_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);
+
+ cfs_get_random_bytes(&handle_base, sizeof(handle_base));
+ LASSERT(handle_base != 0ULL);
+
+ return 0;
+}
+
+static int cleanup_all_handles(void)
+{
+ int rc;
+ int i;
+
+ for (rc = i = 0; i < HANDLE_HASH_SIZE; i++) {
+ struct portals_handle *h;
+
+ spin_lock(&handle_hash[i].lock);
+ list_for_each_entry_rcu(h, &(handle_hash[i].head), h_link) {
+ CERROR("force clean handle "LPX64" addr %p ops %p\n",
+ h->h_cookie, h, h->h_ops);
+
+ class_handle_unhash_nolock(h);
+ rc++;
+ }
+ spin_unlock(&handle_hash[i].lock);
+ }
+
+ return rc;
+}
+
+void class_handle_cleanup(void)
+{
+ int count;
+ LASSERT(handle_hash != NULL);
+
+ count = cleanup_all_handles();
+
+ OBD_FREE_LARGE(handle_hash, sizeof(*handle_hash) * HANDLE_HASH_SIZE);
+ handle_hash = NULL;
+
+ if (count != 0)
+ CERROR("handle_count at cleanup: %d\n", count);
+}
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
new file mode 100644
index 0000000..df4936a
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
@@ -0,0 +1,217 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_RPC
+
+#include <obd.h>
+#include <obd_support.h>
+#include <obd_class.h>
+#include <lustre_lib.h>
+#include <lustre_ha.h>
+#include <lustre_net.h>
+#include <lprocfs_status.h>
+
+#define NIDS_MAX 32
+
+struct uuid_nid_data {
+ struct list_head un_list;
+ struct obd_uuid un_uuid;
+ int un_nid_count;
+ lnet_nid_t un_nids[NIDS_MAX];
+};
+
+/* FIXME: This should probably become more elegant than a global linked list */
+static struct list_head g_uuid_list;
+static spinlock_t g_uuid_lock;
+
+void class_init_uuidlist(void)
+{
+ INIT_LIST_HEAD(&g_uuid_list);
+ spin_lock_init(&g_uuid_lock);
+}
+
+void class_exit_uuidlist(void)
+{
+ /* delete all */
+ class_del_uuid(NULL);
+}
+
+int lustre_uuid_to_peer(const char *uuid, lnet_nid_t *peer_nid, int index)
+{
+ struct uuid_nid_data *data;
+ struct obd_uuid tmp;
+ int rc = -ENOENT;
+
+ obd_str2uuid(&tmp, uuid);
+ spin_lock(&g_uuid_lock);
+ list_for_each_entry(data, &g_uuid_list, un_list) {
+ if (obd_uuid_equals(&data->un_uuid, &tmp)) {
+ if (index >= data->un_nid_count)
+ break;
+
+ rc = 0;
+ *peer_nid = data->un_nids[index];
+ break;
+ }
+ }
+ spin_unlock(&g_uuid_lock);
+ return rc;
+}
+EXPORT_SYMBOL(lustre_uuid_to_peer);
+
+/* Add a nid to a niduuid. Multiple nids can be added to a single uuid;
+ LNET will choose the best one. */
+int class_add_uuid(const char *uuid, __u64 nid)
+{
+ struct uuid_nid_data *data, *entry;
+ int found = 0;
+
+ LASSERT(nid != 0); /* valid newconfig NID is never zero */
+
+ if (strlen(uuid) > UUID_MAX - 1)
+ return -EOVERFLOW;
+
+ OBD_ALLOC_PTR(data);
+ if (data == NULL)
+ return -ENOMEM;
+
+ obd_str2uuid(&data->un_uuid, uuid);
+ data->un_nids[0] = nid;
+ data->un_nid_count = 1;
+
+ spin_lock(&g_uuid_lock);
+ list_for_each_entry(entry, &g_uuid_list, un_list) {
+ if (obd_uuid_equals(&entry->un_uuid, &data->un_uuid)) {
+ int i;
+
+ found = 1;
+ for (i = 0; i < entry->un_nid_count; i++)
+ if (nid == entry->un_nids[i])
+ break;
+
+ if (i == entry->un_nid_count) {
+ LASSERT(entry->un_nid_count < NIDS_MAX);
+ entry->un_nids[entry->un_nid_count++] = nid;
+ }
+ break;
+ }
+ }
+ if (!found)
+ list_add(&data->un_list, &g_uuid_list);
+ spin_unlock(&g_uuid_lock);
+
+ if (found) {
+ CDEBUG(D_INFO, "found uuid %s %s cnt=%d\n", uuid,
+ libcfs_nid2str(nid), entry->un_nid_count);
+ OBD_FREE(data, sizeof(*data));
+ } else {
+ CDEBUG(D_INFO, "add uuid %s %s\n", uuid, libcfs_nid2str(nid));
+ }
+ return 0;
+}
+EXPORT_SYMBOL(class_add_uuid);
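+
+/*
+ * Usage sketch (illustrative only; the uuid string and try_connect() are
+ * hypothetical): the list is typically populated from LCFG_ADD_UUID config
+ * records and then queried by index until -ENOENT is returned:
+ *
+ *	class_add_uuid("peer-uuid", nid);
+ *	for (i = 0; lustre_uuid_to_peer("peer-uuid", &nid, i) == 0; i++)
+ *		try_connect(nid);
+ */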
+
+/* Delete the nids for one uuid if specified, otherwise delete all */
+int class_del_uuid(const char *uuid)
+{
+ LIST_HEAD(deathrow);
+ struct uuid_nid_data *data;
+
+ spin_lock(&g_uuid_lock);
+ if (uuid != NULL) {
+ struct obd_uuid tmp;
+
+ obd_str2uuid(&tmp, uuid);
+ list_for_each_entry(data, &g_uuid_list, un_list) {
+ if (obd_uuid_equals(&data->un_uuid, &tmp)) {
+ list_move(&data->un_list, &deathrow);
+ break;
+ }
+ }
+ } else
+ list_splice_init(&g_uuid_list, &deathrow);
+ spin_unlock(&g_uuid_lock);
+
+ if (uuid != NULL && list_empty(&deathrow)) {
+ CDEBUG(D_INFO, "Try to delete a non-existent uuid %s\n", uuid);
+ return -EINVAL;
+ }
+
+ while (!list_empty(&deathrow)) {
+ data = list_entry(deathrow.next, struct uuid_nid_data,
+ un_list);
+ list_del(&data->un_list);
+
+ CDEBUG(D_INFO, "del uuid %s %s/%d\n",
+ obd_uuid2str(&data->un_uuid),
+ libcfs_nid2str(data->un_nids[0]),
+ data->un_nid_count);
+
+ OBD_FREE(data, sizeof(*data));
+ }
+
+ return 0;
+}
+
+/* check if @nid exists in nid list of @uuid */
+int class_check_uuid(struct obd_uuid *uuid, __u64 nid)
+{
+ struct uuid_nid_data *entry;
+ int found = 0;
+
+ CDEBUG(D_INFO, "check if uuid %s has %s.\n",
+ obd_uuid2str(uuid), libcfs_nid2str(nid));
+
+ spin_lock(&g_uuid_lock);
+ list_for_each_entry(entry, &g_uuid_list, un_list) {
+ int i;
+
+ if (!obd_uuid_equals(&entry->un_uuid, uuid))
+ continue;
+
+ /* found the uuid, check if it has @nid */
+ for (i = 0; i < entry->un_nid_count; i++) {
+ if (entry->un_nids[i] == nid) {
+ found = 1;
+ break;
+ }
+ }
+ break;
+ }
+ spin_unlock(&g_uuid_lock);
+ return found;
+}
+EXPORT_SYMBOL(class_check_uuid);
diff --git a/drivers/staging/lustre/lustre/obdclass/md_attrs.c b/drivers/staging/lustre/lustre/obdclass/md_attrs.c
new file mode 100644
index 0000000..f718782
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/md_attrs.c
@@ -0,0 +1,199 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2012, Intel Corporation.
+ * Use is subject to license terms.
+ *
+ * Author: Johann Lombardi <johann.lombardi@intel.com>
+ */
+
+#include <lustre/lustre_idl.h>
+#include <obd.h>
+#include <md_object.h>
+
+/**
+ * Initialize new \a lma. Only fid is stored.
+ *
+ * \param lma - is the new LMA structure to be initialized
+ * \param fid - is the FID of the object this LMA belongs to
+ * \param incompat - features that MDS must understand to access object
+ */
+void lustre_lma_init(struct lustre_mdt_attrs *lma, const struct lu_fid *fid,
+ __u32 incompat)
+{
+ lma->lma_compat = 0;
+ lma->lma_incompat = incompat;
+ lma->lma_self_fid = *fid;
+
+ /* If a field is added in struct lustre_mdt_attrs, zero it explicitly
+ * and change the test below. */
+ LASSERT(sizeof(*lma) ==
+ (offsetof(struct lustre_mdt_attrs, lma_self_fid) +
+ sizeof(lma->lma_self_fid)));
+};
+EXPORT_SYMBOL(lustre_lma_init);
+
+/**
+ * Swab, if needed, LMA structure which is stored on-disk in little-endian order.
+ *
+ * \param lma - is a pointer to the LMA structure to be swabbed.
+ */
+void lustre_lma_swab(struct lustre_mdt_attrs *lma)
+{
+	/* Use LUSTRE_MSG_MAGIC to detect local endianness. */
+ if (LUSTRE_MSG_MAGIC != cpu_to_le32(LUSTRE_MSG_MAGIC)) {
+ __swab32s(&lma->lma_compat);
+ __swab32s(&lma->lma_incompat);
+ lustre_swab_lu_fid(&lma->lma_self_fid);
+ }
+};
+EXPORT_SYMBOL(lustre_lma_swab);
+
+/**
+ * Swab, if needed, SOM structure which is stored on-disk in little-endian
+ * order.
+ *
+ * \param attrs - is a pointer to the SOM structure to be swabbed.
+ */
+void lustre_som_swab(struct som_attrs *attrs)
+{
+	/* Use LUSTRE_MSG_MAGIC to detect local endianness. */
+ if (LUSTRE_MSG_MAGIC != cpu_to_le32(LUSTRE_MSG_MAGIC)) {
+ __swab32s(&attrs->som_compat);
+ __swab32s(&attrs->som_incompat);
+ __swab64s(&attrs->som_ioepoch);
+ __swab64s(&attrs->som_size);
+ __swab64s(&attrs->som_blocks);
+ __swab64s(&attrs->som_mountid);
+ }
+};
+EXPORT_SYMBOL(lustre_som_swab);
+
+/*
+ * Swab and extract SOM attributes from on-disk xattr.
+ *
+ * \param buf - is a buffer containing the on-disk SOM extended attribute.
+ * \param rc - is the result of fetching the xattr: the size of the SOM xattr
+ *             stored in \a buf, or a negative error code
+ * \param msd - is the md_som_data structure into which the SOM attributes are
+ *              extracted
+ */
+int lustre_buf2som(void *buf, int rc, struct md_som_data *msd)
+{
+ struct som_attrs *attrs = (struct som_attrs *)buf;
+
+ if (rc == 0 || rc == -ENODATA)
+ /* no SOM attributes */
+ return -ENODATA;
+
+ if (rc < 0)
+ /* error hit while fetching xattr */
+ return rc;
+
+ /* check SOM compatibility */
+ if (attrs->som_incompat & ~cpu_to_le32(SOM_INCOMPAT_SUPP))
+ return -ENODATA;
+
+ /* unpack SOM attributes */
+ lustre_som_swab(attrs);
+
+ /* fill in-memory msd structure */
+ msd->msd_compat = attrs->som_compat;
+ msd->msd_incompat = attrs->som_incompat;
+ msd->msd_ioepoch = attrs->som_ioepoch;
+ msd->msd_size = attrs->som_size;
+ msd->msd_blocks = attrs->som_blocks;
+ msd->msd_mountid = attrs->som_mountid;
+
+ return 0;
+}
+EXPORT_SYMBOL(lustre_buf2som);
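+
+/*
+ * Call-convention sketch (illustrative only; obj_get_xattr() is a
+ * hypothetical fetch helper): the \a rc argument is simply the raw return
+ * value of the xattr fetch, so a typical call site looks like:
+ *
+ *	struct md_som_data msd;
+ *
+ *	rc = obj_get_xattr(obj, buf, buflen);
+ *	rc = lustre_buf2som(buf, rc, &msd);
+ *	if (rc == -ENODATA)
+ *		return 0;	(no SOM attribute on this object)
+ */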
+
+/**
+ * Swab, if needed, HSM structure which is stored on-disk in little-endian
+ * order.
+ *
+ * \param attrs - is a pointer to the HSM structure to be swabbed.
+ */
+void lustre_hsm_swab(struct hsm_attrs *attrs)
+{
+	/* Use LUSTRE_MSG_MAGIC to detect local endianness. */
+ if (LUSTRE_MSG_MAGIC != cpu_to_le32(LUSTRE_MSG_MAGIC)) {
+ __swab32s(&attrs->hsm_compat);
+ __swab32s(&attrs->hsm_flags);
+ __swab64s(&attrs->hsm_arch_id);
+ __swab64s(&attrs->hsm_arch_ver);
+ }
+};
+EXPORT_SYMBOL(lustre_hsm_swab);
+
+/*
+ * Swab and extract HSM attributes from on-disk xattr.
+ *
+ * \param buf - is a buffer containing the on-disk HSM extended attribute.
+ * \param rc - is the result of fetching the xattr: the size of the HSM xattr
+ *             stored in \a buf, or a negative error code
+ * \param mh - is the md_hsm structure into which the HSM attributes are
+ *             extracted
+ */
+int lustre_buf2hsm(void *buf, int rc, struct md_hsm *mh)
+{
+ struct hsm_attrs *attrs = (struct hsm_attrs *)buf;
+
+ if (rc == 0 || rc == -ENODATA)
+ /* no HSM attributes */
+ return -ENODATA;
+
+ if (rc < 0)
+ /* error hit while fetching xattr */
+ return rc;
+
+ /* unpack HSM attributes */
+ lustre_hsm_swab(attrs);
+
+ /* fill md_hsm structure */
+ mh->mh_compat = attrs->hsm_compat;
+ mh->mh_flags = attrs->hsm_flags;
+ mh->mh_arch_id = attrs->hsm_arch_id;
+ mh->mh_arch_ver = attrs->hsm_arch_ver;
+
+ return 0;
+}
+EXPORT_SYMBOL(lustre_buf2hsm);
+
+/*
+ * Pack HSM attributes.
+ *
+ * \param buf - is the output buffer into which the on-disk HSM xattr is
+ *              packed.
+ * \param mh - is the md_hsm structure to pack.
+ */
+void lustre_hsm2buf(void *buf, struct md_hsm *mh)
+{
+ struct hsm_attrs *attrs = (struct hsm_attrs *)buf;
+
+ /* copy HSM attributes */
+ attrs->hsm_compat = mh->mh_compat;
+ attrs->hsm_flags = mh->mh_flags;
+ attrs->hsm_arch_id = mh->mh_arch_id;
+ attrs->hsm_arch_ver = mh->mh_arch_ver;
+
+ /* pack xattr */
+ lustre_hsm_swab(attrs);
+}
+EXPORT_SYMBOL(lustre_hsm2buf);
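+
+/*
+ * Round-trip sketch (illustrative only): lustre_hsm2buf() packs an in-memory
+ * md_hsm into the on-disk little-endian layout and lustre_buf2hsm() reverses
+ * it, so the following recovers the original compat/flags/archive fields:
+ *
+ *	char buf[sizeof(struct hsm_attrs)];
+ *	struct md_hsm out;
+ *
+ *	lustre_hsm2buf(buf, &mh);
+ *	lustre_buf2hsm(buf, sizeof(struct hsm_attrs), &out);
+ */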
diff --git a/drivers/staging/lustre/lustre/obdclass/mea.c b/drivers/staging/lustre/lustre/obdclass/mea.c
new file mode 100644
index 0000000..c4f0dbc
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/mea.c
@@ -0,0 +1,112 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_CLASS
+#include <obd_class.h>
+#include <linux/kmod.h> /* for request_module() */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <lprocfs_status.h>
+#include <lustre/lustre_idl.h>
+
+static int mea_last_char_hash(int count, char *name, int namelen)
+{
+ unsigned int c;
+
+ c = name[namelen - 1];
+ if (c == 0)
+		CWARN("looks like a wrong length was passed\n");
+ c = c % count;
+ return c;
+}
+
+static int mea_all_chars_hash(int count, char *name, int namelen)
+{
+ unsigned int c = 0;
+
+ while (--namelen >= 0)
+ c += name[namelen];
+ c = c % count;
+ return c;
+}
+
+int raw_name2idx(int hashtype, int count, const char *name, int namelen)
+{
+ unsigned int c = 0;
+ int idx;
+
+ LASSERT(namelen > 0);
+
+ if (filename_is_volatile(name, namelen, &idx)) {
+ if ((idx >= 0) && (idx < count))
+ return idx;
+ goto hashchoice;
+ }
+
+ if (count <= 1)
+ return 0;
+
+hashchoice:
+ switch (hashtype) {
+ case MEA_MAGIC_LAST_CHAR:
+ c = mea_last_char_hash(count, (char *)name, namelen);
+ break;
+ case MEA_MAGIC_ALL_CHARS:
+ c = mea_all_chars_hash(count, (char *)name, namelen);
+ break;
+ case MEA_MAGIC_HASH_SEGMENT:
+ CERROR("Unsupported hash type MEA_MAGIC_HASH_SEGMENT\n");
+ break;
+ default:
+ CERROR("Unknown hash type 0x%x\n", hashtype);
+ }
+
+ LASSERT(c < count);
+ return c;
+}
+EXPORT_SYMBOL(raw_name2idx);
+
+int mea_name2idx(struct lmv_stripe_md *mea, const char *name, int namelen)
+{
+ unsigned int c;
+
+ LASSERT(mea && mea->mea_count);
+
+ c = raw_name2idx(mea->mea_magic, mea->mea_count, name, namelen);
+
+ LASSERT(c < mea->mea_count);
+ return c;
+}
+EXPORT_SYMBOL(mea_name2idx);
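+
+/*
+ * Usage sketch (illustrative only): for a directory striped over several
+ * MDTs, the stripe responsible for a given entry name is chosen purely from
+ * the name hash, e.g.
+ *
+ *	idx = mea_name2idx(mea, name, strlen(name));
+ *
+ * and the caller then looks up the target for stripe idx in the
+ * lmv_stripe_md (the exact field holding the per-stripe targets is not shown
+ * here).
+ */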
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
new file mode 100644
index 0000000..d0a64ff
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -0,0 +1,1882 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdclass/obd_config.c
+ *
+ * Config API
+ */
+
+#define DEBUG_SUBSYSTEM S_CLASS
+#include <obd_class.h>
+#include <linux/string.h>
+#include <lustre_log.h>
+#include <lprocfs_status.h>
+#include <lustre_param.h>
+
+#include "llog_internal.h"
+
+static cfs_hash_ops_t uuid_hash_ops;
+static cfs_hash_ops_t nid_hash_ops;
+static cfs_hash_ops_t nid_stat_hash_ops;
+
+/*********** string parsing utils *********/
+
+/* returns 0 if we find this key in the buffer, else 1 */
+int class_find_param(char *buf, char *key, char **valp)
+{
+ char *ptr;
+
+ if (!buf)
+ return 1;
+
+ if ((ptr = strstr(buf, key)) == NULL)
+ return 1;
+
+ if (valp)
+ *valp = ptr + strlen(key);
+
+ return 0;
+}
+EXPORT_SYMBOL(class_find_param);
+
+/**
+ * Check whether the proc parameter \a param is an old parameter or not from
+ * the array \a ptr which contains the mapping from old parameters to new
+ * ones. If it's an old one, then return the pointer to the cfg_interop_param
+ * structure which contains both the old and new parameters.
+ *
+ * \param param proc parameter
+ * \param ptr an array which contains the mapping from
+ * old parameters to new ones
+ *
+ * \retval valid-pointer pointer to the cfg_interop_param structure
+ * which contains the old and new parameters
+ * \retval NULL \a param or \a ptr is NULL,
+ * or \a param is not an old parameter
+ */
+struct cfg_interop_param *class_find_old_param(const char *param,
+ struct cfg_interop_param *ptr)
+{
+ char *value = NULL;
+ int name_len = 0;
+
+ if (param == NULL || ptr == NULL)
+ return NULL;
+
+ value = strchr(param, '=');
+ if (value == NULL)
+ name_len = strlen(param);
+ else
+ name_len = value - param;
+
+ while (ptr->old_param != NULL) {
+ if (strncmp(param, ptr->old_param, name_len) == 0 &&
+ name_len == strlen(ptr->old_param))
+ return ptr;
+ ptr++;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(class_find_old_param);
+
+/**
+ * Finds a parameter in \a params and copies it to \a copy.
+ *
+ * Leading spaces are skipped. Next space or end of string is the
+ * parameter terminator with the exception that spaces inside single or double
+ * quotes get included into a parameter. The parameter is copied into \a copy
+ * which has to be allocated large enough by the caller; quotes are stripped
+ * in the copy and the copy is NUL-terminated.
+ *
+ * On return \a params is set to next parameter or to NULL if last
+ * parameter is returned.
+ *
+ * \retval 0 if parameter is returned in \a copy
+ * \retval 1 otherwise
+ * \retval -EINVAL if an unbalanced quote is found
+ */
+int class_get_next_param(char **params, char *copy)
+{
+ char *q1, *q2, *str;
+ int len;
+
+ str = *params;
+ while (*str == ' ')
+ str++;
+
+ if (*str == '\0') {
+ *params = NULL;
+ return 1;
+ }
+
+ while (1) {
+ q1 = strpbrk(str, " '\"");
+ if (q1 == NULL) {
+ len = strlen(str);
+ memcpy(copy, str, len);
+ copy[len] = '\0';
+ *params = NULL;
+ return 0;
+ }
+ len = q1 - str;
+ if (*q1 == ' ') {
+ memcpy(copy, str, len);
+ copy[len] = '\0';
+ *params = str + len;
+ return 0;
+ }
+
+ memcpy(copy, str, len);
+ copy += len;
+
+ /* search for the matching closing quote */
+ str = q1 + 1;
+ q2 = strchr(str, *q1);
+ if (q2 == NULL) {
+			CERROR("Unbalanced quote in parameters: \"%s\"\n",
+ *params);
+ return -EINVAL;
+ }
+ len = q2 - str;
+ memcpy(copy, str, len);
+ copy += len;
+ str = q2 + 1;
+ }
+ return 1;
+}
+EXPORT_SYMBOL(class_get_next_param);
+
+/* returns 0 if this is the first key in the buffer, else 1.
+ valp points to first char after key. */
+int class_match_param(char *buf, char *key, char **valp)
+{
+ if (!buf)
+ return 1;
+
+ if (memcmp(buf, key, strlen(key)) != 0)
+ return 1;
+
+ if (valp)
+ *valp = buf + strlen(key);
+
+ return 0;
+}
+EXPORT_SYMBOL(class_match_param);
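+
+/*
+ * Example (illustrative only; the parameter string is hypothetical): for
+ * buf = "failover.node=192.168.0.1@tcp", class_match_param() returns 0 with
+ * val pointing at "192.168.0.1@tcp", whereas class_find_param() accepts the
+ * key anywhere inside the buffer, not only at the start:
+ *
+ *	char *val;
+ *	lnet_nid_t nid;
+ *
+ *	if (class_match_param(buf, "failover.node=", &val) == 0)
+ *		rc = class_parse_nid(val, &nid, &val);
+ */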
+
+static int parse_nid(char *buf, void *value, int quiet)
+{
+ lnet_nid_t *nid = (lnet_nid_t *)value;
+
+ *nid = libcfs_str2nid(buf);
+ if (*nid != LNET_NID_ANY)
+ return 0;
+
+ if (!quiet)
+ LCONSOLE_ERROR_MSG(0x159, "Can't parse NID '%s'\n", buf);
+ return -EINVAL;
+}
+
+static int parse_net(char *buf, void *value)
+{
+ __u32 *net = (__u32 *)value;
+
+ *net = libcfs_str2net(buf);
+ CDEBUG(D_INFO, "Net %s\n", libcfs_net2str(*net));
+ return 0;
+}
+
+enum {
+ CLASS_PARSE_NID = 1,
+ CLASS_PARSE_NET,
+};
+
+/* 0 is good nid,
+ 1 not found
+ < 0 error
+ endh is set to next separator */
+static int class_parse_value(char *buf, int opc, void *value, char **endh,
+ int quiet)
+{
+ char *endp;
+ char tmp;
+ int rc = 0;
+
+ if (!buf)
+ return 1;
+ while (*buf == ',' || *buf == ':')
+ buf++;
+ if (*buf == ' ' || *buf == '/' || *buf == '\0')
+ return 1;
+
+ /* nid separators or end of nids */
+ endp = strpbrk(buf, ",: /");
+ if (endp == NULL)
+ endp = buf + strlen(buf);
+
+ tmp = *endp;
+ *endp = '\0';
+ switch (opc) {
+ default:
+ LBUG();
+ case CLASS_PARSE_NID:
+ rc = parse_nid(buf, value, quiet);
+ break;
+ case CLASS_PARSE_NET:
+ rc = parse_net(buf, value);
+ break;
+ }
+ *endp = tmp;
+ if (rc != 0)
+ return rc;
+ if (endh)
+ *endh = endp;
+ return 0;
+}
+
+int class_parse_nid(char *buf, lnet_nid_t *nid, char **endh)
+{
+ return class_parse_value(buf, CLASS_PARSE_NID, (void *)nid, endh, 0);
+}
+EXPORT_SYMBOL(class_parse_nid);
+
+int class_parse_nid_quiet(char *buf, lnet_nid_t *nid, char **endh)
+{
+ return class_parse_value(buf, CLASS_PARSE_NID, (void *)nid, endh, 1);
+}
+EXPORT_SYMBOL(class_parse_nid_quiet);
+
+int class_parse_net(char *buf, __u32 *net, char **endh)
+{
+ return class_parse_value(buf, CLASS_PARSE_NET, (void *)net, endh, 0);
+}
+EXPORT_SYMBOL(class_parse_net);
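+
+/*
+ * Usage sketch (illustrative only; add_nid() is a hypothetical consumer):
+ * walking a comma/colon separated nid list such as
+ * "192.168.0.1@tcp,10.0.0.1@o2ib":
+ *
+ *	lnet_nid_t nid;
+ *
+ *	while (class_parse_nid(buf, &nid, &buf) == 0)
+ *		add_nid(nid);
+ */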
+
+/* 1 param contains key and a match
+ * 0 param contains key but no match
+ * -1 param does not contain key
+ */
+int class_match_nid(char *buf, char *key, lnet_nid_t nid)
+{
+ lnet_nid_t tmp;
+ int rc = -1;
+
+ while (class_find_param(buf, key, &buf) == 0) {
+		/* please restrict the comparison to the nids
+		 * that follow the specified key */
+ while (class_parse_nid(buf, &tmp, &buf) == 0) {
+ if (tmp == nid)
+ return 1;
+ }
+ rc = 0;
+ }
+ return rc;
+}
+EXPORT_SYMBOL(class_match_nid);
+
+int class_match_net(char *buf, char *key, __u32 net)
+{
+ __u32 tmp;
+ int rc = -1;
+
+ while (class_find_param(buf, key, &buf) == 0) {
+ /* please restrict to the nids pertaining to
+ * the specified networks */
+ while (class_parse_net(buf, &tmp, &buf) == 0) {
+ if (tmp == net)
+ return 1;
+ }
+ rc = 0;
+ }
+ return rc;
+}
+EXPORT_SYMBOL(class_match_net);
+
+/********************** class fns **********************/
+
+/**
+ * Create a new obd device and set the type, name and uuid. If successful,
+ * the new device can be accessed by either name or uuid.
+ */
+int class_attach(struct lustre_cfg *lcfg)
+{
+ struct obd_device *obd = NULL;
+ char *typename, *name, *uuid;
+ int rc, len;
+
+ if (!LUSTRE_CFG_BUFLEN(lcfg, 1)) {
+ CERROR("No type passed!\n");
+ return -EINVAL;
+ }
+ typename = lustre_cfg_string(lcfg, 1);
+
+ if (!LUSTRE_CFG_BUFLEN(lcfg, 0)) {
+ CERROR("No name passed!\n");
+ return -EINVAL;
+ }
+ name = lustre_cfg_string(lcfg, 0);
+
+ if (!LUSTRE_CFG_BUFLEN(lcfg, 2)) {
+ CERROR("No UUID passed!\n");
+ return -EINVAL;
+ }
+ uuid = lustre_cfg_string(lcfg, 2);
+
+ CDEBUG(D_IOCTL, "attach type %s name: %s uuid: %s\n",
+ MKSTR(typename), MKSTR(name), MKSTR(uuid));
+
+ obd = class_newdev(typename, name);
+ if (IS_ERR(obd)) {
+ /* Already exists or out of obds */
+ rc = PTR_ERR(obd);
+ obd = NULL;
+ CERROR("Cannot create device %s of type %s : %d\n",
+ name, typename, rc);
+ GOTO(out, rc);
+ }
+ LASSERTF(obd != NULL, "Cannot get obd device %s of type %s\n",
+ name, typename);
+ LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC,
+ "obd %p obd_magic %08X != %08X\n",
+ obd, obd->obd_magic, OBD_DEVICE_MAGIC);
+ LASSERTF(strncmp(obd->obd_name, name, strlen(name)) == 0,
+ "%p obd_name %s != %s\n", obd, obd->obd_name, name);
+
+ rwlock_init(&obd->obd_pool_lock);
+ obd->obd_pool_limit = 0;
+ obd->obd_pool_slv = 0;
+
+ INIT_LIST_HEAD(&obd->obd_exports);
+ INIT_LIST_HEAD(&obd->obd_unlinked_exports);
+ INIT_LIST_HEAD(&obd->obd_delayed_exports);
+ INIT_LIST_HEAD(&obd->obd_exports_timed);
+ INIT_LIST_HEAD(&obd->obd_nid_stats);
+ spin_lock_init(&obd->obd_nid_lock);
+ spin_lock_init(&obd->obd_dev_lock);
+ mutex_init(&obd->obd_dev_mutex);
+ spin_lock_init(&obd->obd_osfs_lock);
+ /* obd->obd_osfs_age must be set to a value in the distant
+ * past to guarantee a fresh statfs is fetched on mount. */
+ obd->obd_osfs_age = cfs_time_shift_64(-1000);
+
+ /* XXX belongs in setup not attach */
+ init_rwsem(&obd->obd_observer_link_sem);
+ /* recovery data */
+ cfs_init_timer(&obd->obd_recovery_timer);
+ spin_lock_init(&obd->obd_recovery_task_lock);
+ init_waitqueue_head(&obd->obd_next_transno_waitq);
+ init_waitqueue_head(&obd->obd_evict_inprogress_waitq);
+ INIT_LIST_HEAD(&obd->obd_req_replay_queue);
+ INIT_LIST_HEAD(&obd->obd_lock_replay_queue);
+ INIT_LIST_HEAD(&obd->obd_final_req_queue);
+ INIT_LIST_HEAD(&obd->obd_evict_list);
+
+ llog_group_init(&obd->obd_olg, FID_SEQ_LLOG);
+
+ obd->obd_conn_inprogress = 0;
+
+ len = strlen(uuid);
+ if (len >= sizeof(obd->obd_uuid)) {
+ CERROR("uuid must be < %d bytes long\n",
+ (int)sizeof(obd->obd_uuid));
+ GOTO(out, rc = -EINVAL);
+ }
+ memcpy(obd->obd_uuid.uuid, uuid, len);
+
+ /* do the attach */
+ if (OBP(obd, attach)) {
+ rc = OBP(obd,attach)(obd, sizeof *lcfg, lcfg);
+ if (rc)
+ GOTO(out, rc = -EINVAL);
+ }
+
+ /* Detach drops this */
+ spin_lock(&obd->obd_dev_lock);
+ atomic_set(&obd->obd_refcount, 1);
+ spin_unlock(&obd->obd_dev_lock);
+ lu_ref_init(&obd->obd_reference);
+ lu_ref_add(&obd->obd_reference, "attach", obd);
+
+ obd->obd_attached = 1;
+ CDEBUG(D_IOCTL, "OBD: dev %d attached type %s with refcount %d\n",
+ obd->obd_minor, typename, atomic_read(&obd->obd_refcount));
+ return 0;
+ out:
+ if (obd != NULL) {
+ class_release_dev(obd);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(class_attach);
+
+/** Create hashes, self-export, and call type-specific setup.
+ * Setup is effectively the "start this obd" call.
+ */
+int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
+{
+ int err = 0;
+ struct obd_export *exp;
+
+ LASSERT(obd != NULL);
+ LASSERTF(obd == class_num2obd(obd->obd_minor),
+ "obd %p != obd_devs[%d] %p\n",
+ obd, obd->obd_minor, class_num2obd(obd->obd_minor));
+ LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC,
+ "obd %p obd_magic %08x != %08x\n",
+ obd, obd->obd_magic, OBD_DEVICE_MAGIC);
+
+ /* have we attached a type to this device? */
+ if (!obd->obd_attached) {
+ CERROR("Device %d not attached\n", obd->obd_minor);
+ return -ENODEV;
+ }
+
+ if (obd->obd_set_up) {
+ CERROR("Device %d already setup (type %s)\n",
+ obd->obd_minor, obd->obd_type->typ_name);
+ return -EEXIST;
+ }
+
+ /* is someone else setting us up right now? (attach inits spinlock) */
+ spin_lock(&obd->obd_dev_lock);
+ if (obd->obd_starting) {
+ spin_unlock(&obd->obd_dev_lock);
+ CERROR("Device %d setup in progress (type %s)\n",
+ obd->obd_minor, obd->obd_type->typ_name);
+ return -EEXIST;
+ }
+ /* just leave this on forever. I can't use obd_set_up here because
+ other fns check that status, and we're not actually set up yet. */
+ obd->obd_starting = 1;
+ obd->obd_uuid_hash = NULL;
+ obd->obd_nid_hash = NULL;
+ obd->obd_nid_stats_hash = NULL;
+ spin_unlock(&obd->obd_dev_lock);
+
+ /* create an uuid-export lustre hash */
+ obd->obd_uuid_hash = cfs_hash_create("UUID_HASH",
+ HASH_UUID_CUR_BITS,
+ HASH_UUID_MAX_BITS,
+ HASH_UUID_BKT_BITS, 0,
+ CFS_HASH_MIN_THETA,
+ CFS_HASH_MAX_THETA,
+ &uuid_hash_ops, CFS_HASH_DEFAULT);
+ if (!obd->obd_uuid_hash)
+ GOTO(err_hash, err = -ENOMEM);
+
+ /* create a nid-export lustre hash */
+ obd->obd_nid_hash = cfs_hash_create("NID_HASH",
+ HASH_NID_CUR_BITS,
+ HASH_NID_MAX_BITS,
+ HASH_NID_BKT_BITS, 0,
+ CFS_HASH_MIN_THETA,
+ CFS_HASH_MAX_THETA,
+ &nid_hash_ops, CFS_HASH_DEFAULT);
+ if (!obd->obd_nid_hash)
+ GOTO(err_hash, err = -ENOMEM);
+
+ /* create a nid-stats lustre hash */
+ obd->obd_nid_stats_hash = cfs_hash_create("NID_STATS",
+ HASH_NID_STATS_CUR_BITS,
+ HASH_NID_STATS_MAX_BITS,
+ HASH_NID_STATS_BKT_BITS, 0,
+ CFS_HASH_MIN_THETA,
+ CFS_HASH_MAX_THETA,
+ &nid_stat_hash_ops, CFS_HASH_DEFAULT);
+ if (!obd->obd_nid_stats_hash)
+ GOTO(err_hash, err = -ENOMEM);
+
+ exp = class_new_export(obd, &obd->obd_uuid);
+ if (IS_ERR(exp))
+ GOTO(err_hash, err = PTR_ERR(exp));
+
+ obd->obd_self_export = exp;
+ list_del_init(&exp->exp_obd_chain_timed);
+ class_export_put(exp);
+
+ err = obd_setup(obd, lcfg);
+ if (err)
+ GOTO(err_exp, err);
+
+ obd->obd_set_up = 1;
+
+ spin_lock(&obd->obd_dev_lock);
+ /* cleanup drops this */
+ class_incref(obd, "setup", obd);
+ spin_unlock(&obd->obd_dev_lock);
+
+ CDEBUG(D_IOCTL, "finished setup of obd %s (uuid %s)\n",
+ obd->obd_name, obd->obd_uuid.uuid);
+
+ return 0;
+err_exp:
+ if (obd->obd_self_export) {
+ class_unlink_export(obd->obd_self_export);
+ obd->obd_self_export = NULL;
+ }
+err_hash:
+ if (obd->obd_uuid_hash) {
+ cfs_hash_putref(obd->obd_uuid_hash);
+ obd->obd_uuid_hash = NULL;
+ }
+ if (obd->obd_nid_hash) {
+ cfs_hash_putref(obd->obd_nid_hash);
+ obd->obd_nid_hash = NULL;
+ }
+ if (obd->obd_nid_stats_hash) {
+ cfs_hash_putref(obd->obd_nid_stats_hash);
+ obd->obd_nid_stats_hash = NULL;
+ }
+ obd->obd_starting = 0;
+ CERROR("setup %s failed (%d)\n", obd->obd_name, err);
+ return err;
+}
+EXPORT_SYMBOL(class_setup);
+
+/** We have finished using this obd and are ready to destroy it.
+ * There can be no more references to this obd.
+ */
+int class_detach(struct obd_device *obd, struct lustre_cfg *lcfg)
+{
+ if (obd->obd_set_up) {
+ CERROR("OBD device %d still set up\n", obd->obd_minor);
+ return -EBUSY;
+ }
+
+ spin_lock(&obd->obd_dev_lock);
+ if (!obd->obd_attached) {
+ spin_unlock(&obd->obd_dev_lock);
+ CERROR("OBD device %d not attached\n", obd->obd_minor);
+ return -ENODEV;
+ }
+ obd->obd_attached = 0;
+ spin_unlock(&obd->obd_dev_lock);
+
+ CDEBUG(D_IOCTL, "detach on obd %s (uuid %s)\n",
+ obd->obd_name, obd->obd_uuid.uuid);
+
+ class_decref(obd, "attach", obd);
+ return 0;
+}
+EXPORT_SYMBOL(class_detach);
+
+/** Start shutting down the obd. There may be in-progress ops when
+ * this is called. We tell them to start shutting down with a call
+ * to class_disconnect_exports().
+ */
+int class_cleanup(struct obd_device *obd, struct lustre_cfg *lcfg)
+{
+ int err = 0;
+ char *flag;
+
+ OBD_RACE(OBD_FAIL_LDLM_RECOV_CLIENTS);
+
+ if (!obd->obd_set_up) {
+ CERROR("Device %d not setup\n", obd->obd_minor);
+ return -ENODEV;
+ }
+
+ spin_lock(&obd->obd_dev_lock);
+ if (obd->obd_stopping) {
+ spin_unlock(&obd->obd_dev_lock);
+ CERROR("OBD %d already stopping\n", obd->obd_minor);
+ return -ENODEV;
+ }
+ /* Leave this on forever */
+ obd->obd_stopping = 1;
+
+	/* wait for already-arrived connections to finish. */
+ while (obd->obd_conn_inprogress > 0) {
+ spin_unlock(&obd->obd_dev_lock);
+
+ cond_resched();
+
+ spin_lock(&obd->obd_dev_lock);
+ }
+ spin_unlock(&obd->obd_dev_lock);
+
+ if (lcfg->lcfg_bufcount >= 2 && LUSTRE_CFG_BUFLEN(lcfg, 1) > 0) {
+ for (flag = lustre_cfg_string(lcfg, 1); *flag != 0; flag++)
+ switch (*flag) {
+ case 'F':
+ obd->obd_force = 1;
+ break;
+ case 'A':
+ LCONSOLE_WARN("Failing over %s\n",
+ obd->obd_name);
+ obd->obd_fail = 1;
+ obd->obd_no_transno = 1;
+ obd->obd_no_recov = 1;
+ if (OBP(obd, iocontrol)) {
+ obd_iocontrol(OBD_IOC_SYNC,
+ obd->obd_self_export,
+ 0, NULL, NULL);
+ }
+ break;
+ default:
+ CERROR("Unrecognised flag '%c'\n", *flag);
+ }
+ }
+
+ LASSERT(obd->obd_self_export);
+
+ /* The three references that should be remaining are the
+ * obd_self_export and the attach and setup references. */
+ if (atomic_read(&obd->obd_refcount) > 3) {
+		/* refcount - 3 might be the number of real exports
+ (excluding self export). But class_incref is called
+ by other things as well, so don't count on it. */
+ CDEBUG(D_IOCTL, "%s: forcing exports to disconnect: %d\n",
+ obd->obd_name, atomic_read(&obd->obd_refcount) - 3);
+ dump_exports(obd, 0);
+ class_disconnect_exports(obd);
+ }
+
+ /* Precleanup, we must make sure all exports get destroyed. */
+ err = obd_precleanup(obd, OBD_CLEANUP_EXPORTS);
+ if (err)
+ CERROR("Precleanup %s returned %d\n",
+ obd->obd_name, err);
+
+ /* destroy an uuid-export hash body */
+ if (obd->obd_uuid_hash) {
+ cfs_hash_putref(obd->obd_uuid_hash);
+ obd->obd_uuid_hash = NULL;
+ }
+
+ /* destroy a nid-export hash body */
+ if (obd->obd_nid_hash) {
+ cfs_hash_putref(obd->obd_nid_hash);
+ obd->obd_nid_hash = NULL;
+ }
+
+ /* destroy a nid-stats hash body */
+ if (obd->obd_nid_stats_hash) {
+ cfs_hash_putref(obd->obd_nid_stats_hash);
+ obd->obd_nid_stats_hash = NULL;
+ }
+
+ class_decref(obd, "setup", obd);
+ obd->obd_set_up = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(class_cleanup);
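+
+/*
+ * Note on ordering: an obd device comes up through class_attach() followed
+ * by class_setup(), and goes away through class_cleanup() followed by
+ * class_detach().  The "attach" reference taken in class_attach() is dropped
+ * by class_detach(), the "setup" reference taken in class_setup() is dropped
+ * by class_cleanup(), and the final class_decref() releases the device.
+ */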
+
+struct obd_device *class_incref(struct obd_device *obd,
+ const char *scope, const void *source)
+{
+ lu_ref_add_atomic(&obd->obd_reference, scope, source);
+ atomic_inc(&obd->obd_refcount);
+ CDEBUG(D_INFO, "incref %s (%p) now %d\n", obd->obd_name, obd,
+ atomic_read(&obd->obd_refcount));
+
+ return obd;
+}
+EXPORT_SYMBOL(class_incref);
+
+void class_decref(struct obd_device *obd, const char *scope, const void *source)
+{
+ int err;
+ int refs;
+
+ spin_lock(&obd->obd_dev_lock);
+ atomic_dec(&obd->obd_refcount);
+ refs = atomic_read(&obd->obd_refcount);
+ spin_unlock(&obd->obd_dev_lock);
+ lu_ref_del(&obd->obd_reference, scope, source);
+
+ CDEBUG(D_INFO, "Decref %s (%p) now %d\n", obd->obd_name, obd, refs);
+
+ if ((refs == 1) && obd->obd_stopping) {
+ /* All exports have been destroyed; there should
+ be no more in-progress ops by this point.*/
+
+ spin_lock(&obd->obd_self_export->exp_lock);
+ obd->obd_self_export->exp_flags |= exp_flags_from_obd(obd);
+ spin_unlock(&obd->obd_self_export->exp_lock);
+
+ /* note that we'll recurse into class_decref again */
+ class_unlink_export(obd->obd_self_export);
+ return;
+ }
+
+ if (refs == 0) {
+ CDEBUG(D_CONFIG, "finishing cleanup of obd %s (%s)\n",
+ obd->obd_name, obd->obd_uuid.uuid);
+ LASSERT(!obd->obd_attached);
+ if (obd->obd_stopping) {
+ /* If we're not stopping, we were never set up */
+ err = obd_cleanup(obd);
+ if (err)
+ CERROR("Cleanup %s returned %d\n",
+ obd->obd_name, err);
+ }
+ if (OBP(obd, detach)) {
+ err = OBP(obd, detach)(obd);
+ if (err)
+ CERROR("Detach returned %d\n", err);
+ }
+ class_release_dev(obd);
+ }
+}
+EXPORT_SYMBOL(class_decref);
+
+/** Add a failover nid location.
+ * Client obd types contact server obd types using this nid list.
+ */
+int class_add_conn(struct obd_device *obd, struct lustre_cfg *lcfg)
+{
+ struct obd_import *imp;
+ struct obd_uuid uuid;
+ int rc;
+
+ if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1 ||
+ LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(struct obd_uuid)) {
+ CERROR("invalid conn_uuid\n");
+ return -EINVAL;
+ }
+ if (strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) &&
+ strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME) &&
+ strcmp(obd->obd_type->typ_name, LUSTRE_OSP_NAME) &&
+ strcmp(obd->obd_type->typ_name, LUSTRE_LWP_NAME) &&
+ strcmp(obd->obd_type->typ_name, LUSTRE_MGC_NAME)) {
+ CERROR("can't add connection on non-client dev\n");
+ return -EINVAL;
+ }
+
+ imp = obd->u.cli.cl_import;
+ if (!imp) {
+ CERROR("try to add conn on immature client dev\n");
+ return -EINVAL;
+ }
+
+ obd_str2uuid(&uuid, lustre_cfg_string(lcfg, 1));
+ rc = obd_add_conn(imp, &uuid, lcfg->lcfg_num);
+
+ return rc;
+}
+EXPORT_SYMBOL(class_add_conn);
+
+/** Remove a failover nid location.
+ */
+int class_del_conn(struct obd_device *obd, struct lustre_cfg *lcfg)
+{
+ struct obd_import *imp;
+ struct obd_uuid uuid;
+ int rc;
+
+ if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1 ||
+ LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(struct obd_uuid)) {
+ CERROR("invalid conn_uuid\n");
+ return -EINVAL;
+ }
+ if (strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) &&
+ strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME)) {
+ CERROR("can't del connection on non-client dev\n");
+ return -EINVAL;
+ }
+
+ imp = obd->u.cli.cl_import;
+ if (!imp) {
+ CERROR("try to del conn on immature client dev\n");
+ return -EINVAL;
+ }
+
+ obd_str2uuid(&uuid, lustre_cfg_string(lcfg, 1));
+ rc = obd_del_conn(imp, &uuid);
+
+ return rc;
+}
+
+LIST_HEAD(lustre_profile_list);
+
+struct lustre_profile *class_get_profile(const char * prof)
+{
+ struct lustre_profile *lprof;
+
+ list_for_each_entry(lprof, &lustre_profile_list, lp_list) {
+ if (!strcmp(lprof->lp_profile, prof)) {
+ return lprof;
+ }
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(class_get_profile);
+
+/** Create a named "profile".
+ * This defines the mdc and osc names to use for a client.
+ * This also is used to define the lov to be used by a mdt.
+ */
+int class_add_profile(int proflen, char *prof, int osclen, char *osc,
+ int mdclen, char *mdc)
+{
+ struct lustre_profile *lprof;
+ int err = 0;
+
+ CDEBUG(D_CONFIG, "Add profile %s\n", prof);
+
+ OBD_ALLOC(lprof, sizeof(*lprof));
+ if (lprof == NULL)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&lprof->lp_list);
+
+ LASSERT(proflen == (strlen(prof) + 1));
+ OBD_ALLOC(lprof->lp_profile, proflen);
+ if (lprof->lp_profile == NULL)
+ GOTO(out, err = -ENOMEM);
+ memcpy(lprof->lp_profile, prof, proflen);
+
+ LASSERT(osclen == (strlen(osc) + 1));
+ OBD_ALLOC(lprof->lp_dt, osclen);
+ if (lprof->lp_dt == NULL)
+ GOTO(out, err = -ENOMEM);
+ memcpy(lprof->lp_dt, osc, osclen);
+
+ if (mdclen > 0) {
+ LASSERT(mdclen == (strlen(mdc) + 1));
+ OBD_ALLOC(lprof->lp_md, mdclen);
+ if (lprof->lp_md == NULL)
+ GOTO(out, err = -ENOMEM);
+ memcpy(lprof->lp_md, mdc, mdclen);
+ }
+
+ list_add(&lprof->lp_list, &lustre_profile_list);
+ return err;
+
+out:
+ if (lprof->lp_md)
+ OBD_FREE(lprof->lp_md, mdclen);
+ if (lprof->lp_dt)
+ OBD_FREE(lprof->lp_dt, osclen);
+ if (lprof->lp_profile)
+ OBD_FREE(lprof->lp_profile, proflen);
+ OBD_FREE(lprof, sizeof(*lprof));
+ return err;
+}
+
+void class_del_profile(const char *prof)
+{
+ struct lustre_profile *lprof;
+
+ CDEBUG(D_CONFIG, "Del profile %s\n", prof);
+
+ lprof = class_get_profile(prof);
+ if (lprof) {
+ list_del(&lprof->lp_list);
+ OBD_FREE(lprof->lp_profile, strlen(lprof->lp_profile) + 1);
+ OBD_FREE(lprof->lp_dt, strlen(lprof->lp_dt) + 1);
+ if (lprof->lp_md)
+ OBD_FREE(lprof->lp_md, strlen(lprof->lp_md) + 1);
+ OBD_FREE(lprof, sizeof *lprof);
+ }
+}
+EXPORT_SYMBOL(class_del_profile);
+
+/* COMPAT_146 */
+void class_del_profiles(void)
+{
+ struct lustre_profile *lprof, *n;
+
+ list_for_each_entry_safe(lprof, n, &lustre_profile_list, lp_list) {
+ list_del(&lprof->lp_list);
+ OBD_FREE(lprof->lp_profile, strlen(lprof->lp_profile) + 1);
+ OBD_FREE(lprof->lp_dt, strlen(lprof->lp_dt) + 1);
+ if (lprof->lp_md)
+ OBD_FREE(lprof->lp_md, strlen(lprof->lp_md) + 1);
+ OBD_FREE(lprof, sizeof *lprof);
+ }
+}
+EXPORT_SYMBOL(class_del_profiles);
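+
+/*
+ * Usage sketch (illustrative only; the profile and device names are
+ * hypothetical): profiles are created from LCFG_MOUNTOPT records and looked
+ * up at mount time.  Note that the length arguments must be strlen() + 1, as
+ * asserted in class_add_profile():
+ *
+ *	class_add_profile(strlen(prof) + 1, prof,
+ *			  strlen(osc) + 1, osc,
+ *			  strlen(mdc) + 1, mdc);
+ *	lprof = class_get_profile(prof);
+ *	...
+ *	class_del_profile(prof);
+ */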
+
+static int class_set_global(char *ptr, int val, struct lustre_cfg *lcfg)
+{
+ if (class_match_param(ptr, PARAM_AT_MIN, NULL) == 0)
+ at_min = val;
+ else if (class_match_param(ptr, PARAM_AT_MAX, NULL) == 0)
+ at_max = val;
+ else if (class_match_param(ptr, PARAM_AT_EXTRA, NULL) == 0)
+ at_extra = val;
+ else if (class_match_param(ptr, PARAM_AT_EARLY_MARGIN, NULL) == 0)
+ at_early_margin = val;
+ else if (class_match_param(ptr, PARAM_AT_HISTORY, NULL) == 0)
+ at_history = val;
+ else if (class_match_param(ptr, PARAM_JOBID_VAR, NULL) == 0)
+ strlcpy(obd_jobid_var, lustre_cfg_string(lcfg, 2),
+ JOBSTATS_JOBID_VAR_MAX_LEN + 1);
+ else
+ return -EINVAL;
+
+ CDEBUG(D_IOCTL, "global %s = %d\n", ptr, val);
+ return 0;
+}
+
+
+/* We can't call ll_process_config or lquota_process_config directly because
+ * they live in modules that must be loaded after this one. */
+static int (*client_process_config)(struct lustre_cfg *lcfg) = NULL;
+static int (*quota_process_config)(struct lustre_cfg *lcfg) = NULL;
+
+void lustre_register_client_process_config(int (*cpc)(struct lustre_cfg *lcfg))
+{
+ client_process_config = cpc;
+}
+EXPORT_SYMBOL(lustre_register_client_process_config);
+
+/**
+ * Rename the proc parameter in \a cfg with a new name \a new_name.
+ *
+ * \param cfg config structure which contains the proc parameter
+ * \param new_name new name of the proc parameter
+ *
+ * \retval valid-pointer pointer to the newly-allocated config structure
+ * which contains the renamed proc parameter
+ * \retval ERR_PTR(-EINVAL) if \a cfg or \a new_name is NULL, or \a cfg does
+ * not contain a proc parameter
+ * \retval ERR_PTR(-ENOMEM) if memory allocation failure occurs
+ */
+struct lustre_cfg *lustre_cfg_rename(struct lustre_cfg *cfg,
+ const char *new_name)
+{
+ struct lustre_cfg_bufs *bufs = NULL;
+ struct lustre_cfg *new_cfg = NULL;
+ char *param = NULL;
+ char *new_param = NULL;
+ char *value = NULL;
+ int name_len = 0;
+ int new_len = 0;
+
+ if (cfg == NULL || new_name == NULL)
+ return ERR_PTR(-EINVAL);
+
+ param = lustre_cfg_string(cfg, 1);
+ if (param == NULL)
+ return ERR_PTR(-EINVAL);
+
+ value = strchr(param, '=');
+ if (value == NULL)
+ name_len = strlen(param);
+ else
+ name_len = value - param;
+
+ new_len = LUSTRE_CFG_BUFLEN(cfg, 1) + strlen(new_name) - name_len;
+
+ OBD_ALLOC(new_param, new_len);
+ if (new_param == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ strcpy(new_param, new_name);
+ if (value != NULL)
+ strcat(new_param, value);
+
+ OBD_ALLOC_PTR(bufs);
+ if (bufs == NULL) {
+ OBD_FREE(new_param, new_len);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ lustre_cfg_bufs_reset(bufs, NULL);
+ lustre_cfg_bufs_init(bufs, cfg);
+ lustre_cfg_bufs_set_string(bufs, 1, new_param);
+
+ new_cfg = lustre_cfg_new(cfg->lcfg_command, bufs);
+
+ OBD_FREE(new_param, new_len);
+ OBD_FREE_PTR(bufs);
+ if (new_cfg == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ new_cfg->lcfg_num = cfg->lcfg_num;
+ new_cfg->lcfg_flags = cfg->lcfg_flags;
+ new_cfg->lcfg_nid = cfg->lcfg_nid;
+ new_cfg->lcfg_nal = cfg->lcfg_nal;
+
+ return new_cfg;
+}
+EXPORT_SYMBOL(lustre_cfg_rename);
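+
+/*
+ * Example (illustrative only; the parameter names are placeholders): if
+ * buffer 1 of \a cfg is "old_name=value", lustre_cfg_rename(cfg, "new_name")
+ * returns a newly allocated config whose buffer 1 reads "new_name=value";
+ * only the name part before '=' is replaced, the value is preserved.
+ */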
+
+void lustre_register_quota_process_config(int (*qpc)(struct lustre_cfg *lcfg))
+{
+ quota_process_config = qpc;
+}
+EXPORT_SYMBOL(lustre_register_quota_process_config);
+
+/** Process configuration commands given in lustre_cfg form.
+ * These may come from direct calls (e.g. class_manual_cleanup)
+ * or processing the config llog, or ioctl from lctl.
+ */
+int class_process_config(struct lustre_cfg *lcfg)
+{
+ struct obd_device *obd;
+ int err;
+
+ LASSERT(lcfg && !IS_ERR(lcfg));
+ CDEBUG(D_IOCTL, "processing cmd: %x\n", lcfg->lcfg_command);
+
+ /* Commands that don't need a device */
+ switch(lcfg->lcfg_command) {
+ case LCFG_ATTACH: {
+ err = class_attach(lcfg);
+ GOTO(out, err);
+ }
+ case LCFG_ADD_UUID: {
+ CDEBUG(D_IOCTL, "adding mapping from uuid %s to nid "LPX64
+ " (%s)\n", lustre_cfg_string(lcfg, 1),
+ lcfg->lcfg_nid, libcfs_nid2str(lcfg->lcfg_nid));
+
+ err = class_add_uuid(lustre_cfg_string(lcfg, 1), lcfg->lcfg_nid);
+ GOTO(out, err);
+ }
+ case LCFG_DEL_UUID: {
+ CDEBUG(D_IOCTL, "removing mappings for uuid %s\n",
+ (lcfg->lcfg_bufcount < 2 || LUSTRE_CFG_BUFLEN(lcfg, 1) == 0)
+ ? "<all uuids>" : lustre_cfg_string(lcfg, 1));
+
+ err = class_del_uuid(lustre_cfg_string(lcfg, 1));
+ GOTO(out, err);
+ }
+ case LCFG_MOUNTOPT: {
+ CDEBUG(D_IOCTL, "mountopt: profile %s osc %s mdc %s\n",
+ lustre_cfg_string(lcfg, 1),
+ lustre_cfg_string(lcfg, 2),
+ lustre_cfg_string(lcfg, 3));
+ /* set these mount options somewhere, so ll_fill_super
+ * can find them. */
+ err = class_add_profile(LUSTRE_CFG_BUFLEN(lcfg, 1),
+ lustre_cfg_string(lcfg, 1),
+ LUSTRE_CFG_BUFLEN(lcfg, 2),
+ lustre_cfg_string(lcfg, 2),
+ LUSTRE_CFG_BUFLEN(lcfg, 3),
+ lustre_cfg_string(lcfg, 3));
+ GOTO(out, err);
+ }
+ case LCFG_DEL_MOUNTOPT: {
+ CDEBUG(D_IOCTL, "mountopt: profile %s\n",
+ lustre_cfg_string(lcfg, 1));
+ class_del_profile(lustre_cfg_string(lcfg, 1));
+ GOTO(out, err = 0);
+ }
+ case LCFG_SET_TIMEOUT: {
+ CDEBUG(D_IOCTL, "changing lustre timeout from %d to %d\n",
+ obd_timeout, lcfg->lcfg_num);
+ obd_timeout = max(lcfg->lcfg_num, 1U);
+ obd_timeout_set = 1;
+ GOTO(out, err = 0);
+ }
+ case LCFG_SET_LDLM_TIMEOUT: {
+ CDEBUG(D_IOCTL, "changing lustre ldlm_timeout from %d to %d\n",
+ ldlm_timeout, lcfg->lcfg_num);
+ ldlm_timeout = max(lcfg->lcfg_num, 1U);
+ if (ldlm_timeout >= obd_timeout)
+ ldlm_timeout = max(obd_timeout / 3, 1U);
+ ldlm_timeout_set = 1;
+ GOTO(out, err = 0);
+ }
+ case LCFG_SET_UPCALL: {
+ LCONSOLE_ERROR_MSG(0x15a, "recovery upcall is deprecated\n");
+ /* COMPAT_146 Don't fail on old configs */
+ GOTO(out, err = 0);
+ }
+ case LCFG_MARKER: {
+ struct cfg_marker *marker;
+ marker = lustre_cfg_buf(lcfg, 1);
+ CDEBUG(D_IOCTL, "marker %d (%#x) %.16s %s\n", marker->cm_step,
+ marker->cm_flags, marker->cm_tgtname, marker->cm_comment);
+ GOTO(out, err = 0);
+ }
+ case LCFG_PARAM: {
+ char *tmp;
+ /* llite has no obd */
+ if ((class_match_param(lustre_cfg_string(lcfg, 1),
+ PARAM_LLITE, 0) == 0) &&
+ client_process_config) {
+ err = (*client_process_config)(lcfg);
+ GOTO(out, err);
+ } else if ((class_match_param(lustre_cfg_string(lcfg, 1),
+ PARAM_SYS, &tmp) == 0)) {
+ /* Global param settings */
+ err = class_set_global(tmp, lcfg->lcfg_num, lcfg);
+ /*
+ * Client or server should not fail to mount if
+ * it hits an unknown configuration parameter.
+ */
+ if (err != 0)
+ CWARN("Ignoring unknown param %s\n", tmp);
+
+ GOTO(out, err = 0);
+ } else if ((class_match_param(lustre_cfg_string(lcfg, 1),
+ PARAM_QUOTA, &tmp) == 0) &&
+ quota_process_config) {
+ err = (*quota_process_config)(lcfg);
+ GOTO(out, err);
+ }
+ /* Fall through */
+ break;
+ }
+ }
+
+ /* Commands that require a device */
+ obd = class_name2obd(lustre_cfg_string(lcfg, 0));
+ if (obd == NULL) {
+ if (!LUSTRE_CFG_BUFLEN(lcfg, 0))
+ CERROR("this lcfg command requires a device name\n");
+ else
+ CERROR("no device for: %s\n",
+ lustre_cfg_string(lcfg, 0));
+
+ GOTO(out, err = -EINVAL);
+ }
+
+ switch(lcfg->lcfg_command) {
+ case LCFG_SETUP: {
+ err = class_setup(obd, lcfg);
+ GOTO(out, err);
+ }
+ case LCFG_DETACH: {
+ err = class_detach(obd, lcfg);
+ GOTO(out, err = 0);
+ }
+ case LCFG_CLEANUP: {
+ err = class_cleanup(obd, lcfg);
+ GOTO(out, err = 0);
+ }
+ case LCFG_ADD_CONN: {
+ err = class_add_conn(obd, lcfg);
+ GOTO(out, err = 0);
+ }
+ case LCFG_DEL_CONN: {
+ err = class_del_conn(obd, lcfg);
+ GOTO(out, err = 0);
+ }
+ case LCFG_POOL_NEW: {
+ err = obd_pool_new(obd, lustre_cfg_string(lcfg, 2));
+ GOTO(out, err = 0);
+ break;
+ }
+ case LCFG_POOL_ADD: {
+ err = obd_pool_add(obd, lustre_cfg_string(lcfg, 2),
+ lustre_cfg_string(lcfg, 3));
+ GOTO(out, err = 0);
+ break;
+ }
+ case LCFG_POOL_REM: {
+ err = obd_pool_rem(obd, lustre_cfg_string(lcfg, 2),
+ lustre_cfg_string(lcfg, 3));
+ GOTO(out, err = 0);
+ break;
+ }
+ case LCFG_POOL_DEL: {
+ err = obd_pool_del(obd, lustre_cfg_string(lcfg, 2));
+ GOTO(out, err = 0);
+ break;
+ }
+ default: {
+ err = obd_process_config(obd, sizeof(*lcfg), lcfg);
+ GOTO(out, err);
+
+ }
+ }
+out:
+ if ((err < 0) && !(lcfg->lcfg_command & LCFG_REQUIRED)) {
+ CWARN("Ignoring error %d on optional command %#x\n", err,
+ lcfg->lcfg_command);
+ err = 0;
+ }
+ return err;
+}
+EXPORT_SYMBOL(class_process_config);
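+
+/*
+ * Usage sketch (illustrative only; the device name, type and uuid strings
+ * are hypothetical): callers normally build a lustre_cfg with the
+ * lustre_cfg_bufs helpers and hand it to class_process_config(), e.g. to
+ * attach a device:
+ *
+ *	struct lustre_cfg_bufs bufs;
+ *	struct lustre_cfg *lcfg;
+ *
+ *	lustre_cfg_bufs_reset(&bufs, "my-device");
+ *	lustre_cfg_bufs_set_string(&bufs, 1, "my-type");
+ *	lustre_cfg_bufs_set_string(&bufs, 2, "my-device-uuid");
+ *	lcfg = lustre_cfg_new(LCFG_ATTACH, &bufs);
+ *	rc = class_process_config(lcfg);
+ *	lustre_cfg_free(lcfg);
+ */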
+
+int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
+ struct lustre_cfg *lcfg, void *data)
+{
+ struct lprocfs_vars *var;
+ struct file fakefile;
+ struct seq_file fake_seqfile;
+ char *key, *sval;
+ int i, keylen, vallen;
+ int matched = 0, j = 0;
+ int rc = 0;
+ int skip = 0;
+
+ if (lcfg->lcfg_command != LCFG_PARAM) {
+ CERROR("Unknown command: %d\n", lcfg->lcfg_command);
+ return -EINVAL;
+ }
+
+ /* fake a seq file so that var->fops->write can work... */
+ fakefile.private_data = &fake_seqfile;
+ fake_seqfile.private = data;
+ /* e.g. tunefs.lustre --param mdt.group_upcall=foo /r/tmp/lustre-mdt
+ or lctl conf_param lustre-MDT0000.mdt.group_upcall=bar
+ or lctl conf_param lustre-OST0000.osc.max_dirty_mb=36 */
+ for (i = 1; i < lcfg->lcfg_bufcount; i++) {
+ key = lustre_cfg_buf(lcfg, i);
+ /* Strip off prefix */
+ class_match_param(key, prefix, &key);
+ sval = strchr(key, '=');
+ if (!sval || (*(sval + 1) == 0)) {
+ CERROR("Can't parse param %s (missing '=')\n", key);
+ /* rc = -EINVAL; continue parsing other params */
+ continue;
+ }
+ keylen = sval - key;
+ sval++;
+ vallen = strlen(sval);
+ matched = 0;
+ j = 0;
+ /* Search proc entries */
+ while (lvars[j].name) {
+ var = &lvars[j];
+ if (class_match_param(key, (char *)var->name, 0) == 0 &&
+ keylen == strlen(var->name)) {
+ matched++;
+ rc = -EROFS;
+ if (var->fops && var->fops->write) {
+ mm_segment_t oldfs;
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ rc = (var->fops->write)(&fakefile, sval,
+ vallen, NULL);
+ set_fs(oldfs);
+ }
+ break;
+ }
+ j++;
+ }
+ if (!matched) {
+ /* If the prefix doesn't match, return error so we
+ can pass it down the stack */
+ if (strnchr(key, keylen, '.'))
+ return -ENOSYS;
+ CERROR("%s: unknown param %s\n",
+ (char *)lustre_cfg_string(lcfg, 0), key);
+ /* rc = -EINVAL; continue parsing other params */
+ skip++;
+ } else if (rc < 0) {
+ CERROR("writing proc entry %s err %d\n",
+ var->name, rc);
+ rc = 0;
+ } else {
+ CDEBUG(D_CONFIG, "%s.%.*s: Set parameter %.*s=%s\n",
+ lustre_cfg_string(lcfg, 0),
+ (int)strlen(prefix) - 1, prefix,
+ (int)(sval - key - 1), key, sval);
+ }
+ }
+
+ if (rc > 0)
+ rc = 0;
+ if (!rc && skip)
+ rc = skip;
+ return rc;
+}
+EXPORT_SYMBOL(class_process_proc_param);
+
+extern int lustre_check_exclusion(struct super_block *sb, char *svname);
+
+/** Parse a configuration llog, doing various manipulations on the records
+ * for various reasons (modifications for compatibility, skipping obsolete
+ * records, changing uuids, etc), then pass the resulting records to
+ * class_process_config().
+ */
+int class_config_llog_handler(const struct lu_env *env,
+ struct llog_handle *handle,
+ struct llog_rec_hdr *rec, void *data)
+{
+ struct config_llog_instance *clli = data;
+ int cfg_len = rec->lrh_len;
+ char *cfg_buf = (char*) (rec + 1);
+ int rc = 0;
+
+ //class_config_dump_handler(handle, rec, data);
+
+ switch (rec->lrh_type) {
+ case OBD_CFG_REC: {
+ struct lustre_cfg *lcfg, *lcfg_new;
+ struct lustre_cfg_bufs bufs;
+ char *inst_name = NULL;
+ int inst_len = 0;
+ int inst = 0, swab = 0;
+
+ lcfg = (struct lustre_cfg *)cfg_buf;
+ if (lcfg->lcfg_version == __swab32(LUSTRE_CFG_VERSION)) {
+ lustre_swab_lustre_cfg(lcfg);
+ swab = 1;
+ }
+
+ rc = lustre_cfg_sanity_check(cfg_buf, cfg_len);
+ if (rc)
+ GOTO(out, rc);
+
+ /* Figure out config state info */
+ if (lcfg->lcfg_command == LCFG_MARKER) {
+ struct cfg_marker *marker = lustre_cfg_buf(lcfg, 1);
+ lustre_swab_cfg_marker(marker, swab,
+ LUSTRE_CFG_BUFLEN(lcfg, 1));
+ CDEBUG(D_CONFIG, "Marker, inst_flg=%#x mark_flg=%#x\n",
+ clli->cfg_flags, marker->cm_flags);
+ if (marker->cm_flags & CM_START) {
+ /* all previous flags off */
+ clli->cfg_flags = CFG_F_MARKER;
+ if (marker->cm_flags & CM_SKIP) {
+ clli->cfg_flags |= CFG_F_SKIP;
+ CDEBUG(D_CONFIG, "SKIP #%d\n",
+ marker->cm_step);
+ } else if ((marker->cm_flags & CM_EXCLUDE) ||
+ (clli->cfg_sb &&
+ lustre_check_exclusion(clli->cfg_sb,
+ marker->cm_tgtname))) {
+ clli->cfg_flags |= CFG_F_EXCLUDE;
+ CDEBUG(D_CONFIG, "EXCLUDE %d\n",
+ marker->cm_step);
+ }
+ } else if (marker->cm_flags & CM_END) {
+ clli->cfg_flags = 0;
+ }
+ }
+ /* A config command without a start marker before it is
+ illegal (post 146) */
+ if (!(clli->cfg_flags & CFG_F_COMPAT146) &&
+ !(clli->cfg_flags & CFG_F_MARKER) &&
+ (lcfg->lcfg_command != LCFG_MARKER)) {
+ CWARN("Config not inside markers, ignoring! "
+ "(inst: %p, uuid: %s, flags: %#x)\n",
+ clli->cfg_instance,
+ clli->cfg_uuid.uuid, clli->cfg_flags);
+ clli->cfg_flags |= CFG_F_SKIP;
+ }
+ if (clli->cfg_flags & CFG_F_SKIP) {
+ CDEBUG(D_CONFIG, "skipping %#x\n",
+ clli->cfg_flags);
+ rc = 0;
+ /* No processing! */
+ break;
+ }
+
+ /*
+ * For interoperability between 1.8 and 2.0,
+ * rename "mds" obd device type to "mdt".
+ */
+ {
+ char *typename = lustre_cfg_string(lcfg, 1);
+ char *index = lustre_cfg_string(lcfg, 2);
+
+ if ((lcfg->lcfg_command == LCFG_ATTACH && typename &&
+ strcmp(typename, "mds") == 0)) {
+ CWARN("For 1.8 interoperability, rename obd "
+ "type from mds to mdt\n");
+ typename[2] = 't';
+ }
+ if ((lcfg->lcfg_command == LCFG_SETUP && index &&
+ strcmp(index, "type") == 0)) {
+ CDEBUG(D_INFO, "For 1.8 interoperability, "
+ "set this index to '0'\n");
+ index[0] = '0';
+ index[1] = 0;
+ }
+ }
+
+
+ if (clli->cfg_flags & CFG_F_EXCLUDE) {
+ CDEBUG(D_CONFIG, "cmd: %x marked EXCLUDED\n",
+ lcfg->lcfg_command);
+ if (lcfg->lcfg_command == LCFG_LOV_ADD_OBD)
+ /* Add inactive instead */
+ lcfg->lcfg_command = LCFG_LOV_ADD_INA;
+ }
+
+ lustre_cfg_bufs_init(&bufs, lcfg);
+
+ if (clli && clli->cfg_instance &&
+ LUSTRE_CFG_BUFLEN(lcfg, 0) > 0){
+ inst = 1;
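+ /* Extra room in inst_len is for the "-%p" instance
+ suffix (dash, pointer in hex) and the trailing nul. */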
+ inst_len = LUSTRE_CFG_BUFLEN(lcfg, 0) +
+ sizeof(clli->cfg_instance) * 2 + 4;
+ OBD_ALLOC(inst_name, inst_len);
+ if (inst_name == NULL)
+ GOTO(out, rc = -ENOMEM);
+ sprintf(inst_name, "%s-%p",
+ lustre_cfg_string(lcfg, 0),
+ clli->cfg_instance);
+ lustre_cfg_bufs_set_string(&bufs, 0, inst_name);
+ CDEBUG(D_CONFIG, "cmd %x, instance name: %s\n",
+ lcfg->lcfg_command, inst_name);
+ }
+
+ /* we override the llog's uuid for clients, to ensure they
+ are unique */
+ if (clli && clli->cfg_instance != NULL &&
+ lcfg->lcfg_command == LCFG_ATTACH) {
+ lustre_cfg_bufs_set_string(&bufs, 2,
+ clli->cfg_uuid.uuid);
+ }
+ /*
+ * sptlrpc config record, we expect 2 data segments:
+ * [0]: fs_name/target_name,
+ * [1]: rule string
+ * moving them to index [1] and [2], and insert MGC's
+ * obdname at index [0].
+ */
+ if (clli && clli->cfg_instance == NULL &&
+ lcfg->lcfg_command == LCFG_SPTLRPC_CONF) {
+ lustre_cfg_bufs_set(&bufs, 2, bufs.lcfg_buf[1],
+ bufs.lcfg_buflen[1]);
+ lustre_cfg_bufs_set(&bufs, 1, bufs.lcfg_buf[0],
+ bufs.lcfg_buflen[0]);
+ lustre_cfg_bufs_set_string(&bufs, 0,
+ clli->cfg_obdname);
+ }
+
+ lcfg_new = lustre_cfg_new(lcfg->lcfg_command, &bufs);
+
+ lcfg_new->lcfg_num = lcfg->lcfg_num;
+ lcfg_new->lcfg_flags = lcfg->lcfg_flags;
+
+ /* XXX Hack to try to remain binary compatible with
+ * pre-newconfig logs */
+ if (lcfg->lcfg_nal != 0 && /* pre-newconfig log? */
+ (lcfg->lcfg_nid >> 32) == 0) {
+ __u32 addr = (__u32)(lcfg->lcfg_nid & 0xffffffff);
+
+ lcfg_new->lcfg_nid =
+ LNET_MKNID(LNET_MKNET(lcfg->lcfg_nal, 0), addr);
+ CWARN("Converted pre-newconfig NAL %d NID %x to %s\n",
+ lcfg->lcfg_nal, addr,
+ libcfs_nid2str(lcfg_new->lcfg_nid));
+ } else {
+ lcfg_new->lcfg_nid = lcfg->lcfg_nid;
+ }
+
+ lcfg_new->lcfg_nal = 0; /* illegal value for obsolete field */
+
+ rc = class_process_config(lcfg_new);
+ lustre_cfg_free(lcfg_new);
+
+ if (inst)
+ OBD_FREE(inst_name, inst_len);
+ break;
+ }
+ default:
+ CERROR("Unknown llog record type %#x encountered\n",
+ rec->lrh_type);
+ break;
+ }
+out:
+ if (rc) {
+ CERROR("%s: cfg command failed: rc = %d\n",
+ handle->lgh_ctxt->loc_obd->obd_name, rc);
+ class_config_dump_handler(NULL, handle, rec, data);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(class_config_llog_handler);
+
+int class_config_parse_llog(const struct lu_env *env, struct llog_ctxt *ctxt,
+ char *name, struct config_llog_instance *cfg)
+{
+ struct llog_process_cat_data cd = {0, 0};
+ struct llog_handle *llh;
+ llog_cb_t callback;
+ int rc;
+
+ CDEBUG(D_INFO, "looking up llog %s\n", name);
+ rc = llog_open(env, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS);
+ if (rc)
+ return rc;
+
+ rc = llog_init_handle(env, llh, LLOG_F_IS_PLAIN, NULL);
+ if (rc)
+ GOTO(parse_out, rc);
+
+ /* continue processing from where we last stopped to end-of-log */
+ if (cfg) {
+ cd.lpcd_first_idx = cfg->cfg_last_idx;
+ callback = cfg->cfg_callback;
+ LASSERT(callback != NULL);
+ } else {
+ callback = class_config_llog_handler;
+ }
+
+ cd.lpcd_last_idx = 0;
+
+ rc = llog_process(env, llh, callback, cfg, &cd);
+
+ CDEBUG(D_CONFIG, "Processed log %s gen %d-%d (rc=%d)\n", name,
+ cd.lpcd_first_idx + 1, cd.lpcd_last_idx, rc);
+ if (cfg)
+ cfg->cfg_last_idx = cd.lpcd_last_idx;
+
+parse_out:
+ llog_close(env, llh);
+ return rc;
+}
+EXPORT_SYMBOL(class_config_parse_llog);
+
+/**
+ * parse config record and output dump in supplied buffer.
+ * This is separated from class_config_dump_handler() to use
+ * for ioctl needs as well
+ */
+int class_config_parse_rec(struct llog_rec_hdr *rec, char *buf, int size)
+{
+ struct lustre_cfg *lcfg = (struct lustre_cfg *)(rec + 1);
+ char *ptr = buf;
+ char *end = buf + size;
+ int rc = 0;
+
+ LASSERT(rec->lrh_type == OBD_CFG_REC);
+ rc = lustre_cfg_sanity_check(lcfg, rec->lrh_len);
+ if (rc < 0)
+ return rc;
+
+ ptr += snprintf(ptr, end-ptr, "cmd=%05x ", lcfg->lcfg_command);
+ if (lcfg->lcfg_flags)
+ ptr += snprintf(ptr, end-ptr, "flags=%#08x ",
+ lcfg->lcfg_flags);
+
+ if (lcfg->lcfg_num)
+ ptr += snprintf(ptr, end-ptr, "num=%#08x ", lcfg->lcfg_num);
+
+ if (lcfg->lcfg_nid)
+ ptr += snprintf(ptr, end-ptr, "nid=%s("LPX64")\n ",
+ libcfs_nid2str(lcfg->lcfg_nid),
+ lcfg->lcfg_nid);
+
+ if (lcfg->lcfg_command == LCFG_MARKER) {
+ struct cfg_marker *marker = lustre_cfg_buf(lcfg, 1);
+
+ ptr += snprintf(ptr, end-ptr, "marker=%d(%#x)%s '%s'",
+ marker->cm_step, marker->cm_flags,
+ marker->cm_tgtname, marker->cm_comment);
+ } else {
+ int i;
+
+ for (i = 0; i < lcfg->lcfg_bufcount; i++) {
+ ptr += snprintf(ptr, end-ptr, "%d:%s ", i,
+ lustre_cfg_string(lcfg, i));
+ }
+ }
+ /* return consumed bytes */
+ rc = ptr - buf;
+ return rc;
+}
+
+int class_config_dump_handler(const struct lu_env *env,
+ struct llog_handle *handle,
+ struct llog_rec_hdr *rec, void *data)
+{
+ char *outstr;
+ int rc = 0;
+
+ OBD_ALLOC(outstr, 256);
+ if (outstr == NULL)
+ return -ENOMEM;
+
+ if (rec->lrh_type == OBD_CFG_REC) {
+ class_config_parse_rec(rec, outstr, 256);
+ LCONSOLE(D_WARNING, " %s\n", outstr);
+ } else {
+ LCONSOLE(D_WARNING, "unhandled lrh_type: %#x\n", rec->lrh_type);
+ rc = -EINVAL;
+ }
+
+ OBD_FREE(outstr, 256);
+ return rc;
+}
+
+int class_config_dump_llog(const struct lu_env *env, struct llog_ctxt *ctxt,
+ char *name, struct config_llog_instance *cfg)
+{
+ struct llog_handle *llh;
+ int rc;
+
+ LCONSOLE_INFO("Dumping config log %s\n", name);
+
+ rc = llog_open(env, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS);
+ if (rc)
+ return rc;
+
+ rc = llog_init_handle(env, llh, LLOG_F_IS_PLAIN, NULL);
+ if (rc)
+ GOTO(parse_out, rc);
+
+ rc = llog_process(env, llh, class_config_dump_handler, cfg, NULL);
+parse_out:
+ llog_close(env, llh);
+
+ LCONSOLE_INFO("End config log %s\n", name);
+ return rc;
+}
+EXPORT_SYMBOL(class_config_dump_llog);
+
+/** Call class_cleanup and class_detach.
+ * "Manual" only in the sense that we're faking lcfg commands.
+ */
+int class_manual_cleanup(struct obd_device *obd)
+{
+ char flags[3] = "";
+ struct lustre_cfg *lcfg;
+ struct lustre_cfg_bufs bufs;
+ int rc;
+
+ if (!obd) {
+ CERROR("empty cleanup\n");
+ return -EALREADY;
+ }
+
+ if (obd->obd_force)
+ strcat(flags, "F");
+ if (obd->obd_fail)
+ strcat(flags, "A");
+
+ CDEBUG(D_CONFIG, "Manual cleanup of %s (flags='%s')\n",
+ obd->obd_name, flags);
+
+ lustre_cfg_bufs_reset(&bufs, obd->obd_name);
+ lustre_cfg_bufs_set_string(&bufs, 1, flags);
+ lcfg = lustre_cfg_new(LCFG_CLEANUP, &bufs);
+ if (!lcfg)
+ return -ENOMEM;
+
+ rc = class_process_config(lcfg);
+ if (rc) {
+ CERROR("cleanup failed %d: %s\n", rc, obd->obd_name);
+ GOTO(out, rc);
+ }
+
+ /* the lcfg is almost the same for both ops */
+ lcfg->lcfg_command = LCFG_DETACH;
+ rc = class_process_config(lcfg);
+ if (rc)
+ CERROR("detach failed %d: %s\n", rc, obd->obd_name);
+out:
+ lustre_cfg_free(lcfg);
+ return rc;
+}
+EXPORT_SYMBOL(class_manual_cleanup);
+
+/*
+ * uuid<->export lustre hash operations
+ */
+
+static unsigned
+uuid_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+{
+ return cfs_hash_djb2_hash(((struct obd_uuid *)key)->uuid,
+ sizeof(((struct obd_uuid *)key)->uuid), mask);
+}
+
+static void *
+uuid_key(struct hlist_node *hnode)
+{
+ struct obd_export *exp;
+
+ exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
+
+ return &exp->exp_client_uuid;
+}
+
+/*
+ * NOTE: It is impossible to find an export that is in failed
+ * state with this function
+ */
+static int
+uuid_keycmp(const void *key, struct hlist_node *hnode)
+{
+ struct obd_export *exp;
+
+ LASSERT(key);
+ exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
+
+ return obd_uuid_equals(key, &exp->exp_client_uuid) &&
+ !exp->exp_failed;
+}
+
+static void *
+uuid_export_object(struct hlist_node *hnode)
+{
+ return hlist_entry(hnode, struct obd_export, exp_uuid_hash);
+}
+
+static void
+uuid_export_get(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+ struct obd_export *exp;
+
+ exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
+ class_export_get(exp);
+}
+
+static void
+uuid_export_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+ struct obd_export *exp;
+
+ exp = hlist_entry(hnode, struct obd_export, exp_uuid_hash);
+ class_export_put(exp);
+}
+
+static cfs_hash_ops_t uuid_hash_ops = {
+ .hs_hash = uuid_hash,
+ .hs_key = uuid_key,
+ .hs_keycmp = uuid_keycmp,
+ .hs_object = uuid_export_object,
+ .hs_get = uuid_export_get,
+ .hs_put_locked = uuid_export_put_locked,
+};
+
+
+/*
+ * nid<->export hash operations
+ */
+
+static unsigned
+nid_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+{
+ return cfs_hash_djb2_hash(key, sizeof(lnet_nid_t), mask);
+}
+
+static void *
+nid_key(struct hlist_node *hnode)
+{
+ struct obd_export *exp;
+
+ exp = hlist_entry(hnode, struct obd_export, exp_nid_hash);
+
+ return &exp->exp_connection->c_peer.nid;
+}
+
+/*
+ * NOTE: It is impossible to find an export that is in failed
+ * state with this function
+ */
+static int
+nid_kepcmp(const void *key, struct hlist_node *hnode)
+{
+ struct obd_export *exp;
+
+ LASSERT(key);
+ exp = hlist_entry(hnode, struct obd_export, exp_nid_hash);
+
+ return exp->exp_connection->c_peer.nid == *(lnet_nid_t *)key &&
+ !exp->exp_failed;
+}
+
+static void *
+nid_export_object(struct hlist_node *hnode)
+{
+ return hlist_entry(hnode, struct obd_export, exp_nid_hash);
+}
+
+static void
+nid_export_get(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+ struct obd_export *exp;
+
+ exp = hlist_entry(hnode, struct obd_export, exp_nid_hash);
+ class_export_get(exp);
+}
+
+static void
+nid_export_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+ struct obd_export *exp;
+
+ exp = hlist_entry(hnode, struct obd_export, exp_nid_hash);
+ class_export_put(exp);
+}
+
+static cfs_hash_ops_t nid_hash_ops = {
+ .hs_hash = nid_hash,
+ .hs_key = nid_key,
+ .hs_keycmp = nid_kepcmp,
+ .hs_object = nid_export_object,
+ .hs_get = nid_export_get,
+ .hs_put_locked = nid_export_put_locked,
+};
+
+
+/*
+ * nid<->nidstats hash operations
+ */
+
+static void *
+nidstats_key(struct hlist_node *hnode)
+{
+ struct nid_stat *ns;
+
+ ns = hlist_entry(hnode, struct nid_stat, nid_hash);
+
+ return &ns->nid;
+}
+
+static int
+nidstats_keycmp(const void *key, struct hlist_node *hnode)
+{
+ return *(lnet_nid_t *)nidstats_key(hnode) == *(lnet_nid_t *)key;
+}
+
+static void *
+nidstats_object(struct hlist_node *hnode)
+{
+ return hlist_entry(hnode, struct nid_stat, nid_hash);
+}
+
+static void
+nidstats_get(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+ struct nid_stat *ns;
+
+ ns = hlist_entry(hnode, struct nid_stat, nid_hash);
+ nidstat_getref(ns);
+}
+
+static void
+nidstats_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+ struct nid_stat *ns;
+
+ ns = hlist_entry(hnode, struct nid_stat, nid_hash);
+ nidstat_putref(ns);
+}
+
+static cfs_hash_ops_t nid_stat_hash_ops = {
+ .hs_hash = nid_hash,
+ .hs_key = nidstats_key,
+ .hs_keycmp = nidstats_keycmp,
+ .hs_object = nidstats_object,
+ .hs_get = nidstats_get,
+ .hs_put_locked = nidstats_put_locked,
+};
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
new file mode 100644
index 0000000..68a4d6a
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -0,0 +1,1315 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdclass/obd_mount.c
+ *
+ * Client mount routines
+ *
+ * Author: Nathan Rutman <nathan@clusterfs.com>
+ */
+
+
+#define DEBUG_SUBSYSTEM S_CLASS
+#define D_MOUNT (D_SUPER|D_CONFIG/*|D_WARNING */)
+#define PRINT_CMD CDEBUG
+
+#include <obd.h>
+#include <lvfs.h>
+#include <obd_class.h>
+#include <lustre/lustre_user.h>
+#include <lustre_log.h>
+#include <lustre_disk.h>
+#include <lustre_param.h>
+
+static int (*client_fill_super)(struct super_block *sb,
+ struct vfsmount *mnt);
+
+static void (*kill_super_cb)(struct super_block *sb);
+
+/**************** config llog ********************/
+
+/** Get a config log from the MGS and process it.
+ * This func is called for both clients and servers.
+ * Continue to process new statements appended to the logs
+ * (whenever the config lock is revoked) until lustre_end_log
+ * is called.
+ * @param sb The superblock is used by the MGC to write to the local copy of
+ * the config log
+ * @param logname The name of the llog to replicate from the MGS
+ * @param cfg Since the same mgc may be used to follow multiple config logs
+ * (e.g. ost1, ost2, client), the config_llog_instance keeps the state for
+ * this log, and is added to the mgc's list of logs to follow.
+ */
+int lustre_process_log(struct super_block *sb, char *logname,
+ struct config_llog_instance *cfg)
+{
+ struct lustre_cfg *lcfg;
+ struct lustre_cfg_bufs *bufs;
+ struct lustre_sb_info *lsi = s2lsi(sb);
+ struct obd_device *mgc = lsi->lsi_mgc;
+ int rc;
+
+ LASSERT(mgc);
+ LASSERT(cfg);
+
+ OBD_ALLOC_PTR(bufs);
+ if (bufs == NULL)
+ return -ENOMEM;
+
+ /* mgc_process_config */
+ lustre_cfg_bufs_reset(bufs, mgc->obd_name);
+ lustre_cfg_bufs_set_string(bufs, 1, logname);
+ lustre_cfg_bufs_set(bufs, 2, cfg, sizeof(*cfg));
+ lustre_cfg_bufs_set(bufs, 3, &sb, sizeof(sb));
+ lcfg = lustre_cfg_new(LCFG_LOG_START, bufs);
+ rc = obd_process_config(mgc, sizeof(*lcfg), lcfg);
+ lustre_cfg_free(lcfg);
+
+ OBD_FREE_PTR(bufs);
+
+ if (rc == -EINVAL)
+ LCONSOLE_ERROR_MSG(0x15b, "%s: The configuration from log '%s' "
+ "failed from the MGS (%d). Make sure this "
+ "client and the MGS are running compatible "
+ "versions of Lustre.\n",
+ mgc->obd_name, logname, rc);
+
+ if (rc)
+ LCONSOLE_ERROR_MSG(0x15c, "%s: The configuration from log '%s' "
+ "failed (%d). This may be the result of "
+ "communication errors between this node and "
+ "the MGS, a bad configuration, or other "
+ "errors. See the syslog for more "
+ "information.\n", mgc->obd_name, logname,
+ rc);
+
+ /* class_obd_list(); */
+ return rc;
+}
+EXPORT_SYMBOL(lustre_process_log);
+
+/* Stop watching this config log for updates */
+int lustre_end_log(struct super_block *sb, char *logname,
+ struct config_llog_instance *cfg)
+{
+ struct lustre_cfg *lcfg;
+ struct lustre_cfg_bufs bufs;
+ struct lustre_sb_info *lsi = s2lsi(sb);
+ struct obd_device *mgc = lsi->lsi_mgc;
+ int rc;
+
+ if (!mgc)
+ return -ENOENT;
+
+ /* mgc_process_config */
+ lustre_cfg_bufs_reset(&bufs, mgc->obd_name);
+ lustre_cfg_bufs_set_string(&bufs, 1, logname);
+ if (cfg)
+ lustre_cfg_bufs_set(&bufs, 2, cfg, sizeof(*cfg));
+ lcfg = lustre_cfg_new(LCFG_LOG_END, &bufs);
+ rc = obd_process_config(mgc, sizeof(*lcfg), lcfg);
+ lustre_cfg_free(lcfg);
+ return rc;
+}
+EXPORT_SYMBOL(lustre_end_log);
+
+/**************** obd start *******************/
+
+/** lustre_cfg_bufs are a holdover from 1.4; we can still set these up from
+ * lctl (and do for echo cli/srv).
+ */
+int do_lcfg(char *cfgname, lnet_nid_t nid, int cmd,
+ char *s1, char *s2, char *s3, char *s4)
+{
+ struct lustre_cfg_bufs bufs;
+ struct lustre_cfg * lcfg = NULL;
+ int rc;
+
+ CDEBUG(D_TRACE, "lcfg %s %#x %s %s %s %s\n", cfgname,
+ cmd, s1, s2, s3, s4);
+
+ lustre_cfg_bufs_reset(&bufs, cfgname);
+ if (s1)
+ lustre_cfg_bufs_set_string(&bufs, 1, s1);
+ if (s2)
+ lustre_cfg_bufs_set_string(&bufs, 2, s2);
+ if (s3)
+ lustre_cfg_bufs_set_string(&bufs, 3, s3);
+ if (s4)
+ lustre_cfg_bufs_set_string(&bufs, 4, s4);
+
+ lcfg = lustre_cfg_new(cmd, &bufs);
+ lcfg->lcfg_nid = nid;
+ rc = class_process_config(lcfg);
+ lustre_cfg_free(lcfg);
+ return(rc);
+}
+EXPORT_SYMBOL(do_lcfg);
+
+/** Call class_attach and class_setup. These methods in turn call
+ * obd type-specific methods.
+ */
+int lustre_start_simple(char *obdname, char *type, char *uuid,
+ char *s1, char *s2, char *s3, char *s4)
+{
+ int rc;
+ CDEBUG(D_MOUNT, "Starting obd %s (typ=%s)\n", obdname, type);
+
+ rc = do_lcfg(obdname, 0, LCFG_ATTACH, type, uuid, 0, 0);
+ if (rc) {
+ CERROR("%s attach error %d\n", obdname, rc);
+ return rc;
+ }
+ rc = do_lcfg(obdname, 0, LCFG_SETUP, s1, s2, s3, s4);
+ if (rc) {
+ CERROR("%s setup error %d\n", obdname, rc);
+ do_lcfg(obdname, 0, LCFG_DETACH, 0, 0, 0, 0);
+ }
+ return rc;
+}
+
+DEFINE_MUTEX(mgc_start_lock);
+
+/** Set up a mgc obd to process startup logs
+ *
+ * \param sb [in] super block of the mgc obd
+ *
+ * \retval 0 success, otherwise error code
+ */
+int lustre_start_mgc(struct super_block *sb)
+{
+ struct obd_connect_data *data = NULL;
+ struct lustre_sb_info *lsi = s2lsi(sb);
+ struct obd_device *obd;
+ struct obd_export *exp;
+ struct obd_uuid *uuid;
+ class_uuid_t uuidc;
+ lnet_nid_t nid;
+ char *mgcname = NULL, *niduuid = NULL, *mgssec = NULL;
+ char *ptr;
+ int recov_bk;
+ int rc = 0, i = 0, j, len;
+
+ LASSERT(lsi->lsi_lmd);
+
+ /* Find the first non-lo MGS nid for our MGC name */
+ if (IS_SERVER(lsi)) {
+ /* mount -o mgsnode=nid */
+ ptr = lsi->lsi_lmd->lmd_mgs;
+ if (lsi->lsi_lmd->lmd_mgs &&
+ (class_parse_nid(lsi->lsi_lmd->lmd_mgs, &nid, &ptr) == 0)) {
+ i++;
+ } else if (IS_MGS(lsi)) {
+ lnet_process_id_t id;
+ while ((rc = LNetGetId(i++, &id)) != -ENOENT) {
+ if (LNET_NETTYP(LNET_NIDNET(id.nid)) == LOLND)
+ continue;
+ nid = id.nid;
+ i++;
+ break;
+ }
+ }
+ } else { /* client */
+ /* Use nids from mount line: uml1,1@elan:uml2,2@elan:/lustre */
+ ptr = lsi->lsi_lmd->lmd_dev;
+ if (class_parse_nid(ptr, &nid, &ptr) == 0)
+ i++;
+ }
+ if (i == 0) {
+ CERROR("No valid MGS nids found.\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&mgc_start_lock);
+
+ len = strlen(LUSTRE_MGC_OBDNAME) + strlen(libcfs_nid2str(nid)) + 1;
+ OBD_ALLOC(mgcname, len);
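+ /* niduuid needs extra room for the "_<index>" suffix added below */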
+ OBD_ALLOC(niduuid, len + 2);
+ if (!mgcname || !niduuid)
+ GOTO(out_free, rc = -ENOMEM);
+ sprintf(mgcname, "%s%s", LUSTRE_MGC_OBDNAME, libcfs_nid2str(nid));
+
+ mgssec = lsi->lsi_lmd->lmd_mgssec ? lsi->lsi_lmd->lmd_mgssec : "";
+
+ OBD_ALLOC_PTR(data);
+ if (data == NULL)
+ GOTO(out_free, rc = -ENOMEM);
+
+ obd = class_name2obd(mgcname);
+ if (obd && !obd->obd_stopping) {
+ rc = obd_set_info_async(NULL, obd->obd_self_export,
+ strlen(KEY_MGSSEC), KEY_MGSSEC,
+ strlen(mgssec), mgssec, NULL);
+ if (rc)
+ GOTO(out_free, rc);
+
+ /* Re-using an existing MGC */
+ atomic_inc(&obd->u.cli.cl_mgc_refcount);
+
+ /* IR compatibility check, only for clients */
+ if (lmd_is_client(lsi->lsi_lmd)) {
+ int has_ir;
+ int vallen = sizeof(*data);
+ __u32 *flags = &lsi->lsi_lmd->lmd_flags;
+
+ rc = obd_get_info(NULL, obd->obd_self_export,
+ strlen(KEY_CONN_DATA), KEY_CONN_DATA,
+ &vallen, data, NULL);
+ LASSERT(rc == 0);
+ has_ir = OCD_HAS_FLAG(data, IMP_RECOV);
+ if (has_ir ^ !(*flags & LMD_FLG_NOIR)) {
+ /* LMD_FLG_NOIR is for test purpose only */
+ LCONSOLE_WARN(
+ "Trying to mount a client with IR setting "
+ "not compatible with current mgc. "
+ "Force to use current mgc setting that is "
+ "IR %s.\n",
+ has_ir ? "enabled" : "disabled");
+ if (has_ir)
+ *flags &= ~LMD_FLG_NOIR;
+ else
+ *flags |= LMD_FLG_NOIR;
+ }
+ }
+
+ recov_bk = 0;
+ /* If we are restarting the MGS, don't try to keep the MGC's
+ old connection, or registration will fail. */
+ if (IS_MGS(lsi)) {
+ CDEBUG(D_MOUNT, "New MGS with live MGC\n");
+ recov_bk = 1;
+ }
+
+ /* Try all connections, but only once (again).
+ We don't want to block another target from starting
+ (using its local copy of the log), but we do want to connect
+ if at all possible. */
+ recov_bk++;
+ CDEBUG(D_MOUNT, "%s: Set MGC reconnect %d\n", mgcname,recov_bk);
+ rc = obd_set_info_async(NULL, obd->obd_self_export,
+ sizeof(KEY_INIT_RECOV_BACKUP),
+ KEY_INIT_RECOV_BACKUP,
+ sizeof(recov_bk), &recov_bk, NULL);
+ GOTO(out, rc = 0);
+ }
+
+ CDEBUG(D_MOUNT, "Start MGC '%s'\n", mgcname);
+
+ /* Add the primary nids for the MGS */
+ i = 0;
+ sprintf(niduuid, "%s_%x", mgcname, i);
+ if (IS_SERVER(lsi)) {
+ ptr = lsi->lsi_lmd->lmd_mgs;
+ if (IS_MGS(lsi)) {
+ /* Use local nids (including LO) */
+ lnet_process_id_t id;
+ while ((rc = LNetGetId(i++, &id)) != -ENOENT) {
+ rc = do_lcfg(mgcname, id.nid,
+ LCFG_ADD_UUID, niduuid, 0,0,0);
+ }
+ } else {
+ /* Use mgsnode= nids */
+ /* mount -o mgsnode=nid */
+ if (lsi->lsi_lmd->lmd_mgs) {
+ ptr = lsi->lsi_lmd->lmd_mgs;
+ } else if (class_find_param(ptr, PARAM_MGSNODE,
+ &ptr) != 0) {
+ CERROR("No MGS nids given.\n");
+ GOTO(out_free, rc = -EINVAL);
+ }
+ while (class_parse_nid(ptr, &nid, &ptr) == 0) {
+ rc = do_lcfg(mgcname, nid,
+ LCFG_ADD_UUID, niduuid, 0,0,0);
+ i++;
+ }
+ }
+ } else { /* client */
+ /* Use nids from mount line: uml1,1@elan:uml2,2@elan:/lustre */
+ ptr = lsi->lsi_lmd->lmd_dev;
+ while (class_parse_nid(ptr, &nid, &ptr) == 0) {
+ rc = do_lcfg(mgcname, nid,
+ LCFG_ADD_UUID, niduuid, 0,0,0);
+ i++;
+ /* Stop at the first failover nid */
+ if (*ptr == ':')
+ break;
+ }
+ }
+ if (i == 0) {
+ CERROR("No valid MGS nids found.\n");
+ GOTO(out_free, rc = -EINVAL);
+ }
+ lsi->lsi_lmd->lmd_mgs_failnodes = 1;
+
+ /* Random uuid for MGC allows easier reconnects */
+ OBD_ALLOC_PTR(uuid);
+ ll_generate_random_uuid(uuidc);
+ class_uuid_unparse(uuidc, uuid);
+
+ /* Start the MGC */
+ rc = lustre_start_simple(mgcname, LUSTRE_MGC_NAME,
+ (char *)uuid->uuid, LUSTRE_MGS_OBDNAME,
+ niduuid, 0, 0);
+ OBD_FREE_PTR(uuid);
+ if (rc)
+ GOTO(out_free, rc);
+
+ /* Add any failover MGS nids */
+ i = 1;
+ while (ptr && ((*ptr == ':' ||
+ class_find_param(ptr, PARAM_MGSNODE, &ptr) == 0))) {
+ /* New failover node */
+ sprintf(niduuid, "%s_%x", mgcname, i);
+ j = 0;
+ while (class_parse_nid_quiet(ptr, &nid, &ptr) == 0) {
+ j++;
+ rc = do_lcfg(mgcname, nid,
+ LCFG_ADD_UUID, niduuid, 0,0,0);
+ if (*ptr == ':')
+ break;
+ }
+ if (j > 0) {
+ rc = do_lcfg(mgcname, 0, LCFG_ADD_CONN,
+ niduuid, 0, 0, 0);
+ i++;
+ } else {
+ /* at ":/fsname" */
+ break;
+ }
+ }
+ lsi->lsi_lmd->lmd_mgs_failnodes = i;
+
+ obd = class_name2obd(mgcname);
+ if (!obd) {
+ CERROR("Can't find mgcobd %s\n", mgcname);
+ GOTO(out_free, rc = -ENOTCONN);
+ }
+
+ rc = obd_set_info_async(NULL, obd->obd_self_export,
+ strlen(KEY_MGSSEC), KEY_MGSSEC,
+ strlen(mgssec), mgssec, NULL);
+ if (rc)
+ GOTO(out_free, rc);
+
+ /* Keep a refcount of servers/clients who started with "mount",
+ so we know when we can get rid of the mgc. */
+ atomic_set(&obd->u.cli.cl_mgc_refcount, 1);
+
+ /* Try all connections, but only once. */
+ recov_bk = 1;
+ rc = obd_set_info_async(NULL, obd->obd_self_export,
+ sizeof(KEY_INIT_RECOV_BACKUP),
+ KEY_INIT_RECOV_BACKUP,
+ sizeof(recov_bk), &recov_bk, NULL);
+ if (rc)
+ /* nonfatal */
+ CWARN("can't set %s %d\n", KEY_INIT_RECOV_BACKUP, rc);
+
+ /* We connect to the MGS at setup, and don't disconnect until cleanup */
+ data->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_AT |
+ OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV |
+ OBD_CONNECT_LVB_TYPE;
+
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0)
+ data->ocd_connect_flags |= OBD_CONNECT_MNE_SWAB;
+#else
+#warning "LU-1644: Remove old OBD_CONNECT_MNE_SWAB fixup and imp_need_mne_swab"
+#endif
+
+ if (lmd_is_client(lsi->lsi_lmd) &&
+ lsi->lsi_lmd->lmd_flags & LMD_FLG_NOIR)
+ data->ocd_connect_flags &= ~OBD_CONNECT_IMP_RECOV;
+ data->ocd_version = LUSTRE_VERSION_CODE;
+ rc = obd_connect(NULL, &exp, obd, &(obd->obd_uuid), data, NULL);
+ if (rc) {
+ CERROR("connect failed %d\n", rc);
+ GOTO(out, rc);
+ }
+
+ obd->u.cli.cl_mgc_mgsexp = exp;
+
+out:
+ /* Keep the mgc info in the sb. Note that many lsi's can point
+ to the same mgc.*/
+ lsi->lsi_mgc = obd;
+out_free:
+ mutex_unlock(&mgc_start_lock);
+
+ if (data)
+ OBD_FREE_PTR(data);
+ if (mgcname)
+ OBD_FREE(mgcname, len);
+ if (niduuid)
+ OBD_FREE(niduuid, len + 2);
+ return rc;
+}
+
+static int lustre_stop_mgc(struct super_block *sb)
+{
+ struct lustre_sb_info *lsi = s2lsi(sb);
+ struct obd_device *obd;
+ char *niduuid = 0, *ptr = 0;
+ int i, rc = 0, len = 0;
+
+ if (!lsi)
+ return -ENOENT;
+ obd = lsi->lsi_mgc;
+ if (!obd)
+ return -ENOENT;
+ lsi->lsi_mgc = NULL;
+
+ mutex_lock(&mgc_start_lock);
+ LASSERT(atomic_read(&obd->u.cli.cl_mgc_refcount) > 0);
+ if (!atomic_dec_and_test(&obd->u.cli.cl_mgc_refcount)) {
+ /* This is not fatal, every client that stops
+ will call in here. */
+ CDEBUG(D_MOUNT, "mgc still has %d references.\n",
+ atomic_read(&obd->u.cli.cl_mgc_refcount));
+ GOTO(out, rc = -EBUSY);
+ }
+
+ /* The MGC has no recoverable data in any case.
+ * force shutdown set in umount_begin */
+ obd->obd_no_recov = 1;
+
+ if (obd->u.cli.cl_mgc_mgsexp) {
+ /* An error is not fatal, if we are unable to send the
+ disconnect mgs ping evictor cleans up the export */
+ rc = obd_disconnect(obd->u.cli.cl_mgc_mgsexp);
+ if (rc)
+ CDEBUG(D_MOUNT, "disconnect failed %d\n", rc);
+ }
+
+ /* Save the obdname for cleaning the nid uuids, which are
+ obdname_XX */
+ len = strlen(obd->obd_name) + 6;
+ OBD_ALLOC(niduuid, len);
+ if (niduuid) {
+ strcpy(niduuid, obd->obd_name);
+ ptr = niduuid + strlen(niduuid);
+ }
+
+ rc = class_manual_cleanup(obd);
+ if (rc)
+ GOTO(out, rc);
+
+ /* Clean the nid uuids */
+ if (!niduuid)
+ GOTO(out, rc = -ENOMEM);
+
+ for (i = 0; i < lsi->lsi_lmd->lmd_mgs_failnodes; i++) {
+ sprintf(ptr, "_%x", i);
+ rc = do_lcfg(LUSTRE_MGC_OBDNAME, 0, LCFG_DEL_UUID,
+ niduuid, 0, 0, 0);
+ if (rc)
+ CERROR("del MDC UUID %s failed: rc = %d\n",
+ niduuid, rc);
+ }
+out:
+ if (niduuid)
+ OBD_FREE(niduuid, len);
+
+ /* class_import_put will get rid of the additional connections */
+ mutex_unlock(&mgc_start_lock);
+ return rc;
+}
+
+/***************** lustre superblock **************/
+
+struct lustre_sb_info *lustre_init_lsi(struct super_block *sb)
+{
+ struct lustre_sb_info *lsi;
+
+ OBD_ALLOC_PTR(lsi);
+ if (!lsi)
+ return NULL;
+ OBD_ALLOC_PTR(lsi->lsi_lmd);
+ if (!lsi->lsi_lmd) {
+ OBD_FREE_PTR(lsi);
+ return NULL;
+ }
+
+ lsi->lsi_lmd->lmd_exclude_count = 0;
+ lsi->lsi_lmd->lmd_recovery_time_soft = 0;
+ lsi->lsi_lmd->lmd_recovery_time_hard = 0;
+ s2lsi_nocast(sb) = lsi;
+ /* we take 1 extra ref for our setup */
+ atomic_set(&lsi->lsi_mounts, 1);
+
+ /* Default umount style */
+ lsi->lsi_flags = LSI_UMOUNT_FAILOVER;
+
+ return lsi;
+}
+
+static int lustre_free_lsi(struct super_block *sb)
+{
+ struct lustre_sb_info *lsi = s2lsi(sb);
+
+ LASSERT(lsi != NULL);
+ CDEBUG(D_MOUNT, "Freeing lsi %p\n", lsi);
+
+ /* someone didn't call server_put_mount. */
+ LASSERT(atomic_read(&lsi->lsi_mounts) == 0);
+
+ if (lsi->lsi_lmd != NULL) {
+ if (lsi->lsi_lmd->lmd_dev != NULL)
+ OBD_FREE(lsi->lsi_lmd->lmd_dev,
+ strlen(lsi->lsi_lmd->lmd_dev) + 1);
+ if (lsi->lsi_lmd->lmd_profile != NULL)
+ OBD_FREE(lsi->lsi_lmd->lmd_profile,
+ strlen(lsi->lsi_lmd->lmd_profile) + 1);
+ if (lsi->lsi_lmd->lmd_mgssec != NULL)
+ OBD_FREE(lsi->lsi_lmd->lmd_mgssec,
+ strlen(lsi->lsi_lmd->lmd_mgssec) + 1);
+ if (lsi->lsi_lmd->lmd_opts != NULL)
+ OBD_FREE(lsi->lsi_lmd->lmd_opts,
+ strlen(lsi->lsi_lmd->lmd_opts) + 1);
+ if (lsi->lsi_lmd->lmd_exclude_count)
+ OBD_FREE(lsi->lsi_lmd->lmd_exclude,
+ sizeof(lsi->lsi_lmd->lmd_exclude[0]) *
+ lsi->lsi_lmd->lmd_exclude_count);
+ if (lsi->lsi_lmd->lmd_mgs != NULL)
+ OBD_FREE(lsi->lsi_lmd->lmd_mgs,
+ strlen(lsi->lsi_lmd->lmd_mgs) + 1);
+ if (lsi->lsi_lmd->lmd_osd_type != NULL)
+ OBD_FREE(lsi->lsi_lmd->lmd_osd_type,
+ strlen(lsi->lsi_lmd->lmd_osd_type) + 1);
+ if (lsi->lsi_lmd->lmd_params != NULL)
+ OBD_FREE(lsi->lsi_lmd->lmd_params, 4096);
+
+ OBD_FREE(lsi->lsi_lmd, sizeof(*lsi->lsi_lmd));
+ }
+
+ LASSERT(lsi->lsi_llsbi == NULL);
+ OBD_FREE(lsi, sizeof(*lsi));
+ s2lsi_nocast(sb) = NULL;
+
+ return 0;
+}
+
+/* The lsi has one reference for every server that is using the disk -
+ e.g. MDT, MGS, and potentially MGC */
+int lustre_put_lsi(struct super_block *sb)
+{
+ struct lustre_sb_info *lsi = s2lsi(sb);
+
+ LASSERT(lsi != NULL);
+
+ CDEBUG(D_MOUNT, "put %p %d\n", sb, atomic_read(&lsi->lsi_mounts));
+ if (atomic_dec_and_test(&lsi->lsi_mounts)) {
+ if (IS_SERVER(lsi) && lsi->lsi_osd_exp) {
+ obd_disconnect(lsi->lsi_osd_exp);
+ /* wait till OSD is gone */
+ obd_zombie_barrier();
+ }
+ lustre_free_lsi(sb);
+ return 1;
+ }
+ return 0;
+}
+
+/*** SERVER NAME ***
+ * <FSNAME><SEPARATOR><TYPE><INDEX>
+ * FSNAME is between 1 and 8 characters (inclusive).
+ * Excluded characters are '/' and ':'
+ * SEPARATOR is either ':' or '-'
+ * TYPE: "OST", "MDT", etc.
+ * INDEX: Hex representation of the index
+ */
+
+/** Get the fsname ("lustre") from the server name ("lustre-OST003F").
+ * @param [in] svname server name including type and index
+ * @param [out] fsname Buffer to copy filesystem name prefix into.
+ * Must have at least 'strlen(fsname) + 1' chars.
+ * @param [out] endptr if endptr isn't NULL it is set to end of fsname
+ * rc < 0 on error
+ */
+int server_name2fsname(const char *svname, char *fsname, const char **endptr)
+{
+ const char *dash;
+
+ dash = svname + strnlen(svname, 8); /* max fsname length is 8 */
+ for (; dash > svname && *dash != '-' && *dash != ':'; dash--)
+ ;
+ if (dash == svname)
+ return -EINVAL;
+
+ if (fsname != NULL) {
+ strncpy(fsname, svname, dash - svname);
+ fsname[dash - svname] = '\0';
+ }
+
+ if (endptr != NULL)
+ *endptr = dash;
+
+ return 0;
+}
+EXPORT_SYMBOL(server_name2fsname);
+
+/**
+ * Get service name (svname) from string
+ * rc < 0 on error
+ * if endptr isn't NULL it is set to end of fsname
+ */
+int server_name2svname(const char *label, char *svname, const char **endptr,
+ size_t svsize)
+{
+ int rc;
+ const char *dash;
+
+ /* We use server_name2fsname() just for parsing */
+ rc = server_name2fsname(label, NULL, &dash);
+ if (rc != 0)
+ return rc;
+
+ if (endptr != NULL)
+ *endptr = dash;
+
+ if (strlcpy(svname, dash + 1, svsize) >= svsize)
+ return -E2BIG;
+
+ return 0;
+}
+EXPORT_SYMBOL(server_name2svname);
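+
+/* e.g. server_name2svname("lustre-MDT0000", svname, NULL, svsize) copies
+ * "MDT0000" into svname (given sufficient svsize) and returns 0. */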
+
+
+/* Get the index from the obd name.
+ rc = server type, or
+ rc < 0 on error
+ if endptr isn't NULL it is set to end of name */
+int server_name2index(const char *svname, __u32 *idx, const char **endptr)
+{
+ unsigned long index;
+ int rc;
+ const char *dash;
+
+ /* We use server_name2fsname() just for parsing */
+ rc = server_name2fsname(svname, NULL, &dash);
+ if (rc != 0)
+ return rc;
+
+ dash++;
+
+ if (strncmp(dash, "MDT", 3) == 0)
+ rc = LDD_F_SV_TYPE_MDT;
+ else if (strncmp(dash, "OST", 3) == 0)
+ rc = LDD_F_SV_TYPE_OST;
+ else
+ return -EINVAL;
+
+ dash += 3;
+
+ if (strncmp(dash, "all", 3) == 0) {
+ if (endptr != NULL)
+ *endptr = dash + 3;
+ return rc | LDD_F_SV_ALL;
+ }
+
+ index = simple_strtoul(dash, (char **)endptr, 16);
+ if (idx != NULL)
+ *idx = index;
+
+ /* Account for -mdc after index that is possible when specifying mdt */
+ if (endptr != NULL && strncmp(LUSTRE_MDC_NAME, *endptr + 1,
+ sizeof(LUSTRE_MDC_NAME)-1) == 0)
+ *endptr += sizeof(LUSTRE_MDC_NAME);
+
+ return rc;
+}
+EXPORT_SYMBOL(server_name2index);
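+
+/* e.g. server_name2index("lustre-OST003F", &idx, NULL) sets idx to 0x3f and
+ * returns LDD_F_SV_TYPE_OST. */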
+
+/*************** mount common between server and client ***************/
+
+/* Common umount */
+int lustre_common_put_super(struct super_block *sb)
+{
+ int rc;
+
+ CDEBUG(D_MOUNT, "dropping sb %p\n", sb);
+
+ /* Drop a ref to the MGC */
+ rc = lustre_stop_mgc(sb);
+ if (rc && (rc != -ENOENT)) {
+ if (rc != -EBUSY) {
+ CERROR("Can't stop MGC: %d\n", rc);
+ return rc;
+ }
+ /* BUSY just means that there's some other obd that
+ needs the mgc. Let him clean it up. */
+ CDEBUG(D_MOUNT, "MGC still in use\n");
+ }
+ /* Drop a ref to the mounted disk */
+ lustre_put_lsi(sb);
+ lu_types_stop();
+ return rc;
+}
+EXPORT_SYMBOL(lustre_common_put_super);
+
+static void lmd_print(struct lustre_mount_data *lmd)
+{
+ int i;
+
+ PRINT_CMD(D_MOUNT, " mount data:\n");
+ if (lmd_is_client(lmd))
+ PRINT_CMD(D_MOUNT, "profile: %s\n", lmd->lmd_profile);
+ PRINT_CMD(D_MOUNT, "device: %s\n", lmd->lmd_dev);
+ PRINT_CMD(D_MOUNT, "flags: %x\n", lmd->lmd_flags);
+
+ if (lmd->lmd_opts)
+ PRINT_CMD(D_MOUNT, "options: %s\n", lmd->lmd_opts);
+
+ if (lmd->lmd_recovery_time_soft)
+ PRINT_CMD(D_MOUNT, "recovery time soft: %d\n",
+ lmd->lmd_recovery_time_soft);
+
+ if (lmd->lmd_recovery_time_hard)
+ PRINT_CMD(D_MOUNT, "recovery time hard: %d\n",
+ lmd->lmd_recovery_time_hard);
+
+ for (i = 0; i < lmd->lmd_exclude_count; i++) {
+ PRINT_CMD(D_MOUNT, "exclude %d: OST%04x\n", i,
+ lmd->lmd_exclude[i]);
+ }
+}
+
+/* Is this server on the exclusion list */
+int lustre_check_exclusion(struct super_block *sb, char *svname)
+{
+ struct lustre_sb_info *lsi = s2lsi(sb);
+ struct lustre_mount_data *lmd = lsi->lsi_lmd;
+ __u32 index;
+ int i, rc;
+
+ rc = server_name2index(svname, &index, NULL);
+ if (rc != LDD_F_SV_TYPE_OST)
+ /* Only exclude OSTs */
+ return 0;
+
+ CDEBUG(D_MOUNT, "Check exclusion %s (%d) in %d of %s\n", svname,
+ index, lmd->lmd_exclude_count, lmd->lmd_dev);
+
+ for (i = 0; i < lmd->lmd_exclude_count; i++) {
+ if (index == lmd->lmd_exclude[i]) {
+ CWARN("Excluding %s (on exclusion list)\n", svname);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* mount -v -o exclude=lustre-OST0001:lustre-OST0002 -t lustre ... */
+static int lmd_make_exclusion(struct lustre_mount_data *lmd, const char *ptr)
+{
+ const char *s1 = ptr, *s2;
+ __u32 index, *exclude_list;
+ int rc = 0, devmax;
+
+ /* The shortest an ost name can be is 8 chars: -OST0000.
+ We don't actually know the fsname at this time, so in fact
+ a user could specify any fsname. */
+ devmax = strlen(ptr) / 8 + 1;
+
+ /* temp storage until we figure out how many we have */
+ OBD_ALLOC(exclude_list, sizeof(index) * devmax);
+ if (!exclude_list)
+ return -ENOMEM;
+
+ /* we enter this fn pointing at the '=' */
+ while (*s1 && *s1 != ' ' && *s1 != ',') {
+ s1++;
+ rc = server_name2index(s1, &index, &s2);
+ if (rc < 0) {
+ CERROR("Can't parse server name '%s': rc = %d\n",
+ s1, rc);
+ break;
+ }
+ if (rc == LDD_F_SV_TYPE_OST)
+ exclude_list[lmd->lmd_exclude_count++] = index;
+ else
+ CDEBUG(D_MOUNT, "ignoring exclude %.*s: type = %#x\n",
+ (uint)(s2-s1), s1, rc);
+ s1 = s2;
+ /* now we are pointing at ':' (next exclude)
+ or ',' (end of excludes) */
+ if (lmd->lmd_exclude_count >= devmax)
+ break;
+ }
+ if (rc >= 0) /* non-err */
+ rc = 0;
+
+ if (lmd->lmd_exclude_count) {
+ /* permanent, freed in lustre_free_lsi */
+ OBD_ALLOC(lmd->lmd_exclude, sizeof(index) *
+ lmd->lmd_exclude_count);
+ if (lmd->lmd_exclude) {
+ memcpy(lmd->lmd_exclude, exclude_list,
+ sizeof(index) * lmd->lmd_exclude_count);
+ } else {
+ rc = -ENOMEM;
+ lmd->lmd_exclude_count = 0;
+ }
+ }
+ OBD_FREE(exclude_list, sizeof(index) * devmax);
+ return rc;
+}
+
+static int lmd_parse_mgssec(struct lustre_mount_data *lmd, char *ptr)
+{
+ char *tail;
+ int length;
+
+ if (lmd->lmd_mgssec != NULL) {
+ OBD_FREE(lmd->lmd_mgssec, strlen(lmd->lmd_mgssec) + 1);
+ lmd->lmd_mgssec = NULL;
+ }
+
+ tail = strchr(ptr, ',');
+ if (tail == NULL)
+ length = strlen(ptr);
+ else
+ length = tail - ptr;
+
+ OBD_ALLOC(lmd->lmd_mgssec, length + 1);
+ if (lmd->lmd_mgssec == NULL)
+ return -ENOMEM;
+
+ memcpy(lmd->lmd_mgssec, ptr, length);
+ lmd->lmd_mgssec[length] = '\0';
+ return 0;
+}
+
+static int lmd_parse_string(char **handle, char *ptr)
+{
+ char *tail;
+ int length;
+
+ if ((handle == NULL) || (ptr == NULL))
+ return -EINVAL;
+
+ if (*handle != NULL) {
+ OBD_FREE(*handle, strlen(*handle) + 1);
+ *handle = NULL;
+ }
+
+ tail = strchr(ptr, ',');
+ if (tail == NULL)
+ length = strlen(ptr);
+ else
+ length = tail - ptr;
+
+ OBD_ALLOC(*handle, length + 1);
+ if (*handle == NULL)
+ return -ENOMEM;
+
+ memcpy(*handle, ptr, length);
+ (*handle)[length] = '\0';
+
+ return 0;
+}
+
+/* Collect multiple values for mgsnid specifiers */
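+/* e.g. repeated "mgsnode=<nid>" options accumulate into lmd_mgs as
+ * "<nid1>:<nid2>", with ':' separating failover MGS locations. */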
+static int lmd_parse_mgs(struct lustre_mount_data *lmd, char **ptr)
+{
+ lnet_nid_t nid;
+ char *tail = *ptr;
+ char *mgsnid;
+ int length;
+ int oldlen = 0;
+
+ /* Find end of nidlist */
+ while (class_parse_nid_quiet(tail, &nid, &tail) == 0) {}
+ length = tail - *ptr;
+ if (length == 0) {
+ LCONSOLE_ERROR_MSG(0x159, "Can't parse NID '%s'\n", *ptr);
+ return -EINVAL;
+ }
+
+ if (lmd->lmd_mgs != NULL)
+ oldlen = strlen(lmd->lmd_mgs) + 1;
+
+ OBD_ALLOC(mgsnid, oldlen + length + 1);
+ if (mgsnid == NULL)
+ return -ENOMEM;
+
+ if (lmd->lmd_mgs != NULL) {
+ /* Multiple mgsnid= are taken to mean failover locations */
+ memcpy(mgsnid, lmd->lmd_mgs, oldlen);
+ mgsnid[oldlen - 1] = ':';
+ OBD_FREE(lmd->lmd_mgs, oldlen);
+ }
+ memcpy(mgsnid + oldlen, *ptr, length);
+ mgsnid[oldlen + length] = '\0';
+ lmd->lmd_mgs = mgsnid;
+ *ptr = tail;
+
+ return 0;
+}
+
+/** Parse mount line options
+ * e.g. mount -v -t lustre -o abort_recov uml1:uml2:/lustre-client /mnt/lustre
+ * dev is passed as device=uml1:/lustre by mount.lustre
+ */
+static int lmd_parse(char *options, struct lustre_mount_data *lmd)
+{
+ char *s1, *s2, *devname = NULL;
+ struct lustre_mount_data *raw = (struct lustre_mount_data *)options;
+ int rc = 0;
+
+ LASSERT(lmd);
+ if (!options) {
+ LCONSOLE_ERROR_MSG(0x162, "Missing mount data: check that "
+ "/sbin/mount.lustre is installed.\n");
+ return -EINVAL;
+ }
+
+ /* Options should be a string - try to detect old lmd data */
+ if ((raw->lmd_magic & 0xffffff00) == (LMD_MAGIC & 0xffffff00)) {
+ LCONSOLE_ERROR_MSG(0x163, "You're using an old version of "
+ "/sbin/mount.lustre. Please install "
+ "version %s\n", LUSTRE_VERSION_STRING);
+ return -EINVAL;
+ }
+ lmd->lmd_magic = LMD_MAGIC;
+
+ OBD_ALLOC(lmd->lmd_params, 4096);
+ if (lmd->lmd_params == NULL)
+ return -ENOMEM;
+ lmd->lmd_params[0] = '\0';
+
+ /* Set default flags here */
+
+ s1 = options;
+ while (*s1) {
+ int clear = 0;
+ int time_min = OBD_RECOVERY_TIME_MIN;
+
+ /* Skip whitespace and extra commas */
+ while (*s1 == ' ' || *s1 == ',')
+ s1++;
+
+ /* Client options are parsed in ll_options: eg. flock,
+ user_xattr, acl */
+
+ /* Parse non-ldiskfs options here. Rather than modifying
+ ldiskfs, we just zero these out here */
+ if (strncmp(s1, "abort_recov", 11) == 0) {
+ lmd->lmd_flags |= LMD_FLG_ABORT_RECOV;
+ clear++;
+ } else if (strncmp(s1, "recovery_time_soft=", 19) == 0) {
+ lmd->lmd_recovery_time_soft = max_t(int,
+ simple_strtoul(s1 + 19, NULL, 10), time_min);
+ clear++;
+ } else if (strncmp(s1, "recovery_time_hard=", 19) == 0) {
+ lmd->lmd_recovery_time_hard = max_t(int,
+ simple_strtoul(s1 + 19, NULL, 10), time_min);
+ clear++;
+ } else if (strncmp(s1, "noir", 4) == 0) {
+ lmd->lmd_flags |= LMD_FLG_NOIR; /* test purpose only. */
+ clear++;
+ } else if (strncmp(s1, "nosvc", 5) == 0) {
+ lmd->lmd_flags |= LMD_FLG_NOSVC;
+ clear++;
+ } else if (strncmp(s1, "nomgs", 5) == 0) {
+ lmd->lmd_flags |= LMD_FLG_NOMGS;
+ clear++;
+ } else if (strncmp(s1, "noscrub", 7) == 0) {
+ lmd->lmd_flags |= LMD_FLG_NOSCRUB;
+ clear++;
+ } else if (strncmp(s1, PARAM_MGSNODE,
+ sizeof(PARAM_MGSNODE) - 1) == 0) {
+ s2 = s1 + sizeof(PARAM_MGSNODE) - 1;
+ /* Assume the next mount opt is the first
+ invalid nid we get to. */
+ rc = lmd_parse_mgs(lmd, &s2);
+ if (rc)
+ goto invalid;
+ clear++;
+ } else if (strncmp(s1, "writeconf", 9) == 0) {
+ lmd->lmd_flags |= LMD_FLG_WRITECONF;
+ clear++;
+ } else if (strncmp(s1, "update", 6) == 0) {
+ lmd->lmd_flags |= LMD_FLG_UPDATE;
+ clear++;
+ } else if (strncmp(s1, "virgin", 6) == 0) {
+ lmd->lmd_flags |= LMD_FLG_VIRGIN;
+ clear++;
+ } else if (strncmp(s1, "noprimnode", 10) == 0) {
+ lmd->lmd_flags |= LMD_FLG_NO_PRIMNODE;
+ clear++;
+ } else if (strncmp(s1, "mgssec=", 7) == 0) {
+ rc = lmd_parse_mgssec(lmd, s1 + 7);
+ if (rc)
+ goto invalid;
+ clear++;
+ /* ost exclusion list */
+ } else if (strncmp(s1, "exclude=", 8) == 0) {
+ rc = lmd_make_exclusion(lmd, s1 + 7);
+ if (rc)
+ goto invalid;
+ clear++;
+ } else if (strncmp(s1, "mgs", 3) == 0) {
+ /* We are an MGS */
+ lmd->lmd_flags |= LMD_FLG_MGS;
+ clear++;
+ } else if (strncmp(s1, "svname=", 7) == 0) {
+ rc = lmd_parse_string(&lmd->lmd_profile, s1 + 7);
+ if (rc)
+ goto invalid;
+ clear++;
+ } else if (strncmp(s1, "param=", 6) == 0) {
+ int length;
+ char *tail = strchr(s1 + 6, ',');
+ if (tail == NULL)
+ length = strlen(s1);
+ else
+ length = tail - s1;
+ length -= 6;
+ strncat(lmd->lmd_params, s1 + 6, length);
+ strcat(lmd->lmd_params, " ");
+ clear++;
+ } else if (strncmp(s1, "osd=", 4) == 0) {
+ rc = lmd_parse_string(&lmd->lmd_osd_type, s1 + 4);
+ if (rc)
+ goto invalid;
+ clear++;
+ }
+ /* Linux 2.4 doesn't pass the device, so we stick it at the
+ end of the options. */
+ else if (strncmp(s1, "device=", 7) == 0) {
+ devname = s1 + 7;
+ /* terminate options right before device. device
+ must be the last one. */
+ *s1 = '\0';
+ break;
+ }
+
+ /* Find next opt */
+ s2 = strchr(s1, ',');
+ if (s2 == NULL) {
+ if (clear)
+ *s1 = '\0';
+ break;
+ }
+ s2++;
+ if (clear)
+ memmove(s1, s2, strlen(s2) + 1);
+ else
+ s1 = s2;
+ }
+
+ if (!devname) {
+ LCONSOLE_ERROR_MSG(0x164, "Can't find the device name "
+ "(need mount option 'device=...')\n");
+ goto invalid;
+ }
+
+ s1 = strstr(devname, ":/");
+ if (s1) {
+ ++s1;
+ lmd->lmd_flags |= LMD_FLG_CLIENT;
+ /* Remove leading /s from fsname */
+ while (*++s1 == '/') ;
+ /* Freed in lustre_free_lsi */
+ OBD_ALLOC(lmd->lmd_profile, strlen(s1) + 8);
+ if (!lmd->lmd_profile)
+ return -ENOMEM;
+ sprintf(lmd->lmd_profile, "%s-client", s1);
+ }
+
+ /* Freed in lustre_free_lsi */
+ OBD_ALLOC(lmd->lmd_dev, strlen(devname) + 1);
+ if (!lmd->lmd_dev)
+ return -ENOMEM;
+ strcpy(lmd->lmd_dev, devname);
+
+ /* Save mount options */
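+ /* Trim trailing commas/spaces left behind by removed options */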
+ s1 = options + strlen(options) - 1;
+ while (s1 >= options && (*s1 == ',' || *s1 == ' '))
+ *s1-- = 0;
+ if (*options != 0) {
+ /* Freed in lustre_free_lsi */
+ OBD_ALLOC(lmd->lmd_opts, strlen(options) + 1);
+ if (!lmd->lmd_opts)
+ return -ENOMEM;
+ strcpy(lmd->lmd_opts, options);
+ }
+
+ lmd_print(lmd);
+ lmd->lmd_magic = LMD_MAGIC;
+
+ return rc;
+
+invalid:
+ CERROR("Bad mount options %s\n", options);
+ return -EINVAL;
+}
+
+struct lustre_mount_data2 {
+ void *lmd2_data;
+ struct vfsmount *lmd2_mnt;
+};
+
+/** This is the entry point for the mount call into Lustre.
+ * This is called when a server or client is mounted,
+ * and this is where we start setting things up.
+ * @param data Mount options (e.g. -o flock,abort_recov)
+ */
+int lustre_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct lustre_mount_data *lmd;
+ struct lustre_mount_data2 *lmd2 = data;
+ struct lustre_sb_info *lsi;
+ int rc;
+
+ CDEBUG(D_MOUNT|D_VFSTRACE, "VFS Op: sb %p\n", sb);
+
+ lsi = lustre_init_lsi(sb);
+ if (!lsi)
+ return -ENOMEM;
+ lmd = lsi->lsi_lmd;
+
+ /*
+ * Disable lockdep during mount, because mount locking patterns are
+ * `special'.
+ */
+ lockdep_off();
+
+ /*
+ * LU-639: the obd cleanup of last mount may not finish yet, wait here.
+ */
+ obd_zombie_barrier();
+
+ /* Figure out the lmd from the mount options */
+ if (lmd_parse((char *)(lmd2->lmd2_data), lmd)) {
+ lustre_put_lsi(sb);
+ GOTO(out, rc = -EINVAL);
+ }
+
+ if (lmd_is_client(lmd)) {
+ CDEBUG(D_MOUNT, "Mounting client %s\n", lmd->lmd_profile);
+ if (!client_fill_super) {
+ LCONSOLE_ERROR_MSG(0x165, "Nothing registered for "
+ "client mount! Is the 'lustre' "
+ "module loaded?\n");
+ lustre_put_lsi(sb);
+ rc = -ENODEV;
+ } else {
+ rc = lustre_start_mgc(sb);
+ if (rc) {
+ lustre_put_lsi(sb);
+ GOTO(out, rc);
+ }
+ /* Connect and start */
+ /* (should always be ll_fill_super) */
+ rc = (*client_fill_super)(sb, lmd2->lmd2_mnt);
+ /* c_f_s will call lustre_common_put_super on failure */
+ }
+ } else {
+ CERROR("This is client-side-only module, "
+ "cannot handle server mount.\n");
+ rc = -EINVAL;
+ }
+
+ /* If error happens in fill_super() call, @lsi will be killed there.
+ * This is why we do not put it here. */
+ GOTO(out, rc);
+out:
+ if (rc) {
+ CERROR("Unable to mount %s (%d)\n",
+ s2lsi(sb) ? lmd->lmd_dev : "", rc);
+ } else {
+ CDEBUG(D_SUPER, "Mount %s complete\n",
+ lmd->lmd_dev);
+ }
+ lockdep_on();
+ return rc;
+}
+
+
+/* We can't call ll_fill_super by name because it lives in a module that
+ must be loaded after this one. */
+void lustre_register_client_fill_super(int (*cfs)(struct super_block *sb,
+ struct vfsmount *mnt))
+{
+ client_fill_super = cfs;
+}
+EXPORT_SYMBOL(lustre_register_client_fill_super);
+
+void lustre_register_kill_super_cb(void (*cfs)(struct super_block *sb))
+{
+ kill_super_cb = cfs;
+}
+EXPORT_SYMBOL(lustre_register_kill_super_cb);
+
+/***************** FS registration ******************/
+struct dentry *lustre_mount(struct file_system_type *fs_type, int flags,
+ const char *devname, void *data)
+{
+ struct lustre_mount_data2 lmd2 = { data, NULL };
+
+ return mount_nodev(fs_type, flags, &lmd2, lustre_fill_super);
+}
+
+void lustre_kill_super(struct super_block *sb)
+{
+ struct lustre_sb_info *lsi = s2lsi(sb);
+
+ if (kill_super_cb && lsi && !IS_SERVER(lsi))
+ (*kill_super_cb)(sb);
+
+ kill_anon_super(sb);
+}
+
+/** Register the "lustre" fs type
+ */
+struct file_system_type lustre_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "lustre",
+ .mount = lustre_mount,
+ .kill_sb = lustre_kill_super,
+ .fs_flags = FS_BINARY_MOUNTDATA | FS_REQUIRES_DEV |
+ FS_HAS_FIEMAP | FS_RENAME_DOES_D_MOVE,
+};
+
+int lustre_register_fs(void)
+{
+ return register_filesystem(&lustre_fs_type);
+}
+
+int lustre_unregister_fs(void)
+{
+ return unregister_filesystem(&lustre_fs_type);
+}
diff --git a/drivers/staging/lustre/lustre/obdclass/obdo.c b/drivers/staging/lustre/lustre/obdclass/obdo.c
new file mode 100644
index 0000000..7099764
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/obdo.c
@@ -0,0 +1,362 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdclass/obdo.c
+ *
+ * Object Devices Class Driver
+ * These are the only exported functions, they provide some generic
+ * infrastructure for managing object devices
+ */
+
+#define DEBUG_SUBSYSTEM S_CLASS
+
+#include <obd_class.h>
+#include <lustre/lustre_idl.h>
+
+void obdo_set_parent_fid(struct obdo *dst, const struct lu_fid *parent)
+{
+ dst->o_parent_oid = fid_oid(parent);
+ dst->o_parent_seq = fid_seq(parent);
+ dst->o_parent_ver = fid_ver(parent);
+ dst->o_valid |= OBD_MD_FLGENER | OBD_MD_FLFID;
+}
+EXPORT_SYMBOL(obdo_set_parent_fid);
+
+/* WARNING: the file systems must take care not to tinker with
+ attributes they don't manage (such as blocks). */
+void obdo_from_inode(struct obdo *dst, struct inode *src, obd_flag valid)
+{
+ obd_flag newvalid = 0;
+
+ if (valid & (OBD_MD_FLCTIME | OBD_MD_FLMTIME))
+ CDEBUG(D_INODE, "valid %x, new time %lu/%lu\n",
+ valid, LTIME_S(src->i_mtime),
+ LTIME_S(src->i_ctime));
+
+ if (valid & OBD_MD_FLATIME) {
+ dst->o_atime = LTIME_S(src->i_atime);
+ newvalid |= OBD_MD_FLATIME;
+ }
+ if (valid & OBD_MD_FLMTIME) {
+ dst->o_mtime = LTIME_S(src->i_mtime);
+ newvalid |= OBD_MD_FLMTIME;
+ }
+ if (valid & OBD_MD_FLCTIME) {
+ dst->o_ctime = LTIME_S(src->i_ctime);
+ newvalid |= OBD_MD_FLCTIME;
+ }
+ if (valid & OBD_MD_FLSIZE) {
+ dst->o_size = i_size_read(src);
+ newvalid |= OBD_MD_FLSIZE;
+ }
+ if (valid & OBD_MD_FLBLOCKS) { /* allocation of space (x512 bytes) */
+ dst->o_blocks = src->i_blocks;
+ newvalid |= OBD_MD_FLBLOCKS;
+ }
+ if (valid & OBD_MD_FLBLKSZ) { /* optimal block size */
+ dst->o_blksize = ll_inode_blksize(src);
+ newvalid |= OBD_MD_FLBLKSZ;
+ }
+ if (valid & OBD_MD_FLTYPE) {
+ dst->o_mode = (dst->o_mode & S_IALLUGO) |
+ (src->i_mode & S_IFMT);
+ newvalid |= OBD_MD_FLTYPE;
+ }
+ if (valid & OBD_MD_FLMODE) {
+ dst->o_mode = (dst->o_mode & S_IFMT) |
+ (src->i_mode & S_IALLUGO);
+ newvalid |= OBD_MD_FLMODE;
+ }
+ if (valid & OBD_MD_FLUID) {
+ dst->o_uid = from_kuid(&init_user_ns, src->i_uid);
+ newvalid |= OBD_MD_FLUID;
+ }
+ if (valid & OBD_MD_FLGID) {
+ dst->o_gid = from_kgid(&init_user_ns, src->i_gid);
+ newvalid |= OBD_MD_FLGID;
+ }
+ if (valid & OBD_MD_FLFLAGS) {
+ dst->o_flags = ll_inode_flags(src);
+ newvalid |= OBD_MD_FLFLAGS;
+ }
+ dst->o_valid |= newvalid;
+}
+EXPORT_SYMBOL(obdo_from_inode);
+
+void obdo_cpy_md(struct obdo *dst, struct obdo *src, obd_flag valid)
+{
+ CDEBUG(D_INODE, "src obdo "DOSTID" valid "LPX64", dst obdo "DOSTID"\n",
+ POSTID(&src->o_oi), src->o_valid, POSTID(&dst->o_oi));
+ if (valid & OBD_MD_FLATIME)
+ dst->o_atime = src->o_atime;
+ if (valid & OBD_MD_FLMTIME)
+ dst->o_mtime = src->o_mtime;
+ if (valid & OBD_MD_FLCTIME)
+ dst->o_ctime = src->o_ctime;
+ if (valid & OBD_MD_FLSIZE)
+ dst->o_size = src->o_size;
+ if (valid & OBD_MD_FLBLOCKS) /* allocation of space */
+ dst->o_blocks = src->o_blocks;
+ if (valid & OBD_MD_FLBLKSZ)
+ dst->o_blksize = src->o_blksize;
+ if (valid & OBD_MD_FLTYPE)
+ dst->o_mode = (dst->o_mode & ~S_IFMT) | (src->o_mode & S_IFMT);
+ if (valid & OBD_MD_FLMODE)
+ dst->o_mode = (dst->o_mode & S_IFMT) | (src->o_mode & ~S_IFMT);
+ if (valid & OBD_MD_FLUID)
+ dst->o_uid = src->o_uid;
+ if (valid & OBD_MD_FLGID)
+ dst->o_gid = src->o_gid;
+ if (valid & OBD_MD_FLFLAGS)
+ dst->o_flags = src->o_flags;
+ if (valid & OBD_MD_FLFID) {
+ dst->o_parent_seq = src->o_parent_seq;
+ dst->o_parent_ver = src->o_parent_ver;
+ }
+ if (valid & OBD_MD_FLGENER)
+ dst->o_parent_oid = src->o_parent_oid;
+ if (valid & OBD_MD_FLHANDLE)
+ dst->o_handle = src->o_handle;
+ if (valid & OBD_MD_FLCOOKIE)
+ dst->o_lcookie = src->o_lcookie;
+
+ dst->o_valid |= valid;
+}
+EXPORT_SYMBOL(obdo_cpy_md);
+
+/* returns FALSE if comparison (by flags) is same, TRUE if changed */
+int obdo_cmp_md(struct obdo *dst, struct obdo *src, obd_flag compare)
+{
+ int res = 0;
+
+ if ( compare & OBD_MD_FLATIME )
+ res = (res || (dst->o_atime != src->o_atime));
+ if ( compare & OBD_MD_FLMTIME )
+ res = (res || (dst->o_mtime != src->o_mtime));
+ if ( compare & OBD_MD_FLCTIME )
+ res = (res || (dst->o_ctime != src->o_ctime));
+ if ( compare & OBD_MD_FLSIZE )
+ res = (res || (dst->o_size != src->o_size));
+ if ( compare & OBD_MD_FLBLOCKS ) /* allocation of space */
+ res = (res || (dst->o_blocks != src->o_blocks));
+ if ( compare & OBD_MD_FLBLKSZ )
+ res = (res || (dst->o_blksize != src->o_blksize));
+ if ( compare & OBD_MD_FLTYPE )
+ res = (res || (((dst->o_mode ^ src->o_mode) & S_IFMT) != 0));
+ if ( compare & OBD_MD_FLMODE )
+ res = (res || (((dst->o_mode ^ src->o_mode) & ~S_IFMT) != 0));
+ if ( compare & OBD_MD_FLUID )
+ res = (res || (dst->o_uid != src->o_uid));
+ if ( compare & OBD_MD_FLGID )
+ res = (res || (dst->o_gid != src->o_gid));
+ if ( compare & OBD_MD_FLFLAGS )
+ res = (res || (dst->o_flags != src->o_flags));
+ if ( compare & OBD_MD_FLNLINK )
+ res = (res || (dst->o_nlink != src->o_nlink));
+ if ( compare & OBD_MD_FLFID ) {
+ res = (res || (dst->o_parent_seq != src->o_parent_seq));
+ res = (res || (dst->o_parent_ver != src->o_parent_ver));
+ }
+ if ( compare & OBD_MD_FLGENER )
+ res = (res || (dst->o_parent_oid != src->o_parent_oid));
+ /* XXX Don't know if these should be included here - wasn't previously
+ if ( compare & OBD_MD_FLINLINE )
+ res = (res || memcmp(dst->o_inline, src->o_inline));
+ */
+ return res;
+}
+EXPORT_SYMBOL(obdo_cmp_md);
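+
+/* e.g. obdo_cmp_md(dst, src, OBD_MD_FLSIZE | OBD_MD_FLMTIME) returns nonzero
+ * iff the size or mtime fields differ between dst and src. */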
+
+void obdo_to_ioobj(struct obdo *oa, struct obd_ioobj *ioobj)
+{
+ ioobj->ioo_oid = oa->o_oi;
+ if (unlikely(!(oa->o_valid & OBD_MD_FLGROUP)))
+ ostid_set_seq_mdt0(&ioobj->ioo_oid);
+
+ /* Since 2.4 this does not contain o_mode in the low 16 bits.
+ * Instead, it holds (bd_md_max_brw - 1) for multi-bulk BRW RPCs */
+ ioobj->ioo_max_brw = 0;
+}
+EXPORT_SYMBOL(obdo_to_ioobj);
+
+void obdo_from_iattr(struct obdo *oa, struct iattr *attr, unsigned int ia_valid)
+{
+ if (ia_valid & ATTR_ATIME) {
+ oa->o_atime = LTIME_S(attr->ia_atime);
+ oa->o_valid |= OBD_MD_FLATIME;
+ }
+ if (ia_valid & ATTR_MTIME) {
+ oa->o_mtime = LTIME_S(attr->ia_mtime);
+ oa->o_valid |= OBD_MD_FLMTIME;
+ }
+ if (ia_valid & ATTR_CTIME) {
+ oa->o_ctime = LTIME_S(attr->ia_ctime);
+ oa->o_valid |= OBD_MD_FLCTIME;
+ }
+ if (ia_valid & ATTR_SIZE) {
+ oa->o_size = attr->ia_size;
+ oa->o_valid |= OBD_MD_FLSIZE;
+ }
+ if (ia_valid & ATTR_MODE) {
+ oa->o_mode = attr->ia_mode;
+ oa->o_valid |= OBD_MD_FLTYPE | OBD_MD_FLMODE;
+ if (!in_group_p(make_kgid(&init_user_ns, oa->o_gid)) &&
+ !cfs_capable(CFS_CAP_FSETID))
+ oa->o_mode &= ~S_ISGID;
+ }
+ if (ia_valid & ATTR_UID) {
+ oa->o_uid = from_kuid(&init_user_ns, attr->ia_uid);
+ oa->o_valid |= OBD_MD_FLUID;
+ }
+ if (ia_valid & ATTR_GID) {
+ oa->o_gid = from_kgid(&init_user_ns, attr->ia_gid);
+ oa->o_valid |= OBD_MD_FLGID;
+ }
+}
+EXPORT_SYMBOL(obdo_from_iattr);
+
+void iattr_from_obdo(struct iattr *attr, struct obdo *oa, obd_flag valid)
+{
+ valid &= oa->o_valid;
+
+ if (valid & (OBD_MD_FLCTIME | OBD_MD_FLMTIME))
+ CDEBUG(D_INODE, "valid "LPX64", new time "LPU64"/"LPU64"\n",
+ oa->o_valid, oa->o_mtime, oa->o_ctime);
+
+ attr->ia_valid = 0;
+ if (valid & OBD_MD_FLATIME) {
+ LTIME_S(attr->ia_atime) = oa->o_atime;
+ attr->ia_valid |= ATTR_ATIME;
+ }
+ if (valid & OBD_MD_FLMTIME) {
+ LTIME_S(attr->ia_mtime) = oa->o_mtime;
+ attr->ia_valid |= ATTR_MTIME;
+ }
+ if (valid & OBD_MD_FLCTIME) {
+ LTIME_S(attr->ia_ctime) = oa->o_ctime;
+ attr->ia_valid |= ATTR_CTIME;
+ }
+ if (valid & OBD_MD_FLSIZE) {
+ attr->ia_size = oa->o_size;
+ attr->ia_valid |= ATTR_SIZE;
+ }
+#if 0 /* you shouldn't be able to change a file's type with setattr */
+ if (valid & OBD_MD_FLTYPE) {
+ attr->ia_mode = (attr->ia_mode & ~S_IFMT)|(oa->o_mode & S_IFMT);
+ attr->ia_valid |= ATTR_MODE;
+ }
+#endif
+ if (valid & OBD_MD_FLMODE) {
+ attr->ia_mode = (attr->ia_mode & S_IFMT)|(oa->o_mode & ~S_IFMT);
+ attr->ia_valid |= ATTR_MODE;
+ if (!in_group_p(make_kgid(&init_user_ns, oa->o_gid)) &&
+ !cfs_capable(CFS_CAP_FSETID))
+ attr->ia_mode &= ~S_ISGID;
+ }
+ if (valid & OBD_MD_FLUID) {
+ attr->ia_uid = make_kuid(&init_user_ns, oa->o_uid);
+ attr->ia_valid |= ATTR_UID;
+ }
+ if (valid & OBD_MD_FLGID) {
+ attr->ia_gid = make_kgid(&init_user_ns, oa->o_gid);
+ attr->ia_valid |= ATTR_GID;
+ }
+}
+EXPORT_SYMBOL(iattr_from_obdo);
+
+void md_from_obdo(struct md_op_data *op_data, struct obdo *oa, obd_flag valid)
+{
+ iattr_from_obdo(&op_data->op_attr, oa, valid);
+ if (valid & OBD_MD_FLBLOCKS) {
+ op_data->op_attr_blocks = oa->o_blocks;
+ op_data->op_attr.ia_valid |= ATTR_BLOCKS;
+ }
+ if (valid & OBD_MD_FLFLAGS) {
+ ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags =
+ oa->o_flags;
+ op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
+ }
+}
+EXPORT_SYMBOL(md_from_obdo);
+
+void obdo_from_md(struct obdo *oa, struct md_op_data *op_data,
+ unsigned int valid)
+{
+ obdo_from_iattr(oa, &op_data->op_attr, valid);
+ if (valid & ATTR_BLOCKS) {
+ oa->o_blocks = op_data->op_attr_blocks;
+ oa->o_valid |= OBD_MD_FLBLOCKS;
+ }
+ if (valid & ATTR_ATTR_FLAG) {
+ oa->o_flags =
+ ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags;
+ oa->o_valid |= OBD_MD_FLFLAGS;
+ }
+}
+EXPORT_SYMBOL(obdo_from_md);
+
+void obdo_cpu_to_le(struct obdo *dobdo, struct obdo *sobdo)
+{
+ dobdo->o_size = cpu_to_le64(sobdo->o_size);
+ dobdo->o_mtime = cpu_to_le64(sobdo->o_mtime);
+ dobdo->o_atime = cpu_to_le64(sobdo->o_atime);
+ dobdo->o_ctime = cpu_to_le64(sobdo->o_ctime);
+ dobdo->o_blocks = cpu_to_le64(sobdo->o_blocks);
+ dobdo->o_mode = cpu_to_le32(sobdo->o_mode);
+ dobdo->o_uid = cpu_to_le32(sobdo->o_uid);
+ dobdo->o_gid = cpu_to_le32(sobdo->o_gid);
+ dobdo->o_flags = cpu_to_le32(sobdo->o_flags);
+ dobdo->o_nlink = cpu_to_le32(sobdo->o_nlink);
+ dobdo->o_blksize = cpu_to_le32(sobdo->o_blksize);
+ dobdo->o_valid = cpu_to_le64(sobdo->o_valid);
+}
+EXPORT_SYMBOL(obdo_cpu_to_le);
+
+void obdo_le_to_cpu(struct obdo *dobdo, struct obdo *sobdo)
+{
+ dobdo->o_size = le64_to_cpu(sobdo->o_size);
+ dobdo->o_mtime = le64_to_cpu(sobdo->o_mtime);
+ dobdo->o_atime = le64_to_cpu(sobdo->o_atime);
+ dobdo->o_ctime = le64_to_cpu(sobdo->o_ctime);
+ dobdo->o_blocks = le64_to_cpu(sobdo->o_blocks);
+ dobdo->o_mode = le32_to_cpu(sobdo->o_mode);
+ dobdo->o_uid = le32_to_cpu(sobdo->o_uid);
+ dobdo->o_gid = le32_to_cpu(sobdo->o_gid);
+ dobdo->o_flags = le32_to_cpu(sobdo->o_flags);
+ dobdo->o_nlink = le32_to_cpu(sobdo->o_nlink);
+ dobdo->o_blksize = le32_to_cpu(sobdo->o_blksize);
+ dobdo->o_valid = le64_to_cpu(sobdo->o_valid);
+}
+EXPORT_SYMBOL(obdo_le_to_cpu);
diff --git a/drivers/staging/lustre/lustre/obdclass/statfs_pack.c b/drivers/staging/lustre/lustre/obdclass/statfs_pack.c
new file mode 100644
index 0000000..c3b7a78
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/statfs_pack.c
@@ -0,0 +1,75 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdclass/statfs_pack.c
+ *
+ * (Un)packing of OST/MDS requests
+ *
+ * Author: Andreas Dilger <adilger@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_CLASS
+
+
+#include <lustre_export.h>
+#include <lustre_net.h>
+#include <obd_support.h>
+#include <obd_class.h>
+
+void statfs_pack(struct obd_statfs *osfs, struct kstatfs *sfs)
+{
+ memset(osfs, 0, sizeof(*osfs));
+ osfs->os_type = sfs->f_type;
+ osfs->os_blocks = sfs->f_blocks;
+ osfs->os_bfree = sfs->f_bfree;
+ osfs->os_bavail = sfs->f_bavail;
+ osfs->os_files = sfs->f_files;
+ osfs->os_ffree = sfs->f_ffree;
+ osfs->os_bsize = sfs->f_bsize;
+ osfs->os_namelen = sfs->f_namelen;
+}
+EXPORT_SYMBOL(statfs_pack);
+
+void statfs_unpack(struct kstatfs *sfs, struct obd_statfs *osfs)
+{
+ memset(sfs, 0, sizeof(*sfs));
+ sfs->f_type = osfs->os_type;
+ sfs->f_blocks = osfs->os_blocks;
+ sfs->f_bfree = osfs->os_bfree;
+ sfs->f_bavail = osfs->os_bavail;
+ sfs->f_files = osfs->os_files;
+ sfs->f_ffree = osfs->os_ffree;
+ sfs->f_bsize = osfs->os_bsize;
+ sfs->f_namelen = osfs->os_namelen;
+}
+EXPORT_SYMBOL(statfs_unpack);
diff --git a/drivers/staging/lustre/lustre/obdclass/uuid.c b/drivers/staging/lustre/lustre/obdclass/uuid.c
new file mode 100644
index 0000000..af5f27f
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdclass/uuid.c
@@ -0,0 +1,82 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdclass/uuid.c
+ *
+ * Public include file for the UUID library
+ */
+
+#define DEBUG_SUBSYSTEM S_CLASS
+
+# include <linux/libcfs/libcfs.h>
+
+#include <obd_support.h>
+#include <obd_class.h>
+
+
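+/* consume() reads 'nob' bytes from *ptr as a big-endian integer and
+ * advances the pointer; CONSUME() below wraps it for a field of known
+ * size. */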
+static inline __u32 consume(int nob, __u8 **ptr)
+{
+ __u32 value;
+
+ LASSERT(nob <= sizeof value);
+
+ for (value = 0; nob > 0; --nob)
+ value = (value << 8) | *((*ptr)++);
+ return value;
+}
+
+#define CONSUME(val, ptr) (val) = consume(sizeof(val), (ptr))
+
+static void uuid_unpack(class_uuid_t in, __u16 *uu, int nr)
+{
+ __u8 *ptr = in;
+
+ LASSERT(nr * sizeof *uu == sizeof(class_uuid_t));
+
+ while (nr-- > 0)
+ CONSUME(uu[nr], &ptr);
+}
+
+void class_uuid_unparse(class_uuid_t uu, struct obd_uuid *out)
+{
+ /* uu as an array of __u16's */
+ __u16 uuid[sizeof(class_uuid_t) / sizeof(__u16)];
+
+ CLASSERT(ARRAY_SIZE(uuid) == 8);
+
+ uuid_unpack(uu, uuid, ARRAY_SIZE(uuid));
+ sprintf(out->uuid, "%04x%04x-%04x-%04x-%04x-%04x%04x%04x",
+ uuid[0], uuid[1], uuid[2], uuid[3],
+ uuid[4], uuid[5], uuid[6], uuid[7]);
+}
+EXPORT_SYMBOL(class_uuid_unparse);
diff --git a/drivers/staging/lustre/lustre/obdecho/Makefile b/drivers/staging/lustre/lustre/obdecho/Makefile
new file mode 100644
index 0000000..4c48e24
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdecho/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_LUSTRE_FS) += obdecho.o
+obdecho-y := echo_client.o lproc_echo.o
+
+
+ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/obdecho/echo.c b/drivers/staging/lustre/lustre/obdecho/echo.c
new file mode 100644
index 0000000..debb9ce
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdecho/echo.c
@@ -0,0 +1,670 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2010, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdecho/echo.c
+ *
+ * Author: Peter Braam <braam@clusterfs.com>
+ * Author: Andreas Dilger <adilger@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_ECHO
+
+#include <obd_support.h>
+#include <obd_class.h>
+#include <lustre_debug.h>
+#include <lustre_dlm.h>
+#include <lprocfs_status.h>
+
+#include "echo_internal.h"
+
+/* The echo objid needs to be below 2^32, because regular FID numbers are
+ * limited to 2^32 objects in f_oid for the FID_SEQ_ECHO range. b=23335 */
+#define ECHO_INIT_OID 0x10000000ULL
+#define ECHO_HANDLE_MAGIC 0xabcd0123fedc9876ULL
+
+#define ECHO_PERSISTENT_PAGES (ECHO_PERSISTENT_SIZE >> PAGE_CACHE_SHIFT)
+static struct page *echo_persistent_pages[ECHO_PERSISTENT_PAGES];
+
+enum {
+ LPROC_ECHO_READ_BYTES = 1,
+ LPROC_ECHO_WRITE_BYTES = 2,
+ LPROC_ECHO_LAST = LPROC_ECHO_WRITE_BYTES + 1
+};
+
+static int echo_connect(const struct lu_env *env,
+ struct obd_export **exp, struct obd_device *obd,
+ struct obd_uuid *cluuid, struct obd_connect_data *data,
+ void *localdata)
+{
+ struct lustre_handle conn = { 0 };
+ int rc;
+
+ data->ocd_connect_flags &= ECHO_CONNECT_SUPPORTED;
+ rc = class_connect(&conn, obd, cluuid);
+ if (rc) {
+ CERROR("can't connect %d\n", rc);
+ return rc;
+ }
+ *exp = class_conn2export(&conn);
+
+ return 0;
+}
+
+static int echo_disconnect(struct obd_export *exp)
+{
+ LASSERT (exp != NULL);
+
+ return server_disconnect_export(exp);
+}
+
+static int echo_init_export(struct obd_export *exp)
+{
+ return ldlm_init_export(exp);
+}
+
+static int echo_destroy_export(struct obd_export *exp)
+{
+ target_destroy_export(exp);
+ ldlm_destroy_export(exp);
+
+ return 0;
+}
+
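+/* Return the next object id for this echo device; eo_lastino is
+ * protected by eo_lock. */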
+static __u64 echo_next_id(struct obd_device *obddev)
+{
+ obd_id id;
+
+ spin_lock(&obddev->u.echo.eo_lock);
+ id = ++obddev->u.echo.eo_lastino;
+ spin_unlock(&obddev->u.echo.eo_lock);
+
+ return id;
+}
+
+static int echo_create(const struct lu_env *env, struct obd_export *exp,
+ struct obdo *oa, struct lov_stripe_md **ea,
+ struct obd_trans_info *oti)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+
+ if (!obd) {
+ CERROR("invalid client cookie "LPX64"\n",
+ exp->exp_handle.h_cookie);
+ return -EINVAL;
+ }
+
+ if (!(oa->o_mode & S_IFMT)) {
+ CERROR("echo obd: no type!\n");
+ return -ENOENT;
+ }
+
+ if (!(oa->o_valid & OBD_MD_FLTYPE)) {
+ CERROR("invalid o_valid "LPX64"\n", oa->o_valid);
+ return -EINVAL;
+ }
+
+ ostid_set_seq_echo(&oa->o_oi);
+ ostid_set_id(&oa->o_oi, echo_next_id(obd));
+ oa->o_valid = OBD_MD_FLID;
+
+ return 0;
+}
+
+static int echo_destroy(const struct lu_env *env, struct obd_export *exp,
+ struct obdo *oa, struct lov_stripe_md *ea,
+ struct obd_trans_info *oti, struct obd_export *md_exp,
+ void *capa)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+
+ if (!obd) {
+ CERROR("invalid client cookie "LPX64"\n",
+ exp->exp_handle.h_cookie);
+ return -EINVAL;
+ }
+
+ if (!(oa->o_valid & OBD_MD_FLID)) {
+ CERROR("obdo missing FLID valid flag: "LPX64"\n", oa->o_valid);
+ return -EINVAL;
+ }
+
+ if (ostid_id(&oa->o_oi) > obd->u.echo.eo_lastino ||
+ ostid_id(&oa->o_oi) < ECHO_INIT_OID) {
+ CERROR("bad destroy objid: "DOSTID"\n", POSTID(&oa->o_oi));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int echo_getattr(const struct lu_env *env, struct obd_export *exp,
+ struct obd_info *oinfo)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+ obd_id id = ostid_id(&oinfo->oi_oa->o_oi);
+
+ if (!obd) {
+ CERROR("invalid client cookie "LPX64"\n",
+ exp->exp_handle.h_cookie);
+ return -EINVAL;
+ }
+
+ if (!(oinfo->oi_oa->o_valid & OBD_MD_FLID)) {
+ CERROR("obdo missing FLID valid flag: "LPX64"\n",
+ oinfo->oi_oa->o_valid);
+ return -EINVAL;
+ }
+
+ obdo_cpy_md(oinfo->oi_oa, &obd->u.echo.eo_oa, oinfo->oi_oa->o_valid);
+ ostid_set_seq_echo(&oinfo->oi_oa->o_oi);
+ ostid_set_id(&oinfo->oi_oa->o_oi, id);
+
+ return 0;
+}
+
+static int echo_setattr(const struct lu_env *env, struct obd_export *exp,
+ struct obd_info *oinfo, struct obd_trans_info *oti)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+
+ if (!obd) {
+ CERROR("invalid client cookie "LPX64"\n",
+ exp->exp_handle.h_cookie);
+ return -EINVAL;
+ }
+
+ if (!(oinfo->oi_oa->o_valid & OBD_MD_FLID)) {
+ CERROR("obdo missing FLID valid flag: "LPX64"\n",
+ oinfo->oi_oa->o_valid);
+ return -EINVAL;
+ }
+
+ memcpy(&obd->u.echo.eo_oa, oinfo->oi_oa, sizeof(*oinfo->oi_oa));
+
+ if (ostid_id(&oinfo->oi_oa->o_oi) & 4) {
+ /* Save lock to force ACKed reply */
+ ldlm_lock_addref (&obd->u.echo.eo_nl_lock, LCK_NL);
+ oti->oti_ack_locks[0].mode = LCK_NL;
+ oti->oti_ack_locks[0].lock = obd->u.echo.eo_nl_lock;
+ }
+
+ return 0;
+}
+
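+/* Stamp each block of the page: reads get an offset/id pattern that the
+ * client can verify, writes get a poison value that the incoming data is
+ * expected to overwrite. */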
+static void
+echo_page_debug_setup(struct page *page, int rw, obd_id id,
+ __u64 offset, int len)
+{
+ int page_offset = offset & ~CFS_PAGE_MASK;
+ char *addr = ((char *)kmap(page)) + page_offset;
+
+ if (len % OBD_ECHO_BLOCK_SIZE != 0)
+ CERROR("Unexpected block size %d\n", len);
+
+ while (len > 0) {
+ if (rw & OBD_BRW_READ)
+ block_debug_setup(addr, OBD_ECHO_BLOCK_SIZE,
+ offset, id);
+ else
+ block_debug_setup(addr, OBD_ECHO_BLOCK_SIZE,
+ 0xecc0ecc0ecc0ecc0ULL,
+ 0xecc0ecc0ecc0ecc0ULL);
+
+ addr += OBD_ECHO_BLOCK_SIZE;
+ offset += OBD_ECHO_BLOCK_SIZE;
+ len -= OBD_ECHO_BLOCK_SIZE;
+ }
+
+ kunmap(page);
+}
+
+static int
+echo_page_debug_check(struct page *page, obd_id id,
+ __u64 offset, int len)
+{
+ int page_offset = offset & ~CFS_PAGE_MASK;
+ char *addr = ((char *)kmap(page)) + page_offset;
+ int rc = 0;
+ int rc2;
+
+ if (len % OBD_ECHO_BLOCK_SIZE != 0)
+ CERROR("Unexpected block size %d\n", len);
+
+ while (len > 0) {
+ rc2 = block_debug_check("echo", addr, OBD_ECHO_BLOCK_SIZE,
+ offset, id);
+
+ if (rc2 != 0 && rc == 0)
+ rc = rc2;
+
+ addr += OBD_ECHO_BLOCK_SIZE;
+ offset += OBD_ECHO_BLOCK_SIZE;
+ len -= OBD_ECHO_BLOCK_SIZE;
+ }
+
+ kunmap(page);
+
+ return (rc);
+}
+
+/* This allows us to verify that desc_private is passed unmolested */
+#define DESC_PRIV 0x10293847
+
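+/* Expand one remote niobuf into per-page local niobufs, reusing the
+ * persistent pages for ECHO_PERSISTENT_OBJID and allocating fresh pages
+ * otherwise; the debug pattern is applied to non-persistent pages when
+ * OBD_FL_DEBUG_CHECK is set. */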
+static int echo_map_nb_to_lb(struct obdo *oa, struct obd_ioobj *obj,
+ struct niobuf_remote *nb, int *pages,
+ struct niobuf_local *lb, int cmd, int *left)
+{
+ int gfp_mask = (ostid_id(&obj->ioo_oid) & 1) ?
+ GFP_HIGHUSER : GFP_IOFS;
+ int ispersistent = ostid_id(&obj->ioo_oid) == ECHO_PERSISTENT_OBJID;
+ int debug_setup = (!ispersistent &&
+ (oa->o_valid & OBD_MD_FLFLAGS) != 0 &&
+ (oa->o_flags & OBD_FL_DEBUG_CHECK) != 0);
+ struct niobuf_local *res = lb;
+ obd_off offset = nb->offset;
+ int len = nb->len;
+
+ while (len > 0) {
+ int plen = PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE-1));
+ if (len < plen)
+ plen = len;
+
+ /* check for local buf overflow */
+ if (*left == 0)
+ return -EINVAL;
+
+ res->lnb_file_offset = offset;
+ res->len = plen;
+ LASSERT((res->lnb_file_offset & ~CFS_PAGE_MASK) + res->len <=
+ PAGE_CACHE_SIZE);
+
+ if (ispersistent &&
+ ((res->lnb_file_offset >> PAGE_CACHE_SHIFT) <
+ ECHO_PERSISTENT_PAGES)) {
+ res->page =
+ echo_persistent_pages[res->lnb_file_offset >>
+ PAGE_CACHE_SHIFT];
+ /* Take extra ref so __free_pages() can be called OK */
+ get_page (res->page);
+ } else {
+ OBD_PAGE_ALLOC(res->page, gfp_mask);
+ if (res->page == NULL) {
+ CERROR("can't get page for id " DOSTID"\n",
+ POSTID(&obj->ioo_oid));
+ return -ENOMEM;
+ }
+ }
+
+ CDEBUG(D_PAGE, "$$$$ get page %p @ "LPU64" for %d\n",
+ res->page, res->lnb_file_offset, res->len);
+
+ if (cmd & OBD_BRW_READ)
+ res->rc = res->len;
+
+ if (debug_setup)
+ echo_page_debug_setup(res->page, cmd,
+ ostid_id(&obj->ioo_oid),
+ res->lnb_file_offset, res->len);
+
+ offset += plen;
+ len -= plen;
+ res++;
+
+ (*left)--;
+ (*pages)++;
+ }
+
+ return 0;
+}
+
+static int echo_finalize_lb(struct obdo *oa, struct obd_ioobj *obj,
+ struct niobuf_remote *rb, int *pgs,
+ struct niobuf_local *lb, int verify)
+{
+ struct niobuf_local *res = lb;
+ obd_off start = rb->offset >> PAGE_CACHE_SHIFT;
+ obd_off end = (rb->offset + rb->len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ int count = (int)(end - start);
+ int rc = 0;
+ int i;
+
+ for (i = 0; i < count; i++, (*pgs)++, res++) {
+ struct page *page = res->page;
+ void *addr;
+
+ if (page == NULL) {
+ CERROR("null page objid "LPU64":%p, buf %d/%d\n",
+ ostid_id(&obj->ioo_oid), page, i,
+ obj->ioo_bufcnt);
+ return -EFAULT;
+ }
+
+ addr = kmap(page);
+
+ CDEBUG(D_PAGE, "$$$$ use page %p, addr %p@"LPU64"\n",
+ res->page, addr, res->lnb_file_offset);
+
+ if (verify) {
+ int vrc = echo_page_debug_check(page,
+ ostid_id(&obj->ioo_oid),
+ res->lnb_file_offset,
+ res->len);
+ /* check all the pages always */
+ if (vrc != 0 && rc == 0)
+ rc = vrc;
+ }
+
+ kunmap(page);
+ /* NB see comment above regarding persistent pages */
+ OBD_PAGE_FREE(page);
+ }
+
+ return rc;
+}
+
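+/* Bulk I/O prepare: map every remote niobuf of every object to local
+ * pages, account the transfer size in the lprocfs stats and remember the
+ * number of prepared pages in eo_prep. */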
+static int echo_preprw(const struct lu_env *env, int cmd,
+ struct obd_export *export, struct obdo *oa,
+ int objcount, struct obd_ioobj *obj,
+ struct niobuf_remote *nb, int *pages,
+ struct niobuf_local *res, struct obd_trans_info *oti,
+ struct lustre_capa *unused)
+{
+ struct obd_device *obd;
+ int tot_bytes = 0;
+ int rc = 0;
+ int i, left;
+
+ obd = export->exp_obd;
+ if (obd == NULL)
+ return -EINVAL;
+
+ /* Temp fix to stop falling foul of osc_announce_cached() */
+ oa->o_valid &= ~(OBD_MD_FLBLOCKS | OBD_MD_FLGRANT);
+
+ memset(res, 0, sizeof(*res) * *pages);
+
+ CDEBUG(D_PAGE, "%s %d obdos with %d IOs\n",
+ cmd == OBD_BRW_READ ? "reading" : "writing", objcount, *pages);
+
+ if (oti)
+ oti->oti_handle = (void *)DESC_PRIV;
+
+ left = *pages;
+ *pages = 0;
+
+ for (i = 0; i < objcount; i++, obj++) {
+ int j;
+
+ for (j = 0 ; j < obj->ioo_bufcnt ; j++, nb++) {
+
+ rc = echo_map_nb_to_lb(oa, obj, nb, pages,
+ res + *pages, cmd, &left);
+ if (rc)
+ GOTO(preprw_cleanup, rc);
+
+ tot_bytes += nb->len;
+ }
+ }
+
+ atomic_add(*pages, &obd->u.echo.eo_prep);
+
+ if (cmd & OBD_BRW_READ)
+ lprocfs_counter_add(obd->obd_stats, LPROC_ECHO_READ_BYTES,
+ tot_bytes);
+ else
+ lprocfs_counter_add(obd->obd_stats, LPROC_ECHO_WRITE_BYTES,
+ tot_bytes);
+
+ CDEBUG(D_PAGE, "%d pages allocated after prep\n",
+ atomic_read(&obd->u.echo.eo_prep));
+
+ return 0;
+
+preprw_cleanup:
+ /* It is possible that we would rather handle errors by allowing
+ * any already-set-up pages to complete, rather than tearing them
+ * all down again. I believe that this is what the in-kernel
+ * prep/commit operations do.
+ */
+ CERROR("cleaning up %u pages (%d obdos)\n", *pages, objcount);
+ for (i = 0; i < *pages; i++) {
+ kunmap(res[i].page);
+ /* NB if this is a persistent page, __free_pages will just
+ * lose the extra ref gained above */
+ OBD_PAGE_FREE(res[i].page);
+ res[i].page = NULL;
+ atomic_dec(&obd->u.echo.eo_prep);
+ }
+
+ return rc;
+}
+
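+/* Bulk I/O commit: verify (when requested) and release the pages set up
+ * by echo_preprw() and drop them from the eo_prep count. */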
+static int echo_commitrw(const struct lu_env *env, int cmd,
+ struct obd_export *export, struct obdo *oa,
+ int objcount, struct obd_ioobj *obj,
+ struct niobuf_remote *rb, int niocount,
+ struct niobuf_local *res, struct obd_trans_info *oti,
+ int rc)
+{
+ struct obd_device *obd;
+ int pgs = 0;
+ int i;
+
+ obd = export->exp_obd;
+ if (obd == NULL)
+ return -EINVAL;
+
+ if (rc)
+ GOTO(commitrw_cleanup, rc);
+
+ if ((cmd & OBD_BRW_RWMASK) == OBD_BRW_READ) {
+ CDEBUG(D_PAGE, "reading %d obdos with %d IOs\n",
+ objcount, niocount);
+ } else {
+ CDEBUG(D_PAGE, "writing %d obdos with %d IOs\n",
+ objcount, niocount);
+ }
+
+ if (niocount && res == NULL) {
+ CERROR("NULL res niobuf with niocount %d\n", niocount);
+ return -EINVAL;
+ }
+
+ LASSERT(oti == NULL || oti->oti_handle == (void *)DESC_PRIV);
+
+ for (i = 0; i < objcount; i++, obj++) {
+ int verify = (rc == 0 &&
+ ostid_id(&obj->ioo_oid) != ECHO_PERSISTENT_OBJID &&
+ (oa->o_valid & OBD_MD_FLFLAGS) != 0 &&
+ (oa->o_flags & OBD_FL_DEBUG_CHECK) != 0);
+ int j;
+
+ for (j = 0 ; j < obj->ioo_bufcnt ; j++, rb++) {
+ int vrc = echo_finalize_lb(oa, obj, rb, &pgs, &res[pgs],
+ verify);
+ if (vrc == 0)
+ continue;
+
+ if (vrc == -EFAULT)
+ GOTO(commitrw_cleanup, rc = vrc);
+
+ if (rc == 0)
+ rc = vrc;
+ }
+
+ }
+
+ atomic_sub(pgs, &obd->u.echo.eo_prep);
+
+ CDEBUG(D_PAGE, "%d pages remain after commit\n",
+ atomic_read(&obd->u.echo.eo_prep));
+ return rc;
+
+commitrw_cleanup:
+ atomic_sub(pgs, &obd->u.echo.eo_prep);
+
+ CERROR("cleaning up %d pages (%d obdos)\n",
+ niocount - pgs - 1, objcount);
+
+ while (pgs < niocount) {
+ struct page *page = res[pgs++].page;
+
+ if (page == NULL)
+ continue;
+
+ /* NB see comment above regarding persistent pages */
+ OBD_PAGE_FREE(page);
+ atomic_dec(&obd->u.echo.eo_prep);
+ }
+ return rc;
+}
+
+static int echo_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
+{
+ struct lprocfs_static_vars lvars;
+ int rc;
+ __u64 lock_flags = 0;
+ struct ldlm_res_id res_id = {.name = {1}};
+ char ns_name[48];
+
+ obd->u.echo.eo_obt.obt_magic = OBT_MAGIC;
+ spin_lock_init(&obd->u.echo.eo_lock);
+ obd->u.echo.eo_lastino = ECHO_INIT_OID;
+
+ sprintf(ns_name, "echotgt-%s", obd->obd_uuid.uuid);
+ obd->obd_namespace = ldlm_namespace_new(obd, ns_name,
+ LDLM_NAMESPACE_SERVER,
+ LDLM_NAMESPACE_MODEST,
+ LDLM_NS_TYPE_OST);
+ if (obd->obd_namespace == NULL) {
+ LBUG();
+ return -ENOMEM;
+ }
+
+ rc = ldlm_cli_enqueue_local(obd->obd_namespace, &res_id, LDLM_PLAIN,
+ NULL, LCK_NL, &lock_flags, NULL,
+ ldlm_completion_ast, NULL, NULL, 0,
+ LVB_T_NONE, NULL, &obd->u.echo.eo_nl_lock);
+ LASSERT (rc == ELDLM_OK);
+
+ lprocfs_echo_init_vars(&lvars);
+ if (lprocfs_obd_setup(obd, lvars.obd_vars) == 0 &&
+ lprocfs_alloc_obd_stats(obd, LPROC_ECHO_LAST) == 0) {
+ lprocfs_counter_init(obd->obd_stats, LPROC_ECHO_READ_BYTES,
+ LPROCFS_CNTR_AVGMINMAX,
+ "read_bytes", "bytes");
+ lprocfs_counter_init(obd->obd_stats, LPROC_ECHO_WRITE_BYTES,
+ LPROCFS_CNTR_AVGMINMAX,
+ "write_bytes", "bytes");
+ }
+
+ ptlrpc_init_client (LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
+ "echo_ldlm_cb_client", &obd->obd_ldlm_client);
+ return 0;
+}
+
+static int echo_cleanup(struct obd_device *obd)
+{
+ int leaked;
+
+ lprocfs_obd_cleanup(obd);
+ lprocfs_free_obd_stats(obd);
+
+ ldlm_lock_decref(&obd->u.echo.eo_nl_lock, LCK_NL);
+
+ /* XXX Bug 3413; wait for a bit to ensure the BL callback has
+ * happened before calling ldlm_namespace_free() */
+ schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE, cfs_time_seconds(1));
+
+ ldlm_namespace_free(obd->obd_namespace, NULL, obd->obd_force);
+ obd->obd_namespace = NULL;
+
+ leaked = atomic_read(&obd->u.echo.eo_prep);
+ if (leaked != 0)
+ CERROR("%d prep/commitrw pages leaked\n", leaked);
+
+ return 0;
+}
+
+struct obd_ops echo_obd_ops = {
+ .o_owner = THIS_MODULE,
+ .o_connect = echo_connect,
+ .o_disconnect = echo_disconnect,
+ .o_init_export = echo_init_export,
+ .o_destroy_export = echo_destroy_export,
+ .o_create = echo_create,
+ .o_destroy = echo_destroy,
+ .o_getattr = echo_getattr,
+ .o_setattr = echo_setattr,
+ .o_preprw = echo_preprw,
+ .o_commitrw = echo_commitrw,
+ .o_setup = echo_setup,
+ .o_cleanup = echo_cleanup
+};
+
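+/* The persistent pages back ECHO_PERSISTENT_OBJID for the lifetime of the
+ * module: they are allocated and zeroed at init time and only freed here. */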
+void echo_persistent_pages_fini(void)
+{
+ int i;
+
+ for (i = 0; i < ECHO_PERSISTENT_PAGES; i++)
+ if (echo_persistent_pages[i] != NULL) {
+ OBD_PAGE_FREE(echo_persistent_pages[i]);
+ echo_persistent_pages[i] = NULL;
+ }
+}
+
+int echo_persistent_pages_init(void)
+{
+ struct page *pg;
+ int i;
+
+ for (i = 0; i < ECHO_PERSISTENT_PAGES; i++) {
+ int gfp_mask = (i < ECHO_PERSISTENT_PAGES/2) ?
+ GFP_IOFS : GFP_HIGHUSER;
+
+ OBD_PAGE_ALLOC(pg, gfp_mask);
+ if (pg == NULL) {
+ echo_persistent_pages_fini ();
+ return (-ENOMEM);
+ }
+
+ memset (kmap (pg), 0, PAGE_CACHE_SIZE);
+ kunmap (pg);
+
+ echo_persistent_pages[i] = pg;
+ }
+
+ return (0);
+}
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
new file mode 100644
index 0000000..c8b4344
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -0,0 +1,3171 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_ECHO
+#include <linux/libcfs/libcfs.h>
+
+#include <obd.h>
+#include <obd_support.h>
+#include <obd_class.h>
+#include <lustre_debug.h>
+#include <lprocfs_status.h>
+#include <cl_object.h>
+#include <md_object.h>
+#include <lustre_fid.h>
+#include <lustre_acl.h>
+#include <lustre_net.h>
+#include <obd_lov.h>
+
+#include "echo_internal.h"
+
+/** \defgroup echo_client Echo Client
+ * @{
+ */
+
+struct echo_device {
+ struct cl_device ed_cl;
+ struct echo_client_obd *ed_ec;
+
+ struct cl_site ed_site_myself;
+ struct cl_site *ed_site;
+ struct lu_device *ed_next;
+ int ed_next_islov;
+ int ed_next_ismd;
+ struct lu_client_seq *ed_cl_seq;
+};
+
+struct echo_object {
+ struct cl_object eo_cl;
+ struct cl_object_header eo_hdr;
+
+ struct echo_device *eo_dev;
+ struct list_head eo_obj_chain;
+ struct lov_stripe_md *eo_lsm;
+ atomic_t eo_npages;
+ int eo_deleted;
+};
+
+struct echo_object_conf {
+ struct cl_object_conf eoc_cl;
+ struct lov_stripe_md **eoc_md;
+};
+
+struct echo_page {
+ struct cl_page_slice ep_cl;
+ struct mutex ep_lock;
+ struct page *ep_vmpage;
+};
+
+struct echo_lock {
+ struct cl_lock_slice el_cl;
+ struct list_head el_chain;
+ struct echo_object *el_object;
+ __u64 el_cookie;
+ atomic_t el_refcount;
+};
+
+struct echo_io {
+ struct cl_io_slice ei_cl;
+};
+
+#if 0
+struct echo_req {
+ struct cl_req_slice er_cl;
+};
+#endif
+
+static int echo_client_setup(const struct lu_env *env,
+ struct obd_device *obddev,
+ struct lustre_cfg *lcfg);
+static int echo_client_cleanup(struct obd_device *obddev);
+
+
+/** \defgroup echo_helpers Helper functions
+ * @{
+ */
+static inline struct echo_device *cl2echo_dev(const struct cl_device *dev)
+{
+ return container_of0(dev, struct echo_device, ed_cl);
+}
+
+static inline struct cl_device *echo_dev2cl(struct echo_device *d)
+{
+ return &d->ed_cl;
+}
+
+static inline struct echo_device *obd2echo_dev(const struct obd_device *obd)
+{
+ return cl2echo_dev(lu2cl_dev(obd->obd_lu_dev));
+}
+
+static inline struct cl_object *echo_obj2cl(struct echo_object *eco)
+{
+ return &eco->eo_cl;
+}
+
+static inline struct echo_object *cl2echo_obj(const struct cl_object *o)
+{
+ return container_of(o, struct echo_object, eo_cl);
+}
+
+static inline struct echo_page *cl2echo_page(const struct cl_page_slice *s)
+{
+ return container_of(s, struct echo_page, ep_cl);
+}
+
+static inline struct echo_lock *cl2echo_lock(const struct cl_lock_slice *s)
+{
+ return container_of(s, struct echo_lock, el_cl);
+}
+
+static inline struct cl_lock *echo_lock2cl(const struct echo_lock *ecl)
+{
+ return ecl->el_cl.cls_lock;
+}
+
+static struct lu_context_key echo_thread_key;
+static inline struct echo_thread_info *echo_env_info(const struct lu_env *env)
+{
+ struct echo_thread_info *info;
+ info = lu_context_key_get(&env->le_ctx, &echo_thread_key);
+ LASSERT(info != NULL);
+ return info;
+}
+
+static inline
+struct echo_object_conf *cl2echo_conf(const struct cl_object_conf *c)
+{
+ return container_of(c, struct echo_object_conf, eoc_cl);
+}
+
+/** @} echo_helpers */
+
+static struct echo_object *cl_echo_object_find(struct echo_device *d,
+ struct lov_stripe_md **lsm);
+static int cl_echo_object_put(struct echo_object *eco);
+static int cl_echo_enqueue (struct echo_object *eco, obd_off start,
+ obd_off end, int mode, __u64 *cookie);
+static int cl_echo_cancel (struct echo_device *d, __u64 cookie);
+static int cl_echo_object_brw(struct echo_object *eco, int rw, obd_off offset,
+ struct page **pages, int npages, int async);
+
+static struct echo_thread_info *echo_env_info(const struct lu_env *env);
+
+struct echo_thread_info {
+ struct echo_object_conf eti_conf;
+ struct lustre_md eti_md;
+
+ struct cl_2queue eti_queue;
+ struct cl_io eti_io;
+ struct cl_lock_descr eti_descr;
+ struct lu_fid eti_fid;
+ struct lu_fid eti_fid2;
+ struct md_op_spec eti_spec;
+ struct lov_mds_md_v3 eti_lmm;
+ struct lov_user_md_v3 eti_lum;
+ struct md_attr eti_ma;
+ struct lu_name eti_lname;
+ /* per-thread values, can be re-used */
+ void *eti_big_lmm;
+ int eti_big_lmmsize;
+ char eti_name[20];
+ struct lu_buf eti_buf;
+ char eti_xattr_buf[LUSTRE_POSIX_ACL_MAX_SIZE];
+};
+
+/* No session used right now */
+struct echo_session_info {
+ unsigned long dummy;
+};
+
+static struct kmem_cache *echo_lock_kmem;
+static struct kmem_cache *echo_object_kmem;
+static struct kmem_cache *echo_thread_kmem;
+static struct kmem_cache *echo_session_kmem;
+//static struct kmem_cache *echo_req_kmem;
+
+static struct lu_kmem_descr echo_caches[] = {
+ {
+ .ckd_cache = &echo_lock_kmem,
+ .ckd_name = "echo_lock_kmem",
+ .ckd_size = sizeof (struct echo_lock)
+ },
+ {
+ .ckd_cache = &echo_object_kmem,
+ .ckd_name = "echo_object_kmem",
+ .ckd_size = sizeof (struct echo_object)
+ },
+ {
+ .ckd_cache = &echo_thread_kmem,
+ .ckd_name = "echo_thread_kmem",
+ .ckd_size = sizeof (struct echo_thread_info)
+ },
+ {
+ .ckd_cache = &echo_session_kmem,
+ .ckd_name = "echo_session_kmem",
+ .ckd_size = sizeof (struct echo_session_info)
+ },
+#if 0
+ {
+ .ckd_cache = &echo_req_kmem,
+ .ckd_name = "echo_req_kmem",
+ .ckd_size = sizeof (struct echo_req)
+ },
+#endif
+ {
+ .ckd_cache = NULL
+ }
+};
+
+/** \defgroup echo_page Page operations
+ *
+ * Echo page operations.
+ *
+ * @{
+ */
+static struct page *echo_page_vmpage(const struct lu_env *env,
+ const struct cl_page_slice *slice)
+{
+ return cl2echo_page(slice)->ep_vmpage;
+}
+
+static int echo_page_own(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io, int nonblock)
+{
+ struct echo_page *ep = cl2echo_page(slice);
+
+ if (!nonblock)
+ mutex_lock(&ep->ep_lock);
+ else if (!mutex_trylock(&ep->ep_lock))
+ return -EAGAIN;
+ return 0;
+}
+
+static void echo_page_disown(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io)
+{
+ struct echo_page *ep = cl2echo_page(slice);
+
+ LASSERT(mutex_is_locked(&ep->ep_lock));
+ mutex_unlock(&ep->ep_lock);
+}
+
+static void echo_page_discard(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
+{
+ cl_page_delete(env, slice->cpl_page);
+}
+
+static int echo_page_is_vmlocked(const struct lu_env *env,
+ const struct cl_page_slice *slice)
+{
+ if (mutex_is_locked(&cl2echo_page(slice)->ep_lock))
+ return -EBUSY;
+ return -ENODATA;
+}
+
+static void echo_page_completion(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ int ioret)
+{
+ LASSERT(slice->cpl_page->cp_sync_io != NULL);
+}
+
+static void echo_page_fini(const struct lu_env *env,
+ struct cl_page_slice *slice)
+{
+ struct echo_page *ep = cl2echo_page(slice);
+ struct echo_object *eco = cl2echo_obj(slice->cpl_obj);
+ struct page *vmpage = ep->ep_vmpage;
+
+ atomic_dec(&eco->eo_npages);
+ page_cache_release(vmpage);
+}
+
+static int echo_page_prep(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
+{
+ return 0;
+}
+
+static int echo_page_print(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ void *cookie, lu_printer_t printer)
+{
+ struct echo_page *ep = cl2echo_page(slice);
+
+ (*printer)(env, cookie, LUSTRE_ECHO_CLIENT_NAME"-page@%p %d vm@%p\n",
+ ep, mutex_is_locked(&ep->ep_lock), ep->ep_vmpage);
+ return 0;
+}
+
+static const struct cl_page_operations echo_page_ops = {
+ .cpo_own = echo_page_own,
+ .cpo_disown = echo_page_disown,
+ .cpo_discard = echo_page_discard,
+ .cpo_vmpage = echo_page_vmpage,
+ .cpo_fini = echo_page_fini,
+ .cpo_print = echo_page_print,
+ .cpo_is_vmlocked = echo_page_is_vmlocked,
+ .io = {
+ [CRT_READ] = {
+ .cpo_prep = echo_page_prep,
+ .cpo_completion = echo_page_completion,
+ },
+ [CRT_WRITE] = {
+ .cpo_prep = echo_page_prep,
+ .cpo_completion = echo_page_completion,
+ }
+ }
+};
+/** @} echo_page */
+
+/** \defgroup echo_lock Locking
+ *
+ * echo lock operations
+ *
+ * @{
+ */
+static void echo_lock_fini(const struct lu_env *env,
+ struct cl_lock_slice *slice)
+{
+ struct echo_lock *ecl = cl2echo_lock(slice);
+
+ LASSERT(list_empty(&ecl->el_chain));
+ OBD_SLAB_FREE_PTR(ecl, echo_lock_kmem);
+}
+
+static void echo_lock_delete(const struct lu_env *env,
+ const struct cl_lock_slice *slice)
+{
+ struct echo_lock *ecl = cl2echo_lock(slice);
+
+ LASSERT(list_empty(&ecl->el_chain));
+}
+
+static int echo_lock_fits_into(const struct lu_env *env,
+ const struct cl_lock_slice *slice,
+ const struct cl_lock_descr *need,
+ const struct cl_io *unused)
+{
+ return 1;
+}
+
+static struct cl_lock_operations echo_lock_ops = {
+ .clo_fini = echo_lock_fini,
+ .clo_delete = echo_lock_delete,
+ .clo_fits_into = echo_lock_fits_into
+};
+
+/** @} echo_lock */
+
+/** \defgroup echo_cl_ops cl_object operations
+ *
+ * operations for cl_object
+ *
+ * @{
+ */
+static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, struct page *vmpage)
+{
+ struct echo_page *ep = cl_object_page_slice(obj, page);
+ struct echo_object *eco = cl2echo_obj(obj);
+
+ ep->ep_vmpage = vmpage;
+ page_cache_get(vmpage);
+ mutex_init(&ep->ep_lock);
+ cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
+ atomic_inc(&eco->eo_npages);
+ return 0;
+}
+
+static int echo_io_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io)
+{
+ return 0;
+}
+
+static int echo_lock_init(const struct lu_env *env,
+ struct cl_object *obj, struct cl_lock *lock,
+ const struct cl_io *unused)
+{
+ struct echo_lock *el;
+
+ OBD_SLAB_ALLOC_PTR_GFP(el, echo_lock_kmem, __GFP_IO);
+ if (el != NULL) {
+ cl_lock_slice_add(lock, &el->el_cl, obj, &echo_lock_ops);
+ el->el_object = cl2echo_obj(obj);
+ INIT_LIST_HEAD(&el->el_chain);
+ atomic_set(&el->el_refcount, 0);
+ }
+ return el == NULL ? -ENOMEM : 0;
+}
+
+static int echo_conf_set(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_object_conf *conf)
+{
+ return 0;
+}
+
+static const struct cl_object_operations echo_cl_obj_ops = {
+ .coo_page_init = echo_page_init,
+ .coo_lock_init = echo_lock_init,
+ .coo_io_init = echo_io_init,
+ .coo_conf_set = echo_conf_set
+};
+/** @} echo_cl_ops */
+
+/** \defgroup echo_lu_ops lu_object operations
+ *
+ * operations for echo lu object.
+ *
+ * @{
+ */
+static int echo_object_init(const struct lu_env *env, struct lu_object *obj,
+ const struct lu_object_conf *conf)
+{
+ struct echo_device *ed = cl2echo_dev(lu2cl_dev(obj->lo_dev));
+ struct echo_client_obd *ec = ed->ed_ec;
+ struct echo_object *eco = cl2echo_obj(lu2cl(obj));
+
+ if (ed->ed_next) {
+ struct lu_object *below;
+ struct lu_device *under;
+
+ under = ed->ed_next;
+ below = under->ld_ops->ldo_object_alloc(env, obj->lo_header,
+ under);
+ if (below == NULL)
+ return -ENOMEM;
+ lu_object_add(obj, below);
+ }
+
+ if (!ed->ed_next_ismd) {
+ const struct cl_object_conf *cconf = lu2cl_conf(conf);
+ struct echo_object_conf *econf = cl2echo_conf(cconf);
+
+ LASSERT(econf->eoc_md);
+ eco->eo_lsm = *econf->eoc_md;
+ /* clear the lsm pointer so that it won't get freed. */
+ *econf->eoc_md = NULL;
+ } else {
+ eco->eo_lsm = NULL;
+ }
+
+ eco->eo_dev = ed;
+ atomic_set(&eco->eo_npages, 0);
+ cl_object_page_init(lu2cl(obj), sizeof(struct echo_page));
+
+ spin_lock(&ec->ec_lock);
+ list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
+ spin_unlock(&ec->ec_lock);
+
+ return 0;
+}
+
+/* taken from osc_unpackmd() */
+static int echo_alloc_memmd(struct echo_device *ed,
+ struct lov_stripe_md **lsmp)
+{
+ int lsm_size;
+
+ /* If export is lov/osc then use their obd method */
+ if (ed->ed_next != NULL)
+ return obd_alloc_memmd(ed->ed_ec->ec_exp, lsmp);
+ /* OFD has no unpackmd method, do everything here */
+ lsm_size = lov_stripe_md_size(1);
+
+ LASSERT(*lsmp == NULL);
+ OBD_ALLOC(*lsmp, lsm_size);
+ if (*lsmp == NULL)
+ return -ENOMEM;
+
+ OBD_ALLOC((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
+ if ((*lsmp)->lsm_oinfo[0] == NULL) {
+ OBD_FREE(*lsmp, lsm_size);
+ return -ENOMEM;
+ }
+
+ loi_init((*lsmp)->lsm_oinfo[0]);
+ (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
+ ostid_set_seq_echo(&(*lsmp)->lsm_oi);
+
+ return lsm_size;
+}
+
+static int echo_free_memmd(struct echo_device *ed, struct lov_stripe_md **lsmp)
+{
+ int lsm_size;
+
+ /* If export is lov/osc then use their obd method */
+ if (ed->ed_next != NULL)
+ return obd_free_memmd(ed->ed_ec->ec_exp, lsmp);
+ /* OFD has no unpackmd method, do everything here */
+ lsm_size = lov_stripe_md_size(1);
+
+ LASSERT(*lsmp != NULL);
+ OBD_FREE((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
+ OBD_FREE(*lsmp, lsm_size);
+ *lsmp = NULL;
+ return 0;
+}
+
+static void echo_object_free(const struct lu_env *env, struct lu_object *obj)
+{
+ struct echo_object *eco = cl2echo_obj(lu2cl(obj));
+ struct echo_client_obd *ec = eco->eo_dev->ed_ec;
+
+ LASSERT(atomic_read(&eco->eo_npages) == 0);
+
+ spin_lock(&ec->ec_lock);
+ list_del_init(&eco->eo_obj_chain);
+ spin_unlock(&ec->ec_lock);
+
+ lu_object_fini(obj);
+ lu_object_header_fini(obj->lo_header);
+
+ if (eco->eo_lsm)
+ echo_free_memmd(eco->eo_dev, &eco->eo_lsm);
+ OBD_SLAB_FREE_PTR(eco, echo_object_kmem);
+}
+
+static int echo_object_print(const struct lu_env *env, void *cookie,
+ lu_printer_t p, const struct lu_object *o)
+{
+ struct echo_object *obj = cl2echo_obj(lu2cl(o));
+
+ return (*p)(env, cookie, "echoclient-object@%p", obj);
+}
+
+static const struct lu_object_operations echo_lu_obj_ops = {
+ .loo_object_init = echo_object_init,
+ .loo_object_delete = NULL,
+ .loo_object_release = NULL,
+ .loo_object_free = echo_object_free,
+ .loo_object_print = echo_object_print,
+ .loo_object_invariant = NULL
+};
+/** @} echo_lu_ops */
+
+/** \defgroup echo_lu_dev_ops lu_device operations
+ *
+ * Operations for echo lu device.
+ *
+ * @{
+ */
+static struct lu_object *echo_object_alloc(const struct lu_env *env,
+ const struct lu_object_header *hdr,
+ struct lu_device *dev)
+{
+ struct echo_object *eco;
+ struct lu_object *obj = NULL;
+
+ /* we're the top dev. */
+ LASSERT(hdr == NULL);
+ OBD_SLAB_ALLOC_PTR_GFP(eco, echo_object_kmem, __GFP_IO);
+ if (eco != NULL) {
+ struct cl_object_header *hdr = &eco->eo_hdr;
+
+ obj = &echo_obj2cl(eco)->co_lu;
+ cl_object_header_init(hdr);
+ lu_object_init(obj, &hdr->coh_lu, dev);
+ lu_object_add_top(&hdr->coh_lu, obj);
+
+ eco->eo_cl.co_ops = &echo_cl_obj_ops;
+ obj->lo_ops = &echo_lu_obj_ops;
+ }
+ return obj;
+}
+
+static struct lu_device_operations echo_device_lu_ops = {
+ .ldo_object_alloc = echo_object_alloc,
+};
+
+/** @} echo_lu_dev_ops */
+
+static struct cl_device_operations echo_device_cl_ops = {
+};
+
+/** \defgroup echo_init Setup and teardown
+ *
+ * Init and fini functions for echo client.
+ *
+ * @{
+ */
+static int echo_site_init(const struct lu_env *env, struct echo_device *ed)
+{
+ struct cl_site *site = &ed->ed_site_myself;
+ int rc;
+
+ /* initialize site */
+ rc = cl_site_init(site, &ed->ed_cl);
+ if (rc) {
+ CERROR("Cannot initialize site for echo client(%d)\n", rc);
+ return rc;
+ }
+
+ rc = lu_site_init_finish(&site->cs_lu);
+ if (rc)
+ return rc;
+
+ ed->ed_site = site;
+ return 0;
+}
+
+static void echo_site_fini(const struct lu_env *env, struct echo_device *ed)
+{
+ if (ed->ed_site) {
+ if (!ed->ed_next_ismd)
+ cl_site_fini(ed->ed_site);
+ ed->ed_site = NULL;
+ }
+}
+
+static void *echo_thread_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key)
+{
+ struct echo_thread_info *info;
+
+ OBD_SLAB_ALLOC_PTR_GFP(info, echo_thread_kmem, __GFP_IO);
+ if (info == NULL)
+ info = ERR_PTR(-ENOMEM);
+ return info;
+}
+
+static void echo_thread_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
+{
+ struct echo_thread_info *info = data;
+ OBD_SLAB_FREE_PTR(info, echo_thread_kmem);
+}
+
+static void echo_thread_key_exit(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
+{
+}
+
+static struct lu_context_key echo_thread_key = {
+ .lct_tags = LCT_CL_THREAD,
+ .lct_init = echo_thread_key_init,
+ .lct_fini = echo_thread_key_fini,
+ .lct_exit = echo_thread_key_exit
+};
+
+static void *echo_session_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key)
+{
+ struct echo_session_info *session;
+
+ OBD_SLAB_ALLOC_PTR_GFP(session, echo_session_kmem, __GFP_IO);
+ if (session == NULL)
+ session = ERR_PTR(-ENOMEM);
+ return session;
+}
+
+static void echo_session_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
+{
+ struct echo_session_info *session = data;
+ OBD_SLAB_FREE_PTR(session, echo_session_kmem);
+}
+
+static void echo_session_key_exit(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
+{
+}
+
+static struct lu_context_key echo_session_key = {
+ .lct_tags = LCT_SESSION,
+ .lct_init = echo_session_key_init,
+ .lct_fini = echo_session_key_fini,
+ .lct_exit = echo_session_key_exit
+};
+
+LU_TYPE_INIT_FINI(echo, &echo_thread_key, &echo_session_key);
+
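+/* echo_fid_init() sets up a client-side sequence manager on top of the
+ * MDT's server sequence so the MD echo client can allocate FIDs of its
+ * own. */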
+#define ECHO_SEQ_WIDTH 0xffffffff
+static int echo_fid_init(struct echo_device *ed, char *obd_name,
+ struct seq_server_site *ss)
+{
+ char *prefix;
+ int rc;
+
+ OBD_ALLOC_PTR(ed->ed_cl_seq);
+ if (ed->ed_cl_seq == NULL)
+ return -ENOMEM;
+
+ OBD_ALLOC(prefix, MAX_OBD_NAME + 5);
+ if (prefix == NULL)
+ GOTO(out_free_seq, rc = -ENOMEM);
+
+ snprintf(prefix, MAX_OBD_NAME + 5, "srv-%s", obd_name);
+
+ /* Init client side sequence-manager */
+ rc = seq_client_init(ed->ed_cl_seq, NULL,
+ LUSTRE_SEQ_METADATA,
+ prefix, ss->ss_server_seq);
+ ed->ed_cl_seq->lcs_width = ECHO_SEQ_WIDTH;
+ OBD_FREE(prefix, MAX_OBD_NAME + 5);
+ if (rc)
+ GOTO(out_free_seq, rc);
+
+ return 0;
+
+out_free_seq:
+ OBD_FREE_PTR(ed->ed_cl_seq);
+ ed->ed_cl_seq = NULL;
+ return rc;
+}
+
+static int echo_fid_fini(struct obd_device *obddev)
+{
+ struct echo_device *ed = obd2echo_dev(obddev);
+
+ if (ed->ed_cl_seq != NULL) {
+ seq_client_fini(ed->ed_cl_seq);
+ OBD_FREE_PTR(ed->ed_cl_seq);
+ ed->ed_cl_seq = NULL;
+ }
+
+ return 0;
+}
+
+static struct lu_device *echo_device_alloc(const struct lu_env *env,
+ struct lu_device_type *t,
+ struct lustre_cfg *cfg)
+{
+ struct lu_device *next;
+ struct echo_device *ed;
+ struct cl_device *cd;
+ struct obd_device *obd = NULL; /* to keep compiler happy */
+ struct obd_device *tgt;
+ const char *tgt_type_name;
+ int rc;
+ int cleanup = 0;
+
+ OBD_ALLOC_PTR(ed);
+ if (ed == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ cleanup = 1;
+ cd = &ed->ed_cl;
+ rc = cl_device_init(cd, t);
+ if (rc)
+ GOTO(out, rc);
+
+ cd->cd_lu_dev.ld_ops = &echo_device_lu_ops;
+ cd->cd_ops = &echo_device_cl_ops;
+
+ cleanup = 2;
+ obd = class_name2obd(lustre_cfg_string(cfg, 0));
+ LASSERT(obd != NULL);
+ LASSERT(env != NULL);
+
+ tgt = class_name2obd(lustre_cfg_string(cfg, 1));
+ if (tgt == NULL) {
+ CERROR("Can not find tgt device %s\n",
+ lustre_cfg_string(cfg, 1));
+ GOTO(out, rc = -ENODEV);
+ }
+
+ next = tgt->obd_lu_dev;
+ if (!strcmp(tgt->obd_type->typ_name, LUSTRE_MDT_NAME)) {
+ ed->ed_next_ismd = 1;
+ } else {
+ ed->ed_next_ismd = 0;
+ rc = echo_site_init(env, ed);
+ if (rc)
+ GOTO(out, rc);
+ }
+ cleanup = 3;
+
+ rc = echo_client_setup(env, obd, cfg);
+ if (rc)
+ GOTO(out, rc);
+
+ ed->ed_ec = &obd->u.echo_client;
+ cleanup = 4;
+
+ if (ed->ed_next_ismd) {
+ /* Supposed to connect to some metadata layer */
+ struct lu_site *ls;
+ struct lu_device *ld;
+ int found = 0;
+
+ if (next == NULL) {
+ CERROR("%s is not lu device type!\n",
+ lustre_cfg_string(cfg, 1));
+ GOTO(out, rc = -EINVAL);
+ }
+
+ tgt_type_name = lustre_cfg_string(cfg, 2);
+ if (!tgt_type_name) {
+ CERROR("%s no type name for echo %s setup\n",
+ lustre_cfg_string(cfg, 1),
+ tgt->obd_type->typ_name);
+ GOTO(out, rc = -EINVAL);
+ }
+
+ ls = next->ld_site;
+
+ spin_lock(&ls->ls_ld_lock);
+ list_for_each_entry(ld, &ls->ls_ld_linkage, ld_linkage) {
+ if (strcmp(ld->ld_type->ldt_name, tgt_type_name) == 0) {
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock(&ls->ls_ld_lock);
+
+ if (found == 0) {
+ CERROR("%s is not lu device type!\n",
+ lustre_cfg_string(cfg, 1));
+ GOTO(out, rc = -EINVAL);
+ }
+
+ next = ld;
+ /* For MD echo client, it will use the site in MDS stack */
+ ed->ed_site_myself.cs_lu = *ls;
+ ed->ed_site = &ed->ed_site_myself;
+ ed->ed_cl.cd_lu_dev.ld_site = &ed->ed_site_myself.cs_lu;
+ rc = echo_fid_init(ed, obd->obd_name, lu_site2seq(ls));
+ if (rc) {
+ CERROR("echo fid init error %d\n", rc);
+ GOTO(out, rc);
+ }
+ } else {
+ /* If the echo client is to be stacked upon an OST device, next is
+ * NULL, since the OST is not a CLIO device so far */
+ if (next != NULL && !lu_device_is_cl(next))
+ next = NULL;
+
+ tgt_type_name = tgt->obd_type->typ_name;
+ if (next != NULL) {
+ LASSERT(next != NULL);
+ if (next->ld_site != NULL)
+ GOTO(out, rc = -EBUSY);
+
+ next->ld_site = &ed->ed_site->cs_lu;
+ rc = next->ld_type->ldt_ops->ldto_device_init(env, next,
+ next->ld_type->ldt_name,
+ NULL);
+ if (rc)
+ GOTO(out, rc);
+
+ /* Tricky case, I have to determine the obd type since
+ * CLIO uses the different parameters to initialize
+ * objects for lov & osc. */
+ if (strcmp(tgt_type_name, LUSTRE_LOV_NAME) == 0)
+ ed->ed_next_islov = 1;
+ else
+ LASSERT(strcmp(tgt_type_name,
+ LUSTRE_OSC_NAME) == 0);
+ } else
+ LASSERT(strcmp(tgt_type_name, LUSTRE_OST_NAME) == 0);
+ }
+
+ ed->ed_next = next;
+ return &cd->cd_lu_dev;
+out:
+ switch(cleanup) {
+ case 4: {
+ int rc2;
+ rc2 = echo_client_cleanup(obd);
+ if (rc2)
+ CERROR("Cleanup obd device %s error(%d)\n",
+ obd->obd_name, rc2);
+ }
+
+ case 3:
+ echo_site_fini(env, ed);
+ case 2:
+ cl_device_fini(&ed->ed_cl);
+ case 1:
+ OBD_FREE_PTR(ed);
+ case 0:
+ default:
+ break;
+ }
+ return(ERR_PTR(rc));
+}
+
+static int echo_device_init(const struct lu_env *env, struct lu_device *d,
+ const char *name, struct lu_device *next)
+{
+ LBUG();
+ return 0;
+}
+
+static struct lu_device *echo_device_fini(const struct lu_env *env,
+ struct lu_device *d)
+{
+ struct echo_device *ed = cl2echo_dev(lu2cl_dev(d));
+ struct lu_device *next = ed->ed_next;
+
+ while (next && !ed->ed_next_ismd)
+ next = next->ld_type->ldt_ops->ldto_device_fini(env, next);
+ return NULL;
+}
+
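+/* Drop the enqueue reference on an echo lock; when no cookie holder is
+ * left (still_used == 0) the underlying cl_lock is cancelled and deleted
+ * as well. */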
+static void echo_lock_release(const struct lu_env *env,
+ struct echo_lock *ecl,
+ int still_used)
+{
+ struct cl_lock *clk = echo_lock2cl(ecl);
+
+ cl_lock_get(clk);
+ cl_unuse(env, clk);
+ cl_lock_release(env, clk, "ec enqueue", ecl->el_object);
+ if (!still_used) {
+ cl_lock_mutex_get(env, clk);
+ cl_lock_cancel(env, clk);
+ cl_lock_delete(env, clk);
+ cl_lock_mutex_put(env, clk);
+ }
+ cl_lock_put(env, clk);
+}
+
+static struct lu_device *echo_device_free(const struct lu_env *env,
+ struct lu_device *d)
+{
+ struct echo_device *ed = cl2echo_dev(lu2cl_dev(d));
+ struct echo_client_obd *ec = ed->ed_ec;
+ struct echo_object *eco;
+ struct lu_device *next = ed->ed_next;
+
+ CDEBUG(D_INFO, "echo device:%p is going to be freed, next = %p\n",
+ ed, next);
+
+ lu_site_purge(env, &ed->ed_site->cs_lu, -1);
+
+ /* Check whether any objects are still alive.
+ * There should not be any, because lu_site_purge() cleans up all
+ * cached objects; if some remain, the echo device is probably still
+ * being accessed in parallel.
+ */
+ spin_lock(&ec->ec_lock);
+ list_for_each_entry(eco, &ec->ec_objects, eo_obj_chain)
+ eco->eo_deleted = 1;
+ spin_unlock(&ec->ec_lock);
+
+ /* purge again */
+ lu_site_purge(env, &ed->ed_site->cs_lu, -1);
+
+ CDEBUG(D_INFO,
+ "Waiting for the reference of echo object to be dropped\n");
+
+ /* Wait for the last reference to be dropped. */
+ spin_lock(&ec->ec_lock);
+ while (!list_empty(&ec->ec_objects)) {
+ spin_unlock(&ec->ec_lock);
+ CERROR("echo_client still has objects at cleanup time, "
+ "wait for 1 second\n");
+ schedule_timeout_and_set_state(TASK_UNINTERRUPTIBLE,
+ cfs_time_seconds(1));
+ lu_site_purge(env, &ed->ed_site->cs_lu, -1);
+ spin_lock(&ec->ec_lock);
+ }
+ spin_unlock(&ec->ec_lock);
+
+ LASSERT(list_empty(&ec->ec_locks));
+
+ CDEBUG(D_INFO, "No object exists, exiting...\n");
+
+ echo_client_cleanup(d->ld_obd);
+ echo_fid_fini(d->ld_obd);
+ while (next && !ed->ed_next_ismd)
+ next = next->ld_type->ldt_ops->ldto_device_free(env, next);
+
+ LASSERT(ed->ed_site == lu2cl_site(d->ld_site));
+ echo_site_fini(env, ed);
+ cl_device_fini(&ed->ed_cl);
+ OBD_FREE_PTR(ed);
+
+ return NULL;
+}
+
+static const struct lu_device_type_operations echo_device_type_ops = {
+ .ldto_init = echo_type_init,
+ .ldto_fini = echo_type_fini,
+
+ .ldto_start = echo_type_start,
+ .ldto_stop = echo_type_stop,
+
+ .ldto_device_alloc = echo_device_alloc,
+ .ldto_device_free = echo_device_free,
+ .ldto_device_init = echo_device_init,
+ .ldto_device_fini = echo_device_fini
+};
+
+static struct lu_device_type echo_device_type = {
+ .ldt_tags = LU_DEVICE_CL,
+ .ldt_name = LUSTRE_ECHO_CLIENT_NAME,
+ .ldt_ops = &echo_device_type_ops,
+ .ldt_ctx_tags = LCT_CL_THREAD | LCT_MD_THREAD | LCT_DT_THREAD,
+};
+/** @} echo_init */
+
+/** \defgroup echo_exports Exported operations
+ *
+ * exporting functions to echo client
+ *
+ * @{
+ */
+
+/* Interfaces to echo client obd device */
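+/* Look up (or create) the cl_object described by the striping descriptor;
+ * when a new object is allocated, echo_object_init() takes over *lsmp and
+ * clears it. */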
+static struct echo_object *cl_echo_object_find(struct echo_device *d,
+ struct lov_stripe_md **lsmp)
+{
+ struct lu_env *env;
+ struct echo_thread_info *info;
+ struct echo_object_conf *conf;
+ struct lov_stripe_md *lsm;
+ struct echo_object *eco;
+ struct cl_object *obj;
+ struct lu_fid *fid;
+ int refcheck;
+ int rc;
+
+ LASSERT(lsmp);
+ lsm = *lsmp;
+ LASSERT(lsm);
+ LASSERTF(ostid_id(&lsm->lsm_oi) != 0, DOSTID"\n", POSTID(&lsm->lsm_oi));
+ LASSERTF(ostid_seq(&lsm->lsm_oi) == FID_SEQ_ECHO, DOSTID"\n",
+ POSTID(&lsm->lsm_oi));
+
+ /* Never return an object if the obd is to be freed. */
+ if (echo_dev2cl(d)->cd_lu_dev.ld_obd->obd_stopping)
+ return ERR_PTR(-ENODEV);
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return (void *)env;
+
+ info = echo_env_info(env);
+ conf = &info->eti_conf;
+ if (d->ed_next) {
+ if (!d->ed_next_islov) {
+ struct lov_oinfo *oinfo = lsm->lsm_oinfo[0];
+ LASSERT(oinfo != NULL);
+ oinfo->loi_oi = lsm->lsm_oi;
+ conf->eoc_cl.u.coc_oinfo = oinfo;
+ } else {
+ struct lustre_md *md;
+ md = &info->eti_md;
+ memset(md, 0, sizeof *md);
+ md->lsm = lsm;
+ conf->eoc_cl.u.coc_md = md;
+ }
+ }
+ conf->eoc_md = lsmp;
+
+ fid = &info->eti_fid;
+ rc = ostid_to_fid(fid, &lsm->lsm_oi, 0);
+ if (rc != 0)
+ GOTO(out, eco = ERR_PTR(rc));
+
+ /* In the function below, .hs_keycmp resolves to
+ * lu_obj_hop_keycmp() */
+ /* coverity[overrun-buffer-val] */
+ obj = cl_object_find(env, echo_dev2cl(d), fid, &conf->eoc_cl);
+ if (IS_ERR(obj))
+ GOTO(out, eco = (void*)obj);
+
+ eco = cl2echo_obj(obj);
+ if (eco->eo_deleted) {
+ cl_object_put(env, obj);
+ eco = ERR_PTR(-EAGAIN);
+ }
+
+out:
+ cl_env_put(env, &refcheck);
+ return eco;
+}
+
+static int cl_echo_object_put(struct echo_object *eco)
+{
+ struct lu_env *env;
+ struct cl_object *obj = echo_obj2cl(eco);
+ int refcheck;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ /* an external function to kill an object? */
+ if (eco->eo_deleted) {
+ struct lu_object_header *loh = obj->co_lu.lo_header;
+ LASSERT(&eco->eo_hdr == luh2coh(loh));
+ set_bit(LU_OBJECT_HEARD_BANSHEE, &loh->loh_flags);
+ }
+
+ cl_object_put(env, obj);
+ cl_env_put(env, &refcheck);
+ return 0;
+}
+
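+/* Enqueue a cl_lock covering [start, end] on the echo object and record
+ * it on ec_locks; *cookie identifies the lock for a later cancel. */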
+static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco,
+ obd_off start, obd_off end, int mode,
+ __u64 *cookie, __u32 enqflags)
+{
+ struct cl_io *io;
+ struct cl_lock *lck;
+ struct cl_object *obj;
+ struct cl_lock_descr *descr;
+ struct echo_thread_info *info;
+ int rc = -ENOMEM;
+
+ info = echo_env_info(env);
+ io = &info->eti_io;
+ descr = &info->eti_descr;
+ obj = echo_obj2cl(eco);
+
+ descr->cld_obj = obj;
+ descr->cld_start = cl_index(obj, start);
+ descr->cld_end = cl_index(obj, end);
+ descr->cld_mode = mode == LCK_PW ? CLM_WRITE : CLM_READ;
+ descr->cld_enq_flags = enqflags;
+ io->ci_obj = obj;
+
+ lck = cl_lock_request(env, io, descr, "ec enqueue", eco);
+ if (lck) {
+ struct echo_client_obd *ec = eco->eo_dev->ed_ec;
+ struct echo_lock *el;
+
+ rc = cl_wait(env, lck);
+ if (rc == 0) {
+ el = cl2echo_lock(cl_lock_at(lck, &echo_device_type));
+ spin_lock(&ec->ec_lock);
+ if (list_empty(&el->el_chain)) {
+ list_add(&el->el_chain, &ec->ec_locks);
+ el->el_cookie = ++ec->ec_unique;
+ }
+ atomic_inc(&el->el_refcount);
+ *cookie = el->el_cookie;
+ spin_unlock(&ec->ec_lock);
+ } else {
+ cl_lock_release(env, lck, "ec enqueue", current);
+ }
+ }
+ return rc;
+}
+
+static int cl_echo_enqueue(struct echo_object *eco, obd_off start, obd_off end,
+ int mode, __u64 *cookie)
+{
+ struct echo_thread_info *info;
+ struct lu_env *env;
+ struct cl_io *io;
+ int refcheck;
+ int result;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ info = echo_env_info(env);
+ io = &info->eti_io;
+
+ io->ci_ignore_layout = 1;
+ result = cl_io_init(env, io, CIT_MISC, echo_obj2cl(eco));
+ if (result < 0)
+ GOTO(out, result);
+ LASSERT(result == 0);
+
+ result = cl_echo_enqueue0(env, eco, start, end, mode, cookie, 0);
+ cl_io_fini(env, io);
+
+out:
+ cl_env_put(env, &refcheck);
+ return result;
+}
+
+static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed,
+ __u64 cookie)
+{
+ struct echo_client_obd *ec = ed->ed_ec;
+ struct echo_lock *ecl = NULL;
+ struct list_head *el;
+ int found = 0, still_used = 0;
+
+ LASSERT(ec != NULL);
+ spin_lock(&ec->ec_lock);
+ list_for_each (el, &ec->ec_locks) {
+ ecl = list_entry (el, struct echo_lock, el_chain);
+ CDEBUG(D_INFO, "ecl: %p, cookie: "LPX64"\n", ecl, ecl->el_cookie);
+ found = (ecl->el_cookie == cookie);
+ if (found) {
+ if (atomic_dec_and_test(&ecl->el_refcount))
+ list_del_init(&ecl->el_chain);
+ else
+ still_used = 1;
+ break;
+ }
+ }
+ spin_unlock(&ec->ec_lock);
+
+ if (!found)
+ return -ENOENT;
+
+ echo_lock_release(env, ecl, still_used);
+ return 0;
+}
+
+static int cl_echo_cancel(struct echo_device *ed, __u64 cookie)
+{
+ struct lu_env *env;
+ int refcheck;
+ int rc;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ rc = cl_echo_cancel0(env, ed, cookie);
+
+ cl_env_put(env, &refcheck);
+ return rc;
+}
+
+static int cl_echo_async_brw(const struct lu_env *env, struct cl_io *io,
+ enum cl_req_type unused, struct cl_2queue *queue)
+{
+ struct cl_page *clp;
+ struct cl_page *temp;
+ int result = 0;
+
+ cl_page_list_for_each_safe(clp, temp, &queue->c2_qin) {
+ int rc;
+ rc = cl_page_cache_add(env, io, clp, CRT_WRITE);
+ if (rc == 0)
+ continue;
+ result = result ?: rc;
+ }
+ return result;
+}
+
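+/* Read or write npages pages at the given offset through the CLIO stack:
+ * lock the range, attach each page as a transient cl_page and submit the
+ * queue, asynchronously for writes when requested, otherwise synchronously. */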
+static int cl_echo_object_brw(struct echo_object *eco, int rw, obd_off offset,
+ struct page **pages, int npages, int async)
+{
+ struct lu_env *env;
+ struct echo_thread_info *info;
+ struct cl_object *obj = echo_obj2cl(eco);
+ struct echo_device *ed = eco->eo_dev;
+ struct cl_2queue *queue;
+ struct cl_io *io;
+ struct cl_page *clp;
+ struct lustre_handle lh = { 0 };
+ int page_size = cl_page_size(obj);
+ int refcheck;
+ int rc;
+ int i;
+
+ LASSERT((offset & ~CFS_PAGE_MASK) == 0);
+ LASSERT(ed->ed_next != NULL);
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ info = echo_env_info(env);
+ io = &info->eti_io;
+ queue = &info->eti_queue;
+
+ cl_2queue_init(queue);
+
+ io->ci_ignore_layout = 1;
+ rc = cl_io_init(env, io, CIT_MISC, obj);
+ if (rc < 0)
+ GOTO(out, rc);
+ LASSERT(rc == 0);
+
+ rc = cl_echo_enqueue0(env, eco, offset,
+ offset + npages * PAGE_CACHE_SIZE - 1,
+ rw == READ ? LCK_PR : LCK_PW, &lh.cookie,
+ CEF_NEVER);
+ if (rc < 0)
+ GOTO(error_lock, rc);
+
+ for (i = 0; i < npages; i++) {
+ LASSERT(pages[i]);
+ clp = cl_page_find(env, obj, cl_index(obj, offset),
+ pages[i], CPT_TRANSIENT);
+ if (IS_ERR(clp)) {
+ rc = PTR_ERR(clp);
+ break;
+ }
+ LASSERT(clp->cp_type == CPT_TRANSIENT);
+
+ rc = cl_page_own(env, io, clp);
+ if (rc) {
+ LASSERT(clp->cp_state == CPS_FREEING);
+ cl_page_put(env, clp);
+ break;
+ }
+
+ cl_2queue_add(queue, clp);
+
+ /* drop the reference count for cl_page_find, so that the page
+ * will be freed in cl_2queue_fini. */
+ cl_page_put(env, clp);
+ cl_page_clip(env, clp, 0, page_size);
+
+ offset += page_size;
+ }
+
+ if (rc == 0) {
+ enum cl_req_type typ = rw == READ ? CRT_READ : CRT_WRITE;
+
+ async = async && (typ == CRT_WRITE);
+ if (async)
+ rc = cl_echo_async_brw(env, io, typ, queue);
+ else
+ rc = cl_io_submit_sync(env, io, typ, queue, 0);
+ CDEBUG(D_INFO, "echo_client %s write returns %d\n",
+ async ? "async" : "sync", rc);
+ }
+
+ cl_echo_cancel0(env, ed, lh.cookie);
+error_lock:
+ cl_2queue_discard(env, io, queue);
+ cl_2queue_disown(env, io, queue);
+ cl_2queue_fini(env, queue);
+ cl_io_fini(env, io);
+out:
+ cl_env_put(env, &refcheck);
+ return rc;
+}
+/** @} echo_exports */
+
+
+static obd_id last_object_id;
+
+static int
+echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
+{
+ struct lov_stripe_md *ulsm = _ulsm;
+ int nob, i;
+
+ nob = offsetof(struct lov_stripe_md, lsm_oinfo[lsm->lsm_stripe_count]);
+ if (nob > ulsm_nob)
+ return -EINVAL;
+
+ if (copy_to_user(ulsm, lsm, sizeof(*ulsm)))
+ return -EFAULT;
+
+ for (i = 0; i < lsm->lsm_stripe_count; i++) {
+ if (copy_to_user(ulsm->lsm_oinfo[i], lsm->lsm_oinfo[i],
+ sizeof(lsm->lsm_oinfo[0])))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int
+echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm,
+ void *ulsm, int ulsm_nob)
+{
+ struct echo_client_obd *ec = ed->ed_ec;
+ int i;
+
+ if (ulsm_nob < sizeof(*lsm))
+ return -EINVAL;
+
+ if (copy_from_user(lsm, ulsm, sizeof(*lsm)))
+ return -EFAULT;
+
+ if (lsm->lsm_stripe_count > ec->ec_nstripes ||
+ lsm->lsm_magic != LOV_MAGIC ||
+ (lsm->lsm_stripe_size & (~CFS_PAGE_MASK)) != 0 ||
+ ((__u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count > ~0UL))
+ return -EINVAL;
+
+ for (i = 0; i < lsm->lsm_stripe_count; i++) {
+ if (copy_from_user(lsm->lsm_oinfo[i],
+ ((struct lov_stripe_md *)ulsm)->lsm_oinfo[i],
+ sizeof(lsm->lsm_oinfo[0])))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static inline void echo_md_build_name(struct lu_name *lname, char *name,
+ __u64 id)
+{
+ sprintf(name, LPU64, id);
+ lname->ln_name = name;
+ lname->ln_namelen = strlen(name);
+}
+
+/* similar to mdt_attr_get_complex */
+static int echo_big_lmm_get(const struct lu_env *env, struct md_object *o,
+ struct md_attr *ma)
+{
+ struct echo_thread_info *info = echo_env_info(env);
+ int rc;
+
+ LASSERT(ma->ma_lmm_size > 0);
+
+ rc = mo_xattr_get(env, o, &LU_BUF_NULL, XATTR_NAME_LOV);
+ if (rc < 0)
+ return rc;
+
+ /* big_lmm may need to be grown */
+ if (info->eti_big_lmmsize < rc) {
+ int size = size_roundup_power2(rc);
+
+ if (info->eti_big_lmmsize > 0) {
+ /* free old buffer */
+ LASSERT(info->eti_big_lmm);
+ OBD_FREE_LARGE(info->eti_big_lmm,
+ info->eti_big_lmmsize);
+ info->eti_big_lmm = NULL;
+ info->eti_big_lmmsize = 0;
+ }
+
+ OBD_ALLOC_LARGE(info->eti_big_lmm, size);
+ if (info->eti_big_lmm == NULL)
+ return -ENOMEM;
+ info->eti_big_lmmsize = size;
+ }
+ LASSERT(info->eti_big_lmmsize >= rc);
+
+ info->eti_buf.lb_buf = info->eti_big_lmm;
+ info->eti_buf.lb_len = info->eti_big_lmmsize;
+ rc = mo_xattr_get(env, o, &info->eti_buf, XATTR_NAME_LOV);
+ if (rc < 0)
+ return rc;
+
+ ma->ma_valid |= MA_LOV;
+ ma->ma_lmm = info->eti_big_lmm;
+ ma->ma_lmm_size = rc;
+
+ return 0;
+}
+
+int echo_attr_get_complex(const struct lu_env *env, struct md_object *next,
+ struct md_attr *ma)
+{
+ struct echo_thread_info *info = echo_env_info(env);
+ struct lu_buf *buf = &info->eti_buf;
+ umode_t mode = lu_object_attr(&next->mo_lu);
+ int need = ma->ma_need;
+ int rc = 0, rc2;
+
+ ma->ma_valid = 0;
+
+ if (need & MA_INODE) {
+ ma->ma_need = MA_INODE;
+ rc = mo_attr_get(env, next, ma);
+ if (rc)
+ GOTO(out, rc);
+ ma->ma_valid |= MA_INODE;
+ }
+
+ if (need & MA_LOV) {
+ if (S_ISREG(mode) || S_ISDIR(mode)) {
+ LASSERT(ma->ma_lmm_size > 0);
+ buf->lb_buf = ma->ma_lmm;
+ buf->lb_len = ma->ma_lmm_size;
+ rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_LOV);
+ if (rc2 > 0) {
+ ma->ma_lmm_size = rc2;
+ ma->ma_valid |= MA_LOV;
+ } else if (rc2 == -ENODATA) {
+ /* no LOV EA */
+ ma->ma_lmm_size = 0;
+ } else if (rc2 == -ERANGE) {
+ rc2 = echo_big_lmm_get(env, next, ma);
+ if (rc2 < 0)
+ GOTO(out, rc = rc2);
+ } else {
+ GOTO(out, rc = rc2);
+ }
+ }
+ }
+
+#ifdef CONFIG_FS_POSIX_ACL
+ if (need & MA_ACL_DEF && S_ISDIR(mode)) {
+ buf->lb_buf = ma->ma_acl;
+ buf->lb_len = ma->ma_acl_size;
+ rc2 = mo_xattr_get(env, next, buf, XATTR_NAME_ACL_DEFAULT);
+ if (rc2 > 0) {
+ ma->ma_acl_size = rc2;
+ ma->ma_valid |= MA_ACL_DEF;
+ } else if (rc2 == -ENODATA) {
+ /* no ACLs */
+ ma->ma_acl_size = 0;
+ } else {
+ GOTO(out, rc = rc2);
+ }
+ }
+#endif
+out:
+ ma->ma_need = need;
+ CDEBUG(D_INODE, "after getattr rc = %d, ma_valid = "LPX64" ma_lmm=%p\n",
+ rc, ma->ma_valid, ma->ma_lmm);
+ return rc;
+}
+
+static int
+echo_md_create_internal(const struct lu_env *env, struct echo_device *ed,
+ struct md_object *parent, struct lu_fid *fid,
+ struct lu_name *lname, struct md_op_spec *spec,
+ struct md_attr *ma)
+{
+ struct lu_object *ec_child, *child;
+ struct lu_device *ld = ed->ed_next;
+ struct echo_thread_info *info = echo_env_info(env);
+ struct lu_fid *fid2 = &info->eti_fid2;
+ struct lu_object_conf conf = { .loc_flags = LOC_F_NEW };
+ int rc;
+
+ rc = mdo_lookup(env, parent, lname, fid2, spec);
+ if (rc == 0)
+ return -EEXIST;
+ else if (rc != -ENOENT)
+ return rc;
+
+ ec_child = lu_object_find_at(env, &ed->ed_cl.cd_lu_dev,
+ fid, &conf);
+ if (IS_ERR(ec_child)) {
+ CERROR("Can not find the child "DFID": rc = %ld\n", PFID(fid),
+ PTR_ERR(ec_child));
+ return PTR_ERR(ec_child);
+ }
+
+ child = lu_object_locate(ec_child->lo_header, ld->ld_type);
+ if (child == NULL) {
+ CERROR("Can not locate the child "DFID"\n", PFID(fid));
+ GOTO(out_put, rc = -EINVAL);
+ }
+
+ CDEBUG(D_RPCTRACE, "Start creating object "DFID" %s %p\n",
+ PFID(lu_object_fid(&parent->mo_lu)), lname->ln_name, parent);
+
+ /*
+ * Do not perform lookup sanity check. We know that name does not exist.
+ */
+ spec->sp_cr_lookup = 0;
+ rc = mdo_create(env, parent, lname, lu2md(child), spec, ma);
+ if (rc) {
+ CERROR("Can not create child "DFID": rc = %d\n", PFID(fid), rc);
+ GOTO(out_put, rc);
+ }
+ CDEBUG(D_RPCTRACE, "End creating object "DFID" %s %p rc = %d\n",
+ PFID(lu_object_fid(&parent->mo_lu)), lname->ln_name, parent, rc);
+out_put:
+ lu_object_put(env, ec_child);
+ return rc;
+}
+
+static int echo_set_lmm_size(const struct lu_env *env, struct lu_device *ld,
+ struct md_attr *ma)
+{
+ struct echo_thread_info *info = echo_env_info(env);
+
+ if (strcmp(ld->ld_type->ldt_name, LUSTRE_MDD_NAME)) {
+ ma->ma_lmm = (void *)&info->eti_lmm;
+ ma->ma_lmm_size = sizeof(info->eti_lmm);
+ } else {
+ LASSERT(info->eti_big_lmmsize);
+ ma->ma_lmm = info->eti_big_lmm;
+ ma->ma_lmm_size = info->eti_big_lmmsize;
+ }
+
+ return 0;
+}
+
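+/* Create metadata object(s) under @ec_parent: a single object when @name
+ * is given, otherwise @count objects named by consecutive values of @id. */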
+static int echo_create_md_object(const struct lu_env *env,
+ struct echo_device *ed,
+ struct lu_object *ec_parent,
+ struct lu_fid *fid,
+ char *name, int namelen,
+ __u64 id, __u32 mode, int count,
+ int stripe_count, int stripe_offset)
+{
+ struct lu_object *parent;
+ struct echo_thread_info *info = echo_env_info(env);
+ struct lu_name *lname = &info->eti_lname;
+ struct md_op_spec *spec = &info->eti_spec;
+ struct md_attr *ma = &info->eti_ma;
+ struct lu_device *ld = ed->ed_next;
+ int rc = 0;
+ int i;
+
+ if (ec_parent == NULL)
+ return -1;
+ parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
+ if (parent == NULL)
+ return -ENXIO;
+
+ memset(ma, 0, sizeof(*ma));
+ memset(spec, 0, sizeof(*spec));
+ if (stripe_count != 0) {
+ spec->sp_cr_flags |= FMODE_WRITE;
+ echo_set_lmm_size(env, ld, ma);
+ if (stripe_count != -1) {
+ struct lov_user_md_v3 *lum = &info->eti_lum;
+
+ lum->lmm_magic = LOV_USER_MAGIC_V3;
+ lum->lmm_stripe_count = stripe_count;
+ lum->lmm_stripe_offset = stripe_offset;
+ lum->lmm_pattern = 0;
+ spec->u.sp_ea.eadata = lum;
+ spec->u.sp_ea.eadatalen = sizeof(*lum);
+ spec->sp_cr_flags |= MDS_OPEN_HAS_EA;
+ }
+ }
+
+ ma->ma_attr.la_mode = mode;
+ ma->ma_attr.la_valid = LA_CTIME | LA_MODE;
+ ma->ma_attr.la_ctime = cfs_time_current_64();
+
+ if (name != NULL) {
+ lname->ln_name = name;
+ lname->ln_namelen = namelen;
+ /* If name is specified, only create one object by name */
+ rc = echo_md_create_internal(env, ed, lu2md(parent), fid, lname,
+ spec, ma);
+ return rc;
+ }
+
+ /* Create multiple objects sequenced by id */
+ for (i = 0; i < count; i++) {
+ char *tmp_name = info->eti_name;
+
+ echo_md_build_name(lname, tmp_name, id);
+
+ rc = echo_md_create_internal(env, ed, lu2md(parent), fid, lname,
+ spec, ma);
+ if (rc) {
+ CERROR("Can not create child %s: rc = %d\n", tmp_name,
+ rc);
+ break;
+ }
+ id++;
+ fid->f_oid++;
+ }
+
+ return rc;
+}
+
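+/* Look up @lname under @parent and return the matching lu_object, or an
+ * ERR_PTR on failure. */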
+static struct lu_object *echo_md_lookup(const struct lu_env *env,
+ struct echo_device *ed,
+ struct md_object *parent,
+ struct lu_name *lname)
+{
+ struct echo_thread_info *info = echo_env_info(env);
+ struct lu_fid *fid = &info->eti_fid;
+ struct lu_object *child;
+ int rc;
+
+ CDEBUG(D_INFO, "lookup %s in parent "DFID" %p\n", lname->ln_name,
+ PFID(fid), parent);
+ rc = mdo_lookup(env, parent, lname, fid, NULL);
+ if (rc) {
+ CERROR("lookup %s: rc = %d\n", lname->ln_name, rc);
+ return ERR_PTR(rc);
+ }
+
+ /* In the function below, .hs_keycmp resolves to
+ * lu_obj_hop_keycmp() */
+ /* coverity[overrun-buffer-val] */
+ child = lu_object_find_at(env, &ed->ed_cl.cd_lu_dev, fid, NULL);
+
+ return child;
+}
+
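+/* Setattr workload: for @count objects named by consecutive ids under
+ * @ec_parent, set a "user.*" test xattr on each child. */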
+static int echo_setattr_object(const struct lu_env *env,
+ struct echo_device *ed,
+ struct lu_object *ec_parent,
+ __u64 id, int count)
+{
+ struct lu_object *parent;
+ struct echo_thread_info *info = echo_env_info(env);
+ struct lu_name *lname = &info->eti_lname;
+ char *name = info->eti_name;
+ struct lu_device *ld = ed->ed_next;
+ struct lu_buf *buf = &info->eti_buf;
+ int rc = 0;
+ int i;
+
+ if (ec_parent == NULL)
+ return -1;
+ parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
+ if (parent == NULL)
+ return -ENXIO;
+
+ for (i = 0; i < count; i++) {
+ struct lu_object *ec_child, *child;
+
+ echo_md_build_name(lname, name, id);
+
+ ec_child = echo_md_lookup(env, ed, lu2md(parent), lname);
+ if (IS_ERR(ec_child)) {
+ CERROR("Can't find child %s: rc = %ld\n",
+ lname->ln_name, PTR_ERR(ec_child));
+ return PTR_ERR(ec_child);
+ }
+
+ child = lu_object_locate(ec_child->lo_header, ld->ld_type);
+ if (child == NULL) {
+ CERROR("Can not locate the child %s\n", lname->ln_name);
+ lu_object_put(env, ec_child);
+ rc = -EINVAL;
+ break;
+ }
+
+ CDEBUG(D_RPCTRACE, "Start setattr object "DFID"\n",
+ PFID(lu_object_fid(child)));
+
+ buf->lb_buf = info->eti_xattr_buf;
+ buf->lb_len = sizeof(info->eti_xattr_buf);
+
+ sprintf(name, "%s.test1", XATTR_USER_PREFIX);
+ rc = mo_xattr_set(env, lu2md(child), buf, name,
+ LU_XATTR_CREATE);
+ if (rc < 0) {
+ CERROR("Can not setattr child "DFID": rc = %d\n",
+ PFID(lu_object_fid(child)), rc);
+ lu_object_put(env, ec_child);
+ break;
+ }
+ CDEBUG(D_RPCTRACE, "End setattr object "DFID"\n",
+ PFID(lu_object_fid(child)));
+ id++;
+ lu_object_put(env, ec_child);
+ }
+ return rc;
+}
+
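+/* Getattr workload: for @count objects named by consecutive ids under
+ * @ec_parent, fetch inode, striping and default-ACL attributes through
+ * echo_attr_get_complex(). */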
+static int echo_getattr_object(const struct lu_env *env,
+ struct echo_device *ed,
+ struct lu_object *ec_parent,
+ __u64 id, int count)
+{
+ struct lu_object *parent;
+ struct echo_thread_info *info = echo_env_info(env);
+ struct lu_name *lname = &info->eti_lname;
+ char *name = info->eti_name;
+ struct md_attr *ma = &info->eti_ma;
+ struct lu_device *ld = ed->ed_next;
+ int rc = 0;
+ int i;
+
+ if (ec_parent == NULL)
+ return -1;
+ parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
+ if (parent == NULL)
+ return -ENXIO;
+
+ memset(ma, 0, sizeof(*ma));
+ ma->ma_need |= MA_INODE | MA_LOV | MA_PFID | MA_HSM | MA_ACL_DEF;
+ ma->ma_acl = info->eti_xattr_buf;
+ ma->ma_acl_size = sizeof(info->eti_xattr_buf);
+
+ for (i = 0; i < count; i++) {
+ struct lu_object *ec_child, *child;
+
+ ma->ma_valid = 0;
+ echo_md_build_name(lname, name, id);
+ echo_set_lmm_size(env, ld, ma);
+
+ ec_child = echo_md_lookup(env, ed, lu2md(parent), lname);
+ if (IS_ERR(ec_child)) {
+ CERROR("Can't find child %s: rc = %ld\n",
+ lname->ln_name, PTR_ERR(ec_child));
+ return PTR_ERR(ec_child);
+ }
+
+ child = lu_object_locate(ec_child->lo_header, ld->ld_type);
+ if (child == NULL) {
+ CERROR("Can not locate the child %s\n", lname->ln_name);
+ lu_object_put(env, ec_child);
+ return -EINVAL;
+ }
+
+ CDEBUG(D_RPCTRACE, "Start getattr object "DFID"\n",
+ PFID(lu_object_fid(child)));
+ rc = echo_attr_get_complex(env, lu2md(child), ma);
+ if (rc) {
+ CERROR("Can not getattr child "DFID": rc = %d\n",
+ PFID(lu_object_fid(child)), rc);
+ lu_object_put(env, ec_child);
+ break;
+ }
+ CDEBUG(D_RPCTRACE, "End getattr object "DFID"\n",
+ PFID(lu_object_fid(child)));
+ id++;
+ lu_object_put(env, ec_child);
+ }
+
+ return rc;
+}
+
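+/* Lookup workload: resolve @count names built from consecutive ids under
+ * @ec_parent with mdo_lookup(). */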
+static int echo_lookup_object(const struct lu_env *env,
+ struct echo_device *ed,
+ struct lu_object *ec_parent,
+ __u64 id, int count)
+{
+ struct lu_object *parent;
+ struct echo_thread_info *info = echo_env_info(env);
+ struct lu_name *lname = &info->eti_lname;
+ char *name = info->eti_name;
+ struct lu_fid *fid = &info->eti_fid;
+ struct lu_device *ld = ed->ed_next;
+ int rc = 0;
+ int i;
+
+ if (ec_parent == NULL)
+ return -1;
+ parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
+ if (parent == NULL)
+ return -ENXIO;
+
+ /* prepare the requests */
+ for (i = 0; i < count; i++) {
+ echo_md_build_name(lname, name, id);
+
+ CDEBUG(D_RPCTRACE, "Start lookup object "DFID" %s %p\n",
+ PFID(lu_object_fid(parent)), lname->ln_name, parent);
+
+ rc = mdo_lookup(env, lu2md(parent), lname, fid, NULL);
+ if (rc) {
+ CERROR("Can not lookup child %s: rc = %d\n", name, rc);
+ break;
+ }
+ CDEBUG(D_RPCTRACE, "End lookup object "DFID" %s %p\n",
+ PFID(lu_object_fid(parent)), lname->ln_name, parent);
+
+ id++;
+ }
+ return rc;
+}
+
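+/* Unlink a single child @lname of @parent: look it up, locate it on the
+ * next layer and call mdo_unlink() on it. */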
+static int echo_md_destroy_internal(const struct lu_env *env,
+ struct echo_device *ed,
+ struct md_object *parent,
+ struct lu_name *lname,
+ struct md_attr *ma)
+{
+ struct lu_device *ld = ed->ed_next;
+ struct lu_object *ec_child;
+ struct lu_object *child;
+ int rc;
+
+ ec_child = echo_md_lookup(env, ed, parent, lname);
+ if (IS_ERR(ec_child)) {
+ CERROR("Can't find child %s: rc = %ld\n", lname->ln_name,
+ PTR_ERR(ec_child));
+ return PTR_ERR(ec_child);
+ }
+
+ child = lu_object_locate(ec_child->lo_header, ld->ld_type);
+ if (child == NULL) {
+ CERROR("Can not locate the child %s\n", lname->ln_name);
+ GOTO(out_put, rc = -EINVAL);
+ }
+
+ CDEBUG(D_RPCTRACE, "Start destroy object "DFID" %s %p\n",
+ PFID(lu_object_fid(&parent->mo_lu)), lname->ln_name, parent);
+
+ rc = mdo_unlink(env, parent, lu2md(child), lname, ma, 0);
+ if (rc) {
+ CERROR("Can not unlink child %s: rc = %d\n",
+ lname->ln_name, rc);
+ GOTO(out_put, rc);
+ }
+ CDEBUG(D_RPCTRACE, "End destroy object "DFID" %s %p\n",
+ PFID(lu_object_fid(&parent->mo_lu)), lname->ln_name, parent);
+out_put:
+ lu_object_put(env, ec_child);
+ return rc;
+}
+
+static int echo_destroy_object(const struct lu_env *env,
+ struct echo_device *ed,
+ struct lu_object *ec_parent,
+ char *name, int namelen,
+ __u64 id, __u32 mode,
+ int count)
+{
+ struct echo_thread_info *info = echo_env_info(env);
+ struct lu_name *lname = &info->eti_lname;
+ struct md_attr *ma = &info->eti_ma;
+ struct lu_device *ld = ed->ed_next;
+ struct lu_object *parent;
+ int rc = 0;
+ int i;
+
+ parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
+ if (parent == NULL)
+ return -EINVAL;
+
+ memset(ma, 0, sizeof(*ma));
+ ma->ma_attr.la_mode = mode;
+ ma->ma_attr.la_valid = LA_CTIME;
+ ma->ma_attr.la_ctime = cfs_time_current_64();
+ ma->ma_need = MA_INODE;
+ ma->ma_valid = 0;
+
+ if (name != NULL) {
+ lname->ln_name = name;
+ lname->ln_namelen = namelen;
+ rc = echo_md_destroy_internal(env, ed, lu2md(parent), lname,
+ ma);
+ return rc;
+ }
+
+ /* prepare the requests */
+ for (i = 0; i < count; i++) {
+ char *tmp_name = info->eti_name;
+
+ ma->ma_valid = 0;
+ echo_md_build_name(lname, tmp_name, id);
+
+ rc = echo_md_destroy_internal(env, ed, lu2md(parent), lname,
+ ma);
+ if (rc) {
+ CERROR("Can not unlink child %s: rc = %d\n", name, rc);
+ break;
+ }
+ id++;
+ }
+
+ return rc;
+}
+
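+/* Walk @path component by component, starting from the MDD root object,
+ * and return the lu_object of the final component. */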
+static struct lu_object *echo_resolve_path(const struct lu_env *env,
+ struct echo_device *ed, char *path,
+ int path_len)
+{
+ struct lu_device *ld = ed->ed_next;
+ struct md_device *md = lu2md_dev(ld);
+ struct echo_thread_info *info = echo_env_info(env);
+ struct lu_fid *fid = &info->eti_fid;
+ struct lu_name *lname = &info->eti_lname;
+ struct lu_object *parent = NULL;
+ struct lu_object *child = NULL;
+ int rc = 0;
+
+ /* Only support MDD layer right now */
+ rc = md->md_ops->mdo_root_get(env, md, fid);
+ if (rc) {
+ CERROR("get root error: rc = %d\n", rc);
+ return ERR_PTR(rc);
+ }
+
+ /* In the function below, .hs_keycmp resolves to
+ * lu_obj_hop_keycmp() */
+ /* coverity[overrun-buffer-val] */
+ parent = lu_object_find_at(env, &ed->ed_cl.cd_lu_dev, fid, NULL);
+ if (IS_ERR(parent)) {
+ CERROR("Can not find the parent "DFID": rc = %ld\n",
+ PFID(fid), PTR_ERR(parent));
+ return parent;
+ }
+
+ while (1) {
+ struct lu_object *ld_parent;
+ char *e;
+
+ e = strsep(&path, "/");
+ if (e == NULL)
+ break;
+
+ if (e[0] == 0) {
+ if (!path || path[0] == '\0')
+ break;
+ continue;
+ }
+
+ lname->ln_name = e;
+ lname->ln_namelen = strlen(e);
+
+ ld_parent = lu_object_locate(parent->lo_header, ld->ld_type);
+ if (ld_parent == NULL) {
+ lu_object_put(env, parent);
+ rc = -EINVAL;
+ break;
+ }
+
+ child = echo_md_lookup(env, ed, lu2md(ld_parent), lname);
+ lu_object_put(env, parent);
+ if (IS_ERR(child)) {
+ rc = (int)PTR_ERR(child);
+ CERROR("lookup %s under parent "DFID": rc = %d\n",
+ lname->ln_name, PFID(lu_object_fid(ld_parent)),
+ rc);
+ break;
+ }
+ parent = child;
+ }
+ if (rc)
+ return ERR_PTR(rc);
+
+ return parent;
+}
+
+static void echo_ucred_init(struct lu_env *env)
+{
+ struct lu_ucred *ucred = lu_ucred(env);
+
+ ucred->uc_valid = UCRED_INVALID;
+
+ ucred->uc_suppgids[0] = -1;
+ ucred->uc_suppgids[1] = -1;
+
+ ucred->uc_uid = ucred->uc_o_uid =
+ from_kuid(&init_user_ns, current_uid());
+ ucred->uc_gid = ucred->uc_o_gid =
+ from_kgid(&init_user_ns, current_gid());
+ ucred->uc_fsuid = ucred->uc_o_fsuid =
+ from_kuid(&init_user_ns, current_fsuid());
+ ucred->uc_fsgid = ucred->uc_o_fsgid =
+ from_kgid(&init_user_ns, current_fsgid());
+ ucred->uc_cap = cfs_curproc_cap_pack();
+
+ /* remove fs privilege for non-root user. */
+ if (ucred->uc_fsuid)
+ ucred->uc_cap &= ~CFS_CAP_FS_MASK;
+ ucred->uc_valid = UCRED_NEW;
+}
+
+static void echo_ucred_fini(struct lu_env *env)
+{
+ struct lu_ucred *ucred = lu_ucred(env);
+ ucred->uc_valid = UCRED_INIT;
+}
+
+#define ECHO_MD_CTX_TAG (LCT_REMEMBER | LCT_MD_THREAD)
+#define ECHO_MD_SES_TAG (LCT_REMEMBER | LCT_SESSION)
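+
+/* Dispatcher for the ECHO_MD_* sub-commands of the OBD_IOC_ECHO_MD ioctl:
+ * resolve @path to a parent object, copy in the optional name and run the
+ * requested create/destroy/lookup/getattr/setattr workload under a fresh
+ * set of user credentials. */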
+static int echo_md_handler(struct echo_device *ed, int command,
+ char *path, int path_len, __u64 id, int count,
+ struct obd_ioctl_data *data)
+{
+ struct echo_thread_info *info;
+ struct lu_device *ld = ed->ed_next;
+ struct lu_env *env;
+ int refcheck;
+ struct lu_object *parent;
+ char *name = NULL;
+ int namelen = data->ioc_plen2;
+ int rc = 0;
+
+ if (ld == NULL) {
+ CERROR("MD echo client is not being initialized properly\n");
+ return -EINVAL;
+ }
+
+ if (strcmp(ld->ld_type->ldt_name, LUSTRE_MDD_NAME)) {
+ CERROR("Only support MDD layer right now!\n");
+ return -EINVAL;
+ }
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ rc = lu_env_refill_by_tags(env, ECHO_MD_CTX_TAG, ECHO_MD_SES_TAG);
+ if (rc != 0)
+ GOTO(out_env, rc);
+
+ /* init big_lmm buffer */
+ info = echo_env_info(env);
+ LASSERT(info->eti_big_lmm == NULL);
+ OBD_ALLOC_LARGE(info->eti_big_lmm, MIN_MD_SIZE);
+ if (info->eti_big_lmm == NULL)
+ GOTO(out_env, rc = -ENOMEM);
+ info->eti_big_lmmsize = MIN_MD_SIZE;
+
+ parent = echo_resolve_path(env, ed, path, path_len);
+ if (IS_ERR(parent)) {
+ CERROR("Can not resolve the path %s: rc = %ld\n", path,
+ PTR_ERR(parent));
+ GOTO(out_free, rc = PTR_ERR(parent));
+ }
+
+ if (namelen > 0) {
+ OBD_ALLOC(name, namelen + 1);
+ if (name == NULL)
+ GOTO(out_put, rc = -ENOMEM);
+ if (copy_from_user(name, data->ioc_pbuf2, namelen))
+ GOTO(out_name, rc = -EFAULT);
+ }
+
+ echo_ucred_init(env);
+
+ switch (command) {
+ case ECHO_MD_CREATE:
+ case ECHO_MD_MKDIR: {
+ struct echo_thread_info *info = echo_env_info(env);
+ __u32 mode = data->ioc_obdo2.o_mode;
+ struct lu_fid *fid = &info->eti_fid;
+ int stripe_count = (int)data->ioc_obdo2.o_misc;
+ int stripe_index = (int)data->ioc_obdo2.o_stripe_idx;
+
+ rc = ostid_to_fid(fid, &data->ioc_obdo1.o_oi, 0);
+ if (rc != 0)
+ break;
+
+ /* In the function below, .hs_keycmp resolves to
+ * lu_obj_hop_keycmp() */
+ /* coverity[overrun-buffer-val] */
+ rc = echo_create_md_object(env, ed, parent, fid, name, namelen,
+ id, mode, count, stripe_count,
+ stripe_index);
+ break;
+ }
+ case ECHO_MD_DESTROY:
+ case ECHO_MD_RMDIR: {
+ __u32 mode = data->ioc_obdo2.o_mode;
+
+ rc = echo_destroy_object(env, ed, parent, name, namelen,
+ id, mode, count);
+ break;
+ }
+ case ECHO_MD_LOOKUP:
+ rc = echo_lookup_object(env, ed, parent, id, count);
+ break;
+ case ECHO_MD_GETATTR:
+ rc = echo_getattr_object(env, ed, parent, id, count);
+ break;
+ case ECHO_MD_SETATTR:
+ rc = echo_setattr_object(env, ed, parent, id, count);
+ break;
+ default:
+ CERROR("unknown command %d\n", command);
+ rc = -EINVAL;
+ break;
+ }
+ echo_ucred_fini(env);
+
+out_name:
+ if (name != NULL)
+ OBD_FREE(name, namelen + 1);
+out_put:
+ lu_object_put(env, parent);
+out_free:
+ LASSERT(info->eti_big_lmm);
+ OBD_FREE_LARGE(info->eti_big_lmm, info->eti_big_lmmsize);
+ info->eti_big_lmm = NULL;
+ info->eti_big_lmmsize = 0;
+out_env:
+ cl_env_put(env, &refcheck);
+ return rc;
+}
+
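+/* Create an echo object described by @oa and the optional user-supplied
+ * striping in @ulsm; when @on_target is set the object is also created on
+ * the backing device via obd_create(). */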
+static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
+ int on_target, struct obdo *oa, void *ulsm,
+ int ulsm_nob, struct obd_trans_info *oti)
+{
+ struct echo_object *eco;
+ struct echo_client_obd *ec = ed->ed_ec;
+ struct lov_stripe_md *lsm = NULL;
+ int rc;
+ int created = 0;
+
+ if ((oa->o_valid & OBD_MD_FLID) == 0 && /* no obj id */
+ (on_target || /* set_stripe */
+ ec->ec_nstripes != 0)) { /* LOV */
+ CERROR ("No valid oid\n");
+ return -EINVAL;
+ }
+
+ rc = echo_alloc_memmd(ed, &lsm);
+ if (rc < 0) {
+ CERROR("Cannot allocate md: rc = %d\n", rc);
+ GOTO(failed, rc);
+ }
+
+ if (ulsm != NULL) {
+ int i, idx;
+
+ rc = echo_copyin_lsm(ed, lsm, ulsm, ulsm_nob);
+ if (rc != 0)
+ GOTO(failed, rc);
+
+ if (lsm->lsm_stripe_count == 0)
+ lsm->lsm_stripe_count = ec->ec_nstripes;
+
+ if (lsm->lsm_stripe_size == 0)
+ lsm->lsm_stripe_size = PAGE_CACHE_SIZE;
+
+ idx = cfs_rand();
+
+ /* setup stripes: indices + default ids if required */
+ for (i = 0; i < lsm->lsm_stripe_count; i++) {
+ if (ostid_id(&lsm->lsm_oinfo[i]->loi_oi) == 0)
+ lsm->lsm_oinfo[i]->loi_oi = lsm->lsm_oi;
+
+ lsm->lsm_oinfo[i]->loi_ost_idx =
+ (idx + i) % ec->ec_nstripes;
+ }
+ }
+
+ /* setup object ID here for !on_target and LOV hint */
+ if (oa->o_valid & OBD_MD_FLID) {
+ LASSERT(oa->o_valid & OBD_MD_FLGROUP);
+ lsm->lsm_oi = oa->o_oi;
+ }
+
+ if (ostid_id(&lsm->lsm_oi) == 0)
+ ostid_set_id(&lsm->lsm_oi, ++last_object_id);
+
+ rc = 0;
+ if (on_target) {
+ /* Only echo objects are allowed to be created */
+ LASSERT((oa->o_valid & OBD_MD_FLGROUP) &&
+ (ostid_seq(&oa->o_oi) == FID_SEQ_ECHO));
+ rc = obd_create(env, ec->ec_exp, oa, &lsm, oti);
+ if (rc != 0) {
+ CERROR("Cannot create objects: rc = %d\n", rc);
+ GOTO(failed, rc);
+ }
+ created = 1;
+ }
+
+ /* See what object ID we were given */
+ oa->o_oi = lsm->lsm_oi;
+ oa->o_valid |= OBD_MD_FLID;
+
+ eco = cl_echo_object_find(ed, &lsm);
+ if (IS_ERR(eco))
+ GOTO(failed, rc = PTR_ERR(eco));
+ cl_echo_object_put(eco);
+
+ CDEBUG(D_INFO, "oa oid "DOSTID"\n", POSTID(&oa->o_oi));
+
+failed:
+ if (created && rc)
+ obd_destroy(env, ec->ec_exp, oa, lsm, oti, NULL, NULL);
+ if (lsm)
+ echo_free_memmd(ed, &lsm);
+ if (rc)
+ CERROR("create object failed with: rc = %d\n", rc);
+ return rc;
+}
+
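+/* Instantiate the echo_object named by @oa so it can be used for I/O;
+ * object id 0 is rejected. */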
+static int echo_get_object(struct echo_object **ecop, struct echo_device *ed,
+ struct obdo *oa)
+{
+ struct lov_stripe_md *lsm = NULL;
+ struct echo_object *eco;
+ int rc;
+
+ if ((oa->o_valid & OBD_MD_FLID) == 0 || ostid_id(&oa->o_oi) == 0) {
+ /* disallow use of object id 0 */
+ CERROR ("No valid oid\n");
+ return -EINVAL;
+ }
+
+ rc = echo_alloc_memmd(ed, &lsm);
+ if (rc < 0)
+ return rc;
+
+ lsm->lsm_oi = oa->o_oi;
+ if (!(oa->o_valid & OBD_MD_FLGROUP))
+ ostid_set_seq_echo(&lsm->lsm_oi);
+
+ rc = 0;
+ eco = cl_echo_object_find(ed, &lsm);
+ if (!IS_ERR(eco))
+ *ecop = eco;
+ else
+ rc = PTR_ERR(eco);
+ if (lsm)
+ echo_free_memmd(ed, &lsm);
+ return rc;
+}
+
+static void echo_put_object(struct echo_object *eco)
+{
+ if (cl_echo_object_put(eco))
+ CERROR("echo client: drop an object failed");
+}
+
+static void
+echo_get_stripe_off_id(struct lov_stripe_md *lsm, obd_off *offp, obd_id *idp)
+{
+ unsigned long stripe_count;
+ unsigned long stripe_size;
+ unsigned long width;
+ unsigned long woffset;
+ int stripe_index;
+ obd_off offset;
+
+ if (lsm->lsm_stripe_count <= 1)
+ return;
+
+ offset = *offp;
+ stripe_size = lsm->lsm_stripe_size;
+ stripe_count = lsm->lsm_stripe_count;
+
+ /* width = # bytes in all stripes */
+ width = stripe_size * stripe_count;
+
+ /* woffset = offset within a width; offset = whole number of widths */
+ woffset = do_div(offset, width);
+
+ stripe_index = woffset / stripe_size;
+
+ *idp = ostid_id(&lsm->lsm_oinfo[stripe_index]->loi_oi);
+ *offp = offset * stripe_size + woffset % stripe_size;
+}
+
+static void
+echo_client_page_debug_setup(struct lov_stripe_md *lsm,
+ struct page *page, int rw, obd_id id,
+ obd_off offset, obd_off count)
+{
+ char *addr;
+ obd_off stripe_off;
+ obd_id stripe_id;
+ int delta;
+
+ /* no partial pages on the client */
+ LASSERT(count == PAGE_CACHE_SIZE);
+
+ addr = kmap(page);
+
+ for (delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
+ if (rw == OBD_BRW_WRITE) {
+ stripe_off = offset + delta;
+ stripe_id = id;
+ echo_get_stripe_off_id(lsm, &stripe_off, &stripe_id);
+ } else {
+ stripe_off = 0xdeadbeef00c0ffeeULL;
+ stripe_id = 0xdeadbeef00c0ffeeULL;
+ }
+ block_debug_setup(addr + delta, OBD_ECHO_BLOCK_SIZE,
+ stripe_off, stripe_id);
+ }
+
+ kunmap(page);
+}
+
+static int echo_client_page_debug_check(struct lov_stripe_md *lsm,
+ struct page *page, obd_id id,
+ obd_off offset, obd_off count)
+{
+ obd_off stripe_off;
+ obd_id stripe_id;
+ char *addr;
+ int delta;
+ int rc;
+ int rc2;
+
+ /* no partial pages on the client */
+ LASSERT(count == PAGE_CACHE_SIZE);
+
+ addr = kmap(page);
+
+ for (rc = delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
+ stripe_off = offset + delta;
+ stripe_id = id;
+ echo_get_stripe_off_id(lsm, &stripe_off, &stripe_id);
+
+ rc2 = block_debug_check("test_brw",
+ addr + delta, OBD_ECHO_BLOCK_SIZE,
+ stripe_off, stripe_id);
+ if (rc2 != 0) {
+ CERROR ("Error in echo object "LPX64"\n", id);
+ rc = rc2;
+ }
+ }
+
+ kunmap(page);
+ return rc;
+}
+
+static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
+ struct echo_object *eco, obd_off offset,
+ obd_size count, int async,
+ struct obd_trans_info *oti)
+{
+ struct lov_stripe_md *lsm = eco->eo_lsm;
+ obd_count npages;
+ struct brw_page *pga;
+ struct brw_page *pgp;
+ struct page **pages;
+ obd_off off;
+ int i;
+ int rc;
+ int verify;
+ int gfp_mask;
+ int brw_flags = 0;
+
+ verify = (ostid_id(&oa->o_oi) != ECHO_PERSISTENT_OBJID &&
+ (oa->o_valid & OBD_MD_FLFLAGS) != 0 &&
+ (oa->o_flags & OBD_FL_DEBUG_CHECK) != 0);
+
+ gfp_mask = ((ostid_id(&oa->o_oi) & 2) == 0) ? GFP_IOFS : GFP_HIGHUSER;
+
+ LASSERT(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ);
+ LASSERT(lsm != NULL);
+ LASSERT(ostid_id(&lsm->lsm_oi) == ostid_id(&oa->o_oi));
+
+ if (count <= 0 ||
+ (count & (~CFS_PAGE_MASK)) != 0)
+ return -EINVAL;
+
+ /* XXX think again with misaligned I/O */
+ npages = count >> PAGE_CACHE_SHIFT;
+
+ if (rw == OBD_BRW_WRITE)
+ brw_flags = OBD_BRW_ASYNC;
+
+ OBD_ALLOC(pga, npages * sizeof(*pga));
+ if (pga == NULL)
+ return -ENOMEM;
+
+ OBD_ALLOC(pages, npages * sizeof(*pages));
+ if (pages == NULL) {
+ OBD_FREE(pga, npages * sizeof(*pga));
+ return -ENOMEM;
+ }
+
+ for (i = 0, pgp = pga, off = offset;
+ i < npages;
+ i++, pgp++, off += PAGE_CACHE_SIZE) {
+
+ LASSERT(pgp->pg == NULL); /* for cleanup */
+
+ rc = -ENOMEM;
+ OBD_PAGE_ALLOC(pgp->pg, gfp_mask);
+ if (pgp->pg == NULL)
+ goto out;
+
+ pages[i] = pgp->pg;
+ pgp->count = PAGE_CACHE_SIZE;
+ pgp->off = off;
+ pgp->flag = brw_flags;
+
+ if (verify)
+ echo_client_page_debug_setup(lsm, pgp->pg, rw,
+ ostid_id(&oa->o_oi), off,
+ pgp->count);
+ }
+
+ /* brw mode can only be used at client */
+ LASSERT(ed->ed_next != NULL);
+ rc = cl_echo_object_brw(eco, rw, offset, pages, npages, async);
+
+out:
+ if (rc != 0 || rw != OBD_BRW_READ)
+ verify = 0;
+
+ for (i = 0, pgp = pga; i < npages; i++, pgp++) {
+ if (pgp->pg == NULL)
+ continue;
+
+ if (verify) {
+ int vrc;
+ vrc = echo_client_page_debug_check(lsm, pgp->pg,
+ ostid_id(&oa->o_oi),
+ pgp->off, pgp->count);
+ if (vrc != 0 && rc == 0)
+ rc = vrc;
+ }
+ OBD_PAGE_FREE(pgp->pg);
+ }
+ OBD_FREE(pga, npages * sizeof(*pga));
+ OBD_FREE(pages, npages * sizeof(*pages));
+ return rc;
+}
+
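+/* Drive the target's bulk path directly through obd_preprw()/obd_commitrw(),
+ * @batch bytes at a time, optionally filling or verifying the debug pattern
+ * in each page. */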
+static int echo_client_prep_commit(const struct lu_env *env,
+ struct obd_export *exp, int rw,
+ struct obdo *oa, struct echo_object *eco,
+ obd_off offset, obd_size count,
+ obd_size batch, struct obd_trans_info *oti,
+ int async)
+{
+ struct lov_stripe_md *lsm = eco->eo_lsm;
+ struct obd_ioobj ioo;
+ struct niobuf_local *lnb;
+ struct niobuf_remote *rnb;
+ obd_off off;
+ obd_size npages, tot_pages;
+ int i, ret = 0, brw_flags = 0;
+
+ if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0 ||
+ (lsm != NULL && ostid_id(&lsm->lsm_oi) != ostid_id(&oa->o_oi)))
+ return -EINVAL;
+
+ npages = batch >> PAGE_CACHE_SHIFT;
+ tot_pages = count >> PAGE_CACHE_SHIFT;
+
+ OBD_ALLOC(lnb, npages * sizeof(struct niobuf_local));
+ OBD_ALLOC(rnb, npages * sizeof(struct niobuf_remote));
+
+ if (lnb == NULL || rnb == NULL)
+ GOTO(out, ret = -ENOMEM);
+
+ if (rw == OBD_BRW_WRITE && async)
+ brw_flags |= OBD_BRW_ASYNC;
+
+ obdo_to_ioobj(oa, &ioo);
+
+ off = offset;
+
+ for (; tot_pages; tot_pages -= npages) {
+ int lpages;
+
+ if (tot_pages < npages)
+ npages = tot_pages;
+
+ for (i = 0; i < npages; i++, off += PAGE_CACHE_SIZE) {
+ rnb[i].offset = off;
+ rnb[i].len = PAGE_CACHE_SIZE;
+ rnb[i].flags = brw_flags;
+ }
+
+ ioo.ioo_bufcnt = npages;
+ oti->oti_transno = 0;
+
+ lpages = npages;
+ ret = obd_preprw(env, rw, exp, oa, 1, &ioo, rnb, &lpages,
+ lnb, oti, NULL);
+ if (ret != 0)
+ GOTO(out, ret);
+ LASSERT(lpages == npages);
+
+ for (i = 0; i < lpages; i++) {
+ struct page *page = lnb[i].page;
+
+ /* read past eof? */
+ if (page == NULL && lnb[i].rc == 0)
+ continue;
+
+ if (async)
+ lnb[i].flags |= OBD_BRW_ASYNC;
+
+ if (ostid_id(&oa->o_oi) == ECHO_PERSISTENT_OBJID ||
+ (oa->o_valid & OBD_MD_FLFLAGS) == 0 ||
+ (oa->o_flags & OBD_FL_DEBUG_CHECK) == 0)
+ continue;
+
+ if (rw == OBD_BRW_WRITE)
+ echo_client_page_debug_setup(lsm, page, rw,
+ ostid_id(&oa->o_oi),
+ rnb[i].offset,
+ rnb[i].len);
+ else
+ echo_client_page_debug_check(lsm, page,
+ ostid_id(&oa->o_oi),
+ rnb[i].offset,
+ rnb[i].len);
+ }
+
+ ret = obd_commitrw(env, rw, exp, oa, 1, &ioo,
+ rnb, npages, lnb, oti, ret);
+ if (ret != 0)
+ GOTO(out, ret);
+
+ /* Reset oti otherwise it would confuse ldiskfs. */
+ memset(oti, 0, sizeof(*oti));
+
+ /* Reuse env context. */
+ lu_context_exit((struct lu_context *)&env->le_ctx);
+ lu_context_enter((struct lu_context *)&env->le_ctx);
+ }
+
+out:
+ if (lnb)
+ OBD_FREE(lnb, npages * sizeof(struct niobuf_local));
+ if (rnb)
+ OBD_FREE(rnb, npages * sizeof(struct niobuf_remote));
+ return ret;
+}
+
+static int echo_client_brw_ioctl(const struct lu_env *env, int rw,
+ struct obd_export *exp,
+ struct obd_ioctl_data *data,
+ struct obd_trans_info *dummy_oti)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+ struct echo_device *ed = obd2echo_dev(obd);
+ struct echo_client_obd *ec = ed->ed_ec;
+ struct obdo *oa = &data->ioc_obdo1;
+ struct echo_object *eco;
+ int rc;
+ int async = 1;
+ long test_mode;
+
+ LASSERT(oa->o_valid & OBD_MD_FLGROUP);
+
+ rc = echo_get_object(&eco, ed, oa);
+ if (rc)
+ return rc;
+
+ oa->o_valid &= ~OBD_MD_FLHANDLE;
+
+ /* OFD/obdfilter works only via prep/commit */
+ test_mode = (long)data->ioc_pbuf1;
+ if (test_mode == 1)
+ async = 0;
+
+ if (ed->ed_next == NULL && test_mode != 3) {
+ test_mode = 3;
+ data->ioc_plen1 = data->ioc_count;
+ }
+
+ /* Truncate batch size to maximum */
+ if (data->ioc_plen1 > PTLRPC_MAX_BRW_SIZE)
+ data->ioc_plen1 = PTLRPC_MAX_BRW_SIZE;
+
+ switch (test_mode) {
+ case 1:
+ /* fall through */
+ case 2:
+ rc = echo_client_kbrw(ed, rw, oa,
+ eco, data->ioc_offset,
+ data->ioc_count, async, dummy_oti);
+ break;
+ case 3:
+ rc = echo_client_prep_commit(env, ec->ec_exp, rw, oa,
+ eco, data->ioc_offset,
+ data->ioc_count, data->ioc_plen1,
+ dummy_oti, async);
+ break;
+ default:
+ rc = -EINVAL;
+ }
+ echo_put_object(eco);
+ return rc;
+}
+
+static int
+echo_client_enqueue(struct obd_export *exp, struct obdo *oa,
+ int mode, obd_off offset, obd_size nob)
+{
+ struct echo_device *ed = obd2echo_dev(exp->exp_obd);
+ struct lustre_handle *ulh = &oa->o_handle;
+ struct echo_object *eco;
+ obd_off end;
+ int rc;
+
+ if (ed->ed_next == NULL)
+ return -EOPNOTSUPP;
+
+ if (!(mode == LCK_PR || mode == LCK_PW))
+ return -EINVAL;
+
+ if ((offset & (~CFS_PAGE_MASK)) != 0 ||
+ (nob & (~CFS_PAGE_MASK)) != 0)
+ return -EINVAL;
+
+ rc = echo_get_object(&eco, ed, oa);
+ if (rc != 0)
+ return rc;
+
+ end = (nob == 0) ? ((obd_off) -1) : (offset + nob - 1);
+ rc = cl_echo_enqueue(eco, offset, end, mode, &ulh->cookie);
+ if (rc == 0) {
+ oa->o_valid |= OBD_MD_FLHANDLE;
+ CDEBUG(D_INFO, "Cookie is "LPX64"\n", ulh->cookie);
+ }
+ echo_put_object(eco);
+ return rc;
+}
+
+static int
+echo_client_cancel(struct obd_export *exp, struct obdo *oa)
+{
+ struct echo_device *ed = obd2echo_dev(exp->exp_obd);
+ __u64 cookie = oa->o_handle.cookie;
+
+ if ((oa->o_valid & OBD_MD_FLHANDLE) == 0)
+ return -EINVAL;
+
+ CDEBUG(D_INFO, "Cookie is "LPX64"\n", cookie);
+ return cl_echo_cancel(ed, cookie);
+}
+
+static int
+echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
+ void *karg, void *uarg)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct echo_device *ed = obd2echo_dev(obd);
+ struct echo_client_obd *ec = ed->ed_ec;
+ struct echo_object *eco;
+ struct obd_ioctl_data *data = karg;
+ struct obd_trans_info dummy_oti;
+ struct lu_env *env;
+ struct oti_req_ack_lock *ack_lock;
+ struct obdo *oa;
+ struct lu_fid fid;
+ int rw = OBD_BRW_READ;
+ int rc = 0;
+ int i;
+
+ memset(&dummy_oti, 0, sizeof(dummy_oti));
+
+ oa = &data->ioc_obdo1;
+ if (!(oa->o_valid & OBD_MD_FLGROUP)) {
+ oa->o_valid |= OBD_MD_FLGROUP;
+ ostid_set_seq_echo(&oa->o_oi);
+ }
+
+ /* This FID is unpacked just for validation at this point */
+ rc = ostid_to_fid(&fid, &oa->o_oi, 0);
+ if (rc < 0)
+ return rc;
+
+ OBD_ALLOC_PTR(env);
+ if (env == NULL)
+ return -ENOMEM;
+
+ rc = lu_env_init(env, LCT_DT_THREAD);
+ if (rc)
+ GOTO(out, rc = -ENOMEM);
+
+ switch (cmd) {
+ case OBD_IOC_CREATE: /* may create echo object */
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ GOTO(out, rc = -EPERM);
+
+ rc = echo_create_object(env, ed, 1, oa, data->ioc_pbuf1,
+ data->ioc_plen1, &dummy_oti);
+ GOTO(out, rc);
+
+ case OBD_IOC_ECHO_MD: {
+ int count;
+ int cmd;
+ char *dir = NULL;
+ int dirlen;
+ __u64 id;
+
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ GOTO(out, rc = -EPERM);
+
+ count = data->ioc_count;
+ cmd = data->ioc_command;
+
+ id = ostid_id(&data->ioc_obdo2.o_oi);
+
+ dirlen = data->ioc_plen1;
+ OBD_ALLOC(dir, dirlen + 1);
+ if (dir == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ if (copy_from_user(dir, data->ioc_pbuf1, dirlen)) {
+ OBD_FREE(dir, dirlen + 1);
+ GOTO(out, rc = -EFAULT);
+ }
+
+ rc = echo_md_handler(ed, cmd, dir, dirlen, id, count, data);
+ OBD_FREE(dir, dirlen + 1);
+ GOTO(out, rc);
+ }
+ case OBD_IOC_ECHO_ALLOC_SEQ: {
+ struct lu_env *cl_env;
+ int refcheck;
+ __u64 seq;
+ int max_count;
+
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ GOTO(out, rc = -EPERM);
+
+ cl_env = cl_env_get(&refcheck);
+ if (IS_ERR(cl_env))
+ GOTO(out, rc = PTR_ERR(cl_env));
+
+ rc = lu_env_refill_by_tags(cl_env, ECHO_MD_CTX_TAG,
+ ECHO_MD_SES_TAG);
+ if (rc != 0) {
+ cl_env_put(cl_env, &refcheck);
+ GOTO(out, rc);
+ }
+
+ rc = seq_client_get_seq(cl_env, ed->ed_cl_seq, &seq);
+ cl_env_put(cl_env, &refcheck);
+ if (rc < 0) {
+ CERROR("%s: Can not alloc seq: rc = %d\n",
+ obd->obd_name, rc);
+ GOTO(out, rc);
+ }
+
+ if (copy_to_user(data->ioc_pbuf1, &seq, data->ioc_plen1))
+ GOTO(out, rc = -EFAULT);
+
+ max_count = LUSTRE_METADATA_SEQ_MAX_WIDTH;
+ if (copy_to_user(data->ioc_pbuf2, &max_count,
+ data->ioc_plen2))
+ GOTO(out, rc = -EFAULT);
+ GOTO(out, rc);
+ }
+ case OBD_IOC_DESTROY:
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ GOTO(out, rc = -EPERM);
+
+ rc = echo_get_object(&eco, ed, oa);
+ if (rc == 0) {
+ rc = obd_destroy(env, ec->ec_exp, oa, eco->eo_lsm,
+ &dummy_oti, NULL, NULL);
+ if (rc == 0)
+ eco->eo_deleted = 1;
+ echo_put_object(eco);
+ }
+ GOTO(out, rc);
+
+ case OBD_IOC_GETATTR:
+ rc = echo_get_object(&eco, ed, oa);
+ if (rc == 0) {
+ struct obd_info oinfo = { { { 0 } } };
+ oinfo.oi_md = eco->eo_lsm;
+ oinfo.oi_oa = oa;
+ rc = obd_getattr(env, ec->ec_exp, &oinfo);
+ echo_put_object(eco);
+ }
+ GOTO(out, rc);
+
+ case OBD_IOC_SETATTR:
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ GOTO(out, rc = -EPERM);
+
+ rc = echo_get_object(&eco, ed, oa);
+ if (rc == 0) {
+ struct obd_info oinfo = { { { 0 } } };
+ oinfo.oi_oa = oa;
+ oinfo.oi_md = eco->eo_lsm;
+
+ rc = obd_setattr(env, ec->ec_exp, &oinfo, NULL);
+ echo_put_object(eco);
+ }
+ GOTO(out, rc);
+
+ case OBD_IOC_BRW_WRITE:
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ GOTO(out, rc = -EPERM);
+
+ rw = OBD_BRW_WRITE;
+ /* fall through */
+ case OBD_IOC_BRW_READ:
+ rc = echo_client_brw_ioctl(env, rw, exp, data, &dummy_oti);
+ GOTO(out, rc);
+
+ case ECHO_IOC_GET_STRIPE:
+ rc = echo_get_object(&eco, ed, oa);
+ if (rc == 0) {
+ rc = echo_copyout_lsm(eco->eo_lsm, data->ioc_pbuf1,
+ data->ioc_plen1);
+ echo_put_object(eco);
+ }
+ GOTO(out, rc);
+
+ case ECHO_IOC_SET_STRIPE:
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ GOTO(out, rc = -EPERM);
+
+ if (data->ioc_pbuf1 == NULL) { /* unset */
+ rc = echo_get_object(&eco, ed, oa);
+ if (rc == 0) {
+ eco->eo_deleted = 1;
+ echo_put_object(eco);
+ }
+ } else {
+ rc = echo_create_object(env, ed, 0, oa,
+ data->ioc_pbuf1,
+ data->ioc_plen1, &dummy_oti);
+ }
+ GOTO(out, rc);
+
+ case ECHO_IOC_ENQUEUE:
+ if (!cfs_capable(CFS_CAP_SYS_ADMIN))
+ GOTO(out, rc = -EPERM);
+
+ rc = echo_client_enqueue(exp, oa,
+ data->ioc_conn1, /* lock mode */
+ data->ioc_offset,
+ data->ioc_count);/*extent*/
+ GOTO(out, rc);
+
+ case ECHO_IOC_CANCEL:
+ rc = echo_client_cancel(exp, oa);
+ GOTO(out, rc);
+
+ default:
+ CERROR ("echo_ioctl(): unrecognised ioctl %#x\n", cmd);
+ GOTO (out, rc = -ENOTTY);
+ }
+
+out:
+ lu_env_fini(env);
+ OBD_FREE_PTR(env);
+
+ /* XXX this should be in a helper also called by target_send_reply */
+ for (ack_lock = dummy_oti.oti_ack_locks, i = 0; i < 4;
+ i++, ack_lock++) {
+ if (!ack_lock->mode)
+ break;
+ ldlm_lock_decref(&ack_lock->lock, ack_lock->mode);
+ }
+
+ return rc;
+}
+
+static int echo_client_setup(const struct lu_env *env,
+ struct obd_device *obddev, struct lustre_cfg *lcfg)
+{
+ struct echo_client_obd *ec = &obddev->u.echo_client;
+ struct obd_device *tgt;
+ struct obd_uuid echo_uuid = { "ECHO_UUID" };
+ struct obd_connect_data *ocd = NULL;
+ int rc;
+
+ if (lcfg->lcfg_bufcount < 2 || LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
+ CERROR("requires a TARGET OBD name\n");
+ return -EINVAL;
+ }
+
+ tgt = class_name2obd(lustre_cfg_string(lcfg, 1));
+ if (!tgt || !tgt->obd_attached || !tgt->obd_set_up) {
+ CERROR("device not attached or not set up (%s)\n",
+ lustre_cfg_string(lcfg, 1));
+ return -EINVAL;
+ }
+
+ spin_lock_init(&ec->ec_lock);
+ INIT_LIST_HEAD(&ec->ec_objects);
+ INIT_LIST_HEAD(&ec->ec_locks);
+ ec->ec_unique = 0;
+ ec->ec_nstripes = 0;
+
+ if (!strcmp(tgt->obd_type->typ_name, LUSTRE_MDT_NAME)) {
+ lu_context_tags_update(ECHO_MD_CTX_TAG);
+ lu_session_tags_update(ECHO_MD_SES_TAG);
+ return 0;
+ }
+
+ OBD_ALLOC(ocd, sizeof(*ocd));
+ if (ocd == NULL) {
+ CERROR("Can't alloc ocd connecting to %s\n",
+ lustre_cfg_string(lcfg, 1));
+ return -ENOMEM;
+ }
+
+ ocd->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_REQPORTAL |
+ OBD_CONNECT_BRW_SIZE |
+ OBD_CONNECT_GRANT | OBD_CONNECT_FULL20 |
+ OBD_CONNECT_64BITHASH | OBD_CONNECT_LVB_TYPE |
+ OBD_CONNECT_FID;
+ ocd->ocd_brw_size = DT_MAX_BRW_SIZE;
+ ocd->ocd_version = LUSTRE_VERSION_CODE;
+ ocd->ocd_group = FID_SEQ_ECHO;
+
+ rc = obd_connect(env, &ec->ec_exp, tgt, &echo_uuid, ocd, NULL);
+ if (rc == 0) {
+ /* Turn off pinger because it connects to tgt obd directly. */
+ spin_lock(&tgt->obd_dev_lock);
+ list_del_init(&ec->ec_exp->exp_obd_chain_timed);
+ spin_unlock(&tgt->obd_dev_lock);
+ }
+
+ OBD_FREE(ocd, sizeof(*ocd));
+
+ if (rc != 0) {
+ CERROR("fail to connect to device %s\n",
+ lustre_cfg_string(lcfg, 1));
+ return rc;
+ }
+
+ return rc;
+}
+
+static int echo_client_cleanup(struct obd_device *obddev)
+{
+ struct echo_device *ed = obd2echo_dev(obddev);
+ struct echo_client_obd *ec = &obddev->u.echo_client;
+ int rc;
+
+ /* Do nothing for Metadata echo client */
+ if (ed == NULL)
+ return 0;
+
+ if (ed->ed_next_ismd) {
+ lu_context_tags_clear(ECHO_MD_CTX_TAG);
+ lu_session_tags_clear(ECHO_MD_SES_TAG);
+ return 0;
+ }
+
+ if (!list_empty(&obddev->obd_exports)) {
+ CERROR("still has clients!\n");
+ return -EBUSY;
+ }
+
+ LASSERT(atomic_read(&ec->ec_exp->exp_refcount) > 0);
+ rc = obd_disconnect(ec->ec_exp);
+ if (rc != 0)
+ CERROR("fail to disconnect device: %d\n", rc);
+
+ return rc;
+}
+
+static int echo_client_connect(const struct lu_env *env,
+ struct obd_export **exp,
+ struct obd_device *src, struct obd_uuid *cluuid,
+ struct obd_connect_data *data, void *localdata)
+{
+ int rc;
+ struct lustre_handle conn = { 0 };
+
+ rc = class_connect(&conn, src, cluuid);
+ if (rc == 0)
+ *exp = class_conn2export(&conn);
+
+ return rc;
+}
+
+static int echo_client_disconnect(struct obd_export *exp)
+{
+#if 0
+ struct obd_device *obd;
+ struct echo_client_obd *ec;
+ struct ec_lock *ecl;
+#endif
+ int rc;
+
+ if (exp == NULL)
+ GOTO(out, rc = -EINVAL);
+
+#if 0
+ obd = exp->exp_obd;
+ ec = &obd->u.echo_client;
+
+ /* no more contention on export's lock list */
+ while (!list_empty (&exp->exp_ec_data.eced_locks)) {
+ ecl = list_entry (exp->exp_ec_data.eced_locks.next,
+ struct ec_lock, ecl_exp_chain);
+ list_del (&ecl->ecl_exp_chain);
+
+ rc = obd_cancel(ec->ec_exp, ecl->ecl_object->eco_lsm,
+ ecl->ecl_mode, &ecl->ecl_lock_handle);
+
+ CDEBUG (D_INFO, "Cancel lock on object "LPX64" on disconnect "
+ "(%d)\n", ecl->ecl_object->eco_id, rc);
+
+ echo_put_object (ecl->ecl_object);
+ OBD_FREE (ecl, sizeof (*ecl));
+ }
+#endif
+
+ rc = class_disconnect(exp);
+ GOTO(out, rc);
+out:
+ return rc;
+}
+
+static struct obd_ops echo_client_obd_ops = {
+ .o_owner = THIS_MODULE,
+
+#if 0
+ .o_setup = echo_client_setup,
+ .o_cleanup = echo_client_cleanup,
+#endif
+
+ .o_iocontrol = echo_client_iocontrol,
+ .o_connect = echo_client_connect,
+ .o_disconnect = echo_client_disconnect
+};
+
+int echo_client_init(void)
+{
+ struct lprocfs_static_vars lvars = { 0 };
+ int rc;
+
+ lprocfs_echo_init_vars(&lvars);
+
+ rc = lu_kmem_init(echo_caches);
+ if (rc == 0) {
+ rc = class_register_type(&echo_client_obd_ops, NULL,
+ lvars.module_vars,
+ LUSTRE_ECHO_CLIENT_NAME,
+ &echo_device_type);
+ if (rc)
+ lu_kmem_fini(echo_caches);
+ }
+ return rc;
+}
+
+void echo_client_exit(void)
+{
+ class_unregister_type(LUSTRE_ECHO_CLIENT_NAME);
+ lu_kmem_fini(echo_caches);
+}
+
+static int __init obdecho_init(void)
+{
+ struct lprocfs_static_vars lvars;
+ int rc;
+
+ LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n");
+
+ LASSERT(PAGE_CACHE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
+
+ lprocfs_echo_init_vars(&lvars);
+
+ rc = echo_client_init();
+
+ return rc;
+}
+
+static void /*__exit*/ obdecho_exit(void)
+{
+ echo_client_exit();
+}
+
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("Lustre Testing Echo OBD driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
+
+module_init(obdecho_init);
+module_exit(obdecho_exit);
+
+/** @} echo_client */
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_internal.h b/drivers/staging/lustre/lustre/obdecho/echo_internal.h
new file mode 100644
index 0000000..8e9dbc2
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdecho/echo_internal.h
@@ -0,0 +1,47 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Whamcloud, Inc.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdecho/echo_internal.h
+ */
+
+#ifndef _ECHO_INTERNAL_H
+#define _ECHO_INTERNAL_H
+
+/* The persistent object (i.e. actually stores stuff!) */
+#define ECHO_PERSISTENT_OBJID 1ULL
+#define ECHO_PERSISTENT_SIZE ((__u64)(1<<20))
+
+/* block size to use for data verification */
+#define OBD_ECHO_BLOCK_SIZE (4<<10)
+
+
+#endif
diff --git a/drivers/staging/lustre/lustre/obdecho/lproc_echo.c b/drivers/staging/lustre/lustre/obdecho/lproc_echo.c
new file mode 100644
index 0000000..b9abac1
--- /dev/null
+++ b/drivers/staging/lustre/lustre/obdecho/lproc_echo.c
@@ -0,0 +1,57 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+#define DEBUG_SUBSYSTEM S_ECHO
+
+#include <lprocfs_status.h>
+#include <obd_class.h>
+
+#ifdef LPROCFS
+LPROC_SEQ_FOPS_RO_TYPE(echo, uuid);
+static struct lprocfs_vars lprocfs_echo_obd_vars[] = {
+ { "uuid", &echo_uuid_fops, 0, 0 },
+ { 0 }
+};
+
+LPROC_SEQ_FOPS_RO_TYPE(echo, numrefs);
+static struct lprocfs_vars lprocfs_echo_module_vars[] = {
+ { "num_refs", &echo_numrefs_fops, 0, 0 },
+ { 0 }
+};
+
+void lprocfs_echo_init_vars(struct lprocfs_static_vars *lvars)
+{
+ lvars->module_vars = lprocfs_echo_module_vars;
+ lvars->obd_vars = lprocfs_echo_obd_vars;
+}
+#endif /* LPROCFS */
diff --git a/drivers/staging/lustre/lustre/osc/Makefile b/drivers/staging/lustre/lustre/osc/Makefile
new file mode 100644
index 0000000..bbd2f77
--- /dev/null
+++ b/drivers/staging/lustre/lustre/osc/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_LUSTRE_FS) += osc.o
+osc-y := osc_request.o lproc_osc.o osc_dev.o osc_object.o \
+ osc_page.o osc_lock.o osc_io.o osc_quota.o osc_cache.o
+
+ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
new file mode 100644
index 0000000..90d24d8
--- /dev/null
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -0,0 +1,727 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+#define DEBUG_SUBSYSTEM S_CLASS
+
+#include <asm/statfs.h>
+#include <obd_cksum.h>
+#include <obd_class.h>
+#include <lprocfs_status.h>
+#include <linux/seq_file.h>
+#include "osc_internal.h"
+
+#ifdef LPROCFS
+static int osc_active_seq_show(struct seq_file *m, void *v)
+{
+ struct obd_device *dev = m->private;
+ int rc;
+
+ LPROCFS_CLIMP_CHECK(dev);
+ rc = seq_printf(m, "%d\n", !dev->u.cli.cl_import->imp_deactive);
+ LPROCFS_CLIMP_EXIT(dev);
+ return rc;
+}
+
+static ssize_t osc_active_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
+ int val, rc;
+
+ rc = lprocfs_write_helper(buffer, count, &val);
+ if (rc)
+ return rc;
+ if (val < 0 || val > 1)
+ return -ERANGE;
+
+ /* opposite senses */
+ if (dev->u.cli.cl_import->imp_deactive == val)
+ rc = ptlrpc_set_import_active(dev->u.cli.cl_import, val);
+ else
+ CDEBUG(D_CONFIG, "activate %d: ignoring repeat request\n", val);
+
+ return count;
+}
+LPROC_SEQ_FOPS(osc_active);
+
+static int osc_max_rpcs_in_flight_seq_show(struct seq_file *m, void *v)
+{
+ struct obd_device *dev = m->private;
+ struct client_obd *cli = &dev->u.cli;
+ int rc;
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ rc = seq_printf(m, "%u\n", cli->cl_max_rpcs_in_flight);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ return rc;
+}
+
+static ssize_t osc_max_rpcs_in_flight_seq_write(struct file *file,
+ const char *buffer, size_t count, loff_t *off)
+{
+ struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
+ struct client_obd *cli = &dev->u.cli;
+ struct ptlrpc_request_pool *pool = cli->cl_import->imp_rq_pool;
+ int val, rc;
+
+ rc = lprocfs_write_helper(buffer, count, &val);
+ if (rc)
+ return rc;
+
+ if (val < 1 || val > OSC_MAX_RIF_MAX)
+ return -ERANGE;
+
+ LPROCFS_CLIMP_CHECK(dev);
+ if (pool && val > cli->cl_max_rpcs_in_flight)
+ pool->prp_populate(pool, val-cli->cl_max_rpcs_in_flight);
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ cli->cl_max_rpcs_in_flight = val;
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+
+ LPROCFS_CLIMP_EXIT(dev);
+ return count;
+}
+LPROC_SEQ_FOPS(osc_max_rpcs_in_flight);
+
+static int osc_max_dirty_mb_seq_show(struct seq_file *m, void *v)
+{
+ struct obd_device *dev = m->private;
+ struct client_obd *cli = &dev->u.cli;
+ long val;
+ int mult;
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ val = cli->cl_dirty_max;
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+
+ mult = 1 << 20;
+ return lprocfs_seq_read_frac_helper(m, val, mult);
+}
+
+static ssize_t osc_max_dirty_mb_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
+ struct client_obd *cli = &dev->u.cli;
+ int pages_number, mult, rc;
+
+ mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
+ if (rc)
+ return rc;
+
+ if (pages_number <= 0 ||
+ pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_CACHE_SHIFT) ||
+ pages_number > totalram_pages / 4) /* 1/4 of RAM */
+ return -ERANGE;
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ cli->cl_dirty_max = (obd_count)(pages_number << PAGE_CACHE_SHIFT);
+ osc_wake_cache_waiters(cli);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+
+ return count;
+}
+LPROC_SEQ_FOPS(osc_max_dirty_mb);
+
+static int osc_cached_mb_seq_show(struct seq_file *m, void *v)
+{
+ struct obd_device *dev = m->private;
+ struct client_obd *cli = &dev->u.cli;
+ int shift = 20 - PAGE_CACHE_SHIFT;
+ int rc;
+
+ rc = seq_printf(m,
+ "used_mb: %d\n"
+ "busy_cnt: %d\n",
+ (atomic_read(&cli->cl_lru_in_list) +
+ atomic_read(&cli->cl_lru_busy)) >> shift,
+ atomic_read(&cli->cl_lru_busy));
+
+ return rc;
+}
+
+/* shrink the number of caching pages to a specific number */
+static ssize_t osc_cached_mb_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
+ struct client_obd *cli = &dev->u.cli;
+ int pages_number, mult, rc;
+
+ mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ buffer = lprocfs_find_named_value(buffer, "used_mb:", &count);
+ rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
+ if (rc)
+ return rc;
+
+ if (pages_number < 0)
+ return -ERANGE;
+
+ rc = atomic_read(&cli->cl_lru_in_list) - pages_number;
+ if (rc > 0)
+ (void)osc_lru_shrink(cli, rc);
+
+ return count;
+}
+LPROC_SEQ_FOPS(osc_cached_mb);
+
+static int osc_cur_dirty_bytes_seq_show(struct seq_file *m, void *v)
+{
+ struct obd_device *dev = m->private;
+ struct client_obd *cli = &dev->u.cli;
+ int rc;
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ rc = seq_printf(m, "%lu\n", cli->cl_dirty);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ return rc;
+}
+LPROC_SEQ_FOPS_RO(osc_cur_dirty_bytes);
+
+static int osc_cur_grant_bytes_seq_show(struct seq_file *m, void *v)
+{
+ struct obd_device *dev = m->private;
+ struct client_obd *cli = &dev->u.cli;
+ int rc;
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ rc = seq_printf(m, "%lu\n", cli->cl_avail_grant);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ return rc;
+}
+
+static ssize_t osc_cur_grant_bytes_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
+ struct client_obd *cli = &obd->u.cli;
+ int rc;
+ __u64 val;
+
+ if (obd == NULL)
+ return 0;
+
+ rc = lprocfs_write_u64_helper(buffer, count, &val);
+ if (rc)
+ return rc;
+
+ /* this is only for shrinking grant */
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ if (val >= cli->cl_avail_grant) {
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ return 0;
+ }
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+
+ LPROCFS_CLIMP_CHECK(obd);
+ if (cli->cl_import->imp_state == LUSTRE_IMP_FULL)
+ rc = osc_shrink_grant_to_target(cli, val);
+ LPROCFS_CLIMP_EXIT(obd);
+ if (rc)
+ return rc;
+ return count;
+}
+LPROC_SEQ_FOPS(osc_cur_grant_bytes);
+
+static int osc_cur_lost_grant_bytes_seq_show(struct seq_file *m, void *v)
+{
+ struct obd_device *dev = m->private;
+ struct client_obd *cli = &dev->u.cli;
+ int rc;
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ rc = seq_printf(m, "%lu\n", cli->cl_lost_grant);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ return rc;
+}
+LPROC_SEQ_FOPS_RO(osc_cur_lost_grant_bytes);
+
+static int osc_grant_shrink_interval_seq_show(struct seq_file *m, void *v)
+{
+ struct obd_device *obd = m->private;
+
+ if (obd == NULL)
+ return 0;
+ return seq_printf(m, "%d\n",
+ obd->u.cli.cl_grant_shrink_interval);
+}
+
+static ssize_t osc_grant_shrink_interval_seq_write(struct file *file,
+ const char *buffer, size_t count, loff_t *off)
+{
+ struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
+ int val, rc;
+
+ if (obd == NULL)
+ return 0;
+
+ rc = lprocfs_write_helper(buffer, count, &val);
+ if (rc)
+ return rc;
+
+ if (val <= 0)
+ return -ERANGE;
+
+ obd->u.cli.cl_grant_shrink_interval = val;
+
+ return count;
+}
+LPROC_SEQ_FOPS(osc_grant_shrink_interval);
+
+static int osc_checksum_seq_show(struct seq_file *m, void *v)
+{
+ struct obd_device *obd = m->private;
+
+ if (obd == NULL)
+ return 0;
+
+ return seq_printf(m, "%d\n",
+ obd->u.cli.cl_checksum ? 1 : 0);
+}
+
+static ssize_t osc_checksum_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
+ int val, rc;
+
+ if (obd == NULL)
+ return 0;
+
+ rc = lprocfs_write_helper(buffer, count, &val);
+ if (rc)
+ return rc;
+
+ obd->u.cli.cl_checksum = (val ? 1 : 0);
+
+ return count;
+}
+LPROC_SEQ_FOPS(osc_checksum);
+
+static int osc_checksum_type_seq_show(struct seq_file *m, void *v)
+{
+ struct obd_device *obd = m->private;
+ int i;
+ DECLARE_CKSUM_NAME;
+
+ if (obd == NULL)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(cksum_name); i++) {
+ if (((1 << i) & obd->u.cli.cl_supp_cksum_types) == 0)
+ continue;
+ if (obd->u.cli.cl_cksum_type == (1 << i))
+ seq_printf(m, "[%s] ", cksum_name[i]);
+ else
+ seq_printf(m, "%s ", cksum_name[i]);
+ }
+ seq_printf(m, "\n");
+ return 0;
+}
+
+static ssize_t osc_checksum_type_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
+ int i;
+ DECLARE_CKSUM_NAME;
+ char kernbuf[10];
+
+ if (obd == NULL)
+ return 0;
+
+ if (count > sizeof(kernbuf) - 1)
+ return -EINVAL;
+ if (copy_from_user(kernbuf, buffer, count))
+ return -EFAULT;
+ if (count > 0 && kernbuf[count - 1] == '\n')
+ kernbuf[count - 1] = '\0';
+ else
+ kernbuf[count] = '\0';
+
+ for (i = 0; i < ARRAY_SIZE(cksum_name); i++) {
+ if (((1 << i) & obd->u.cli.cl_supp_cksum_types) == 0)
+ continue;
+ if (!strcmp(kernbuf, cksum_name[i])) {
+ obd->u.cli.cl_cksum_type = 1 << i;
+ return count;
+ }
+ }
+ return -EINVAL;
+}
+LPROC_SEQ_FOPS(osc_checksum_type);
+
+static int osc_resend_count_seq_show(struct seq_file *m, void *v)
+{
+ struct obd_device *obd = m->private;
+
+ return seq_printf(m, "%u\n", atomic_read(&obd->u.cli.cl_resends));
+}
+
+static ssize_t osc_resend_count_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
+ int val, rc;
+
+ rc = lprocfs_write_helper(buffer, count, &val);
+ if (rc)
+ return rc;
+
+ if (val < 0)
+ return -EINVAL;
+
+ atomic_set(&obd->u.cli.cl_resends, val);
+
+ return count;
+}
+LPROC_SEQ_FOPS(osc_resend_count);
+
+static int osc_contention_seconds_seq_show(struct seq_file *m, void *v)
+{
+ struct obd_device *obd = m->private;
+ struct osc_device *od = obd2osc_dev(obd);
+
+ return seq_printf(m, "%u\n", od->od_contention_time);
+}
+
+static ssize_t osc_contention_seconds_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
+ struct osc_device *od = obd2osc_dev(obd);
+
+ return lprocfs_write_helper(buffer, count, &od->od_contention_time) ?:
+ count;
+}
+LPROC_SEQ_FOPS(osc_contention_seconds);
+
+static int osc_lockless_truncate_seq_show(struct seq_file *m, void *v)
+{
+ struct obd_device *obd = m->private;
+ struct osc_device *od = obd2osc_dev(obd);
+
+ return seq_printf(m, "%u\n", od->od_lockless_truncate);
+}
+
+static ssize_t osc_lockless_truncate_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
+ struct osc_device *od = obd2osc_dev(obd);
+
+ return lprocfs_write_helper(buffer, count, &od->od_lockless_truncate) ?:
+ count;
+}
+LPROC_SEQ_FOPS(osc_lockless_truncate);
+
+static int osc_destroys_in_flight_seq_show(struct seq_file *m, void *v)
+{
+ struct obd_device *obd = m->private;
+ return seq_printf(m, "%u\n",
+ atomic_read(&obd->u.cli.cl_destroy_in_flight));
+}
+LPROC_SEQ_FOPS_RO(osc_destroys_in_flight);
+
+static int osc_obd_max_pages_per_rpc_seq_show(struct seq_file *m, void *v)
+{
+ return lprocfs_obd_rd_max_pages_per_rpc(m, m->private);
+}
+
+static ssize_t osc_obd_max_pages_per_rpc_seq_write(struct file *file,
+ const char *buffer, size_t count, loff_t *off)
+{
+ struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
+ struct client_obd *cli = &dev->u.cli;
+ struct obd_connect_data *ocd = &cli->cl_import->imp_connect_data;
+ int chunk_mask, rc;
+ __u64 val;
+
+ rc = lprocfs_write_u64_helper(buffer, count, &val);
+ if (rc)
+ return rc;
+
+ /* if the max_pages is specified in bytes, convert to pages */
+ if (val >= ONE_MB_BRW_SIZE)
+ val >>= PAGE_CACHE_SHIFT;
+
+ LPROCFS_CLIMP_CHECK(dev);
+
+ chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_CACHE_SHIFT)) - 1);
+ /* max_pages_per_rpc must be chunk aligned */
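+ /* round val up to the next chunk boundary: (val + chunk - 1) & ~(chunk - 1) */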
+ val = (val + ~chunk_mask) & chunk_mask;
+ if (val == 0 || val > ocd->ocd_brw_size >> PAGE_CACHE_SHIFT) {
+ LPROCFS_CLIMP_EXIT(dev);
+ return -ERANGE;
+ }
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ cli->cl_max_pages_per_rpc = val;
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+
+ LPROCFS_CLIMP_EXIT(dev);
+ return count;
+}
+LPROC_SEQ_FOPS(osc_obd_max_pages_per_rpc);
+
+LPROC_SEQ_FOPS_RO_TYPE(osc, uuid);
+LPROC_SEQ_FOPS_RO_TYPE(osc, connect_flags);
+LPROC_SEQ_FOPS_RO_TYPE(osc, blksize);
+LPROC_SEQ_FOPS_RO_TYPE(osc, kbytestotal);
+LPROC_SEQ_FOPS_RO_TYPE(osc, kbytesfree);
+LPROC_SEQ_FOPS_RO_TYPE(osc, kbytesavail);
+LPROC_SEQ_FOPS_RO_TYPE(osc, filestotal);
+LPROC_SEQ_FOPS_RO_TYPE(osc, filesfree);
+LPROC_SEQ_FOPS_RO_TYPE(osc, server_uuid);
+LPROC_SEQ_FOPS_RO_TYPE(osc, conn_uuid);
+LPROC_SEQ_FOPS_RO_TYPE(osc, timeouts);
+LPROC_SEQ_FOPS_RO_TYPE(osc, state);
+
+LPROC_SEQ_FOPS_WR_ONLY(osc, ping);
+
+LPROC_SEQ_FOPS_RW_TYPE(osc, import);
+LPROC_SEQ_FOPS_RW_TYPE(osc, pinger_recov);
+
+static struct lprocfs_vars lprocfs_osc_obd_vars[] = {
+ { "uuid", &osc_uuid_fops, 0, 0 },
+ { "ping", &osc_ping_fops, 0, 0222 },
+ { "connect_flags", &osc_connect_flags_fops, 0, 0 },
+ { "blocksize", &osc_blksize_fops, 0, 0 },
+ { "kbytestotal", &osc_kbytestotal_fops, 0, 0 },
+ { "kbytesfree", &osc_kbytesfree_fops, 0, 0 },
+ { "kbytesavail", &osc_kbytesavail_fops, 0, 0 },
+ { "filestotal", &osc_filestotal_fops, 0, 0 },
+ { "filesfree", &osc_filesfree_fops, 0, 0 },
+ //{ "filegroups", lprocfs_rd_filegroups, 0, 0 },
+ { "ost_server_uuid", &osc_server_uuid_fops, 0, 0 },
+ { "ost_conn_uuid", &osc_conn_uuid_fops, 0, 0 },
+ { "active", &osc_active_fops, 0 },
+ { "max_pages_per_rpc", &osc_obd_max_pages_per_rpc_fops, 0 },
+ { "max_rpcs_in_flight", &osc_max_rpcs_in_flight_fops, 0 },
+ { "destroys_in_flight", &osc_destroys_in_flight_fops, 0, 0 },
+ { "max_dirty_mb", &osc_max_dirty_mb_fops, 0 },
+ { "osc_cached_mb", &osc_cached_mb_fops, 0 },
+ { "cur_dirty_bytes", &osc_cur_dirty_bytes_fops, 0, 0 },
+ { "cur_grant_bytes", &osc_cur_grant_bytes_fops, 0 },
+ { "cur_lost_grant_bytes", &osc_cur_lost_grant_bytes_fops, 0, 0},
+ { "grant_shrink_interval", &osc_grant_shrink_interval_fops, 0 },
+ { "checksums", &osc_checksum_fops, 0 },
+ { "checksum_type", &osc_checksum_type_fops, 0 },
+ { "resend_count", &osc_resend_count_fops, 0},
+ { "timeouts", &osc_timeouts_fops, 0, 0 },
+ { "contention_seconds", &osc_contention_seconds_fops, 0 },
+ { "lockless_truncate", &osc_lockless_truncate_fops, 0 },
+ { "import", &osc_import_fops, 0 },
+ { "state", &osc_state_fops, 0, 0 },
+ { "pinger_recov", &osc_pinger_recov_fops, 0 },
+ { 0 }
+};
+
+LPROC_SEQ_FOPS_RO_TYPE(osc, numrefs);
+static struct lprocfs_vars lprocfs_osc_module_vars[] = {
+ { "num_refs", &osc_numrefs_fops, 0, 0 },
+ { 0 }
+};
+
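+/* integer percentage of @a relative to @b, guarding against division by zero */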
+#define pct(a, b) ((b) ? (a) * 100 / (b) : 0)
+
+static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v)
+{
+ struct timeval now;
+ struct obd_device *dev = seq->private;
+ struct client_obd *cli = &dev->u.cli;
+ unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
+ int i;
+
+ do_gettimeofday(&now);
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+
+ seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
+ now.tv_sec, now.tv_usec);
+ seq_printf(seq, "read RPCs in flight: %d\n",
+ cli->cl_r_in_flight);
+ seq_printf(seq, "write RPCs in flight: %d\n",
+ cli->cl_w_in_flight);
+ seq_printf(seq, "pending write pages: %d\n",
+ atomic_read(&cli->cl_pending_w_pages));
+ seq_printf(seq, "pending read pages: %d\n",
+ atomic_read(&cli->cl_pending_r_pages));
+
+ seq_printf(seq, "\n\t\t\tread\t\t\twrite\n");
+ seq_printf(seq, "pages per rpc rpcs %% cum %% |");
+ seq_printf(seq, " rpcs %% cum %%\n");
+
+ read_tot = lprocfs_oh_sum(&cli->cl_read_page_hist);
+ write_tot = lprocfs_oh_sum(&cli->cl_write_page_hist);
+
+ read_cum = 0;
+ write_cum = 0;
+ for (i = 0; i < OBD_HIST_MAX; i++) {
+ unsigned long r = cli->cl_read_page_hist.oh_buckets[i];
+ unsigned long w = cli->cl_write_page_hist.oh_buckets[i];
+ read_cum += r;
+ write_cum += w;
+ seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n",
+ 1 << i, r, pct(r, read_tot),
+ pct(read_cum, read_tot), w,
+ pct(w, write_tot),
+ pct(write_cum, write_tot));
+ if (read_cum == read_tot && write_cum == write_tot)
+ break;
+ }
+
+ seq_printf(seq, "\n\t\t\tread\t\t\twrite\n");
+ seq_printf(seq, "rpcs in flight rpcs %% cum %% |");
+ seq_printf(seq, " rpcs %% cum %%\n");
+
+ read_tot = lprocfs_oh_sum(&cli->cl_read_rpc_hist);
+ write_tot = lprocfs_oh_sum(&cli->cl_write_rpc_hist);
+
+ read_cum = 0;
+ write_cum = 0;
+ for (i = 0; i < OBD_HIST_MAX; i++) {
+ unsigned long r = cli->cl_read_rpc_hist.oh_buckets[i];
+ unsigned long w = cli->cl_write_rpc_hist.oh_buckets[i];
+ read_cum += r;
+ write_cum += w;
+ seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n",
+ i, r, pct(r, read_tot),
+ pct(read_cum, read_tot), w,
+ pct(w, write_tot),
+ pct(write_cum, write_tot));
+ if (read_cum == read_tot && write_cum == write_tot)
+ break;
+ }
+
+ seq_printf(seq, "\n\t\t\tread\t\t\twrite\n");
+ seq_printf(seq, "offset rpcs %% cum %% |");
+ seq_printf(seq, " rpcs %% cum %%\n");
+
+ read_tot = lprocfs_oh_sum(&cli->cl_read_offset_hist);
+ write_tot = lprocfs_oh_sum(&cli->cl_write_offset_hist);
+
+ read_cum = 0;
+ write_cum = 0;
+ for (i = 0; i < OBD_HIST_MAX; i++) {
+ unsigned long r = cli->cl_read_offset_hist.oh_buckets[i];
+ unsigned long w = cli->cl_write_offset_hist.oh_buckets[i];
+ read_cum += r;
+ write_cum += w;
+ seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n",
+ (i == 0) ? 0 : 1 << (i - 1),
+ r, pct(r, read_tot), pct(read_cum, read_tot),
+ w, pct(w, write_tot), pct(write_cum, write_tot));
+ if (read_cum == read_tot && write_cum == write_tot)
+ break;
+ }
+
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+
+ return 0;
+}
+#undef pct
+
+static ssize_t osc_rpc_stats_seq_write(struct file *file, const char *buf,
+ size_t len, loff_t *off)
+{
+ struct seq_file *seq = file->private_data;
+ struct obd_device *dev = seq->private;
+ struct client_obd *cli = &dev->u.cli;
+
+ lprocfs_oh_clear(&cli->cl_read_rpc_hist);
+ lprocfs_oh_clear(&cli->cl_write_rpc_hist);
+ lprocfs_oh_clear(&cli->cl_read_page_hist);
+ lprocfs_oh_clear(&cli->cl_write_page_hist);
+ lprocfs_oh_clear(&cli->cl_read_offset_hist);
+ lprocfs_oh_clear(&cli->cl_write_offset_hist);
+
+ return len;
+}
+
+LPROC_SEQ_FOPS(osc_rpc_stats);
+
+static int osc_stats_seq_show(struct seq_file *seq, void *v)
+{
+ struct timeval now;
+ struct obd_device *dev = seq->private;
+ struct osc_stats *stats = &obd2osc_dev(dev)->od_stats;
+
+ do_gettimeofday(&now);
+
+ seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
+ now.tv_sec, now.tv_usec);
+ seq_printf(seq, "lockless_write_bytes\t\t"LPU64"\n",
+ stats->os_lockless_writes);
+ seq_printf(seq, "lockless_read_bytes\t\t"LPU64"\n",
+ stats->os_lockless_reads);
+ seq_printf(seq, "lockless_truncate\t\t"LPU64"\n",
+ stats->os_lockless_truncates);
+ return 0;
+}
+
+static ssize_t osc_stats_seq_write(struct file *file, const char *buf,
+ size_t len, loff_t *off)
+{
+ struct seq_file *seq = file->private_data;
+ struct obd_device *dev = seq->private;
+ struct osc_stats *stats = &obd2osc_dev(dev)->od_stats;
+
+ memset(stats, 0, sizeof(*stats));
+ return len;
+}
+
+LPROC_SEQ_FOPS(osc_stats);
+
+int lproc_osc_attach_seqstat(struct obd_device *dev)
+{
+ int rc;
+
+ rc = lprocfs_seq_create(dev->obd_proc_entry, "osc_stats", 0644,
+ &osc_stats_fops, dev);
+ if (rc == 0)
+ rc = lprocfs_obd_seq_create(dev, "rpc_stats", 0644,
+ &osc_rpc_stats_fops, dev);
+
+ return rc;
+}
+
+void lprocfs_osc_init_vars(struct lprocfs_static_vars *lvars)
+{
+ lvars->module_vars = lprocfs_osc_module_vars;
+ lvars->obd_vars = lprocfs_osc_obd_vars;
+}
+#endif /* LPROCFS */
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
new file mode 100644
index 0000000..00295da
--- /dev/null
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -0,0 +1,2875 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * osc cache management.
+ *
+ * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_OSC
+
+#include "osc_cl_internal.h"
+#include "osc_internal.h"
+
+static int extent_debug; /* set it to be true for more debug */
+
+static void osc_update_pending(struct osc_object *obj, int cmd, int delta);
+static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
+ int state);
+static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
+ struct osc_async_page *oap, int sent, int rc);
+static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap,
+ int cmd);
+static int osc_refresh_count(const struct lu_env *env,
+ struct osc_async_page *oap, int cmd);
+static int osc_io_unplug_async(const struct lu_env *env,
+ struct client_obd *cli, struct osc_object *osc);
+static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
+ unsigned int lost_grant);
+
+static void osc_extent_tree_dump0(int level, struct osc_object *obj,
+ const char *func, int line);
+#define osc_extent_tree_dump(lvl, obj) \
+ osc_extent_tree_dump0(lvl, obj, __func__, __LINE__)
+
+/** \addtogroup osc
+ * @{
+ */
+
+/* ------------------ osc extent ------------------ */
+static inline char *ext_flags(struct osc_extent *ext, char *flags)
+{
+ char *buf = flags;
+ *buf++ = ext->oe_rw ? 'r' : 'w';
+ if (ext->oe_intree)
+ *buf++ = 'i';
+ if (ext->oe_srvlock)
+ *buf++ = 's';
+ if (ext->oe_hp)
+ *buf++ = 'h';
+ if (ext->oe_urgent)
+ *buf++ = 'u';
+ if (ext->oe_memalloc)
+ *buf++ = 'm';
+ if (ext->oe_trunc_pending)
+ *buf++ = 't';
+ if (ext->oe_fsync_wait)
+ *buf++ = 'Y';
+ *buf = 0;
+ return flags;
+}
+
+static inline char list_empty_marker(struct list_head *list)
+{
+ return list_empty(list) ? '-' : '+';
+}
+
+#define EXTSTR "[%lu -> %lu/%lu]"
+#define EXTPARA(ext) (ext)->oe_start, (ext)->oe_end, (ext)->oe_max_end
+static const char *oes_strings[] = {
+ "inv", "active", "cache", "locking", "lockdone", "rpc", "trunc", NULL };
+
+#define OSC_EXTENT_DUMP(lvl, extent, fmt, ...) do { \
+ struct osc_extent *__ext = (extent); \
+ char __buf[16]; \
+ \
+ CDEBUG(lvl, \
+ "extent %p@{" EXTSTR ", " \
+ "[%d|%d|%c|%s|%s|%p], [%d|%d|%c|%c|%p|%u|%p]} " fmt, \
+ /* ----- extent part 0 ----- */ \
+ __ext, EXTPARA(__ext), \
+ /* ----- part 1 ----- */ \
+ atomic_read(&__ext->oe_refc), \
+ atomic_read(&__ext->oe_users), \
+ list_empty_marker(&__ext->oe_link), \
+ oes_strings[__ext->oe_state], ext_flags(__ext, __buf), \
+ __ext->oe_obj, \
+ /* ----- part 2 ----- */ \
+ __ext->oe_grants, __ext->oe_nr_pages, \
+ list_empty_marker(&__ext->oe_pages), \
+ waitqueue_active(&__ext->oe_waitq) ? '+' : '-', \
+ __ext->oe_osclock, __ext->oe_mppr, __ext->oe_owner, \
+ /* ----- part 4 ----- */ \
+ ## __VA_ARGS__); \
+} while (0)
+
+#undef EASSERTF
+#define EASSERTF(expr, ext, fmt, args...) do { \
+ if (!(expr)) { \
+ OSC_EXTENT_DUMP(D_ERROR, (ext), fmt, ##args); \
+ osc_extent_tree_dump(D_ERROR, (ext)->oe_obj); \
+ LASSERT(expr); \
+ } \
+} while (0)
+
+#undef EASSERT
+#define EASSERT(expr, ext) EASSERTF(expr, ext, "\n")
+
+static inline struct osc_extent *rb_extent(struct rb_node *n)
+{
+ if (n == NULL)
+ return NULL;
+
+ return container_of(n, struct osc_extent, oe_node);
+}
+
+static inline struct osc_extent *next_extent(struct osc_extent *ext)
+{
+ if (ext == NULL)
+ return NULL;
+
+ LASSERT(ext->oe_intree);
+ return rb_extent(rb_next(&ext->oe_node));
+}
+
+static inline struct osc_extent *prev_extent(struct osc_extent *ext)
+{
+ if (ext == NULL)
+ return NULL;
+
+ LASSERT(ext->oe_intree);
+ return rb_extent(rb_prev(&ext->oe_node));
+}
+
+static inline struct osc_extent *first_extent(struct osc_object *obj)
+{
+ return rb_extent(rb_first(&obj->oo_root));
+}
+
+/* object must be locked by caller. */
+static int osc_extent_sanity_check0(struct osc_extent *ext,
+ const char *func, const int line)
+{
+ struct osc_object *obj = ext->oe_obj;
+ struct osc_async_page *oap;
+ int page_count;
+ int rc = 0;
+
+ if (!osc_object_is_locked(obj))
+ GOTO(out, rc = 9);
+
+ if (ext->oe_state >= OES_STATE_MAX)
+ GOTO(out, rc = 10);
+
+ if (atomic_read(&ext->oe_refc) <= 0)
+ GOTO(out, rc = 20);
+
+ if (atomic_read(&ext->oe_refc) < atomic_read(&ext->oe_users))
+ GOTO(out, rc = 30);
+
+ switch (ext->oe_state) {
+ case OES_INV:
+ if (ext->oe_nr_pages > 0 || !list_empty(&ext->oe_pages))
+ GOTO(out, rc = 35);
+ GOTO(out, rc = 0);
+ break;
+ case OES_ACTIVE:
+ if (atomic_read(&ext->oe_users) == 0)
+ GOTO(out, rc = 40);
+ if (ext->oe_hp)
+ GOTO(out, rc = 50);
+ if (ext->oe_fsync_wait && !ext->oe_urgent)
+ GOTO(out, rc = 55);
+ break;
+ case OES_CACHE:
+ if (ext->oe_grants == 0)
+ GOTO(out, rc = 60);
+ if (ext->oe_fsync_wait && !ext->oe_urgent && !ext->oe_hp)
+ GOTO(out, rc = 65);
+ default:
+ if (atomic_read(&ext->oe_users) > 0)
+ GOTO(out, rc = 70);
+ }
+
+ if (ext->oe_max_end < ext->oe_end || ext->oe_end < ext->oe_start)
+ GOTO(out, rc = 80);
+
+ if (ext->oe_osclock == NULL && ext->oe_grants > 0)
+ GOTO(out, rc = 90);
+
+ if (ext->oe_osclock) {
+ struct cl_lock_descr *descr;
+ descr = &ext->oe_osclock->cll_descr;
+ if (!(descr->cld_start <= ext->oe_start &&
+ descr->cld_end >= ext->oe_max_end))
+ GOTO(out, rc = 100);
+ }
+
+ if (ext->oe_nr_pages > ext->oe_mppr)
+ GOTO(out, rc = 105);
+
+ /* Do not verify page list if extent is in RPC. This is because an
+ * in-RPC extent is supposed to be exclusively accessible w/o lock. */
+ if (ext->oe_state > OES_CACHE)
+ GOTO(out, rc = 0);
+
+ if (!extent_debug)
+ GOTO(out, rc = 0);
+
+ page_count = 0;
+ list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
+ pgoff_t index = oap2cl_page(oap)->cp_index;
+ ++page_count;
+ if (index > ext->oe_end || index < ext->oe_start)
+ GOTO(out, rc = 110);
+ }
+ if (page_count != ext->oe_nr_pages)
+ GOTO(out, rc = 120);
+
+out:
+ if (rc != 0)
+ OSC_EXTENT_DUMP(D_ERROR, ext,
+ "%s:%d sanity check %p failed with rc = %d\n",
+ func, line, ext, rc);
+ return rc;
+}
+
+#define sanity_check_nolock(ext) \
+ osc_extent_sanity_check0(ext, __func__, __LINE__)
+
+#define sanity_check(ext) ({ \
+ int __res; \
+ osc_object_lock((ext)->oe_obj); \
+ __res = sanity_check_nolock(ext); \
+ osc_object_unlock((ext)->oe_obj); \
+ __res; \
+})
+
+
+/**
+ * sanity check - to make sure there is no overlapped extent in the tree.
+ */
+static int osc_extent_is_overlapped(struct osc_object *obj,
+ struct osc_extent *ext)
+{
+ struct osc_extent *tmp;
+
+ LASSERT(osc_object_is_locked(obj));
+
+ if (!extent_debug)
+ return 0;
+
+ for (tmp = first_extent(obj); tmp != NULL; tmp = next_extent(tmp)) {
+ if (tmp == ext)
+ continue;
+ if (tmp->oe_end >= ext->oe_start &&
+ tmp->oe_start <= ext->oe_end)
+ return 1;
+ }
+ return 0;
+}
+
+static void osc_extent_state_set(struct osc_extent *ext, int state)
+{
+ LASSERT(osc_object_is_locked(ext->oe_obj));
+ LASSERT(state >= OES_INV && state < OES_STATE_MAX);
+
+ /* Never try to sanity check a state changing extent :-) */
+ /* LASSERT(sanity_check_nolock(ext) == 0); */
+
+ /* TODO: validate the state machine */
+ ext->oe_state = state;
+ wake_up_all(&ext->oe_waitq);
+}
+
+static struct osc_extent *osc_extent_alloc(struct osc_object *obj)
+{
+ struct osc_extent *ext;
+
+ OBD_SLAB_ALLOC_PTR_GFP(ext, osc_extent_kmem, GFP_IOFS);
+ if (ext == NULL)
+ return NULL;
+
+ RB_CLEAR_NODE(&ext->oe_node);
+ ext->oe_obj = obj;
+ atomic_set(&ext->oe_refc, 1);
+ atomic_set(&ext->oe_users, 0);
+ INIT_LIST_HEAD(&ext->oe_link);
+ ext->oe_state = OES_INV;
+ INIT_LIST_HEAD(&ext->oe_pages);
+ init_waitqueue_head(&ext->oe_waitq);
+ ext->oe_osclock = NULL;
+
+ return ext;
+}
+
+static void osc_extent_free(struct osc_extent *ext)
+{
+ OBD_SLAB_FREE_PTR(ext, osc_extent_kmem);
+}
+
+static struct osc_extent *osc_extent_get(struct osc_extent *ext)
+{
+ LASSERT(atomic_read(&ext->oe_refc) >= 0);
+ atomic_inc(&ext->oe_refc);
+ return ext;
+}
+
+static void osc_extent_put(const struct lu_env *env, struct osc_extent *ext)
+{
+ LASSERT(atomic_read(&ext->oe_refc) > 0);
+ if (atomic_dec_and_test(&ext->oe_refc)) {
+ LASSERT(list_empty(&ext->oe_link));
+ LASSERT(atomic_read(&ext->oe_users) == 0);
+ LASSERT(ext->oe_state == OES_INV);
+ LASSERT(!ext->oe_intree);
+
+ if (ext->oe_osclock) {
+ cl_lock_put(env, ext->oe_osclock);
+ ext->oe_osclock = NULL;
+ }
+ osc_extent_free(ext);
+ }
+}
+
+/**
+ * osc_extent_put_trust() is a special version of osc_extent_put() for use
+ * when it is known that the caller is not the last user. This works around
+ * the lack of a lu_env ;-).
+ */
+static void osc_extent_put_trust(struct osc_extent *ext)
+{
+ LASSERT(atomic_read(&ext->oe_refc) > 1);
+ LASSERT(osc_object_is_locked(ext->oe_obj));
+ atomic_dec(&ext->oe_refc);
+}
+
+/**
+ * Return the extent which includes pgoff @index, or return the greatest
+ * previous extent in the tree.
+ */
+static struct osc_extent *osc_extent_search(struct osc_object *obj,
+ pgoff_t index)
+{
+ struct rb_node *n = obj->oo_root.rb_node;
+ struct osc_extent *tmp, *p = NULL;
+
+ LASSERT(osc_object_is_locked(obj));
+ while (n != NULL) {
+ tmp = rb_extent(n);
+ if (index < tmp->oe_start) {
+ n = n->rb_left;
+ } else if (index > tmp->oe_end) {
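+ /* remember the closest extent that ends before @index */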
+ p = rb_extent(n);
+ n = n->rb_right;
+ } else {
+ return tmp;
+ }
+ }
+ return p;
+}
+
+/*
+ * Return the extent covering @index, or NULL if there is none.
+ * The caller must hold the object lock.
+ */
+static struct osc_extent *osc_extent_lookup(struct osc_object *obj,
+ pgoff_t index)
+{
+ struct osc_extent *ext;
+
+ ext = osc_extent_search(obj, index);
+ if (ext != NULL && ext->oe_start <= index && index <= ext->oe_end)
+ return osc_extent_get(ext);
+ return NULL;
+}
+
+/* caller must have held object lock. */
+static void osc_extent_insert(struct osc_object *obj, struct osc_extent *ext)
+{
+ struct rb_node **n = &obj->oo_root.rb_node;
+ struct rb_node *parent = NULL;
+ struct osc_extent *tmp;
+
+ LASSERT(ext->oe_intree == 0);
+ LASSERT(ext->oe_obj == obj);
+ LASSERT(osc_object_is_locked(obj));
+ while (*n != NULL) {
+ tmp = rb_extent(*n);
+ parent = *n;
+
+ if (ext->oe_end < tmp->oe_start)
+ n = &(*n)->rb_left;
+ else if (ext->oe_start > tmp->oe_end)
+ n = &(*n)->rb_right;
+ else
+ EASSERTF(0, tmp, EXTSTR, EXTPARA(ext));
+ }
+ rb_link_node(&ext->oe_node, parent, n);
+ rb_insert_color(&ext->oe_node, &obj->oo_root);
+ osc_extent_get(ext);
+ ext->oe_intree = 1;
+}
+
+/* caller must have held object lock. */
+static void osc_extent_erase(struct osc_extent *ext)
+{
+ struct osc_object *obj = ext->oe_obj;
+ LASSERT(osc_object_is_locked(obj));
+ if (ext->oe_intree) {
+ rb_erase(&ext->oe_node, &obj->oo_root);
+ ext->oe_intree = 0;
+ /* rbtree held a refcount */
+ osc_extent_put_trust(ext);
+ }
+}
+
+static struct osc_extent *osc_extent_hold(struct osc_extent *ext)
+{
+ struct osc_object *obj = ext->oe_obj;
+
+ LASSERT(osc_object_is_locked(obj));
+ LASSERT(ext->oe_state == OES_ACTIVE || ext->oe_state == OES_CACHE);
+ if (ext->oe_state == OES_CACHE) {
+ osc_extent_state_set(ext, OES_ACTIVE);
+ osc_update_pending(obj, OBD_BRW_WRITE, -ext->oe_nr_pages);
+ }
+ atomic_inc(&ext->oe_users);
+ list_del_init(&ext->oe_link);
+ return osc_extent_get(ext);
+}
+
+static void __osc_extent_remove(struct osc_extent *ext)
+{
+ LASSERT(osc_object_is_locked(ext->oe_obj));
+ LASSERT(list_empty(&ext->oe_pages));
+ osc_extent_erase(ext);
+ list_del_init(&ext->oe_link);
+ osc_extent_state_set(ext, OES_INV);
+ OSC_EXTENT_DUMP(D_CACHE, ext, "destroyed.\n");
+}
+
+static void osc_extent_remove(struct osc_extent *ext)
+{
+ struct osc_object *obj = ext->oe_obj;
+
+ osc_object_lock(obj);
+ __osc_extent_remove(ext);
+ osc_object_unlock(obj);
+}
+
+/**
+ * This function is used to merge extents to get better performance. It checks
+ * if @cur and @victim are contiguous at chunk level.
+ */
+static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur,
+ struct osc_extent *victim)
+{
+ struct osc_object *obj = cur->oe_obj;
+ pgoff_t chunk_start;
+ pgoff_t chunk_end;
+ int ppc_bits;
+
+ LASSERT(cur->oe_state == OES_CACHE);
+ LASSERT(osc_object_is_locked(obj));
+ if (victim == NULL)
+ return -EINVAL;
+
+ if (victim->oe_state != OES_CACHE || victim->oe_fsync_wait)
+ return -EBUSY;
+
+ if (cur->oe_max_end != victim->oe_max_end)
+ return -ERANGE;
+
+ LASSERT(cur->oe_osclock == victim->oe_osclock);
+ ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_CACHE_SHIFT;
+ chunk_start = cur->oe_start >> ppc_bits;
+ chunk_end = cur->oe_end >> ppc_bits;
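+ /* victim must be immediately adjacent to cur at chunk granularity */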
+ if (chunk_start != (victim->oe_end >> ppc_bits) + 1 &&
+ chunk_end + 1 != victim->oe_start >> ppc_bits)
+ return -ERANGE;
+
+ OSC_EXTENT_DUMP(D_CACHE, victim, "will be merged by %p.\n", cur);
+
+ cur->oe_start = min(cur->oe_start, victim->oe_start);
+ cur->oe_end = max(cur->oe_end, victim->oe_end);
+ cur->oe_grants += victim->oe_grants;
+ cur->oe_nr_pages += victim->oe_nr_pages;
+ /* only the following bits are needed to merge */
+ cur->oe_urgent |= victim->oe_urgent;
+ cur->oe_memalloc |= victim->oe_memalloc;
+ list_splice_init(&victim->oe_pages, &cur->oe_pages);
+ list_del_init(&victim->oe_link);
+ victim->oe_nr_pages = 0;
+
+ osc_extent_get(victim);
+ __osc_extent_remove(victim);
+ osc_extent_put(env, victim);
+
+ OSC_EXTENT_DUMP(D_CACHE, cur, "after merging %p.\n", victim);
+ return 0;
+}
+
+/**
+ * Drop user count of osc_extent, and unplug IO asynchronously.
+ */
+int osc_extent_release(const struct lu_env *env, struct osc_extent *ext)
+{
+ struct osc_object *obj = ext->oe_obj;
+ int rc = 0;
+
+ LASSERT(atomic_read(&ext->oe_users) > 0);
+ LASSERT(sanity_check(ext) == 0);
+ LASSERT(ext->oe_grants > 0);
+
+ if (atomic_dec_and_lock(&ext->oe_users, &obj->oo_lock)) {
+ LASSERT(ext->oe_state == OES_ACTIVE);
+ if (ext->oe_trunc_pending) {
+ /* a truncate process is waiting for this extent.
+ * This may happen due to a race, check
+ * osc_cache_truncate_start(). */
+ osc_extent_state_set(ext, OES_TRUNC);
+ ext->oe_trunc_pending = 0;
+ } else {
+ osc_extent_state_set(ext, OES_CACHE);
+ osc_update_pending(obj, OBD_BRW_WRITE,
+ ext->oe_nr_pages);
+
+ /* try to merge the previous and next extent. */
+ osc_extent_merge(env, ext, prev_extent(ext));
+ osc_extent_merge(env, ext, next_extent(ext));
+
+ if (ext->oe_urgent)
+ list_move_tail(&ext->oe_link,
+ &obj->oo_urgent_exts);
+ }
+ osc_object_unlock(obj);
+
+ osc_io_unplug_async(env, osc_cli(obj), obj);
+ }
+ osc_extent_put(env, ext);
+ return rc;
+}
+
+static inline int overlapped(struct osc_extent *ex1, struct osc_extent *ex2)
+{
+ return !(ex1->oe_end < ex2->oe_start || ex2->oe_end < ex1->oe_start);
+}
+
+/**
+ * Find or create an extent which includes @index, core function to manage
+ * extent tree.
+ */
+struct osc_extent *osc_extent_find(const struct lu_env *env,
+ struct osc_object *obj, pgoff_t index,
+ int *grants)
+
+{
+ struct client_obd *cli = osc_cli(obj);
+ struct cl_lock *lock;
+ struct osc_extent *cur;
+ struct osc_extent *ext;
+ struct osc_extent *conflict = NULL;
+ struct osc_extent *found = NULL;
+ pgoff_t chunk;
+ pgoff_t max_end;
+ int max_pages; /* max_pages_per_rpc */
+ int chunksize;
+ int ppc_bits; /* pages per chunk bits */
+ int chunk_mask;
+ int rc;
+
+ cur = osc_extent_alloc(obj);
+ if (cur == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0);
+ LASSERT(lock != NULL);
+ LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
+
+ LASSERT(cli->cl_chunkbits >= PAGE_CACHE_SHIFT);
+ ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
+ chunk_mask = ~((1 << ppc_bits) - 1);
+ chunksize = 1 << cli->cl_chunkbits;
+ chunk = index >> ppc_bits;
+
+ /* align the end to an RPC boundary; the RPC size may not be a power of 2. */
+ max_pages = cli->cl_max_pages_per_rpc;
+ LASSERT((max_pages & ~chunk_mask) == 0);
+ max_end = index - (index % max_pages) + max_pages - 1;
+ max_end = min_t(pgoff_t, max_end, lock->cll_descr.cld_end);
+
+ /* initialize new extent by parameters so far */
+ cur->oe_max_end = max_end;
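+ /* start with exactly the chunk containing @index: [chunk start, chunk end] */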
+ cur->oe_start = index & chunk_mask;
+ cur->oe_end = ((index + ~chunk_mask + 1) & chunk_mask) - 1;
+ if (cur->oe_start < lock->cll_descr.cld_start)
+ cur->oe_start = lock->cll_descr.cld_start;
+ if (cur->oe_end > max_end)
+ cur->oe_end = max_end;
+ cur->oe_osclock = lock;
+ cur->oe_grants = 0;
+ cur->oe_mppr = max_pages;
+
+ /* grant has been allocated by the caller */
+ LASSERTF(*grants >= chunksize + cli->cl_extent_tax,
+ "%u/%u/%u.\n", *grants, chunksize, cli->cl_extent_tax);
+ LASSERTF((max_end - cur->oe_start) < max_pages, EXTSTR, EXTPARA(cur));
+
+restart:
+ osc_object_lock(obj);
+ ext = osc_extent_search(obj, cur->oe_start);
+ if (ext == NULL)
+ ext = first_extent(obj);
+ while (ext != NULL) {
+ loff_t ext_chk_start = ext->oe_start >> ppc_bits;
+ loff_t ext_chk_end = ext->oe_end >> ppc_bits;
+
+ LASSERT(sanity_check_nolock(ext) == 0);
+ if (chunk > ext_chk_end + 1)
+ break;
+
+ /* if covered by different locks, there is no chance to match */
+ if (lock != ext->oe_osclock) {
+ EASSERTF(!overlapped(ext, cur), ext,
+ EXTSTR, EXTPARA(cur));
+
+ ext = next_extent(ext);
+ continue;
+ }
+
+ /* discontiguous chunks? */
+ if (chunk + 1 < ext_chk_start) {
+ ext = next_extent(ext);
+ continue;
+ }
+
+ /* ok, from now on, ext and cur have these attrs:
+ * 1. covered by the same lock
+ * 2. contiguous at chunk level or overlapping. */
+
+ if (overlapped(ext, cur)) {
+ /* cur is the minimum unit, so overlapping means
+ * full containment. */
+ EASSERTF((ext->oe_start <= cur->oe_start &&
+ ext->oe_end >= cur->oe_end),
+ ext, EXTSTR, EXTPARA(cur));
+
+ if (ext->oe_state > OES_CACHE || ext->oe_fsync_wait) {
+ /* for simplicity, we wait for this extent to
+ * finish before going forward. */
+ conflict = osc_extent_get(ext);
+ break;
+ }
+
+ found = osc_extent_hold(ext);
+ break;
+ }
+
+ /* non-overlapped extent */
+ if (ext->oe_state != OES_CACHE || ext->oe_fsync_wait) {
+ /* we can't do anything for a non OES_CACHE extent, or
+ * if there is someone waiting for this extent to be
+ * flushed, try next one. */
+ ext = next_extent(ext);
+ continue;
+ }
+
+ /* check if they belong to the same rpc slot before trying to
+ * merge. To get here the extents must be non-overlapping and
+ * contiguous at the chunk level. */
+ if (ext->oe_max_end != max_end) {
+ /* if they don't belong to the same RPC slot or
+ * max_pages_per_rpc has ever changed, do not merge. */
+ ext = next_extent(ext);
+ continue;
+ }
+
+ /* an extent is required to be contiguous at the chunk level so
+ * that we know the whole extent is covered by grant (the pages
+ * in the extent are NOT required to be contiguous). Otherwise
+ * it would be too difficult to know which chunks have grant
+ * allocated. */
+
+ /* try to do front merge - extend ext's start */
+ if (chunk + 1 == ext_chk_start) {
+ /* ext must be chunk size aligned */
+ EASSERT((ext->oe_start & ~chunk_mask) == 0, ext);
+
+ /* pull ext's start back to cover cur */
+ ext->oe_start = cur->oe_start;
+ ext->oe_grants += chunksize;
+ *grants -= chunksize;
+
+ found = osc_extent_hold(ext);
+ } else if (chunk == ext_chk_end + 1) {
+ /* rear merge */
+ ext->oe_end = cur->oe_end;
+ ext->oe_grants += chunksize;
+ *grants -= chunksize;
+
+ /* try to merge with the next one because we just filled
+ * in a gap */
+ if (osc_extent_merge(env, ext, next_extent(ext)) == 0)
+ /* we can save extent tax from next extent */
+ *grants += cli->cl_extent_tax;
+
+ found = osc_extent_hold(ext);
+ }
+ if (found != NULL)
+ break;
+
+ ext = next_extent(ext);
+ }
+
+ osc_extent_tree_dump(D_CACHE, obj);
+ if (found != NULL) {
+ LASSERT(conflict == NULL);
+ if (!IS_ERR(found)) {
+ LASSERT(found->oe_osclock == cur->oe_osclock);
+ OSC_EXTENT_DUMP(D_CACHE, found,
+ "found caching ext for %lu.\n", index);
+ }
+ } else if (conflict == NULL) {
+ /* create a new extent */
+ EASSERT(osc_extent_is_overlapped(obj, cur) == 0, cur);
+ cur->oe_grants = chunksize + cli->cl_extent_tax;
+ *grants -= cur->oe_grants;
+ LASSERT(*grants >= 0);
+
+ cur->oe_state = OES_CACHE;
+ found = osc_extent_hold(cur);
+ osc_extent_insert(obj, cur);
+ OSC_EXTENT_DUMP(D_CACHE, cur, "add into tree %lu/%lu.\n",
+ index, lock->cll_descr.cld_end);
+ }
+ osc_object_unlock(obj);
+
+ if (conflict != NULL) {
+ LASSERT(found == NULL);
+
+ /* wait for IO to finish. Note that the conflicting extent
+ * cannot be an OES_TRUNC extent. */
+ rc = osc_extent_wait(env, conflict, OES_INV);
+ osc_extent_put(env, conflict);
+ conflict = NULL;
+ if (rc < 0)
+ GOTO(out, found = ERR_PTR(rc));
+
+ goto restart;
+ }
+
+out:
+ osc_extent_put(env, cur);
+ LASSERT(*grants >= 0);
+ return found;
+}
+
+/**
+ * Called when IO is finished to an extent.
+ */
+int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
+ int sent, int rc)
+{
+ struct client_obd *cli = osc_cli(ext->oe_obj);
+ struct osc_async_page *oap;
+ struct osc_async_page *tmp;
+ int nr_pages = ext->oe_nr_pages;
+ int lost_grant = 0;
+ int blocksize = cli->cl_import->imp_obd->obd_osfs.os_bsize ? : 4096;
+ __u64 last_off = 0;
+ int last_count = -1;
+
+ OSC_EXTENT_DUMP(D_CACHE, ext, "extent finished.\n");
+
+ ext->oe_rc = rc ?: ext->oe_nr_pages;
+ EASSERT(ergo(rc == 0, ext->oe_state == OES_RPC), ext);
+ list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
+ oap_pending_item) {
+ list_del_init(&oap->oap_rpc_item);
+ list_del_init(&oap->oap_pending_item);
+ if (last_off <= oap->oap_obj_off) {
+ last_off = oap->oap_obj_off;
+ last_count = oap->oap_count;
+ }
+
+ --ext->oe_nr_pages;
+ osc_ap_completion(env, cli, oap, sent, rc);
+ }
+ EASSERT(ext->oe_nr_pages == 0, ext);
+
+ if (!sent) {
+ lost_grant = ext->oe_grants;
+ } else if (blocksize < PAGE_CACHE_SIZE &&
+ last_count != PAGE_CACHE_SIZE) {
+ /* For short writes we shouldn't count parts of pages that
+ * span a whole chunk on the OST side, or our accounting goes
+ * wrong. Should match the code in filter_grant_check. */
+ int offset = oap->oap_page_off & ~CFS_PAGE_MASK;
+ int count = oap->oap_count + (offset & (blocksize - 1));
+ int end = (offset + oap->oap_count) & (blocksize - 1);
+ if (end)
+ count += blocksize - end;
+
+ lost_grant = PAGE_CACHE_SIZE - count;
+ }
+ if (ext->oe_grants > 0)
+ osc_free_grant(cli, nr_pages, lost_grant);
+
+ osc_extent_remove(ext);
+ /* put the refcount for RPC */
+ osc_extent_put(env, ext);
+ return 0;
+}
+
+static int extent_wait_cb(struct osc_extent *ext, int state)
+{
+ int ret;
+
+ osc_object_lock(ext->oe_obj);
+ ret = ext->oe_state == state;
+ osc_object_unlock(ext->oe_obj);
+
+ return ret;
+}
+
+/**
+ * Wait for the extent's state to become @state.
+ */
+static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
+ int state)
+{
+ struct osc_object *obj = ext->oe_obj;
+ struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(600), NULL,
+ LWI_ON_SIGNAL_NOOP, NULL);
+ int rc = 0;
+
+ osc_object_lock(obj);
+ LASSERT(sanity_check_nolock(ext) == 0);
+ /* `Kick' this extent only if the caller is waiting for it to be
+ * written out. */
+ if (state == OES_INV && !ext->oe_urgent && !ext->oe_hp) {
+ if (ext->oe_state == OES_ACTIVE) {
+ ext->oe_urgent = 1;
+ } else if (ext->oe_state == OES_CACHE) {
+ ext->oe_urgent = 1;
+ osc_extent_hold(ext);
+ rc = 1;
+ }
+ }
+ osc_object_unlock(obj);
+ if (rc == 1)
+ osc_extent_release(env, ext);
+
+ /* wait for the extent until its state becomes @state */
+ rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state), &lwi);
+ if (rc == -ETIMEDOUT) {
+ OSC_EXTENT_DUMP(D_ERROR, ext,
+ "%s: wait ext to %d timedout, recovery in progress?\n",
+ osc_export(obj)->exp_obd->obd_name, state);
+
+ lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+ rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state),
+ &lwi);
+ }
+ if (rc == 0 && ext->oe_rc < 0)
+ rc = ext->oe_rc;
+ return rc;
+}
+
+/**
+ * Discard pages with index greater than @trunc_index. If @ext overlaps
+ * @trunc_index, a partial truncate happens.
+ */
+static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
+ bool partial)
+{
+ struct cl_env_nest nest;
+ struct lu_env *env;
+ struct cl_io *io;
+ struct osc_object *obj = ext->oe_obj;
+ struct client_obd *cli = osc_cli(obj);
+ struct osc_async_page *oap;
+ struct osc_async_page *tmp;
+ int pages_in_chunk = 0;
+ int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
+ __u64 trunc_chunk = trunc_index >> ppc_bits;
+ int grants = 0;
+ int nr_pages = 0;
+ int rc = 0;
+
+ LASSERT(sanity_check(ext) == 0);
+ LASSERT(ext->oe_state == OES_TRUNC);
+ LASSERT(!ext->oe_urgent);
+
+ /* Request new lu_env.
+ * We can't use that env from osc_cache_truncate_start() because
+ * it's from lov_io_sub and not fully initialized. */
+ env = cl_env_nested_get(&nest);
+ io = &osc_env_info(env)->oti_io;
+ io->ci_obj = cl_object_top(osc2cl(obj));
+ rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
+ if (rc < 0)
+ GOTO(out, rc);
+
+ /* discard all pages with index greater than trunc_index */
+ list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
+ oap_pending_item) {
+ struct cl_page *sub = oap2cl_page(oap);
+ struct cl_page *page = cl_page_top(sub);
+
+ LASSERT(list_empty(&oap->oap_rpc_item));
+
+ /* only discard pages whose index is greater than
+ * trunc_index, and ... */
+ if (sub->cp_index < trunc_index ||
+ (sub->cp_index == trunc_index && partial)) {
+ /* account for how many pages remain in the chunk
+ * so that we can calculate grant correctly. */
+ if (sub->cp_index >> ppc_bits == trunc_chunk)
+ ++pages_in_chunk;
+ continue;
+ }
+
+ list_del_init(&oap->oap_pending_item);
+
+ cl_page_get(page);
+ lu_ref_add(&page->cp_reference, "truncate", current);
+
+ if (cl_page_own(env, io, page) == 0) {
+ cl_page_unmap(env, io, page);
+ cl_page_discard(env, io, page);
+ cl_page_disown(env, io, page);
+ } else {
+ LASSERT(page->cp_state == CPS_FREEING);
+ LASSERT(0);
+ }
+
+ lu_ref_del(&page->cp_reference, "truncate", current);
+ cl_page_put(env, page);
+
+ --ext->oe_nr_pages;
+ ++nr_pages;
+ }
+ EASSERTF(ergo(ext->oe_start >= trunc_index + !!partial,
+ ext->oe_nr_pages == 0),
+ ext, "trunc_index %lu, partial %d\n", trunc_index, partial);
+
+ osc_object_lock(obj);
+ if (ext->oe_nr_pages == 0) {
+ LASSERT(pages_in_chunk == 0);
+ grants = ext->oe_grants;
+ ext->oe_grants = 0;
+ } else { /* calculate how many grants we can free */
+ int chunks = (ext->oe_end >> ppc_bits) - trunc_chunk;
+ pgoff_t last_index;
+
+
+ /* if there are no pages in this chunk, we can also free the
+ * grant for the last chunk */
+ if (pages_in_chunk == 0) {
+ /* if this is the 1st chunk and no pages in this chunk,
+ * ext->oe_nr_pages must be zero, so we should be in
+ * the other if-clause. */
+ LASSERT(trunc_chunk > 0);
+ --trunc_chunk;
+ ++chunks;
+ }
+
+ /* this is what we can free from this extent */
+ grants = chunks << cli->cl_chunkbits;
+ ext->oe_grants -= grants;
+ last_index = ((trunc_chunk + 1) << ppc_bits) - 1;
+ ext->oe_end = min(last_index, ext->oe_max_end);
+ LASSERT(ext->oe_end >= ext->oe_start);
+ LASSERT(ext->oe_grants > 0);
+ }
+ osc_object_unlock(obj);
+
+ if (grants > 0 || nr_pages > 0)
+ osc_free_grant(cli, nr_pages, grants);
+
+out:
+ cl_io_fini(env, io);
+ cl_env_nested_put(&nest, env);
+ return rc;
+}
+
+/**
+ * This function is used to prepare the extent for transfer.
+ * A race with page flushing (ll_writepage()) has to be handled cautiously.
+ */
+static int osc_extent_make_ready(const struct lu_env *env,
+ struct osc_extent *ext)
+{
+ struct osc_async_page *oap;
+ struct osc_async_page *last = NULL;
+ struct osc_object *obj = ext->oe_obj;
+ int page_count = 0;
+ int rc;
+
+ /* we're going to grab page lock, so object lock must not be taken. */
+ LASSERT(sanity_check(ext) == 0);
+ /* in locking state, any process should not touch this extent. */
+ EASSERT(ext->oe_state == OES_LOCKING, ext);
+ EASSERT(ext->oe_owner != NULL, ext);
+
+ OSC_EXTENT_DUMP(D_CACHE, ext, "make ready\n");
+
+ list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
+ ++page_count;
+ if (last == NULL || last->oap_obj_off < oap->oap_obj_off)
+ last = oap;
+
+ /* checking ASYNC_READY is race safe */
+ if ((oap->oap_async_flags & ASYNC_READY) != 0)
+ continue;
+
+ rc = osc_make_ready(env, oap, OBD_BRW_WRITE);
+ switch (rc) {
+ case 0:
+ spin_lock(&oap->oap_lock);
+ oap->oap_async_flags |= ASYNC_READY;
+ spin_unlock(&oap->oap_lock);
+ break;
+ case -EALREADY:
+ LASSERT((oap->oap_async_flags & ASYNC_READY) != 0);
+ break;
+ default:
+ LASSERTF(0, "unknown return code: %d\n", rc);
+ }
+ }
+
+ LASSERT(page_count == ext->oe_nr_pages);
+ LASSERT(last != NULL);
+ /* the last page is the only one whose count may need to be
+ * refreshed against the file size. */
+ if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) {
+ last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
+ LASSERT(last->oap_count > 0);
+ LASSERT(last->oap_page_off + last->oap_count <= PAGE_CACHE_SIZE);
+ last->oap_async_flags |= ASYNC_COUNT_STABLE;
+ }
+
+ /* for the remaining pages there is no need to call osc_refresh_count()
+ * because they are known not to be the last page */
+ list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
+ if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
+ oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off;
+ oap->oap_async_flags |= ASYNC_COUNT_STABLE;
+ }
+ }
+
+ osc_object_lock(obj);
+ osc_extent_state_set(ext, OES_RPC);
+ osc_object_unlock(obj);
+ /* get a refcount for RPC. */
+ osc_extent_get(ext);
+
+ return 0;
+}
+
+/**
+ * Quick and simple version of osc_extent_find(). This function is frequently
+ * called to expand the extent for the same IO. To expand the extent, the
+ * page index must be in the same or next chunk of ext->oe_end.
+ */
+static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants)
+{
+ struct osc_object *obj = ext->oe_obj;
+ struct client_obd *cli = osc_cli(obj);
+ struct osc_extent *next;
+ int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
+ pgoff_t chunk = index >> ppc_bits;
+ pgoff_t end_chunk;
+ pgoff_t end_index;
+ int chunksize = 1 << cli->cl_chunkbits;
+ int rc = 0;
+
+ LASSERT(ext->oe_max_end >= index && ext->oe_start <= index);
+ osc_object_lock(obj);
+ LASSERT(sanity_check_nolock(ext) == 0);
+ end_chunk = ext->oe_end >> ppc_bits;
+ if (chunk > end_chunk + 1)
+ GOTO(out, rc = -ERANGE);
+
+ if (end_chunk >= chunk)
+ GOTO(out, rc = 0);
+
+ LASSERT(end_chunk + 1 == chunk);
+ /* try to expand this extent to cover @index */
+ end_index = min(ext->oe_max_end, ((chunk + 1) << ppc_bits) - 1);
+
+ next = next_extent(ext);
+ if (next != NULL && next->oe_start <= end_index)
+ /* complex mode - overlapped with the next extent,
+ * this case will be handled by osc_extent_find() */
+ GOTO(out, rc = -EAGAIN);
+
+ ext->oe_end = end_index;
+ ext->oe_grants += chunksize;
+ *grants -= chunksize;
+ LASSERT(*grants >= 0);
+ EASSERTF(osc_extent_is_overlapped(obj, ext) == 0, ext,
+ "overlapped after expanding for %lu.\n", index);
+
+out:
+ osc_object_unlock(obj);
+ return rc;
+}
+
+static void osc_extent_tree_dump0(int level, struct osc_object *obj,
+ const char *func, int line)
+{
+ struct osc_extent *ext;
+ int cnt;
+
+ CDEBUG(level, "Dump object %p extents at %s:%d, mppr: %u.\n",
+ obj, func, line, osc_cli(obj)->cl_max_pages_per_rpc);
+
+ /* osc_object_lock(obj); */
+ cnt = 1;
+ for (ext = first_extent(obj); ext != NULL; ext = next_extent(ext))
+ OSC_EXTENT_DUMP(level, ext, "in tree %d.\n", cnt++);
+
+ cnt = 1;
+ list_for_each_entry(ext, &obj->oo_hp_exts, oe_link)
+ OSC_EXTENT_DUMP(level, ext, "hp %d.\n", cnt++);
+
+ cnt = 1;
+ list_for_each_entry(ext, &obj->oo_urgent_exts, oe_link)
+ OSC_EXTENT_DUMP(level, ext, "urgent %d.\n", cnt++);
+
+ cnt = 1;
+ list_for_each_entry(ext, &obj->oo_reading_exts, oe_link)
+ OSC_EXTENT_DUMP(level, ext, "reading %d.\n", cnt++);
+ /* osc_object_unlock(obj); */
+}
+
+/* ------------------ osc extent end ------------------ */
+
+static inline int osc_is_ready(struct osc_object *osc)
+{
+ return !list_empty(&osc->oo_ready_item) ||
+ !list_empty(&osc->oo_hp_ready_item);
+}
+
+#define OSC_IO_DEBUG(OSC, STR, args...) \
+ CDEBUG(D_CACHE, "obj %p ready %d|%c|%c wr %d|%c|%c rd %d|%c " STR, \
+ (OSC), osc_is_ready(OSC), \
+ list_empty_marker(&(OSC)->oo_hp_ready_item), \
+ list_empty_marker(&(OSC)->oo_ready_item), \
+ atomic_read(&(OSC)->oo_nr_writes), \
+ list_empty_marker(&(OSC)->oo_hp_exts), \
+ list_empty_marker(&(OSC)->oo_urgent_exts), \
+ atomic_read(&(OSC)->oo_nr_reads), \
+ list_empty_marker(&(OSC)->oo_reading_exts), \
+ ##args)
+
+static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap,
+ int cmd)
+{
+ struct osc_page *opg = oap2osc_page(oap);
+ struct cl_page *page = cl_page_top(oap2cl_page(oap));
+ int result;
+
+ LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */
+
+ result = cl_page_make_ready(env, page, CRT_WRITE);
+ if (result == 0)
+ opg->ops_submit_time = cfs_time_current();
+ return result;
+}
+
+static int osc_refresh_count(const struct lu_env *env,
+ struct osc_async_page *oap, int cmd)
+{
+ struct osc_page *opg = oap2osc_page(oap);
+ struct cl_page *page = oap2cl_page(oap);
+ struct cl_object *obj;
+ struct cl_attr *attr = &osc_env_info(env)->oti_attr;
+
+ int result;
+ loff_t kms;
+
+ /* readpage queues with _COUNT_STABLE, shouldn't get here. */
+ LASSERT(!(cmd & OBD_BRW_READ));
+ LASSERT(opg != NULL);
+ obj = opg->ops_cl.cpl_obj;
+
+ cl_object_attr_lock(obj);
+ result = cl_object_attr_get(env, obj, attr);
+ cl_object_attr_unlock(obj);
+ if (result < 0)
+ return result;
+ kms = attr->cat_kms;
+ if (cl_offset(obj, page->cp_index) >= kms)
+ /* catch race with truncate */
+ return 0;
+ else if (cl_offset(obj, page->cp_index + 1) > kms)
+ /* catch sub-page write at end of file */
+ return kms % PAGE_CACHE_SIZE;
+ else
+ return PAGE_CACHE_SIZE;
+}
+
+static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
+ int cmd, int rc)
+{
+ struct osc_page *opg = oap2osc_page(oap);
+ struct cl_page *page = cl_page_top(oap2cl_page(oap));
+ struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
+ enum cl_req_type crt;
+ int srvlock;
+
+ cmd &= ~OBD_BRW_NOQUOTA;
+ LASSERT(equi(page->cp_state == CPS_PAGEIN, cmd == OBD_BRW_READ));
+ LASSERT(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE));
+ LASSERT(opg->ops_transfer_pinned);
+
+ /*
+ * page->cp_req can be NULL if io submission failed before
+ * cl_req was allocated.
+ */
+ if (page->cp_req != NULL)
+ cl_req_page_done(env, page);
+ LASSERT(page->cp_req == NULL);
+
+ crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
+ /* Clear opg->ops_transfer_pinned before VM lock is released. */
+ opg->ops_transfer_pinned = 0;
+
+ spin_lock(&obj->oo_seatbelt);
+ LASSERT(opg->ops_submitter != NULL);
+ LASSERT(!list_empty(&opg->ops_inflight));
+ list_del_init(&opg->ops_inflight);
+ opg->ops_submitter = NULL;
+ spin_unlock(&obj->oo_seatbelt);
+
+ opg->ops_submit_time = 0;
+ srvlock = oap->oap_brw_flags & OBD_BRW_SRVLOCK;
+
+ /* statistics */
+ if (rc == 0 && srvlock) {
+ struct lu_device *ld = opg->ops_cl.cpl_obj->co_lu.lo_dev;
+ struct osc_stats *stats = &lu2osc_dev(ld)->od_stats;
+ int bytes = oap->oap_count;
+
+ if (crt == CRT_READ)
+ stats->os_lockless_reads += bytes;
+ else
+ stats->os_lockless_writes += bytes;
+ }
+
+ /*
+ * This has to be the last operation with the page, as locks are
+ * released in cl_page_completion() and nothing except for the
+ * reference counter protects page from concurrent reclaim.
+ */
+ lu_ref_del(&page->cp_reference, "transfer", page);
+
+ cl_page_completion(env, page, crt, rc);
+
+ return 0;
+}
+
+#define OSC_DUMP_GRANT(cli, fmt, args...) do { \
+ struct client_obd *__tmp = (cli); \
+ CDEBUG(D_CACHE, "%s: { dirty: %ld/%ld dirty_pages: %d/%d " \
+ "dropped: %ld avail: %ld, reserved: %ld, flight: %d } " fmt, \
+ __tmp->cl_import->imp_obd->obd_name, \
+ __tmp->cl_dirty, __tmp->cl_dirty_max, \
+ atomic_read(&obd_dirty_pages), obd_max_dirty_pages, \
+ __tmp->cl_lost_grant, __tmp->cl_avail_grant, \
+ __tmp->cl_reserved_grant, __tmp->cl_w_in_flight, ##args); \
+} while (0)
+
+/* caller must hold loi_list_lock */
+static void osc_consume_write_grant(struct client_obd *cli,
+ struct brw_page *pga)
+{
+ LASSERT(spin_is_locked(&cli->cl_loi_list_lock.lock));
+ LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
+ atomic_inc(&obd_dirty_pages);
+ cli->cl_dirty += PAGE_CACHE_SIZE;
+ pga->flag |= OBD_BRW_FROM_GRANT;
+ CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
+ PAGE_CACHE_SIZE, pga, pga->pg);
+ osc_update_next_shrink(cli);
+}
+
+/* the companion to osc_consume_write_grant, called when a brw has completed.
+ * must be called with the loi lock held. */
+static void osc_release_write_grant(struct client_obd *cli,
+ struct brw_page *pga)
+{
+ LASSERT(spin_is_locked(&cli->cl_loi_list_lock.lock));
+ if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
+ return;
+ }
+
+ pga->flag &= ~OBD_BRW_FROM_GRANT;
+ atomic_dec(&obd_dirty_pages);
+ cli->cl_dirty -= PAGE_CACHE_SIZE;
+ if (pga->flag & OBD_BRW_NOCACHE) {
+ pga->flag &= ~OBD_BRW_NOCACHE;
+ atomic_dec(&obd_dirty_transit_pages);
+ cli->cl_dirty_transit -= PAGE_CACHE_SIZE;
+ }
+}
+
+/**
+ * To avoid sleeping with the object lock held, it is better to allocate
+ * enough grant before entering the critical section.
+ *
+ * client_obd_list_lock held by caller
+ */
+static int osc_reserve_grant(struct client_obd *cli, unsigned int bytes)
+{
+ int rc = -EDQUOT;
+
+ if (cli->cl_avail_grant >= bytes) {
+ cli->cl_avail_grant -= bytes;
+ cli->cl_reserved_grant += bytes;
+ rc = 0;
+ }
+ return rc;
+}
+
+static void __osc_unreserve_grant(struct client_obd *cli,
+ unsigned int reserved, unsigned int unused)
+{
+ /* It is quite normal for the unused grant to exceed what was
+ * reserved. For example, when two extents are merged by adding a
+ * new chunk, one extent tax is saved; if the extent tax is larger
+ * than a chunk, adding the chunk saves even more grant. */
+ cli->cl_reserved_grant -= reserved;
+ if (unused > reserved) {
+ cli->cl_avail_grant += reserved;
+ cli->cl_lost_grant += unused - reserved;
+ } else {
+ cli->cl_avail_grant += unused;
+ }
+}
+
+void osc_unreserve_grant(struct client_obd *cli,
+ unsigned int reserved, unsigned int unused)
+{
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ __osc_unreserve_grant(cli, reserved, unused);
+ if (unused > 0)
+ osc_wake_cache_waiters(cli);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+}
+
+/**
+ * Free grant after IO is finished or canceled.
+ *
+ * @lost_grant is used to remember how much grant we have allocated but not
+ * used; it should be returned to the OST. There are two cases where grant
+ * can be lost:
+ * 1. truncate;
+ * 2. the blocksize at the OST is less than PAGE_CACHE_SIZE and a partial
+ * page was written. In this case the OST may use fewer chunks to serve
+ * the partial write. OSTs don't know the page size on the client side,
+ * so clients have to calculate the lost grant from the OST blocksize.
+ * See filter_grant_check() for details.
+ */
+static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
+ unsigned int lost_grant)
+{
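+ /* one chunk worth of grant plus the per-extent tax */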
+ int grant = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ atomic_sub(nr_pages, &obd_dirty_pages);
+ cli->cl_dirty -= nr_pages << PAGE_CACHE_SHIFT;
+ cli->cl_lost_grant += lost_grant;
+ if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) {
+ /* borrow some grant from truncate to avoid the case that
+ * truncate uses up all avail grant */
+ cli->cl_lost_grant -= grant;
+ cli->cl_avail_grant += grant;
+ }
+ osc_wake_cache_waiters(cli);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ CDEBUG(D_CACHE, "lost %u grant: %lu avail: %lu dirty: %lu\n",
+ lost_grant, cli->cl_lost_grant,
+ cli->cl_avail_grant, cli->cl_dirty);
+}
+
+/**
+ * The companion to osc_enter_cache(), called when @oap is no longer part of
+ * the dirty accounting due to error.
+ */
+static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap)
+{
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ osc_release_write_grant(cli, &oap->oap_brw_page);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+}
+
+/**
+ * Non-blocking version of osc_enter_cache() that consumes grant only when it
+ * is available.
+ */
+static int osc_enter_cache_try(struct client_obd *cli,
+ struct osc_async_page *oap,
+ int bytes, int transient)
+{
+ int rc;
+
+ OSC_DUMP_GRANT(cli, "need:%d.\n", bytes);
+
+ rc = osc_reserve_grant(cli, bytes);
+ if (rc < 0)
+ return 0;
+
+ if (cli->cl_dirty + PAGE_CACHE_SIZE <= cli->cl_dirty_max &&
+ atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
+ osc_consume_write_grant(cli, &oap->oap_brw_page);
+ if (transient) {
+ cli->cl_dirty_transit += PAGE_CACHE_SIZE;
+ atomic_inc(&obd_dirty_transit_pages);
+ oap->oap_brw_flags |= OBD_BRW_NOCACHE;
+ }
+ rc = 1;
+ } else {
+ __osc_unreserve_grant(cli, bytes, bytes);
+ rc = 0;
+ }
+ return rc;
+}
+
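+/* a waiter is satisfied (or woken with an error) once osc_wake_cache_waiters()
+ * has removed it from cl_cache_waiters */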
+static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
+{
+ int rc;
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ rc = list_empty(&ocw->ocw_entry);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ return rc;
+}
+
+/**
+ * The main entry point for reserving dirty page accounting. Usually the
+ * grant reserved here is freed in bulk in osc_free_grant(); if adding the
+ * page to the osc cache fails, it is freed in osc_exit_cache() instead.
+ *
+ * The process is put to sleep if it has already run out of grant.
+ */
+static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
+ struct osc_async_page *oap, int bytes)
+{
+ struct osc_object *osc = oap->oap_obj;
+ struct lov_oinfo *loi = osc->oo_oinfo;
+ struct osc_cache_waiter ocw;
+ struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+ int rc = -EDQUOT;
+
+ OSC_DUMP_GRANT(cli, "need:%d.\n", bytes);
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+
+ /* force the caller to try sync io. this can jump the list
+ * of queued writes and create a discontiguous rpc stream */
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
+ cli->cl_dirty_max < PAGE_CACHE_SIZE ||
+ cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync)
+ GOTO(out, rc = -EDQUOT);
+
+ /* Hopefully normal case - cache space and write credits available */
+ if (osc_enter_cache_try(cli, oap, bytes, 0))
+ GOTO(out, rc = 0);
+
+ /* We can get here for two reasons: too many dirty pages in cache, or
+ * we have run out of grant. In both cases we should write dirty pages out.
+ * Adding a cache waiter will trigger urgent write-out no matter what
+ * the RPC size will be.
+ * The exit condition is that there are no available grants and no dirty
+ * pages cached, which really means there is no space left on the OST. */
+ init_waitqueue_head(&ocw.ocw_waitq);
+ ocw.ocw_oap = oap;
+ ocw.ocw_grant = bytes;
+ while (cli->cl_dirty > 0 || cli->cl_w_in_flight > 0) {
+ list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
+ ocw.ocw_rc = 0;
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+
+ osc_io_unplug_async(env, cli, NULL);
+
+ CDEBUG(D_CACHE, "%s: sleeping for cache space @ %p for %p\n",
+ cli->cl_import->imp_obd->obd_name, &ocw, oap);
+
+ rc = l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+
+ /* l_wait_event is interrupted by signal */
+ if (rc < 0) {
+ list_del_init(&ocw.ocw_entry);
+ GOTO(out, rc);
+ }
+
+ LASSERT(list_empty(&ocw.ocw_entry));
+ rc = ocw.ocw_rc;
+
+ if (rc != -EDQUOT)
+ GOTO(out, rc);
+ if (osc_enter_cache_try(cli, oap, bytes, 0))
+ GOTO(out, rc = 0);
+ }
+out:
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ OSC_DUMP_GRANT(cli, "returned %d.\n", rc);
+ return rc;
+}
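+
+/* The wait loop above works roughly as follows: the thread registers an
+ * osc_cache_waiter on cl_cache_waiters, kicks writeback via
+ * osc_io_unplug_async() and sleeps in l_wait_event() until
+ * osc_wake_cache_waiters() removes it from the list (ocw_granted() keys off
+ * list_empty()).  When woken with -EDQUOT it retries osc_enter_cache_try(),
+ * and it only gives up once there are neither dirty pages nor writes in
+ * flight, i.e. the OST really has no space left. */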
+
+/* caller must hold loi_list_lock */
+void osc_wake_cache_waiters(struct client_obd *cli)
+{
+ struct list_head *l, *tmp;
+ struct osc_cache_waiter *ocw;
+
+ list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
+ ocw = list_entry(l, struct osc_cache_waiter, ocw_entry);
+ list_del_init(&ocw->ocw_entry);
+
+ ocw->ocw_rc = -EDQUOT;
+ /* we can't dirty more */
+ if ((cli->cl_dirty + PAGE_CACHE_SIZE > cli->cl_dirty_max) ||
+ (atomic_read(&obd_dirty_pages) + 1 >
+ obd_max_dirty_pages)) {
+ CDEBUG(D_CACHE, "no dirty room: dirty: %ld "
+ "osc max %ld, sys max %d\n", cli->cl_dirty,
+ cli->cl_dirty_max, obd_max_dirty_pages);
+ goto wakeup;
+ }
+
+ ocw->ocw_rc = 0;
+ if (!osc_enter_cache_try(cli, ocw->ocw_oap, ocw->ocw_grant, 0))
+ ocw->ocw_rc = -EDQUOT;
+
+wakeup:
+ CDEBUG(D_CACHE, "wake up %p for oap %p, avail grant %ld, %d\n",
+ ocw, ocw->ocw_oap, cli->cl_avail_grant, ocw->ocw_rc);
+
+ wake_up(&ocw->ocw_waitq);
+ }
+}
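+
+/* Each waiter is deleted from cl_cache_waiters before being woken, which is
+ * what makes the list_empty() test in ocw_granted() a safe wakeup condition.
+ * A waiter that still cannot get cache space is woken with ocw_rc = -EDQUOT
+ * and left to decide in osc_enter_cache() whether to queue itself again. */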
+
+static int osc_max_rpc_in_flight(struct client_obd *cli, struct osc_object *osc)
+{
+ int hprpc = !!list_empty(&osc->oo_hp_exts);
+ return rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight + hprpc;
+}
+
+/* This maintains the lists of pending pages to read/write for a given object
+ * (lop). This is used by osc_check_rpcs->osc_next_obj() and osc_list_maint()
+ * to quickly find objects that are ready to send an RPC. */
+static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc,
+ int cmd)
+{
+ int invalid_import = 0;
+
+ /* if we have an invalid import we want to drain the queued pages
+ * by forcing them through rpcs that immediately fail and complete
+ * the pages. recovery relies on this to empty the queued pages
+ * before canceling the locks and evicting the llite pages */
+ if ((cli->cl_import == NULL || cli->cl_import->imp_invalid))
+ invalid_import = 1;
+
+ if (cmd & OBD_BRW_WRITE) {
+ if (atomic_read(&osc->oo_nr_writes) == 0)
+ return 0;
+ if (invalid_import) {
+ CDEBUG(D_CACHE, "invalid import forcing RPC\n");
+ return 1;
+ }
+ if (!list_empty(&osc->oo_hp_exts)) {
+ CDEBUG(D_CACHE, "high prio request forcing RPC\n");
+ return 1;
+ }
+ if (!list_empty(&osc->oo_urgent_exts)) {
+ CDEBUG(D_CACHE, "urgent request forcing RPC\n");
+ return 1;
+ }
+ /* trigger a write rpc stream as long as there are dirtiers
+ * waiting for space. as they're waiting, they're not going to
+ * create more pages to coalesce with what's waiting.. */
+ if (!list_empty(&cli->cl_cache_waiters)) {
+ CDEBUG(D_CACHE, "cache waiters forcing RPC\n");
+ return 1;
+ }
+ if (atomic_read(&osc->oo_nr_writes) >=
+ cli->cl_max_pages_per_rpc)
+ return 1;
+ } else {
+ if (atomic_read(&osc->oo_nr_reads) == 0)
+ return 0;
+ if (invalid_import) {
+ CDEBUG(D_CACHE, "invalid import forcing RPC\n");
+ return 1;
+ }
+ /* all reads are urgent. */
+ if (!list_empty(&osc->oo_reading_exts))
+ return 1;
+ }
+
+ return 0;
+}
+
+static void osc_update_pending(struct osc_object *obj, int cmd, int delta)
+{
+ struct client_obd *cli = osc_cli(obj);
+ if (cmd & OBD_BRW_WRITE) {
+ atomic_add(delta, &obj->oo_nr_writes);
+ atomic_add(delta, &cli->cl_pending_w_pages);
+ LASSERT(atomic_read(&obj->oo_nr_writes) >= 0);
+ } else {
+ atomic_add(delta, &obj->oo_nr_reads);
+ atomic_add(delta, &cli->cl_pending_r_pages);
+ LASSERT(atomic_read(&obj->oo_nr_reads) >= 0);
+ }
+ OSC_IO_DEBUG(obj, "update pending cmd %d delta %d.\n", cmd, delta);
+}
+
+static int osc_makes_hprpc(struct osc_object *obj)
+{
+ return !list_empty(&obj->oo_hp_exts);
+}
+
+static void on_list(struct list_head *item, struct list_head *list, int should_be_on)
+{
+ if (list_empty(item) && should_be_on)
+ list_add_tail(item, list);
+ else if (!list_empty(item) && !should_be_on)
+ list_del_init(item);
+}
+
+/* maintain the osc's cli list membership invariants so that osc_send_oap_rpc
+ * can find pages to build into rpcs quickly */
+static int __osc_list_maint(struct client_obd *cli, struct osc_object *osc)
+{
+ if (osc_makes_hprpc(osc)) {
+ /* HP rpc */
+ on_list(&osc->oo_ready_item, &cli->cl_loi_ready_list, 0);
+ on_list(&osc->oo_hp_ready_item, &cli->cl_loi_hp_ready_list, 1);
+ } else {
+ on_list(&osc->oo_hp_ready_item, &cli->cl_loi_hp_ready_list, 0);
+ on_list(&osc->oo_ready_item, &cli->cl_loi_ready_list,
+ osc_makes_rpc(cli, osc, OBD_BRW_WRITE) ||
+ osc_makes_rpc(cli, osc, OBD_BRW_READ));
+ }
+
+ on_list(&osc->oo_write_item, &cli->cl_loi_write_list,
+ atomic_read(&osc->oo_nr_writes) > 0);
+
+ on_list(&osc->oo_read_item, &cli->cl_loi_read_list,
+ atomic_read(&osc->oo_nr_reads) > 0);
+
+ return osc_is_ready(osc);
+}
+
+static int osc_list_maint(struct client_obd *cli, struct osc_object *osc)
+{
+ int is_ready;
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ is_ready = __osc_list_maint(cli, osc);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+
+ return is_ready;
+}
+
+/* This tries to propagate async writeback errors back up to the
+ * application. When an async write fails we record the error code for later
+ * in case the app does an fsync. As long as errors persist we force future
+ * rpcs to be sync so that the app can get a sync error and break the cycle
+ * of queueing pages for which writeback will fail. */
+static void osc_process_ar(struct osc_async_rc *ar, __u64 xid,
+ int rc)
+{
+ if (rc) {
+ if (!ar->ar_rc)
+ ar->ar_rc = rc;
+
+ ar->ar_force_sync = 1;
+ ar->ar_min_xid = ptlrpc_sample_next_xid();
+ return;
+ }
+
+ if (ar->ar_force_sync && (xid >= ar->ar_min_xid))
+ ar->ar_force_sync = 0;
+}
+
+/* this must be called holding the loi list lock to give coverage to exit_cache,
+ * async_flag maintenance, and oap_request */
+static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
+ struct osc_async_page *oap, int sent, int rc)
+{
+ struct osc_object *osc = oap->oap_obj;
+ struct lov_oinfo *loi = osc->oo_oinfo;
+ __u64 xid = 0;
+
+ if (oap->oap_request != NULL) {
+ xid = ptlrpc_req_xid(oap->oap_request);
+ ptlrpc_req_finished(oap->oap_request);
+ oap->oap_request = NULL;
+ }
+
+ /* As the transfer for this page is being done, clear the flags */
+ spin_lock(&oap->oap_lock);
+ oap->oap_async_flags = 0;
+ spin_unlock(&oap->oap_lock);
+ oap->oap_interrupted = 0;
+
+ if (oap->oap_cmd & OBD_BRW_WRITE && xid > 0) {
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ osc_process_ar(&cli->cl_ar, xid, rc);
+ osc_process_ar(&loi->loi_ar, xid, rc);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ }
+
+ rc = osc_completion(env, oap, oap->oap_cmd, rc);
+ if (rc)
+ CERROR("completion on oap %p obj %p returns %d.\n",
+ oap, osc, rc);
+}
+
+/**
+ * Try to add extent to one RPC. We need to think about the following things:
+ * - # of pages must not be over max_pages_per_rpc
+ * - extent must be compatible with previous ones
+ */
+static int try_to_add_extent_for_io(struct client_obd *cli,
+ struct osc_extent *ext, struct list_head *rpclist,
+ int *pc, unsigned int *max_pages)
+{
+ struct osc_extent *tmp;
+
+ EASSERT((ext->oe_state == OES_CACHE || ext->oe_state == OES_LOCK_DONE),
+ ext);
+
+ *max_pages = max(ext->oe_mppr, *max_pages);
+ if (*pc + ext->oe_nr_pages > *max_pages)
+ return 0;
+
+ list_for_each_entry(tmp, rpclist, oe_link) {
+ EASSERT(tmp->oe_owner == current, tmp);
+#if 0
+ if (overlapped(tmp, ext)) {
+ OSC_EXTENT_DUMP(D_ERROR, tmp, "overlapped %p.\n", ext);
+ EASSERT(0, ext);
+ }
+#endif
+
+ if (tmp->oe_srvlock != ext->oe_srvlock ||
+ !tmp->oe_grants != !ext->oe_grants)
+ return 0;
+
+ /* remove break for strict check */
+ break;
+ }
+
+ *pc += ext->oe_nr_pages;
+ list_move_tail(&ext->oe_link, rpclist);
+ ext->oe_owner = current;
+ return 1;
+}
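+
+/* The compatibility check above is deliberately loose: extents added to one
+ * RPC must agree on oe_srvlock and on whether they carry grants (!oe_grants
+ * is compared, not the amount), and only the first extent on @rpclist is
+ * inspected because of the early break.  The total page count is capped by
+ * the largest oe_mppr seen so far. */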
+
+/**
+ * In order to prevent multiple ptlrpcd threads from breaking contiguous
+ * extents, get_write_extents() takes all appropriate extents atomically.
+ *
+ * The following policy is used to collect extents for IO:
+ * 1. Add as many HP extents as possible;
+ * 2. Add the first urgent extent in urgent extent list and take it out of
+ * urgent list;
+ * 3. Add subsequent extents of this urgent extent;
+ * 4. If urgent list is not empty, goto 2;
+ * 5. Traverse the extent tree from the 1st extent;
+ * 6. Above steps exit if there is no space in this RPC.
+ */
+static int get_write_extents(struct osc_object *obj, struct list_head *rpclist)
+{
+ struct client_obd *cli = osc_cli(obj);
+ struct osc_extent *ext;
+ int page_count = 0;
+ unsigned int max_pages = cli->cl_max_pages_per_rpc;
+
+ LASSERT(osc_object_is_locked(obj));
+ while (!list_empty(&obj->oo_hp_exts)) {
+ ext = list_entry(obj->oo_hp_exts.next, struct osc_extent,
+ oe_link);
+ LASSERT(ext->oe_state == OES_CACHE);
+ if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count,
+ &max_pages))
+ return page_count;
+ EASSERT(ext->oe_nr_pages <= max_pages, ext);
+ }
+ if (page_count == max_pages)
+ return page_count;
+
+ while (!list_empty(&obj->oo_urgent_exts)) {
+ ext = list_entry(obj->oo_urgent_exts.next,
+ struct osc_extent, oe_link);
+ if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count,
+ &max_pages))
+ return page_count;
+
+ if (!ext->oe_intree)
+ continue;
+
+ while ((ext = next_extent(ext)) != NULL) {
+ if ((ext->oe_state != OES_CACHE) ||
+ (!list_empty(&ext->oe_link) &&
+ ext->oe_owner != NULL))
+ continue;
+
+ if (!try_to_add_extent_for_io(cli, ext, rpclist,
+ &page_count, &max_pages))
+ return page_count;
+ }
+ }
+ if (page_count == max_pages)
+ return page_count;
+
+ ext = first_extent(obj);
+ while (ext != NULL) {
+ if ((ext->oe_state != OES_CACHE) ||
+ /* this extent may be already in current rpclist */
+ (!list_empty(&ext->oe_link) && ext->oe_owner != NULL)) {
+ ext = next_extent(ext);
+ continue;
+ }
+
+ if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count,
+ &max_pages))
+ return page_count;
+
+ ext = next_extent(ext);
+ }
+ return page_count;
+}
+
+static int
+osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli,
+ struct osc_object *osc, pdl_policy_t pol)
+{
+ LIST_HEAD(rpclist);
+ struct osc_extent *ext;
+ struct osc_extent *tmp;
+ struct osc_extent *first = NULL;
+ obd_count page_count = 0;
+ int srvlock = 0;
+ int rc = 0;
+
+ LASSERT(osc_object_is_locked(osc));
+
+ page_count = get_write_extents(osc, &rpclist);
+ LASSERT(equi(page_count == 0, list_empty(&rpclist)));
+
+ if (list_empty(&rpclist))
+ return 0;
+
+ osc_update_pending(osc, OBD_BRW_WRITE, -page_count);
+
+ list_for_each_entry(ext, &rpclist, oe_link) {
+ LASSERT(ext->oe_state == OES_CACHE ||
+ ext->oe_state == OES_LOCK_DONE);
+ if (ext->oe_state == OES_CACHE)
+ osc_extent_state_set(ext, OES_LOCKING);
+ else
+ osc_extent_state_set(ext, OES_RPC);
+ }
+
+ /* we're going to grab page lock, so release object lock because
+ * lock order is page lock -> object lock. */
+ osc_object_unlock(osc);
+
+ list_for_each_entry_safe(ext, tmp, &rpclist, oe_link) {
+ if (ext->oe_state == OES_LOCKING) {
+ rc = osc_extent_make_ready(env, ext);
+ if (unlikely(rc < 0)) {
+ list_del_init(&ext->oe_link);
+ osc_extent_finish(env, ext, 0, rc);
+ continue;
+ }
+ }
+ if (first == NULL) {
+ first = ext;
+ srvlock = ext->oe_srvlock;
+ } else {
+ LASSERT(srvlock == ext->oe_srvlock);
+ }
+ }
+
+ if (!list_empty(&rpclist)) {
+ LASSERT(page_count > 0);
+ rc = osc_build_rpc(env, cli, &rpclist, OBD_BRW_WRITE, pol);
+ LASSERT(list_empty(&rpclist));
+ }
+
+ osc_object_lock(osc);
+ return rc;
+}
+
+/**
+ * Prepare pages for async IO and put them in the send queue.
+ *
+ * \param cmd OBD_BRW_* macros
+ * \param lop pending pages
+ *
+ * \return zero if no page added to send queue.
+ * \return 1 if pages successfully added to send queue.
+ * \return negative on errors.
+ */
+static int
+osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli,
+ struct osc_object *osc, pdl_policy_t pol)
+{
+ struct osc_extent *ext;
+ struct osc_extent *next;
+ LIST_HEAD(rpclist);
+ int page_count = 0;
+ unsigned int max_pages = cli->cl_max_pages_per_rpc;
+ int rc = 0;
+
+ LASSERT(osc_object_is_locked(osc));
+ list_for_each_entry_safe(ext, next,
+ &osc->oo_reading_exts, oe_link) {
+ EASSERT(ext->oe_state == OES_LOCK_DONE, ext);
+ if (!try_to_add_extent_for_io(cli, ext, &rpclist, &page_count,
+ &max_pages))
+ break;
+ osc_extent_state_set(ext, OES_RPC);
+ EASSERT(ext->oe_nr_pages <= max_pages, ext);
+ }
+ LASSERT(page_count <= max_pages);
+
+ osc_update_pending(osc, OBD_BRW_READ, -page_count);
+
+ if (!list_empty(&rpclist)) {
+ osc_object_unlock(osc);
+
+ LASSERT(page_count > 0);
+ rc = osc_build_rpc(env, cli, &rpclist, OBD_BRW_READ, pol);
+ LASSERT(list_empty(&rpclist));
+
+ osc_object_lock(osc);
+ }
+ return rc;
+}
+
+#define list_to_obj(list, item) ({ \
+ struct list_head *__tmp = (list)->next; \
+ list_del_init(__tmp); \
+ list_entry(__tmp, struct osc_object, oo_##item); \
+})
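+
+/* Note that list_to_obj() deletes the object from whichever ready list it was
+ * found on before returning it; osc_check_rpcs() puts it back, if it still
+ * qualifies, through osc_list_maint() once the RPCs for it have been built. */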
+
+/* This is called by osc_check_rpcs() to find which objects have pages that
+ * we could be sending. These lists are maintained by osc_makes_rpc(). */
+static struct osc_object *osc_next_obj(struct client_obd *cli)
+{
+ /* First return objects that have blocked locks so that they
+ * will be flushed quickly and other clients can get the lock,
+ * then objects which have pages ready to be stuffed into RPCs */
+ if (!list_empty(&cli->cl_loi_hp_ready_list))
+ return list_to_obj(&cli->cl_loi_hp_ready_list, hp_ready_item);
+ if (!list_empty(&cli->cl_loi_ready_list))
+ return list_to_obj(&cli->cl_loi_ready_list, ready_item);
+
+ /* then if we have cache waiters, return all objects with queued
+ * writes. This is especially important when many small files
+ * have filled up the cache and not been fired into rpcs because
+ * they don't pass the nr_pending/object threshold */
+ if (!list_empty(&cli->cl_cache_waiters) &&
+ !list_empty(&cli->cl_loi_write_list))
+ return list_to_obj(&cli->cl_loi_write_list, write_item);
+
+ /* then return all queued objects when we have an invalid import
+ * so that they get flushed */
+ if (cli->cl_import == NULL || cli->cl_import->imp_invalid) {
+ if (!list_empty(&cli->cl_loi_write_list))
+ return list_to_obj(&cli->cl_loi_write_list, write_item);
+ if (!list_empty(&cli->cl_loi_read_list))
+ return list_to_obj(&cli->cl_loi_read_list, read_item);
+ }
+ return NULL;
+}
+
+/* called with the loi list lock held */
+static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli,
+ pdl_policy_t pol)
+{
+ struct osc_object *osc;
+ int rc = 0;
+
+ while ((osc = osc_next_obj(cli)) != NULL) {
+ struct cl_object *obj = osc2cl(osc);
+ struct lu_ref_link link;
+
+ OSC_IO_DEBUG(osc, "%lu in flight\n", rpcs_in_flight(cli));
+
+ if (osc_max_rpc_in_flight(cli, osc)) {
+ __osc_list_maint(cli, osc);
+ break;
+ }
+
+ cl_object_get(obj);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ lu_object_ref_add_at(&obj->co_lu, &link, "check",
+ current);
+
+ /* attempt some read/write balancing by alternating between
+ * reads and writes in an object. The makes_rpc checks here
+ * would be redundant if we were getting read/write work items
+ * instead of objects. we don't want send_oap_rpc to drain a
+ * partial read pending queue when we're given this object to
+ * do io on writes while there are cache waiters */
+ osc_object_lock(osc);
+ if (osc_makes_rpc(cli, osc, OBD_BRW_WRITE)) {
+ rc = osc_send_write_rpc(env, cli, osc, pol);
+ if (rc < 0) {
+ CERROR("Write request failed with %d\n", rc);
+
+ /* osc_send_write_rpc failed, mostly because of
+ * memory pressure.
+ *
+ * We can't break out here, because if:
+ * - a page was submitted by osc_io_submit, so
+ * it is locked;
+ * - there is no request in flight, and
+ * - there is no subsequent request,
+ * then the system will be in a live-lock state,
+ * because there is no chance to call
+ * osc_io_unplug() and osc_check_rpcs() any
+ * more. pdflush can't help in this case,
+ * because it might be blocked grabbing
+ * the page lock as mentioned above.
+ *
+ * Anyway, continue to drain pages. */
+ /* break; */
+ }
+ }
+ if (osc_makes_rpc(cli, osc, OBD_BRW_READ)) {
+ rc = osc_send_read_rpc(env, cli, osc, pol);
+ if (rc < 0)
+ CERROR("Read request failed with %d\n", rc);
+ }
+ osc_object_unlock(osc);
+
+ osc_list_maint(cli, osc);
+ lu_object_ref_del_at(&obj->co_lu, &link, "check",
+ current);
+ cl_object_put(env, obj);
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ }
+}
+
+static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
+ struct osc_object *osc, pdl_policy_t pol, int async)
+{
+ int rc = 0;
+
+ if (osc != NULL && osc_list_maint(cli, osc) == 0)
+ return 0;
+
+ if (!async) {
+ /* disable osc_lru_shrink() temporarily to avoid
+ * potential stack overrun problem. LU-2859 */
+ atomic_inc(&cli->cl_lru_shrinkers);
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ osc_check_rpcs(env, cli, pol);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ atomic_dec(&cli->cl_lru_shrinkers);
+ } else {
+ CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli);
+ LASSERT(cli->cl_writeback_work != NULL);
+ rc = ptlrpcd_queue_work(cli->cl_writeback_work);
+ }
+ return rc;
+}
+
+static int osc_io_unplug_async(const struct lu_env *env,
+ struct client_obd *cli, struct osc_object *osc)
+{
+ /* XXX: the policy is not actually used. */
+ return osc_io_unplug0(env, cli, osc, PDL_POLICY_ROUND, 1);
+}
+
+void osc_io_unplug(const struct lu_env *env, struct client_obd *cli,
+ struct osc_object *osc, pdl_policy_t pol)
+{
+ (void)osc_io_unplug0(env, cli, osc, pol, 0);
+}
+
+int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
+ struct page *page, loff_t offset)
+{
+ struct obd_export *exp = osc_export(osc);
+ struct osc_async_page *oap = &ops->ops_oap;
+
+ if (!page)
+ return cfs_size_round(sizeof(*oap));
+
+ oap->oap_magic = OAP_MAGIC;
+ oap->oap_cli = &exp->exp_obd->u.cli;
+ oap->oap_obj = osc;
+
+ oap->oap_page = page;
+ oap->oap_obj_off = offset;
+ LASSERT(!(offset & ~CFS_PAGE_MASK));
+
+ if (!client_is_remote(exp) && cfs_capable(CFS_CAP_SYS_RESOURCE))
+ oap->oap_brw_flags = OBD_BRW_NOQUOTA;
+
+ INIT_LIST_HEAD(&oap->oap_pending_item);
+ INIT_LIST_HEAD(&oap->oap_rpc_item);
+
+ spin_lock_init(&oap->oap_lock);
+ CDEBUG(D_INFO, "oap %p page %p obj off "LPU64"\n",
+ oap, page, oap->oap_obj_off);
+ return 0;
+}
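+
+/* When called with a NULL page, osc_prep_async_page() only reports the
+ * rounded size of struct osc_async_page, presumably so the caller knows how
+ * much per-page state to reserve.  With a real page it initializes the oap so
+ * the page can later be queued through osc_queue_async_io(). */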
+
+int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
+ struct osc_page *ops)
+{
+ struct osc_io *oio = osc_env_io(env);
+ struct osc_extent *ext = NULL;
+ struct osc_async_page *oap = &ops->ops_oap;
+ struct client_obd *cli = oap->oap_cli;
+ struct osc_object *osc = oap->oap_obj;
+ pgoff_t index;
+ int grants = 0;
+ int brw_flags = OBD_BRW_ASYNC;
+ int cmd = OBD_BRW_WRITE;
+ int need_release = 0;
+ int rc = 0;
+
+ if (oap->oap_magic != OAP_MAGIC)
+ return -EINVAL;
+
+ if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
+ return -EIO;
+
+ if (!list_empty(&oap->oap_pending_item) ||
+ !list_empty(&oap->oap_rpc_item))
+ return -EBUSY;
+
+ /* Set the OBD_BRW_SRVLOCK before the page is queued. */
+ brw_flags |= ops->ops_srvlock ? OBD_BRW_SRVLOCK : 0;
+ if (!client_is_remote(osc_export(osc)) &&
+ cfs_capable(CFS_CAP_SYS_RESOURCE)) {
+ brw_flags |= OBD_BRW_NOQUOTA;
+ cmd |= OBD_BRW_NOQUOTA;
+ }
+
+ /* check if the file's owner/group is over quota */
+ if (!(cmd & OBD_BRW_NOQUOTA)) {
+ struct cl_object *obj;
+ struct cl_attr *attr;
+ unsigned int qid[MAXQUOTAS];
+
+ obj = cl_object_top(&osc->oo_cl);
+ attr = &osc_env_info(env)->oti_attr;
+
+ cl_object_attr_lock(obj);
+ rc = cl_object_attr_get(env, obj, attr);
+ cl_object_attr_unlock(obj);
+
+ qid[USRQUOTA] = attr->cat_uid;
+ qid[GRPQUOTA] = attr->cat_gid;
+ if (rc == 0 && osc_quota_chkdq(cli, qid) == NO_QUOTA)
+ rc = -EDQUOT;
+ if (rc)
+ return rc;
+ }
+
+ oap->oap_cmd = cmd;
+ oap->oap_page_off = ops->ops_from;
+ oap->oap_count = ops->ops_to - ops->ops_from;
+ oap->oap_async_flags = 0;
+ oap->oap_brw_flags = brw_flags;
+
+ OSC_IO_DEBUG(osc, "oap %p page %p added for cmd %d\n",
+ oap, oap->oap_page, oap->oap_cmd & OBD_BRW_RWMASK);
+
+ index = oap2cl_page(oap)->cp_index;
+
+ /* Add this page into an extent by the following steps:
+ * 1. if there exists an active extent for this IO, this page can
+ * usually be added to the active extent, and sometimes we need to
+ * expand the extent to accommodate this page;
+ * 2. otherwise, a new extent will be allocated. */
+
+ ext = oio->oi_active;
+ if (ext != NULL && ext->oe_start <= index && ext->oe_max_end >= index) {
+ /* one chunk plus extent overhead must be enough to write this
+ * page */
+ grants = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
+ if (ext->oe_end >= index)
+ grants = 0;
+
+ /* it doesn't need any grant to dirty this page */
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ rc = osc_enter_cache_try(cli, oap, grants, 0);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ if (rc == 0) { /* try failed */
+ grants = 0;
+ need_release = 1;
+ } else if (ext->oe_end < index) {
+ int tmp = grants;
+ /* try to expand this extent */
+ rc = osc_extent_expand(ext, index, &tmp);
+ if (rc < 0) {
+ need_release = 1;
+ /* don't free reserved grant */
+ } else {
+ OSC_EXTENT_DUMP(D_CACHE, ext,
+ "expanded for %lu.\n", index);
+ osc_unreserve_grant(cli, grants, tmp);
+ grants = 0;
+ }
+ }
+ rc = 0;
+ } else if (ext != NULL) {
+ /* index is located outside of active extent */
+ need_release = 1;
+ }
+ if (need_release) {
+ osc_extent_release(env, ext);
+ oio->oi_active = NULL;
+ ext = NULL;
+ }
+
+ if (ext == NULL) {
+ int tmp = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
+
+ /* try to find new extent to cover this page */
+ LASSERT(oio->oi_active == NULL);
+ /* we may have allocated grant for this page if we failed
+ * to expand the previous active extent. */
+ LASSERT(ergo(grants > 0, grants >= tmp));
+
+ rc = 0;
+ if (grants == 0) {
+ /* we haven't allocated grant for this page. */
+ rc = osc_enter_cache(env, cli, oap, tmp);
+ if (rc == 0)
+ grants = tmp;
+ }
+
+ tmp = grants;
+ if (rc == 0) {
+ ext = osc_extent_find(env, osc, index, &tmp);
+ if (IS_ERR(ext)) {
+ LASSERT(tmp == grants);
+ osc_exit_cache(cli, oap);
+ rc = PTR_ERR(ext);
+ ext = NULL;
+ } else {
+ oio->oi_active = ext;
+ }
+ }
+ if (grants > 0)
+ osc_unreserve_grant(cli, grants, tmp);
+ }
+
+ LASSERT(ergo(rc == 0, ext != NULL));
+ if (ext != NULL) {
+ EASSERTF(ext->oe_end >= index && ext->oe_start <= index,
+ ext, "index = %lu.\n", index);
+ LASSERT((oap->oap_brw_flags & OBD_BRW_FROM_GRANT) != 0);
+
+ osc_object_lock(osc);
+ if (ext->oe_nr_pages == 0)
+ ext->oe_srvlock = ops->ops_srvlock;
+ else
+ LASSERT(ext->oe_srvlock == ops->ops_srvlock);
+ ++ext->oe_nr_pages;
+ list_add_tail(&oap->oap_pending_item, &ext->oe_pages);
+ osc_object_unlock(osc);
+ }
+ return rc;
+}
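+
+/* Rough sketch of the grant flow above: dirtying a page costs at most one
+ * chunk (1 << cl_chunkbits) plus cl_extent_tax of grant.  A page already
+ * covered by the active extent costs nothing extra; otherwise a full
+ * reservation is made up front and whatever the extent expansion or
+ * allocation does not end up consuming (the amount left in tmp) is handed
+ * back via osc_unreserve_grant(). */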
+
+int osc_teardown_async_page(const struct lu_env *env,
+ struct osc_object *obj, struct osc_page *ops)
+{
+ struct osc_async_page *oap = &ops->ops_oap;
+ struct osc_extent *ext = NULL;
+ int rc = 0;
+
+ LASSERT(oap->oap_magic == OAP_MAGIC);
+
+ CDEBUG(D_INFO, "teardown oap %p page %p at index %lu.\n",
+ oap, ops, oap2cl_page(oap)->cp_index);
+
+ osc_object_lock(obj);
+ if (!list_empty(&oap->oap_rpc_item)) {
+ CDEBUG(D_CACHE, "oap %p is not in cache.\n", oap);
+ rc = -EBUSY;
+ } else if (!list_empty(&oap->oap_pending_item)) {
+ ext = osc_extent_lookup(obj, oap2cl_page(oap)->cp_index);
+ /* only truncated pages are allowed to be taken out.
+ * See osc_extent_truncate() and osc_cache_truncate_start()
+ * for details. */
+ if (ext != NULL && ext->oe_state != OES_TRUNC) {
+ OSC_EXTENT_DUMP(D_ERROR, ext, "trunc at %lu.\n",
+ oap2cl_page(oap)->cp_index);
+ rc = -EBUSY;
+ }
+ }
+ osc_object_unlock(obj);
+ if (ext != NULL)
+ osc_extent_put(env, ext);
+ return rc;
+}
+
+/**
+ * This is called when a page is picked up by the kernel to be written out.
+ *
+ * We should find the corresponding extent and add the whole extent to the
+ * urgent list. The extent may be being truncated or used; handle it
+ * carefully.
+ */
+int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
+ struct osc_page *ops)
+{
+ struct osc_extent *ext = NULL;
+ struct osc_object *obj = cl2osc(ops->ops_cl.cpl_obj);
+ struct cl_page *cp = ops->ops_cl.cpl_page;
+ pgoff_t index = cp->cp_index;
+ struct osc_async_page *oap = &ops->ops_oap;
+ bool unplug = false;
+ int rc = 0;
+
+ osc_object_lock(obj);
+ ext = osc_extent_lookup(obj, index);
+ if (ext == NULL) {
+ osc_extent_tree_dump(D_ERROR, obj);
+ LASSERTF(0, "page index %lu is NOT covered.\n", index);
+ }
+
+ switch (ext->oe_state) {
+ case OES_RPC:
+ case OES_LOCK_DONE:
+ CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(cp),
+ "flush an in-rpc page?\n");
+ LASSERT(0);
+ break;
+ case OES_LOCKING:
+ /* If we know this extent is being written out, we should abort
+ * so that the writer can make this page ready. Otherwise there
+ * is a deadlock problem, because another process can wait for
+ * the page writeback bit while holding the page lock, and
+ * meanwhile in vvp_page_make_ready() we need to grab the page
+ * lock before really sending the RPC. */
+ case OES_TRUNC:
+ /* race with truncate, page will be redirtied */
+ GOTO(out, rc = -EAGAIN);
+ default:
+ break;
+ }
+
+ rc = cl_page_prep(env, io, cl_page_top(cp), CRT_WRITE);
+ if (rc)
+ GOTO(out, rc);
+
+ spin_lock(&oap->oap_lock);
+ oap->oap_async_flags |= ASYNC_READY|ASYNC_URGENT;
+ spin_unlock(&oap->oap_lock);
+
+ if (memory_pressure_get())
+ ext->oe_memalloc = 1;
+
+ ext->oe_urgent = 1;
+ if (ext->oe_state == OES_CACHE) {
+ OSC_EXTENT_DUMP(D_CACHE, ext,
+ "flush page %p make it urgent.\n", oap);
+ if (list_empty(&ext->oe_link))
+ list_add_tail(&ext->oe_link, &obj->oo_urgent_exts);
+ unplug = true;
+ }
+ rc = 0;
+
+out:
+ osc_object_unlock(obj);
+ osc_extent_put(env, ext);
+ if (unplug)
+ osc_io_unplug_async(env, osc_cli(obj), obj);
+ return rc;
+}
+
+/**
+ * This is called when a sync waiter receives an interruption. Its job is to
+ * get the caller woken as soon as possible. If its page hasn't been put into
+ * an rpc yet it can dequeue immediately; otherwise it has to mark the rpc as
+ * desiring interruption, which will forcefully complete the rpc once the rpc
+ * has timed out.
+ */
+int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops)
+{
+ struct osc_async_page *oap = &ops->ops_oap;
+ struct osc_object *obj = oap->oap_obj;
+ struct client_obd *cli = osc_cli(obj);
+ struct osc_extent *ext;
+ struct osc_extent *found = NULL;
+ struct list_head *plist;
+ pgoff_t index = oap2cl_page(oap)->cp_index;
+ int rc = -EBUSY;
+ int cmd;
+
+ LASSERT(!oap->oap_interrupted);
+ oap->oap_interrupted = 1;
+
+ /* Find out the caching extent */
+ osc_object_lock(obj);
+ if (oap->oap_cmd & OBD_BRW_WRITE) {
+ plist = &obj->oo_urgent_exts;
+ cmd = OBD_BRW_WRITE;
+ } else {
+ plist = &obj->oo_reading_exts;
+ cmd = OBD_BRW_READ;
+ }
+ list_for_each_entry(ext, plist, oe_link) {
+ if (ext->oe_start <= index && ext->oe_end >= index) {
+ LASSERT(ext->oe_state == OES_LOCK_DONE);
+ /* For OES_LOCK_DONE state extent, it has already held
+ * a refcount for RPC. */
+ found = osc_extent_get(ext);
+ break;
+ }
+ }
+ if (found != NULL) {
+ list_del_init(&found->oe_link);
+ osc_update_pending(obj, cmd, -found->oe_nr_pages);
+ osc_object_unlock(obj);
+
+ osc_extent_finish(env, found, 0, -EINTR);
+ osc_extent_put(env, found);
+ rc = 0;
+ } else {
+ osc_object_unlock(obj);
+ /* ok, it's been put in an rpc. only one oap gets a request
+ * reference */
+ if (oap->oap_request != NULL) {
+ ptlrpc_mark_interrupted(oap->oap_request);
+ ptlrpcd_wake(oap->oap_request);
+ ptlrpc_req_finished(oap->oap_request);
+ oap->oap_request = NULL;
+ }
+ }
+
+ osc_list_maint(cli, obj);
+ return rc;
+}
+
+int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
+ struct list_head *list, int cmd, int brw_flags)
+{
+ struct client_obd *cli = osc_cli(obj);
+ struct osc_extent *ext;
+ struct osc_async_page *oap, *tmp;
+ int page_count = 0;
+ int mppr = cli->cl_max_pages_per_rpc;
+ pgoff_t start = CL_PAGE_EOF;
+ pgoff_t end = 0;
+
+ list_for_each_entry(oap, list, oap_pending_item) {
+ struct cl_page *cp = oap2cl_page(oap);
+ if (cp->cp_index > end)
+ end = cp->cp_index;
+ if (cp->cp_index < start)
+ start = cp->cp_index;
+ ++page_count;
+ mppr <<= (page_count > mppr);
+ }
+
+ ext = osc_extent_alloc(obj);
+ if (ext == NULL) {
+ list_for_each_entry_safe(oap, tmp, list, oap_pending_item) {
+ list_del_init(&oap->oap_pending_item);
+ osc_ap_completion(env, cli, oap, 0, -ENOMEM);
+ }
+ return -ENOMEM;
+ }
+
+ ext->oe_rw = !!(cmd & OBD_BRW_READ);
+ ext->oe_urgent = 1;
+ ext->oe_start = start;
+ ext->oe_end = ext->oe_max_end = end;
+ ext->oe_obj = obj;
+ ext->oe_srvlock = !!(brw_flags & OBD_BRW_SRVLOCK);
+ ext->oe_nr_pages = page_count;
+ ext->oe_mppr = mppr;
+ list_splice_init(list, &ext->oe_pages);
+
+ osc_object_lock(obj);
+ /* Reuse the initial refcount for RPC, don't drop it */
+ osc_extent_state_set(ext, OES_LOCK_DONE);
+ if (cmd & OBD_BRW_WRITE) {
+ list_add_tail(&ext->oe_link, &obj->oo_urgent_exts);
+ osc_update_pending(obj, OBD_BRW_WRITE, page_count);
+ } else {
+ list_add_tail(&ext->oe_link, &obj->oo_reading_exts);
+ osc_update_pending(obj, OBD_BRW_READ, page_count);
+ }
+ osc_object_unlock(obj);
+
+ osc_io_unplug(env, cli, obj, PDL_POLICY_ROUND);
+ return 0;
+}
+
+/**
+ * Called by osc_io_setattr_start() to freeze and destroy covering extents.
+ */
+int osc_cache_truncate_start(const struct lu_env *env, struct osc_io *oio,
+ struct osc_object *obj, __u64 size)
+{
+ struct client_obd *cli = osc_cli(obj);
+ struct osc_extent *ext;
+ struct osc_extent *waiting = NULL;
+ pgoff_t index;
+ LIST_HEAD(list);
+ int result = 0;
+ bool partial;
+
+ /* pages with index greater than or equal to index will be truncated. */
+ index = cl_index(osc2cl(obj), size);
+ partial = size > cl_offset(osc2cl(obj), index);
+
+again:
+ osc_object_lock(obj);
+ ext = osc_extent_search(obj, index);
+ if (ext == NULL)
+ ext = first_extent(obj);
+ else if (ext->oe_end < index)
+ ext = next_extent(ext);
+ while (ext != NULL) {
+ EASSERT(ext->oe_state != OES_TRUNC, ext);
+
+ if (ext->oe_state > OES_CACHE || ext->oe_urgent) {
+ /* if ext is in urgent state, there must be a page that
+ * has already been flushed by write_page().
+ * We have to wait for this extent because we can't
+ * truncate that page. */
+ LASSERT(!ext->oe_hp);
+ OSC_EXTENT_DUMP(D_CACHE, ext,
+ "waiting for busy extent\n");
+ waiting = osc_extent_get(ext);
+ break;
+ }
+
+ OSC_EXTENT_DUMP(D_CACHE, ext, "try to trunc:"LPU64".\n", size);
+
+ osc_extent_get(ext);
+ if (ext->oe_state == OES_ACTIVE) {
+ /* though we grab the inode mutex for the write path, we
+ * release it before releasing the extent (in osc_io_end()),
+ * so there is a race window in which an extent is still
+ * in OES_ACTIVE when truncate starts. */
+ LASSERT(!ext->oe_trunc_pending);
+ ext->oe_trunc_pending = 1;
+ } else {
+ EASSERT(ext->oe_state == OES_CACHE, ext);
+ osc_extent_state_set(ext, OES_TRUNC);
+ osc_update_pending(obj, OBD_BRW_WRITE,
+ -ext->oe_nr_pages);
+ }
+ EASSERT(list_empty(&ext->oe_link), ext);
+ list_add_tail(&ext->oe_link, &list);
+
+ ext = next_extent(ext);
+ }
+ osc_object_unlock(obj);
+
+ osc_list_maint(cli, obj);
+
+ while (!list_empty(&list)) {
+ int rc;
+
+ ext = list_entry(list.next, struct osc_extent, oe_link);
+ list_del_init(&ext->oe_link);
+
+ /* the extent may be in OES_ACTIVE state because the inode
+ * mutex is released before osc_io_end() in the file write case */
+ if (ext->oe_state != OES_TRUNC)
+ osc_extent_wait(env, ext, OES_TRUNC);
+
+ rc = osc_extent_truncate(ext, index, partial);
+ if (rc < 0) {
+ if (result == 0)
+ result = rc;
+
+ OSC_EXTENT_DUMP(D_ERROR, ext,
+ "truncate error %d\n", rc);
+ } else if (ext->oe_nr_pages == 0) {
+ osc_extent_remove(ext);
+ } else {
+ /* this must be an overlapped extent which means only
+ * part of pages in this extent have been truncated.
+ */
+ EASSERTF(ext->oe_start <= index, ext,
+ "trunc index = %lu/%d.\n", index, partial);
+ /* fix index to skip this partially truncated extent */
+ index = ext->oe_end + 1;
+ partial = false;
+
+ /* we need to hold this extent in OES_TRUNC state so
+ * that no writeback will happen. This is to avoid
+ * BUG 17397. */
+ LASSERT(oio->oi_trunc == NULL);
+ oio->oi_trunc = osc_extent_get(ext);
+ OSC_EXTENT_DUMP(D_CACHE, ext,
+ "trunc at "LPU64"\n", size);
+ }
+ osc_extent_put(env, ext);
+ }
+ if (waiting != NULL) {
+ int rc;
+
+ /* ignore the result of osc_extent_wait; the write initiator
+ * should take care of it. */
+ rc = osc_extent_wait(env, waiting, OES_INV);
+ if (rc < 0)
+ OSC_EXTENT_DUMP(D_CACHE, waiting, "error: %d.\n", rc);
+
+ osc_extent_put(env, waiting);
+ waiting = NULL;
+ goto again;
+ }
+ return result;
+}
+
+/**
+ * Called after osc_io_setattr_end to add oio->oi_trunc back to cache.
+ */
+void osc_cache_truncate_end(const struct lu_env *env, struct osc_io *oio,
+ struct osc_object *obj)
+{
+ struct osc_extent *ext = oio->oi_trunc;
+
+ oio->oi_trunc = NULL;
+ if (ext != NULL) {
+ bool unplug = false;
+
+ EASSERT(ext->oe_nr_pages > 0, ext);
+ EASSERT(ext->oe_state == OES_TRUNC, ext);
+ EASSERT(!ext->oe_urgent, ext);
+
+ OSC_EXTENT_DUMP(D_CACHE, ext, "trunc -> cache.\n");
+ osc_object_lock(obj);
+ osc_extent_state_set(ext, OES_CACHE);
+ if (ext->oe_fsync_wait && !ext->oe_urgent) {
+ ext->oe_urgent = 1;
+ list_move_tail(&ext->oe_link, &obj->oo_urgent_exts);
+ unplug = true;
+ }
+ osc_update_pending(obj, OBD_BRW_WRITE, ext->oe_nr_pages);
+ osc_object_unlock(obj);
+ osc_extent_put(env, ext);
+
+ if (unplug)
+ osc_io_unplug_async(env, osc_cli(obj), obj);
+ }
+}
+
+/**
+ * Wait for extents in a specific range to be written out.
+ * The caller must have called osc_cache_writeback_range() to issue the IO,
+ * otherwise it will take a long time for this function to finish.
+ *
+ * Caller must hold inode_mutex, or cancel exclusive dlm lock, so that
+ * nobody else can dirty this range of the file while we're waiting for
+ * extents to be written.
+ */
+int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj,
+ pgoff_t start, pgoff_t end)
+{
+ struct osc_extent *ext;
+ pgoff_t index = start;
+ int result = 0;
+
+again:
+ osc_object_lock(obj);
+ ext = osc_extent_search(obj, index);
+ if (ext == NULL)
+ ext = first_extent(obj);
+ else if (ext->oe_end < index)
+ ext = next_extent(ext);
+ while (ext != NULL) {
+ int rc;
+
+ if (ext->oe_start > end)
+ break;
+
+ if (!ext->oe_fsync_wait) {
+ ext = next_extent(ext);
+ continue;
+ }
+
+ EASSERT(ergo(ext->oe_state == OES_CACHE,
+ ext->oe_hp || ext->oe_urgent), ext);
+ EASSERT(ergo(ext->oe_state == OES_ACTIVE,
+ !ext->oe_hp && ext->oe_urgent), ext);
+
+ index = ext->oe_end + 1;
+ osc_extent_get(ext);
+ osc_object_unlock(obj);
+
+ rc = osc_extent_wait(env, ext, OES_INV);
+ if (result == 0)
+ result = rc;
+ osc_extent_put(env, ext);
+ goto again;
+ }
+ osc_object_unlock(obj);
+
+ OSC_IO_DEBUG(obj, "sync file range.\n");
+ return result;
+}
+
+/**
+ * Called to write out a range of an osc object.
+ *
+ * @hp: should be set if this is caused by lock cancellation;
+ * @discard: set if dirty pages should be dropped - the file will be deleted
+ * or truncated, which implies there are no partially discarded extents.
+ *
+ * Returns how many pages will be issued, or an error code on error.
+ */
+int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj,
+ pgoff_t start, pgoff_t end, int hp, int discard)
+{
+ struct osc_extent *ext;
+ LIST_HEAD(discard_list);
+ bool unplug = false;
+ int result = 0;
+
+ osc_object_lock(obj);
+ ext = osc_extent_search(obj, start);
+ if (ext == NULL)
+ ext = first_extent(obj);
+ else if (ext->oe_end < start)
+ ext = next_extent(ext);
+ while (ext != NULL) {
+ if (ext->oe_start > end)
+ break;
+
+ ext->oe_fsync_wait = 1;
+ switch (ext->oe_state) {
+ case OES_CACHE:
+ result += ext->oe_nr_pages;
+ if (!discard) {
+ struct list_head *list = NULL;
+ if (hp) {
+ EASSERT(!ext->oe_hp, ext);
+ ext->oe_hp = 1;
+ list = &obj->oo_hp_exts;
+ } else if (!ext->oe_urgent) {
+ ext->oe_urgent = 1;
+ list = &obj->oo_urgent_exts;
+ }
+ if (list != NULL)
+ list_move_tail(&ext->oe_link, list);
+ unplug = true;
+ } else {
+ /* the only discarder is lock cancelling, so
+ * [start, end] must contain this extent */
+ EASSERT(ext->oe_start >= start &&
+ ext->oe_max_end <= end, ext);
+ osc_extent_state_set(ext, OES_LOCKING);
+ ext->oe_owner = current;
+ list_move_tail(&ext->oe_link,
+ &discard_list);
+ osc_update_pending(obj, OBD_BRW_WRITE,
+ -ext->oe_nr_pages);
+ }
+ break;
+ case OES_ACTIVE:
+ /* It's pretty bad to wait for ACTIVE extents, because
+ * we don't know how long it will take to be flushed,
+ * since it may be blocked waiting for more grants.
+ * We do this for the correctness of fsync. */
+ LASSERT(hp == 0 && discard == 0);
+ ext->oe_urgent = 1;
+ break;
+ case OES_TRUNC:
+ /* this extent is being truncated, can't do anything
+ * for it now. it will be set to urgent after truncate
+ * is finished in osc_cache_truncate_end(). */
+ default:
+ break;
+ }
+ ext = next_extent(ext);
+ }
+ osc_object_unlock(obj);
+
+ LASSERT(ergo(!discard, list_empty(&discard_list)));
+ if (!list_empty(&discard_list)) {
+ struct osc_extent *tmp;
+ int rc;
+
+ osc_list_maint(osc_cli(obj), obj);
+ list_for_each_entry_safe(ext, tmp, &discard_list, oe_link) {
+ list_del_init(&ext->oe_link);
+ EASSERT(ext->oe_state == OES_LOCKING, ext);
+
+ /* Discard cached pages. We don't actually write this
+ * extent out, but we complete it as if we did. */
+ rc = osc_extent_make_ready(env, ext);
+ if (unlikely(rc < 0)) {
+ OSC_EXTENT_DUMP(D_ERROR, ext,
+ "make_ready returned %d\n", rc);
+ if (result >= 0)
+ result = rc;
+ }
+
+ /* finish the extent as if the pages were sent */
+ osc_extent_finish(env, ext, 0, 0);
+ }
+ }
+
+ if (unplug)
+ osc_io_unplug(env, osc_cli(obj), obj, PDL_POLICY_ROUND);
+
+ if (hp || discard) {
+ int rc;
+ rc = osc_cache_wait_range(env, obj, start, end);
+ if (result >= 0 && rc < 0)
+ result = rc;
+ }
+
+ OSC_IO_DEBUG(obj, "cache page out.\n");
+ return result;
+}
+
+/** @} osc */
diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
new file mode 100644
index 0000000..a3aa9b6
--- /dev/null
+++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
@@ -0,0 +1,677 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Internal interfaces of OSC layer.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
+ */
+
+#ifndef OSC_CL_INTERNAL_H
+#define OSC_CL_INTERNAL_H
+
+# include <linux/libcfs/libcfs.h>
+
+#include <obd.h>
+/* osc_build_res_name() */
+#include <obd_ost.h>
+#include <cl_object.h>
+#include <lclient.h>
+#include "osc_internal.h"
+
+/** \defgroup osc osc
+ * @{
+ */
+
+struct osc_extent;
+
+/**
+ * State maintained by osc layer for each IO context.
+ */
+struct osc_io {
+ /** super class */
+ struct cl_io_slice oi_cl;
+ /** true if this io is lockless. */
+ int oi_lockless;
+ /** active extent: we know how many bytes are going to be written,
+ * so having an active extent prevents it from being fragmented */
+ struct osc_extent *oi_active;
+ /** partially truncated extent, we need to hold this extent to prevent
+ * page writeback from happening. */
+ struct osc_extent *oi_trunc;
+
+ struct obd_info oi_info;
+ struct obdo oi_oa;
+ struct osc_async_cbargs {
+ bool opc_rpc_sent;
+ int opc_rc;
+ struct completion opc_sync;
+ } oi_cbarg;
+};
+
+/**
+ * State of transfer for osc.
+ */
+struct osc_req {
+ struct cl_req_slice or_cl;
+};
+
+/**
+ * State maintained by osc layer for the duration of a system call.
+ */
+struct osc_session {
+ struct osc_io os_io;
+};
+
+#define OTI_PVEC_SIZE 64
+struct osc_thread_info {
+ struct ldlm_res_id oti_resname;
+ ldlm_policy_data_t oti_policy;
+ struct cl_lock_descr oti_descr;
+ struct cl_attr oti_attr;
+ struct lustre_handle oti_handle;
+ struct cl_page_list oti_plist;
+ struct cl_io oti_io;
+ struct cl_page *oti_pvec[OTI_PVEC_SIZE];
+};
+
+struct osc_object {
+ struct cl_object oo_cl;
+ struct lov_oinfo *oo_oinfo;
+ /**
+ * True if locking against this stripe got -EUSERS.
+ */
+ int oo_contended;
+ cfs_time_t oo_contention_time;
+ /**
+ * List of pages in transfer.
+ */
+ struct list_head oo_inflight[CRT_NR];
+ /**
+ * Lock, protecting ccc_object::cob_inflight, because a seat-belt is
+ * locked during take-off and landing.
+ */
+ spinlock_t oo_seatbelt;
+
+ /**
+ * used by the osc to keep track of what objects to build into rpcs.
+ * Protected by client_obd->cli_loi_list_lock.
+ */
+ struct list_head oo_ready_item;
+ struct list_head oo_hp_ready_item;
+ struct list_head oo_write_item;
+ struct list_head oo_read_item;
+
+ /**
+ * extent is a red black tree to manage (async) dirty pages.
+ */
+ struct rb_root oo_root;
+ /**
+ * Manage write(dirty) extents.
+ */
+ struct list_head oo_hp_exts; /* list of hp extents */
+ struct list_head oo_urgent_exts; /* list of writeback extents */
+ struct list_head oo_rpc_exts;
+
+ struct list_head oo_reading_exts;
+
+ atomic_t oo_nr_reads;
+ atomic_t oo_nr_writes;
+
+ /** Protect extent tree. Will be used to protect
+ * oo_{read|write}_pages soon. */
+ spinlock_t oo_lock;
+};
+
+static inline void osc_object_lock(struct osc_object *obj)
+{
+ spin_lock(&obj->oo_lock);
+}
+
+static inline int osc_object_trylock(struct osc_object *obj)
+{
+ return spin_trylock(&obj->oo_lock);
+}
+
+static inline void osc_object_unlock(struct osc_object *obj)
+{
+ spin_unlock(&obj->oo_lock);
+}
+
+static inline int osc_object_is_locked(struct osc_object *obj)
+{
+ return spin_is_locked(&obj->oo_lock);
+}
+
+/*
+ * Lock "micro-states" for osc layer.
+ */
+enum osc_lock_state {
+ OLS_NEW,
+ OLS_ENQUEUED,
+ OLS_UPCALL_RECEIVED,
+ OLS_GRANTED,
+ OLS_RELEASED,
+ OLS_BLOCKED,
+ OLS_CANCELLED
+};
+
+/**
+ * osc-private state of cl_lock.
+ *
+ * Interaction with DLM.
+ *
+ * CLIO enqueues all DLM locks through ptlrpcd (that is, in "async" mode).
+ *
+ * Once receive upcall is invoked, osc_lock remembers a handle of DLM lock in
+ * osc_lock::ols_handle and a pointer to that lock in osc_lock::ols_lock.
+ *
+ * This pointer is protected through a reference, acquired by
+ * osc_lock_upcall0(). Also, an additional reference is acquired by
+ * ldlm_lock_addref() call protecting the lock from cancellation, until
+ * osc_lock_unuse() releases it.
+ *
+ * Below is a description of how lock references are acquired and released
+ * inside of DLM.
+ *
+ * - When new lock is created and enqueued to the server (ldlm_cli_enqueue())
+ * - ldlm_lock_create()
+ * - ldlm_lock_new(): initializes a lock with 2 references. One for
+ * the caller (released when reply from the server is received, or on
+ * error), and another for the hash table.
+ * - ldlm_lock_addref_internal(): protects the lock from cancellation.
+ *
+ * - When reply is received from the server (osc_enqueue_interpret())
+ * - ldlm_cli_enqueue_fini()
+ * - LDLM_LOCK_PUT(): releases caller reference acquired by
+ * ldlm_lock_new().
+ * - if (rc != 0)
+ * ldlm_lock_decref(): error case: matches ldlm_cli_enqueue().
+ * - ldlm_lock_decref(): for async locks, matches ldlm_cli_enqueue().
+ *
+ * - When lock is being cancelled (ldlm_lock_cancel())
+ * - ldlm_lock_destroy()
+ * - LDLM_LOCK_PUT(): releases hash-table reference acquired by
+ * ldlm_lock_new().
+ *
+ * osc_lock is detached from ldlm_lock by osc_lock_detach() that is called
+ * either when lock is cancelled (osc_lock_blocking()), or when locks is
+ * deleted without cancellation (e.g., from cl_locks_prune()). In the latter
+ * case ldlm lock remains in memory, and can be re-attached to osc_lock in the
+ * future.
+ */
+struct osc_lock {
+ struct cl_lock_slice ols_cl;
+ /** underlying DLM lock */
+ struct ldlm_lock *ols_lock;
+ /** lock value block */
+ struct ost_lvb ols_lvb;
+ /** DLM flags with which osc_lock::ols_lock was enqueued */
+ __u64 ols_flags;
+ /** osc_lock::ols_lock handle */
+ struct lustre_handle ols_handle;
+ struct ldlm_enqueue_info ols_einfo;
+ enum osc_lock_state ols_state;
+
+ /**
+ * How many pages are using this lock for io, currently only used by
+ * read-ahead. If non-zero, the underlying dlm lock won't be cancelled
+ * during recovery to avoid deadlock. see bz16774.
+ *
+ * \see osc_page::ops_lock
+ * \see osc_page_addref_lock(), osc_page_putref_lock()
+ */
+ atomic_t ols_pageref;
+
+ /**
+ * true, if ldlm_lock_addref() was called against
+ * osc_lock::ols_lock. This is used for sanity checking.
+ *
+ * \see osc_lock::ols_has_ref
+ */
+ unsigned ols_hold :1,
+ /**
+ * this is much like osc_lock::ols_hold, except that this bit is
+ * cleared _after_ the reference is released in osc_lock_unuse(). This
+ * fine distinction is needed because:
+ *
+ * - if ldlm lock still has a reference, osc_ast_data_get() needs
+ * to return associated cl_lock (so that a flag is needed that is
+ * cleared after ldlm_lock_decref() returned), and
+ *
+ * - ldlm_lock_decref() can invoke blocking ast (for a
+ * LDLM_FL_CBPENDING lock), and osc_lock functions like
+ * osc_lock_cancel() called from there need to know whether to
+ * release lock reference (so that a flag is needed that is
+ * cleared before ldlm_lock_decref() is called).
+ */
+ ols_has_ref:1,
+ /**
+ * inherit the lockless attribute from top level cl_io.
+ * If true, osc_lock_enqueue is able to tolerate the -EUSERS error.
+ */
+ ols_locklessable:1,
+ /**
+ * set by osc_lock_use() to wait until blocking AST enters into
+ * osc_ldlm_blocking_ast0(), so that cl_lock mutex can be used for
+ * further synchronization.
+ */
+ ols_ast_wait:1,
+ /**
+ * If the data of this lock has been flushed to server side.
+ */
+ ols_flush:1,
+ /**
+ * if set, the osc_lock is a glimpse lock. For glimpse locks, we treat
+ * the EVAVAIL error as tolerable; this makes the upper logic happy
+ * to wait for glimpse locks on all OSTs to complete.
+ * A glimpse lock converts to a normal lock if the server lock is
+ * granted.
+ * A glimpse lock should be destroyed immediately after use.
+ */
+ ols_glimpse:1,
+ /**
+ * For async glimpse lock.
+ */
+ ols_agl:1;
+ /**
+ * IO that owns this lock. This field is used for deadlock
+ * avoidance by osc_lock_enqueue_wait().
+ *
+ * XXX: unfortunately, the owner of an osc_lock is not unique,
+ * the lock may have multiple users, if the lock is granted and
+ * then matched.
+ */
+ struct osc_io *ols_owner;
+};
+
+
+/**
+ * Page state private for osc layer.
+ */
+struct osc_page {
+ struct cl_page_slice ops_cl;
+ /**
+ * Page queues used by osc to detect when RPC can be formed.
+ */
+ struct osc_async_page ops_oap;
+ /**
+ * An offset within page from which next transfer starts. This is used
+ * by cl_page_clip() to submit partial page transfers.
+ */
+ int ops_from;
+ /**
+ * An offset within page at which next transfer ends.
+ *
+ * \see osc_page::ops_from.
+ */
+ int ops_to;
+ /**
+ * Boolean, true iff page is under transfer. Used for sanity checking.
+ */
+ unsigned ops_transfer_pinned:1,
+ /**
+ * True for a `temporary page' created by read-ahead code, probably
+ * outside of any DLM lock.
+ */
+ ops_temp:1,
+ /**
+ * in LRU?
+ */
+ ops_in_lru:1,
+ /**
+ * Set if the page must be transferred with OBD_BRW_SRVLOCK.
+ */
+ ops_srvlock:1;
+ union {
+ /**
+ * lru page list. ops_inflight and ops_lru are exclusive so
+ * that they can share the same data.
+ */
+ struct list_head ops_lru;
+ /**
+ * Linkage into a per-osc_object list of pages in flight. For
+ * debugging.
+ */
+ struct list_head ops_inflight;
+ };
+ /**
+ * Thread that submitted this page for transfer. For debugging.
+ */
+ struct task_struct *ops_submitter;
+ /**
+ * Submit time - the time when the page is starting RPC. For debugging.
+ */
+ cfs_time_t ops_submit_time;
+
+ /**
+ * A lock of which we hold a reference covers this page. Only used by
+ * read-ahead: for a readahead page, we hold its covering lock to
+ * prevent it from being canceled during recovery.
+ *
+ * \see osc_lock::ols_pageref
+ * \see osc_page_addref_lock(), osc_page_putref_lock().
+ */
+ struct cl_lock *ops_lock;
+};
+
+extern struct kmem_cache *osc_lock_kmem;
+extern struct kmem_cache *osc_object_kmem;
+extern struct kmem_cache *osc_thread_kmem;
+extern struct kmem_cache *osc_session_kmem;
+extern struct kmem_cache *osc_req_kmem;
+extern struct kmem_cache *osc_extent_kmem;
+
+extern struct lu_device_type osc_device_type;
+extern struct lu_context_key osc_key;
+extern struct lu_context_key osc_session_key;
+
+#define OSC_FLAGS (ASYNC_URGENT|ASYNC_READY)
+
+int osc_lock_init(const struct lu_env *env,
+ struct cl_object *obj, struct cl_lock *lock,
+ const struct cl_io *io);
+int osc_io_init (const struct lu_env *env,
+ struct cl_object *obj, struct cl_io *io);
+int osc_req_init (const struct lu_env *env, struct cl_device *dev,
+ struct cl_req *req);
+struct lu_object *osc_object_alloc(const struct lu_env *env,
+ const struct lu_object_header *hdr,
+ struct lu_device *dev);
+int osc_page_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, struct page *vmpage);
+
+void osc_index2policy (ldlm_policy_data_t *policy, const struct cl_object *obj,
+ pgoff_t start, pgoff_t end);
+int osc_lvb_print (const struct lu_env *env, void *cookie,
+ lu_printer_t p, const struct ost_lvb *lvb);
+
+void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
+ enum cl_req_type crt, int brw_flags);
+int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops);
+int osc_set_async_flags(struct osc_object *obj, struct osc_page *opg,
+ obd_flag async_flags);
+int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
+ struct page *page, loff_t offset);
+int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
+ struct osc_page *ops);
+int osc_teardown_async_page(const struct lu_env *env, struct osc_object *obj,
+ struct osc_page *ops);
+int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
+ struct osc_page *ops);
+int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
+ struct list_head *list, int cmd, int brw_flags);
+int osc_cache_truncate_start(const struct lu_env *env, struct osc_io *oio,
+ struct osc_object *obj, __u64 size);
+void osc_cache_truncate_end(const struct lu_env *env, struct osc_io *oio,
+ struct osc_object *obj);
+int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj,
+ pgoff_t start, pgoff_t end, int hp, int discard);
+int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj,
+ pgoff_t start, pgoff_t end);
+void osc_io_unplug(const struct lu_env *env, struct client_obd *cli,
+ struct osc_object *osc, pdl_policy_t pol);
+
+void osc_object_set_contended (struct osc_object *obj);
+void osc_object_clear_contended(struct osc_object *obj);
+int osc_object_is_contended (struct osc_object *obj);
+
+int osc_lock_is_lockless (const struct osc_lock *olck);
+
+/*****************************************************************************
+ *
+ * Accessors.
+ *
+ */
+
+static inline struct osc_thread_info *osc_env_info(const struct lu_env *env)
+{
+ struct osc_thread_info *info;
+
+ info = lu_context_key_get(&env->le_ctx, &osc_key);
+ LASSERT(info != NULL);
+ return info;
+}
+
+static inline struct osc_session *osc_env_session(const struct lu_env *env)
+{
+ struct osc_session *ses;
+
+ ses = lu_context_key_get(env->le_ses, &osc_session_key);
+ LASSERT(ses != NULL);
+ return ses;
+}
+
+static inline struct osc_io *osc_env_io(const struct lu_env *env)
+{
+ return &osc_env_session(env)->os_io;
+}
+
+static inline int osc_is_object(const struct lu_object *obj)
+{
+ return obj->lo_dev->ld_type == &osc_device_type;
+}
+
+static inline struct osc_device *lu2osc_dev(const struct lu_device *d)
+{
+ LINVRNT(d->ld_type == &osc_device_type);
+ return container_of0(d, struct osc_device, od_cl.cd_lu_dev);
+}
+
+static inline struct obd_export *osc_export(const struct osc_object *obj)
+{
+ return lu2osc_dev(obj->oo_cl.co_lu.lo_dev)->od_exp;
+}
+
+static inline struct client_obd *osc_cli(const struct osc_object *obj)
+{
+ return &osc_export(obj)->exp_obd->u.cli;
+}
+
+static inline struct osc_object *cl2osc(const struct cl_object *obj)
+{
+ LINVRNT(osc_is_object(&obj->co_lu));
+ return container_of0(obj, struct osc_object, oo_cl);
+}
+
+static inline struct cl_object *osc2cl(const struct osc_object *obj)
+{
+ return (struct cl_object *)&obj->oo_cl;
+}
+
+static inline ldlm_mode_t osc_cl_lock2ldlm(enum cl_lock_mode mode)
+{
+ LASSERT(mode == CLM_READ || mode == CLM_WRITE || mode == CLM_GROUP);
+ if (mode == CLM_READ)
+ return LCK_PR;
+ else if (mode == CLM_WRITE)
+ return LCK_PW;
+ else
+ return LCK_GROUP;
+}
+
+static inline enum cl_lock_mode osc_ldlm2cl_lock(ldlm_mode_t mode)
+{
+ LASSERT(mode == LCK_PR || mode == LCK_PW || mode == LCK_GROUP);
+ if (mode == LCK_PR)
+ return CLM_READ;
+ else if (mode == LCK_PW)
+ return CLM_WRITE;
+ else
+ return CLM_GROUP;
+}
+
+static inline struct osc_page *cl2osc_page(const struct cl_page_slice *slice)
+{
+ LINVRNT(osc_is_object(&slice->cpl_obj->co_lu));
+ return container_of0(slice, struct osc_page, ops_cl);
+}
+
+static inline struct osc_page *oap2osc(struct osc_async_page *oap)
+{
+ return container_of0(oap, struct osc_page, ops_oap);
+}
+
+static inline struct cl_page *oap2cl_page(struct osc_async_page *oap)
+{
+ return oap2osc(oap)->ops_cl.cpl_page;
+}
+
+static inline struct osc_page *oap2osc_page(struct osc_async_page *oap)
+{
+ return (struct osc_page *)container_of(oap, struct osc_page, ops_oap);
+}
+
+static inline struct osc_lock *cl2osc_lock(const struct cl_lock_slice *slice)
+{
+ LINVRNT(osc_is_object(&slice->cls_obj->co_lu));
+ return container_of0(slice, struct osc_lock, ols_cl);
+}
+
+static inline struct osc_lock *osc_lock_at(const struct cl_lock *lock)
+{
+ return cl2osc_lock(cl_lock_at(lock, &osc_device_type));
+}
+
+static inline int osc_io_srvlock(struct osc_io *oio)
+{
+ return (oio->oi_lockless && !oio->oi_cl.cis_io->ci_no_srvlock);
+}
+
+enum osc_extent_state {
+ OES_INV = 0, /** extent is just initialized or destroyed */
+ OES_ACTIVE = 1, /** process is using this extent */
+ OES_CACHE = 2, /** extent is ready for IO */
+ OES_LOCKING = 3, /** locking page to prepare IO */
+ OES_LOCK_DONE = 4, /** locking finished, ready to send */
+ OES_RPC = 5, /** in RPC */
+ OES_TRUNC = 6, /** being truncated */
+ OES_STATE_MAX
+};
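+
+/*
+ * Rough lifecycle sketch, inferred from the per-state comments above rather
+ * than a formal state machine: a dirtied extent typically moves
+ *
+ *   OES_INV -> OES_ACTIVE -> OES_CACHE -> OES_LOCKING -> OES_LOCK_DONE
+ *           -> OES_RPC -> OES_INV
+ *
+ * with OES_TRUNC entered instead of OES_CACHE when an ACTIVE extent is
+ * released while oe_trunc_pending is set (see struct osc_extent below).
+ */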
+
+/**
+ * osc_extent data to manage dirty pages.
+ * osc_extent has the following attributes:
+ * 1. all pages in the same extent must be in one RPC in write back;
+ * 2. # of pages must be less than max_pages_per_rpc - implied by 1;
+ * 3. must be covered by only 1 osc_lock;
+ * 4. exclusive. It's impossible to have overlapped osc_extent.
+ *
+ * The lifetime of an extent is from when the 1st page is dirtied to when
+ * all pages inside it are written out.
+ *
+ * LOCKING ORDER
+ * =============
+ * page lock -> client_obd_list_lock -> object lock(osc_object::oo_lock)
+ */
+struct osc_extent {
+ /** red-black tree node */
+ struct rb_node oe_node;
+ /** osc_object of this extent */
+ struct osc_object *oe_obj;
+ /** refcount, removed from red-black tree if reaches zero. */
+ atomic_t oe_refc;
+ /** busy if non-zero */
+ atomic_t oe_users;
+ /** link list of osc_object's oo_{hp|urgent|locking}_exts. */
+ struct list_head oe_link;
+ /** state of this extent */
+ unsigned int oe_state;
+ /** flags for this extent. */
+ unsigned int oe_intree:1,
+ /** 0 is write, 1 is read */
+ oe_rw:1,
+ oe_srvlock:1,
+ oe_memalloc:1,
+ /** an ACTIVE extent is going to be truncated, so when this extent
+ * is released, it will turn into TRUNC state instead of CACHE. */
+ oe_trunc_pending:1,
+ /** this extent should be written asap and someone may wait for the
+ * write to finish. This bit is usually set along with urgent if
+ * the extent is in CACHE state.
+ * An fsync_wait extent can't be merged, because the new extent
+ * region may exceed the fsync range. */
+ oe_fsync_wait:1,
+ /** covering lock is being canceled */
+ oe_hp:1,
+ /** this extent should be written back asap. Set if one of its pages
+ * is picked up by the page writeback daemon, or by a sync write or
+ * read request. */
+ oe_urgent:1;
+ /** how much grant is allocated for this extent. No grant is
+ * allocated for read extents or sync write extents. */
+ unsigned int oe_grants;
+ /** # of dirty pages in this extent */
+ unsigned int oe_nr_pages;
+ /** list of pending oap pages. Pages in this list are NOT sorted. */
+ struct list_head oe_pages;
+ /** Since an extent has to be written out atomically, this is used to
+ * remember the next page that needs to be locked to write this extent out.
+ * Not used right now.
+ */
+ struct osc_page *oe_next_page;
+ /** start and end index of this extent, including both start and end
+ * themselves. Page offset here is the page index of osc_pages.
+ * oe_start is used as the key for the red-black tree. */
+ pgoff_t oe_start;
+ pgoff_t oe_end;
+ /** maximum ending index of this extent; this is limited by
+ * max_pages_per_rpc, the lock extent and the chunk size. */
+ pgoff_t oe_max_end;
+ /** waitqueue - for those who want to be notified if this extent's
+ * state has changed. */
+ wait_queue_head_t oe_waitq;
+ /** lock covering this extent */
+ struct cl_lock *oe_osclock;
+ /** owner of this extent. Must be non-NULL if this extent is in IO. */
+ struct task_struct *oe_owner;
+ /** return value of writeback. If somebody is waiting for this extent,
+ * this value is how the result is reported to the outside world. */
+ int oe_rc;
+ /** max pages per rpc when this extent was created */
+ unsigned int oe_mppr;
+};
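+
+/*
+ * Extents of an object live in a per-object red-black tree keyed on
+ * oe_start (oe_node above). oe_refc keeps the extent in the tree until the
+ * last reference is dropped, while oe_users counts active users, so a
+ * "busy" extent is simply one with oe_users != 0.
+ */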
+
+int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
+ int sent, int rc);
+int osc_extent_release(const struct lu_env *env, struct osc_extent *ext);
+
+/** @} osc */
+
+#endif /* OSC_CL_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/osc/osc_dev.c b/drivers/staging/lustre/lustre/osc/osc_dev.c
new file mode 100644
index 0000000..35f2578
--- /dev/null
+++ b/drivers/staging/lustre/lustre/osc/osc_dev.c
@@ -0,0 +1,260 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Implementation of cl_device, cl_req for OSC layer.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_OSC
+
+/* class_name2obd() */
+#include <obd_class.h>
+
+#include "osc_cl_internal.h"
+
+/** \addtogroup osc
+ * @{
+ */
+
+struct kmem_cache *osc_lock_kmem;
+struct kmem_cache *osc_object_kmem;
+struct kmem_cache *osc_thread_kmem;
+struct kmem_cache *osc_session_kmem;
+struct kmem_cache *osc_req_kmem;
+struct kmem_cache *osc_extent_kmem;
+struct kmem_cache *osc_quota_kmem;
+
+struct lu_kmem_descr osc_caches[] = {
+ {
+ .ckd_cache = &osc_lock_kmem,
+ .ckd_name = "osc_lock_kmem",
+ .ckd_size = sizeof(struct osc_lock)
+ },
+ {
+ .ckd_cache = &osc_object_kmem,
+ .ckd_name = "osc_object_kmem",
+ .ckd_size = sizeof(struct osc_object)
+ },
+ {
+ .ckd_cache = &osc_thread_kmem,
+ .ckd_name = "osc_thread_kmem",
+ .ckd_size = sizeof(struct osc_thread_info)
+ },
+ {
+ .ckd_cache = &osc_session_kmem,
+ .ckd_name = "osc_session_kmem",
+ .ckd_size = sizeof(struct osc_session)
+ },
+ {
+ .ckd_cache = &osc_req_kmem,
+ .ckd_name = "osc_req_kmem",
+ .ckd_size = sizeof(struct osc_req)
+ },
+ {
+ .ckd_cache = &osc_extent_kmem,
+ .ckd_name = "osc_extent_kmem",
+ .ckd_size = sizeof(struct osc_extent)
+ },
+ {
+ .ckd_cache = &osc_quota_kmem,
+ .ckd_name = "osc_quota_kmem",
+ .ckd_size = sizeof(struct osc_quota_info)
+ },
+ {
+ .ckd_cache = NULL
+ }
+};
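+
+/*
+ * The NULL-terminated osc_caches[] table is presumably passed to the
+ * generic lu_kmem_init()/lu_kmem_fini() helpers at module init/exit time
+ * (outside this file) to create and destroy all of the slab caches above
+ * in one pass.
+ */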
+
+struct lock_class_key osc_ast_guard_class;
+
+/*****************************************************************************
+ *
+ * Type conversions.
+ *
+ */
+
+static struct lu_device *osc2lu_dev(struct osc_device *osc)
+{
+ return &osc->od_cl.cd_lu_dev;
+}
+
+/*****************************************************************************
+ *
+ * Osc device and device type functions.
+ *
+ */
+
+static void *osc_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key)
+{
+ struct osc_thread_info *info;
+
+ OBD_SLAB_ALLOC_PTR_GFP(info, osc_thread_kmem, __GFP_IO);
+ if (info == NULL)
+ info = ERR_PTR(-ENOMEM);
+ return info;
+}
+
+static void osc_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
+{
+ struct osc_thread_info *info = data;
+ OBD_SLAB_FREE_PTR(info, osc_thread_kmem);
+}
+
+struct lu_context_key osc_key = {
+ .lct_tags = LCT_CL_THREAD,
+ .lct_init = osc_key_init,
+ .lct_fini = osc_key_fini
+};
+
+static void *osc_session_init(const struct lu_context *ctx,
+ struct lu_context_key *key)
+{
+ struct osc_session *info;
+
+ OBD_SLAB_ALLOC_PTR_GFP(info, osc_session_kmem, __GFP_IO);
+ if (info == NULL)
+ info = ERR_PTR(-ENOMEM);
+ return info;
+}
+
+static void osc_session_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
+{
+ struct osc_session *info = data;
+ OBD_SLAB_FREE_PTR(info, osc_session_kmem);
+}
+
+struct lu_context_key osc_session_key = {
+ .lct_tags = LCT_SESSION,
+ .lct_init = osc_session_init,
+ .lct_fini = osc_session_fini
+};
+
+/* type constructor/destructor: osc_type_{init,fini,start,stop}(). */
+LU_TYPE_INIT_FINI(osc, &osc_key, &osc_session_key);
+
+static int osc_cl_process_config(const struct lu_env *env,
+ struct lu_device *d, struct lustre_cfg *cfg)
+{
+ return osc_process_config_base(d->ld_obd, cfg);
+}
+
+static const struct lu_device_operations osc_lu_ops = {
+ .ldo_object_alloc = osc_object_alloc,
+ .ldo_process_config = osc_cl_process_config,
+ .ldo_recovery_complete = NULL
+};
+
+static const struct cl_device_operations osc_cl_ops = {
+ .cdo_req_init = osc_req_init
+};
+
+static int osc_device_init(const struct lu_env *env, struct lu_device *d,
+ const char *name, struct lu_device *next)
+{
+ return 0;
+}
+
+static struct lu_device *osc_device_fini(const struct lu_env *env,
+ struct lu_device *d)
+{
+ return NULL;
+}
+
+static struct lu_device *osc_device_free(const struct lu_env *env,
+ struct lu_device *d)
+{
+ struct osc_device *od = lu2osc_dev(d);
+
+ cl_device_fini(lu2cl_dev(d));
+ OBD_FREE_PTR(od);
+ return NULL;
+}
+
+static struct lu_device *osc_device_alloc(const struct lu_env *env,
+ struct lu_device_type *t,
+ struct lustre_cfg *cfg)
+{
+ struct lu_device *d;
+ struct osc_device *od;
+ struct obd_device *obd;
+ int rc;
+
+ OBD_ALLOC_PTR(od);
+ if (od == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ cl_device_init(&od->od_cl, t);
+ d = osc2lu_dev(od);
+ d->ld_ops = &osc_lu_ops;
+ od->od_cl.cd_ops = &osc_cl_ops;
+
+ /* Setup OSC OBD */
+ obd = class_name2obd(lustre_cfg_string(cfg, 0));
+ LASSERT(obd != NULL);
+ rc = osc_setup(obd, cfg);
+ if (rc) {
+ osc_device_free(env, d);
+ return ERR_PTR(rc);
+ }
+ od->od_exp = obd->obd_self_export;
+ return d;
+}
+
+static const struct lu_device_type_operations osc_device_type_ops = {
+ .ldto_init = osc_type_init,
+ .ldto_fini = osc_type_fini,
+
+ .ldto_start = osc_type_start,
+ .ldto_stop = osc_type_stop,
+
+ .ldto_device_alloc = osc_device_alloc,
+ .ldto_device_free = osc_device_free,
+
+ .ldto_device_init = osc_device_init,
+ .ldto_device_fini = osc_device_fini
+};
+
+struct lu_device_type osc_device_type = {
+ .ldt_tags = LU_DEVICE_CL,
+ .ldt_name = LUSTRE_OSC_NAME,
+ .ldt_ops = &osc_device_type_ops,
+ .ldt_ctx_tags = LCT_CL_THREAD
+};
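+
+/*
+ * osc_device_type is also what the OSC type checks and slice lookups key
+ * on: osc_is_object() (osc_cl_internal.h) compares a lu_object's device
+ * type against it, while osc_lock_at() and osc_cl_page_osc() (osc_io.c)
+ * use it to find the OSC slice of a layered lock or page.
+ */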
+
+/** @} osc */
diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h
new file mode 100644
index 0000000..efc5db4
--- /dev/null
+++ b/drivers/staging/lustre/lustre/osc/osc_internal.h
@@ -0,0 +1,208 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#ifndef OSC_INTERNAL_H
+#define OSC_INTERNAL_H
+
+#define OAP_MAGIC 8675309
+
+struct lu_env;
+
+enum async_flags {
+ ASYNC_READY = 0x1, /* ap_make_ready will not be called before this
+ page is added to an rpc */
+ ASYNC_URGENT = 0x2, /* page must be put into an RPC before return */
+ ASYNC_COUNT_STABLE = 0x4, /* ap_refresh_count will not be called
+ to give the caller a chance to update
+ or cancel the size of the io */
+ ASYNC_HP = 0x10,
+};
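+
+/*
+ * Example of how these flags are combined in practice: osc_io_submit() in
+ * osc_io.c (also in this patch) marks every page it queues for synchronous
+ * transfer with ASYNC_URGENT | ASYNC_READY | ASYNC_COUNT_STABLE, i.e.
+ * "send now, the page is ready and its byte count will not change".
+ */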
+
+struct osc_async_page {
+ int oap_magic;
+ unsigned short oap_cmd;
+ unsigned short oap_interrupted:1;
+
+ struct list_head oap_pending_item;
+ struct list_head oap_rpc_item;
+
+ obd_off oap_obj_off;
+ unsigned oap_page_off;
+ enum async_flags oap_async_flags;
+
+ struct brw_page oap_brw_page;
+
+ struct ptlrpc_request *oap_request;
+ struct client_obd *oap_cli;
+ struct osc_object *oap_obj;
+
+ struct ldlm_lock *oap_ldlm_lock;
+ spinlock_t oap_lock;
+};
+
+#define oap_page oap_brw_page.pg
+#define oap_count oap_brw_page.count
+#define oap_brw_flags oap_brw_page.flag
+
+struct osc_cache_waiter {
+ struct list_head ocw_entry;
+ wait_queue_head_t ocw_waitq;
+ struct osc_async_page *ocw_oap;
+ int ocw_grant;
+ int ocw_rc;
+};
+
+int osc_create(const struct lu_env *env, struct obd_export *exp,
+ struct obdo *oa, struct lov_stripe_md **ea,
+ struct obd_trans_info *oti);
+int osc_real_create(struct obd_export *exp, struct obdo *oa,
+ struct lov_stripe_md **ea, struct obd_trans_info *oti);
+void osc_wake_cache_waiters(struct client_obd *cli);
+int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes);
+void osc_update_next_shrink(struct client_obd *cli);
+
+/*
+ * cl integration.
+ */
+#include <cl_object.h>
+
+extern struct ptlrpc_request_set *PTLRPCD_SET;
+
+int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
+ __u64 *flags, ldlm_policy_data_t *policy,
+ struct ost_lvb *lvb, int kms_valid,
+ obd_enqueue_update_f upcall,
+ void *cookie, struct ldlm_enqueue_info *einfo,
+ struct lustre_handle *lockh,
+ struct ptlrpc_request_set *rqset, int async, int agl);
+int osc_cancel_base(struct lustre_handle *lockh, __u32 mode);
+
+int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
+ __u32 type, ldlm_policy_data_t *policy, __u32 mode,
+ int *flags, void *data, struct lustre_handle *lockh,
+ int unref);
+
+int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
+ struct obd_trans_info *oti,
+ obd_enqueue_update_f upcall, void *cookie,
+ struct ptlrpc_request_set *rqset);
+int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
+ obd_enqueue_update_f upcall, void *cookie,
+ struct ptlrpc_request_set *rqset);
+int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
+ obd_enqueue_update_f upcall, void *cookie,
+ struct ptlrpc_request_set *rqset);
+
+int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *cfg);
+int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
+ struct list_head *ext_list, int cmd, pdl_policy_t p);
+int osc_lru_shrink(struct client_obd *cli, int target);
+
+extern spinlock_t osc_ast_guard;
+
+int osc_cleanup(struct obd_device *obd);
+int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg);
+
+#ifdef LPROCFS
+int lproc_osc_attach_seqstat(struct obd_device *dev);
+void lprocfs_osc_init_vars(struct lprocfs_static_vars *lvars);
+#else
+static inline int lproc_osc_attach_seqstat(struct obd_device *dev) {return 0;}
+static inline void lprocfs_osc_init_vars(struct lprocfs_static_vars *lvars)
+{
+ memset(lvars, 0, sizeof(*lvars));
+}
+#endif
+
+extern struct lu_device_type osc_device_type;
+
+static inline int osc_recoverable_error(int rc)
+{
+ return (rc == -EIO || rc == -EROFS || rc == -ENOMEM ||
+ rc == -EAGAIN || rc == -EINPROGRESS);
+}
+
+static inline unsigned long rpcs_in_flight(struct client_obd *cli)
+{
+ return cli->cl_r_in_flight + cli->cl_w_in_flight;
+}
+
+#ifndef min_t
+#define min_t(type,x,y) \
+ ({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })
+#endif
+
+struct osc_device {
+ struct cl_device od_cl;
+ struct obd_export *od_exp;
+
+ /* Write stats are actually protected by client_obd's lock. */
+ struct osc_stats {
+ uint64_t os_lockless_writes; /* by bytes */
+ uint64_t os_lockless_reads; /* by bytes */
+ uint64_t os_lockless_truncates; /* by times */
+ } od_stats;
+
+ /* configuration item(s) */
+ int od_contention_time;
+ int od_lockless_truncate;
+};
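+
+/*
+ * od_stats is updated from the IO paths; for instance osc_io_setattr_end()
+ * in osc_io.c increments os_lockless_truncates once a lockless truncate
+ * has completed.
+ */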
+
+static inline struct osc_device *obd2osc_dev(const struct obd_device *d)
+{
+ return container_of0(d->obd_lu_dev, struct osc_device, od_cl.cd_lu_dev);
+}
+
+int osc_dlm_lock_pageref(struct ldlm_lock *dlm);
+
+extern struct kmem_cache *osc_quota_kmem;
+struct osc_quota_info {
+ /** linkage for quota hash table */
+ struct hlist_node oqi_hash;
+ obd_uid oqi_id;
+};
+int osc_quota_setup(struct obd_device *obd);
+int osc_quota_cleanup(struct obd_device *obd);
+int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
+ obd_flag valid, obd_flag flags);
+int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[]);
+int osc_quotactl(struct obd_device *unused, struct obd_export *exp,
+ struct obd_quotactl *oqctl);
+int osc_quotacheck(struct obd_device *unused, struct obd_export *exp,
+ struct obd_quotactl *oqctl);
+int osc_quota_poll_check(struct obd_export *exp, struct if_quotacheck *qchk);
+
+#endif /* OSC_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c
new file mode 100644
index 0000000..3aeaf84
--- /dev/null
+++ b/drivers/staging/lustre/lustre/osc/osc_io.c
@@ -0,0 +1,828 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Implementation of cl_io for OSC layer.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_OSC
+
+#include "osc_cl_internal.h"
+
+/** \addtogroup osc
+ * @{
+ */
+
+/*****************************************************************************
+ *
+ * Type conversions.
+ *
+ */
+
+static struct osc_req *cl2osc_req(const struct cl_req_slice *slice)
+{
+ LINVRNT(slice->crs_dev->cd_lu_dev.ld_type == &osc_device_type);
+ return container_of0(slice, struct osc_req, or_cl);
+}
+
+static struct osc_io *cl2osc_io(const struct lu_env *env,
+ const struct cl_io_slice *slice)
+{
+ struct osc_io *oio = container_of0(slice, struct osc_io, oi_cl);
+ LINVRNT(oio == osc_env_io(env));
+ return oio;
+}
+
+static struct osc_page *osc_cl_page_osc(struct cl_page *page)
+{
+ const struct cl_page_slice *slice;
+
+ slice = cl_page_at(page, &osc_device_type);
+ LASSERT(slice != NULL);
+
+ return cl2osc_page(slice);
+}
+
+
+/*****************************************************************************
+ *
+ * io operations.
+ *
+ */
+
+static void osc_io_fini(const struct lu_env *env, const struct cl_io_slice *io)
+{
+}
+
+/**
+ * An implementation of cl_io_operations::cio_io_submit() method for osc
+ * layer. Iterates over pages in the in-queue, prepares each for io by
+ * calling cl_page_prep(), submits it through osc_page_submit() and queues
+ * the pages for transfer via osc_queue_sync_pages() in batches of at most
+ * max_pages_per_rpc pages.
+ */
+static int osc_io_submit(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ enum cl_req_type crt, struct cl_2queue *queue)
+{
+ struct cl_page *page;
+ struct cl_page *tmp;
+ struct client_obd *cli = NULL;
+ struct osc_object *osc = NULL; /* to keep gcc happy */
+ struct osc_page *opg;
+ struct cl_io *io;
+ LIST_HEAD(list);
+
+ struct cl_page_list *qin = &queue->c2_qin;
+ struct cl_page_list *qout = &queue->c2_qout;
+ int queued = 0;
+ int result = 0;
+ int cmd;
+ int brw_flags;
+ int max_pages;
+
+ LASSERT(qin->pl_nr > 0);
+
+ CDEBUG(D_CACHE, "%d %d\n", qin->pl_nr, crt);
+
+ osc = cl2osc(ios->cis_obj);
+ cli = osc_cli(osc);
+ max_pages = cli->cl_max_pages_per_rpc;
+
+ cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
+ brw_flags = osc_io_srvlock(cl2osc_io(env, ios)) ? OBD_BRW_SRVLOCK : 0;
+
+ /*
+ * NOTE: here @page is a top-level page. This is done to avoid
+ * creation of sub-page-list.
+ */
+ cl_page_list_for_each_safe(page, tmp, qin) {
+ struct osc_async_page *oap;
+
+ /* Top level IO. */
+ io = page->cp_owner;
+ LASSERT(io != NULL);
+
+ opg = osc_cl_page_osc(page);
+ oap = &opg->ops_oap;
+ LASSERT(osc == oap->oap_obj);
+
+ if (!list_empty(&oap->oap_pending_item) ||
+ !list_empty(&oap->oap_rpc_item)) {
+ CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
+ oap, opg);
+ result = -EBUSY;
+ break;
+ }
+
+ result = cl_page_prep(env, io, page, crt);
+ if (result != 0) {
+ LASSERT(result < 0);
+ if (result != -EALREADY)
+ break;
+ /*
+ * Handle -EALREADY error: for read case, the page is
+ * already in UPTODATE state; for write, the page
+ * is not dirty.
+ */
+ result = 0;
+ continue;
+ }
+
+ cl_page_list_move(qout, qin, page);
+ oap->oap_async_flags = ASYNC_URGENT|ASYNC_READY;
+ oap->oap_async_flags |= ASYNC_COUNT_STABLE;
+
+ osc_page_submit(env, opg, crt, brw_flags);
+ list_add_tail(&oap->oap_pending_item, &list);
+ if (++queued == max_pages) {
+ queued = 0;
+ result = osc_queue_sync_pages(env, osc, &list, cmd,
+ brw_flags);
+ if (result < 0)
+ break;
+ }
+ }
+
+ if (queued > 0)
+ result = osc_queue_sync_pages(env, osc, &list, cmd, brw_flags);
+
+ CDEBUG(D_INFO, "%d/%d %d\n", qin->pl_nr, qout->pl_nr, result);
+ return qout->pl_nr > 0 ? 0 : result;
+}
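+
+/*
+ * Note on the batching above: pages are collected on a local list and
+ * flushed through osc_queue_sync_pages() every cl_max_pages_per_rpc pages,
+ * with one final flush for the remainder, so a large queue is turned into
+ * a series of RPC-sized batches.
+ */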
+
+static void osc_page_touch_at(const struct lu_env *env,
+ struct cl_object *obj, pgoff_t idx, unsigned to)
+{
+ struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
+ struct cl_attr *attr = &osc_env_info(env)->oti_attr;
+ int valid;
+ __u64 kms;
+
+ /* offset within stripe */
+ kms = cl_offset(obj, idx) + to;
+
+ cl_object_attr_lock(obj);
+ /*
+ * XXX old code used
+ *
+ * ll_inode_size_lock(inode, 0); lov_stripe_lock(lsm);
+ *
+ * here
+ */
+ CDEBUG(D_INODE, "stripe KMS %sincreasing "LPU64"->"LPU64" "LPU64"\n",
+ kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
+ loi->loi_lvb.lvb_size);
+
+ valid = 0;
+ if (kms > loi->loi_kms) {
+ attr->cat_kms = kms;
+ valid |= CAT_KMS;
+ }
+ if (kms > loi->loi_lvb.lvb_size) {
+ attr->cat_size = kms;
+ valid |= CAT_SIZE;
+ }
+ cl_object_attr_set(env, obj, attr, valid);
+ cl_object_attr_unlock(obj);
+}
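+
+/*
+ * Worked example for the KMS update above (assuming, purely for
+ * illustration, that cl_offset() maps a page index to a byte offset with
+ * 4KiB pages): writing 100 bytes into page index 3 gives
+ * kms = 3 * 4096 + 100 = 12388, so the known minimum size and, if it grew,
+ * the apparent file size are both extended to cover the last byte written.
+ */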
+
+/**
+ * This is called when a page is accessed within a file in a way that creates
+ * a new page, if one were missing (i.e., if there were a hole at that place
+ * in the file, or the accessed page is beyond the current file size). Examples:
+ * ->commit_write() and ->nopage() methods.
+ *
+ * Expand stripe KMS if necessary.
+ */
+static void osc_page_touch(const struct lu_env *env,
+ struct osc_page *opage, unsigned to)
+{
+ struct cl_page *page = opage->ops_cl.cpl_page;
+ struct cl_object *obj = opage->ops_cl.cpl_obj;
+
+ osc_page_touch_at(env, obj, page->cp_index, to);
+}
+
+/**
+ * Implements cl_io_operations::cio_prepare_write() method for osc layer.
+ *
+ * \retval -EIO transfer initiated against this osc will most likely fail
+ * \retval 0 transfer initiated against this osc will most likely succeed.
+ *
+ * The reason for this check is to immediately return an error to the caller
+ * in the case of a deactivated import. Note that the import can be deactivated
+ * later, while pages dirtied by this IO are still in the cache, but this is
+ * irrelevant, because that would still return an error to the application (if
+ * it does fsync), but many applications don't do fsync because of performance
+ * issues, and we wanted to return an -EIO at write time to notify the
+ * application.
+ */
+static int osc_io_prepare_write(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ const struct cl_page_slice *slice,
+ unsigned from, unsigned to)
+{
+ struct osc_device *dev = lu2osc_dev(slice->cpl_obj->co_lu.lo_dev);
+ struct obd_import *imp = class_exp2cliimp(dev->od_exp);
+ struct osc_io *oio = cl2osc_io(env, ios);
+ int result = 0;
+
+ /*
+ * This implements OBD_BRW_CHECK logic from old client.
+ */
+
+ if (imp == NULL || imp->imp_invalid)
+ result = -EIO;
+ if (result == 0 && oio->oi_lockless)
+ /* this page contains `invalid' data, but who cares?
+ * nobody can access the invalid data.
+ * In osc_io_commit_write(), we're going to write exactly
+ * the [from, to) bytes of this page to the OST. -jay */
+ cl_page_export(env, slice->cpl_page, 1);
+
+ return result;
+}
+
+static int osc_io_commit_write(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ const struct cl_page_slice *slice,
+ unsigned from, unsigned to)
+{
+ struct osc_io *oio = cl2osc_io(env, ios);
+ struct osc_page *opg = cl2osc_page(slice);
+ struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
+ struct osc_async_page *oap = &opg->ops_oap;
+
+ LASSERT(to > 0);
+ /*
+ * XXX instead of calling osc_page_touch() here and in
+ * osc_io_fault_start() it might be more logical to introduce
+ * cl_page_touch() method, that generic cl_io_commit_write() and page
+ * fault code calls.
+ */
+ osc_page_touch(env, cl2osc_page(slice), to);
+ if (!client_is_remote(osc_export(obj)) &&
+ cfs_capable(CFS_CAP_SYS_RESOURCE))
+ oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
+
+ if (oio->oi_lockless)
+ /* see osc_io_prepare_write() for lockless io handling. */
+ cl_page_clip(env, slice->cpl_page, from, to);
+
+ return 0;
+}
+
+static int osc_io_fault_start(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct cl_io *io;
+ struct cl_fault_io *fio;
+
+ io = ios->cis_io;
+ fio = &io->u.ci_fault;
+ CDEBUG(D_INFO, "%lu %d %d\n",
+ fio->ft_index, fio->ft_writable, fio->ft_nob);
+ /*
+ * If mapping is writeable, adjust kms to cover this page,
+ * but do not extend kms beyond actual file size.
+ * See bug 10919.
+ */
+ if (fio->ft_writable)
+ osc_page_touch_at(env, ios->cis_obj,
+ fio->ft_index, fio->ft_nob);
+ return 0;
+}
+
+static int osc_async_upcall(void *a, int rc)
+{
+ struct osc_async_cbargs *args = a;
+
+ args->opc_rc = rc;
+ complete(&args->opc_sync);
+ return 0;
+}
+
+/**
+ * Checks that there are no pages being written in the extent being truncated.
+ */
+static int trunc_check_cb(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page, void *cbdata)
+{
+ const struct cl_page_slice *slice;
+ struct osc_page *ops;
+ struct osc_async_page *oap;
+ __u64 start = *(__u64 *)cbdata;
+
+ slice = cl_page_at(page, &osc_device_type);
+ LASSERT(slice != NULL);
+ ops = cl2osc_page(slice);
+ oap = &ops->ops_oap;
+
+ if (oap->oap_cmd & OBD_BRW_WRITE &&
+ !list_empty(&oap->oap_pending_item))
+ CL_PAGE_DEBUG(D_ERROR, env, page, "exists " LPU64 "/%s.\n",
+ start, current->comm);
+
+ {
+ struct page *vmpage = cl_page_vmpage(env, page);
+ if (PageLocked(vmpage))
+ CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n",
+ ops, page->cp_index,
+ (oap->oap_cmd & OBD_BRW_RWMASK));
+ }
+
+ return CLP_GANG_OKAY;
+}
+
+static void osc_trunc_check(const struct lu_env *env, struct cl_io *io,
+ struct osc_io *oio, __u64 size)
+{
+ struct cl_object *clob;
+ int partial;
+ pgoff_t start;
+
+ clob = oio->oi_cl.cis_obj;
+ start = cl_index(clob, size);
+ partial = cl_offset(clob, start) < size;
+
+ /*
+ * Complain if there are pages in the truncated region.
+ */
+ cl_page_gang_lookup(env, clob, io, start + partial, CL_PAGE_EOF,
+ trunc_check_cb, (void *)&size);
+}
+
+static int osc_io_setattr_start(const struct lu_env *env,
+ const struct cl_io_slice *slice)
+{
+ struct cl_io *io = slice->cis_io;
+ struct osc_io *oio = cl2osc_io(env, slice);
+ struct cl_object *obj = slice->cis_obj;
+ struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
+ struct cl_attr *attr = &osc_env_info(env)->oti_attr;
+ struct obdo *oa = &oio->oi_oa;
+ struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
+ __u64 size = io->u.ci_setattr.sa_attr.lvb_size;
+ unsigned int ia_valid = io->u.ci_setattr.sa_valid;
+ int result = 0;
+ struct obd_info oinfo = { { { 0 } } };
+
+ /* truncate cache dirty pages first */
+ if (cl_io_is_trunc(io))
+ result = osc_cache_truncate_start(env, oio, cl2osc(obj), size);
+
+ if (result == 0 && oio->oi_lockless == 0) {
+ cl_object_attr_lock(obj);
+ result = cl_object_attr_get(env, obj, attr);
+ if (result == 0) {
+ struct ost_lvb *lvb = &io->u.ci_setattr.sa_attr;
+ unsigned int cl_valid = 0;
+
+ if (ia_valid & ATTR_SIZE) {
+ attr->cat_size = attr->cat_kms = size;
+ cl_valid = (CAT_SIZE | CAT_KMS);
+ }
+ if (ia_valid & ATTR_MTIME_SET) {
+ attr->cat_mtime = lvb->lvb_mtime;
+ cl_valid |= CAT_MTIME;
+ }
+ if (ia_valid & ATTR_ATIME_SET) {
+ attr->cat_atime = lvb->lvb_atime;
+ cl_valid |= CAT_ATIME;
+ }
+ if (ia_valid & ATTR_CTIME_SET) {
+ attr->cat_ctime = lvb->lvb_ctime;
+ cl_valid |= CAT_CTIME;
+ }
+ result = cl_object_attr_set(env, obj, attr, cl_valid);
+ }
+ cl_object_attr_unlock(obj);
+ }
+ memset(oa, 0, sizeof(*oa));
+ if (result == 0) {
+ oa->o_oi = loi->loi_oi;
+ oa->o_mtime = attr->cat_mtime;
+ oa->o_atime = attr->cat_atime;
+ oa->o_ctime = attr->cat_ctime;
+ oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLATIME |
+ OBD_MD_FLCTIME | OBD_MD_FLMTIME;
+ if (ia_valid & ATTR_SIZE) {
+ oa->o_size = size;
+ oa->o_blocks = OBD_OBJECT_EOF;
+ oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
+
+ if (oio->oi_lockless) {
+ oa->o_flags = OBD_FL_SRVLOCK;
+ oa->o_valid |= OBD_MD_FLFLAGS;
+ }
+ } else {
+ LASSERT(oio->oi_lockless == 0);
+ }
+
+ oinfo.oi_oa = oa;
+ oinfo.oi_capa = io->u.ci_setattr.sa_capa;
+ init_completion(&cbargs->opc_sync);
+
+ if (ia_valid & ATTR_SIZE)
+ result = osc_punch_base(osc_export(cl2osc(obj)),
+ &oinfo, osc_async_upcall,
+ cbargs, PTLRPCD_SET);
+ else
+ result = osc_setattr_async_base(osc_export(cl2osc(obj)),
+ &oinfo, NULL,
+ osc_async_upcall,
+ cbargs, PTLRPCD_SET);
+ cbargs->opc_rpc_sent = result == 0;
+ }
+ return result;
+}
+
+static void osc_io_setattr_end(const struct lu_env *env,
+ const struct cl_io_slice *slice)
+{
+ struct cl_io *io = slice->cis_io;
+ struct osc_io *oio = cl2osc_io(env, slice);
+ struct cl_object *obj = slice->cis_obj;
+ struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
+ int result = 0;
+
+ if (cbargs->opc_rpc_sent) {
+ wait_for_completion(&cbargs->opc_sync);
+ result = io->ci_result = cbargs->opc_rc;
+ }
+ if (result == 0) {
+ if (oio->oi_lockless) {
+ /* lockless truncate */
+ struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
+
+ LASSERT(cl_io_is_trunc(io));
+ /* XXX: Need a lock. */
+ osd->od_stats.os_lockless_truncates++;
+ }
+ }
+
+ if (cl_io_is_trunc(io)) {
+ __u64 size = io->u.ci_setattr.sa_attr.lvb_size;
+ osc_trunc_check(env, io, oio, size);
+ if (oio->oi_trunc != NULL) {
+ osc_cache_truncate_end(env, oio, cl2osc(obj));
+ oio->oi_trunc = NULL;
+ }
+ }
+}
+
+static int osc_io_read_start(const struct lu_env *env,
+ const struct cl_io_slice *slice)
+{
+ struct osc_io *oio = cl2osc_io(env, slice);
+ struct cl_object *obj = slice->cis_obj;
+ struct cl_attr *attr = &osc_env_info(env)->oti_attr;
+ int result = 0;
+
+ if (oio->oi_lockless == 0) {
+ cl_object_attr_lock(obj);
+ result = cl_object_attr_get(env, obj, attr);
+ if (result == 0) {
+ attr->cat_atime = LTIME_S(CURRENT_TIME);
+ result = cl_object_attr_set(env, obj, attr,
+ CAT_ATIME);
+ }
+ cl_object_attr_unlock(obj);
+ }
+ return result;
+}
+
+static int osc_io_write_start(const struct lu_env *env,
+ const struct cl_io_slice *slice)
+{
+ struct osc_io *oio = cl2osc_io(env, slice);
+ struct cl_object *obj = slice->cis_obj;
+ struct cl_attr *attr = &osc_env_info(env)->oti_attr;
+ int result = 0;
+
+ if (oio->oi_lockless == 0) {
+ OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_SETTIME, 1);
+ cl_object_attr_lock(obj);
+ result = cl_object_attr_get(env, obj, attr);
+ if (result == 0) {
+ attr->cat_mtime = attr->cat_ctime =
+ LTIME_S(CURRENT_TIME);
+ result = cl_object_attr_set(env, obj, attr,
+ CAT_MTIME | CAT_CTIME);
+ }
+ cl_object_attr_unlock(obj);
+ }
+ return result;
+}
+
+static int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
+ struct cl_fsync_io *fio)
+{
+ struct osc_io *oio = osc_env_io(env);
+ struct obdo *oa = &oio->oi_oa;
+ struct obd_info *oinfo = &oio->oi_info;
+ struct lov_oinfo *loi = obj->oo_oinfo;
+ struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
+ int rc = 0;
+
+ memset(oa, 0, sizeof(*oa));
+ oa->o_oi = loi->loi_oi;
+ oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
+
+ /* reload size and blocks for start and end of sync range */
+ oa->o_size = fio->fi_start;
+ oa->o_blocks = fio->fi_end;
+ oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
+
+ obdo_set_parent_fid(oa, fio->fi_fid);
+
+ memset(oinfo, 0, sizeof(*oinfo));
+ oinfo->oi_oa = oa;
+ oinfo->oi_capa = fio->fi_capa;
+ init_completion(&cbargs->opc_sync);
+
+ rc = osc_sync_base(osc_export(obj), oinfo, osc_async_upcall, cbargs,
+ PTLRPCD_SET);
+ return rc;
+}
+
+static int osc_io_fsync_start(const struct lu_env *env,
+ const struct cl_io_slice *slice)
+{
+ struct cl_io *io = slice->cis_io;
+ struct cl_fsync_io *fio = &io->u.ci_fsync;
+ struct cl_object *obj = slice->cis_obj;
+ struct osc_object *osc = cl2osc(obj);
+ pgoff_t start = cl_index(obj, fio->fi_start);
+ pgoff_t end = cl_index(obj, fio->fi_end);
+ int result = 0;
+
+ if (fio->fi_end == OBD_OBJECT_EOF)
+ end = CL_PAGE_EOF;
+
+ result = osc_cache_writeback_range(env, osc, start, end, 0,
+ fio->fi_mode == CL_FSYNC_DISCARD);
+ if (result > 0) {
+ fio->fi_nr_written += result;
+ result = 0;
+ }
+ if (fio->fi_mode == CL_FSYNC_ALL) {
+ int rc;
+
+ /* we have to wait for writeback to finish before we can
+ * send the OST_SYNC RPC. This is bad because it causes
+ * extents to be written out osc by osc. However, we usually
+ * start writeback before CL_FSYNC_ALL, so this is not a real
+ * problem in practice. */
+ rc = osc_cache_wait_range(env, osc, start, end);
+ if (result == 0)
+ result = rc;
+ rc = osc_fsync_ost(env, osc, fio);
+ if (result == 0)
+ result = rc;
+ }
+
+ return result;
+}
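+
+/*
+ * Summary of how the CL_FSYNC modes are handled here and in
+ * osc_io_fsync_end(): CL_FSYNC_DISCARD passes discard=1 to
+ * osc_cache_writeback_range(), CL_FSYNC_LOCAL only waits for local
+ * writeback via osc_cache_wait_range(), and CL_FSYNC_ALL additionally
+ * waits for the OST_SYNC RPC issued through osc_fsync_ost().
+ */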
+
+static void osc_io_fsync_end(const struct lu_env *env,
+ const struct cl_io_slice *slice)
+{
+ struct cl_fsync_io *fio = &slice->cis_io->u.ci_fsync;
+ struct cl_object *obj = slice->cis_obj;
+ pgoff_t start = cl_index(obj, fio->fi_start);
+ pgoff_t end = cl_index(obj, fio->fi_end);
+ int result = 0;
+
+ if (fio->fi_mode == CL_FSYNC_LOCAL) {
+ result = osc_cache_wait_range(env, cl2osc(obj), start, end);
+ } else if (fio->fi_mode == CL_FSYNC_ALL) {
+ struct osc_io *oio = cl2osc_io(env, slice);
+ struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
+
+ wait_for_completion(&cbargs->opc_sync);
+ if (result == 0)
+ result = cbargs->opc_rc;
+ }
+ slice->cis_io->ci_result = result;
+}
+
+static void osc_io_end(const struct lu_env *env,
+ const struct cl_io_slice *slice)
+{
+ struct osc_io *oio = cl2osc_io(env, slice);
+
+ if (oio->oi_active) {
+ osc_extent_release(env, oio->oi_active);
+ oio->oi_active = NULL;
+ }
+}
+
+static const struct cl_io_operations osc_io_ops = {
+ .op = {
+ [CIT_READ] = {
+ .cio_start = osc_io_read_start,
+ .cio_fini = osc_io_fini
+ },
+ [CIT_WRITE] = {
+ .cio_start = osc_io_write_start,
+ .cio_end = osc_io_end,
+ .cio_fini = osc_io_fini
+ },
+ [CIT_SETATTR] = {
+ .cio_start = osc_io_setattr_start,
+ .cio_end = osc_io_setattr_end
+ },
+ [CIT_FAULT] = {
+ .cio_start = osc_io_fault_start,
+ .cio_end = osc_io_end,
+ .cio_fini = osc_io_fini
+ },
+ [CIT_FSYNC] = {
+ .cio_start = osc_io_fsync_start,
+ .cio_end = osc_io_fsync_end,
+ .cio_fini = osc_io_fini
+ },
+ [CIT_MISC] = {
+ .cio_fini = osc_io_fini
+ }
+ },
+ .req_op = {
+ [CRT_READ] = {
+ .cio_submit = osc_io_submit
+ },
+ [CRT_WRITE] = {
+ .cio_submit = osc_io_submit
+ }
+ },
+ .cio_prepare_write = osc_io_prepare_write,
+ .cio_commit_write = osc_io_commit_write
+};
+
+/*****************************************************************************
+ *
+ * Transfer operations.
+ *
+ */
+
+static int osc_req_prep(const struct lu_env *env,
+ const struct cl_req_slice *slice)
+{
+ return 0;
+}
+
+static void osc_req_completion(const struct lu_env *env,
+ const struct cl_req_slice *slice, int ioret)
+{
+ struct osc_req *or;
+
+ or = cl2osc_req(slice);
+ OBD_SLAB_FREE_PTR(or, osc_req_kmem);
+}
+
+/**
+ * Implementation of struct cl_req_operations::cro_attr_set() for osc
+ * layer. osc is responsible for struct obdo::o_id and struct obdo::o_seq
+ * fields.
+ */
+static void osc_req_attr_set(const struct lu_env *env,
+ const struct cl_req_slice *slice,
+ const struct cl_object *obj,
+ struct cl_req_attr *attr, obd_valid flags)
+{
+ struct lov_oinfo *oinfo;
+ struct cl_req *clerq;
+ struct cl_page *apage; /* _some_ page in @clerq */
+ struct cl_lock *lock; /* _some_ lock protecting @apage */
+ struct osc_lock *olck;
+ struct osc_page *opg;
+ struct obdo *oa;
+ struct ost_lvb *lvb;
+
+ oinfo = cl2osc(obj)->oo_oinfo;
+ lvb = &oinfo->loi_lvb;
+ oa = attr->cra_oa;
+
+ if ((flags & OBD_MD_FLMTIME) != 0) {
+ oa->o_mtime = lvb->lvb_mtime;
+ oa->o_valid |= OBD_MD_FLMTIME;
+ }
+ if ((flags & OBD_MD_FLATIME) != 0) {
+ oa->o_atime = lvb->lvb_atime;
+ oa->o_valid |= OBD_MD_FLATIME;
+ }
+ if ((flags & OBD_MD_FLCTIME) != 0) {
+ oa->o_ctime = lvb->lvb_ctime;
+ oa->o_valid |= OBD_MD_FLCTIME;
+ }
+ if (flags & OBD_MD_FLGROUP) {
+ ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi));
+ oa->o_valid |= OBD_MD_FLGROUP;
+ }
+ if (flags & OBD_MD_FLID) {
+ ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi));
+ oa->o_valid |= OBD_MD_FLID;
+ }
+ if (flags & OBD_MD_FLHANDLE) {
+ clerq = slice->crs_req;
+ LASSERT(!list_empty(&clerq->crq_pages));
+ apage = container_of(clerq->crq_pages.next,
+ struct cl_page, cp_flight);
+ opg = osc_cl_page_osc(apage);
+ apage = opg->ops_cl.cpl_page; /* now apage is a sub-page */
+ lock = cl_lock_at_page(env, apage->cp_obj, apage, NULL, 1, 1);
+ if (lock == NULL) {
+ struct cl_object_header *head;
+ struct cl_lock *scan;
+
+ head = cl_object_header(apage->cp_obj);
+ list_for_each_entry(scan, &head->coh_locks,
+ cll_linkage)
+ CL_LOCK_DEBUG(D_ERROR, env, scan,
+ "no cover page!\n");
+ CL_PAGE_DEBUG(D_ERROR, env, apage,
+ "dump uncover page!\n");
+ dump_stack();
+ LBUG();
+ }
+
+ olck = osc_lock_at(lock);
+ LASSERT(olck != NULL);
+ LASSERT(ergo(opg->ops_srvlock, olck->ols_lock == NULL));
+ /* check for lockless io. */
+ if (olck->ols_lock != NULL) {
+ oa->o_handle = olck->ols_lock->l_remote_handle;
+ oa->o_valid |= OBD_MD_FLHANDLE;
+ }
+ cl_lock_put(env, lock);
+ }
+}
+
+static const struct cl_req_operations osc_req_ops = {
+ .cro_prep = osc_req_prep,
+ .cro_attr_set = osc_req_attr_set,
+ .cro_completion = osc_req_completion
+};
+
+
+int osc_io_init(const struct lu_env *env,
+ struct cl_object *obj, struct cl_io *io)
+{
+ struct osc_io *oio = osc_env_io(env);
+
+ CL_IO_SLICE_CLEAN(oio, oi_cl);
+ cl_io_slice_add(io, &oio->oi_cl, obj, &osc_io_ops);
+ return 0;
+}
+
+int osc_req_init(const struct lu_env *env, struct cl_device *dev,
+ struct cl_req *req)
+{
+ struct osc_req *or;
+ int result;
+
+ OBD_SLAB_ALLOC_PTR_GFP(or, osc_req_kmem, __GFP_IO);
+ if (or != NULL) {
+ cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops);
+ result = 0;
+ } else
+ result = -ENOMEM;
+ return result;
+}
+
+/** @} osc */
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
new file mode 100644
index 0000000..5d7bdbf
--- /dev/null
+++ b/drivers/staging/lustre/lustre/osc/osc_lock.c
@@ -0,0 +1,1615 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Implementation of cl_lock for OSC layer.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_OSC
+
+# include <linux/libcfs/libcfs.h>
+/* fid_build_reg_res_name() */
+#include <lustre_fid.h>
+
+#include "osc_cl_internal.h"
+
+/** \addtogroup osc
+ * @{
+ */
+
+#define _PAGEREF_MAGIC (-10000000)
+
+/*****************************************************************************
+ *
+ * Type conversions.
+ *
+ */
+
+static const struct cl_lock_operations osc_lock_ops;
+static const struct cl_lock_operations osc_lock_lockless_ops;
+static void osc_lock_to_lockless(const struct lu_env *env,
+ struct osc_lock *ols, int force);
+static int osc_lock_has_pages(struct osc_lock *olck);
+
+int osc_lock_is_lockless(const struct osc_lock *olck)
+{
+ return (olck->ols_cl.cls_ops == &osc_lock_lockless_ops);
+}
+
+/**
+ * Returns a weak pointer to the ldlm lock identified by a handle. Returned
+ * pointer cannot be dereferenced, as lock is not protected from concurrent
+ * reclaim. This function is a helper for osc_lock_invariant().
+ */
+static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
+{
+ struct ldlm_lock *lock;
+
+ lock = ldlm_handle2lock(handle);
+ if (lock != NULL)
+ LDLM_LOCK_PUT(lock);
+ return lock;
+}
+
+/**
+ * Invariant that has to be true all of the time.
+ */
+static int osc_lock_invariant(struct osc_lock *ols)
+{
+ struct ldlm_lock *lock = osc_handle_ptr(&ols->ols_handle);
+ struct ldlm_lock *olock = ols->ols_lock;
+ int handle_used = lustre_handle_is_used(&ols->ols_handle);
+
+ if (ergo(osc_lock_is_lockless(ols),
+ ols->ols_locklessable && ols->ols_lock == NULL))
+ return 1;
+
+ /*
+ * If all the following "ergo"s are true, return 1, otherwise 0
+ */
+ if (! ergo(olock != NULL, handle_used))
+ return 0;
+
+ if (! ergo(olock != NULL,
+ olock->l_handle.h_cookie == ols->ols_handle.cookie))
+ return 0;
+
+ if (! ergo(handle_used,
+ ergo(lock != NULL && olock != NULL, lock == olock) &&
+ ergo(lock == NULL, olock == NULL)))
+ return 0;
+ /*
+ * Check that ->ols_handle and ->ols_lock are consistent, but
+ * take into account that they are set at the different time.
+ */
+ if (! ergo(ols->ols_state == OLS_CANCELLED,
+ olock == NULL && !handle_used))
+ return 0;
+ /*
+ * DLM lock is destroyed only after we have seen cancellation
+ * ast.
+ */
+ if (! ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
+ ((olock->l_flags & LDLM_FL_DESTROYED) == 0)))
+ return 0;
+
+ if (! ergo(ols->ols_state == OLS_GRANTED,
+ olock != NULL &&
+ olock->l_req_mode == olock->l_granted_mode &&
+ ols->ols_hold))
+ return 0;
+ return 1;
+}
+
+/*****************************************************************************
+ *
+ * Lock operations.
+ *
+ */
+
+/**
+ * Breaks a link between osc_lock and dlm_lock.
+ */
+static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
+{
+ struct ldlm_lock *dlmlock;
+
+ spin_lock(&osc_ast_guard);
+ dlmlock = olck->ols_lock;
+ if (dlmlock == NULL) {
+ spin_unlock(&osc_ast_guard);
+ return;
+ }
+
+ olck->ols_lock = NULL;
+ /* wb(); --- for all who check (ols->ols_lock != NULL) before
+ * calling osc_lock_detach() */
+ dlmlock->l_ast_data = NULL;
+ olck->ols_handle.cookie = 0ULL;
+ spin_unlock(&osc_ast_guard);
+
+ lock_res_and_lock(dlmlock);
+ if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
+ struct cl_object *obj = olck->ols_cl.cls_obj;
+ struct cl_attr *attr = &osc_env_info(env)->oti_attr;
+ __u64 old_kms;
+
+ cl_object_attr_lock(obj);
+ /* Must get the value under the lock to avoid possible races. */
+ old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
+ /* Update the kms. Need to loop all granted locks.
+ * Not a problem for the client */
+ attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
+
+ cl_object_attr_set(env, obj, attr, CAT_KMS);
+ cl_object_attr_unlock(obj);
+ }
+ unlock_res_and_lock(dlmlock);
+
+ /* release a reference taken in osc_lock_upcall0(). */
+ LASSERT(olck->ols_has_ref);
+ lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
+ LDLM_LOCK_RELEASE(dlmlock);
+ olck->ols_has_ref = 0;
+}
+
+static int osc_lock_unhold(struct osc_lock *ols)
+{
+ int result = 0;
+
+ if (ols->ols_hold) {
+ ols->ols_hold = 0;
+ result = osc_cancel_base(&ols->ols_handle,
+ ols->ols_einfo.ei_mode);
+ }
+ return result;
+}
+
+static int osc_lock_unuse(const struct lu_env *env,
+ const struct cl_lock_slice *slice)
+{
+ struct osc_lock *ols = cl2osc_lock(slice);
+
+ LINVRNT(osc_lock_invariant(ols));
+
+ switch (ols->ols_state) {
+ case OLS_NEW:
+ LASSERT(!ols->ols_hold);
+ LASSERT(ols->ols_agl);
+ return 0;
+ case OLS_UPCALL_RECEIVED:
+ osc_lock_unhold(ols);
+ case OLS_ENQUEUED:
+ LASSERT(!ols->ols_hold);
+ osc_lock_detach(env, ols);
+ ols->ols_state = OLS_NEW;
+ return 0;
+ case OLS_GRANTED:
+ LASSERT(!ols->ols_glimpse);
+ LASSERT(ols->ols_hold);
+ /*
+ * Move lock into OLS_RELEASED state before calling
+ * osc_cancel_base() so that possible synchronous cancellation
+ * (that always happens e.g., for liblustre) sees that lock is
+ * released.
+ */
+ ols->ols_state = OLS_RELEASED;
+ return osc_lock_unhold(ols);
+ default:
+ CERROR("Impossible state: %d\n", ols->ols_state);
+ LBUG();
+ }
+}
+
+static void osc_lock_fini(const struct lu_env *env,
+ struct cl_lock_slice *slice)
+{
+ struct osc_lock *ols = cl2osc_lock(slice);
+
+ LINVRNT(osc_lock_invariant(ols));
+ /*
+ * ->ols_hold can still be true at this point if, for example, a
+ * thread that requested a lock was killed (and released a reference
+ * to the lock), before reply from a server was received. In this case
+ * lock is destroyed immediately after upcall.
+ */
+ osc_lock_unhold(ols);
+ LASSERT(ols->ols_lock == NULL);
+ LASSERT(atomic_read(&ols->ols_pageref) == 0 ||
+ atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC);
+
+ OBD_SLAB_FREE_PTR(ols, osc_lock_kmem);
+}
+
+static void osc_lock_build_policy(const struct lu_env *env,
+ const struct cl_lock *lock,
+ ldlm_policy_data_t *policy)
+{
+ const struct cl_lock_descr *d = &lock->cll_descr;
+
+ osc_index2policy(policy, d->cld_obj, d->cld_start, d->cld_end);
+ policy->l_extent.gid = d->cld_gid;
+}
+
+static __u64 osc_enq2ldlm_flags(__u32 enqflags)
+{
+ __u64 result = 0;
+
+ LASSERT((enqflags & ~CEF_MASK) == 0);
+
+ if (enqflags & CEF_NONBLOCK)
+ result |= LDLM_FL_BLOCK_NOWAIT;
+ if (enqflags & CEF_ASYNC)
+ result |= LDLM_FL_HAS_INTENT;
+ if (enqflags & CEF_DISCARD_DATA)
+ result |= LDLM_FL_AST_DISCARD_DATA;
+ return result;
+}
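+
+/*
+ * For example, an enqueue with CEF_NONBLOCK | CEF_DISCARD_DATA is
+ * translated into LDLM_FL_BLOCK_NOWAIT | LDLM_FL_AST_DISCARD_DATA before
+ * being handed to the DLM.
+ */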
+
+/**
+ * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
+ * pointers. Initialized in osc_init().
+ */
+spinlock_t osc_ast_guard;
+
+static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
+{
+ struct osc_lock *olck;
+
+ lock_res_and_lock(dlm_lock);
+ spin_lock(&osc_ast_guard);
+ olck = dlm_lock->l_ast_data;
+ if (olck != NULL) {
+ struct cl_lock *lock = olck->ols_cl.cls_lock;
+ /*
+ * If osc_lock holds a reference on ldlm lock, return it even
+ * when cl_lock is in CLS_FREEING state. This way
+ *
+ * osc_ast_data_get(dlmlock) == NULL
+ *
+ * guarantees that all osc references on dlmlock were
+ * released. osc_dlm_blocking_ast0() relies on that.
+ */
+ if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
+ cl_lock_get_trust(lock);
+ lu_ref_add_atomic(&lock->cll_reference,
+ "ast", current);
+ } else
+ olck = NULL;
+ }
+ spin_unlock(&osc_ast_guard);
+ unlock_res_and_lock(dlm_lock);
+ return olck;
+}
+
+static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
+{
+ struct cl_lock *lock;
+
+ lock = olck->ols_cl.cls_lock;
+ lu_ref_del(&lock->cll_reference, "ast", current);
+ cl_lock_put(env, lock);
+}
+
+/**
+ * Updates object attributes from a lock value block (lvb) received together
+ * with the DLM lock reply from the server. Copy of osc_update_enqueue()
+ * logic.
+ *
+ * This can be optimized to not update attributes when lock is a result of a
+ * local match.
+ *
+ * Called under lock and resource spin-locks.
+ */
+static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
+ int rc)
+{
+ struct ost_lvb *lvb;
+ struct cl_object *obj;
+ struct lov_oinfo *oinfo;
+ struct cl_attr *attr;
+ unsigned valid;
+
+ if (!(olck->ols_flags & LDLM_FL_LVB_READY))
+ return;
+
+ lvb = &olck->ols_lvb;
+ obj = olck->ols_cl.cls_obj;
+ oinfo = cl2osc(obj)->oo_oinfo;
+ attr = &osc_env_info(env)->oti_attr;
+ valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
+ cl_lvb2attr(attr, lvb);
+
+ cl_object_attr_lock(obj);
+ if (rc == 0) {
+ struct ldlm_lock *dlmlock;
+ __u64 size;
+
+ dlmlock = olck->ols_lock;
+ LASSERT(dlmlock != NULL);
+
+ /* re-grab LVB from a dlm lock under DLM spin-locks. */
+ *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
+ size = lvb->lvb_size;
+ /* Extend KMS up to the end of this lock and no further
+ * A lock on [x,y] means a KMS of up to y + 1 bytes! */
+ if (size > dlmlock->l_policy_data.l_extent.end)
+ size = dlmlock->l_policy_data.l_extent.end + 1;
+ if (size >= oinfo->loi_kms) {
+ LDLM_DEBUG(dlmlock, "lock acquired, setting rss="LPU64
+ ", kms="LPU64, lvb->lvb_size, size);
+ valid |= CAT_KMS;
+ attr->cat_kms = size;
+ } else {
+ LDLM_DEBUG(dlmlock, "lock acquired, setting rss="
+ LPU64"; leaving kms="LPU64", end="LPU64,
+ lvb->lvb_size, oinfo->loi_kms,
+ dlmlock->l_policy_data.l_extent.end);
+ }
+ ldlm_lock_allow_match_locked(dlmlock);
+ } else if (rc == -ENAVAIL && olck->ols_glimpse) {
+ CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
+ " kms="LPU64"\n", lvb->lvb_size, oinfo->loi_kms);
+ } else
+ valid = 0;
+
+ if (valid != 0)
+ cl_object_attr_set(env, obj, attr, valid);
+
+ cl_object_attr_unlock(obj);
+}
+
+/**
+ * Called when a lock is granted, from an upcall (when server returned a
+ * granted lock), or from completion AST, when server returned a blocked lock.
+ *
+ * Called under lock and resource spin-locks, that are released temporarily
+ * here.
+ */
+static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
+ struct ldlm_lock *dlmlock, int rc)
+{
+ struct ldlm_extent *ext;
+ struct cl_lock *lock;
+ struct cl_lock_descr *descr;
+
+ LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
+
+ if (olck->ols_state < OLS_GRANTED) {
+ lock = olck->ols_cl.cls_lock;
+ ext = &dlmlock->l_policy_data.l_extent;
+ descr = &osc_env_info(env)->oti_descr;
+ descr->cld_obj = lock->cll_descr.cld_obj;
+
+ /* XXX check that ->l_granted_mode is valid. */
+ descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
+ descr->cld_start = cl_index(descr->cld_obj, ext->start);
+ descr->cld_end = cl_index(descr->cld_obj, ext->end);
+ descr->cld_gid = ext->gid;
+ /*
+ * tell upper layers the extent of the lock that was actually
+ * granted
+ */
+ olck->ols_state = OLS_GRANTED;
+ osc_lock_lvb_update(env, olck, rc);
+
+ /* release DLM spin-locks to allow cl_lock_{modify,signal}()
+ * to take a semaphore on a parent lock. This is safe, because
+ * spin-locks are needed to protect consistency of
+ * dlmlock->l_*_mode and LVB, and we have finished processing
+ * them. */
+ unlock_res_and_lock(dlmlock);
+ cl_lock_modify(env, lock, descr);
+ cl_lock_signal(env, lock);
+ LINVRNT(osc_lock_invariant(olck));
+ lock_res_and_lock(dlmlock);
+ }
+}
+
+static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
+
+{
+ struct ldlm_lock *dlmlock;
+
+ dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
+ LASSERT(dlmlock != NULL);
+
+ lock_res_and_lock(dlmlock);
+ spin_lock(&osc_ast_guard);
+ LASSERT(dlmlock->l_ast_data == olck);
+ LASSERT(olck->ols_lock == NULL);
+ olck->ols_lock = dlmlock;
+ spin_unlock(&osc_ast_guard);
+
+ /*
+ * Lock might not be granted yet. In this case, completion ast
+ * (osc_ldlm_completion_ast()) comes later and finishes lock
+ * granting.
+ */
+ if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
+ osc_lock_granted(env, olck, dlmlock, 0);
+ unlock_res_and_lock(dlmlock);
+
+ /*
+ * osc_enqueue_interpret() decrefs asynchronous locks, counter
+ * this.
+ */
+ ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
+ olck->ols_hold = 1;
+
+ /* lock reference taken by ldlm_handle2lock_long() is owned by
+ * osc_lock and released in osc_lock_detach() */
+ lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
+ olck->ols_has_ref = 1;
+}
+
+/**
+ * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
+ * received from a server, or after osc_enqueue_base() matched a local DLM
+ * lock.
+ */
+static int osc_lock_upcall(void *cookie, int errcode)
+{
+ struct osc_lock *olck = cookie;
+ struct cl_lock_slice *slice = &olck->ols_cl;
+ struct cl_lock *lock = slice->cls_lock;
+ struct lu_env *env;
+ struct cl_env_nest nest;
+
+ env = cl_env_nested_get(&nest);
+ if (!IS_ERR(env)) {
+ int rc;
+
+ cl_lock_mutex_get(env, lock);
+
+ LASSERT(lock->cll_state >= CLS_QUEUING);
+ if (olck->ols_state == OLS_ENQUEUED) {
+ olck->ols_state = OLS_UPCALL_RECEIVED;
+ rc = ldlm_error2errno(errcode);
+ } else if (olck->ols_state == OLS_CANCELLED) {
+ rc = -EIO;
+ } else {
+ CERROR("Impossible state: %d\n", olck->ols_state);
+ LBUG();
+ }
+ if (rc) {
+ struct ldlm_lock *dlmlock;
+
+ dlmlock = ldlm_handle2lock(&olck->ols_handle);
+ if (dlmlock != NULL) {
+ lock_res_and_lock(dlmlock);
+ spin_lock(&osc_ast_guard);
+ LASSERT(olck->ols_lock == NULL);
+ dlmlock->l_ast_data = NULL;
+ olck->ols_handle.cookie = 0ULL;
+ spin_unlock(&osc_ast_guard);
+ ldlm_lock_fail_match_locked(dlmlock);
+ unlock_res_and_lock(dlmlock);
+ LDLM_LOCK_PUT(dlmlock);
+ }
+ } else {
+ if (olck->ols_glimpse)
+ olck->ols_glimpse = 0;
+ osc_lock_upcall0(env, olck);
+ }
+
+ /* Error handling, some errors are tolerable. */
+ if (olck->ols_locklessable && rc == -EUSERS) {
+ /* This is a tolerable error, turn this lock into
+ * lockless lock.
+ */
+ osc_object_set_contended(cl2osc(slice->cls_obj));
+ LASSERT(slice->cls_ops == &osc_lock_ops);
+
+ /* Change this lock to ldlmlock-less lock. */
+ osc_lock_to_lockless(env, olck, 1);
+ olck->ols_state = OLS_GRANTED;
+ rc = 0;
+ } else if (olck->ols_glimpse && rc == -ENAVAIL) {
+ osc_lock_lvb_update(env, olck, rc);
+ cl_lock_delete(env, lock);
+ /* Hide the error. */
+ rc = 0;
+ }
+
+ if (rc == 0) {
+ /* In the AGL case, the RPC sponsor may exit the cl_lock
+ * processing without calling wait() before the related OSC
+ * lock upcall(). So update the lock status according
+ * to the enqueue result inside the AGL upcall(). */
+ if (olck->ols_agl) {
+ lock->cll_flags |= CLF_FROM_UPCALL;
+ cl_wait_try(env, lock);
+ lock->cll_flags &= ~CLF_FROM_UPCALL;
+ if (!olck->ols_glimpse)
+ olck->ols_agl = 0;
+ }
+ cl_lock_signal(env, lock);
+ /* del user for lock upcall cookie */
+ cl_unuse_try(env, lock);
+ } else {
+ /* del user for lock upcall cookie */
+ cl_lock_user_del(env, lock);
+ cl_lock_error(env, lock, rc);
+ }
+
+ /* release cookie reference, acquired by osc_lock_enqueue() */
+ cl_lock_hold_release(env, lock, "upcall", lock);
+ cl_lock_mutex_put(env, lock);
+
+ lu_ref_del(&lock->cll_reference, "upcall", lock);
+ /* This may be the last reference, so cl_lock_put() must be
+ * called after cl_lock_mutex_put(). */
+ cl_lock_put(env, lock);
+
+ cl_env_nested_put(&nest, env);
+ } else {
+ /* should never happen, similar to osc_ldlm_blocking_ast(). */
+ LBUG();
+ }
+ return errcode;
+}
+
+/**
+ * Core of osc_dlm_blocking_ast() logic.
+ */
+static void osc_lock_blocking(const struct lu_env *env,
+ struct ldlm_lock *dlmlock,
+ struct osc_lock *olck, int blocking)
+{
+ struct cl_lock *lock = olck->ols_cl.cls_lock;
+
+ LASSERT(olck->ols_lock == dlmlock);
+ CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
+ LASSERT(!osc_lock_is_lockless(olck));
+
+ /*
+ * Lock might still be addref-ed here, if, e.g., a blocking ast
+ * is sent for a failed lock.
+ */
+ osc_lock_unhold(olck);
+
+ if (blocking && olck->ols_state < OLS_BLOCKED)
+ /*
+ * Move osc_lock into OLS_BLOCKED before canceling the lock,
+ * because it recursively re-enters osc_lock_blocking(), with
+ * the state set to OLS_CANCELLED.
+ */
+ olck->ols_state = OLS_BLOCKED;
+ /*
+ * cancel and destroy lock at least once no matter how blocking ast is
+ * entered (see comment above osc_ldlm_blocking_ast() for use
+ * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
+ */
+ cl_lock_cancel(env, lock);
+ cl_lock_delete(env, lock);
+}
+
+/**
+ * Helper for osc_dlm_blocking_ast() handling discrepancies between cl_lock
+ * and ldlm_lock caches.
+ */
+static int osc_dlm_blocking_ast0(const struct lu_env *env,
+ struct ldlm_lock *dlmlock,
+ void *data, int flag)
+{
+ struct osc_lock *olck;
+ struct cl_lock *lock;
+ int result;
+ int cancel;
+
+ LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);
+
+ cancel = 0;
+ olck = osc_ast_data_get(dlmlock);
+ if (olck != NULL) {
+ lock = olck->ols_cl.cls_lock;
+ cl_lock_mutex_get(env, lock);
+ LINVRNT(osc_lock_invariant(olck));
+ if (olck->ols_ast_wait) {
+ /* wake up osc_lock_use() */
+ cl_lock_signal(env, lock);
+ olck->ols_ast_wait = 0;
+ }
+ /*
+ * Lock might have been canceled while this thread was
+ * sleeping for lock mutex, but olck is pinned in memory.
+ */
+ if (olck == dlmlock->l_ast_data) {
+ /*
+ * NOTE: DLM sends blocking ASTs for failed locks
+ * (that are still in pre-OLS_GRANTED state)
+ * too, and they have to be canceled, otherwise
+ * the DLM lock is never destroyed and stays
+ * stuck in memory.
+ *
+ * Alternatively, ldlm_cli_cancel() can be
+ * called here directly for osc_locks with
+ * ols_state < OLS_GRANTED to maintain an
+ * invariant that ->clo_cancel() is only called
+ * for locks that were granted.
+ */
+ LASSERT(data == olck);
+ osc_lock_blocking(env, dlmlock,
+ olck, flag == LDLM_CB_BLOCKING);
+ } else
+ cancel = 1;
+ cl_lock_mutex_put(env, lock);
+ osc_ast_data_put(env, olck);
+ } else
+ /*
+ * DLM lock exists, but there is no cl_lock attached to it.
+ * This is a `normal' race. cl_object and its cl_lock's can be
+ * removed by memory pressure, together with all pages.
+ */
+ cancel = (flag == LDLM_CB_BLOCKING);
+
+ if (cancel) {
+ struct lustre_handle *lockh;
+
+ lockh = &osc_env_info(env)->oti_handle;
+ ldlm_lock2handle(dlmlock, lockh);
+ result = ldlm_cli_cancel(lockh, LCF_ASYNC);
+ } else
+ result = 0;
+ return result;
+}
+
+/**
+ * Blocking ast invoked by ldlm when dlm lock is either blocking progress of
+ * some other lock, or is canceled. This function is installed as a
+ * ldlm_lock::l_blocking_ast() for client extent locks.
+ *
+ * Control flow is tricky, because ldlm uses the same call-back
+ * (ldlm_lock::l_blocking_ast()) for both blocking and cancellation ast's.
+ *
+ * \param dlmlock lock for which ast occurred.
+ *
+ * \param new description of a conflicting lock in case of blocking ast.
+ *
+ * \param data value of dlmlock->l_ast_data
+ *
+ * \param flag LDLM_CB_BLOCKING or LDLM_CB_CANCELING. Used to distinguish
+ * cancellation and blocking ast's.
+ *
+ * Possible use cases:
+ *
+ * - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING) to cancel
+ * lock due to lock lru pressure, or explicit user request to purge
+ * locks.
+ *
+ * - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
+ * us that dlmlock conflicts with another lock that some client is
+ * enqueuing. Lock is canceled.
+ *
+ * - cl_lock_cancel() is called. osc_lock_cancel() calls
+ * ldlm_cli_cancel() that calls
+ *
+ * dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
+ *
+ * recursively entering osc_ldlm_blocking_ast().
+ *
+ * - client cancels the lock voluntarily (e.g., as a part of early cancellation):
+ *
+ * cl_lock_cancel()->
+ * osc_lock_cancel()->
+ * ldlm_cli_cancel()->
+ * dlmlock->l_blocking_ast(..., LDLM_CB_CANCELING)
+ *
+ */
+static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
+ struct ldlm_lock_desc *new, void *data,
+ int flag)
+{
+ struct lu_env *env;
+ struct cl_env_nest nest;
+ int result;
+
+ /*
+ * This can be called in the context of outer IO, e.g.,
+ *
+ * cl_enqueue()->...
+ * ->osc_enqueue_base()->...
+ * ->ldlm_prep_elc_req()->...
+ * ->ldlm_cancel_callback()->...
+ * ->osc_ldlm_blocking_ast()
+ *
+ * A new environment has to be created to avoid corrupting the outer context.
+ */
+ env = cl_env_nested_get(&nest);
+ if (!IS_ERR(env)) {
+ result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
+ cl_env_nested_put(&nest, env);
+ } else {
+ result = PTR_ERR(env);
+ /*
+ * XXX This should never happen, as cl_lock is
+ * stuck. Pre-allocated environment a la vvp_inode_fini_env
+ * should be used.
+ */
+ LBUG();
+ }
+ if (result != 0) {
+ if (result == -ENODATA)
+ result = 0;
+ else
+ CERROR("BAST failed: %d\n", result);
+ }
+ return result;
+}
+
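+/**
+ * Completion ast for client extent locks, installed via osc_lock_build_einfo().
+ * Runs the generic ldlm completion handling first, then copies the LVB
+ * returned by the server into the osc_lock and, if the dlm lock is granted,
+ * finishes granting on the cl_lock side (osc_lock_granted()).
+ */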
+static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
+ __u64 flags, void *data)
+{
+ struct cl_env_nest nest;
+ struct lu_env *env;
+ struct osc_lock *olck;
+ struct cl_lock *lock;
+ int result;
+ int dlmrc;
+
+ /* first, do dlm part of the work */
+ dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
+ /* then, notify cl_lock */
+ env = cl_env_nested_get(&nest);
+ if (!IS_ERR(env)) {
+ olck = osc_ast_data_get(dlmlock);
+ if (olck != NULL) {
+ lock = olck->ols_cl.cls_lock;
+ cl_lock_mutex_get(env, lock);
+ /*
+ * ldlm_handle_cp_callback() copied LVB from request
+ * to lock->l_lvb_data, store it in osc_lock.
+ */
+ LASSERT(dlmlock->l_lvb_data != NULL);
+ lock_res_and_lock(dlmlock);
+ olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
+ if (olck->ols_lock == NULL) {
+ /*
+ * upcall (osc_lock_upcall()) hasn't yet been
+ * called. Do nothing now, upcall will bind
+ * olck to dlmlock and signal the waiters.
+ *
+ * This maintains an invariant that osc_lock
+ * and ldlm_lock are always bound when
+ * osc_lock is in OLS_GRANTED state.
+ */
+ } else if (dlmlock->l_granted_mode ==
+ dlmlock->l_req_mode) {
+ osc_lock_granted(env, olck, dlmlock, dlmrc);
+ }
+ unlock_res_and_lock(dlmlock);
+
+ if (dlmrc != 0) {
+ CL_LOCK_DEBUG(D_ERROR, env, lock,
+ "dlmlock returned %d\n", dlmrc);
+ cl_lock_error(env, lock, dlmrc);
+ }
+ cl_lock_mutex_put(env, lock);
+ osc_ast_data_put(env, olck);
+ result = 0;
+ } else
+ result = -ELDLM_NO_LOCK_DATA;
+ cl_env_nested_put(&nest, env);
+ } else
+ result = PTR_ERR(env);
+ return dlmrc ?: result;
+}
+
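+/**
+ * Glimpse ast for client extent locks, installed via osc_lock_build_einfo().
+ * Packs the current object attributes (obtained through cl_object_glimpse())
+ * into the LVB of the reply. If no cl_lock is attached to the dlm lock any
+ * more, replies with -ELDLM_NO_LOCK_DATA, which is a normal race.
+ */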
+static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
+{
+ struct ptlrpc_request *req = data;
+ struct osc_lock *olck;
+ struct cl_lock *lock;
+ struct cl_object *obj;
+ struct cl_env_nest nest;
+ struct lu_env *env;
+ struct ost_lvb *lvb;
+ struct req_capsule *cap;
+ int result;
+
+ LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
+
+ env = cl_env_nested_get(&nest);
+ if (!IS_ERR(env)) {
+ /* osc_ast_data_get() has to go after the environment is
+ * allocated, because osc_ast_data_get() acquires a
+ * reference to a lock, and that reference can only be
+ * released within an environment.
+ */
+ olck = osc_ast_data_get(dlmlock);
+ if (olck != NULL) {
+ lock = olck->ols_cl.cls_lock;
+ /* Do not grab the mutex of cl_lock for glimpse.
+ * See LU-1274 for details.
+ * BTW, it's okay for cl_lock to be cancelled during
+ * this period because server can handle this race.
+ * See ldlm_server_glimpse_ast() for details.
+ * cl_lock_mutex_get(env, lock); */
+ cap = &req->rq_pill;
+ req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
+ req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
+ sizeof *lvb);
+ result = req_capsule_server_pack(cap);
+ if (result == 0) {
+ lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
+ obj = lock->cll_descr.cld_obj;
+ result = cl_object_glimpse(env, obj, lvb);
+ }
+ if (!exp_connect_lvb_type(req->rq_export))
+ req_capsule_shrink(&req->rq_pill,
+ &RMF_DLM_LVB,
+ sizeof(struct ost_lvb_v1),
+ RCL_SERVER);
+ osc_ast_data_put(env, olck);
+ } else {
+ /*
+ * These errors are normal races, so we don't want to
+ * fill the console with messages by calling
+ * ptlrpc_error()
+ */
+ lustre_pack_reply(req, 1, NULL, NULL);
+ result = -ELDLM_NO_LOCK_DATA;
+ }
+ cl_env_nested_put(&nest, env);
+ } else
+ result = PTR_ERR(env);
+ req->rq_status = result;
+ return result;
+}
+
+static unsigned long osc_lock_weigh(const struct lu_env *env,
+ const struct cl_lock_slice *slice)
+{
+ /*
+ * no need to grab coh_page_guard since we don't care about the
+ * exact number of pages.
+ */
+ return cl_object_header(slice->cls_obj)->coh_pages;
+}
+
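+/**
+ * Fills ldlm_enqueue_info for an extent lock enqueue: converts the cl_lock
+ * mode into an ldlm mode (glimpse locks are enqueued in read mode) and
+ * installs the blocking, completion and glimpse ast's, with the osc_lock as
+ * ->l_ast_data.
+ */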
+static void osc_lock_build_einfo(const struct lu_env *env,
+ const struct cl_lock *clock,
+ struct osc_lock *lock,
+ struct ldlm_enqueue_info *einfo)
+{
+ enum cl_lock_mode mode;
+
+ mode = clock->cll_descr.cld_mode;
+ if (mode == CLM_PHANTOM)
+ /*
+ * For now, enqueue all glimpse locks in read mode. In the
+ * future, client might choose to enqueue LCK_PW lock for
+ * glimpse on a file opened for write.
+ */
+ mode = CLM_READ;
+
+ einfo->ei_type = LDLM_EXTENT;
+ einfo->ei_mode = osc_cl_lock2ldlm(mode);
+ einfo->ei_cb_bl = osc_ldlm_blocking_ast;
+ einfo->ei_cb_cp = osc_ldlm_completion_ast;
+ einfo->ei_cb_gl = osc_ldlm_glimpse_ast;
+ einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
+}
+
+/**
+ * Determine if the lock should be converted into a lockless lock.
+ *
+ * Steps to check:
+ * - whether the lock has an explicit requirement for a non-lockless lock;
+ * - the IO lock request type (ci_lockreq);
+ * - send the enqueue RPC to the OST to make the final decision;
+ * - special treatment for lockless truncate.
+ *
+ * Additional policy can be implemented here, e.g., never do lockless-io
+ * for large extents.
+ */
+static void osc_lock_to_lockless(const struct lu_env *env,
+ struct osc_lock *ols, int force)
+{
+ struct cl_lock_slice *slice = &ols->ols_cl;
+
+ LASSERT(ols->ols_state == OLS_NEW ||
+ ols->ols_state == OLS_UPCALL_RECEIVED);
+
+ if (force) {
+ ols->ols_locklessable = 1;
+ slice->cls_ops = &osc_lock_lockless_ops;
+ } else {
+ struct osc_io *oio = osc_env_io(env);
+ struct cl_io *io = oio->oi_cl.cis_io;
+ struct cl_object *obj = slice->cls_obj;
+ struct osc_object *oob = cl2osc(obj);
+ const struct osc_device *osd = lu2osc_dev(obj->co_lu.lo_dev);
+ struct obd_connect_data *ocd;
+
+ LASSERT(io->ci_lockreq == CILR_MANDATORY ||
+ io->ci_lockreq == CILR_MAYBE ||
+ io->ci_lockreq == CILR_NEVER);
+
+ ocd = &class_exp2cliimp(osc_export(oob))->imp_connect_data;
+ ols->ols_locklessable = (io->ci_type != CIT_SETATTR) &&
+ (io->ci_lockreq == CILR_MAYBE) &&
+ (ocd->ocd_connect_flags & OBD_CONNECT_SRVLOCK);
+ if (io->ci_lockreq == CILR_NEVER ||
+ /* lockless IO */
+ (ols->ols_locklessable && osc_object_is_contended(oob)) ||
+ /* lockless truncate */
+ (cl_io_is_trunc(io) &&
+ (ocd->ocd_connect_flags & OBD_CONNECT_TRUNCLOCK) &&
+ osd->od_lockless_truncate)) {
+ ols->ols_locklessable = 1;
+ slice->cls_ops = &osc_lock_lockless_ops;
+ }
+ }
+ LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
+}
+
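+/**
+ * Checks whether a lock being enqueued (qing) is compatible with an already
+ * enqueued lock (qed): a glimpse lock that has received its upcall is
+ * compatible with anything, a read enqueue is compatible with any queued
+ * glimpse lock, and two read locks are always compatible.
+ */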
+static int osc_lock_compatible(const struct osc_lock *qing,
+ const struct osc_lock *qed)
+{
+ enum cl_lock_mode qing_mode;
+ enum cl_lock_mode qed_mode;
+
+ qing_mode = qing->ols_cl.cls_lock->cll_descr.cld_mode;
+ if (qed->ols_glimpse &&
+ (qed->ols_state >= OLS_UPCALL_RECEIVED || qing_mode == CLM_READ))
+ return 1;
+
+ qed_mode = qed->ols_cl.cls_lock->cll_descr.cld_mode;
+ return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ));
+}
+
+/**
+ * Cancel all conflicting locks and wait for them to be destroyed.
+ *
+ * This function is used for two purposes:
+ *
+ * - early cancel all conflicting locks before starting IO, and
+ *
+ * - guarantee that pages added to the page cache by lockless IO are never
+ * covered by locks other than lockless IO lock, and, hence, are not
+ * visible to other threads.
+ */
+static int osc_lock_enqueue_wait(const struct lu_env *env,
+ const struct osc_lock *olck)
+{
+ struct cl_lock *lock = olck->ols_cl.cls_lock;
+ struct cl_lock_descr *descr = &lock->cll_descr;
+ struct cl_object_header *hdr = cl_object_header(descr->cld_obj);
+ struct cl_lock *scan;
+ struct cl_lock *conflict = NULL;
+ int lockless = osc_lock_is_lockless(olck);
+ int rc = 0;
+
+ LASSERT(cl_lock_is_mutexed(lock));
+
+ /* let a glimpse lock enqueue anyway, because we actually
+ * don't need to cancel any conflicting locks for it. */
+ if (olck->ols_glimpse)
+ return 0;
+
+ spin_lock(&hdr->coh_lock_guard);
+ list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
+ struct cl_lock_descr *cld = &scan->cll_descr;
+ const struct osc_lock *scan_ols;
+
+ if (scan == lock)
+ break;
+
+ if (scan->cll_state < CLS_QUEUING ||
+ scan->cll_state == CLS_FREEING ||
+ cld->cld_start > descr->cld_end ||
+ cld->cld_end < descr->cld_start)
+ continue;
+
+ /* overlapped and living locks. */
+
+ /* We're not supposed to give up group lock. */
+ if (scan->cll_descr.cld_mode == CLM_GROUP) {
+ LASSERT(descr->cld_mode != CLM_GROUP ||
+ descr->cld_gid != scan->cll_descr.cld_gid);
+ continue;
+ }
+
+ scan_ols = osc_lock_at(scan);
+
+ /* We need to cancel the compatible locks if we're enqueuing
+ * a lockless lock, for example:
+ * imagine that client has PR lock on [0, 1000], and thread T0
+ * is doing lockless IO in [500, 1500] region. Concurrent
+ * thread T1 can see lockless data in [500, 1000], which is
+ * wrong, because these data are possibly stale. */
+ if (!lockless && osc_lock_compatible(olck, scan_ols))
+ continue;
+
+ cl_lock_get_trust(scan);
+ conflict = scan;
+ break;
+ }
+ spin_unlock(&hdr->coh_lock_guard);
+
+ if (conflict) {
+ if (lock->cll_descr.cld_mode == CLM_GROUP) {
+ /* we want a group lock but a previous lock request
+ * conflicts; we do not wait but return 0 so the
+ * request is sent to the server
+ */
+ CDEBUG(D_DLMTRACE, "group lock %p is conflicted "
+ "with %p, no wait, send to server\n",
+ lock, conflict);
+ cl_lock_put(env, conflict);
+ rc = 0;
+ } else {
+ CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, "
+ "will wait\n",
+ lock, conflict);
+ LASSERT(lock->cll_conflict == NULL);
+ lu_ref_add(&conflict->cll_reference, "cancel-wait",
+ lock);
+ lock->cll_conflict = conflict;
+ rc = CLO_WAIT;
+ }
+ }
+ return rc;
+}
+
+/**
+ * Implementation of cl_lock_operations::clo_enqueue() method for osc
+ * layer. This initiates ldlm enqueue:
+ *
+ * - cancels conflicting locks early (osc_lock_enqueue_wait());
+ *
+ * - calls osc_enqueue_base() to do actual enqueue.
+ *
+ * osc_enqueue_base() is supplied with an upcall function that is executed
+ * when lock is received either after a local cached ldlm lock is matched, or
+ * when a reply from the server is received.
+ *
+ * This function does not wait for the network communication to complete.
+ */
+static int osc_lock_enqueue(const struct lu_env *env,
+ const struct cl_lock_slice *slice,
+ struct cl_io *unused, __u32 enqflags)
+{
+ struct osc_lock *ols = cl2osc_lock(slice);
+ struct cl_lock *lock = ols->ols_cl.cls_lock;
+ int result;
+
+ LASSERT(cl_lock_is_mutexed(lock));
+ LASSERTF(ols->ols_state == OLS_NEW,
+ "Impossible state: %d\n", ols->ols_state);
+
+ LASSERTF(ergo(ols->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ),
+ "lock = %p, ols = %p\n", lock, ols);
+
+ result = osc_lock_enqueue_wait(env, ols);
+ if (result == 0) {
+ if (!osc_lock_is_lockless(ols)) {
+ struct osc_object *obj = cl2osc(slice->cls_obj);
+ struct osc_thread_info *info = osc_env_info(env);
+ struct ldlm_res_id *resname = &info->oti_resname;
+ ldlm_policy_data_t *policy = &info->oti_policy;
+ struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
+
+ /* the lock will be passed as the upcall cookie;
+ * hold a reference to prevent it from being released. */
+ cl_lock_hold_add(env, lock, "upcall", lock);
+ /* a user for lock also */
+ cl_lock_user_add(env, lock);
+ ols->ols_state = OLS_ENQUEUED;
+
+ /*
+ * XXX: this is a possible blocking point, as
+ * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
+ * LDLM_CP_CALLBACK.
+ */
+ ostid_build_res_name(&obj->oo_oinfo->loi_oi, resname);
+ osc_lock_build_policy(env, lock, policy);
+ result = osc_enqueue_base(osc_export(obj), resname,
+ &ols->ols_flags, policy,
+ &ols->ols_lvb,
+ obj->oo_oinfo->loi_kms_valid,
+ osc_lock_upcall,
+ ols, einfo, &ols->ols_handle,
+ PTLRPCD_SET, 1, ols->ols_agl);
+ if (result != 0) {
+ cl_lock_user_del(env, lock);
+ cl_lock_unhold(env, lock, "upcall", lock);
+ if (unlikely(result == -ECANCELED)) {
+ ols->ols_state = OLS_NEW;
+ result = 0;
+ }
+ }
+ } else {
+ ols->ols_state = OLS_GRANTED;
+ ols->ols_owner = osc_env_io(env);
+ }
+ }
+ LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
+ return result;
+}
+
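+/**
+ * Implements cl_lock_operations::clo_wait() method for osc layer.
+ *
+ * Waits until the underlying ldlm lock is granted. An AGL lock that has not
+ * been enqueued for real yet is re-enqueued here (returning CLO_REENQUEUED),
+ * and any error recorded on the cl_lock is propagated to the caller.
+ */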
+static int osc_lock_wait(const struct lu_env *env,
+ const struct cl_lock_slice *slice)
+{
+ struct osc_lock *olck = cl2osc_lock(slice);
+ struct cl_lock *lock = olck->ols_cl.cls_lock;
+
+ LINVRNT(osc_lock_invariant(olck));
+
+ if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED) {
+ if (olck->ols_flags & LDLM_FL_LVB_READY) {
+ return 0;
+ } else if (olck->ols_agl) {
+ if (lock->cll_flags & CLF_FROM_UPCALL)
+ /* This call is from the enqueue RPC reply upcall,
+ * only to update state. Do not re-enqueue. */
+ return -ENAVAIL;
+ else
+ olck->ols_state = OLS_NEW;
+ } else {
+ LASSERT(lock->cll_error);
+ return lock->cll_error;
+ }
+ }
+
+ if (olck->ols_state == OLS_NEW) {
+ int rc;
+
+ LASSERT(olck->ols_agl);
+ olck->ols_agl = 0;
+ rc = osc_lock_enqueue(env, slice, NULL, CEF_ASYNC | CEF_MUST);
+ if (rc != 0)
+ return rc;
+ else
+ return CLO_REENQUEUED;
+ }
+
+ LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
+ lock->cll_error == 0, olck->ols_lock != NULL));
+
+ return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
+}
+
+/**
+ * An implementation of cl_lock_operations::clo_use() method that pins cached
+ * lock.
+ */
+static int osc_lock_use(const struct lu_env *env,
+ const struct cl_lock_slice *slice)
+{
+ struct osc_lock *olck = cl2osc_lock(slice);
+ int rc;
+
+ LASSERT(!olck->ols_hold);
+
+ /*
+ * Atomically check for LDLM_FL_CBPENDING and addref a lock if this
+ * flag is not set. This protects us from a concurrent blocking ast.
+ */
+ rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
+ if (rc == 0) {
+ olck->ols_hold = 1;
+ olck->ols_state = OLS_GRANTED;
+ } else {
+ struct cl_lock *lock;
+
+ /*
+ * Lock is being cancelled somewhere within
+ * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already
+ * set, but osc_ldlm_blocking_ast() hasn't yet acquired
+ * cl_lock mutex.
+ */
+ lock = slice->cls_lock;
+ LASSERT(lock->cll_state == CLS_INTRANSIT);
+ LASSERT(lock->cll_users > 0);
+ /* set a flag for osc_dlm_blocking_ast0() to signal the
+ * lock. */
+ olck->ols_ast_wait = 1;
+ rc = CLO_WAIT;
+ }
+ return rc;
+}
+
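+/**
+ * Writes out dirty pages covered by a write lock (or discards them when
+ * requested), then discards all cached pages under the lock. Used by
+ * osc_lock_cancel() and osc_lock_lockless_cancel().
+ */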
+static int osc_lock_flush(struct osc_lock *ols, int discard)
+{
+ struct cl_lock *lock = ols->ols_cl.cls_lock;
+ struct cl_env_nest nest;
+ struct lu_env *env;
+ int result = 0;
+
+ env = cl_env_nested_get(&nest);
+ if (!IS_ERR(env)) {
+ struct osc_object *obj = cl2osc(ols->ols_cl.cls_obj);
+ struct cl_lock_descr *descr = &lock->cll_descr;
+ int rc = 0;
+
+ if (descr->cld_mode >= CLM_WRITE) {
+ result = osc_cache_writeback_range(env, obj,
+ descr->cld_start, descr->cld_end,
+ 1, discard);
+ LDLM_DEBUG(ols->ols_lock,
+ "lock %p: %d pages were %s.\n", lock, result,
+ discard ? "discarded" : "written");
+ if (result > 0)
+ result = 0;
+ }
+
+ rc = cl_lock_discard_pages(env, lock);
+ if (result == 0 && rc < 0)
+ result = rc;
+
+ cl_env_nested_put(&nest, env);
+ } else
+ result = PTR_ERR(env);
+ if (result == 0) {
+ ols->ols_flush = 1;
+ LINVRNT(!osc_lock_has_pages(ols));
+ }
+ return result;
+}
+
+/**
+ * Implements cl_lock_operations::clo_cancel() method for osc layer. This is
+ * called (as part of cl_lock_cancel()) when lock is canceled either voluntary
+ * (LRU pressure, early cancellation, umount, etc.) or due to the conflict
+ * with some other lock some where in the cluster. This function does the
+ * following:
+ *
+ * - invalidates all pages protected by this lock (after sending dirty
+ * ones to the server, as necessary);
+ *
+ * - decref's underlying ldlm lock;
+ *
+ * - cancels ldlm lock (ldlm_cli_cancel()).
+ */
+static void osc_lock_cancel(const struct lu_env *env,
+ const struct cl_lock_slice *slice)
+{
+ struct cl_lock *lock = slice->cls_lock;
+ struct osc_lock *olck = cl2osc_lock(slice);
+ struct ldlm_lock *dlmlock = olck->ols_lock;
+ int result = 0;
+ int discard;
+
+ LASSERT(cl_lock_is_mutexed(lock));
+ LINVRNT(osc_lock_invariant(olck));
+
+ if (dlmlock != NULL) {
+ int do_cancel;
+
+ discard = !!(dlmlock->l_flags & LDLM_FL_DISCARD_DATA);
+ if (olck->ols_state >= OLS_GRANTED)
+ result = osc_lock_flush(olck, discard);
+ osc_lock_unhold(olck);
+
+ lock_res_and_lock(dlmlock);
+ /* Now that we're the only user of the dlm read/write reference,
+ * ->l_readers + ->l_writers should normally be zero.
+ * However, there is a corner case.
+ * See bug 18829 for details. */
+ do_cancel = (dlmlock->l_readers == 0 &&
+ dlmlock->l_writers == 0);
+ dlmlock->l_flags |= LDLM_FL_CBPENDING;
+ unlock_res_and_lock(dlmlock);
+ if (do_cancel)
+ result = ldlm_cli_cancel(&olck->ols_handle, LCF_ASYNC);
+ if (result < 0)
+ CL_LOCK_DEBUG(D_ERROR, env, lock,
+ "lock %p cancel failure with error(%d)\n",
+ lock, result);
+ }
+ olck->ols_state = OLS_CANCELLED;
+ olck->ols_flags &= ~LDLM_FL_LVB_READY;
+ osc_lock_detach(env, olck);
+}
+
+static int osc_lock_has_pages(struct osc_lock *olck)
+{
+ return 0;
+}
+
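+/**
+ * Implements cl_lock_operations::clo_delete() method for osc layer: drops the
+ * hold on and detaches the underlying ldlm lock. Glimpse locks have nothing
+ * attached, so they are left alone.
+ */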
+static void osc_lock_delete(const struct lu_env *env,
+ const struct cl_lock_slice *slice)
+{
+ struct osc_lock *olck;
+
+ olck = cl2osc_lock(slice);
+ if (olck->ols_glimpse) {
+ LASSERT(!olck->ols_hold);
+ LASSERT(!olck->ols_lock);
+ return;
+ }
+
+ LINVRNT(osc_lock_invariant(olck));
+ LINVRNT(!osc_lock_has_pages(olck));
+
+ osc_lock_unhold(olck);
+ osc_lock_detach(env, olck);
+}
+
+/**
+ * Implements cl_lock_operations::clo_state() method for osc layer.
+ *
+ * Maintains osc_lock::ols_owner field.
+ *
+ * This assumes that lock always enters CLS_HELD (from some other state) in
+ * the same IO context as one that requested the lock. This should not be a
+ * problem, because context is by definition shared by all activity pertaining
+ * to the same high-level IO.
+ */
+static void osc_lock_state(const struct lu_env *env,
+ const struct cl_lock_slice *slice,
+ enum cl_lock_state state)
+{
+ struct osc_lock *lock = cl2osc_lock(slice);
+
+ /*
+ * XXX multiple io contexts can use the lock at the same time.
+ */
+ LINVRNT(osc_lock_invariant(lock));
+ if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
+ struct osc_io *oio = osc_env_io(env);
+
+ LASSERT(lock->ols_owner == NULL);
+ lock->ols_owner = oio;
+ } else if (state != CLS_HELD)
+ lock->ols_owner = NULL;
+}
+
+static int osc_lock_print(const struct lu_env *env, void *cookie,
+ lu_printer_t p, const struct cl_lock_slice *slice)
+{
+ struct osc_lock *lock = cl2osc_lock(slice);
+
+ /*
+ * XXX print ldlm lock and einfo properly.
+ */
+ (*p)(env, cookie, "%p %#16llx "LPX64" %d %p ",
+ lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
+ lock->ols_state, lock->ols_owner);
+ osc_lvb_print(env, cookie, p, &lock->ols_lvb);
+ return 0;
+}
+
+static int osc_lock_fits_into(const struct lu_env *env,
+ const struct cl_lock_slice *slice,
+ const struct cl_lock_descr *need,
+ const struct cl_io *io)
+{
+ struct osc_lock *ols = cl2osc_lock(slice);
+
+ if (need->cld_enq_flags & CEF_NEVER)
+ return 0;
+
+ if (ols->ols_state >= OLS_CANCELLED)
+ return 0;
+
+ if (need->cld_mode == CLM_PHANTOM) {
+ if (ols->ols_agl)
+ return !(ols->ols_state > OLS_RELEASED);
+
+ /*
+ * Note: the QUEUED lock can't be matched here, otherwise
+ * it might cause deadlocks.
+ * In read_process,
+ * P1: enqueued read lock, create sublock1
+ * P2: enqueued write lock, create sublock2(conflicted
+ * with sublock1).
+ * P1: Grant read lock.
+ * P1: enqueued glimpse lock (while holding sublock1_read),
+ * matched with sublock2, waiting for sublock2 to be granted.
+ * But sublock2 cannot be granted, because P1
+ * will not release sublock1. Bang!
+ */
+ if (ols->ols_state < OLS_GRANTED ||
+ ols->ols_state > OLS_RELEASED)
+ return 0;
+ } else if (need->cld_enq_flags & CEF_MUST) {
+ /*
+ * If the lock has never been enqueued, it can't be matched,
+ * because the enqueue process brings in a lot of information
+ * that is used to determine things such as lockless,
+ * CEF_MUST, etc.
+ */
+ if (ols->ols_state < OLS_UPCALL_RECEIVED &&
+ ols->ols_locklessable)
+ return 0;
+ }
+ return 1;
+}
+
+static const struct cl_lock_operations osc_lock_ops = {
+ .clo_fini = osc_lock_fini,
+ .clo_enqueue = osc_lock_enqueue,
+ .clo_wait = osc_lock_wait,
+ .clo_unuse = osc_lock_unuse,
+ .clo_use = osc_lock_use,
+ .clo_delete = osc_lock_delete,
+ .clo_state = osc_lock_state,
+ .clo_cancel = osc_lock_cancel,
+ .clo_weigh = osc_lock_weigh,
+ .clo_print = osc_lock_print,
+ .clo_fits_into = osc_lock_fits_into,
+};
+
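+/**
+ * Implements cl_lock_operations::clo_unuse() for lockless osc locks: a
+ * lockless lock is not kept in the cache, so unusing it simply cancels and
+ * deletes the cl_lock.
+ */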
+static int osc_lock_lockless_unuse(const struct lu_env *env,
+ const struct cl_lock_slice *slice)
+{
+ struct osc_lock *ols = cl2osc_lock(slice);
+ struct cl_lock *lock = slice->cls_lock;
+
+ LASSERT(ols->ols_state == OLS_GRANTED);
+ LINVRNT(osc_lock_invariant(ols));
+
+ cl_lock_cancel(env, lock);
+ cl_lock_delete(env, lock);
+ return 0;
+}
+
+static void osc_lock_lockless_cancel(const struct lu_env *env,
+ const struct cl_lock_slice *slice)
+{
+ struct osc_lock *ols = cl2osc_lock(slice);
+ int result;
+
+ result = osc_lock_flush(ols, 0);
+ if (result)
+ CERROR("Pages for lockless lock %p were not purged(%d)\n",
+ ols, result);
+ ols->ols_state = OLS_CANCELLED;
+}
+
+static int osc_lock_lockless_wait(const struct lu_env *env,
+ const struct cl_lock_slice *slice)
+{
+ struct osc_lock *olck = cl2osc_lock(slice);
+ struct cl_lock *lock = olck->ols_cl.cls_lock;
+
+ LINVRNT(osc_lock_invariant(olck));
+ LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);
+
+ return lock->cll_error;
+}
+
+static void osc_lock_lockless_state(const struct lu_env *env,
+ const struct cl_lock_slice *slice,
+ enum cl_lock_state state)
+{
+ struct osc_lock *lock = cl2osc_lock(slice);
+
+ LINVRNT(osc_lock_invariant(lock));
+ if (state == CLS_HELD) {
+ struct osc_io *oio = osc_env_io(env);
+
+ LASSERT(ergo(lock->ols_owner, lock->ols_owner == oio));
+ lock->ols_owner = oio;
+
+ /* set the io to be lockless if this lock is for io's
+ * host object */
+ if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
+ oio->oi_lockless = 1;
+ }
+}
+
+static int osc_lock_lockless_fits_into(const struct lu_env *env,
+ const struct cl_lock_slice *slice,
+ const struct cl_lock_descr *need,
+ const struct cl_io *io)
+{
+ struct osc_lock *lock = cl2osc_lock(slice);
+
+ if (!(need->cld_enq_flags & CEF_NEVER))
+ return 0;
+
+ /* lockless lock should only be used by its owning io. b22147 */
+ return (lock->ols_owner == osc_env_io(env));
+}
+
+static const struct cl_lock_operations osc_lock_lockless_ops = {
+ .clo_fini = osc_lock_fini,
+ .clo_enqueue = osc_lock_enqueue,
+ .clo_wait = osc_lock_lockless_wait,
+ .clo_unuse = osc_lock_lockless_unuse,
+ .clo_state = osc_lock_lockless_state,
+ .clo_fits_into = osc_lock_lockless_fits_into,
+ .clo_cancel = osc_lock_lockless_cancel,
+ .clo_print = osc_lock_print
+};
+
+int osc_lock_init(const struct lu_env *env,
+ struct cl_object *obj, struct cl_lock *lock,
+ const struct cl_io *unused)
+{
+ struct osc_lock *clk;
+ int result;
+
+ OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, __GFP_IO);
+ if (clk != NULL) {
+ __u32 enqflags = lock->cll_descr.cld_enq_flags;
+
+ osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
+ atomic_set(&clk->ols_pageref, 0);
+ clk->ols_state = OLS_NEW;
+
+ clk->ols_flags = osc_enq2ldlm_flags(enqflags);
+ clk->ols_agl = !!(enqflags & CEF_AGL);
+ if (clk->ols_agl)
+ clk->ols_flags |= LDLM_FL_BLOCK_NOWAIT;
+ if (clk->ols_flags & LDLM_FL_HAS_INTENT)
+ clk->ols_glimpse = 1;
+
+ cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
+
+ if (!(enqflags & CEF_MUST))
+ /* try to convert this lock to a lockless lock */
+ osc_lock_to_lockless(env, clk, (enqflags & CEF_NEVER));
+ if (clk->ols_locklessable && !(enqflags & CEF_DISCARD_DATA))
+ clk->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
+
+ LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx\n",
+ lock, clk, clk->ols_flags);
+
+ result = 0;
+ } else
+ result = -ENOMEM;
+ return result;
+}
+
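+/**
+ * Checks whether the osc_lock attached to a dlm lock is still referenced by
+ * pages (see osc_page_addref_lock()). Returns 1 if page references exist,
+ * 0 otherwise, so that the caller can avoid cancelling a lock that is still
+ * pinned by pages.
+ */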
+int osc_dlm_lock_pageref(struct ldlm_lock *dlm)
+{
+ struct osc_lock *olock;
+ int rc = 0;
+
+ spin_lock(&osc_ast_guard);
+ olock = dlm->l_ast_data;
+ /*
+ * there's a very rare race with osc_page_addref_lock(), but it
+ * doesn't matter: in the worst case we don't cancel a lock
+ * which we actually could, and that does no harm.
+ */
+ if (olock != NULL &&
+ atomic_add_return(_PAGEREF_MAGIC,
+ &olock->ols_pageref) != _PAGEREF_MAGIC) {
+ atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref);
+ rc = 1;
+ }
+ spin_unlock(&osc_ast_guard);
+ return rc;
+}
+
+/** @} osc */
diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c
new file mode 100644
index 0000000..9d34de8
--- /dev/null
+++ b/drivers/staging/lustre/lustre/osc/osc_object.c
@@ -0,0 +1,274 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Implementation of cl_object for OSC layer.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_OSC
+
+#include "osc_cl_internal.h"
+
+/** \addtogroup osc
+ * @{
+ */
+
+/*****************************************************************************
+ *
+ * Type conversions.
+ *
+ */
+
+static struct lu_object *osc2lu(struct osc_object *osc)
+{
+ return &osc->oo_cl.co_lu;
+}
+
+static struct osc_object *lu2osc(const struct lu_object *obj)
+{
+ LINVRNT(osc_is_object(obj));
+ return container_of0(obj, struct osc_object, oo_cl.co_lu);
+}
+
+/*****************************************************************************
+ *
+ * Object operations.
+ *
+ */
+
+static int osc_object_init(const struct lu_env *env, struct lu_object *obj,
+ const struct lu_object_conf *conf)
+{
+ struct osc_object *osc = lu2osc(obj);
+ const struct cl_object_conf *cconf = lu2cl_conf(conf);
+ int i;
+
+ osc->oo_oinfo = cconf->u.coc_oinfo;
+ spin_lock_init(&osc->oo_seatbelt);
+ for (i = 0; i < CRT_NR; ++i)
+ INIT_LIST_HEAD(&osc->oo_inflight[i]);
+
+ INIT_LIST_HEAD(&osc->oo_ready_item);
+ INIT_LIST_HEAD(&osc->oo_hp_ready_item);
+ INIT_LIST_HEAD(&osc->oo_write_item);
+ INIT_LIST_HEAD(&osc->oo_read_item);
+
+ osc->oo_root.rb_node = NULL;
+ INIT_LIST_HEAD(&osc->oo_hp_exts);
+ INIT_LIST_HEAD(&osc->oo_urgent_exts);
+ INIT_LIST_HEAD(&osc->oo_rpc_exts);
+ INIT_LIST_HEAD(&osc->oo_reading_exts);
+ atomic_set(&osc->oo_nr_reads, 0);
+ atomic_set(&osc->oo_nr_writes, 0);
+ spin_lock_init(&osc->oo_lock);
+
+ cl_object_page_init(lu2cl(obj), sizeof(struct osc_page));
+
+ return 0;
+}
+
+static void osc_object_free(const struct lu_env *env, struct lu_object *obj)
+{
+ struct osc_object *osc = lu2osc(obj);
+ int i;
+
+ for (i = 0; i < CRT_NR; ++i)
+ LASSERT(list_empty(&osc->oo_inflight[i]));
+
+ LASSERT(list_empty(&osc->oo_ready_item));
+ LASSERT(list_empty(&osc->oo_hp_ready_item));
+ LASSERT(list_empty(&osc->oo_write_item));
+ LASSERT(list_empty(&osc->oo_read_item));
+
+ LASSERT(osc->oo_root.rb_node == NULL);
+ LASSERT(list_empty(&osc->oo_hp_exts));
+ LASSERT(list_empty(&osc->oo_urgent_exts));
+ LASSERT(list_empty(&osc->oo_rpc_exts));
+ LASSERT(list_empty(&osc->oo_reading_exts));
+ LASSERT(atomic_read(&osc->oo_nr_reads) == 0);
+ LASSERT(atomic_read(&osc->oo_nr_writes) == 0);
+
+ lu_object_fini(obj);
+ OBD_SLAB_FREE_PTR(osc, osc_object_kmem);
+}
+
+int osc_lvb_print(const struct lu_env *env, void *cookie,
+ lu_printer_t p, const struct ost_lvb *lvb)
+{
+ return (*p)(env, cookie, "size: "LPU64" mtime: "LPU64" atime: "LPU64" "
+ "ctime: "LPU64" blocks: "LPU64,
+ lvb->lvb_size, lvb->lvb_mtime, lvb->lvb_atime,
+ lvb->lvb_ctime, lvb->lvb_blocks);
+}
+
+static int osc_object_print(const struct lu_env *env, void *cookie,
+ lu_printer_t p, const struct lu_object *obj)
+{
+ struct osc_object *osc = lu2osc(obj);
+ struct lov_oinfo *oinfo = osc->oo_oinfo;
+ struct osc_async_rc *ar = &oinfo->loi_ar;
+
+ (*p)(env, cookie, "id: "DOSTID" "
+ "idx: %d gen: %d kms_valid: %u kms "LPU64" "
+ "rc: %d force_sync: %d min_xid: "LPU64" ",
+ POSTID(&oinfo->loi_oi), oinfo->loi_ost_idx,
+ oinfo->loi_ost_gen, oinfo->loi_kms_valid, oinfo->loi_kms,
+ ar->ar_rc, ar->ar_force_sync, ar->ar_min_xid);
+ osc_lvb_print(env, cookie, p, &oinfo->loi_lvb);
+ return 0;
+}
+
+
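+/**
+ * Implements cl_object_operations::coo_attr_get() for osc layer: converts the
+ * cached OST LVB into cl_attr and fills in the known minimum size (kms).
+ */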
+static int osc_attr_get(const struct lu_env *env, struct cl_object *obj,
+ struct cl_attr *attr)
+{
+ struct lov_oinfo *oinfo = cl2osc(obj)->oo_oinfo;
+
+ cl_lvb2attr(attr, &oinfo->loi_lvb);
+ attr->cat_kms = oinfo->loi_kms_valid ? oinfo->loi_kms : 0;
+ return 0;
+}
+
+int osc_attr_set(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_attr *attr, unsigned valid)
+{
+ struct lov_oinfo *oinfo = cl2osc(obj)->oo_oinfo;
+ struct ost_lvb *lvb = &oinfo->loi_lvb;
+
+ if (valid & CAT_SIZE)
+ lvb->lvb_size = attr->cat_size;
+ if (valid & CAT_MTIME)
+ lvb->lvb_mtime = attr->cat_mtime;
+ if (valid & CAT_ATIME)
+ lvb->lvb_atime = attr->cat_atime;
+ if (valid & CAT_CTIME)
+ lvb->lvb_ctime = attr->cat_ctime;
+ if (valid & CAT_BLOCKS)
+ lvb->lvb_blocks = attr->cat_blocks;
+ if (valid & CAT_KMS) {
+ CDEBUG(D_CACHE, "set kms from "LPU64"to "LPU64"\n",
+ oinfo->loi_kms, (__u64)attr->cat_kms);
+ loi_kms_set(oinfo, attr->cat_kms);
+ }
+ return 0;
+}
+
+static int osc_object_glimpse(const struct lu_env *env,
+ const struct cl_object *obj, struct ost_lvb *lvb)
+{
+ struct lov_oinfo *oinfo = cl2osc(obj)->oo_oinfo;
+
+ lvb->lvb_size = oinfo->loi_kms;
+ lvb->lvb_blocks = oinfo->loi_lvb.lvb_blocks;
+ return 0;
+}
+
+
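+/* Contention tracking: osc_object_set_contended() marks the object as
+ * contended and records the time; osc_object_is_contended() clears the flag
+ * again once od_contention_time seconds have passed. */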
+void osc_object_set_contended(struct osc_object *obj)
+{
+ obj->oo_contention_time = cfs_time_current();
+ /* mb(); */
+ obj->oo_contended = 1;
+}
+
+void osc_object_clear_contended(struct osc_object *obj)
+{
+ obj->oo_contended = 0;
+}
+
+int osc_object_is_contended(struct osc_object *obj)
+{
+ struct osc_device *dev = lu2osc_dev(obj->oo_cl.co_lu.lo_dev);
+ int osc_contention_time = dev->od_contention_time;
+ cfs_time_t cur_time = cfs_time_current();
+ cfs_time_t retry_time;
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_OBJECT_CONTENTION))
+ return 1;
+
+ if (!obj->oo_contended)
+ return 0;
+
+ /*
+ * I like copy-paste. The code is copied from
+ * ll_file_is_contended().
+ */
+ retry_time = cfs_time_add(obj->oo_contention_time,
+ cfs_time_seconds(osc_contention_time));
+ if (cfs_time_after(cur_time, retry_time)) {
+ osc_object_clear_contended(obj);
+ return 0;
+ }
+ return 1;
+}
+
+static const struct cl_object_operations osc_ops = {
+ .coo_page_init = osc_page_init,
+ .coo_lock_init = osc_lock_init,
+ .coo_io_init = osc_io_init,
+ .coo_attr_get = osc_attr_get,
+ .coo_attr_set = osc_attr_set,
+ .coo_glimpse = osc_object_glimpse
+};
+
+static const struct lu_object_operations osc_lu_obj_ops = {
+ .loo_object_init = osc_object_init,
+ .loo_object_delete = NULL,
+ .loo_object_release = NULL,
+ .loo_object_free = osc_object_free,
+ .loo_object_print = osc_object_print,
+ .loo_object_invariant = NULL
+};
+
+struct lu_object *osc_object_alloc(const struct lu_env *env,
+ const struct lu_object_header *unused,
+ struct lu_device *dev)
+{
+ struct osc_object *osc;
+ struct lu_object *obj;
+
+ OBD_SLAB_ALLOC_PTR_GFP(osc, osc_object_kmem, __GFP_IO);
+ if (osc != NULL) {
+ obj = osc2lu(osc);
+ lu_object_init(obj, NULL, dev);
+ osc->oo_cl.co_ops = &osc_ops;
+ obj->lo_ops = &osc_lu_obj_ops;
+ } else
+ obj = NULL;
+ return obj;
+}
+
+/** @} osc */
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
new file mode 100644
index 0000000..d272322
--- /dev/null
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -0,0 +1,921 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * Implementation of cl_page for OSC layer.
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_OSC
+
+#include "osc_cl_internal.h"
+
+static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del);
+static void osc_lru_add(struct client_obd *cli, struct osc_page *opg);
+static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
+ struct osc_page *opg);
+
+/** \addtogroup osc
+ * @{
+ */
+
+/*
+ * osc_page_protected is commented out because it may sleep inside
+ * the client_obd_list_lock:
+ * client_obd_list_lock -> osc_ap_completion -> osc_completion ->
+ * -> osc_page_protected -> osc_page_is_dlocked -> osc_match_base
+ * -> ldlm_lock_match -> sptlrpc_import_check_ctx -> sleep.
+ */
+#if 0
+static int osc_page_is_dlocked(const struct lu_env *env,
+ const struct osc_page *opg,
+ enum cl_lock_mode mode, int pending, int unref)
+{
+ struct cl_page *page;
+ struct osc_object *obj;
+ struct osc_thread_info *info;
+ struct ldlm_res_id *resname;
+ struct lustre_handle *lockh;
+ ldlm_policy_data_t *policy;
+ ldlm_mode_t dlmmode;
+ int flags;
+
+ might_sleep();
+
+ info = osc_env_info(env);
+ resname = &info->oti_resname;
+ policy = &info->oti_policy;
+ lockh = &info->oti_handle;
+ page = opg->ops_cl.cpl_page;
+ obj = cl2osc(opg->ops_cl.cpl_obj);
+
+ flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
+ if (pending)
+ flags |= LDLM_FL_CBPENDING;
+
+ dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
+ osc_lock_build_res(env, obj, resname);
+ osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index);
+ return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
+ dlmmode, &flags, NULL, lockh, unref);
+}
+
+/**
+ * Checks an invariant that a page in the cache is covered by a lock, as
+ * needed.
+ */
+static int osc_page_protected(const struct lu_env *env,
+ const struct osc_page *opg,
+ enum cl_lock_mode mode, int unref)
+{
+ struct cl_object_header *hdr;
+ struct cl_lock *scan;
+ struct cl_page *page;
+ struct cl_lock_descr *descr;
+ int result;
+
+ LINVRNT(!opg->ops_temp);
+
+ page = opg->ops_cl.cpl_page;
+ if (page->cp_owner != NULL &&
+ cl_io_top(page->cp_owner)->ci_lockreq == CILR_NEVER)
+ /*
+ * If IO is done without locks (liblustre, or lloop), lock is
+ * not required.
+ */
+ result = 1;
+ else
+ /* otherwise check for a DLM lock */
+ result = osc_page_is_dlocked(env, opg, mode, 1, unref);
+ if (result == 0) {
+ /* maybe this page is a part of a lockless io? */
+ hdr = cl_object_header(opg->ops_cl.cpl_obj);
+ descr = &osc_env_info(env)->oti_descr;
+ descr->cld_mode = mode;
+ descr->cld_start = page->cp_index;
+ descr->cld_end = page->cp_index;
+ spin_lock(&hdr->coh_lock_guard);
+ list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
+ /*
+ * Lock-less sub-lock has to be either in HELD state
+ * (when io is actively going on), or in CACHED state,
+ * when top-lock is being unlocked:
+ * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse().
+ */
+ if ((scan->cll_state == CLS_HELD ||
+ scan->cll_state == CLS_CACHED) &&
+ cl_lock_ext_match(&scan->cll_descr, descr)) {
+ struct osc_lock *olck;
+
+ olck = osc_lock_at(scan);
+ result = osc_lock_is_lockless(olck);
+ break;
+ }
+ }
+ spin_unlock(&hdr->coh_lock_guard);
+ }
+ return result;
+}
+#else
+static int osc_page_protected(const struct lu_env *env,
+ const struct osc_page *opg,
+ enum cl_lock_mode mode, int unref)
+{
+ return 1;
+}
+#endif
+
+/*****************************************************************************
+ *
+ * Page operations.
+ *
+ */
+static void osc_page_fini(const struct lu_env *env,
+ struct cl_page_slice *slice)
+{
+ struct osc_page *opg = cl2osc_page(slice);
+
+ CDEBUG(D_TRACE, "%p\n", opg);
+ LASSERT(opg->ops_lock == NULL);
+}
+
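+/**
+ * Takes a transfer reference on a page: pins the top cl_page and marks the
+ * osc_page as transfer-pinned; dropped again by osc_page_transfer_put().
+ */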
+static void osc_page_transfer_get(struct osc_page *opg, const char *label)
+{
+ struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);
+
+ LASSERT(!opg->ops_transfer_pinned);
+ cl_page_get(page);
+ lu_ref_add_atomic(&page->cp_reference, label, page);
+ opg->ops_transfer_pinned = 1;
+}
+
+static void osc_page_transfer_put(const struct lu_env *env,
+ struct osc_page *opg)
+{
+ struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);
+
+ if (opg->ops_transfer_pinned) {
+ lu_ref_del(&page->cp_reference, "transfer", page);
+ opg->ops_transfer_pinned = 0;
+ cl_page_put(env, page);
+ }
+}
+
+/**
+ * This is called once for every page when it is submitted for a transfer
+ * either opportunistic (osc_page_cache_add()), or immediate
+ * (osc_page_submit()).
+ */
+static void osc_page_transfer_add(const struct lu_env *env,
+ struct osc_page *opg, enum cl_req_type crt)
+{
+ struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
+
+ /* ops_lru and ops_inflight share the same field, so take it from LRU
+ * first and then use it as inflight. */
+ osc_lru_del(osc_cli(obj), opg, false);
+
+ spin_lock(&obj->oo_seatbelt);
+ list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
+ opg->ops_submitter = current;
+ spin_unlock(&obj->oo_seatbelt);
+}
+
+static int osc_page_cache_add(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io)
+{
+ struct osc_io *oio = osc_env_io(env);
+ struct osc_page *opg = cl2osc_page(slice);
+ int result;
+
+ LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));
+
+ osc_page_transfer_get(opg, "transfer\0cache");
+ result = osc_queue_async_io(env, io, opg);
+ if (result != 0)
+ osc_page_transfer_put(env, opg);
+ else
+ osc_page_transfer_add(env, opg, CRT_WRITE);
+
+ /* For a sync write, the kernel will wait for this page to be flushed
+ * before osc_io_end() is called, so release it earlier.
+ * For mkwrite(), it is known that there are no further pages. */
+ if (cl_io_is_sync_write(io) || cl_io_is_mkwrite(io)) {
+ if (oio->oi_active != NULL) {
+ osc_extent_release(env, oio->oi_active);
+ oio->oi_active = NULL;
+ }
+ }
+
+ return result;
+}
+
+void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
+ pgoff_t start, pgoff_t end)
+{
+ memset(policy, 0, sizeof *policy);
+ policy->l_extent.start = cl_offset(obj, start);
+ policy->l_extent.end = cl_offset(obj, end + 1) - 1;
+}
+
+static int osc_page_addref_lock(const struct lu_env *env,
+ struct osc_page *opg,
+ struct cl_lock *lock)
+{
+ struct osc_lock *olock;
+ int rc;
+
+ LASSERT(opg->ops_lock == NULL);
+
+ olock = osc_lock_at(lock);
+ if (atomic_inc_return(&olock->ols_pageref) <= 0) {
+ atomic_dec(&olock->ols_pageref);
+ rc = -ENODATA;
+ } else {
+ cl_lock_get(lock);
+ opg->ops_lock = lock;
+ rc = 0;
+ }
+ return rc;
+}
+
+static void osc_page_putref_lock(const struct lu_env *env,
+ struct osc_page *opg)
+{
+ struct cl_lock *lock = opg->ops_lock;
+ struct osc_lock *olock;
+
+ LASSERT(lock != NULL);
+ olock = osc_lock_at(lock);
+
+ atomic_dec(&olock->ols_pageref);
+ opg->ops_lock = NULL;
+
+ cl_lock_put(env, lock);
+}
+
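+/**
+ * Implements cl_page_operations::cpo_is_under_lock(): returns -EBUSY if a
+ * cl_lock covering the page was found and a page reference was taken on it,
+ * -ENODATA otherwise.
+ */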
+static int osc_page_is_under_lock(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
+{
+ struct cl_lock *lock;
+ int result = -ENODATA;
+
+ lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
+ NULL, 1, 0);
+ if (lock != NULL) {
+ if (osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0)
+ result = -EBUSY;
+ cl_lock_put(env, lock);
+ }
+ return result;
+}
+
+static void osc_page_disown(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io)
+{
+ struct osc_page *opg = cl2osc_page(slice);
+
+ if (unlikely(opg->ops_lock))
+ osc_page_putref_lock(env, opg);
+}
+
+static void osc_page_completion_read(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ int ioret)
+{
+ struct osc_page *opg = cl2osc_page(slice);
+ struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
+
+ if (likely(opg->ops_lock))
+ osc_page_putref_lock(env, opg);
+ osc_lru_add(osc_cli(obj), opg);
+}
+
+static void osc_page_completion_write(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ int ioret)
+{
+ struct osc_page *opg = cl2osc_page(slice);
+ struct osc_object *obj = cl2osc(slice->cpl_obj);
+
+ osc_lru_add(osc_cli(obj), opg);
+}
+
+static int osc_page_fail(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
+{
+ /*
+ * Cached read?
+ */
+ LBUG();
+ return 0;
+}
+
+
+static const char *osc_list(struct list_head *head)
+{
+ return list_empty(head) ? "-" : "+";
+}
+
+static inline cfs_time_t osc_submit_duration(struct osc_page *opg)
+{
+ if (opg->ops_submit_time == 0)
+ return 0;
+
+ return (cfs_time_current() - opg->ops_submit_time);
+}
+
+static int osc_page_print(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ void *cookie, lu_printer_t printer)
+{
+ struct osc_page *opg = cl2osc_page(slice);
+ struct osc_async_page *oap = &opg->ops_oap;
+ struct osc_object *obj = cl2osc(slice->cpl_obj);
+ struct client_obd *cli = &osc_export(obj)->exp_obd->u.cli;
+
+ return (*printer)(env, cookie, LUSTRE_OSC_NAME"-page@%p: "
+ "1< %#x %d %u %s %s > "
+ "2< "LPU64" %u %u %#x %#x | %p %p %p > "
+ "3< %s %p %d %lu %d > "
+ "4< %d %d %d %lu %s | %s %s %s %s > "
+ "5< %s %s %s %s | %d %s | %d %s %s>\n",
+ opg,
+ /* 1 */
+ oap->oap_magic, oap->oap_cmd,
+ oap->oap_interrupted,
+ osc_list(&oap->oap_pending_item),
+ osc_list(&oap->oap_rpc_item),
+ /* 2 */
+ oap->oap_obj_off, oap->oap_page_off, oap->oap_count,
+ oap->oap_async_flags, oap->oap_brw_flags,
+ oap->oap_request, oap->oap_cli, obj,
+ /* 3 */
+ osc_list(&opg->ops_inflight),
+ opg->ops_submitter, opg->ops_transfer_pinned,
+ osc_submit_duration(opg), opg->ops_srvlock,
+ /* 4 */
+ cli->cl_r_in_flight, cli->cl_w_in_flight,
+ cli->cl_max_rpcs_in_flight,
+ cli->cl_avail_grant,
+ osc_list(&cli->cl_cache_waiters),
+ osc_list(&cli->cl_loi_ready_list),
+ osc_list(&cli->cl_loi_hp_ready_list),
+ osc_list(&cli->cl_loi_write_list),
+ osc_list(&cli->cl_loi_read_list),
+ /* 5 */
+ osc_list(&obj->oo_ready_item),
+ osc_list(&obj->oo_hp_ready_item),
+ osc_list(&obj->oo_write_item),
+ osc_list(&obj->oo_read_item),
+ atomic_read(&obj->oo_nr_reads),
+ osc_list(&obj->oo_reading_exts),
+ atomic_read(&obj->oo_nr_writes),
+ osc_list(&obj->oo_hp_exts),
+ osc_list(&obj->oo_urgent_exts));
+}
+
+static void osc_page_delete(const struct lu_env *env,
+ const struct cl_page_slice *slice)
+{
+ struct osc_page *opg = cl2osc_page(slice);
+ struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
+ int rc;
+
+ LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));
+
+ CDEBUG(D_TRACE, "%p\n", opg);
+ osc_page_transfer_put(env, opg);
+ rc = osc_teardown_async_page(env, obj, opg);
+ if (rc) {
+ CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(slice->cpl_page),
+ "Trying to teardown failed: %d\n", rc);
+ LASSERT(0);
+ }
+
+ spin_lock(&obj->oo_seatbelt);
+ if (opg->ops_submitter != NULL) {
+ LASSERT(!list_empty(&opg->ops_inflight));
+ list_del_init(&opg->ops_inflight);
+ opg->ops_submitter = NULL;
+ }
+ spin_unlock(&obj->oo_seatbelt);
+
+ osc_lru_del(osc_cli(obj), opg, true);
+}
+
+void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice,
+ int from, int to)
+{
+ struct osc_page *opg = cl2osc_page(slice);
+ struct osc_async_page *oap = &opg->ops_oap;
+
+ LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));
+
+ opg->ops_from = from;
+ opg->ops_to = to;
+ spin_lock(&oap->oap_lock);
+ oap->oap_async_flags |= ASYNC_COUNT_STABLE;
+ spin_unlock(&oap->oap_lock);
+}
+
+static int osc_page_cancel(const struct lu_env *env,
+ const struct cl_page_slice *slice)
+{
+ struct osc_page *opg = cl2osc_page(slice);
+ int rc = 0;
+
+ LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));
+
+ /* Check if the transfer against this page
+ * has completed, or was never even queued. */
+ if (opg->ops_transfer_pinned)
+ /* FIXME: may not be interrupted.. */
+ rc = osc_cancel_async_page(env, opg);
+ LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
+ return rc;
+}
+
+static int osc_page_flush(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io)
+{
+ struct osc_page *opg = cl2osc_page(slice);
+ int rc = 0;
+
+ rc = osc_flush_async_page(env, io, opg);
+ return rc;
+}
+
+static const struct cl_page_operations osc_page_ops = {
+ .cpo_fini = osc_page_fini,
+ .cpo_print = osc_page_print,
+ .cpo_delete = osc_page_delete,
+ .cpo_is_under_lock = osc_page_is_under_lock,
+ .cpo_disown = osc_page_disown,
+ .io = {
+ [CRT_READ] = {
+ .cpo_cache_add = osc_page_fail,
+ .cpo_completion = osc_page_completion_read
+ },
+ [CRT_WRITE] = {
+ .cpo_cache_add = osc_page_cache_add,
+ .cpo_completion = osc_page_completion_write
+ }
+ },
+ .cpo_clip = osc_page_clip,
+ .cpo_cancel = osc_page_cancel,
+ .cpo_flush = osc_page_flush
+};
+
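+/**
+ * Implements cl_object_operations::coo_page_init() for osc layer: sets up the
+ * osc_page slice, prepares the asynchronous page and, for cacheable pages,
+ * reserves an LRU slot.
+ */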
+int osc_page_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, struct page *vmpage)
+{
+ struct osc_object *osc = cl2osc(obj);
+ struct osc_page *opg = cl_object_page_slice(obj, page);
+ int result;
+
+ opg->ops_from = 0;
+ opg->ops_to = PAGE_CACHE_SIZE;
+
+ result = osc_prep_async_page(osc, opg, vmpage,
+ cl_offset(obj, page->cp_index));
+ if (result == 0) {
+ struct osc_io *oio = osc_env_io(env);
+ opg->ops_srvlock = osc_io_srvlock(oio);
+ cl_page_slice_add(page, &opg->ops_cl, obj,
+ &osc_page_ops);
+ }
+ /*
+ * Cannot assert osc_page_protected() here as read-ahead
+ * creates temporary pages outside of a lock.
+ */
+ /* ops_inflight and ops_lru are the same field, but it doesn't
+ * hurt to initialize it twice :-) */
+ INIT_LIST_HEAD(&opg->ops_inflight);
+ INIT_LIST_HEAD(&opg->ops_lru);
+
+ /* reserve an LRU space for this page */
+ if (page->cp_type == CPT_CACHEABLE && result == 0)
+ result = osc_lru_reserve(env, osc, opg);
+
+ return result;
+}
+
+/**
+ * Helper function called by osc_io_submit() for every page in an immediate
+ * transfer (i.e., transferred synchronously).
+ */
+void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
+ enum cl_req_type crt, int brw_flags)
+{
+ struct osc_async_page *oap = &opg->ops_oap;
+ struct osc_object *obj = oap->oap_obj;
+
+ LINVRNT(osc_page_protected(env, opg,
+ crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));
+
+ LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, "
+ "magic 0x%x\n", oap, oap->oap_magic);
+ LASSERT(oap->oap_async_flags & ASYNC_READY);
+ LASSERT(oap->oap_async_flags & ASYNC_COUNT_STABLE);
+
+ oap->oap_cmd = crt == CRT_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ;
+ oap->oap_page_off = opg->ops_from;
+ oap->oap_count = opg->ops_to - opg->ops_from;
+ oap->oap_brw_flags = OBD_BRW_SYNC | brw_flags;
+
+ if (!client_is_remote(osc_export(obj)) &&
+ cfs_capable(CFS_CAP_SYS_RESOURCE)) {
+ oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
+ oap->oap_cmd |= OBD_BRW_NOQUOTA;
+ }
+
+ opg->ops_submit_time = cfs_time_current();
+ osc_page_transfer_get(opg, "transfer\0imm");
+ osc_page_transfer_add(env, opg, crt);
+}
+
+/* --------------- LRU page management ------------------ */
+
+/* OSC is a natural place to manage LRU pages, as applications are specialized
+ * to write OSC by OSC. Ideally, if one OSC is used more frequently it should
+ * occupy more LRU slots. On the other hand, we should avoid using up all LRU
+ * slots (client_obd::cl_lru_left), otherwise a process has to be put to sleep
+ * waiting for free LRU slots, which would be very bad. So the algorithm
+ * requires each OSC to free slots voluntarily to maintain a reasonable number
+ * of free slots at any time.
+ */
+
+static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq);
+static atomic_t osc_lru_waiters = ATOMIC_INIT(0);
+/* LRU pages are freed in batch mode. OSC should free at least this
+ * number of pages to avoid running out of LRU budget, and ... */
+static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */
+/* ... at most this number, otherwise it will take too long to finish. */
+static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */
+
+/* Check if we can free LRU slots from this OSC. If there are LRU waiters,
+ * we should free slots aggressively. In this way, slots are freed at a steady
+ * pace to maintain fairness among OSCs.
+ *
+ * Return how many LRU pages should be freed. */
+static int osc_cache_too_much(struct client_obd *cli)
+{
+ struct cl_client_cache *cache = cli->cl_cache;
+ int pages = atomic_read(&cli->cl_lru_in_list) >> 1;
+
+ if (atomic_read(&osc_lru_waiters) > 0 &&
+ atomic_read(cli->cl_lru_left) < lru_shrink_max)
+ /* drop lru pages aggressively */
+ return min(pages, lru_shrink_max);
+
+ /* if it's about to run out of LRU slots, we should free some, but
+ * not too many, to maintain fairness among OSCs. */
+ if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
+ unsigned long tmp;
+
+ tmp = cache->ccc_lru_max / atomic_read(&cache->ccc_users);
+ if (pages > tmp)
+ return min(pages, lru_shrink_max);
+
+ return pages > lru_shrink_min ? lru_shrink_min : 0;
+ }
+
+ return 0;
+}
+
+/* Return how many pages are not discarded in @pvec. */
+static int discard_pagevec(const struct lu_env *env, struct cl_io *io,
+ struct cl_page **pvec, int max_index)
+{
+ int count;
+ int i;
+
+ for (count = 0, i = 0; i < max_index; i++) {
+ struct cl_page *page = pvec[i];
+ if (cl_page_own_try(env, io, page) == 0) {
+ /* free an LRU page only if nobody is using it.
+ * This check is necessary to avoid freeing pages
+ * that have already been removed from the LRU and
+ * pinned for IO. */
+ if (!cl_page_in_use(page)) {
+ cl_page_unmap(env, io, page);
+ cl_page_discard(env, io, page);
+ ++count;
+ }
+ cl_page_disown(env, io, page);
+ }
+ cl_page_put(env, page);
+ pvec[i] = NULL;
+ }
+ return max_index - count;
+}
+
+/**
+ * Drop at most @target pages from the LRU.
+ */
+int osc_lru_shrink(struct client_obd *cli, int target)
+{
+ struct cl_env_nest nest;
+ struct lu_env *env;
+ struct cl_io *io;
+ struct cl_object *clobj = NULL;
+ struct cl_page **pvec;
+ struct osc_page *opg;
+ int maxscan = 0;
+ int count = 0;
+ int index = 0;
+ int rc = 0;
+
+ LASSERT(atomic_read(&cli->cl_lru_in_list) >= 0);
+ if (atomic_read(&cli->cl_lru_in_list) == 0 || target <= 0)
+ return 0;
+
+ env = cl_env_nested_get(&nest);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ pvec = osc_env_info(env)->oti_pvec;
+ io = &osc_env_info(env)->oti_io;
+
+ client_obd_list_lock(&cli->cl_lru_list_lock);
+ atomic_inc(&cli->cl_lru_shrinkers);
+ maxscan = min(target << 1, atomic_read(&cli->cl_lru_in_list));
+ while (!list_empty(&cli->cl_lru_list)) {
+ struct cl_page *page;
+
+ if (--maxscan < 0)
+ break;
+
+ opg = list_entry(cli->cl_lru_list.next, struct osc_page,
+ ops_lru);
+ page = cl_page_top(opg->ops_cl.cpl_page);
+ if (cl_page_in_use_noref(page)) {
+ list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
+ continue;
+ }
+
+ LASSERT(page->cp_obj != NULL);
+ if (clobj != page->cp_obj) {
+ struct cl_object *tmp = page->cp_obj;
+
+ cl_object_get(tmp);
+ client_obd_list_unlock(&cli->cl_lru_list_lock);
+
+ if (clobj != NULL) {
+ count -= discard_pagevec(env, io, pvec, index);
+ index = 0;
+
+ cl_io_fini(env, io);
+ cl_object_put(env, clobj);
+ clobj = NULL;
+ }
+
+ clobj = tmp;
+ io->ci_obj = clobj;
+ io->ci_ignore_layout = 1;
+ rc = cl_io_init(env, io, CIT_MISC, clobj);
+
+ client_obd_list_lock(&cli->cl_lru_list_lock);
+
+ if (rc != 0)
+ break;
+
+ ++maxscan;
+ continue;
+ }
+
+ /* move this page to the end of the list as it will be discarded
+ * soon. The page is finally removed from the LRU list in
+ * osc_page_delete(). */
+ list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
+
+ /* it's okay to grab a refcount here w/o holding the lock because
+ * whoever deletes the page has to take cl_lru_list_lock first. */
+ cl_page_get(page);
+ pvec[index++] = page;
+ if (++count >= target)
+ break;
+
+ if (unlikely(index == OTI_PVEC_SIZE)) {
+ client_obd_list_unlock(&cli->cl_lru_list_lock);
+ count -= discard_pagevec(env, io, pvec, index);
+ index = 0;
+
+ client_obd_list_lock(&cli->cl_lru_list_lock);
+ }
+ }
+ client_obd_list_unlock(&cli->cl_lru_list_lock);
+
+ if (clobj != NULL) {
+ count -= discard_pagevec(env, io, pvec, index);
+
+ cl_io_fini(env, io);
+ cl_object_put(env, clobj);
+ }
+ cl_env_nested_put(&nest, env);
+
+ atomic_dec(&cli->cl_lru_shrinkers);
+ return count > 0 ? count : rc;
+}
+
+static void osc_lru_add(struct client_obd *cli, struct osc_page *opg)
+{
+ bool wakeup = false;
+
+ if (!opg->ops_in_lru)
+ return;
+
+ atomic_dec(&cli->cl_lru_busy);
+ client_obd_list_lock(&cli->cl_lru_list_lock);
+ if (list_empty(&opg->ops_lru)) {
+ list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
+ atomic_inc_return(&cli->cl_lru_in_list);
+ wakeup = atomic_read(&osc_lru_waiters) > 0;
+ }
+ client_obd_list_unlock(&cli->cl_lru_list_lock);
+
+ if (wakeup) {
+ osc_lru_shrink(cli, osc_cache_too_much(cli));
+ wake_up_all(&osc_lru_waitq);
+ }
+}
+
+/* Delete a page from the LRU list. A page can be removed from the LRU list
+ * for two reasons: it was redirtied, or it was deleted from the page cache. */
+static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del)
+{
+ if (opg->ops_in_lru) {
+ client_obd_list_lock(&cli->cl_lru_list_lock);
+ if (!list_empty(&opg->ops_lru)) {
+ LASSERT(atomic_read(&cli->cl_lru_in_list) > 0);
+ list_del_init(&opg->ops_lru);
+ atomic_dec(&cli->cl_lru_in_list);
+ if (!del)
+ atomic_inc(&cli->cl_lru_busy);
+ } else if (del) {
+ LASSERT(atomic_read(&cli->cl_lru_busy) > 0);
+ atomic_dec(&cli->cl_lru_busy);
+ }
+ client_obd_list_unlock(&cli->cl_lru_list_lock);
+ if (del) {
+ atomic_inc(cli->cl_lru_left);
+ /* this is a good place to release more LRU pages if
+ * this OSC occupies too many LRU pages and the kernel
+ * is reclaiming one of them.
+ * cl_lru_shrinkers avoids a recursive call in case
+ * we're already in the context of osc_lru_shrink(). */
+ if (atomic_read(&cli->cl_lru_shrinkers) == 0 &&
+ !memory_pressure_get())
+ osc_lru_shrink(cli, osc_cache_too_much(cli));
+ wake_up(&osc_lru_waitq);
+ }
+ } else {
+ LASSERT(list_empty(&opg->ops_lru));
+ }
+}
+
+static inline int max_to_shrink(struct client_obd *cli)
+{
+ return min(atomic_read(&cli->cl_lru_in_list) >> 1, lru_shrink_max);
+}
+
+static int osc_lru_reclaim(struct client_obd *cli)
+{
+ struct cl_client_cache *cache = cli->cl_cache;
+ int max_scans;
+ int rc;
+
+ LASSERT(cache != NULL);
+ LASSERT(!list_empty(&cache->ccc_lru));
+
+ rc = osc_lru_shrink(cli, lru_shrink_min);
+ if (rc != 0) {
+ CDEBUG(D_CACHE, "%s: Free %d pages from own LRU: %p.\n",
+ cli->cl_import->imp_obd->obd_name, rc, cli);
+ return rc;
+ }
+
+ CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n",
+ cli->cl_import->imp_obd->obd_name, cli,
+ atomic_read(&cli->cl_lru_in_list),
+ atomic_read(&cli->cl_lru_busy));
+
+ /* Reclaim LRU slots from other client_obds, since this one can't
+ * free enough of its own. This should rarely happen. */
+ spin_lock(&cache->ccc_lru_lock);
+ cache->ccc_lru_shrinkers++;
+ list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
+
+ max_scans = atomic_read(&cache->ccc_users);
+ while (--max_scans > 0 && !list_empty(&cache->ccc_lru)) {
+ cli = list_entry(cache->ccc_lru.next, struct client_obd,
+ cl_lru_osc);
+
+ CDEBUG(D_CACHE, "%s: cli %p LRU pages: %d, busy: %d.\n",
+ cli->cl_import->imp_obd->obd_name, cli,
+ atomic_read(&cli->cl_lru_in_list),
+ atomic_read(&cli->cl_lru_busy));
+
+ list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
+ if (atomic_read(&cli->cl_lru_in_list) > 0) {
+ spin_unlock(&cache->ccc_lru_lock);
+
+ rc = osc_lru_shrink(cli, max_to_shrink(cli));
+ spin_lock(&cache->ccc_lru_lock);
+ if (rc != 0)
+ break;
+ }
+ }
+ spin_unlock(&cache->ccc_lru_lock);
+
+ CDEBUG(D_CACHE, "%s: cli %p freed %d pages.\n",
+ cli->cl_import->imp_obd->obd_name, cli, rc);
+ return rc;
+}
+
+static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
+ struct osc_page *opg)
+{
+ struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+ struct client_obd *cli = osc_cli(obj);
+ int rc = 0;
+
+ if (cli->cl_cache == NULL) /* shall not be in LRU */
+ return 0;
+
+ LASSERT(atomic_read(cli->cl_lru_left) >= 0);
+ while (!atomic_add_unless(cli->cl_lru_left, -1, 0)) {
+ int gen;
+
+ /* ran out of LRU slots, try to free some of our own first */
+ rc = osc_lru_reclaim(cli);
+ if (rc < 0)
+ break;
+ if (rc > 0)
+ continue;
+
+ cond_resched();
+
+ /* slowest case: all cached pages are busy; notify other
+ * OSCs that we are short of LRU slots. */
+ atomic_inc(&osc_lru_waiters);
+
+ gen = atomic_read(&cli->cl_lru_in_list);
+ rc = l_wait_event(osc_lru_waitq,
+ atomic_read(cli->cl_lru_left) > 0 ||
+ (atomic_read(&cli->cl_lru_in_list) > 0 &&
+ gen != atomic_read(&cli->cl_lru_in_list)),
+ &lwi);
+
+ atomic_dec(&osc_lru_waiters);
+ if (rc < 0)
+ break;
+ }
+
+ if (rc >= 0) {
+ atomic_inc(&cli->cl_lru_busy);
+ opg->ops_in_lru = 1;
+ rc = 0;
+ }
+
+ return rc;
+}
+
+/** @} osc */
diff --git a/drivers/staging/lustre/lustre/osc/osc_quota.c b/drivers/staging/lustre/lustre/osc/osc_quota.c
new file mode 100644
index 0000000..9720c0e
--- /dev/null
+++ b/drivers/staging/lustre/lustre/osc/osc_quota.c
@@ -0,0 +1,325 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ *
+ * Code originally extracted from quota directory
+ */
+
+#include <obd_ost.h>
+#include "osc_internal.h"
+
+static inline struct osc_quota_info *osc_oqi_alloc(obd_uid id)
+{
+ struct osc_quota_info *oqi;
+
+ OBD_SLAB_ALLOC_PTR(oqi, osc_quota_kmem);
+ if (oqi != NULL)
+ oqi->oqi_id = id;
+
+ return oqi;
+}
+
+int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[])
+{
+ int type;
+
+ for (type = 0; type < MAXQUOTAS; type++) {
+ struct osc_quota_info *oqi;
+
+ oqi = cfs_hash_lookup(cli->cl_quota_hash[type], &qid[type]);
+ if (oqi) {
+ obd_uid id = oqi->oqi_id;
+
+ LASSERTF(id == qid[type],
+ "The ids don't match %u != %u\n",
+ id, qid[type]);
+
+ /* the slot is busy, the user is about to run out of
+ * quota space on this OST */
+ CDEBUG(D_QUOTA, "chkdq found noquota for %s %d\n",
+ type == USRQUOTA ? "user" : "group", qid[type]);
+ return NO_QUOTA;
+ }
+ }
+
+ return QUOTA_OK;
+}
+
+#define MD_QUOTA_FLAG(type) ((type == USRQUOTA) ? OBD_MD_FLUSRQUOTA \
+ : OBD_MD_FLGRPQUOTA)
+#define FL_QUOTA_FLAG(type) ((type == USRQUOTA) ? OBD_FL_NO_USRQUOTA \
+ : OBD_FL_NO_GRPQUOTA)
+
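+/* MD_QUOTA_FLAG(type) set in @valid means the server reported on that quota
+ * type; FL_QUOTA_FLAG(type) set in @flags means the ID is running out of
+ * quota space and the client should fall back to sync I/O for it. */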
+int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
+ obd_flag valid, obd_flag flags)
+{
+ int type;
+ int rc = 0;
+
+ if ((valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) == 0)
+ return 0;
+
+ for (type = 0; type < MAXQUOTAS; type++) {
+ struct osc_quota_info *oqi;
+
+ if ((valid & MD_QUOTA_FLAG(type)) == 0)
+ continue;
+
+ /* lookup the ID in the per-type hash table */
+ oqi = cfs_hash_lookup(cli->cl_quota_hash[type], &qid[type]);
+ if ((flags & FL_QUOTA_FLAG(type)) != 0) {
+ /* This ID is getting close to its quota limit, let's
+ * switch to sync I/O */
+ if (oqi != NULL)
+ continue;
+
+ oqi = osc_oqi_alloc(qid[type]);
+ if (oqi == NULL) {
+ rc = -ENOMEM;
+ break;
+ }
+
+ rc = cfs_hash_add_unique(cli->cl_quota_hash[type],
+ &qid[type], &oqi->oqi_hash);
+ /* race with others? */
+ if (rc == -EALREADY) {
+ rc = 0;
+ OBD_SLAB_FREE_PTR(oqi, osc_quota_kmem);
+ }
+
+ CDEBUG(D_QUOTA, "%s: setdq to insert for %s %d (%d)\n",
+ cli->cl_import->imp_obd->obd_name,
+ type == USRQUOTA ? "user" : "group",
+ qid[type], rc);
+ } else {
+ /* This ID is now off the hook, let's remove it from
+ * the hash table */
+ if (oqi == NULL)
+ continue;
+
+ oqi = cfs_hash_del_key(cli->cl_quota_hash[type],
+ &qid[type]);
+ if (oqi)
+ OBD_SLAB_FREE_PTR(oqi, osc_quota_kmem);
+
+ CDEBUG(D_QUOTA, "%s: setdq to remove for %s %d (%p)\n",
+ cli->cl_import->imp_obd->obd_name,
+ type == USRQUOTA ? "user" : "group",
+ qid[type], oqi);
+ }
+ }
+
+ return rc;
+}
+
+/*
+ * Hash operations for uid/gid <-> osc_quota_info
+ */
+static unsigned
+oqi_hashfn(cfs_hash_t *hs, const void *key, unsigned mask)
+{
+ return cfs_hash_u32_hash(*((__u32*)key), mask);
+}
+
+static int
+oqi_keycmp(const void *key, struct hlist_node *hnode)
+{
+ struct osc_quota_info *oqi;
+ obd_uid uid;
+
+ LASSERT(key != NULL);
+ uid = *((obd_uid*)key);
+ oqi = hlist_entry(hnode, struct osc_quota_info, oqi_hash);
+
+ return uid == oqi->oqi_id;
+}
+
+static void *
+oqi_key(struct hlist_node *hnode)
+{
+ struct osc_quota_info *oqi;
+ oqi = hlist_entry(hnode, struct osc_quota_info, oqi_hash);
+ return &oqi->oqi_id;
+}
+
+static void *
+oqi_object(struct hlist_node *hnode)
+{
+ return hlist_entry(hnode, struct osc_quota_info, oqi_hash);
+}
+
+static void
+oqi_get(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+}
+
+static void
+oqi_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+}
+
+static void
+oqi_exit(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+ struct osc_quota_info *oqi;
+
+ oqi = hlist_entry(hnode, struct osc_quota_info, oqi_hash);
+
+ OBD_SLAB_FREE_PTR(oqi, osc_quota_kmem);
+}
+
+#define HASH_QUOTA_BKT_BITS 5
+#define HASH_QUOTA_CUR_BITS 5
+#define HASH_QUOTA_MAX_BITS 15
+
+static cfs_hash_ops_t quota_hash_ops = {
+ .hs_hash = oqi_hashfn,
+ .hs_keycmp = oqi_keycmp,
+ .hs_key = oqi_key,
+ .hs_object = oqi_object,
+ .hs_get = oqi_get,
+ .hs_put_locked = oqi_put_locked,
+ .hs_exit = oqi_exit,
+};
+
+int osc_quota_setup(struct obd_device *obd)
+{
+ struct client_obd *cli = &obd->u.cli;
+ int i, type;
+
+ for (type = 0; type < MAXQUOTAS; type++) {
+ cli->cl_quota_hash[type] = cfs_hash_create("QUOTA_HASH",
+ HASH_QUOTA_CUR_BITS,
+ HASH_QUOTA_MAX_BITS,
+ HASH_QUOTA_BKT_BITS,
+ 0,
+ CFS_HASH_MIN_THETA,
+ CFS_HASH_MAX_THETA,
+ &quota_hash_ops,
+ CFS_HASH_DEFAULT);
+ if (cli->cl_quota_hash[type] == NULL)
+ break;
+ }
+
+ if (type == MAXQUOTAS)
+ return 0;
+
+ for (i = 0; i < type; i++)
+ cfs_hash_putref(cli->cl_quota_hash[i]);
+
+ return -ENOMEM;
+}
+
+int osc_quota_cleanup(struct obd_device *obd)
+{
+ struct client_obd *cli = &obd->u.cli;
+ int type;
+
+ for (type = 0; type < MAXQUOTAS; type++)
+ cfs_hash_putref(cli->cl_quota_hash[type]);
+
+ return 0;
+}
+
+int osc_quotactl(struct obd_device *unused, struct obd_export *exp,
+ struct obd_quotactl *oqctl)
+{
+ struct ptlrpc_request *req;
+ struct obd_quotactl *oqc;
+ int rc;
+
+ req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
+ &RQF_OST_QUOTACTL, LUSTRE_OST_VERSION,
+ OST_QUOTACTL);
+ if (req == NULL)
+ return -ENOMEM;
+
+ oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
+ *oqc = *oqctl;
+
+ ptlrpc_request_set_replen(req);
+ ptlrpc_at_set_req_timeout(req);
+ req->rq_no_resend = 1;
+
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ CERROR("ptlrpc_queue_wait failed, rc: %d\n", rc);
+
+ if (req->rq_repmsg &&
+ (oqc = req_capsule_server_get(&req->rq_pill, &RMF_OBD_QUOTACTL))) {
+ *oqctl = *oqc;
+ } else if (!rc) {
+ CERROR ("Can't unpack obd_quotactl\n");
+ rc = -EPROTO;
+ }
+ ptlrpc_req_finished(req);
+
+ return rc;
+}
+
+int osc_quotacheck(struct obd_device *unused, struct obd_export *exp,
+ struct obd_quotactl *oqctl)
+{
+ struct client_obd *cli = &exp->exp_obd->u.cli;
+ struct ptlrpc_request *req;
+ struct obd_quotactl *body;
+ int rc;
+
+ req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
+ &RQF_OST_QUOTACHECK, LUSTRE_OST_VERSION,
+ OST_QUOTACHECK);
+ if (req == NULL)
+ return -ENOMEM;
+
+ body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
+ *body = *oqctl;
+
+ ptlrpc_request_set_replen(req);
+
+ /* the next poll will find -ENODATA, which means quotacheck is
+ * still in progress */
+ cli->cl_qchk_stat = -ENODATA;
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ cli->cl_qchk_stat = rc;
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+int osc_quota_poll_check(struct obd_export *exp, struct if_quotacheck *qchk)
+{
+ struct client_obd *cli = &exp->exp_obd->u.cli;
+ int rc;
+
+ qchk->obd_uuid = cli->cl_target_uuid;
+ memcpy(qchk->obd_type, LUSTRE_OST_NAME, strlen(LUSTRE_OST_NAME));
+
+ rc = cli->cl_qchk_stat;
+ /* the client is not the previous one */
+ if (rc == CL_NOT_QUOTACHECKED)
+ rc = -EINTR;
+ return rc;
+}
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
new file mode 100644
index 0000000..ee6707a
--- /dev/null
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -0,0 +1,3662 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_OSC
+
+#include <linux/libcfs/libcfs.h>
+
+
+#include <lustre_dlm.h>
+#include <lustre_net.h>
+#include <lustre/lustre_user.h>
+#include <obd_cksum.h>
+#include <obd_ost.h>
+#include <obd_lov.h>
+
+#ifdef __CYGWIN__
+# include <ctype.h>
+#endif
+
+#include <lustre_ha.h>
+#include <lprocfs_status.h>
+#include <lustre_log.h>
+#include <lustre_debug.h>
+#include <lustre_param.h>
+#include <lustre_fid.h>
+#include "osc_internal.h"
+#include "osc_cl_internal.h"
+
+static void osc_release_ppga(struct brw_page **ppga, obd_count count);
+static int brw_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req, void *data, int rc);
+int osc_cleanup(struct obd_device *obd);
+
+/* Pack OSC object metadata for disk storage (LE byte order). */
+static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
+ struct lov_stripe_md *lsm)
+{
+ int lmm_size;
+
+ lmm_size = sizeof(**lmmp);
+ if (lmmp == NULL)
+ return lmm_size;
+
+ if (*lmmp != NULL && lsm == NULL) {
+ OBD_FREE(*lmmp, lmm_size);
+ *lmmp = NULL;
+ return 0;
+ } else if (unlikely(lsm != NULL && ostid_id(&lsm->lsm_oi) == 0)) {
+ return -EBADF;
+ }
+
+ if (*lmmp == NULL) {
+ OBD_ALLOC(*lmmp, lmm_size);
+ if (*lmmp == NULL)
+ return -ENOMEM;
+ }
+
+ if (lsm)
+ ostid_cpu_to_le(&lsm->lsm_oi, &(*lmmp)->lmm_oi);
+
+ return lmm_size;
+}
+
+/* Unpack OSC object metadata from disk storage (LE byte order). */
+static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
+ struct lov_mds_md *lmm, int lmm_bytes)
+{
+ int lsm_size;
+ struct obd_import *imp = class_exp2cliimp(exp);
+
+ if (lmm != NULL) {
+ if (lmm_bytes < sizeof(*lmm)) {
+ CERROR("%s: lov_mds_md too small: %d, need %d\n",
+ exp->exp_obd->obd_name, lmm_bytes,
+ (int)sizeof(*lmm));
+ return -EINVAL;
+ }
+ /* XXX LOV_MAGIC etc check? */
+
+ if (unlikely(ostid_id(&lmm->lmm_oi) == 0)) {
+ CERROR("%s: zero lmm_object_id: rc = %d\n",
+ exp->exp_obd->obd_name, -EINVAL);
+ return -EINVAL;
+ }
+ }
+
+ lsm_size = lov_stripe_md_size(1);
+ if (lsmp == NULL)
+ return lsm_size;
+
+ if (*lsmp != NULL && lmm == NULL) {
+ OBD_FREE((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
+ OBD_FREE(*lsmp, lsm_size);
+ *lsmp = NULL;
+ return 0;
+ }
+
+ if (*lsmp == NULL) {
+ OBD_ALLOC(*lsmp, lsm_size);
+ if (unlikely(*lsmp == NULL))
+ return -ENOMEM;
+ OBD_ALLOC((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
+ if (unlikely((*lsmp)->lsm_oinfo[0] == NULL)) {
+ OBD_FREE(*lsmp, lsm_size);
+ return -ENOMEM;
+ }
+ loi_init((*lsmp)->lsm_oinfo[0]);
+ } else if (unlikely(ostid_id(&(*lsmp)->lsm_oi) == 0)) {
+ return -EBADF;
+ }
+
+ if (lmm != NULL)
+ /* XXX zero *lsmp? */
+ ostid_le_to_cpu(&lmm->lmm_oi, &(*lsmp)->lsm_oi);
+
+ if (imp != NULL &&
+ (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES))
+ (*lsmp)->lsm_maxbytes = imp->imp_connect_data.ocd_maxbytes;
+ else
+ (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
+
+ return lsm_size;
+}
+
+static inline void osc_pack_capa(struct ptlrpc_request *req,
+ struct ost_body *body, void *capa)
+{
+ struct obd_capa *oc = (struct obd_capa *)capa;
+ struct lustre_capa *c;
+
+ if (!capa)
+ return;
+
+ c = req_capsule_client_get(&req->rq_pill, &RMF_CAPA1);
+ LASSERT(c);
+ capa_cpy(c, oc);
+ body->oa.o_valid |= OBD_MD_FLOSSCAPA;
+ DEBUG_CAPA(D_SEC, c, "pack");
+}
+
+static inline void osc_pack_req_body(struct ptlrpc_request *req,
+ struct obd_info *oinfo)
+{
+ struct ost_body *body;
+
+ body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
+ LASSERT(body);
+
+ lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
+ oinfo->oi_oa);
+ osc_pack_capa(req, body, oinfo->oi_capa);
+}
+
+static inline void osc_set_capa_size(struct ptlrpc_request *req,
+ const struct req_msg_field *field,
+ struct obd_capa *oc)
+{
+ if (oc == NULL)
+ req_capsule_set_size(&req->rq_pill, field, RCL_CLIENT, 0);
+ else
+ /* it is already calculated as sizeof struct obd_capa */
+ ;
+}
+
+static int osc_getattr_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
+ struct osc_async_args *aa, int rc)
+{
+ struct ost_body *body;
+
+ if (rc != 0)
+ GOTO(out, rc);
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+ if (body) {
+ CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
+ lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
+ aa->aa_oi->oi_oa, &body->oa);
+
+ /* This should really be sent by the OST */
+ aa->aa_oi->oi_oa->o_blksize = DT_MAX_BRW_SIZE;
+ aa->aa_oi->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
+ } else {
+ CDEBUG(D_INFO, "can't unpack ost_body\n");
+ rc = -EPROTO;
+ aa->aa_oi->oi_oa->o_valid = 0;
+ }
+out:
+ rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
+ return rc;
+}
+
+static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
+ struct ptlrpc_request_set *set)
+{
+ struct ptlrpc_request *req;
+ struct osc_async_args *aa;
+ int rc;
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
+ if (req == NULL)
+ return -ENOMEM;
+
+ osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ osc_pack_req_body(req, oinfo);
+
+ ptlrpc_request_set_replen(req);
+ req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_getattr_interpret;
+
+ CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+ aa = ptlrpc_req_async_args(req);
+ aa->aa_oi = oinfo;
+
+ ptlrpc_set_add_req(set, req);
+ return 0;
+}
+
+static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
+ struct obd_info *oinfo)
+{
+ struct ptlrpc_request *req;
+ struct ost_body *body;
+ int rc;
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
+ if (req == NULL)
+ return -ENOMEM;
+
+ osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ osc_pack_req_body(req, oinfo);
+
+ ptlrpc_request_set_replen(req);
+
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ GOTO(out, rc);
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+ if (body == NULL)
+ GOTO(out, rc = -EPROTO);
+
+ CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
+ lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa,
+ &body->oa);
+
+ oinfo->oi_oa->o_blksize = cli_brw_size(exp->exp_obd);
+ oinfo->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
+
+ out:
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
+ struct obd_info *oinfo, struct obd_trans_info *oti)
+{
+ struct ptlrpc_request *req;
+ struct ost_body *body;
+ int rc;
+
+ LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP);
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
+ if (req == NULL)
+ return -ENOMEM;
+
+ osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ osc_pack_req_body(req, oinfo);
+
+ ptlrpc_request_set_replen(req);
+
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ GOTO(out, rc);
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+ if (body == NULL)
+ GOTO(out, rc = -EPROTO);
+
+ lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa,
+ &body->oa);
+
+out:
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+static int osc_setattr_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
+ struct osc_setattr_args *sa, int rc)
+{
+ struct ost_body *body;
+
+ if (rc != 0)
+ GOTO(out, rc);
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+ if (body == NULL)
+ GOTO(out, rc = -EPROTO);
+
+ lustre_get_wire_obdo(&req->rq_import->imp_connect_data, sa->sa_oa,
+ &body->oa);
+out:
+ rc = sa->sa_upcall(sa->sa_cookie, rc);
+ return rc;
+}
+
+int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
+ struct obd_trans_info *oti,
+ obd_enqueue_update_f upcall, void *cookie,
+ struct ptlrpc_request_set *rqset)
+{
+ struct ptlrpc_request *req;
+ struct osc_setattr_args *sa;
+ int rc;
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
+ if (req == NULL)
+ return -ENOMEM;
+
+ osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
+ oinfo->oi_oa->o_lcookie = *oti->oti_logcookies;
+
+ osc_pack_req_body(req, oinfo);
+
+ ptlrpc_request_set_replen(req);
+
+ /* do mds to ost setattr asynchronously */
+ if (!rqset) {
+ /* Do not wait for response. */
+ ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+ } else {
+ req->rq_interpret_reply =
+ (ptlrpc_interpterer_t)osc_setattr_interpret;
+
+ CLASSERT (sizeof(*sa) <= sizeof(req->rq_async_args));
+ sa = ptlrpc_req_async_args(req);
+ sa->sa_oa = oinfo->oi_oa;
+ sa->sa_upcall = upcall;
+ sa->sa_cookie = cookie;
+
+ if (rqset == PTLRPCD_SET)
+ ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+ else
+ ptlrpc_set_add_req(rqset, req);
+ }
+
+ return 0;
+}
+
+static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
+ struct obd_trans_info *oti,
+ struct ptlrpc_request_set *rqset)
+{
+ return osc_setattr_async_base(exp, oinfo, oti,
+ oinfo->oi_cb_up, oinfo, rqset);
+}
+
+int osc_real_create(struct obd_export *exp, struct obdo *oa,
+ struct lov_stripe_md **ea, struct obd_trans_info *oti)
+{
+ struct ptlrpc_request *req;
+ struct ost_body *body;
+ struct lov_stripe_md *lsm;
+ int rc;
+
+ LASSERT(oa);
+ LASSERT(ea);
+
+ lsm = *ea;
+ if (!lsm) {
+ rc = obd_alloc_memmd(exp, &lsm);
+ if (rc < 0)
+ return rc;
+ }
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
+ if (req == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
+ if (rc) {
+ ptlrpc_request_free(req);
+ GOTO(out, rc);
+ }
+
+ body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
+ LASSERT(body);
+
+ lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
+
+ ptlrpc_request_set_replen(req);
+
+ if ((oa->o_valid & OBD_MD_FLFLAGS) &&
+ oa->o_flags == OBD_FL_DELORPHAN) {
+ DEBUG_REQ(D_HA, req,
+ "delorphan from OST integration");
+ /* Don't resend the delorphan req */
+ req->rq_no_resend = req->rq_no_delay = 1;
+ }
+
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ GOTO(out_req, rc);
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+ if (body == NULL)
+ GOTO(out_req, rc = -EPROTO);
+
+ CDEBUG(D_INFO, "oa flags %x\n", oa->o_flags);
+ lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);
+
+ oa->o_blksize = cli_brw_size(exp->exp_obd);
+ oa->o_valid |= OBD_MD_FLBLKSZ;
+
+ /* XXX LOV STACKING: the lsm that is passed to us from LOV does not
+ * have valid lsm_oinfo data structs, so don't go touching that.
+ * This needs to be fixed in a big way.
+ */
+ lsm->lsm_oi = oa->o_oi;
+ *ea = lsm;
+
+ if (oti != NULL) {
+ oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg);
+
+ if (oa->o_valid & OBD_MD_FLCOOKIE) {
+ if (!oti->oti_logcookies)
+ oti_alloc_cookies(oti, 1);
+ *oti->oti_logcookies = oa->o_lcookie;
+ }
+ }
+
+ CDEBUG(D_HA, "transno: "LPD64"\n",
+ lustre_msg_get_transno(req->rq_repmsg));
+out_req:
+ ptlrpc_req_finished(req);
+out:
+ if (rc && !*ea)
+ obd_free_memmd(exp, &lsm);
+ return rc;
+}
+
+int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
+ obd_enqueue_update_f upcall, void *cookie,
+ struct ptlrpc_request_set *rqset)
+{
+ struct ptlrpc_request *req;
+ struct osc_setattr_args *sa;
+ struct ost_body *body;
+ int rc;
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH);
+ if (req == NULL)
+ return -ENOMEM;
+
+ osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+ req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
+ ptlrpc_at_set_req_timeout(req);
+
+ body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
+ LASSERT(body);
+ lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
+ oinfo->oi_oa);
+ osc_pack_capa(req, body, oinfo->oi_capa);
+
+ ptlrpc_request_set_replen(req);
+
+ req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
+ CLASSERT (sizeof(*sa) <= sizeof(req->rq_async_args));
+ sa = ptlrpc_req_async_args(req);
+ sa->sa_oa = oinfo->oi_oa;
+ sa->sa_upcall = upcall;
+ sa->sa_cookie = cookie;
+ if (rqset == PTLRPCD_SET)
+ ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+ else
+ ptlrpc_set_add_req(rqset, req);
+
+ return 0;
+}
+
+static int osc_punch(const struct lu_env *env, struct obd_export *exp,
+ struct obd_info *oinfo, struct obd_trans_info *oti,
+ struct ptlrpc_request_set *rqset)
+{
+ oinfo->oi_oa->o_size = oinfo->oi_policy.l_extent.start;
+ oinfo->oi_oa->o_blocks = oinfo->oi_policy.l_extent.end;
+ oinfo->oi_oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
+ return osc_punch_base(exp, oinfo,
+ oinfo->oi_cb_up, oinfo, rqset);
+}
+
+static int osc_sync_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
+ void *arg, int rc)
+{
+ struct osc_fsync_args *fa = arg;
+ struct ost_body *body;
+
+ if (rc)
+ GOTO(out, rc);
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+ if (body == NULL) {
+ CERROR ("can't unpack ost_body\n");
+ GOTO(out, rc = -EPROTO);
+ }
+
+ *fa->fa_oi->oi_oa = body->oa;
+out:
+ rc = fa->fa_upcall(fa->fa_cookie, rc);
+ return rc;
+}
+
+int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
+ obd_enqueue_update_f upcall, void *cookie,
+ struct ptlrpc_request_set *rqset)
+{
+ struct ptlrpc_request *req;
+ struct ost_body *body;
+ struct osc_fsync_args *fa;
+ int rc;
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
+ if (req == NULL)
+ return -ENOMEM;
+
+ osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ /* overload the size and blocks fields in the oa with start/end */
+ body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
+ LASSERT(body);
+ lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
+ oinfo->oi_oa);
+ osc_pack_capa(req, body, oinfo->oi_capa);
+
+ ptlrpc_request_set_replen(req);
+ req->rq_interpret_reply = osc_sync_interpret;
+
+ CLASSERT(sizeof(*fa) <= sizeof(req->rq_async_args));
+ fa = ptlrpc_req_async_args(req);
+ fa->fa_oi = oinfo;
+ fa->fa_upcall = upcall;
+ fa->fa_cookie = cookie;
+
+ if (rqset == PTLRPCD_SET)
+ ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+ else
+ ptlrpc_set_add_req(rqset, req);
+
+ return 0;
+}
+
+static int osc_sync(const struct lu_env *env, struct obd_export *exp,
+ struct obd_info *oinfo, obd_size start, obd_size end,
+ struct ptlrpc_request_set *set)
+{
+ if (!oinfo->oi_oa) {
+ CDEBUG(D_INFO, "oa NULL\n");
+ return -EINVAL;
+ }
+
+ oinfo->oi_oa->o_size = start;
+ oinfo->oi_oa->o_blocks = end;
+ oinfo->oi_oa->o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
+
+ return osc_sync_base(exp, oinfo, oinfo->oi_cb_up, oinfo, set);
+}
+
+/* Find and cancel locally the locks matched by @mode in the resource derived
+ * from @oa. Found locks are added to the @cancels list. Returns the number
+ * of locks added to the @cancels list. */
+static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
+ struct list_head *cancels,
+ ldlm_mode_t mode, int lock_flags)
+{
+ struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
+ struct ldlm_res_id res_id;
+ struct ldlm_resource *res;
+ int count;
+
+ /* Return, i.e. cancel nothing, only if ELC is supported (flag in
+ * export) but disabled through procfs (flag in NS).
+ *
+ * This is different from the case where ELC is not supported at all,
+ * in which we still want to cancel locks in advance and just cancel
+ * them locally, without sending any RPC. */
+ if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
+ return 0;
+
+ ostid_build_res_name(&oa->o_oi, &res_id);
+ res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
+ if (res == NULL)
+ return 0;
+
+ LDLM_RESOURCE_ADDREF(res);
+ count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
+ lock_flags, 0, NULL);
+ LDLM_RESOURCE_DELREF(res);
+ ldlm_resource_putref(res);
+ return count;
+}
+
+static int osc_destroy_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req, void *data,
+ int rc)
+{
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+
+ atomic_dec(&cli->cl_destroy_in_flight);
+ wake_up(&cli->cl_destroy_waitq);
+ return 0;
+}
+
+static int osc_can_send_destroy(struct client_obd *cli)
+{
+ if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
+ cli->cl_max_rpcs_in_flight) {
+ /* The destroy request can be sent */
+ return 1;
+ }
+ if (atomic_dec_return(&cli->cl_destroy_in_flight) <
+ cli->cl_max_rpcs_in_flight) {
+ /*
+ * The counter has been modified between the two atomic
+ * operations.
+ */
+ wake_up(&cli->cl_destroy_waitq);
+ }
+ return 0;
+}
+
+int osc_create(const struct lu_env *env, struct obd_export *exp,
+ struct obdo *oa, struct lov_stripe_md **ea,
+ struct obd_trans_info *oti)
+{
+ int rc = 0;
+
+ LASSERT(oa);
+ LASSERT(ea);
+ LASSERT(oa->o_valid & OBD_MD_FLGROUP);
+
+ if ((oa->o_valid & OBD_MD_FLFLAGS) &&
+ oa->o_flags == OBD_FL_RECREATE_OBJS) {
+ return osc_real_create(exp, oa, ea, oti);
+ }
+
+ if (!fid_seq_is_mdt(ostid_seq(&oa->o_oi)))
+ return osc_real_create(exp, oa, ea, oti);
+
+ /* we should not get here anymore */
+ LBUG();
+
+ return rc;
+}
+
+/* Destroy requests can always be async on the client, and we don't even really
+ * care about the return code since the client cannot do anything at all about
+ * a destroy failure.
+ * When the MDS is unlinking a filename, it saves the file objects into a
+ * recovery llog, and these object records are cancelled when the OST reports
+ * they were destroyed and sync'd to disk (i.e. transaction committed).
+ * If the client dies, or the OST is down when the objects should be destroyed,
+ * the records are not cancelled, and when the OST next reconnects to the MDS,
+ * it retrieves the llog unlink logs and then sends the log cancellation
+ * cookies to the MDS after committing the destroy transactions. */
+static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
+ struct obdo *oa, struct lov_stripe_md *ea,
+ struct obd_trans_info *oti, struct obd_export *md_export,
+ void *capa)
+{
+ struct client_obd *cli = &exp->exp_obd->u.cli;
+ struct ptlrpc_request *req;
+ struct ost_body *body;
+ LIST_HEAD(cancels);
+ int rc, count;
+
+ if (!oa) {
+ CDEBUG(D_INFO, "oa NULL\n");
+ return -EINVAL;
+ }
+
+ count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
+ LDLM_FL_DISCARD_DATA);
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
+ if (req == NULL) {
+ ldlm_lock_list_put(&cancels, l_bl_ast, count);
+ return -ENOMEM;
+ }
+
+ osc_set_capa_size(req, &RMF_CAPA1, (struct obd_capa *)capa);
+ rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
+ 0, &cancels, count);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
+ ptlrpc_at_set_req_timeout(req);
+
+ if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE)
+ oa->o_lcookie = *oti->oti_logcookies;
+ body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
+ LASSERT(body);
+ lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
+
+ osc_pack_capa(req, body, (struct obd_capa *)capa);
+ ptlrpc_request_set_replen(req);
+
+ /* If osc_destroy is for destroying an unlink orphan (sent from MDT
+ * to OST), it should not be blocked here, because the process might
+ * be triggered by ptlrpcd, and it is not good to block a ptlrpcd
+ * thread (b=16006) */
+ if (!(oa->o_flags & OBD_FL_DELORPHAN)) {
+ req->rq_interpret_reply = osc_destroy_interpret;
+ if (!osc_can_send_destroy(cli)) {
+ struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP,
+ NULL);
+
+ /*
+ * Wait until the number of on-going destroy RPCs drops
+ * below max_rpcs_in_flight
+ */
+ l_wait_event_exclusive(cli->cl_destroy_waitq,
+ osc_can_send_destroy(cli), &lwi);
+ }
+ }
+
+ /* Do not wait for response */
+ ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+ return 0;
+}
+
+static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
+ long writing_bytes)
+{
+ obd_flag bits = OBD_MD_FLBLOCKS|OBD_MD_FLGRANT;
+
+ LASSERT(!(oa->o_valid & bits));
+
+ oa->o_valid |= bits;
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ oa->o_dirty = cli->cl_dirty;
+ if (unlikely(cli->cl_dirty - cli->cl_dirty_transit >
+ cli->cl_dirty_max)) {
+ CERROR("dirty %lu - %lu > dirty_max %lu\n",
+ cli->cl_dirty, cli->cl_dirty_transit, cli->cl_dirty_max);
+ oa->o_undirty = 0;
+ } else if (unlikely(atomic_read(&obd_dirty_pages) -
+ atomic_read(&obd_dirty_transit_pages) >
+ (long)(obd_max_dirty_pages + 1))) {
+ /* The atomic_read() and the atomic_inc() are not
+ * covered by a lock, thus they may race and trip
+ * this CERROR() unless we add in a small fudge factor (+1). */
+ CERROR("dirty %d - %d > system dirty_max %d\n",
+ atomic_read(&obd_dirty_pages),
+ atomic_read(&obd_dirty_transit_pages),
+ obd_max_dirty_pages);
+ oa->o_undirty = 0;
+ } else if (unlikely(cli->cl_dirty_max - cli->cl_dirty > 0x7fffffff)) {
+ CERROR("dirty %lu - dirty_max %lu too big???\n",
+ cli->cl_dirty, cli->cl_dirty_max);
+ oa->o_undirty = 0;
+ } else {
+ long max_in_flight = (cli->cl_max_pages_per_rpc <<
+ PAGE_CACHE_SHIFT)*
+ (cli->cl_max_rpcs_in_flight + 1);
+ oa->o_undirty = max(cli->cl_dirty_max, max_in_flight);
+ }
+ oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
+ oa->o_dropped = cli->cl_lost_grant;
+ cli->cl_lost_grant = 0;
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ CDEBUG(D_CACHE,"dirty: "LPU64" undirty: %u dropped %u grant: "LPU64"\n",
+ oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
+
+}
+
+void osc_update_next_shrink(struct client_obd *cli)
+{
+ cli->cl_next_shrink_grant =
+ cfs_time_shift(cli->cl_grant_shrink_interval);
+ CDEBUG(D_CACHE, "next time %ld to shrink grant \n",
+ cli->cl_next_shrink_grant);
+}
+
+static void __osc_update_grant(struct client_obd *cli, obd_size grant)
+{
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ cli->cl_avail_grant += grant;
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+}
+
+static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
+{
+ if (body->oa.o_valid & OBD_MD_FLGRANT) {
+ CDEBUG(D_CACHE, "got "LPU64" extra grant\n", body->oa.o_grant);
+ __osc_update_grant(cli, body->oa.o_grant);
+ }
+}
+
+static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
+ obd_count keylen, void *key, obd_count vallen,
+ void *val, struct ptlrpc_request_set *set);
+
+static int osc_shrink_grant_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
+ void *aa, int rc)
+{
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+ struct obdo *oa = ((struct osc_grant_args *)aa)->aa_oa;
+ struct ost_body *body;
+
+ if (rc != 0) {
+ __osc_update_grant(cli, oa->o_grant);
+ GOTO(out, rc);
+ }
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+ LASSERT(body);
+ osc_update_grant(cli, body);
+out:
+ OBDO_FREE(oa);
+ return rc;
+}
+
+static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
+{
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ oa->o_grant = cli->cl_avail_grant / 4;
+ cli->cl_avail_grant -= oa->o_grant;
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
+ oa->o_valid |= OBD_MD_FLFLAGS;
+ oa->o_flags = 0;
+ }
+ oa->o_flags |= OBD_FL_SHRINK_GRANT;
+ osc_update_next_shrink(cli);
+}
+
+/* Shrink the current grant, either from some large amount to enough for a
+ * full set of in-flight RPCs, or if we have already shrunk to that limit
+ * then to enough for a single RPC. This avoids keeping more grant than
+ * needed, and avoids shrinking the grant piecemeal. */
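+/* For example, assuming 4 KiB pages, with cl_max_rpcs_in_flight = 8 and
+ * cl_max_pages_per_rpc = 256 the target below works out to
+ * (8 + 1) * 256 * 4 KiB = 9 MiB; once the grant is already at or below that,
+ * a further shrink drops it to a single RPC's worth, i.e. 1 MiB. */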
+static int osc_shrink_grant(struct client_obd *cli)
+{
+ __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
+ (cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT);
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ if (cli->cl_avail_grant <= target_bytes)
+ target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+
+ return osc_shrink_grant_to_target(cli, target_bytes);
+}
+
+int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
+{
+ int rc = 0;
+ struct ost_body *body;
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ /* Don't shrink if we are already above or below the desired limit.
+ * We don't want to shrink below a single RPC, as that would negatively
+ * impact block allocation and long-term performance. */
+ if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT)
+ target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+
+ if (target_bytes >= cli->cl_avail_grant) {
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ return 0;
+ }
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+
+ OBD_ALLOC_PTR(body);
+ if (!body)
+ return -ENOMEM;
+
+ osc_announce_cached(cli, &body->oa, 0);
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ body->oa.o_grant = cli->cl_avail_grant - target_bytes;
+ cli->cl_avail_grant = target_bytes;
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
+ body->oa.o_valid |= OBD_MD_FLFLAGS;
+ body->oa.o_flags = 0;
+ }
+ body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
+ osc_update_next_shrink(cli);
+
+ rc = osc_set_info_async(NULL, cli->cl_import->imp_obd->obd_self_export,
+ sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
+ sizeof(*body), body, NULL);
+ if (rc != 0)
+ __osc_update_grant(cli, body->oa.o_grant);
+ OBD_FREE_PTR(body);
+ return rc;
+}
+
+static int osc_should_shrink_grant(struct client_obd *client)
+{
+ cfs_time_t time = cfs_time_current();
+ cfs_time_t next_shrink = client->cl_next_shrink_grant;
+
+ if ((client->cl_import->imp_connect_data.ocd_connect_flags &
+ OBD_CONNECT_GRANT_SHRINK) == 0)
+ return 0;
+
+ if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) {
+ /* Get the current RPC size directly, instead of going via:
+ * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
+ * Keep comment here so that it can be found by searching. */
+ int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+
+ if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
+ client->cl_avail_grant > brw_size)
+ return 1;
+ else
+ osc_update_next_shrink(client);
+ }
+ return 0;
+}
+
+static int osc_grant_shrink_grant_cb(struct timeout_item *item, void *data)
+{
+ struct client_obd *client;
+
+ list_for_each_entry(client, &item->ti_obd_list,
+ cl_grant_shrink_list) {
+ if (osc_should_shrink_grant(client))
+ osc_shrink_grant(client);
+ }
+ return 0;
+}
+
+static int osc_add_shrink_grant(struct client_obd *client)
+{
+ int rc;
+
+ rc = ptlrpc_add_timeout_client(client->cl_grant_shrink_interval,
+ TIMEOUT_GRANT,
+ osc_grant_shrink_grant_cb, NULL,
+ &client->cl_grant_shrink_list);
+ if (rc) {
+ CERROR("add grant client %s error %d\n",
+ client->cl_import->imp_obd->obd_name, rc);
+ return rc;
+ }
+ CDEBUG(D_CACHE, "add grant client %s \n",
+ client->cl_import->imp_obd->obd_name);
+ osc_update_next_shrink(client);
+ return 0;
+}
+
+static int osc_del_shrink_grant(struct client_obd *client)
+{
+ return ptlrpc_del_timeout_client(&client->cl_grant_shrink_list,
+ TIMEOUT_GRANT);
+}
+
+static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
+{
+ /*
+ * ocd_grant is the total grant amount we expect to hold: if we've
+ * been evicted, it's the new avail_grant amount, and cl_dirty will
+ * drop to 0 as in-flight RPCs fail out; otherwise, it's
+ * avail_grant + dirty.
+ *
+ * The race is tolerable here: if we're evicted but imp_state has
+ * already left EVICTED state, then cl_dirty must be 0 already.
+ */
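+ /* For instance, if the server granted 2 MiB on a normal reconnect and
+ * the client still has 512 KiB of dirty data, cl_avail_grant starts at
+ * 1.5 MiB; after an eviction it would start at the full 2 MiB, since
+ * cl_dirty is about to drop to 0. */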
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ if (cli->cl_import->imp_state == LUSTRE_IMP_EVICTED)
+ cli->cl_avail_grant = ocd->ocd_grant;
+ else
+ cli->cl_avail_grant = ocd->ocd_grant - cli->cl_dirty;
+
+ if (cli->cl_avail_grant < 0) {
+ CWARN("%s: available grant < 0: avail/ocd/dirty %ld/%u/%ld\n",
+ cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant,
+ ocd->ocd_grant, cli->cl_dirty);
+ /* workaround for servers which do not have the patch from
+ * LU-2679 */
+ cli->cl_avail_grant = ocd->ocd_grant;
+ }
+
+ /* determine the appropriate chunk size used by osc_extent. */
+ cli->cl_chunkbits = max_t(int, PAGE_CACHE_SHIFT, ocd->ocd_blocksize);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+
+ CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld."
+ "chunk bits: %d.\n", cli->cl_import->imp_obd->obd_name,
+ cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits);
+
+ if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
+ list_empty(&cli->cl_grant_shrink_list))
+ osc_add_shrink_grant(cli);
+}
+
+/* We assume that the reason this OSC got a short read is that it read
+ * beyond the end of a stripe object; i.e. Lustre is reading a sparse file
+ * via the LOV, and it _knows_ it is reading inside the file - it is just
+ * that this stripe was never written at or beyond this stripe offset yet. */
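+/* For instance, with three full 4 KiB brw_pages and nob_read = 5000:
+ * page 0 was read completely and is left untouched, page 1 is zeroed from
+ * byte 904 of its data onwards, and page 2 is zeroed entirely. */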
+static void handle_short_read(int nob_read, obd_count page_count,
+ struct brw_page **pga)
+{
+ char *ptr;
+ int i = 0;
+
+ /* skip bytes read OK */
+ while (nob_read > 0) {
+ LASSERT (page_count > 0);
+
+ if (pga[i]->count > nob_read) {
+ /* EOF inside this page */
+ ptr = kmap(pga[i]->pg) +
+ (pga[i]->off & ~CFS_PAGE_MASK);
+ memset(ptr + nob_read, 0, pga[i]->count - nob_read);
+ kunmap(pga[i]->pg);
+ page_count--;
+ i++;
+ break;
+ }
+
+ nob_read -= pga[i]->count;
+ page_count--;
+ i++;
+ }
+
+ /* zero remaining pages */
+ while (page_count-- > 0) {
+ ptr = kmap(pga[i]->pg) + (pga[i]->off & ~CFS_PAGE_MASK);
+ memset(ptr, 0, pga[i]->count);
+ kunmap(pga[i]->pg);
+ i++;
+ }
+}
+
+static int check_write_rcs(struct ptlrpc_request *req,
+ int requested_nob, int niocount,
+ obd_count page_count, struct brw_page **pga)
+{
+ int i;
+ __u32 *remote_rcs;
+
+ remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
+ sizeof(*remote_rcs) *
+ niocount);
+ if (remote_rcs == NULL) {
+ CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
+ return(-EPROTO);
+ }
+
+ /* return error if any niobuf was in error */
+ for (i = 0; i < niocount; i++) {
+ if ((int)remote_rcs[i] < 0)
+ return(remote_rcs[i]);
+
+ if (remote_rcs[i] != 0) {
+ CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
+ i, remote_rcs[i], req);
+ return(-EPROTO);
+ }
+ }
+
+ if (req->rq_bulk->bd_nob_transferred != requested_nob) {
+ CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
+ req->rq_bulk->bd_nob_transferred, requested_nob);
+ return(-EPROTO);
+ }
+
+ return (0);
+}
+
+static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
+{
+ if (p1->flag != p2->flag) {
+ unsigned mask = ~(OBD_BRW_FROM_GRANT| OBD_BRW_NOCACHE|
+ OBD_BRW_SYNC|OBD_BRW_ASYNC|OBD_BRW_NOQUOTA);
+
+ /* warn if we try to combine flags that we don't know to be
+ * safe to combine */
+ if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
+ CWARN("Saw flags 0x%x and 0x%x in the same brw, please "
+ "report this at http://bugs.whamcloud.com/\n",
+ p1->flag, p2->flag);
+ }
+ return 0;
+ }
+
+ return (p1->off + p1->count == p2->off);
+}
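+
+/* Two contiguous brw_pages with identical flags (e.g. [0, 4096) and
+ * [4096, 8192) of the same object) are merged into a single niobuf_remote
+ * in osc_brw_prep_request() below, which is why niocount there can be
+ * smaller than page_count. */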
+
+static obd_count osc_checksum_bulk(int nob, obd_count pg_count,
+ struct brw_page **pga, int opc,
+ cksum_type_t cksum_type)
+{
+ __u32 cksum;
+ int i = 0;
+ struct cfs_crypto_hash_desc *hdesc;
+ unsigned int bufsize;
+ int err;
+ unsigned char cfs_alg = cksum_obd2cfs(cksum_type);
+
+ LASSERT(pg_count > 0);
+
+ hdesc = cfs_crypto_hash_init(cfs_alg, NULL, 0);
+ if (IS_ERR(hdesc)) {
+ CERROR("Unable to initialize checksum hash %s\n",
+ cfs_crypto_hash_name(cfs_alg));
+ return PTR_ERR(hdesc);
+ }
+
+ while (nob > 0 && pg_count > 0) {
+ int count = pga[i]->count > nob ? nob : pga[i]->count;
+
+ /* corrupt the data before we compute the checksum, to
+ * simulate an OST->client data error */
+ if (i == 0 && opc == OST_READ &&
+ OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
+ unsigned char *ptr = kmap(pga[i]->pg);
+ int off = pga[i]->off & ~CFS_PAGE_MASK;
+ memcpy(ptr + off, "bad1", min(4, nob));
+ kunmap(pga[i]->pg);
+ }
+ cfs_crypto_hash_update_page(hdesc, pga[i]->pg,
+ pga[i]->off & ~CFS_PAGE_MASK,
+ count);
+ LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d\n",
+ (int)(pga[i]->off & ~CFS_PAGE_MASK));
+
+ nob -= pga[i]->count;
+ pg_count--;
+ i++;
+ }
+
+ bufsize = 4;
+ err = cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
+
+ if (err)
+ cfs_crypto_hash_final(hdesc, NULL, NULL);
+
+ /* For sends we only compute a wrong checksum instead of corrupting
+ * the data, so the data is still correct on a resend */
+ if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
+ cksum++;
+
+ return cksum;
+}
+
+static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
+ struct lov_stripe_md *lsm, obd_count page_count,
+ struct brw_page **pga,
+ struct ptlrpc_request **reqp,
+ struct obd_capa *ocapa, int reserve,
+ int resend)
+{
+ struct ptlrpc_request *req;
+ struct ptlrpc_bulk_desc *desc;
+ struct ost_body *body;
+ struct obd_ioobj *ioobj;
+ struct niobuf_remote *niobuf;
+ int niocount, i, requested_nob, opc, rc;
+ struct osc_brw_async_args *aa;
+ struct req_capsule *pill;
+ struct brw_page *pg_prev;
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
+ return -ENOMEM; /* Recoverable */
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
+ return -EINVAL; /* Fatal */
+
+ if ((cmd & OBD_BRW_WRITE) != 0) {
+ opc = OST_WRITE;
+ req = ptlrpc_request_alloc_pool(cli->cl_import,
+ cli->cl_import->imp_rq_pool,
+ &RQF_OST_BRW_WRITE);
+ } else {
+ opc = OST_READ;
+ req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
+ }
+ if (req == NULL)
+ return -ENOMEM;
+
+ for (niocount = i = 1; i < page_count; i++) {
+ if (!can_merge_pages(pga[i - 1], pga[i]))
+ niocount++;
+ }
+
+ pill = &req->rq_pill;
+ req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
+ sizeof(*ioobj));
+ req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
+ niocount * sizeof(*niobuf));
+ osc_set_capa_size(req, &RMF_CAPA1, ocapa);
+
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+ req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
+ ptlrpc_at_set_req_timeout(req);
+ /* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own
+ * retry logic */
+ req->rq_no_retry_einprogress = 1;
+
+ desc = ptlrpc_prep_bulk_imp(req, page_count,
+ cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
+ opc == OST_WRITE ? BULK_GET_SOURCE : BULK_PUT_SINK,
+ OST_BULK_PORTAL);
+
+ if (desc == NULL)
+ GOTO(out, rc = -ENOMEM);
+ /* NB request now owns desc and will free it when it gets freed */
+
+ body = req_capsule_client_get(pill, &RMF_OST_BODY);
+ ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
+ niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
+ LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);
+
+ lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
+
+ obdo_to_ioobj(oa, ioobj);
+ ioobj->ioo_bufcnt = niocount;
+ /* The high bits of ioo_max_brw tell the server the _maximum_ number of
+ * bulks that might be sent for this request. The actual number is decided
+ * when the RPC is finally sent in ptlrpc_register_bulk(). It sends
+ * "max - 1" for compatibility with old clients that send "0", and also so
+ * that the actual maximum is a power-of-two number, not one less. LU-1431 */
+ ioobj_max_brw_set(ioobj, desc->bd_md_max_brw);
+ osc_pack_capa(req, body, ocapa);
+ LASSERT(page_count > 0);
+ pg_prev = pga[0];
+ for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
+ struct brw_page *pg = pga[i];
+ int poff = pg->off & ~CFS_PAGE_MASK;
+
+ LASSERT(pg->count > 0);
+ /* make sure there is no gap in the middle of page array */
+ LASSERTF(page_count == 1 ||
+ (ergo(i == 0, poff + pg->count == PAGE_CACHE_SIZE) &&
+ ergo(i > 0 && i < page_count - 1,
+ poff == 0 && pg->count == PAGE_CACHE_SIZE) &&
+ ergo(i == page_count - 1, poff == 0)),
+ "i: %d/%d pg: %p off: "LPU64", count: %u\n",
+ i, page_count, pg, pg->off, pg->count);
+ LASSERTF(i == 0 || pg->off > pg_prev->off,
+ "i %d p_c %u pg %p [pri %lu ind %lu] off "LPU64
+ " prev_pg %p [pri %lu ind %lu] off "LPU64"\n",
+ i, page_count,
+ pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
+ pg_prev->pg, page_private(pg_prev->pg),
+ pg_prev->pg->index, pg_prev->off);
+ LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
+ (pg->flag & OBD_BRW_SRVLOCK));
+
+ ptlrpc_prep_bulk_page_pin(desc, pg->pg, poff, pg->count);
+ requested_nob += pg->count;
+
+ if (i > 0 && can_merge_pages(pg_prev, pg)) {
+ niobuf--;
+ niobuf->len += pg->count;
+ } else {
+ niobuf->offset = pg->off;
+ niobuf->len = pg->count;
+ niobuf->flags = pg->flag;
+ }
+ pg_prev = pg;
+ }
+
+ LASSERTF((void *)(niobuf - niocount) ==
+ req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
+ "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
+ &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));
+
+ osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
+ if (resend) {
+ if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
+ body->oa.o_valid |= OBD_MD_FLFLAGS;
+ body->oa.o_flags = 0;
+ }
+ body->oa.o_flags |= OBD_FL_RECOV_RESEND;
+ }
+
+ if (osc_should_shrink_grant(cli))
+ osc_shrink_grant_local(cli, &body->oa);
+
+ /* size[REQ_REC_OFF] still sizeof (*body) */
+ if (opc == OST_WRITE) {
+ if (cli->cl_checksum &&
+ !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
+ /* store cl_cksum_type in a local variable since
+ * it can be changed via lprocfs */
+ cksum_type_t cksum_type = cli->cl_cksum_type;
+
+ if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
+ oa->o_flags &= OBD_FL_LOCAL_MASK;
+ body->oa.o_flags = 0;
+ }
+ body->oa.o_flags |= cksum_type_pack(cksum_type);
+ body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
+ body->oa.o_cksum = osc_checksum_bulk(requested_nob,
+ page_count, pga,
+ OST_WRITE,
+ cksum_type);
+ CDEBUG(D_PAGE, "checksum at write origin: %x\n",
+ body->oa.o_cksum);
+ /* save this in 'oa', too, for later checking */
+ oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
+ oa->o_flags |= cksum_type_pack(cksum_type);
+ } else {
+ /* clear out the checksum flag, in case this is a
+ * resend but cl_checksum is no longer set. b=11238 */
+ oa->o_valid &= ~OBD_MD_FLCKSUM;
+ }
+ oa->o_cksum = body->oa.o_cksum;
+ /* 1 RC per niobuf */
+ req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
+ sizeof(__u32) * niocount);
+ } else {
+ if (cli->cl_checksum &&
+ !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
+ if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
+ body->oa.o_flags = 0;
+ body->oa.o_flags |= cksum_type_pack(cli->cl_cksum_type);
+ body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
+ }
+ }
+ ptlrpc_request_set_replen(req);
+
+ CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+ aa = ptlrpc_req_async_args(req);
+ aa->aa_oa = oa;
+ aa->aa_requested_nob = requested_nob;
+ aa->aa_nio_count = niocount;
+ aa->aa_page_count = page_count;
+ aa->aa_resends = 0;
+ aa->aa_ppga = pga;
+ aa->aa_cli = cli;
+ INIT_LIST_HEAD(&aa->aa_oaps);
+ if (ocapa && reserve)
+ aa->aa_ocapa = capa_get(ocapa);
+
+ *reqp = req;
+ return 0;
+
+ out:
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
+ __u32 client_cksum, __u32 server_cksum, int nob,
+ obd_count page_count, struct brw_page **pga,
+ cksum_type_t client_cksum_type)
+{
+ __u32 new_cksum;
+ char *msg;
+ cksum_type_t cksum_type;
+
+ if (server_cksum == client_cksum) {
+ CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
+ return 0;
+ }
+
+ cksum_type = cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
+ oa->o_flags : 0);
+ new_cksum = osc_checksum_bulk(nob, page_count, pga, OST_WRITE,
+ cksum_type);
+
+ if (cksum_type != client_cksum_type)
+ msg = "the server did not use the checksum type specified in "
+ "the original request - likely a protocol problem";
+ else if (new_cksum == server_cksum)
+ msg = "changed on the client after we checksummed it - "
+ "likely false positive due to mmap IO (bug 11742)";
+ else if (new_cksum == client_cksum)
+ msg = "changed in transit before arrival at OST";
+ else
+ msg = "changed in transit AND doesn't match the original - "
+ "likely false positive due to mmap IO (bug 11742)";
+
+ LCONSOLE_ERROR_MSG(0x132, "BAD WRITE CHECKSUM: %s: from %s inode "DFID
+ " object "DOSTID" extent ["LPU64"-"LPU64"]\n",
+ msg, libcfs_nid2str(peer->nid),
+ oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
+ oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
+ oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
+ POSTID(&oa->o_oi), pga[0]->off,
+ pga[page_count-1]->off + pga[page_count-1]->count - 1);
+ CERROR("original client csum %x (type %x), server csum %x (type %x), "
+ "client csum now %x\n", client_cksum, client_cksum_type,
+ server_cksum, cksum_type, new_cksum);
+ return 1;
+}
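+
+/*
+ * In short, the diagnosis above recomputes the checksum locally and decides:
+ *  - server cksum type != client cksum type: server used a different algorithm;
+ *  - recomputed == server cksum: data changed on the client after checksumming;
+ *  - recomputed == original client cksum: data changed in transit before the OST;
+ *  - matches neither: changed in transit and no longer matches the original.
+ */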
+
+/* Note rc enters this function as number of bytes transferred */
+static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
+{
+ struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
+ const lnet_process_id_t *peer =
+ &req->rq_import->imp_connection->c_peer;
+ struct client_obd *cli = aa->aa_cli;
+ struct ost_body *body;
+ __u32 client_cksum = 0;
+
+ if (rc < 0 && rc != -EDQUOT) {
+ DEBUG_REQ(D_INFO, req, "Failed request with rc = %d\n", rc);
+ return rc;
+ }
+
+ LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
+ body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+ if (body == NULL) {
+ DEBUG_REQ(D_INFO, req, "Can't unpack body\n");
+ return -EPROTO;
+ }
+
+ /* set/clear over quota flag for a uid/gid */
+ if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
+ body->oa.o_valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) {
+ unsigned int qid[MAXQUOTAS] = { body->oa.o_uid, body->oa.o_gid };
+
+ CDEBUG(D_QUOTA, "setdq for [%u %u] with valid "LPX64", flags %x\n",
+ body->oa.o_uid, body->oa.o_gid, body->oa.o_valid,
+ body->oa.o_flags);
+ osc_quota_setdq(cli, qid, body->oa.o_valid, body->oa.o_flags);
+ }
+
+ osc_update_grant(cli, body);
+
+ if (rc < 0)
+ return rc;
+
+ if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
+ client_cksum = aa->aa_oa->o_cksum; /* save for later */
+
+ if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
+ if (rc > 0) {
+ CERROR("Unexpected +ve rc %d\n", rc);
+ return -EPROTO;
+ }
+ LASSERT(req->rq_bulk->bd_nob == aa->aa_requested_nob);
+
+ if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
+ return -EAGAIN;
+
+ if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
+ check_write_checksum(&body->oa, peer, client_cksum,
+ body->oa.o_cksum, aa->aa_requested_nob,
+ aa->aa_page_count, aa->aa_ppga,
+ cksum_type_unpack(aa->aa_oa->o_flags)))
+ return -EAGAIN;
+
+ rc = check_write_rcs(req, aa->aa_requested_nob, aa->aa_nio_count,
+ aa->aa_page_count, aa->aa_ppga);
+ GOTO(out, rc);
+ }
+
+ /* The rest of this function executes only for OST_READs */
+
+ /* if unwrap_bulk failed, return -EAGAIN to retry */
+ rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
+ if (rc < 0)
+ GOTO(out, rc = -EAGAIN);
+
+ if (rc > aa->aa_requested_nob) {
+ CERROR("Unexpected rc %d (%d requested)\n", rc,
+ aa->aa_requested_nob);
+ return -EPROTO;
+ }
+
+ if (rc != req->rq_bulk->bd_nob_transferred) {
+ CERROR ("Unexpected rc %d (%d transferred)\n",
+ rc, req->rq_bulk->bd_nob_transferred);
+ return (-EPROTO);
+ }
+
+ if (rc < aa->aa_requested_nob)
+ handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);
+
+ if (body->oa.o_valid & OBD_MD_FLCKSUM) {
+ static int cksum_counter;
+ __u32 server_cksum = body->oa.o_cksum;
+ char *via;
+ char *router;
+ cksum_type_t cksum_type;
+
+ cksum_type = cksum_type_unpack(body->oa.o_valid & OBD_MD_FLFLAGS ?
+ body->oa.o_flags : 0);
+ client_cksum = osc_checksum_bulk(rc, aa->aa_page_count,
+ aa->aa_ppga, OST_READ,
+ cksum_type);
+
+ if (peer->nid == req->rq_bulk->bd_sender) {
+ via = router = "";
+ } else {
+ via = " via ";
+ router = libcfs_nid2str(req->rq_bulk->bd_sender);
+ }
+
+ if (server_cksum == ~0 && rc > 0) {
+ CERROR("Protocol error: server %s set the 'checksum' "
+ "bit, but didn't send a checksum. Not fatal, "
+ "but please notify on http://bugs.whamcloud.com/\n",
+ libcfs_nid2str(peer->nid));
+ } else if (server_cksum != client_cksum) {
+ LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
+ "%s%s%s inode "DFID" object "DOSTID
+ " extent ["LPU64"-"LPU64"]\n",
+ req->rq_import->imp_obd->obd_name,
+ libcfs_nid2str(peer->nid),
+ via, router,
+ body->oa.o_valid & OBD_MD_FLFID ?
+ body->oa.o_parent_seq : (__u64)0,
+ body->oa.o_valid & OBD_MD_FLFID ?
+ body->oa.o_parent_oid : 0,
+ body->oa.o_valid & OBD_MD_FLFID ?
+ body->oa.o_parent_ver : 0,
+ POSTID(&body->oa.o_oi),
+ aa->aa_ppga[0]->off,
+ aa->aa_ppga[aa->aa_page_count-1]->off +
+ aa->aa_ppga[aa->aa_page_count-1]->count -
+ 1);
+ CERROR("client %x, server %x, cksum_type %x\n",
+ client_cksum, server_cksum, cksum_type);
+ cksum_counter = 0;
+ aa->aa_oa->o_cksum = client_cksum;
+ rc = -EAGAIN;
+ } else {
+ cksum_counter++;
+ CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
+ rc = 0;
+ }
+ } else if (unlikely(client_cksum)) {
+ static int cksum_missed;
+
+ cksum_missed++;
+ if ((cksum_missed & (-cksum_missed)) == cksum_missed)
+ CERROR("Checksum %u requested from %s but not sent\n",
+ cksum_missed, libcfs_nid2str(peer->nid));
+ } else {
+ rc = 0;
+ }
+out:
+ if (rc >= 0)
+ lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
+ aa->aa_oa, &body->oa);
+
+ return rc;
+}
+
+static int osc_brw_internal(int cmd, struct obd_export *exp, struct obdo *oa,
+ struct lov_stripe_md *lsm,
+ obd_count page_count, struct brw_page **pga,
+ struct obd_capa *ocapa)
+{
+ struct ptlrpc_request *req;
+ int rc;
+ wait_queue_head_t waitq;
+ int generation, resends = 0;
+ struct l_wait_info lwi;
+
+ init_waitqueue_head(&waitq);
+ generation = exp->exp_obd->u.cli.cl_import->imp_generation;
+
+restart_bulk:
+ rc = osc_brw_prep_request(cmd, &exp->exp_obd->u.cli, oa, lsm,
+ page_count, pga, &req, ocapa, 0, resends);
+ if (rc != 0)
+ return rc;
+
+ if (resends) {
+ req->rq_generation_set = 1;
+ req->rq_import_generation = generation;
+ req->rq_sent = cfs_time_current_sec() + resends;
+ }
+
+ rc = ptlrpc_queue_wait(req);
+
+ if (rc == -ETIMEDOUT && req->rq_resend) {
+ DEBUG_REQ(D_HA, req, "BULK TIMEOUT");
+ ptlrpc_req_finished(req);
+ goto restart_bulk;
+ }
+
+ rc = osc_brw_fini_request(req, rc);
+
+ ptlrpc_req_finished(req);
+ /* When the server returns -EINPROGRESS, the client should always retry
+ * regardless of the number of times the bulk was already resent. */
+ if (osc_recoverable_error(rc)) {
+ resends++;
+ if (rc != -EINPROGRESS &&
+ !client_should_resend(resends, &exp->exp_obd->u.cli)) {
+ CERROR("%s: too many resend retries for object: "
+ ""DOSTID", rc = %d.\n", exp->exp_obd->obd_name,
+ POSTID(&oa->o_oi), rc);
+ goto out;
+ }
+ if (generation !=
+ exp->exp_obd->u.cli.cl_import->imp_generation) {
+ CDEBUG(D_HA, "%s: resend cross eviction for object: "
+ ""DOSTID", rc = %d.\n", exp->exp_obd->obd_name,
+ POSTID(&oa->o_oi), rc);
+ goto out;
+ }
+
+ lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(resends), NULL, NULL,
+ NULL);
+ l_wait_event(waitq, 0, &lwi);
+
+ goto restart_bulk;
+ }
+out:
+ if (rc == -EAGAIN || rc == -EINPROGRESS)
+ rc = -EIO;
+ return rc;
+}
+
+static int osc_brw_redo_request(struct ptlrpc_request *request,
+ struct osc_brw_async_args *aa, int rc)
+{
+ struct ptlrpc_request *new_req;
+ struct osc_brw_async_args *new_aa;
+ struct osc_async_page *oap;
+
+ DEBUG_REQ(rc == -EINPROGRESS ? D_RPCTRACE : D_ERROR, request,
+ "redo for recoverable error %d", rc);
+
+ rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
+ OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
+ aa->aa_cli, aa->aa_oa,
+ NULL /* lsm unused by osc currently */,
+ aa->aa_page_count, aa->aa_ppga,
+ &new_req, aa->aa_ocapa, 0, 1);
+ if (rc)
+ return rc;
+
+ list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
+ if (oap->oap_request != NULL) {
+ LASSERTF(request == oap->oap_request,
+ "request %p != oap_request %p\n",
+ request, oap->oap_request);
+ if (oap->oap_interrupted) {
+ ptlrpc_req_finished(new_req);
+ return -EINTR;
+ }
+ }
+ }
+ /* The new request takes over pga and oaps from the old request.
+ * Note that copying a list_head doesn't work; it needs to be moved... */
+ aa->aa_resends++;
+ new_req->rq_interpret_reply = request->rq_interpret_reply;
+ new_req->rq_async_args = request->rq_async_args;
+ /* cap the resend delay to the current request timeout; this is similar
+ * to what ptlrpc does (see after_reply()) */
+ if (aa->aa_resends > new_req->rq_timeout)
+ new_req->rq_sent = cfs_time_current_sec() + new_req->rq_timeout;
+ else
+ new_req->rq_sent = cfs_time_current_sec() + aa->aa_resends;
+ new_req->rq_generation_set = 1;
+ new_req->rq_import_generation = request->rq_import_generation;
+
+ new_aa = ptlrpc_req_async_args(new_req);
+
+ INIT_LIST_HEAD(&new_aa->aa_oaps);
+ list_splice_init(&aa->aa_oaps, &new_aa->aa_oaps);
+ INIT_LIST_HEAD(&new_aa->aa_exts);
+ list_splice_init(&aa->aa_exts, &new_aa->aa_exts);
+ new_aa->aa_resends = aa->aa_resends;
+
+ list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
+ if (oap->oap_request) {
+ ptlrpc_req_finished(oap->oap_request);
+ oap->oap_request = ptlrpc_request_addref(new_req);
+ }
+ }
+
+ new_aa->aa_ocapa = aa->aa_ocapa;
+ aa->aa_ocapa = NULL;
+
+ /* XXX: This code will run into problems if we ever support adding
+ * a series of BRW RPCs into a self-defined ptlrpc_request_set and
+ * waiting for all of them to finish. We should inherit the request
+ * set from the old request. */
+ ptlrpcd_add_req(new_req, PDL_POLICY_SAME, -1);
+
+ DEBUG_REQ(D_INFO, new_req, "new request");
+ return 0;
+}
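+
+/*
+ * For example (illustrative numbers only): with rq_timeout = 30 s the delay
+ * before the resend grows linearly with aa_resends -- 1 s, 2 s, 3 s, ... --
+ * and once aa_resends exceeds 30 it stays capped at the 30 s request timeout.
+ */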
+
+/*
+ * Ugh, we want disk allocation on the target to happen in offset order. We'll
+ * follow Sedgewick's advice and stick to the dead simple shellsort -- it'll do
+ * fine for our small page arrays and doesn't require allocation. It's an
+ * insertion sort that swaps elements that are strides apart, shrinking the
+ * stride down until it's 1 and the array is sorted.
+ */
+static void sort_brw_pages(struct brw_page **array, int num)
+{
+ int stride, i, j;
+ struct brw_page *tmp;
+
+ if (num == 1)
+ return;
+ for (stride = 1; stride < num; stride = (stride * 3) + 1)
+ ;
+
+ do {
+ stride /= 3;
+ for (i = stride; i < num; i++) {
+ tmp = array[i];
+ j = i;
+ while (j >= stride && array[j - stride]->off > tmp->off) {
+ array[j] = array[j - stride];
+ j -= stride;
+ }
+ array[j] = tmp;
+ }
+ } while (stride > 1);
+}
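+
+/*
+ * For example, with num = 64 the stride loop above visits 1, 4, 13, 40 and
+ * stops at 121; the sorting passes then run with strides 40, 13, 4 and
+ * finally 1, at which point the array is fully sorted.
+ */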
+
+static obd_count max_unfragmented_pages(struct brw_page **pg, obd_count pages)
+{
+ int count = 1;
+ int offset;
+ int i = 0;
+
+ LASSERT(pages > 0);
+ offset = pg[i]->off & ~CFS_PAGE_MASK;
+
+ for (;;) {
+ pages--;
+ if (pages == 0) /* that's all */
+ return count;
+
+ if (offset + pg[i]->count < PAGE_CACHE_SIZE)
+ return count; /* doesn't end on page boundary */
+
+ i++;
+ offset = pg[i]->off & ~CFS_PAGE_MASK;
+ if (offset != 0) /* doesn't start on page boundary */
+ return count;
+
+ count++;
+ }
+}
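+
+/*
+ * For example, with 4096-byte pages a sorted run covering [0-4095],
+ * [4096-8191], [8192-9215] counts as 3 unfragmented pages, since only the
+ * final page of a run may be partial; a run covering [0-2047], [4096-8191]
+ * returns 1, because the first page does not extend to its page boundary.
+ */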
+
+static struct brw_page **osc_build_ppga(struct brw_page *pga, obd_count count)
+{
+ struct brw_page **ppga;
+ int i;
+
+ OBD_ALLOC(ppga, sizeof(*ppga) * count);
+ if (ppga == NULL)
+ return NULL;
+
+ for (i = 0; i < count; i++)
+ ppga[i] = pga + i;
+ return ppga;
+}
+
+static void osc_release_ppga(struct brw_page **ppga, obd_count count)
+{
+ LASSERT(ppga != NULL);
+ OBD_FREE(ppga, sizeof(*ppga) * count);
+}
+
+static int osc_brw(int cmd, struct obd_export *exp, struct obd_info *oinfo,
+ obd_count page_count, struct brw_page *pga,
+ struct obd_trans_info *oti)
+{
+ struct obdo *saved_oa = NULL;
+ struct brw_page **ppga, **orig;
+ struct obd_import *imp = class_exp2cliimp(exp);
+ struct client_obd *cli;
+ int rc, page_count_orig;
+
+ LASSERT((imp != NULL) && (imp->imp_obd != NULL));
+ cli = &imp->imp_obd->u.cli;
+
+ if (cmd & OBD_BRW_CHECK) {
+ /* The caller just wants to know if there's a chance that this
+ * I/O can succeed */
+
+ if (imp->imp_invalid)
+ return -EIO;
+ return 0;
+ }
+
+ /* test_brw with a failed create can trip this, maybe others. */
+ LASSERT(cli->cl_max_pages_per_rpc);
+
+ rc = 0;
+
+ orig = ppga = osc_build_ppga(pga, page_count);
+ if (ppga == NULL)
+ return -ENOMEM;
+ page_count_orig = page_count;
+
+ sort_brw_pages(ppga, page_count);
+ while (page_count) {
+ obd_count pages_per_brw;
+
+ if (page_count > cli->cl_max_pages_per_rpc)
+ pages_per_brw = cli->cl_max_pages_per_rpc;
+ else
+ pages_per_brw = page_count;
+
+ pages_per_brw = max_unfragmented_pages(ppga, pages_per_brw);
+
+ if (saved_oa != NULL) {
+ /* restore previously saved oa */
+ *oinfo->oi_oa = *saved_oa;
+ } else if (page_count > pages_per_brw) {
+ /* save a copy of oa (brw will clobber it) */
+ OBDO_ALLOC(saved_oa);
+ if (saved_oa == NULL)
+ GOTO(out, rc = -ENOMEM);
+ *saved_oa = *oinfo->oi_oa;
+ }
+
+ rc = osc_brw_internal(cmd, exp, oinfo->oi_oa, oinfo->oi_md,
+ pages_per_brw, ppga, oinfo->oi_capa);
+
+ if (rc != 0)
+ break;
+
+ page_count -= pages_per_brw;
+ ppga += pages_per_brw;
+ }
+
+out:
+ osc_release_ppga(orig, page_count_orig);
+
+ if (saved_oa != NULL)
+ OBDO_FREE(saved_oa);
+
+ return rc;
+}
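+
+/*
+ * For example (illustrative numbers only): with cl_max_pages_per_rpc = 256,
+ * a 300-page request is issued as one BRW of at most 256 pages followed by
+ * the remainder; max_unfragmented_pages() may shrink a chunk further so that
+ * only its final page is partial.
+ */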
+
+static int brw_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req, void *data, int rc)
+{
+ struct osc_brw_async_args *aa = data;
+ struct osc_extent *ext;
+ struct osc_extent *tmp;
+ struct cl_object *obj = NULL;
+ struct client_obd *cli = aa->aa_cli;
+
+ rc = osc_brw_fini_request(req, rc);
+ CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
+ /* When the server returns -EINPROGRESS, the client should always retry
+ * regardless of the number of times the bulk was already resent. */
+ if (osc_recoverable_error(rc)) {
+ if (req->rq_import_generation !=
+ req->rq_import->imp_generation) {
+ CDEBUG(D_HA, "%s: resend cross eviction for object: "
+ ""DOSTID", rc = %d.\n",
+ req->rq_import->imp_obd->obd_name,
+ POSTID(&aa->aa_oa->o_oi), rc);
+ } else if (rc == -EINPROGRESS ||
+ client_should_resend(aa->aa_resends, aa->aa_cli)) {
+ rc = osc_brw_redo_request(req, aa, rc);
+ } else {
+ CERROR("%s: too many resent retries for object: "
+ ""LPU64":"LPU64", rc = %d.\n",
+ req->rq_import->imp_obd->obd_name,
+ POSTID(&aa->aa_oa->o_oi), rc);
+ }
+
+ if (rc == 0)
+ return 0;
+ else if (rc == -EAGAIN || rc == -EINPROGRESS)
+ rc = -EIO;
+ }
+
+ if (aa->aa_ocapa) {
+ capa_put(aa->aa_ocapa);
+ aa->aa_ocapa = NULL;
+ }
+
+ list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
+ if (obj == NULL && rc == 0) {
+ obj = osc2cl(ext->oe_obj);
+ cl_object_get(obj);
+ }
+
+ list_del_init(&ext->oe_link);
+ osc_extent_finish(env, ext, 1, rc);
+ }
+ LASSERT(list_empty(&aa->aa_exts));
+ LASSERT(list_empty(&aa->aa_oaps));
+
+ if (obj != NULL) {
+ struct obdo *oa = aa->aa_oa;
+ struct cl_attr *attr = &osc_env_info(env)->oti_attr;
+ unsigned long valid = 0;
+
+ LASSERT(rc == 0);
+ if (oa->o_valid & OBD_MD_FLBLOCKS) {
+ attr->cat_blocks = oa->o_blocks;
+ valid |= CAT_BLOCKS;
+ }
+ if (oa->o_valid & OBD_MD_FLMTIME) {
+ attr->cat_mtime = oa->o_mtime;
+ valid |= CAT_MTIME;
+ }
+ if (oa->o_valid & OBD_MD_FLATIME) {
+ attr->cat_atime = oa->o_atime;
+ valid |= CAT_ATIME;
+ }
+ if (oa->o_valid & OBD_MD_FLCTIME) {
+ attr->cat_ctime = oa->o_ctime;
+ valid |= CAT_CTIME;
+ }
+ if (valid != 0) {
+ cl_object_attr_lock(obj);
+ cl_object_attr_set(env, obj, attr, valid);
+ cl_object_attr_unlock(obj);
+ }
+ cl_object_put(env, obj);
+ }
+ OBDO_FREE(aa->aa_oa);
+
+ cl_req_completion(env, aa->aa_clerq, rc < 0 ? rc :
+ req->rq_bulk->bd_nob_transferred);
+ osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
+ ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred);
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
+ * is called so we know whether to go to sync BRWs or wait for more
+ * RPCs to complete */
+ if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
+ cli->cl_w_in_flight--;
+ else
+ cli->cl_r_in_flight--;
+ osc_wake_cache_waiters(cli);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+
+ osc_io_unplug(env, cli, NULL, PDL_POLICY_SAME);
+ return rc;
+}
+
+/**
+ * Build an RPC from the list of extents @ext_list. The caller must ensure
+ * that the total number of pages in this list does not exceed the maximum
+ * pages per RPC. Extents in the list must be in OES_RPC state.
+ */
+int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
+ struct list_head *ext_list, int cmd, pdl_policy_t pol)
+{
+ struct ptlrpc_request *req = NULL;
+ struct osc_extent *ext;
+ struct brw_page **pga = NULL;
+ struct osc_brw_async_args *aa = NULL;
+ struct obdo *oa = NULL;
+ struct osc_async_page *oap;
+ struct osc_async_page *tmp;
+ struct cl_req *clerq = NULL;
+ enum cl_req_type crt = (cmd & OBD_BRW_WRITE) ? CRT_WRITE :
+ CRT_READ;
+ struct ldlm_lock *lock = NULL;
+ struct cl_req_attr *crattr = NULL;
+ obd_off starting_offset = OBD_OBJECT_EOF;
+ obd_off ending_offset = 0;
+ int mpflag = 0;
+ int mem_tight = 0;
+ int page_count = 0;
+ int i;
+ int rc;
+ LIST_HEAD(rpc_list);
+
+ LASSERT(!list_empty(ext_list));
+
+ /* add pages into rpc_list to build BRW rpc */
+ list_for_each_entry(ext, ext_list, oe_link) {
+ LASSERT(ext->oe_state == OES_RPC);
+ mem_tight |= ext->oe_memalloc;
+ list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
+ ++page_count;
+ list_add_tail(&oap->oap_rpc_item, &rpc_list);
+ if (starting_offset > oap->oap_obj_off)
+ starting_offset = oap->oap_obj_off;
+ else
+ LASSERT(oap->oap_page_off == 0);
+ if (ending_offset < oap->oap_obj_off + oap->oap_count)
+ ending_offset = oap->oap_obj_off +
+ oap->oap_count;
+ else
+ LASSERT(oap->oap_page_off + oap->oap_count ==
+ PAGE_CACHE_SIZE);
+ }
+ }
+
+ if (mem_tight)
+ mpflag = cfs_memory_pressure_get_and_set();
+
+ OBD_ALLOC(crattr, sizeof(*crattr));
+ if (crattr == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ OBD_ALLOC(pga, sizeof(*pga) * page_count);
+ if (pga == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ OBDO_ALLOC(oa);
+ if (oa == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ i = 0;
+ list_for_each_entry(oap, &rpc_list, oap_rpc_item) {
+ struct cl_page *page = oap2cl_page(oap);
+ if (clerq == NULL) {
+ clerq = cl_req_alloc(env, page, crt,
+ 1 /* only 1-object rpcs for now */);
+ if (IS_ERR(clerq))
+ GOTO(out, rc = PTR_ERR(clerq));
+ lock = oap->oap_ldlm_lock;
+ }
+ if (mem_tight)
+ oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
+ pga[i] = &oap->oap_brw_page;
+ pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
+ CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
+ pga[i]->pg, page_index(oap->oap_page), oap,
+ pga[i]->flag);
+ i++;
+ cl_req_page_add(env, clerq, page);
+ }
+
+ /* always get the data for the obdo for the rpc */
+ LASSERT(clerq != NULL);
+ crattr->cra_oa = oa;
+ cl_req_attr_set(env, clerq, crattr, ~0ULL);
+ if (lock) {
+ oa->o_handle = lock->l_remote_handle;
+ oa->o_valid |= OBD_MD_FLHANDLE;
+ }
+
+ rc = cl_req_prep(env, clerq);
+ if (rc != 0) {
+ CERROR("cl_req_prep failed: %d\n", rc);
+ GOTO(out, rc);
+ }
+
+ sort_brw_pages(pga, page_count);
+ rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count,
+ pga, &req, crattr->cra_capa, 1, 0);
+ if (rc != 0) {
+ CERROR("prep_req failed: %d\n", rc);
+ GOTO(out, rc);
+ }
+
+ req->rq_interpret_reply = brw_interpret;
+
+ if (mem_tight != 0)
+ req->rq_memalloc = 1;
+
+ /* We need to update the timestamps after the request is built in case
+ * we race with setattr (locally or in the queue at the OST). If the OST
+ * gets a later setattr before an earlier BRW (as determined by the
+ * request xid), the OST will not use the BRW timestamps. Sadly, there is
+ * no obvious way to do this in a single call. bug 10150 */
+ cl_req_attr_set(env, clerq, crattr,
+ OBD_MD_FLMTIME|OBD_MD_FLCTIME|OBD_MD_FLATIME);
+
+ lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
+
+ CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+ aa = ptlrpc_req_async_args(req);
+ INIT_LIST_HEAD(&aa->aa_oaps);
+ list_splice_init(&rpc_list, &aa->aa_oaps);
+ INIT_LIST_HEAD(&aa->aa_exts);
+ list_splice_init(ext_list, &aa->aa_exts);
+ aa->aa_clerq = clerq;
+
+ /* queued sync pages can be torn down while the pages
+ * are between the pending list and the RPC */
+ tmp = NULL;
+ list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
+ /* only one oap gets a request reference */
+ if (tmp == NULL)
+ tmp = oap;
+ if (oap->oap_interrupted && !req->rq_intr) {
+ CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
+ oap, req);
+ ptlrpc_mark_interrupted(req);
+ }
+ }
+ if (tmp != NULL)
+ tmp->oap_request = ptlrpc_request_addref(req);
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ starting_offset >>= PAGE_CACHE_SHIFT;
+ if (cmd == OBD_BRW_READ) {
+ cli->cl_r_in_flight++;
+ lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
+ lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
+ lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
+ starting_offset + 1);
+ } else {
+ cli->cl_w_in_flight++;
+ lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
+ lprocfs_oh_tally(&cli->cl_write_rpc_hist, cli->cl_w_in_flight);
+ lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
+ starting_offset + 1);
+ }
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+
+ DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %dr/%dw in flight",
+ page_count, aa, cli->cl_r_in_flight,
+ cli->cl_w_in_flight);
+
+ /* XXX: Maybe the caller can check the RPC bulk descriptor to
+ * see which CPU/NUMA node the majority of pages were allocated
+ * on, and try to assign the async RPC to the CPU core
+ * (PDL_POLICY_PREFERRED) to reduce cross-CPU memory traffic.
+ *
+ * On the other hand, we expect that multiple ptlrpcd
+ * threads and the initial write sponsor can run in parallel,
+ * especially when data checksumming is enabled, which is a
+ * CPU-bound operation that a single ptlrpcd thread cannot
+ * process in time. So having more ptlrpcd threads share the
+ * BRW load (with PDL_POLICY_ROUND) seems better.
+ */
+ ptlrpcd_add_req(req, pol, -1);
+ rc = 0;
+
+out:
+ if (mem_tight != 0)
+ cfs_memory_pressure_restore(mpflag);
+
+ if (crattr != NULL) {
+ capa_put(crattr->cra_capa);
+ OBD_FREE(crattr, sizeof(*crattr));
+ }
+
+ if (rc != 0) {
+ LASSERT(req == NULL);
+
+ if (oa)
+ OBDO_FREE(oa);
+ if (pga)
+ OBD_FREE(pga, sizeof(*pga) * page_count);
+ /* this should happen rarely and is pretty bad; it makes the
+ * pending list not follow the dirty order */
+ while (!list_empty(ext_list)) {
+ ext = list_entry(ext_list->next, struct osc_extent,
+ oe_link);
+ list_del_init(&ext->oe_link);
+ osc_extent_finish(env, ext, 0, rc);
+ }
+ if (clerq && !IS_ERR(clerq))
+ cl_req_completion(env, clerq, rc);
+ }
+ return rc;
+}
+
+static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
+ struct ldlm_enqueue_info *einfo)
+{
+ void *data = einfo->ei_cbdata;
+ int set = 0;
+
+ LASSERT(lock != NULL);
+ LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl);
+ LASSERT(lock->l_resource->lr_type == einfo->ei_type);
+ LASSERT(lock->l_completion_ast == einfo->ei_cb_cp);
+ LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
+
+ lock_res_and_lock(lock);
+ spin_lock(&osc_ast_guard);
+
+ if (lock->l_ast_data == NULL)
+ lock->l_ast_data = data;
+ if (lock->l_ast_data == data)
+ set = 1;
+
+ spin_unlock(&osc_ast_guard);
+ unlock_res_and_lock(lock);
+
+ return set;
+}
+
+static int osc_set_data_with_check(struct lustre_handle *lockh,
+ struct ldlm_enqueue_info *einfo)
+{
+ struct ldlm_lock *lock = ldlm_handle2lock(lockh);
+ int set = 0;
+
+ if (lock != NULL) {
+ set = osc_set_lock_data_with_check(lock, einfo);
+ LDLM_LOCK_PUT(lock);
+ } else
+ CERROR("lockh %p, data %p - client evicted?\n",
+ lockh, einfo->ei_cbdata);
+ return set;
+}
+
+static int osc_change_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
+ ldlm_iterator_t replace, void *data)
+{
+ struct ldlm_res_id res_id;
+ struct obd_device *obd = class_exp2obd(exp);
+
+ ostid_build_res_name(&lsm->lsm_oi, &res_id);
+ ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
+ return 0;
+}
+
+/* Find any ldlm lock of the inode in osc.
+ * Return 0 if no lock is found,
+ * 1 if one is found,
+ * < 0 on error. */
+static int osc_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
+ ldlm_iterator_t replace, void *data)
+{
+ struct ldlm_res_id res_id;
+ struct obd_device *obd = class_exp2obd(exp);
+ int rc = 0;
+
+ ostid_build_res_name(&lsm->lsm_oi, &res_id);
+ rc = ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
+ if (rc == LDLM_ITER_STOP)
+ return 1;
+ if (rc == LDLM_ITER_CONTINUE)
+ return 0;
+ return rc;
+}
+
+static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb,
+ obd_enqueue_update_f upcall, void *cookie,
+ __u64 *flags, int agl, int rc)
+{
+ int intent = *flags & LDLM_FL_HAS_INTENT;
+
+ if (intent) {
+ /* The request was created before ldlm_cli_enqueue call. */
+ if (rc == ELDLM_LOCK_ABORTED) {
+ struct ldlm_reply *rep;
+ rep = req_capsule_server_get(&req->rq_pill,
+ &RMF_DLM_REP);
+
+ LASSERT(rep != NULL);
+ rep->lock_policy_res1 =
+ ptlrpc_status_ntoh(rep->lock_policy_res1);
+ if (rep->lock_policy_res1)
+ rc = rep->lock_policy_res1;
+ }
+ }
+
+ if ((intent != 0 && rc == ELDLM_LOCK_ABORTED && agl == 0) ||
+ (rc == 0)) {
+ *flags |= LDLM_FL_LVB_READY;
+ CDEBUG(D_INODE,"got kms "LPU64" blocks "LPU64" mtime "LPU64"\n",
+ lvb->lvb_size, lvb->lvb_blocks, lvb->lvb_mtime);
+ }
+
+ /* Call the update callback. */
+ rc = (*upcall)(cookie, rc);
+ return rc;
+}
+
+static int osc_enqueue_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
+ struct osc_enqueue_args *aa, int rc)
+{
+ struct ldlm_lock *lock;
+ struct lustre_handle handle;
+ __u32 mode;
+ struct ost_lvb *lvb;
+ __u32 lvb_len;
+ __u64 *flags = aa->oa_flags;
+
+ /* Make a local copy of a lock handle and a mode, because aa->oa_*
+ * might be freed anytime after lock upcall has been called. */
+ lustre_handle_copy(&handle, aa->oa_lockh);
+ mode = aa->oa_ei->ei_mode;
+
+ /* ldlm_cli_enqueue is holding a reference on the lock, so it must
+ * be valid. */
+ lock = ldlm_handle2lock(&handle);
+
+ /* Take an additional reference so that a blocking AST that
+ * ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed
+ * to arrive after an upcall has been executed by
+ * osc_enqueue_fini(). */
+ ldlm_lock_addref(&handle, mode);
+
+ /* Let CP AST to grant the lock first. */
+ OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
+
+ if (aa->oa_agl && rc == ELDLM_LOCK_ABORTED) {
+ lvb = NULL;
+ lvb_len = 0;
+ } else {
+ lvb = aa->oa_lvb;
+ lvb_len = sizeof(*aa->oa_lvb);
+ }
+
+ /* Complete obtaining the lock procedure. */
+ rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1,
+ mode, flags, lvb, lvb_len, &handle, rc);
+ /* Complete osc stuff. */
+ rc = osc_enqueue_fini(req, aa->oa_lvb, aa->oa_upcall, aa->oa_cookie,
+ flags, aa->oa_agl, rc);
+
+ OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
+
+ /* Release the lock for async request. */
+ if (lustre_handle_is_used(&handle) && rc == ELDLM_OK)
+ /*
+ * Releases a reference taken by ldlm_cli_enqueue(), if it is
+ * not already released by
+ * ldlm_cli_enqueue_fini()->failed_lock_cleanup()
+ */
+ ldlm_lock_decref(&handle, mode);
+
+ LASSERTF(lock != NULL, "lockh %p, req %p, aa %p - client evicted?\n",
+ aa->oa_lockh, req, aa);
+ ldlm_lock_decref(&handle, mode);
+ LDLM_LOCK_PUT(lock);
+ return rc;
+}
+
+void osc_update_enqueue(struct lustre_handle *lov_lockhp,
+ struct lov_oinfo *loi, int flags,
+ struct ost_lvb *lvb, __u32 mode, int rc)
+{
+ struct ldlm_lock *lock = ldlm_handle2lock(lov_lockhp);
+
+ if (rc == ELDLM_OK) {
+ __u64 tmp;
+
+ LASSERT(lock != NULL);
+ loi->loi_lvb = *lvb;
+ tmp = loi->loi_lvb.lvb_size;
+ /* Extend KMS up to the end of this lock and no further.
+ * A lock on [x, y] means a KMS of up to y + 1 bytes! */
+ if (tmp > lock->l_policy_data.l_extent.end)
+ tmp = lock->l_policy_data.l_extent.end + 1;
+ if (tmp >= loi->loi_kms) {
+ LDLM_DEBUG(lock, "lock acquired, setting rss="LPU64
+ ", kms="LPU64, loi->loi_lvb.lvb_size, tmp);
+ loi_kms_set(loi, tmp);
+ } else {
+ LDLM_DEBUG(lock, "lock acquired, setting rss="
+ LPU64"; leaving kms="LPU64", end="LPU64,
+ loi->loi_lvb.lvb_size, loi->loi_kms,
+ lock->l_policy_data.l_extent.end);
+ }
+ ldlm_lock_allow_match(lock);
+ } else if (rc == ELDLM_LOCK_ABORTED && (flags & LDLM_FL_HAS_INTENT)) {
+ LASSERT(lock != NULL);
+ loi->loi_lvb = *lvb;
+ ldlm_lock_allow_match(lock);
+ CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
+ " kms="LPU64"\n", loi->loi_lvb.lvb_size, loi->loi_kms);
+ rc = ELDLM_OK;
+ }
+
+ if (lock != NULL) {
+ if (rc != ELDLM_OK)
+ ldlm_lock_fail_match(lock);
+
+ LDLM_LOCK_PUT(lock);
+ }
+}
+EXPORT_SYMBOL(osc_update_enqueue);
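+
+/*
+ * The KMS clamp in osc_update_enqueue() above can be read as the helper below
+ * (hypothetical, for illustration only; the driver keeps the logic inline).
+ * A lock on [start, end] can only vouch for end + 1 bytes:
+ *
+ * static inline __u64 kms_candidate(__u64 lvb_size, __u64 lock_end)
+ * {
+ * return lvb_size > lock_end ? lock_end + 1 : lvb_size;
+ * }
+ *
+ * e.g. lvb_size = 1048576 under a lock on [0, 65535] gives 65536, while
+ * lvb_size = 4096 under the same lock gives 4096. osc_update_enqueue() then
+ * only raises loi_kms when this candidate is not below the current value.
+ */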
+
+struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
+
+/* When enqueuing asynchronously, locks are not ordered, so we can obtain a
+ * lock from the 2nd OSC before a lock from the 1st one. This does not deadlock
+ * with other synchronous requests; however, keeping some locks while trying to
+ * obtain others may take a considerable amount of time in the case of OST
+ * failure, and when other sync requests do not get the lock released by a
+ * client, the client is excluded from the cluster -- such scenarios make life
+ * difficult, so release locks just after they are obtained. */
+int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
+ __u64 *flags, ldlm_policy_data_t *policy,
+ struct ost_lvb *lvb, int kms_valid,
+ obd_enqueue_update_f upcall, void *cookie,
+ struct ldlm_enqueue_info *einfo,
+ struct lustre_handle *lockh,
+ struct ptlrpc_request_set *rqset, int async, int agl)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct ptlrpc_request *req = NULL;
+ int intent = *flags & LDLM_FL_HAS_INTENT;
+ int match_lvb = (agl != 0 ? 0 : LDLM_FL_LVB_READY);
+ ldlm_mode_t mode;
+ int rc;
+
+ /* Filesystem lock extents are extended to page boundaries so that
+ * dealing with the page cache is a little smoother. */
+ policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
+ policy->l_extent.end |= ~CFS_PAGE_MASK;
+
+ /*
+ * kms is not valid when either object is completely fresh (so that no
+ * locks are cached), or object was evicted. In the latter case cached
+ * lock cannot be used, because it would prime inode state with
+ * potentially stale LVB.
+ */
+ if (!kms_valid)
+ goto no_match;
+
+ /* Next, search for already existing extent locks that will cover us */
+ /* If we're trying to read, we also search for an existing PW lock. The
+ * VFS and page cache already protect us locally, so lots of readers/
+ * writers can share a single PW lock.
+ *
+ * There are problems with conversion deadlocks, so instead of
+ * converting a read lock to a write lock, we'll just enqueue a new
+ * one.
+ *
+ * At some point we should cancel the read lock instead of making them
+ * send us a blocking callback, but there are problems with canceling
+ * locks out from other users right now, too. */
+ mode = einfo->ei_mode;
+ if (einfo->ei_mode == LCK_PR)
+ mode |= LCK_PW;
+ mode = ldlm_lock_match(obd->obd_namespace, *flags | match_lvb, res_id,
+ einfo->ei_type, policy, mode, lockh, 0);
+ if (mode) {
+ struct ldlm_lock *matched = ldlm_handle2lock(lockh);
+
+ if ((agl != 0) && !(matched->l_flags & LDLM_FL_LVB_READY)) {
+ /* For AGL, if the enqueue RPC is sent but the lock is not
+ * granted, then skip processing this stripe.
+ * Return -ECANCELED to tell the caller. */
+ ldlm_lock_decref(lockh, mode);
+ LDLM_LOCK_PUT(matched);
+ return -ECANCELED;
+ } else if (osc_set_lock_data_with_check(matched, einfo)) {
+ *flags |= LDLM_FL_LVB_READY;
+ /* addref the lock only for non-async requests and when a PW
+ * lock is matched whereas we asked for PR. */
+ if (!rqset && einfo->ei_mode != mode)
+ ldlm_lock_addref(lockh, LCK_PR);
+ if (intent) {
+ /* I would like to be able to ASSERT here that
+ * rss <= kms, but I can't, for reasons which
+ * are explained in lov_enqueue() */
+ }
+
+ /* We already have a lock, and it's referenced.
+ *
+ * At this point, the cl_lock::cll_state is CLS_QUEUING,
+ * AGL upcall may change it to CLS_HELD directly. */
+ (*upcall)(cookie, ELDLM_OK);
+
+ if (einfo->ei_mode != mode)
+ ldlm_lock_decref(lockh, LCK_PW);
+ else if (rqset)
+ /* For async requests, decref the lock. */
+ ldlm_lock_decref(lockh, einfo->ei_mode);
+ LDLM_LOCK_PUT(matched);
+ return ELDLM_OK;
+ } else {
+ ldlm_lock_decref(lockh, mode);
+ LDLM_LOCK_PUT(matched);
+ }
+ }
+
+ no_match:
+ if (intent) {
+ LIST_HEAD(cancels);
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+ &RQF_LDLM_ENQUEUE_LVB);
+ if (req == NULL)
+ return -ENOMEM;
+
+ rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
+ sizeof(*lvb));
+ ptlrpc_request_set_replen(req);
+ }
+
+ /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
+ *flags &= ~LDLM_FL_BLOCK_GRANTED;
+
+ rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
+ sizeof(*lvb), LVB_T_OST, lockh, async);
+ if (rqset) {
+ if (!rc) {
+ struct osc_enqueue_args *aa;
+ CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+ aa = ptlrpc_req_async_args(req);
+ aa->oa_ei = einfo;
+ aa->oa_exp = exp;
+ aa->oa_flags = flags;
+ aa->oa_upcall = upcall;
+ aa->oa_cookie = cookie;
+ aa->oa_lvb = lvb;
+ aa->oa_lockh = lockh;
+ aa->oa_agl = !!agl;
+
+ req->rq_interpret_reply =
+ (ptlrpc_interpterer_t)osc_enqueue_interpret;
+ if (rqset == PTLRPCD_SET)
+ ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+ else
+ ptlrpc_set_add_req(rqset, req);
+ } else if (intent) {
+ ptlrpc_req_finished(req);
+ }
+ return rc;
+ }
+
+ rc = osc_enqueue_fini(req, lvb, upcall, cookie, flags, agl, rc);
+ if (intent)
+ ptlrpc_req_finished(req);
+
+ return rc;
+}
+
+static int osc_enqueue(struct obd_export *exp, struct obd_info *oinfo,
+ struct ldlm_enqueue_info *einfo,
+ struct ptlrpc_request_set *rqset)
+{
+ struct ldlm_res_id res_id;
+ int rc;
+
+ ostid_build_res_name(&oinfo->oi_md->lsm_oi, &res_id);
+ rc = osc_enqueue_base(exp, &res_id, &oinfo->oi_flags, &oinfo->oi_policy,
+ &oinfo->oi_md->lsm_oinfo[0]->loi_lvb,
+ oinfo->oi_md->lsm_oinfo[0]->loi_kms_valid,
+ oinfo->oi_cb_up, oinfo, einfo, oinfo->oi_lockh,
+ rqset, rqset != NULL, 0);
+ return rc;
+}
+
+int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
+ __u32 type, ldlm_policy_data_t *policy, __u32 mode,
+ int *flags, void *data, struct lustre_handle *lockh,
+ int unref)
+{
+ struct obd_device *obd = exp->exp_obd;
+ int lflags = *flags;
+ ldlm_mode_t rc;
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
+ return -EIO;
+
+ /* Filesystem lock extents are extended to page boundaries so that
+ * dealing with the page cache is a little smoother */
+ policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
+ policy->l_extent.end |= ~CFS_PAGE_MASK;
+
+ /* Next, search for already existing extent locks that will cover us */
+ /* If we're trying to read, we also search for an existing PW lock. The
+ * VFS and page cache already protect us locally, so lots of readers/
+ * writers can share a single PW lock. */
+ rc = mode;
+ if (mode == LCK_PR)
+ rc |= LCK_PW;
+ rc = ldlm_lock_match(obd->obd_namespace, lflags,
+ res_id, type, policy, rc, lockh, unref);
+ if (rc) {
+ if (data != NULL) {
+ if (!osc_set_data_with_check(lockh, data)) {
+ if (!(lflags & LDLM_FL_TEST_LOCK))
+ ldlm_lock_decref(lockh, rc);
+ return 0;
+ }
+ }
+ if (!(lflags & LDLM_FL_TEST_LOCK) && mode != rc) {
+ ldlm_lock_addref(lockh, LCK_PR);
+ ldlm_lock_decref(lockh, LCK_PW);
+ }
+ return rc;
+ }
+ return rc;
+}
+
+int osc_cancel_base(struct lustre_handle *lockh, __u32 mode)
+{
+ if (unlikely(mode == LCK_GROUP))
+ ldlm_lock_decref_and_cancel(lockh, mode);
+ else
+ ldlm_lock_decref(lockh, mode);
+
+ return 0;
+}
+
+static int osc_cancel(struct obd_export *exp, struct lov_stripe_md *md,
+ __u32 mode, struct lustre_handle *lockh)
+{
+ return osc_cancel_base(lockh, mode);
+}
+
+static int osc_cancel_unused(struct obd_export *exp,
+ struct lov_stripe_md *lsm,
+ ldlm_cancel_flags_t flags,
+ void *opaque)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+ struct ldlm_res_id res_id, *resp = NULL;
+
+ if (lsm != NULL) {
+ ostid_build_res_name(&lsm->lsm_oi, &res_id);
+ resp = &res_id;
+ }
+
+ return ldlm_cli_cancel_unused(obd->obd_namespace, resp, flags, opaque);
+}
+
+static int osc_statfs_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
+ struct osc_async_args *aa, int rc)
+{
+ struct obd_statfs *msfs;
+
+ if (rc == -EBADR)
+ /* The request has in fact never been sent
+ * due to issues at a higher level (LOV).
+ * Exit immediately since the caller is
+ * aware of the problem and takes care
+ * of the cleanup */
+ return rc;
+
+ if ((rc == -ENOTCONN || rc == -EAGAIN) &&
+ (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY))
+ GOTO(out, rc = 0);
+
+ if (rc != 0)
+ GOTO(out, rc);
+
+ msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
+ if (msfs == NULL) {
+ GOTO(out, rc = -EPROTO);
+ }
+
+ *aa->aa_oi->oi_osfs = *msfs;
+out:
+ rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
+ return rc;
+}
+
+static int osc_statfs_async(struct obd_export *exp,
+ struct obd_info *oinfo, __u64 max_age,
+ struct ptlrpc_request_set *rqset)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+ struct ptlrpc_request *req;
+ struct osc_async_args *aa;
+ int rc;
+
+ /* We could possibly pass max_age in the request (as an absolute
+ * timestamp or a "seconds.usec ago") so the target can avoid doing
+ * extra calls into the filesystem if that isn't necessary (e.g.
+ * during mount that would help a bit). Having relative timestamps
+ * is not so great if request processing is slow, while absolute
+ * timestamps are not ideal because they need time synchronization. */
+ req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
+ if (req == NULL)
+ return -ENOMEM;
+
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+ ptlrpc_request_set_replen(req);
+ req->rq_request_portal = OST_CREATE_PORTAL;
+ ptlrpc_at_set_req_timeout(req);
+
+ if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
+ /* procfs requests do not want to wait on statfs, to avoid a deadlock */
+ req->rq_no_resend = 1;
+ req->rq_no_delay = 1;
+ }
+
+ req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
+ CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+ aa = ptlrpc_req_async_args(req);
+ aa->aa_oi = oinfo;
+
+ ptlrpc_set_add_req(rqset, req);
+ return 0;
+}
+
+static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
+ struct obd_statfs *osfs, __u64 max_age, __u32 flags)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+ struct obd_statfs *msfs;
+ struct ptlrpc_request *req;
+ struct obd_import *imp = NULL;
+ int rc;
+
+ /* Since the request might also come from lprocfs, we need to
+ * sync this with client_disconnect_export() (Bug 15684). */
+ down_read(&obd->u.cli.cl_sem);
+ if (obd->u.cli.cl_import)
+ imp = class_import_get(obd->u.cli.cl_import);
+ up_read(&obd->u.cli.cl_sem);
+ if (!imp)
+ return -ENODEV;
+
+ /* We could possibly pass max_age in the request (as an absolute
+ * timestamp or a "seconds.usec ago") so the target can avoid doing
+ * extra calls into the filesystem if that isn't necessary (e.g.
+ * during mount that would help a bit). Having relative timestamps
+ * is not so great if request processing is slow, while absolute
+ * timestamps are not ideal because they need time synchronization. */
+ req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
+
+ class_import_put(imp);
+
+ if (req == NULL)
+ return -ENOMEM;
+
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+ ptlrpc_request_set_replen(req);
+ req->rq_request_portal = OST_CREATE_PORTAL;
+ ptlrpc_at_set_req_timeout(req);
+
+ if (flags & OBD_STATFS_NODELAY) {
+ /* procfs requests do not want to wait on statfs, to avoid a deadlock */
+ req->rq_no_resend = 1;
+ req->rq_no_delay = 1;
+ }
+
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ GOTO(out, rc);
+
+ msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
+ if (msfs == NULL) {
+ GOTO(out, rc = -EPROTO);
+ }
+
+ *osfs = *msfs;
+
+ out:
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+/* Retrieve object striping information.
+ *
+ * @lump is a pointer to an in-core struct with lmm_stripe_count indicating
+ * the maximum number of OST indices which will fit in the user buffer.
+ * lmm_magic must be LOV_MAGIC (we only use 1 slot here).
+ */
+static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
+{
+ /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
+ struct lov_user_md_v3 lum, *lumk;
+ struct lov_user_ost_data_v1 *lmm_objects;
+ int rc = 0, lum_size;
+
+ if (!lsm)
+ return -ENODATA;
+
+ /* we only need the header part from user space to get lmm_magic and
+ * lmm_stripe_count (the header part is common to v1 and v3) */
+ lum_size = sizeof(struct lov_user_md_v1);
+ if (copy_from_user(&lum, lump, lum_size))
+ return -EFAULT;
+
+ if ((lum.lmm_magic != LOV_USER_MAGIC_V1) &&
+ (lum.lmm_magic != LOV_USER_MAGIC_V3))
+ return -EINVAL;
+
+ /* lov_user_md_vX and lov_mds_md_vX must have the same size */
+ LASSERT(sizeof(struct lov_user_md_v1) == sizeof(struct lov_mds_md_v1));
+ LASSERT(sizeof(struct lov_user_md_v3) == sizeof(struct lov_mds_md_v3));
+ LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lumk->lmm_objects[0]));
+
+ /* we can use lov_mds_md_size() to compute lum_size
+ * because lov_user_md_vX and lov_mds_md_vX have the same size */
+ if (lum.lmm_stripe_count > 0) {
+ lum_size = lov_mds_md_size(lum.lmm_stripe_count, lum.lmm_magic);
+ OBD_ALLOC(lumk, lum_size);
+ if (!lumk)
+ return -ENOMEM;
+
+ if (lum.lmm_magic == LOV_USER_MAGIC_V1)
+ lmm_objects =
+ &(((struct lov_user_md_v1 *)lumk)->lmm_objects[0]);
+ else
+ lmm_objects = &(lumk->lmm_objects[0]);
+ lmm_objects->l_ost_oi = lsm->lsm_oi;
+ } else {
+ lum_size = lov_mds_md_size(0, lum.lmm_magic);
+ lumk = &lum;
+ }
+
+ lumk->lmm_oi = lsm->lsm_oi;
+ lumk->lmm_stripe_count = 1;
+
+ if (copy_to_user(lump, lumk, lum_size))
+ rc = -EFAULT;
+
+ if (lumk != &lum)
+ OBD_FREE(lumk, lum_size);
+
+ return rc;
+}
+
+
+static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
+ void *karg, void *uarg)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct obd_ioctl_data *data = karg;
+ int err = 0;
+
+ if (!try_module_get(THIS_MODULE)) {
+ CERROR("Can't get module. Is it alive?");
+ return -EINVAL;
+ }
+ switch (cmd) {
+ case OBD_IOC_LOV_GET_CONFIG: {
+ char *buf;
+ struct lov_desc *desc;
+ struct obd_uuid uuid;
+
+ buf = NULL;
+ len = 0;
+ if (obd_ioctl_getdata(&buf, &len, (void *)uarg))
+ GOTO(out, err = -EINVAL);
+
+ data = (struct obd_ioctl_data *)buf;
+
+ if (sizeof(*desc) > data->ioc_inllen1) {
+ obd_ioctl_freedata(buf, len);
+ GOTO(out, err = -EINVAL);
+ }
+
+ if (data->ioc_inllen2 < sizeof(uuid)) {
+ obd_ioctl_freedata(buf, len);
+ GOTO(out, err = -EINVAL);
+ }
+
+ desc = (struct lov_desc *)data->ioc_inlbuf1;
+ desc->ld_tgt_count = 1;
+ desc->ld_active_tgt_count = 1;
+ desc->ld_default_stripe_count = 1;
+ desc->ld_default_stripe_size = 0;
+ desc->ld_default_stripe_offset = 0;
+ desc->ld_pattern = 0;
+ memcpy(&desc->ld_uuid, &obd->obd_uuid, sizeof(uuid));
+
+ memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
+
+ err = copy_to_user((void *)uarg, buf, len);
+ if (err)
+ err = -EFAULT;
+ obd_ioctl_freedata(buf, len);
+ GOTO(out, err);
+ }
+ case LL_IOC_LOV_SETSTRIPE:
+ err = obd_alloc_memmd(exp, karg);
+ if (err > 0)
+ err = 0;
+ GOTO(out, err);
+ case LL_IOC_LOV_GETSTRIPE:
+ err = osc_getstripe(karg, uarg);
+ GOTO(out, err);
+ case OBD_IOC_CLIENT_RECOVER:
+ err = ptlrpc_recover_import(obd->u.cli.cl_import,
+ data->ioc_inlbuf1, 0);
+ if (err > 0)
+ err = 0;
+ GOTO(out, err);
+ case IOC_OSC_SET_ACTIVE:
+ err = ptlrpc_set_import_active(obd->u.cli.cl_import,
+ data->ioc_offset);
+ GOTO(out, err);
+ case OBD_IOC_POLL_QUOTACHECK:
+ err = osc_quota_poll_check(exp, (struct if_quotacheck *)karg);
+ GOTO(out, err);
+ case OBD_IOC_PING_TARGET:
+ err = ptlrpc_obd_ping(obd);
+ GOTO(out, err);
+ default:
+ CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n",
+ cmd, current_comm());
+ GOTO(out, err = -ENOTTY);
+ }
+out:
+ module_put(THIS_MODULE);
+ return err;
+}
+
+static int osc_get_info(const struct lu_env *env, struct obd_export *exp,
+ obd_count keylen, void *key, __u32 *vallen, void *val,
+ struct lov_stripe_md *lsm)
+{
+ if (!vallen || !val)
+ return -EFAULT;
+
+ if (KEY_IS(KEY_LOCK_TO_STRIPE)) {
+ __u32 *stripe = val;
+ *vallen = sizeof(*stripe);
+ *stripe = 0;
+ return 0;
+ } else if (KEY_IS(KEY_LAST_ID)) {
+ struct ptlrpc_request *req;
+ obd_id *reply;
+ char *tmp;
+ int rc;
+
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+ &RQF_OST_GET_INFO_LAST_ID);
+ if (req == NULL)
+ return -ENOMEM;
+
+ req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
+ RCL_CLIENT, keylen);
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
+ memcpy(tmp, key, keylen);
+
+ req->rq_no_delay = req->rq_no_resend = 1;
+ ptlrpc_request_set_replen(req);
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ GOTO(out, rc);
+
+ reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID);
+ if (reply == NULL)
+ GOTO(out, rc = -EPROTO);
+
+ *((obd_id *)val) = *reply;
+ out:
+ ptlrpc_req_finished(req);
+ return rc;
+ } else if (KEY_IS(KEY_FIEMAP)) {
+ struct ll_fiemap_info_key *fm_key =
+ (struct ll_fiemap_info_key *)key;
+ struct ldlm_res_id res_id;
+ ldlm_policy_data_t policy;
+ struct lustre_handle lockh;
+ ldlm_mode_t mode = 0;
+ struct ptlrpc_request *req;
+ struct ll_user_fiemap *reply;
+ char *tmp;
+ int rc;
+
+ if (!(fm_key->fiemap.fm_flags & FIEMAP_FLAG_SYNC))
+ goto skip_locking;
+
+ policy.l_extent.start = fm_key->fiemap.fm_start &
+ CFS_PAGE_MASK;
+
+ if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
+ fm_key->fiemap.fm_start + PAGE_CACHE_SIZE - 1)
+ policy.l_extent.end = OBD_OBJECT_EOF;
+ else
+ policy.l_extent.end = (fm_key->fiemap.fm_start +
+ fm_key->fiemap.fm_length +
+ PAGE_CACHE_SIZE - 1) & CFS_PAGE_MASK;
+
+ ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
+ mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
+ LDLM_FL_BLOCK_GRANTED |
+ LDLM_FL_LVB_READY,
+ &res_id, LDLM_EXTENT, &policy,
+ LCK_PR | LCK_PW, &lockh, 0);
+ if (mode) { /* lock is cached on client */
+ if (mode != LCK_PR) {
+ ldlm_lock_addref(&lockh, LCK_PR);
+ ldlm_lock_decref(&lockh, LCK_PW);
+ }
+ } else { /* no cached lock, needs acquire lock on server side */
+ fm_key->oa.o_valid |= OBD_MD_FLFLAGS;
+ fm_key->oa.o_flags |= OBD_FL_SRVLOCK;
+ }
+
+skip_locking:
+ req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+ &RQF_OST_GET_INFO_FIEMAP);
+ if (req == NULL)
+ GOTO(drop_lock, rc = -ENOMEM);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY,
+ RCL_CLIENT, keylen);
+ req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
+ RCL_CLIENT, *vallen);
+ req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
+ RCL_SERVER, *vallen);
+
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
+ if (rc) {
+ ptlrpc_request_free(req);
+ GOTO(drop_lock, rc);
+ }
+
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
+ memcpy(tmp, key, keylen);
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
+ memcpy(tmp, val, *vallen);
+
+ ptlrpc_request_set_replen(req);
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ GOTO(fini_req, rc);
+
+ reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
+ if (reply == NULL)
+ GOTO(fini_req, rc = -EPROTO);
+
+ memcpy(val, reply, *vallen);
+fini_req:
+ ptlrpc_req_finished(req);
+drop_lock:
+ if (mode)
+ ldlm_lock_decref(&lockh, LCK_PR);
+ return rc;
+ }
+
+ return -EINVAL;
+}
+
+static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
+ obd_count keylen, void *key, obd_count vallen,
+ void *val, struct ptlrpc_request_set *set)
+{
+ struct ptlrpc_request *req;
+ struct obd_device *obd = exp->exp_obd;
+ struct obd_import *imp = class_exp2cliimp(exp);
+ char *tmp;
+ int rc;
+
+ OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
+
+ if (KEY_IS(KEY_CHECKSUM)) {
+ if (vallen != sizeof(int))
+ return -EINVAL;
+ exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
+ return 0;
+ }
+
+ if (KEY_IS(KEY_SPTLRPC_CONF)) {
+ sptlrpc_conf_client_adapt(obd);
+ return 0;
+ }
+
+ if (KEY_IS(KEY_FLUSH_CTX)) {
+ sptlrpc_import_flush_my_ctx(imp);
+ return 0;
+ }
+
+ if (KEY_IS(KEY_CACHE_SET)) {
+ struct client_obd *cli = &obd->u.cli;
+
+ LASSERT(cli->cl_cache == NULL); /* only once */
+ cli->cl_cache = (struct cl_client_cache *)val;
+ atomic_inc(&cli->cl_cache->ccc_users);
+ cli->cl_lru_left = &cli->cl_cache->ccc_lru_left;
+
+ /* add this osc into entity list */
+ LASSERT(list_empty(&cli->cl_lru_osc));
+ spin_lock(&cli->cl_cache->ccc_lru_lock);
+ list_add(&cli->cl_lru_osc, &cli->cl_cache->ccc_lru);
+ spin_unlock(&cli->cl_cache->ccc_lru_lock);
+
+ return 0;
+ }
+
+ if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
+ struct client_obd *cli = &obd->u.cli;
+ int nr = atomic_read(&cli->cl_lru_in_list) >> 1;
+ int target = *(int *)val;
+
+ nr = osc_lru_shrink(cli, min(nr, target));
+ *(int *)val -= nr;
+ return 0;
+ }
+
+ if (!set && !KEY_IS(KEY_GRANT_SHRINK))
+ return -EINVAL;
+
+ /* We pass all other commands directly to OST. Since nobody calls osc
+ * methods directly and everybody is supposed to go through LOV, we
+ * assume lov checked invalid values for us.
+ * The only recognised values so far are evict_by_nid and mds_conn.
+ * Even if something bad goes through, we'd get a -EINVAL from OST
+ * anyway. */
+
+ req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ?
+ &RQF_OST_SET_GRANT_INFO :
+ &RQF_OBD_SET_INFO);
+ if (req == NULL)
+ return -ENOMEM;
+
+ req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
+ RCL_CLIENT, keylen);
+ if (!KEY_IS(KEY_GRANT_SHRINK))
+ req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
+ RCL_CLIENT, vallen);
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
+ memcpy(tmp, key, keylen);
+ tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ?
+ &RMF_OST_BODY :
+ &RMF_SETINFO_VAL);
+ memcpy(tmp, val, vallen);
+
+ if (KEY_IS(KEY_GRANT_SHRINK)) {
+ struct osc_grant_args *aa;
+ struct obdo *oa;
+
+ CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+ aa = ptlrpc_req_async_args(req);
+ OBDO_ALLOC(oa);
+ if (!oa) {
+ ptlrpc_req_finished(req);
+ return -ENOMEM;
+ }
+ *oa = ((struct ost_body *)val)->oa;
+ aa->aa_oa = oa;
+ req->rq_interpret_reply = osc_shrink_grant_interpret;
+ }
+
+ ptlrpc_request_set_replen(req);
+ if (!KEY_IS(KEY_GRANT_SHRINK)) {
+ LASSERT(set != NULL);
+ ptlrpc_set_add_req(set, req);
+ ptlrpc_check_set(NULL, set);
+ } else
+ ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+
+ return 0;
+}
+
+
+static int osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
+ struct obd_device *disk_obd, int *index)
+{
+ /* this code is not supposed to be used with LOD/OSP;
+ * it is to be removed soon */
+ LBUG();
+ return 0;
+}
+
+static int osc_llog_finish(struct obd_device *obd, int count)
+{
+ struct llog_ctxt *ctxt;
+
+ ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
+ if (ctxt) {
+ llog_cat_close(NULL, ctxt->loc_handle);
+ llog_cleanup(NULL, ctxt);
+ }
+
+ ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
+ if (ctxt)
+ llog_cleanup(NULL, ctxt);
+ return 0;
+}
+
+static int osc_reconnect(const struct lu_env *env,
+ struct obd_export *exp, struct obd_device *obd,
+ struct obd_uuid *cluuid,
+ struct obd_connect_data *data,
+ void *localdata)
+{
+ struct client_obd *cli = &obd->u.cli;
+
+ if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
+ long lost_grant;
+
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ data->ocd_grant = (cli->cl_avail_grant + cli->cl_dirty) ?:
+ 2 * cli_brw_size(obd);
+ lost_grant = cli->cl_lost_grant;
+ cli->cl_lost_grant = 0;
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+
+ CDEBUG(D_RPCTRACE, "ocd_connect_flags: "LPX64" ocd_version: %d"
+ " ocd_grant: %d, lost: %ld.\n", data->ocd_connect_flags,
+ data->ocd_version, data->ocd_grant, lost_grant);
+ }
+
+ return 0;
+}
+
+static int osc_disconnect(struct obd_export *exp)
+{
+ struct obd_device *obd = class_exp2obd(exp);
+ struct llog_ctxt *ctxt;
+ int rc;
+
+ ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
+ if (ctxt) {
+ if (obd->u.cli.cl_conn_count == 1) {
+ /* Flush any remaining cancel messages out to the
+ * target */
+ llog_sync(ctxt, exp, 0);
+ }
+ llog_ctxt_put(ctxt);
+ } else {
+ CDEBUG(D_HA, "No LLOG_SIZE_REPL_CTXT found in obd %p\n",
+ obd);
+ }
+
+ rc = client_disconnect_export(exp);
+ /**
+ * Initially we put del_shrink_grant before disconnect_export, but it
+ * causes the following problem if setup (connect) and cleanup
+ * (disconnect) are tangled together.
+ * connect p1 disconnect p2
+ * ptlrpc_connect_import
+ * ............... class_manual_cleanup
+ * osc_disconnect
+ * del_shrink_grant
+ * ptlrpc_connect_interrupt
+ * init_grant_shrink
+ * add this client to shrink list
+ * cleanup_osc
+ * Bang! pinger trigger the shrink.
+ * So the osc should be disconnected from the shrink list, after we
+ * are sure the import has been destroyed. BUG18662
+ */
+ if (obd->u.cli.cl_import == NULL)
+ osc_del_shrink_grant(&obd->u.cli);
+ return rc;
+}
+
+static int osc_import_event(struct obd_device *obd,
+ struct obd_import *imp,
+ enum obd_import_event event)
+{
+ struct client_obd *cli;
+ int rc = 0;
+
+ LASSERT(imp->imp_obd == obd);
+
+ switch (event) {
+ case IMP_EVENT_DISCON: {
+ cli = &obd->u.cli;
+ client_obd_list_lock(&cli->cl_loi_list_lock);
+ cli->cl_avail_grant = 0;
+ cli->cl_lost_grant = 0;
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
+ break;
+ }
+ case IMP_EVENT_INACTIVE: {
+ rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL);
+ break;
+ }
+ case IMP_EVENT_INVALIDATE: {
+ struct ldlm_namespace *ns = obd->obd_namespace;
+ struct lu_env *env;
+ int refcheck;
+
+ env = cl_env_get(&refcheck);
+ if (!IS_ERR(env)) {
+ /* Reset grants */
+ cli = &obd->u.cli;
+ /* all pages go to failing rpcs due to the invalid
+ * import */
+ osc_io_unplug(env, cli, NULL, PDL_POLICY_ROUND);
+
+ ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
+ cl_env_put(env, &refcheck);
+ } else
+ rc = PTR_ERR(env);
+ break;
+ }
+ case IMP_EVENT_ACTIVE: {
+ rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL);
+ break;
+ }
+ case IMP_EVENT_OCD: {
+ struct obd_connect_data *ocd = &imp->imp_connect_data;
+
+ if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
+ osc_init_grant(&obd->u.cli, ocd);
+
+ /* See bug 7198 */
+ if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
+ imp->imp_client->cli_request_portal = OST_REQUEST_PORTAL;
+
+ rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
+ break;
+ }
+ case IMP_EVENT_DEACTIVATE: {
+ rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE, NULL);
+ break;
+ }
+ case IMP_EVENT_ACTIVATE: {
+ rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE, NULL);
+ break;
+ }
+ default:
+ CERROR("Unknown import event %d\n", event);
+ LBUG();
+ }
+ return rc;
+}
+
+/**
+ * Determine whether the lock can be canceled before replaying the lock
+ * during recovery, see bug16774 for detailed information.
+ *
+ * \retval zero the lock can't be canceled
+ * \retval other ok to cancel
+ */
+static int osc_cancel_for_recovery(struct ldlm_lock *lock)
+{
+ check_res_locked(lock->l_resource);
+
+ /*
+ * Cancel all unused extent locks with granted mode LCK_PR or LCK_CR.
+ *
+ * XXX as a future improvement, we can also cancel unused write lock
+ * if it doesn't have dirty data and active mmaps.
+ */
+ if (lock->l_resource->lr_type == LDLM_EXTENT &&
+ (lock->l_granted_mode == LCK_PR ||
+ lock->l_granted_mode == LCK_CR) &&
+ (osc_dlm_lock_pageref(lock) == 0))
+ return 1;
+
+ return 0;
+}
+
+static int brw_queue_work(const struct lu_env *env, void *data)
+{
+ struct client_obd *cli = data;
+
+ CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);
+
+ osc_io_unplug(env, cli, NULL, PDL_POLICY_SAME);
+ return 0;
+}
+
+int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
+{
+ struct lprocfs_static_vars lvars = { 0 };
+ struct client_obd *cli = &obd->u.cli;
+ void *handler;
+ int rc;
+
+ rc = ptlrpcd_addref();
+ if (rc)
+ return rc;
+
+ rc = client_obd_setup(obd, lcfg);
+ if (rc)
+ GOTO(out_ptlrpcd, rc);
+
+ handler = ptlrpcd_alloc_work(cli->cl_import, brw_queue_work, cli);
+ if (IS_ERR(handler))
+ GOTO(out_client_setup, rc = PTR_ERR(handler));
+ cli->cl_writeback_work = handler;
+
+ rc = osc_quota_setup(obd);
+ if (rc)
+ GOTO(out_ptlrpcd_work, rc);
+
+ cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
+ lprocfs_osc_init_vars(&lvars);
+ if (lprocfs_obd_setup(obd, lvars.obd_vars) == 0) {
+ lproc_osc_attach_seqstat(obd);
+ sptlrpc_lprocfs_cliobd_attach(obd);
+ ptlrpc_lprocfs_register_obd(obd);
+ }
+
+ /* We need to allocate a few extra requests, because
+ * brw_interpret tries to create new requests before freeing
+ * previous ones. Ideally we want to have 2x max_rpcs_in_flight
+ * reserved, but that might waste too much RAM in practice,
+ * so 2 is just a guess that should still work. */
+ cli->cl_import->imp_rq_pool =
+ ptlrpc_init_rq_pool(cli->cl_max_rpcs_in_flight + 2,
+ OST_MAXREQSIZE,
+ ptlrpc_add_rqs_to_pool);
+
+ INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
+ ns_register_cancel(obd->obd_namespace, osc_cancel_for_recovery);
+ return rc;
+
+out_ptlrpcd_work:
+ ptlrpcd_destroy_work(handler);
+out_client_setup:
+ client_obd_cleanup(obd);
+out_ptlrpcd:
+ ptlrpcd_decref();
+ return rc;
+}
+
+static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
+{
+ int rc = 0;
+
+ switch (stage) {
+ case OBD_CLEANUP_EARLY: {
+ struct obd_import *imp;
+ imp = obd->u.cli.cl_import;
+ CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name);
+ /* ptlrpc_abort_inflight to stop an mds_lov_synchronize */
+ ptlrpc_deactivate_import(imp);
+ spin_lock(&imp->imp_lock);
+ imp->imp_pingable = 0;
+ spin_unlock(&imp->imp_lock);
+ break;
+ }
+ case OBD_CLEANUP_EXPORTS: {
+ struct client_obd *cli = &obd->u.cli;
+ /* LU-464
+ * for echo client, export may be on zombie list, wait for
+ * zombie thread to cull it, because cli.cl_import will be
+ * cleared in client_disconnect_export():
+ * class_export_destroy() -> obd_cleanup() ->
+ * echo_device_free() -> echo_client_cleanup() ->
+ * obd_disconnect() -> osc_disconnect() ->
+ * client_disconnect_export()
+ */
+ obd_zombie_barrier();
+ if (cli->cl_writeback_work) {
+ ptlrpcd_destroy_work(cli->cl_writeback_work);
+ cli->cl_writeback_work = NULL;
+ }
+ obd_cleanup_client_import(obd);
+ ptlrpc_lprocfs_unregister_obd(obd);
+ lprocfs_obd_cleanup(obd);
+ rc = obd_llog_finish(obd, 0);
+ if (rc != 0)
+ CERROR("failed to cleanup llogging subsystems\n");
+ break;
+ }
+ }
+ return rc;
+}
+
+int osc_cleanup(struct obd_device *obd)
+{
+ struct client_obd *cli = &obd->u.cli;
+ int rc;
+
+ /* lru cleanup */
+ if (cli->cl_cache != NULL) {
+ LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
+ spin_lock(&cli->cl_cache->ccc_lru_lock);
+ list_del_init(&cli->cl_lru_osc);
+ spin_unlock(&cli->cl_cache->ccc_lru_lock);
+ cli->cl_lru_left = NULL;
+ atomic_dec(&cli->cl_cache->ccc_users);
+ cli->cl_cache = NULL;
+ }
+
+ /* free memory of osc quota cache */
+ osc_quota_cleanup(obd);
+
+ rc = client_obd_cleanup(obd);
+
+ ptlrpcd_decref();
+ return rc;
+}
+
+int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg)
+{
+ struct lprocfs_static_vars lvars = { 0 };
+ int rc = 0;
+
+ lprocfs_osc_init_vars(&lvars);
+
+ switch (lcfg->lcfg_command) {
+ default:
+ rc = class_process_proc_param(PARAM_OSC, lvars.obd_vars,
+ lcfg, obd);
+ if (rc > 0)
+ rc = 0;
+ break;
+ }
+
+ return rc;
+}
+
+static int osc_process_config(struct obd_device *obd, obd_count len, void *buf)
+{
+ return osc_process_config_base(obd, buf);
+}
+
+struct obd_ops osc_obd_ops = {
+ .o_owner = THIS_MODULE,
+ .o_setup = osc_setup,
+ .o_precleanup = osc_precleanup,
+ .o_cleanup = osc_cleanup,
+ .o_add_conn = client_import_add_conn,
+ .o_del_conn = client_import_del_conn,
+ .o_connect = client_connect_import,
+ .o_reconnect = osc_reconnect,
+ .o_disconnect = osc_disconnect,
+ .o_statfs = osc_statfs,
+ .o_statfs_async = osc_statfs_async,
+ .o_packmd = osc_packmd,
+ .o_unpackmd = osc_unpackmd,
+ .o_create = osc_create,
+ .o_destroy = osc_destroy,
+ .o_getattr = osc_getattr,
+ .o_getattr_async = osc_getattr_async,
+ .o_setattr = osc_setattr,
+ .o_setattr_async = osc_setattr_async,
+ .o_brw = osc_brw,
+ .o_punch = osc_punch,
+ .o_sync = osc_sync,
+ .o_enqueue = osc_enqueue,
+ .o_change_cbdata = osc_change_cbdata,
+ .o_find_cbdata = osc_find_cbdata,
+ .o_cancel = osc_cancel,
+ .o_cancel_unused = osc_cancel_unused,
+ .o_iocontrol = osc_iocontrol,
+ .o_get_info = osc_get_info,
+ .o_set_info_async = osc_set_info_async,
+ .o_import_event = osc_import_event,
+ .o_llog_init = osc_llog_init,
+ .o_llog_finish = osc_llog_finish,
+ .o_process_config = osc_process_config,
+ .o_quotactl = osc_quotactl,
+ .o_quotacheck = osc_quotacheck,
+};
+
+extern struct lu_kmem_descr osc_caches[];
+extern spinlock_t osc_ast_guard;
+extern struct lock_class_key osc_ast_guard_class;
+
+int __init osc_init(void)
+{
+ struct lprocfs_static_vars lvars = { 0 };
+ int rc;
+
+ /* print an address of _any_ initialized kernel symbol from this
+ * module, to allow debugging with gdb that doesn't support data
+ * symbols from modules.*/
+ CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
+
+ rc = lu_kmem_init(osc_caches);
+ if (rc)
+ return rc;
+
+ lprocfs_osc_init_vars(&lvars);
+
+ rc = class_register_type(&osc_obd_ops, NULL, lvars.module_vars,
+ LUSTRE_OSC_NAME, &osc_device_type);
+ if (rc) {
+ lu_kmem_fini(osc_caches);
+ return rc;
+ }
+
+ spin_lock_init(&osc_ast_guard);
+ lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
+
+ return rc;
+}
+
+static void /*__exit*/ osc_exit(void)
+{
+ class_unregister_type(LUSTRE_OSC_NAME);
+ lu_kmem_fini(osc_caches);
+}
+
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
+
+module_init(osc_init);
+module_exit(osc_exit);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/Makefile b/drivers/staging/lustre/lustre/ptlrpc/Makefile
new file mode 100644
index 0000000..6d78b80
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/Makefile
@@ -0,0 +1,24 @@
+obj-$(CONFIG_LUSTRE_FS) += ptlrpc.o
+LDLM := ../../lustre/ldlm/
+
+ldlm_objs := $(LDLM)l_lock.o $(LDLM)ldlm_lock.o
+ldlm_objs += $(LDLM)ldlm_resource.o $(LDLM)ldlm_lib.o
+ldlm_objs += $(LDLM)ldlm_plain.o $(LDLM)ldlm_extent.o
+ldlm_objs += $(LDLM)ldlm_request.o $(LDLM)ldlm_lockd.o
+ldlm_objs += $(LDLM)ldlm_flock.o $(LDLM)ldlm_inodebits.o
+ldlm_objs += $(LDLM)ldlm_pool.o
+ldlm_objs += $(LDLM)interval_tree.o
+ptlrpc_objs := client.o recover.o connection.o niobuf.o pack_generic.o
+ptlrpc_objs += events.o ptlrpc_module.o service.o pinger.o
+ptlrpc_objs += llog_net.o llog_client.o llog_server.o import.o ptlrpcd.o
+ptlrpc_objs += pers.o lproc_ptlrpc.o wiretest.o layout.o
+ptlrpc_objs += sec.o sec_bulk.o sec_gc.o sec_config.o sec_lproc.o
+ptlrpc_objs += sec_null.o sec_plain.o nrs.o nrs_fifo.o
+
+ptlrpc-y := $(ldlm_objs) $(ptlrpc_objs)
+ptlrpc-$(CONFIG_LUSTRE_TRANSLATE_ERRNOS) += errno.o
+
+obj-$(CONFIG_PTLRPC_GSS) += gss/
+
+
+ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
new file mode 100644
index 0000000..810a458
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -0,0 +1,3018 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+/** Implementation of client-side PortalRPC interfaces */
+
+#define DEBUG_SUBSYSTEM S_RPC
+
+#include <obd_support.h>
+#include <obd_class.h>
+#include <lustre_lib.h>
+#include <lustre_ha.h>
+#include <lustre_import.h>
+#include <lustre_req_layout.h>
+
+#include "ptlrpc_internal.h"
+
+static int ptlrpc_send_new_req(struct ptlrpc_request *req);
+
+/**
+ * Initialize passed in client structure \a cl.
+ */
+void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
+ struct ptlrpc_client *cl)
+{
+ cl->cli_request_portal = req_portal;
+ cl->cli_reply_portal = rep_portal;
+ cl->cli_name = name;
+}
+EXPORT_SYMBOL(ptlrpc_init_client);
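+
+/*
+ * Illustrative sketch, not part of the original patch: a client structure is
+ * typically initialized once with its request and reply portals.  The reply
+ * portal name and the "osc" label below are placeholders; only
+ * OST_REQUEST_PORTAL actually appears elsewhere in this patch.
+ *
+ *	struct ptlrpc_client client;
+ *
+ *	ptlrpc_init_client(OST_REQUEST_PORTAL, OST_REPLY_PORTAL,
+ *			   "osc", &client);
+ */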
+
+/**
+ * Return PortalRPC connection for remote uuid \a uuid
+ */
+struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
+{
+ struct ptlrpc_connection *c;
+ lnet_nid_t self;
+ lnet_process_id_t peer;
+ int err;
+
+ /* ptlrpc_uuid_to_peer() initializes its 2nd parameter
+ * before accessing its values. */
+ /* coverity[uninit_use_in_call] */
+ err = ptlrpc_uuid_to_peer(uuid, &peer, &self);
+ if (err != 0) {
+ CNETERR("cannot find peer %s!\n", uuid->uuid);
+ return NULL;
+ }
+
+ c = ptlrpc_connection_get(peer, self, uuid);
+ if (c) {
+ memcpy(c->c_remote_uuid.uuid,
+ uuid->uuid, sizeof(c->c_remote_uuid.uuid));
+ }
+
+ CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);
+
+ return c;
+}
+EXPORT_SYMBOL(ptlrpc_uuid_to_connection);
+
+/**
+ * Allocate and initialize new bulk descriptor on the sender.
+ * Returns pointer to the descriptor or NULL on error.
+ */
+struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
+ unsigned type, unsigned portal)
+{
+ struct ptlrpc_bulk_desc *desc;
+ int i;
+
+ OBD_ALLOC(desc, offsetof(struct ptlrpc_bulk_desc, bd_iov[npages]));
+ if (!desc)
+ return NULL;
+
+ spin_lock_init(&desc->bd_lock);
+ init_waitqueue_head(&desc->bd_waitq);
+ desc->bd_max_iov = npages;
+ desc->bd_iov_count = 0;
+ desc->bd_portal = portal;
+ desc->bd_type = type;
+ desc->bd_md_count = 0;
+ LASSERT(max_brw > 0);
+ desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT);
+ /* PTLRPC_BULK_OPS_COUNT is the compile-time transfer limit for this
+ * node. Negotiated ocd_brw_size will always be <= this number. */
+ for (i = 0; i < PTLRPC_BULK_OPS_COUNT; i++)
+ LNetInvalidateHandle(&desc->bd_mds[i]);
+
+ return desc;
+}
+
+/**
+ * Prepare bulk descriptor for specified outgoing request \a req that
+ * can fit \a npages * pages. \a type is bulk type. \a portal is where
+ * the bulk to be sent. Used on client-side.
+ * Returns pointer to newly allocatrd initialized bulk descriptor or NULL on
+ * error.
+ */
+struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
+ unsigned npages, unsigned max_brw,
+ unsigned type, unsigned portal)
+{
+ struct obd_import *imp = req->rq_import;
+ struct ptlrpc_bulk_desc *desc;
+
+ LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
+ desc = ptlrpc_new_bulk(npages, max_brw, type, portal);
+ if (desc == NULL)
+ return NULL;
+
+ desc->bd_import_generation = req->rq_import_generation;
+ desc->bd_import = class_import_get(imp);
+ desc->bd_req = req;
+
+ desc->bd_cbid.cbid_fn = client_bulk_callback;
+ desc->bd_cbid.cbid_arg = desc;
+
+ /* This makes req own desc; desc is freed when req itself is freed */
+ req->rq_bulk = desc;
+
+ return desc;
+}
+EXPORT_SYMBOL(ptlrpc_prep_bulk_imp);
+
+/**
+ * Add a page \a page to the bulk descriptor \a desc.
+ * Data to transfer in the page starts at offset \a pageoffset and
+ * amount of data to transfer from the page is \a len
+ */
+void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
+ struct page *page, int pageoffset, int len, int pin)
+{
+ LASSERT(desc->bd_iov_count < desc->bd_max_iov);
+ LASSERT(page != NULL);
+ LASSERT(pageoffset >= 0);
+ LASSERT(len > 0);
+ LASSERT(pageoffset + len <= PAGE_CACHE_SIZE);
+
+ desc->bd_nob += len;
+
+ if (pin)
+ page_cache_get(page);
+
+ ptlrpc_add_bulk_page(desc, page, pageoffset, len);
+}
+EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
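+
+/*
+ * Illustrative sketch, not part of the original patch: typical client-side
+ * use of the two bulk helpers above.  The portal value and the page array
+ * are placeholders; BULK_GET_SOURCE is one of the two transfer types
+ * accepted by ptlrpc_prep_bulk_imp().
+ *
+ *	struct ptlrpc_bulk_desc *desc;
+ *	int i;
+ *
+ *	desc = ptlrpc_prep_bulk_imp(req, npages, 1, BULK_GET_SOURCE, portal);
+ *	if (desc == NULL)
+ *		return -ENOMEM;
+ *	for (i = 0; i < npages; i++)
+ *		__ptlrpc_prep_bulk_page(desc, pages[i], 0, PAGE_CACHE_SIZE, 1);
+ *	(req now owns desc and frees it via __ptlrpc_free_bulk())
+ */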
+
+/**
+ * Uninitialize and free bulk descriptor \a desc.
+ * Works on bulk descriptors both from server and client side.
+ */
+void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
+{
+ int i;
+
+ LASSERT(desc != NULL);
+ LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
+ LASSERT(desc->bd_md_count == 0); /* network hands off */
+ LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
+
+ sptlrpc_enc_pool_put_pages(desc);
+
+ if (desc->bd_export)
+ class_export_put(desc->bd_export);
+ else
+ class_import_put(desc->bd_import);
+
+ if (unpin) {
+ for (i = 0; i < desc->bd_iov_count ; i++)
+ page_cache_release(desc->bd_iov[i].kiov_page);
+ }
+
+ OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc,
+ bd_iov[desc->bd_max_iov]));
+}
+EXPORT_SYMBOL(__ptlrpc_free_bulk);
+
+/**
+ * Set server timelimit for this req, i.e. how long are we willing to wait
+ * for reply before timing out this request.
+ */
+void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req)
+{
+ __u32 serv_est;
+ int idx;
+ struct imp_at *at;
+
+ LASSERT(req->rq_import);
+
+ if (AT_OFF) {
+ /* non-AT settings */
+ /**
+ * \a imp_server_timeout means this is a reverse import and
+ * we send (currently only) ASTs to the client and cannot afford
+ * to wait too long for the reply, otherwise the other client
+ * (because of which we are sending this request) would
+ * time out waiting for us
+ */
+ req->rq_timeout = req->rq_import->imp_server_timeout ?
+ obd_timeout / 2 : obd_timeout;
+ } else {
+ at = &req->rq_import->imp_at;
+ idx = import_at_get_index(req->rq_import,
+ req->rq_request_portal);
+ serv_est = at_get(&at->iat_service_estimate[idx]);
+ req->rq_timeout = at_est2timeout(serv_est);
+ }
+ /* We could get even fancier here, using history to predict increased
+ loading... */
+
+ /* Let the server know what this RPC timeout is by putting it in the
+ reqmsg */
+ lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
+}
+EXPORT_SYMBOL(ptlrpc_at_set_req_timeout);
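+
+/*
+ * Illustrative sketch, not part of the original patch: a rough worked
+ * example of the two branches above.  With adaptive timeouts off and
+ * obd_timeout = 100s, a regular import gets rq_timeout = 100s while a
+ * reverse import (imp_server_timeout set) gets 100 / 2 = 50s.  With
+ * adaptive timeouts on and a service estimate of 20s, at_est2timeout()
+ * gives roughly 20 * 125% + 5s = 30s (see the resend comment in
+ * after_reply() below for that formula).
+ */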
+
+/* Adjust max service estimate based on server value */
+static void ptlrpc_at_adj_service(struct ptlrpc_request *req,
+ unsigned int serv_est)
+{
+ int idx;
+ unsigned int oldse;
+ struct imp_at *at;
+
+ LASSERT(req->rq_import);
+ at = &req->rq_import->imp_at;
+
+ idx = import_at_get_index(req->rq_import, req->rq_request_portal);
+ /* max service estimates are tracked on the server side,
+ so just keep minimal history here */
+ oldse = at_measured(&at->iat_service_estimate[idx], serv_est);
+ if (oldse != 0)
+ CDEBUG(D_ADAPTTO, "The RPC service estimate for %s ptl %d "
+ "has changed from %d to %d\n",
+ req->rq_import->imp_obd->obd_name, req->rq_request_portal,
+ oldse, at_get(&at->iat_service_estimate[idx]));
+}
+
+/* Expected network latency per remote node (secs) */
+int ptlrpc_at_get_net_latency(struct ptlrpc_request *req)
+{
+ return AT_OFF ? 0 : at_get(&req->rq_import->imp_at.iat_net_latency);
+}
+
+/* Adjust expected network latency */
+static void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
+ unsigned int service_time)
+{
+ unsigned int nl, oldnl;
+ struct imp_at *at;
+ time_t now = cfs_time_current_sec();
+
+ LASSERT(req->rq_import);
+ at = &req->rq_import->imp_at;
+
+ /* Network latency is total time less server processing time */
+ nl = max_t(int, now - req->rq_sent - service_time, 0) + 1 /* st rounding */;
+ if (service_time > now - req->rq_sent + 3 /* bz16408 */)
+ CWARN("Reported service time %u > total measured time "
+ CFS_DURATION_T"\n", service_time,
+ cfs_time_sub(now, req->rq_sent));
+
+ oldnl = at_measured(&at->iat_net_latency, nl);
+ if (oldnl != 0)
+ CDEBUG(D_ADAPTTO, "The network latency for %s (nid %s) "
+ "has changed from %d to %d\n",
+ req->rq_import->imp_obd->obd_name,
+ obd_uuid2str(
+ &req->rq_import->imp_connection->c_remote_uuid),
+ oldnl, at_get(&at->iat_net_latency));
+}
+
+static int unpack_reply(struct ptlrpc_request *req)
+{
+ int rc;
+
+ if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) {
+ rc = ptlrpc_unpack_rep_msg(req, req->rq_replen);
+ if (rc) {
+ DEBUG_REQ(D_ERROR, req, "unpack_rep failed: %d", rc);
+ return -EPROTO;
+ }
+ }
+
+ rc = lustre_unpack_rep_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
+ if (rc) {
+ DEBUG_REQ(D_ERROR, req, "unpack ptlrpc body failed: %d", rc);
+ return -EPROTO;
+ }
+ return 0;
+}
+
+/**
+ * Handle an early reply message, called with the rq_lock held.
+ * If anything goes wrong just ignore it - same as if it never happened
+ */
+static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
+{
+ struct ptlrpc_request *early_req;
+ time_t olddl;
+ int rc;
+
+ req->rq_early = 0;
+ spin_unlock(&req->rq_lock);
+
+ rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
+ if (rc) {
+ spin_lock(&req->rq_lock);
+ return rc;
+ }
+
+ rc = unpack_reply(early_req);
+ if (rc == 0) {
+ /* Expecting to increase the service time estimate here */
+ ptlrpc_at_adj_service(req,
+ lustre_msg_get_timeout(early_req->rq_repmsg));
+ ptlrpc_at_adj_net_latency(req,
+ lustre_msg_get_service_time(early_req->rq_repmsg));
+ }
+
+ sptlrpc_cli_finish_early_reply(early_req);
+
+ if (rc != 0) {
+ spin_lock(&req->rq_lock);
+ return rc;
+ }
+
+ /* Adjust the local timeout for this req */
+ ptlrpc_at_set_req_timeout(req);
+
+ spin_lock(&req->rq_lock);
+ olddl = req->rq_deadline;
+ /* server assumes it now has rq_timeout from when it sent the
+ * early reply, so client should give it at least that long. */
+ req->rq_deadline = cfs_time_current_sec() + req->rq_timeout +
+ ptlrpc_at_get_net_latency(req);
+
+ DEBUG_REQ(D_ADAPTTO, req,
+ "Early reply #%d, new deadline in "CFS_DURATION_T"s "
+ "("CFS_DURATION_T"s)", req->rq_early_count,
+ cfs_time_sub(req->rq_deadline, cfs_time_current_sec()),
+ cfs_time_sub(req->rq_deadline, olddl));
+
+ return rc;
+}
+
+/**
+ * Wind down request pool \a pool.
+ * Frees all requests from the pool too
+ */
+void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
+{
+ struct list_head *l, *tmp;
+ struct ptlrpc_request *req;
+
+ LASSERT(pool != NULL);
+
+ spin_lock(&pool->prp_lock);
+ list_for_each_safe(l, tmp, &pool->prp_req_list) {
+ req = list_entry(l, struct ptlrpc_request, rq_list);
+ list_del(&req->rq_list);
+ LASSERT(req->rq_reqbuf);
+ LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
+ OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size);
+ OBD_FREE(req, sizeof(*req));
+ }
+ spin_unlock(&pool->prp_lock);
+ OBD_FREE(pool, sizeof(*pool));
+}
+EXPORT_SYMBOL(ptlrpc_free_rq_pool);
+
+/**
+ * Allocates, initializes and adds \a num_rq requests to the pool \a pool
+ */
+void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
+{
+ int i;
+ int size = 1;
+
+ while (size < pool->prp_rq_size)
+ size <<= 1;
+
+ LASSERTF(list_empty(&pool->prp_req_list) ||
+ size == pool->prp_rq_size,
+ "Trying to change pool size with nonempty pool "
+ "from %d to %d bytes\n", pool->prp_rq_size, size);
+
+ spin_lock(&pool->prp_lock);
+ pool->prp_rq_size = size;
+ for (i = 0; i < num_rq; i++) {
+ struct ptlrpc_request *req;
+ struct lustre_msg *msg;
+
+ spin_unlock(&pool->prp_lock);
+ OBD_ALLOC(req, sizeof(struct ptlrpc_request));
+ if (!req)
+ return;
+ OBD_ALLOC_LARGE(msg, size);
+ if (!msg) {
+ OBD_FREE(req, sizeof(struct ptlrpc_request));
+ return;
+ }
+ req->rq_reqbuf = msg;
+ req->rq_reqbuf_len = size;
+ req->rq_pool = pool;
+ spin_lock(&pool->prp_lock);
+ list_add_tail(&req->rq_list, &pool->prp_req_list);
+ }
+ spin_unlock(&pool->prp_lock);
+ return;
+}
+EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool);
+
+/**
+ * Create and initialize new request pool with given attributes:
+ * \a num_rq - initial number of requests to create for the pool
+ * \a msgsize - maximum message size possible for requests in this pool
+ * \a populate_pool - function to be called when more requests need to be added
+ * to the pool
+ * Returns pointer to newly created pool or NULL on error.
+ */
+struct ptlrpc_request_pool *
+ptlrpc_init_rq_pool(int num_rq, int msgsize,
+ void (*populate_pool)(struct ptlrpc_request_pool *, int))
+{
+ struct ptlrpc_request_pool *pool;
+
+ OBD_ALLOC(pool, sizeof (struct ptlrpc_request_pool));
+ if (!pool)
+ return NULL;
+
+ /* Request the next power of two for the allocation, because the
+ kernel would do exactly this internally */
+
+ spin_lock_init(&pool->prp_lock);
+ INIT_LIST_HEAD(&pool->prp_req_list);
+ pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD;
+ pool->prp_populate = populate_pool;
+
+ populate_pool(pool, num_rq);
+
+ if (list_empty(&pool->prp_req_list)) {
+ /* have not allocated a single request for the pool */
+ OBD_FREE(pool, sizeof (struct ptlrpc_request_pool));
+ pool = NULL;
+ }
+ return pool;
+}
+EXPORT_SYMBOL(ptlrpc_init_rq_pool);
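+
+/*
+ * Illustrative sketch, not part of the original patch: typical use of the
+ * pool helpers above, mirroring osc_setup() earlier in this patch.  The
+ * pool is sized for the maximum number of RPCs in flight plus a small
+ * reserve and is torn down with ptlrpc_free_rq_pool():
+ *
+ *	imp->imp_rq_pool = ptlrpc_init_rq_pool(cl_max_rpcs_in_flight + 2,
+ *					       OST_MAXREQSIZE,
+ *					       ptlrpc_add_rqs_to_pool);
+ *	...
+ *	if (imp->imp_rq_pool)
+ *		ptlrpc_free_rq_pool(imp->imp_rq_pool);
+ */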
+
+/**
+ * Fetches one request from pool \a pool
+ */
+static struct ptlrpc_request *
+ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool)
+{
+ struct ptlrpc_request *request;
+ struct lustre_msg *reqbuf;
+
+ if (!pool)
+ return NULL;
+
+ spin_lock(&pool->prp_lock);
+
+ /* See if we have anything in the pool, and bail out if not.
+ * In the writeout path, where this matters, this is safe to do
+ * because nothing is lost in this case: when some in-flight requests
+ * complete, this code will be called again. */
+ if (unlikely(list_empty(&pool->prp_req_list))) {
+ spin_unlock(&pool->prp_lock);
+ return NULL;
+ }
+
+ request = list_entry(pool->prp_req_list.next, struct ptlrpc_request,
+ rq_list);
+ list_del_init(&request->rq_list);
+ spin_unlock(&pool->prp_lock);
+
+ LASSERT(request->rq_reqbuf);
+ LASSERT(request->rq_pool);
+
+ reqbuf = request->rq_reqbuf;
+ memset(request, 0, sizeof(*request));
+ request->rq_reqbuf = reqbuf;
+ request->rq_reqbuf_len = pool->prp_rq_size;
+ request->rq_pool = pool;
+
+ return request;
+}
+
+/**
+ * Returns freed \a request to pool.
+ */
+static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
+{
+ struct ptlrpc_request_pool *pool = request->rq_pool;
+
+ spin_lock(&pool->prp_lock);
+ LASSERT(list_empty(&request->rq_list));
+ LASSERT(!request->rq_receiving_reply);
+ list_add_tail(&request->rq_list, &pool->prp_req_list);
+ spin_unlock(&pool->prp_lock);
+}
+
+static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
+ __u32 version, int opcode,
+ int count, __u32 *lengths, char **bufs,
+ struct ptlrpc_cli_ctx *ctx)
+{
+ struct obd_import *imp = request->rq_import;
+ int rc;
+
+ if (unlikely(ctx))
+ request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
+ else {
+ rc = sptlrpc_req_get_ctx(request);
+ if (rc)
+ GOTO(out_free, rc);
+ }
+
+ sptlrpc_req_set_flavor(request, opcode);
+
+ rc = lustre_pack_request(request, imp->imp_msg_magic, count,
+ lengths, bufs);
+ if (rc) {
+ LASSERT(!request->rq_pool);
+ GOTO(out_ctx, rc);
+ }
+
+ lustre_msg_add_version(request->rq_reqmsg, version);
+ request->rq_send_state = LUSTRE_IMP_FULL;
+ request->rq_type = PTL_RPC_MSG_REQUEST;
+ request->rq_export = NULL;
+
+ request->rq_req_cbid.cbid_fn = request_out_callback;
+ request->rq_req_cbid.cbid_arg = request;
+
+ request->rq_reply_cbid.cbid_fn = reply_in_callback;
+ request->rq_reply_cbid.cbid_arg = request;
+
+ request->rq_reply_deadline = 0;
+ request->rq_phase = RQ_PHASE_NEW;
+ request->rq_next_phase = RQ_PHASE_UNDEFINED;
+
+ request->rq_request_portal = imp->imp_client->cli_request_portal;
+ request->rq_reply_portal = imp->imp_client->cli_reply_portal;
+
+ ptlrpc_at_set_req_timeout(request);
+
+ spin_lock_init(&request->rq_lock);
+ INIT_LIST_HEAD(&request->rq_list);
+ INIT_LIST_HEAD(&request->rq_timed_list);
+ INIT_LIST_HEAD(&request->rq_replay_list);
+ INIT_LIST_HEAD(&request->rq_ctx_chain);
+ INIT_LIST_HEAD(&request->rq_set_chain);
+ INIT_LIST_HEAD(&request->rq_history_list);
+ INIT_LIST_HEAD(&request->rq_exp_list);
+ init_waitqueue_head(&request->rq_reply_waitq);
+ init_waitqueue_head(&request->rq_set_waitq);
+ request->rq_xid = ptlrpc_next_xid();
+ atomic_set(&request->rq_refcount, 1);
+
+ lustre_msg_set_opc(request->rq_reqmsg, opcode);
+
+ return 0;
+out_ctx:
+ sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
+out_free:
+ class_import_put(imp);
+ return rc;
+}
+
+int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
+ __u32 version, int opcode, char **bufs,
+ struct ptlrpc_cli_ctx *ctx)
+{
+ int count;
+
+ count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT);
+ return __ptlrpc_request_bufs_pack(request, version, opcode, count,
+ request->rq_pill.rc_area[RCL_CLIENT],
+ bufs, ctx);
+}
+EXPORT_SYMBOL(ptlrpc_request_bufs_pack);
+
+/**
+ * Pack request buffers for network transfer, performing necessary encryption
+ * steps if necessary.
+ */
+int ptlrpc_request_pack(struct ptlrpc_request *request,
+ __u32 version, int opcode)
+{
+ int rc;
+ rc = ptlrpc_request_bufs_pack(request, version, opcode, NULL, NULL);
+ if (rc)
+ return rc;
+
+ /* Some old 1.8 clients (< 1.8.7) will LASSERT that the size of the
+ * ptlrpc_body sent from the server equals the local ptlrpc_body size,
+ * so we have to send the old ptlrpc_body to keep interoperability with
+ * these clients.
+ *
+ * Only three kinds of server->client RPCs so far:
+ * - LDLM_BL_CALLBACK
+ * - LDLM_CP_CALLBACK
+ * - LDLM_GL_CALLBACK
+ *
+ * XXX This should be removed whenever we drop interoperability with
+ * these old clients.
+ */
+ if (opcode == LDLM_BL_CALLBACK || opcode == LDLM_CP_CALLBACK ||
+ opcode == LDLM_GL_CALLBACK)
+ req_capsule_shrink(&request->rq_pill, &RMF_PTLRPC_BODY,
+ sizeof(struct ptlrpc_body_v2), RCL_CLIENT);
+
+ return rc;
+}
+EXPORT_SYMBOL(ptlrpc_request_pack);
+
+/**
+ * Helper function to allocate new request on import \a imp
+ * and possibly using existing request from pool \a pool if provided.
+ * Returns allocated request structure with import field filled or
+ * NULL on error.
+ */
+static inline
+struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp,
+ struct ptlrpc_request_pool *pool)
+{
+ struct ptlrpc_request *request = NULL;
+
+ if (pool)
+ request = ptlrpc_prep_req_from_pool(pool);
+
+ if (!request)
+ OBD_ALLOC_PTR(request);
+
+ if (request) {
+ LASSERTF((unsigned long)imp > 0x1000, "%p", imp);
+ LASSERT(imp != LP_POISON);
+ LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p",
+ imp->imp_client);
+ LASSERT(imp->imp_client != LP_POISON);
+
+ request->rq_import = class_import_get(imp);
+ } else {
+ CERROR("request allocation out of memory\n");
+ }
+
+ return request;
+}
+
+/**
+ * Helper function for creating a request.
+ * Calls __ptlrpc_request_alloc to allocate new request structure and inits
+ * buffer structures according to capsule template \a format.
+ * Returns allocated request structure pointer or NULL on error.
+ */
+static struct ptlrpc_request *
+ptlrpc_request_alloc_internal(struct obd_import *imp,
+ struct ptlrpc_request_pool *pool,
+ const struct req_format *format)
+{
+ struct ptlrpc_request *request;
+
+ request = __ptlrpc_request_alloc(imp, pool);
+ if (request == NULL)
+ return NULL;
+
+ req_capsule_init(&request->rq_pill, request, RCL_CLIENT);
+ req_capsule_set(&request->rq_pill, format);
+ return request;
+}
+
+/**
+ * Allocate new request structure for import \a imp and initialize its
+ * buffer structure according to capsule template \a format.
+ */
+struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
+ const struct req_format *format)
+{
+ return ptlrpc_request_alloc_internal(imp, NULL, format);
+}
+EXPORT_SYMBOL(ptlrpc_request_alloc);
+
+/**
+ * Allocate new request structure for import \a imp from pool \a pool and
+ * initialize its buffer structure according to capsule template \a format.
+ */
+struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
+ struct ptlrpc_request_pool *pool,
+ const struct req_format *format)
+{
+ return ptlrpc_request_alloc_internal(imp, pool, format);
+}
+EXPORT_SYMBOL(ptlrpc_request_alloc_pool);
+
+/**
+ * For requests not from pool, free memory of the request structure.
+ * For requests obtained from a pool earlier, return request back to pool.
+ */
+void ptlrpc_request_free(struct ptlrpc_request *request)
+{
+ if (request->rq_pool)
+ __ptlrpc_free_req_to_pool(request);
+ else
+ OBD_FREE_PTR(request);
+}
+EXPORT_SYMBOL(ptlrpc_request_free);
+
+/**
+ * Allocate new request for operation \a opcode and immediately pack it for
+ * network transfer.
+ * Only used for simple requests like OBD_PING where the only important
+ * part of the request is operation itself.
+ * Returns allocated request or NULL on error.
+ */
+struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
+ const struct req_format *format,
+ __u32 version, int opcode)
+{
+ struct ptlrpc_request *req = ptlrpc_request_alloc(imp, format);
+ int rc;
+
+ if (req) {
+ rc = ptlrpc_request_pack(req, version, opcode);
+ if (rc) {
+ ptlrpc_request_free(req);
+ req = NULL;
+ }
+ }
+ return req;
+}
+EXPORT_SYMBOL(ptlrpc_request_alloc_pack);
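+
+/*
+ * Illustrative sketch, not part of the original patch: the usual pattern
+ * for a simple request such as OBD_PING.  RQF_OBD_PING, LUSTRE_OBD_VERSION
+ * and ptlrpc_request_set_replen() are assumed names not defined in this
+ * file:
+ *
+ *	struct ptlrpc_request *req;
+ *
+ *	req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
+ *					LUSTRE_OBD_VERSION, OBD_PING);
+ *	if (req == NULL)
+ *		return -ENOMEM;
+ *	ptlrpc_request_set_replen(req);
+ */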
+
+/**
+ * Prepare request (fetched from pool \a pool if not NULL) on import \a imp
+ * for operation \a opcode. Request would contain \a count buffers.
+ * Sizes of buffers are described in array \a lengths and buffers themselves
+ * are provided by a pointer \a bufs.
+ * Returns prepared request structure pointer or NULL on error.
+ */
+struct ptlrpc_request *
+ptlrpc_prep_req_pool(struct obd_import *imp,
+ __u32 version, int opcode,
+ int count, __u32 *lengths, char **bufs,
+ struct ptlrpc_request_pool *pool)
+{
+ struct ptlrpc_request *request;
+ int rc;
+
+ request = __ptlrpc_request_alloc(imp, pool);
+ if (!request)
+ return NULL;
+
+ rc = __ptlrpc_request_bufs_pack(request, version, opcode, count,
+ lengths, bufs, NULL);
+ if (rc) {
+ ptlrpc_request_free(request);
+ request = NULL;
+ }
+ return request;
+}
+EXPORT_SYMBOL(ptlrpc_prep_req_pool);
+
+/**
+ * Same as ptlrpc_prep_req_pool, but without pool
+ */
+struct ptlrpc_request *
+ptlrpc_prep_req(struct obd_import *imp, __u32 version, int opcode, int count,
+ __u32 *lengths, char **bufs)
+{
+ return ptlrpc_prep_req_pool(imp, version, opcode, count, lengths, bufs,
+ NULL);
+}
+EXPORT_SYMBOL(ptlrpc_prep_req);
+
+/**
+ * Allocate and initialize new request set structure.
+ * Returns a pointer to the newly allocated set structure or NULL on error.
+ */
+struct ptlrpc_request_set *ptlrpc_prep_set(void)
+{
+ struct ptlrpc_request_set *set;
+
+ OBD_ALLOC(set, sizeof *set);
+ if (!set)
+ return NULL;
+ atomic_set(&set->set_refcount, 1);
+ INIT_LIST_HEAD(&set->set_requests);
+ init_waitqueue_head(&set->set_waitq);
+ atomic_set(&set->set_new_count, 0);
+ atomic_set(&set->set_remaining, 0);
+ spin_lock_init(&set->set_new_req_lock);
+ INIT_LIST_HEAD(&set->set_new_requests);
+ INIT_LIST_HEAD(&set->set_cblist);
+ set->set_max_inflight = UINT_MAX;
+ set->set_producer = NULL;
+ set->set_producer_arg = NULL;
+ set->set_rc = 0;
+
+ return set;
+}
+EXPORT_SYMBOL(ptlrpc_prep_set);
+
+/**
+ * Allocate and initialize new request set structure with flow control
+ * extension. This extension allows controlling the number of requests in-flight
+ * for the whole set. A callback function to generate requests must be provided
+ * and the request set will keep the number of requests sent over the wire to
+ * @max_inflight.
+ * Returns a pointer to the newly allocated set structure or NULL on error.
+ */
+struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
+ void *arg)
+
+{
+ struct ptlrpc_request_set *set;
+
+ set = ptlrpc_prep_set();
+ if (!set)
+ return NULL;
+
+ set->set_max_inflight = max;
+ set->set_producer = func;
+ set->set_producer_arg = arg;
+
+ return set;
+}
+EXPORT_SYMBOL(ptlrpc_prep_fcset);
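+
+/*
+ * Illustrative sketch, not part of the original patch: a minimal producer
+ * callback for a flow-controlled set.  Per ptlrpc_set_producer() below, the
+ * producer must return -ENOENT once it has no more requests to generate;
+ * build_next_req() is a placeholder:
+ *
+ *	static int my_producer(struct ptlrpc_request_set *set, void *arg)
+ *	{
+ *		struct ptlrpc_request *req = build_next_req(arg);
+ *
+ *		if (req == NULL)
+ *			return -ENOENT;
+ *		ptlrpc_set_add_req(set, req);
+ *		return 0;
+ *	}
+ *
+ *	set = ptlrpc_prep_fcset(8, my_producer, arg);
+ */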
+
+/**
+ * Wind down and free request set structure previously allocated with
+ * ptlrpc_prep_set.
+ * Ensures that all requests on the set have completed and removes
+ * all requests from the request list in a set.
+ * If any unsent requests happen to be on the list, pretend that they got
+ * an error in flight and call their completion handlers.
+ */
+void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
+{
+ struct list_head *tmp;
+ struct list_head *next;
+ int expected_phase;
+ int n = 0;
+
+ /* Requests on the set should either all be completed, or all be new */
+ expected_phase = (atomic_read(&set->set_remaining) == 0) ?
+ RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
+ list_for_each (tmp, &set->set_requests) {
+ struct ptlrpc_request *req =
+ list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
+
+ LASSERT(req->rq_phase == expected_phase);
+ n++;
+ }
+
+ LASSERTF(atomic_read(&set->set_remaining) == 0 ||
+ atomic_read(&set->set_remaining) == n, "%d / %d\n",
+ atomic_read(&set->set_remaining), n);
+
+ list_for_each_safe(tmp, next, &set->set_requests) {
+ struct ptlrpc_request *req =
+ list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
+ list_del_init(&req->rq_set_chain);
+
+ LASSERT(req->rq_phase == expected_phase);
+
+ if (req->rq_phase == RQ_PHASE_NEW) {
+ ptlrpc_req_interpret(NULL, req, -EBADR);
+ atomic_dec(&set->set_remaining);
+ }
+
+ spin_lock(&req->rq_lock);
+ req->rq_set = NULL;
+ req->rq_invalid_rqset = 0;
+ spin_unlock(&req->rq_lock);
+
+ ptlrpc_req_finished (req);
+ }
+
+ LASSERT(atomic_read(&set->set_remaining) == 0);
+
+ ptlrpc_reqset_put(set);
+}
+EXPORT_SYMBOL(ptlrpc_set_destroy);
+
+/**
+ * Add a callback function \a fn to the set.
+ * This function will be called when all requests on this set are completed.
+ * The function will be passed \a data argument.
+ */
+int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
+ set_interpreter_func fn, void *data)
+{
+ struct ptlrpc_set_cbdata *cbdata;
+
+ OBD_ALLOC_PTR(cbdata);
+ if (cbdata == NULL)
+ return -ENOMEM;
+
+ cbdata->psc_interpret = fn;
+ cbdata->psc_data = data;
+ list_add_tail(&cbdata->psc_item, &set->set_cblist);
+
+ return 0;
+}
+EXPORT_SYMBOL(ptlrpc_set_add_cb);
+
+/**
+ * Add a new request to the general purpose request set.
+ * Assumes request reference from the caller.
+ */
+void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
+ struct ptlrpc_request *req)
+{
+ LASSERT(list_empty(&req->rq_set_chain));
+
+ /* The set takes over the caller's request reference */
+ list_add_tail(&req->rq_set_chain, &set->set_requests);
+ req->rq_set = set;
+ atomic_inc(&set->set_remaining);
+ req->rq_queued_time = cfs_time_current();
+
+ if (req->rq_reqmsg != NULL)
+ lustre_msg_set_jobid(req->rq_reqmsg, NULL);
+
+ if (set->set_producer != NULL)
+ /* If the request set has a producer callback, the RPC must be
+ * sent straight away */
+ ptlrpc_send_new_req(req);
+}
+EXPORT_SYMBOL(ptlrpc_set_add_req);
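+
+/*
+ * Illustrative sketch, not part of the original patch: the basic request
+ * set pattern.  ptlrpc_set_wait() is the synchronous waiter referred to in
+ * the ptlrpc_check_set() comments below; it is not defined in this file:
+ *
+ *	struct ptlrpc_request_set *set;
+ *
+ *	set = ptlrpc_prep_set();
+ *	if (set == NULL)
+ *		return -ENOMEM;
+ *	ptlrpc_set_add_req(set, req1);
+ *	ptlrpc_set_add_req(set, req2);
+ *	rc = ptlrpc_set_wait(set);
+ *	ptlrpc_set_destroy(set);
+ */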
+
+/**
+ * Add a request to a request set with a dedicated server thread
+ * and wake the thread to do any necessary processing.
+ * Currently only used for ptlrpcd.
+ */
+void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
+ struct ptlrpc_request *req)
+{
+ struct ptlrpc_request_set *set = pc->pc_set;
+ int count, i;
+
+ LASSERT(req->rq_set == NULL);
+ LASSERT(test_bit(LIOD_STOP, &pc->pc_flags) == 0);
+
+ spin_lock(&set->set_new_req_lock);
+ /*
+ * The set takes over the caller's request reference.
+ */
+ req->rq_set = set;
+ req->rq_queued_time = cfs_time_current();
+ list_add_tail(&req->rq_set_chain, &set->set_new_requests);
+ count = atomic_inc_return(&set->set_new_count);
+ spin_unlock(&set->set_new_req_lock);
+
+ /* Only need to call wakeup once for the first entry. */
+ if (count == 1) {
+ wake_up(&set->set_waitq);
+
+ /* XXX: It may be unnecessary to wake up all the partners. But to
+ * guarantee the async RPC can be processed ASAP, we have
+ * no better choice. It may be fixed in the future. */
+ for (i = 0; i < pc->pc_npartners; i++)
+ wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
+ }
+}
+EXPORT_SYMBOL(ptlrpc_set_add_new_req);
+
+/**
+ * Based on the current state of the import, determine if the request
+ * can be sent, is an error, or should be delayed.
+ *
+ * Returns true if this request should be delayed. If false, and
+ * *status is set, then the request cannot be sent and *status is the
+ * error code. If false and *status is 0, then the request can be sent.
+ *
+ * The imp->imp_lock must be held.
+ */
+static int ptlrpc_import_delay_req(struct obd_import *imp,
+ struct ptlrpc_request *req, int *status)
+{
+ int delay = 0;
+
+ LASSERT(status != NULL);
+ *status = 0;
+
+ if (req->rq_ctx_init || req->rq_ctx_fini) {
+ /* always allow ctx init/fini RPCs to go through */
+ } else if (imp->imp_state == LUSTRE_IMP_NEW) {
+ DEBUG_REQ(D_ERROR, req, "Uninitialized import.");
+ *status = -EIO;
+ } else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
+ /* pings may safely race with umount */
+ DEBUG_REQ(lustre_msg_get_opc(req->rq_reqmsg) == OBD_PING ?
+ D_HA : D_ERROR, req, "IMP_CLOSED ");
+ *status = -EIO;
+ } else if (ptlrpc_send_limit_expired(req)) {
+ /* probably doesn't need to be a D_ERROR after initial testing */
+ DEBUG_REQ(D_ERROR, req, "send limit expired ");
+ *status = -EIO;
+ } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
+ imp->imp_state == LUSTRE_IMP_CONNECTING) {
+ /* allow CONNECT even if import is invalid */ ;
+ if (atomic_read(&imp->imp_inval_count) != 0) {
+ DEBUG_REQ(D_ERROR, req, "invalidate in flight");
+ *status = -EIO;
+ }
+ } else if (imp->imp_invalid || imp->imp_obd->obd_no_recov) {
+ if (!imp->imp_deactive)
+ DEBUG_REQ(D_NET, req, "IMP_INVALID");
+ *status = -ESHUTDOWN; /* bz 12940 */
+ } else if (req->rq_import_generation != imp->imp_generation) {
+ DEBUG_REQ(D_ERROR, req, "req wrong generation:");
+ *status = -EIO;
+ } else if (req->rq_send_state != imp->imp_state) {
+ /* invalidate in progress - any requests should be dropped */
+ if (atomic_read(&imp->imp_inval_count) != 0) {
+ DEBUG_REQ(D_ERROR, req, "invalidate in flight");
+ *status = -EIO;
+ } else if (imp->imp_dlm_fake || req->rq_no_delay) {
+ *status = -EWOULDBLOCK;
+ } else if (req->rq_allow_replay &&
+ (imp->imp_state == LUSTRE_IMP_REPLAY ||
+ imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS ||
+ imp->imp_state == LUSTRE_IMP_REPLAY_WAIT ||
+ imp->imp_state == LUSTRE_IMP_RECOVER)) {
+ DEBUG_REQ(D_HA, req, "allow during recovery.\n");
+ } else {
+ delay = 1;
+ }
+ }
+
+ return delay;
+}
+
+/**
+ * Decide if the error message regarding provided request \a req
+ * should be printed to the console or not.
+ * Makes its decision based on request status and other properties.
+ * Returns 1 to print error on the system console or 0 if not.
+ */
+static int ptlrpc_console_allow(struct ptlrpc_request *req)
+{
+ __u32 opc;
+ int err;
+
+ LASSERT(req->rq_reqmsg != NULL);
+ opc = lustre_msg_get_opc(req->rq_reqmsg);
+
+ /* Suppress particular reconnect errors which are to be expected. No
+ * errors are suppressed for the initial connection on an import */
+ if ((lustre_handle_is_used(&req->rq_import->imp_remote_handle)) &&
+ (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT)) {
+
+ /* Suppress timed out reconnect requests */
+ if (req->rq_timedout)
+ return 0;
+
+ /* Suppress unavailable/again reconnect requests */
+ err = lustre_msg_get_status(req->rq_repmsg);
+ if (err == -ENODEV || err == -EAGAIN)
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * Check request processing status.
+ * Returns the status.
+ */
+static int ptlrpc_check_status(struct ptlrpc_request *req)
+{
+ int err;
+
+ err = lustre_msg_get_status(req->rq_repmsg);
+ if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
+ struct obd_import *imp = req->rq_import;
+ __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
+ if (ptlrpc_console_allow(req))
+ LCONSOLE_ERROR_MSG(0x011, "%s: Communicating with %s,"
+ " operation %s failed with %d.\n",
+ imp->imp_obd->obd_name,
+ libcfs_nid2str(
+ imp->imp_connection->c_peer.nid),
+ ll_opcode2str(opc), err);
+ return err < 0 ? err : -EINVAL;
+ }
+
+ if (err < 0) {
+ DEBUG_REQ(D_INFO, req, "status is %d", err);
+ } else if (err > 0) {
+ /* XXX: translate this error from net to host */
+ DEBUG_REQ(D_INFO, req, "status is %d", err);
+ }
+
+ return err;
+}
+
+/**
+ * save pre-versions of objects into request for replay.
+ * Versions are obtained from server reply.
+ * used for VBR.
+ */
+static void ptlrpc_save_versions(struct ptlrpc_request *req)
+{
+ struct lustre_msg *repmsg = req->rq_repmsg;
+ struct lustre_msg *reqmsg = req->rq_reqmsg;
+ __u64 *versions = lustre_msg_get_versions(repmsg);
+
+ if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
+ return;
+
+ LASSERT(versions);
+ lustre_msg_set_versions(reqmsg, versions);
+ CDEBUG(D_INFO, "Client save versions ["LPX64"/"LPX64"]\n",
+ versions[0], versions[1]);
+}
+
+/**
+ * Callback function called when client receives RPC reply for \a req.
+ * Returns 0 on success or error code.
+ * The return value will be assigned to req->rq_status by the caller
+ * as request processing status.
+ * This function also decides if the request needs to be saved for later replay.
+ */
+static int after_reply(struct ptlrpc_request *req)
+{
+ struct obd_import *imp = req->rq_import;
+ struct obd_device *obd = req->rq_import->imp_obd;
+ int rc;
+ struct timeval work_start;
+ long timediff;
+
+ LASSERT(obd != NULL);
+ /* repbuf must be unlinked */
+ LASSERT(!req->rq_receiving_reply && !req->rq_must_unlink);
+
+ if (req->rq_reply_truncate) {
+ if (ptlrpc_no_resend(req)) {
+ DEBUG_REQ(D_ERROR, req, "reply buffer overflow,"
+ " expected: %d, actual size: %d",
+ req->rq_nob_received, req->rq_repbuf_len);
+ return -EOVERFLOW;
+ }
+
+ sptlrpc_cli_free_repbuf(req);
+ /* Pass the required reply buffer size (including
+ * space for early reply).
+ * NB: no need to round up because alloc_repbuf
+ * will round it up */
+ req->rq_replen = req->rq_nob_received;
+ req->rq_nob_received = 0;
+ req->rq_resend = 1;
+ return 0;
+ }
+
+ /*
+ * NB Until this point, the whole of the incoming message,
+ * including buflens, status etc is in the sender's byte order.
+ */
+ rc = sptlrpc_cli_unwrap_reply(req);
+ if (rc) {
+ DEBUG_REQ(D_ERROR, req, "unwrap reply failed (%d):", rc);
+ return rc;
+ }
+
+ /*
+ * Security layer unwrap might ask to resend this request.
+ */
+ if (req->rq_resend)
+ return 0;
+
+ rc = unpack_reply(req);
+ if (rc)
+ return rc;
+
+ /* retry indefinitely on EINPROGRESS */
+ if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
+ ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) {
+ time_t now = cfs_time_current_sec();
+
+ DEBUG_REQ(D_RPCTRACE, req, "Resending request on EINPROGRESS");
+ req->rq_resend = 1;
+ req->rq_nr_resend++;
+
+ /* allocate new xid to avoid reply reconstruction */
+ if (!req->rq_bulk) {
+ /* new xid is already allocated for bulk in
+ * ptlrpc_check_set() */
+ req->rq_xid = ptlrpc_next_xid();
+ DEBUG_REQ(D_RPCTRACE, req, "Allocating new xid for "
+ "resend on EINPROGRESS");
+ }
+
+ /* Readjust the timeout for current conditions */
+ ptlrpc_at_set_req_timeout(req);
+ /* delay resend to give the server a chance to get ready.
+ * The delay is increased by 1s on every resend and is capped at
+ * the current request timeout (i.e. obd_timeout if AT is off,
+ * or AT service time x 125% + 5s, see at_est2timeout) */
+ if (req->rq_nr_resend > req->rq_timeout)
+ req->rq_sent = now + req->rq_timeout;
+ else
+ req->rq_sent = now + req->rq_nr_resend;
+
+ return 0;
+ }
+
+ do_gettimeofday(&work_start);
+ timediff = cfs_timeval_sub(&work_start, &req->rq_arrival_time, NULL);
+ if (obd->obd_svc_stats != NULL) {
+ lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
+ timediff);
+ ptlrpc_lprocfs_rpc_sent(req, timediff);
+ }
+
+ if (lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_REPLY &&
+ lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_ERR) {
+ DEBUG_REQ(D_ERROR, req, "invalid packet received (type=%u)",
+ lustre_msg_get_type(req->rq_repmsg));
+ return -EPROTO;
+ }
+
+ if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING)
+ CFS_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, cfs_fail_val);
+ ptlrpc_at_adj_service(req, lustre_msg_get_timeout(req->rq_repmsg));
+ ptlrpc_at_adj_net_latency(req,
+ lustre_msg_get_service_time(req->rq_repmsg));
+
+ rc = ptlrpc_check_status(req);
+ imp->imp_connect_error = rc;
+
+ if (rc) {
+ /*
+ * Either we've been evicted, or the server has failed for
+ * some reason. Try to reconnect, and if that fails, punt to
+ * the upcall.
+ */
+ if (ll_rpc_recoverable_error(rc)) {
+ if (req->rq_send_state != LUSTRE_IMP_FULL ||
+ imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
+ return rc;
+ }
+ ptlrpc_request_handle_notconn(req);
+ return rc;
+ }
+ } else {
+ /*
+ * Let's look if server sent slv. Do it only for RPC with
+ * rc == 0.
+ */
+ ldlm_cli_update_pool(req);
+ }
+
+ /*
+ * Store transno in reqmsg for replay.
+ */
+ if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
+ req->rq_transno = lustre_msg_get_transno(req->rq_repmsg);
+ lustre_msg_set_transno(req->rq_reqmsg, req->rq_transno);
+ }
+
+ if (imp->imp_replayable) {
+ spin_lock(&imp->imp_lock);
+ /*
+ * No point in adding already-committed requests to the replay
+ * list, we will just remove them immediately. b=9829
+ */
+ if (req->rq_transno != 0 &&
+ (req->rq_transno >
+ lustre_msg_get_last_committed(req->rq_repmsg) ||
+ req->rq_replay)) {
+ /** version recovery */
+ ptlrpc_save_versions(req);
+ ptlrpc_retain_replayable_request(req, imp);
+ } else if (req->rq_commit_cb != NULL) {
+ spin_unlock(&imp->imp_lock);
+ req->rq_commit_cb(req);
+ spin_lock(&imp->imp_lock);
+ }
+
+ /*
+ * Replay-enabled imports return commit-status information.
+ */
+ if (lustre_msg_get_last_committed(req->rq_repmsg)) {
+ imp->imp_peer_committed_transno =
+ lustre_msg_get_last_committed(req->rq_repmsg);
+ }
+
+ ptlrpc_free_committed(imp);
+
+ if (!list_empty(&imp->imp_replay_list)) {
+ struct ptlrpc_request *last;
+
+ last = list_entry(imp->imp_replay_list.prev,
+ struct ptlrpc_request,
+ rq_replay_list);
+ /*
+ * Requests with rq_replay stay on the list even if no
+ * commit is expected.
+ */
+ if (last->rq_transno > imp->imp_peer_committed_transno)
+ ptlrpc_pinger_commit_expected(imp);
+ }
+
+ spin_unlock(&imp->imp_lock);
+ }
+
+ return rc;
+}
+
+/**
+ * Helper function to send request \a req over the network for the first time
+ * Also adjusts request phase.
+ * Returns 0 on success or error code.
+ */
+static int ptlrpc_send_new_req(struct ptlrpc_request *req)
+{
+ struct obd_import *imp = req->rq_import;
+ int rc;
+
+ LASSERT(req->rq_phase == RQ_PHASE_NEW);
+ if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()) &&
+ (!req->rq_generation_set ||
+ req->rq_import_generation == imp->imp_generation))
+ return 0;
+
+ ptlrpc_rqphase_move(req, RQ_PHASE_RPC);
+
+ spin_lock(&imp->imp_lock);
+
+ if (!req->rq_generation_set)
+ req->rq_import_generation = imp->imp_generation;
+
+ if (ptlrpc_import_delay_req(imp, req, &rc)) {
+ spin_lock(&req->rq_lock);
+ req->rq_waiting = 1;
+ spin_unlock(&req->rq_lock);
+
+ DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: "
+ "(%s != %s)", lustre_msg_get_status(req->rq_reqmsg),
+ ptlrpc_import_state_name(req->rq_send_state),
+ ptlrpc_import_state_name(imp->imp_state));
+ LASSERT(list_empty(&req->rq_list));
+ list_add_tail(&req->rq_list, &imp->imp_delayed_list);
+ atomic_inc(&req->rq_import->imp_inflight);
+ spin_unlock(&imp->imp_lock);
+ return 0;
+ }
+
+ if (rc != 0) {
+ spin_unlock(&imp->imp_lock);
+ req->rq_status = rc;
+ ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
+ return rc;
+ }
+
+ LASSERT(list_empty(&req->rq_list));
+ list_add_tail(&req->rq_list, &imp->imp_sending_list);
+ atomic_inc(&req->rq_import->imp_inflight);
+ spin_unlock(&imp->imp_lock);
+
+ lustre_msg_set_status(req->rq_reqmsg, current_pid());
+
+ rc = sptlrpc_req_refresh_ctx(req, -1);
+ if (rc) {
+ if (req->rq_err) {
+ req->rq_status = rc;
+ return 1;
+ } else {
+ req->rq_wait_ctx = 1;
+ return 0;
+ }
+ }
+
+ CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc"
+ " %s:%s:%d:"LPU64":%s:%d\n", current_comm(),
+ imp->imp_obd->obd_uuid.uuid,
+ lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
+ libcfs_nid2str(imp->imp_connection->c_peer.nid),
+ lustre_msg_get_opc(req->rq_reqmsg));
+
+ rc = ptl_send_rpc(req, 0);
+ if (rc) {
+ DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
+ req->rq_net_err = 1;
+ return rc;
+ }
+ return 0;
+}
+
+static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set)
+{
+ int remaining, rc;
+
+ LASSERT(set->set_producer != NULL);
+
+ remaining = atomic_read(&set->set_remaining);
+
+ /* populate the ->set_requests list with requests until we
+ * reach the maximum number of RPCs in flight for this set */
+ while (atomic_read(&set->set_remaining) < set->set_max_inflight) {
+ rc = set->set_producer(set, set->set_producer_arg);
+ if (rc == -ENOENT) {
+ /* no more RPC to produce */
+ set->set_producer = NULL;
+ set->set_producer_arg = NULL;
+ return 0;
+ }
+ }
+
+ return (atomic_read(&set->set_remaining) - remaining);
+}
+
+/**
+ * this sends any unsent RPCs in \a set and returns 1 if all are sent
+ * and no more replies are expected.
+ * (it is possible to get fewer replies than requests sent, e.g. due to timed-out
+ * requests or requests that we had trouble sending out)
+ */
+int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
+{
+ struct list_head *tmp, *next;
+ int force_timer_recalc = 0;
+
+ if (atomic_read(&set->set_remaining) == 0)
+ return 1;
+
+ list_for_each_safe(tmp, next, &set->set_requests) {
+ struct ptlrpc_request *req =
+ list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
+ struct obd_import *imp = req->rq_import;
+ int unregistered = 0;
+ int rc = 0;
+
+ if (req->rq_phase == RQ_PHASE_NEW &&
+ ptlrpc_send_new_req(req)) {
+ force_timer_recalc = 1;
+ }
+
+ /* delayed send - skip */
+ if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent)
+ continue;
+
+ /* delayed resend - skip */
+ if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend &&
+ req->rq_sent > cfs_time_current_sec())
+ continue;
+
+ if (!(req->rq_phase == RQ_PHASE_RPC ||
+ req->rq_phase == RQ_PHASE_BULK ||
+ req->rq_phase == RQ_PHASE_INTERPRET ||
+ req->rq_phase == RQ_PHASE_UNREGISTERING ||
+ req->rq_phase == RQ_PHASE_COMPLETE)) {
+ DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
+ LBUG();
+ }
+
+ if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
+ LASSERT(req->rq_next_phase != req->rq_phase);
+ LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED);
+
+ /*
+ * Skip processing until reply is unlinked. We
+ * can't return to pool before that and we can't
+ * call interpret before that. We need to make
+ * sure that all rdma transfers finished and will
+ * not corrupt any data.
+ */
+ if (ptlrpc_client_recv_or_unlink(req) ||
+ ptlrpc_client_bulk_active(req))
+ continue;
+
+ /*
+ * Turn fail_loc off to prevent it from looping
+ * forever.
+ */
+ if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
+ OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK,
+ OBD_FAIL_ONCE);
+ }
+ if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
+ OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK,
+ OBD_FAIL_ONCE);
+ }
+
+ /*
+ * Move to next phase if reply was successfully
+ * unlinked.
+ */
+ ptlrpc_rqphase_move(req, req->rq_next_phase);
+ }
+
+ if (req->rq_phase == RQ_PHASE_COMPLETE)
+ continue;
+
+ if (req->rq_phase == RQ_PHASE_INTERPRET)
+ GOTO(interpret, req->rq_status);
+
+ /*
+ * Note that this also will start async reply unlink.
+ */
+ if (req->rq_net_err && !req->rq_timedout) {
+ ptlrpc_expire_one_request(req, 1);
+
+ /*
+ * Check if we still need to wait for unlink.
+ */
+ if (ptlrpc_client_recv_or_unlink(req) ||
+ ptlrpc_client_bulk_active(req))
+ continue;
+ /* If there is no need to resend, fail it now. */
+ if (req->rq_no_resend) {
+ if (req->rq_status == 0)
+ req->rq_status = -EIO;
+ ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
+ GOTO(interpret, req->rq_status);
+ } else {
+ continue;
+ }
+ }
+
+ if (req->rq_err) {
+ spin_lock(&req->rq_lock);
+ req->rq_replied = 0;
+ spin_unlock(&req->rq_lock);
+ if (req->rq_status == 0)
+ req->rq_status = -EIO;
+ ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
+ GOTO(interpret, req->rq_status);
+ }
+
+ /* ptlrpc_set_wait->l_wait_event sets lwi_allow_intr
+ * so it sets rq_intr regardless of individual rpc
+ * timeouts. The synchronous IO waiting path sets
+ * rq_intr irrespective of whether ptlrpcd
+ * has seen a timeout. Our policy is to only interpret
+ * interrupted rpcs after they have timed out, so we
+ * need to enforce that here.
+ */
+
+ if (req->rq_intr && (req->rq_timedout || req->rq_waiting ||
+ req->rq_wait_ctx)) {
+ req->rq_status = -EINTR;
+ ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
+ GOTO(interpret, req->rq_status);
+ }
+
+ if (req->rq_phase == RQ_PHASE_RPC) {
+ if (req->rq_timedout || req->rq_resend ||
+ req->rq_waiting || req->rq_wait_ctx) {
+ int status;
+
+ if (!ptlrpc_unregister_reply(req, 1))
+ continue;
+
+ spin_lock(&imp->imp_lock);
+ if (ptlrpc_import_delay_req(imp, req, &status)){
+ /* put on delay list - only if we wait for
+ * recovery to finish - before send */
+ list_del_init(&req->rq_list);
+ list_add_tail(&req->rq_list,
+ &imp->
+ imp_delayed_list);
+ spin_unlock(&imp->imp_lock);
+ continue;
+ }
+
+ if (status != 0) {
+ req->rq_status = status;
+ ptlrpc_rqphase_move(req,
+ RQ_PHASE_INTERPRET);
+ spin_unlock(&imp->imp_lock);
+ GOTO(interpret, req->rq_status);
+ }
+ if (ptlrpc_no_resend(req) &&
+ !req->rq_wait_ctx) {
+ req->rq_status = -ENOTCONN;
+ ptlrpc_rqphase_move(req,
+ RQ_PHASE_INTERPRET);
+ spin_unlock(&imp->imp_lock);
+ GOTO(interpret, req->rq_status);
+ }
+
+ list_del_init(&req->rq_list);
+ list_add_tail(&req->rq_list,
+ &imp->imp_sending_list);
+
+ spin_unlock(&imp->imp_lock);
+
+ spin_lock(&req->rq_lock);
+ req->rq_waiting = 0;
+ spin_unlock(&req->rq_lock);
+
+ if (req->rq_timedout || req->rq_resend) {
+ /* This is re-sending anyway,
+ * so mark req as resend. */
+ spin_lock(&req->rq_lock);
+ req->rq_resend = 1;
+ spin_unlock(&req->rq_lock);
+ if (req->rq_bulk) {
+ __u64 old_xid;
+
+ if (!ptlrpc_unregister_bulk(req, 1))
+ continue;
+
+ /* ensure previous bulk fails */
+ old_xid = req->rq_xid;
+ req->rq_xid = ptlrpc_next_xid();
+ CDEBUG(D_HA, "resend bulk "
+ "old x"LPU64
+ " new x"LPU64"\n",
+ old_xid, req->rq_xid);
+ }
+ }
+ /*
+ * rq_wait_ctx is only touched by ptlrpcd,
+ * so no lock is needed here.
+ */
+ status = sptlrpc_req_refresh_ctx(req, -1);
+ if (status) {
+ if (req->rq_err) {
+ req->rq_status = status;
+ spin_lock(&req->rq_lock);
+ req->rq_wait_ctx = 0;
+ spin_unlock(&req->rq_lock);
+ force_timer_recalc = 1;
+ } else {
+ spin_lock(&req->rq_lock);
+ req->rq_wait_ctx = 1;
+ spin_unlock(&req->rq_lock);
+ }
+
+ continue;
+ } else {
+ spin_lock(&req->rq_lock);
+ req->rq_wait_ctx = 0;
+ spin_unlock(&req->rq_lock);
+ }
+
+ rc = ptl_send_rpc(req, 0);
+ if (rc) {
+ DEBUG_REQ(D_HA, req,
+ "send failed: rc = %d", rc);
+ force_timer_recalc = 1;
+ spin_lock(&req->rq_lock);
+ req->rq_net_err = 1;
+ spin_unlock(&req->rq_lock);
+ }
+ /* need to reset the timeout */
+ force_timer_recalc = 1;
+ }
+
+ spin_lock(&req->rq_lock);
+
+ if (ptlrpc_client_early(req)) {
+ ptlrpc_at_recv_early_reply(req);
+ spin_unlock(&req->rq_lock);
+ continue;
+ }
+
+ /* Still waiting for a reply? */
+ if (ptlrpc_client_recv(req)) {
+ spin_unlock(&req->rq_lock);
+ continue;
+ }
+
+ /* Did we actually receive a reply? */
+ if (!ptlrpc_client_replied(req)) {
+ spin_unlock(&req->rq_lock);
+ continue;
+ }
+
+ spin_unlock(&req->rq_lock);
+
+ /* unlink from net because we are going to
+ * swab in-place of reply buffer */
+ unregistered = ptlrpc_unregister_reply(req, 1);
+ if (!unregistered)
+ continue;
+
+ req->rq_status = after_reply(req);
+ if (req->rq_resend)
+ continue;
+
+ /* If there is no bulk associated with this request,
+ * then we're done and should let the interpreter
+ * process the reply. Similarly if the RPC returned
+ * an error, and therefore the bulk will never arrive.
+ */
+ if (req->rq_bulk == NULL || req->rq_status < 0) {
+ ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
+ GOTO(interpret, req->rq_status);
+ }
+
+ ptlrpc_rqphase_move(req, RQ_PHASE_BULK);
+ }
+
+ LASSERT(req->rq_phase == RQ_PHASE_BULK);
+ if (ptlrpc_client_bulk_active(req))
+ continue;
+
+ if (req->rq_bulk->bd_failure) {
+ /* The RPC reply arrived OK, but the bulk screwed
+ * up! Dead weird, since the server told us the RPC
+ * was good after getting the REPLY for its GET or
+ * the ACK for its PUT. */
+ DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
+ req->rq_status = -EIO;
+ }
+
+ ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
+
+ interpret:
+ LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
+
+ /* This moves to the "unregistering" phase; we need to wait for
+ * reply unlink. */
+ if (!unregistered && !ptlrpc_unregister_reply(req, 1)) {
+ /* start async bulk unlink too */
+ ptlrpc_unregister_bulk(req, 1);
+ continue;
+ }
+
+ if (!ptlrpc_unregister_bulk(req, 1))
+ continue;
+
+ /* By the time interpret is called, receiving should already
+ * be finished. */
+ LASSERT(!req->rq_receiving_reply);
+
+ ptlrpc_req_interpret(env, req, req->rq_status);
+
+ ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
+
+ CDEBUG(req->rq_reqmsg != NULL ? D_RPCTRACE : 0,
+ "Completed RPC pname:cluuid:pid:xid:nid:"
+ "opc %s:%s:%d:"LPU64":%s:%d\n",
+ current_comm(), imp->imp_obd->obd_uuid.uuid,
+ lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
+ libcfs_nid2str(imp->imp_connection->c_peer.nid),
+ lustre_msg_get_opc(req->rq_reqmsg));
+
+ spin_lock(&imp->imp_lock);
+ /* The request may already be off the sending or delayed list.
+ * This may happen when it was marked erroneous because
+ * ptlrpc_import_delay_req(req, status) found it impossible to
+ * allow sending this rpc and returned *status != 0. */
+ if (!list_empty(&req->rq_list)) {
+ list_del_init(&req->rq_list);
+ atomic_dec(&imp->imp_inflight);
+ }
+ spin_unlock(&imp->imp_lock);
+
+ atomic_dec(&set->set_remaining);
+ wake_up_all(&imp->imp_recovery_waitq);
+
+ if (set->set_producer) {
+ /* produce a new request if possible */
+ if (ptlrpc_set_producer(set) > 0)
+ force_timer_recalc = 1;
+
+ /* free the request that has just been completed
+ * in order not to pollute set->set_requests */
+ list_del_init(&req->rq_set_chain);
+ spin_lock(&req->rq_lock);
+ req->rq_set = NULL;
+ req->rq_invalid_rqset = 0;
+ spin_unlock(&req->rq_lock);
+
+ /* record rq_status to compute the final status later */
+ if (req->rq_status != 0)
+ set->set_rc = req->rq_status;
+ ptlrpc_req_finished(req);
+ }
+ }
+
+ /* If we hit an error, we want to recover promptly. */
+ return atomic_read(&set->set_remaining) == 0 || force_timer_recalc;
+}
+EXPORT_SYMBOL(ptlrpc_check_set);
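A reader's sketch of the request phase machine driven by ptlrpc_check_set() above, reconstructed from the code visible in this hunk (the diagram is an aid for review, not part of the patch):

/*
 * RQ_PHASE_NEW ---> RQ_PHASE_RPC ---> RQ_PHASE_BULK (only if rq_bulk)
 *                        |                  |
 *                        v                  v
 *               RQ_PHASE_INTERPRET <--------+
 *                        |
 *                        v
 *               RQ_PHASE_COMPLETE
 *
 * RQ_PHASE_UNREGISTERING is a transient phase entered while waiting for
 * LNet to confirm reply/bulk buffer unlink; see ptlrpc_unregister_reply().
 */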
+
+/**
+ * Time out request \a req. If \a async_unlink is set, do not wait
+ * until LNet actually confirms network buffer unlinking.
+ * Return 1 if we should give up further retrying attempts or 0 otherwise.
+ */
+int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
+{
+ struct obd_import *imp = req->rq_import;
+ int rc = 0;
+
+ spin_lock(&req->rq_lock);
+ req->rq_timedout = 1;
+ spin_unlock(&req->rq_lock);
+
+ DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent "CFS_DURATION_T
+ "/real "CFS_DURATION_T"]",
+ req->rq_net_err ? "failed due to network error" :
+ ((req->rq_real_sent == 0 ||
+ cfs_time_before(req->rq_real_sent, req->rq_sent) ||
+ cfs_time_aftereq(req->rq_real_sent, req->rq_deadline)) ?
+ "timed out for sent delay" : "timed out for slow reply"),
+ req->rq_sent, req->rq_real_sent);
+
+ if (imp != NULL && obd_debug_peer_on_timeout)
+ LNetCtl(IOC_LIBCFS_DEBUG_PEER, &imp->imp_connection->c_peer);
+
+ ptlrpc_unregister_reply(req, async_unlink);
+ ptlrpc_unregister_bulk(req, async_unlink);
+
+ if (obd_dump_on_timeout)
+ libcfs_debug_dumplog();
+
+ if (imp == NULL) {
+ DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
+ return 1;
+ }
+
+ atomic_inc(&imp->imp_timeouts);
+
+ /* The DLM server doesn't want recovery run on its imports. */
+ if (imp->imp_dlm_fake)
+ return 1;
+
+ /* If this request is for recovery or other primordial tasks,
+ * then error it out here. */
+ if (req->rq_ctx_init || req->rq_ctx_fini ||
+ req->rq_send_state != LUSTRE_IMP_FULL ||
+ imp->imp_obd->obd_no_recov) {
+ DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)",
+ ptlrpc_import_state_name(req->rq_send_state),
+ ptlrpc_import_state_name(imp->imp_state));
+ spin_lock(&req->rq_lock);
+ req->rq_status = -ETIMEDOUT;
+ req->rq_err = 1;
+ spin_unlock(&req->rq_lock);
+ return 1;
+ }
+
+ /* if a request can't be resent we can't wait for an answer after
+ the timeout */
+ if (ptlrpc_no_resend(req)) {
+ DEBUG_REQ(D_RPCTRACE, req, "TIMEOUT-NORESEND:");
+ rc = 1;
+ }
+
+ ptlrpc_fail_import(imp, lustre_msg_get_conn_cnt(req->rq_reqmsg));
+
+ return rc;
+}
+
+/**
+ * Time out all uncompleted requests in the request set pointed to by \a data.
+ * Callback used when waiting on sets with l_wait_event.
+ * Always returns 1.
+ */
+int ptlrpc_expired_set(void *data)
+{
+ struct ptlrpc_request_set *set = data;
+ struct list_head *tmp;
+ time_t now = cfs_time_current_sec();
+
+ LASSERT(set != NULL);
+
+ /*
+ * A timeout expired. See which reqs it applies to...
+ */
+ list_for_each (tmp, &set->set_requests) {
+ struct ptlrpc_request *req =
+ list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
+
+ /* don't expire request waiting for context */
+ if (req->rq_wait_ctx)
+ continue;
+
+ /* Request in-flight? */
+ if (!((req->rq_phase == RQ_PHASE_RPC &&
+ !req->rq_waiting && !req->rq_resend) ||
+ (req->rq_phase == RQ_PHASE_BULK)))
+ continue;
+
+ if (req->rq_timedout || /* already dealt with */
+ req->rq_deadline > now) /* not expired */
+ continue;
+
+ /* Deal with this guy. Do it asynchronously to not block
+ * ptlrpcd thread. */
+ ptlrpc_expire_one_request(req, 1);
+ }
+
+ /*
+ * When waiting for a whole set, we always break out of the
+ * sleep so we can recalculate the timeout, or enable interrupts
+ * if everyone's timed out.
+ */
+ return 1;
+}
+EXPORT_SYMBOL(ptlrpc_expired_set);
+
+/**
+ * Sets rq_intr flag in \a req under spinlock.
+ */
+void ptlrpc_mark_interrupted(struct ptlrpc_request *req)
+{
+ spin_lock(&req->rq_lock);
+ req->rq_intr = 1;
+ spin_unlock(&req->rq_lock);
+}
+EXPORT_SYMBOL(ptlrpc_mark_interrupted);
+
+/**
+ * Interrupts (sets interrupted flag) all uncompleted requests in
+ * a set \a data. Callback for l_wait_event for interruptible waits.
+ */
+void ptlrpc_interrupted_set(void *data)
+{
+ struct ptlrpc_request_set *set = data;
+ struct list_head *tmp;
+
+ LASSERT(set != NULL);
+ CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set);
+
+ list_for_each(tmp, &set->set_requests) {
+ struct ptlrpc_request *req =
+ list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
+
+ if (req->rq_phase != RQ_PHASE_RPC &&
+ req->rq_phase != RQ_PHASE_UNREGISTERING)
+ continue;
+
+ ptlrpc_mark_interrupted(req);
+ }
+}
+EXPORT_SYMBOL(ptlrpc_interrupted_set);
+
+/**
+ * Get the smallest timeout in the set; this does NOT set a timeout.
+ */
+int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
+{
+ struct list_head *tmp;
+ time_t now = cfs_time_current_sec();
+ int timeout = 0;
+ struct ptlrpc_request *req;
+ int deadline;
+
+ SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
+
+ list_for_each(tmp, &set->set_requests) {
+ req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+
+ /*
+ * Request in-flight?
+ */
+ if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) ||
+ (req->rq_phase == RQ_PHASE_BULK) ||
+ (req->rq_phase == RQ_PHASE_NEW)))
+ continue;
+
+ /*
+ * Already timed out.
+ */
+ if (req->rq_timedout)
+ continue;
+
+ /*
+ * Waiting for ctx.
+ */
+ if (req->rq_wait_ctx)
+ continue;
+
+ if (req->rq_phase == RQ_PHASE_NEW)
+ deadline = req->rq_sent;
+ else if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend)
+ deadline = req->rq_sent;
+ else
+ deadline = req->rq_sent + req->rq_timeout;
+
+ if (deadline <= now) /* actually expired already */
+ timeout = 1; /* ASAP */
+ else if (timeout == 0 || timeout > deadline - now)
+ timeout = deadline - now;
+ }
+ return timeout;
+}
+EXPORT_SYMBOL(ptlrpc_set_next_timeout);
+
+/**
+ * Send all unsent requests from the set and then wait until all
+ * requests in the set complete (either get a reply, time out, get an
+ * error, or are otherwise interrupted).
+ * Returns 0 on success or error code otherwise.
+ */
+int ptlrpc_set_wait(struct ptlrpc_request_set *set)
+{
+ struct list_head *tmp;
+ struct ptlrpc_request *req;
+ struct l_wait_info lwi;
+ int rc, timeout;
+
+ if (set->set_producer)
+ (void)ptlrpc_set_producer(set);
+ else
+ list_for_each(tmp, &set->set_requests) {
+ req = list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
+ if (req->rq_phase == RQ_PHASE_NEW)
+ (void)ptlrpc_send_new_req(req);
+ }
+
+ if (list_empty(&set->set_requests))
+ return 0;
+
+ do {
+ timeout = ptlrpc_set_next_timeout(set);
+
+ /* wait until all complete, interrupted, or an in-flight
+ * req times out */
+ CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n",
+ set, timeout);
+
+ if (timeout == 0 && !cfs_signal_pending())
+ /*
+ * No requests are in-flight (either timed out
+ * or delayed), so we can allow interrupts.
+ * We still want to block for a limited time,
+ * so we allow interrupts during the timeout.
+ */
+ lwi = LWI_TIMEOUT_INTR_ALL(cfs_time_seconds(1),
+ ptlrpc_expired_set,
+ ptlrpc_interrupted_set, set);
+ else
+ /*
+ * At least one request is in flight, so no
+ * interrupts are allowed. Wait until all
+ * complete, or an in-flight req times out.
+ */
+ lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
+ ptlrpc_expired_set, set);
+
+ rc = l_wait_event(set->set_waitq, ptlrpc_check_set(NULL, set), &lwi);
+
+ /* LU-769 - if we ignored the signal because it was already
+ * pending when we started, we need to handle it now or we risk
+ * it being ignored forever */
+ if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
+ cfs_signal_pending()) {
+ sigset_t blocked_sigs =
+ cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
+
+ /* In fact we only interrupt for the "fatal" signals
+ * like SIGINT or SIGKILL. We still ignore less
+ * important signals since ptlrpc set is not easily
+ * reentrant from userspace again */
+ if (cfs_signal_pending())
+ ptlrpc_interrupted_set(set);
+ cfs_restore_sigs(blocked_sigs);
+ }
+
+ LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
+
+ /* -EINTR => all requests have been flagged rq_intr so next
+ * check completes.
+ * -ETIMEDOUT => someone timed out. When all reqs have
+ * timed out, signals are enabled allowing completion with
+ * EINTR.
+ * I don't really care if we go once more round the loop in
+ * the error cases -eeb. */
+ if (rc == 0 && atomic_read(&set->set_remaining) == 0) {
+ list_for_each(tmp, &set->set_requests) {
+ req = list_entry(tmp, struct ptlrpc_request,
+ rq_set_chain);
+ spin_lock(&req->rq_lock);
+ req->rq_invalid_rqset = 1;
+ spin_unlock(&req->rq_lock);
+ }
+ }
+ } while (rc != 0 || atomic_read(&set->set_remaining) != 0);
+
+ LASSERT(atomic_read(&set->set_remaining) == 0);
+
+ rc = set->set_rc; /* rq_status of already freed requests if any */
+ list_for_each(tmp, &set->set_requests) {
+ req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
+
+ LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
+ if (req->rq_status != 0)
+ rc = req->rq_status;
+ }
+
+ if (set->set_interpret != NULL) {
+ int (*interpreter)(struct ptlrpc_request_set *set,void *,int) =
+ set->set_interpret;
+ rc = interpreter (set, set->set_arg, rc);
+ } else {
+ struct ptlrpc_set_cbdata *cbdata, *n;
+ int err;
+
+ list_for_each_entry_safe(cbdata, n,
+ &set->set_cblist, psc_item) {
+ list_del_init(&cbdata->psc_item);
+ err = cbdata->psc_interpret(set, cbdata->psc_data, rc);
+ if (err && !rc)
+ rc = err;
+ OBD_FREE_PTR(cbdata);
+ }
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(ptlrpc_set_wait);
+
+/**
+ * Helper function for request freeing.
+ * Called when request count reached zero and request needs to be freed.
+ * Removes request from all sorts of sending/replay lists it might be on,
+ * frees network buffers if any are present.
+ * If \a locked is set, that means the caller already holds the import imp_lock
+ * and so we do not need to take it again (for certain list manipulations).
+ */
+static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
+{
+ if (request == NULL) {
+ return;
+ }
+
+ LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
+ LASSERTF(request->rq_rqbd == NULL, "req %p\n",request);/* client-side */
+ LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
+ LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);
+ LASSERTF(list_empty(&request->rq_exp_list), "req %p\n", request);
+ LASSERTF(!request->rq_replay, "req %p\n", request);
+
+ req_capsule_fini(&request->rq_pill);
+
+ /* We must take it off the imp_replay_list first. Otherwise, we'll set
+ * request->rq_reqmsg to NULL while osc_close is dereferencing it. */
+ if (request->rq_import != NULL) {
+ if (!locked)
+ spin_lock(&request->rq_import->imp_lock);
+ list_del_init(&request->rq_replay_list);
+ if (!locked)
+ spin_unlock(&request->rq_import->imp_lock);
+ }
+ LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request);
+
+ if (atomic_read(&request->rq_refcount) != 0) {
+ DEBUG_REQ(D_ERROR, request,
+ "freeing request with nonzero refcount");
+ LBUG();
+ }
+
+ if (request->rq_repbuf != NULL)
+ sptlrpc_cli_free_repbuf(request);
+ if (request->rq_export != NULL) {
+ class_export_put(request->rq_export);
+ request->rq_export = NULL;
+ }
+ if (request->rq_import != NULL) {
+ class_import_put(request->rq_import);
+ request->rq_import = NULL;
+ }
+ if (request->rq_bulk != NULL)
+ ptlrpc_free_bulk_pin(request->rq_bulk);
+
+ if (request->rq_reqbuf != NULL || request->rq_clrbuf != NULL)
+ sptlrpc_cli_free_reqbuf(request);
+
+ if (request->rq_cli_ctx)
+ sptlrpc_req_put_ctx(request, !locked);
+
+ if (request->rq_pool)
+ __ptlrpc_free_req_to_pool(request);
+ else
+ OBD_FREE(request, sizeof(*request));
+}
+
+static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
+/**
+ * Drop one request reference. Must be called with import imp_lock held.
+ * When the reference count drops to zero, the request is freed.
+ */
+void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
+{
+ LASSERT(spin_is_locked(&request->rq_import->imp_lock));
+ (void)__ptlrpc_req_finished(request, 1);
+}
+EXPORT_SYMBOL(ptlrpc_req_finished_with_imp_lock);
+
+/**
+ * Helper function
+ * Drops one reference count for request \a request.
+ * \a locked set indicates that caller holds import imp_lock.
+ * Frees the request when the reference count reaches zero.
+ */
+static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
+{
+ if (request == NULL)
+ return 1;
+
+ if (request == LP_POISON ||
+ request->rq_reqmsg == LP_POISON) {
+ CERROR("dereferencing freed request (bug 575)\n");
+ LBUG();
+ return 1;
+ }
+
+ DEBUG_REQ(D_INFO, request, "refcount now %u",
+ atomic_read(&request->rq_refcount) - 1);
+
+ if (atomic_dec_and_test(&request->rq_refcount)) {
+ __ptlrpc_free_req(request, locked);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Drops one reference count for a request.
+ */
+void ptlrpc_req_finished(struct ptlrpc_request *request)
+{
+ __ptlrpc_req_finished(request, 0);
+}
+EXPORT_SYMBOL(ptlrpc_req_finished);
+
+/**
+ * Returns the xid of \a request.
+ */
+__u64 ptlrpc_req_xid(struct ptlrpc_request *request)
+{
+ return request->rq_xid;
+}
+EXPORT_SYMBOL(ptlrpc_req_xid);
+
+/**
+ * Disengage the client's reply buffer from the network
+ * NB does _NOT_ unregister any client-side bulk.
+ * IDEMPOTENT, but _not_ safe against concurrent callers.
+ * The request owner (i.e. the thread doing the I/O) must call...
+ * Returns 0 on success or 1 if unregistering cannot be made.
+ */
+int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
+{
+ int rc;
+ wait_queue_head_t *wq;
+ struct l_wait_info lwi;
+
+ /*
+ * Might sleep.
+ */
+ LASSERT(!in_interrupt());
+
+ /*
+ * Let's setup deadline for reply unlink.
+ */
+ if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
+ async && request->rq_reply_deadline == 0)
+ request->rq_reply_deadline = cfs_time_current_sec()+LONG_UNLINK;
+
+ /*
+ * Nothing left to do.
+ */
+ if (!ptlrpc_client_recv_or_unlink(request))
+ return 1;
+
+ LNetMDUnlink(request->rq_reply_md_h);
+
+ /*
+ * Let's check it once again.
+ */
+ if (!ptlrpc_client_recv_or_unlink(request))
+ return 1;
+
+ /*
+ * Move to "Unregistering" phase as reply was not unlinked yet.
+ */
+ ptlrpc_rqphase_move(request, RQ_PHASE_UNREGISTERING);
+
+ /*
+ * Do not wait for unlink to finish.
+ */
+ if (async)
+ return 0;
+
+ /*
+ * We have to l_wait_event() whatever the result, to give liblustre
+ * a chance to run reply_in_callback(), and to make sure we've
+ * unlinked before returning a req to the pool.
+ */
+ if (request->rq_set != NULL)
+ wq = &request->rq_set->set_waitq;
+ else
+ wq = &request->rq_reply_waitq;
+
+ for (;;) {
+ /* Network access will complete in finite time but the HUGE
+ * timeout lets us CWARN for visibility of sluggish NALs */
+ lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
+ cfs_time_seconds(1), NULL, NULL);
+ rc = l_wait_event(*wq, !ptlrpc_client_recv_or_unlink(request),
+ &lwi);
+ if (rc == 0) {
+ ptlrpc_rqphase_move(request, request->rq_next_phase);
+ return 1;
+ }
+
+ LASSERT(rc == -ETIMEDOUT);
+ DEBUG_REQ(D_WARNING, request, "Unexpectedly long timeout "
+ "rvcng=%d unlnk=%d", request->rq_receiving_reply,
+ request->rq_must_unlink);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ptlrpc_unregister_reply);
+
+/**
+ * Iterates through the replay_list on the import and prunes
+ * all requests that have a transno smaller than last_committed for the
+ * import and do not have rq_replay set.
+ * Since requests are sorted in transno order, it stops when meeting the first
+ * transno bigger than last_committed.
+ * Caller must hold imp->imp_lock.
+ */
+void ptlrpc_free_committed(struct obd_import *imp)
+{
+ struct list_head *tmp, *saved;
+ struct ptlrpc_request *req;
+ struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
+
+ LASSERT(imp != NULL);
+
+ LASSERT(spin_is_locked(&imp->imp_lock));
+
+
+ if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
+ imp->imp_generation == imp->imp_last_generation_checked) {
+ CDEBUG(D_INFO, "%s: skip recheck: last_committed "LPU64"\n",
+ imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
+ return;
+ }
+ CDEBUG(D_RPCTRACE, "%s: committing for last_committed "LPU64" gen %d\n",
+ imp->imp_obd->obd_name, imp->imp_peer_committed_transno,
+ imp->imp_generation);
+ imp->imp_last_transno_checked = imp->imp_peer_committed_transno;
+ imp->imp_last_generation_checked = imp->imp_generation;
+
+ list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
+ req = list_entry(tmp, struct ptlrpc_request,
+ rq_replay_list);
+
+ /* XXX ok to remove when 1357 resolved - rread 05/29/03 */
+ LASSERT(req != last_req);
+ last_req = req;
+
+ if (req->rq_transno == 0) {
+ DEBUG_REQ(D_EMERG, req, "zero transno during replay");
+ LBUG();
+ }
+ if (req->rq_import_generation < imp->imp_generation) {
+ DEBUG_REQ(D_RPCTRACE, req, "free request with old gen");
+ GOTO(free_req, 0);
+ }
+
+ if (req->rq_replay) {
+ DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
+ continue;
+ }
+
+ /* not yet committed */
+ if (req->rq_transno > imp->imp_peer_committed_transno) {
+ DEBUG_REQ(D_RPCTRACE, req, "stopping search");
+ break;
+ }
+
+ DEBUG_REQ(D_INFO, req, "commit (last_committed "LPU64")",
+ imp->imp_peer_committed_transno);
+free_req:
+ spin_lock(&req->rq_lock);
+ req->rq_replay = 0;
+ spin_unlock(&req->rq_lock);
+ if (req->rq_commit_cb != NULL)
+ req->rq_commit_cb(req);
+ list_del_init(&req->rq_replay_list);
+ __ptlrpc_req_finished(req, 1);
+ }
+}
+
+void ptlrpc_cleanup_client(struct obd_import *imp)
+{
+}
+EXPORT_SYMBOL(ptlrpc_cleanup_client);
+
+/**
+ * Schedule a previously sent request for resend.
+ * For bulk requests we assign a new xid (to avoid problems with
+ * lost replies and therefore several transfers landing in the same buffer
+ * from different sending attempts).
+ */
+void ptlrpc_resend_req(struct ptlrpc_request *req)
+{
+ DEBUG_REQ(D_HA, req, "going to resend");
+ lustre_msg_set_handle(req->rq_reqmsg, &(struct lustre_handle){ 0 });
+ req->rq_status = -EAGAIN;
+
+ spin_lock(&req->rq_lock);
+ req->rq_resend = 1;
+ req->rq_net_err = 0;
+ req->rq_timedout = 0;
+ if (req->rq_bulk) {
+ __u64 old_xid = req->rq_xid;
+
+ /* ensure previous bulk fails */
+ req->rq_xid = ptlrpc_next_xid();
+ CDEBUG(D_HA, "resend bulk old x"LPU64" new x"LPU64"\n",
+ old_xid, req->rq_xid);
+ }
+ ptlrpc_client_wake_req(req);
+ spin_unlock(&req->rq_lock);
+}
+EXPORT_SYMBOL(ptlrpc_resend_req);
+
+/* XXX: this function and rq_status are currently unused */
+void ptlrpc_restart_req(struct ptlrpc_request *req)
+{
+ DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
+ req->rq_status = -ERESTARTSYS;
+
+ spin_lock(&req->rq_lock);
+ req->rq_restart = 1;
+ req->rq_timedout = 0;
+ ptlrpc_client_wake_req(req);
+ spin_unlock(&req->rq_lock);
+}
+EXPORT_SYMBOL(ptlrpc_restart_req);
+
+/**
+ * Grab additional reference on a request \a req
+ */
+struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
+{
+ atomic_inc(&req->rq_refcount);
+ return req;
+}
+EXPORT_SYMBOL(ptlrpc_request_addref);
+
+/**
+ * Add a request to import replay_list.
+ * Must be called under imp_lock
+ */
+void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
+ struct obd_import *imp)
+{
+ struct list_head *tmp;
+
+ LASSERT(spin_is_locked(&imp->imp_lock));
+
+ if (req->rq_transno == 0) {
+ DEBUG_REQ(D_EMERG, req, "saving request with zero transno");
+ LBUG();
+ }
+
+ /* clear this for new requests that were resent as well
+ as resent replayed requests. */
+ lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
+
+ /* don't re-add requests that have been replayed */
+ if (!list_empty(&req->rq_replay_list))
+ return;
+
+ lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
+
+ LASSERT(imp->imp_replayable);
+ /* Balanced in ptlrpc_free_committed, usually. */
+ ptlrpc_request_addref(req);
+ list_for_each_prev(tmp, &imp->imp_replay_list) {
+ struct ptlrpc_request *iter =
+ list_entry(tmp, struct ptlrpc_request,
+ rq_replay_list);
+
+ /* We may have duplicate transnos if we create and then
+ * open a file, or for closes retained to match creating
+ * opens, so use req->rq_xid as a secondary key.
+ * (See bugs 684, 685, and 428.)
+ * XXX no longer needed, but all opens need transnos!
+ */
+ if (iter->rq_transno > req->rq_transno)
+ continue;
+
+ if (iter->rq_transno == req->rq_transno) {
+ LASSERT(iter->rq_xid != req->rq_xid);
+ if (iter->rq_xid > req->rq_xid)
+ continue;
+ }
+
+ list_add(&req->rq_replay_list, &iter->rq_replay_list);
+ return;
+ }
+
+ list_add(&req->rq_replay_list, &imp->imp_replay_list);
+}
+EXPORT_SYMBOL(ptlrpc_retain_replayable_request);
+
+/**
+ * Send request and wait until it completes.
+ * Returns request processing status.
+ */
+int ptlrpc_queue_wait(struct ptlrpc_request *req)
+{
+ struct ptlrpc_request_set *set;
+ int rc;
+
+ LASSERT(req->rq_set == NULL);
+ LASSERT(!req->rq_receiving_reply);
+
+ set = ptlrpc_prep_set();
+ if (set == NULL) {
+ CERROR("Unable to allocate ptlrpc set.");
+ return -ENOMEM;
+ }
+
+ /* for distributed debugging */
+ lustre_msg_set_status(req->rq_reqmsg, current_pid());
+
+ /* add a ref for the set (see comment in ptlrpc_set_add_req) */
+ ptlrpc_request_addref(req);
+ ptlrpc_set_add_req(set, req);
+ rc = ptlrpc_set_wait(set);
+ ptlrpc_set_destroy(set);
+
+ return rc;
+}
+EXPORT_SYMBOL(ptlrpc_queue_wait);
+
+struct ptlrpc_replay_async_args {
+ int praa_old_state;
+ int praa_old_status;
+};
+
+/**
+ * Callback used for replayed request reply processing.
+ * In case of a successful reply, calls the registered request replay callback.
+ * In case of error, restarts the replay process.
+ */
+static int ptlrpc_replay_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
+ void * data, int rc)
+{
+ struct ptlrpc_replay_async_args *aa = data;
+ struct obd_import *imp = req->rq_import;
+
+ atomic_dec(&imp->imp_replay_inflight);
+
+ if (!ptlrpc_client_replied(req)) {
+ CERROR("request replay timed out, restarting recovery\n");
+ GOTO(out, rc = -ETIMEDOUT);
+ }
+
+ if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR &&
+ (lustre_msg_get_status(req->rq_repmsg) == -ENOTCONN ||
+ lustre_msg_get_status(req->rq_repmsg) == -ENODEV))
+ GOTO(out, rc = lustre_msg_get_status(req->rq_repmsg));
+
+ /** VBR: check version failure */
+ if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) {
+ /** replay was failed due to version mismatch */
+ DEBUG_REQ(D_WARNING, req, "Version mismatch during replay\n");
+ spin_lock(&imp->imp_lock);
+ imp->imp_vbr_failed = 1;
+ imp->imp_no_lock_replay = 1;
+ spin_unlock(&imp->imp_lock);
+ lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
+ } else {
+ /** The transno had better not change over replay. */
+ LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
+ lustre_msg_get_transno(req->rq_repmsg) ||
+ lustre_msg_get_transno(req->rq_repmsg) == 0,
+ LPX64"/"LPX64"\n",
+ lustre_msg_get_transno(req->rq_reqmsg),
+ lustre_msg_get_transno(req->rq_repmsg));
+ }
+
+ spin_lock(&imp->imp_lock);
+ /** if replaying by version then a gap occurred on the server; do not trust locks */
+ if (lustre_msg_get_flags(req->rq_repmsg) & MSG_VERSION_REPLAY)
+ imp->imp_no_lock_replay = 1;
+ imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
+ spin_unlock(&imp->imp_lock);
+ LASSERT(imp->imp_last_replay_transno);
+
+ /* transaction number shouldn't be bigger than the latest replayed */
+ if (req->rq_transno > lustre_msg_get_transno(req->rq_reqmsg)) {
+ DEBUG_REQ(D_ERROR, req,
+ "Reported transno "LPU64" is bigger than the "
+ "replayed one: "LPU64, req->rq_transno,
+ lustre_msg_get_transno(req->rq_reqmsg));
+ GOTO(out, rc = -EINVAL);
+ }
+
+ DEBUG_REQ(D_HA, req, "got rep");
+
+ /* let the callback do fixups, possibly including in the request */
+ if (req->rq_replay_cb)
+ req->rq_replay_cb(req);
+
+ if (ptlrpc_client_replied(req) &&
+ lustre_msg_get_status(req->rq_repmsg) != aa->praa_old_status) {
+ DEBUG_REQ(D_ERROR, req, "status %d, old was %d",
+ lustre_msg_get_status(req->rq_repmsg),
+ aa->praa_old_status);
+ } else {
+ /* Put it back for re-replay. */
+ lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
+ }
+
+ /*
+ * Errors while replay can set transno to 0, but
+ * imp_last_replay_transno shouldn't be set to 0 anyway
+ */
+ if (req->rq_transno == 0)
+ CERROR("Transno is 0 during replay!\n");
+
+ /* continue with recovery */
+ rc = ptlrpc_import_recovery_state_machine(imp);
+ out:
+ req->rq_send_state = aa->praa_old_state;
+
+ if (rc != 0)
+ /* this replay failed, so restart recovery */
+ ptlrpc_connect_import(imp);
+
+ return rc;
+}
+
+/**
+ * Prepares and queues request for replay.
+ * Adds it to ptlrpcd queue for actual sending.
+ * Returns 0 on success.
+ */
+int ptlrpc_replay_req(struct ptlrpc_request *req)
+{
+ struct ptlrpc_replay_async_args *aa;
+
+ LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
+
+ LASSERT (sizeof (*aa) <= sizeof (req->rq_async_args));
+ aa = ptlrpc_req_async_args(req);
+ memset(aa, 0, sizeof *aa);
+
+ /* Prepare request to be resent with ptlrpcd */
+ aa->praa_old_state = req->rq_send_state;
+ req->rq_send_state = LUSTRE_IMP_REPLAY;
+ req->rq_phase = RQ_PHASE_NEW;
+ req->rq_next_phase = RQ_PHASE_UNDEFINED;
+ if (req->rq_repmsg)
+ aa->praa_old_status = lustre_msg_get_status(req->rq_repmsg);
+ req->rq_status = 0;
+ req->rq_interpret_reply = ptlrpc_replay_interpret;
+ /* Readjust the timeout for current conditions */
+ ptlrpc_at_set_req_timeout(req);
+
+ /* Tell server the net_latency, so the server can calculate how long
+ * it should wait for next replay */
+ lustre_msg_set_service_time(req->rq_reqmsg,
+ ptlrpc_at_get_net_latency(req));
+ DEBUG_REQ(D_HA, req, "REPLAY");
+
+ atomic_inc(&req->rq_import->imp_replay_inflight);
+ ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
+
+ ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
+ return 0;
+}
+EXPORT_SYMBOL(ptlrpc_replay_req);
+
+/**
+ * Aborts all in-flight requests on the import \a imp sending and delayed lists.
+ */
+void ptlrpc_abort_inflight(struct obd_import *imp)
+{
+ struct list_head *tmp, *n;
+
+ /* Make sure that no new requests get processed for this import.
+ * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
+ * this flag and then putting requests on sending_list or delayed_list.
+ */
+ spin_lock(&imp->imp_lock);
+
+ /* XXX locking? Maybe we should remove each request with the list
+ * locked? Also, how do we know if the requests on the list are
+ * being freed at this time?
+ */
+ list_for_each_safe(tmp, n, &imp->imp_sending_list) {
+ struct ptlrpc_request *req =
+ list_entry(tmp, struct ptlrpc_request, rq_list);
+
+ DEBUG_REQ(D_RPCTRACE, req, "inflight");
+
+ spin_lock(&req->rq_lock);
+ if (req->rq_import_generation < imp->imp_generation) {
+ req->rq_err = 1;
+ req->rq_status = -EIO;
+ ptlrpc_client_wake_req(req);
+ }
+ spin_unlock(&req->rq_lock);
+ }
+
+ list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
+ struct ptlrpc_request *req =
+ list_entry(tmp, struct ptlrpc_request, rq_list);
+
+ DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
+
+ spin_lock(&req->rq_lock);
+ if (req->rq_import_generation < imp->imp_generation) {
+ req->rq_err = 1;
+ req->rq_status = -EIO;
+ ptlrpc_client_wake_req(req);
+ }
+ spin_unlock(&req->rq_lock);
+ }
+
+ /* Last chance to free reqs left on the replay list, but we
+ * will still leak reqs that haven't committed. */
+ if (imp->imp_replayable)
+ ptlrpc_free_committed(imp);
+
+ spin_unlock(&imp->imp_lock);
+}
+EXPORT_SYMBOL(ptlrpc_abort_inflight);
+
+/**
+ * Abort all uncompleted requests in request set \a set
+ */
+void ptlrpc_abort_set(struct ptlrpc_request_set *set)
+{
+ struct list_head *tmp, *pos;
+
+ LASSERT(set != NULL);
+
+ list_for_each_safe(pos, tmp, &set->set_requests) {
+ struct ptlrpc_request *req =
+ list_entry(pos, struct ptlrpc_request,
+ rq_set_chain);
+
+ spin_lock(&req->rq_lock);
+ if (req->rq_phase != RQ_PHASE_RPC) {
+ spin_unlock(&req->rq_lock);
+ continue;
+ }
+
+ req->rq_err = 1;
+ req->rq_status = -EINTR;
+ ptlrpc_client_wake_req(req);
+ spin_unlock(&req->rq_lock);
+ }
+}
+
+static __u64 ptlrpc_last_xid;
+static spinlock_t ptlrpc_last_xid_lock;
+
+/**
+ * Initialize the XID for the node. This is common among all requests on
+ * this node, and only requires the property that it is monotonically
+ * increasing. It does not need to be sequential. Since this is also used
+ * as the RDMA match bits, it is important that a single client NOT have
+ * the same match bits for two different in-flight requests, hence we do
+ * NOT want to have an XID per target or similar.
+ *
+ * To avoid an unlikely collision between match bits after a client reboot
+ * (which would deliver old data into the wrong RDMA buffer) initialize
+ * the XID based on the current time, assuming a maximum RPC rate of 1M RPC/s.
+ * If the time is clearly incorrect, we instead use a 62-bit random number.
+ * In the worst case the random number will overflow 1M RPCs per second in
+ * 9133 years, or permutations thereof.
+ */
+#define YEAR_2004 (1ULL << 30)
+void ptlrpc_init_xid(void)
+{
+ time_t now = cfs_time_current_sec();
+
+ spin_lock_init(&ptlrpc_last_xid_lock);
+ if (now < YEAR_2004) {
+ cfs_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
+ ptlrpc_last_xid >>= 2;
+ ptlrpc_last_xid |= (1ULL << 61);
+ } else {
+ ptlrpc_last_xid = (__u64)now << 20;
+ }
+
+ /* Need to always be aligned to a power-of-two for multi-bulk BRW */
+ CLASSERT((PTLRPC_BULK_OPS_COUNT & (PTLRPC_BULK_OPS_COUNT - 1)) == 0);
+ ptlrpc_last_xid &= PTLRPC_BULK_OPS_MASK;
+}
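To make the time-based seeding above concrete, here is a small illustrative helper; the function name is made up and the numbers are examples only, so treat this as a sketch rather than part of the patch:

/* With now = 1384473600 (Nov 2013, well past YEAR_2004), the seed is
 * 1384473600 << 20, roughly 1.45e15. Each second of wall-clock time thus
 * leaves room for 2^20 (~1M) XIDs, matching the assumed 1M RPC/s rate. */
static __u64 example_xid_seed(time_t now)
{
	return ((__u64)now << 20) & PTLRPC_BULK_OPS_MASK;
}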
+
+/**
+ * Increase xid and returns resulting new value to the caller.
+ *
+ * Multi-bulk BRW RPCs consume multiple XIDs for each bulk transfer, starting
+ * at the returned xid, up to xid + PTLRPC_BULK_OPS_COUNT - 1. The BRW RPC
+ * itself uses the last bulk xid needed, so the server can determine
+ * the number of bulk transfers from the RPC XID and a bitmask. The starting
+ * xid must align to a power-of-two value.
+ *
+ * This is assumed to be true due to the initial ptlrpc_last_xid
+ * value also being initialized to a power-of-two value. LU-1431
+ */
+__u64 ptlrpc_next_xid(void)
+{
+ __u64 next;
+
+ spin_lock(&ptlrpc_last_xid_lock);
+ next = ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
+ ptlrpc_last_xid = next;
+ spin_unlock(&ptlrpc_last_xid_lock);
+
+ return next;
+}
+EXPORT_SYMBOL(ptlrpc_next_xid);
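The comment above implies the server can recover both the starting bulk xid and the number of transfers from the RPC XID alone. One plausible sketch of that derivation under the stated alignment rules follows; the helper names are hypothetical and do not correspond to actual Lustre functions:

/* Assumes the starting bulk xid is aligned to PTLRPC_BULK_OPS_COUNT and
 * the RPC carries the *last* bulk xid, as described above. */
static inline int example_bulk_count(__u64 rpc_xid)
{
	/* low bits give the offset of the last transfer from the start */
	return (int)(rpc_xid & (PTLRPC_BULK_OPS_COUNT - 1)) + 1;
}

static inline __u64 example_bulk_start_xid(__u64 rpc_xid)
{
	return rpc_xid & PTLRPC_BULK_OPS_MASK;
}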
+
+/**
+ * Get a glimpse at what next xid value might have been.
+ * Returns possible next xid.
+ */
+__u64 ptlrpc_sample_next_xid(void)
+{
+#if BITS_PER_LONG == 32
+ /* need to avoid possible word tearing on 32-bit systems */
+ __u64 next;
+
+ spin_lock(&ptlrpc_last_xid_lock);
+ next = ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
+ spin_unlock(&ptlrpc_last_xid_lock);
+
+ return next;
+#else
+ /* No need to lock, since returned value is racy anyways */
+ return ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
+#endif
+}
+EXPORT_SYMBOL(ptlrpc_sample_next_xid);
+
+/**
+ * Functions for operating ptlrpc workers.
+ *
+ * A ptlrpc work is a function that runs inside ptlrpc context.
+ * The callback shouldn't sleep, otherwise it will block that ptlrpcd thread.
+ *
+ * 1. after a work is created, it can be used many times, that is:
+ * handler = ptlrpcd_alloc_work();
+ * ptlrpcd_queue_work();
+ *
+ * queue it again when necessary:
+ * ptlrpcd_queue_work();
+ * ptlrpcd_destroy_work();
+ * 2. ptlrpcd_queue_work() can be called by multiple processes concurrently, but
+ * the work will only be queued once at any given time. Also, as its name
+ * implies, there may be a delay before it is actually run by a ptlrpcd thread.
+ */
+struct ptlrpc_work_async_args {
+ __u64 magic;
+ int (*cb)(const struct lu_env *, void *);
+ void *cbdata;
+};
+
+#define PTLRPC_WORK_MAGIC 0x6655436b676f4f44ULL /* magic code */
+
+static int work_interpreter(const struct lu_env *env,
+ struct ptlrpc_request *req, void *data, int rc)
+{
+ struct ptlrpc_work_async_args *arg = data;
+
+ LASSERT(arg->magic == PTLRPC_WORK_MAGIC);
+ LASSERT(arg->cb != NULL);
+
+ return arg->cb(env, arg->cbdata);
+}
+
+/**
+ * Create a work for ptlrpc.
+ */
+void *ptlrpcd_alloc_work(struct obd_import *imp,
+ int (*cb)(const struct lu_env *, void *), void *cbdata)
+{
+ struct ptlrpc_request *req = NULL;
+ struct ptlrpc_work_async_args *args;
+
+ might_sleep();
+
+ if (cb == NULL)
+ return ERR_PTR(-EINVAL);
+
+ /* copy some code from deprecated fakereq. */
+ OBD_ALLOC_PTR(req);
+ if (req == NULL) {
+ CERROR("ptlrpc: run out of memory!\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ req->rq_send_state = LUSTRE_IMP_FULL;
+ req->rq_type = PTL_RPC_MSG_REQUEST;
+ req->rq_import = class_import_get(imp);
+ req->rq_export = NULL;
+ req->rq_interpret_reply = work_interpreter;
+ /* don't want reply */
+ req->rq_receiving_reply = 0;
+ req->rq_must_unlink = 0;
+ req->rq_no_delay = req->rq_no_resend = 1;
+
+ spin_lock_init(&req->rq_lock);
+ INIT_LIST_HEAD(&req->rq_list);
+ INIT_LIST_HEAD(&req->rq_replay_list);
+ INIT_LIST_HEAD(&req->rq_set_chain);
+ INIT_LIST_HEAD(&req->rq_history_list);
+ INIT_LIST_HEAD(&req->rq_exp_list);
+ init_waitqueue_head(&req->rq_reply_waitq);
+ init_waitqueue_head(&req->rq_set_waitq);
+ atomic_set(&req->rq_refcount, 1);
+
+ CLASSERT (sizeof(*args) <= sizeof(req->rq_async_args));
+ args = ptlrpc_req_async_args(req);
+ args->magic = PTLRPC_WORK_MAGIC;
+ args->cb = cb;
+ args->cbdata = cbdata;
+
+ return req;
+}
+EXPORT_SYMBOL(ptlrpcd_alloc_work);
+
+void ptlrpcd_destroy_work(void *handler)
+{
+ struct ptlrpc_request *req = handler;
+
+ if (req)
+ ptlrpc_req_finished(req);
+}
+EXPORT_SYMBOL(ptlrpcd_destroy_work);
+
+int ptlrpcd_queue_work(void *handler)
+{
+ struct ptlrpc_request *req = handler;
+
+ /*
+ * Check if the req is already being queued.
+ *
+ * Here comes a trick: ptlrpc lacks a reliable way of checking whether
+ * a req is being processed. Here I have to use the refcount of the req
+ * for this purpose. This is okay because the caller should use this
+ * req as opaque data. - Jinshan
+ */
+ LASSERT(atomic_read(&req->rq_refcount) > 0);
+ if (atomic_read(&req->rq_refcount) > 1)
+ return -EBUSY;
+
+ if (atomic_inc_return(&req->rq_refcount) > 2) { /* race */
+ atomic_dec(&req->rq_refcount);
+ return -EBUSY;
+ }
+
+ /* re-initialize the req */
+ req->rq_timeout = obd_timeout;
+ req->rq_sent = cfs_time_current_sec();
+ req->rq_deadline = req->rq_sent + req->rq_timeout;
+ req->rq_reply_deadline = req->rq_deadline;
+ req->rq_phase = RQ_PHASE_INTERPRET;
+ req->rq_next_phase = RQ_PHASE_COMPLETE;
+ req->rq_xid = ptlrpc_next_xid();
+ req->rq_import_generation = req->rq_import->imp_generation;
+
+ ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+ return 0;
+}
+EXPORT_SYMBOL(ptlrpcd_queue_work);
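As a rough usage sketch of the work API described above (the callback and function names below are made up for illustration and are not part of the patch):

/* The callback runs in ptlrpcd context and must not sleep. */
static int example_work_cb(const struct lu_env *env, void *data)
{
	/* do a small amount of non-blocking work here */
	return 0;
}

static void example_use_work(struct obd_import *imp)
{
	void *handler = ptlrpcd_alloc_work(imp, example_work_cb, NULL);

	if (IS_ERR(handler))
		return;

	ptlrpcd_queue_work(handler);	/* may be queued again later */
	/* ... once no further runs are needed ... */
	ptlrpcd_destroy_work(handler);
}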
diff --git a/drivers/staging/lustre/lustre/ptlrpc/connection.c b/drivers/staging/lustre/lustre/ptlrpc/connection.c
new file mode 100644
index 0000000..17ca842
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/connection.c
@@ -0,0 +1,240 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_RPC
+#include <obd_support.h>
+#include <obd_class.h>
+#include <lustre_net.h>
+
+#include "ptlrpc_internal.h"
+
+static cfs_hash_t *conn_hash = NULL;
+static cfs_hash_ops_t conn_hash_ops;
+
+struct ptlrpc_connection *
+ptlrpc_connection_get(lnet_process_id_t peer, lnet_nid_t self,
+ struct obd_uuid *uuid)
+{
+ struct ptlrpc_connection *conn, *conn2;
+
+ conn = cfs_hash_lookup(conn_hash, &peer);
+ if (conn)
+ GOTO(out, conn);
+
+ OBD_ALLOC_PTR(conn);
+ if (!conn)
+ return NULL;
+
+ conn->c_peer = peer;
+ conn->c_self = self;
+ INIT_HLIST_NODE(&conn->c_hash);
+ atomic_set(&conn->c_refcount, 1);
+ if (uuid)
+ obd_str2uuid(&conn->c_remote_uuid, uuid->uuid);
+
+ /*
+ * Add the newly created conn to the hash; on key collision we
+ * lost a racing addition and must destroy our newly allocated
+ * connection. The object which exists in the hash will be
+ * returned and may be compared against our object.
+ */
+ /* In the function below, .hs_keycmp resolves to
+ * conn_keycmp() */
+ /* coverity[overrun-buffer-val] */
+ conn2 = cfs_hash_findadd_unique(conn_hash, &peer, &conn->c_hash);
+ if (conn != conn2) {
+ OBD_FREE_PTR(conn);
+ conn = conn2;
+ }
+out:
+ CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
+ conn, atomic_read(&conn->c_refcount),
+ libcfs_nid2str(conn->c_peer.nid));
+ return conn;
+}
+EXPORT_SYMBOL(ptlrpc_connection_get);
+
+int ptlrpc_connection_put(struct ptlrpc_connection *conn)
+{
+ int rc = 0;
+
+ if (!conn)
+ return rc;
+
+ LASSERT(atomic_read(&conn->c_refcount) > 1);
+
+ /*
+ * We do not remove connection from hashtable and
+ * do not free it even if last caller released ref,
+ * as we want to have it cached for the case it is
+ * needed again.
+ *
+ * Deallocating it and later creating a new connection
+ * again would be wasteful. This way we also avoid
+ * expensive locking to protect things from a get/put
+ * race when a found cached connection is freed by
+ * ptlrpc_connection_put().
+ *
+ * It will be freed later in module unload time,
+ * when ptlrpc_connection_fini()->lh_exit->conn_exit()
+ * path is called.
+ */
+ if (atomic_dec_return(&conn->c_refcount) == 1)
+ rc = 1;
+
+ CDEBUG(D_INFO, "PUT conn=%p refcount %d to %s\n",
+ conn, atomic_read(&conn->c_refcount),
+ libcfs_nid2str(conn->c_peer.nid));
+
+ return rc;
+}
+EXPORT_SYMBOL(ptlrpc_connection_put);
+
+struct ptlrpc_connection *
+ptlrpc_connection_addref(struct ptlrpc_connection *conn)
+{
+ atomic_inc(&conn->c_refcount);
+ CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
+ conn, atomic_read(&conn->c_refcount),
+ libcfs_nid2str(conn->c_peer.nid));
+
+ return conn;
+}
+EXPORT_SYMBOL(ptlrpc_connection_addref);
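A hedged usage sketch of the get/put lifecycle described in ptlrpc_connection_put() above; the function and variable names here are invented for illustration:

/* A connection obtained with ptlrpc_connection_get() is dropped with
 * ptlrpc_connection_put(), which returns 1 when the last user reference
 * went away; the object itself stays cached in conn_hash at refcount 1
 * until module unload. */
static void example_conn_use(lnet_process_id_t peer, lnet_nid_t self,
			     struct obd_uuid *uuid)
{
	struct ptlrpc_connection *conn;

	conn = ptlrpc_connection_get(peer, self, uuid);
	if (conn == NULL)
		return;

	/* ... use conn->c_peer, conn->c_remote_uuid ... */

	if (ptlrpc_connection_put(conn))
		CDEBUG(D_INFO, "last user reference dropped\n");
}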
+
+int ptlrpc_connection_init(void)
+{
+ conn_hash = cfs_hash_create("CONN_HASH",
+ HASH_CONN_CUR_BITS,
+ HASH_CONN_MAX_BITS,
+ HASH_CONN_BKT_BITS, 0,
+ CFS_HASH_MIN_THETA,
+ CFS_HASH_MAX_THETA,
+ &conn_hash_ops, CFS_HASH_DEFAULT);
+ if (!conn_hash)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL(ptlrpc_connection_init);
+
+void ptlrpc_connection_fini(void)
+{
+ cfs_hash_putref(conn_hash);
+}
+EXPORT_SYMBOL(ptlrpc_connection_fini);
+
+/*
+ * Hash operations for net_peer<->connection
+ */
+static unsigned
+conn_hashfn(cfs_hash_t *hs, const void *key, unsigned mask)
+{
+ return cfs_hash_djb2_hash(key, sizeof(lnet_process_id_t), mask);
+}
+
+static int
+conn_keycmp(const void *key, struct hlist_node *hnode)
+{
+ struct ptlrpc_connection *conn;
+ const lnet_process_id_t *conn_key;
+
+ LASSERT(key != NULL);
+ conn_key = (lnet_process_id_t*)key;
+ conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+
+ return conn_key->nid == conn->c_peer.nid &&
+ conn_key->pid == conn->c_peer.pid;
+}
+
+static void *
+conn_key(struct hlist_node *hnode)
+{
+ struct ptlrpc_connection *conn;
+ conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+ return &conn->c_peer;
+}
+
+static void *
+conn_object(struct hlist_node *hnode)
+{
+ return hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+}
+
+static void
+conn_get(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+ struct ptlrpc_connection *conn;
+
+ conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+ atomic_inc(&conn->c_refcount);
+}
+
+static void
+conn_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+ struct ptlrpc_connection *conn;
+
+ conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+ atomic_dec(&conn->c_refcount);
+}
+
+static void
+conn_exit(cfs_hash_t *hs, struct hlist_node *hnode)
+{
+ struct ptlrpc_connection *conn;
+
+ conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
+ /*
+ * Nothing should be left. The connection user has put it and
+ * the connection was also deleted from the table by this time,
+ * so we should have 0 refs.
+ */
+ LASSERTF(atomic_read(&conn->c_refcount) == 0,
+ "Busy connection with %d refs\n",
+ atomic_read(&conn->c_refcount));
+ OBD_FREE_PTR(conn);
+}
+
+static cfs_hash_ops_t conn_hash_ops = {
+ .hs_hash = conn_hashfn,
+ .hs_keycmp = conn_keycmp,
+ .hs_key = conn_key,
+ .hs_object = conn_object,
+ .hs_get = conn_get,
+ .hs_put_locked = conn_put_locked,
+ .hs_exit = conn_exit,
+};
diff --git a/drivers/staging/lustre/lustre/ptlrpc/errno.c b/drivers/staging/lustre/lustre/ptlrpc/errno.c
new file mode 100644
index 0000000..1c10063
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/errno.c
@@ -0,0 +1,380 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.txt
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (C) 2011 FUJITSU LIMITED. All rights reserved.
+ *
+ * Copyright (c) 2013, Intel Corporation.
+ */
+
+#include <linux/libcfs/libcfs.h>
+#include <lustre/lustre_errno.h>
+
+/*
+ * The two translation tables below must define a one-to-one mapping between
+ * host and network errnos.
+ *
+ * EWOULDBLOCK is equal to EAGAIN on all architectures except for parisc, which
+ * appears irrelevant. Thus, existing references to EWOULDBLOCK are fine.
+ *
+ * EDEADLOCK is equal to EDEADLK on x86 but not on sparc, at least. A sparc
+ * host has no context-free way to determine if a LUSTRE_EDEADLK represents an
+ * EDEADLK or an EDEADLOCK. Therefore, all existing references to EDEADLOCK
+ * that need to be transferred on wire have been replaced with EDEADLK.
+ */
+static int lustre_errno_hton_mapping[] = {
+ [EPERM] = LUSTRE_EPERM,
+ [ENOENT] = LUSTRE_ENOENT,
+ [ESRCH] = LUSTRE_ESRCH,
+ [EINTR] = LUSTRE_EINTR,
+ [EIO] = LUSTRE_EIO,
+ [ENXIO] = LUSTRE_ENXIO,
+ [E2BIG] = LUSTRE_E2BIG,
+ [ENOEXEC] = LUSTRE_ENOEXEC,
+ [EBADF] = LUSTRE_EBADF,
+ [ECHILD] = LUSTRE_ECHILD,
+ [EAGAIN] = LUSTRE_EAGAIN,
+ [ENOMEM] = LUSTRE_ENOMEM,
+ [EACCES] = LUSTRE_EACCES,
+ [EFAULT] = LUSTRE_EFAULT,
+ [ENOTBLK] = LUSTRE_ENOTBLK,
+ [EBUSY] = LUSTRE_EBUSY,
+ [EEXIST] = LUSTRE_EEXIST,
+ [EXDEV] = LUSTRE_EXDEV,
+ [ENODEV] = LUSTRE_ENODEV,
+ [ENOTDIR] = LUSTRE_ENOTDIR,
+ [EISDIR] = LUSTRE_EISDIR,
+ [EINVAL] = LUSTRE_EINVAL,
+ [ENFILE] = LUSTRE_ENFILE,
+ [EMFILE] = LUSTRE_EMFILE,
+ [ENOTTY] = LUSTRE_ENOTTY,
+ [ETXTBSY] = LUSTRE_ETXTBSY,
+ [EFBIG] = LUSTRE_EFBIG,
+ [ENOSPC] = LUSTRE_ENOSPC,
+ [ESPIPE] = LUSTRE_ESPIPE,
+ [EROFS] = LUSTRE_EROFS,
+ [EMLINK] = LUSTRE_EMLINK,
+ [EPIPE] = LUSTRE_EPIPE,
+ [EDOM] = LUSTRE_EDOM,
+ [ERANGE] = LUSTRE_ERANGE,
+ [EDEADLK] = LUSTRE_EDEADLK,
+ [ENAMETOOLONG] = LUSTRE_ENAMETOOLONG,
+ [ENOLCK] = LUSTRE_ENOLCK,
+ [ENOSYS] = LUSTRE_ENOSYS,
+ [ENOTEMPTY] = LUSTRE_ENOTEMPTY,
+ [ELOOP] = LUSTRE_ELOOP,
+ [ENOMSG] = LUSTRE_ENOMSG,
+ [EIDRM] = LUSTRE_EIDRM,
+ [ECHRNG] = LUSTRE_ECHRNG,
+ [EL2NSYNC] = LUSTRE_EL2NSYNC,
+ [EL3HLT] = LUSTRE_EL3HLT,
+ [EL3RST] = LUSTRE_EL3RST,
+ [ELNRNG] = LUSTRE_ELNRNG,
+ [EUNATCH] = LUSTRE_EUNATCH,
+ [ENOCSI] = LUSTRE_ENOCSI,
+ [EL2HLT] = LUSTRE_EL2HLT,
+ [EBADE] = LUSTRE_EBADE,
+ [EBADR] = LUSTRE_EBADR,
+ [EXFULL] = LUSTRE_EXFULL,
+ [ENOANO] = LUSTRE_ENOANO,
+ [EBADRQC] = LUSTRE_EBADRQC,
+ [EBADSLT] = LUSTRE_EBADSLT,
+ [EBFONT] = LUSTRE_EBFONT,
+ [ENOSTR] = LUSTRE_ENOSTR,
+ [ENODATA] = LUSTRE_ENODATA,
+ [ETIME] = LUSTRE_ETIME,
+ [ENOSR] = LUSTRE_ENOSR,
+ [ENONET] = LUSTRE_ENONET,
+ [ENOPKG] = LUSTRE_ENOPKG,
+ [EREMOTE] = LUSTRE_EREMOTE,
+ [ENOLINK] = LUSTRE_ENOLINK,
+ [EADV] = LUSTRE_EADV,
+ [ESRMNT] = LUSTRE_ESRMNT,
+ [ECOMM] = LUSTRE_ECOMM,
+ [EPROTO] = LUSTRE_EPROTO,
+ [EMULTIHOP] = LUSTRE_EMULTIHOP,
+ [EDOTDOT] = LUSTRE_EDOTDOT,
+ [EBADMSG] = LUSTRE_EBADMSG,
+ [EOVERFLOW] = LUSTRE_EOVERFLOW,
+ [ENOTUNIQ] = LUSTRE_ENOTUNIQ,
+ [EBADFD] = LUSTRE_EBADFD,
+ [EREMCHG] = LUSTRE_EREMCHG,
+ [ELIBACC] = LUSTRE_ELIBACC,
+ [ELIBBAD] = LUSTRE_ELIBBAD,
+ [ELIBSCN] = LUSTRE_ELIBSCN,
+ [ELIBMAX] = LUSTRE_ELIBMAX,
+ [ELIBEXEC] = LUSTRE_ELIBEXEC,
+ [EILSEQ] = LUSTRE_EILSEQ,
+ [ERESTART] = LUSTRE_ERESTART,
+ [ESTRPIPE] = LUSTRE_ESTRPIPE,
+ [EUSERS] = LUSTRE_EUSERS,
+ [ENOTSOCK] = LUSTRE_ENOTSOCK,
+ [EDESTADDRREQ] = LUSTRE_EDESTADDRREQ,
+ [EMSGSIZE] = LUSTRE_EMSGSIZE,
+ [EPROTOTYPE] = LUSTRE_EPROTOTYPE,
+ [ENOPROTOOPT] = LUSTRE_ENOPROTOOPT,
+ [EPROTONOSUPPORT] = LUSTRE_EPROTONOSUPPORT,
+ [ESOCKTNOSUPPORT] = LUSTRE_ESOCKTNOSUPPORT,
+ [EOPNOTSUPP] = LUSTRE_EOPNOTSUPP,
+ [EPFNOSUPPORT] = LUSTRE_EPFNOSUPPORT,
+ [EAFNOSUPPORT] = LUSTRE_EAFNOSUPPORT,
+ [EADDRINUSE] = LUSTRE_EADDRINUSE,
+ [EADDRNOTAVAIL] = LUSTRE_EADDRNOTAVAIL,
+ [ENETDOWN] = LUSTRE_ENETDOWN,
+ [ENETUNREACH] = LUSTRE_ENETUNREACH,
+ [ENETRESET] = LUSTRE_ENETRESET,
+ [ECONNABORTED] = LUSTRE_ECONNABORTED,
+ [ECONNRESET] = LUSTRE_ECONNRESET,
+ [ENOBUFS] = LUSTRE_ENOBUFS,
+ [EISCONN] = LUSTRE_EISCONN,
+ [ENOTCONN] = LUSTRE_ENOTCONN,
+ [ESHUTDOWN] = LUSTRE_ESHUTDOWN,
+ [ETOOMANYREFS] = LUSTRE_ETOOMANYREFS,
+ [ETIMEDOUT] = LUSTRE_ETIMEDOUT,
+ [ECONNREFUSED] = LUSTRE_ECONNREFUSED,
+ [EHOSTDOWN] = LUSTRE_EHOSTDOWN,
+ [EHOSTUNREACH] = LUSTRE_EHOSTUNREACH,
+ [EALREADY] = LUSTRE_EALREADY,
+ [EINPROGRESS] = LUSTRE_EINPROGRESS,
+ [ESTALE] = LUSTRE_ESTALE,
+ [EUCLEAN] = LUSTRE_EUCLEAN,
+ [ENOTNAM] = LUSTRE_ENOTNAM,
+ [ENAVAIL] = LUSTRE_ENAVAIL,
+ [EISNAM] = LUSTRE_EISNAM,
+ [EREMOTEIO] = LUSTRE_EREMOTEIO,
+ [EDQUOT] = LUSTRE_EDQUOT,
+ [ENOMEDIUM] = LUSTRE_ENOMEDIUM,
+ [EMEDIUMTYPE] = LUSTRE_EMEDIUMTYPE,
+ [ECANCELED] = LUSTRE_ECANCELED,
+ [ENOKEY] = LUSTRE_ENOKEY,
+ [EKEYEXPIRED] = LUSTRE_EKEYEXPIRED,
+ [EKEYREVOKED] = LUSTRE_EKEYREVOKED,
+ [EKEYREJECTED] = LUSTRE_EKEYREJECTED,
+ [EOWNERDEAD] = LUSTRE_EOWNERDEAD,
+ [ENOTRECOVERABLE] = LUSTRE_ENOTRECOVERABLE,
+ [ERESTARTSYS] = LUSTRE_ERESTARTSYS,
+ [ERESTARTNOINTR] = LUSTRE_ERESTARTNOINTR,
+ [ERESTARTNOHAND] = LUSTRE_ERESTARTNOHAND,
+ [ENOIOCTLCMD] = LUSTRE_ENOIOCTLCMD,
+ [ERESTART_RESTARTBLOCK] = LUSTRE_ERESTART_RESTARTBLOCK,
+ [EBADHANDLE] = LUSTRE_EBADHANDLE,
+ [ENOTSYNC] = LUSTRE_ENOTSYNC,
+ [EBADCOOKIE] = LUSTRE_EBADCOOKIE,
+ [ENOTSUPP] = LUSTRE_ENOTSUPP,
+ [ETOOSMALL] = LUSTRE_ETOOSMALL,
+ [ESERVERFAULT] = LUSTRE_ESERVERFAULT,
+ [EBADTYPE] = LUSTRE_EBADTYPE,
+ [EJUKEBOX] = LUSTRE_EJUKEBOX,
+ [EIOCBQUEUED] = LUSTRE_EIOCBQUEUED,
+};
+
+static int lustre_errno_ntoh_mapping[] = {
+ [LUSTRE_EPERM] = EPERM,
+ [LUSTRE_ENOENT] = ENOENT,
+ [LUSTRE_ESRCH] = ESRCH,
+ [LUSTRE_EINTR] = EINTR,
+ [LUSTRE_EIO] = EIO,
+ [LUSTRE_ENXIO] = ENXIO,
+ [LUSTRE_E2BIG] = E2BIG,
+ [LUSTRE_ENOEXEC] = ENOEXEC,
+ [LUSTRE_EBADF] = EBADF,
+ [LUSTRE_ECHILD] = ECHILD,
+ [LUSTRE_EAGAIN] = EAGAIN,
+ [LUSTRE_ENOMEM] = ENOMEM,
+ [LUSTRE_EACCES] = EACCES,
+ [LUSTRE_EFAULT] = EFAULT,
+ [LUSTRE_ENOTBLK] = ENOTBLK,
+ [LUSTRE_EBUSY] = EBUSY,
+ [LUSTRE_EEXIST] = EEXIST,
+ [LUSTRE_EXDEV] = EXDEV,
+ [LUSTRE_ENODEV] = ENODEV,
+ [LUSTRE_ENOTDIR] = ENOTDIR,
+ [LUSTRE_EISDIR] = EISDIR,
+ [LUSTRE_EINVAL] = EINVAL,
+ [LUSTRE_ENFILE] = ENFILE,
+ [LUSTRE_EMFILE] = EMFILE,
+ [LUSTRE_ENOTTY] = ENOTTY,
+ [LUSTRE_ETXTBSY] = ETXTBSY,
+ [LUSTRE_EFBIG] = EFBIG,
+ [LUSTRE_ENOSPC] = ENOSPC,
+ [LUSTRE_ESPIPE] = ESPIPE,
+ [LUSTRE_EROFS] = EROFS,
+ [LUSTRE_EMLINK] = EMLINK,
+ [LUSTRE_EPIPE] = EPIPE,
+ [LUSTRE_EDOM] = EDOM,
+ [LUSTRE_ERANGE] = ERANGE,
+ [LUSTRE_EDEADLK] = EDEADLK,
+ [LUSTRE_ENAMETOOLONG] = ENAMETOOLONG,
+ [LUSTRE_ENOLCK] = ENOLCK,
+ [LUSTRE_ENOSYS] = ENOSYS,
+ [LUSTRE_ENOTEMPTY] = ENOTEMPTY,
+ [LUSTRE_ELOOP] = ELOOP,
+ [LUSTRE_ENOMSG] = ENOMSG,
+ [LUSTRE_EIDRM] = EIDRM,
+ [LUSTRE_ECHRNG] = ECHRNG,
+ [LUSTRE_EL2NSYNC] = EL2NSYNC,
+ [LUSTRE_EL3HLT] = EL3HLT,
+ [LUSTRE_EL3RST] = EL3RST,
+ [LUSTRE_ELNRNG] = ELNRNG,
+ [LUSTRE_EUNATCH] = EUNATCH,
+ [LUSTRE_ENOCSI] = ENOCSI,
+ [LUSTRE_EL2HLT] = EL2HLT,
+ [LUSTRE_EBADE] = EBADE,
+ [LUSTRE_EBADR] = EBADR,
+ [LUSTRE_EXFULL] = EXFULL,
+ [LUSTRE_ENOANO] = ENOANO,
+ [LUSTRE_EBADRQC] = EBADRQC,
+ [LUSTRE_EBADSLT] = EBADSLT,
+ [LUSTRE_EBFONT] = EBFONT,
+ [LUSTRE_ENOSTR] = ENOSTR,
+ [LUSTRE_ENODATA] = ENODATA,
+ [LUSTRE_ETIME] = ETIME,
+ [LUSTRE_ENOSR] = ENOSR,
+ [LUSTRE_ENONET] = ENONET,
+ [LUSTRE_ENOPKG] = ENOPKG,
+ [LUSTRE_EREMOTE] = EREMOTE,
+ [LUSTRE_ENOLINK] = ENOLINK,
+ [LUSTRE_EADV] = EADV,
+ [LUSTRE_ESRMNT] = ESRMNT,
+ [LUSTRE_ECOMM] = ECOMM,
+ [LUSTRE_EPROTO] = EPROTO,
+ [LUSTRE_EMULTIHOP] = EMULTIHOP,
+ [LUSTRE_EDOTDOT] = EDOTDOT,
+ [LUSTRE_EBADMSG] = EBADMSG,
+ [LUSTRE_EOVERFLOW] = EOVERFLOW,
+ [LUSTRE_ENOTUNIQ] = ENOTUNIQ,
+ [LUSTRE_EBADFD] = EBADFD,
+ [LUSTRE_EREMCHG] = EREMCHG,
+ [LUSTRE_ELIBACC] = ELIBACC,
+ [LUSTRE_ELIBBAD] = ELIBBAD,
+ [LUSTRE_ELIBSCN] = ELIBSCN,
+ [LUSTRE_ELIBMAX] = ELIBMAX,
+ [LUSTRE_ELIBEXEC] = ELIBEXEC,
+ [LUSTRE_EILSEQ] = EILSEQ,
+ [LUSTRE_ERESTART] = ERESTART,
+ [LUSTRE_ESTRPIPE] = ESTRPIPE,
+ [LUSTRE_EUSERS] = EUSERS,
+ [LUSTRE_ENOTSOCK] = ENOTSOCK,
+ [LUSTRE_EDESTADDRREQ] = EDESTADDRREQ,
+ [LUSTRE_EMSGSIZE] = EMSGSIZE,
+ [LUSTRE_EPROTOTYPE] = EPROTOTYPE,
+ [LUSTRE_ENOPROTOOPT] = ENOPROTOOPT,
+ [LUSTRE_EPROTONOSUPPORT] = EPROTONOSUPPORT,
+ [LUSTRE_ESOCKTNOSUPPORT] = ESOCKTNOSUPPORT,
+ [LUSTRE_EOPNOTSUPP] = EOPNOTSUPP,
+ [LUSTRE_EPFNOSUPPORT] = EPFNOSUPPORT,
+ [LUSTRE_EAFNOSUPPORT] = EAFNOSUPPORT,
+ [LUSTRE_EADDRINUSE] = EADDRINUSE,
+ [LUSTRE_EADDRNOTAVAIL] = EADDRNOTAVAIL,
+ [LUSTRE_ENETDOWN] = ENETDOWN,
+ [LUSTRE_ENETUNREACH] = ENETUNREACH,
+ [LUSTRE_ENETRESET] = ENETRESET,
+ [LUSTRE_ECONNABORTED] = ECONNABORTED,
+ [LUSTRE_ECONNRESET] = ECONNRESET,
+ [LUSTRE_ENOBUFS] = ENOBUFS,
+ [LUSTRE_EISCONN] = EISCONN,
+ [LUSTRE_ENOTCONN] = ENOTCONN,
+ [LUSTRE_ESHUTDOWN] = ESHUTDOWN,
+ [LUSTRE_ETOOMANYREFS] = ETOOMANYREFS,
+ [LUSTRE_ETIMEDOUT] = ETIMEDOUT,
+ [LUSTRE_ECONNREFUSED] = ECONNREFUSED,
+ [LUSTRE_EHOSTDOWN] = EHOSTDOWN,
+ [LUSTRE_EHOSTUNREACH] = EHOSTUNREACH,
+ [LUSTRE_EALREADY] = EALREADY,
+ [LUSTRE_EINPROGRESS] = EINPROGRESS,
+ [LUSTRE_ESTALE] = ESTALE,
+ [LUSTRE_EUCLEAN] = EUCLEAN,
+ [LUSTRE_ENOTNAM] = ENOTNAM,
+ [LUSTRE_ENAVAIL] = ENAVAIL,
+ [LUSTRE_EISNAM] = EISNAM,
+ [LUSTRE_EREMOTEIO] = EREMOTEIO,
+ [LUSTRE_EDQUOT] = EDQUOT,
+ [LUSTRE_ENOMEDIUM] = ENOMEDIUM,
+ [LUSTRE_EMEDIUMTYPE] = EMEDIUMTYPE,
+ [LUSTRE_ECANCELED] = ECANCELED,
+ [LUSTRE_ENOKEY] = ENOKEY,
+ [LUSTRE_EKEYEXPIRED] = EKEYEXPIRED,
+ [LUSTRE_EKEYREVOKED] = EKEYREVOKED,
+ [LUSTRE_EKEYREJECTED] = EKEYREJECTED,
+ [LUSTRE_EOWNERDEAD] = EOWNERDEAD,
+ [LUSTRE_ENOTRECOVERABLE] = ENOTRECOVERABLE,
+ [LUSTRE_ERESTARTSYS] = ERESTARTSYS,
+ [LUSTRE_ERESTARTNOINTR] = ERESTARTNOINTR,
+ [LUSTRE_ERESTARTNOHAND] = ERESTARTNOHAND,
+ [LUSTRE_ENOIOCTLCMD] = ENOIOCTLCMD,
+ [LUSTRE_ERESTART_RESTARTBLOCK] = ERESTART_RESTARTBLOCK,
+ [LUSTRE_EBADHANDLE] = EBADHANDLE,
+ [LUSTRE_ENOTSYNC] = ENOTSYNC,
+ [LUSTRE_EBADCOOKIE] = EBADCOOKIE,
+ [LUSTRE_ENOTSUPP] = ENOTSUPP,
+ [LUSTRE_ETOOSMALL] = ETOOSMALL,
+ [LUSTRE_ESERVERFAULT] = ESERVERFAULT,
+ [LUSTRE_EBADTYPE] = EBADTYPE,
+ [LUSTRE_EJUKEBOX] = EJUKEBOX,
+ [LUSTRE_EIOCBQUEUED] = EIOCBQUEUED,
+};
+
+unsigned int lustre_errno_hton(unsigned int h)
+{
+ unsigned int n;
+
+ if (h == 0) {
+ n = 0;
+ } else if (h < ARRAY_SIZE(lustre_errno_hton_mapping)) {
+ n = lustre_errno_hton_mapping[h];
+ if (n == 0)
+ goto generic;
+ } else {
+generic:
+ /*
+ * A generic errno is better than the unknown one that could
+ * mean anything to a different host.
+ */
+ n = LUSTRE_EIO;
+ }
+
+ return n;
+}
+EXPORT_SYMBOL(lustre_errno_hton);
+
+unsigned int lustre_errno_ntoh(unsigned int n)
+{
+ unsigned int h;
+
+ if (n == 0) {
+ h = 0;
+ } else if (n < ARRAY_SIZE(lustre_errno_ntoh_mapping)) {
+ h = lustre_errno_ntoh_mapping[n];
+ if (h == 0)
+ goto generic;
+ } else {
+generic:
+ /*
+ * Similar to the situation in lustre_errno_hton(), an unknown
+ * network errno could coincide with anything. Hence, it is
+ * better to return a generic errno.
+ */
+ h = EIO;
+ }
+
+ return h;
+}
+EXPORT_SYMBOL(lustre_errno_ntoh);
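As an illustrative round trip through the two tables above (a sketch only; the helper is not part of the patch, and the out-of-range value 100000 is just an example of an unmapped errno):

static void example_errno_roundtrip(void)
{
	unsigned int wire = lustre_errno_hton(EDEADLK);

	/* EDEADLK is in both tables, so it comes back unchanged */
	LASSERT(lustre_errno_ntoh(wire) == EDEADLK);

	/* a value far outside the table degrades to LUSTRE_EIO / EIO */
	LASSERT(lustre_errno_ntoh(lustre_errno_hton(100000)) == EIO);
}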
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
new file mode 100644
index 0000000..58d089c
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/events.c
@@ -0,0 +1,583 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_RPC
+
+# include <linux/libcfs/libcfs.h>
+# ifdef __mips64__
+# include <linux/kernel.h>
+# endif
+
+#include <obd_class.h>
+#include <lustre_net.h>
+#include <lustre_sec.h>
+#include "ptlrpc_internal.h"
+
+lnet_handle_eq_t ptlrpc_eq_h;
+
+/*
+ * Client's outgoing request callback
+ */
+void request_out_callback(lnet_event_t *ev)
+{
+ struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
+ struct ptlrpc_request *req = cbid->cbid_arg;
+
+ LASSERT (ev->type == LNET_EVENT_SEND ||
+ ev->type == LNET_EVENT_UNLINK);
+ LASSERT (ev->unlinked);
+
+ DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
+
+ sptlrpc_request_out_callback(req);
+ req->rq_real_sent = cfs_time_current_sec();
+
+ if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {
+
+ /* Failed send: make it seem like the reply timed out, just
+ * like failing sends in client.c does currently... */
+
+ spin_lock(&req->rq_lock);
+ req->rq_net_err = 1;
+ spin_unlock(&req->rq_lock);
+
+ ptlrpc_client_wake_req(req);
+ }
+
+ ptlrpc_req_finished(req);
+}
+
+/*
+ * Client's incoming reply callback
+ */
+void reply_in_callback(lnet_event_t *ev)
+{
+ struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
+ struct ptlrpc_request *req = cbid->cbid_arg;
+
+ DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
+
+ LASSERT (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
+ LASSERT (ev->md.start == req->rq_repbuf);
+ LASSERT (ev->offset + ev->mlength <= req->rq_repbuf_len);
+ /* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
+ for adaptive timeouts' early reply. */
+ LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);
+
+ spin_lock(&req->rq_lock);
+
+ req->rq_receiving_reply = 0;
+ req->rq_early = 0;
+ if (ev->unlinked)
+ req->rq_must_unlink = 0;
+
+ if (ev->status)
+ goto out_wake;
+
+ if (ev->type == LNET_EVENT_UNLINK) {
+ LASSERT(ev->unlinked);
+ DEBUG_REQ(D_NET, req, "unlink");
+ goto out_wake;
+ }
+
+ if (ev->mlength < ev->rlength ) {
+ CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req,
+ req->rq_replen, ev->rlength, ev->offset);
+ req->rq_reply_truncate = 1;
+ req->rq_replied = 1;
+ req->rq_status = -EOVERFLOW;
+ req->rq_nob_received = ev->rlength + ev->offset;
+ goto out_wake;
+ }
+
+ if ((ev->offset == 0) &&
+ ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))) {
+ /* Early reply */
+ DEBUG_REQ(D_ADAPTTO, req,
+ "Early reply received: mlen=%u offset=%d replen=%d "
+ "replied=%d unlinked=%d", ev->mlength, ev->offset,
+ req->rq_replen, req->rq_replied, ev->unlinked);
+
+ req->rq_early_count++; /* number received, client side */
+
+ if (req->rq_replied) /* already got the real reply */
+ goto out_wake;
+
+ req->rq_early = 1;
+ req->rq_reply_off = ev->offset;
+ req->rq_nob_received = ev->mlength;
+ /* And we're still receiving */
+ req->rq_receiving_reply = 1;
+ } else {
+ /* Real reply */
+ req->rq_rep_swab_mask = 0;
+ req->rq_replied = 1;
+ req->rq_reply_off = ev->offset;
+ req->rq_nob_received = ev->mlength;
+ /* LNetMDUnlink can't be called under the LNET_LOCK,
+ so we must unlink in ptlrpc_unregister_reply */
+ DEBUG_REQ(D_INFO, req,
+ "reply in flags=%x mlen=%u offset=%d replen=%d",
+ lustre_msg_get_flags(req->rq_reqmsg),
+ ev->mlength, ev->offset, req->rq_replen);
+ }
+
+ req->rq_import->imp_last_reply_time = cfs_time_current_sec();
+
+out_wake:
+ /* NB don't unlock till after wakeup; req can disappear under us
+ * since we don't have our own ref */
+ ptlrpc_client_wake_req(req);
+ spin_unlock(&req->rq_lock);
+}
+
+/*
+ * Client's bulk has been written/read
+ */
+void client_bulk_callback (lnet_event_t *ev)
+{
+ struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
+ struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
+ struct ptlrpc_request *req;
+
+ LASSERT ((desc->bd_type == BULK_PUT_SINK &&
+ ev->type == LNET_EVENT_PUT) ||
+ (desc->bd_type == BULK_GET_SOURCE &&
+ ev->type == LNET_EVENT_GET) ||
+ ev->type == LNET_EVENT_UNLINK);
+ LASSERT (ev->unlinked);
+
+ if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB, CFS_FAIL_ONCE))
+ ev->status = -EIO;
+
+ if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2,CFS_FAIL_ONCE))
+ ev->status = -EIO;
+
+ CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
+ "event type %d, status %d, desc %p\n",
+ ev->type, ev->status, desc);
+
+ spin_lock(&desc->bd_lock);
+ req = desc->bd_req;
+ LASSERT(desc->bd_md_count > 0);
+ desc->bd_md_count--;
+
+ if (ev->type != LNET_EVENT_UNLINK && ev->status == 0) {
+ desc->bd_nob_transferred += ev->mlength;
+ desc->bd_sender = ev->sender;
+ } else {
+ /* start reconnect and resend if network error hit */
+ spin_lock(&req->rq_lock);
+ req->rq_net_err = 1;
+ spin_unlock(&req->rq_lock);
+ }
+
+ if (ev->status != 0)
+ desc->bd_failure = 1;
+
+ /* NB don't unlock till after wakeup; desc can disappear under us
+ * otherwise */
+ if (desc->bd_md_count == 0)
+ ptlrpc_client_wake_req(desc->bd_req);
+
+ spin_unlock(&desc->bd_lock);
+}
+
+/*
+ * We will have a per-CPT request history list for each ptlrpc service in
+ * upcoming patches, because we don't want to be serialized by the current
+ * per-service history operations. So we require that a history ID can
+ * (somehow) show arrival order without grabbing a global lock, and that
+ * users can sort the entries in userspace.
+ *
+ * This is how we generate history ID for ptlrpc_request:
+ * ----------------------------------------------------
+ * | 32 bits | 16 bits | (16 - X)bits | X bits |
+ * ----------------------------------------------------
+ * | seconds | usec / 16 | sequence | CPT id |
+ * ----------------------------------------------------
+ *
+ * it might not be precise but should be good enough.
+ */
+
+#define REQS_CPT_BITS(svcpt) ((svcpt)->scp_service->srv_cpt_bits)
+
+#define REQS_SEC_SHIFT 32
+#define REQS_USEC_SHIFT 16
+#define REQS_SEQ_SHIFT(svcpt) REQS_CPT_BITS(svcpt)
+
+static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,
+ struct ptlrpc_request *req)
+{
+ __u64 sec = req->rq_arrival_time.tv_sec;
+ __u32 usec = req->rq_arrival_time.tv_usec >> 4; /* usec / 16 */
+ __u64 new_seq;
+
+ /* Set the sequence ID for the request and add it to the history
+ * list; this must be called while holding svcpt::scp_lock */
+
+ new_seq = (sec << REQS_SEC_SHIFT) |
+ (usec << REQS_USEC_SHIFT) |
+ (svcpt->scp_cpt < 0 ? 0 : svcpt->scp_cpt);
+
+ if (new_seq > svcpt->scp_hist_seq) {
+ /* This handles the initial case of scp_hist_seq == 0, or the
+ * case where we just jumped into a new time window */
+ svcpt->scp_hist_seq = new_seq;
+ } else {
+ LASSERT(REQS_SEQ_SHIFT(svcpt) < REQS_USEC_SHIFT);
+ /* NB: increase the sequence number within the current usec
+ * bucket. However, it's possible that we used up all the bits
+ * for the sequence and jumped into the next usec bucket (future
+ * time); then we hope there will be fewer RPCs per bucket at
+ * some point, so the sequence will catch up again */
+ svcpt->scp_hist_seq += (1U << REQS_SEQ_SHIFT(svcpt));
+ new_seq = svcpt->scp_hist_seq;
+ }
+
+ req->rq_history_seq = new_seq;
+
+ list_add_tail(&req->rq_history_list, &svcpt->scp_hist_reqs);
+}
+
+/*
+ * Server's incoming request callback
+ */
+void request_in_callback(lnet_event_t *ev)
+{
+ struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
+ struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
+ struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
+ struct ptlrpc_service *service = svcpt->scp_service;
+ struct ptlrpc_request *req;
+
+ LASSERT (ev->type == LNET_EVENT_PUT ||
+ ev->type == LNET_EVENT_UNLINK);
+ LASSERT ((char *)ev->md.start >= rqbd->rqbd_buffer);
+ LASSERT ((char *)ev->md.start + ev->offset + ev->mlength <=
+ rqbd->rqbd_buffer + service->srv_buf_size);
+
+ CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
+ "event type %d, status %d, service %s\n",
+ ev->type, ev->status, service->srv_name);
+
+ if (ev->unlinked) {
+ /* If this is the last request message to fit in the
+ * request buffer we can use the request object embedded in
+ * rqbd. Note that if we failed to allocate a request,
+ * we'd have to re-post the rqbd, which we can't do in this
+ * context. */
+ req = &rqbd->rqbd_req;
+ memset(req, 0, sizeof (*req));
+ } else {
+ LASSERT (ev->type == LNET_EVENT_PUT);
+ if (ev->status != 0) {
+ /* We moaned above already... */
+ return;
+ }
+ OBD_ALLOC_GFP(req, sizeof(*req), ALLOC_ATOMIC_TRY);
+ if (req == NULL) {
+ CERROR("Can't allocate incoming request descriptor: "
+ "Dropping %s RPC from %s\n",
+ service->srv_name,
+ libcfs_id2str(ev->initiator));
+ return;
+ }
+ }
+
+ /* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
+ * flags are reset and scalars are zero. We only set the message
+ * size to non-zero if this was a successful receive. */
+ req->rq_xid = ev->match_bits;
+ req->rq_reqbuf = ev->md.start + ev->offset;
+ if (ev->type == LNET_EVENT_PUT && ev->status == 0)
+ req->rq_reqdata_len = ev->mlength;
+ do_gettimeofday(&req->rq_arrival_time);
+ req->rq_peer = ev->initiator;
+ req->rq_self = ev->target.nid;
+ req->rq_rqbd = rqbd;
+ req->rq_phase = RQ_PHASE_NEW;
+ spin_lock_init(&req->rq_lock);
+ INIT_LIST_HEAD(&req->rq_timed_list);
+ INIT_LIST_HEAD(&req->rq_exp_list);
+ atomic_set(&req->rq_refcount, 1);
+ if (ev->type == LNET_EVENT_PUT)
+ CDEBUG(D_INFO, "incoming req@%p x"LPU64" msgsize %u\n",
+ req, req->rq_xid, ev->mlength);
+
+ CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer));
+
+ spin_lock(&svcpt->scp_lock);
+
+ ptlrpc_req_add_history(svcpt, req);
+
+ if (ev->unlinked) {
+ svcpt->scp_nrqbds_posted--;
+ CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n",
+ svcpt->scp_nrqbds_posted);
+
+ /* Normally, don't complain about 0 buffers posted; LNET won't
+ * drop incoming reqs since we set the portal lazy */
+ if (test_req_buffer_pressure &&
+ ev->type != LNET_EVENT_UNLINK &&
+ svcpt->scp_nrqbds_posted == 0)
+ CWARN("All %s request buffers busy\n",
+ service->srv_name);
+
+ /* req takes over the network's ref on rqbd */
+ } else {
+ /* req takes a ref on rqbd */
+ rqbd->rqbd_refcount++;
+ }
+
+ list_add_tail(&req->rq_list, &svcpt->scp_req_incoming);
+ svcpt->scp_nreqs_incoming++;
+
+ /* NB everything can disappear under us once the request
+ * has been queued and we unlock, so do the wake now... */
+ wake_up(&svcpt->scp_waitq);
+
+ spin_unlock(&svcpt->scp_lock);
+}
+
+/*
+ * Server's outgoing reply callback
+ */
+void reply_out_callback(lnet_event_t *ev)
+{
+ struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
+ struct ptlrpc_reply_state *rs = cbid->cbid_arg;
+ struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
+
+ LASSERT (ev->type == LNET_EVENT_SEND ||
+ ev->type == LNET_EVENT_ACK ||
+ ev->type == LNET_EVENT_UNLINK);
+
+ if (!rs->rs_difficult) {
+ /* 'Easy' replies have no further processing so I drop the
+ * net's ref on 'rs' */
+ LASSERT (ev->unlinked);
+ ptlrpc_rs_decref(rs);
+ return;
+ }
+
+ LASSERT (rs->rs_on_net);
+
+ if (ev->unlinked) {
+ /* Last network callback. The net's ref on 'rs' stays put
+ * until ptlrpc_handle_rs() is done with it */
+ spin_lock(&svcpt->scp_rep_lock);
+ spin_lock(&rs->rs_lock);
+
+ rs->rs_on_net = 0;
+ if (!rs->rs_no_ack ||
+ rs->rs_transno <=
+ rs->rs_export->exp_obd->obd_last_committed)
+ ptlrpc_schedule_difficult_reply(rs);
+
+ spin_unlock(&rs->rs_lock);
+ spin_unlock(&svcpt->scp_rep_lock);
+ }
+}
+
+
+static void ptlrpc_master_callback(lnet_event_t *ev)
+{
+ struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
+ void (*callback)(lnet_event_t *ev) = cbid->cbid_fn;
+
+ /* Honestly, it's best to find out early. */
+ LASSERT (cbid->cbid_arg != LP_POISON);
+ LASSERT (callback == request_out_callback ||
+ callback == reply_in_callback ||
+ callback == client_bulk_callback ||
+ callback == request_in_callback ||
+ callback == reply_out_callback
+ );
+
+ callback (ev);
+}
+
+int ptlrpc_uuid_to_peer (struct obd_uuid *uuid,
+ lnet_process_id_t *peer, lnet_nid_t *self)
+{
+ int best_dist = 0;
+ __u32 best_order = 0;
+ int count = 0;
+ int rc = -ENOENT;
+ int portals_compatibility;
+ int dist;
+ __u32 order;
+ lnet_nid_t dst_nid;
+ lnet_nid_t src_nid;
+
+ portals_compatibility = LNetCtl(IOC_LIBCFS_PORTALS_COMPATIBILITY, NULL);
+
+ peer->pid = LUSTRE_SRV_LNET_PID;
+
+ /* Choose the matching UUID that's closest */
+ while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) {
+ dist = LNetDist(dst_nid, &src_nid, &order);
+ if (dist < 0)
+ continue;
+
+ if (dist == 0) { /* local! use loopback LND */
+ peer->nid = *self = LNET_MKNID(LNET_MKNET(LOLND, 0), 0);
+ rc = 0;
+ break;
+ }
+
+ if (rc < 0 ||
+ dist < best_dist ||
+ (dist == best_dist && order < best_order)) {
+ best_dist = dist;
+ best_order = order;
+
+ if (portals_compatibility > 1) {
+ /* Strong portals compatibility: Zero the nid's
+ * NET, so if I'm reading new config logs, or
+ * getting configured by (new) lconf I can
+ * still talk to old servers. */
+ dst_nid = LNET_MKNID(0, LNET_NIDADDR(dst_nid));
+ src_nid = LNET_MKNID(0, LNET_NIDADDR(src_nid));
+ }
+ peer->nid = dst_nid;
+ *self = src_nid;
+ rc = 0;
+ }
+ }
+
+ CDEBUG(D_NET,"%s->%s\n", uuid->uuid, libcfs_id2str(*peer));
+ return rc;
+}
+
+void ptlrpc_ni_fini(void)
+{
+ wait_queue_head_t waitq;
+ struct l_wait_info lwi;
+ int rc;
+ int retries;
+
+ /* Wait for the event queue to become idle, since there may still be
+ * messages in flight with pending events (i.e. the fire-and-forget
+ * messages == client requests and "non-difficult" server
+ * replies). */
+
+ for (retries = 0;; retries++) {
+ rc = LNetEQFree(ptlrpc_eq_h);
+ switch (rc) {
+ default:
+ LBUG();
+
+ case 0:
+ LNetNIFini();
+ return;
+
+ case -EBUSY:
+ if (retries != 0)
+ CWARN("Event queue still busy\n");
+
+ /* Wait for a bit */
+ init_waitqueue_head(&waitq);
+ lwi = LWI_TIMEOUT(cfs_time_seconds(2), NULL, NULL);
+ l_wait_event(waitq, 0, &lwi);
+ break;
+ }
+ }
+ /* notreached */
+}
+
+lnet_pid_t ptl_get_pid(void)
+{
+ lnet_pid_t pid;
+
+ pid = LUSTRE_SRV_LNET_PID;
+ return pid;
+}
+
+int ptlrpc_ni_init(void)
+{
+ int rc;
+ lnet_pid_t pid;
+
+ pid = ptl_get_pid();
+ CDEBUG(D_NET, "My pid is: %x\n", pid);
+
+ /* We're not passing any limits yet... */
+ rc = LNetNIInit(pid);
+ if (rc < 0) {
+ CDEBUG (D_NET, "Can't init network interface: %d\n", rc);
+ return (-ENOENT);
+ }
+
+ /* CAVEAT EMPTOR: how we process portals events is _radically_
+ * different depending on... */
+ /* Kernel LNet calls our master callback whenever there is a new
+ * event. Because we are guaranteed to get every event via the
+ * callback, we just set the EQ size to 0 to avoid the overhead of
+ * serializing enqueue/dequeue operations in LNet. */
+ rc = LNetEQAlloc(0, ptlrpc_master_callback, &ptlrpc_eq_h);
+ if (rc == 0)
+ return 0;
+
+ CERROR ("Failed to allocate event queue: %d\n", rc);
+ LNetNIFini();
+
+ return (-ENOMEM);
+}
+
+
+int ptlrpc_init_portals(void)
+{
+ int rc = ptlrpc_ni_init();
+
+ if (rc != 0) {
+ CERROR("network initialisation failed\n");
+ return -EIO;
+ }
+ rc = ptlrpcd_addref();
+ if (rc == 0)
+ return 0;
+
+ CERROR("rpcd initialisation failed\n");
+ ptlrpc_ni_fini();
+ return rc;
+}
+
+void ptlrpc_exit_portals(void)
+{
+ ptlrpcd_decref();
+ ptlrpc_ni_fini();
+}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/Makefile b/drivers/staging/lustre/lustre/ptlrpc/gss/Makefile
new file mode 100644
index 0000000..8cdfbee
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_LUSTRE_FS) := ptlrpc_gss.o
+
+ptlrpc_gss-y := sec_gss.o gss_bulk.o gss_cli_upcall.o gss_svc_upcall.o \
+ gss_rawobj.o lproc_gss.o gss_generic_token.o \
+ gss_mech_switch.o gss_krb5_mech.o
+
+
+ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_api.h b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_api.h
new file mode 100644
index 0000000..0e9f6c4
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_api.h
@@ -0,0 +1,179 @@
+/*
+ * Modifications for Lustre
+ *
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Author: Eric Mei <ericm@clusterfs.com>
+ */
+
+/*
+ * Somewhat simplified version of the gss api.
+ *
+ * Dug Song <dugsong@monkey.org>
+ * Andy Adamson <andros@umich.edu>
+ * Bruce Fields <bfields@umich.edu>
+ * Copyright (c) 2000 The Regents of the University of Michigan
+ *
+ */
+
+#ifndef __PTLRPC_GSS_GSS_API_H_
+#define __PTLRPC_GSS_GSS_API_H_
+
+struct gss_api_mech;
+
+/* The mechanism-independent gss-api context: */
+struct gss_ctx {
+ struct gss_api_mech *mech_type;
+ void *internal_ctx_id;
+};
+
+#define GSS_C_NO_BUFFER ((rawobj_t) 0)
+#define GSS_C_NO_CONTEXT ((struct gss_ctx *) 0)
+#define GSS_C_NULL_OID ((rawobj_t) 0)
+
+/*
+ * gss-api prototypes; note that these are somewhat simplified versions of
+ * the prototypes specified in RFC 2744.
+ */
+__u32 lgss_import_sec_context(
+ rawobj_t *input_token,
+ struct gss_api_mech *mech,
+ struct gss_ctx **ctx);
+__u32 lgss_copy_reverse_context(
+ struct gss_ctx *ctx,
+ struct gss_ctx **ctx_new);
+__u32 lgss_inquire_context(
+ struct gss_ctx *ctx,
+ unsigned long *endtime);
+__u32 lgss_get_mic(
+ struct gss_ctx *ctx,
+ int msgcnt,
+ rawobj_t *msgs,
+ int iovcnt,
+ lnet_kiov_t *iovs,
+ rawobj_t *mic_token);
+__u32 lgss_verify_mic(
+ struct gss_ctx *ctx,
+ int msgcnt,
+ rawobj_t *msgs,
+ int iovcnt,
+ lnet_kiov_t *iovs,
+ rawobj_t *mic_token);
+__u32 lgss_wrap(
+ struct gss_ctx *ctx,
+ rawobj_t *gsshdr,
+ rawobj_t *msg,
+ int msg_buflen,
+ rawobj_t *out_token);
+__u32 lgss_unwrap(
+ struct gss_ctx *ctx,
+ rawobj_t *gsshdr,
+ rawobj_t *token,
+ rawobj_t *out_msg);
+__u32 lgss_prep_bulk(
+ struct gss_ctx *gctx,
+ struct ptlrpc_bulk_desc *desc);
+__u32 lgss_wrap_bulk(
+ struct gss_ctx *gctx,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *token,
+ int adj_nob);
+__u32 lgss_unwrap_bulk(
+ struct gss_ctx *gctx,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *token,
+ int adj_nob);
+__u32 lgss_delete_sec_context(
+ struct gss_ctx **ctx);
+int lgss_display(
+ struct gss_ctx *ctx,
+ char *buf,
+ int bufsize);
+
+struct subflavor_desc {
+ __u32 sf_subflavor;
+ __u32 sf_qop;
+ __u32 sf_service;
+ char *sf_name;
+};
+
+/* Each mechanism is described by the following struct: */
+struct gss_api_mech {
+ struct list_head gm_list;
+ struct module *gm_owner;
+ char *gm_name;
+ rawobj_t gm_oid;
+ atomic_t gm_count;
+ struct gss_api_ops *gm_ops;
+ int gm_sf_num;
+ struct subflavor_desc *gm_sfs;
+};
+
+/* and must provide the following operations: */
+struct gss_api_ops {
+ __u32 (*gss_import_sec_context)(
+ rawobj_t *input_token,
+ struct gss_ctx *ctx);
+ __u32 (*gss_copy_reverse_context)(
+ struct gss_ctx *ctx,
+ struct gss_ctx *ctx_new);
+ __u32 (*gss_inquire_context)(
+ struct gss_ctx *ctx,
+ unsigned long *endtime);
+ __u32 (*gss_get_mic)(
+ struct gss_ctx *ctx,
+ int msgcnt,
+ rawobj_t *msgs,
+ int iovcnt,
+ lnet_kiov_t *iovs,
+ rawobj_t *mic_token);
+ __u32 (*gss_verify_mic)(
+ struct gss_ctx *ctx,
+ int msgcnt,
+ rawobj_t *msgs,
+ int iovcnt,
+ lnet_kiov_t *iovs,
+ rawobj_t *mic_token);
+ __u32 (*gss_wrap)(
+ struct gss_ctx *ctx,
+ rawobj_t *gsshdr,
+ rawobj_t *msg,
+ int msg_buflen,
+ rawobj_t *out_token);
+ __u32 (*gss_unwrap)(
+ struct gss_ctx *ctx,
+ rawobj_t *gsshdr,
+ rawobj_t *token,
+ rawobj_t *out_msg);
+ __u32 (*gss_prep_bulk)(
+ struct gss_ctx *gctx,
+ struct ptlrpc_bulk_desc *desc);
+ __u32 (*gss_wrap_bulk)(
+ struct gss_ctx *gctx,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *token,
+ int adj_nob);
+ __u32 (*gss_unwrap_bulk)(
+ struct gss_ctx *gctx,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *token,
+ int adj_nob);
+ void (*gss_delete_sec_context)(
+ void *ctx);
+ int (*gss_display)(
+ struct gss_ctx *ctx,
+ char *buf,
+ int bufsize);
+};
+
+int lgss_mech_register(struct gss_api_mech *mech);
+void lgss_mech_unregister(struct gss_api_mech *mech);
+
+struct gss_api_mech * lgss_OID_to_mech(rawobj_t *oid);
+struct gss_api_mech * lgss_name_to_mech(char *name);
+struct gss_api_mech * lgss_subflavor_to_mech(__u32 subflavor);
+
+struct gss_api_mech * lgss_mech_get(struct gss_api_mech *mech);
+void lgss_mech_put(struct gss_api_mech *mech);
+
+#endif /* __PTLRPC_GSS_GSS_API_H_ */
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_asn1.h b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_asn1.h
new file mode 100644
index 0000000..c70eb00
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_asn1.h
@@ -0,0 +1,84 @@
+/*
+ * Modifications for Lustre
+ *
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Author: Eric Mei <ericm@clusterfs.com>
+ */
+
+/*
+ * minimal asn1 for generic encoding/decoding of gss tokens
+ *
+ * Adapted from MIT Kerberos 5-1.2.1 lib/include/krb5.h,
+ * lib/gssapi/krb5/gssapiP_krb5.h, and others
+ *
+ * Copyright (c) 2000 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Andy Adamson <andros@umich.edu>
+ */
+
+/*
+ * Copyright 1995 by the Massachusetts Institute of Technology.
+ * All Rights Reserved.
+ *
+ * Export of this software from the United States of America may
+ * require a specific license from the United States Government.
+ * It is the responsibility of any person or organization contemplating
+ * export to obtain such a license before exporting.
+ *
+ * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
+ * distribute this software and its documentation for any purpose and
+ * without fee is hereby granted, provided that the above copyright
+ * notice appear in all copies and that both that copyright notice and
+ * this permission notice appear in supporting documentation, and that
+ * the name of M.I.T. not be used in advertising or publicity pertaining
+ * to distribution of the software without specific, written prior
+ * permission. Furthermore if you modify this software you must label
+ * your software as modified software and not distribute it in such a
+ * fashion that it might be confused with the original M.I.T. software.
+ * M.I.T. makes no representations about the suitability of
+ * this software for any purpose. It is provided "as is" without express
+ * or implied warranty.
+ *
+ */
+
+#define SIZEOF_INT 4
+
+/* from gssapi_err_generic.h */
+#define G_BAD_SERVICE_NAME (-2045022976L)
+#define G_BAD_STRING_UID (-2045022975L)
+#define G_NOUSER (-2045022974L)
+#define G_VALIDATE_FAILED (-2045022973L)
+#define G_BUFFER_ALLOC (-2045022972L)
+#define G_BAD_MSG_CTX (-2045022971L)
+#define G_WRONG_SIZE (-2045022970L)
+#define G_BAD_USAGE (-2045022969L)
+#define G_UNKNOWN_QOP (-2045022968L)
+#define G_NO_HOSTNAME (-2045022967L)
+#define G_BAD_HOSTNAME (-2045022966L)
+#define G_WRONG_MECH (-2045022965L)
+#define G_BAD_TOK_HEADER (-2045022964L)
+#define G_BAD_DIRECTION (-2045022963L)
+#define G_TOK_TRUNC (-2045022962L)
+#define G_REFLECT (-2045022961L)
+#define G_WRONG_TOKID (-2045022960L)
+
+#define g_OID_equal(o1,o2) \
+ (((o1)->len == (o2)->len) && \
+ (memcmp((o1)->data,(o2)->data,(int) (o1)->len) == 0))
+
+__u32 g_verify_token_header(rawobj_t *mech,
+ int *body_size,
+ unsigned char **buf_in,
+ int toksize);
+
+__u32 g_get_mech_oid(rawobj_t *mech,
+ rawobj_t *in_buf);
+
+int g_token_size(rawobj_t *mech,
+ unsigned int body_size);
+
+void g_make_token_header(rawobj_t *mech,
+ int body_size,
+ unsigned char **buf);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_bulk.c
new file mode 100644
index 0000000..b518d8a
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_bulk.c
@@ -0,0 +1,506 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ptlrpc/gss/gss_bulk.c
+ *
+ * Author: Eric Mei <eric.mei@sun.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_SEC
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/dcache.h>
+#include <linux/fs.h>
+#include <linux/mutex.h>
+#include <linux/crypto.h>
+
+#include <obd.h>
+#include <obd_class.h>
+#include <obd_support.h>
+#include <lustre/lustre_idl.h>
+#include <lustre_net.h>
+#include <lustre_import.h>
+#include <lustre_sec.h>
+
+#include "gss_err.h"
+#include "gss_internal.h"
+#include "gss_api.h"
+
+int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
+ struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc)
+{
+ struct gss_cli_ctx *gctx;
+ struct lustre_msg *msg;
+ struct ptlrpc_bulk_sec_desc *bsd;
+ rawobj_t token;
+ __u32 maj;
+ int offset;
+ int rc;
+
+ LASSERT(req->rq_pack_bulk);
+ LASSERT(req->rq_bulk_read || req->rq_bulk_write);
+
+ gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
+ LASSERT(gctx->gc_mechctx);
+
+ switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
+ case SPTLRPC_SVC_NULL:
+ LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
+ msg = req->rq_reqbuf;
+ offset = msg->lm_bufcount - 1;
+ break;
+ case SPTLRPC_SVC_AUTH:
+ case SPTLRPC_SVC_INTG:
+ LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
+ msg = req->rq_reqbuf;
+ offset = msg->lm_bufcount - 2;
+ break;
+ case SPTLRPC_SVC_PRIV:
+ LASSERT(req->rq_clrbuf->lm_bufcount >= 2);
+ msg = req->rq_clrbuf;
+ offset = msg->lm_bufcount - 1;
+ break;
+ default:
+ LBUG();
+ }
+
+ bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
+ bsd->bsd_version = 0;
+ bsd->bsd_flags = 0;
+ bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
+ bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);
+
+ if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
+ return 0;
+
+ LASSERT(bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
+ bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
+
+ if (req->rq_bulk_read) {
+ /*
+ * bulk read: prepare receiving pages only for privacy mode.
+ */
+ if (bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
+ return gss_cli_prep_bulk(req, desc);
+ } else {
+ /*
+ * bulk write: sign or encrypt bulk pages.
+ */
+ bsd->bsd_nob = desc->bd_nob;
+
+ if (bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
+ /* integrity mode */
+ token.data = bsd->bsd_data;
+ token.len = lustre_msg_buflen(msg, offset) -
+ sizeof(*bsd);
+
+ maj = lgss_get_mic(gctx->gc_mechctx, 0, NULL,
+ desc->bd_iov_count, desc->bd_iov,
+ &token);
+ if (maj != GSS_S_COMPLETE) {
+ CWARN("failed to sign bulk data: %x\n", maj);
+ return -EACCES;
+ }
+ } else {
+ /* privacy mode */
+ if (desc->bd_iov_count == 0)
+ return 0;
+
+ rc = sptlrpc_enc_pool_get_pages(desc);
+ if (rc) {
+ CERROR("bulk write: failed to allocate "
+ "encryption pages: %d\n", rc);
+ return rc;
+ }
+
+ token.data = bsd->bsd_data;
+ token.len = lustre_msg_buflen(msg, offset) -
+ sizeof(*bsd);
+
+ maj = lgss_wrap_bulk(gctx->gc_mechctx, desc, &token, 0);
+ if (maj != GSS_S_COMPLETE) {
+ CWARN("fail to encrypt bulk data: %x\n", maj);
+ return -EACCES;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
+ struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc)
+{
+ struct gss_cli_ctx *gctx;
+ struct lustre_msg *rmsg, *vmsg;
+ struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
+ rawobj_t token;
+ __u32 maj;
+ int roff, voff;
+
+ LASSERT(req->rq_pack_bulk);
+ LASSERT(req->rq_bulk_read || req->rq_bulk_write);
+
+ switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
+ case SPTLRPC_SVC_NULL:
+ vmsg = req->rq_repdata;
+ voff = vmsg->lm_bufcount - 1;
+ LASSERT(vmsg && vmsg->lm_bufcount >= 3);
+
+ rmsg = req->rq_reqbuf;
+ roff = rmsg->lm_bufcount - 1; /* last segment */
+ LASSERT(rmsg && rmsg->lm_bufcount >= 3);
+ break;
+ case SPTLRPC_SVC_AUTH:
+ case SPTLRPC_SVC_INTG:
+ vmsg = req->rq_repdata;
+ voff = vmsg->lm_bufcount - 2;
+ LASSERT(vmsg && vmsg->lm_bufcount >= 4);
+
+ rmsg = req->rq_reqbuf;
+ roff = rmsg->lm_bufcount - 2; /* second last segment */
+ LASSERT(rmsg && rmsg->lm_bufcount >= 4);
+ break;
+ case SPTLRPC_SVC_PRIV:
+ vmsg = req->rq_repdata;
+ voff = vmsg->lm_bufcount - 1;
+ LASSERT(vmsg && vmsg->lm_bufcount >= 2);
+
+ rmsg = req->rq_clrbuf;
+ roff = rmsg->lm_bufcount - 1; /* last segment */
+ LASSERT(rmsg && rmsg->lm_bufcount >= 2);
+ break;
+ default:
+ LBUG();
+ }
+
+ bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr));
+ bsdv = lustre_msg_buf(vmsg, voff, sizeof(*bsdv));
+ LASSERT(bsdr && bsdv);
+
+ if (bsdr->bsd_version != bsdv->bsd_version ||
+ bsdr->bsd_type != bsdv->bsd_type ||
+ bsdr->bsd_svc != bsdv->bsd_svc) {
+ CERROR("bulk security descriptor mismatch: "
+ "(%u,%u,%u) != (%u,%u,%u)\n",
+ bsdr->bsd_version, bsdr->bsd_type, bsdr->bsd_svc,
+ bsdv->bsd_version, bsdv->bsd_type, bsdv->bsd_svc);
+ return -EPROTO;
+ }
+
+ LASSERT(bsdv->bsd_svc == SPTLRPC_BULK_SVC_NULL ||
+ bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
+ bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
+
+ /*
+ * In privacy mode, if we return success, make sure bd_nob_transferred
+ * is the actual size of the clear text; otherwise the upper layer
+ * may be surprised.
+ */
+ if (req->rq_bulk_write) {
+ if (bsdv->bsd_flags & BSD_FL_ERR) {
+ CERROR("server reported bulk i/o failure\n");
+ return -EIO;
+ }
+
+ if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
+ desc->bd_nob_transferred = desc->bd_nob;
+ } else {
+ /*
+ * For a bulk read, upon successful return, bd_nob_transferred is
+ * the size of the plain text actually received.
+ */
+ gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
+ LASSERT(gctx->gc_mechctx);
+
+ if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
+ int i, nob;
+
+ /* fix the actual data size */
+ for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
+ if (desc->bd_iov[i].kiov_len + nob >
+ desc->bd_nob_transferred) {
+ desc->bd_iov[i].kiov_len =
+ desc->bd_nob_transferred - nob;
+ }
+ nob += desc->bd_iov[i].kiov_len;
+ }
+
+ token.data = bsdv->bsd_data;
+ token.len = lustre_msg_buflen(vmsg, voff) -
+ sizeof(*bsdv);
+
+ maj = lgss_verify_mic(gctx->gc_mechctx, 0, NULL,
+ desc->bd_iov_count, desc->bd_iov,
+ &token);
+ if (maj != GSS_S_COMPLETE) {
+ CERROR("failed to verify bulk read: %x\n", maj);
+ return -EACCES;
+ }
+ } else if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV) {
+ desc->bd_nob = bsdv->bsd_nob;
+ if (desc->bd_nob == 0)
+ return 0;
+
+ token.data = bsdv->bsd_data;
+ token.len = lustre_msg_buflen(vmsg, voff) -
+ sizeof(*bsdr);
+
+ maj = lgss_unwrap_bulk(gctx->gc_mechctx, desc,
+ &token, 1);
+ if (maj != GSS_S_COMPLETE) {
+ CERROR("failed to decrypt bulk read: %x\n",
+ maj);
+ return -EACCES;
+ }
+
+ desc->bd_nob_transferred = desc->bd_nob;
+ }
+ }
+
+ return 0;
+}
+
+static int gss_prep_bulk(struct ptlrpc_bulk_desc *desc,
+ struct gss_ctx *mechctx)
+{
+ int rc;
+
+ if (desc->bd_iov_count == 0)
+ return 0;
+
+ rc = sptlrpc_enc_pool_get_pages(desc);
+ if (rc)
+ return rc;
+
+ if (lgss_prep_bulk(mechctx, desc) != GSS_S_COMPLETE)
+ return -EACCES;
+
+ return 0;
+}
+
+int gss_cli_prep_bulk(struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc)
+{
+ int rc;
+
+ LASSERT(req->rq_cli_ctx);
+ LASSERT(req->rq_pack_bulk);
+ LASSERT(req->rq_bulk_read);
+
+ if (SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_BULK_SVC_PRIV)
+ return 0;
+
+ rc = gss_prep_bulk(desc, ctx2gctx(req->rq_cli_ctx)->gc_mechctx);
+ if (rc)
+ CERROR("bulk read: failed to prepare encryption "
+ "pages: %d\n", rc);
+
+ return rc;
+}
+
+int gss_svc_prep_bulk(struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc)
+{
+ struct gss_svc_reqctx *grctx;
+ struct ptlrpc_bulk_sec_desc *bsd;
+ int rc;
+
+ LASSERT(req->rq_svc_ctx);
+ LASSERT(req->rq_pack_bulk);
+ LASSERT(req->rq_bulk_write);
+
+ grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
+ LASSERT(grctx->src_reqbsd);
+ LASSERT(grctx->src_repbsd);
+ LASSERT(grctx->src_ctx);
+ LASSERT(grctx->src_ctx->gsc_mechctx);
+
+ bsd = grctx->src_reqbsd;
+ if (bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)
+ return 0;
+
+ rc = gss_prep_bulk(desc, grctx->src_ctx->gsc_mechctx);
+ if (rc)
+ CERROR("bulk write: failed to prepare encryption "
+ "pages: %d\n", rc);
+
+ return rc;
+}
+
+int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc)
+{
+ struct gss_svc_reqctx *grctx;
+ struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
+ rawobj_t token;
+ __u32 maj;
+
+ LASSERT(req->rq_svc_ctx);
+ LASSERT(req->rq_pack_bulk);
+ LASSERT(req->rq_bulk_write);
+
+ grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
+
+ LASSERT(grctx->src_reqbsd);
+ LASSERT(grctx->src_repbsd);
+ LASSERT(grctx->src_ctx);
+ LASSERT(grctx->src_ctx->gsc_mechctx);
+
+ bsdr = grctx->src_reqbsd;
+ bsdv = grctx->src_repbsd;
+
+ /* bsdr has been sanity checked during unpacking */
+ bsdv->bsd_version = 0;
+ bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
+ bsdv->bsd_svc = bsdr->bsd_svc;
+ bsdv->bsd_flags = 0;
+
+ switch (bsdv->bsd_svc) {
+ case SPTLRPC_BULK_SVC_INTG:
+ token.data = bsdr->bsd_data;
+ token.len = grctx->src_reqbsd_size - sizeof(*bsdr);
+
+ maj = lgss_verify_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
+ desc->bd_iov_count, desc->bd_iov, &token);
+ if (maj != GSS_S_COMPLETE) {
+ bsdv->bsd_flags |= BSD_FL_ERR;
+ CERROR("failed to verify bulk signature: %x\n", maj);
+ return -EACCES;
+ }
+ break;
+ case SPTLRPC_BULK_SVC_PRIV:
+ if (bsdr->bsd_nob != desc->bd_nob) {
+ bsdv->bsd_flags |= BSD_FL_ERR;
+ CERROR("prepared nob %d doesn't match the actual "
+ "nob %d\n", desc->bd_nob, bsdr->bsd_nob);
+ return -EPROTO;
+ }
+
+ if (desc->bd_iov_count == 0) {
+ LASSERT(desc->bd_nob == 0);
+ break;
+ }
+
+ token.data = bsdr->bsd_data;
+ token.len = grctx->src_reqbsd_size - sizeof(*bsdr);
+
+ maj = lgss_unwrap_bulk(grctx->src_ctx->gsc_mechctx,
+ desc, &token, 0);
+ if (maj != GSS_S_COMPLETE) {
+ bsdv->bsd_flags |= BSD_FL_ERR;
+ CERROR("failed decrypt bulk data: %x\n", maj);
+ return -EACCES;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+int gss_svc_wrap_bulk(struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc)
+{
+ struct gss_svc_reqctx *grctx;
+ struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
+ rawobj_t token;
+ __u32 maj;
+ int rc;
+
+ LASSERT(req->rq_svc_ctx);
+ LASSERT(req->rq_pack_bulk);
+ LASSERT(req->rq_bulk_read);
+
+ grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
+
+ LASSERT(grctx->src_reqbsd);
+ LASSERT(grctx->src_repbsd);
+ LASSERT(grctx->src_ctx);
+ LASSERT(grctx->src_ctx->gsc_mechctx);
+
+ bsdr = grctx->src_reqbsd;
+ bsdv = grctx->src_repbsd;
+
+ /* bsdr has been sanity checked during unpacking */
+ bsdv->bsd_version = 0;
+ bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
+ bsdv->bsd_svc = bsdr->bsd_svc;
+ bsdv->bsd_flags = 0;
+
+ switch (bsdv->bsd_svc) {
+ case SPTLRPC_BULK_SVC_INTG:
+ token.data = bsdv->bsd_data;
+ token.len = grctx->src_repbsd_size - sizeof(*bsdv);
+
+ maj = lgss_get_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
+ desc->bd_iov_count, desc->bd_iov, &token);
+ if (maj != GSS_S_COMPLETE) {
+ bsdv->bsd_flags |= BSD_FL_ERR;
+ CERROR("failed to sign bulk data: %x\n", maj);
+ return -EACCES;
+ }
+ break;
+ case SPTLRPC_BULK_SVC_PRIV:
+ bsdv->bsd_nob = desc->bd_nob;
+
+ if (desc->bd_iov_count == 0) {
+ LASSERT(desc->bd_nob == 0);
+ break;
+ }
+
+ rc = sptlrpc_enc_pool_get_pages(desc);
+ if (rc) {
+ bsdv->bsd_flags |= BSD_FL_ERR;
+ CERROR("bulk read: failed to allocate encryption "
+ "pages: %d\n", rc);
+ return rc;
+ }
+
+ token.data = bsdv->bsd_data;
+ token.len = grctx->src_repbsd_size - sizeof(*bsdv);
+
+ maj = lgss_wrap_bulk(grctx->src_ctx->gsc_mechctx,
+ desc, &token, 1);
+ if (maj != GSS_S_COMPLETE) {
+ bsdv->bsd_flags |= BSD_FL_ERR;
+ CERROR("failed to encrypt bulk data: %x\n", maj);
+ return -EACCES;
+ }
+ break;
+ }
+
+ return 0;
+}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_cli_upcall.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_cli_upcall.c
new file mode 100644
index 0000000..55247af
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_cli_upcall.c
@@ -0,0 +1,446 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ptlrpc/gss/gss_cli_upcall.c
+ *
+ * Author: Eric Mei <ericm@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_SEC
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/dcache.h>
+#include <linux/fs.h>
+#include <linux/mutex.h>
+
+#include <obd.h>
+#include <obd_class.h>
+#include <obd_support.h>
+#include <lustre/lustre_idl.h>
+#include <lustre_net.h>
+#include <lustre_import.h>
+#include <lustre_sec.h>
+
+#include "gss_err.h"
+#include "gss_internal.h"
+#include "gss_api.h"
+
+/**********************************************
+ * gss context init/fini helper *
+ **********************************************/
+
+static
+int ctx_init_pack_request(struct obd_import *imp,
+ struct ptlrpc_request *req,
+ int lustre_srv,
+ uid_t uid, gid_t gid,
+ long token_size,
+ char __user *token)
+{
+ struct lustre_msg *msg = req->rq_reqbuf;
+ struct gss_sec *gsec;
+ struct gss_header *ghdr;
+ struct ptlrpc_user_desc *pud;
+ __u32 *p, size, offset = 2;
+ rawobj_t obj;
+
+ LASSERT(msg->lm_bufcount <= 4);
+ LASSERT(req->rq_cli_ctx);
+ LASSERT(req->rq_cli_ctx->cc_sec);
+
+ /* gss hdr */
+ ghdr = lustre_msg_buf(msg, 0, sizeof(*ghdr));
+ ghdr->gh_version = PTLRPC_GSS_VERSION;
+ ghdr->gh_sp = (__u8) imp->imp_sec->ps_part;
+ ghdr->gh_flags = 0;
+ ghdr->gh_proc = PTLRPC_GSS_PROC_INIT;
+ ghdr->gh_seq = 0;
+ ghdr->gh_svc = SPTLRPC_SVC_NULL;
+ ghdr->gh_handle.len = 0;
+
+ /* fix the user desc */
+ if (req->rq_pack_udesc) {
+ ghdr->gh_flags |= LUSTRE_GSS_PACK_USER;
+
+ pud = lustre_msg_buf(msg, offset, sizeof(*pud));
+ LASSERT(pud);
+ pud->pud_uid = pud->pud_fsuid = uid;
+ pud->pud_gid = pud->pud_fsgid = gid;
+ pud->pud_cap = 0;
+ pud->pud_ngroups = 0;
+ offset++;
+ }
+
+ /* security payload */
+ p = lustre_msg_buf(msg, offset, 0);
+ size = msg->lm_buflens[offset];
+ LASSERT(p);
+
+ /* 1. lustre svc type */
+ LASSERT(size > 4);
+ *p++ = cpu_to_le32(lustre_srv);
+ size -= 4;
+
+ /* 2. target uuid */
+ obj.len = strlen(imp->imp_obd->u.cli.cl_target_uuid.uuid) + 1;
+ obj.data = imp->imp_obd->u.cli.cl_target_uuid.uuid;
+ if (rawobj_serialize(&obj, &p, &size))
+ LBUG();
+
+ /* 3. reverse context handle. actually only needed by root user,
+ * but we send it anyway. */
+ gsec = sec2gsec(req->rq_cli_ctx->cc_sec);
+ obj.len = sizeof(gsec->gs_rvs_hdl);
+ obj.data = (__u8 *) &gsec->gs_rvs_hdl;
+ if (rawobj_serialize(&obj, &p, &size))
+ LBUG();
+
+ /* 4. now the token */
+ LASSERT(size >= (sizeof(__u32) + token_size));
+ *p++ = cpu_to_le32(((__u32) token_size));
+ if (copy_from_user(p, token, token_size)) {
+ CERROR("can't copy token\n");
+ return -EFAULT;
+ }
+ size -= sizeof(__u32) + cfs_size_round4(token_size);
+
+ req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, offset,
+ msg->lm_buflens[offset] - size, 0);
+ return 0;
+}
+
+static
+int ctx_init_parse_reply(struct lustre_msg *msg, int swabbed,
+ char __user *outbuf, long outlen)
+{
+ struct gss_rep_header *ghdr;
+ __u32 obj_len, round_len;
+ __u32 status, effective = 0;
+
+ if (msg->lm_bufcount != 3) {
+ CERROR("unexpected bufcount %u\n", msg->lm_bufcount);
+ return -EPROTO;
+ }
+
+ ghdr = (struct gss_rep_header *) gss_swab_header(msg, 0, swabbed);
+ if (ghdr == NULL) {
+ CERROR("unable to extract gss reply header\n");
+ return -EPROTO;
+ }
+
+ if (ghdr->gh_version != PTLRPC_GSS_VERSION) {
+ CERROR("invalid gss version %u\n", ghdr->gh_version);
+ return -EPROTO;
+ }
+
+ if (outlen < (4 + 2) * 4 + cfs_size_round4(ghdr->gh_handle.len) +
+ cfs_size_round4(msg->lm_buflens[2])) {
+ CERROR("output buffer size %ld too small\n", outlen);
+ return -EFAULT;
+ }
+
+ status = 0;
+ effective = 0;
+
+ if (copy_to_user(outbuf, &status, 4))
+ return -EFAULT;
+ outbuf += 4;
+ if (copy_to_user(outbuf, &ghdr->gh_major, 4))
+ return -EFAULT;
+ outbuf += 4;
+ if (copy_to_user(outbuf, &ghdr->gh_minor, 4))
+ return -EFAULT;
+ outbuf += 4;
+ if (copy_to_user(outbuf, &ghdr->gh_seqwin, 4))
+ return -EFAULT;
+ outbuf += 4;
+ effective += 4 * 4;
+
+ /* handle */
+ obj_len = ghdr->gh_handle.len;
+ round_len = (obj_len + 3) & ~ 3;
+ if (copy_to_user(outbuf, &obj_len, 4))
+ return -EFAULT;
+ outbuf += 4;
+ if (copy_to_user(outbuf, (char *) ghdr->gh_handle.data, round_len))
+ return -EFAULT;
+ outbuf += round_len;
+ effective += 4 + round_len;
+
+ /* out token */
+ obj_len = msg->lm_buflens[2];
+ round_len = (obj_len + 3) & ~ 3;
+ if (copy_to_user(outbuf, &obj_len, 4))
+ return -EFAULT;
+ outbuf += 4;
+ if (copy_to_user(outbuf, lustre_msg_buf(msg, 2, 0), round_len))
+ return -EFAULT;
+ outbuf += round_len;
+ effective += 4 + round_len;
+
+ return effective;
+}
+
+/* XXX move to where lgssd could see */
+struct lgssd_ioctl_param {
+ int version; /* in */
+ int secid; /* in */
+ char *uuid; /* in */
+ int lustre_svc; /* in */
+ uid_t uid; /* in */
+ gid_t gid; /* in */
+ long send_token_size;/* in */
+ char *send_token; /* in */
+ long reply_buf_size; /* in */
+ char *reply_buf; /* in */
+ long status; /* out */
+ long reply_length; /* out */
+};
+
+int gss_do_ctx_init_rpc(__user char *buffer, unsigned long count)
+{
+ struct obd_import *imp;
+ struct ptlrpc_request *req;
+ struct lgssd_ioctl_param param;
+ struct obd_device *obd;
+ char obdname[64];
+ long lsize;
+ int rc;
+
+ if (count != sizeof(param)) {
+ CERROR("ioctl size %lu, expect %lu, please check lgss_keyring "
+ "version\n", count, (unsigned long) sizeof(param));
+ return -EINVAL;
+ }
+ if (copy_from_user(&param, buffer, sizeof(param))) {
+ CERROR("failed copy data from lgssd\n");
+ return -EFAULT;
+ }
+
+ if (param.version != GSSD_INTERFACE_VERSION) {
+ CERROR("gssd interface version %d (expect %d)\n",
+ param.version, GSSD_INTERFACE_VERSION);
+ return -EINVAL;
+ }
+
+ /* take name */
+ if (strncpy_from_user(obdname, param.uuid, sizeof(obdname)) <= 0) {
+ CERROR("Invalid obdname pointer\n");
+ return -EFAULT;
+ }
+
+ obd = class_name2obd(obdname);
+ if (!obd) {
+ CERROR("no such obd %s\n", obdname);
+ return -EINVAL;
+ }
+
+ if (unlikely(!obd->obd_set_up)) {
+ CERROR("obd %s not setup\n", obdname);
+ return -EINVAL;
+ }
+
+ spin_lock(&obd->obd_dev_lock);
+ if (obd->obd_stopping) {
+ CERROR("obd %s has stopped\n", obdname);
+ spin_unlock(&obd->obd_dev_lock);
+ return -EINVAL;
+ }
+
+ if (strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) &&
+ strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME) &&
+ strcmp(obd->obd_type->typ_name, LUSTRE_MGC_NAME)) {
+ CERROR("obd %s is not a client device\n", obdname);
+ spin_unlock(&obd->obd_dev_lock);
+ return -EINVAL;
+ }
+ spin_unlock(&obd->obd_dev_lock);
+
+ down_read(&obd->u.cli.cl_sem);
+ if (obd->u.cli.cl_import == NULL) {
+ CERROR("obd %s: import has gone\n", obd->obd_name);
+ up_read(&obd->u.cli.cl_sem);
+ return -EINVAL;
+ }
+ imp = class_import_get(obd->u.cli.cl_import);
+ up_read(&obd->u.cli.cl_sem);
+
+ if (imp->imp_deactive) {
+ CERROR("import has been deactivated\n");
+ class_import_put(imp);
+ return -EINVAL;
+ }
+
+ req = ptlrpc_request_alloc_pack(imp, &RQF_SEC_CTX, LUSTRE_OBD_VERSION,
+ SEC_CTX_INIT);
+ if (req == NULL) {
+ param.status = -ENOMEM;
+ goto out_copy;
+ }
+
+ if (req->rq_cli_ctx->cc_sec->ps_id != param.secid) {
+ CWARN("original secid %d, now has changed to %d, "
+ "cancel this negotiation\n", param.secid,
+ req->rq_cli_ctx->cc_sec->ps_id);
+ param.status = -EINVAL;
+ goto out_copy;
+ }
+
+ /* get token */
+ rc = ctx_init_pack_request(imp, req,
+ param.lustre_svc,
+ param.uid, param.gid,
+ param.send_token_size,
+ param.send_token);
+ if (rc) {
+ param.status = rc;
+ goto out_copy;
+ }
+
+ ptlrpc_request_set_replen(req);
+
+ rc = ptlrpc_queue_wait(req);
+ if (rc) {
+ /* If any _real_ denial is made, we expect the server to return
+ * an -EACCES reply, or to return success but indicate a gss
+ * error inside the reply message. All other errors are treated
+ * as a timeout; the caller might retry the negotiation
+ * repeatedly, and we leave recovery decisions to the general
+ * ptlrpc layer.
+ *
+ * FIXME maybe some other error code shouldn't be treated
+ * as timeout. */
+ param.status = rc;
+ if (rc != -EACCES)
+ param.status = -ETIMEDOUT;
+ goto out_copy;
+ }
+
+ LASSERT(req->rq_repdata);
+ lsize = ctx_init_parse_reply(req->rq_repdata,
+ ptlrpc_rep_need_swab(req),
+ param.reply_buf, param.reply_buf_size);
+ if (lsize < 0) {
+ param.status = (int) lsize;
+ goto out_copy;
+ }
+
+ param.status = 0;
+ param.reply_length = lsize;
+
+out_copy:
+ if (copy_to_user(buffer, &param, sizeof(param)))
+ rc = -EFAULT;
+ else
+ rc = 0;
+
+ class_import_put(imp);
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+int gss_do_ctx_fini_rpc(struct gss_cli_ctx *gctx)
+{
+ struct ptlrpc_cli_ctx *ctx = &gctx->gc_base;
+ struct obd_import *imp = ctx->cc_sec->ps_import;
+ struct ptlrpc_request *req;
+ struct ptlrpc_user_desc *pud;
+ int rc;
+
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+
+ if (cli_ctx_is_error(ctx) || !cli_ctx_is_uptodate(ctx)) {
+ CDEBUG(D_SEC, "ctx %p(%u->%s) not uptodate, "
+ "don't send destroy rpc\n", ctx,
+ ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+ return 0;
+ }
+
+ might_sleep();
+
+ CWARN("%s ctx %p idx "LPX64" (%u->%s)\n",
+ sec_is_reverse(ctx->cc_sec) ?
+ "server finishing reverse" : "client finishing forward",
+ ctx, gss_handle_to_u64(&gctx->gc_handle),
+ ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+
+ gctx->gc_proc = PTLRPC_GSS_PROC_DESTROY;
+
+ req = ptlrpc_request_alloc(imp, &RQF_SEC_CTX);
+ if (req == NULL) {
+ CWARN("ctx %p(%u): fail to prepare rpc, destroy locally\n",
+ ctx, ctx->cc_vcred.vc_uid);
+ GOTO(out, rc = -ENOMEM);
+ }
+
+ rc = ptlrpc_request_bufs_pack(req, LUSTRE_OBD_VERSION, SEC_CTX_FINI,
+ NULL, ctx);
+ if (rc) {
+ ptlrpc_request_free(req);
+ GOTO(out_ref, rc);
+ }
+
+ /* fix the user desc */
+ if (req->rq_pack_udesc) {
+ /* we rely on the fact that this request is in AUTH mode,
+ * and that the user_desc is at offset 2. */
+ pud = lustre_msg_buf(req->rq_reqbuf, 2, sizeof(*pud));
+ LASSERT(pud);
+ pud->pud_uid = pud->pud_fsuid = ctx->cc_vcred.vc_uid;
+ pud->pud_gid = pud->pud_fsgid = ctx->cc_vcred.vc_gid;
+ pud->pud_cap = 0;
+ pud->pud_ngroups = 0;
+ }
+
+ req->rq_phase = RQ_PHASE_RPC;
+ rc = ptl_send_rpc(req, 1);
+ if (rc)
+ CWARN("ctx %p(%u->%s): rpc error %d, destroy locally\n", ctx,
+ ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec), rc);
+
+out_ref:
+ ptlrpc_req_finished(req);
+out:
+ return rc;
+}
+
+int __init gss_init_cli_upcall(void)
+{
+ return 0;
+}
+
+void __exit gss_exit_cli_upcall(void)
+{
+}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_err.h b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_err.h
new file mode 100644
index 0000000..1342579
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_err.h
@@ -0,0 +1,193 @@
+/*
+ * Modifications for Lustre
+ *
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Author: Eric Mei <ericm@clusterfs.com>
+ */
+
+/*
+ * Adapted from MIT Kerberos 5-1.2.1 include/gssapi/gssapi.h
+ *
+ * Copyright (c) 2002 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Andy Adamson <andros@umich.edu>
+ */
+
+/*
+ * Copyright 1993 by OpenVision Technologies, Inc.
+ *
+ * Permission to use, copy, modify, distribute, and sell this software
+ * and its documentation for any purpose is hereby granted without fee,
+ * provided that the above copyright notice appears in all copies and
+ * that both that copyright notice and this permission notice appear in
+ * supporting documentation, and that the name of OpenVision not be used
+ * in advertising or publicity pertaining to distribution of the software
+ * without specific, written prior permission. OpenVision makes no
+ * representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied warranty.
+ *
+ * OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __PTLRPC_GSS_GSS_ERR_H_
+#define __PTLRPC_GSS_GSS_ERR_H_
+
+typedef unsigned int OM_uint32;
+
+/*
+ * Flag bits for context-level services.
+ */
+#define GSS_C_DELEG_FLAG (1)
+#define GSS_C_MUTUAL_FLAG (2)
+#define GSS_C_REPLAY_FLAG (4)
+#define GSS_C_SEQUENCE_FLAG (8)
+#define GSS_C_CONF_FLAG (16)
+#define GSS_C_INTEG_FLAG (32)
+#define GSS_C_ANON_FLAG (64)
+#define GSS_C_PROT_READY_FLAG (128)
+#define GSS_C_TRANS_FLAG (256)
+
+/*
+ * Credential usage options
+ */
+#define GSS_C_BOTH (0)
+#define GSS_C_INITIATE (1)
+#define GSS_C_ACCEPT (2)
+
+/*
+ * Status code types for gss_display_status
+ */
+#define GSS_C_GSS_CODE (1)
+#define GSS_C_MECH_CODE (2)
+
+
+/*
+ * Define the default Quality of Protection for per-message services. Note
+ * that an implementation that offers multiple levels of QOP may either reserve
+ * a value (for example zero, as assumed here) to mean "default protection", or
+ * alternatively may simply equate GSS_C_QOP_DEFAULT to a specific explicit
+ * QOP value. However a value of 0 should always be interpreted by a GSSAPI
+ * implementation as a request for the default protection level.
+ */
+#define GSS_C_QOP_DEFAULT (0)
+
+/*
+ * Expiration time of 2^32-1 seconds means infinite lifetime for a
+ * credential or security context
+ */
+#define GSS_C_INDEFINITE ((OM_uint32) 0xfffffffful)
+
+
+/* Major status codes */
+
+#define GSS_S_COMPLETE (0)
+
+/*
+ * Some "helper" definitions to make the status code macros obvious.
+ */
+#define GSS_C_CALLING_ERROR_OFFSET (24)
+#define GSS_C_ROUTINE_ERROR_OFFSET (16)
+#define GSS_C_SUPPLEMENTARY_OFFSET (0)
+#define GSS_C_CALLING_ERROR_MASK ((OM_uint32) 0377ul)
+#define GSS_C_ROUTINE_ERROR_MASK ((OM_uint32) 0377ul)
+#define GSS_C_SUPPLEMENTARY_MASK ((OM_uint32) 0177777ul)
+
+/*
+ * The macros that test status codes for error conditions. Note that the
+ * GSS_ERROR() macro has changed slightly from the V1 GSSAPI so that it now
+ * evaluates its argument only once.
+ */
+#define GSS_CALLING_ERROR(x) \
+ ((x) & (GSS_C_CALLING_ERROR_MASK << GSS_C_CALLING_ERROR_OFFSET))
+#define GSS_ROUTINE_ERROR(x) \
+ ((x) & (GSS_C_ROUTINE_ERROR_MASK << GSS_C_ROUTINE_ERROR_OFFSET))
+#define GSS_SUPPLEMENTARY_INFO(x) \
+ ((x) & (GSS_C_SUPPLEMENTARY_MASK << GSS_C_SUPPLEMENTARY_OFFSET))
+#define GSS_ERROR(x) \
+ ((x) & ((GSS_C_CALLING_ERROR_MASK << GSS_C_CALLING_ERROR_OFFSET) | \
+ (GSS_C_ROUTINE_ERROR_MASK << GSS_C_ROUTINE_ERROR_OFFSET)))
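A quick standalone illustration of how these offsets and masks pack and unpack a status word; the local definitions below are stand-ins for the macros above, not additions to this header:

#include <stdio.h>

typedef unsigned int OM_uint32;			/* mirrors the typedef above */
#define ROUTINE_OFF	16			/* GSS_C_ROUTINE_ERROR_OFFSET */
#define ROUTINE_MASK	((OM_uint32) 0377ul)	/* GSS_C_ROUTINE_ERROR_MASK */

int main(void)
{
	/* GSS_S_FAILURE packs routine error 13 at bit 16 -> 0x000d0000 */
	OM_uint32 maj = (OM_uint32) 13ul << ROUTINE_OFF;

	printf("status=0x%08x routine=%u error=%s\n",
	       maj, (maj >> ROUTINE_OFF) & ROUTINE_MASK,
	       (maj & (ROUTINE_MASK << ROUTINE_OFF)) ? "yes" : "no");
	return 0;
}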
+
+/*
+ * Now the actual status code definitions
+ */
+
+/*
+ * Calling errors:
+ */
+#define GSS_S_CALL_INACCESSIBLE_READ \
+ (((OM_uint32) 1ul) << GSS_C_CALLING_ERROR_OFFSET)
+#define GSS_S_CALL_INACCESSIBLE_WRITE \
+ (((OM_uint32) 2ul) << GSS_C_CALLING_ERROR_OFFSET)
+#define GSS_S_CALL_BAD_STRUCTURE \
+ (((OM_uint32) 3ul) << GSS_C_CALLING_ERROR_OFFSET)
+
+/*
+ * Routine errors:
+ */
+#define GSS_S_BAD_MECH \
+ (((OM_uint32) 1ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_BAD_NAME \
+ (((OM_uint32) 2ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_BAD_NAMETYPE \
+ (((OM_uint32) 3ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_BAD_BINDINGS \
+ (((OM_uint32) 4ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_BAD_STATUS \
+ (((OM_uint32) 5ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_BAD_SIG \
+ (((OM_uint32) 6ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_NO_CRED \
+ (((OM_uint32) 7ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_NO_CONTEXT \
+ (((OM_uint32) 8ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_DEFECTIVE_TOKEN \
+ (((OM_uint32) 9ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_DEFECTIVE_CREDENTIAL \
+ (((OM_uint32) 10ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_CREDENTIALS_EXPIRED \
+ (((OM_uint32) 11ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_CONTEXT_EXPIRED \
+ (((OM_uint32) 12ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_FAILURE \
+ (((OM_uint32) 13ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_BAD_QOP \
+ (((OM_uint32) 14ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_UNAUTHORIZED \
+ (((OM_uint32) 15ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_UNAVAILABLE \
+ (((OM_uint32) 16ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_DUPLICATE_ELEMENT \
+ (((OM_uint32) 17ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_NAME_NOT_MN \
+ (((OM_uint32) 18ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+
+/*
+ * Supplementary info bits:
+ */
+#define GSS_S_CONTINUE_NEEDED (1 << (GSS_C_SUPPLEMENTARY_OFFSET + 0))
+#define GSS_S_DUPLICATE_TOKEN (1 << (GSS_C_SUPPLEMENTARY_OFFSET + 1))
+#define GSS_S_OLD_TOKEN (1 << (GSS_C_SUPPLEMENTARY_OFFSET + 2))
+#define GSS_S_UNSEQ_TOKEN (1 << (GSS_C_SUPPLEMENTARY_OFFSET + 3))
+#define GSS_S_GAP_TOKEN (1 << (GSS_C_SUPPLEMENTARY_OFFSET + 4))
+
+/* XXXX these are not part of the GSSAPI C bindings! (but should be) */
+
+#define GSS_CALLING_ERROR_FIELD(x) \
+ (((x) >> GSS_C_CALLING_ERROR_OFFSET) & GSS_C_CALLING_ERROR_MASK)
+#define GSS_ROUTINE_ERROR_FIELD(x) \
+ (((x) >> GSS_C_ROUTINE_ERROR_OFFSET) & GSS_C_ROUTINE_ERROR_MASK)
+#define GSS_SUPPLEMENTARY_INFO_FIELD(x) \
+ (((x) >> GSS_C_SUPPLEMENTARY_OFFSET) & GSS_C_SUPPLEMENTARY_MASK)
+
+/* XXXX This is a necessary evil until the spec is fixed */
+#define GSS_S_CRED_UNAVAIL GSS_S_FAILURE
+
+#endif /* __PTLRPC_GSS_GSS_ERR_H_ */
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_generic_token.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_generic_token.c
new file mode 100644
index 0000000..20b1638
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_generic_token.c
@@ -0,0 +1,285 @@
+/*
+ * Modifications for Lustre
+ *
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * Author: Eric Mei <ericm@clusterfs.com>
+ */
+
+/*
+ * linux/net/sunrpc/gss_generic_token.c
+ *
+ * Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/generic/util_token.c
+ *
+ * Copyright (c) 2000 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Andy Adamson <andros@umich.edu>
+ */
+
+/*
+ * Copyright 1993 by OpenVision Technologies, Inc.
+ *
+ * Permission to use, copy, modify, distribute, and sell this software
+ * and its documentation for any purpose is hereby granted without fee,
+ * provided that the above copyright notice appears in all copies and
+ * that both that copyright notice and this permission notice appear in
+ * supporting documentation, and that the name of OpenVision not be used
+ * in advertising or publicity pertaining to distribution of the software
+ * without specific, written prior permission. OpenVision makes no
+ * representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied warranty.
+ *
+ * OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define DEBUG_SUBSYSTEM S_SEC
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+
+#include <obd.h>
+#include <obd_class.h>
+#include <obd_support.h>
+#include <lustre/lustre_idl.h>
+#include <lustre_net.h>
+#include <lustre_import.h>
+#include <lustre_sec.h>
+
+#include "gss_err.h"
+#include "gss_internal.h"
+#include "gss_api.h"
+#include "gss_krb5.h"
+#include "gss_asn1.h"
+
+
+/* TWRITE_STR from gssapiP_generic.h */
+#define TWRITE_STR(ptr, str, len) \
+ memcpy((ptr), (char *) (str), (len)); \
+ (ptr) += (len);
+
+/* XXXX this code currently makes the assumption that a mech oid will
+ never be longer than 127 bytes. This assumption is not inherent in
+ the interfaces, so the code can be fixed if the OSI namespace
+ balloons unexpectedly. */
+
+/* Each token looks like this:
+
+0x60 tag for APPLICATION 0, SEQUENCE
+ (constructed, definite-length)
+ <length> possible multiple bytes, need to parse/generate
+ 0x06 tag for OBJECT IDENTIFIER
+ <moid_length> compile-time constant string (assume 1 byte)
+ <moid_bytes> compile-time constant string
+ <inner_bytes> the ANY containing the application token
+ bytes 0,1 are the token type
+ bytes 2,n are the token data
+
+For the purposes of this abstraction, the token "header" consists of
+the sequence tag and length octets, the mech OID DER encoding, and the
+first two inner bytes, which indicate the token type. The token
+"body" consists of everything else.
+
+*/
+
+static
+int der_length_size(int length)
+{
+ if (length < (1 << 7))
+ return 1;
+ else if (length < (1 << 8))
+ return 2;
+#if (SIZEOF_INT == 2)
+ else
+ return 3;
+#else
+ else if (length < (1 << 16))
+ return 3;
+ else if (length < (1 << 24))
+ return 4;
+ else
+ return 5;
+#endif
+}
+
+static
+void der_write_length(unsigned char **buf, int length)
+{
+ if (length < (1 << 7)) {
+ *(*buf)++ = (unsigned char) length;
+ } else {
+ *(*buf)++ = (unsigned char) (der_length_size(length) + 127);
+#if (SIZEOF_INT > 2)
+ if (length >= (1 << 24))
+ *(*buf)++ = (unsigned char) (length >> 24);
+ if (length >= (1 << 16))
+ *(*buf)++ = (unsigned char) ((length >> 16) & 0xff);
+#endif
+ if (length >= (1 << 8))
+ *(*buf)++ = (unsigned char) ((length >> 8) & 0xff);
+ *(*buf)++ = (unsigned char) (length & 0xff);
+ }
+}
+
+/*
+ * returns decoded length, or < 0 on failure. Advances buf and
+ * decrements bufsize
+ */
+static
+int der_read_length(unsigned char **buf, int *bufsize)
+{
+ unsigned char sf;
+ int ret;
+
+ if (*bufsize < 1)
+ return -1;
+ sf = *(*buf)++;
+ (*bufsize)--;
+ if (sf & 0x80) {
+ if ((sf &= 0x7f) > ((*bufsize) - 1))
+ return -1;
+ if (sf > SIZEOF_INT)
+ return -1;
+ ret = 0;
+ for (; sf; sf--) {
+ ret = (ret << 8) + (*(*buf)++);
+ (*bufsize)--;
+ }
+ } else {
+ ret = sf;
+ }
+
+ return ret;
+}
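+
+/*
+ * Worked example (illustrative only; the helper name and values below are
+ * made up and not part of the original util_token.c code): a length of 300
+ * does not fit in 7 bits, so the DER long form is used.  der_length_size(300)
+ * returns 3, der_write_length() emits 0x82 0x01 0x2c (0x80 | 2 length octets,
+ * then 0x012c == 300), and der_read_length() decodes the same bytes back.
+ */
+#if 0
+static void example_der_length_roundtrip(void)
+{
+	unsigned char lenbuf[5], *p = lenbuf;
+	int remain = sizeof(lenbuf);
+	int decoded;
+
+	der_write_length(&p, 300);		/* emits 0x82 0x01 0x2c */
+	p = lenbuf;
+	decoded = der_read_length(&p, &remain);	/* decoded == 300 */
+}
+#endif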
+
+/*
+ * returns the length of a token, given the mech oid and the body size
+ */
+int g_token_size(rawobj_t *mech, unsigned int body_size)
+{
+ /* set body_size to sequence contents size */
+ body_size += 4 + (int) mech->len; /* NEED overflow check */
+ return (1 + der_length_size(body_size) + body_size);
+}
+
+/*
+ * fills in a buffer with the token header. The buffer is assumed to
+ * be the right size. buf is advanced past the token header
+ */
+void g_make_token_header(rawobj_t *mech, int body_size, unsigned char **buf)
+{
+ *(*buf)++ = 0x60;
+ der_write_length(buf, 4 + mech->len + body_size);
+ *(*buf)++ = 0x06;
+ *(*buf)++ = (unsigned char) mech->len;
+ TWRITE_STR(*buf, mech->data, ((int) mech->len));
+}
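+
+/*
+ * Minimal usage sketch (illustrative only, not part of the original token
+ * code): sizing and building a token for a hypothetical 9-byte mech OID.
+ * The OID bytes, body size, helper name and allocation style are made up
+ * for the example; the 2-byte token type and the body itself are written
+ * by the caller right after g_make_token_header() returns.
+ */
+#if 0
+static void example_build_token(void)
+{
+	static __u8 oid_bytes[9] = { 0x2b, 0x06, 0x01, 0x05, 0x05,
+				     0x02, 0x01, 0x02, 0x03 };
+	rawobj_t mech = { .len = sizeof(oid_bytes), .data = oid_bytes };
+	unsigned int body_size = 32;
+	unsigned char *token, *ptr;
+
+	token = kmalloc(g_token_size(&mech, body_size), GFP_KERNEL);
+	if (token == NULL)
+		return;
+
+	ptr = token;
+	g_make_token_header(&mech, body_size, &ptr);
+	/* ptr now points just past the mech OID; write the 2-byte token
+	 * type and then body_size bytes of token data here. */
+	kfree(token);
+}
+#endif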
+
+/*
+ * Given a buffer containing a token, reads and verifies the token,
+ * leaving buf advanced past the token header, and setting body_size
+ * to the number of remaining bytes. Returns 0 on success,
+ * G_BAD_TOK_HEADER for a variety of errors, and G_WRONG_MECH if the
+ * mechanism in the token does not match the mech argument. buf and
+ * *body_size are left unmodified on error.
+ */
+__u32 g_verify_token_header(rawobj_t *mech, int *body_size,
+ unsigned char **buf_in, int toksize)
+{
+ unsigned char *buf = *buf_in;
+ int seqsize;
+ rawobj_t toid;
+ int ret = 0;
+
+ if ((toksize -= 1) < 0)
+ return (G_BAD_TOK_HEADER);
+ if (*buf++ != 0x60)
+ return (G_BAD_TOK_HEADER);
+
+ if ((seqsize = der_read_length(&buf, &toksize)) < 0)
+ return(G_BAD_TOK_HEADER);
+
+ if (seqsize != toksize)
+ return (G_BAD_TOK_HEADER);
+
+ if ((toksize -= 1) < 0)
+ return (G_BAD_TOK_HEADER);
+ if (*buf++ != 0x06)
+ return (G_BAD_TOK_HEADER);
+
+ if ((toksize -= 1) < 0)
+ return (G_BAD_TOK_HEADER);
+ toid.len = *buf++;
+
+ if ((toksize -= toid.len) < 0)
+ return (G_BAD_TOK_HEADER);
+ toid.data = buf;
+ buf += toid.len;
+
+ if (!g_OID_equal(&toid, mech))
+ ret = G_WRONG_MECH;
+
+ /* G_WRONG_MECH is not returned immediately because it's more
+ * important to return G_BAD_TOK_HEADER if the token header is
+ * in fact bad
+ */
+ if ((toksize -= 2) < 0)
+ return (G_BAD_TOK_HEADER);
+
+ if (ret)
+ return (ret);
+
+ if (!ret) {
+ *buf_in = buf;
+ *body_size = toksize;
+ }
+
+ return (ret);
+}
+
+/*
+ * Given a buffer containing a token, returns a copy of the mech oid in
+ * the parameter mech.
+ */
+__u32 g_get_mech_oid(rawobj_t *mech, rawobj_t *in_buf)
+{
+ unsigned char *buf = in_buf->data;
+ int len = in_buf->len;
+ int ret = 0;
+ int seqsize;
+
+ if ((len -= 1) < 0)
+ return (G_BAD_TOK_HEADER);
+ if (*buf++ != 0x60)
+ return (G_BAD_TOK_HEADER);
+
+ if ((seqsize = der_read_length(&buf, &len)) < 0)
+ return (G_BAD_TOK_HEADER);
+
+ if ((len -= 1) < 0)
+ return (G_BAD_TOK_HEADER);
+ if (*buf++ != 0x06)
+ return (G_BAD_TOK_HEADER);
+
+ if ((len -= 1) < 0)
+ return (G_BAD_TOK_HEADER);
+ mech->len = *buf++;
+
+ if ((len -= mech->len) < 0)
+ return (G_BAD_TOK_HEADER);
+ OBD_ALLOC_LARGE(mech->data, mech->len);
+ if (!mech->data)
+ return (G_BUFFER_ALLOC);
+ memcpy(mech->data, buf, mech->len);
+
+ return ret;
+}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_internal.h b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_internal.h
new file mode 100644
index 0000000..cbfc47c
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_internal.h
@@ -0,0 +1,526 @@
+/*
+ * Modified from NFSv4 project for Lustre
+ *
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * Author: Eric Mei <ericm@clusterfs.com>
+ */
+
+#ifndef __PTLRPC_GSS_GSS_INTERNAL_H_
+#define __PTLRPC_GSS_GSS_INTERNAL_H_
+
+#include <lustre_sec.h>
+
+/*
+ * rawobj stuff
+ */
+typedef struct netobj_s {
+ __u32 len;
+ __u8 data[0];
+} netobj_t;
+
+#define NETOBJ_EMPTY ((netobj_t) { 0 })
+
+typedef struct rawobj_s {
+ __u32 len;
+ __u8 *data;
+} rawobj_t;
+
+#define RAWOBJ_EMPTY ((rawobj_t) { 0, NULL })
+
+typedef struct rawobj_buf_s {
+ __u32 dataoff;
+ __u32 datalen;
+ __u32 buflen;
+ __u8 *buf;
+} rawobj_buf_t;
+
+int rawobj_empty(rawobj_t *obj);
+int rawobj_alloc(rawobj_t *obj, char *buf, int len);
+void rawobj_free(rawobj_t *obj);
+int rawobj_equal(rawobj_t *a, rawobj_t *b);
+int rawobj_dup(rawobj_t *dest, rawobj_t *src);
+int rawobj_serialize(rawobj_t *obj, __u32 **buf, __u32 *buflen);
+int rawobj_extract(rawobj_t *obj, __u32 **buf, __u32 *buflen);
+int rawobj_extract_alloc(rawobj_t *obj, __u32 **buf, __u32 *buflen);
+int rawobj_extract_local(rawobj_t *obj, __u32 **buf, __u32 *buflen);
+int rawobj_extract_local_alloc(rawobj_t *obj, __u32 **buf, __u32 *buflen);
+int rawobj_from_netobj(rawobj_t *rawobj, netobj_t *netobj);
+int rawobj_from_netobj_alloc(rawobj_t *obj, netobj_t *netobj);
+
+int buffer_extract_bytes(const void **buf, __u32 *buflen,
+ void *res, __u32 reslen);
+
+/*
+ * several timeout values. for the client refresh upcall timeout we use
+ * the default from the pipefs implementation.
+ */
+#define __TIMEOUT_DELTA (10)
+
+#define GSS_SECINIT_RPC_TIMEOUT \
+ (obd_timeout < __TIMEOUT_DELTA ? \
+ __TIMEOUT_DELTA : obd_timeout - __TIMEOUT_DELTA)
+
+#define GSS_SECFINI_RPC_TIMEOUT (__TIMEOUT_DELTA)
+#define GSS_SECSVC_UPCALL_TIMEOUT (GSS_SECINIT_RPC_TIMEOUT)
+
+/*
+ * default gc interval
+ */
+#define GSS_GC_INTERVAL (60 * 60) /* 60 minutes */
+
+static inline
+unsigned long gss_round_ctx_expiry(unsigned long expiry,
+ unsigned long sec_flags)
+{
+ if (sec_flags & PTLRPC_SEC_FL_REVERSE)
+ return expiry;
+
+ if (get_seconds() + __TIMEOUT_DELTA <= expiry)
+ return expiry - __TIMEOUT_DELTA;
+
+ return expiry;
+}
+
+/*
+ * Max encryption element in block cipher algorithms.
+ */
+#define GSS_MAX_CIPHER_BLOCK (16)
+
+/*
+ * XXX make it visible to both the kernel and lgssd/lsvcgssd
+ */
+#define GSSD_INTERFACE_VERSION (1)
+
+#define PTLRPC_GSS_VERSION (1)
+
+
+enum ptlrpc_gss_proc {
+ PTLRPC_GSS_PROC_DATA = 0,
+ PTLRPC_GSS_PROC_INIT = 1,
+ PTLRPC_GSS_PROC_CONTINUE_INIT = 2,
+ PTLRPC_GSS_PROC_DESTROY = 3,
+ PTLRPC_GSS_PROC_ERR = 4,
+};
+
+enum ptlrpc_gss_tgt {
+ LUSTRE_GSS_TGT_MGS = 0,
+ LUSTRE_GSS_TGT_MDS = 1,
+ LUSTRE_GSS_TGT_OSS = 2,
+};
+
+enum ptlrpc_gss_header_flags {
+ LUSTRE_GSS_PACK_BULK = 1,
+ LUSTRE_GSS_PACK_USER = 2,
+};
+
+static inline
+__u32 import_to_gss_svc(struct obd_import *imp)
+{
+ const char *name = imp->imp_obd->obd_type->typ_name;
+
+ if (!strcmp(name, LUSTRE_MGC_NAME))
+ return LUSTRE_GSS_TGT_MGS;
+ if (!strcmp(name, LUSTRE_MDC_NAME))
+ return LUSTRE_GSS_TGT_MDS;
+ if (!strcmp(name, LUSTRE_OSC_NAME))
+ return LUSTRE_GSS_TGT_OSS;
+ LBUG();
+ return 0;
+}
+
+/*
+ * the following 3 headers must have the same size and field offsets
+ */
+struct gss_header {
+ __u8 gh_version; /* gss version */
+ __u8 gh_sp; /* sec part */
+ __u16 gh_pad0;
+ __u32 gh_flags; /* wrap flags */
+ __u32 gh_proc; /* proc */
+ __u32 gh_seq; /* sequence */
+ __u32 gh_svc; /* service */
+ __u32 gh_pad1;
+ __u32 gh_pad2;
+ __u32 gh_pad3;
+ netobj_t gh_handle; /* context handle */
+};
+
+struct gss_rep_header {
+ __u8 gh_version;
+ __u8 gh_sp;
+ __u16 gh_pad0;
+ __u32 gh_flags;
+ __u32 gh_proc;
+ __u32 gh_major;
+ __u32 gh_minor;
+ __u32 gh_seqwin;
+ __u32 gh_pad2;
+ __u32 gh_pad3;
+ netobj_t gh_handle;
+};
+
+struct gss_err_header {
+ __u8 gh_version;
+ __u8 gh_sp;
+ __u16 gh_pad0;
+ __u32 gh_flags;
+ __u32 gh_proc;
+ __u32 gh_major;
+ __u32 gh_minor;
+ __u32 gh_pad1;
+ __u32 gh_pad2;
+ __u32 gh_pad3;
+ netobj_t gh_handle;
+};
+
+/*
+ * part of the wire context information sent from the client, which is
+ * saved and used later by the server.
+ */
+struct gss_wire_ctx {
+ __u32 gw_flags;
+ __u32 gw_proc;
+ __u32 gw_seq;
+ __u32 gw_svc;
+ rawobj_t gw_handle;
+};
+
+#define PTLRPC_GSS_MAX_HANDLE_SIZE (8)
+#define PTLRPC_GSS_HEADER_SIZE (sizeof(struct gss_header) + \
+ PTLRPC_GSS_MAX_HANDLE_SIZE)
+
+
+static inline __u64 gss_handle_to_u64(rawobj_t *handle)
+{
+ if (handle->len != PTLRPC_GSS_MAX_HANDLE_SIZE)
+ return -1;
+ return *((__u64 *) handle->data);
+}
+
+#define GSS_SEQ_WIN (2048)
+#define GSS_SEQ_WIN_MAIN GSS_SEQ_WIN
+#define GSS_SEQ_WIN_BACK (128)
+#define GSS_SEQ_REPACK_THRESHOLD (GSS_SEQ_WIN_MAIN / 2 + \
+ GSS_SEQ_WIN_MAIN / 4)
+
+struct gss_svc_seq_data {
+ spinlock_t ssd_lock;
+ /*
+ * highest sequence number seen so far, for main and back window
+ */
+ __u32 ssd_max_main;
+ __u32 ssd_max_back;
+ /*
+ * main and back window
+ * for i such that ssd_max - GSS_SEQ_WIN < i <= ssd_max, the i-th bit
+ * of ssd_win is nonzero iff sequence number i has been seen already.
+ */
+ unsigned long ssd_win_main[GSS_SEQ_WIN_MAIN/BITS_PER_LONG];
+ unsigned long ssd_win_back[GSS_SEQ_WIN_BACK/BITS_PER_LONG];
+};
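+
+/*
+ * Illustrative sketch only (this is not the actual gss_check_seq_num()
+ * implementation, which lives elsewhere in ptlrpc/gss; the helper name is
+ * made up): how a window bitmap such as ssd_win_main can be used to reject
+ * sequence numbers that are too old or already seen.  Assumes ssd_lock is
+ * held and ignores the bit-clearing a real replay window must do when it
+ * slides forward.
+ */
+#if 0
+static int example_seq_window_check(struct gss_svc_seq_data *ssd, __u32 seq)
+{
+	if (seq + GSS_SEQ_WIN_MAIN <= ssd->ssd_max_main)
+		return -1;	/* below the window: too old */
+
+	if (seq > ssd->ssd_max_main)
+		ssd->ssd_max_main = seq;	/* slide the window forward */
+
+	if (test_and_set_bit(seq % GSS_SEQ_WIN_MAIN, ssd->ssd_win_main))
+		return -1;	/* bit already set: replay */
+
+	return 0;		/* fresh sequence number */
+}
+#endif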
+
+struct gss_svc_ctx {
+ struct gss_ctx *gsc_mechctx;
+ struct gss_svc_seq_data gsc_seqdata;
+ rawobj_t gsc_rvs_hdl;
+ __u32 gsc_rvs_seq;
+ uid_t gsc_uid;
+ gid_t gsc_gid;
+ uid_t gsc_mapped_uid;
+ unsigned int gsc_usr_root:1,
+ gsc_usr_mds:1,
+ gsc_usr_oss:1,
+ gsc_remote:1,
+ gsc_reverse:1;
+};
+
+struct gss_svc_reqctx {
+ struct ptlrpc_svc_ctx src_base;
+ /*
+ * context
+ */
+ struct gss_wire_ctx src_wirectx;
+ struct gss_svc_ctx *src_ctx;
+ /*
+ * record place of bulk_sec_desc in request/reply buffer
+ */
+ struct ptlrpc_bulk_sec_desc *src_reqbsd;
+ int src_reqbsd_size;
+ struct ptlrpc_bulk_sec_desc *src_repbsd;
+ int src_repbsd_size;
+ /*
+ * flags
+ */
+ unsigned int src_init:1,
+ src_init_continue:1,
+ src_err_notify:1;
+ int src_reserve_len;
+};
+
+struct gss_cli_ctx {
+ struct ptlrpc_cli_ctx gc_base;
+ __u32 gc_flavor;
+ __u32 gc_proc;
+ __u32 gc_win;
+ atomic_t gc_seq;
+ rawobj_t gc_handle;
+ struct gss_ctx *gc_mechctx;
+ /* handle for the buddy svc ctx */
+ rawobj_t gc_svc_handle;
+};
+
+struct gss_cli_ctx_keyring {
+ struct gss_cli_ctx gck_base;
+ struct key *gck_key;
+ struct timer_list *gck_timer;
+};
+
+struct gss_sec {
+ struct ptlrpc_sec gs_base;
+ struct gss_api_mech *gs_mech;
+ spinlock_t gs_lock;
+ __u64 gs_rvs_hdl;
+};
+
+struct gss_sec_pipefs {
+ struct gss_sec gsp_base;
+ int gsp_chash_size; /* must be 2^n */
+ struct hlist_head gsp_chash[0];
+};
+
+/*
+ * FIXME cleanup the keyring upcall mutexes
+ */
+#define HAVE_KEYRING_UPCALL_SERIALIZED 1
+
+struct gss_sec_keyring {
+ struct gss_sec gsk_base;
+ /*
+ * all contexts listed here. access is protected by sec spinlock.
+ */
+ struct hlist_head gsk_clist;
+	/*
+	 * points specifically to the root ctx (only one at a time). access
+	 * is protected by the sec spinlock.
+	 */
+ struct ptlrpc_cli_ctx *gsk_root_ctx;
+	/*
+	 * serializes upcalls for the root context.
+	 */
+ struct mutex gsk_root_uc_lock;
+
+#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
+ struct mutex gsk_uc_lock; /* serialize upcalls */
+#endif
+};
+
+static inline struct gss_cli_ctx *ctx2gctx(struct ptlrpc_cli_ctx *ctx)
+{
+ return container_of(ctx, struct gss_cli_ctx, gc_base);
+}
+
+static inline
+struct gss_cli_ctx_keyring *ctx2gctx_keyring(struct ptlrpc_cli_ctx *ctx)
+{
+ return container_of(ctx2gctx(ctx),
+ struct gss_cli_ctx_keyring, gck_base);
+}
+
+static inline struct gss_sec *sec2gsec(struct ptlrpc_sec *sec)
+{
+ return container_of(sec, struct gss_sec, gs_base);
+}
+
+static inline struct gss_sec_pipefs *sec2gsec_pipefs(struct ptlrpc_sec *sec)
+{
+ return container_of(sec2gsec(sec), struct gss_sec_pipefs, gsp_base);
+}
+
+static inline struct gss_sec_keyring *sec2gsec_keyring(struct ptlrpc_sec *sec)
+{
+ return container_of(sec2gsec(sec), struct gss_sec_keyring, gsk_base);
+}
+
+
+#define GSS_CTX_INIT_MAX_LEN (1024)
+
+/*
+ * This is only guaranteed to be enough for the current krb5 des-cbc-crc.
+ * We might need to adjust it when new enctypes or mechs are added.
+ */
+#define GSS_PRIVBUF_PREFIX_LEN (32)
+#define GSS_PRIVBUF_SUFFIX_LEN (32)
+
+static inline
+struct gss_svc_reqctx *gss_svc_ctx2reqctx(struct ptlrpc_svc_ctx *ctx)
+{
+ LASSERT(ctx);
+ return container_of(ctx, struct gss_svc_reqctx, src_base);
+}
+
+static inline
+struct gss_svc_ctx *gss_svc_ctx2gssctx(struct ptlrpc_svc_ctx *ctx)
+{
+ LASSERT(ctx);
+ return gss_svc_ctx2reqctx(ctx)->src_ctx;
+}
+
+/* sec_gss.c */
+int gss_cli_ctx_match(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred);
+int gss_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize);
+int gss_cli_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);
+int gss_cli_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);
+int gss_cli_ctx_seal(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);
+int gss_cli_ctx_unseal(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);
+
+int gss_sec_install_rctx(struct obd_import *imp, struct ptlrpc_sec *sec,
+ struct ptlrpc_cli_ctx *ctx);
+int gss_alloc_reqbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req,
+ int msgsize);
+void gss_free_reqbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req);
+int gss_alloc_repbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req,
+ int msgsize);
+void gss_free_repbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req);
+int gss_enlarge_reqbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req,
+ int segment, int newsize);
+
+int gss_svc_accept(struct ptlrpc_sec_policy *policy,
+ struct ptlrpc_request *req);
+void gss_svc_invalidate_ctx(struct ptlrpc_svc_ctx *svc_ctx);
+int gss_svc_alloc_rs(struct ptlrpc_request *req, int msglen);
+int gss_svc_authorize(struct ptlrpc_request *req);
+void gss_svc_free_rs(struct ptlrpc_reply_state *rs);
+void gss_svc_free_ctx(struct ptlrpc_svc_ctx *ctx);
+
+int cli_ctx_expire(struct ptlrpc_cli_ctx *ctx);
+int cli_ctx_check_death(struct ptlrpc_cli_ctx *ctx);
+
+int gss_copy_rvc_cli_ctx(struct ptlrpc_cli_ctx *cli_ctx,
+ struct ptlrpc_svc_ctx *svc_ctx);
+
+struct gss_header *gss_swab_header(struct lustre_msg *msg, int segment,
+ int swabbed);
+netobj_t *gss_swab_netobj(struct lustre_msg *msg, int segment);
+
+void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx);
+int gss_pack_err_notify(struct ptlrpc_request *req, __u32 major, __u32 minor);
+int gss_check_seq_num(struct gss_svc_seq_data *sd, __u32 seq_num, int set);
+
+int gss_sec_create_common(struct gss_sec *gsec,
+ struct ptlrpc_sec_policy *policy,
+ struct obd_import *imp,
+ struct ptlrpc_svc_ctx *ctx,
+ struct sptlrpc_flavor *sf);
+void gss_sec_destroy_common(struct gss_sec *gsec);
+void gss_sec_kill(struct ptlrpc_sec *sec);
+
+int gss_cli_ctx_init_common(struct ptlrpc_sec *sec,
+ struct ptlrpc_cli_ctx *ctx,
+ struct ptlrpc_ctx_ops *ctxops,
+ struct vfs_cred *vcred);
+int gss_cli_ctx_fini_common(struct ptlrpc_sec *sec,
+ struct ptlrpc_cli_ctx *ctx);
+
+void gss_cli_ctx_flags2str(unsigned long flags, char *buf, int bufsize);
+
+/* gss_keyring.c */
+int __init gss_init_keyring(void);
+void __exit gss_exit_keyring(void);
+
+/* gss_pipefs.c */
+int __init gss_init_pipefs(void);
+void __exit gss_exit_pipefs(void);
+
+/* gss_bulk.c */
+int gss_cli_prep_bulk(struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc);
+int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
+ struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc);
+int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
+ struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc);
+int gss_svc_prep_bulk(struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc);
+int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc);
+int gss_svc_wrap_bulk(struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc);
+
+/* gss_mech_switch.c */
+int init_kerberos_module(void);
+void cleanup_kerberos_module(void);
+
+/* gss_generic_token.c */
+int g_token_size(rawobj_t *mech, unsigned int body_size);
+void g_make_token_header(rawobj_t *mech, int body_size, unsigned char **buf);
+__u32 g_verify_token_header(rawobj_t *mech, int *body_size,
+ unsigned char **buf_in, int toksize);
+
+
+/* gss_cli_upcall.c */
+int gss_do_ctx_init_rpc(char *buffer, unsigned long count);
+int gss_do_ctx_fini_rpc(struct gss_cli_ctx *gctx);
+
+int __init gss_init_cli_upcall(void);
+void __exit gss_exit_cli_upcall(void);
+
+/* gss_svc_upcall.c */
+__u64 gss_get_next_ctx_index(void);
+int gss_svc_upcall_install_rvs_ctx(struct obd_import *imp,
+ struct gss_sec *gsec,
+ struct gss_cli_ctx *gctx);
+int gss_svc_upcall_expire_rvs_ctx(rawobj_t *handle);
+int gss_svc_upcall_dup_handle(rawobj_t *handle, struct gss_svc_ctx *ctx);
+int gss_svc_upcall_update_sequence(rawobj_t *handle, __u32 seq);
+int gss_svc_upcall_handle_init(struct ptlrpc_request *req,
+ struct gss_svc_reqctx *grctx,
+ struct gss_wire_ctx *gw,
+ struct obd_device *target,
+ __u32 lustre_svc,
+ rawobj_t *rvs_hdl,
+ rawobj_t *in_token);
+struct gss_svc_ctx *gss_svc_upcall_get_ctx(struct ptlrpc_request *req,
+ struct gss_wire_ctx *gw);
+void gss_svc_upcall_put_ctx(struct gss_svc_ctx *ctx);
+void gss_svc_upcall_destroy_ctx(struct gss_svc_ctx *ctx);
+
+int __init gss_init_svc_upcall(void);
+void __exit gss_exit_svc_upcall(void);
+
+/* lproc_gss.c */
+void gss_stat_oos_record_cli(int behind);
+void gss_stat_oos_record_svc(int phase, int replay);
+
+int __init gss_init_lproc(void);
+void __exit gss_exit_lproc(void);
+
+/* gss_krb5_mech.c */
+int __init init_kerberos_module(void);
+void __exit cleanup_kerberos_module(void);
+
+
+/* debug */
+static inline
+void __dbg_memdump(char *name, void *ptr, int size)
+{
+ char *buf, *p = (char *) ptr;
+ int bufsize = size * 2 + 1, i;
+
+ OBD_ALLOC(buf, bufsize);
+ if (!buf) {
+ CDEBUG(D_ERROR, "DUMP ERROR: can't alloc %d bytes\n", bufsize);
+ return;
+ }
+
+ for (i = 0; i < size; i++)
+ sprintf(&buf[i+i], "%02x", (__u8) p[i]);
+ buf[size + size] = '\0';
+ LCONSOLE_INFO("DUMP %s@%p(%d): %s\n", name, ptr, size, buf);
+ OBD_FREE(buf, bufsize);
+}
+
+#endif /* __PTLRPC_GSS_GSS_INTERNAL_H_ */
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c
new file mode 100644
index 0000000..188dbbf
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c
@@ -0,0 +1,1409 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ptlrpc/gss/gss_keyring.c
+ *
+ * Author: Eric Mei <ericm@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_SEC
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/dcache.h>
+#include <linux/fs.h>
+#include <linux/crypto.h>
+#include <linux/key.h>
+#include <linux/keyctl.h>
+#include <linux/key-type.h>
+#include <linux/mutex.h>
+#include <asm/atomic.h>
+
+#include <obd.h>
+#include <obd_class.h>
+#include <obd_support.h>
+#include <lustre/lustre_idl.h>
+#include <lustre_sec.h>
+#include <lustre_net.h>
+#include <lustre_import.h>
+
+#include "gss_err.h"
+#include "gss_internal.h"
+#include "gss_api.h"
+
+static struct ptlrpc_sec_policy gss_policy_keyring;
+static struct ptlrpc_ctx_ops gss_keyring_ctxops;
+static struct key_type gss_key_type;
+
+static int sec_install_rctx_kr(struct ptlrpc_sec *sec,
+ struct ptlrpc_svc_ctx *svc_ctx);
+
+/*
+ * the timeout is only for the case where the upcall child process dies
+ * abnormally. in all other cases it should eventually update the kernel key.
+ *
+ * FIXME we had better incorporate the client & server side upcall timeouts
+ * into the framework of Adaptive Timeouts, but we need to figure out how to
+ * make sure that the kernel knows whether the upcall process is in progress
+ * or died unexpectedly.
+ */
+#define KEYRING_UPCALL_TIMEOUT (obd_timeout + obd_timeout)
+
+/****************************************
+ * internal helpers *
+ ****************************************/
+
+#define DUMP_PROCESS_KEYRINGS(tsk) \
+{ \
+ CWARN("DUMP PK: %s[%u,%u/%u](<-%s[%u,%u/%u]): " \
+ "a %d, t %d, p %d, s %d, u %d, us %d, df %d\n", \
+ tsk->comm, tsk->pid, tsk->uid, tsk->fsuid, \
+ tsk->parent->comm, tsk->parent->pid, \
+ tsk->parent->uid, tsk->parent->fsuid, \
+ tsk->request_key_auth ? \
+ tsk->request_key_auth->serial : 0, \
+ key_cred(tsk)->thread_keyring ? \
+ key_cred(tsk)->thread_keyring->serial : 0, \
+ key_tgcred(tsk)->process_keyring ? \
+ key_tgcred(tsk)->process_keyring->serial : 0, \
+ key_tgcred(tsk)->session_keyring ? \
+ key_tgcred(tsk)->session_keyring->serial : 0, \
+ key_cred(tsk)->user->uid_keyring ? \
+ key_cred(tsk)->user->uid_keyring->serial : 0, \
+ key_cred(tsk)->user->session_keyring ? \
+ key_cred(tsk)->user->session_keyring->serial : 0, \
+ key_cred(tsk)->jit_keyring \
+ ); \
+}
+
+#define DUMP_KEY(key) \
+{ \
+ CWARN("DUMP KEY: %p(%d) ref %d u%u/g%u desc %s\n", \
+ key, key->serial, atomic_read(&key->usage), \
+ key->uid, key->gid, \
+ key->description ? key->description : "n/a" \
+ ); \
+}
+
+#define key_cred(tsk) ((tsk)->cred)
+#define key_tgcred(tsk) ((tsk)->cred->tgcred)
+
+static inline void keyring_upcall_lock(struct gss_sec_keyring *gsec_kr)
+{
+#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
+ mutex_lock(&gsec_kr->gsk_uc_lock);
+#endif
+}
+
+static inline void keyring_upcall_unlock(struct gss_sec_keyring *gsec_kr)
+{
+#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
+ mutex_unlock(&gsec_kr->gsk_uc_lock);
+#endif
+}
+
+static inline void key_revoke_locked(struct key *key)
+{
+ set_bit(KEY_FLAG_REVOKED, &key->flags);
+}
+
+static void ctx_upcall_timeout_kr(unsigned long data)
+{
+ struct ptlrpc_cli_ctx *ctx = (struct ptlrpc_cli_ctx *) data;
+ struct key *key = ctx2gctx_keyring(ctx)->gck_key;
+
+ CWARN("ctx %p, key %p\n", ctx, key);
+
+ LASSERT(key);
+
+ cli_ctx_expire(ctx);
+ key_revoke_locked(key);
+}
+
+static
+void ctx_start_timer_kr(struct ptlrpc_cli_ctx *ctx, long timeout)
+{
+ struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
+ struct timer_list *timer = gctx_kr->gck_timer;
+
+ LASSERT(timer);
+
+ CDEBUG(D_SEC, "ctx %p: start timer %lds\n", ctx, timeout);
+ timeout = timeout * HZ + cfs_time_current();
+
+ init_timer(timer);
+ timer->expires = timeout;
+ timer->data = (unsigned long ) ctx;
+ timer->function = ctx_upcall_timeout_kr;
+
+ add_timer(timer);
+}
+
+/*
+ * caller should make sure no race with other threads
+ */
+static
+void ctx_clear_timer_kr(struct ptlrpc_cli_ctx *ctx)
+{
+ struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
+ struct timer_list *timer = gctx_kr->gck_timer;
+
+ if (timer == NULL)
+ return;
+
+ CDEBUG(D_SEC, "ctx %p, key %p\n", ctx, gctx_kr->gck_key);
+
+ gctx_kr->gck_timer = NULL;
+
+ del_singleshot_timer_sync(timer);
+
+ OBD_FREE_PTR(timer);
+}
+
+static
+struct ptlrpc_cli_ctx *ctx_create_kr(struct ptlrpc_sec *sec,
+ struct vfs_cred *vcred)
+{
+ struct ptlrpc_cli_ctx *ctx;
+ struct gss_cli_ctx_keyring *gctx_kr;
+
+ OBD_ALLOC_PTR(gctx_kr);
+ if (gctx_kr == NULL)
+ return NULL;
+
+ OBD_ALLOC_PTR(gctx_kr->gck_timer);
+ if (gctx_kr->gck_timer == NULL) {
+ OBD_FREE_PTR(gctx_kr);
+ return NULL;
+ }
+ init_timer(gctx_kr->gck_timer);
+
+ ctx = &gctx_kr->gck_base.gc_base;
+
+ if (gss_cli_ctx_init_common(sec, ctx, &gss_keyring_ctxops, vcred)) {
+ OBD_FREE_PTR(gctx_kr->gck_timer);
+ OBD_FREE_PTR(gctx_kr);
+ return NULL;
+ }
+
+ ctx->cc_expire = cfs_time_current_sec() + KEYRING_UPCALL_TIMEOUT;
+ clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags);
+ atomic_inc(&ctx->cc_refcount); /* for the caller */
+
+ return ctx;
+}
+
+static void ctx_destroy_kr(struct ptlrpc_cli_ctx *ctx)
+{
+ struct ptlrpc_sec *sec = ctx->cc_sec;
+ struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
+
+ CDEBUG(D_SEC, "destroying ctx %p\n", ctx);
+
+ /* at this time the association with key has been broken. */
+ LASSERT(sec);
+ LASSERT(atomic_read(&sec->ps_refcount) > 0);
+ LASSERT(atomic_read(&sec->ps_nctx) > 0);
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+ LASSERT(gctx_kr->gck_key == NULL);
+
+ ctx_clear_timer_kr(ctx);
+ LASSERT(gctx_kr->gck_timer == NULL);
+
+ if (gss_cli_ctx_fini_common(sec, ctx))
+ return;
+
+ OBD_FREE_PTR(gctx_kr);
+
+ atomic_dec(&sec->ps_nctx);
+ sptlrpc_sec_put(sec);
+}
+
+static void ctx_release_kr(struct ptlrpc_cli_ctx *ctx, int sync)
+{
+ if (sync) {
+ ctx_destroy_kr(ctx);
+ } else {
+ atomic_inc(&ctx->cc_refcount);
+ sptlrpc_gc_add_ctx(ctx);
+ }
+}
+
+static void ctx_put_kr(struct ptlrpc_cli_ctx *ctx, int sync)
+{
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+
+ if (atomic_dec_and_test(&ctx->cc_refcount))
+ ctx_release_kr(ctx, sync);
+}
+
+/*
+ * key <-> ctx association and rules:
+ * - a ctx might not be bound to any key
+ * - key/ctx binding is protected by the key semaphore (if the key is present)
+ * - key and ctx each take a reference on the other
+ * - ctx enlist/unlist is protected by the ctx spinlock
+ * - never enlist a ctx after it has been unlisted
+ * - whoever does enlist should also do bind; lock the key before enlist:
+ *   - lock key -> lock ctx -> enlist -> unlock ctx -> bind -> unlock key
+ * - whoever does unlist should also do unbind:
+ *   - lock key -> lock ctx -> unlist -> unlock ctx -> unbind -> unlock key
+ *   - lock ctx -> unlist -> unlock ctx -> lock key -> unbind -> unlock key
+ */
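+
+/*
+ * Illustrative sketch only of the "enlist then bind" ordering described
+ * above, roughly as it is followed later in gss_sec_lookup_ctx_kr().  The
+ * helper name is made up, error handling and the pre-existing-payload case
+ * are omitted, and the functions used here are defined further down in
+ * this file.
+ */
+#if 0
+static void example_enlist_and_bind(struct key *key,
+				    struct ptlrpc_cli_ctx *ctx, int is_root)
+{
+	down_write(&key->sem);			/* lock key */
+	ctx_enlist_kr(ctx, is_root, 0);		/* lock list -> enlist -> unlock */
+	bind_key_ctx(key, ctx);			/* bind while key is still locked */
+	up_write(&key->sem);			/* unlock key */
+}
+#endif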
+
+static inline void spin_lock_if(spinlock_t *lock, int condition)
+{
+ if (condition)
+ spin_lock(lock);
+}
+
+static inline void spin_unlock_if(spinlock_t *lock, int condition)
+{
+ if (condition)
+ spin_unlock(lock);
+}
+
+static void ctx_enlist_kr(struct ptlrpc_cli_ctx *ctx, int is_root, int locked)
+{
+ struct ptlrpc_sec *sec = ctx->cc_sec;
+ struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
+
+ LASSERT(!test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+
+ spin_lock_if(&sec->ps_lock, !locked);
+
+ atomic_inc(&ctx->cc_refcount);
+ set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
+ hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
+ if (is_root)
+ gsec_kr->gsk_root_ctx = ctx;
+
+ spin_unlock_if(&sec->ps_lock, !locked);
+}
+
+/*
+ * Note that after this gets called, the caller should not access the ctx
+ * again because it might have been freed, unless the caller holds at least
+ * one refcount on the ctx.
+ *
+ * returns non-zero if we indeed unlisted this ctx.
+ */
+static int ctx_unlist_kr(struct ptlrpc_cli_ctx *ctx, int locked)
+{
+ struct ptlrpc_sec *sec = ctx->cc_sec;
+ struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
+
+ /* if hashed bit has gone, leave the job to somebody who is doing it */
+ if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
+ return 0;
+
+ /* drop ref inside spin lock to prevent race with other operations */
+ spin_lock_if(&sec->ps_lock, !locked);
+
+ if (gsec_kr->gsk_root_ctx == ctx)
+ gsec_kr->gsk_root_ctx = NULL;
+ hlist_del_init(&ctx->cc_cache);
+ atomic_dec(&ctx->cc_refcount);
+
+ spin_unlock_if(&sec->ps_lock, !locked);
+
+ return 1;
+}
+
+/*
+ * bind a key with a ctx together.
+ * caller must hold write lock of the key, as well as ref on key & ctx.
+ */
+static void bind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
+{
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(atomic_read(&key->usage) > 0);
+ LASSERT(ctx2gctx_keyring(ctx)->gck_key == NULL);
+ LASSERT(key->payload.data == NULL);
+
+	/* at this time the context may or may not be in the list. */
+ key_get(key);
+ atomic_inc(&ctx->cc_refcount);
+ ctx2gctx_keyring(ctx)->gck_key = key;
+ key->payload.data = ctx;
+}
+
+/*
+ * unbind a key and a ctx.
+ * caller must hold write lock, as well as a ref of the key.
+ */
+static void unbind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
+{
+ LASSERT(key->payload.data == ctx);
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+
+ /* must revoke the key, or others may treat it as newly created */
+ key_revoke_locked(key);
+
+ key->payload.data = NULL;
+ ctx2gctx_keyring(ctx)->gck_key = NULL;
+
+	/* once the ctx is split from the key, the timer is meaningless */
+ ctx_clear_timer_kr(ctx);
+
+ ctx_put_kr(ctx, 1);
+ key_put(key);
+}
+
+/*
+ * given a ctx, unbind it from its coupled key, if any.
+ * unbind can only be called once, so we don't worry about the key being
+ * released by someone else.
+ */
+static void unbind_ctx_kr(struct ptlrpc_cli_ctx *ctx)
+{
+ struct key *key = ctx2gctx_keyring(ctx)->gck_key;
+
+ if (key) {
+ LASSERT(key->payload.data == ctx);
+
+ key_get(key);
+ down_write(&key->sem);
+ unbind_key_ctx(key, ctx);
+ up_write(&key->sem);
+ key_put(key);
+ }
+}
+
+/*
+ * given a key, unbind with its coupled ctx, if any.
+ * caller must hold write lock, as well as a ref of the key.
+ */
+static void unbind_key_locked(struct key *key)
+{
+ struct ptlrpc_cli_ctx *ctx = key->payload.data;
+
+ if (ctx)
+ unbind_key_ctx(key, ctx);
+}
+
+/*
+ * unlist a ctx, and unbind from coupled key
+ */
+static void kill_ctx_kr(struct ptlrpc_cli_ctx *ctx)
+{
+ if (ctx_unlist_kr(ctx, 0))
+ unbind_ctx_kr(ctx);
+}
+
+/*
+ * given a key, unlist and unbind with the coupled ctx (if any).
+ * caller must hold write lock, as well as a ref of the key.
+ */
+static void kill_key_locked(struct key *key)
+{
+ struct ptlrpc_cli_ctx *ctx = key->payload.data;
+
+ if (ctx && ctx_unlist_kr(ctx, 0))
+ unbind_key_locked(key);
+}
+
+/*
+ * caller should hold one ref on contexts in freelist.
+ */
+static void dispose_ctx_list_kr(struct hlist_head *freelist)
+{
+ struct hlist_node *next;
+ struct ptlrpc_cli_ctx *ctx;
+ struct gss_cli_ctx *gctx;
+
+ hlist_for_each_entry_safe(ctx, next, freelist, cc_cache) {
+ hlist_del_init(&ctx->cc_cache);
+
+		/* reverse ctx: update the current seq to the buddy svcctx if
+		 * it exists. ideally this should be done at
+		 * gss_cli_ctx_finalize(), but the ctx destroy could be
+		 * delayed by:
+		 *  1) the ctx still has references;
+		 *  2) ctx destroy is asynchronous;
+		 * and the reverse import call inval_all_ctx() requires this to
+		 * be done _immediately_, otherwise a newly created reverse ctx
+		 * might copy the very old sequence number from the svcctx. */
+ gctx = ctx2gctx(ctx);
+ if (!rawobj_empty(&gctx->gc_svc_handle) &&
+ sec_is_reverse(gctx->gc_base.cc_sec)) {
+ gss_svc_upcall_update_sequence(&gctx->gc_svc_handle,
+ (__u32) atomic_read(&gctx->gc_seq));
+ }
+
+		/* we need to wake up waiting reqs here. the context might
+		 * be force-released before the upcall finishes, and then the
+		 * late-arriving downcall cannot find the ctx at all. */
+ sptlrpc_cli_ctx_wakeup(ctx);
+
+ unbind_ctx_kr(ctx);
+ ctx_put_kr(ctx, 0);
+ }
+}
+
+/*
+ * lookup a root context directly in a sec, return root ctx with a
+ * reference taken or NULL.
+ */
+static
+struct ptlrpc_cli_ctx * sec_lookup_root_ctx_kr(struct ptlrpc_sec *sec)
+{
+ struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
+ struct ptlrpc_cli_ctx *ctx = NULL;
+
+ spin_lock(&sec->ps_lock);
+
+ ctx = gsec_kr->gsk_root_ctx;
+
+ if (ctx == NULL && unlikely(sec_is_reverse(sec))) {
+ struct ptlrpc_cli_ctx *tmp;
+
+		/* reverse ctx: search for the root ctx in the list and choose
+		 * the one with the shortest expire time, which most probably
+		 * has an established peer ctx at the client side. */
+ hlist_for_each_entry(tmp, &gsec_kr->gsk_clist, cc_cache) {
+ if (ctx == NULL || ctx->cc_expire == 0 ||
+ ctx->cc_expire > tmp->cc_expire) {
+ ctx = tmp;
+ /* promote to be root_ctx */
+ gsec_kr->gsk_root_ctx = ctx;
+ }
+ }
+ }
+
+ if (ctx) {
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(!hlist_empty(&gsec_kr->gsk_clist));
+ atomic_inc(&ctx->cc_refcount);
+ }
+
+ spin_unlock(&sec->ps_lock);
+
+ return ctx;
+}
+
+#define RVS_CTX_EXPIRE_NICE (10)
+
+static
+void rvs_sec_install_root_ctx_kr(struct ptlrpc_sec *sec,
+ struct ptlrpc_cli_ctx *new_ctx,
+ struct key *key)
+{
+ struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
+ struct ptlrpc_cli_ctx *ctx;
+ cfs_time_t now;
+
+ LASSERT(sec_is_reverse(sec));
+
+ spin_lock(&sec->ps_lock);
+
+ now = cfs_time_current_sec();
+
+ /* set all existing ctxs short expiry */
+ hlist_for_each_entry(ctx, &gsec_kr->gsk_clist, cc_cache) {
+ if (ctx->cc_expire > now + RVS_CTX_EXPIRE_NICE) {
+ ctx->cc_early_expire = 1;
+ ctx->cc_expire = now + RVS_CTX_EXPIRE_NICE;
+ }
+ }
+
+	/* if there's already a root_ctx, instead of obsoleting the current
+	 * one immediately, we let it keep operating for a little while.
+	 * hopefully by the time the first backward rpc with the newest ctx
+	 * is sent out, the client side already has the peer ctx well
+	 * established. */
+ ctx_enlist_kr(new_ctx, gsec_kr->gsk_root_ctx ? 0 : 1, 1);
+
+ if (key)
+ bind_key_ctx(key, new_ctx);
+
+ spin_unlock(&sec->ps_lock);
+}
+
+static void construct_key_desc(void *buf, int bufsize,
+ struct ptlrpc_sec *sec, uid_t uid)
+{
+ snprintf(buf, bufsize, "%d@%x", uid, sec->ps_id);
+ ((char *)buf)[bufsize - 1] = '\0';
+}
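+
+/*
+ * Illustrative example only: for a hypothetical sec with ps_id 0x1f and
+ * uid 1000, construct_key_desc() above yields the description "1000@1f",
+ * which is the string later passed to request_key() in this file.
+ */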
+
+/****************************************
+ * sec apis *
+ ****************************************/
+
+static
+struct ptlrpc_sec * gss_sec_create_kr(struct obd_import *imp,
+ struct ptlrpc_svc_ctx *svcctx,
+ struct sptlrpc_flavor *sf)
+{
+ struct gss_sec_keyring *gsec_kr;
+
+ OBD_ALLOC(gsec_kr, sizeof(*gsec_kr));
+ if (gsec_kr == NULL)
+ return NULL;
+
+ INIT_HLIST_HEAD(&gsec_kr->gsk_clist);
+ gsec_kr->gsk_root_ctx = NULL;
+ mutex_init(&gsec_kr->gsk_root_uc_lock);
+#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
+ mutex_init(&gsec_kr->gsk_uc_lock);
+#endif
+
+ if (gss_sec_create_common(&gsec_kr->gsk_base, &gss_policy_keyring,
+ imp, svcctx, sf))
+ goto err_free;
+
+ if (svcctx != NULL &&
+ sec_install_rctx_kr(&gsec_kr->gsk_base.gs_base, svcctx)) {
+ gss_sec_destroy_common(&gsec_kr->gsk_base);
+ goto err_free;
+ }
+
+ return &gsec_kr->gsk_base.gs_base;
+
+err_free:
+ OBD_FREE(gsec_kr, sizeof(*gsec_kr));
+ return NULL;
+}
+
+static
+void gss_sec_destroy_kr(struct ptlrpc_sec *sec)
+{
+ struct gss_sec *gsec = sec2gsec(sec);
+ struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
+
+ CDEBUG(D_SEC, "destroy %s@%p\n", sec->ps_policy->sp_name, sec);
+
+ LASSERT(hlist_empty(&gsec_kr->gsk_clist));
+ LASSERT(gsec_kr->gsk_root_ctx == NULL);
+
+ gss_sec_destroy_common(gsec);
+
+ OBD_FREE(gsec_kr, sizeof(*gsec_kr));
+}
+
+static inline int user_is_root(struct ptlrpc_sec *sec, struct vfs_cred *vcred)
+{
+	/* apart from the ROOTONLY flag, treat it as the root user only if the
+	 * real uid is 0; euid/fsuid being 0 is handled as a setuid scenario */
+ if (sec_is_rootonly(sec) || (vcred->vc_uid == 0))
+ return 1;
+ else
+ return 0;
+}
+
+/*
+ * unlink the request key from its ring, which it was linked to during
+ * request_key(). sadly, we have to 'guess' which keyring it's linked to.
+ *
+ * FIXME this code is fragile, it depends on how request_key_link() is
+ * implemented.
+ */
+static void request_key_unlink(struct key *key)
+{
+ struct task_struct *tsk = current;
+ struct key *ring;
+
+ switch (key_cred(tsk)->jit_keyring) {
+ case KEY_REQKEY_DEFL_DEFAULT:
+ case KEY_REQKEY_DEFL_THREAD_KEYRING:
+ ring = key_get(key_cred(tsk)->thread_keyring);
+ if (ring)
+ break;
+ case KEY_REQKEY_DEFL_PROCESS_KEYRING:
+ ring = key_get(key_tgcred(tsk)->process_keyring);
+ if (ring)
+ break;
+ case KEY_REQKEY_DEFL_SESSION_KEYRING:
+ rcu_read_lock();
+ ring = key_get(rcu_dereference(key_tgcred(tsk)
+ ->session_keyring));
+ rcu_read_unlock();
+ if (ring)
+ break;
+ case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
+ ring = key_get(key_cred(tsk)->user->session_keyring);
+ break;
+ case KEY_REQKEY_DEFL_USER_KEYRING:
+ ring = key_get(key_cred(tsk)->user->uid_keyring);
+ break;
+ case KEY_REQKEY_DEFL_GROUP_KEYRING:
+ default:
+ LBUG();
+ }
+
+ LASSERT(ring);
+ key_unlink(ring, key);
+ key_put(ring);
+}
+
+static
+struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,
+ struct vfs_cred *vcred,
+ int create, int remove_dead)
+{
+ struct obd_import *imp = sec->ps_import;
+ struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
+ struct ptlrpc_cli_ctx *ctx = NULL;
+ unsigned int is_root = 0, create_new = 0;
+ struct key *key;
+ char desc[24];
+ char *coinfo;
+ int coinfo_size;
+ char *co_flags = "";
+
+ LASSERT(imp != NULL);
+
+ is_root = user_is_root(sec, vcred);
+
+	/* a small optimization for the root context */
+ if (is_root) {
+ ctx = sec_lookup_root_ctx_kr(sec);
+ /*
+ * Only lookup directly for REVERSE sec, which should
+ * always succeed.
+ */
+ if (ctx || sec_is_reverse(sec))
+ return ctx;
+ }
+
+ LASSERT(create != 0);
+
+	/* for the root context, obtain the lock and check again, this time
+	 * holding the root upcall lock, to make sure nobody else has
+	 * populated a new root context since the last check. */
+ if (is_root) {
+ mutex_lock(&gsec_kr->gsk_root_uc_lock);
+
+ ctx = sec_lookup_root_ctx_kr(sec);
+ if (ctx)
+ goto out;
+
+ /* update reverse handle for root user */
+ sec2gsec(sec)->gs_rvs_hdl = gss_get_next_ctx_index();
+
+ switch (sec->ps_part) {
+ case LUSTRE_SP_MDT:
+ co_flags = "m";
+ break;
+ case LUSTRE_SP_OST:
+ co_flags = "o";
+ break;
+ case LUSTRE_SP_MGC:
+ co_flags = "rmo";
+ break;
+ case LUSTRE_SP_CLI:
+ co_flags = "r";
+ break;
+ case LUSTRE_SP_MGS:
+ default:
+ LBUG();
+ }
+ }
+
+	/* in the setuid case the key will be constructed as owned by
+	 * fsuid/fsgid, but we do authentication based on the real uid/gid.
+	 * the key permission bits will be exactly POS_ALL, so only processes
+	 * that possess this key can access it, although the quota might be
+	 * counted against others (fsuid/fsgid).
+	 *
+	 * the keyring will use fsuid/fsgid as upcall parameters, so we have
+	 * to encode the real uid/gid into the callout info.
+	 */
+
+ construct_key_desc(desc, sizeof(desc), sec, vcred->vc_uid);
+
+ /* callout info format:
+ * secid:mech:uid:gid:flags:svc_type:peer_nid:target_uuid
+ */
+ coinfo_size = sizeof(struct obd_uuid) + MAX_OBD_NAME + 64;
+ OBD_ALLOC(coinfo, coinfo_size);
+ if (coinfo == NULL)
+ goto out;
+
+ snprintf(coinfo, coinfo_size, "%d:%s:%u:%u:%s:%d:"LPX64":%s",
+ sec->ps_id, sec2gsec(sec)->gs_mech->gm_name,
+ vcred->vc_uid, vcred->vc_gid,
+ co_flags, import_to_gss_svc(imp),
+ imp->imp_connection->c_peer.nid, imp->imp_obd->obd_name);
+
+ CDEBUG(D_SEC, "requesting key for %s\n", desc);
+
+ keyring_upcall_lock(gsec_kr);
+ key = request_key(&gss_key_type, desc, coinfo);
+ keyring_upcall_unlock(gsec_kr);
+
+ OBD_FREE(coinfo, coinfo_size);
+
+ if (IS_ERR(key)) {
+ CERROR("failed request key: %ld\n", PTR_ERR(key));
+ goto out;
+ }
+ CDEBUG(D_SEC, "obtained key %08x for %s\n", key->serial, desc);
+
+	/* once payload.data has been pointed to a ctx, it never changes until
+	 * we de-associate them; but a parallel request_key() may return
+	 * a key with payload.data == NULL at the same time. so we still
+	 * need the write lock of key->sem to serialize them. */
+ down_write(&key->sem);
+
+ if (likely(key->payload.data != NULL)) {
+ ctx = key->payload.data;
+
+ LASSERT(atomic_read(&ctx->cc_refcount) >= 1);
+ LASSERT(ctx2gctx_keyring(ctx)->gck_key == key);
+ LASSERT(atomic_read(&key->usage) >= 2);
+
+ /* simply take a ref and return. it's upper layer's
+ * responsibility to detect & replace dead ctx. */
+ atomic_inc(&ctx->cc_refcount);
+ } else {
+		/* pre-initialize with a cli_ctx. this can't be done in
+		 * key_instantiate() because we don't have enough information
+		 * there. */
+ ctx = ctx_create_kr(sec, vcred);
+ if (ctx != NULL) {
+ ctx_enlist_kr(ctx, is_root, 0);
+ bind_key_ctx(key, ctx);
+
+ ctx_start_timer_kr(ctx, KEYRING_UPCALL_TIMEOUT);
+
+ CDEBUG(D_SEC, "installed key %p <-> ctx %p (sec %p)\n",
+ key, ctx, sec);
+ } else {
+			/* we'd prefer to call key_revoke(), but we would
+			 * rather do the revocation within this key->sem
+			 * locked period. */
+ key_revoke_locked(key);
+ }
+
+ create_new = 1;
+ }
+
+ up_write(&key->sem);
+
+ if (is_root && create_new)
+ request_key_unlink(key);
+
+ key_put(key);
+out:
+ if (is_root)
+ mutex_unlock(&gsec_kr->gsk_root_uc_lock);
+ return ctx;
+}
+
+static
+void gss_sec_release_ctx_kr(struct ptlrpc_sec *sec,
+ struct ptlrpc_cli_ctx *ctx,
+ int sync)
+{
+ LASSERT(atomic_read(&sec->ps_refcount) > 0);
+ LASSERT(atomic_read(&ctx->cc_refcount) == 0);
+ ctx_release_kr(ctx, sync);
+}
+
+/*
+ * to flush the context of a normal user, we must resort to the keyring
+ * itself to find the contexts which belong to us.
+ *
+ * Note that here we only intend to flush _our own_ context, so the "uid"
+ * will be ignored in the search.
+ */
+static
+void flush_user_ctx_cache_kr(struct ptlrpc_sec *sec,
+ uid_t uid,
+ int grace, int force)
+{
+ struct key *key;
+ char desc[24];
+
+ /* nothing to do for reverse or rootonly sec */
+ if (sec_is_reverse(sec) || sec_is_rootonly(sec))
+ return;
+
+ construct_key_desc(desc, sizeof(desc), sec, uid);
+
+	/* there should be only one valid key, but we loop anyway in
+	 * case of any weird cases */
+ for (;;) {
+ key = request_key(&gss_key_type, desc, NULL);
+ if (IS_ERR(key)) {
+ CDEBUG(D_SEC, "No more key found for current user\n");
+ break;
+ }
+
+ down_write(&key->sem);
+
+ kill_key_locked(key);
+
+		/* kill_key_locked() should usually revoke the key, but we
+		 * revoke it again to make sure, e.g. in some cases the key
+		 * may not be properly coupled with a context. */
+ key_revoke_locked(key);
+
+ up_write(&key->sem);
+
+ key_put(key);
+ }
+}
+
+/*
+ * flush context of root or all, we iterate through the list.
+ */
+static
+void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec,
+ uid_t uid,
+ int grace, int force)
+{
+ struct gss_sec_keyring *gsec_kr;
+ struct hlist_head freelist = HLIST_HEAD_INIT;
+ struct hlist_node *next;
+ struct ptlrpc_cli_ctx *ctx;
+
+ gsec_kr = sec2gsec_keyring(sec);
+
+ spin_lock(&sec->ps_lock);
+ hlist_for_each_entry_safe(ctx, next,
+ &gsec_kr->gsk_clist, cc_cache) {
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+
+ if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
+ continue;
+
+		/* at this moment there are at least 2 base references:
+		 * the key association and being in-list. */
+ if (atomic_read(&ctx->cc_refcount) > 2) {
+ if (!force)
+ continue;
+ CWARN("flush busy ctx %p(%u->%s, extra ref %d)\n",
+ ctx, ctx->cc_vcred.vc_uid,
+ sec2target_str(ctx->cc_sec),
+ atomic_read(&ctx->cc_refcount) - 2);
+ }
+
+ set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
+ if (!grace)
+ clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+
+ atomic_inc(&ctx->cc_refcount);
+
+ if (ctx_unlist_kr(ctx, 1)) {
+ hlist_add_head(&ctx->cc_cache, &freelist);
+ } else {
+ LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
+ atomic_dec(&ctx->cc_refcount);
+ }
+ }
+ spin_unlock(&sec->ps_lock);
+
+ dispose_ctx_list_kr(&freelist);
+}
+
+static
+int gss_sec_flush_ctx_cache_kr(struct ptlrpc_sec *sec,
+ uid_t uid, int grace, int force)
+{
+ CDEBUG(D_SEC, "sec %p(%d, nctx %d), uid %d, grace %d, force %d\n",
+ sec, atomic_read(&sec->ps_refcount),
+ atomic_read(&sec->ps_nctx),
+ uid, grace, force);
+
+ if (uid != -1 && uid != 0)
+ flush_user_ctx_cache_kr(sec, uid, grace, force);
+ else
+ flush_spec_ctx_cache_kr(sec, uid, grace, force);
+
+ return 0;
+}
+
+static
+void gss_sec_gc_ctx_kr(struct ptlrpc_sec *sec)
+{
+ struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
+ struct hlist_head freelist = HLIST_HEAD_INIT;
+ struct hlist_node *next;
+ struct ptlrpc_cli_ctx *ctx;
+
+ CWARN("running gc\n");
+
+ spin_lock(&sec->ps_lock);
+ hlist_for_each_entry_safe(ctx, next,
+ &gsec_kr->gsk_clist, cc_cache) {
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+
+ atomic_inc(&ctx->cc_refcount);
+
+ if (cli_ctx_check_death(ctx) && ctx_unlist_kr(ctx, 1)) {
+ hlist_add_head(&ctx->cc_cache, &freelist);
+ CWARN("unhashed ctx %p\n", ctx);
+ } else {
+ LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
+ atomic_dec(&ctx->cc_refcount);
+ }
+ }
+ spin_unlock(&sec->ps_lock);
+
+ dispose_ctx_list_kr(&freelist);
+}
+
+static
+int gss_sec_display_kr(struct ptlrpc_sec *sec, struct seq_file *seq)
+{
+ struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
+ struct hlist_node *next;
+ struct ptlrpc_cli_ctx *ctx;
+ struct gss_cli_ctx *gctx;
+ time_t now = cfs_time_current_sec();
+
+ spin_lock(&sec->ps_lock);
+ hlist_for_each_entry_safe(ctx, next,
+ &gsec_kr->gsk_clist, cc_cache) {
+ struct key *key;
+ char flags_str[40];
+ char mech[40];
+
+ gctx = ctx2gctx(ctx);
+ key = ctx2gctx_keyring(ctx)->gck_key;
+
+ gss_cli_ctx_flags2str(ctx->cc_flags,
+ flags_str, sizeof(flags_str));
+
+ if (gctx->gc_mechctx)
+ lgss_display(gctx->gc_mechctx, mech, sizeof(mech));
+ else
+ snprintf(mech, sizeof(mech), "N/A");
+ mech[sizeof(mech) - 1] = '\0';
+
+ seq_printf(seq, "%p: uid %u, ref %d, expire %ld(%+ld), fl %s, "
+ "seq %d, win %u, key %08x(ref %d), "
+ "hdl "LPX64":"LPX64", mech: %s\n",
+ ctx, ctx->cc_vcred.vc_uid,
+ atomic_read(&ctx->cc_refcount),
+ ctx->cc_expire,
+ ctx->cc_expire ? ctx->cc_expire - now : 0,
+ flags_str,
+ atomic_read(&gctx->gc_seq),
+ gctx->gc_win,
+ key ? key->serial : 0,
+ key ? atomic_read(&key->usage) : 0,
+ gss_handle_to_u64(&gctx->gc_handle),
+ gss_handle_to_u64(&gctx->gc_svc_handle),
+ mech);
+ }
+ spin_unlock(&sec->ps_lock);
+
+ return 0;
+}
+
+/****************************************
+ * cli_ctx apis *
+ ****************************************/
+
+static
+int gss_cli_ctx_refresh_kr(struct ptlrpc_cli_ctx *ctx)
+{
+ /* upcall is already on the way */
+ return 0;
+}
+
+static
+int gss_cli_ctx_validate_kr(struct ptlrpc_cli_ctx *ctx)
+{
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(ctx->cc_sec);
+
+ if (cli_ctx_check_death(ctx)) {
+ kill_ctx_kr(ctx);
+ return 1;
+ }
+
+ if (cli_ctx_is_ready(ctx))
+ return 0;
+ return 1;
+}
+
+static
+void gss_cli_ctx_die_kr(struct ptlrpc_cli_ctx *ctx, int grace)
+{
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(ctx->cc_sec);
+
+ cli_ctx_expire(ctx);
+ kill_ctx_kr(ctx);
+}
+
+/****************************************
+ * (reverse) service *
+ ****************************************/
+
+/*
+ * a reverse context could have nothing to do with keyrings. here we still
+ * keep the version which binds to a key, for future reference.
+ */
+#define HAVE_REVERSE_CTX_NOKEY
+
+
+static
+int sec_install_rctx_kr(struct ptlrpc_sec *sec,
+ struct ptlrpc_svc_ctx *svc_ctx)
+{
+ struct ptlrpc_cli_ctx *cli_ctx;
+ struct vfs_cred vcred = { 0, 0 };
+ int rc;
+
+ LASSERT(sec);
+ LASSERT(svc_ctx);
+
+ cli_ctx = ctx_create_kr(sec, &vcred);
+ if (cli_ctx == NULL)
+ return -ENOMEM;
+
+ rc = gss_copy_rvc_cli_ctx(cli_ctx, svc_ctx);
+ if (rc) {
+ CERROR("failed copy reverse cli ctx: %d\n", rc);
+
+ ctx_put_kr(cli_ctx, 1);
+ return rc;
+ }
+
+ rvs_sec_install_root_ctx_kr(sec, cli_ctx, NULL);
+
+ ctx_put_kr(cli_ctx, 1);
+
+ return 0;
+}
+
+
+/****************************************
+ * service apis *
+ ****************************************/
+
+static
+int gss_svc_accept_kr(struct ptlrpc_request *req)
+{
+ return gss_svc_accept(&gss_policy_keyring, req);
+}
+
+static
+int gss_svc_install_rctx_kr(struct obd_import *imp,
+ struct ptlrpc_svc_ctx *svc_ctx)
+{
+ struct ptlrpc_sec *sec;
+ int rc;
+
+ sec = sptlrpc_import_sec_ref(imp);
+ LASSERT(sec);
+
+ rc = sec_install_rctx_kr(sec, svc_ctx);
+ sptlrpc_sec_put(sec);
+
+ return rc;
+}
+
+/****************************************
+ * key apis *
+ ****************************************/
+
+static
+int gss_kt_instantiate(struct key *key, const void *data, size_t datalen)
+{
+ int rc;
+
+ if (data != NULL || datalen != 0) {
+ CERROR("invalid: data %p, len %lu\n", data, (long)datalen);
+ return -EINVAL;
+ }
+
+	if (key->payload.data != NULL) {
+		CERROR("key already has payload\n");
+		return -EINVAL;
+	}
+
+	/* link the key to the session keyring, so the following context
+	 * negotiation rpc fired from user space can find this key. This will
+	 * be unlinked automatically when the upcall processes die.
+	 *
+	 * we can't do this through keyctl from userspace, because the upcall
+	 * might be neither possessor nor owner of the key (setuid).
+	 *
+	 * the session keyring is created upon upcall, and doesn't change
+	 * until the upcall finishes, so the rcu lock is not needed here.
+	 */
+ LASSERT(key_tgcred(current)->session_keyring);
+
+ lockdep_off();
+ rc = key_link(key_tgcred(current)->session_keyring, key);
+ lockdep_on();
+ if (unlikely(rc)) {
+ CERROR("failed to link key %08x to keyring %08x: %d\n",
+ key->serial,
+ key_tgcred(current)->session_keyring->serial, rc);
+ return rc;
+ }
+
+ CDEBUG(D_SEC, "key %p instantiated, ctx %p\n", key, key->payload.data);
+ return 0;
+}
+
+/*
+ * called with the key semaphore write locked. it means we can operate
+ * on the context without fear of losing the refcount.
+ */
+static
+int gss_kt_update(struct key *key, const void *data, size_t datalen)
+{
+ struct ptlrpc_cli_ctx *ctx = key->payload.data;
+ struct gss_cli_ctx *gctx;
+ rawobj_t tmpobj = RAWOBJ_EMPTY;
+ __u32 datalen32 = (__u32) datalen;
+ int rc;
+
+ if (data == NULL || datalen == 0) {
+ CWARN("invalid: data %p, len %lu\n", data, (long)datalen);
+ return -EINVAL;
+ }
+
+	/* if the upcall finished negotiation too fast (most likely because
+	 * a local error happened) and called kt_update(), the ctx
+	 * might still be NULL. but the key will eventually be associated
+	 * with a context, or be revoked. if the key status is fine, return
+	 * -EAGAIN to allow userspace to sleep a while and call again. */
+ if (ctx == NULL) {
+ CDEBUG(D_SEC, "update too soon: key %p(%x) flags %lx\n",
+ key, key->serial, key->flags);
+
+ rc = key_validate(key);
+ if (rc == 0)
+ return -EAGAIN;
+ else
+ return rc;
+ }
+
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(ctx->cc_sec);
+
+ ctx_clear_timer_kr(ctx);
+
+ /* don't proceed if already refreshed */
+ if (cli_ctx_is_refreshed(ctx)) {
+ CWARN("ctx already done refresh\n");
+ return 0;
+ }
+
+ sptlrpc_cli_ctx_get(ctx);
+ gctx = ctx2gctx(ctx);
+
+ rc = buffer_extract_bytes(&data, &datalen32, &gctx->gc_win,
+ sizeof(gctx->gc_win));
+ if (rc) {
+ CERROR("failed extract seq_win\n");
+ goto out;
+ }
+
+ if (gctx->gc_win == 0) {
+ __u32 nego_rpc_err, nego_gss_err;
+
+ rc = buffer_extract_bytes(&data, &datalen32, &nego_rpc_err,
+ sizeof(nego_rpc_err));
+ if (rc) {
+			CERROR("failed to extract rpc rc\n");
+ goto out;
+ }
+
+ rc = buffer_extract_bytes(&data, &datalen32, &nego_gss_err,
+ sizeof(nego_gss_err));
+ if (rc) {
+			CERROR("failed to extract gss rc\n");
+ goto out;
+ }
+
+ CERROR("negotiation: rpc err %d, gss err %x\n",
+ nego_rpc_err, nego_gss_err);
+
+ rc = nego_rpc_err ? nego_rpc_err : -EACCES;
+ } else {
+ rc = rawobj_extract_local_alloc(&gctx->gc_handle,
+ (__u32 **) &data, &datalen32);
+ if (rc) {
+ CERROR("failed extract handle\n");
+ goto out;
+ }
+
+ rc = rawobj_extract_local(&tmpobj, (__u32 **) &data,&datalen32);
+ if (rc) {
+ CERROR("failed extract mech\n");
+ goto out;
+ }
+
+ rc = lgss_import_sec_context(&tmpobj,
+ sec2gsec(ctx->cc_sec)->gs_mech,
+ &gctx->gc_mechctx);
+ if (rc != GSS_S_COMPLETE)
+ CERROR("failed import context\n");
+ else
+ rc = 0;
+ }
+out:
+	/* we don't care what the current status of this ctx is, even if
+	 * someone else is operating on the ctx at the same time. we just
+	 * add our own opinion here. */
+ if (rc == 0) {
+ gss_cli_ctx_uptodate(gctx);
+ } else {
+		/* this will also revoke the key. has to be done before
+		 * waking up waiters, otherwise they may find the stale key */
+ kill_key_locked(key);
+
+ cli_ctx_expire(ctx);
+
+ if (rc != -ERESTART)
+ set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
+ }
+
+ /* let user space think it's a success */
+ sptlrpc_cli_ctx_put(ctx, 1);
+ return 0;
+}
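+
+/*
+ * Illustrative summary only (the wire format is produced by the userspace
+ * gss daemon, not defined in this file): the downcall buffer parsed by
+ * gss_kt_update() above is consumed as
+ *
+ *	__u32 seq_win
+ *	if seq_win == 0:  __u32 rpc_error, __u32 gss_error
+ *	else:		  length-prefixed handle rawobj,
+ *			  length-prefixed mech context rawobj
+ *
+ * where each rawobj is read with rawobj_extract_local*() as a __u32 length
+ * followed by that many bytes.
+ */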
+
+static
+int gss_kt_match(const struct key *key, const void *desc)
+{
+ return (strcmp(key->description, (const char *) desc) == 0);
+}
+
+static
+void gss_kt_destroy(struct key *key)
+{
+ LASSERT(key->payload.data == NULL);
+ CDEBUG(D_SEC, "destroy key %p\n", key);
+}
+
+static
+void gss_kt_describe(const struct key *key, struct seq_file *s)
+{
+ if (key->description == NULL)
+ seq_puts(s, "[null]");
+ else
+ seq_puts(s, key->description);
+}
+
+static struct key_type gss_key_type =
+{
+ .name = "lgssc",
+ .def_datalen = 0,
+ .instantiate = gss_kt_instantiate,
+ .update = gss_kt_update,
+ .match = gss_kt_match,
+ .destroy = gss_kt_destroy,
+ .describe = gss_kt_describe,
+};
+
+/****************************************
+ * lustre gss keyring policy *
+ ****************************************/
+
+static struct ptlrpc_ctx_ops gss_keyring_ctxops = {
+ .match = gss_cli_ctx_match,
+ .refresh = gss_cli_ctx_refresh_kr,
+ .validate = gss_cli_ctx_validate_kr,
+ .die = gss_cli_ctx_die_kr,
+ .sign = gss_cli_ctx_sign,
+ .verify = gss_cli_ctx_verify,
+ .seal = gss_cli_ctx_seal,
+ .unseal = gss_cli_ctx_unseal,
+ .wrap_bulk = gss_cli_ctx_wrap_bulk,
+ .unwrap_bulk = gss_cli_ctx_unwrap_bulk,
+};
+
+static struct ptlrpc_sec_cops gss_sec_keyring_cops = {
+ .create_sec = gss_sec_create_kr,
+ .destroy_sec = gss_sec_destroy_kr,
+ .kill_sec = gss_sec_kill,
+ .lookup_ctx = gss_sec_lookup_ctx_kr,
+ .release_ctx = gss_sec_release_ctx_kr,
+ .flush_ctx_cache = gss_sec_flush_ctx_cache_kr,
+ .gc_ctx = gss_sec_gc_ctx_kr,
+ .install_rctx = gss_sec_install_rctx,
+ .alloc_reqbuf = gss_alloc_reqbuf,
+ .free_reqbuf = gss_free_reqbuf,
+ .alloc_repbuf = gss_alloc_repbuf,
+ .free_repbuf = gss_free_repbuf,
+ .enlarge_reqbuf = gss_enlarge_reqbuf,
+ .display = gss_sec_display_kr,
+};
+
+static struct ptlrpc_sec_sops gss_sec_keyring_sops = {
+ .accept = gss_svc_accept_kr,
+ .invalidate_ctx = gss_svc_invalidate_ctx,
+ .alloc_rs = gss_svc_alloc_rs,
+ .authorize = gss_svc_authorize,
+ .free_rs = gss_svc_free_rs,
+ .free_ctx = gss_svc_free_ctx,
+ .prep_bulk = gss_svc_prep_bulk,
+ .unwrap_bulk = gss_svc_unwrap_bulk,
+ .wrap_bulk = gss_svc_wrap_bulk,
+ .install_rctx = gss_svc_install_rctx_kr,
+};
+
+static struct ptlrpc_sec_policy gss_policy_keyring = {
+ .sp_owner = THIS_MODULE,
+ .sp_name = "gss.keyring",
+ .sp_policy = SPTLRPC_POLICY_GSS,
+ .sp_cops = &gss_sec_keyring_cops,
+ .sp_sops = &gss_sec_keyring_sops,
+};
+
+
+int __init gss_init_keyring(void)
+{
+ int rc;
+
+ rc = register_key_type(&gss_key_type);
+ if (rc) {
+ CERROR("failed to register keyring type: %d\n", rc);
+ return rc;
+ }
+
+ rc = sptlrpc_register_policy(&gss_policy_keyring);
+ if (rc) {
+ unregister_key_type(&gss_key_type);
+ return rc;
+ }
+
+ return 0;
+}
+
+void __exit gss_exit_keyring(void)
+{
+ unregister_key_type(&gss_key_type);
+ sptlrpc_unregister_policy(&gss_policy_keyring);
+}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5.h b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5.h
new file mode 100644
index 0000000..676d4b9
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5.h
@@ -0,0 +1,163 @@
+/*
+ * Modifications for Lustre
+ *
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Author: Eric Mei <ericm@clusterfs.com>
+ */
+
+/*
+ * linux/include/linux/sunrpc/gss_krb5_types.h
+ *
+ * Adapted from MIT Kerberos 5-1.2.1 lib/include/krb5.h,
+ * lib/gssapi/krb5/gssapiP_krb5.h, and others
+ *
+ * Copyright (c) 2000 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Andy Adamson <andros@umich.edu>
+ * Bruce Fields <bfields@umich.edu>
+ */
+
+/*
+ * Copyright 1995 by the Massachusetts Institute of Technology.
+ * All Rights Reserved.
+ *
+ * Export of this software from the United States of America may
+ * require a specific license from the United States Government.
+ * It is the responsibility of any person or organization contemplating
+ * export to obtain such a license before exporting.
+ *
+ * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
+ * distribute this software and its documentation for any purpose and
+ * without fee is hereby granted, provided that the above copyright
+ * notice appear in all copies and that both that copyright notice and
+ * this permission notice appear in supporting documentation, and that
+ * the name of M.I.T. not be used in advertising or publicity pertaining
+ * to distribution of the software without specific, written prior
+ * permission. Furthermore if you modify this software you must label
+ * your software as modified software and not distribute it in such a
+ * fashion that it might be confused with the original M.I.T. software.
+ * M.I.T. makes no representations about the suitability of
+ * this software for any purpose. It is provided "as is" without express
+ * or implied warranty.
+ *
+ */
+
+#ifndef PTLRPC_GSS_KRB5_H
+#define PTLRPC_GSS_KRB5_H
+
+/*
+ * RFC 4121
+ */
+
+#define KG_USAGE_ACCEPTOR_SEAL 22
+#define KG_USAGE_ACCEPTOR_SIGN 23
+#define KG_USAGE_INITIATOR_SEAL 24
+#define KG_USAGE_INITIATOR_SIGN 25
+
+#define KG_TOK_MIC_MSG 0x0404
+#define KG_TOK_WRAP_MSG 0x0504
+
+#define FLAG_SENDER_IS_ACCEPTOR 0x01
+#define FLAG_WRAP_CONFIDENTIAL 0x02
+#define FLAG_ACCEPTOR_SUBKEY 0x04
+
+struct krb5_header {
+ __u16 kh_tok_id; /* token id */
+ __u8 kh_flags; /* acceptor flags */
+ __u8 kh_filler; /* 0xff */
+ __u16 kh_ec; /* extra count */
+ __u16 kh_rrc; /* right rotation count */
+ __u64 kh_seq; /* sequence number */
+ __u8 kh_cksum[0]; /* checksum */
+};
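A quick way to see the layout above: the header is a fixed 16-byte prefix and kh_cksum[] is a zero-length trailer, so the checksum bytes start immediately after the header. A minimal userspace sketch (illustration only, not part of the patch; the struct and field names below are made up):

    #include <assert.h>
    #include <stdint.h>

    /* userspace mirror of the 16-byte RFC 4121 token header */
    struct k5hdr {
        uint16_t tok_id;   /* KG_TOK_MIC_MSG or KG_TOK_WRAP_MSG, big-endian on the wire */
        uint8_t  flags;    /* FLAG_SENDER_IS_ACCEPTOR, FLAG_WRAP_CONFIDENTIAL, ... */
        uint8_t  filler;   /* always 0xff */
        uint16_t ec;       /* extra count */
        uint16_t rrc;      /* right rotation count */
        uint64_t seq;      /* sequence number */
        uint8_t  cksum[];  /* checksum bytes follow immediately */
    };

    int main(void)
    {
        /* the flexible checksum trailer does not contribute to sizeof();
         * 16 bytes holds on common ABIs where uint64_t is 8-byte aligned */
        assert(sizeof(struct k5hdr) == 16);
        return 0;
    }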
+
+struct krb5_keyblock {
+ rawobj_t kb_key;
+ struct ll_crypto_cipher *kb_tfm;
+};
+
+struct krb5_ctx {
+ unsigned int kc_initiate:1,
+ kc_cfx:1,
+ kc_seed_init:1,
+ kc_have_acceptor_subkey:1;
+ __s32 kc_endtime;
+ __u8 kc_seed[16];
+ __u64 kc_seq_send;
+ __u64 kc_seq_recv;
+ __u32 kc_enctype;
+ struct krb5_keyblock kc_keye; /* encryption */
+ struct krb5_keyblock kc_keyi; /* integrity */
+ struct krb5_keyblock kc_keyc; /* checksum */
+ rawobj_t kc_mech_used;
+};
+
+enum sgn_alg {
+ SGN_ALG_DES_MAC_MD5 = 0x0000,
+ SGN_ALG_MD2_5 = 0x0001,
+ SGN_ALG_DES_MAC = 0x0002,
+ SGN_ALG_3 = 0x0003, /* not published */
+ SGN_ALG_HMAC_MD5 = 0x0011, /* microsoft w2k; no support */
+ SGN_ALG_HMAC_SHA1_DES3_KD = 0x0004
+};
+
+enum seal_alg {
+ SEAL_ALG_NONE = 0xffff,
+ SEAL_ALG_DES = 0x0000,
+ SEAL_ALG_1 = 0x0001, /* not published */
+ SEAL_ALG_MICROSOFT_RC4 = 0x0010, /* microsoft w2k; no support */
+ SEAL_ALG_DES3KD = 0x0002
+};
+
+#define CKSUMTYPE_CRC32 0x0001
+#define CKSUMTYPE_RSA_MD4 0x0002
+#define CKSUMTYPE_RSA_MD4_DES 0x0003
+#define CKSUMTYPE_DESCBC 0x0004
+/* des-mac-k */
+/* rsa-md4-des-k */
+#define CKSUMTYPE_RSA_MD5 0x0007
+#define CKSUMTYPE_RSA_MD5_DES 0x0008
+#define CKSUMTYPE_NIST_SHA 0x0009
+#define CKSUMTYPE_HMAC_SHA1_DES3 0x000c
+#define CKSUMTYPE_HMAC_SHA1_96_AES128 0x000f
+#define CKSUMTYPE_HMAC_SHA1_96_AES256 0x0010
+#define CKSUMTYPE_HMAC_MD5_ARCFOUR -138
+
+/* from gssapi_err_krb5.h */
+#define KG_CCACHE_NOMATCH (39756032L)
+#define KG_KEYTAB_NOMATCH (39756033L)
+#define KG_TGT_MISSING (39756034L)
+#define KG_NO_SUBKEY (39756035L)
+#define KG_CONTEXT_ESTABLISHED (39756036L)
+#define KG_BAD_SIGN_TYPE (39756037L)
+#define KG_BAD_LENGTH (39756038L)
+#define KG_CTX_INCOMPLETE (39756039L)
+#define KG_CONTEXT (39756040L)
+#define KG_CRED (39756041L)
+#define KG_ENC_DESC (39756042L)
+#define KG_BAD_SEQ (39756043L)
+#define KG_EMPTY_CCACHE (39756044L)
+#define KG_NO_CTYPES (39756045L)
+
+/* per Kerberos v5 protocol spec crypto types from the wire.
+ * these get mapped to linux kernel crypto routines.
+ */
+#define ENCTYPE_NULL 0x0000
+#define ENCTYPE_DES_CBC_CRC 0x0001 /* DES cbc mode with CRC-32 */
+#define ENCTYPE_DES_CBC_MD4 0x0002 /* DES cbc mode with RSA-MD4 */
+#define ENCTYPE_DES_CBC_MD5 0x0003 /* DES cbc mode with RSA-MD5 */
+#define ENCTYPE_DES_CBC_RAW 0x0004 /* DES cbc mode raw */
+/* XXX deprecated? */
+#define ENCTYPE_DES3_CBC_SHA 0x0005 /* DES-3 cbc mode with NIST-SHA */
+#define ENCTYPE_DES3_CBC_RAW 0x0006 /* DES-3 cbc mode raw */
+#define ENCTYPE_DES_HMAC_SHA1 0x0008
+#define ENCTYPE_DES3_CBC_SHA1 0x0010
+#define ENCTYPE_AES128_CTS_HMAC_SHA1_96 0x0011
+#define ENCTYPE_AES256_CTS_HMAC_SHA1_96 0x0012
+#define ENCTYPE_ARCFOUR_HMAC 0x0017
+#define ENCTYPE_ARCFOUR_HMAC_EXP 0x0018
+#define ENCTYPE_UNKNOWN 0x01ff
+
+#endif /* PTLRPC_GSS_KRB5_H */
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c
new file mode 100644
index 0000000..c106a9e
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c
@@ -0,0 +1,1786 @@
+/*
+ * Modifications for Lustre
+ *
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ *
+ * Author: Eric Mei <ericm@clusterfs.com>
+ */
+
+/*
+ * linux/net/sunrpc/gss_krb5_mech.c
+ * linux/net/sunrpc/gss_krb5_crypto.c
+ * linux/net/sunrpc/gss_krb5_seal.c
+ * linux/net/sunrpc/gss_krb5_seqnum.c
+ * linux/net/sunrpc/gss_krb5_unseal.c
+ *
+ * Copyright (c) 2001 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Andy Adamson <andros@umich.edu>
+ * J. Bruce Fields <bfields@umich.edu>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define DEBUG_SUBSYSTEM S_SEC
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/crypto.h>
+#include <linux/mutex.h>
+#include <linux/crypto.h>
+
+#include <obd.h>
+#include <obd_class.h>
+#include <obd_support.h>
+#include <lustre/lustre_idl.h>
+#include <lustre_net.h>
+#include <lustre_import.h>
+#include <lustre_sec.h>
+
+#include "gss_err.h"
+#include "gss_internal.h"
+#include "gss_api.h"
+#include "gss_asn1.h"
+#include "gss_krb5.h"
+
+static spinlock_t krb5_seq_lock;
+
+struct krb5_enctype {
+ char *ke_dispname;
+ char *ke_enc_name; /* linux tfm name */
+ char *ke_hash_name; /* linux tfm name */
+ int ke_enc_mode; /* linux tfm mode */
+ int ke_hash_size; /* checksum size */
+ int ke_conf_size; /* confounder size */
+ unsigned int ke_hash_hmac:1; /* is hmac? */
+};
+
+/*
+ * NOTE: for aes128-cts and aes256-cts, the MIT implementation uses CTS
+ * encryption, but for now we simply use CBC with padding, because Linux
+ * doesn't support CTS yet. This needs to be fixed in the future.
+ */
+static struct krb5_enctype enctypes[] = {
+ [ENCTYPE_DES_CBC_RAW] = { /* des-cbc-md5 */
+ "des-cbc-md5",
+ "cbc(des)",
+ "md5",
+ 0,
+ 16,
+ 8,
+ 0,
+ },
+ [ENCTYPE_DES3_CBC_RAW] = { /* des3-hmac-sha1 */
+ "des3-hmac-sha1",
+ "cbc(des3_ede)",
+ "hmac(sha1)",
+ 0,
+ 20,
+ 8,
+ 1,
+ },
+ [ENCTYPE_AES128_CTS_HMAC_SHA1_96] = { /* aes128-cts */
+ "aes128-cts-hmac-sha1-96",
+ "cbc(aes)",
+ "hmac(sha1)",
+ 0,
+ 12,
+ 16,
+ 1,
+ },
+ [ENCTYPE_AES256_CTS_HMAC_SHA1_96] = { /* aes256-cts */
+ "aes256-cts-hmac-sha1-96",
+ "cbc(aes)",
+ "hmac(sha1)",
+ 0,
+ 12,
+ 16,
+ 1,
+ },
+ [ENCTYPE_ARCFOUR_HMAC] = { /* arcfour-hmac-md5 */
+ "arcfour-hmac-md5",
+ "ecb(arc4)",
+ "hmac(md5)",
+ 0,
+ 16,
+ 8,
+ 1,
+ },
+};
+
+#define MAX_ENCTYPES (sizeof(enctypes) / sizeof(struct krb5_enctype))
+
+static const char *enctype2str(__u32 enctype)
+{
+ if (enctype < MAX_ENCTYPES && enctypes[enctype].ke_dispname)
+ return enctypes[enctype].ke_dispname;
+
+ return "unknown";
+}
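Because enctypes[] is indexed directly by the on-wire enctype value, the array is sparse and the ke_dispname check in enctype2str() is what skips the zero-filled holes. A self-contained userspace sketch of the same lookup pattern, with hypothetical names and only a few entries:

    #include <stdio.h>

    struct enc { const char *name; int hash_size; };

    /* sparse table built with designated initializers, indexed by enctype */
    static const struct enc table[] = {
        [0x0004] = { "des-cbc-md5",             16 },
        [0x0011] = { "aes128-cts-hmac-sha1-96", 12 },
        [0x0012] = { "aes256-cts-hmac-sha1-96", 12 },
    };
    #define NTYPES (sizeof(table) / sizeof(table[0]))

    static const char *name_of(unsigned int enctype)
    {
        /* the NULL check skips holes left by the sparse initialization */
        if (enctype < NTYPES && table[enctype].name)
            return table[enctype].name;
        return "unknown";
    }

    int main(void)
    {
        printf("%s\n", name_of(0x0011)); /* aes128-cts-hmac-sha1-96 */
        printf("%s\n", name_of(0x0005)); /* unknown (hole) */
        return 0;
    }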
+
+static
+int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
+{
+ kb->kb_tfm = crypto_alloc_blkcipher(alg_name, alg_mode, 0);
+ if (IS_ERR(kb->kb_tfm)) {
+ CERROR("failed to alloc tfm: %s, mode %d\n",
+ alg_name, alg_mode);
+ return -1;
+ }
+
+ if (crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
+ CERROR("failed to set %s key, len %d\n",
+ alg_name, kb->kb_key.len);
+ return -1;
+ }
+
+ return 0;
+}
+
+static
+int krb5_init_keys(struct krb5_ctx *kctx)
+{
+ struct krb5_enctype *ke;
+
+ if (kctx->kc_enctype >= MAX_ENCTYPES ||
+ enctypes[kctx->kc_enctype].ke_hash_size == 0) {
+ CERROR("unsupported enctype %x\n", kctx->kc_enctype);
+ return -1;
+ }
+
+ ke = &enctypes[kctx->kc_enctype];
+
+ /* the arc4 tfm is stateful; the caller should alloc/use/free it on its own */
+ if (kctx->kc_enctype != ENCTYPE_ARCFOUR_HMAC &&
+ keyblock_init(&kctx->kc_keye, ke->ke_enc_name, ke->ke_enc_mode))
+ return -1;
+
+ /* the hmac tfm is stateful; the caller should alloc/use/free it on its own */
+ if (ke->ke_hash_hmac == 0 &&
+ keyblock_init(&kctx->kc_keyi, ke->ke_enc_name, ke->ke_enc_mode))
+ return -1;
+ if (ke->ke_hash_hmac == 0 &&
+ keyblock_init(&kctx->kc_keyc, ke->ke_enc_name, ke->ke_enc_mode))
+ return -1;
+
+ return 0;
+}
+
+static
+void keyblock_free(struct krb5_keyblock *kb)
+{
+ rawobj_free(&kb->kb_key);
+ if (kb->kb_tfm)
+ crypto_free_blkcipher(kb->kb_tfm);
+}
+
+static
+int keyblock_dup(struct krb5_keyblock *new, struct krb5_keyblock *kb)
+{
+ return rawobj_dup(&new->kb_key, &kb->kb_key);
+}
+
+static
+int get_bytes(char **ptr, const char *end, void *res, int len)
+{
+ char *p, *q;
+ p = *ptr;
+ q = p + len;
+ if (q > end || q < p)
+ return -1;
+ memcpy(res, p, len);
+ *ptr = q;
+ return 0;
+}
+
+static
+int get_rawobj(char **ptr, const char *end, rawobj_t *res)
+{
+ char *p, *q;
+ __u32 len;
+
+ p = *ptr;
+ if (get_bytes(&p, end, &len, sizeof(len)))
+ return -1;
+
+ q = p + len;
+ if (q > end || q < p)
+ return -1;
+
+ OBD_ALLOC_LARGE(res->data, len);
+ if (!res->data)
+ return -1;
+
+ res->len = len;
+ memcpy(res->data, p, len);
+ *ptr = q;
+ return 0;
+}
+
+static
+int get_keyblock(char **ptr, const char *end,
+ struct krb5_keyblock *kb, __u32 keysize)
+{
+ char *buf;
+
+ OBD_ALLOC_LARGE(buf, keysize);
+ if (buf == NULL)
+ return -1;
+
+ if (get_bytes(ptr, end, buf, keysize)) {
+ OBD_FREE_LARGE(buf, keysize);
+ return -1;
+ }
+
+ kb->kb_key.len = keysize;
+ kb->kb_key.data = buf;
+ return 0;
+}
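get_bytes(), get_rawobj() and get_keyblock() implement a bounded cursor over the context blob handed down from user space; every read is checked against the end pointer, including the pointer wrap-around case. A small standalone sketch of the same pattern (names are made up, and the toy blob assumes a little-endian host):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* copy len bytes from *cur, refusing to run past end or wrap around */
    static int take(const char **cur, const char *end, void *out, size_t len)
    {
        const char *p = *cur, *q = p + len;

        if (q > end || q < p)   /* out of bounds, or pointer overflow */
            return -1;
        memcpy(out, p, len);
        *cur = q;
        return 0;
    }

    int main(void)
    {
        /* toy blob: 4-byte length followed by that many payload bytes */
        const char blob[] = { 3, 0, 0, 0, 'a', 'b', 'c' };
        const char *cur = blob, *end = blob + sizeof(blob);
        uint32_t len;
        char payload[8] = { 0 };

        if (take(&cur, end, &len, sizeof(len)) || len > sizeof(payload) - 1 ||
            take(&cur, end, payload, len))
            return 1;
        printf("len=%u payload=%s\n", (unsigned int)len, payload);
        return 0;   /* prints: len=3 payload=abc */
    }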
+
+static
+void delete_context_kerberos(struct krb5_ctx *kctx)
+{
+ rawobj_free(&kctx->kc_mech_used);
+
+ keyblock_free(&kctx->kc_keye);
+ keyblock_free(&kctx->kc_keyi);
+ keyblock_free(&kctx->kc_keyc);
+}
+
+static
+__u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
+{
+ unsigned int tmp_uint, keysize;
+
+ /* seed_init flag */
+ if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
+ goto out_err;
+ kctx->kc_seed_init = (tmp_uint != 0);
+
+ /* seed */
+ if (get_bytes(&p, end, kctx->kc_seed, sizeof(kctx->kc_seed)))
+ goto out_err;
+
+ /* sign/seal algorithm, not really used now */
+ if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
+ get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
+ goto out_err;
+
+ /* end time */
+ if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
+ goto out_err;
+
+ /* seq send */
+ if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
+ goto out_err;
+ kctx->kc_seq_send = tmp_uint;
+
+ /* mech oid */
+ if (get_rawobj(&p, end, &kctx->kc_mech_used))
+ goto out_err;
+
+ /* old style enc/seq keys in format:
+ * - enctype (u32)
+ * - keysize (u32)
+ * - keydata
+ * we decompose them to fit into the new context
+ */
+
+ /* enc key */
+ if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
+ goto out_err;
+
+ if (get_bytes(&p, end, &keysize, sizeof(keysize)))
+ goto out_err;
+
+ if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
+ goto out_err;
+
+ /* seq key */
+ if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
+ tmp_uint != kctx->kc_enctype)
+ goto out_err;
+
+ if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)) ||
+ tmp_uint != keysize)
+ goto out_err;
+
+ if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
+ goto out_err;
+
+ /* old style fallback */
+ if (keyblock_dup(&kctx->kc_keyi, &kctx->kc_keyc))
+ goto out_err;
+
+ if (p != end)
+ goto out_err;
+
+ CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
+ return 0;
+out_err:
+ return GSS_S_FAILURE;
+}
+
+/* Flags for version 2 context flags */
+#define KRB5_CTX_FLAG_INITIATOR 0x00000001
+#define KRB5_CTX_FLAG_CFX 0x00000002
+#define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY 0x00000004
+
+static
+__u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
+{
+ unsigned int tmp_uint, keysize;
+
+ /* end time */
+ if (get_bytes(&p, end, &kctx->kc_endtime, sizeof(kctx->kc_endtime)))
+ goto out_err;
+
+ /* flags */
+ if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
+ goto out_err;
+
+ if (tmp_uint & KRB5_CTX_FLAG_INITIATOR)
+ kctx->kc_initiate = 1;
+ if (tmp_uint & KRB5_CTX_FLAG_CFX)
+ kctx->kc_cfx = 1;
+ if (tmp_uint & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
+ kctx->kc_have_acceptor_subkey = 1;
+
+ /* seq send */
+ if (get_bytes(&p, end, &kctx->kc_seq_send, sizeof(kctx->kc_seq_send)))
+ goto out_err;
+
+ /* enctype */
+ if (get_bytes(&p, end, &kctx->kc_enctype, sizeof(kctx->kc_enctype)))
+ goto out_err;
+
+ /* size of each key */
+ if (get_bytes(&p, end, &keysize, sizeof(keysize)))
+ goto out_err;
+
+ /* number of keys - should always be 3 */
+ if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint)))
+ goto out_err;
+
+ if (tmp_uint != 3) {
+ CERROR("Invalid number of keys: %u\n", tmp_uint);
+ goto out_err;
+ }
+
+ /* ke */
+ if (get_keyblock(&p, end, &kctx->kc_keye, keysize))
+ goto out_err;
+ /* ki */
+ if (get_keyblock(&p, end, &kctx->kc_keyi, keysize))
+ goto out_err;
+ /* kc */
+ if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
+ goto out_err;
+
+ CDEBUG(D_SEC, "successfully imported v2 context\n");
+ return 0;
+out_err:
+ return GSS_S_FAILURE;
+}
+
+/*
+ * The whole point here is to keep the user-level gss context parsing from
+ * nfs-utils as unchanged as possible: that code is not quite mature yet,
+ * and many details (heimdal support, etc.) are still unclear.
+ */
+static
+__u32 gss_import_sec_context_kerberos(rawobj_t *inbuf,
+ struct gss_ctx *gctx)
+{
+ struct krb5_ctx *kctx;
+ char *p = (char *) inbuf->data;
+ char *end = (char *) (inbuf->data + inbuf->len);
+ unsigned int tmp_uint, rc;
+
+ if (get_bytes(&p, end, &tmp_uint, sizeof(tmp_uint))) {
+ CERROR("Fail to read version\n");
+ return GSS_S_FAILURE;
+ }
+
+ /* only versions 0, 1 (rfc1964) and 2 (rfc4121) are supported */
+ if (tmp_uint > 2) {
+ CERROR("Invalid version %u\n", tmp_uint);
+ return GSS_S_FAILURE;
+ }
+
+ OBD_ALLOC_PTR(kctx);
+ if (!kctx)
+ return GSS_S_FAILURE;
+
+ if (tmp_uint == 0 || tmp_uint == 1) {
+ kctx->kc_initiate = tmp_uint;
+ rc = import_context_rfc1964(kctx, p, end);
+ } else {
+ rc = import_context_rfc4121(kctx, p, end);
+ }
+
+ if (rc == 0)
+ rc = krb5_init_keys(kctx);
+
+ if (rc) {
+ delete_context_kerberos(kctx);
+ OBD_FREE_PTR(kctx);
+
+ return GSS_S_FAILURE;
+ }
+
+ gctx->internal_ctx_id = kctx;
+ return GSS_S_COMPLETE;
+}
+
+static
+__u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
+ struct gss_ctx *gctx_new)
+{
+ struct krb5_ctx *kctx = gctx->internal_ctx_id;
+ struct krb5_ctx *knew;
+
+ OBD_ALLOC_PTR(knew);
+ if (!knew)
+ return GSS_S_FAILURE;
+
+ knew->kc_initiate = kctx->kc_initiate ? 0 : 1;
+ knew->kc_cfx = kctx->kc_cfx;
+ knew->kc_seed_init = kctx->kc_seed_init;
+ knew->kc_have_acceptor_subkey = kctx->kc_have_acceptor_subkey;
+ knew->kc_endtime = kctx->kc_endtime;
+
+ memcpy(knew->kc_seed, kctx->kc_seed, sizeof(kctx->kc_seed));
+ knew->kc_seq_send = kctx->kc_seq_recv;
+ knew->kc_seq_recv = kctx->kc_seq_send;
+ knew->kc_enctype = kctx->kc_enctype;
+
+ if (rawobj_dup(&knew->kc_mech_used, &kctx->kc_mech_used))
+ goto out_err;
+
+ if (keyblock_dup(&knew->kc_keye, &kctx->kc_keye))
+ goto out_err;
+ if (keyblock_dup(&knew->kc_keyi, &kctx->kc_keyi))
+ goto out_err;
+ if (keyblock_dup(&knew->kc_keyc, &kctx->kc_keyc))
+ goto out_err;
+ if (krb5_init_keys(knew))
+ goto out_err;
+
+ gctx_new->internal_ctx_id = knew;
+ CDEBUG(D_SEC, "successfully copied reverse context\n");
+ return GSS_S_COMPLETE;
+
+out_err:
+ delete_context_kerberos(knew);
+ OBD_FREE_PTR(knew);
+ return GSS_S_FAILURE;
+}
+
+static
+__u32 gss_inquire_context_kerberos(struct gss_ctx *gctx,
+ unsigned long *endtime)
+{
+ struct krb5_ctx *kctx = gctx->internal_ctx_id;
+
+ *endtime = (unsigned long) ((__u32) kctx->kc_endtime);
+ return GSS_S_COMPLETE;
+}
+
+static
+void gss_delete_sec_context_kerberos(void *internal_ctx)
+{
+ struct krb5_ctx *kctx = internal_ctx;
+
+ delete_context_kerberos(kctx);
+ OBD_FREE_PTR(kctx);
+}
+
+static
+void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
+{
+ sg_set_buf(sg, ptr, len);
+}
+
+static
+__u32 krb5_encrypt(struct crypto_blkcipher *tfm,
+ int decrypt,
+ void * iv,
+ void * in,
+ void * out,
+ int length)
+{
+ struct blkcipher_desc desc;
+ struct scatterlist sg;
+ __u8 local_iv[16] = {0};
+ __u32 ret = -EINVAL;
+
+ LASSERT(tfm);
+ desc.tfm = tfm;
+ desc.info = local_iv;
+ desc.flags = 0;
+
+ if (length % crypto_blkcipher_blocksize(tfm) != 0) {
+ CERROR("output length %d mismatch blocksize %d\n",
+ length, crypto_blkcipher_blocksize(tfm));
+ goto out;
+ }
+
+ if (crypto_blkcipher_ivsize(tfm) > 16) {
+ CERROR("iv size too large %d\n", crypto_blkcipher_ivsize(tfm));
+ goto out;
+ }
+
+ if (iv)
+ memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
+
+ memcpy(out, in, length);
+ buf_to_sg(&sg, out, length);
+
+ if (decrypt)
+ ret = crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
+ else
+ ret = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
+
+out:
+ return ret;
+}
+
+static inline
+int krb5_digest_hmac(struct crypto_hash *tfm,
+ rawobj_t *key,
+ struct krb5_header *khdr,
+ int msgcnt, rawobj_t *msgs,
+ int iovcnt, lnet_kiov_t *iovs,
+ rawobj_t *cksum)
+{
+ struct hash_desc desc;
+ struct scatterlist sg[1];
+ int i;
+
+ crypto_hash_setkey(tfm, key->data, key->len);
+ desc.tfm = tfm;
+ desc.flags = 0;
+
+ crypto_hash_init(&desc);
+
+ for (i = 0; i < msgcnt; i++) {
+ if (msgs[i].len == 0)
+ continue;
+ buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
+ crypto_hash_update(&desc, sg, msgs[i].len);
+ }
+
+ for (i = 0; i < iovcnt; i++) {
+ if (iovs[i].kiov_len == 0)
+ continue;
+
+ sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+ iovs[i].kiov_offset);
+ crypto_hash_update(&desc, sg, iovs[i].kiov_len);
+ }
+
+ if (khdr) {
+ buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
+ crypto_hash_update(&desc, sg, sizeof(*khdr));
+ }
+
+ return crypto_hash_final(&desc, cksum->data);
+}
+
+static inline
+int krb5_digest_norm(struct crypto_hash *tfm,
+ struct krb5_keyblock *kb,
+ struct krb5_header *khdr,
+ int msgcnt, rawobj_t *msgs,
+ int iovcnt, lnet_kiov_t *iovs,
+ rawobj_t *cksum)
+{
+ struct hash_desc desc;
+ struct scatterlist sg[1];
+ int i;
+
+ LASSERT(kb->kb_tfm);
+ desc.tfm = tfm;
+ desc.flags = 0;
+
+ crypto_hash_init(&desc);
+
+ for (i = 0; i < msgcnt; i++) {
+ if (msgs[i].len == 0)
+ continue;
+ buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
+ crypto_hash_update(&desc, sg, msgs[i].len);
+ }
+
+ for (i = 0; i < iovcnt; i++) {
+ if (iovs[i].kiov_len == 0)
+ continue;
+
+ sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
+ iovs[i].kiov_offset);
+ crypto_hash_update(&desc, sg, iovs[i].kiov_len);
+ }
+
+ if (khdr) {
+ buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
+ crypto_hash_update(&desc, sg, sizeof(*khdr));
+ }
+
+ crypto_hash_final(&desc, cksum->data);
+
+ return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
+ cksum->data, cksum->len);
+}
+
+/*
+ * compute a (keyed/keyless) checksum over the plain text with the krb5
+ * wire token header appended.
+ */
+static
+__s32 krb5_make_checksum(__u32 enctype,
+ struct krb5_keyblock *kb,
+ struct krb5_header *khdr,
+ int msgcnt, rawobj_t *msgs,
+ int iovcnt, lnet_kiov_t *iovs,
+ rawobj_t *cksum)
+{
+ struct krb5_enctype *ke = &enctypes[enctype];
+ struct crypto_hash *tfm;
+ __u32 code = GSS_S_FAILURE;
+ int rc;
+
+ tfm = ll_crypto_alloc_hash(ke->ke_hash_name, 0, 0);
+ if (tfm == NULL) {
+ CERROR("failed to alloc TFM: %s\n", ke->ke_hash_name);
+ return GSS_S_FAILURE;
+ }
+
+ cksum->len = crypto_hash_digestsize(tfm);
+ OBD_ALLOC_LARGE(cksum->data, cksum->len);
+ if (!cksum->data) {
+ cksum->len = 0;
+ goto out_tfm;
+ }
+
+ if (ke->ke_hash_hmac)
+ rc = krb5_digest_hmac(tfm, &kb->kb_key,
+ khdr, msgcnt, msgs, iovcnt, iovs, cksum);
+ else
+ rc = krb5_digest_norm(tfm, kb,
+ khdr, msgcnt, msgs, iovcnt, iovs, cksum);
+
+ if (rc == 0)
+ code = GSS_S_COMPLETE;
+out_tfm:
+ crypto_free_hash(tfm);
+ return code;
+}
+
+static void fill_krb5_header(struct krb5_ctx *kctx,
+ struct krb5_header *khdr,
+ int privacy)
+{
+ unsigned char acceptor_flag;
+
+ acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
+
+ if (privacy) {
+ khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
+ khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
+ khdr->kh_ec = cpu_to_be16(0);
+ khdr->kh_rrc = cpu_to_be16(0);
+ } else {
+ khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
+ khdr->kh_flags = acceptor_flag;
+ khdr->kh_ec = cpu_to_be16(0xffff);
+ khdr->kh_rrc = cpu_to_be16(0xffff);
+ }
+
+ khdr->kh_filler = 0xff;
+ spin_lock(&krb5_seq_lock);
+ khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
+ spin_unlock(&krb5_seq_lock);
+}
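fill_krb5_header() above and verify_krb5_header() below agree on a small convention: a wrap (privacy) token carries EC = RRC = 0 plus the confidentiality flag, while a MIC token carries EC = RRC = 0xffff. On the wire the fields are big-endian (cpu_to_be16/cpu_to_be64); the sketch below keeps host byte order and mirrors only the value selection. Hypothetical names, illustration only:

    #include <stdint.h>
    #include <stdio.h>

    #define TOK_MIC  0x0404
    #define TOK_WRAP 0x0504

    struct hdr { uint16_t tok_id, ec, rrc; uint8_t flags, filler; uint64_t seq; };

    /* mirrors fill_krb5_header(): privacy selects the wrap token constants */
    static struct hdr make(int privacy, int initiator, uint64_t seq)
    {
        struct hdr h = { .filler = 0xff, .seq = seq };

        h.flags = initiator ? 0 : 0x01;         /* FLAG_SENDER_IS_ACCEPTOR */
        if (privacy) {
            h.tok_id = TOK_WRAP;
            h.flags |= 0x02;                    /* FLAG_WRAP_CONFIDENTIAL */
            h.ec = h.rrc = 0;
        } else {
            h.tok_id = TOK_MIC;
            h.ec = h.rrc = 0xffff;
        }
        return h;
    }

    int main(void)
    {
        struct hdr mic = make(0, 1, 42), wrap = make(1, 1, 43);

        printf("MIC : tok=%#06x ec=%#06x rrc=%#06x\n", mic.tok_id, mic.ec, mic.rrc);
        printf("WRAP: tok=%#06x ec=%#06x rrc=%#06x\n", wrap.tok_id, wrap.ec, wrap.rrc);
        return 0;
    }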
+
+static __u32 verify_krb5_header(struct krb5_ctx *kctx,
+ struct krb5_header *khdr,
+ int privacy)
+{
+ unsigned char acceptor_flag;
+ __u16 tok_id, ec_rrc;
+
+ acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;
+
+ if (privacy) {
+ tok_id = KG_TOK_WRAP_MSG;
+ ec_rrc = 0x0;
+ } else {
+ tok_id = KG_TOK_MIC_MSG;
+ ec_rrc = 0xffff;
+ }
+
+ /* sanity checks */
+ if (be16_to_cpu(khdr->kh_tok_id) != tok_id) {
+ CERROR("bad token id\n");
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+ if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
+ CERROR("bad direction flag\n");
+ return GSS_S_BAD_SIG;
+ }
+ if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
+ CERROR("missing confidential flag\n");
+ return GSS_S_BAD_SIG;
+ }
+ if (khdr->kh_filler != 0xff) {
+ CERROR("bad filler\n");
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+ if (be16_to_cpu(khdr->kh_ec) != ec_rrc ||
+ be16_to_cpu(khdr->kh_rrc) != ec_rrc) {
+ CERROR("bad EC or RRC\n");
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+ return GSS_S_COMPLETE;
+}
+
+static
+__u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
+ int msgcnt,
+ rawobj_t *msgs,
+ int iovcnt,
+ lnet_kiov_t *iovs,
+ rawobj_t *token)
+{
+ struct krb5_ctx *kctx = gctx->internal_ctx_id;
+ struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
+ struct krb5_header *khdr;
+ rawobj_t cksum = RAWOBJ_EMPTY;
+
+ /* fill krb5 header */
+ LASSERT(token->len >= sizeof(*khdr));
+ khdr = (struct krb5_header *) token->data;
+ fill_krb5_header(kctx, khdr, 0);
+
+ /* checksum */
+ if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
+ khdr, msgcnt, msgs, iovcnt, iovs, &cksum))
+ return GSS_S_FAILURE;
+
+ LASSERT(cksum.len >= ke->ke_hash_size);
+ LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
+ memcpy(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
+ ke->ke_hash_size);
+
+ token->len = sizeof(*khdr) + ke->ke_hash_size;
+ rawobj_free(&cksum);
+ return GSS_S_COMPLETE;
+}
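The MIC token built above is simply the 16-byte header followed by the trailing ke_hash_size bytes of the digest; note that the truncation keeps the last bytes of the checksum, matching the memcpy above. A minimal sketch of that assembly step, with made-up sizes:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* pretend digest: 20 bytes of HMAC-SHA1 output, of which only the
         * last 12 (ke_hash_size for the aes enctypes above) are kept */
        unsigned char digest[20];
        unsigned char token[16 + 12];
        const size_t hash_size = 12;
        size_t i;

        for (i = 0; i < sizeof(digest); i++)
            digest[i] = (unsigned char)i;

        memset(token, 0, 16);   /* stand-in for the 16-byte krb5 header */
        memcpy(token + 16, digest + sizeof(digest) - hash_size, hash_size);

        printf("token length = %zu, first checksum byte = %d\n",
               sizeof(token), token[16]);   /* 28, 8 */
        return 0;
    }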
+
+static
+__u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
+ int msgcnt,
+ rawobj_t *msgs,
+ int iovcnt,
+ lnet_kiov_t *iovs,
+ rawobj_t *token)
+{
+ struct krb5_ctx *kctx = gctx->internal_ctx_id;
+ struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
+ struct krb5_header *khdr;
+ rawobj_t cksum = RAWOBJ_EMPTY;
+ __u32 major;
+
+ if (token->len < sizeof(*khdr)) {
+ CERROR("short signature: %u\n", token->len);
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ khdr = (struct krb5_header *) token->data;
+
+ major = verify_krb5_header(kctx, khdr, 0);
+ if (major != GSS_S_COMPLETE) {
+ CERROR("bad krb5 header\n");
+ return major;
+ }
+
+ if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
+ CERROR("short signature: %u, require %d\n",
+ token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
+ return GSS_S_FAILURE;
+ }
+
+ if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
+ khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) {
+ CERROR("failed to make checksum\n");
+ return GSS_S_FAILURE;
+ }
+
+ LASSERT(cksum.len >= ke->ke_hash_size);
+ if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
+ ke->ke_hash_size)) {
+ CERROR("checksum mismatch\n");
+ rawobj_free(&cksum);
+ return GSS_S_BAD_SIG;
+ }
+
+ rawobj_free(&cksum);
+ return GSS_S_COMPLETE;
+}
+
+static
+int add_padding(rawobj_t *msg, int msg_buflen, int blocksize)
+{
+ int padding;
+
+ padding = (blocksize - (msg->len & (blocksize - 1))) &
+ (blocksize - 1);
+ if (!padding)
+ return 0;
+
+ if (msg->len + padding > msg_buflen) {
+ CERROR("bufsize %u too small: datalen %u, padding %u\n",
+ msg_buflen, msg->len, padding);
+ return -EINVAL;
+ }
+
+ memset(msg->data + msg->len, padding, padding);
+ msg->len += padding;
+ return 0;
+}
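add_padding() rounds the message up to the cipher blocksize and fills the pad bytes with the pad length itself. A worked example of the arithmetic, assuming a 16-byte blocksize (illustration only):

    #include <stdio.h>

    int main(void)
    {
        unsigned int blocksize = 16, len;

        for (len = 30; len <= 33; len++) {
            /* same expression as add_padding(): distance to the next
             * multiple of blocksize, 0 if already aligned */
            unsigned int padding = (blocksize - (len & (blocksize - 1))) &
                                   (blocksize - 1);
            printf("len=%2u padding=%2u padded=%2u\n",
                   len, padding, len + padding);
        }
        /* len=30 padding= 2 padded=32
         * len=31 padding= 1 padded=32
         * len=32 padding= 0 padded=32
         * len=33 padding=15 padded=48 */
        return 0;
    }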
+
+static
+int krb5_encrypt_rawobjs(struct crypto_blkcipher *tfm,
+ int mode_ecb,
+ int inobj_cnt,
+ rawobj_t *inobjs,
+ rawobj_t *outobj,
+ int enc)
+{
+ struct blkcipher_desc desc;
+ struct scatterlist src, dst;
+ __u8 local_iv[16] = {0}, *buf;
+ __u32 datalen = 0;
+ int i, rc;
+
+ buf = outobj->data;
+ desc.tfm = tfm;
+ desc.info = local_iv;
+ desc.flags = 0;
+
+ for (i = 0; i < inobj_cnt; i++) {
+ LASSERT(buf + inobjs[i].len <= outobj->data + outobj->len);
+
+ buf_to_sg(&src, inobjs[i].data, inobjs[i].len);
+ buf_to_sg(&dst, buf, outobj->len - datalen);
+
+ if (mode_ecb) {
+ if (enc)
+ rc = crypto_blkcipher_encrypt(
+ &desc, &dst, &src, src.length);
+ else
+ rc = crypto_blkcipher_decrypt(
+ &desc, &dst, &src, src.length);
+ } else {
+ if (enc)
+ rc = crypto_blkcipher_encrypt_iv(
+ &desc, &dst, &src, src.length);
+ else
+ rc = crypto_blkcipher_decrypt_iv(
+ &desc, &dst, &src, src.length);
+ }
+
+ if (rc) {
+ CERROR("encrypt error %d\n", rc);
+ return rc;
+ }
+
+ datalen += inobjs[i].len;
+ buf += inobjs[i].len;
+ }
+
+ outobj->len = datalen;
+ return 0;
+}
+
+/*
+ * if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
+ */
+static
+int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
+ struct krb5_header *khdr,
+ char *confounder,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *cipher,
+ int adj_nob)
+{
+ struct blkcipher_desc ciph_desc;
+ __u8 local_iv[16] = {0};
+ struct scatterlist src, dst;
+ int blocksize, i, rc, nob = 0;
+
+ LASSERT(desc->bd_iov_count);
+ LASSERT(desc->bd_enc_iov);
+
+ blocksize = crypto_blkcipher_blocksize(tfm);
+ LASSERT(blocksize > 1);
+ LASSERT(cipher->len == blocksize + sizeof(*khdr));
+
+ ciph_desc.tfm = tfm;
+ ciph_desc.info = local_iv;
+ ciph_desc.flags = 0;
+
+ /* encrypt confounder */
+ buf_to_sg(&src, confounder, blocksize);
+ buf_to_sg(&dst, cipher->data, blocksize);
+
+ rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
+ if (rc) {
+ CERROR("error to encrypt confounder: %d\n", rc);
+ return rc;
+ }
+
+ /* encrypt clear pages */
+ for (i = 0; i < desc->bd_iov_count; i++) {
+ sg_set_page(&src, desc->bd_iov[i].kiov_page,
+ (desc->bd_iov[i].kiov_len + blocksize - 1) &
+ (~(blocksize - 1)),
+ desc->bd_iov[i].kiov_offset);
+ if (adj_nob)
+ nob += src.length;
+ sg_set_page(&dst, desc->bd_enc_iov[i].kiov_page, src.length,
+ src.offset);
+
+ desc->bd_enc_iov[i].kiov_offset = dst.offset;
+ desc->bd_enc_iov[i].kiov_len = dst.length;
+
+ rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
+ src.length);
+ if (rc) {
+ CERROR("error to encrypt page: %d\n", rc);
+ return rc;
+ }
+ }
+
+ /* encrypt krb5 header */
+ buf_to_sg(&src, khdr, sizeof(*khdr));
+ buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
+
+ rc = crypto_blkcipher_encrypt_iv(&ciph_desc,
+ &dst, &src, sizeof(*khdr));
+ if (rc) {
+ CERROR("error to encrypt krb5 header: %d\n", rc);
+ return rc;
+ }
+
+ if (adj_nob)
+ desc->bd_nob = nob;
+
+ return 0;
+}
+
+/*
+ * desc->bd_nob_transferred is the size of the cipher text received.
+ * desc->bd_nob is the target size of the plain text.
+ *
+ * if adj_nob != 0, we adjust each page's kiov_len to the actual
+ * plain text size.
+ * - for client read: we don't know the data size of each page, so
+ * bd_iov[]->kiov_len is set to PAGE_SIZE, but the actual data received
+ * might be smaller, so we adjust it according to bd_enc_iov[]->kiov_len.
+ * this means we DO NOT support the server sending odd-sized data in any
+ * page other than the last one.
+ * - for server write: we know exactly the expected data size of each page,
+ * so kiov_len is already accurate and we should not adjust it at all.
+ * also, bd_enc_iov[]->kiov_len should be round_up(bd_iov[]->kiov_len),
+ * which should have been done by prep_bulk().
+ */
+static
+int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
+ struct krb5_header *khdr,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *cipher,
+ rawobj_t *plain,
+ int adj_nob)
+{
+ struct blkcipher_desc ciph_desc;
+ __u8 local_iv[16] = {0};
+ struct scatterlist src, dst;
+ int ct_nob = 0, pt_nob = 0;
+ int blocksize, i, rc;
+
+ LASSERT(desc->bd_iov_count);
+ LASSERT(desc->bd_enc_iov);
+ LASSERT(desc->bd_nob_transferred);
+
+ blocksize = crypto_blkcipher_blocksize(tfm);
+ LASSERT(blocksize > 1);
+ LASSERT(cipher->len == blocksize + sizeof(*khdr));
+
+ ciph_desc.tfm = tfm;
+ ciph_desc.info = local_iv;
+ ciph_desc.flags = 0;
+
+ if (desc->bd_nob_transferred % blocksize) {
+ CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
+ return -EPROTO;
+ }
+
+ /* decrypt head (confounder) */
+ buf_to_sg(&src, cipher->data, blocksize);
+ buf_to_sg(&dst, plain->data, blocksize);
+
+ rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
+ if (rc) {
+ CERROR("error to decrypt confounder: %d\n", rc);
+ return rc;
+ }
+
+ for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
+ i++) {
+ if (desc->bd_enc_iov[i].kiov_offset % blocksize != 0 ||
+ desc->bd_enc_iov[i].kiov_len % blocksize != 0) {
+ CERROR("page %d: odd offset %u len %u, blocksize %d\n",
+ i, desc->bd_enc_iov[i].kiov_offset,
+ desc->bd_enc_iov[i].kiov_len, blocksize);
+ return -EFAULT;
+ }
+
+ if (adj_nob) {
+ if (ct_nob + desc->bd_enc_iov[i].kiov_len >
+ desc->bd_nob_transferred)
+ desc->bd_enc_iov[i].kiov_len =
+ desc->bd_nob_transferred - ct_nob;
+
+ desc->bd_iov[i].kiov_len = desc->bd_enc_iov[i].kiov_len;
+ if (pt_nob + desc->bd_enc_iov[i].kiov_len > desc->bd_nob)
+ desc->bd_iov[i].kiov_len = desc->bd_nob - pt_nob;
+ } else {
+ /* this should be guaranteed by LNET */
+ LASSERT(ct_nob + desc->bd_enc_iov[i].kiov_len <=
+ desc->bd_nob_transferred);
+ LASSERT(desc->bd_iov[i].kiov_len <=
+ desc->bd_enc_iov[i].kiov_len);
+ }
+
+ if (desc->bd_enc_iov[i].kiov_len == 0)
+ continue;
+
+ sg_set_page(&src, desc->bd_enc_iov[i].kiov_page,
+ desc->bd_enc_iov[i].kiov_len,
+ desc->bd_enc_iov[i].kiov_offset);
+ dst = src;
+ if (desc->bd_iov[i].kiov_len % blocksize == 0)
+ sg_assign_page(&dst, desc->bd_iov[i].kiov_page);
+
+ rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
+ src.length);
+ if (rc) {
+ CERROR("error to decrypt page: %d\n", rc);
+ return rc;
+ }
+
+ if (desc->bd_iov[i].kiov_len % blocksize != 0) {
+ memcpy(page_address(desc->bd_iov[i].kiov_page) +
+ desc->bd_iov[i].kiov_offset,
+ page_address(desc->bd_enc_iov[i].kiov_page) +
+ desc->bd_iov[i].kiov_offset,
+ desc->bd_iov[i].kiov_len);
+ }
+
+ ct_nob += desc->bd_enc_iov[i].kiov_len;
+ pt_nob += desc->bd_iov[i].kiov_len;
+ }
+
+ if (unlikely(ct_nob != desc->bd_nob_transferred)) {
+ CERROR("%d cipher text transferred but only %d decrypted\n",
+ desc->bd_nob_transferred, ct_nob);
+ return -EFAULT;
+ }
+
+ if (unlikely(!adj_nob && pt_nob != desc->bd_nob)) {
+ CERROR("%d plain text expected but only %d received\n",
+ desc->bd_nob, pt_nob);
+ return -EFAULT;
+ }
+
+ /* if needed, zero out the remaining unused iovs */
+ if (adj_nob)
+ while (i < desc->bd_iov_count)
+ desc->bd_iov[i++].kiov_len = 0;
+
+ /* decrypt tail (krb5 header) */
+ buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
+ buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
+
+ rc = crypto_blkcipher_decrypt_iv(&ciph_desc,
+ &dst, &src, sizeof(*khdr));
+ if (rc) {
+ CERROR("error to decrypt tail: %d\n", rc);
+ return rc;
+ }
+
+ if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
+ CERROR("krb5 header doesn't match\n");
+ return -EACCES;
+ }
+
+ return 0;
+}
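In the adj_nob path of krb5_decrypt_bulk(), only the page where the running cipher-text count crosses bd_nob_transferred gets shortened; later pages end up with zero length. A small sketch of that truncation arithmetic, with made-up page sizes (illustration only):

    #include <stdio.h>

    int main(void)
    {
        /* encrypted fragment lengths as prepared by prep_bulk (already
         * rounded up to the blocksize); assume 4 one-page fragments */
        unsigned int enc_len[] = { 4096, 4096, 4096, 4096 };
        unsigned int transferred = 10240;   /* cipher text actually received */
        unsigned int ct_nob = 0;
        size_t i;

        for (i = 0; i < sizeof(enc_len) / sizeof(enc_len[0]); i++) {
            if (ct_nob + enc_len[i] > transferred)
                enc_len[i] = transferred - ct_nob;  /* shorten this page */
            ct_nob += enc_len[i];
            printf("page %zu: %u bytes\n", i, enc_len[i]);
        }
        /* page 0: 4096, page 1: 4096, page 2: 2048, page 3: 0 */
        return 0;
    }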
+
+static
+__u32 gss_wrap_kerberos(struct gss_ctx *gctx,
+ rawobj_t *gsshdr,
+ rawobj_t *msg,
+ int msg_buflen,
+ rawobj_t *token)
+{
+ struct krb5_ctx *kctx = gctx->internal_ctx_id;
+ struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
+ struct krb5_header *khdr;
+ int blocksize;
+ rawobj_t cksum = RAWOBJ_EMPTY;
+ rawobj_t data_desc[3], cipher;
+ __u8 conf[GSS_MAX_CIPHER_BLOCK];
+ int rc = 0;
+
+ LASSERT(ke);
+ LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
+ LASSERT(kctx->kc_keye.kb_tfm == NULL ||
+ ke->ke_conf_size >=
+ crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
+
+ /*
+ * final token format:
+ * ---------------------------------------------------
+ * | krb5 header | cipher text | checksum (16 bytes) |
+ * ---------------------------------------------------
+ */
+
+ /* fill krb5 header */
+ LASSERT(token->len >= sizeof(*khdr));
+ khdr = (struct krb5_header *) token->data;
+ fill_krb5_header(kctx, khdr, 1);
+
+ /* generate confounder */
+ cfs_get_random_bytes(conf, ke->ke_conf_size);
+
+ /* get the encryption blocksize. note kc_keye might not be associated
+ * with a tfm; currently that is only the case for arcfour-hmac */
+ if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+ LASSERT(kctx->kc_keye.kb_tfm == NULL);
+ blocksize = 1;
+ } else {
+ LASSERT(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ }
+ LASSERT(blocksize <= ke->ke_conf_size);
+
+ /* pad the message */
+ if (add_padding(msg, msg_buflen, blocksize))
+ return GSS_S_FAILURE;
+
+ /*
+ * clear text layout for checksum:
+ * ------------------------------------------------------
+ * | confounder | gss header | clear msgs | krb5 header |
+ * ------------------------------------------------------
+ */
+ data_desc[0].data = conf;
+ data_desc[0].len = ke->ke_conf_size;
+ data_desc[1].data = gsshdr->data;
+ data_desc[1].len = gsshdr->len;
+ data_desc[2].data = msg->data;
+ data_desc[2].len = msg->len;
+
+ /* compute checksum */
+ if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
+ khdr, 3, data_desc, 0, NULL, &cksum))
+ return GSS_S_FAILURE;
+ LASSERT(cksum.len >= ke->ke_hash_size);
+
+ /*
+ * clear text layout for encryption:
+ * -----------------------------------------
+ * | confounder | clear msgs | krb5 header |
+ * -----------------------------------------
+ */
+ data_desc[0].data = conf;
+ data_desc[0].len = ke->ke_conf_size;
+ data_desc[1].data = msg->data;
+ data_desc[1].len = msg->len;
+ data_desc[2].data = (__u8 *) khdr;
+ data_desc[2].len = sizeof(*khdr);
+
+ /* cipher text will be directly inplace */
+ cipher.data = (__u8 *) (khdr + 1);
+ cipher.len = token->len - sizeof(*khdr);
+ LASSERT(cipher.len >= ke->ke_conf_size + msg->len + sizeof(*khdr));
+
+ if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+ rawobj_t arc4_keye;
+ struct crypto_blkcipher *arc4_tfm;
+
+ if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
+ NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
+ CERROR("failed to obtain arc4 enc key\n");
+ GOTO(arc4_out, rc = -EACCES);
+ }
+
+ arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+ if (IS_ERR(arc4_tfm)) {
+ CERROR("failed to alloc tfm arc4 in ECB mode\n");
+ GOTO(arc4_out_key, rc = -EACCES);
+ }
+
+ if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
+ arc4_keye.len)) {
+ CERROR("failed to set arc4 key, len %d\n",
+ arc4_keye.len);
+ GOTO(arc4_out_tfm, rc = -EACCES);
+ }
+
+ rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
+ 3, data_desc, &cipher, 1);
+arc4_out_tfm:
+ crypto_free_blkcipher(arc4_tfm);
+arc4_out_key:
+ rawobj_free(&arc4_keye);
+arc4_out:
+ do {} while (0); /* just to avoid a compile warning */
+ } else {
+ rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
+ 3, data_desc, &cipher, 1);
+ }
+
+ if (rc != 0) {
+ rawobj_free(&cksum);
+ return GSS_S_FAILURE;
+ }
+
+ /* fill in checksum */
+ LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
+ memcpy((char *)(khdr + 1) + cipher.len,
+ cksum.data + cksum.len - ke->ke_hash_size,
+ ke->ke_hash_size);
+ rawobj_free(&cksum);
+
+ /* final token length */
+ token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
+ return GSS_S_COMPLETE;
+}
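Putting gss_wrap_kerberos() together: the token is header + cipher text + truncated checksum, and the cipher text covers confounder + padded message + a trailing copy of the header. A back-of-the-envelope sketch for a hypothetical aes128-cts setup (conf 16, hash 12, blocksize 16), illustration only:

    #include <stdio.h>

    int main(void)
    {
        const unsigned int hdr = 16, conf = 16, hash = 12, blocksize = 16;
        unsigned int msg = 100;

        /* pad the message up to the blocksize, as add_padding() does */
        unsigned int padded = (msg + blocksize - 1) & ~(blocksize - 1);

        /* cipher text = confounder + padded message + trailing header copy */
        unsigned int cipher = conf + padded + hdr;

        /* token = leading krb5 header + cipher text + truncated checksum */
        printf("msg=%u padded=%u token=%u bytes\n",
               msg, padded, hdr + cipher + hash);  /* 100, 112, 172 */
        return 0;
    }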
+
+static
+__u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
+ struct ptlrpc_bulk_desc *desc)
+{
+ struct krb5_ctx *kctx = gctx->internal_ctx_id;
+ int blocksize, i;
+
+ LASSERT(desc->bd_iov_count);
+ LASSERT(desc->bd_enc_iov);
+ LASSERT(kctx->kc_keye.kb_tfm);
+
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+
+ for (i = 0; i < desc->bd_iov_count; i++) {
+ LASSERT(desc->bd_enc_iov[i].kiov_page);
+ /*
+ * offset should always start at page boundary of either
+ * client or server side.
+ */
+ if (desc->bd_iov[i].kiov_offset & (blocksize - 1)) {
+ CERROR("odd offset %d in page %d\n",
+ desc->bd_iov[i].kiov_offset, i);
+ return GSS_S_FAILURE;
+ }
+
+ desc->bd_enc_iov[i].kiov_offset = desc->bd_iov[i].kiov_offset;
+ desc->bd_enc_iov[i].kiov_len = (desc->bd_iov[i].kiov_len +
+ blocksize - 1) & (~(blocksize - 1));
+ }
+
+ return GSS_S_COMPLETE;
+}
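gss_prep_bulk_kerberos() rounds every bulk fragment up to the cipher blocksize so that the later in-place encryption works on whole blocks; the (len + blocksize - 1) & ~(blocksize - 1) idiom, like the offset alignment test, relies on the blocksize being a power of two. A quick standalone check, illustration only:

    #include <assert.h>
    #include <stdio.h>

    static unsigned int round_up_pow2(unsigned int len, unsigned int blocksize)
    {
        /* blocksize must be a power of two for the mask trick to hold */
        assert(blocksize && (blocksize & (blocksize - 1)) == 0);
        return (len + blocksize - 1) & ~(blocksize - 1);
    }

    int main(void)
    {
        printf("%u %u %u\n", round_up_pow2(1, 16), round_up_pow2(16, 16),
               round_up_pow2(17, 16));   /* 16 16 32 */
        return 0;
    }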
+
+static
+__u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *token, int adj_nob)
+{
+ struct krb5_ctx *kctx = gctx->internal_ctx_id;
+ struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
+ struct krb5_header *khdr;
+ int blocksize;
+ rawobj_t cksum = RAWOBJ_EMPTY;
+ rawobj_t data_desc[1], cipher;
+ __u8 conf[GSS_MAX_CIPHER_BLOCK];
+ int rc = 0;
+
+ LASSERT(ke);
+ LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
+
+ /*
+ * final token format:
+ * --------------------------------------------------
+ * | krb5 header | head/tail cipher text | checksum |
+ * --------------------------------------------------
+ */
+
+ /* fill krb5 header */
+ LASSERT(token->len >= sizeof(*khdr));
+ khdr = (struct krb5_header *) token->data;
+ fill_krb5_header(kctx, khdr, 1);
+
+ /* generate confounder */
+ cfs_get_random_bytes(conf, ke->ke_conf_size);
+
+ /* get the encryption blocksize. note kc_keye might not be associated
+ * with a tfm; currently that is only the case for arcfour-hmac */
+ if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+ LASSERT(kctx->kc_keye.kb_tfm == NULL);
+ blocksize = 1;
+ } else {
+ LASSERT(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ }
+
+ /*
+ * we assume sizeof(krb5_header) (16 bytes) is a multiple of the
+ * blocksize. the bulk token size is then exactly sizeof(krb5_header) +
+ * blocksize + sizeof(krb5_header) + hashsize.
+ */
+ LASSERT(blocksize <= ke->ke_conf_size);
+ LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
+ LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);
+
+ /*
+ * clear text layout for checksum:
+ * ------------------------------------------
+ * | confounder | clear pages | krb5 header |
+ * ------------------------------------------
+ */
+ data_desc[0].data = conf;
+ data_desc[0].len = ke->ke_conf_size;
+
+ /* compute checksum */
+ if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
+ khdr, 1, data_desc,
+ desc->bd_iov_count, desc->bd_iov,
+ &cksum))
+ return GSS_S_FAILURE;
+ LASSERT(cksum.len >= ke->ke_hash_size);
+
+ /*
+ * clear text layout for encryption:
+ * ------------------------------------------
+ * | confounder | clear pages | krb5 header |
+ * ------------------------------------------
+ * | | |
+ * ---------- (cipher pages) |
+ * result token: | |
+ * -------------------------------------------
+ * | krb5 header | cipher text | cipher text |
+ * -------------------------------------------
+ */
+ data_desc[0].data = conf;
+ data_desc[0].len = ke->ke_conf_size;
+
+ cipher.data = (__u8 *) (khdr + 1);
+ cipher.len = blocksize + sizeof(*khdr);
+
+ if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+ LBUG();
+ rc = 0;
+ } else {
+ rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
+ conf, desc, &cipher, adj_nob);
+ }
+
+ if (rc != 0) {
+ rawobj_free(&cksum);
+ return GSS_S_FAILURE;
+ }
+
+ /* fill in checksum */
+ LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
+ memcpy((char *)(khdr + 1) + cipher.len,
+ cksum.data + cksum.len - ke->ke_hash_size,
+ ke->ke_hash_size);
+ rawobj_free(&cksum);
+
+ /* final token length */
+ token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
+ return GSS_S_COMPLETE;
+}
+
+static
+__u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
+ rawobj_t *gsshdr,
+ rawobj_t *token,
+ rawobj_t *msg)
+{
+ struct krb5_ctx *kctx = gctx->internal_ctx_id;
+ struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
+ struct krb5_header *khdr;
+ unsigned char *tmpbuf;
+ int blocksize, bodysize;
+ rawobj_t cksum = RAWOBJ_EMPTY;
+ rawobj_t cipher_in, plain_out;
+ rawobj_t hash_objs[3];
+ int rc = 0;
+ __u32 major;
+
+ LASSERT(ke);
+
+ if (token->len < sizeof(*khdr)) {
+ CERROR("short signature: %u\n", token->len);
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ khdr = (struct krb5_header *) token->data;
+
+ major = verify_krb5_header(kctx, khdr, 1);
+ if (major != GSS_S_COMPLETE) {
+ CERROR("bad krb5 header\n");
+ return major;
+ }
+
+ /* block size */
+ if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+ LASSERT(kctx->kc_keye.kb_tfm == NULL);
+ blocksize = 1;
+ } else {
+ LASSERT(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ }
+
+ /* expected token layout:
+ * ----------------------------------------
+ * | krb5 header | cipher text | checksum |
+ * ----------------------------------------
+ */
+ bodysize = token->len - sizeof(*khdr) - ke->ke_hash_size;
+
+ if (bodysize % blocksize) {
+ CERROR("odd bodysize %d\n", bodysize);
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ if (bodysize <= ke->ke_conf_size + sizeof(*khdr)) {
+ CERROR("incomplete token: bodysize %d\n", bodysize);
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ if (msg->len < bodysize - ke->ke_conf_size - sizeof(*khdr)) {
+ CERROR("buffer too small: %u, require %d\n",
+ msg->len, bodysize - ke->ke_conf_size);
+ return GSS_S_FAILURE;
+ }
+
+ /* decrypting */
+ OBD_ALLOC_LARGE(tmpbuf, bodysize);
+ if (!tmpbuf)
+ return GSS_S_FAILURE;
+
+ major = GSS_S_FAILURE;
+
+ cipher_in.data = (__u8 *) (khdr + 1);
+ cipher_in.len = bodysize;
+ plain_out.data = tmpbuf;
+ plain_out.len = bodysize;
+
+ if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+ rawobj_t arc4_keye;
+ struct crypto_blkcipher *arc4_tfm;
+
+ cksum.data = token->data + token->len - ke->ke_hash_size;
+ cksum.len = ke->ke_hash_size;
+
+ if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
+ NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
+ CERROR("failed to obtain arc4 enc key\n");
+ GOTO(arc4_out, rc = -EACCES);
+ }
+
+ arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+ if (IS_ERR(arc4_tfm)) {
+ CERROR("failed to alloc tfm arc4 in ECB mode\n");
+ GOTO(arc4_out_key, rc = -EACCES);
+ }
+
+ if (crypto_blkcipher_setkey(arc4_tfm,
+ arc4_keye.data, arc4_keye.len)) {
+ CERROR("failed to set arc4 key, len %d\n",
+ arc4_keye.len);
+ GOTO(arc4_out_tfm, rc = -EACCES);
+ }
+
+ rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
+ 1, &cipher_in, &plain_out, 0);
+arc4_out_tfm:
+ crypto_free_blkcipher(arc4_tfm);
+arc4_out_key:
+ rawobj_free(&arc4_keye);
+arc4_out:
+ cksum = RAWOBJ_EMPTY;
+ } else {
+ rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
+ 1, &cipher_in, &plain_out, 0);
+ }
+
+ if (rc != 0) {
+ CERROR("error decrypt\n");
+ goto out_free;
+ }
+ LASSERT(plain_out.len == bodysize);
+
+ /* expected clear text layout:
+ * -----------------------------------------
+ * | confounder | clear msgs | krb5 header |
+ * -----------------------------------------
+ */
+
+ /* verify krb5 header in token is not modified */
+ if (memcmp(khdr, plain_out.data + plain_out.len - sizeof(*khdr),
+ sizeof(*khdr))) {
+ CERROR("decrypted krb5 header mismatch\n");
+ goto out_free;
+ }
+
+ /* verify checksum, compose clear text as layout:
+ * ------------------------------------------------------
+ * | confounder | gss header | clear msgs | krb5 header |
+ * ------------------------------------------------------
+ */
+ hash_objs[0].len = ke->ke_conf_size;
+ hash_objs[0].data = plain_out.data;
+ hash_objs[1].len = gsshdr->len;
+ hash_objs[1].data = gsshdr->data;
+ hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
+ hash_objs[2].data = plain_out.data + ke->ke_conf_size;
+ if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
+ khdr, 3, hash_objs, 0, NULL, &cksum))
+ goto out_free;
+
+ LASSERT(cksum.len >= ke->ke_hash_size);
+ if (memcmp((char *)(khdr + 1) + bodysize,
+ cksum.data + cksum.len - ke->ke_hash_size,
+ ke->ke_hash_size)) {
+ CERROR("checksum mismatch\n");
+ goto out_free;
+ }
+
+ msg->len = bodysize - ke->ke_conf_size - sizeof(*khdr);
+ memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);
+
+ major = GSS_S_COMPLETE;
+out_free:
+ OBD_FREE_LARGE(tmpbuf, bodysize);
+ rawobj_free(&cksum);
+ return major;
+}
+
+static
+__u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *token, int adj_nob)
+{
+ struct krb5_ctx *kctx = gctx->internal_ctx_id;
+ struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
+ struct krb5_header *khdr;
+ int blocksize;
+ rawobj_t cksum = RAWOBJ_EMPTY;
+ rawobj_t cipher, plain;
+ rawobj_t data_desc[1];
+ int rc;
+ __u32 major;
+
+ LASSERT(ke);
+
+ if (token->len < sizeof(*khdr)) {
+ CERROR("short signature: %u\n", token->len);
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ khdr = (struct krb5_header *) token->data;
+
+ major = verify_krb5_header(kctx, khdr, 1);
+ if (major != GSS_S_COMPLETE) {
+ CERROR("bad krb5 header\n");
+ return major;
+ }
+
+ /* block size */
+ if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+ LASSERT(kctx->kc_keye.kb_tfm == NULL);
+ blocksize = 1;
+ LBUG();
+ } else {
+ LASSERT(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ }
+ LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
+
+ /*
+ * token format is expected as:
+ * -----------------------------------------------
+ * | krb5 header | head/tail cipher text | cksum |
+ * -----------------------------------------------
+ */
+ if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
+ ke->ke_hash_size) {
+ CERROR("short token size: %u\n", token->len);
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ cipher.data = (__u8 *) (khdr + 1);
+ cipher.len = blocksize + sizeof(*khdr);
+ plain.data = cipher.data;
+ plain.len = cipher.len;
+
+ rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
+ desc, &cipher, &plain, adj_nob);
+ if (rc)
+ return GSS_S_DEFECTIVE_TOKEN;
+
+ /*
+ * verify checksum, compose clear text as layout:
+ * ------------------------------------------
+ * | confounder | clear pages | krb5 header |
+ * ------------------------------------------
+ */
+ data_desc[0].data = plain.data;
+ data_desc[0].len = blocksize;
+
+ if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
+ khdr, 1, data_desc,
+ desc->bd_iov_count, desc->bd_iov,
+ &cksum))
+ return GSS_S_FAILURE;
+ LASSERT(cksum.len >= ke->ke_hash_size);
+
+ if (memcmp(plain.data + blocksize + sizeof(*khdr),
+ cksum.data + cksum.len - ke->ke_hash_size,
+ ke->ke_hash_size)) {
+ CERROR("checksum mismatch\n");
+ rawobj_free(&cksum);
+ return GSS_S_BAD_SIG;
+ }
+
+ rawobj_free(&cksum);
+ return GSS_S_COMPLETE;
+}
+
+int gss_display_kerberos(struct gss_ctx *ctx,
+ char *buf,
+ int bufsize)
+{
+ struct krb5_ctx *kctx = ctx->internal_ctx_id;
+ int written;
+
+ written = snprintf(buf, bufsize, "krb5 (%s)",
+ enctype2str(kctx->kc_enctype));
+ return written;
+}
+
+static struct gss_api_ops gss_kerberos_ops = {
+ .gss_import_sec_context = gss_import_sec_context_kerberos,
+ .gss_copy_reverse_context = gss_copy_reverse_context_kerberos,
+ .gss_inquire_context = gss_inquire_context_kerberos,
+ .gss_get_mic = gss_get_mic_kerberos,
+ .gss_verify_mic = gss_verify_mic_kerberos,
+ .gss_wrap = gss_wrap_kerberos,
+ .gss_unwrap = gss_unwrap_kerberos,
+ .gss_prep_bulk = gss_prep_bulk_kerberos,
+ .gss_wrap_bulk = gss_wrap_bulk_kerberos,
+ .gss_unwrap_bulk = gss_unwrap_bulk_kerberos,
+ .gss_delete_sec_context = gss_delete_sec_context_kerberos,
+ .gss_display = gss_display_kerberos,
+};
+
+static struct subflavor_desc gss_kerberos_sfs[] = {
+ {
+ .sf_subflavor = SPTLRPC_SUBFLVR_KRB5N,
+ .sf_qop = 0,
+ .sf_service = SPTLRPC_SVC_NULL,
+ .sf_name = "krb5n"
+ },
+ {
+ .sf_subflavor = SPTLRPC_SUBFLVR_KRB5A,
+ .sf_qop = 0,
+ .sf_service = SPTLRPC_SVC_AUTH,
+ .sf_name = "krb5a"
+ },
+ {
+ .sf_subflavor = SPTLRPC_SUBFLVR_KRB5I,
+ .sf_qop = 0,
+ .sf_service = SPTLRPC_SVC_INTG,
+ .sf_name = "krb5i"
+ },
+ {
+ .sf_subflavor = SPTLRPC_SUBFLVR_KRB5P,
+ .sf_qop = 0,
+ .sf_service = SPTLRPC_SVC_PRIV,
+ .sf_name = "krb5p"
+ },
+};
+
+/*
+ * currently we leave module owner NULL
+ */
+static struct gss_api_mech gss_kerberos_mech = {
+ .gm_owner = NULL, /*THIS_MODULE, */
+ .gm_name = "krb5",
+ .gm_oid = (rawobj_t)
+ {9, "\052\206\110\206\367\022\001\002\002"},
+ .gm_ops = &gss_kerberos_ops,
+ .gm_sf_num = 4,
+ .gm_sfs = gss_kerberos_sfs,
+};
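The 9-byte gm_oid above is the DER body of the Kerberos V5 GSS-API mechanism OID 1.2.840.113554.1.2.2 (the octal escapes are hex 2A 86 48 86 F7 12 01 02 02). A small userspace decoder for that body, illustration only:

    #include <stdio.h>

    int main(void)
    {
        const unsigned char oid[] = { 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12,
                                      0x01, 0x02, 0x02 };
        unsigned long arc = 0;
        size_t i;

        /* the first byte packs the first two arcs: 0x2a = 40 * 1 + 2 -> "1.2" */
        printf("%d.%d", oid[0] / 40, oid[0] % 40);

        /* remaining arcs are base 128, high bit set on all but the last byte */
        for (i = 1; i < sizeof(oid); i++) {
            arc = (arc << 7) | (oid[i] & 0x7f);
            if (!(oid[i] & 0x80)) {
                printf(".%lu", arc);
                arc = 0;
            }
        }
        printf("\n");   /* 1.2.840.113554.1.2.2 */
        return 0;
    }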
+
+int __init init_kerberos_module(void)
+{
+ int status;
+
+ spin_lock_init(&krb5_seq_lock);
+
+ status = lgss_mech_register(&gss_kerberos_mech);
+ if (status)
+ CERROR("Failed to register kerberos gss mechanism!\n");
+ return status;
+}
+
+void __exit cleanup_kerberos_module(void)
+{
+ lgss_mech_unregister(&gss_kerberos_mech);
+}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_mech_switch.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_mech_switch.c
new file mode 100644
index 0000000..8cdad80
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_mech_switch.c
@@ -0,0 +1,359 @@
+/*
+ * Modifications for Lustre
+ *
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * Author: Eric Mei <ericm@clusterfs.com>
+ */
+
+/*
+ * linux/net/sunrpc/gss_mech_switch.c
+ *
+ * Copyright (c) 2001 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * J. Bruce Fields <bfields@umich.edu>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define DEBUG_SUBSYSTEM S_SEC
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+
+#include <obd.h>
+#include <obd_class.h>
+#include <obd_support.h>
+#include <lustre/lustre_idl.h>
+#include <lustre_net.h>
+#include <lustre_import.h>
+#include <lustre_sec.h>
+
+#include "gss_err.h"
+#include "gss_internal.h"
+#include "gss_api.h"
+
+static LIST_HEAD(registered_mechs);
+static DEFINE_SPINLOCK(registered_mechs_lock);
+
+int lgss_mech_register(struct gss_api_mech *gm)
+{
+ spin_lock(&registered_mechs_lock);
+ list_add(&gm->gm_list, &registered_mechs);
+ spin_unlock(&registered_mechs_lock);
+ CWARN("Register %s mechanism\n", gm->gm_name);
+ return 0;
+}
+
+void lgss_mech_unregister(struct gss_api_mech *gm)
+{
+ spin_lock(&registered_mechs_lock);
+ list_del(&gm->gm_list);
+ spin_unlock(&registered_mechs_lock);
+ CWARN("Unregister %s mechanism\n", gm->gm_name);
+}
+
+struct gss_api_mech *lgss_mech_get(struct gss_api_mech *gm)
+{
+ __module_get(gm->gm_owner);
+ return gm;
+}
+
+struct gss_api_mech *lgss_name_to_mech(char *name)
+{
+ struct gss_api_mech *pos, *gm = NULL;
+
+ spin_lock(&registered_mechs_lock);
+ list_for_each_entry(pos, &registered_mechs, gm_list) {
+ if (0 == strcmp(name, pos->gm_name)) {
+ if (!try_module_get(pos->gm_owner))
+ continue;
+ gm = pos;
+ break;
+ }
+ }
+ spin_unlock(&registered_mechs_lock);
+ return gm;
+}
+
+static inline
+int mech_supports_subflavor(struct gss_api_mech *gm, __u32 subflavor)
+{
+ int i;
+
+ for (i = 0; i < gm->gm_sf_num; i++) {
+ if (gm->gm_sfs[i].sf_subflavor == subflavor)
+ return 1;
+ }
+ return 0;
+}
+
+struct gss_api_mech *lgss_subflavor_to_mech(__u32 subflavor)
+{
+ struct gss_api_mech *pos, *gm = NULL;
+
+ spin_lock(&registered_mechs_lock);
+ list_for_each_entry(pos, &registered_mechs, gm_list) {
+ if (!try_module_get(pos->gm_owner))
+ continue;
+ if (!mech_supports_subflavor(pos, subflavor)) {
+ module_put(pos->gm_owner);
+ continue;
+ }
+ gm = pos;
+ break;
+ }
+ spin_unlock(&registered_mechs_lock);
+ return gm;
+}
+
+void lgss_mech_put(struct gss_api_mech *gm)
+{
+ module_put(gm->gm_owner);
+}
+
+/* The mech could probably be determined from the token instead, but it's just
+ * as easy for now to pass it in. */
+__u32 lgss_import_sec_context(rawobj_t *input_token,
+ struct gss_api_mech *mech,
+ struct gss_ctx **ctx_id)
+{
+ LASSERT(mech);
+ LASSERT(mech->gm_ops);
+ LASSERT(mech->gm_ops->gss_import_sec_context);
+
+ OBD_ALLOC_PTR(*ctx_id);
+ if (*ctx_id == NULL)
+ return GSS_S_FAILURE;
+
+ (*ctx_id)->mech_type = lgss_mech_get(mech);
+
+ return mech->gm_ops->gss_import_sec_context(input_token, *ctx_id);
+}
+
+__u32 lgss_copy_reverse_context(struct gss_ctx *ctx_id,
+ struct gss_ctx **ctx_id_new)
+{
+ struct gss_api_mech *mech = ctx_id->mech_type;
+ __u32 major;
+
+ LASSERT(mech);
+
+ OBD_ALLOC_PTR(*ctx_id_new);
+ if (*ctx_id_new == NULL)
+ return GSS_S_FAILURE;
+
+ (*ctx_id_new)->mech_type = lgss_mech_get(mech);
+
+ LASSERT(mech);
+ LASSERT(mech->gm_ops);
+ LASSERT(mech->gm_ops->gss_copy_reverse_context);
+
+ major = mech->gm_ops->gss_copy_reverse_context(ctx_id, *ctx_id_new);
+ if (major != GSS_S_COMPLETE) {
+ lgss_mech_put(mech);
+ OBD_FREE_PTR(*ctx_id_new);
+ *ctx_id_new = NULL;
+ }
+ return major;
+}
+
+/*
+ * this interface is much simplified; currently we only need endtime.
+ */
+__u32 lgss_inquire_context(struct gss_ctx *context_handle,
+ unsigned long *endtime)
+{
+ LASSERT(context_handle);
+ LASSERT(context_handle->mech_type);
+ LASSERT(context_handle->mech_type->gm_ops);
+ LASSERT(context_handle->mech_type->gm_ops->gss_inquire_context);
+
+ return context_handle->mech_type->gm_ops
+ ->gss_inquire_context(context_handle,
+ endtime);
+}
+
+/* gss_get_mic: compute a mic over message and return mic_token. */
+__u32 lgss_get_mic(struct gss_ctx *context_handle,
+ int msgcnt,
+ rawobj_t *msg,
+ int iovcnt,
+ lnet_kiov_t *iovs,
+ rawobj_t *mic_token)
+{
+ LASSERT(context_handle);
+ LASSERT(context_handle->mech_type);
+ LASSERT(context_handle->mech_type->gm_ops);
+ LASSERT(context_handle->mech_type->gm_ops->gss_get_mic);
+
+ return context_handle->mech_type->gm_ops
+ ->gss_get_mic(context_handle,
+ msgcnt,
+ msg,
+ iovcnt,
+ iovs,
+ mic_token);
+}
+
+/* gss_verify_mic: check whether the provided mic_token verifies message. */
+__u32 lgss_verify_mic(struct gss_ctx *context_handle,
+ int msgcnt,
+ rawobj_t *msg,
+ int iovcnt,
+ lnet_kiov_t *iovs,
+ rawobj_t *mic_token)
+{
+ LASSERT(context_handle);
+ LASSERT(context_handle->mech_type);
+ LASSERT(context_handle->mech_type->gm_ops);
+ LASSERT(context_handle->mech_type->gm_ops->gss_verify_mic);
+
+ return context_handle->mech_type->gm_ops
+ ->gss_verify_mic(context_handle,
+ msgcnt,
+ msg,
+ iovcnt,
+ iovs,
+ mic_token);
+}
+
+__u32 lgss_wrap(struct gss_ctx *context_handle,
+ rawobj_t *gsshdr,
+ rawobj_t *msg,
+ int msg_buflen,
+ rawobj_t *out_token)
+{
+ LASSERT(context_handle);
+ LASSERT(context_handle->mech_type);
+ LASSERT(context_handle->mech_type->gm_ops);
+ LASSERT(context_handle->mech_type->gm_ops->gss_wrap);
+
+ return context_handle->mech_type->gm_ops
+ ->gss_wrap(context_handle, gsshdr, msg, msg_buflen, out_token);
+}
+
+__u32 lgss_unwrap(struct gss_ctx *context_handle,
+ rawobj_t *gsshdr,
+ rawobj_t *token,
+ rawobj_t *out_msg)
+{
+ LASSERT(context_handle);
+ LASSERT(context_handle->mech_type);
+ LASSERT(context_handle->mech_type->gm_ops);
+ LASSERT(context_handle->mech_type->gm_ops->gss_unwrap);
+
+ return context_handle->mech_type->gm_ops
+ ->gss_unwrap(context_handle, gsshdr, token, out_msg);
+}
+
+
+__u32 lgss_prep_bulk(struct gss_ctx *context_handle,
+ struct ptlrpc_bulk_desc *desc)
+{
+ LASSERT(context_handle);
+ LASSERT(context_handle->mech_type);
+ LASSERT(context_handle->mech_type->gm_ops);
+ LASSERT(context_handle->mech_type->gm_ops->gss_prep_bulk);
+
+ return context_handle->mech_type->gm_ops
+ ->gss_prep_bulk(context_handle, desc);
+}
+
+__u32 lgss_wrap_bulk(struct gss_ctx *context_handle,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *token,
+ int adj_nob)
+{
+ LASSERT(context_handle);
+ LASSERT(context_handle->mech_type);
+ LASSERT(context_handle->mech_type->gm_ops);
+ LASSERT(context_handle->mech_type->gm_ops->gss_wrap_bulk);
+
+ return context_handle->mech_type->gm_ops
+ ->gss_wrap_bulk(context_handle, desc, token, adj_nob);
+}
+
+__u32 lgss_unwrap_bulk(struct gss_ctx *context_handle,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *token,
+ int adj_nob)
+{
+ LASSERT(context_handle);
+ LASSERT(context_handle->mech_type);
+ LASSERT(context_handle->mech_type->gm_ops);
+ LASSERT(context_handle->mech_type->gm_ops->gss_unwrap_bulk);
+
+ return context_handle->mech_type->gm_ops
+ ->gss_unwrap_bulk(context_handle, desc, token, adj_nob);
+}
+
+/* gss_delete_sec_context: free all resources associated with context_handle.
+ * Note this differs from the RFC 2744-specified prototype in that we don't
+ * bother returning an output token, since it would never be used anyway. */
+
+__u32 lgss_delete_sec_context(struct gss_ctx **context_handle)
+{
+ struct gss_api_mech *mech;
+
+ CDEBUG(D_SEC, "deleting %p\n", *context_handle);
+
+ if (!*context_handle)
+ return GSS_S_NO_CONTEXT;
+
+ mech = (*context_handle)->mech_type;
+ if ((*context_handle)->internal_ctx_id != 0) {
+ LASSERT(mech);
+ LASSERT(mech->gm_ops);
+ LASSERT(mech->gm_ops->gss_delete_sec_context);
+ mech->gm_ops->gss_delete_sec_context(
+ (*context_handle)->internal_ctx_id);
+ }
+ if (mech)
+ lgss_mech_put(mech);
+
+ OBD_FREE_PTR(*context_handle);
+ *context_handle = NULL;
+ return GSS_S_COMPLETE;
+}
+
+int lgss_display(struct gss_ctx *ctx,
+ char *buf,
+ int bufsize)
+{
+ LASSERT(ctx);
+ LASSERT(ctx->mech_type);
+ LASSERT(ctx->mech_type->gm_ops);
+ LASSERT(ctx->mech_type->gm_ops->gss_display);
+
+ return ctx->mech_type->gm_ops->gss_display(ctx, buf, bufsize);
+}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_pipefs.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_pipefs.c
new file mode 100644
index 0000000..c624518
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_pipefs.c
@@ -0,0 +1,1233 @@
+/*
+ * Modifications for Lustre
+ *
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * Author: Eric Mei <ericm@clusterfs.com>
+ */
+
+/*
+ * linux/net/sunrpc/auth_gss.c
+ *
+ * RPCSEC_GSS client authentication.
+ *
+ * Copyright (c) 2000 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Dug Song <dugsong@monkey.org>
+ * Andy Adamson <andros@umich.edu>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define DEBUG_SUBSYSTEM S_SEC
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/dcache.h>
+#include <linux/fs.h>
+#include <linux/mutex.h>
+#include <linux/crypto.h>
+#include <asm/atomic.h>
+struct rpc_clnt; /* for rpc_pipefs */
+#include <linux/sunrpc/rpc_pipe_fs.h>
+
+#include <obd.h>
+#include <obd_class.h>
+#include <obd_support.h>
+#include <lustre/lustre_idl.h>
+#include <lustre_sec.h>
+#include <lustre_net.h>
+#include <lustre_import.h>
+
+#include "gss_err.h"
+#include "gss_internal.h"
+#include "gss_api.h"
+
+static struct ptlrpc_sec_policy gss_policy_pipefs;
+static struct ptlrpc_ctx_ops gss_pipefs_ctxops;
+
+static int gss_cli_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx);
+
+static int gss_sec_pipe_upcall_init(struct gss_sec *gsec)
+{
+ return 0;
+}
+
+static void gss_sec_pipe_upcall_fini(struct gss_sec *gsec)
+{
+}
+
+/****************************************
+ * internal context helpers *
+ ****************************************/
+
+static
+struct ptlrpc_cli_ctx *ctx_create_pf(struct ptlrpc_sec *sec,
+ struct vfs_cred *vcred)
+{
+ struct gss_cli_ctx *gctx;
+ int rc;
+
+ OBD_ALLOC_PTR(gctx);
+ if (gctx == NULL)
+ return NULL;
+
+ rc = gss_cli_ctx_init_common(sec, &gctx->gc_base,
+ &gss_pipefs_ctxops, vcred);
+ if (rc) {
+ OBD_FREE_PTR(gctx);
+ return NULL;
+ }
+
+ return &gctx->gc_base;
+}
+
+static
+void ctx_destroy_pf(struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *ctx)
+{
+ struct gss_cli_ctx *gctx = ctx2gctx(ctx);
+
+ if (gss_cli_ctx_fini_common(sec, ctx))
+ return;
+
+ OBD_FREE_PTR(gctx);
+
+ atomic_dec(&sec->ps_nctx);
+ sptlrpc_sec_put(sec);
+}
+
+static
+void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *hash)
+{
+ set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
+ atomic_inc(&ctx->cc_refcount);
+ hlist_add_head(&ctx->cc_cache, hash);
+}
+
+/*
+ * caller must hold spinlock
+ */
+static
+void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *freelist)
+{
+ LASSERT(spin_is_locked(&ctx->cc_sec->ps_lock));
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
+ LASSERT(!hlist_unhashed(&ctx->cc_cache));
+
+ clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
+
+ if (atomic_dec_and_test(&ctx->cc_refcount)) {
+ __hlist_del(&ctx->cc_cache);
+ hlist_add_head(&ctx->cc_cache, freelist);
+ } else {
+ hlist_del_init(&ctx->cc_cache);
+ }
+}
+
+/*
+ * return 1 if the context is dead.
+ */
+static
+int ctx_check_death_pf(struct ptlrpc_cli_ctx *ctx,
+ struct hlist_head *freelist)
+{
+ if (cli_ctx_check_death(ctx)) {
+ if (freelist)
+ ctx_unhash_pf(ctx, freelist);
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline
+int ctx_check_death_locked_pf(struct ptlrpc_cli_ctx *ctx,
+ struct hlist_head *freelist)
+{
+ LASSERT(ctx->cc_sec);
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
+
+ return ctx_check_death_pf(ctx, freelist);
+}
+
+static inline
+int ctx_match_pf(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred)
+{
+ /* a small optimization for the null policy */
+ if (!ctx->cc_ops->match)
+ return 1;
+
+ return ctx->cc_ops->match(ctx, vcred);
+}
+
+static
+void ctx_list_destroy_pf(struct hlist_head *head)
+{
+ struct ptlrpc_cli_ctx *ctx;
+
+ while (!hlist_empty(head)) {
+ ctx = hlist_entry(head->first, struct ptlrpc_cli_ctx,
+ cc_cache);
+
+ LASSERT(atomic_read(&ctx->cc_refcount) == 0);
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT,
+ &ctx->cc_flags) == 0);
+
+ hlist_del_init(&ctx->cc_cache);
+ ctx_destroy_pf(ctx->cc_sec, ctx);
+ }
+}
+
+/****************************************
+ * context apis *
+ ****************************************/
+
+static
+int gss_cli_ctx_validate_pf(struct ptlrpc_cli_ctx *ctx)
+{
+ if (ctx_check_death_pf(ctx, NULL))
+ return 1;
+ if (cli_ctx_is_ready(ctx))
+ return 0;
+ return 1;
+}
+
+static
+void gss_cli_ctx_die_pf(struct ptlrpc_cli_ctx *ctx, int grace)
+{
+ LASSERT(ctx->cc_sec);
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+
+ cli_ctx_expire(ctx);
+
+ spin_lock(&ctx->cc_sec->ps_lock);
+
+ if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
+ LASSERT(!hlist_unhashed(&ctx->cc_cache));
+ LASSERT(atomic_read(&ctx->cc_refcount) > 1);
+
+ hlist_del_init(&ctx->cc_cache);
+ if (atomic_dec_and_test(&ctx->cc_refcount))
+ LBUG();
+ }
+
+ spin_unlock(&ctx->cc_sec->ps_lock);
+}
+
+/****************************************
+ * reverse context installation *
+ ****************************************/
+
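+/* map a 64-bit key (e.g. a uid) to a hash bucket; assumes hashsize is a
+ * power of two so that masking is equivalent to a modulo */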
+static inline
+unsigned int ctx_hash_index(int hashsize, __u64 key)
+{
+ return (unsigned int) (key & ((__u64) hashsize - 1));
+}
+
+static
+void gss_sec_ctx_replace_pf(struct gss_sec *gsec,
+ struct ptlrpc_cli_ctx *new)
+{
+ struct gss_sec_pipefs *gsec_pf;
+ struct ptlrpc_cli_ctx *ctx;
+ struct hlist_node *next;
+ HLIST_HEAD(freelist);
+ unsigned int hash;
+
+ gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
+
+ hash = ctx_hash_index(gsec_pf->gsp_chash_size,
+ (__u64) new->cc_vcred.vc_uid);
+ LASSERT(hash < gsec_pf->gsp_chash_size);
+
+ spin_lock(&gsec->gs_base.ps_lock);
+
+ hlist_for_each_entry_safe(ctx, next,
+ &gsec_pf->gsp_chash[hash], cc_cache) {
+ if (!ctx_match_pf(ctx, &new->cc_vcred))
+ continue;
+
+ cli_ctx_expire(ctx);
+ ctx_unhash_pf(ctx, &freelist);
+ break;
+ }
+
+ ctx_enhash_pf(new, &gsec_pf->gsp_chash[hash]);
+
+ spin_unlock(&gsec->gs_base.ps_lock);
+
+ ctx_list_destroy_pf(&freelist);
+}
+
+static
+int gss_install_rvs_cli_ctx_pf(struct gss_sec *gsec,
+ struct ptlrpc_svc_ctx *svc_ctx)
+{
+ struct vfs_cred vcred;
+ struct ptlrpc_cli_ctx *cli_ctx;
+ int rc;
+
+ vcred.vc_uid = 0;
+ vcred.vc_gid = 0;
+
+ cli_ctx = ctx_create_pf(&gsec->gs_base, &vcred);
+ if (!cli_ctx)
+ return -ENOMEM;
+
+ rc = gss_copy_rvc_cli_ctx(cli_ctx, svc_ctx);
+ if (rc) {
+ ctx_destroy_pf(cli_ctx->cc_sec, cli_ctx);
+ return rc;
+ }
+
+ gss_sec_ctx_replace_pf(gsec, cli_ctx);
+ return 0;
+}
+
+static
+void gss_ctx_cache_gc_pf(struct gss_sec_pipefs *gsec_pf,
+ struct hlist_head *freelist)
+{
+ struct ptlrpc_sec *sec;
+ struct ptlrpc_cli_ctx *ctx;
+ struct hlist_node *next;
+ int i;
+
+ sec = &gsec_pf->gsp_base.gs_base;
+
+ CDEBUG(D_SEC, "do gc on sec %s@%p\n", sec->ps_policy->sp_name, sec);
+
+ for (i = 0; i < gsec_pf->gsp_chash_size; i++) {
+ hlist_for_each_entry_safe(ctx, next,
+ &gsec_pf->gsp_chash[i], cc_cache)
+ ctx_check_death_locked_pf(ctx, freelist);
+ }
+
+ sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
+}
+
+static
+struct ptlrpc_sec *gss_sec_create_pf(struct obd_import *imp,
+ struct ptlrpc_svc_ctx *ctx,
+ struct sptlrpc_flavor *sf)
+{
+ struct gss_sec_pipefs *gsec_pf;
+ int alloc_size, hash_size, i;
+
+#define GSS_SEC_PIPEFS_CTX_HASH_SIZE (32)
+
+ if (ctx ||
+ sf->sf_flags & (PTLRPC_SEC_FL_ROOTONLY | PTLRPC_SEC_FL_REVERSE))
+ hash_size = 1;
+ else
+ hash_size = GSS_SEC_PIPEFS_CTX_HASH_SIZE;
+
+ alloc_size = sizeof(*gsec_pf) +
+ sizeof(struct hlist_head) * hash_size;
+
+ OBD_ALLOC(gsec_pf, alloc_size);
+ if (!gsec_pf)
+ return NULL;
+
+ gsec_pf->gsp_chash_size = hash_size;
+ for (i = 0; i < hash_size; i++)
+ INIT_HLIST_HEAD(&gsec_pf->gsp_chash[i]);
+
+ if (gss_sec_create_common(&gsec_pf->gsp_base, &gss_policy_pipefs,
+ imp, ctx, sf))
+ goto err_free;
+
+ if (ctx == NULL) {
+ if (gss_sec_pipe_upcall_init(&gsec_pf->gsp_base))
+ goto err_destroy;
+ } else {
+ if (gss_install_rvs_cli_ctx_pf(&gsec_pf->gsp_base, ctx))
+ goto err_destroy;
+ }
+
+ return &gsec_pf->gsp_base.gs_base;
+
+err_destroy:
+ gss_sec_destroy_common(&gsec_pf->gsp_base);
+err_free:
+ OBD_FREE(gsec_pf, alloc_size);
+ return NULL;
+}
+
+static
+void gss_sec_destroy_pf(struct ptlrpc_sec *sec)
+{
+ struct gss_sec_pipefs *gsec_pf;
+ struct gss_sec *gsec;
+
+ CWARN("destroy %s@%p\n", sec->ps_policy->sp_name, sec);
+
+ gsec = container_of(sec, struct gss_sec, gs_base);
+ gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
+
+ LASSERT(gsec_pf->gsp_chash);
+ LASSERT(gsec_pf->gsp_chash_size);
+
+ gss_sec_pipe_upcall_fini(gsec);
+
+ gss_sec_destroy_common(gsec);
+
+ OBD_FREE(gsec, sizeof(*gsec_pf) +
+ sizeof(struct hlist_head) * gsec_pf->gsp_chash_size);
+}
+
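+/*
+ * Look up a client context by credential in the per-sec hash. Dead entries
+ * may be garbage collected on the way; if nothing matches and @create is
+ * set, a new context is allocated outside the lock and the lookup retried.
+ * The returned context holds an extra reference for the caller.
+ */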
+static
+struct ptlrpc_cli_ctx *gss_sec_lookup_ctx_pf(struct ptlrpc_sec *sec,
+ struct vfs_cred *vcred,
+ int create, int remove_dead)
+{
+ struct gss_sec *gsec;
+ struct gss_sec_pipefs *gsec_pf;
+ struct ptlrpc_cli_ctx *ctx = NULL, *new = NULL;
+ struct hlist_head *hash_head;
+ struct hlist_node *next;
+ HLIST_HEAD(freelist);
+ unsigned int hash, gc = 0, found = 0;
+
+ might_sleep();
+
+ gsec = container_of(sec, struct gss_sec, gs_base);
+ gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
+
+ hash = ctx_hash_index(gsec_pf->gsp_chash_size,
+ (__u64) vcred->vc_uid);
+ hash_head = &gsec_pf->gsp_chash[hash];
+ LASSERT(hash < gsec_pf->gsp_chash_size);
+
+retry:
+ spin_lock(&sec->ps_lock);
+
+ /* gc_next == 0 means never do gc */
+ if (remove_dead && sec->ps_gc_next &&
+ cfs_time_after(cfs_time_current_sec(), sec->ps_gc_next)) {
+ gss_ctx_cache_gc_pf(gsec_pf, &freelist);
+ gc = 1;
+ }
+
+ hlist_for_each_entry_safe(ctx, next, hash_head, cc_cache) {
+ if (gc == 0 &&
+ ctx_check_death_locked_pf(ctx,
+ remove_dead ? &freelist : NULL))
+ continue;
+
+ if (ctx_match_pf(ctx, vcred)) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found) {
+ if (new && new != ctx) {
+ /* lost the race, just free it */
+ hlist_add_head(&new->cc_cache, &freelist);
+ new = NULL;
+ }
+
+ /* hot node, move to head */
+ if (hash_head->first != &ctx->cc_cache) {
+ __hlist_del(&ctx->cc_cache);
+ hlist_add_head(&ctx->cc_cache, hash_head);
+ }
+ } else {
+ /* don't allocate for reverse sec */
+ if (sec_is_reverse(sec)) {
+ spin_unlock(&sec->ps_lock);
+ return NULL;
+ }
+
+ if (new) {
+ ctx_enhash_pf(new, hash_head);
+ ctx = new;
+ } else if (create) {
+ spin_unlock(&sec->ps_lock);
+ new = ctx_create_pf(sec, vcred);
+ if (new) {
+ clear_bit(PTLRPC_CTX_NEW_BIT, &new->cc_flags);
+ goto retry;
+ }
+ } else {
+ ctx = NULL;
+ }
+ }
+
+ /* hold a ref */
+ if (ctx)
+ atomic_inc(&ctx->cc_refcount);
+
+ spin_unlock(&sec->ps_lock);
+
+ /* the allocator of the context must give the first push to refresh */
+ if (new) {
+ LASSERT(new == ctx);
+ gss_cli_ctx_refresh_pf(new);
+ }
+
+ ctx_list_destroy_pf(&freelist);
+ return ctx;
+}
+
+static
+void gss_sec_release_ctx_pf(struct ptlrpc_sec *sec,
+ struct ptlrpc_cli_ctx *ctx,
+ int sync)
+{
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+ LASSERT(hlist_unhashed(&ctx->cc_cache));
+
+ /* if an async release is required, clear the UPTODATE bit to prevent
+ * extra RPCs during the destroy procedure. */
+ if (!sync)
+ clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+
+ /* destroy this context */
+ ctx_destroy_pf(sec, ctx);
+}
+
+/*
+ * @uid: which user. "-1" means flush all.
+ * @grace: mark the context DEAD and allow a graceful destroy, e.g. by
+ *	 notifying the server side.
+ * @force: also flush busy entries.
+ *
+ * Return the number of busy contexts encountered.
+ *
+ * In any case, never touch "eternal" contexts.
+ */
+static
+int gss_sec_flush_ctx_cache_pf(struct ptlrpc_sec *sec,
+ uid_t uid,
+ int grace, int force)
+{
+ struct gss_sec *gsec;
+ struct gss_sec_pipefs *gsec_pf;
+ struct ptlrpc_cli_ctx *ctx;
+ struct hlist_node *next;
+ HLIST_HEAD(freelist);
+ int i, busy = 0;
+
+ might_sleep_if(grace);
+
+ gsec = container_of(sec, struct gss_sec, gs_base);
+ gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
+
+ spin_lock(&sec->ps_lock);
+ for (i = 0; i < gsec_pf->gsp_chash_size; i++) {
+ hlist_for_each_entry_safe(ctx, next,
+ &gsec_pf->gsp_chash[i],
+ cc_cache) {
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+
+ if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
+ continue;
+
+ if (atomic_read(&ctx->cc_refcount) > 1) {
+ busy++;
+ if (!force)
+ continue;
+
+ CWARN("flush busy(%d) ctx %p(%u->%s) by force, "
+ "grace %d\n",
+ atomic_read(&ctx->cc_refcount),
+ ctx, ctx->cc_vcred.vc_uid,
+ sec2target_str(ctx->cc_sec), grace);
+ }
+ ctx_unhash_pf(ctx, &freelist);
+
+ set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
+ if (!grace)
+ clear_bit(PTLRPC_CTX_UPTODATE_BIT,
+ &ctx->cc_flags);
+ }
+ }
+ spin_unlock(&sec->ps_lock);
+
+ ctx_list_destroy_pf(&freelist);
+ return busy;
+}
+
+/****************************************
+ * service apis *
+ ****************************************/
+
+static
+int gss_svc_accept_pf(struct ptlrpc_request *req)
+{
+ return gss_svc_accept(&gss_policy_pipefs, req);
+}
+
+static
+int gss_svc_install_rctx_pf(struct obd_import *imp,
+ struct ptlrpc_svc_ctx *ctx)
+{
+ struct ptlrpc_sec *sec;
+ int rc;
+
+ sec = sptlrpc_import_sec_ref(imp);
+ LASSERT(sec);
+ rc = gss_install_rvs_cli_ctx_pf(sec2gsec(sec), ctx);
+
+ sptlrpc_sec_put(sec);
+ return rc;
+}
+
+/****************************************
+ * rpc_pipefs definitions *
+ ****************************************/
+
+#define LUSTRE_PIPE_ROOT "/lustre"
+#define LUSTRE_PIPE_KRB5 LUSTRE_PIPE_ROOT"/krb5"
+
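+/* payload of a single upcall message, handed to the user-space daemon
+ * (lgssd) through the rpc_pipefs pipe defined above */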
+struct gss_upcall_msg_data {
+ __u32 gum_seq;
+ __u32 gum_uid;
+ __u32 gum_gid;
+ __u32 gum_svc; /* MDS/OSS... */
+ __u64 gum_nid; /* peer NID */
+ __u8 gum_obd[64]; /* client obd name */
+};
+
+struct gss_upcall_msg {
+ struct rpc_pipe_msg gum_base;
+ atomic_t gum_refcount;
+ struct list_head gum_list;
+ __u32 gum_mechidx;
+ struct gss_sec *gum_gsec;
+ struct gss_cli_ctx *gum_gctx;
+ struct gss_upcall_msg_data gum_data;
+};
+
+static atomic_t upcall_seq = ATOMIC_INIT(0);
+
+static inline
+__u32 upcall_get_sequence(void)
+{
+ return (__u32) atomic_inc_return(&upcall_seq);
+}
+
+enum mech_idx_t {
+ MECH_KRB5 = 0,
+ MECH_MAX
+};
+
+static inline
+__u32 mech_name2idx(const char *name)
+{
+ LASSERT(!strcmp(name, "krb5"));
+ return MECH_KRB5;
+}
+
+/* pipefs dentries, one per mechanism */
+static struct dentry *de_pipes[MECH_MAX] = { NULL, };
+/* all upcall messages are linked here */
+static struct list_head upcall_lists[MECH_MAX];
+/* and protected by this */
+static spinlock_t upcall_locks[MECH_MAX];
+
+static inline
+void upcall_list_lock(int idx)
+{
+ spin_lock(&upcall_locks[idx]);
+}
+
+static inline
+void upcall_list_unlock(int idx)
+{
+ spin_unlock(&upcall_locks[idx]);
+}
+
+static
+void upcall_msg_enlist(struct gss_upcall_msg *msg)
+{
+ __u32 idx = msg->gum_mechidx;
+
+ upcall_list_lock(idx);
+ list_add(&msg->gum_list, &upcall_lists[idx]);
+ upcall_list_unlock(idx);
+}
+
+static
+void upcall_msg_delist(struct gss_upcall_msg *msg)
+{
+ __u32 idx = msg->gum_mechidx;
+
+ upcall_list_lock(idx);
+ list_del_init(&msg->gum_list);
+ upcall_list_unlock(idx);
+}
+
+/****************************************
+ * rpc_pipefs upcall helpers *
+ ****************************************/
+
+static
+void gss_release_msg(struct gss_upcall_msg *gmsg)
+{
+ LASSERT(atomic_read(&gmsg->gum_refcount) > 0);
+
+ if (!atomic_dec_and_test(&gmsg->gum_refcount))
+ return;
+
+ if (gmsg->gum_gctx) {
+ sptlrpc_cli_ctx_wakeup(&gmsg->gum_gctx->gc_base);
+ sptlrpc_cli_ctx_put(&gmsg->gum_gctx->gc_base, 1);
+ gmsg->gum_gctx = NULL;
+ }
+
+ LASSERT(list_empty(&gmsg->gum_list));
+ LASSERT(list_empty(&gmsg->gum_base.list));
+ OBD_FREE_PTR(gmsg);
+}
+
+static
+void gss_unhash_msg_nolock(struct gss_upcall_msg *gmsg)
+{
+ __u32 idx = gmsg->gum_mechidx;
+
+ LASSERT(idx < MECH_MAX);
+ LASSERT(spin_is_locked(&upcall_locks[idx]));
+
+ if (list_empty(&gmsg->gum_list))
+ return;
+
+ list_del_init(&gmsg->gum_list);
+ LASSERT(atomic_read(&gmsg->gum_refcount) > 1);
+ atomic_dec(&gmsg->gum_refcount);
+}
+
+static
+void gss_unhash_msg(struct gss_upcall_msg *gmsg)
+{
+ __u32 idx = gmsg->gum_mechidx;
+
+ LASSERT(idx < MECH_MAX);
+ upcall_list_lock(idx);
+ gss_unhash_msg_nolock(gmsg);
+ upcall_list_unlock(idx);
+}
+
+static
+void gss_msg_fail_ctx(struct gss_upcall_msg *gmsg)
+{
+ if (gmsg->gum_gctx) {
+ struct ptlrpc_cli_ctx *ctx = &gmsg->gum_gctx->gc_base;
+
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ sptlrpc_cli_ctx_expire(ctx);
+ set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
+ }
+}
+
+static
+struct gss_upcall_msg *gss_find_upcall(__u32 mechidx, __u32 seq)
+{
+ struct gss_upcall_msg *gmsg;
+
+ upcall_list_lock(mechidx);
+ list_for_each_entry(gmsg, &upcall_lists[mechidx], gum_list) {
+ if (gmsg->gum_data.gum_seq != seq)
+ continue;
+
+ LASSERT(atomic_read(&gmsg->gum_refcount) > 0);
+ LASSERT(gmsg->gum_mechidx == mechidx);
+
+ atomic_inc(&gmsg->gum_refcount);
+ upcall_list_unlock(mechidx);
+ return gmsg;
+ }
+ upcall_list_unlock(mechidx);
+ return NULL;
+}
+
+static
+int simple_get_bytes(char **buf, __u32 *buflen, void *res, __u32 reslen)
+{
+ if (*buflen < reslen) {
+ CERROR("buflen %u < %u\n", *buflen, reslen);
+ return -EINVAL;
+ }
+
+ memcpy(res, *buf, reslen);
+ *buf += reslen;
+ *buflen -= reslen;
+ return 0;
+}
+
+/****************************************
+ * rpc_pipefs apis *
+ ****************************************/
+
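+/* read side of the pipe: copy the pending upcall message to user space,
+ * tracking partial reads via msg->copied */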
+static
+ssize_t gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
+ char *dst, size_t buflen)
+{
+ char *data = (char *)msg->data + msg->copied;
+ ssize_t mlen = msg->len;
+ ssize_t left;
+
+ if (mlen > buflen)
+ mlen = buflen;
+ left = copy_to_user(dst, data, mlen);
+ if (left < 0) {
+ msg->errno = left;
+ return left;
+ }
+ mlen -= left;
+ msg->copied += mlen;
+ msg->errno = 0;
+ return mlen;
+}
+
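+/* write side of the pipe: the user-space daemon reports the result of a
+ * context negotiation here; the matching upcall is located by sequence
+ * number and the client context is either made uptodate or expired */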
+static
+ssize_t gss_pipe_downcall(struct file *filp, const char *src, size_t mlen)
+{
+ struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode);
+ struct gss_upcall_msg *gss_msg;
+ struct ptlrpc_cli_ctx *ctx;
+ struct gss_cli_ctx *gctx = NULL;
+ char *buf, *data;
+ int datalen;
+ int timeout, rc;
+ __u32 mechidx, seq, gss_err;
+
+ mechidx = (__u32) (long) rpci->private;
+ LASSERT(mechidx < MECH_MAX);
+
+ OBD_ALLOC(buf, mlen);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, src, mlen)) {
+ CERROR("failed to copy user space data\n");
+ GOTO(out_free, rc = -EFAULT);
+ }
+ data = buf;
+ datalen = mlen;
+
+ /* data passed down format:
+ * - seq
+ * - timeout
+ * - gc_win / error
+ * - wire_ctx (rawobj)
+ * - mech_ctx (rawobj)
+ */
+ if (simple_get_bytes(&data, &datalen, &seq, sizeof(seq))) {
+ CERROR("fail to get seq\n");
+ GOTO(out_free, rc = -EFAULT);
+ }
+
+ gss_msg = gss_find_upcall(mechidx, seq);
+ if (!gss_msg) {
+ CERROR("upcall %u has aborted earlier\n", seq);
+ GOTO(out_free, rc = -EINVAL);
+ }
+
+ gss_unhash_msg(gss_msg);
+ gctx = gss_msg->gum_gctx;
+ LASSERT(gctx);
+ LASSERT(atomic_read(&gctx->gc_base.cc_refcount) > 0);
+
+ /* timeout is not in use for now */
+ if (simple_get_bytes(&data, &datalen, &timeout, sizeof(timeout)))
+ GOTO(out_msg, rc = -EFAULT);
+
+ /* lgssd signals an error by setting gc_win == 0 */
+ if (simple_get_bytes(&data, &datalen, &gctx->gc_win,
+ sizeof(gctx->gc_win)))
+ GOTO(out_msg, rc = -EFAULT);
+
+ if (gctx->gc_win == 0) {
+ /* followed by:
+ * - rpc error
+ * - gss error
+ */
+ if (simple_get_bytes(&data, &datalen, &rc, sizeof(rc)))
+ GOTO(out_msg, rc = -EFAULT);
+ if (simple_get_bytes(&data, &datalen, &gss_err,
+ sizeof(gss_err)))
+ GOTO(out_msg, rc = -EFAULT);
+
+ if (rc == 0 && gss_err == GSS_S_COMPLETE) {
+ CWARN("both rpc & gss error code not set\n");
+ rc = -EPERM;
+ }
+ } else {
+ rawobj_t tmpobj;
+
+ /* handle */
+ if (rawobj_extract_local(&tmpobj, (__u32 **) &data, &datalen))
+ GOTO(out_msg, rc = -EFAULT);
+ if (rawobj_dup(&gctx->gc_handle, &tmpobj))
+ GOTO(out_msg, rc = -ENOMEM);
+
+ /* mechctx */
+ if (rawobj_extract_local(&tmpobj, (__u32 **) &data, &datalen))
+ GOTO(out_msg, rc = -EFAULT);
+ gss_err = lgss_import_sec_context(&tmpobj,
+ gss_msg->gum_gsec->gs_mech,
+ &gctx->gc_mechctx);
+ rc = 0;
+ }
+
+ if (likely(rc == 0 && gss_err == GSS_S_COMPLETE)) {
+ gss_cli_ctx_uptodate(gctx);
+ } else {
+ ctx = &gctx->gc_base;
+ sptlrpc_cli_ctx_expire(ctx);
+ if (rc != -ERESTART || gss_err != GSS_S_COMPLETE)
+ set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
+
+ CERROR("refresh ctx %p(uid %d) failed: %d/0x%08x: %s\n",
+ ctx, ctx->cc_vcred.vc_uid, rc, gss_err,
+ test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags) ?
+ "fatal error" : "non-fatal");
+ }
+
+ rc = mlen;
+
+out_msg:
+ gss_release_msg(gss_msg);
+
+out_free:
+ OBD_FREE(buf, mlen);
+ /* FIXME
+ * hack pipefs: always return the asked length, otherwise all
+ * following downcalls might be messed up. */
+ rc = mlen;
+ return rc;
+}
+
+static
+void gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
+{
+ struct gss_upcall_msg *gmsg;
+ struct gss_upcall_msg_data *gumd;
+ static cfs_time_t ratelimit = 0;
+
+ LASSERT(list_empty(&msg->list));
+
+ /* normally errno is >= 0 */
+ if (msg->errno >= 0)
+ return;
+
+ gmsg = container_of(msg, struct gss_upcall_msg, gum_base);
+ gumd = &gmsg->gum_data;
+ LASSERT(atomic_read(&gmsg->gum_refcount) > 0);
+
+ CERROR("failed msg %p (seq %u, uid %u, svc %u, nid "LPX64", obd %.*s): "
+ "errno %d\n", msg, gumd->gum_seq, gumd->gum_uid, gumd->gum_svc,
+ gumd->gum_nid, (int) sizeof(gumd->gum_obd),
+ gumd->gum_obd, msg->errno);
+
+ atomic_inc(&gmsg->gum_refcount);
+ gss_unhash_msg(gmsg);
+ if (msg->errno == -ETIMEDOUT || msg->errno == -EPIPE) {
+ cfs_time_t now = cfs_time_current_sec();
+
+ if (cfs_time_after(now, ratelimit)) {
+ CWARN("upcall timed out, is lgssd running?\n");
+ ratelimit = now + 15;
+ }
+ }
+ gss_msg_fail_ctx(gmsg);
+ gss_release_msg(gmsg);
+}
+
+static
+void gss_pipe_release(struct inode *inode)
+{
+ struct rpc_inode *rpci = RPC_I(inode);
+ __u32 idx;
+
+ idx = (__u32) (long) rpci->private;
+ LASSERT(idx < MECH_MAX);
+
+ upcall_list_lock(idx);
+ while (!list_empty(&upcall_lists[idx])) {
+ struct gss_upcall_msg *gmsg;
+ struct gss_upcall_msg_data *gumd;
+
+ gmsg = list_entry(upcall_lists[idx].next,
+ struct gss_upcall_msg, gum_list);
+ gumd = &gmsg->gum_data;
+ LASSERT(list_empty(&gmsg->gum_base.list));
+
+ CERROR("failing remaining msg %p:seq %u, uid %u, svc %u, "
+ "nid "LPX64", obd %.*s\n", gmsg,
+ gumd->gum_seq, gumd->gum_uid, gumd->gum_svc,
+ gumd->gum_nid, (int) sizeof(gumd->gum_obd),
+ gumd->gum_obd);
+
+ gmsg->gum_base.errno = -EPIPE;
+ atomic_inc(&gmsg->gum_refcount);
+ gss_unhash_msg_nolock(gmsg);
+
+ gss_msg_fail_ctx(gmsg);
+
+ upcall_list_unlock(idx);
+ gss_release_msg(gmsg);
+ upcall_list_lock(idx);
+ }
+ upcall_list_unlock(idx);
+}
+
+static struct rpc_pipe_ops gss_upcall_ops = {
+ .upcall = gss_pipe_upcall,
+ .downcall = gss_pipe_downcall,
+ .destroy_msg = gss_pipe_destroy_msg,
+ .release_pipe = gss_pipe_release,
+};
+
+/****************************************
+ * upcall helper functions *
+ ****************************************/
+
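+/*
+ * Build an upcall message describing @ctx and queue it on the krb5 pipe.
+ * The user-space daemon performs the actual GSS negotiation and reports
+ * the result back through gss_pipe_downcall().
+ */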
+static
+int gss_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx)
+{
+ struct obd_import *imp;
+ struct gss_sec *gsec;
+ struct gss_upcall_msg *gmsg;
+ int rc = 0;
+
+ might_sleep();
+
+ LASSERT(ctx->cc_sec);
+ LASSERT(ctx->cc_sec->ps_import);
+ LASSERT(ctx->cc_sec->ps_import->imp_obd);
+
+ imp = ctx->cc_sec->ps_import;
+ if (!imp->imp_connection) {
+ CERROR("import has no connection set\n");
+ return -EINVAL;
+ }
+
+ gsec = container_of(ctx->cc_sec, struct gss_sec, gs_base);
+
+ OBD_ALLOC_PTR(gmsg);
+ if (!gmsg)
+ return -ENOMEM;
+
+ /* initialize pipefs base msg */
+ INIT_LIST_HEAD(&gmsg->gum_base.list);
+ gmsg->gum_base.data = &gmsg->gum_data;
+ gmsg->gum_base.len = sizeof(gmsg->gum_data);
+ gmsg->gum_base.copied = 0;
+ gmsg->gum_base.errno = 0;
+
+ /* init upcall msg */
+ atomic_set(&gmsg->gum_refcount, 1);
+ gmsg->gum_mechidx = mech_name2idx(gsec->gs_mech->gm_name);
+ gmsg->gum_gsec = gsec;
+ gmsg->gum_gctx = container_of(sptlrpc_cli_ctx_get(ctx),
+ struct gss_cli_ctx, gc_base);
+ gmsg->gum_data.gum_seq = upcall_get_sequence();
+ gmsg->gum_data.gum_uid = ctx->cc_vcred.vc_uid;
+ gmsg->gum_data.gum_gid = 0; /* not used for now */
+ gmsg->gum_data.gum_svc = import_to_gss_svc(imp);
+ gmsg->gum_data.gum_nid = imp->imp_connection->c_peer.nid;
+ strncpy(gmsg->gum_data.gum_obd, imp->imp_obd->obd_name,
+ sizeof(gmsg->gum_data.gum_obd));
+
+ /* This could only happen when the sysadmin has forcibly set it
+ * dead/expired using lctl. */
+ if (ctx->cc_flags & PTLRPC_CTX_STATUS_MASK) {
+ CWARN("ctx %p(%u->%s) was set flags %lx unexpectedly\n",
+ ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
+ ctx->cc_flags);
+
+ LASSERT(!(ctx->cc_flags & PTLRPC_CTX_UPTODATE));
+ ctx->cc_flags |= PTLRPC_CTX_DEAD | PTLRPC_CTX_ERROR;
+
+ rc = -EIO;
+ goto err_free;
+ }
+
+ upcall_msg_enlist(gmsg);
+
+ rc = rpc_queue_upcall(de_pipes[gmsg->gum_mechidx]->d_inode,
+ &gmsg->gum_base);
+ if (rc) {
+ CERROR("rpc_queue_upcall failed: %d\n", rc);
+
+ upcall_msg_delist(gmsg);
+ goto err_free;
+ }
+
+ return 0;
+err_free:
+ OBD_FREE_PTR(gmsg);
+ return rc;
+}
+
+static
+int gss_cli_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx)
+{
+ /* if we are refreshing for root, also update the reverse
+ * handle index, so as not to confuse reverse contexts. */
+ if (ctx->cc_vcred.vc_uid == 0) {
+ struct gss_sec *gsec;
+
+ gsec = container_of(ctx->cc_sec, struct gss_sec, gs_base);
+ gsec->gs_rvs_hdl = gss_get_next_ctx_index();
+ }
+
+ return gss_ctx_refresh_pf(ctx);
+}
+
+/****************************************
+ * lustre gss pipefs policy *
+ ****************************************/
+
+static struct ptlrpc_ctx_ops gss_pipefs_ctxops = {
+ .match = gss_cli_ctx_match,
+ .refresh = gss_cli_ctx_refresh_pf,
+ .validate = gss_cli_ctx_validate_pf,
+ .die = gss_cli_ctx_die_pf,
+ .sign = gss_cli_ctx_sign,
+ .verify = gss_cli_ctx_verify,
+ .seal = gss_cli_ctx_seal,
+ .unseal = gss_cli_ctx_unseal,
+ .wrap_bulk = gss_cli_ctx_wrap_bulk,
+ .unwrap_bulk = gss_cli_ctx_unwrap_bulk,
+};
+
+static struct ptlrpc_sec_cops gss_sec_pipefs_cops = {
+ .create_sec = gss_sec_create_pf,
+ .destroy_sec = gss_sec_destroy_pf,
+ .kill_sec = gss_sec_kill,
+ .lookup_ctx = gss_sec_lookup_ctx_pf,
+ .release_ctx = gss_sec_release_ctx_pf,
+ .flush_ctx_cache = gss_sec_flush_ctx_cache_pf,
+ .install_rctx = gss_sec_install_rctx,
+ .alloc_reqbuf = gss_alloc_reqbuf,
+ .free_reqbuf = gss_free_reqbuf,
+ .alloc_repbuf = gss_alloc_repbuf,
+ .free_repbuf = gss_free_repbuf,
+ .enlarge_reqbuf = gss_enlarge_reqbuf,
+};
+
+static struct ptlrpc_sec_sops gss_sec_pipefs_sops = {
+ .accept = gss_svc_accept_pf,
+ .invalidate_ctx = gss_svc_invalidate_ctx,
+ .alloc_rs = gss_svc_alloc_rs,
+ .authorize = gss_svc_authorize,
+ .free_rs = gss_svc_free_rs,
+ .free_ctx = gss_svc_free_ctx,
+ .unwrap_bulk = gss_svc_unwrap_bulk,
+ .wrap_bulk = gss_svc_wrap_bulk,
+ .install_rctx = gss_svc_install_rctx_pf,
+};
+
+static struct ptlrpc_sec_policy gss_policy_pipefs = {
+ .sp_owner = THIS_MODULE,
+ .sp_name = "gss.pipefs",
+ .sp_policy = SPTLRPC_POLICY_GSS_PIPEFS,
+ .sp_cops = &gss_sec_pipefs_cops,
+ .sp_sops = &gss_sec_pipefs_sops,
+};
+
+static
+int __init gss_init_pipefs_upcall(void)
+{
+ struct dentry *de;
+
+ /* pipe dir */
+ de = rpc_mkdir(LUSTRE_PIPE_ROOT, NULL);
+ if (IS_ERR(de) && PTR_ERR(de) != -EEXIST) {
+ CERROR("Failed to create gss pipe dir: %ld\n", PTR_ERR(de));
+ return PTR_ERR(de);
+ }
+
+ /* FIXME hack pipefs: dput will sometimes cause an oops during module
+ * unload, when lgssd closes the pipe fds. */
+
+ /* krb5 mechanism */
+ de = rpc_mkpipe(LUSTRE_PIPE_KRB5, (void *) MECH_KRB5, &gss_upcall_ops,
+ RPC_PIPE_WAIT_FOR_OPEN);
+ if (!de || IS_ERR(de)) {
+ CERROR("failed to make rpc_pipe %s: %ld\n",
+ LUSTRE_PIPE_KRB5, PTR_ERR(de));
+ rpc_rmdir(LUSTRE_PIPE_ROOT);
+ return PTR_ERR(de);
+ }
+
+ de_pipes[MECH_KRB5] = de;
+ INIT_LIST_HEAD(&upcall_lists[MECH_KRB5]);
+ spin_lock_init(&upcall_locks[MECH_KRB5]);
+
+ return 0;
+}
+
+static
+void __exit gss_exit_pipefs_upcall(void)
+{
+ __u32 i;
+
+ for (i = 0; i < MECH_MAX; i++) {
+ LASSERT(list_empty(&upcall_lists[i]));
+
+ /* a dput of the pipe dentry here might cause an lgssd oops. */
+ de_pipes[i] = NULL;
+ }
+
+ rpc_unlink(LUSTRE_PIPE_KRB5);
+ rpc_rmdir(LUSTRE_PIPE_ROOT);
+}
+
+int __init gss_init_pipefs(void)
+{
+ int rc;
+
+ rc = gss_init_pipefs_upcall();
+ if (rc)
+ return rc;
+
+ rc = sptlrpc_register_policy(&gss_policy_pipefs);
+ if (rc) {
+ gss_exit_pipefs_upcall();
+ return rc;
+ }
+
+ return 0;
+}
+
+void __exit gss_exit_pipefs(void)
+{
+ gss_exit_pipefs_upcall();
+ sptlrpc_unregister_policy(&gss_policy_pipefs);
+}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_rawobj.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_rawobj.c
new file mode 100644
index 0000000..fb298aef6
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_rawobj.c
@@ -0,0 +1,242 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ptlrpc/gss/gss_rawobj.c
+ *
+ * Author: Eric Mei <ericm@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_SEC
+
+#include <linux/mutex.h>
+
+#include <obd.h>
+#include <obd_class.h>
+#include <obd_support.h>
+#include <lustre_sec.h>
+
+#include "gss_internal.h"
+
+int rawobj_empty(rawobj_t *obj)
+{
+ LASSERT(equi(obj->len, obj->data));
+ return (obj->len == 0);
+}
+
+int rawobj_alloc(rawobj_t *obj, char *buf, int len)
+{
+ LASSERT(obj);
+ LASSERT(len >= 0);
+
+ obj->len = len;
+ if (len) {
+ OBD_ALLOC_LARGE(obj->data, len);
+ if (!obj->data) {
+ obj->len = 0;
+ return -ENOMEM;
+ }
+ memcpy(obj->data, buf, len);
+ } else
+ obj->data = NULL;
+ return 0;
+}
+
+void rawobj_free(rawobj_t *obj)
+{
+ LASSERT(obj);
+
+ if (obj->len) {
+ LASSERT(obj->data);
+ OBD_FREE_LARGE(obj->data, obj->len);
+ obj->len = 0;
+ obj->data = NULL;
+ } else
+ LASSERT(!obj->data);
+}
+
+int rawobj_equal(rawobj_t *a, rawobj_t *b)
+{
+ LASSERT(a && b);
+
+ return (a->len == b->len &&
+ (!a->len || !memcmp(a->data, b->data, a->len)));
+}
+
+int rawobj_dup(rawobj_t *dest, rawobj_t *src)
+{
+ LASSERT(src && dest);
+
+ dest->len = src->len;
+ if (dest->len) {
+ OBD_ALLOC_LARGE(dest->data, dest->len);
+ if (!dest->data) {
+ dest->len = 0;
+ return -ENOMEM;
+ }
+ memcpy(dest->data, src->data, dest->len);
+ } else
+ dest->data = NULL;
+ return 0;
+}
+
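+/* wire format: a 4-byte little-endian length followed by the data,
+ * rounded up to a 4-byte boundary */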
+int rawobj_serialize(rawobj_t *obj, __u32 **buf, __u32 *buflen)
+{
+ __u32 len;
+
+ LASSERT(obj);
+ LASSERT(buf);
+ LASSERT(buflen);
+
+ len = cfs_size_round4(obj->len);
+
+ if (*buflen < 4 + len) {
+ CERROR("buflen %u < %u\n", *buflen, 4 + len);
+ return -EINVAL;
+ }
+
+ *(*buf)++ = cpu_to_le32(obj->len);
+ memcpy(*buf, obj->data, obj->len);
+ *buf += (len >> 2);
+ *buflen -= (4 + len);
+
+ return 0;
+}
+
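+/* @local selects host byte order and no 4-byte rounding (data written by
+ * this node); @alloc makes a private copy instead of pointing into @buf */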
+static int __rawobj_extract(rawobj_t *obj, __u32 **buf, __u32 *buflen,
+ int alloc, int local)
+{
+ __u32 len;
+
+ if (*buflen < sizeof(__u32)) {
+ CERROR("buflen %u\n", *buflen);
+ return -EINVAL;
+ }
+
+ obj->len = *(*buf)++;
+ if (!local)
+ obj->len = le32_to_cpu(obj->len);
+ *buflen -= sizeof(__u32);
+
+ if (!obj->len) {
+ obj->data = NULL;
+ return 0;
+ }
+
+ len = local ? obj->len : cfs_size_round4(obj->len);
+ if (*buflen < len) {
+ CERROR("buflen %u < %u\n", *buflen, len);
+ obj->len = 0;
+ return -EINVAL;
+ }
+
+ if (!alloc)
+ obj->data = (__u8 *) *buf;
+ else {
+ OBD_ALLOC_LARGE(obj->data, obj->len);
+ if (!obj->data) {
+ CERROR("fail to alloc %u bytes\n", obj->len);
+ obj->len = 0;
+ return -ENOMEM;
+ }
+ memcpy(obj->data, *buf, obj->len);
+ }
+
+ *((char **)buf) += len;
+ *buflen -= len;
+
+ return 0;
+}
+
+int rawobj_extract(rawobj_t *obj, __u32 **buf, __u32 *buflen)
+{
+ return __rawobj_extract(obj, buf, buflen, 0, 0);
+}
+
+int rawobj_extract_alloc(rawobj_t *obj, __u32 **buf, __u32 *buflen)
+{
+ return __rawobj_extract(obj, buf, buflen, 1, 0);
+}
+
+int rawobj_extract_local(rawobj_t *obj, __u32 **buf, __u32 *buflen)
+{
+ return __rawobj_extract(obj, buf, buflen, 0, 1);
+}
+
+int rawobj_extract_local_alloc(rawobj_t *obj, __u32 **buf, __u32 *buflen)
+{
+ return __rawobj_extract(obj, buf, buflen, 1, 1);
+}
+
+int rawobj_from_netobj(rawobj_t *rawobj, netobj_t *netobj)
+{
+ rawobj->len = netobj->len;
+ rawobj->data = netobj->data;
+ return 0;
+}
+
+int rawobj_from_netobj_alloc(rawobj_t *rawobj, netobj_t *netobj)
+{
+ rawobj->len = 0;
+ rawobj->data = NULL;
+
+ if (netobj->len == 0)
+ return 0;
+
+ OBD_ALLOC_LARGE(rawobj->data, netobj->len);
+ if (rawobj->data == NULL)
+ return -ENOMEM;
+
+ rawobj->len = netobj->len;
+ memcpy(rawobj->data, netobj->data, netobj->len);
+ return 0;
+}
+
+/****************************************
+ * misc helpers *
+ ****************************************/
+
+int buffer_extract_bytes(const void **buf, __u32 *buflen,
+ void *res, __u32 reslen)
+{
+ if (*buflen < reslen) {
+ CERROR("buflen %u < %u\n", *buflen, reslen);
+ return -EINVAL;
+ }
+
+ memcpy(res, *buf, reslen);
+ *buf += reslen;
+ *buflen -= reslen;
+ return 0;
+}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_svc_upcall.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_svc_upcall.c
new file mode 100644
index 0000000..5b5365b
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_svc_upcall.c
@@ -0,0 +1,1093 @@
+/*
+ * Modifications for Lustre
+ *
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ *
+ * Author: Eric Mei <ericm@clusterfs.com>
+ */
+
+/*
+ * Neil Brown <neilb@cse.unsw.edu.au>
+ * J. Bruce Fields <bfields@umich.edu>
+ * Andy Adamson <andros@umich.edu>
+ * Dug Song <dugsong@monkey.org>
+ *
+ * RPCSEC_GSS server authentication.
+ * This implements RPCSEC_GSS as defined in rfc2203 (rpcsec_gss) and rfc2078
+ * (gssapi)
+ *
+ * The RPCSEC_GSS involves three stages:
+ * 1/ context creation
+ * 2/ data exchange
+ * 3/ context destruction
+ *
+ * Context creation is handled largely by upcalls to user-space.
+ * In particular, GSS_Accept_sec_context is handled by an upcall
+ * Data exchange is handled entirely within the kernel
+ * In particular, GSS_GetMIC, GSS_VerifyMIC, GSS_Seal, GSS_Unseal are in-kernel.
+ * Context destruction is handled in-kernel
+ * GSS_Delete_sec_context is in-kernel
+ *
+ * Context creation is initiated by an RPCSEC_GSS_INIT request arriving.
+ * The context handle and gss_token are used as a key into the rpcsec_init cache.
+ * The content of this cache includes some of the outputs of GSS_Accept_sec_context,
+ * namely major_status, minor_status, context_handle, reply_token.
+ * These are sent back to the client.
+ * Sequence window management is handled by the kernel. The window size is
+ * currently a compile-time constant.
+ *
+ * When user-space is happy that a context is established, it places an entry
+ * in the rpcsec_context cache. The key for this cache is the context_handle.
+ * The content includes:
+ * uid/gidlist - for determining access rights
+ * mechanism type
+ * mechanism specific information, such as a key
+ *
+ */
+
+#define DEBUG_SUBSYSTEM S_SEC
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/hash.h>
+#include <linux/mutex.h>
+#include <linux/sunrpc/cache.h>
+
+#include <obd.h>
+#include <obd_class.h>
+#include <obd_support.h>
+#include <lustre/lustre_idl.h>
+#include <lustre_net.h>
+#include <lustre_import.h>
+#include <lustre_sec.h>
+
+#include "gss_err.h"
+#include "gss_internal.h"
+#include "gss_api.h"
+
+#define GSS_SVC_UPCALL_TIMEOUT (20)
+
+static spinlock_t __ctx_index_lock;
+static __u64 __ctx_index;
+
+__u64 gss_get_next_ctx_index(void)
+{
+ __u64 idx;
+
+ spin_lock(&__ctx_index_lock);
+ idx = __ctx_index++;
+ spin_unlock(&__ctx_index_lock);
+
+ return idx;
+}
+
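+/* fold @buf into a hash one long at a time; the buffer length is mixed in
+ * as a final byte so that buffers of different lengths hash differently,
+ * and the top @bits bits of the result are returned */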
+static inline unsigned long hash_mem(char *buf, int length, int bits)
+{
+ unsigned long hash = 0;
+ unsigned long l = 0;
+ int len = 0;
+ unsigned char c;
+
+ do {
+ if (len == length) {
+ c = (char) len;
+ len = -1;
+ } else
+ c = *buf++;
+
+ l = (l << 8) | c;
+ len++;
+
+ if ((len & (BITS_PER_LONG/8-1)) == 0)
+ hash = cfs_hash_long(hash^l, BITS_PER_LONG);
+ } while (len);
+
+ return hash >> (BITS_PER_LONG - bits);
+}
+
+/****************************************
+ * rsi cache *
+ ****************************************/
+
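+/* The rsi ("rpcsec init") cache tracks in-flight context negotiations,
+ * keyed by (in_handle, in_token). The kernel queues an upcall for each
+ * entry and the user-space daemon fills in the result via rsi_parse(). */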
+#define RSI_HASHBITS (6)
+#define RSI_HASHMAX (1 << RSI_HASHBITS)
+#define RSI_HASHMASK (RSI_HASHMAX - 1)
+
+struct rsi {
+ struct cache_head h;
+ __u32 lustre_svc;
+ __u64 nid;
+ wait_queue_head_t waitq;
+ rawobj_t in_handle, in_token;
+ rawobj_t out_handle, out_token;
+ int major_status, minor_status;
+};
+
+static struct cache_head *rsi_table[RSI_HASHMAX];
+static struct cache_detail rsi_cache;
+static struct rsi *rsi_update(struct rsi *new, struct rsi *old);
+static struct rsi *rsi_lookup(struct rsi *item);
+
+static inline int rsi_hash(struct rsi *item)
+{
+ return hash_mem((char *)item->in_handle.data, item->in_handle.len,
+ RSI_HASHBITS) ^
+ hash_mem((char *)item->in_token.data, item->in_token.len,
+ RSI_HASHBITS);
+}
+
+static inline int __rsi_match(struct rsi *item, struct rsi *tmp)
+{
+ return (rawobj_equal(&item->in_handle, &tmp->in_handle) &&
+ rawobj_equal(&item->in_token, &tmp->in_token));
+}
+
+static void rsi_free(struct rsi *rsi)
+{
+ rawobj_free(&rsi->in_handle);
+ rawobj_free(&rsi->in_token);
+ rawobj_free(&rsi->out_handle);
+ rawobj_free(&rsi->out_token);
+}
+
+static void rsi_request(struct cache_detail *cd,
+ struct cache_head *h,
+ char **bpp, int *blen)
+{
+ struct rsi *rsi = container_of(h, struct rsi, h);
+ __u64 index = 0;
+
+ /* if in_handle is null, provide a kernel suggestion */
+ if (rsi->in_handle.len == 0)
+ index = gss_get_next_ctx_index();
+
+ qword_addhex(bpp, blen, (char *) &rsi->lustre_svc,
+ sizeof(rsi->lustre_svc));
+ qword_addhex(bpp, blen, (char *) &rsi->nid, sizeof(rsi->nid));
+ qword_addhex(bpp, blen, (char *) &index, sizeof(index));
+ qword_addhex(bpp, blen, rsi->in_handle.data, rsi->in_handle.len);
+ qword_addhex(bpp, blen, rsi->in_token.data, rsi->in_token.len);
+ (*bpp)[-1] = '\n';
+}
+
+static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
+{
+ return sunrpc_cache_pipe_upcall(cd, h, rsi_request);
+}
+
+static inline void __rsi_init(struct rsi *new, struct rsi *item)
+{
+ new->out_handle = RAWOBJ_EMPTY;
+ new->out_token = RAWOBJ_EMPTY;
+
+ new->in_handle = item->in_handle;
+ item->in_handle = RAWOBJ_EMPTY;
+ new->in_token = item->in_token;
+ item->in_token = RAWOBJ_EMPTY;
+
+ new->lustre_svc = item->lustre_svc;
+ new->nid = item->nid;
+ init_waitqueue_head(&new->waitq);
+}
+
+static inline void __rsi_update(struct rsi *new, struct rsi *item)
+{
+ LASSERT(new->out_handle.len == 0);
+ LASSERT(new->out_token.len == 0);
+
+ new->out_handle = item->out_handle;
+ item->out_handle = RAWOBJ_EMPTY;
+ new->out_token = item->out_token;
+ item->out_token = RAWOBJ_EMPTY;
+
+ new->major_status = item->major_status;
+ new->minor_status = item->minor_status;
+}
+
+static void rsi_put(struct kref *ref)
+{
+ struct rsi *rsi = container_of(ref, struct rsi, h.ref);
+
+ LASSERT(rsi->h.next == NULL);
+ rsi_free(rsi);
+ OBD_FREE_PTR(rsi);
+}
+
+static int rsi_match(struct cache_head *a, struct cache_head *b)
+{
+ struct rsi *item = container_of(a, struct rsi, h);
+ struct rsi *tmp = container_of(b, struct rsi, h);
+
+ return __rsi_match(item, tmp);
+}
+
+static void rsi_init(struct cache_head *cnew, struct cache_head *citem)
+{
+ struct rsi *new = container_of(cnew, struct rsi, h);
+ struct rsi *item = container_of(citem, struct rsi, h);
+
+ __rsi_init(new, item);
+}
+
+static void update_rsi(struct cache_head *cnew, struct cache_head *citem)
+{
+ struct rsi *new = container_of(cnew, struct rsi, h);
+ struct rsi *item = container_of(citem, struct rsi, h);
+
+ __rsi_update(new, item);
+}
+
+static struct cache_head *rsi_alloc(void)
+{
+ struct rsi *rsi;
+
+ OBD_ALLOC_PTR(rsi);
+ if (rsi)
+ return &rsi->h;
+ else
+ return NULL;
+}
+
+static int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
+{
+ char *buf = mesg;
+ char *ep;
+ int len;
+ struct rsi rsii, *rsip = NULL;
+ time_t expiry;
+ int status = -EINVAL;
+
+ memset(&rsii, 0, sizeof(rsii));
+
+ /* handle */
+ len = qword_get(&mesg, buf, mlen);
+ if (len < 0)
+ goto out;
+ if (rawobj_alloc(&rsii.in_handle, buf, len)) {
+ status = -ENOMEM;
+ goto out;
+ }
+
+ /* token */
+ len = qword_get(&mesg, buf, mlen);
+ if (len < 0)
+ goto out;
+ if (rawobj_alloc(&rsii.in_token, buf, len)) {
+ status = -ENOMEM;
+ goto out;
+ }
+
+ rsip = rsi_lookup(&rsii);
+ if (!rsip)
+ goto out;
+
+ rsii.h.flags = 0;
+ /* expiry */
+ expiry = get_expiry(&mesg);
+ if (expiry == 0)
+ goto out;
+
+ len = qword_get(&mesg, buf, mlen);
+ if (len <= 0)
+ goto out;
+
+ /* major */
+ rsii.major_status = simple_strtol(buf, &ep, 10);
+ if (*ep)
+ goto out;
+
+ /* minor */
+ len = qword_get(&mesg, buf, mlen);
+ if (len <= 0)
+ goto out;
+ rsii.minor_status = simple_strtol(buf, &ep, 10);
+ if (*ep)
+ goto out;
+
+ /* out_handle */
+ len = qword_get(&mesg, buf, mlen);
+ if (len < 0)
+ goto out;
+ if (rawobj_alloc(&rsii.out_handle, buf, len)) {
+ status = -ENOMEM;
+ goto out;
+ }
+
+ /* out_token */
+ len = qword_get(&mesg, buf, mlen);
+ if (len < 0)
+ goto out;
+ if (rawobj_alloc(&rsii.out_token, buf, len)) {
+ status = -ENOMEM;
+ goto out;
+ }
+
+ rsii.h.expiry_time = expiry;
+ rsip = rsi_update(&rsii, rsip);
+ status = 0;
+out:
+ rsi_free(&rsii);
+ if (rsip) {
+ wake_up_all(&rsip->waitq);
+ cache_put(&rsip->h, &rsi_cache);
+ } else {
+ status = -ENOMEM;
+ }
+
+ if (status)
+ CERROR("rsi parse error %d\n", status);
+ return status;
+}
+
+static struct cache_detail rsi_cache = {
+ .hash_size = RSI_HASHMAX,
+ .hash_table = rsi_table,
+ .name = "auth.sptlrpc.init",
+ .cache_put = rsi_put,
+ .cache_upcall = rsi_upcall,
+ .cache_parse = rsi_parse,
+ .match = rsi_match,
+ .init = rsi_init,
+ .update = update_rsi,
+ .alloc = rsi_alloc,
+};
+
+static struct rsi *rsi_lookup(struct rsi *item)
+{
+ struct cache_head *ch;
+ int hash = rsi_hash(item);
+
+ ch = sunrpc_cache_lookup(&rsi_cache, &item->h, hash);
+ if (ch)
+ return container_of(ch, struct rsi, h);
+ else
+ return NULL;
+}
+
+static struct rsi *rsi_update(struct rsi *new, struct rsi *old)
+{
+ struct cache_head *ch;
+ int hash = rsi_hash(new);
+
+ ch = sunrpc_cache_update(&rsi_cache, &new->h, &old->h, hash);
+ if (ch)
+ return container_of(ch, struct rsi, h);
+ else
+ return NULL;
+}
+
+/****************************************
+ * rsc cache *
+ ****************************************/
+
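+/* The rsc ("rpcsec context") cache holds established server-side GSS
+ * contexts, keyed by the context handle. Entries are normally installed
+ * from user space via rsc_parse(); reverse contexts are installed directly
+ * by gss_svc_upcall_install_rvs_ctx(). */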
+#define RSC_HASHBITS (10)
+#define RSC_HASHMAX (1 << RSC_HASHBITS)
+#define RSC_HASHMASK (RSC_HASHMAX - 1)
+
+struct rsc {
+ struct cache_head h;
+ struct obd_device *target;
+ rawobj_t handle;
+ struct gss_svc_ctx ctx;
+};
+
+static struct cache_head *rsc_table[RSC_HASHMAX];
+static struct cache_detail rsc_cache;
+static struct rsc *rsc_update(struct rsc *new, struct rsc *old);
+static struct rsc *rsc_lookup(struct rsc *item);
+
+static void rsc_free(struct rsc *rsci)
+{
+ rawobj_free(&rsci->handle);
+ rawobj_free(&rsci->ctx.gsc_rvs_hdl);
+ lgss_delete_sec_context(&rsci->ctx.gsc_mechctx);
+}
+
+static inline int rsc_hash(struct rsc *rsci)
+{
+ return hash_mem((char *)rsci->handle.data,
+ rsci->handle.len, RSC_HASHBITS);
+}
+
+static inline int __rsc_match(struct rsc *new, struct rsc *tmp)
+{
+ return rawobj_equal(&new->handle, &tmp->handle);
+}
+
+static inline void __rsc_init(struct rsc *new, struct rsc *tmp)
+{
+ new->handle = tmp->handle;
+ tmp->handle = RAWOBJ_EMPTY;
+
+ new->target = NULL;
+ memset(&new->ctx, 0, sizeof(new->ctx));
+ new->ctx.gsc_rvs_hdl = RAWOBJ_EMPTY;
+}
+
+static inline void __rsc_update(struct rsc *new, struct rsc *tmp)
+{
+ new->ctx = tmp->ctx;
+ tmp->ctx.gsc_rvs_hdl = RAWOBJ_EMPTY;
+ tmp->ctx.gsc_mechctx = NULL;
+
+ memset(&new->ctx.gsc_seqdata, 0, sizeof(new->ctx.gsc_seqdata));
+ spin_lock_init(&new->ctx.gsc_seqdata.ssd_lock);
+}
+
+static void rsc_put(struct kref *ref)
+{
+ struct rsc *rsci = container_of(ref, struct rsc, h.ref);
+
+ LASSERT(rsci->h.next == NULL);
+ rsc_free(rsci);
+ OBD_FREE_PTR(rsci);
+}
+
+static int rsc_match(struct cache_head *a, struct cache_head *b)
+{
+ struct rsc *new = container_of(a, struct rsc, h);
+ struct rsc *tmp = container_of(b, struct rsc, h);
+
+ return __rsc_match(new, tmp);
+}
+
+static void rsc_init(struct cache_head *cnew, struct cache_head *ctmp)
+{
+ struct rsc *new = container_of(cnew, struct rsc, h);
+ struct rsc *tmp = container_of(ctmp, struct rsc, h);
+
+ __rsc_init(new, tmp);
+}
+
+static void update_rsc(struct cache_head *cnew, struct cache_head *ctmp)
+{
+ struct rsc *new = container_of(cnew, struct rsc, h);
+ struct rsc *tmp = container_of(ctmp, struct rsc, h);
+
+ __rsc_update(new, tmp);
+}
+
+static struct cache_head *rsc_alloc(void)
+{
+ struct rsc *rsc;
+
+ OBD_ALLOC_PTR(rsc);
+ if (rsc)
+ return &rsc->h;
+ else
+ return NULL;
+}
+
+static int rsc_parse(struct cache_detail *cd, char *mesg, int mlen)
+{
+ char *buf = mesg;
+ int len, rv, tmp_int;
+ struct rsc rsci, *rscp = NULL;
+ time_t expiry;
+ int status = -EINVAL;
+ struct gss_api_mech *gm = NULL;
+
+ memset(&rsci, 0, sizeof(rsci));
+
+ /* context handle */
+ len = qword_get(&mesg, buf, mlen);
+ if (len < 0)
+ goto out;
+ status = -ENOMEM;
+ if (rawobj_alloc(&rsci.handle, buf, len))
+ goto out;
+
+ rsci.h.flags = 0;
+ /* expiry */
+ expiry = get_expiry(&mesg);
+ status = -EINVAL;
+ if (expiry == 0)
+ goto out;
+
+ /* remote flag */
+ rv = get_int(&mesg, &tmp_int);
+ if (rv) {
+ CERROR("fail to get remote flag\n");
+ goto out;
+ }
+ rsci.ctx.gsc_remote = (tmp_int != 0);
+
+ /* root user flag */
+ rv = get_int(&mesg, &tmp_int);
+ if (rv) {
+ CERROR("fail to get root user flag\n");
+ goto out;
+ }
+ rsci.ctx.gsc_usr_root = (tmp_int != 0);
+
+ /* mds user flag */
+ rv = get_int(&mesg, &tmp_int);
+ if (rv) {
+ CERROR("fail to get mds user flag\n");
+ goto out;
+ }
+ rsci.ctx.gsc_usr_mds = (tmp_int != 0);
+
+ /* oss user flag */
+ rv = get_int(&mesg, &tmp_int);
+ if (rv) {
+ CERROR("fail to get oss user flag\n");
+ goto out;
+ }
+ rsci.ctx.gsc_usr_oss = (tmp_int != 0);
+
+ /* mapped uid */
+ rv = get_int(&mesg, (int *) &rsci.ctx.gsc_mapped_uid);
+ if (rv) {
+ CERROR("fail to get mapped uid\n");
+ goto out;
+ }
+
+ rscp = rsc_lookup(&rsci);
+ if (!rscp)
+ goto out;
+
+ /* uid, or NEGATIVE */
+ rv = get_int(&mesg, (int *) &rsci.ctx.gsc_uid);
+ if (rv == -EINVAL)
+ goto out;
+ if (rv == -ENOENT) {
+ CERROR("NOENT? set rsc entry negative\n");
+ set_bit(CACHE_NEGATIVE, &rsci.h.flags);
+ } else {
+ rawobj_t tmp_buf;
+ unsigned long ctx_expiry;
+
+ /* gid */
+ if (get_int(&mesg, (int *) &rsci.ctx.gsc_gid))
+ goto out;
+
+ /* mech name */
+ len = qword_get(&mesg, buf, mlen);
+ if (len < 0)
+ goto out;
+ gm = lgss_name_to_mech(buf);
+ status = -EOPNOTSUPP;
+ if (!gm)
+ goto out;
+
+ status = -EINVAL;
+ /* mech-specific data: */
+ len = qword_get(&mesg, buf, mlen);
+ if (len < 0)
+ goto out;
+
+ tmp_buf.len = len;
+ tmp_buf.data = (unsigned char *)buf;
+ if (lgss_import_sec_context(&tmp_buf, gm,
+ &rsci.ctx.gsc_mechctx))
+ goto out;
+
+ /* currently the expiry time passed down from user-space
+ * is invalid, here we retrieve it from the mech. */
+ if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
+ CERROR("unable to get expire time, drop it\n");
+ goto out;
+ }
+ expiry = (time_t) ctx_expiry;
+ }
+
+ rsci.h.expiry_time = expiry;
+ rscp = rsc_update(&rsci, rscp);
+ status = 0;
+out:
+ if (gm)
+ lgss_mech_put(gm);
+ rsc_free(&rsci);
+ if (rscp)
+ cache_put(&rscp->h, &rsc_cache);
+ else
+ status = -ENOMEM;
+
+ if (status)
+ CERROR("parse rsc error %d\n", status);
+ return status;
+}
+
+static struct cache_detail rsc_cache = {
+ .hash_size = RSC_HASHMAX,
+ .hash_table = rsc_table,
+ .name = "auth.sptlrpc.context",
+ .cache_put = rsc_put,
+ .cache_parse = rsc_parse,
+ .match = rsc_match,
+ .init = rsc_init,
+ .update = update_rsc,
+ .alloc = rsc_alloc,
+};
+
+static struct rsc *rsc_lookup(struct rsc *item)
+{
+ struct cache_head *ch;
+ int hash = rsc_hash(item);
+
+ ch = sunrpc_cache_lookup(&rsc_cache, &item->h, hash);
+ if (ch)
+ return container_of(ch, struct rsc, h);
+ else
+ return NULL;
+}
+
+static struct rsc *rsc_update(struct rsc *new, struct rsc *old)
+{
+ struct cache_head *ch;
+ int hash = rsc_hash(new);
+
+ ch = sunrpc_cache_update(&rsc_cache, &new->h, &old->h, hash);
+ if (ch)
+ return container_of(ch, struct rsc, h);
+ else
+ return NULL;
+}
+
+#define COMPAT_RSC_PUT(item, cd) cache_put((item), (cd))
+
+/****************************************
+ * rsc cache flush *
+ ****************************************/
+
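+/* walk the whole rsc hash and drop every entry selected by the match
+ * callback, marking it negative so that concurrent users see it as gone */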
+typedef int rsc_entry_match(struct rsc *rscp, long data);
+
+static void rsc_flush(rsc_entry_match *match, long data)
+{
+ struct cache_head **ch;
+ struct rsc *rscp;
+ int n;
+
+ write_lock(&rsc_cache.hash_lock);
+ for (n = 0; n < RSC_HASHMAX; n++) {
+ for (ch = &rsc_cache.hash_table[n]; *ch;) {
+ rscp = container_of(*ch, struct rsc, h);
+
+ if (!match(rscp, data)) {
+ ch = &((*ch)->next);
+ continue;
+ }
+
+ /* it seems simply setting NEGATIVE doesn't work */
+ *ch = (*ch)->next;
+ rscp->h.next = NULL;
+ cache_get(&rscp->h);
+ set_bit(CACHE_NEGATIVE, &rscp->h.flags);
+ COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
+ rsc_cache.entries--;
+ }
+ }
+ write_unlock(&rsc_cache.hash_lock);
+}
+
+static int match_uid(struct rsc *rscp, long uid)
+{
+ if ((int) uid == -1)
+ return 1;
+ return ((int) rscp->ctx.gsc_uid == (int) uid);
+}
+
+static int match_target(struct rsc *rscp, long target)
+{
+ return (rscp->target == (struct obd_device *) target);
+}
+
+static inline void rsc_flush_uid(int uid)
+{
+ if (uid == -1)
+ CWARN("flush all gss contexts...\n");
+
+ rsc_flush(match_uid, (long) uid);
+}
+
+static inline void rsc_flush_target(struct obd_device *target)
+{
+ rsc_flush(match_target, (long) target);
+}
+
+void gss_secsvc_flush(struct obd_device *target)
+{
+ rsc_flush_target(target);
+}
+EXPORT_SYMBOL(gss_secsvc_flush);
+
+static struct rsc *gss_svc_searchbyctx(rawobj_t *handle)
+{
+ struct rsc rsci;
+ struct rsc *found;
+
+ memset(&rsci, 0, sizeof(rsci));
+ if (rawobj_dup(&rsci.handle, handle))
+ return NULL;
+
+ found = rsc_lookup(&rsci);
+ rsc_free(&rsci);
+ if (!found)
+ return NULL;
+ if (cache_check(&rsc_cache, &found->h, NULL))
+ return NULL;
+ return found;
+}
+
+int gss_svc_upcall_install_rvs_ctx(struct obd_import *imp,
+ struct gss_sec *gsec,
+ struct gss_cli_ctx *gctx)
+{
+ struct rsc rsci, *rscp = NULL;
+ unsigned long ctx_expiry;
+ __u32 major;
+ int rc;
+
+ memset(&rsci, 0, sizeof(rsci));
+
+ if (rawobj_alloc(&rsci.handle, (char *) &gsec->gs_rvs_hdl,
+ sizeof(gsec->gs_rvs_hdl)))
+ GOTO(out, rc = -ENOMEM);
+
+ rscp = rsc_lookup(&rsci);
+ if (rscp == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ major = lgss_copy_reverse_context(gctx->gc_mechctx,
+ &rsci.ctx.gsc_mechctx);
+ if (major != GSS_S_COMPLETE)
+ GOTO(out, rc = -ENOMEM);
+
+ if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
+ CERROR("unable to get expire time, drop it\n");
+ GOTO(out, rc = -EINVAL);
+ }
+ rsci.h.expiry_time = (time_t) ctx_expiry;
+
+ if (strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_MDC_NAME) == 0)
+ rsci.ctx.gsc_usr_mds = 1;
+ else if (strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_OSC_NAME) == 0)
+ rsci.ctx.gsc_usr_oss = 1;
+ else
+ rsci.ctx.gsc_usr_root = 1;
+
+ rscp = rsc_update(&rsci, rscp);
+ if (rscp == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ rscp->target = imp->imp_obd;
+ rawobj_dup(&gctx->gc_svc_handle, &rscp->handle);
+
+ CWARN("create reverse svc ctx %p to %s: idx "LPX64"\n",
+ &rscp->ctx, obd2cli_tgt(imp->imp_obd), gsec->gs_rvs_hdl);
+ rc = 0;
+out:
+ if (rscp)
+ cache_put(&rscp->h, &rsc_cache);
+ rsc_free(&rsci);
+
+ if (rc)
+ CERROR("create reverse svc ctx: idx "LPX64", rc %d\n",
+ gsec->gs_rvs_hdl, rc);
+ return rc;
+}
+
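+/* make a reverse service context expire soon (about 20 seconds from now)
+ * instead of removing it immediately. */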
+int gss_svc_upcall_expire_rvs_ctx(rawobj_t *handle)
+{
+ const cfs_time_t expire = 20;
+ struct rsc *rscp;
+
+ rscp = gss_svc_searchbyctx(handle);
+ if (rscp) {
+ CDEBUG(D_SEC, "reverse svcctx %p (rsc %p) expire soon\n",
+ &rscp->ctx, rscp);
+
+ rscp->h.expiry_time = cfs_time_current_sec() + expire;
+ COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
+ }
+ return 0;
+}
+
+int gss_svc_upcall_dup_handle(rawobj_t *handle, struct gss_svc_ctx *ctx)
+{
+ struct rsc *rscp = container_of(ctx, struct rsc, ctx);
+
+ return rawobj_dup(handle, &rscp->handle);
+}
+
+int gss_svc_upcall_update_sequence(rawobj_t *handle, __u32 seq)
+{
+ struct rsc *rscp;
+
+ rscp = gss_svc_searchbyctx(handle);
+ if (rscp) {
+ CDEBUG(D_SEC, "reverse svcctx %p (rsc %p) update seq to %u\n",
+ &rscp->ctx, rscp, seq + 1);
+
+ rscp->ctx.gsc_rvs_seq = seq + 1;
+ COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
+ }
+ return 0;
+}
+
+static struct cache_deferred_req* cache_upcall_defer(struct cache_req *req)
+{
+ return NULL;
+}
+static struct cache_req cache_upcall_chandle = { cache_upcall_defer };
+
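+/* handle a context-init request: look up the rsi (init) cache entry, wait
+ * for the userspace upcall to complete it, then look up the resulting rsc
+ * entry and pack the negotiation reply (status, handle and output token). */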
+int gss_svc_upcall_handle_init(struct ptlrpc_request *req,
+ struct gss_svc_reqctx *grctx,
+ struct gss_wire_ctx *gw,
+ struct obd_device *target,
+ __u32 lustre_svc,
+ rawobj_t *rvs_hdl,
+ rawobj_t *in_token)
+{
+ struct ptlrpc_reply_state *rs;
+ struct rsc *rsci = NULL;
+ struct rsi *rsip = NULL, rsikey;
+ wait_queue_t wait;
+ int replen = sizeof(struct ptlrpc_body);
+ struct gss_rep_header *rephdr;
+ int first_check = 1;
+ int rc = SECSVC_DROP;
+
+ memset(&rsikey, 0, sizeof(rsikey));
+ rsikey.lustre_svc = lustre_svc;
+ rsikey.nid = (__u64) req->rq_peer.nid;
+
+ /* duplicate the context handle; for INIT it is always 0 */
+ if (rawobj_dup(&rsikey.in_handle, &gw->gw_handle)) {
+ CERROR("fail to dup context handle\n");
+ GOTO(out, rc);
+ }
+
+ if (rawobj_dup(&rsikey.in_token, in_token)) {
+ CERROR("can't duplicate token\n");
+ rawobj_free(&rsikey.in_handle);
+ GOTO(out, rc);
+ }
+
+ rsip = rsi_lookup(&rsikey);
+ rsi_free(&rsikey);
+ if (!rsip) {
+ CERROR("error in rsi_lookup.\n");
+
+ if (!gss_pack_err_notify(req, GSS_S_FAILURE, 0))
+ rc = SECSVC_COMPLETE;
+
+ GOTO(out, rc);
+ }
+
+ cache_get(&rsip->h); /* take an extra ref */
+ init_waitqueue_head(&rsip->waitq);
+ init_waitqueue_entry_current(&wait);
+ add_wait_queue(&rsip->waitq, &wait);
+
+cache_check:
+ /* Note that cache_check() drops a reference each time it returns
+ * non-zero. We hold an extra reference on the initial rsip, but must
+ * take care of the subsequent calls. */
+ rc = cache_check(&rsi_cache, &rsip->h, &cache_upcall_chandle);
+ switch (rc) {
+ case -EAGAIN: {
+ int valid;
+
+ if (first_check) {
+ first_check = 0;
+
+ read_lock(&rsi_cache.hash_lock);
+ valid = test_bit(CACHE_VALID, &rsip->h.flags);
+ if (valid == 0)
+ set_current_state(TASK_INTERRUPTIBLE);
+ read_unlock(&rsi_cache.hash_lock);
+
+ if (valid == 0)
+ schedule_timeout(GSS_SVC_UPCALL_TIMEOUT *
+ HZ);
+
+ cache_get(&rsip->h);
+ goto cache_check;
+ }
+ CWARN("waited %ds timeout, drop\n", GSS_SVC_UPCALL_TIMEOUT);
+ break;
+ }
+ case -ENOENT:
+ CWARN("cache_check return ENOENT, drop\n");
+ break;
+ case 0:
+ /* if this is not the first check, we have to release the extra
+ * reference we just added to it. */
+ if (!first_check)
+ cache_put(&rsip->h, &rsi_cache);
+ CDEBUG(D_SEC, "cache_check is good\n");
+ break;
+ }
+
+ remove_wait_queue(&rsip->waitq, &wait);
+ cache_put(&rsip->h, &rsi_cache);
+
+ if (rc)
+ GOTO(out, rc = SECSVC_DROP);
+
+ rc = SECSVC_DROP;
+ rsci = gss_svc_searchbyctx(&rsip->out_handle);
+ if (!rsci) {
+ CERROR("authentication failed\n");
+
+ if (!gss_pack_err_notify(req, GSS_S_FAILURE, 0))
+ rc = SECSVC_COMPLETE;
+
+ GOTO(out, rc);
+ } else {
+ cache_get(&rsci->h);
+ grctx->src_ctx = &rsci->ctx;
+ }
+
+ if (rawobj_dup(&rsci->ctx.gsc_rvs_hdl, rvs_hdl)) {
+ CERROR("failed duplicate reverse handle\n");
+ GOTO(out, rc);
+ }
+
+ rsci->target = target;
+
+ CDEBUG(D_SEC, "server create rsc %p(%u->%s)\n",
+ rsci, rsci->ctx.gsc_uid, libcfs_nid2str(req->rq_peer.nid));
+
+ if (rsip->out_handle.len > PTLRPC_GSS_MAX_HANDLE_SIZE) {
+ CERROR("handle size %u too large\n", rsip->out_handle.len);
+ GOTO(out, rc = SECSVC_DROP);
+ }
+
+ grctx->src_init = 1;
+ grctx->src_reserve_len = cfs_size_round4(rsip->out_token.len);
+
+ rc = lustre_pack_reply_v2(req, 1, &replen, NULL, 0);
+ if (rc) {
+ CERROR("failed to pack reply: %d\n", rc);
+ GOTO(out, rc = SECSVC_DROP);
+ }
+
+ rs = req->rq_reply_state;
+ LASSERT(rs->rs_repbuf->lm_bufcount == 3);
+ LASSERT(rs->rs_repbuf->lm_buflens[0] >=
+ sizeof(*rephdr) + rsip->out_handle.len);
+ LASSERT(rs->rs_repbuf->lm_buflens[2] >= rsip->out_token.len);
+
+ rephdr = lustre_msg_buf(rs->rs_repbuf, 0, 0);
+ rephdr->gh_version = PTLRPC_GSS_VERSION;
+ rephdr->gh_flags = 0;
+ rephdr->gh_proc = PTLRPC_GSS_PROC_ERR;
+ rephdr->gh_major = rsip->major_status;
+ rephdr->gh_minor = rsip->minor_status;
+ rephdr->gh_seqwin = GSS_SEQ_WIN;
+ rephdr->gh_handle.len = rsip->out_handle.len;
+ memcpy(rephdr->gh_handle.data, rsip->out_handle.data,
+ rsip->out_handle.len);
+
+ memcpy(lustre_msg_buf(rs->rs_repbuf, 2, 0), rsip->out_token.data,
+ rsip->out_token.len);
+
+ rs->rs_repdata_len = lustre_shrink_msg(rs->rs_repbuf, 2,
+ rsip->out_token.len, 0);
+
+ rc = SECSVC_OK;
+
+out:
+ /* it looks like we should put rsip here as well, but that messes up
+ * the NFS cache management code... FIXME */
+#if 0
+ if (rsip)
+ rsi_put(&rsip->h, &rsi_cache);
+#endif
+
+ if (rsci) {
+ /* if anything went wrong, we don't keep the context too */
+ if (rc != SECSVC_OK)
+ set_bit(CACHE_NEGATIVE, &rsci->h.flags);
+ else
+ CDEBUG(D_SEC, "create rsc with idx "LPX64"\n",
+ gss_handle_to_u64(&rsci->handle));
+
+ COMPAT_RSC_PUT(&rsci->h, &rsc_cache);
+ }
+ return rc;
+}
+
+struct gss_svc_ctx *gss_svc_upcall_get_ctx(struct ptlrpc_request *req,
+ struct gss_wire_ctx *gw)
+{
+ struct rsc *rsc;
+
+ rsc = gss_svc_searchbyctx(&gw->gw_handle);
+ if (!rsc) {
+ CWARN("Invalid gss ctx idx "LPX64" from %s\n",
+ gss_handle_to_u64(&gw->gw_handle),
+ libcfs_nid2str(req->rq_peer.nid));
+ return NULL;
+ }
+
+ return &rsc->ctx;
+}
+
+void gss_svc_upcall_put_ctx(struct gss_svc_ctx *ctx)
+{
+ struct rsc *rsc = container_of(ctx, struct rsc, ctx);
+
+ COMPAT_RSC_PUT(&rsc->h, &rsc_cache);
+}
+
+void gss_svc_upcall_destroy_ctx(struct gss_svc_ctx *ctx)
+{
+ struct rsc *rsc = container_of(ctx, struct rsc, ctx);
+
+ /* can't be found */
+ set_bit(CACHE_NEGATIVE, &rsc->h.flags);
+ /* to be removed at next scan */
+ rsc->h.expiry_time = 1;
+}
+
+int __init gss_init_svc_upcall(void)
+{
+ int i;
+
+ spin_lock_init(&__ctx_index_lock);
+ /*
+ * this helps reduce context index collisions. after a server reboot,
+ * conflicting requests from clients might be filtered out by the initial
+ * sequence number check, leaving no chance to send an error notification
+ * back to the clients.
+ */
+ cfs_get_random_bytes(&__ctx_index, sizeof(__ctx_index));
+
+
+ cache_register(&rsi_cache);
+ cache_register(&rsc_cache);
+
+ /* FIXME this looks clumsy: we intend to give lsvcgssd a chance to open
+ * the init upcall channel first, otherwise there's a good chance the first
+ * upcall is issued before the channel is open and the nfsv4 cache code
+ * drops the request directly, leading to unnecessary recovery time.
+ * here we wait at most 1.5 seconds. */
+ for (i = 0; i < 6; i++) {
+ if (atomic_read(&rsi_cache.readers) > 0)
+ break;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ LASSERT(HZ >= 4);
+ schedule_timeout(HZ / 4);
+ }
+
+ if (atomic_read(&rsi_cache.readers) == 0)
+ CWARN("Init channel is not opened by lsvcgssd, following "
+ "request might be dropped until lsvcgssd is active\n");
+
+ return 0;
+}
+
+void __exit gss_exit_svc_upcall(void)
+{
+ cache_purge(&rsi_cache);
+ cache_unregister(&rsi_cache);
+
+ cache_purge(&rsc_cache);
+ cache_unregister(&rsc_cache);
+}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/lproc_gss.c b/drivers/staging/lustre/lustre/ptlrpc/gss/lproc_gss.c
new file mode 100644
index 0000000..de100a1
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/lproc_gss.c
@@ -0,0 +1,221 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_SEC
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/dcache.h>
+#include <linux/fs.h>
+#include <linux/mutex.h>
+
+#include <obd.h>
+#include <obd_class.h>
+#include <obd_support.h>
+#include <lustre/lustre_idl.h>
+#include <lustre_net.h>
+#include <lustre_import.h>
+#include <lprocfs_status.h>
+#include <lustre_sec.h>
+
+#include "gss_err.h"
+#include "gss_internal.h"
+#include "gss_api.h"
+
+static struct proc_dir_entry *gss_proc_root = NULL;
+static struct proc_dir_entry *gss_proc_lk = NULL;
+
+/*
+ * statistic of "out-of-sequence-window"
+ */
+static struct {
+ spinlock_t oos_lock;
+ atomic_t oos_cli_count; /* client occurrence */
+ int oos_cli_behind; /* client max seqs behind */
+ atomic_t oos_svc_replay[3]; /* server replay detected */
+ atomic_t oos_svc_pass[3]; /* server verified ok */
+} gss_stat_oos = {
+ .oos_cli_count = ATOMIC_INIT(0),
+ .oos_cli_behind = 0,
+ .oos_svc_replay = { ATOMIC_INIT(0), },
+ .oos_svc_pass = { ATOMIC_INIT(0), },
+};
+
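+/* record a client-side out-of-sequence-window event and track the maximum
+ * number of sequence numbers a request has fallen behind. */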
+void gss_stat_oos_record_cli(int behind)
+{
+ atomic_inc(&gss_stat_oos.oos_cli_count);
+
+ spin_lock(&gss_stat_oos.oos_lock);
+ if (behind > gss_stat_oos.oos_cli_behind)
+ gss_stat_oos.oos_cli_behind = behind;
+ spin_unlock(&gss_stat_oos.oos_lock);
+}
+
+void gss_stat_oos_record_svc(int phase, int replay)
+{
+ LASSERT(phase >= 0 && phase <= 2);
+
+ if (replay)
+ atomic_inc(&gss_stat_oos.oos_svc_replay[phase]);
+ else
+ atomic_inc(&gss_stat_oos.oos_svc_pass[phase]);
+}
+
+static int gss_proc_oos_seq_show(struct seq_file *m, void *v)
+{
+ return seq_printf(m,
+ "seqwin: %u\n"
+ "backwin: %u\n"
+ "client fall behind seqwin\n"
+ " occurrence: %d\n"
+ " max seq behind: %d\n"
+ "server replay detected:\n"
+ " phase 0: %d\n"
+ " phase 1: %d\n"
+ " phase 2: %d\n"
+ "server verify ok:\n"
+ " phase 2: %d\n",
+ GSS_SEQ_WIN_MAIN,
+ GSS_SEQ_WIN_BACK,
+ atomic_read(&gss_stat_oos.oos_cli_count),
+ gss_stat_oos.oos_cli_behind,
+ atomic_read(&gss_stat_oos.oos_svc_replay[0]),
+ atomic_read(&gss_stat_oos.oos_svc_replay[1]),
+ atomic_read(&gss_stat_oos.oos_svc_replay[2]),
+ atomic_read(&gss_stat_oos.oos_svc_pass[2]));
+}
+LPROC_SEQ_FOPS_RO(gss_proc_oos);
+
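+/* write handler for the "init_channel" proc entry: the buffer written by
+ * userspace is handed to gss_do_ctx_init_rpc() to issue a security context
+ * init RPC. */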
+static int gss_proc_write_secinit(struct file *file, const char *buffer,
+ size_t count, off_t *off)
+{
+ int rc;
+
+ rc = gss_do_ctx_init_rpc((char *) buffer, count);
+ if (rc) {
+ LASSERT(rc < 0);
+ return rc;
+ }
+
+ return count;
+}
+
+static const struct file_operations gss_proc_secinit = {
+ .write = gss_proc_write_secinit,
+};
+
+static struct lprocfs_vars gss_lprocfs_vars[] = {
+ { "replays", &gss_proc_oos_fops },
+ { "init_channel", &gss_proc_secinit, NULL, 0222 },
+ { NULL }
+};
+
+/*
+ * for userspace helper lgss_keyring.
+ *
+ * debug_level: [0, 4], defined in utils/gss/lgss_utils.h
+ */
+static int gss_lk_debug_level = 1;
+
+static int gss_lk_proc_dl_seq_show(struct seq_file *m, void *v)
+{
+ return seq_printf(m, "%u\n", gss_lk_debug_level);
+}
+
+static int gss_lk_proc_dl_seq_write(struct file *file, const char *buffer,
+ size_t count, off_t *off)
+{
+ int val, rc;
+
+ rc = lprocfs_write_helper(buffer, count, &val);
+ if (rc < 0)
+ return rc;
+
+ if (val < 0 || val > 4)
+ return -ERANGE;
+
+ gss_lk_debug_level = val;
+ return count;
+}
+LPROC_SEQ_FOPS(gss_lk_proc_dl);
+
+static struct lprocfs_vars gss_lk_lprocfs_vars[] = {
+ { "debug_level", &gss_lk_proc_dl_fops },
+ { NULL }
+};
+
+void gss_exit_lproc(void)
+{
+ if (gss_proc_lk) {
+ lprocfs_remove(&gss_proc_lk);
+ gss_proc_lk = NULL;
+ }
+
+ if (gss_proc_root) {
+ lprocfs_remove(&gss_proc_root);
+ gss_proc_root = NULL;
+ }
+}
+
+int gss_init_lproc(void)
+{
+ int rc;
+
+ spin_lock_init(&gss_stat_oos.oos_lock);
+
+ gss_proc_root = lprocfs_register("gss", sptlrpc_proc_root,
+ gss_lprocfs_vars, NULL);
+ if (IS_ERR(gss_proc_root)) {
+ rc = PTR_ERR(gss_proc_root);
+ gss_proc_root = NULL;
+ GOTO(err_out, rc);
+ }
+
+ gss_proc_lk = lprocfs_register("lgss_keyring", gss_proc_root,
+ gss_lk_lprocfs_vars, NULL);
+ if (IS_ERR(gss_proc_lk)) {
+ rc = PTR_ERR(gss_proc_lk);
+ gss_proc_lk = NULL;
+ GOTO(err_out, rc);
+ }
+
+ return 0;
+
+err_out:
+ CERROR("failed to initialize gss lproc entries: %d\n", rc);
+ gss_exit_lproc();
+ return rc;
+}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c b/drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c
new file mode 100644
index 0000000..b42ddda
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c
@@ -0,0 +1,2887 @@
+/*
+ * Modifications for Lustre
+ *
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ *
+ * Author: Eric Mei <ericm@clusterfs.com>
+ */
+
+/*
+ * linux/net/sunrpc/auth_gss.c
+ *
+ * RPCSEC_GSS client authentication.
+ *
+ * Copyright (c) 2000 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Dug Song <dugsong@monkey.org>
+ * Andy Adamson <andros@umich.edu>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define DEBUG_SUBSYSTEM S_SEC
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/dcache.h>
+#include <linux/fs.h>
+#include <linux/mutex.h>
+#include <asm/atomic.h>
+
+#include <obd.h>
+#include <obd_class.h>
+#include <obd_support.h>
+#include <obd_cksum.h>
+#include <lustre/lustre_idl.h>
+#include <lustre_net.h>
+#include <lustre_import.h>
+#include <lustre_sec.h>
+
+#include "gss_err.h"
+#include "gss_internal.h"
+#include "gss_api.h"
+
+#include <linux/crypto.h>
+#include <linux/crc32.h>
+
+/*
+ * early replies have a fixed size in privacy and in integrity mode
+ * respectively, so we calculate them only once.
+ */
+static int gss_at_reply_off_integ;
+static int gss_at_reply_off_priv;
+
+
+static inline int msg_last_segidx(struct lustre_msg *msg)
+{
+ LASSERT(msg->lm_bufcount > 0);
+ return msg->lm_bufcount - 1;
+}
+static inline int msg_last_seglen(struct lustre_msg *msg)
+{
+ return msg->lm_buflens[msg_last_segidx(msg)];
+}
+
+/********************************************
+ * wire data swabber *
+ ********************************************/
+
+static
+void gss_header_swabber(struct gss_header *ghdr)
+{
+ __swab32s(&ghdr->gh_flags);
+ __swab32s(&ghdr->gh_proc);
+ __swab32s(&ghdr->gh_seq);
+ __swab32s(&ghdr->gh_svc);
+ __swab32s(&ghdr->gh_pad1);
+ __swab32s(&ghdr->gh_handle.len);
+}
+
+struct gss_header *gss_swab_header(struct lustre_msg *msg, int segment,
+ int swabbed)
+{
+ struct gss_header *ghdr;
+
+ ghdr = lustre_msg_buf(msg, segment, sizeof(*ghdr));
+ if (ghdr == NULL)
+ return NULL;
+
+ if (swabbed)
+ gss_header_swabber(ghdr);
+
+ if (sizeof(*ghdr) + ghdr->gh_handle.len > msg->lm_buflens[segment]) {
+ CERROR("gss header has length %d, now %u received\n",
+ (int) sizeof(*ghdr) + ghdr->gh_handle.len,
+ msg->lm_buflens[segment]);
+ return NULL;
+ }
+
+ return ghdr;
+}
+
+#if 0
+static
+void gss_netobj_swabber(netobj_t *obj)
+{
+ __swab32s(&obj->len);
+}
+
+netobj_t *gss_swab_netobj(struct lustre_msg *msg, int segment)
+{
+ netobj_t *obj;
+
+ obj = lustre_swab_buf(msg, segment, sizeof(*obj), gss_netobj_swabber);
+ if (obj && sizeof(*obj) + obj->len > msg->lm_buflens[segment]) {
+ CERROR("netobj require length %u but only %u received\n",
+ (unsigned int) sizeof(*obj) + obj->len,
+ msg->lm_buflens[segment]);
+ return NULL;
+ }
+
+ return obj;
+}
+#endif
+
+/*
+ * the payload size should be obtained from the mechanism, but since we
+ * currently only support kerberos we can simply use fixed values.
+ * krb5 "meta" data:
+ * - krb5 header: 16
+ * - krb5 checksum: 20
+ *
+ * for privacy mode the payload also includes the cipher text, which has the
+ * same size as the plain text, plus a possible confounder and padding, each
+ * at most one cipher block.
+ */
+#define GSS_KRB5_INTEG_MAX_PAYLOAD (40)
+
+static inline
+int gss_mech_payload(struct gss_ctx *mechctx, int msgsize, int privacy)
+{
+ if (privacy)
+ return GSS_KRB5_INTEG_MAX_PAYLOAD + 16 + 16 + 16 + msgsize;
+ else
+ return GSS_KRB5_INTEG_MAX_PAYLOAD;
+}
+
+/*
+ * return signature size, otherwise < 0 to indicate error
+ */
+static int gss_sign_msg(struct lustre_msg *msg,
+ struct gss_ctx *mechctx,
+ enum lustre_sec_part sp,
+ __u32 flags, __u32 proc, __u32 seq, __u32 svc,
+ rawobj_t *handle)
+{
+ struct gss_header *ghdr;
+ rawobj_t text[4], mic;
+ int textcnt, max_textcnt, mic_idx;
+ __u32 major;
+
+ LASSERT(msg->lm_bufcount >= 2);
+
+ /* gss hdr */
+ LASSERT(msg->lm_buflens[0] >=
+ sizeof(*ghdr) + (handle ? handle->len : 0));
+ ghdr = lustre_msg_buf(msg, 0, 0);
+
+ ghdr->gh_version = PTLRPC_GSS_VERSION;
+ ghdr->gh_sp = (__u8) sp;
+ ghdr->gh_flags = flags;
+ ghdr->gh_proc = proc;
+ ghdr->gh_seq = seq;
+ ghdr->gh_svc = svc;
+ if (!handle) {
+ /* fill in a fake one */
+ ghdr->gh_handle.len = 0;
+ } else {
+ ghdr->gh_handle.len = handle->len;
+ memcpy(ghdr->gh_handle.data, handle->data, handle->len);
+ }
+
+ /* no actual signature for null mode */
+ if (svc == SPTLRPC_SVC_NULL)
+ return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
+
+ /* MIC */
+ mic_idx = msg_last_segidx(msg);
+ max_textcnt = (svc == SPTLRPC_SVC_AUTH) ? 1 : mic_idx;
+
+ for (textcnt = 0; textcnt < max_textcnt; textcnt++) {
+ text[textcnt].len = msg->lm_buflens[textcnt];
+ text[textcnt].data = lustre_msg_buf(msg, textcnt, 0);
+ }
+
+ mic.len = msg->lm_buflens[mic_idx];
+ mic.data = lustre_msg_buf(msg, mic_idx, 0);
+
+ major = lgss_get_mic(mechctx, textcnt, text, 0, NULL, &mic);
+ if (major != GSS_S_COMPLETE) {
+ CERROR("fail to generate MIC: %08x\n", major);
+ return -EPERM;
+ }
+ LASSERT(mic.len <= msg->lm_buflens[mic_idx]);
+
+ return lustre_shrink_msg(msg, mic_idx, mic.len, 0);
+}
+
+/*
+ * return gss error
+ */
+static
+__u32 gss_verify_msg(struct lustre_msg *msg,
+ struct gss_ctx *mechctx,
+ __u32 svc)
+{
+ rawobj_t text[4], mic;
+ int textcnt, max_textcnt;
+ int mic_idx;
+ __u32 major;
+
+ LASSERT(msg->lm_bufcount >= 2);
+
+ if (svc == SPTLRPC_SVC_NULL)
+ return GSS_S_COMPLETE;
+
+ mic_idx = msg_last_segidx(msg);
+ max_textcnt = (svc == SPTLRPC_SVC_AUTH) ? 1 : mic_idx;
+
+ for (textcnt = 0; textcnt < max_textcnt; textcnt++) {
+ text[textcnt].len = msg->lm_buflens[textcnt];
+ text[textcnt].data = lustre_msg_buf(msg, textcnt, 0);
+ }
+
+ mic.len = msg->lm_buflens[mic_idx];
+ mic.data = lustre_msg_buf(msg, mic_idx, 0);
+
+ major = lgss_verify_mic(mechctx, textcnt, text, 0, NULL, &mic);
+ if (major != GSS_S_COMPLETE)
+ CERROR("mic verify error: %08x\n", major);
+
+ return major;
+}
+
+/*
+ * return gss error code
+ */
+static
+__u32 gss_unseal_msg(struct gss_ctx *mechctx,
+ struct lustre_msg *msgbuf,
+ int *msg_len, int msgbuf_len)
+{
+ rawobj_t clear_obj, hdrobj, token;
+ __u8 *clear_buf;
+ int clear_buflen;
+ __u32 major;
+
+ if (msgbuf->lm_bufcount != 2) {
+ CERROR("invalid bufcount %d\n", msgbuf->lm_bufcount);
+ return GSS_S_FAILURE;
+ }
+
+ /* allocate a temporary clear-text buffer the same size as the token;
+ * we assume the final clear text size <= token size */
+ clear_buflen = lustre_msg_buflen(msgbuf, 1);
+ OBD_ALLOC_LARGE(clear_buf, clear_buflen);
+ if (!clear_buf)
+ return GSS_S_FAILURE;
+
+ /* buffer objects */
+ hdrobj.len = lustre_msg_buflen(msgbuf, 0);
+ hdrobj.data = lustre_msg_buf(msgbuf, 0, 0);
+ token.len = lustre_msg_buflen(msgbuf, 1);
+ token.data = lustre_msg_buf(msgbuf, 1, 0);
+ clear_obj.len = clear_buflen;
+ clear_obj.data = clear_buf;
+
+ major = lgss_unwrap(mechctx, &hdrobj, &token, &clear_obj);
+ if (major != GSS_S_COMPLETE) {
+ CERROR("unwrap message error: %08x\n", major);
+ GOTO(out_free, major = GSS_S_FAILURE);
+ }
+ LASSERT(clear_obj.len <= clear_buflen);
+ LASSERT(clear_obj.len <= msgbuf_len);
+
+ /* now the decrypted message */
+ memcpy(msgbuf, clear_obj.data, clear_obj.len);
+ *msg_len = clear_obj.len;
+
+ major = GSS_S_COMPLETE;
+out_free:
+ OBD_FREE_LARGE(clear_buf, clear_buflen);
+ return major;
+}
+
+/********************************************
+ * gss client context manipulation helpers *
+ ********************************************/
+
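+/* mark a client context dead and wake up waiters; returns 1 if this call
+ * did the expiring, 0 if it was already dead. */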
+int cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
+{
+ LASSERT(atomic_read(&ctx->cc_refcount));
+
+ if (!test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
+ if (!ctx->cc_early_expire)
+ clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+
+ CWARN("ctx %p(%u->%s) get expired: %lu(%+lds)\n",
+ ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
+ ctx->cc_expire,
+ ctx->cc_expire == 0 ? 0 :
+ cfs_time_sub(ctx->cc_expire, cfs_time_current_sec()));
+
+ sptlrpc_cli_ctx_wakeup(ctx);
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * return 1 if the context is dead.
+ */
+int cli_ctx_check_death(struct ptlrpc_cli_ctx *ctx)
+{
+ if (unlikely(cli_ctx_is_dead(ctx)))
+ return 1;
+
+ /* an expiry of 0 means never expire. a newly created gss context
+ * may have 0 expiration while its upcall is in progress */
+ if (ctx->cc_expire == 0)
+ return 0;
+
+ /* check real expiration */
+ if (cfs_time_after(ctx->cc_expire, cfs_time_current_sec()))
+ return 0;
+
+ cli_ctx_expire(ctx);
+ return 1;
+}
+
+void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx)
+{
+ struct ptlrpc_cli_ctx *ctx = &gctx->gc_base;
+ unsigned long ctx_expiry;
+
+ if (lgss_inquire_context(gctx->gc_mechctx, &ctx_expiry)) {
+ CERROR("ctx %p(%u): unable to inquire, expire it now\n",
+ gctx, ctx->cc_vcred.vc_uid);
+ ctx_expiry = 1; /* make it expired now */
+ }
+
+ ctx->cc_expire = gss_round_ctx_expiry(ctx_expiry,
+ ctx->cc_sec->ps_flvr.sf_flags);
+
+ /* At this point this ctx might have been marked as dead by
+ * someone else, in which case nobody will make further use
+ * of it. we don't care; marking it UPTODATE helps destroy the
+ * server side context when this one is destroyed. */
+ set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+
+ if (sec_is_reverse(ctx->cc_sec)) {
+ CWARN("server installed reverse ctx %p idx "LPX64", "
+ "expiry %lu(%+lds)\n", ctx,
+ gss_handle_to_u64(&gctx->gc_handle),
+ ctx->cc_expire, ctx->cc_expire - cfs_time_current_sec());
+ } else {
+ CWARN("client refreshed ctx %p idx "LPX64" (%u->%s), "
+ "expiry %lu(%+lds)\n", ctx,
+ gss_handle_to_u64(&gctx->gc_handle),
+ ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
+ ctx->cc_expire, ctx->cc_expire - cfs_time_current_sec());
+
+ /* install reverse svc ctx for root context */
+ if (ctx->cc_vcred.vc_uid == 0)
+ gss_sec_install_rctx(ctx->cc_sec->ps_import,
+ ctx->cc_sec, ctx);
+ }
+
+ sptlrpc_cli_ctx_wakeup(ctx);
+}
+
+static void gss_cli_ctx_finalize(struct gss_cli_ctx *gctx)
+{
+ LASSERT(gctx->gc_base.cc_sec);
+
+ if (gctx->gc_mechctx) {
+ lgss_delete_sec_context(&gctx->gc_mechctx);
+ gctx->gc_mechctx = NULL;
+ }
+
+ if (!rawobj_empty(&gctx->gc_svc_handle)) {
+ /* forward ctx: mark buddy reverse svcctx soon-expire. */
+ if (!sec_is_reverse(gctx->gc_base.cc_sec) &&
+ !rawobj_empty(&gctx->gc_svc_handle))
+ gss_svc_upcall_expire_rvs_ctx(&gctx->gc_svc_handle);
+
+ rawobj_free(&gctx->gc_svc_handle);
+ }
+
+ rawobj_free(&gctx->gc_handle);
+}
+
+/*
+ * Based on the sequence number algorithm specified in RFC 2203.
+ *
+ * modified for our own problem: an arriving request has a valid sequence
+ * number, but unwrapping the request might take a long time, after which its
+ * sequence number is not valid anymore (it has fallen behind the window).
+ * this rarely happens, mostly under extreme load.
+ *
+ * note we must not check the sequence number before verifying the integrity
+ * of the incoming request, because just one attacking request with a high
+ * sequence number might cause all following requests to be dropped.
+ *
+ * so here we use a multi-phase approach: prepare 2 sequence windows, a
+ * "main window" for normal sequence numbers and a "back window" for ones
+ * that fell behind, and a 3-phase checking mechanism:
+ *  0 - before integrity verification, perform an initial sequence check in
+ *      the main window which only tests and doesn't actually set any bits.
+ *      if the sequence is above the window, or fits in the window and the
+ *      bit is 0, then accept and proceed to integrity verification;
+ *      otherwise reject this sequence.
+ *  1 - after integrity verification, check the main window again. if the
+ *      sequence is above the window, or fits in the window and the bit
+ *      is 0, then set the bit and accept; if it fits in the window but the
+ *      bit is already set, then reject; if it falls behind the window,
+ *      then proceed to phase 2.
+ *  2 - check the back window. if it is above the window, or fits in the
+ *      window and the bit is 0, then set the bit and accept; otherwise
+ *      reject.
+ *
+ * return value:
+ *   1: looks like a replay
+ *   0: is ok
+ *   -1: is a replay
+ *
+ * note phase 0 is necessary, because otherwise a replayed request whose
+ * sequence number lies between the 2 windows can't be detected.
+ *
+ * this mechanism can't totally solve the problem, but it helps far fewer
+ * valid requests be dropped.
+ */
+static
+int gss_do_check_seq(unsigned long *window, __u32 win_size, __u32 *max_seq,
+ __u32 seq_num, int phase)
+{
+ LASSERT(phase >= 0 && phase <= 2);
+
+ if (seq_num > *max_seq) {
+ /*
+ * 1. high above the window
+ */
+ if (phase == 0)
+ return 0;
+
+ if (seq_num >= *max_seq + win_size) {
+ memset(window, 0, win_size / 8);
+ *max_seq = seq_num;
+ } else {
+ while (*max_seq < seq_num) {
+ (*max_seq)++;
+ __clear_bit((*max_seq) % win_size, window);
+ }
+ }
+ __set_bit(seq_num % win_size, window);
+ } else if (seq_num + win_size <= *max_seq) {
+ /*
+ * 2. low behind the window
+ */
+ if (phase == 0 || phase == 2)
+ goto replay;
+
+ CWARN("seq %u is %u behind (size %d), check backup window\n",
+ seq_num, *max_seq - win_size - seq_num, win_size);
+ return 1;
+ } else {
+ /*
+ * 3. fit into the window
+ */
+ switch (phase) {
+ case 0:
+ if (test_bit(seq_num % win_size, window))
+ goto replay;
+ break;
+ case 1:
+ case 2:
+ if (__test_and_set_bit(seq_num % win_size, window))
+ goto replay;
+ break;
+ }
+ }
+
+ return 0;
+
+replay:
+ CERROR("seq %u (%s %s window) is a replay: max %u, winsize %d\n",
+ seq_num,
+ seq_num + win_size > *max_seq ? "in" : "behind",
+ phase == 2 ? "backup " : "main",
+ *max_seq, win_size);
+ return -1;
+}
+
+/*
+ * Based on sequence number algorithm as specified in RFC 2203.
+ *
+ * if @set == 0: initial check, don't set any bit in window
+ * if @set == 1: final check, set bit in window
+ */
+int gss_check_seq_num(struct gss_svc_seq_data *ssd, __u32 seq_num, int set)
+{
+ int rc = 0;
+
+ spin_lock(&ssd->ssd_lock);
+
+ if (set == 0) {
+ /*
+ * phase 0 testing
+ */
+ rc = gss_do_check_seq(ssd->ssd_win_main, GSS_SEQ_WIN_MAIN,
+ &ssd->ssd_max_main, seq_num, 0);
+ if (unlikely(rc))
+ gss_stat_oos_record_svc(0, 1);
+ } else {
+ /*
+ * phase 1 checking main window
+ */
+ rc = gss_do_check_seq(ssd->ssd_win_main, GSS_SEQ_WIN_MAIN,
+ &ssd->ssd_max_main, seq_num, 1);
+ switch (rc) {
+ case -1:
+ gss_stat_oos_record_svc(1, 1);
+ /* fall through */
+ case 0:
+ goto exit;
+ }
+ /*
+ * phase 2 checking back window
+ */
+ rc = gss_do_check_seq(ssd->ssd_win_back, GSS_SEQ_WIN_BACK,
+ &ssd->ssd_max_back, seq_num, 2);
+ if (rc)
+ gss_stat_oos_record_svc(2, 1);
+ else
+ gss_stat_oos_record_svc(2, 0);
+ }
+exit:
+ spin_unlock(&ssd->ssd_lock);
+ return rc;
+}
+
+/***************************************
+ * cred APIs *
+ ***************************************/
+
+static inline int gss_cli_payload(struct ptlrpc_cli_ctx *ctx,
+ int msgsize, int privacy)
+{
+ return gss_mech_payload(NULL, msgsize, privacy);
+}
+
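+/* space needed for the bulk security descriptor, plus the signature or
+ * cipher payload required by the flavor's bulk service. */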
+static int gss_cli_bulk_payload(struct ptlrpc_cli_ctx *ctx,
+ struct sptlrpc_flavor *flvr,
+ int reply, int read)
+{
+ int payload = sizeof(struct ptlrpc_bulk_sec_desc);
+
+ LASSERT(SPTLRPC_FLVR_BULK_TYPE(flvr->sf_rpc) == SPTLRPC_BULK_DEFAULT);
+
+ if ((!reply && !read) || (reply && read)) {
+ switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
+ case SPTLRPC_BULK_SVC_NULL:
+ break;
+ case SPTLRPC_BULK_SVC_INTG:
+ payload += gss_cli_payload(ctx, 0, 0);
+ break;
+ case SPTLRPC_BULK_SVC_PRIV:
+ payload += gss_cli_payload(ctx, 0, 1);
+ break;
+ case SPTLRPC_BULK_SVC_AUTH:
+ default:
+ LBUG();
+ }
+ }
+
+ return payload;
+}
+
+int gss_cli_ctx_match(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred)
+{
+ return (ctx->cc_vcred.vc_uid == vcred->vc_uid);
+}
+
+void gss_cli_ctx_flags2str(unsigned long flags, char *buf, int bufsize)
+{
+ buf[0] = '\0';
+
+ if (flags & PTLRPC_CTX_NEW)
+ strncat(buf, "new,", bufsize);
+ if (flags & PTLRPC_CTX_UPTODATE)
+ strncat(buf, "uptodate,", bufsize);
+ if (flags & PTLRPC_CTX_DEAD)
+ strncat(buf, "dead,", bufsize);
+ if (flags & PTLRPC_CTX_ERROR)
+ strncat(buf, "error,", bufsize);
+ if (flags & PTLRPC_CTX_CACHED)
+ strncat(buf, "cached,", bufsize);
+ if (flags & PTLRPC_CTX_ETERNAL)
+ strncat(buf, "eternal,", bufsize);
+ if (buf[0] == '\0')
+ strncat(buf, "-,", bufsize);
+
+ buf[strlen(buf) - 1] = '\0';
+}
+
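+/* sign an outgoing request according to the rpc flavor's service level and
+ * record the resulting on-wire length in rq_reqdata_len. */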
+int gss_cli_ctx_sign(struct ptlrpc_cli_ctx *ctx,
+ struct ptlrpc_request *req)
+{
+ struct gss_cli_ctx *gctx = ctx2gctx(ctx);
+ __u32 flags = 0, seq, svc;
+ int rc;
+
+ LASSERT(req->rq_reqbuf);
+ LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
+ LASSERT(req->rq_cli_ctx == ctx);
+
+ /* nothing to do for context negotiation RPCs */
+ if (req->rq_ctx_init)
+ return 0;
+
+ svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
+ if (req->rq_pack_bulk)
+ flags |= LUSTRE_GSS_PACK_BULK;
+ if (req->rq_pack_udesc)
+ flags |= LUSTRE_GSS_PACK_USER;
+
+redo:
+ seq = atomic_inc_return(&gctx->gc_seq);
+
+ rc = gss_sign_msg(req->rq_reqbuf, gctx->gc_mechctx,
+ ctx->cc_sec->ps_part,
+ flags, gctx->gc_proc, seq, svc,
+ &gctx->gc_handle);
+ if (rc < 0)
+ return rc;
+
+ /* gss_sign_msg() might take a long time to finish, during which
+ * more rpcs could be wrapped up and sent out. if we find too many
+ * of them we should repack this rpc, because sending it too late
+ * might lead to its sequence number falling behind the window on
+ * the server and the rpc being dropped. this also applies to
+ * gss_cli_ctx_seal().
+ *
+ * Note: null mode doesn't check the sequence number. */
+ if (svc != SPTLRPC_SVC_NULL &&
+ atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
+ int behind = atomic_read(&gctx->gc_seq) - seq;
+
+ gss_stat_oos_record_cli(behind);
+ CWARN("req %p: %u behind, retry signing\n", req, behind);
+ goto redo;
+ }
+
+ req->rq_reqdata_len = rc;
+ return 0;
+}
+
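+/* handle a PTLRPC_GSS_PROC_ERR notification from the server: fini rpcs and
+ * reverse contexts simply fail; on NO_CONTEXT/BAD_SIG the context is expired
+ * and the request is set up for resend with a replacement context. */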
+static
+int gss_cli_ctx_handle_err_notify(struct ptlrpc_cli_ctx *ctx,
+ struct ptlrpc_request *req,
+ struct gss_header *ghdr)
+{
+ struct gss_err_header *errhdr;
+ int rc;
+
+ LASSERT(ghdr->gh_proc == PTLRPC_GSS_PROC_ERR);
+
+ errhdr = (struct gss_err_header *) ghdr;
+
+ CWARN("req x"LPU64"/t"LPU64", ctx %p idx "LPX64"(%u->%s): "
+ "%sserver respond (%08x/%08x)\n",
+ req->rq_xid, req->rq_transno, ctx,
+ gss_handle_to_u64(&ctx2gctx(ctx)->gc_handle),
+ ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
+ sec_is_reverse(ctx->cc_sec) ? "reverse" : "",
+ errhdr->gh_major, errhdr->gh_minor);
+
+ /* context fini rpc, let it fail */
+ if (req->rq_ctx_fini) {
+ CWARN("context fini rpc failed\n");
+ return -EINVAL;
+ }
+
+ /* reverse sec: just return an error, don't expire this ctx because
+ * it's crucial to callback rpcs. note that if a callback rpc fails
+ * because of a bit flip during network transfer, the client will be
+ * evicted directly; more gracefully, we probably want to let it retry
+ * a number of times. */
+ if (sec_is_reverse(ctx->cc_sec))
+ return -EINVAL;
+
+ if (errhdr->gh_major != GSS_S_NO_CONTEXT &&
+ errhdr->gh_major != GSS_S_BAD_SIG)
+ return -EACCES;
+
+ /* a NO_CONTEXT error from the server might be caused by context
+ * expiry or a server reboot/failover. we try to refresh a new ctx,
+ * which is transparent to the upper layer.
+ *
+ * In some cases our gss handle may happen to be identical to another
+ * handle, since the handle itself is not fully random. In the krb5
+ * case GSS_S_BAD_SIG will be returned; other mechanisms may return
+ * other gss errors.
+ *
+ * if we add a new mechanism, make sure the correct error is
+ * returned in this case. */
+ CWARN("%s: server might lost the context, retrying\n",
+ errhdr->gh_major == GSS_S_NO_CONTEXT ? "NO_CONTEXT" : "BAD_SIG");
+
+ sptlrpc_cli_ctx_expire(ctx);
+
+ /* we need to replace the ctx right here, otherwise during
+ * resend we'll hit the logic in sptlrpc_req_refresh_ctx()
+ * which keeps the ctx with the RESEND flag, and we'll never
+ * get rid of this ctx. */
+ rc = sptlrpc_req_replace_dead_ctx(req);
+ if (rc == 0)
+ req->rq_resend = 1;
+
+ return rc;
+}
+
+int gss_cli_ctx_verify(struct ptlrpc_cli_ctx *ctx,
+ struct ptlrpc_request *req)
+{
+ struct gss_cli_ctx *gctx;
+ struct gss_header *ghdr, *reqhdr;
+ struct lustre_msg *msg = req->rq_repdata;
+ __u32 major;
+ int pack_bulk, swabbed, rc = 0;
+
+ LASSERT(req->rq_cli_ctx == ctx);
+ LASSERT(msg);
+
+ gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
+
+ /* special case for context negotiation: rq_repmsg/rq_replen are not
+ * actually used currently, but an early reply is always treated normally */
+ if (req->rq_ctx_init && !req->rq_early) {
+ req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
+ req->rq_replen = msg->lm_buflens[1];
+ return 0;
+ }
+
+ if (msg->lm_bufcount < 2 || msg->lm_bufcount > 4) {
+ CERROR("unexpected bufcount %u\n", msg->lm_bufcount);
+ return -EPROTO;
+ }
+
+ swabbed = ptlrpc_rep_need_swab(req);
+
+ ghdr = gss_swab_header(msg, 0, swabbed);
+ if (ghdr == NULL) {
+ CERROR("can't decode gss header\n");
+ return -EPROTO;
+ }
+
+ /* sanity checks */
+ reqhdr = lustre_msg_buf(msg, 0, sizeof(*reqhdr));
+ LASSERT(reqhdr);
+
+ if (ghdr->gh_version != reqhdr->gh_version) {
+ CERROR("gss version %u mismatch, expect %u\n",
+ ghdr->gh_version, reqhdr->gh_version);
+ return -EPROTO;
+ }
+
+ switch (ghdr->gh_proc) {
+ case PTLRPC_GSS_PROC_DATA:
+ pack_bulk = ghdr->gh_flags & LUSTRE_GSS_PACK_BULK;
+
+ if (!req->rq_early && !equi(req->rq_pack_bulk == 1, pack_bulk)){
+ CERROR("%s bulk flag in reply\n",
+ req->rq_pack_bulk ? "missing" : "unexpected");
+ return -EPROTO;
+ }
+
+ if (ghdr->gh_seq != reqhdr->gh_seq) {
+ CERROR("seqnum %u mismatch, expect %u\n",
+ ghdr->gh_seq, reqhdr->gh_seq);
+ return -EPROTO;
+ }
+
+ if (ghdr->gh_svc != reqhdr->gh_svc) {
+ CERROR("svc %u mismatch, expect %u\n",
+ ghdr->gh_svc, reqhdr->gh_svc);
+ return -EPROTO;
+ }
+
+ if (swabbed)
+ gss_header_swabber(ghdr);
+
+ major = gss_verify_msg(msg, gctx->gc_mechctx, reqhdr->gh_svc);
+ if (major != GSS_S_COMPLETE) {
+ CERROR("failed to verify reply: %x\n", major);
+ return -EPERM;
+ }
+
+ if (req->rq_early && reqhdr->gh_svc == SPTLRPC_SVC_NULL) {
+ __u32 cksum;
+
+ cksum = crc32_le(!(__u32) 0,
+ lustre_msg_buf(msg, 1, 0),
+ lustre_msg_buflen(msg, 1));
+ if (cksum != msg->lm_cksum) {
+ CWARN("early reply checksum mismatch: "
+ "%08x != %08x\n", cksum, msg->lm_cksum);
+ return -EPROTO;
+ }
+ }
+
+ if (pack_bulk) {
+ /* bulk checksum is right after the lustre msg */
+ if (msg->lm_bufcount < 3) {
+ CERROR("Invalid reply bufcount %u\n",
+ msg->lm_bufcount);
+ return -EPROTO;
+ }
+
+ rc = bulk_sec_desc_unpack(msg, 2, swabbed);
+ if (rc) {
+ CERROR("unpack bulk desc: %d\n", rc);
+ return rc;
+ }
+ }
+
+ req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
+ req->rq_replen = msg->lm_buflens[1];
+ break;
+ case PTLRPC_GSS_PROC_ERR:
+ if (req->rq_early) {
+ CERROR("server return error with early reply\n");
+ rc = -EPROTO;
+ } else {
+ rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
+ }
+ break;
+ default:
+ CERROR("unknown gss proc %d\n", ghdr->gh_proc);
+ rc = -EPROTO;
+ }
+
+ return rc;
+}
+
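+/* privacy mode: wrap (encrypt) the clear request buffer into the wire
+ * buffer, re-sealing with a fresh sequence number if too many rpcs were
+ * sequenced while we were wrapping. */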
+int gss_cli_ctx_seal(struct ptlrpc_cli_ctx *ctx,
+ struct ptlrpc_request *req)
+{
+ struct gss_cli_ctx *gctx;
+ rawobj_t hdrobj, msgobj, token;
+ struct gss_header *ghdr;
+ __u32 buflens[2], major;
+ int wiresize, rc;
+
+ LASSERT(req->rq_clrbuf);
+ LASSERT(req->rq_cli_ctx == ctx);
+ LASSERT(req->rq_reqlen);
+
+ gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
+
+ /* final clear data length */
+ req->rq_clrdata_len = lustre_msg_size_v2(req->rq_clrbuf->lm_bufcount,
+ req->rq_clrbuf->lm_buflens);
+
+ /* calculate wire data length */
+ buflens[0] = PTLRPC_GSS_HEADER_SIZE;
+ buflens[1] = gss_cli_payload(&gctx->gc_base, req->rq_clrdata_len, 1);
+ wiresize = lustre_msg_size_v2(2, buflens);
+
+ /* allocate wire buffer */
+ if (req->rq_pool) {
+ /* pre-allocated */
+ LASSERT(req->rq_reqbuf);
+ LASSERT(req->rq_reqbuf != req->rq_clrbuf);
+ LASSERT(req->rq_reqbuf_len >= wiresize);
+ } else {
+ OBD_ALLOC_LARGE(req->rq_reqbuf, wiresize);
+ if (!req->rq_reqbuf)
+ return -ENOMEM;
+ req->rq_reqbuf_len = wiresize;
+ }
+
+ lustre_init_msg_v2(req->rq_reqbuf, 2, buflens, NULL);
+ req->rq_reqbuf->lm_secflvr = req->rq_flvr.sf_rpc;
+
+ /* gss header */
+ ghdr = lustre_msg_buf(req->rq_reqbuf, 0, 0);
+ ghdr->gh_version = PTLRPC_GSS_VERSION;
+ ghdr->gh_sp = (__u8) ctx->cc_sec->ps_part;
+ ghdr->gh_flags = 0;
+ ghdr->gh_proc = gctx->gc_proc;
+ ghdr->gh_svc = SPTLRPC_SVC_PRIV;
+ ghdr->gh_handle.len = gctx->gc_handle.len;
+ memcpy(ghdr->gh_handle.data, gctx->gc_handle.data, gctx->gc_handle.len);
+ if (req->rq_pack_bulk)
+ ghdr->gh_flags |= LUSTRE_GSS_PACK_BULK;
+ if (req->rq_pack_udesc)
+ ghdr->gh_flags |= LUSTRE_GSS_PACK_USER;
+
+redo:
+ ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
+
+ /* buffer objects */
+ hdrobj.len = PTLRPC_GSS_HEADER_SIZE;
+ hdrobj.data = (__u8 *) ghdr;
+ msgobj.len = req->rq_clrdata_len;
+ msgobj.data = (__u8 *) req->rq_clrbuf;
+ token.len = lustre_msg_buflen(req->rq_reqbuf, 1);
+ token.data = lustre_msg_buf(req->rq_reqbuf, 1, 0);
+
+ major = lgss_wrap(gctx->gc_mechctx, &hdrobj, &msgobj,
+ req->rq_clrbuf_len, &token);
+ if (major != GSS_S_COMPLETE) {
+ CERROR("priv: wrap message error: %08x\n", major);
+ GOTO(err_free, rc = -EPERM);
+ }
+ LASSERT(token.len <= buflens[1]);
+
+ /* see explain in gss_cli_ctx_sign() */
+ if (unlikely(atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
+ GSS_SEQ_REPACK_THRESHOLD)) {
+ int behind = atomic_read(&gctx->gc_seq) - ghdr->gh_seq;
+
+ gss_stat_oos_record_cli(behind);
+ CWARN("req %p: %u behind, retry sealing\n", req, behind);
+
+ ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
+ goto redo;
+ }
+
+ /* now set the final wire data length */
+ req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, 1, token.len, 0);
+ return 0;
+
+err_free:
+ if (!req->rq_pool) {
+ OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
+ req->rq_reqbuf = NULL;
+ req->rq_reqbuf_len = 0;
+ }
+ return rc;
+}
+
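+/* privacy mode: unwrap (decrypt) the reply in place and unpack the embedded
+ * lustre message and optional bulk security descriptor. */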
+int gss_cli_ctx_unseal(struct ptlrpc_cli_ctx *ctx,
+ struct ptlrpc_request *req)
+{
+ struct gss_cli_ctx *gctx;
+ struct gss_header *ghdr;
+ struct lustre_msg *msg = req->rq_repdata;
+ int msglen, pack_bulk, swabbed, rc;
+ __u32 major;
+
+ LASSERT(req->rq_cli_ctx == ctx);
+ LASSERT(req->rq_ctx_init == 0);
+ LASSERT(msg);
+
+ gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
+ swabbed = ptlrpc_rep_need_swab(req);
+
+ ghdr = gss_swab_header(msg, 0, swabbed);
+ if (ghdr == NULL) {
+ CERROR("can't decode gss header\n");
+ return -EPROTO;
+ }
+
+ /* sanity checks */
+ if (ghdr->gh_version != PTLRPC_GSS_VERSION) {
+ CERROR("gss version %u mismatch, expect %u\n",
+ ghdr->gh_version, PTLRPC_GSS_VERSION);
+ return -EPROTO;
+ }
+
+ switch (ghdr->gh_proc) {
+ case PTLRPC_GSS_PROC_DATA:
+ pack_bulk = ghdr->gh_flags & LUSTRE_GSS_PACK_BULK;
+
+ if (!req->rq_early && !equi(req->rq_pack_bulk == 1, pack_bulk)){
+ CERROR("%s bulk flag in reply\n",
+ req->rq_pack_bulk ? "missing" : "unexpected");
+ return -EPROTO;
+ }
+
+ if (swabbed)
+ gss_header_swabber(ghdr);
+
+ /* use rq_repdata_len as the buffer size, which assumes unsealing
+ * doesn't need extra memory space. for precise control, we'd
+ * better calculate the actual buffer size as
+ * (repbuf_len - offset - repdata_len) */
+ major = gss_unseal_msg(gctx->gc_mechctx, msg,
+ &msglen, req->rq_repdata_len);
+ if (major != GSS_S_COMPLETE) {
+ CERROR("failed to unwrap reply: %x\n", major);
+ rc = -EPERM;
+ break;
+ }
+
+ swabbed = __lustre_unpack_msg(msg, msglen);
+ if (swabbed < 0) {
+ CERROR("Failed to unpack after decryption\n");
+ return -EPROTO;
+ }
+
+ if (msg->lm_bufcount < 1) {
+ CERROR("Invalid reply buffer: empty\n");
+ return -EPROTO;
+ }
+
+ if (pack_bulk) {
+ if (msg->lm_bufcount < 2) {
+ CERROR("bufcount %u: missing bulk sec desc\n",
+ msg->lm_bufcount);
+ return -EPROTO;
+ }
+
+ /* bulk checksum is the last segment */
+ if (bulk_sec_desc_unpack(msg, msg->lm_bufcount - 1,
+ swabbed))
+ return -EPROTO;
+ }
+
+ req->rq_repmsg = lustre_msg_buf(msg, 0, 0);
+ req->rq_replen = msg->lm_buflens[0];
+
+ rc = 0;
+ break;
+ case PTLRPC_GSS_PROC_ERR:
+ if (req->rq_early) {
+ CERROR("server return error with early reply\n");
+ rc = -EPROTO;
+ } else {
+ rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
+ }
+ break;
+ default:
+ CERROR("unexpected proc %d\n", ghdr->gh_proc);
+ rc = -EPERM;
+ }
+
+ return rc;
+}
+
+/*********************************************
+ * reverse context installation *
+ *********************************************/
+
+static inline
+int gss_install_rvs_svc_ctx(struct obd_import *imp,
+ struct gss_sec *gsec,
+ struct gss_cli_ctx *gctx)
+{
+ return gss_svc_upcall_install_rvs_ctx(imp, gsec, gctx);
+}
+
+/*********************************************
+ * GSS security APIs *
+ *********************************************/
+int gss_sec_create_common(struct gss_sec *gsec,
+ struct ptlrpc_sec_policy *policy,
+ struct obd_import *imp,
+ struct ptlrpc_svc_ctx *svcctx,
+ struct sptlrpc_flavor *sf)
+{
+ struct ptlrpc_sec *sec;
+
+ LASSERT(imp);
+ LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_GSS);
+
+ gsec->gs_mech = lgss_subflavor_to_mech(
+ SPTLRPC_FLVR_BASE_SUB(sf->sf_rpc));
+ if (!gsec->gs_mech) {
+ CERROR("gss backend 0x%x not found\n",
+ SPTLRPC_FLVR_BASE_SUB(sf->sf_rpc));
+ return -EOPNOTSUPP;
+ }
+
+ spin_lock_init(&gsec->gs_lock);
+ gsec->gs_rvs_hdl = 0ULL;
+
+ /* initialize upper ptlrpc_sec */
+ sec = &gsec->gs_base;
+ sec->ps_policy = policy;
+ atomic_set(&sec->ps_refcount, 0);
+ atomic_set(&sec->ps_nctx, 0);
+ sec->ps_id = sptlrpc_get_next_secid();
+ sec->ps_flvr = *sf;
+ sec->ps_import = class_import_get(imp);
+ spin_lock_init(&sec->ps_lock);
+ INIT_LIST_HEAD(&sec->ps_gc_list);
+
+ if (!svcctx) {
+ sec->ps_gc_interval = GSS_GC_INTERVAL;
+ } else {
+ LASSERT(sec_is_reverse(sec));
+
+ /* never do gc on reverse sec */
+ sec->ps_gc_interval = 0;
+ }
+
+ if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
+ sptlrpc_enc_pool_add_user();
+
+ CDEBUG(D_SEC, "create %s%s@%p\n", (svcctx ? "reverse " : ""),
+ policy->sp_name, gsec);
+ return 0;
+}
+
+void gss_sec_destroy_common(struct gss_sec *gsec)
+{
+ struct ptlrpc_sec *sec = &gsec->gs_base;
+
+ LASSERT(sec->ps_import);
+ LASSERT(atomic_read(&sec->ps_refcount) == 0);
+ LASSERT(atomic_read(&sec->ps_nctx) == 0);
+
+ if (gsec->gs_mech) {
+ lgss_mech_put(gsec->gs_mech);
+ gsec->gs_mech = NULL;
+ }
+
+ class_import_put(sec->ps_import);
+
+ if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
+ sptlrpc_enc_pool_del_user();
+}
+
+void gss_sec_kill(struct ptlrpc_sec *sec)
+{
+ sec->ps_dying = 1;
+}
+
+int gss_cli_ctx_init_common(struct ptlrpc_sec *sec,
+ struct ptlrpc_cli_ctx *ctx,
+ struct ptlrpc_ctx_ops *ctxops,
+ struct vfs_cred *vcred)
+{
+ struct gss_cli_ctx *gctx = ctx2gctx(ctx);
+
+ gctx->gc_win = 0;
+ atomic_set(&gctx->gc_seq, 0);
+
+ INIT_HLIST_NODE(&ctx->cc_cache);
+ atomic_set(&ctx->cc_refcount, 0);
+ ctx->cc_sec = sec;
+ ctx->cc_ops = ctxops;
+ ctx->cc_expire = 0;
+ ctx->cc_flags = PTLRPC_CTX_NEW;
+ ctx->cc_vcred = *vcred;
+ spin_lock_init(&ctx->cc_lock);
+ INIT_LIST_HEAD(&ctx->cc_req_list);
+ INIT_LIST_HEAD(&ctx->cc_gc_chain);
+
+ /* take a ref on the owning sec, balanced when the ctx is destroyed */
+ atomic_inc(&sec->ps_refcount);
+ /* statistic only */
+ atomic_inc(&sec->ps_nctx);
+
+ CDEBUG(D_SEC, "%s@%p: create ctx %p(%u->%s)\n",
+ sec->ps_policy->sp_name, ctx->cc_sec,
+ ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+ return 0;
+}
+
+/*
+ * return value:
+ * 1: the context has been taken care of by someone else
+ * 0: proceed to really destroy the context locally
+ */
+int gss_cli_ctx_fini_common(struct ptlrpc_sec *sec,
+ struct ptlrpc_cli_ctx *ctx)
+{
+ struct gss_cli_ctx *gctx = ctx2gctx(ctx);
+
+ LASSERT(atomic_read(&sec->ps_nctx) > 0);
+ LASSERT(atomic_read(&ctx->cc_refcount) == 0);
+ LASSERT(ctx->cc_sec == sec);
+
+ /*
+ * remove the UPTODATE flag of a reverse ctx so we won't send a fini rpc;
+ * this avoids potential problems with the client side reverse svc ctx
+ * being mis-destroyed in various recovery scenarios. in any case the
+ * client can manage its reverse ctx well by associating it with its
+ * buddy ctx.
+ */
+ if (sec_is_reverse(sec))
+ ctx->cc_flags &= ~PTLRPC_CTX_UPTODATE;
+
+ if (gctx->gc_mechctx) {
+ /* the final context fini rpc will use this ctx too, and it's
+ * asynchronous, finished by request_out_callback(). so we add a
+ * refcount; whoever finally drops the refcount to 0 is responsible
+ * for the rest of the destroy. */
+ atomic_inc(&ctx->cc_refcount);
+
+ gss_do_ctx_fini_rpc(gctx);
+ gss_cli_ctx_finalize(gctx);
+
+ if (!atomic_dec_and_test(&ctx->cc_refcount))
+ return 1;
+ }
+
+ if (sec_is_reverse(sec))
+ CWARN("reverse sec %p: destroy ctx %p\n",
+ ctx->cc_sec, ctx);
+ else
+ CWARN("%s@%p: destroy ctx %p(%u->%s)\n",
+ sec->ps_policy->sp_name, ctx->cc_sec,
+ ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+
+ return 0;
+}
+
+static
+int gss_alloc_reqbuf_intg(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req,
+ int svc, int msgsize)
+{
+ int bufsize, txtsize;
+ int bufcnt = 2;
+ __u32 buflens[5];
+
+ /*
+ * on-wire data layout:
+ * - gss header
+ * - lustre message
+ * - user descriptor (optional)
+ * - bulk sec descriptor (optional)
+ * - signature (optional)
+ * - svc == NULL: NULL
+ * - svc == AUTH: signature of gss header
+ * - svc == INTG: signature of all above
+ *
+ * if this is context negotiation, reserve fixed space
+ * at the last (signature) segment regardless of svc mode.
+ */
+
+ buflens[0] = PTLRPC_GSS_HEADER_SIZE;
+ txtsize = buflens[0];
+
+ buflens[1] = msgsize;
+ if (svc == SPTLRPC_SVC_INTG)
+ txtsize += buflens[1];
+
+ if (req->rq_pack_udesc) {
+ buflens[bufcnt] = sptlrpc_current_user_desc_size();
+ if (svc == SPTLRPC_SVC_INTG)
+ txtsize += buflens[bufcnt];
+ bufcnt++;
+ }
+
+ if (req->rq_pack_bulk) {
+ buflens[bufcnt] = gss_cli_bulk_payload(req->rq_cli_ctx,
+ &req->rq_flvr,
+ 0, req->rq_bulk_read);
+ if (svc == SPTLRPC_SVC_INTG)
+ txtsize += buflens[bufcnt];
+ bufcnt++;
+ }
+
+ if (req->rq_ctx_init)
+ buflens[bufcnt++] = GSS_CTX_INIT_MAX_LEN;
+ else if (svc != SPTLRPC_SVC_NULL)
+ buflens[bufcnt++] = gss_cli_payload(req->rq_cli_ctx, txtsize, 0);
+
+ bufsize = lustre_msg_size_v2(bufcnt, buflens);
+
+ if (!req->rq_reqbuf) {
+ bufsize = size_roundup_power2(bufsize);
+
+ OBD_ALLOC_LARGE(req->rq_reqbuf, bufsize);
+ if (!req->rq_reqbuf)
+ return -ENOMEM;
+
+ req->rq_reqbuf_len = bufsize;
+ } else {
+ LASSERT(req->rq_pool);
+ LASSERT(req->rq_reqbuf_len >= bufsize);
+ memset(req->rq_reqbuf, 0, bufsize);
+ }
+
+ lustre_init_msg_v2(req->rq_reqbuf, bufcnt, buflens, NULL);
+ req->rq_reqbuf->lm_secflvr = req->rq_flvr.sf_rpc;
+
+ req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, msgsize);
+ LASSERT(req->rq_reqmsg);
+
+ /* pack user desc here, later we might leave current user's process */
+ if (req->rq_pack_udesc)
+ sptlrpc_pack_user_desc(req->rq_reqbuf, 2);
+
+ return 0;
+}
+
+static
+int gss_alloc_reqbuf_priv(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req,
+ int msgsize)
+{
+ __u32 ibuflens[3], wbuflens[2];
+ int ibufcnt;
+ int clearsize, wiresize;
+
+ LASSERT(req->rq_clrbuf == NULL);
+ LASSERT(req->rq_clrbuf_len == 0);
+
+ /* Inner (clear) buffers
+ * - lustre message
+ * - user descriptor (optional)
+ * - bulk checksum (optional)
+ */
+ ibufcnt = 1;
+ ibuflens[0] = msgsize;
+
+ if (req->rq_pack_udesc)
+ ibuflens[ibufcnt++] = sptlrpc_current_user_desc_size();
+ if (req->rq_pack_bulk)
+ ibuflens[ibufcnt++] = gss_cli_bulk_payload(req->rq_cli_ctx,
+ &req->rq_flvr, 0,
+ req->rq_bulk_read);
+
+ clearsize = lustre_msg_size_v2(ibufcnt, ibuflens);
+ /* to allow appending padding during encryption */
+ clearsize += GSS_MAX_CIPHER_BLOCK;
+
+ /* Wrapper (wire) buffers
+ * - gss header
+ * - cipher text
+ */
+ wbuflens[0] = PTLRPC_GSS_HEADER_SIZE;
+ wbuflens[1] = gss_cli_payload(req->rq_cli_ctx, clearsize, 1);
+ wiresize = lustre_msg_size_v2(2, wbuflens);
+
+ if (req->rq_pool) {
+ /* rq_reqbuf is preallocated */
+ LASSERT(req->rq_reqbuf);
+ LASSERT(req->rq_reqbuf_len >= wiresize);
+
+ memset(req->rq_reqbuf, 0, req->rq_reqbuf_len);
+
+ /* if the pre-allocated buffer is big enough, we just pack
+ * both clear buf & request buf in it, to avoid more alloc. */
+ if (clearsize + wiresize <= req->rq_reqbuf_len) {
+ req->rq_clrbuf =
+ (void *) (((char *) req->rq_reqbuf) + wiresize);
+ } else {
+ CWARN("pre-allocated buf size %d is not enough for "
+ "both clear (%d) and cipher (%d) text, proceed "
+ "with extra allocation\n", req->rq_reqbuf_len,
+ clearsize, wiresize);
+ }
+ }
+
+ if (!req->rq_clrbuf) {
+ clearsize = size_roundup_power2(clearsize);
+
+ OBD_ALLOC_LARGE(req->rq_clrbuf, clearsize);
+ if (!req->rq_clrbuf)
+ return -ENOMEM;
+ }
+ req->rq_clrbuf_len = clearsize;
+
+ lustre_init_msg_v2(req->rq_clrbuf, ibufcnt, ibuflens, NULL);
+ req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, msgsize);
+
+ if (req->rq_pack_udesc)
+ sptlrpc_pack_user_desc(req->rq_clrbuf, 1);
+
+ return 0;
+}
+
+/*
+ * NOTE: any change of request buffer allocation should also consider
+ * changing enlarge_reqbuf() series functions.
+ */
+int gss_alloc_reqbuf(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req,
+ int msgsize)
+{
+ int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
+
+ LASSERT(!req->rq_pack_bulk ||
+ (req->rq_bulk_read || req->rq_bulk_write));
+
+ switch (svc) {
+ case SPTLRPC_SVC_NULL:
+ case SPTLRPC_SVC_AUTH:
+ case SPTLRPC_SVC_INTG:
+ return gss_alloc_reqbuf_intg(sec, req, svc, msgsize);
+ case SPTLRPC_SVC_PRIV:
+ return gss_alloc_reqbuf_priv(sec, req, msgsize);
+ default:
+ LASSERTF(0, "bad rpc flavor %x\n", req->rq_flvr.sf_rpc);
+ return 0;
+ }
+}
+
+void gss_free_reqbuf(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req)
+{
+ int privacy;
+
+ LASSERT(!req->rq_pool || req->rq_reqbuf);
+ privacy = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) == SPTLRPC_SVC_PRIV;
+
+ if (!req->rq_clrbuf)
+ goto release_reqbuf;
+
+ /* release clear buffer */
+ LASSERT(privacy);
+ LASSERT(req->rq_clrbuf_len);
+
+ if (req->rq_pool == NULL ||
+ req->rq_clrbuf < req->rq_reqbuf ||
+ (char *) req->rq_clrbuf >=
+ (char *) req->rq_reqbuf + req->rq_reqbuf_len)
+ OBD_FREE_LARGE(req->rq_clrbuf, req->rq_clrbuf_len);
+
+ req->rq_clrbuf = NULL;
+ req->rq_clrbuf_len = 0;
+
+release_reqbuf:
+ if (!req->rq_pool && req->rq_reqbuf) {
+ LASSERT(req->rq_reqbuf_len);
+
+ OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
+ req->rq_reqbuf = NULL;
+ req->rq_reqbuf_len = 0;
+ }
+}
+
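+/* allocate the reply buffer, rounding the size up to a power of two. */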
+static int do_alloc_repbuf(struct ptlrpc_request *req, int bufsize)
+{
+ bufsize = size_roundup_power2(bufsize);
+
+ OBD_ALLOC_LARGE(req->rq_repbuf, bufsize);
+ if (!req->rq_repbuf)
+ return -ENOMEM;
+
+ req->rq_repbuf_len = bufsize;
+ return 0;
+}
+
+static
+int gss_alloc_repbuf_intg(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req,
+ int svc, int msgsize)
+{
+ int txtsize;
+ __u32 buflens[4];
+ int bufcnt = 2;
+ int alloc_size;
+
+ /*
+ * on-wire data layout:
+ * - gss header
+ * - lustre message
+ * - bulk sec descriptor (optional)
+ * - signature (optional)
+ * - svc == NULL: NULL
+ * - svc == AUTH: signature of gss header
+ * - svc == INTG: signature of all above
+ *
+ * if this is context negotiation, reserve fixed space
+ * at the last (signature) segment regardless of svc mode.
+ */
+
+ buflens[0] = PTLRPC_GSS_HEADER_SIZE;
+ txtsize = buflens[0];
+
+ buflens[1] = msgsize;
+ if (svc == SPTLRPC_SVC_INTG)
+ txtsize += buflens[1];
+
+ if (req->rq_pack_bulk) {
+ buflens[bufcnt] = gss_cli_bulk_payload(req->rq_cli_ctx,
+ &req->rq_flvr,
+ 1, req->rq_bulk_read);
+ if (svc == SPTLRPC_SVC_INTG)
+ txtsize += buflens[bufcnt];
+ bufcnt++;
+ }
+
+ if (req->rq_ctx_init)
+ buflens[bufcnt++] = GSS_CTX_INIT_MAX_LEN;
+ else if (svc != SPTLRPC_SVC_NULL)
+ buflens[bufcnt++] = gss_cli_payload(req->rq_cli_ctx, txtsize, 0);
+
+ alloc_size = lustre_msg_size_v2(bufcnt, buflens);
+
+ /* add space for early reply */
+ alloc_size += gss_at_reply_off_integ;
+
+ return do_alloc_repbuf(req, alloc_size);
+}
+
+static
+int gss_alloc_repbuf_priv(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req,
+ int msgsize)
+{
+ int txtsize;
+ __u32 buflens[2];
+ int bufcnt;
+ int alloc_size;
+
+ /* inner buffers */
+ bufcnt = 1;
+ buflens[0] = msgsize;
+
+ if (req->rq_pack_bulk)
+ buflens[bufcnt++] = gss_cli_bulk_payload(req->rq_cli_ctx,
+ &req->rq_flvr,
+ 1, req->rq_bulk_read);
+ txtsize = lustre_msg_size_v2(bufcnt, buflens);
+ txtsize += GSS_MAX_CIPHER_BLOCK;
+
+ /* wrapper buffers */
+ bufcnt = 2;
+ buflens[0] = PTLRPC_GSS_HEADER_SIZE;
+ buflens[1] = gss_cli_payload(req->rq_cli_ctx, txtsize, 1);
+
+ alloc_size = lustre_msg_size_v2(bufcnt, buflens);
+ /* add space for early reply */
+ alloc_size += gss_at_reply_off_priv;
+
+ return do_alloc_repbuf(req, alloc_size);
+}
+
+int gss_alloc_repbuf(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req,
+ int msgsize)
+{
+ int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
+
+ LASSERT(!req->rq_pack_bulk ||
+ (req->rq_bulk_read || req->rq_bulk_write));
+
+ switch (svc) {
+ case SPTLRPC_SVC_NULL:
+ case SPTLRPC_SVC_AUTH:
+ case SPTLRPC_SVC_INTG:
+ return gss_alloc_repbuf_intg(sec, req, svc, msgsize);
+ case SPTLRPC_SVC_PRIV:
+ return gss_alloc_repbuf_priv(sec, req, msgsize);
+ default:
+ LASSERTF(0, "bad rpc flavor %x\n", req->rq_flvr.sf_rpc);
+ return 0;
+ }
+}
+
+void gss_free_repbuf(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req)
+{
+ OBD_FREE_LARGE(req->rq_repbuf, req->rq_repbuf_len);
+ req->rq_repbuf = NULL;
+ req->rq_repbuf_len = 0;
+ req->rq_repdata = NULL;
+ req->rq_repdata_len = 0;
+}
+
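+/*
+ * Compute what the total size of @msg would be if @segment were enlarged
+ * to @newsize.  The segment length is swapped in temporarily and then
+ * restored, so the message itself is left unmodified;
+ * get_enlarged_msgsize2() does the same for two segments at once.
+ */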
+static int get_enlarged_msgsize(struct lustre_msg *msg,
+ int segment, int newsize)
+{
+ int save, newmsg_size;
+
+ LASSERT(newsize >= msg->lm_buflens[segment]);
+
+ save = msg->lm_buflens[segment];
+ msg->lm_buflens[segment] = newsize;
+ newmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
+ msg->lm_buflens[segment] = save;
+
+ return newmsg_size;
+}
+
+static int get_enlarged_msgsize2(struct lustre_msg *msg,
+ int segment1, int newsize1,
+ int segment2, int newsize2)
+{
+ int save1, save2, newmsg_size;
+
+ LASSERT(newsize1 >= msg->lm_buflens[segment1]);
+ LASSERT(newsize2 >= msg->lm_buflens[segment2]);
+
+ save1 = msg->lm_buflens[segment1];
+ save2 = msg->lm_buflens[segment2];
+ msg->lm_buflens[segment1] = newsize1;
+ msg->lm_buflens[segment2] = newsize2;
+ newmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
+ msg->lm_buflens[segment1] = save1;
+ msg->lm_buflens[segment2] = save2;
+
+ return newmsg_size;
+}
+
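+/*
+ * Enlarge @segment of the embedded request message for the sign modes:
+ * recompute the embedded and wrapper sizes (including the trailing
+ * signature), reallocate a bigger rq_reqbuf if the current one is too
+ * small (pool requests must already fit), then enlarge the segments in
+ * place from the wrapper down to the embedded message.
+ */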
+static
+int gss_enlarge_reqbuf_intg(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req,
+ int svc,
+ int segment, int newsize)
+{
+ struct lustre_msg *newbuf;
+ int txtsize, sigsize = 0, i;
+ int newmsg_size, newbuf_size;
+
+ /*
+ * gss header is at seg 0;
+ * embedded msg is at seg 1;
+ * signature (if any) is at the last seg
+ */
+ LASSERT(req->rq_reqbuf);
+ LASSERT(req->rq_reqbuf_len > req->rq_reqlen);
+ LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
+ LASSERT(lustre_msg_buf(req->rq_reqbuf, 1, 0) == req->rq_reqmsg);
+
+ /* 1. compute new embedded msg size */
+ newmsg_size = get_enlarged_msgsize(req->rq_reqmsg, segment, newsize);
+ LASSERT(newmsg_size >= req->rq_reqbuf->lm_buflens[1]);
+
+ /* 2. compute new wrapper msg size */
+ if (svc == SPTLRPC_SVC_NULL) {
+ /* no signature, get size directly */
+ newbuf_size = get_enlarged_msgsize(req->rq_reqbuf,
+ 1, newmsg_size);
+ } else {
+ txtsize = req->rq_reqbuf->lm_buflens[0];
+
+ if (svc == SPTLRPC_SVC_INTG) {
+ for (i = 1; i < req->rq_reqbuf->lm_bufcount; i++)
+ txtsize += req->rq_reqbuf->lm_buflens[i];
+ txtsize += newmsg_size - req->rq_reqbuf->lm_buflens[1];
+ }
+
+ sigsize = gss_cli_payload(req->rq_cli_ctx, txtsize, 0);
+ LASSERT(sigsize >= msg_last_seglen(req->rq_reqbuf));
+
+ newbuf_size = get_enlarged_msgsize2(
+ req->rq_reqbuf,
+ 1, newmsg_size,
+ msg_last_segidx(req->rq_reqbuf),
+ sigsize);
+ }
+
+ /* request from pool should always have enough buffer */
+ LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);
+
+ if (req->rq_reqbuf_len < newbuf_size) {
+ newbuf_size = size_roundup_power2(newbuf_size);
+
+ OBD_ALLOC_LARGE(newbuf, newbuf_size);
+ if (newbuf == NULL)
+ return -ENOMEM;
+
+ memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
+
+ OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
+ req->rq_reqbuf = newbuf;
+ req->rq_reqbuf_len = newbuf_size;
+ req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, 0);
+ }
+
+ /* do enlargement, from wrapper to embedded, from end to begin */
+ if (svc != SPTLRPC_SVC_NULL)
+ _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf,
+ msg_last_segidx(req->rq_reqbuf),
+ sigsize);
+
+ _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, 1, newmsg_size);
+ _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
+
+ req->rq_reqlen = newmsg_size;
+ return 0;
+}
+
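+/*
+ * Enlarge @segment of the embedded request message for privacy mode.
+ * Here the embedded message lives in the clear buffer: recompute the
+ * clear and cipher buffer sizes, try to keep using a pre-allocated pool
+ * buffer by moving the clear text, otherwise allocate a new clear
+ * buffer, then enlarge the segments in place.
+ */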
+static
+int gss_enlarge_reqbuf_priv(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req,
+ int segment, int newsize)
+{
+ struct lustre_msg *newclrbuf;
+ int newmsg_size, newclrbuf_size, newcipbuf_size;
+ __u32 buflens[3];
+
+ /*
+ * embedded msg is at seg 0 of clear buffer;
+ * cipher text is at seg 2 of cipher buffer;
+ */
+ LASSERT(req->rq_pool ||
+ (req->rq_reqbuf == NULL && req->rq_reqbuf_len == 0));
+ LASSERT(req->rq_reqbuf == NULL ||
+ (req->rq_pool && req->rq_reqbuf->lm_bufcount == 3));
+ LASSERT(req->rq_clrbuf);
+ LASSERT(req->rq_clrbuf_len > req->rq_reqlen);
+ LASSERT(lustre_msg_buf(req->rq_clrbuf, 0, 0) == req->rq_reqmsg);
+
+ /* compute new embedded msg size */
+ newmsg_size = get_enlarged_msgsize(req->rq_reqmsg, segment, newsize);
+
+ /* compute new clear buffer size */
+ newclrbuf_size = get_enlarged_msgsize(req->rq_clrbuf, 0, newmsg_size);
+ newclrbuf_size += GSS_MAX_CIPHER_BLOCK;
+
+ /* compute new cipher buffer size */
+ buflens[0] = PTLRPC_GSS_HEADER_SIZE;
+ buflens[1] = gss_cli_payload(req->rq_cli_ctx, buflens[0], 0);
+ buflens[2] = gss_cli_payload(req->rq_cli_ctx, newclrbuf_size, 1);
+ newcipbuf_size = lustre_msg_size_v2(3, buflens);
+
+ /* handle the case where both the clear buffer and the cipher buffer
+ * were placed into a single pre-allocated buffer. */
+ if (unlikely(req->rq_pool) &&
+ req->rq_clrbuf >= req->rq_reqbuf &&
+ (char *) req->rq_clrbuf <
+ (char *) req->rq_reqbuf + req->rq_reqbuf_len) {
+ /* best case: we still fit into the
+ * pre-allocated buffer. */
+ if (newclrbuf_size + newcipbuf_size <= req->rq_reqbuf_len) {
+ void *src, *dst;
+
+ /* move clear text backward. */
+ src = req->rq_clrbuf;
+ dst = (char *) req->rq_reqbuf + newcipbuf_size;
+
+ memmove(dst, src, req->rq_clrbuf_len);
+
+ req->rq_clrbuf = (struct lustre_msg *) dst;
+ req->rq_clrbuf_len = newclrbuf_size;
+ req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);
+ } else {
+ /* sadly we have to split out the clear buffer */
+ LASSERT(req->rq_reqbuf_len >= newcipbuf_size);
+ LASSERT(req->rq_clrbuf_len < newclrbuf_size);
+ }
+ }
+
+ if (req->rq_clrbuf_len < newclrbuf_size) {
+ newclrbuf_size = size_roundup_power2(newclrbuf_size);
+
+ OBD_ALLOC_LARGE(newclrbuf, newclrbuf_size);
+ if (newclrbuf == NULL)
+ return -ENOMEM;
+
+ memcpy(newclrbuf, req->rq_clrbuf, req->rq_clrbuf_len);
+
+ if (req->rq_reqbuf == NULL ||
+ req->rq_clrbuf < req->rq_reqbuf ||
+ (char *) req->rq_clrbuf >=
+ (char *) req->rq_reqbuf + req->rq_reqbuf_len) {
+ OBD_FREE_LARGE(req->rq_clrbuf, req->rq_clrbuf_len);
+ }
+
+ req->rq_clrbuf = newclrbuf;
+ req->rq_clrbuf_len = newclrbuf_size;
+ req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);
+ }
+
+ _sptlrpc_enlarge_msg_inplace(req->rq_clrbuf, 0, newmsg_size);
+ _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
+ req->rq_reqlen = newmsg_size;
+
+ return 0;
+}
+
+int gss_enlarge_reqbuf(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req,
+ int segment, int newsize)
+{
+ int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
+
+ LASSERT(!req->rq_ctx_init && !req->rq_ctx_fini);
+
+ switch (svc) {
+ case SPTLRPC_SVC_NULL:
+ case SPTLRPC_SVC_AUTH:
+ case SPTLRPC_SVC_INTG:
+ return gss_enlarge_reqbuf_intg(sec, req, svc, segment, newsize);
+ case SPTLRPC_SVC_PRIV:
+ return gss_enlarge_reqbuf_priv(sec, req, segment, newsize);
+ default:
+ LASSERTF(0, "bad rpc flavor %x\n", req->rq_flvr.sf_rpc);
+ return 0;
+ }
+}
+
+int gss_sec_install_rctx(struct obd_import *imp,
+ struct ptlrpc_sec *sec,
+ struct ptlrpc_cli_ctx *ctx)
+{
+ struct gss_sec *gsec;
+ struct gss_cli_ctx *gctx;
+ int rc;
+
+ gsec = container_of(sec, struct gss_sec, gs_base);
+ gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
+
+ rc = gss_install_rvs_svc_ctx(imp, gsec, gctx);
+ return rc;
+}
+
+/********************************************
+ * server side API *
+ ********************************************/
+
+static inline
+int gss_svc_reqctx_is_special(struct gss_svc_reqctx *grctx)
+{
+ LASSERT(grctx);
+ return (grctx->src_init || grctx->src_init_continue ||
+ grctx->src_err_notify);
+}
+
+static
+void gss_svc_reqctx_free(struct gss_svc_reqctx *grctx)
+{
+ if (grctx->src_ctx)
+ gss_svc_upcall_put_ctx(grctx->src_ctx);
+
+ sptlrpc_policy_put(grctx->src_base.sc_policy);
+ OBD_FREE_PTR(grctx);
+}
+
+static inline
+void gss_svc_reqctx_addref(struct gss_svc_reqctx *grctx)
+{
+ LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
+ atomic_inc(&grctx->src_base.sc_refcount);
+}
+
+static inline
+void gss_svc_reqctx_decref(struct gss_svc_reqctx *grctx)
+{
+ LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
+
+ if (atomic_dec_and_test(&grctx->src_base.sc_refcount))
+ gss_svc_reqctx_free(grctx);
+}
+
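+/*
+ * Sign the reply in rs->rs_repbuf with the server-side gss context.  The
+ * embedded message is shrunk first if the upper layer shrank the reply;
+ * the signed on-wire length goes to rs_repdata_len and the reply offset
+ * is set up (an early reply with the NULL service only records a
+ * checksum of the embedded message).
+ */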
+static
+int gss_svc_sign(struct ptlrpc_request *req,
+ struct ptlrpc_reply_state *rs,
+ struct gss_svc_reqctx *grctx,
+ __u32 svc)
+{
+ __u32 flags = 0;
+ int rc;
+
+ LASSERT(rs->rs_msg == lustre_msg_buf(rs->rs_repbuf, 1, 0));
+
+ /* embedded lustre_msg might have been shrunk */
+ if (req->rq_replen != rs->rs_repbuf->lm_buflens[1])
+ lustre_shrink_msg(rs->rs_repbuf, 1, req->rq_replen, 1);
+
+ if (req->rq_pack_bulk)
+ flags |= LUSTRE_GSS_PACK_BULK;
+
+ rc = gss_sign_msg(rs->rs_repbuf, grctx->src_ctx->gsc_mechctx,
+ LUSTRE_SP_ANY, flags, PTLRPC_GSS_PROC_DATA,
+ grctx->src_wirectx.gw_seq, svc, NULL);
+ if (rc < 0)
+ return rc;
+
+ rs->rs_repdata_len = rc;
+
+ if (likely(req->rq_packed_final)) {
+ if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
+ req->rq_reply_off = gss_at_reply_off_integ;
+ else
+ req->rq_reply_off = 0;
+ } else {
+ if (svc == SPTLRPC_SVC_NULL)
+ rs->rs_repbuf->lm_cksum = crc32_le(!(__u32) 0,
+ lustre_msg_buf(rs->rs_repbuf, 1, 0),
+ lustre_msg_buflen(rs->rs_repbuf, 1));
+ req->rq_reply_off = 0;
+ }
+
+ return 0;
+}
+
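+/*
+ * Pack an error notification reply carrying the given gss major/minor
+ * status, and mark the request context (src_err_notify) so that
+ * gss_svc_authorize() treats the reply as special.
+ */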
+int gss_pack_err_notify(struct ptlrpc_request *req, __u32 major, __u32 minor)
+{
+ struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
+ struct ptlrpc_reply_state *rs;
+ struct gss_err_header *ghdr;
+ int replen = sizeof(struct ptlrpc_body);
+ int rc;
+
+ /*
+  * if (OBD_FAIL_CHECK_ORSET(OBD_FAIL_SVCGSS_ERR_NOTIFY, OBD_FAIL_ONCE))
+  *	return -EINVAL;
+  */
+
+ grctx->src_err_notify = 1;
+ grctx->src_reserve_len = 0;
+
+ rc = lustre_pack_reply_v2(req, 1, &replen, NULL, 0);
+ if (rc) {
+ CERROR("could not pack reply, err %d\n", rc);
+ return rc;
+ }
+
+ /* gss hdr */
+ rs = req->rq_reply_state;
+ LASSERT(rs->rs_repbuf->lm_buflens[1] >= sizeof(*ghdr));
+ ghdr = lustre_msg_buf(rs->rs_repbuf, 0, 0);
+ ghdr->gh_version = PTLRPC_GSS_VERSION;
+ ghdr->gh_flags = 0;
+ ghdr->gh_proc = PTLRPC_GSS_PROC_ERR;
+ ghdr->gh_major = major;
+ ghdr->gh_minor = minor;
+ ghdr->gh_handle.len = 0; /* fake context handle */
+
+ rs->rs_repdata_len = lustre_msg_size_v2(rs->rs_repbuf->lm_bufcount,
+ rs->rs_repbuf->lm_buflens);
+
+ CDEBUG(D_SEC, "prepare gss error notify(0x%x/0x%x) to %s\n",
+ major, minor, libcfs_nid2str(req->rq_peer.nid));
+ return 0;
+}
+
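+/*
+ * Handle a context init (or continue-init) request: sanity check the
+ * wire context and buffer layout, extract the lustre service type,
+ * target uuid, reverse handle and init token from the last segment,
+ * look up the target device, and pass everything to the svc upcall.
+ * An optional user descriptor is unpacked afterwards.
+ */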
+static
+int gss_svc_handle_init(struct ptlrpc_request *req,
+ struct gss_wire_ctx *gw)
+{
+ struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
+ struct lustre_msg *reqbuf = req->rq_reqbuf;
+ struct obd_uuid *uuid;
+ struct obd_device *target;
+ rawobj_t uuid_obj, rvs_hdl, in_token;
+ __u32 lustre_svc;
+ __u32 *secdata, seclen;
+ int swabbed, rc;
+
+ CDEBUG(D_SEC, "processing gss init(%d) request from %s\n", gw->gw_proc,
+ libcfs_nid2str(req->rq_peer.nid));
+
+ req->rq_ctx_init = 1;
+
+ if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
+ CERROR("unexpected bulk flag\n");
+ return SECSVC_DROP;
+ }
+
+ if (gw->gw_proc == PTLRPC_GSS_PROC_INIT && gw->gw_handle.len != 0) {
+ CERROR("proc %u: invalid handle length %u\n",
+ gw->gw_proc, gw->gw_handle.len);
+ return SECSVC_DROP;
+ }
+
+ if (reqbuf->lm_bufcount < 3 || reqbuf->lm_bufcount > 4) {
+ CERROR("Invalid bufcount %d\n", reqbuf->lm_bufcount);
+ return SECSVC_DROP;
+ }
+
+ swabbed = ptlrpc_req_need_swab(req);
+
+ /* ctx initiate payload is in last segment */
+ secdata = lustre_msg_buf(reqbuf, reqbuf->lm_bufcount - 1, 0);
+ seclen = reqbuf->lm_buflens[reqbuf->lm_bufcount - 1];
+
+ if (seclen < 4 + 4) {
+ CERROR("sec size %d too small\n", seclen);
+ return SECSVC_DROP;
+ }
+
+ /* lustre svc type */
+ lustre_svc = le32_to_cpu(*secdata++);
+ seclen -= 4;
+
+ /* extract the target uuid; note this code is somewhat fragile
+ * because it touches the internal structure of obd_uuid */
+ if (rawobj_extract(&uuid_obj, &secdata, &seclen)) {
+ CERROR("failed to extract target uuid\n");
+ return SECSVC_DROP;
+ }
+ uuid_obj.data[uuid_obj.len - 1] = '\0';
+
+ uuid = (struct obd_uuid *) uuid_obj.data;
+ target = class_uuid2obd(uuid);
+ if (!target || target->obd_stopping || !target->obd_set_up) {
+ CERROR("target '%s' is not available for context init (%s)\n",
+ uuid->uuid, target == NULL ? "no target" :
+ (target->obd_stopping ? "stopping" : "not set up"));
+ return SECSVC_DROP;
+ }
+
+ /* extract reverse handle */
+ if (rawobj_extract(&rvs_hdl, &secdata, &seclen)) {
+ CERROR("failed extract reverse handle\n");
+ return SECSVC_DROP;
+ }
+
+ /* extract token */
+ if (rawobj_extract(&in_token, &secdata, &seclen)) {
+ CERROR("can't extract token\n");
+ return SECSVC_DROP;
+ }
+
+ rc = gss_svc_upcall_handle_init(req, grctx, gw, target, lustre_svc,
+ &rvs_hdl, &in_token);
+ if (rc != SECSVC_OK)
+ return rc;
+
+ if (grctx->src_ctx->gsc_usr_mds || grctx->src_ctx->gsc_usr_oss ||
+ grctx->src_ctx->gsc_usr_root)
+ CWARN("create svc ctx %p: user from %s authenticated as %s\n",
+ grctx->src_ctx, libcfs_nid2str(req->rq_peer.nid),
+ grctx->src_ctx->gsc_usr_mds ? "mds" :
+ (grctx->src_ctx->gsc_usr_oss ? "oss" : "root"));
+ else
+ CWARN("create svc ctx %p: accept user %u from %s\n",
+ grctx->src_ctx, grctx->src_ctx->gsc_uid,
+ libcfs_nid2str(req->rq_peer.nid));
+
+ if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
+ if (reqbuf->lm_bufcount < 4) {
+ CERROR("missing user descriptor\n");
+ return SECSVC_DROP;
+ }
+ if (sptlrpc_unpack_user_desc(reqbuf, 2, swabbed)) {
+ CERROR("Mal-formed user descriptor\n");
+ return SECSVC_DROP;
+ }
+
+ req->rq_pack_udesc = 1;
+ req->rq_user_desc = lustre_msg_buf(reqbuf, 2, 0);
+ }
+
+ req->rq_reqmsg = lustre_msg_buf(reqbuf, 1, 0);
+ req->rq_reqlen = lustre_msg_buflen(reqbuf, 1);
+
+ return rc;
+}
+
+/*
+ * last segment must be the gss signature.
+ */
+static
+int gss_svc_verify_request(struct ptlrpc_request *req,
+ struct gss_svc_reqctx *grctx,
+ struct gss_wire_ctx *gw,
+ __u32 *major)
+{
+ struct gss_svc_ctx *gctx = grctx->src_ctx;
+ struct lustre_msg *msg = req->rq_reqbuf;
+ int offset = 2;
+ int swabbed;
+
+ *major = GSS_S_COMPLETE;
+
+ if (msg->lm_bufcount < 2) {
+ CERROR("Too few segments (%u) in request\n", msg->lm_bufcount);
+ return -EINVAL;
+ }
+
+ if (gw->gw_svc == SPTLRPC_SVC_NULL)
+ goto verified;
+
+ if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
+ CERROR("phase 0: discard replayed req: seq %u\n", gw->gw_seq);
+ *major = GSS_S_DUPLICATE_TOKEN;
+ return -EACCES;
+ }
+
+ *major = gss_verify_msg(msg, gctx->gsc_mechctx, gw->gw_svc);
+ if (*major != GSS_S_COMPLETE) {
+ CERROR("failed to verify request: %x\n", *major);
+ return -EACCES;
+ }
+
+ if (gctx->gsc_reverse == 0 &&
+ gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
+ CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
+ *major = GSS_S_DUPLICATE_TOKEN;
+ return -EACCES;
+ }
+
+verified:
+ swabbed = ptlrpc_req_need_swab(req);
+
+ /* user descriptor */
+ if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
+ if (msg->lm_bufcount < (offset + 1)) {
+ CERROR("no user desc included\n");
+ return -EINVAL;
+ }
+
+ if (sptlrpc_unpack_user_desc(msg, offset, swabbed)) {
+ CERROR("Mal-formed user descriptor\n");
+ return -EINVAL;
+ }
+
+ req->rq_pack_udesc = 1;
+ req->rq_user_desc = lustre_msg_buf(msg, offset, 0);
+ offset++;
+ }
+
+ /* check bulk_sec_desc data */
+ if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
+ if (msg->lm_bufcount < (offset + 1)) {
+ CERROR("missing bulk sec descriptor\n");
+ return -EINVAL;
+ }
+
+ if (bulk_sec_desc_unpack(msg, offset, swabbed))
+ return -EINVAL;
+
+ req->rq_pack_bulk = 1;
+ grctx->src_reqbsd = lustre_msg_buf(msg, offset, 0);
+ grctx->src_reqbsd_size = lustre_msg_buflen(msg, offset);
+ }
+
+ req->rq_reqmsg = lustre_msg_buf(msg, 1, 0);
+ req->rq_reqlen = msg->lm_buflens[1];
+ return 0;
+}
+
+static
+int gss_svc_unseal_request(struct ptlrpc_request *req,
+ struct gss_svc_reqctx *grctx,
+ struct gss_wire_ctx *gw,
+ __u32 *major)
+{
+ struct gss_svc_ctx *gctx = grctx->src_ctx;
+ struct lustre_msg *msg = req->rq_reqbuf;
+ int swabbed, msglen, offset = 1;
+
+ if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
+ CERROR("phase 0: discard replayed req: seq %u\n", gw->gw_seq);
+ *major = GSS_S_DUPLICATE_TOKEN;
+ return -EACCES;
+ }
+
+ *major = gss_unseal_msg(gctx->gsc_mechctx, msg,
+ &msglen, req->rq_reqdata_len);
+ if (*major != GSS_S_COMPLETE) {
+ CERROR("failed to unwrap request: %x\n", *major);
+ return -EACCES;
+ }
+
+ if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
+ CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
+ *major = GSS_S_DUPLICATE_TOKEN;
+ return -EACCES;
+ }
+
+ swabbed = __lustre_unpack_msg(msg, msglen);
+ if (swabbed < 0) {
+ CERROR("Failed to unpack after decryption\n");
+ return -EINVAL;
+ }
+ req->rq_reqdata_len = msglen;
+
+ if (msg->lm_bufcount < 1) {
+ CERROR("Invalid buffer: is empty\n");
+ return -EINVAL;
+ }
+
+ if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
+ if (msg->lm_bufcount < offset + 1) {
+ CERROR("no user descriptor included\n");
+ return -EINVAL;
+ }
+
+ if (sptlrpc_unpack_user_desc(msg, offset, swabbed)) {
+ CERROR("Mal-formed user descriptor\n");
+ return -EINVAL;
+ }
+
+ req->rq_pack_udesc = 1;
+ req->rq_user_desc = lustre_msg_buf(msg, offset, 0);
+ offset++;
+ }
+
+ if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
+ if (msg->lm_bufcount < offset + 1) {
+ CERROR("no bulk checksum included\n");
+ return -EINVAL;
+ }
+
+ if (bulk_sec_desc_unpack(msg, offset, swabbed))
+ return -EINVAL;
+
+ req->rq_pack_bulk = 1;
+ grctx->src_reqbsd = lustre_msg_buf(msg, offset, 0);
+ grctx->src_reqbsd_size = lustre_msg_buflen(msg, offset);
+ }
+
+ req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 0, 0);
+ req->rq_reqlen = req->rq_reqbuf->lm_buflens[0];
+ return 0;
+}
+
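+/*
+ * Handle a normal data request: look up the server-side context from the
+ * wire handle, then verify (NULL/AUTH/INTG) or unseal (PRIV) the request.
+ * On NO_CONTEXT/BAD_SIG an error notify is packed so the client can
+ * recover; other failures simply drop the request.
+ */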
+static
+int gss_svc_handle_data(struct ptlrpc_request *req,
+ struct gss_wire_ctx *gw)
+{
+ struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
+ __u32 major = 0;
+ int rc = 0;
+
+ grctx->src_ctx = gss_svc_upcall_get_ctx(req, gw);
+ if (!grctx->src_ctx) {
+ major = GSS_S_NO_CONTEXT;
+ goto error;
+ }
+
+ switch (gw->gw_svc) {
+ case SPTLRPC_SVC_NULL:
+ case SPTLRPC_SVC_AUTH:
+ case SPTLRPC_SVC_INTG:
+ rc = gss_svc_verify_request(req, grctx, gw, &major);
+ break;
+ case SPTLRPC_SVC_PRIV:
+ rc = gss_svc_unseal_request(req, grctx, gw, &major);
+ break;
+ default:
+ CERROR("unsupported gss service %d\n", gw->gw_svc);
+ rc = -EINVAL;
+ }
+
+ if (rc == 0)
+ return SECSVC_OK;
+
+ CERROR("svc %u failed: major 0x%08x: req xid "LPU64" ctx %p idx "
+ LPX64"(%u->%s)\n", gw->gw_svc, major, req->rq_xid,
+ grctx->src_ctx, gss_handle_to_u64(&gw->gw_handle),
+ grctx->src_ctx->gsc_uid, libcfs_nid2str(req->rq_peer.nid));
+error:
+ /* we only notify client in case of NO_CONTEXT/BAD_SIG, which
+ * might happen after server reboot, to allow recovery. */
+ if ((major == GSS_S_NO_CONTEXT || major == GSS_S_BAD_SIG) &&
+ gss_pack_err_notify(req, major, 0) == 0)
+ return SECSVC_COMPLETE;
+
+ return SECSVC_DROP;
+}
+
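+/*
+ * Handle a context destroy request: the context must exist, the request
+ * must use the INTG service and verify correctly, then the context is
+ * destroyed through the svc upcall.  No reply is sent.
+ */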
+static
+int gss_svc_handle_destroy(struct ptlrpc_request *req,
+ struct gss_wire_ctx *gw)
+{
+ struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
+ __u32 major;
+
+ req->rq_ctx_fini = 1;
+ req->rq_no_reply = 1;
+
+ grctx->src_ctx = gss_svc_upcall_get_ctx(req, gw);
+ if (!grctx->src_ctx) {
+ CDEBUG(D_SEC, "invalid gss context handle for destroy.\n");
+ return SECSVC_DROP;
+ }
+
+ if (gw->gw_svc != SPTLRPC_SVC_INTG) {
+ CERROR("svc %u is not supported in destroy.\n", gw->gw_svc);
+ return SECSVC_DROP;
+ }
+
+ if (gss_svc_verify_request(req, grctx, gw, &major))
+ return SECSVC_DROP;
+
+ CWARN("destroy svc ctx %p idx "LPX64" (%u->%s)\n",
+ grctx->src_ctx, gss_handle_to_u64(&gw->gw_handle),
+ grctx->src_ctx->gsc_uid, libcfs_nid2str(req->rq_peer.nid));
+
+ gss_svc_upcall_destroy_ctx(grctx->src_ctx);
+
+ if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
+ if (req->rq_reqbuf->lm_bufcount < 4) {
+ CERROR("missing user descriptor, ignore it\n");
+ return SECSVC_OK;
+ }
+ if (sptlrpc_unpack_user_desc(req->rq_reqbuf, 2,
+ ptlrpc_req_need_swab(req))) {
+ CERROR("Mal-formed user descriptor, ignore it\n");
+ return SECSVC_OK;
+ }
+
+ req->rq_pack_udesc = 1;
+ req->rq_user_desc = lustre_msg_buf(req->rq_reqbuf, 2, 0);
+ }
+
+ return SECSVC_OK;
+}
+
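+/*
+ * Entry point for incoming secured requests: swab and sanity check the
+ * gss header, allocate the per-request gss_svc_reqctx, save the wire
+ * context and dispatch on gh_proc (init/data/destroy).  On success the
+ * request's authentication fields are filled from the established
+ * context.
+ */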
+int gss_svc_accept(struct ptlrpc_sec_policy *policy, struct ptlrpc_request *req)
+{
+ struct gss_header *ghdr;
+ struct gss_svc_reqctx *grctx;
+ struct gss_wire_ctx *gw;
+ int swabbed, rc;
+
+ LASSERT(req->rq_reqbuf);
+ LASSERT(req->rq_svc_ctx == NULL);
+
+ if (req->rq_reqbuf->lm_bufcount < 2) {
+ CERROR("buf count only %d\n", req->rq_reqbuf->lm_bufcount);
+ return SECSVC_DROP;
+ }
+
+ swabbed = ptlrpc_req_need_swab(req);
+
+ ghdr = gss_swab_header(req->rq_reqbuf, 0, swabbed);
+ if (ghdr == NULL) {
+ CERROR("can't decode gss header\n");
+ return SECSVC_DROP;
+ }
+
+ /* sanity checks */
+ if (ghdr->gh_version != PTLRPC_GSS_VERSION) {
+ CERROR("gss version %u, expect %u\n", ghdr->gh_version,
+ PTLRPC_GSS_VERSION);
+ return SECSVC_DROP;
+ }
+
+ req->rq_sp_from = ghdr->gh_sp;
+
+ /* alloc grctx data */
+ OBD_ALLOC_PTR(grctx);
+ if (!grctx)
+ return SECSVC_DROP;
+
+ grctx->src_base.sc_policy = sptlrpc_policy_get(policy);
+ atomic_set(&grctx->src_base.sc_refcount, 1);
+ req->rq_svc_ctx = &grctx->src_base;
+ gw = &grctx->src_wirectx;
+
+ /* save wire context */
+ gw->gw_flags = ghdr->gh_flags;
+ gw->gw_proc = ghdr->gh_proc;
+ gw->gw_seq = ghdr->gh_seq;
+ gw->gw_svc = ghdr->gh_svc;
+ rawobj_from_netobj(&gw->gw_handle, &ghdr->gh_handle);
+
+ /* keep the original wire header, which is subject to checksum verification */
+ if (swabbed)
+ gss_header_swabber(ghdr);
+
+ switch (ghdr->gh_proc) {
+ case PTLRPC_GSS_PROC_INIT:
+ case PTLRPC_GSS_PROC_CONTINUE_INIT:
+ rc = gss_svc_handle_init(req, gw);
+ break;
+ case PTLRPC_GSS_PROC_DATA:
+ rc = gss_svc_handle_data(req, gw);
+ break;
+ case PTLRPC_GSS_PROC_DESTROY:
+ rc = gss_svc_handle_destroy(req, gw);
+ break;
+ default:
+ CERROR("unknown proc %u\n", gw->gw_proc);
+ rc = SECSVC_DROP;
+ break;
+ }
+
+ switch (rc) {
+ case SECSVC_OK:
+ LASSERT(grctx->src_ctx);
+
+ req->rq_auth_gss = 1;
+ req->rq_auth_remote = grctx->src_ctx->gsc_remote;
+ req->rq_auth_usr_mdt = grctx->src_ctx->gsc_usr_mds;
+ req->rq_auth_usr_ost = grctx->src_ctx->gsc_usr_oss;
+ req->rq_auth_usr_root = grctx->src_ctx->gsc_usr_root;
+ req->rq_auth_uid = grctx->src_ctx->gsc_uid;
+ req->rq_auth_mapped_uid = grctx->src_ctx->gsc_mapped_uid;
+ break;
+ case SECSVC_COMPLETE:
+ break;
+ case SECSVC_DROP:
+ gss_svc_reqctx_free(grctx);
+ req->rq_svc_ctx = NULL;
+ break;
+ }
+
+ return rc;
+}
+
+void gss_svc_invalidate_ctx(struct ptlrpc_svc_ctx *svc_ctx)
+{
+ struct gss_svc_reqctx *grctx;
+
+ if (svc_ctx == NULL)
+ return;
+
+ grctx = gss_svc_ctx2reqctx(svc_ctx);
+
+ CWARN("gss svc invalidate ctx %p(%u)\n",
+ grctx->src_ctx, grctx->src_ctx->gsc_uid);
+ gss_svc_upcall_destroy_ctx(grctx->src_ctx);
+}
+
+static inline
+int gss_svc_payload(struct gss_svc_reqctx *grctx, int early,
+ int msgsize, int privacy)
+{
+ /* an early reply is treated normally, but since it actually shares
+ * the same ctx with the original request, the special ctx's special
+ * flags are ignored in this case */
+ if (early == 0 && gss_svc_reqctx_is_special(grctx))
+ return grctx->src_reserve_len;
+
+ return gss_mech_payload(NULL, msgsize, privacy);
+}
+
+static int gss_svc_bulk_payload(struct gss_svc_ctx *gctx,
+ struct sptlrpc_flavor *flvr,
+ int read)
+{
+ int payload = sizeof(struct ptlrpc_bulk_sec_desc);
+
+ if (read) {
+ switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
+ case SPTLRPC_BULK_SVC_NULL:
+ break;
+ case SPTLRPC_BULK_SVC_INTG:
+ payload += gss_mech_payload(NULL, 0, 0);
+ break;
+ case SPTLRPC_BULK_SVC_PRIV:
+ payload += gss_mech_payload(NULL, 0, 1);
+ break;
+ case SPTLRPC_BULK_SVC_AUTH:
+ default:
+ LBUG();
+ }
+ }
+
+ return payload;
+}
+
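+/*
+ * Compute the reply buffer layout (privacy mode: inner clear buffers to
+ * be sealed later by gss_svc_seal(); otherwise: gss header, message,
+ * optional bulk descriptor and signature), then reuse the pre-allocated
+ * reply state if it is big enough or allocate a new one, and initialize
+ * rs_repbuf/rs_msg accordingly.
+ */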
+int gss_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
+{
+ struct gss_svc_reqctx *grctx;
+ struct ptlrpc_reply_state *rs;
+ int early, privacy, svc, bsd_off = 0;
+ __u32 ibuflens[2], buflens[4];
+ int ibufcnt = 0, bufcnt;
+ int txtsize, wmsg_size, rs_size;
+
+ LASSERT(msglen % 8 == 0);
+
+ if (req->rq_pack_bulk && !req->rq_bulk_read && !req->rq_bulk_write) {
+ CERROR("client request bulk sec on non-bulk rpc\n");
+ return -EPROTO;
+ }
+
+ svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
+ early = (req->rq_packed_final == 0);
+
+ grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
+ if (!early && gss_svc_reqctx_is_special(grctx))
+ privacy = 0;
+ else
+ privacy = (svc == SPTLRPC_SVC_PRIV);
+
+ if (privacy) {
+ /* inner clear buffers */
+ ibufcnt = 1;
+ ibuflens[0] = msglen;
+
+ if (req->rq_pack_bulk) {
+ LASSERT(grctx->src_reqbsd);
+
+ bsd_off = ibufcnt;
+ ibuflens[ibufcnt++] = gss_svc_bulk_payload(
+ grctx->src_ctx,
+ &req->rq_flvr,
+ req->rq_bulk_read);
+ }
+
+ txtsize = lustre_msg_size_v2(ibufcnt, ibuflens);
+ txtsize += GSS_MAX_CIPHER_BLOCK;
+
+ /* wrapper buffer */
+ bufcnt = 2;
+ buflens[0] = PTLRPC_GSS_HEADER_SIZE;
+ buflens[1] = gss_svc_payload(grctx, early, txtsize, 1);
+ } else {
+ bufcnt = 2;
+ buflens[0] = PTLRPC_GSS_HEADER_SIZE;
+ buflens[1] = msglen;
+
+ txtsize = buflens[0];
+ if (svc == SPTLRPC_SVC_INTG)
+ txtsize += buflens[1];
+
+ if (req->rq_pack_bulk) {
+ LASSERT(grctx->src_reqbsd);
+
+ bsd_off = bufcnt;
+ buflens[bufcnt] = gss_svc_bulk_payload(
+ grctx->src_ctx,
+ &req->rq_flvr,
+ req->rq_bulk_read);
+ if (svc == SPTLRPC_SVC_INTG)
+ txtsize += buflens[bufcnt];
+ bufcnt++;
+ }
+
+ if ((!early && gss_svc_reqctx_is_special(grctx)) ||
+ svc != SPTLRPC_SVC_NULL)
+ buflens[bufcnt++] = gss_svc_payload(grctx, early,
+ txtsize, 0);
+ }
+
+ wmsg_size = lustre_msg_size_v2(bufcnt, buflens);
+
+ rs_size = sizeof(*rs) + wmsg_size;
+ rs = req->rq_reply_state;
+
+ if (rs) {
+ /* pre-allocated */
+ LASSERT(rs->rs_size >= rs_size);
+ } else {
+ OBD_ALLOC_LARGE(rs, rs_size);
+ if (rs == NULL)
+ return -ENOMEM;
+
+ rs->rs_size = rs_size;
+ }
+
+ rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
+ rs->rs_repbuf_len = wmsg_size;
+
+ /* initialize the buffer */
+ if (privacy) {
+ lustre_init_msg_v2(rs->rs_repbuf, ibufcnt, ibuflens, NULL);
+ rs->rs_msg = lustre_msg_buf(rs->rs_repbuf, 0, msglen);
+ } else {
+ lustre_init_msg_v2(rs->rs_repbuf, bufcnt, buflens, NULL);
+ rs->rs_repbuf->lm_secflvr = req->rq_flvr.sf_rpc;
+
+ rs->rs_msg = lustre_msg_buf(rs->rs_repbuf, 1, 0);
+ }
+
+ if (bsd_off) {
+ grctx->src_repbsd = lustre_msg_buf(rs->rs_repbuf, bsd_off, 0);
+ grctx->src_repbsd_size = lustre_msg_buflen(rs->rs_repbuf,
+ bsd_off);
+ }
+
+ gss_svc_reqctx_addref(grctx);
+ rs->rs_svc_ctx = req->rq_svc_ctx;
+
+ LASSERT(rs->rs_msg);
+ req->rq_reply_state = rs;
+ return 0;
+}
+
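+/*
+ * Seal (encrypt) the clear reply held in rs->rs_repbuf: a gss header is
+ * staged at the tail of the buffer, the message is wrapped into a
+ * temporary token via lgss_wrap(), and the buffer is re-initialized to
+ * carry just the header and the token.  rs_msg/rq_repmsg are nullified
+ * to catch further access from upper layers.
+ */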
+static int gss_svc_seal(struct ptlrpc_request *req,
+ struct ptlrpc_reply_state *rs,
+ struct gss_svc_reqctx *grctx)
+{
+ struct gss_svc_ctx *gctx = grctx->src_ctx;
+ rawobj_t hdrobj, msgobj, token;
+ struct gss_header *ghdr;
+ __u8 *token_buf;
+ int token_buflen;
+ __u32 buflens[2], major;
+ int msglen, rc;
+
+ /* get the clear data length; note the embedded lustre_msg might
+ * have been shrunk */
+ if (req->rq_replen != lustre_msg_buflen(rs->rs_repbuf, 0))
+ msglen = lustre_shrink_msg(rs->rs_repbuf, 0, req->rq_replen, 1);
+ else
+ msglen = lustre_msg_size_v2(rs->rs_repbuf->lm_bufcount,
+ rs->rs_repbuf->lm_buflens);
+
+ /* temporarily use tail of buffer to hold gss header data */
+ LASSERT(msglen + PTLRPC_GSS_HEADER_SIZE <= rs->rs_repbuf_len);
+ ghdr = (struct gss_header *) ((char *) rs->rs_repbuf +
+ rs->rs_repbuf_len - PTLRPC_GSS_HEADER_SIZE);
+ ghdr->gh_version = PTLRPC_GSS_VERSION;
+ ghdr->gh_sp = LUSTRE_SP_ANY;
+ ghdr->gh_flags = 0;
+ ghdr->gh_proc = PTLRPC_GSS_PROC_DATA;
+ ghdr->gh_seq = grctx->src_wirectx.gw_seq;
+ ghdr->gh_svc = SPTLRPC_SVC_PRIV;
+ ghdr->gh_handle.len = 0;
+ if (req->rq_pack_bulk)
+ ghdr->gh_flags |= LUSTRE_GSS_PACK_BULK;
+
+ /* allocate temporary cipher buffer */
+ token_buflen = gss_mech_payload(gctx->gsc_mechctx, msglen, 1);
+ OBD_ALLOC_LARGE(token_buf, token_buflen);
+ if (token_buf == NULL)
+ return -ENOMEM;
+
+ hdrobj.len = PTLRPC_GSS_HEADER_SIZE;
+ hdrobj.data = (__u8 *) ghdr;
+ msgobj.len = msglen;
+ msgobj.data = (__u8 *) rs->rs_repbuf;
+ token.len = token_buflen;
+ token.data = token_buf;
+
+ major = lgss_wrap(gctx->gsc_mechctx, &hdrobj, &msgobj,
+ rs->rs_repbuf_len - PTLRPC_GSS_HEADER_SIZE, &token);
+ if (major != GSS_S_COMPLETE) {
+ CERROR("wrap message error: %08x\n", major);
+ GOTO(out_free, rc = -EPERM);
+ }
+ LASSERT(token.len <= token_buflen);
+
+ /* we are about to overwrite data at rs->rs_repbuf; nullify pointers
+ * to it to catch further illegal usage. */
+ if (req->rq_pack_bulk) {
+ grctx->src_repbsd = NULL;
+ grctx->src_repbsd_size = 0;
+ }
+
+ /* now fill the actual wire data
+ * - gss header
+ * - gss token
+ */
+ buflens[0] = PTLRPC_GSS_HEADER_SIZE;
+ buflens[1] = token.len;
+
+ rs->rs_repdata_len = lustre_msg_size_v2(2, buflens);
+ LASSERT(rs->rs_repdata_len <= rs->rs_repbuf_len);
+
+ lustre_init_msg_v2(rs->rs_repbuf, 2, buflens, NULL);
+ rs->rs_repbuf->lm_secflvr = req->rq_flvr.sf_rpc;
+
+ memcpy(lustre_msg_buf(rs->rs_repbuf, 0, 0), ghdr,
+ PTLRPC_GSS_HEADER_SIZE);
+ memcpy(lustre_msg_buf(rs->rs_repbuf, 1, 0), token.data, token.len);
+
+ /* reply offset */
+ if (req->rq_packed_final &&
+ (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))
+ req->rq_reply_off = gss_at_reply_off_priv;
+ else
+ req->rq_reply_off = 0;
+
+ /* to catch upper layer's further access */
+ rs->rs_msg = NULL;
+ req->rq_repmsg = NULL;
+ req->rq_replen = 0;
+
+ rc = 0;
+out_free:
+ OBD_FREE_LARGE(token_buf, token_buflen);
+ return rc;
+}
+
+int gss_svc_authorize(struct ptlrpc_request *req)
+{
+ struct ptlrpc_reply_state *rs = req->rq_reply_state;
+ struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
+ struct gss_wire_ctx *gw = &grctx->src_wirectx;
+ int early, rc;
+
+ early = (req->rq_packed_final == 0);
+
+ if (!early && gss_svc_reqctx_is_special(grctx)) {
+ LASSERT(rs->rs_repdata_len != 0);
+
+ req->rq_reply_off = gss_at_reply_off_integ;
+ return 0;
+ }
+
+ /* early reply could happen in many cases */
+ if (!early &&
+ gw->gw_proc != PTLRPC_GSS_PROC_DATA &&
+ gw->gw_proc != PTLRPC_GSS_PROC_DESTROY) {
+ CERROR("proc %d not support\n", gw->gw_proc);
+ return -EINVAL;
+ }
+
+ LASSERT(grctx->src_ctx);
+
+ switch (gw->gw_svc) {
+ case SPTLRPC_SVC_NULL:
+ case SPTLRPC_SVC_AUTH:
+ case SPTLRPC_SVC_INTG:
+ rc = gss_svc_sign(req, rs, grctx, gw->gw_svc);
+ break;
+ case SPTLRPC_SVC_PRIV:
+ rc = gss_svc_seal(req, rs, grctx);
+ break;
+ default:
+ CERROR("Unknown service %d\n", gw->gw_svc);
+ GOTO(out, rc = -EINVAL);
+ }
+
+out:
+ return rc;
+}
+
+void gss_svc_free_rs(struct ptlrpc_reply_state *rs)
+{
+ struct gss_svc_reqctx *grctx;
+
+ LASSERT(rs->rs_svc_ctx);
+ grctx = container_of(rs->rs_svc_ctx, struct gss_svc_reqctx, src_base);
+
+ gss_svc_reqctx_decref(grctx);
+ rs->rs_svc_ctx = NULL;
+
+ if (!rs->rs_prealloc)
+ OBD_FREE_LARGE(rs, rs->rs_size);
+}
+
+void gss_svc_free_ctx(struct ptlrpc_svc_ctx *ctx)
+{
+ LASSERT(atomic_read(&ctx->sc_refcount) == 0);
+ gss_svc_reqctx_free(gss_svc_ctx2reqctx(ctx));
+}
+
+int gss_copy_rvc_cli_ctx(struct ptlrpc_cli_ctx *cli_ctx,
+ struct ptlrpc_svc_ctx *svc_ctx)
+{
+ struct gss_cli_ctx *cli_gctx = ctx2gctx(cli_ctx);
+ struct gss_svc_ctx *svc_gctx = gss_svc_ctx2gssctx(svc_ctx);
+ struct gss_ctx *mechctx = NULL;
+
+ LASSERT(cli_gctx);
+ LASSERT(svc_gctx && svc_gctx->gsc_mechctx);
+
+ cli_gctx->gc_proc = PTLRPC_GSS_PROC_DATA;
+ cli_gctx->gc_win = GSS_SEQ_WIN;
+
+ /* The problem is that the reverse ctx might get lost in some recovery
+ * situations, and the same svc_ctx will be used to re-create it.
+ * If a callback was sent out before that, a new reverse ctx starting
+ * with sequence 0 would cause future callback rpcs to be treated as
+ * replays.
+ *
+ * Each reverse root ctx records its latest sequence number on its
+ * buddy svcctx before being destroyed, so we continue to use it here.
+ */
+ atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);
+
+ if (gss_svc_upcall_dup_handle(&cli_gctx->gc_svc_handle, svc_gctx)) {
+ CERROR("failed to dup svc handle\n");
+ goto err_out;
+ }
+
+ if (lgss_copy_reverse_context(svc_gctx->gsc_mechctx, &mechctx) !=
+ GSS_S_COMPLETE) {
+ CERROR("failed to copy mech context\n");
+ goto err_svc_handle;
+ }
+
+ if (rawobj_dup(&cli_gctx->gc_handle, &svc_gctx->gsc_rvs_hdl)) {
+ CERROR("failed to dup reverse handle\n");
+ goto err_ctx;
+ }
+
+ cli_gctx->gc_mechctx = mechctx;
+ gss_cli_ctx_uptodate(cli_gctx);
+
+ return 0;
+
+err_ctx:
+ lgss_delete_sec_context(&mechctx);
+err_svc_handle:
+ rawobj_free(&cli_gctx->gc_svc_handle);
+err_out:
+ return -ENOMEM;
+}
+
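+/*
+ * Precompute gss_at_reply_off_integ and gss_at_reply_off_priv, the extra
+ * reply-buffer space reserved so that an early reply still fits in
+ * integrity and privacy modes respectively.
+ */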
+static void gss_init_at_reply_offset(void)
+{
+ __u32 buflens[3];
+ int clearsize;
+
+ buflens[0] = PTLRPC_GSS_HEADER_SIZE;
+ buflens[1] = lustre_msg_early_size();
+ buflens[2] = gss_cli_payload(NULL, buflens[1], 0);
+ gss_at_reply_off_integ = lustre_msg_size_v2(3, buflens);
+
+ buflens[0] = lustre_msg_early_size();
+ clearsize = lustre_msg_size_v2(1, buflens);
+ buflens[0] = PTLRPC_GSS_HEADER_SIZE;
+ buflens[1] = gss_cli_payload(NULL, clearsize, 0);
+ buflens[2] = gss_cli_payload(NULL, clearsize, 1);
+ gss_at_reply_off_priv = lustre_msg_size_v2(3, buflens);
+}
+
+int __init sptlrpc_gss_init(void)
+{
+ int rc;
+
+ rc = gss_init_lproc();
+ if (rc)
+ return rc;
+
+ rc = gss_init_cli_upcall();
+ if (rc)
+ goto out_lproc;
+
+ rc = gss_init_svc_upcall();
+ if (rc)
+ goto out_cli_upcall;
+
+ rc = init_kerberos_module();
+ if (rc)
+ goto out_svc_upcall;
+
+ /* register the policy after everything else is initialized, because
+ * it might be used immediately after registration. */
+
+ rc = gss_init_keyring();
+ if (rc)
+ goto out_kerberos;
+
+#ifdef HAVE_GSS_PIPEFS
+ rc = gss_init_pipefs();
+ if (rc)
+ goto out_keyring;
+#endif
+
+ gss_init_at_reply_offset();
+
+ return 0;
+
+#ifdef HAVE_GSS_PIPEFS
+out_keyring:
+ gss_exit_keyring();
+#endif
+
+out_kerberos:
+ cleanup_kerberos_module();
+out_svc_upcall:
+ gss_exit_svc_upcall();
+out_cli_upcall:
+ gss_exit_cli_upcall();
+out_lproc:
+ gss_exit_lproc();
+ return rc;
+}
+
+static void __exit sptlrpc_gss_exit(void)
+{
+ gss_exit_keyring();
+#ifdef HAVE_GSS_PIPEFS
+ gss_exit_pipefs();
+#endif
+ cleanup_kerberos_module();
+ gss_exit_svc_upcall();
+ gss_exit_cli_upcall();
+ gss_exit_lproc();
+}
+
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("GSS security policy for Lustre");
+MODULE_LICENSE("GPL");
+
+module_init(sptlrpc_gss_init);
+module_exit(sptlrpc_gss_exit);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
new file mode 100644
index 0000000..5ca69ae
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -0,0 +1,1594 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ptlrpc/import.c
+ *
+ * Author: Mike Shaver <shaver@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_RPC
+
+#include <obd_support.h>
+#include <lustre_ha.h>
+#include <lustre_net.h>
+#include <lustre_import.h>
+#include <lustre_export.h>
+#include <obd.h>
+#include <obd_cksum.h>
+#include <obd_class.h>
+
+#include "ptlrpc_internal.h"
+
+struct ptlrpc_connect_async_args {
+ __u64 pcaa_peer_committed;
+ int pcaa_initial_connect;
+};
+
+/**
+ * Updates import \a imp current state to provided \a state value
+ * Helper function. Must be called under imp_lock.
+ */
+static void __import_set_state(struct obd_import *imp,
+ enum lustre_imp_state state)
+{
+ imp->imp_state = state;
+ imp->imp_state_hist[imp->imp_state_hist_idx].ish_state = state;
+ imp->imp_state_hist[imp->imp_state_hist_idx].ish_time =
+ cfs_time_current_sec();
+ imp->imp_state_hist_idx = (imp->imp_state_hist_idx + 1) %
+ IMP_STATE_HIST_LEN;
+}
+
+/* A CLOSED import should remain so. */
+#define IMPORT_SET_STATE_NOLOCK(imp, state) \
+do { \
+ if (imp->imp_state != LUSTRE_IMP_CLOSED) { \
+ CDEBUG(D_HA, "%p %s: changing import state from %s to %s\n", \
+ imp, obd2cli_tgt(imp->imp_obd), \
+ ptlrpc_import_state_name(imp->imp_state), \
+ ptlrpc_import_state_name(state)); \
+ __import_set_state(imp, state); \
+ } \
+} while (0)
+
+#define IMPORT_SET_STATE(imp, state) \
+do { \
+ spin_lock(&imp->imp_lock); \
+ IMPORT_SET_STATE_NOLOCK(imp, state); \
+ spin_unlock(&imp->imp_lock); \
+} while (0)
+
+
+static int ptlrpc_connect_interpret(const struct lu_env *env,
+ struct ptlrpc_request *request,
+ void *data, int rc);
+int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
+
+/* Only this function is allowed to change the import state when it is
+ * CLOSED. I would rather refcount the import and free it after
+ * disconnection like we do with exports. To do that, the client_obd
+ * will need to save the peer info somewhere other than in the import,
+ * though. */
+int ptlrpc_init_import(struct obd_import *imp)
+{
+ spin_lock(&imp->imp_lock);
+
+ imp->imp_generation++;
+ imp->imp_state = LUSTRE_IMP_NEW;
+
+ spin_unlock(&imp->imp_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ptlrpc_init_import);
+
+#define UUID_STR "_UUID"
+void deuuidify(char *uuid, const char *prefix, char **uuid_start, int *uuid_len)
+{
+ *uuid_start = !prefix || strncmp(uuid, prefix, strlen(prefix))
+ ? uuid : uuid + strlen(prefix);
+
+ *uuid_len = strlen(*uuid_start);
+
+ if (*uuid_len < strlen(UUID_STR))
+ return;
+
+ if (!strncmp(*uuid_start + *uuid_len - strlen(UUID_STR),
+ UUID_STR, strlen(UUID_STR)))
+ *uuid_len -= strlen(UUID_STR);
+}
+EXPORT_SYMBOL(deuuidify);
+
+/**
+ * Returns true if import was FULL, false if import was already not
+ * connected.
+ * @imp - import to be disconnected
+ * @conn_cnt - connection count (epoch) of the request that timed out
+ * and caused the disconnection. In some cases, multiple
+ * inflight requests can fail to a single target (e.g. OST
+ * bulk requests) and if one has already caused a reconnection
+ * (increasing the import->conn_cnt) the older failure should
+ * not also cause a reconnection. If zero it forces a reconnect.
+ */
+int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt)
+{
+ int rc = 0;
+
+ spin_lock(&imp->imp_lock);
+
+ if (imp->imp_state == LUSTRE_IMP_FULL &&
+ (conn_cnt == 0 || conn_cnt == imp->imp_conn_cnt)) {
+ char *target_start;
+ int target_len;
+
+ deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
+ &target_start, &target_len);
+
+ if (imp->imp_replayable) {
+ LCONSOLE_WARN("%s: Connection to %.*s (at %s) was "
+ "lost; in progress operations using this "
+ "service will wait for recovery to complete\n",
+ imp->imp_obd->obd_name, target_len, target_start,
+ libcfs_nid2str(imp->imp_connection->c_peer.nid));
+ } else {
+ LCONSOLE_ERROR_MSG(0x166, "%s: Connection to "
+ "%.*s (at %s) was lost; in progress "
+ "operations using this service will fail\n",
+ imp->imp_obd->obd_name,
+ target_len, target_start,
+ libcfs_nid2str(imp->imp_connection->c_peer.nid));
+ }
+ ptlrpc_deactivate_timeouts(imp);
+ IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
+ spin_unlock(&imp->imp_lock);
+
+ if (obd_dump_on_timeout)
+ libcfs_debug_dumplog();
+
+ obd_import_event(imp->imp_obd, imp, IMP_EVENT_DISCON);
+ rc = 1;
+ } else {
+ spin_unlock(&imp->imp_lock);
+ CDEBUG(D_HA, "%s: import %p already %s (conn %u, was %u): %s\n",
+ imp->imp_client->cli_name, imp,
+ (imp->imp_state == LUSTRE_IMP_FULL &&
+ imp->imp_conn_cnt > conn_cnt) ?
+ "reconnected" : "not connected", imp->imp_conn_cnt,
+ conn_cnt, ptlrpc_import_state_name(imp->imp_state));
+ }
+
+ return rc;
+}
+
+/* Must be called with imp_lock held! */
+static void ptlrpc_deactivate_and_unlock_import(struct obd_import *imp)
+{
+ LASSERT(spin_is_locked(&imp->imp_lock));
+
+ CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd));
+ imp->imp_invalid = 1;
+ imp->imp_generation++;
+ spin_unlock(&imp->imp_lock);
+
+ ptlrpc_abort_inflight(imp);
+ obd_import_event(imp->imp_obd, imp, IMP_EVENT_INACTIVE);
+}
+
+/*
+ * This acts as a barrier; all existing requests are rejected, and
+ * no new requests will be accepted until the import is valid again.
+ */
+void ptlrpc_deactivate_import(struct obd_import *imp)
+{
+ spin_lock(&imp->imp_lock);
+ ptlrpc_deactivate_and_unlock_import(imp);
+}
+EXPORT_SYMBOL(ptlrpc_deactivate_import);
+
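+/*
+ * Return the number of seconds left before @req reaches its deadline
+ * (rq_sent for new requests), or 0 if the request is not actually in
+ * flight or has already timed out.
+ */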
+static unsigned int
+ptlrpc_inflight_deadline(struct ptlrpc_request *req, time_t now)
+{
+ long dl;
+
+ if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) ||
+ (req->rq_phase == RQ_PHASE_BULK) ||
+ (req->rq_phase == RQ_PHASE_NEW)))
+ return 0;
+
+ if (req->rq_timedout)
+ return 0;
+
+ if (req->rq_phase == RQ_PHASE_NEW)
+ dl = req->rq_sent;
+ else
+ dl = req->rq_deadline;
+
+ if (dl <= now)
+ return 0;
+
+ return dl - now;
+}
+
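+/*
+ * Walk the import's sending list and return the longest remaining
+ * deadline among the in-flight requests (0 if there are none).
+ */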
+static unsigned int ptlrpc_inflight_timeout(struct obd_import *imp)
+{
+ time_t now = cfs_time_current_sec();
+ struct list_head *tmp, *n;
+ struct ptlrpc_request *req;
+ unsigned int timeout = 0;
+
+ spin_lock(&imp->imp_lock);
+ list_for_each_safe(tmp, n, &imp->imp_sending_list) {
+ req = list_entry(tmp, struct ptlrpc_request, rq_list);
+ timeout = max(ptlrpc_inflight_deadline(req, now), timeout);
+ }
+ spin_unlock(&imp->imp_lock);
+ return timeout;
+}
+
+/**
+ * This function will invalidate the import, if necessary, then block
+ * for all the RPC completions, and finally notify the obd to
+ * invalidate its state (ie cancel locks, clear pending requests,
+ * etc).
+ */
+void ptlrpc_invalidate_import(struct obd_import *imp)
+{
+ struct list_head *tmp, *n;
+ struct ptlrpc_request *req;
+ struct l_wait_info lwi;
+ unsigned int timeout;
+ int rc;
+
+ atomic_inc(&imp->imp_inval_count);
+
+ if (!imp->imp_invalid || imp->imp_obd->obd_no_recov)
+ ptlrpc_deactivate_import(imp);
+
+ LASSERT(imp->imp_invalid);
+
+ /* Wait forever until inflight == 0. We really can't do it another
+ * way because in some cases we need to wait for very long reply
+ * unlink. We can't do anything before that because there is really
+ * no guarantee that some rdma transfer is not in progress right now. */
+ do {
+ /* Calculate max timeout for waiting on rpcs to error
+ * out. Use obd_timeout if calculated value is smaller
+ * than it. */
+ if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
+ timeout = ptlrpc_inflight_timeout(imp);
+ timeout += timeout / 3;
+
+ if (timeout == 0)
+ timeout = obd_timeout;
+ } else {
+ /* decrease the interval to increase race condition */
+ timeout = 1;
+ }
+
+ CDEBUG(D_RPCTRACE,"Sleeping %d sec for inflight to error out\n",
+ timeout);
+
+ /* Wait for all requests to error out and call completion
+ * callbacks. Cap it at obd_timeout -- these should all
+ * have been locally cancelled by ptlrpc_abort_inflight. */
+ lwi = LWI_TIMEOUT_INTERVAL(
+ cfs_timeout_cap(cfs_time_seconds(timeout)),
+ (timeout > 1) ? cfs_time_seconds(1) : cfs_time_seconds(1) / 2,
+ NULL, NULL);
+ rc = l_wait_event(imp->imp_recovery_waitq,
+ (atomic_read(&imp->imp_inflight) == 0),
+ &lwi);
+ if (rc) {
+ const char *cli_tgt = obd2cli_tgt(imp->imp_obd);
+
+ CERROR("%s: rc = %d waiting for callback (%d != 0)\n",
+ cli_tgt, rc,
+ atomic_read(&imp->imp_inflight));
+
+ spin_lock(&imp->imp_lock);
+ if (atomic_read(&imp->imp_inflight) == 0) {
+ int count = atomic_read(&imp->imp_unregistering);
+
+ /* We know that "unregistering" rpcs can only
+ * survive in the sending or delayed lists (they
+ * may be waiting for a long reply unlink on
+ * sluggish nets). Let's check this. If there
+ * are no inflight rpcs and unregistering != 0,
+ * it is a bug. */
+ LASSERTF(count == 0, "Some RPCs are still "
+ "unregistering: %d\n", count);
+
+ /* Save one loop as soon as inflight has
+ * dropped to zero. No new inflights are
+ * possible at this point. */
+ rc = 0;
+ } else {
+ list_for_each_safe(tmp, n,
+ &imp->imp_sending_list) {
+ req = list_entry(tmp,
+ struct ptlrpc_request,
+ rq_list);
+ DEBUG_REQ(D_ERROR, req,
+ "still on sending list");
+ }
+ list_for_each_safe(tmp, n,
+ &imp->imp_delayed_list) {
+ req = list_entry(tmp,
+ struct ptlrpc_request,
+ rq_list);
+ DEBUG_REQ(D_ERROR, req,
+ "still on delayed list");
+ }
+
+ CERROR("%s: RPCs in \"%s\" phase found (%d). "
+ "Network is sluggish? Waiting them "
+ "to error out.\n", cli_tgt,
+ ptlrpc_phase2str(RQ_PHASE_UNREGISTERING),
+ atomic_read(&imp->imp_unregistering));
+ }
+ spin_unlock(&imp->imp_lock);
+ }
+ } while (rc != 0);
+
+ /*
+ * Let's additionally check that no new rpcs added to import in
+ * "invalidate" state.
+ */
+ LASSERT(atomic_read(&imp->imp_inflight) == 0);
+ obd_import_event(imp->imp_obd, imp, IMP_EVENT_INVALIDATE);
+ sptlrpc_import_flush_all_ctx(imp);
+
+ atomic_dec(&imp->imp_inval_count);
+ wake_up_all(&imp->imp_recovery_waitq);
+}
+EXPORT_SYMBOL(ptlrpc_invalidate_import);
+
+/* unset imp_invalid */
+void ptlrpc_activate_import(struct obd_import *imp)
+{
+ struct obd_device *obd = imp->imp_obd;
+
+ spin_lock(&imp->imp_lock);
+ imp->imp_invalid = 0;
+ ptlrpc_activate_timeouts(imp);
+ spin_unlock(&imp->imp_lock);
+ obd_import_event(obd, imp, IMP_EVENT_ACTIVE);
+}
+EXPORT_SYMBOL(ptlrpc_activate_import);
+
+void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt)
+{
+ LASSERT(!imp->imp_dlm_fake);
+
+ if (ptlrpc_set_import_discon(imp, conn_cnt)) {
+ if (!imp->imp_replayable) {
+ CDEBUG(D_HA, "import %s@%s for %s not replayable, "
+ "auto-deactivating\n",
+ obd2cli_tgt(imp->imp_obd),
+ imp->imp_connection->c_remote_uuid.uuid,
+ imp->imp_obd->obd_name);
+ ptlrpc_deactivate_import(imp);
+ }
+
+ CDEBUG(D_HA, "%s: waking up pinger\n",
+ obd2cli_tgt(imp->imp_obd));
+
+ spin_lock(&imp->imp_lock);
+ imp->imp_force_verify = 1;
+ spin_unlock(&imp->imp_lock);
+
+ ptlrpc_pinger_wake_up();
+ }
+}
+EXPORT_SYMBOL(ptlrpc_fail_import);
+
+int ptlrpc_reconnect_import(struct obd_import *imp)
+{
+ ptlrpc_set_import_discon(imp, 0);
+ /* Force a new connect attempt */
+ ptlrpc_invalidate_import(imp);
+ /* Do a fresh connect next time by zeroing the handle */
+ ptlrpc_disconnect_import(imp, 1);
+ /* Wait for all invalidate calls to finish */
+ if (atomic_read(&imp->imp_inval_count) > 0) {
+ int rc;
+ struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+ rc = l_wait_event(imp->imp_recovery_waitq,
+ (atomic_read(&imp->imp_inval_count) == 0),
+ &lwi);
+ if (rc)
+ CERROR("Interrupted, inval=%d\n",
+ atomic_read(&imp->imp_inval_count));
+ }
+
+ /* Allow reconnect attempts */
+ imp->imp_obd->obd_no_recov = 0;
+ /* Remove 'invalid' flag */
+ ptlrpc_activate_import(imp);
+ /* Attempt a new connect */
+ ptlrpc_recover_import(imp, NULL, 0);
+ return 0;
+}
+EXPORT_SYMBOL(ptlrpc_reconnect_import);
+
+/**
+ * Connection on import \a imp is changed to another one (if more than one is
+ * present). We typically choose the connection that we have not tried to
+ * connect to for the longest time.
+ */
+static int import_select_connection(struct obd_import *imp)
+{
+ struct obd_import_conn *imp_conn = NULL, *conn;
+ struct obd_export *dlmexp;
+ char *target_start;
+ int target_len, tried_all = 1;
+
+ spin_lock(&imp->imp_lock);
+
+ if (list_empty(&imp->imp_conn_list)) {
+ CERROR("%s: no connections available\n",
+ imp->imp_obd->obd_name);
+ spin_unlock(&imp->imp_lock);
+ return -EINVAL;
+ }
+
+ list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
+ CDEBUG(D_HA, "%s: connect to NID %s last attempt "LPU64"\n",
+ imp->imp_obd->obd_name,
+ libcfs_nid2str(conn->oic_conn->c_peer.nid),
+ conn->oic_last_attempt);
+
+ /* If we have not tried this connection since
+ the last successful attempt, go with this one */
+ if ((conn->oic_last_attempt == 0) ||
+ cfs_time_beforeq_64(conn->oic_last_attempt,
+ imp->imp_last_success_conn)) {
+ imp_conn = conn;
+ tried_all = 0;
+ break;
+ }
+
+ /* If all of the connections have already been tried
+ since the last successful connection, just choose the
+ least recently used */
+ if (!imp_conn)
+ imp_conn = conn;
+ else if (cfs_time_before_64(conn->oic_last_attempt,
+ imp_conn->oic_last_attempt))
+ imp_conn = conn;
+ }
+
+ /* if not found, simply choose the current one */
+ if (!imp_conn || imp->imp_force_reconnect) {
+ LASSERT(imp->imp_conn_current);
+ imp_conn = imp->imp_conn_current;
+ tried_all = 0;
+ }
+ LASSERT(imp_conn->oic_conn);
+
+ /* If we've tried everything, and we're back to the beginning of the
+ list, increase our timeout and try again. It will be reset when
+ we do finally connect. (FIXME: really we should wait for all network
+ state associated with the last connection attempt to drain before
+ trying to reconnect on it.) */
+ if (tried_all && (imp->imp_conn_list.next == &imp_conn->oic_item)) {
+ struct adaptive_timeout *at = &imp->imp_at.iat_net_latency;
+ if (at_get(at) < CONNECTION_SWITCH_MAX) {
+ at_measured(at, at_get(at) + CONNECTION_SWITCH_INC);
+ if (at_get(at) > CONNECTION_SWITCH_MAX)
+ at_reset(at, CONNECTION_SWITCH_MAX);
+ }
+ LASSERT(imp_conn->oic_last_attempt);
+ CDEBUG(D_HA, "%s: tried all connections, increasing latency "
+ "to %ds\n", imp->imp_obd->obd_name, at_get(at));
+ }
+
+ imp_conn->oic_last_attempt = cfs_time_current_64();
+
+ /* switch connection, don't mind if it's same as the current one */
+ if (imp->imp_connection)
+ ptlrpc_connection_put(imp->imp_connection);
+ imp->imp_connection = ptlrpc_connection_addref(imp_conn->oic_conn);
+
+ dlmexp = class_conn2export(&imp->imp_dlm_handle);
+ LASSERT(dlmexp != NULL);
+ if (dlmexp->exp_connection)
+ ptlrpc_connection_put(dlmexp->exp_connection);
+ dlmexp->exp_connection = ptlrpc_connection_addref(imp_conn->oic_conn);
+ class_export_put(dlmexp);
+
+ if (imp->imp_conn_current != imp_conn) {
+ if (imp->imp_conn_current) {
+ deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
+ &target_start, &target_len);
+
+ CDEBUG(D_HA, "%s: Connection changing to"
+ " %.*s (at %s)\n",
+ imp->imp_obd->obd_name,
+ target_len, target_start,
+ libcfs_nid2str(imp_conn->oic_conn->c_peer.nid));
+ }
+
+ imp->imp_conn_current = imp_conn;
+ }
+
+ CDEBUG(D_HA, "%s: import %p using connection %s/%s\n",
+ imp->imp_obd->obd_name, imp, imp_conn->oic_uuid.uuid,
+ libcfs_nid2str(imp_conn->oic_conn->c_peer.nid));
+
+ spin_unlock(&imp->imp_lock);
+
+ return 0;
+}
+
+/*
+ * must be called under imp_lock
+ */
+static int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno)
+{
+ struct ptlrpc_request *req;
+ struct list_head *tmp;
+
+ if (list_empty(&imp->imp_replay_list))
+ return 0;
+ tmp = imp->imp_replay_list.next;
+ req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
+ *transno = req->rq_transno;
+ if (req->rq_transno == 0) {
+ DEBUG_REQ(D_ERROR, req, "zero transno in replay");
+ LBUG();
+ }
+
+ return 1;
+}
+
+/**
+ * Attempt to (re)connect import \a imp. This includes all preparations,
+ * initializing CONNECT RPC request and passing it to ptlrpcd for
+ * actual sending.
+ * Returns 0 on success or error code.
+ */
+int ptlrpc_connect_import(struct obd_import *imp)
+{
+ struct obd_device *obd = imp->imp_obd;
+ int initial_connect = 0;
+ int set_transno = 0;
+ __u64 committed_before_reconnect = 0;
+ struct ptlrpc_request *request;
+ char *bufs[] = { NULL,
+ obd2cli_tgt(imp->imp_obd),
+ obd->obd_uuid.uuid,
+ (char *)&imp->imp_dlm_handle,
+ (char *)&imp->imp_connect_data };
+ struct ptlrpc_connect_async_args *aa;
+ int rc;
+
+ spin_lock(&imp->imp_lock);
+ if (imp->imp_state == LUSTRE_IMP_CLOSED) {
+ spin_unlock(&imp->imp_lock);
+ CERROR("can't connect to a closed import\n");
+ return -EINVAL;
+ } else if (imp->imp_state == LUSTRE_IMP_FULL) {
+ spin_unlock(&imp->imp_lock);
+ CERROR("already connected\n");
+ return 0;
+ } else if (imp->imp_state == LUSTRE_IMP_CONNECTING) {
+ spin_unlock(&imp->imp_lock);
+ CERROR("already connecting\n");
+ return -EALREADY;
+ }
+
+ IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CONNECTING);
+
+ imp->imp_conn_cnt++;
+ imp->imp_resend_replay = 0;
+
+ if (!lustre_handle_is_used(&imp->imp_remote_handle))
+ initial_connect = 1;
+ else
+ committed_before_reconnect = imp->imp_peer_committed_transno;
+
+ set_transno = ptlrpc_first_transno(imp,
+ &imp->imp_connect_data.ocd_transno);
+ spin_unlock(&imp->imp_lock);
+
+ rc = import_select_connection(imp);
+ if (rc)
+ GOTO(out, rc);
+
+ rc = sptlrpc_import_sec_adapt(imp, NULL, 0);
+ if (rc)
+ GOTO(out, rc);
+
+ /* Reset connect flags to the originally requested flags, in case
+ * the server is updated on-the-fly we will get the new features. */
+ imp->imp_connect_data.ocd_connect_flags = imp->imp_connect_flags_orig;
+ /* Reset ocd_version each time so the server knows the exact versions */
+ imp->imp_connect_data.ocd_version = LUSTRE_VERSION_CODE;
+ imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
+ imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18;
+
+ rc = obd_reconnect(NULL, imp->imp_obd->obd_self_export, obd,
+ &obd->obd_uuid, &imp->imp_connect_data, NULL);
+ if (rc)
+ GOTO(out, rc);
+
+ request = ptlrpc_request_alloc(imp, &RQF_MDS_CONNECT);
+ if (request == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ rc = ptlrpc_request_bufs_pack(request, LUSTRE_OBD_VERSION,
+ imp->imp_connect_op, bufs, NULL);
+ if (rc) {
+ ptlrpc_request_free(request);
+ GOTO(out, rc);
+ }
+
+ /* Report the rpc service time to the server so that it knows how long
+ * to wait for clients to join recovery */
+ lustre_msg_set_service_time(request->rq_reqmsg,
+ at_timeout2est(request->rq_timeout));
+
+ /* The amount of time we give the server to process the connect req.
+ * import_select_connection will increase the net latency on
+ * repeated reconnect attempts to cover slow networks.
+ * We override/ignore the server rpc completion estimate here,
+ * which may be large if this is a reconnect attempt */
+ request->rq_timeout = INITIAL_CONNECT_TIMEOUT;
+ lustre_msg_set_timeout(request->rq_reqmsg, request->rq_timeout);
+
+ lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_NEXT_VER);
+
+ request->rq_no_resend = request->rq_no_delay = 1;
+ request->rq_send_state = LUSTRE_IMP_CONNECTING;
+ /* Allow a slightly larger reply for future growth compatibility */
+ req_capsule_set_size(&request->rq_pill, &RMF_CONNECT_DATA, RCL_SERVER,
+ sizeof(struct obd_connect_data)+16*sizeof(__u64));
+ ptlrpc_request_set_replen(request);
+ request->rq_interpret_reply = ptlrpc_connect_interpret;
+
+ CLASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
+ aa = ptlrpc_req_async_args(request);
+ memset(aa, 0, sizeof(*aa));
+
+ aa->pcaa_peer_committed = committed_before_reconnect;
+ aa->pcaa_initial_connect = initial_connect;
+
+ if (aa->pcaa_initial_connect) {
+ spin_lock(&imp->imp_lock);
+ imp->imp_replayable = 1;
+ spin_unlock(&imp->imp_lock);
+ lustre_msg_add_op_flags(request->rq_reqmsg,
+ MSG_CONNECT_INITIAL);
+ }
+
+ if (set_transno)
+ lustre_msg_add_op_flags(request->rq_reqmsg,
+ MSG_CONNECT_TRANSNO);
+
+ DEBUG_REQ(D_RPCTRACE, request, "(re)connect request (timeout %d)",
+ request->rq_timeout);
+ ptlrpcd_add_req(request, PDL_POLICY_ROUND, -1);
+ rc = 0;
+out:
+ if (rc != 0)
+ IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON);
+
+ return rc;
+}
+EXPORT_SYMBOL(ptlrpc_connect_import);
+
+static void ptlrpc_maybe_ping_import_soon(struct obd_import *imp)
+{
+ int force_verify;
+
+ spin_lock(&imp->imp_lock);
+ force_verify = imp->imp_force_verify != 0;
+ spin_unlock(&imp->imp_lock);
+
+ if (force_verify)
+ ptlrpc_pinger_wake_up();
+}
+
+static int ptlrpc_busy_reconnect(int rc)
+{
+ return (rc == -EBUSY) || (rc == -EAGAIN);
+}
+
+/**
+ * interpret_reply callback for connect RPCs.
+ * Looks into the returned status of the connect operation and decides
+ * what to do with the import - i.e. enter recovery, promote it to
+ * full state for normal operations, or disconnect it due to an error.
+ */
+static int ptlrpc_connect_interpret(const struct lu_env *env,
+ struct ptlrpc_request *request,
+ void *data, int rc)
+{
+ struct ptlrpc_connect_async_args *aa = data;
+ struct obd_import *imp = request->rq_import;
+ struct client_obd *cli = &imp->imp_obd->u.cli;
+ struct lustre_handle old_hdl;
+ __u64 old_connect_flags;
+ int msg_flags;
+ struct obd_connect_data *ocd;
+ struct obd_export *exp;
+ int ret;
+
+ spin_lock(&imp->imp_lock);
+ if (imp->imp_state == LUSTRE_IMP_CLOSED) {
+ imp->imp_connect_tried = 1;
+ spin_unlock(&imp->imp_lock);
+ return 0;
+ }
+
+ if (rc) {
+ /* if this is a reconnect to a busy export, there is no need to
+ * select a new target for connecting */
+ imp->imp_force_reconnect = ptlrpc_busy_reconnect(rc);
+ spin_unlock(&imp->imp_lock);
+ ptlrpc_maybe_ping_import_soon(imp);
+ GOTO(out, rc);
+ }
+ spin_unlock(&imp->imp_lock);
+
+ LASSERT(imp->imp_conn_current);
+
+ msg_flags = lustre_msg_get_op_flags(request->rq_repmsg);
+
+ ret = req_capsule_get_size(&request->rq_pill, &RMF_CONNECT_DATA,
+ RCL_SERVER);
+ /* server replied obd_connect_data is always bigger */
+ ocd = req_capsule_server_sized_get(&request->rq_pill,
+ &RMF_CONNECT_DATA, ret);
+
+ if (ocd == NULL) {
+ CERROR("%s: no connect data from server\n",
+ imp->imp_obd->obd_name);
+ rc = -EPROTO;
+ GOTO(out, rc);
+ }
+
+ spin_lock(&imp->imp_lock);
+
+ /* All imports are pingable */
+ imp->imp_pingable = 1;
+ imp->imp_force_reconnect = 0;
+ imp->imp_force_verify = 0;
+
+ imp->imp_connect_data = *ocd;
+
+ CDEBUG(D_HA, "%s: connect to target with instance %u\n",
+ imp->imp_obd->obd_name, ocd->ocd_instance);
+ exp = class_conn2export(&imp->imp_dlm_handle);
+
+ spin_unlock(&imp->imp_lock);
+
+ /* check that server granted subset of flags we asked for. */
+ if ((ocd->ocd_connect_flags & imp->imp_connect_flags_orig) !=
+ ocd->ocd_connect_flags) {
+ CERROR("%s: Server didn't granted asked subset of flags: "
+ "asked="LPX64" grranted="LPX64"\n",
+ imp->imp_obd->obd_name,imp->imp_connect_flags_orig,
+ ocd->ocd_connect_flags);
+ GOTO(out, rc = -EPROTO);
+ }
+
+ if (!exp) {
+ /* This could happen if export is cleaned during the
+ connect attempt */
+ CERROR("%s: missing export after connect\n",
+ imp->imp_obd->obd_name);
+ GOTO(out, rc = -ENODEV);
+ }
+ old_connect_flags = exp_connect_flags(exp);
+ exp->exp_connect_data = *ocd;
+ imp->imp_obd->obd_self_export->exp_connect_data = *ocd;
+ class_export_put(exp);
+
+ obd_import_event(imp->imp_obd, imp, IMP_EVENT_OCD);
+
+ if (aa->pcaa_initial_connect) {
+ spin_lock(&imp->imp_lock);
+ if (msg_flags & MSG_CONNECT_REPLAYABLE) {
+ imp->imp_replayable = 1;
+ spin_unlock(&imp->imp_lock);
+ CDEBUG(D_HA, "connected to replayable target: %s\n",
+ obd2cli_tgt(imp->imp_obd));
+ } else {
+ imp->imp_replayable = 0;
+ spin_unlock(&imp->imp_lock);
+ }
+
+ /* If applicable, adjust the imp->imp_msg_magic here
+ * according to the reply flags. */
+
+ imp->imp_remote_handle =
+ *lustre_msg_get_handle(request->rq_repmsg);
+
+ /* Initial connects are allowed for clients with non-random
+ * uuids when servers are in recovery. Simply signal the
+ * server that replay is complete and wait in REPLAY_WAIT. */
+ if (msg_flags & MSG_CONNECT_RECOVERING) {
+ CDEBUG(D_HA, "connect to %s during recovery\n",
+ obd2cli_tgt(imp->imp_obd));
+ IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS);
+ } else {
+ IMPORT_SET_STATE(imp, LUSTRE_IMP_FULL);
+ ptlrpc_activate_import(imp);
+ }
+
+ GOTO(finish, rc = 0);
+ }
+
+ /* Determine what recovery state to move the import to. */
+ if (MSG_CONNECT_RECONNECT & msg_flags) {
+ memset(&old_hdl, 0, sizeof(old_hdl));
+ if (!memcmp(&old_hdl, lustre_msg_get_handle(request->rq_repmsg),
+ sizeof (old_hdl))) {
+ LCONSOLE_WARN("Reconnect to %s (at @%s) failed due "
+ "bad handle "LPX64"\n",
+ obd2cli_tgt(imp->imp_obd),
+ imp->imp_connection->c_remote_uuid.uuid,
+ imp->imp_dlm_handle.cookie);
+ GOTO(out, rc = -ENOTCONN);
+ }
+
+ if (memcmp(&imp->imp_remote_handle,
+ lustre_msg_get_handle(request->rq_repmsg),
+ sizeof(imp->imp_remote_handle))) {
+ int level = msg_flags & MSG_CONNECT_RECOVERING ?
+ D_HA : D_WARNING;
+
+ /* Bug 16611/14775: if the server handle has changed,
+ * that means some sort of disconnection happened.
+ * If the server is not in recovery, that also means it
+ * already erased all of our state because of a previous
+ * eviction. If it is in recovery, we are safe to
+ * participate since we can reestablish all of our state
+ * with the server again. */
+ if ((MSG_CONNECT_RECOVERING & msg_flags)) {
+ CDEBUG(level,"%s@%s changed server handle from "
+ LPX64" to "LPX64
+ " but is still in recovery\n",
+ obd2cli_tgt(imp->imp_obd),
+ imp->imp_connection->c_remote_uuid.uuid,
+ imp->imp_remote_handle.cookie,
+ lustre_msg_get_handle(
+ request->rq_repmsg)->cookie);
+ } else {
+ LCONSOLE_WARN("Evicted from %s (at %s) "
+ "after server handle changed from "
+ LPX64" to "LPX64"\n",
+ obd2cli_tgt(imp->imp_obd),
+ imp->imp_connection->c_remote_uuid.uuid,
+ imp->imp_remote_handle.cookie,
+ lustre_msg_get_handle(
+ request->rq_repmsg)->cookie);
+ }
+
+
+ imp->imp_remote_handle =
+ *lustre_msg_get_handle(request->rq_repmsg);
+
+ if (!(MSG_CONNECT_RECOVERING & msg_flags)) {
+ IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
+ GOTO(finish, rc = 0);
+ }
+
+ } else {
+ CDEBUG(D_HA, "reconnected to %s@%s after partition\n",
+ obd2cli_tgt(imp->imp_obd),
+ imp->imp_connection->c_remote_uuid.uuid);
+ }
+
+ if (imp->imp_invalid) {
+ CDEBUG(D_HA, "%s: reconnected but import is invalid; "
+ "marking evicted\n", imp->imp_obd->obd_name);
+ IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
+ } else if (MSG_CONNECT_RECOVERING & msg_flags) {
+ CDEBUG(D_HA, "%s: reconnected to %s during replay\n",
+ imp->imp_obd->obd_name,
+ obd2cli_tgt(imp->imp_obd));
+
+ spin_lock(&imp->imp_lock);
+ imp->imp_resend_replay = 1;
+ spin_unlock(&imp->imp_lock);
+
+ IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY);
+ } else {
+ IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
+ }
+ } else if ((MSG_CONNECT_RECOVERING & msg_flags) && !imp->imp_invalid) {
+ LASSERT(imp->imp_replayable);
+ imp->imp_remote_handle =
+ *lustre_msg_get_handle(request->rq_repmsg);
+ imp->imp_last_replay_transno = 0;
+ IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY);
+ } else {
+ DEBUG_REQ(D_HA, request, "%s: evicting (reconnect/recover flags"
+ " not set: %x)", imp->imp_obd->obd_name, msg_flags);
+ imp->imp_remote_handle =
+ *lustre_msg_get_handle(request->rq_repmsg);
+ IMPORT_SET_STATE(imp, LUSTRE_IMP_EVICTED);
+ }
+
+ /* Sanity checks for a reconnected import. */
+ if (!(imp->imp_replayable) != !(msg_flags & MSG_CONNECT_REPLAYABLE)) {
+ CERROR("imp_replayable flag does not match server "
+ "after reconnect. We should LBUG right here.\n");
+ }
+
+ if (lustre_msg_get_last_committed(request->rq_repmsg) > 0 &&
+ lustre_msg_get_last_committed(request->rq_repmsg) <
+ aa->pcaa_peer_committed) {
+ CERROR("%s went back in time (transno "LPD64
+ " was previously committed, server now claims "LPD64
+ ")! See https://bugzilla.lustre.org/show_bug.cgi?"
+ "id=9646\n",
+ obd2cli_tgt(imp->imp_obd), aa->pcaa_peer_committed,
+ lustre_msg_get_last_committed(request->rq_repmsg));
+ }
+
+finish:
+ rc = ptlrpc_import_recovery_state_machine(imp);
+ if (rc != 0) {
+ if (rc == -ENOTCONN) {
+ CDEBUG(D_HA, "evicted/aborted by %s@%s during recovery;"
+ "invalidating and reconnecting\n",
+ obd2cli_tgt(imp->imp_obd),
+ imp->imp_connection->c_remote_uuid.uuid);
+ ptlrpc_connect_import(imp);
+ imp->imp_connect_tried = 1;
+ return 0;
+ }
+ } else {
+
+ spin_lock(&imp->imp_lock);
+ list_del(&imp->imp_conn_current->oic_item);
+ list_add(&imp->imp_conn_current->oic_item,
+ &imp->imp_conn_list);
+ imp->imp_last_success_conn =
+ imp->imp_conn_current->oic_last_attempt;
+
+ spin_unlock(&imp->imp_lock);
+
+ if (!ocd->ocd_ibits_known &&
+ ocd->ocd_connect_flags & OBD_CONNECT_IBITS)
+ CERROR("Inodebits aware server returned zero compatible"
+ " bits?\n");
+
+ if ((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
+ (ocd->ocd_version > LUSTRE_VERSION_CODE +
+ LUSTRE_VERSION_OFFSET_WARN ||
+ ocd->ocd_version < LUSTRE_VERSION_CODE -
+ LUSTRE_VERSION_OFFSET_WARN)) {
+ /* Sigh, some compilers do not like #ifdef in the middle
+ of macro arguments */
+ const char *older = "older. Consider upgrading server "
+ "or downgrading client";
+ const char *newer = "newer than client version. "
+ "Consider upgrading client";
+
+ LCONSOLE_WARN("Server %s version (%d.%d.%d.%d) "
+ "is much %s (%s)\n",
+ obd2cli_tgt(imp->imp_obd),
+ OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
+ OBD_OCD_VERSION_MINOR(ocd->ocd_version),
+ OBD_OCD_VERSION_PATCH(ocd->ocd_version),
+ OBD_OCD_VERSION_FIX(ocd->ocd_version),
+ ocd->ocd_version > LUSTRE_VERSION_CODE ?
+ newer : older, LUSTRE_VERSION_STRING);
+ }
+
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0)
+ /* Check if server has LU-1252 fix applied to not always swab
+ * the IR MNE entries. Do this only once per connection. This
+ * fixup is version-limited, because we don't want to carry the
+ * OBD_CONNECT_MNE_SWAB flag around forever, just so long as we
+ * need interop with unpatched 2.2 servers. For newer servers,
+ * the client will do MNE swabbing only as needed. LU-1644 */
+ if (unlikely((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
+ !(ocd->ocd_connect_flags & OBD_CONNECT_MNE_SWAB) &&
+ OBD_OCD_VERSION_MAJOR(ocd->ocd_version) == 2 &&
+ OBD_OCD_VERSION_MINOR(ocd->ocd_version) == 2 &&
+ OBD_OCD_VERSION_PATCH(ocd->ocd_version) < 55 &&
+ strcmp(imp->imp_obd->obd_type->typ_name,
+ LUSTRE_MGC_NAME) == 0))
+ imp->imp_need_mne_swab = 1;
+ else /* clear if server was upgraded since last connect */
+ imp->imp_need_mne_swab = 0;
+#else
+#warning "LU-1644: Remove old OBD_CONNECT_MNE_SWAB fixup and imp_need_mne_swab"
+#endif
+
+ if (ocd->ocd_connect_flags & OBD_CONNECT_CKSUM) {
+ /* We sent to the server ocd_cksum_types with bits set
+ * for algorithms we understand. The server masked off
+ * the checksum types it doesn't support */
+ if ((ocd->ocd_cksum_types &
+ cksum_types_supported_client()) == 0) {
+ LCONSOLE_WARN("The negotiation of the checksum "
+ "alogrithm to use with server %s "
+ "failed (%x/%x), disabling "
+ "checksums\n",
+ obd2cli_tgt(imp->imp_obd),
+ ocd->ocd_cksum_types,
+ cksum_types_supported_client());
+ cli->cl_checksum = 0;
+ cli->cl_supp_cksum_types = OBD_CKSUM_ADLER;
+ } else {
+ cli->cl_supp_cksum_types = ocd->ocd_cksum_types;
+ }
+ } else {
+ /* The server does not support OBD_CONNECT_CKSUM.
+ * Enforce ADLER for backward compatibility. */
+ cli->cl_supp_cksum_types = OBD_CKSUM_ADLER;
+ }
+ cli->cl_cksum_type = cksum_type_select(cli->cl_supp_cksum_types);
+
+ if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
+ cli->cl_max_pages_per_rpc =
+ min(ocd->ocd_brw_size >> PAGE_CACHE_SHIFT,
+ cli->cl_max_pages_per_rpc);
+ else if (imp->imp_connect_op == MDS_CONNECT ||
+ imp->imp_connect_op == MGS_CONNECT)
+ cli->cl_max_pages_per_rpc = 1;
+
+ /* Reset ns_connect_flags only for the initial connect. It might be
+ * changed while the filesystem is in use, and resetting it on a
+ * reconnect would lose user settings made earlier, such as
+ * disabling lru_resize. */
+ if (old_connect_flags != exp_connect_flags(exp) ||
+ aa->pcaa_initial_connect) {
+ CDEBUG(D_HA, "%s: Resetting ns_connect_flags to server "
+ "flags: "LPX64"\n", imp->imp_obd->obd_name,
+ ocd->ocd_connect_flags);
+ imp->imp_obd->obd_namespace->ns_connect_flags =
+ ocd->ocd_connect_flags;
+ imp->imp_obd->obd_namespace->ns_orig_connect_flags =
+ ocd->ocd_connect_flags;
+ }
+
+ if ((ocd->ocd_connect_flags & OBD_CONNECT_AT) &&
+ (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
+ /* We need a per-message support flag, because
+ a. we don't know if the incoming connect reply
+ supports AT or not (in reply_in_callback)
+ until we unpack it.
+ b. a failed-over server means the export and flags are gone
+ (in ptlrpc_send_reply).
+ Can only be set when we know AT is supported at
+ both ends */
+ imp->imp_msghdr_flags |= MSGHDR_AT_SUPPORT;
+ else
+ imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
+
+ if ((ocd->ocd_connect_flags & OBD_CONNECT_FULL20) &&
+ (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
+ imp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18;
+ else
+ imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18;
+
+ LASSERT((cli->cl_max_pages_per_rpc <= PTLRPC_MAX_BRW_PAGES) &&
+ (cli->cl_max_pages_per_rpc > 0));
+ }
+
+out:
+ imp->imp_connect_tried = 1;
+
+ if (rc != 0) {
+ IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON);
+ if (rc == -EACCES) {
+ /*
+ * Give up trying to reconnect
+ * EACCES means client has no permission for connection
+ */
+ imp->imp_obd->obd_no_recov = 1;
+ ptlrpc_deactivate_import(imp);
+ }
+
+ if (rc == -EPROTO) {
+ struct obd_connect_data *ocd;
+
+ /* reply message might not be ready */
+ if (request->rq_repmsg == NULL)
+ return -EPROTO;
+
+ ocd = req_capsule_server_get(&request->rq_pill,
+ &RMF_CONNECT_DATA);
+ if (ocd &&
+ (ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
+ (ocd->ocd_version != LUSTRE_VERSION_CODE)) {
+ /* Actually servers are only supposed to refuse
+ connection from liblustre clients, so we should
+ never see this from VFS context */
+ LCONSOLE_ERROR_MSG(0x16a, "Server %s version "
+ "(%d.%d.%d.%d)"
+ " refused connection from this client "
+ "with an incompatible version (%s). "
+ "Client must be recompiled\n",
+ obd2cli_tgt(imp->imp_obd),
+ OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
+ OBD_OCD_VERSION_MINOR(ocd->ocd_version),
+ OBD_OCD_VERSION_PATCH(ocd->ocd_version),
+ OBD_OCD_VERSION_FIX(ocd->ocd_version),
+ LUSTRE_VERSION_STRING);
+ ptlrpc_deactivate_import(imp);
+ IMPORT_SET_STATE(imp, LUSTRE_IMP_CLOSED);
+ }
+ return -EPROTO;
+ }
+
+ ptlrpc_maybe_ping_import_soon(imp);
+
+ CDEBUG(D_HA, "recovery of %s on %s failed (%d)\n",
+ obd2cli_tgt(imp->imp_obd),
+ (char *)imp->imp_connection->c_remote_uuid.uuid, rc);
+ }
+
+ wake_up_all(&imp->imp_recovery_waitq);
+ return rc;
+}
+
+/**
+ * interpret callback for "completed replay" RPCs.
+ * \see signal_completed_replay
+ */
+static int completed_replay_interpret(const struct lu_env *env,
+ struct ptlrpc_request *req,
+ void * data, int rc)
+{
+ atomic_dec(&req->rq_import->imp_replay_inflight);
+ if (req->rq_status == 0 &&
+ !req->rq_import->imp_vbr_failed) {
+ ptlrpc_import_recovery_state_machine(req->rq_import);
+ } else {
+ if (req->rq_import->imp_vbr_failed) {
+ CDEBUG(D_WARNING,
+ "%s: version recovery fails, reconnecting\n",
+ req->rq_import->imp_obd->obd_name);
+ } else {
+ CDEBUG(D_HA, "%s: LAST_REPLAY message error: %d, "
+ "reconnecting\n",
+ req->rq_import->imp_obd->obd_name,
+ req->rq_status);
+ }
+ ptlrpc_connect_import(req->rq_import);
+ }
+
+ return 0;
+}
+
+/**
+ * Let server know that we have no requests to replay anymore.
+ * Achieved by just sending a PING request
+ */
+static int signal_completed_replay(struct obd_import *imp)
+{
+ struct ptlrpc_request *req;
+
+ if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_FINISH_REPLAY)))
+ return 0;
+
+ LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
+ atomic_inc(&imp->imp_replay_inflight);
+
+ req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING, LUSTRE_OBD_VERSION,
+ OBD_PING);
+ if (req == NULL) {
+ atomic_dec(&imp->imp_replay_inflight);
+ return -ENOMEM;
+ }
+
+ ptlrpc_request_set_replen(req);
+ req->rq_send_state = LUSTRE_IMP_REPLAY_WAIT;
+ lustre_msg_add_flags(req->rq_reqmsg,
+ MSG_LOCK_REPLAY_DONE | MSG_REQ_REPLAY_DONE);
+ if (AT_OFF)
+ req->rq_timeout *= 3;
+ req->rq_interpret_reply = completed_replay_interpret;
+
+ ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+ return 0;
+}
+
+/**
+ * In kernel code all import invalidation happens in its own
+ * separate thread, so that whatever application happened to encounter
+ * a problem could still be killed or otherwise continue
+ */
+static int ptlrpc_invalidate_import_thread(void *data)
+{
+ struct obd_import *imp = data;
+
+ unshare_fs_struct();
+
+ CDEBUG(D_HA, "thread invalidate import %s to %s@%s\n",
+ imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
+ imp->imp_connection->c_remote_uuid.uuid);
+
+ ptlrpc_invalidate_import(imp);
+
+ if (obd_dump_on_eviction) {
+ CERROR("dump the log upon eviction\n");
+ libcfs_debug_dumplog();
+ }
+
+ IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
+ ptlrpc_import_recovery_state_machine(imp);
+
+ class_import_put(imp);
+ return 0;
+}
+
+/**
+ * This is the state machine for client-side recovery on import.
+ *
+ * Typically there are two possible paths. If we connect to a server that is
+ * not in recovery, we just enter the IMP_EVICTED state, invalidate our import
+ * state and reconnect from scratch.
+ * If we connect to a server that is in recovery, we enter the IMP_REPLAY
+ * import state. We go through our list of requests to replay and send them
+ * to the server one by one.
+ * After sending all requests from the list we change the import state to
+ * IMP_REPLAY_LOCKS and re-request all the locks we believe we have from the
+ * server, as well as any locks we do not yet hold, and wait for the server
+ * to grant them.
+ * After that we send a special "replay completed" request and change the
+ * import state to IMP_REPLAY_WAIT.
+ * Upon receiving the reply to that "replay completed" RPC we enter the
+ * IMP_RECOVER state and resend all requests from the sending list.
+ * After that we promote the import to FULL state, send all delayed requests,
+ * and the import is fully operational.
+ *
+ */
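+/*
+ * Illustrative summary of the flow above; the transitions mirror the code in
+ * this function:
+ *
+ *   EVICTED      -> "ll_imp_inval" thread invalidates the import   -> RECOVER
+ *   REPLAY       -> ptlrpc_replay_next() until the replay list drains,
+ *                   then ldlm_replay_locks() is issued             -> REPLAY_LOCKS
+ *   REPLAY_LOCKS -> signal_completed_replay() once lock replay ends -> REPLAY_WAIT
+ *   REPLAY_WAIT  -> reply to the "replay completed" PING arrives   -> RECOVER
+ *   RECOVER      -> ptlrpc_resend(), ptlrpc_activate_import()      -> FULL
+ */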
+int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
+{
+ int rc = 0;
+ int inflight;
+ char *target_start;
+ int target_len;
+
+ if (imp->imp_state == LUSTRE_IMP_EVICTED) {
+ deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
+ &target_start, &target_len);
+ /* Don't care about MGC eviction */
+ if (strcmp(imp->imp_obd->obd_type->typ_name,
+ LUSTRE_MGC_NAME) != 0) {
+ LCONSOLE_ERROR_MSG(0x167, "%s: This client was evicted "
+ "by %.*s; in progress operations "
+ "using this service will fail.\n",
+ imp->imp_obd->obd_name, target_len,
+ target_start);
+ }
+ CDEBUG(D_HA, "evicted from %s@%s; invalidating\n",
+ obd2cli_tgt(imp->imp_obd),
+ imp->imp_connection->c_remote_uuid.uuid);
+ /* reset vbr_failed flag upon eviction */
+ spin_lock(&imp->imp_lock);
+ imp->imp_vbr_failed = 0;
+ spin_unlock(&imp->imp_lock);
+
+ {
+ struct task_struct *task;
+ /* bug 17802: XXX client_disconnect_export vs connect request
+ * race. If the client is evicted at this point, the invalidate
+ * thread would be started without a reference to the import, and
+ * the import could be freed at the same time. */
+ class_import_get(imp);
+ task = kthread_run(ptlrpc_invalidate_import_thread, imp,
+ "ll_imp_inval");
+ if (IS_ERR(task)) {
+ class_import_put(imp);
+ CERROR("error starting invalidate thread: %d\n", rc);
+ rc = PTR_ERR(task);
+ } else {
+ rc = 0;
+ }
+ return rc;
+ }
+ }
+
+ if (imp->imp_state == LUSTRE_IMP_REPLAY) {
+ CDEBUG(D_HA, "replay requested by %s\n",
+ obd2cli_tgt(imp->imp_obd));
+ rc = ptlrpc_replay_next(imp, &inflight);
+ if (inflight == 0 &&
+ atomic_read(&imp->imp_replay_inflight) == 0) {
+ IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS);
+ rc = ldlm_replay_locks(imp);
+ if (rc)
+ GOTO(out, rc);
+ }
+ rc = 0;
+ }
+
+ if (imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS) {
+ if (atomic_read(&imp->imp_replay_inflight) == 0) {
+ IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_WAIT);
+ rc = signal_completed_replay(imp);
+ if (rc)
+ GOTO(out, rc);
+ }
+
+ }
+
+ if (imp->imp_state == LUSTRE_IMP_REPLAY_WAIT) {
+ if (atomic_read(&imp->imp_replay_inflight) == 0) {
+ IMPORT_SET_STATE(imp, LUSTRE_IMP_RECOVER);
+ }
+ }
+
+ if (imp->imp_state == LUSTRE_IMP_RECOVER) {
+ CDEBUG(D_HA, "reconnected to %s@%s\n",
+ obd2cli_tgt(imp->imp_obd),
+ imp->imp_connection->c_remote_uuid.uuid);
+
+ rc = ptlrpc_resend(imp);
+ if (rc)
+ GOTO(out, rc);
+ IMPORT_SET_STATE(imp, LUSTRE_IMP_FULL);
+ ptlrpc_activate_import(imp);
+
+ deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
+ &target_start, &target_len);
+ LCONSOLE_INFO("%s: Connection restored to %.*s (at %s)\n",
+ imp->imp_obd->obd_name,
+ target_len, target_start,
+ libcfs_nid2str(imp->imp_connection->c_peer.nid));
+ }
+
+ if (imp->imp_state == LUSTRE_IMP_FULL) {
+ wake_up_all(&imp->imp_recovery_waitq);
+ ptlrpc_wake_delayed(imp);
+ }
+
+out:
+ return rc;
+}
+
+int ptlrpc_disconnect_import(struct obd_import *imp, int noclose)
+{
+ struct ptlrpc_request *req;
+ int rq_opc, rc = 0;
+ int nowait = imp->imp_obd->obd_force;
+
+ if (nowait)
+ GOTO(set_state, rc);
+
+ switch (imp->imp_connect_op) {
+ case OST_CONNECT: rq_opc = OST_DISCONNECT; break;
+ case MDS_CONNECT: rq_opc = MDS_DISCONNECT; break;
+ case MGS_CONNECT: rq_opc = MGS_DISCONNECT; break;
+ default:
+ CERROR("don't know how to disconnect from %s (connect_op %d)\n",
+ obd2cli_tgt(imp->imp_obd), imp->imp_connect_op);
+ return -EINVAL;
+ }
+
+ if (ptlrpc_import_in_recovery(imp)) {
+ struct l_wait_info lwi;
+ cfs_duration_t timeout;
+
+
+ if (AT_OFF) {
+ if (imp->imp_server_timeout)
+ timeout = cfs_time_seconds(obd_timeout / 2);
+ else
+ timeout = cfs_time_seconds(obd_timeout);
+ } else {
+ int idx = import_at_get_index(imp,
+ imp->imp_client->cli_request_portal);
+ timeout = cfs_time_seconds(
+ at_get(&imp->imp_at.iat_service_estimate[idx]));
+ }
+
+ lwi = LWI_TIMEOUT_INTR(cfs_timeout_cap(timeout),
+ back_to_sleep, LWI_ON_SIGNAL_NOOP, NULL);
+ rc = l_wait_event(imp->imp_recovery_waitq,
+ !ptlrpc_import_in_recovery(imp), &lwi);
+
+ }
+
+ spin_lock(&imp->imp_lock);
+ if (imp->imp_state != LUSTRE_IMP_FULL)
+ GOTO(out, 0);
+
+ spin_unlock(&imp->imp_lock);
+
+ req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_DISCONNECT,
+ LUSTRE_OBD_VERSION, rq_opc);
+ if (req) {
+ /* We are disconnecting; do not retry the DISCONNECT RPC if
+ * it fails. We can get through the above with a down server
+ * if the client doesn't know the server is gone yet. */
+ req->rq_no_resend = 1;
+
+ /* We want client umounts to happen quickly, no matter the
+ server state... */
+ req->rq_timeout = min_t(int, req->rq_timeout,
+ INITIAL_CONNECT_TIMEOUT);
+
+ IMPORT_SET_STATE(imp, LUSTRE_IMP_CONNECTING);
+ req->rq_send_state = LUSTRE_IMP_CONNECTING;
+ ptlrpc_request_set_replen(req);
+ rc = ptlrpc_queue_wait(req);
+ ptlrpc_req_finished(req);
+ }
+
+set_state:
+ spin_lock(&imp->imp_lock);
+out:
+ if (noclose)
+ IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
+ else
+ IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
+ memset(&imp->imp_remote_handle, 0, sizeof(imp->imp_remote_handle));
+ spin_unlock(&imp->imp_lock);
+
+ return rc;
+}
+EXPORT_SYMBOL(ptlrpc_disconnect_import);
+
+void ptlrpc_cleanup_imp(struct obd_import *imp)
+{
+ spin_lock(&imp->imp_lock);
+ IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
+ imp->imp_generation++;
+ spin_unlock(&imp->imp_lock);
+ ptlrpc_abort_inflight(imp);
+}
+EXPORT_SYMBOL(ptlrpc_cleanup_imp);
+
+/* Adaptive Timeout utils */
+extern unsigned int at_min, at_max, at_history;
+
+/* Bin into timeslices using AT_BINS bins.
+ This gives us the max over the last binlimit*AT_BINS seconds without
+ storing every sample, while still smoothing out a return to normalcy
+ from a slow response.
+ (E.g. remember the maximum latency in each minute of the last 4 minutes.) */
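+/* Worked example (illustrative; at_history = 600 and AT_BINS = 4 are assumed
+   values, not guaranteed by this code): binlimit = max(600 / 4, 1) = 150s.
+   A sample arriving 320s after at_binstart gives shift = 320 / 150 = 2, so
+   the history slides over by two bins, the new value lands in bin 0, and
+   at_binstart advances by 2 * 150 = 300s. */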
+int at_measured(struct adaptive_timeout *at, unsigned int val)
+{
+ unsigned int old = at->at_current;
+ time_t now = cfs_time_current_sec();
+ time_t binlimit = max_t(time_t, at_history / AT_BINS, 1);
+
+ LASSERT(at);
+ CDEBUG(D_OTHER, "add %u to %p time=%lu v=%u (%u %u %u %u)\n",
+ val, at, now - at->at_binstart, at->at_current,
+ at->at_hist[0], at->at_hist[1], at->at_hist[2], at->at_hist[3]);
+
+ if (val == 0)
+ /* 0's don't count, because we never want our timeout to
+ drop to 0, and because 0 could mean an error */
+ return 0;
+
+ spin_lock(&at->at_lock);
+
+ if (unlikely(at->at_binstart == 0)) {
+ /* Special case to remove default from history */
+ at->at_current = val;
+ at->at_worst_ever = val;
+ at->at_worst_time = now;
+ at->at_hist[0] = val;
+ at->at_binstart = now;
+ } else if (now - at->at_binstart < binlimit) {
+ /* in bin 0 */
+ at->at_hist[0] = max(val, at->at_hist[0]);
+ at->at_current = max(val, at->at_current);
+ } else {
+ int i, shift;
+ unsigned int maxv = val;
+ /* move bins over */
+ shift = (now - at->at_binstart) / binlimit;
+ LASSERT(shift > 0);
+ for (i = AT_BINS - 1; i >= 0; i--) {
+ if (i >= shift) {
+ at->at_hist[i] = at->at_hist[i - shift];
+ maxv = max(maxv, at->at_hist[i]);
+ } else {
+ at->at_hist[i] = 0;
+ }
+ }
+ at->at_hist[0] = val;
+ at->at_current = maxv;
+ at->at_binstart += shift * binlimit;
+ }
+
+ if (at->at_current > at->at_worst_ever) {
+ at->at_worst_ever = at->at_current;
+ at->at_worst_time = now;
+ }
+
+ if (at->at_flags & AT_FLG_NOHIST)
+ /* Only keep last reported val; keeping the rest of the history
+ for proc only */
+ at->at_current = val;
+
+ if (at_max > 0)
+ at->at_current = min(at->at_current, at_max);
+ at->at_current = max(at->at_current, at_min);
+
+ if (at->at_current != old)
+ CDEBUG(D_OTHER, "AT %p change: old=%u new=%u delta=%d "
+ "(val=%u) hist %u %u %u %u\n", at,
+ old, at->at_current, at->at_current - old, val,
+ at->at_hist[0], at->at_hist[1], at->at_hist[2],
+ at->at_hist[3]);
+
+ /* if we changed, report the old value */
+ old = (at->at_current != old) ? old : 0;
+
+ spin_unlock(&at->at_lock);
+ return old;
+}
+
+/* Find the imp_at index for a given portal; assign if space available */
+int import_at_get_index(struct obd_import *imp, int portal)
+{
+ struct imp_at *at = &imp->imp_at;
+ int i;
+
+ for (i = 0; i < IMP_AT_MAX_PORTALS; i++) {
+ if (at->iat_portal[i] == portal)
+ return i;
+ if (at->iat_portal[i] == 0)
+ /* unused */
+ break;
+ }
+
+ /* Not found in list, add it under a lock */
+ spin_lock(&imp->imp_lock);
+
+ /* Check unused under lock */
+ for (; i < IMP_AT_MAX_PORTALS; i++) {
+ if (at->iat_portal[i] == portal)
+ goto out;
+ if (at->iat_portal[i] == 0)
+ /* unused */
+ break;
+ }
+
+ /* Not enough portals? */
+ LASSERT(i < IMP_AT_MAX_PORTALS);
+
+ at->iat_portal[i] = portal;
+out:
+ spin_unlock(&imp->imp_lock);
+ return i;
+}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
new file mode 100644
index 0000000..2f55ce2
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
@@ -0,0 +1,2396 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ptlrpc/layout.c
+ *
+ * Request layout (capsule/pill) definitions.
+ *
+ * Author: Nikita Danilov <nikita@clusterfs.com>
+ */
+/*
+ * This file contains the "capsule/pill" abstraction layered above PTLRPC.
+ *
+ * Every struct ptlrpc_request contains a "pill", which points to a description
+ * of the format that the request conforms to.
+ */
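+/*
+ * Illustrative sketch: a reply handler typically pulls a typed field out of
+ * its pill, e.g.
+ *
+ *	ocd = req_capsule_server_get(&req->rq_pill, &RMF_CONNECT_DATA);
+ *
+ * where the request was packed against a format such as RQF_MDS_CONNECT,
+ * whose server-side field list includes RMF_CONNECT_DATA.
+ */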
+
+#if !defined(__REQ_LAYOUT_USER__)
+
+#define DEBUG_SUBSYSTEM S_RPC
+
+#include <linux/module.h>
+
+/* LUSTRE_VERSION_CODE */
+#include <lustre_ver.h>
+
+#include <obd_support.h>
+/* lustre_swab_mdt_body */
+#include <lustre/lustre_idl.h>
+/* obd2cli_tgt() (required by DEBUG_REQ()) */
+#include <obd.h>
+
+/* __REQ_LAYOUT_USER__ */
+#endif
+/* struct ptlrpc_request, lustre_msg* */
+#include <lustre_req_layout.h>
+#include <lustre_update.h>
+#include <lustre_acl.h>
+#include <lustre_debug.h>
+
+/*
+ * RQFs (see below) refer to two struct req_msg_field arrays describing the
+ * client request and server reply, respectively.
+ */
+/* empty set of fields... for suitable definition of emptiness. */
+static const struct req_msg_field *empty[] = {
+ &RMF_PTLRPC_BODY
+};
+
+static const struct req_msg_field *mgs_target_info_only[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_MGS_TARGET_INFO
+};
+
+static const struct req_msg_field *mgs_set_info[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_MGS_SEND_PARAM
+};
+
+static const struct req_msg_field *mgs_config_read_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_MGS_CONFIG_BODY
+};
+
+static const struct req_msg_field *mgs_config_read_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_MGS_CONFIG_RES
+};
+
+static const struct req_msg_field *log_cancel_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_LOGCOOKIES
+};
+
+static const struct req_msg_field *mdt_body_only[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_MDT_BODY
+};
+
+static const struct req_msg_field *mdt_body_capa[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_MDT_BODY,
+ &RMF_CAPA1
+};
+
+static const struct req_msg_field *quotactl_only[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_OBD_QUOTACTL
+};
+
+static const struct req_msg_field *quota_body_only[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_QUOTA_BODY
+};
+
+static const struct req_msg_field *ldlm_intent_quota_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_DLM_REQ,
+ &RMF_LDLM_INTENT,
+ &RMF_QUOTA_BODY
+};
+
+static const struct req_msg_field *ldlm_intent_quota_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_DLM_REP,
+ &RMF_DLM_LVB,
+ &RMF_QUOTA_BODY
+};
+
+static const struct req_msg_field *mdt_close_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_MDT_EPOCH,
+ &RMF_REC_REINT,
+ &RMF_CAPA1
+};
+
+static const struct req_msg_field *obd_statfs_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_OBD_STATFS
+};
+
+static const struct req_msg_field *seq_query_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_SEQ_OPC,
+ &RMF_SEQ_RANGE
+};
+
+static const struct req_msg_field *seq_query_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_SEQ_RANGE
+};
+
+static const struct req_msg_field *fld_query_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_FLD_OPC,
+ &RMF_FLD_MDFLD
+};
+
+static const struct req_msg_field *fld_query_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_FLD_MDFLD
+};
+
+static const struct req_msg_field *mds_getattr_name_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_MDT_BODY,
+ &RMF_CAPA1,
+ &RMF_NAME
+};
+
+static const struct req_msg_field *mds_reint_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_REC_REINT
+};
+
+static const struct req_msg_field *mds_reint_create_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_REC_REINT,
+ &RMF_CAPA1,
+ &RMF_NAME
+};
+
+static const struct req_msg_field *mds_reint_create_slave_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_REC_REINT,
+ &RMF_CAPA1,
+ &RMF_NAME,
+ &RMF_EADATA,
+ &RMF_DLM_REQ
+};
+
+static const struct req_msg_field *mds_reint_create_rmt_acl_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_REC_REINT,
+ &RMF_CAPA1,
+ &RMF_NAME,
+ &RMF_EADATA,
+ &RMF_DLM_REQ
+};
+
+static const struct req_msg_field *mds_reint_create_sym_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_REC_REINT,
+ &RMF_CAPA1,
+ &RMF_NAME,
+ &RMF_SYMTGT,
+ &RMF_DLM_REQ
+};
+
+static const struct req_msg_field *mds_reint_open_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_REC_REINT,
+ &RMF_CAPA1,
+ &RMF_CAPA2,
+ &RMF_NAME,
+ &RMF_EADATA
+};
+
+static const struct req_msg_field *mds_reint_open_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_MDT_BODY,
+ &RMF_MDT_MD,
+ &RMF_ACL,
+ &RMF_CAPA1,
+ &RMF_CAPA2
+};
+
+static const struct req_msg_field *mds_reint_unlink_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_REC_REINT,
+ &RMF_CAPA1,
+ &RMF_NAME,
+ &RMF_DLM_REQ
+};
+
+static const struct req_msg_field *mds_reint_link_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_REC_REINT,
+ &RMF_CAPA1,
+ &RMF_CAPA2,
+ &RMF_NAME,
+ &RMF_DLM_REQ
+};
+
+static const struct req_msg_field *mds_reint_rename_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_REC_REINT,
+ &RMF_CAPA1,
+ &RMF_CAPA2,
+ &RMF_NAME,
+ &RMF_SYMTGT,
+ &RMF_DLM_REQ
+};
+
+static const struct req_msg_field *mds_last_unlink_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_MDT_BODY,
+ &RMF_MDT_MD,
+ &RMF_LOGCOOKIES,
+ &RMF_CAPA1,
+ &RMF_CAPA2
+};
+
+static const struct req_msg_field *mds_reint_setattr_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_REC_REINT,
+ &RMF_CAPA1,
+ &RMF_MDT_EPOCH,
+ &RMF_EADATA,
+ &RMF_LOGCOOKIES,
+ &RMF_DLM_REQ
+};
+
+static const struct req_msg_field *mds_reint_setxattr_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_REC_REINT,
+ &RMF_CAPA1,
+ &RMF_NAME,
+ &RMF_EADATA
+};
+
+static const struct req_msg_field *mdt_swap_layouts[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_MDT_BODY,
+ &RMF_SWAP_LAYOUTS,
+ &RMF_CAPA1,
+ &RMF_CAPA2,
+ &RMF_DLM_REQ
+};
+
+static const struct req_msg_field *obd_connect_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_TGTUUID,
+ &RMF_CLUUID,
+ &RMF_CONN,
+ &RMF_CONNECT_DATA
+};
+
+static const struct req_msg_field *obd_connect_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_CONNECT_DATA
+};
+
+static const struct req_msg_field *obd_set_info_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_SETINFO_KEY,
+ &RMF_SETINFO_VAL
+};
+
+static const struct req_msg_field *ost_grant_shrink_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_SETINFO_KEY,
+ &RMF_OST_BODY
+};
+
+static const struct req_msg_field *mds_getinfo_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_GETINFO_KEY,
+ &RMF_GETINFO_VALLEN
+};
+
+static const struct req_msg_field *mds_getinfo_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_GETINFO_VAL,
+};
+
+static const struct req_msg_field *ldlm_enqueue_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_DLM_REQ
+};
+
+static const struct req_msg_field *ldlm_enqueue_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_DLM_REP
+};
+
+static const struct req_msg_field *ldlm_enqueue_lvb_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_DLM_REP,
+ &RMF_DLM_LVB
+};
+
+static const struct req_msg_field *ldlm_cp_callback_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_DLM_REQ,
+ &RMF_DLM_LVB
+};
+
+static const struct req_msg_field *ldlm_gl_callback_desc_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_DLM_REQ,
+ &RMF_DLM_GL_DESC
+};
+
+static const struct req_msg_field *ldlm_gl_callback_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_DLM_LVB
+};
+
+static const struct req_msg_field *ldlm_intent_basic_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_DLM_REQ,
+ &RMF_LDLM_INTENT,
+};
+
+static const struct req_msg_field *ldlm_intent_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_DLM_REQ,
+ &RMF_LDLM_INTENT,
+ &RMF_REC_REINT
+};
+
+static const struct req_msg_field *ldlm_intent_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_DLM_REP,
+ &RMF_MDT_BODY,
+ &RMF_MDT_MD,
+ &RMF_ACL
+};
+
+static const struct req_msg_field *ldlm_intent_layout_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_DLM_REQ,
+ &RMF_LDLM_INTENT,
+ &RMF_LAYOUT_INTENT,
+ &RMF_EADATA /* for new layout to be set up */
+};
+static const struct req_msg_field *ldlm_intent_open_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_DLM_REP,
+ &RMF_MDT_BODY,
+ &RMF_MDT_MD,
+ &RMF_ACL,
+ &RMF_CAPA1,
+ &RMF_CAPA2
+};
+
+static const struct req_msg_field *ldlm_intent_getattr_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_DLM_REQ,
+ &RMF_LDLM_INTENT,
+ &RMF_MDT_BODY, /* coincides with mds_getattr_name_client[] */
+ &RMF_CAPA1,
+ &RMF_NAME
+};
+
+static const struct req_msg_field *ldlm_intent_getattr_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_DLM_REP,
+ &RMF_MDT_BODY,
+ &RMF_MDT_MD,
+ &RMF_ACL,
+ &RMF_CAPA1
+};
+
+static const struct req_msg_field *ldlm_intent_create_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_DLM_REQ,
+ &RMF_LDLM_INTENT,
+ &RMF_REC_REINT, /* coincides with mds_reint_create_client[] */
+ &RMF_CAPA1,
+ &RMF_NAME,
+ &RMF_EADATA
+};
+
+static const struct req_msg_field *ldlm_intent_open_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_DLM_REQ,
+ &RMF_LDLM_INTENT,
+ &RMF_REC_REINT, /* coincides with mds_reint_open_client[] */
+ &RMF_CAPA1,
+ &RMF_CAPA2,
+ &RMF_NAME,
+ &RMF_EADATA
+};
+
+static const struct req_msg_field *ldlm_intent_unlink_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_DLM_REQ,
+ &RMF_LDLM_INTENT,
+ &RMF_REC_REINT, /* coincides with mds_reint_unlink_client[] */
+ &RMF_CAPA1,
+ &RMF_NAME
+};
+
+static const struct req_msg_field *mds_getxattr_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_MDT_BODY,
+ &RMF_CAPA1,
+ &RMF_NAME,
+ &RMF_EADATA
+};
+
+static const struct req_msg_field *mds_getxattr_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_MDT_BODY,
+ &RMF_EADATA
+};
+
+static const struct req_msg_field *mds_getattr_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_MDT_BODY,
+ &RMF_MDT_MD,
+ &RMF_ACL,
+ &RMF_CAPA1,
+ &RMF_CAPA2
+};
+
+static const struct req_msg_field *mds_setattr_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_MDT_BODY,
+ &RMF_MDT_MD,
+ &RMF_ACL,
+ &RMF_CAPA1,
+ &RMF_CAPA2
+};
+
+static const struct req_msg_field *mds_update_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_UPDATE,
+};
+
+static const struct req_msg_field *mds_update_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_UPDATE_REPLY,
+};
+
+static const struct req_msg_field *llog_origin_handle_create_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_LLOGD_BODY,
+ &RMF_NAME
+};
+
+static const struct req_msg_field *llogd_body_only[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_LLOGD_BODY
+};
+
+static const struct req_msg_field *llog_log_hdr_only[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_LLOG_LOG_HDR
+};
+
+static const struct req_msg_field *llogd_conn_body_only[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_LLOGD_CONN_BODY
+};
+
+static const struct req_msg_field *llog_origin_handle_next_block_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_LLOGD_BODY,
+ &RMF_EADATA
+};
+
+static const struct req_msg_field *obd_idx_read_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_IDX_INFO
+};
+
+static const struct req_msg_field *obd_idx_read_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_IDX_INFO
+};
+
+static const struct req_msg_field *ost_body_only[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_OST_BODY
+};
+
+static const struct req_msg_field *ost_body_capa[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_OST_BODY,
+ &RMF_CAPA1
+};
+
+static const struct req_msg_field *ost_destroy_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_OST_BODY,
+ &RMF_DLM_REQ,
+ &RMF_CAPA1
+};
+
+
+static const struct req_msg_field *ost_brw_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_OST_BODY,
+ &RMF_OBD_IOOBJ,
+ &RMF_NIOBUF_REMOTE,
+ &RMF_CAPA1
+};
+
+static const struct req_msg_field *ost_brw_read_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_OST_BODY
+};
+
+static const struct req_msg_field *ost_brw_write_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_OST_BODY,
+ &RMF_RCS
+};
+
+static const struct req_msg_field *ost_get_info_generic_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_GENERIC_DATA,
+};
+
+static const struct req_msg_field *ost_get_info_generic_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_SETINFO_KEY
+};
+
+static const struct req_msg_field *ost_get_last_id_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_OBD_ID
+};
+
+static const struct req_msg_field *ost_get_last_fid_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_FID,
+};
+
+static const struct req_msg_field *ost_get_fiemap_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_FIEMAP_KEY,
+ &RMF_FIEMAP_VAL
+};
+
+static const struct req_msg_field *ost_get_fiemap_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_FIEMAP_VAL
+};
+
+static const struct req_msg_field *mdt_hsm_progress[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_MDT_BODY,
+ &RMF_MDS_HSM_PROGRESS,
+};
+
+static const struct req_msg_field *mdt_hsm_ct_register[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_MDT_BODY,
+ &RMF_MDS_HSM_ARCHIVE,
+};
+
+static const struct req_msg_field *mdt_hsm_ct_unregister[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_MDT_BODY,
+};
+
+static const struct req_msg_field *mdt_hsm_action_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_MDT_BODY,
+ &RMF_MDS_HSM_CURRENT_ACTION,
+};
+
+static const struct req_msg_field *mdt_hsm_state_get_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_MDT_BODY,
+ &RMF_HSM_USER_STATE,
+};
+
+static const struct req_msg_field *mdt_hsm_state_set[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_MDT_BODY,
+ &RMF_CAPA1,
+ &RMF_HSM_STATE_SET,
+};
+
+static const struct req_msg_field *mdt_hsm_request[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_MDT_BODY,
+ &RMF_MDS_HSM_REQUEST,
+ &RMF_MDS_HSM_USER_ITEM,
+ &RMF_GENERIC_DATA,
+};
+
+static struct req_format *req_formats[] = {
+ &RQF_OBD_PING,
+ &RQF_OBD_SET_INFO,
+ &RQF_OBD_IDX_READ,
+ &RQF_SEC_CTX,
+ &RQF_MGS_TARGET_REG,
+ &RQF_MGS_SET_INFO,
+ &RQF_MGS_CONFIG_READ,
+ &RQF_SEQ_QUERY,
+ &RQF_FLD_QUERY,
+ &RQF_MDS_CONNECT,
+ &RQF_MDS_DISCONNECT,
+ &RQF_MDS_GET_INFO,
+ &RQF_MDS_GETSTATUS,
+ &RQF_MDS_STATFS,
+ &RQF_MDS_GETATTR,
+ &RQF_MDS_GETATTR_NAME,
+ &RQF_MDS_GETXATTR,
+ &RQF_MDS_SYNC,
+ &RQF_MDS_CLOSE,
+ &RQF_MDS_PIN,
+ &RQF_MDS_UNPIN,
+ &RQF_MDS_READPAGE,
+ &RQF_MDS_WRITEPAGE,
+ &RQF_MDS_IS_SUBDIR,
+ &RQF_MDS_DONE_WRITING,
+ &RQF_MDS_REINT,
+ &RQF_MDS_REINT_CREATE,
+ &RQF_MDS_REINT_CREATE_RMT_ACL,
+ &RQF_MDS_REINT_CREATE_SLAVE,
+ &RQF_MDS_REINT_CREATE_SYM,
+ &RQF_MDS_REINT_OPEN,
+ &RQF_MDS_REINT_UNLINK,
+ &RQF_MDS_REINT_LINK,
+ &RQF_MDS_REINT_RENAME,
+ &RQF_MDS_REINT_SETATTR,
+ &RQF_MDS_REINT_SETXATTR,
+ &RQF_MDS_QUOTACHECK,
+ &RQF_MDS_QUOTACTL,
+ &RQF_MDS_HSM_PROGRESS,
+ &RQF_MDS_HSM_CT_REGISTER,
+ &RQF_MDS_HSM_CT_UNREGISTER,
+ &RQF_MDS_HSM_STATE_GET,
+ &RQF_MDS_HSM_STATE_SET,
+ &RQF_MDS_HSM_ACTION,
+ &RQF_MDS_HSM_REQUEST,
+ &RQF_MDS_SWAP_LAYOUTS,
+ &RQF_UPDATE_OBJ,
+ &RQF_QC_CALLBACK,
+ &RQF_OST_CONNECT,
+ &RQF_OST_DISCONNECT,
+ &RQF_OST_QUOTACHECK,
+ &RQF_OST_QUOTACTL,
+ &RQF_OST_GETATTR,
+ &RQF_OST_SETATTR,
+ &RQF_OST_CREATE,
+ &RQF_OST_PUNCH,
+ &RQF_OST_SYNC,
+ &RQF_OST_DESTROY,
+ &RQF_OST_BRW_READ,
+ &RQF_OST_BRW_WRITE,
+ &RQF_OST_STATFS,
+ &RQF_OST_SET_GRANT_INFO,
+ &RQF_OST_GET_INFO_GENERIC,
+ &RQF_OST_GET_INFO_LAST_ID,
+ &RQF_OST_GET_INFO_LAST_FID,
+ &RQF_OST_SET_INFO_LAST_FID,
+ &RQF_OST_GET_INFO_FIEMAP,
+ &RQF_LDLM_ENQUEUE,
+ &RQF_LDLM_ENQUEUE_LVB,
+ &RQF_LDLM_CONVERT,
+ &RQF_LDLM_CANCEL,
+ &RQF_LDLM_CALLBACK,
+ &RQF_LDLM_CP_CALLBACK,
+ &RQF_LDLM_BL_CALLBACK,
+ &RQF_LDLM_GL_CALLBACK,
+ &RQF_LDLM_GL_DESC_CALLBACK,
+ &RQF_LDLM_INTENT,
+ &RQF_LDLM_INTENT_BASIC,
+ &RQF_LDLM_INTENT_LAYOUT,
+ &RQF_LDLM_INTENT_GETATTR,
+ &RQF_LDLM_INTENT_OPEN,
+ &RQF_LDLM_INTENT_CREATE,
+ &RQF_LDLM_INTENT_UNLINK,
+ &RQF_LDLM_INTENT_QUOTA,
+ &RQF_QUOTA_DQACQ,
+ &RQF_LOG_CANCEL,
+ &RQF_LLOG_ORIGIN_HANDLE_CREATE,
+ &RQF_LLOG_ORIGIN_HANDLE_DESTROY,
+ &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK,
+ &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK,
+ &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER,
+ &RQF_LLOG_ORIGIN_CONNECT
+};
+
+struct req_msg_field {
+ const __u32 rmf_flags;
+ const char *rmf_name;
+ /**
+ * Field length. (-1) means "variable length". If the
+ * \a RMF_F_STRUCT_ARRAY flag is set the field is also variable-length,
+ * but the actual size must be a whole multiple of \a rmf_size.
+ */
+ const int rmf_size;
+ void (*rmf_swabber)(void *);
+ void (*rmf_dumper)(void *);
+ int rmf_offset[ARRAY_SIZE(req_formats)][RCL_NR];
+};
+
+enum rmf_flags {
+ /**
+ * The field is a string, must be NUL-terminated.
+ */
+ RMF_F_STRING = 1 << 0,
+ /**
+ * The field's buffer size need not match the declared \a rmf_size.
+ */
+ RMF_F_NO_SIZE_CHECK = 1 << 1,
+ /**
+ * The field's buffer size must be a whole multiple of the declared \a
+ * rmf_size and the \a rmf_swabber function must work on the declared \a
+ * rmf_size worth of bytes.
+ */
+ RMF_F_STRUCT_ARRAY = 1 << 2
+};
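+/*
+ * For illustration: RMF_NIOBUF_REMOTE (defined further below) uses
+ * RMF_F_STRUCT_ARRAY with rmf_size = sizeof(struct niobuf_remote), so its
+ * buffer must hold a whole number of niobuf_remote entries and the swabber
+ * is applied to one entry's worth of bytes at a time.
+ */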
+
+struct req_capsule;
+
+/*
+ * Request fields.
+ */
+#define DEFINE_MSGF(name, flags, size, swabber, dumper) { \
+ .rmf_name = (name), \
+ .rmf_flags = (flags), \
+ .rmf_size = (size), \
+ .rmf_swabber = (void (*)(void*))(swabber), \
+ .rmf_dumper = (void (*)(void*))(dumper) \
+}
+
+struct req_msg_field RMF_GENERIC_DATA =
+ DEFINE_MSGF("generic_data", 0,
+ -1, NULL, NULL);
+EXPORT_SYMBOL(RMF_GENERIC_DATA);
+
+struct req_msg_field RMF_MGS_TARGET_INFO =
+ DEFINE_MSGF("mgs_target_info", 0,
+ sizeof(struct mgs_target_info),
+ lustre_swab_mgs_target_info, NULL);
+EXPORT_SYMBOL(RMF_MGS_TARGET_INFO);
+
+struct req_msg_field RMF_MGS_SEND_PARAM =
+ DEFINE_MSGF("mgs_send_param", 0,
+ sizeof(struct mgs_send_param),
+ NULL, NULL);
+EXPORT_SYMBOL(RMF_MGS_SEND_PARAM);
+
+struct req_msg_field RMF_MGS_CONFIG_BODY =
+ DEFINE_MSGF("mgs_config_read request", 0,
+ sizeof(struct mgs_config_body),
+ lustre_swab_mgs_config_body, NULL);
+EXPORT_SYMBOL(RMF_MGS_CONFIG_BODY);
+
+struct req_msg_field RMF_MGS_CONFIG_RES =
+ DEFINE_MSGF("mgs_config_read reply ", 0,
+ sizeof(struct mgs_config_res),
+ lustre_swab_mgs_config_res, NULL);
+EXPORT_SYMBOL(RMF_MGS_CONFIG_RES);
+
+struct req_msg_field RMF_U32 =
+ DEFINE_MSGF("generic u32", 0,
+ sizeof(__u32), lustre_swab_generic_32s, NULL);
+EXPORT_SYMBOL(RMF_U32);
+
+struct req_msg_field RMF_SETINFO_VAL =
+ DEFINE_MSGF("setinfo_val", 0, -1, NULL, NULL);
+EXPORT_SYMBOL(RMF_SETINFO_VAL);
+
+struct req_msg_field RMF_GETINFO_KEY =
+ DEFINE_MSGF("getinfo_key", 0, -1, NULL, NULL);
+EXPORT_SYMBOL(RMF_GETINFO_KEY);
+
+struct req_msg_field RMF_GETINFO_VALLEN =
+ DEFINE_MSGF("getinfo_vallen", 0,
+ sizeof(__u32), lustre_swab_generic_32s, NULL);
+EXPORT_SYMBOL(RMF_GETINFO_VALLEN);
+
+struct req_msg_field RMF_GETINFO_VAL =
+ DEFINE_MSGF("getinfo_val", 0, -1, NULL, NULL);
+EXPORT_SYMBOL(RMF_GETINFO_VAL);
+
+struct req_msg_field RMF_SEQ_OPC =
+ DEFINE_MSGF("seq_query_opc", 0,
+ sizeof(__u32), lustre_swab_generic_32s, NULL);
+EXPORT_SYMBOL(RMF_SEQ_OPC);
+
+struct req_msg_field RMF_SEQ_RANGE =
+ DEFINE_MSGF("seq_query_range", 0,
+ sizeof(struct lu_seq_range),
+ lustre_swab_lu_seq_range, NULL);
+EXPORT_SYMBOL(RMF_SEQ_RANGE);
+
+struct req_msg_field RMF_FLD_OPC =
+ DEFINE_MSGF("fld_query_opc", 0,
+ sizeof(__u32), lustre_swab_generic_32s, NULL);
+EXPORT_SYMBOL(RMF_FLD_OPC);
+
+struct req_msg_field RMF_FLD_MDFLD =
+ DEFINE_MSGF("fld_query_mdfld", 0,
+ sizeof(struct lu_seq_range),
+ lustre_swab_lu_seq_range, NULL);
+EXPORT_SYMBOL(RMF_FLD_MDFLD);
+
+struct req_msg_field RMF_MDT_BODY =
+ DEFINE_MSGF("mdt_body", 0,
+ sizeof(struct mdt_body), lustre_swab_mdt_body, NULL);
+EXPORT_SYMBOL(RMF_MDT_BODY);
+
+struct req_msg_field RMF_OBD_QUOTACTL =
+ DEFINE_MSGF("obd_quotactl", 0,
+ sizeof(struct obd_quotactl),
+ lustre_swab_obd_quotactl, NULL);
+EXPORT_SYMBOL(RMF_OBD_QUOTACTL);
+
+struct req_msg_field RMF_QUOTA_BODY =
+ DEFINE_MSGF("quota_body", 0,
+ sizeof(struct quota_body), lustre_swab_quota_body, NULL);
+EXPORT_SYMBOL(RMF_QUOTA_BODY);
+
+struct req_msg_field RMF_MDT_EPOCH =
+ DEFINE_MSGF("mdt_ioepoch", 0,
+ sizeof(struct mdt_ioepoch), lustre_swab_mdt_ioepoch, NULL);
+EXPORT_SYMBOL(RMF_MDT_EPOCH);
+
+struct req_msg_field RMF_PTLRPC_BODY =
+ DEFINE_MSGF("ptlrpc_body", 0,
+ sizeof(struct ptlrpc_body), lustre_swab_ptlrpc_body, NULL);
+EXPORT_SYMBOL(RMF_PTLRPC_BODY);
+
+struct req_msg_field RMF_OBD_STATFS =
+ DEFINE_MSGF("obd_statfs", 0,
+ sizeof(struct obd_statfs), lustre_swab_obd_statfs, NULL);
+EXPORT_SYMBOL(RMF_OBD_STATFS);
+
+struct req_msg_field RMF_SETINFO_KEY =
+ DEFINE_MSGF("setinfo_key", 0, -1, NULL, NULL);
+EXPORT_SYMBOL(RMF_SETINFO_KEY);
+
+struct req_msg_field RMF_NAME =
+ DEFINE_MSGF("name", RMF_F_STRING, -1, NULL, NULL);
+EXPORT_SYMBOL(RMF_NAME);
+
+struct req_msg_field RMF_SYMTGT =
+ DEFINE_MSGF("symtgt", RMF_F_STRING, -1, NULL, NULL);
+EXPORT_SYMBOL(RMF_SYMTGT);
+
+struct req_msg_field RMF_TGTUUID =
+ DEFINE_MSGF("tgtuuid", RMF_F_STRING, sizeof(struct obd_uuid) - 1, NULL,
+ NULL);
+EXPORT_SYMBOL(RMF_TGTUUID);
+
+struct req_msg_field RMF_CLUUID =
+ DEFINE_MSGF("cluuid", RMF_F_STRING, sizeof(struct obd_uuid) - 1, NULL,
+ NULL);
+EXPORT_SYMBOL(RMF_CLUUID);
+
+struct req_msg_field RMF_STRING =
+ DEFINE_MSGF("string", RMF_F_STRING, -1, NULL, NULL);
+EXPORT_SYMBOL(RMF_STRING);
+
+struct req_msg_field RMF_LLOGD_BODY =
+ DEFINE_MSGF("llogd_body", 0,
+ sizeof(struct llogd_body), lustre_swab_llogd_body, NULL);
+EXPORT_SYMBOL(RMF_LLOGD_BODY);
+
+struct req_msg_field RMF_LLOG_LOG_HDR =
+ DEFINE_MSGF("llog_log_hdr", 0,
+ sizeof(struct llog_log_hdr), lustre_swab_llog_hdr, NULL);
+EXPORT_SYMBOL(RMF_LLOG_LOG_HDR);
+
+struct req_msg_field RMF_LLOGD_CONN_BODY =
+ DEFINE_MSGF("llogd_conn_body", 0,
+ sizeof(struct llogd_conn_body),
+ lustre_swab_llogd_conn_body, NULL);
+EXPORT_SYMBOL(RMF_LLOGD_CONN_BODY);
+
+/*
+ * connection handle received in MDS_CONNECT request.
+ *
+ * No swabbing needed because struct lustre_handle contains only a 64-bit cookie
+ * that the client does not interpret at all.
+ */
+struct req_msg_field RMF_CONN =
+ DEFINE_MSGF("conn", 0, sizeof(struct lustre_handle), NULL, NULL);
+EXPORT_SYMBOL(RMF_CONN);
+
+struct req_msg_field RMF_CONNECT_DATA =
+ DEFINE_MSGF("cdata",
+ RMF_F_NO_SIZE_CHECK /* we allow extra space for interop */,
+#if LUSTRE_VERSION_CODE > OBD_OCD_VERSION(2, 7, 50, 0)
+ sizeof(struct obd_connect_data),
+#else
+/* For interoperability with 1.8 and 2.0 clients/servers.
+ * The RPC verification code allows larger RPC buffers, but not
+ * smaller buffers. Until we no longer need to keep compatibility
+ * with older servers/clients we can only check that the buffer
+ * size is at least as large as obd_connect_data_v1. That is not
+ * in itself harmful, since the chance of just corrupting this
+ * field is low. See JIRA LU-16 for details. */
+ sizeof(struct obd_connect_data_v1),
+#endif
+ lustre_swab_connect, NULL);
+EXPORT_SYMBOL(RMF_CONNECT_DATA);
+
+struct req_msg_field RMF_DLM_REQ =
+ DEFINE_MSGF("dlm_req", RMF_F_NO_SIZE_CHECK /* ldlm_request_bufsize */,
+ sizeof(struct ldlm_request),
+ lustre_swab_ldlm_request, NULL);
+EXPORT_SYMBOL(RMF_DLM_REQ);
+
+struct req_msg_field RMF_DLM_REP =
+ DEFINE_MSGF("dlm_rep", 0,
+ sizeof(struct ldlm_reply), lustre_swab_ldlm_reply, NULL);
+EXPORT_SYMBOL(RMF_DLM_REP);
+
+struct req_msg_field RMF_LDLM_INTENT =
+ DEFINE_MSGF("ldlm_intent", 0,
+ sizeof(struct ldlm_intent), lustre_swab_ldlm_intent, NULL);
+EXPORT_SYMBOL(RMF_LDLM_INTENT);
+
+struct req_msg_field RMF_DLM_LVB =
+ DEFINE_MSGF("dlm_lvb", 0, -1, NULL, NULL);
+EXPORT_SYMBOL(RMF_DLM_LVB);
+
+struct req_msg_field RMF_DLM_GL_DESC =
+ DEFINE_MSGF("dlm_gl_desc", 0, sizeof(union ldlm_gl_desc),
+ lustre_swab_gl_desc, NULL);
+EXPORT_SYMBOL(RMF_DLM_GL_DESC);
+
+struct req_msg_field RMF_MDT_MD =
+ DEFINE_MSGF("mdt_md", RMF_F_NO_SIZE_CHECK, MIN_MD_SIZE, NULL, NULL);
+EXPORT_SYMBOL(RMF_MDT_MD);
+
+struct req_msg_field RMF_REC_REINT =
+ DEFINE_MSGF("rec_reint", 0, sizeof(struct mdt_rec_reint),
+ lustre_swab_mdt_rec_reint, NULL);
+EXPORT_SYMBOL(RMF_REC_REINT);
+
+/* FIXME: this length should be defined as a macro */
+struct req_msg_field RMF_EADATA = DEFINE_MSGF("eadata", 0, -1,
+ NULL, NULL);
+EXPORT_SYMBOL(RMF_EADATA);
+
+struct req_msg_field RMF_ACL =
+ DEFINE_MSGF("acl", RMF_F_NO_SIZE_CHECK,
+ LUSTRE_POSIX_ACL_MAX_SIZE, NULL, NULL);
+EXPORT_SYMBOL(RMF_ACL);
+
+/* FIXME: this should be made to use RMF_F_STRUCT_ARRAY */
+struct req_msg_field RMF_LOGCOOKIES =
+ DEFINE_MSGF("logcookies", RMF_F_NO_SIZE_CHECK /* multiple cookies */,
+ sizeof(struct llog_cookie), NULL, NULL);
+EXPORT_SYMBOL(RMF_LOGCOOKIES);
+
+struct req_msg_field RMF_CAPA1 =
+ DEFINE_MSGF("capa", 0, sizeof(struct lustre_capa),
+ lustre_swab_lustre_capa, NULL);
+EXPORT_SYMBOL(RMF_CAPA1);
+
+struct req_msg_field RMF_CAPA2 =
+ DEFINE_MSGF("capa", 0, sizeof(struct lustre_capa),
+ lustre_swab_lustre_capa, NULL);
+EXPORT_SYMBOL(RMF_CAPA2);
+
+struct req_msg_field RMF_LAYOUT_INTENT =
+ DEFINE_MSGF("layout_intent", 0,
+ sizeof(struct layout_intent), lustre_swab_layout_intent,
+ NULL);
+EXPORT_SYMBOL(RMF_LAYOUT_INTENT);
+
+/*
+ * OST request field.
+ */
+struct req_msg_field RMF_OST_BODY =
+ DEFINE_MSGF("ost_body", 0,
+ sizeof(struct ost_body), lustre_swab_ost_body, dump_ost_body);
+EXPORT_SYMBOL(RMF_OST_BODY);
+
+struct req_msg_field RMF_OBD_IOOBJ =
+ DEFINE_MSGF("obd_ioobj", RMF_F_STRUCT_ARRAY,
+ sizeof(struct obd_ioobj), lustre_swab_obd_ioobj, dump_ioo);
+EXPORT_SYMBOL(RMF_OBD_IOOBJ);
+
+struct req_msg_field RMF_NIOBUF_REMOTE =
+ DEFINE_MSGF("niobuf_remote", RMF_F_STRUCT_ARRAY,
+ sizeof(struct niobuf_remote), lustre_swab_niobuf_remote,
+ dump_rniobuf);
+EXPORT_SYMBOL(RMF_NIOBUF_REMOTE);
+
+struct req_msg_field RMF_RCS =
+ DEFINE_MSGF("niobuf_remote", RMF_F_STRUCT_ARRAY, sizeof(__u32),
+ lustre_swab_generic_32s, dump_rcs);
+EXPORT_SYMBOL(RMF_RCS);
+
+struct req_msg_field RMF_OBD_ID =
+ DEFINE_MSGF("obd_id", 0,
+ sizeof(obd_id), lustre_swab_ost_last_id, NULL);
+EXPORT_SYMBOL(RMF_OBD_ID);
+
+struct req_msg_field RMF_FID =
+ DEFINE_MSGF("fid", 0,
+ sizeof(struct lu_fid), lustre_swab_lu_fid, NULL);
+EXPORT_SYMBOL(RMF_FID);
+
+struct req_msg_field RMF_OST_ID =
+ DEFINE_MSGF("ost_id", 0,
+ sizeof(struct ost_id), lustre_swab_ost_id, NULL);
+EXPORT_SYMBOL(RMF_OST_ID);
+
+struct req_msg_field RMF_FIEMAP_KEY =
+ DEFINE_MSGF("fiemap", 0, sizeof(struct ll_fiemap_info_key),
+ lustre_swab_fiemap, NULL);
+EXPORT_SYMBOL(RMF_FIEMAP_KEY);
+
+struct req_msg_field RMF_FIEMAP_VAL =
+ DEFINE_MSGF("fiemap", 0, -1, lustre_swab_fiemap, NULL);
+EXPORT_SYMBOL(RMF_FIEMAP_VAL);
+
+struct req_msg_field RMF_IDX_INFO =
+ DEFINE_MSGF("idx_info", 0, sizeof(struct idx_info),
+ lustre_swab_idx_info, NULL);
+EXPORT_SYMBOL(RMF_IDX_INFO);
+struct req_msg_field RMF_HSM_USER_STATE =
+ DEFINE_MSGF("hsm_user_state", 0, sizeof(struct hsm_user_state),
+ lustre_swab_hsm_user_state, NULL);
+EXPORT_SYMBOL(RMF_HSM_USER_STATE);
+
+struct req_msg_field RMF_HSM_STATE_SET =
+ DEFINE_MSGF("hsm_state_set", 0, sizeof(struct hsm_state_set),
+ lustre_swab_hsm_state_set, NULL);
+EXPORT_SYMBOL(RMF_HSM_STATE_SET);
+
+struct req_msg_field RMF_MDS_HSM_PROGRESS =
+ DEFINE_MSGF("hsm_progress", 0, sizeof(struct hsm_progress_kernel),
+ lustre_swab_hsm_progress_kernel, NULL);
+EXPORT_SYMBOL(RMF_MDS_HSM_PROGRESS);
+
+struct req_msg_field RMF_MDS_HSM_CURRENT_ACTION =
+ DEFINE_MSGF("hsm_current_action", 0, sizeof(struct hsm_current_action),
+ lustre_swab_hsm_current_action, NULL);
+EXPORT_SYMBOL(RMF_MDS_HSM_CURRENT_ACTION);
+
+struct req_msg_field RMF_MDS_HSM_USER_ITEM =
+ DEFINE_MSGF("hsm_user_item", RMF_F_STRUCT_ARRAY,
+ sizeof(struct hsm_user_item), lustre_swab_hsm_user_item,
+ NULL);
+EXPORT_SYMBOL(RMF_MDS_HSM_USER_ITEM);
+
+struct req_msg_field RMF_MDS_HSM_ARCHIVE =
+ DEFINE_MSGF("hsm_archive", 0,
+ sizeof(__u32), lustre_swab_generic_32s, NULL);
+EXPORT_SYMBOL(RMF_MDS_HSM_ARCHIVE);
+
+struct req_msg_field RMF_MDS_HSM_REQUEST =
+ DEFINE_MSGF("hsm_request", 0, sizeof(struct hsm_request),
+ lustre_swab_hsm_request, NULL);
+EXPORT_SYMBOL(RMF_MDS_HSM_REQUEST);
+
+struct req_msg_field RMF_UPDATE = DEFINE_MSGF("update", 0, -1,
+ lustre_swab_update_buf, NULL);
+EXPORT_SYMBOL(RMF_UPDATE);
+
+struct req_msg_field RMF_UPDATE_REPLY = DEFINE_MSGF("update_reply", 0, -1,
+ lustre_swab_update_reply_buf,
+ NULL);
+EXPORT_SYMBOL(RMF_UPDATE_REPLY);
+
+struct req_msg_field RMF_SWAP_LAYOUTS =
+ DEFINE_MSGF("swap_layouts", 0, sizeof(struct mdc_swap_layouts),
+ lustre_swab_swap_layouts, NULL);
+EXPORT_SYMBOL(RMF_SWAP_LAYOUTS);
+/*
+ * Request formats.
+ */
+
+struct req_format {
+ const char *rf_name;
+ int rf_idx;
+ struct {
+ int nr;
+ const struct req_msg_field **d;
+ } rf_fields[RCL_NR];
+};
+
+#define DEFINE_REQ_FMT(name, client, client_nr, server, server_nr) { \
+ .rf_name = name, \
+ .rf_fields = { \
+ [RCL_CLIENT] = { \
+ .nr = client_nr, \
+ .d = client \
+ }, \
+ [RCL_SERVER] = { \
+ .nr = server_nr, \
+ .d = server \
+ } \
+ } \
+}
+
+#define DEFINE_REQ_FMT0(name, client, server) \
+DEFINE_REQ_FMT(name, client, ARRAY_SIZE(client), server, ARRAY_SIZE(server))
+
+struct req_format RQF_OBD_PING =
+ DEFINE_REQ_FMT0("OBD_PING", empty, empty);
+EXPORT_SYMBOL(RQF_OBD_PING);
+
+struct req_format RQF_OBD_SET_INFO =
+ DEFINE_REQ_FMT0("OBD_SET_INFO", obd_set_info_client, empty);
+EXPORT_SYMBOL(RQF_OBD_SET_INFO);
+
+/* Read index file through the network */
+struct req_format RQF_OBD_IDX_READ =
+ DEFINE_REQ_FMT0("OBD_IDX_READ",
+ obd_idx_read_client, obd_idx_read_server);
+EXPORT_SYMBOL(RQF_OBD_IDX_READ);
+
+struct req_format RQF_SEC_CTX =
+ DEFINE_REQ_FMT0("SEC_CTX", empty, empty);
+EXPORT_SYMBOL(RQF_SEC_CTX);
+
+struct req_format RQF_MGS_TARGET_REG =
+ DEFINE_REQ_FMT0("MGS_TARGET_REG", mgs_target_info_only,
+ mgs_target_info_only);
+EXPORT_SYMBOL(RQF_MGS_TARGET_REG);
+
+struct req_format RQF_MGS_SET_INFO =
+ DEFINE_REQ_FMT0("MGS_SET_INFO", mgs_set_info,
+ mgs_set_info);
+EXPORT_SYMBOL(RQF_MGS_SET_INFO);
+
+struct req_format RQF_MGS_CONFIG_READ =
+ DEFINE_REQ_FMT0("MGS_CONFIG_READ", mgs_config_read_client,
+ mgs_config_read_server);
+EXPORT_SYMBOL(RQF_MGS_CONFIG_READ);
+
+struct req_format RQF_SEQ_QUERY =
+ DEFINE_REQ_FMT0("SEQ_QUERY", seq_query_client, seq_query_server);
+EXPORT_SYMBOL(RQF_SEQ_QUERY);
+
+struct req_format RQF_FLD_QUERY =
+ DEFINE_REQ_FMT0("FLD_QUERY", fld_query_client, fld_query_server);
+EXPORT_SYMBOL(RQF_FLD_QUERY);
+
+struct req_format RQF_LOG_CANCEL =
+ DEFINE_REQ_FMT0("OBD_LOG_CANCEL", log_cancel_client, empty);
+EXPORT_SYMBOL(RQF_LOG_CANCEL);
+
+struct req_format RQF_MDS_QUOTACHECK =
+ DEFINE_REQ_FMT0("MDS_QUOTACHECK", quotactl_only, empty);
+EXPORT_SYMBOL(RQF_MDS_QUOTACHECK);
+
+struct req_format RQF_OST_QUOTACHECK =
+ DEFINE_REQ_FMT0("OST_QUOTACHECK", quotactl_only, empty);
+EXPORT_SYMBOL(RQF_OST_QUOTACHECK);
+
+struct req_format RQF_MDS_QUOTACTL =
+ DEFINE_REQ_FMT0("MDS_QUOTACTL", quotactl_only, quotactl_only);
+EXPORT_SYMBOL(RQF_MDS_QUOTACTL);
+
+struct req_format RQF_OST_QUOTACTL =
+ DEFINE_REQ_FMT0("OST_QUOTACTL", quotactl_only, quotactl_only);
+EXPORT_SYMBOL(RQF_OST_QUOTACTL);
+
+struct req_format RQF_QC_CALLBACK =
+ DEFINE_REQ_FMT0("QC_CALLBACK", quotactl_only, empty);
+EXPORT_SYMBOL(RQF_QC_CALLBACK);
+
+struct req_format RQF_QUOTA_DQACQ =
+ DEFINE_REQ_FMT0("QUOTA_DQACQ", quota_body_only, quota_body_only);
+EXPORT_SYMBOL(RQF_QUOTA_DQACQ);
+
+struct req_format RQF_LDLM_INTENT_QUOTA =
+ DEFINE_REQ_FMT0("LDLM_INTENT_QUOTA",
+ ldlm_intent_quota_client,
+ ldlm_intent_quota_server);
+EXPORT_SYMBOL(RQF_LDLM_INTENT_QUOTA);
+
+struct req_format RQF_MDS_GETSTATUS =
+ DEFINE_REQ_FMT0("MDS_GETSTATUS", mdt_body_only, mdt_body_capa);
+EXPORT_SYMBOL(RQF_MDS_GETSTATUS);
+
+struct req_format RQF_MDS_STATFS =
+ DEFINE_REQ_FMT0("MDS_STATFS", empty, obd_statfs_server);
+EXPORT_SYMBOL(RQF_MDS_STATFS);
+
+struct req_format RQF_MDS_SYNC =
+ DEFINE_REQ_FMT0("MDS_SYNC", mdt_body_capa, mdt_body_only);
+EXPORT_SYMBOL(RQF_MDS_SYNC);
+
+struct req_format RQF_MDS_GETATTR =
+ DEFINE_REQ_FMT0("MDS_GETATTR", mdt_body_capa, mds_getattr_server);
+EXPORT_SYMBOL(RQF_MDS_GETATTR);
+
+struct req_format RQF_MDS_GETXATTR =
+ DEFINE_REQ_FMT0("MDS_GETXATTR",
+ mds_getxattr_client, mds_getxattr_server);
+EXPORT_SYMBOL(RQF_MDS_GETXATTR);
+
+struct req_format RQF_MDS_GETATTR_NAME =
+ DEFINE_REQ_FMT0("MDS_GETATTR_NAME",
+ mds_getattr_name_client, mds_getattr_server);
+EXPORT_SYMBOL(RQF_MDS_GETATTR_NAME);
+
+struct req_format RQF_MDS_REINT =
+ DEFINE_REQ_FMT0("MDS_REINT", mds_reint_client, mdt_body_only);
+EXPORT_SYMBOL(RQF_MDS_REINT);
+
+struct req_format RQF_MDS_REINT_CREATE =
+ DEFINE_REQ_FMT0("MDS_REINT_CREATE",
+ mds_reint_create_client, mdt_body_capa);
+EXPORT_SYMBOL(RQF_MDS_REINT_CREATE);
+
+struct req_format RQF_MDS_REINT_CREATE_RMT_ACL =
+ DEFINE_REQ_FMT0("MDS_REINT_CREATE_RMT_ACL",
+ mds_reint_create_rmt_acl_client, mdt_body_capa);
+EXPORT_SYMBOL(RQF_MDS_REINT_CREATE_RMT_ACL);
+
+struct req_format RQF_MDS_REINT_CREATE_SLAVE =
+ DEFINE_REQ_FMT0("MDS_REINT_CREATE_EA",
+ mds_reint_create_slave_client, mdt_body_capa);
+EXPORT_SYMBOL(RQF_MDS_REINT_CREATE_SLAVE);
+
+struct req_format RQF_MDS_REINT_CREATE_SYM =
+ DEFINE_REQ_FMT0("MDS_REINT_CREATE_SYM",
+ mds_reint_create_sym_client, mdt_body_capa);
+EXPORT_SYMBOL(RQF_MDS_REINT_CREATE_SYM);
+
+struct req_format RQF_MDS_REINT_OPEN =
+ DEFINE_REQ_FMT0("MDS_REINT_OPEN",
+ mds_reint_open_client, mds_reint_open_server);
+EXPORT_SYMBOL(RQF_MDS_REINT_OPEN);
+
+struct req_format RQF_MDS_REINT_UNLINK =
+ DEFINE_REQ_FMT0("MDS_REINT_UNLINK", mds_reint_unlink_client,
+ mds_last_unlink_server);
+EXPORT_SYMBOL(RQF_MDS_REINT_UNLINK);
+
+struct req_format RQF_MDS_REINT_LINK =
+ DEFINE_REQ_FMT0("MDS_REINT_LINK",
+ mds_reint_link_client, mdt_body_only);
+EXPORT_SYMBOL(RQF_MDS_REINT_LINK);
+
+struct req_format RQF_MDS_REINT_RENAME =
+ DEFINE_REQ_FMT0("MDS_REINT_RENAME", mds_reint_rename_client,
+ mds_last_unlink_server);
+EXPORT_SYMBOL(RQF_MDS_REINT_RENAME);
+
+struct req_format RQF_MDS_REINT_SETATTR =
+ DEFINE_REQ_FMT0("MDS_REINT_SETATTR",
+ mds_reint_setattr_client, mds_setattr_server);
+EXPORT_SYMBOL(RQF_MDS_REINT_SETATTR);
+
+struct req_format RQF_MDS_REINT_SETXATTR =
+ DEFINE_REQ_FMT0("MDS_REINT_SETXATTR",
+ mds_reint_setxattr_client, mdt_body_only);
+EXPORT_SYMBOL(RQF_MDS_REINT_SETXATTR);
+
+struct req_format RQF_MDS_CONNECT =
+ DEFINE_REQ_FMT0("MDS_CONNECT",
+ obd_connect_client, obd_connect_server);
+EXPORT_SYMBOL(RQF_MDS_CONNECT);
+
+struct req_format RQF_MDS_DISCONNECT =
+ DEFINE_REQ_FMT0("MDS_DISCONNECT", empty, empty);
+EXPORT_SYMBOL(RQF_MDS_DISCONNECT);
+
+struct req_format RQF_MDS_GET_INFO =
+ DEFINE_REQ_FMT0("MDS_GET_INFO", mds_getinfo_client,
+ mds_getinfo_server);
+EXPORT_SYMBOL(RQF_MDS_GET_INFO);
+
+struct req_format RQF_UPDATE_OBJ =
+ DEFINE_REQ_FMT0("OBJECT_UPDATE_OBJ", mds_update_client,
+ mds_update_server);
+EXPORT_SYMBOL(RQF_UPDATE_OBJ);
+
+struct req_format RQF_LDLM_ENQUEUE =
+ DEFINE_REQ_FMT0("LDLM_ENQUEUE",
+ ldlm_enqueue_client, ldlm_enqueue_lvb_server);
+EXPORT_SYMBOL(RQF_LDLM_ENQUEUE);
+
+struct req_format RQF_LDLM_ENQUEUE_LVB =
+ DEFINE_REQ_FMT0("LDLM_ENQUEUE_LVB",
+ ldlm_enqueue_client, ldlm_enqueue_lvb_server);
+EXPORT_SYMBOL(RQF_LDLM_ENQUEUE_LVB);
+
+struct req_format RQF_LDLM_CONVERT =
+ DEFINE_REQ_FMT0("LDLM_CONVERT",
+ ldlm_enqueue_client, ldlm_enqueue_server);
+EXPORT_SYMBOL(RQF_LDLM_CONVERT);
+
+struct req_format RQF_LDLM_CANCEL =
+ DEFINE_REQ_FMT0("LDLM_CANCEL", ldlm_enqueue_client, empty);
+EXPORT_SYMBOL(RQF_LDLM_CANCEL);
+
+struct req_format RQF_LDLM_CALLBACK =
+ DEFINE_REQ_FMT0("LDLM_CALLBACK", ldlm_enqueue_client, empty);
+EXPORT_SYMBOL(RQF_LDLM_CALLBACK);
+
+struct req_format RQF_LDLM_CP_CALLBACK =
+ DEFINE_REQ_FMT0("LDLM_CP_CALLBACK", ldlm_cp_callback_client, empty);
+EXPORT_SYMBOL(RQF_LDLM_CP_CALLBACK);
+
+struct req_format RQF_LDLM_BL_CALLBACK =
+ DEFINE_REQ_FMT0("LDLM_BL_CALLBACK", ldlm_enqueue_client, empty);
+EXPORT_SYMBOL(RQF_LDLM_BL_CALLBACK);
+
+struct req_format RQF_LDLM_GL_CALLBACK =
+ DEFINE_REQ_FMT0("LDLM_GL_CALLBACK", ldlm_enqueue_client,
+ ldlm_gl_callback_server);
+EXPORT_SYMBOL(RQF_LDLM_GL_CALLBACK);
+
+struct req_format RQF_LDLM_GL_DESC_CALLBACK =
+ DEFINE_REQ_FMT0("LDLM_GL_CALLBACK", ldlm_gl_callback_desc_client,
+ ldlm_gl_callback_server);
+EXPORT_SYMBOL(RQF_LDLM_GL_DESC_CALLBACK);
+
+struct req_format RQF_LDLM_INTENT_BASIC =
+ DEFINE_REQ_FMT0("LDLM_INTENT_BASIC",
+ ldlm_intent_basic_client, ldlm_enqueue_lvb_server);
+EXPORT_SYMBOL(RQF_LDLM_INTENT_BASIC);
+
+struct req_format RQF_LDLM_INTENT =
+ DEFINE_REQ_FMT0("LDLM_INTENT",
+ ldlm_intent_client, ldlm_intent_server);
+EXPORT_SYMBOL(RQF_LDLM_INTENT);
+
+struct req_format RQF_LDLM_INTENT_LAYOUT =
+ DEFINE_REQ_FMT0("LDLM_INTENT_LAYOUT ",
+ ldlm_intent_layout_client, ldlm_enqueue_lvb_server);
+EXPORT_SYMBOL(RQF_LDLM_INTENT_LAYOUT);
+
+struct req_format RQF_LDLM_INTENT_GETATTR =
+ DEFINE_REQ_FMT0("LDLM_INTENT_GETATTR",
+ ldlm_intent_getattr_client, ldlm_intent_getattr_server);
+EXPORT_SYMBOL(RQF_LDLM_INTENT_GETATTR);
+
+struct req_format RQF_LDLM_INTENT_OPEN =
+ DEFINE_REQ_FMT0("LDLM_INTENT_OPEN",
+ ldlm_intent_open_client, ldlm_intent_open_server);
+EXPORT_SYMBOL(RQF_LDLM_INTENT_OPEN);
+
+struct req_format RQF_LDLM_INTENT_CREATE =
+ DEFINE_REQ_FMT0("LDLM_INTENT_CREATE",
+ ldlm_intent_create_client, ldlm_intent_getattr_server);
+EXPORT_SYMBOL(RQF_LDLM_INTENT_CREATE);
+
+struct req_format RQF_LDLM_INTENT_UNLINK =
+ DEFINE_REQ_FMT0("LDLM_INTENT_UNLINK",
+ ldlm_intent_unlink_client, ldlm_intent_server);
+EXPORT_SYMBOL(RQF_LDLM_INTENT_UNLINK);
+
+struct req_format RQF_MDS_CLOSE =
+ DEFINE_REQ_FMT0("MDS_CLOSE",
+ mdt_close_client, mds_last_unlink_server);
+EXPORT_SYMBOL(RQF_MDS_CLOSE);
+
+struct req_format RQF_MDS_PIN =
+ DEFINE_REQ_FMT0("MDS_PIN",
+ mdt_body_capa, mdt_body_only);
+EXPORT_SYMBOL(RQF_MDS_PIN);
+
+struct req_format RQF_MDS_UNPIN =
+ DEFINE_REQ_FMT0("MDS_UNPIN", mdt_body_only, empty);
+EXPORT_SYMBOL(RQF_MDS_UNPIN);
+
+struct req_format RQF_MDS_DONE_WRITING =
+ DEFINE_REQ_FMT0("MDS_DONE_WRITING",
+ mdt_close_client, mdt_body_only);
+EXPORT_SYMBOL(RQF_MDS_DONE_WRITING);
+
+struct req_format RQF_MDS_READPAGE =
+ DEFINE_REQ_FMT0("MDS_READPAGE",
+ mdt_body_capa, mdt_body_only);
+EXPORT_SYMBOL(RQF_MDS_READPAGE);
+
+struct req_format RQF_MDS_HSM_ACTION =
+ DEFINE_REQ_FMT0("MDS_HSM_ACTION", mdt_body_capa, mdt_hsm_action_server);
+EXPORT_SYMBOL(RQF_MDS_HSM_ACTION);
+
+struct req_format RQF_MDS_HSM_PROGRESS =
+ DEFINE_REQ_FMT0("MDS_HSM_PROGRESS", mdt_hsm_progress, empty);
+EXPORT_SYMBOL(RQF_MDS_HSM_PROGRESS);
+
+struct req_format RQF_MDS_HSM_CT_REGISTER =
+ DEFINE_REQ_FMT0("MDS_HSM_CT_REGISTER", mdt_hsm_ct_register, empty);
+EXPORT_SYMBOL(RQF_MDS_HSM_CT_REGISTER);
+
+struct req_format RQF_MDS_HSM_CT_UNREGISTER =
+ DEFINE_REQ_FMT0("MDS_HSM_CT_UNREGISTER", mdt_hsm_ct_unregister, empty);
+EXPORT_SYMBOL(RQF_MDS_HSM_CT_UNREGISTER);
+
+struct req_format RQF_MDS_HSM_STATE_GET =
+ DEFINE_REQ_FMT0("MDS_HSM_STATE_GET",
+ mdt_body_capa, mdt_hsm_state_get_server);
+EXPORT_SYMBOL(RQF_MDS_HSM_STATE_GET);
+
+struct req_format RQF_MDS_HSM_STATE_SET =
+ DEFINE_REQ_FMT0("MDS_HSM_STATE_SET", mdt_hsm_state_set, empty);
+EXPORT_SYMBOL(RQF_MDS_HSM_STATE_SET);
+
+struct req_format RQF_MDS_HSM_REQUEST =
+ DEFINE_REQ_FMT0("MDS_HSM_REQUEST", mdt_hsm_request, empty);
+EXPORT_SYMBOL(RQF_MDS_HSM_REQUEST);
+
+struct req_format RQF_MDS_SWAP_LAYOUTS =
+ DEFINE_REQ_FMT0("MDS_SWAP_LAYOUTS",
+ mdt_swap_layouts, empty);
+EXPORT_SYMBOL(RQF_MDS_SWAP_LAYOUTS);
+
+/* This is for split */
+struct req_format RQF_MDS_WRITEPAGE =
+ DEFINE_REQ_FMT0("MDS_WRITEPAGE",
+ mdt_body_capa, mdt_body_only);
+EXPORT_SYMBOL(RQF_MDS_WRITEPAGE);
+
+struct req_format RQF_MDS_IS_SUBDIR =
+ DEFINE_REQ_FMT0("MDS_IS_SUBDIR",
+ mdt_body_only, mdt_body_only);
+EXPORT_SYMBOL(RQF_MDS_IS_SUBDIR);
+
+struct req_format RQF_LLOG_ORIGIN_HANDLE_CREATE =
+ DEFINE_REQ_FMT0("LLOG_ORIGIN_HANDLE_CREATE",
+ llog_origin_handle_create_client, llogd_body_only);
+EXPORT_SYMBOL(RQF_LLOG_ORIGIN_HANDLE_CREATE);
+
+struct req_format RQF_LLOG_ORIGIN_HANDLE_DESTROY =
+ DEFINE_REQ_FMT0("LLOG_ORIGIN_HANDLE_DESTROY",
+ llogd_body_only, llogd_body_only);
+EXPORT_SYMBOL(RQF_LLOG_ORIGIN_HANDLE_DESTROY);
+
+struct req_format RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK =
+ DEFINE_REQ_FMT0("LLOG_ORIGIN_HANDLE_NEXT_BLOCK",
+ llogd_body_only, llog_origin_handle_next_block_server);
+EXPORT_SYMBOL(RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
+
+struct req_format RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK =
+ DEFINE_REQ_FMT0("LLOG_ORIGIN_HANDLE_PREV_BLOCK",
+ llogd_body_only, llog_origin_handle_next_block_server);
+EXPORT_SYMBOL(RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK);
+
+struct req_format RQF_LLOG_ORIGIN_HANDLE_READ_HEADER =
+ DEFINE_REQ_FMT0("LLOG_ORIGIN_HANDLE_READ_HEADER",
+ llogd_body_only, llog_log_hdr_only);
+EXPORT_SYMBOL(RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
+
+struct req_format RQF_LLOG_ORIGIN_CONNECT =
+ DEFINE_REQ_FMT0("LLOG_ORIGIN_CONNECT", llogd_conn_body_only, empty);
+EXPORT_SYMBOL(RQF_LLOG_ORIGIN_CONNECT);
+
+struct req_format RQF_OST_CONNECT =
+ DEFINE_REQ_FMT0("OST_CONNECT",
+ obd_connect_client, obd_connect_server);
+EXPORT_SYMBOL(RQF_OST_CONNECT);
+
+struct req_format RQF_OST_DISCONNECT =
+ DEFINE_REQ_FMT0("OST_DISCONNECT", empty, empty);
+EXPORT_SYMBOL(RQF_OST_DISCONNECT);
+
+struct req_format RQF_OST_GETATTR =
+ DEFINE_REQ_FMT0("OST_GETATTR", ost_body_capa, ost_body_only);
+EXPORT_SYMBOL(RQF_OST_GETATTR);
+
+struct req_format RQF_OST_SETATTR =
+ DEFINE_REQ_FMT0("OST_SETATTR", ost_body_capa, ost_body_only);
+EXPORT_SYMBOL(RQF_OST_SETATTR);
+
+struct req_format RQF_OST_CREATE =
+ DEFINE_REQ_FMT0("OST_CREATE", ost_body_only, ost_body_only);
+EXPORT_SYMBOL(RQF_OST_CREATE);
+
+struct req_format RQF_OST_PUNCH =
+ DEFINE_REQ_FMT0("OST_PUNCH", ost_body_capa, ost_body_only);
+EXPORT_SYMBOL(RQF_OST_PUNCH);
+
+struct req_format RQF_OST_SYNC =
+ DEFINE_REQ_FMT0("OST_SYNC", ost_body_capa, ost_body_only);
+EXPORT_SYMBOL(RQF_OST_SYNC);
+
+struct req_format RQF_OST_DESTROY =
+ DEFINE_REQ_FMT0("OST_DESTROY", ost_destroy_client, ost_body_only);
+EXPORT_SYMBOL(RQF_OST_DESTROY);
+
+struct req_format RQF_OST_BRW_READ =
+ DEFINE_REQ_FMT0("OST_BRW_READ", ost_brw_client, ost_brw_read_server);
+EXPORT_SYMBOL(RQF_OST_BRW_READ);
+
+struct req_format RQF_OST_BRW_WRITE =
+ DEFINE_REQ_FMT0("OST_BRW_WRITE", ost_brw_client, ost_brw_write_server);
+EXPORT_SYMBOL(RQF_OST_BRW_WRITE);
+
+struct req_format RQF_OST_STATFS =
+ DEFINE_REQ_FMT0("OST_STATFS", empty, obd_statfs_server);
+EXPORT_SYMBOL(RQF_OST_STATFS);
+
+struct req_format RQF_OST_SET_GRANT_INFO =
+ DEFINE_REQ_FMT0("OST_SET_GRANT_INFO", ost_grant_shrink_client,
+ ost_body_only);
+EXPORT_SYMBOL(RQF_OST_SET_GRANT_INFO);
+
+struct req_format RQF_OST_GET_INFO_GENERIC =
+ DEFINE_REQ_FMT0("OST_GET_INFO", ost_get_info_generic_client,
+ ost_get_info_generic_server);
+EXPORT_SYMBOL(RQF_OST_GET_INFO_GENERIC);
+
+struct req_format RQF_OST_GET_INFO_LAST_ID =
+ DEFINE_REQ_FMT0("OST_GET_INFO_LAST_ID", ost_get_info_generic_client,
+ ost_get_last_id_server);
+EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_ID);
+
+struct req_format RQF_OST_GET_INFO_LAST_FID =
+ DEFINE_REQ_FMT0("OST_GET_INFO_LAST_FID", obd_set_info_client,
+ ost_get_last_fid_server);
+EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_FID);
+
+struct req_format RQF_OST_SET_INFO_LAST_FID =
+ DEFINE_REQ_FMT0("OST_SET_INFO_LAST_FID", obd_set_info_client,
+ empty);
+EXPORT_SYMBOL(RQF_OST_SET_INFO_LAST_FID);
+
+struct req_format RQF_OST_GET_INFO_FIEMAP =
+ DEFINE_REQ_FMT0("OST_GET_INFO_FIEMAP", ost_get_fiemap_client,
+ ost_get_fiemap_server);
+EXPORT_SYMBOL(RQF_OST_GET_INFO_FIEMAP);
+
+#if !defined(__REQ_LAYOUT_USER__)
+
+/* Convenience macro */
+#define FMT_FIELD(fmt, i, j) (fmt)->rf_fields[(i)].d[(j)]
+
+/**
+ * Initializes the capsule abstraction by computing and setting the \a rf_idx
+ * field of RQFs and the \a rmf_offset field of RMFs.
+ */
+int req_layout_init(void)
+{
+ int i;
+ int j;
+ int k;
+ struct req_format *rf = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(req_formats); ++i) {
+ rf = req_formats[i];
+ rf->rf_idx = i;
+ for (j = 0; j < RCL_NR; ++j) {
+ LASSERT(rf->rf_fields[j].nr <= REQ_MAX_FIELD_NR);
+ for (k = 0; k < rf->rf_fields[j].nr; ++k) {
+ struct req_msg_field *field;
+
+ field = (typeof(field))rf->rf_fields[j].d[k];
+ LASSERT(!(field->rmf_flags & RMF_F_STRUCT_ARRAY)
+ || field->rmf_size > 0);
+ LASSERT(field->rmf_offset[i][j] == 0);
+ /*
+ * k + 1 to detect unused format/field
+ * combinations.
+ */
+ field->rmf_offset[i][j] = k + 1;
+ }
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(req_layout_init);
+
+void req_layout_fini(void)
+{
+}
+EXPORT_SYMBOL(req_layout_fini);
+
+/**
+ * Initializes the expected sizes of each RMF in a \a pill (\a rc_area) to -1.
+ *
+ * Actual/expected field sizes are set elsewhere in functions in this file:
+ * req_capsule_init(), req_capsule_server_pack(), req_capsule_set_size() and
+ * req_capsule_msg_size(). The \a rc_area information is used by
+ * ptlrpc_request_set_replen().
+ */
+void req_capsule_init_area(struct req_capsule *pill)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pill->rc_area[RCL_CLIENT]); i++) {
+ pill->rc_area[RCL_CLIENT][i] = -1;
+ pill->rc_area[RCL_SERVER][i] = -1;
+ }
+}
+EXPORT_SYMBOL(req_capsule_init_area);
+
+/**
+ * Initialize a pill.
+ *
+ * The \a location indicates whether the caller is executing on the client side
+ * (RCL_CLIENT) or server side (RCL_SERVER).
+ */
+void req_capsule_init(struct req_capsule *pill,
+ struct ptlrpc_request *req,
+ enum req_location location)
+{
+ LASSERT(location == RCL_SERVER || location == RCL_CLIENT);
+
+ /*
+ * Today all capsules are embedded in ptlrpc_request structs,
+ * but just in case that ever isn't the case, we don't reach
+ * into req unless req != NULL and pill is the one embedded in
+ * the req.
+ *
+ * The req->rq_pill_init flag makes it safe to initialize a pill
+ * twice, which might happen in the OST paths as a result of the
+ * high-priority RPC queue getting peeked at before ost_handle()
+ * handles an OST RPC.
+ */
+ if (req != NULL && pill == &req->rq_pill && req->rq_pill_init)
+ return;
+
+ memset(pill, 0, sizeof(*pill));
+ pill->rc_req = req;
+ pill->rc_loc = location;
+ req_capsule_init_area(pill);
+
+ if (req != NULL && pill == &req->rq_pill)
+ req->rq_pill_init = 1;
+}
+EXPORT_SYMBOL(req_capsule_init);
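(Editor's note, not part of the patch: a hedged sketch of the client-side pairing of req_capsule_init() with req_capsule_set() that the comment above describes. In practice ptlrpc_request_alloc() performs this initialization itself; the helper only illustrates the call order.)

static void example_pill_setup(struct ptlrpc_request *req)
{
        /* embed the pill in the request and mark it client-side */
        req_capsule_init(&req->rq_pill, req, RCL_CLIENT);
        /* bind it to one of the formats defined above */
        req_capsule_set(&req->rq_pill, &RQF_OBD_PING);
}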
+
+void req_capsule_fini(struct req_capsule *pill)
+{
+}
+EXPORT_SYMBOL(req_capsule_fini);
+
+static int __req_format_is_sane(const struct req_format *fmt)
+{
+ return
+ 0 <= fmt->rf_idx && fmt->rf_idx < ARRAY_SIZE(req_formats) &&
+ req_formats[fmt->rf_idx] == fmt;
+}
+
+static struct lustre_msg *__req_msg(const struct req_capsule *pill,
+ enum req_location loc)
+{
+ struct ptlrpc_request *req;
+
+ req = pill->rc_req;
+ return loc == RCL_CLIENT ? req->rq_reqmsg : req->rq_repmsg;
+}
+
+/**
+ * Set the format (\a fmt) of a \a pill; format changes are not allowed here
+ * (see req_capsule_extend()).
+ */
+void req_capsule_set(struct req_capsule *pill, const struct req_format *fmt)
+{
+ LASSERT(pill->rc_fmt == NULL || pill->rc_fmt == fmt);
+ LASSERT(__req_format_is_sane(fmt));
+
+ pill->rc_fmt = fmt;
+}
+EXPORT_SYMBOL(req_capsule_set);
+
+/**
+ * Fills in any parts of the \a rc_area of a \a pill that haven't been filled in
+ * yet.
+ *
+ * \a rc_area is an array of REQ_MAX_FIELD_NR elements, used to store sizes of
+ * variable-sized fields. The field sizes come from the declared \a rmf_size
+ * field of a \a pill's \a rc_fmt's RMFs.
+ */
+int req_capsule_filled_sizes(struct req_capsule *pill,
+ enum req_location loc)
+{
+ const struct req_format *fmt = pill->rc_fmt;
+ int i;
+
+ LASSERT(fmt != NULL);
+
+ for (i = 0; i < fmt->rf_fields[loc].nr; ++i) {
+ if (pill->rc_area[loc][i] == -1) {
+ pill->rc_area[loc][i] =
+ fmt->rf_fields[loc].d[i]->rmf_size;
+ if (pill->rc_area[loc][i] == -1) {
+ /*
+ * Skip the following fields.
+ *
+ * If this LASSERT() trips then you're missing a
+ * call to req_capsule_set_size().
+ */
+ LASSERT(loc != RCL_SERVER);
+ break;
+ }
+ }
+ }
+ return i;
+}
+EXPORT_SYMBOL(req_capsule_filled_sizes);
+
+/**
+ * Capsule equivalent of lustre_pack_request() and lustre_pack_reply().
+ *
+ * This function uses the \a pill's \a rc_area as filled in by
+ * req_capsule_set_size() or req_capsule_filled_sizes() (the latter is called by
+ * this function).
+ */
+int req_capsule_server_pack(struct req_capsule *pill)
+{
+ const struct req_format *fmt;
+ int count;
+ int rc;
+
+ LASSERT(pill->rc_loc == RCL_SERVER);
+ fmt = pill->rc_fmt;
+ LASSERT(fmt != NULL);
+
+ count = req_capsule_filled_sizes(pill, RCL_SERVER);
+ rc = lustre_pack_reply(pill->rc_req, count,
+ pill->rc_area[RCL_SERVER], NULL);
+ if (rc != 0) {
+ DEBUG_REQ(D_ERROR, pill->rc_req,
+ "Cannot pack %d fields in format `%s': ",
+ count, fmt->rf_name);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(req_capsule_server_pack);
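(Editor's note, not part of the patch: a sketch of the server-side reply path described above — declare any variable-sized reply fields, then pack. It mirrors the RMF_EADATA handling in llog_origin_handle_next_block() later in this patch; the buflen parameter is an assumption for illustration.)

static int example_reply_pack(struct ptlrpc_request *req, int buflen)
{
        int rc;

        /* record the size of the variable reply field in rc_area */
        req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER, buflen);
        rc = req_capsule_server_pack(&req->rq_pill);
        if (rc)
                return rc;
        /* reply buffers can now be fetched with req_capsule_server_get() */
        return 0;
}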
+
+/**
+ * Returns the PTLRPC request or reply (\a loc) buffer offset of a \a pill
+ * corresponding to the given RMF (\a field).
+ */
+static int __req_capsule_offset(const struct req_capsule *pill,
+ const struct req_msg_field *field,
+ enum req_location loc)
+{
+ int offset;
+
+ offset = field->rmf_offset[pill->rc_fmt->rf_idx][loc];
+ LASSERTF(offset > 0, "%s:%s, off=%d, loc=%d\n",
+ pill->rc_fmt->rf_name,
+ field->rmf_name, offset, loc);
+ offset--;
+
+ LASSERT(0 <= offset && offset < REQ_MAX_FIELD_NR);
+ return offset;
+}
+
+/**
+ * Helper for __req_capsule_get(); swabs value / array of values and/or dumps
+ * them if desired.
+ */
+static void swabber_dumper_helper(struct req_capsule *pill,
+ const struct req_msg_field *field,
+ enum req_location loc, int offset,
+ void *value, int len, int dump,
+ void (*swabber)(void *))
+{
+ void *p;
+ int i;
+ int n;
+ int do_swab;
+ int inout = loc == RCL_CLIENT;
+
+ swabber = swabber ?: field->rmf_swabber;
+
+ if (ptlrpc_buf_need_swab(pill->rc_req, inout, offset) &&
+ swabber != NULL && value != NULL)
+ do_swab = 1;
+ else
+ do_swab = 0;
+
+ if (!(field->rmf_flags & RMF_F_STRUCT_ARRAY)) {
+ if (dump && field->rmf_dumper) {
+ CDEBUG(D_RPCTRACE, "Dump of %sfield %s follows\n",
+ do_swab ? "unswabbed " : "", field->rmf_name);
+ field->rmf_dumper(value);
+ }
+ if (!do_swab)
+ return;
+ swabber(value);
+ ptlrpc_buf_set_swabbed(pill->rc_req, inout, offset);
+ if (dump) {
+ CDEBUG(D_RPCTRACE, "Dump of swabbed field %s "
+ "follows\n", field->rmf_name);
+ field->rmf_dumper(value);
+ }
+
+ return;
+ }
+
+ /*
+ * We're swabbing an array; swabber() swabs a single array element, so
+ * swab every element.
+ */
+ LASSERT((len % field->rmf_size) == 0);
+ for (p = value, i = 0, n = len / field->rmf_size;
+ i < n;
+ i++, p += field->rmf_size) {
+ if (dump && field->rmf_dumper) {
+ CDEBUG(D_RPCTRACE, "Dump of %sarray field %s, "
+ "element %d follows\n",
+ do_swab ? "unswabbed " : "", field->rmf_name, i);
+ field->rmf_dumper(p);
+ }
+ if (!do_swab)
+ continue;
+ swabber(p);
+ if (dump && field->rmf_dumper) {
+ CDEBUG(D_RPCTRACE, "Dump of swabbed array field %s, "
+ "element %d follows\n", field->rmf_name, i);
+ field->rmf_dumper(p);
+ }
+ }
+ if (do_swab)
+ ptlrpc_buf_set_swabbed(pill->rc_req, inout, offset);
+}
+
+/**
+ * Returns the pointer to a PTLRPC request or reply (\a loc) buffer of a \a pill
+ * corresponding to the given RMF (\a field).
+ *
+ * The buffer will be swabbed using the given \a swabber. If \a swabber == NULL
+ * then the \a rmf_swabber from the RMF will be used. Soon there will be no
+ * calls to __req_capsule_get() with a non-NULL \a swabber; \a swabber will then
+ * be removed. Fields with the \a RMF_F_STRUCT_ARRAY flag set will have each
+ * element of the array swabbed.
+ */
+static void *__req_capsule_get(struct req_capsule *pill,
+ const struct req_msg_field *field,
+ enum req_location loc,
+ void (*swabber)(void *),
+ int dump)
+{
+ const struct req_format *fmt;
+ struct lustre_msg *msg;
+ void *value;
+ int len;
+ int offset;
+
+ void *(*getter)(struct lustre_msg *m, int n, int minlen);
+
+ static const char *rcl_names[RCL_NR] = {
+ [RCL_CLIENT] = "client",
+ [RCL_SERVER] = "server"
+ };
+
+ LASSERT(pill != NULL);
+ LASSERT(pill != LP_POISON);
+ fmt = pill->rc_fmt;
+ LASSERT(fmt != NULL);
+ LASSERT(fmt != LP_POISON);
+ LASSERT(__req_format_is_sane(fmt));
+
+ offset = __req_capsule_offset(pill, field, loc);
+
+ msg = __req_msg(pill, loc);
+ LASSERT(msg != NULL);
+
+ getter = (field->rmf_flags & RMF_F_STRING) ?
+ (typeof(getter))lustre_msg_string : lustre_msg_buf;
+
+ if (field->rmf_flags & RMF_F_STRUCT_ARRAY) {
+ /*
+ * We've already asserted that field->rmf_size > 0 in
+ * req_layout_init().
+ */
+ len = lustre_msg_buflen(msg, offset);
+ if ((len % field->rmf_size) != 0) {
+ CERROR("%s: array field size mismatch "
+ "%d modulo %d != 0 (%d)\n",
+ field->rmf_name, len, field->rmf_size, loc);
+ return NULL;
+ }
+ } else if (pill->rc_area[loc][offset] != -1) {
+ len = pill->rc_area[loc][offset];
+ } else {
+ len = max(field->rmf_size, 0);
+ }
+ value = getter(msg, offset, len);
+
+ if (value == NULL) {
+ DEBUG_REQ(D_ERROR, pill->rc_req,
+ "Wrong buffer for field `%s' (%d of %d) "
+ "in format `%s': %d vs. %d (%s)\n",
+ field->rmf_name, offset, lustre_msg_bufcount(msg),
+ fmt->rf_name, lustre_msg_buflen(msg, offset), len,
+ rcl_names[loc]);
+ } else {
+ swabber_dumper_helper(pill, field, loc, offset, value, len,
+ dump, swabber);
+ }
+
+ return value;
+}
+
+/**
+ * Dump a request and/or reply
+ */
+void __req_capsule_dump(struct req_capsule *pill, enum req_location loc)
+{
+ const struct req_format *fmt;
+ const struct req_msg_field *field;
+ int len;
+ int i;
+
+ fmt = pill->rc_fmt;
+
+ DEBUG_REQ(D_RPCTRACE, pill->rc_req, "BEGIN REQ CAPSULE DUMP\n");
+ for (i = 0; i < fmt->rf_fields[loc].nr; ++i) {
+ field = FMT_FIELD(fmt, loc, i);
+ if (field->rmf_dumper == NULL) {
+ /*
+ * FIXME Add a default hex dumper for fields that don't
+ * have a specific dumper
+ */
+ len = req_capsule_get_size(pill, field, loc);
+ CDEBUG(D_RPCTRACE, "Field %s has no dumper function;"
+ "field size is %d\n", field->rmf_name, len);
+ } else {
+ /* It's the dumping side-effect that we're interested in */
+ (void) __req_capsule_get(pill, field, loc, NULL, 1);
+ }
+ }
+ CDEBUG(D_RPCTRACE, "END REQ CAPSULE DUMP\n");
+}
+
+/**
+ * Dump a request.
+ */
+void req_capsule_client_dump(struct req_capsule *pill)
+{
+ __req_capsule_dump(pill, RCL_CLIENT);
+}
+EXPORT_SYMBOL(req_capsule_client_dump);
+
+/**
+ * Dump a reply.
+ */
+void req_capsule_server_dump(struct req_capsule *pill)
+{
+ __req_capsule_dump(pill, RCL_SERVER);
+}
+EXPORT_SYMBOL(req_capsule_server_dump);
+
+/**
+ * Trivial wrapper around __req_capsule_get(), that returns the PTLRPC request
+ * buffer corresponding to the given RMF (\a field) of a \a pill.
+ */
+void *req_capsule_client_get(struct req_capsule *pill,
+ const struct req_msg_field *field)
+{
+ return __req_capsule_get(pill, field, RCL_CLIENT, NULL, 0);
+}
+EXPORT_SYMBOL(req_capsule_client_get);
+
+/**
+ * Same as req_capsule_client_get(), but with a \a swabber argument.
+ *
+ * Currently unused; will be removed when req_capsule_server_swab_get() is
+ * unused too.
+ */
+void *req_capsule_client_swab_get(struct req_capsule *pill,
+ const struct req_msg_field *field,
+ void *swabber)
+{
+ return __req_capsule_get(pill, field, RCL_CLIENT, swabber, 0);
+}
+EXPORT_SYMBOL(req_capsule_client_swab_get);
+
+/**
+ * Utility that combines req_capsule_set_size() and req_capsule_client_get().
+ *
+ * First the \a pill's request \a field's size is set (\a rc_area) using
+ * req_capsule_set_size() with the given \a len. Then the actual buffer is
+ * returned.
+ */
+void *req_capsule_client_sized_get(struct req_capsule *pill,
+ const struct req_msg_field *field,
+ int len)
+{
+ req_capsule_set_size(pill, field, RCL_CLIENT, len);
+ return __req_capsule_get(pill, field, RCL_CLIENT, NULL, 0);
+}
+EXPORT_SYMBOL(req_capsule_client_sized_get);
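(Editor's note, not part of the patch: a sketch of the common use of req_capsule_client_sized_get() for a variable-length string field, mirroring the RMF_NAME handling in llog_client_open() later in this patch. It assumes the field's size was already declared before the request was packed.)

static void example_fill_name(struct ptlrpc_request *req, const char *name)
{
        char *tmp;

        tmp = req_capsule_client_sized_get(&req->rq_pill, &RMF_NAME,
                                           strlen(name) + 1);
        if (tmp != NULL)
                strcpy(tmp, name);
}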
+
+/**
+ * Trivial wrapper around __req_capsule_get(), that returns the PTLRPC reply
+ * buffer corresponding to the given RMF (\a field) of a \a pill.
+ */
+void *req_capsule_server_get(struct req_capsule *pill,
+ const struct req_msg_field *field)
+{
+ return __req_capsule_get(pill, field, RCL_SERVER, NULL, 0);
+}
+EXPORT_SYMBOL(req_capsule_server_get);
+
+/**
+ * Same as req_capsule_server_get(), but with a \a swabber argument.
+ *
+ * Ideally all swabbing should be done pursuant to RMF definitions, with no
+ * swabbing done outside this capsule abstraction.
+ */
+void *req_capsule_server_swab_get(struct req_capsule *pill,
+ const struct req_msg_field *field,
+ void *swabber)
+{
+ return __req_capsule_get(pill, field, RCL_SERVER, swabber, 0);
+}
+EXPORT_SYMBOL(req_capsule_server_swab_get);
+
+/**
+ * Utility that combines req_capsule_set_size() and req_capsule_server_get().
+ *
+ * First the \a pill's reply \a field's size is set (\a rc_area) using
+ * req_capsule_set_size() with the given \a len. Then the actual buffer is
+ * returned.
+ */
+void *req_capsule_server_sized_get(struct req_capsule *pill,
+ const struct req_msg_field *field,
+ int len)
+{
+ req_capsule_set_size(pill, field, RCL_SERVER, len);
+ return __req_capsule_get(pill, field, RCL_SERVER, NULL, 0);
+}
+EXPORT_SYMBOL(req_capsule_server_sized_get);
+
+void *req_capsule_server_sized_swab_get(struct req_capsule *pill,
+ const struct req_msg_field *field,
+ int len, void *swabber)
+{
+ req_capsule_set_size(pill, field, RCL_SERVER, len);
+ return __req_capsule_get(pill, field, RCL_SERVER, swabber, 0);
+}
+EXPORT_SYMBOL(req_capsule_server_sized_swab_get);
+
+/**
+ * Returns the buffer of a \a pill corresponding to the given \a field from the
+ * request (if the caller is executing on the server-side) or reply (if the
+ * caller is executing on the client-side).
+ *
+ * This function is convenient for use in code that could be executed on the
+ * client and server alike.
+ */
+const void *req_capsule_other_get(struct req_capsule *pill,
+ const struct req_msg_field *field)
+{
+ return __req_capsule_get(pill, field, pill->rc_loc ^ 1, NULL, 0);
+}
+EXPORT_SYMBOL(req_capsule_other_get);
+
+/**
+ * Set the size of the PTLRPC request/reply (\a loc) buffer for the given \a
+ * field of the given \a pill.
+ *
+ * This function must be used when constructing variable sized fields of a
+ * request or reply.
+ */
+void req_capsule_set_size(struct req_capsule *pill,
+ const struct req_msg_field *field,
+ enum req_location loc, int size)
+{
+ LASSERT(loc == RCL_SERVER || loc == RCL_CLIENT);
+
+ if ((size != field->rmf_size) &&
+ (field->rmf_size != -1) &&
+ !(field->rmf_flags & RMF_F_NO_SIZE_CHECK) &&
+ (size > 0)) {
+ if ((field->rmf_flags & RMF_F_STRUCT_ARRAY) &&
+ (size % field->rmf_size != 0)) {
+ CERROR("%s: array field size mismatch "
+ "%d %% %d != 0 (%d)\n",
+ field->rmf_name, size, field->rmf_size, loc);
+ LBUG();
+ } else if (!(field->rmf_flags & RMF_F_STRUCT_ARRAY) &&
+ size < field->rmf_size) {
+ CERROR("%s: field size mismatch %d != %d (%d)\n",
+ field->rmf_name, size, field->rmf_size, loc);
+ LBUG();
+ }
+ }
+
+ pill->rc_area[loc][__req_capsule_offset(pill, field, loc)] = size;
+}
+EXPORT_SYMBOL(req_capsule_set_size);
+
+/**
+ * Return the actual PTLRPC buffer length of a request or reply (\a loc)
+ * for the given \a pill's given \a field.
+ *
+ * NB: this function doesn't correspond with req_capsule_set_size(), which
+ * actually sets the size in pill.rc_area[loc][offset], but this function
+ * returns the message buflen[offset]; maybe we should use another name.
+ */
+int req_capsule_get_size(const struct req_capsule *pill,
+ const struct req_msg_field *field,
+ enum req_location loc)
+{
+ LASSERT(loc == RCL_SERVER || loc == RCL_CLIENT);
+
+ return lustre_msg_buflen(__req_msg(pill, loc),
+ __req_capsule_offset(pill, field, loc));
+}
+EXPORT_SYMBOL(req_capsule_get_size);
+
+/**
+ * Wrapper around lustre_msg_size() that returns the PTLRPC size needed for the
+ * given \a pill's request or reply (\a loc) given the field size recorded in
+ * the \a pill's rc_area.
+ *
+ * See also req_capsule_set_size().
+ */
+int req_capsule_msg_size(struct req_capsule *pill, enum req_location loc)
+{
+ return lustre_msg_size(pill->rc_req->rq_import->imp_msg_magic,
+ pill->rc_fmt->rf_fields[loc].nr,
+ pill->rc_area[loc]);
+}
+
+/**
+ * While req_capsule_msg_size() computes the size of a PTLRPC request or reply
+ * (\a loc) given a \a pill's \a rc_area, this function computes the size of a
+ * PTLRPC request or reply given only an RQF (\a fmt).
+ *
+ * This function should not be used for formats which contain variable size
+ * fields.
+ */
+int req_capsule_fmt_size(__u32 magic, const struct req_format *fmt,
+ enum req_location loc)
+{
+ int size, i = 0;
+
+ /*
+ * This function should probably LASSERT() that fmt has no fields with
+ * RMF_F_STRUCT_ARRAY in rmf_flags, since we can't know here how many
+ * elements in the array there will ultimately be, but then, we could
+ * assume that there will be at least one element, and that's just what
+ * we do.
+ */
+ size = lustre_msg_hdr_size(magic, fmt->rf_fields[loc].nr);
+ if (size < 0)
+ return size;
+
+ for (; i < fmt->rf_fields[loc].nr; ++i)
+ if (fmt->rf_fields[loc].d[i]->rmf_size != -1)
+ size += cfs_size_round(fmt->rf_fields[loc].d[i]->
+ rmf_size);
+ return size;
+}
+
+/**
+ * Changes the format of an RPC.
+ *
+ * The pill must already have been initialized, which means that it already has
+ * a request format. The new format \a fmt must be an extension of the pill's
+ * old format. Specifically: the new format must have as many request and reply
+ * fields as the old one, and all fields shared by the old and new format must
+ * be at least as large in the new format.
+ *
+ * The new format's fields may be of different "type" than the old format, but
+ * only for fields that are "opaque" blobs: fields which a) have no
+ * \a rmf_swabber, b) \a rmf_flags == 0 or RMF_F_NO_SIZE_CHECK, and c) \a
+ * rmf_size == -1 or \a rmf_flags == RMF_F_NO_SIZE_CHECK. For example,
+ * OBD_SET_INFO has a key field and an opaque value field that gets interpreted
+ * according to the key field. When the value, according to the key, contains a
+ * structure (or array thereof) to be swabbed, the format should be changed to
+ * one where the value field has \a rmf_size/rmf_flags/rmf_swabber set
+ * accordingly.
+ */
+void req_capsule_extend(struct req_capsule *pill, const struct req_format *fmt)
+{
+ int i;
+ int j;
+
+ const struct req_format *old;
+
+ LASSERT(pill->rc_fmt != NULL);
+ LASSERT(__req_format_is_sane(fmt));
+
+ old = pill->rc_fmt;
+ /*
+ * Sanity checking...
+ */
+ for (i = 0; i < RCL_NR; ++i) {
+ LASSERT(fmt->rf_fields[i].nr >= old->rf_fields[i].nr);
+ for (j = 0; j < old->rf_fields[i].nr - 1; ++j) {
+ const struct req_msg_field *ofield = FMT_FIELD(old, i, j);
+
+ /* "opaque" fields can be transmogrified */
+ if (ofield->rmf_swabber == NULL &&
+ (ofield->rmf_flags & ~RMF_F_NO_SIZE_CHECK) == 0 &&
+ (ofield->rmf_size == -1 ||
+ ofield->rmf_flags == RMF_F_NO_SIZE_CHECK))
+ continue;
+ LASSERT(FMT_FIELD(fmt, i, j) == FMT_FIELD(old, i, j));
+ }
+ /*
+ * Last field in old format can be shorter than in new.
+ */
+ LASSERT(FMT_FIELD(fmt, i, j)->rmf_size >=
+ FMT_FIELD(old, i, j)->rmf_size);
+ }
+
+ pill->rc_fmt = fmt;
+}
+EXPORT_SYMBOL(req_capsule_extend);
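(Editor's note, not part of the patch: a sketch of req_capsule_extend() in use. It assumes the pill was initially set to a base enqueue format that RQF_LDLM_INTENT legitimately extends; the call site is hypothetical.)

static void example_extend_to_intent(struct req_capsule *pill)
{
        /* widen from the generic enqueue layout once an intent is known */
        req_capsule_extend(pill, &RQF_LDLM_INTENT);
}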
+
+/**
+ * This function returns a non-zero value if the given \a field is present in
+ * the format (\a rc_fmt) of \a pill's PTLRPC request or reply (\a loc), else it
+ * returns 0.
+ */
+int req_capsule_has_field(const struct req_capsule *pill,
+ const struct req_msg_field *field,
+ enum req_location loc)
+{
+ LASSERT(loc == RCL_SERVER || loc == RCL_CLIENT);
+
+ return field->rmf_offset[pill->rc_fmt->rf_idx][loc];
+}
+EXPORT_SYMBOL(req_capsule_has_field);
+
+/**
+ * Returns a non-zero value if the given \a field is present in the given \a
+ * pill's PTLRPC request or reply (\a loc), else it returns 0.
+ */
+int req_capsule_field_present(const struct req_capsule *pill,
+ const struct req_msg_field *field,
+ enum req_location loc)
+{
+ int offset;
+
+ LASSERT(loc == RCL_SERVER || loc == RCL_CLIENT);
+ LASSERT(req_capsule_has_field(pill, field, loc));
+
+ offset = __req_capsule_offset(pill, field, loc);
+ return lustre_msg_bufcount(__req_msg(pill, loc)) > offset;
+}
+EXPORT_SYMBOL(req_capsule_field_present);
+
+/**
+ * This function shrinks the size of the _buffer_ of the \a pill's PTLRPC
+ * request or reply (\a loc).
+ *
+ * This is not the opposite of req_capsule_extend().
+ */
+void req_capsule_shrink(struct req_capsule *pill,
+ const struct req_msg_field *field,
+ unsigned int newlen,
+ enum req_location loc)
+{
+ const struct req_format *fmt;
+ struct lustre_msg *msg;
+ int len;
+ int offset;
+
+ fmt = pill->rc_fmt;
+ LASSERT(fmt != NULL);
+ LASSERT(__req_format_is_sane(fmt));
+ LASSERT(req_capsule_has_field(pill, field, loc));
+ LASSERT(req_capsule_field_present(pill, field, loc));
+
+ offset = __req_capsule_offset(pill, field, loc);
+
+ msg = __req_msg(pill, loc);
+ len = lustre_msg_buflen(msg, offset);
+ LASSERTF(newlen <= len, "%s:%s, oldlen=%d, newlen=%d\n",
+ fmt->rf_name, field->rmf_name, len, newlen);
+
+ if (loc == RCL_CLIENT)
+ pill->rc_req->rq_reqlen = lustre_shrink_msg(msg, offset, newlen,
+ 1);
+ else
+ pill->rc_req->rq_replen = lustre_shrink_msg(msg, offset, newlen,
+ 1);
+}
+EXPORT_SYMBOL(req_capsule_shrink);
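(Editor's note, not part of the patch: a sketch of trimming an over-allocated reply field down to the bytes actually written; the field choice and the "used" count are assumptions for illustration.)

static void example_trim_reply(struct req_capsule *pill, unsigned int used)
{
        if (req_capsule_field_present(pill, &RMF_EADATA, RCL_SERVER))
                req_capsule_shrink(pill, &RMF_EADATA, used, RCL_SERVER);
}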
+
+int req_capsule_server_grow(struct req_capsule *pill,
+ const struct req_msg_field *field,
+ unsigned int newlen)
+{
+ struct ptlrpc_reply_state *rs = pill->rc_req->rq_reply_state, *nrs;
+ char *from, *to;
+ int offset, len, rc;
+
+ LASSERT(pill->rc_fmt != NULL);
+ LASSERT(__req_format_is_sane(pill->rc_fmt));
+ LASSERT(req_capsule_has_field(pill, field, RCL_SERVER));
+ LASSERT(req_capsule_field_present(pill, field, RCL_SERVER));
+
+ len = req_capsule_get_size(pill, field, RCL_SERVER);
+ offset = __req_capsule_offset(pill, field, RCL_SERVER);
+ if (pill->rc_req->rq_repbuf_len >=
+ lustre_packed_msg_size(pill->rc_req->rq_repmsg) - len + newlen)
+ CERROR("Inplace repack might be done\n");
+
+ pill->rc_req->rq_reply_state = NULL;
+ req_capsule_set_size(pill, field, RCL_SERVER, newlen);
+ rc = req_capsule_server_pack(pill);
+ if (rc) {
+ /* put old rs back, the caller will decide what to do */
+ pill->rc_req->rq_reply_state = rs;
+ return rc;
+ }
+ nrs = pill->rc_req->rq_reply_state;
+ /* Now we need only buffers, copy first chunk */
+ to = lustre_msg_buf(nrs->rs_msg, 0, 0);
+ from = lustre_msg_buf(rs->rs_msg, 0, 0);
+ len = (char *)lustre_msg_buf(rs->rs_msg, offset, 0) - from;
+ memcpy(to, from, len);
+ /* check if we have tail and copy it too */
+ if (rs->rs_msg->lm_bufcount > offset + 1) {
+ to = lustre_msg_buf(nrs->rs_msg, offset + 1, 0);
+ from = lustre_msg_buf(rs->rs_msg, offset + 1, 0);
+ offset = rs->rs_msg->lm_bufcount - 1;
+ len = (char *)lustre_msg_buf(rs->rs_msg, offset, 0) +
+ cfs_size_round(rs->rs_msg->lm_buflens[offset]) - from;
+ memcpy(to, from, len);
+ }
+ /* drop old reply if everything is fine */
+ if (rs->rs_difficult) {
+ /* copy rs data */
+ int i;
+
+ nrs->rs_difficult = 1;
+ nrs->rs_no_ack = rs->rs_no_ack;
+ for (i = 0; i < rs->rs_nlocks; i++) {
+ nrs->rs_locks[i] = rs->rs_locks[i];
+ nrs->rs_modes[i] = rs->rs_modes[i];
+ nrs->rs_nlocks++;
+ }
+ rs->rs_nlocks = 0;
+ rs->rs_difficult = 0;
+ rs->rs_no_ack = 0;
+ }
+ ptlrpc_rs_decref(rs);
+ return 0;
+}
+EXPORT_SYMBOL(req_capsule_server_grow);
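(Editor's note, not part of the patch: a sketch of growing one reply field after the reply has been packed, as the function above allows; the field and new length are arbitrary for illustration.)

static int example_grow_reply(struct req_capsule *pill, unsigned int newlen)
{
        /* repacks the reply with the larger RMF_EADATA buffer */
        return req_capsule_server_grow(pill, &RMF_EADATA, newlen);
}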
+/* __REQ_LAYOUT_USER__ */
+#endif
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
new file mode 100644
index 0000000..379e594
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
@@ -0,0 +1,345 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ptlrpc/llog_client.c
+ *
+ * remote api for llog - client side
+ *
+ * Author: Andreas Dilger <adilger@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LOG
+
+#include <linux/libcfs/libcfs.h>
+
+#include <obd_class.h>
+#include <lustre_log.h>
+#include <lustre_net.h>
+#include <linux/list.h>
+
+#define LLOG_CLIENT_ENTRY(ctxt, imp) do { \
+ mutex_lock(&ctxt->loc_mutex); \
+ if (ctxt->loc_imp) { \
+ imp = class_import_get(ctxt->loc_imp); \
+ } else { \
+ CERROR("ctxt->loc_imp == NULL for context idx %d." \
+ "Unable to complete MDS/OSS recovery," \
+ "but I'll try again next time. Not fatal.\n", \
+ ctxt->loc_idx); \
+ imp = NULL; \
+ mutex_unlock(&ctxt->loc_mutex); \
+ return (-EINVAL); \
+ } \
+ mutex_unlock(&ctxt->loc_mutex); \
+} while (0)
+
+#define LLOG_CLIENT_EXIT(ctxt, imp) do { \
+ mutex_lock(&ctxt->loc_mutex); \
+ if (ctxt->loc_imp != imp) \
+ CWARN("loc_imp has changed from %p to %p\n", \
+ ctxt->loc_imp, imp); \
+ class_import_put(imp); \
+ mutex_unlock(&ctxt->loc_mutex); \
+} while (0)
+
+/* This is a callback from the llog_* functions.
+ * Assumes caller has already pushed us into the kernel context. */
+static int llog_client_open(const struct lu_env *env,
+ struct llog_handle *lgh, struct llog_logid *logid,
+ char *name, enum llog_open_param open_param)
+{
+ struct obd_import *imp;
+ struct llogd_body *body;
+ struct llog_ctxt *ctxt = lgh->lgh_ctxt;
+ struct ptlrpc_request *req = NULL;
+ int rc;
+
+ LLOG_CLIENT_ENTRY(ctxt, imp);
+
+ /* client cannot create llog */
+ LASSERTF(open_param != LLOG_OPEN_NEW, "%#x\n", open_param);
+ LASSERT(lgh);
+
+ req = ptlrpc_request_alloc(imp, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
+ if (req == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ if (name)
+ req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
+ strlen(name) + 1);
+
+ rc = ptlrpc_request_pack(req, LUSTRE_LOG_VERSION,
+ LLOG_ORIGIN_HANDLE_CREATE);
+ if (rc) {
+ ptlrpc_request_free(req);
+ req = NULL;
+ GOTO(out, rc);
+ }
+ ptlrpc_request_set_replen(req);
+
+ body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
+ if (logid)
+ body->lgd_logid = *logid;
+ body->lgd_ctxt_idx = ctxt->loc_idx - 1;
+
+ if (name) {
+ char *tmp;
+ tmp = req_capsule_client_sized_get(&req->rq_pill, &RMF_NAME,
+ strlen(name) + 1);
+ LASSERT(tmp);
+ strcpy(tmp, name);
+ }
+
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ GOTO(out, rc);
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
+ if (body == NULL)
+ GOTO(out, rc = -EFAULT);
+
+ lgh->lgh_id = body->lgd_logid;
+ lgh->lgh_ctxt = ctxt;
+out:
+ LLOG_CLIENT_EXIT(ctxt, imp);
+ ptlrpc_req_finished(req);
+ return rc;
+}
+
+static int llog_client_destroy(const struct lu_env *env,
+ struct llog_handle *loghandle)
+{
+ struct obd_import *imp;
+ struct ptlrpc_request *req = NULL;
+ struct llogd_body *body;
+ int rc;
+
+ LLOG_CLIENT_ENTRY(loghandle->lgh_ctxt, imp);
+ req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_DESTROY,
+ LUSTRE_LOG_VERSION,
+ LLOG_ORIGIN_HANDLE_DESTROY);
+ if (req == NULL)
+ GOTO(err_exit, rc = -ENOMEM);
+
+ body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
+ body->lgd_logid = loghandle->lgh_id;
+ body->lgd_llh_flags = loghandle->lgh_hdr->llh_flags;
+
+ if (!(body->lgd_llh_flags & LLOG_F_IS_PLAIN))
+ CERROR("%s: wrong llog flags %x\n", imp->imp_obd->obd_name,
+ body->lgd_llh_flags);
+
+ ptlrpc_request_set_replen(req);
+ rc = ptlrpc_queue_wait(req);
+
+ ptlrpc_req_finished(req);
+err_exit:
+ LLOG_CLIENT_EXIT(loghandle->lgh_ctxt, imp);
+ return rc;
+}
+
+
+static int llog_client_next_block(const struct lu_env *env,
+ struct llog_handle *loghandle,
+ int *cur_idx, int next_idx,
+ __u64 *cur_offset, void *buf, int len)
+{
+ struct obd_import *imp;
+ struct ptlrpc_request *req = NULL;
+ struct llogd_body *body;
+ void *ptr;
+ int rc;
+
+ LLOG_CLIENT_ENTRY(loghandle->lgh_ctxt, imp);
+ req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK,
+ LUSTRE_LOG_VERSION,
+ LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
+ if (req == NULL)
+ GOTO(err_exit, rc = -ENOMEM);
+
+ body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
+ body->lgd_logid = loghandle->lgh_id;
+ body->lgd_ctxt_idx = loghandle->lgh_ctxt->loc_idx - 1;
+ body->lgd_llh_flags = loghandle->lgh_hdr->llh_flags;
+ body->lgd_index = next_idx;
+ body->lgd_saved_index = *cur_idx;
+ body->lgd_len = len;
+ body->lgd_cur_offset = *cur_offset;
+
+ req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER, len);
+ ptlrpc_request_set_replen(req);
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ GOTO(out, rc);
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
+ if (body == NULL)
+ GOTO(out, rc = -EFAULT);
+
+ /* The log records are swabbed as they are processed */
+ ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA);
+ if (ptr == NULL)
+ GOTO(out, rc = -EFAULT);
+
+ *cur_idx = body->lgd_saved_index;
+ *cur_offset = body->lgd_cur_offset;
+
+ memcpy(buf, ptr, len);
+out:
+ ptlrpc_req_finished(req);
+err_exit:
+ LLOG_CLIENT_EXIT(loghandle->lgh_ctxt, imp);
+ return rc;
+}
+
+static int llog_client_prev_block(const struct lu_env *env,
+ struct llog_handle *loghandle,
+ int prev_idx, void *buf, int len)
+{
+ struct obd_import *imp;
+ struct ptlrpc_request *req = NULL;
+ struct llogd_body *body;
+ void *ptr;
+ int rc;
+
+ LLOG_CLIENT_ENTRY(loghandle->lgh_ctxt, imp);
+ req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK,
+ LUSTRE_LOG_VERSION,
+ LLOG_ORIGIN_HANDLE_PREV_BLOCK);
+ if (req == NULL)
+ GOTO(err_exit, rc = -ENOMEM);
+
+ body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
+ body->lgd_logid = loghandle->lgh_id;
+ body->lgd_ctxt_idx = loghandle->lgh_ctxt->loc_idx - 1;
+ body->lgd_llh_flags = loghandle->lgh_hdr->llh_flags;
+ body->lgd_index = prev_idx;
+ body->lgd_len = len;
+
+ req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER, len);
+ ptlrpc_request_set_replen(req);
+
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ GOTO(out, rc);
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
+ if (body == NULL)
+ GOTO(out, rc = -EFAULT);
+
+ ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA);
+ if (ptr == NULL)
+ GOTO(out, rc = -EFAULT);
+
+ memcpy(buf, ptr, len);
+out:
+ ptlrpc_req_finished(req);
+err_exit:
+ LLOG_CLIENT_EXIT(loghandle->lgh_ctxt, imp);
+ return rc;
+}
+
+static int llog_client_read_header(const struct lu_env *env,
+ struct llog_handle *handle)
+{
+ struct obd_import *imp;
+ struct ptlrpc_request *req = NULL;
+ struct llogd_body *body;
+ struct llog_log_hdr *hdr;
+ struct llog_rec_hdr *llh_hdr;
+ int rc;
+
+ LLOG_CLIENT_ENTRY(handle->lgh_ctxt, imp);
+ req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER,
+ LUSTRE_LOG_VERSION,
+ LLOG_ORIGIN_HANDLE_READ_HEADER);
+ if (req == NULL)
+ GOTO(err_exit, rc = -ENOMEM);
+
+ body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
+ body->lgd_logid = handle->lgh_id;
+ body->lgd_ctxt_idx = handle->lgh_ctxt->loc_idx - 1;
+ body->lgd_llh_flags = handle->lgh_hdr->llh_flags;
+
+ ptlrpc_request_set_replen(req);
+ rc = ptlrpc_queue_wait(req);
+ if (rc)
+ GOTO(out, rc);
+
+ hdr = req_capsule_server_get(&req->rq_pill, &RMF_LLOG_LOG_HDR);
+ if (hdr == NULL)
+ GOTO(out, rc = -EFAULT);
+
+ memcpy(handle->lgh_hdr, hdr, sizeof(*hdr));
+ handle->lgh_last_idx = handle->lgh_hdr->llh_tail.lrt_index;
+
+ /* sanity checks */
+ llh_hdr = &handle->lgh_hdr->llh_hdr;
+ if (llh_hdr->lrh_type != LLOG_HDR_MAGIC) {
+ CERROR("bad log header magic: %#x (expecting %#x)\n",
+ llh_hdr->lrh_type, LLOG_HDR_MAGIC);
+ rc = -EIO;
+ } else if (llh_hdr->lrh_len != LLOG_CHUNK_SIZE) {
+ CERROR("incorrectly sized log header: %#x "
+ "(expecting %#x)\n",
+ llh_hdr->lrh_len, LLOG_CHUNK_SIZE);
+ CERROR("you may need to re-run lconf --write_conf.\n");
+ rc = -EIO;
+ }
+out:
+ ptlrpc_req_finished(req);
+err_exit:
+ LLOG_CLIENT_EXIT(handle->lgh_ctxt, imp);
+ return rc;
+}
+
+static int llog_client_close(const struct lu_env *env,
+ struct llog_handle *handle)
+{
+ /*
+ * This doesn't call LLOG_ORIGIN_HANDLE_CLOSE because the servers
+ * all close the file at the end of each of the other LLOG_ RPCs.
+ */
+ return 0;
+}
+
+struct llog_operations llog_client_ops = {
+ .lop_next_block = llog_client_next_block,
+ .lop_prev_block = llog_client_prev_block,
+ .lop_read_header = llog_client_read_header,
+ .lop_open = llog_client_open,
+ .lop_destroy = llog_client_destroy,
+ .lop_close = llog_client_close,
+};
+EXPORT_SYMBOL(llog_client_ops);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c b/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
new file mode 100644
index 0000000..17c06a3
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
@@ -0,0 +1,74 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ptlrpc/llog_net.c
+ *
+ * OST<->MDS recovery logging infrastructure.
+ *
+ * Invariants in implementation:
+ * - we do not share logs among different OST<->MDS connections, so that
+ * if an OST or MDS fails it need only look at log(s) relevant to itself
+ *
+ * Author: Andreas Dilger <adilger@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LOG
+
+#include <linux/libcfs/libcfs.h>
+
+#include <obd_class.h>
+#include <lustre_log.h>
+#include <linux/list.h>
+#include <lvfs.h>
+#include <lustre_fsfilt.h>
+
+int llog_initiator_connect(struct llog_ctxt *ctxt)
+{
+ struct obd_import *new_imp;
+
+ LASSERT(ctxt);
+ new_imp = ctxt->loc_obd->u.cli.cl_import;
+ LASSERTF(ctxt->loc_imp == NULL || ctxt->loc_imp == new_imp,
+ "%p - %p\n", ctxt->loc_imp, new_imp);
+ mutex_lock(&ctxt->loc_mutex);
+ if (ctxt->loc_imp != new_imp) {
+ if (ctxt->loc_imp)
+ class_import_put(ctxt->loc_imp);
+ ctxt->loc_imp = class_import_get(new_imp);
+ }
+ mutex_unlock(&ctxt->loc_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(llog_initiator_connect);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_server.c b/drivers/staging/lustre/lustre/ptlrpc/llog_server.c
new file mode 100644
index 0000000..af9d2ac
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/llog_server.c
@@ -0,0 +1,450 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ptlrpc/llog_server.c
+ *
+ * remote api for llog - server side
+ *
+ * Author: Andreas Dilger <adilger@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LOG
+
+
+#include <obd_class.h>
+#include <lustre_log.h>
+#include <lustre_net.h>
+#include <lustre_fsfilt.h>
+
+#if defined(LUSTRE_LOG_SERVER)
+static int llog_origin_close(const struct lu_env *env, struct llog_handle *lgh)
+{
+ if (lgh->lgh_hdr != NULL && lgh->lgh_hdr->llh_flags & LLOG_F_IS_CAT)
+ return llog_cat_close(env, lgh);
+ else
+ return llog_close(env, lgh);
+}
+
+/* Only open is supported, no new llog can be created remotely */
+int llog_origin_handle_open(struct ptlrpc_request *req)
+{
+ struct obd_export *exp = req->rq_export;
+ struct obd_device *obd = exp->exp_obd;
+ struct obd_device *disk_obd;
+ struct lvfs_run_ctxt saved;
+ struct llog_handle *loghandle;
+ struct llogd_body *body;
+ struct llog_logid *logid = NULL;
+ struct llog_ctxt *ctxt;
+ char *name = NULL;
+ int rc;
+
+ body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
+ if (body == NULL)
+ return -EFAULT;
+
+ if (ostid_id(&body->lgd_logid.lgl_oi) > 0)
+ logid = &body->lgd_logid;
+
+ if (req_capsule_field_present(&req->rq_pill, &RMF_NAME, RCL_CLIENT)) {
+ name = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
+ if (name == NULL)
+ return -EFAULT;
+ CDEBUG(D_INFO, "%s: opening log %s\n", obd->obd_name, name);
+ }
+
+ ctxt = llog_get_context(obd, body->lgd_ctxt_idx);
+ if (ctxt == NULL) {
+ CDEBUG(D_WARNING, "%s: no ctxt. group=%p idx=%d name=%s\n",
+ obd->obd_name, &obd->obd_olg, body->lgd_ctxt_idx, name);
+ return -ENODEV;
+ }
+ disk_obd = ctxt->loc_exp->exp_obd;
+ push_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
+
+ rc = llog_open(req->rq_svc_thread->t_env, ctxt, &loghandle, logid,
+ name, LLOG_OPEN_EXISTS);
+ if (rc)
+ GOTO(out_pop, rc);
+
+ rc = req_capsule_server_pack(&req->rq_pill);
+ if (rc)
+ GOTO(out_close, rc = -ENOMEM);
+
+ body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
+ body->lgd_logid = loghandle->lgh_id;
+
+out_close:
+ llog_origin_close(req->rq_svc_thread->t_env, loghandle);
+out_pop:
+ pop_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
+ llog_ctxt_put(ctxt);
+ return rc;
+}
+EXPORT_SYMBOL(llog_origin_handle_open);
+
+int llog_origin_handle_destroy(struct ptlrpc_request *req)
+{
+ struct obd_device *disk_obd;
+ struct lvfs_run_ctxt saved;
+ struct llogd_body *body;
+ struct llog_logid *logid = NULL;
+ struct llog_ctxt *ctxt;
+ int rc;
+
+ body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
+ if (body == NULL)
+ return -EFAULT;
+
+ if (ostid_id(&body->lgd_logid.lgl_oi) > 0)
+ logid = &body->lgd_logid;
+
+ if (!(body->lgd_llh_flags & LLOG_F_IS_PLAIN))
+ CERROR("%s: wrong llog flags %x\n",
+ req->rq_export->exp_obd->obd_name, body->lgd_llh_flags);
+
+ ctxt = llog_get_context(req->rq_export->exp_obd, body->lgd_ctxt_idx);
+ if (ctxt == NULL)
+ return -ENODEV;
+
+ disk_obd = ctxt->loc_exp->exp_obd;
+ push_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
+
+ rc = req_capsule_server_pack(&req->rq_pill);
+ /* erase only if no error and logid is valid */
+ if (rc == 0)
+ rc = llog_erase(req->rq_svc_thread->t_env, ctxt, logid, NULL);
+ pop_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
+ llog_ctxt_put(ctxt);
+ return rc;
+}
+EXPORT_SYMBOL(llog_origin_handle_destroy);
+
+int llog_origin_handle_next_block(struct ptlrpc_request *req)
+{
+ struct obd_device *disk_obd;
+ struct llog_handle *loghandle;
+ struct llogd_body *body;
+ struct llogd_body *repbody;
+ struct lvfs_run_ctxt saved;
+ struct llog_ctxt *ctxt;
+ __u32 flags;
+ void *ptr;
+ int rc;
+
+ body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
+ if (body == NULL)
+ return -EFAULT;
+
+ ctxt = llog_get_context(req->rq_export->exp_obd, body->lgd_ctxt_idx);
+ if (ctxt == NULL)
+ return -ENODEV;
+
+ disk_obd = ctxt->loc_exp->exp_obd;
+ push_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
+
+ rc = llog_open(req->rq_svc_thread->t_env, ctxt, &loghandle,
+ &body->lgd_logid, NULL, LLOG_OPEN_EXISTS);
+ if (rc)
+ GOTO(out_pop, rc);
+
+ flags = body->lgd_llh_flags;
+ rc = llog_init_handle(req->rq_svc_thread->t_env, loghandle, flags,
+ NULL);
+ if (rc)
+ GOTO(out_close, rc);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER,
+ LLOG_CHUNK_SIZE);
+ rc = req_capsule_server_pack(&req->rq_pill);
+ if (rc)
+ GOTO(out_close, rc = -ENOMEM);
+
+ repbody = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
+ *repbody = *body;
+
+ ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA);
+ rc = llog_next_block(req->rq_svc_thread->t_env, loghandle,
+ &repbody->lgd_saved_index, repbody->lgd_index,
+ &repbody->lgd_cur_offset, ptr, LLOG_CHUNK_SIZE);
+ if (rc)
+ GOTO(out_close, rc);
+out_close:
+ llog_origin_close(req->rq_svc_thread->t_env, loghandle);
+out_pop:
+ pop_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
+ llog_ctxt_put(ctxt);
+ return rc;
+}
+EXPORT_SYMBOL(llog_origin_handle_next_block);
+
+int llog_origin_handle_prev_block(struct ptlrpc_request *req)
+{
+ struct llog_handle *loghandle;
+ struct llogd_body *body;
+ struct llogd_body *repbody;
+ struct obd_device *disk_obd;
+ struct lvfs_run_ctxt saved;
+ struct llog_ctxt *ctxt;
+ __u32 flags;
+ void *ptr;
+ int rc;
+
+ body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
+ if (body == NULL)
+ return -EFAULT;
+
+ ctxt = llog_get_context(req->rq_export->exp_obd, body->lgd_ctxt_idx);
+ if (ctxt == NULL)
+ return -ENODEV;
+
+ disk_obd = ctxt->loc_exp->exp_obd;
+ push_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
+
+ rc = llog_open(req->rq_svc_thread->t_env, ctxt, &loghandle,
+ &body->lgd_logid, NULL, LLOG_OPEN_EXISTS);
+ if (rc)
+ GOTO(out_pop, rc);
+
+ flags = body->lgd_llh_flags;
+ rc = llog_init_handle(req->rq_svc_thread->t_env, loghandle, flags,
+ NULL);
+ if (rc)
+ GOTO(out_close, rc);
+
+ req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER,
+ LLOG_CHUNK_SIZE);
+ rc = req_capsule_server_pack(&req->rq_pill);
+ if (rc)
+ GOTO(out_close, rc = -ENOMEM);
+
+ repbody = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
+ *repbody = *body;
+
+ ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA);
+ rc = llog_prev_block(req->rq_svc_thread->t_env, loghandle,
+ body->lgd_index, ptr, LLOG_CHUNK_SIZE);
+ if (rc)
+ GOTO(out_close, rc);
+
+out_close:
+ llog_origin_close(req->rq_svc_thread->t_env, loghandle);
+out_pop:
+ pop_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
+ llog_ctxt_put(ctxt);
+ return rc;
+}
+EXPORT_SYMBOL(llog_origin_handle_prev_block);
+
+int llog_origin_handle_read_header(struct ptlrpc_request *req)
+{
+ struct obd_device *disk_obd;
+ struct llog_handle *loghandle;
+ struct llogd_body *body;
+ struct llog_log_hdr *hdr;
+ struct lvfs_run_ctxt saved;
+ struct llog_ctxt *ctxt;
+ __u32 flags;
+ int rc;
+
+ body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
+ if (body == NULL)
+ return -EFAULT;
+
+ ctxt = llog_get_context(req->rq_export->exp_obd, body->lgd_ctxt_idx);
+ if (ctxt == NULL)
+ return -ENODEV;
+
+ disk_obd = ctxt->loc_exp->exp_obd;
+ push_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
+
+ rc = llog_open(req->rq_svc_thread->t_env, ctxt, &loghandle,
+ &body->lgd_logid, NULL, LLOG_OPEN_EXISTS);
+ if (rc)
+ GOTO(out_pop, rc);
+
+ /*
+ * llog_init_handle() reads the llog header
+ */
+ flags = body->lgd_llh_flags;
+ rc = llog_init_handle(req->rq_svc_thread->t_env, loghandle, flags,
+ NULL);
+ if (rc)
+ GOTO(out_close, rc);
+ flags = loghandle->lgh_hdr->llh_flags;
+
+ rc = req_capsule_server_pack(&req->rq_pill);
+ if (rc)
+ GOTO(out_close, rc = -ENOMEM);
+
+ hdr = req_capsule_server_get(&req->rq_pill, &RMF_LLOG_LOG_HDR);
+ *hdr = *loghandle->lgh_hdr;
+out_close:
+ llog_origin_close(req->rq_svc_thread->t_env, loghandle);
+out_pop:
+ pop_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
+ llog_ctxt_put(ctxt);
+ return rc;
+}
+EXPORT_SYMBOL(llog_origin_handle_read_header);
+
+int llog_origin_handle_close(struct ptlrpc_request *req)
+{
+ /* Nothing to do */
+ return 0;
+}
+EXPORT_SYMBOL(llog_origin_handle_close);
+
+int llog_origin_handle_cancel(struct ptlrpc_request *req)
+{
+ int num_cookies, rc = 0, err, i, failed = 0;
+ struct obd_device *disk_obd;
+ struct llog_cookie *logcookies;
+ struct llog_ctxt *ctxt = NULL;
+ struct lvfs_run_ctxt saved;
+ struct llog_handle *cathandle;
+ struct inode *inode;
+ void *handle;
+
+ logcookies = req_capsule_client_get(&req->rq_pill, &RMF_LOGCOOKIES);
+ num_cookies = req_capsule_get_size(&req->rq_pill, &RMF_LOGCOOKIES,
+ RCL_CLIENT) / sizeof(*logcookies);
+ if (logcookies == NULL || num_cookies == 0) {
+ DEBUG_REQ(D_HA, req, "No llog cookies sent");
+ return -EFAULT;
+ }
+
+ ctxt = llog_get_context(req->rq_export->exp_obd,
+ logcookies->lgc_subsys);
+ if (ctxt == NULL)
+ return -ENODEV;
+
+ disk_obd = ctxt->loc_exp->exp_obd;
+ push_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
+ for (i = 0; i < num_cookies; i++, logcookies++) {
+ cathandle = ctxt->loc_handle;
+ LASSERT(cathandle != NULL);
+ inode = cathandle->lgh_file->f_dentry->d_inode;
+
+ handle = fsfilt_start_log(disk_obd, inode,
+ FSFILT_OP_CANCEL_UNLINK, NULL, 1);
+ if (IS_ERR(handle)) {
+ CERROR("fsfilt_start_log() failed: %ld\n",
+ PTR_ERR(handle));
+ GOTO(pop_ctxt, rc = PTR_ERR(handle));
+ }
+
+ rc = llog_cat_cancel_records(req->rq_svc_thread->t_env,
+ cathandle, 1, logcookies);
+
+		/*
+		 * Do not raise -ENOENT errors for resent RPCs; the record may
+		 * already have been cancelled.
+		 */
+ if (rc == -ENOENT &&
+ (lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT)) {
+			/*
+			 * Do not change this message; replay-single.sh test_59b
+			 * expects to find it in the log.
+			 */
+ CDEBUG(D_RPCTRACE, "RESENT cancel req %p - ignored\n",
+ req);
+ rc = 0;
+ } else if (rc == 0) {
+ CDEBUG(D_RPCTRACE, "Canceled %d llog-records\n",
+ num_cookies);
+ }
+
+ err = fsfilt_commit(disk_obd, inode, handle, 0);
+ if (err) {
+ CERROR("Error committing transaction: %d\n", err);
+ if (!rc)
+ rc = err;
+ failed++;
+ GOTO(pop_ctxt, rc);
+ } else if (rc)
+ failed++;
+ }
+ GOTO(pop_ctxt, rc);
+pop_ctxt:
+ pop_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
+ if (rc)
+ CERROR("Cancel %d of %d llog-records failed: %d\n",
+ failed, num_cookies, rc);
+
+ llog_ctxt_put(ctxt);
+ return rc;
+}
+EXPORT_SYMBOL(llog_origin_handle_cancel);
+
+#else /* !__KERNEL__ */
+int llog_origin_handle_open(struct ptlrpc_request *req)
+{
+ LBUG();
+ return 0;
+}
+
+int llog_origin_handle_destroy(struct ptlrpc_request *req)
+{
+ LBUG();
+ return 0;
+}
+
+int llog_origin_handle_next_block(struct ptlrpc_request *req)
+{
+ LBUG();
+ return 0;
+}
+int llog_origin_handle_prev_block(struct ptlrpc_request *req)
+{
+ LBUG();
+ return 0;
+}
+int llog_origin_handle_read_header(struct ptlrpc_request *req)
+{
+ LBUG();
+ return 0;
+}
+int llog_origin_handle_close(struct ptlrpc_request *req)
+{
+ LBUG();
+ return 0;
+}
+int llog_origin_handle_cancel(struct ptlrpc_request *req)
+{
+ LBUG();
+ return 0;
+}
+#endif
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
new file mode 100644
index 0000000..bea44a3
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -0,0 +1,1342 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+#define DEBUG_SUBSYSTEM S_CLASS
+
+
+#include <obd_support.h>
+#include <obd.h>
+#include <lprocfs_status.h>
+#include <lustre/lustre_idl.h>
+#include <lustre_net.h>
+#include <obd_class.h>
+#include "ptlrpc_internal.h"
+
+
+struct ll_rpc_opcode {
+ __u32 opcode;
+ const char *opname;
+} ll_rpc_opcode_table[LUSTRE_MAX_OPCODES] = {
+ { OST_REPLY, "ost_reply" },
+ { OST_GETATTR, "ost_getattr" },
+ { OST_SETATTR, "ost_setattr" },
+ { OST_READ, "ost_read" },
+ { OST_WRITE, "ost_write" },
+	{ OST_CREATE, "ost_create" },
+ { OST_DESTROY, "ost_destroy" },
+ { OST_GET_INFO, "ost_get_info" },
+ { OST_CONNECT, "ost_connect" },
+ { OST_DISCONNECT, "ost_disconnect" },
+ { OST_PUNCH, "ost_punch" },
+ { OST_OPEN, "ost_open" },
+ { OST_CLOSE, "ost_close" },
+ { OST_STATFS, "ost_statfs" },
+ { 14, NULL }, /* formerly OST_SAN_READ */
+ { 15, NULL }, /* formerly OST_SAN_WRITE */
+ { OST_SYNC, "ost_sync" },
+ { OST_SET_INFO, "ost_set_info" },
+ { OST_QUOTACHECK, "ost_quotacheck" },
+ { OST_QUOTACTL, "ost_quotactl" },
+ { OST_QUOTA_ADJUST_QUNIT, "ost_quota_adjust_qunit" },
+ { MDS_GETATTR, "mds_getattr" },
+ { MDS_GETATTR_NAME, "mds_getattr_lock" },
+ { MDS_CLOSE, "mds_close" },
+ { MDS_REINT, "mds_reint" },
+ { MDS_READPAGE, "mds_readpage" },
+ { MDS_CONNECT, "mds_connect" },
+ { MDS_DISCONNECT, "mds_disconnect" },
+ { MDS_GETSTATUS, "mds_getstatus" },
+ { MDS_STATFS, "mds_statfs" },
+ { MDS_PIN, "mds_pin" },
+ { MDS_UNPIN, "mds_unpin" },
+ { MDS_SYNC, "mds_sync" },
+ { MDS_DONE_WRITING, "mds_done_writing" },
+ { MDS_SET_INFO, "mds_set_info" },
+ { MDS_QUOTACHECK, "mds_quotacheck" },
+ { MDS_QUOTACTL, "mds_quotactl" },
+ { MDS_GETXATTR, "mds_getxattr" },
+ { MDS_SETXATTR, "mds_setxattr" },
+ { MDS_WRITEPAGE, "mds_writepage" },
+ { MDS_IS_SUBDIR, "mds_is_subdir" },
+ { MDS_GET_INFO, "mds_get_info" },
+ { MDS_HSM_STATE_GET, "mds_hsm_state_get" },
+ { MDS_HSM_STATE_SET, "mds_hsm_state_set" },
+ { MDS_HSM_ACTION, "mds_hsm_action" },
+ { MDS_HSM_PROGRESS, "mds_hsm_progress" },
+ { MDS_HSM_REQUEST, "mds_hsm_request" },
+ { MDS_HSM_CT_REGISTER, "mds_hsm_ct_register" },
+ { MDS_HSM_CT_UNREGISTER, "mds_hsm_ct_unregister" },
+ { MDS_SWAP_LAYOUTS, "mds_swap_layouts" },
+ { LDLM_ENQUEUE, "ldlm_enqueue" },
+ { LDLM_CONVERT, "ldlm_convert" },
+ { LDLM_CANCEL, "ldlm_cancel" },
+ { LDLM_BL_CALLBACK, "ldlm_bl_callback" },
+ { LDLM_CP_CALLBACK, "ldlm_cp_callback" },
+ { LDLM_GL_CALLBACK, "ldlm_gl_callback" },
+ { LDLM_SET_INFO, "ldlm_set_info" },
+ { MGS_CONNECT, "mgs_connect" },
+ { MGS_DISCONNECT, "mgs_disconnect" },
+ { MGS_EXCEPTION, "mgs_exception" },
+ { MGS_TARGET_REG, "mgs_target_reg" },
+ { MGS_TARGET_DEL, "mgs_target_del" },
+ { MGS_SET_INFO, "mgs_set_info" },
+ { MGS_CONFIG_READ, "mgs_config_read" },
+ { OBD_PING, "obd_ping" },
+ { OBD_LOG_CANCEL, "llog_origin_handle_cancel" },
+ { OBD_QC_CALLBACK, "obd_quota_callback" },
+ { OBD_IDX_READ, "dt_index_read" },
+ { LLOG_ORIGIN_HANDLE_CREATE, "llog_origin_handle_create" },
+ { LLOG_ORIGIN_HANDLE_NEXT_BLOCK, "llog_origin_handle_next_block" },
+	{ LLOG_ORIGIN_HANDLE_READ_HEADER, "llog_origin_handle_read_header" },
+ { LLOG_ORIGIN_HANDLE_WRITE_REC, "llog_origin_handle_write_rec" },
+ { LLOG_ORIGIN_HANDLE_CLOSE, "llog_origin_handle_close" },
+ { LLOG_ORIGIN_CONNECT, "llog_origin_connect" },
+ { LLOG_CATINFO, "llog_catinfo" },
+ { LLOG_ORIGIN_HANDLE_PREV_BLOCK, "llog_origin_handle_prev_block" },
+ { LLOG_ORIGIN_HANDLE_DESTROY, "llog_origin_handle_destroy" },
+ { QUOTA_DQACQ, "quota_acquire" },
+ { QUOTA_DQREL, "quota_release" },
+ { SEQ_QUERY, "seq_query" },
+ { SEC_CTX_INIT, "sec_ctx_init" },
+	{ SEC_CTX_INIT_CONT, "sec_ctx_init_cont" },
+ { SEC_CTX_FINI, "sec_ctx_fini" },
+ { FLD_QUERY, "fld_query" },
+ { UPDATE_OBJ, "update_obj" },
+};
+
+struct ll_eopcode {
+ __u32 opcode;
+ const char *opname;
+} ll_eopcode_table[EXTRA_LAST_OPC] = {
+ { LDLM_GLIMPSE_ENQUEUE, "ldlm_glimpse_enqueue" },
+ { LDLM_PLAIN_ENQUEUE, "ldlm_plain_enqueue" },
+ { LDLM_EXTENT_ENQUEUE, "ldlm_extent_enqueue" },
+ { LDLM_FLOCK_ENQUEUE, "ldlm_flock_enqueue" },
+ { LDLM_IBITS_ENQUEUE, "ldlm_ibits_enqueue" },
+ { MDS_REINT_SETATTR, "mds_reint_setattr" },
+ { MDS_REINT_CREATE, "mds_reint_create" },
+ { MDS_REINT_LINK, "mds_reint_link" },
+ { MDS_REINT_UNLINK, "mds_reint_unlink" },
+ { MDS_REINT_RENAME, "mds_reint_rename" },
+ { MDS_REINT_OPEN, "mds_reint_open" },
+ { MDS_REINT_SETXATTR, "mds_reint_setxattr" },
+ { BRW_READ_BYTES, "read_bytes" },
+ { BRW_WRITE_BYTES, "write_bytes" },
+};
+
+const char *ll_opcode2str(__u32 opcode)
+{
+	/* When one of the assertions below fails, chances are that:
+ * 1) A new opcode was added in include/lustre/lustre_idl.h,
+ * but is missing from the table above.
+ * or 2) The opcode space was renumbered or rearranged,
+ * and the opcode_offset() function in
+ * ptlrpc_internal.h needs to be modified.
+ */
+ __u32 offset = opcode_offset(opcode);
+ LASSERTF(offset < LUSTRE_MAX_OPCODES,
+ "offset %u >= LUSTRE_MAX_OPCODES %u\n",
+ offset, LUSTRE_MAX_OPCODES);
+ LASSERTF(ll_rpc_opcode_table[offset].opcode == opcode,
+ "ll_rpc_opcode_table[%u].opcode %u != opcode %u\n",
+ offset, ll_rpc_opcode_table[offset].opcode, opcode);
+ return ll_rpc_opcode_table[offset].opname;
+}
+
+const char *ll_eopcode2str(__u32 opcode)
+{
+ LASSERT(ll_eopcode_table[opcode].opcode == opcode);
+ return ll_eopcode_table[opcode].opname;
+}
+#ifdef LPROCFS
+void ptlrpc_lprocfs_register(struct proc_dir_entry *root, char *dir,
+ char *name, struct proc_dir_entry **procroot_ret,
+ struct lprocfs_stats **stats_ret)
+{
+ struct proc_dir_entry *svc_procroot;
+ struct lprocfs_stats *svc_stats;
+ int i, rc;
+ unsigned int svc_counter_config = LPROCFS_CNTR_AVGMINMAX |
+ LPROCFS_CNTR_STDDEV;
+
+ LASSERT(*procroot_ret == NULL);
+ LASSERT(*stats_ret == NULL);
+
+	svc_stats = lprocfs_alloc_stats(EXTRA_MAX_OPCODES + LUSTRE_MAX_OPCODES, 0);
+ if (svc_stats == NULL)
+ return;
+
+ if (dir) {
+ svc_procroot = lprocfs_register(dir, root, NULL, NULL);
+ if (IS_ERR(svc_procroot)) {
+ lprocfs_free_stats(&svc_stats);
+ return;
+ }
+ } else {
+ svc_procroot = root;
+ }
+
+ lprocfs_counter_init(svc_stats, PTLRPC_REQWAIT_CNTR,
+ svc_counter_config, "req_waittime", "usec");
+ lprocfs_counter_init(svc_stats, PTLRPC_REQQDEPTH_CNTR,
+ svc_counter_config, "req_qdepth", "reqs");
+ lprocfs_counter_init(svc_stats, PTLRPC_REQACTIVE_CNTR,
+ svc_counter_config, "req_active", "reqs");
+ lprocfs_counter_init(svc_stats, PTLRPC_TIMEOUT,
+ svc_counter_config, "req_timeout", "sec");
+ lprocfs_counter_init(svc_stats, PTLRPC_REQBUF_AVAIL_CNTR,
+ svc_counter_config, "reqbuf_avail", "bufs");
+ for (i = 0; i < EXTRA_LAST_OPC; i++) {
+ char *units;
+
+ switch(i) {
+ case BRW_WRITE_BYTES:
+ case BRW_READ_BYTES:
+ units = "bytes";
+ break;
+ default:
+ units = "reqs";
+ break;
+ }
+ lprocfs_counter_init(svc_stats, PTLRPC_LAST_CNTR + i,
+ svc_counter_config,
+ ll_eopcode2str(i), units);
+ }
+ for (i = 0; i < LUSTRE_MAX_OPCODES; i++) {
+ __u32 opcode = ll_rpc_opcode_table[i].opcode;
+ lprocfs_counter_init(svc_stats,
+ EXTRA_MAX_OPCODES + i, svc_counter_config,
+ ll_opcode2str(opcode), "usec");
+ }
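+
+	/* Index layout of svc_stats as set up above: the named service
+	 * counters come first, the extra opcodes live at
+	 * PTLRPC_LAST_CNTR + <eopcode>, and the RPC opcodes at
+	 * EXTRA_MAX_OPCODES + opcode_offset(opc).
+	 * ptlrpc_lprocfs_rpc_sent() and ptlrpc_lprocfs_brw() below index
+	 * the stats with the same offsets. */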
+
+ rc = lprocfs_register_stats(svc_procroot, name, svc_stats);
+ if (rc < 0) {
+ if (dir)
+ lprocfs_remove(&svc_procroot);
+ lprocfs_free_stats(&svc_stats);
+ } else {
+ if (dir)
+ *procroot_ret = svc_procroot;
+ *stats_ret = svc_stats;
+ }
+}
+
+static int
+ptlrpc_lprocfs_req_history_len_seq_show(struct seq_file *m, void *v)
+{
+ struct ptlrpc_service *svc = m->private;
+ struct ptlrpc_service_part *svcpt;
+ int total = 0;
+ int i;
+
+ ptlrpc_service_for_each_part(svcpt, i, svc)
+ total += svcpt->scp_hist_nrqbds;
+
+ return seq_printf(m, "%d\n", total);
+}
+LPROC_SEQ_FOPS_RO(ptlrpc_lprocfs_req_history_len);
+
+static int
+ptlrpc_lprocfs_req_history_max_seq_show(struct seq_file *m, void *n)
+{
+ struct ptlrpc_service *svc = m->private;
+ struct ptlrpc_service_part *svcpt;
+ int total = 0;
+ int i;
+
+ ptlrpc_service_for_each_part(svcpt, i, svc)
+ total += svc->srv_hist_nrqbds_cpt_max;
+
+ return seq_printf(m, "%d\n", total);
+}
+
+static ssize_t
+ptlrpc_lprocfs_req_history_max_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct ptlrpc_service *svc = ((struct seq_file *)file->private_data)->private;
+ int bufpages;
+ int val;
+ int rc;
+
+ rc = lprocfs_write_helper(buffer, count, &val);
+ if (rc < 0)
+ return rc;
+
+ if (val < 0)
+ return -ERANGE;
+
+ /* This sanity check is more of an insanity check; we can still
+ * hose a kernel by allowing the request history to grow too
+ * far. */
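+	/* The check below caps the total history footprint (roughly
+	 * val * bufpages pages) at half of totalram_pages. */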
+ bufpages = (svc->srv_buf_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ if (val > totalram_pages / (2 * bufpages))
+ return -ERANGE;
+
+ spin_lock(&svc->srv_lock);
+
+ if (val == 0)
+ svc->srv_hist_nrqbds_cpt_max = 0;
+ else
+ svc->srv_hist_nrqbds_cpt_max = max(1, (val / svc->srv_ncpts));
+
+ spin_unlock(&svc->srv_lock);
+
+ return count;
+}
+LPROC_SEQ_FOPS(ptlrpc_lprocfs_req_history_max);
+
+static int
+ptlrpc_lprocfs_threads_min_seq_show(struct seq_file *m, void *n)
+{
+ struct ptlrpc_service *svc = m->private;
+
+ return seq_printf(m, "%d\n",
+ svc->srv_nthrs_cpt_init * svc->srv_ncpts);
+}
+
+static ssize_t
+ptlrpc_lprocfs_threads_min_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct ptlrpc_service *svc = ((struct seq_file *)file->private_data)->private;
+ int val;
+ int rc = lprocfs_write_helper(buffer, count, &val);
+
+ if (rc < 0)
+ return rc;
+
+ if (val / svc->srv_ncpts < PTLRPC_NTHRS_INIT)
+ return -ERANGE;
+
+ spin_lock(&svc->srv_lock);
+ if (val > svc->srv_nthrs_cpt_limit * svc->srv_ncpts) {
+ spin_unlock(&svc->srv_lock);
+ return -ERANGE;
+ }
+
+ svc->srv_nthrs_cpt_init = val / svc->srv_ncpts;
+
+ spin_unlock(&svc->srv_lock);
+
+ return count;
+}
+LPROC_SEQ_FOPS(ptlrpc_lprocfs_threads_min);
+
+static int
+ptlrpc_lprocfs_threads_started_seq_show(struct seq_file *m, void *n)
+{
+ struct ptlrpc_service *svc = m->private;
+ struct ptlrpc_service_part *svcpt;
+ int total = 0;
+ int i;
+
+ ptlrpc_service_for_each_part(svcpt, i, svc)
+ total += svcpt->scp_nthrs_running;
+
+ return seq_printf(m, "%d\n", total);
+}
+LPROC_SEQ_FOPS_RO(ptlrpc_lprocfs_threads_started);
+
+static int
+ptlrpc_lprocfs_threads_max_seq_show(struct seq_file *m, void *n)
+{
+ struct ptlrpc_service *svc = m->private;
+
+ return seq_printf(m, "%d\n",
+ svc->srv_nthrs_cpt_limit * svc->srv_ncpts);
+}
+
+static ssize_t
+ptlrpc_lprocfs_threads_max_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct ptlrpc_service *svc = ((struct seq_file *)file->private_data)->private;
+ int val;
+ int rc = lprocfs_write_helper(buffer, count, &val);
+
+ if (rc < 0)
+ return rc;
+
+ if (val / svc->srv_ncpts < PTLRPC_NTHRS_INIT)
+ return -ERANGE;
+
+ spin_lock(&svc->srv_lock);
+ if (val < svc->srv_nthrs_cpt_init * svc->srv_ncpts) {
+ spin_unlock(&svc->srv_lock);
+ return -ERANGE;
+ }
+
+ svc->srv_nthrs_cpt_limit = val / svc->srv_ncpts;
+
+ spin_unlock(&svc->srv_lock);
+
+ return count;
+}
+LPROC_SEQ_FOPS(ptlrpc_lprocfs_threads_max);
+
+/**
+ * \addtogroup nrs
+ * @{
+ */
+extern struct nrs_core nrs_core;
+
+/**
+ * Translates \e ptlrpc_nrs_pol_state values to human-readable strings.
+ *
+ * \param[in] state The policy state
+ */
+static const char *nrs_state2str(enum ptlrpc_nrs_pol_state state)
+{
+ switch (state) {
+ default:
+ LBUG();
+ case NRS_POL_STATE_INVALID:
+ return "invalid";
+ case NRS_POL_STATE_STOPPED:
+ return "stopped";
+ case NRS_POL_STATE_STOPPING:
+ return "stopping";
+ case NRS_POL_STATE_STARTING:
+ return "starting";
+ case NRS_POL_STATE_STARTED:
+ return "started";
+ }
+}
+
+/**
+ * Obtains status information for \a policy.
+ *
+ * Information is copied in \a info.
+ *
+ * \param[in] policy The policy
+ * \param[out] info Holds returned status information
+ */
+void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_pol_info *info)
+{
+ LASSERT(policy != NULL);
+ LASSERT(info != NULL);
+ LASSERT(spin_is_locked(&policy->pol_nrs->nrs_lock));
+
+ memcpy(info->pi_name, policy->pol_desc->pd_name, NRS_POL_NAME_MAX);
+
+ info->pi_fallback = !!(policy->pol_flags & PTLRPC_NRS_FL_FALLBACK);
+ info->pi_state = policy->pol_state;
+ /**
+ * XXX: These are accessed without holding
+ * ptlrpc_service_part::scp_req_lock.
+ */
+ info->pi_req_queued = policy->pol_req_queued;
+ info->pi_req_started = policy->pol_req_started;
+}
+
+/**
+ * Reads and prints policy status information for all policies of a PTLRPC
+ * service.
+ */
+static int ptlrpc_lprocfs_nrs_seq_show(struct seq_file *m, void *n)
+{
+ struct ptlrpc_service *svc = m->private;
+ struct ptlrpc_service_part *svcpt;
+ struct ptlrpc_nrs *nrs;
+ struct ptlrpc_nrs_policy *policy;
+ struct ptlrpc_nrs_pol_info *infos;
+ struct ptlrpc_nrs_pol_info tmp;
+ unsigned num_pols;
+ unsigned pol_idx = 0;
+ bool hp = false;
+ int i;
+ int rc = 0;
+
+ /**
+ * Serialize NRS core lprocfs operations with policy registration/
+ * unregistration.
+ */
+ mutex_lock(&nrs_core.nrs_mutex);
+
+ /**
+ * Use the first service partition's regular NRS head in order to obtain
+ * the number of policies registered with NRS heads of this service. All
+ * service partitions will have the same number of policies.
+ */
+ nrs = nrs_svcpt2nrs(svc->srv_parts[0], false);
+
+ spin_lock(&nrs->nrs_lock);
+ num_pols = svc->srv_parts[0]->scp_nrs_reg.nrs_num_pols;
+ spin_unlock(&nrs->nrs_lock);
+
+ OBD_ALLOC(infos, num_pols * sizeof(*infos));
+ if (infos == NULL)
+ GOTO(out, rc = -ENOMEM);
+again:
+
+ ptlrpc_service_for_each_part(svcpt, i, svc) {
+ nrs = nrs_svcpt2nrs(svcpt, hp);
+ spin_lock(&nrs->nrs_lock);
+
+ pol_idx = 0;
+
+ list_for_each_entry(policy, &nrs->nrs_policy_list,
+ pol_list) {
+ LASSERT(pol_idx < num_pols);
+
+ nrs_policy_get_info_locked(policy, &tmp);
+ /**
+ * Copy values when handling the first service
+ * partition.
+ */
+ if (i == 0) {
+ memcpy(infos[pol_idx].pi_name, tmp.pi_name,
+ NRS_POL_NAME_MAX);
+ memcpy(&infos[pol_idx].pi_state, &tmp.pi_state,
+ sizeof(tmp.pi_state));
+ infos[pol_idx].pi_fallback = tmp.pi_fallback;
+ /**
+ * For the rest of the service partitions
+ * sanity-check the values we get.
+ */
+ } else {
+ LASSERT(strncmp(infos[pol_idx].pi_name,
+ tmp.pi_name,
+ NRS_POL_NAME_MAX) == 0);
+ /**
+ * Not asserting ptlrpc_nrs_pol_info::pi_state,
+ * because it may be different between
+ * instances of the same policy in different
+ * service partitions.
+ */
+ LASSERT(infos[pol_idx].pi_fallback ==
+ tmp.pi_fallback);
+ }
+
+ infos[pol_idx].pi_req_queued += tmp.pi_req_queued;
+ infos[pol_idx].pi_req_started += tmp.pi_req_started;
+
+ pol_idx++;
+ }
+ spin_unlock(&nrs->nrs_lock);
+ }
+
+ /**
+ * Policy status information output is in YAML format.
+ * For example:
+ *
+ * regular_requests:
+ * - name: fifo
+ * state: started
+ * fallback: yes
+ * queued: 0
+ * active: 0
+ *
+ * - name: crrn
+ * state: started
+ * fallback: no
+ * queued: 2015
+ * active: 384
+ *
+ * high_priority_requests:
+ * - name: fifo
+ * state: started
+ * fallback: yes
+ * queued: 0
+ * active: 2
+ *
+ * - name: crrn
+ * state: stopped
+ * fallback: no
+ * queued: 0
+ * active: 0
+ */
+ seq_printf(m, "%s\n",
+ !hp ? "\nregular_requests:" : "high_priority_requests:");
+
+ for (pol_idx = 0; pol_idx < num_pols; pol_idx++) {
+ seq_printf(m, " - name: %s\n"
+ " state: %s\n"
+ " fallback: %s\n"
+ " queued: %-20d\n"
+ " active: %-20d\n\n",
+ infos[pol_idx].pi_name,
+ nrs_state2str(infos[pol_idx].pi_state),
+ infos[pol_idx].pi_fallback ? "yes" : "no",
+ (int)infos[pol_idx].pi_req_queued,
+ (int)infos[pol_idx].pi_req_started);
+ }
+
+ if (!hp && nrs_svc_has_hp(svc)) {
+ memset(infos, 0, num_pols * sizeof(*infos));
+
+ /**
+ * Redo the processing for the service's HP NRS heads' policies.
+ */
+ hp = true;
+ goto again;
+ }
+
+out:
+ if (infos)
+ OBD_FREE(infos, num_pols * sizeof(*infos));
+
+ mutex_unlock(&nrs_core.nrs_mutex);
+
+ return rc;
+}
+
+/**
+ * The longest valid command string is the maximum policy name size, plus the
+ * length of the " reg" substring.
+ */
+#define LPROCFS_NRS_WR_MAX_CMD (NRS_POL_NAME_MAX + sizeof(" reg") - 1)
+
+/**
+ * Starts and stops a given policy on a PTLRPC service.
+ *
+ * Commands consist of the policy name, followed by an optional [reg|hp] token;
+ * if the optional token is omitted, the operation is performed on both the
+ * regular and high-priority (if the service has one) NRS head.
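+ *
+ * Example (illustrative, using the fifo policy from the sample output
+ * above): writing "fifo" operates on both heads, while "fifo hp" restricts
+ * the operation to the high-priority NRS head and "fifo reg" to the
+ * regular one.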
+ */
+static ssize_t ptlrpc_lprocfs_nrs_seq_write(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct ptlrpc_service *svc = ((struct seq_file *)file->private_data)->private;
+ enum ptlrpc_nrs_queue_type queue = PTLRPC_NRS_QUEUE_BOTH;
+ char *cmd;
+ char *cmd_copy = NULL;
+ char *token;
+ int rc = 0;
+
+ if (count >= LPROCFS_NRS_WR_MAX_CMD)
+ GOTO(out, rc = -EINVAL);
+
+ OBD_ALLOC(cmd, LPROCFS_NRS_WR_MAX_CMD);
+ if (cmd == NULL)
+ GOTO(out, rc = -ENOMEM);
+ /**
+ * strsep() modifies its argument, so keep a copy
+ */
+ cmd_copy = cmd;
+
+ if (copy_from_user(cmd, buffer, count))
+ GOTO(out, rc = -EFAULT);
+
+ cmd[count] = '\0';
+
+ token = strsep(&cmd, " ");
+
+ if (strlen(token) > NRS_POL_NAME_MAX - 1)
+ GOTO(out, rc = -EINVAL);
+
+ /**
+ * No [reg|hp] token has been specified
+ */
+ if (cmd == NULL)
+ goto default_queue;
+
+ /**
+ * The second token is either NULL, or an optional [reg|hp] string
+ */
+ if (strcmp(cmd, "reg") == 0)
+ queue = PTLRPC_NRS_QUEUE_REG;
+ else if (strcmp(cmd, "hp") == 0)
+ queue = PTLRPC_NRS_QUEUE_HP;
+ else
+ GOTO(out, rc = -EINVAL);
+
+default_queue:
+
+ if (queue == PTLRPC_NRS_QUEUE_HP && !nrs_svc_has_hp(svc))
+ GOTO(out, rc = -ENODEV);
+ else if (queue == PTLRPC_NRS_QUEUE_BOTH && !nrs_svc_has_hp(svc))
+ queue = PTLRPC_NRS_QUEUE_REG;
+
+ /**
+ * Serialize NRS core lprocfs operations with policy registration/
+ * unregistration.
+ */
+ mutex_lock(&nrs_core.nrs_mutex);
+
+ rc = ptlrpc_nrs_policy_control(svc, queue, token, PTLRPC_NRS_CTL_START,
+ false, NULL);
+
+ mutex_unlock(&nrs_core.nrs_mutex);
+out:
+ if (cmd_copy)
+ OBD_FREE(cmd_copy, LPROCFS_NRS_WR_MAX_CMD);
+
+ return rc < 0 ? rc : count;
+}
+LPROC_SEQ_FOPS(ptlrpc_lprocfs_nrs);
+
+/** @} nrs */
+
+struct ptlrpc_srh_iterator {
+ int srhi_idx;
+ __u64 srhi_seq;
+ struct ptlrpc_request *srhi_req;
+};
+
+int
+ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service_part *svcpt,
+ struct ptlrpc_srh_iterator *srhi,
+ __u64 seq)
+{
+ struct list_head *e;
+ struct ptlrpc_request *req;
+
+ if (srhi->srhi_req != NULL &&
+ srhi->srhi_seq > svcpt->scp_hist_seq_culled &&
+ srhi->srhi_seq <= seq) {
+ /* If srhi_req was set previously, hasn't been culled and
+ * we're searching for a seq on or after it (i.e. more
+ * recent), search from it onwards.
+ * Since the service history is LRU (i.e. culled reqs will
+ * be near the head), we shouldn't have to do long
+ * re-scans */
+ LASSERTF(srhi->srhi_seq == srhi->srhi_req->rq_history_seq,
+ "%s:%d: seek seq "LPU64", request seq "LPU64"\n",
+ svcpt->scp_service->srv_name, svcpt->scp_cpt,
+ srhi->srhi_seq, srhi->srhi_req->rq_history_seq);
+ LASSERTF(!list_empty(&svcpt->scp_hist_reqs),
+ "%s:%d: seek offset "LPU64", request seq "LPU64", "
+ "last culled "LPU64"\n",
+ svcpt->scp_service->srv_name, svcpt->scp_cpt,
+ seq, srhi->srhi_seq, svcpt->scp_hist_seq_culled);
+ e = &srhi->srhi_req->rq_history_list;
+ } else {
+ /* search from start */
+ e = svcpt->scp_hist_reqs.next;
+ }
+
+ while (e != &svcpt->scp_hist_reqs) {
+ req = list_entry(e, struct ptlrpc_request, rq_history_list);
+
+ if (req->rq_history_seq >= seq) {
+ srhi->srhi_seq = req->rq_history_seq;
+ srhi->srhi_req = req;
+ return 0;
+ }
+ e = e->next;
+ }
+
+ return -ENOENT;
+}
+
+/*
+ * The ptlrpc history sequence is used as the "position" of the seq_file.
+ * In some cases seq_read() increments the position to move to the next
+ * element; however, the low bits of the history sequence are reserved for
+ * the CPT id (see the comments before ptlrpc_req_add_history), so
+ * seq_read() could change the CPT id of the sequence and never finish
+ * reading the requests on a CPT. To make this work, we shift the CPT id
+ * into the high bits and the timestamp into the low bits, so seq_read()
+ * only increments the timestamp, which correctly indicates the next
+ * position.
+ */
+
+/* convert seq_file pos to cpt */
+#define PTLRPC_REQ_POS2CPT(svc, pos) \
+ ((svc)->srv_cpt_bits == 0 ? 0 : \
+ (__u64)(pos) >> (64 - (svc)->srv_cpt_bits))
+
+/* make up seq_file pos from cpt */
+#define PTLRPC_REQ_CPT2POS(svc, cpt) \
+ ((svc)->srv_cpt_bits == 0 ? 0 : \
+ (cpt) << (64 - (svc)->srv_cpt_bits))
+
+/* convert sequence to position */
+#define PTLRPC_REQ_SEQ2POS(svc, seq) \
+ ((svc)->srv_cpt_bits == 0 ? (seq) : \
+ ((seq) >> (svc)->srv_cpt_bits) | \
+ ((seq) << (64 - (svc)->srv_cpt_bits)))
+
+/* convert position to sequence */
+#define PTLRPC_REQ_POS2SEQ(svc, pos) \
+ ((svc)->srv_cpt_bits == 0 ? (pos) : \
+ ((__u64)(pos) << (svc)->srv_cpt_bits) | \
+ ((__u64)(pos) >> (64 - (svc)->srv_cpt_bits)))
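+
+/*
+ * Illustrative sketch of the rotation above: with srv_cpt_bits == 2, a
+ * history sequence keeps the CPT id in its low 2 bits, so
+ * PTLRPC_REQ_SEQ2POS() rotates those bits to the top of the 64-bit
+ * position and seq_read()'s pos++ only advances the timestamp part;
+ * PTLRPC_REQ_POS2SEQ() rotates them back when seeking.
+ */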
+
+static void *
+ptlrpc_lprocfs_svc_req_history_start(struct seq_file *s, loff_t *pos)
+{
+ struct ptlrpc_service *svc = s->private;
+ struct ptlrpc_service_part *svcpt;
+ struct ptlrpc_srh_iterator *srhi;
+ unsigned int cpt;
+ int rc;
+ int i;
+
+ if (sizeof(loff_t) != sizeof(__u64)) { /* can't support */
+ CWARN("Failed to read request history because size of loff_t "
+ "%d can't match size of u64\n", (int)sizeof(loff_t));
+ return NULL;
+ }
+
+ OBD_ALLOC(srhi, sizeof(*srhi));
+ if (srhi == NULL)
+ return NULL;
+
+ srhi->srhi_seq = 0;
+ srhi->srhi_req = NULL;
+
+ cpt = PTLRPC_REQ_POS2CPT(svc, *pos);
+
+ ptlrpc_service_for_each_part(svcpt, i, svc) {
+ if (i < cpt) /* skip */
+ continue;
+ if (i > cpt) /* make up the lowest position for this CPT */
+ *pos = PTLRPC_REQ_CPT2POS(svc, i);
+
+ spin_lock(&svcpt->scp_lock);
+ rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi,
+ PTLRPC_REQ_POS2SEQ(svc, *pos));
+ spin_unlock(&svcpt->scp_lock);
+ if (rc == 0) {
+ *pos = PTLRPC_REQ_SEQ2POS(svc, srhi->srhi_seq);
+ srhi->srhi_idx = i;
+ return srhi;
+ }
+ }
+
+ OBD_FREE(srhi, sizeof(*srhi));
+ return NULL;
+}
+
+static void
+ptlrpc_lprocfs_svc_req_history_stop(struct seq_file *s, void *iter)
+{
+ struct ptlrpc_srh_iterator *srhi = iter;
+
+ if (srhi != NULL)
+ OBD_FREE(srhi, sizeof(*srhi));
+}
+
+static void *
+ptlrpc_lprocfs_svc_req_history_next(struct seq_file *s,
+ void *iter, loff_t *pos)
+{
+ struct ptlrpc_service *svc = s->private;
+ struct ptlrpc_srh_iterator *srhi = iter;
+ struct ptlrpc_service_part *svcpt;
+ __u64 seq;
+ int rc;
+ int i;
+
+ for (i = srhi->srhi_idx; i < svc->srv_ncpts; i++) {
+ svcpt = svc->srv_parts[i];
+
+ if (i > srhi->srhi_idx) { /* reset iterator for a new CPT */
+ srhi->srhi_req = NULL;
+ seq = srhi->srhi_seq = 0;
+ } else { /* the next sequence */
+ seq = srhi->srhi_seq + (1 << svc->srv_cpt_bits);
+ }
+
+ spin_lock(&svcpt->scp_lock);
+ rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, seq);
+ spin_unlock(&svcpt->scp_lock);
+ if (rc == 0) {
+ *pos = PTLRPC_REQ_SEQ2POS(svc, srhi->srhi_seq);
+ srhi->srhi_idx = i;
+ return srhi;
+ }
+ }
+
+ OBD_FREE(srhi, sizeof(*srhi));
+ return NULL;
+}
+
+/* common ost/mdt so_req_printer */
+void target_print_req(void *seq_file, struct ptlrpc_request *req)
+{
+ /* Called holding srv_lock with irqs disabled.
+ * Print specific req contents and a newline.
+ * CAVEAT EMPTOR: check request message length before printing!!!
+ * You might have received any old crap so you must be just as
+ * careful here as the service's request parser!!! */
+ struct seq_file *sf = seq_file;
+
+ switch (req->rq_phase) {
+ case RQ_PHASE_NEW:
+ /* still awaiting a service thread's attention, or rejected
+ * because the generic request message didn't unpack */
+ seq_printf(sf, "<not swabbed>\n");
+ break;
+ case RQ_PHASE_INTERPRET:
+ /* being handled, so basic msg swabbed, and opc is valid
+ * but racing with mds_handle() */
+ case RQ_PHASE_COMPLETE:
+ /* been handled by mds_handle() reply state possibly still
+ * volatile */
+ seq_printf(sf, "opc %d\n", lustre_msg_get_opc(req->rq_reqmsg));
+ break;
+ default:
+ DEBUG_REQ(D_ERROR, req, "bad phase %d", req->rq_phase);
+ }
+}
+EXPORT_SYMBOL(target_print_req);
+
+static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter)
+{
+ struct ptlrpc_service *svc = s->private;
+ struct ptlrpc_srh_iterator *srhi = iter;
+ struct ptlrpc_service_part *svcpt;
+ struct ptlrpc_request *req;
+ int rc;
+
+ LASSERT(srhi->srhi_idx < svc->srv_ncpts);
+
+ svcpt = svc->srv_parts[srhi->srhi_idx];
+
+ spin_lock(&svcpt->scp_lock);
+
+ rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, srhi->srhi_seq);
+
+ if (rc == 0) {
+ req = srhi->srhi_req;
+
+		/* Print common req fields.
+		 * CAVEAT EMPTOR: we're racing with the service handler
+		 * here. The request could contain any old crap, so you
+		 * must be just as careful as the service's request
+		 * parser. Currently we only print fields known to be safe
+		 * to look at because they were set up in request_in_callback(). */
+ seq_printf(s, LPD64":%s:%s:x"LPU64":%d:%s:%ld:%lds(%+lds) ",
+ req->rq_history_seq, libcfs_nid2str(req->rq_self),
+ libcfs_id2str(req->rq_peer), req->rq_xid,
+ req->rq_reqlen, ptlrpc_rqphase2str(req),
+ req->rq_arrival_time.tv_sec,
+ req->rq_sent - req->rq_arrival_time.tv_sec,
+ req->rq_sent - req->rq_deadline);
+ if (svc->srv_ops.so_req_printer == NULL)
+ seq_printf(s, "\n");
+ else
+ svc->srv_ops.so_req_printer(s, srhi->srhi_req);
+ }
+
+ spin_unlock(&svcpt->scp_lock);
+ return rc;
+}
+
+static int
+ptlrpc_lprocfs_svc_req_history_open(struct inode *inode, struct file *file)
+{
+ static struct seq_operations sops = {
+ .start = ptlrpc_lprocfs_svc_req_history_start,
+ .stop = ptlrpc_lprocfs_svc_req_history_stop,
+ .next = ptlrpc_lprocfs_svc_req_history_next,
+ .show = ptlrpc_lprocfs_svc_req_history_show,
+ };
+ struct seq_file *seqf;
+ int rc;
+
+ rc = seq_open(file, &sops);
+ if (rc)
+ return rc;
+
+ seqf = file->private_data;
+ seqf->private = PDE_DATA(inode);
+ return 0;
+}
+
+/* See also lprocfs_rd_timeouts */
+static int ptlrpc_lprocfs_timeouts_seq_show(struct seq_file *m, void *n)
+{
+ struct ptlrpc_service *svc = m->private;
+ struct ptlrpc_service_part *svcpt;
+ struct dhms ts;
+ time_t worstt;
+ unsigned int cur;
+ unsigned int worst;
+ int i;
+
+ if (AT_OFF) {
+ seq_printf(m, "adaptive timeouts off, using obd_timeout %u\n",
+ obd_timeout);
+ return 0;
+ }
+
+ ptlrpc_service_for_each_part(svcpt, i, svc) {
+ cur = at_get(&svcpt->scp_at_estimate);
+ worst = svcpt->scp_at_estimate.at_worst_ever;
+ worstt = svcpt->scp_at_estimate.at_worst_time;
+ s2dhms(&ts, cfs_time_current_sec() - worstt);
+
+ seq_printf(m, "%10s : cur %3u worst %3u (at %ld, "
+ DHMS_FMT" ago) ", "service",
+ cur, worst, worstt, DHMS_VARS(&ts));
+
+ lprocfs_at_hist_helper(m, &svcpt->scp_at_estimate);
+ }
+
+ return 0;
+}
+LPROC_SEQ_FOPS_RO(ptlrpc_lprocfs_timeouts);
+
+static int ptlrpc_lprocfs_hp_ratio_seq_show(struct seq_file *m, void *v)
+{
+ struct ptlrpc_service *svc = m->private;
+ return seq_printf(m, "%d", svc->srv_hpreq_ratio);
+}
+
+static ssize_t ptlrpc_lprocfs_hp_ratio_seq_write(struct file *file,
+ const char *buffer,
+ size_t count,
+ loff_t *off)
+{
+ struct ptlrpc_service *svc = ((struct seq_file *)file->private_data)->private;
+ int rc;
+ int val;
+
+ rc = lprocfs_write_helper(buffer, count, &val);
+ if (rc < 0)
+ return rc;
+
+ if (val < 0)
+ return -ERANGE;
+
+ spin_lock(&svc->srv_lock);
+ svc->srv_hpreq_ratio = val;
+ spin_unlock(&svc->srv_lock);
+
+ return count;
+}
+LPROC_SEQ_FOPS(ptlrpc_lprocfs_hp_ratio);
+
+void ptlrpc_lprocfs_register_service(struct proc_dir_entry *entry,
+ struct ptlrpc_service *svc)
+{
+ struct lprocfs_vars lproc_vars[] = {
+ {.name = "high_priority_ratio",
+ .fops = &ptlrpc_lprocfs_hp_ratio_fops,
+ .data = svc},
+ {.name = "req_buffer_history_len",
+ .fops = &ptlrpc_lprocfs_req_history_len_fops,
+ .data = svc},
+ {.name = "req_buffer_history_max",
+ .fops = &ptlrpc_lprocfs_req_history_max_fops,
+ .data = svc},
+ {.name = "threads_min",
+ .fops = &ptlrpc_lprocfs_threads_min_fops,
+ .data = svc},
+ {.name = "threads_max",
+ .fops = &ptlrpc_lprocfs_threads_max_fops,
+ .data = svc},
+ {.name = "threads_started",
+ .fops = &ptlrpc_lprocfs_threads_started_fops,
+ .data = svc},
+ {.name = "timeouts",
+ .fops = &ptlrpc_lprocfs_timeouts_fops,
+ .data = svc},
+ {.name = "nrs_policies",
+ .fops = &ptlrpc_lprocfs_nrs_fops,
+ .data = svc},
+ {NULL}
+ };
+ static struct file_operations req_history_fops = {
+ .owner = THIS_MODULE,
+ .open = ptlrpc_lprocfs_svc_req_history_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = lprocfs_seq_release,
+ };
+
+ int rc;
+
+ ptlrpc_lprocfs_register(entry, svc->srv_name,
+ "stats", &svc->srv_procroot,
+ &svc->srv_stats);
+
+ if (svc->srv_procroot == NULL)
+ return;
+
+ lprocfs_add_vars(svc->srv_procroot, lproc_vars, NULL);
+
+ rc = lprocfs_seq_create(svc->srv_procroot, "req_history",
+ 0400, &req_history_fops, svc);
+ if (rc)
+ CWARN("Error adding the req_history file\n");
+}
+
+void ptlrpc_lprocfs_register_obd(struct obd_device *obddev)
+{
+ ptlrpc_lprocfs_register(obddev->obd_proc_entry, NULL, "stats",
+ &obddev->obd_svc_procroot,
+ &obddev->obd_svc_stats);
+}
+EXPORT_SYMBOL(ptlrpc_lprocfs_register_obd);
+
+void ptlrpc_lprocfs_rpc_sent(struct ptlrpc_request *req, long amount)
+{
+ struct lprocfs_stats *svc_stats;
+ __u32 op = lustre_msg_get_opc(req->rq_reqmsg);
+ int opc = opcode_offset(op);
+
+ svc_stats = req->rq_import->imp_obd->obd_svc_stats;
+ if (svc_stats == NULL || opc <= 0)
+ return;
+ LASSERT(opc < LUSTRE_MAX_OPCODES);
+ if (!(op == LDLM_ENQUEUE || op == MDS_REINT))
+ lprocfs_counter_add(svc_stats, opc + EXTRA_MAX_OPCODES, amount);
+}
+
+void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes)
+{
+ struct lprocfs_stats *svc_stats;
+ int idx;
+
+ if (!req->rq_import)
+ return;
+ svc_stats = req->rq_import->imp_obd->obd_svc_stats;
+ if (!svc_stats)
+ return;
+ idx = lustre_msg_get_opc(req->rq_reqmsg);
+ switch (idx) {
+ case OST_READ:
+ idx = BRW_READ_BYTES + PTLRPC_LAST_CNTR;
+ break;
+ case OST_WRITE:
+ idx = BRW_WRITE_BYTES + PTLRPC_LAST_CNTR;
+ break;
+ default:
+ LASSERTF(0, "unsupported opcode %u\n", idx);
+ break;
+ }
+
+ lprocfs_counter_add(svc_stats, idx, bytes);
+}
+
+EXPORT_SYMBOL(ptlrpc_lprocfs_brw);
+
+void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc)
+{
+ if (svc->srv_procroot != NULL)
+ lprocfs_remove(&svc->srv_procroot);
+
+ if (svc->srv_stats)
+ lprocfs_free_stats(&svc->srv_stats);
+}
+
+void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd)
+{
+ if (obd->obd_svc_procroot)
+ lprocfs_remove(&obd->obd_svc_procroot);
+
+ if (obd->obd_svc_stats)
+ lprocfs_free_stats(&obd->obd_svc_stats);
+}
+EXPORT_SYMBOL(ptlrpc_lprocfs_unregister_obd);
+
+
+#define BUFLEN (UUID_MAX + 5)
+
+int lprocfs_wr_evict_client(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
+ char *kbuf;
+ char *tmpbuf;
+
+ OBD_ALLOC(kbuf, BUFLEN);
+ if (kbuf == NULL)
+ return -ENOMEM;
+
+ /*
+ * OBD_ALLOC() will zero kbuf, but we only copy BUFLEN - 1
+ * bytes into kbuf, to ensure that the string is NUL-terminated.
+ * UUID_MAX should include a trailing NUL already.
+ */
+ if (copy_from_user(kbuf, buffer,
+ min_t(unsigned long, BUFLEN - 1, count))) {
+ count = -EFAULT;
+ goto out;
+ }
+ tmpbuf = cfs_firststr(kbuf, min_t(unsigned long, BUFLEN - 1, count));
+	/* Kludge code (deadlock situation): the lprocfs lock has been held
+	 * since the client is evicted by writing the client's
+	 * uuid/nid to the procfs "evict_client" entry. However,
+	 * obd_export_evict_by_uuid() will call lprocfs_remove() to destroy
+	 * the proc entries under the export being destroyed, so the lock
+	 * has to be dropped first here.
+	 * - jay, jxiong@clusterfs.com */
+ class_incref(obd, __FUNCTION__, current);
+
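+	/* Accepted input, per the parsing below: "nid:<nid>" evicts by NID,
+	 * "uuid:<uuid>" evicts by UUID, and a bare string is treated as a
+	 * UUID. */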
+ if (strncmp(tmpbuf, "nid:", 4) == 0)
+ obd_export_evict_by_nid(obd, tmpbuf + 4);
+ else if (strncmp(tmpbuf, "uuid:", 5) == 0)
+ obd_export_evict_by_uuid(obd, tmpbuf + 5);
+ else
+ obd_export_evict_by_uuid(obd, tmpbuf);
+
+ class_decref(obd, __FUNCTION__, current);
+
+out:
+ OBD_FREE(kbuf, BUFLEN);
+ return count;
+}
+EXPORT_SYMBOL(lprocfs_wr_evict_client);
+
+#undef BUFLEN
+
+int lprocfs_wr_ping(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
+ struct ptlrpc_request *req;
+ int rc;
+
+ LPROCFS_CLIMP_CHECK(obd);
+ req = ptlrpc_prep_ping(obd->u.cli.cl_import);
+ LPROCFS_CLIMP_EXIT(obd);
+ if (req == NULL)
+ return -ENOMEM;
+
+ req->rq_send_state = LUSTRE_IMP_FULL;
+
+ rc = ptlrpc_queue_wait(req);
+
+ ptlrpc_req_finished(req);
+ if (rc >= 0)
+ return count;
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_wr_ping);
+
+/* Write the connection UUID to this file to attempt to connect to that node.
+ * The connection UUID is a node's primary NID. For example,
+ * "echo connection=192.168.0.1@tcp0::instance > .../import".
+ */
+int lprocfs_wr_import(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
+ struct obd_import *imp = obd->u.cli.cl_import;
+ char *kbuf = NULL;
+ char *uuid;
+ char *ptr;
+ int do_reconn = 1;
+ const char prefix[] = "connection=";
+ const int prefix_len = sizeof(prefix) - 1;
+
+ if (count > PAGE_CACHE_SIZE - 1 || count <= prefix_len)
+ return -EINVAL;
+
+ OBD_ALLOC(kbuf, count + 1);
+ if (kbuf == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(kbuf, buffer, count))
+ GOTO(out, count = -EFAULT);
+
+ kbuf[count] = 0;
+
+ /* only support connection=uuid::instance now */
+ if (strncmp(prefix, kbuf, prefix_len) != 0)
+ GOTO(out, count = -EINVAL);
+
+ uuid = kbuf + prefix_len;
+ ptr = strstr(uuid, "::");
+ if (ptr) {
+ __u32 inst;
+ char *endptr;
+
+ *ptr = 0;
+ do_reconn = 0;
+ ptr += strlen("::");
+ inst = simple_strtol(ptr, &endptr, 10);
+ if (*endptr) {
+ CERROR("config: wrong instance # %s\n", ptr);
+ } else if (inst != imp->imp_connect_data.ocd_instance) {
+ CDEBUG(D_INFO, "IR: %s is connecting to an obsoleted "
+ "target(%u/%u), reconnecting...\n",
+ imp->imp_obd->obd_name,
+ imp->imp_connect_data.ocd_instance, inst);
+ do_reconn = 1;
+ } else {
+ CDEBUG(D_INFO, "IR: %s has already been connecting to "
+ "new target(%u)\n",
+ imp->imp_obd->obd_name, inst);
+ }
+ }
+
+ if (do_reconn)
+ ptlrpc_recover_import(imp, uuid, 1);
+
+out:
+ OBD_FREE(kbuf, count + 1);
+ return count;
+}
+EXPORT_SYMBOL(lprocfs_wr_import);
+
+int lprocfs_rd_pinger_recov(struct seq_file *m, void *n)
+{
+ struct obd_device *obd = m->private;
+ struct obd_import *imp = obd->u.cli.cl_import;
+ int rc;
+
+ LPROCFS_CLIMP_CHECK(obd);
+ rc = seq_printf(m, "%d\n", !imp->imp_no_pinger_recover);
+ LPROCFS_CLIMP_EXIT(obd);
+
+ return rc;
+}
+EXPORT_SYMBOL(lprocfs_rd_pinger_recov);
+
+int lprocfs_wr_pinger_recov(struct file *file, const char *buffer,
+ size_t count, loff_t *off)
+{
+ struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
+ struct client_obd *cli = &obd->u.cli;
+ struct obd_import *imp = cli->cl_import;
+ int rc, val;
+
+ rc = lprocfs_write_helper(buffer, count, &val);
+ if (rc < 0)
+ return rc;
+
+ if (val != 0 && val != 1)
+ return -ERANGE;
+
+ LPROCFS_CLIMP_CHECK(obd);
+ spin_lock(&imp->imp_lock);
+ imp->imp_no_pinger_recover = !val;
+ spin_unlock(&imp->imp_lock);
+ LPROCFS_CLIMP_EXIT(obd);
+
+ return count;
+
+}
+EXPORT_SYMBOL(lprocfs_wr_pinger_recov);
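+	/* The value written is the total minimum thread count summed over
+	 * all service partitions; it is split evenly across srv_ncpts
+	 * below. */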
+
+#endif /* LPROCFS */
diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
new file mode 100644
index 0000000..a0e0097
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
@@ -0,0 +1,724 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_RPC
+#include <obd_support.h>
+#include <lustre_net.h>
+#include <lustre_lib.h>
+#include <obd.h>
+#include <obd_class.h>
+#include "ptlrpc_internal.h"
+
+/**
+ * Helper function. Sends \a len bytes from \a base at offset \a offset
+ * over \a conn connection to portal \a portal.
+ * Returns 0 on success or error code.
+ */
+static int ptl_send_buf (lnet_handle_md_t *mdh, void *base, int len,
+ lnet_ack_req_t ack, struct ptlrpc_cb_id *cbid,
+ struct ptlrpc_connection *conn, int portal, __u64 xid,
+ unsigned int offset)
+{
+ int rc;
+ lnet_md_t md;
+
+ LASSERT (portal != 0);
+ LASSERT (conn != NULL);
+ CDEBUG (D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer));
+ md.start = base;
+ md.length = len;
+ md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1;
+ md.options = PTLRPC_MD_OPTIONS;
+ md.user_ptr = cbid;
+ md.eq_handle = ptlrpc_eq_h;
+
+ if (unlikely(ack == LNET_ACK_REQ &&
+ OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_ACK, OBD_FAIL_ONCE))){
+ /* don't ask for the ack to simulate failing client */
+ ack = LNET_NOACK_REQ;
+ }
+
+ rc = LNetMDBind (md, LNET_UNLINK, mdh);
+ if (unlikely(rc != 0)) {
+ CERROR ("LNetMDBind failed: %d\n", rc);
+ LASSERT (rc == -ENOMEM);
+ return -ENOMEM;
+ }
+
+ CDEBUG(D_NET, "Sending %d bytes to portal %d, xid "LPD64", offset %u\n",
+ len, portal, xid, offset);
+
+ rc = LNetPut (conn->c_self, *mdh, ack,
+ conn->c_peer, portal, xid, offset, 0);
+ if (unlikely(rc != 0)) {
+ int rc2;
+ /* We're going to get an UNLINK event when I unlink below,
+ * which will complete just like any other failed send, so
+ * I fall through and return success here! */
+ CERROR("LNetPut(%s, %d, "LPD64") failed: %d\n",
+ libcfs_id2str(conn->c_peer), portal, xid, rc);
+ rc2 = LNetMDUnlink(*mdh);
+ LASSERTF(rc2 == 0, "rc2 = %d\n", rc2);
+ }
+
+ return 0;
+}
+
+static void mdunlink_iterate_helper(lnet_handle_md_t *bd_mds, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ LNetMDUnlink(bd_mds[i]);
+}
+
+
+/**
+ * Register bulk at the sender for later transfer.
+ * Returns 0 on success or error code.
+ */
+int ptlrpc_register_bulk(struct ptlrpc_request *req)
+{
+ struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+ lnet_process_id_t peer;
+ int rc = 0;
+ int rc2;
+ int posted_md;
+ int total_md;
+ __u64 xid;
+ lnet_handle_me_t me_h;
+ lnet_md_t md;
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_GET_NET))
+ return 0;
+
+ /* NB no locking required until desc is on the network */
+ LASSERT(desc->bd_nob > 0);
+ LASSERT(desc->bd_md_count == 0);
+ LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT);
+ LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
+ LASSERT(desc->bd_req != NULL);
+ LASSERT(desc->bd_type == BULK_PUT_SINK ||
+ desc->bd_type == BULK_GET_SOURCE);
+
+ /* cleanup the state of the bulk for it will be reused */
+ if (req->rq_resend || req->rq_send_state == LUSTRE_IMP_REPLAY)
+ desc->bd_nob_transferred = 0;
+ else
+ LASSERT(desc->bd_nob_transferred == 0);
+
+ desc->bd_failure = 0;
+
+ peer = desc->bd_import->imp_connection->c_peer;
+
+ LASSERT(desc->bd_cbid.cbid_fn == client_bulk_callback);
+ LASSERT(desc->bd_cbid.cbid_arg == desc);
+
+ /* An XID is only used for a single request from the client.
+ * For retried bulk transfers, a new XID will be allocated in
+ * in ptlrpc_check_set() if it needs to be resent, so it is not
+ * using the same RDMA match bits after an error.
+ *
+ * For multi-bulk RPCs, rq_xid is the last XID needed for bulks. The
+ * first bulk XID is power-of-two aligned before rq_xid. LU-1431 */
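+	/* Illustrative numbers: with bd_md_max_brw == 4, total_md == 4 and
+	 * rq_xid == 0x1007, the mask below yields xid == 0x1004, the loop
+	 * posts MDs with match bits 0x1004..0x1007, and rq_xid is then
+	 * reset to the last one (0x1007). */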
+ xid = req->rq_xid & ~((__u64)desc->bd_md_max_brw - 1);
+ LASSERTF(!(desc->bd_registered &&
+ req->rq_send_state != LUSTRE_IMP_REPLAY) ||
+ xid != desc->bd_last_xid,
+ "registered: %d rq_xid: "LPU64" bd_last_xid: "LPU64"\n",
+ desc->bd_registered, xid, desc->bd_last_xid);
+
+ total_md = (desc->bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;
+ desc->bd_registered = 1;
+ desc->bd_last_xid = xid;
+ desc->bd_md_count = total_md;
+ md.user_ptr = &desc->bd_cbid;
+ md.eq_handle = ptlrpc_eq_h;
+ md.threshold = 1; /* PUT or GET */
+
+ for (posted_md = 0; posted_md < total_md; posted_md++, xid++) {
+ md.options = PTLRPC_MD_OPTIONS |
+ ((desc->bd_type == BULK_GET_SOURCE) ?
+ LNET_MD_OP_GET : LNET_MD_OP_PUT);
+ ptlrpc_fill_bulk_md(&md, desc, posted_md);
+
+ rc = LNetMEAttach(desc->bd_portal, peer, xid, 0,
+ LNET_UNLINK, LNET_INS_AFTER, &me_h);
+ if (rc != 0) {
+ CERROR("%s: LNetMEAttach failed x"LPU64"/%d: rc = %d\n",
+ desc->bd_export->exp_obd->obd_name, xid,
+ posted_md, rc);
+ break;
+ }
+
+ /* About to let the network at it... */
+ rc = LNetMDAttach(me_h, md, LNET_UNLINK,
+ &desc->bd_mds[posted_md]);
+ if (rc != 0) {
+ CERROR("%s: LNetMDAttach failed x"LPU64"/%d: rc = %d\n",
+ desc->bd_export->exp_obd->obd_name, xid,
+ posted_md, rc);
+ rc2 = LNetMEUnlink(me_h);
+ LASSERT(rc2 == 0);
+ break;
+ }
+ }
+
+ if (rc != 0) {
+ LASSERT(rc == -ENOMEM);
+ spin_lock(&desc->bd_lock);
+ desc->bd_md_count -= total_md - posted_md;
+ spin_unlock(&desc->bd_lock);
+ LASSERT(desc->bd_md_count >= 0);
+ mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
+ req->rq_status = -ENOMEM;
+ return -ENOMEM;
+ }
+
+ /* Set rq_xid to matchbits of the final bulk so that server can
+ * infer the number of bulks that were prepared */
+ req->rq_xid = --xid;
+ LASSERTF(desc->bd_last_xid == (req->rq_xid & PTLRPC_BULK_OPS_MASK),
+ "bd_last_xid = x"LPU64", rq_xid = x"LPU64"\n",
+ desc->bd_last_xid, req->rq_xid);
+
+ spin_lock(&desc->bd_lock);
+ /* Holler if peer manages to touch buffers before he knows the xid */
+ if (desc->bd_md_count != total_md)
+ CWARN("%s: Peer %s touched %d buffers while I registered\n",
+ desc->bd_export->exp_obd->obd_name, libcfs_id2str(peer),
+ total_md - desc->bd_md_count);
+ spin_unlock(&desc->bd_lock);
+
+ CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, "
+ "xid x"LPX64"-"LPX64", portal %u\n", desc->bd_md_count,
+ desc->bd_type == BULK_GET_SOURCE ? "get-source" : "put-sink",
+ desc->bd_iov_count, desc->bd_nob,
+ desc->bd_last_xid, req->rq_xid, desc->bd_portal);
+
+ return 0;
+}
+EXPORT_SYMBOL(ptlrpc_register_bulk);
+
+/**
+ * Disconnect a bulk desc from the network. Idempotent. Not
+ * thread-safe (i.e. only interlocks with completion callback).
+ * Returns 1 on success or 0 if network unregistration failed for whatever
+ * reason.
+ */
+int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
+{
+ struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+ wait_queue_head_t *wq;
+ struct l_wait_info lwi;
+ int rc;
+
+ LASSERT(!in_interrupt()); /* might sleep */
+
+ /* Let's setup deadline for reply unlink. */
+ if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
+ async && req->rq_bulk_deadline == 0)
+ req->rq_bulk_deadline = cfs_time_current_sec() + LONG_UNLINK;
+
+ if (ptlrpc_client_bulk_active(req) == 0) /* completed or */
+ return 1; /* never registered */
+
+ LASSERT(desc->bd_req == req); /* bd_req NULL until registered */
+
+ /* the unlink ensures the callback happens ASAP and is the last
+ * one. If it fails, it must be because completion just happened,
+ * but we must still l_wait_event() in this case to give liblustre
+ * a chance to run client_bulk_callback() */
+ mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
+
+ if (ptlrpc_client_bulk_active(req) == 0) /* completed or */
+ return 1; /* never registered */
+
+ /* Move to "Unregistering" phase as bulk was not unlinked yet. */
+ ptlrpc_rqphase_move(req, RQ_PHASE_UNREGISTERING);
+
+ /* Do not wait for unlink to finish. */
+ if (async)
+ return 0;
+
+ if (req->rq_set != NULL)
+ wq = &req->rq_set->set_waitq;
+ else
+ wq = &req->rq_reply_waitq;
+
+ for (;;) {
+ /* Network access will complete in finite time but the HUGE
+ * timeout lets us CWARN for visibility of sluggish NALs */
+ lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
+ cfs_time_seconds(1), NULL, NULL);
+ rc = l_wait_event(*wq, !ptlrpc_client_bulk_active(req), &lwi);
+ if (rc == 0) {
+ ptlrpc_rqphase_move(req, req->rq_next_phase);
+ return 1;
+ }
+
+ LASSERT(rc == -ETIMEDOUT);
+ DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p",
+ desc);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ptlrpc_unregister_bulk);
+
+static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
+{
+ struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
+ struct ptlrpc_service *svc = svcpt->scp_service;
+ int service_time = max_t(int, cfs_time_current_sec() -
+ req->rq_arrival_time.tv_sec, 1);
+
+ if (!(flags & PTLRPC_REPLY_EARLY) &&
+ (req->rq_type != PTL_RPC_MSG_ERR) &&
+ (req->rq_reqmsg != NULL) &&
+ !(lustre_msg_get_flags(req->rq_reqmsg) &
+ (MSG_RESENT | MSG_REPLAY |
+ MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE))) {
+ /* early replies, errors and recovery requests don't count
+ * toward our service time estimate */
+ int oldse = at_measured(&svcpt->scp_at_estimate, service_time);
+
+ if (oldse != 0) {
+ DEBUG_REQ(D_ADAPTTO, req,
+ "svc %s changed estimate from %d to %d",
+ svc->srv_name, oldse,
+ at_get(&svcpt->scp_at_estimate));
+ }
+ }
+ /* Report actual service time for client latency calc */
+ lustre_msg_set_service_time(req->rq_repmsg, service_time);
+	/* Report the service time estimate for future client reqs, but report 0
+	 * (to be ignored by the client) if it's an error reply during recovery.
+	 * (bz15815) */
+ if (req->rq_type == PTL_RPC_MSG_ERR &&
+ (req->rq_export == NULL || req->rq_export->exp_obd->obd_recovering))
+ lustre_msg_set_timeout(req->rq_repmsg, 0);
+ else
+ lustre_msg_set_timeout(req->rq_repmsg,
+ at_get(&svcpt->scp_at_estimate));
+
+ if (req->rq_reqmsg &&
+ !(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
+ CDEBUG(D_ADAPTTO, "No early reply support: flags=%#x "
+ "req_flags=%#x magic=%d:%x/%x len=%d\n",
+ flags, lustre_msg_get_flags(req->rq_reqmsg),
+ lustre_msg_is_v1(req->rq_reqmsg),
+ lustre_msg_get_magic(req->rq_reqmsg),
+ lustre_msg_get_magic(req->rq_repmsg), req->rq_replen);
+ }
+}
+
+/**
+ * Send a request reply from the reply buffer of request \a req.
+ * \a flags defines the reply type.
+ * Returns 0 on success or an error code.
+ */
+int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
+{
+ struct ptlrpc_reply_state *rs = req->rq_reply_state;
+ struct ptlrpc_connection *conn;
+ int rc;
+
+ /* We must already have a reply buffer (only ptlrpc_error() may be
+ * called without one). The reply generated by sptlrpc layer (e.g.
+ * error notify, etc.) might have NULL rq->reqmsg; Otherwise we must
+ * have a request buffer which is either the actual (swabbed) incoming
+ * request, or a saved copy if this is a req saved in
+ * target_queue_final_reply().
+ */
+ LASSERT (req->rq_no_reply == 0);
+ LASSERT (req->rq_reqbuf != NULL);
+ LASSERT (rs != NULL);
+ LASSERT ((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
+ LASSERT (req->rq_repmsg != NULL);
+ LASSERT (req->rq_repmsg == rs->rs_msg);
+ LASSERT (rs->rs_cb_id.cbid_fn == reply_out_callback);
+ LASSERT (rs->rs_cb_id.cbid_arg == rs);
+
+ /* There may be no rq_export during failover */
+
+ if (unlikely(req->rq_export && req->rq_export->exp_obd &&
+ req->rq_export->exp_obd->obd_fail)) {
+ /* Failed obd's only send ENODEV */
+ req->rq_type = PTL_RPC_MSG_ERR;
+ req->rq_status = -ENODEV;
+ CDEBUG(D_HA, "sending ENODEV from failed obd %d\n",
+ req->rq_export->exp_obd->obd_minor);
+ }
+
+	/* In order to keep interoperability with clients (< 2.3) which do not
+	 * have pb_jobid in ptlrpc_body, we have to shrink the ptlrpc_body in
+	 * the reply buffer to ptlrpc_body_v2; otherwise the reply buffer on
+	 * the client will overflow.
+	 *
+	 * XXX Remove this whenever we drop interoperability with such clients.
+	 */
+ req->rq_replen = lustre_shrink_msg(req->rq_repmsg, 0,
+ sizeof(struct ptlrpc_body_v2), 1);
+
+ if (req->rq_type != PTL_RPC_MSG_ERR)
+ req->rq_type = PTL_RPC_MSG_REPLY;
+
+ lustre_msg_set_type(req->rq_repmsg, req->rq_type);
+ lustre_msg_set_status(req->rq_repmsg,
+ ptlrpc_status_hton(req->rq_status));
+ lustre_msg_set_opc(req->rq_repmsg,
+ req->rq_reqmsg ? lustre_msg_get_opc(req->rq_reqmsg) : 0);
+
+ target_pack_pool_reply(req);
+
+ ptlrpc_at_set_reply(req, flags);
+
+ if (req->rq_export == NULL || req->rq_export->exp_connection == NULL)
+ conn = ptlrpc_connection_get(req->rq_peer, req->rq_self, NULL);
+ else
+ conn = ptlrpc_connection_addref(req->rq_export->exp_connection);
+
+ if (unlikely(conn == NULL)) {
+ CERROR("not replying on NULL connection\n"); /* bug 9635 */
+ return -ENOTCONN;
+ }
+ ptlrpc_rs_addref(rs); /* +1 ref for the network */
+
+ rc = sptlrpc_svc_wrap_reply(req);
+ if (unlikely(rc))
+ goto out;
+
+ req->rq_sent = cfs_time_current_sec();
+
+ rc = ptl_send_buf (&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
+ (rs->rs_difficult && !rs->rs_no_ack) ?
+ LNET_ACK_REQ : LNET_NOACK_REQ,
+ &rs->rs_cb_id, conn,
+ ptlrpc_req2svc(req)->srv_rep_portal,
+ req->rq_xid, req->rq_reply_off);
+out:
+ if (unlikely(rc != 0))
+ ptlrpc_req_drop_rs(req);
+ ptlrpc_connection_put(conn);
+ return rc;
+}
+EXPORT_SYMBOL(ptlrpc_send_reply);
+
+int ptlrpc_reply (struct ptlrpc_request *req)
+{
+ if (req->rq_no_reply)
+ return 0;
+ else
+ return (ptlrpc_send_reply(req, 0));
+}
+EXPORT_SYMBOL(ptlrpc_reply);
+
+/**
+ * For request \a req send an error reply back. Create empty
+ * reply buffers if necessary.
+ */
+int ptlrpc_send_error(struct ptlrpc_request *req, int may_be_difficult)
+{
+ int rc;
+
+ if (req->rq_no_reply)
+ return 0;
+
+ if (!req->rq_repmsg) {
+ rc = lustre_pack_reply(req, 1, NULL, NULL);
+ if (rc)
+ return rc;
+ }
+
+ if (req->rq_status != -ENOSPC && req->rq_status != -EACCES &&
+ req->rq_status != -EPERM && req->rq_status != -ENOENT &&
+ req->rq_status != -EINPROGRESS && req->rq_status != -EDQUOT)
+ req->rq_type = PTL_RPC_MSG_ERR;
+
+ rc = ptlrpc_send_reply(req, may_be_difficult);
+ return rc;
+}
+EXPORT_SYMBOL(ptlrpc_send_error);
+
+int ptlrpc_error(struct ptlrpc_request *req)
+{
+ return ptlrpc_send_error(req, 0);
+}
+EXPORT_SYMBOL(ptlrpc_error);
+
+/**
+ * Send request \a request.
+ * If \a noreply is set, don't expect any reply back and don't set up
+ * reply buffers.
+ * Returns 0 on success or error code.
+ */
+int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
+{
+ int rc;
+ int rc2;
+ int mpflag = 0;
+ struct ptlrpc_connection *connection;
+ lnet_handle_me_t reply_me_h;
+ lnet_md_t reply_md;
+ struct obd_device *obd = request->rq_import->imp_obd;
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_RPC))
+ return 0;
+
+ LASSERT(request->rq_type == PTL_RPC_MSG_REQUEST);
+ LASSERT(request->rq_wait_ctx == 0);
+
+ /* If this is a re-transmit, we're required to have disengaged
+ * cleanly from the previous attempt */
+ LASSERT(!request->rq_receiving_reply);
+
+ if (request->rq_import->imp_obd &&
+ request->rq_import->imp_obd->obd_fail) {
+ CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
+ request->rq_import->imp_obd->obd_name);
+ /* this prevents us from waiting in ptlrpc_queue_wait */
+ request->rq_err = 1;
+ request->rq_status = -ENODEV;
+ return -ENODEV;
+ }
+
+ connection = request->rq_import->imp_connection;
+
+ lustre_msg_set_handle(request->rq_reqmsg,
+ &request->rq_import->imp_remote_handle);
+ lustre_msg_set_type(request->rq_reqmsg, PTL_RPC_MSG_REQUEST);
+ lustre_msg_set_conn_cnt(request->rq_reqmsg,
+ request->rq_import->imp_conn_cnt);
+ lustre_msghdr_set_flags(request->rq_reqmsg,
+ request->rq_import->imp_msghdr_flags);
+
+ if (request->rq_resend)
+ lustre_msg_add_flags(request->rq_reqmsg, MSG_RESENT);
+
+ if (request->rq_memalloc)
+ mpflag = cfs_memory_pressure_get_and_set();
+
+ rc = sptlrpc_cli_wrap_request(request);
+ if (rc)
+ GOTO(out, rc);
+
+ /* bulk register should be done after wrap_request() */
+ if (request->rq_bulk != NULL) {
+ rc = ptlrpc_register_bulk (request);
+ if (rc != 0)
+ GOTO(out, rc);
+ }
+
+ if (!noreply) {
+ LASSERT (request->rq_replen != 0);
+ if (request->rq_repbuf == NULL) {
+ LASSERT(request->rq_repdata == NULL);
+ LASSERT(request->rq_repmsg == NULL);
+ rc = sptlrpc_cli_alloc_repbuf(request,
+ request->rq_replen);
+ if (rc) {
+ /* this prevents us from looping in
+ * ptlrpc_queue_wait */
+ request->rq_err = 1;
+ request->rq_status = rc;
+ GOTO(cleanup_bulk, rc);
+ }
+ } else {
+ request->rq_repdata = NULL;
+ request->rq_repmsg = NULL;
+ }
+
+ rc = LNetMEAttach(request->rq_reply_portal,/*XXX FIXME bug 249*/
+ connection->c_peer, request->rq_xid, 0,
+ LNET_UNLINK, LNET_INS_AFTER, &reply_me_h);
+ if (rc != 0) {
+ CERROR("LNetMEAttach failed: %d\n", rc);
+ LASSERT (rc == -ENOMEM);
+ GOTO(cleanup_bulk, rc = -ENOMEM);
+ }
+ }
+
+ spin_lock(&request->rq_lock);
+ /* If the MD attach succeeds, there _will_ be a reply_in callback */
+ request->rq_receiving_reply = !noreply;
+ /* We are responsible for unlinking the reply buffer */
+ request->rq_must_unlink = !noreply;
+ /* Clear any flags that may be present from previous sends. */
+ request->rq_replied = 0;
+ request->rq_err = 0;
+ request->rq_timedout = 0;
+ request->rq_net_err = 0;
+ request->rq_resend = 0;
+ request->rq_restart = 0;
+ request->rq_reply_truncate = 0;
+ spin_unlock(&request->rq_lock);
+
+ if (!noreply) {
+ reply_md.start = request->rq_repbuf;
+ reply_md.length = request->rq_repbuf_len;
+ /* Allow multiple early replies */
+ reply_md.threshold = LNET_MD_THRESH_INF;
+ /* Manage remote for early replies */
+ reply_md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT |
+ LNET_MD_MANAGE_REMOTE |
+ LNET_MD_TRUNCATE; /* allow truncation, so EOVERFLOW can be reported */
+ reply_md.user_ptr = &request->rq_reply_cbid;
+ reply_md.eq_handle = ptlrpc_eq_h;
+
+ /* We must see the unlink callback to unset rq_must_unlink,
+ so we can't auto-unlink */
+ rc = LNetMDAttach(reply_me_h, reply_md, LNET_RETAIN,
+ &request->rq_reply_md_h);
+ if (rc != 0) {
+ CERROR("LNetMDAttach failed: %d\n", rc);
+ LASSERT (rc == -ENOMEM);
+ spin_lock(&request->rq_lock);
+ /* ...but the MD attach didn't succeed... */
+ request->rq_receiving_reply = 0;
+ spin_unlock(&request->rq_lock);
+ GOTO(cleanup_me, rc = -ENOMEM);
+ }
+
+ CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid "LPU64
+ ", portal %u\n",
+ request->rq_repbuf_len, request->rq_xid,
+ request->rq_reply_portal);
+ }
+
+ /* add references on request for request_out_callback */
+ ptlrpc_request_addref(request);
+ if (obd->obd_svc_stats != NULL)
+ lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
+ atomic_read(&request->rq_import->imp_inflight));
+
+ OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);
+
+ do_gettimeofday(&request->rq_arrival_time);
+ request->rq_sent = cfs_time_current_sec();
+ /* We give the server rq_timeout secs to process the req, and
+ add the network latency for our local timeout. */
+ request->rq_deadline = request->rq_sent + request->rq_timeout +
+ ptlrpc_at_get_net_latency(request);
+
+ ptlrpc_pinger_sending_on_import(request->rq_import);
+
+ DEBUG_REQ(D_INFO, request, "send flg=%x",
+ lustre_msg_get_flags(request->rq_reqmsg));
+ rc = ptl_send_buf(&request->rq_req_md_h,
+ request->rq_reqbuf, request->rq_reqdata_len,
+ LNET_NOACK_REQ, &request->rq_req_cbid,
+ connection,
+ request->rq_request_portal,
+ request->rq_xid, 0);
+ if (rc == 0)
+ GOTO(out, rc);
+
+ ptlrpc_req_finished(request);
+ if (noreply)
+ GOTO(out, rc);
+
+ cleanup_me:
+ /* MEUnlink is safe; the PUT didn't even get off the ground, and
+ * nobody apart from the PUT's target has the right nid+XID to
+ * access the reply buffer. */
+ rc2 = LNetMEUnlink(reply_me_h);
+ LASSERT (rc2 == 0);
+ /* UNLINKED callback called synchronously */
+ LASSERT(!request->rq_receiving_reply);
+
+ cleanup_bulk:
+ /* We do a synchronous unlink here as no real transfer has taken place,
+ * so the chance of a long unlink due to a sluggish net is smaller. */
+ ptlrpc_unregister_bulk(request, 0);
+ out:
+ if (request->rq_memalloc)
+ cfs_memory_pressure_restore(mpflag);
+ return rc;
+}
+EXPORT_SYMBOL(ptl_send_rpc);
+
+/**
+ * Register request buffer descriptor for request receiving.
+ */
+int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
+{
+ struct ptlrpc_service *service = rqbd->rqbd_svcpt->scp_service;
+ static lnet_process_id_t match_id = {LNET_NID_ANY, LNET_PID_ANY};
+ int rc;
+ lnet_md_t md;
+ lnet_handle_me_t me_h;
+
+ CDEBUG(D_NET, "LNetMEAttach: portal %d\n",
+ service->srv_req_portal);
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_RQBD))
+ return (-ENOMEM);
+
+ /* NB: a CPT-affinity service should use the new LNet flag LNET_INS_LOCAL,
+ * which means the buffer can only be attached on the local CPT, and LND
+ * threads can find it by grabbing a local lock */
+ rc = LNetMEAttach(service->srv_req_portal,
+ match_id, 0, ~0, LNET_UNLINK,
+ rqbd->rqbd_svcpt->scp_cpt >= 0 ?
+ LNET_INS_LOCAL : LNET_INS_AFTER, &me_h);
+ if (rc != 0) {
+ CERROR("LNetMEAttach failed: %d\n", rc);
+ return (-ENOMEM);
+ }
+
+ LASSERT(rqbd->rqbd_refcount == 0);
+ rqbd->rqbd_refcount = 1;
+
+ md.start = rqbd->rqbd_buffer;
+ md.length = service->srv_buf_size;
+ md.max_size = service->srv_max_req_size;
+ md.threshold = LNET_MD_THRESH_INF;
+ md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT | LNET_MD_MAX_SIZE;
+ md.user_ptr = &rqbd->rqbd_cbid;
+ md.eq_handle = ptlrpc_eq_h;
+
+ rc = LNetMDAttach(me_h, md, LNET_UNLINK, &rqbd->rqbd_md_h);
+ if (rc == 0)
+ return (0);
+
+ CERROR("LNetMDAttach failed: %d; \n", rc);
+ LASSERT (rc == -ENOMEM);
+ rc = LNetMEUnlink (me_h);
+ LASSERT (rc == 0);
+ rqbd->rqbd_refcount = 0;
+
+ return (-ENOMEM);
+}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
new file mode 100644
index 0000000..0abcd6d
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
@@ -0,0 +1,1759 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details. A copy is
+ * included in the COPYING file that accompanied this code.
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2011 Intel Corporation
+ *
+ * Copyright 2012 Xyratex Technology Limited
+ */
+/*
+ * lustre/ptlrpc/nrs.c
+ *
+ * Network Request Scheduler (NRS)
+ *
+ * Allows the handling of RPCs at servers to be reordered.
+ *
+ * Author: Liang Zhen <liang@whamcloud.com>
+ * Author: Nikitas Angelinas <nikitas_angelinas@xyratex.com>
+ */
+/**
+ * \addtogroup nrs
+ * @{
+ */
+
+#define DEBUG_SUBSYSTEM S_RPC
+#include <obd_support.h>
+#include <obd_class.h>
+#include <lustre_net.h>
+#include <lprocfs_status.h>
+#include <linux/libcfs/libcfs.h>
+#include "ptlrpc_internal.h"
+
+/* XXX: This is just for liblustre. Remove the #if defined directive when the
+ * "cfs_" prefix is dropped from cfs_list_head. */
+extern struct list_head ptlrpc_all_services;
+
+/**
+ * NRS core object.
+ */
+struct nrs_core nrs_core;
+
+static int nrs_policy_init(struct ptlrpc_nrs_policy *policy)
+{
+ return policy->pol_desc->pd_ops->op_policy_init != NULL ?
+ policy->pol_desc->pd_ops->op_policy_init(policy) : 0;
+}
+
+static void nrs_policy_fini(struct ptlrpc_nrs_policy *policy)
+{
+ LASSERT(policy->pol_ref == 0);
+ LASSERT(policy->pol_req_queued == 0);
+
+ if (policy->pol_desc->pd_ops->op_policy_fini != NULL)
+ policy->pol_desc->pd_ops->op_policy_fini(policy);
+}
+
+static int nrs_policy_ctl_locked(struct ptlrpc_nrs_policy *policy,
+ enum ptlrpc_nrs_ctl opc, void *arg)
+{
+ /**
+ * The policy may be stopped, but the lprocfs files and
+ * ptlrpc_nrs_policy instances remain present until unregistration time.
+ * Do not perform the ctl operation if the policy is stopped, as
+ * policy->pol_private will be NULL in such a case.
+ */
+ if (policy->pol_state == NRS_POL_STATE_STOPPED)
+ return -ENODEV;
+
+ return policy->pol_desc->pd_ops->op_policy_ctl != NULL ?
+ policy->pol_desc->pd_ops->op_policy_ctl(policy, opc, arg) :
+ -ENOSYS;
+}
+
+static void nrs_policy_stop0(struct ptlrpc_nrs_policy *policy)
+{
+ struct ptlrpc_nrs *nrs = policy->pol_nrs;
+
+ if (policy->pol_desc->pd_ops->op_policy_stop != NULL) {
+ spin_unlock(&nrs->nrs_lock);
+
+ policy->pol_desc->pd_ops->op_policy_stop(policy);
+
+ spin_lock(&nrs->nrs_lock);
+ }
+
+ LASSERT(list_empty(&policy->pol_list_queued));
+ LASSERT(policy->pol_req_queued == 0 &&
+ policy->pol_req_started == 0);
+
+ policy->pol_private = NULL;
+
+ policy->pol_state = NRS_POL_STATE_STOPPED;
+
+ if (atomic_dec_and_test(&policy->pol_desc->pd_refs))
+ module_put(policy->pol_desc->pd_owner);
+}
+
+static int nrs_policy_stop_locked(struct ptlrpc_nrs_policy *policy)
+{
+ struct ptlrpc_nrs *nrs = policy->pol_nrs;
+
+ if (nrs->nrs_policy_fallback == policy && !nrs->nrs_stopping)
+ return -EPERM;
+
+ if (policy->pol_state == NRS_POL_STATE_STARTING)
+ return -EAGAIN;
+
+ /* In progress or already stopped */
+ if (policy->pol_state != NRS_POL_STATE_STARTED)
+ return 0;
+
+ policy->pol_state = NRS_POL_STATE_STOPPING;
+
+ /* Immediately make it invisible */
+ if (nrs->nrs_policy_primary == policy) {
+ nrs->nrs_policy_primary = NULL;
+
+ } else {
+ LASSERT(nrs->nrs_policy_fallback == policy);
+ nrs->nrs_policy_fallback = NULL;
+ }
+
+ /* I have the only refcount */
+ if (policy->pol_ref == 1)
+ nrs_policy_stop0(policy);
+
+ return 0;
+}
+
+/**
+ * Transitions the \a nrs NRS head's primary policy to
+ * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING and if the policy has no
+ * pending usage references, to ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED.
+ *
+ * \param[in] nrs the NRS head to carry out this operation on
+ */
+static void nrs_policy_stop_primary(struct ptlrpc_nrs *nrs)
+{
+ struct ptlrpc_nrs_policy *tmp = nrs->nrs_policy_primary;
+
+ if (tmp == NULL)
+ return;
+
+ nrs->nrs_policy_primary = NULL;
+
+ LASSERT(tmp->pol_state == NRS_POL_STATE_STARTED);
+ tmp->pol_state = NRS_POL_STATE_STOPPING;
+
+ if (tmp->pol_ref == 0)
+ nrs_policy_stop0(tmp);
+}
+
+/**
+ * Transitions a policy across the ptlrpc_nrs_pol_state range of values, in
+ * response to an lprocfs command to start a policy.
+ *
+ * If a primary policy different to the current one is specified, this function
+ * will transition the new policy to the
+ * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTING and then to
+ * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED, and will then transition
+ * the old primary policy (if there is one) to
+ * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING, and if there are no outstanding
+ * references on the policy to ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED.
+ *
+ * If the fallback policy is specified, this is taken to indicate an instruction
+ * to stop the current primary policy, without substituting it with another
+ * primary policy, so the primary policy (if any) is transitioned to
+ * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING, and if there are no outstanding
+ * references on the policy to ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED. In
+ * this case, only the fallback policy is left active on the NRS head.
+ */
+static int nrs_policy_start_locked(struct ptlrpc_nrs_policy *policy)
+{
+ struct ptlrpc_nrs *nrs = policy->pol_nrs;
+ int rc = 0;
+
+ /**
+ * Don't allow multiple policies to be starting at the same time; it is
+ * too complex and has no real benefit.
+ */
+ if (nrs->nrs_policy_starting)
+ return -EAGAIN;
+
+ LASSERT(policy->pol_state != NRS_POL_STATE_STARTING);
+
+ if (policy->pol_state == NRS_POL_STATE_STOPPING)
+ return -EAGAIN;
+
+ if (policy->pol_flags & PTLRPC_NRS_FL_FALLBACK) {
+ /**
+ * This is for cases in which the user sets the policy to the
+ * fallback policy (currently fifo for all services); i.e. the
+ * user is resetting the policy to the default; so we stop the
+ * primary policy, if any.
+ */
+ if (policy == nrs->nrs_policy_fallback) {
+ nrs_policy_stop_primary(nrs);
+ return 0;
+ }
+
+ /**
+ * If we reach here, we must be setting up the fallback policy
+ * at service startup time, and only a single policy with the
+ * nrs_policy_flags::PTLRPC_NRS_FL_FALLBACK flag set can
+ * register with NRS core.
+ */
+ LASSERT(nrs->nrs_policy_fallback == NULL);
+ } else {
+ /**
+ * A primary policy should not be started without a fallback policy.
+ */
+ if (nrs->nrs_policy_fallback == NULL)
+ return -EPERM;
+
+ if (policy->pol_state == NRS_POL_STATE_STARTED)
+ return 0;
+ }
+
+ /**
+ * Increase the module usage count for policies registering from other
+ * modules.
+ */
+ if (atomic_inc_return(&policy->pol_desc->pd_refs) == 1 &&
+ !try_module_get(policy->pol_desc->pd_owner)) {
+ atomic_dec(&policy->pol_desc->pd_refs);
+ CERROR("NRS: cannot get module for policy %s; is it alive?\n",
+ policy->pol_desc->pd_name);
+ return -ENODEV;
+ }
+
+ /**
+ * Serialize policy starting across the NRS head
+ */
+ nrs->nrs_policy_starting = 1;
+
+ policy->pol_state = NRS_POL_STATE_STARTING;
+
+ if (policy->pol_desc->pd_ops->op_policy_start) {
+ spin_unlock(&nrs->nrs_lock);
+
+ rc = policy->pol_desc->pd_ops->op_policy_start(policy);
+
+ spin_lock(&nrs->nrs_lock);
+ if (rc != 0) {
+ if (atomic_dec_and_test(&policy->pol_desc->pd_refs))
+ module_put(policy->pol_desc->pd_owner);
+
+ policy->pol_state = NRS_POL_STATE_STOPPED;
+ GOTO(out, rc);
+ }
+ }
+
+ policy->pol_state = NRS_POL_STATE_STARTED;
+
+ if (policy->pol_flags & PTLRPC_NRS_FL_FALLBACK) {
+ /**
+ * This path is only used at PTLRPC service setup time.
+ */
+ nrs->nrs_policy_fallback = policy;
+ } else {
+ /*
+ * Try to stop the current primary policy if there is one.
+ */
+ nrs_policy_stop_primary(nrs);
+
+ /**
+ * And set the newly-started policy as the primary one.
+ */
+ nrs->nrs_policy_primary = policy;
+ }
+
+out:
+ nrs->nrs_policy_starting = 0;
+
+ return rc;
+}
+
+/**
+ * Increases the policy's usage reference count.
+ */
+static inline void nrs_policy_get_locked(struct ptlrpc_nrs_policy *policy)
+{
+ policy->pol_ref++;
+}
+
+/**
+ * Decreases the policy's usage reference count, and stops the policy if it
+ * was already stopping and has no more outstanding usage references (which
+ * indicates it has no more queued or started requests, and can be safely
+ * stopped).
+ */
+static void nrs_policy_put_locked(struct ptlrpc_nrs_policy *policy)
+{
+ LASSERT(policy->pol_ref > 0);
+
+ policy->pol_ref--;
+ if (unlikely(policy->pol_ref == 0 &&
+ policy->pol_state == NRS_POL_STATE_STOPPING))
+ nrs_policy_stop0(policy);
+}
+
+static void nrs_policy_put(struct ptlrpc_nrs_policy *policy)
+{
+ spin_lock(&policy->pol_nrs->nrs_lock);
+ nrs_policy_put_locked(policy);
+ spin_unlock(&policy->pol_nrs->nrs_lock);
+}
+
+/**
+ * Find and return a policy by name.
+ */
+static struct ptlrpc_nrs_policy * nrs_policy_find_locked(struct ptlrpc_nrs *nrs,
+ char *name)
+{
+ struct ptlrpc_nrs_policy *tmp;
+
+ list_for_each_entry(tmp, &nrs->nrs_policy_list, pol_list) {
+ if (strncmp(tmp->pol_desc->pd_name, name,
+ NRS_POL_NAME_MAX) == 0) {
+ nrs_policy_get_locked(tmp);
+ return tmp;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * Release references for the resource hierarchy moving upwards towards the
+ * policy instance resource.
+ */
+static void nrs_resource_put(struct ptlrpc_nrs_resource *res)
+{
+ struct ptlrpc_nrs_policy *policy = res->res_policy;
+
+ if (policy->pol_desc->pd_ops->op_res_put != NULL) {
+ struct ptlrpc_nrs_resource *parent;
+
+ for (; res != NULL; res = parent) {
+ parent = res->res_parent;
+ policy->pol_desc->pd_ops->op_res_put(policy, res);
+ }
+ }
+}
+
+/**
+ * Obtains references for each resource in the resource hierarchy for request
+ * \a nrq if it is to be handled by \a policy.
+ *
+ * \param[in] policy the policy
+ * \param[in] nrq the request
+ * \param[in] moving_req denotes whether this is a call to the function by
+ * ldlm_lock_reorder_req(), in order to move \a nrq to
+ * the high-priority NRS head; we should not sleep when
+ * set.
+ *
+ * \retval NULL resource hierarchy references not obtained
+ * \retval valid-pointer the bottom level of the resource hierarchy
+ *
+ * \see ptlrpc_nrs_pol_ops::op_res_get()
+ */
+static
+struct ptlrpc_nrs_resource * nrs_resource_get(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq,
+ bool moving_req)
+{
+ /**
+ * Set to NULL to traverse the resource hierarchy from the top.
+ */
+ struct ptlrpc_nrs_resource *res = NULL;
+ struct ptlrpc_nrs_resource *tmp = NULL;
+ int rc;
+
+ while (1) {
+ rc = policy->pol_desc->pd_ops->op_res_get(policy, nrq, res,
+ &tmp, moving_req);
+ if (rc < 0) {
+ if (res != NULL)
+ nrs_resource_put(res);
+ return NULL;
+ }
+
+ LASSERT(tmp != NULL);
+ tmp->res_parent = res;
+ tmp->res_policy = policy;
+ res = tmp;
+ tmp = NULL;
+ /**
+ * Return once we have obtained a reference to the bottom level
+ * of the resource hierarchy.
+ */
+ if (rc > 0)
+ return res;
+ }
+}
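+
+/**
+ * Illustrative sketch (not part of this patch) of the op_res_get() contract
+ * that the loop in nrs_resource_get() relies on: a policy hands back an
+ * intermediate resource and returns 0 to make the loop descend one more
+ * level, and returns a positive value once the bottom of its hierarchy has
+ * been reached.  A hypothetical two-level policy could therefore look
+ * roughly like this (the static resources stand in for per-policy data):
+ *
+ *	static int example_res_get(struct ptlrpc_nrs_policy *policy,
+ *				   struct ptlrpc_nrs_request *nrq,
+ *				   const struct ptlrpc_nrs_resource *parent,
+ *				   struct ptlrpc_nrs_resource **resp,
+ *				   bool moving_req)
+ *	{
+ *		if (parent == NULL) {
+ *			*resp = &example_root_res;
+ *			return 0;
+ *		}
+ *
+ *		*resp = &example_leaf_res;
+ *		return 1;
+ *	}
+ */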
+
+/**
+ * Obtains resources for the resource hierarchies and policy references for
+ * the fallback and current primary policy (if any), that will later be used
+ * to handle request \a nrq.
+ *
+ * \param[in] nrs the NRS head instance that will be handling request \a nrq.
+ * \param[in] nrq the request that is being handled.
+ * \param[out] resp the array where references to the resource hierarchy are
+ * stored.
+ * \param[in] moving_req is set when obtaining resources while moving a
+ * request from a policy on the regular NRS head to a
+ * policy on the HP NRS head (via
+ * ldlm_lock_reorder_req()). It signifies that
+ * allocations to get resources should be atomic; for
+ * a full explanation, see comment in
+ * ptlrpc_nrs_pol_ops::op_res_get().
+ */
+static void nrs_resource_get_safe(struct ptlrpc_nrs *nrs,
+ struct ptlrpc_nrs_request *nrq,
+ struct ptlrpc_nrs_resource **resp,
+ bool moving_req)
+{
+ struct ptlrpc_nrs_policy *primary = NULL;
+ struct ptlrpc_nrs_policy *fallback = NULL;
+
+ memset(resp, 0, sizeof(resp[0]) * NRS_RES_MAX);
+
+ /**
+ * Obtain policy references.
+ */
+ spin_lock(&nrs->nrs_lock);
+
+ fallback = nrs->nrs_policy_fallback;
+ nrs_policy_get_locked(fallback);
+
+ primary = nrs->nrs_policy_primary;
+ if (primary != NULL)
+ nrs_policy_get_locked(primary);
+
+ spin_unlock(&nrs->nrs_lock);
+
+ /**
+ * Obtain resource hierarchy references.
+ */
+ resp[NRS_RES_FALLBACK] = nrs_resource_get(fallback, nrq, moving_req);
+ LASSERT(resp[NRS_RES_FALLBACK] != NULL);
+
+ if (primary != NULL) {
+ resp[NRS_RES_PRIMARY] = nrs_resource_get(primary, nrq,
+ moving_req);
+ /**
+ * A primary policy may exist which may not wish to serve a
+ * particular request for different reasons; release the
+ * reference on the policy as it will not be used for this
+ * request.
+ */
+ if (resp[NRS_RES_PRIMARY] == NULL)
+ nrs_policy_put(primary);
+ }
+}
+
+/**
+ * Releases references to resource hierarchies and policies, because they are no
+ * longer required; used when request handling has been completed, or the
+ * request is moving to the high priority NRS head.
+ *
+ * \param resp the resource hierarchy that is being released
+ *
+ * \see ptlrpcnrs_req_hp_move()
+ * \see ptlrpc_nrs_req_finalize()
+ */
+static void nrs_resource_put_safe(struct ptlrpc_nrs_resource **resp)
+{
+ struct ptlrpc_nrs_policy *pols[NRS_RES_MAX];
+ struct ptlrpc_nrs *nrs = NULL;
+ int i;
+
+ for (i = 0; i < NRS_RES_MAX; i++) {
+ if (resp[i] != NULL) {
+ pols[i] = resp[i]->res_policy;
+ nrs_resource_put(resp[i]);
+ resp[i] = NULL;
+ } else {
+ pols[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < NRS_RES_MAX; i++) {
+ if (pols[i] == NULL)
+ continue;
+
+ if (nrs == NULL) {
+ nrs = pols[i]->pol_nrs;
+ spin_lock(&nrs->nrs_lock);
+ }
+ nrs_policy_put_locked(pols[i]);
+ }
+
+ if (nrs != NULL)
+ spin_unlock(&nrs->nrs_lock);
+}
+
+/**
+ * Obtains an NRS request from \a policy for handling or examination; the
+ * request should be removed in the 'handling' case.
+ *
+ * Calling into this function implies we already know the policy has a request
+ * waiting to be handled.
+ *
+ * \param[in] policy the policy from which a request is obtained
+ * \param[in] peek when set, signifies that we just want to examine the
+ * request, and not handle it, so the request is not removed
+ * from the policy.
+ * \param[in] force when set, it will force a policy to return a request if it
+ * has one pending
+ *
+ * \retval the NRS request to be handled
+ */
+static inline
+struct ptlrpc_nrs_request * nrs_request_get(struct ptlrpc_nrs_policy *policy,
+ bool peek, bool force)
+{
+ struct ptlrpc_nrs_request *nrq;
+
+ LASSERT(policy->pol_req_queued > 0);
+
+ nrq = policy->pol_desc->pd_ops->op_req_get(policy, peek, force);
+
+ LASSERT(ergo(nrq != NULL, nrs_request_policy(nrq) == policy));
+
+ return nrq;
+}
+
+/**
+ * Enqueues request \a nrq for later handling, via one of the policies for
+ * which resources were earlier obtained via nrs_resource_get_safe(). The
+ * function attempts to enqueue the request first on the primary policy
+ * (if any), since this is the preferred choice.
+ *
+ * \param nrq the request being enqueued
+ *
+ * \see nrs_resource_get_safe()
+ */
+static inline void nrs_request_enqueue(struct ptlrpc_nrs_request *nrq)
+{
+ struct ptlrpc_nrs_policy *policy;
+ int rc;
+ int i;
+
+ /**
+ * Try in descending order, because the primary policy (if any) is
+ * the preferred choice.
+ */
+ for (i = NRS_RES_MAX - 1; i >= 0; i--) {
+ if (nrq->nr_res_ptrs[i] == NULL)
+ continue;
+
+ nrq->nr_res_idx = i;
+ policy = nrq->nr_res_ptrs[i]->res_policy;
+
+ rc = policy->pol_desc->pd_ops->op_req_enqueue(policy, nrq);
+ if (rc == 0) {
+ policy->pol_nrs->nrs_req_queued++;
+ policy->pol_req_queued++;
+ return;
+ }
+ }
+ /**
+ * Should never get here, as at least the primary policy's
+ * ptlrpc_nrs_pol_ops::op_req_enqueue() implementation should always
+ * succeed.
+ */
+ LBUG();
+}
+
+/**
+ * Called when a request has been handled.
+ *
+ * \param[in] nrq the request that has been handled; can be used for
+ * job/resource control.
+ *
+ * \see ptlrpc_nrs_req_stop_nolock()
+ */
+static inline void nrs_request_stop(struct ptlrpc_nrs_request *nrq)
+{
+ struct ptlrpc_nrs_policy *policy = nrs_request_policy(nrq);
+
+ if (policy->pol_desc->pd_ops->op_req_stop)
+ policy->pol_desc->pd_ops->op_req_stop(policy, nrq);
+
+ LASSERT(policy->pol_nrs->nrs_req_started > 0);
+ LASSERT(policy->pol_req_started > 0);
+
+ policy->pol_nrs->nrs_req_started--;
+ policy->pol_req_started--;
+}
+
+/**
+ * Handler for operations that can be carried out on policies.
+ *
+ * Handles opcodes that are common to all policy types within NRS core, and
+ * passes any unknown opcodes to the policy-specific control function.
+ *
+ * \param[in] nrs the NRS head this policy belongs to.
+ * \param[in] name the human-readable policy name; should be the same as
+ * ptlrpc_nrs_pol_desc::pd_name.
+ * \param[in] opc the opcode of the operation being carried out.
+ * \param[in,out] arg can be used to pass information in and out when
+ * carrying out an operation; usually data that is private to
+ * the policy at some level, or generic policy status
+ * information.
+ *
+ * \retval -ve error condition
+ * \retval 0 operation was carried out successfully
+ */
+static int nrs_policy_ctl(struct ptlrpc_nrs *nrs, char *name,
+ enum ptlrpc_nrs_ctl opc, void *arg)
+{
+ struct ptlrpc_nrs_policy *policy;
+ int rc = 0;
+
+ spin_lock(&nrs->nrs_lock);
+
+ policy = nrs_policy_find_locked(nrs, name);
+ if (policy == NULL)
+ GOTO(out, rc = -ENOENT);
+
+ switch (opc) {
+ /**
+ * Unknown opcode, pass it down to the policy-specific control
+ * function for handling.
+ */
+ default:
+ rc = nrs_policy_ctl_locked(policy, opc, arg);
+ break;
+
+ /**
+ * Start \e policy
+ */
+ case PTLRPC_NRS_CTL_START:
+ rc = nrs_policy_start_locked(policy);
+ break;
+ }
+out:
+ if (policy != NULL)
+ nrs_policy_put_locked(policy);
+
+ spin_unlock(&nrs->nrs_lock);
+
+ return rc;
+}
+
+/**
+ * Unregisters a policy by name.
+ *
+ * \param[in] nrs the NRS head this policy belongs to.
+ * \param[in] name the human-readable policy name; should be the same as
+ * ptlrpc_nrs_pol_desc::pd_name
+ *
+ * \retval -ve error
+ * \retval 0 success
+ */
+static int nrs_policy_unregister(struct ptlrpc_nrs *nrs, char *name)
+{
+ struct ptlrpc_nrs_policy *policy = NULL;
+
+ spin_lock(&nrs->nrs_lock);
+
+ policy = nrs_policy_find_locked(nrs, name);
+ if (policy == NULL) {
+ spin_unlock(&nrs->nrs_lock);
+
+ CERROR("Can't find NRS policy %s\n", name);
+ return -ENOENT;
+ }
+
+ if (policy->pol_ref > 1) {
+ CERROR("Policy %s is busy with %d references\n", name,
+ (int)policy->pol_ref);
+ nrs_policy_put_locked(policy);
+
+ spin_unlock(&nrs->nrs_lock);
+ return -EBUSY;
+ }
+
+ LASSERT(policy->pol_req_queued == 0);
+ LASSERT(policy->pol_req_started == 0);
+
+ if (policy->pol_state != NRS_POL_STATE_STOPPED) {
+ nrs_policy_stop_locked(policy);
+ LASSERT(policy->pol_state == NRS_POL_STATE_STOPPED);
+ }
+
+ list_del(&policy->pol_list);
+ nrs->nrs_num_pols--;
+
+ nrs_policy_put_locked(policy);
+
+ spin_unlock(&nrs->nrs_lock);
+
+ nrs_policy_fini(policy);
+
+ LASSERT(policy->pol_private == NULL);
+ OBD_FREE_PTR(policy);
+
+ return 0;
+}
+
+/**
+ * Registers a policy from the policy descriptor \a desc with NRS head \a nrs.
+ *
+ * \param[in] nrs the NRS head on which the policy will be registered.
+ * \param[in] desc the policy descriptor from which the information will be
+ * obtained to register the policy.
+ *
+ * \retval -ve error
+ * \retval 0 success
+ */
+static int nrs_policy_register(struct ptlrpc_nrs *nrs,
+ struct ptlrpc_nrs_pol_desc *desc)
+{
+ struct ptlrpc_nrs_policy *policy;
+ struct ptlrpc_nrs_policy *tmp;
+ struct ptlrpc_service_part *svcpt = nrs->nrs_svcpt;
+ int rc;
+
+ LASSERT(svcpt != NULL);
+ LASSERT(desc->pd_ops != NULL);
+ LASSERT(desc->pd_ops->op_res_get != NULL);
+ LASSERT(desc->pd_ops->op_req_get != NULL);
+ LASSERT(desc->pd_ops->op_req_enqueue != NULL);
+ LASSERT(desc->pd_ops->op_req_dequeue != NULL);
+ LASSERT(desc->pd_compat != NULL);
+
+ OBD_CPT_ALLOC_GFP(policy, svcpt->scp_service->srv_cptable,
+ svcpt->scp_cpt, sizeof(*policy), __GFP_IO);
+ if (policy == NULL)
+ return -ENOMEM;
+
+ policy->pol_nrs = nrs;
+ policy->pol_desc = desc;
+ policy->pol_state = NRS_POL_STATE_STOPPED;
+ policy->pol_flags = desc->pd_flags;
+
+ INIT_LIST_HEAD(&policy->pol_list);
+ INIT_LIST_HEAD(&policy->pol_list_queued);
+
+ rc = nrs_policy_init(policy);
+ if (rc != 0) {
+ OBD_FREE_PTR(policy);
+ return rc;
+ }
+
+ spin_lock(&nrs->nrs_lock);
+
+ tmp = nrs_policy_find_locked(nrs, policy->pol_desc->pd_name);
+ if (tmp != NULL) {
+ CERROR("NRS policy %s has been registered, can't register it "
+ "for %s\n", policy->pol_desc->pd_name,
+ svcpt->scp_service->srv_name);
+ nrs_policy_put_locked(tmp);
+
+ spin_unlock(&nrs->nrs_lock);
+ nrs_policy_fini(policy);
+ OBD_FREE_PTR(policy);
+
+ return -EEXIST;
+ }
+
+ list_add_tail(&policy->pol_list, &nrs->nrs_policy_list);
+ nrs->nrs_num_pols++;
+
+ if (policy->pol_flags & PTLRPC_NRS_FL_REG_START)
+ rc = nrs_policy_start_locked(policy);
+
+ spin_unlock(&nrs->nrs_lock);
+
+ if (rc != 0)
+ (void) nrs_policy_unregister(nrs, policy->pol_desc->pd_name);
+
+ return rc;
+}
+
+/**
+ * Enqueue request \a req using one of the policies its resources are referring
+ * to.
+ *
+ * \param[in] req the request to enqueue.
+ */
+static void ptlrpc_nrs_req_add_nolock(struct ptlrpc_request *req)
+{
+ struct ptlrpc_nrs_policy *policy;
+
+ LASSERT(req->rq_nrq.nr_initialized);
+ LASSERT(!req->rq_nrq.nr_enqueued);
+
+ nrs_request_enqueue(&req->rq_nrq);
+ req->rq_nrq.nr_enqueued = 1;
+
+ policy = nrs_request_policy(&req->rq_nrq);
+ /**
+ * Add the policy to the NRS head's list of policies with enqueued
+ * requests, if it has not been added there.
+ */
+ if (unlikely(list_empty(&policy->pol_list_queued)))
+ list_add_tail(&policy->pol_list_queued,
+ &policy->pol_nrs->nrs_policy_queued);
+}
+
+/**
+ * Enqueue a request on the high priority NRS head.
+ *
+ * \param req the request to enqueue.
+ */
+static void ptlrpc_nrs_hpreq_add_nolock(struct ptlrpc_request *req)
+{
+ int opc = lustre_msg_get_opc(req->rq_reqmsg);
+
+ spin_lock(&req->rq_lock);
+ req->rq_hp = 1;
+ ptlrpc_nrs_req_add_nolock(req);
+ if (opc != OBD_PING)
+ DEBUG_REQ(D_NET, req, "high priority req");
+ spin_unlock(&req->rq_lock);
+}
+
+/**
+ * Returns a boolean predicate indicating whether the policy described by
+ * \a desc is adequate for use with service \a svc.
+ *
+ * \param[in] svc the service
+ * \param[in] desc the policy descriptor
+ *
+ * \retval false the policy is not compatible with the service
+ * \retval true the policy is compatible with the service
+ */
+static inline bool nrs_policy_compatible(const struct ptlrpc_service *svc,
+ const struct ptlrpc_nrs_pol_desc *desc)
+{
+ return desc->pd_compat(svc, desc);
+}
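+
+/**
+ * Illustrative sketch (not part of this patch): a pd_compat callback only
+ * decides whether a policy may be used with a given service.  A policy that
+ * should attach to a single named service could, roughly, compare the
+ * service name against pd_compat_svc_name (the callback name below is
+ * hypothetical):
+ *
+ *	static bool example_compat_one(const struct ptlrpc_service *svc,
+ *				       const struct ptlrpc_nrs_pol_desc *desc)
+ *	{
+ *		return strcmp(svc->srv_name, desc->pd_compat_svc_name) == 0;
+ *	}
+ */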
+
+/**
+ * Registers all compatible policies in nrs_core.nrs_policies, for NRS head
+ * \a nrs.
+ *
+ * \param[in] nrs the NRS head
+ *
+ * \retval -ve error
+ * \retval 0 success
+ *
+ * \pre mutex_is_locked(&nrs_core.nrs_mutex)
+ *
+ * \see ptlrpc_service_nrs_setup()
+ */
+static int nrs_register_policies_locked(struct ptlrpc_nrs *nrs)
+{
+ struct ptlrpc_nrs_pol_desc *desc;
+ /* for convenience */
+ struct ptlrpc_service_part *svcpt = nrs->nrs_svcpt;
+ struct ptlrpc_service *svc = svcpt->scp_service;
+ int rc = -EINVAL;
+
+ LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
+
+ list_for_each_entry(desc, &nrs_core.nrs_policies, pd_list) {
+ if (nrs_policy_compatible(svc, desc)) {
+ rc = nrs_policy_register(nrs, desc);
+ if (rc != 0) {
+ CERROR("Failed to register NRS policy %s for "
+ "partition %d of service %s: %d\n",
+ desc->pd_name, svcpt->scp_cpt,
+ svc->srv_name, rc);
+ /**
+ * Fail registration if any of the policies'
+ * registration fails.
+ */
+ break;
+ }
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * Initializes NRS head \a nrs of service partition \a svcpt, and registers all
+ * compatible policies in NRS core, with the NRS head.
+ *
+ * \param[in] nrs the NRS head
+ * \param[in] svcpt the PTLRPC service partition to setup
+ *
+ * \retval -ve error
+ * \retval 0 success
+ *
+ * \pre mutex_is_locked(&nrs_core.nrs_mutex)
+ */
+static int nrs_svcpt_setup_locked0(struct ptlrpc_nrs *nrs,
+ struct ptlrpc_service_part *svcpt)
+{
+ int rc;
+ enum ptlrpc_nrs_queue_type queue;
+
+ LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
+
+ if (nrs == &svcpt->scp_nrs_reg)
+ queue = PTLRPC_NRS_QUEUE_REG;
+ else if (nrs == svcpt->scp_nrs_hp)
+ queue = PTLRPC_NRS_QUEUE_HP;
+ else
+ LBUG();
+
+ nrs->nrs_svcpt = svcpt;
+ nrs->nrs_queue_type = queue;
+ spin_lock_init(&nrs->nrs_lock);
+ INIT_LIST_HEAD(&nrs->nrs_policy_list);
+ INIT_LIST_HEAD(&nrs->nrs_policy_queued);
+
+ rc = nrs_register_policies_locked(nrs);
+
+ return rc;
+}
+
+/**
+ * Allocates a regular and optionally a high-priority NRS head (if the service
+ * handles high-priority RPCs), and then registers all available compatible
+ * policies on those NRS heads.
+ *
+ * \param[in,out] svcpt the PTLRPC service partition to setup
+ *
+ * \pre mutex_is_locked(&nrs_core.nrs_mutex)
+ */
+static int nrs_svcpt_setup_locked(struct ptlrpc_service_part *svcpt)
+{
+ struct ptlrpc_nrs *nrs;
+ int rc;
+
+ LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
+
+ /**
+ * Initialize the regular NRS head.
+ */
+ nrs = nrs_svcpt2nrs(svcpt, false);
+ rc = nrs_svcpt_setup_locked0(nrs, svcpt);
+ if (rc < 0)
+ GOTO(out, rc);
+
+ /**
+ * Optionally allocate a high-priority NRS head.
+ */
+ if (svcpt->scp_service->srv_ops.so_hpreq_handler == NULL)
+ GOTO(out, rc);
+
+ OBD_CPT_ALLOC_PTR(svcpt->scp_nrs_hp,
+ svcpt->scp_service->srv_cptable,
+ svcpt->scp_cpt);
+ if (svcpt->scp_nrs_hp == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ nrs = nrs_svcpt2nrs(svcpt, true);
+ rc = nrs_svcpt_setup_locked0(nrs, svcpt);
+
+out:
+ return rc;
+}
+
+/**
+ * Unregisters all policies on all available NRS heads in a service partition;
+ * called at PTLRPC service unregistration time.
+ *
+ * \param[in] svcpt the PTLRPC service partition
+ *
+ * \pre mutex_is_locked(&nrs_core.nrs_mutex)
+ */
+static void nrs_svcpt_cleanup_locked(struct ptlrpc_service_part *svcpt)
+{
+ struct ptlrpc_nrs *nrs;
+ struct ptlrpc_nrs_policy *policy;
+ struct ptlrpc_nrs_policy *tmp;
+ int rc;
+ bool hp = false;
+
+ LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
+
+again:
+ nrs = nrs_svcpt2nrs(svcpt, hp);
+ nrs->nrs_stopping = 1;
+
+ list_for_each_entry_safe(policy, tmp, &nrs->nrs_policy_list,
+ pol_list) {
+ rc = nrs_policy_unregister(nrs, policy->pol_desc->pd_name);
+ LASSERT(rc == 0);
+ }
+
+ /**
+ * If the service partition has an HP NRS head, clean that up as well.
+ */
+ if (!hp && nrs_svcpt_has_hp(svcpt)) {
+ hp = true;
+ goto again;
+ }
+
+ if (hp)
+ OBD_FREE_PTR(nrs);
+}
+
+/**
+ * Returns the descriptor for a policy as identified by \a name.
+ *
+ * \param[in] name the policy name
+ *
+ * \retval the policy descriptor
+ * \retval NULL no policy descriptor with that name was found
+ */
+static struct ptlrpc_nrs_pol_desc *nrs_policy_find_desc_locked(const char *name)
+{
+ struct ptlrpc_nrs_pol_desc *tmp;
+
+ list_for_each_entry(tmp, &nrs_core.nrs_policies, pd_list) {
+ if (strncmp(tmp->pd_name, name, NRS_POL_NAME_MAX) == 0)
+ return tmp;
+ }
+ return NULL;
+}
+
+/**
+ * Removes the policy from all supported NRS heads of all partitions of all
+ * PTLRPC services.
+ *
+ * \param[in] desc the policy descriptor to unregister
+ *
+ * \retval -ve error
+ * \retval 0 successfully unregistered policy on all supported NRS heads
+ *
+ * \pre mutex_is_locked(&nrs_core.nrs_mutex)
+ * \pre mutex_is_locked(&ptlrpc_all_services_mutex)
+ */
+static int nrs_policy_unregister_locked(struct ptlrpc_nrs_pol_desc *desc)
+{
+ struct ptlrpc_nrs *nrs;
+ struct ptlrpc_service *svc;
+ struct ptlrpc_service_part *svcpt;
+ int i;
+ int rc = 0;
+
+ LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
+ LASSERT(mutex_is_locked(&ptlrpc_all_services_mutex));
+
+ list_for_each_entry(svc, &ptlrpc_all_services, srv_list) {
+
+ if (!nrs_policy_compatible(svc, desc) ||
+ unlikely(svc->srv_is_stopping))
+ continue;
+
+ ptlrpc_service_for_each_part(svcpt, i, svc) {
+ bool hp = false;
+
+again:
+ nrs = nrs_svcpt2nrs(svcpt, hp);
+ rc = nrs_policy_unregister(nrs, desc->pd_name);
+ /**
+ * Ignore -ENOENT as the policy may not have registered
+ * successfully on all service partitions.
+ */
+ if (rc == -ENOENT) {
+ rc = 0;
+ } else if (rc != 0) {
+ CERROR("Failed to unregister NRS policy %s for "
+ "partition %d of service %s: %d\n",
+ desc->pd_name, svcpt->scp_cpt,
+ svcpt->scp_service->srv_name, rc);
+ return rc;
+ }
+
+ if (!hp && nrs_svc_has_hp(svc)) {
+ hp = true;
+ goto again;
+ }
+ }
+
+ if (desc->pd_ops->op_lprocfs_fini != NULL)
+ desc->pd_ops->op_lprocfs_fini(svc);
+ }
+
+ return rc;
+}
+
+/**
+ * Registers a new policy with NRS core.
+ *
+ * The function will only succeed if policy registration with all compatible
+ * service partitions (if any) is successful.
+ *
+ * N.B. This function should be called either at ptlrpc module initialization
+ * time when registering a policy that ships with NRS core, or in a
+ * module's init() function for policies registering from other modules.
+ *
+ * \param[in] conf configuration information for the new policy to register
+ *
+ * \retval -ve error
+ * \retval 0 success
+ */
+int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf)
+{
+ struct ptlrpc_service *svc;
+ struct ptlrpc_nrs_pol_desc *desc;
+ int rc = 0;
+
+ LASSERT(conf != NULL);
+ LASSERT(conf->nc_ops != NULL);
+ LASSERT(conf->nc_compat != NULL);
+ LASSERT(ergo(conf->nc_compat == nrs_policy_compat_one,
+ conf->nc_compat_svc_name != NULL));
+ LASSERT(ergo((conf->nc_flags & PTLRPC_NRS_FL_REG_EXTERN) != 0,
+ conf->nc_owner != NULL));
+
+ conf->nc_name[NRS_POL_NAME_MAX - 1] = '\0';
+
+ /**
+ * External policies are not allowed to start immediately upon
+ * registration, as there is a relatively higher chance that their
+ * registration might fail. In such a case, some policy instances may
+ * already have requests queued when unregistration needs to happen as
+ * part of cleanup; since there is currently no way to drain requests
+ * from a policy unless the service is unregistering, we just disallow
+ * this.
+ */
+ if ((conf->nc_flags & PTLRPC_NRS_FL_REG_EXTERN) &&
+ (conf->nc_flags & (PTLRPC_NRS_FL_FALLBACK |
+ PTLRPC_NRS_FL_REG_START))) {
+ CERROR("NRS: failing to register policy %s. Please check "
+ "policy flags; external policies cannot act as fallback "
+ "policies, or be started immediately upon registration "
+ "without interaction with lprocfs\n", conf->nc_name);
+ return -EINVAL;
+ }
+
+ mutex_lock(&nrs_core.nrs_mutex);
+
+ if (nrs_policy_find_desc_locked(conf->nc_name) != NULL) {
+ CERROR("NRS: failing to register policy %s which has already "
+ "been registered with NRS core!\n",
+ conf->nc_name);
+ GOTO(fail, rc = -EEXIST);
+ }
+
+ OBD_ALLOC_PTR(desc);
+ if (desc == NULL)
+ GOTO(fail, rc = -ENOMEM);
+
+ strncpy(desc->pd_name, conf->nc_name, NRS_POL_NAME_MAX);
+ desc->pd_ops = conf->nc_ops;
+ desc->pd_compat = conf->nc_compat;
+ desc->pd_compat_svc_name = conf->nc_compat_svc_name;
+ if ((conf->nc_flags & PTLRPC_NRS_FL_REG_EXTERN) != 0)
+ desc->pd_owner = conf->nc_owner;
+ desc->pd_flags = conf->nc_flags;
+ atomic_set(&desc->pd_refs, 0);
+
+ /**
+ * For policies that are held in the same module as NRS (currently
+ * ptlrpc), do not register the policy with all compatible services,
+ * as the services will not have started at this point, since we are
+ * calling from ptlrpc module initialization code. In such cases each
+ * service will register all compatible policies later, via
+ * ptlrpc_service_nrs_setup().
+ */
+ if ((conf->nc_flags & PTLRPC_NRS_FL_REG_EXTERN) == 0)
+ goto internal;
+
+ /**
+ * Register the new policy on all compatible services
+ */
+ mutex_lock(&ptlrpc_all_services_mutex);
+
+ list_for_each_entry(svc, &ptlrpc_all_services, srv_list) {
+ struct ptlrpc_service_part *svcpt;
+ int i;
+ int rc2;
+
+ if (!nrs_policy_compatible(svc, desc) ||
+ unlikely(svc->srv_is_stopping))
+ continue;
+
+ ptlrpc_service_for_each_part(svcpt, i, svc) {
+ struct ptlrpc_nrs *nrs;
+ bool hp = false;
+again:
+ nrs = nrs_svcpt2nrs(svcpt, hp);
+ rc = nrs_policy_register(nrs, desc);
+ if (rc != 0) {
+ CERROR("Failed to register NRS policy %s for "
+ "partition %d of service %s: %d\n",
+ desc->pd_name, svcpt->scp_cpt,
+ svcpt->scp_service->srv_name, rc);
+
+ rc2 = nrs_policy_unregister_locked(desc);
+ /**
+ * Should not fail at this point
+ */
+ LASSERT(rc2 == 0);
+ mutex_unlock(&ptlrpc_all_services_mutex);
+ OBD_FREE_PTR(desc);
+ GOTO(fail, rc);
+ }
+
+ if (!hp && nrs_svc_has_hp(svc)) {
+ hp = true;
+ goto again;
+ }
+ }
+
+ /**
+ * No need to take a reference to other modules here, as we
+ * will be calling from the module's init() function.
+ */
+ if (desc->pd_ops->op_lprocfs_init != NULL) {
+ rc = desc->pd_ops->op_lprocfs_init(svc);
+ if (rc != 0) {
+ rc2 = nrs_policy_unregister_locked(desc);
+ /**
+ * Should not fail at this point
+ */
+ LASSERT(rc2 == 0);
+ mutex_unlock(&ptlrpc_all_services_mutex);
+ OBD_FREE_PTR(desc);
+ GOTO(fail, rc);
+ }
+ }
+ }
+
+ mutex_unlock(&ptlrpc_all_services_mutex);
+internal:
+ list_add_tail(&desc->pd_list, &nrs_core.nrs_policies);
+fail:
+ mutex_unlock(&nrs_core.nrs_mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL(ptlrpc_nrs_policy_register);
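+
+/**
+ * Illustrative sketch (not part of this patch): a policy shipped in a
+ * separate module would fill in a ptlrpc_nrs_pol_conf and register it from
+ * its init() function, then unregister it from its exit() function.  The
+ * ops table, policy name and target service name below are hypothetical.
+ *
+ *	static struct ptlrpc_nrs_pol_conf nrs_conf_example = {
+ *		.nc_name		= "example",
+ *		.nc_ops			= &nrs_example_ops,
+ *		.nc_compat		= nrs_policy_compat_one,
+ *		.nc_compat_svc_name	= "example_svc",
+ *		.nc_owner		= THIS_MODULE,
+ *		.nc_flags		= PTLRPC_NRS_FL_REG_EXTERN,
+ *	};
+ *
+ *	static int __init nrs_example_init(void)
+ *	{
+ *		return ptlrpc_nrs_policy_register(&nrs_conf_example);
+ *	}
+ *
+ *	static void __exit nrs_example_exit(void)
+ *	{
+ *		ptlrpc_nrs_policy_unregister(&nrs_conf_example);
+ *	}
+ */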
+
+/**
+ * Unregisters a previously registered policy with NRS core. All instances of
+ * the policy on all NRS heads of all supported services are removed.
+ *
+ * N.B. This function should only be called from a module's exit() function.
+ * Although it can be used for policies that ship alongside NRS core, the
+ * function is primarily intended for policies that register externally,
+ * from other modules.
+ *
+ * \param[in] conf configuration information for the policy to unregister
+ *
+ * \retval -ve error
+ * \retval 0 success
+ */
+int ptlrpc_nrs_policy_unregister(struct ptlrpc_nrs_pol_conf *conf)
+{
+ struct ptlrpc_nrs_pol_desc *desc;
+ int rc;
+
+ LASSERT(conf != NULL);
+
+ if (conf->nc_flags & PTLRPC_NRS_FL_FALLBACK) {
+ CERROR("Unable to unregister a fallback policy, unless the "
+ "PTLRPC service is stopping.\n");
+ return -EPERM;
+ }
+
+ conf->nc_name[NRS_POL_NAME_MAX - 1] = '\0';
+
+ mutex_lock(&nrs_core.nrs_mutex);
+
+ desc = nrs_policy_find_desc_locked(conf->nc_name);
+ if (desc == NULL) {
+ CERROR("Failing to unregister NRS policy %s which has "
+ "not been registered with NRS core!\n",
+ conf->nc_name);
+ GOTO(not_exist, rc = -ENOENT);
+ }
+
+ mutex_lock(&ptlrpc_all_services_mutex);
+
+ rc = nrs_policy_unregister_locked(desc);
+ if (rc < 0) {
+ if (rc == -EBUSY)
+ CERROR("Please first stop policy %s on all service "
+ "partitions and then retry to unregister the "
+ "policy.\n", conf->nc_name);
+ GOTO(fail, rc);
+ }
+
+ CDEBUG(D_INFO, "Unregistering policy %s from NRS core.\n",
+ conf->nc_name);
+
+ list_del(&desc->pd_list);
+ OBD_FREE_PTR(desc);
+
+fail:
+ mutex_unlock(&ptlrpc_all_services_mutex);
+
+not_exist:
+ mutex_unlock(&nrs_core.nrs_mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL(ptlrpc_nrs_policy_unregister);
+
+/**
+ * Sets up NRS heads on all service partitions of service \a svc, and registers
+ * all compatible policies on those NRS heads.
+ *
+ * To be called from within ptlrpc_register_service().
+ * \param[in] svc the service to setup
+ *
+ * \retval -ve error, the calling logic should eventually call
+ * ptlrpc_service_nrs_cleanup() to undo any work performed
+ * by this function.
+ *
+ * \see ptlrpc_register_service()
+ * \see ptlrpc_service_nrs_cleanup()
+ */
+int ptlrpc_service_nrs_setup(struct ptlrpc_service *svc)
+{
+ struct ptlrpc_service_part *svcpt;
+ const struct ptlrpc_nrs_pol_desc *desc;
+ int i;
+ int rc = 0;
+
+ mutex_lock(&nrs_core.nrs_mutex);
+
+ /**
+ * Initialize NRS heads on all service CPTs.
+ */
+ ptlrpc_service_for_each_part(svcpt, i, svc) {
+ rc = nrs_svcpt_setup_locked(svcpt);
+ if (rc != 0)
+ GOTO(failed, rc);
+ }
+
+ /**
+ * Set up lprocfs interfaces for all supported policies for the
+ * service.
+ */
+ list_for_each_entry(desc, &nrs_core.nrs_policies, pd_list) {
+ if (!nrs_policy_compatible(svc, desc))
+ continue;
+
+ if (desc->pd_ops->op_lprocfs_init != NULL) {
+ rc = desc->pd_ops->op_lprocfs_init(svc);
+ if (rc != 0)
+ GOTO(failed, rc);
+ }
+ }
+
+failed:
+
+ mutex_unlock(&nrs_core.nrs_mutex);
+
+ return rc;
+}
+
+/**
+ * Unregisters all policies on all service partitions of service \a svc.
+ *
+ * \param[in] svc the PTLRPC service to unregister
+ */
+void ptlrpc_service_nrs_cleanup(struct ptlrpc_service *svc)
+{
+ struct ptlrpc_service_part *svcpt;
+ const struct ptlrpc_nrs_pol_desc *desc;
+ int i;
+
+ mutex_lock(&nrs_core.nrs_mutex);
+
+ /**
+ * Clean up NRS heads on all service partitions
+ */
+ ptlrpc_service_for_each_part(svcpt, i, svc)
+ nrs_svcpt_cleanup_locked(svcpt);
+
+ /**
+ * Clean up lprocfs interfaces for all supported policies for the
+ * service.
+ */
+ list_for_each_entry(desc, &nrs_core.nrs_policies, pd_list) {
+ if (!nrs_policy_compatible(svc, desc))
+ continue;
+
+ if (desc->pd_ops->op_lprocfs_fini != NULL)
+ desc->pd_ops->op_lprocfs_fini(svc);
+ }
+
+ mutex_unlock(&nrs_core.nrs_mutex);
+}
+
+/**
+ * Obtains NRS head resources for request \a req.
+ *
+ * These could be either on the regular or HP NRS head of \a svcpt; resources
+ * taken on the regular head can later be swapped for HP head resources by
+ * ldlm_lock_reorder_req().
+ *
+ * \param[in] svcpt the service partition
+ * \param[in] req the request
+ * \param[in] hp which NRS head of \a svcpt to use
+ */
+void ptlrpc_nrs_req_initialize(struct ptlrpc_service_part *svcpt,
+ struct ptlrpc_request *req, bool hp)
+{
+ struct ptlrpc_nrs *nrs = nrs_svcpt2nrs(svcpt, hp);
+
+ memset(&req->rq_nrq, 0, sizeof(req->rq_nrq));
+ nrs_resource_get_safe(nrs, &req->rq_nrq, req->rq_nrq.nr_res_ptrs,
+ false);
+
+ /**
+ * It is fine to access \e nr_initialized without locking as there is
+ * no contention at this early stage.
+ */
+ req->rq_nrq.nr_initialized = 1;
+}
+
+/**
+ * Releases resources for a request; is called after the request has been
+ * handled.
+ *
+ * \param[in] req the request
+ *
+ * \see ptlrpc_server_finish_request()
+ */
+void ptlrpc_nrs_req_finalize(struct ptlrpc_request *req)
+{
+ if (req->rq_nrq.nr_initialized) {
+ nrs_resource_put_safe(req->rq_nrq.nr_res_ptrs);
+ /* no protection on bit nr_initialized because no
+ * contention at this late stage */
+ req->rq_nrq.nr_finalized = 1;
+ }
+}
+
+void ptlrpc_nrs_req_stop_nolock(struct ptlrpc_request *req)
+{
+ if (req->rq_nrq.nr_started)
+ nrs_request_stop(&req->rq_nrq);
+}
+
+/**
+ * Enqueues request \a req on either the regular or high-priority NRS head
+ * of service partition \a svcpt.
+ *
+ * \param[in] svcpt the service partition
+ * \param[in] req the request to be enqueued
+ * \param[in] hp whether to enqueue the request on the regular or
+ * high-priority NRS head.
+ */
+void ptlrpc_nrs_req_add(struct ptlrpc_service_part *svcpt,
+ struct ptlrpc_request *req, bool hp)
+{
+ spin_lock(&svcpt->scp_req_lock);
+
+ if (hp)
+ ptlrpc_nrs_hpreq_add_nolock(req);
+ else
+ ptlrpc_nrs_req_add_nolock(req);
+
+ spin_unlock(&svcpt->scp_req_lock);
+}
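+
+/**
+ * Illustrative sketch (not part of this patch) of the request life cycle as
+ * seen from the entry points in this file: incoming-request handling code is
+ * expected to initialize, enqueue, later dequeue and handle, and finally
+ * stop and finalize a request, roughly in the order below (the _nolock
+ * variants require ptlrpc_service_part::scp_req_lock to be held).
+ *
+ *	ptlrpc_nrs_req_initialize(svcpt, req, false);
+ *	ptlrpc_nrs_req_add(svcpt, req, false);
+ *
+ *	(a service thread later obtains the request via
+ *	 ptlrpc_nrs_req_get_nolock0() and handles it)
+ *
+ *	ptlrpc_nrs_req_stop_nolock(req);
+ *	ptlrpc_nrs_req_finalize(req);
+ */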
+
+static void nrs_request_removed(struct ptlrpc_nrs_policy *policy)
+{
+ LASSERT(policy->pol_nrs->nrs_req_queued > 0);
+ LASSERT(policy->pol_req_queued > 0);
+
+ policy->pol_nrs->nrs_req_queued--;
+ policy->pol_req_queued--;
+
+ /**
+ * If the policy has no more requests queued, remove it from
+ * ptlrpc_nrs::nrs_policy_queued.
+ */
+ if (unlikely(policy->pol_req_queued == 0)) {
+ list_del_init(&policy->pol_list_queued);
+
+ /**
+ * If there are other policies with queued requests, move the
+ * current policy to the end so that we can round robin over
+ * all policies and drain the requests.
+ */
+ } else if (policy->pol_req_queued != policy->pol_nrs->nrs_req_queued) {
+ LASSERT(policy->pol_req_queued <
+ policy->pol_nrs->nrs_req_queued);
+
+ list_move_tail(&policy->pol_list_queued,
+ &policy->pol_nrs->nrs_policy_queued);
+ }
+}
+
+/**
+ * Obtains a request for handling from an NRS head of service partition
+ * \a svcpt.
+ *
+ * \param[in] svcpt the service partition
+ * \param[in] hp whether to obtain a request from the regular or
+ * high-priority NRS head.
+ * \param[in] peek when set, signifies that we just want to examine the
+ * request, and not handle it, so the request is not removed
+ * from the policy.
+ * \param[in] force when set, it will force a policy to return a request if it
+ * has one pending
+ *
+ * \retval the request to be handled
+ * \retval NULL the head has no requests to serve
+ */
+struct ptlrpc_request *
+ptlrpc_nrs_req_get_nolock0(struct ptlrpc_service_part *svcpt, bool hp,
+ bool peek, bool force)
+{
+ struct ptlrpc_nrs *nrs = nrs_svcpt2nrs(svcpt, hp);
+ struct ptlrpc_nrs_policy *policy;
+ struct ptlrpc_nrs_request *nrq;
+
+ /**
+ * Always try to drain requests from all NRS policies even if they are
+ * inactive, because the user can change policy status at runtime.
+ */
+ list_for_each_entry(policy, &nrs->nrs_policy_queued,
+ pol_list_queued) {
+ nrq = nrs_request_get(policy, peek, force);
+ if (nrq != NULL) {
+ if (likely(!peek)) {
+ nrq->nr_started = 1;
+
+ policy->pol_req_started++;
+ policy->pol_nrs->nrs_req_started++;
+
+ nrs_request_removed(policy);
+ }
+
+ return container_of(nrq, struct ptlrpc_request, rq_nrq);
+ }
+ }
+
+ return NULL;
+}
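+
+/**
+ * Illustrative sketch (not part of this patch) of how a service thread could
+ * consume queued requests through the helpers above; the handling call is
+ * hypothetical and locking is reduced to the essentials.
+ *
+ *	struct ptlrpc_request *req = NULL;
+ *
+ *	spin_lock(&svcpt->scp_req_lock);
+ *	if (ptlrpc_nrs_req_pending_nolock(svcpt, false))
+ *		req = ptlrpc_nrs_req_get_nolock0(svcpt, false, false, false);
+ *	spin_unlock(&svcpt->scp_req_lock);
+ *
+ *	if (req != NULL)
+ *		example_handle_request(req);
+ */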
+
+/**
+ * Dequeues request \a req from the policy it has been enqueued on.
+ *
+ * \param[in] req the request
+ */
+void ptlrpc_nrs_req_del_nolock(struct ptlrpc_request *req)
+{
+ struct ptlrpc_nrs_policy *policy = nrs_request_policy(&req->rq_nrq);
+
+ policy->pol_desc->pd_ops->op_req_dequeue(policy, &req->rq_nrq);
+
+ req->rq_nrq.nr_enqueued = 0;
+
+ nrs_request_removed(policy);
+}
+
+/**
+ * Returns whether there are any requests currently enqueued on any of the
+ * policies of the NRS head of service partition \a svcpt specified by \a hp. Should
+ * be called while holding ptlrpc_service_part::scp_req_lock to get a reliable
+ * result.
+ *
+ * \param[in] svcpt the service partition to enquire.
+ * \param[in] hp whether the regular or high-priority NRS head is to be
+ * enquired.
+ *
+ * \retval false the indicated NRS head has no enqueued requests.
+ * \retval true the indicated NRS head has some enqueued requests.
+ */
+bool ptlrpc_nrs_req_pending_nolock(struct ptlrpc_service_part *svcpt, bool hp)
+{
+ struct ptlrpc_nrs *nrs = nrs_svcpt2nrs(svcpt, hp);
+
+ return nrs->nrs_req_queued > 0;
+}
+
+/**
+ * Moves request \a req from the regular to the high-priority NRS head.
+ *
+ * \param[in] req the request to move
+ */
+void ptlrpc_nrs_req_hp_move(struct ptlrpc_request *req)
+{
+ struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
+ struct ptlrpc_nrs_request *nrq = &req->rq_nrq;
+ struct ptlrpc_nrs_resource *res1[NRS_RES_MAX];
+ struct ptlrpc_nrs_resource *res2[NRS_RES_MAX];
+
+ /**
+ * Obtain the high-priority NRS head resources.
+ */
+ nrs_resource_get_safe(nrs_svcpt2nrs(svcpt, true), nrq, res1, true);
+
+ spin_lock(&svcpt->scp_req_lock);
+
+ if (!ptlrpc_nrs_req_can_move(req))
+ goto out;
+
+ ptlrpc_nrs_req_del_nolock(req);
+
+ memcpy(res2, nrq->nr_res_ptrs, NRS_RES_MAX * sizeof(res2[0]));
+ memcpy(nrq->nr_res_ptrs, res1, NRS_RES_MAX * sizeof(res1[0]));
+
+ ptlrpc_nrs_hpreq_add_nolock(req);
+
+ memcpy(res1, res2, NRS_RES_MAX * sizeof(res1[0]));
+out:
+ spin_unlock(&svcpt->scp_req_lock);
+
+ /**
+ * Release either the regular NRS head resources if we moved the
+ * request, or the high-priority NRS head resources if we took a
+ * reference earlier in this function and ptlrpc_nrs_req_can_move()
+ * returned false.
+ */
+ nrs_resource_put_safe(res1);
+}
+
+/**
+ * Carries out a control operation \a opc on the policy identified by the
+ * human-readable \a name, on either all partitions, or only on the first
+ * partition of service \a svc.
+ *
+ * \param[in] svc the service the policy belongs to.
+ * \param[in] queue whether to carry out the command on the policy which
+ * belongs to the regular, high-priority, or both NRS
+ * heads of service partitions of \a svc.
+ * \param[in] name the policy to act upon, by human-readable name
+ * \param[in] opc the opcode of the operation to carry out
+ * \param[in] single when set, the operation will only be carried out on the
+ * NRS heads of the first service partition of \a svc.
+ * This is useful for some policies which e.g. share
+ * identical values on the same parameters of different
+ * service partitions; when reading these parameters via
+ * lprocfs, these policies may just want to obtain and
+ * print out the values from the first service partition.
+ * Storing these values centrally elsewhere then could be
+ * another solution for this.
+ * \param[in,out] arg can be used as a generic in/out buffer between control
+ * operations and the user environment.
+ *
+ *\retval -ve error condition
+ *\retval 0 operation was carried out successfully
+ */
+int ptlrpc_nrs_policy_control(const struct ptlrpc_service *svc,
+ enum ptlrpc_nrs_queue_type queue, char *name,
+ enum ptlrpc_nrs_ctl opc, bool single, void *arg)
+{
+ struct ptlrpc_service_part *svcpt;
+ int i;
+ int rc = 0;
+
+ LASSERT(opc != PTLRPC_NRS_CTL_INVALID);
+
+ if ((queue & PTLRPC_NRS_QUEUE_BOTH) == 0)
+ return -EINVAL;
+
+ ptlrpc_service_for_each_part(svcpt, i, svc) {
+ if ((queue & PTLRPC_NRS_QUEUE_REG) != 0) {
+ rc = nrs_policy_ctl(nrs_svcpt2nrs(svcpt, false), name,
+ opc, arg);
+ if (rc != 0 || (queue == PTLRPC_NRS_QUEUE_REG &&
+ single))
+ GOTO(out, rc);
+ }
+
+ if ((queue & PTLRPC_NRS_QUEUE_HP) != 0) {
+ /**
+ * XXX: We could optionally check for
+ * nrs_svc_has_hp(svc) here, and return an error if it
+ * is false. Right now we rely on the policies' lprocfs
+ * handlers that call the present function to make this
+ * check; if they fail to do so, they might hit the
+ * assertion inside nrs_svcpt2nrs() below.
+ */
+ rc = nrs_policy_ctl(nrs_svcpt2nrs(svcpt, true), name,
+ opc, arg);
+ if (rc != 0 || single)
+ GOTO(out, rc);
+ }
+ }
+out:
+ return rc;
+}
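+
+/**
+ * Illustrative sketch (not part of this patch): an lprocfs handler that
+ * starts a policy by name would typically call the function above with
+ * PTLRPC_NRS_CTL_START on both NRS heads of every partition; e.g. to start
+ * the fifo policy on all partitions of \a svc:
+ *
+ *	rc = ptlrpc_nrs_policy_control(svc, PTLRPC_NRS_QUEUE_BOTH, "fifo",
+ *				       PTLRPC_NRS_CTL_START, false, NULL);
+ */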
+
+
+/* ptlrpc/nrs_fifo.c */
+extern struct ptlrpc_nrs_pol_conf nrs_conf_fifo;
+
+/**
+ * Adds all policies that ship with the ptlrpc module to NRS core's list of
+ * policies \e nrs_core.nrs_policies.
+ *
+ * \retval 0 all policies have been registered successfully
+ * \retval -ve error
+ */
+int ptlrpc_nrs_init(void)
+{
+ int rc;
+
+ mutex_init(&nrs_core.nrs_mutex);
+ INIT_LIST_HEAD(&nrs_core.nrs_policies);
+
+ rc = ptlrpc_nrs_policy_register(&nrs_conf_fifo);
+ if (rc != 0)
+ GOTO(fail, rc);
+
+
+ return rc;
+fail:
+ /**
+ * Since no PTLRPC services have been started at this point, all we need
+ * to do for cleanup is to free the descriptors.
+ */
+ ptlrpc_nrs_fini();
+
+ return rc;
+}
+
+/**
+ * Removes all policy descriptors from nrs_core::nrs_policies, and frees the
+ * policy descriptors.
+ *
+ * Since all PTLRPC services are stopped at this point, there are no more
+ * instances of any policies, because each service will have stopped its policy
+ * instances in ptlrpc_service_nrs_cleanup(), so we just need to free the
+ * descriptors here.
+ */
+void ptlrpc_nrs_fini(void)
+{
+ struct ptlrpc_nrs_pol_desc *desc;
+ struct ptlrpc_nrs_pol_desc *tmp;
+
+ list_for_each_entry_safe(desc, tmp, &nrs_core.nrs_policies,
+ pd_list) {
+ list_del_init(&desc->pd_list);
+ OBD_FREE_PTR(desc);
+ }
+}
+
+/** @} nrs */
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs_crr.c b/drivers/staging/lustre/lustre/ptlrpc/nrs_crr.c
new file mode 100644
index 0000000..ddfb510
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs_crr.c
@@ -0,0 +1,40 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details. A copy is
+ * included in the COPYING file that accompanied this code.
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2011 Intel Corporation
+ *
+ * Copyright 2012 Xyratex Technology Limited
+ */
+/*
+ * lustre/ptlrpc/nrs_crr.c
+ *
+ * Network Request Scheduler (NRS) CRR-N policy
+ *
+ * Request ordering in a batched Round-Robin manner over client NIDs
+ *
+ * Author: Liang Zhen <liang@whamcloud.com>
+ * Author: Nikitas Angelinas <nikitas_angelinas@xyratex.com>
+ */
+/**
+ * \addtogroup nrs
+ * @{
+ */
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c b/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c
new file mode 100644
index 0000000..7d3ee97
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c
@@ -0,0 +1,270 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details. A copy is
+ * included in the COPYING file that accompanied this code.
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2011 Intel Corporation
+ *
+ * Copyright 2012 Xyratex Technology Limited
+ */
+/*
+ * lustre/ptlrpc/nrs_fifo.c
+ *
+ * Network Request Scheduler (NRS) FIFO policy
+ *
+ * Handles RPCs in a FIFO manner, as received from the network. This policy is
+ * a logical wrapper around previous, non-NRS functionality. It is used as the
+ * default and fallback policy for all types of RPCs on all PTLRPC service
+ * partitions, for both regular and high-priority NRS heads. Default here means
+ * the policy is the one enabled at PTLRPC service partition startup time, and
+ * fallback means the policy is used to handle RPCs that are not handled
+ * successfully or are not handled at all by any primary policy that may be
+ * enabled on a given NRS head.
+ *
+ * Author: Liang Zhen <liang@whamcloud.com>
+ * Author: Nikitas Angelinas <nikitas_angelinas@xyratex.com>
+ */
+/**
+ * \addtogroup nrs
+ * @{
+ */
+
+#define DEBUG_SUBSYSTEM S_RPC
+#include <obd_support.h>
+#include <obd_class.h>
+#include <linux/libcfs/libcfs.h>
+#include "ptlrpc_internal.h"
+
+/**
+ * \name fifo
+ *
+ * The FIFO policy is a logical wrapper around previous, non-NRS functionality.
+ * It schedules RPCs in the same order as they are queued from LNet.
+ *
+ * @{
+ */
+
+#define NRS_POL_NAME_FIFO "fifo"
+
+/**
+ * Is called before the policy transitions into
+ * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED; allocates and initializes a
+ * policy-specific private data structure.
+ *
+ * \param[in] policy The policy to start
+ *
+ * \retval -ENOMEM OOM error
+ * \retval 0 success
+ *
+ * \see nrs_policy_register()
+ * \see nrs_policy_ctl()
+ */
+static int nrs_fifo_start(struct ptlrpc_nrs_policy *policy)
+{
+ struct nrs_fifo_head *head;
+
+ OBD_CPT_ALLOC_PTR(head, nrs_pol2cptab(policy), nrs_pol2cptid(policy));
+ if (head == NULL)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&head->fh_list);
+ policy->pol_private = head;
+ return 0;
+}
+
+/**
+ * Is called before the policy transitions into
+ * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED; deallocates the policy-specific
+ * private data structure.
+ *
+ * \param[in] policy The policy to stop
+ *
+ * \see nrs_policy_stop0()
+ */
+static void nrs_fifo_stop(struct ptlrpc_nrs_policy *policy)
+{
+ struct nrs_fifo_head *head = policy->pol_private;
+
+ LASSERT(head != NULL);
+ LASSERT(list_empty(&head->fh_list));
+
+ OBD_FREE_PTR(head);
+}
+
+/**
+ * Is called for obtaining a FIFO policy resource.
+ *
+ * \param[in] policy The policy on which the request is being asked for
+ * \param[in] nrq The request for which resources are being taken
+ * \param[in] parent Parent resource, unused in this policy
+ * \param[out] resp Resources references are placed in this array
+ * \param[in] moving_req Signifies limited caller context; unused in this
+ * policy
+ *
+ * \retval 1 The FIFO policy only has a one-level resource hierarchy; since
+ *	     it implements a simple scheduling algorithm in which request
+ *	     priority is determined by the request arrival order, it does not
+ *	     need to maintain a set of resources that would otherwise be used
+ *	     to calculate a request's priority.
+ *
+ * \see nrs_resource_get_safe()
+ */
+static int nrs_fifo_res_get(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq,
+ const struct ptlrpc_nrs_resource *parent,
+ struct ptlrpc_nrs_resource **resp, bool moving_req)
+{
+ /**
+ * Just return the resource embedded inside nrs_fifo_head, and end this
+ * resource hierarchy reference request.
+ */
+ *resp = &((struct nrs_fifo_head *)policy->pol_private)->fh_res;
+ return 1;
+}
+
+/**
+ * Called when getting a request from the FIFO policy for handling, or just
+ * peeking; removes the request from the policy when it is to be handled.
+ *
+ * \param[in] policy The policy
+ * \param[in] peek When set, signifies that we just want to examine the
+ * request, and not handle it, so the request is not removed
+ * from the policy.
+ * \param[in] force Force the policy to return a request; unused in this
+ * policy
+ *
+ * \retval The request to be handled; this is the next request in the FIFO
+ * queue
+ *
+ * \see ptlrpc_nrs_req_get_nolock()
+ * \see nrs_request_get()
+ */
+static
+struct ptlrpc_nrs_request *nrs_fifo_req_get(struct ptlrpc_nrs_policy *policy,
+ bool peek, bool force)
+{
+ struct nrs_fifo_head *head = policy->pol_private;
+ struct ptlrpc_nrs_request *nrq;
+
+ nrq = unlikely(list_empty(&head->fh_list)) ? NULL :
+ list_entry(head->fh_list.next, struct ptlrpc_nrs_request,
+ nr_u.fifo.fr_list);
+
+ if (likely(!peek && nrq != NULL)) {
+ struct ptlrpc_request *req = container_of(nrq,
+ struct ptlrpc_request,
+ rq_nrq);
+
+ list_del_init(&nrq->nr_u.fifo.fr_list);
+
+ CDEBUG(D_RPCTRACE, "NRS start %s request from %s, seq: "LPU64
+ "\n", policy->pol_desc->pd_name,
+ libcfs_id2str(req->rq_peer), nrq->nr_u.fifo.fr_sequence);
+ }
+
+ return nrq;
+}
+
+/**
+ * Adds request \a nrq to \a policy's list of queued requests
+ *
+ * \param[in] policy The policy
+ * \param[in] nrq The request to add
+ *
+ * \retval 0 success; nrs_request_enqueue() assumes this function will always
+ * succeed
+ */
+static int nrs_fifo_req_add(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq)
+{
+ struct nrs_fifo_head *head;
+
+ head = container_of(nrs_request_resource(nrq), struct nrs_fifo_head,
+ fh_res);
+ /**
+ * Only used for debugging
+ */
+ nrq->nr_u.fifo.fr_sequence = head->fh_sequence++;
+ list_add_tail(&nrq->nr_u.fifo.fr_list, &head->fh_list);
+
+ return 0;
+}
+
+/**
+ * Removes request \a nrq from \a policy's list of queued requests.
+ *
+ * \param[in] policy The policy
+ * \param[in] nrq The request to remove
+ */
+static void nrs_fifo_req_del(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq)
+{
+ LASSERT(!list_empty(&nrq->nr_u.fifo.fr_list));
+ list_del_init(&nrq->nr_u.fifo.fr_list);
+}
+
+/**
+ * Prints a debug statement right before the request \a nrq stops being
+ * handled.
+ *
+ * \param[in] policy The policy handling the request
+ * \param[in] nrq The request being handled
+ *
+ * \see ptlrpc_server_finish_request()
+ * \see ptlrpc_nrs_req_stop_nolock()
+ */
+static void nrs_fifo_req_stop(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq)
+{
+ struct ptlrpc_request *req = container_of(nrq, struct ptlrpc_request,
+ rq_nrq);
+
+ CDEBUG(D_RPCTRACE, "NRS stop %s request from %s, seq: "LPU64"\n",
+ policy->pol_desc->pd_name, libcfs_id2str(req->rq_peer),
+ nrq->nr_u.fifo.fr_sequence);
+}
+
+/**
+ * FIFO policy operations
+ */
+static const struct ptlrpc_nrs_pol_ops nrs_fifo_ops = {
+ .op_policy_start = nrs_fifo_start,
+ .op_policy_stop = nrs_fifo_stop,
+ .op_res_get = nrs_fifo_res_get,
+ .op_req_get = nrs_fifo_req_get,
+ .op_req_enqueue = nrs_fifo_req_add,
+ .op_req_dequeue = nrs_fifo_req_del,
+ .op_req_stop = nrs_fifo_req_stop,
+};
+
+/**
+ * FIFO policy configuration
+ */
+struct ptlrpc_nrs_pol_conf nrs_conf_fifo = {
+ .nc_name = NRS_POL_NAME_FIFO,
+ .nc_ops = &nrs_fifo_ops,
+ .nc_compat = nrs_policy_compat_all,
+ .nc_flags = PTLRPC_NRS_FL_FALLBACK |
+ PTLRPC_NRS_FL_REG_START
+};
+
+/** @} fifo */
+
+/** @} nrs */
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
new file mode 100644
index 0000000..cd2611a3
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
@@ -0,0 +1,2567 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ptlrpc/pack_generic.c
+ *
+ * (Un)packing of OST requests
+ *
+ * Author: Peter J. Braam <braam@clusterfs.com>
+ * Author: Phil Schwan <phil@clusterfs.com>
+ * Author: Eric Barton <eeb@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_RPC
+
+#include <linux/libcfs/libcfs.h>
+
+#include <obd_support.h>
+#include <obd_class.h>
+#include <lustre_net.h>
+#include <obd_cksum.h>
+#include <lustre/ll_fiemap.h>
+
+static inline int lustre_msg_hdr_size_v2(int count)
+{
+ return cfs_size_round(offsetof(struct lustre_msg_v2,
+ lm_buflens[count]));
+}
+
+int lustre_msg_hdr_size(__u32 magic, int count)
+{
+ switch (magic) {
+ case LUSTRE_MSG_MAGIC_V2:
+ return lustre_msg_hdr_size_v2(count);
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", magic);
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_hdr_size);
+
+void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
+ int index)
+{
+ if (inout)
+ lustre_set_req_swabbed(req, index);
+ else
+ lustre_set_rep_swabbed(req, index);
+}
+EXPORT_SYMBOL(ptlrpc_buf_set_swabbed);
+
+int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
+ int index)
+{
+ if (inout)
+ return (ptlrpc_req_need_swab(req) &&
+ !lustre_req_swabbed(req, index));
+ else
+ return (ptlrpc_rep_need_swab(req) &&
+ !lustre_rep_swabbed(req, index));
+}
+EXPORT_SYMBOL(ptlrpc_buf_need_swab);
+
+static inline int lustre_msg_check_version_v2(struct lustre_msg_v2 *msg,
+ __u32 version)
+{
+ __u32 ver = lustre_msg_get_version(msg);
+ return (ver & LUSTRE_VERSION_MASK) != version;
+}
+
+int lustre_msg_check_version(struct lustre_msg *msg, __u32 version)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V1:
+ CERROR("msg v1 not supported - please upgrade you system\n");
+ return -EINVAL;
+ case LUSTRE_MSG_MAGIC_V2:
+ return lustre_msg_check_version_v2(msg, version);
+ default:
+ CERROR("incorrect message magic: %08x\n", msg->lm_magic);
+ return 0;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_check_version);
+
+/* early reply size */
+int lustre_msg_early_size(void)
+{
+ static int size = 0;
+ if (!size) {
+ /* Always reply old ptlrpc_body_v2 to keep interoperability
+ * with the old client (< 2.3) which doesn't have pb_jobid
+ * in the ptlrpc_body.
+ *
+ * XXX Remove this whenever we drop interoperability with such
+ * clients.
+ */
+ __u32 pblen = sizeof(struct ptlrpc_body_v2);
+ size = lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 1, &pblen);
+ }
+ return size;
+}
+EXPORT_SYMBOL(lustre_msg_early_size);
+
+int lustre_msg_size_v2(int count, __u32 *lengths)
+{
+ int size;
+ int i;
+
+ size = lustre_msg_hdr_size_v2(count);
+ for (i = 0; i < count; i++)
+ size += cfs_size_round(lengths[i]);
+
+ return size;
+}
+EXPORT_SYMBOL(lustre_msg_size_v2);
+
+/* This returns the size of the buffer that is required to hold a lustre_msg
+ * with the given sub-buffer lengths.
+ * NOTE: this should only be used for NEW requests, and should always be
+ * in the form of a v2 request. If this is a connection to a v1
+ * target then the first buffer will be stripped because the ptlrpc
+ * data is part of the lustre_msg_v1 header. b=14043 */
+int lustre_msg_size(__u32 magic, int count, __u32 *lens)
+{
+ __u32 size[] = { sizeof(struct ptlrpc_body) };
+
+ if (!lens) {
+ LASSERT(count == 1);
+ lens = size;
+ }
+
+ LASSERT(count > 0);
+ LASSERT(lens[MSG_PTLRPC_BODY_OFF] >= sizeof(struct ptlrpc_body_v2));
+
+ switch (magic) {
+ case LUSTRE_MSG_MAGIC_V2:
+ return lustre_msg_size_v2(count, lens);
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", magic);
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_size);
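
For orientation, a hedged worked example of what lustre_msg_size() computes for a typical two-buffer request; the 13-byte payload length is made up, and it assumes cfs_size_round() pads each length up to an 8-byte boundary, which is how lustre_msg_size_v2() above uses it:

	/* Illustrative sizing of a V2 message carrying the mandatory
	 * ptlrpc_body plus one 13-byte payload buffer. */
	__u32 lens[2] = { sizeof(struct ptlrpc_body), 13 };
	int msglen = lustre_msg_size(LUSTRE_MSG_MAGIC_V2, 2, lens);

	/* msglen == lustre_msg_hdr_size_v2(2) +
	 *	     cfs_size_round(lens[0]) + cfs_size_round(13),
	 * i.e. the header plus each buffer length rounded up. */
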
+
+/* This is used to determine the size of a buffer that was already packed
+ * and will correctly handle the different message formats. */
+int lustre_packed_msg_size(struct lustre_msg *msg)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2:
+ return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
+ default:
+ CERROR("incorrect message magic: %08x\n", msg->lm_magic);
+ return 0;
+ }
+}
+EXPORT_SYMBOL(lustre_packed_msg_size);
+
+void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens,
+ char **bufs)
+{
+ char *ptr;
+ int i;
+
+ msg->lm_bufcount = count;
+ /* XXX: lm_secflvr uninitialized here */
+ msg->lm_magic = LUSTRE_MSG_MAGIC_V2;
+
+ for (i = 0; i < count; i++)
+ msg->lm_buflens[i] = lens[i];
+
+ if (bufs == NULL)
+ return;
+
+ ptr = (char *)msg + lustre_msg_hdr_size_v2(count);
+ for (i = 0; i < count; i++) {
+ char *tmp = bufs[i];
+ LOGL(tmp, lens[i], ptr);
+ }
+}
+EXPORT_SYMBOL(lustre_init_msg_v2);
+
+static int lustre_pack_request_v2(struct ptlrpc_request *req,
+ int count, __u32 *lens, char **bufs)
+{
+ int reqlen, rc;
+
+ reqlen = lustre_msg_size_v2(count, lens);
+
+ rc = sptlrpc_cli_alloc_reqbuf(req, reqlen);
+ if (rc)
+ return rc;
+
+ req->rq_reqlen = reqlen;
+
+ lustre_init_msg_v2(req->rq_reqmsg, count, lens, bufs);
+ lustre_msg_add_version(req->rq_reqmsg, PTLRPC_MSG_VERSION);
+ return 0;
+}
+
+int lustre_pack_request(struct ptlrpc_request *req, __u32 magic, int count,
+ __u32 *lens, char **bufs)
+{
+ __u32 size[] = { sizeof(struct ptlrpc_body) };
+
+ if (!lens) {
+ LASSERT(count == 1);
+ lens = size;
+ }
+
+ LASSERT(count > 0);
+ LASSERT(lens[MSG_PTLRPC_BODY_OFF] == sizeof(struct ptlrpc_body));
+
+ /* only use new format, we don't need to be compatible with 1.4 */
+ magic = LUSTRE_MSG_MAGIC_V2;
+
+ switch (magic) {
+ case LUSTRE_MSG_MAGIC_V2:
+ return lustre_pack_request_v2(req, count, lens, bufs);
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", magic);
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(lustre_pack_request);
+
+#if RS_DEBUG
+LIST_HEAD(ptlrpc_rs_debug_lru);
+spinlock_t ptlrpc_rs_debug_lock;
+
+#define PTLRPC_RS_DEBUG_LRU_ADD(rs) \
+do { \
+ spin_lock(&ptlrpc_rs_debug_lock); \
+ list_add_tail(&(rs)->rs_debug_list, &ptlrpc_rs_debug_lru); \
+ spin_unlock(&ptlrpc_rs_debug_lock); \
+} while (0)
+
+#define PTLRPC_RS_DEBUG_LRU_DEL(rs) \
+do { \
+ spin_lock(&ptlrpc_rs_debug_lock); \
+ list_del(&(rs)->rs_debug_list); \
+ spin_unlock(&ptlrpc_rs_debug_lock); \
+} while (0)
+#else
+# define PTLRPC_RS_DEBUG_LRU_ADD(rs) do {} while(0)
+# define PTLRPC_RS_DEBUG_LRU_DEL(rs) do {} while(0)
+#endif
+
+struct ptlrpc_reply_state *
+lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt)
+{
+ struct ptlrpc_reply_state *rs = NULL;
+
+ spin_lock(&svcpt->scp_rep_lock);
+
+ /* See if we have anything in a pool, and wait if nothing */
+ while (list_empty(&svcpt->scp_rep_idle)) {
+ struct l_wait_info lwi;
+ int rc;
+
+ spin_unlock(&svcpt->scp_rep_lock);
+ /* If we cannot get anything for a long time, we had better
+ * bail out instead of waiting indefinitely */
+ lwi = LWI_TIMEOUT(cfs_time_seconds(10), NULL, NULL);
+ rc = l_wait_event(svcpt->scp_rep_waitq,
+ !list_empty(&svcpt->scp_rep_idle), &lwi);
+ if (rc != 0)
+ goto out;
+ spin_lock(&svcpt->scp_rep_lock);
+ }
+
+ rs = list_entry(svcpt->scp_rep_idle.next,
+ struct ptlrpc_reply_state, rs_list);
+ list_del(&rs->rs_list);
+
+ spin_unlock(&svcpt->scp_rep_lock);
+
+ memset(rs, 0, svcpt->scp_service->srv_max_reply_size);
+ rs->rs_svcpt = svcpt;
+ rs->rs_prealloc = 1;
+out:
+ return rs;
+}
+
+void lustre_put_emerg_rs(struct ptlrpc_reply_state *rs)
+{
+ struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
+
+ spin_lock(&svcpt->scp_rep_lock);
+ list_add(&rs->rs_list, &svcpt->scp_rep_idle);
+ spin_unlock(&svcpt->scp_rep_lock);
+ wake_up(&svcpt->scp_rep_waitq);
+}
+
+int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
+ __u32 *lens, char **bufs, int flags)
+{
+ struct ptlrpc_reply_state *rs;
+ int msg_len, rc;
+
+ LASSERT(req->rq_reply_state == NULL);
+
+ if ((flags & LPRFL_EARLY_REPLY) == 0) {
+ spin_lock(&req->rq_lock);
+ req->rq_packed_final = 1;
+ spin_unlock(&req->rq_lock);
+ }
+
+ msg_len = lustre_msg_size_v2(count, lens);
+ rc = sptlrpc_svc_alloc_rs(req, msg_len);
+ if (rc)
+ return rc;
+
+ rs = req->rq_reply_state;
+ atomic_set(&rs->rs_refcount, 1); /* 1 ref for rq_reply_state */
+ rs->rs_cb_id.cbid_fn = reply_out_callback;
+ rs->rs_cb_id.cbid_arg = rs;
+ rs->rs_svcpt = req->rq_rqbd->rqbd_svcpt;
+ INIT_LIST_HEAD(&rs->rs_exp_list);
+ INIT_LIST_HEAD(&rs->rs_obd_list);
+ INIT_LIST_HEAD(&rs->rs_list);
+ spin_lock_init(&rs->rs_lock);
+
+ req->rq_replen = msg_len;
+ req->rq_reply_state = rs;
+ req->rq_repmsg = rs->rs_msg;
+
+ lustre_init_msg_v2(rs->rs_msg, count, lens, bufs);
+ lustre_msg_add_version(rs->rs_msg, PTLRPC_MSG_VERSION);
+
+ PTLRPC_RS_DEBUG_LRU_ADD(rs);
+
+ return 0;
+}
+EXPORT_SYMBOL(lustre_pack_reply_v2);
+
+int lustre_pack_reply_flags(struct ptlrpc_request *req, int count, __u32 *lens,
+ char **bufs, int flags)
+{
+ int rc = 0;
+ __u32 size[] = { sizeof(struct ptlrpc_body) };
+
+ if (!lens) {
+ LASSERT(count == 1);
+ lens = size;
+ }
+
+ LASSERT(count > 0);
+ LASSERT(lens[MSG_PTLRPC_BODY_OFF] == sizeof(struct ptlrpc_body));
+
+ switch (req->rq_reqmsg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2:
+ rc = lustre_pack_reply_v2(req, count, lens, bufs, flags);
+ break;
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n",
+ req->rq_reqmsg->lm_magic);
+ rc = -EINVAL;
+ }
+ if (rc != 0)
+ CERROR("lustre_pack_reply failed: rc=%d size=%d\n", rc,
+ lustre_msg_size(req->rq_reqmsg->lm_magic, count, lens));
+ return rc;
+}
+EXPORT_SYMBOL(lustre_pack_reply_flags);
+
+int lustre_pack_reply(struct ptlrpc_request *req, int count, __u32 *lens,
+ char **bufs)
+{
+ return lustre_pack_reply_flags(req, count, lens, bufs, 0);
+}
+EXPORT_SYMBOL(lustre_pack_reply);
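
As a rough usage sketch of the reply-packing path from a server-side handler's point of view; the 64-byte payload and the buffer index are illustrative, not taken from any real handler:

	__u32 lens[2] = { sizeof(struct ptlrpc_body), 64 };
	void *payload;
	int rc;

	/* Pack a reply with the ptlrpc_body plus one payload buffer;
	 * lustre_pack_reply() allocates rq_reply_state and sets
	 * req->rq_repmsg for us. */
	rc = lustre_pack_reply(req, 2, lens, NULL);
	if (rc)
		return rc;

	/* Fetch the payload buffer that was just reserved. */
	payload = lustre_msg_buf(req->rq_repmsg, 1, 64);
	if (payload == NULL)
		return -EPROTO;
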
+
+void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size)
+{
+ int i, offset, buflen, bufcount;
+
+ LASSERT(m != NULL);
+ LASSERT(n >= 0);
+
+ bufcount = m->lm_bufcount;
+ if (unlikely(n >= bufcount)) {
+ CDEBUG(D_INFO, "msg %p buffer[%d] not present (count %d)\n",
+ m, n, bufcount);
+ return NULL;
+ }
+
+ buflen = m->lm_buflens[n];
+ if (unlikely(buflen < min_size)) {
+ CERROR("msg %p buffer[%d] size %d too small "
+ "(required %d, opc=%d)\n", m, n, buflen, min_size,
+ n == MSG_PTLRPC_BODY_OFF ? -1 : lustre_msg_get_opc(m));
+ return NULL;
+ }
+
+ offset = lustre_msg_hdr_size_v2(bufcount);
+ for (i = 0; i < n; i++)
+ offset += cfs_size_round(m->lm_buflens[i]);
+
+ return (char *)m + offset;
+}
+
+void *lustre_msg_buf(struct lustre_msg *m, int n, int min_size)
+{
+ switch (m->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2:
+ return lustre_msg_buf_v2(m, n, min_size);
+ default:
+ LASSERTF(0, "incorrect message magic: %08x(msg:%p)\n", m->lm_magic, m);
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_buf);
+
+int lustre_shrink_msg_v2(struct lustre_msg_v2 *msg, int segment,
+ unsigned int newlen, int move_data)
+{
+ char *tail = NULL, *newpos;
+ int tail_len = 0, n;
+
+ LASSERT(msg);
+ LASSERT(msg->lm_bufcount > segment);
+ LASSERT(msg->lm_buflens[segment] >= newlen);
+
+ if (msg->lm_buflens[segment] == newlen)
+ goto out;
+
+ if (move_data && msg->lm_bufcount > segment + 1) {
+ tail = lustre_msg_buf_v2(msg, segment + 1, 0);
+ for (n = segment + 1; n < msg->lm_bufcount; n++)
+ tail_len += cfs_size_round(msg->lm_buflens[n]);
+ }
+
+ msg->lm_buflens[segment] = newlen;
+
+ if (tail && tail_len) {
+ newpos = lustre_msg_buf_v2(msg, segment + 1, 0);
+ LASSERT(newpos <= tail);
+ if (newpos != tail)
+ memmove(newpos, tail, tail_len);
+ }
+out:
+ return lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
+}
+
+/*
+ * for @msg, shrink @segment to size @newlen. if @move_data is non-zero,
+ * we also move data forward from @segment + 1.
+ *
+ * if @newlen == 0, we remove the segment completely, but we still keep the
+ * total bufcount the same to save possible data moving. this will leave an
+ * unused segment with size 0 at the tail, but that's ok.
+ *
+ * return new msg size after shrinking.
+ *
+ * CAUTION:
+ * + if any buffers higher than @segment have been filled in, the caller must
+ *   call shrink with non-zero @move_data.
+ * + the caller should NOT keep pointers to msg buffers higher than @segment
+ *   after calling shrink.
+ */
+int lustre_shrink_msg(struct lustre_msg *msg, int segment,
+ unsigned int newlen, int move_data)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2:
+ return lustre_shrink_msg_v2(msg, segment, newlen, move_data);
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+EXPORT_SYMBOL(lustre_shrink_msg);
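
A short hedged sketch of the pattern the caution above describes, as a server might use it after packing a reply with more room than it ended up needing (the buffer index and the 'used' length are illustrative):

	/* Buffer 1 was packed with room to spare but only 'used' bytes
	 * were filled in; no buffer above it was touched, so move_data
	 * can stay 0. Shrink the segment and record the new reply size. */
	req->rq_replen = lustre_shrink_msg(req->rq_repmsg, 1, used, 0);
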
+
+void lustre_free_reply_state(struct ptlrpc_reply_state *rs)
+{
+ PTLRPC_RS_DEBUG_LRU_DEL(rs);
+
+ LASSERT (atomic_read(&rs->rs_refcount) == 0);
+ LASSERT (!rs->rs_difficult || rs->rs_handled);
+ LASSERT (!rs->rs_on_net);
+ LASSERT (!rs->rs_scheduled);
+ LASSERT (rs->rs_export == NULL);
+ LASSERT (rs->rs_nlocks == 0);
+ LASSERT (list_empty(&rs->rs_exp_list));
+ LASSERT (list_empty(&rs->rs_obd_list));
+
+ sptlrpc_svc_free_rs(rs);
+}
+EXPORT_SYMBOL(lustre_free_reply_state);
+
+static int lustre_unpack_msg_v2(struct lustre_msg_v2 *m, int len)
+{
+ int swabbed, required_len, i;
+
+ /* Now we know the sender speaks my language. */
+ required_len = lustre_msg_hdr_size_v2(0);
+ if (len < required_len) {
+ /* can't even look inside the message */
+ CERROR("message length %d too small for lustre_msg\n", len);
+ return -EINVAL;
+ }
+
+ swabbed = (m->lm_magic == LUSTRE_MSG_MAGIC_V2_SWABBED);
+
+ if (swabbed) {
+ __swab32s(&m->lm_magic);
+ __swab32s(&m->lm_bufcount);
+ __swab32s(&m->lm_secflvr);
+ __swab32s(&m->lm_repsize);
+ __swab32s(&m->lm_cksum);
+ __swab32s(&m->lm_flags);
+ CLASSERT(offsetof(typeof(*m), lm_padding_2) != 0);
+ CLASSERT(offsetof(typeof(*m), lm_padding_3) != 0);
+ }
+
+ required_len = lustre_msg_hdr_size_v2(m->lm_bufcount);
+ if (len < required_len) {
+ /* didn't receive all the buffer lengths */
+ CERROR ("message length %d too small for %d buflens\n",
+ len, m->lm_bufcount);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < m->lm_bufcount; i++) {
+ if (swabbed)
+ __swab32s(&m->lm_buflens[i]);
+ required_len += cfs_size_round(m->lm_buflens[i]);
+ }
+
+ if (len < required_len) {
+ CERROR("len: %d, required_len %d\n", len, required_len);
+ CERROR("bufcount: %d\n", m->lm_bufcount);
+ for (i = 0; i < m->lm_bufcount; i++)
+ CERROR("buffer %d length %d\n", i, m->lm_buflens[i]);
+ return -EINVAL;
+ }
+
+ return swabbed;
+}
+
+int __lustre_unpack_msg(struct lustre_msg *m, int len)
+{
+ int required_len, rc;
+
+ /* We can provide a slightly better error log if we check the
+ * message magic and version first. In the future, struct
+ * lustre_msg may grow, and we'd like to log a version mismatch
+ * rather than a short message.
+ */
+ required_len = offsetof(struct lustre_msg, lm_magic) +
+ sizeof(m->lm_magic);
+ if (len < required_len) {
+ /* can't even look inside the message */
+ CERROR("message length %d too small for magic/version check\n",
+ len);
+ return -EINVAL;
+ }
+
+ rc = lustre_unpack_msg_v2(m, len);
+
+ return rc;
+}
+EXPORT_SYMBOL(__lustre_unpack_msg);
+
+int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len)
+{
+ int rc;
+ rc = __lustre_unpack_msg(req->rq_reqmsg, len);
+ if (rc == 1) {
+ lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
+ rc = 0;
+ }
+ return rc;
+}
+EXPORT_SYMBOL(ptlrpc_unpack_req_msg);
+
+int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len)
+{
+ int rc;
+ rc = __lustre_unpack_msg(req->rq_repmsg, len);
+ if (rc == 1) {
+ lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
+ rc = 0;
+ }
+ return rc;
+}
+EXPORT_SYMBOL(ptlrpc_unpack_rep_msg);
+
+static inline int lustre_unpack_ptlrpc_body_v2(struct ptlrpc_request *req,
+ const int inout, int offset)
+{
+ struct ptlrpc_body *pb;
+ struct lustre_msg_v2 *m = inout ? req->rq_reqmsg : req->rq_repmsg;
+
+ pb = lustre_msg_buf_v2(m, offset, sizeof(struct ptlrpc_body_v2));
+ if (!pb) {
+ CERROR("error unpacking ptlrpc body\n");
+ return -EFAULT;
+ }
+ if (ptlrpc_buf_need_swab(req, inout, offset)) {
+ lustre_swab_ptlrpc_body(pb);
+ ptlrpc_buf_set_swabbed(req, inout, offset);
+ }
+
+ if ((pb->pb_version & ~LUSTRE_VERSION_MASK) != PTLRPC_MSG_VERSION) {
+ CERROR("wrong lustre_msg version %08x\n", pb->pb_version);
+ return -EINVAL;
+ }
+
+ if (!inout)
+ pb->pb_status = ptlrpc_status_ntoh(pb->pb_status);
+
+ return 0;
+}
+
+int lustre_unpack_req_ptlrpc_body(struct ptlrpc_request *req, int offset)
+{
+ switch (req->rq_reqmsg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2:
+ return lustre_unpack_ptlrpc_body_v2(req, 1, offset);
+ default:
+ CERROR("bad lustre msg magic: %08x\n",
+ req->rq_reqmsg->lm_magic);
+ return -EINVAL;
+ }
+}
+
+int lustre_unpack_rep_ptlrpc_body(struct ptlrpc_request *req, int offset)
+{
+ switch (req->rq_repmsg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2:
+ return lustre_unpack_ptlrpc_body_v2(req, 0, offset);
+ default:
+ CERROR("bad lustre msg magic: %08x\n",
+ req->rq_repmsg->lm_magic);
+ return -EINVAL;
+ }
+}
+
+static inline int lustre_msg_buflen_v2(struct lustre_msg_v2 *m, int n)
+{
+ if (n >= m->lm_bufcount)
+ return 0;
+
+ return m->lm_buflens[n];
+}
+
+/**
+ * lustre_msg_buflen - return the length of buffer \a n in message \a m
+ * \param m lustre_msg (request or reply) to look at
+ * \param n message index (base 0)
+ *
+ * returns zero for non-existent message indices
+ */
+int lustre_msg_buflen(struct lustre_msg *m, int n)
+{
+ switch (m->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2:
+ return lustre_msg_buflen_v2(m, n);
+ default:
+ CERROR("incorrect message magic: %08x\n", m->lm_magic);
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_buflen);
+
+static inline void
+lustre_msg_set_buflen_v2(struct lustre_msg_v2 *m, int n, int len)
+{
+ if (n >= m->lm_bufcount)
+ LBUG();
+
+ m->lm_buflens[n] = len;
+}
+
+void lustre_msg_set_buflen(struct lustre_msg *m, int n, int len)
+{
+ switch (m->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2:
+ lustre_msg_set_buflen_v2(m, n, len);
+ return;
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", m->lm_magic);
+ }
+}
+EXPORT_SYMBOL(lustre_msg_set_buflen);
+
+/* NB return the bufcount for lustre_msg_v2 format, so if the message is
+ * packed in V1 format the result is one bigger (it adds struct ptlrpc_body). */
+int lustre_msg_bufcount(struct lustre_msg *m)
+{
+ switch (m->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2:
+ return m->lm_bufcount;
+ default:
+ CERROR("incorrect message magic: %08x\n", m->lm_magic);
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_bufcount);
+
+char *lustre_msg_string(struct lustre_msg *m, int index, int max_len)
+{
+ /* max_len == 0 means the string should fill the buffer */
+ char *str;
+ int slen, blen;
+
+ switch (m->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2:
+ str = lustre_msg_buf_v2(m, index, 0);
+ blen = lustre_msg_buflen_v2(m, index);
+ break;
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", m->lm_magic);
+ }
+
+ if (str == NULL) {
+ CERROR ("can't unpack string in msg %p buffer[%d]\n", m, index);
+ return NULL;
+ }
+
+ slen = strnlen(str, blen);
+
+ if (slen == blen) { /* not NULL terminated */
+ CERROR("can't unpack non-NULL terminated string in "
+ "msg %p buffer[%d] len %d\n", m, index, blen);
+ return NULL;
+ }
+
+ if (max_len == 0) {
+ if (slen != blen - 1) {
+ CERROR("can't unpack short string in msg %p "
+ "buffer[%d] len %d: strlen %d\n",
+ m, index, blen, slen);
+ return NULL;
+ }
+ } else if (slen > max_len) {
+ CERROR("can't unpack oversized string in msg %p "
+ "buffer[%d] len %d strlen %d: max %d expected\n",
+ m, index, blen, slen, max_len);
+ return NULL;
+ }
+
+ return str;
+}
+EXPORT_SYMBOL(lustre_msg_string);
+
+/* Wrap up the normal fixed length cases */
+static inline void *__lustre_swab_buf(struct lustre_msg *msg, int index,
+ int min_size, void *swabber)
+{
+ void *ptr = NULL;
+
+ LASSERT(msg != NULL);
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2:
+ ptr = lustre_msg_buf_v2(msg, index, min_size);
+ break;
+ default:
+ CERROR("incorrect message magic: %08x\n", msg->lm_magic);
+ }
+
+ if (ptr && swabber)
+ ((void (*)(void *))swabber)(ptr);
+
+ return ptr;
+}
+
+static inline struct ptlrpc_body *lustre_msg_ptlrpc_body(struct lustre_msg *msg)
+{
+ return lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
+ sizeof(struct ptlrpc_body_v2));
+}
+
+__u32 lustre_msghdr_get_flags(struct lustre_msg *msg)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V1:
+ case LUSTRE_MSG_MAGIC_V1_SWABBED:
+ return 0;
+ case LUSTRE_MSG_MAGIC_V2:
+ /* already in host endian */
+ return msg->lm_flags;
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ return 0;
+ }
+}
+EXPORT_SYMBOL(lustre_msghdr_get_flags);
+
+void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V1:
+ return;
+ case LUSTRE_MSG_MAGIC_V2:
+ msg->lm_flags = flags;
+ return;
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+
+__u32 lustre_msg_get_flags(struct lustre_msg *msg)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ if (!pb) {
+ CERROR("invalid msg %p: no ptlrpc body!\n", msg);
+ return 0;
+ }
+ return pb->pb_flags;
+ }
+ default:
+ /* flags might be printed in debug code while the message is
+ * still uninitialized */
+ return 0;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_get_flags);
+
+void lustre_msg_add_flags(struct lustre_msg *msg, int flags)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+ pb->pb_flags |= flags;
+ return;
+ }
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+EXPORT_SYMBOL(lustre_msg_add_flags);
+
+void lustre_msg_set_flags(struct lustre_msg *msg, int flags)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+ pb->pb_flags = flags;
+ return;
+ }
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+EXPORT_SYMBOL(lustre_msg_set_flags);
+
+void lustre_msg_clear_flags(struct lustre_msg *msg, int flags)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+ pb->pb_flags &= ~(MSG_GEN_FLAG_MASK & flags);
+ return;
+ }
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+EXPORT_SYMBOL(lustre_msg_clear_flags);
+
+__u32 lustre_msg_get_op_flags(struct lustre_msg *msg)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ if (!pb) {
+ CERROR("invalid msg %p: no ptlrpc body!\n", msg);
+ return 0;
+ }
+ return pb->pb_op_flags;
+ }
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_get_op_flags);
+
+void lustre_msg_add_op_flags(struct lustre_msg *msg, int flags)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+ pb->pb_op_flags |= flags;
+ return;
+ }
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+EXPORT_SYMBOL(lustre_msg_add_op_flags);
+
+void lustre_msg_set_op_flags(struct lustre_msg *msg, int flags)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+ pb->pb_op_flags |= flags;
+ return;
+ }
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+EXPORT_SYMBOL(lustre_msg_set_op_flags);
+
+struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ if (!pb) {
+ CERROR("invalid msg %p: no ptlrpc body!\n", msg);
+ return NULL;
+ }
+ return &pb->pb_handle;
+ }
+ default:
+ CERROR("incorrect message magic: %08x\n", msg->lm_magic);
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_get_handle);
+
+__u32 lustre_msg_get_type(struct lustre_msg *msg)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ if (!pb) {
+ CERROR("invalid msg %p: no ptlrpc body!\n", msg);
+ return PTL_RPC_MSG_ERR;
+ }
+ return pb->pb_type;
+ }
+ default:
+ CERROR("incorrect message magic: %08x\n", msg->lm_magic);
+ return PTL_RPC_MSG_ERR;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_get_type);
+
+__u32 lustre_msg_get_version(struct lustre_msg *msg)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ if (!pb) {
+ CERROR("invalid msg %p: no ptlrpc body!\n", msg);
+ return 0;
+ }
+ return pb->pb_version;
+ }
+ default:
+ CERROR("incorrect message magic: %08x\n", msg->lm_magic);
+ return 0;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_get_version);
+
+void lustre_msg_add_version(struct lustre_msg *msg, int version)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+ pb->pb_version |= version;
+ return;
+ }
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+EXPORT_SYMBOL(lustre_msg_add_version);
+
+__u32 lustre_msg_get_opc(struct lustre_msg *msg)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ if (!pb) {
+ CERROR("invalid msg %p: no ptlrpc body!\n", msg);
+ return 0;
+ }
+ return pb->pb_opc;
+ }
+ default:
+ CERROR("incorrect message magic: %08x(msg:%p)\n", msg->lm_magic, msg);
+ LBUG();
+ return 0;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_get_opc);
+
+__u64 lustre_msg_get_last_xid(struct lustre_msg *msg)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ if (!pb) {
+ CERROR("invalid msg %p: no ptlrpc body!\n", msg);
+ return 0;
+ }
+ return pb->pb_last_xid;
+ }
+ default:
+ CERROR("incorrect message magic: %08x\n", msg->lm_magic);
+ return 0;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_get_last_xid);
+
+__u64 lustre_msg_get_last_committed(struct lustre_msg *msg)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ if (!pb) {
+ CERROR("invalid msg %p: no ptlrpc body!\n", msg);
+ return 0;
+ }
+ return pb->pb_last_committed;
+ }
+ default:
+ CERROR("incorrect message magic: %08x\n", msg->lm_magic);
+ return 0;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_get_last_committed);
+
+__u64 *lustre_msg_get_versions(struct lustre_msg *msg)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V1:
+ return NULL;
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ if (!pb) {
+ CERROR("invalid msg %p: no ptlrpc body!\n", msg);
+ return NULL;
+ }
+ return pb->pb_pre_versions;
+ }
+ default:
+ CERROR("incorrect message magic: %08x\n", msg->lm_magic);
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_get_versions);
+
+__u64 lustre_msg_get_transno(struct lustre_msg *msg)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ if (!pb) {
+ CERROR("invalid msg %p: no ptlrpc body!\n", msg);
+ return 0;
+ }
+ return pb->pb_transno;
+ }
+ default:
+ CERROR("incorrect message magic: %08x\n", msg->lm_magic);
+ return 0;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_get_transno);
+
+int lustre_msg_get_status(struct lustre_msg *msg)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ if (!pb) {
+ CERROR("invalid msg %p: no ptlrpc body!\n", msg);
+ return -EINVAL;
+ }
+ return pb->pb_status;
+ }
+ default:
+ /* status might be printed in debug code while the message is
+ * still uninitialized */
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_get_status);
+
+__u64 lustre_msg_get_slv(struct lustre_msg *msg)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ if (!pb) {
+ CERROR("invalid msg %p: no ptlrpc body!\n", msg);
+ return -EINVAL;
+ }
+ return pb->pb_slv;
+ }
+ default:
+ CERROR("invalid msg magic %08x\n", msg->lm_magic);
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_get_slv);
+
+void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ if (!pb) {
+ CERROR("invalid msg %p: no ptlrpc body!\n", msg);
+ return;
+ }
+ pb->pb_slv = slv;
+ return;
+ }
+ default:
+ CERROR("invalid msg magic %x\n", msg->lm_magic);
+ return;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_set_slv);
+
+__u32 lustre_msg_get_limit(struct lustre_msg *msg)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ if (!pb) {
+ CERROR("invalid msg %p: no ptlrpc body!\n", msg);
+ return -EINVAL;
+ }
+ return pb->pb_limit;
+ }
+ default:
+ CERROR("invalid msg magic %x\n", msg->lm_magic);
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_get_limit);
+
+void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ if (!pb) {
+ CERROR("invalid msg %p: no ptlrpc body!\n", msg);
+ return;
+ }
+ pb->pb_limit = limit;
+ return;
+ }
+ default:
+ CERROR("invalid msg magic %08x\n", msg->lm_magic);
+ return;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_set_limit);
+
+__u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ if (!pb) {
+ CERROR("invalid msg %p: no ptlrpc body!\n", msg);
+ return 0;
+ }
+ return pb->pb_conn_cnt;
+ }
+ default:
+ CERROR("incorrect message magic: %08x\n", msg->lm_magic);
+ return 0;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_get_conn_cnt);
+
+int lustre_msg_is_v1(struct lustre_msg *msg)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V1:
+ case LUSTRE_MSG_MAGIC_V1_SWABBED:
+ return 1;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_is_v1);
+
+__u32 lustre_msg_get_magic(struct lustre_msg *msg)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2:
+ return msg->lm_magic;
+ default:
+ CERROR("incorrect message magic: %08x\n", msg->lm_magic);
+ return 0;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_get_magic);
+
+__u32 lustre_msg_get_timeout(struct lustre_msg *msg)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V1:
+ case LUSTRE_MSG_MAGIC_V1_SWABBED:
+ return 0;
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ if (!pb) {
+ CERROR("invalid msg %p: no ptlrpc body!\n", msg);
+ return 0;
+ }
+ return pb->pb_timeout;
+ }
+ default:
+ CERROR("incorrect message magic: %08x\n", msg->lm_magic);
+ return 0;
+ }
+}
+
+__u32 lustre_msg_get_service_time(struct lustre_msg *msg)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V1:
+ case LUSTRE_MSG_MAGIC_V1_SWABBED:
+ return 0;
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ if (!pb) {
+ CERROR("invalid msg %p: no ptlrpc body!\n", msg);
+ return 0;
+ }
+ return pb->pb_service_time;
+ }
+ default:
+ CERROR("incorrect message magic: %08x\n", msg->lm_magic);
+ return 0;
+ }
+}
+
+char *lustre_msg_get_jobid(struct lustre_msg *msg)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V1:
+ case LUSTRE_MSG_MAGIC_V1_SWABBED:
+ return NULL;
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb =
+ lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
+ sizeof(struct ptlrpc_body));
+ if (!pb)
+ return NULL;
+
+ return pb->pb_jobid;
+ }
+ default:
+ CERROR("incorrect message magic: %08x\n", msg->lm_magic);
+ return NULL;
+ }
+}
+EXPORT_SYMBOL(lustre_msg_get_jobid);
+
+__u32 lustre_msg_get_cksum(struct lustre_msg *msg)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2:
+ return msg->lm_cksum;
+ default:
+ CERROR("incorrect message magic: %08x\n", msg->lm_magic);
+ return 0;
+ }
+}
+
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
+/*
+ * In 1.6 and 1.8 the checksum was computed only on struct ptlrpc_body as it
+ * was defined in 1.6 (88 bytes, smaller than the full size in 1.8). It makes
+ * more sense to compute the checksum on the full ptlrpc_body, regardless
+ * of what size it is, but in order to keep interoperability with 1.8 we
+ * can optionally also checksum only the first 88 bytes (caller decides). */
+# define ptlrpc_body_cksum_size_compat18 88
+
+__u32 lustre_msg_calc_cksum(struct lustre_msg *msg, int compat18)
+#else
+# warning "remove checksum compatibility support for b1_8"
+__u32 lustre_msg_calc_cksum(struct lustre_msg *msg)
+#endif
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
+ __u32 crc;
+ unsigned int hsize = 4;
+ __u32 len = compat18 ? ptlrpc_body_cksum_size_compat18 :
+ lustre_msg_buflen(msg, MSG_PTLRPC_BODY_OFF);
+ LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+ cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32, (unsigned char *)pb,
+ len, NULL, 0, (unsigned char *)&crc,
+ &hsize);
+ return crc;
+#else
+# warning "remove checksum compatibility support for b1_8"
+ __u32 crc;
+ unsigned int hsize = 4;
+ cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32, (unsigned char *)pb,
+ lustre_msg_buflen(msg, MSG_PTLRPC_BODY_OFF),
+ NULL, 0, (unsigned char *)&crc, &hsize);
+ return crc;
+#endif
+ }
+ default:
+ CERROR("incorrect message magic: %08x\n", msg->lm_magic);
+ return 0;
+ }
+}
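
A minimal sketch of how a receiver might use this helper together with lustre_msg_get_cksum() on the pre-2.7.50 branch; passing compat18 = 0 checksums the full ptlrpc_body, and the error handling here is only illustrative:

	__u32 stored = lustre_msg_get_cksum(msg);
	__u32 calced = lustre_msg_calc_cksum(msg, 0);

	/* A zero stored checksum means the sender did not fill one in. */
	if (stored != 0 && stored != calced)
		CERROR("reply checksum mismatch: stored %08x != computed %08x\n",
		       stored, calced);
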
+
+void lustre_msg_set_handle(struct lustre_msg *msg, struct lustre_handle *handle)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+ pb->pb_handle = *handle;
+ return;
+ }
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+EXPORT_SYMBOL(lustre_msg_set_handle);
+
+void lustre_msg_set_type(struct lustre_msg *msg, __u32 type)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+ pb->pb_type = type;
+ return;
+ }
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+EXPORT_SYMBOL(lustre_msg_set_type);
+
+void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+ pb->pb_opc = opc;
+ return;
+ }
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+EXPORT_SYMBOL(lustre_msg_set_opc);
+
+void lustre_msg_set_last_xid(struct lustre_msg *msg, __u64 last_xid)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+ pb->pb_last_xid = last_xid;
+ return;
+ }
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+EXPORT_SYMBOL(lustre_msg_set_last_xid);
+
+void lustre_msg_set_last_committed(struct lustre_msg *msg, __u64 last_committed)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+ pb->pb_last_committed = last_committed;
+ return;
+ }
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+EXPORT_SYMBOL(lustre_msg_set_last_committed);
+
+void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V1:
+ return;
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+ pb->pb_pre_versions[0] = versions[0];
+ pb->pb_pre_versions[1] = versions[1];
+ pb->pb_pre_versions[2] = versions[2];
+ pb->pb_pre_versions[3] = versions[3];
+ return;
+ }
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+EXPORT_SYMBOL(lustre_msg_set_versions);
+
+void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+ pb->pb_transno = transno;
+ return;
+ }
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+EXPORT_SYMBOL(lustre_msg_set_transno);
+
+void lustre_msg_set_status(struct lustre_msg *msg, __u32 status)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+ pb->pb_status = status;
+ return;
+ }
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+EXPORT_SYMBOL(lustre_msg_set_status);
+
+void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+ pb->pb_conn_cnt = conn_cnt;
+ return;
+ }
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+EXPORT_SYMBOL(lustre_msg_set_conn_cnt);
+
+void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V1:
+ return;
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+ pb->pb_timeout = timeout;
+ return;
+ }
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+
+void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V1:
+ return;
+ case LUSTRE_MSG_MAGIC_V2: {
+ struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+ LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+ pb->pb_service_time = service_time;
+ return;
+ }
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+
+void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V1:
+ return;
+ case LUSTRE_MSG_MAGIC_V2: {
+ __u32 opc = lustre_msg_get_opc(msg);
+ struct ptlrpc_body *pb;
+
+ /* Don't set jobid for ldlm ast RPCs, they have been shrunk.
+ * See the comment in ptlrpc_request_pack(). */
+ if (!opc || opc == LDLM_BL_CALLBACK ||
+ opc == LDLM_CP_CALLBACK || opc == LDLM_GL_CALLBACK)
+ return;
+
+ pb = lustre_msg_buf_v2(msg, MSG_PTLRPC_BODY_OFF,
+ sizeof(struct ptlrpc_body));
+ LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+
+ if (jobid != NULL)
+ memcpy(pb->pb_jobid, jobid, JOBSTATS_JOBID_SIZE);
+ else if (pb->pb_jobid[0] == '\0')
+ lustre_get_jobid(pb->pb_jobid);
+ return;
+ }
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+EXPORT_SYMBOL(lustre_msg_set_jobid);
+
+void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum)
+{
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V1:
+ return;
+ case LUSTRE_MSG_MAGIC_V2:
+ msg->lm_cksum = cksum;
+ return;
+ default:
+ LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+ }
+}
+
+void ptlrpc_request_set_replen(struct ptlrpc_request *req)
+{
+ int count = req_capsule_filled_sizes(&req->rq_pill, RCL_SERVER);
+
+ req->rq_replen = lustre_msg_size(req->rq_reqmsg->lm_magic, count,
+ req->rq_pill.rc_area[RCL_SERVER]);
+ if (req->rq_reqmsg->lm_magic == LUSTRE_MSG_MAGIC_V2)
+ req->rq_reqmsg->lm_repsize = req->rq_replen;
+}
+EXPORT_SYMBOL(ptlrpc_request_set_replen);
+
+void ptlrpc_req_set_repsize(struct ptlrpc_request *req, int count, __u32 *lens)
+{
+ req->rq_replen = lustre_msg_size(req->rq_reqmsg->lm_magic, count, lens);
+ if (req->rq_reqmsg->lm_magic == LUSTRE_MSG_MAGIC_V2)
+ req->rq_reqmsg->lm_repsize = req->rq_replen;
+}
+EXPORT_SYMBOL(ptlrpc_req_set_repsize);
+
+/**
+ * Send a remote set_info_async.
+ *
+ * This may go from client to server or server to client.
+ */
+int do_set_info_async(struct obd_import *imp,
+ int opcode, int version,
+ obd_count keylen, void *key,
+ obd_count vallen, void *val,
+ struct ptlrpc_request_set *set)
+{
+ struct ptlrpc_request *req;
+ char *tmp;
+ int rc;
+
+ req = ptlrpc_request_alloc(imp, &RQF_OBD_SET_INFO);
+ if (req == NULL)
+ return -ENOMEM;
+
+ req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
+ RCL_CLIENT, keylen);
+ req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
+ RCL_CLIENT, vallen);
+ rc = ptlrpc_request_pack(req, version, opcode);
+ if (rc) {
+ ptlrpc_request_free(req);
+ return rc;
+ }
+
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
+ memcpy(tmp, key, keylen);
+ tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
+ memcpy(tmp, val, vallen);
+
+ ptlrpc_request_set_replen(req);
+
+ if (set) {
+ ptlrpc_set_add_req(set, req);
+ ptlrpc_check_set(NULL, set);
+ } else {
+ rc = ptlrpc_queue_wait(req);
+ ptlrpc_req_finished(req);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(do_set_info_async);
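
A hedged usage sketch; EXAMPLE_SET_INFO_OPC, EXAMPLE_VERSION and the key string are placeholders rather than real protocol constants, and passing a NULL set makes the call synchronous via ptlrpc_queue_wait():

	char key[] = "example.key";	/* placeholder key */
	__u32 val = 1;			/* placeholder value */
	int rc;

	rc = do_set_info_async(imp, EXAMPLE_SET_INFO_OPC, EXAMPLE_VERSION,
			       sizeof(key), key, sizeof(val), &val, NULL);
	if (rc)
		CERROR("set_info failed: rc = %d\n", rc);
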
+
+/* byte flipping routines for all wire types declared in
+ * lustre_idl.h implemented here.
+ */
+void lustre_swab_ptlrpc_body(struct ptlrpc_body *b)
+{
+ __swab32s (&b->pb_type);
+ __swab32s (&b->pb_version);
+ __swab32s (&b->pb_opc);
+ __swab32s (&b->pb_status);
+ __swab64s (&b->pb_last_xid);
+ __swab64s (&b->pb_last_seen);
+ __swab64s (&b->pb_last_committed);
+ __swab64s (&b->pb_transno);
+ __swab32s (&b->pb_flags);
+ __swab32s (&b->pb_op_flags);
+ __swab32s (&b->pb_conn_cnt);
+ __swab32s (&b->pb_timeout);
+ __swab32s (&b->pb_service_time);
+ __swab32s (&b->pb_limit);
+ __swab64s (&b->pb_slv);
+ __swab64s (&b->pb_pre_versions[0]);
+ __swab64s (&b->pb_pre_versions[1]);
+ __swab64s (&b->pb_pre_versions[2]);
+ __swab64s (&b->pb_pre_versions[3]);
+ CLASSERT(offsetof(typeof(*b), pb_padding) != 0);
+ /* While we need to maintain compatibility between
+ * clients and servers without ptlrpc_body_v2 (< 2.3)
+ * do not swab any fields beyond pb_jobid, as we are
+ * using this swab function for both ptlrpc_body
+ * and ptlrpc_body_v2. */
+ CLASSERT(offsetof(typeof(*b), pb_jobid) != 0);
+}
+EXPORT_SYMBOL(lustre_swab_ptlrpc_body);
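
For context, a rough sketch of how these per-type swab helpers are meant to be driven, mirroring the need_swab/set_swabbed bookkeeping used by lustre_unpack_ptlrpc_body_v2() earlier in this file (the buffer index idx is illustrative):

	/* Swab an obdo carried in a reply buffer exactly once. */
	struct obdo *oa = lustre_msg_buf(req->rq_repmsg, idx, sizeof(*oa));

	if (oa != NULL && ptlrpc_buf_need_swab(req, 0, idx)) {
		lustre_swab_obdo(oa);
		ptlrpc_buf_set_swabbed(req, 0, idx);
	}
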
+
+void lustre_swab_connect(struct obd_connect_data *ocd)
+{
+ __swab64s(&ocd->ocd_connect_flags);
+ __swab32s(&ocd->ocd_version);
+ __swab32s(&ocd->ocd_grant);
+ __swab64s(&ocd->ocd_ibits_known);
+ __swab32s(&ocd->ocd_index);
+ __swab32s(&ocd->ocd_brw_size);
+ /* ocd_blocksize and ocd_inodespace don't need to be swabbed because
+ * they are single-byte values */
+ __swab16s(&ocd->ocd_grant_extent);
+ __swab32s(&ocd->ocd_unused);
+ __swab64s(&ocd->ocd_transno);
+ __swab32s(&ocd->ocd_group);
+ __swab32s(&ocd->ocd_cksum_types);
+ __swab32s(&ocd->ocd_instance);
+ /* Fields after ocd_cksum_types are only accessible by the receiver
+ * if the corresponding flag in ocd_connect_flags is set. Accessing
+ * any field after ocd_maxbytes on the receiver without a valid flag
+ * may result in out-of-bound memory access and kernel oops. */
+ if (ocd->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)
+ __swab32s(&ocd->ocd_max_easize);
+ if (ocd->ocd_connect_flags & OBD_CONNECT_MAXBYTES)
+ __swab64s(&ocd->ocd_maxbytes);
+ CLASSERT(offsetof(typeof(*ocd), padding1) != 0);
+ CLASSERT(offsetof(typeof(*ocd), padding2) != 0);
+ CLASSERT(offsetof(typeof(*ocd), padding3) != 0);
+ CLASSERT(offsetof(typeof(*ocd), padding4) != 0);
+ CLASSERT(offsetof(typeof(*ocd), padding5) != 0);
+ CLASSERT(offsetof(typeof(*ocd), padding6) != 0);
+ CLASSERT(offsetof(typeof(*ocd), padding7) != 0);
+ CLASSERT(offsetof(typeof(*ocd), padding8) != 0);
+ CLASSERT(offsetof(typeof(*ocd), padding9) != 0);
+ CLASSERT(offsetof(typeof(*ocd), paddingA) != 0);
+ CLASSERT(offsetof(typeof(*ocd), paddingB) != 0);
+ CLASSERT(offsetof(typeof(*ocd), paddingC) != 0);
+ CLASSERT(offsetof(typeof(*ocd), paddingD) != 0);
+ CLASSERT(offsetof(typeof(*ocd), paddingE) != 0);
+ CLASSERT(offsetof(typeof(*ocd), paddingF) != 0);
+}
+
+void lustre_swab_obdo (struct obdo *o)
+{
+ __swab64s (&o->o_valid);
+ lustre_swab_ost_id(&o->o_oi);
+ __swab64s (&o->o_parent_seq);
+ __swab64s (&o->o_size);
+ __swab64s (&o->o_mtime);
+ __swab64s (&o->o_atime);
+ __swab64s (&o->o_ctime);
+ __swab64s (&o->o_blocks);
+ __swab64s (&o->o_grant);
+ __swab32s (&o->o_blksize);
+ __swab32s (&o->o_mode);
+ __swab32s (&o->o_uid);
+ __swab32s (&o->o_gid);
+ __swab32s (&o->o_flags);
+ __swab32s (&o->o_nlink);
+ __swab32s (&o->o_parent_oid);
+ __swab32s (&o->o_misc);
+ __swab64s (&o->o_ioepoch);
+ __swab32s (&o->o_stripe_idx);
+ __swab32s (&o->o_parent_ver);
+ /* o_handle is opaque */
+ /* o_lcookie is swabbed elsewhere */
+ __swab32s (&o->o_uid_h);
+ __swab32s (&o->o_gid_h);
+ __swab64s (&o->o_data_version);
+ CLASSERT(offsetof(typeof(*o), o_padding_4) != 0);
+ CLASSERT(offsetof(typeof(*o), o_padding_5) != 0);
+ CLASSERT(offsetof(typeof(*o), o_padding_6) != 0);
+}
+EXPORT_SYMBOL(lustre_swab_obdo);
+
+void lustre_swab_obd_statfs (struct obd_statfs *os)
+{
+ __swab64s (&os->os_type);
+ __swab64s (&os->os_blocks);
+ __swab64s (&os->os_bfree);
+ __swab64s (&os->os_bavail);
+ __swab64s (&os->os_files);
+ __swab64s (&os->os_ffree);
+ /* no need to swab os_fsid */
+ __swab32s (&os->os_bsize);
+ __swab32s (&os->os_namelen);
+ __swab64s (&os->os_maxbytes);
+ __swab32s (&os->os_state);
+ CLASSERT(offsetof(typeof(*os), os_fprecreated) != 0);
+ CLASSERT(offsetof(typeof(*os), os_spare2) != 0);
+ CLASSERT(offsetof(typeof(*os), os_spare3) != 0);
+ CLASSERT(offsetof(typeof(*os), os_spare4) != 0);
+ CLASSERT(offsetof(typeof(*os), os_spare5) != 0);
+ CLASSERT(offsetof(typeof(*os), os_spare6) != 0);
+ CLASSERT(offsetof(typeof(*os), os_spare7) != 0);
+ CLASSERT(offsetof(typeof(*os), os_spare8) != 0);
+ CLASSERT(offsetof(typeof(*os), os_spare9) != 0);
+}
+EXPORT_SYMBOL(lustre_swab_obd_statfs);
+
+void lustre_swab_obd_ioobj(struct obd_ioobj *ioo)
+{
+ lustre_swab_ost_id(&ioo->ioo_oid);
+ __swab32s(&ioo->ioo_max_brw);
+ __swab32s(&ioo->ioo_bufcnt);
+}
+EXPORT_SYMBOL(lustre_swab_obd_ioobj);
+
+void lustre_swab_niobuf_remote (struct niobuf_remote *nbr)
+{
+ __swab64s (&nbr->offset);
+ __swab32s (&nbr->len);
+ __swab32s (&nbr->flags);
+}
+EXPORT_SYMBOL(lustre_swab_niobuf_remote);
+
+void lustre_swab_ost_body (struct ost_body *b)
+{
+ lustre_swab_obdo (&b->oa);
+}
+EXPORT_SYMBOL(lustre_swab_ost_body);
+
+void lustre_swab_ost_last_id(obd_id *id)
+{
+ __swab64s(id);
+}
+EXPORT_SYMBOL(lustre_swab_ost_last_id);
+
+void lustre_swab_generic_32s(__u32 *val)
+{
+ __swab32s(val);
+}
+EXPORT_SYMBOL(lustre_swab_generic_32s);
+
+void lustre_swab_gl_desc(union ldlm_gl_desc *desc)
+{
+ lustre_swab_lu_fid(&desc->lquota_desc.gl_id.qid_fid);
+ __swab64s(&desc->lquota_desc.gl_flags);
+ __swab64s(&desc->lquota_desc.gl_ver);
+ __swab64s(&desc->lquota_desc.gl_hardlimit);
+ __swab64s(&desc->lquota_desc.gl_softlimit);
+ __swab64s(&desc->lquota_desc.gl_time);
+ CLASSERT(offsetof(typeof(desc->lquota_desc), gl_pad2) != 0);
+}
+
+void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb)
+{
+ __swab64s(&lvb->lvb_size);
+ __swab64s(&lvb->lvb_mtime);
+ __swab64s(&lvb->lvb_atime);
+ __swab64s(&lvb->lvb_ctime);
+ __swab64s(&lvb->lvb_blocks);
+}
+EXPORT_SYMBOL(lustre_swab_ost_lvb_v1);
+
+void lustre_swab_ost_lvb(struct ost_lvb *lvb)
+{
+ __swab64s(&lvb->lvb_size);
+ __swab64s(&lvb->lvb_mtime);
+ __swab64s(&lvb->lvb_atime);
+ __swab64s(&lvb->lvb_ctime);
+ __swab64s(&lvb->lvb_blocks);
+ __swab32s(&lvb->lvb_mtime_ns);
+ __swab32s(&lvb->lvb_atime_ns);
+ __swab32s(&lvb->lvb_ctime_ns);
+ __swab32s(&lvb->lvb_padding);
+}
+EXPORT_SYMBOL(lustre_swab_ost_lvb);
+
+void lustre_swab_lquota_lvb(struct lquota_lvb *lvb)
+{
+ __swab64s(&lvb->lvb_flags);
+ __swab64s(&lvb->lvb_id_may_rel);
+ __swab64s(&lvb->lvb_id_rel);
+ __swab64s(&lvb->lvb_id_qunit);
+ __swab64s(&lvb->lvb_pad1);
+}
+EXPORT_SYMBOL(lustre_swab_lquota_lvb);
+
+void lustre_swab_mdt_body (struct mdt_body *b)
+{
+ lustre_swab_lu_fid (&b->fid1);
+ lustre_swab_lu_fid (&b->fid2);
+ /* handle is opaque */
+ __swab64s (&b->valid);
+ __swab64s (&b->size);
+ __swab64s (&b->mtime);
+ __swab64s (&b->atime);
+ __swab64s (&b->ctime);
+ __swab64s (&b->blocks);
+ __swab64s (&b->ioepoch);
+ CLASSERT(offsetof(typeof(*b), unused1) != 0);
+ __swab32s (&b->fsuid);
+ __swab32s (&b->fsgid);
+ __swab32s (&b->capability);
+ __swab32s (&b->mode);
+ __swab32s (&b->uid);
+ __swab32s (&b->gid);
+ __swab32s (&b->flags);
+ __swab32s (&b->rdev);
+ __swab32s (&b->nlink);
+ CLASSERT(offsetof(typeof(*b), unused2) != 0);
+ __swab32s (&b->suppgid);
+ __swab32s (&b->eadatasize);
+ __swab32s (&b->aclsize);
+ __swab32s (&b->max_mdsize);
+ __swab32s (&b->max_cookiesize);
+ __swab32s (&b->uid_h);
+ __swab32s (&b->gid_h);
+ CLASSERT(offsetof(typeof(*b), padding_5) != 0);
+}
+EXPORT_SYMBOL(lustre_swab_mdt_body);
+
+void lustre_swab_mdt_ioepoch (struct mdt_ioepoch *b)
+{
+ /* handle is opaque */
+ __swab64s (&b->ioepoch);
+ __swab32s (&b->flags);
+ CLASSERT(offsetof(typeof(*b), padding) != 0);
+}
+EXPORT_SYMBOL(lustre_swab_mdt_ioepoch);
+
+void lustre_swab_mgs_target_info(struct mgs_target_info *mti)
+{
+ int i;
+ __swab32s(&mti->mti_lustre_ver);
+ __swab32s(&mti->mti_stripe_index);
+ __swab32s(&mti->mti_config_ver);
+ __swab32s(&mti->mti_flags);
+ __swab32s(&mti->mti_instance);
+ __swab32s(&mti->mti_nid_count);
+ CLASSERT(sizeof(lnet_nid_t) == sizeof(__u64));
+ for (i = 0; i < MTI_NIDS_MAX; i++)
+ __swab64s(&mti->mti_nids[i]);
+}
+EXPORT_SYMBOL(lustre_swab_mgs_target_info);
+
+void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *entry)
+{
+ int i;
+
+ __swab64s(&entry->mne_version);
+ __swab32s(&entry->mne_instance);
+ __swab32s(&entry->mne_index);
+ __swab32s(&entry->mne_length);
+
+	/* mne_nid_(count|type) must be one byte in size because we
+	 * access it without swapping. */
+ CLASSERT(sizeof(entry->mne_nid_count) == sizeof(__u8));
+ CLASSERT(sizeof(entry->mne_nid_type) == sizeof(__u8));
+
+ /* remove this assertion if ipv6 is supported. */
+ LASSERT(entry->mne_nid_type == 0);
+ for (i = 0; i < entry->mne_nid_count; i++) {
+ CLASSERT(sizeof(lnet_nid_t) == sizeof(__u64));
+ __swab64s(&entry->u.nids[i]);
+ }
+}
+EXPORT_SYMBOL(lustre_swab_mgs_nidtbl_entry);
+
+void lustre_swab_mgs_config_body(struct mgs_config_body *body)
+{
+ __swab64s(&body->mcb_offset);
+ __swab32s(&body->mcb_units);
+ __swab16s(&body->mcb_type);
+}
+EXPORT_SYMBOL(lustre_swab_mgs_config_body);
+
+void lustre_swab_mgs_config_res(struct mgs_config_res *body)
+{
+ __swab64s(&body->mcr_offset);
+ __swab64s(&body->mcr_size);
+}
+EXPORT_SYMBOL(lustre_swab_mgs_config_res);
+
+static void lustre_swab_obd_dqinfo (struct obd_dqinfo *i)
+{
+ __swab64s (&i->dqi_bgrace);
+ __swab64s (&i->dqi_igrace);
+ __swab32s (&i->dqi_flags);
+ __swab32s (&i->dqi_valid);
+}
+
+static void lustre_swab_obd_dqblk (struct obd_dqblk *b)
+{
+ __swab64s (&b->dqb_ihardlimit);
+ __swab64s (&b->dqb_isoftlimit);
+ __swab64s (&b->dqb_curinodes);
+ __swab64s (&b->dqb_bhardlimit);
+ __swab64s (&b->dqb_bsoftlimit);
+ __swab64s (&b->dqb_curspace);
+ __swab64s (&b->dqb_btime);
+ __swab64s (&b->dqb_itime);
+ __swab32s (&b->dqb_valid);
+ CLASSERT(offsetof(typeof(*b), dqb_padding) != 0);
+}
+
+void lustre_swab_obd_quotactl (struct obd_quotactl *q)
+{
+ __swab32s (&q->qc_cmd);
+ __swab32s (&q->qc_type);
+ __swab32s (&q->qc_id);
+ __swab32s (&q->qc_stat);
+ lustre_swab_obd_dqinfo (&q->qc_dqinfo);
+ lustre_swab_obd_dqblk (&q->qc_dqblk);
+}
+EXPORT_SYMBOL(lustre_swab_obd_quotactl);
+
+void lustre_swab_mdt_remote_perm (struct mdt_remote_perm *p)
+{
+ __swab32s (&p->rp_uid);
+ __swab32s (&p->rp_gid);
+ __swab32s (&p->rp_fsuid);
+ __swab32s (&p->rp_fsuid_h);
+ __swab32s (&p->rp_fsgid);
+ __swab32s (&p->rp_fsgid_h);
+ __swab32s (&p->rp_access_perm);
+ __swab32s (&p->rp_padding);
+}
+EXPORT_SYMBOL(lustre_swab_mdt_remote_perm);
+
+void lustre_swab_fid2path(struct getinfo_fid2path *gf)
+{
+ lustre_swab_lu_fid(&gf->gf_fid);
+ __swab64s(&gf->gf_recno);
+ __swab32s(&gf->gf_linkno);
+ __swab32s(&gf->gf_pathlen);
+}
+EXPORT_SYMBOL(lustre_swab_fid2path);
+
+void lustre_swab_fiemap_extent(struct ll_fiemap_extent *fm_extent)
+{
+ __swab64s(&fm_extent->fe_logical);
+ __swab64s(&fm_extent->fe_physical);
+ __swab64s(&fm_extent->fe_length);
+ __swab32s(&fm_extent->fe_flags);
+ __swab32s(&fm_extent->fe_device);
+}
+
+void lustre_swab_fiemap(struct ll_user_fiemap *fiemap)
+{
+ int i;
+
+ __swab64s(&fiemap->fm_start);
+ __swab64s(&fiemap->fm_length);
+ __swab32s(&fiemap->fm_flags);
+ __swab32s(&fiemap->fm_mapped_extents);
+ __swab32s(&fiemap->fm_extent_count);
+ __swab32s(&fiemap->fm_reserved);
+
+ for (i = 0; i < fiemap->fm_mapped_extents; i++)
+ lustre_swab_fiemap_extent(&fiemap->fm_extents[i]);
+}
+EXPORT_SYMBOL(lustre_swab_fiemap);
+
+void lustre_swab_idx_info(struct idx_info *ii)
+{
+ __swab32s(&ii->ii_magic);
+ __swab32s(&ii->ii_flags);
+ __swab16s(&ii->ii_count);
+ __swab32s(&ii->ii_attrs);
+ lustre_swab_lu_fid(&ii->ii_fid);
+ __swab64s(&ii->ii_version);
+ __swab64s(&ii->ii_hash_start);
+ __swab64s(&ii->ii_hash_end);
+ __swab16s(&ii->ii_keysize);
+ __swab16s(&ii->ii_recsize);
+}
+
+void lustre_swab_lip_header(struct lu_idxpage *lip)
+{
+ /* swab header */
+ __swab32s(&lip->lip_magic);
+ __swab16s(&lip->lip_flags);
+ __swab16s(&lip->lip_nr);
+}
+EXPORT_SYMBOL(lustre_swab_lip_header);
+
+void lustre_swab_mdt_rec_reint (struct mdt_rec_reint *rr)
+{
+ __swab32s(&rr->rr_opcode);
+ __swab32s(&rr->rr_cap);
+ __swab32s(&rr->rr_fsuid);
+ /* rr_fsuid_h is unused */
+ __swab32s(&rr->rr_fsgid);
+ /* rr_fsgid_h is unused */
+ __swab32s(&rr->rr_suppgid1);
+ /* rr_suppgid1_h is unused */
+ __swab32s(&rr->rr_suppgid2);
+ /* rr_suppgid2_h is unused */
+ lustre_swab_lu_fid(&rr->rr_fid1);
+ lustre_swab_lu_fid(&rr->rr_fid2);
+ __swab64s(&rr->rr_mtime);
+ __swab64s(&rr->rr_atime);
+ __swab64s(&rr->rr_ctime);
+ __swab64s(&rr->rr_size);
+ __swab64s(&rr->rr_blocks);
+ __swab32s(&rr->rr_bias);
+ __swab32s(&rr->rr_mode);
+ __swab32s(&rr->rr_flags);
+ __swab32s(&rr->rr_flags_h);
+ __swab32s(&rr->rr_umask);
+
+ CLASSERT(offsetof(typeof(*rr), rr_padding_4) != 0);
+}
+EXPORT_SYMBOL(lustre_swab_mdt_rec_reint);
+
+void lustre_swab_lov_desc (struct lov_desc *ld)
+{
+ __swab32s (&ld->ld_tgt_count);
+ __swab32s (&ld->ld_active_tgt_count);
+ __swab32s (&ld->ld_default_stripe_count);
+ __swab32s (&ld->ld_pattern);
+ __swab64s (&ld->ld_default_stripe_size);
+ __swab64s (&ld->ld_default_stripe_offset);
+ __swab32s (&ld->ld_qos_maxage);
+ /* uuid endian insensitive */
+}
+EXPORT_SYMBOL(lustre_swab_lov_desc);
+
+void lustre_swab_lmv_desc (struct lmv_desc *ld)
+{
+ __swab32s (&ld->ld_tgt_count);
+ __swab32s (&ld->ld_active_tgt_count);
+ __swab32s (&ld->ld_default_stripe_count);
+ __swab32s (&ld->ld_pattern);
+ __swab64s (&ld->ld_default_hash_size);
+ __swab32s (&ld->ld_qos_maxage);
+ /* uuid endian insensitive */
+}
+
+void lustre_swab_lmv_stripe_md (struct lmv_stripe_md *mea)
+{
+ __swab32s(&mea->mea_magic);
+ __swab32s(&mea->mea_count);
+ __swab32s(&mea->mea_master);
+ CLASSERT(offsetof(typeof(*mea), mea_padding) != 0);
+}
+
+void lustre_swab_lmv_user_md(struct lmv_user_md *lum)
+{
+ int i;
+
+ __swab32s(&lum->lum_magic);
+ __swab32s(&lum->lum_stripe_count);
+ __swab32s(&lum->lum_stripe_offset);
+ __swab32s(&lum->lum_hash_type);
+ __swab32s(&lum->lum_type);
+ CLASSERT(offsetof(typeof(*lum), lum_padding1) != 0);
+ CLASSERT(offsetof(typeof(*lum), lum_padding2) != 0);
+ CLASSERT(offsetof(typeof(*lum), lum_padding3) != 0);
+
+ for (i = 0; i < lum->lum_stripe_count; i++) {
+ __swab32s(&lum->lum_objects[i].lum_mds);
+ lustre_swab_lu_fid(&lum->lum_objects[i].lum_fid);
+ }
+
+}
+EXPORT_SYMBOL(lustre_swab_lmv_user_md);
+
+static void print_lum (struct lov_user_md *lum)
+{
+ CDEBUG(D_OTHER, "lov_user_md %p:\n", lum);
+ CDEBUG(D_OTHER, "\tlmm_magic: %#x\n", lum->lmm_magic);
+ CDEBUG(D_OTHER, "\tlmm_pattern: %#x\n", lum->lmm_pattern);
+ CDEBUG(D_OTHER, "\tlmm_object_id: "LPU64"\n", lmm_oi_id(&lum->lmm_oi));
+ CDEBUG(D_OTHER, "\tlmm_object_gr: "LPU64"\n", lmm_oi_seq(&lum->lmm_oi));
+ CDEBUG(D_OTHER, "\tlmm_stripe_size: %#x\n", lum->lmm_stripe_size);
+ CDEBUG(D_OTHER, "\tlmm_stripe_count: %#x\n", lum->lmm_stripe_count);
+ CDEBUG(D_OTHER, "\tlmm_stripe_offset/lmm_layout_gen: %#x\n",
+ lum->lmm_stripe_offset);
+}
+
+static void lustre_swab_lmm_oi(struct ost_id *oi)
+{
+ __swab64s(&oi->oi.oi_id);
+ __swab64s(&oi->oi.oi_seq);
+}
+
+static void lustre_swab_lov_user_md_common(struct lov_user_md_v1 *lum)
+{
+ __swab32s(&lum->lmm_magic);
+ __swab32s(&lum->lmm_pattern);
+ lustre_swab_lmm_oi(&lum->lmm_oi);
+ __swab32s(&lum->lmm_stripe_size);
+ __swab16s(&lum->lmm_stripe_count);
+ __swab16s(&lum->lmm_stripe_offset);
+ print_lum(lum);
+}
+
+void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum)
+{
+ CDEBUG(D_IOCTL, "swabbing lov_user_md v1\n");
+ lustre_swab_lov_user_md_common(lum);
+}
+EXPORT_SYMBOL(lustre_swab_lov_user_md_v1);
+
+void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum)
+{
+ CDEBUG(D_IOCTL, "swabbing lov_user_md v3\n");
+ lustre_swab_lov_user_md_common((struct lov_user_md_v1 *)lum);
+	/* lmm_pool_name is a char array, so there is nothing to swab */
+}
+EXPORT_SYMBOL(lustre_swab_lov_user_md_v3);
+
+void lustre_swab_lov_mds_md(struct lov_mds_md *lmm)
+{
+ CDEBUG(D_IOCTL, "swabbing lov_mds_md\n");
+ __swab32s(&lmm->lmm_magic);
+ __swab32s(&lmm->lmm_pattern);
+ lustre_swab_lmm_oi(&lmm->lmm_oi);
+ __swab32s(&lmm->lmm_stripe_size);
+ __swab16s(&lmm->lmm_stripe_count);
+ __swab16s(&lmm->lmm_layout_gen);
+}
+EXPORT_SYMBOL(lustre_swab_lov_mds_md);
+
+void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
+ int stripe_count)
+{
+ int i;
+
+ for (i = 0; i < stripe_count; i++) {
+ lustre_swab_ost_id(&(lod[i].l_ost_oi));
+ __swab32s(&(lod[i].l_ost_gen));
+ __swab32s(&(lod[i].l_ost_idx));
+ }
+}
+EXPORT_SYMBOL(lustre_swab_lov_user_md_objects);
+
+void lustre_swab_ldlm_res_id (struct ldlm_res_id *id)
+{
+ int i;
+
+ for (i = 0; i < RES_NAME_SIZE; i++)
+ __swab64s (&id->name[i]);
+}
+EXPORT_SYMBOL(lustre_swab_ldlm_res_id);
+
+void lustre_swab_ldlm_policy_data (ldlm_wire_policy_data_t *d)
+{
+ /* the lock data is a union and the first two fields are always an
+ * extent so it's ok to process an LDLM_EXTENT and LDLM_FLOCK lock
+ * data the same way. */
+ __swab64s(&d->l_extent.start);
+ __swab64s(&d->l_extent.end);
+ __swab64s(&d->l_extent.gid);
+ __swab64s(&d->l_flock.lfw_owner);
+ __swab32s(&d->l_flock.lfw_pid);
+}
+EXPORT_SYMBOL(lustre_swab_ldlm_policy_data);
+
+void lustre_swab_ldlm_intent (struct ldlm_intent *i)
+{
+ __swab64s (&i->opc);
+}
+EXPORT_SYMBOL(lustre_swab_ldlm_intent);
+
+void lustre_swab_ldlm_resource_desc (struct ldlm_resource_desc *r)
+{
+ __swab32s (&r->lr_type);
+ CLASSERT(offsetof(typeof(*r), lr_padding) != 0);
+ lustre_swab_ldlm_res_id (&r->lr_name);
+}
+EXPORT_SYMBOL(lustre_swab_ldlm_resource_desc);
+
+void lustre_swab_ldlm_lock_desc (struct ldlm_lock_desc *l)
+{
+ lustre_swab_ldlm_resource_desc (&l->l_resource);
+ __swab32s (&l->l_req_mode);
+ __swab32s (&l->l_granted_mode);
+ lustre_swab_ldlm_policy_data (&l->l_policy_data);
+}
+EXPORT_SYMBOL(lustre_swab_ldlm_lock_desc);
+
+void lustre_swab_ldlm_request (struct ldlm_request *rq)
+{
+ __swab32s (&rq->lock_flags);
+ lustre_swab_ldlm_lock_desc (&rq->lock_desc);
+ __swab32s (&rq->lock_count);
+ /* lock_handle[] opaque */
+}
+EXPORT_SYMBOL(lustre_swab_ldlm_request);
+
+void lustre_swab_ldlm_reply (struct ldlm_reply *r)
+{
+ __swab32s (&r->lock_flags);
+ CLASSERT(offsetof(typeof(*r), lock_padding) != 0);
+ lustre_swab_ldlm_lock_desc (&r->lock_desc);
+ /* lock_handle opaque */
+ __swab64s (&r->lock_policy_res1);
+ __swab64s (&r->lock_policy_res2);
+}
+EXPORT_SYMBOL(lustre_swab_ldlm_reply);
+
+void lustre_swab_quota_body(struct quota_body *b)
+{
+ lustre_swab_lu_fid(&b->qb_fid);
+ lustre_swab_lu_fid((struct lu_fid *)&b->qb_id);
+ __swab32s(&b->qb_flags);
+ __swab64s(&b->qb_count);
+ __swab64s(&b->qb_usage);
+ __swab64s(&b->qb_slv_ver);
+}
+
+/* Dump functions */
+void dump_ioo(struct obd_ioobj *ioo)
+{
+ CDEBUG(D_RPCTRACE,
+ "obd_ioobj: ioo_oid="DOSTID", ioo_max_brw=%#x, "
+	       "ioo_bufcnt=%d\n", POSTID(&ioo->ioo_oid), ioo->ioo_max_brw,
+ ioo->ioo_bufcnt);
+}
+EXPORT_SYMBOL(dump_ioo);
+
+void dump_rniobuf(struct niobuf_remote *nb)
+{
+ CDEBUG(D_RPCTRACE, "niobuf_remote: offset="LPU64", len=%d, flags=%x\n",
+ nb->offset, nb->len, nb->flags);
+}
+EXPORT_SYMBOL(dump_rniobuf);
+
+void dump_obdo(struct obdo *oa)
+{
+ __u32 valid = oa->o_valid;
+
+ CDEBUG(D_RPCTRACE, "obdo: o_valid = %08x\n", valid);
+ if (valid & OBD_MD_FLID)
+ CDEBUG(D_RPCTRACE, "obdo: id = "DOSTID"\n", POSTID(&oa->o_oi));
+ if (valid & OBD_MD_FLFID)
+ CDEBUG(D_RPCTRACE, "obdo: o_parent_seq = "LPX64"\n",
+ oa->o_parent_seq);
+ if (valid & OBD_MD_FLSIZE)
+ CDEBUG(D_RPCTRACE, "obdo: o_size = "LPD64"\n", oa->o_size);
+ if (valid & OBD_MD_FLMTIME)
+ CDEBUG(D_RPCTRACE, "obdo: o_mtime = "LPD64"\n", oa->o_mtime);
+ if (valid & OBD_MD_FLATIME)
+ CDEBUG(D_RPCTRACE, "obdo: o_atime = "LPD64"\n", oa->o_atime);
+ if (valid & OBD_MD_FLCTIME)
+ CDEBUG(D_RPCTRACE, "obdo: o_ctime = "LPD64"\n", oa->o_ctime);
+ if (valid & OBD_MD_FLBLOCKS) /* allocation of space */
+ CDEBUG(D_RPCTRACE, "obdo: o_blocks = "LPD64"\n", oa->o_blocks);
+ if (valid & OBD_MD_FLGRANT)
+ CDEBUG(D_RPCTRACE, "obdo: o_grant = "LPD64"\n", oa->o_grant);
+ if (valid & OBD_MD_FLBLKSZ)
+ CDEBUG(D_RPCTRACE, "obdo: o_blksize = %d\n", oa->o_blksize);
+ if (valid & (OBD_MD_FLTYPE | OBD_MD_FLMODE))
+ CDEBUG(D_RPCTRACE, "obdo: o_mode = %o\n",
+ oa->o_mode & ((valid & OBD_MD_FLTYPE ? S_IFMT : 0) |
+ (valid & OBD_MD_FLMODE ? ~S_IFMT : 0)));
+ if (valid & OBD_MD_FLUID)
+ CDEBUG(D_RPCTRACE, "obdo: o_uid = %u\n", oa->o_uid);
+ if (valid & OBD_MD_FLUID)
+ CDEBUG(D_RPCTRACE, "obdo: o_uid_h = %u\n", oa->o_uid_h);
+ if (valid & OBD_MD_FLGID)
+ CDEBUG(D_RPCTRACE, "obdo: o_gid = %u\n", oa->o_gid);
+ if (valid & OBD_MD_FLGID)
+ CDEBUG(D_RPCTRACE, "obdo: o_gid_h = %u\n", oa->o_gid_h);
+ if (valid & OBD_MD_FLFLAGS)
+ CDEBUG(D_RPCTRACE, "obdo: o_flags = %x\n", oa->o_flags);
+ if (valid & OBD_MD_FLNLINK)
+ CDEBUG(D_RPCTRACE, "obdo: o_nlink = %u\n", oa->o_nlink);
+ else if (valid & OBD_MD_FLCKSUM)
+ CDEBUG(D_RPCTRACE, "obdo: o_checksum (o_nlink) = %u\n",
+ oa->o_nlink);
+ if (valid & OBD_MD_FLGENER)
+ CDEBUG(D_RPCTRACE, "obdo: o_parent_oid = %x\n",
+ oa->o_parent_oid);
+ if (valid & OBD_MD_FLEPOCH)
+ CDEBUG(D_RPCTRACE, "obdo: o_ioepoch = "LPD64"\n",
+ oa->o_ioepoch);
+ if (valid & OBD_MD_FLFID) {
+ CDEBUG(D_RPCTRACE, "obdo: o_stripe_idx = %u\n",
+ oa->o_stripe_idx);
+ CDEBUG(D_RPCTRACE, "obdo: o_parent_ver = %x\n",
+ oa->o_parent_ver);
+ }
+ if (valid & OBD_MD_FLHANDLE)
+ CDEBUG(D_RPCTRACE, "obdo: o_handle = "LPD64"\n",
+ oa->o_handle.cookie);
+ if (valid & OBD_MD_FLCOOKIE)
+ CDEBUG(D_RPCTRACE, "obdo: o_lcookie = "
+ "(llog_cookie dumping not yet implemented)\n");
+}
+EXPORT_SYMBOL(dump_obdo);
+
+void dump_ost_body(struct ost_body *ob)
+{
+ dump_obdo(&ob->oa);
+}
+EXPORT_SYMBOL(dump_ost_body);
+
+void dump_rcs(__u32 *rc)
+{
+ CDEBUG(D_RPCTRACE, "rmf_rcs: %d\n", *rc);
+}
+EXPORT_SYMBOL(dump_rcs);
+
+static inline int req_ptlrpc_body_swabbed(struct ptlrpc_request *req)
+{
+ LASSERT(req->rq_reqmsg);
+
+ switch (req->rq_reqmsg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2:
+ return lustre_req_swabbed(req, MSG_PTLRPC_BODY_OFF);
+ default:
+ CERROR("bad lustre msg magic: %#08X\n",
+ req->rq_reqmsg->lm_magic);
+ }
+ return 0;
+}
+
+static inline int rep_ptlrpc_body_swabbed(struct ptlrpc_request *req)
+{
+ LASSERT(req->rq_repmsg);
+
+ switch (req->rq_repmsg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2:
+ return lustre_rep_swabbed(req, MSG_PTLRPC_BODY_OFF);
+ default:
+ /* uninitialized yet */
+ return 0;
+ }
+}
+
+void _debug_req(struct ptlrpc_request *req,
+ struct libcfs_debug_msg_data *msgdata,
+ const char *fmt, ... )
+{
+ int req_ok = req->rq_reqmsg != NULL;
+ int rep_ok = req->rq_repmsg != NULL;
+ lnet_nid_t nid = LNET_NID_ANY;
+ va_list args;
+
+ if (ptlrpc_req_need_swab(req)) {
+ req_ok = req_ok && req_ptlrpc_body_swabbed(req);
+ rep_ok = rep_ok && rep_ptlrpc_body_swabbed(req);
+ }
+
+ if (req->rq_import && req->rq_import->imp_connection)
+ nid = req->rq_import->imp_connection->c_peer.nid;
+ else if (req->rq_export && req->rq_export->exp_connection)
+ nid = req->rq_export->exp_connection->c_peer.nid;
+
+ va_start(args, fmt);
+ libcfs_debug_vmsg2(msgdata, fmt, args,
+ " req@%p x"LPU64"/t"LPD64"("LPD64") o%d->%s@%s:%d/%d"
+ " lens %d/%d e %d to %d dl "CFS_TIME_T" ref %d "
+ "fl "REQ_FLAGS_FMT"/%x/%x rc %d/%d\n",
+ req, req->rq_xid, req->rq_transno,
+ req_ok ? lustre_msg_get_transno(req->rq_reqmsg) : 0,
+ req_ok ? lustre_msg_get_opc(req->rq_reqmsg) : -1,
+ req->rq_import ?
+ req->rq_import->imp_obd->obd_name :
+ req->rq_export ?
+ req->rq_export->exp_client_uuid.uuid :
+ "<?>",
+ libcfs_nid2str(nid),
+ req->rq_request_portal, req->rq_reply_portal,
+ req->rq_reqlen, req->rq_replen,
+ req->rq_early_count, req->rq_timedout,
+ req->rq_deadline,
+ atomic_read(&req->rq_refcount),
+ DEBUG_REQ_FLAGS(req),
+ req_ok ? lustre_msg_get_flags(req->rq_reqmsg) : -1,
+ rep_ok ? lustre_msg_get_flags(req->rq_repmsg) : -1,
+ req->rq_status,
+ rep_ok ? lustre_msg_get_status(req->rq_repmsg) : -1);
+ va_end(args);
+}
+EXPORT_SYMBOL(_debug_req);
+
+void lustre_swab_lustre_capa(struct lustre_capa *c)
+{
+ lustre_swab_lu_fid(&c->lc_fid);
+ __swab64s (&c->lc_opc);
+ __swab64s (&c->lc_uid);
+ __swab64s (&c->lc_gid);
+ __swab32s (&c->lc_flags);
+ __swab32s (&c->lc_keyid);
+ __swab32s (&c->lc_timeout);
+ __swab32s (&c->lc_expiry);
+}
+EXPORT_SYMBOL(lustre_swab_lustre_capa);
+
+void lustre_swab_lustre_capa_key(struct lustre_capa_key *k)
+{
+ __swab64s (&k->lk_seq);
+ __swab32s (&k->lk_keyid);
+ CLASSERT(offsetof(typeof(*k), lk_padding) != 0);
+}
+EXPORT_SYMBOL(lustre_swab_lustre_capa_key);
+
+void lustre_swab_hsm_user_state(struct hsm_user_state *state)
+{
+ __swab32s(&state->hus_states);
+ __swab32s(&state->hus_archive_id);
+}
+EXPORT_SYMBOL(lustre_swab_hsm_user_state);
+
+void lustre_swab_hsm_state_set(struct hsm_state_set *hss)
+{
+ __swab32s(&hss->hss_valid);
+ __swab64s(&hss->hss_setmask);
+ __swab64s(&hss->hss_clearmask);
+ __swab32s(&hss->hss_archive_id);
+}
+EXPORT_SYMBOL(lustre_swab_hsm_state_set);
+
+void lustre_swab_hsm_extent(struct hsm_extent *extent)
+{
+ __swab64s(&extent->offset);
+ __swab64s(&extent->length);
+}
+
+void lustre_swab_hsm_current_action(struct hsm_current_action *action)
+{
+ __swab32s(&action->hca_state);
+ __swab32s(&action->hca_action);
+ lustre_swab_hsm_extent(&action->hca_location);
+}
+EXPORT_SYMBOL(lustre_swab_hsm_current_action);
+
+void lustre_swab_hsm_user_item(struct hsm_user_item *hui)
+{
+ lustre_swab_lu_fid(&hui->hui_fid);
+ lustre_swab_hsm_extent(&hui->hui_extent);
+}
+EXPORT_SYMBOL(lustre_swab_hsm_user_item);
+
+void lustre_swab_layout_intent(struct layout_intent *li)
+{
+ __swab32s(&li->li_opc);
+ __swab32s(&li->li_flags);
+ __swab64s(&li->li_start);
+ __swab64s(&li->li_end);
+}
+EXPORT_SYMBOL(lustre_swab_layout_intent);
+
+void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk)
+{
+ lustre_swab_lu_fid(&hpk->hpk_fid);
+ __swab64s(&hpk->hpk_cookie);
+ __swab64s(&hpk->hpk_extent.offset);
+ __swab64s(&hpk->hpk_extent.length);
+ __swab16s(&hpk->hpk_flags);
+ __swab16s(&hpk->hpk_errval);
+}
+EXPORT_SYMBOL(lustre_swab_hsm_progress_kernel);
+
+void lustre_swab_hsm_request(struct hsm_request *hr)
+{
+ __swab32s(&hr->hr_action);
+ __swab32s(&hr->hr_archive_id);
+ __swab64s(&hr->hr_flags);
+ __swab32s(&hr->hr_itemcount);
+ __swab32s(&hr->hr_data_len);
+}
+EXPORT_SYMBOL(lustre_swab_hsm_request);
+
+void lustre_swab_update_buf(struct update_buf *ub)
+{
+ __swab32s(&ub->ub_magic);
+ __swab32s(&ub->ub_count);
+}
+EXPORT_SYMBOL(lustre_swab_update_buf);
+
+void lustre_swab_update_reply_buf(struct update_reply *ur)
+{
+ int i;
+
+ __swab32s(&ur->ur_version);
+ __swab32s(&ur->ur_count);
+ for (i = 0; i < ur->ur_count; i++)
+ __swab32s(&ur->ur_lens[i]);
+}
+EXPORT_SYMBOL(lustre_swab_update_reply_buf);
+
+void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl)
+{
+ __swab64s(&msl->msl_flags);
+}
+EXPORT_SYMBOL(lustre_swab_swap_layouts);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pers.c b/drivers/staging/lustre/lustre/ptlrpc/pers.c
new file mode 100644
index 0000000..d926d2b
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/pers.c
@@ -0,0 +1,75 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_RPC
+
+#include <obd_support.h>
+#include <obd_class.h>
+#include <lustre_lib.h>
+#include <lustre_ha.h>
+#include <lustre_import.h>
+
+#include "ptlrpc_internal.h"
+
+
+void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
+ int mdidx)
+{
+ CLASSERT(PTLRPC_MAX_BRW_PAGES < LI_POISON);
+
+ LASSERT(mdidx < desc->bd_md_max_brw);
+ LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
+ LASSERT(!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV |
+ LNET_MD_PHYS)));
+
+ md->options |= LNET_MD_KIOV;
+ md->length = max(0, desc->bd_iov_count - mdidx * LNET_MAX_IOV);
+ md->length = min_t(unsigned int, LNET_MAX_IOV, md->length);
+ if (desc->bd_enc_iov)
+ md->start = &desc->bd_enc_iov[mdidx * LNET_MAX_IOV];
+ else
+ md->start = &desc->bd_iov[mdidx * LNET_MAX_IOV];
+}
+
+void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page,
+ int pageoffset, int len)
+{
+ lnet_kiov_t *kiov = &desc->bd_iov[desc->bd_iov_count];
+
+ kiov->kiov_page = page;
+ kiov->kiov_offset = pageoffset;
+ kiov->kiov_len = len;
+
+ desc->bd_iov_count++;
+}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
new file mode 100644
index 0000000..5dec771
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
@@ -0,0 +1,747 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ptlrpc/pinger.c
+ *
+ * Portal-RPC reconnection and replay operations, for use in recovery.
+ */
+
+#define DEBUG_SUBSYSTEM S_RPC
+
+#include <obd_support.h>
+#include <obd_class.h>
+#include "ptlrpc_internal.h"
+
+static int suppress_pings;
+CFS_MODULE_PARM(suppress_pings, "i", int, 0644, "Suppress pings");
+
+struct mutex pinger_mutex;
+static LIST_HEAD(pinger_imports);
+static struct list_head timeout_list = LIST_HEAD_INIT(timeout_list);
+
+int ptlrpc_pinger_suppress_pings(void)
+{
+ return suppress_pings;
+}
+EXPORT_SYMBOL(ptlrpc_pinger_suppress_pings);
+
+struct ptlrpc_request *
+ptlrpc_prep_ping(struct obd_import *imp)
+{
+ struct ptlrpc_request *req;
+
+ req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
+ LUSTRE_OBD_VERSION, OBD_PING);
+ if (req) {
+ ptlrpc_request_set_replen(req);
+ req->rq_no_resend = req->rq_no_delay = 1;
+ }
+ return req;
+}
+
+int ptlrpc_obd_ping(struct obd_device *obd)
+{
+ int rc;
+ struct ptlrpc_request *req;
+
+ req = ptlrpc_prep_ping(obd->u.cli.cl_import);
+ if (req == NULL)
+ return -ENOMEM;
+
+ req->rq_send_state = LUSTRE_IMP_FULL;
+
+ rc = ptlrpc_queue_wait(req);
+
+ ptlrpc_req_finished(req);
+
+ return rc;
+}
+EXPORT_SYMBOL(ptlrpc_obd_ping);
+
+int ptlrpc_ping(struct obd_import *imp)
+{
+ struct ptlrpc_request *req;
+
+ req = ptlrpc_prep_ping(imp);
+ if (req == NULL) {
+ CERROR("OOM trying to ping %s->%s\n",
+ imp->imp_obd->obd_uuid.uuid,
+ obd2cli_tgt(imp->imp_obd));
+ return -ENOMEM;
+ }
+
+ DEBUG_REQ(D_INFO, req, "pinging %s->%s",
+ imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
+ ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
+
+ return 0;
+}
+
+void ptlrpc_update_next_ping(struct obd_import *imp, int soon)
+{
+ int time = soon ? PING_INTERVAL_SHORT : PING_INTERVAL;
+ if (imp->imp_state == LUSTRE_IMP_DISCON) {
+ int dtime = max_t(int, CONNECTION_SWITCH_MIN,
+ AT_OFF ? 0 :
+ at_get(&imp->imp_at.iat_net_latency));
+ time = min(time, dtime);
+ }
+ imp->imp_next_ping = cfs_time_shift(time);
+}
+
+void ptlrpc_ping_import_soon(struct obd_import *imp)
+{
+ imp->imp_next_ping = cfs_time_current();
+}
+
+static inline int imp_is_deactive(struct obd_import *imp)
+{
+ return (imp->imp_deactive ||
+ OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_IMP_DEACTIVE));
+}
+
+static inline int ptlrpc_next_reconnect(struct obd_import *imp)
+{
+ if (imp->imp_server_timeout)
+ return cfs_time_shift(obd_timeout / 2);
+ else
+ return cfs_time_shift(obd_timeout);
+}
+
+static atomic_t suspend_timeouts = ATOMIC_INIT(0);
+static cfs_time_t suspend_wakeup_time = 0;
+
+cfs_duration_t pinger_check_timeout(cfs_time_t time)
+{
+ struct timeout_item *item;
+ cfs_time_t timeout = PING_INTERVAL;
+
+	/* The timeout list is kept sorted in order of increasing timeout */
+ mutex_lock(&pinger_mutex);
+ list_for_each_entry(item, &timeout_list, ti_chain) {
+ int ti_timeout = item->ti_timeout;
+ if (timeout > ti_timeout)
+ timeout = ti_timeout;
+ break;
+ }
+ mutex_unlock(&pinger_mutex);
+
+ return cfs_time_sub(cfs_time_add(time, cfs_time_seconds(timeout)),
+ cfs_time_current());
+}
+
+static wait_queue_head_t suspend_timeouts_waitq;
+
+cfs_time_t ptlrpc_suspend_wakeup_time(void)
+{
+ return suspend_wakeup_time;
+}
+
+void ptlrpc_deactivate_timeouts(struct obd_import *imp)
+{
+ /*XXX: disabled for now, will be replaced by adaptive timeouts */
+#if 0
+ if (imp->imp_no_timeout)
+ return;
+ imp->imp_no_timeout = 1;
+ atomic_inc(&suspend_timeouts);
+ CDEBUG(D_HA|D_WARNING, "deactivate timeouts %u\n",
+ atomic_read(&suspend_timeouts));
+#endif
+}
+
+void ptlrpc_activate_timeouts(struct obd_import *imp)
+{
+ /*XXX: disabled for now, will be replaced by adaptive timeouts */
+#if 0
+ if (!imp->imp_no_timeout)
+ return;
+ imp->imp_no_timeout = 0;
+ LASSERT(atomic_read(&suspend_timeouts) > 0);
+ if (atomic_dec_and_test(&suspend_timeouts)) {
+ suspend_wakeup_time = cfs_time_current();
+ wake_up(&suspend_timeouts_waitq);
+ }
+ CDEBUG(D_HA|D_WARNING, "activate timeouts %u\n",
+ atomic_read(&suspend_timeouts));
+#endif
+}
+
+int ptlrpc_check_suspend(void)
+{
+ if (atomic_read(&suspend_timeouts))
+ return 1;
+ return 0;
+}
+
+int ptlrpc_check_and_wait_suspend(struct ptlrpc_request *req)
+{
+ struct l_wait_info lwi;
+
+ if (atomic_read(&suspend_timeouts)) {
+ DEBUG_REQ(D_NET, req, "-- suspend %d regular timeout",
+ atomic_read(&suspend_timeouts));
+ lwi = LWI_INTR(NULL, NULL);
+ l_wait_event(suspend_timeouts_waitq,
+ atomic_read(&suspend_timeouts) == 0, &lwi);
+ DEBUG_REQ(D_NET, req, "-- recharge regular timeout");
+ return 1;
+ }
+ return 0;
+}
+
+
+static bool ir_up;
+
+void ptlrpc_pinger_ir_up(void)
+{
+ CDEBUG(D_HA, "IR up\n");
+ ir_up = true;
+}
+EXPORT_SYMBOL(ptlrpc_pinger_ir_up);
+
+void ptlrpc_pinger_ir_down(void)
+{
+ CDEBUG(D_HA, "IR down\n");
+ ir_up = false;
+}
+EXPORT_SYMBOL(ptlrpc_pinger_ir_down);
+
+static void ptlrpc_pinger_process_import(struct obd_import *imp,
+ unsigned long this_ping)
+{
+ int level;
+ int force;
+ int force_next;
+ int suppress;
+
+ spin_lock(&imp->imp_lock);
+
+ level = imp->imp_state;
+ force = imp->imp_force_verify;
+ force_next = imp->imp_force_next_verify;
+ /*
+ * This will be used below only if the import is "FULL".
+ */
+ suppress = ir_up && OCD_HAS_FLAG(&imp->imp_connect_data, PINGLESS);
+
+ imp->imp_force_verify = 0;
+
+ if (cfs_time_aftereq(imp->imp_next_ping - 5 * CFS_TICK, this_ping) &&
+ !force) {
+ spin_unlock(&imp->imp_lock);
+ return;
+ }
+
+ imp->imp_force_next_verify = 0;
+
+ spin_unlock(&imp->imp_lock);
+
+ CDEBUG(level == LUSTRE_IMP_FULL ? D_INFO : D_HA, "%s->%s: level %s/%u "
+ "force %u force_next %u deactive %u pingable %u suppress %u\n",
+ imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd),
+ ptlrpc_import_state_name(level), level, force, force_next,
+ imp->imp_deactive, imp->imp_pingable, suppress);
+
+ if (level == LUSTRE_IMP_DISCON && !imp_is_deactive(imp)) {
+ /* wait for a while before trying recovery again */
+ imp->imp_next_ping = ptlrpc_next_reconnect(imp);
+ if (!imp->imp_no_pinger_recover)
+ ptlrpc_initiate_recovery(imp);
+ } else if (level != LUSTRE_IMP_FULL ||
+ imp->imp_obd->obd_no_recov ||
+ imp_is_deactive(imp)) {
+ CDEBUG(D_HA, "%s->%s: not pinging (in recovery "
+ "or recovery disabled: %s)\n",
+ imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd),
+ ptlrpc_import_state_name(level));
+ } else if ((imp->imp_pingable && !suppress) || force_next || force) {
+ ptlrpc_ping(imp);
+ }
+}
+
+static int ptlrpc_pinger_main(void *arg)
+{
+ struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
+
+ /* Record that the thread is running */
+ thread_set_flags(thread, SVC_RUNNING);
+ wake_up(&thread->t_ctl_waitq);
+
+ /* And now, loop forever, pinging as needed. */
+ while (1) {
+ cfs_time_t this_ping = cfs_time_current();
+ struct l_wait_info lwi;
+ cfs_duration_t time_to_next_wake;
+ struct timeout_item *item;
+ struct list_head *iter;
+
+ mutex_lock(&pinger_mutex);
+ list_for_each_entry(item, &timeout_list, ti_chain) {
+ item->ti_cb(item, item->ti_cb_data);
+ }
+ list_for_each(iter, &pinger_imports) {
+ struct obd_import *imp =
+ list_entry(iter, struct obd_import,
+ imp_pinger_chain);
+
+ ptlrpc_pinger_process_import(imp, this_ping);
+ /* obd_timeout might have changed */
+ if (imp->imp_pingable && imp->imp_next_ping &&
+ cfs_time_after(imp->imp_next_ping,
+ cfs_time_add(this_ping,
+ cfs_time_seconds(PING_INTERVAL))))
+ ptlrpc_update_next_ping(imp, 0);
+ }
+ mutex_unlock(&pinger_mutex);
+ /* update memory usage info */
+ obd_update_maxusage();
+
+ /* Wait until the next ping time, or until we're stopped. */
+ time_to_next_wake = pinger_check_timeout(this_ping);
+ /* The ping sent by ptlrpc_send_rpc may get sent out
+ say .01 second after this.
+ ptlrpc_pinger_sending_on_import will then set the
+ next ping time to next_ping + .01 sec, which means
+ we will SKIP the next ping at next_ping, and the
+ ping will get sent 2 timeouts from now! Beware. */
+ CDEBUG(D_INFO, "next wakeup in "CFS_DURATION_T" ("
+ CFS_TIME_T")\n", time_to_next_wake,
+ cfs_time_add(this_ping,cfs_time_seconds(PING_INTERVAL)));
+ if (time_to_next_wake > 0) {
+ lwi = LWI_TIMEOUT(max_t(cfs_duration_t,
+ time_to_next_wake,
+ cfs_time_seconds(1)),
+ NULL, NULL);
+ l_wait_event(thread->t_ctl_waitq,
+ thread_is_stopping(thread) ||
+ thread_is_event(thread),
+ &lwi);
+ if (thread_test_and_clear_flags(thread, SVC_STOPPING)) {
+ break;
+ } else {
+ /* woken after adding import to reset timer */
+ thread_test_and_clear_flags(thread, SVC_EVENT);
+ }
+ }
+ }
+
+ thread_set_flags(thread, SVC_STOPPED);
+ wake_up(&thread->t_ctl_waitq);
+
+ CDEBUG(D_NET, "pinger thread exiting, process %d\n", current_pid());
+ return 0;
+}
+
+static struct ptlrpc_thread pinger_thread;
+
+int ptlrpc_start_pinger(void)
+{
+ struct l_wait_info lwi = { 0 };
+ int rc;
+
+ if (!thread_is_init(&pinger_thread) &&
+ !thread_is_stopped(&pinger_thread))
+ return -EALREADY;
+
+ init_waitqueue_head(&pinger_thread.t_ctl_waitq);
+ init_waitqueue_head(&suspend_timeouts_waitq);
+
+ strcpy(pinger_thread.t_name, "ll_ping");
+
+	/* The pinger runs as a kernel thread started via kthread_run(), so it
+	 * shares no address space or file table with the caller. */
+ rc = PTR_ERR(kthread_run(ptlrpc_pinger_main, &pinger_thread,
+ "%s", pinger_thread.t_name));
+ if (IS_ERR_VALUE(rc)) {
+ CERROR("cannot start thread: %d\n", rc);
+ return rc;
+ }
+ l_wait_event(pinger_thread.t_ctl_waitq,
+ thread_is_running(&pinger_thread), &lwi);
+
+ if (suppress_pings)
+ CWARN("Pings will be suppressed at the request of the "
+ "administrator. The configuration shall meet the "
+ "additional requirements described in the manual. "
+ "(Search for the \"suppress_pings\" kernel module "
+ "parameter.)\n");
+
+ return 0;
+}
+
+int ptlrpc_pinger_remove_timeouts(void);
+
+int ptlrpc_stop_pinger(void)
+{
+ struct l_wait_info lwi = { 0 };
+ int rc = 0;
+
+ if (!thread_is_init(&pinger_thread) &&
+ !thread_is_stopped(&pinger_thread))
+ return -EALREADY;
+
+ ptlrpc_pinger_remove_timeouts();
+ thread_set_flags(&pinger_thread, SVC_STOPPING);
+ wake_up(&pinger_thread.t_ctl_waitq);
+
+ l_wait_event(pinger_thread.t_ctl_waitq,
+ thread_is_stopped(&pinger_thread), &lwi);
+
+ return rc;
+}
+
+void ptlrpc_pinger_sending_on_import(struct obd_import *imp)
+{
+ ptlrpc_update_next_ping(imp, 0);
+}
+EXPORT_SYMBOL(ptlrpc_pinger_sending_on_import);
+
+void ptlrpc_pinger_commit_expected(struct obd_import *imp)
+{
+ ptlrpc_update_next_ping(imp, 1);
+ LASSERT(spin_is_locked(&imp->imp_lock));
+ /*
+ * Avoid reading stale imp_connect_data. When not sure if pings are
+ * expected or not on next connection, we assume they are not and force
+ * one anyway to guarantee the chance of updating
+ * imp_peer_committed_transno.
+ */
+ if (imp->imp_state != LUSTRE_IMP_FULL ||
+ OCD_HAS_FLAG(&imp->imp_connect_data, PINGLESS))
+ imp->imp_force_next_verify = 1;
+}
+
+int ptlrpc_pinger_add_import(struct obd_import *imp)
+{
+ if (!list_empty(&imp->imp_pinger_chain))
+ return -EALREADY;
+
+ mutex_lock(&pinger_mutex);
+ CDEBUG(D_HA, "adding pingable import %s->%s\n",
+ imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
+ /* if we add to pinger we want recovery on this import */
+ imp->imp_obd->obd_no_recov = 0;
+ ptlrpc_update_next_ping(imp, 0);
+ /* XXX sort, blah blah */
+ list_add_tail(&imp->imp_pinger_chain, &pinger_imports);
+ class_import_get(imp);
+
+ ptlrpc_pinger_wake_up();
+ mutex_unlock(&pinger_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(ptlrpc_pinger_add_import);
+
+int ptlrpc_pinger_del_import(struct obd_import *imp)
+{
+ if (list_empty(&imp->imp_pinger_chain))
+ return -ENOENT;
+
+ mutex_lock(&pinger_mutex);
+ list_del_init(&imp->imp_pinger_chain);
+ CDEBUG(D_HA, "removing pingable import %s->%s\n",
+ imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
+ /* if we remove from pinger we don't want recovery on this import */
+ imp->imp_obd->obd_no_recov = 1;
+ class_import_put(imp);
+ mutex_unlock(&pinger_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(ptlrpc_pinger_del_import);
+
+/**
+ * Register a timeout callback on the pinger's timeout list; the callback
+ * will be called when the timeout expires.
+ */
+struct timeout_item* ptlrpc_new_timeout(int time, enum timeout_event event,
+ timeout_cb_t cb, void *data)
+{
+ struct timeout_item *ti;
+
+ OBD_ALLOC_PTR(ti);
+ if (!ti)
+ return(NULL);
+
+ INIT_LIST_HEAD(&ti->ti_obd_list);
+ INIT_LIST_HEAD(&ti->ti_chain);
+ ti->ti_timeout = time;
+ ti->ti_event = event;
+ ti->ti_cb = cb;
+ ti->ti_cb_data = data;
+
+ return ti;
+}
+
+/**
+ * Register a timeout event with the pinger thread.
+ * Note: the timeout list is kept sorted in order of increasing timeout.
+ */
+static struct timeout_item*
+ptlrpc_pinger_register_timeout(int time, enum timeout_event event,
+ timeout_cb_t cb, void *data)
+{
+ struct timeout_item *item, *tmp;
+
+ LASSERT(mutex_is_locked(&pinger_mutex));
+
+ list_for_each_entry(item, &timeout_list, ti_chain)
+ if (item->ti_event == event)
+ goto out;
+
+ item = ptlrpc_new_timeout(time, event, cb, data);
+ if (item) {
+ list_for_each_entry_reverse(tmp, &timeout_list, ti_chain) {
+ if (tmp->ti_timeout < time) {
+ list_add(&item->ti_chain, &tmp->ti_chain);
+ goto out;
+ }
+ }
+ list_add(&item->ti_chain, &timeout_list);
+ }
+out:
+ return item;
+}
+
+/* Add a client_obd to the timeout event list; when the timeout (@time)
+ * expires, the callback (@cb) will be called.
+ */
+int ptlrpc_add_timeout_client(int time, enum timeout_event event,
+ timeout_cb_t cb, void *data,
+ struct list_head *obd_list)
+{
+ struct timeout_item *ti;
+
+ mutex_lock(&pinger_mutex);
+ ti = ptlrpc_pinger_register_timeout(time, event, cb, data);
+ if (!ti) {
+ mutex_unlock(&pinger_mutex);
+ return (-EINVAL);
+ }
+ list_add(obd_list, &ti->ti_obd_list);
+ mutex_unlock(&pinger_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(ptlrpc_add_timeout_client);
+
+int ptlrpc_del_timeout_client(struct list_head *obd_list,
+ enum timeout_event event)
+{
+ struct timeout_item *ti = NULL, *item;
+
+ if (list_empty(obd_list))
+ return 0;
+ mutex_lock(&pinger_mutex);
+ list_del_init(obd_list);
+ /**
+	 * If no obds remain attached to the timeout event
+	 * list, remove this timeout event from the pinger.
+ */
+ list_for_each_entry(item, &timeout_list, ti_chain) {
+ if (item->ti_event == event) {
+ ti = item;
+ break;
+ }
+ }
+	LASSERTF(ti != NULL, "ti is NULL!\n");
+ if (list_empty(&ti->ti_obd_list)) {
+ list_del(&ti->ti_chain);
+ OBD_FREE_PTR(ti);
+ }
+ mutex_unlock(&pinger_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(ptlrpc_del_timeout_client);
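+
+/*
+ * Illustrative usage sketch (not part of the patch itself): a client that
+ * wants a periodic callback from the pinger pairs the two helpers above.
+ * The names my_timeout_cb, my_data, my_obd_list and MY_TIMEOUT_EVENT are
+ * hypothetical placeholders for a timeout_cb_t callback, its private data,
+ * a list_head embedded in the client, and an enum timeout_event value; the
+ * callback signature is inferred from how ti_cb is invoked in
+ * ptlrpc_pinger_main() above.
+ *
+ *	static int my_timeout_cb(struct timeout_item *ti, void *data)
+ *	{
+ *		(called from the pinger thread roughly every @time seconds)
+ *		return 0;
+ *	}
+ *
+ *	INIT_LIST_HEAD(&my_obd_list);
+ *	ptlrpc_add_timeout_client(30, MY_TIMEOUT_EVENT, my_timeout_cb,
+ *				  my_data, &my_obd_list);
+ *	...
+ *	ptlrpc_del_timeout_client(&my_obd_list, MY_TIMEOUT_EVENT);
+ */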
+
+int ptlrpc_pinger_remove_timeouts(void)
+{
+ struct timeout_item *item, *tmp;
+
+ mutex_lock(&pinger_mutex);
+ list_for_each_entry_safe(item, tmp, &timeout_list, ti_chain) {
+ LASSERT(list_empty(&item->ti_obd_list));
+ list_del(&item->ti_chain);
+ OBD_FREE_PTR(item);
+ }
+ mutex_unlock(&pinger_mutex);
+ return 0;
+}
+
+void ptlrpc_pinger_wake_up(void)
+{
+ thread_add_flags(&pinger_thread, SVC_EVENT);
+ wake_up(&pinger_thread.t_ctl_waitq);
+}
+
+/* Ping evictor thread */
+#define PET_READY 1
+#define PET_TERMINATE 2
+
+static int pet_refcount = 0;
+static int pet_state;
+static wait_queue_head_t pet_waitq;
+LIST_HEAD(pet_list);
+static DEFINE_SPINLOCK(pet_lock);
+
+int ping_evictor_wake(struct obd_export *exp)
+{
+ struct obd_device *obd;
+
+ spin_lock(&pet_lock);
+ if (pet_state != PET_READY) {
+ /* eventually the new obd will call here again. */
+ spin_unlock(&pet_lock);
+ return 1;
+ }
+
+ obd = class_exp2obd(exp);
+ if (list_empty(&obd->obd_evict_list)) {
+ class_incref(obd, "evictor", obd);
+ list_add(&obd->obd_evict_list, &pet_list);
+ }
+ spin_unlock(&pet_lock);
+
+ wake_up(&pet_waitq);
+ return 0;
+}
+
+static int ping_evictor_main(void *arg)
+{
+ struct obd_device *obd;
+ struct obd_export *exp;
+ struct l_wait_info lwi = { 0 };
+ time_t expire_time;
+
+ unshare_fs_struct();
+
+ CDEBUG(D_HA, "Starting Ping Evictor\n");
+ pet_state = PET_READY;
+ while (1) {
+ l_wait_event(pet_waitq, (!list_empty(&pet_list)) ||
+ (pet_state == PET_TERMINATE), &lwi);
+
+		/* loop until all obds have been removed */
+ if ((pet_state == PET_TERMINATE) && list_empty(&pet_list))
+ break;
+
+		/* We only get here when pet_list is non-empty, and entries are
+		 * removed from it only at the bottom of this loop, so taking
+		 * pet_lock here is not strictly necessary. */
+ spin_lock(&pet_lock);
+ obd = list_entry(pet_list.next, struct obd_device,
+ obd_evict_list);
+ spin_unlock(&pet_lock);
+
+ expire_time = cfs_time_current_sec() - PING_EVICT_TIMEOUT;
+
+ CDEBUG(D_HA, "evicting all exports of obd %s older than %ld\n",
+ obd->obd_name, expire_time);
+
+ /* Exports can't be deleted out of the list while we hold
+ * the obd lock (class_unlink_export), which means we can't
+ * lose the last ref on the export. If they've already been
+ * removed from the list, we won't find them here. */
+ spin_lock(&obd->obd_dev_lock);
+ while (!list_empty(&obd->obd_exports_timed)) {
+ exp = list_entry(obd->obd_exports_timed.next,
+ struct obd_export,
+ exp_obd_chain_timed);
+ if (expire_time > exp->exp_last_request_time) {
+ class_export_get(exp);
+ spin_unlock(&obd->obd_dev_lock);
+ LCONSOLE_WARN("%s: haven't heard from client %s"
+ " (at %s) in %ld seconds. I think"
+ " it's dead, and I am evicting"
+ " it. exp %p, cur %ld expire %ld"
+ " last %ld\n",
+ obd->obd_name,
+ obd_uuid2str(&exp->exp_client_uuid),
+ obd_export_nid2str(exp),
+ (long)(cfs_time_current_sec() -
+ exp->exp_last_request_time),
+ exp, (long)cfs_time_current_sec(),
+ (long)expire_time,
+ (long)exp->exp_last_request_time);
+ CDEBUG(D_HA, "Last request was at %ld\n",
+ exp->exp_last_request_time);
+ class_fail_export(exp);
+ class_export_put(exp);
+ spin_lock(&obd->obd_dev_lock);
+ } else {
+ /* List is sorted, so everyone below is ok */
+ break;
+ }
+ }
+ spin_unlock(&obd->obd_dev_lock);
+
+ spin_lock(&pet_lock);
+ list_del_init(&obd->obd_evict_list);
+ spin_unlock(&pet_lock);
+
+ class_decref(obd, "evictor", obd);
+ }
+ CDEBUG(D_HA, "Exiting Ping Evictor\n");
+
+ return 0;
+}
+
+void ping_evictor_start(void)
+{
+ struct task_struct *task;
+
+ if (++pet_refcount > 1)
+ return;
+
+ init_waitqueue_head(&pet_waitq);
+
+ task = kthread_run(ping_evictor_main, NULL, "ll_evictor");
+ if (IS_ERR(task)) {
+ pet_refcount--;
+ CERROR("Cannot start ping evictor thread: %ld\n",
+ PTR_ERR(task));
+ }
+}
+EXPORT_SYMBOL(ping_evictor_start);
+
+void ping_evictor_stop(void)
+{
+ if (--pet_refcount > 0)
+ return;
+
+ pet_state = PET_TERMINATE;
+ wake_up(&pet_waitq);
+}
+EXPORT_SYMBOL(ping_evictor_stop);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
new file mode 100644
index 0000000..ab36347
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
@@ -0,0 +1,302 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+/* Intramodule declarations for ptlrpc. */
+
+#ifndef PTLRPC_INTERNAL_H
+#define PTLRPC_INTERNAL_H
+
+#include "../ldlm/ldlm_internal.h"
+
+struct ldlm_namespace;
+struct obd_import;
+struct ldlm_res_id;
+struct ptlrpc_request_set;
+extern int test_req_buffer_pressure;
+extern struct mutex ptlrpc_all_services_mutex;
+
+int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait);
+/* ptlrpcd.c */
+int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc);
+
+/* client.c */
+struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
+ unsigned type, unsigned portal);
+void ptlrpc_init_xid(void);
+
+/* events.c */
+int ptlrpc_init_portals(void);
+void ptlrpc_exit_portals(void);
+
+void ptlrpc_request_handle_notconn(struct ptlrpc_request *);
+void lustre_assert_wire_constants(void);
+int ptlrpc_import_in_recovery(struct obd_import *imp);
+int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt);
+void ptlrpc_handle_failed_import(struct obd_import *imp);
+int ptlrpc_replay_next(struct obd_import *imp, int *inflight);
+void ptlrpc_initiate_recovery(struct obd_import *imp);
+
+int lustre_unpack_req_ptlrpc_body(struct ptlrpc_request *req, int offset);
+int lustre_unpack_rep_ptlrpc_body(struct ptlrpc_request *req, int offset);
+
+#ifdef LPROCFS
+void ptlrpc_lprocfs_register_service(struct proc_dir_entry *proc_entry,
+ struct ptlrpc_service *svc);
+void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc);
+void ptlrpc_lprocfs_rpc_sent(struct ptlrpc_request *req, long amount);
+void ptlrpc_lprocfs_do_request_stat (struct ptlrpc_request *req,
+ long q_usec, long work_usec);
+#else
+#define ptlrpc_lprocfs_register_service(params...) do{}while(0)
+#define ptlrpc_lprocfs_unregister_service(params...) do{}while(0)
+#define ptlrpc_lprocfs_rpc_sent(params...) do{}while(0)
+#define ptlrpc_lprocfs_do_request_stat(params...) do{}while(0)
+#endif /* LPROCFS */
+
+/* NRS */
+
+/**
+ * NRS core object.
+ *
+ * Holds NRS core fields.
+ */
+struct nrs_core {
+ /**
+ * Protects nrs_core::nrs_policies, serializes external policy
+ * registration/unregistration, and NRS core lprocfs operations.
+ */
+ struct mutex nrs_mutex;
+	/* XXX: This is just for liblustre. Remove the #if defined directive
+	 * when the "cfs_" prefix is dropped from cfs_list_head. */
+ /**
+ * List of all policy descriptors registered with NRS core; protected
+ * by nrs_core::nrs_mutex.
+ */
+ struct list_head nrs_policies;
+
+};
+
+int ptlrpc_service_nrs_setup(struct ptlrpc_service *svc);
+void ptlrpc_service_nrs_cleanup(struct ptlrpc_service *svc);
+
+void ptlrpc_nrs_req_initialize(struct ptlrpc_service_part *svcpt,
+ struct ptlrpc_request *req, bool hp);
+void ptlrpc_nrs_req_finalize(struct ptlrpc_request *req);
+void ptlrpc_nrs_req_stop_nolock(struct ptlrpc_request *req);
+void ptlrpc_nrs_req_add(struct ptlrpc_service_part *svcpt,
+ struct ptlrpc_request *req, bool hp);
+
+struct ptlrpc_request *
+ptlrpc_nrs_req_get_nolock0(struct ptlrpc_service_part *svcpt, bool hp,
+ bool peek, bool force);
+
+static inline struct ptlrpc_request *
+ptlrpc_nrs_req_get_nolock(struct ptlrpc_service_part *svcpt, bool hp,
+ bool force)
+{
+ return ptlrpc_nrs_req_get_nolock0(svcpt, hp, false, force);
+}
+
+static inline struct ptlrpc_request *
+ptlrpc_nrs_req_peek_nolock(struct ptlrpc_service_part *svcpt, bool hp)
+{
+ return ptlrpc_nrs_req_get_nolock0(svcpt, hp, true, false);
+}
+
+void ptlrpc_nrs_req_del_nolock(struct ptlrpc_request *req);
+bool ptlrpc_nrs_req_pending_nolock(struct ptlrpc_service_part *svcpt, bool hp);
+
+int ptlrpc_nrs_policy_control(const struct ptlrpc_service *svc,
+ enum ptlrpc_nrs_queue_type queue, char *name,
+ enum ptlrpc_nrs_ctl opc, bool single, void *arg);
+
+int ptlrpc_nrs_init(void);
+void ptlrpc_nrs_fini(void);
+
+static inline bool nrs_svcpt_has_hp(const struct ptlrpc_service_part *svcpt)
+{
+ return svcpt->scp_nrs_hp != NULL;
+}
+
+static inline bool nrs_svc_has_hp(const struct ptlrpc_service *svc)
+{
+ /**
+ * If the first service partition has an HP NRS head, all service
+ * partitions will.
+ */
+ return nrs_svcpt_has_hp(svc->srv_parts[0]);
+}
+
+static inline
+struct ptlrpc_nrs *nrs_svcpt2nrs(struct ptlrpc_service_part *svcpt, bool hp)
+{
+ LASSERT(ergo(hp, nrs_svcpt_has_hp(svcpt)));
+ return hp ? svcpt->scp_nrs_hp : &svcpt->scp_nrs_reg;
+}
+
+static inline int nrs_pol2cptid(const struct ptlrpc_nrs_policy *policy)
+{
+ return policy->pol_nrs->nrs_svcpt->scp_cpt;
+}
+
+static inline
+struct ptlrpc_service *nrs_pol2svc(struct ptlrpc_nrs_policy *policy)
+{
+ return policy->pol_nrs->nrs_svcpt->scp_service;
+}
+
+static inline
+struct ptlrpc_service_part *nrs_pol2svcpt(struct ptlrpc_nrs_policy *policy)
+{
+ return policy->pol_nrs->nrs_svcpt;
+}
+
+static inline
+struct cfs_cpt_table *nrs_pol2cptab(struct ptlrpc_nrs_policy *policy)
+{
+ return nrs_pol2svc(policy)->srv_cptable;
+}
+
+static inline struct ptlrpc_nrs_resource *
+nrs_request_resource(struct ptlrpc_nrs_request *nrq)
+{
+ LASSERT(nrq->nr_initialized);
+ LASSERT(!nrq->nr_finalized);
+
+ return nrq->nr_res_ptrs[nrq->nr_res_idx];
+}
+
+static inline
+struct ptlrpc_nrs_policy *nrs_request_policy(struct ptlrpc_nrs_request *nrq)
+{
+ return nrs_request_resource(nrq)->res_policy;
+}
+
+#define NRS_LPROCFS_QUANTUM_NAME_REG "reg_quantum:"
+#define NRS_LPROCFS_QUANTUM_NAME_HP "hp_quantum:"
+
+/**
+ * the maximum size of nrs_crrn_client::cc_quantum and nrs_orr_data::od_quantum.
+ */
+#define LPROCFS_NRS_QUANTUM_MAX 65535
+
+/**
+ * Max valid command string is the size of the labels, plus "65535" twice, plus
+ * a separating space character.
+ */
+#define LPROCFS_NRS_WR_QUANTUM_MAX_CMD \
+ sizeof(NRS_LPROCFS_QUANTUM_NAME_REG __stringify(LPROCFS_NRS_QUANTUM_MAX) " " \
+ NRS_LPROCFS_QUANTUM_NAME_HP __stringify(LPROCFS_NRS_QUANTUM_MAX))
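+
+/*
+ * For example, with the definitions above the longest accepted quantum
+ * command expands to the string "reg_quantum:65535 hp_quantum:65535",
+ * which is exactly the bound this macro is sized for.
+ */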
+
+/* recovd_thread.c */
+
+int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink);
+
+/* pers.c */
+void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
+ int mdcnt);
+void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page,
+ int pageoffset, int len);
+
+/* pack_generic.c */
+struct ptlrpc_reply_state *
+lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt);
+void lustre_put_emerg_rs(struct ptlrpc_reply_state *rs);
+
+/* pinger.c */
+int ptlrpc_start_pinger(void);
+int ptlrpc_stop_pinger(void);
+void ptlrpc_pinger_sending_on_import(struct obd_import *imp);
+void ptlrpc_pinger_commit_expected(struct obd_import *imp);
+void ptlrpc_pinger_wake_up(void);
+void ptlrpc_ping_import_soon(struct obd_import *imp);
+int ping_evictor_wake(struct obd_export *exp);
+
+/* sec_null.c */
+int sptlrpc_null_init(void);
+void sptlrpc_null_fini(void);
+
+/* sec_plain.c */
+int sptlrpc_plain_init(void);
+void sptlrpc_plain_fini(void);
+
+/* sec_bulk.c */
+int sptlrpc_enc_pool_init(void);
+void sptlrpc_enc_pool_fini(void);
+int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v);
+
+/* sec_lproc.c */
+int sptlrpc_lproc_init(void);
+void sptlrpc_lproc_fini(void);
+
+/* sec_gc.c */
+int sptlrpc_gc_init(void);
+void sptlrpc_gc_fini(void);
+
+/* sec_config.c */
+void sptlrpc_conf_choose_flavor(enum lustre_sec_part from,
+ enum lustre_sec_part to,
+ struct obd_uuid *target,
+ lnet_nid_t nid,
+ struct sptlrpc_flavor *sf);
+int sptlrpc_conf_init(void);
+void sptlrpc_conf_fini(void);
+
+/* sec.c */
+int sptlrpc_init(void);
+void sptlrpc_fini(void);
+
+static inline int ll_rpc_recoverable_error(int rc)
+{
+ return (rc == -ENOTCONN || rc == -ENODEV);
+}
+
+static inline int tgt_mod_init(void)
+{
+ return 0;
+}
+
+static inline void tgt_mod_exit(void)
+{
+ return;
+}
+
+static inline void ptlrpc_reqset_put(struct ptlrpc_request_set *set)
+{
+ if (atomic_dec_and_test(&set->set_refcount))
+ OBD_FREE_PTR(set);
+}
+#endif /* PTLRPC_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
new file mode 100644
index 0000000..419e634
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
@@ -0,0 +1,155 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_RPC
+
+
+#include <obd_support.h>
+#include <obd_class.h>
+#include <lustre_net.h>
+#include <lustre_req_layout.h>
+
+#include "ptlrpc_internal.h"
+
+extern spinlock_t ptlrpc_last_xid_lock;
+#if RS_DEBUG
+extern spinlock_t ptlrpc_rs_debug_lock;
+#endif
+extern struct mutex pinger_mutex;
+extern struct mutex ptlrpcd_mutex;
+
+__init int ptlrpc_init(void)
+{
+ int rc, cleanup_phase = 0;
+
+ lustre_assert_wire_constants();
+#if RS_DEBUG
+ spin_lock_init(&ptlrpc_rs_debug_lock);
+#endif
+ mutex_init(&ptlrpc_all_services_mutex);
+ mutex_init(&pinger_mutex);
+ mutex_init(&ptlrpcd_mutex);
+ ptlrpc_init_xid();
+
+ rc = req_layout_init();
+ if (rc)
+ return rc;
+
+ rc = ptlrpc_hr_init();
+ if (rc)
+ return rc;
+
+ cleanup_phase = 1;
+
+ rc = ptlrpc_init_portals();
+ if (rc)
+ GOTO(cleanup, rc);
+ cleanup_phase = 2;
+
+ rc = ptlrpc_connection_init();
+ if (rc)
+ GOTO(cleanup, rc);
+ cleanup_phase = 3;
+
+ ptlrpc_put_connection_superhack = ptlrpc_connection_put;
+
+ rc = ptlrpc_start_pinger();
+ if (rc)
+ GOTO(cleanup, rc);
+ cleanup_phase = 4;
+
+ rc = ldlm_init();
+ if (rc)
+ GOTO(cleanup, rc);
+ cleanup_phase = 5;
+
+ rc = sptlrpc_init();
+ if (rc)
+ GOTO(cleanup, rc);
+
+ cleanup_phase = 7;
+ rc = ptlrpc_nrs_init();
+ if (rc)
+ GOTO(cleanup, rc);
+
+ cleanup_phase = 8;
+ rc = tgt_mod_init();
+ if (rc)
+ GOTO(cleanup, rc);
+ return 0;
+
+cleanup:
+ switch(cleanup_phase) {
+ case 8:
+ ptlrpc_nrs_fini();
+ case 7:
+ sptlrpc_fini();
+ case 5:
+ ldlm_exit();
+ case 4:
+ ptlrpc_stop_pinger();
+ case 3:
+ ptlrpc_connection_fini();
+ case 2:
+ ptlrpc_exit_portals();
+ case 1:
+ ptlrpc_hr_fini();
+ req_layout_fini();
+ default: ;
+ }
+
+ return rc;
+}
+
+static void __exit ptlrpc_exit(void)
+{
+ tgt_mod_exit();
+ ptlrpc_nrs_fini();
+ sptlrpc_fini();
+ ldlm_exit();
+ ptlrpc_stop_pinger();
+ ptlrpc_exit_portals();
+ ptlrpc_hr_fini();
+ ptlrpc_connection_fini();
+}
+
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("Lustre Request Processor and Lock Management");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0.0");
+
+module_init(ptlrpc_init);
+module_exit(ptlrpc_exit);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
new file mode 100644
index 0000000..89c9be9
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -0,0 +1,812 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ptlrpc/ptlrpcd.c
+ */
+
+/** \defgroup ptlrpcd PortalRPC daemon
+ *
+ * ptlrpcd is a special thread with its own set where other users might add
+ * requests when they don't want to wait for their completion.
+ * PtlRPCD will take care of sending such requests and then processing their
+ * replies and calling completion callbacks as necessary.
+ * The callbacks are called directly from ptlrpcd context.
+ * It is important to never significantly block (esp. on RPCs!) within such a
+ * completion handler or a deadlock might occur where ptlrpcd enters some
+ * callback that attempts to send another RPC and wait for it to return,
+ * during which time ptlrpcd is completely blocked, so e.g. if the import
+ * fails, recovery cannot progress because connection requests are also
+ * sent by ptlrpcd.
+ *
+ * @{
+ */
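The blocking rule above is easiest to see from a completion callback. Below is a minimal sketch, not part of the patch, assuming the usual rq_interpret_reply callback signature and the rq_status field used by the client code elsewhere in this series; a ptlrpcd-safe handler only records state and returns, and never issues or waits on another RPC.

    static int sample_async_interpret(const struct lu_env *env,
                                      struct ptlrpc_request *req,
                                      void *args, int rc)
    {
            /* safe from ptlrpcd context: read the reply status and
             * remember the result for whoever queued the request */
            if (rc == 0)
                    rc = req->rq_status;
            /* unsafe here: ptlrpc_queue_wait(), l_wait_event() on another
             * RPC, or anything else that blocks the ptlrpcd thread */
            return rc;
    }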
+
+#define DEBUG_SUBSYSTEM S_RPC
+
+# include <linux/libcfs/libcfs.h>
+
+#include <lustre_net.h>
+# include <lustre_lib.h>
+
+#include <lustre_ha.h>
+#include <obd_class.h> /* for obd_zombie */
+#include <obd_support.h> /* for OBD_FAIL_CHECK */
+#include <cl_object.h> /* cl_env_{get,put}() */
+#include <lprocfs_status.h>
+
+#include "ptlrpc_internal.h"
+
+struct ptlrpcd {
+ int pd_size;
+ int pd_index;
+ int pd_nthreads;
+ struct ptlrpcd_ctl pd_thread_rcv;
+ struct ptlrpcd_ctl pd_threads[0];
+};
+
+static int max_ptlrpcds;
+CFS_MODULE_PARM(max_ptlrpcds, "i", int, 0644,
+ "Max ptlrpcd thread count to be started.");
+
+static int ptlrpcd_bind_policy = PDB_POLICY_PAIR;
+CFS_MODULE_PARM(ptlrpcd_bind_policy, "i", int, 0644,
+ "Ptlrpcd threads binding mode.");
+static struct ptlrpcd *ptlrpcds;
+
+struct mutex ptlrpcd_mutex;
+static int ptlrpcd_users = 0;
+
+void ptlrpcd_wake(struct ptlrpc_request *req)
+{
+ struct ptlrpc_request_set *rq_set = req->rq_set;
+
+ LASSERT(rq_set != NULL);
+
+ wake_up(&rq_set->set_waitq);
+}
+EXPORT_SYMBOL(ptlrpcd_wake);
+
+static struct ptlrpcd_ctl *
+ptlrpcd_select_pc(struct ptlrpc_request *req, pdl_policy_t policy, int index)
+{
+ int idx = 0;
+
+ if (req != NULL && req->rq_send_state != LUSTRE_IMP_FULL)
+ return &ptlrpcds->pd_thread_rcv;
+
+ switch (policy) {
+ case PDL_POLICY_SAME:
+ idx = smp_processor_id() % ptlrpcds->pd_nthreads;
+ break;
+ case PDL_POLICY_LOCAL:
+ /* Before CPU partition patches available, process it the same
+ * as "PDL_POLICY_ROUND". */
+# ifdef CFS_CPU_MODE_NUMA
+# warning "fix this code to use new CPU partition APIs"
+# endif
+ /* Fall through to PDL_POLICY_ROUND until the CPU partition
+ * patches are available. */
+ index = -1;
+ case PDL_POLICY_PREFERRED:
+ if (index >= 0 && index < num_online_cpus()) {
+ idx = index % ptlrpcds->pd_nthreads;
+ break;
+ }
+ /* Fall through to PDL_POLICY_ROUND for bad index. */
+ default:
+ /* Fall through to PDL_POLICY_ROUND for unknown policy. */
+ case PDL_POLICY_ROUND:
+ /* We do not care whether the load balancing is strict. */
+ idx = ptlrpcds->pd_index + 1;
+ if (idx == smp_processor_id())
+ idx++;
+ idx %= ptlrpcds->pd_nthreads;
+ ptlrpcds->pd_index = idx;
+ break;
+ }
+
+ return &ptlrpcds->pd_threads[idx];
+}
+
+/**
+ * Move all requests from an existing request set to the ptlrpcd queue.
+ * All requests from the set must be in phase RQ_PHASE_NEW.
+ */
+void ptlrpcd_add_rqset(struct ptlrpc_request_set *set)
+{
+ struct list_head *tmp, *pos;
+ struct ptlrpcd_ctl *pc;
+ struct ptlrpc_request_set *new;
+ int count, i;
+
+ pc = ptlrpcd_select_pc(NULL, PDL_POLICY_LOCAL, -1);
+ new = pc->pc_set;
+
+ list_for_each_safe(pos, tmp, &set->set_requests) {
+ struct ptlrpc_request *req =
+ list_entry(pos, struct ptlrpc_request,
+ rq_set_chain);
+
+ LASSERT(req->rq_phase == RQ_PHASE_NEW);
+ req->rq_set = new;
+ req->rq_queued_time = cfs_time_current();
+ }
+
+ spin_lock(&new->set_new_req_lock);
+ list_splice_init(&set->set_requests, &new->set_new_requests);
+ i = atomic_read(&set->set_remaining);
+ count = atomic_add_return(i, &new->set_new_count);
+ atomic_set(&set->set_remaining, 0);
+ spin_unlock(&new->set_new_req_lock);
+ if (count == i) {
+ wake_up(&new->set_waitq);
+
+ /* XXX: It may be unnecessary to wake up all the partners. But to
+ * guarantee the async RPCs can be processed ASAP, we have
+ * no better choice. It may be fixed in the future. */
+ for (i = 0; i < pc->pc_npartners; i++)
+ wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
+ }
+}
+EXPORT_SYMBOL(ptlrpcd_add_rqset);
+
+/**
+ * Return transferred RPCs count.
+ */
+static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des,
+ struct ptlrpc_request_set *src)
+{
+ struct list_head *tmp, *pos;
+ struct ptlrpc_request *req;
+ int rc = 0;
+
+ spin_lock(&src->set_new_req_lock);
+ if (likely(!list_empty(&src->set_new_requests))) {
+ list_for_each_safe(pos, tmp, &src->set_new_requests) {
+ req = list_entry(pos, struct ptlrpc_request,
+ rq_set_chain);
+ req->rq_set = des;
+ }
+ list_splice_init(&src->set_new_requests,
+ &des->set_requests);
+ rc = atomic_read(&src->set_new_count);
+ atomic_add(rc, &des->set_remaining);
+ atomic_set(&src->set_new_count, 0);
+ }
+ spin_unlock(&src->set_new_req_lock);
+ return rc;
+}
+
+/**
+ * Requests that are added to the ptlrpcd queue are sent via
+ * ptlrpcd_check->ptlrpc_check_set().
+ */
+void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx)
+{
+ struct ptlrpcd_ctl *pc;
+
+ if (req->rq_reqmsg)
+ lustre_msg_set_jobid(req->rq_reqmsg, NULL);
+
+ spin_lock(&req->rq_lock);
+ if (req->rq_invalid_rqset) {
+ struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
+ back_to_sleep, NULL);
+
+ req->rq_invalid_rqset = 0;
+ spin_unlock(&req->rq_lock);
+ l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
+ } else if (req->rq_set) {
+ /* If we have a valid "rq_set", just reuse it to avoid double
+ * linking. */
+ LASSERT(req->rq_phase == RQ_PHASE_NEW);
+ LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);
+
+ /* ptlrpc_check_set will decrease the count */
+ atomic_inc(&req->rq_set->set_remaining);
+ spin_unlock(&req->rq_lock);
+ wake_up(&req->rq_set->set_waitq);
+ return;
+ } else {
+ spin_unlock(&req->rq_lock);
+ }
+
+ pc = ptlrpcd_select_pc(req, policy, idx);
+
+ DEBUG_REQ(D_INFO, req, "add req [%p] to pc [%s:%d]",
+ req, pc->pc_name, pc->pc_index);
+
+ ptlrpc_set_add_new_req(pc, req);
+}
+EXPORT_SYMBOL(ptlrpcd_add_req);
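For reference, the typical call pattern is a short sketch like the following (the request preparation helpers are assumed from the client code and are not shown): the caller attaches an interpret callback and hands the NEW-phase request off to ptlrpcd without ever waiting on it.

    /* hand a prepared request to ptlrpcd and return immediately; the
     * reply is handled by the interpret callback from ptlrpcd context */
    req->rq_interpret_reply = sample_async_interpret;
    ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);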
+
+static inline void ptlrpc_reqset_get(struct ptlrpc_request_set *set)
+{
+ atomic_inc(&set->set_refcount);
+}
+
+/**
+ * Check if there is more work to do on ptlrpcd set.
+ * Returns 1 if yes.
+ */
+static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
+{
+ struct list_head *tmp, *pos;
+ struct ptlrpc_request *req;
+ struct ptlrpc_request_set *set = pc->pc_set;
+ int rc = 0;
+ int rc2;
+
+ if (atomic_read(&set->set_new_count)) {
+ spin_lock(&set->set_new_req_lock);
+ if (likely(!list_empty(&set->set_new_requests))) {
+ list_splice_init(&set->set_new_requests,
+ &set->set_requests);
+ atomic_add(atomic_read(&set->set_new_count),
+ &set->set_remaining);
+ atomic_set(&set->set_new_count, 0);
+ /*
+ * Need to calculate its timeout.
+ */
+ rc = 1;
+ }
+ spin_unlock(&set->set_new_req_lock);
+ }
+
+ /* We should call lu_env_refill() before handling new requests to make
+ * sure that the env key the requests depend on really exists.
+ */
+ rc2 = lu_env_refill(env);
+ if (rc2 != 0) {
+ /*
+ * XXX This is very awkward situation, because
+ * execution can neither continue (request
+ * interpreters assume that env is set up), nor repeat
+ * the loop (as this potentially results in a tight
+ * loop of -ENOMEM's).
+ *
+ * Fortunately, refill only ever does something when
+ * new modules are loaded, i.e., early during boot up.
+ */
+ CERROR("Failure to refill session: %d\n", rc2);
+ return rc;
+ }
+
+ if (atomic_read(&set->set_remaining))
+ rc |= ptlrpc_check_set(env, set);
+
+ if (!list_empty(&set->set_requests)) {
+ /*
+ * XXX: our set never completes, so we prune the completed
+ * reqs after each iteration. boy could this be smarter.
+ */
+ list_for_each_safe(pos, tmp, &set->set_requests) {
+ req = list_entry(pos, struct ptlrpc_request,
+ rq_set_chain);
+ if (req->rq_phase != RQ_PHASE_COMPLETE)
+ continue;
+
+ list_del_init(&req->rq_set_chain);
+ req->rq_set = NULL;
+ ptlrpc_req_finished(req);
+ }
+ }
+
+ if (rc == 0) {
+ /*
+ * If new requests have been added, make sure to wake up.
+ */
+ rc = atomic_read(&set->set_new_count);
+
+ /* If we have nothing to do, check whether we can take some
+ * work from our partner threads. */
+ if (rc == 0 && pc->pc_npartners > 0) {
+ struct ptlrpcd_ctl *partner;
+ struct ptlrpc_request_set *ps;
+ int first = pc->pc_cursor;
+
+ do {
+ partner = pc->pc_partners[pc->pc_cursor++];
+ if (pc->pc_cursor >= pc->pc_npartners)
+ pc->pc_cursor = 0;
+ if (partner == NULL)
+ continue;
+
+ spin_lock(&partner->pc_lock);
+ ps = partner->pc_set;
+ if (ps == NULL) {
+ spin_unlock(&partner->pc_lock);
+ continue;
+ }
+
+ ptlrpc_reqset_get(ps);
+ spin_unlock(&partner->pc_lock);
+
+ if (atomic_read(&ps->set_new_count)) {
+ rc = ptlrpcd_steal_rqset(set, ps);
+ if (rc > 0)
+ CDEBUG(D_RPCTRACE, "transfer %d"
+ " async RPCs [%d->%d]\n",
+ rc, partner->pc_index,
+ pc->pc_index);
+ }
+ ptlrpc_reqset_put(ps);
+ } while (rc == 0 && pc->pc_cursor != first);
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * Main ptlrpcd thread.
+ * ptlrpc's code paths like to execute in process context, so we have this
+ * thread which spins on a set which contains the rpcs and sends them.
+ *
+ */
+static int ptlrpcd(void *arg)
+{
+ struct ptlrpcd_ctl *pc = arg;
+ struct ptlrpc_request_set *set = pc->pc_set;
+ struct lu_env env = { .le_ses = NULL };
+ int rc, exit = 0;
+
+ unshare_fs_struct();
+#if defined(CONFIG_SMP)
+ if (test_bit(LIOD_BIND, &pc->pc_flags)) {
+ int index = pc->pc_index;
+
+ if (index >= 0 && index < num_possible_cpus()) {
+ while (!cpu_online(index)) {
+ if (++index >= num_possible_cpus())
+ index = 0;
+ }
+ set_cpus_allowed_ptr(current,
+ cpumask_of_node(cpu_to_node(index)));
+ }
+ }
+#endif
+ /*
+ * XXX So far only "client" ptlrpcd uses an environment. In
+ * the future, a ptlrpcd thread (or a thread-set) has to be given
+ * an argument, describing its "scope".
+ */
+ rc = lu_context_init(&env.le_ctx,
+ LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF);
+ complete(&pc->pc_starting);
+
+ if (rc != 0)
+ return rc;
+
+ /*
+ * This mainloop strongly resembles ptlrpc_set_wait() except that our
+ * set never completes. ptlrpcd_check() calls ptlrpc_check_set() when
+ * there are requests in the set. New requests come in on the set's
+ * new_req_list and ptlrpcd_check() moves them into the set.
+ */
+ do {
+ struct l_wait_info lwi;
+ int timeout;
+
+ timeout = ptlrpc_set_next_timeout(set);
+ lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
+ ptlrpc_expired_set, set);
+
+ lu_context_enter(&env.le_ctx);
+ l_wait_event(set->set_waitq,
+ ptlrpcd_check(&env, pc), &lwi);
+ lu_context_exit(&env.le_ctx);
+
+ /*
+ * Abort inflight rpcs for forced stop case.
+ */
+ if (test_bit(LIOD_STOP, &pc->pc_flags)) {
+ if (test_bit(LIOD_FORCE, &pc->pc_flags))
+ ptlrpc_abort_set(set);
+ exit++;
+ }
+
+ /*
+ * Let's make one more loop to make sure that ptlrpcd_check()
+ * copied all raced new rpcs into the set so we can kill them.
+ */
+ } while (exit < 2);
+
+ /*
+ * Wait for inflight requests to drain.
+ */
+ if (!list_empty(&set->set_requests))
+ ptlrpc_set_wait(set);
+ lu_context_fini(&env.le_ctx);
+
+ complete(&pc->pc_finishing);
+
+ return 0;
+}
+
+/* XXX: We want multiple CPU cores to share the async RPC load. So we start
+ * many ptlrpcd threads. We also want to reduce the ptlrpcd overhead caused
+ * by data transfer across CPU cores. So we bind each ptlrpcd thread to a
+ * specific CPU core. But binding all ptlrpcd threads may cause response
+ * delays when the CPU core(s) they are bound to are busy with other loads.
+ *
+ * For example: "ls -l", some async RPCs for statahead are assigned to
+ * ptlrpcd_0, and ptlrpcd_0 is bound to CPU_0, but CPU_0 may be quite busy
+ * with non-ptlrpcd work, like "ls -l" itself (we want the "ls -l" thread,
+ * the statahead thread, and the ptlrpcd thread to run in parallel). In
+ * such a case the statahead async RPCs cannot be processed in time, which
+ * is unexpected. If ptlrpcd_0 could be re-scheduled on another CPU core,
+ * it might be better. But that breaks the former data transfer policy.
+ *
+ * So we should not blindly avoid the data transfer. We make a compromise:
+ * divide the ptlrpcd thread pool into two parts. One part is for bound
+ * mode; each ptlrpcd thread in this part is bound to some CPU core. The
+ * other part is for free mode; all the ptlrpcd threads in this part can
+ * be scheduled on any CPU core. We specify some partnership between
+ * bound mode ptlrpcd thread(s) and free mode ptlrpcd thread(s), and the
+ * async RPC load within the partners is shared.
+ *
+ * This partly avoids cross-CPU data transfer (as long as the bound mode
+ * ptlrpcd thread can be scheduled in time), and tries to guarantee that
+ * the async RPCs are processed ASAP (as long as the free mode ptlrpcd
+ * thread can be scheduled on any CPU core).
+ *
+ * As for how to specify the partnership between bound mode ptlrpcd
+ * thread(s) and free mode ptlrpcd thread(s), the simplest way is to use
+ * a <free, bound> pair. In the future, we can specify some more complex
+ * partnership based on the patches for CPU partitions. But before such
+ * patches are available, we prefer to use the simplest one.
+ */
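For the default PDB_POLICY_PAIR case handled in ptlrpcd_bind() below, the partnership reduces to a simple index relationship; the following is a small illustrative sketch (not used by the code) of who pairs with whom.

    /* With an even thread count under PDB_POLICY_PAIR:
     *   ptlrpcd_0 (free)  <->  ptlrpcd_1 (bound)
     *   ptlrpcd_2 (free)  <->  ptlrpcd_3 (bound)
     * i.e. the partner of thread i is i ^ 1, and the bound member of
     * each pair is the one with the odd index. */
    static inline int pdb_pair_partner(int index)
    {
            return index ^ 1;
    }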
+# ifdef CFS_CPU_MODE_NUMA
+# warning "fix ptlrpcd_bind() to use new CPU partition APIs"
+# endif
+static int ptlrpcd_bind(int index, int max)
+{
+ struct ptlrpcd_ctl *pc;
+ int rc = 0;
+#if defined(CONFIG_NUMA)
+ cpumask_t mask;
+#endif
+
+ LASSERT(index <= max - 1);
+ pc = &ptlrpcds->pd_threads[index];
+ switch (ptlrpcd_bind_policy) {
+ case PDB_POLICY_NONE:
+ pc->pc_npartners = -1;
+ break;
+ case PDB_POLICY_FULL:
+ pc->pc_npartners = 0;
+ set_bit(LIOD_BIND, &pc->pc_flags);
+ break;
+ case PDB_POLICY_PAIR:
+ LASSERT(max % 2 == 0);
+ pc->pc_npartners = 1;
+ break;
+ case PDB_POLICY_NEIGHBOR:
+#if defined(CONFIG_NUMA)
+ {
+ int i;
+ mask = *cpumask_of_node(cpu_to_node(index));
+ for (i = max; i < num_online_cpus(); i++)
+ cpu_clear(i, mask);
+ pc->pc_npartners = cpus_weight(mask) - 1;
+ set_bit(LIOD_BIND, &pc->pc_flags);
+ }
+#else
+ LASSERT(max >= 3);
+ pc->pc_npartners = 2;
+#endif
+ break;
+ default:
+ CERROR("unknown ptlrpcd bind policy %d\n", ptlrpcd_bind_policy);
+ rc = -EINVAL;
+ }
+
+ if (rc == 0 && pc->pc_npartners > 0) {
+ OBD_ALLOC(pc->pc_partners,
+ sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
+ if (pc->pc_partners == NULL) {
+ pc->pc_npartners = 0;
+ rc = -ENOMEM;
+ } else {
+ switch (ptlrpcd_bind_policy) {
+ case PDB_POLICY_PAIR:
+ if (index & 0x1) {
+ set_bit(LIOD_BIND, &pc->pc_flags);
+ pc->pc_partners[0] = &ptlrpcds->
+ pd_threads[index - 1];
+ ptlrpcds->pd_threads[index - 1].
+ pc_partners[0] = pc;
+ }
+ break;
+ case PDB_POLICY_NEIGHBOR:
+#if defined(CONFIG_NUMA)
+ {
+ struct ptlrpcd_ctl *ppc;
+ int i, pidx;
+ /* partners are cores in the same NUMA node.
+ * setup partnership only with ptlrpcd threads
+ * that are already initialized
+ */
+ for (pidx = 0, i = 0; i < index; i++) {
+ if (cpu_isset(i, mask)) {
+ ppc = &ptlrpcds->pd_threads[i];
+ pc->pc_partners[pidx++] = ppc;
+ ppc->pc_partners[ppc->
+ pc_npartners++] = pc;
+ }
+ }
+ /* adjust the number of partners to the number
+ * of partnerships really set up */
+ pc->pc_npartners = pidx;
+ }
+#else
+ if (index & 0x1)
+ set_bit(LIOD_BIND, &pc->pc_flags);
+ if (index > 0) {
+ pc->pc_partners[0] = &ptlrpcds->
+ pd_threads[index - 1];
+ ptlrpcds->pd_threads[index - 1].
+ pc_partners[1] = pc;
+ if (index == max - 1) {
+ pc->pc_partners[1] =
+ &ptlrpcds->pd_threads[0];
+ ptlrpcds->pd_threads[0].
+ pc_partners[0] = pc;
+ }
+ }
+#endif
+ break;
+ }
+ }
+ }
+
+ return rc;
+}
+
+
+int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
+{
+ int rc;
+ int env = 0;
+
+ /*
+ * Do not allow starting a second thread for one pc.
+ */
+ if (test_and_set_bit(LIOD_START, &pc->pc_flags)) {
+ CWARN("Starting second thread (%s) for same pc %p\n",
+ name, pc);
+ return 0;
+ }
+
+ pc->pc_index = index;
+ init_completion(&pc->pc_starting);
+ init_completion(&pc->pc_finishing);
+ spin_lock_init(&pc->pc_lock);
+ strlcpy(pc->pc_name, name, sizeof(pc->pc_name));
+ pc->pc_set = ptlrpc_prep_set();
+ if (pc->pc_set == NULL)
+ GOTO(out, rc = -ENOMEM);
+ /*
+ * So far only "client" ptlrpcd uses an environment. In the future,
+ * ptlrpcd thread (or a thread-set) has to be given an argument,
+ * describing its "scope".
+ */
+ rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD|LCT_REMEMBER);
+ if (rc != 0)
+ GOTO(out, rc);
+
+ env = 1;
+ {
+ struct task_struct *task;
+
+ if (index >= 0) {
+ rc = ptlrpcd_bind(index, max);
+ if (rc < 0)
+ GOTO(out, rc);
+ }
+
+ task = kthread_run(ptlrpcd, pc, "%s", pc->pc_name);
+ if (IS_ERR(task))
+ GOTO(out, rc = PTR_ERR(task));
+
+ rc = 0;
+ wait_for_completion(&pc->pc_starting);
+ }
+out:
+ if (rc) {
+ if (pc->pc_set != NULL) {
+ struct ptlrpc_request_set *set = pc->pc_set;
+
+ spin_lock(&pc->pc_lock);
+ pc->pc_set = NULL;
+ spin_unlock(&pc->pc_lock);
+ ptlrpc_set_destroy(set);
+ }
+ if (env != 0)
+ lu_context_fini(&pc->pc_env.le_ctx);
+ clear_bit(LIOD_BIND, &pc->pc_flags);
+ clear_bit(LIOD_START, &pc->pc_flags);
+ }
+ return rc;
+}
+
+void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
+{
+ if (!test_bit(LIOD_START, &pc->pc_flags)) {
+ CWARN("Thread for pc %p was not started\n", pc);
+ return;
+ }
+
+ set_bit(LIOD_STOP, &pc->pc_flags);
+ if (force)
+ set_bit(LIOD_FORCE, &pc->pc_flags);
+ wake_up(&pc->pc_set->set_waitq);
+}
+
+void ptlrpcd_free(struct ptlrpcd_ctl *pc)
+{
+ struct ptlrpc_request_set *set = pc->pc_set;
+
+ if (!test_bit(LIOD_START, &pc->pc_flags)) {
+ CWARN("Thread for pc %p was not started\n", pc);
+ goto out;
+ }
+
+ wait_for_completion(&pc->pc_finishing);
+ lu_context_fini(&pc->pc_env.le_ctx);
+
+ spin_lock(&pc->pc_lock);
+ pc->pc_set = NULL;
+ spin_unlock(&pc->pc_lock);
+ ptlrpc_set_destroy(set);
+
+ clear_bit(LIOD_START, &pc->pc_flags);
+ clear_bit(LIOD_STOP, &pc->pc_flags);
+ clear_bit(LIOD_FORCE, &pc->pc_flags);
+ clear_bit(LIOD_BIND, &pc->pc_flags);
+
+out:
+ if (pc->pc_npartners > 0) {
+ LASSERT(pc->pc_partners != NULL);
+
+ OBD_FREE(pc->pc_partners,
+ sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners);
+ pc->pc_partners = NULL;
+ }
+ pc->pc_npartners = 0;
+}
+
+static void ptlrpcd_fini(void)
+{
+ int i;
+
+ if (ptlrpcds != NULL) {
+ for (i = 0; i < ptlrpcds->pd_nthreads; i++)
+ ptlrpcd_stop(&ptlrpcds->pd_threads[i], 0);
+ for (i = 0; i < ptlrpcds->pd_nthreads; i++)
+ ptlrpcd_free(&ptlrpcds->pd_threads[i]);
+ ptlrpcd_stop(&ptlrpcds->pd_thread_rcv, 0);
+ ptlrpcd_free(&ptlrpcds->pd_thread_rcv);
+ OBD_FREE(ptlrpcds, ptlrpcds->pd_size);
+ ptlrpcds = NULL;
+ }
+}
+
+static int ptlrpcd_init(void)
+{
+ int nthreads = num_online_cpus();
+ char name[16];
+ int size, i = -1, j, rc = 0;
+
+ if (max_ptlrpcds > 0 && max_ptlrpcds < nthreads)
+ nthreads = max_ptlrpcds;
+ if (nthreads < 2)
+ nthreads = 2;
+ if (nthreads < 3 && ptlrpcd_bind_policy == PDB_POLICY_NEIGHBOR)
+ ptlrpcd_bind_policy = PDB_POLICY_PAIR;
+ else if (nthreads % 2 != 0 && ptlrpcd_bind_policy == PDB_POLICY_PAIR)
+ nthreads &= ~1; /* make sure it is even */
+
+ size = offsetof(struct ptlrpcd, pd_threads[nthreads]);
+ OBD_ALLOC(ptlrpcds, size);
+ if (ptlrpcds == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ snprintf(name, sizeof(name), "ptlrpcd_rcv");
+ set_bit(LIOD_RECOVERY, &ptlrpcds->pd_thread_rcv.pc_flags);
+ rc = ptlrpcd_start(-1, nthreads, name, &ptlrpcds->pd_thread_rcv);
+ if (rc < 0)
+ GOTO(out, rc);
+
+ /* XXX: We start nthreads ptlrpc daemons. Each of them can process any
+ * non-recovery async RPC to improve overall async RPC efficiency.
+ *
+ * But there are some issues with async I/O RPCs and async non-I/O
+ * RPCs being processed in the same set in some cases. The ptlrpcd may
+ * be blocked by some async I/O RPC(s), which then causes other async
+ * non-I/O RPC(s) to not be processed in time.
+ *
+ * Maybe we should distinguish blocked async RPCs from non-blocked
+ * async RPCs, and process them in different ptlrpcd sets to avoid
+ * unnecessary dependency. But how to distribute the async RPC load
+ * among all the ptlrpc daemons becomes another trouble. */
+ for (i = 0; i < nthreads; i++) {
+ snprintf(name, sizeof(name), "ptlrpcd_%d", i);
+ rc = ptlrpcd_start(i, nthreads, name, &ptlrpcds->pd_threads[i]);
+ if (rc < 0)
+ GOTO(out, rc);
+ }
+
+ ptlrpcds->pd_size = size;
+ ptlrpcds->pd_index = 0;
+ ptlrpcds->pd_nthreads = nthreads;
+
+out:
+ if (rc != 0 && ptlrpcds != NULL) {
+ for (j = 0; j <= i; j++)
+ ptlrpcd_stop(&ptlrpcds->pd_threads[j], 0);
+ for (j = 0; j <= i; j++)
+ ptlrpcd_free(&ptlrpcds->pd_threads[j]);
+ ptlrpcd_stop(&ptlrpcds->pd_thread_rcv, 0);
+ ptlrpcd_free(&ptlrpcds->pd_thread_rcv);
+ OBD_FREE(ptlrpcds, size);
+ ptlrpcds = NULL;
+ }
+
+ return 0;
+}
+
+int ptlrpcd_addref(void)
+{
+ int rc = 0;
+
+ mutex_lock(&ptlrpcd_mutex);
+ if (++ptlrpcd_users == 1)
+ rc = ptlrpcd_init();
+ mutex_unlock(&ptlrpcd_mutex);
+ return rc;
+}
+EXPORT_SYMBOL(ptlrpcd_addref);
+
+void ptlrpcd_decref(void)
+{
+ mutex_lock(&ptlrpcd_mutex);
+ if (--ptlrpcd_users == 0)
+ ptlrpcd_fini();
+ mutex_unlock(&ptlrpcd_mutex);
+}
+EXPORT_SYMBOL(ptlrpcd_decref);
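ptlrpcd_addref()/ptlrpcd_decref() implement a plain use count around the thread pool: the first user starts the threads, the last user stops them. A minimal usage sketch follows, with hypothetical setup/cleanup functions standing in for the real callers.

    static int sample_client_setup(void)
    {
            int rc = ptlrpcd_addref();      /* first caller runs ptlrpcd_init() */

            if (rc)
                    return rc;
            /* ... queue async RPCs via ptlrpcd_add_req() ... */
            return 0;
    }

    static void sample_client_cleanup(void)
    {
            ptlrpcd_decref();               /* last caller runs ptlrpcd_fini() */
    }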
+/** @} ptlrpcd */
diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c
new file mode 100644
index 0000000..84c39e0
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c
@@ -0,0 +1,344 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ptlrpc/recover.c
+ *
+ * Author: Mike Shaver <shaver@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_RPC
+# include <linux/libcfs/libcfs.h>
+
+#include <obd_support.h>
+#include <lustre_ha.h>
+#include <lustre_net.h>
+#include <lustre_import.h>
+#include <lustre_export.h>
+#include <obd.h>
+#include <obd_ost.h>
+#include <obd_class.h>
+#include <obd_lov.h> /* for IOC_LOV_SET_OSC_ACTIVE */
+#include <linux/list.h>
+
+#include "ptlrpc_internal.h"
+
+/**
+ * Start recovery on disconnected import.
+ * This is done by just attempting a connect
+ */
+void ptlrpc_initiate_recovery(struct obd_import *imp)
+{
+ CDEBUG(D_HA, "%s: starting recovery\n", obd2cli_tgt(imp->imp_obd));
+ ptlrpc_connect_import(imp);
+}
+
+/**
+ * Identify what request from replay list needs to be replayed next
+ * (based on what we have already replayed) and send it to server.
+ */
+int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
+{
+ int rc = 0;
+ struct list_head *tmp, *pos;
+ struct ptlrpc_request *req = NULL;
+ __u64 last_transno;
+
+ *inflight = 0;
+
+ /* The server might have committed some requests since we last spoke,
+ * so make sure we get rid of them now.
+ */
+ spin_lock(&imp->imp_lock);
+ imp->imp_last_transno_checked = 0;
+ ptlrpc_free_committed(imp);
+ last_transno = imp->imp_last_replay_transno;
+ spin_unlock(&imp->imp_lock);
+
+ CDEBUG(D_HA, "import %p from %s committed "LPU64" last "LPU64"\n",
+ imp, obd2cli_tgt(imp->imp_obd),
+ imp->imp_peer_committed_transno, last_transno);
+
+ /* Do I need to hold a lock across this iteration? We shouldn't be
+ * racing with any additions to the list, because we're in recovery
+ * and are therefore not processing additional requests to add. Calls
+ * to ptlrpc_free_committed might commit requests, but nothing "newer"
+ * than the one we're replaying (it can't be committed until it's
+ * replayed, and we're doing that here). l_f_e_safe protects against
+ * problems with the current request being committed, in the unlikely
+ * event of that race. So, in conclusion, I think that it's safe to
+ * perform this list-walk without the imp_lock held.
+ *
+ * But, the {mdc,osc}_replay_open callbacks both iterate
+ * request lists, and have comments saying they assume the
+ * imp_lock is being held by ptlrpc_replay, but it's not. It's
+ * just a little race...
+ */
+ list_for_each_safe(tmp, pos, &imp->imp_replay_list) {
+ req = list_entry(tmp, struct ptlrpc_request,
+ rq_replay_list);
+
+ /* If we need to resend the last sent transno (because a
+ reconnect has occurred), then stop on the matching
+ req and send it again. If, however, the last sent
+ transno has been committed then we continue replay
+ from the next request. */
+ if (req->rq_transno > last_transno) {
+ if (imp->imp_resend_replay)
+ lustre_msg_add_flags(req->rq_reqmsg,
+ MSG_RESENT);
+ break;
+ }
+ req = NULL;
+ }
+
+ spin_lock(&imp->imp_lock);
+ imp->imp_resend_replay = 0;
+ spin_unlock(&imp->imp_lock);
+
+ if (req != NULL) {
+ rc = ptlrpc_replay_req(req);
+ if (rc) {
+ CERROR("recovery replay error %d for req "
+ LPU64"\n", rc, req->rq_xid);
+ return rc;
+ }
+ *inflight = 1;
+ }
+ return rc;
+}
+
+/**
+ * Schedule resending of request on sending_list. This is done after
+ * we completed replaying of requests and locks.
+ */
+int ptlrpc_resend(struct obd_import *imp)
+{
+ struct ptlrpc_request *req, *next;
+
+ /* As long as we're in recovery, nothing should be added to the sending
+ * list, so we don't need to hold the lock during this iteration and
+ * resend process.
+ */
+ /* Well... what if lctl recover is called twice at the same time?
+ */
+ spin_lock(&imp->imp_lock);
+ if (imp->imp_state != LUSTRE_IMP_RECOVER) {
+ spin_unlock(&imp->imp_lock);
+ return -1;
+ }
+
+ list_for_each_entry_safe(req, next, &imp->imp_sending_list,
+ rq_list) {
+ LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON,
+ "req %p bad\n", req);
+ LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
+ if (!ptlrpc_no_resend(req))
+ ptlrpc_resend_req(req);
+ }
+ spin_unlock(&imp->imp_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(ptlrpc_resend);
+
+/**
+ * Go through all requests in delayed list and wake their threads
+ * for resending
+ */
+void ptlrpc_wake_delayed(struct obd_import *imp)
+{
+ struct list_head *tmp, *pos;
+ struct ptlrpc_request *req;
+
+ spin_lock(&imp->imp_lock);
+ list_for_each_safe(tmp, pos, &imp->imp_delayed_list) {
+ req = list_entry(tmp, struct ptlrpc_request, rq_list);
+
+ DEBUG_REQ(D_HA, req, "waking (set %p):", req->rq_set);
+ ptlrpc_client_wake_req(req);
+ }
+ spin_unlock(&imp->imp_lock);
+}
+EXPORT_SYMBOL(ptlrpc_wake_delayed);
+
+void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req)
+{
+ struct obd_import *imp = failed_req->rq_import;
+
+ CDEBUG(D_HA, "import %s of %s@%s abruptly disconnected: reconnecting\n",
+ imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
+ imp->imp_connection->c_remote_uuid.uuid);
+
+ if (ptlrpc_set_import_discon(imp,
+ lustre_msg_get_conn_cnt(failed_req->rq_reqmsg))) {
+ if (!imp->imp_replayable) {
+ CDEBUG(D_HA, "import %s@%s for %s not replayable, "
+ "auto-deactivating\n",
+ obd2cli_tgt(imp->imp_obd),
+ imp->imp_connection->c_remote_uuid.uuid,
+ imp->imp_obd->obd_name);
+ ptlrpc_deactivate_import(imp);
+ }
+ /* to control recovery via lctl {disable|enable}_recovery */
+ if (imp->imp_deactive == 0)
+ ptlrpc_connect_import(imp);
+ }
+
+ /* Wait for recovery to complete and resend. If evicted, then
+ this request will be errored out later.*/
+ spin_lock(&failed_req->rq_lock);
+ if (!failed_req->rq_no_resend)
+ failed_req->rq_resend = 1;
+ spin_unlock(&failed_req->rq_lock);
+}
+
+/**
+ * Administratively activate/deactivate a client.
+ * This should only be called by the ioctl interface, currently
+ * - the lctl deactivate and activate commands
+ * - echo 0/1 >> /proc/osc/XXX/active
+ * - client umount -f (ll_umount_begin)
+ */
+int ptlrpc_set_import_active(struct obd_import *imp, int active)
+{
+ struct obd_device *obd = imp->imp_obd;
+ int rc = 0;
+
+ LASSERT(obd);
+
+ /* When deactivating, mark import invalid, and abort in-flight
+ * requests. */
+ if (!active) {
+ LCONSOLE_WARN("setting import %s INACTIVE by administrator "
+ "request\n", obd2cli_tgt(imp->imp_obd));
+
+ /* set before invalidate to avoid messages about imp_inval
+ * set without imp_deactive in ptlrpc_import_delay_req */
+ spin_lock(&imp->imp_lock);
+ imp->imp_deactive = 1;
+ spin_unlock(&imp->imp_lock);
+
+ obd_import_event(imp->imp_obd, imp, IMP_EVENT_DEACTIVATE);
+
+ ptlrpc_invalidate_import(imp);
+ }
+
+ /* When activating, mark import valid, and attempt recovery */
+ if (active) {
+ CDEBUG(D_HA, "setting import %s VALID\n",
+ obd2cli_tgt(imp->imp_obd));
+
+ spin_lock(&imp->imp_lock);
+ imp->imp_deactive = 0;
+ spin_unlock(&imp->imp_lock);
+ obd_import_event(imp->imp_obd, imp, IMP_EVENT_ACTIVATE);
+
+ rc = ptlrpc_recover_import(imp, NULL, 0);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(ptlrpc_set_import_active);
+
+/* Attempt to reconnect an import */
+int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid, int async)
+{
+ int rc = 0;
+
+ spin_lock(&imp->imp_lock);
+ if (imp->imp_state == LUSTRE_IMP_NEW || imp->imp_deactive ||
+ atomic_read(&imp->imp_inval_count))
+ rc = -EINVAL;
+ spin_unlock(&imp->imp_lock);
+ if (rc)
+ GOTO(out, rc);
+
+ /* force import to be disconnected. */
+ ptlrpc_set_import_discon(imp, 0);
+
+ if (new_uuid) {
+ struct obd_uuid uuid;
+
+ /* instruct the import to use the new uuid */
+ obd_str2uuid(&uuid, new_uuid);
+ rc = import_set_conn_priority(imp, &uuid);
+ if (rc)
+ GOTO(out, rc);
+ }
+
+ /* Check if reconnect is already in progress */
+ spin_lock(&imp->imp_lock);
+ if (imp->imp_state != LUSTRE_IMP_DISCON) {
+ imp->imp_force_verify = 1;
+ rc = -EALREADY;
+ }
+ spin_unlock(&imp->imp_lock);
+ if (rc)
+ GOTO(out, rc);
+
+ rc = ptlrpc_connect_import(imp);
+ if (rc)
+ GOTO(out, rc);
+
+ if (!async) {
+ struct l_wait_info lwi;
+ int secs = cfs_time_seconds(obd_timeout);
+
+ CDEBUG(D_HA, "%s: recovery started, waiting %u seconds\n",
+ obd2cli_tgt(imp->imp_obd), secs);
+
+ lwi = LWI_TIMEOUT(secs, NULL, NULL);
+ rc = l_wait_event(imp->imp_recovery_waitq,
+ !ptlrpc_import_in_recovery(imp), &lwi);
+ CDEBUG(D_HA, "%s: recovery finished\n",
+ obd2cli_tgt(imp->imp_obd));
+ }
+
+out:
+ return rc;
+}
+EXPORT_SYMBOL(ptlrpc_recover_import);
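A minimal sketch of the two ways this entry point is driven, assuming imp is a valid import reference (the real callers are the lctl recover ioctl path and internal recovery code):

    /* synchronous: kick the reconnect and wait (bounded by obd_timeout
     * inside ptlrpc_recover_import()) for the import to leave recovery */
    rc = ptlrpc_recover_import(imp, NULL, 0);

    /* asynchronous: kick the reconnect and return at once; progress
     * can be checked with ptlrpc_import_in_recovery(imp) */
    rc = ptlrpc_recover_import(imp, NULL, 1);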
+
+int ptlrpc_import_in_recovery(struct obd_import *imp)
+{
+ int in_recovery = 1;
+ spin_lock(&imp->imp_lock);
+ if (imp->imp_state == LUSTRE_IMP_FULL ||
+ imp->imp_state == LUSTRE_IMP_CLOSED ||
+ imp->imp_state == LUSTRE_IMP_DISCON)
+ in_recovery = 0;
+ spin_unlock(&imp->imp_lock);
+ return in_recovery;
+}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c
new file mode 100644
index 0000000..962b31d
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c
@@ -0,0 +1,2446 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ptlrpc/sec.c
+ *
+ * Author: Eric Mei <ericm@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_SEC
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/crypto.h>
+#include <linux/key.h>
+
+#include <obd.h>
+#include <obd_class.h>
+#include <obd_support.h>
+#include <lustre_net.h>
+#include <lustre_import.h>
+#include <lustre_dlm.h>
+#include <lustre_sec.h>
+
+#include "ptlrpc_internal.h"
+
+/***********************************************
+ * policy registers *
+ ***********************************************/
+
+static rwlock_t policy_lock;
+static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
+ NULL,
+};
+
+int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
+{
+ __u16 number = policy->sp_policy;
+
+ LASSERT(policy->sp_name);
+ LASSERT(policy->sp_cops);
+ LASSERT(policy->sp_sops);
+
+ if (number >= SPTLRPC_POLICY_MAX)
+ return -EINVAL;
+
+ write_lock(&policy_lock);
+ if (unlikely(policies[number])) {
+ write_unlock(&policy_lock);
+ return -EALREADY;
+ }
+ policies[number] = policy;
+ write_unlock(&policy_lock);
+
+ CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
+ return 0;
+}
+EXPORT_SYMBOL(sptlrpc_register_policy);
+
+int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
+{
+ __u16 number = policy->sp_policy;
+
+ LASSERT(number < SPTLRPC_POLICY_MAX);
+
+ write_lock(&policy_lock);
+ if (unlikely(policies[number] == NULL)) {
+ write_unlock(&policy_lock);
+ CERROR("%s: already unregistered\n", policy->sp_name);
+ return -EINVAL;
+ }
+
+ LASSERT(policies[number] == policy);
+ policies[number] = NULL;
+ write_unlock(&policy_lock);
+
+ CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
+ return 0;
+}
+EXPORT_SYMBOL(sptlrpc_unregister_policy);
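A security policy module plugs into this table from its own module init/exit. The sketch below uses placeholder names (sample_policy, sample_cops, sample_sops, SAMPLE_POLICY_NUMBER are hypothetical, not from this patch); the real policies elsewhere in the tree follow the same shape.

    static struct ptlrpc_sec_policy sample_policy = {
            .sp_owner  = THIS_MODULE,
            .sp_name   = "sample",
            .sp_policy = SAMPLE_POLICY_NUMBER, /* hypothetical, must be < SPTLRPC_POLICY_MAX */
            .sp_cops   = &sample_cops,         /* client ops table, required to be non-NULL */
            .sp_sops   = &sample_sops,         /* server ops table, required to be non-NULL */
    };

    static int __init sample_policy_init(void)
    {
            return sptlrpc_register_policy(&sample_policy);
    }

    static void __exit sample_policy_exit(void)
    {
            sptlrpc_unregister_policy(&sample_policy);
    }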
+
+static
+struct ptlrpc_sec_policy * sptlrpc_wireflavor2policy(__u32 flavor)
+{
+ static DEFINE_MUTEX(load_mutex);
+ static atomic_t loaded = ATOMIC_INIT(0);
+ struct ptlrpc_sec_policy *policy;
+ __u16 number = SPTLRPC_FLVR_POLICY(flavor);
+ __u16 flag = 0;
+
+ if (number >= SPTLRPC_POLICY_MAX)
+ return NULL;
+
+ while (1) {
+ read_lock(&policy_lock);
+ policy = policies[number];
+ if (policy && !try_module_get(policy->sp_owner))
+ policy = NULL;
+ if (policy == NULL)
+ flag = atomic_read(&loaded);
+ read_unlock(&policy_lock);
+
+ if (policy != NULL || flag != 0 ||
+ number != SPTLRPC_POLICY_GSS)
+ break;
+
+ /* try to load gss module, once */
+ mutex_lock(&load_mutex);
+ if (atomic_read(&loaded) == 0) {
+ if (request_module("ptlrpc_gss") == 0)
+ CDEBUG(D_SEC,
+ "module ptlrpc_gss loaded on demand\n");
+ else
+ CERROR("Unable to load module ptlrpc_gss\n");
+
+ atomic_set(&loaded, 1);
+ }
+ mutex_unlock(&load_mutex);
+ }
+
+ return policy;
+}
+
+__u32 sptlrpc_name2flavor_base(const char *name)
+{
+ if (!strcmp(name, "null"))
+ return SPTLRPC_FLVR_NULL;
+ if (!strcmp(name, "plain"))
+ return SPTLRPC_FLVR_PLAIN;
+ if (!strcmp(name, "krb5n"))
+ return SPTLRPC_FLVR_KRB5N;
+ if (!strcmp(name, "krb5a"))
+ return SPTLRPC_FLVR_KRB5A;
+ if (!strcmp(name, "krb5i"))
+ return SPTLRPC_FLVR_KRB5I;
+ if (!strcmp(name, "krb5p"))
+ return SPTLRPC_FLVR_KRB5P;
+
+ return SPTLRPC_FLVR_INVALID;
+}
+EXPORT_SYMBOL(sptlrpc_name2flavor_base);
+
+const char *sptlrpc_flavor2name_base(__u32 flvr)
+{
+ __u32 base = SPTLRPC_FLVR_BASE(flvr);
+
+ if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL))
+ return "null";
+ else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN))
+ return "plain";
+ else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N))
+ return "krb5n";
+ else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5A))
+ return "krb5a";
+ else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5I))
+ return "krb5i";
+ else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5P))
+ return "krb5p";
+
+ CERROR("invalid wire flavor 0x%x\n", flvr);
+ return "invalid";
+}
+EXPORT_SYMBOL(sptlrpc_flavor2name_base);
+
+char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
+ char *buf, int bufsize)
+{
+ if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN)
+ snprintf(buf, bufsize, "hash:%s",
+ sptlrpc_get_hash_name(sf->u_bulk.hash.hash_alg));
+ else
+ snprintf(buf, bufsize, "%s",
+ sptlrpc_flavor2name_base(sf->sf_rpc));
+
+ buf[bufsize - 1] = '\0';
+ return buf;
+}
+EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);
+
+char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
+{
+ snprintf(buf, bufsize, "%s", sptlrpc_flavor2name_base(sf->sf_rpc));
+
+ /*
+ * currently we don't support customized bulk specification for
+ * flavors other than plain
+ */
+ if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) {
+ char bspec[16];
+
+ bspec[0] = '-';
+ sptlrpc_flavor2name_bulk(sf, &bspec[1], sizeof(bspec) - 1);
+ strncat(buf, bspec, bufsize);
+ }
+
+ buf[bufsize - 1] = '\0';
+ return buf;
+}
+EXPORT_SYMBOL(sptlrpc_flavor2name);
+
+char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize)
+{
+ buf[0] = '\0';
+
+ if (flags & PTLRPC_SEC_FL_REVERSE)
+ strlcat(buf, "reverse,", bufsize);
+ if (flags & PTLRPC_SEC_FL_ROOTONLY)
+ strlcat(buf, "rootonly,", bufsize);
+ if (flags & PTLRPC_SEC_FL_UDESC)
+ strlcat(buf, "udesc,", bufsize);
+ if (flags & PTLRPC_SEC_FL_BULK)
+ strlcat(buf, "bulk,", bufsize);
+ if (buf[0] == '\0')
+ strlcat(buf, "-,", bufsize);
+
+ return buf;
+}
+EXPORT_SYMBOL(sptlrpc_secflags2str);
+
+/**************************************************
+ * client context APIs *
+ **************************************************/
+
+static
+struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
+{
+ struct vfs_cred vcred;
+ int create = 1, remove_dead = 1;
+
+ LASSERT(sec);
+ LASSERT(sec->ps_policy->sp_cops->lookup_ctx);
+
+ if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE |
+ PTLRPC_SEC_FL_ROOTONLY)) {
+ vcred.vc_uid = 0;
+ vcred.vc_gid = 0;
+ if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) {
+ create = 0;
+ remove_dead = 0;
+ }
+ } else {
+ vcred.vc_uid = from_kuid(&init_user_ns, current_uid());
+ vcred.vc_gid = from_kgid(&init_user_ns, current_gid());
+ }
+
+ return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred,
+ create, remove_dead);
+}
+
+struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
+{
+ atomic_inc(&ctx->cc_refcount);
+ return ctx;
+}
+EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
+
+void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
+{
+ struct ptlrpc_sec *sec = ctx->cc_sec;
+
+ LASSERT(sec);
+ LASSERT_ATOMIC_POS(&ctx->cc_refcount);
+
+ if (!atomic_dec_and_test(&ctx->cc_refcount))
+ return;
+
+ sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
+}
+EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
+
+/**
+ * Expire the client context immediately.
+ *
+ * \pre Caller must hold at least 1 reference on the \a ctx.
+ */
+void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
+{
+ LASSERT(ctx->cc_ops->die);
+ ctx->cc_ops->die(ctx, 0);
+}
+EXPORT_SYMBOL(sptlrpc_cli_ctx_expire);
+
+/**
+ * Wake up the threads that are waiting for this client context. Called
+ * after some status change happened on \a ctx.
+ */
+void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
+{
+ struct ptlrpc_request *req, *next;
+
+ spin_lock(&ctx->cc_lock);
+ list_for_each_entry_safe(req, next, &ctx->cc_req_list,
+ rq_ctx_chain) {
+ list_del_init(&req->rq_ctx_chain);
+ ptlrpc_client_wake_req(req);
+ }
+ spin_unlock(&ctx->cc_lock);
+}
+EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);
+
+int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
+{
+ LASSERT(ctx->cc_ops);
+
+ if (ctx->cc_ops->display == NULL)
+ return 0;
+
+ return ctx->cc_ops->display(ctx, buf, bufsize);
+}
+
+static int import_sec_check_expire(struct obd_import *imp)
+{
+ int adapt = 0;
+
+ spin_lock(&imp->imp_lock);
+ if (imp->imp_sec_expire &&
+ imp->imp_sec_expire < cfs_time_current_sec()) {
+ adapt = 1;
+ imp->imp_sec_expire = 0;
+ }
+ spin_unlock(&imp->imp_lock);
+
+ if (!adapt)
+ return 0;
+
+ CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n");
+ return sptlrpc_import_sec_adapt(imp, NULL, 0);
+}
+
+static int import_sec_validate_get(struct obd_import *imp,
+ struct ptlrpc_sec **sec)
+{
+ int rc;
+
+ if (unlikely(imp->imp_sec_expire)) {
+ rc = import_sec_check_expire(imp);
+ if (rc)
+ return rc;
+ }
+
+ *sec = sptlrpc_import_sec_ref(imp);
+ if (*sec == NULL) {
+ CERROR("import %p (%s) with no sec\n",
+ imp, ptlrpc_import_state_name(imp->imp_state));
+ return -EACCES;
+ }
+
+ if (unlikely((*sec)->ps_dying)) {
+ CERROR("attempt to use dying sec %p\n", sec);
+ sptlrpc_sec_put(*sec);
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+/**
+ * Given a \a req, find or allocate an appropriate context for it.
+ * \pre req->rq_cli_ctx == NULL.
+ *
+ * \retval 0 succeed, and req->rq_cli_ctx is set.
+ * \retval -ev error number, and req->rq_cli_ctx == NULL.
+ */
+int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
+{
+ struct obd_import *imp = req->rq_import;
+ struct ptlrpc_sec *sec;
+ int rc;
+
+ LASSERT(!req->rq_cli_ctx);
+ LASSERT(imp);
+
+ rc = import_sec_validate_get(imp, &sec);
+ if (rc)
+ return rc;
+
+ req->rq_cli_ctx = get_my_ctx(sec);
+
+ sptlrpc_sec_put(sec);
+
+ if (!req->rq_cli_ctx) {
+ CERROR("req %p: fail to get context\n", req);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * Drop the context for \a req.
+ * \pre req->rq_cli_ctx != NULL.
+ * \post req->rq_cli_ctx == NULL.
+ *
+ * If \a sync == 0, this function should return quickly without sleep;
+ * otherwise it might trigger and wait for the whole process of sending
+ * a context-destroying rpc to the server.
+ */
+void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
+{
+ LASSERT(req);
+ LASSERT(req->rq_cli_ctx);
+
+ /* the request might be asked to release its context early while
+ * it is still on the context waiting list.
+ */
+ if (!list_empty(&req->rq_ctx_chain)) {
+ spin_lock(&req->rq_cli_ctx->cc_lock);
+ list_del_init(&req->rq_ctx_chain);
+ spin_unlock(&req->rq_cli_ctx->cc_lock);
+ }
+
+ sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
+ req->rq_cli_ctx = NULL;
+}
+
+static
+int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
+ struct ptlrpc_cli_ctx *oldctx,
+ struct ptlrpc_cli_ctx *newctx)
+{
+ struct sptlrpc_flavor old_flvr;
+ char *reqmsg = NULL; /* to workaround old gcc */
+ int reqmsg_size;
+ int rc = 0;
+
+ LASSERT(req->rq_reqmsg);
+ LASSERT(req->rq_reqlen);
+ LASSERT(req->rq_replen);
+
+ CDEBUG(D_SEC, "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), "
+ "switch sec %p(%s) -> %p(%s)\n", req,
+ oldctx, oldctx->cc_vcred.vc_uid, sec2target_str(oldctx->cc_sec),
+ newctx, newctx->cc_vcred.vc_uid, sec2target_str(newctx->cc_sec),
+ oldctx->cc_sec, oldctx->cc_sec->ps_policy->sp_name,
+ newctx->cc_sec, newctx->cc_sec->ps_policy->sp_name);
+
+ /* save flavor */
+ old_flvr = req->rq_flvr;
+
+ /* save request message */
+ reqmsg_size = req->rq_reqlen;
+ if (reqmsg_size != 0) {
+ OBD_ALLOC_LARGE(reqmsg, reqmsg_size);
+ if (reqmsg == NULL)
+ return -ENOMEM;
+ memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
+ }
+
+ /* release old req/rep buf */
+ req->rq_cli_ctx = oldctx;
+ sptlrpc_cli_free_reqbuf(req);
+ sptlrpc_cli_free_repbuf(req);
+ req->rq_cli_ctx = newctx;
+
+ /* recalculate the flavor */
+ sptlrpc_req_set_flavor(req, 0);
+
+ /* alloc new request buffer
+ * we don't need to alloc the reply buffer here; leave it to the
+ * rest of the ptlrpc procedure */
+ if (reqmsg_size != 0) {
+ rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
+ if (!rc) {
+ LASSERT(req->rq_reqmsg);
+ memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
+ } else {
+ CWARN("failed to alloc reqbuf: %d\n", rc);
+ req->rq_flvr = old_flvr;
+ }
+
+ OBD_FREE_LARGE(reqmsg, reqmsg_size);
+ }
+ return rc;
+}
+
+/**
+ * If the current context of \a req is dead somehow, e.g. we just switched
+ * flavor and thus marked the original contexts dead, we'll find a new context
+ * for it. If no switch is needed, \a req will end up with the same context.
+ *
+ * \note a request must have a context, to keep other parts of code happy.
+ * In any case of failure during the switching, we must restore the old one.
+ */
+int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
+{
+ struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
+ struct ptlrpc_cli_ctx *newctx;
+ int rc;
+
+ LASSERT(oldctx);
+
+ sptlrpc_cli_ctx_get(oldctx);
+ sptlrpc_req_put_ctx(req, 0);
+
+ rc = sptlrpc_req_get_ctx(req);
+ if (unlikely(rc)) {
+ LASSERT(!req->rq_cli_ctx);
+
+ /* restore old ctx */
+ req->rq_cli_ctx = oldctx;
+ return rc;
+ }
+
+ newctx = req->rq_cli_ctx;
+ LASSERT(newctx);
+
+ if (unlikely(newctx == oldctx &&
+ test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
+ /*
+ * still got the old dead ctx, which usually means the system is too busy
+ */
+ CDEBUG(D_SEC,
+ "ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
+ newctx, newctx->cc_flags);
+
+ schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,
+ HZ);
+ } else {
+ /*
+ * it's possible newctx == oldctx if we're switching
+ * subflavor with the same sec.
+ */
+ rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
+ if (rc) {
+ /* restore old ctx */
+ sptlrpc_req_put_ctx(req, 0);
+ req->rq_cli_ctx = oldctx;
+ return rc;
+ }
+
+ LASSERT(req->rq_cli_ctx == newctx);
+ }
+
+ sptlrpc_cli_ctx_put(oldctx, 1);
+ return 0;
+}
+EXPORT_SYMBOL(sptlrpc_req_replace_dead_ctx);
+
+static
+int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
+{
+ if (cli_ctx_is_refreshed(ctx))
+ return 1;
+ return 0;
+}
+
+static
+int ctx_refresh_timeout(void *data)
+{
+ struct ptlrpc_request *req = data;
+ int rc;
+
+ /* conn_cnt is needed in expire_one_request */
+ lustre_msg_set_conn_cnt(req->rq_reqmsg, req->rq_import->imp_conn_cnt);
+
+ rc = ptlrpc_expire_one_request(req, 1);
+ /* if we started recovery, we should mark this ctx dead; otherwise,
+ * in case lgssd died, nobody would retire this ctx and subsequent
+ * connects would still find the same ctx, thus causing a deadlock.
+ * there's an assumption that the expire time of the request should be
+ * later than the context refresh expire time.
+ */
+ if (rc == 0)
+ req->rq_cli_ctx->cc_ops->die(req->rq_cli_ctx, 0);
+ return rc;
+}
+
+static
+void ctx_refresh_interrupt(void *data)
+{
+ struct ptlrpc_request *req = data;
+
+ spin_lock(&req->rq_lock);
+ req->rq_intr = 1;
+ spin_unlock(&req->rq_lock);
+}
+
+static
+void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
+{
+ spin_lock(&ctx->cc_lock);
+ if (!list_empty(&req->rq_ctx_chain))
+ list_del_init(&req->rq_ctx_chain);
+ spin_unlock(&ctx->cc_lock);
+}
+
+/**
+ * To refresh the context of \a req, if it's not up-to-date.
+ * \param timeout
+ * - < 0: don't wait
+ * - = 0: wait until success or a fatal error occurs
+ * - > 0: timeout value (in seconds)
+ *
+ * The status of the context could be changed by other threads at any time.
+ * We allow this race, but once we return 0, the caller will assume the
+ * context is up to date and keep using it until the owning rpc is done.
+ *
+ * \retval 0 only if the context is up to date.
+ * \retval -ev error number.
+ */
+int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
+{
+ struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
+ struct ptlrpc_sec *sec;
+ struct l_wait_info lwi;
+ int rc;
+
+ LASSERT(ctx);
+
+ if (req->rq_ctx_init || req->rq_ctx_fini)
+ return 0;
+
+ /*
+ * during the process a request's context might even change type
+ * (e.g. from a gss ctx to a null ctx), so on each loop we need to
+ * re-check everything
+ */
+again:
+ rc = import_sec_validate_get(req->rq_import, &sec);
+ if (rc)
+ return rc;
+
+ if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
+ CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
+ req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
+ req_off_ctx_list(req, ctx);
+ sptlrpc_req_replace_dead_ctx(req);
+ ctx = req->rq_cli_ctx;
+ }
+ sptlrpc_sec_put(sec);
+
+ if (cli_ctx_is_eternal(ctx))
+ return 0;
+
+ if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
+ LASSERT(ctx->cc_ops->refresh);
+ ctx->cc_ops->refresh(ctx);
+ }
+ LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
+
+ LASSERT(ctx->cc_ops->validate);
+ if (ctx->cc_ops->validate(ctx) == 0) {
+ req_off_ctx_list(req, ctx);
+ return 0;
+ }
+
+ if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
+ spin_lock(&req->rq_lock);
+ req->rq_err = 1;
+ spin_unlock(&req->rq_lock);
+ req_off_ctx_list(req, ctx);
+ return -EPERM;
+ }
+
+ /*
+ * There's a subtle issue when resending RPCs. Suppose the following
+ * situation:
+ * 1. the request was sent to the server.
+ * 2. recovery was kicked off; after it finished the request was
+ * marked as resent.
+ * 3. the request is resent.
+ * 4. the old reply from the server is received; we accept and verify
+ * the reply. This has to succeed, otherwise the error will be
+ * seen by the application.
+ * 5. the new reply from the server is received and dropped by LNet.
+ *
+ * Note the xid of the old & new request is the same. We can't simply
+ * change the xid for the resent request because the server relies on
+ * it for reply reconstruction.
+ *
+ * Commonly the original context should be up to date because we
+ * have a nice expiry time; the server will keep its context because
+ * we at least hold a ref on the old context, which prevents the
+ * context-destroying RPC from being sent. So the server can still
+ * accept the request and finish the RPC. But if that's not the case:
+ * 1. If the server side context has been trimmed, a NO_CONTEXT will
+ * be returned, and gss_cli_ctx_verify/unseal will switch to the new
+ * context by force.
+ * 2. If the current context was never refreshed, then we are fine: we
+ * never really sent a request with the old context before.
+ */
+ if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
+ unlikely(req->rq_reqmsg) &&
+ lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
+ req_off_ctx_list(req, ctx);
+ return 0;
+ }
+
+ if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
+ req_off_ctx_list(req, ctx);
+ /*
+ * don't switch ctx if import was deactivated
+ */
+ if (req->rq_import->imp_deactive) {
+ spin_lock(&req->rq_lock);
+ req->rq_err = 1;
+ spin_unlock(&req->rq_lock);
+ return -EINTR;
+ }
+
+ rc = sptlrpc_req_replace_dead_ctx(req);
+ if (rc) {
+ LASSERT(ctx == req->rq_cli_ctx);
+ CERROR("req %p: failed to replace dead ctx %p: %d\n",
+ req, ctx, rc);
+ spin_lock(&req->rq_lock);
+ req->rq_err = 1;
+ spin_unlock(&req->rq_lock);
+ return rc;
+ }
+
+ ctx = req->rq_cli_ctx;
+ goto again;
+ }
+
+ /*
+ * Now we're sure this context is in the middle of an upcall; add
+ * ourselves to the waiting list
+ */
+ spin_lock(&ctx->cc_lock);
+ if (list_empty(&req->rq_ctx_chain))
+ list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
+ spin_unlock(&ctx->cc_lock);
+
+ if (timeout < 0)
+ return -EWOULDBLOCK;
+
+ /* Clear any flags that may be present from previous sends */
+ LASSERT(req->rq_receiving_reply == 0);
+ spin_lock(&req->rq_lock);
+ req->rq_err = 0;
+ req->rq_timedout = 0;
+ req->rq_resend = 0;
+ req->rq_restart = 0;
+ spin_unlock(&req->rq_lock);
+
+ lwi = LWI_TIMEOUT_INTR(timeout * HZ, ctx_refresh_timeout,
+ ctx_refresh_interrupt, req);
+ rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);
+
+ /*
+ * following cases could lead us here:
+ * - successfully refreshed;
+ * - interrupted;
+ * - timed out, and we don't want to recover from the failure;
+ * - timed out, and woken up when recovery finished;
+ * - someone else marked this ctx dead by force;
+ * - someone invalidated the req and called ptlrpc_client_wake_req(),
+ * e.g. ptlrpc_abort_inflight();
+ */
+ if (!cli_ctx_is_refreshed(ctx)) {
+ /* timed out or interrupted */
+ req_off_ctx_list(req, ctx);
+
+ LASSERT(rc != 0);
+ return rc;
+ }
+
+ goto again;
+}
+
+/**
+ * Initialize flavor settings for \a req, according to \a opcode.
+ *
+ * \note this could be called in two situations:
+ * - new request from ptlrpc_pre_req(), with proper @opcode
+ * - old request which changed ctx in the middle, with @opcode == 0
+ */
+void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
+{
+ struct ptlrpc_sec *sec;
+
+ LASSERT(req->rq_import);
+ LASSERT(req->rq_cli_ctx);
+ LASSERT(req->rq_cli_ctx->cc_sec);
+ LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);
+
+ /* special security flags according to opcode */
+ switch (opcode) {
+ case OST_READ:
+ case MDS_READPAGE:
+ case MGS_CONFIG_READ:
+ case OBD_IDX_READ:
+ req->rq_bulk_read = 1;
+ break;
+ case OST_WRITE:
+ case MDS_WRITEPAGE:
+ req->rq_bulk_write = 1;
+ break;
+ case SEC_CTX_INIT:
+ req->rq_ctx_init = 1;
+ break;
+ case SEC_CTX_FINI:
+ req->rq_ctx_fini = 1;
+ break;
+ case 0:
+ /* init/fini rpc won't be resent, so it can't be here */
+ LASSERT(req->rq_ctx_init == 0);
+ LASSERT(req->rq_ctx_fini == 0);
+
+ /* cleanup flags, which should be recalculated */
+ req->rq_pack_udesc = 0;
+ req->rq_pack_bulk = 0;
+ break;
+ }
+
+ sec = req->rq_cli_ctx->cc_sec;
+
+ spin_lock(&sec->ps_lock);
+ req->rq_flvr = sec->ps_flvr;
+ spin_unlock(&sec->ps_lock);
+
+ /* force SVC_NULL for context initiation rpc, SVC_INTG for context
+ * destruction rpc */
+ if (unlikely(req->rq_ctx_init))
+ flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
+ else if (unlikely(req->rq_ctx_fini))
+ flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);
+
+ /* user descriptor flag, null security can't do it anyway */
+ if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
+ (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL))
+ req->rq_pack_udesc = 1;
+
+ /* bulk security flag */
+ if ((req->rq_bulk_read || req->rq_bulk_write) &&
+ sptlrpc_flavor_has_bulk(&req->rq_flvr))
+ req->rq_pack_bulk = 1;
+}
+
+void sptlrpc_request_out_callback(struct ptlrpc_request *req)
+{
+ if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
+ return;
+
+ LASSERT(req->rq_clrbuf);
+ if (req->rq_pool || !req->rq_reqbuf)
+ return;
+
+ OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
+ req->rq_reqbuf = NULL;
+ req->rq_reqbuf_len = 0;
+}
+
+/**
+ * Given an import \a imp, check whether current user has a valid context
+ * or not. We may create a new context and try to refresh it, retrying
+ * repeatedly in case of non-fatal errors. Returning 0 means success.
+ */
+int sptlrpc_import_check_ctx(struct obd_import *imp)
+{
+ struct ptlrpc_sec *sec;
+ struct ptlrpc_cli_ctx *ctx;
+ struct ptlrpc_request *req = NULL;
+ int rc;
+
+ might_sleep();
+
+ sec = sptlrpc_import_sec_ref(imp);
+ ctx = get_my_ctx(sec);
+ sptlrpc_sec_put(sec);
+
+ if (!ctx)
+ return -ENOMEM;
+
+ if (cli_ctx_is_eternal(ctx) ||
+ ctx->cc_ops->validate(ctx) == 0) {
+ sptlrpc_cli_ctx_put(ctx, 1);
+ return 0;
+ }
+
+ if (cli_ctx_is_error(ctx)) {
+ sptlrpc_cli_ctx_put(ctx, 1);
+ return -EACCES;
+ }
+
+ OBD_ALLOC_PTR(req);
+ if (!req)
+ return -ENOMEM;
+
+ spin_lock_init(&req->rq_lock);
+ atomic_set(&req->rq_refcount, 10000);
+ INIT_LIST_HEAD(&req->rq_ctx_chain);
+ init_waitqueue_head(&req->rq_reply_waitq);
+ init_waitqueue_head(&req->rq_set_waitq);
+ req->rq_import = imp;
+ req->rq_flvr = sec->ps_flvr;
+ req->rq_cli_ctx = ctx;
+
+ rc = sptlrpc_req_refresh_ctx(req, 0);
+ LASSERT(list_empty(&req->rq_ctx_chain));
+ sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
+ OBD_FREE_PTR(req);
+
+ return rc;
+}
+
+/**
+ * Used by ptlrpc client, to perform the pre-defined security transformation
+ * upon the request message of \a req. After this function called,
+ * req->rq_reqmsg is still accessible as clear text.
+ */
+int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
+{
+ struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
+ int rc = 0;
+
+ LASSERT(ctx);
+ LASSERT(ctx->cc_sec);
+ LASSERT(req->rq_reqbuf || req->rq_clrbuf);
+
+ /* we wrap the bulk request here because now we can be sure
+ * the context is up to date.
+ */
+ if (req->rq_bulk) {
+ rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
+ if (rc)
+ return rc;
+ }
+
+ switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
+ case SPTLRPC_SVC_NULL:
+ case SPTLRPC_SVC_AUTH:
+ case SPTLRPC_SVC_INTG:
+ LASSERT(ctx->cc_ops->sign);
+ rc = ctx->cc_ops->sign(ctx, req);
+ break;
+ case SPTLRPC_SVC_PRIV:
+ LASSERT(ctx->cc_ops->seal);
+ rc = ctx->cc_ops->seal(ctx, req);
+ break;
+ default:
+ LBUG();
+ }
+
+ if (rc == 0) {
+ LASSERT(req->rq_reqdata_len);
+ LASSERT(req->rq_reqdata_len % 8 == 0);
+ LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
+ }
+
+ return rc;
+}
+
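+/*
+ * Common helper for both normal and early reply processing: unpack the
+ * message header in req->rq_repdata, check that the reply policy matches
+ * the request policy, then verify (NULL/AUTH/INTG) or unseal (PRIV) the
+ * reply according to the flavor, leaving req->rq_repmsg pointing at the
+ * clear-text message.
+ */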
+static int do_cli_unwrap_reply(struct ptlrpc_request *req)
+{
+ struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
+ int rc;
+
+ LASSERT(ctx);
+ LASSERT(ctx->cc_sec);
+ LASSERT(req->rq_repbuf);
+ LASSERT(req->rq_repdata);
+ LASSERT(req->rq_repmsg == NULL);
+
+ req->rq_rep_swab_mask = 0;
+
+ rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len);
+ switch (rc) {
+ case 1:
+ lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
+ case 0:
+ break;
+ default:
+ CERROR("failed unpack reply: x"LPU64"\n", req->rq_xid);
+ return -EPROTO;
+ }
+
+ if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
+ CERROR("replied data length %d too small\n",
+ req->rq_repdata_len);
+ return -EPROTO;
+ }
+
+ if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) !=
+ SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
+ CERROR("reply policy %u doesn't match request policy %u\n",
+ SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr),
+ SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc));
+ return -EPROTO;
+ }
+
+ switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
+ case SPTLRPC_SVC_NULL:
+ case SPTLRPC_SVC_AUTH:
+ case SPTLRPC_SVC_INTG:
+ LASSERT(ctx->cc_ops->verify);
+ rc = ctx->cc_ops->verify(ctx, req);
+ break;
+ case SPTLRPC_SVC_PRIV:
+ LASSERT(ctx->cc_ops->unseal);
+ rc = ctx->cc_ops->unseal(ctx, req);
+ break;
+ default:
+ LBUG();
+ }
+ LASSERT(rc || req->rq_repmsg || req->rq_resend);
+
+ if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
+ !req->rq_ctx_init)
+ req->rq_rep_swab_mask = 0;
+ return rc;
+}
+
+/**
+ * Used by ptlrpc client, to perform security transformation upon the reply
+ * message of \a req. After return successfully, req->rq_repmsg points to
+ * the reply message in clear text.
+ *
+ * \pre the reply buffer should have been un-posted from LNet, so nothing is
+ * going to change.
+ */
+int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
+{
+ LASSERT(req->rq_repbuf);
+ LASSERT(req->rq_repdata == NULL);
+ LASSERT(req->rq_repmsg == NULL);
+ LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);
+
+ if (req->rq_reply_off == 0 &&
+ (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
+ CERROR("real reply with offset 0\n");
+ return -EPROTO;
+ }
+
+ if (req->rq_reply_off % 8 != 0) {
+ CERROR("reply at odd offset %u\n", req->rq_reply_off);
+ return -EPROTO;
+ }
+
+ req->rq_repdata = (struct lustre_msg *)
+ (req->rq_repbuf + req->rq_reply_off);
+ req->rq_repdata_len = req->rq_nob_received;
+
+ return do_cli_unwrap_reply(req);
+}
+
+/**
+ * Used by ptlrpc client, to perform security transformation upon the early
+ * reply message of \a req. We expect rq_reply_off to be 0, and
+ * rq_nob_received to be the early reply size.
+ *
+ * Because the receive buffer might still be posted, the reply data might be
+ * changed at any time, whether we're holding rq_lock or not. For this reason
+ * we allocate a separate ptlrpc_request and reply buffer for early reply
+ * processing.
+ *
+ * \retval 0 success, \a req_ret is filled with a duplicated ptlrpc_request.
+ * Later the caller must call sptlrpc_cli_finish_early_reply() on the returned
+ * \a *req_ret to release it.
+ * \retval -ev error number, and \a req_ret will not be set.
+ */
+int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
+ struct ptlrpc_request **req_ret)
+{
+ struct ptlrpc_request *early_req;
+ char *early_buf;
+ int early_bufsz, early_size;
+ int rc;
+
+ OBD_ALLOC_PTR(early_req);
+ if (early_req == NULL)
+ return -ENOMEM;
+
+ early_size = req->rq_nob_received;
+ early_bufsz = size_roundup_power2(early_size);
+ OBD_ALLOC_LARGE(early_buf, early_bufsz);
+ if (early_buf == NULL)
+ GOTO(err_req, rc = -ENOMEM);
+
+	/* sanity checks and copy data out, do it inside the spinlock */
+ spin_lock(&req->rq_lock);
+
+ if (req->rq_replied) {
+ spin_unlock(&req->rq_lock);
+ GOTO(err_buf, rc = -EALREADY);
+ }
+
+ LASSERT(req->rq_repbuf);
+ LASSERT(req->rq_repdata == NULL);
+ LASSERT(req->rq_repmsg == NULL);
+
+ if (req->rq_reply_off != 0) {
+ CERROR("early reply with offset %u\n", req->rq_reply_off);
+ spin_unlock(&req->rq_lock);
+ GOTO(err_buf, rc = -EPROTO);
+ }
+
+ if (req->rq_nob_received != early_size) {
+		/* even if another early reply arrived, the size should be
+		 * the same */
+ CERROR("data size has changed from %u to %u\n",
+ early_size, req->rq_nob_received);
+ spin_unlock(&req->rq_lock);
+ GOTO(err_buf, rc = -EINVAL);
+ }
+
+ if (req->rq_nob_received < sizeof(struct lustre_msg)) {
+ CERROR("early reply length %d too small\n",
+ req->rq_nob_received);
+ spin_unlock(&req->rq_lock);
+ GOTO(err_buf, rc = -EALREADY);
+ }
+
+ memcpy(early_buf, req->rq_repbuf, early_size);
+ spin_unlock(&req->rq_lock);
+
+ spin_lock_init(&early_req->rq_lock);
+ early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
+ early_req->rq_flvr = req->rq_flvr;
+ early_req->rq_repbuf = early_buf;
+ early_req->rq_repbuf_len = early_bufsz;
+ early_req->rq_repdata = (struct lustre_msg *) early_buf;
+ early_req->rq_repdata_len = early_size;
+ early_req->rq_early = 1;
+ early_req->rq_reqmsg = req->rq_reqmsg;
+
+ rc = do_cli_unwrap_reply(early_req);
+ if (rc) {
+ DEBUG_REQ(D_ADAPTTO, early_req,
+ "error %d unwrap early reply", rc);
+ GOTO(err_ctx, rc);
+ }
+
+ LASSERT(early_req->rq_repmsg);
+ *req_ret = early_req;
+ return 0;
+
+err_ctx:
+ sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
+err_buf:
+ OBD_FREE_LARGE(early_buf, early_bufsz);
+err_req:
+ OBD_FREE_PTR(early_req);
+ return rc;
+}
+
+/**
+ * Used by ptlrpc client, to release a processed early reply \a early_req.
+ *
+ * \pre \a early_req was obtained from calling sptlrpc_cli_unwrap_early_reply().
+ */
+void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
+{
+ LASSERT(early_req->rq_repbuf);
+ LASSERT(early_req->rq_repdata);
+ LASSERT(early_req->rq_repmsg);
+
+ sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
+ OBD_FREE_LARGE(early_req->rq_repbuf, early_req->rq_repbuf_len);
+ OBD_FREE_PTR(early_req);
+}
+
+/**************************************************
+ * sec ID *
+ **************************************************/
+
+/*
+ * "fixed" sec (e.g. null) use sec_id < 0
+ */
+static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1);
+
+int sptlrpc_get_next_secid(void)
+{
+ return atomic_inc_return(&sptlrpc_sec_id);
+}
+EXPORT_SYMBOL(sptlrpc_get_next_secid);
+
+/**************************************************
+ * client side high-level security APIs *
+ **************************************************/
+
+static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid,
+ int grace, int force)
+{
+ struct ptlrpc_sec_policy *policy = sec->ps_policy;
+
+ LASSERT(policy->sp_cops);
+ LASSERT(policy->sp_cops->flush_ctx_cache);
+
+ return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force);
+}
+
+static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
+{
+ struct ptlrpc_sec_policy *policy = sec->ps_policy;
+
+ LASSERT_ATOMIC_ZERO(&sec->ps_refcount);
+ LASSERT_ATOMIC_ZERO(&sec->ps_nctx);
+ LASSERT(policy->sp_cops->destroy_sec);
+
+	CDEBUG(D_SEC, "%s@%p: being destroyed\n", sec->ps_policy->sp_name, sec);
+
+ policy->sp_cops->destroy_sec(sec);
+ sptlrpc_policy_put(policy);
+}
+
+void sptlrpc_sec_destroy(struct ptlrpc_sec *sec)
+{
+ sec_cop_destroy_sec(sec);
+}
+EXPORT_SYMBOL(sptlrpc_sec_destroy);
+
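+/*
+ * If the policy implements kill_sec, notify it that \a sec is about to die
+ * and flush all of its cached contexts; the caller's reference on \a sec is
+ * not dropped here.
+ */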
+static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
+{
+ LASSERT_ATOMIC_POS(&sec->ps_refcount);
+
+ if (sec->ps_policy->sp_cops->kill_sec) {
+ sec->ps_policy->sp_cops->kill_sec(sec);
+
+ sec_cop_flush_ctx_cache(sec, -1, 1, 1);
+ }
+}
+
+struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
+{
+ if (sec)
+ atomic_inc(&sec->ps_refcount);
+
+ return sec;
+}
+EXPORT_SYMBOL(sptlrpc_sec_get);
+
+void sptlrpc_sec_put(struct ptlrpc_sec *sec)
+{
+ if (sec) {
+ LASSERT_ATOMIC_POS(&sec->ps_refcount);
+
+ if (atomic_dec_and_test(&sec->ps_refcount)) {
+ sptlrpc_gc_del_sec(sec);
+ sec_cop_destroy_sec(sec);
+ }
+ }
+}
+EXPORT_SYMBOL(sptlrpc_sec_put);
+
+/*
+ * the policy module is responsible for taking a reference on the import
+ */
+static
+struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
+ struct ptlrpc_svc_ctx *svc_ctx,
+ struct sptlrpc_flavor *sf,
+ enum lustre_sec_part sp)
+{
+ struct ptlrpc_sec_policy *policy;
+ struct ptlrpc_sec *sec;
+ char str[32];
+
+ if (svc_ctx) {
+ LASSERT(imp->imp_dlm_fake == 1);
+
+ CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
+ imp->imp_obd->obd_type->typ_name,
+ imp->imp_obd->obd_name,
+ sptlrpc_flavor2name(sf, str, sizeof(str)));
+
+ policy = sptlrpc_policy_get(svc_ctx->sc_policy);
+ sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
+ } else {
+ LASSERT(imp->imp_dlm_fake == 0);
+
+ CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
+ imp->imp_obd->obd_type->typ_name,
+ imp->imp_obd->obd_name,
+ sptlrpc_flavor2name(sf, str, sizeof(str)));
+
+ policy = sptlrpc_wireflavor2policy(sf->sf_rpc);
+ if (!policy) {
+ CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
+ return NULL;
+ }
+ }
+
+ sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
+ if (sec) {
+ atomic_inc(&sec->ps_refcount);
+
+ sec->ps_part = sp;
+
+ if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
+ sptlrpc_gc_add_sec(sec);
+ } else {
+ sptlrpc_policy_put(policy);
+ }
+
+ return sec;
+}
+
+struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
+{
+ struct ptlrpc_sec *sec;
+
+ spin_lock(&imp->imp_lock);
+ sec = sptlrpc_sec_get(imp->imp_sec);
+ spin_unlock(&imp->imp_lock);
+
+ return sec;
+}
+EXPORT_SYMBOL(sptlrpc_import_sec_ref);
+
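+/*
+ * Install \a sec as the current sec of \a imp; any previously installed
+ * sec is killed and the reference held by the import is dropped.
+ */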
+static void sptlrpc_import_sec_install(struct obd_import *imp,
+ struct ptlrpc_sec *sec)
+{
+ struct ptlrpc_sec *old_sec;
+
+ LASSERT_ATOMIC_POS(&sec->ps_refcount);
+
+ spin_lock(&imp->imp_lock);
+ old_sec = imp->imp_sec;
+ imp->imp_sec = sec;
+ spin_unlock(&imp->imp_lock);
+
+ if (old_sec) {
+ sptlrpc_sec_kill(old_sec);
+
+ /* balance the ref taken by this import */
+ sptlrpc_sec_put(old_sec);
+ }
+}
+
+static inline
+int flavor_equal(struct sptlrpc_flavor *sf1, struct sptlrpc_flavor *sf2)
+{
+ return (memcmp(sf1, sf2, sizeof(*sf1)) == 0);
+}
+
+static inline
+void flavor_copy(struct sptlrpc_flavor *dst, struct sptlrpc_flavor *src)
+{
+ *dst = *src;
+}
+
+static void sptlrpc_import_sec_adapt_inplace(struct obd_import *imp,
+ struct ptlrpc_sec *sec,
+ struct sptlrpc_flavor *sf)
+{
+ char str1[32], str2[32];
+
+ if (sec->ps_flvr.sf_flags != sf->sf_flags)
+ CDEBUG(D_SEC, "changing sec flags: %s -> %s\n",
+ sptlrpc_secflags2str(sec->ps_flvr.sf_flags,
+ str1, sizeof(str1)),
+ sptlrpc_secflags2str(sf->sf_flags,
+ str2, sizeof(str2)));
+
+ spin_lock(&sec->ps_lock);
+ flavor_copy(&sec->ps_flvr, sf);
+ spin_unlock(&sec->ps_lock);
+}
+
+/**
+ * Get an appropriate ptlrpc_sec for \a imp, according to the current
+ * configuration. When called, imp->imp_sec may or may not be NULL.
+ *
+ * - regular import: \a svc_ctx should be NULL and \a flvr is ignored;
+ * - reverse import: \a svc_ctx and \a flvr are obtained from incoming request.
+ */
+int sptlrpc_import_sec_adapt(struct obd_import *imp,
+ struct ptlrpc_svc_ctx *svc_ctx,
+ struct sptlrpc_flavor *flvr)
+{
+ struct ptlrpc_connection *conn;
+ struct sptlrpc_flavor sf;
+ struct ptlrpc_sec *sec, *newsec;
+ enum lustre_sec_part sp;
+ char str[24];
+ int rc = 0;
+
+ might_sleep();
+
+ if (imp == NULL)
+ return 0;
+
+ conn = imp->imp_connection;
+
+ if (svc_ctx == NULL) {
+ struct client_obd *cliobd = &imp->imp_obd->u.cli;
+ /*
+		 * normal import: determine the flavor from the rule set,
+		 * except for the mgc, whose flavor is predetermined.
+ */
+ if (cliobd->cl_sp_me == LUSTRE_SP_MGC)
+ sf = cliobd->cl_flvr_mgc;
+ else
+ sptlrpc_conf_choose_flavor(cliobd->cl_sp_me,
+ cliobd->cl_sp_to,
+ &cliobd->cl_target_uuid,
+ conn->c_self, &sf);
+
+ sp = imp->imp_obd->u.cli.cl_sp_me;
+ } else {
+		/* reverse import, determine flavor from incoming request */
+ sf = *flvr;
+
+ if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
+ sf.sf_flags = PTLRPC_SEC_FL_REVERSE |
+ PTLRPC_SEC_FL_ROOTONLY;
+
+ sp = sptlrpc_target_sec_part(imp->imp_obd);
+ }
+
+ sec = sptlrpc_import_sec_ref(imp);
+ if (sec) {
+ char str2[24];
+
+ if (flavor_equal(&sf, &sec->ps_flvr))
+ GOTO(out, rc);
+
+ CDEBUG(D_SEC, "import %s->%s: changing flavor %s -> %s\n",
+ imp->imp_obd->obd_name,
+ obd_uuid2str(&conn->c_remote_uuid),
+ sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)),
+ sptlrpc_flavor2name(&sf, str2, sizeof(str2)));
+
+ if (SPTLRPC_FLVR_POLICY(sf.sf_rpc) ==
+ SPTLRPC_FLVR_POLICY(sec->ps_flvr.sf_rpc) &&
+ SPTLRPC_FLVR_MECH(sf.sf_rpc) ==
+ SPTLRPC_FLVR_MECH(sec->ps_flvr.sf_rpc)) {
+ sptlrpc_import_sec_adapt_inplace(imp, sec, &sf);
+ GOTO(out, rc);
+ }
+ } else if (SPTLRPC_FLVR_BASE(sf.sf_rpc) !=
+ SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL)) {
+ CDEBUG(D_SEC, "import %s->%s netid %x: select flavor %s\n",
+ imp->imp_obd->obd_name,
+ obd_uuid2str(&conn->c_remote_uuid),
+ LNET_NIDNET(conn->c_self),
+ sptlrpc_flavor2name(&sf, str, sizeof(str)));
+ }
+
+ mutex_lock(&imp->imp_sec_mutex);
+
+ newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
+ if (newsec) {
+ sptlrpc_import_sec_install(imp, newsec);
+ } else {
+ CERROR("import %s->%s: failed to create new sec\n",
+ imp->imp_obd->obd_name,
+ obd_uuid2str(&conn->c_remote_uuid));
+ rc = -EPERM;
+ }
+
+ mutex_unlock(&imp->imp_sec_mutex);
+out:
+ sptlrpc_sec_put(sec);
+ return rc;
+}
+
+void sptlrpc_import_sec_put(struct obd_import *imp)
+{
+ if (imp->imp_sec) {
+ sptlrpc_sec_kill(imp->imp_sec);
+
+ sptlrpc_sec_put(imp->imp_sec);
+ imp->imp_sec = NULL;
+ }
+}
+
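+/*
+ * Look up the import's current sec and ask its policy to flush the
+ * context(s) of \a uid from the context cache, with the given grace/force
+ * flags.
+ */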
+static void import_flush_ctx_common(struct obd_import *imp,
+ uid_t uid, int grace, int force)
+{
+ struct ptlrpc_sec *sec;
+
+ if (imp == NULL)
+ return;
+
+ sec = sptlrpc_import_sec_ref(imp);
+ if (sec == NULL)
+ return;
+
+ sec_cop_flush_ctx_cache(sec, uid, grace, force);
+ sptlrpc_sec_put(sec);
+}
+
+void sptlrpc_import_flush_root_ctx(struct obd_import *imp)
+{
+	/* it's important to use grace mode, see the explanation in
+	 * sptlrpc_req_refresh_ctx() */
+ import_flush_ctx_common(imp, 0, 1, 1);
+}
+
+void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
+{
+ import_flush_ctx_common(imp, from_kuid(&init_user_ns, current_uid()),
+ 1, 1);
+}
+EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
+
+void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
+{
+ import_flush_ctx_common(imp, -1, 1, 1);
+}
+EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
+
+/**
+ * Used by the ptlrpc client to allocate the request buffer of \a req. Upon
+ * successful return, req->rq_reqmsg points to a buffer of size \a msgsize.
+ */
+int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
+{
+ struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
+ struct ptlrpc_sec_policy *policy;
+ int rc;
+
+ LASSERT(ctx);
+ LASSERT(ctx->cc_sec);
+ LASSERT(ctx->cc_sec->ps_policy);
+ LASSERT(req->rq_reqmsg == NULL);
+ LASSERT_ATOMIC_POS(&ctx->cc_refcount);
+
+ policy = ctx->cc_sec->ps_policy;
+ rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
+ if (!rc) {
+ LASSERT(req->rq_reqmsg);
+ LASSERT(req->rq_reqbuf || req->rq_clrbuf);
+
+ /* zeroing preallocated buffer */
+ if (req->rq_pool)
+ memset(req->rq_reqmsg, 0, msgsize);
+ }
+
+ return rc;
+}
+
+/**
+ * Used by ptlrpc client to free request buffer of \a req. After this
+ * req->rq_reqmsg is set to NULL and should not be accessed anymore.
+ */
+void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
+{
+ struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
+ struct ptlrpc_sec_policy *policy;
+
+ LASSERT(ctx);
+ LASSERT(ctx->cc_sec);
+ LASSERT(ctx->cc_sec->ps_policy);
+ LASSERT_ATOMIC_POS(&ctx->cc_refcount);
+
+ if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
+ return;
+
+ policy = ctx->cc_sec->ps_policy;
+ policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
+ req->rq_reqmsg = NULL;
+}
+
+/*
+ * NOTE caller must guarantee the buffer size is enough for the enlargement
+ */
+void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
+ int segment, int newsize)
+{
+ void *src, *dst;
+ int oldsize, oldmsg_size, movesize;
+
+ LASSERT(segment < msg->lm_bufcount);
+ LASSERT(msg->lm_buflens[segment] <= newsize);
+
+ if (msg->lm_buflens[segment] == newsize)
+ return;
+
+ /* nothing to do if we are enlarging the last segment */
+ if (segment == msg->lm_bufcount - 1) {
+ msg->lm_buflens[segment] = newsize;
+ return;
+ }
+
+ oldsize = msg->lm_buflens[segment];
+
+ src = lustre_msg_buf(msg, segment + 1, 0);
+ msg->lm_buflens[segment] = newsize;
+ dst = lustre_msg_buf(msg, segment + 1, 0);
+ msg->lm_buflens[segment] = oldsize;
+
+ /* move from segment + 1 to end segment */
+ LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
+ oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
+ movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
+ LASSERT(movesize >= 0);
+
+ if (movesize)
+ memmove(dst, src, movesize);
+
+	/* note we don't clear the areas where old data lives, not secret */
+
+ /* finally set new segment size */
+ msg->lm_buflens[segment] = newsize;
+}
+EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
+
+/**
+ * Used by ptlrpc client to enlarge the \a segment of request message pointed
+ * by req->rq_reqmsg to size \a newsize; all previously filled-in data will be
+ * preserved after the enlargement. This must be called after the original
+ * request buffer has been allocated.
+ *
+ * \note after this is called, rq_reqmsg and rq_reqlen might have been changed,
+ * so the caller should refresh its local pointers if needed.
+ */
+int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
+ int segment, int newsize)
+{
+ struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
+ struct ptlrpc_sec_cops *cops;
+ struct lustre_msg *msg = req->rq_reqmsg;
+
+ LASSERT(ctx);
+ LASSERT(msg);
+ LASSERT(msg->lm_bufcount > segment);
+ LASSERT(msg->lm_buflens[segment] <= newsize);
+
+ if (msg->lm_buflens[segment] == newsize)
+ return 0;
+
+ cops = ctx->cc_sec->ps_policy->sp_cops;
+ LASSERT(cops->enlarge_reqbuf);
+ return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
+}
+EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
+
+/**
+ * Used by ptlrpc client to allocate reply buffer of \a req.
+ *
+ * \note After this, req->rq_repmsg is still not accessible.
+ */
+int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
+{
+ struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
+ struct ptlrpc_sec_policy *policy;
+
+ LASSERT(ctx);
+ LASSERT(ctx->cc_sec);
+ LASSERT(ctx->cc_sec->ps_policy);
+
+ if (req->rq_repbuf)
+ return 0;
+
+ policy = ctx->cc_sec->ps_policy;
+ return policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize);
+}
+
+/**
+ * Used by ptlrpc client to free reply buffer of \a req. After this
+ * req->rq_repmsg is set to NULL and should not be accessed anymore.
+ */
+void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
+{
+ struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
+ struct ptlrpc_sec_policy *policy;
+
+ LASSERT(ctx);
+ LASSERT(ctx->cc_sec);
+ LASSERT(ctx->cc_sec->ps_policy);
+ LASSERT_ATOMIC_POS(&ctx->cc_refcount);
+
+ if (req->rq_repbuf == NULL)
+ return;
+ LASSERT(req->rq_repbuf_len);
+
+ policy = ctx->cc_sec->ps_policy;
+ policy->sp_cops->free_repbuf(ctx->cc_sec, req);
+ req->rq_repmsg = NULL;
+}
+
+int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
+ struct ptlrpc_cli_ctx *ctx)
+{
+ struct ptlrpc_sec_policy *policy = ctx->cc_sec->ps_policy;
+
+ if (!policy->sp_cops->install_rctx)
+ return 0;
+ return policy->sp_cops->install_rctx(imp, ctx->cc_sec, ctx);
+}
+
+int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
+ struct ptlrpc_svc_ctx *ctx)
+{
+ struct ptlrpc_sec_policy *policy = ctx->sc_policy;
+
+ if (!policy->sp_sops->install_rctx)
+ return 0;
+ return policy->sp_sops->install_rctx(imp, ctx);
+}
+
+/****************************************
+ * server side security *
+ ****************************************/
+
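+/*
+ * Check whether the flavor carried by \a req is acceptable under the
+ * expected flavor \a exp: either an exact match (or SPTLRPC_FLVR_ANY), or,
+ * for context init/fini rpcs, a match on policy and mechanism only.
+ */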
+static int flavor_allowed(struct sptlrpc_flavor *exp,
+ struct ptlrpc_request *req)
+{
+ struct sptlrpc_flavor *flvr = &req->rq_flvr;
+
+ if (exp->sf_rpc == SPTLRPC_FLVR_ANY || exp->sf_rpc == flvr->sf_rpc)
+ return 1;
+
+ if ((req->rq_ctx_init || req->rq_ctx_fini) &&
+ SPTLRPC_FLVR_POLICY(exp->sf_rpc) ==
+ SPTLRPC_FLVR_POLICY(flvr->sf_rpc) &&
+ SPTLRPC_FLVR_MECH(exp->sf_rpc) == SPTLRPC_FLVR_MECH(flvr->sf_rpc))
+ return 1;
+
+ return 0;
+}
+
+#define EXP_FLVR_UPDATE_EXPIRE (OBD_TIMEOUT_DEFAULT + 10)
+
+/**
+ * Given an export \a exp, check whether the flavor of incoming \a req
+ * is allowed by the export \a exp. The main logic is about taking care of
+ * changing configurations. Returning 0 means success.
+ */
+int sptlrpc_target_export_check(struct obd_export *exp,
+ struct ptlrpc_request *req)
+{
+ struct sptlrpc_flavor flavor;
+
+ if (exp == NULL)
+ return 0;
+
+ /* client side export has no imp_reverse, skip
+	 * FIXME maybe we should check the flavor as well??? */
+ if (exp->exp_imp_reverse == NULL)
+ return 0;
+
+ /* don't care about ctx fini rpc */
+ if (req->rq_ctx_fini)
+ return 0;
+
+ spin_lock(&exp->exp_lock);
+
+	/* if the flavor just changed (exp->exp_flvr_changed != 0), we wait for
+	 * the first req with the new flavor, then treat it as the current
+	 * flavor and adapt the reverse sec according to it.
+	 * note the first rpc with the new flavor might not be with a root ctx,
+	 * in which case delay the sec_adapt by leaving exp_flvr_adapt == 1. */
+ if (unlikely(exp->exp_flvr_changed) &&
+ flavor_allowed(&exp->exp_flvr_old[1], req)) {
+		/* make the new flavor the "current" one, and the old ones
+		 * about-to-expire */
+ CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
+ exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
+ flavor = exp->exp_flvr_old[1];
+ exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
+ exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
+ exp->exp_flvr_old[0] = exp->exp_flvr;
+ exp->exp_flvr_expire[0] = cfs_time_current_sec() +
+ EXP_FLVR_UPDATE_EXPIRE;
+ exp->exp_flvr = flavor;
+
+ /* flavor change finished */
+ exp->exp_flvr_changed = 0;
+ LASSERT(exp->exp_flvr_adapt == 1);
+
+		/* if it's gss, we're only interested in root ctx init */
+ if (req->rq_auth_gss &&
+ !(req->rq_ctx_init &&
+ (req->rq_auth_usr_root || req->rq_auth_usr_mdt ||
+ req->rq_auth_usr_ost))) {
+ spin_unlock(&exp->exp_lock);
+ CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
+ req->rq_auth_gss, req->rq_ctx_init,
+ req->rq_auth_usr_root, req->rq_auth_usr_mdt,
+ req->rq_auth_usr_ost);
+ return 0;
+ }
+
+ exp->exp_flvr_adapt = 0;
+ spin_unlock(&exp->exp_lock);
+
+ return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
+ req->rq_svc_ctx, &flavor);
+ }
+
+	/* if it equals the current flavor, we accept it, but need to
+	 * deal with the reverse sec/ctx */
+ if (likely(flavor_allowed(&exp->exp_flvr, req))) {
+		/* most cases should return here, we're only interested in
+		 * gss root ctx init */
+ if (!req->rq_auth_gss || !req->rq_ctx_init ||
+ (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
+ !req->rq_auth_usr_ost)) {
+ spin_unlock(&exp->exp_lock);
+ return 0;
+ }
+
+		/* if the flavor just changed, we should not proceed; just
+		 * leave it, the current flavor will be discovered and replaced
+		 * shortly, and let _this_ rpc pass through */
+ if (exp->exp_flvr_changed) {
+ LASSERT(exp->exp_flvr_adapt);
+ spin_unlock(&exp->exp_lock);
+ return 0;
+ }
+
+ if (exp->exp_flvr_adapt) {
+ exp->exp_flvr_adapt = 0;
+ CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
+ exp, exp->exp_flvr.sf_rpc,
+ exp->exp_flvr_old[0].sf_rpc,
+ exp->exp_flvr_old[1].sf_rpc);
+ flavor = exp->exp_flvr;
+ spin_unlock(&exp->exp_lock);
+
+ return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
+ req->rq_svc_ctx,
+ &flavor);
+ } else {
+ CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, "
+ "install rvs ctx\n", exp, exp->exp_flvr.sf_rpc,
+ exp->exp_flvr_old[0].sf_rpc,
+ exp->exp_flvr_old[1].sf_rpc);
+ spin_unlock(&exp->exp_lock);
+
+ return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
+ req->rq_svc_ctx);
+ }
+ }
+
+ if (exp->exp_flvr_expire[0]) {
+ if (exp->exp_flvr_expire[0] >= cfs_time_current_sec()) {
+ if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
+ CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
+ "middle one ("CFS_DURATION_T")\n", exp,
+ exp->exp_flvr.sf_rpc,
+ exp->exp_flvr_old[0].sf_rpc,
+ exp->exp_flvr_old[1].sf_rpc,
+ exp->exp_flvr_expire[0] -
+ cfs_time_current_sec());
+ spin_unlock(&exp->exp_lock);
+ return 0;
+ }
+ } else {
+ CDEBUG(D_SEC, "mark middle expired\n");
+ exp->exp_flvr_expire[0] = 0;
+ }
+ CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match middle\n", exp,
+ exp->exp_flvr.sf_rpc,
+ exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
+ req->rq_flvr.sf_rpc);
+ }
+
+	/* now it doesn't match the current flavor; the only chance to accept
+	 * it is a match against an old flavor which has not expired. */
+ if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
+ if (exp->exp_flvr_expire[1] >= cfs_time_current_sec()) {
+ if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
+ CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
+ "oldest one ("CFS_DURATION_T")\n", exp,
+ exp->exp_flvr.sf_rpc,
+ exp->exp_flvr_old[0].sf_rpc,
+ exp->exp_flvr_old[1].sf_rpc,
+ exp->exp_flvr_expire[1] -
+ cfs_time_current_sec());
+ spin_unlock(&exp->exp_lock);
+ return 0;
+ }
+ } else {
+ CDEBUG(D_SEC, "mark oldest expired\n");
+ exp->exp_flvr_expire[1] = 0;
+ }
+ CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match found\n",
+ exp, exp->exp_flvr.sf_rpc,
+ exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
+ req->rq_flvr.sf_rpc);
+ } else {
+ CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n",
+ exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc,
+ exp->exp_flvr_old[1].sf_rpc);
+ }
+
+ spin_unlock(&exp->exp_lock);
+
+ CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with "
+ "unauthorized flavor %x, expect %x|%x(%+ld)|%x(%+ld)\n",
+ exp, exp->exp_obd->obd_name,
+ req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
+ req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost,
+ req->rq_flvr.sf_rpc,
+ exp->exp_flvr.sf_rpc,
+ exp->exp_flvr_old[0].sf_rpc,
+ exp->exp_flvr_expire[0] ?
+ (unsigned long) (exp->exp_flvr_expire[0] -
+ cfs_time_current_sec()) : 0,
+ exp->exp_flvr_old[1].sf_rpc,
+ exp->exp_flvr_expire[1] ?
+ (unsigned long) (exp->exp_flvr_expire[1] -
+ cfs_time_current_sec()) : 0);
+ return -EACCES;
+}
+EXPORT_SYMBOL(sptlrpc_target_export_check);
+
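+/**
+ * Walk all exports of \a obd and, for each connected one, pick the flavor
+ * mandated by the rule set \a rset; if it differs from the export's current
+ * flavor, record it as the pending flavor and mark the export as changed.
+ */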
+void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
+ struct sptlrpc_rule_set *rset)
+{
+ struct obd_export *exp;
+ struct sptlrpc_flavor new_flvr;
+
+ LASSERT(obd);
+
+ spin_lock(&obd->obd_dev_lock);
+
+ list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
+ if (exp->exp_connection == NULL)
+ continue;
+
+		/* note that if this export's flavor had just been updated
+		 * (exp_flvr_changed == 1), this will override the
+		 * previous one. */
+ spin_lock(&exp->exp_lock);
+ sptlrpc_target_choose_flavor(rset, exp->exp_sp_peer,
+ exp->exp_connection->c_peer.nid,
+ &new_flvr);
+ if (exp->exp_flvr_changed ||
+ !flavor_equal(&new_flvr, &exp->exp_flvr)) {
+ exp->exp_flvr_old[1] = new_flvr;
+ exp->exp_flvr_expire[1] = 0;
+ exp->exp_flvr_changed = 1;
+ exp->exp_flvr_adapt = 1;
+
+ CDEBUG(D_SEC, "exp %p (%s): updated flavor %x->%x\n",
+ exp, sptlrpc_part2name(exp->exp_sp_peer),
+ exp->exp_flvr.sf_rpc,
+ exp->exp_flvr_old[1].sf_rpc);
+ }
+ spin_unlock(&exp->exp_lock);
+ }
+
+ spin_unlock(&obd->obd_dev_lock);
+}
+EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);
+
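+/*
+ * Cross-check the peer's claimed Lustre part (rq_sp_from) against the
+ * authenticated identity flags set during gss processing; drop the request
+ * if the claim cannot be backed up. Non-gss requests are passed through
+ * unchanged.
+ */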
+static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
+{
+ /* peer's claim is unreliable unless gss is being used */
+ if (!req->rq_auth_gss || svc_rc == SECSVC_DROP)
+ return svc_rc;
+
+ switch (req->rq_sp_from) {
+ case LUSTRE_SP_CLI:
+ if (req->rq_auth_usr_mdt || req->rq_auth_usr_ost) {
+ DEBUG_REQ(D_ERROR, req, "faked source CLI");
+ svc_rc = SECSVC_DROP;
+ }
+ break;
+ case LUSTRE_SP_MDT:
+ if (!req->rq_auth_usr_mdt) {
+ DEBUG_REQ(D_ERROR, req, "faked source MDT");
+ svc_rc = SECSVC_DROP;
+ }
+ break;
+ case LUSTRE_SP_OST:
+ if (!req->rq_auth_usr_ost) {
+ DEBUG_REQ(D_ERROR, req, "faked source OST");
+ svc_rc = SECSVC_DROP;
+ }
+ break;
+ case LUSTRE_SP_MGS:
+ case LUSTRE_SP_MGC:
+ if (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
+ !req->rq_auth_usr_ost) {
+ DEBUG_REQ(D_ERROR, req, "faked source MGC/MGS");
+ svc_rc = SECSVC_DROP;
+ }
+ break;
+ case LUSTRE_SP_ANY:
+ default:
+ DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
+ svc_rc = SECSVC_DROP;
+ }
+
+ return svc_rc;
+}
+
+/**
+ * Used by ptlrpc server, to perform transformation upon request message of
+ * incoming \a req. This must be the first thing to do with an incoming
+ * request in ptlrpc layer.
+ *
+ * \retval SECSVC_OK success, and req->rq_reqmsg points to the request message
+ * in clear text, of size req->rq_reqlen; also req->rq_svc_ctx is set.
+ * \retval SECSVC_COMPLETE success, the request has been fully processed, and
+ * reply message has been prepared.
+ * \retval SECSVC_DROP failed, this request should be dropped.
+ */
+int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
+{
+ struct ptlrpc_sec_policy *policy;
+ struct lustre_msg *msg = req->rq_reqbuf;
+ int rc;
+
+ LASSERT(msg);
+ LASSERT(req->rq_reqmsg == NULL);
+ LASSERT(req->rq_repmsg == NULL);
+ LASSERT(req->rq_svc_ctx == NULL);
+
+ req->rq_req_swab_mask = 0;
+
+ rc = __lustre_unpack_msg(msg, req->rq_reqdata_len);
+ switch (rc) {
+ case 1:
+ lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
+ case 0:
+ break;
+ default:
+ CERROR("error unpacking request from %s x"LPU64"\n",
+ libcfs_id2str(req->rq_peer), req->rq_xid);
+ return SECSVC_DROP;
+ }
+
+ req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
+ req->rq_sp_from = LUSTRE_SP_ANY;
+ req->rq_auth_uid = -1;
+ req->rq_auth_mapped_uid = -1;
+
+ policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
+ if (!policy) {
+ CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
+ return SECSVC_DROP;
+ }
+
+ LASSERT(policy->sp_sops->accept);
+ rc = policy->sp_sops->accept(req);
+ sptlrpc_policy_put(policy);
+ LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
+ LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
+
+ /*
+	 * if it's not the null flavor (which means an embedded packed msg),
+	 * reset the swab mask for the coming inner msg unpacking.
+ */
+ if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
+ req->rq_req_swab_mask = 0;
+
+ /* sanity check for the request source */
+ rc = sptlrpc_svc_check_from(req, rc);
+ return rc;
+}
+
+/**
+ * Used by the ptlrpc server to allocate the reply buffer for \a req. On
+ * success, req->rq_reply_state is set, and req->rq_reply_state->rs_msg points
+ * to a buffer of \a msglen size.
+ */
+int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
+{
+ struct ptlrpc_sec_policy *policy;
+ struct ptlrpc_reply_state *rs;
+ int rc;
+
+ LASSERT(req->rq_svc_ctx);
+ LASSERT(req->rq_svc_ctx->sc_policy);
+
+ policy = req->rq_svc_ctx->sc_policy;
+ LASSERT(policy->sp_sops->alloc_rs);
+
+ rc = policy->sp_sops->alloc_rs(req, msglen);
+ if (unlikely(rc == -ENOMEM)) {
+ /* failed alloc, try emergency pool */
+ rs = lustre_get_emerg_rs(req->rq_rqbd->rqbd_svcpt);
+ if (rs == NULL)
+ return -ENOMEM;
+
+ req->rq_reply_state = rs;
+ rc = policy->sp_sops->alloc_rs(req, msglen);
+ if (rc) {
+ lustre_put_emerg_rs(rs);
+ req->rq_reply_state = NULL;
+ }
+ }
+
+ LASSERT(rc != 0 ||
+ (req->rq_reply_state && req->rq_reply_state->rs_msg));
+
+ return rc;
+}
+
+/**
+ * Used by ptlrpc server, to perform transformation upon reply message.
+ *
+ * \post req->rq_reply_off is set to the appropriate server-controlled reply
+ * offset.
+ * \post req->rq_repmsg and req->rq_reply_state->rs_msg becomes inaccessible.
+ */
+int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
+{
+ struct ptlrpc_sec_policy *policy;
+ int rc;
+
+ LASSERT(req->rq_svc_ctx);
+ LASSERT(req->rq_svc_ctx->sc_policy);
+
+ policy = req->rq_svc_ctx->sc_policy;
+ LASSERT(policy->sp_sops->authorize);
+
+ rc = policy->sp_sops->authorize(req);
+ LASSERT(rc || req->rq_reply_state->rs_repdata_len);
+
+ return rc;
+}
+
+/**
+ * Used by ptlrpc server, to free reply_state.
+ */
+void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
+{
+ struct ptlrpc_sec_policy *policy;
+ unsigned int prealloc;
+
+ LASSERT(rs->rs_svc_ctx);
+ LASSERT(rs->rs_svc_ctx->sc_policy);
+
+ policy = rs->rs_svc_ctx->sc_policy;
+ LASSERT(policy->sp_sops->free_rs);
+
+ prealloc = rs->rs_prealloc;
+ policy->sp_sops->free_rs(rs);
+
+ if (prealloc)
+ lustre_put_emerg_rs(rs);
+}
+
+void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
+{
+ struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
+
+ if (ctx != NULL)
+ atomic_inc(&ctx->sc_refcount);
+}
+
+void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
+{
+ struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
+
+ if (ctx == NULL)
+ return;
+
+ LASSERT_ATOMIC_POS(&ctx->sc_refcount);
+ if (atomic_dec_and_test(&ctx->sc_refcount)) {
+ if (ctx->sc_policy->sp_sops->free_ctx)
+ ctx->sc_policy->sp_sops->free_ctx(ctx);
+ }
+ req->rq_svc_ctx = NULL;
+}
+
+void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req)
+{
+ struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
+
+ if (ctx == NULL)
+ return;
+
+ LASSERT_ATOMIC_POS(&ctx->sc_refcount);
+ if (ctx->sc_policy->sp_sops->invalidate_ctx)
+ ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
+}
+EXPORT_SYMBOL(sptlrpc_svc_ctx_invalidate);
+
+/****************************************
+ * bulk security *
+ ****************************************/
+
+/**
+ * Perform transformation upon the bulk data pointed to by \a desc. This is
+ * called before transforming the request message.
+ */
+int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc)
+{
+ struct ptlrpc_cli_ctx *ctx;
+
+ LASSERT(req->rq_bulk_read || req->rq_bulk_write);
+
+ if (!req->rq_pack_bulk)
+ return 0;
+
+ ctx = req->rq_cli_ctx;
+ if (ctx->cc_ops->wrap_bulk)
+ return ctx->cc_ops->wrap_bulk(ctx, req, desc);
+ return 0;
+}
+EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
+
+/**
+ * This is called after unwrapping the reply message.
+ * Returns the number of bytes of plain text actually received, or an error
+ * code.
+ */
+int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc,
+ int nob)
+{
+ struct ptlrpc_cli_ctx *ctx;
+ int rc;
+
+ LASSERT(req->rq_bulk_read && !req->rq_bulk_write);
+
+ if (!req->rq_pack_bulk)
+ return desc->bd_nob_transferred;
+
+ ctx = req->rq_cli_ctx;
+ if (ctx->cc_ops->unwrap_bulk) {
+ rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
+ if (rc < 0)
+ return rc;
+ }
+ return desc->bd_nob_transferred;
+}
+EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
+
+/**
+ * This is called after unwrapping the reply message.
+ * Returns 0 on success or an error code.
+ */
+int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc)
+{
+ struct ptlrpc_cli_ctx *ctx;
+ int rc;
+
+ LASSERT(!req->rq_bulk_read && req->rq_bulk_write);
+
+ if (!req->rq_pack_bulk)
+ return 0;
+
+ ctx = req->rq_cli_ctx;
+ if (ctx->cc_ops->unwrap_bulk) {
+ rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
+ if (rc < 0)
+ return rc;
+ }
+
+ /*
+	 * if everything is going right, nob should equal nob_transferred.
+	 * in privacy mode, nob_transferred needs to be adjusted.
+ */
+ if (desc->bd_nob != desc->bd_nob_transferred) {
+		CERROR("nob %d doesn't match transferred nob %d\n",
+		       desc->bd_nob, desc->bd_nob_transferred);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
+
+
+/****************************************
+ * user descriptor helpers *
+ ****************************************/
+
+int sptlrpc_current_user_desc_size(void)
+{
+ int ngroups;
+
+ ngroups = current_ngroups;
+
+ if (ngroups > LUSTRE_MAX_GROUPS)
+ ngroups = LUSTRE_MAX_GROUPS;
+ return sptlrpc_user_desc_size(ngroups);
+}
+EXPORT_SYMBOL(sptlrpc_current_user_desc_size);
+
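+/**
+ * Fill the user descriptor at \a offset of \a msg with the current
+ * process's uid/gid, fsuid/fsgid, capabilities and (possibly truncated)
+ * supplementary group list.
+ */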
+int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
+{
+ struct ptlrpc_user_desc *pud;
+
+ pud = lustre_msg_buf(msg, offset, 0);
+
+ pud->pud_uid = from_kuid(&init_user_ns, current_uid());
+ pud->pud_gid = from_kgid(&init_user_ns, current_gid());
+ pud->pud_fsuid = from_kuid(&init_user_ns, current_fsuid());
+ pud->pud_fsgid = from_kgid(&init_user_ns, current_fsgid());
+ pud->pud_cap = cfs_curproc_cap_pack();
+ pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;
+
+ task_lock(current);
+ if (pud->pud_ngroups > current_ngroups)
+ pud->pud_ngroups = current_ngroups;
+ memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
+ pud->pud_ngroups * sizeof(__u32));
+ task_unlock(current);
+
+ return 0;
+}
+EXPORT_SYMBOL(sptlrpc_pack_user_desc);
+
+int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset, int swabbed)
+{
+ struct ptlrpc_user_desc *pud;
+ int i;
+
+ pud = lustre_msg_buf(msg, offset, sizeof(*pud));
+ if (!pud)
+ return -EINVAL;
+
+ if (swabbed) {
+ __swab32s(&pud->pud_uid);
+ __swab32s(&pud->pud_gid);
+ __swab32s(&pud->pud_fsuid);
+ __swab32s(&pud->pud_fsgid);
+ __swab32s(&pud->pud_cap);
+ __swab32s(&pud->pud_ngroups);
+ }
+
+ if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
+ CERROR("%u groups is too large\n", pud->pud_ngroups);
+ return -EINVAL;
+ }
+
+ if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
+ msg->lm_buflens[offset]) {
+ CERROR("%u groups are claimed but bufsize only %u\n",
+ pud->pud_ngroups, msg->lm_buflens[offset]);
+ return -EINVAL;
+ }
+
+ if (swabbed) {
+ for (i = 0; i < pud->pud_ngroups; i++)
+ __swab32s(&pud->pud_groups[i]);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sptlrpc_unpack_user_desc);
+
+/****************************************
+ * misc helpers *
+ ****************************************/
+
+const char * sec2target_str(struct ptlrpc_sec *sec)
+{
+ if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
+ return "*";
+ if (sec_is_reverse(sec))
+ return "c";
+ return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
+}
+EXPORT_SYMBOL(sec2target_str);
+
+/*
+ * return true if the bulk data is protected
+ */
+int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr)
+{
+ switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
+ case SPTLRPC_BULK_SVC_INTG:
+ case SPTLRPC_BULK_SVC_PRIV:
+ return 1;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL(sptlrpc_flavor_has_bulk);
+
+/****************************************
+ * crypto API helper/alloc blkcipher    *
+ ****************************************/
+
+/****************************************
+ * initialize/finalize *
+ ****************************************/
+
+int sptlrpc_init(void)
+{
+ int rc;
+
+ rwlock_init(&policy_lock);
+
+ rc = sptlrpc_gc_init();
+ if (rc)
+ goto out;
+
+ rc = sptlrpc_conf_init();
+ if (rc)
+ goto out_gc;
+
+ rc = sptlrpc_enc_pool_init();
+ if (rc)
+ goto out_conf;
+
+ rc = sptlrpc_null_init();
+ if (rc)
+ goto out_pool;
+
+ rc = sptlrpc_plain_init();
+ if (rc)
+ goto out_null;
+
+ rc = sptlrpc_lproc_init();
+ if (rc)
+ goto out_plain;
+
+ return 0;
+
+out_plain:
+ sptlrpc_plain_fini();
+out_null:
+ sptlrpc_null_fini();
+out_pool:
+ sptlrpc_enc_pool_fini();
+out_conf:
+ sptlrpc_conf_fini();
+out_gc:
+ sptlrpc_gc_fini();
+out:
+ return rc;
+}
+
+void sptlrpc_fini(void)
+{
+ sptlrpc_lproc_fini();
+ sptlrpc_plain_fini();
+ sptlrpc_null_fini();
+ sptlrpc_enc_pool_fini();
+ sptlrpc_conf_fini();
+ sptlrpc_gc_fini();
+}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
new file mode 100644
index 0000000..6547f46
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -0,0 +1,888 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ptlrpc/sec_bulk.c
+ *
+ * Author: Eric Mei <ericm@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_SEC
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/crypto.h>
+
+#include <obd.h>
+#include <obd_cksum.h>
+#include <obd_class.h>
+#include <obd_support.h>
+#include <lustre_net.h>
+#include <lustre_import.h>
+#include <lustre_dlm.h>
+#include <lustre_sec.h>
+
+#include "ptlrpc_internal.h"
+
+/****************************************
+ * bulk encryption page pools *
+ ****************************************/
+
+
+#define POINTERS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(void *))
+#define PAGES_PER_POOL (POINTERS_PER_PAGE)
+
+#define IDLE_IDX_MAX (100)
+#define IDLE_IDX_WEIGHT (3)
+
+#define CACHE_QUIESCENT_PERIOD (20)
+
+static struct ptlrpc_enc_page_pool {
+ /*
+ * constants
+ */
+ unsigned long epp_max_pages; /* maximum pages can hold, const */
+ unsigned int epp_max_pools; /* number of pools, const */
+
+ /*
+ * wait queue in case of not enough free pages.
+ */
+ wait_queue_head_t epp_waitq; /* waiting threads */
+ unsigned int epp_waitqlen; /* wait queue length */
+	unsigned long epp_pages_short; /* # of pages wanted by in-q users */
+ unsigned int epp_growing:1; /* during adding pages */
+
+ /*
+	 * indicates how idle the pools are, from 0 to MAX_IDLE_IDX.
+	 * this is updated each time pages are taken from the pools, not
+	 * based on wall-clock time, which means the idle_idx may still be
+	 * low even if the system has been idle for a while, as long as no
+	 * activity happened in the pools.
+ */
+ unsigned long epp_idle_idx;
+
+ /* last shrink time due to mem tight */
+ long epp_last_shrink;
+ long epp_last_access;
+
+ /*
+ * in-pool pages bookkeeping
+ */
+ spinlock_t epp_lock; /* protect following fields */
+ unsigned long epp_total_pages; /* total pages in pools */
+ unsigned long epp_free_pages; /* current pages available */
+
+ /*
+ * statistics
+ */
+ unsigned long epp_st_max_pages; /* # of pages ever reached */
+ unsigned int epp_st_grows; /* # of grows */
+ unsigned int epp_st_grow_fails; /* # of add pages failures */
+ unsigned int epp_st_shrinks; /* # of shrinks */
+ unsigned long epp_st_access; /* # of access */
+ unsigned long epp_st_missings; /* # of cache missing */
+ unsigned long epp_st_lowfree; /* lowest free pages reached */
+ unsigned int epp_st_max_wqlen; /* highest waitqueue length */
+	cfs_time_t epp_st_max_wait; /* in jiffies */
+ /*
+ * pointers to pools
+ */
+ struct page ***epp_pools;
+} page_pools;
+
+/*
+ * /proc/fs/lustre/sptlrpc/encrypt_page_pools
+ */
+int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
+{
+ int rc;
+
+ spin_lock(&page_pools.epp_lock);
+
+ rc = seq_printf(m,
+ "physical pages: %lu\n"
+ "pages per pool: %lu\n"
+ "max pages: %lu\n"
+ "max pools: %u\n"
+ "total pages: %lu\n"
+ "total free: %lu\n"
+ "idle index: %lu/100\n"
+ "last shrink: %lds\n"
+ "last access: %lds\n"
+ "max pages reached: %lu\n"
+ "grows: %u\n"
+ "grows failure: %u\n"
+ "shrinks: %u\n"
+ "cache access: %lu\n"
+ "cache missing: %lu\n"
+ "low free mark: %lu\n"
+ "max waitqueue depth: %u\n"
+ "max wait time: "CFS_TIME_T"/%u\n"
+ ,
+ totalram_pages,
+ PAGES_PER_POOL,
+ page_pools.epp_max_pages,
+ page_pools.epp_max_pools,
+ page_pools.epp_total_pages,
+ page_pools.epp_free_pages,
+ page_pools.epp_idle_idx,
+ cfs_time_current_sec() - page_pools.epp_last_shrink,
+ cfs_time_current_sec() - page_pools.epp_last_access,
+ page_pools.epp_st_max_pages,
+ page_pools.epp_st_grows,
+ page_pools.epp_st_grow_fails,
+ page_pools.epp_st_shrinks,
+ page_pools.epp_st_access,
+ page_pools.epp_st_missings,
+ page_pools.epp_st_lowfree,
+ page_pools.epp_st_max_wqlen,
+ page_pools.epp_st_max_wait, HZ
+ );
+
+ spin_unlock(&page_pools.epp_lock);
+ return rc;
+}
+
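+/*
+ * Give \a npages pages from the tail of the free region back to the
+ * system, and free any pool arrays that become completely unused.
+ */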
+static void enc_pools_release_free_pages(long npages)
+{
+ int p_idx, g_idx;
+ int p_idx_max1, p_idx_max2;
+
+ LASSERT(npages > 0);
+ LASSERT(npages <= page_pools.epp_free_pages);
+ LASSERT(page_pools.epp_free_pages <= page_pools.epp_total_pages);
+
+ /* max pool index before the release */
+ p_idx_max2 = (page_pools.epp_total_pages - 1) / PAGES_PER_POOL;
+
+ page_pools.epp_free_pages -= npages;
+ page_pools.epp_total_pages -= npages;
+
+ /* max pool index after the release */
+ p_idx_max1 = page_pools.epp_total_pages == 0 ? -1 :
+ ((page_pools.epp_total_pages - 1) / PAGES_PER_POOL);
+
+ p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
+ g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
+ LASSERT(page_pools.epp_pools[p_idx]);
+
+ while (npages--) {
+ LASSERT(page_pools.epp_pools[p_idx]);
+ LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
+
+ __free_page(page_pools.epp_pools[p_idx][g_idx]);
+ page_pools.epp_pools[p_idx][g_idx] = NULL;
+
+ if (++g_idx == PAGES_PER_POOL) {
+ p_idx++;
+ g_idx = 0;
+ }
+	}
+
+ /* free unused pools */
+ while (p_idx_max1 < p_idx_max2) {
+ LASSERT(page_pools.epp_pools[p_idx_max2]);
+ OBD_FREE(page_pools.epp_pools[p_idx_max2], PAGE_CACHE_SIZE);
+ page_pools.epp_pools[p_idx_max2] = NULL;
+ p_idx_max2--;
+ }
+}
+
+/*
+ * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
+ */
+static unsigned long enc_pools_shrink_count(struct shrinker *s,
+ struct shrink_control *sc)
+{
+ /*
+	 * if there has been no pool access for a long time, we consider
+	 * the pools fully idle. a little race here is fine.
+ */
+ if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
+ CACHE_QUIESCENT_PERIOD)) {
+ spin_lock(&page_pools.epp_lock);
+ page_pools.epp_idle_idx = IDLE_IDX_MAX;
+ spin_unlock(&page_pools.epp_lock);
+ }
+
+ LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
+ return max((int)page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
+ (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
+}
+
+/*
+ * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
+ */
+static unsigned long enc_pools_shrink_scan(struct shrinker *s,
+ struct shrink_control *sc)
+{
+ spin_lock(&page_pools.epp_lock);
+ sc->nr_to_scan = min_t(unsigned long, sc->nr_to_scan,
+ page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES);
+ if (sc->nr_to_scan > 0) {
+ enc_pools_release_free_pages(sc->nr_to_scan);
+ CDEBUG(D_SEC, "released %ld pages, %ld left\n",
+ (long)sc->nr_to_scan, page_pools.epp_free_pages);
+
+ page_pools.epp_st_shrinks++;
+ page_pools.epp_last_shrink = cfs_time_current_sec();
+ }
+ spin_unlock(&page_pools.epp_lock);
+
+ /*
+	 * if there has been no pool access for a long time, we consider
+	 * the pools fully idle. a little race here is fine.
+ */
+ if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
+ CACHE_QUIESCENT_PERIOD)) {
+ spin_lock(&page_pools.epp_lock);
+ page_pools.epp_idle_idx = IDLE_IDX_MAX;
+ spin_unlock(&page_pools.epp_lock);
+ }
+
+ LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
+ return sc->nr_to_scan;
+}
+
+static inline
+int npages_to_npools(unsigned long npages)
+{
+ return (int) ((npages + PAGES_PER_POOL - 1) / PAGES_PER_POOL);
+}
+
+/*
+ * return how many pages were cleaned up.
+ */
+static unsigned long enc_pools_cleanup(struct page ***pools, int npools)
+{
+ unsigned long cleaned = 0;
+ int i, j;
+
+ for (i = 0; i < npools; i++) {
+ if (pools[i]) {
+ for (j = 0; j < PAGES_PER_POOL; j++) {
+ if (pools[i][j]) {
+ __free_page(pools[i][j]);
+ cleaned++;
+ }
+ }
+ OBD_FREE(pools[i], PAGE_CACHE_SIZE);
+ pools[i] = NULL;
+ }
+ }
+
+ return cleaned;
+}
+
+/*
+ * merge the @npools pools pointed to by @pools, containing @npages new
+ * pages, into the current pools.
+ *
+ * we could avoid most of the memory copies with some tricks, but we choose
+ * the simplest way to avoid complexity. It's not called frequently.
+ */
+static void enc_pools_insert(struct page ***pools, int npools, int npages)
+{
+ int freeslot;
+ int op_idx, np_idx, og_idx, ng_idx;
+ int cur_npools, end_npools;
+
+ LASSERT(npages > 0);
+ LASSERT(page_pools.epp_total_pages+npages <= page_pools.epp_max_pages);
+ LASSERT(npages_to_npools(npages) == npools);
+ LASSERT(page_pools.epp_growing);
+
+ spin_lock(&page_pools.epp_lock);
+
+ /*
+ * (1) fill all the free slots of current pools.
+ */
+	/* free slots are those left by rented pages, plus the extra ones with
+	 * index >= total_pages, located at the tail of the last pool. */
+ freeslot = page_pools.epp_total_pages % PAGES_PER_POOL;
+ if (freeslot != 0)
+ freeslot = PAGES_PER_POOL - freeslot;
+ freeslot += page_pools.epp_total_pages - page_pools.epp_free_pages;
+
+ op_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
+ og_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
+ np_idx = npools - 1;
+ ng_idx = (npages - 1) % PAGES_PER_POOL;
+
+ while (freeslot) {
+ LASSERT(page_pools.epp_pools[op_idx][og_idx] == NULL);
+ LASSERT(pools[np_idx][ng_idx] != NULL);
+
+ page_pools.epp_pools[op_idx][og_idx] = pools[np_idx][ng_idx];
+ pools[np_idx][ng_idx] = NULL;
+
+ freeslot--;
+
+ if (++og_idx == PAGES_PER_POOL) {
+ op_idx++;
+ og_idx = 0;
+ }
+ if (--ng_idx < 0) {
+ if (np_idx == 0)
+ break;
+ np_idx--;
+ ng_idx = PAGES_PER_POOL - 1;
+ }
+ }
+
+ /*
+ * (2) add pools if needed.
+ */
+ cur_npools = (page_pools.epp_total_pages + PAGES_PER_POOL - 1) /
+ PAGES_PER_POOL;
+ end_npools = (page_pools.epp_total_pages + npages + PAGES_PER_POOL -1) /
+ PAGES_PER_POOL;
+ LASSERT(end_npools <= page_pools.epp_max_pools);
+
+ np_idx = 0;
+ while (cur_npools < end_npools) {
+ LASSERT(page_pools.epp_pools[cur_npools] == NULL);
+ LASSERT(np_idx < npools);
+ LASSERT(pools[np_idx] != NULL);
+
+ page_pools.epp_pools[cur_npools++] = pools[np_idx];
+ pools[np_idx++] = NULL;
+ }
+
+ page_pools.epp_total_pages += npages;
+ page_pools.epp_free_pages += npages;
+ page_pools.epp_st_lowfree = page_pools.epp_free_pages;
+
+ if (page_pools.epp_total_pages > page_pools.epp_st_max_pages)
+ page_pools.epp_st_max_pages = page_pools.epp_total_pages;
+
+ CDEBUG(D_SEC, "add %d pages to total %lu\n", npages,
+ page_pools.epp_total_pages);
+
+ spin_unlock(&page_pools.epp_lock);
+}
+
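+/*
+ * Allocate \a npages new encryption pages (at least PTLRPC_MAX_BRW_PAGES,
+ * capped so the pools don't exceed epp_max_pages) and merge them into the
+ * global pools.
+ */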
+static int enc_pools_add_pages(int npages)
+{
+ static DEFINE_MUTEX(add_pages_mutex);
+ struct page ***pools;
+ int npools, alloced = 0;
+ int i, j, rc = -ENOMEM;
+
+ if (npages < PTLRPC_MAX_BRW_PAGES)
+ npages = PTLRPC_MAX_BRW_PAGES;
+
+ mutex_lock(&add_pages_mutex);
+
+ if (npages + page_pools.epp_total_pages > page_pools.epp_max_pages)
+ npages = page_pools.epp_max_pages - page_pools.epp_total_pages;
+ LASSERT(npages > 0);
+
+ page_pools.epp_st_grows++;
+
+ npools = npages_to_npools(npages);
+ OBD_ALLOC(pools, npools * sizeof(*pools));
+ if (pools == NULL)
+ goto out;
+
+ for (i = 0; i < npools; i++) {
+ OBD_ALLOC(pools[i], PAGE_CACHE_SIZE);
+ if (pools[i] == NULL)
+ goto out_pools;
+
+ for (j = 0; j < PAGES_PER_POOL && alloced < npages; j++) {
+ pools[i][j] = alloc_page(__GFP_IO |
+ __GFP_HIGHMEM);
+ if (pools[i][j] == NULL)
+ goto out_pools;
+
+ alloced++;
+ }
+ }
+ LASSERT(alloced == npages);
+
+ enc_pools_insert(pools, npools, npages);
+ CDEBUG(D_SEC, "added %d pages into pools\n", npages);
+ rc = 0;
+
+out_pools:
+ enc_pools_cleanup(pools, npools);
+ OBD_FREE(pools, npools * sizeof(*pools));
+out:
+ if (rc) {
+ page_pools.epp_st_grow_fails++;
+ CERROR("Failed to allocate %d enc pages\n", npages);
+ }
+
+ mutex_unlock(&add_pages_mutex);
+ return rc;
+}
+
+static inline void enc_pools_wakeup(void)
+{
+ LASSERT(spin_is_locked(&page_pools.epp_lock));
+ LASSERT(page_pools.epp_waitqlen >= 0);
+
+ if (unlikely(page_pools.epp_waitqlen)) {
+ LASSERT(waitqueue_active(&page_pools.epp_waitq));
+ wake_up_all(&page_pools.epp_waitq);
+ }
+}
+
+static int enc_pools_should_grow(int page_needed, long now)
+{
+ /* don't grow if someone else is growing the pools right now,
+	 * or the pools have reached their full capacity
+ */
+ if (page_pools.epp_growing ||
+ page_pools.epp_total_pages == page_pools.epp_max_pages)
+ return 0;
+
+ /* if total pages is not enough, we need to grow */
+ if (page_pools.epp_total_pages < page_needed)
+ return 1;
+
+ /*
+	 * we wanted to return 0 here if a shrink happened just a moment ago,
+	 * but this may cause a deadlock if both the client and the ost live
+	 * on a single node.
+ */
+#if 0
+ if (now - page_pools.epp_last_shrink < 2)
+ return 0;
+#endif
+
+ /*
+	 * here we should perhaps consider other factors like wait queue
+	 * length, idle index, etc.
+ */
+
+ /* grow the pools in any other cases */
+ return 1;
+}
+
+/*
+ * we allocate the requested pages atomically.
+ */
+int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
+{
+ wait_queue_t waitlink;
+ unsigned long this_idle = -1;
+ cfs_time_t tick = 0;
+ long now;
+ int p_idx, g_idx;
+ int i;
+
+ LASSERT(desc->bd_iov_count > 0);
+ LASSERT(desc->bd_iov_count <= page_pools.epp_max_pages);
+
+ /* resent bulk, enc iov might have been allocated previously */
+ if (desc->bd_enc_iov != NULL)
+ return 0;
+
+ OBD_ALLOC(desc->bd_enc_iov,
+ desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
+ if (desc->bd_enc_iov == NULL)
+ return -ENOMEM;
+
+ spin_lock(&page_pools.epp_lock);
+
+ page_pools.epp_st_access++;
+again:
+ if (unlikely(page_pools.epp_free_pages < desc->bd_iov_count)) {
+ if (tick == 0)
+ tick = cfs_time_current();
+
+ now = cfs_time_current_sec();
+
+ page_pools.epp_st_missings++;
+ page_pools.epp_pages_short += desc->bd_iov_count;
+
+ if (enc_pools_should_grow(desc->bd_iov_count, now)) {
+ page_pools.epp_growing = 1;
+
+ spin_unlock(&page_pools.epp_lock);
+ enc_pools_add_pages(page_pools.epp_pages_short / 2);
+ spin_lock(&page_pools.epp_lock);
+
+ page_pools.epp_growing = 0;
+
+ enc_pools_wakeup();
+ } else {
+ if (++page_pools.epp_waitqlen >
+ page_pools.epp_st_max_wqlen)
+ page_pools.epp_st_max_wqlen =
+ page_pools.epp_waitqlen;
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ init_waitqueue_entry_current(&waitlink);
+ add_wait_queue(&page_pools.epp_waitq, &waitlink);
+
+ spin_unlock(&page_pools.epp_lock);
+ waitq_wait(&waitlink, TASK_UNINTERRUPTIBLE);
+ remove_wait_queue(&page_pools.epp_waitq, &waitlink);
+ LASSERT(page_pools.epp_waitqlen > 0);
+ spin_lock(&page_pools.epp_lock);
+ page_pools.epp_waitqlen--;
+ }
+
+ LASSERT(page_pools.epp_pages_short >= desc->bd_iov_count);
+ page_pools.epp_pages_short -= desc->bd_iov_count;
+
+ this_idle = 0;
+ goto again;
+ }
+
+ /* record max wait time */
+ if (unlikely(tick != 0)) {
+ tick = cfs_time_current() - tick;
+ if (tick > page_pools.epp_st_max_wait)
+ page_pools.epp_st_max_wait = tick;
+ }
+
+ /* proceed with rest of allocation */
+ page_pools.epp_free_pages -= desc->bd_iov_count;
+
+ p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
+ g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
+
+ for (i = 0; i < desc->bd_iov_count; i++) {
+ LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
+ desc->bd_enc_iov[i].kiov_page =
+ page_pools.epp_pools[p_idx][g_idx];
+ page_pools.epp_pools[p_idx][g_idx] = NULL;
+
+ if (++g_idx == PAGES_PER_POOL) {
+ p_idx++;
+ g_idx = 0;
+ }
+ }
+
+ if (page_pools.epp_free_pages < page_pools.epp_st_lowfree)
+ page_pools.epp_st_lowfree = page_pools.epp_free_pages;
+
+ /*
+ * new idle index = (old * weight + new) / (weight + 1)
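+	 * e.g. with IDLE_IDX_WEIGHT == 3, each access moves the index a
+	 * quarter of the way towards the instantaneous free/total ratio.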
+ */
+ if (this_idle == -1) {
+ this_idle = page_pools.epp_free_pages * IDLE_IDX_MAX /
+ page_pools.epp_total_pages;
+ }
+ page_pools.epp_idle_idx = (page_pools.epp_idle_idx * IDLE_IDX_WEIGHT +
+ this_idle) /
+ (IDLE_IDX_WEIGHT + 1);
+
+ page_pools.epp_last_access = cfs_time_current_sec();
+
+ spin_unlock(&page_pools.epp_lock);
+ return 0;
+}
+EXPORT_SYMBOL(sptlrpc_enc_pool_get_pages);
+
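+/**
+ * Return the pages previously taken by sptlrpc_enc_pool_get_pages() to the
+ * pools, wake up any waiters and free the bd_enc_iov array.
+ */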
+void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
+{
+ int p_idx, g_idx;
+ int i;
+
+ if (desc->bd_enc_iov == NULL)
+ return;
+
+ LASSERT(desc->bd_iov_count > 0);
+
+ spin_lock(&page_pools.epp_lock);
+
+ p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
+ g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
+
+ LASSERT(page_pools.epp_free_pages + desc->bd_iov_count <=
+ page_pools.epp_total_pages);
+ LASSERT(page_pools.epp_pools[p_idx]);
+
+ for (i = 0; i < desc->bd_iov_count; i++) {
+ LASSERT(desc->bd_enc_iov[i].kiov_page != NULL);
+ LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
+ LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);
+
+ page_pools.epp_pools[p_idx][g_idx] =
+ desc->bd_enc_iov[i].kiov_page;
+
+ if (++g_idx == PAGES_PER_POOL) {
+ p_idx++;
+ g_idx = 0;
+ }
+ }
+
+ page_pools.epp_free_pages += desc->bd_iov_count;
+
+ enc_pools_wakeup();
+
+ spin_unlock(&page_pools.epp_lock);
+
+ OBD_FREE(desc->bd_enc_iov,
+ desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
+ desc->bd_enc_iov = NULL;
+}
+EXPORT_SYMBOL(sptlrpc_enc_pool_put_pages);
+
+/*
+ * we don't do much for add_user/del_user anymore, except adding some
+ * initial pages in add_user() if the current pools are empty; the rest is
+ * handled by the pools' self-adaptation.
+ */
+int sptlrpc_enc_pool_add_user(void)
+{
+ int need_grow = 0;
+
+ spin_lock(&page_pools.epp_lock);
+ if (page_pools.epp_growing == 0 && page_pools.epp_total_pages == 0) {
+ page_pools.epp_growing = 1;
+ need_grow = 1;
+ }
+ spin_unlock(&page_pools.epp_lock);
+
+ if (need_grow) {
+ enc_pools_add_pages(PTLRPC_MAX_BRW_PAGES +
+ PTLRPC_MAX_BRW_PAGES);
+
+ spin_lock(&page_pools.epp_lock);
+ page_pools.epp_growing = 0;
+ enc_pools_wakeup();
+ spin_unlock(&page_pools.epp_lock);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(sptlrpc_enc_pool_add_user);
+
+int sptlrpc_enc_pool_del_user(void)
+{
+ return 0;
+}
+EXPORT_SYMBOL(sptlrpc_enc_pool_del_user);
+
+static inline void enc_pools_alloc(void)
+{
+ LASSERT(page_pools.epp_max_pools);
+ OBD_ALLOC_LARGE(page_pools.epp_pools,
+ page_pools.epp_max_pools *
+ sizeof(*page_pools.epp_pools));
+}
+
+static inline void enc_pools_free(void)
+{
+ LASSERT(page_pools.epp_max_pools);
+ LASSERT(page_pools.epp_pools);
+
+ OBD_FREE_LARGE(page_pools.epp_pools,
+ page_pools.epp_max_pools *
+ sizeof(*page_pools.epp_pools));
+}
+
+static struct shrinker pools_shrinker = {
+ .count_objects = enc_pools_shrink_count,
+ .scan_objects = enc_pools_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
+int sptlrpc_enc_pool_init(void)
+{
+ /*
+ * maximum capacity is 1/8 of total physical memory.
+ * is the 1/8 a good number?
+ */
+ page_pools.epp_max_pages = totalram_pages / 8;
+ page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);
+
+ init_waitqueue_head(&page_pools.epp_waitq);
+ page_pools.epp_waitqlen = 0;
+ page_pools.epp_pages_short = 0;
+
+ page_pools.epp_growing = 0;
+
+ page_pools.epp_idle_idx = 0;
+ page_pools.epp_last_shrink = cfs_time_current_sec();
+ page_pools.epp_last_access = cfs_time_current_sec();
+
+ spin_lock_init(&page_pools.epp_lock);
+ page_pools.epp_total_pages = 0;
+ page_pools.epp_free_pages = 0;
+
+ page_pools.epp_st_max_pages = 0;
+ page_pools.epp_st_grows = 0;
+ page_pools.epp_st_grow_fails = 0;
+ page_pools.epp_st_shrinks = 0;
+ page_pools.epp_st_access = 0;
+ page_pools.epp_st_missings = 0;
+ page_pools.epp_st_lowfree = 0;
+ page_pools.epp_st_max_wqlen = 0;
+ page_pools.epp_st_max_wait = 0;
+
+ enc_pools_alloc();
+ if (page_pools.epp_pools == NULL)
+ return -ENOMEM;
+
+ register_shrinker(&pools_shrinker);
+
+ return 0;
+}
+
+void sptlrpc_enc_pool_fini(void)
+{
+ unsigned long cleaned, npools;
+
+ LASSERT(page_pools.epp_pools);
+ LASSERT(page_pools.epp_total_pages == page_pools.epp_free_pages);
+
+ unregister_shrinker(&pools_shrinker);
+
+ npools = npages_to_npools(page_pools.epp_total_pages);
+ cleaned = enc_pools_cleanup(page_pools.epp_pools, npools);
+ LASSERT(cleaned == page_pools.epp_total_pages);
+
+ enc_pools_free();
+
+ if (page_pools.epp_st_access > 0) {
+ CDEBUG(D_SEC,
+ "max pages %lu, grows %u, grow fails %u, shrinks %u, "
+ "access %lu, missing %lu, max qlen %u, max wait "
+ CFS_TIME_T"/%d\n",
+ page_pools.epp_st_max_pages, page_pools.epp_st_grows,
+ page_pools.epp_st_grow_fails,
+ page_pools.epp_st_shrinks, page_pools.epp_st_access,
+ page_pools.epp_st_missings, page_pools.epp_st_max_wqlen,
+ page_pools.epp_st_max_wait, HZ);
+ }
+}
+
+
+static int cfs_hash_alg_id[] = {
+ [BULK_HASH_ALG_NULL] = CFS_HASH_ALG_NULL,
+ [BULK_HASH_ALG_ADLER32] = CFS_HASH_ALG_ADLER32,
+ [BULK_HASH_ALG_CRC32] = CFS_HASH_ALG_CRC32,
+ [BULK_HASH_ALG_MD5] = CFS_HASH_ALG_MD5,
+ [BULK_HASH_ALG_SHA1] = CFS_HASH_ALG_SHA1,
+ [BULK_HASH_ALG_SHA256] = CFS_HASH_ALG_SHA256,
+ [BULK_HASH_ALG_SHA384] = CFS_HASH_ALG_SHA384,
+ [BULK_HASH_ALG_SHA512] = CFS_HASH_ALG_SHA512,
+};
+
+const char *sptlrpc_get_hash_name(__u8 hash_alg)
+{
+ return cfs_crypto_hash_name(cfs_hash_alg_id[hash_alg]);
+}
+EXPORT_SYMBOL(sptlrpc_get_hash_name);
+
+__u8 sptlrpc_get_hash_alg(const char *algname)
+{
+ return cfs_crypto_hash_alg(algname);
+}
+EXPORT_SYMBOL(sptlrpc_get_hash_alg);
+
+int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed)
+{
+ struct ptlrpc_bulk_sec_desc *bsd;
+ int size = msg->lm_buflens[offset];
+
+ bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
+ if (bsd == NULL) {
+ CERROR("Invalid bulk sec desc: size %d\n", size);
+ return -EINVAL;
+ }
+
+	if (swabbed)
+		__swab32s(&bsd->bsd_nob);
+
+ if (unlikely(bsd->bsd_version != 0)) {
+ CERROR("Unexpected version %u\n", bsd->bsd_version);
+ return -EPROTO;
+ }
+
+ if (unlikely(bsd->bsd_type >= SPTLRPC_BULK_MAX)) {
+ CERROR("Invalid type %u\n", bsd->bsd_type);
+ return -EPROTO;
+ }
+
+ /* FIXME more sanity check here */
+
+ if (unlikely(bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
+ bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG &&
+ bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)) {
+ CERROR("Invalid svc %u\n", bsd->bsd_svc);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(bulk_sec_desc_unpack);
+
+int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
+ void *buf, int buflen)
+{
+ struct cfs_crypto_hash_desc *hdesc;
+ int hashsize;
+ char hashbuf[64];
+ unsigned int bufsize;
+ int i, err;
+
+ LASSERT(alg > BULK_HASH_ALG_NULL && alg < BULK_HASH_ALG_MAX);
+ LASSERT(buflen >= 4);
+
+ hdesc = cfs_crypto_hash_init(cfs_hash_alg_id[alg], NULL, 0);
+ if (IS_ERR(hdesc)) {
+ CERROR("Unable to initialize checksum hash %s\n",
+ cfs_crypto_hash_name(cfs_hash_alg_id[alg]));
+ return PTR_ERR(hdesc);
+ }
+
+ hashsize = cfs_crypto_hash_digestsize(cfs_hash_alg_id[alg]);
+
+ for (i = 0; i < desc->bd_iov_count; i++) {
+ cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].kiov_page,
+ desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK,
+ desc->bd_iov[i].kiov_len);
+ }
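+	/*
+	 * If the digest is larger than the caller's buffer, finalize into
+	 * the local hashbuf and copy only the first buflen bytes; otherwise
+	 * write the digest directly into the caller's buffer.
+	 */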
+ if (hashsize > buflen) {
+ bufsize = sizeof(hashbuf);
+ err = cfs_crypto_hash_final(hdesc, (unsigned char *)hashbuf,
+ &bufsize);
+ memcpy(buf, hashbuf, buflen);
+ } else {
+ bufsize = buflen;
+ err = cfs_crypto_hash_final(hdesc, (unsigned char *)buf,
+ &bufsize);
+ }
+
+ if (err)
+ cfs_crypto_hash_final(hdesc, NULL, NULL);
+ return err;
+}
+EXPORT_SYMBOL(sptlrpc_get_bulk_checksum);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
new file mode 100644
index 0000000..6cc3f23
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
@@ -0,0 +1,1226 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_SEC
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/crypto.h>
+#include <linux/key.h>
+
+#include <obd.h>
+#include <obd_class.h>
+#include <obd_support.h>
+#include <lustre_net.h>
+#include <lustre_import.h>
+#include <lustre_log.h>
+#include <lustre_disk.h>
+#include <lustre_dlm.h>
+#include <lustre_param.h>
+#include <lustre_sec.h>
+
+#include "ptlrpc_internal.h"
+
+const char *sptlrpc_part2name(enum lustre_sec_part part)
+{
+ switch (part) {
+ case LUSTRE_SP_CLI:
+ return "cli";
+ case LUSTRE_SP_MDT:
+ return "mdt";
+ case LUSTRE_SP_OST:
+ return "ost";
+ case LUSTRE_SP_MGC:
+ return "mgc";
+ case LUSTRE_SP_MGS:
+ return "mgs";
+ case LUSTRE_SP_ANY:
+ return "any";
+ default:
+ return "err";
+ }
+}
+EXPORT_SYMBOL(sptlrpc_part2name);
+
+enum lustre_sec_part sptlrpc_target_sec_part(struct obd_device *obd)
+{
+ const char *type = obd->obd_type->typ_name;
+
+ if (!strcmp(type, LUSTRE_MDT_NAME))
+ return LUSTRE_SP_MDT;
+ if (!strcmp(type, LUSTRE_OST_NAME))
+ return LUSTRE_SP_OST;
+ if (!strcmp(type, LUSTRE_MGS_NAME))
+ return LUSTRE_SP_MGS;
+
+ CERROR("unknown target %p(%s)\n", obd, type);
+ return LUSTRE_SP_ANY;
+}
+EXPORT_SYMBOL(sptlrpc_target_sec_part);
+
+/****************************************
+ * user supplied flavor string parsing *
+ ****************************************/
+
+/*
+ * format: <base_flavor>[-<bulk_type:alg_spec>]
+ */
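+/*
+ * Illustrative examples (names assumed to match sptlrpc_name2flavor_base()
+ * and cfs_crypto_hash_name()): "null" and "plain" select a base flavor with
+ * no bulk part, while "plain-hash:sha1" selects the plain flavor with SHA-1
+ * bulk hashing, matching the "plain-hash:<hash_alg>" form handled below.
+ */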
+int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr)
+{
+ char buf[32];
+ char *bulk, *alg;
+
+ memset(flvr, 0, sizeof(*flvr));
+
+ if (str == NULL || str[0] == '\0') {
+ flvr->sf_rpc = SPTLRPC_FLVR_INVALID;
+ return 0;
+ }
+
+ strncpy(buf, str, sizeof(buf));
+ buf[sizeof(buf) - 1] = '\0';
+
+ bulk = strchr(buf, '-');
+ if (bulk)
+ *bulk++ = '\0';
+
+ flvr->sf_rpc = sptlrpc_name2flavor_base(buf);
+ if (flvr->sf_rpc == SPTLRPC_FLVR_INVALID)
+ goto err_out;
+
+ /*
+ * currently only base flavor "plain" can have bulk specification.
+ */
+ if (flvr->sf_rpc == SPTLRPC_FLVR_PLAIN) {
+ flvr->u_bulk.hash.hash_alg = BULK_HASH_ALG_ADLER32;
+ if (bulk) {
+ /*
+ * format: plain-hash:<hash_alg>
+ */
+ alg = strchr(bulk, ':');
+ if (alg == NULL)
+ goto err_out;
+ *alg++ = '\0';
+
+ if (strcmp(bulk, "hash"))
+ goto err_out;
+
+ flvr->u_bulk.hash.hash_alg = sptlrpc_get_hash_alg(alg);
+ if (flvr->u_bulk.hash.hash_alg >= BULK_HASH_ALG_MAX)
+ goto err_out;
+ }
+
+ if (flvr->u_bulk.hash.hash_alg == BULK_HASH_ALG_NULL)
+ flvr_set_bulk_svc(&flvr->sf_rpc, SPTLRPC_BULK_SVC_NULL);
+ else
+ flvr_set_bulk_svc(&flvr->sf_rpc, SPTLRPC_BULK_SVC_INTG);
+ } else {
+ if (bulk)
+ goto err_out;
+ }
+
+ flvr->sf_flags = 0;
+ return 0;
+
+err_out:
+ CERROR("invalid flavor string: %s\n", str);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(sptlrpc_parse_flavor);
+
+/****************************************
+ * configure rules *
+ ****************************************/
+
+static void get_default_flavor(struct sptlrpc_flavor *sf)
+{
+ memset(sf, 0, sizeof(*sf));
+
+ sf->sf_rpc = SPTLRPC_FLVR_NULL;
+ sf->sf_flags = 0;
+}
+
+static void sptlrpc_rule_init(struct sptlrpc_rule *rule)
+{
+ rule->sr_netid = LNET_NIDNET(LNET_NID_ANY);
+ rule->sr_from = LUSTRE_SP_ANY;
+ rule->sr_to = LUSTRE_SP_ANY;
+ rule->sr_padding = 0;
+
+ get_default_flavor(&rule->sr_flvr);
+}
+
+/*
+ * format: network[.direction]=flavor
+ */
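+/*
+ * Illustrative examples (the network name is only a sample):
+ *   "default=null"        - apply the null flavor on every network/direction
+ *   "tcp0.cli2mdt=plain"  - client->MDT traffic on net tcp0 uses "plain"
+ */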
+int sptlrpc_parse_rule(char *param, struct sptlrpc_rule *rule)
+{
+ char *flavor, *dir;
+ int rc;
+
+ sptlrpc_rule_init(rule);
+
+ flavor = strchr(param, '=');
+ if (flavor == NULL) {
+ CERROR("invalid param, no '='\n");
+ return -EINVAL;
+ }
+ *flavor++ = '\0';
+
+ dir = strchr(param, '.');
+ if (dir)
+ *dir++ = '\0';
+
+ /* 1.1 network */
+ if (strcmp(param, "default")) {
+ rule->sr_netid = libcfs_str2net(param);
+ if (rule->sr_netid == LNET_NIDNET(LNET_NID_ANY)) {
+ CERROR("invalid network name: %s\n", param);
+ return -EINVAL;
+ }
+ }
+
+ /* 1.2 direction */
+ if (dir) {
+ if (!strcmp(dir, "mdt2ost")) {
+ rule->sr_from = LUSTRE_SP_MDT;
+ rule->sr_to = LUSTRE_SP_OST;
+ } else if (!strcmp(dir, "mdt2mdt")) {
+ rule->sr_from = LUSTRE_SP_MDT;
+ rule->sr_to = LUSTRE_SP_MDT;
+ } else if (!strcmp(dir, "cli2ost")) {
+ rule->sr_from = LUSTRE_SP_CLI;
+ rule->sr_to = LUSTRE_SP_OST;
+ } else if (!strcmp(dir, "cli2mdt")) {
+ rule->sr_from = LUSTRE_SP_CLI;
+ rule->sr_to = LUSTRE_SP_MDT;
+ } else {
+ CERROR("invalid rule dir segment: %s\n", dir);
+ return -EINVAL;
+ }
+ }
+
+ /* 2.1 flavor */
+ rc = sptlrpc_parse_flavor(flavor, &rule->sr_flvr);
+ if (rc)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(sptlrpc_parse_rule);
+
+void sptlrpc_rule_set_free(struct sptlrpc_rule_set *rset)
+{
+ LASSERT(rset->srs_nslot ||
+ (rset->srs_nrule == 0 && rset->srs_rules == NULL));
+
+ if (rset->srs_nslot) {
+ OBD_FREE(rset->srs_rules,
+ rset->srs_nslot * sizeof(*rset->srs_rules));
+ sptlrpc_rule_set_init(rset);
+ }
+}
+EXPORT_SYMBOL(sptlrpc_rule_set_free);
+
+/*
+ * Return 0 if the rule set can accommodate one more rule.
+ */
+int sptlrpc_rule_set_expand(struct sptlrpc_rule_set *rset)
+{
+ struct sptlrpc_rule *rules;
+ int nslot;
+
+ might_sleep();
+
+ if (rset->srs_nrule < rset->srs_nslot)
+ return 0;
+
+ nslot = rset->srs_nslot + 8;
+
+ /* better use realloc() if available */
+ OBD_ALLOC(rules, nslot * sizeof(*rset->srs_rules));
+ if (rules == NULL)
+ return -ENOMEM;
+
+ if (rset->srs_nrule) {
+ LASSERT(rset->srs_nslot && rset->srs_rules);
+ memcpy(rules, rset->srs_rules,
+ rset->srs_nrule * sizeof(*rset->srs_rules));
+
+ OBD_FREE(rset->srs_rules,
+ rset->srs_nslot * sizeof(*rset->srs_rules));
+ }
+
+ rset->srs_rules = rules;
+ rset->srs_nslot = nslot;
+ return 0;
+}
+EXPORT_SYMBOL(sptlrpc_rule_set_expand);
+
+static inline int rule_spec_dir(struct sptlrpc_rule *rule)
+{
+ return (rule->sr_from != LUSTRE_SP_ANY ||
+ rule->sr_to != LUSTRE_SP_ANY);
+}
+static inline int rule_spec_net(struct sptlrpc_rule *rule)
+{
+ return (rule->sr_netid != LNET_NIDNET(LNET_NID_ANY));
+}
+static inline int rule_match_dir(struct sptlrpc_rule *r1,
+ struct sptlrpc_rule *r2)
+{
+ return (r1->sr_from == r2->sr_from && r1->sr_to == r2->sr_to);
+}
+static inline int rule_match_net(struct sptlrpc_rule *r1,
+ struct sptlrpc_rule *r2)
+{
+ return (r1->sr_netid == r2->sr_netid);
+}
+
+/*
+ * merge @rule into @rset.
+ * the @rset slots might be expanded.
+ */
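+/*
+ * Note: the merge below keeps rules with a specific network and/or
+ * direction ahead of wildcard ones; once a specific rule stops matching
+ * and a wildcard rule is reached, the scan can stop because no later rule
+ * can match.
+ */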
+int sptlrpc_rule_set_merge(struct sptlrpc_rule_set *rset,
+ struct sptlrpc_rule *rule)
+{
+ struct sptlrpc_rule *p = rset->srs_rules;
+ int spec_dir, spec_net;
+ int rc, n, match = 0;
+
+ might_sleep();
+
+ spec_net = rule_spec_net(rule);
+ spec_dir = rule_spec_dir(rule);
+
+ for (n = 0; n < rset->srs_nrule; n++) {
+ p = &rset->srs_rules[n];
+
+		/* test network match; if it fails:
+		 * - spec rule: skip other spec rules until we hit a wild
+		 *   rule, which means there is no more chance of a match
+		 * - wild rule: skip until we reach one which is also wild
+		 *   and matches
+		 */
+ if (!rule_match_net(p, rule)) {
+ if (spec_net) {
+ if (rule_spec_net(p))
+ continue;
+ else
+ break;
+ } else {
+ continue;
+ }
+ }
+
+ /* test dir match, same logic as net matching */
+ if (!rule_match_dir(p, rule)) {
+ if (spec_dir) {
+ if (rule_spec_dir(p))
+ continue;
+ else
+ break;
+ } else {
+ continue;
+ }
+ }
+
+		/* found a match */
+ match = 1;
+ break;
+ }
+
+ if (match) {
+ LASSERT(n >= 0 && n < rset->srs_nrule);
+
+ if (rule->sr_flvr.sf_rpc == SPTLRPC_FLVR_INVALID) {
+ /* remove this rule */
+ if (n < rset->srs_nrule - 1)
+ memmove(&rset->srs_rules[n],
+ &rset->srs_rules[n + 1],
+ (rset->srs_nrule - n - 1) *
+ sizeof(*rule));
+ rset->srs_nrule--;
+ } else {
+ /* override the rule */
+ memcpy(&rset->srs_rules[n], rule, sizeof(*rule));
+ }
+ } else {
+ LASSERT(n >= 0 && n <= rset->srs_nrule);
+
+ if (rule->sr_flvr.sf_rpc != SPTLRPC_FLVR_INVALID) {
+ rc = sptlrpc_rule_set_expand(rset);
+ if (rc)
+ return rc;
+
+ if (n < rset->srs_nrule)
+ memmove(&rset->srs_rules[n + 1],
+ &rset->srs_rules[n],
+ (rset->srs_nrule - n) * sizeof(*rule));
+ memcpy(&rset->srs_rules[n], rule, sizeof(*rule));
+ rset->srs_nrule++;
+ } else {
+ CDEBUG(D_CONFIG, "ignore the unmatched deletion\n");
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sptlrpc_rule_set_merge);
+
+/**
+ * given from/to/nid, determine a matching flavor in ruleset.
+ * return 1 if a match found, otherwise return 0.
+ */
+int sptlrpc_rule_set_choose(struct sptlrpc_rule_set *rset,
+ enum lustre_sec_part from,
+ enum lustre_sec_part to,
+ lnet_nid_t nid,
+ struct sptlrpc_flavor *sf)
+{
+ struct sptlrpc_rule *r;
+ int n;
+
+ for (n = 0; n < rset->srs_nrule; n++) {
+ r = &rset->srs_rules[n];
+
+ if (LNET_NIDNET(nid) != LNET_NIDNET(LNET_NID_ANY) &&
+ r->sr_netid != LNET_NIDNET(LNET_NID_ANY) &&
+ LNET_NIDNET(nid) != r->sr_netid)
+ continue;
+
+ if (from != LUSTRE_SP_ANY && r->sr_from != LUSTRE_SP_ANY &&
+ from != r->sr_from)
+ continue;
+
+ if (to != LUSTRE_SP_ANY && r->sr_to != LUSTRE_SP_ANY &&
+ to != r->sr_to)
+ continue;
+
+ *sf = r->sr_flvr;
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sptlrpc_rule_set_choose);
+
+void sptlrpc_rule_set_dump(struct sptlrpc_rule_set *rset)
+{
+ struct sptlrpc_rule *r;
+ int n;
+
+ for (n = 0; n < rset->srs_nrule; n++) {
+ r = &rset->srs_rules[n];
+ CDEBUG(D_SEC, "<%02d> from %x to %x, net %x, rpc %x\n", n,
+ r->sr_from, r->sr_to, r->sr_netid, r->sr_flvr.sf_rpc);
+ }
+}
+EXPORT_SYMBOL(sptlrpc_rule_set_dump);
+
+static int sptlrpc_rule_set_extract(struct sptlrpc_rule_set *gen,
+ struct sptlrpc_rule_set *tgt,
+ enum lustre_sec_part from,
+ enum lustre_sec_part to,
+ struct sptlrpc_rule_set *rset)
+{
+ struct sptlrpc_rule_set *src[2] = { gen, tgt };
+ struct sptlrpc_rule *rule;
+ int i, n, rc;
+
+ might_sleep();
+
+	/* merge general rules first, then target-specific rules */
+ for (i = 0; i < 2; i++) {
+ if (src[i] == NULL)
+ continue;
+
+ for (n = 0; n < src[i]->srs_nrule; n++) {
+ rule = &src[i]->srs_rules[n];
+
+ if (from != LUSTRE_SP_ANY &&
+ rule->sr_from != LUSTRE_SP_ANY &&
+ rule->sr_from != from)
+ continue;
+ if (to != LUSTRE_SP_ANY &&
+ rule->sr_to != LUSTRE_SP_ANY &&
+ rule->sr_to != to)
+ continue;
+
+ rc = sptlrpc_rule_set_merge(rset, rule);
+ if (rc) {
+ CERROR("can't merge: %d\n", rc);
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**********************************
+ * sptlrpc configuration support *
+ **********************************/
+
+struct sptlrpc_conf_tgt {
+ struct list_head sct_list;
+ char sct_name[MAX_OBD_NAME];
+ struct sptlrpc_rule_set sct_rset;
+};
+
+struct sptlrpc_conf {
+ struct list_head sc_list;
+ char sc_fsname[MTI_NAME_MAXLEN];
+ unsigned int sc_modified; /* modified during updating */
+ unsigned int sc_updated:1, /* updated copy from MGS */
+ sc_local:1; /* local copy from target */
+ struct sptlrpc_rule_set sc_rset; /* fs general rules */
+ struct list_head sc_tgts; /* target-specific rules */
+};
+
+static struct mutex sptlrpc_conf_lock;
+static LIST_HEAD(sptlrpc_confs);
+
+static inline int is_hex(char c)
+{
+ return ((c >= '0' && c <= '9') ||
+ (c >= 'a' && c <= 'f'));
+}
+
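+/*
+ * Strip a trailing -MDTxxxx / -OSTxxxx (four hex digits) suffix to recover
+ * the filesystem name.  Illustrative example (the fs name is only a
+ * sample): "lustre-OST0001" yields "lustre", while a bare "lustre" is
+ * copied through unchanged.
+ */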
+static void target2fsname(const char *tgt, char *fsname, int buflen)
+{
+ const char *ptr;
+ int len;
+
+ ptr = strrchr(tgt, '-');
+ if (ptr) {
+ if ((strncmp(ptr, "-MDT", 4) != 0 &&
+ strncmp(ptr, "-OST", 4) != 0) ||
+ !is_hex(ptr[4]) || !is_hex(ptr[5]) ||
+ !is_hex(ptr[6]) || !is_hex(ptr[7]))
+ ptr = NULL;
+ }
+
+ /* if we didn't find the pattern, treat the whole string as fsname */
+ if (ptr == NULL)
+ len = strlen(tgt);
+ else
+ len = ptr - tgt;
+
+ len = min(len, buflen - 1);
+ memcpy(fsname, tgt, len);
+ fsname[len] = '\0';
+}
+
+static void sptlrpc_conf_free_rsets(struct sptlrpc_conf *conf)
+{
+ struct sptlrpc_conf_tgt *conf_tgt, *conf_tgt_next;
+
+ sptlrpc_rule_set_free(&conf->sc_rset);
+
+ list_for_each_entry_safe(conf_tgt, conf_tgt_next,
+ &conf->sc_tgts, sct_list) {
+ sptlrpc_rule_set_free(&conf_tgt->sct_rset);
+ list_del(&conf_tgt->sct_list);
+ OBD_FREE_PTR(conf_tgt);
+ }
+ LASSERT(list_empty(&conf->sc_tgts));
+
+ conf->sc_updated = 0;
+ conf->sc_local = 0;
+}
+
+static void sptlrpc_conf_free(struct sptlrpc_conf *conf)
+{
+ CDEBUG(D_SEC, "free sptlrpc conf %s\n", conf->sc_fsname);
+
+ sptlrpc_conf_free_rsets(conf);
+ list_del(&conf->sc_list);
+ OBD_FREE_PTR(conf);
+}
+
+static
+struct sptlrpc_conf_tgt *sptlrpc_conf_get_tgt(struct sptlrpc_conf *conf,
+ const char *name,
+ int create)
+{
+ struct sptlrpc_conf_tgt *conf_tgt;
+
+ list_for_each_entry(conf_tgt, &conf->sc_tgts, sct_list) {
+ if (strcmp(conf_tgt->sct_name, name) == 0)
+ return conf_tgt;
+ }
+
+ if (!create)
+ return NULL;
+
+ OBD_ALLOC_PTR(conf_tgt);
+ if (conf_tgt) {
+ strlcpy(conf_tgt->sct_name, name, sizeof(conf_tgt->sct_name));
+ sptlrpc_rule_set_init(&conf_tgt->sct_rset);
+ list_add(&conf_tgt->sct_list, &conf->sc_tgts);
+ }
+
+ return conf_tgt;
+}
+
+static
+struct sptlrpc_conf *sptlrpc_conf_get(const char *fsname,
+ int create)
+{
+ struct sptlrpc_conf *conf;
+
+ list_for_each_entry(conf, &sptlrpc_confs, sc_list) {
+ if (strcmp(conf->sc_fsname, fsname) == 0)
+ return conf;
+ }
+
+ if (!create)
+ return NULL;
+
+ OBD_ALLOC_PTR(conf);
+ if (conf == NULL)
+ return NULL;
+
+ strcpy(conf->sc_fsname, fsname);
+ sptlrpc_rule_set_init(&conf->sc_rset);
+ INIT_LIST_HEAD(&conf->sc_tgts);
+ list_add(&conf->sc_list, &sptlrpc_confs);
+
+ CDEBUG(D_SEC, "create sptlrpc conf %s\n", conf->sc_fsname);
+ return conf;
+}
+
+/**
+ * caller must hold conf_lock already.
+ */
+static int sptlrpc_conf_merge_rule(struct sptlrpc_conf *conf,
+ const char *target,
+ struct sptlrpc_rule *rule)
+{
+ struct sptlrpc_conf_tgt *conf_tgt;
+ struct sptlrpc_rule_set *rule_set;
+
+ /* fsname == target means general rules for the whole fs */
+ if (strcmp(conf->sc_fsname, target) == 0) {
+ rule_set = &conf->sc_rset;
+ } else {
+ conf_tgt = sptlrpc_conf_get_tgt(conf, target, 1);
+ if (conf_tgt) {
+ rule_set = &conf_tgt->sct_rset;
+ } else {
+ CERROR("out of memory, can't merge rule!\n");
+ return -ENOMEM;
+ }
+ }
+
+ return sptlrpc_rule_set_merge(rule_set, rule);
+}
+
+/**
+ * Process one LCFG_SPTLRPC_CONF record. If \a conf is NULL, look it up by
+ * the target name in the record while holding conf_lock; otherwise the
+ * caller must already hold conf_lock.
+ */
+static int __sptlrpc_process_config(struct lustre_cfg *lcfg,
+ struct sptlrpc_conf *conf)
+{
+ char *target, *param;
+ char fsname[MTI_NAME_MAXLEN];
+ struct sptlrpc_rule rule;
+ int rc;
+
+ target = lustre_cfg_string(lcfg, 1);
+ if (target == NULL) {
+ CERROR("missing target name\n");
+ return -EINVAL;
+ }
+
+ param = lustre_cfg_string(lcfg, 2);
+ if (param == NULL) {
+ CERROR("missing parameter\n");
+ return -EINVAL;
+ }
+
+ CDEBUG(D_SEC, "processing rule: %s.%s\n", target, param);
+
+ /* parse rule to make sure the format is correct */
+ if (strncmp(param, PARAM_SRPC_FLVR, sizeof(PARAM_SRPC_FLVR) - 1) != 0) {
+ CERROR("Invalid sptlrpc parameter: %s\n", param);
+ return -EINVAL;
+ }
+ param += sizeof(PARAM_SRPC_FLVR) - 1;
+
+ rc = sptlrpc_parse_rule(param, &rule);
+ if (rc)
+ return -EINVAL;
+
+ if (conf == NULL) {
+ target2fsname(target, fsname, sizeof(fsname));
+
+ mutex_lock(&sptlrpc_conf_lock);
+ conf = sptlrpc_conf_get(fsname, 0);
+ if (conf == NULL) {
+ CERROR("can't find conf\n");
+ rc = -ENOMEM;
+ } else {
+ rc = sptlrpc_conf_merge_rule(conf, target, &rule);
+ }
+ mutex_unlock(&sptlrpc_conf_lock);
+ } else {
+ LASSERT(mutex_is_locked(&sptlrpc_conf_lock));
+ rc = sptlrpc_conf_merge_rule(conf, target, &rule);
+ }
+
+ if (rc == 0)
+ conf->sc_modified++;
+
+ return rc;
+}
+
+int sptlrpc_process_config(struct lustre_cfg *lcfg)
+{
+ return __sptlrpc_process_config(lcfg, NULL);
+}
+EXPORT_SYMBOL(sptlrpc_process_config);
+
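+/*
+ * Derive the filesystem name from an sptlrpc config log name.
+ * Illustrative example (sample fs name): "lustre-sptlrpc" yields "lustre".
+ */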
+static int logname2fsname(const char *logname, char *buf, int buflen)
+{
+ char *ptr;
+ int len;
+
+ ptr = strrchr(logname, '-');
+ if (ptr == NULL || strcmp(ptr, "-sptlrpc")) {
+ CERROR("%s is not a sptlrpc config log\n", logname);
+ return -EINVAL;
+ }
+
+ len = min((int) (ptr - logname), buflen - 1);
+
+ memcpy(buf, logname, len);
+ buf[len] = '\0';
+ return 0;
+}
+
+void sptlrpc_conf_log_update_begin(const char *logname)
+{
+ struct sptlrpc_conf *conf;
+ char fsname[16];
+
+ if (logname2fsname(logname, fsname, sizeof(fsname)))
+ return;
+
+ mutex_lock(&sptlrpc_conf_lock);
+
+	conf = sptlrpc_conf_get(fsname, 0);
+	if (conf) {
+		if (conf->sc_local) {
+			LASSERT(conf->sc_updated == 0);
+			sptlrpc_conf_free_rsets(conf);
+		}
+		conf->sc_modified = 0;
+	}
+
+ mutex_unlock(&sptlrpc_conf_lock);
+}
+EXPORT_SYMBOL(sptlrpc_conf_log_update_begin);
+
+/**
+ * Mark that a config log has been updated.
+ */
+void sptlrpc_conf_log_update_end(const char *logname)
+{
+ struct sptlrpc_conf *conf;
+ char fsname[16];
+
+ if (logname2fsname(logname, fsname, sizeof(fsname)))
+ return;
+
+ mutex_lock(&sptlrpc_conf_lock);
+
+ conf = sptlrpc_conf_get(fsname, 0);
+ if (conf) {
+		/*
+		 * if the original state was not updated, make sure the
+		 * modified counter is > 0 to force updating the local copy.
+		 */
+ if (conf->sc_updated == 0)
+ conf->sc_modified++;
+
+ conf->sc_updated = 1;
+ }
+
+ mutex_unlock(&sptlrpc_conf_lock);
+}
+EXPORT_SYMBOL(sptlrpc_conf_log_update_end);
+
+void sptlrpc_conf_log_start(const char *logname)
+{
+ char fsname[16];
+
+ if (logname2fsname(logname, fsname, sizeof(fsname)))
+ return;
+
+ mutex_lock(&sptlrpc_conf_lock);
+ sptlrpc_conf_get(fsname, 1);
+ mutex_unlock(&sptlrpc_conf_lock);
+}
+EXPORT_SYMBOL(sptlrpc_conf_log_start);
+
+void sptlrpc_conf_log_stop(const char *logname)
+{
+ struct sptlrpc_conf *conf;
+ char fsname[16];
+
+ if (logname2fsname(logname, fsname, sizeof(fsname)))
+ return;
+
+ mutex_lock(&sptlrpc_conf_lock);
+ conf = sptlrpc_conf_get(fsname, 0);
+ if (conf)
+ sptlrpc_conf_free(conf);
+ mutex_unlock(&sptlrpc_conf_lock);
+}
+EXPORT_SYMBOL(sptlrpc_conf_log_stop);
+
+static inline void flavor_set_flags(struct sptlrpc_flavor *sf,
+ enum lustre_sec_part from,
+ enum lustre_sec_part to,
+ unsigned int fl_udesc)
+{
+	/*
+	 * The null flavor doesn't need any flags set; in fact we'd better
+	 * not set any, because everybody shares a single sec.
+	 */
+ if (sf->sf_rpc == SPTLRPC_FLVR_NULL)
+ return;
+
+ if (from == LUSTRE_SP_MDT) {
+ /* MDT->MDT; MDT->OST */
+ sf->sf_flags |= PTLRPC_SEC_FL_ROOTONLY;
+ } else if (from == LUSTRE_SP_CLI && to == LUSTRE_SP_OST) {
+ /* CLI->OST */
+ sf->sf_flags |= PTLRPC_SEC_FL_ROOTONLY | PTLRPC_SEC_FL_BULK;
+ } else if (from == LUSTRE_SP_CLI && to == LUSTRE_SP_MDT) {
+ /* CLI->MDT */
+ if (fl_udesc && sf->sf_rpc != SPTLRPC_FLVR_NULL)
+ sf->sf_flags |= PTLRPC_SEC_FL_UDESC;
+ }
+}
+
+void sptlrpc_conf_choose_flavor(enum lustre_sec_part from,
+ enum lustre_sec_part to,
+ struct obd_uuid *target,
+ lnet_nid_t nid,
+ struct sptlrpc_flavor *sf)
+{
+ struct sptlrpc_conf *conf;
+ struct sptlrpc_conf_tgt *conf_tgt;
+ char name[MTI_NAME_MAXLEN];
+ int len, rc = 0;
+
+ target2fsname(target->uuid, name, sizeof(name));
+
+ mutex_lock(&sptlrpc_conf_lock);
+
+ conf = sptlrpc_conf_get(name, 0);
+ if (conf == NULL)
+ goto out;
+
+	/* convert the uuid name (supposed to end with _UUID) to a target name */
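+	/* e.g. (illustrative) "lustre-MDT0000_UUID" becomes "lustre-MDT0000" */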
+ len = strlen(target->uuid);
+ LASSERT(len > 5);
+ memcpy(name, target->uuid, len - 5);
+ name[len - 5] = '\0';
+
+ conf_tgt = sptlrpc_conf_get_tgt(conf, name, 0);
+ if (conf_tgt) {
+ rc = sptlrpc_rule_set_choose(&conf_tgt->sct_rset,
+ from, to, nid, sf);
+ if (rc)
+ goto out;
+ }
+
+ rc = sptlrpc_rule_set_choose(&conf->sc_rset, from, to, nid, sf);
+out:
+ mutex_unlock(&sptlrpc_conf_lock);
+
+ if (rc == 0)
+ get_default_flavor(sf);
+
+ flavor_set_flags(sf, from, to, 1);
+}
+
+/**
+ * called by target devices, determine the expected flavor from
+ * certain peer (from, nid).
+ */
+void sptlrpc_target_choose_flavor(struct sptlrpc_rule_set *rset,
+ enum lustre_sec_part from,
+ lnet_nid_t nid,
+ struct sptlrpc_flavor *sf)
+{
+ if (sptlrpc_rule_set_choose(rset, from, LUSTRE_SP_ANY, nid, sf) == 0)
+ get_default_flavor(sf);
+}
+EXPORT_SYMBOL(sptlrpc_target_choose_flavor);
+
+#define SEC_ADAPT_DELAY (10)
+
+/**
+ * Called by client devices to note that the sptlrpc config has changed;
+ * import_sec_adapt will be done later.
+ */
+void sptlrpc_conf_client_adapt(struct obd_device *obd)
+{
+ struct obd_import *imp;
+
+ LASSERT(strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) == 0 ||
+		strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME) == 0);
+ CDEBUG(D_SEC, "obd %s\n", obd->u.cli.cl_target_uuid.uuid);
+
+ /* serialize with connect/disconnect import */
+ down_read(&obd->u.cli.cl_sem);
+
+ imp = obd->u.cli.cl_import;
+ if (imp) {
+ spin_lock(&imp->imp_lock);
+ if (imp->imp_sec)
+ imp->imp_sec_expire = cfs_time_current_sec() +
+ SEC_ADAPT_DELAY;
+ spin_unlock(&imp->imp_lock);
+ }
+
+ up_read(&obd->u.cli.cl_sem);
+}
+EXPORT_SYMBOL(sptlrpc_conf_client_adapt);
+
+
+static void rule2string(struct sptlrpc_rule *r, char *buf, int buflen)
+{
+ char dirbuf[8];
+ char *net;
+ char *ptr = buf;
+
+ if (r->sr_netid == LNET_NIDNET(LNET_NID_ANY))
+ net = "default";
+ else
+ net = libcfs_net2str(r->sr_netid);
+
+ if (r->sr_from == LUSTRE_SP_ANY && r->sr_to == LUSTRE_SP_ANY)
+ dirbuf[0] = '\0';
+ else
+ snprintf(dirbuf, sizeof(dirbuf), ".%s2%s",
+ sptlrpc_part2name(r->sr_from),
+ sptlrpc_part2name(r->sr_to));
+
+ ptr += snprintf(buf, buflen, "srpc.flavor.%s%s=", net, dirbuf);
+
+ sptlrpc_flavor2name(&r->sr_flvr, ptr, buflen - (ptr - buf));
+ buf[buflen - 1] = '\0';
+}
+
+static int sptlrpc_record_rule_set(struct llog_handle *llh,
+ char *target,
+ struct sptlrpc_rule_set *rset)
+{
+ struct lustre_cfg_bufs bufs;
+ struct lustre_cfg *lcfg;
+ struct llog_rec_hdr rec;
+ int buflen;
+ char param[48];
+ int i, rc;
+
+ for (i = 0; i < rset->srs_nrule; i++) {
+ rule2string(&rset->srs_rules[i], param, sizeof(param));
+
+ lustre_cfg_bufs_reset(&bufs, NULL);
+ lustre_cfg_bufs_set_string(&bufs, 1, target);
+ lustre_cfg_bufs_set_string(&bufs, 2, param);
+ lcfg = lustre_cfg_new(LCFG_SPTLRPC_CONF, &bufs);
+ LASSERT(lcfg);
+
+ buflen = lustre_cfg_len(lcfg->lcfg_bufcount,
+ lcfg->lcfg_buflens);
+ rec.lrh_len = llog_data_len(buflen);
+ rec.lrh_type = OBD_CFG_REC;
+ rc = llog_write(NULL, llh, &rec, NULL, 0, (void *)lcfg, -1);
+ if (rc)
+ CERROR("failed to write a rec: rc = %d\n", rc);
+ lustre_cfg_free(lcfg);
+ }
+ return 0;
+}
+
+static int sptlrpc_record_rules(struct llog_handle *llh,
+ struct sptlrpc_conf *conf)
+{
+ struct sptlrpc_conf_tgt *conf_tgt;
+
+ sptlrpc_record_rule_set(llh, conf->sc_fsname, &conf->sc_rset);
+
+ list_for_each_entry(conf_tgt, &conf->sc_tgts, sct_list) {
+ sptlrpc_record_rule_set(llh, conf_tgt->sct_name,
+ &conf_tgt->sct_rset);
+ }
+ return 0;
+}
+
+#define LOG_SPTLRPC_TMP "sptlrpc.tmp"
+#define LOG_SPTLRPC "sptlrpc"
+
+static
+int sptlrpc_target_local_copy_conf(struct obd_device *obd,
+ struct sptlrpc_conf *conf)
+{
+ struct llog_handle *llh = NULL;
+ struct llog_ctxt *ctxt;
+ struct lvfs_run_ctxt saved;
+ struct dentry *dentry;
+ int rc;
+
+ ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
+ if (ctxt == NULL)
+ return -EINVAL;
+
+ push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
+
+ dentry = ll_lookup_one_len(MOUNT_CONFIGS_DIR, cfs_fs_pwd(current->fs),
+ strlen(MOUNT_CONFIGS_DIR));
+ if (IS_ERR(dentry)) {
+ rc = PTR_ERR(dentry);
+ CERROR("cannot lookup %s directory: rc = %d\n",
+ MOUNT_CONFIGS_DIR, rc);
+ GOTO(out_ctx, rc);
+ }
+
+ /* erase the old tmp log */
+ rc = llog_erase(NULL, ctxt, NULL, LOG_SPTLRPC_TMP);
+ if (rc < 0 && rc != -ENOENT) {
+ CERROR("%s: cannot erase temporary sptlrpc log: rc = %d\n",
+ obd->obd_name, rc);
+ GOTO(out_dput, rc);
+ }
+
+ /* write temporary log */
+ rc = llog_open_create(NULL, ctxt, &llh, NULL, LOG_SPTLRPC_TMP);
+ if (rc)
+ GOTO(out_dput, rc);
+ rc = llog_init_handle(NULL, llh, LLOG_F_IS_PLAIN, NULL);
+ if (rc)
+ GOTO(out_close, rc);
+
+ rc = sptlrpc_record_rules(llh, conf);
+
+out_close:
+ llog_close(NULL, llh);
+ if (rc == 0)
+ rc = lustre_rename(dentry, obd->obd_lvfs_ctxt.pwdmnt,
+ LOG_SPTLRPC_TMP, LOG_SPTLRPC);
+out_dput:
+ l_dput(dentry);
+out_ctx:
+ pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
+ llog_ctxt_put(ctxt);
+ CDEBUG(D_SEC, "target %s: write local sptlrpc conf: rc = %d\n",
+ obd->obd_name, rc);
+ return rc;
+}
+
+static int local_read_handler(const struct lu_env *env,
+ struct llog_handle *llh,
+ struct llog_rec_hdr *rec, void *data)
+{
+ struct sptlrpc_conf *conf = (struct sptlrpc_conf *) data;
+ struct lustre_cfg *lcfg = (struct lustre_cfg *)(rec + 1);
+ int cfg_len, rc;
+
+ if (rec->lrh_type != OBD_CFG_REC) {
+ CERROR("unhandled lrh_type: %#x\n", rec->lrh_type);
+ return -EINVAL;
+ }
+
+ cfg_len = rec->lrh_len - sizeof(struct llog_rec_hdr) -
+ sizeof(struct llog_rec_tail);
+
+ rc = lustre_cfg_sanity_check(lcfg, cfg_len);
+ if (rc) {
+ CERROR("Insane cfg\n");
+ return rc;
+ }
+
+ if (lcfg->lcfg_command != LCFG_SPTLRPC_CONF) {
+ CERROR("invalid command (%x)\n", lcfg->lcfg_command);
+ return -EINVAL;
+ }
+
+ return __sptlrpc_process_config(lcfg, conf);
+}
+
+static
+int sptlrpc_target_local_read_conf(struct obd_device *obd,
+ struct sptlrpc_conf *conf)
+{
+ struct llog_handle *llh = NULL;
+ struct llog_ctxt *ctxt;
+ struct lvfs_run_ctxt saved;
+ int rc;
+
+ LASSERT(conf->sc_updated == 0 && conf->sc_local == 0);
+
+ ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
+ if (ctxt == NULL) {
+ CERROR("missing llog context\n");
+ return -EINVAL;
+ }
+
+ push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
+
+ rc = llog_open(NULL, ctxt, &llh, NULL, LOG_SPTLRPC, LLOG_OPEN_EXISTS);
+ if (rc < 0) {
+ if (rc == -ENOENT)
+ rc = 0;
+ GOTO(out_pop, rc);
+ }
+
+ rc = llog_init_handle(NULL, llh, LLOG_F_IS_PLAIN, NULL);
+ if (rc)
+ GOTO(out_close, rc);
+
+ if (llog_get_size(llh) <= 1) {
+ CDEBUG(D_SEC, "no local sptlrpc copy found\n");
+ GOTO(out_close, rc = 0);
+ }
+
+ rc = llog_process(NULL, llh, local_read_handler, (void *)conf, NULL);
+
+	if (rc == 0)
+		conf->sc_local = 1;
+	else
+		sptlrpc_conf_free_rsets(conf);
+
+out_close:
+ llog_close(NULL, llh);
+out_pop:
+ pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
+ llog_ctxt_put(ctxt);
+ CDEBUG(D_SEC, "target %s: read local sptlrpc conf: rc = %d\n",
+ obd->obd_name, rc);
+ return rc;
+}
+
+
+/**
+ * called by target devices, extract sptlrpc rules which applies to
+ * this target, to be used for future rpc flavor checking.
+ */
+int sptlrpc_conf_target_get_rules(struct obd_device *obd,
+ struct sptlrpc_rule_set *rset,
+ int initial)
+{
+ struct sptlrpc_conf *conf;
+ struct sptlrpc_conf_tgt *conf_tgt;
+ enum lustre_sec_part sp_dst;
+ char fsname[MTI_NAME_MAXLEN];
+ int rc = 0;
+
+ if (strcmp(obd->obd_type->typ_name, LUSTRE_MDT_NAME) == 0) {
+ sp_dst = LUSTRE_SP_MDT;
+ } else if (strcmp(obd->obd_type->typ_name, LUSTRE_OST_NAME) == 0) {
+ sp_dst = LUSTRE_SP_OST;
+ } else {
+ CERROR("unexpected obd type %s\n", obd->obd_type->typ_name);
+ return -EINVAL;
+ }
+ CDEBUG(D_SEC, "get rules for target %s\n", obd->obd_uuid.uuid);
+
+ target2fsname(obd->obd_uuid.uuid, fsname, sizeof(fsname));
+
+ mutex_lock(&sptlrpc_conf_lock);
+
+ conf = sptlrpc_conf_get(fsname, 0);
+ if (conf == NULL) {
+ CERROR("missing sptlrpc config log\n");
+ GOTO(out, rc);
+ }
+
+ if (conf->sc_updated == 0) {
+		/*
+		 * Always read from the local copy. Another option here would
+		 * be: if we already have a local copy (read for another
+		 * target device hosted on the same node), simply use that.
+		 */
+ if (conf->sc_local)
+ sptlrpc_conf_free_rsets(conf);
+
+ sptlrpc_target_local_read_conf(obd, conf);
+ } else {
+ LASSERT(conf->sc_local == 0);
+
+ /* write a local copy */
+ if (initial || conf->sc_modified)
+ sptlrpc_target_local_copy_conf(obd, conf);
+ else
+ CDEBUG(D_SEC, "unchanged, skip updating local copy\n");
+ }
+
+ /* extract rule set for this target */
+ conf_tgt = sptlrpc_conf_get_tgt(conf, obd->obd_name, 0);
+
+ rc = sptlrpc_rule_set_extract(&conf->sc_rset,
+				      conf_tgt ? &conf_tgt->sct_rset : NULL,
+ LUSTRE_SP_ANY, sp_dst, rset);
+out:
+ mutex_unlock(&sptlrpc_conf_lock);
+ return rc;
+}
+EXPORT_SYMBOL(sptlrpc_conf_target_get_rules);
+
+int sptlrpc_conf_init(void)
+{
+ mutex_init(&sptlrpc_conf_lock);
+ return 0;
+}
+
+void sptlrpc_conf_fini(void)
+{
+ struct sptlrpc_conf *conf, *conf_next;
+
+ mutex_lock(&sptlrpc_conf_lock);
+ list_for_each_entry_safe(conf, conf_next, &sptlrpc_confs, sc_list) {
+ sptlrpc_conf_free(conf);
+ }
+ LASSERT(list_empty(&sptlrpc_confs));
+ mutex_unlock(&sptlrpc_conf_lock);
+}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
new file mode 100644
index 0000000..d2eb20e
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
@@ -0,0 +1,250 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ptlrpc/sec_gc.c
+ *
+ * Author: Eric Mei <ericm@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_SEC
+
+#include <linux/libcfs/libcfs.h>
+
+#include <obd_support.h>
+#include <obd_class.h>
+#include <lustre_net.h>
+#include <lustre_sec.h>
+
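+/* how often the gc thread wakes up, in seconds (converted to jiffies below) */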
+#define SEC_GC_INTERVAL (30 * 60)
+
+
+static struct mutex sec_gc_mutex;
+static LIST_HEAD(sec_gc_list);
+static spinlock_t sec_gc_list_lock;
+
+static LIST_HEAD(sec_gc_ctx_list);
+static spinlock_t sec_gc_ctx_list_lock;
+
+static struct ptlrpc_thread sec_gc_thread;
+static atomic_t sec_gc_wait_del = ATOMIC_INIT(0);
+
+
+void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
+{
+ LASSERT(sec->ps_policy->sp_cops->gc_ctx);
+ LASSERT(sec->ps_gc_interval > 0);
+ LASSERT(list_empty(&sec->ps_gc_list));
+
+ sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
+
+ spin_lock(&sec_gc_list_lock);
+	list_add_tail(&sec->ps_gc_list, &sec_gc_list);
+ spin_unlock(&sec_gc_list_lock);
+
+ CDEBUG(D_SEC, "added sec %p(%s)\n", sec, sec->ps_policy->sp_name);
+}
+EXPORT_SYMBOL(sptlrpc_gc_add_sec);
+
+void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
+{
+ if (list_empty(&sec->ps_gc_list))
+ return;
+
+ might_sleep();
+
+ /* signal before list_del to make iteration in gc thread safe */
+ atomic_inc(&sec_gc_wait_del);
+
+ spin_lock(&sec_gc_list_lock);
+ list_del_init(&sec->ps_gc_list);
+ spin_unlock(&sec_gc_list_lock);
+
+	/* barrier: let any gc pass still holding sec_gc_mutex finish first */
+ mutex_lock(&sec_gc_mutex);
+ mutex_unlock(&sec_gc_mutex);
+
+ atomic_dec(&sec_gc_wait_del);
+
+ CDEBUG(D_SEC, "del sec %p(%s)\n", sec, sec->ps_policy->sp_name);
+}
+EXPORT_SYMBOL(sptlrpc_gc_del_sec);
+
+void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx)
+{
+ LASSERT(list_empty(&ctx->cc_gc_chain));
+
+ CDEBUG(D_SEC, "hand over ctx %p(%u->%s)\n",
+ ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+ spin_lock(&sec_gc_ctx_list_lock);
+ list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
+ spin_unlock(&sec_gc_ctx_list_lock);
+
+ thread_add_flags(&sec_gc_thread, SVC_SIGNAL);
+ wake_up(&sec_gc_thread.t_ctl_waitq);
+}
+EXPORT_SYMBOL(sptlrpc_gc_add_ctx);
+
+static void sec_process_ctx_list(void)
+{
+ struct ptlrpc_cli_ctx *ctx;
+
+ spin_lock(&sec_gc_ctx_list_lock);
+
+ while (!list_empty(&sec_gc_ctx_list)) {
+ ctx = list_entry(sec_gc_ctx_list.next,
+ struct ptlrpc_cli_ctx, cc_gc_chain);
+ list_del_init(&ctx->cc_gc_chain);
+ spin_unlock(&sec_gc_ctx_list_lock);
+
+ LASSERT(ctx->cc_sec);
+ LASSERT(atomic_read(&ctx->cc_refcount) == 1);
+ CDEBUG(D_SEC, "gc pick up ctx %p(%u->%s)\n",
+ ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+ sptlrpc_cli_ctx_put(ctx, 1);
+
+ spin_lock(&sec_gc_ctx_list_lock);
+ }
+
+ spin_unlock(&sec_gc_ctx_list_lock);
+}
+
+static void sec_do_gc(struct ptlrpc_sec *sec)
+{
+ LASSERT(sec->ps_policy->sp_cops->gc_ctx);
+
+ if (unlikely(sec->ps_gc_next == 0)) {
+ CDEBUG(D_SEC, "sec %p(%s) has 0 gc time\n",
+ sec, sec->ps_policy->sp_name);
+ return;
+ }
+
+ CDEBUG(D_SEC, "check on sec %p(%s)\n", sec, sec->ps_policy->sp_name);
+
+ if (cfs_time_after(sec->ps_gc_next, cfs_time_current_sec()))
+ return;
+
+ sec->ps_policy->sp_cops->gc_ctx(sec);
+ sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
+}
+
+static int sec_gc_main(void *arg)
+{
+ struct ptlrpc_thread *thread = (struct ptlrpc_thread *) arg;
+ struct l_wait_info lwi;
+
+ unshare_fs_struct();
+
+ /* Record that the thread is running */
+ thread_set_flags(thread, SVC_RUNNING);
+ wake_up(&thread->t_ctl_waitq);
+
+ while (1) {
+ struct ptlrpc_sec *sec;
+
+ thread_clear_flags(thread, SVC_SIGNAL);
+ sec_process_ctx_list();
+again:
+		/* go through the sec list and do gc.
+		 * FIXME: we iterate through the whole list each time, which
+		 * is not optimal; a balanced binary tree keyed on expiry time
+		 * would be better. Another issue is that we wake up at a
+		 * fixed interval instead of according to each sec's expiry
+		 * time. */
+ mutex_lock(&sec_gc_mutex);
+ list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
+ /* if someone is waiting to be deleted, let it
+ * proceed as soon as possible. */
+ if (atomic_read(&sec_gc_wait_del)) {
+ CDEBUG(D_SEC, "deletion pending, start over\n");
+ mutex_unlock(&sec_gc_mutex);
+ goto again;
+ }
+
+ sec_do_gc(sec);
+ }
+ mutex_unlock(&sec_gc_mutex);
+
+ /* check ctx list again before sleep */
+ sec_process_ctx_list();
+
+ lwi = LWI_TIMEOUT(SEC_GC_INTERVAL * HZ, NULL, NULL);
+ l_wait_event(thread->t_ctl_waitq,
+ thread_is_stopping(thread) ||
+ thread_is_signal(thread),
+ &lwi);
+
+ if (thread_test_and_clear_flags(thread, SVC_STOPPING))
+ break;
+ }
+
+ thread_set_flags(thread, SVC_STOPPED);
+ wake_up(&thread->t_ctl_waitq);
+ return 0;
+}
+
+int sptlrpc_gc_init(void)
+{
+ struct l_wait_info lwi = { 0 };
+ struct task_struct *task;
+
+ mutex_init(&sec_gc_mutex);
+ spin_lock_init(&sec_gc_list_lock);
+ spin_lock_init(&sec_gc_ctx_list_lock);
+
+ /* initialize thread control */
+ memset(&sec_gc_thread, 0, sizeof(sec_gc_thread));
+ init_waitqueue_head(&sec_gc_thread.t_ctl_waitq);
+
+ task = kthread_run(sec_gc_main, &sec_gc_thread, "sptlrpc_gc");
+ if (IS_ERR(task)) {
+ CERROR("can't start gc thread: %ld\n", PTR_ERR(task));
+ return PTR_ERR(task);
+ }
+
+ l_wait_event(sec_gc_thread.t_ctl_waitq,
+ thread_is_running(&sec_gc_thread), &lwi);
+ return 0;
+}
+
+void sptlrpc_gc_fini(void)
+{
+ struct l_wait_info lwi = { 0 };
+
+ thread_set_flags(&sec_gc_thread, SVC_STOPPING);
+ wake_up(&sec_gc_thread.t_ctl_waitq);
+
+ l_wait_event(sec_gc_thread.t_ctl_waitq,
+ thread_is_stopped(&sec_gc_thread), &lwi);
+}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c
new file mode 100644
index 0000000..1213621
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c
@@ -0,0 +1,199 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ptlrpc/sec_lproc.c
+ *
+ * Author: Eric Mei <ericm@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_SEC
+
+#include <linux/libcfs/libcfs.h>
+#include <linux/crypto.h>
+
+#include <obd.h>
+#include <obd_class.h>
+#include <obd_support.h>
+#include <lustre_net.h>
+#include <lustre_import.h>
+#include <lustre_dlm.h>
+#include <lustre_sec.h>
+
+#include "ptlrpc_internal.h"
+
+
+struct proc_dir_entry *sptlrpc_proc_root = NULL;
+EXPORT_SYMBOL(sptlrpc_proc_root);
+
+char *sec_flags2str(unsigned long flags, char *buf, int bufsize)
+{
+ buf[0] = '\0';
+
+ if (flags & PTLRPC_SEC_FL_REVERSE)
+ strlcat(buf, "reverse,", bufsize);
+ if (flags & PTLRPC_SEC_FL_ROOTONLY)
+ strlcat(buf, "rootonly,", bufsize);
+ if (flags & PTLRPC_SEC_FL_UDESC)
+ strlcat(buf, "udesc,", bufsize);
+ if (flags & PTLRPC_SEC_FL_BULK)
+ strlcat(buf, "bulk,", bufsize);
+ if (buf[0] == '\0')
+ strlcat(buf, "-,", bufsize);
+
+ return buf;
+}
+
+static int sptlrpc_info_lprocfs_seq_show(struct seq_file *seq, void *v)
+{
+ struct obd_device *dev = seq->private;
+ struct client_obd *cli = &dev->u.cli;
+ struct ptlrpc_sec *sec = NULL;
+ char str[32];
+
+ LASSERT(strcmp(dev->obd_type->typ_name, LUSTRE_OSC_NAME) == 0 ||
+ strcmp(dev->obd_type->typ_name, LUSTRE_MDC_NAME) == 0 ||
+ strcmp(dev->obd_type->typ_name, LUSTRE_MGC_NAME) == 0);
+
+ if (cli->cl_import)
+ sec = sptlrpc_import_sec_ref(cli->cl_import);
+ if (sec == NULL)
+ goto out;
+
+ sec_flags2str(sec->ps_flvr.sf_flags, str, sizeof(str));
+
+ seq_printf(seq, "rpc flavor: %s\n",
+ sptlrpc_flavor2name_base(sec->ps_flvr.sf_rpc));
+ seq_printf(seq, "bulk flavor: %s\n",
+ sptlrpc_flavor2name_bulk(&sec->ps_flvr, str, sizeof(str)));
+ seq_printf(seq, "flags: %s\n",
+ sec_flags2str(sec->ps_flvr.sf_flags, str, sizeof(str)));
+ seq_printf(seq, "id: %d\n", sec->ps_id);
+ seq_printf(seq, "refcount: %d\n",
+ atomic_read(&sec->ps_refcount));
+ seq_printf(seq, "nctx: %d\n", atomic_read(&sec->ps_nctx));
+	seq_printf(seq, "gc interval %ld\n", sec->ps_gc_interval);
+ seq_printf(seq, "gc next %ld\n",
+ sec->ps_gc_interval ?
+ sec->ps_gc_next - cfs_time_current_sec() : 0);
+
+ sptlrpc_sec_put(sec);
+out:
+ return 0;
+}
+LPROC_SEQ_FOPS_RO(sptlrpc_info_lprocfs);
+
+static int sptlrpc_ctxs_lprocfs_seq_show(struct seq_file *seq, void *v)
+{
+ struct obd_device *dev = seq->private;
+ struct client_obd *cli = &dev->u.cli;
+ struct ptlrpc_sec *sec = NULL;
+
+ LASSERT(strcmp(dev->obd_type->typ_name, LUSTRE_OSC_NAME) == 0 ||
+ strcmp(dev->obd_type->typ_name, LUSTRE_MDC_NAME) == 0 ||
+ strcmp(dev->obd_type->typ_name, LUSTRE_MGC_NAME) == 0);
+
+ if (cli->cl_import)
+ sec = sptlrpc_import_sec_ref(cli->cl_import);
+ if (sec == NULL)
+ goto out;
+
+ if (sec->ps_policy->sp_cops->display)
+ sec->ps_policy->sp_cops->display(sec, seq);
+
+ sptlrpc_sec_put(sec);
+out:
+ return 0;
+}
+LPROC_SEQ_FOPS_RO(sptlrpc_ctxs_lprocfs);
+
+int sptlrpc_lprocfs_cliobd_attach(struct obd_device *dev)
+{
+ int rc;
+
+ if (strcmp(dev->obd_type->typ_name, LUSTRE_OSC_NAME) != 0 &&
+ strcmp(dev->obd_type->typ_name, LUSTRE_MDC_NAME) != 0 &&
+ strcmp(dev->obd_type->typ_name, LUSTRE_MGC_NAME) != 0) {
+ CERROR("can't register lproc for obd type %s\n",
+ dev->obd_type->typ_name);
+ return -EINVAL;
+ }
+
+ rc = lprocfs_obd_seq_create(dev, "srpc_info", 0444,
+ &sptlrpc_info_lprocfs_fops, dev);
+ if (rc) {
+ CERROR("create proc entry srpc_info for %s: %d\n",
+ dev->obd_name, rc);
+ return rc;
+ }
+
+ rc = lprocfs_obd_seq_create(dev, "srpc_contexts", 0444,
+ &sptlrpc_ctxs_lprocfs_fops, dev);
+ if (rc) {
+ CERROR("create proc entry srpc_contexts for %s: %d\n",
+ dev->obd_name, rc);
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sptlrpc_lprocfs_cliobd_attach);
+
+LPROC_SEQ_FOPS_RO(sptlrpc_proc_enc_pool);
+static struct lprocfs_vars sptlrpc_lprocfs_vars[] = {
+ { "encrypt_page_pools", &sptlrpc_proc_enc_pool_fops },
+ { NULL }
+};
+
+int sptlrpc_lproc_init(void)
+{
+ int rc;
+
+ LASSERT(sptlrpc_proc_root == NULL);
+
+ sptlrpc_proc_root = lprocfs_register("sptlrpc", proc_lustre_root,
+ sptlrpc_lprocfs_vars, NULL);
+ if (IS_ERR(sptlrpc_proc_root)) {
+ rc = PTR_ERR(sptlrpc_proc_root);
+ sptlrpc_proc_root = NULL;
+ return rc;
+ }
+ return 0;
+}
+
+void sptlrpc_lproc_fini(void)
+{
+ if (sptlrpc_proc_root) {
+ lprocfs_remove(&sptlrpc_proc_root);
+ sptlrpc_proc_root = NULL;
+ }
+}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
new file mode 100644
index 0000000..ff1137f
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
@@ -0,0 +1,464 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ptlrpc/sec_null.c
+ *
+ * Author: Eric Mei <ericm@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_SEC
+
+
+#include <obd_support.h>
+#include <obd_cksum.h>
+#include <obd_class.h>
+#include <lustre_net.h>
+#include <lustre_sec.h>
+
+static struct ptlrpc_sec_policy null_policy;
+static struct ptlrpc_sec null_sec;
+static struct ptlrpc_cli_ctx null_cli_ctx;
+static struct ptlrpc_svc_ctx null_svc_ctx;
+
+/*
+ * we can temporarily use the topmost 8-bits of lm_secflvr to identify
+ * the source sec part.
+ */
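+/*
+ * Added note: the encode helper below ORs the sec part into bits 24..31 of
+ * lm_secflvr and the decode helper shifts them back out; the lower bits
+ * still carry the flavor itself.
+ */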
+static inline
+void null_encode_sec_part(struct lustre_msg *msg, enum lustre_sec_part sp)
+{
+ msg->lm_secflvr |= (((__u32) sp) & 0xFF) << 24;
+}
+
+static inline
+enum lustre_sec_part null_decode_sec_part(struct lustre_msg *msg)
+{
+ return (msg->lm_secflvr >> 24) & 0xFF;
+}
+
+static int null_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
+{
+ /* should never reach here */
+ LBUG();
+ return 0;
+}
+
+static
+int null_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
+{
+ req->rq_reqbuf->lm_secflvr = SPTLRPC_FLVR_NULL;
+
+ if (!req->rq_import->imp_dlm_fake) {
+ struct obd_device *obd = req->rq_import->imp_obd;
+ null_encode_sec_part(req->rq_reqbuf,
+ obd->u.cli.cl_sp_me);
+ }
+ req->rq_reqdata_len = req->rq_reqlen;
+ return 0;
+}
+
+static
+int null_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
+{
+ __u32 cksums, cksumc;
+
+ LASSERT(req->rq_repdata);
+
+ req->rq_repmsg = req->rq_repdata;
+ req->rq_replen = req->rq_repdata_len;
+
+ if (req->rq_early) {
+ cksums = lustre_msg_get_cksum(req->rq_repdata);
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
+ if (lustre_msghdr_get_flags(req->rq_reqmsg) &
+ MSGHDR_CKSUM_INCOMPAT18)
+ cksumc = lustre_msg_calc_cksum(req->rq_repmsg, 0);
+ else
+ cksumc = lustre_msg_calc_cksum(req->rq_repmsg, 1);
+#else
+# warning "remove checksum compatibility support for b1_8"
+ cksumc = lustre_msg_calc_cksum(req->rq_repmsg);
+#endif
+ if (cksumc != cksums) {
+ CDEBUG(D_SEC,
+ "early reply checksum mismatch: %08x != %08x\n",
+ cksumc, cksums);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static
+struct ptlrpc_sec *null_create_sec(struct obd_import *imp,
+ struct ptlrpc_svc_ctx *svc_ctx,
+ struct sptlrpc_flavor *sf)
+{
+ LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_NULL);
+
+	/* the generic layer has taken a module reference for us; since we
+	 * never really destroy the sec, simply release the reference here.
+	 */
+ sptlrpc_policy_put(&null_policy);
+ return &null_sec;
+}
+
+static
+void null_destroy_sec(struct ptlrpc_sec *sec)
+{
+ LASSERT(sec == &null_sec);
+}
+
+static
+struct ptlrpc_cli_ctx *null_lookup_ctx(struct ptlrpc_sec *sec,
+ struct vfs_cred *vcred,
+ int create, int remove_dead)
+{
+ atomic_inc(&null_cli_ctx.cc_refcount);
+ return &null_cli_ctx;
+}
+
+static
+int null_flush_ctx_cache(struct ptlrpc_sec *sec,
+ uid_t uid,
+ int grace, int force)
+{
+ return 0;
+}
+
+static
+int null_alloc_reqbuf(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req,
+ int msgsize)
+{
+ if (!req->rq_reqbuf) {
+ int alloc_size = size_roundup_power2(msgsize);
+
+ LASSERT(!req->rq_pool);
+ OBD_ALLOC_LARGE(req->rq_reqbuf, alloc_size);
+ if (!req->rq_reqbuf)
+ return -ENOMEM;
+
+ req->rq_reqbuf_len = alloc_size;
+ } else {
+ LASSERT(req->rq_pool);
+ LASSERT(req->rq_reqbuf_len >= msgsize);
+ memset(req->rq_reqbuf, 0, msgsize);
+ }
+
+ req->rq_reqmsg = req->rq_reqbuf;
+ return 0;
+}
+
+static
+void null_free_reqbuf(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req)
+{
+ if (!req->rq_pool) {
+ LASSERTF(req->rq_reqmsg == req->rq_reqbuf,
+ "req %p: reqmsg %p is not reqbuf %p in null sec\n",
+ req, req->rq_reqmsg, req->rq_reqbuf);
+ LASSERTF(req->rq_reqbuf_len >= req->rq_reqlen,
+			 "req %p: reqlen %d should not be larger than buflen %d\n",
+ req, req->rq_reqlen, req->rq_reqbuf_len);
+
+ OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
+ req->rq_reqbuf = NULL;
+ req->rq_reqbuf_len = 0;
+ }
+}
+
+static
+int null_alloc_repbuf(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req,
+ int msgsize)
+{
+	/* add space for an early reply */
+ msgsize += lustre_msg_early_size();
+
+ msgsize = size_roundup_power2(msgsize);
+
+ OBD_ALLOC_LARGE(req->rq_repbuf, msgsize);
+ if (!req->rq_repbuf)
+ return -ENOMEM;
+
+ req->rq_repbuf_len = msgsize;
+ return 0;
+}
+
+static
+void null_free_repbuf(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req)
+{
+ LASSERT(req->rq_repbuf);
+
+ OBD_FREE_LARGE(req->rq_repbuf, req->rq_repbuf_len);
+ req->rq_repbuf = NULL;
+ req->rq_repbuf_len = 0;
+}
+
+static
+int null_enlarge_reqbuf(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req,
+ int segment, int newsize)
+{
+ struct lustre_msg *newbuf;
+ struct lustre_msg *oldbuf = req->rq_reqmsg;
+ int oldsize, newmsg_size, alloc_size;
+
+ LASSERT(req->rq_reqbuf);
+ LASSERT(req->rq_reqbuf == req->rq_reqmsg);
+ LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
+ LASSERT(req->rq_reqlen == lustre_packed_msg_size(oldbuf));
+
+	/* compute the new message size by temporarily plugging in newsize */
+ oldsize = req->rq_reqbuf->lm_buflens[segment];
+ req->rq_reqbuf->lm_buflens[segment] = newsize;
+ newmsg_size = lustre_packed_msg_size(oldbuf);
+ req->rq_reqbuf->lm_buflens[segment] = oldsize;
+
+ /* request from pool should always have enough buffer */
+ LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newmsg_size);
+
+ if (req->rq_reqbuf_len < newmsg_size) {
+ alloc_size = size_roundup_power2(newmsg_size);
+
+ OBD_ALLOC_LARGE(newbuf, alloc_size);
+ if (newbuf == NULL)
+ return -ENOMEM;
+
+ memcpy(newbuf, req->rq_reqbuf, req->rq_reqlen);
+
+ OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
+ req->rq_reqbuf = req->rq_reqmsg = newbuf;
+ req->rq_reqbuf_len = alloc_size;
+ }
+
+ _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
+ req->rq_reqlen = newmsg_size;
+
+ return 0;
+}
+
+static struct ptlrpc_svc_ctx null_svc_ctx = {
+ .sc_refcount = ATOMIC_INIT(1),
+ .sc_policy = &null_policy,
+};
+
+static
+int null_accept(struct ptlrpc_request *req)
+{
+ LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
+ SPTLRPC_POLICY_NULL);
+
+ if (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL) {
+ CERROR("Invalid rpc flavor 0x%x\n", req->rq_flvr.sf_rpc);
+ return SECSVC_DROP;
+ }
+
+ req->rq_sp_from = null_decode_sec_part(req->rq_reqbuf);
+
+ req->rq_reqmsg = req->rq_reqbuf;
+ req->rq_reqlen = req->rq_reqdata_len;
+
+ req->rq_svc_ctx = &null_svc_ctx;
+ atomic_inc(&req->rq_svc_ctx->sc_refcount);
+
+ return SECSVC_OK;
+}
+
+static
+int null_alloc_rs(struct ptlrpc_request *req, int msgsize)
+{
+ struct ptlrpc_reply_state *rs;
+ int rs_size = sizeof(*rs) + msgsize;
+
+ LASSERT(msgsize % 8 == 0);
+
+ rs = req->rq_reply_state;
+
+ if (rs) {
+ /* pre-allocated */
+ LASSERT(rs->rs_size >= rs_size);
+ } else {
+ OBD_ALLOC_LARGE(rs, rs_size);
+ if (rs == NULL)
+ return -ENOMEM;
+
+ rs->rs_size = rs_size;
+ }
+
+ rs->rs_svc_ctx = req->rq_svc_ctx;
+ atomic_inc(&req->rq_svc_ctx->sc_refcount);
+
+ rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
+ rs->rs_repbuf_len = rs_size - sizeof(*rs);
+ rs->rs_msg = rs->rs_repbuf;
+
+ req->rq_reply_state = rs;
+ return 0;
+}
+
+static
+void null_free_rs(struct ptlrpc_reply_state *rs)
+{
+ LASSERT_ATOMIC_GT(&rs->rs_svc_ctx->sc_refcount, 1);
+ atomic_dec(&rs->rs_svc_ctx->sc_refcount);
+
+ if (!rs->rs_prealloc)
+ OBD_FREE_LARGE(rs, rs->rs_size);
+}
+
+static
+int null_authorize(struct ptlrpc_request *req)
+{
+ struct ptlrpc_reply_state *rs = req->rq_reply_state;
+
+ LASSERT(rs);
+
+ rs->rs_repbuf->lm_secflvr = SPTLRPC_FLVR_NULL;
+ rs->rs_repdata_len = req->rq_replen;
+
+ if (likely(req->rq_packed_final)) {
+ if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
+ req->rq_reply_off = lustre_msg_early_size();
+ else
+ req->rq_reply_off = 0;
+ } else {
+ __u32 cksum;
+
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
+ if (lustre_msghdr_get_flags(req->rq_reqmsg) &
+ MSGHDR_CKSUM_INCOMPAT18)
+ cksum = lustre_msg_calc_cksum(rs->rs_repbuf, 0);
+ else
+ cksum = lustre_msg_calc_cksum(rs->rs_repbuf, 1);
+#else
+# warning "remove checksum compatibility support for b1_8"
+ cksum = lustre_msg_calc_cksum(rs->rs_repbuf);
+#endif
+ lustre_msg_set_cksum(rs->rs_repbuf, cksum);
+ req->rq_reply_off = 0;
+ }
+
+ return 0;
+}
+
+static struct ptlrpc_ctx_ops null_ctx_ops = {
+ .refresh = null_ctx_refresh,
+ .sign = null_ctx_sign,
+ .verify = null_ctx_verify,
+};
+
+static struct ptlrpc_sec_cops null_sec_cops = {
+ .create_sec = null_create_sec,
+ .destroy_sec = null_destroy_sec,
+ .lookup_ctx = null_lookup_ctx,
+ .flush_ctx_cache = null_flush_ctx_cache,
+ .alloc_reqbuf = null_alloc_reqbuf,
+ .alloc_repbuf = null_alloc_repbuf,
+ .free_reqbuf = null_free_reqbuf,
+ .free_repbuf = null_free_repbuf,
+ .enlarge_reqbuf = null_enlarge_reqbuf,
+};
+
+static struct ptlrpc_sec_sops null_sec_sops = {
+ .accept = null_accept,
+ .alloc_rs = null_alloc_rs,
+ .authorize = null_authorize,
+ .free_rs = null_free_rs,
+};
+
+static struct ptlrpc_sec_policy null_policy = {
+ .sp_owner = THIS_MODULE,
+ .sp_name = "sec.null",
+ .sp_policy = SPTLRPC_POLICY_NULL,
+ .sp_cops = &null_sec_cops,
+ .sp_sops = &null_sec_sops,
+};
+
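+/*
+ * Set up the statically allocated null_sec and null_cli_ctx. Both are
+ * marked permanently busy (refcount 1, eternal ctx flags), so they are
+ * never torn down.
+ */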
+static void null_init_internal(void)
+{
+ static HLIST_HEAD(__list);
+
+ null_sec.ps_policy = &null_policy;
+ atomic_set(&null_sec.ps_refcount, 1); /* always busy */
+ null_sec.ps_id = -1;
+ null_sec.ps_import = NULL;
+ null_sec.ps_flvr.sf_rpc = SPTLRPC_FLVR_NULL;
+ null_sec.ps_flvr.sf_flags = 0;
+ null_sec.ps_part = LUSTRE_SP_ANY;
+ null_sec.ps_dying = 0;
+ spin_lock_init(&null_sec.ps_lock);
+ atomic_set(&null_sec.ps_nctx, 1); /* for "null_cli_ctx" */
+ INIT_LIST_HEAD(&null_sec.ps_gc_list);
+ null_sec.ps_gc_interval = 0;
+ null_sec.ps_gc_next = 0;
+
+ hlist_add_head(&null_cli_ctx.cc_cache, &__list);
+ atomic_set(&null_cli_ctx.cc_refcount, 1); /* for hash */
+ null_cli_ctx.cc_sec = &null_sec;
+ null_cli_ctx.cc_ops = &null_ctx_ops;
+ null_cli_ctx.cc_expire = 0;
+ null_cli_ctx.cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_ETERNAL |
+ PTLRPC_CTX_UPTODATE;
+ null_cli_ctx.cc_vcred.vc_uid = 0;
+ spin_lock_init(&null_cli_ctx.cc_lock);
+ INIT_LIST_HEAD(&null_cli_ctx.cc_req_list);
+ INIT_LIST_HEAD(&null_cli_ctx.cc_gc_chain);
+}
+
+int sptlrpc_null_init(void)
+{
+ int rc;
+
+ null_init_internal();
+
+ rc = sptlrpc_register_policy(&null_policy);
+ if (rc)
+ CERROR("failed to register %s: %d\n", null_policy.sp_name, rc);
+
+ return rc;
+}
+
+void sptlrpc_null_fini(void)
+{
+ int rc;
+
+ rc = sptlrpc_unregister_policy(&null_policy);
+ if (rc)
+		CERROR("failed to unregister %s: %d\n", null_policy.sp_name, rc);
+}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
new file mode 100644
index 0000000..416401b
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
@@ -0,0 +1,1001 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ptlrpc/sec_plain.c
+ *
+ * Author: Eric Mei <ericm@clusterfs.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_SEC
+
+
+#include <obd_support.h>
+#include <obd_cksum.h>
+#include <obd_class.h>
+#include <lustre_net.h>
+#include <lustre_sec.h>
+
+struct plain_sec {
+ struct ptlrpc_sec pls_base;
+ rwlock_t pls_lock;
+ struct ptlrpc_cli_ctx *pls_ctx;
+};
+
+static inline struct plain_sec *sec2plsec(struct ptlrpc_sec *sec)
+{
+ return container_of(sec, struct plain_sec, pls_base);
+}
+
+static struct ptlrpc_sec_policy plain_policy;
+static struct ptlrpc_ctx_ops plain_ctx_ops;
+static struct ptlrpc_svc_ctx plain_svc_ctx;
+
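+/* reply-buffer space/offset reserved for early replies;
+ * computed in sptlrpc_plain_init() */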
+static unsigned int plain_at_offset;
+
+/*
+ * for simplicity, plain policy RPCs use a fixed layout.
+ */
+#define PLAIN_PACK_SEGMENTS (4)
+
+#define PLAIN_PACK_HDR_OFF (0)
+#define PLAIN_PACK_MSG_OFF (1)
+#define PLAIN_PACK_USER_OFF (2)
+#define PLAIN_PACK_BULK_OFF (3)
+
+#define PLAIN_FL_USER (0x01)
+#define PLAIN_FL_BULK (0x02)
+
+struct plain_header {
+ __u8 ph_ver; /* 0 */
+ __u8 ph_flags;
+ __u8 ph_sp; /* source */
+ __u8 ph_bulk_hash_alg; /* complete flavor desc */
+ __u8 ph_pad[4];
+};
+
+struct plain_bulk_token {
+ __u8 pbt_hash[8];
+};
+
+#define PLAIN_BSD_SIZE \
+ (sizeof(struct ptlrpc_bulk_sec_desc) + sizeof(struct plain_bulk_token))
+
+/****************************************
+ * bulk checksum helpers *
+ ****************************************/
+
+static int plain_unpack_bsd(struct lustre_msg *msg, int swabbed)
+{
+ struct ptlrpc_bulk_sec_desc *bsd;
+
+ if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF, swabbed))
+ return -EPROTO;
+
+ bsd = lustre_msg_buf(msg, PLAIN_PACK_BULK_OFF, PLAIN_BSD_SIZE);
+ if (bsd == NULL) {
+ CERROR("bulk sec desc has short size %d\n",
+ lustre_msg_buflen(msg, PLAIN_PACK_BULK_OFF));
+ return -EPROTO;
+ }
+
+ if (bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
+ bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG) {
+ CERROR("invalid bulk svc %u\n", bsd->bsd_svc);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static int plain_generate_bulk_csum(struct ptlrpc_bulk_desc *desc,
+ __u8 hash_alg,
+ struct plain_bulk_token *token)
+{
+ if (hash_alg == BULK_HASH_ALG_NULL)
+ return 0;
+
+ memset(token->pbt_hash, 0, sizeof(token->pbt_hash));
+ return sptlrpc_get_bulk_checksum(desc, hash_alg, token->pbt_hash,
+ sizeof(token->pbt_hash));
+}
+
+static int plain_verify_bulk_csum(struct ptlrpc_bulk_desc *desc,
+ __u8 hash_alg,
+ struct plain_bulk_token *tokenr)
+{
+ struct plain_bulk_token tokenv;
+ int rc;
+
+ if (hash_alg == BULK_HASH_ALG_NULL)
+ return 0;
+
+ memset(&tokenv.pbt_hash, 0, sizeof(tokenv.pbt_hash));
+ rc = sptlrpc_get_bulk_checksum(desc, hash_alg, tokenv.pbt_hash,
+ sizeof(tokenv.pbt_hash));
+ if (rc)
+ return rc;
+
+ if (memcmp(tokenr->pbt_hash, tokenv.pbt_hash, sizeof(tokenr->pbt_hash)))
+ return -EACCES;
+ return 0;
+}
+
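+/* flip one bit in the first non-empty iov page to simulate bulk data
+ * corruption (used by the OBD_FAIL_OSC_CHECKSUM_RECEIVE failure path) */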
+static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
+{
+ char *ptr;
+ unsigned int off, i;
+
+ for (i = 0; i < desc->bd_iov_count; i++) {
+ if (desc->bd_iov[i].kiov_len == 0)
+ continue;
+
+ ptr = kmap(desc->bd_iov[i].kiov_page);
+ off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
+ ptr[off] ^= 0x1;
+ kunmap(desc->bd_iov[i].kiov_page);
+ return;
+ }
+}
+
+/****************************************
+ * cli_ctx apis *
+ ****************************************/
+
+static
+int plain_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
+{
+ /* should never reach here */
+ LBUG();
+ return 0;
+}
+
+static
+int plain_ctx_validate(struct ptlrpc_cli_ctx *ctx)
+{
+ return 0;
+}
+
+static
+int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
+{
+ struct lustre_msg *msg = req->rq_reqbuf;
+ struct plain_header *phdr;
+
+ msg->lm_secflvr = req->rq_flvr.sf_rpc;
+
+ phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
+ phdr->ph_ver = 0;
+ phdr->ph_flags = 0;
+ phdr->ph_sp = ctx->cc_sec->ps_part;
+ phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;
+
+ if (req->rq_pack_udesc)
+ phdr->ph_flags |= PLAIN_FL_USER;
+ if (req->rq_pack_bulk)
+ phdr->ph_flags |= PLAIN_FL_BULK;
+
+ req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
+ msg->lm_buflens);
+ return 0;
+}
+
+static
+int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
+{
+ struct lustre_msg *msg = req->rq_repdata;
+ struct plain_header *phdr;
+ __u32 cksum;
+ int swabbed;
+
+ if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
+ CERROR("unexpected reply buf count %u\n", msg->lm_bufcount);
+ return -EPROTO;
+ }
+
+ swabbed = ptlrpc_rep_need_swab(req);
+
+ phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
+ if (phdr == NULL) {
+ CERROR("missing plain header\n");
+ return -EPROTO;
+ }
+
+ if (phdr->ph_ver != 0) {
+ CERROR("Invalid header version\n");
+ return -EPROTO;
+ }
+
+ /* expect no user desc in reply */
+ if (phdr->ph_flags & PLAIN_FL_USER) {
+ CERROR("Unexpected udesc flag in reply\n");
+ return -EPROTO;
+ }
+
+ if (phdr->ph_bulk_hash_alg != req->rq_flvr.u_bulk.hash.hash_alg) {
+ CERROR("reply bulk flavor %u != %u\n", phdr->ph_bulk_hash_alg,
+ req->rq_flvr.u_bulk.hash.hash_alg);
+ return -EPROTO;
+ }
+
+ if (unlikely(req->rq_early)) {
+ unsigned int hsize = 4;
+
+ cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
+ lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
+ lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
+ NULL, 0, (unsigned char *)&cksum, &hsize);
+ if (cksum != msg->lm_cksum) {
+ CDEBUG(D_SEC,
+ "early reply checksum mismatch: %08x != %08x\n",
+ cpu_to_le32(cksum), msg->lm_cksum);
+ return -EINVAL;
+ }
+ } else {
+ /* whether we sent with bulk or not, we expect the same
+ * in reply, except for early reply */
+ if (!req->rq_early &&
+ !equi(req->rq_pack_bulk == 1,
+ phdr->ph_flags & PLAIN_FL_BULK)) {
+ CERROR("%s bulk checksum in reply\n",
+ req->rq_pack_bulk ? "Missing" : "Unexpected");
+ return -EPROTO;
+ }
+
+ if (phdr->ph_flags & PLAIN_FL_BULK) {
+ if (plain_unpack_bsd(msg, swabbed))
+ return -EPROTO;
+ }
+ }
+
+ req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
+ req->rq_replen = lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF);
+ return 0;
+}
+
+static
+int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
+ struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc)
+{
+ struct ptlrpc_bulk_sec_desc *bsd;
+ struct plain_bulk_token *token;
+ int rc;
+
+ LASSERT(req->rq_pack_bulk);
+ LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
+
+ bsd = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
+ token = (struct plain_bulk_token *) bsd->bsd_data;
+
+ bsd->bsd_version = 0;
+ bsd->bsd_flags = 0;
+ bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
+ bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);
+
+ if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
+ return 0;
+
+ if (req->rq_bulk_read)
+ return 0;
+
+ rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
+ token);
+ if (rc) {
+ CERROR("bulk write: failed to compute checksum: %d\n", rc);
+ } else {
+		/*
+		 * for sending we only compute a wrong checksum instead of
+		 * corrupting the data, so the data is still correct on a redo
+		 */
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND) &&
+ req->rq_flvr.u_bulk.hash.hash_alg != BULK_HASH_ALG_NULL)
+ token->pbt_hash[0] ^= 0x1;
+ }
+
+ return rc;
+}
+
+static
+int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
+ struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc)
+{
+ struct ptlrpc_bulk_sec_desc *bsdv;
+ struct plain_bulk_token *tokenv;
+ int rc;
+ int i, nob;
+
+ LASSERT(req->rq_pack_bulk);
+ LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
+ LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);
+
+ bsdv = lustre_msg_buf(req->rq_repdata, PLAIN_PACK_BULK_OFF, 0);
+ tokenv = (struct plain_bulk_token *) bsdv->bsd_data;
+
+ if (req->rq_bulk_write) {
+ if (bsdv->bsd_flags & BSD_FL_ERR)
+ return -EIO;
+ return 0;
+ }
+
+ /* fix the actual data size */
+ for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
+ if (desc->bd_iov[i].kiov_len + nob > desc->bd_nob_transferred) {
+ desc->bd_iov[i].kiov_len =
+ desc->bd_nob_transferred - nob;
+ }
+ nob += desc->bd_iov[i].kiov_len;
+ }
+
+ rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
+ tokenv);
+ if (rc)
+ CERROR("bulk read: client verify failed: %d\n", rc);
+
+ return rc;
+}
+
+/****************************************
+ * sec apis *
+ ****************************************/
+
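+/* return the single client ctx of a plain sec, creating it on first use;
+ * a reference is taken for the caller */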
+static
+struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
+{
+ struct ptlrpc_cli_ctx *ctx, *ctx_new;
+
+ OBD_ALLOC_PTR(ctx_new);
+
+ write_lock(&plsec->pls_lock);
+
+ ctx = plsec->pls_ctx;
+ if (ctx) {
+ atomic_inc(&ctx->cc_refcount);
+
+ if (ctx_new)
+ OBD_FREE_PTR(ctx_new);
+ } else if (ctx_new) {
+ ctx = ctx_new;
+
+ atomic_set(&ctx->cc_refcount, 1); /* for cache */
+ ctx->cc_sec = &plsec->pls_base;
+ ctx->cc_ops = &plain_ctx_ops;
+ ctx->cc_expire = 0;
+ ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
+ ctx->cc_vcred.vc_uid = 0;
+ spin_lock_init(&ctx->cc_lock);
+ INIT_LIST_HEAD(&ctx->cc_req_list);
+ INIT_LIST_HEAD(&ctx->cc_gc_chain);
+
+ plsec->pls_ctx = ctx;
+ atomic_inc(&plsec->pls_base.ps_nctx);
+ atomic_inc(&plsec->pls_base.ps_refcount);
+
+ atomic_inc(&ctx->cc_refcount); /* for caller */
+ }
+
+ write_unlock(&plsec->pls_lock);
+
+ return ctx;
+}
+
+static
+void plain_destroy_sec(struct ptlrpc_sec *sec)
+{
+ struct plain_sec *plsec = sec2plsec(sec);
+
+ LASSERT(sec->ps_policy == &plain_policy);
+ LASSERT(sec->ps_import);
+ LASSERT(atomic_read(&sec->ps_refcount) == 0);
+ LASSERT(atomic_read(&sec->ps_nctx) == 0);
+ LASSERT(plsec->pls_ctx == NULL);
+
+ class_import_put(sec->ps_import);
+
+ OBD_FREE_PTR(plsec);
+}
+
+static
+void plain_kill_sec(struct ptlrpc_sec *sec)
+{
+ sec->ps_dying = 1;
+}
+
+static
+struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
+ struct ptlrpc_svc_ctx *svc_ctx,
+ struct sptlrpc_flavor *sf)
+{
+ struct plain_sec *plsec;
+ struct ptlrpc_sec *sec;
+ struct ptlrpc_cli_ctx *ctx;
+
+ LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);
+
+ OBD_ALLOC_PTR(plsec);
+ if (plsec == NULL)
+ return NULL;
+
+ /*
+ * initialize plain_sec
+ */
+ rwlock_init(&plsec->pls_lock);
+ plsec->pls_ctx = NULL;
+
+ sec = &plsec->pls_base;
+ sec->ps_policy = &plain_policy;
+ atomic_set(&sec->ps_refcount, 0);
+ atomic_set(&sec->ps_nctx, 0);
+ sec->ps_id = sptlrpc_get_next_secid();
+ sec->ps_import = class_import_get(imp);
+ sec->ps_flvr = *sf;
+ spin_lock_init(&sec->ps_lock);
+ INIT_LIST_HEAD(&sec->ps_gc_list);
+ sec->ps_gc_interval = 0;
+ sec->ps_gc_next = 0;
+
+ /* install ctx immediately if this is a reverse sec */
+ if (svc_ctx) {
+ ctx = plain_sec_install_ctx(plsec);
+ if (ctx == NULL) {
+ plain_destroy_sec(sec);
+ return NULL;
+ }
+ sptlrpc_cli_ctx_put(ctx, 1);
+ }
+
+ return sec;
+}
+
+static
+struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
+ struct vfs_cred *vcred,
+ int create, int remove_dead)
+{
+ struct plain_sec *plsec = sec2plsec(sec);
+ struct ptlrpc_cli_ctx *ctx;
+
+ read_lock(&plsec->pls_lock);
+ ctx = plsec->pls_ctx;
+ if (ctx)
+ atomic_inc(&ctx->cc_refcount);
+ read_unlock(&plsec->pls_lock);
+
+ if (unlikely(ctx == NULL))
+ ctx = plain_sec_install_ctx(plsec);
+
+ return ctx;
+}
+
+static
+void plain_release_ctx(struct ptlrpc_sec *sec,
+ struct ptlrpc_cli_ctx *ctx, int sync)
+{
+ LASSERT(atomic_read(&sec->ps_refcount) > 0);
+ LASSERT(atomic_read(&sec->ps_nctx) > 0);
+ LASSERT(atomic_read(&ctx->cc_refcount) == 0);
+ LASSERT(ctx->cc_sec == sec);
+
+ OBD_FREE_PTR(ctx);
+
+ atomic_dec(&sec->ps_nctx);
+ sptlrpc_sec_put(sec);
+}
+
+static
+int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
+ uid_t uid, int grace, int force)
+{
+ struct plain_sec *plsec = sec2plsec(sec);
+ struct ptlrpc_cli_ctx *ctx;
+
+ /* do nothing unless caller want to flush for 'all' */
+ if (uid != -1)
+ return 0;
+
+ write_lock(&plsec->pls_lock);
+ ctx = plsec->pls_ctx;
+ plsec->pls_ctx = NULL;
+ write_unlock(&plsec->pls_lock);
+
+ if (ctx)
+ sptlrpc_cli_ctx_put(ctx, 1);
+ return 0;
+}
+
+static
+int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req,
+ int msgsize)
+{
+ __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
+ int alloc_len;
+
+ buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
+ buflens[PLAIN_PACK_MSG_OFF] = msgsize;
+
+ if (req->rq_pack_udesc)
+ buflens[PLAIN_PACK_USER_OFF] = sptlrpc_current_user_desc_size();
+
+ if (req->rq_pack_bulk) {
+ LASSERT(req->rq_bulk_read || req->rq_bulk_write);
+ buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
+ }
+
+ alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
+
+ if (!req->rq_reqbuf) {
+ LASSERT(!req->rq_pool);
+
+ alloc_len = size_roundup_power2(alloc_len);
+ OBD_ALLOC_LARGE(req->rq_reqbuf, alloc_len);
+ if (!req->rq_reqbuf)
+ return -ENOMEM;
+
+ req->rq_reqbuf_len = alloc_len;
+ } else {
+ LASSERT(req->rq_pool);
+ LASSERT(req->rq_reqbuf_len >= alloc_len);
+ memset(req->rq_reqbuf, 0, alloc_len);
+ }
+
+ lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
+ req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0);
+
+ if (req->rq_pack_udesc)
+ sptlrpc_pack_user_desc(req->rq_reqbuf, PLAIN_PACK_USER_OFF);
+
+ return 0;
+}
+
+static
+void plain_free_reqbuf(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req)
+{
+ if (!req->rq_pool) {
+ OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
+ req->rq_reqbuf = NULL;
+ req->rq_reqbuf_len = 0;
+ }
+}
+
+static
+int plain_alloc_repbuf(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req,
+ int msgsize)
+{
+ __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
+ int alloc_len;
+
+ buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
+ buflens[PLAIN_PACK_MSG_OFF] = msgsize;
+
+ if (req->rq_pack_bulk) {
+ LASSERT(req->rq_bulk_read || req->rq_bulk_write);
+ buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
+ }
+
+ alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
+
+ /* add space for early reply */
+ alloc_len += plain_at_offset;
+
+ alloc_len = size_roundup_power2(alloc_len);
+
+ OBD_ALLOC_LARGE(req->rq_repbuf, alloc_len);
+ if (!req->rq_repbuf)
+ return -ENOMEM;
+
+ req->rq_repbuf_len = alloc_len;
+ return 0;
+}
+
+static
+void plain_free_repbuf(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req)
+{
+ OBD_FREE_LARGE(req->rq_repbuf, req->rq_repbuf_len);
+ req->rq_repbuf = NULL;
+ req->rq_repbuf_len = 0;
+}
+
+static
+int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req,
+ int segment, int newsize)
+{
+ struct lustre_msg *newbuf;
+ int oldsize;
+ int newmsg_size, newbuf_size;
+
+ LASSERT(req->rq_reqbuf);
+ LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
+ LASSERT(lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0) ==
+ req->rq_reqmsg);
+
+ /* compute new embedded msg size. */
+ oldsize = req->rq_reqmsg->lm_buflens[segment];
+ req->rq_reqmsg->lm_buflens[segment] = newsize;
+ newmsg_size = lustre_msg_size_v2(req->rq_reqmsg->lm_bufcount,
+ req->rq_reqmsg->lm_buflens);
+ req->rq_reqmsg->lm_buflens[segment] = oldsize;
+
+ /* compute new wrapper msg size. */
+ oldsize = req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF];
+ req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = newmsg_size;
+ newbuf_size = lustre_msg_size_v2(req->rq_reqbuf->lm_bufcount,
+ req->rq_reqbuf->lm_buflens);
+ req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = oldsize;
+
+ /* request from pool should always have enough buffer */
+ LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);
+
+ if (req->rq_reqbuf_len < newbuf_size) {
+ newbuf_size = size_roundup_power2(newbuf_size);
+
+ OBD_ALLOC_LARGE(newbuf, newbuf_size);
+ if (newbuf == NULL)
+ return -ENOMEM;
+
+ memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
+
+ OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
+ req->rq_reqbuf = newbuf;
+ req->rq_reqbuf_len = newbuf_size;
+ req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
+ PLAIN_PACK_MSG_OFF, 0);
+ }
+
+ _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
+ newmsg_size);
+ _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
+
+ req->rq_reqlen = newmsg_size;
+ return 0;
+}
+
+/****************************************
+ * service apis *
+ ****************************************/
+
+static struct ptlrpc_svc_ctx plain_svc_ctx = {
+ .sc_refcount = ATOMIC_INIT(1),
+ .sc_policy = &plain_policy,
+};
+
+static
+int plain_accept(struct ptlrpc_request *req)
+{
+ struct lustre_msg *msg = req->rq_reqbuf;
+ struct plain_header *phdr;
+ int swabbed;
+
+ LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
+ SPTLRPC_POLICY_PLAIN);
+
+ if (SPTLRPC_FLVR_BASE(req->rq_flvr.sf_rpc) !=
+ SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN) ||
+ SPTLRPC_FLVR_BULK_TYPE(req->rq_flvr.sf_rpc) !=
+ SPTLRPC_FLVR_BULK_TYPE(SPTLRPC_FLVR_PLAIN)) {
+ CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
+ return SECSVC_DROP;
+ }
+
+ if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) {
+ CERROR("unexpected request buf count %u\n", msg->lm_bufcount);
+ return SECSVC_DROP;
+ }
+
+ swabbed = ptlrpc_req_need_swab(req);
+
+ phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
+ if (phdr == NULL) {
+ CERROR("missing plain header\n");
+ return -EPROTO;
+ }
+
+ if (phdr->ph_ver != 0) {
+ CERROR("Invalid header version\n");
+ return -EPROTO;
+ }
+
+ if (phdr->ph_bulk_hash_alg >= BULK_HASH_ALG_MAX) {
+ CERROR("invalid hash algorithm: %u\n", phdr->ph_bulk_hash_alg);
+ return -EPROTO;
+ }
+
+ req->rq_sp_from = phdr->ph_sp;
+ req->rq_flvr.u_bulk.hash.hash_alg = phdr->ph_bulk_hash_alg;
+
+ if (phdr->ph_flags & PLAIN_FL_USER) {
+ if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF,
+ swabbed)) {
+ CERROR("Mal-formed user descriptor\n");
+ return SECSVC_DROP;
+ }
+
+ req->rq_pack_udesc = 1;
+ req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0);
+ }
+
+ if (phdr->ph_flags & PLAIN_FL_BULK) {
+ if (plain_unpack_bsd(msg, swabbed))
+ return SECSVC_DROP;
+
+ req->rq_pack_bulk = 1;
+ }
+
+ req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
+ req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];
+
+ req->rq_svc_ctx = &plain_svc_ctx;
+ atomic_inc(&req->rq_svc_ctx->sc_refcount);
+
+ return SECSVC_OK;
+}
+
+static
+int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
+{
+ struct ptlrpc_reply_state *rs;
+ __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
+ int rs_size = sizeof(*rs);
+
+ LASSERT(msgsize % 8 == 0);
+
+ buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
+ buflens[PLAIN_PACK_MSG_OFF] = msgsize;
+
+ if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write))
+ buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
+
+ rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
+
+ rs = req->rq_reply_state;
+
+ if (rs) {
+ /* pre-allocated */
+ LASSERT(rs->rs_size >= rs_size);
+ } else {
+ OBD_ALLOC_LARGE(rs, rs_size);
+ if (rs == NULL)
+ return -ENOMEM;
+
+ rs->rs_size = rs_size;
+ }
+
+ rs->rs_svc_ctx = req->rq_svc_ctx;
+ atomic_inc(&req->rq_svc_ctx->sc_refcount);
+ rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
+ rs->rs_repbuf_len = rs_size - sizeof(*rs);
+
+ lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
+ rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);
+
+ req->rq_reply_state = rs;
+ return 0;
+}
+
+static
+void plain_free_rs(struct ptlrpc_reply_state *rs)
+{
+ LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
+ atomic_dec(&rs->rs_svc_ctx->sc_refcount);
+
+ if (!rs->rs_prealloc)
+ OBD_FREE_LARGE(rs, rs->rs_size);
+}
+
+static
+int plain_authorize(struct ptlrpc_request *req)
+{
+ struct ptlrpc_reply_state *rs = req->rq_reply_state;
+ struct lustre_msg_v2 *msg = rs->rs_repbuf;
+ struct plain_header *phdr;
+ int len;
+
+ LASSERT(rs);
+ LASSERT(msg);
+
+ if (req->rq_replen != msg->lm_buflens[PLAIN_PACK_MSG_OFF])
+ len = lustre_shrink_msg(msg, PLAIN_PACK_MSG_OFF,
+ req->rq_replen, 1);
+ else
+ len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
+
+ msg->lm_secflvr = req->rq_flvr.sf_rpc;
+
+ phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
+ phdr->ph_ver = 0;
+ phdr->ph_flags = 0;
+ phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;
+
+ if (req->rq_pack_bulk)
+ phdr->ph_flags |= PLAIN_FL_BULK;
+
+ rs->rs_repdata_len = len;
+
+ if (likely(req->rq_packed_final)) {
+ if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
+ req->rq_reply_off = plain_at_offset;
+ else
+ req->rq_reply_off = 0;
+ } else {
+ unsigned int hsize = 4;
+
+ cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
+ lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
+ lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
+ NULL, 0, (unsigned char *)&msg->lm_cksum, &hsize);
+ req->rq_reply_off = 0;
+ }
+
+ return 0;
+}
+
+static
+int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc)
+{
+ struct ptlrpc_reply_state *rs = req->rq_reply_state;
+ struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
+ struct plain_bulk_token *tokenr;
+ int rc;
+
+ LASSERT(req->rq_bulk_write);
+ LASSERT(req->rq_pack_bulk);
+
+ bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
+ tokenr = (struct plain_bulk_token *) bsdr->bsd_data;
+ bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
+
+ bsdv->bsd_version = 0;
+ bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
+ bsdv->bsd_svc = bsdr->bsd_svc;
+ bsdv->bsd_flags = 0;
+
+ if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
+ return 0;
+
+ rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
+ tokenr);
+ if (rc) {
+ bsdv->bsd_flags |= BSD_FL_ERR;
+ CERROR("bulk write: server verify failed: %d\n", rc);
+ }
+
+ return rc;
+}
+
+static
+int plain_svc_wrap_bulk(struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc)
+{
+ struct ptlrpc_reply_state *rs = req->rq_reply_state;
+ struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
+ struct plain_bulk_token *tokenv;
+ int rc;
+
+ LASSERT(req->rq_bulk_read);
+ LASSERT(req->rq_pack_bulk);
+
+ bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
+ bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
+ tokenv = (struct plain_bulk_token *) bsdv->bsd_data;
+
+ bsdv->bsd_version = 0;
+ bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
+ bsdv->bsd_svc = bsdr->bsd_svc;
+ bsdv->bsd_flags = 0;
+
+ if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
+ return 0;
+
+ rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
+ tokenv);
+ if (rc) {
+		CERROR("bulk read: server failed to compute checksum: %d\n", rc);
+ } else {
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
+ corrupt_bulk_data(desc);
+ }
+
+ return rc;
+}
+
+static struct ptlrpc_ctx_ops plain_ctx_ops = {
+ .refresh = plain_ctx_refresh,
+ .validate = plain_ctx_validate,
+ .sign = plain_ctx_sign,
+ .verify = plain_ctx_verify,
+ .wrap_bulk = plain_cli_wrap_bulk,
+ .unwrap_bulk = plain_cli_unwrap_bulk,
+};
+
+static struct ptlrpc_sec_cops plain_sec_cops = {
+ .create_sec = plain_create_sec,
+ .destroy_sec = plain_destroy_sec,
+ .kill_sec = plain_kill_sec,
+ .lookup_ctx = plain_lookup_ctx,
+ .release_ctx = plain_release_ctx,
+ .flush_ctx_cache = plain_flush_ctx_cache,
+ .alloc_reqbuf = plain_alloc_reqbuf,
+ .free_reqbuf = plain_free_reqbuf,
+ .alloc_repbuf = plain_alloc_repbuf,
+ .free_repbuf = plain_free_repbuf,
+ .enlarge_reqbuf = plain_enlarge_reqbuf,
+};
+
+static struct ptlrpc_sec_sops plain_sec_sops = {
+ .accept = plain_accept,
+ .alloc_rs = plain_alloc_rs,
+ .authorize = plain_authorize,
+ .free_rs = plain_free_rs,
+ .unwrap_bulk = plain_svc_unwrap_bulk,
+ .wrap_bulk = plain_svc_wrap_bulk,
+};
+
+static struct ptlrpc_sec_policy plain_policy = {
+ .sp_owner = THIS_MODULE,
+ .sp_name = "plain",
+ .sp_policy = SPTLRPC_POLICY_PLAIN,
+ .sp_cops = &plain_sec_cops,
+ .sp_sops = &plain_sec_sops,
+};
+
+int sptlrpc_plain_init(void)
+{
+ __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
+ int rc;
+
+ buflens[PLAIN_PACK_MSG_OFF] = lustre_msg_early_size();
+ plain_at_offset = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
+
+ rc = sptlrpc_register_policy(&plain_policy);
+ if (rc)
+ CERROR("failed to register: %d\n", rc);
+
+ return rc;
+}
+
+void sptlrpc_plain_fini(void)
+{
+ int rc;
+
+ rc = sptlrpc_unregister_policy(&plain_policy);
+ if (rc)
+ CERROR("cannot unregister: %d\n", rc);
+}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
new file mode 100644
index 0000000..acf75f3
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -0,0 +1,3115 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2010, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_RPC
+#include <obd_support.h>
+#include <obd_class.h>
+#include <lustre_net.h>
+#include <lu_object.h>
+#include <linux/lnet/types.h>
+#include "ptlrpc_internal.h"
+
+/* The following are visible and mutable through /sys/module/ptlrpc */
+int test_req_buffer_pressure = 0;
+CFS_MODULE_PARM(test_req_buffer_pressure, "i", int, 0444,
+ "set non-zero to put pressure on request buffer pools");
+CFS_MODULE_PARM(at_min, "i", int, 0644,
+ "Adaptive timeout minimum (sec)");
+CFS_MODULE_PARM(at_max, "i", int, 0644,
+ "Adaptive timeout maximum (sec)");
+CFS_MODULE_PARM(at_history, "i", int, 0644,
+ "Adaptive timeouts remember the slowest event that took place "
+ "within this period (sec)");
+CFS_MODULE_PARM(at_early_margin, "i", int, 0644,
+ "How soon before an RPC deadline to send an early reply");
+CFS_MODULE_PARM(at_extra, "i", int, 0644,
+ "How much extra time to give with each early reply");
+
+
+/* forward ref */
+static int ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt);
+static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req);
+static void ptlrpc_at_remove_timed(struct ptlrpc_request *req);
+
+/** Holds a list of all PTLRPC services */
+LIST_HEAD(ptlrpc_all_services);
+/** Used to protect the \e ptlrpc_all_services list */
+struct mutex ptlrpc_all_services_mutex;
+
+struct ptlrpc_request_buffer_desc *
+ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt)
+{
+ struct ptlrpc_service *svc = svcpt->scp_service;
+ struct ptlrpc_request_buffer_desc *rqbd;
+
+ OBD_CPT_ALLOC_PTR(rqbd, svc->srv_cptable, svcpt->scp_cpt);
+ if (rqbd == NULL)
+ return NULL;
+
+ rqbd->rqbd_svcpt = svcpt;
+ rqbd->rqbd_refcount = 0;
+ rqbd->rqbd_cbid.cbid_fn = request_in_callback;
+ rqbd->rqbd_cbid.cbid_arg = rqbd;
+ INIT_LIST_HEAD(&rqbd->rqbd_reqs);
+ OBD_CPT_ALLOC_LARGE(rqbd->rqbd_buffer, svc->srv_cptable,
+ svcpt->scp_cpt, svc->srv_buf_size);
+ if (rqbd->rqbd_buffer == NULL) {
+ OBD_FREE_PTR(rqbd);
+ return NULL;
+ }
+
+ spin_lock(&svcpt->scp_lock);
+ list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
+ svcpt->scp_nrqbds_total++;
+ spin_unlock(&svcpt->scp_lock);
+
+ return rqbd;
+}
+
+void
+ptlrpc_free_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
+{
+ struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
+
+ LASSERT(rqbd->rqbd_refcount == 0);
+ LASSERT(list_empty(&rqbd->rqbd_reqs));
+
+ spin_lock(&svcpt->scp_lock);
+ list_del(&rqbd->rqbd_list);
+ svcpt->scp_nrqbds_total--;
+ spin_unlock(&svcpt->scp_lock);
+
+ OBD_FREE_LARGE(rqbd->rqbd_buffer, svcpt->scp_service->srv_buf_size);
+ OBD_FREE_PTR(rqbd);
+}
+
+int
+ptlrpc_grow_req_bufs(struct ptlrpc_service_part *svcpt, int post)
+{
+ struct ptlrpc_service *svc = svcpt->scp_service;
+ struct ptlrpc_request_buffer_desc *rqbd;
+ int rc = 0;
+ int i;
+
+ if (svcpt->scp_rqbd_allocating)
+ goto try_post;
+
+ spin_lock(&svcpt->scp_lock);
+ /* check again with lock */
+ if (svcpt->scp_rqbd_allocating) {
+ /* NB: we might allow more than one thread in the future */
+ LASSERT(svcpt->scp_rqbd_allocating == 1);
+ spin_unlock(&svcpt->scp_lock);
+ goto try_post;
+ }
+
+ svcpt->scp_rqbd_allocating++;
+ spin_unlock(&svcpt->scp_lock);
+
+
+ for (i = 0; i < svc->srv_nbuf_per_group; i++) {
+ /* NB: another thread might have recycled enough rqbds, we
+ * need to make sure it wouldn't over-allocate, see LU-1212. */
+ if (svcpt->scp_nrqbds_posted >= svc->srv_nbuf_per_group)
+ break;
+
+ rqbd = ptlrpc_alloc_rqbd(svcpt);
+
+ if (rqbd == NULL) {
+ CERROR("%s: Can't allocate request buffer\n",
+ svc->srv_name);
+ rc = -ENOMEM;
+ break;
+ }
+ }
+
+ spin_lock(&svcpt->scp_lock);
+
+ LASSERT(svcpt->scp_rqbd_allocating == 1);
+ svcpt->scp_rqbd_allocating--;
+
+ spin_unlock(&svcpt->scp_lock);
+
+ CDEBUG(D_RPCTRACE,
+ "%s: allocate %d new %d-byte reqbufs (%d/%d left), rc = %d\n",
+ svc->srv_name, i, svc->srv_buf_size, svcpt->scp_nrqbds_posted,
+ svcpt->scp_nrqbds_total, rc);
+
+ try_post:
+ if (post && rc == 0)
+ rc = ptlrpc_server_post_idle_rqbds(svcpt);
+
+ return rc;
+}
+
+/**
+ * Part of Rep-Ack logic.
+ * Puts a lock and its mode into the reply state associated with the
+ * request reply.
+ */
+void
+ptlrpc_save_lock(struct ptlrpc_request *req,
+ struct lustre_handle *lock, int mode, int no_ack)
+{
+ struct ptlrpc_reply_state *rs = req->rq_reply_state;
+ int idx;
+
+ LASSERT(rs != NULL);
+ LASSERT(rs->rs_nlocks < RS_MAX_LOCKS);
+
+ if (req->rq_export->exp_disconnected) {
+ ldlm_lock_decref(lock, mode);
+ } else {
+ idx = rs->rs_nlocks++;
+ rs->rs_locks[idx] = *lock;
+ rs->rs_modes[idx] = mode;
+ rs->rs_difficult = 1;
+ rs->rs_no_ack = !!no_ack;
+ }
+}
+EXPORT_SYMBOL(ptlrpc_save_lock);
+
+
+struct ptlrpc_hr_partition;
+
+struct ptlrpc_hr_thread {
+ int hrt_id; /* thread ID */
+ spinlock_t hrt_lock;
+ wait_queue_head_t hrt_waitq;
+ struct list_head hrt_queue; /* RS queue */
+ struct ptlrpc_hr_partition *hrt_partition;
+};
+
+struct ptlrpc_hr_partition {
+ /* # of started threads */
+ atomic_t hrp_nstarted;
+ /* # of stopped threads */
+ atomic_t hrp_nstopped;
+ /* cpu partition id */
+ int hrp_cpt;
+ /* round-robin rotor for choosing thread */
+ int hrp_rotor;
+ /* total number of threads on this partition */
+ int hrp_nthrs;
+ /* threads table */
+ struct ptlrpc_hr_thread *hrp_thrs;
+};
+
+#define HRT_RUNNING 0
+#define HRT_STOPPING 1
+
+struct ptlrpc_hr_service {
+ /* CPU partition table, it's just cfs_cpt_table for now */
+ struct cfs_cpt_table *hr_cpt_table;
+ /** controller sleep waitq */
+ wait_queue_head_t hr_waitq;
+ unsigned int hr_stopping;
+ /** roundrobin rotor for non-affinity service */
+ unsigned int hr_rotor;
+ /* partition data */
+ struct ptlrpc_hr_partition **hr_partitions;
+};
+
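+/* a batch of reply states belonging to one service partition, dispatched
+ * together to a single reply handling thread */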
+struct rs_batch {
+ struct list_head rsb_replies;
+ unsigned int rsb_n_replies;
+ struct ptlrpc_service_part *rsb_svcpt;
+};
+
+/** reply handling service. */
+static struct ptlrpc_hr_service ptlrpc_hr;
+
+/**
+ * maximum number of replies scheduled in one batch
+ */
+#define MAX_SCHEDULED 256
+
+/**
+ * Initialize a reply batch.
+ *
+ * \param b batch
+ */
+static void rs_batch_init(struct rs_batch *b)
+{
+ memset(b, 0, sizeof *b);
+ INIT_LIST_HEAD(&b->rsb_replies);
+}
+
+/**
+ * Choose an hr thread to dispatch requests to.
+ */
+static struct ptlrpc_hr_thread *
+ptlrpc_hr_select(struct ptlrpc_service_part *svcpt)
+{
+ struct ptlrpc_hr_partition *hrp;
+ unsigned int rotor;
+
+ if (svcpt->scp_cpt >= 0 &&
+ svcpt->scp_service->srv_cptable == ptlrpc_hr.hr_cpt_table) {
+ /* directly match partition */
+ hrp = ptlrpc_hr.hr_partitions[svcpt->scp_cpt];
+
+ } else {
+ rotor = ptlrpc_hr.hr_rotor++;
+ rotor %= cfs_cpt_number(ptlrpc_hr.hr_cpt_table);
+
+ hrp = ptlrpc_hr.hr_partitions[rotor];
+ }
+
+ rotor = hrp->hrp_rotor++;
+ return &hrp->hrp_thrs[rotor % hrp->hrp_nthrs];
+}
+
+/**
+ * Dispatch all replies accumulated in the batch to one of the
+ * dedicated reply handling threads.
+ *
+ * \param b batch
+ */
+static void rs_batch_dispatch(struct rs_batch *b)
+{
+ if (b->rsb_n_replies != 0) {
+ struct ptlrpc_hr_thread *hrt;
+
+ hrt = ptlrpc_hr_select(b->rsb_svcpt);
+
+ spin_lock(&hrt->hrt_lock);
+ list_splice_init(&b->rsb_replies, &hrt->hrt_queue);
+ spin_unlock(&hrt->hrt_lock);
+
+ wake_up(&hrt->hrt_waitq);
+ b->rsb_n_replies = 0;
+ }
+}
+
+/**
+ * Add a reply to a batch.
+ * Add one reply object to a batch; schedule the batched replies if the batch overflows.
+ *
+ * \param b batch
+ * \param rs reply
+ */
+static void rs_batch_add(struct rs_batch *b, struct ptlrpc_reply_state *rs)
+{
+ struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
+
+ if (svcpt != b->rsb_svcpt || b->rsb_n_replies >= MAX_SCHEDULED) {
+ if (b->rsb_svcpt != NULL) {
+ rs_batch_dispatch(b);
+ spin_unlock(&b->rsb_svcpt->scp_rep_lock);
+ }
+ spin_lock(&svcpt->scp_rep_lock);
+ b->rsb_svcpt = svcpt;
+ }
+ spin_lock(&rs->rs_lock);
+ rs->rs_scheduled_ever = 1;
+ if (rs->rs_scheduled == 0) {
+ list_move(&rs->rs_list, &b->rsb_replies);
+ rs->rs_scheduled = 1;
+ b->rsb_n_replies++;
+ }
+ rs->rs_committed = 1;
+ spin_unlock(&rs->rs_lock);
+}
+
+/**
+ * Reply batch finalization.
+ * Dispatch remaining replies from the batch
+ * and release remaining spinlock.
+ *
+ * \param b batch
+ */
+static void rs_batch_fini(struct rs_batch *b)
+{
+ if (b->rsb_svcpt != NULL) {
+ rs_batch_dispatch(b);
+ spin_unlock(&b->rsb_svcpt->scp_rep_lock);
+ }
+}
+
+#define DECLARE_RS_BATCH(b) struct rs_batch b
+
+
+/**
+ * Put reply state into a queue for processing because we received
+ * ACK from the client
+ */
+void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs)
+{
+ struct ptlrpc_hr_thread *hrt;
+
+ LASSERT(list_empty(&rs->rs_list));
+
+ hrt = ptlrpc_hr_select(rs->rs_svcpt);
+
+ spin_lock(&hrt->hrt_lock);
+ list_add_tail(&rs->rs_list, &hrt->hrt_queue);
+ spin_unlock(&hrt->hrt_lock);
+
+ wake_up(&hrt->hrt_waitq);
+}
+
+void
+ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs)
+{
+ LASSERT(spin_is_locked(&rs->rs_svcpt->scp_rep_lock));
+ LASSERT(spin_is_locked(&rs->rs_lock));
+	LASSERT(rs->rs_difficult);
+ rs->rs_scheduled_ever = 1; /* flag any notification attempt */
+
+ if (rs->rs_scheduled) { /* being set up or already notified */
+ return;
+ }
+
+ rs->rs_scheduled = 1;
+ list_del_init(&rs->rs_list);
+ ptlrpc_dispatch_difficult_reply(rs);
+}
+EXPORT_SYMBOL(ptlrpc_schedule_difficult_reply);
+
+void ptlrpc_commit_replies(struct obd_export *exp)
+{
+ struct ptlrpc_reply_state *rs, *nxt;
+ DECLARE_RS_BATCH(batch);
+
+ rs_batch_init(&batch);
+	/* Find any replies that have been committed and get their service
+	 * to attend to completing them. */
+
+ /* CAVEAT EMPTOR: spinlock ordering!!! */
+ spin_lock(&exp->exp_uncommitted_replies_lock);
+ list_for_each_entry_safe(rs, nxt, &exp->exp_uncommitted_replies,
+ rs_obd_list) {
+		LASSERT(rs->rs_difficult);
+ /* VBR: per-export last_committed */
+ LASSERT(rs->rs_export);
+ if (rs->rs_transno <= exp->exp_last_committed) {
+ list_del_init(&rs->rs_obd_list);
+ rs_batch_add(&batch, rs);
+ }
+ }
+ spin_unlock(&exp->exp_uncommitted_replies_lock);
+ rs_batch_fini(&batch);
+}
+EXPORT_SYMBOL(ptlrpc_commit_replies);
+
+static int
+ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt)
+{
+ struct ptlrpc_request_buffer_desc *rqbd;
+ int rc;
+ int posted = 0;
+
+ for (;;) {
+ spin_lock(&svcpt->scp_lock);
+
+ if (list_empty(&svcpt->scp_rqbd_idle)) {
+ spin_unlock(&svcpt->scp_lock);
+ return posted;
+ }
+
+ rqbd = list_entry(svcpt->scp_rqbd_idle.next,
+ struct ptlrpc_request_buffer_desc,
+ rqbd_list);
+ list_del(&rqbd->rqbd_list);
+
+ /* assume we will post successfully */
+ svcpt->scp_nrqbds_posted++;
+ list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_posted);
+
+ spin_unlock(&svcpt->scp_lock);
+
+ rc = ptlrpc_register_rqbd(rqbd);
+ if (rc != 0)
+ break;
+
+ posted = 1;
+ }
+
+ spin_lock(&svcpt->scp_lock);
+
+ svcpt->scp_nrqbds_posted--;
+ list_del(&rqbd->rqbd_list);
+ list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
+
+ /* Don't complain if no request buffers are posted right now; LNET
+ * won't drop requests because we set the portal lazy! */
+
+ spin_unlock(&svcpt->scp_lock);
+
+ return -1;
+}
+
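+/* adaptive timeout timer callback: flag the service partition for an AT
+ * check and wake up its service threads */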
+static void ptlrpc_at_timer(unsigned long castmeharder)
+{
+ struct ptlrpc_service_part *svcpt;
+
+ svcpt = (struct ptlrpc_service_part *)castmeharder;
+
+ svcpt->scp_at_check = 1;
+ svcpt->scp_at_checktime = cfs_time_current();
+ wake_up(&svcpt->scp_waitq);
+}
+
+static void
+ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
+ struct ptlrpc_service_conf *conf)
+{
+ struct ptlrpc_service_thr_conf *tc = &conf->psc_thr;
+ unsigned init;
+ unsigned total;
+ unsigned nthrs;
+ int weight;
+
+ /*
+	 * Common code for estimating & validating the number of threads.
+	 * A CPT-affinity service can have a per-CPT thread pool instead
+	 * of a global thread pool, which means the user might not always
+	 * get the number of threads given in conf::tc_nthrs_user even if
+	 * it was set. That is because the thread count must be validated
+	 * for each CPT to guarantee that each pool has enough threads to
+	 * keep the service healthy.
+ */
+ init = PTLRPC_NTHRS_INIT + (svc->srv_ops.so_hpreq_handler != NULL);
+ init = max_t(int, init, tc->tc_nthrs_init);
+
+ /* NB: please see comments in lustre_lnet.h for definition
+ * details of these members */
+ LASSERT(tc->tc_nthrs_max != 0);
+
+ if (tc->tc_nthrs_user != 0) {
+		/* In case there is a reason to test a service with many
+		 * threads, the check here is less strict: up to
+		 * 8 * nthrs_max threads are allowed */
+ total = min(tc->tc_nthrs_max * 8, tc->tc_nthrs_user);
+ nthrs = total / svc->srv_ncpts;
+ init = max(init, nthrs);
+ goto out;
+ }
+
+ total = tc->tc_nthrs_max;
+ if (tc->tc_nthrs_base == 0) {
+		/* don't care about the base number of threads per partition;
+		 * this is mostly for non-affinity services */
+ nthrs = total / svc->srv_ncpts;
+ goto out;
+ }
+
+ nthrs = tc->tc_nthrs_base;
+ if (svc->srv_ncpts == 1) {
+ int i;
+
+		/* NB: Increase the base number if there is a single partition
+		 * and the total number of cores/HTs is at least 4.
+		 * The result will always be < 2 * nthrs_base */
+ weight = cfs_cpt_weight(svc->srv_cptable, CFS_CPT_ANY);
+ for (i = 1; (weight >> (i + 1)) != 0 && /* >= 4 cores/HTs */
+ (tc->tc_nthrs_base >> i) != 0; i++)
+ nthrs += tc->tc_nthrs_base >> i;
+ }
+
+ if (tc->tc_thr_factor != 0) {
+ int factor = tc->tc_thr_factor;
+ const int fade = 4;
+ cpumask_t mask;
+
+ /*
+		 * The user wants to increase the number of threads with each
+		 * CPU core/HT; most likely the factor is larger than one
+		 * thread per core because service threads are expected to be
+		 * blocked on locks or waiting for I/O.
+ */
+ /*
+ * Amdahl's law says that adding processors wouldn't give
+ * a linear increasing of parallelism, so it's nonsense to
+ * have too many threads no matter how many cores/HTs
+ * there are.
+ */
+ cpumask_copy(&mask, topology_thread_cpumask(0));
+ if (cpus_weight(mask) > 1) { /* weight is # of HTs */
+ /* depress thread factor for hyper-thread */
+ factor = factor - (factor >> 1) + (factor >> 3);
+ }
+
+ weight = cfs_cpt_weight(svc->srv_cptable, 0);
+ LASSERT(weight > 0);
+
+ for (; factor > 0 && weight > 0; factor--, weight -= fade)
+ nthrs += min(weight, fade) * factor;
+ }
+
+ if (nthrs * svc->srv_ncpts > tc->tc_nthrs_max) {
+ nthrs = max(tc->tc_nthrs_base,
+ tc->tc_nthrs_max / svc->srv_ncpts);
+ }
+ out:
+ nthrs = max(nthrs, tc->tc_nthrs_init);
+ svc->srv_nthrs_cpt_limit = nthrs;
+ svc->srv_nthrs_cpt_init = init;
+
+ if (nthrs * svc->srv_ncpts > tc->tc_nthrs_max) {
+ CDEBUG(D_OTHER, "%s: This service may have more threads (%d) "
+ "than the given soft limit (%d)\n",
+ svc->srv_name, nthrs * svc->srv_ncpts,
+ tc->tc_nthrs_max);
+ }
+}
+
+/**
+ * Initialize percpt data for a service
+ */
+static int
+ptlrpc_service_part_init(struct ptlrpc_service *svc,
+ struct ptlrpc_service_part *svcpt, int cpt)
+{
+ struct ptlrpc_at_array *array;
+ int size;
+ int index;
+ int rc;
+
+ svcpt->scp_cpt = cpt;
+ INIT_LIST_HEAD(&svcpt->scp_threads);
+
+ /* rqbd and incoming request queue */
+ spin_lock_init(&svcpt->scp_lock);
+ INIT_LIST_HEAD(&svcpt->scp_rqbd_idle);
+ INIT_LIST_HEAD(&svcpt->scp_rqbd_posted);
+ INIT_LIST_HEAD(&svcpt->scp_req_incoming);
+ init_waitqueue_head(&svcpt->scp_waitq);
+ /* history request & rqbd list */
+ INIT_LIST_HEAD(&svcpt->scp_hist_reqs);
+ INIT_LIST_HEAD(&svcpt->scp_hist_rqbds);
+
+	/* active requests and hp requests */
+ spin_lock_init(&svcpt->scp_req_lock);
+
+ /* reply states */
+ spin_lock_init(&svcpt->scp_rep_lock);
+ INIT_LIST_HEAD(&svcpt->scp_rep_active);
+ INIT_LIST_HEAD(&svcpt->scp_rep_idle);
+ init_waitqueue_head(&svcpt->scp_rep_waitq);
+ atomic_set(&svcpt->scp_nreps_difficult, 0);
+
+ /* adaptive timeout */
+ spin_lock_init(&svcpt->scp_at_lock);
+ array = &svcpt->scp_at_array;
+
+ size = at_est2timeout(at_max);
+ array->paa_size = size;
+ array->paa_count = 0;
+ array->paa_deadline = -1;
+
+ /* allocate memory for scp_at_array (ptlrpc_at_array) */
+ OBD_CPT_ALLOC(array->paa_reqs_array,
+ svc->srv_cptable, cpt, sizeof(struct list_head) * size);
+ if (array->paa_reqs_array == NULL)
+ return -ENOMEM;
+
+ for (index = 0; index < size; index++)
+ INIT_LIST_HEAD(&array->paa_reqs_array[index]);
+
+ OBD_CPT_ALLOC(array->paa_reqs_count,
+ svc->srv_cptable, cpt, sizeof(__u32) * size);
+ if (array->paa_reqs_count == NULL)
+ goto failed;
+
+ cfs_timer_init(&svcpt->scp_at_timer, ptlrpc_at_timer, svcpt);
+ /* At SOW, service time should be quick; 10s seems generous. If client
+ * timeout is less than this, we'll be sending an early reply. */
+ at_init(&svcpt->scp_at_estimate, 10, 0);
+
+ /* assign this before call ptlrpc_grow_req_bufs */
+ svcpt->scp_service = svc;
+ /* Now allocate the request buffers, but don't post them now */
+ rc = ptlrpc_grow_req_bufs(svcpt, 0);
+ /* We shouldn't be under memory pressure at startup, so
+ * fail if we can't allocate all our buffers at this time. */
+ if (rc != 0)
+ goto failed;
+
+ return 0;
+
+ failed:
+ if (array->paa_reqs_count != NULL) {
+ OBD_FREE(array->paa_reqs_count, sizeof(__u32) * size);
+ array->paa_reqs_count = NULL;
+ }
+
+ if (array->paa_reqs_array != NULL) {
+ OBD_FREE(array->paa_reqs_array,
+ sizeof(struct list_head) * array->paa_size);
+ array->paa_reqs_array = NULL;
+ }
+
+ return -ENOMEM;
+}
+
+/**
+ * Initialize service on a given portal.
+ * This includes starting serving threads, allocating and posting rqbds, and
+ * so on.
+ */
+struct ptlrpc_service *
+ptlrpc_register_service(struct ptlrpc_service_conf *conf,
+ struct proc_dir_entry *proc_entry)
+{
+ struct ptlrpc_service_cpt_conf *cconf = &conf->psc_cpt;
+ struct ptlrpc_service *service;
+ struct ptlrpc_service_part *svcpt;
+ struct cfs_cpt_table *cptable;
+ __u32 *cpts = NULL;
+ int ncpts;
+ int cpt;
+ int rc;
+ int i;
+
+ LASSERT(conf->psc_buf.bc_nbufs > 0);
+ LASSERT(conf->psc_buf.bc_buf_size >=
+ conf->psc_buf.bc_req_max_size + SPTLRPC_MAX_PAYLOAD);
+ LASSERT(conf->psc_thr.tc_ctx_tags != 0);
+
+ cptable = cconf->cc_cptable;
+ if (cptable == NULL)
+ cptable = cfs_cpt_table;
+
+ if (!conf->psc_thr.tc_cpu_affinity) {
+ ncpts = 1;
+ } else {
+ ncpts = cfs_cpt_number(cptable);
+ if (cconf->cc_pattern != NULL) {
+ struct cfs_expr_list *el;
+
+ rc = cfs_expr_list_parse(cconf->cc_pattern,
+ strlen(cconf->cc_pattern),
+ 0, ncpts - 1, &el);
+ if (rc != 0) {
+				CERROR("%s: invalid CPT pattern string: %s\n",
+ conf->psc_name, cconf->cc_pattern);
+ return ERR_PTR(-EINVAL);
+ }
+
+ rc = cfs_expr_list_values(el, ncpts, &cpts);
+ cfs_expr_list_free(el);
+ if (rc <= 0) {
+ CERROR("%s: failed to parse CPT array %s: %d\n",
+ conf->psc_name, cconf->cc_pattern, rc);
+ if (cpts != NULL)
+ OBD_FREE(cpts, sizeof(*cpts) * ncpts);
+ return ERR_PTR(rc < 0 ? rc : -EINVAL);
+ }
+ ncpts = rc;
+ }
+ }
+
+ OBD_ALLOC(service, offsetof(struct ptlrpc_service, srv_parts[ncpts]));
+ if (service == NULL) {
+ if (cpts != NULL)
+ OBD_FREE(cpts, sizeof(*cpts) * ncpts);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ service->srv_cptable = cptable;
+ service->srv_cpts = cpts;
+ service->srv_ncpts = ncpts;
+
+ service->srv_cpt_bits = 0; /* it's zero already, easy to read... */
+ while ((1 << service->srv_cpt_bits) < cfs_cpt_number(cptable))
+ service->srv_cpt_bits++;
+
+ /* public members */
+ spin_lock_init(&service->srv_lock);
+ service->srv_name = conf->psc_name;
+ service->srv_watchdog_factor = conf->psc_watchdog_factor;
+	INIT_LIST_HEAD(&service->srv_list); /* for safety of cleanup */
+
+ /* buffer configuration */
+ service->srv_nbuf_per_group = test_req_buffer_pressure ?
+ 1 : conf->psc_buf.bc_nbufs;
+ service->srv_max_req_size = conf->psc_buf.bc_req_max_size +
+ SPTLRPC_MAX_PAYLOAD;
+ service->srv_buf_size = conf->psc_buf.bc_buf_size;
+ service->srv_rep_portal = conf->psc_buf.bc_rep_portal;
+ service->srv_req_portal = conf->psc_buf.bc_req_portal;
+
+ /* Increase max reply size to next power of two */
+ service->srv_max_reply_size = 1;
+ while (service->srv_max_reply_size <
+ conf->psc_buf.bc_rep_max_size + SPTLRPC_MAX_PAYLOAD)
+ service->srv_max_reply_size <<= 1;
+
+ service->srv_thread_name = conf->psc_thr.tc_thr_name;
+ service->srv_ctx_tags = conf->psc_thr.tc_ctx_tags;
+ service->srv_hpreq_ratio = PTLRPC_SVC_HP_RATIO;
+ service->srv_ops = conf->psc_ops;
+
+ for (i = 0; i < ncpts; i++) {
+ if (!conf->psc_thr.tc_cpu_affinity)
+ cpt = CFS_CPT_ANY;
+ else
+ cpt = cpts != NULL ? cpts[i] : i;
+
+ OBD_CPT_ALLOC(svcpt, cptable, cpt, sizeof(*svcpt));
+ if (svcpt == NULL)
+ GOTO(failed, rc = -ENOMEM);
+
+ service->srv_parts[i] = svcpt;
+ rc = ptlrpc_service_part_init(service, svcpt, cpt);
+ if (rc != 0)
+ GOTO(failed, rc);
+ }
+
+ ptlrpc_server_nthreads_check(service, conf);
+
+ rc = LNetSetLazyPortal(service->srv_req_portal);
+ LASSERT(rc == 0);
+
+ mutex_lock(&ptlrpc_all_services_mutex);
+	list_add(&service->srv_list, &ptlrpc_all_services);
+ mutex_unlock(&ptlrpc_all_services_mutex);
+
+ if (proc_entry != NULL)
+ ptlrpc_lprocfs_register_service(proc_entry, service);
+
+ rc = ptlrpc_service_nrs_setup(service);
+ if (rc != 0)
+ GOTO(failed, rc);
+
+ CDEBUG(D_NET, "%s: Started, listening on portal %d\n",
+ service->srv_name, service->srv_req_portal);
+
+ rc = ptlrpc_start_threads(service);
+ if (rc != 0) {
+ CERROR("Failed to start threads for service %s: %d\n",
+ service->srv_name, rc);
+ GOTO(failed, rc);
+ }
+
+ return service;
+failed:
+ ptlrpc_unregister_service(service);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL(ptlrpc_register_service);
+
+/**
+ * To actually free the request; must be called without holding svc_lock.
+ * Note it is the caller's responsibility to unlink req->rq_list.
+ */
+static void ptlrpc_server_free_request(struct ptlrpc_request *req)
+{
+ LASSERT(atomic_read(&req->rq_refcount) == 0);
+ LASSERT(list_empty(&req->rq_timed_list));
+
+ /* DEBUG_REQ() assumes the reply state of a request with a valid
+ * ref will not be destroyed until that reference is dropped. */
+ ptlrpc_req_drop_rs(req);
+
+ sptlrpc_svc_ctx_decref(req);
+
+ if (req != &req->rq_rqbd->rqbd_req) {
+ /* NB request buffers use an embedded
+ * req if the incoming req unlinked the
+ * MD; this isn't one of them! */
+ OBD_FREE(req, sizeof(*req));
+ }
+}
+
+/**
+ * Drop a reference of the request. If it reaches 0, we either
+ * put it into the history list or free it immediately.
+ */
+void ptlrpc_server_drop_request(struct ptlrpc_request *req)
+{
+ struct ptlrpc_request_buffer_desc *rqbd = req->rq_rqbd;
+ struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
+ struct ptlrpc_service *svc = svcpt->scp_service;
+ int refcount;
+ struct list_head *tmp;
+ struct list_head *nxt;
+
+ if (!atomic_dec_and_test(&req->rq_refcount))
+ return;
+
+ if (req->rq_at_linked) {
+ spin_lock(&svcpt->scp_at_lock);
+ /* recheck with lock, in case it's unlinked by
+ * ptlrpc_at_check_timed() */
+ if (likely(req->rq_at_linked))
+ ptlrpc_at_remove_timed(req);
+ spin_unlock(&svcpt->scp_at_lock);
+ }
+
+ LASSERT(list_empty(&req->rq_timed_list));
+
+ /* finalize request */
+ if (req->rq_export) {
+ class_export_put(req->rq_export);
+ req->rq_export = NULL;
+ }
+
+ spin_lock(&svcpt->scp_lock);
+
+ list_add(&req->rq_list, &rqbd->rqbd_reqs);
+
+ refcount = --(rqbd->rqbd_refcount);
+ if (refcount == 0) {
+ /* request buffer is now idle: add to history */
+ list_del(&rqbd->rqbd_list);
+
+ list_add_tail(&rqbd->rqbd_list, &svcpt->scp_hist_rqbds);
+ svcpt->scp_hist_nrqbds++;
+
+ /* cull some history?
+ * I expect only about 1 or 2 rqbds need to be recycled here */
+ while (svcpt->scp_hist_nrqbds > svc->srv_hist_nrqbds_cpt_max) {
+ rqbd = list_entry(svcpt->scp_hist_rqbds.next,
+ struct ptlrpc_request_buffer_desc,
+ rqbd_list);
+
+ list_del(&rqbd->rqbd_list);
+ svcpt->scp_hist_nrqbds--;
+
+ /* remove rqbd's reqs from svc's req history while
+ * I've got the service lock */
+ list_for_each(tmp, &rqbd->rqbd_reqs) {
+ req = list_entry(tmp, struct ptlrpc_request,
+ rq_list);
+ /* Track the highest culled req seq */
+ if (req->rq_history_seq >
+ svcpt->scp_hist_seq_culled) {
+ svcpt->scp_hist_seq_culled =
+ req->rq_history_seq;
+ }
+ list_del(&req->rq_history_list);
+ }
+
+ spin_unlock(&svcpt->scp_lock);
+
+ list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) {
+ req = list_entry(rqbd->rqbd_reqs.next,
+ struct ptlrpc_request,
+ rq_list);
+ list_del(&req->rq_list);
+ ptlrpc_server_free_request(req);
+ }
+
+ spin_lock(&svcpt->scp_lock);
+ /*
+			 * now all reqs including the embedded req have been
+			 * disposed of; schedule the request buffer for re-use.
+ */
+			LASSERT(atomic_read(&rqbd->rqbd_req.rq_refcount) == 0);
+ list_add_tail(&rqbd->rqbd_list,
+ &svcpt->scp_rqbd_idle);
+ }
+
+ spin_unlock(&svcpt->scp_lock);
+ } else if (req->rq_reply_state && req->rq_reply_state->rs_prealloc) {
+ /* If we are low on memory, we are not interested in history */
+ list_del(&req->rq_list);
+ list_del_init(&req->rq_history_list);
+
+ /* Track the highest culled req seq */
+ if (req->rq_history_seq > svcpt->scp_hist_seq_culled)
+ svcpt->scp_hist_seq_culled = req->rq_history_seq;
+
+ spin_unlock(&svcpt->scp_lock);
+
+ ptlrpc_server_free_request(req);
+ } else {
+ spin_unlock(&svcpt->scp_lock);
+ }
+}
+
+/** Change request export and move hp request from old export to new */
+void ptlrpc_request_change_export(struct ptlrpc_request *req,
+ struct obd_export *export)
+{
+ if (req->rq_export != NULL) {
+ if (!list_empty(&req->rq_exp_list)) {
+ /* remove rq_exp_list from last export */
+ spin_lock_bh(&req->rq_export->exp_rpc_lock);
+ list_del_init(&req->rq_exp_list);
+ spin_unlock_bh(&req->rq_export->exp_rpc_lock);
+
+			/* export has one reference already, so it's safe to
+ * add req to export queue here and get another
+ * reference for request later */
+ spin_lock_bh(&export->exp_rpc_lock);
+ list_add(&req->rq_exp_list, &export->exp_hp_rpcs);
+ spin_unlock_bh(&export->exp_rpc_lock);
+ }
+ class_export_rpc_dec(req->rq_export);
+ class_export_put(req->rq_export);
+ }
+
+ /* request takes one export refcount */
+ req->rq_export = class_export_get(export);
+ class_export_rpc_inc(export);
+
+ return;
+}
+
+/**
+ * to finish a request: stop sending more early replies, and release
+ * the request.
+ */
+static void ptlrpc_server_finish_request(struct ptlrpc_service_part *svcpt,
+ struct ptlrpc_request *req)
+{
+ ptlrpc_server_hpreq_fini(req);
+
+ ptlrpc_server_drop_request(req);
+}
+
+/**
+ * To finish an active request: stop sending more early replies, and release
+ * the request. Should be called after we have finished handling the request.
+ */
+static void ptlrpc_server_finish_active_request(
+ struct ptlrpc_service_part *svcpt,
+ struct ptlrpc_request *req)
+{
+ spin_lock(&svcpt->scp_req_lock);
+ ptlrpc_nrs_req_stop_nolock(req);
+ svcpt->scp_nreqs_active--;
+ if (req->rq_hp)
+ svcpt->scp_nhreqs_active--;
+ spin_unlock(&svcpt->scp_req_lock);
+
+ ptlrpc_nrs_req_finalize(req);
+
+ if (req->rq_export != NULL)
+ class_export_rpc_dec(req->rq_export);
+
+ ptlrpc_server_finish_request(svcpt, req);
+}
+
+/**
+ * This function makes sure dead exports are evicted in a timely manner.
+ * It is only called when some export receives a message (i.e., the
+ * network is up).
+ */
+static void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay)
+{
+ struct obd_export *oldest_exp;
+ time_t oldest_time, new_time;
+
+ LASSERT(exp);
+
+ /* Compensate for slow machines, etc., by faking our request time
+ into the future. Although this can break the strict time-ordering
+ of the list, we can be really lazy here - we don't have to evict
+ at the exact right moment. Eventually, all silent exports
+ will make it to the top of the list. */
+
+ /* Ignore renewals of one second or less. */
+ new_time = cfs_time_current_sec() + extra_delay;
+ if (exp->exp_last_request_time + 1 /*second */ >= new_time)
+ return;
+
+ exp->exp_last_request_time = new_time;
+ CDEBUG(D_HA, "updating export %s at "CFS_TIME_T" exp %p\n",
+ exp->exp_client_uuid.uuid,
+ exp->exp_last_request_time, exp);
+
+ /* exports may get disconnected from the chain even though the
+ export has references, so we must keep the spin lock while
+ manipulating the lists */
+ spin_lock(&exp->exp_obd->obd_dev_lock);
+
+ if (list_empty(&exp->exp_obd_chain_timed)) {
+ /* this one is not timed */
+ spin_unlock(&exp->exp_obd->obd_dev_lock);
+ return;
+ }
+
+ list_move_tail(&exp->exp_obd_chain_timed,
+ &exp->exp_obd->obd_exports_timed);
+
+ oldest_exp = list_entry(exp->exp_obd->obd_exports_timed.next,
+ struct obd_export, exp_obd_chain_timed);
+ oldest_time = oldest_exp->exp_last_request_time;
+ spin_unlock(&exp->exp_obd->obd_dev_lock);
+
+ if (exp->exp_obd->obd_recovering) {
+ /* be nice to everyone during recovery */
+ return;
+ }
+
+ /* Note - racing to start/reset the obd_eviction timer is safe */
+ if (exp->exp_obd->obd_eviction_timer == 0) {
+ /* Check if the oldest entry is expired. */
+ if (cfs_time_current_sec() > (oldest_time + PING_EVICT_TIMEOUT +
+ extra_delay)) {
+ /* We need a second timer, in case the net was down and
+ * it just came back. Since the pinger may skip every
+ * other PING_INTERVAL (see note in ptlrpc_pinger_main),
+ * we better wait for 3. */
+ exp->exp_obd->obd_eviction_timer =
+ cfs_time_current_sec() + 3 * PING_INTERVAL;
+ CDEBUG(D_HA, "%s: Think about evicting %s from "CFS_TIME_T"\n",
+ exp->exp_obd->obd_name,
+ obd_export_nid2str(oldest_exp), oldest_time);
+ }
+ } else {
+ if (cfs_time_current_sec() >
+ (exp->exp_obd->obd_eviction_timer + extra_delay)) {
+ /* The evictor won't evict anyone who we've heard from
+ * recently, so we don't have to check before we start
+ * it. */
+ if (!ping_evictor_wake(exp))
+ exp->exp_obd->obd_eviction_timer = 0;
+ }
+ }
+}
+
+/**
+ * Sanity check request \a req.
+ * Return 0 if all is ok, error code otherwise.
+ */
+static int ptlrpc_check_req(struct ptlrpc_request *req)
+{
+ int rc = 0;
+
+ if (unlikely(lustre_msg_get_conn_cnt(req->rq_reqmsg) <
+ req->rq_export->exp_conn_cnt)) {
+ DEBUG_REQ(D_RPCTRACE, req,
+ "DROPPING req from old connection %d < %d",
+ lustre_msg_get_conn_cnt(req->rq_reqmsg),
+ req->rq_export->exp_conn_cnt);
+ return -EEXIST;
+ }
+ if (unlikely(req->rq_export->exp_obd &&
+ req->rq_export->exp_obd->obd_fail)) {
+ /* Failing over, don't handle any more reqs, send
+ error response instead. */
+ CDEBUG(D_RPCTRACE, "Dropping req %p for failed obd %s\n",
+ req, req->rq_export->exp_obd->obd_name);
+ rc = -ENODEV;
+ } else if (lustre_msg_get_flags(req->rq_reqmsg) &
+ (MSG_REPLAY | MSG_REQ_REPLAY_DONE) &&
+ !(req->rq_export->exp_obd->obd_recovering)) {
+ DEBUG_REQ(D_ERROR, req,
+ "Invalid replay without recovery");
+ class_fail_export(req->rq_export);
+ rc = -ENODEV;
+ } else if (lustre_msg_get_transno(req->rq_reqmsg) != 0 &&
+ !(req->rq_export->exp_obd->obd_recovering)) {
+ DEBUG_REQ(D_ERROR, req, "Invalid req with transno "
+ LPU64" without recovery",
+ lustre_msg_get_transno(req->rq_reqmsg));
+ class_fail_export(req->rq_export);
+ rc = -ENODEV;
+ }
+
+ if (unlikely(rc < 0)) {
+ req->rq_status = rc;
+ ptlrpc_error(req);
+ }
+ return rc;
+}
+
+static void ptlrpc_at_set_timer(struct ptlrpc_service_part *svcpt)
+{
+ struct ptlrpc_at_array *array = &svcpt->scp_at_array;
+ __s32 next;
+
+ if (array->paa_count == 0) {
+ cfs_timer_disarm(&svcpt->scp_at_timer);
+ return;
+ }
+
+ /* Set timer for closest deadline */
+ next = (__s32)(array->paa_deadline - cfs_time_current_sec() -
+ at_early_margin);
+ if (next <= 0) {
+ ptlrpc_at_timer((unsigned long)svcpt);
+ } else {
+ cfs_timer_arm(&svcpt->scp_at_timer, cfs_time_shift(next));
+ CDEBUG(D_INFO, "armed %s at %+ds\n",
+ svcpt->scp_service->srv_name, next);
+ }
+}
+
+/* Add rpc to early reply check list */
+static int ptlrpc_at_add_timed(struct ptlrpc_request *req)
+{
+ struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
+ struct ptlrpc_at_array *array = &svcpt->scp_at_array;
+ struct ptlrpc_request *rq = NULL;
+ __u32 index;
+
+ if (AT_OFF)
+ return 0;
+
+ if (req->rq_no_reply)
+ return 0;
+
+ if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0)
+ return -ENOSYS;
+
+ spin_lock(&svcpt->scp_at_lock);
+ LASSERT(list_empty(&req->rq_timed_list));
+
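+ /* Requests are bucketed into the AT array by deadline modulo
+ * paa_size (one slot per second of deadline); each slot is kept
+ * sorted by deadline, so the reverse search below normally
+ * terminates after inspecting only the newest entries. */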
+ index = (unsigned long)req->rq_deadline % array->paa_size;
+ if (array->paa_reqs_count[index] > 0) {
+ /* latest rpcs will have the latest deadlines in the list,
+ * so search backward. */
+ list_for_each_entry_reverse(rq,
+ &array->paa_reqs_array[index],
+ rq_timed_list) {
+ if (req->rq_deadline >= rq->rq_deadline) {
+ list_add(&req->rq_timed_list,
+ &rq->rq_timed_list);
+ break;
+ }
+ }
+ }
+
+ /* Add the request at the head of the list */
+ if (list_empty(&req->rq_timed_list))
+ list_add(&req->rq_timed_list,
+ &array->paa_reqs_array[index]);
+
+ spin_lock(&req->rq_lock);
+ req->rq_at_linked = 1;
+ spin_unlock(&req->rq_lock);
+ req->rq_at_index = index;
+ array->paa_reqs_count[index]++;
+ array->paa_count++;
+ if (array->paa_count == 1 || array->paa_deadline > req->rq_deadline) {
+ array->paa_deadline = req->rq_deadline;
+ ptlrpc_at_set_timer(svcpt);
+ }
+ spin_unlock(&svcpt->scp_at_lock);
+
+ return 0;
+}
+
+static void
+ptlrpc_at_remove_timed(struct ptlrpc_request *req)
+{
+ struct ptlrpc_at_array *array;
+
+ array = &req->rq_rqbd->rqbd_svcpt->scp_at_array;
+
+ /* NB: must be called with svcpt::scp_at_lock held */
+ LASSERT(!list_empty(&req->rq_timed_list));
+ list_del_init(&req->rq_timed_list);
+
+ spin_lock(&req->rq_lock);
+ req->rq_at_linked = 0;
+ spin_unlock(&req->rq_lock);
+
+ array->paa_reqs_count[req->rq_at_index]--;
+ array->paa_count--;
+}
+
+static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
+{
+ struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
+ struct ptlrpc_request *reqcopy;
+ struct lustre_msg *reqmsg;
+ cfs_duration_t olddl = req->rq_deadline - cfs_time_current_sec();
+ time_t newdl;
+ int rc;
+
+ /* deadline is when the client expects us to reply, margin is the
+ difference between clients' and servers' expectations */
+ DEBUG_REQ(D_ADAPTTO, req,
+ "%ssending early reply (deadline %+lds, margin %+lds) for "
+ "%d+%d", AT_OFF ? "AT off - not " : "",
+ olddl, olddl - at_get(&svcpt->scp_at_estimate),
+ at_get(&svcpt->scp_at_estimate), at_extra);
+
+ if (AT_OFF)
+ return 0;
+
+ if (olddl < 0) {
+ DEBUG_REQ(D_WARNING, req, "Already past deadline (%+lds), "
+ "not sending early reply. Consider increasing "
+ "at_early_margin (%d)?", olddl, at_early_margin);
+
+ /* Return an error so we're not re-added to the timed list. */
+ return -ETIMEDOUT;
+ }
+
+ if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0) {
+ DEBUG_REQ(D_INFO, req, "Wanted to ask client for more time, "
+ "but no AT support");
+ return -ENOSYS;
+ }
+
+ if (req->rq_export &&
+ lustre_msg_get_flags(req->rq_reqmsg) &
+ (MSG_REPLAY | MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE)) {
+ /* During recovery, we don't want to send too many early
+ * replies, but on the other hand we want to make sure the
+ * client has enough time to resend if the rpc is lost. So
+ * during the recovery period send at least 4 early replies,
+ * spacing them every at_extra if we can. at_estimate should
+ * always equal this fixed value during recovery. */
+ at_measured(&svcpt->scp_at_estimate, min(at_extra,
+ req->rq_export->exp_obd->obd_recovery_timeout / 4));
+ } else {
+ /* Fake our processing time into the future to ask the clients
+ * for some extra amount of time */
+ at_measured(&svcpt->scp_at_estimate, at_extra +
+ cfs_time_current_sec() -
+ req->rq_arrival_time.tv_sec);
+
+ /* Check to see if we've actually increased the deadline -
+ * we may be past adaptive_max */
+ if (req->rq_deadline >= req->rq_arrival_time.tv_sec +
+ at_get(&svcpt->scp_at_estimate)) {
+ DEBUG_REQ(D_WARNING, req, "Couldn't add any time "
+ "(%ld/%ld), not sending early reply\n",
+ olddl, req->rq_arrival_time.tv_sec +
+ at_get(&svcpt->scp_at_estimate) -
+ cfs_time_current_sec());
+ return -ETIMEDOUT;
+ }
+ }
+ newdl = cfs_time_current_sec() + at_get(&svcpt->scp_at_estimate);
+
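+ /* Send the early reply from a shallow copy of the request (with its
+ * own copy of the request message) so the normal reply path is left
+ * untouched; only the original request's deadline and early-reply
+ * count are updated once the early reply has actually been sent. */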
+ OBD_ALLOC(reqcopy, sizeof *reqcopy);
+ if (reqcopy == NULL)
+ return -ENOMEM;
+ OBD_ALLOC_LARGE(reqmsg, req->rq_reqlen);
+ if (!reqmsg) {
+ OBD_FREE(reqcopy, sizeof *reqcopy);
+ return -ENOMEM;
+ }
+
+ *reqcopy = *req;
+ reqcopy->rq_reply_state = NULL;
+ reqcopy->rq_rep_swab_mask = 0;
+ reqcopy->rq_pack_bulk = 0;
+ reqcopy->rq_pack_udesc = 0;
+ reqcopy->rq_packed_final = 0;
+ sptlrpc_svc_ctx_addref(reqcopy);
+ /* We only need the reqmsg for the magic */
+ reqcopy->rq_reqmsg = reqmsg;
+ memcpy(reqmsg, req->rq_reqmsg, req->rq_reqlen);
+
+ LASSERT(atomic_read(&req->rq_refcount));
+ /* if this is the last refcount, an early reply isn't needed */
+ if (atomic_read(&req->rq_refcount) == 1) {
+ DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, "
+ "abort sending early reply\n");
+ GOTO(out, rc = -EINVAL);
+ }
+
+ /* Connection ref */
+ reqcopy->rq_export = class_conn2export(
+ lustre_msg_get_handle(reqcopy->rq_reqmsg));
+ if (reqcopy->rq_export == NULL)
+ GOTO(out, rc = -ENODEV);
+
+ /* RPC ref */
+ class_export_rpc_inc(reqcopy->rq_export);
+ if (reqcopy->rq_export->exp_obd &&
+ reqcopy->rq_export->exp_obd->obd_fail)
+ GOTO(out_put, rc = -ENODEV);
+
+ rc = lustre_pack_reply_flags(reqcopy, 1, NULL, NULL, LPRFL_EARLY_REPLY);
+ if (rc)
+ GOTO(out_put, rc);
+
+ rc = ptlrpc_send_reply(reqcopy, PTLRPC_REPLY_EARLY);
+
+ if (!rc) {
+ /* Adjust our own deadline to what we told the client */
+ req->rq_deadline = newdl;
+ req->rq_early_count++; /* number sent, server side */
+ } else {
+ DEBUG_REQ(D_ERROR, req, "Early reply send failed %d", rc);
+ }
+
+ /* Free the (early) reply state from lustre_pack_reply.
+ (ptlrpc_send_reply takes its own rs ref, so this is safe here) */
+ ptlrpc_req_drop_rs(reqcopy);
+
+out_put:
+ class_export_rpc_dec(reqcopy->rq_export);
+ class_export_put(reqcopy->rq_export);
+out:
+ sptlrpc_svc_ctx_decref(reqcopy);
+ OBD_FREE_LARGE(reqmsg, req->rq_reqlen);
+ OBD_FREE(reqcopy, sizeof *reqcopy);
+ return rc;
+}
+
+/* Send early replies to everybody expiring within at_early_margin
+ asking for at_extra time */
+static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
+{
+ struct ptlrpc_at_array *array = &svcpt->scp_at_array;
+ struct ptlrpc_request *rq, *n;
+ struct list_head work_list;
+ __u32 index, count;
+ time_t deadline;
+ time_t now = cfs_time_current_sec();
+ cfs_duration_t delay;
+ int first, counter = 0;
+
+ spin_lock(&svcpt->scp_at_lock);
+ if (svcpt->scp_at_check == 0) {
+ spin_unlock(&svcpt->scp_at_lock);
+ return 0;
+ }
+ delay = cfs_time_sub(cfs_time_current(), svcpt->scp_at_checktime);
+ svcpt->scp_at_check = 0;
+
+ if (array->paa_count == 0) {
+ spin_unlock(&svcpt->scp_at_lock);
+ return 0;
+ }
+
+ /* The timer went off, but maybe the nearest rpc already completed. */
+ first = array->paa_deadline - now;
+ if (first > at_early_margin) {
+ /* We've still got plenty of time. Reset the timer. */
+ ptlrpc_at_set_timer(svcpt);
+ spin_unlock(&svcpt->scp_at_lock);
+ return 0;
+ }
+
+ /* We're close to a timeout, and we don't know how much longer the
+ server will take. Send early replies to everyone expiring soon. */
+ INIT_LIST_HEAD(&work_list);
+ deadline = -1;
+ index = (unsigned long)array->paa_deadline % array->paa_size;
+ count = array->paa_count;
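+ /* Walk the deadline array as a circular buffer, starting from the
+ * slot holding the earliest deadline; stop scanning a slot as soon
+ * as a request's deadline is beyond now + at_early_margin. */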
+ while (count > 0) {
+ count -= array->paa_reqs_count[index];
+ list_for_each_entry_safe(rq, n,
+ &array->paa_reqs_array[index],
+ rq_timed_list) {
+ if (rq->rq_deadline > now + at_early_margin) {
+ /* update the earliest deadline */
+ if (deadline == -1 ||
+ rq->rq_deadline < deadline)
+ deadline = rq->rq_deadline;
+ break;
+ }
+
+ ptlrpc_at_remove_timed(rq);
+ /**
+ * ptlrpc_server_drop_request() may drop
+ * refcount to 0 already. Let's check this and
+ * don't add entry to work_list
+ */
+ if (likely(atomic_inc_not_zero(&rq->rq_refcount)))
+ list_add(&rq->rq_timed_list, &work_list);
+ counter++;
+ }
+
+ if (++index >= array->paa_size)
+ index = 0;
+ }
+ array->paa_deadline = deadline;
+ /* we have a new earliest deadline, restart the timer */
+ ptlrpc_at_set_timer(svcpt);
+
+ spin_unlock(&svcpt->scp_at_lock);
+
+ CDEBUG(D_ADAPTTO, "timeout in %+ds, asking for %d secs on %d early "
+ "replies\n", first, at_extra, counter);
+ if (first < 0) {
+ /* We're already past request deadlines before we even get a
+ chance to send early replies */
+ LCONSOLE_WARN("%s: This server is not able to keep up with "
+ "request traffic (cpu-bound).\n",
+ svcpt->scp_service->srv_name);
+ CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, "
+ "delay="CFS_DURATION_T"(jiff)\n",
+ counter, svcpt->scp_nreqs_incoming,
+ svcpt->scp_nreqs_active,
+ at_get(&svcpt->scp_at_estimate), delay);
+ }
+
+ /* we took an additional refcount so entries can't be deleted from the
+ * list; no locking is needed */
+ while (!list_empty(&work_list)) {
+ rq = list_entry(work_list.next, struct ptlrpc_request,
+ rq_timed_list);
+ list_del_init(&rq->rq_timed_list);
+
+ if (ptlrpc_at_send_early_reply(rq) == 0)
+ ptlrpc_at_add_timed(rq);
+
+ ptlrpc_server_drop_request(rq);
+ }
+
+ return 1; /* return "did_something" for liblustre */
+}
+
+/**
+ * Put the request to the export list if the request may become
+ * a high priority one.
+ */
+static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt,
+ struct ptlrpc_request *req)
+{
+ int rc = 0;
+
+ if (svcpt->scp_service->srv_ops.so_hpreq_handler) {
+ rc = svcpt->scp_service->srv_ops.so_hpreq_handler(req);
+ if (rc < 0)
+ return rc;
+ LASSERT(rc == 0);
+ }
+ if (req->rq_export && req->rq_ops) {
+ /* Perform request specific check. We should do this check
+ * before the request is added into exp_hp_rpcs list otherwise
+ * it may hit swab race at LU-1044. */
+ if (req->rq_ops->hpreq_check) {
+ rc = req->rq_ops->hpreq_check(req);
+ /**
+ * XXX: Out of all current
+ * ptlrpc_hpreq_ops::hpreq_check(), only
+ * ldlm_cancel_hpreq_check() can return an error code;
+ * other functions assert in similar places, which seems
+ * odd. What also does not seem right is that handlers
+ * for those RPCs do not assert on the same checks, but
+ * rather handle the error cases. e.g. see
+ * ost_rw_hpreq_check(), and ost_brw_read(),
+ * ost_brw_write().
+ */
+ if (rc < 0)
+ return rc;
+ LASSERT(rc == 0 || rc == 1);
+ }
+
+ spin_lock_bh(&req->rq_export->exp_rpc_lock);
+ list_add(&req->rq_exp_list,
+ &req->rq_export->exp_hp_rpcs);
+ spin_unlock_bh(&req->rq_export->exp_rpc_lock);
+ }
+
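+ /* rc is 0 for a normal request and 1 if the request was classified
+ * as high priority; pass it on so the NRS code knows whether to
+ * treat this request as high priority. */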
+ ptlrpc_nrs_req_initialize(svcpt, req, rc);
+
+ return rc;
+}
+
+/** Remove the request from the export list. */
+static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req)
+{
+ if (req->rq_export && req->rq_ops) {
+ /* refresh lock timeout again so that client has more
+ * room to send lock cancel RPC. */
+ if (req->rq_ops->hpreq_fini)
+ req->rq_ops->hpreq_fini(req);
+
+ spin_lock_bh(&req->rq_export->exp_rpc_lock);
+ list_del_init(&req->rq_exp_list);
+ spin_unlock_bh(&req->rq_export->exp_rpc_lock);
+ }
+}
+
+static int ptlrpc_hpreq_check(struct ptlrpc_request *req)
+{
+ return 1;
+}
+
+static struct ptlrpc_hpreq_ops ptlrpc_hpreq_common = {
+ .hpreq_check = ptlrpc_hpreq_check,
+};
+
+/* Hi-Priority RPC check by RPC operation code. */
+int ptlrpc_hpreq_handler(struct ptlrpc_request *req)
+{
+ int opc = lustre_msg_get_opc(req->rq_reqmsg);
+
+ /* Check the export so that only reconnects from a not-yet-evicted
+ * export can become HP RPCs. */
+ if ((req->rq_export != NULL) &&
+ (opc == OBD_PING || opc == MDS_CONNECT || opc == OST_CONNECT))
+ req->rq_ops = &ptlrpc_hpreq_common;
+
+ return 0;
+}
+EXPORT_SYMBOL(ptlrpc_hpreq_handler);
+
+static int ptlrpc_server_request_add(struct ptlrpc_service_part *svcpt,
+ struct ptlrpc_request *req)
+{
+ int rc;
+
+ rc = ptlrpc_server_hpreq_init(svcpt, req);
+ if (rc < 0)
+ return rc;
+
+ ptlrpc_nrs_req_add(svcpt, req, !!rc);
+
+ return 0;
+}
+
+/**
+ * Decide whether handling a high-priority request is allowed.
+ * Callers may invoke this without any lock, but must hold
+ * ptlrpc_service_part::scp_req_lock to get a reliable result.
+ */
+static bool ptlrpc_server_allow_high(struct ptlrpc_service_part *svcpt,
+ bool force)
+{
+ int running = svcpt->scp_nthrs_running;
+
+ if (!nrs_svcpt_has_hp(svcpt))
+ return false;
+
+ if (force)
+ return true;
+
+ if (unlikely(svcpt->scp_service->srv_req_portal == MDS_REQUEST_PORTAL &&
+ CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
+ /* leave just 1 thread for normal RPCs */
+ running = PTLRPC_NTHRS_INIT;
+ if (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL)
+ running += 1;
+ }
+
+ if (svcpt->scp_nreqs_active >= running - 1)
+ return false;
+
+ if (svcpt->scp_nhreqs_active == 0)
+ return true;
+
+ return !ptlrpc_nrs_req_pending_nolock(svcpt, false) ||
+ svcpt->scp_hreq_count < svcpt->scp_service->srv_hpreq_ratio;
+}
+
+static bool ptlrpc_server_high_pending(struct ptlrpc_service_part *svcpt,
+ bool force)
+{
+ return ptlrpc_server_allow_high(svcpt, force) &&
+ ptlrpc_nrs_req_pending_nolock(svcpt, true);
+}
+
+/**
+ * Only allow normal priority requests on a service that has a high-priority
+ * queue if forced (i.e. cleanup), if there are other high priority requests
+ * already being processed (i.e. those threads can service more high-priority
+ * requests), or if there are enough idle threads that a later thread can do
+ * a high priority request.
+ * Callers may invoke this without any lock, but must hold
+ * ptlrpc_service_part::scp_req_lock to get a reliable result.
+ */
+static bool ptlrpc_server_allow_normal(struct ptlrpc_service_part *svcpt,
+ bool force)
+{
+ int running = svcpt->scp_nthrs_running;
+ if (unlikely(svcpt->scp_service->srv_req_portal == MDS_REQUEST_PORTAL &&
+ CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
+ /* leave just 1 thread for normal RPCs */
+ running = PTLRPC_NTHRS_INIT;
+ if (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL)
+ running += 1;
+ }
+
+ if (force ||
+ svcpt->scp_nreqs_active < running - 2)
+ return true;
+
+ if (svcpt->scp_nreqs_active >= running - 1)
+ return false;
+
+ return svcpt->scp_nhreqs_active > 0 || !nrs_svcpt_has_hp(svcpt);
+}
+
+static bool ptlrpc_server_normal_pending(struct ptlrpc_service_part *svcpt,
+ bool force)
+{
+ return ptlrpc_server_allow_normal(svcpt, force) &&
+ ptlrpc_nrs_req_pending_nolock(svcpt, false);
+}
+
+/**
+ * Returns true if there are requests available in the incoming
+ * request queue for processing and it is allowed to fetch them.
+ * Callers may invoke this without any lock, but must hold
+ * ptlrpc_service_part::scp_req_lock to get a reliable result.
+ * \see ptlrpc_server_allow_normal
+ * \see ptlrpc_server_allow_high
+ */
+static inline bool
+ptlrpc_server_request_pending(struct ptlrpc_service_part *svcpt, bool force)
+{
+ return ptlrpc_server_high_pending(svcpt, force) ||
+ ptlrpc_server_normal_pending(svcpt, force);
+}
+
+/**
+ * Fetch a request for processing from queue of unprocessed requests.
+ * Favors high-priority requests.
+ * Returns a pointer to fetched request.
+ */
+static struct ptlrpc_request *
+ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, bool force)
+{
+ struct ptlrpc_request *req = NULL;
+
+ spin_lock(&svcpt->scp_req_lock);
+
+ if (ptlrpc_server_high_pending(svcpt, force)) {
+ req = ptlrpc_nrs_req_get_nolock(svcpt, true, force);
+ if (req != NULL) {
+ svcpt->scp_hreq_count++;
+ goto got_request;
+ }
+ }
+
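+ /* No HP request was taken: fall back to the regular queue and reset
+ * scp_hreq_count, the count of consecutively served HP requests that
+ * ptlrpc_server_allow_high() uses to keep normal requests from being
+ * starved. */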
+ if (ptlrpc_server_normal_pending(svcpt, force)) {
+ req = ptlrpc_nrs_req_get_nolock(svcpt, false, force);
+ if (req != NULL) {
+ svcpt->scp_hreq_count = 0;
+ goto got_request;
+ }
+ }
+
+ spin_unlock(&svcpt->scp_req_lock);
+ return NULL;
+
+got_request:
+ svcpt->scp_nreqs_active++;
+ if (req->rq_hp)
+ svcpt->scp_nhreqs_active++;
+
+ spin_unlock(&svcpt->scp_req_lock);
+
+ if (likely(req->rq_export))
+ class_export_rpc_inc(req->rq_export);
+
+ return req;
+}
+
+/**
+ * Handle freshly incoming reqs, add to timed early reply list,
+ * pass on to regular request queue.
+ * All incoming requests pass through here before getting into
+ * ptlrpc_server_handle_req later on.
+ */
+static int
+ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
+ struct ptlrpc_thread *thread)
+{
+ struct ptlrpc_service *svc = svcpt->scp_service;
+ struct ptlrpc_request *req;
+ __u32 deadline;
+ int rc;
+
+ spin_lock(&svcpt->scp_lock);
+ if (list_empty(&svcpt->scp_req_incoming)) {
+ spin_unlock(&svcpt->scp_lock);
+ return 0;
+ }
+
+ req = list_entry(svcpt->scp_req_incoming.next,
+ struct ptlrpc_request, rq_list);
+ list_del_init(&req->rq_list);
+ svcpt->scp_nreqs_incoming--;
+ /* Consider this still a "queued" request as far as stats are
+ * concerned */
+ spin_unlock(&svcpt->scp_lock);
+
+ /* go through security check/transform */
+ rc = sptlrpc_svc_unwrap_request(req);
+ switch (rc) {
+ case SECSVC_OK:
+ break;
+ case SECSVC_COMPLETE:
+ target_send_reply(req, 0, OBD_FAIL_MDS_ALL_REPLY_NET);
+ goto err_req;
+ case SECSVC_DROP:
+ goto err_req;
+ default:
+ LBUG();
+ }
+
+ /*
+ * for null-flavored rpc, msg has been unpacked by sptlrpc, although
+ * redoing it wouldn't be harmful.
+ */
+ if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) {
+ rc = ptlrpc_unpack_req_msg(req, req->rq_reqlen);
+ if (rc != 0) {
+ CERROR("error unpacking request: ptl %d from %s "
+ "x"LPU64"\n", svc->srv_req_portal,
+ libcfs_id2str(req->rq_peer), req->rq_xid);
+ goto err_req;
+ }
+ }
+
+ rc = lustre_unpack_req_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
+ if (rc) {
+ CERROR("error unpacking ptlrpc body: ptl %d from %s x"
+ LPU64"\n", svc->srv_req_portal,
+ libcfs_id2str(req->rq_peer), req->rq_xid);
+ goto err_req;
+ }
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_REQ_OPC) &&
+ lustre_msg_get_opc(req->rq_reqmsg) == cfs_fail_val) {
+ CERROR("drop incoming rpc opc %u, x"LPU64"\n",
+ cfs_fail_val, req->rq_xid);
+ goto err_req;
+ }
+
+ rc = -EINVAL;
+ if (lustre_msg_get_type(req->rq_reqmsg) != PTL_RPC_MSG_REQUEST) {
+ CERROR("wrong packet type received (type=%u) from %s\n",
+ lustre_msg_get_type(req->rq_reqmsg),
+ libcfs_id2str(req->rq_peer));
+ goto err_req;
+ }
+
+ switch (lustre_msg_get_opc(req->rq_reqmsg)) {
+ case MDS_WRITEPAGE:
+ case OST_WRITE:
+ req->rq_bulk_write = 1;
+ break;
+ case MDS_READPAGE:
+ case OST_READ:
+ case MGS_CONFIG_READ:
+ req->rq_bulk_read = 1;
+ break;
+ }
+
+ CDEBUG(D_RPCTRACE, "got req x"LPU64"\n", req->rq_xid);
+
+ req->rq_export = class_conn2export(
+ lustre_msg_get_handle(req->rq_reqmsg));
+ if (req->rq_export) {
+ rc = ptlrpc_check_req(req);
+ if (rc == 0) {
+ rc = sptlrpc_target_export_check(req->rq_export, req);
+ if (rc)
+ DEBUG_REQ(D_ERROR, req, "DROPPING req with "
+ "illegal security flavor,");
+ }
+
+ if (rc)
+ goto err_req;
+ ptlrpc_update_export_timer(req->rq_export, 0);
+ }
+
+ /* req_in handling should/must be fast */
+ if (cfs_time_current_sec() - req->rq_arrival_time.tv_sec > 5)
+ DEBUG_REQ(D_WARNING, req, "Slow req_in handling "CFS_DURATION_T"s",
+ cfs_time_sub(cfs_time_current_sec(),
+ req->rq_arrival_time.tv_sec));
+
+ /* Set rpc server deadline and add it to the timed list */
+ deadline = (lustre_msghdr_get_flags(req->rq_reqmsg) &
+ MSGHDR_AT_SUPPORT) ?
+ /* The max time the client expects us to take */
+ lustre_msg_get_timeout(req->rq_reqmsg) : obd_timeout;
+ req->rq_deadline = req->rq_arrival_time.tv_sec + deadline;
+ if (unlikely(deadline == 0)) {
+ DEBUG_REQ(D_ERROR, req, "Dropping request with 0 timeout");
+ goto err_req;
+ }
+
+ req->rq_svc_thread = thread;
+
+ ptlrpc_at_add_timed(req);
+
+ /* Move it over to the request processing queue */
+ rc = ptlrpc_server_request_add(svcpt, req);
+ if (rc)
+ GOTO(err_req, rc);
+
+ wake_up(&svcpt->scp_waitq);
+ return 1;
+
+err_req:
+ ptlrpc_server_finish_request(svcpt, req);
+
+ return 1;
+}
+
+/**
+ * Main incoming request handling logic.
+ * Calls handler function from service to do actual processing.
+ */
+static int
+ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
+ struct ptlrpc_thread *thread)
+{
+ struct ptlrpc_service *svc = svcpt->scp_service;
+ struct ptlrpc_request *request;
+ struct timeval work_start;
+ struct timeval work_end;
+ long timediff;
+ int rc;
+ int fail_opc = 0;
+
+ request = ptlrpc_server_request_get(svcpt, false);
+ if (request == NULL)
+ return 0;
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT))
+ fail_opc = OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT;
+ else if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT))
+ fail_opc = OBD_FAIL_PTLRPC_HPREQ_TIMEOUT;
+
+ if (unlikely(fail_opc)) {
+ if (request->rq_export && request->rq_ops)
+ OBD_FAIL_TIMEOUT(fail_opc, 4);
+ }
+
+ ptlrpc_rqphase_move(request, RQ_PHASE_INTERPRET);
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
+ libcfs_debug_dumplog();
+
+ do_gettimeofday(&work_start);
+ timediff = cfs_timeval_sub(&work_start, &request->rq_arrival_time, NULL);
+ if (likely(svc->srv_stats != NULL)) {
+ lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
+ timediff);
+ lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR,
+ svcpt->scp_nreqs_incoming);
+ lprocfs_counter_add(svc->srv_stats, PTLRPC_REQACTIVE_CNTR,
+ svcpt->scp_nreqs_active);
+ lprocfs_counter_add(svc->srv_stats, PTLRPC_TIMEOUT,
+ at_get(&svcpt->scp_at_estimate));
+ }
+
+ rc = lu_context_init(&request->rq_session, LCT_SESSION | LCT_NOREF);
+ if (rc) {
+ CERROR("Failure to initialize session: %d\n", rc);
+ goto out_req;
+ }
+ request->rq_session.lc_thread = thread;
+ request->rq_session.lc_cookie = 0x5;
+ lu_context_enter(&request->rq_session);
+
+ CDEBUG(D_NET, "got req "LPU64"\n", request->rq_xid);
+
+ request->rq_svc_thread = thread;
+ if (thread)
+ request->rq_svc_thread->t_env->le_ses = &request->rq_session;
+
+ if (likely(request->rq_export)) {
+ if (unlikely(ptlrpc_check_req(request)))
+ goto put_conn;
+ ptlrpc_update_export_timer(request->rq_export, timediff >> 19);
+ }
+
+ /* Discard requests queued for longer than the deadline.
+ The deadline is increased if we send an early reply. */
+ if (cfs_time_current_sec() > request->rq_deadline) {
+ DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s"
+ ": deadline "CFS_DURATION_T":"CFS_DURATION_T"s ago\n",
+ libcfs_id2str(request->rq_peer),
+ cfs_time_sub(request->rq_deadline,
+ request->rq_arrival_time.tv_sec),
+ cfs_time_sub(cfs_time_current_sec(),
+ request->rq_deadline));
+ goto put_conn;
+ }
+
+ CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc "
+ "%s:%s+%d:%d:x"LPU64":%s:%d\n", current_comm(),
+ (request->rq_export ?
+ (char *)request->rq_export->exp_client_uuid.uuid : "0"),
+ (request->rq_export ?
+ atomic_read(&request->rq_export->exp_refcount) : -99),
+ lustre_msg_get_status(request->rq_reqmsg), request->rq_xid,
+ libcfs_id2str(request->rq_peer),
+ lustre_msg_get_opc(request->rq_reqmsg));
+
+ if (lustre_msg_get_opc(request->rq_reqmsg) != OBD_PING)
+ CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, cfs_fail_val);
+
+ rc = svc->srv_ops.so_req_handler(request);
+
+ ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);
+
+put_conn:
+ lu_context_exit(&request->rq_session);
+ lu_context_fini(&request->rq_session);
+
+ if (unlikely(cfs_time_current_sec() > request->rq_deadline)) {
+ DEBUG_REQ(D_WARNING, request, "Request took longer "
+ "than estimated ("CFS_DURATION_T":"CFS_DURATION_T"s);"
+ " client may timeout.",
+ cfs_time_sub(request->rq_deadline,
+ request->rq_arrival_time.tv_sec),
+ cfs_time_sub(cfs_time_current_sec(),
+ request->rq_deadline));
+ }
+
+ do_gettimeofday(&work_end);
+ timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
+ CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc "
+ "%s:%s+%d:%d:x"LPU64":%s:%d Request procesed in "
+ "%ldus (%ldus total) trans "LPU64" rc %d/%d\n",
+ current_comm(),
+ (request->rq_export ?
+ (char *)request->rq_export->exp_client_uuid.uuid : "0"),
+ (request->rq_export ?
+ atomic_read(&request->rq_export->exp_refcount) : -99),
+ lustre_msg_get_status(request->rq_reqmsg),
+ request->rq_xid,
+ libcfs_id2str(request->rq_peer),
+ lustre_msg_get_opc(request->rq_reqmsg),
+ timediff,
+ cfs_timeval_sub(&work_end, &request->rq_arrival_time, NULL),
+ (request->rq_repmsg ?
+ lustre_msg_get_transno(request->rq_repmsg) :
+ request->rq_transno),
+ request->rq_status,
+ (request->rq_repmsg ?
+ lustre_msg_get_status(request->rq_repmsg) : -999));
+ if (likely(svc->srv_stats != NULL && request->rq_reqmsg != NULL)) {
+ __u32 op = lustre_msg_get_opc(request->rq_reqmsg);
+ int opc = opcode_offset(op);
+ if (opc > 0 && !(op == LDLM_ENQUEUE || op == MDS_REINT)) {
+ LASSERT(opc < LUSTRE_MAX_OPCODES);
+ lprocfs_counter_add(svc->srv_stats,
+ opc + EXTRA_MAX_OPCODES,
+ timediff);
+ }
+ }
+ if (unlikely(request->rq_early_count)) {
+ DEBUG_REQ(D_ADAPTTO, request,
+ "sent %d early replies before finishing in "
+ CFS_DURATION_T"s",
+ request->rq_early_count,
+ cfs_time_sub(work_end.tv_sec,
+ request->rq_arrival_time.tv_sec));
+ }
+
+out_req:
+ ptlrpc_server_finish_active_request(svcpt, request);
+
+ return 1;
+}
+
+/**
+ * An internal function to process a single reply state object.
+ */
+static int
+ptlrpc_handle_rs(struct ptlrpc_reply_state *rs)
+{
+ struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
+ struct ptlrpc_service *svc = svcpt->scp_service;
+ struct obd_export *exp;
+ int nlocks;
+ int been_handled;
+
+ exp = rs->rs_export;
+
+ LASSERT(rs->rs_difficult);
+ LASSERT(rs->rs_scheduled);
+ LASSERT(list_empty(&rs->rs_list));
+
+ spin_lock(&exp->exp_lock);
+ /* Noop if removed already */
+ list_del_init(&rs->rs_exp_list);
+ spin_unlock(&exp->exp_lock);
+
+ /* The disk commit callback holds exp_uncommitted_replies_lock while it
+ * iterates over newly committed replies, removing them from
+ * exp_uncommitted_replies. It then drops this lock and schedules the
+ * replies it found for handling here.
+ *
+ * We can avoid contention for exp_uncommitted_replies_lock between the
+ * HRT threads and further commit callbacks by checking rs_committed
+ * which is set in the commit callback while it holds both
+ * rs_lock and exp_uncommitted_replies.
+ *
+ * If we see rs_committed clear, the commit callback _may_ not have
+ * handled this reply yet and we race with it to grab
+ * exp_uncommitted_replies_lock before removing the reply from
+ * exp_uncommitted_replies. Note that if we lose the race and the
+ * reply has already been removed, list_del_init() is a noop.
+ *
+ * If we see rs_committed set, we know the commit callback is handling,
+ * or has handled this reply since store reordering might allow us to
+ * see rs_committed set out of sequence. But since this is done
+ * holding rs_lock, we can be sure it has all completed once we hold
+ * rs_lock, which we do right next.
+ */
+ if (!rs->rs_committed) {
+ spin_lock(&exp->exp_uncommitted_replies_lock);
+ list_del_init(&rs->rs_obd_list);
+ spin_unlock(&exp->exp_uncommitted_replies_lock);
+ }
+
+ spin_lock(&rs->rs_lock);
+
+ been_handled = rs->rs_handled;
+ rs->rs_handled = 1;
+
+ nlocks = rs->rs_nlocks; /* atomic "steal", but */
+ rs->rs_nlocks = 0; /* locks still on rs_locks! */
+
+ if (nlocks == 0 && !been_handled) {
+ /* If we see this, we should already have seen the warning
+ * in mds_steal_ack_locks() */
+ CDEBUG(D_HA, "All locks stolen from rs %p x"LPD64".t"LPD64
+ " o%d NID %s\n",
+ rs,
+ rs->rs_xid, rs->rs_transno, rs->rs_opc,
+ libcfs_nid2str(exp->exp_connection->c_peer.nid));
+ }
+
+ if ((!been_handled && rs->rs_on_net) || nlocks > 0) {
+ spin_unlock(&rs->rs_lock);
+
+ if (!been_handled && rs->rs_on_net) {
+ LNetMDUnlink(rs->rs_md_h);
+ /* Ignore return code; we're racing with completion */
+ }
+
+ while (nlocks-- > 0)
+ ldlm_lock_decref(&rs->rs_locks[nlocks],
+ rs->rs_modes[nlocks]);
+
+ spin_lock(&rs->rs_lock);
+ }
+
+ rs->rs_scheduled = 0;
+
+ if (!rs->rs_on_net) {
+ /* Off the net */
+ spin_unlock(&rs->rs_lock);
+
+ class_export_put(exp);
+ rs->rs_export = NULL;
+ ptlrpc_rs_decref(rs);
+ if (atomic_dec_and_test(&svcpt->scp_nreps_difficult) &&
+ svc->srv_is_stopping)
+ wake_up_all(&svcpt->scp_waitq);
+ return 1;
+ }
+
+ /* still on the net; callback will schedule */
+ spin_unlock(&rs->rs_lock);
+ return 1;
+}
+
+
+static void
+ptlrpc_check_rqbd_pool(struct ptlrpc_service_part *svcpt)
+{
+ int avail = svcpt->scp_nrqbds_posted;
+ int low_water = test_req_buffer_pressure ? 0 :
+ svcpt->scp_service->srv_nbuf_per_group / 2;
+
+ /* NB I'm not locking; just looking. */
+
+ /* CAVEAT EMPTOR: We might be allocating buffers here because we've
+ * allowed the request history to grow out of control. We could put a
+ * sanity check on that here and cull some history if we need the
+ * space. */
+
+ if (avail <= low_water)
+ ptlrpc_grow_req_bufs(svcpt, 1);
+
+ if (svcpt->scp_service->srv_stats) {
+ lprocfs_counter_add(svcpt->scp_service->srv_stats,
+ PTLRPC_REQBUF_AVAIL_CNTR, avail);
+ }
+}
+
+static int
+ptlrpc_retry_rqbds(void *arg)
+{
+ struct ptlrpc_service_part *svcpt = (struct ptlrpc_service_part *)arg;
+
+ svcpt->scp_rqbd_timeout = 0;
+ return -ETIMEDOUT;
+}
+
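+/*
+ * "Enough" threads: the number of requests being actively handled is still
+ * below the number of running threads minus one spare thread (minus one
+ * more when the service has a high-priority request handler).
+ */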
+static inline int
+ptlrpc_threads_enough(struct ptlrpc_service_part *svcpt)
+{
+ return svcpt->scp_nreqs_active <
+ svcpt->scp_nthrs_running - 1 -
+ (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL);
+}
+
+/**
+ * Check whether we are allowed to create more threads.
+ * Callers may invoke this without any lock, but must hold
+ * ptlrpc_service_part::scp_lock to get a reliable result.
+ */
+static inline int
+ptlrpc_threads_increasable(struct ptlrpc_service_part *svcpt)
+{
+ return svcpt->scp_nthrs_running +
+ svcpt->scp_nthrs_starting <
+ svcpt->scp_service->srv_nthrs_cpt_limit;
+}
+
+/**
+ * Check whether there are too many requests and we are allowed to
+ * create more threads to handle them.
+ */
+static inline int
+ptlrpc_threads_need_create(struct ptlrpc_service_part *svcpt)
+{
+ return !ptlrpc_threads_enough(svcpt) &&
+ ptlrpc_threads_increasable(svcpt);
+}
+
+static inline int
+ptlrpc_thread_stopping(struct ptlrpc_thread *thread)
+{
+ return thread_is_stopping(thread) ||
+ thread->t_svcpt->scp_service->srv_is_stopping;
+}
+
+static inline int
+ptlrpc_rqbd_pending(struct ptlrpc_service_part *svcpt)
+{
+ return !list_empty(&svcpt->scp_rqbd_idle) &&
+ svcpt->scp_rqbd_timeout == 0;
+}
+
+static inline int
+ptlrpc_at_check(struct ptlrpc_service_part *svcpt)
+{
+ return svcpt->scp_at_check;
+}
+
+/**
+ * Check whether requests are waiting for preprocessing.
+ * Callers may invoke this without any lock, but must hold
+ * ptlrpc_service_part::scp_lock to get a reliable result.
+ */
+static inline int
+ptlrpc_server_request_incoming(struct ptlrpc_service_part *svcpt)
+{
+ return !list_empty(&svcpt->scp_req_incoming);
+}
+
+static __attribute__((__noinline__)) int
+ptlrpc_wait_event(struct ptlrpc_service_part *svcpt,
+ struct ptlrpc_thread *thread)
+{
+ /* Don't exit while there are replies to be handled */
+ struct l_wait_info lwi = LWI_TIMEOUT(svcpt->scp_rqbd_timeout,
+ ptlrpc_retry_rqbds, svcpt);
+
+ /* XXX: Add this back when libcfs watchdog is merged upstream
+ lc_watchdog_disable(thread->t_watchdog);
+ */
+
+ cond_resched();
+
+ l_wait_event_exclusive_head(svcpt->scp_waitq,
+ ptlrpc_thread_stopping(thread) ||
+ ptlrpc_server_request_incoming(svcpt) ||
+ ptlrpc_server_request_pending(svcpt, false) ||
+ ptlrpc_rqbd_pending(svcpt) ||
+ ptlrpc_at_check(svcpt), &lwi);
+
+ if (ptlrpc_thread_stopping(thread))
+ return -EINTR;
+
+ /*
+ lc_watchdog_touch(thread->t_watchdog,
+ ptlrpc_server_get_timeout(svcpt));
+ */
+ return 0;
+}
+
+/**
+ * Main thread body for service threads.
+ * Waits in a loop for new requests to process to appear.
+ * Every time an incoming request is added to the queue, the waitq
+ * is woken up and one of the threads will handle it.
+ */
+static int ptlrpc_main(void *arg)
+{
+ struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
+ struct ptlrpc_service_part *svcpt = thread->t_svcpt;
+ struct ptlrpc_service *svc = svcpt->scp_service;
+ struct ptlrpc_reply_state *rs;
+#ifdef WITH_GROUP_INFO
+ struct group_info *ginfo = NULL;
+#endif
+ struct lu_env *env;
+ int counter = 0, rc = 0;
+
+ thread->t_pid = current_pid();
+ unshare_fs_struct();
+
+ /* NB: we will call cfs_cpt_bind() for all threads, because we
+ * might want to run the lustre server only on a subset of system
+ * CPUs; in that case ->scp_cpt is CFS_CPT_ANY */
+ rc = cfs_cpt_bind(svc->srv_cptable, svcpt->scp_cpt);
+ if (rc != 0) {
+ CWARN("%s: failed to bind %s on CPT %d\n",
+ svc->srv_name, thread->t_name, svcpt->scp_cpt);
+ }
+
+#ifdef WITH_GROUP_INFO
+ ginfo = groups_alloc(0);
+ if (!ginfo) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ set_current_groups(ginfo);
+ put_group_info(ginfo);
+#endif
+
+ if (svc->srv_ops.so_thr_init != NULL) {
+ rc = svc->srv_ops.so_thr_init(thread);
+ if (rc)
+ goto out;
+ }
+
+ OBD_ALLOC_PTR(env);
+ if (env == NULL) {
+ rc = -ENOMEM;
+ goto out_srv_fini;
+ }
+
+ rc = lu_context_init(&env->le_ctx,
+ svc->srv_ctx_tags|LCT_REMEMBER|LCT_NOREF);
+ if (rc)
+ goto out_srv_fini;
+
+ thread->t_env = env;
+ env->le_ctx.lc_thread = thread;
+ env->le_ctx.lc_cookie = 0x6;
+
+ while (!list_empty(&svcpt->scp_rqbd_idle)) {
+ rc = ptlrpc_server_post_idle_rqbds(svcpt);
+ if (rc >= 0)
+ continue;
+
+ CERROR("Failed to post rqbd for %s on CPT %d: %d\n",
+ svc->srv_name, svcpt->scp_cpt, rc);
+ goto out_srv_fini;
+ }
+
+ /* Alloc reply state structure for this one */
+ OBD_ALLOC_LARGE(rs, svc->srv_max_reply_size);
+ if (!rs) {
+ rc = -ENOMEM;
+ goto out_srv_fini;
+ }
+
+ spin_lock(&svcpt->scp_lock);
+
+ LASSERT(thread_is_starting(thread));
+ thread_clear_flags(thread, SVC_STARTING);
+
+ LASSERT(svcpt->scp_nthrs_starting == 1);
+ svcpt->scp_nthrs_starting--;
+
+ /* SVC_STOPPING may already be set here if someone else is trying
+ * to stop the service while this new thread has been dynamically
+ * forked. We still set SVC_RUNNING to let our creator know that
+ * we are now running; however, we will exit as soon as possible */
+ thread_add_flags(thread, SVC_RUNNING);
+ svcpt->scp_nthrs_running++;
+ spin_unlock(&svcpt->scp_lock);
+
+ /* wake up our creator in case it is still waiting. */
+ wake_up(&thread->t_ctl_waitq);
+
+ /*
+ thread->t_watchdog = lc_watchdog_add(ptlrpc_server_get_timeout(svcpt),
+ NULL, NULL);
+ */
+
+ spin_lock(&svcpt->scp_rep_lock);
+ list_add(&rs->rs_list, &svcpt->scp_rep_idle);
+ wake_up(&svcpt->scp_rep_waitq);
+ spin_unlock(&svcpt->scp_rep_lock);
+
+ CDEBUG(D_NET, "service thread %d (#%d) started\n", thread->t_id,
+ svcpt->scp_nthrs_running);
+
+ /* XXX maintain a list of all managed devices: insert here */
+ while (!ptlrpc_thread_stopping(thread)) {
+ if (ptlrpc_wait_event(svcpt, thread))
+ break;
+
+ ptlrpc_check_rqbd_pool(svcpt);
+
+ if (ptlrpc_threads_need_create(svcpt)) {
+ /* Ignore return code - we tried... */
+ ptlrpc_start_thread(svcpt, 0);
+ }
+
+ /* Process all incoming reqs before handling any */
+ if (ptlrpc_server_request_incoming(svcpt)) {
+ lu_context_enter(&env->le_ctx);
+ env->le_ses = NULL;
+ ptlrpc_server_handle_req_in(svcpt, thread);
+ lu_context_exit(&env->le_ctx);
+
+ /* but limit ourselves in case of flood */
+ if (counter++ < 100)
+ continue;
+ counter = 0;
+ }
+
+ if (ptlrpc_at_check(svcpt))
+ ptlrpc_at_check_timed(svcpt);
+
+ if (ptlrpc_server_request_pending(svcpt, false)) {
+ lu_context_enter(&env->le_ctx);
+ ptlrpc_server_handle_request(svcpt, thread);
+ lu_context_exit(&env->le_ctx);
+ }
+
+ if (ptlrpc_rqbd_pending(svcpt) &&
+ ptlrpc_server_post_idle_rqbds(svcpt) < 0) {
+ /* I just failed to repost request buffers.
+ * Wait for a timeout (unless something else
+ * happens) before I try again */
+ svcpt->scp_rqbd_timeout = cfs_time_seconds(1) / 10;
+ CDEBUG(D_RPCTRACE, "Posted buffers: %d\n",
+ svcpt->scp_nrqbds_posted);
+ }
+ }
+
+ /*
+ lc_watchdog_delete(thread->t_watchdog);
+ thread->t_watchdog = NULL;
+ */
+
+out_srv_fini:
+ /*
+ * deconstruct service specific state created by ptlrpc_start_thread()
+ */
+ if (svc->srv_ops.so_thr_done != NULL)
+ svc->srv_ops.so_thr_done(thread);
+
+ if (env != NULL) {
+ lu_context_fini(&env->le_ctx);
+ OBD_FREE_PTR(env);
+ }
+out:
+ CDEBUG(D_RPCTRACE, "service thread [ %p : %u ] %d exiting: rc %d\n",
+ thread, thread->t_pid, thread->t_id, rc);
+
+ spin_lock(&svcpt->scp_lock);
+ if (thread_test_and_clear_flags(thread, SVC_STARTING))
+ svcpt->scp_nthrs_starting--;
+
+ if (thread_test_and_clear_flags(thread, SVC_RUNNING)) {
+ /* must know immediately */
+ svcpt->scp_nthrs_running--;
+ }
+
+ thread->t_id = rc;
+ thread_add_flags(thread, SVC_STOPPED);
+
+ wake_up(&thread->t_ctl_waitq);
+ spin_unlock(&svcpt->scp_lock);
+
+ return rc;
+}
+
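+/*
+ * Splice any reply states queued for this HR thread onto the caller's local
+ * list. Returns true when there is work to do or the reply-handling
+ * subsystem is stopping, so the waiter in ptlrpc_hr_main() stops sleeping.
+ */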
+static int hrt_dont_sleep(struct ptlrpc_hr_thread *hrt,
+ struct list_head *replies)
+{
+ int result;
+
+ spin_lock(&hrt->hrt_lock);
+
+ list_splice_init(&hrt->hrt_queue, replies);
+ result = ptlrpc_hr.hr_stopping || !list_empty(replies);
+
+ spin_unlock(&hrt->hrt_lock);
+ return result;
+}
+
+/**
+ * Main body of "handle reply" function.
+ * It processes acked reply states
+ */
+static int ptlrpc_hr_main(void *arg)
+{
+ struct ptlrpc_hr_thread *hrt = (struct ptlrpc_hr_thread *)arg;
+ struct ptlrpc_hr_partition *hrp = hrt->hrt_partition;
+ LIST_HEAD(replies);
+ char threadname[20];
+ int rc;
+
+ snprintf(threadname, sizeof(threadname), "ptlrpc_hr%02d_%03d",
+ hrp->hrp_cpt, hrt->hrt_id);
+ unshare_fs_struct();
+
+ rc = cfs_cpt_bind(ptlrpc_hr.hr_cpt_table, hrp->hrp_cpt);
+ if (rc != 0) {
+ CWARN("Failed to bind %s on CPT %d of CPT table %p: rc = %d\n",
+ threadname, hrp->hrp_cpt, ptlrpc_hr.hr_cpt_table, rc);
+ }
+
+ atomic_inc(&hrp->hrp_nstarted);
+ wake_up(&ptlrpc_hr.hr_waitq);
+
+ while (!ptlrpc_hr.hr_stopping) {
+ l_wait_condition(hrt->hrt_waitq, hrt_dont_sleep(hrt, &replies));
+
+ while (!list_empty(&replies)) {
+ struct ptlrpc_reply_state *rs;
+
+ rs = list_entry(replies.prev,
+ struct ptlrpc_reply_state,
+ rs_list);
+ list_del_init(&rs->rs_list);
+ ptlrpc_handle_rs(rs);
+ }
+ }
+
+ atomic_inc(&hrp->hrp_nstopped);
+ wake_up(&ptlrpc_hr.hr_waitq);
+
+ return 0;
+}
+
+static void ptlrpc_stop_hr_threads(void)
+{
+ struct ptlrpc_hr_partition *hrp;
+ int i;
+ int j;
+
+ ptlrpc_hr.hr_stopping = 1;
+
+ cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
+ if (hrp->hrp_thrs == NULL)
+ continue; /* uninitialized */
+ for (j = 0; j < hrp->hrp_nthrs; j++)
+ wake_up_all(&hrp->hrp_thrs[j].hrt_waitq);
+ }
+
+ cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
+ if (hrp->hrp_thrs == NULL)
+ continue; /* uninitialized */
+ wait_event(ptlrpc_hr.hr_waitq,
+ atomic_read(&hrp->hrp_nstopped) ==
+ atomic_read(&hrp->hrp_nstarted));
+ }
+}
+
+static int ptlrpc_start_hr_threads(void)
+{
+ struct ptlrpc_hr_partition *hrp;
+ int i;
+ int j;
+
+ cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
+ int rc = 0;
+
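+ /* kthread_run() returns a task pointer on success; PTR_ERR() of a
+ * valid pointer does not look like an error code, so IS_ERR_VALUE(rc)
+ * below is true only when thread creation actually failed. */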
+ for (j = 0; j < hrp->hrp_nthrs; j++) {
+ struct ptlrpc_hr_thread *hrt = &hrp->hrp_thrs[j];
+ rc = PTR_ERR(kthread_run(ptlrpc_hr_main,
+ &hrp->hrp_thrs[j],
+ "ptlrpc_hr%02d_%03d",
+ hrp->hrp_cpt,
+ hrt->hrt_id));
+ if (IS_ERR_VALUE(rc))
+ break;
+ }
+ wait_event(ptlrpc_hr.hr_waitq,
+ atomic_read(&hrp->hrp_nstarted) == j);
+ if (!IS_ERR_VALUE(rc))
+ continue;
+
+ CERROR("Reply handling thread %d:%d Failed on starting: "
+ "rc = %d\n", i, j, rc);
+ ptlrpc_stop_hr_threads();
+ return rc;
+ }
+ return 0;
+}
+
+static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt)
+{
+ struct l_wait_info lwi = { 0 };
+ struct ptlrpc_thread *thread;
+ LIST_HEAD(zombie);
+
+ CDEBUG(D_INFO, "Stopping threads for service %s\n",
+ svcpt->scp_service->srv_name);
+
+ spin_lock(&svcpt->scp_lock);
+ /* let the thread know that we would like it to stop asap */
+ list_for_each_entry(thread, &svcpt->scp_threads, t_link) {
+ CDEBUG(D_INFO, "Stopping thread %s #%u\n",
+ svcpt->scp_service->srv_thread_name, thread->t_id);
+ thread_add_flags(thread, SVC_STOPPING);
+ }
+
+ wake_up_all(&svcpt->scp_waitq);
+
+ while (!list_empty(&svcpt->scp_threads)) {
+ thread = list_entry(svcpt->scp_threads.next,
+ struct ptlrpc_thread, t_link);
+ if (thread_is_stopped(thread)) {
+ list_del(&thread->t_link);
+ list_add(&thread->t_link, &zombie);
+ continue;
+ }
+ spin_unlock(&svcpt->scp_lock);
+
+ CDEBUG(D_INFO, "waiting for stopping-thread %s #%u\n",
+ svcpt->scp_service->srv_thread_name, thread->t_id);
+ l_wait_event(thread->t_ctl_waitq,
+ thread_is_stopped(thread), &lwi);
+
+ spin_lock(&svcpt->scp_lock);
+ }
+
+ spin_unlock(&svcpt->scp_lock);
+
+ while (!list_empty(&zombie)) {
+ thread = list_entry(zombie.next,
+ struct ptlrpc_thread, t_link);
+ list_del(&thread->t_link);
+ OBD_FREE_PTR(thread);
+ }
+}
+
+/**
+ * Stops all threads of a particular service \a svc
+ */
+void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
+{
+ struct ptlrpc_service_part *svcpt;
+ int i;
+
+ ptlrpc_service_for_each_part(svcpt, i, svc) {
+ if (svcpt->scp_service != NULL)
+ ptlrpc_svcpt_stop_threads(svcpt);
+ }
+}
+EXPORT_SYMBOL(ptlrpc_stop_all_threads);
+
+int ptlrpc_start_threads(struct ptlrpc_service *svc)
+{
+ int rc = 0;
+ int i;
+ int j;
+
+ /* We require 2 threads min, see note in ptlrpc_server_handle_request */
+ LASSERT(svc->srv_nthrs_cpt_init >= PTLRPC_NTHRS_INIT);
+
+ for (i = 0; i < svc->srv_ncpts; i++) {
+ for (j = 0; j < svc->srv_nthrs_cpt_init; j++) {
+ rc = ptlrpc_start_thread(svc->srv_parts[i], 1);
+ if (rc == 0)
+ continue;
+
+ if (rc != -EMFILE)
+ goto failed;
+ /* We have enough threads, don't start more. b=15759 */
+ break;
+ }
+ }
+
+ return 0;
+ failed:
+ CERROR("cannot start %s thread #%d_%d: rc %d\n",
+ svc->srv_thread_name, i, j, rc);
+ ptlrpc_stop_all_threads(svc);
+ return rc;
+}
+EXPORT_SYMBOL(ptlrpc_start_threads);
+
+int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
+{
+ struct l_wait_info lwi = { 0 };
+ struct ptlrpc_thread *thread;
+ struct ptlrpc_service *svc;
+ int rc;
+
+ LASSERT(svcpt != NULL);
+
+ svc = svcpt->scp_service;
+
+ CDEBUG(D_RPCTRACE, "%s[%d] started %d min %d max %d\n",
+ svc->srv_name, svcpt->scp_cpt, svcpt->scp_nthrs_running,
+ svc->srv_nthrs_cpt_init, svc->srv_nthrs_cpt_limit);
+
+ again:
+ if (unlikely(svc->srv_is_stopping))
+ return -ESRCH;
+
+ if (!ptlrpc_threads_increasable(svcpt) ||
+ (OBD_FAIL_CHECK(OBD_FAIL_TGT_TOOMANY_THREADS) &&
+ svcpt->scp_nthrs_running == svc->srv_nthrs_cpt_init - 1))
+ return -EMFILE;
+
+ OBD_CPT_ALLOC_PTR(thread, svc->srv_cptable, svcpt->scp_cpt);
+ if (thread == NULL)
+ return -ENOMEM;
+ init_waitqueue_head(&thread->t_ctl_waitq);
+
+ spin_lock(&svcpt->scp_lock);
+ if (!ptlrpc_threads_increasable(svcpt)) {
+ spin_unlock(&svcpt->scp_lock);
+ OBD_FREE_PTR(thread);
+ return -EMFILE;
+ }
+
+ if (svcpt->scp_nthrs_starting != 0) {
+ /* serialize starting because some modules (obdfilter)
+ * might require unique and contiguous t_id */
+ LASSERT(svcpt->scp_nthrs_starting == 1);
+ spin_unlock(&svcpt->scp_lock);
+ OBD_FREE_PTR(thread);
+ if (wait) {
+ CDEBUG(D_INFO, "Waiting for creating thread %s #%d\n",
+ svc->srv_thread_name, svcpt->scp_thr_nextid);
+ schedule();
+ goto again;
+ }
+
+ CDEBUG(D_INFO, "Creating thread %s #%d race, retry later\n",
+ svc->srv_thread_name, svcpt->scp_thr_nextid);
+ return -EAGAIN;
+ }
+
+ svcpt->scp_nthrs_starting++;
+ thread->t_id = svcpt->scp_thr_nextid++;
+ thread_add_flags(thread, SVC_STARTING);
+ thread->t_svcpt = svcpt;
+
+ list_add(&thread->t_link, &svcpt->scp_threads);
+ spin_unlock(&svcpt->scp_lock);
+
+ if (svcpt->scp_cpt >= 0) {
+ snprintf(thread->t_name, sizeof(thread->t_name), "%s%02d_%03d",
+ svc->srv_thread_name, svcpt->scp_cpt, thread->t_id);
+ } else {
+ snprintf(thread->t_name, sizeof(thread->t_name), "%s_%04d",
+ svc->srv_thread_name, thread->t_id);
+ }
+
+ CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name);
+ rc = PTR_ERR(kthread_run(ptlrpc_main, thread, "%s", thread->t_name));
+ if (IS_ERR_VALUE(rc)) {
+ CERROR("cannot start thread '%s': rc %d\n",
+ thread->t_name, rc);
+ spin_lock(&svcpt->scp_lock);
+ --svcpt->scp_nthrs_starting;
+ if (thread_is_stopping(thread)) {
+ /* this ptlrpc_thread is being handled
+ * by ptlrpc_svcpt_stop_threads now
+ */
+ thread_add_flags(thread, SVC_STOPPED);
+ wake_up(&thread->t_ctl_waitq);
+ spin_unlock(&svcpt->scp_lock);
+ } else {
+ list_del(&thread->t_link);
+ spin_unlock(&svcpt->scp_lock);
+ OBD_FREE_PTR(thread);
+ }
+ return rc;
+ }
+
+ if (!wait)
+ return 0;
+
+ l_wait_event(thread->t_ctl_waitq,
+ thread_is_running(thread) || thread_is_stopped(thread),
+ &lwi);
+
+ rc = thread_is_stopped(thread) ? thread->t_id : 0;
+ return rc;
+}
+
+int ptlrpc_hr_init(void)
+{
+ cpumask_t mask;
+ struct ptlrpc_hr_partition *hrp;
+ struct ptlrpc_hr_thread *hrt;
+ int rc;
+ int i;
+ int j;
+ int weight;
+
+ memset(&ptlrpc_hr, 0, sizeof(ptlrpc_hr));
+ ptlrpc_hr.hr_cpt_table = cfs_cpt_table;
+
+ ptlrpc_hr.hr_partitions = cfs_percpt_alloc(ptlrpc_hr.hr_cpt_table,
+ sizeof(*hrp));
+ if (ptlrpc_hr.hr_partitions == NULL)
+ return -ENOMEM;
+
+ init_waitqueue_head(&ptlrpc_hr.hr_waitq);
+
+ cpumask_copy(&mask, topology_thread_cpumask(0));
+ weight = cpus_weight(mask);
+
+ cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
+ hrp->hrp_cpt = i;
+
+ atomic_set(&hrp->hrp_nstarted, 0);
+ atomic_set(&hrp->hrp_nstopped, 0);
+
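+ /* Use roughly one reply-handling thread per physical core in this
+ * CPT: the CPT's CPU count divided by the number of hyper-thread
+ * siblings of CPU 0 (the 'weight' computed above). */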
+ hrp->hrp_nthrs = cfs_cpt_weight(ptlrpc_hr.hr_cpt_table, i);
+ hrp->hrp_nthrs /= weight;
+
+ LASSERT(hrp->hrp_nthrs > 0);
+ OBD_CPT_ALLOC(hrp->hrp_thrs, ptlrpc_hr.hr_cpt_table, i,
+ hrp->hrp_nthrs * sizeof(*hrt));
+ if (hrp->hrp_thrs == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ for (j = 0; j < hrp->hrp_nthrs; j++) {
+ hrt = &hrp->hrp_thrs[j];
+
+ hrt->hrt_id = j;
+ hrt->hrt_partition = hrp;
+ init_waitqueue_head(&hrt->hrt_waitq);
+ spin_lock_init(&hrt->hrt_lock);
+ INIT_LIST_HEAD(&hrt->hrt_queue);
+ }
+ }
+
+ rc = ptlrpc_start_hr_threads();
+out:
+ if (rc != 0)
+ ptlrpc_hr_fini();
+ return rc;
+}
+
+void ptlrpc_hr_fini(void)
+{
+ struct ptlrpc_hr_partition *hrp;
+ int i;
+
+ if (ptlrpc_hr.hr_partitions == NULL)
+ return;
+
+ ptlrpc_stop_hr_threads();
+
+ cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
+ if (hrp->hrp_thrs != NULL) {
+ OBD_FREE(hrp->hrp_thrs,
+ hrp->hrp_nthrs * sizeof(hrp->hrp_thrs[0]));
+ }
+ }
+
+ cfs_percpt_free(ptlrpc_hr.hr_partitions);
+ ptlrpc_hr.hr_partitions = NULL;
+}
+
+
+/**
+ * Wait until all already scheduled replies are processed.
+ */
+static void ptlrpc_wait_replies(struct ptlrpc_service_part *svcpt)
+{
+ while (1) {
+ int rc;
+ struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(10),
+ NULL, NULL);
+
+ rc = l_wait_event(svcpt->scp_waitq,
+ atomic_read(&svcpt->scp_nreps_difficult) == 0, &lwi);
+ if (rc == 0)
+ break;
+ CWARN("Unexpectedly long timeout %s %p\n",
+ svcpt->scp_service->srv_name, svcpt->scp_service);
+ }
+}
+
+static void
+ptlrpc_service_del_atimer(struct ptlrpc_service *svc)
+{
+ struct ptlrpc_service_part *svcpt;
+ int i;
+
+ /* early disarm AT timer... */
+ ptlrpc_service_for_each_part(svcpt, i, svc) {
+ if (svcpt->scp_service != NULL)
+ cfs_timer_disarm(&svcpt->scp_at_timer);
+ }
+}
+
+static void
+ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc)
+{
+ struct ptlrpc_service_part *svcpt;
+ struct ptlrpc_request_buffer_desc *rqbd;
+ struct l_wait_info lwi;
+ int rc;
+ int i;
+
+ /* All history will be culled when the next request buffer is
+ * freed in ptlrpc_service_purge_all() */
+ svc->srv_hist_nrqbds_cpt_max = 0;
+
+ rc = LNetClearLazyPortal(svc->srv_req_portal);
+ LASSERT(rc == 0);
+
+ ptlrpc_service_for_each_part(svcpt, i, svc) {
+ if (svcpt->scp_service == NULL)
+ break;
+
+ /* Unlink all the request buffers. This forces a 'final'
+ * event with its 'unlink' flag set for each posted rqbd */
+ list_for_each_entry(rqbd, &svcpt->scp_rqbd_posted,
+ rqbd_list) {
+ rc = LNetMDUnlink(rqbd->rqbd_md_h);
+ LASSERT(rc == 0 || rc == -ENOENT);
+ }
+ }
+
+ ptlrpc_service_for_each_part(svcpt, i, svc) {
+ if (svcpt->scp_service == NULL)
+ break;
+
+ /* Wait for the network to release any buffers
+ * it's currently filling */
+ spin_lock(&svcpt->scp_lock);
+ while (svcpt->scp_nrqbds_posted != 0) {
+ spin_unlock(&svcpt->scp_lock);
+ /* Network access will complete in finite time but
+ * the HUGE timeout lets us CWARN for visibility
+ * of sluggish NALs */
+ lwi = LWI_TIMEOUT_INTERVAL(
+ cfs_time_seconds(LONG_UNLINK),
+ cfs_time_seconds(1), NULL, NULL);
+ rc = l_wait_event(svcpt->scp_waitq,
+ svcpt->scp_nrqbds_posted == 0, &lwi);
+ if (rc == -ETIMEDOUT) {
+ CWARN("Service %s waiting for "
+ "request buffers\n",
+ svcpt->scp_service->srv_name);
+ }
+ spin_lock(&svcpt->scp_lock);
+ }
+ spin_unlock(&svcpt->scp_lock);
+ }
+}
+
+static void
+ptlrpc_service_purge_all(struct ptlrpc_service *svc)
+{
+ struct ptlrpc_service_part *svcpt;
+ struct ptlrpc_request_buffer_desc *rqbd;
+ struct ptlrpc_request *req;
+ struct ptlrpc_reply_state *rs;
+ int i;
+
+ ptlrpc_service_for_each_part(svcpt, i, svc) {
+ if (svcpt->scp_service == NULL)
+ break;
+
+ spin_lock(&svcpt->scp_rep_lock);
+ while (!list_empty(&svcpt->scp_rep_active)) {
+ rs = list_entry(svcpt->scp_rep_active.next,
+ struct ptlrpc_reply_state, rs_list);
+ spin_lock(&rs->rs_lock);
+ ptlrpc_schedule_difficult_reply(rs);
+ spin_unlock(&rs->rs_lock);
+ }
+ spin_unlock(&svcpt->scp_rep_lock);
+
+ /* purge the request queue. NB No new replies (rqbds
+ * all unlinked) and no service threads, so I'm the only
+ * thread noodling the request queue now */
+ while (!list_empty(&svcpt->scp_req_incoming)) {
+ req = list_entry(svcpt->scp_req_incoming.next,
+ struct ptlrpc_request, rq_list);
+
+ list_del(&req->rq_list);
+ svcpt->scp_nreqs_incoming--;
+ ptlrpc_server_finish_request(svcpt, req);
+ }
+
+ while (ptlrpc_server_request_pending(svcpt, true)) {
+ req = ptlrpc_server_request_get(svcpt, true);
+ ptlrpc_server_finish_active_request(svcpt, req);
+ }
+
+ LASSERT(list_empty(&svcpt->scp_rqbd_posted));
+ LASSERT(svcpt->scp_nreqs_incoming == 0);
+ LASSERT(svcpt->scp_nreqs_active == 0);
+ /* history should have been culled by
+ * ptlrpc_server_finish_request */
+ LASSERT(svcpt->scp_hist_nrqbds == 0);
+
+ /* Now free all the request buffers since nothing
+ * references them any more... */
+
+ while (!list_empty(&svcpt->scp_rqbd_idle)) {
+ rqbd = list_entry(svcpt->scp_rqbd_idle.next,
+ struct ptlrpc_request_buffer_desc,
+ rqbd_list);
+ ptlrpc_free_rqbd(rqbd);
+ }
+ ptlrpc_wait_replies(svcpt);
+
+ while (!list_empty(&svcpt->scp_rep_idle)) {
+ rs = list_entry(svcpt->scp_rep_idle.next,
+ struct ptlrpc_reply_state,
+ rs_list);
+ list_del(&rs->rs_list);
+ OBD_FREE_LARGE(rs, svc->srv_max_reply_size);
+ }
+ }
+}
+
+static void
+ptlrpc_service_free(struct ptlrpc_service *svc)
+{
+ struct ptlrpc_service_part *svcpt;
+ struct ptlrpc_at_array *array;
+ int i;
+
+ ptlrpc_service_for_each_part(svcpt, i, svc) {
+ if (svcpt->scp_service == NULL)
+ break;
+
+ /* In case somebody rearmed this in the meantime */
+ cfs_timer_disarm(&svcpt->scp_at_timer);
+ array = &svcpt->scp_at_array;
+
+ if (array->paa_reqs_array != NULL) {
+ OBD_FREE(array->paa_reqs_array,
+ sizeof(struct list_head) * array->paa_size);
+ array->paa_reqs_array = NULL;
+ }
+
+ if (array->paa_reqs_count != NULL) {
+ OBD_FREE(array->paa_reqs_count,
+ sizeof(__u32) * array->paa_size);
+ array->paa_reqs_count = NULL;
+ }
+ }
+
+ ptlrpc_service_for_each_part(svcpt, i, svc)
+ OBD_FREE_PTR(svcpt);
+
+ if (svc->srv_cpts != NULL)
+ cfs_expr_list_values_free(svc->srv_cpts, svc->srv_ncpts);
+
+ OBD_FREE(svc, offsetof(struct ptlrpc_service,
+ srv_parts[svc->srv_ncpts]));
+}
+
+int ptlrpc_unregister_service(struct ptlrpc_service *service)
+{
+ CDEBUG(D_NET, "%s: tearing down\n", service->srv_name);
+
+ service->srv_is_stopping = 1;
+
+ mutex_lock(&ptlrpc_all_services_mutex);
+ list_del_init(&service->srv_list);
+ mutex_unlock(&ptlrpc_all_services_mutex);
+
+ ptlrpc_service_del_atimer(service);
+ ptlrpc_stop_all_threads(service);
+
+ ptlrpc_service_unlink_rqbd(service);
+ ptlrpc_service_purge_all(service);
+ ptlrpc_service_nrs_cleanup(service);
+
+ ptlrpc_lprocfs_unregister_service(service);
+
+ ptlrpc_service_free(service);
+
+ return 0;
+}
+EXPORT_SYMBOL(ptlrpc_unregister_service);
+
+/**
+ * Returns 0 if the service is healthy.
+ *
+ * Right now, it just checks to make sure that requests aren't languishing
+ * in the queue. We'll use this health check to govern whether a node needs
+ * to be shot, so it's intentionally non-aggressive. */
+int ptlrpc_svcpt_health_check(struct ptlrpc_service_part *svcpt)
+{
+ struct ptlrpc_request *request = NULL;
+ struct timeval right_now;
+ long timediff;
+
+ do_gettimeofday(&right_now);
+
+ spin_lock(&svcpt->scp_req_lock);
+ /* How long has the next entry been waiting? */
+ if (ptlrpc_server_high_pending(svcpt, true))
+ request = ptlrpc_nrs_req_peek_nolock(svcpt, true);
+ else if (ptlrpc_server_normal_pending(svcpt, true))
+ request = ptlrpc_nrs_req_peek_nolock(svcpt, false);
+
+ if (request == NULL) {
+ spin_unlock(&svcpt->scp_req_lock);
+ return 0;
+ }
+
+ timediff = cfs_timeval_sub(&right_now, &request->rq_arrival_time, NULL);
+ spin_unlock(&svcpt->scp_req_lock);
+
+ if ((timediff / ONE_MILLION) >
+ (AT_OFF ? obd_timeout * 3 / 2 : at_max)) {
+ CERROR("%s: unhealthy - request has been waiting %lds\n",
+ svcpt->scp_service->srv_name, timediff / ONE_MILLION);
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+ptlrpc_service_health_check(struct ptlrpc_service *svc)
+{
+ struct ptlrpc_service_part *svcpt;
+ int i;
+
+ if (svc == NULL)
+ return 0;
+
+ ptlrpc_service_for_each_part(svcpt, i, svc) {
+ int rc = ptlrpc_svcpt_health_check(svcpt);
+
+ if (rc != 0)
+ return rc;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ptlrpc_service_health_check);
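For reference, a minimal caller sketch (not part of this patch) of how the exported health check might be consumed, e.g. by a watchdog: it walks a global service list under ptlrpc_all_services_mutex, the mutex already taken in ptlrpc_unregister_service() above. The list head name ptlrpc_all_services and the helper name below are assumptions for illustration only.

	/* Hypothetical watchdog helper -- illustration only, not in the patch.
	 * Walks every registered ptlrpc service and returns the first
	 * non-zero health status (0 means all services look healthy).
	 * Assumes a global list "ptlrpc_all_services" guarded by
	 * ptlrpc_all_services_mutex, matching ptlrpc_unregister_service(). */
	static int ptlrpc_check_all_services_example(void)
	{
		struct ptlrpc_service *svc;
		int rc = 0;

		mutex_lock(&ptlrpc_all_services_mutex);
		list_for_each_entry(svc, &ptlrpc_all_services, srv_list) {
			rc = ptlrpc_service_health_check(svc);
			if (rc != 0)
				break;
		}
		mutex_unlock(&ptlrpc_all_services_mutex);

		return rc;
	}

The same 0-healthy / -1-unhealthy convention as ptlrpc_svcpt_health_check() applies, so a caller can simply propagate the return value.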
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wirehdr.c b/drivers/staging/lustre/lustre/ptlrpc/wirehdr.c
new file mode 100644
index 0000000..93bc40b
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/wirehdr.c
@@ -0,0 +1,47 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_RPC
+
+# ifdef CONFIG_FS_POSIX_ACL
+# include <linux/fs.h>
+# include <linux/posix_acl_xattr.h>
+# endif
+
+#include <obd_support.h>
+#include <obd_class.h>
+#include <lustre_net.h>
+#include <lustre_disk.h>
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
new file mode 100644
index 0000000..9890bd9
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
@@ -0,0 +1,4474 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ */
+
+#define DEBUG_SUBSYSTEM S_RPC
+
+# ifdef CONFIG_FS_POSIX_ACL
+# include <linux/fs.h>
+# include <linux/posix_acl_xattr.h>
+# endif
+
+#include <obd_support.h>
+#include <obd_class.h>
+#include <lustre_net.h>
+#include <lustre_disk.h>
+void lustre_assert_wire_constants(void)
+{
+ /* Wire protocol assertions generated by 'wirecheck'
+ * (make -C lustre/utils newwiretest)
+ * running on Linux deva 2.6.32.279.lustre #5 SMP Tue Apr 9 22:52:17 CST 2013 x86_64 x86_64 x
+ * with gcc version 4.4.4 20100726 (Red Hat 4.4.4-13) (GCC) */
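+	/* Illustrative note (not wirecheck output): each check below is either
+	 * a constant pin, LASSERTF(SOME_OPC == value, ...), or a layout pin on
+	 * a struct field.  For a hypothetical struct foo with a 4-byte member
+	 * f_bar expected at byte offset 8, the generated pair would read:
+	 *
+	 *   LASSERTF((int)offsetof(struct foo, f_bar) == 8, "found %lld\n",
+	 *            (long long)(int)offsetof(struct foo, f_bar));
+	 *   LASSERTF((int)sizeof(((struct foo *)0)->f_bar) == 4, "found %lld\n",
+	 *            (long long)(int)sizeof(((struct foo *)0)->f_bar));
+	 *
+	 * so any change to the on-wire layout trips these assertions at
+	 * runtime. */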
+
+
+ /* Constants... */
+ LASSERTF(PTL_RPC_MSG_REQUEST == 4711, "found %lld\n",
+ (long long)PTL_RPC_MSG_REQUEST);
+ LASSERTF(PTL_RPC_MSG_ERR == 4712, "found %lld\n",
+ (long long)PTL_RPC_MSG_ERR);
+ LASSERTF(PTL_RPC_MSG_REPLY == 4713, "found %lld\n",
+ (long long)PTL_RPC_MSG_REPLY);
+ LASSERTF(MDS_DIR_END_OFF == 0xfffffffffffffffeULL, "found 0x%.16llxULL\n",
+ MDS_DIR_END_OFF);
+ LASSERTF(DEAD_HANDLE_MAGIC == 0xdeadbeefcafebabeULL, "found 0x%.16llxULL\n",
+ DEAD_HANDLE_MAGIC);
+ CLASSERT(MTI_NAME_MAXLEN == 64);
+ LASSERTF(OST_REPLY == 0, "found %lld\n",
+ (long long)OST_REPLY);
+ LASSERTF(OST_GETATTR == 1, "found %lld\n",
+ (long long)OST_GETATTR);
+ LASSERTF(OST_SETATTR == 2, "found %lld\n",
+ (long long)OST_SETATTR);
+ LASSERTF(OST_READ == 3, "found %lld\n",
+ (long long)OST_READ);
+ LASSERTF(OST_WRITE == 4, "found %lld\n",
+ (long long)OST_WRITE);
+ LASSERTF(OST_CREATE == 5, "found %lld\n",
+ (long long)OST_CREATE);
+ LASSERTF(OST_DESTROY == 6, "found %lld\n",
+ (long long)OST_DESTROY);
+ LASSERTF(OST_GET_INFO == 7, "found %lld\n",
+ (long long)OST_GET_INFO);
+ LASSERTF(OST_CONNECT == 8, "found %lld\n",
+ (long long)OST_CONNECT);
+ LASSERTF(OST_DISCONNECT == 9, "found %lld\n",
+ (long long)OST_DISCONNECT);
+ LASSERTF(OST_PUNCH == 10, "found %lld\n",
+ (long long)OST_PUNCH);
+ LASSERTF(OST_OPEN == 11, "found %lld\n",
+ (long long)OST_OPEN);
+ LASSERTF(OST_CLOSE == 12, "found %lld\n",
+ (long long)OST_CLOSE);
+ LASSERTF(OST_STATFS == 13, "found %lld\n",
+ (long long)OST_STATFS);
+ LASSERTF(OST_SYNC == 16, "found %lld\n",
+ (long long)OST_SYNC);
+ LASSERTF(OST_SET_INFO == 17, "found %lld\n",
+ (long long)OST_SET_INFO);
+ LASSERTF(OST_QUOTACHECK == 18, "found %lld\n",
+ (long long)OST_QUOTACHECK);
+ LASSERTF(OST_QUOTACTL == 19, "found %lld\n",
+ (long long)OST_QUOTACTL);
+ LASSERTF(OST_QUOTA_ADJUST_QUNIT == 20, "found %lld\n",
+ (long long)OST_QUOTA_ADJUST_QUNIT);
+ LASSERTF(OST_LAST_OPC == 21, "found %lld\n",
+ (long long)OST_LAST_OPC);
+ LASSERTF(OBD_OBJECT_EOF == 0xffffffffffffffffULL, "found 0x%.16llxULL\n",
+ OBD_OBJECT_EOF);
+ LASSERTF(OST_MIN_PRECREATE == 32, "found %lld\n",
+ (long long)OST_MIN_PRECREATE);
+ LASSERTF(OST_MAX_PRECREATE == 20000, "found %lld\n",
+ (long long)OST_MAX_PRECREATE);
+ LASSERTF(OST_LVB_ERR_INIT == 0xffbadbad80000000ULL, "found 0x%.16llxULL\n",
+ OST_LVB_ERR_INIT);
+ LASSERTF(OST_LVB_ERR_MASK == 0xffbadbad00000000ULL, "found 0x%.16llxULL\n",
+ OST_LVB_ERR_MASK);
+ LASSERTF(MDS_FIRST_OPC == 33, "found %lld\n",
+ (long long)MDS_FIRST_OPC);
+ LASSERTF(MDS_GETATTR == 33, "found %lld\n",
+ (long long)MDS_GETATTR);
+ LASSERTF(MDS_GETATTR_NAME == 34, "found %lld\n",
+ (long long)MDS_GETATTR_NAME);
+ LASSERTF(MDS_CLOSE == 35, "found %lld\n",
+ (long long)MDS_CLOSE);
+ LASSERTF(MDS_REINT == 36, "found %lld\n",
+ (long long)MDS_REINT);
+ LASSERTF(MDS_READPAGE == 37, "found %lld\n",
+ (long long)MDS_READPAGE);
+ LASSERTF(MDS_CONNECT == 38, "found %lld\n",
+ (long long)MDS_CONNECT);
+ LASSERTF(MDS_DISCONNECT == 39, "found %lld\n",
+ (long long)MDS_DISCONNECT);
+ LASSERTF(MDS_GETSTATUS == 40, "found %lld\n",
+ (long long)MDS_GETSTATUS);
+ LASSERTF(MDS_STATFS == 41, "found %lld\n",
+ (long long)MDS_STATFS);
+ LASSERTF(MDS_PIN == 42, "found %lld\n",
+ (long long)MDS_PIN);
+ LASSERTF(MDS_UNPIN == 43, "found %lld\n",
+ (long long)MDS_UNPIN);
+ LASSERTF(MDS_SYNC == 44, "found %lld\n",
+ (long long)MDS_SYNC);
+ LASSERTF(MDS_DONE_WRITING == 45, "found %lld\n",
+ (long long)MDS_DONE_WRITING);
+ LASSERTF(MDS_SET_INFO == 46, "found %lld\n",
+ (long long)MDS_SET_INFO);
+ LASSERTF(MDS_QUOTACHECK == 47, "found %lld\n",
+ (long long)MDS_QUOTACHECK);
+ LASSERTF(MDS_QUOTACTL == 48, "found %lld\n",
+ (long long)MDS_QUOTACTL);
+ LASSERTF(MDS_GETXATTR == 49, "found %lld\n",
+ (long long)MDS_GETXATTR);
+ LASSERTF(MDS_SETXATTR == 50, "found %lld\n",
+ (long long)MDS_SETXATTR);
+ LASSERTF(MDS_WRITEPAGE == 51, "found %lld\n",
+ (long long)MDS_WRITEPAGE);
+ LASSERTF(MDS_IS_SUBDIR == 52, "found %lld\n",
+ (long long)MDS_IS_SUBDIR);
+ LASSERTF(MDS_GET_INFO == 53, "found %lld\n",
+ (long long)MDS_GET_INFO);
+ LASSERTF(MDS_HSM_STATE_GET == 54, "found %lld\n",
+ (long long)MDS_HSM_STATE_GET);
+ LASSERTF(MDS_HSM_STATE_SET == 55, "found %lld\n",
+ (long long)MDS_HSM_STATE_SET);
+ LASSERTF(MDS_HSM_ACTION == 56, "found %lld\n",
+ (long long)MDS_HSM_ACTION);
+ LASSERTF(MDS_HSM_PROGRESS == 57, "found %lld\n",
+ (long long)MDS_HSM_PROGRESS);
+ LASSERTF(MDS_HSM_REQUEST == 58, "found %lld\n",
+ (long long)MDS_HSM_REQUEST);
+ LASSERTF(MDS_HSM_CT_REGISTER == 59, "found %lld\n",
+ (long long)MDS_HSM_CT_REGISTER);
+ LASSERTF(MDS_HSM_CT_UNREGISTER == 60, "found %lld\n",
+ (long long)MDS_HSM_CT_UNREGISTER);
+ LASSERTF(MDS_SWAP_LAYOUTS == 61, "found %lld\n",
+ (long long)MDS_SWAP_LAYOUTS);
+ LASSERTF(MDS_LAST_OPC == 62, "found %lld\n",
+ (long long)MDS_LAST_OPC);
+ LASSERTF(REINT_SETATTR == 1, "found %lld\n",
+ (long long)REINT_SETATTR);
+ LASSERTF(REINT_CREATE == 2, "found %lld\n",
+ (long long)REINT_CREATE);
+ LASSERTF(REINT_LINK == 3, "found %lld\n",
+ (long long)REINT_LINK);
+ LASSERTF(REINT_UNLINK == 4, "found %lld\n",
+ (long long)REINT_UNLINK);
+ LASSERTF(REINT_RENAME == 5, "found %lld\n",
+ (long long)REINT_RENAME);
+ LASSERTF(REINT_OPEN == 6, "found %lld\n",
+ (long long)REINT_OPEN);
+ LASSERTF(REINT_SETXATTR == 7, "found %lld\n",
+ (long long)REINT_SETXATTR);
+ LASSERTF(REINT_RMENTRY == 8, "found %lld\n",
+ (long long)REINT_RMENTRY);
+ LASSERTF(REINT_MAX == 9, "found %lld\n",
+ (long long)REINT_MAX);
+ LASSERTF(DISP_IT_EXECD == 0x00000001UL, "found 0x%.8xUL\n",
+ (unsigned)DISP_IT_EXECD);
+ LASSERTF(DISP_LOOKUP_EXECD == 0x00000002UL, "found 0x%.8xUL\n",
+ (unsigned)DISP_LOOKUP_EXECD);
+ LASSERTF(DISP_LOOKUP_NEG == 0x00000004UL, "found 0x%.8xUL\n",
+ (unsigned)DISP_LOOKUP_NEG);
+ LASSERTF(DISP_LOOKUP_POS == 0x00000008UL, "found 0x%.8xUL\n",
+ (unsigned)DISP_LOOKUP_POS);
+ LASSERTF(DISP_OPEN_CREATE == 0x00000010UL, "found 0x%.8xUL\n",
+ (unsigned)DISP_OPEN_CREATE);
+ LASSERTF(DISP_OPEN_OPEN == 0x00000020UL, "found 0x%.8xUL\n",
+ (unsigned)DISP_OPEN_OPEN);
+ LASSERTF(DISP_ENQ_COMPLETE == 0x00400000UL, "found 0x%.8xUL\n",
+ (unsigned)DISP_ENQ_COMPLETE);
+ LASSERTF(DISP_ENQ_OPEN_REF == 0x00800000UL, "found 0x%.8xUL\n",
+ (unsigned)DISP_ENQ_OPEN_REF);
+ LASSERTF(DISP_ENQ_CREATE_REF == 0x01000000UL, "found 0x%.8xUL\n",
+ (unsigned)DISP_ENQ_CREATE_REF);
+ LASSERTF(DISP_OPEN_LOCK == 0x02000000UL, "found 0x%.8xUL\n",
+ (unsigned)DISP_OPEN_LOCK);
+ LASSERTF(MDS_STATUS_CONN == 1, "found %lld\n",
+ (long long)MDS_STATUS_CONN);
+ LASSERTF(MDS_STATUS_LOV == 2, "found %lld\n",
+ (long long)MDS_STATUS_LOV);
+ LASSERTF(LUSTRE_BFLAG_UNCOMMITTED_WRITES == 1, "found %lld\n",
+ (long long)LUSTRE_BFLAG_UNCOMMITTED_WRITES);
+ LASSERTF(MF_SOM_CHANGE == 0x00000001UL, "found 0x%.8xUL\n",
+ (unsigned)MF_SOM_CHANGE);
+ LASSERTF(MF_EPOCH_OPEN == 0x00000002UL, "found 0x%.8xUL\n",
+ (unsigned)MF_EPOCH_OPEN);
+ LASSERTF(MF_EPOCH_CLOSE == 0x00000004UL, "found 0x%.8xUL\n",
+ (unsigned)MF_EPOCH_CLOSE);
+ LASSERTF(MF_MDC_CANCEL_FID1 == 0x00000008UL, "found 0x%.8xUL\n",
+ (unsigned)MF_MDC_CANCEL_FID1);
+ LASSERTF(MF_MDC_CANCEL_FID2 == 0x00000010UL, "found 0x%.8xUL\n",
+ (unsigned)MF_MDC_CANCEL_FID2);
+ LASSERTF(MF_MDC_CANCEL_FID3 == 0x00000020UL, "found 0x%.8xUL\n",
+ (unsigned)MF_MDC_CANCEL_FID3);
+ LASSERTF(MF_MDC_CANCEL_FID4 == 0x00000040UL, "found 0x%.8xUL\n",
+ (unsigned)MF_MDC_CANCEL_FID4);
+ LASSERTF(MF_SOM_AU == 0x00000080UL, "found 0x%.8xUL\n",
+ (unsigned)MF_SOM_AU);
+ LASSERTF(MF_GETATTR_LOCK == 0x00000100UL, "found 0x%.8xUL\n",
+ (unsigned)MF_GETATTR_LOCK);
+ LASSERTF(MDS_ATTR_MODE == 0x0000000000000001ULL, "found 0x%.16llxULL\n",
+ (long long)MDS_ATTR_MODE);
+ LASSERTF(MDS_ATTR_UID == 0x0000000000000002ULL, "found 0x%.16llxULL\n",
+ (long long)MDS_ATTR_UID);
+ LASSERTF(MDS_ATTR_GID == 0x0000000000000004ULL, "found 0x%.16llxULL\n",
+ (long long)MDS_ATTR_GID);
+ LASSERTF(MDS_ATTR_SIZE == 0x0000000000000008ULL, "found 0x%.16llxULL\n",
+ (long long)MDS_ATTR_SIZE);
+ LASSERTF(MDS_ATTR_ATIME == 0x0000000000000010ULL, "found 0x%.16llxULL\n",
+ (long long)MDS_ATTR_ATIME);
+ LASSERTF(MDS_ATTR_MTIME == 0x0000000000000020ULL, "found 0x%.16llxULL\n",
+ (long long)MDS_ATTR_MTIME);
+ LASSERTF(MDS_ATTR_CTIME == 0x0000000000000040ULL, "found 0x%.16llxULL\n",
+ (long long)MDS_ATTR_CTIME);
+ LASSERTF(MDS_ATTR_ATIME_SET == 0x0000000000000080ULL, "found 0x%.16llxULL\n",
+ (long long)MDS_ATTR_ATIME_SET);
+ LASSERTF(MDS_ATTR_MTIME_SET == 0x0000000000000100ULL, "found 0x%.16llxULL\n",
+ (long long)MDS_ATTR_MTIME_SET);
+ LASSERTF(MDS_ATTR_FORCE == 0x0000000000000200ULL, "found 0x%.16llxULL\n",
+ (long long)MDS_ATTR_FORCE);
+ LASSERTF(MDS_ATTR_ATTR_FLAG == 0x0000000000000400ULL, "found 0x%.16llxULL\n",
+ (long long)MDS_ATTR_ATTR_FLAG);
+ LASSERTF(MDS_ATTR_KILL_SUID == 0x0000000000000800ULL, "found 0x%.16llxULL\n",
+ (long long)MDS_ATTR_KILL_SUID);
+ LASSERTF(MDS_ATTR_KILL_SGID == 0x0000000000001000ULL, "found 0x%.16llxULL\n",
+ (long long)MDS_ATTR_KILL_SGID);
+ LASSERTF(MDS_ATTR_CTIME_SET == 0x0000000000002000ULL, "found 0x%.16llxULL\n",
+ (long long)MDS_ATTR_CTIME_SET);
+ LASSERTF(MDS_ATTR_FROM_OPEN == 0x0000000000004000ULL, "found 0x%.16llxULL\n",
+ (long long)MDS_ATTR_FROM_OPEN);
+ LASSERTF(MDS_ATTR_BLOCKS == 0x0000000000008000ULL, "found 0x%.16llxULL\n",
+ (long long)MDS_ATTR_BLOCKS);
+ LASSERTF(FLD_QUERY == 900, "found %lld\n",
+ (long long)FLD_QUERY);
+ LASSERTF(FLD_FIRST_OPC == 900, "found %lld\n",
+ (long long)FLD_FIRST_OPC);
+ LASSERTF(FLD_LAST_OPC == 901, "found %lld\n",
+ (long long)FLD_LAST_OPC);
+ LASSERTF(SEQ_QUERY == 700, "found %lld\n",
+ (long long)SEQ_QUERY);
+ LASSERTF(SEQ_FIRST_OPC == 700, "found %lld\n",
+ (long long)SEQ_FIRST_OPC);
+ LASSERTF(SEQ_LAST_OPC == 701, "found %lld\n",
+ (long long)SEQ_LAST_OPC);
+ LASSERTF(SEQ_ALLOC_SUPER == 0, "found %lld\n",
+ (long long)SEQ_ALLOC_SUPER);
+ LASSERTF(SEQ_ALLOC_META == 1, "found %lld\n",
+ (long long)SEQ_ALLOC_META);
+ LASSERTF(LDLM_ENQUEUE == 101, "found %lld\n",
+ (long long)LDLM_ENQUEUE);
+ LASSERTF(LDLM_CONVERT == 102, "found %lld\n",
+ (long long)LDLM_CONVERT);
+ LASSERTF(LDLM_CANCEL == 103, "found %lld\n",
+ (long long)LDLM_CANCEL);
+ LASSERTF(LDLM_BL_CALLBACK == 104, "found %lld\n",
+ (long long)LDLM_BL_CALLBACK);
+ LASSERTF(LDLM_CP_CALLBACK == 105, "found %lld\n",
+ (long long)LDLM_CP_CALLBACK);
+ LASSERTF(LDLM_GL_CALLBACK == 106, "found %lld\n",
+ (long long)LDLM_GL_CALLBACK);
+ LASSERTF(LDLM_SET_INFO == 107, "found %lld\n",
+ (long long)LDLM_SET_INFO);
+ LASSERTF(LDLM_LAST_OPC == 108, "found %lld\n",
+ (long long)LDLM_LAST_OPC);
+ LASSERTF(LCK_MINMODE == 0, "found %lld\n",
+ (long long)LCK_MINMODE);
+ LASSERTF(LCK_EX == 1, "found %lld\n",
+ (long long)LCK_EX);
+ LASSERTF(LCK_PW == 2, "found %lld\n",
+ (long long)LCK_PW);
+ LASSERTF(LCK_PR == 4, "found %lld\n",
+ (long long)LCK_PR);
+ LASSERTF(LCK_CW == 8, "found %lld\n",
+ (long long)LCK_CW);
+ LASSERTF(LCK_CR == 16, "found %lld\n",
+ (long long)LCK_CR);
+ LASSERTF(LCK_NL == 32, "found %lld\n",
+ (long long)LCK_NL);
+ LASSERTF(LCK_GROUP == 64, "found %lld\n",
+ (long long)LCK_GROUP);
+ LASSERTF(LCK_COS == 128, "found %lld\n",
+ (long long)LCK_COS);
+ LASSERTF(LCK_MAXMODE == 129, "found %lld\n",
+ (long long)LCK_MAXMODE);
+ LASSERTF(LCK_MODE_NUM == 8, "found %lld\n",
+ (long long)LCK_MODE_NUM);
+ CLASSERT(LDLM_PLAIN == 10);
+ CLASSERT(LDLM_EXTENT == 11);
+ CLASSERT(LDLM_FLOCK == 12);
+ CLASSERT(LDLM_IBITS == 13);
+ CLASSERT(LDLM_MAX_TYPE == 14);
+ CLASSERT(LUSTRE_RES_ID_SEQ_OFF == 0);
+ CLASSERT(LUSTRE_RES_ID_VER_OID_OFF == 1);
+ LASSERTF(UPDATE_OBJ == 1000, "found %lld\n",
+ (long long)UPDATE_OBJ);
+ LASSERTF(UPDATE_LAST_OPC == 1001, "found %lld\n",
+ (long long)UPDATE_LAST_OPC);
+ CLASSERT(LUSTRE_RES_ID_QUOTA_SEQ_OFF == 2);
+ CLASSERT(LUSTRE_RES_ID_QUOTA_VER_OID_OFF == 3);
+ CLASSERT(LUSTRE_RES_ID_HSH_OFF == 3);
+ CLASSERT(LQUOTA_TYPE_USR == 0);
+ CLASSERT(LQUOTA_TYPE_GRP == 1);
+ CLASSERT(LQUOTA_RES_MD == 1);
+ CLASSERT(LQUOTA_RES_DT == 2);
+ LASSERTF(OBD_PING == 400, "found %lld\n",
+ (long long)OBD_PING);
+ LASSERTF(OBD_LOG_CANCEL == 401, "found %lld\n",
+ (long long)OBD_LOG_CANCEL);
+ LASSERTF(OBD_QC_CALLBACK == 402, "found %lld\n",
+ (long long)OBD_QC_CALLBACK);
+ LASSERTF(OBD_IDX_READ == 403, "found %lld\n",
+ (long long)OBD_IDX_READ);
+ LASSERTF(OBD_LAST_OPC == 404, "found %lld\n",
+ (long long)OBD_LAST_OPC);
+ LASSERTF(QUOTA_DQACQ == 601, "found %lld\n",
+ (long long)QUOTA_DQACQ);
+ LASSERTF(QUOTA_DQREL == 602, "found %lld\n",
+ (long long)QUOTA_DQREL);
+ LASSERTF(QUOTA_LAST_OPC == 603, "found %lld\n",
+ (long long)QUOTA_LAST_OPC);
+ LASSERTF(MGS_CONNECT == 250, "found %lld\n",
+ (long long)MGS_CONNECT);
+ LASSERTF(MGS_DISCONNECT == 251, "found %lld\n",
+ (long long)MGS_DISCONNECT);
+ LASSERTF(MGS_EXCEPTION == 252, "found %lld\n",
+ (long long)MGS_EXCEPTION);
+ LASSERTF(MGS_TARGET_REG == 253, "found %lld\n",
+ (long long)MGS_TARGET_REG);
+ LASSERTF(MGS_TARGET_DEL == 254, "found %lld\n",
+ (long long)MGS_TARGET_DEL);
+ LASSERTF(MGS_SET_INFO == 255, "found %lld\n",
+ (long long)MGS_SET_INFO);
+ LASSERTF(MGS_LAST_OPC == 257, "found %lld\n",
+ (long long)MGS_LAST_OPC);
+ LASSERTF(SEC_CTX_INIT == 801, "found %lld\n",
+ (long long)SEC_CTX_INIT);
+ LASSERTF(SEC_CTX_INIT_CONT == 802, "found %lld\n",
+ (long long)SEC_CTX_INIT_CONT);
+ LASSERTF(SEC_CTX_FINI == 803, "found %lld\n",
+ (long long)SEC_CTX_FINI);
+ LASSERTF(SEC_LAST_OPC == 804, "found %lld\n",
+ (long long)SEC_LAST_OPC);
+ /* Sizes and Offsets */
+
+ /* Checks for struct obd_uuid */
+ LASSERTF((int)sizeof(struct obd_uuid) == 40, "found %lld\n",
+ (long long)(int)sizeof(struct obd_uuid));
+
+ /* Checks for struct lu_seq_range */
+ LASSERTF((int)sizeof(struct lu_seq_range) == 24, "found %lld\n",
+ (long long)(int)sizeof(struct lu_seq_range));
+ LASSERTF((int)offsetof(struct lu_seq_range, lsr_start) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct lu_seq_range, lsr_start));
+ LASSERTF((int)sizeof(((struct lu_seq_range *)0)->lsr_start) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lu_seq_range *)0)->lsr_start));
+ LASSERTF((int)offsetof(struct lu_seq_range, lsr_end) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct lu_seq_range, lsr_end));
+ LASSERTF((int)sizeof(((struct lu_seq_range *)0)->lsr_end) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lu_seq_range *)0)->lsr_end));
+ LASSERTF((int)offsetof(struct lu_seq_range, lsr_index) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct lu_seq_range, lsr_index));
+ LASSERTF((int)sizeof(((struct lu_seq_range *)0)->lsr_index) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lu_seq_range *)0)->lsr_index));
+ LASSERTF((int)offsetof(struct lu_seq_range, lsr_flags) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct lu_seq_range, lsr_flags));
+ LASSERTF((int)sizeof(((struct lu_seq_range *)0)->lsr_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lu_seq_range *)0)->lsr_flags));
+ LASSERTF(LU_SEQ_RANGE_MDT == 0, "found %lld\n",
+ (long long)LU_SEQ_RANGE_MDT);
+ LASSERTF(LU_SEQ_RANGE_OST == 1, "found %lld\n",
+ (long long)LU_SEQ_RANGE_OST);
+
+ /* Checks for struct lustre_mdt_attrs */
+ LASSERTF((int)sizeof(struct lustre_mdt_attrs) == 24, "found %lld\n",
+ (long long)(int)sizeof(struct lustre_mdt_attrs));
+ LASSERTF((int)offsetof(struct lustre_mdt_attrs, lma_compat) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct lustre_mdt_attrs, lma_compat));
+ LASSERTF((int)sizeof(((struct lustre_mdt_attrs *)0)->lma_compat) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lustre_mdt_attrs *)0)->lma_compat));
+ LASSERTF((int)offsetof(struct lustre_mdt_attrs, lma_incompat) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct lustre_mdt_attrs, lma_incompat));
+ LASSERTF((int)sizeof(((struct lustre_mdt_attrs *)0)->lma_incompat) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lustre_mdt_attrs *)0)->lma_incompat));
+ LASSERTF((int)offsetof(struct lustre_mdt_attrs, lma_self_fid) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct lustre_mdt_attrs, lma_self_fid));
+ LASSERTF((int)sizeof(((struct lustre_mdt_attrs *)0)->lma_self_fid) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct lustre_mdt_attrs *)0)->lma_self_fid));
+ LASSERTF(LMAI_RELEASED == 0x00000001UL, "found 0x%.8xUL\n",
+ (unsigned)LMAI_RELEASED);
+ LASSERTF(LMAC_HSM == 0x00000001UL, "found 0x%.8xUL\n",
+ (unsigned)LMAC_HSM);
+ LASSERTF(LMAC_SOM == 0x00000002UL, "found 0x%.8xUL\n",
+ (unsigned)LMAC_SOM);
+ LASSERTF(OBJ_CREATE == 1, "found %lld\n",
+ (long long)OBJ_CREATE);
+ LASSERTF(OBJ_DESTROY == 2, "found %lld\n",
+ (long long)OBJ_DESTROY);
+ LASSERTF(OBJ_REF_ADD == 3, "found %lld\n",
+ (long long)OBJ_REF_ADD);
+ LASSERTF(OBJ_REF_DEL == 4, "found %lld\n",
+ (long long)OBJ_REF_DEL);
+ LASSERTF(OBJ_ATTR_SET == 5, "found %lld\n",
+ (long long)OBJ_ATTR_SET);
+ LASSERTF(OBJ_ATTR_GET == 6, "found %lld\n",
+ (long long)OBJ_ATTR_GET);
+ LASSERTF(OBJ_XATTR_SET == 7, "found %lld\n",
+ (long long)OBJ_XATTR_SET);
+ LASSERTF(OBJ_XATTR_GET == 8, "found %lld\n",
+ (long long)OBJ_XATTR_GET);
+ LASSERTF(OBJ_INDEX_LOOKUP == 9, "found %lld\n",
+ (long long)OBJ_INDEX_LOOKUP);
+ LASSERTF(OBJ_INDEX_INSERT == 10, "found %lld\n",
+ (long long)OBJ_INDEX_INSERT);
+ LASSERTF(OBJ_INDEX_DELETE == 11, "found %lld\n",
+ (long long)OBJ_INDEX_DELETE);
+
+ /* Checks for struct som_attrs */
+ LASSERTF((int)sizeof(struct som_attrs) == 40, "found %lld\n",
+ (long long)(int)sizeof(struct som_attrs));
+ LASSERTF((int)offsetof(struct som_attrs, som_compat) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct som_attrs, som_compat));
+ LASSERTF((int)sizeof(((struct som_attrs *)0)->som_compat) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct som_attrs *)0)->som_compat));
+ LASSERTF((int)offsetof(struct som_attrs, som_incompat) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct som_attrs, som_incompat));
+ LASSERTF((int)sizeof(((struct som_attrs *)0)->som_incompat) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct som_attrs *)0)->som_incompat));
+ LASSERTF((int)offsetof(struct som_attrs, som_ioepoch) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct som_attrs, som_ioepoch));
+ LASSERTF((int)sizeof(((struct som_attrs *)0)->som_ioepoch) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct som_attrs *)0)->som_ioepoch));
+ LASSERTF((int)offsetof(struct som_attrs, som_size) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct som_attrs, som_size));
+ LASSERTF((int)sizeof(((struct som_attrs *)0)->som_size) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct som_attrs *)0)->som_size));
+ LASSERTF((int)offsetof(struct som_attrs, som_blocks) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct som_attrs, som_blocks));
+ LASSERTF((int)sizeof(((struct som_attrs *)0)->som_blocks) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct som_attrs *)0)->som_blocks));
+ LASSERTF((int)offsetof(struct som_attrs, som_mountid) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct som_attrs, som_mountid));
+ LASSERTF((int)sizeof(((struct som_attrs *)0)->som_mountid) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct som_attrs *)0)->som_mountid));
+
+ /* Checks for struct hsm_attrs */
+ LASSERTF((int)sizeof(struct hsm_attrs) == 24, "found %lld\n",
+ (long long)(int)sizeof(struct hsm_attrs));
+ LASSERTF((int)offsetof(struct hsm_attrs, hsm_compat) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_attrs, hsm_compat));
+ LASSERTF((int)sizeof(((struct hsm_attrs *)0)->hsm_compat) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_attrs *)0)->hsm_compat));
+ LASSERTF((int)offsetof(struct hsm_attrs, hsm_flags) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_attrs, hsm_flags));
+ LASSERTF((int)sizeof(((struct hsm_attrs *)0)->hsm_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_attrs *)0)->hsm_flags));
+ LASSERTF((int)offsetof(struct hsm_attrs, hsm_arch_id) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_attrs, hsm_arch_id));
+ LASSERTF((int)sizeof(((struct hsm_attrs *)0)->hsm_arch_id) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_attrs *)0)->hsm_arch_id));
+ LASSERTF((int)offsetof(struct hsm_attrs, hsm_arch_ver) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_attrs, hsm_arch_ver));
+ LASSERTF((int)sizeof(((struct hsm_attrs *)0)->hsm_arch_ver) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_attrs *)0)->hsm_arch_ver));
+
+ /* Checks for struct ost_id */
+ LASSERTF((int)sizeof(struct ost_id) == 16, "found %lld\n",
+ (long long)(int)sizeof(struct ost_id));
+ LASSERTF((int)offsetof(struct ost_id, oi) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct ost_id, oi));
+ LASSERTF((int)sizeof(((struct ost_id *)0)->oi) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct ost_id *)0)->oi));
+ LASSERTF(LUSTRE_FID_INIT_OID == 1, "found %lld\n",
+ (long long)LUSTRE_FID_INIT_OID);
+ LASSERTF(FID_SEQ_OST_MDT0 == 0, "found %lld\n",
+ (long long)FID_SEQ_OST_MDT0);
+ LASSERTF(FID_SEQ_LLOG == 1, "found %lld\n",
+ (long long)FID_SEQ_LLOG);
+ LASSERTF(FID_SEQ_ECHO == 2, "found %lld\n",
+ (long long)FID_SEQ_ECHO);
+ LASSERTF(FID_SEQ_OST_MDT1 == 3, "found %lld\n",
+ (long long)FID_SEQ_OST_MDT1);
+ LASSERTF(FID_SEQ_OST_MAX == 9, "found %lld\n",
+ (long long)FID_SEQ_OST_MAX);
+ LASSERTF(FID_SEQ_RSVD == 11, "found %lld\n",
+ (long long)FID_SEQ_RSVD);
+ LASSERTF(FID_SEQ_IGIF == 12, "found %lld\n",
+ (long long)FID_SEQ_IGIF);
+ LASSERTF(FID_SEQ_IGIF_MAX == 0x00000000ffffffffULL, "found 0x%.16llxULL\n",
+ (long long)FID_SEQ_IGIF_MAX);
+ LASSERTF(FID_SEQ_IDIF == 0x0000000100000000ULL, "found 0x%.16llxULL\n",
+ (long long)FID_SEQ_IDIF);
+ LASSERTF(FID_SEQ_IDIF_MAX == 0x00000001ffffffffULL, "found 0x%.16llxULL\n",
+ (long long)FID_SEQ_IDIF_MAX);
+ LASSERTF(FID_SEQ_START == 0x0000000200000000ULL, "found 0x%.16llxULL\n",
+ (long long)FID_SEQ_START);
+ LASSERTF(FID_SEQ_LOCAL_FILE == 0x0000000200000001ULL, "found 0x%.16llxULL\n",
+ (long long)FID_SEQ_LOCAL_FILE);
+ LASSERTF(FID_SEQ_DOT_LUSTRE == 0x0000000200000002ULL, "found 0x%.16llxULL\n",
+ (long long)FID_SEQ_DOT_LUSTRE);
+ LASSERTF(FID_SEQ_SPECIAL == 0x0000000200000004ULL, "found 0x%.16llxULL\n",
+ (long long)FID_SEQ_SPECIAL);
+ LASSERTF(FID_SEQ_QUOTA == 0x0000000200000005ULL, "found 0x%.16llxULL\n",
+ (long long)FID_SEQ_QUOTA);
+ LASSERTF(FID_SEQ_QUOTA_GLB == 0x0000000200000006ULL, "found 0x%.16llxULL\n",
+ (long long)FID_SEQ_QUOTA_GLB);
+ LASSERTF(FID_SEQ_ROOT == 0x0000000200000007ULL, "found 0x%.16llxULL\n",
+ (long long)FID_SEQ_ROOT);
+ LASSERTF(FID_SEQ_NORMAL == 0x0000000200000400ULL, "found 0x%.16llxULL\n",
+ (long long)FID_SEQ_NORMAL);
+ LASSERTF(FID_SEQ_LOV_DEFAULT == 0xffffffffffffffffULL, "found 0x%.16llxULL\n",
+ (long long)FID_SEQ_LOV_DEFAULT);
+ LASSERTF(FID_OID_SPECIAL_BFL == 0x00000001UL, "found 0x%.8xUL\n",
+ (unsigned)FID_OID_SPECIAL_BFL);
+ LASSERTF(FID_OID_DOT_LUSTRE == 0x00000001UL, "found 0x%.8xUL\n",
+ (unsigned)FID_OID_DOT_LUSTRE);
+ LASSERTF(FID_OID_DOT_LUSTRE_OBF == 0x00000002UL, "found 0x%.8xUL\n",
+ (unsigned)FID_OID_DOT_LUSTRE_OBF);
+
+ /* Checks for struct lu_dirent */
+ LASSERTF((int)sizeof(struct lu_dirent) == 32, "found %lld\n",
+ (long long)(int)sizeof(struct lu_dirent));
+ LASSERTF((int)offsetof(struct lu_dirent, lde_fid) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct lu_dirent, lde_fid));
+ LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_fid) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct lu_dirent *)0)->lde_fid));
+ LASSERTF((int)offsetof(struct lu_dirent, lde_hash) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct lu_dirent, lde_hash));
+ LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_hash) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lu_dirent *)0)->lde_hash));
+ LASSERTF((int)offsetof(struct lu_dirent, lde_reclen) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct lu_dirent, lde_reclen));
+ LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_reclen) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct lu_dirent *)0)->lde_reclen));
+ LASSERTF((int)offsetof(struct lu_dirent, lde_namelen) == 26, "found %lld\n",
+ (long long)(int)offsetof(struct lu_dirent, lde_namelen));
+ LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_namelen) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct lu_dirent *)0)->lde_namelen));
+ LASSERTF((int)offsetof(struct lu_dirent, lde_attrs) == 28, "found %lld\n",
+ (long long)(int)offsetof(struct lu_dirent, lde_attrs));
+ LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_attrs) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lu_dirent *)0)->lde_attrs));
+ LASSERTF((int)offsetof(struct lu_dirent, lde_name[0]) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct lu_dirent, lde_name[0]));
+ LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_name[0]) == 1, "found %lld\n",
+ (long long)(int)sizeof(((struct lu_dirent *)0)->lde_name[0]));
+ LASSERTF(LUDA_FID == 0x00000001UL, "found 0x%.8xUL\n",
+ (unsigned)LUDA_FID);
+ LASSERTF(LUDA_TYPE == 0x00000002UL, "found 0x%.8xUL\n",
+ (unsigned)LUDA_TYPE);
+ LASSERTF(LUDA_64BITHASH == 0x00000004UL, "found 0x%.8xUL\n",
+ (unsigned)LUDA_64BITHASH);
+
+ /* Checks for struct luda_type */
+ LASSERTF((int)sizeof(struct luda_type) == 2, "found %lld\n",
+ (long long)(int)sizeof(struct luda_type));
+ LASSERTF((int)offsetof(struct luda_type, lt_type) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct luda_type, lt_type));
+ LASSERTF((int)sizeof(((struct luda_type *)0)->lt_type) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct luda_type *)0)->lt_type));
+
+ /* Checks for struct lu_dirpage */
+ LASSERTF((int)sizeof(struct lu_dirpage) == 24, "found %lld\n",
+ (long long)(int)sizeof(struct lu_dirpage));
+ LASSERTF((int)offsetof(struct lu_dirpage, ldp_hash_start) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct lu_dirpage, ldp_hash_start));
+ LASSERTF((int)sizeof(((struct lu_dirpage *)0)->ldp_hash_start) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lu_dirpage *)0)->ldp_hash_start));
+ LASSERTF((int)offsetof(struct lu_dirpage, ldp_hash_end) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct lu_dirpage, ldp_hash_end));
+ LASSERTF((int)sizeof(((struct lu_dirpage *)0)->ldp_hash_end) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lu_dirpage *)0)->ldp_hash_end));
+ LASSERTF((int)offsetof(struct lu_dirpage, ldp_flags) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct lu_dirpage, ldp_flags));
+ LASSERTF((int)sizeof(((struct lu_dirpage *)0)->ldp_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lu_dirpage *)0)->ldp_flags));
+ LASSERTF((int)offsetof(struct lu_dirpage, ldp_pad0) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct lu_dirpage, ldp_pad0));
+ LASSERTF((int)sizeof(((struct lu_dirpage *)0)->ldp_pad0) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lu_dirpage *)0)->ldp_pad0));
+ LASSERTF((int)offsetof(struct lu_dirpage, ldp_entries[0]) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct lu_dirpage, ldp_entries[0]));
+ LASSERTF((int)sizeof(((struct lu_dirpage *)0)->ldp_entries[0]) == 32, "found %lld\n",
+ (long long)(int)sizeof(((struct lu_dirpage *)0)->ldp_entries[0]));
+ LASSERTF(LDF_EMPTY == 1, "found %lld\n",
+ (long long)LDF_EMPTY);
+ LASSERTF(LDF_COLLIDE == 2, "found %lld\n",
+ (long long)LDF_COLLIDE);
+ LASSERTF(LU_PAGE_SIZE == 4096, "found %lld\n",
+ (long long)LU_PAGE_SIZE);
+ /* Checks for union lu_page */
+ LASSERTF((int)sizeof(union lu_page) == 4096, "found %lld\n",
+ (long long)(int)sizeof(union lu_page));
+
+ /* Checks for struct lustre_handle */
+ LASSERTF((int)sizeof(struct lustre_handle) == 8, "found %lld\n",
+ (long long)(int)sizeof(struct lustre_handle));
+ LASSERTF((int)offsetof(struct lustre_handle, cookie) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct lustre_handle, cookie));
+ LASSERTF((int)sizeof(((struct lustre_handle *)0)->cookie) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lustre_handle *)0)->cookie));
+
+ /* Checks for struct lustre_msg_v2 */
+ LASSERTF((int)sizeof(struct lustre_msg_v2) == 32, "found %lld\n",
+ (long long)(int)sizeof(struct lustre_msg_v2));
+ LASSERTF((int)offsetof(struct lustre_msg_v2, lm_bufcount) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct lustre_msg_v2, lm_bufcount));
+ LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_bufcount) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_bufcount));
+ LASSERTF((int)offsetof(struct lustre_msg_v2, lm_secflvr) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct lustre_msg_v2, lm_secflvr));
+ LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_secflvr) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_secflvr));
+ LASSERTF((int)offsetof(struct lustre_msg_v2, lm_magic) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct lustre_msg_v2, lm_magic));
+ LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_magic) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_magic));
+ LASSERTF((int)offsetof(struct lustre_msg_v2, lm_repsize) == 12, "found %lld\n",
+ (long long)(int)offsetof(struct lustre_msg_v2, lm_repsize));
+ LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_repsize) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_repsize));
+ LASSERTF((int)offsetof(struct lustre_msg_v2, lm_cksum) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct lustre_msg_v2, lm_cksum));
+ LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_cksum) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_cksum));
+ LASSERTF((int)offsetof(struct lustre_msg_v2, lm_flags) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct lustre_msg_v2, lm_flags));
+ LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_flags));
+ LASSERTF((int)offsetof(struct lustre_msg_v2, lm_padding_2) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct lustre_msg_v2, lm_padding_2));
+ LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_padding_2) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_padding_2));
+ LASSERTF((int)offsetof(struct lustre_msg_v2, lm_padding_3) == 28, "found %lld\n",
+ (long long)(int)offsetof(struct lustre_msg_v2, lm_padding_3));
+ LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_padding_3) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_padding_3));
+ LASSERTF((int)offsetof(struct lustre_msg_v2, lm_buflens[0]) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct lustre_msg_v2, lm_buflens[0]));
+ LASSERTF((int)sizeof(((struct lustre_msg_v2 *)0)->lm_buflens[0]) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lustre_msg_v2 *)0)->lm_buflens[0]));
+ LASSERTF(LUSTRE_MSG_MAGIC_V1 == 0x0BD00BD0, "found 0x%.8x\n",
+ LUSTRE_MSG_MAGIC_V1);
+ LASSERTF(LUSTRE_MSG_MAGIC_V2 == 0x0BD00BD3, "found 0x%.8x\n",
+ LUSTRE_MSG_MAGIC_V2);
+ LASSERTF(LUSTRE_MSG_MAGIC_V1_SWABBED == 0xD00BD00B, "found 0x%.8x\n",
+ LUSTRE_MSG_MAGIC_V1_SWABBED);
+ LASSERTF(LUSTRE_MSG_MAGIC_V2_SWABBED == 0xD30BD00B, "found 0x%.8x\n",
+ LUSTRE_MSG_MAGIC_V2_SWABBED);
+
+ /* Checks for struct ptlrpc_body */
+ LASSERTF((int)sizeof(struct ptlrpc_body_v3) == 184, "found %lld\n",
+ (long long)(int)sizeof(struct ptlrpc_body_v3));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_handle) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_handle));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_handle) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_handle));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_type) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_type));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_type) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_type));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_version) == 12, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_version));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_version) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_version));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_opc) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_opc));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_opc) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_opc));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_status) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_status));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_status) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_status));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_xid) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_xid));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_seen) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_seen));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_committed) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_committed));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_transno) == 48, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_transno));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_transno) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_transno));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_flags) == 56, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_flags));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_flags));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_op_flags) == 60, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_op_flags));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_op_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_op_flags));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_conn_cnt) == 64, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_conn_cnt));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_conn_cnt) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_conn_cnt));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_timeout) == 68, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_timeout));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_timeout) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_timeout));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_service_time) == 72, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_service_time));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_service_time) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_service_time));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_limit) == 76, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_limit));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_limit) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_limit));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_slv) == 80, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_slv));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_slv) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_slv));
+ CLASSERT(PTLRPC_NUM_VERSIONS == 4);
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_pre_versions) == 88, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_pre_versions));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions) == 32, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding) == 120, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding) == 32, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding));
+ CLASSERT(JOBSTATS_JOBID_SIZE == 32);
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_jobid) == 152, "found %lld\n",
+ (long long)(int)offsetof(struct ptlrpc_body_v3, pb_jobid));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_jobid) == 32, "found %lld\n",
+ (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_jobid));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_handle) == (int)offsetof(struct ptlrpc_body_v2, pb_handle), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_handle), (int)offsetof(struct ptlrpc_body_v2, pb_handle));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_handle) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_handle), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_handle), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_handle));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_type) == (int)offsetof(struct ptlrpc_body_v2, pb_type), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_type), (int)offsetof(struct ptlrpc_body_v2, pb_type));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_type) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_type), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_type), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_type));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_version) == (int)offsetof(struct ptlrpc_body_v2, pb_version), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_version), (int)offsetof(struct ptlrpc_body_v2, pb_version));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_version) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_version), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_version), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_version));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_opc) == (int)offsetof(struct ptlrpc_body_v2, pb_opc), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_opc), (int)offsetof(struct ptlrpc_body_v2, pb_opc));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_opc) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_opc), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_opc), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_opc));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_status) == (int)offsetof(struct ptlrpc_body_v2, pb_status), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_status), (int)offsetof(struct ptlrpc_body_v2, pb_status));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_status) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_status), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_status), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_status));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_xid) == (int)offsetof(struct ptlrpc_body_v2, pb_last_xid), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_last_xid), (int)offsetof(struct ptlrpc_body_v2, pb_last_xid));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_xid), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_xid));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_seen) == (int)offsetof(struct ptlrpc_body_v2, pb_last_seen), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_last_seen), (int)offsetof(struct ptlrpc_body_v2, pb_last_seen));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_seen), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_seen));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_committed) == (int)offsetof(struct ptlrpc_body_v2, pb_last_committed), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_last_committed), (int)offsetof(struct ptlrpc_body_v2, pb_last_committed));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_committed), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_committed));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_transno) == (int)offsetof(struct ptlrpc_body_v2, pb_transno), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_transno), (int)offsetof(struct ptlrpc_body_v2, pb_transno));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_transno) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_transno), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_transno), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_transno));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_flags) == (int)offsetof(struct ptlrpc_body_v2, pb_flags), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_flags), (int)offsetof(struct ptlrpc_body_v2, pb_flags));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_flags) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_flags), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_flags), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_flags));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_op_flags) == (int)offsetof(struct ptlrpc_body_v2, pb_op_flags), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_op_flags), (int)offsetof(struct ptlrpc_body_v2, pb_op_flags));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_op_flags) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_op_flags), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_op_flags), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_op_flags));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_conn_cnt) == (int)offsetof(struct ptlrpc_body_v2, pb_conn_cnt), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_conn_cnt), (int)offsetof(struct ptlrpc_body_v2, pb_conn_cnt));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_conn_cnt) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_conn_cnt), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_conn_cnt), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_conn_cnt));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_timeout) == (int)offsetof(struct ptlrpc_body_v2, pb_timeout), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_timeout), (int)offsetof(struct ptlrpc_body_v2, pb_timeout));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_timeout) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_timeout), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_timeout), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_timeout));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_service_time) == (int)offsetof(struct ptlrpc_body_v2, pb_service_time), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_service_time), (int)offsetof(struct ptlrpc_body_v2, pb_service_time));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_service_time) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_service_time), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_service_time), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_service_time));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_limit) == (int)offsetof(struct ptlrpc_body_v2, pb_limit), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_limit), (int)offsetof(struct ptlrpc_body_v2, pb_limit));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_limit) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_limit), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_limit), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_limit));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_slv) == (int)offsetof(struct ptlrpc_body_v2, pb_slv), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_slv), (int)offsetof(struct ptlrpc_body_v2, pb_slv));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_slv) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_slv), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_slv), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_slv));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_pre_versions) == (int)offsetof(struct ptlrpc_body_v2, pb_pre_versions), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_pre_versions), (int)offsetof(struct ptlrpc_body_v2, pb_pre_versions));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_pre_versions), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_pre_versions));
+ LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding) == (int)offsetof(struct ptlrpc_body_v2, pb_padding), "%d != %d\n",
+ (int)offsetof(struct ptlrpc_body_v3, pb_padding), (int)offsetof(struct ptlrpc_body_v2, pb_padding));
+ LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding), "%d != %d\n",
+ (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding));
+ LASSERTF(MSG_PTLRPC_BODY_OFF == 0, "found %lld\n",
+ (long long)MSG_PTLRPC_BODY_OFF);
+ LASSERTF(REQ_REC_OFF == 1, "found %lld\n",
+ (long long)REQ_REC_OFF);
+ LASSERTF(REPLY_REC_OFF == 1, "found %lld\n",
+ (long long)REPLY_REC_OFF);
+ LASSERTF(DLM_LOCKREQ_OFF == 1, "found %lld\n",
+ (long long)DLM_LOCKREQ_OFF);
+ LASSERTF(DLM_REQ_REC_OFF == 2, "found %lld\n",
+ (long long)DLM_REQ_REC_OFF);
+ LASSERTF(DLM_INTENT_IT_OFF == 2, "found %lld\n",
+ (long long)DLM_INTENT_IT_OFF);
+ LASSERTF(DLM_INTENT_REC_OFF == 3, "found %lld\n",
+ (long long)DLM_INTENT_REC_OFF);
+ LASSERTF(DLM_LOCKREPLY_OFF == 1, "found %lld\n",
+ (long long)DLM_LOCKREPLY_OFF);
+ LASSERTF(DLM_REPLY_REC_OFF == 2, "found %lld\n",
+ (long long)DLM_REPLY_REC_OFF);
+ LASSERTF(MSG_PTLRPC_HEADER_OFF == 31, "found %lld\n",
+ (long long)MSG_PTLRPC_HEADER_OFF);
+ LASSERTF(PTLRPC_MSG_VERSION == 0x00000003, "found 0x%.8x\n",
+ PTLRPC_MSG_VERSION);
+ LASSERTF(LUSTRE_VERSION_MASK == 0xffff0000, "found 0x%.8x\n",
+ LUSTRE_VERSION_MASK);
+ LASSERTF(LUSTRE_OBD_VERSION == 0x00010000, "found 0x%.8x\n",
+ LUSTRE_OBD_VERSION);
+ LASSERTF(LUSTRE_MDS_VERSION == 0x00020000, "found 0x%.8x\n",
+ LUSTRE_MDS_VERSION);
+ LASSERTF(LUSTRE_OST_VERSION == 0x00030000, "found 0x%.8x\n",
+ LUSTRE_OST_VERSION);
+ LASSERTF(LUSTRE_DLM_VERSION == 0x00040000, "found 0x%.8x\n",
+ LUSTRE_DLM_VERSION);
+ LASSERTF(LUSTRE_LOG_VERSION == 0x00050000, "found 0x%.8x\n",
+ LUSTRE_LOG_VERSION);
+ LASSERTF(LUSTRE_MGS_VERSION == 0x00060000, "found 0x%.8x\n",
+ LUSTRE_MGS_VERSION);
+ LASSERTF(MSGHDR_AT_SUPPORT == 1, "found %lld\n",
+ (long long)MSGHDR_AT_SUPPORT);
+ LASSERTF(MSGHDR_CKSUM_INCOMPAT18 == 2, "found %lld\n",
+ (long long)MSGHDR_CKSUM_INCOMPAT18);
+ LASSERTF(MSG_OP_FLAG_MASK == 0xffff0000UL, "found 0x%.8xUL\n",
+ (unsigned)MSG_OP_FLAG_MASK);
+ LASSERTF(MSG_OP_FLAG_SHIFT == 16, "found %lld\n",
+ (long long)MSG_OP_FLAG_SHIFT);
+ LASSERTF(MSG_GEN_FLAG_MASK == 0x0000ffffUL, "found 0x%.8xUL\n",
+ (unsigned)MSG_GEN_FLAG_MASK);
+ LASSERTF(MSG_LAST_REPLAY == 0x00000001UL, "found 0x%.8xUL\n",
+ (unsigned)MSG_LAST_REPLAY);
+ LASSERTF(MSG_RESENT == 0x00000002UL, "found 0x%.8xUL\n",
+ (unsigned)MSG_RESENT);
+ LASSERTF(MSG_REPLAY == 0x00000004UL, "found 0x%.8xUL\n",
+ (unsigned)MSG_REPLAY);
+ LASSERTF(MSG_DELAY_REPLAY == 0x00000010UL, "found 0x%.8xUL\n",
+ (unsigned)MSG_DELAY_REPLAY);
+ LASSERTF(MSG_VERSION_REPLAY == 0x00000020UL, "found 0x%.8xUL\n",
+ (unsigned)MSG_VERSION_REPLAY);
+ LASSERTF(MSG_REQ_REPLAY_DONE == 0x00000040UL, "found 0x%.8xUL\n",
+ (unsigned)MSG_REQ_REPLAY_DONE);
+ LASSERTF(MSG_LOCK_REPLAY_DONE == 0x00000080UL, "found 0x%.8xUL\n",
+ (unsigned)MSG_LOCK_REPLAY_DONE);
+ LASSERTF(MSG_CONNECT_RECOVERING == 0x00000001UL, "found 0x%.8xUL\n",
+ (unsigned)MSG_CONNECT_RECOVERING);
+ LASSERTF(MSG_CONNECT_RECONNECT == 0x00000002UL, "found 0x%.8xUL\n",
+ (unsigned)MSG_CONNECT_RECONNECT);
+ LASSERTF(MSG_CONNECT_REPLAYABLE == 0x00000004UL, "found 0x%.8xUL\n",
+ (unsigned)MSG_CONNECT_REPLAYABLE);
+ LASSERTF(MSG_CONNECT_LIBCLIENT == 0x00000010UL, "found 0x%.8xUL\n",
+ (unsigned)MSG_CONNECT_LIBCLIENT);
+ LASSERTF(MSG_CONNECT_INITIAL == 0x00000020UL, "found 0x%.8xUL\n",
+ (unsigned)MSG_CONNECT_INITIAL);
+ LASSERTF(MSG_CONNECT_ASYNC == 0x00000040UL, "found 0x%.8xUL\n",
+ (unsigned)MSG_CONNECT_ASYNC);
+ LASSERTF(MSG_CONNECT_NEXT_VER == 0x00000080UL, "found 0x%.8xUL\n",
+ (unsigned)MSG_CONNECT_NEXT_VER);
+ LASSERTF(MSG_CONNECT_TRANSNO == 0x00000100UL, "found 0x%.8xUL\n",
+ (unsigned)MSG_CONNECT_TRANSNO);
+
+ /* Checks for struct obd_connect_data */
+ LASSERTF((int)sizeof(struct obd_connect_data) == 192, "found %lld\n",
+ (long long)(int)sizeof(struct obd_connect_data));
+ LASSERTF((int)offsetof(struct obd_connect_data, ocd_connect_flags) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, ocd_connect_flags));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_connect_flags) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_connect_flags));
+ LASSERTF((int)offsetof(struct obd_connect_data, ocd_version) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, ocd_version));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_version) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_version));
+ LASSERTF((int)offsetof(struct obd_connect_data, ocd_grant) == 12, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, ocd_grant));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_grant) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_grant));
+ LASSERTF((int)offsetof(struct obd_connect_data, ocd_index) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, ocd_index));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_index) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_index));
+ LASSERTF((int)offsetof(struct obd_connect_data, ocd_brw_size) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, ocd_brw_size));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_brw_size) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_brw_size));
+ LASSERTF((int)offsetof(struct obd_connect_data, ocd_ibits_known) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, ocd_ibits_known));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_ibits_known) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_ibits_known));
+ LASSERTF((int)offsetof(struct obd_connect_data, ocd_blocksize) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, ocd_blocksize));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_blocksize) == 1, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_blocksize));
+ LASSERTF((int)offsetof(struct obd_connect_data, ocd_inodespace) == 33, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, ocd_inodespace));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_inodespace) == 1, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_inodespace));
+ LASSERTF((int)offsetof(struct obd_connect_data, ocd_grant_extent) == 34, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, ocd_grant_extent));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_grant_extent) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_grant_extent));
+ LASSERTF((int)offsetof(struct obd_connect_data, ocd_unused) == 36, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, ocd_unused));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_unused) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_unused));
+ LASSERTF((int)offsetof(struct obd_connect_data, ocd_transno) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, ocd_transno));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_transno) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_transno));
+ LASSERTF((int)offsetof(struct obd_connect_data, ocd_group) == 48, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, ocd_group));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_group) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_group));
+ LASSERTF((int)offsetof(struct obd_connect_data, ocd_cksum_types) == 52, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, ocd_cksum_types));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_cksum_types) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_cksum_types));
+ LASSERTF((int)offsetof(struct obd_connect_data, ocd_max_easize) == 56, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, ocd_max_easize));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_max_easize) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_max_easize));
+ LASSERTF((int)offsetof(struct obd_connect_data, ocd_instance) == 60, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, ocd_instance));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_instance) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_instance));
+ LASSERTF((int)offsetof(struct obd_connect_data, ocd_maxbytes) == 64, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, ocd_maxbytes));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_maxbytes) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_maxbytes));
+ LASSERTF((int)offsetof(struct obd_connect_data, padding1) == 72, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, padding1));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding1) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->padding1));
+ LASSERTF((int)offsetof(struct obd_connect_data, padding2) == 80, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, padding2));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding2) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->padding2));
+ LASSERTF((int)offsetof(struct obd_connect_data, padding3) == 88, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, padding3));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding3) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->padding3));
+ LASSERTF((int)offsetof(struct obd_connect_data, padding4) == 96, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, padding4));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding4) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->padding4));
+ LASSERTF((int)offsetof(struct obd_connect_data, padding5) == 104, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, padding5));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding5) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->padding5));
+ LASSERTF((int)offsetof(struct obd_connect_data, padding6) == 112, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, padding6));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding6) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->padding6));
+ LASSERTF((int)offsetof(struct obd_connect_data, padding7) == 120, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, padding7));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding7) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->padding7));
+ LASSERTF((int)offsetof(struct obd_connect_data, padding8) == 128, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, padding8));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding8) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->padding8));
+ LASSERTF((int)offsetof(struct obd_connect_data, padding9) == 136, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, padding9));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding9) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->padding9));
+ LASSERTF((int)offsetof(struct obd_connect_data, paddingA) == 144, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, paddingA));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->paddingA) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->paddingA));
+ LASSERTF((int)offsetof(struct obd_connect_data, paddingB) == 152, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, paddingB));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->paddingB) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->paddingB));
+ LASSERTF((int)offsetof(struct obd_connect_data, paddingC) == 160, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, paddingC));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->paddingC) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->paddingC));
+ LASSERTF((int)offsetof(struct obd_connect_data, paddingD) == 168, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, paddingD));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->paddingD) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->paddingD));
+ LASSERTF((int)offsetof(struct obd_connect_data, paddingE) == 176, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, paddingE));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->paddingE) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->paddingE));
+ LASSERTF((int)offsetof(struct obd_connect_data, paddingF) == 184, "found %lld\n",
+ (long long)(int)offsetof(struct obd_connect_data, paddingF));
+ LASSERTF((int)sizeof(((struct obd_connect_data *)0)->paddingF) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_connect_data *)0)->paddingF));
+ LASSERTF(OBD_CONNECT_RDONLY == 0x1ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_RDONLY);
+ LASSERTF(OBD_CONNECT_INDEX == 0x2ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_INDEX);
+ LASSERTF(OBD_CONNECT_MDS == 0x4ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_MDS);
+ LASSERTF(OBD_CONNECT_GRANT == 0x8ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_GRANT);
+ LASSERTF(OBD_CONNECT_SRVLOCK == 0x10ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_SRVLOCK);
+ LASSERTF(OBD_CONNECT_VERSION == 0x20ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_VERSION);
+ LASSERTF(OBD_CONNECT_REQPORTAL == 0x40ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_REQPORTAL);
+ LASSERTF(OBD_CONNECT_ACL == 0x80ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_ACL);
+ LASSERTF(OBD_CONNECT_XATTR == 0x100ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_XATTR);
+ LASSERTF(OBD_CONNECT_CROW == 0x200ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_CROW);
+ LASSERTF(OBD_CONNECT_TRUNCLOCK == 0x400ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_TRUNCLOCK);
+ LASSERTF(OBD_CONNECT_TRANSNO == 0x800ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_TRANSNO);
+ LASSERTF(OBD_CONNECT_IBITS == 0x1000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_IBITS);
+ LASSERTF(OBD_CONNECT_JOIN == 0x2000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_JOIN);
+ LASSERTF(OBD_CONNECT_ATTRFID == 0x4000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_ATTRFID);
+ LASSERTF(OBD_CONNECT_NODEVOH == 0x8000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_NODEVOH);
+ LASSERTF(OBD_CONNECT_RMT_CLIENT == 0x10000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_RMT_CLIENT);
+ LASSERTF(OBD_CONNECT_RMT_CLIENT_FORCE == 0x20000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_RMT_CLIENT_FORCE);
+ LASSERTF(OBD_CONNECT_BRW_SIZE == 0x40000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_BRW_SIZE);
+ LASSERTF(OBD_CONNECT_QUOTA64 == 0x80000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_QUOTA64);
+ LASSERTF(OBD_CONNECT_MDS_CAPA == 0x100000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_MDS_CAPA);
+ LASSERTF(OBD_CONNECT_OSS_CAPA == 0x200000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_OSS_CAPA);
+ LASSERTF(OBD_CONNECT_CANCELSET == 0x400000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_CANCELSET);
+ LASSERTF(OBD_CONNECT_SOM == 0x800000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_SOM);
+ LASSERTF(OBD_CONNECT_AT == 0x1000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_AT);
+ LASSERTF(OBD_CONNECT_LRU_RESIZE == 0x2000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_LRU_RESIZE);
+ LASSERTF(OBD_CONNECT_MDS_MDS == 0x4000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_MDS_MDS);
+ LASSERTF(OBD_CONNECT_REAL == 0x8000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_REAL);
+ LASSERTF(OBD_CONNECT_CHANGE_QS == 0x10000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_CHANGE_QS);
+ LASSERTF(OBD_CONNECT_CKSUM == 0x20000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_CKSUM);
+ LASSERTF(OBD_CONNECT_FID == 0x40000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_FID);
+ LASSERTF(OBD_CONNECT_VBR == 0x80000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_VBR);
+ LASSERTF(OBD_CONNECT_LOV_V3 == 0x100000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_LOV_V3);
+ LASSERTF(OBD_CONNECT_GRANT_SHRINK == 0x200000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_GRANT_SHRINK);
+ LASSERTF(OBD_CONNECT_SKIP_ORPHAN == 0x400000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_SKIP_ORPHAN);
+ LASSERTF(OBD_CONNECT_MAX_EASIZE == 0x800000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_MAX_EASIZE);
+ LASSERTF(OBD_CONNECT_FULL20 == 0x1000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_FULL20);
+ LASSERTF(OBD_CONNECT_LAYOUTLOCK == 0x2000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_LAYOUTLOCK);
+ LASSERTF(OBD_CONNECT_64BITHASH == 0x4000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_64BITHASH);
+ LASSERTF(OBD_CONNECT_MAXBYTES == 0x8000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_MAXBYTES);
+ LASSERTF(OBD_CONNECT_IMP_RECOV == 0x10000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_IMP_RECOV);
+ LASSERTF(OBD_CONNECT_JOBSTATS == 0x20000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_JOBSTATS);
+ LASSERTF(OBD_CONNECT_UMASK == 0x40000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_UMASK);
+ LASSERTF(OBD_CONNECT_EINPROGRESS == 0x80000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_EINPROGRESS);
+ LASSERTF(OBD_CONNECT_GRANT_PARAM == 0x100000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_GRANT_PARAM);
+ LASSERTF(OBD_CONNECT_FLOCK_OWNER == 0x200000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_FLOCK_OWNER);
+ LASSERTF(OBD_CONNECT_LVB_TYPE == 0x400000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_LVB_TYPE);
+ LASSERTF(OBD_CONNECT_NANOSEC_TIME == 0x800000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_NANOSEC_TIME);
+ LASSERTF(OBD_CONNECT_LIGHTWEIGHT == 0x1000000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_LIGHTWEIGHT);
+ LASSERTF(OBD_CONNECT_SHORTIO == 0x2000000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_SHORTIO);
+ LASSERTF(OBD_CONNECT_PINGLESS == 0x4000000000000ULL, "found 0x%.16llxULL\n",
+ OBD_CONNECT_PINGLESS);
+ LASSERTF(OBD_CKSUM_CRC32 == 0x00000001UL, "found 0x%.8xUL\n",
+ (unsigned)OBD_CKSUM_CRC32);
+ LASSERTF(OBD_CKSUM_ADLER == 0x00000002UL, "found 0x%.8xUL\n",
+ (unsigned)OBD_CKSUM_ADLER);
+ LASSERTF(OBD_CKSUM_CRC32C == 0x00000004UL, "found 0x%.8xUL\n",
+ (unsigned)OBD_CKSUM_CRC32C);
+
+ /* Checks for struct obdo */
+ LASSERTF((int)sizeof(struct obdo) == 208, "found %lld\n",
+ (long long)(int)sizeof(struct obdo));
+ LASSERTF((int)offsetof(struct obdo, o_valid) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_valid));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_valid) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_valid));
+ LASSERTF((int)offsetof(struct obdo, o_oi) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_oi));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_oi) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_oi));
+ LASSERTF((int)offsetof(struct obdo, o_parent_seq) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_parent_seq));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_parent_seq) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_parent_seq));
+ LASSERTF((int)offsetof(struct obdo, o_size) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_size));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_size) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_size));
+ LASSERTF((int)offsetof(struct obdo, o_mtime) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_mtime));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_mtime) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_mtime));
+ LASSERTF((int)offsetof(struct obdo, o_atime) == 48, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_atime));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_atime) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_atime));
+ LASSERTF((int)offsetof(struct obdo, o_ctime) == 56, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_ctime));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_ctime) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_ctime));
+ LASSERTF((int)offsetof(struct obdo, o_blocks) == 64, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_blocks));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_blocks) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_blocks));
+ LASSERTF((int)offsetof(struct obdo, o_grant) == 72, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_grant));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_grant) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_grant));
+ LASSERTF((int)offsetof(struct obdo, o_blksize) == 80, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_blksize));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_blksize) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_blksize));
+ LASSERTF((int)offsetof(struct obdo, o_mode) == 84, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_mode));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_mode) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_mode));
+ LASSERTF((int)offsetof(struct obdo, o_uid) == 88, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_uid));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_uid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_uid));
+ LASSERTF((int)offsetof(struct obdo, o_gid) == 92, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_gid));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_gid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_gid));
+ LASSERTF((int)offsetof(struct obdo, o_flags) == 96, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_flags));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_flags));
+ LASSERTF((int)offsetof(struct obdo, o_nlink) == 100, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_nlink));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_nlink) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_nlink));
+ LASSERTF((int)offsetof(struct obdo, o_parent_oid) == 104, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_parent_oid));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_parent_oid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_parent_oid));
+ LASSERTF((int)offsetof(struct obdo, o_misc) == 108, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_misc));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_misc) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_misc));
+ LASSERTF((int)offsetof(struct obdo, o_ioepoch) == 112, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_ioepoch));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_ioepoch) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_ioepoch));
+ LASSERTF((int)offsetof(struct obdo, o_stripe_idx) == 120, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_stripe_idx));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_stripe_idx) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_stripe_idx));
+ LASSERTF((int)offsetof(struct obdo, o_parent_ver) == 124, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_parent_ver));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_parent_ver) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_parent_ver));
+ LASSERTF((int)offsetof(struct obdo, o_handle) == 128, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_handle));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_handle) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_handle));
+ LASSERTF((int)offsetof(struct obdo, o_lcookie) == 136, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_lcookie));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_lcookie) == 32, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_lcookie));
+ LASSERTF((int)offsetof(struct obdo, o_uid_h) == 168, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_uid_h));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_uid_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_uid_h));
+ LASSERTF((int)offsetof(struct obdo, o_gid_h) == 172, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_gid_h));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_gid_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_gid_h));
+ LASSERTF((int)offsetof(struct obdo, o_data_version) == 176, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_data_version));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_data_version) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_data_version));
+ LASSERTF((int)offsetof(struct obdo, o_padding_4) == 184, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_padding_4));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_padding_4) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_padding_4));
+ LASSERTF((int)offsetof(struct obdo, o_padding_5) == 192, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_padding_5));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_padding_5) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_padding_5));
+ LASSERTF((int)offsetof(struct obdo, o_padding_6) == 200, "found %lld\n",
+ (long long)(int)offsetof(struct obdo, o_padding_6));
+ LASSERTF((int)sizeof(((struct obdo *)0)->o_padding_6) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obdo *)0)->o_padding_6));
+ LASSERTF(OBD_MD_FLID == (0x00000001ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLID);
+ LASSERTF(OBD_MD_FLATIME == (0x00000002ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLATIME);
+ LASSERTF(OBD_MD_FLMTIME == (0x00000004ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLMTIME);
+ LASSERTF(OBD_MD_FLCTIME == (0x00000008ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLCTIME);
+ LASSERTF(OBD_MD_FLSIZE == (0x00000010ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLSIZE);
+ LASSERTF(OBD_MD_FLBLOCKS == (0x00000020ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLBLOCKS);
+ LASSERTF(OBD_MD_FLBLKSZ == (0x00000040ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLBLKSZ);
+ LASSERTF(OBD_MD_FLMODE == (0x00000080ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLMODE);
+ LASSERTF(OBD_MD_FLTYPE == (0x00000100ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLTYPE);
+ LASSERTF(OBD_MD_FLUID == (0x00000200ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLUID);
+ LASSERTF(OBD_MD_FLGID == (0x00000400ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLGID);
+ LASSERTF(OBD_MD_FLFLAGS == (0x00000800ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLFLAGS);
+ LASSERTF(OBD_MD_FLNLINK == (0x00002000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLNLINK);
+ LASSERTF(OBD_MD_FLGENER == (0x00004000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLGENER);
+ LASSERTF(OBD_MD_FLRDEV == (0x00010000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLRDEV);
+ LASSERTF(OBD_MD_FLEASIZE == (0x00020000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLEASIZE);
+ LASSERTF(OBD_MD_LINKNAME == (0x00040000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_LINKNAME);
+ LASSERTF(OBD_MD_FLHANDLE == (0x00080000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLHANDLE);
+ LASSERTF(OBD_MD_FLCKSUM == (0x00100000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLCKSUM);
+ LASSERTF(OBD_MD_FLQOS == (0x00200000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLQOS);
+ LASSERTF(OBD_MD_FLCOOKIE == (0x00800000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLCOOKIE);
+ LASSERTF(OBD_MD_FLGROUP == (0x01000000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLGROUP);
+ LASSERTF(OBD_MD_FLFID == (0x02000000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLFID);
+ LASSERTF(OBD_MD_FLEPOCH == (0x04000000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLEPOCH);
+ LASSERTF(OBD_MD_FLGRANT == (0x08000000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLGRANT);
+ LASSERTF(OBD_MD_FLDIREA == (0x10000000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLDIREA);
+ LASSERTF(OBD_MD_FLUSRQUOTA == (0x20000000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLUSRQUOTA);
+ LASSERTF(OBD_MD_FLGRPQUOTA == (0x40000000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLGRPQUOTA);
+ LASSERTF(OBD_MD_FLMODEASIZE == (0x80000000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLMODEASIZE);
+ LASSERTF(OBD_MD_MDS == (0x0000000100000000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_MDS);
+ LASSERTF(OBD_MD_REINT == (0x0000000200000000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_REINT);
+ LASSERTF(OBD_MD_MEA == (0x0000000400000000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_MEA);
+ LASSERTF(OBD_MD_FLXATTR == (0x0000001000000000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLXATTR);
+ LASSERTF(OBD_MD_FLXATTRLS == (0x0000002000000000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLXATTRLS);
+ LASSERTF(OBD_MD_FLXATTRRM == (0x0000004000000000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLXATTRRM);
+ LASSERTF(OBD_MD_FLACL == (0x0000008000000000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLACL);
+ LASSERTF(OBD_MD_FLRMTPERM == (0x0000010000000000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLRMTPERM);
+ LASSERTF(OBD_MD_FLMDSCAPA == (0x0000020000000000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLMDSCAPA);
+ LASSERTF(OBD_MD_FLOSSCAPA == (0x0000040000000000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLOSSCAPA);
+ LASSERTF(OBD_MD_FLCKSPLIT == (0x0000080000000000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLCKSPLIT);
+ LASSERTF(OBD_MD_FLCROSSREF == (0x0000100000000000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLCROSSREF);
+ LASSERTF(OBD_MD_FLGETATTRLOCK == (0x0000200000000000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLGETATTRLOCK);
+ LASSERTF(OBD_MD_FLRMTLSETFACL == (0x0001000000000000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLRMTLSETFACL);
+ LASSERTF(OBD_MD_FLRMTLGETFACL == (0x0002000000000000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLRMTLGETFACL);
+ LASSERTF(OBD_MD_FLRMTRSETFACL == (0x0004000000000000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLRMTRSETFACL);
+ LASSERTF(OBD_MD_FLRMTRGETFACL == (0x0008000000000000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLRMTRGETFACL);
+ LASSERTF(OBD_MD_FLDATAVERSION == (0x0010000000000000ULL), "found 0x%.16llxULL\n",
+ OBD_MD_FLDATAVERSION);
+ CLASSERT(OBD_FL_INLINEDATA == 0x00000001);
+ CLASSERT(OBD_FL_OBDMDEXISTS == 0x00000002);
+ CLASSERT(OBD_FL_DELORPHAN == 0x00000004);
+ CLASSERT(OBD_FL_NORPC == 0x00000008);
+ CLASSERT(OBD_FL_IDONLY == 0x00000010);
+ CLASSERT(OBD_FL_RECREATE_OBJS == 0x00000020);
+ CLASSERT(OBD_FL_DEBUG_CHECK == 0x00000040);
+ CLASSERT(OBD_FL_NO_USRQUOTA == 0x00000100);
+ CLASSERT(OBD_FL_NO_GRPQUOTA == 0x00000200);
+ CLASSERT(OBD_FL_CREATE_CROW == 0x00000400);
+ CLASSERT(OBD_FL_SRVLOCK == 0x00000800);
+ CLASSERT(OBD_FL_CKSUM_CRC32 == 0x00001000);
+ CLASSERT(OBD_FL_CKSUM_ADLER == 0x00002000);
+ CLASSERT(OBD_FL_CKSUM_CRC32C == 0x00004000);
+ CLASSERT(OBD_FL_CKSUM_RSVD2 == 0x00008000);
+ CLASSERT(OBD_FL_CKSUM_RSVD3 == 0x00010000);
+ CLASSERT(OBD_FL_SHRINK_GRANT == 0x00020000);
+ CLASSERT(OBD_FL_MMAP == 0x00040000);
+ CLASSERT(OBD_FL_RECOV_RESEND == 0x00080000);
+ CLASSERT(OBD_FL_NOSPC_BLK == 0x00100000);
+ CLASSERT(OBD_FL_LOCAL_MASK == 0xf0000000);
+
+ /* Checks for struct lov_ost_data_v1 */
+ LASSERTF((int)sizeof(struct lov_ost_data_v1) == 24, "found %lld\n",
+ (long long)(int)sizeof(struct lov_ost_data_v1));
+ LASSERTF((int)offsetof(struct lov_ost_data_v1, l_ost_oi) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct lov_ost_data_v1, l_ost_oi));
+ LASSERTF((int)sizeof(((struct lov_ost_data_v1 *)0)->l_ost_oi) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_ost_data_v1 *)0)->l_ost_oi));
+ LASSERTF((int)offsetof(struct lov_ost_data_v1, l_ost_gen) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct lov_ost_data_v1, l_ost_gen));
+ LASSERTF((int)sizeof(((struct lov_ost_data_v1 *)0)->l_ost_gen) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_ost_data_v1 *)0)->l_ost_gen));
+ LASSERTF((int)offsetof(struct lov_ost_data_v1, l_ost_idx) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct lov_ost_data_v1, l_ost_idx));
+ LASSERTF((int)sizeof(((struct lov_ost_data_v1 *)0)->l_ost_idx) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_ost_data_v1 *)0)->l_ost_idx));
+
+ /* Checks for struct lov_mds_md_v1 */
+ LASSERTF((int)sizeof(struct lov_mds_md_v1) == 32, "found %lld\n",
+ (long long)(int)sizeof(struct lov_mds_md_v1));
+ LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_magic) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct lov_mds_md_v1, lmm_magic));
+ LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_magic) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_magic));
+ LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_pattern) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct lov_mds_md_v1, lmm_pattern));
+ LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_pattern) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_pattern));
+ LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_oi) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct lov_mds_md_v1, lmm_oi));
+ LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_oi) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_oi));
+ LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_stripe_size) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct lov_mds_md_v1, lmm_stripe_size));
+ LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_stripe_size) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_stripe_size));
+ LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_stripe_count) == 28, "found %lld\n",
+ (long long)(int)offsetof(struct lov_mds_md_v1, lmm_stripe_count));
+ LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_stripe_count) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_stripe_count));
+ LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_layout_gen) == 30, "found %lld\n",
+ (long long)(int)offsetof(struct lov_mds_md_v1, lmm_layout_gen));
+ LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_layout_gen) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_layout_gen));
+ LASSERTF((int)offsetof(struct lov_mds_md_v1, lmm_objects[0]) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct lov_mds_md_v1, lmm_objects[0]));
+ LASSERTF((int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_objects[0]) == 24, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_mds_md_v1 *)0)->lmm_objects[0]));
+ CLASSERT(LOV_MAGIC_V1 == 0x0BD10BD0);
+
+ /* Checks for struct lov_mds_md_v3 */
+ LASSERTF((int)sizeof(struct lov_mds_md_v3) == 48, "found %lld\n",
+ (long long)(int)sizeof(struct lov_mds_md_v3));
+ LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_magic) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct lov_mds_md_v3, lmm_magic));
+ LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_magic) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_magic));
+ LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_pattern) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct lov_mds_md_v3, lmm_pattern));
+ LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_pattern) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_pattern));
+ LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_oi) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct lov_mds_md_v3, lmm_oi));
+ LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_oi) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_oi));
+ LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_stripe_size) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct lov_mds_md_v3, lmm_stripe_size));
+ LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_stripe_size) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_stripe_size));
+ LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_stripe_count) == 28, "found %lld\n",
+ (long long)(int)offsetof(struct lov_mds_md_v3, lmm_stripe_count));
+ LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_stripe_count) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_stripe_count));
+ LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_layout_gen) == 30, "found %lld\n",
+ (long long)(int)offsetof(struct lov_mds_md_v3, lmm_layout_gen));
+ LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_layout_gen) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_layout_gen));
+ CLASSERT(LOV_MAXPOOLNAME == 16);
+ LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_pool_name[16]) == 48, "found %lld\n",
+ (long long)(int)offsetof(struct lov_mds_md_v3, lmm_pool_name[16]));
+ LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_pool_name[16]) == 1, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_pool_name[16]));
+ LASSERTF((int)offsetof(struct lov_mds_md_v3, lmm_objects[0]) == 48, "found %lld\n",
+ (long long)(int)offsetof(struct lov_mds_md_v3, lmm_objects[0]));
+ LASSERTF((int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_objects[0]) == 24, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_objects[0]));
+ CLASSERT(LOV_MAGIC_V3 == 0x0BD30BD0);
+ LASSERTF(LOV_PATTERN_RAID0 == 0x00000001UL, "found 0x%.8xUL\n",
+ (unsigned)LOV_PATTERN_RAID0);
+ LASSERTF(LOV_PATTERN_RAID1 == 0x00000002UL, "found 0x%.8xUL\n",
+ (unsigned)LOV_PATTERN_RAID1);
+ LASSERTF(LOV_PATTERN_FIRST == 0x00000100UL, "found 0x%.8xUL\n",
+ (unsigned)LOV_PATTERN_FIRST);
+ LASSERTF(LOV_PATTERN_CMOBD == 0x00000200UL, "found 0x%.8xUL\n",
+ (unsigned)LOV_PATTERN_CMOBD);
+
+ /* Checks for struct obd_statfs */
+ LASSERTF((int)sizeof(struct obd_statfs) == 144, "found %lld\n",
+ (long long)(int)sizeof(struct obd_statfs));
+ LASSERTF((int)offsetof(struct obd_statfs, os_type) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct obd_statfs, os_type));
+ LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_type) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_statfs *)0)->os_type));
+ LASSERTF((int)offsetof(struct obd_statfs, os_blocks) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct obd_statfs, os_blocks));
+ LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_blocks) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_statfs *)0)->os_blocks));
+ LASSERTF((int)offsetof(struct obd_statfs, os_bfree) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct obd_statfs, os_bfree));
+ LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_bfree) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_statfs *)0)->os_bfree));
+ LASSERTF((int)offsetof(struct obd_statfs, os_bavail) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct obd_statfs, os_bavail));
+ LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_bavail) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_statfs *)0)->os_bavail));
+ LASSERTF((int)offsetof(struct obd_statfs, os_ffree) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct obd_statfs, os_ffree));
+ LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_ffree) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_statfs *)0)->os_ffree));
+ LASSERTF((int)offsetof(struct obd_statfs, os_fsid) == 48, "found %lld\n",
+ (long long)(int)offsetof(struct obd_statfs, os_fsid));
+ LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_fsid) == 40, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_statfs *)0)->os_fsid));
+ LASSERTF((int)offsetof(struct obd_statfs, os_bsize) == 88, "found %lld\n",
+ (long long)(int)offsetof(struct obd_statfs, os_bsize));
+ LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_bsize) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_statfs *)0)->os_bsize));
+ LASSERTF((int)offsetof(struct obd_statfs, os_namelen) == 92, "found %lld\n",
+ (long long)(int)offsetof(struct obd_statfs, os_namelen));
+ LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_namelen) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_statfs *)0)->os_namelen));
+ LASSERTF((int)offsetof(struct obd_statfs, os_state) == 104, "found %lld\n",
+ (long long)(int)offsetof(struct obd_statfs, os_state));
+ LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_state) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_statfs *)0)->os_state));
+ LASSERTF((int)offsetof(struct obd_statfs, os_fprecreated) == 108, "found %lld\n",
+ (long long)(int)offsetof(struct obd_statfs, os_fprecreated));
+ LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_fprecreated) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_statfs *)0)->os_fprecreated));
+ LASSERTF((int)offsetof(struct obd_statfs, os_spare2) == 112, "found %lld\n",
+ (long long)(int)offsetof(struct obd_statfs, os_spare2));
+ LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare2) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare2));
+ LASSERTF((int)offsetof(struct obd_statfs, os_spare3) == 116, "found %lld\n",
+ (long long)(int)offsetof(struct obd_statfs, os_spare3));
+ LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare3) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare3));
+ LASSERTF((int)offsetof(struct obd_statfs, os_spare4) == 120, "found %lld\n",
+ (long long)(int)offsetof(struct obd_statfs, os_spare4));
+ LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare4) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare4));
+ LASSERTF((int)offsetof(struct obd_statfs, os_spare5) == 124, "found %lld\n",
+ (long long)(int)offsetof(struct obd_statfs, os_spare5));
+ LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare5) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare5));
+ LASSERTF((int)offsetof(struct obd_statfs, os_spare6) == 128, "found %lld\n",
+ (long long)(int)offsetof(struct obd_statfs, os_spare6));
+ LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare6) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare6));
+ LASSERTF((int)offsetof(struct obd_statfs, os_spare7) == 132, "found %lld\n",
+ (long long)(int)offsetof(struct obd_statfs, os_spare7));
+ LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare7) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare7));
+ LASSERTF((int)offsetof(struct obd_statfs, os_spare8) == 136, "found %lld\n",
+ (long long)(int)offsetof(struct obd_statfs, os_spare8));
+ LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare8) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare8));
+ LASSERTF((int)offsetof(struct obd_statfs, os_spare9) == 140, "found %lld\n",
+ (long long)(int)offsetof(struct obd_statfs, os_spare9));
+ LASSERTF((int)sizeof(((struct obd_statfs *)0)->os_spare9) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_statfs *)0)->os_spare9));
+
+ /* Checks for struct obd_ioobj */
+ LASSERTF((int)sizeof(struct obd_ioobj) == 24, "found %lld\n",
+ (long long)(int)sizeof(struct obd_ioobj));
+ LASSERTF((int)offsetof(struct obd_ioobj, ioo_oid) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct obd_ioobj, ioo_oid));
+ LASSERTF((int)sizeof(((struct obd_ioobj *)0)->ioo_oid) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_ioobj *)0)->ioo_oid));
+ LASSERTF((int)offsetof(struct obd_ioobj, ioo_max_brw) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct obd_ioobj, ioo_max_brw));
+ LASSERTF((int)sizeof(((struct obd_ioobj *)0)->ioo_max_brw) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_ioobj *)0)->ioo_max_brw));
+ LASSERTF((int)offsetof(struct obd_ioobj, ioo_bufcnt) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct obd_ioobj, ioo_bufcnt));
+ LASSERTF((int)sizeof(((struct obd_ioobj *)0)->ioo_bufcnt) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_ioobj *)0)->ioo_bufcnt));
+
+ /* Checks for union lquota_id */
+ LASSERTF((int)sizeof(union lquota_id) == 16, "found %lld\n",
+ (long long)(int)sizeof(union lquota_id));
+
+ LASSERTF(QUOTABLOCK_BITS == 10, "found %lld\n",
+ (long long)QUOTABLOCK_BITS);
+ LASSERTF(QUOTABLOCK_SIZE == 1024, "found %lld\n",
+ (long long)QUOTABLOCK_SIZE);
+
+ /* Checks for struct obd_quotactl */
+ LASSERTF((int)sizeof(struct obd_quotactl) == 112, "found %lld\n",
+ (long long)(int)sizeof(struct obd_quotactl));
+ LASSERTF((int)offsetof(struct obd_quotactl, qc_cmd) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct obd_quotactl, qc_cmd));
+ LASSERTF((int)sizeof(((struct obd_quotactl *)0)->qc_cmd) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_quotactl *)0)->qc_cmd));
+ LASSERTF((int)offsetof(struct obd_quotactl, qc_type) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct obd_quotactl, qc_type));
+ LASSERTF((int)sizeof(((struct obd_quotactl *)0)->qc_type) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_quotactl *)0)->qc_type));
+ LASSERTF((int)offsetof(struct obd_quotactl, qc_id) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct obd_quotactl, qc_id));
+ LASSERTF((int)sizeof(((struct obd_quotactl *)0)->qc_id) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_quotactl *)0)->qc_id));
+ LASSERTF((int)offsetof(struct obd_quotactl, qc_stat) == 12, "found %lld\n",
+ (long long)(int)offsetof(struct obd_quotactl, qc_stat));
+ LASSERTF((int)sizeof(((struct obd_quotactl *)0)->qc_stat) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_quotactl *)0)->qc_stat));
+ LASSERTF((int)offsetof(struct obd_quotactl, qc_dqinfo) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct obd_quotactl, qc_dqinfo));
+ LASSERTF((int)sizeof(((struct obd_quotactl *)0)->qc_dqinfo) == 24, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_quotactl *)0)->qc_dqinfo));
+ LASSERTF((int)offsetof(struct obd_quotactl, qc_dqblk) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct obd_quotactl, qc_dqblk));
+ LASSERTF((int)sizeof(((struct obd_quotactl *)0)->qc_dqblk) == 72, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_quotactl *)0)->qc_dqblk));
+
+ /* Checks for struct obd_dqinfo */
+ LASSERTF((int)sizeof(struct obd_dqinfo) == 24, "found %lld\n",
+ (long long)(int)sizeof(struct obd_dqinfo));
+ LASSERTF((int)offsetof(struct obd_dqinfo, dqi_bgrace) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct obd_dqinfo, dqi_bgrace));
+ LASSERTF((int)sizeof(((struct obd_dqinfo *)0)->dqi_bgrace) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_dqinfo *)0)->dqi_bgrace));
+ LASSERTF((int)offsetof(struct obd_dqinfo, dqi_igrace) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct obd_dqinfo, dqi_igrace));
+ LASSERTF((int)sizeof(((struct obd_dqinfo *)0)->dqi_igrace) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_dqinfo *)0)->dqi_igrace));
+ LASSERTF((int)offsetof(struct obd_dqinfo, dqi_flags) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct obd_dqinfo, dqi_flags));
+ LASSERTF((int)sizeof(((struct obd_dqinfo *)0)->dqi_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_dqinfo *)0)->dqi_flags));
+ LASSERTF((int)offsetof(struct obd_dqinfo, dqi_valid) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct obd_dqinfo, dqi_valid));
+ LASSERTF((int)sizeof(((struct obd_dqinfo *)0)->dqi_valid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_dqinfo *)0)->dqi_valid));
+
+ /* Checks for struct obd_dqblk */
+ LASSERTF((int)sizeof(struct obd_dqblk) == 72, "found %lld\n",
+ (long long)(int)sizeof(struct obd_dqblk));
+ LASSERTF((int)offsetof(struct obd_dqblk, dqb_bhardlimit) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct obd_dqblk, dqb_bhardlimit));
+ LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_bhardlimit) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_bhardlimit));
+ LASSERTF((int)offsetof(struct obd_dqblk, dqb_bsoftlimit) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct obd_dqblk, dqb_bsoftlimit));
+ LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_bsoftlimit) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_bsoftlimit));
+ LASSERTF((int)offsetof(struct obd_dqblk, dqb_curspace) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct obd_dqblk, dqb_curspace));
+ LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_curspace) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_curspace));
+ LASSERTF((int)offsetof(struct obd_dqblk, dqb_ihardlimit) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct obd_dqblk, dqb_ihardlimit));
+ LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_ihardlimit) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_ihardlimit));
+ LASSERTF((int)offsetof(struct obd_dqblk, dqb_isoftlimit) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct obd_dqblk, dqb_isoftlimit));
+ LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_isoftlimit) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_isoftlimit));
+ LASSERTF((int)offsetof(struct obd_dqblk, dqb_curinodes) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct obd_dqblk, dqb_curinodes));
+ LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_curinodes) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_curinodes));
+ LASSERTF((int)offsetof(struct obd_dqblk, dqb_btime) == 48, "found %lld\n",
+ (long long)(int)offsetof(struct obd_dqblk, dqb_btime));
+ LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_btime) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_btime));
+ LASSERTF((int)offsetof(struct obd_dqblk, dqb_itime) == 56, "found %lld\n",
+ (long long)(int)offsetof(struct obd_dqblk, dqb_itime));
+ LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_itime) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_itime));
+ LASSERTF((int)offsetof(struct obd_dqblk, dqb_valid) == 64, "found %lld\n",
+ (long long)(int)offsetof(struct obd_dqblk, dqb_valid));
+ LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_valid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_valid));
+ LASSERTF((int)offsetof(struct obd_dqblk, dqb_padding) == 68, "found %lld\n",
+ (long long)(int)offsetof(struct obd_dqblk, dqb_padding));
+ LASSERTF((int)sizeof(((struct obd_dqblk *)0)->dqb_padding) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct obd_dqblk *)0)->dqb_padding));
+ LASSERTF(Q_QUOTACHECK == 0x800100, "found 0x%.8x\n",
+ Q_QUOTACHECK);
+ LASSERTF(Q_INITQUOTA == 0x800101, "found 0x%.8x\n",
+ Q_INITQUOTA);
+ LASSERTF(Q_GETOINFO == 0x800102, "found 0x%.8x\n",
+ Q_GETOINFO);
+ LASSERTF(Q_GETOQUOTA == 0x800103, "found 0x%.8x\n",
+ Q_GETOQUOTA);
+ LASSERTF(Q_FINVALIDATE == 0x800104, "found 0x%.8x\n",
+ Q_FINVALIDATE);
+
+ /* Checks for struct lquota_acct_rec */
+ LASSERTF((int)sizeof(struct lquota_acct_rec) == 16, "found %lld\n",
+ (long long)(int)sizeof(struct lquota_acct_rec));
+ LASSERTF((int)offsetof(struct lquota_acct_rec, bspace) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct lquota_acct_rec, bspace));
+ LASSERTF((int)sizeof(((struct lquota_acct_rec *)0)->bspace) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lquota_acct_rec *)0)->bspace));
+ LASSERTF((int)offsetof(struct lquota_acct_rec, ispace) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct lquota_acct_rec, ispace));
+ LASSERTF((int)sizeof(((struct lquota_acct_rec *)0)->ispace) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lquota_acct_rec *)0)->ispace));
+
+ /* Checks for struct lquota_glb_rec */
+ LASSERTF((int)sizeof(struct lquota_glb_rec) == 32, "found %lld\n",
+ (long long)(int)sizeof(struct lquota_glb_rec));
+ LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_hardlimit) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct lquota_glb_rec, qbr_hardlimit));
+ LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_hardlimit) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_hardlimit));
+ LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_softlimit) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct lquota_glb_rec, qbr_softlimit));
+ LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_softlimit) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_softlimit));
+ LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_time) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct lquota_glb_rec, qbr_time));
+ LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_time) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_time));
+ LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_granted) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct lquota_glb_rec, qbr_granted));
+ LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_granted) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_granted));
+
+ /* Checks for struct lquota_slv_rec */
+ LASSERTF((int)sizeof(struct lquota_slv_rec) == 8, "found %lld\n",
+ (long long)(int)sizeof(struct lquota_slv_rec));
+ LASSERTF((int)offsetof(struct lquota_slv_rec, qsr_granted) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct lquota_slv_rec, qsr_granted));
+ LASSERTF((int)sizeof(((struct lquota_slv_rec *)0)->qsr_granted) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lquota_slv_rec *)0)->qsr_granted));
+
+ /* Checks for struct idx_info */
+ LASSERTF((int)sizeof(struct idx_info) == 80, "found %lld\n",
+ (long long)(int)sizeof(struct idx_info));
+ LASSERTF((int)offsetof(struct idx_info, ii_magic) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct idx_info, ii_magic));
+ LASSERTF((int)sizeof(((struct idx_info *)0)->ii_magic) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct idx_info *)0)->ii_magic));
+ LASSERTF((int)offsetof(struct idx_info, ii_flags) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct idx_info, ii_flags));
+ LASSERTF((int)sizeof(((struct idx_info *)0)->ii_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct idx_info *)0)->ii_flags));
+ LASSERTF((int)offsetof(struct idx_info, ii_count) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct idx_info, ii_count));
+ LASSERTF((int)sizeof(((struct idx_info *)0)->ii_count) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct idx_info *)0)->ii_count));
+ LASSERTF((int)offsetof(struct idx_info, ii_pad0) == 10, "found %lld\n",
+ (long long)(int)offsetof(struct idx_info, ii_pad0));
+ LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad0) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct idx_info *)0)->ii_pad0));
+ LASSERTF((int)offsetof(struct idx_info, ii_attrs) == 12, "found %lld\n",
+ (long long)(int)offsetof(struct idx_info, ii_attrs));
+ LASSERTF((int)sizeof(((struct idx_info *)0)->ii_attrs) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct idx_info *)0)->ii_attrs));
+ LASSERTF((int)offsetof(struct idx_info, ii_fid) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct idx_info, ii_fid));
+ LASSERTF((int)sizeof(((struct idx_info *)0)->ii_fid) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct idx_info *)0)->ii_fid));
+ LASSERTF((int)offsetof(struct idx_info, ii_version) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct idx_info, ii_version));
+ LASSERTF((int)sizeof(((struct idx_info *)0)->ii_version) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct idx_info *)0)->ii_version));
+ LASSERTF((int)offsetof(struct idx_info, ii_hash_start) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct idx_info, ii_hash_start));
+ LASSERTF((int)sizeof(((struct idx_info *)0)->ii_hash_start) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct idx_info *)0)->ii_hash_start));
+ LASSERTF((int)offsetof(struct idx_info, ii_hash_end) == 48, "found %lld\n",
+ (long long)(int)offsetof(struct idx_info, ii_hash_end));
+ LASSERTF((int)sizeof(((struct idx_info *)0)->ii_hash_end) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct idx_info *)0)->ii_hash_end));
+ LASSERTF((int)offsetof(struct idx_info, ii_keysize) == 56, "found %lld\n",
+ (long long)(int)offsetof(struct idx_info, ii_keysize));
+ LASSERTF((int)sizeof(((struct idx_info *)0)->ii_keysize) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct idx_info *)0)->ii_keysize));
+ LASSERTF((int)offsetof(struct idx_info, ii_recsize) == 58, "found %lld\n",
+ (long long)(int)offsetof(struct idx_info, ii_recsize));
+ LASSERTF((int)sizeof(((struct idx_info *)0)->ii_recsize) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct idx_info *)0)->ii_recsize));
+ LASSERTF((int)offsetof(struct idx_info, ii_pad1) == 60, "found %lld\n",
+ (long long)(int)offsetof(struct idx_info, ii_pad1));
+ LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad1) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct idx_info *)0)->ii_pad1));
+ LASSERTF((int)offsetof(struct idx_info, ii_pad2) == 64, "found %lld\n",
+ (long long)(int)offsetof(struct idx_info, ii_pad2));
+ LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad2) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct idx_info *)0)->ii_pad2));
+ LASSERTF((int)offsetof(struct idx_info, ii_pad3) == 72, "found %lld\n",
+ (long long)(int)offsetof(struct idx_info, ii_pad3));
+ LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad3) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct idx_info *)0)->ii_pad3));
+ CLASSERT(IDX_INFO_MAGIC == 0x3D37CC37);
+
+ /* Checks for struct lu_idxpage */
+ LASSERTF((int)sizeof(struct lu_idxpage) == 16, "found %lld\n",
+ (long long)(int)sizeof(struct lu_idxpage));
+ LASSERTF((int)offsetof(struct lu_idxpage, lip_magic) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct lu_idxpage, lip_magic));
+ LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_magic) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_magic));
+ LASSERTF((int)offsetof(struct lu_idxpage, lip_flags) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct lu_idxpage, lip_flags));
+ LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_flags) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_flags));
+ LASSERTF((int)offsetof(struct lu_idxpage, lip_nr) == 6, "found %lld\n",
+ (long long)(int)offsetof(struct lu_idxpage, lip_nr));
+ LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_nr) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_nr));
+ LASSERTF((int)offsetof(struct lu_idxpage, lip_pad0) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct lu_idxpage, lip_pad0));
+ LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_pad0) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_pad0));
+ CLASSERT(LIP_MAGIC == 0x8A6D6B6C);
+ LASSERTF(LIP_HDR_SIZE == 16, "found %lld\n",
+ (long long)LIP_HDR_SIZE);
+ LASSERTF(II_FL_NOHASH == 1, "found %lld\n",
+ (long long)II_FL_NOHASH);
+ LASSERTF(II_FL_VARKEY == 2, "found %lld\n",
+ (long long)II_FL_VARKEY);
+ LASSERTF(II_FL_VARREC == 4, "found %lld\n",
+ (long long)II_FL_VARREC);
+ LASSERTF(II_FL_NONUNQ == 8, "found %lld\n",
+ (long long)II_FL_NONUNQ);
+
+ /* Checks for struct niobuf_remote */
+ LASSERTF((int)sizeof(struct niobuf_remote) == 16, "found %lld\n",
+ (long long)(int)sizeof(struct niobuf_remote));
+ LASSERTF((int)offsetof(struct niobuf_remote, offset) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct niobuf_remote, offset));
+ LASSERTF((int)sizeof(((struct niobuf_remote *)0)->offset) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct niobuf_remote *)0)->offset));
+ LASSERTF((int)offsetof(struct niobuf_remote, len) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct niobuf_remote, len));
+ LASSERTF((int)sizeof(((struct niobuf_remote *)0)->len) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct niobuf_remote *)0)->len));
+ LASSERTF((int)offsetof(struct niobuf_remote, flags) == 12, "found %lld\n",
+ (long long)(int)offsetof(struct niobuf_remote, flags));
+ LASSERTF((int)sizeof(((struct niobuf_remote *)0)->flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct niobuf_remote *)0)->flags));
+ LASSERTF(OBD_BRW_READ == 0x01, "found 0x%.8x\n",
+ OBD_BRW_READ);
+ LASSERTF(OBD_BRW_WRITE == 0x02, "found 0x%.8x\n",
+ OBD_BRW_WRITE);
+ LASSERTF(OBD_BRW_SYNC == 0x08, "found 0x%.8x\n",
+ OBD_BRW_SYNC);
+ LASSERTF(OBD_BRW_CHECK == 0x10, "found 0x%.8x\n",
+ OBD_BRW_CHECK);
+ LASSERTF(OBD_BRW_FROM_GRANT == 0x20, "found 0x%.8x\n",
+ OBD_BRW_FROM_GRANT);
+ LASSERTF(OBD_BRW_GRANTED == 0x40, "found 0x%.8x\n",
+ OBD_BRW_GRANTED);
+ LASSERTF(OBD_BRW_NOCACHE == 0x80, "found 0x%.8x\n",
+ OBD_BRW_NOCACHE);
+ LASSERTF(OBD_BRW_NOQUOTA == 0x100, "found 0x%.8x\n",
+ OBD_BRW_NOQUOTA);
+ LASSERTF(OBD_BRW_SRVLOCK == 0x200, "found 0x%.8x\n",
+ OBD_BRW_SRVLOCK);
+ LASSERTF(OBD_BRW_ASYNC == 0x400, "found 0x%.8x\n",
+ OBD_BRW_ASYNC);
+ LASSERTF(OBD_BRW_MEMALLOC == 0x800, "found 0x%.8x\n",
+ OBD_BRW_MEMALLOC);
+
+ /* Checks for struct ost_body */
+ LASSERTF((int)sizeof(struct ost_body) == 208, "found %lld\n",
+ (long long)(int)sizeof(struct ost_body));
+ LASSERTF((int)offsetof(struct ost_body, oa) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct ost_body, oa));
+ LASSERTF((int)sizeof(((struct ost_body *)0)->oa) == 208, "found %lld\n",
+ (long long)(int)sizeof(((struct ost_body *)0)->oa));
+
+ /* Checks for struct ll_fid */
+ LASSERTF((int)sizeof(struct ll_fid) == 16, "found %lld\n",
+ (long long)(int)sizeof(struct ll_fid));
+ LASSERTF((int)offsetof(struct ll_fid, id) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct ll_fid, id));
+ LASSERTF((int)sizeof(((struct ll_fid *)0)->id) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ll_fid *)0)->id));
+ LASSERTF((int)offsetof(struct ll_fid, generation) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct ll_fid, generation));
+ LASSERTF((int)sizeof(((struct ll_fid *)0)->generation) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ll_fid *)0)->generation));
+ LASSERTF((int)offsetof(struct ll_fid, f_type) == 12, "found %lld\n",
+ (long long)(int)offsetof(struct ll_fid, f_type));
+ LASSERTF((int)sizeof(((struct ll_fid *)0)->f_type) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ll_fid *)0)->f_type));
+
+ /* Checks for struct mdt_body */
+ LASSERTF((int)sizeof(struct mdt_body) == 216, "found %lld\n",
+ (long long)(int)sizeof(struct mdt_body));
+ LASSERTF((int)offsetof(struct mdt_body, fid1) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, fid1));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->fid1) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->fid1));
+ LASSERTF((int)offsetof(struct mdt_body, fid2) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, fid2));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->fid2) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->fid2));
+ LASSERTF((int)offsetof(struct mdt_body, handle) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, handle));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->handle) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->handle));
+ LASSERTF((int)offsetof(struct mdt_body, valid) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, valid));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->valid) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->valid));
+ LASSERTF((int)offsetof(struct mdt_body, size) == 48, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, size));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->size) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->size));
+ LASSERTF((int)offsetof(struct mdt_body, mtime) == 56, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mtime));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mtime) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mtime));
+ LASSERTF((int)offsetof(struct mdt_body, atime) == 64, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, atime));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->atime) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->atime));
+ LASSERTF((int)offsetof(struct mdt_body, ctime) == 72, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, ctime));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->ctime) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->ctime));
+ LASSERTF((int)offsetof(struct mdt_body, blocks) == 80, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, blocks));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->blocks) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->blocks));
+ LASSERTF((int)offsetof(struct mdt_body, unused1) == 96, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, unused1));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->unused1) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->unused1));
+ LASSERTF((int)offsetof(struct mdt_body, fsuid) == 104, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, fsuid));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->fsuid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->fsuid));
+ LASSERTF((int)offsetof(struct mdt_body, fsgid) == 108, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, fsgid));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->fsgid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->fsgid));
+ LASSERTF((int)offsetof(struct mdt_body, capability) == 112, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, capability));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->capability) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->capability));
+ LASSERTF((int)offsetof(struct mdt_body, mode) == 116, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, mode));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->mode) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->mode));
+ LASSERTF((int)offsetof(struct mdt_body, uid) == 120, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, uid));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->uid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->uid));
+ LASSERTF((int)offsetof(struct mdt_body, gid) == 124, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, gid));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->gid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->gid));
+ LASSERTF((int)offsetof(struct mdt_body, flags) == 128, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, flags));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->flags));
+ LASSERTF((int)offsetof(struct mdt_body, rdev) == 132, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, rdev));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->rdev) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->rdev));
+ LASSERTF((int)offsetof(struct mdt_body, nlink) == 136, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, nlink));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->nlink) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->nlink));
+ LASSERTF((int)offsetof(struct mdt_body, unused2) == 140, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, unused2));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->unused2) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->unused2));
+ LASSERTF((int)offsetof(struct mdt_body, suppgid) == 144, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, suppgid));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->suppgid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->suppgid));
+ LASSERTF((int)offsetof(struct mdt_body, eadatasize) == 148, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, eadatasize));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->eadatasize) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->eadatasize));
+ LASSERTF((int)offsetof(struct mdt_body, aclsize) == 152, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, aclsize));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->aclsize) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->aclsize));
+ LASSERTF((int)offsetof(struct mdt_body, max_mdsize) == 156, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, max_mdsize));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->max_mdsize) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->max_mdsize));
+ LASSERTF((int)offsetof(struct mdt_body, max_cookiesize) == 160, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, max_cookiesize));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->max_cookiesize) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->max_cookiesize));
+ LASSERTF((int)offsetof(struct mdt_body, uid_h) == 164, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, uid_h));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->uid_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->uid_h));
+ LASSERTF((int)offsetof(struct mdt_body, gid_h) == 168, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, gid_h));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->gid_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->gid_h));
+ LASSERTF((int)offsetof(struct mdt_body, padding_5) == 172, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, padding_5));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->padding_5) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->padding_5));
+ LASSERTF((int)offsetof(struct mdt_body, padding_6) == 176, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, padding_6));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->padding_6) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->padding_6));
+ LASSERTF((int)offsetof(struct mdt_body, padding_7) == 184, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, padding_7));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->padding_7) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->padding_7));
+ LASSERTF((int)offsetof(struct mdt_body, padding_8) == 192, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, padding_8));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->padding_8) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->padding_8));
+ LASSERTF((int)offsetof(struct mdt_body, padding_9) == 200, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, padding_9));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->padding_9) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->padding_9));
+ LASSERTF((int)offsetof(struct mdt_body, padding_10) == 208, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_body, padding_10));
+ LASSERTF((int)sizeof(((struct mdt_body *)0)->padding_10) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_body *)0)->padding_10));
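+ /* Fixed wire values (octal) of the MDS_FMODE_* and MDS_OPEN_* flags */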
+ LASSERTF(MDS_FMODE_CLOSED == 000000000000UL, "found 0%.11oUL\n",
+ MDS_FMODE_CLOSED);
+ LASSERTF(MDS_FMODE_EXEC == 000000000004UL, "found 0%.11oUL\n",
+ MDS_FMODE_EXEC);
+ LASSERTF(MDS_FMODE_EPOCH == 000001000000UL, "found 0%.11oUL\n",
+ MDS_FMODE_EPOCH);
+ LASSERTF(MDS_FMODE_TRUNC == 000002000000UL, "found 0%.11oUL\n",
+ MDS_FMODE_TRUNC);
+ LASSERTF(MDS_FMODE_SOM == 000004000000UL, "found 0%.11oUL\n",
+ MDS_FMODE_SOM);
+ LASSERTF(MDS_OPEN_CREATED == 000000000010UL, "found 0%.11oUL\n",
+ MDS_OPEN_CREATED);
+ LASSERTF(MDS_OPEN_CROSS == 000000000020UL, "found 0%.11oUL\n",
+ MDS_OPEN_CROSS);
+ LASSERTF(MDS_OPEN_CREAT == 000000000100UL, "found 0%.11oUL\n",
+ MDS_OPEN_CREAT);
+ LASSERTF(MDS_OPEN_EXCL == 000000000200UL, "found 0%.11oUL\n",
+ MDS_OPEN_EXCL);
+ LASSERTF(MDS_OPEN_TRUNC == 000000001000UL, "found 0%.11oUL\n",
+ MDS_OPEN_TRUNC);
+ LASSERTF(MDS_OPEN_APPEND == 000000002000UL, "found 0%.11oUL\n",
+ MDS_OPEN_APPEND);
+ LASSERTF(MDS_OPEN_SYNC == 000000010000UL, "found 0%.11oUL\n",
+ MDS_OPEN_SYNC);
+ LASSERTF(MDS_OPEN_DIRECTORY == 000000200000UL, "found 0%.11oUL\n",
+ MDS_OPEN_DIRECTORY);
+ LASSERTF(MDS_OPEN_BY_FID == 000040000000UL, "found 0%.11oUL\n",
+ MDS_OPEN_BY_FID);
+ LASSERTF(MDS_OPEN_DELAY_CREATE == 000100000000UL, "found 0%.11oUL\n",
+ MDS_OPEN_DELAY_CREATE);
+ LASSERTF(MDS_OPEN_OWNEROVERRIDE == 000200000000UL, "found 0%.11oUL\n",
+ MDS_OPEN_OWNEROVERRIDE);
+ LASSERTF(MDS_OPEN_JOIN_FILE == 000400000000UL, "found 0%.11oUL\n",
+ MDS_OPEN_JOIN_FILE);
+ LASSERTF(MDS_OPEN_LOCK == 004000000000UL, "found 0%.11oUL\n",
+ MDS_OPEN_LOCK);
+ LASSERTF(MDS_OPEN_HAS_EA == 010000000000UL, "found 0%.11oUL\n",
+ MDS_OPEN_HAS_EA);
+ LASSERTF(MDS_OPEN_HAS_OBJS == 020000000000UL, "found 0%.11oUL\n",
+ MDS_OPEN_HAS_OBJS);
+ LASSERTF(MDS_OPEN_NORESTORE == 00000000000100000000000ULL, "found 0%.22lloULL\n",
+ (long long)MDS_OPEN_NORESTORE);
+ LASSERTF(MDS_OPEN_NEWSTRIPE == 00000000000200000000000ULL, "found 0%.22lloULL\n",
+ (long long)MDS_OPEN_NEWSTRIPE);
+ LASSERTF(MDS_OPEN_VOLATILE == 00000000000400000000000ULL, "found 0%.22lloULL\n",
+ (long long)MDS_OPEN_VOLATILE);
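+ /* Fixed wire values of the LUSTRE_*_FL attribute flags */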
+ LASSERTF(LUSTRE_SYNC_FL == 0x00000008, "found 0x%.8x\n",
+ LUSTRE_SYNC_FL);
+ LASSERTF(LUSTRE_IMMUTABLE_FL == 0x00000010, "found 0x%.8x\n",
+ LUSTRE_IMMUTABLE_FL);
+ LASSERTF(LUSTRE_APPEND_FL == 0x00000020, "found 0x%.8x\n",
+ LUSTRE_APPEND_FL);
+ LASSERTF(LUSTRE_NOATIME_FL == 0x00000080, "found 0x%.8x\n",
+ LUSTRE_NOATIME_FL);
+ LASSERTF(LUSTRE_DIRSYNC_FL == 0x00010000, "found 0x%.8x\n",
+ LUSTRE_DIRSYNC_FL);
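+ /* Fixed wire values of the MDS_INODELOCK_* lock bits */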
+ LASSERTF(MDS_INODELOCK_LOOKUP == 0x000001, "found 0x%.8x\n",
+ MDS_INODELOCK_LOOKUP);
+ LASSERTF(MDS_INODELOCK_UPDATE == 0x000002, "found 0x%.8x\n",
+ MDS_INODELOCK_UPDATE);
+ LASSERTF(MDS_INODELOCK_OPEN == 0x000004, "found 0x%.8x\n",
+ MDS_INODELOCK_OPEN);
+ LASSERTF(MDS_INODELOCK_LAYOUT == 0x000008, "found 0x%.8x\n",
+ MDS_INODELOCK_LAYOUT);
+
+ /* Checks for struct mdt_ioepoch */
+ LASSERTF((int)sizeof(struct mdt_ioepoch) == 24, "found %lld\n",
+ (long long)(int)sizeof(struct mdt_ioepoch));
+ LASSERTF((int)offsetof(struct mdt_ioepoch, handle) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_ioepoch, handle));
+ LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->handle) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_ioepoch *)0)->handle));
+ LASSERTF((int)offsetof(struct mdt_ioepoch, ioepoch) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_ioepoch, ioepoch));
+ LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->ioepoch) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_ioepoch *)0)->ioepoch));
+ LASSERTF((int)offsetof(struct mdt_ioepoch, flags) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_ioepoch, flags));
+ LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_ioepoch *)0)->flags));
+ LASSERTF((int)offsetof(struct mdt_ioepoch, padding) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_ioepoch, padding));
+ LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->padding) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_ioepoch *)0)->padding));
+
+ /* Checks for struct mdt_remote_perm */
+ LASSERTF((int)sizeof(struct mdt_remote_perm) == 32, "found %lld\n",
+ (long long)(int)sizeof(struct mdt_remote_perm));
+ LASSERTF((int)offsetof(struct mdt_remote_perm, rp_uid) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_remote_perm, rp_uid));
+ LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_uid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_uid));
+ LASSERTF((int)offsetof(struct mdt_remote_perm, rp_gid) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_remote_perm, rp_gid));
+ LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_gid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_gid));
+ LASSERTF((int)offsetof(struct mdt_remote_perm, rp_fsuid) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_remote_perm, rp_fsuid));
+ LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_fsuid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_fsuid));
+ LASSERTF((int)offsetof(struct mdt_remote_perm, rp_fsgid) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_remote_perm, rp_fsgid));
+ LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_fsgid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_fsgid));
+ LASSERTF((int)offsetof(struct mdt_remote_perm, rp_access_perm) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_remote_perm, rp_access_perm));
+ LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_access_perm) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_access_perm));
+ LASSERTF((int)offsetof(struct mdt_remote_perm, rp_padding) == 28, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_remote_perm, rp_padding));
+ LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_padding) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_padding));
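+ /* Fixed wire values of the CFS_*_PERM remote permission bits */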
+ LASSERTF(CFS_SETUID_PERM == 0x00000001UL, "found 0x%.8xUL\n",
+ (unsigned)CFS_SETUID_PERM);
+ LASSERTF(CFS_SETGID_PERM == 0x00000002UL, "found 0x%.8xUL\n",
+ (unsigned)CFS_SETGID_PERM);
+ LASSERTF(CFS_SETGRP_PERM == 0x00000004UL, "found 0x%.8xUL\n",
+ (unsigned)CFS_SETGRP_PERM);
+ LASSERTF(CFS_RMTACL_PERM == 0x00000008UL, "found 0x%.8xUL\n",
+ (unsigned)CFS_RMTACL_PERM);
+ LASSERTF(CFS_RMTOWN_PERM == 0x00000010UL, "found 0x%.8xUL\n",
+ (unsigned)CFS_RMTOWN_PERM);
+
+ /* Checks for struct mdt_rec_setattr */
+ LASSERTF((int)sizeof(struct mdt_rec_setattr) == 136, "found %lld\n",
+ (long long)(int)sizeof(struct mdt_rec_setattr));
+ LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_opcode) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setattr, sa_opcode));
+ LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_opcode) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_opcode));
+ LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_cap) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setattr, sa_cap));
+ LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_cap) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_cap));
+ LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_fsuid) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setattr, sa_fsuid));
+ LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsuid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsuid));
+ LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_fsuid_h) == 12, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setattr, sa_fsuid_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsuid_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsuid_h));
+ LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_fsgid) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setattr, sa_fsgid));
+ LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsgid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsgid));
+ LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_fsgid_h) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setattr, sa_fsgid_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsgid_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_fsgid_h));
+ LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_suppgid) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setattr, sa_suppgid));
+ LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_suppgid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_suppgid));
+ LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_suppgid_h) == 28, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setattr, sa_suppgid_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_suppgid_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_suppgid_h));
+ LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_padding_1) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setattr, sa_padding_1));
+ LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_1) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_1));
+ LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_padding_1_h) == 36, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setattr, sa_padding_1_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_1_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_1_h));
+ LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_fid) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setattr, sa_fid));
+ LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_fid) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_fid));
+ LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_valid) == 56, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setattr, sa_valid));
+ LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_valid) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_valid));
+ LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_uid) == 64, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setattr, sa_uid));
+ LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_uid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_uid));
+ LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_gid) == 68, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setattr, sa_gid));
+ LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_gid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_gid));
+ LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_size) == 72, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setattr, sa_size));
+ LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_size) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_size));
+ LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_blocks) == 80, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setattr, sa_blocks));
+ LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_blocks) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_blocks));
+ LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_mtime) == 88, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setattr, sa_mtime));
+ LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_mtime) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_mtime));
+ LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_atime) == 96, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setattr, sa_atime));
+ LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_atime) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_atime));
+ LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_ctime) == 104, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setattr, sa_ctime));
+ LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_ctime) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_ctime));
+ LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_attr_flags) == 112, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setattr, sa_attr_flags));
+ LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_attr_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_attr_flags));
+ LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_mode) == 116, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setattr, sa_mode));
+ LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_mode) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_mode));
+ LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_bias) == 120, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setattr, sa_bias));
+ LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_bias) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_bias));
+ LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_padding_3) == 124, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setattr, sa_padding_3));
+ LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_3) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_3));
+ LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_padding_4) == 128, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setattr, sa_padding_4));
+ LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_4) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_4));
+ LASSERTF((int)offsetof(struct mdt_rec_setattr, sa_padding_5) == 132, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setattr, sa_padding_5));
+ LASSERTF((int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_5) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setattr *)0)->sa_padding_5));
+
+ /* Checks for struct mdt_rec_create */
+ LASSERTF((int)sizeof(struct mdt_rec_create) == 136, "found %lld\n",
+ (long long)(int)sizeof(struct mdt_rec_create));
+ LASSERTF((int)offsetof(struct mdt_rec_create, cr_opcode) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_create, cr_opcode));
+ LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_opcode) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_opcode));
+ LASSERTF((int)offsetof(struct mdt_rec_create, cr_cap) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_create, cr_cap));
+ LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_cap) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_cap));
+ LASSERTF((int)offsetof(struct mdt_rec_create, cr_fsuid) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_create, cr_fsuid));
+ LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_fsuid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_fsuid));
+ LASSERTF((int)offsetof(struct mdt_rec_create, cr_fsuid_h) == 12, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_create, cr_fsuid_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_fsuid_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_fsuid_h));
+ LASSERTF((int)offsetof(struct mdt_rec_create, cr_fsgid) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_create, cr_fsgid));
+ LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_fsgid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_fsgid));
+ LASSERTF((int)offsetof(struct mdt_rec_create, cr_fsgid_h) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_create, cr_fsgid_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_fsgid_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_fsgid_h));
+ LASSERTF((int)offsetof(struct mdt_rec_create, cr_suppgid1) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_create, cr_suppgid1));
+ LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid1) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid1));
+ LASSERTF((int)offsetof(struct mdt_rec_create, cr_suppgid1_h) == 28, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_create, cr_suppgid1_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid1_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid1_h));
+ LASSERTF((int)offsetof(struct mdt_rec_create, cr_suppgid2) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_create, cr_suppgid2));
+ LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid2) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid2));
+ LASSERTF((int)offsetof(struct mdt_rec_create, cr_suppgid2_h) == 36, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_create, cr_suppgid2_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid2_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_suppgid2_h));
+ LASSERTF((int)offsetof(struct mdt_rec_create, cr_fid1) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_create, cr_fid1));
+ LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_fid1) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_fid1));
+ LASSERTF((int)offsetof(struct mdt_rec_create, cr_fid2) == 56, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_create, cr_fid2));
+ LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_fid2) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_fid2));
+ LASSERTF((int)offsetof(struct mdt_rec_create, cr_old_handle) == 72, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_create, cr_old_handle));
+ LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_old_handle) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_old_handle));
+ LASSERTF((int)offsetof(struct mdt_rec_create, cr_time) == 80, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_create, cr_time));
+ LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_time) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_time));
+ LASSERTF((int)offsetof(struct mdt_rec_create, cr_rdev) == 88, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_create, cr_rdev));
+ LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_rdev) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_rdev));
+ LASSERTF((int)offsetof(struct mdt_rec_create, cr_ioepoch) == 96, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_create, cr_ioepoch));
+ LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_ioepoch) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_ioepoch));
+ LASSERTF((int)offsetof(struct mdt_rec_create, cr_padding_1) == 104, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_create, cr_padding_1));
+ LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_padding_1) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_padding_1));
+ LASSERTF((int)offsetof(struct mdt_rec_create, cr_mode) == 112, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_create, cr_mode));
+ LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_mode) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_mode));
+ LASSERTF((int)offsetof(struct mdt_rec_create, cr_bias) == 116, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_create, cr_bias));
+ LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_bias) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_bias));
+ LASSERTF((int)offsetof(struct mdt_rec_create, cr_flags_l) == 120, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_create, cr_flags_l));
+ LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_flags_l) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_flags_l));
+ LASSERTF((int)offsetof(struct mdt_rec_create, cr_flags_h) == 124, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_create, cr_flags_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_flags_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_flags_h));
+ LASSERTF((int)offsetof(struct mdt_rec_create, cr_umask) == 128, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_create, cr_umask));
+ LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_umask) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_umask));
+ LASSERTF((int)offsetof(struct mdt_rec_create, cr_padding_4) == 132, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_create, cr_padding_4));
+ LASSERTF((int)sizeof(((struct mdt_rec_create *)0)->cr_padding_4) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_create *)0)->cr_padding_4));
+
+ /* Checks for struct mdt_rec_link */
+ LASSERTF((int)sizeof(struct mdt_rec_link) == 136, "found %lld\n",
+ (long long)(int)sizeof(struct mdt_rec_link));
+ LASSERTF((int)offsetof(struct mdt_rec_link, lk_opcode) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_link, lk_opcode));
+ LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_opcode) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_opcode));
+ LASSERTF((int)offsetof(struct mdt_rec_link, lk_cap) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_link, lk_cap));
+ LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_cap) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_cap));
+ LASSERTF((int)offsetof(struct mdt_rec_link, lk_fsuid) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_link, lk_fsuid));
+ LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_fsuid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_fsuid));
+ LASSERTF((int)offsetof(struct mdt_rec_link, lk_fsuid_h) == 12, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_link, lk_fsuid_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_fsuid_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_fsuid_h));
+ LASSERTF((int)offsetof(struct mdt_rec_link, lk_fsgid) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_link, lk_fsgid));
+ LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_fsgid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_fsgid));
+ LASSERTF((int)offsetof(struct mdt_rec_link, lk_fsgid_h) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_link, lk_fsgid_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_fsgid_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_fsgid_h));
+ LASSERTF((int)offsetof(struct mdt_rec_link, lk_suppgid1) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_link, lk_suppgid1));
+ LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid1) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid1));
+ LASSERTF((int)offsetof(struct mdt_rec_link, lk_suppgid1_h) == 28, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_link, lk_suppgid1_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid1_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid1_h));
+ LASSERTF((int)offsetof(struct mdt_rec_link, lk_suppgid2) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_link, lk_suppgid2));
+ LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid2) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid2));
+ LASSERTF((int)offsetof(struct mdt_rec_link, lk_suppgid2_h) == 36, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_link, lk_suppgid2_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid2_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_suppgid2_h));
+ LASSERTF((int)offsetof(struct mdt_rec_link, lk_fid1) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_link, lk_fid1));
+ LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_fid1) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_fid1));
+ LASSERTF((int)offsetof(struct mdt_rec_link, lk_fid2) == 56, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_link, lk_fid2));
+ LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_fid2) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_fid2));
+ LASSERTF((int)offsetof(struct mdt_rec_link, lk_time) == 72, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_link, lk_time));
+ LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_time) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_time));
+ LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_1) == 80, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_link, lk_padding_1));
+ LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_1) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_1));
+ LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_2) == 88, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_link, lk_padding_2));
+ LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_2) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_2));
+ LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_3) == 96, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_link, lk_padding_3));
+ LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_3) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_3));
+ LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_4) == 104, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_link, lk_padding_4));
+ LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_4) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_4));
+ LASSERTF((int)offsetof(struct mdt_rec_link, lk_bias) == 112, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_link, lk_bias));
+ LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_bias) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_bias));
+ LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_5) == 116, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_link, lk_padding_5));
+ LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_5) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_5));
+ LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_6) == 120, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_link, lk_padding_6));
+ LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_6) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_6));
+ LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_7) == 124, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_link, lk_padding_7));
+ LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_7) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_7));
+ LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_8) == 128, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_link, lk_padding_8));
+ LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_8) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_8));
+ LASSERTF((int)offsetof(struct mdt_rec_link, lk_padding_9) == 132, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_link, lk_padding_9));
+ LASSERTF((int)sizeof(((struct mdt_rec_link *)0)->lk_padding_9) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_link *)0)->lk_padding_9));
+
+ /* Checks for struct mdt_rec_unlink */
+ LASSERTF((int)sizeof(struct mdt_rec_unlink) == 136, "found %lld\n",
+ (long long)(int)sizeof(struct mdt_rec_unlink));
+ LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_opcode) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_unlink, ul_opcode));
+ LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_opcode) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_opcode));
+ LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_cap) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_unlink, ul_cap));
+ LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_cap) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_cap));
+ LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_fsuid) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_unlink, ul_fsuid));
+ LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsuid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsuid));
+ LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_fsuid_h) == 12, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_unlink, ul_fsuid_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsuid_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsuid_h));
+ LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_fsgid) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_unlink, ul_fsgid));
+ LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsgid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsgid));
+ LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_fsgid_h) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_unlink, ul_fsgid_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsgid_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_fsgid_h));
+ LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_suppgid1) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_unlink, ul_suppgid1));
+ LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid1) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid1));
+ LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_suppgid1_h) == 28, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_unlink, ul_suppgid1_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid1_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid1_h));
+ LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_suppgid2) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_unlink, ul_suppgid2));
+ LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid2) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid2));
+ LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_suppgid2_h) == 36, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_unlink, ul_suppgid2_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid2_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_suppgid2_h));
+ LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_fid1) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_unlink, ul_fid1));
+ LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_fid1) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_fid1));
+ LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_fid2) == 56, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_unlink, ul_fid2));
+ LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_fid2) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_fid2));
+ LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_time) == 72, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_unlink, ul_time));
+ LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_time) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_time));
+ LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_2) == 80, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_2));
+ LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_2) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_2));
+ LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_3) == 88, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_3));
+ LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_3) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_3));
+ LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_4) == 96, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_4));
+ LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_4) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_4));
+ LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_5) == 104, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_5));
+ LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_5) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_5));
+ LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_bias) == 112, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_unlink, ul_bias));
+ LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_bias) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_bias));
+ LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_mode) == 116, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_unlink, ul_mode));
+ LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_mode) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_mode));
+ LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_6) == 120, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_6));
+ LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_6) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_6));
+ LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_7) == 124, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_7));
+ LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_7) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_7));
+ LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_8) == 128, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_8));
+ LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_8) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_8));
+ LASSERTF((int)offsetof(struct mdt_rec_unlink, ul_padding_9) == 132, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_unlink, ul_padding_9));
+ LASSERTF((int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_9) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_unlink *)0)->ul_padding_9));
+
+ /* Checks for struct mdt_rec_rename */
+ LASSERTF((int)sizeof(struct mdt_rec_rename) == 136, "found %lld\n",
+ (long long)(int)sizeof(struct mdt_rec_rename));
+ LASSERTF((int)offsetof(struct mdt_rec_rename, rn_opcode) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_rename, rn_opcode));
+ LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_opcode) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_opcode));
+ LASSERTF((int)offsetof(struct mdt_rec_rename, rn_cap) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_rename, rn_cap));
+ LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_cap) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_cap));
+ LASSERTF((int)offsetof(struct mdt_rec_rename, rn_fsuid) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_rename, rn_fsuid));
+ LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_fsuid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_fsuid));
+ LASSERTF((int)offsetof(struct mdt_rec_rename, rn_fsuid_h) == 12, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_rename, rn_fsuid_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_fsuid_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_fsuid_h));
+ LASSERTF((int)offsetof(struct mdt_rec_rename, rn_fsgid) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_rename, rn_fsgid));
+ LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_fsgid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_fsgid));
+ LASSERTF((int)offsetof(struct mdt_rec_rename, rn_fsgid_h) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_rename, rn_fsgid_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_fsgid_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_fsgid_h));
+ LASSERTF((int)offsetof(struct mdt_rec_rename, rn_suppgid1) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_rename, rn_suppgid1));
+ LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid1) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid1));
+ LASSERTF((int)offsetof(struct mdt_rec_rename, rn_suppgid1_h) == 28, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_rename, rn_suppgid1_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid1_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid1_h));
+ LASSERTF((int)offsetof(struct mdt_rec_rename, rn_suppgid2) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_rename, rn_suppgid2));
+ LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid2) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid2));
+ LASSERTF((int)offsetof(struct mdt_rec_rename, rn_suppgid2_h) == 36, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_rename, rn_suppgid2_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid2_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_suppgid2_h));
+ LASSERTF((int)offsetof(struct mdt_rec_rename, rn_fid1) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_rename, rn_fid1));
+ LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_fid1) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_fid1));
+ LASSERTF((int)offsetof(struct mdt_rec_rename, rn_fid2) == 56, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_rename, rn_fid2));
+ LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_fid2) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_fid2));
+ LASSERTF((int)offsetof(struct mdt_rec_rename, rn_time) == 72, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_rename, rn_time));
+ LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_time) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_time));
+ LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_1) == 80, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_1));
+ LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_1) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_1));
+ LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_2) == 88, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_2));
+ LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_2) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_2));
+ LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_3) == 96, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_3));
+ LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_3) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_3));
+ LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_4) == 104, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_4));
+ LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_4) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_4));
+ LASSERTF((int)offsetof(struct mdt_rec_rename, rn_bias) == 112, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_rename, rn_bias));
+ LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_bias) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_bias));
+ LASSERTF((int)offsetof(struct mdt_rec_rename, rn_mode) == 116, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_rename, rn_mode));
+ LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_mode) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_mode));
+ LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_5) == 120, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_5));
+ LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_5) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_5));
+ LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_6) == 124, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_6));
+ LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_6) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_6));
+ LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_7) == 128, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_7));
+ LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_7) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_7));
+ LASSERTF((int)offsetof(struct mdt_rec_rename, rn_padding_8) == 132, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_rename, rn_padding_8));
+ LASSERTF((int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_8) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_rename *)0)->rn_padding_8));
+
+ /* Checks for struct mdt_rec_setxattr */
+ LASSERTF((int)sizeof(struct mdt_rec_setxattr) == 136, "found %lld\n",
+ (long long)(int)sizeof(struct mdt_rec_setxattr));
+ LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_opcode) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setxattr, sx_opcode));
+ LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_opcode) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_opcode));
+ LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_cap) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setxattr, sx_cap));
+ LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_cap) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_cap));
+ LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_fsuid) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setxattr, sx_fsuid));
+ LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsuid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsuid));
+ LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_fsuid_h) == 12, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setxattr, sx_fsuid_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsuid_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsuid_h));
+ LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_fsgid) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setxattr, sx_fsgid));
+ LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsgid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsgid));
+ LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_fsgid_h) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setxattr, sx_fsgid_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsgid_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fsgid_h));
+ LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_suppgid1) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setxattr, sx_suppgid1));
+ LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid1) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid1));
+ LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_suppgid1_h) == 28, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setxattr, sx_suppgid1_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid1_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid1_h));
+ LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_suppgid2) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setxattr, sx_suppgid2));
+ LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid2) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid2));
+ LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_suppgid2_h) == 36, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setxattr, sx_suppgid2_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid2_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_suppgid2_h));
+ LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_fid) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setxattr, sx_fid));
+ LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fid) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_fid));
+ LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_1) == 56, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_1));
+ LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_1) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_1));
+ LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_2) == 64, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_2));
+ LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_2) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_2));
+ LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_3) == 68, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_3));
+ LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_3) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_3));
+ LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_valid) == 72, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setxattr, sx_valid));
+ LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_valid) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_valid));
+ LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_time) == 80, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setxattr, sx_time));
+ LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_time) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_time));
+ LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_5) == 88, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_5));
+ LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_5) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_5));
+ LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_6) == 96, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_6));
+ LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_6) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_6));
+ LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_7) == 104, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_7));
+ LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_7) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_7));
+ LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_size) == 112, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setxattr, sx_size));
+ LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_size) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_size));
+ LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_flags) == 116, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setxattr, sx_flags));
+ LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_flags));
+ LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_8) == 120, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_8));
+ LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_8) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_8));
+ LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_9) == 124, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_9));
+ LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_9) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_9));
+ LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_10) == 128, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_10));
+ LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_10) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_10));
+ LASSERTF((int)offsetof(struct mdt_rec_setxattr, sx_padding_11) == 132, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_setxattr, sx_padding_11));
+ LASSERTF((int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_11) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_setxattr *)0)->sx_padding_11));
+
+ /* Checks for struct mdt_rec_reint */
+ LASSERTF((int)sizeof(struct mdt_rec_reint) == 136, "found %lld\n",
+ (long long)(int)sizeof(struct mdt_rec_reint));
+ LASSERTF((int)offsetof(struct mdt_rec_reint, rr_opcode) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_reint, rr_opcode));
+ LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_opcode) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_opcode));
+ LASSERTF((int)offsetof(struct mdt_rec_reint, rr_cap) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_reint, rr_cap));
+ LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_cap) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_cap));
+ LASSERTF((int)offsetof(struct mdt_rec_reint, rr_fsuid) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_reint, rr_fsuid));
+ LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_fsuid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_fsuid));
+ LASSERTF((int)offsetof(struct mdt_rec_reint, rr_fsuid_h) == 12, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_reint, rr_fsuid_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_fsuid_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_fsuid_h));
+ LASSERTF((int)offsetof(struct mdt_rec_reint, rr_fsgid) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_reint, rr_fsgid));
+ LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_fsgid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_fsgid));
+ LASSERTF((int)offsetof(struct mdt_rec_reint, rr_fsgid_h) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_reint, rr_fsgid_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_fsgid_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_fsgid_h));
+ LASSERTF((int)offsetof(struct mdt_rec_reint, rr_suppgid1) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_reint, rr_suppgid1));
+ LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid1) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid1));
+ LASSERTF((int)offsetof(struct mdt_rec_reint, rr_suppgid1_h) == 28, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_reint, rr_suppgid1_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid1_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid1_h));
+ LASSERTF((int)offsetof(struct mdt_rec_reint, rr_suppgid2) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_reint, rr_suppgid2));
+ LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid2) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid2));
+ LASSERTF((int)offsetof(struct mdt_rec_reint, rr_suppgid2_h) == 36, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_reint, rr_suppgid2_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid2_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_suppgid2_h));
+ LASSERTF((int)offsetof(struct mdt_rec_reint, rr_fid1) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_reint, rr_fid1));
+ LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_fid1) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_fid1));
+ LASSERTF((int)offsetof(struct mdt_rec_reint, rr_fid2) == 56, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_reint, rr_fid2));
+ LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_fid2) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_fid2));
+ LASSERTF((int)offsetof(struct mdt_rec_reint, rr_mtime) == 72, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_reint, rr_mtime));
+ LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_mtime) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_mtime));
+ LASSERTF((int)offsetof(struct mdt_rec_reint, rr_atime) == 80, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_reint, rr_atime));
+ LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_atime) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_atime));
+ LASSERTF((int)offsetof(struct mdt_rec_reint, rr_ctime) == 88, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_reint, rr_ctime));
+ LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_ctime) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_ctime));
+ LASSERTF((int)offsetof(struct mdt_rec_reint, rr_size) == 96, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_reint, rr_size));
+ LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_size) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_size));
+ LASSERTF((int)offsetof(struct mdt_rec_reint, rr_blocks) == 104, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_reint, rr_blocks));
+ LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_blocks) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_blocks));
+ LASSERTF((int)offsetof(struct mdt_rec_reint, rr_bias) == 112, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_reint, rr_bias));
+ LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_bias) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_bias));
+ LASSERTF((int)offsetof(struct mdt_rec_reint, rr_mode) == 116, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_reint, rr_mode));
+ LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_mode) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_mode));
+ LASSERTF((int)offsetof(struct mdt_rec_reint, rr_flags) == 120, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_reint, rr_flags));
+ LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_flags));
+ LASSERTF((int)offsetof(struct mdt_rec_reint, rr_flags_h) == 124, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_reint, rr_flags_h));
+ LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_flags_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_flags_h));
+ LASSERTF((int)offsetof(struct mdt_rec_reint, rr_umask) == 128, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_reint, rr_umask));
+ LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_umask) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_umask));
+ LASSERTF((int)offsetof(struct mdt_rec_reint, rr_padding_4) == 132, "found %lld\n",
+ (long long)(int)offsetof(struct mdt_rec_reint, rr_padding_4));
+ LASSERTF((int)sizeof(((struct mdt_rec_reint *)0)->rr_padding_4) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mdt_rec_reint *)0)->rr_padding_4));
+
+ /* Checks for struct lmv_desc */
+ LASSERTF((int)sizeof(struct lmv_desc) == 88, "found %lld\n",
+ (long long)(int)sizeof(struct lmv_desc));
+ LASSERTF((int)offsetof(struct lmv_desc, ld_tgt_count) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_desc, ld_tgt_count));
+ LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_tgt_count) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_desc *)0)->ld_tgt_count));
+ LASSERTF((int)offsetof(struct lmv_desc, ld_active_tgt_count) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_desc, ld_active_tgt_count));
+ LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_active_tgt_count) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_desc *)0)->ld_active_tgt_count));
+ LASSERTF((int)offsetof(struct lmv_desc, ld_default_stripe_count) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_desc, ld_default_stripe_count));
+ LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_default_stripe_count) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_desc *)0)->ld_default_stripe_count));
+ LASSERTF((int)offsetof(struct lmv_desc, ld_pattern) == 12, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_desc, ld_pattern));
+ LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_pattern) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_desc *)0)->ld_pattern));
+ LASSERTF((int)offsetof(struct lmv_desc, ld_default_hash_size) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_desc, ld_default_hash_size));
+ LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_default_hash_size) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_desc *)0)->ld_default_hash_size));
+ LASSERTF((int)offsetof(struct lmv_desc, ld_padding_1) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_desc, ld_padding_1));
+ LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_padding_1) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_desc *)0)->ld_padding_1));
+ LASSERTF((int)offsetof(struct lmv_desc, ld_padding_2) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_desc, ld_padding_2));
+ LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_padding_2) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_desc *)0)->ld_padding_2));
+ LASSERTF((int)offsetof(struct lmv_desc, ld_qos_maxage) == 36, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_desc, ld_qos_maxage));
+ LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_qos_maxage) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_desc *)0)->ld_qos_maxage));
+ LASSERTF((int)offsetof(struct lmv_desc, ld_padding_3) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_desc, ld_padding_3));
+ LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_padding_3) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_desc *)0)->ld_padding_3));
+ LASSERTF((int)offsetof(struct lmv_desc, ld_padding_4) == 44, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_desc, ld_padding_4));
+ LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_padding_4) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_desc *)0)->ld_padding_4));
+ LASSERTF((int)offsetof(struct lmv_desc, ld_uuid) == 48, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_desc, ld_uuid));
+ LASSERTF((int)sizeof(((struct lmv_desc *)0)->ld_uuid) == 40, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_desc *)0)->ld_uuid));
+
+ /* Checks for struct lmv_stripe_md */
+ LASSERTF((int)sizeof(struct lmv_stripe_md) == 32, "found %lld\n",
+ (long long)(int)sizeof(struct lmv_stripe_md));
+ LASSERTF((int)offsetof(struct lmv_stripe_md, mea_magic) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_stripe_md, mea_magic));
+ LASSERTF((int)sizeof(((struct lmv_stripe_md *)0)->mea_magic) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_stripe_md *)0)->mea_magic));
+ LASSERTF((int)offsetof(struct lmv_stripe_md, mea_count) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_stripe_md, mea_count));
+ LASSERTF((int)sizeof(((struct lmv_stripe_md *)0)->mea_count) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_stripe_md *)0)->mea_count));
+ LASSERTF((int)offsetof(struct lmv_stripe_md, mea_master) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_stripe_md, mea_master));
+ LASSERTF((int)sizeof(((struct lmv_stripe_md *)0)->mea_master) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_stripe_md *)0)->mea_master));
+ LASSERTF((int)offsetof(struct lmv_stripe_md, mea_padding) == 12, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_stripe_md, mea_padding));
+ LASSERTF((int)sizeof(((struct lmv_stripe_md *)0)->mea_padding) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_stripe_md *)0)->mea_padding));
+ CLASSERT(LOV_MAXPOOLNAME == 16);
+ LASSERTF((int)offsetof(struct lmv_stripe_md, mea_pool_name[16]) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_stripe_md, mea_pool_name[16]));
+ LASSERTF((int)sizeof(((struct lmv_stripe_md *)0)->mea_pool_name[16]) == 1, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_stripe_md *)0)->mea_pool_name[16]));
+ LASSERTF((int)offsetof(struct lmv_stripe_md, mea_ids[0]) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct lmv_stripe_md, mea_ids[0]));
+ LASSERTF((int)sizeof(((struct lmv_stripe_md *)0)->mea_ids[0]) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct lmv_stripe_md *)0)->mea_ids[0]));
+
+ /* Checks for struct lov_desc */
+ LASSERTF((int)sizeof(struct lov_desc) == 88, "found %lld\n",
+ (long long)(int)sizeof(struct lov_desc));
+ LASSERTF((int)offsetof(struct lov_desc, ld_tgt_count) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct lov_desc, ld_tgt_count));
+ LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_tgt_count) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_desc *)0)->ld_tgt_count));
+ LASSERTF((int)offsetof(struct lov_desc, ld_active_tgt_count) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct lov_desc, ld_active_tgt_count));
+ LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_active_tgt_count) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_desc *)0)->ld_active_tgt_count));
+ LASSERTF((int)offsetof(struct lov_desc, ld_default_stripe_count) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct lov_desc, ld_default_stripe_count));
+ LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_default_stripe_count) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_desc *)0)->ld_default_stripe_count));
+ LASSERTF((int)offsetof(struct lov_desc, ld_pattern) == 12, "found %lld\n",
+ (long long)(int)offsetof(struct lov_desc, ld_pattern));
+ LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_pattern) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_desc *)0)->ld_pattern));
+ LASSERTF((int)offsetof(struct lov_desc, ld_default_stripe_size) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct lov_desc, ld_default_stripe_size));
+ LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_default_stripe_size) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_desc *)0)->ld_default_stripe_size));
+ LASSERTF((int)offsetof(struct lov_desc, ld_default_stripe_offset) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct lov_desc, ld_default_stripe_offset));
+ LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_default_stripe_offset) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_desc *)0)->ld_default_stripe_offset));
+ LASSERTF((int)offsetof(struct lov_desc, ld_padding_0) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct lov_desc, ld_padding_0));
+ LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_padding_0) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_desc *)0)->ld_padding_0));
+ LASSERTF((int)offsetof(struct lov_desc, ld_qos_maxage) == 36, "found %lld\n",
+ (long long)(int)offsetof(struct lov_desc, ld_qos_maxage));
+ LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_qos_maxage) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_desc *)0)->ld_qos_maxage));
+ LASSERTF((int)offsetof(struct lov_desc, ld_padding_1) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct lov_desc, ld_padding_1));
+ LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_padding_1) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_desc *)0)->ld_padding_1));
+ LASSERTF((int)offsetof(struct lov_desc, ld_padding_2) == 44, "found %lld\n",
+ (long long)(int)offsetof(struct lov_desc, ld_padding_2));
+ LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_padding_2) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_desc *)0)->ld_padding_2));
+ LASSERTF((int)offsetof(struct lov_desc, ld_uuid) == 48, "found %lld\n",
+ (long long)(int)offsetof(struct lov_desc, ld_uuid));
+ LASSERTF((int)sizeof(((struct lov_desc *)0)->ld_uuid) == 40, "found %lld\n",
+ (long long)(int)sizeof(((struct lov_desc *)0)->ld_uuid));
+ CLASSERT(LOV_DESC_MAGIC == 0xB0CCDE5C);
+
+ /* Checks for struct ldlm_res_id */
+ LASSERTF((int)sizeof(struct ldlm_res_id) == 32, "found %lld\n",
+ (long long)(int)sizeof(struct ldlm_res_id));
+ CLASSERT(RES_NAME_SIZE == 4);
+ LASSERTF((int)offsetof(struct ldlm_res_id, name[4]) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_res_id, name[4]));
+ LASSERTF((int)sizeof(((struct ldlm_res_id *)0)->name[4]) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_res_id *)0)->name[4]));
+
+ /* Checks for struct ldlm_extent */
+ LASSERTF((int)sizeof(struct ldlm_extent) == 24, "found %lld\n",
+ (long long)(int)sizeof(struct ldlm_extent));
+ LASSERTF((int)offsetof(struct ldlm_extent, start) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_extent, start));
+ LASSERTF((int)sizeof(((struct ldlm_extent *)0)->start) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_extent *)0)->start));
+ LASSERTF((int)offsetof(struct ldlm_extent, end) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_extent, end));
+ LASSERTF((int)sizeof(((struct ldlm_extent *)0)->end) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_extent *)0)->end));
+ LASSERTF((int)offsetof(struct ldlm_extent, gid) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_extent, gid));
+ LASSERTF((int)sizeof(((struct ldlm_extent *)0)->gid) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_extent *)0)->gid));
+
+ /* Checks for struct ldlm_inodebits */
+ LASSERTF((int)sizeof(struct ldlm_inodebits) == 8, "found %lld\n",
+ (long long)(int)sizeof(struct ldlm_inodebits));
+ LASSERTF((int)offsetof(struct ldlm_inodebits, bits) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_inodebits, bits));
+ LASSERTF((int)sizeof(((struct ldlm_inodebits *)0)->bits) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_inodebits *)0)->bits));
+
+ /* Checks for struct ldlm_flock_wire */
+ LASSERTF((int)sizeof(struct ldlm_flock_wire) == 32, "found %lld\n",
+ (long long)(int)sizeof(struct ldlm_flock_wire));
+ LASSERTF((int)offsetof(struct ldlm_flock_wire, lfw_start) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_flock_wire, lfw_start));
+ LASSERTF((int)sizeof(((struct ldlm_flock_wire *)0)->lfw_start) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_flock_wire *)0)->lfw_start));
+ LASSERTF((int)offsetof(struct ldlm_flock_wire, lfw_end) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_flock_wire, lfw_end));
+ LASSERTF((int)sizeof(((struct ldlm_flock_wire *)0)->lfw_end) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_flock_wire *)0)->lfw_end));
+ LASSERTF((int)offsetof(struct ldlm_flock_wire, lfw_owner) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_flock_wire, lfw_owner));
+ LASSERTF((int)sizeof(((struct ldlm_flock_wire *)0)->lfw_owner) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_flock_wire *)0)->lfw_owner));
+ LASSERTF((int)offsetof(struct ldlm_flock_wire, lfw_padding) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_flock_wire, lfw_padding));
+ LASSERTF((int)sizeof(((struct ldlm_flock_wire *)0)->lfw_padding) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_flock_wire *)0)->lfw_padding));
+ LASSERTF((int)offsetof(struct ldlm_flock_wire, lfw_pid) == 28, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_flock_wire, lfw_pid));
+ LASSERTF((int)sizeof(((struct ldlm_flock_wire *)0)->lfw_pid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_flock_wire *)0)->lfw_pid));
+
+ /* Checks for struct ldlm_intent */
+ LASSERTF((int)sizeof(struct ldlm_intent) == 8, "found %lld\n",
+ (long long)(int)sizeof(struct ldlm_intent));
+ LASSERTF((int)offsetof(struct ldlm_intent, opc) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_intent, opc));
+ LASSERTF((int)sizeof(((struct ldlm_intent *)0)->opc) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_intent *)0)->opc));
+
+ /* Checks for struct ldlm_resource_desc */
+ LASSERTF((int)sizeof(struct ldlm_resource_desc) == 40, "found %lld\n",
+ (long long)(int)sizeof(struct ldlm_resource_desc));
+ LASSERTF((int)offsetof(struct ldlm_resource_desc, lr_type) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_resource_desc, lr_type));
+ LASSERTF((int)sizeof(((struct ldlm_resource_desc *)0)->lr_type) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_resource_desc *)0)->lr_type));
+ LASSERTF((int)offsetof(struct ldlm_resource_desc, lr_padding) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_resource_desc, lr_padding));
+ LASSERTF((int)sizeof(((struct ldlm_resource_desc *)0)->lr_padding) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_resource_desc *)0)->lr_padding));
+ LASSERTF((int)offsetof(struct ldlm_resource_desc, lr_name) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_resource_desc, lr_name));
+ LASSERTF((int)sizeof(((struct ldlm_resource_desc *)0)->lr_name) == 32, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_resource_desc *)0)->lr_name));
+
+ /* Checks for struct ldlm_lock_desc */
+ LASSERTF((int)sizeof(struct ldlm_lock_desc) == 80, "found %lld\n",
+ (long long)(int)sizeof(struct ldlm_lock_desc));
+ LASSERTF((int)offsetof(struct ldlm_lock_desc, l_resource) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_lock_desc, l_resource));
+ LASSERTF((int)sizeof(((struct ldlm_lock_desc *)0)->l_resource) == 40, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_lock_desc *)0)->l_resource));
+ LASSERTF((int)offsetof(struct ldlm_lock_desc, l_req_mode) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_lock_desc, l_req_mode));
+ LASSERTF((int)sizeof(((struct ldlm_lock_desc *)0)->l_req_mode) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_lock_desc *)0)->l_req_mode));
+ LASSERTF((int)offsetof(struct ldlm_lock_desc, l_granted_mode) == 44, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_lock_desc, l_granted_mode));
+ LASSERTF((int)sizeof(((struct ldlm_lock_desc *)0)->l_granted_mode) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_lock_desc *)0)->l_granted_mode));
+ LASSERTF((int)offsetof(struct ldlm_lock_desc, l_policy_data) == 48, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_lock_desc, l_policy_data));
+ LASSERTF((int)sizeof(((struct ldlm_lock_desc *)0)->l_policy_data) == 32, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_lock_desc *)0)->l_policy_data));
+
+ /* Checks for struct ldlm_request */
+ LASSERTF((int)sizeof(struct ldlm_request) == 104, "found %lld\n",
+ (long long)(int)sizeof(struct ldlm_request));
+ LASSERTF((int)offsetof(struct ldlm_request, lock_flags) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_request, lock_flags));
+ LASSERTF((int)sizeof(((struct ldlm_request *)0)->lock_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_request *)0)->lock_flags));
+ LASSERTF((int)offsetof(struct ldlm_request, lock_count) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_request, lock_count));
+ LASSERTF((int)sizeof(((struct ldlm_request *)0)->lock_count) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_request *)0)->lock_count));
+ LASSERTF((int)offsetof(struct ldlm_request, lock_desc) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_request, lock_desc));
+ LASSERTF((int)sizeof(((struct ldlm_request *)0)->lock_desc) == 80, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_request *)0)->lock_desc));
+ LASSERTF((int)offsetof(struct ldlm_request, lock_handle) == 88, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_request, lock_handle));
+ LASSERTF((int)sizeof(((struct ldlm_request *)0)->lock_handle) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_request *)0)->lock_handle));
+
+ /* Checks for struct ldlm_reply */
+ LASSERTF((int)sizeof(struct ldlm_reply) == 112, "found %lld\n",
+ (long long)(int)sizeof(struct ldlm_reply));
+ LASSERTF((int)offsetof(struct ldlm_reply, lock_flags) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_reply, lock_flags));
+ LASSERTF((int)sizeof(((struct ldlm_reply *)0)->lock_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_reply *)0)->lock_flags));
+ LASSERTF((int)offsetof(struct ldlm_reply, lock_padding) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_reply, lock_padding));
+ LASSERTF((int)sizeof(((struct ldlm_reply *)0)->lock_padding) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_reply *)0)->lock_padding));
+ LASSERTF((int)offsetof(struct ldlm_reply, lock_desc) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_reply, lock_desc));
+ LASSERTF((int)sizeof(((struct ldlm_reply *)0)->lock_desc) == 80, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_reply *)0)->lock_desc));
+ LASSERTF((int)offsetof(struct ldlm_reply, lock_handle) == 88, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_reply, lock_handle));
+ LASSERTF((int)sizeof(((struct ldlm_reply *)0)->lock_handle) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_reply *)0)->lock_handle));
+ LASSERTF((int)offsetof(struct ldlm_reply, lock_policy_res1) == 96, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_reply, lock_policy_res1));
+ LASSERTF((int)sizeof(((struct ldlm_reply *)0)->lock_policy_res1) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_reply *)0)->lock_policy_res1));
+ LASSERTF((int)offsetof(struct ldlm_reply, lock_policy_res2) == 104, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_reply, lock_policy_res2));
+ LASSERTF((int)sizeof(((struct ldlm_reply *)0)->lock_policy_res2) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_reply *)0)->lock_policy_res2));
+
+ /* Checks for struct ost_lvb_v1 */
+ LASSERTF((int)sizeof(struct ost_lvb_v1) == 40, "found %lld\n",
+ (long long)(int)sizeof(struct ost_lvb_v1));
+ LASSERTF((int)offsetof(struct ost_lvb_v1, lvb_size) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct ost_lvb_v1, lvb_size));
+ LASSERTF((int)sizeof(((struct ost_lvb_v1 *)0)->lvb_size) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ost_lvb_v1 *)0)->lvb_size));
+ LASSERTF((int)offsetof(struct ost_lvb_v1, lvb_mtime) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct ost_lvb_v1, lvb_mtime));
+ LASSERTF((int)sizeof(((struct ost_lvb_v1 *)0)->lvb_mtime) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ost_lvb_v1 *)0)->lvb_mtime));
+ LASSERTF((int)offsetof(struct ost_lvb_v1, lvb_atime) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct ost_lvb_v1, lvb_atime));
+ LASSERTF((int)sizeof(((struct ost_lvb_v1 *)0)->lvb_atime) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ost_lvb_v1 *)0)->lvb_atime));
+ LASSERTF((int)offsetof(struct ost_lvb_v1, lvb_ctime) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct ost_lvb_v1, lvb_ctime));
+ LASSERTF((int)sizeof(((struct ost_lvb_v1 *)0)->lvb_ctime) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ost_lvb_v1 *)0)->lvb_ctime));
+ LASSERTF((int)offsetof(struct ost_lvb_v1, lvb_blocks) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct ost_lvb_v1, lvb_blocks));
+ LASSERTF((int)sizeof(((struct ost_lvb_v1 *)0)->lvb_blocks) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ost_lvb_v1 *)0)->lvb_blocks));
+
+ /* Checks for struct ost_lvb */
+ LASSERTF((int)sizeof(struct ost_lvb) == 56, "found %lld\n",
+ (long long)(int)sizeof(struct ost_lvb));
+ LASSERTF((int)offsetof(struct ost_lvb, lvb_size) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct ost_lvb, lvb_size));
+ LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_size) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_size));
+ LASSERTF((int)offsetof(struct ost_lvb, lvb_mtime) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct ost_lvb, lvb_mtime));
+ LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_mtime) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_mtime));
+ LASSERTF((int)offsetof(struct ost_lvb, lvb_atime) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct ost_lvb, lvb_atime));
+ LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_atime) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_atime));
+ LASSERTF((int)offsetof(struct ost_lvb, lvb_ctime) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct ost_lvb, lvb_ctime));
+ LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_ctime) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_ctime));
+ LASSERTF((int)offsetof(struct ost_lvb, lvb_blocks) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct ost_lvb, lvb_blocks));
+ LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_blocks) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_blocks));
+ LASSERTF((int)offsetof(struct ost_lvb, lvb_mtime_ns) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct ost_lvb, lvb_mtime_ns));
+ LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_mtime_ns) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_mtime_ns));
+ LASSERTF((int)offsetof(struct ost_lvb, lvb_atime_ns) == 44, "found %lld\n",
+ (long long)(int)offsetof(struct ost_lvb, lvb_atime_ns));
+ LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_atime_ns) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_atime_ns));
+ LASSERTF((int)offsetof(struct ost_lvb, lvb_ctime_ns) == 48, "found %lld\n",
+ (long long)(int)offsetof(struct ost_lvb, lvb_ctime_ns));
+ LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_ctime_ns) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_ctime_ns));
+ LASSERTF((int)offsetof(struct ost_lvb, lvb_padding) == 52, "found %lld\n",
+ (long long)(int)offsetof(struct ost_lvb, lvb_padding));
+ LASSERTF((int)sizeof(((struct ost_lvb *)0)->lvb_padding) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ost_lvb *)0)->lvb_padding));
+
+ /* Checks for struct lquota_lvb */
+ LASSERTF((int)sizeof(struct lquota_lvb) == 40, "found %lld\n",
+ (long long)(int)sizeof(struct lquota_lvb));
+ LASSERTF((int)offsetof(struct lquota_lvb, lvb_flags) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct lquota_lvb, lvb_flags));
+ LASSERTF((int)sizeof(((struct lquota_lvb *)0)->lvb_flags) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lquota_lvb *)0)->lvb_flags));
+ LASSERTF((int)offsetof(struct lquota_lvb, lvb_id_may_rel) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct lquota_lvb, lvb_id_may_rel));
+ LASSERTF((int)sizeof(((struct lquota_lvb *)0)->lvb_id_may_rel) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lquota_lvb *)0)->lvb_id_may_rel));
+ LASSERTF((int)offsetof(struct lquota_lvb, lvb_id_rel) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct lquota_lvb, lvb_id_rel));
+ LASSERTF((int)sizeof(((struct lquota_lvb *)0)->lvb_id_rel) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lquota_lvb *)0)->lvb_id_rel));
+ LASSERTF((int)offsetof(struct lquota_lvb, lvb_id_qunit) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct lquota_lvb, lvb_id_qunit));
+ LASSERTF((int)sizeof(((struct lquota_lvb *)0)->lvb_id_qunit) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lquota_lvb *)0)->lvb_id_qunit));
+ LASSERTF((int)offsetof(struct lquota_lvb, lvb_pad1) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct lquota_lvb, lvb_pad1));
+ LASSERTF((int)sizeof(((struct lquota_lvb *)0)->lvb_pad1) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lquota_lvb *)0)->lvb_pad1));
+ LASSERTF(LQUOTA_FL_EDQUOT == 1, "found %lld\n",
+ (long long)LQUOTA_FL_EDQUOT);
+
+ /* Checks for struct ldlm_gl_lquota_desc */
+ LASSERTF((int)sizeof(struct ldlm_gl_lquota_desc) == 64, "found %lld\n",
+ (long long)(int)sizeof(struct ldlm_gl_lquota_desc));
+ LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_id) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_id));
+ LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_id) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_id));
+ LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_flags) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_flags));
+ LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_flags) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_flags));
+ LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_ver) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_ver));
+ LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_ver) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_ver));
+ LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_hardlimit) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_hardlimit));
+ LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_hardlimit) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_hardlimit));
+ LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_softlimit) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_softlimit));
+ LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_softlimit) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_softlimit));
+ LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_time) == 48, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_time));
+ LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_time) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_time));
+ LASSERTF((int)offsetof(struct ldlm_gl_lquota_desc, gl_pad2) == 56, "found %lld\n",
+ (long long)(int)offsetof(struct ldlm_gl_lquota_desc, gl_pad2));
+ LASSERTF((int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_pad2) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ldlm_gl_lquota_desc *)0)->gl_pad2));
+
+ /* Checks for struct mgs_send_param */
+ LASSERTF((int)sizeof(struct mgs_send_param) == 1024, "found %lld\n",
+ (long long)(int)sizeof(struct mgs_send_param));
+ CLASSERT(MGS_PARAM_MAXLEN == 1024);
+ LASSERTF((int)offsetof(struct mgs_send_param, mgs_param[1024]) == 1024, "found %lld\n",
+ (long long)(int)offsetof(struct mgs_send_param, mgs_param[1024]));
+ LASSERTF((int)sizeof(((struct mgs_send_param *)0)->mgs_param[1024]) == 1, "found %lld\n",
+ (long long)(int)sizeof(((struct mgs_send_param *)0)->mgs_param[1024]));
+
+ /* Checks for struct cfg_marker */
+ LASSERTF((int)sizeof(struct cfg_marker) == 160, "found %lld\n",
+ (long long)(int)sizeof(struct cfg_marker));
+ LASSERTF((int)offsetof(struct cfg_marker, cm_step) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct cfg_marker, cm_step));
+ LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_step) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct cfg_marker *)0)->cm_step));
+ LASSERTF((int)offsetof(struct cfg_marker, cm_flags) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct cfg_marker, cm_flags));
+ LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct cfg_marker *)0)->cm_flags));
+ LASSERTF((int)offsetof(struct cfg_marker, cm_vers) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct cfg_marker, cm_vers));
+ LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_vers) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct cfg_marker *)0)->cm_vers));
+ LASSERTF((int)offsetof(struct cfg_marker, cm_padding) == 12, "found %lld\n",
+ (long long)(int)offsetof(struct cfg_marker, cm_padding));
+ LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_padding) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct cfg_marker *)0)->cm_padding));
+ LASSERTF((int)offsetof(struct cfg_marker, cm_createtime) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct cfg_marker, cm_createtime));
+ LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_createtime) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct cfg_marker *)0)->cm_createtime));
+ LASSERTF((int)offsetof(struct cfg_marker, cm_canceltime) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct cfg_marker, cm_canceltime));
+ LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_canceltime) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct cfg_marker *)0)->cm_canceltime));
+ LASSERTF((int)offsetof(struct cfg_marker, cm_tgtname) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct cfg_marker, cm_tgtname));
+ LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_tgtname) == 64, "found %lld\n",
+ (long long)(int)sizeof(((struct cfg_marker *)0)->cm_tgtname));
+ LASSERTF((int)offsetof(struct cfg_marker, cm_comment) == 96, "found %lld\n",
+ (long long)(int)offsetof(struct cfg_marker, cm_comment));
+ LASSERTF((int)sizeof(((struct cfg_marker *)0)->cm_comment) == 64, "found %lld\n",
+ (long long)(int)sizeof(((struct cfg_marker *)0)->cm_comment));
+
+ /* Checks for struct llog_logid */
+ LASSERTF((int)sizeof(struct llog_logid) == 20, "found %lld\n",
+ (long long)(int)sizeof(struct llog_logid));
+ LASSERTF((int)offsetof(struct llog_logid, lgl_oi) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct llog_logid, lgl_oi));
+ LASSERTF((int)sizeof(((struct llog_logid *)0)->lgl_oi) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_logid *)0)->lgl_oi));
+ LASSERTF((int)offsetof(struct llog_logid, lgl_ogen) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct llog_logid, lgl_ogen));
+ LASSERTF((int)sizeof(((struct llog_logid *)0)->lgl_ogen) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_logid *)0)->lgl_ogen));
+ CLASSERT(OST_SZ_REC == 274730752);
+ CLASSERT(MDS_UNLINK_REC == 274801668);
+ CLASSERT(MDS_UNLINK64_REC == 275325956);
+ CLASSERT(MDS_SETATTR64_REC == 275325953);
+ CLASSERT(OBD_CFG_REC == 274857984);
+ CLASSERT(LLOG_GEN_REC == 274989056);
+ CLASSERT(CHANGELOG_REC == 275120128);
+ CLASSERT(CHANGELOG_USER_REC == 275185664);
+ CLASSERT(LLOG_HDR_MAGIC == 275010873);
+ CLASSERT(LLOG_LOGID_MAGIC == 275010875);
+
+ /* Checks for struct llog_catid */
+ LASSERTF((int)sizeof(struct llog_catid) == 32, "found %lld\n",
+ (long long)(int)sizeof(struct llog_catid));
+ LASSERTF((int)offsetof(struct llog_catid, lci_logid) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct llog_catid, lci_logid));
+ LASSERTF((int)sizeof(((struct llog_catid *)0)->lci_logid) == 20, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_catid *)0)->lci_logid));
+ LASSERTF((int)offsetof(struct llog_catid, lci_padding1) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct llog_catid, lci_padding1));
+ LASSERTF((int)sizeof(((struct llog_catid *)0)->lci_padding1) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_catid *)0)->lci_padding1));
+ LASSERTF((int)offsetof(struct llog_catid, lci_padding2) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct llog_catid, lci_padding2));
+ LASSERTF((int)sizeof(((struct llog_catid *)0)->lci_padding2) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_catid *)0)->lci_padding2));
+ LASSERTF((int)offsetof(struct llog_catid, lci_padding3) == 28, "found %lld\n",
+ (long long)(int)offsetof(struct llog_catid, lci_padding3));
+ LASSERTF((int)sizeof(((struct llog_catid *)0)->lci_padding3) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_catid *)0)->lci_padding3));
+
+ /* Checks for struct llog_rec_hdr */
+ LASSERTF((int)sizeof(struct llog_rec_hdr) == 16, "found %lld\n",
+ (long long)(int)sizeof(struct llog_rec_hdr));
+ LASSERTF((int)offsetof(struct llog_rec_hdr, lrh_len) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct llog_rec_hdr, lrh_len));
+ LASSERTF((int)sizeof(((struct llog_rec_hdr *)0)->lrh_len) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_rec_hdr *)0)->lrh_len));
+ LASSERTF((int)offsetof(struct llog_rec_hdr, lrh_index) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct llog_rec_hdr, lrh_index));
+ LASSERTF((int)sizeof(((struct llog_rec_hdr *)0)->lrh_index) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_rec_hdr *)0)->lrh_index));
+ LASSERTF((int)offsetof(struct llog_rec_hdr, lrh_type) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct llog_rec_hdr, lrh_type));
+ LASSERTF((int)sizeof(((struct llog_rec_hdr *)0)->lrh_type) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_rec_hdr *)0)->lrh_type));
+ LASSERTF((int)offsetof(struct llog_rec_hdr, lrh_id) == 12, "found %lld\n",
+ (long long)(int)offsetof(struct llog_rec_hdr, lrh_id));
+ LASSERTF((int)sizeof(((struct llog_rec_hdr *)0)->lrh_id) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_rec_hdr *)0)->lrh_id));
+
+ /* Checks for struct llog_rec_tail */
+ LASSERTF((int)sizeof(struct llog_rec_tail) == 8, "found %lld\n",
+ (long long)(int)sizeof(struct llog_rec_tail));
+ LASSERTF((int)offsetof(struct llog_rec_tail, lrt_len) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct llog_rec_tail, lrt_len));
+ LASSERTF((int)sizeof(((struct llog_rec_tail *)0)->lrt_len) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_rec_tail *)0)->lrt_len));
+ LASSERTF((int)offsetof(struct llog_rec_tail, lrt_index) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct llog_rec_tail, lrt_index));
+ LASSERTF((int)sizeof(((struct llog_rec_tail *)0)->lrt_index) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_rec_tail *)0)->lrt_index));
+
+ /* Checks for struct llog_logid_rec */
+ LASSERTF((int)sizeof(struct llog_logid_rec) == 64, "found %lld\n",
+ (long long)(int)sizeof(struct llog_logid_rec));
+ LASSERTF((int)offsetof(struct llog_logid_rec, lid_hdr) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct llog_logid_rec, lid_hdr));
+ LASSERTF((int)sizeof(((struct llog_logid_rec *)0)->lid_hdr) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_logid_rec *)0)->lid_hdr));
+ LASSERTF((int)offsetof(struct llog_logid_rec, lid_id) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct llog_logid_rec, lid_id));
+ LASSERTF((int)sizeof(((struct llog_logid_rec *)0)->lid_id) == 20, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_logid_rec *)0)->lid_id));
+ LASSERTF((int)offsetof(struct llog_logid_rec, lid_padding1) == 36, "found %lld\n",
+ (long long)(int)offsetof(struct llog_logid_rec, lid_padding1));
+ LASSERTF((int)sizeof(((struct llog_logid_rec *)0)->lid_padding1) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_logid_rec *)0)->lid_padding1));
+ LASSERTF((int)offsetof(struct llog_logid_rec, lid_padding2) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct llog_logid_rec, lid_padding2));
+ LASSERTF((int)sizeof(((struct llog_logid_rec *)0)->lid_padding2) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_logid_rec *)0)->lid_padding2));
+ LASSERTF((int)offsetof(struct llog_logid_rec, lid_padding3) == 48, "found %lld\n",
+ (long long)(int)offsetof(struct llog_logid_rec, lid_padding3));
+ LASSERTF((int)sizeof(((struct llog_logid_rec *)0)->lid_padding3) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_logid_rec *)0)->lid_padding3));
+ LASSERTF((int)offsetof(struct llog_logid_rec, lid_tail) == 56, "found %lld\n",
+ (long long)(int)offsetof(struct llog_logid_rec, lid_tail));
+ LASSERTF((int)sizeof(((struct llog_logid_rec *)0)->lid_tail) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_logid_rec *)0)->lid_tail));
+
+ /* Checks for struct llog_unlink_rec */
+ LASSERTF((int)sizeof(struct llog_unlink_rec) == 40, "found %lld\n",
+ (long long)(int)sizeof(struct llog_unlink_rec));
+ LASSERTF((int)offsetof(struct llog_unlink_rec, lur_hdr) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct llog_unlink_rec, lur_hdr));
+ LASSERTF((int)sizeof(((struct llog_unlink_rec *)0)->lur_hdr) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_unlink_rec *)0)->lur_hdr));
+ LASSERTF((int)offsetof(struct llog_unlink_rec, lur_oid) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct llog_unlink_rec, lur_oid));
+ LASSERTF((int)sizeof(((struct llog_unlink_rec *)0)->lur_oid) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_unlink_rec *)0)->lur_oid));
+ LASSERTF((int)offsetof(struct llog_unlink_rec, lur_oseq) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct llog_unlink_rec, lur_oseq));
+ LASSERTF((int)sizeof(((struct llog_unlink_rec *)0)->lur_oseq) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_unlink_rec *)0)->lur_oseq));
+ LASSERTF((int)offsetof(struct llog_unlink_rec, lur_count) == 28, "found %lld\n",
+ (long long)(int)offsetof(struct llog_unlink_rec, lur_count));
+ LASSERTF((int)sizeof(((struct llog_unlink_rec *)0)->lur_count) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_unlink_rec *)0)->lur_count));
+ LASSERTF((int)offsetof(struct llog_unlink_rec, lur_tail) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct llog_unlink_rec, lur_tail));
+ LASSERTF((int)sizeof(((struct llog_unlink_rec *)0)->lur_tail) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_unlink_rec *)0)->lur_tail));
+
+ /* Checks for struct llog_unlink64_rec */
+ LASSERTF((int)sizeof(struct llog_unlink64_rec) == 64, "found %lld\n",
+ (long long)(int)sizeof(struct llog_unlink64_rec));
+ LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_hdr) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct llog_unlink64_rec, lur_hdr));
+ LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_hdr) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_hdr));
+ LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_fid) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct llog_unlink64_rec, lur_fid));
+ LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_fid) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_fid));
+ LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_count) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct llog_unlink64_rec, lur_count));
+ LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_count) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_count));
+ LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_tail) == 56, "found %lld\n",
+ (long long)(int)offsetof(struct llog_unlink64_rec, lur_tail));
+ LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_tail) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_tail));
+ LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_padding1) == 36, "found %lld\n",
+ (long long)(int)offsetof(struct llog_unlink64_rec, lur_padding1));
+ LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_padding1) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_padding1));
+ LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_padding2) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct llog_unlink64_rec, lur_padding2));
+ LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_padding2) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_padding2));
+ LASSERTF((int)offsetof(struct llog_unlink64_rec, lur_padding3) == 48, "found %lld\n",
+ (long long)(int)offsetof(struct llog_unlink64_rec, lur_padding3));
+ LASSERTF((int)sizeof(((struct llog_unlink64_rec *)0)->lur_padding3) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_unlink64_rec *)0)->lur_padding3));
+
+ /* Checks for struct llog_setattr64_rec */
+ LASSERTF((int)sizeof(struct llog_setattr64_rec) == 64, "found %lld\n",
+ (long long)(int)sizeof(struct llog_setattr64_rec));
+ LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_hdr) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct llog_setattr64_rec, lsr_hdr));
+ LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_hdr) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_hdr));
+ LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_oi) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct llog_setattr64_rec, lsr_oi));
+ LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_oi) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_oi));
+ LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_uid) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct llog_setattr64_rec, lsr_uid));
+ LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_uid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_uid));
+ LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_uid_h) == 36, "found %lld\n",
+ (long long)(int)offsetof(struct llog_setattr64_rec, lsr_uid_h));
+ LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_uid_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_uid_h));
+ LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_gid) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct llog_setattr64_rec, lsr_gid));
+ LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_gid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_gid));
+ LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_gid_h) == 44, "found %lld\n",
+ (long long)(int)offsetof(struct llog_setattr64_rec, lsr_gid_h));
+ LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_gid_h) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_gid_h));
+ LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_padding) == 48, "found %lld\n",
+ (long long)(int)offsetof(struct llog_setattr64_rec, lsr_padding));
+ LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_padding) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_padding));
+ LASSERTF((int)offsetof(struct llog_setattr64_rec, lsr_tail) == 56, "found %lld\n",
+ (long long)(int)offsetof(struct llog_setattr64_rec, lsr_tail));
+ LASSERTF((int)sizeof(((struct llog_setattr64_rec *)0)->lsr_tail) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_setattr64_rec *)0)->lsr_tail));
+
+ /* Checks for struct llog_size_change_rec */
+ LASSERTF((int)sizeof(struct llog_size_change_rec) == 64, "found %lld\n",
+ (long long)(int)sizeof(struct llog_size_change_rec));
+ LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_hdr) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct llog_size_change_rec, lsc_hdr));
+ LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_hdr) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_hdr));
+ LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_fid) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct llog_size_change_rec, lsc_fid));
+ LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_fid) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_fid));
+ LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_ioepoch) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct llog_size_change_rec, lsc_ioepoch));
+ LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_ioepoch) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_ioepoch));
+ LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_padding1) == 36, "found %lld\n",
+ (long long)(int)offsetof(struct llog_size_change_rec, lsc_padding1));
+ LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_padding1) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_padding1));
+ LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_padding2) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct llog_size_change_rec, lsc_padding2));
+ LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_padding2) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_padding2));
+ LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_padding3) == 48, "found %lld\n",
+ (long long)(int)offsetof(struct llog_size_change_rec, lsc_padding3));
+ LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_padding3) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_padding3));
+ LASSERTF((int)offsetof(struct llog_size_change_rec, lsc_tail) == 56, "found %lld\n",
+ (long long)(int)offsetof(struct llog_size_change_rec, lsc_tail));
+ LASSERTF((int)sizeof(((struct llog_size_change_rec *)0)->lsc_tail) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_size_change_rec *)0)->lsc_tail));
+
+ /* Checks for struct changelog_rec */
+ LASSERTF((int)sizeof(struct changelog_rec) == 64, "found %lld\n",
+ (long long)(int)sizeof(struct changelog_rec));
+ LASSERTF((int)offsetof(struct changelog_rec, cr_namelen) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct changelog_rec, cr_namelen));
+ LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_namelen) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct changelog_rec *)0)->cr_namelen));
+ LASSERTF((int)offsetof(struct changelog_rec, cr_flags) == 2, "found %lld\n",
+ (long long)(int)offsetof(struct changelog_rec, cr_flags));
+ LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_flags) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct changelog_rec *)0)->cr_flags));
+ LASSERTF((int)offsetof(struct changelog_rec, cr_type) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct changelog_rec, cr_type));
+ LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_type) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct changelog_rec *)0)->cr_type));
+ LASSERTF((int)offsetof(struct changelog_rec, cr_index) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct changelog_rec, cr_index));
+ LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_index) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct changelog_rec *)0)->cr_index));
+ LASSERTF((int)offsetof(struct changelog_rec, cr_prev) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct changelog_rec, cr_prev));
+ LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_prev) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct changelog_rec *)0)->cr_prev));
+ LASSERTF((int)offsetof(struct changelog_rec, cr_time) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct changelog_rec, cr_time));
+ LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_time) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct changelog_rec *)0)->cr_time));
+ LASSERTF((int)offsetof(struct changelog_rec, cr_tfid) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct changelog_rec, cr_tfid));
+ LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_tfid) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct changelog_rec *)0)->cr_tfid));
+ LASSERTF((int)offsetof(struct changelog_rec, cr_pfid) == 48, "found %lld\n",
+ (long long)(int)offsetof(struct changelog_rec, cr_pfid));
+ LASSERTF((int)sizeof(((struct changelog_rec *)0)->cr_pfid) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct changelog_rec *)0)->cr_pfid));
+
+ /* Checks for struct changelog_ext_rec */
+ LASSERTF((int)sizeof(struct changelog_ext_rec) == 96, "found %lld\n",
+ (long long)(int)sizeof(struct changelog_ext_rec));
+ LASSERTF((int)offsetof(struct changelog_ext_rec, cr_namelen) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct changelog_ext_rec, cr_namelen));
+ LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_namelen) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_namelen));
+ LASSERTF((int)offsetof(struct changelog_ext_rec, cr_flags) == 2, "found %lld\n",
+ (long long)(int)offsetof(struct changelog_ext_rec, cr_flags));
+ LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_flags) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_flags));
+ LASSERTF((int)offsetof(struct changelog_ext_rec, cr_type) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct changelog_ext_rec, cr_type));
+ LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_type) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_type));
+ LASSERTF((int)offsetof(struct changelog_ext_rec, cr_index) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct changelog_ext_rec, cr_index));
+ LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_index) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_index));
+ LASSERTF((int)offsetof(struct changelog_ext_rec, cr_prev) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct changelog_ext_rec, cr_prev));
+ LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_prev) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_prev));
+ LASSERTF((int)offsetof(struct changelog_ext_rec, cr_time) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct changelog_ext_rec, cr_time));
+ LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_time) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_time));
+ LASSERTF((int)offsetof(struct changelog_ext_rec, cr_tfid) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct changelog_ext_rec, cr_tfid));
+ LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_tfid) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_tfid));
+ LASSERTF((int)offsetof(struct changelog_ext_rec, cr_pfid) == 48, "found %lld\n",
+ (long long)(int)offsetof(struct changelog_ext_rec, cr_pfid));
+ LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_pfid) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_pfid));
+ LASSERTF((int)offsetof(struct changelog_ext_rec, cr_sfid) == 64, "found %lld\n",
+ (long long)(int)offsetof(struct changelog_ext_rec, cr_sfid));
+ LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_sfid) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_sfid));
+ LASSERTF((int)offsetof(struct changelog_ext_rec, cr_spfid) == 80, "found %lld\n",
+ (long long)(int)offsetof(struct changelog_ext_rec, cr_spfid));
+ LASSERTF((int)sizeof(((struct changelog_ext_rec *)0)->cr_spfid) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct changelog_ext_rec *)0)->cr_spfid));
+
+ /* Checks for struct changelog_setinfo */
+ LASSERTF((int)sizeof(struct changelog_setinfo) == 12, "found %lld\n",
+ (long long)(int)sizeof(struct changelog_setinfo));
+ LASSERTF((int)offsetof(struct changelog_setinfo, cs_recno) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct changelog_setinfo, cs_recno));
+ LASSERTF((int)sizeof(((struct changelog_setinfo *)0)->cs_recno) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct changelog_setinfo *)0)->cs_recno));
+ LASSERTF((int)offsetof(struct changelog_setinfo, cs_id) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct changelog_setinfo, cs_id));
+ LASSERTF((int)sizeof(((struct changelog_setinfo *)0)->cs_id) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct changelog_setinfo *)0)->cs_id));
+
+ /* Checks for struct llog_changelog_rec */
+ LASSERTF((int)sizeof(struct llog_changelog_rec) == 88, "found %lld\n",
+ (long long)(int)sizeof(struct llog_changelog_rec));
+ LASSERTF((int)offsetof(struct llog_changelog_rec, cr_hdr) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct llog_changelog_rec, cr_hdr));
+ LASSERTF((int)sizeof(((struct llog_changelog_rec *)0)->cr_hdr) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_changelog_rec *)0)->cr_hdr));
+ LASSERTF((int)offsetof(struct llog_changelog_rec, cr) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct llog_changelog_rec, cr));
+ LASSERTF((int)sizeof(((struct llog_changelog_rec *)0)->cr) == 64, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_changelog_rec *)0)->cr));
+ LASSERTF((int)offsetof(struct llog_changelog_rec, cr_tail) == 80, "found %lld\n",
+ (long long)(int)offsetof(struct llog_changelog_rec, cr_tail));
+ LASSERTF((int)sizeof(((struct llog_changelog_rec *)0)->cr_tail) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_changelog_rec *)0)->cr_tail));
+
+ /* Checks for struct llog_changelog_user_rec */
+ LASSERTF((int)sizeof(struct llog_changelog_user_rec) == 40, "found %lld\n",
+ (long long)(int)sizeof(struct llog_changelog_user_rec));
+ LASSERTF((int)offsetof(struct llog_changelog_user_rec, cur_hdr) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct llog_changelog_user_rec, cur_hdr));
+ LASSERTF((int)sizeof(((struct llog_changelog_user_rec *)0)->cur_hdr) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_changelog_user_rec *)0)->cur_hdr));
+ LASSERTF((int)offsetof(struct llog_changelog_user_rec, cur_id) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct llog_changelog_user_rec, cur_id));
+ LASSERTF((int)sizeof(((struct llog_changelog_user_rec *)0)->cur_id) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_changelog_user_rec *)0)->cur_id));
+ LASSERTF((int)offsetof(struct llog_changelog_user_rec, cur_padding) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct llog_changelog_user_rec, cur_padding));
+ LASSERTF((int)sizeof(((struct llog_changelog_user_rec *)0)->cur_padding) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_changelog_user_rec *)0)->cur_padding));
+ LASSERTF((int)offsetof(struct llog_changelog_user_rec, cur_endrec) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct llog_changelog_user_rec, cur_endrec));
+ LASSERTF((int)sizeof(((struct llog_changelog_user_rec *)0)->cur_endrec) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_changelog_user_rec *)0)->cur_endrec));
+ LASSERTF((int)offsetof(struct llog_changelog_user_rec, cur_tail) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct llog_changelog_user_rec, cur_tail));
+ LASSERTF((int)sizeof(((struct llog_changelog_user_rec *)0)->cur_tail) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_changelog_user_rec *)0)->cur_tail));
+
+ /* Checks for struct llog_gen */
+ LASSERTF((int)sizeof(struct llog_gen) == 16, "found %lld\n",
+ (long long)(int)sizeof(struct llog_gen));
+ LASSERTF((int)offsetof(struct llog_gen, mnt_cnt) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct llog_gen, mnt_cnt));
+ LASSERTF((int)sizeof(((struct llog_gen *)0)->mnt_cnt) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_gen *)0)->mnt_cnt));
+ LASSERTF((int)offsetof(struct llog_gen, conn_cnt) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct llog_gen, conn_cnt));
+ LASSERTF((int)sizeof(((struct llog_gen *)0)->conn_cnt) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_gen *)0)->conn_cnt));
+
+ /* Checks for struct llog_gen_rec */
+ LASSERTF((int)sizeof(struct llog_gen_rec) == 64, "found %lld\n",
+ (long long)(int)sizeof(struct llog_gen_rec));
+ LASSERTF((int)offsetof(struct llog_gen_rec, lgr_hdr) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct llog_gen_rec, lgr_hdr));
+ LASSERTF((int)sizeof(((struct llog_gen_rec *)0)->lgr_hdr) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_gen_rec *)0)->lgr_hdr));
+ LASSERTF((int)offsetof(struct llog_gen_rec, lgr_gen) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct llog_gen_rec, lgr_gen));
+ LASSERTF((int)sizeof(((struct llog_gen_rec *)0)->lgr_gen) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_gen_rec *)0)->lgr_gen));
+ LASSERTF((int)offsetof(struct llog_gen_rec, lgr_tail) == 56, "found %lld\n",
+ (long long)(int)offsetof(struct llog_gen_rec, lgr_tail));
+ LASSERTF((int)sizeof(((struct llog_gen_rec *)0)->lgr_tail) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_gen_rec *)0)->lgr_tail));
+
+ /* Checks for struct llog_log_hdr */
+ LASSERTF((int)sizeof(struct llog_log_hdr) == 8192, "found %lld\n",
+ (long long)(int)sizeof(struct llog_log_hdr));
+ LASSERTF((int)offsetof(struct llog_log_hdr, llh_hdr) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct llog_log_hdr, llh_hdr));
+ LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_hdr) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_hdr));
+ LASSERTF((int)offsetof(struct llog_log_hdr, llh_timestamp) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct llog_log_hdr, llh_timestamp));
+ LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_timestamp) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_timestamp));
+ LASSERTF((int)offsetof(struct llog_log_hdr, llh_count) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct llog_log_hdr, llh_count));
+ LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_count) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_count));
+ LASSERTF((int)offsetof(struct llog_log_hdr, llh_bitmap_offset) == 28, "found %lld\n",
+ (long long)(int)offsetof(struct llog_log_hdr, llh_bitmap_offset));
+ LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_bitmap_offset) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_bitmap_offset));
+ LASSERTF((int)offsetof(struct llog_log_hdr, llh_size) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct llog_log_hdr, llh_size));
+ LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_size) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_size));
+ LASSERTF((int)offsetof(struct llog_log_hdr, llh_flags) == 36, "found %lld\n",
+ (long long)(int)offsetof(struct llog_log_hdr, llh_flags));
+ LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_flags));
+ LASSERTF((int)offsetof(struct llog_log_hdr, llh_cat_idx) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct llog_log_hdr, llh_cat_idx));
+ LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_cat_idx) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_cat_idx));
+ LASSERTF((int)offsetof(struct llog_log_hdr, llh_tgtuuid) == 44, "found %lld\n",
+ (long long)(int)offsetof(struct llog_log_hdr, llh_tgtuuid));
+ LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_tgtuuid) == 40, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_tgtuuid));
+ LASSERTF((int)offsetof(struct llog_log_hdr, llh_reserved) == 84, "found %lld\n",
+ (long long)(int)offsetof(struct llog_log_hdr, llh_reserved));
+ LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_reserved) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_reserved));
+ LASSERTF((int)offsetof(struct llog_log_hdr, llh_bitmap) == 88, "found %lld\n",
+ (long long)(int)offsetof(struct llog_log_hdr, llh_bitmap));
+ LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_bitmap) == 8096, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_bitmap));
+ LASSERTF((int)offsetof(struct llog_log_hdr, llh_tail) == 8184, "found %lld\n",
+ (long long)(int)offsetof(struct llog_log_hdr, llh_tail));
+ LASSERTF((int)sizeof(((struct llog_log_hdr *)0)->llh_tail) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_log_hdr *)0)->llh_tail));
+
+ /* Checks for struct llog_cookie */
+ LASSERTF((int)sizeof(struct llog_cookie) == 32, "found %lld\n",
+ (long long)(int)sizeof(struct llog_cookie));
+ LASSERTF((int)offsetof(struct llog_cookie, lgc_lgl) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct llog_cookie, lgc_lgl));
+ LASSERTF((int)sizeof(((struct llog_cookie *)0)->lgc_lgl) == 20, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_cookie *)0)->lgc_lgl));
+ LASSERTF((int)offsetof(struct llog_cookie, lgc_subsys) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct llog_cookie, lgc_subsys));
+ LASSERTF((int)sizeof(((struct llog_cookie *)0)->lgc_subsys) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_cookie *)0)->lgc_subsys));
+ LASSERTF((int)offsetof(struct llog_cookie, lgc_index) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct llog_cookie, lgc_index));
+ LASSERTF((int)sizeof(((struct llog_cookie *)0)->lgc_index) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_cookie *)0)->lgc_index));
+ LASSERTF((int)offsetof(struct llog_cookie, lgc_padding) == 28, "found %lld\n",
+ (long long)(int)offsetof(struct llog_cookie, lgc_padding));
+ LASSERTF((int)sizeof(((struct llog_cookie *)0)->lgc_padding) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llog_cookie *)0)->lgc_padding));
+
+ /* Checks for struct llogd_body */
+ LASSERTF((int)sizeof(struct llogd_body) == 48, "found %lld\n",
+ (long long)(int)sizeof(struct llogd_body));
+ LASSERTF((int)offsetof(struct llogd_body, lgd_logid) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct llogd_body, lgd_logid));
+ LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_logid) == 20, "found %lld\n",
+ (long long)(int)sizeof(((struct llogd_body *)0)->lgd_logid));
+ LASSERTF((int)offsetof(struct llogd_body, lgd_ctxt_idx) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct llogd_body, lgd_ctxt_idx));
+ LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_ctxt_idx) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llogd_body *)0)->lgd_ctxt_idx));
+ LASSERTF((int)offsetof(struct llogd_body, lgd_llh_flags) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct llogd_body, lgd_llh_flags));
+ LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_llh_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llogd_body *)0)->lgd_llh_flags));
+ LASSERTF((int)offsetof(struct llogd_body, lgd_index) == 28, "found %lld\n",
+ (long long)(int)offsetof(struct llogd_body, lgd_index));
+ LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_index) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llogd_body *)0)->lgd_index));
+ LASSERTF((int)offsetof(struct llogd_body, lgd_saved_index) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct llogd_body, lgd_saved_index));
+ LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_saved_index) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llogd_body *)0)->lgd_saved_index));
+ LASSERTF((int)offsetof(struct llogd_body, lgd_len) == 36, "found %lld\n",
+ (long long)(int)offsetof(struct llogd_body, lgd_len));
+ LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_len) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llogd_body *)0)->lgd_len));
+ LASSERTF((int)offsetof(struct llogd_body, lgd_cur_offset) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct llogd_body, lgd_cur_offset));
+ LASSERTF((int)sizeof(((struct llogd_body *)0)->lgd_cur_offset) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct llogd_body *)0)->lgd_cur_offset));
+ CLASSERT(LLOG_ORIGIN_HANDLE_CREATE == 501);
+ CLASSERT(LLOG_ORIGIN_HANDLE_NEXT_BLOCK == 502);
+ CLASSERT(LLOG_ORIGIN_HANDLE_READ_HEADER == 503);
+ CLASSERT(LLOG_ORIGIN_HANDLE_WRITE_REC == 504);
+ CLASSERT(LLOG_ORIGIN_HANDLE_CLOSE == 505);
+ CLASSERT(LLOG_ORIGIN_CONNECT == 506);
+ CLASSERT(LLOG_CATINFO == 507);
+ CLASSERT(LLOG_ORIGIN_HANDLE_PREV_BLOCK == 508);
+ CLASSERT(LLOG_ORIGIN_HANDLE_DESTROY == 509);
+ CLASSERT(LLOG_FIRST_OPC == 501);
+ CLASSERT(LLOG_LAST_OPC == 510);
+
+ /* Checks for struct llogd_conn_body */
+ LASSERTF((int)sizeof(struct llogd_conn_body) == 40, "found %lld\n",
+ (long long)(int)sizeof(struct llogd_conn_body));
+ LASSERTF((int)offsetof(struct llogd_conn_body, lgdc_gen) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct llogd_conn_body, lgdc_gen));
+ LASSERTF((int)sizeof(((struct llogd_conn_body *)0)->lgdc_gen) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct llogd_conn_body *)0)->lgdc_gen));
+ LASSERTF((int)offsetof(struct llogd_conn_body, lgdc_logid) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct llogd_conn_body, lgdc_logid));
+ LASSERTF((int)sizeof(((struct llogd_conn_body *)0)->lgdc_logid) == 20, "found %lld\n",
+ (long long)(int)sizeof(((struct llogd_conn_body *)0)->lgdc_logid));
+ LASSERTF((int)offsetof(struct llogd_conn_body, lgdc_ctxt_idx) == 36, "found %lld\n",
+ (long long)(int)offsetof(struct llogd_conn_body, lgdc_ctxt_idx));
+ LASSERTF((int)sizeof(((struct llogd_conn_body *)0)->lgdc_ctxt_idx) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct llogd_conn_body *)0)->lgdc_ctxt_idx));
+
+ /* Checks for struct ll_fiemap_info_key */
+ LASSERTF((int)sizeof(struct ll_fiemap_info_key) == 248, "found %lld\n",
+ (long long)(int)sizeof(struct ll_fiemap_info_key));
+ LASSERTF((int)offsetof(struct ll_fiemap_info_key, name[8]) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct ll_fiemap_info_key, name[8]));
+ LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->name[8]) == 1, "found %lld\n",
+ (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->name[8]));
+ LASSERTF((int)offsetof(struct ll_fiemap_info_key, oa) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct ll_fiemap_info_key, oa));
+ LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->oa) == 208, "found %lld\n",
+ (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->oa));
+ LASSERTF((int)offsetof(struct ll_fiemap_info_key, fiemap) == 216, "found %lld\n",
+ (long long)(int)offsetof(struct ll_fiemap_info_key, fiemap));
+ LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->fiemap) == 32, "found %lld\n",
+ (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->fiemap));
+
+ /* Checks for struct quota_body */
+ LASSERTF((int)sizeof(struct quota_body) == 112, "found %lld\n",
+ (long long)(int)sizeof(struct quota_body));
+ LASSERTF((int)offsetof(struct quota_body, qb_fid) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct quota_body, qb_fid));
+ LASSERTF((int)sizeof(((struct quota_body *)0)->qb_fid) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct quota_body *)0)->qb_fid));
+ LASSERTF((int)offsetof(struct quota_body, qb_id) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct quota_body, qb_id));
+ LASSERTF((int)sizeof(((struct quota_body *)0)->qb_id) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct quota_body *)0)->qb_id));
+ LASSERTF((int)offsetof(struct quota_body, qb_flags) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct quota_body, qb_flags));
+ LASSERTF((int)sizeof(((struct quota_body *)0)->qb_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct quota_body *)0)->qb_flags));
+ LASSERTF((int)offsetof(struct quota_body, qb_padding) == 36, "found %lld\n",
+ (long long)(int)offsetof(struct quota_body, qb_padding));
+ LASSERTF((int)sizeof(((struct quota_body *)0)->qb_padding) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct quota_body *)0)->qb_padding));
+ LASSERTF((int)offsetof(struct quota_body, qb_count) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct quota_body, qb_count));
+ LASSERTF((int)sizeof(((struct quota_body *)0)->qb_count) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct quota_body *)0)->qb_count));
+ LASSERTF((int)offsetof(struct quota_body, qb_usage) == 48, "found %lld\n",
+ (long long)(int)offsetof(struct quota_body, qb_usage));
+ LASSERTF((int)sizeof(((struct quota_body *)0)->qb_usage) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct quota_body *)0)->qb_usage));
+ LASSERTF((int)offsetof(struct quota_body, qb_slv_ver) == 56, "found %lld\n",
+ (long long)(int)offsetof(struct quota_body, qb_slv_ver));
+ LASSERTF((int)sizeof(((struct quota_body *)0)->qb_slv_ver) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct quota_body *)0)->qb_slv_ver));
+ LASSERTF((int)offsetof(struct quota_body, qb_lockh) == 64, "found %lld\n",
+ (long long)(int)offsetof(struct quota_body, qb_lockh));
+ LASSERTF((int)sizeof(((struct quota_body *)0)->qb_lockh) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct quota_body *)0)->qb_lockh));
+ LASSERTF((int)offsetof(struct quota_body, qb_glb_lockh) == 72, "found %lld\n",
+ (long long)(int)offsetof(struct quota_body, qb_glb_lockh));
+ LASSERTF((int)sizeof(((struct quota_body *)0)->qb_glb_lockh) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct quota_body *)0)->qb_glb_lockh));
+ LASSERTF((int)offsetof(struct quota_body, qb_padding1[4]) == 112, "found %lld\n",
+ (long long)(int)offsetof(struct quota_body, qb_padding1[4]));
+ LASSERTF((int)sizeof(((struct quota_body *)0)->qb_padding1[4]) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct quota_body *)0)->qb_padding1[4]));
+
+ /* Checks for struct mgs_target_info */
+ LASSERTF((int)sizeof(struct mgs_target_info) == 4544, "found %lld\n",
+ (long long)(int)sizeof(struct mgs_target_info));
+ LASSERTF((int)offsetof(struct mgs_target_info, mti_lustre_ver) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct mgs_target_info, mti_lustre_ver));
+ LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_lustre_ver) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_lustre_ver));
+ LASSERTF((int)offsetof(struct mgs_target_info, mti_stripe_index) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct mgs_target_info, mti_stripe_index));
+ LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_stripe_index) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_stripe_index));
+ LASSERTF((int)offsetof(struct mgs_target_info, mti_config_ver) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct mgs_target_info, mti_config_ver));
+ LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_config_ver) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_config_ver));
+ LASSERTF((int)offsetof(struct mgs_target_info, mti_flags) == 12, "found %lld\n",
+ (long long)(int)offsetof(struct mgs_target_info, mti_flags));
+ LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_flags));
+ LASSERTF((int)offsetof(struct mgs_target_info, mti_nid_count) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct mgs_target_info, mti_nid_count));
+ LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_nid_count) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_nid_count));
+ LASSERTF((int)offsetof(struct mgs_target_info, mti_instance) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct mgs_target_info, mti_instance));
+ LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_instance) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_instance));
+ LASSERTF((int)offsetof(struct mgs_target_info, mti_fsname) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct mgs_target_info, mti_fsname));
+ LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_fsname) == 64, "found %lld\n",
+ (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_fsname));
+ LASSERTF((int)offsetof(struct mgs_target_info, mti_svname) == 88, "found %lld\n",
+ (long long)(int)offsetof(struct mgs_target_info, mti_svname));
+ LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_svname) == 64, "found %lld\n",
+ (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_svname));
+ LASSERTF((int)offsetof(struct mgs_target_info, mti_uuid) == 152, "found %lld\n",
+ (long long)(int)offsetof(struct mgs_target_info, mti_uuid));
+ LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_uuid) == 40, "found %lld\n",
+ (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_uuid));
+ LASSERTF((int)offsetof(struct mgs_target_info, mti_nids) == 192, "found %lld\n",
+ (long long)(int)offsetof(struct mgs_target_info, mti_nids));
+ LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_nids) == 256, "found %lld\n",
+ (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_nids));
+ LASSERTF((int)offsetof(struct mgs_target_info, mti_params) == 448, "found %lld\n",
+ (long long)(int)offsetof(struct mgs_target_info, mti_params));
+ LASSERTF((int)sizeof(((struct mgs_target_info *)0)->mti_params) == 4096, "found %lld\n",
+ (long long)(int)sizeof(((struct mgs_target_info *)0)->mti_params));
+
+ /* Checks for struct lustre_capa */
+ LASSERTF((int)sizeof(struct lustre_capa) == 120, "found %lld\n",
+ (long long)(int)sizeof(struct lustre_capa));
+ LASSERTF((int)offsetof(struct lustre_capa, lc_fid) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct lustre_capa, lc_fid));
+ LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_fid) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct lustre_capa *)0)->lc_fid));
+ LASSERTF((int)offsetof(struct lustre_capa, lc_opc) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct lustre_capa, lc_opc));
+ LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_opc) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lustre_capa *)0)->lc_opc));
+ LASSERTF((int)offsetof(struct lustre_capa, lc_uid) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct lustre_capa, lc_uid));
+ LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_uid) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lustre_capa *)0)->lc_uid));
+ LASSERTF((int)offsetof(struct lustre_capa, lc_gid) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct lustre_capa, lc_gid));
+ LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_gid) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lustre_capa *)0)->lc_gid));
+ LASSERTF((int)offsetof(struct lustre_capa, lc_flags) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct lustre_capa, lc_flags));
+ LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lustre_capa *)0)->lc_flags));
+ LASSERTF((int)offsetof(struct lustre_capa, lc_keyid) == 44, "found %lld\n",
+ (long long)(int)offsetof(struct lustre_capa, lc_keyid));
+ LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_keyid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lustre_capa *)0)->lc_keyid));
+ LASSERTF((int)offsetof(struct lustre_capa, lc_timeout) == 48, "found %lld\n",
+ (long long)(int)offsetof(struct lustre_capa, lc_timeout));
+ LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_timeout) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lustre_capa *)0)->lc_timeout));
+ LASSERTF((int)offsetof(struct lustre_capa, lc_expiry) == 52, "found %lld\n",
+ (long long)(int)offsetof(struct lustre_capa, lc_expiry));
+ LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_expiry) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lustre_capa *)0)->lc_expiry));
+ CLASSERT(CAPA_HMAC_MAX_LEN == 64);
+ LASSERTF((int)offsetof(struct lustre_capa, lc_hmac[64]) == 120, "found %lld\n",
+ (long long)(int)offsetof(struct lustre_capa, lc_hmac[64]));
+ LASSERTF((int)sizeof(((struct lustre_capa *)0)->lc_hmac[64]) == 1, "found %lld\n",
+ (long long)(int)sizeof(((struct lustre_capa *)0)->lc_hmac[64]));
+
+ /* Checks for struct lustre_capa_key */
+ LASSERTF((int)sizeof(struct lustre_capa_key) == 72, "found %lld\n",
+ (long long)(int)sizeof(struct lustre_capa_key));
+ LASSERTF((int)offsetof(struct lustre_capa_key, lk_seq) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct lustre_capa_key, lk_seq));
+ LASSERTF((int)sizeof(((struct lustre_capa_key *)0)->lk_seq) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct lustre_capa_key *)0)->lk_seq));
+ LASSERTF((int)offsetof(struct lustre_capa_key, lk_keyid) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct lustre_capa_key, lk_keyid));
+ LASSERTF((int)sizeof(((struct lustre_capa_key *)0)->lk_keyid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lustre_capa_key *)0)->lk_keyid));
+ LASSERTF((int)offsetof(struct lustre_capa_key, lk_padding) == 12, "found %lld\n",
+ (long long)(int)offsetof(struct lustre_capa_key, lk_padding));
+ LASSERTF((int)sizeof(((struct lustre_capa_key *)0)->lk_padding) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct lustre_capa_key *)0)->lk_padding));
+ CLASSERT(CAPA_HMAC_KEY_MAX_LEN == 56);
+ LASSERTF((int)offsetof(struct lustre_capa_key, lk_key[56]) == 72, "found %lld\n",
+ (long long)(int)offsetof(struct lustre_capa_key, lk_key[56]));
+ LASSERTF((int)sizeof(((struct lustre_capa_key *)0)->lk_key[56]) == 1, "found %lld\n",
+ (long long)(int)sizeof(((struct lustre_capa_key *)0)->lk_key[56]));
+
+ /* Checks for struct getinfo_fid2path */
+ LASSERTF((int)sizeof(struct getinfo_fid2path) == 32, "found %lld\n",
+ (long long)(int)sizeof(struct getinfo_fid2path));
+ LASSERTF((int)offsetof(struct getinfo_fid2path, gf_fid) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct getinfo_fid2path, gf_fid));
+ LASSERTF((int)sizeof(((struct getinfo_fid2path *)0)->gf_fid) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct getinfo_fid2path *)0)->gf_fid));
+ LASSERTF((int)offsetof(struct getinfo_fid2path, gf_recno) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct getinfo_fid2path, gf_recno));
+ LASSERTF((int)sizeof(((struct getinfo_fid2path *)0)->gf_recno) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct getinfo_fid2path *)0)->gf_recno));
+ LASSERTF((int)offsetof(struct getinfo_fid2path, gf_linkno) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct getinfo_fid2path, gf_linkno));
+ LASSERTF((int)sizeof(((struct getinfo_fid2path *)0)->gf_linkno) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct getinfo_fid2path *)0)->gf_linkno));
+ LASSERTF((int)offsetof(struct getinfo_fid2path, gf_pathlen) == 28, "found %lld\n",
+ (long long)(int)offsetof(struct getinfo_fid2path, gf_pathlen));
+ LASSERTF((int)sizeof(((struct getinfo_fid2path *)0)->gf_pathlen) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct getinfo_fid2path *)0)->gf_pathlen));
+ LASSERTF((int)offsetof(struct getinfo_fid2path, gf_path[0]) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct getinfo_fid2path, gf_path[0]));
+ LASSERTF((int)sizeof(((struct getinfo_fid2path *)0)->gf_path[0]) == 1, "found %lld\n",
+ (long long)(int)sizeof(((struct getinfo_fid2path *)0)->gf_path[0]));
+
+ /* Checks for struct ll_user_fiemap */
+ LASSERTF((int)sizeof(struct ll_user_fiemap) == 32, "found %lld\n",
+ (long long)(int)sizeof(struct ll_user_fiemap));
+ LASSERTF((int)offsetof(struct ll_user_fiemap, fm_start) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct ll_user_fiemap, fm_start));
+ LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_start) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_start));
+ LASSERTF((int)offsetof(struct ll_user_fiemap, fm_length) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct ll_user_fiemap, fm_length));
+ LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_length) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_length));
+ LASSERTF((int)offsetof(struct ll_user_fiemap, fm_flags) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct ll_user_fiemap, fm_flags));
+ LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_flags));
+ LASSERTF((int)offsetof(struct ll_user_fiemap, fm_mapped_extents) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct ll_user_fiemap, fm_mapped_extents));
+ LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_mapped_extents) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_mapped_extents));
+ LASSERTF((int)offsetof(struct ll_user_fiemap, fm_extent_count) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct ll_user_fiemap, fm_extent_count));
+ LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_extent_count) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_extent_count));
+ LASSERTF((int)offsetof(struct ll_user_fiemap, fm_reserved) == 28, "found %lld\n",
+ (long long)(int)offsetof(struct ll_user_fiemap, fm_reserved));
+ LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_reserved) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_reserved));
+ LASSERTF((int)offsetof(struct ll_user_fiemap, fm_extents) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct ll_user_fiemap, fm_extents));
+ LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_extents) == 0, "found %lld\n",
+ (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_extents));
+ CLASSERT(FIEMAP_FLAG_SYNC == 0x00000001);
+ CLASSERT(FIEMAP_FLAG_XATTR == 0x00000002);
+ CLASSERT(FIEMAP_FLAG_DEVICE_ORDER == 0x40000000);
+
+ /* Checks for struct ll_fiemap_extent */
+ LASSERTF((int)sizeof(struct ll_fiemap_extent) == 56, "found %lld\n",
+ (long long)(int)sizeof(struct ll_fiemap_extent));
+ LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_logical) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct ll_fiemap_extent, fe_logical));
+ LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_logical) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_logical));
+ LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_physical) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct ll_fiemap_extent, fe_physical));
+ LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_physical) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_physical));
+ LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_length) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct ll_fiemap_extent, fe_length));
+ LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_length) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_length));
+ LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_flags) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct ll_fiemap_extent, fe_flags));
+ LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_flags));
+ LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_device) == 44, "found %lld\n",
+ (long long)(int)offsetof(struct ll_fiemap_extent, fe_device));
+ LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_device) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_device));
+ CLASSERT(FIEMAP_EXTENT_LAST == 0x00000001);
+ CLASSERT(FIEMAP_EXTENT_UNKNOWN == 0x00000002);
+ CLASSERT(FIEMAP_EXTENT_DELALLOC == 0x00000004);
+ CLASSERT(FIEMAP_EXTENT_ENCODED == 0x00000008);
+ CLASSERT(FIEMAP_EXTENT_DATA_ENCRYPTED == 0x00000080);
+ CLASSERT(FIEMAP_EXTENT_NOT_ALIGNED == 0x00000100);
+ CLASSERT(FIEMAP_EXTENT_DATA_INLINE == 0x00000200);
+ CLASSERT(FIEMAP_EXTENT_DATA_TAIL == 0x00000400);
+ CLASSERT(FIEMAP_EXTENT_UNWRITTEN == 0x00000800);
+ CLASSERT(FIEMAP_EXTENT_MERGED == 0x00001000);
+ CLASSERT(FIEMAP_EXTENT_NO_DIRECT == 0x40000000);
+ CLASSERT(FIEMAP_EXTENT_NET == 0x80000000);
+
+ /* Checks for type posix_acl_xattr_entry */
+ LASSERTF((int)sizeof(posix_acl_xattr_entry) == 8, "found %lld\n",
+ (long long)(int)sizeof(posix_acl_xattr_entry));
+ LASSERTF((int)offsetof(posix_acl_xattr_entry, e_tag) == 0, "found %lld\n",
+ (long long)(int)offsetof(posix_acl_xattr_entry, e_tag));
+ LASSERTF((int)sizeof(((posix_acl_xattr_entry *)0)->e_tag) == 2, "found %lld\n",
+ (long long)(int)sizeof(((posix_acl_xattr_entry *)0)->e_tag));
+ LASSERTF((int)offsetof(posix_acl_xattr_entry, e_perm) == 2, "found %lld\n",
+ (long long)(int)offsetof(posix_acl_xattr_entry, e_perm));
+ LASSERTF((int)sizeof(((posix_acl_xattr_entry *)0)->e_perm) == 2, "found %lld\n",
+ (long long)(int)sizeof(((posix_acl_xattr_entry *)0)->e_perm));
+ LASSERTF((int)offsetof(posix_acl_xattr_entry, e_id) == 4, "found %lld\n",
+ (long long)(int)offsetof(posix_acl_xattr_entry, e_id));
+ LASSERTF((int)sizeof(((posix_acl_xattr_entry *)0)->e_id) == 4, "found %lld\n",
+ (long long)(int)sizeof(((posix_acl_xattr_entry *)0)->e_id));
+
+ /* Checks for type posix_acl_xattr_header */
+ LASSERTF((int)sizeof(posix_acl_xattr_header) == 4, "found %lld\n",
+ (long long)(int)sizeof(posix_acl_xattr_header));
+ LASSERTF((int)offsetof(posix_acl_xattr_header, a_version) == 0, "found %lld\n",
+ (long long)(int)offsetof(posix_acl_xattr_header, a_version));
+ LASSERTF((int)sizeof(((posix_acl_xattr_header *)0)->a_version) == 4, "found %lld\n",
+ (long long)(int)sizeof(((posix_acl_xattr_header *)0)->a_version));
+ LASSERTF((int)offsetof(posix_acl_xattr_header, a_entries) == 4, "found %lld\n",
+ (long long)(int)offsetof(posix_acl_xattr_header, a_entries));
+ LASSERTF((int)sizeof(((posix_acl_xattr_header *)0)->a_entries) == 0, "found %lld\n",
+ (long long)(int)sizeof(((posix_acl_xattr_header *)0)->a_entries));
+
+ /* Checks for struct link_ea_header */
+ LASSERTF((int)sizeof(struct link_ea_header) == 24, "found %lld\n",
+ (long long)(int)sizeof(struct link_ea_header));
+ LASSERTF((int)offsetof(struct link_ea_header, leh_magic) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct link_ea_header, leh_magic));
+ LASSERTF((int)sizeof(((struct link_ea_header *)0)->leh_magic) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct link_ea_header *)0)->leh_magic));
+ LASSERTF((int)offsetof(struct link_ea_header, leh_reccount) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct link_ea_header, leh_reccount));
+ LASSERTF((int)sizeof(((struct link_ea_header *)0)->leh_reccount) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct link_ea_header *)0)->leh_reccount));
+ LASSERTF((int)offsetof(struct link_ea_header, leh_len) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct link_ea_header, leh_len));
+ LASSERTF((int)sizeof(((struct link_ea_header *)0)->leh_len) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct link_ea_header *)0)->leh_len));
+ LASSERTF((int)offsetof(struct link_ea_header, padding1) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct link_ea_header, padding1));
+ LASSERTF((int)sizeof(((struct link_ea_header *)0)->padding1) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct link_ea_header *)0)->padding1));
+ LASSERTF((int)offsetof(struct link_ea_header, padding2) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct link_ea_header, padding2));
+ LASSERTF((int)sizeof(((struct link_ea_header *)0)->padding2) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct link_ea_header *)0)->padding2));
+ CLASSERT(LINK_EA_MAGIC == 0x11EAF1DFUL);
+
+ /* Checks for struct link_ea_entry */
+ LASSERTF((int)sizeof(struct link_ea_entry) == 18, "found %lld\n",
+ (long long)(int)sizeof(struct link_ea_entry));
+ LASSERTF((int)offsetof(struct link_ea_entry, lee_reclen) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct link_ea_entry, lee_reclen));
+ LASSERTF((int)sizeof(((struct link_ea_entry *)0)->lee_reclen) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct link_ea_entry *)0)->lee_reclen));
+ LASSERTF((int)offsetof(struct link_ea_entry, lee_parent_fid) == 2, "found %lld\n",
+ (long long)(int)offsetof(struct link_ea_entry, lee_parent_fid));
+ LASSERTF((int)sizeof(((struct link_ea_entry *)0)->lee_parent_fid) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct link_ea_entry *)0)->lee_parent_fid));
+ LASSERTF((int)offsetof(struct link_ea_entry, lee_name) == 18, "found %lld\n",
+ (long long)(int)offsetof(struct link_ea_entry, lee_name));
+ LASSERTF((int)sizeof(((struct link_ea_entry *)0)->lee_name) == 0, "found %lld\n",
+ (long long)(int)sizeof(((struct link_ea_entry *)0)->lee_name));
+
+ /* Checks for struct layout_intent */
+ LASSERTF((int)sizeof(struct layout_intent) == 24, "found %lld\n",
+ (long long)(int)sizeof(struct layout_intent));
+ LASSERTF((int)offsetof(struct layout_intent, li_opc) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct layout_intent, li_opc));
+ LASSERTF((int)sizeof(((struct layout_intent *)0)->li_opc) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct layout_intent *)0)->li_opc));
+ LASSERTF((int)offsetof(struct layout_intent, li_flags) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct layout_intent, li_flags));
+ LASSERTF((int)sizeof(((struct layout_intent *)0)->li_flags) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct layout_intent *)0)->li_flags));
+ LASSERTF((int)offsetof(struct layout_intent, li_start) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct layout_intent, li_start));
+ LASSERTF((int)sizeof(((struct layout_intent *)0)->li_start) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct layout_intent *)0)->li_start));
+ LASSERTF((int)offsetof(struct layout_intent, li_end) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct layout_intent, li_end));
+ LASSERTF((int)sizeof(((struct layout_intent *)0)->li_end) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct layout_intent *)0)->li_end));
+ LASSERTF(LAYOUT_INTENT_ACCESS == 0, "found %lld\n",
+ (long long)LAYOUT_INTENT_ACCESS);
+ LASSERTF(LAYOUT_INTENT_READ == 1, "found %lld\n",
+ (long long)LAYOUT_INTENT_READ);
+ LASSERTF(LAYOUT_INTENT_WRITE == 2, "found %lld\n",
+ (long long)LAYOUT_INTENT_WRITE);
+ LASSERTF(LAYOUT_INTENT_GLIMPSE == 3, "found %lld\n",
+ (long long)LAYOUT_INTENT_GLIMPSE);
+ LASSERTF(LAYOUT_INTENT_TRUNC == 4, "found %lld\n",
+ (long long)LAYOUT_INTENT_TRUNC);
+ LASSERTF(LAYOUT_INTENT_RELEASE == 5, "found %lld\n",
+ (long long)LAYOUT_INTENT_RELEASE);
+ LASSERTF(LAYOUT_INTENT_RESTORE == 6, "found %lld\n",
+ (long long)LAYOUT_INTENT_RESTORE);
+
+ /* Checks for struct hsm_action_item */
+ LASSERTF((int)sizeof(struct hsm_action_item) == 72, "found %lld\n",
+ (long long)(int)sizeof(struct hsm_action_item));
+ LASSERTF((int)offsetof(struct hsm_action_item, hai_len) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_action_item, hai_len));
+ LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_len) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_len));
+ LASSERTF((int)offsetof(struct hsm_action_item, hai_action) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_action_item, hai_action));
+ LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_action) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_action));
+ LASSERTF((int)offsetof(struct hsm_action_item, hai_fid) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_action_item, hai_fid));
+ LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_fid) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_fid));
+ LASSERTF((int)offsetof(struct hsm_action_item, hai_dfid) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_action_item, hai_dfid));
+ LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_dfid) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_dfid));
+ LASSERTF((int)offsetof(struct hsm_action_item, hai_extent) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_action_item, hai_extent));
+ LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_extent) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_extent));
+ LASSERTF((int)offsetof(struct hsm_action_item, hai_cookie) == 56, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_action_item, hai_cookie));
+ LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_cookie) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_cookie));
+ LASSERTF((int)offsetof(struct hsm_action_item, hai_gid) == 64, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_action_item, hai_gid));
+ LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_gid) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_gid));
+ LASSERTF((int)offsetof(struct hsm_action_item, hai_data) == 72, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_action_item, hai_data));
+ LASSERTF((int)sizeof(((struct hsm_action_item *)0)->hai_data) == 0, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_action_item *)0)->hai_data));
+
+ /* Checks for struct hsm_action_list */
+ LASSERTF((int)sizeof(struct hsm_action_list) == 32, "found %lld\n",
+ (long long)(int)sizeof(struct hsm_action_list));
+ LASSERTF((int)offsetof(struct hsm_action_list, hal_version) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_action_list, hal_version));
+ LASSERTF((int)sizeof(((struct hsm_action_list *)0)->hal_version) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_action_list *)0)->hal_version));
+ LASSERTF((int)offsetof(struct hsm_action_list, hal_count) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_action_list, hal_count));
+ LASSERTF((int)sizeof(((struct hsm_action_list *)0)->hal_count) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_action_list *)0)->hal_count));
+ LASSERTF((int)offsetof(struct hsm_action_list, hal_compound_id) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_action_list, hal_compound_id));
+ LASSERTF((int)sizeof(((struct hsm_action_list *)0)->hal_compound_id) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_action_list *)0)->hal_compound_id));
+ LASSERTF((int)offsetof(struct hsm_action_list, hal_flags) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_action_list, hal_flags));
+ LASSERTF((int)sizeof(((struct hsm_action_list *)0)->hal_flags) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_action_list *)0)->hal_flags));
+ LASSERTF((int)offsetof(struct hsm_action_list, hal_archive_id) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_action_list, hal_archive_id));
+ LASSERTF((int)sizeof(((struct hsm_action_list *)0)->hal_archive_id) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_action_list *)0)->hal_archive_id));
+ LASSERTF((int)offsetof(struct hsm_action_list, padding1) == 28, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_action_list, padding1));
+ LASSERTF((int)sizeof(((struct hsm_action_list *)0)->padding1) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_action_list *)0)->padding1));
+ LASSERTF((int)offsetof(struct hsm_action_list, hal_fsname) == 32, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_action_list, hal_fsname));
+ LASSERTF((int)sizeof(((struct hsm_action_list *)0)->hal_fsname) == 0, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_action_list *)0)->hal_fsname));
+
+ /* Checks for struct hsm_progress */
+ LASSERTF((int)sizeof(struct hsm_progress) == 48, "found %lld\n",
+ (long long)(int)sizeof(struct hsm_progress));
+ LASSERTF((int)offsetof(struct hsm_progress, hp_fid) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_progress, hp_fid));
+ LASSERTF((int)sizeof(((struct hsm_progress *)0)->hp_fid) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_progress *)0)->hp_fid));
+ LASSERTF((int)offsetof(struct hsm_progress, hp_cookie) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_progress, hp_cookie));
+ LASSERTF((int)sizeof(((struct hsm_progress *)0)->hp_cookie) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_progress *)0)->hp_cookie));
+ LASSERTF((int)offsetof(struct hsm_progress, hp_extent) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_progress, hp_extent));
+ LASSERTF((int)sizeof(((struct hsm_progress *)0)->hp_extent) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_progress *)0)->hp_extent));
+ LASSERTF((int)offsetof(struct hsm_progress, hp_flags) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_progress, hp_flags));
+ LASSERTF((int)sizeof(((struct hsm_progress *)0)->hp_flags) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_progress *)0)->hp_flags));
+ LASSERTF((int)offsetof(struct hsm_progress, hp_errval) == 42, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_progress, hp_errval));
+ LASSERTF((int)sizeof(((struct hsm_progress *)0)->hp_errval) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_progress *)0)->hp_errval));
+ LASSERTF((int)offsetof(struct hsm_progress, padding) == 44, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_progress, padding));
+ LASSERTF((int)sizeof(((struct hsm_progress *)0)->padding) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_progress *)0)->padding));
+ LASSERTF(HP_FLAG_COMPLETED == 0x01, "found 0x%.8x\n",
+ HP_FLAG_COMPLETED);
+ LASSERTF(HP_FLAG_RETRY == 0x02, "found 0x%.8x\n",
+ HP_FLAG_RETRY);
+
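+ /* Checks for struct hsm_copy */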
+ LASSERTF((int)offsetof(struct hsm_copy, hc_data_version) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_copy, hc_data_version));
+ LASSERTF((int)sizeof(((struct hsm_copy *)0)->hc_data_version) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_copy *)0)->hc_data_version));
+ LASSERTF((int)offsetof(struct hsm_copy, hc_flags) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_copy, hc_flags));
+ LASSERTF((int)sizeof(((struct hsm_copy *)0)->hc_flags) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_copy *)0)->hc_flags));
+ LASSERTF((int)offsetof(struct hsm_copy, hc_errval) == 10, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_copy, hc_errval));
+ LASSERTF((int)sizeof(((struct hsm_copy *)0)->hc_errval) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_copy *)0)->hc_errval));
+ LASSERTF((int)offsetof(struct hsm_copy, padding) == 12, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_copy, padding));
+ LASSERTF((int)sizeof(((struct hsm_copy *)0)->padding) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_copy *)0)->padding));
+ LASSERTF((int)offsetof(struct hsm_copy, hc_hai) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_copy, hc_hai));
+ LASSERTF((int)sizeof(((struct hsm_copy *)0)->hc_hai) == 72, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_copy *)0)->hc_hai));
+
+ /* Checks for struct hsm_progress_kernel */
+ LASSERTF((int)sizeof(struct hsm_progress_kernel) == 64, "found %lld\n",
+ (long long)(int)sizeof(struct hsm_progress_kernel));
+ LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_fid) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_progress_kernel, hpk_fid));
+ LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_fid) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_fid));
+ LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_cookie) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_progress_kernel, hpk_cookie));
+ LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_cookie) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_cookie));
+ LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_extent) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_progress_kernel, hpk_extent));
+ LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_extent) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_extent));
+ LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_flags) == 40, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_progress_kernel, hpk_flags));
+ LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_flags) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_flags));
+ LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_errval) == 42, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_progress_kernel, hpk_errval));
+ LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_errval) == 2, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_errval));
+ LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_padding1) == 44, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_progress_kernel, hpk_padding1));
+ LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_padding1) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_padding1));
+ LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_data_version) == 48, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_progress_kernel, hpk_data_version));
+ LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_data_version) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_data_version));
+ LASSERTF((int)offsetof(struct hsm_progress_kernel, hpk_padding2) == 56, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_progress_kernel, hpk_padding2));
+ LASSERTF((int)sizeof(((struct hsm_progress_kernel *)0)->hpk_padding2) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_progress_kernel *)0)->hpk_padding2));
+
+ /* Checks for struct hsm_user_item */
+ LASSERTF((int)sizeof(struct hsm_user_item) == 32, "found %lld\n",
+ (long long)(int)sizeof(struct hsm_user_item));
+ LASSERTF((int)offsetof(struct hsm_user_item, hui_fid) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_user_item, hui_fid));
+ LASSERTF((int)sizeof(((struct hsm_user_item *)0)->hui_fid) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_user_item *)0)->hui_fid));
+ LASSERTF((int)offsetof(struct hsm_user_item, hui_extent) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_user_item, hui_extent));
+ LASSERTF((int)sizeof(((struct hsm_user_item *)0)->hui_extent) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_user_item *)0)->hui_extent));
+
+ /* Checks for struct hsm_user_state */
+ LASSERTF((int)sizeof(struct hsm_user_state) == 32, "found %lld\n",
+ (long long)(int)sizeof(struct hsm_user_state));
+ LASSERTF((int)offsetof(struct hsm_user_state, hus_states) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_user_state, hus_states));
+ LASSERTF((int)sizeof(((struct hsm_user_state *)0)->hus_states) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_user_state *)0)->hus_states));
+ LASSERTF((int)offsetof(struct hsm_user_state, hus_archive_id) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_user_state, hus_archive_id));
+ LASSERTF((int)sizeof(((struct hsm_user_state *)0)->hus_archive_id) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_user_state *)0)->hus_archive_id));
+ LASSERTF((int)offsetof(struct hsm_user_state, hus_in_progress_state) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_user_state, hus_in_progress_state));
+ LASSERTF((int)sizeof(((struct hsm_user_state *)0)->hus_in_progress_state) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_user_state *)0)->hus_in_progress_state));
+ LASSERTF((int)offsetof(struct hsm_user_state, hus_in_progress_action) == 12, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_user_state, hus_in_progress_action));
+ LASSERTF((int)sizeof(((struct hsm_user_state *)0)->hus_in_progress_action) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_user_state *)0)->hus_in_progress_action));
+ LASSERTF((int)offsetof(struct hsm_user_state, hus_in_progress_location) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_user_state, hus_in_progress_location));
+ LASSERTF((int)sizeof(((struct hsm_user_state *)0)->hus_in_progress_location) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_user_state *)0)->hus_in_progress_location));
+
+ /* Checks for struct hsm_state_set */
+ LASSERTF((int)sizeof(struct hsm_state_set) == 24, "found %lld\n",
+ (long long)(int)sizeof(struct hsm_state_set));
+ LASSERTF((int)offsetof(struct hsm_state_set, hss_valid) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_state_set, hss_valid));
+ LASSERTF((int)sizeof(((struct hsm_state_set *)0)->hss_valid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_state_set *)0)->hss_valid));
+ LASSERTF((int)offsetof(struct hsm_state_set, hss_archive_id) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_state_set, hss_archive_id));
+ LASSERTF((int)sizeof(((struct hsm_state_set *)0)->hss_archive_id) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_state_set *)0)->hss_archive_id));
+ LASSERTF((int)offsetof(struct hsm_state_set, hss_setmask) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_state_set, hss_setmask));
+ LASSERTF((int)sizeof(((struct hsm_state_set *)0)->hss_setmask) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_state_set *)0)->hss_setmask));
+ LASSERTF((int)offsetof(struct hsm_state_set, hss_clearmask) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_state_set, hss_clearmask));
+ LASSERTF((int)sizeof(((struct hsm_state_set *)0)->hss_clearmask) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_state_set *)0)->hss_clearmask));
+
+ /* Checks for struct hsm_current_action */
+ LASSERTF((int)sizeof(struct hsm_current_action) == 24, "found %lld\n",
+ (long long)(int)sizeof(struct hsm_current_action));
+ LASSERTF((int)offsetof(struct hsm_current_action, hca_state) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_current_action, hca_state));
+ LASSERTF((int)sizeof(((struct hsm_current_action *)0)->hca_state) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_current_action *)0)->hca_state));
+ LASSERTF((int)offsetof(struct hsm_current_action, hca_action) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_current_action, hca_action));
+ LASSERTF((int)sizeof(((struct hsm_current_action *)0)->hca_action) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_current_action *)0)->hca_action));
+ LASSERTF((int)offsetof(struct hsm_current_action, hca_location) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_current_action, hca_location));
+ LASSERTF((int)sizeof(((struct hsm_current_action *)0)->hca_location) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_current_action *)0)->hca_location));
+
+ /* Checks for struct hsm_request */
+ LASSERTF((int)sizeof(struct hsm_request) == 24, "found %lld\n",
+ (long long)(int)sizeof(struct hsm_request));
+ LASSERTF((int)offsetof(struct hsm_request, hr_action) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_request, hr_action));
+ LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_action) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_request *)0)->hr_action));
+ LASSERTF((int)offsetof(struct hsm_request, hr_archive_id) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_request, hr_archive_id));
+ LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_archive_id) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_request *)0)->hr_archive_id));
+ LASSERTF((int)offsetof(struct hsm_request, hr_flags) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_request, hr_flags));
+ LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_flags) == 8, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_request *)0)->hr_flags));
+ LASSERTF((int)offsetof(struct hsm_request, hr_itemcount) == 16, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_request, hr_itemcount));
+ LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_itemcount) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_request *)0)->hr_itemcount));
+ LASSERTF((int)offsetof(struct hsm_request, hr_data_len) == 20, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_request, hr_data_len));
+ LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_data_len) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_request *)0)->hr_data_len));
+ LASSERTF(HSM_FORCE_ACTION == 0x00000001UL, "found 0x%.8xUL\n",
+ (unsigned)HSM_FORCE_ACTION);
+ LASSERTF(HSM_GHOST_COPY == 0x00000002UL, "found 0x%.8xUL\n",
+ (unsigned)HSM_GHOST_COPY);
+
+ /* Checks for struct hsm_user_request */
+ LASSERTF((int)sizeof(struct hsm_user_request) == 24, "found %lld\n",
+ (long long)(int)sizeof(struct hsm_user_request));
+ LASSERTF((int)offsetof(struct hsm_user_request, hur_request) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_user_request, hur_request));
+ LASSERTF((int)sizeof(((struct hsm_user_request *)0)->hur_request) == 24, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_user_request *)0)->hur_request));
+ LASSERTF((int)offsetof(struct hsm_user_request, hur_user_item) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct hsm_user_request, hur_user_item));
+ LASSERTF((int)sizeof(((struct hsm_user_request *)0)->hur_user_item) == 0, "found %lld\n",
+ (long long)(int)sizeof(((struct hsm_user_request *)0)->hur_user_item));
+
+ /* Checks for struct update_buf */
+ LASSERTF((int)sizeof(struct update_buf) == 8, "found %lld\n",
+ (long long)(int)sizeof(struct update_buf));
+ LASSERTF((int)offsetof(struct update_buf, ub_magic) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct update_buf, ub_magic));
+ LASSERTF((int)sizeof(((struct update_buf *)0)->ub_magic) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct update_buf *)0)->ub_magic));
+ LASSERTF((int)offsetof(struct update_buf, ub_count) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct update_buf, ub_count));
+ LASSERTF((int)sizeof(((struct update_buf *)0)->ub_count) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct update_buf *)0)->ub_count));
+ LASSERTF((int)offsetof(struct update_buf, ub_bufs) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct update_buf, ub_bufs));
+ LASSERTF((int)sizeof(((struct update_buf *)0)->ub_bufs) == 0, "found %lld\n",
+ (long long)(int)sizeof(((struct update_buf *)0)->ub_bufs));
+
+ /* Checks for struct update_reply */
+ LASSERTF((int)sizeof(struct update_reply) == 8, "found %lld\n",
+ (long long)(int)sizeof(struct update_reply));
+ LASSERTF((int)offsetof(struct update_reply, ur_version) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct update_reply, ur_version));
+ LASSERTF((int)sizeof(((struct update_reply *)0)->ur_version) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct update_reply *)0)->ur_version));
+ LASSERTF((int)offsetof(struct update_reply, ur_count) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct update_reply, ur_count));
+ LASSERTF((int)sizeof(((struct update_reply *)0)->ur_count) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct update_reply *)0)->ur_count));
+ LASSERTF((int)offsetof(struct update_reply, ur_lens) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct update_reply, ur_lens));
+ LASSERTF((int)sizeof(((struct update_reply *)0)->ur_lens) == 0, "found %lld\n",
+ (long long)(int)sizeof(((struct update_reply *)0)->ur_lens));
+
+ /* Checks for struct update */
+ LASSERTF((int)sizeof(struct update) == 56, "found %lld\n",
+ (long long)(int)sizeof(struct update));
+ LASSERTF((int)offsetof(struct update, u_type) == 0, "found %lld\n",
+ (long long)(int)offsetof(struct update, u_type));
+ LASSERTF((int)sizeof(((struct update *)0)->u_type) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct update *)0)->u_type));
+ LASSERTF((int)offsetof(struct update, u_batchid) == 4, "found %lld\n",
+ (long long)(int)offsetof(struct update, u_batchid));
+ LASSERTF((int)sizeof(((struct update *)0)->u_batchid) == 4, "found %lld\n",
+ (long long)(int)sizeof(((struct update *)0)->u_batchid));
+ LASSERTF((int)offsetof(struct update, u_fid) == 8, "found %lld\n",
+ (long long)(int)offsetof(struct update, u_fid));
+ LASSERTF((int)sizeof(((struct update *)0)->u_fid) == 16, "found %lld\n",
+ (long long)(int)sizeof(((struct update *)0)->u_fid));
+ LASSERTF((int)offsetof(struct update, u_lens) == 24, "found %lld\n",
+ (long long)(int)offsetof(struct update, u_lens));
+ LASSERTF((int)sizeof(((struct update *)0)->u_lens) == 32, "found %lld\n",
+ (long long)(int)sizeof(((struct update *)0)->u_lens));
+ LASSERTF((int)offsetof(struct update, u_bufs) == 56, "found %lld\n",
+ (long long)(int)offsetof(struct update, u_bufs));
+ LASSERTF((int)sizeof(((struct update *)0)->u_bufs) == 0, "found %lld\n",
+ (long long)(int)sizeof(((struct update *)0)->u_bufs));
+}
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index ae0abc3..46f1e61 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -29,6 +29,8 @@ source "drivers/staging/media/dt3155v4l/Kconfig"
source "drivers/staging/media/go7007/Kconfig"
+source "drivers/staging/media/msi3101/Kconfig"
+
source "drivers/staging/media/solo6x10/Kconfig"
# Keep LIRC at the end, as it has sub-menus
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 2b97cae..eb7f30b 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -4,4 +4,5 @@ obj-$(CONFIG_LIRC_STAGING) += lirc/
obj-$(CONFIG_SOLO6X10) += solo6x10/
obj-$(CONFIG_VIDEO_DT3155) += dt3155v4l/
obj-$(CONFIG_VIDEO_GO7007) += go7007/
+obj-$(CONFIG_USB_MSI3101) += msi3101/
obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/
diff --git a/drivers/staging/media/davinci_vpfe/Kconfig b/drivers/staging/media/davinci_vpfe/Kconfig
index 2e4a28b..12f321d 100644
--- a/drivers/staging/media/davinci_vpfe/Kconfig
+++ b/drivers/staging/media/davinci_vpfe/Kconfig
@@ -1,6 +1,6 @@
config VIDEO_DM365_VPFE
tristate "DM365 VPFE Media Controller Capture Driver"
- depends on VIDEO_V4L2 && ARCH_DAVINCI_DM365 && !VIDEO_VPFE_CAPTURE
+ depends on VIDEO_V4L2 && ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF
select VIDEOBUF2_DMA_CONTIG
help
Support for DM365 VPFE based Media Controller Capture driver.
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
index 05673ed..766a071 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
@@ -1751,10 +1751,10 @@ static const struct media_entity_operations ipipe_media_ops = {
*/
void vpfe_ipipe_unregister_entities(struct vpfe_ipipe_device *vpfe_ipipe)
{
- /* cleanup entity */
- media_entity_cleanup(&vpfe_ipipe->subdev.entity);
/* unregister subdev */
v4l2_device_unregister_subdev(&vpfe_ipipe->subdev);
+ /* cleanup entity */
+ media_entity_cleanup(&vpfe_ipipe->subdev.entity);
}
/*
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
index b2f4ef8..59540cd 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
@@ -947,10 +947,10 @@ void vpfe_ipipeif_unregister_entities(struct vpfe_ipipeif_device *ipipeif)
/* unregister video device */
vpfe_video_unregister(&ipipeif->video_in);
- /* cleanup entity */
- media_entity_cleanup(&ipipeif->subdev.entity);
/* unregister subdev */
v4l2_device_unregister_subdev(&ipipeif->subdev);
+ /* cleanup entity */
+ media_entity_cleanup(&ipipeif->subdev.entity);
}
int
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.c b/drivers/staging/media/davinci_vpfe/dm365_isif.c
index 5829360..ff48fce 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_isif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_isif.c
@@ -1750,10 +1750,10 @@ static const struct media_entity_operations isif_media_ops = {
void vpfe_isif_unregister_entities(struct vpfe_isif_device *isif)
{
vpfe_video_unregister(&isif->video_out);
- /* cleanup entity */
- media_entity_cleanup(&isif->subdev.entity);
/* unregister subdev */
v4l2_device_unregister_subdev(&isif->subdev);
+ /* cleanup entity */
+ media_entity_cleanup(&isif->subdev.entity);
}
static void isif_restore_defaults(struct vpfe_isif_device *isif)
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.c b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
index 126f84c..8e13bd4 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_resizer.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
@@ -1777,14 +1777,14 @@ void vpfe_resizer_unregister_entities(struct vpfe_resizer_device *vpfe_rsz)
vpfe_video_unregister(&vpfe_rsz->resizer_a.video_out);
vpfe_video_unregister(&vpfe_rsz->resizer_b.video_out);
- /* cleanup entity */
- media_entity_cleanup(&vpfe_rsz->crop_resizer.subdev.entity);
- media_entity_cleanup(&vpfe_rsz->resizer_a.subdev.entity);
- media_entity_cleanup(&vpfe_rsz->resizer_b.subdev.entity);
/* unregister subdev */
v4l2_device_unregister_subdev(&vpfe_rsz->crop_resizer.subdev);
v4l2_device_unregister_subdev(&vpfe_rsz->resizer_a.subdev);
v4l2_device_unregister_subdev(&vpfe_rsz->resizer_b.subdev);
+ /* cleanup entity */
+ media_entity_cleanup(&vpfe_rsz->crop_resizer.subdev.entity);
+ media_entity_cleanup(&vpfe_rsz->resizer_a.subdev.entity);
+ media_entity_cleanup(&vpfe_rsz->resizer_b.subdev.entity);
}
/*
@@ -1865,12 +1865,12 @@ out_create_link:
vpfe_video_unregister(&resizer->resizer_b.video_out);
out_video_out2_register:
vpfe_video_unregister(&resizer->resizer_a.video_out);
- media_entity_cleanup(&resizer->crop_resizer.subdev.entity);
- media_entity_cleanup(&resizer->resizer_a.subdev.entity);
- media_entity_cleanup(&resizer->resizer_b.subdev.entity);
v4l2_device_unregister_subdev(&resizer->crop_resizer.subdev);
v4l2_device_unregister_subdev(&resizer->resizer_a.subdev);
v4l2_device_unregister_subdev(&resizer->resizer_b.subdev);
+ media_entity_cleanup(&resizer->crop_resizer.subdev.entity);
+ media_entity_cleanup(&resizer->resizer_a.subdev.entity);
+ media_entity_cleanup(&resizer->resizer_b.subdev.entity);
return ret;
}
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
index b88e1dd..d8ce20d 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
@@ -639,7 +639,8 @@ static int vpfe_probe(struct platform_device *pdev)
if (ret)
goto probe_free_dev_mem;
- if (vpfe_initialize_modules(vpfe_dev, pdev))
+ ret = vpfe_initialize_modules(vpfe_dev, pdev);
+ if (ret)
goto probe_disable_clock;
vpfe_dev->media_dev.dev = vpfe_dev->pdev;
@@ -663,7 +664,8 @@ static int vpfe_probe(struct platform_device *pdev)
/* set the driver data in platform device */
platform_set_drvdata(pdev, vpfe_dev);
/* register subdevs/entities */
- if (vpfe_register_entities(vpfe_dev))
+ ret = vpfe_register_entities(vpfe_dev);
+ if (ret)
goto probe_out_v4l2_unregister;
ret = vpfe_attach_irq(vpfe_dev);
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index ba913f1..24d98a6 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -39,7 +39,7 @@ static struct media_entity *vpfe_get_input_entity
struct vpfe_device *vpfe_dev = video->vpfe_dev;
struct media_pad *remote;
- remote = media_entity_remote_source(&vpfe_dev->vpfe_isif.pads[0]);
+ remote = media_entity_remote_pad(&vpfe_dev->vpfe_isif.pads[0]);
if (remote == NULL) {
pr_err("Invalid media connection to isif/ccdc\n");
return NULL;
@@ -56,7 +56,7 @@ static int vpfe_update_current_ext_subdev(struct vpfe_video_device *video)
struct media_pad *remote;
int i;
- remote = media_entity_remote_source(&vpfe_dev->vpfe_isif.pads[0]);
+ remote = media_entity_remote_pad(&vpfe_dev->vpfe_isif.pads[0]);
if (remote == NULL) {
pr_err("Invalid media connection to isif/ccdc\n");
return -EINVAL;
@@ -89,7 +89,7 @@ static int vpfe_update_current_ext_subdev(struct vpfe_video_device *video)
static struct v4l2_subdev *
vpfe_video_remote_subdev(struct vpfe_video_device *video, u32 *pad)
{
- struct media_pad *remote = media_entity_remote_source(&video->pad);
+ struct media_pad *remote = media_entity_remote_pad(&video->pad);
if (remote == NULL || remote->entity->type != MEDIA_ENT_T_V4L2_SUBDEV)
return NULL;
@@ -114,7 +114,7 @@ __vpfe_video_get_format(struct vpfe_video_device *video,
return -EINVAL;
fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- remote = media_entity_remote_source(&video->pad);
+ remote = media_entity_remote_pad(&video->pad);
fmt.pad = remote->index;
ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
@@ -245,7 +245,7 @@ static int vpfe_video_validate_pipeline(struct vpfe_pipeline *pipe)
return -EPIPE;
/* Retrieve the source format */
- pad = media_entity_remote_source(pad);
+ pad = media_entity_remote_pad(pad);
if (pad == NULL ||
pad->entity->type != MEDIA_ENT_T_V4L2_SUBDEV)
break;
@@ -667,7 +667,7 @@ static int vpfe_enum_fmt(struct file *file, void *priv,
return -EINVAL;
}
/* get the remote pad */
- remote = media_entity_remote_source(&video->pad);
+ remote = media_entity_remote_pad(&video->pad);
if (remote == NULL) {
v4l2_err(&vpfe_dev->v4l2_dev,
"invalid remote pad for video node\n");
@@ -1614,7 +1614,7 @@ int vpfe_video_register(struct vpfe_video_device *video,
void vpfe_video_unregister(struct vpfe_video_device *video)
{
if (video_is_registered(&video->video_dev)) {
- media_entity_cleanup(&video->video_dev.entity);
video_unregister_device(&video->video_dev);
+ media_entity_cleanup(&video->video_dev.entity);
}
}
diff --git a/drivers/staging/media/dt3155v4l/dt3155v4l.c b/drivers/staging/media/dt3155v4l/dt3155v4l.c
index c32e0ac..90d6ac4 100644
--- a/drivers/staging/media/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/media/dt3155v4l/dt3155v4l.c
@@ -829,7 +829,6 @@ static struct video_device dt3155_vdev = {
.minor = -1,
.release = video_device_release,
.tvnorms = DT3155_CURRENT_NORM,
- .current_norm = DT3155_CURRENT_NORM,
};
/* same as in drivers/base/dma-coherent.c */
diff --git a/drivers/staging/media/go7007/go7007-usb.c b/drivers/staging/media/go7007/go7007-usb.c
index 50066e0..46ed832 100644
--- a/drivers/staging/media/go7007/go7007-usb.c
+++ b/drivers/staging/media/go7007/go7007-usb.c
@@ -1124,7 +1124,7 @@ static int go7007_usb_probe(struct usb_interface *intf,
case GO7007_BOARDID_LIFEVIEW_LR192:
printk(KERN_ERR "go7007-usb: The Lifeview TV Walker Ultra "
"is not supported. Sorry!\n");
- return 0;
+ return -ENODEV;
name = "Lifeview TV Walker Ultra";
board = &board_lifeview_lr192;
break;
@@ -1140,7 +1140,7 @@ static int go7007_usb_probe(struct usb_interface *intf,
default:
printk(KERN_ERR "go7007-usb: unknown board ID %d!\n",
(unsigned int)id->driver_info);
- return 0;
+ return -ENODEV;
}
go = go7007_alloc(&board->main_info, &intf->dev);
diff --git a/drivers/staging/media/go7007/go7007.txt b/drivers/staging/media/go7007/go7007.txt
index fcb3e23..dc0026c 100644
--- a/drivers/staging/media/go7007/go7007.txt
+++ b/drivers/staging/media/go7007/go7007.txt
@@ -78,7 +78,6 @@ All vendor-built kernels should already be configured properly. However,
for custom-built kernels, the following options need to be enabled in the
kernel as built-in or modules:
- CONFIG_HOTPLUG - Support for hot-pluggable devices
CONFIG_MODULES - Enable loadable module support
CONFIG_KMOD - Automatic kernel module loading
CONFIG_FW_LOADER - Hotplug firmware loading support
diff --git a/drivers/staging/media/lirc/lirc_igorplugusb.c b/drivers/staging/media/lirc/lirc_igorplugusb.c
index 2faa391..28c8b0b 100644
--- a/drivers/staging/media/lirc/lirc_igorplugusb.c
+++ b/drivers/staging/media/lirc/lirc_igorplugusb.c
@@ -240,10 +240,6 @@ static int unregister_from_lirc(struct igorplug *ir)
dprintk(DRIVER_NAME "[%d]: calling lirc_unregister_driver\n", devnum);
lirc_unregister_driver(d->minor);
- kfree(d);
- ir->d = NULL;
- kfree(ir);
-
return devnum;
}
@@ -377,20 +373,16 @@ static int igorplugusb_remote_poll(void *data, struct lirc_buffer *buf)
return -ENODATA;
}
-
-
static int igorplugusb_remote_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- struct usb_device *dev = NULL;
+ struct usb_device *dev;
struct usb_host_interface *idesc = NULL;
struct usb_endpoint_descriptor *ep;
struct igorplug *ir = NULL;
struct lirc_driver *driver = NULL;
int devnum, pipe, maxp;
- int minor = 0;
char buf[63], name[128] = "";
- int mem_failure = 0;
int ret;
dprintk(DRIVER_NAME ": usb probe called.\n");
@@ -416,24 +408,18 @@ static int igorplugusb_remote_probe(struct usb_interface *intf,
dprintk(DRIVER_NAME "[%d]: bytes_in_key=%zu maxp=%d\n",
devnum, CODE_LENGTH, maxp);
- mem_failure = 0;
- ir = kzalloc(sizeof(struct igorplug), GFP_KERNEL);
- if (!ir) {
- mem_failure = 1;
- goto mem_failure_switch;
- }
- driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL);
- if (!driver) {
- mem_failure = 2;
- goto mem_failure_switch;
- }
+ ir = devm_kzalloc(&intf->dev, sizeof(*ir), GFP_KERNEL);
+ if (!ir)
+ return -ENOMEM;
+
+ driver = devm_kzalloc(&intf->dev, sizeof(*driver), GFP_KERNEL);
+ if (!driver)
+ return -ENOMEM;
ir->buf_in = usb_alloc_coherent(dev, DEVICE_BUFLEN + DEVICE_HEADERLEN,
GFP_ATOMIC, &ir->dma_in);
- if (!ir->buf_in) {
- mem_failure = 3;
- goto mem_failure_switch;
- }
+ if (!ir->buf_in)
+ return -ENOMEM;
strcpy(driver->name, DRIVER_NAME " ");
driver->minor = -1;
@@ -449,27 +435,14 @@ static int igorplugusb_remote_probe(struct usb_interface *intf,
driver->dev = &intf->dev;
driver->owner = THIS_MODULE;
- minor = lirc_register_driver(driver);
- if (minor < 0)
- mem_failure = 9;
-
-mem_failure_switch:
-
- switch (mem_failure) {
- case 9:
+ ret = lirc_register_driver(driver);
+ if (ret < 0) {
usb_free_coherent(dev, DEVICE_BUFLEN + DEVICE_HEADERLEN,
ir->buf_in, ir->dma_in);
- case 3:
- kfree(driver);
- case 2:
- kfree(ir);
- case 1:
- printk(DRIVER_NAME "[%d]: out of memory (code=%d)\n",
- devnum, mem_failure);
- return -ENOMEM;
+ return ret;
}
- driver->minor = minor;
+ driver->minor = ret;
ir->d = driver;
ir->devnum = devnum;
ir->usbdev = dev;
@@ -502,7 +475,6 @@ mem_failure_switch:
return 0;
}
-
static void igorplugusb_remote_disconnect(struct usb_interface *intf)
{
struct usb_device *usbdev = interface_to_usbdev(intf);
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
index 0a2c45d..4afa7da 100644
--- a/drivers/staging/media/lirc/lirc_imon.c
+++ b/drivers/staging/media/lirc/lirc_imon.c
@@ -911,8 +911,8 @@ static int imon_probe(struct usb_interface *interface,
if (retval) {
dev_err(dev, "%s: usb_submit_urb failed for intf0 (%d)\n",
__func__, retval);
- mutex_unlock(&context->ctx_lock);
- goto exit;
+ alloc_status = 8;
+ goto unlock;
}
usb_set_intfdata(interface, context);
@@ -937,6 +937,8 @@ unlock:
alloc_status_switch:
switch (alloc_status) {
+ case 8:
+ lirc_unregister_driver(driver->minor);
case 7:
usb_free_urb(tx_urb);
case 6:
@@ -959,7 +961,6 @@ alloc_status_switch:
retval = 0;
}
-exit:
mutex_unlock(&driver_lock);
return retval;
diff --git a/drivers/staging/media/msi3101/Kconfig b/drivers/staging/media/msi3101/Kconfig
new file mode 100644
index 0000000..b94a95a
--- /dev/null
+++ b/drivers/staging/media/msi3101/Kconfig
@@ -0,0 +1,3 @@
+config USB_MSI3101
+ tristate "Mirics MSi3101 SDR Dongle"
+ depends on USB && VIDEO_DEV && VIDEO_V4L2
diff --git a/drivers/staging/media/msi3101/Makefile b/drivers/staging/media/msi3101/Makefile
new file mode 100644
index 0000000..3730654
--- /dev/null
+++ b/drivers/staging/media/msi3101/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_USB_MSI3101) += sdr-msi3101.o
diff --git a/drivers/staging/media/msi3101/sdr-msi3101.c b/drivers/staging/media/msi3101/sdr-msi3101.c
new file mode 100644
index 0000000..24c7b70
--- /dev/null
+++ b/drivers/staging/media/msi3101/sdr-msi3101.c
@@ -0,0 +1,1931 @@
+/*
+ * Mirics MSi3101 SDR Dongle driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * This driver is somewhat based on the pwc driver:
+ * (C) 1999-2004 Nemosoft Unv.
+ * (C) 2004-2006 Luc Saillard (luc@saillard.org)
+ * (C) 2011 Hans de Goede <hdegoede@redhat.com>
+ *
+ * The development tree of this driver will be at:
+ * http://git.linuxtv.org/anttip/media_tree.git/shortlog/refs/heads/mirics
+ *
+ * GNU Radio plugin "gr-kernel" for device usage will be on:
+ * http://git.linuxtv.org/anttip/gr-kernel.git
+ *
+ * TODO:
+ * Help is very welcome for these and anything else you can imagine:
+ * - split USB ADC interface and RF tuner to own drivers (msi2500 and msi001)
+ * - move controls to V4L2 API
+ * - use libv4l2 for stream format conversions
+ * - gr-kernel: switch to v4l2_mmap (current read eats a lot of cpu)
+ * - SDRSharp support
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/gcd.h>
+#include <asm/div64.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <linux/usb.h>
+#include <media/videobuf2-vmalloc.h>
+
+struct msi3101_gain {
+ u8 tot:7;
+ u8 baseband:6;
+ bool lna:1;
+ bool mixer:1;
+};
+
+/* 60 – 120 MHz band, lna 24dB, mixer 19dB */
+static const struct msi3101_gain msi3101_gain_lut_120[] = {
+ { 0, 0, 0, 0},
+ { 1, 1, 0, 0},
+ { 2, 2, 0, 0},
+ { 3, 3, 0, 0},
+ { 4, 4, 0, 0},
+ { 5, 5, 0, 0},
+ { 6, 6, 0, 0},
+ { 7, 7, 0, 0},
+ { 8, 8, 0, 0},
+ { 9, 9, 0, 0},
+ { 10, 10, 0, 0},
+ { 11, 11, 0, 0},
+ { 12, 12, 0, 0},
+ { 13, 13, 0, 0},
+ { 14, 14, 0, 0},
+ { 15, 15, 0, 0},
+ { 16, 16, 0, 0},
+ { 17, 17, 0, 0},
+ { 18, 18, 0, 0},
+ { 19, 19, 0, 0},
+ { 20, 20, 0, 0},
+ { 21, 21, 0, 0},
+ { 22, 22, 0, 0},
+ { 23, 23, 0, 0},
+ { 24, 24, 0, 0},
+ { 25, 25, 0, 0},
+ { 26, 26, 0, 0},
+ { 27, 27, 0, 0},
+ { 28, 28, 0, 0},
+ { 29, 5, 1, 0},
+ { 30, 6, 1, 0},
+ { 31, 7, 1, 0},
+ { 32, 8, 1, 0},
+ { 33, 9, 1, 0},
+ { 34, 10, 1, 0},
+ { 35, 11, 1, 0},
+ { 36, 12, 1, 0},
+ { 37, 13, 1, 0},
+ { 38, 14, 1, 0},
+ { 39, 15, 1, 0},
+ { 40, 16, 1, 0},
+ { 41, 17, 1, 0},
+ { 42, 18, 1, 0},
+ { 43, 19, 1, 0},
+ { 44, 20, 1, 0},
+ { 45, 21, 1, 0},
+ { 46, 22, 1, 0},
+ { 47, 23, 1, 0},
+ { 48, 24, 1, 0},
+ { 49, 25, 1, 0},
+ { 50, 26, 1, 0},
+ { 51, 27, 1, 0},
+ { 52, 28, 1, 0},
+ { 53, 29, 1, 0},
+ { 54, 30, 1, 0},
+ { 55, 31, 1, 0},
+ { 56, 32, 1, 0},
+ { 57, 33, 1, 0},
+ { 58, 34, 1, 0},
+ { 59, 35, 1, 0},
+ { 60, 36, 1, 0},
+ { 61, 37, 1, 0},
+ { 62, 38, 1, 0},
+ { 63, 39, 1, 0},
+ { 64, 40, 1, 0},
+ { 65, 41, 1, 0},
+ { 66, 42, 1, 0},
+ { 67, 43, 1, 0},
+ { 68, 44, 1, 0},
+ { 69, 45, 1, 0},
+ { 70, 46, 1, 0},
+ { 71, 47, 1, 0},
+ { 72, 48, 1, 0},
+ { 73, 49, 1, 0},
+ { 74, 50, 1, 0},
+ { 75, 51, 1, 0},
+ { 76, 52, 1, 0},
+ { 77, 53, 1, 0},
+ { 78, 54, 1, 0},
+ { 79, 55, 1, 0},
+ { 80, 56, 1, 0},
+ { 81, 57, 1, 0},
+ { 82, 58, 1, 0},
+ { 83, 40, 1, 1},
+ { 84, 41, 1, 1},
+ { 85, 42, 1, 1},
+ { 86, 43, 1, 1},
+ { 87, 44, 1, 1},
+ { 88, 45, 1, 1},
+ { 89, 46, 1, 1},
+ { 90, 47, 1, 1},
+ { 91, 48, 1, 1},
+ { 92, 49, 1, 1},
+ { 93, 50, 1, 1},
+ { 94, 51, 1, 1},
+ { 95, 52, 1, 1},
+ { 96, 53, 1, 1},
+ { 97, 54, 1, 1},
+ { 98, 55, 1, 1},
+ { 99, 56, 1, 1},
+ {100, 57, 1, 1},
+ {101, 58, 1, 1},
+ {102, 59, 1, 1},
+};
+
+/* 120 – 245 MHz band, lna 24dB, mixer 19dB */
+static const struct msi3101_gain msi3101_gain_lut_245[] = {
+ { 0, 0, 0, 0},
+ { 1, 1, 0, 0},
+ { 2, 2, 0, 0},
+ { 3, 3, 0, 0},
+ { 4, 4, 0, 0},
+ { 5, 5, 0, 0},
+ { 6, 6, 0, 0},
+ { 7, 7, 0, 0},
+ { 8, 8, 0, 0},
+ { 9, 9, 0, 0},
+ { 10, 10, 0, 0},
+ { 11, 11, 0, 0},
+ { 12, 12, 0, 0},
+ { 13, 13, 0, 0},
+ { 14, 14, 0, 0},
+ { 15, 15, 0, 0},
+ { 16, 16, 0, 0},
+ { 17, 17, 0, 0},
+ { 18, 18, 0, 0},
+ { 19, 19, 0, 0},
+ { 20, 20, 0, 0},
+ { 21, 21, 0, 0},
+ { 22, 22, 0, 0},
+ { 23, 23, 0, 0},
+ { 24, 24, 0, 0},
+ { 25, 25, 0, 0},
+ { 26, 26, 0, 0},
+ { 27, 27, 0, 0},
+ { 28, 28, 0, 0},
+ { 29, 5, 1, 0},
+ { 30, 6, 1, 0},
+ { 31, 7, 1, 0},
+ { 32, 8, 1, 0},
+ { 33, 9, 1, 0},
+ { 34, 10, 1, 0},
+ { 35, 11, 1, 0},
+ { 36, 12, 1, 0},
+ { 37, 13, 1, 0},
+ { 38, 14, 1, 0},
+ { 39, 15, 1, 0},
+ { 40, 16, 1, 0},
+ { 41, 17, 1, 0},
+ { 42, 18, 1, 0},
+ { 43, 19, 1, 0},
+ { 44, 20, 1, 0},
+ { 45, 21, 1, 0},
+ { 46, 22, 1, 0},
+ { 47, 23, 1, 0},
+ { 48, 24, 1, 0},
+ { 49, 25, 1, 0},
+ { 50, 26, 1, 0},
+ { 51, 27, 1, 0},
+ { 52, 28, 1, 0},
+ { 53, 29, 1, 0},
+ { 54, 30, 1, 0},
+ { 55, 31, 1, 0},
+ { 56, 32, 1, 0},
+ { 57, 33, 1, 0},
+ { 58, 34, 1, 0},
+ { 59, 35, 1, 0},
+ { 60, 36, 1, 0},
+ { 61, 37, 1, 0},
+ { 62, 38, 1, 0},
+ { 63, 39, 1, 0},
+ { 64, 40, 1, 0},
+ { 65, 41, 1, 0},
+ { 66, 42, 1, 0},
+ { 67, 43, 1, 0},
+ { 68, 44, 1, 0},
+ { 69, 45, 1, 0},
+ { 70, 46, 1, 0},
+ { 71, 47, 1, 0},
+ { 72, 48, 1, 0},
+ { 73, 49, 1, 0},
+ { 74, 50, 1, 0},
+ { 75, 51, 1, 0},
+ { 76, 52, 1, 0},
+ { 77, 53, 1, 0},
+ { 78, 54, 1, 0},
+ { 79, 55, 1, 0},
+ { 80, 56, 1, 0},
+ { 81, 57, 1, 0},
+ { 82, 58, 1, 0},
+ { 83, 40, 1, 1},
+ { 84, 41, 1, 1},
+ { 85, 42, 1, 1},
+ { 86, 43, 1, 1},
+ { 87, 44, 1, 1},
+ { 88, 45, 1, 1},
+ { 89, 46, 1, 1},
+ { 90, 47, 1, 1},
+ { 91, 48, 1, 1},
+ { 92, 49, 1, 1},
+ { 93, 50, 1, 1},
+ { 94, 51, 1, 1},
+ { 95, 52, 1, 1},
+ { 96, 53, 1, 1},
+ { 97, 54, 1, 1},
+ { 98, 55, 1, 1},
+ { 99, 56, 1, 1},
+ {100, 57, 1, 1},
+ {101, 58, 1, 1},
+ {102, 59, 1, 1},
+};
+
+/* 420 – 1000 MHz band, lna 7dB, mixer 19dB */
+static const struct msi3101_gain msi3101_gain_lut_1000[] = {
+ { 0, 0, 0, 0},
+ { 1, 1, 0, 0},
+ { 2, 2, 0, 0},
+ { 3, 3, 0, 0},
+ { 4, 4, 0, 0},
+ { 5, 5, 0, 0},
+ { 6, 6, 0, 0},
+ { 7, 7, 0, 0},
+ { 8, 8, 0, 0},
+ { 9, 9, 0, 0},
+ { 10, 10, 0, 0},
+ { 11, 11, 0, 0},
+ { 12, 5, 1, 0},
+ { 13, 6, 1, 0},
+ { 14, 7, 1, 0},
+ { 15, 8, 1, 0},
+ { 16, 9, 1, 0},
+ { 17, 10, 1, 0},
+ { 18, 11, 1, 0},
+ { 19, 12, 1, 0},
+ { 20, 13, 1, 0},
+ { 21, 14, 1, 0},
+ { 22, 15, 1, 0},
+ { 23, 16, 1, 0},
+ { 24, 17, 1, 0},
+ { 25, 18, 1, 0},
+ { 26, 19, 1, 0},
+ { 27, 20, 1, 0},
+ { 28, 21, 1, 0},
+ { 29, 22, 1, 0},
+ { 30, 23, 1, 0},
+ { 31, 24, 1, 0},
+ { 32, 25, 1, 0},
+ { 33, 26, 1, 0},
+ { 34, 27, 1, 0},
+ { 35, 28, 1, 0},
+ { 36, 29, 1, 0},
+ { 37, 30, 1, 0},
+ { 38, 31, 1, 0},
+ { 39, 32, 1, 0},
+ { 40, 33, 1, 0},
+ { 41, 34, 1, 0},
+ { 42, 35, 1, 0},
+ { 43, 36, 1, 0},
+ { 44, 37, 1, 0},
+ { 45, 38, 1, 0},
+ { 46, 39, 1, 0},
+ { 47, 40, 1, 0},
+ { 48, 41, 1, 0},
+ { 49, 42, 1, 0},
+ { 50, 43, 1, 0},
+ { 51, 44, 1, 0},
+ { 52, 45, 1, 0},
+ { 53, 46, 1, 0},
+ { 54, 47, 1, 0},
+ { 55, 48, 1, 0},
+ { 56, 49, 1, 0},
+ { 57, 50, 1, 0},
+ { 58, 51, 1, 0},
+ { 59, 52, 1, 0},
+ { 60, 53, 1, 0},
+ { 61, 54, 1, 0},
+ { 62, 55, 1, 0},
+ { 63, 56, 1, 0},
+ { 64, 57, 1, 0},
+ { 65, 58, 1, 0},
+ { 66, 40, 1, 1},
+ { 67, 41, 1, 1},
+ { 68, 42, 1, 1},
+ { 69, 43, 1, 1},
+ { 70, 44, 1, 1},
+ { 71, 45, 1, 1},
+ { 72, 46, 1, 1},
+ { 73, 47, 1, 1},
+ { 74, 48, 1, 1},
+ { 75, 49, 1, 1},
+ { 76, 50, 1, 1},
+ { 77, 51, 1, 1},
+ { 78, 52, 1, 1},
+ { 79, 53, 1, 1},
+ { 80, 54, 1, 1},
+ { 81, 55, 1, 1},
+ { 82, 56, 1, 1},
+ { 83, 57, 1, 1},
+ { 84, 58, 1, 1},
+ { 85, 59, 1, 1},
+};
+
+/*
+ * iConfiguration 0
+ * bInterfaceNumber 0
+ * bAlternateSetting 1
+ * bNumEndpoints 1
+ * bEndpointAddress 0x81 EP 1 IN
+ * bmAttributes 1
+ * Transfer Type Isochronous
+ * wMaxPacketSize 0x1400 3x 1024 bytes
+ * bInterval 1
+ */
+#define MAX_ISO_BUFS (8)
+#define ISO_FRAMES_PER_DESC (8)
+#define ISO_MAX_FRAME_SIZE (3 * 1024)
+#define ISO_BUFFER_SIZE (ISO_FRAMES_PER_DESC * ISO_MAX_FRAME_SIZE)
+#define MAX_ISOC_ERRORS 20
+
+/* TODO: These should be moved to V4L2 API */
+#define MSI3101_CID_SAMPLING_MODE ((V4L2_CID_USER_BASE | 0xf000) + 0)
+#define MSI3101_CID_SAMPLING_RATE ((V4L2_CID_USER_BASE | 0xf000) + 1)
+#define MSI3101_CID_SAMPLING_RESOLUTION ((V4L2_CID_USER_BASE | 0xf000) + 2)
+#define MSI3101_CID_TUNER_RF ((V4L2_CID_USER_BASE | 0xf000) + 10)
+#define MSI3101_CID_TUNER_BW ((V4L2_CID_USER_BASE | 0xf000) + 11)
+#define MSI3101_CID_TUNER_IF ((V4L2_CID_USER_BASE | 0xf000) + 12)
+#define MSI3101_CID_TUNER_GAIN ((V4L2_CID_USER_BASE | 0xf000) + 13)
+
+/* intermediate buffers with raw data from the USB device */
+struct msi3101_frame_buf {
+ struct vb2_buffer vb; /* common v4l buffer stuff -- must be first */
+ struct list_head list;
+};
+
+struct msi3101_state {
+ struct video_device vdev;
+ struct v4l2_device v4l2_dev;
+
+ /* videobuf2 queue and queued buffers list */
+ struct vb2_queue vb_queue;
+ struct list_head queued_bufs;
+ spinlock_t queued_bufs_lock; /* Protects queued_bufs */
+
+ /* Note: if taking both locks, v4l2_lock must always be locked first! */
+ struct mutex v4l2_lock; /* Protects everything else */
+ struct mutex vb_queue_lock; /* Protects vb_queue and capt_file */
+
+ /* Pointer to our usb_device, will be NULL after unplug */
+ struct usb_device *udev; /* Both mutexes must be held when setting! */
+
+ unsigned int isoc_errors; /* number of contiguous ISOC errors */
+ unsigned int vb_full; /* vb is full and packets dropped */
+
+ struct urb *urbs[MAX_ISO_BUFS];
+ int (*convert_stream) (struct msi3101_state *s, u32 *dst, u8 *src,
+ unsigned int src_len);
+
+ /* Controls */
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *ctrl_sampling_rate;
+ struct v4l2_ctrl *ctrl_tuner_rf;
+ struct v4l2_ctrl *ctrl_tuner_bw;
+ struct v4l2_ctrl *ctrl_tuner_if;
+ struct v4l2_ctrl *ctrl_tuner_gain;
+
+ u32 next_sample; /* for tracking lost packets */
+ u32 sample; /* for sample rate calc */
+ unsigned long jiffies;
+ unsigned int sample_ctrl_bit[4];
+};
+
+/* Private functions */
+static struct msi3101_frame_buf *msi3101_get_next_fill_buf(
+ struct msi3101_state *s)
+{
+ unsigned long flags = 0;
+ struct msi3101_frame_buf *buf = NULL;
+
+ spin_lock_irqsave(&s->queued_bufs_lock, flags);
+ if (list_empty(&s->queued_bufs))
+ goto leave;
+
+ buf = list_entry(s->queued_bufs.next, struct msi3101_frame_buf, list);
+ list_del(&buf->list);
+leave:
+ spin_unlock_irqrestore(&s->queued_bufs_lock, flags);
+ return buf;
+}
+
+/*
+ * +===========================================================================
+ * | 00-1023 | USB packet type '384'
+ * +===========================================================================
+ * | 00- 03 | sequence number of first sample in that USB packet
+ * +---------------------------------------------------------------------------
+ * | 04- 15 | garbage
+ * +---------------------------------------------------------------------------
+ * | 16- 175 | samples
+ * +---------------------------------------------------------------------------
+ * | 176- 179 | control bits for previous samples
+ * +---------------------------------------------------------------------------
+ * | 180- 339 | samples
+ * +---------------------------------------------------------------------------
+ * | 340- 343 | control bits for previous samples
+ * +---------------------------------------------------------------------------
+ * | 344- 503 | samples
+ * +---------------------------------------------------------------------------
+ * | 504- 507 | control bits for previous samples
+ * +---------------------------------------------------------------------------
+ * | 508- 667 | samples
+ * +---------------------------------------------------------------------------
+ * | 668- 671 | control bits for previous samples
+ * +---------------------------------------------------------------------------
+ * | 672- 831 | samples
+ * +---------------------------------------------------------------------------
+ * | 832- 835 | control bits for previous samples
+ * +---------------------------------------------------------------------------
+ * | 836- 995 | samples
+ * +---------------------------------------------------------------------------
+ * | 996- 999 | control bits for previous samples
+ * +---------------------------------------------------------------------------
+ * | 1000-1023 | garbage
+ * +---------------------------------------------------------------------------
+ *
+ * Bytes 4 - 7 could have some meaning?
+ *
+ * Control bits for the previous samples form a 32-bit field containing
+ * 16 x 2-bit numbers, i.e. one 2-bit number per 8 samples. It is likely used
+ * for bit-shifting the samples by the given number of bits, increasing the
+ * effective sampling resolution. The value 2 (0b10) was never seen.
+ *
+ * 6 * 16 * 2 * 4 = 768 samples. 768 * 4 = 3072 bytes
+ */
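As an illustrative aside (not part of this commit; the helper name and its parameter are hypothetical), the layout described above can be walked like this:

/* Sketch only: walk one 1024-byte packet of USB packet type '384'. */
static void msi3101_pkt384_walk_example(const u8 *pkt)
{
	/* bytes 0-3: sequence number of the first sample (little endian) */
	u32 seq = pkt[3] << 24 | pkt[2] << 16 | pkt[1] << 8 | pkt[0];
	const u8 *p = pkt + 16;	/* skip header; bytes 4-15 are garbage */
	int blk;

	for (blk = 0; blk < 6; blk++) {
		/* 160 bytes = 16 groups of 10 bytes = 16 x 8 packed 10-bit samples */
		const u8 *samples = p;
		/* 32-bit control word: one 2-bit shift value per 10-byte group */
		u32 ctrl = p[160] | p[161] << 8 | p[162] << 16 | p[163] << 24;

		p += 160 + 4;
		(void)samples;
		(void)ctrl;
	}
	/* bytes 1000-1023 are garbage */
	(void)seq;
}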
+
+/*
+ * Integer to 32-bit IEEE floating point representation routine is taken
+ * from Radeon R600 driver (drivers/gpu/drm/radeon/r600_blit_kms.c).
+ *
+ * TODO: Currently we do the conversion here in the kernel, but in the future
+ * it will be moved to the libv4l2 library, as video format conversions are.
+ */
+#define I2F_FRAC_BITS 23
+#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
+
+/*
+ * Converts signed 8-bit integer into 32-bit IEEE floating point
+ * representation.
+ */
+static u32 msi3101_convert_sample_504(struct msi3101_state *s, u16 x)
+{
+ u32 msb, exponent, fraction, sign;
+
+ /* Zero is special */
+ if (!x)
+ return 0;
+
+ /* Negative / positive value */
+ if (x & (1 << 7)) {
+ x = -x;
+ x &= 0x7f; /* result is 7 bit ... + sign */
+ sign = 1 << 31;
+ } else {
+ sign = 0 << 31;
+ }
+
+ /* Get location of the most significant bit */
+ msb = __fls(x);
+
+ fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
+ exponent = (127 + msb) << I2F_FRAC_BITS;
+
+ return (fraction + exponent) | sign;
+}
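As a quick sanity check of the conversion above (an editorial worked example, not part of this commit), x = 3 reproduces the IEEE 754 single-precision encoding of 3.0f:

/*
 * x = 3:
 *   msb      = __fls(3)                            = 1
 *   fraction = ror32(3, (1 - 23) & 0x1f) & I2F_MASK
 *            = ror32(3, 10) & 0x007fffff           = 0x00400000
 *   exponent = (127 + 1) << 23                     = 0x40000000
 *   result   = 0x00400000 + 0x40000000             = 0x40400000
 * i.e. sign 0, biased exponent 128, significand 1.5 -> 1.5 * 2^1 = 3.0f
 */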
+
+static int msi3101_convert_stream_504(struct msi3101_state *s, u32 *dst,
+ u8 *src, unsigned int src_len)
+{
+ int i, j, i_max, dst_len = 0;
+ u16 sample[2];
+ u32 sample_num[3];
+
+ /* There can be 1-3 1024-byte URB frames */
+ i_max = src_len / 1024;
+
+ for (i = 0; i < i_max; i++) {
+ sample_num[i] = src[3] << 24 | src[2] << 16 | src[1] << 8 | src[0] << 0;
+ if (i == 0 && s->next_sample != sample_num[0]) {
+ dev_dbg_ratelimited(&s->udev->dev,
+ "%d samples lost, %d %08x:%08x\n",
+ sample_num[0] - s->next_sample,
+ src_len, s->next_sample, sample_num[0]);
+ }
+
+ /*
+ * Dump all unknown 'garbage' data - maybe we will discover
+ * someday if there is something rational...
+ */
+ dev_dbg_ratelimited(&s->udev->dev, "%*ph\n", 12, &src[4]);
+
+ src += 16;
+ for (j = 0; j < 1008; j += 2) {
+ sample[0] = src[j + 0];
+ sample[1] = src[j + 1];
+
+ *dst++ = msi3101_convert_sample_504(s, sample[0]);
+ *dst++ = msi3101_convert_sample_504(s, sample[1]);
+ }
+ /* 504 x I+Q 32bit float samples */
+ dst_len += 504 * 2 * 4;
+ src += 1008;
+ }
+
+ /* calculate the sampling rate and output it at 10 second intervals */
+ if ((s->jiffies + msecs_to_jiffies(10000)) <= jiffies) {
+ unsigned long jiffies_now = jiffies;
+ unsigned long msecs = jiffies_to_msecs(jiffies_now) - jiffies_to_msecs(s->jiffies);
+ unsigned int samples = sample_num[i_max - 1] - s->sample;
+ s->jiffies = jiffies_now;
+ s->sample = sample_num[i_max - 1];
+ dev_dbg(&s->udev->dev,
+ "slen=%d samples=%u msecs=%lu sampling rate=%lu\n",
+ src_len, samples, msecs,
+ samples * 1000UL / msecs);
+ }
+
+ /* next sample (sample = sample + i * 504) */
+ s->next_sample = sample_num[i_max - 1] + 504;
+
+ return dst_len;
+}
+
+/*
+ * Converts signed ~10+2-bit integer into 32-bit IEEE floating point
+ * representation.
+ */
+static u32 msi3101_convert_sample_384(struct msi3101_state *s, u16 x, int shift)
+{
+ u32 msb, exponent, fraction, sign;
+ s->sample_ctrl_bit[shift]++;
+
+ /* Zero is special */
+ if (!x)
+ return 0;
+
+ if (shift == 3)
+ shift = 2;
+
+ /* Convert 10-bit two's complement to 12-bit */
+ if (x & (1 << 9)) {
+ x |= ~0U << 10; /* set all remaining bits to one */
+ x <<= shift;
+ x = -x;
+ x &= 0x7ff; /* result is 11 bit ... + sign */
+ sign = 1 << 31;
+ } else {
+ x <<= shift;
+ sign = 0 << 31;
+ }
+
+ /* Get location of the most significant bit */
+ msb = __fls(x);
+
+ fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
+ exponent = (127 + msb) << I2F_FRAC_BITS;
+
+ return (fraction + exponent) | sign;
+}
+
+static int msi3101_convert_stream_384(struct msi3101_state *s, u32 *dst,
+ u8 *src, unsigned int src_len)
+{
+ int i, j, k, l, i_max, dst_len = 0;
+ u16 sample[4];
+ u32 bits;
+ u32 sample_num[3];
+
+ /* There can be 1-3 1024-byte URB frames */
+ i_max = src_len / 1024;
+ for (i = 0; i < i_max; i++) {
+ sample_num[i] = src[3] << 24 | src[2] << 16 | src[1] << 8 | src[0] << 0;
+ if (i == 0 && s->next_sample != sample_num[0]) {
+ dev_dbg_ratelimited(&s->udev->dev,
+ "%d samples lost, %d %08x:%08x\n",
+ sample_num[0] - s->next_sample,
+ src_len, s->next_sample, sample_num[0]);
+ }
+
+ /*
+ * Dump all unknown 'garbage' data - maybe we will discover
+ * someday if there is something rational...
+ */
+ dev_dbg_ratelimited(&s->udev->dev,
+ "%*ph %*ph\n", 12, &src[4], 24, &src[1000]);
+
+ src += 16;
+ for (j = 0; j < 6; j++) {
+ bits = src[160 + 3] << 24 | src[160 + 2] << 16 | src[160 + 1] << 8 | src[160 + 0] << 0;
+ for (k = 0; k < 16; k++) {
+ for (l = 0; l < 10; l += 5) {
+ sample[0] = (src[l + 0] & 0xff) >> 0 | (src[l + 1] & 0x03) << 8;
+ sample[1] = (src[l + 1] & 0xfc) >> 2 | (src[l + 2] & 0x0f) << 6;
+ sample[2] = (src[l + 2] & 0xf0) >> 4 | (src[l + 3] & 0x3f) << 4;
+ sample[3] = (src[l + 3] & 0xc0) >> 6 | (src[l + 4] & 0xff) << 2;
+
+ *dst++ = msi3101_convert_sample_384(s, sample[0], (bits >> (2 * k)) & 0x3);
+ *dst++ = msi3101_convert_sample_384(s, sample[1], (bits >> (2 * k)) & 0x3);
+ *dst++ = msi3101_convert_sample_384(s, sample[2], (bits >> (2 * k)) & 0x3);
+ *dst++ = msi3101_convert_sample_384(s, sample[3], (bits >> (2 * k)) & 0x3);
+ }
+ src += 10;
+ }
+ dev_dbg_ratelimited(&s->udev->dev,
+ "sample control bits %08x\n", bits);
+ src += 4;
+ }
+ /* 384 x I+Q 32bit float samples */
+ dst_len += 384 * 2 * 4;
+ src += 24;
+ }
+
+ /* calculate the sampling rate and output it at 10 second intervals */
+ if ((s->jiffies + msecs_to_jiffies(10000)) <= jiffies) {
+ unsigned long jiffies_now = jiffies;
+ unsigned long msecs = jiffies_to_msecs(jiffies_now) - jiffies_to_msecs(s->jiffies);
+ unsigned int samples = sample_num[i_max - 1] - s->sample;
+ s->jiffies = jiffies_now;
+ s->sample = sample_num[i_max - 1];
+ dev_dbg(&s->udev->dev,
+ "slen=%d samples=%u msecs=%lu sampling rate=%lu bits=%d.%d.%d.%d\n",
+ src_len, samples, msecs,
+ samples * 1000UL / msecs,
+ s->sample_ctrl_bit[0], s->sample_ctrl_bit[1],
+ s->sample_ctrl_bit[2], s->sample_ctrl_bit[3]);
+ }
+
+ /* next sample (sample = sample + i * 384) */
+ s->next_sample = sample_num[i_max - 1] + 384;
+
+ return dst_len;
+}
+
+/*
+ * Converts signed 12-bit integer into 32-bit IEEE floating point
+ * representation.
+ */
+static u32 msi3101_convert_sample_336(struct msi3101_state *s, u16 x)
+{
+ u32 msb, exponent, fraction, sign;
+
+ /* Zero is special */
+ if (!x)
+ return 0;
+
+ /* Negative / positive value */
+ if (x & (1 << 11)) {
+ x = -x;
+ x &= 0x7ff; /* result is 11 bit ... + sign */
+ sign = 1 << 31;
+ } else {
+ sign = 0 << 31;
+ }
+
+ /* Get location of the most significant bit */
+ msb = __fls(x);
+
+ fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
+ exponent = (127 + msb) << I2F_FRAC_BITS;
+
+ return (fraction + exponent) | sign;
+}
+
+static int msi3101_convert_stream_336(struct msi3101_state *s, u32 *dst,
+ u8 *src, unsigned int src_len)
+{
+ int i, j, i_max, dst_len = 0;
+ u16 sample[2];
+ u32 sample_num[3];
+
+ /* There can be 1-3 1024-byte URB frames */
+ i_max = src_len / 1024;
+
+ for (i = 0; i < i_max; i++) {
+ sample_num[i] = src[3] << 24 | src[2] << 16 | src[1] << 8 | src[0] << 0;
+ if (i == 0 && s->next_sample != sample_num[0]) {
+ dev_dbg_ratelimited(&s->udev->dev,
+ "%d samples lost, %d %08x:%08x\n",
+ sample_num[0] - s->next_sample,
+ src_len, s->next_sample, sample_num[0]);
+ }
+
+ /*
+ * Dump all unknown 'garbage' data - maybe we will discover
+ * someday if there is something rational...
+ */
+ dev_dbg_ratelimited(&s->udev->dev, "%*ph\n", 12, &src[4]);
+
+ src += 16;
+ for (j = 0; j < 1008; j += 3) {
+ sample[0] = (src[j + 0] & 0xff) >> 0 | (src[j + 1] & 0x0f) << 8;
+ sample[1] = (src[j + 1] & 0xf0) >> 4 | (src[j + 2] & 0xff) << 4;
+
+ *dst++ = msi3101_convert_sample_336(s, sample[0]);
+ *dst++ = msi3101_convert_sample_336(s, sample[1]);
+ }
+ /* 336 x I+Q 32bit float samples */
+ dst_len += 336 * 2 * 4;
+ src += 1008;
+ }
+
+ /* calculate the sampling rate and output it at 10 second intervals */
+ if ((s->jiffies + msecs_to_jiffies(10000)) <= jiffies) {
+ unsigned long jiffies_now = jiffies;
+ unsigned long msecs = jiffies_to_msecs(jiffies_now) - jiffies_to_msecs(s->jiffies);
+ unsigned int samples = sample_num[i_max - 1] - s->sample;
+ s->jiffies = jiffies_now;
+ s->sample = sample_num[i_max - 1];
+ dev_dbg(&s->udev->dev,
+ "slen=%d samples=%u msecs=%lu sampling rate=%lu\n",
+ src_len, samples, msecs,
+ samples * 1000UL / msecs);
+ }
+
+ /* next sample (sample = sample + i * 336) */
+ s->next_sample = sample_num[i_max - 1] + 336;
+
+ return dst_len;
+}
+
+/*
+ * Converts signed 14-bit integer into 32-bit IEEE floating point
+ * representation.
+ */
+static u32 msi3101_convert_sample_252(struct msi3101_state *s, u16 x)
+{
+ u32 msb, exponent, fraction, sign;
+
+ /* Zero is special */
+ if (!x)
+ return 0;
+
+ /* Negative / positive value */
+ if (x & (1 << 13)) {
+ x = -x;
+ x &= 0x1fff; /* result is 13 bit ... + sign */
+ sign = 1 << 31;
+ } else {
+ sign = 0 << 31;
+ }
+
+ /* Get location of the most significant bit */
+ msb = __fls(x);
+
+ fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
+ exponent = (127 + msb) << I2F_FRAC_BITS;
+
+ return (fraction + exponent) | sign;
+}
+
+static int msi3101_convert_stream_252(struct msi3101_state *s, u32 *dst,
+ u8 *src, unsigned int src_len)
+{
+ int i, j, i_max, dst_len = 0;
+ u16 sample[2];
+ u32 sample_num[3];
+
+ /* There can be 1-3 1024-byte URB frames */
+ i_max = src_len / 1024;
+
+ for (i = 0; i < i_max; i++) {
+ sample_num[i] = src[3] << 24 | src[2] << 16 | src[1] << 8 | src[0] << 0;
+ if (i == 0 && s->next_sample != sample_num[0]) {
+ dev_dbg_ratelimited(&s->udev->dev,
+ "%d samples lost, %d %08x:%08x\n",
+ sample_num[0] - s->next_sample,
+ src_len, s->next_sample, sample_num[0]);
+ }
+
+ /*
+ * Dump all unknown 'garbage' data - maybe we will discover
+ * someday if there is something rational...
+ */
+ dev_dbg_ratelimited(&s->udev->dev, "%*ph\n", 12, &src[4]);
+
+ src += 16;
+ for (j = 0; j < 1008; j += 4) {
+ sample[0] = src[j + 0] >> 0 | src[j + 1] << 8;
+ sample[1] = src[j + 2] >> 0 | src[j + 3] << 8;
+
+ *dst++ = msi3101_convert_sample_252(s, sample[0]);
+ *dst++ = msi3101_convert_sample_252(s, sample[1]);
+ }
+ /* 252 x I+Q 32bit float samples */
+ dst_len += 252 * 2 * 4;
+ src += 1008;
+ }
+
+ /* calculate the sampling rate and output it at 10 second intervals */
+ if ((s->jiffies + msecs_to_jiffies(10000)) <= jiffies) {
+ unsigned long jiffies_now = jiffies;
+ unsigned long msecs = jiffies_to_msecs(jiffies_now) - jiffies_to_msecs(s->jiffies);
+ unsigned int samples = sample_num[i_max - 1] - s->sample;
+ s->jiffies = jiffies_now;
+ s->sample = sample_num[i_max - 1];
+ dev_dbg(&s->udev->dev,
+ "slen=%d samples=%u msecs=%lu sampling rate=%lu\n",
+ src_len, samples, msecs,
+ samples * 1000UL / msecs);
+ }
+
+ /* next sample (sample = sample + i * 252) */
+ s->next_sample = sample_num[i_max - 1] + 252;
+
+ return dst_len;
+}
+
+/*
+ * This gets called for the isochronous pipe (stream). It runs in interrupt
+ * context, so it has to be fast, must not crash, and must not stall. Neat.
+ */
+static void msi3101_isoc_handler(struct urb *urb)
+{
+ struct msi3101_state *s = (struct msi3101_state *)urb->context;
+ int i, flen, fstatus;
+ unsigned char *iso_buf = NULL;
+ struct msi3101_frame_buf *fbuf;
+
+ if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN) {
+ dev_dbg(&s->udev->dev, "URB (%p) unlinked %ssynchronously\n",
+ urb, urb->status == -ENOENT ? "" : "a");
+ return;
+ }
+
+ if (urb->status != 0) {
+ dev_dbg(&s->udev->dev,
+ "msi3101_isoc_handler() called with status %d\n",
+ urb->status);
+ /* Give up after a number of contiguous errors */
+ if (++s->isoc_errors > MAX_ISOC_ERRORS)
+ dev_dbg(&s->udev->dev,
+ "Too many ISOC errors, bailing out\n");
+ goto handler_end;
+ } else {
+ /* Reset ISOC error counter. We did get here, after all. */
+ s->isoc_errors = 0;
+ }
+
+ /* Compact data */
+ for (i = 0; i < urb->number_of_packets; i++) {
+ void *ptr;
+
+ /* Check frame error */
+ fstatus = urb->iso_frame_desc[i].status;
+ if (fstatus) {
+ dev_dbg_ratelimited(&s->udev->dev,
+ "frame=%d/%d has error %d skipping\n",
+ i, urb->number_of_packets, fstatus);
+ goto skip;
+ }
+
+ /* Check if that frame contains data */
+ flen = urb->iso_frame_desc[i].actual_length;
+ if (flen == 0)
+ goto skip;
+
+ iso_buf = urb->transfer_buffer + urb->iso_frame_desc[i].offset;
+
+ /* Get free framebuffer */
+ fbuf = msi3101_get_next_fill_buf(s);
+ if (fbuf == NULL) {
+ s->vb_full++;
+ dev_dbg_ratelimited(&s->udev->dev,
+ "videobuf is full, %d packets dropped\n",
+ s->vb_full);
+ goto skip;
+ }
+
+ /* fill framebuffer */
+ ptr = vb2_plane_vaddr(&fbuf->vb, 0);
+ flen = s->convert_stream(s, ptr, iso_buf, flen);
+ vb2_set_plane_payload(&fbuf->vb, 0, flen);
+ vb2_buffer_done(&fbuf->vb, VB2_BUF_STATE_DONE);
+skip:
+ ;
+ }
+
+handler_end:
+ i = usb_submit_urb(urb, GFP_ATOMIC);
+ if (i != 0)
+ dev_dbg(&s->udev->dev,
+ "Error (%d) re-submitting urb in msi3101_isoc_handler\n",
+ i);
+}
+
+static void msi3101_iso_stop(struct msi3101_state *s)
+{
+ int i;
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ /* Unlinking ISOC buffers one by one */
+ for (i = 0; i < MAX_ISO_BUFS; i++) {
+ if (s->urbs[i]) {
+ dev_dbg(&s->udev->dev, "Unlinking URB %p\n",
+ s->urbs[i]);
+ usb_kill_urb(s->urbs[i]);
+ }
+ }
+}
+
+static void msi3101_iso_free(struct msi3101_state *s)
+{
+ int i;
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ /* Freeing ISOC buffers one by one */
+ for (i = 0; i < MAX_ISO_BUFS; i++) {
+ if (s->urbs[i]) {
+ dev_dbg(&s->udev->dev, "Freeing URB\n");
+ if (s->urbs[i]->transfer_buffer) {
+ usb_free_coherent(s->udev,
+ s->urbs[i]->transfer_buffer_length,
+ s->urbs[i]->transfer_buffer,
+ s->urbs[i]->transfer_dma);
+ }
+ usb_free_urb(s->urbs[i]);
+ s->urbs[i] = NULL;
+ }
+ }
+}
+
+/* Both v4l2_lock and vb_queue_lock should be locked when calling this */
+static void msi3101_isoc_cleanup(struct msi3101_state *s)
+{
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ msi3101_iso_stop(s);
+ msi3101_iso_free(s);
+}
+
+/* Both v4l2_lock and vb_queue_lock should be locked when calling this */
+static int msi3101_isoc_init(struct msi3101_state *s)
+{
+ struct usb_device *udev;
+ struct urb *urb;
+ int i, j, ret;
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ s->isoc_errors = 0;
+ udev = s->udev;
+
+ ret = usb_set_interface(s->udev, 0, 1);
+ if (ret < 0)
+ return ret;
+
+ /* Allocate and init isochronous URBs */
+ for (i = 0; i < MAX_ISO_BUFS; i++) {
+ urb = usb_alloc_urb(ISO_FRAMES_PER_DESC, GFP_KERNEL);
+ if (urb == NULL) {
+ dev_err(&s->udev->dev,
+ "Failed to allocate urb %d\n", i);
+ msi3101_isoc_cleanup(s);
+ return -ENOMEM;
+ }
+ s->urbs[i] = urb;
+ dev_dbg(&s->udev->dev, "Allocated URB at 0x%p\n", urb);
+
+ urb->interval = 1;
+ urb->dev = udev;
+ urb->pipe = usb_rcvisocpipe(udev, 0x81);
+ urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_buffer = usb_alloc_coherent(udev, ISO_BUFFER_SIZE,
+ GFP_KERNEL, &urb->transfer_dma);
+ if (urb->transfer_buffer == NULL) {
+ dev_err(&s->udev->dev,
+ "Failed to allocate urb buffer %d\n",
+ i);
+ msi3101_isoc_cleanup(s);
+ return -ENOMEM;
+ }
+ urb->transfer_buffer_length = ISO_BUFFER_SIZE;
+ urb->complete = msi3101_isoc_handler;
+ urb->context = s;
+ urb->start_frame = 0;
+ urb->number_of_packets = ISO_FRAMES_PER_DESC;
+ for (j = 0; j < ISO_FRAMES_PER_DESC; j++) {
+ urb->iso_frame_desc[j].offset = j * ISO_MAX_FRAME_SIZE;
+ urb->iso_frame_desc[j].length = ISO_MAX_FRAME_SIZE;
+ }
+ }
+
+ /* link */
+ for (i = 0; i < MAX_ISO_BUFS; i++) {
+ ret = usb_submit_urb(s->urbs[i], GFP_KERNEL);
+ if (ret) {
+ dev_err(&s->udev->dev,
+ "isoc_init() submit_urb %d failed with error %d\n",
+ i, ret);
+ msi3101_isoc_cleanup(s);
+ return ret;
+ }
+ dev_dbg(&s->udev->dev, "URB 0x%p submitted.\n", s->urbs[i]);
+ }
+
+ /* All is done... */
+ return 0;
+}
+
+/* Must be called with vb_queue_lock held */
+static void msi3101_cleanup_queued_bufs(struct msi3101_state *s)
+{
+ unsigned long flags = 0;
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ spin_lock_irqsave(&s->queued_bufs_lock, flags);
+ while (!list_empty(&s->queued_bufs)) {
+ struct msi3101_frame_buf *buf;
+
+ buf = list_entry(s->queued_bufs.next, struct msi3101_frame_buf,
+ list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irqrestore(&s->queued_bufs_lock, flags);
+}
+
+/* The user yanked out the cable... */
+static void msi3101_disconnect(struct usb_interface *intf)
+{
+ struct v4l2_device *v = usb_get_intfdata(intf);
+ struct msi3101_state *s =
+ container_of(v, struct msi3101_state, v4l2_dev);
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ mutex_lock(&s->vb_queue_lock);
+ mutex_lock(&s->v4l2_lock);
+ /* No need to keep the urbs around after disconnection */
+ s->udev = NULL;
+
+ v4l2_device_disconnect(&s->v4l2_dev);
+ video_unregister_device(&s->vdev);
+ mutex_unlock(&s->v4l2_lock);
+ mutex_unlock(&s->vb_queue_lock);
+
+ v4l2_device_put(&s->v4l2_dev);
+}
+
+static int msi3101_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct msi3101_state *s = video_drvdata(file);
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ strlcpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
+ strlcpy(cap->card, s->vdev.name, sizeof(cap->card));
+ usb_make_path(s->udev, cap->bus_info, sizeof(cap->bus_info));
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+ cap->device_caps |= V4L2_CAP_TUNER;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+
+/* Videobuf2 operations */
+static int msi3101_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct msi3101_state *s = vb2_get_drv_priv(vq);
+ dev_dbg(&s->udev->dev, "%s: *nbuffers=%d\n", __func__, *nbuffers);
+
+ /* Absolute min and max number of buffers available for mmap() */
+ *nbuffers = 32;
+ *nplanes = 1;
+ sizes[0] = PAGE_ALIGN(3 * 3072); /* 3 * 768 * 4 */
+ dev_dbg(&s->udev->dev, "%s: nbuffers=%d sizes[0]=%d\n",
+ __func__, *nbuffers, sizes[0]);
+ return 0;
+}
+
+static int msi3101_buf_prepare(struct vb2_buffer *vb)
+{
+ struct msi3101_state *s = vb2_get_drv_priv(vb->vb2_queue);
+
+ /* Don't allow queueing new buffers after device disconnection */
+ if (!s->udev)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void msi3101_buf_queue(struct vb2_buffer *vb)
+{
+ struct msi3101_state *s = vb2_get_drv_priv(vb->vb2_queue);
+ struct msi3101_frame_buf *buf =
+ container_of(vb, struct msi3101_frame_buf, vb);
+ unsigned long flags = 0;
+
+ /* Check the device has not disconnected between prep and queuing */
+ if (!s->udev) {
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ return;
+ }
+
+ spin_lock_irqsave(&s->queued_bufs_lock, flags);
+ list_add_tail(&buf->list, &s->queued_bufs);
+ spin_unlock_irqrestore(&s->queued_bufs_lock, flags);
+}
+
+#define CMD_WREG 0x41
+#define CMD_START_STREAMING 0x43
+#define CMD_STOP_STREAMING 0x45
+#define CMD_READ_UNKNOW 0x48
+
+#define msi3101_dbg_usb_control_msg(udev, r, t, v, _i, b, l) { \
+ char *direction; \
+ if (t == (USB_TYPE_VENDOR | USB_DIR_OUT)) \
+ direction = ">>>"; \
+ else \
+ direction = "<<<"; \
+ dev_dbg(&udev->dev, "%s: %02x %02x %02x %02x %02x %02x %02x %02x " \
+ "%s %*ph\n", __func__, t, r, v & 0xff, v >> 8, \
+ _i & 0xff, _i >> 8, l & 0xff, l >> 8, direction, l, b); \
+}
+
+static int msi3101_ctrl_msg(struct msi3101_state *s, u8 cmd, u32 data)
+{
+ int ret;
+ u8 request = cmd;
+ u8 requesttype = USB_DIR_OUT | USB_TYPE_VENDOR;
+ u16 value = (data >> 0) & 0xffff;
+ u16 index = (data >> 16) & 0xffff;
+
+ msi3101_dbg_usb_control_msg(s->udev,
+ request, requesttype, value, index, NULL, 0);
+
+ ret = usb_control_msg(s->udev, usb_sndctrlpipe(s->udev, 0),
+ request, requesttype, value, index, NULL, 0, 2000);
+
+ if (ret)
+ dev_err(&s->udev->dev, "%s: failed %d, cmd %02x, data %04x\n",
+ __func__, ret, cmd, data);
+
+ return ret;
+};
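For orientation (an editorial example, not part of this commit; the value 0x00608008 is one of the register writes issued later in this file), the 32-bit data word is simply split across the control transfer's wValue and wIndex fields:

/*
 * msi3101_ctrl_msg(s, CMD_WREG, 0x00608008) goes out as
 *   bmRequestType = 0x40 (USB_DIR_OUT | USB_TYPE_VENDOR)
 *   bRequest      = 0x41 (CMD_WREG)
 *   wValue        = 0x8008 (data[15:0])
 *   wIndex        = 0x0060 (data[31:16])
 * with no data stage.
 */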
+
+static int msi3101_tuner_write(struct msi3101_state *s, u32 data)
+{
+ return msi3101_ctrl_msg(s, CMD_WREG, data << 8 | 0x09);
+};
+
+#define F_REF 24000000
+#define DIV_R_IN 2
+static int msi3101_set_usb_adc(struct msi3101_state *s)
+{
+ int ret, div_n, div_m, div_r_out, f_sr, f_vco, fract;
+ u32 reg3, reg4, reg7;
+
+ f_sr = s->ctrl_sampling_rate->val64;
+
+ /* select stream format */
+ if (f_sr < 6000000) {
+ s->convert_stream = msi3101_convert_stream_252;
+ reg7 = 0x00009407;
+ } else if (f_sr < 8000000) {
+ s->convert_stream = msi3101_convert_stream_336;
+ reg7 = 0x00008507;
+ } else if (f_sr < 9000000) {
+ s->convert_stream = msi3101_convert_stream_384;
+ reg7 = 0x0000a507;
+ } else {
+ s->convert_stream = msi3101_convert_stream_504;
+ reg7 = 0x000c9407;
+ }
+
+ /*
+ * The synthesizer config is just an educated guess...
+ *
+ * [7:0] 0x03, register address
+ * [8] 1, always
+ * [9] ?
+ * [12:10] output divider
+ * [13] 0 ?
+ * [14] 0 ?
+ * [15] fractional MSB, bit 20
+ * [16:19] N
+ * [23:20] ?
+ * [24:31] 0x01
+ *
+ * output divider
+ * val div
+ * 0 - (invalid)
+ * 1 4
+ * 2 6
+ * 3 8
+ * 4 10
+ * 5 12
+ * 6 14
+ * 7 16
+ *
+ * VCO 202000000 - 720000000++
+ */
+ reg3 = 0x01000303;
+ reg4 = 0x00000004;
+
+ /* XXX: Filters? AGC? */
+ if (f_sr < 6000000)
+ reg3 |= 0x1 << 20;
+ else if (f_sr < 7000000)
+ reg3 |= 0x5 << 20;
+ else if (f_sr < 8500000)
+ reg3 |= 0x9 << 20;
+ else
+ reg3 |= 0xd << 20;
+
+ for (div_r_out = 4; div_r_out < 16; div_r_out += 2) {
+ f_vco = f_sr * div_r_out * 12;
+ dev_dbg(&s->udev->dev, "%s: div_r_out=%d f_vco=%d\n",
+ __func__, div_r_out, f_vco);
+ if (f_vco >= 202000000)
+ break;
+ }
+
+ div_n = f_vco / (F_REF * DIV_R_IN);
+ div_m = f_vco % (F_REF * DIV_R_IN);
+ fract = 0x200000ul * div_m / (F_REF * DIV_R_IN);
+
+ reg3 |= div_n << 16;
+ reg3 |= (div_r_out / 2 - 1) << 10;
+ reg3 |= ((fract >> 20) & 0x000001) << 15; /* [20] */
+ reg4 |= ((fract >> 0) & 0x0fffff) << 8; /* [19:0] */
+
+ dev_dbg(&s->udev->dev,
+ "%s: f_sr=%d f_vco=%d div_n=%d div_m=%d div_r_out=%d reg3=%08x reg4=%08x\n",
+ __func__, f_sr, f_vco, div_n, div_m, div_r_out, reg3, reg4);
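+ /*
+ * Worked example (derived from the calculation above): the default
+ * sampling rate of 2048000 gives div_r_out=10 and
+ * f_vco = 2048000 * 10 * 12 = 245760000. With F_REF * DIV_R_IN =
+ * 48000000 that yields div_n=5, div_m=5760000 and
+ * fract = 0x200000 * 5760000 / 48000000 = 251658 (0x3d70a).
+ */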
+
+ ret = msi3101_ctrl_msg(s, CMD_WREG, 0x00608008);
+ if (ret)
+ goto err;
+
+ ret = msi3101_ctrl_msg(s, CMD_WREG, 0x00000c05);
+ if (ret)
+ goto err;
+
+ ret = msi3101_ctrl_msg(s, CMD_WREG, 0x00020000);
+ if (ret)
+ goto err;
+
+ ret = msi3101_ctrl_msg(s, CMD_WREG, 0x00480102);
+ if (ret)
+ goto err;
+
+ ret = msi3101_ctrl_msg(s, CMD_WREG, 0x00f38008);
+ if (ret)
+ goto err;
+
+ ret = msi3101_ctrl_msg(s, CMD_WREG, reg7);
+ if (ret)
+ goto err;
+
+ ret = msi3101_ctrl_msg(s, CMD_WREG, reg4);
+ if (ret)
+ goto err;
+
+ ret = msi3101_ctrl_msg(s, CMD_WREG, reg3);
+ if (ret)
+ goto err;
+err:
+ return ret;
+}
+
+static int msi3101_set_tuner(struct msi3101_state *s)
+{
+ int ret, i, len;
+ unsigned int n, m, thresh, frac, vco_step, tmp, f_if1;
+ u32 reg;
+ u64 f_vco, tmp64;
+ u8 mode, filter_mode, lo_div;
+ const struct msi3101_gain *gain_lut;
+ static const struct {
+ u32 rf;
+ u8 mode;
+ u8 lo_div;
+ } band_lut[] = {
+ { 50000000, 0xe1, 16}, /* AM_MODE2, antenna 2 */
+ {108000000, 0x42, 32}, /* VHF_MODE */
+ {330000000, 0x44, 16}, /* B3_MODE */
+ {960000000, 0x48, 4}, /* B45_MODE */
+ { ~0U, 0x50, 2}, /* BL_MODE */
+ };
+ static const struct {
+ u32 freq;
+ u8 filter_mode;
+ } if_freq_lut[] = {
+ { 0, 0x03}, /* Zero IF */
+ { 450000, 0x02}, /* 450 kHz IF */
+ {1620000, 0x01}, /* 1.62 MHz IF */
+ {2048000, 0x00}, /* 2.048 MHz IF */
+ };
+ static const struct {
+ u32 freq;
+ u8 val;
+ } bandwidth_lut[] = {
+ { 200000, 0x00}, /* 200 kHz */
+ { 300000, 0x01}, /* 300 kHz */
+ { 600000, 0x02}, /* 600 kHz */
+ {1536000, 0x03}, /* 1.536 MHz */
+ {5000000, 0x04}, /* 5 MHz */
+ {6000000, 0x05}, /* 6 MHz */
+ {7000000, 0x06}, /* 7 MHz */
+ {8000000, 0x07}, /* 8 MHz */
+ };
+
+ unsigned int f_rf = s->ctrl_tuner_rf->val64;
+
+ /*
+ * bandwidth (Hz)
+ * 200000, 300000, 600000, 1536000, 5000000, 6000000, 7000000, 8000000
+ */
+ unsigned int bandwidth = s->ctrl_tuner_bw->val;
+
+ /*
+ * intermediate frequency (Hz)
+ * 0, 450000, 1620000, 2048000
+ */
+ unsigned int f_if = s->ctrl_tuner_if->val;
+
+ /*
+ * gain reduction (dB)
+ * 0 - 102 below 420 MHz
+ * 0 - 85 above 420 MHz
+ */
+ int gain = s->ctrl_tuner_gain->val;
+
+ dev_dbg(&s->udev->dev,
+ "%s: f_rf=%d bandwidth=%d f_if=%d gain=%d\n",
+ __func__, f_rf, bandwidth, f_if, gain);
+
+ ret = -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(band_lut); i++) {
+ if (f_rf <= band_lut[i].rf) {
+ mode = band_lut[i].mode;
+ lo_div = band_lut[i].lo_div;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(band_lut))
+ goto err;
+
+ /* AM_MODE is upconverted */
+ if ((mode >> 0) & 0x1)
+ f_if1 = 5 * F_REF;
+ else
+ f_if1 = 0;
+
+ for (i = 0; i < ARRAY_SIZE(if_freq_lut); i++) {
+ if (f_if == if_freq_lut[i].freq) {
+ filter_mode = if_freq_lut[i].filter_mode;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(if_freq_lut))
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(bandwidth_lut); i++) {
+ if (bandwidth == bandwidth_lut[i].freq) {
+ bandwidth = bandwidth_lut[i].val;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(bandwidth_lut))
+ goto err;
+
+#define F_OUT_STEP 1
+#define R_REF 4
+ f_vco = (f_rf + f_if + f_if1) * lo_div;
+
+ tmp64 = f_vco;
+ m = do_div(tmp64, F_REF * R_REF);
+ n = (unsigned int) tmp64;
+
+ vco_step = F_OUT_STEP * lo_div;
+ thresh = (F_REF * R_REF) / vco_step;
+ frac = 1ul * thresh * m / (F_REF * R_REF);
+
+ /* Find the greatest common divisor and reduce the fraction. */
+ tmp = gcd(thresh, frac);
+ thresh /= tmp;
+ frac /= tmp;
+
+ /* Scale down to fit the 4095 register maximum; resolution is reduced. */
+ tmp = DIV_ROUND_UP(thresh, 4095);
+ thresh = DIV_ROUND_CLOSEST(thresh, tmp);
+ frac = DIV_ROUND_CLOSEST(frac, tmp);
+
+ /* calculate the actual RF frequency that will be set */
+ tmp = 1ul * F_REF * R_REF * n;
+ tmp += 1ul * F_REF * R_REF * frac / thresh;
+ tmp /= lo_div;
+
+ dev_dbg(&s->udev->dev,
+ "%s: rf=%u:%u n=%d thresh=%d frac=%d\n",
+ __func__, f_rf, tmp, n, thresh, frac);
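+ /*
+ * Worked example (derived from the calculation above): f_rf=100000000
+ * with zero-IF falls in the 0x42/VHF band entry (lo_div=32), so
+ * f_vco = 3200000000. With F_REF * R_REF = 96000000 that gives n=33,
+ * m=32000000, thresh=3000000 and frac=1000000, which reduce to
+ * thresh=3, frac=1; the synthesized frequency is then exactly 100 MHz.
+ */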
+
+ ret = msi3101_tuner_write(s, 0x00000e);
+ if (ret)
+ goto err;
+
+ ret = msi3101_tuner_write(s, 0x000003);
+ if (ret)
+ goto err;
+
+ reg = 0 << 0;
+ reg |= mode << 4;
+ reg |= filter_mode << 12;
+ reg |= bandwidth << 14;
+ reg |= 0x02 << 17;
+ reg |= 0x00 << 20;
+ ret = msi3101_tuner_write(s, reg);
+ if (ret)
+ goto err;
+
+ reg = 5 << 0;
+ reg |= thresh << 4;
+ reg |= 1 << 19;
+ reg |= 1 << 21;
+ ret = msi3101_tuner_write(s, reg);
+ if (ret)
+ goto err;
+
+ reg = 2 << 0;
+ reg |= frac << 4;
+ reg |= n << 16;
+ ret = msi3101_tuner_write(s, reg);
+ if (ret)
+ goto err;
+
+ if (f_rf < 120000000) {
+ gain_lut = msi3101_gain_lut_120;
+ len = ARRAY_SIZE(msi3101_gain_lut_120);
+ } else if (f_rf < 245000000) {
+ gain_lut = msi3101_gain_lut_245;
+ len = ARRAY_SIZE(msi3101_gain_lut_245);
+ } else {
+ gain_lut = msi3101_gain_lut_1000;
+ len = ARRAY_SIZE(msi3101_gain_lut_1000);
+ }
+
+ for (i = 0; i < len; i++) {
+ if (gain_lut[i].tot >= gain)
+ break;
+ }
+
+ if (i == len)
+ goto err;
+
+ dev_dbg(&s->udev->dev,
+ "%s: gain tot=%d baseband=%d lna=%d mixer=%d\n",
+ __func__, gain_lut[i].tot, gain_lut[i].baseband,
+ gain_lut[i].lna, gain_lut[i].mixer);
+
+ reg = 1 << 0;
+ reg |= gain_lut[i].baseband << 4;
+ reg |= 0 << 10;
+ reg |= gain_lut[i].mixer << 12;
+ reg |= gain_lut[i].lna << 13;
+ reg |= 4 << 14;
+ reg |= 0 << 17;
+ ret = msi3101_tuner_write(s, reg);
+ if (ret)
+ goto err;
+
+ reg = 6 << 0;
+ reg |= 63 << 4;
+ reg |= 4095 << 10;
+ ret = msi3101_tuner_write(s, reg);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_dbg(&s->udev->dev, "%s: failed %d\n", __func__, ret);
+ return ret;
+}
+
+static int msi3101_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct msi3101_state *s = vb2_get_drv_priv(vq);
+ int ret;
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ if (!s->udev)
+ return -ENODEV;
+
+ if (mutex_lock_interruptible(&s->v4l2_lock))
+ return -ERESTARTSYS;
+
+ ret = msi3101_set_usb_adc(s);
+
+ ret = msi3101_isoc_init(s);
+ if (ret)
+ msi3101_cleanup_queued_bufs(s);
+
+ ret = msi3101_ctrl_msg(s, CMD_START_STREAMING, 0);
+
+ mutex_unlock(&s->v4l2_lock);
+
+ return ret;
+}
+
+static int msi3101_stop_streaming(struct vb2_queue *vq)
+{
+ struct msi3101_state *s = vb2_get_drv_priv(vq);
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ if (mutex_lock_interruptible(&s->v4l2_lock))
+ return -ERESTARTSYS;
+
+ if (s->udev)
+ msi3101_isoc_cleanup(s);
+
+ msi3101_cleanup_queued_bufs(s);
+
+ /* according to tests, at least 700us delay is required */
+ msleep(20);
+ msi3101_ctrl_msg(s, CMD_STOP_STREAMING, 0);
+
+ mutex_unlock(&s->v4l2_lock);
+
+ return 0;
+}
+
+static struct vb2_ops msi3101_vb2_ops = {
+ .queue_setup = msi3101_queue_setup,
+ .buf_prepare = msi3101_buf_prepare,
+ .buf_queue = msi3101_buf_queue,
+ .start_streaming = msi3101_start_streaming,
+ .stop_streaming = msi3101_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int msi3101_enum_input(struct file *file, void *fh, struct v4l2_input *i)
+{
+ if (i->index != 0)
+ return -EINVAL;
+
+ strlcpy(i->name, "SDR data", sizeof(i->name));
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+
+ return 0;
+}
+
+static int msi3101_g_input(struct file *file, void *fh, unsigned int *i)
+{
+ *i = 0;
+
+ return 0;
+}
+
+static int msi3101_s_input(struct file *file, void *fh, unsigned int i)
+{
+ return i ? -EINVAL : 0;
+}
+
+static int vidioc_s_tuner(struct file *file, void *priv,
+ const struct v4l2_tuner *v)
+{
+ struct msi3101_state *s = video_drvdata(file);
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ return 0;
+}
+
+static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *v)
+{
+ struct msi3101_state *s = video_drvdata(file);
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ strcpy(v->name, "SDR RX");
+ v->capability = V4L2_TUNER_CAP_LOW;
+
+ return 0;
+}
+
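+/*
+ * With V4L2_TUNER_CAP_LOW the frequency is expressed in units of 62.5 Hz,
+ * hence the "* 625 / 10" conversion to Hz below; for example a reported
+ * value of 1600000 corresponds to 100 MHz.
+ */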
+static int vidioc_s_frequency(struct file *file, void *priv,
+ const struct v4l2_frequency *f)
+{
+ struct msi3101_state *s = video_drvdata(file);
+ dev_dbg(&s->udev->dev, "%s: frequency=%lu Hz (%u)\n",
+ __func__, f->frequency * 625UL / 10UL, f->frequency);
+
+ return v4l2_ctrl_s_ctrl_int64(s->ctrl_tuner_rf,
+ f->frequency * 625UL / 10UL);
+}
+
+const struct v4l2_ioctl_ops msi3101_ioctl_ops = {
+ .vidioc_querycap = msi3101_querycap,
+
+ .vidioc_enum_input = msi3101_enum_input,
+ .vidioc_g_input = msi3101_g_input,
+ .vidioc_s_input = msi3101_s_input,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_g_tuner = vidioc_g_tuner,
+ .vidioc_s_tuner = vidioc_s_tuner,
+ .vidioc_s_frequency = vidioc_s_frequency,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+};
+
+static const struct v4l2_file_operations msi3101_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static struct video_device msi3101_template = {
+ .name = "Mirics MSi3101 SDR Dongle",
+ .release = video_device_release_empty,
+ .fops = &msi3101_fops,
+ .ioctl_ops = &msi3101_ioctl_ops,
+};
+
+static int msi3101_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct msi3101_state *s =
+ container_of(ctrl->handler, struct msi3101_state,
+ ctrl_handler);
+ int ret;
+ dev_dbg(&s->udev->dev,
+ "%s: id=%d name=%s val=%d min=%d max=%d step=%d\n",
+ __func__, ctrl->id, ctrl->name, ctrl->val,
+ ctrl->minimum, ctrl->maximum, ctrl->step);
+
+ switch (ctrl->id) {
+ case MSI3101_CID_SAMPLING_MODE:
+ case MSI3101_CID_SAMPLING_RATE:
+ case MSI3101_CID_SAMPLING_RESOLUTION:
+ ret = 0;
+ break;
+ case MSI3101_CID_TUNER_RF:
+ case MSI3101_CID_TUNER_BW:
+ case MSI3101_CID_TUNER_IF:
+ case MSI3101_CID_TUNER_GAIN:
+ ret = msi3101_set_tuner(s);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops msi3101_ctrl_ops = {
+ .s_ctrl = msi3101_s_ctrl,
+};
+
+static void msi3101_video_release(struct v4l2_device *v)
+{
+ struct msi3101_state *s =
+ container_of(v, struct msi3101_state, v4l2_dev);
+
+ v4l2_ctrl_handler_free(&s->ctrl_handler);
+ v4l2_device_unregister(&s->v4l2_dev);
+ kfree(s);
+}
+
+static int msi3101_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct msi3101_state *s = NULL;
+ int ret;
+ static const char * const ctrl_sampling_mode_qmenu_strings[] = {
+ "Quadrature Sampling",
+ NULL,
+ };
+ static const struct v4l2_ctrl_config ctrl_sampling_mode = {
+ .ops = &msi3101_ctrl_ops,
+ .id = MSI3101_CID_SAMPLING_MODE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .flags = V4L2_CTRL_FLAG_INACTIVE,
+ .name = "Sampling Mode",
+ .qmenu = ctrl_sampling_mode_qmenu_strings,
+ };
+ static const struct v4l2_ctrl_config ctrl_sampling_rate = {
+ .ops = &msi3101_ctrl_ops,
+ .id = MSI3101_CID_SAMPLING_RATE,
+ .type = V4L2_CTRL_TYPE_INTEGER64,
+ .name = "Sampling Rate",
+ .min = 500000,
+ .max = 12000000,
+ .def = 2048000,
+ .step = 1,
+ };
+ static const struct v4l2_ctrl_config ctrl_sampling_resolution = {
+ .ops = &msi3101_ctrl_ops,
+ .id = MSI3101_CID_SAMPLING_RESOLUTION,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .flags = V4L2_CTRL_FLAG_INACTIVE,
+ .name = "Sampling Resolution",
+ .min = 10,
+ .max = 10,
+ .def = 10,
+ .step = 1,
+ };
+ static const struct v4l2_ctrl_config ctrl_tuner_rf = {
+ .ops = &msi3101_ctrl_ops,
+ .id = MSI3101_CID_TUNER_RF,
+ .type = V4L2_CTRL_TYPE_INTEGER64,
+ .name = "Tuner RF",
+ .min = 40000000,
+ .max = 2000000000,
+ .def = 100000000,
+ .step = 1,
+ };
+ static const struct v4l2_ctrl_config ctrl_tuner_bw = {
+ .ops = &msi3101_ctrl_ops,
+ .id = MSI3101_CID_TUNER_BW,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Tuner BW",
+ .min = 200000,
+ .max = 8000000,
+ .def = 600000,
+ .step = 1,
+ };
+ static const struct v4l2_ctrl_config ctrl_tuner_if = {
+ .ops = &msi3101_ctrl_ops,
+ .id = MSI3101_CID_TUNER_IF,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .flags = V4L2_CTRL_FLAG_INACTIVE,
+ .name = "Tuner IF",
+ .min = 0,
+ .max = 2048000,
+ .def = 0,
+ .step = 1,
+ };
+ static const struct v4l2_ctrl_config ctrl_tuner_gain = {
+ .ops = &msi3101_ctrl_ops,
+ .id = MSI3101_CID_TUNER_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Tuner Gain",
+ .min = 0,
+ .max = 102,
+ .def = 0,
+ .step = 1,
+ };
+
+ s = kzalloc(sizeof(struct msi3101_state), GFP_KERNEL);
+ if (s == NULL) {
+ pr_err("Could not allocate memory for msi3101_state\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&s->v4l2_lock);
+ mutex_init(&s->vb_queue_lock);
+ spin_lock_init(&s->queued_bufs_lock);
+ INIT_LIST_HEAD(&s->queued_bufs);
+
+ s->udev = udev;
+
+ /* Init videobuf2 queue structure */
+ s->vb_queue.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ s->vb_queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
+ s->vb_queue.drv_priv = s;
+ s->vb_queue.buf_struct_size = sizeof(struct msi3101_frame_buf);
+ s->vb_queue.ops = &msi3101_vb2_ops;
+ s->vb_queue.mem_ops = &vb2_vmalloc_memops;
+ s->vb_queue.timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ ret = vb2_queue_init(&s->vb_queue);
+ if (ret < 0) {
+ dev_err(&s->udev->dev, "Could not initialize vb2 queue\n");
+ goto err_free_mem;
+ }
+
+ /* Init video_device structure */
+ s->vdev = msi3101_template;
+ s->vdev.queue = &s->vb_queue;
+ s->vdev.queue->lock = &s->vb_queue_lock;
+ set_bit(V4L2_FL_USE_FH_PRIO, &s->vdev.flags);
+ video_set_drvdata(&s->vdev, s);
+
+ /* Register controls */
+ v4l2_ctrl_handler_init(&s->ctrl_handler, 7);
+ v4l2_ctrl_new_custom(&s->ctrl_handler, &ctrl_sampling_mode, NULL);
+ s->ctrl_sampling_rate = v4l2_ctrl_new_custom(&s->ctrl_handler, &ctrl_sampling_rate, NULL);
+ v4l2_ctrl_new_custom(&s->ctrl_handler, &ctrl_sampling_resolution, NULL);
+ s->ctrl_tuner_rf = v4l2_ctrl_new_custom(&s->ctrl_handler, &ctrl_tuner_rf, NULL);
+ s->ctrl_tuner_bw = v4l2_ctrl_new_custom(&s->ctrl_handler, &ctrl_tuner_bw, NULL);
+ s->ctrl_tuner_if = v4l2_ctrl_new_custom(&s->ctrl_handler, &ctrl_tuner_if, NULL);
+ s->ctrl_tuner_gain = v4l2_ctrl_new_custom(&s->ctrl_handler, &ctrl_tuner_gain, NULL);
+ if (s->ctrl_handler.error) {
+ ret = s->ctrl_handler.error;
+ dev_err(&s->udev->dev, "Could not initialize controls\n");
+ goto err_free_controls;
+ }
+
+ /* Register the v4l2_device structure */
+ s->v4l2_dev.release = msi3101_video_release;
+ ret = v4l2_device_register(&intf->dev, &s->v4l2_dev);
+ if (ret) {
+ dev_err(&s->udev->dev,
+ "Failed to register v4l2-device (%d)\n", ret);
+ goto err_free_controls;
+ }
+
+ s->v4l2_dev.ctrl_handler = &s->ctrl_handler;
+ s->vdev.v4l2_dev = &s->v4l2_dev;
+ s->vdev.lock = &s->v4l2_lock;
+
+ ret = video_register_device(&s->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ dev_err(&s->udev->dev,
+ "Failed to register as video device (%d)\n",
+ ret);
+ goto err_unregister_v4l2_dev;
+ }
+ dev_info(&s->udev->dev, "Registered as %s\n",
+ video_device_node_name(&s->vdev));
+
+ return 0;
+
+err_unregister_v4l2_dev:
+ v4l2_device_unregister(&s->v4l2_dev);
+err_free_controls:
+ v4l2_ctrl_handler_free(&s->ctrl_handler);
+err_free_mem:
+ kfree(s);
+ return ret;
+}
+
+/* USB device ID list */
+static struct usb_device_id msi3101_id_table[] = {
+ { USB_DEVICE(0x1df7, 0x2500) }, /* Mirics MSi3101 SDR Dongle */
+ { USB_DEVICE(0x2040, 0xd300) }, /* Hauppauge WinTV 133559 LF */
+ { }
+};
+MODULE_DEVICE_TABLE(usb, msi3101_id_table);
+
+/* USB subsystem interface */
+static struct usb_driver msi3101_driver = {
+ .name = KBUILD_MODNAME,
+ .probe = msi3101_probe,
+ .disconnect = msi3101_disconnect,
+ .id_table = msi3101_id_table,
+};
+
+module_usb_driver(msi3101_driver);
+
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("Mirics MSi3101 SDR Dongle");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/solo6x10/Kconfig b/drivers/staging/media/solo6x10/Kconfig
index df6569b..9a4296c 100644
--- a/drivers/staging/media/solo6x10/Kconfig
+++ b/drivers/staging/media/solo6x10/Kconfig
@@ -1,10 +1,12 @@
config SOLO6X10
tristate "Softlogic 6x10 MPEG codec cards"
depends on PCI && VIDEO_DEV && SND && I2C
- depends on FONTS
+ select FONT_SUPPORT
+ select FONT_8x16
select VIDEOBUF2_DMA_SG
select VIDEOBUF2_DMA_CONTIG
select SND_PCM
+ select FONT_8x16
---help---
This driver supports the Softlogic based MPEG-4 and h.264 codec
cards.
diff --git a/drivers/staging/media/solo6x10/solo6x10-tw28.c b/drivers/staging/media/solo6x10/solo6x10-tw28.c
index ad00e2b..af65ea6 100644
--- a/drivers/staging/media/solo6x10/solo6x10-tw28.c
+++ b/drivers/staging/media/solo6x10/solo6x10-tw28.c
@@ -513,62 +513,82 @@ static int tw2815_setup(struct solo_dev *solo_dev, u8 dev_addr)
#define FIRST_ACTIVE_LINE 0x0008
#define LAST_ACTIVE_LINE 0x0102
-static void saa7128_setup(struct solo_dev *solo_dev)
+static void saa712x_write_regs(struct solo_dev *dev, const uint8_t *vals,
+ int start, int n)
{
- int i;
- unsigned char regs[128] = {
- 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ for (;start < n; start++, vals++) {
+ /* Skip read-only registers */
+ switch (start) {
+ /* case 0x00 ... 0x25: */
+ case 0x2e ... 0x37:
+ case 0x60:
+ case 0x7d:
+ continue;
+ }
+ solo_i2c_writebyte(dev, SOLO_I2C_SAA, 0x46, start, *vals);
+ }
+}
+
+#define SAA712x_reg7c (0x80 | ((LAST_ACTIVE_LINE & 0x100) >> 2) \
+ | ((FIRST_ACTIVE_LINE & 0x100) >> 4))
+
+static void saa712x_setup(struct solo_dev *dev)
+{
+ const int reg_start = 0x26;
+ const uint8_t saa7128_regs_ntsc[] = {
+ /* :0x26 */
+ 0x0d, 0x00,
+ /* :0x28 */
+ 0x59, 0x1d, 0x75, 0x3f, 0x06, 0x3f,
+ /* :0x2e XXX: read-only */
+ 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x1C, 0x2B, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00,
- 0x59, 0x1d, 0x75, 0x3f, 0x06, 0x3f, 0x00, 0x00,
- 0x1c, 0x33, 0x00, 0x3f, 0x00, 0x00, 0x3f, 0x00,
+ /* :0x38 */
0x1a, 0x1a, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* :0x40 */
0x00, 0x00, 0x00, 0x68, 0x10, 0x97, 0x4c, 0x18,
0x9b, 0x93, 0x9f, 0xff, 0x7c, 0x34, 0x3f, 0x3f,
+ /* :0x50 */
0x3f, 0x83, 0x83, 0x80, 0x0d, 0x0f, 0xc3, 0x06,
0x02, 0x80, 0x71, 0x77, 0xa7, 0x67, 0x66, 0x2e,
+ /* :0x60 */
0x7b, 0x11, 0x4f, 0x1f, 0x7c, 0xf0, 0x21, 0x77,
- 0x41, 0x88, 0x41, 0x12, 0xed, 0x10, 0x10, 0x00,
+ 0x41, 0x88, 0x41, 0x52, 0xed, 0x10, 0x10, 0x00,
+ /* :0x70 */
+ 0x41, 0xc3, 0x00, 0x3e, 0xb8, 0x02, 0x00, 0x00,
+ 0x00, 0x00, FIRST_ACTIVE_LINE, LAST_ACTIVE_LINE & 0xff,
+ SAA712x_reg7c, 0x00, 0xff, 0xff,
+ }, saa7128_regs_pal[] = {
+ /* :0x26 */
+ 0x0d, 0x00,
+ /* :0x28 */
+ 0xe1, 0x1d, 0x75, 0x3f, 0x06, 0x3f,
+ /* :0x2e XXX: read-only */
+ 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* :0x38 */
+ 0x1a, 0x1a, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* :0x40 */
+ 0x00, 0x00, 0x00, 0x68, 0x10, 0x97, 0x4c, 0x18,
+ 0x9b, 0x93, 0x9f, 0xff, 0x7c, 0x34, 0x3f, 0x3f,
+ /* :0x50 */
+ 0x3f, 0x83, 0x83, 0x80, 0x0d, 0x0f, 0xc3, 0x06,
+ 0x02, 0x80, 0x0f, 0x77, 0xa7, 0x67, 0x66, 0x2e,
+ /* :0x60 */
+ 0x7b, 0x02, 0x35, 0xcb, 0x8a, 0x09, 0x2a, 0x77,
+ 0x41, 0x88, 0x41, 0x52, 0xf1, 0x10, 0x20, 0x00,
+ /* :0x70 */
0x41, 0xc3, 0x00, 0x3e, 0xb8, 0x02, 0x00, 0x00,
- 0x00, 0x00, 0x08, 0xff, 0x80, 0x00, 0xff, 0xff,
+ 0x00, 0x00, 0x12, 0x30,
+ SAA712x_reg7c | 0x40, 0x00, 0xff, 0xff,
};
- regs[0x7A] = FIRST_ACTIVE_LINE & 0xff;
- regs[0x7B] = LAST_ACTIVE_LINE & 0xff;
- regs[0x7C] = ((1 << 7) |
- (((LAST_ACTIVE_LINE >> 8) & 1) << 6) |
- (((FIRST_ACTIVE_LINE >> 8) & 1) << 4));
-
- /* PAL: XXX: We could do a second set of regs to avoid this */
- if (solo_dev->video_type != SOLO_VO_FMT_TYPE_NTSC) {
- regs[0x28] = 0xE1;
-
- regs[0x5A] = 0x0F;
- regs[0x61] = 0x02;
- regs[0x62] = 0x35;
- regs[0x63] = 0xCB;
- regs[0x64] = 0x8A;
- regs[0x65] = 0x09;
- regs[0x66] = 0x2A;
-
- regs[0x6C] = 0xf1;
- regs[0x6E] = 0x20;
-
- regs[0x7A] = 0x06 + 12;
- regs[0x7b] = 0x24 + 12;
- regs[0x7c] |= 1 << 6;
- }
-
- /* First 0x25 bytes are read-only? */
- for (i = 0x26; i < 128; i++) {
- if (i == 0x60 || i == 0x7D)
- continue;
- solo_i2c_writebyte(solo_dev, SOLO_I2C_SAA, 0x46, i, regs[i]);
- }
-
- return;
+ if (dev->video_type == SOLO_VO_FMT_TYPE_PAL)
+ saa712x_write_regs(dev, saa7128_regs_pal, reg_start,
+ sizeof(saa7128_regs_pal));
+ else
+ saa712x_write_regs(dev, saa7128_regs_ntsc, reg_start,
+ sizeof(saa7128_regs_ntsc));
}
int solo_tw28_init(struct solo_dev *solo_dev)
@@ -609,7 +629,7 @@ int solo_tw28_init(struct solo_dev *solo_dev)
return -EINVAL;
}
- saa7128_setup(solo_dev);
+ saa712x_setup(solo_dev);
for (i = 0; i < solo_dev->tw28_cnt; i++) {
if ((solo_dev->tw2865 & (1 << i)))
diff --git a/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c b/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c
index 98e2902..a4c5896 100644
--- a/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c
+++ b/drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c
@@ -996,12 +996,11 @@ static int solo_g_parm(struct file *file, void *priv,
struct v4l2_streamparm *sp)
{
struct solo_enc_dev *solo_enc = video_drvdata(file);
- struct solo_dev *solo_dev = solo_enc->solo_dev;
struct v4l2_captureparm *cp = &sp->parm.capture;
cp->capability = V4L2_CAP_TIMEPERFRAME;
cp->timeperframe.numerator = solo_enc->interval;
- cp->timeperframe.denominator = solo_dev->fps;
+ cp->timeperframe.denominator = solo_enc->solo_dev->fps;
cp->capturemode = 0;
/* XXX: Shouldn't we be able to get/set this from videobuf? */
cp->readbuffers = 2;
@@ -1009,36 +1008,29 @@ static int solo_g_parm(struct file *file, void *priv,
return 0;
}
+static inline int calc_interval(u8 fps, u32 n, u32 d)
+{
+ if (!n || !d)
+ return 1;
+ if (d == fps)
+ return n;
+ n *= fps;
+ return min(15U, n / d + (n % d >= (fps >> 1)));
+}
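+/*
+ * For example, with fps = 30 a requested timeperframe of 2/15 gives
+ * calc_interval() = round(2 * 30 / 15) = 4, i.e. every 4th frame is
+ * captured; the result is clamped to at most 15.
+ */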
+
static int solo_s_parm(struct file *file, void *priv,
struct v4l2_streamparm *sp)
{
struct solo_enc_dev *solo_enc = video_drvdata(file);
- struct solo_dev *solo_dev = solo_enc->solo_dev;
- struct v4l2_captureparm *cp = &sp->parm.capture;
+ struct v4l2_fract *t = &sp->parm.capture.timeperframe;
+ u8 fps = solo_enc->solo_dev->fps;
if (vb2_is_streaming(&solo_enc->vidq))
return -EBUSY;
- if ((cp->timeperframe.numerator == 0) ||
- (cp->timeperframe.denominator == 0)) {
- /* reset framerate */
- cp->timeperframe.numerator = 1;
- cp->timeperframe.denominator = solo_dev->fps;
- }
-
- if (cp->timeperframe.denominator != solo_dev->fps)
- cp->timeperframe.denominator = solo_dev->fps;
-
- if (cp->timeperframe.numerator > 15)
- cp->timeperframe.numerator = 15;
-
- solo_enc->interval = cp->timeperframe.numerator;
-
- cp->capability = V4L2_CAP_TIMEPERFRAME;
- cp->readbuffers = 2;
-
+ solo_enc->interval = calc_interval(fps, t->numerator, t->denominator);
solo_update_mode(solo_enc);
- return 0;
+ return solo_g_parm(file, priv, sp);
}
static long solo_enc_default(struct file *file, void *fh,
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index dd98cb1..46eabd0 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -896,7 +896,7 @@ static int xlr_setup_mdio(struct xlr_net_priv *priv,
return err;
}
- pr_info("Registerd mdio bus id : %s\n", priv->mii_bus->id);
+ pr_info("Registered mdio bus id : %s\n", priv->mii_bus->id);
err = xlr_mii_probe(priv);
if (err) {
mdiobus_free(priv->mii_bus);
@@ -1020,12 +1020,11 @@ static int xlr_net_probe(struct platform_device *pdev)
goto err_gmac;
}
- ndev->base_addr = (unsigned long) devm_request_and_ioremap
+ ndev->base_addr = (unsigned long) devm_ioremap_resource
(&pdev->dev, res);
- if (!ndev->base_addr) {
- dev_err(&pdev->dev,
- "devm_request_and_ioremap failed\n");
- return -EBUSY;
+ if (IS_ERR_VALUE(ndev->base_addr)) {
+ err = ndev->base_addr;
+ goto err_gmac;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 197c393..5a5c639 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -33,7 +33,6 @@
#include <linux/mfd/core.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
-#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
@@ -751,8 +750,6 @@ static void tegra_init_i2c_slave(struct nvec_chip *nvec)
writel(0, nvec->base + I2C_SL_ADDR2);
enable_irq(nvec->irq);
-
- clk_disable_unprepare(nvec->i2c_clk);
}
#ifdef CONFIG_PM_SLEEP
@@ -772,11 +769,31 @@ static void nvec_power_off(void)
nvec_write_async(nvec_power_handle, ap_pwr_down, 2);
}
+/*
+ * Parse common device tree data
+ */
+static int nvec_i2c_parse_dt_pdata(struct nvec_chip *nvec)
+{
+ nvec->gpio = of_get_named_gpio(nvec->dev->of_node, "request-gpios", 0);
+
+ if (nvec->gpio < 0) {
+ dev_err(nvec->dev, "no gpio specified");
+ return -ENODEV;
+ }
+
+ if (of_property_read_u32(nvec->dev->of_node, "slave-addr",
+ &nvec->i2c_addr)) {
+ dev_err(nvec->dev, "no i2c address specified");
+ return -ENODEV;
+ }
+
+ return 0;
+}
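+/*
+ * Illustrative device tree fragment (property names as parsed above,
+ * values purely an example):
+ * request-gpios = <&gpio 170 0>;
+ * slave-addr = <138>;
+ */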
+
static int tegra_nvec_probe(struct platform_device *pdev)
{
int err, ret;
struct clk *i2c_clk;
- struct nvec_platform_data *pdata = pdev->dev.platform_data;
struct nvec_chip *nvec;
struct nvec_msg *msg;
struct resource *res;
@@ -785,6 +802,11 @@ static int tegra_nvec_probe(struct platform_device *pdev)
unmute_speakers[] = { NVEC_OEM0, 0x10, 0x59, 0x95 },
enable_event[7] = { NVEC_SYS, CNF_EVENT_REPORTING, true };
+ if (!pdev->dev.of_node) {
+ dev_err(&pdev->dev, "must be instantiated using device tree\n");
+ return -ENODEV;
+ }
+
nvec = devm_kzalloc(&pdev->dev, sizeof(struct nvec_chip), GFP_KERNEL);
if (nvec == NULL) {
dev_err(&pdev->dev, "failed to reserve memory\n");
@@ -793,25 +815,9 @@ static int tegra_nvec_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, nvec);
nvec->dev = &pdev->dev;
- if (pdata) {
- nvec->gpio = pdata->gpio;
- nvec->i2c_addr = pdata->i2c_addr;
- } else if (nvec->dev->of_node) {
- nvec->gpio = of_get_named_gpio(nvec->dev->of_node,
- "request-gpios", 0);
- if (nvec->gpio < 0) {
- dev_err(&pdev->dev, "no gpio specified");
- return -ENODEV;
- }
- if (of_property_read_u32(nvec->dev->of_node,
- "slave-addr", &nvec->i2c_addr)) {
- dev_err(&pdev->dev, "no i2c address specified");
- return -ENODEV;
- }
- } else {
- dev_err(&pdev->dev, "no platform data\n");
- return -ENODEV;
- }
+ err = nvec_i2c_parse_dt_pdata(nvec);
+ if (err < 0)
+ return err;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
@@ -864,9 +870,6 @@ static int tegra_nvec_probe(struct platform_device *pdev)
tegra_init_i2c_slave(nvec);
- clk_prepare_enable(i2c_clk);
-
-
/* enable event reporting */
nvec_toggle_global_events(nvec, true);
diff --git a/drivers/staging/nvec/nvec.h b/drivers/staging/nvec/nvec.h
index 2b1316d..e880518 100644
--- a/drivers/staging/nvec/nvec.h
+++ b/drivers/staging/nvec/nvec.h
@@ -103,31 +103,6 @@ struct nvec_msg {
};
/**
- * struct nvec_subdev - A subdevice of nvec, such as nvec_kbd
- * @name: The name of the sub device
- * @platform_data: Platform data
- * @id: Identifier of the sub device
- */
-struct nvec_subdev {
- const char *name;
- void *platform_data;
- int id;
-};
-
-/**
- * struct nvec_platform_data - platform data for a tegra slave controller
- * @i2c_addr: number of i2c slave adapter the ec is connected to
- * @gpio: gpio number for the ec request line
- *
- * Platform data, to be used in board definitions. For an example, take a
- * look at the paz00 board in arch/arm/mach-tegra/board-paz00.c
- */
-struct nvec_platform_data {
- int i2c_addr;
- int gpio;
-};
-
-/**
* struct nvec_chip - A single connection to an NVIDIA Embedded controller
* @dev: The device
* @gpio: The same as for &struct nvec_platform_data
diff --git a/drivers/staging/nvec/nvec_kbd.c b/drivers/staging/nvec/nvec_kbd.c
index a0ec52a..c17a1c3 100644
--- a/drivers/staging/nvec/nvec_kbd.c
+++ b/drivers/staging/nvec/nvec_kbd.c
@@ -126,7 +126,7 @@ static int nvec_kbd_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(extcode_tab_us102); ++i)
keycodes[j++] = extcode_tab_us102[i];
- idev = input_allocate_device();
+ idev = devm_input_allocate_device(&pdev->dev);
idev->name = "nvec keyboard";
idev->phys = "nvec";
idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) | BIT_MASK(EV_LED);
@@ -142,7 +142,7 @@ static int nvec_kbd_probe(struct platform_device *pdev)
clear_bit(0, idev->keybit);
err = input_register_device(idev);
if (err)
- goto fail;
+ return err;
keys_dev.input = idev;
keys_dev.notifier.notifier_call = nvec_keys_notifier;
@@ -161,10 +161,6 @@ static int nvec_kbd_probe(struct platform_device *pdev)
nvec_write_async(nvec, clear_leds, sizeof(clear_leds));
return 0;
-
-fail:
- input_free_device(idev);
- return err;
}
static int nvec_kbd_remove(struct platform_device *pdev)
@@ -177,8 +173,6 @@ static int nvec_kbd_remove(struct platform_device *pdev)
nvec_write_async(nvec, disable_kbd, 2);
nvec_unregister_notifier(nvec, &keys_dev.notifier);
- input_unregister_device(keys_dev.input);
-
return 0;
}
diff --git a/drivers/staging/octeon-usb/Kconfig b/drivers/staging/octeon-usb/Kconfig
new file mode 100644
index 0000000..16ea17f
--- /dev/null
+++ b/drivers/staging/octeon-usb/Kconfig
@@ -0,0 +1,10 @@
+config OCTEON_USB
+ tristate "Cavium Networks Octeon USB support"
+ depends on CAVIUM_OCTEON_SOC && USB
+ help
+ This driver supports the USB host controller on some Cavium
+ Networks products in the Octeon family.
+
+ To compile this driver as a module, choose M here. The module
+ will be called octeon-usb.
+
diff --git a/drivers/staging/octeon-usb/Makefile b/drivers/staging/octeon-usb/Makefile
new file mode 100644
index 0000000..89df1ad
--- /dev/null
+++ b/drivers/staging/octeon-usb/Makefile
@@ -0,0 +1,3 @@
+obj-${CONFIG_OCTEON_USB} := octeon-usb.o
+octeon-usb-y := octeon-hcd.o
+octeon-usb-y += cvmx-usb.o
diff --git a/drivers/staging/octeon-usb/TODO b/drivers/staging/octeon-usb/TODO
new file mode 100644
index 0000000..cc58a7e
--- /dev/null
+++ b/drivers/staging/octeon-usb/TODO
@@ -0,0 +1,11 @@
+This driver is functional and has been tested on EdgeRouter Lite with
+USB mass storage.
+
+TODO:
+ - kernel coding style
+ - checkpatch warnings
+ - dead code elimination
+ - device tree bindings
+ - possibly eliminate the extra "hardware abstraction layer"
+
+Contact: Aaro Koskinen <aaro.koskinen@iki.fi>
diff --git a/drivers/staging/octeon-usb/cvmx-usb.c b/drivers/staging/octeon-usb/cvmx-usb.c
new file mode 100644
index 0000000..45dfe94
--- /dev/null
+++ b/drivers/staging/octeon-usb/cvmx-usb.c
@@ -0,0 +1,3158 @@
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * "cvmx-usb.c" defines a set of low level USB functions to help
+ * developers create Octeon USB drivers for various operating
+ * systems. These functions provide a generic API to the Octeon
+ * USB blocks, hiding the internal hardware specific
+ * operations.
+ */
+#include <linux/delay.h>
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-sysinfo.h>
+#include "cvmx-usbnx-defs.h"
+#include "cvmx-usbcx-defs.h"
+#include "cvmx-usb.h"
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-board.h>
+
+#define CVMX_PREFETCH0(address) CVMX_PREFETCH(address, 0)
+#define CVMX_PREFETCH128(address) CVMX_PREFETCH(address, 128)
+/* a normal prefetch */
+#define CVMX_PREFETCH(address, offset) CVMX_PREFETCH_PREF0(address, offset)
+/* normal prefetches that use the pref instruction */
+#define CVMX_PREFETCH_PREFX(X, address, offset) asm volatile ("pref %[type], %[off](%[rbase])" : : [rbase] "d" (address), [off] "I" (offset), [type] "n" (X))
+#define CVMX_PREFETCH_PREF0(address, offset) CVMX_PREFETCH_PREFX(0, address, offset)
+#define CVMX_CLZ(result, input) asm ("clz %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
+
+#define MAX_RETRIES 3 /* Maximum number of times to retry failed transactions */
+#define MAX_PIPES 32 /* Maximum number of pipes that can be open at once */
+#define MAX_TRANSACTIONS 256 /* Maximum number of outstanding transactions across all pipes */
+#define MAX_CHANNELS 8 /* Maximum number of hardware channels supported by the USB block */
+#define MAX_USB_ADDRESS 127 /* The highest valid USB device address */
+#define MAX_USB_ENDPOINT 15 /* The highest valid USB endpoint number */
+#define MAX_USB_HUB_PORT 15 /* The highest valid port number on a hub */
+#define MAX_TRANSFER_BYTES ((1<<19)-1) /* The low level hardware can transfer a maximum of this number of bytes in each transfer. The field is 19 bits wide */
+#define MAX_TRANSFER_PACKETS ((1<<10)-1) /* The low level hardware can transfer a maximum of this number of packets in each transfer. The field is 10 bits wide */
+
+/*
+ * These defines disable the normal read and write csr. This is so I can add
+ * extra debug stuff to the usb specific version and I won't use the normal
+ * version by mistake
+ */
+#define cvmx_read_csr use_cvmx_usb_read_csr64_instead_of_cvmx_read_csr
+#define cvmx_write_csr use_cvmx_usb_write_csr64_instead_of_cvmx_write_csr
+
+enum cvmx_usb_transaction_flags {
+ __CVMX_USB_TRANSACTION_FLAGS_IN_USE = 1<<16,
+};
+
+enum {
+ USB_CLOCK_TYPE_REF_12,
+ USB_CLOCK_TYPE_REF_24,
+ USB_CLOCK_TYPE_REF_48,
+ USB_CLOCK_TYPE_CRYSTAL_12,
+};
+
+/**
+ * Logical transactions may take numerous low level
+ * transactions, especially when splits are concerned. This
+ * enum represents all of the possible stages a transaction can
+ * be in. Note that split completes are always even. This is so
+ * the NAK handler can back up to the previous low level
+ * transaction with a simple clearing of bit 0.
+ */
+enum cvmx_usb_stage {
+ CVMX_USB_STAGE_NON_CONTROL,
+ CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE,
+ CVMX_USB_STAGE_SETUP,
+ CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE,
+ CVMX_USB_STAGE_DATA,
+ CVMX_USB_STAGE_DATA_SPLIT_COMPLETE,
+ CVMX_USB_STAGE_STATUS,
+ CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE,
+};
+
+/**
+ * struct cvmx_usb_transaction - describes each pending USB transaction
+ * regardless of type. These are linked together
+ * to form a list of pending requests for a pipe.
+ *
+ * @prev: Transaction before this one in the pipe.
+ * @next: Transaction after this one in the pipe.
+ * @type: Type of transaction, duplicated from the pipe.
+ * @flags: State flags for this transaction.
+ * @buffer: User's physical buffer address to read/write.
+ * @buffer_length: Size of the user's buffer in bytes.
+ * @control_header: For control transactions, physical address of the 8
+ * byte standard header.
+ * @iso_start_frame: For ISO transactions, the starting frame number.
+ * @iso_number_packets: For ISO transactions, the number of packets in the
+ * request.
+ * @iso_packets: For ISO transactions, the sub packets in the request.
+ * @actual_bytes: Actual bytes transferred for this transaction.
+ * @stage: For control transactions, the current stage.
+ * @callback: User's callback function when complete.
+ * @callback_data: User's data.
+ */
+struct cvmx_usb_transaction {
+ struct cvmx_usb_transaction *prev;
+ struct cvmx_usb_transaction *next;
+ enum cvmx_usb_transfer type;
+ enum cvmx_usb_transaction_flags flags;
+ uint64_t buffer;
+ int buffer_length;
+ uint64_t control_header;
+ int iso_start_frame;
+ int iso_number_packets;
+ struct cvmx_usb_iso_packet *iso_packets;
+ int xfersize;
+ int pktcnt;
+ int retries;
+ int actual_bytes;
+ enum cvmx_usb_stage stage;
+ cvmx_usb_callback_func_t callback;
+ void *callback_data;
+};
+
+/**
+ * struct cvmx_usb_pipe - a pipe represents a virtual connection between Octeon
+ * and some USB device. It contains a list of pending
+ * request to the device.
+ *
+ * @prev: Pipe before this one in the list
+ * @next: Pipe after this one in the list
+ * @head: The first pending transaction
+ * @tail: The last pending transaction
+ * @interval: For periodic pipes, the interval between packets in
+ * frames
+ * @next_tx_frame: The next frame this pipe is allowed to transmit on
+ * @flags: State flags for this pipe
+ * @device_speed: Speed of device connected to this pipe
+ * @transfer_type: Type of transaction supported by this pipe
+ * @transfer_dir: IN or OUT. Ignored for Control
+ * @multi_count: Max packet in a row for the device
+ * @max_packet: The device's maximum packet size in bytes
+ * @device_addr: USB device address at other end of pipe
+ * @endpoint_num: USB endpoint number at other end of pipe
+ * @hub_device_addr: Hub address this device is connected to
+ * @hub_port: Hub port this device is connected to
+ * @pid_toggle: This toggles between 0/1 on every packet sent to track
+ * the data pid needed
+ * @channel: Hardware DMA channel for this pipe
+ * @split_sc_frame: The low order bits of the frame number the split
+ * complete should be sent on
+ */
+struct cvmx_usb_pipe {
+ struct cvmx_usb_pipe *prev;
+ struct cvmx_usb_pipe *next;
+ struct cvmx_usb_transaction *head;
+ struct cvmx_usb_transaction *tail;
+ uint64_t interval;
+ uint64_t next_tx_frame;
+ enum cvmx_usb_pipe_flags flags;
+ enum cvmx_usb_speed device_speed;
+ enum cvmx_usb_transfer transfer_type;
+ enum cvmx_usb_direction transfer_dir;
+ int multi_count;
+ uint16_t max_packet;
+ uint8_t device_addr;
+ uint8_t endpoint_num;
+ uint8_t hub_device_addr;
+ uint8_t hub_port;
+ uint8_t pid_toggle;
+ uint8_t channel;
+ int8_t split_sc_frame;
+};
+
+/**
+ * struct cvmx_usb_pipe_list
+ *
+ * @head: Head of the list, or NULL if empty.
+ * @tail: Tail of the list, or NULL if empty.
+ */
+struct cvmx_usb_pipe_list {
+ struct cvmx_usb_pipe *head;
+ struct cvmx_usb_pipe *tail;
+};
+
+struct cvmx_usb_tx_fifo {
+ struct {
+ int channel;
+ int size;
+ uint64_t address;
+ } entry[MAX_CHANNELS+1];
+ int head;
+ int tail;
+};
+
+/**
+ * struct cvmx_usb_internal_state - the state of the USB block
+ *
+ * @init_flags: Flags passed to initialize.
+ * @index: Which USB block this is for.
+ * @idle_hardware_channels: Bit set for every idle hardware channel.
+ * @usbcx_hprt: Stored port status so we don't need to read a CSR to
+ * determine splits.
+ * @pipe_for_channel: Map channels to pipes.
+ * @free_transaction_head: Head of the list of free transactions.
+ * @free_transaction_tail: Tail of the list of free transactions.
+ * @pipe: Storage for pipes.
+ * @transaction: Storage for transactions.
+ * @callback: User global callbacks.
+ * @callback_data: User data for each callback.
+ * @indent: Used by debug output to indent functions.
+ * @port_status: Last port status used for change notification.
+ * @free_pipes: List of all pipes that are currently closed.
+ * @idle_pipes: List of open pipes that have no transactions.
+ * @active_pipes: Active pipes indexed by transfer type.
+ * @frame_number: Increments every SOF interrupt for time keeping.
+ * @active_split: Points to the current active split, or NULL.
+ */
+struct cvmx_usb_internal_state {
+ int init_flags;
+ int index;
+ int idle_hardware_channels;
+ union cvmx_usbcx_hprt usbcx_hprt;
+ struct cvmx_usb_pipe *pipe_for_channel[MAX_CHANNELS];
+ struct cvmx_usb_transaction *free_transaction_head;
+ struct cvmx_usb_transaction *free_transaction_tail;
+ struct cvmx_usb_pipe pipe[MAX_PIPES];
+ struct cvmx_usb_transaction transaction[MAX_TRANSACTIONS];
+ cvmx_usb_callback_func_t callback[__CVMX_USB_CALLBACK_END];
+ void *callback_data[__CVMX_USB_CALLBACK_END];
+ int indent;
+ struct cvmx_usb_port_status port_status;
+ struct cvmx_usb_pipe_list free_pipes;
+ struct cvmx_usb_pipe_list idle_pipes;
+ struct cvmx_usb_pipe_list active_pipes[4];
+ uint64_t frame_number;
+ struct cvmx_usb_transaction *active_split;
+ struct cvmx_usb_tx_fifo periodic;
+ struct cvmx_usb_tx_fifo nonperiodic;
+};
+
+/*
+ * This macro spins on a field waiting for it to reach a value. It evaluates
+ * to 0 as soon as "field op value" becomes true, or to -1 if timeout_usec
+ * expires first; the field is re-read roughly every 100 cycles.
+ */
+#define CVMX_WAIT_FOR_FIELD32(address, type, field, op, value, timeout_usec)\
+ ({int result; \
+ do { \
+ uint64_t done = cvmx_get_cycle() + (uint64_t)timeout_usec * \
+ octeon_get_clock_rate() / 1000000; \
+ type c; \
+ while (1) { \
+ c.u32 = __cvmx_usb_read_csr32(usb, address); \
+ if (c.s.field op (value)) { \
+ result = 0; \
+ break; \
+ } else if (cvmx_get_cycle() > done) { \
+ result = -1; \
+ break; \
+ } else \
+ cvmx_wait(100); \
+ } \
+ } while (0); \
+ result; })
+
+/*
+ * This macro logically sets a single field in a CSR. It does the sequence
+ * read, modify, and write
+ */
+#define USB_SET_FIELD32(address, type, field, value) \
+ do { \
+ type c; \
+ c.u32 = __cvmx_usb_read_csr32(usb, address); \
+ c.s.field = value; \
+ __cvmx_usb_write_csr32(usb, address, c.u32); \
+ } while (0)
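+/*
+ * Typical usage, as seen later in this file:
+ * USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index),
+ * union cvmx_usbcx_gintmsk, prtintmsk, 1);
+ * which reads GINTMSK, sets the prtintmsk field and writes it back.
+ */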
+
+/* Returns the I/O address used to push/pop data to/from the FIFOs */
+#define USB_FIFO_ADDRESS(channel, usb_index) (CVMX_USBCX_GOTGCTL(usb_index) + ((channel)+1)*0x1000)
+
+static int octeon_usb_get_clock_type(void)
+{
+ switch (cvmx_sysinfo_get()->board_type) {
+ case CVMX_BOARD_TYPE_BBGW_REF:
+ case CVMX_BOARD_TYPE_LANAI2_A:
+ case CVMX_BOARD_TYPE_LANAI2_U:
+ case CVMX_BOARD_TYPE_LANAI2_G:
+ case CVMX_BOARD_TYPE_UBNT_E100:
+ return USB_CLOCK_TYPE_CRYSTAL_12;
+ }
+ return USB_CLOCK_TYPE_REF_48;
+}
+
+/**
+ * Read a USB 32bit CSR. It performs the necessary address swizzle
+ * for 32bit CSRs and logs the value in a readable format if
+ * debugging is on.
+ *
+ * @usb: USB block this access is for
+ * @address: 64bit address to read
+ *
+ * Returns: Result of the read
+ */
+static inline uint32_t __cvmx_usb_read_csr32(struct cvmx_usb_internal_state *usb,
+ uint64_t address)
+{
+ uint32_t result = cvmx_read64_uint32(address ^ 4);
+ return result;
+}
+
+
+/**
+ * Write a USB 32bit CSR. It performs the necessary address
+ * swizzle for 32bit CSRs and logs the value in a readable format
+ * if debugging is on.
+ *
+ * @usb: USB block this access is for
+ * @address: 64bit address to write
+ * @value: Value to write
+ */
+static inline void __cvmx_usb_write_csr32(struct cvmx_usb_internal_state *usb,
+ uint64_t address, uint32_t value)
+{
+ cvmx_write64_uint32(address ^ 4, value);
+ cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
+}
+
+
+/**
+ * Read a USB 64bit CSR. It logs the value in a readable format if
+ * debugging is on.
+ *
+ * @usb: USB block this access is for
+ * @address: 64bit address to read
+ *
+ * Returns: Result of the read
+ */
+static inline uint64_t __cvmx_usb_read_csr64(struct cvmx_usb_internal_state *usb,
+ uint64_t address)
+{
+ uint64_t result = cvmx_read64_uint64(address);
+ return result;
+}
+
+
+/**
+ * Write a USB 64bit CSR. It logs the value in a readable format
+ * if debugging is on.
+ *
+ * @usb: USB block this access is for
+ * @address: 64bit address to write
+ * @value: Value to write
+ */
+static inline void __cvmx_usb_write_csr64(struct cvmx_usb_internal_state *usb,
+ uint64_t address, uint64_t value)
+{
+ cvmx_write64_uint64(address, value);
+}
+
+/**
+ * Return non-zero if this pipe connects to a non-high-speed
+ * device through a high-speed hub.
+ *
+ * @usb: USB block this access is for
+ * @pipe: Pipe to check
+ *
+ * Returns: Non-zero if we need to do split transactions
+ */
+static inline int __cvmx_usb_pipe_needs_split(struct cvmx_usb_internal_state *usb, struct cvmx_usb_pipe *pipe)
+{
+ return ((pipe->device_speed != CVMX_USB_SPEED_HIGH) && (usb->usbcx_hprt.s.prtspd == CVMX_USB_SPEED_HIGH));
+}
+
+
+/**
+ * Trivial utility function to return the correct PID for a pipe
+ *
+ * @pipe: pipe to check
+ *
+ * Returns: PID for pipe
+ */
+static inline int __cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe)
+{
+ if (pipe->pid_toggle)
+ return 2; /* Data1 */
+ else
+ return 0; /* Data0 */
+}
+
+
+/**
+ * Return the number of USB ports supported by this Octeon
+ * chip. If the chip doesn't support USB, or is not supported
+ * by this API, zero is returned. Most Octeon chips support one
+ * USB port, but some support two. cvmx_usb_initialize() must be
+ * called with a separate struct cvmx_usb_state for each port.
+ *
+ * Returns: Number of ports, or zero if USB isn't supported
+ */
+int cvmx_usb_get_num_ports(void)
+{
+ int arch_ports = 0;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX))
+ arch_ports = 1;
+ else if (OCTEON_IS_MODEL(OCTEON_CN52XX))
+ arch_ports = 2;
+ else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
+ arch_ports = 1;
+ else if (OCTEON_IS_MODEL(OCTEON_CN31XX))
+ arch_ports = 1;
+ else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
+ arch_ports = 1;
+ else
+ arch_ports = 0;
+
+ return arch_ports;
+}
+
+
+/**
+ * Allocate a usb transaction for use
+ *
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
+ *
+ * Returns: Transaction or NULL
+ */
+static inline struct cvmx_usb_transaction *__cvmx_usb_alloc_transaction(struct cvmx_usb_internal_state *usb)
+{
+ struct cvmx_usb_transaction *t;
+ t = usb->free_transaction_head;
+ if (t) {
+ usb->free_transaction_head = t->next;
+ if (!usb->free_transaction_head)
+ usb->free_transaction_tail = NULL;
+ }
+ if (t) {
+ memset(t, 0, sizeof(*t));
+ t->flags = __CVMX_USB_TRANSACTION_FLAGS_IN_USE;
+ }
+ return t;
+}
+
+
+/**
+ * Free a usb transaction
+ *
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
+ * @transaction:
+ * Transaction to free
+ */
+static inline void __cvmx_usb_free_transaction(struct cvmx_usb_internal_state *usb,
+ struct cvmx_usb_transaction *transaction)
+{
+ transaction->flags = 0;
+ transaction->prev = NULL;
+ transaction->next = NULL;
+ if (usb->free_transaction_tail)
+ usb->free_transaction_tail->next = transaction;
+ else
+ usb->free_transaction_head = transaction;
+ usb->free_transaction_tail = transaction;
+}
+
+
+/**
+ * Add a pipe to the tail of a list
+ * @list: List to add pipe to
+ * @pipe: Pipe to add
+ */
+static inline void __cvmx_usb_append_pipe(struct cvmx_usb_pipe_list *list, struct cvmx_usb_pipe *pipe)
+{
+ pipe->next = NULL;
+ pipe->prev = list->tail;
+ if (list->tail)
+ list->tail->next = pipe;
+ else
+ list->head = pipe;
+ list->tail = pipe;
+}
+
+
+/**
+ * Remove a pipe from a list
+ * @list: List to remove pipe from
+ * @pipe: Pipe to remove
+ */
+static inline void __cvmx_usb_remove_pipe(struct cvmx_usb_pipe_list *list, struct cvmx_usb_pipe *pipe)
+{
+ if (list->head == pipe) {
+ list->head = pipe->next;
+ pipe->next = NULL;
+ if (list->head)
+ list->head->prev = NULL;
+ else
+ list->tail = NULL;
+ } else if (list->tail == pipe) {
+ list->tail = pipe->prev;
+ list->tail->next = NULL;
+ pipe->prev = NULL;
+ } else {
+ pipe->prev->next = pipe->next;
+ pipe->next->prev = pipe->prev;
+ pipe->prev = NULL;
+ pipe->next = NULL;
+ }
+}
+
+
+/**
+ * Initialize a USB port for use. This must be called before any
+ * other access to the Octeon USB port is made. The port starts
+ * off in the disabled state.
+ *
+ * @state: Pointer to an empty struct cvmx_usb_state
+ * that will be populated by the initialize call.
+ * This structure is then passed to all other USB
+ * functions.
+ * @usb_port_number:
+ * Which Octeon USB port to initialize.
+ * @flags: Flags to control hardware initialization. See
+ * enum cvmx_usb_initialize_flags for the flag
+ * definitions. Some flags are mandatory.
+ *
+ * Returns: 0 or a negative error code.
+ */
+int cvmx_usb_initialize(struct cvmx_usb_state *state, int usb_port_number,
+ enum cvmx_usb_initialize_flags flags)
+{
+ union cvmx_usbnx_clk_ctl usbn_clk_ctl;
+ union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status;
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+
+ usb->init_flags = flags;
+
+ /* Make sure that state is large enough to store the internal state */
+ if (sizeof(*state) < sizeof(*usb))
+ return -EINVAL;
+ /* For now, only USB port numbers 0 and 1 are allowed */
+ if ((usb_port_number < 0) || (usb_port_number > 1))
+ return -EINVAL;
+ /* For all chips except 52XX there is only one port */
+ if (!OCTEON_IS_MODEL(OCTEON_CN52XX) && (usb_port_number > 0))
+ return -EINVAL;
+ /* Try to determine clock type automatically */
+ if ((flags & (CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI |
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND)) == 0) {
+ if (octeon_usb_get_clock_type() == USB_CLOCK_TYPE_CRYSTAL_12)
+ flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI; /* Only 12 MHz crystals are supported */
+ else
+ flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
+ }
+
+ if (flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND) {
+ /* Check for auto ref clock frequency */
+ if (!(flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK))
+ switch (octeon_usb_get_clock_type()) {
+ case USB_CLOCK_TYPE_REF_12:
+ flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
+ break;
+ case USB_CLOCK_TYPE_REF_24:
+ flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
+ break;
+ case USB_CLOCK_TYPE_REF_48:
+ flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ memset(usb, 0, sizeof(*usb));
+ usb->init_flags = flags;
+
+ /* Initialize the USB state structure */
+ {
+ int i;
+ usb->index = usb_port_number;
+
+ /* Initialize the transaction double linked list */
+ usb->free_transaction_head = NULL;
+ usb->free_transaction_tail = NULL;
+ for (i = 0; i < MAX_TRANSACTIONS; i++)
+ __cvmx_usb_free_transaction(usb, usb->transaction + i);
+ for (i = 0; i < MAX_PIPES; i++)
+ __cvmx_usb_append_pipe(&usb->free_pipes, usb->pipe + i);
+ }
+
+ /*
+ * Power On Reset and PHY Initialization
+ *
+ * 1. Wait for DCOK to assert (nothing to do)
+ *
+ * 2a. Write USBN0/1_CLK_CTL[POR] = 1 and
+ * USBN0/1_CLK_CTL[HRST,PRST,HCLK_RST] = 0
+ */
+ usbn_clk_ctl.u64 = __cvmx_usb_read_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index));
+ usbn_clk_ctl.s.por = 1;
+ usbn_clk_ctl.s.hrst = 0;
+ usbn_clk_ctl.s.prst = 0;
+ usbn_clk_ctl.s.hclk_rst = 0;
+ usbn_clk_ctl.s.enable = 0;
+ /*
+ * 2b. Select the USB reference clock/crystal parameters by writing
+ * appropriate values to USBN0/1_CLK_CTL[P_C_SEL, P_RTYPE, P_COM_ON]
+ */
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND) {
+ /*
+ * The USB port uses 12/24/48MHz 2.5V board clock
+ * source at USB_XO. USB_XI should be tied to GND.
+ * Most Octeon evaluation boards require this setting
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
+ usbn_clk_ctl.cn31xx.p_rclk = 1; /* From CN31XX,CN30XX manual */
+ usbn_clk_ctl.cn31xx.p_xenbn = 0;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
+ usbn_clk_ctl.cn56xx.p_rtype = 2; /* From CN56XX,CN50XX manual */
+ else
+ usbn_clk_ctl.cn52xx.p_rtype = 1; /* From CN52XX manual */
+
+ switch (flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK) {
+ case CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ:
+ usbn_clk_ctl.s.p_c_sel = 0;
+ break;
+ case CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ:
+ usbn_clk_ctl.s.p_c_sel = 1;
+ break;
+ case CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ:
+ usbn_clk_ctl.s.p_c_sel = 2;
+ break;
+ }
+ } else {
+ /*
+ * The USB port uses a 12MHz crystal as clock source
+ * at USB_XO and USB_XI
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
+ usbn_clk_ctl.cn31xx.p_rclk = 1; /* From CN31XX,CN30XX manual */
+ usbn_clk_ctl.cn31xx.p_xenbn = 1;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
+ usbn_clk_ctl.cn56xx.p_rtype = 0; /* From CN56XX,CN50XX manual */
+ else
+ usbn_clk_ctl.cn52xx.p_rtype = 0; /* From CN52XX manual */
+
+ usbn_clk_ctl.s.p_c_sel = 0;
+ }
+ /*
+ * 2c. Select the HCLK via writing USBN0/1_CLK_CTL[DIVIDE, DIVIDE2] and
+ * setting USBN0/1_CLK_CTL[ENABLE] = 1. Divide the core clock down
+ * such that USB is as close as possible to 125 MHz
+ */
+ {
+ int divisor = (octeon_get_clock_rate()+125000000-1)/125000000;
+ if (divisor < 4) /* Lower than 4 doesn't seem to work properly */
+ divisor = 4;
+ usbn_clk_ctl.s.divide = divisor;
+ usbn_clk_ctl.s.divide2 = 0;
+ }
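+ /*
+ * Example: a 600 MHz core clock gives a divisor of ceil(600/125) = 5
+ * and an HCLK of 120 MHz; a 500 MHz core clock uses the minimum
+ * divisor of 4 for exactly 125 MHz.
+ */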
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ /* 2d. Write USBN0/1_CLK_CTL[HCLK_RST] = 1 */
+ usbn_clk_ctl.s.hclk_rst = 1;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ /* 2e. Wait 64 core-clock cycles for HCLK to stabilize */
+ cvmx_wait(64);
+ /*
+ * 3. Program the power-on reset field in the USBN clock-control
+ * register:
+ * USBN_CLK_CTL[POR] = 0
+ */
+ usbn_clk_ctl.s.por = 0;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ /* 4. Wait 1 ms for PHY clock to start */
+ mdelay(1);
+ /*
+ * 5. Program the Reset input from automatic test equipment field in the
+ * USBP control and status register:
+ * USBN_USBP_CTL_STATUS[ATE_RESET] = 1
+ */
+ usbn_usbp_ctl_status.u64 = __cvmx_usb_read_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index));
+ usbn_usbp_ctl_status.s.ate_reset = 1;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index),
+ usbn_usbp_ctl_status.u64);
+ /* 6. Wait 10 cycles */
+ cvmx_wait(10);
+ /*
+ * 7. Clear ATE_RESET field in the USBN clock-control register:
+ * USBN_USBP_CTL_STATUS[ATE_RESET] = 0
+ */
+ usbn_usbp_ctl_status.s.ate_reset = 0;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index),
+ usbn_usbp_ctl_status.u64);
+ /*
+ * 8. Program the PHY reset field in the USBN clock-control register:
+ * USBN_CLK_CTL[PRST] = 1
+ */
+ usbn_clk_ctl.s.prst = 1;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ /*
+ * 9. Program the USBP control and status register to select host or
+ * device mode. USBN_USBP_CTL_STATUS[HST_MODE] = 0 for host, = 1 for
+ * device
+ */
+ usbn_usbp_ctl_status.s.hst_mode = 0;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index),
+ usbn_usbp_ctl_status.u64);
+ /* 10. Wait 1 us */
+ udelay(1);
+ /*
+ * 11. Program the hreset_n field in the USBN clock-control register:
+ * USBN_CLK_CTL[HRST] = 1
+ */
+ usbn_clk_ctl.s.hrst = 1;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ /* 12. Proceed to USB core initialization */
+ usbn_clk_ctl.s.enable = 1;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ udelay(1);
+
+ /*
+ * USB Core Initialization
+ *
+ * 1. Read USBC_GHWCFG1, USBC_GHWCFG2, USBC_GHWCFG3, USBC_GHWCFG4 to
+ * determine USB core configuration parameters.
+ *
+ * Nothing needed
+ *
+ * 2. Program the following fields in the global AHB configuration
+ * register (USBC_GAHBCFG)
+ * DMA mode, USBC_GAHBCFG[DMAEn]: 1 = DMA mode, 0 = slave mode
+ * Burst length, USBC_GAHBCFG[HBSTLEN] = 0
+ * Nonperiodic TxFIFO empty level (slave mode only),
+ * USBC_GAHBCFG[NPTXFEMPLVL]
+ * Periodic TxFIFO empty level (slave mode only),
+ * USBC_GAHBCFG[PTXFEMPLVL]
+ * Global interrupt mask, USBC_GAHBCFG[GLBLINTRMSK] = 1
+ */
+ {
+ union cvmx_usbcx_gahbcfg usbcx_gahbcfg;
+ /* Due to an errata, CN31XX doesn't support DMA */
+ if (OCTEON_IS_MODEL(OCTEON_CN31XX))
+ usb->init_flags |= CVMX_USB_INITIALIZE_FLAGS_NO_DMA;
+ usbcx_gahbcfg.u32 = 0;
+ usbcx_gahbcfg.s.dmaen = !(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA);
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ usb->idle_hardware_channels = 0x1; /* Only use one channel without DMA */
+ else if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ usb->idle_hardware_channels = 0xf7; /* CN5XXX chips have an erratum with channel 3 */
+ else
+ usb->idle_hardware_channels = 0xff;
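+ /* One bit per host channel: bit n set means channel n is idle */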
+ usbcx_gahbcfg.s.hbstlen = 0;
+ usbcx_gahbcfg.s.nptxfemplvl = 1;
+ usbcx_gahbcfg.s.ptxfemplvl = 1;
+ usbcx_gahbcfg.s.glblintrmsk = 1;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_GAHBCFG(usb->index),
+ usbcx_gahbcfg.u32);
+ }
+ /*
+ * 3. Program the following fields in USBC_GUSBCFG register.
+ * HS/FS timeout calibration, USBC_GUSBCFG[TOUTCAL] = 0
+ * ULPI DDR select, USBC_GUSBCFG[DDRSEL] = 0
+ * USB turnaround time, USBC_GUSBCFG[USBTRDTIM] = 0x5
+ * PHY low-power clock select, USBC_GUSBCFG[PHYLPWRCLKSEL] = 0
+ */
+ {
+ union cvmx_usbcx_gusbcfg usbcx_gusbcfg;
+ usbcx_gusbcfg.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index));
+ usbcx_gusbcfg.s.toutcal = 0;
+ usbcx_gusbcfg.s.ddrsel = 0;
+ usbcx_gusbcfg.s.usbtrdtim = 0x5;
+ usbcx_gusbcfg.s.phylpwrclksel = 0;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index),
+ usbcx_gusbcfg.u32);
+ }
+ /*
+ * 4. The software must unmask the following bits in the USBC_GINTMSK
+ * register.
+ * OTG interrupt mask, USBC_GINTMSK[OTGINTMSK] = 1
+ * Mode mismatch interrupt mask, USBC_GINTMSK[MODEMISMSK] = 1
+ */
+ {
+ union cvmx_usbcx_gintmsk usbcx_gintmsk;
+ int channel;
+
+ usbcx_gintmsk.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GINTMSK(usb->index));
+ usbcx_gintmsk.s.otgintmsk = 1;
+ usbcx_gintmsk.s.modemismsk = 1;
+ usbcx_gintmsk.s.hchintmsk = 1;
+ usbcx_gintmsk.s.sofmsk = 0;
+ /* We need RX FIFO interrupts if we don't have DMA */
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ usbcx_gintmsk.s.rxflvlmsk = 1;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTMSK(usb->index),
+ usbcx_gintmsk.u32);
+
+ /* Disable all channel interrupts. We'll enable them per channel later */
+ for (channel = 0; channel < 8; channel++)
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), 0);
+ }
+
+ {
+ /*
+ * Host Port Initialization
+ *
+ * 1. Program the host-port interrupt-mask field to unmask,
+ * USBC_GINTMSK[PRTINT] = 1
+ */
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk,
+ prtintmsk, 1);
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk,
+ disconnintmsk, 1);
+ /*
+ * 2. Program the USBC_HCFG register to select full-speed host
+ * or high-speed host.
+ */
+ {
+ union cvmx_usbcx_hcfg usbcx_hcfg;
+ usbcx_hcfg.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCFG(usb->index));
+ usbcx_hcfg.s.fslssupp = 0;
+ usbcx_hcfg.s.fslspclksel = 0;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCFG(usb->index), usbcx_hcfg.u32);
+ }
+ /*
+ * 3. Program the port power bit to drive VBUS on the USB,
+ * USBC_HPRT[PRTPWR] = 1
+ */
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt, prtpwr, 1);
+
+ /*
+ * Steps 4-15 from the manual are done later in the port enable
+ */
+ }
+
+ return 0;
+}
+
+
+/**
+ * Shut down a USB port after a call to cvmx_usb_initialize().
+ * The port should be disabled with all pipes closed when this
+ * function is called.
+ *
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
+ *
+ * Returns: 0 or a negative error code.
+ */
+int cvmx_usb_shutdown(struct cvmx_usb_state *state)
+{
+ union cvmx_usbnx_clk_ctl usbn_clk_ctl;
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+
+ /* Make sure all pipes are closed */
+ if (usb->idle_pipes.head ||
+ usb->active_pipes[CVMX_USB_TRANSFER_ISOCHRONOUS].head ||
+ usb->active_pipes[CVMX_USB_TRANSFER_INTERRUPT].head ||
+ usb->active_pipes[CVMX_USB_TRANSFER_CONTROL].head ||
+ usb->active_pipes[CVMX_USB_TRANSFER_BULK].head)
+ return -EBUSY;
+
+ /* Disable the clocks and put them in power on reset */
+ usbn_clk_ctl.u64 = __cvmx_usb_read_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index));
+ usbn_clk_ctl.s.enable = 1;
+ usbn_clk_ctl.s.por = 1;
+ usbn_clk_ctl.s.hclk_rst = 1;
+ usbn_clk_ctl.s.prst = 0;
+ usbn_clk_ctl.s.hrst = 0;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ return 0;
+}
+
+
+/**
+ * Enable a USB port. After this call succeeds, the USB port is
+ * online and servicing requests.
+ *
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
+ *
+ * Returns: 0 or a negative error code.
+ */
+int cvmx_usb_enable(struct cvmx_usb_state *state)
+{
+ union cvmx_usbcx_ghwcfg3 usbcx_ghwcfg3;
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+
+ usb->usbcx_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
+
+ /*
+ * If the port is already enabled then just return. We don't need to do
+ * anything
+ */
+ if (usb->usbcx_hprt.s.prtena)
+ return 0;
+
+ /* If there is nothing plugged into the port then fail immediately */
+ if (!usb->usbcx_hprt.s.prtconnsts)
+ return -ETIMEDOUT;
+
+ /* Program the port reset bit to start the reset process */
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt, prtrst, 1);
+
+ /*
+ * Wait at least 50ms (high speed), or 10ms (full speed) for the reset
+ * process to complete.
+ */
+ mdelay(50);
+
+ /* Program the port reset bit to 0, USBC_HPRT[PRTRST] = 0 */
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt, prtrst, 0);
+
+ /* Wait for USBC_HPRT[PRTENA] to be set. */
+ if (CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt,
+ prtena, ==, 1, 100000))
+ return -ETIMEDOUT;
+
+ /* Read the port speed field to get the enumerated speed, USBC_HPRT[PRTSPD]. */
+ usb->usbcx_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
+ usbcx_ghwcfg3.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GHWCFG3(usb->index));
+
+ /*
+ * 13. Program the USBC_GRXFSIZ register to select the size of the
+ * receive FIFO (25%).
+ */
+ USB_SET_FIELD32(CVMX_USBCX_GRXFSIZ(usb->index), union cvmx_usbcx_grxfsiz,
+ rxfdep, usbcx_ghwcfg3.s.dfifodepth / 4);
+ /*
+ * 14. Program the USBC_GNPTXFSIZ register to select the size and the
+ * start address of the non-periodic transmit FIFO for nonperiodic
+ * transactions (50%).
+ */
+ {
+ union cvmx_usbcx_gnptxfsiz siz;
+ siz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index));
+ siz.s.nptxfdep = usbcx_ghwcfg3.s.dfifodepth / 2;
+ siz.s.nptxfstaddr = usbcx_ghwcfg3.s.dfifodepth / 4;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index), siz.u32);
+ }
+ /*
+ * 15. Program the USBC_HPTXFSIZ register to select the size and start
+ * address of the periodic transmit FIFO for periodic transactions
+ * (25%).
+ */
+ {
+ union cvmx_usbcx_hptxfsiz siz;
+ siz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index));
+ siz.s.ptxfsize = usbcx_ghwcfg3.s.dfifodepth / 4;
+ siz.s.ptxfstaddr = 3 * usbcx_ghwcfg3.s.dfifodepth / 4;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index), siz.u32);
+ }
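+ /*
+ * Taken together, the three writes above carve the data FIFO into
+ * RX = 1/4, nonperiodic TX = 1/2 (starting at 1/4) and periodic
+ * TX = 1/4 (starting at 3/4) of USBC_GHWCFG3[DFIFODEPTH]. For
+ * example (illustrative numbers), a 4096 word FIFO splits into
+ * 1024 + 2048 + 1024 words.
+ */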
+ /* Flush all FIFOs */
+ USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl, txfnum, 0x10);
+ USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl, txfflsh, 1);
+ CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl,
+ txfflsh, ==, 0, 100);
+ USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl, rxfflsh, 1);
+ CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl,
+ rxfflsh, ==, 0, 100);
+
+ return 0;
+}
+
+
+/**
+ * Disable a USB port. After this call the USB port will not
+ * generate data transfers and will not generate events.
+ * Transactions in progress will fail and call their
+ * associated callbacks.
+ *
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
+ *
+ * Returns: 0 or a negative error code.
+ */
+int cvmx_usb_disable(struct cvmx_usb_state *state)
+{
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+
+ /* Disable the port */
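+ /*
+ * Note: USBC_HPRT[PRTENA] is a write-1-to-clear bit on this core,
+ * so writing a 1 here clears the enable and takes the port down.
+ */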
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt, prtena, 1);
+ return 0;
+}
+
+
+/**
+ * Get the current state of the USB port. Use this call to
+ * determine if the usb port has anything connected, is enabled,
+ * or has some sort of error condition. The return value of this
+ * call has "changed" bits to signal whether the value of some fields
+ * has changed between calls. These "changed" fields are based
+ * on the last call to cvmx_usb_set_status(). In order to clear
+ * them, you must update the status through cvmx_usb_set_status().
+ *
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
+ *
+ * Returns: Port status information
+ */
+struct cvmx_usb_port_status cvmx_usb_get_status(struct cvmx_usb_state *state)
+{
+ union cvmx_usbcx_hprt usbc_hprt;
+ struct cvmx_usb_port_status result;
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+
+ memset(&result, 0, sizeof(result));
+
+ usbc_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
+ result.port_enabled = usbc_hprt.s.prtena;
+ result.port_over_current = usbc_hprt.s.prtovrcurract;
+ result.port_powered = usbc_hprt.s.prtpwr;
+ result.port_speed = usbc_hprt.s.prtspd;
+ result.connected = usbc_hprt.s.prtconnsts;
+ result.connect_change = (result.connected != usb->port_status.connected);
+
+ return result;
+}
+
+
+/**
+ * Set the current state of the USB port. The status is used as
+ * a reference for the "changed" bits returned by
+ * cvmx_usb_get_status(). Other than serving as a reference, the
+ * status passed to this function is not used. No fields can be
+ * changed through this call.
+ *
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
+ * @port_status:
+ * Port status to set, most likely returned by cvmx_usb_get_status()
+ */
+void cvmx_usb_set_status(struct cvmx_usb_state *state, struct cvmx_usb_port_status port_status)
+{
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+ usb->port_status = port_status;
+ return;
+}
+
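+ /*
+ * Typical (illustrative) polling pattern for the two calls above:
+ * read the status with cvmx_usb_get_status(), react to any
+ * "changed" bits, then pass the same structure back through
+ * cvmx_usb_set_status() so the next poll reports changes relative
+ * to this one. For example:
+ *
+ * struct cvmx_usb_port_status s = cvmx_usb_get_status(state);
+ * if (s.connect_change)
+ * cvmx_usb_set_status(state, s);
+ */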
+
+/**
+ * Convert a USB transaction into a handle
+ *
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
+ * @transaction:
+ * Transaction to get handle for
+ *
+ * Returns: Handle
+ */
+static inline int __cvmx_usb_get_submit_handle(struct cvmx_usb_internal_state *usb,
+ struct cvmx_usb_transaction *transaction)
+{
+ return ((unsigned long)transaction - (unsigned long)usb->transaction) /
+ sizeof(*transaction);
+}
+
+
+/**
+ * Convert a USB pipe into a handle
+ *
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
+ * @pipe: Pipe to get handle for
+ *
+ * Returns: Handle
+ */
+static inline int __cvmx_usb_get_pipe_handle(struct cvmx_usb_internal_state *usb,
+ struct cvmx_usb_pipe *pipe)
+{
+ return ((unsigned long)pipe - (unsigned long)usb->pipe) / sizeof(*pipe);
+}
+
+
+/**
+ * Open a virtual pipe between the host and a USB device. A pipe
+ * must be opened before data can be transferred between a device
+ * and Octeon.
+ *
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
+ * @flags: Optional pipe flags defined in
+ * enum cvmx_usb_pipe_flags.
+ * @device_addr:
+ * USB device address to open the pipe to
+ * (0-127).
+ * @endpoint_num:
+ * USB endpoint number to open the pipe to
+ * (0-15).
+ * @device_speed:
+ * The speed of the device the pipe is going
+ * to. This must match the device's speed,
+ * which may be different from the port speed.
+ * @max_packet: The maximum packet length the device can
+ * transmit/receive (low speed=0-8, full
+ * speed=0-1023, high speed=0-1024). This value
+ * comes from the standard endpoint descriptor
+ * field wMaxPacketSize bits <10:0>.
+ * @transfer_type:
+ * The type of transfer this pipe is for.
+ * @transfer_dir:
+ * The direction the pipe is in. This is not
+ * used for control pipes.
+ * @interval: For ISOCHRONOUS and INTERRUPT transfers,
+ * this is how often the transfer is scheduled
+ * for. All other transfers should specify
+ * zero. The units are in frames (8000/sec at
+ * high speed, 1000/sec for full speed).
+ * @multi_count:
+ * For high speed devices, this is the maximum
+ * allowed number of packets per microframe.
+ * Specify zero for non high speed devices. This
+ * value comes from the standard endpoint descriptor
+ * field wMaxPacketSize bits <12:11>.
+ * @hub_device_addr:
+ * Hub device address this device is connected
+ * to. Devices connected directly to Octeon
+ * use zero. This is only used when the device
+ * is full/low speed behind a high speed hub.
+ * The address will be of the high speed hub,
+ * not any full speed hubs after it.
+ * @hub_port: Which port on the hub the device is
+ * connected to. Use zero for devices connected
+ * directly to Octeon. Like hub_device_addr,
+ * this is only used for full/low speed
+ * devices behind a high speed hub.
+ *
+ * Returns: A non-negative value is a pipe handle. Negative
+ * values are error codes.
+ */
+int cvmx_usb_open_pipe(struct cvmx_usb_state *state, enum cvmx_usb_pipe_flags flags,
+ int device_addr, int endpoint_num,
+ enum cvmx_usb_speed device_speed, int max_packet,
+ enum cvmx_usb_transfer transfer_type,
+ enum cvmx_usb_direction transfer_dir, int interval,
+ int multi_count, int hub_device_addr, int hub_port)
+{
+ struct cvmx_usb_pipe *pipe;
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+
+ if (unlikely((device_addr < 0) || (device_addr > MAX_USB_ADDRESS)))
+ return -EINVAL;
+ if (unlikely((endpoint_num < 0) || (endpoint_num > MAX_USB_ENDPOINT)))
+ return -EINVAL;
+ if (unlikely(device_speed > CVMX_USB_SPEED_LOW))
+ return -EINVAL;
+ if (unlikely((max_packet <= 0) || (max_packet > 1024)))
+ return -EINVAL;
+ if (unlikely(transfer_type > CVMX_USB_TRANSFER_INTERRUPT))
+ return -EINVAL;
+ if (unlikely((transfer_dir != CVMX_USB_DIRECTION_OUT) &&
+ (transfer_dir != CVMX_USB_DIRECTION_IN)))
+ return -EINVAL;
+ if (unlikely(interval < 0))
+ return -EINVAL;
+ if (unlikely((transfer_type == CVMX_USB_TRANSFER_CONTROL) && interval))
+ return -EINVAL;
+ if (unlikely(multi_count < 0))
+ return -EINVAL;
+ if (unlikely((device_speed != CVMX_USB_SPEED_HIGH) &&
+ (multi_count != 0)))
+ return -EINVAL;
+ if (unlikely((hub_device_addr < 0) || (hub_device_addr > MAX_USB_ADDRESS)))
+ return -EINVAL;
+ if (unlikely((hub_port < 0) || (hub_port > MAX_USB_HUB_PORT)))
+ return -EINVAL;
+
+ /* Find a free pipe */
+ pipe = usb->free_pipes.head;
+ if (!pipe)
+ return -ENOMEM;
+ __cvmx_usb_remove_pipe(&usb->free_pipes, pipe);
+ pipe->flags = flags | __CVMX_USB_PIPE_FLAGS_OPEN;
+ if ((device_speed == CVMX_USB_SPEED_HIGH) &&
+ (transfer_dir == CVMX_USB_DIRECTION_OUT) &&
+ (transfer_type == CVMX_USB_TRANSFER_BULK))
+ pipe->flags |= __CVMX_USB_PIPE_FLAGS_NEED_PING;
+ pipe->device_addr = device_addr;
+ pipe->endpoint_num = endpoint_num;
+ pipe->device_speed = device_speed;
+ pipe->max_packet = max_packet;
+ pipe->transfer_type = transfer_type;
+ pipe->transfer_dir = transfer_dir;
+ /*
+ * All pipes use interval to rate limit NAK processing. Force an
+ * interval if one wasn't supplied
+ */
+ if (!interval)
+ interval = 1;
+ if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
+ pipe->interval = interval*8;
+ /* Force start splits to be scheduled on uFrame 0 */
+ pipe->next_tx_frame = ((usb->frame_number+7)&~7) + pipe->interval;
+ } else {
+ pipe->interval = interval;
+ pipe->next_tx_frame = usb->frame_number + pipe->interval;
+ }
+ pipe->multi_count = multi_count;
+ pipe->hub_device_addr = hub_device_addr;
+ pipe->hub_port = hub_port;
+ pipe->pid_toggle = 0;
+ pipe->split_sc_frame = -1;
+ __cvmx_usb_append_pipe(&usb->idle_pipes, pipe);
+
+ /*
+ * We don't need to tell the hardware about this pipe yet since
+ * it doesn't have any submitted requests
+ */
+
+ return __cvmx_usb_get_pipe_handle(usb, pipe);
+}
+
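+ /*
+ * Illustrative call (all parameter values are hypothetical): open a
+ * bulk OUT pipe to endpoint 1 of device address 2, a high speed
+ * device attached directly to Octeon:
+ *
+ * pipe = cvmx_usb_open_pipe(state, 0, 2, 1, CVMX_USB_SPEED_HIGH,
+ * 512, CVMX_USB_TRANSFER_BULK,
+ * CVMX_USB_DIRECTION_OUT, 0, 0, 0, 0);
+ *
+ * A negative return is an error code, otherwise the value is the
+ * pipe handle used by the submit and close functions.
+ */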
+
+/**
+ * Poll the RX FIFOs and remove data as needed. This function is only used
+ * in non-DMA mode. It is very important that this function be called quickly
+ * enough to prevent FIFO overflow.
+ *
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
+ */
+static void __cvmx_usb_poll_rx_fifo(struct cvmx_usb_internal_state *usb)
+{
+ union cvmx_usbcx_grxstsph rx_status;
+ int channel;
+ int bytes;
+ uint64_t address;
+ uint32_t *ptr;
+
+ rx_status.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GRXSTSPH(usb->index));
+ /* Only read data if IN data is there */
+ if (rx_status.s.pktsts != 2)
+ return;
+ /* Check if no data is available */
+ if (!rx_status.s.bcnt)
+ return;
+
+ channel = rx_status.s.chnum;
+ bytes = rx_status.s.bcnt;
+ if (!bytes)
+ return;
+
+ /* Get where the DMA engine would have written this data */
+ address = __cvmx_usb_read_csr64(usb, CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel*8);
+ ptr = cvmx_phys_to_ptr(address);
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel*8, address + bytes);
+
+ /* Loop writing the FIFO data for this packet into memory */
+ while (bytes > 0) {
+ *ptr++ = __cvmx_usb_read_csr32(usb, USB_FIFO_ADDRESS(channel, usb->index));
+ bytes -= 4;
+ }
+ CVMX_SYNCW;
+
+ return;
+}
+
+
+/**
+ * Fill the TX hardware fifo with data out of the software
+ * fifos
+ *
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
+ * @fifo: Software fifo to use
+ * @available: Amount of space in the hardware fifo
+ *
+ * Returns: Non zero if the hardware fifo was too small and needs
+ * to be serviced again.
+ */
+static int __cvmx_usb_fill_tx_hw(struct cvmx_usb_internal_state *usb, struct cvmx_usb_tx_fifo *fifo, int available)
+{
+ /*
+ * We're done either when there isn't any more space or the software FIFO
+ * is empty
+ */
+ while (available && (fifo->head != fifo->tail)) {
+ int i = fifo->tail;
+ const uint32_t *ptr = cvmx_phys_to_ptr(fifo->entry[i].address);
+ uint64_t csr_address = USB_FIFO_ADDRESS(fifo->entry[i].channel, usb->index) ^ 4;
+ int words = available;
+
+ /* Limit the amount of data to what the SW fifo has */
+ if (fifo->entry[i].size <= available) {
+ words = fifo->entry[i].size;
+ fifo->tail++;
+ if (fifo->tail > MAX_CHANNELS)
+ fifo->tail = 0;
+ }
+
+ /* Update the next locations and counts */
+ available -= words;
+ fifo->entry[i].address += words * 4;
+ fifo->entry[i].size -= words;
+
+ /*
+ * Write the HW fifo data. The read after every three writes is
+ * due to an erratum on CN3XXX chips
+ */
+ while (words > 3) {
+ cvmx_write64_uint32(csr_address, *ptr++);
+ cvmx_write64_uint32(csr_address, *ptr++);
+ cvmx_write64_uint32(csr_address, *ptr++);
+ cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
+ words -= 3;
+ }
+ cvmx_write64_uint32(csr_address, *ptr++);
+ if (--words) {
+ cvmx_write64_uint32(csr_address, *ptr++);
+ if (--words)
+ cvmx_write64_uint32(csr_address, *ptr++);
+ }
+ cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
+ }
+ return fifo->head != fifo->tail;
+}
+
+
+/**
+ * Check the hardware FIFOs and fill them as needed
+ *
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
+ */
+static void __cvmx_usb_poll_tx_fifo(struct cvmx_usb_internal_state *usb)
+{
+ if (usb->periodic.head != usb->periodic.tail) {
+ union cvmx_usbcx_hptxsts tx_status;
+ tx_status.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPTXSTS(usb->index));
+ if (__cvmx_usb_fill_tx_hw(usb, &usb->periodic, tx_status.s.ptxfspcavail))
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, ptxfempmsk, 1);
+ else
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, ptxfempmsk, 0);
+ }
+
+ if (usb->nonperiodic.head != usb->nonperiodic.tail) {
+ union cvmx_usbcx_gnptxsts tx_status;
+ tx_status.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GNPTXSTS(usb->index));
+ if (__cvmx_usb_fill_tx_hw(usb, &usb->nonperiodic, tx_status.s.nptxfspcavail))
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, nptxfempmsk, 1);
+ else
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, nptxfempmsk, 0);
+ }
+
+ return;
+}
+
+
+/**
+ * Fill the TX FIFO with an outgoing packet
+ *
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
+ * @channel: Channel number to get packet from
+ */
+static void __cvmx_usb_fill_tx_fifo(struct cvmx_usb_internal_state *usb, int channel)
+{
+ union cvmx_usbcx_hccharx hcchar;
+ union cvmx_usbcx_hcspltx usbc_hcsplt;
+ union cvmx_usbcx_hctsizx usbc_hctsiz;
+ struct cvmx_usb_tx_fifo *fifo;
+
+ /* We only need to fill data on outbound channels */
+ hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index));
+ if (hcchar.s.epdir != CVMX_USB_DIRECTION_OUT)
+ return;
+
+ /* OUT Splits only have data on the start and not the complete */
+ usbc_hcsplt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCSPLTX(channel, usb->index));
+ if (usbc_hcsplt.s.spltena && usbc_hcsplt.s.compsplt)
+ return;
+
+ /* Find out how many bytes we need to fill and convert it into 32bit words */
+ usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index));
+ if (!usbc_hctsiz.s.xfersize)
+ return;
+
+ if ((hcchar.s.eptype == CVMX_USB_TRANSFER_INTERRUPT) ||
+ (hcchar.s.eptype == CVMX_USB_TRANSFER_ISOCHRONOUS))
+ fifo = &usb->periodic;
+ else
+ fifo = &usb->nonperiodic;
+
+ fifo->entry[fifo->head].channel = channel;
+ fifo->entry[fifo->head].address = __cvmx_usb_read_csr64(usb, CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + channel*8);
+ fifo->entry[fifo->head].size = (usbc_hctsiz.s.xfersize+3)>>2;
+ fifo->head++;
+ if (fifo->head > MAX_CHANNELS)
+ fifo->head = 0;
+
+ __cvmx_usb_poll_tx_fifo(usb);
+
+ return;
+}
+
+/**
+ * Perform channel specific setup for Control transactions. All
+ * the generic stuff will already have been done in
+ * __cvmx_usb_start_channel()
+ *
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
+ * @channel: Channel to setup
+ * @pipe: Pipe for control transaction
+ */
+static void __cvmx_usb_start_channel_control(struct cvmx_usb_internal_state *usb,
+ int channel,
+ struct cvmx_usb_pipe *pipe)
+{
+ struct cvmx_usb_transaction *transaction = pipe->head;
+ union cvmx_usb_control_header *header =
+ cvmx_phys_to_ptr(transaction->control_header);
+ int bytes_to_transfer = transaction->buffer_length - transaction->actual_bytes;
+ int packets_to_transfer;
+ union cvmx_usbcx_hctsizx usbc_hctsiz;
+
+ usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index));
+
+ switch (transaction->stage) {
+ case CVMX_USB_STAGE_NON_CONTROL:
+ case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
+ cvmx_dprintf("%s: ERROR - Non control stage\n", __func__);
+ break;
+ case CVMX_USB_STAGE_SETUP:
+ usbc_hctsiz.s.pid = 3; /* Setup */
+ bytes_to_transfer = sizeof(*header);
+ /* All Control operations start with a setup going OUT */
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), union cvmx_usbcx_hccharx, epdir, CVMX_USB_DIRECTION_OUT);
+ /*
+ * Setup sends the control header instead of the buffer data. The
+ * buffer data will be used in the next stage
+ */
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + channel*8, transaction->control_header);
+ break;
+ case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
+ usbc_hctsiz.s.pid = 3; /* Setup */
+ bytes_to_transfer = 0;
+ /* All Control operations start with a setup going OUT */
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), union cvmx_usbcx_hccharx, epdir, CVMX_USB_DIRECTION_OUT);
+ USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), union cvmx_usbcx_hcspltx, compsplt, 1);
+ break;
+ case CVMX_USB_STAGE_DATA:
+ usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
+ if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
+ if (header->s.request_type & 0x80)
+ bytes_to_transfer = 0;
+ else if (bytes_to_transfer > pipe->max_packet)
+ bytes_to_transfer = pipe->max_packet;
+ }
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
+ union cvmx_usbcx_hccharx, epdir,
+ ((header->s.request_type & 0x80) ?
+ CVMX_USB_DIRECTION_IN :
+ CVMX_USB_DIRECTION_OUT));
+ break;
+ case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
+ usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
+ if (!(header->s.request_type & 0x80))
+ bytes_to_transfer = 0;
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
+ union cvmx_usbcx_hccharx, epdir,
+ ((header->s.request_type & 0x80) ?
+ CVMX_USB_DIRECTION_IN :
+ CVMX_USB_DIRECTION_OUT));
+ USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), union cvmx_usbcx_hcspltx, compsplt, 1);
+ break;
+ case CVMX_USB_STAGE_STATUS:
+ usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
+ bytes_to_transfer = 0;
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), union cvmx_usbcx_hccharx, epdir,
+ ((header->s.request_type & 0x80) ?
+ CVMX_USB_DIRECTION_OUT :
+ CVMX_USB_DIRECTION_IN));
+ break;
+ case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
+ usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
+ bytes_to_transfer = 0;
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), union cvmx_usbcx_hccharx, epdir,
+ ((header->s.request_type & 0x80) ?
+ CVMX_USB_DIRECTION_OUT :
+ CVMX_USB_DIRECTION_IN));
+ USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), union cvmx_usbcx_hcspltx, compsplt, 1);
+ break;
+ }
+
+ /*
+ * Make sure the transfer never exceeds the byte limit of the hardware.
+ * Further bytes will be sent as continued transactions
+ */
+ if (bytes_to_transfer > MAX_TRANSFER_BYTES) {
+ /* Round MAX_TRANSFER_BYTES down to a multiple of the packet size */
+ bytes_to_transfer = MAX_TRANSFER_BYTES / pipe->max_packet;
+ bytes_to_transfer *= pipe->max_packet;
+ }
+
+ /*
+ * Calculate the number of packets to transfer. If the length is zero
+ * we still need to transfer one packet
+ */
+ packets_to_transfer = (bytes_to_transfer + pipe->max_packet - 1) / pipe->max_packet;
+ if (packets_to_transfer == 0)
+ packets_to_transfer = 1;
+ else if ((packets_to_transfer > 1) && (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
+ /*
+ * Limit to one packet when not using DMA. Channels must be
+ * restarted between every packet for IN transactions, so there
+ * is no reason to do multiple packets in a row
+ */
+ packets_to_transfer = 1;
+ bytes_to_transfer = packets_to_transfer * pipe->max_packet;
+ } else if (packets_to_transfer > MAX_TRANSFER_PACKETS) {
+ /*
+ * Limit the number of packets and amount of data transferred to what the
+ * hardware can handle
+ */
+ packets_to_transfer = MAX_TRANSFER_PACKETS;
+ bytes_to_transfer = packets_to_transfer * pipe->max_packet;
+ }
+
+ usbc_hctsiz.s.xfersize = bytes_to_transfer;
+ usbc_hctsiz.s.pktcnt = packets_to_transfer;
+
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index), usbc_hctsiz.u32);
+ return;
+}
+
+
+/**
+ * Start a channel to perform the pipe's head transaction
+ *
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
+ * @channel: Channel to setup
+ * @pipe: Pipe to start
+ */
+static void __cvmx_usb_start_channel(struct cvmx_usb_internal_state *usb,
+ int channel,
+ struct cvmx_usb_pipe *pipe)
+{
+ struct cvmx_usb_transaction *transaction = pipe->head;
+
+ /* Make sure all writes to the DMA region get flushed */
+ CVMX_SYNCW;
+
+ /* Attach the channel to the pipe */
+ usb->pipe_for_channel[channel] = pipe;
+ pipe->channel = channel;
+ pipe->flags |= __CVMX_USB_PIPE_FLAGS_SCHEDULED;
+
+ /* Mark this channel as in use */
+ usb->idle_hardware_channels &= ~(1<<channel);
+
+ /* Enable the channel interrupt bits */
+ {
+ union cvmx_usbcx_hcintx usbc_hcint;
+ union cvmx_usbcx_hcintmskx usbc_hcintmsk;
+ union cvmx_usbcx_haintmsk usbc_haintmsk;
+
+ /* Clear all channel status bits */
+ usbc_hcint.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCINTX(channel, usb->index));
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTX(channel, usb->index), usbc_hcint.u32);
+
+ usbc_hcintmsk.u32 = 0;
+ usbc_hcintmsk.s.chhltdmsk = 1;
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
+ /* Channels need these extra interrupts when we aren't in DMA mode */
+ usbc_hcintmsk.s.datatglerrmsk = 1;
+ usbc_hcintmsk.s.frmovrunmsk = 1;
+ usbc_hcintmsk.s.bblerrmsk = 1;
+ usbc_hcintmsk.s.xacterrmsk = 1;
+ if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
+ /* Splits don't generate xfercompl, so we need ACK and NYET */
+ usbc_hcintmsk.s.nyetmsk = 1;
+ usbc_hcintmsk.s.ackmsk = 1;
+ }
+ usbc_hcintmsk.s.nakmsk = 1;
+ usbc_hcintmsk.s.stallmsk = 1;
+ usbc_hcintmsk.s.xfercomplmsk = 1;
+ }
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), usbc_hcintmsk.u32);
+
+ /* Enable the channel interrupt to propagate */
+ usbc_haintmsk.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HAINTMSK(usb->index));
+ usbc_haintmsk.s.haintmsk |= 1<<channel;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HAINTMSK(usb->index), usbc_haintmsk.u32);
+ }
+
+ /* Setup the locations the DMA engines use */
+ {
+ uint64_t dma_address = transaction->buffer + transaction->actual_bytes;
+ if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
+ dma_address = transaction->buffer + transaction->iso_packets[0].offset + transaction->actual_bytes;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + channel*8, dma_address);
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel*8, dma_address);
+ }
+
+ /* Setup both the size of the transfer and the SPLIT characteristics */
+ {
+ union cvmx_usbcx_hcspltx usbc_hcsplt = {.u32 = 0};
+ union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 = 0};
+ int packets_to_transfer;
+ int bytes_to_transfer = transaction->buffer_length - transaction->actual_bytes;
+
+ /*
+ * ISOCHRONOUS transactions store each individual transfer size
+ * in the packet structure, not the global buffer_length
+ */
+ if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
+ bytes_to_transfer = transaction->iso_packets[0].length - transaction->actual_bytes;
+
+ /*
+ * We need to do split transactions when we are talking to non
+ * high speed devices that are behind a high speed hub
+ */
+ if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
+ /*
+ * On the start split phase (stage is even) record the
+ * frame number we will need to send the split complete.
+ * We only store the lower two bits since the time ahead
+ * can only be two frames
+ */
+ if ((transaction->stage&1) == 0) {
+ if (transaction->type == CVMX_USB_TRANSFER_BULK)
+ pipe->split_sc_frame = (usb->frame_number + 1) & 0x7f;
+ else
+ pipe->split_sc_frame = (usb->frame_number + 2) & 0x7f;
+ } else
+ pipe->split_sc_frame = -1;
+
+ usbc_hcsplt.s.spltena = 1;
+ usbc_hcsplt.s.hubaddr = pipe->hub_device_addr;
+ usbc_hcsplt.s.prtaddr = pipe->hub_port;
+ usbc_hcsplt.s.compsplt = (transaction->stage == CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE);
+
+ /*
+ * SPLIT transactions can only ever transmit one data
+ * packet so limit the transfer size to the max packet
+ * size
+ */
+ if (bytes_to_transfer > pipe->max_packet)
+ bytes_to_transfer = pipe->max_packet;
+
+ /*
+ * ISOCHRONOUS OUT splits are unique in that they limit
+ * data transfers to 188 byte chunks representing the
+ * beginning, middle, or end of the data, or all of it.
+ */
+ if (!usbc_hcsplt.s.compsplt &&
+ (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
+ (pipe->transfer_type == CVMX_USB_TRANSFER_ISOCHRONOUS)) {
+ /*
+ * Clear the split complete frame number as
+ * there isn't going to be a split complete
+ */
+ pipe->split_sc_frame = -1;
+ /*
+ * See if we've started this transfer and sent
+ * data
+ */
+ if (transaction->actual_bytes == 0) {
+ /*
+ * Nothing sent yet, this is either a
+ * begin or the entire payload
+ */
+ if (bytes_to_transfer <= 188)
+ usbc_hcsplt.s.xactpos = 3; /* Entire payload in one go */
+ else
+ usbc_hcsplt.s.xactpos = 2; /* First part of payload */
+ } else {
+ /*
+ * Continuing the previous data, we must
+ * either be in the middle or at the end
+ */
+ if (bytes_to_transfer <= 188)
+ usbc_hcsplt.s.xactpos = 1; /* End of payload */
+ else
+ usbc_hcsplt.s.xactpos = 0; /* Middle of payload */
+ }
+ /*
+ * Again, the transfer size is limited to 188
+ * bytes
+ */
+ if (bytes_to_transfer > 188)
+ bytes_to_transfer = 188;
+ }
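+ /*
+ * Worked example (sizes illustrative): a 400 byte ISOCHRONOUS
+ * OUT payload behind a high speed hub goes out as a 188 byte
+ * "begin" chunk, a 188 byte "middle" chunk and a 24 byte "end"
+ * chunk across successive start splits.
+ */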
+ }
+
+ /*
+ * Make sure the transfer never exceeds the byte limit of the
+ * hardware. Further bytes will be sent as continued
+ * transactions
+ */
+ if (bytes_to_transfer > MAX_TRANSFER_BYTES) {
+ /*
+ * Round MAX_TRANSFER_BYTES down to a multiple of the packet
+ * size
+ */
+ bytes_to_transfer = MAX_TRANSFER_BYTES / pipe->max_packet;
+ bytes_to_transfer *= pipe->max_packet;
+ }
+
+ /*
+ * Calculate the number of packets to transfer. If the length is
+ * zero we still need to transfer one packet
+ */
+ packets_to_transfer = (bytes_to_transfer + pipe->max_packet - 1) / pipe->max_packet;
+ if (packets_to_transfer == 0)
+ packets_to_transfer = 1;
+ else if ((packets_to_transfer > 1) && (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
+ /*
+ * Limit to one packet when not using DMA. Channels must
+ * be restarted between every packet for IN
+ * transactions, so there is no reason to do multiple
+ * packets in a row
+ */
+ packets_to_transfer = 1;
+ bytes_to_transfer = packets_to_transfer * pipe->max_packet;
+ } else if (packets_to_transfer > MAX_TRANSFER_PACKETS) {
+ /*
+ * Limit the number of packets and amount of data transferred to
+ * what the hardware can handle
+ */
+ packets_to_transfer = MAX_TRANSFER_PACKETS;
+ bytes_to_transfer = packets_to_transfer * pipe->max_packet;
+ }
+
+ usbc_hctsiz.s.xfersize = bytes_to_transfer;
+ usbc_hctsiz.s.pktcnt = packets_to_transfer;
+
+ /* Update the DATA0/DATA1 toggle */
+ usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
+ /*
+ * High speed pipes may need a hardware ping before they start
+ */
+ if (pipe->flags & __CVMX_USB_PIPE_FLAGS_NEED_PING)
+ usbc_hctsiz.s.dopng = 1;
+
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCSPLTX(channel, usb->index), usbc_hcsplt.u32);
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index), usbc_hctsiz.u32);
+ }
+
+ /* Setup the Host Channel Characteristics Register */
+ {
+ union cvmx_usbcx_hccharx usbc_hcchar = {.u32 = 0};
+
+ /*
+ * Set the startframe odd/even properly. This is only used for
+ * periodic
+ */
+ usbc_hcchar.s.oddfrm = usb->frame_number&1;
+
+ /*
+ * Set the number of back to back packets allowed by this
+ * endpoint. Split transactions interpret "ec" as the number of
+ * immediate retries on failure. These retries happen too
+ * quickly, so we disable these entirely for splits
+ */
+ if (__cvmx_usb_pipe_needs_split(usb, pipe))
+ usbc_hcchar.s.ec = 1;
+ else if (pipe->multi_count < 1)
+ usbc_hcchar.s.ec = 1;
+ else if (pipe->multi_count > 3)
+ usbc_hcchar.s.ec = 3;
+ else
+ usbc_hcchar.s.ec = pipe->multi_count;
+
+ /* Set the rest of the endpoint specific settings */
+ usbc_hcchar.s.devaddr = pipe->device_addr;
+ usbc_hcchar.s.eptype = transaction->type;
+ usbc_hcchar.s.lspddev = (pipe->device_speed == CVMX_USB_SPEED_LOW);
+ usbc_hcchar.s.epdir = pipe->transfer_dir;
+ usbc_hcchar.s.epnum = pipe->endpoint_num;
+ usbc_hcchar.s.mps = pipe->max_packet;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index), usbc_hcchar.u32);
+ }
+
+ /* Do transaction type specific fixups as needed */
+ switch (transaction->type) {
+ case CVMX_USB_TRANSFER_CONTROL:
+ __cvmx_usb_start_channel_control(usb, channel, pipe);
+ break;
+ case CVMX_USB_TRANSFER_BULK:
+ case CVMX_USB_TRANSFER_INTERRUPT:
+ break;
+ case CVMX_USB_TRANSFER_ISOCHRONOUS:
+ if (!__cvmx_usb_pipe_needs_split(usb, pipe)) {
+ /*
+ * ISO transactions require different PIDs depending on
+ * direction and how many packets are needed
+ */
+ if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
+ if (pipe->multi_count < 2) /* Need DATA0 */
+ USB_SET_FIELD32(CVMX_USBCX_HCTSIZX(channel, usb->index), union cvmx_usbcx_hctsizx, pid, 0);
+ else /* Need MDATA */
+ USB_SET_FIELD32(CVMX_USBCX_HCTSIZX(channel, usb->index), union cvmx_usbcx_hctsizx, pid, 3);
+ }
+ }
+ break;
+ }
+ {
+ union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index))};
+ transaction->xfersize = usbc_hctsiz.s.xfersize;
+ transaction->pktcnt = usbc_hctsiz.s.pktcnt;
+ }
+ /* Remember when we start a split transaction */
+ if (__cvmx_usb_pipe_needs_split(usb, pipe))
+ usb->active_split = transaction;
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), union cvmx_usbcx_hccharx, chena, 1);
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ __cvmx_usb_fill_tx_fifo(usb, channel);
+ return;
+}
+
+
+/**
+ * Find a pipe that is ready to be scheduled to hardware.
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
+ * @list: Pipe list to search
+ * @current_frame:
+ * Frame counter to use as a time reference.
+ *
+ * Returns: Pipe or NULL if none are ready
+ */
+static struct cvmx_usb_pipe *__cvmx_usb_find_ready_pipe(struct cvmx_usb_internal_state *usb, struct cvmx_usb_pipe_list *list, uint64_t current_frame)
+{
+ struct cvmx_usb_pipe *pipe = list->head;
+ while (pipe) {
+ if (!(pipe->flags & __CVMX_USB_PIPE_FLAGS_SCHEDULED) && pipe->head &&
+ (pipe->next_tx_frame <= current_frame) &&
+ ((pipe->split_sc_frame == -1) || ((((int)current_frame - (int)pipe->split_sc_frame) & 0x7f) < 0x40)) &&
+ (!usb->active_split || (usb->active_split == pipe->head))) {
+ CVMX_PREFETCH(pipe, 128);
+ CVMX_PREFETCH(pipe->head, 0);
+ return pipe;
+ }
+ pipe = pipe->next;
+ }
+ return NULL;
+}
+
+
+/**
+ * Called whenever a pipe might need to be scheduled to the
+ * hardware.
+ *
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
+ * @is_sof: True if this schedule was called on a SOF interrupt.
+ */
+static void __cvmx_usb_schedule(struct cvmx_usb_internal_state *usb, int is_sof)
+{
+ int channel;
+ struct cvmx_usb_pipe *pipe;
+ int need_sof;
+ enum cvmx_usb_transfer ttype;
+
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
+ /* Without DMA we need to be careful not to schedule something at the end of a frame and cause an overrun */
+ union cvmx_usbcx_hfnum hfnum = {.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index))};
+ union cvmx_usbcx_hfir hfir = {.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HFIR(usb->index))};
+ if (hfnum.s.frrem < hfir.s.frint/4)
+ goto done;
+ }
+
+ while (usb->idle_hardware_channels) {
+ /* Find an idle channel */
+ CVMX_CLZ(channel, usb->idle_hardware_channels);
+ channel = 31 - channel;
+ if (unlikely(channel > 7))
+ break;
+
+ /* Find a pipe needing service */
+ pipe = NULL;
+ if (is_sof) {
+ /*
+ * Only process periodic pipes on SOF interrupts. This
+ * way we are sure that the periodic data is sent at the
+ * beginning of the frame
+ */
+ pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_ISOCHRONOUS, usb->frame_number);
+ if (likely(!pipe))
+ pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_INTERRUPT, usb->frame_number);
+ }
+ if (likely(!pipe)) {
+ pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_CONTROL, usb->frame_number);
+ if (likely(!pipe))
+ pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_BULK, usb->frame_number);
+ }
+ if (!pipe)
+ break;
+
+ __cvmx_usb_start_channel(usb, channel, pipe);
+ }
+
+done:
+ /*
+ * Only enable SOF interrupts when we have transactions pending in the
+ * future that might need to be scheduled
+ */
+ need_sof = 0;
+ for (ttype = CVMX_USB_TRANSFER_CONTROL; ttype <= CVMX_USB_TRANSFER_INTERRUPT; ttype++) {
+ pipe = usb->active_pipes[ttype].head;
+ while (pipe) {
+ if (pipe->next_tx_frame > usb->frame_number) {
+ need_sof = 1;
+ break;
+ }
+ pipe = pipe->next;
+ }
+ }
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, sofmsk, need_sof);
+ return;
+}
+
+
+/**
+ * Call a user's callback for a specific reason.
+ *
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
+ * @pipe: Pipe the callback is for or NULL
+ * @transaction:
+ * Transaction the callback is for or NULL
+ * @reason: Reason this callback is being called
+ * @complete_code:
+ * Completion code for the transaction, if any
+ */
+static void __cvmx_usb_perform_callback(struct cvmx_usb_internal_state *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct cvmx_usb_transaction *transaction,
+ enum cvmx_usb_callback reason,
+ enum cvmx_usb_complete complete_code)
+{
+ cvmx_usb_callback_func_t callback = usb->callback[reason];
+ void *user_data = usb->callback_data[reason];
+ int submit_handle = -1;
+ int pipe_handle = -1;
+ int bytes_transferred = 0;
+
+ if (pipe)
+ pipe_handle = __cvmx_usb_get_pipe_handle(usb, pipe);
+
+ if (transaction) {
+ submit_handle = __cvmx_usb_get_submit_handle(usb, transaction);
+ bytes_transferred = transaction->actual_bytes;
+ /* Transactions are allowed to override the default callback */
+ if ((reason == CVMX_USB_CALLBACK_TRANSFER_COMPLETE) && transaction->callback) {
+ callback = transaction->callback;
+ user_data = transaction->callback_data;
+ }
+ }
+
+ if (!callback)
+ return;
+
+ callback((struct cvmx_usb_state *)usb, reason, complete_code, pipe_handle, submit_handle,
+ bytes_transferred, user_data);
+}
+
+
+/**
+ * Signal the completion of a transaction and free it. The
+ * transaction will be removed from the pipe transaction list.
+ *
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
+ * @pipe: Pipe the transaction is on
+ * @transaction:
+ * Transaction that completed
+ * @complete_code:
+ * Completion code
+ */
+static void __cvmx_usb_perform_complete(struct cvmx_usb_internal_state *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct cvmx_usb_transaction *transaction,
+ enum cvmx_usb_complete complete_code)
+{
+ /* If this was a split then clear our split in progress marker */
+ if (usb->active_split == transaction)
+ usb->active_split = NULL;
+
+ /*
+ * Isochronous transactions need extra processing as they might not be
+ * done after a single data transfer
+ */
+ if (unlikely(transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)) {
+ /* Update the number of bytes transferred in this ISO packet */
+ transaction->iso_packets[0].length = transaction->actual_bytes;
+ transaction->iso_packets[0].status = complete_code;
+
+ /*
+ * If there are more ISOs pending and we succeeded, schedule the
+ * next one
+ */
+ if ((transaction->iso_number_packets > 1) && (complete_code == CVMX_USB_COMPLETE_SUCCESS)) {
+ transaction->actual_bytes = 0; /* No bytes transferred for this packet as of yet */
+ transaction->iso_number_packets--; /* One less ISO waiting to transfer */
+ transaction->iso_packets++; /* Increment to the next location in our packet array */
+ transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
+ goto done;
+ }
+ }
+
+ /* Remove the transaction from the pipe list */
+ if (transaction->next)
+ transaction->next->prev = transaction->prev;
+ else
+ pipe->tail = transaction->prev;
+ if (transaction->prev)
+ transaction->prev->next = transaction->next;
+ else
+ pipe->head = transaction->next;
+ if (!pipe->head) {
+ __cvmx_usb_remove_pipe(usb->active_pipes + pipe->transfer_type, pipe);
+ __cvmx_usb_append_pipe(&usb->idle_pipes, pipe);
+ }
+ __cvmx_usb_perform_callback(usb, pipe, transaction,
+ CVMX_USB_CALLBACK_TRANSFER_COMPLETE,
+ complete_code);
+ __cvmx_usb_free_transaction(usb, transaction);
+done:
+ return;
+}
+
+
+/**
+ * Submit a usb transaction to a pipe. Called for all types
+ * of transactions.
+ *
+ * @usb: USB device state populated by cvmx_usb_initialize().
+ * @pipe_handle:
+ * Which pipe to submit to. Will be validated in this function.
+ * @type: Transaction type
+ * @flags: Flags for the transaction
+ * @buffer: User buffer for the transaction
+ * @buffer_length:
+ * User buffer's length in bytes
+ * @control_header:
+ * For control transactions, the 8 byte standard header
+ * @iso_start_frame:
+ * For ISO transactions, the start frame
+ * @iso_number_packets:
+ * For ISO, the number of packets in the transaction.
+ * @iso_packets:
+ * A description of each ISO packet
+ * @callback: User callback to call when the transaction completes
+ * @user_data: User's data for the callback
+ *
+ * Returns: Submit handle or negative on failure. Matches the result
+ * in the external API.
+ */
+static int __cvmx_usb_submit_transaction(struct cvmx_usb_internal_state *usb,
+ int pipe_handle,
+ enum cvmx_usb_transfer type,
+ int flags,
+ uint64_t buffer,
+ int buffer_length,
+ uint64_t control_header,
+ int iso_start_frame,
+ int iso_number_packets,
+ struct cvmx_usb_iso_packet *iso_packets,
+ cvmx_usb_callback_func_t callback,
+ void *user_data)
+{
+ int submit_handle;
+ struct cvmx_usb_transaction *transaction;
+ struct cvmx_usb_pipe *pipe = usb->pipe + pipe_handle;
+
+ if (unlikely((pipe_handle < 0) || (pipe_handle >= MAX_PIPES)))
+ return -EINVAL;
+ /* Fail if the pipe isn't open */
+ if (unlikely((pipe->flags & __CVMX_USB_PIPE_FLAGS_OPEN) == 0))
+ return -EINVAL;
+ if (unlikely(pipe->transfer_type != type))
+ return -EINVAL;
+
+ transaction = __cvmx_usb_alloc_transaction(usb);
+ if (unlikely(!transaction))
+ return -ENOMEM;
+
+ transaction->type = type;
+ transaction->flags |= flags;
+ transaction->buffer = buffer;
+ transaction->buffer_length = buffer_length;
+ transaction->control_header = control_header;
+ transaction->iso_start_frame = iso_start_frame; /* FIXME: This is not used, implement it */
+ transaction->iso_number_packets = iso_number_packets;
+ transaction->iso_packets = iso_packets;
+ transaction->callback = callback;
+ transaction->callback_data = user_data;
+ if (transaction->type == CVMX_USB_TRANSFER_CONTROL)
+ transaction->stage = CVMX_USB_STAGE_SETUP;
+ else
+ transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
+
+ transaction->next = NULL;
+ if (pipe->tail) {
+ transaction->prev = pipe->tail;
+ transaction->prev->next = transaction;
+ } else {
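+ /*
+ * The pipe was idle; if its next transmit frame has already
+ * passed, realign it to the original cadence. Illustrative
+ * numbers: interval = 8, next_tx_frame = 100 and
+ * frame_number = 125 gives 125 + 8 - (25 % 8) = 132, the next
+ * slot that is still a multiple of 8 frames after 100.
+ */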
+ if (pipe->next_tx_frame < usb->frame_number)
+ pipe->next_tx_frame = usb->frame_number + pipe->interval -
+ (usb->frame_number - pipe->next_tx_frame) % pipe->interval;
+ transaction->prev = NULL;
+ pipe->head = transaction;
+ __cvmx_usb_remove_pipe(&usb->idle_pipes, pipe);
+ __cvmx_usb_append_pipe(usb->active_pipes + pipe->transfer_type, pipe);
+ }
+ pipe->tail = transaction;
+
+ submit_handle = __cvmx_usb_get_submit_handle(usb, transaction);
+
+ /* We may need to schedule the pipe if this was the head of the pipe */
+ if (!transaction->prev)
+ __cvmx_usb_schedule(usb, 0);
+
+ return submit_handle;
+}
+
+
+/**
+ * Call to submit a USB Bulk transfer to a pipe.
+ *
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
+ * @pipe_handle:
+ * Handle to the pipe for the transfer.
+ * @buffer: Physical address of the data buffer in
+ * memory. Note that this is NOT A POINTER, but
+ * the full 64bit physical address of the
+ * buffer. This may be zero if buffer_length is
+ * zero.
+ * @buffer_length:
+ * Length of buffer in bytes.
+ * @callback: Function to call when this transaction
+ * completes. If the return value of this
+ * function isn't an error, then this function
+ * is guaranteed to be called when the
+ * transaction completes. If this parameter is
+ * NULL, then the generic callback registered
+ * through cvmx_usb_register_callback is
+ * called. If both are NULL, then there is no
+ * way to know when a transaction completes.
+ * @user_data: User supplied data returned when the
+ * callback is called. This is only used if
+ * callback is not NULL.
+ *
+ * Returns: A submitted transaction handle or negative on
+ * failure. Negative values are error codes.
+ */
+int cvmx_usb_submit_bulk(struct cvmx_usb_state *state, int pipe_handle,
+ uint64_t buffer, int buffer_length,
+ cvmx_usb_callback_func_t callback,
+ void *user_data)
+{
+ int submit_handle;
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+
+ /* Pipe handle checking is done later in a common place */
+ if (unlikely(!buffer))
+ return -EINVAL;
+ if (unlikely(buffer_length < 0))
+ return -EINVAL;
+
+ submit_handle = __cvmx_usb_submit_transaction(usb, pipe_handle,
+ CVMX_USB_TRANSFER_BULK,
+ 0, /* flags */
+ buffer,
+ buffer_length,
+ 0, /* control_header */
+ 0, /* iso_start_frame */
+ 0, /* iso_number_packets */
+ NULL, /* iso_packets */
+ callback,
+ user_data);
+ return submit_handle;
+}
+
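+ /*
+ * Illustrative call (values hypothetical): queue a 512 byte bulk
+ * transfer on an already opened pipe, relying on the generic
+ * callback registered via cvmx_usb_register_callback():
+ *
+ * submit = cvmx_usb_submit_bulk(state, pipe, buffer_phys, 512,
+ * NULL, NULL);
+ */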
+
+/**
+ * Call to submit a USB Interrupt transfer to a pipe.
+ *
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
+ * @pipe_handle:
+ * Handle to the pipe for the transfer.
+ * @buffer: Physical address of the data buffer in
+ * memory. Note that this is NOT A POINTER, but
+ * the full 64bit physical address of the
+ * buffer. This may be zero if buffer_length is
+ * zero.
+ * @buffer_length:
+ * Length of buffer in bytes.
+ * @callback: Function to call when this transaction
+ * completes. If the return value of this
+ * function isn't an error, then this function
+ * is guaranteed to be called when the
+ * transaction completes. If this parameter is
+ * NULL, then the generic callback registered
+ * through cvmx_usb_register_callback is
+ * called. If both are NULL, then there is no
+ * way to know when a transaction completes.
+ * @user_data: User supplied data returned when the
+ * callback is called. This is only used if
+ * callback is not NULL.
+ *
+ * Returns: A submitted transaction handle or negative on
+ * failure. Negative values are error codes.
+ */
+int cvmx_usb_submit_interrupt(struct cvmx_usb_state *state, int pipe_handle,
+ uint64_t buffer, int buffer_length,
+ cvmx_usb_callback_func_t callback,
+ void *user_data)
+{
+ int submit_handle;
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+
+ /* Pipe handle checking is done later in a common place */
+ if (unlikely(!buffer))
+ return -EINVAL;
+ if (unlikely(buffer_length < 0))
+ return -EINVAL;
+
+ submit_handle = __cvmx_usb_submit_transaction(usb, pipe_handle,
+ CVMX_USB_TRANSFER_INTERRUPT,
+ 0, /* flags */
+ buffer,
+ buffer_length,
+ 0, /* control_header */
+ 0, /* iso_start_frame */
+ 0, /* iso_number_packets */
+ NULL, /* iso_packets */
+ callback,
+ user_data);
+ return submit_handle;
+}
+
+
+/**
+ * Call to submit a USB Control transfer to a pipe.
+ *
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
+ * @pipe_handle:
+ * Handle to the pipe for the transfer.
+ * @control_header:
+ * USB 8 byte control header physical address.
+ * Note that this is NOT A POINTER, but the
+ * full 64bit physical address of the buffer.
+ * @buffer: Physical address of the data buffer in
+ * memory. Note that this is NOT A POINTER, but
+ * the full 64bit physical address of the
+ * buffer. This may be zero if buffer_length is
+ * zero.
+ * @buffer_length:
+ * Length of buffer in bytes.
+ * @callback: Function to call when this transaction
+ * completes. If the return value of this
+ * function isn't an error, then this function
+ * is guaranteed to be called when the
+ * transaction completes. If this parameter is
+ * NULL, then the generic callback registered
+ * through cvmx_usb_register_callback is
+ * called. If both are NULL, then there is no
+ * way to know when a transaction completes.
+ * @user_data: User supplied data returned when the
+ * callback is called. This is only used if
+ * callback is not NULL.
+ *
+ * Returns: A submitted transaction handle or negative on
+ * failure. Negative values are error codes.
+ */
+int cvmx_usb_submit_control(struct cvmx_usb_state *state, int pipe_handle,
+ uint64_t control_header,
+ uint64_t buffer, int buffer_length,
+ cvmx_usb_callback_func_t callback,
+ void *user_data)
+{
+ int submit_handle;
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+ union cvmx_usb_control_header *header =
+ cvmx_phys_to_ptr(control_header);
+
+ /* Pipe handle checking is done later in a common place */
+ if (unlikely(!control_header))
+ return -EINVAL;
+ /* Some drivers send a buffer with a zero length. God only knows why */
+ if (unlikely(buffer && (buffer_length < 0)))
+ return -EINVAL;
+ if (unlikely(!buffer && (buffer_length != 0)))
+ return -EINVAL;
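+ /*
+ * Bit 7 of bmRequestType gives the data stage direction; for
+ * host-to-device (OUT) requests the length to send comes from
+ * wLength in the setup header rather than the caller's
+ * buffer_length.
+ */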
+ if ((header->s.request_type & 0x80) == 0)
+ buffer_length = le16_to_cpu(header->s.length);
+
+ submit_handle = __cvmx_usb_submit_transaction(usb, pipe_handle,
+ CVMX_USB_TRANSFER_CONTROL,
+ 0, /* flags */
+ buffer,
+ buffer_length,
+ control_header,
+ 0, /* iso_start_frame */
+ 0, /* iso_number_packets */
+ NULL, /* iso_packets */
+ callback,
+ user_data);
+ return submit_handle;
+}
+
+
+/**
+ * Call to submit a USB Isochronous transfer to a pipe.
+ *
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
+ * @pipe_handle:
+ * Handle to the pipe for the transfer.
+ * @start_frame:
+ * Number of frames into the future to schedule
+ * this transaction.
+ * @flags: Flags to control the transfer. See
+ * enum cvmx_usb_isochronous_flags for the flag
+ * definitions.
+ * @number_packets:
+ * Number of sequential packets to transfer.
+ * "packets" is a pointer to an array of this
+ * many packet structures.
+ * @packets: Description of each transfer packet as
+ * defined by struct cvmx_usb_iso_packet. The array
+ * pointed to here must stay valid until the
+ * complete callback is called.
+ * @buffer: Physical address of the data buffer in
+ * memory. Note that this is NOT A POINTER, but
+ * the full 64bit physical address of the
+ * buffer. This may be zero if buffer_length is
+ * zero.
+ * @buffer_length:
+ * Length of buffer in bytes.
+ * @callback: Function to call when this transaction
+ * completes. If the return value of this
+ * function isn't an error, then this function
+ * is guaranteed to be called when the
+ * transaction completes. If this parameter is
+ * NULL, then the generic callback registered
+ * through cvmx_usb_register_callback is
+ * called. If both are NULL, then there is no
+ * way to know when a transaction completes.
+ * @user_data: User supplied data returned when the
+ * callback is called. This is only used if
+ * callback is not NULL.
+ *
+ * Returns: A submitted transaction handle or negative on
+ * failure. Negative values are error codes.
+ */
+int cvmx_usb_submit_isochronous(struct cvmx_usb_state *state, int pipe_handle,
+ int start_frame, int flags,
+ int number_packets,
+ struct cvmx_usb_iso_packet packets[],
+ uint64_t buffer, int buffer_length,
+ cvmx_usb_callback_func_t callback,
+ void *user_data)
+{
+ int submit_handle;
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+
+ /* Pipe handle checking is done later in a common place */
+ if (unlikely(start_frame < 0))
+ return -EINVAL;
+ if (unlikely(flags & ~(CVMX_USB_ISOCHRONOUS_FLAGS_ALLOW_SHORT | CVMX_USB_ISOCHRONOUS_FLAGS_ASAP)))
+ return -EINVAL;
+ if (unlikely(number_packets < 1))
+ return -EINVAL;
+ if (unlikely(!packets))
+ return -EINVAL;
+ if (unlikely(!buffer))
+ return -EINVAL;
+ if (unlikely(buffer_length < 0))
+ return -EINVAL;
+
+ submit_handle = __cvmx_usb_submit_transaction(usb, pipe_handle,
+ CVMX_USB_TRANSFER_ISOCHRONOUS,
+ flags,
+ buffer,
+ buffer_length,
+ 0, /* control_header */
+ start_frame,
+ number_packets,
+ packets,
+ callback,
+ user_data);
+ return submit_handle;
+}
+
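+ /*
+ * Illustrative call (values hypothetical): two 188 byte ISO packets
+ * laid out back to back in one physical buffer:
+ *
+ * struct cvmx_usb_iso_packet pkt[2] = {
+ * { .offset = 0, .length = 188 },
+ * { .offset = 188, .length = 188 },
+ * };
+ * submit = cvmx_usb_submit_isochronous(state, pipe, 0,
+ * CVMX_USB_ISOCHRONOUS_FLAGS_ASAP,
+ * 2, pkt, buffer_phys, 376,
+ * NULL, NULL);
+ */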
+
+/**
+ * Cancel one outstanding request in a pipe. Canceling a request
+ * can fail if the transaction has already completed before cancel
+ * is called. Even after a successful cancel call, it may take
+ * a frame or two for the cvmx_usb_poll() function to call the
+ * associated callback.
+ *
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
+ * @pipe_handle:
+ * Pipe handle to cancel requests in.
+ * @submit_handle:
+ * Handle to transaction to cancel, returned by the submit function.
+ *
+ * Returns: 0 or a negative error code.
+ */
+int cvmx_usb_cancel(struct cvmx_usb_state *state, int pipe_handle, int submit_handle)
+{
+ struct cvmx_usb_transaction *transaction;
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+ struct cvmx_usb_pipe *pipe = usb->pipe + pipe_handle;
+
+ if (unlikely((pipe_handle < 0) || (pipe_handle >= MAX_PIPES)))
+ return -EINVAL;
+ if (unlikely((submit_handle < 0) || (submit_handle >= MAX_TRANSACTIONS)))
+ return -EINVAL;
+
+ /* Fail if the pipe isn't open */
+ if (unlikely((pipe->flags & __CVMX_USB_PIPE_FLAGS_OPEN) == 0))
+ return -EINVAL;
+
+ transaction = usb->transaction + submit_handle;
+
+ /* Fail if this transaction already completed */
+ if (unlikely((transaction->flags & __CVMX_USB_TRANSACTION_FLAGS_IN_USE) == 0))
+ return -EINVAL;
+
+ /*
+ * If the transaction is the HEAD of the queue and scheduled, we need to
+ * treat it specially.
+ */
+ if ((pipe->head == transaction) &&
+ (pipe->flags & __CVMX_USB_PIPE_FLAGS_SCHEDULED)) {
+ union cvmx_usbcx_hccharx usbc_hcchar;
+
+ usb->pipe_for_channel[pipe->channel] = NULL;
+ pipe->flags &= ~__CVMX_USB_PIPE_FLAGS_SCHEDULED;
+
+ CVMX_SYNCW;
+
+ usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(pipe->channel, usb->index));
+ /* If the channel isn't enabled then the transaction already completed */
+ if (usbc_hcchar.s.chena) {
+ usbc_hcchar.s.chdis = 1;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(pipe->channel, usb->index), usbc_hcchar.u32);
+ }
+ }
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_CANCEL);
+ return 0;
+}
+
+
+/**
+ * Cancel all outstanding requests in a pipe. Logically all this
+ * does is call cvmx_usb_cancel() in a loop.
+ *
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
+ * @pipe_handle:
+ * Pipe handle to cancel requests in.
+ *
+ * Returns: 0 or a negative error code.
+ */
+int cvmx_usb_cancel_all(struct cvmx_usb_state *state, int pipe_handle)
+{
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+ struct cvmx_usb_pipe *pipe = usb->pipe + pipe_handle;
+
+ if (unlikely((pipe_handle < 0) || (pipe_handle >= MAX_PIPES)))
+ return -EINVAL;
+
+ /* Fail if the pipe isn't open */
+ if (unlikely((pipe->flags & __CVMX_USB_PIPE_FLAGS_OPEN) == 0))
+ return -EINVAL;
+
+ /* Simply loop through and attempt to cancel each transaction */
+ while (pipe->head) {
+ int result = cvmx_usb_cancel(state, pipe_handle,
+ __cvmx_usb_get_submit_handle(usb, pipe->head));
+ if (unlikely(result != 0))
+ return result;
+ }
+ return 0;
+}
+
+
+/**
+ * Close a pipe created with cvmx_usb_open_pipe().
+ *
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
+ * @pipe_handle:
+ * Pipe handle to close.
+ *
+ * Returns: 0 or a negative error code. EBUSY is returned if the pipe has
+ * outstanding transfers.
+ */
+int cvmx_usb_close_pipe(struct cvmx_usb_state *state, int pipe_handle)
+{
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+ struct cvmx_usb_pipe *pipe = usb->pipe + pipe_handle;
+
+ if (unlikely((pipe_handle < 0) || (pipe_handle >= MAX_PIPES)))
+ return -EINVAL;
+
+ /* Fail if the pipe isn't open */
+ if (unlikely((pipe->flags & __CVMX_USB_PIPE_FLAGS_OPEN) == 0))
+ return -EINVAL;
+
+ /* Fail if the pipe has pending transactions */
+ if (unlikely(pipe->head))
+ return -EBUSY;
+
+ pipe->flags = 0;
+ __cvmx_usb_remove_pipe(&usb->idle_pipes, pipe);
+ __cvmx_usb_append_pipe(&usb->free_pipes, pipe);
+
+ return 0;
+}
+
+
+/**
+ * Register a function to be called when various USB events occur.
+ *
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
+ * @reason: Which event to register for.
+ * @callback: Function to call when the event occurs.
+ * @user_data: User data parameter to the function.
+ *
+ * Returns: 0 or a negative error code.
+ */
+int cvmx_usb_register_callback(struct cvmx_usb_state *state,
+ enum cvmx_usb_callback reason,
+ cvmx_usb_callback_func_t callback,
+ void *user_data)
+{
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+
+ if (unlikely(reason >= __CVMX_USB_CALLBACK_END))
+ return -EINVAL;
+ if (unlikely(!callback))
+ return -EINVAL;
+
+ usb->callback[reason] = callback;
+ usb->callback_data[reason] = user_data;
+
+ return 0;
+}
+
+
+/**
+ * Get the current USB protocol level frame number. The frame
+ * number is always in the range of 0-0x7ff.
+ *
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
+ *
+ * Returns: USB frame number
+ */
+int cvmx_usb_get_frame_number(struct cvmx_usb_state *state)
+{
+ int frame_number;
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+ union cvmx_usbcx_hfnum usbc_hfnum;
+
+ usbc_hfnum.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
+ frame_number = usbc_hfnum.s.frnum;
+
+ return frame_number;
+}
+
+
+/**
+ * Poll a channel for status
+ *
+ * @usb: USB device
+ * @channel: Channel to poll
+ *
+ * Returns: Zero on success
+ */
+static int __cvmx_usb_poll_channel(struct cvmx_usb_internal_state *usb, int channel)
+{
+ union cvmx_usbcx_hcintx usbc_hcint;
+ union cvmx_usbcx_hctsizx usbc_hctsiz;
+ union cvmx_usbcx_hccharx usbc_hcchar;
+ struct cvmx_usb_pipe *pipe;
+ struct cvmx_usb_transaction *transaction;
+ int bytes_this_transfer;
+ int bytes_in_last_packet;
+ int packets_processed;
+ int buffer_space_left;
+
+ /* Read the interrupt status bits for the channel */
+ usbc_hcint.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCINTX(channel, usb->index));
+
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
+ usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index));
+
+ if (usbc_hcchar.s.chena && usbc_hcchar.s.chdis) {
+ /*
+ * There seems to be a bug in CN31XX which can cause
+ * interrupt IN transfers to get stuck until we do a
+ * write of HCCHARX without changing things
+ */
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index), usbc_hcchar.u32);
+ return 0;
+ }
+
+ /*
+ * In non-DMA mode the channels don't halt themselves. We need
+ * to manually disable channels that are left running
+ */
+ if (!usbc_hcint.s.chhltd) {
+ if (usbc_hcchar.s.chena) {
+ union cvmx_usbcx_hcintmskx hcintmsk;
+ /* Disable all interrupts except CHHLTD */
+ hcintmsk.u32 = 0;
+ hcintmsk.s.chhltdmsk = 1;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), hcintmsk.u32);
+ usbc_hcchar.s.chdis = 1;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index), usbc_hcchar.u32);
+ return 0;
+ } else if (usbc_hcint.s.xfercompl) {
+ /* Successful IN/OUT with transfer complete. Channel halt isn't needed */
+ } else {
+ cvmx_dprintf("USB%d: Channel %d interrupt without halt\n", usb->index, channel);
+ return 0;
+ }
+ }
+ } else {
+ /*
+ * There are no interrupts that we need to process while the
+ * channel is still running
+ */
+ if (!usbc_hcint.s.chhltd)
+ return 0;
+ }
+
+ /* Disable the channel interrupts now that it is done */
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), 0);
+ usb->idle_hardware_channels |= (1<<channel);
+
+ /* Make sure this channel is tied to a valid pipe */
+ pipe = usb->pipe_for_channel[channel];
+ CVMX_PREFETCH(pipe, 0);
+ CVMX_PREFETCH(pipe, 128);
+ if (!pipe)
+ return 0;
+ transaction = pipe->head;
+ CVMX_PREFETCH0(transaction);
+
+ /*
+ * Disconnect this pipe from the HW channel. Later the schedule
+ * function will figure out which pipe needs to go
+ */
+ usb->pipe_for_channel[channel] = NULL;
+ pipe->flags &= ~__CVMX_USB_PIPE_FLAGS_SCHEDULED;
+
+ /*
+ * Read the channel config info so we can figure out how much data
+ * transferred
+ */
+ usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index));
+ usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index));
+
+ /*
+ * Calculating the number of bytes successfully transferred is dependent
+ * on the transfer direction
+ */
+ packets_processed = transaction->pktcnt - usbc_hctsiz.s.pktcnt;
+ if (usbc_hcchar.s.epdir) {
+ /*
+ * IN transactions are easy. For every byte received the
+ * hardware decrements xfersize. All we need to do is subtract
+ * the current value of xfersize from its starting value and we
+ * know how many bytes were written to the buffer
+ */
+ bytes_this_transfer = transaction->xfersize - usbc_hctsiz.s.xfersize;
+ } else {
+ /*
+ * OUT transactions don't decrement xfersize. Instead pktcnt is
+ * decremented on every successfully sent packet. The hardware
+ * does this when it receives an ACK or NYET. If it doesn't
+ * receive one of these responses pktcnt doesn't change
+ */
+ bytes_this_transfer = packets_processed * usbc_hcchar.s.mps;
+ /*
+ * The last packet may not be a full transfer if we didn't have
+ * enough data
+ */
+ if (bytes_this_transfer > transaction->xfersize)
+ bytes_this_transfer = transaction->xfersize;
+ }
+ /* Figure out how many bytes were in the last packet of the transfer */
+ if (packets_processed)
+ bytes_in_last_packet = bytes_this_transfer - (packets_processed-1) * usbc_hcchar.s.mps;
+ else
+ bytes_in_last_packet = bytes_this_transfer;
+
+ /*
+ * As a special case, setup transactions output the setup header, not
+ * the user's data. For this reason we don't count setup data as bytes
+ * transferred
+ */
+ if ((transaction->stage == CVMX_USB_STAGE_SETUP) ||
+ (transaction->stage == CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE))
+ bytes_this_transfer = 0;
+
+ /*
+ * Add the bytes transferred to the running total. It is important that
+ * bytes_this_transfer doesn't count any data that needs to be
+ * retransmitted
+ */
+ transaction->actual_bytes += bytes_this_transfer;
+ if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
+ buffer_space_left = transaction->iso_packets[0].length - transaction->actual_bytes;
+ else
+ buffer_space_left = transaction->buffer_length - transaction->actual_bytes;
+
+ /*
+ * We need to remember the PID toggle state for the next transaction.
+ * The hardware already updated it for the next transaction
+ */
+ pipe->pid_toggle = !(usbc_hctsiz.s.pid == 0);
+
+ /*
+ * For high speed bulk out, assume the next transaction will need to do
+ * a ping before proceeding. If this isn't true the ACK processing below
+ * will clear this flag
+ */
+ if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
+ (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) &&
+ (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT))
+ pipe->flags |= __CVMX_USB_PIPE_FLAGS_NEED_PING;
+
+ if (usbc_hcint.s.stall) {
+ /*
+ * STALL as a response means this transaction cannot be
+ * completed because the device can't process transactions. Tell
+ * the user. Any data that was transferred will be counted in
+ * the actual bytes transferred
+ */
+ pipe->pid_toggle = 0;
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_STALL);
+ } else if (usbc_hcint.s.xacterr) {
+ /*
+ * We know at least one packet worked if we get an ACK or NAK.
+ * Reset the retry counter
+ */
+ if (usbc_hcint.s.nak || usbc_hcint.s.ack)
+ transaction->retries = 0;
+ transaction->retries++;
+ if (transaction->retries > MAX_RETRIES) {
+ /*
+ * XactErr as a response means the device signaled
+ * something wrong with the transfer. For example, PID
+ * toggle errors cause these
+ */
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_XACTERR);
+ } else {
+ /*
+ * If this was a split then clear our split in progress
+ * marker
+ */
+ if (usb->active_split == transaction)
+ usb->active_split = NULL;
+ /*
+ * Rewind to the beginning of the transaction by anding
+ * off the split complete bit
+ */
+ transaction->stage &= ~1;
+ pipe->split_sc_frame = -1;
+ pipe->next_tx_frame += pipe->interval;
+ if (pipe->next_tx_frame < usb->frame_number)
+ pipe->next_tx_frame = usb->frame_number + pipe->interval -
+ (usb->frame_number - pipe->next_tx_frame) % pipe->interval;
+ }
+ } else if (usbc_hcint.s.bblerr) {
+ /* Babble Error (BblErr) */
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_BABBLEERR);
+ } else if (usbc_hcint.s.datatglerr) {
+ /* We'll retry the exact same transaction again */
+ transaction->retries++;
+ } else if (usbc_hcint.s.nyet) {
+ /*
+ * NYET as a response is only allowed in three cases: as a
+ * response to a ping, as a response to a split transaction, and
+ * as a response to a bulk out. The ping case is handled by
+ * hardware, so we only have splits and bulk out
+ */
+ if (!__cvmx_usb_pipe_needs_split(usb, pipe)) {
+ transaction->retries = 0;
+ /*
+ * If there is more data to go then we need to try
+ * again. Otherwise this transaction is complete
+ */
+ if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet))
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ } else {
+ /*
+ * Split transactions retry the split complete 4 times
+ * then rewind to the start split and do the entire
+ * transaction again
+ */
+ transaction->retries++;
+ if ((transaction->retries & 0x3) == 0) {
+ /*
+ * Rewind to the beginning of the transaction by
+ * anding off the split complete bit
+ */
+ transaction->stage &= ~1;
+ pipe->split_sc_frame = -1;
+ }
+ }
+ } else if (usbc_hcint.s.ack) {
+ transaction->retries = 0;
+ /*
+ * The ACK bit can only be checked after the other error bits.
+ * This is because a multi packet transfer may succeed in a
+ * number of packets and then get a different response on the
+ * last packet. In this case both ACK and the last response bit
+ * will be set. If none of the other response bits is set, then
+ * the last packet must have been an ACK
+ *
+ * Since we got an ACK, we know we don't need to do a ping on
+ * this pipe
+ */
+ pipe->flags &= ~__CVMX_USB_PIPE_FLAGS_NEED_PING;
+
+ switch (transaction->type) {
+ case CVMX_USB_TRANSFER_CONTROL:
+ switch (transaction->stage) {
+ case CVMX_USB_STAGE_NON_CONTROL:
+ case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
+ /* This should be impossible */
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_ERROR);
+ break;
+ case CVMX_USB_STAGE_SETUP:
+ pipe->pid_toggle = 1;
+ if (__cvmx_usb_pipe_needs_split(usb, pipe))
+ transaction->stage = CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE;
+ else {
+ union cvmx_usb_control_header *header =
+ cvmx_phys_to_ptr(transaction->control_header);
+ if (header->s.length)
+ transaction->stage = CVMX_USB_STAGE_DATA;
+ else
+ transaction->stage = CVMX_USB_STAGE_STATUS;
+ }
+ break;
+ case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
+ {
+ union cvmx_usb_control_header *header =
+ cvmx_phys_to_ptr(transaction->control_header);
+ if (header->s.length)
+ transaction->stage = CVMX_USB_STAGE_DATA;
+ else
+ transaction->stage = CVMX_USB_STAGE_STATUS;
+ }
+ break;
+ case CVMX_USB_STAGE_DATA:
+ if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
+ transaction->stage = CVMX_USB_STAGE_DATA_SPLIT_COMPLETE;
+ /*
+ * For setup OUT data that are splits,
+ * the hardware doesn't appear to count
+ * transferred data. Here we manually
+ * update the data transferred
+ */
+ if (!usbc_hcchar.s.epdir) {
+ if (buffer_space_left < pipe->max_packet)
+ transaction->actual_bytes += buffer_space_left;
+ else
+ transaction->actual_bytes += pipe->max_packet;
+ }
+ } else if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet)) {
+ pipe->pid_toggle = 1;
+ transaction->stage = CVMX_USB_STAGE_STATUS;
+ }
+ break;
+ case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
+ if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet)) {
+ pipe->pid_toggle = 1;
+ transaction->stage = CVMX_USB_STAGE_STATUS;
+ } else {
+ transaction->stage = CVMX_USB_STAGE_DATA;
+ }
+ break;
+ case CVMX_USB_STAGE_STATUS:
+ if (__cvmx_usb_pipe_needs_split(usb, pipe))
+ transaction->stage = CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE;
+ else
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ break;
+ case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ break;
+ }
+ break;
+ case CVMX_USB_TRANSFER_BULK:
+ case CVMX_USB_TRANSFER_INTERRUPT:
+ /*
+ * The only time a bulk transfer isn't complete when it
+ * finishes with an ACK is during a split transaction.
+ * For splits we need to continue the transfer if more
+ * data is needed
+ */
+ if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
+ if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL)
+ transaction->stage = CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
+ else {
+ if (buffer_space_left && (bytes_in_last_packet == pipe->max_packet))
+ transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
+ else {
+ if (transaction->type == CVMX_USB_TRANSFER_INTERRUPT)
+ pipe->next_tx_frame += pipe->interval;
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ }
+ }
+ } else {
+ if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
+ (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) &&
+ (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
+ (usbc_hcint.s.nak))
+ pipe->flags |= __CVMX_USB_PIPE_FLAGS_NEED_PING;
+ if (!buffer_space_left || (bytes_in_last_packet < pipe->max_packet)) {
+ if (transaction->type == CVMX_USB_TRANSFER_INTERRUPT)
+ pipe->next_tx_frame += pipe->interval;
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ }
+ }
+ break;
+ case CVMX_USB_TRANSFER_ISOCHRONOUS:
+ if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
+ /*
+ * ISOCHRONOUS OUT splits don't require a
+ * complete split stage. Instead they use a
+ * sequence of begin OUT splits to transfer the
+ * data 188 bytes at a time. Once the transfer
+ * is complete, the pipe sleeps until the next
+ * schedule interval
+ */
+ if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
+ /*
+ * If no space left or this wasn't a max
+ * size packet then this transfer is
+ * complete. Otherwise start it again to
+ * send the next 188 bytes
+ */
+ if (!buffer_space_left || (bytes_this_transfer < 188)) {
+ pipe->next_tx_frame += pipe->interval;
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ }
+ } else {
+ if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE) {
+ /*
+ * We are in the incoming data
+ * phase. Keep getting data
+ * until we run out of space or
+ * get a small packet
+ */
+ if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet)) {
+ pipe->next_tx_frame += pipe->interval;
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ }
+ } else
+ transaction->stage = CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
+ }
+ } else {
+ pipe->next_tx_frame += pipe->interval;
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ }
+ break;
+ }
+ } else if (usbc_hcint.s.nak) {
+ /* If this was a split then clear our split in progress marker */
+ if (usb->active_split == transaction)
+ usb->active_split = NULL;
+ /*
+ * NAK as a response means the device couldn't accept the
+ * transaction, but it should be retried in the future. Rewind
+ * to the beginning of the transaction by anding off the split
+ * complete bit. Retry in the next interval
+ */
+ transaction->retries = 0;
+ transaction->stage &= ~1;
+ pipe->next_tx_frame += pipe->interval;
+ if (pipe->next_tx_frame < usb->frame_number)
+ pipe->next_tx_frame = usb->frame_number + pipe->interval -
+ (usb->frame_number - pipe->next_tx_frame) % pipe->interval;
+ } else {
+ struct cvmx_usb_port_status port;
+ port = cvmx_usb_get_status((struct cvmx_usb_state *)usb);
+ if (port.port_enabled) {
+ /* We'll retry the exact same transaction again */
+ transaction->retries++;
+ } else {
+ /*
+ * We get channel halted interrupts with no result bits
+ * set when the cable is unplugged
+ */
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_ERROR);
+ }
+ }
+ return 0;
+}
+
+
+/**
+ * Poll the USB block for status and call all needed callback
+ * handlers. This function is meant to be called in the interrupt
+ * handler for the USB controller. It can also be called
+ * periodically in a loop for non-interrupt based operation.
+ *
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
+ *
+ * Returns: 0 or a negative error code.
+ */
+int cvmx_usb_poll(struct cvmx_usb_state *state)
+{
+ union cvmx_usbcx_hfnum usbc_hfnum;
+ union cvmx_usbcx_gintsts usbc_gintsts;
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+
+ CVMX_PREFETCH(usb, 0);
+ CVMX_PREFETCH(usb, 1*128);
+ CVMX_PREFETCH(usb, 2*128);
+ CVMX_PREFETCH(usb, 3*128);
+ CVMX_PREFETCH(usb, 4*128);
+
+ /* Update the frame counter */
+ usbc_hfnum.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
+ if ((usb->frame_number&0x3fff) > usbc_hfnum.s.frnum)
+ usb->frame_number += 0x4000;
+ usb->frame_number &= ~0x3fffull;
+ usb->frame_number |= usbc_hfnum.s.frnum;
+
+ /* Read the pending interrupts */
+ usbc_gintsts.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GINTSTS(usb->index));
+
+ /* Clear the interrupts now that we know about them */
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index), usbc_gintsts.u32);
+
+ if (usbc_gintsts.s.rxflvl) {
+ /*
+ * RxFIFO Non-Empty (RxFLvl)
+ * Indicates that there is at least one packet pending to be
+ * read from the RxFIFO.
+ *
+ * In DMA mode this is handled by hardware
+ */
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ __cvmx_usb_poll_rx_fifo(usb);
+ }
+ if (usbc_gintsts.s.ptxfemp || usbc_gintsts.s.nptxfemp) {
+ /* Fill the Tx FIFOs when not in DMA mode */
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ __cvmx_usb_poll_tx_fifo(usb);
+ }
+ if (usbc_gintsts.s.disconnint || usbc_gintsts.s.prtint) {
+ union cvmx_usbcx_hprt usbc_hprt;
+ /*
+ * Disconnect Detected Interrupt (DisconnInt)
+ * Asserted when a device disconnect is detected.
+ *
+ * Host Port Interrupt (PrtInt)
+ * The core sets this bit to indicate a change in port status of
+ * one of the O2P USB core ports in Host mode. The application
+ * must read the Host Port Control and Status (HPRT) register to
+ * determine the exact event that caused this interrupt. The
+ * application must clear the appropriate status bit in the Host
+ * Port Control and Status register to clear this bit.
+ *
+ * Call the user's port callback
+ */
+ __cvmx_usb_perform_callback(usb, NULL, NULL,
+ CVMX_USB_CALLBACK_PORT_CHANGED,
+ CVMX_USB_COMPLETE_SUCCESS);
+ /* Clear the port change bits */
+ usbc_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
+ usbc_hprt.s.prtena = 0;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HPRT(usb->index), usbc_hprt.u32);
+ }
+ if (usbc_gintsts.s.hchint) {
+ /*
+ * Host Channels Interrupt (HChInt)
+ * The core sets this bit to indicate that an interrupt is
+ * pending on one of the channels of the core (in Host mode).
+ * The application must read the Host All Channels Interrupt
+ * (HAINT) register to determine the exact number of the channel
+ * on which the interrupt occurred, and then read the
+ * corresponding Host Channel-n Interrupt (HCINTn) register to
+ * determine the exact cause of the interrupt. The application
+ * must clear the appropriate status bit in the HCINTn register
+ * to clear this bit.
+ */
+ union cvmx_usbcx_haint usbc_haint;
+ usbc_haint.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HAINT(usb->index));
+ while (usbc_haint.u32) {
+ int channel;
+ CVMX_CLZ(channel, usbc_haint.u32);
+ channel = 31 - channel;
+ __cvmx_usb_poll_channel(usb, channel);
+ usbc_haint.u32 ^= 1<<channel;
+ }
+ }
+
+ __cvmx_usb_schedule(usb, usbc_gintsts.s.sof);
+
+ return 0;
+}
diff --git a/drivers/staging/octeon-usb/cvmx-usb.h b/drivers/staging/octeon-usb/cvmx-usb.h
new file mode 100644
index 0000000..8bf3696
--- /dev/null
+++ b/drivers/staging/octeon-usb/cvmx-usb.h
@@ -0,0 +1,542 @@
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * "cvmx-usb.h" defines a set of low level USB functions to help
+ * developers create Octeon USB drivers for various operating
+ * systems. These functions provide a generic API to the Octeon
+ * USB blocks, hiding the internal hardware specific
+ * operations.
+ *
+ * At a high level the device driver needs to:
+ *
+ * - Call cvmx_usb_get_num_ports() to get the number of
+ * supported ports.
+ * - Call cvmx_usb_initialize() for each Octeon USB port.
+ * - Enable the port using cvmx_usb_enable().
+ * - Either periodically, or in an interrupt handler, call
+ * cvmx_usb_poll() to service USB events.
+ * - Manage pipes using cvmx_usb_open_pipe() and
+ * cvmx_usb_close_pipe().
+ * - Manage transfers using cvmx_usb_submit_*() and
+ * cvmx_usb_cancel*().
+ * - Shutdown USB on unload using cvmx_usb_shutdown().
+ *
+ * To monitor USB status changes, the device driver must use
+ * cvmx_usb_register_callback() to register for events that it
+ * is interested in. Below are a few hints on successfully
+ * implementing a driver on top of this API.
+ *
+ * == Initialization ==
+ *
+ * When a driver is first loaded, it is normally not necessary
+ * to bring up the USB port completely. Most operating systems
+ * expect to initialize and enable the port in two independent
+ * steps. Normally an operating system will probe hardware,
+ * initialize anything found, and then enable the hardware.
+ *
+ * In the probe phase you should:
+ * - Use cvmx_usb_get_num_ports() to determine the number of
+ * USB ports to be supported.
+ * - Allocate space for a struct cvmx_usb_state for each
+ * port.
+ * - Tell the operating system about each port
+ *
+ * In the initialization phase you should:
+ * - Use cvmx_usb_initialize() on each port.
+ * - Do not call cvmx_usb_enable(). This leaves the USB port in
+ * the disabled state until the operating system is ready.
+ *
+ * Finally, in the enable phase you should:
+ * - Call cvmx_usb_enable() on the appropriate port.
+ * - Note that some operating systems use a RESET instead of an
+ * enable call. To implement RESET, you should call
+ * cvmx_usb_disable() followed by cvmx_usb_enable().
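+ *
+ * A minimal sketch of the three phases (error handling and the
+ * surrounding operating system glue are omitted; "my_state" and
+ * "my_port" are hypothetical storage owned by the driver):
+ *
+ *   // Probe: discover how many ports the chip has
+ *   int num_ports = cvmx_usb_get_num_ports();
+ *
+ *   // Initialize: bring the block up but leave the port disabled
+ *   cvmx_usb_initialize(&my_state, my_port,
+ *                       CVMX_USB_INITIALIZE_FLAGS_CLOCK_AUTO);
+ *
+ *   // Enable: only once the operating system is ready for devices
+ *   cvmx_usb_enable(&my_state);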
+ *
+ * == Locking ==
+ *
+ * All of the functions in the cvmx-usb API assume exclusive
+ * access to the USB hardware and internal data structures. This
+ * means that the driver must provide locking as necessary.
+ *
+ * In the single CPU case it is normally enough to disable
+ * interrupts before every call to cvmx_usb*() and enable them
+ * again after the call is complete. Keep in mind that it is
+ * very common for the callback handlers to make additional
+ * calls into cvmx-usb, so the disable/enable must be protected
+ * against recursion. As an example, the Linux kernel
+ * local_irq_save() and local_irq_restore() are perfect for this
+ * in the non SMP case.
+ *
+ * In the SMP case, locking is more complicated. For SMP you not
+ * only need to disable interrupts on the local core, but also
+ * take a lock to make sure that another core cannot call
+ * cvmx-usb.
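+ *
+ * For example, a non-SMP wrapper around the poll call could look like
+ * this (a sketch only; "state" is whatever the driver already stores):
+ *
+ *   unsigned long flags;
+ *
+ *   local_irq_save(flags);
+ *   cvmx_usb_poll(state);
+ *   local_irq_restore(flags);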
+ *
+ * == Port callback ==
+ *
+ * The port callback prototype needs to look as follows:
+ *
+ * void port_callback(struct cvmx_usb_state *usb,
+ * enum cvmx_usb_callback reason,
+ * enum cvmx_usb_complete status,
+ * int pipe_handle,
+ * int submit_handle,
+ * int bytes_transferred,
+ * void *user_data);
+ * - "usb" is the struct cvmx_usb_state for the port.
+ * - "reason" will always be CVMX_USB_CALLBACK_PORT_CHANGED.
+ * - "status" will always be CVMX_USB_COMPLETE_SUCCESS.
+ * - "pipe_handle" will always be -1.
+ * - "submit_handle" will always be -1.
+ * - "bytes_transferred" will always be 0.
+ * - "user_data" is the void pointer originally passed along
+ * with the callback. Use this for any state information you
+ * need.
+ *
+ * The port callback will be called whenever the user plugs /
+ * unplugs a device from the port. It will not be called when a
+ * device is plugged / unplugged from a hub connected to the
+ * root port. Normally all the callback needs to do is tell the
+ * operating system to poll the root hub for status. Under
+ * Linux, this is performed by calling usb_hcd_poll_rh_status().
+ * In the Linux driver we use "user_data" to pass around the
+ * Linux "hcd" structure. Once the port callback completes,
+ * Linux automatically calls octeon_usb_hub_status_data() which
+ * uses cvmx_usb_get_status() to determine the root port status.
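+ *
+ * A sketch of such a port callback, assuming "user_data" was registered
+ * as the Linux "hcd" pointer as described above ("my_port_callback" is a
+ * hypothetical name):
+ *
+ *   static void my_port_callback(struct cvmx_usb_state *usb,
+ *                                enum cvmx_usb_callback reason,
+ *                                enum cvmx_usb_complete status,
+ *                                int pipe_handle, int submit_handle,
+ *                                int bytes_transferred, void *user_data)
+ *   {
+ *       struct usb_hcd *hcd = user_data;
+ *
+ *       // Ask the USB core to poll the root hub for the change
+ *       usb_hcd_poll_rh_status(hcd);
+ *   }
+ *
+ * registered once at enable time with:
+ *
+ *   cvmx_usb_register_callback(state, CVMX_USB_CALLBACK_PORT_CHANGED,
+ *                              my_port_callback, hcd);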
+ *
+ * == Complete callback ==
+ *
+ * The completion callback prototype needs to look as follows:
+ *
+ * void complete_callback(struct cvmx_usb_state *usb,
+ * enum cvmx_usb_callback reason,
+ * enum cvmx_usb_complete status,
+ * int pipe_handle,
+ * int submit_handle,
+ * int bytes_transferred,
+ * void *user_data);
+ * - "usb" is the struct cvmx_usb_state for the port.
+ * - "reason" will always be CVMX_USB_CALLBACK_TRANSFER_COMPLETE.
+ * - "status" will be one of the cvmx_usb_complete enumerations.
+ * - "pipe_handle" is the handle to the pipe the transaction
+ * was originally submitted on.
+ * - "submit_handle" is the handle returned by the original
+ * cvmx_usb_submit_* call.
+ * - "bytes_transferred" is the number of bytes successfully
+ * transferred in the transaction. This will be zero on most
+ * error conditions.
+ * - "user_data" is the void pointer originally passed along
+ * with the callback. Use this for any state information you
+ * need. For example, the Linux "urb" is stored in here in the
+ * Linux driver.
+ *
+ * In general your callback handler should use "status" and
+ * "bytes_transferred" to tell the operating system the how the
+ * transaction completed. Normally the pipe is not changed in
+ * this callback.
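+ *
+ * A sketch of a completion handler following these conventions; how
+ * "status" maps onto operating system error codes is driver specific
+ * and only hinted at here ("my_complete_callback" is a hypothetical
+ * name):
+ *
+ *   static void my_complete_callback(struct cvmx_usb_state *usb,
+ *                                    enum cvmx_usb_callback reason,
+ *                                    enum cvmx_usb_complete status,
+ *                                    int pipe_handle, int submit_handle,
+ *                                    int bytes_transferred, void *user_data)
+ *   {
+ *       struct urb *urb = user_data;
+ *
+ *       urb->actual_length = bytes_transferred;
+ *       // Translate "status" into an errno and give the urb back to
+ *       // the USB core here (status handling omitted).
+ *   }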
+ *
+ * == Canceling transactions ==
+ *
+ * When a transaction is cancelled using cvmx_usb_cancel*(), the
+ * actual length of time until the complete callback is called
+ * can vary greatly. It may be called before cvmx_usb_cancel*()
+ * returns, or it may be called a number of usb frames in the
+ * future once the hardware frees the transaction. In either of
+ * these cases, the complete handler will receive
+ * CVMX_USB_COMPLETE_CANCEL.
+ *
+ * == Handling pipes ==
+ *
+ * USB "pipes" is a software construct created by this API to
+ * enable the ordering of usb transactions to a device endpoint.
+ * Octeon's underlying hardware doesn't have any concept
+ * equivalent to "pipes". The hardware instead has eight
+ * channels that can be used simultaneously to have up to eight
+ * transactions in progress at the same time. In order to maintain
+ * ordering in a pipe, the transactions for a pipe will only be
+ * active in one hardware channel at a time. From an API user's
+ * perspective, this doesn't matter but it can be helpful to
+ * keep this in mind when you are probing hardware while
+ * debugging.
+ *
+ * Also keep in mind that usb transactions contain state
+ * information about the previous transaction to the same
+ * endpoint. Each transaction has a PID toggle that changes 0/1
+ * between each sub packet. This is maintained in the pipe data
+ * structures. For this reason, you generally cannot create and
+ * destroy a pipe for every transaction. A sequence of
+ * transactions to the same endpoint must use the same pipe.
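+ *
+ * For example, two bulk OUT transfers to the same endpoint should share
+ * one pipe (a sketch; the addresses, sizes and callback are
+ * placeholders):
+ *
+ *   int pipe = cvmx_usb_open_pipe(state, 0, device_addr, endpoint_num,
+ *                                 CVMX_USB_SPEED_HIGH, 512,
+ *                                 CVMX_USB_TRANSFER_BULK,
+ *                                 CVMX_USB_DIRECTION_OUT,
+ *                                 0, 0, 0, 0);
+ *
+ *   cvmx_usb_submit_bulk(state, pipe, buffer1_phys, length1,
+ *                        my_complete_callback, user_data1);
+ *   cvmx_usb_submit_bulk(state, pipe, buffer2_phys, length2,
+ *                        my_complete_callback, user_data2);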
+ *
+ * == Root Hub ==
+ *
+ * Some operating systems view the usb root port as a normal usb
+ * hub. These systems attempt to control the root hub with
+ * messages similar to the usb 2.0 spec for hub control and
+ * status. For these systems it may be necessary to write
+ * a function to decode standard usb control messages into
+ * equivalent cvmx-usb API calls.
+ *
+ * == Interrupts ==
+ *
+ * If you plan on using usb interrupts, cvmx_usb_poll() must be
+ * called on every usb interrupt. It will read the usb state,
+ * call any needed callbacks, and schedule transactions as
+ * needed. Your device driver needs only to hookup an interrupt
+ * handler and call cvmx_usb_poll(). Octeon's usb port 0 causes
+ * CIU bit CIU_INT*_SUM0[USB] to be set (bit 56). For port 1,
+ * CIU bit CIU_INT_SUM1[USB1] is set (bit 17). How these bits
+ * are turned into interrupt numbers is operating system
+ * specific. For Linux, there are the convenient defines
+ * OCTEON_IRQ_USB0 and OCTEON_IRQ_USB1 for the IRQ numbers.
+ *
+ * If you aren't using interrupts, simply call cvmx_usb_poll()
+ * in your main processing loop.
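+ *
+ * A sketch of an interrupt handler for the interrupt driven case
+ * ("my_irq_handler" is a hypothetical name; locking as discussed above
+ * is omitted):
+ *
+ *   static irqreturn_t my_irq_handler(int irq, void *arg)
+ *   {
+ *       struct cvmx_usb_state *state = arg;
+ *
+ *       cvmx_usb_poll(state);
+ *       return IRQ_HANDLED;
+ *   }
+ *
+ * registered against OCTEON_IRQ_USB0 (or OCTEON_IRQ_USB1) using
+ * request_irq().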
+ */
+
+#ifndef __CVMX_USB_H__
+#define __CVMX_USB_H__
+
+/**
+ * enum cvmx_usb_speed - the possible USB device speeds
+ *
+ * @CVMX_USB_SPEED_HIGH: Device is operating at 480Mbps
+ * @CVMX_USB_SPEED_FULL: Device is operating at 12Mbps
+ * @CVMX_USB_SPEED_LOW: Device is operating at 1.5Mbps
+ */
+enum cvmx_usb_speed {
+ CVMX_USB_SPEED_HIGH = 0,
+ CVMX_USB_SPEED_FULL = 1,
+ CVMX_USB_SPEED_LOW = 2,
+};
+
+/**
+ * enum cvmx_usb_transfer - the possible USB transfer types
+ *
+ * @CVMX_USB_TRANSFER_CONTROL: USB transfer type control for hub and status
+ * transfers
+ * @CVMX_USB_TRANSFER_ISOCHRONOUS: USB transfer type isochronous for low
+ * priority periodic transfers
+ * @CVMX_USB_TRANSFER_BULK: USB transfer type bulk for large low priority
+ * transfers
+ * @CVMX_USB_TRANSFER_INTERRUPT: USB transfer type interrupt for high priority
+ * periodic transfers
+ */
+enum cvmx_usb_transfer {
+ CVMX_USB_TRANSFER_CONTROL = 0,
+ CVMX_USB_TRANSFER_ISOCHRONOUS = 1,
+ CVMX_USB_TRANSFER_BULK = 2,
+ CVMX_USB_TRANSFER_INTERRUPT = 3,
+};
+
+/**
+ * enum cvmx_usb_direction - the transfer directions
+ *
+ * @CVMX_USB_DIRECTION_OUT: Data is transferring from Octeon to the device/host
+ * @CVMX_USB_DIRECTION_IN: Data is transferring from the device/host to Octeon
+ */
+enum cvmx_usb_direction {
+ CVMX_USB_DIRECTION_OUT,
+ CVMX_USB_DIRECTION_IN,
+};
+
+/**
+ * enum cvmx_usb_complete - possible callback function status codes
+ *
+ * @CVMX_USB_COMPLETE_SUCCESS: The transaction / operation finished without
+ * any errors
+ * @CVMX_USB_COMPLETE_SHORT: FIXME: This is currently not implemented
+ * @CVMX_USB_COMPLETE_CANCEL: The transaction was canceled while in flight by
+ * a user call to cvmx_usb_cancel
+ * @CVMX_USB_COMPLETE_ERROR: The transaction aborted with an unexpected
+ * error status
+ * @CVMX_USB_COMPLETE_STALL: The transaction received a USB STALL response
+ * from the device
+ * @CVMX_USB_COMPLETE_XACTERR: The transaction failed with an error from the
+ * device even after a number of retries
+ * @CVMX_USB_COMPLETE_DATATGLERR: The transaction failed with a data toggle
+ * error even after a number of retries
+ * @CVMX_USB_COMPLETE_BABBLEERR: The transaction failed with a babble error
+ * @CVMX_USB_COMPLETE_FRAMEERR: The transaction failed with a frame error
+ * even after a number of retries
+ */
+enum cvmx_usb_complete {
+ CVMX_USB_COMPLETE_SUCCESS,
+ CVMX_USB_COMPLETE_SHORT,
+ CVMX_USB_COMPLETE_CANCEL,
+ CVMX_USB_COMPLETE_ERROR,
+ CVMX_USB_COMPLETE_STALL,
+ CVMX_USB_COMPLETE_XACTERR,
+ CVMX_USB_COMPLETE_DATATGLERR,
+ CVMX_USB_COMPLETE_BABBLEERR,
+ CVMX_USB_COMPLETE_FRAMEERR,
+};
+
+/**
+ * struct cvmx_usb_port_status - the USB port status information
+ *
+ * @port_enabled: 1 = USB port is enabled, 0 = disabled
+ * @port_over_current: 1 = Over current detected, 0 = Over current not
+ * detected. Octeon doesn't support over current detection.
+ * @port_powered: 1 = Port power is being supplied to the device, 0 =
+ * power is off. Octeon doesn't support turning port power
+ * off.
+ * @port_speed: Current port speed.
+ * @connected: 1 = A device is connected to the port, 0 = No device is
+ * connected.
+ * @connect_change: 1 = Device connected state changed since the last set
+ * status call.
+ */
+struct cvmx_usb_port_status {
+ uint32_t reserved : 25;
+ uint32_t port_enabled : 1;
+ uint32_t port_over_current : 1;
+ uint32_t port_powered : 1;
+ enum cvmx_usb_speed port_speed : 2;
+ uint32_t connected : 1;
+ uint32_t connect_change : 1;
+};
+
+/**
+ * union cvmx_usb_control_header - the structure of a Control packet header
+ *
+ * @s.request_type: Bit 7 tells the direction: 1=IN, 0=OUT
+ * @s.request: The standard usb request to make
+ * @s.value: Value parameter for the request in little endian format
+ * @s.index: Index for the request in little endian format
+ * @s.length: Length of the data associated with this request in
+ * little endian format
+ */
+union cvmx_usb_control_header {
+ uint64_t u64;
+ struct {
+ uint64_t request_type : 8;
+ uint64_t request : 8;
+ uint64_t value : 16;
+ uint64_t index : 16;
+ uint64_t length : 16;
+ } s;
+};
+
+/**
+ * struct cvmx_usb_iso_packet - descriptor for Isochronous packets
+ *
+ * @offset: This is the offset in bytes into the main buffer where this data
+ * is stored.
+ * @length: This is the length in bytes of the data.
+ * @status: This is the status of this individual packet transfer.
+ */
+struct cvmx_usb_iso_packet {
+ int offset;
+ int length;
+ enum cvmx_usb_complete status;
+};
+
+/**
+ * enum cvmx_usb_callback - possible callback reasons for the USB API
+ *
+ * @CVMX_USB_CALLBACK_TRANSFER_COMPLETE: A callback of this type is called when
+ * a submitted transfer completes. The
+ * completion callback will be called even
+ * if the transfer fails or is canceled.
+ * The status parameter will contain
+ * details of why the callback was called.
+ * @CVMX_USB_CALLBACK_PORT_CHANGED: The status of the port changed. For
+ * example, someone may have plugged a
+ * device in. The status parameter
+ * contains CVMX_USB_COMPLETE_SUCCESS. Use
+ * cvmx_usb_get_status() to get the new
+ * port status.
+ * @__CVMX_USB_CALLBACK_END: Do not use. Used internally for array
+ * bounds.
+ */
+enum cvmx_usb_callback {
+ CVMX_USB_CALLBACK_TRANSFER_COMPLETE,
+ CVMX_USB_CALLBACK_PORT_CHANGED,
+ __CVMX_USB_CALLBACK_END
+};
+
+/**
+ * USB state internal data. The contents of this structure
+ * may change in future SDKs. No data in it should be referenced
+ * by users of this API.
+ */
+struct cvmx_usb_state {
+ char data[65536];
+};
+
+/**
+ * USB callback functions are always of the following type.
+ * The parameters are as follows:
+ * - state = USB device state populated by
+ * cvmx_usb_initialize().
+ * - reason = The enum cvmx_usb_callback used to register
+ * the callback.
+ * - status = The enum cvmx_usb_complete representing the
+ * status code of a transaction.
+ * - pipe_handle = The Pipe that caused this callback, or
+ * -1 if this callback wasn't associated with a pipe.
+ * - submit_handle = Transfer submit handle causing this
+ * callback, or -1 if this callback wasn't associated
+ * with a transfer.
+ * - bytes_transferred = Actual number of bytes transferred.
+ * - user_data = The user pointer supplied to the
+ * function cvmx_usb_submit() or
+ * cvmx_usb_register_callback() */
+typedef void (*cvmx_usb_callback_func_t)(struct cvmx_usb_state *state,
+ enum cvmx_usb_callback reason,
+ enum cvmx_usb_complete status,
+ int pipe_handle, int submit_handle,
+ int bytes_transferred, void *user_data);
+
+/**
+ * enum cvmx_usb_initialize_flags - flags to pass the initialization function
+ *
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI: The USB port uses a 12MHz crystal
+ * as clock source at USB_XO and
+ * USB_XI.
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND: The USB port uses 12/24/48MHz 2.5V
+ * board clock source at USB_XO.
+ * USB_XI should be tied to GND.
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_AUTO: Automatically determine clock type
+ * based on function in
+ * cvmx-helper-board.c.
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK: Mask for clock speed field
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ: Speed of reference clock or
+ * crystal
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ: Speed of reference clock
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ: Speed of reference clock
+ * @CVMX_USB_INITIALIZE_FLAGS_NO_DMA: Disable DMA and use polled I/O for
+ * USB data transfers
+ */
+enum cvmx_usb_initialize_flags {
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI = 1 << 0,
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND = 1 << 1,
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_AUTO = 0,
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK = 3 << 3,
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ = 1 << 3,
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ = 2 << 3,
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ = 3 << 3,
+ /* Bits 3-4 used to encode the clock frequency */
+ CVMX_USB_INITIALIZE_FLAGS_NO_DMA = 1 << 5,
+};
+
+/**
+ * enum cvmx_usb_pipe_flags - flags for passing when a pipe is created.
+ * Currently no flags need to be passed.
+ *
+ * @__CVMX_USB_PIPE_FLAGS_OPEN: Used internally to determine if a pipe is
+ * open. Do not use.
+ * @__CVMX_USB_PIPE_FLAGS_SCHEDULED: Used internally to determine if a pipe is
+ * actively using hardware. Do not use.
+ * @__CVMX_USB_PIPE_FLAGS_NEED_PING: Used internally to determine if a high
+ * speed pipe is in the ping state. Do not
+ * use.
+ */
+enum cvmx_usb_pipe_flags {
+ __CVMX_USB_PIPE_FLAGS_OPEN = 1 << 16,
+ __CVMX_USB_PIPE_FLAGS_SCHEDULED = 1 << 17,
+ __CVMX_USB_PIPE_FLAGS_NEED_PING = 1 << 18,
+};
+
+extern int cvmx_usb_get_num_ports(void);
+extern int cvmx_usb_initialize(struct cvmx_usb_state *state, int usb_port_number,
+ enum cvmx_usb_initialize_flags flags);
+extern int cvmx_usb_shutdown(struct cvmx_usb_state *state);
+extern int cvmx_usb_enable(struct cvmx_usb_state *state);
+extern int cvmx_usb_disable(struct cvmx_usb_state *state);
+extern struct cvmx_usb_port_status cvmx_usb_get_status(struct cvmx_usb_state *state);
+extern void cvmx_usb_set_status(struct cvmx_usb_state *state, struct cvmx_usb_port_status port_status);
+extern int cvmx_usb_open_pipe(struct cvmx_usb_state *state,
+ enum cvmx_usb_pipe_flags flags,
+ int device_addr, int endpoint_num,
+ enum cvmx_usb_speed device_speed, int max_packet,
+ enum cvmx_usb_transfer transfer_type,
+ enum cvmx_usb_direction transfer_dir, int interval,
+ int multi_count, int hub_device_addr,
+ int hub_port);
+extern int cvmx_usb_submit_bulk(struct cvmx_usb_state *state, int pipe_handle,
+ uint64_t buffer, int buffer_length,
+ cvmx_usb_callback_func_t callback,
+ void *user_data);
+extern int cvmx_usb_submit_interrupt(struct cvmx_usb_state *state, int pipe_handle,
+ uint64_t buffer, int buffer_length,
+ cvmx_usb_callback_func_t callback,
+ void *user_data);
+extern int cvmx_usb_submit_control(struct cvmx_usb_state *state, int pipe_handle,
+ uint64_t control_header,
+ uint64_t buffer, int buffer_length,
+ cvmx_usb_callback_func_t callback,
+ void *user_data);
+
+/**
+ * enum cvmx_usb_isochronous_flags - flags to pass the
+ * cvmx_usb_submit_isochronous() function.
+ *
+ * @CVMX_USB_ISOCHRONOUS_FLAGS_ALLOW_SHORT: Do not return an error if a transfer
+ * is less than the maximum packet size
+ * of the device.
+ * @CVMX_USB_ISOCHRONOUS_FLAGS_ASAP: Schedule the transaction as soon as
+ * possible.
+ */
+enum cvmx_usb_isochronous_flags {
+ CVMX_USB_ISOCHRONOUS_FLAGS_ALLOW_SHORT = 1 << 0,
+ CVMX_USB_ISOCHRONOUS_FLAGS_ASAP = 1 << 1,
+};
+
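+/*
+ * Example use of cvmx_usb_submit_isochronous() (a sketch, not taken from
+ * a real driver; "pipe", "iso_buffer_phys", "length", the callback and
+ * "user_data" are placeholders supplied by the caller):
+ *
+ *   struct cvmx_usb_iso_packet packet = {
+ *       .offset = 0,
+ *       .length = length,
+ *   };
+ *
+ *   cvmx_usb_submit_isochronous(state, pipe, 0,
+ *                               CVMX_USB_ISOCHRONOUS_FLAGS_ASAP,
+ *                               1, &packet, iso_buffer_phys, length,
+ *                               my_complete_callback, user_data);
+ */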
+extern int cvmx_usb_submit_isochronous(struct cvmx_usb_state *state, int pipe_handle,
+ int start_frame, int flags,
+ int number_packets,
+ struct cvmx_usb_iso_packet packets[],
+ uint64_t buffer, int buffer_length,
+ cvmx_usb_callback_func_t callback,
+ void *user_data);
+extern int cvmx_usb_cancel(struct cvmx_usb_state *state, int pipe_handle,
+ int submit_handle);
+extern int cvmx_usb_cancel_all(struct cvmx_usb_state *state, int pipe_handle);
+extern int cvmx_usb_close_pipe(struct cvmx_usb_state *state, int pipe_handle);
+extern int cvmx_usb_register_callback(struct cvmx_usb_state *state,
+ enum cvmx_usb_callback reason,
+ cvmx_usb_callback_func_t callback,
+ void *user_data);
+extern int cvmx_usb_get_frame_number(struct cvmx_usb_state *state);
+extern int cvmx_usb_poll(struct cvmx_usb_state *state);
+
+#endif /* __CVMX_USB_H__ */
diff --git a/drivers/staging/octeon-usb/cvmx-usbcx-defs.h b/drivers/staging/octeon-usb/cvmx-usbcx-defs.h
new file mode 100644
index 0000000..d349d77
--- /dev/null
+++ b/drivers/staging/octeon-usb/cvmx-usbcx-defs.h
@@ -0,0 +1,1528 @@
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION
+ * OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-usbcx-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon usbcx.
+ *
+ */
+#ifndef __CVMX_USBCX_TYPEDEFS_H__
+#define __CVMX_USBCX_TYPEDEFS_H__
+
+#define CVMX_USBCXBASE 0x00016F0010000000ull
+#define CVMX_USBCXREG1(reg, bid) \
+ (CVMX_ADD_IO_SEG(CVMX_USBCXBASE | reg) + \
+ ((bid) & 1) * 0x100000000000ull)
+#define CVMX_USBCXREG2(reg, bid, off) \
+ (CVMX_ADD_IO_SEG(CVMX_USBCXBASE | reg) + \
+ (((off) & 7) + ((bid) & 1) * 0x8000000000ull) * 32)
+
+#define CVMX_USBCX_GAHBCFG(bid) CVMX_USBCXREG1(0x008, bid)
+#define CVMX_USBCX_GHWCFG3(bid) CVMX_USBCXREG1(0x04c, bid)
+#define CVMX_USBCX_GINTMSK(bid) CVMX_USBCXREG1(0x018, bid)
+#define CVMX_USBCX_GINTSTS(bid) CVMX_USBCXREG1(0x014, bid)
+#define CVMX_USBCX_GNPTXFSIZ(bid) CVMX_USBCXREG1(0x028, bid)
+#define CVMX_USBCX_GNPTXSTS(bid) CVMX_USBCXREG1(0x02c, bid)
+#define CVMX_USBCX_GOTGCTL(bid) CVMX_USBCXREG1(0x000, bid)
+#define CVMX_USBCX_GRSTCTL(bid) CVMX_USBCXREG1(0x010, bid)
+#define CVMX_USBCX_GRXFSIZ(bid) CVMX_USBCXREG1(0x024, bid)
+#define CVMX_USBCX_GRXSTSPH(bid) CVMX_USBCXREG1(0x020, bid)
+#define CVMX_USBCX_GUSBCFG(bid) CVMX_USBCXREG1(0x00c, bid)
+#define CVMX_USBCX_HAINT(bid) CVMX_USBCXREG1(0x414, bid)
+#define CVMX_USBCX_HAINTMSK(bid) CVMX_USBCXREG1(0x418, bid)
+#define CVMX_USBCX_HCCHARX(off, bid) CVMX_USBCXREG2(0x500, bid, off)
+#define CVMX_USBCX_HCFG(bid) CVMX_USBCXREG1(0x400, bid)
+#define CVMX_USBCX_HCINTMSKX(off, bid) CVMX_USBCXREG2(0x50c, bid, off)
+#define CVMX_USBCX_HCINTX(off, bid) CVMX_USBCXREG2(0x508, bid, off)
+#define CVMX_USBCX_HCSPLTX(off, bid) CVMX_USBCXREG2(0x504, bid, off)
+#define CVMX_USBCX_HCTSIZX(off, bid) CVMX_USBCXREG2(0x510, bid, off)
+#define CVMX_USBCX_HFIR(bid) CVMX_USBCXREG1(0x404, bid)
+#define CVMX_USBCX_HFNUM(bid) CVMX_USBCXREG1(0x408, bid)
+#define CVMX_USBCX_HPRT(bid) CVMX_USBCXREG1(0x440, bid)
+#define CVMX_USBCX_HPTXFSIZ(bid) CVMX_USBCXREG1(0x100, bid)
+#define CVMX_USBCX_HPTXSTS(bid) CVMX_USBCXREG1(0x410, bid)
+
+/**
+ * cvmx_usbc#_gahbcfg
+ *
+ * Core AHB Configuration Register (GAHBCFG)
+ *
+ * This register can be used to configure the core after power-on or a change in
+ * mode of operation. This register mainly contains AHB system-related
+ * configuration parameters. The AHB is the processor interface to the O2P USB
+ * core. In general, software need not know about this interface except to
+ * program the values as specified.
+ *
+ * The application must program this register as part of the O2P USB core
+ * initialization. Do not change this register after the initial programming.
+ */
+union cvmx_usbcx_gahbcfg {
+ uint32_t u32;
+ /**
+ * struct cvmx_usbcx_gahbcfg_s
+ * @ptxfemplvl: Periodic TxFIFO Empty Level (PTxFEmpLvl)
+ * Software should set this bit to 0x1.
+ * Indicates when the Periodic TxFIFO Empty Interrupt bit in the
+ * Core Interrupt register (GINTSTS.PTxFEmp) is triggered. This
+ * bit is used only in Slave mode.
+ * * 1'b0: GINTSTS.PTxFEmp interrupt indicates that the Periodic
+ * TxFIFO is half empty
+ * * 1'b1: GINTSTS.PTxFEmp interrupt indicates that the Periodic
+ * TxFIFO is completely empty
+ * @nptxfemplvl: Non-Periodic TxFIFO Empty Level (NPTxFEmpLvl)
+ * Software should set this bit to 0x1.
+ * Indicates when the Non-Periodic TxFIFO Empty Interrupt bit in
+ * the Core Interrupt register (GINTSTS.NPTxFEmp) is triggered.
+ * This bit is used only in Slave mode.
+ * * 1'b0: GINTSTS.NPTxFEmp interrupt indicates that the Non-
+ * Periodic TxFIFO is half empty
+ * * 1'b1: GINTSTS.NPTxFEmp interrupt indicates that the Non-
+ * Periodic TxFIFO is completely empty
+ * @dmaen: DMA Enable (DMAEn)
+ * * 1'b0: Core operates in Slave mode
+ * * 1'b1: Core operates in a DMA mode
+ * @hbstlen: Burst Length/Type (HBstLen)
+ * This field has no effect and should be left as 0x0.
+ * @glblintrmsk: Global Interrupt Mask (GlblIntrMsk)
+ * Software should set this field to 0x1.
+ * The application uses this bit to mask or unmask the interrupt
+ * line assertion to itself. Irrespective of this bit's setting,
+ * the interrupt status registers are updated by the core.
+ * * 1'b0: Mask the interrupt assertion to the application.
+ * * 1'b1: Unmask the interrupt assertion to the application.
+ */
+ struct cvmx_usbcx_gahbcfg_s {
+ uint32_t reserved_9_31 : 23;
+ uint32_t ptxfemplvl : 1;
+ uint32_t nptxfemplvl : 1;
+ uint32_t reserved_6_6 : 1;
+ uint32_t dmaen : 1;
+ uint32_t hbstlen : 4;
+ uint32_t glblintrmsk : 1;
+ } s;
+};
+
+/**
+ * cvmx_usbc#_ghwcfg3
+ *
+ * User HW Config3 Register (GHWCFG3)
+ *
+ * This register contains the configuration options of the O2P USB core.
+ */
+union cvmx_usbcx_ghwcfg3 {
+ uint32_t u32;
+ /**
+ * struct cvmx_usbcx_ghwcfg3_s
+ * @dfifodepth: DFIFO Depth (DfifoDepth)
+ * This value is in terms of 32-bit words.
+ * * Minimum value is 32
+ * * Maximum value is 32768
+ * @ahbphysync: AHB and PHY Synchronous (AhbPhySync)
+ * Indicates whether AHB and PHY clocks are synchronous to
+ * each other.
+ * * 1'b0: No
+ * * 1'b1: Yes
+ * This bit is tied to 1.
+ * @rsttype: Reset Style for Clocked always Blocks in RTL (RstType)
+ * * 1'b0: Asynchronous reset is used in the core
+ * * 1'b1: Synchronous reset is used in the core
+ * @optfeature: Optional Features Removed (OptFeature)
+ * Indicates whether the User ID register, GPIO interface ports,
+ * and SOF toggle and counter ports were removed for gate count
+ * optimization.
+ * @vendor_control_interface_support: Vendor Control Interface Support
+ * * 1'b0: Vendor Control Interface is not available on the core.
+ * * 1'b1: Vendor Control Interface is available.
+ * @i2c_selection: I2C Selection
+ * * 1'b0: I2C Interface is not available on the core.
+ * * 1'b1: I2C Interface is available on the core.
+ * @otgen: OTG Function Enabled (OtgEn)
+ * The application uses this bit to indicate the O2P USB core's
+ * OTG capabilities.
+ * * 1'b0: Not OTG capable
+ * * 1'b1: OTG Capable
+ * @pktsizewidth: Width of Packet Size Counters (PktSizeWidth)
+ * * 3'b000: 4 bits
+ * * 3'b001: 5 bits
+ * * 3'b010: 6 bits
+ * * 3'b011: 7 bits
+ * * 3'b100: 8 bits
+ * * 3'b101: 9 bits
+ * * 3'b110: 10 bits
+ * * Others: Reserved
+ * @xfersizewidth: Width of Transfer Size Counters (XferSizeWidth)
+ * * 4'b0000: 11 bits
+ * * 4'b0001: 12 bits
+ * - ...
+ * * 4'b1000: 19 bits
+ * * Others: Reserved
+ */
+ struct cvmx_usbcx_ghwcfg3_s {
+ uint32_t dfifodepth : 16;
+ uint32_t reserved_13_15 : 3;
+ uint32_t ahbphysync : 1;
+ uint32_t rsttype : 1;
+ uint32_t optfeature : 1;
+ uint32_t vendor_control_interface_support : 1;
+ uint32_t i2c_selection : 1;
+ uint32_t otgen : 1;
+ uint32_t pktsizewidth : 3;
+ uint32_t xfersizewidth : 4;
+ } s;
+};
+
+/**
+ * cvmx_usbc#_gintmsk
+ *
+ * Core Interrupt Mask Register (GINTMSK)
+ *
+ * This register works with the Core Interrupt register to interrupt the
+ * application. When an interrupt bit is masked, the interrupt associated with
+ * that bit will not be generated. However, the Core Interrupt (GINTSTS)
+ * register bit corresponding to that interrupt will still be set.
+ * Mask interrupt: 1'b0, Unmask interrupt: 1'b1
+ */
+union cvmx_usbcx_gintmsk {
+ uint32_t u32;
+ /**
+ * struct cvmx_usbcx_gintmsk_s
+ * @wkupintmsk: Resume/Remote Wakeup Detected Interrupt Mask
+ * (WkUpIntMsk)
+ * @sessreqintmsk: Session Request/New Session Detected Interrupt Mask
+ * (SessReqIntMsk)
+ * @disconnintmsk: Disconnect Detected Interrupt Mask (DisconnIntMsk)
+ * @conidstschngmsk: Connector ID Status Change Mask (ConIDStsChngMsk)
+ * @ptxfempmsk: Periodic TxFIFO Empty Mask (PTxFEmpMsk)
+ * @hchintmsk: Host Channels Interrupt Mask (HChIntMsk)
+ * @prtintmsk: Host Port Interrupt Mask (PrtIntMsk)
+ * @fetsuspmsk: Data Fetch Suspended Mask (FetSuspMsk)
+ * @incomplpmsk: Incomplete Periodic Transfer Mask (incomplPMsk)
+ * Incomplete Isochronous OUT Transfer Mask
+ * (incompISOOUTMsk)
+ * @incompisoinmsk: Incomplete Isochronous IN Transfer Mask
+ * (incompISOINMsk)
+ * @oepintmsk: OUT Endpoints Interrupt Mask (OEPIntMsk)
+ * @inepintmsk: IN Endpoints Interrupt Mask (INEPIntMsk)
+ * @epmismsk: Endpoint Mismatch Interrupt Mask (EPMisMsk)
+ * @eopfmsk: End of Periodic Frame Interrupt Mask (EOPFMsk)
+ * @isooutdropmsk: Isochronous OUT Packet Dropped Interrupt Mask
+ * (ISOOutDropMsk)
+ * @enumdonemsk: Enumeration Done Mask (EnumDoneMsk)
+ * @usbrstmsk: USB Reset Mask (USBRstMsk)
+ * @usbsuspmsk: USB Suspend Mask (USBSuspMsk)
+ * @erlysuspmsk: Early Suspend Mask (ErlySuspMsk)
+ * @i2cint: I2C Interrupt Mask (I2CINT)
+ * @ulpickintmsk: ULPI Carkit Interrupt Mask (ULPICKINTMsk)
+ * I2C Carkit Interrupt Mask (I2CCKINTMsk)
+ * @goutnakeffmsk: Global OUT NAK Effective Mask (GOUTNakEffMsk)
+ * @ginnakeffmsk: Global Non-Periodic IN NAK Effective Mask
+ * (GINNakEffMsk)
+ * @nptxfempmsk: Non-Periodic TxFIFO Empty Mask (NPTxFEmpMsk)
+ * @rxflvlmsk: Receive FIFO Non-Empty Mask (RxFLvlMsk)
+ * @sofmsk: Start of (micro)Frame Mask (SofMsk)
+ * @otgintmsk: OTG Interrupt Mask (OTGIntMsk)
+ * @modemismsk: Mode Mismatch Interrupt Mask (ModeMisMsk)
+ */
+ struct cvmx_usbcx_gintmsk_s {
+ uint32_t wkupintmsk : 1;
+ uint32_t sessreqintmsk : 1;
+ uint32_t disconnintmsk : 1;
+ uint32_t conidstschngmsk : 1;
+ uint32_t reserved_27_27 : 1;
+ uint32_t ptxfempmsk : 1;
+ uint32_t hchintmsk : 1;
+ uint32_t prtintmsk : 1;
+ uint32_t reserved_23_23 : 1;
+ uint32_t fetsuspmsk : 1;
+ uint32_t incomplpmsk : 1;
+ uint32_t incompisoinmsk : 1;
+ uint32_t oepintmsk : 1;
+ uint32_t inepintmsk : 1;
+ uint32_t epmismsk : 1;
+ uint32_t reserved_16_16 : 1;
+ uint32_t eopfmsk : 1;
+ uint32_t isooutdropmsk : 1;
+ uint32_t enumdonemsk : 1;
+ uint32_t usbrstmsk : 1;
+ uint32_t usbsuspmsk : 1;
+ uint32_t erlysuspmsk : 1;
+ uint32_t i2cint : 1;
+ uint32_t ulpickintmsk : 1;
+ uint32_t goutnakeffmsk : 1;
+ uint32_t ginnakeffmsk : 1;
+ uint32_t nptxfempmsk : 1;
+ uint32_t rxflvlmsk : 1;
+ uint32_t sofmsk : 1;
+ uint32_t otgintmsk : 1;
+ uint32_t modemismsk : 1;
+ uint32_t reserved_0_0 : 1;
+ } s;
+};
+
+/**
+ * cvmx_usbc#_gintsts
+ *
+ * Core Interrupt Register (GINTSTS)
+ *
+ * This register interrupts the application for system-level events in the
+ * current mode of operation (Device mode or Host mode). It is shown in
+ * Interrupt. Some of the bits in this register are valid only in Host mode,
+ * while others are valid in Device mode only. This register also indicates the
+ * current mode of operation. In order to clear the interrupt status bits of
+ * type R_SS_WC, the application must write 1'b1 into the bit. The FIFO status
+ * interrupts are read only; once software reads from or writes to the FIFO
+ * while servicing these interrupts, FIFO interrupt conditions are cleared
+ * automatically.
+ */
+union cvmx_usbcx_gintsts {
+ uint32_t u32;
+ /**
+ * struct cvmx_usbcx_gintsts_s
+ * @wkupint: Resume/Remote Wakeup Detected Interrupt (WkUpInt)
+ * In Device mode, this interrupt is asserted when a resume is
+ * detected on the USB. In Host mode, this interrupt is asserted
+ * when a remote wakeup is detected on the USB.
+ * For more information on how to use this interrupt, see "Partial
+ * Power-Down and Clock Gating Programming Model" on
+ * page 353.
+ * @sessreqint: Session Request/New Session Detected Interrupt
+ * (SessReqInt)
+ * In Host mode, this interrupt is asserted when a session request
+ * is detected from the device. In Device mode, this interrupt is
+ * asserted when the utmiotg_bvalid signal goes high.
+ * For more information on how to use this interrupt, see "Partial
+ * Power-Down and Clock Gating Programming Model" on
+ * page 353.
+ * @disconnint: Disconnect Detected Interrupt (DisconnInt)
+ * Asserted when a device disconnect is detected.
+ * @conidstschng: Connector ID Status Change (ConIDStsChng)
+ * The core sets this bit when there is a change in connector ID
+ * status.
+ * @ptxfemp: Periodic TxFIFO Empty (PTxFEmp)
+ * Asserted when the Periodic Transmit FIFO is either half or
+ * completely empty and there is space for at least one entry to be
+ * written in the Periodic Request Queue. The half or completely
+ * empty status is determined by the Periodic TxFIFO Empty Level
+ * bit in the Core AHB Configuration register
+ * (GAHBCFG.PTxFEmpLvl).
+ * @hchint: Host Channels Interrupt (HChInt)
+ * The core sets this bit to indicate that an interrupt is pending
+ * on one of the channels of the core (in Host mode). The
+ * application must read the Host All Channels Interrupt (HAINT)
+ * register to determine the exact number of the channel on which
+ * the interrupt occurred, and then read the corresponding Host
+ * Channel-n Interrupt (HCINTn) register to determine the exact
+ * cause of the interrupt. The application must clear the
+ * appropriate status bit in the HCINTn register to clear this bit.
+ * @prtint: Host Port Interrupt (PrtInt)
+ * The core sets this bit to indicate a change in port status of
+ * one of the O2P USB core ports in Host mode. The application must
+ * read the Host Port Control and Status (HPRT) register to
+ * determine the exact event that caused this interrupt. The
+ * application must clear the appropriate status bit in the Host
+ * Port Control and Status register to clear this bit.
+ * @fetsusp: Data Fetch Suspended (FetSusp)
+ * This interrupt is valid only in DMA mode. This interrupt
+ * indicates that the core has stopped fetching data for IN
+ * endpoints due to the unavailability of TxFIFO space or Request
+ * Queue space. This interrupt is used by the application for an
+ * endpoint mismatch algorithm.
+ * @incomplp: Incomplete Periodic Transfer (incomplP)
+ * In Host mode, the core sets this interrupt bit when there are
+ * incomplete periodic transactions still pending which are
+ * scheduled for the current microframe.
+ * Incomplete Isochronous OUT Transfer (incompISOOUT)
+ * In Device mode, the core sets this interrupt to indicate that
+ * there is at least one isochronous OUT endpoint on which the
+ * transfer is not completed in the current microframe. This
+ * interrupt is asserted along with the End of Periodic Frame
+ * Interrupt (EOPF) bit in this register.
+ * @incompisoin: Incomplete Isochronous IN Transfer (incompISOIN)
+ * The core sets this interrupt to indicate that there is at least
+ * one isochronous IN endpoint on which the transfer is not
+ * completed in the current microframe. This interrupt is asserted
+ * along with the End of Periodic Frame Interrupt (EOPF) bit in
+ * this register.
+ * @oepint: OUT Endpoints Interrupt (OEPInt)
+ * The core sets this bit to indicate that an interrupt is pending
+ * on one of the OUT endpoints of the core (in Device mode). The
+ * application must read the Device All Endpoints Interrupt
+ * (DAINT) register to determine the exact number of the OUT
+ * endpoint on which the interrupt occurred, and then read the
+ * corresponding Device OUT Endpoint-n Interrupt (DOEPINTn)
+ * register to determine the exact cause of the interrupt. The
+ * application must clear the appropriate status bit in the
+ * corresponding DOEPINTn register to clear this bit.
+ * @iepint: IN Endpoints Interrupt (IEPInt)
+ * The core sets this bit to indicate that an interrupt is pending
+ * on one of the IN endpoints of the core (in Device mode). The
+ * application must read the Device All Endpoints Interrupt
+ * (DAINT) register to determine the exact number of the IN
+ * endpoint on which the interrupt occurred, and then read the
+ * corresponding Device IN Endpoint-n Interrupt (DIEPINTn)
+ * register to determine the exact cause of the interrupt. The
+ * application must clear the appropriate status bit in the
+ * corresponding DIEPINTn register to clear this bit.
+ * @epmis: Endpoint Mismatch Interrupt (EPMis)
+ * Indicates that an IN token has been received for a non-periodic
+ * endpoint, but the data for another endpoint is present in the
+ * top of the Non-Periodic Transmit FIFO and the IN endpoint
+ * mismatch count programmed by the application has expired.
+ * @eopf: End of Periodic Frame Interrupt (EOPF)
+ * Indicates that the period specified in the Periodic Frame
+ * Interval field of the Device Configuration register
+ * (DCFG.PerFrInt) has been reached in the current microframe.
+ * @isooutdrop: Isochronous OUT Packet Dropped Interrupt (ISOOutDrop)
+ * The core sets this bit when it fails to write an isochronous OUT
+ * packet into the RxFIFO because the RxFIFO doesn't have
+ * enough space to accommodate a maximum packet size packet
+ * for the isochronous OUT endpoint.
+ * @enumdone: Enumeration Done (EnumDone)
+ * The core sets this bit to indicate that speed enumeration is
+ * complete. The application must read the Device Status (DSTS)
+ * register to obtain the enumerated speed.
+ * @usbrst: USB Reset (USBRst)
+ * The core sets this bit to indicate that a reset is detected on
+ * the USB.
+ * @usbsusp: USB Suspend (USBSusp)
+ * The core sets this bit to indicate that a suspend was detected
+ * on the USB. The core enters the Suspended state when there
+ * is no activity on the phy_line_state_i signal for an extended
+ * period of time.
+ * @erlysusp: Early Suspend (ErlySusp)
+ * The core sets this bit to indicate that an Idle state has been
+ * detected on the USB for 3 ms.
+ * @i2cint: I2C Interrupt (I2CINT)
+ * This bit is always 0x0.
+ * @ulpickint: ULPI Carkit Interrupt (ULPICKINT)
+ * This bit is always 0x0.
+ * @goutnakeff: Global OUT NAK Effective (GOUTNakEff)
+ * Indicates that the Set Global OUT NAK bit in the Device Control
+ * register (DCTL.SGOUTNak), set by the application, has taken
+ * effect in the core. This bit can be cleared by writing the Clear
+ * Global OUT NAK bit in the Device Control register
+ * (DCTL.CGOUTNak).
+ * @ginnakeff: Global IN Non-Periodic NAK Effective (GINNakEff)
+ * Indicates that the Set Global Non-Periodic IN NAK bit in the
+ * Device Control register (DCTL.SGNPInNak), set by the
+ * application, has taken effect in the core. That is, the core has
+ * sampled the Global IN NAK bit set by the application. This bit
+ * can be cleared by clearing the Clear Global Non-Periodic IN
+ * NAK bit in the Device Control register (DCTL.CGNPInNak).
+ * This interrupt does not necessarily mean that a NAK handshake
+ * is sent out on the USB. The STALL bit takes precedence over
+ * the NAK bit.
+ * @nptxfemp: Non-Periodic TxFIFO Empty (NPTxFEmp)
+ * This interrupt is asserted when the Non-Periodic TxFIFO is
+ * either half or completely empty, and there is space for at least
+ * one entry to be written to the Non-Periodic Transmit Request
+ * Queue. The half or completely empty status is determined by
+ * the Non-Periodic TxFIFO Empty Level bit in the Core AHB
+ * Configuration register (GAHBCFG.NPTxFEmpLvl).
+ * @rxflvl: RxFIFO Non-Empty (RxFLvl)
+ * Indicates that there is at least one packet pending to be read
+ * from the RxFIFO.
+ * @sof: Start of (micro)Frame (Sof)
+ * In Host mode, the core sets this bit to indicate that an SOF
+ * (FS), micro-SOF (HS), or Keep-Alive (LS) is transmitted on the
+ * USB. The application must write a 1 to this bit to clear the
+ * interrupt.
+ * In Device mode, the core sets this bit to indicate that an
+ * SOF token has been received on the USB. The application can read
+ * the Device Status register to get the current (micro)frame
+ * number. This interrupt is seen only when the core is operating
+ * at either HS or FS.
+ * @otgint: OTG Interrupt (OTGInt)
+ * The core sets this bit to indicate an OTG protocol event. The
+ * application must read the OTG Interrupt Status (GOTGINT)
+ * register to determine the exact event that caused this
+ * interrupt. The application must clear the appropriate status bit
+ * in the GOTGINT register to clear this bit.
+ * @modemis: Mode Mismatch Interrupt (ModeMis)
+ * The core sets this bit when the application is trying to access:
+ * * A Host mode register, when the core is operating in Device
+ * mode
+ * * A Device mode register, when the core is operating in Host
+ * mode
+ * The register access is completed on the AHB with an OKAY
+ * response, but is ignored by the core internally and doesn't
+ * affect the operation of the core.
+ * @curmod: Current Mode of Operation (CurMod)
+ * Indicates the current mode of operation.
+ * * 1'b0: Device mode
+ * * 1'b1: Host mode
+ */
+ struct cvmx_usbcx_gintsts_s {
+ uint32_t wkupint : 1;
+ uint32_t sessreqint : 1;
+ uint32_t disconnint : 1;
+ uint32_t conidstschng : 1;
+ uint32_t reserved_27_27 : 1;
+ uint32_t ptxfemp : 1;
+ uint32_t hchint : 1;
+ uint32_t prtint : 1;
+ uint32_t reserved_23_23 : 1;
+ uint32_t fetsusp : 1;
+ uint32_t incomplp : 1;
+ uint32_t incompisoin : 1;
+ uint32_t oepint : 1;
+ uint32_t iepint : 1;
+ uint32_t epmis : 1;
+ uint32_t reserved_16_16 : 1;
+ uint32_t eopf : 1;
+ uint32_t isooutdrop : 1;
+ uint32_t enumdone : 1;
+ uint32_t usbrst : 1;
+ uint32_t usbsusp : 1;
+ uint32_t erlysusp : 1;
+ uint32_t i2cint : 1;
+ uint32_t ulpickint : 1;
+ uint32_t goutnakeff : 1;
+ uint32_t ginnakeff : 1;
+ uint32_t nptxfemp : 1;
+ uint32_t rxflvl : 1;
+ uint32_t sof : 1;
+ uint32_t otgint : 1;
+ uint32_t modemis : 1;
+ uint32_t curmod : 1;
+ } s;
+};
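+
+/*
+ * Illustrative sketch, not part of the original header: one way to service
+ * GINTSTS in Host mode using the layout above. usbc_read32()/usbc_write32()
+ * and the gintsts_addr parameter are hypothetical stand-ins for the driver's
+ * real register accessors; only the bit layout is taken from this file.
+ */
+extern uint32_t usbc_read32(uint64_t address);
+extern void usbc_write32(uint64_t address, uint32_t value);
+
+static inline void cvmx_usbcx_gintsts_example(uint64_t gintsts_addr)
+{
+	union cvmx_usbcx_gintsts gintsts;
+	union cvmx_usbcx_gintsts clear;
+
+	gintsts.u32 = usbc_read32(gintsts_addr);
+
+	if (gintsts.s.sof) {
+		/* SOF is R_SS_WC: clear it by writing only this bit back. */
+		clear.u32 = 0;
+		clear.s.sof = 1;
+		usbc_write32(gintsts_addr, clear.u32);
+	}
+
+	if (gintsts.s.hchint) {
+		/*
+		 * Not cleared here: read HAINT to find the channel, then
+		 * clear the status bits in the matching HCINTn register.
+		 */
+	}
+}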
+
+/**
+ * cvmx_usbc#_gnptxfsiz
+ *
+ * Non-Periodic Transmit FIFO Size Register (GNPTXFSIZ)
+ *
+ * The application can program the RAM size and the memory start address for the
+ * Non-Periodic TxFIFO.
+ */
+union cvmx_usbcx_gnptxfsiz {
+ uint32_t u32;
+ /**
+ * struct cvmx_usbcx_gnptxfsiz_s
+ * @nptxfdep: Non-Periodic TxFIFO Depth (NPTxFDep)
+ * This value is in terms of 32-bit words.
+ * Minimum value is 16
+ * Maximum value is 32768
+ * @nptxfstaddr: Non-Periodic Transmit RAM Start Address (NPTxFStAddr)
+ * This field contains the memory start address for Non-Periodic
+ * Transmit FIFO RAM.
+ */
+ struct cvmx_usbcx_gnptxfsiz_s {
+ uint32_t nptxfdep : 16;
+ uint32_t nptxfstaddr : 16;
+ } s;
+};
+
+/**
+ * cvmx_usbc#_gnptxsts
+ *
+ * Non-Periodic Transmit FIFO/Queue Status Register (GNPTXSTS)
+ *
+ * This read-only register contains the free space information for the
+ * Non-Periodic TxFIFO and the Non-Periodic Transmit Request Queue.
+ */
+union cvmx_usbcx_gnptxsts {
+ uint32_t u32;
+ /**
+ * struct cvmx_usbcx_gnptxsts_s
+ * @nptxqtop: Top of the Non-Periodic Transmit Request Queue (NPTxQTop)
+ * Entry in the Non-Periodic Tx Request Queue that is currently
+ * being processed by the MAC.
+ * * Bits [30:27]: Channel/endpoint number
+ * * Bits [26:25]:
+ * - 2'b00: IN/OUT token
+ * - 2'b01: Zero-length transmit packet (device IN/host OUT)
+ * - 2'b10: PING/CSPLIT token
+ * - 2'b11: Channel halt command
+ * * Bit [24]: Terminate (last entry for selected channel/endpoint)
+ * @nptxqspcavail: Non-Periodic Transmit Request Queue Space Available
+ * (NPTxQSpcAvail)
+ * Indicates the amount of free space available in the Non-
+ * Periodic Transmit Request Queue. This queue holds both IN
+ * and OUT requests in Host mode. Device mode has only IN
+ * requests.
+ * * 8'h0: Non-Periodic Transmit Request Queue is full
+ * * 8'h1: 1 location available
+ * * 8'h2: 2 locations available
+ * * n: n locations available (0..8)
+ * * Others: Reserved
+ * @nptxfspcavail: Non-Periodic TxFIFO Space Avail (NPTxFSpcAvail)
+ * Indicates the amount of free space available in the Non-
+ * Periodic TxFIFO.
+ * Values are in terms of 32-bit words.
+ * * 16'h0: Non-Periodic TxFIFO is full
+ * * 16'h1: 1 word available
+ * * 16'h2: 2 words available
+ * * 16'hn: n words available (where 0..32768)
+ * * 16'h8000: 32768 words available
+ * * Others: Reserved
+ */
+ struct cvmx_usbcx_gnptxsts_s {
+ uint32_t reserved_31_31 : 1;
+ uint32_t nptxqtop : 7;
+ uint32_t nptxqspcavail : 8;
+ uint32_t nptxfspcavail : 16;
+ } s;
+};
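+
+/*
+ * Illustrative sketch, not part of the original header: checking GNPTXSTS
+ * before queuing a non-periodic request. "status" is assumed to already hold
+ * a raw value read from this register, and "words_needed" is the request
+ * payload size in 32-bit words.
+ */
+static inline int cvmx_usbcx_gnptxsts_has_room(uint32_t status,
+					       uint32_t words_needed)
+{
+	union cvmx_usbcx_gnptxsts gnptxsts;
+
+	gnptxsts.u32 = status;
+
+	/* Need at least one request queue slot and enough FIFO words. */
+	return gnptxsts.s.nptxqspcavail > 0 &&
+	       gnptxsts.s.nptxfspcavail >= words_needed;
+}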
+
+/**
+ * cvmx_usbc#_grstctl
+ *
+ * Core Reset Register (GRSTCTL)
+ *
+ * The application uses this register to reset various hardware features inside
+ * the core.
+ */
+union cvmx_usbcx_grstctl {
+ uint32_t u32;
+ /**
+ * struct cvmx_usbcx_grstctl_s
+ * @ahbidle: AHB Master Idle (AHBIdle)
+ * Indicates that the AHB Master State Machine is in the IDLE
+ * condition.
+ * @dmareq: DMA Request Signal (DMAReq)
+ * Indicates that the DMA request is in progress. Used for debug.
+ * @txfnum: TxFIFO Number (TxFNum)
+ * This is the FIFO number that must be flushed using the TxFIFO
+ * Flush bit. This field must not be changed until the core clears
+ * the TxFIFO Flush bit.
+ * * 5'h0: Non-Periodic TxFIFO flush
+ * * 5'h1: Periodic TxFIFO 1 flush in Device mode or Periodic
+ * TxFIFO flush in Host mode
+ * * 5'h2: Periodic TxFIFO 2 flush in Device mode
+ * - ...
+ * * 5'hF: Periodic TxFIFO 15 flush in Device mode
+ * * 5'h10: Flush all the Periodic and Non-Periodic TxFIFOs in the
+ * core
+ * @txfflsh: TxFIFO Flush (TxFFlsh)
+ * This bit selectively flushes a single or all transmit FIFOs, but
+ * cannot do so if the core is in the midst of a transaction.
+ * The application must only write this bit after checking that the
+ * core is neither writing to the TxFIFO nor reading from the
+ * TxFIFO.
+ * The application must wait until the core clears this bit before
+ * performing any operations. This bit takes 8 clocks (of phy_clk
+ * or hclk, whichever is slower) to clear.
+ * @rxfflsh: RxFIFO Flush (RxFFlsh)
+ * The application can flush the entire RxFIFO using this bit, but
+ * must first ensure that the core is not in the middle of a
+ * transaction.
+ * The application must only write to this bit after checking that
+ * the core is neither reading from the RxFIFO nor writing to the
+ * RxFIFO.
+ * The application must wait until the bit is cleared before
+ * performing any other operations. This bit will take 8 clocks
+ * (slowest of PHY or AHB clock) to clear.
+ * @intknqflsh: IN Token Sequence Learning Queue Flush (INTknQFlsh)
+ * The application writes this bit to flush the IN Token Sequence
+ * Learning Queue.
+ * @frmcntrrst: Host Frame Counter Reset (FrmCntrRst)
+ * The application writes this bit to reset the (micro)frame number
+ * counter inside the core. When the (micro)frame counter is reset,
+ * the subsequent SOF sent out by the core will have a
+ * (micro)frame number of 0.
+ * @hsftrst: HClk Soft Reset (HSftRst)
+ * The application uses this bit to flush the control logic in the
+ * AHB Clock domain. Only AHB Clock Domain pipelines are reset.
+ * * FIFOs are not flushed with this bit.
+ * * All state machines in the AHB clock domain are reset to the
+ * Idle state after terminating the transactions on the AHB,
+ * following the protocol.
+ * * CSR control bits used by the AHB clock domain state
+ * machines are cleared.
+ * * To clear this interrupt, status mask bits that control the
+ * interrupt status and are generated by the AHB clock domain
+ * state machine are cleared.
+ * * Because interrupt status bits are not cleared, the application
+ * can get the status of any core events that occurred after it set
+ * this bit.
+ * This is a self-clearing bit that the core clears after all
+ * necessary logic is reset in the core. This may take several
+ * clocks, depending on the core's current state.
+ * @csftrst: Core Soft Reset (CSftRst)
+ * Resets the hclk and phy_clock domains as follows:
+ * * Clears the interrupts and all the CSR registers except the
+ * following register bits:
+ * - PCGCCTL.RstPdwnModule
+ * - PCGCCTL.GateHclk
+ * - PCGCCTL.PwrClmp
+ * - PCGCCTL.StopPclk
+ * - GUSBCFG.PhyLPwrClkSel
+ * - GUSBCFG.DDRSel
+ * - GUSBCFG.PHYSel
+ * - GUSBCFG.FSIntf
+ * - GUSBCFG.ULPI_UTMI_Sel
+ * - GUSBCFG.PHYIf
+ * - HCFG.FSLSPclkSel
+ * - DCFG.DevSpd
+ * * All module state machines (except the AHB Slave Unit) are
+ * reset to the IDLE state, and all the transmit FIFOs and the
+ * receive FIFO are flushed.
+ * * Any transactions on the AHB Master are terminated as soon
+ * as possible, after gracefully completing the last data phase of
+ * an AHB transfer. Any transactions on the USB are terminated
+ * immediately.
+ * The application can write to this bit any time it wants to reset
+ * the core. This is a self-clearing bit and the core clears this
+ * bit after all the necessary logic is reset in the core, which
+ * may take several clocks, depending on the current state of the
+ * core. Once this bit is cleared software should wait at least 3
+ * PHY clocks before doing any access to the PHY domain
+ * (synchronization delay). Software should also check that
+ * bit 31 of this register is 1 (AHB Master is IDLE) before
+ * starting any operation.
+ * Typically software reset is used during software development
+ * and also when you dynamically change the PHY selection bits
+ * in the USB configuration registers listed above. When you
+ * change the PHY, the corresponding clock for the PHY is
+ * selected and used in the PHY domain. Once a new clock is
+ * selected, the PHY domain has to be reset for proper operation.
+ */
+ struct cvmx_usbcx_grstctl_s {
+ uint32_t ahbidle : 1;
+ uint32_t dmareq : 1;
+ uint32_t reserved_11_29 : 19;
+ uint32_t txfnum : 5;
+ uint32_t txfflsh : 1;
+ uint32_t rxfflsh : 1;
+ uint32_t intknqflsh : 1;
+ uint32_t frmcntrrst : 1;
+ uint32_t hsftrst : 1;
+ uint32_t csftrst : 1;
+ } s;
+};
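+
+/*
+ * Illustrative sketch, not part of the original header, of the GRSTCTL
+ * sequences described above: a core soft reset followed by a flush of all
+ * TxFIFOs. It uses the hypothetical usbc_read32()/usbc_write32() accessors
+ * declared with the GINTSTS example; the polling loops are left unbounded
+ * only to keep the sketch short (a real driver would add timeouts and the
+ * 3-PHY-clock delay noted above).
+ */
+static inline void cvmx_usbcx_grstctl_example(uint64_t grstctl_addr)
+{
+	union cvmx_usbcx_grstctl grstctl;
+
+	/* Core soft reset: set CSftRst and wait for the core to clear it. */
+	grstctl.u32 = usbc_read32(grstctl_addr);
+	grstctl.s.csftrst = 1;
+	usbc_write32(grstctl_addr, grstctl.u32);
+	do {
+		grstctl.u32 = usbc_read32(grstctl_addr);
+	} while (grstctl.s.csftrst);
+
+	/* After the synchronization delay, wait for the AHB master to idle. */
+	do {
+		grstctl.u32 = usbc_read32(grstctl_addr);
+	} while (!grstctl.s.ahbidle);
+
+	/* Flush all TxFIFOs (TxFNum = 5'h10) and wait for completion. */
+	grstctl.u32 = usbc_read32(grstctl_addr);
+	grstctl.s.txfnum = 0x10;
+	grstctl.s.txfflsh = 1;
+	usbc_write32(grstctl_addr, grstctl.u32);
+	do {
+		grstctl.u32 = usbc_read32(grstctl_addr);
+	} while (grstctl.s.txfflsh);
+}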
+
+/**
+ * cvmx_usbc#_grxfsiz
+ *
+ * Receive FIFO Size Register (GRXFSIZ)
+ *
+ * The application can program the RAM size that must be allocated to the
+ * RxFIFO.
+ */
+union cvmx_usbcx_grxfsiz {
+ uint32_t u32;
+ /**
+ * struct cvmx_usbcx_grxfsiz_s
+ * @rxfdep: RxFIFO Depth (RxFDep)
+ * This value is in terms of 32-bit words.
+ * * Minimum value is 16
+ * * Maximum value is 32768
+ */
+ struct cvmx_usbcx_grxfsiz_s {
+ uint32_t reserved_16_31 : 16;
+ uint32_t rxfdep : 16;
+ } s;
+};
+
+/**
+ * cvmx_usbc#_grxstsph
+ *
+ * Receive Status Read and Pop Register, Host Mode (GRXSTSPH)
+ *
+ * A read of the Receive Status Read and Pop register returns the top data
+ * entry of the RxFIFO and additionally pops that entry.
+ * This Description is only valid when the core is in Host Mode. For Device Mode
+ * use USBC_GRXSTSPD instead.
+ * NOTE: GRXSTSPH and GRXSTSPD are physically the same register and share the
+ * same offset in the O2P USB core. The offset difference shown in this
+ * document is for software clarity and is actually ignored by the
+ * hardware.
+ */
+union cvmx_usbcx_grxstsph {
+ uint32_t u32;
+ /**
+ * struct cvmx_usbcx_grxstsph_s
+ * @pktsts: Packet Status (PktSts)
+ * Indicates the status of the received packet
+ * * 4'b0010: IN data packet received
+ * * 4'b0011: IN transfer completed (triggers an interrupt)
+ * * 4'b0101: Data toggle error (triggers an interrupt)
+ * * 4'b0111: Channel halted (triggers an interrupt)
+ * * Others: Reserved
+ * @dpid: Data PID (DPID)
+ * * 2'b00: DATA0
+ * * 2'b10: DATA1
+ * * 2'b01: DATA2
+ * * 2'b11: MDATA
+ * @bcnt: Byte Count (BCnt)
+ * Indicates the byte count of the received IN data packet
+ * @chnum: Channel Number (ChNum)
+ * Indicates the channel number to which the current received
+ * packet belongs.
+ */
+ struct cvmx_usbcx_grxstsph_s {
+ uint32_t reserved_21_31 : 11;
+ uint32_t pktsts : 4;
+ uint32_t dpid : 2;
+ uint32_t bcnt : 11;
+ uint32_t chnum : 4;
+ } s;
+};
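+
+/*
+ * Illustrative sketch, not part of the original header: decoding one entry
+ * popped from the RxFIFO in Host mode. "entry" is assumed to already hold a
+ * raw value read from GRXSTSPH.
+ */
+static inline void cvmx_usbcx_grxstsph_decode(uint32_t entry, int *channel,
+					      int *bytes, int *is_in_data)
+{
+	union cvmx_usbcx_grxstsph grxstsph;
+
+	grxstsph.u32 = entry;
+	*channel = grxstsph.s.chnum;
+	*bytes = grxstsph.s.bcnt;
+	/* PktSts 4'b0010 means an IN data packet was received. */
+	*is_in_data = (grxstsph.s.pktsts == 0x2);
+}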
+
+/**
+ * cvmx_usbc#_gusbcfg
+ *
+ * Core USB Configuration Register (GUSBCFG)
+ *
+ * This register can be used to configure the core after power-on or after a
+ * change to Host mode or Device mode. It contains USB and USB-PHY related
+ * configuration parameters. The application must program this register before
+ * starting any transactions on either the AHB or the USB. Do not make changes
+ * to this register after the initial programming.
+ */
+union cvmx_usbcx_gusbcfg {
+ uint32_t u32;
+ /**
+ * struct cvmx_usbcx_gusbcfg_s
+ * @otgi2csel: UTMIFS or I2C Interface Select (OtgI2CSel)
+ * This bit is always 0x0.
+ * @phylpwrclksel: PHY Low-Power Clock Select (PhyLPwrClkSel)
+ * Software should set this bit to 0x0.
+ * Selects either 480-MHz or 48-MHz (low-power) PHY mode. In
+ * FS and LS modes, the PHY can usually operate on a 48-MHz
+ * clock to save power.
+ * * 1'b0: 480-MHz Internal PLL clock
+ * * 1'b1: 48-MHz External Clock
+ * In 480 MHz mode, the UTMI interface operates at either 60 or
+ * 30-MHz, depending upon whether 8- or 16-bit data width is
+ * selected. In 48-MHz mode, the UTMI interface operates at 48
+ * MHz in FS mode and at either 48 or 6 MHz in LS mode
+ * (depending on the PHY vendor).
+ * This bit drives the utmi_fsls_low_power core output signal, and
+ * is valid only for UTMI+ PHYs.
+ * @usbtrdtim: USB Turnaround Time (USBTrdTim)
+ * Sets the turnaround time in PHY clocks.
+ * Specifies the response time for a MAC request to the Packet
+ * FIFO Controller (PFC) to fetch data from the DFIFO (SPRAM).
+ * This must be programmed to 0x5.
+ * @hnpcap: HNP-Capable (HNPCap)
+ * This bit is always 0x0.
+ * @srpcap: SRP-Capable (SRPCap)
+ * This bit is always 0x0.
+ * @ddrsel: ULPI DDR Select (DDRSel)
+ * Software should set this bit to 0x0.
+ * @physel: USB 2.0 High-Speed PHY or USB 1.1 Full-Speed Serial
+ * Software should set this bit to 0x0.
+ * @fsintf: Full-Speed Serial Interface Select (FSIntf)
+ * Software should set this bit to 0x0.
+ * @ulpi_utmi_sel: ULPI or UTMI+ Select (ULPI_UTMI_Sel)
+ * This bit is always 0x0.
+ * @phyif: PHY Interface (PHYIf)
+ * This bit is always 0x1.
+ * @toutcal: HS/FS Timeout Calibration (TOutCal)
+ * The number of PHY clocks that the application programs in this
+ * field is added to the high-speed/full-speed interpacket timeout
+ * duration in the core to account for any additional delays
+ * introduced by the PHY. This may be required, since the delay
+ * introduced by the PHY in generating the linestate condition may
+ * vary from one PHY to another.
+ * The USB standard timeout value for high-speed operation is
+ * 736 to 816 (inclusive) bit times. The USB standard timeout
+ * value for full-speed operation is 16 to 18 (inclusive) bit
+ * times. The application must program this field based on the
+ * speed of enumeration. The number of bit times added per PHY
+ * clock is:
+ * High-speed operation:
+ * * One 30-MHz PHY clock = 16 bit times
+ * * One 60-MHz PHY clock = 8 bit times
+ * Full-speed operation:
+ * * One 30-MHz PHY clock = 0.4 bit times
+ * * One 60-MHz PHY clock = 0.2 bit times
+ * * One 48-MHz PHY clock = 0.25 bit times
+ */
+ struct cvmx_usbcx_gusbcfg_s {
+ uint32_t reserved_17_31 : 15;
+ uint32_t otgi2csel : 1;
+ uint32_t phylpwrclksel : 1;
+ uint32_t reserved_14_14 : 1;
+ uint32_t usbtrdtim : 4;
+ uint32_t hnpcap : 1;
+ uint32_t srpcap : 1;
+ uint32_t ddrsel : 1;
+ uint32_t physel : 1;
+ uint32_t fsintf : 1;
+ uint32_t ulpi_utmi_sel : 1;
+ uint32_t phyif : 1;
+ uint32_t toutcal : 3;
+ } s;
+};
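+
+/*
+ * Illustrative sketch, not part of the original header: the one-time GUSBCFG
+ * programming implied by the field notes above (USBTrdTim = 0x5 and the
+ * "software should set this bit to 0x0" fields cleared). It uses the
+ * hypothetical usbc_read32()/usbc_write32() accessors declared with the
+ * GINTSTS example.
+ */
+static inline void cvmx_usbcx_gusbcfg_example(uint64_t gusbcfg_addr)
+{
+	union cvmx_usbcx_gusbcfg gusbcfg;
+
+	gusbcfg.u32 = usbc_read32(gusbcfg_addr);
+	gusbcfg.s.usbtrdtim = 0x5;	/* required value per the note above */
+	gusbcfg.s.phylpwrclksel = 0;
+	gusbcfg.s.ddrsel = 0;
+	gusbcfg.s.physel = 0;
+	gusbcfg.s.fsintf = 0;
+	usbc_write32(gusbcfg_addr, gusbcfg.u32);
+}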
+
+/**
+ * cvmx_usbc#_haint
+ *
+ * Host All Channels Interrupt Register (HAINT)
+ *
+ * When a significant event occurs on a channel, the Host All Channels Interrupt
+ * register interrupts the application using the Host Channels Interrupt bit of
+ * the Core Interrupt register (GINTSTS.HChInt). This is shown in Interrupt.
+ * There is one interrupt bit per channel, up to a maximum of 16 bits. Bits in
+ * this register are set and cleared when the application sets and clears bits
+ * in the corresponding Host Channel-n Interrupt register.
+ */
+union cvmx_usbcx_haint {
+ uint32_t u32;
+ /**
+ * struct cvmx_usbcx_haint_s
+ * @haint: Channel Interrupts (HAINT)
+ * One bit per channel: Bit 0 for Channel 0, bit 15 for Channel 15
+ */
+ struct cvmx_usbcx_haint_s {
+ uint32_t reserved_16_31 : 16;
+ uint32_t haint : 16;
+ } s;
+};
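+
+/*
+ * Illustrative sketch, not part of the original header: walking the HAINT
+ * channel bitmap. "haint_value" is assumed to already hold a raw value read
+ * from this register, and handle_channel() is a hypothetical per-channel
+ * handler supplied by the caller.
+ */
+static inline void cvmx_usbcx_haint_for_each(uint32_t haint_value,
+					     void (*handle_channel)(int channel))
+{
+	union cvmx_usbcx_haint haint;
+	int channel;
+
+	haint.u32 = haint_value;
+	for (channel = 0; channel < 16; channel++)
+		if (haint.s.haint & (1u << channel))
+			handle_channel(channel);
+}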
+
+/**
+ * cvmx_usbc#_haintmsk
+ *
+ * Host All Channels Interrupt Mask Register (HAINTMSK)
+ *
+ * The Host All Channel Interrupt Mask register works with the Host All Channel
+ * Interrupt register to interrupt the application when an event occurs on a
+ * channel. There is one interrupt mask bit per channel, up to a maximum of 16
+ * bits.
+ * Mask interrupt: 1'b0 Unmask interrupt: 1'b1
+ */
+union cvmx_usbcx_haintmsk {
+ uint32_t u32;
+ /**
+ * struct cvmx_usbcx_haintmsk_s
+ * @haintmsk: Channel Interrupt Mask (HAINTMsk)
+ * One bit per channel: Bit 0 for channel 0, bit 15 for channel 15
+ */
+ struct cvmx_usbcx_haintmsk_s {
+ uint32_t reserved_16_31 : 16;
+ uint32_t haintmsk : 16;
+ } s;
+};
+
+/**
+ * cvmx_usbc#_hcchar#
+ *
+ * Host Channel-n Characteristics Register (HCCHAR)
+ *
+ */
+union cvmx_usbcx_hccharx {
+ uint32_t u32;
+ /**
+ * struct cvmx_usbcx_hccharx_s
+ * @chena: Channel Enable (ChEna)
+ * This field is set by the application and cleared by the OTG
+ * host.
+ * * 1'b0: Channel disabled
+ * * 1'b1: Channel enabled
+ * @chdis: Channel Disable (ChDis)
+ * The application sets this bit to stop transmitting/receiving
+ * data on a channel, even before the transfer for that channel is
+ * complete. The application must wait for the Channel Disabled
+ * interrupt before treating the channel as disabled.
+ * @oddfrm: Odd Frame (OddFrm)
+ * This field is set (reset) by the application to indicate that
+ * the OTG host must perform a transfer in an odd (micro)frame.
+ * This field is applicable for only periodic (isochronous and
+ * interrupt) transactions.
+ * * 1'b0: Even (micro)frame
+ * * 1'b1: Odd (micro)frame
+ * @devaddr: Device Address (DevAddr)
+ * This field selects the specific device serving as the data
+ * source or sink.
+ * @ec: Multi Count (MC) / Error Count (EC)
+ * When the Split Enable bit of the Host Channel-n Split Control
+ * register (HCSPLTn.SpltEna) is reset (1'b0), this field indicates
+ * to the host the number of transactions that should be executed
+ * per microframe for this endpoint.
+ * * 2'b00: Reserved. This field yields undefined results.
+ * * 2'b01: 1 transaction
+ * * 2'b10: 2 transactions to be issued for this endpoint per
+ * microframe
+ * * 2'b11: 3 transactions to be issued for this endpoint per
+ * microframe
+ * When HCSPLTn.SpltEna is set (1'b1), this field indicates the
+ * number of immediate retries to be performed for a periodic split
+ * transactions on transaction errors. This field must be set to at
+ * least 2'b01.
+ * @eptype: Endpoint Type (EPType)
+ * Indicates the transfer type selected.
+ * * 2'b00: Control
+ * * 2'b01: Isochronous
+ * * 2'b10: Bulk
+ * * 2'b11: Interrupt
+ * @lspddev: Low-Speed Device (LSpdDev)
+ * This field is set by the application to indicate that this
+ * channel is communicating to a low-speed device.
+ * @epdir: Endpoint Direction (EPDir)
+ * Indicates whether the transaction is IN or OUT.
+ * * 1'b0: OUT
+ * * 1'b1: IN
+ * @epnum: Endpoint Number (EPNum)
+ * Indicates the endpoint number on the device serving as the
+ * data source or sink.
+ * @mps: Maximum Packet Size (MPS)
+ * Indicates the maximum packet size of the associated endpoint.
+ */
+ struct cvmx_usbcx_hccharx_s {
+ uint32_t chena : 1;
+ uint32_t chdis : 1;
+ uint32_t oddfrm : 1;
+ uint32_t devaddr : 7;
+ uint32_t ec : 2;
+ uint32_t eptype : 2;
+ uint32_t lspddev : 1;
+ uint32_t reserved_16_16 : 1;
+ uint32_t epdir : 1;
+ uint32_t epnum : 4;
+ uint32_t mps : 11;
+ } s;
+};
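+
+/*
+ * Illustrative sketch, not part of the original header: filling in HCCHARn
+ * for a bulk IN transfer. The parameters are examples only; a real driver
+ * would also program HCTSIZn (and HCSPLTn when needed) before setting ChEna,
+ * and would write the result out with its own register accessor.
+ */
+static inline uint32_t cvmx_usbcx_hccharx_bulk_in(int device_address,
+						  int endpoint_number,
+						  int max_packet_size)
+{
+	union cvmx_usbcx_hccharx hcchar;
+
+	hcchar.u32 = 0;
+	hcchar.s.devaddr = device_address;
+	hcchar.s.epnum = endpoint_number;
+	hcchar.s.epdir = 1;		/* 1'b1: IN */
+	hcchar.s.eptype = 2;		/* 2'b10: Bulk */
+	hcchar.s.ec = 1;		/* 2'b01: one transaction */
+	hcchar.s.mps = max_packet_size;
+	hcchar.s.chena = 1;		/* enable the channel */
+	return hcchar.u32;
+}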
+
+/**
+ * cvmx_usbc#_hcfg
+ *
+ * Host Configuration Register (HCFG)
+ *
+ * This register configures the core after power-on. Do not make changes to this
+ * register after initializing the host.
+ */
+union cvmx_usbcx_hcfg {
+ uint32_t u32;
+ /**
+ * struct cvmx_usbcx_hcfg_s
+ * @fslssupp: FS- and LS-Only Support (FSLSSupp)
+ * The application uses this bit to control the core's enumeration
+ * speed. Using this bit, the application can make the core
+ * enumerate as a FS host, even if the connected device supports
+ * HS traffic. Do not make changes to this field after initial
+ * programming.
+ * * 1'b0: HS/FS/LS, based on the maximum speed supported by
+ * the connected device
+ * * 1'b1: FS/LS-only, even if the connected device can support HS
+ * @fslspclksel: FS/LS PHY Clock Select (FSLSPclkSel)
+ * When the core is in FS Host mode
+ * * 2'b00: PHY clock is running at 30/60 MHz
+ * * 2'b01: PHY clock is running at 48 MHz
+ * * Others: Reserved
+ * When the core is in LS Host mode
+ * * 2'b00: PHY clock is running at 30/60 MHz. When the
+ * UTMI+/ULPI PHY Low Power mode is not selected, use
+ * 30/60 MHz.
+ * * 2'b01: PHY clock is running at 48 MHz. When the UTMI+
+ * PHY Low Power mode is selected, use 48MHz if the PHY
+ * supplies a 48 MHz clock during LS mode.
+ * * 2'b10: PHY clock is running at 6 MHz. In USB 1.1 FS mode,
+ * use 6 MHz when the UTMI+ PHY Low Power mode is
+ * selected and the PHY supplies a 6 MHz clock during LS
+ * mode. If you select a 6 MHz clock during LS mode, you must
+ * do a soft reset.
+ * * 2'b11: Reserved
+ */
+ struct cvmx_usbcx_hcfg_s {
+ uint32_t reserved_3_31 : 29;
+ uint32_t fslssupp : 1;
+ uint32_t fslspclksel : 2;
+ } s;
+};
+
+/**
+ * cvmx_usbc#_hcint#
+ *
+ * Host Channel-n Interrupt Register (HCINT)
+ *
+ * This register indicates the status of a channel with respect to USB- and
+ * AHB-related events. The application must read this register when the Host
+ * Channels Interrupt bit of the Core Interrupt register (GINTSTS.HChInt) is
+ * set. Before the application can read this register, it must first read
+ * the Host All Channels Interrupt (HAINT) register to get the exact channel
+ * number for the Host Channel-n Interrupt register. The application must clear
+ * the appropriate bit in this register to clear the corresponding bits in the
+ * HAINT and GINTSTS registers.
+ */
+union cvmx_usbcx_hcintx {
+ uint32_t u32;
+ /**
+ * struct cvmx_usbcx_hcintx_s
+ * @datatglerr: Data Toggle Error (DataTglErr)
+ * @frmovrun: Frame Overrun (FrmOvrun)
+ * @bblerr: Babble Error (BblErr)
+ * @xacterr: Transaction Error (XactErr)
+ * @nyet: NYET Response Received Interrupt (NYET)
+ * @ack: ACK Response Received Interrupt (ACK)
+ * @nak: NAK Response Received Interrupt (NAK)
+ * @stall: STALL Response Received Interrupt (STALL)
+ * @ahberr: This bit is always 0x0.
+ * @chhltd: Channel Halted (ChHltd)
+ * Indicates the transfer completed abnormally either because of
+ * any USB transaction error or in response to a disable request by
+ * the application.
+ * @xfercompl: Transfer Completed (XferCompl)
+ * Transfer completed normally without any errors.
+ */
+ struct cvmx_usbcx_hcintx_s {
+ uint32_t reserved_11_31 : 21;
+ uint32_t datatglerr : 1;
+ uint32_t frmovrun : 1;
+ uint32_t bblerr : 1;
+ uint32_t xacterr : 1;
+ uint32_t nyet : 1;
+ uint32_t ack : 1;
+ uint32_t nak : 1;
+ uint32_t stall : 1;
+ uint32_t ahberr : 1;
+ uint32_t chhltd : 1;
+ uint32_t xfercompl : 1;
+ } s;
+};
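+
+/*
+ * Illustrative sketch, not part of the original header: classifying a channel
+ * completion from HCINTn. "hcint_value" is assumed to already hold a raw
+ * value read from the register; clearing the serviced bits afterwards is
+ * left to the driver's own accessor.
+ */
+static inline int cvmx_usbcx_hcintx_completed_ok(uint32_t hcint_value)
+{
+	union cvmx_usbcx_hcintx hcint;
+
+	hcint.u32 = hcint_value;
+	if (hcint.s.xfercompl)
+		return 1;	/* completed normally without errors */
+	if (hcint.s.chhltd)
+		return 0;	/* halted: transaction error or disable request */
+	return -1;		/* nothing conclusive pending yet */
+}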
+
+/**
+ * cvmx_usbc#_hcintmsk#
+ *
+ * Host Channel-n Interrupt Mask Register (HCINTMSKn)
+ *
+ * This register reflects the mask for each channel status described in the
+ * previous section.
+ * Mask interrupt: 1'b0 Unmask interrupt: 1'b1
+ */
+union cvmx_usbcx_hcintmskx {
+ uint32_t u32;
+ /**
+ * struct cvmx_usbcx_hcintmskx_s
+ * @datatglerrmsk: Data Toggle Error Mask (DataTglErrMsk)
+ * @frmovrunmsk: Frame Overrun Mask (FrmOvrunMsk)
+ * @bblerrmsk: Babble Error Mask (BblErrMsk)
+ * @xacterrmsk: Transaction Error Mask (XactErrMsk)
+ * @nyetmsk: NYET Response Received Interrupt Mask (NyetMsk)
+ * @ackmsk: ACK Response Received Interrupt Mask (AckMsk)
+ * @nakmsk: NAK Response Received Interrupt Mask (NakMsk)
+ * @stallmsk: STALL Response Received Interrupt Mask (StallMsk)
+ * @ahberrmsk: AHB Error Mask (AHBErrMsk)
+ * @chhltdmsk: Channel Halted Mask (ChHltdMsk)
+ * @xfercomplmsk: Transfer Completed Mask (XferComplMsk)
+ */
+ struct cvmx_usbcx_hcintmskx_s {
+ uint32_t reserved_11_31 : 21;
+ uint32_t datatglerrmsk : 1;
+ uint32_t frmovrunmsk : 1;
+ uint32_t bblerrmsk : 1;
+ uint32_t xacterrmsk : 1;
+ uint32_t nyetmsk : 1;
+ uint32_t ackmsk : 1;
+ uint32_t nakmsk : 1;
+ uint32_t stallmsk : 1;
+ uint32_t ahberrmsk : 1;
+ uint32_t chhltdmsk : 1;
+ uint32_t xfercomplmsk : 1;
+ } s;
+};
+
+/**
+ * cvmx_usbc#_hcsplt#
+ *
+ * Host Channel-n Split Control Register (HCSPLT)
+ *
+ */
+union cvmx_usbcx_hcspltx {
+ uint32_t u32;
+ /**
+ * struct cvmx_usbcx_hcspltx_s
+ * @spltena: Split Enable (SpltEna)
+ * The application sets this field to indicate that this channel is
+ * enabled to perform split transactions.
+ * @compsplt: Do Complete Split (CompSplt)
+ * The application sets this field to request the OTG host to
+ * perform a complete split transaction.
+ * @xactpos: Transaction Position (XactPos)
+ * This field is used to determine whether to send all, first,
+ * middle, or last payloads with each OUT transaction.
+ * * 2'b11: All. This is the entire data payload of this
+ * transaction (which is less than or equal to 188 bytes).
+ * * 2'b10: Begin. This is the first data payload of this
+ * transaction (which is larger than 188 bytes).
+ * * 2'b00: Mid. This is the middle payload of this transaction
+ * (which is larger than 188 bytes).
+ * * 2'b01: End. This is the last payload of this transaction
+ * (which is larger than 188 bytes).
+ * @hubaddr: Hub Address (HubAddr)
+ * This field holds the device address of the transaction
+ * translator's hub.
+ * @prtaddr: Port Address (PrtAddr)
+ * This field is the port number of the recipient transaction
+ * translator.
+ */
+ struct cvmx_usbcx_hcspltx_s {
+ uint32_t spltena : 1;
+ uint32_t reserved_17_30 : 14;
+ uint32_t compsplt : 1;
+ uint32_t xactpos : 2;
+ uint32_t hubaddr : 7;
+ uint32_t prtaddr : 7;
+ } s;
+};
+
+/**
+ * cvmx_usbc#_hctsiz#
+ *
+ * Host Channel-n Transfer Size Register (HCTSIZ)
+ *
+ */
+union cvmx_usbcx_hctsizx {
+ uint32_t u32;
+ /**
+ * struct cvmx_usbcx_hctsizx_s
+ * @dopng: Do Ping (DoPng)
+ * Setting this field to 1 directs the host to do PING protocol.
+ * @pid: PID (Pid)
+ * The application programs this field with the type of PID to use
+ * for the initial transaction. The host will maintain this field
+ * for the rest of the transfer.
+ * * 2'b00: DATA0
+ * * 2'b01: DATA2
+ * * 2'b10: DATA1
+ * * 2'b11: MDATA (non-control)/SETUP (control)
+ * @pktcnt: Packet Count (PktCnt)
+ * This field is programmed by the application with the expected
+ * number of packets to be transmitted (OUT) or received (IN).
+ * The host decrements this count on every successful
+ * transmission or reception of an OUT/IN packet. Once this count
+ * reaches zero, the application is interrupted to indicate normal
+ * completion.
+ * @xfersize: Transfer Size (XferSize)
+ * For an OUT, this field is the number of data bytes the host will
+ * send during the transfer.
+ * For an IN, this field is the buffer size that the application
+ * has reserved for the transfer. The application is expected to
+ * program this field as an integer multiple of the maximum packet
+ * size for IN transactions (periodic and non-periodic).
+ */
+ struct cvmx_usbcx_hctsizx_s {
+ uint32_t dopng : 1;
+ uint32_t pid : 2;
+ uint32_t pktcnt : 10;
+ uint32_t xfersize : 19;
+ } s;
+};
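+
+/*
+ * Illustrative sketch, not part of the original header: programming HCTSIZn
+ * from a byte count. PktCnt is the number of max-packet-size packets needed
+ * to carry "bytes"; a zero-length transfer still needs one packet.
+ */
+static inline uint32_t cvmx_usbcx_hctsizx_fill(uint32_t bytes,
+					       uint32_t max_packet_size,
+					       int data_toggle_pid)
+{
+	union cvmx_usbcx_hctsizx hctsiz;
+	uint32_t packets = (bytes + max_packet_size - 1) / max_packet_size;
+
+	if (packets == 0)
+		packets = 1;
+
+	hctsiz.u32 = 0;
+	hctsiz.s.dopng = 0;
+	hctsiz.s.pid = data_toggle_pid;	/* 2'b00: DATA0, 2'b10: DATA1, ... */
+	hctsiz.s.pktcnt = packets;
+	hctsiz.s.xfersize = bytes;
+	return hctsiz.u32;
+}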
+
+/**
+ * cvmx_usbc#_hfir
+ *
+ * Host Frame Interval Register (HFIR)
+ *
+ * This register stores the frame interval information for the current speed to
+ * which the O2P USB core has enumerated.
+ */
+union cvmx_usbcx_hfir {
+ uint32_t u32;
+ /**
+ * struct cvmx_usbcx_hfir_s
+ * @frint: Frame Interval (FrInt)
+ * The value that the application programs to this field specifies
+ * the interval between two consecutive SOFs (FS) or micro-
+ * SOFs (HS) or Keep-Alive tokens (HS). This field contains the
+ * number of PHY clocks that constitute the required frame
+ * interval. The default value set in this field is for FS operation
+ * when the PHY clock frequency is 60 MHz. The application can
+ * write a value to this register only after the Port Enable bit of
+ * the Host Port Control and Status register (HPRT.PrtEnaPort)
+ * has been set. If no value is programmed, the core calculates
+ * the value based on the PHY clock specified in the FS/LS PHY
+ * Clock Select field of the Host Configuration register
+ * (HCFG.FSLSPclkSel). Do not change the value of this field
+ * after the initial configuration.
+ * * 125 us (PHY clock frequency for HS)
+ * * 1 ms (PHY clock frequency for FS/LS)
+ */
+ struct cvmx_usbcx_hfir_s {
+ uint32_t reserved_16_31 : 16;
+ uint32_t frint : 16;
+ } s;
+};
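+
+/*
+ * Illustrative sketch, not part of the original header: deriving FrInt from
+ * the PHY clock rate and the frame intervals quoted above (125 us per
+ * microframe in HS, 1 ms per frame in FS/LS). For example, a 60 MHz PHY
+ * clock in FS mode gives 60000 PHY clocks per frame.
+ */
+static inline uint32_t cvmx_usbcx_hfir_frint(uint32_t phy_clock_hz,
+					     int high_speed)
+{
+	union cvmx_usbcx_hfir hfir;
+
+	hfir.u32 = 0;
+	if (high_speed)
+		hfir.s.frint = phy_clock_hz / 8000;	/* 125 us interval */
+	else
+		hfir.s.frint = phy_clock_hz / 1000;	/* 1 ms interval */
+	return hfir.u32;
+}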
+
+/**
+ * cvmx_usbc#_hfnum
+ *
+ * Host Frame Number/Frame Time Remaining Register (HFNUM)
+ *
+ * This register indicates the current frame number.
+ * It also indicates the time remaining (in terms of the number of PHY clocks)
+ * in the current (micro)frame.
+ */
+union cvmx_usbcx_hfnum {
+ uint32_t u32;
+ /**
+ * struct cvmx_usbcx_hfnum_s
+ * @frrem: Frame Time Remaining (FrRem)
+ * Indicates the amount of time remaining in the current
+ * microframe (HS) or frame (FS/LS), in terms of PHY clocks.
+ * This field decrements on each PHY clock. When it reaches
+ * zero, this field is reloaded with the value in the Frame
+ * Interval register and a new SOF is transmitted on the USB.
+ * @frnum: Frame Number (FrNum)
+ * This field increments when a new SOF is transmitted on the
+ * USB, and is reset to 0 when it reaches 16'h3FFF.
+ */
+ struct cvmx_usbcx_hfnum_s {
+ uint32_t frrem : 16;
+ uint32_t frnum : 16;
+ } s;
+};
+
+/**
+ * cvmx_usbc#_hprt
+ *
+ * Host Port Control and Status Register (HPRT)
+ *
+ * This register is available in both Host and Device modes.
+ * Currently, the OTG Host supports only one port.
+ * A single register holds USB port-related information such as USB reset,
+ * enable, suspend, resume, connect status, and test mode for each port. The
+ * R_SS_WC bits in this register can trigger an interrupt to the application
+ * through the Host Port Interrupt bit of the Core Interrupt register
+ * (GINTSTS.PrtInt). On a Port Interrupt, the application must read this
+ * register and clear the bit that caused the interrupt. For the R_SS_WC bits,
+ * the application must write a 1 to the bit to clear the interrupt.
+ */
+union cvmx_usbcx_hprt {
+ uint32_t u32;
+ /**
+ * struct cvmx_usbcx_hprt_s
+ * @prtspd: Port Speed (PrtSpd)
+ * Indicates the speed of the device attached to this port.
+ * * 2'b00: High speed
+ * * 2'b01: Full speed
+ * * 2'b10: Low speed
+ * * 2'b11: Reserved
+ * @prttstctl: Port Test Control (PrtTstCtl)
+ * The application writes a nonzero value to this field to put
+ * the port into a Test mode, and the corresponding pattern is
+ * signaled on the port.
+ * * 4'b0000: Test mode disabled
+ * * 4'b0001: Test_J mode
+ * * 4'b0010: Test_K mode
+ * * 4'b0011: Test_SE0_NAK mode
+ * * 4'b0100: Test_Packet mode
+ * * 4'b0101: Test_Force_Enable
+ * * Others: Reserved
+ * PrtSpd must be zero (i.e. the interface must be in high-speed
+ * mode) to use the PrtTstCtl test modes.
+ * @prtpwr: Port Power (PrtPwr)
+ * The application uses this field to control power to this port,
+ * and the core clears this bit on an overcurrent condition.
+ * * 1'b0: Power off
+ * * 1'b1: Power on
+ * @prtlnsts: Port Line Status (PrtLnSts)
+ * Indicates the current logic level of the USB data lines.
+ * * Bit [10]: Logic level of D-
+ * * Bit [11]: Logic level of D+
+ * @prtrst: Port Reset (PrtRst)
+ * When the application sets this bit, a reset sequence is
+ * started on this port. The application must time the reset
+ * period and clear this bit after the reset sequence is
+ * complete.
+ * * 1'b0: Port not in reset
+ * * 1'b1: Port in reset
+ * The application must leave this bit set for at least the
+ * minimum duration mentioned below to start a reset on the
+ * port. The application can leave it set for another 10 ms in
+ * addition to the required minimum duration, before clearing
+ * the bit, even though there is no maximum limit set by the
+ * USB standard.
+ * * High speed: 50 ms
+ * * Full speed/Low speed: 10 ms
+ * @prtsusp: Port Suspend (PrtSusp)
+ * The application sets this bit to put this port in Suspend
+ * mode. The core only stops sending SOFs when this is set.
+ * To stop the PHY clock, the application must set the Port
+ * Clock Stop bit, which will assert the suspend input pin of
+ * the PHY.
+ * The read value of this bit reflects the current suspend
+ * status of the port. This bit is cleared by the core after a
+ * remote wakeup signal is detected or the application sets
+ * the Port Reset bit or Port Resume bit in this register or the
+ * Resume/Remote Wakeup Detected Interrupt bit or
+ * Disconnect Detected Interrupt bit in the Core Interrupt
+ * register (GINTSTS.WkUpInt or GINTSTS.DisconnInt,
+ * respectively).
+ * * 1'b0: Port not in Suspend mode
+ * * 1'b1: Port in Suspend mode
+ * @prtres: Port Resume (PrtRes)
+ * The application sets this bit to drive resume signaling on
+ * the port. The core continues to drive the resume signal
+ * until the application clears this bit.
+ * If the core detects a USB remote wakeup sequence, as
+ * indicated by the Port Resume/Remote Wakeup Detected
+ * Interrupt bit of the Core Interrupt register
+ * (GINTSTS.WkUpInt), the core starts driving resume
+ * signaling without application intervention and clears this bit
+ * when it detects a disconnect condition. The read value of
+ * this bit indicates whether the core is currently driving
+ * resume signaling.
+ * * 1'b0: No resume driven
+ * * 1'b1: Resume driven
+ * @prtovrcurrchng: Port Overcurrent Change (PrtOvrCurrChng)
+ * The core sets this bit when the status of the Port
+ * Overcurrent Active bit (bit 4) in this register changes.
+ * @prtovrcurract: Port Overcurrent Active (PrtOvrCurrAct)
+ * Indicates the overcurrent condition of the port.
+ * * 1'b0: No overcurrent condition
+ * * 1'b1: Overcurrent condition
+ * @prtenchng: Port Enable/Disable Change (PrtEnChng)
+ * The core sets this bit when the status of the Port Enable bit
+ * [2] of this register changes.
+ * @prtena: Port Enable (PrtEna)
+ * A port is enabled only by the core after a reset sequence,
+ * and is disabled by an overcurrent condition, a disconnect
+ * condition, or by the application clearing this bit. The
+ * application cannot set this bit by a register write. It can only
+ * clear it to disable the port. This bit does not trigger any
+ * interrupt to the application.
+ * * 1'b0: Port disabled
+ * * 1'b1: Port enabled
+ * @prtconndet: Port Connect Detected (PrtConnDet)
+ * The core sets this bit when a device connection is detected
+ * to trigger an interrupt to the application using the Host Port
+ * Interrupt bit of the Core Interrupt register (GINTSTS.PrtInt).
+ * The application must write a 1 to this bit to clear the
+ * interrupt.
+ * @prtconnsts: Port Connect Status (PrtConnSts)
+ * * 0: No device is attached to the port.
+ * * 1: A device is attached to the port.
+ */
+ struct cvmx_usbcx_hprt_s {
+ uint32_t reserved_19_31 : 13;
+ uint32_t prtspd : 2;
+ uint32_t prttstctl : 4;
+ uint32_t prtpwr : 1;
+ uint32_t prtlnsts : 2;
+ uint32_t reserved_9_9 : 1;
+ uint32_t prtrst : 1;
+ uint32_t prtsusp : 1;
+ uint32_t prtres : 1;
+ uint32_t prtovrcurrchng : 1;
+ uint32_t prtovrcurract : 1;
+ uint32_t prtenchng : 1;
+ uint32_t prtena : 1;
+ uint32_t prtconndet : 1;
+ uint32_t prtconnsts : 1;
+ } s;
+};
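+
+/*
+ * Illustrative sketch, not part of the original header: driving a port reset
+ * through HPRT. It uses the hypothetical usbc_read32()/usbc_write32()
+ * accessors declared with the GINTSTS example, and example_delay_ms() is a
+ * hypothetical stand-in for the driver's delay primitive. Because PrtEna and
+ * the change/detect bits are cleared by writing 1, they are zeroed in the
+ * value written back so an unrelated write does not clear them (or disable
+ * the port) by accident.
+ */
+extern void example_delay_ms(unsigned int milliseconds);
+
+static inline void cvmx_usbcx_hprt_reset_example(uint64_t hprt_addr)
+{
+	union cvmx_usbcx_hprt hprt;
+
+	hprt.u32 = usbc_read32(hprt_addr);
+	hprt.s.prtena = 0;		/* writing 1 would disable the port */
+	hprt.s.prtconndet = 0;		/* leave pending W1C bits untouched */
+	hprt.s.prtenchng = 0;
+	hprt.s.prtovrcurrchng = 0;
+
+	hprt.s.prtrst = 1;		/* start the reset sequence */
+	usbc_write32(hprt_addr, hprt.u32);
+
+	example_delay_ms(50);		/* >= 50 ms for a high-speed port */
+
+	hprt.s.prtrst = 0;		/* the application times and ends it */
+	usbc_write32(hprt_addr, hprt.u32);
+}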
+
+/**
+ * cvmx_usbc#_hptxfsiz
+ *
+ * Host Periodic Transmit FIFO Size Register (HPTXFSIZ)
+ *
+ * This register holds the size and the memory start address of the Periodic
+ * TxFIFO, as shown in Figures 310 and 311.
+ */
+union cvmx_usbcx_hptxfsiz {
+ uint32_t u32;
+ /**
+ * struct cvmx_usbcx_hptxfsiz_s
+ * @ptxfsize: Host Periodic TxFIFO Depth (PTxFSize)
+ * This value is in terms of 32-bit words.
+ * * Minimum value is 16
+ * * Maximum value is 32768
+ * @ptxfstaddr: Host Periodic TxFIFO Start Address (PTxFStAddr)
+ */
+ struct cvmx_usbcx_hptxfsiz_s {
+ uint32_t ptxfsize : 16;
+ uint32_t ptxfstaddr : 16;
+ } s;
+};
+
+/**
+ * cvmx_usbc#_hptxsts
+ *
+ * Host Periodic Transmit FIFO/Queue Status Register (HPTXSTS)
+ *
+ * This read-only register contains the free space information for the Periodic
+ * TxFIFO and the Periodic Transmit Request Queue
+ */
+union cvmx_usbcx_hptxsts {
+ uint32_t u32;
+ /**
+ * struct cvmx_usbcx_hptxsts_s
+ * @ptxqtop: Top of the Periodic Transmit Request Queue (PTxQTop)
+ * This indicates the entry in the Periodic Tx Request Queue that
+ * is currently being processed by the MAC.
+ * This register is used for debugging.
+ * * Bit [31]: Odd/Even (micro)frame
+ * - 1'b0: send in even (micro)frame
+ * - 1'b1: send in odd (micro)frame
+ * * Bits [30:27]: Channel/endpoint number
+ * * Bits [26:25]: Type
+ * - 2'b00: IN/OUT
+ * - 2'b01: Zero-length packet
+ * - 2'b10: CSPLIT
+ * - 2'b11: Disable channel command
+ * * Bit [24]: Terminate (last entry for the selected
+ * channel/endpoint)
+ * @ptxqspcavail: Periodic Transmit Request Queue Space Available
+ * (PTxQSpcAvail)
+ * Indicates the number of free locations available to be written
+ * in the Periodic Transmit Request Queue. This queue holds both
+ * IN and OUT requests.
+ * * 8'h0: Periodic Transmit Request Queue is full
+ * * 8'h1: 1 location available
+ * * 8'h2: 2 locations available
+ * * n: n locations available (0..8)
+ * * Others: Reserved
+ * @ptxfspcavail: Periodic Transmit Data FIFO Space Available
+ * (PTxFSpcAvail)
+ * Indicates the number of free locations available to be written
+ * to in the Periodic TxFIFO.
+ * Values are in terms of 32-bit words
+ * * 16'h0: Periodic TxFIFO is full
+ * * 16'h1: 1 word available
+ * * 16'h2: 2 words available
+ * * 16'hn: n words available (where 0..32768)
+ * * 16'h8000: 32768 words available
+ * * Others: Reserved
+ */
+ struct cvmx_usbcx_hptxsts_s {
+ uint32_t ptxqtop : 8;
+ uint32_t ptxqspcavail : 8;
+ uint32_t ptxfspcavail : 16;
+ } s;
+};
+
+#endif
diff --git a/drivers/staging/octeon-usb/cvmx-usbnx-defs.h b/drivers/staging/octeon-usb/cvmx-usbnx-defs.h
new file mode 100644
index 0000000..e06aafa
--- /dev/null
+++ b/drivers/staging/octeon-usb/cvmx-usbnx-defs.h
@@ -0,0 +1,885 @@
+/***********************license start***************
+ * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Networks nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION
+ * OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * cvmx-usbnx-defs.h
+ *
+ * Configuration and status register (CSR) type definitions for
+ * Octeon usbnx.
+ *
+ */
+#ifndef __CVMX_USBNX_TYPEDEFS_H__
+#define __CVMX_USBNX_TYPEDEFS_H__
+
+#define CVMX_USBNXBID1(bid) (((bid) & 1) * 0x10000000ull)
+#define CVMX_USBNXBID2(bid) (((bid) & 1) * 0x100000000000ull)
+
+#define CVMX_USBNXREG1(reg, bid) \
+ (CVMX_ADD_IO_SEG(0x0001180068000000ull | reg) + CVMX_USBNXBID1(bid))
+#define CVMX_USBNXREG2(reg, bid) \
+ (CVMX_ADD_IO_SEG(0x00016F0000000000ull | reg) + CVMX_USBNXBID2(bid))
+
+#define CVMX_USBNX_CLK_CTL(bid) CVMX_USBNXREG1(0x10, bid)
+#define CVMX_USBNX_DMA0_INB_CHN0(bid) CVMX_USBNXREG2(0x818, bid)
+#define CVMX_USBNX_DMA0_OUTB_CHN0(bid) CVMX_USBNXREG2(0x858, bid)
+#define CVMX_USBNX_USBP_CTL_STATUS(bid) CVMX_USBNXREG1(0x18, bid)
+
+/**
+ * cvmx_usbn#_clk_ctl
+ *
+ * USBN_CLK_CTL = USBN's Clock Control
+ *
+ * This register is used to control the frequency of the hclk and the
+ * hreset and phy_rst signals.
+ */
+union cvmx_usbnx_clk_ctl {
+ uint64_t u64;
+ /**
+ * struct cvmx_usbnx_clk_ctl_s
+ * @divide2: The 'hclk' used by the USB subsystem is derived
+ * from the eclk.
+ * Also see the field DIVIDE. DIVIDE2<1> must currently
+ * be zero because it is not implemented, so the maximum
+ * ratio of eclk/hclk is currently 16.
+ * The actual divide number for hclk is:
+ * (DIVIDE2 + 1) * (DIVIDE + 1)
+ * @hclk_rst: When this field is '0' the HCLK-DIVIDER used to
+ * generate the hclk in the USB Subsystem is held
+ * in reset. This bit must be set to '0' before
+ * changing the value of DIVIDE in this register.
+ * The reset to the HCLK_DIVIDER is also asserted
+ * when core reset is asserted.
+ * @p_x_on: Force USB-PHY on during suspend.
+ * '1' USB-PHY XO block is powered-down during
+ * suspend.
+ * '0' USB-PHY XO block is powered-up during
+ * suspend.
+ * The value of this field must be set while POR is
+ * active.
+ * @p_com_on: '0' Force USB-PHY XO Bias, Bandgap and PLL to
+ * remain powered in Suspend Mode.
+ * '1' The USB-PHY XO Bias, Bandgap and PLL are
+ * powered down in suspend mode.
+ * The value of this field must be set while POR is
+ * active.
+ * @p_c_sel: Phy clock speed select.
+ * Selects the reference clock / crystal frequency.
+ * '11': Reserved
+ * '10': 48 MHz (reserved when a crystal is used)
+ * '01': 24 MHz (reserved when a crystal is used)
+ * '00': 12 MHz
+ * The value of this field must be set while POR is
+ * active.
+ * NOTE: if a crystal is used as a reference clock,
+ * this field must be set to 12 MHz.
+ * @cdiv_byp: Used to enable the bypass input to the USB_CLK_DIV.
+ * @sd_mode: Scaledown mode for the USBC. Controls timing events
+ * in the USBC; for normal operation this must be '0'.
+ * @s_bist: Starts bist on the hclk memories, during the '0'
+ * to '1' transition.
+ * @por: Power On Reset for the PHY.
+ * Resets all the PHYS registers and state machines.
+ * @enable: When '1' allows the generation of the hclk. When
+ * '0' the hclk will not be generated. SEE DIVIDE
+ * field of this register.
+ * @prst: When this field is '0' the reset associated with
+ * the phy_clk functionality in the USB Subsystem is
+ * held in reset. This bit should not be set to '1'
+ * until the time it takes 6 clocks (hclk or phy_clk,
+ * whichever is slower) has passed. Under normal
+ * operation once this bit is set to '1' it should not
+ * be set to '0'.
+ * @hrst: When this field is '0' the reset associated with
+ * the hclk functionality in the USB Subsystem is
+ * held in reset. This bit should not be set to '1'
+ * until 12 ms after phy_clk is stable. Under normal
+ * operation, once this bit is set to '1' it should
+ * not be set to '0'.
+ * @divide: The frequency of 'hclk' used by the USB subsystem
+ * is the eclk frequency divided by the value of
+ * (DIVIDE2 + 1) * (DIVIDE + 1), also see the field
+ * DIVIDE2 of this register.
+ * The hclk frequency should be less than 125 MHz.
+ * After writing a value to this field the SW should
+ * read the field for the value written.
+ * The ENABLE field of this register should not be set
+ * until AFTER this field is set and then read.
+ */
+ struct cvmx_usbnx_clk_ctl_s {
+ uint64_t reserved_20_63 : 44;
+ uint64_t divide2 : 2;
+ uint64_t hclk_rst : 1;
+ uint64_t p_x_on : 1;
+ uint64_t reserved_14_15 : 2;
+ uint64_t p_com_on : 1;
+ uint64_t p_c_sel : 2;
+ uint64_t cdiv_byp : 1;
+ uint64_t sd_mode : 2;
+ uint64_t s_bist : 1;
+ uint64_t por : 1;
+ uint64_t enable : 1;
+ uint64_t prst : 1;
+ uint64_t hrst : 1;
+ uint64_t divide : 3;
+ } s;
+ /**
+ * struct cvmx_usbnx_clk_ctl_cn30xx
+ * @hclk_rst: When this field is '0' the HCLK-DIVIDER used to
+ * generate the hclk in the USB Subsystem is held
+ * in reset. This bit must be set to '0' before
+ * changing the value of DIVIDE in this register.
+ * The reset to the HCLK_DIVIDER is also asserted
+ * when core reset is asserted.
+ * @p_x_on: Force USB-PHY on during suspend.
+ * '1' USB-PHY XO block is powered-down during
+ * suspend.
+ * '0' USB-PHY XO block is powered-up during
+ * suspend.
+ * The value of this field must be set while POR is
+ * active.
+ * @p_rclk: Phy reference clock enable.
+ * '1' The PHY PLL uses the XO block output as a
+ * reference.
+ * '0' Reserved.
+ * @p_xenbn: Phy external clock enable.
+ * '1' The XO block uses the clock from a crystal.
+ * '0' The XO block uses an external clock supplied
+ * on the XO pin. USB_XI should be tied to
+ * ground for this usage.
+ * @p_com_on: '0' Force USB-PHY XO Bias, Bandgap and PLL to
+ * remain powered in Suspend Mode.
+ * '1' The USB-PHY XO Bias, Bandgap and PLL are
+ * powered down in suspend mode.
+ * The value of this field must be set while POR is
+ * active.
+ * @p_c_sel: Phy clock speed select.
+ * Selects the reference clock / crystal frequency.
+ * '11': Reserved
+ * '10': 48 MHz
+ * '01': 24 MHz
+ * '00': 12 MHz
+ * The value of this field must be set while POR is
+ * active.
+ * @cdiv_byp: Used to enable the bypass input to the USB_CLK_DIV.
+ * @sd_mode: Scaledown mode for the USBC. Controls timing events
+ * in the USBC; for normal operation this must be '0'.
+ * @s_bist: Starts bist on the hclk memories, during the '0'
+ * to '1' transition.
+ * @por: Power On Reset for the PHY.
+ * Resets all the PHYS registers and state machines.
+ * @enable: When '1' allows the generation of the hclk. When
+ * '0' the hclk will not be generated.
+ * @prst: When this field is '0' the reset associated with
+ * the phy_clk functionality in the USB Subsystem is
+ * held in reset. This bit should not be set to '1'
+ * until the time it takes 6 clocks (hclk or phy_clk,
+ * whichever is slower) has passed. Under normal
+ * operation once this bit is set to '1' it should not
+ * be set to '0'.
+ * @hrst: When this field is '0' the reset associated with
+ * the hclk functionality in the USB Subsystem is
+ * held in reset. This bit should not be set to '1'
+ * until 12 ms after phy_clk is stable. Under normal
+ * operation, once this bit is set to '1' it should
+ * not be set to '0'.
+ * @divide: The 'hclk' used by the USB subsystem is derived
+ * from the eclk. The eclk will be divided by the
+ * value of this field +1 to determine the hclk
+ * frequency. (Also see HRST of this register).
+ * The hclk frequency must be less than 125 MHz.
+ */
+ struct cvmx_usbnx_clk_ctl_cn30xx {
+ uint64_t reserved_18_63 : 46;
+ uint64_t hclk_rst : 1;
+ uint64_t p_x_on : 1;
+ uint64_t p_rclk : 1;
+ uint64_t p_xenbn : 1;
+ uint64_t p_com_on : 1;
+ uint64_t p_c_sel : 2;
+ uint64_t cdiv_byp : 1;
+ uint64_t sd_mode : 2;
+ uint64_t s_bist : 1;
+ uint64_t por : 1;
+ uint64_t enable : 1;
+ uint64_t prst : 1;
+ uint64_t hrst : 1;
+ uint64_t divide : 3;
+ } cn30xx;
+ struct cvmx_usbnx_clk_ctl_cn30xx cn31xx;
+ /**
+ * struct cvmx_usbnx_clk_ctl_cn50xx
+ * @divide2: The 'hclk' used by the USB subsystem is derived
+ * from the eclk.
+ * Also see the field DIVIDE. DIVIDE2<1> must currently
+ * be zero because it is not implemented, so the maximum
+ * ratio of eclk/hclk is currently 16.
+ * The actual divide number for hclk is:
+ * (DIVIDE2 + 1) * (DIVIDE + 1)
+ * @hclk_rst: When this field is '0' the HCLK-DIVIDER used to
+ * generate the hclk in the USB Subsystem is held
+ * in reset. This bit must be set to '0' before
+ * changing the value of DIVIDE in this register.
+ * The reset to the HCLK_DIVIDER is also asserted
+ * when core reset is asserted.
+ * @p_rtype: PHY reference clock type
+ * '0' The USB-PHY uses a 12MHz crystal as a clock
+ * source at the USB_XO and USB_XI pins
+ * '1' Reserved
+ * '2' The USB-PHY uses a 12/24/48 MHz 2.5V board clock
+ * at the USB_XO pin. USB_XI should be tied to
+ * ground in this case.
+ * '3' Reserved
+ * (bit 14 was P_XENBN on 3xxx)
+ * (bit 15 was P_RCLK on 3xxx)
+ * @p_com_on: '0' Force USB-PHY XO Bias, Bandgap and PLL to
+ * remain powered in Suspend Mode.
+ * '1' The USB-PHY XO Bias, Bandgap and PLL are
+ * powered down in suspend mode.
+ * The value of this field must be set while POR is
+ * active.
+ * @p_c_sel: Phy clock speed select.
+ * Selects the reference clock / crystal frequency.
+ * '11': Reserved
+ * '10': 48 MHz (reserved when a crystal is used)
+ * '01': 24 MHz (reserved when a crystal is used)
+ * '00': 12 MHz
+ * The value of this field must be set while POR is
+ * active.
+ * NOTE: if a crystal is used as a reference clock,
+ * this field must be set to 12 MHz.
+ * @cdiv_byp: Used to enable the bypass input to the USB_CLK_DIV.
+ * @sd_mode: Scaledown mode for the USBC. Controls timing events
+ * in the USBC; for normal operation this must be '0'.
+ * @s_bist: Starts bist on the hclk memories, during the '0'
+ * to '1' transition.
+ * @por: Power On Reset for the PHY.
+ * Resets all the PHY's registers and state machines.
+ * @enable: When '1' allows the generation of the hclk. When
+ * '0' the hclk will not be generated. SEE DIVIDE
+ * field of this register.
+ * @prst: When this field is '0' the reset associated with
+ * the phy_clk functionality in the USB Subsystem is
+ * held in reset. This bit should not be set to '1'
+ * until 6 clocks (hclk or phy_clk, whichever is
+ * slower) have passed. Under normal
+ * operation once this bit is set to '1' it should not
+ * be set to '0'.
+ * @hrst: When this field is '0' the reset associated with
+ * the hclk functionality in the USB Subsystem is
+ * held in reset. This bit should not be set to '1'
+ * until 12ms after phy_clk is stable. Under normal
+ * operation, once this bit is set to '1' it should
+ * not be set to '0'.
+ * @divide: The frequency of 'hclk' used by the USB subsystem
+ * is the eclk frequency divided by the value of
+ * (DIVIDE2 + 1) * (DIVIDE + 1), also see the field
+ * DIVIDE2 of this register.
+ * The hclk frequency should be less than 125 MHz.
+ * After writing a value to this field, software should
+ * read the field back to confirm the value written.
+ * The ENABLE field of this register should not be set
+ * until AFTER this field is set and then read.
+ */
+ struct cvmx_usbnx_clk_ctl_cn50xx {
+ uint64_t reserved_20_63 : 44;
+ uint64_t divide2 : 2;
+ uint64_t hclk_rst : 1;
+ uint64_t reserved_16_16 : 1;
+ uint64_t p_rtype : 2;
+ uint64_t p_com_on : 1;
+ uint64_t p_c_sel : 2;
+ uint64_t cdiv_byp : 1;
+ uint64_t sd_mode : 2;
+ uint64_t s_bist : 1;
+ uint64_t por : 1;
+ uint64_t enable : 1;
+ uint64_t prst : 1;
+ uint64_t hrst : 1;
+ uint64_t divide : 3;
+ } cn50xx;
+ struct cvmx_usbnx_clk_ctl_cn50xx cn52xx;
+ struct cvmx_usbnx_clk_ctl_cn50xx cn56xx;
+};
+
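The DIVIDE/DIVIDE2, HCLK_RST and ENABLE descriptions above imply a specific bring-up order: hold the divider in reset while DIVIDE is changed, read the field back, and only then set ENABLE. A minimal sketch of that sequence, assuming the union is named cvmx_usbnx_clk_ctl with a u64 member (mirroring cvmx_usbnx_usbp_ctl_status below) and that a CVMX_USBNX_CLK_CTL() address macro exists alongside the cvmx_read_csr()/cvmx_write_csr() accessors used elsewhere in this driver:

/*
 * Sketch only: program the hclk divider before enabling the clock, per
 * the HCLK_RST/DIVIDE/ENABLE notes above (cn50xx field layout). The
 * register address macro name is an assumption.
 */
static void octeon_usb_clk_setup_sketch(int usb_port, unsigned int divide)
{
	union cvmx_usbnx_clk_ctl clk;

	/* Hold the HCLK divider in reset before touching DIVIDE. */
	clk.u64 = cvmx_read_csr(CVMX_USBNX_CLK_CTL(usb_port));
	clk.cn50xx.hclk_rst = 0;
	cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb_port), clk.u64);

	/* hclk = eclk / ((DIVIDE2 + 1) * (DIVIDE + 1)); keep it below 125 MHz. */
	clk.cn50xx.divide = divide & 0x7;
	clk.cn50xx.divide2 = 0;
	cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb_port), clk.u64);

	/* Read DIVIDE back before setting ENABLE, as required above. */
	clk.u64 = cvmx_read_csr(CVMX_USBNX_CLK_CTL(usb_port));
	clk.cn50xx.hclk_rst = 1;	/* release the divider */
	clk.cn50xx.enable = 1;		/* allow hclk generation */
	cvmx_write_csr(CVMX_USBNX_CLK_CTL(usb_port), clk.u64);
}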
+/**
+ * cvmx_usbn#_usbp_ctl_status
+ *
+ * USBN_USBP_CTL_STATUS = USBP Control And Status Register
+ *
+ * Contains general control and status information for the USBN block.
+ */
+union cvmx_usbnx_usbp_ctl_status {
+ uint64_t u64;
+ /**
+ * struct cvmx_usbnx_usbp_ctl_status_s
+ * @txrisetune: HS Transmitter Rise/Fall Time Adjustment
+ * @txvreftune: HS DC Voltage Level Adjustment
+ * @txfslstune: FS/LS Source Impedance Adjustment
+ * @txhsxvtune: Transmitter High-Speed Crossover Adjustment
+ * @sqrxtune: Squelch Threshold Adjustment
+ * @compdistune: Disconnect Threshold Adjustment
+ * @otgtune: VBUS Valid Threshold Adjustment
+ * @otgdisable: OTG Block Disable
+ * @portreset: Per_Port Reset
+ * @drvvbus: Drive VBUS
+ * @lsbist: Low-Speed BIST Enable.
+ * @fsbist: Full-Speed BIST Enable.
+ * @hsbist: High-Speed BIST Enable.
+ * @bist_done: PHY Bist Done.
+ * Asserted at the end of the PHY BIST sequence.
+ * @bist_err: PHY Bist Error.
+ * Indicates an internal error was detected during
+ * the BIST sequence.
+ * @tdata_out: PHY Test Data Out.
+ * Presents either internally generated signals or
+ * test register contents, based upon the value of
+ * test_data_out_sel.
+ * @siddq: Drives the USBP (USB-PHY) SIDDQ input.
+ * Normally should be set to zero.
+ * When customers have no intent to use USB PHY
+ * interface, they should:
+ * - still provide 3.3V to USB_VDD33, and
+ * - tie USB_REXT to 3.3V supply, and
+ * - set USBN*_USBP_CTL_STATUS[SIDDQ]=1
+ * @txpreemphasistune: HS Transmitter Pre-Emphasis Enable
+ * @dma_bmode: When set to 1 the L2C DMA address will be updated
+ * with byte-counts between packets. When set to 0
+ * the L2C DMA address is incremented to the next
+ * 4-byte aligned address after adding byte-count.
+ * @usbc_end: Bigendian input to the USB Core. This should be
+ * set to '0' for operation.
+ * @usbp_bist: PHY BIST. This is cleared to '0' to run BIST on the USBP.
+ * @tclk: PHY Test Clock, used to load TDATA_IN to the USBP.
+ * @dp_pulld: PHY DP_PULLDOWN input to the USB-PHY.
+ * This signal enables the pull-down resistance on
+ * the D+ line. '1' pull-down resistance is connected
+ * to D+. '0' pull-down resistance is not connected
+ * to D+. When an A/B device is acting as a host
+ * (downstream-facing port), dp_pulldown and
+ * dm_pulldown are enabled. This must not toggle
+ * during normal operation.
+ * @dm_pulld: PHY DM_PULLDOWN input to the USB-PHY.
+ * This signal enables the pull-down resistance on
+ * the D- line. '1' pull-down resistance is connected
+ * to D-. '0' pull-down resistance is not connected
+ * to D-. When an A/B device is acting as a host
+ * (downstream-facing port), dp_pulldown and
+ * dm_pulldown are enabled. This must not toggle
+ * during normal operation.
+ * @hst_mode: When '0' the USB is acting as HOST, when '1'
+ * USB is acting as device. This field needs to be
+ * set while the USB is in reset.
+ * @tuning: Transmitter Tuning for High-Speed Operation.
+ * Tunes the current supply and rise/fall output
+ * times for high-speed operation.
+ * [20:19] == 11: Current supply increased
+ * approximately 9%
+ * [20:19] == 10: Current supply increased
+ * approximately 4.5%
+ * [20:19] == 01: Design default.
+ * [20:19] == 00: Current supply decreased
+ * approximately 4.5%
+ * [22:21] == 11: Rise and fall times are increased.
+ * [22:21] == 10: Design default.
+ * [22:21] == 01: Rise and fall times are decreased.
+ * [22:21] == 00: Rise and fall times are decreased
+ * further as compared to the 01 setting.
+ * @tx_bs_enh: Transmit Bit Stuffing on [15:8].
+ * Enables or disables bit stuffing on data[15:8]
+ * when bit-stuffing is enabled.
+ * @tx_bs_en: Transmit Bit Stuffing on [7:0].
+ * Enables or disables bit stuffing on data[7:0]
+ * when bit-stuffing is enabled.
+ * @loop_enb: PHY Loopback Test Enable.
+ * '1': During data transmission the receive is
+ * enabled.
+ * '0': During data transmission the receive is
+ * disabled.
+ * Must be '0' for normal operation.
+ * @vtest_enb: Analog Test Pin Enable.
+ * '1' The PHY's analog_test pin is enabled for the
+ * input and output of applicable analog test signals.
+ * '0' The analog_test pin is disabled.
+ * @bist_enb: Built-In Self Test Enable.
+ * Used to activate BIST in the PHY.
+ * @tdata_sel: Test Data Out Select.
+ * '1' test_data_out[3:0] (PHY) register contents
+ * are output. '0' internally generated signals are
+ * output.
+ * @taddr_in: Mode Address for Test Interface.
+ * Specifies the register address for writing to or
+ * reading from the PHY test interface register.
+ * @tdata_in: Internal Testing Register Input Data and Select
+ * This is a test bus. Data is present on [3:0],
+ * and its corresponding select (enable) is present
+ * on bits [7:4].
+ * @ate_reset: Reset input from automatic test equipment.
+ * This is a test signal. When the USB Core is
+ * powered up (not in Suspend Mode), an automatic
+ * tester can use this to disable phy_clock and
+ * free_clk, then re-enable them with an aligned
+ * phase.
+ * '1': The phy_clk and free_clk outputs are
+ * disabled. '0': The phy_clock and free_clk outputs
+ * are available within a specific period after the
+ * de-assertion.
+ */
+ struct cvmx_usbnx_usbp_ctl_status_s {
+ uint64_t txrisetune : 1;
+ uint64_t txvreftune : 4;
+ uint64_t txfslstune : 4;
+ uint64_t txhsxvtune : 2;
+ uint64_t sqrxtune : 3;
+ uint64_t compdistune : 3;
+ uint64_t otgtune : 3;
+ uint64_t otgdisable : 1;
+ uint64_t portreset : 1;
+ uint64_t drvvbus : 1;
+ uint64_t lsbist : 1;
+ uint64_t fsbist : 1;
+ uint64_t hsbist : 1;
+ uint64_t bist_done : 1;
+ uint64_t bist_err : 1;
+ uint64_t tdata_out : 4;
+ uint64_t siddq : 1;
+ uint64_t txpreemphasistune : 1;
+ uint64_t dma_bmode : 1;
+ uint64_t usbc_end : 1;
+ uint64_t usbp_bist : 1;
+ uint64_t tclk : 1;
+ uint64_t dp_pulld : 1;
+ uint64_t dm_pulld : 1;
+ uint64_t hst_mode : 1;
+ uint64_t tuning : 4;
+ uint64_t tx_bs_enh : 1;
+ uint64_t tx_bs_en : 1;
+ uint64_t loop_enb : 1;
+ uint64_t vtest_enb : 1;
+ uint64_t bist_enb : 1;
+ uint64_t tdata_sel : 1;
+ uint64_t taddr_in : 4;
+ uint64_t tdata_in : 8;
+ uint64_t ate_reset : 1;
+ } s;
+ /**
+ * struct cvmx_usbnx_usbp_ctl_status_cn30xx
+ * @bist_done: PHY Bist Done.
+ * Asserted at the end of the PHY BIST sequence.
+ * @bist_err: PHY Bist Error.
+ * Indicates an internal error was detected during
+ * the BIST sequence.
+ * @tdata_out: PHY Test Data Out.
+ * Presents either internally generated signals or
+ * test register contents, based upon the value of
+ * test_data_out_sel.
+ * @dma_bmode: When set to 1 the L2C DMA address will be updated
+ * with byte-counts between packets. When set to 0
+ * the L2C DMA address is incremented to the next
+ * 4-byte aligned address after adding byte-count.
+ * @usbc_end: Bigendian input to the USB Core. This should be
+ * set to '0' for operation.
+ * @usbp_bist: PHY BIST. This is cleared to '0' to run BIST on the USBP.
+ * @tclk: PHY Test Clock, used to load TDATA_IN to the USBP.
+ * @dp_pulld: PHY DP_PULLDOWN input to the USB-PHY.
+ * This signal enables the pull-down resistance on
+ * the D+ line. '1' pull-down resistance is connected
+ * to D+. '0' pull-down resistance is not connected
+ * to D+. When an A/B device is acting as a host
+ * (downstream-facing port), dp_pulldown and
+ * dm_pulldown are enabled. This must not toggle
+ * during normal operation.
+ * @dm_pulld: PHY DM_PULLDOWN input to the USB-PHY.
+ * This signal enables the pull-down resistance on
+ * the D- line. '1' pull-down resistance is connected
+ * to D-. '0' pull-down resistance is not connected
+ * to D-. When an A/B device is acting as a host
+ * (downstream-facing port), dp_pulldown and
+ * dm_pulldown are enabled. This must not toggle
+ * during normal operation.
+ * @hst_mode: When '0' the USB is acting as HOST, when '1'
+ * USB is acting as device. This field needs to be
+ * set while the USB is in reset.
+ * @tuning: Transmitter Tuning for High-Speed Operation.
+ * Tunes the current supply and rise/fall output
+ * times for high-speed operation.
+ * [20:19] == 11: Current supply increased
+ * approximately 9%
+ * [20:19] == 10: Current supply increased
+ * approximately 4.5%
+ * [20:19] == 01: Design default.
+ * [20:19] == 00: Current supply decreased
+ * approximately 4.5%
+ * [22:21] == 11: Rise and fall times are increased.
+ * [22:21] == 10: Design default.
+ * [22:21] == 01: Rise and fall times are decreased.
+ * [22:21] == 00: Rise and fall times are decreased
+ * further as compared to the 01 setting.
+ * @tx_bs_enh: Transmit Bit Stuffing on [15:8].
+ * Enables or disables bit stuffing on data[15:8]
+ * when bit-stuffing is enabled.
+ * @tx_bs_en: Transmit Bit Stuffing on [7:0].
+ * Enables or disables bit stuffing on data[7:0]
+ * when bit-stuffing is enabled.
+ * @loop_enb: PHY Loopback Test Enable.
+ * '1': During data transmission the receive is
+ * enabled.
+ * '0': During data transmission the receive is
+ * disabled.
+ * Must be '0' for normal operation.
+ * @vtest_enb: Analog Test Pin Enable.
+ * '1' The PHY's analog_test pin is enabled for the
+ * input and output of applicable analog test signals.
+ * '0' The analog_test pin is disabled.
+ * @bist_enb: Built-In Self Test Enable.
+ * Used to activate BIST in the PHY.
+ * @tdata_sel: Test Data Out Select.
+ * '1' test_data_out[3:0] (PHY) register contents
+ * are output. '0' internally generated signals are
+ * output.
+ * @taddr_in: Mode Address for Test Interface.
+ * Specifies the register address for writing to or
+ * reading from the PHY test interface register.
+ * @tdata_in: Internal Testing Register Input Data and Select
+ * This is a test bus. Data is present on [3:0],
+ * and its corresponding select (enable) is present
+ * on bits [7:4].
+ * @ate_reset: Reset input from automatic test equipment.
+ * This is a test signal. When the USB Core is
+ * powered up (not in Suspend Mode), an automatic
+ * tester can use this to disable phy_clock and
+ * free_clk, then re-enable them with an aligned
+ * phase.
+ * '1': The phy_clk and free_clk outputs are
+ * disabled. '0': The phy_clock and free_clk outputs
+ * are available within a specific period after the
+ * de-assertion.
+ */
+ struct cvmx_usbnx_usbp_ctl_status_cn30xx {
+ uint64_t reserved_38_63 : 26;
+ uint64_t bist_done : 1;
+ uint64_t bist_err : 1;
+ uint64_t tdata_out : 4;
+ uint64_t reserved_30_31 : 2;
+ uint64_t dma_bmode : 1;
+ uint64_t usbc_end : 1;
+ uint64_t usbp_bist : 1;
+ uint64_t tclk : 1;
+ uint64_t dp_pulld : 1;
+ uint64_t dm_pulld : 1;
+ uint64_t hst_mode : 1;
+ uint64_t tuning : 4;
+ uint64_t tx_bs_enh : 1;
+ uint64_t tx_bs_en : 1;
+ uint64_t loop_enb : 1;
+ uint64_t vtest_enb : 1;
+ uint64_t bist_enb : 1;
+ uint64_t tdata_sel : 1;
+ uint64_t taddr_in : 4;
+ uint64_t tdata_in : 8;
+ uint64_t ate_reset : 1;
+ } cn30xx;
+ /**
+ * struct cvmx_usbnx_usbp_ctl_status_cn50xx
+ * @txrisetune: HS Transmitter Rise/Fall Time Adjustment
+ * @txvreftune: HS DC Voltage Level Adjustment
+ * @txfslstune: FS/LS Source Impedance Adjustment
+ * @txhsxvtune: Transmitter High-Speed Crossover Adjustment
+ * @sqrxtune: Squelch Threshold Adjustment
+ * @compdistune: Disconnect Threshold Adjustment
+ * @otgtune: VBUS Valid Threshold Adjustment
+ * @otgdisable: OTG Block Disable
+ * @portreset: Per_Port Reset
+ * @drvvbus: Drive VBUS
+ * @lsbist: Low-Speed BIST Enable.
+ * @fsbist: Full-Speed BIST Enable.
+ * @hsbist: High-Speed BIST Enable.
+ * @bist_done: PHY Bist Done.
+ * Asserted at the end of the PHY BIST sequence.
+ * @bist_err: PHY Bist Error.
+ * Indicates an internal error was detected during
+ * the BIST sequence.
+ * @tdata_out: PHY Test Data Out.
+ * Presents either internally generated signals or
+ * test register contents, based upon the value of
+ * test_data_out_sel.
+ * @txpreemphasistune: HS Transmitter Pre-Emphasis Enable
+ * @dma_bmode: When set to 1 the L2C DMA address will be updated
+ * with byte-counts between packets. When set to 0
+ * the L2C DMA address is incremented to the next
+ * 4-byte aligned address after adding byte-count.
+ * @usbc_end: Bigendian input to the USB Core. This should be
+ * set to '0' for operation.
+ * @usbp_bist: PHY BIST. This is cleared to '0' to run BIST on the USBP.
+ * @tclk: PHY Test Clock, used to load TDATA_IN to the USBP.
+ * @dp_pulld: PHY DP_PULLDOWN input to the USB-PHY.
+ * This signal enables the pull-down resistance on
+ * the D+ line. '1' pull-down resistance is connected
+ * to D+. '0' pull-down resistance is not connected
+ * to D+. When an A/B device is acting as a host
+ * (downstream-facing port), dp_pulldown and
+ * dm_pulldown are enabled. This must not toggle
+ * during normal operation.
+ * @dm_pulld: PHY DM_PULLDOWN input to the USB-PHY.
+ * This signal enables the pull-down resistance on
+ * the D- line. '1' pull-down resistance is connected
+ * to D-. '0' pull-down resistance is not connected
+ * to D-. When an A/B device is acting as a host
+ * (downstream-facing port), dp_pulldown and
+ * dm_pulldown are enabled. This must not toggle
+ * during normal operation.
+ * @hst_mode: When '0' the USB is acting as HOST, when '1'
+ * USB is acting as device. This field needs to be
+ * set while the USB is in reset.
+ * @tx_bs_enh: Transmit Bit Stuffing on [15:8].
+ * Enables or disables bit stuffing on data[15:8]
+ * when bit-stuffing is enabled.
+ * @tx_bs_en: Transmit Bit Stuffing on [7:0].
+ * Enables or disables bit stuffing on data[7:0]
+ * when bit-stuffing is enabled.
+ * @loop_enb: PHY Loopback Test Enable.
+ * '1': During data transmission the receive is
+ * enabled.
+ * '0': During data transmission the receive is
+ * disabled.
+ * Must be '0' for normal operation.
+ * @vtest_enb: Analog Test Pin Enable.
+ * '1' The PHY's analog_test pin is enabled for the
+ * input and output of applicable analog test signals.
+ * '0' The analog_test pin is disabled.
+ * @bist_enb: Built-In Self Test Enable.
+ * Used to activate BIST in the PHY.
+ * @tdata_sel: Test Data Out Select.
+ * '1' test_data_out[3:0] (PHY) register contents
+ * are output. '0' internally generated signals are
+ * output.
+ * @taddr_in: Mode Address for Test Interface.
+ * Specifies the register address for writing to or
+ * reading from the PHY test interface register.
+ * @tdata_in: Internal Testing Register Input Data and Select
+ * This is a test bus. Data is present on [3:0],
+ * and its corresponding select (enable) is present
+ * on bits [7:4].
+ * @ate_reset: Reset input from automatic test equipment.
+ * This is a test signal. When the USB Core is
+ * powered up (not in Suspend Mode), an automatic
+ * tester can use this to disable phy_clock and
+ * free_clk, then re-enable them with an aligned
+ * phase.
+ * '1': The phy_clk and free_clk outputs are
+ * disabled. '0': The phy_clock and free_clk outputs
+ * are available within a specific period after the
+ * de-assertion.
+ */
+ struct cvmx_usbnx_usbp_ctl_status_cn50xx {
+ uint64_t txrisetune : 1;
+ uint64_t txvreftune : 4;
+ uint64_t txfslstune : 4;
+ uint64_t txhsxvtune : 2;
+ uint64_t sqrxtune : 3;
+ uint64_t compdistune : 3;
+ uint64_t otgtune : 3;
+ uint64_t otgdisable : 1;
+ uint64_t portreset : 1;
+ uint64_t drvvbus : 1;
+ uint64_t lsbist : 1;
+ uint64_t fsbist : 1;
+ uint64_t hsbist : 1;
+ uint64_t bist_done : 1;
+ uint64_t bist_err : 1;
+ uint64_t tdata_out : 4;
+ uint64_t reserved_31_31 : 1;
+ uint64_t txpreemphasistune : 1;
+ uint64_t dma_bmode : 1;
+ uint64_t usbc_end : 1;
+ uint64_t usbp_bist : 1;
+ uint64_t tclk : 1;
+ uint64_t dp_pulld : 1;
+ uint64_t dm_pulld : 1;
+ uint64_t hst_mode : 1;
+ uint64_t reserved_19_22 : 4;
+ uint64_t tx_bs_enh : 1;
+ uint64_t tx_bs_en : 1;
+ uint64_t loop_enb : 1;
+ uint64_t vtest_enb : 1;
+ uint64_t bist_enb : 1;
+ uint64_t tdata_sel : 1;
+ uint64_t taddr_in : 4;
+ uint64_t tdata_in : 8;
+ uint64_t ate_reset : 1;
+ } cn50xx;
+ /**
+ * struct cvmx_usbnx_usbp_ctl_status_cn52xx
+ * @txrisetune: HS Transmitter Rise/Fall Time Adjustment
+ * @txvreftune: HS DC Voltage Level Adjustment
+ * @txfslstune: FS/LS Source Impedance Adjustment
+ * @txhsxvtune: Transmitter High-Speed Crossover Adjustment
+ * @sqrxtune: Squelch Threshold Adjustment
+ * @compdistune: Disconnect Threshold Adjustment
+ * @otgtune: VBUS Valid Threshold Adjustment
+ * @otgdisable: OTG Block Disable
+ * @portreset: Per_Port Reset
+ * @drvvbus: Drive VBUS
+ * @lsbist: Low-Speed BIST Enable.
+ * @fsbist: Full-Speed BIST Enable.
+ * @hsbist: High-Speed BIST Enable.
+ * @bist_done: PHY Bist Done.
+ * Asserted at the end of the PHY BIST sequence.
+ * @bist_err: PHY Bist Error.
+ * Indicates an internal error was detected during
+ * the BIST sequence.
+ * @tdata_out: PHY Test Data Out.
+ * Presents either internally generated signals or
+ * test register contents, based upon the value of
+ * test_data_out_sel.
+ * @siddq: Drives the USBP (USB-PHY) SIDDQ input.
+ * Normally should be set to zero.
+ * When customers have no intent to use USB PHY
+ * interface, they should:
+ * - still provide 3.3V to USB_VDD33, and
+ * - tie USB_REXT to 3.3V supply, and
+ * - set USBN*_USBP_CTL_STATUS[SIDDQ]=1
+ * @txpreemphasistune: HS Transmitter Pre-Emphasis Enable
+ * @dma_bmode: When set to 1 the L2C DMA address will be updated
+ * with byte-counts between packets. When set to 0
+ * the L2C DMA address is incremented to the next
+ * 4-byte aligned address after adding byte-count.
+ * @usbc_end: Bigendian input to the USB Core. This should be
+ * set to '0' for operation.
+ * @usbp_bist: PHY BIST. This is cleared to '0' to run BIST on the USBP.
+ * @tclk: PHY Test Clock, used to load TDATA_IN to the USBP.
+ * @dp_pulld: PHY DP_PULLDOWN input to the USB-PHY.
+ * This signal enables the pull-down resistance on
+ * the D+ line. '1' pull-down resistance is connected
+ * to D+. '0' pull-down resistance is not connected
+ * to D+. When an A/B device is acting as a host
+ * (downstream-facing port), dp_pulldown and
+ * dm_pulldown are enabled. This must not toggle
+ * during normal operation.
+ * @dm_pulld: PHY DM_PULLDOWN input to the USB-PHY.
+ * This signal enables the pull-down resistance on
+ * the D- line. '1' pull-down resistance is connected
+ * to D-. '0' pull-down resistance is not connected
+ * to D-. When an A/B device is acting as a host
+ * (downstream-facing port), dp_pulldown and
+ * dm_pulldown are enabled. This must not toggle
+ * during normal operation.
+ * @hst_mode: When '0' the USB is acting as HOST, when '1'
+ * USB is acting as device. This field needs to be
+ * set while the USB is in reset.
+ * @tx_bs_enh: Transmit Bit Stuffing on [15:8].
+ * Enables or disables bit stuffing on data[15:8]
+ * when bit-stuffing is enabled.
+ * @tx_bs_en: Transmit Bit Stuffing on [7:0].
+ * Enables or disables bit stuffing on data[7:0]
+ * when bit-stuffing is enabled.
+ * @loop_enb: PHY Loopback Test Enable.
+ * '1': During data transmission the receive is
+ * enabled.
+ * '0': During data transmission the receive is
+ * disabled.
+ * Must be '0' for normal operation.
+ * @vtest_enb: Analog Test Pin Enable.
+ * '1' The PHY's analog_test pin is enabled for the
+ * input and output of applicable analog test signals.
+ * '0' The analog_test pin is disabled.
+ * @bist_enb: Built-In Self Test Enable.
+ * Used to activate BIST in the PHY.
+ * @tdata_sel: Test Data Out Select.
+ * '1' test_data_out[3:0] (PHY) register contents
+ * are output. '0' internally generated signals are
+ * output.
+ * @taddr_in: Mode Address for Test Interface.
+ * Specifies the register address for writing to or
+ * reading from the PHY test interface register.
+ * @tdata_in: Internal Testing Register Input Data and Select
+ * This is a test bus. Data is present on [3:0],
+ * and its corresponding select (enable) is present
+ * on bits [7:4].
+ * @ate_reset: Reset input from automatic test equipment.
+ * This is a test signal. When the USB Core is
+ * powered up (not in Suspend Mode), an automatic
+ * tester can use this to disable phy_clock and
+ * free_clk, then re-enable them with an aligned
+ * phase.
+ * '1': The phy_clk and free_clk outputs are
+ * disabled. '0': The phy_clock and free_clk outputs
+ * are available within a specific period after the
+ * de-assertion.
+ */
+ struct cvmx_usbnx_usbp_ctl_status_cn52xx {
+ uint64_t txrisetune : 1;
+ uint64_t txvreftune : 4;
+ uint64_t txfslstune : 4;
+ uint64_t txhsxvtune : 2;
+ uint64_t sqrxtune : 3;
+ uint64_t compdistune : 3;
+ uint64_t otgtune : 3;
+ uint64_t otgdisable : 1;
+ uint64_t portreset : 1;
+ uint64_t drvvbus : 1;
+ uint64_t lsbist : 1;
+ uint64_t fsbist : 1;
+ uint64_t hsbist : 1;
+ uint64_t bist_done : 1;
+ uint64_t bist_err : 1;
+ uint64_t tdata_out : 4;
+ uint64_t siddq : 1;
+ uint64_t txpreemphasistune : 1;
+ uint64_t dma_bmode : 1;
+ uint64_t usbc_end : 1;
+ uint64_t usbp_bist : 1;
+ uint64_t tclk : 1;
+ uint64_t dp_pulld : 1;
+ uint64_t dm_pulld : 1;
+ uint64_t hst_mode : 1;
+ uint64_t reserved_19_22 : 4;
+ uint64_t tx_bs_enh : 1;
+ uint64_t tx_bs_en : 1;
+ uint64_t loop_enb : 1;
+ uint64_t vtest_enb : 1;
+ uint64_t bist_enb : 1;
+ uint64_t tdata_sel : 1;
+ uint64_t taddr_in : 4;
+ uint64_t tdata_in : 8;
+ uint64_t ate_reset : 1;
+ } cn52xx;
+};
+
+#endif
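The SIDDQ description above spells out the board-level requirements when the USB PHY is left unused; on the register side it amounts to a single read-modify-write of USBN*_USBP_CTL_STATUS. A hedged sketch, assuming a CVMX_USBNX_USBP_CTL_STATUS() address macro alongside the cvmx_read_csr()/cvmx_write_csr() accessors used elsewhere in this driver:

/*
 * Sketch only: park an unused USB PHY by setting SIDDQ, per the field
 * description above. The address macro name is an assumption; the board
 * must still supply USB_VDD33 and tie USB_REXT to the 3.3V supply.
 */
static void octeon_usb_phy_park_sketch(int usb_port)
{
	union cvmx_usbnx_usbp_ctl_status ctl;

	ctl.u64 = cvmx_read_csr(CVMX_USBNX_USBP_CTL_STATUS(usb_port));
	ctl.s.siddq = 1;
	cvmx_write_csr(CVMX_USBNX_USBP_CTL_STATUS(usb_port), ctl.u64);
}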
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
new file mode 100644
index 0000000..5dbbd14
--- /dev/null
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -0,0 +1,835 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Cavium Networks
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/usb.h>
+
+#include <linux/time.h>
+#include <linux/delay.h>
+
+#include <asm/octeon/cvmx.h>
+#include "cvmx-usb.h"
+#include <asm/octeon/cvmx-iob-defs.h>
+
+#include <linux/usb/hcd.h>
+
+#include <linux/err.h>
+
+struct octeon_hcd {
+ spinlock_t lock;
+ struct cvmx_usb_state usb;
+ struct tasklet_struct dequeue_tasklet;
+ struct list_head dequeue_list;
+};
+
+/* convert between an HCD pointer and the corresponding struct octeon_hcd */
+static inline struct octeon_hcd *hcd_to_octeon(struct usb_hcd *hcd)
+{
+ return (struct octeon_hcd *)(hcd->hcd_priv);
+}
+
+static inline struct usb_hcd *octeon_to_hcd(struct octeon_hcd *p)
+{
+ return container_of((void *)p, struct usb_hcd, hcd_priv);
+}
+
+static inline struct octeon_hcd *cvmx_usb_to_octeon(struct cvmx_usb_state *p)
+{
+ return container_of(p, struct octeon_hcd, usb);
+}
+
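These three helpers work because usb_create_hcd() allocates hcd_priv_size bytes (set to sizeof(struct octeon_hcd) in the hc_driver further down) inline at the end of struct usb_hcd, so each conversion is plain pointer arithmetic. A small illustrative check, not part of the patch:

/* Illustration only: the three conversions above must round-trip. */
static void octeon_hcd_conversion_check(struct usb_hcd *hcd)
{
	struct octeon_hcd *priv = hcd_to_octeon(hcd);

	WARN_ON(octeon_to_hcd(priv) != hcd);
	WARN_ON(cvmx_usb_to_octeon(&priv->usb) != priv);
}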
+static irqreturn_t octeon_usb_irq(struct usb_hcd *hcd)
+{
+ struct octeon_hcd *priv = hcd_to_octeon(hcd);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ cvmx_usb_poll(&priv->usb);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static void octeon_usb_port_callback(struct cvmx_usb_state *usb,
+ enum cvmx_usb_callback reason,
+ enum cvmx_usb_complete status,
+ int pipe_handle,
+ int submit_handle,
+ int bytes_transferred,
+ void *user_data)
+{
+ struct octeon_hcd *priv = cvmx_usb_to_octeon(usb);
+
+ spin_unlock(&priv->lock);
+ usb_hcd_poll_rh_status(octeon_to_hcd(priv));
+ spin_lock(&priv->lock);
+}
+
+static int octeon_usb_start(struct usb_hcd *hcd)
+{
+ struct octeon_hcd *priv = hcd_to_octeon(hcd);
+ unsigned long flags;
+
+ hcd->state = HC_STATE_RUNNING;
+ spin_lock_irqsave(&priv->lock, flags);
+ cvmx_usb_register_callback(&priv->usb, CVMX_USB_CALLBACK_PORT_CHANGED,
+ octeon_usb_port_callback, NULL);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return 0;
+}
+
+static void octeon_usb_stop(struct usb_hcd *hcd)
+{
+ struct octeon_hcd *priv = hcd_to_octeon(hcd);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ cvmx_usb_register_callback(&priv->usb, CVMX_USB_CALLBACK_PORT_CHANGED,
+ NULL, NULL);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ hcd->state = HC_STATE_HALT;
+}
+
+static int octeon_usb_get_frame_number(struct usb_hcd *hcd)
+{
+ struct octeon_hcd *priv = hcd_to_octeon(hcd);
+
+ return cvmx_usb_get_frame_number(&priv->usb);
+}
+
+static void octeon_usb_urb_complete_callback(struct cvmx_usb_state *usb,
+ enum cvmx_usb_callback reason,
+ enum cvmx_usb_complete status,
+ int pipe_handle,
+ int submit_handle,
+ int bytes_transferred,
+ void *user_data)
+{
+ struct octeon_hcd *priv = cvmx_usb_to_octeon(usb);
+ struct usb_hcd *hcd = octeon_to_hcd(priv);
+ struct device *dev = hcd->self.controller;
+ struct urb *urb = user_data;
+
+ urb->actual_length = bytes_transferred;
+ urb->hcpriv = NULL;
+
+ if (!list_empty(&urb->urb_list)) {
+ /*
+ * It is on the dequeue_list, but we are going to call
+ * usb_hcd_giveback_urb(), so we must clear it from
+ * the list. We got to it before the
+ * octeon_usb_urb_dequeue_work() tasklet did.
+ */
+ list_del(&urb->urb_list);
+ /* No longer on the dequeue_list. */
+ INIT_LIST_HEAD(&urb->urb_list);
+ }
+
+ /* For Isochronous transactions we need to update the URB packet status
+ list from data in our private copy */
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+ int i;
+ /*
+ * The pointer to the private list is stored in the setup_packet
+ * field.
+ */
+ struct cvmx_usb_iso_packet *iso_packet =
+ (struct cvmx_usb_iso_packet *) urb->setup_packet;
+ /* Recalculate the transfer size by adding up each packet */
+ urb->actual_length = 0;
+ for (i = 0; i < urb->number_of_packets; i++) {
+ if (iso_packet[i].status == CVMX_USB_COMPLETE_SUCCESS) {
+ urb->iso_frame_desc[i].status = 0;
+ urb->iso_frame_desc[i].actual_length = iso_packet[i].length;
+ urb->actual_length += urb->iso_frame_desc[i].actual_length;
+ } else {
+ dev_dbg(dev, "ISOCHRONOUS packet=%d of %d status=%d pipe=%d submit=%d size=%d\n",
+ i, urb->number_of_packets,
+ iso_packet[i].status, pipe_handle,
+ submit_handle, iso_packet[i].length);
+ urb->iso_frame_desc[i].status = -EREMOTEIO;
+ }
+ }
+ /* Free the private list now that we don't need it anymore */
+ kfree(iso_packet);
+ urb->setup_packet = NULL;
+ }
+
+ switch (status) {
+ case CVMX_USB_COMPLETE_SUCCESS:
+ urb->status = 0;
+ break;
+ case CVMX_USB_COMPLETE_CANCEL:
+ if (urb->status == 0)
+ urb->status = -ENOENT;
+ break;
+ case CVMX_USB_COMPLETE_STALL:
+ dev_dbg(dev, "status=stall pipe=%d submit=%d size=%d\n",
+ pipe_handle, submit_handle, bytes_transferred);
+ urb->status = -EPIPE;
+ break;
+ case CVMX_USB_COMPLETE_BABBLEERR:
+ dev_dbg(dev, "status=babble pipe=%d submit=%d size=%d\n",
+ pipe_handle, submit_handle, bytes_transferred);
+ urb->status = -EPIPE;
+ break;
+ case CVMX_USB_COMPLETE_SHORT:
+ dev_dbg(dev, "status=short pipe=%d submit=%d size=%d\n",
+ pipe_handle, submit_handle, bytes_transferred);
+ urb->status = -EREMOTEIO;
+ break;
+ case CVMX_USB_COMPLETE_ERROR:
+ case CVMX_USB_COMPLETE_XACTERR:
+ case CVMX_USB_COMPLETE_DATATGLERR:
+ case CVMX_USB_COMPLETE_FRAMEERR:
+ dev_dbg(dev, "status=%d pipe=%d submit=%d size=%d\n",
+ status, pipe_handle, submit_handle, bytes_transferred);
+ urb->status = -EPROTO;
+ break;
+ }
+ spin_unlock(&priv->lock);
+ usb_hcd_giveback_urb(octeon_to_hcd(priv), urb, urb->status);
+ spin_lock(&priv->lock);
+}
+
+static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
+ struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct octeon_hcd *priv = hcd_to_octeon(hcd);
+ struct device *dev = hcd->self.controller;
+ int submit_handle = -1;
+ int pipe_handle;
+ unsigned long flags;
+ struct cvmx_usb_iso_packet *iso_packet;
+ struct usb_host_endpoint *ep = urb->ep;
+
+ urb->status = 0;
+ INIT_LIST_HEAD(&urb->urb_list); /* not enqueued on dequeue_list */
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (!ep->hcpriv) {
+ enum cvmx_usb_transfer transfer_type;
+ enum cvmx_usb_speed speed;
+ int split_device = 0;
+ int split_port = 0;
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_ISOCHRONOUS:
+ transfer_type = CVMX_USB_TRANSFER_ISOCHRONOUS;
+ break;
+ case PIPE_INTERRUPT:
+ transfer_type = CVMX_USB_TRANSFER_INTERRUPT;
+ break;
+ case PIPE_CONTROL:
+ transfer_type = CVMX_USB_TRANSFER_CONTROL;
+ break;
+ default:
+ transfer_type = CVMX_USB_TRANSFER_BULK;
+ break;
+ }
+ switch (urb->dev->speed) {
+ case USB_SPEED_LOW:
+ speed = CVMX_USB_SPEED_LOW;
+ break;
+ case USB_SPEED_FULL:
+ speed = CVMX_USB_SPEED_FULL;
+ break;
+ default:
+ speed = CVMX_USB_SPEED_HIGH;
+ break;
+ }
+ /*
+ * For slow devices on high speed ports we need to find the hub
+ * that does the speed translation so we know where to send the
+ * split transactions.
+ */
+ if (speed != CVMX_USB_SPEED_HIGH) {
+ /*
+ * Start at this device and work our way up the usb
+ * tree.
+ */
+ struct usb_device *dev = urb->dev;
+ while (dev->parent) {
+ /*
+ * If our parent is high speed then it will
+ * receive the splits.
+ */
+ if (dev->parent->speed == USB_SPEED_HIGH) {
+ split_device = dev->parent->devnum;
+ split_port = dev->portnum;
+ break;
+ }
+ /*
+ * Move up the tree one level. If we make it all
+ * the way up the tree, then the port must not
+ * be in high speed mode and we don't need a
+ * split.
+ */
+ dev = dev->parent;
+ }
+ }
+ pipe_handle = cvmx_usb_open_pipe(&priv->usb,
+ 0,
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe),
+ speed,
+ le16_to_cpu(ep->desc.wMaxPacketSize) & 0x7ff,
+ transfer_type,
+ usb_pipein(urb->pipe) ? CVMX_USB_DIRECTION_IN : CVMX_USB_DIRECTION_OUT,
+ urb->interval,
+ (le16_to_cpu(ep->desc.wMaxPacketSize) >> 11) & 0x3,
+ split_device,
+ split_port);
+ if (pipe_handle < 0) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ dev_dbg(dev, "Failed to create pipe\n");
+ return -ENOMEM;
+ }
+ ep->hcpriv = (void *)(0x10000L + pipe_handle);
+ } else {
+ pipe_handle = 0xffff & (long)ep->hcpriv;
+ }
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_ISOCHRONOUS:
+ dev_dbg(dev, "Submit isochronous to %d.%d\n",
+ usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe));
+ /*
+ * Allocate a structure to use for our private list of
+ * isochronous packets.
+ */
+ iso_packet = kmalloc(urb->number_of_packets *
+ sizeof(struct cvmx_usb_iso_packet),
+ GFP_ATOMIC);
+ if (iso_packet) {
+ int i;
+ /* Fill the list with the data from the URB */
+ for (i = 0; i < urb->number_of_packets; i++) {
+ iso_packet[i].offset = urb->iso_frame_desc[i].offset;
+ iso_packet[i].length = urb->iso_frame_desc[i].length;
+ iso_packet[i].status = CVMX_USB_COMPLETE_ERROR;
+ }
+ /*
+ * Store a pointer to the list in the URB setup_packet
+ * field. We know this currently isn't being used and
+ * this saves us a bunch of logic.
+ */
+ urb->setup_packet = (char *)iso_packet;
+ submit_handle = cvmx_usb_submit_isochronous(&priv->usb, pipe_handle,
+ urb->start_frame,
+ 0 /* flags */ ,
+ urb->number_of_packets,
+ iso_packet,
+ urb->transfer_dma,
+ urb->transfer_buffer_length,
+ octeon_usb_urb_complete_callback,
+ urb);
+ /*
+ * If submit failed we need to free our private packet
+ * list.
+ */
+ if (submit_handle < 0) {
+ urb->setup_packet = NULL;
+ kfree(iso_packet);
+ }
+ }
+ break;
+ case PIPE_INTERRUPT:
+ dev_dbg(dev, "Submit interrupt to %d.%d\n",
+ usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe));
+ submit_handle = cvmx_usb_submit_interrupt(&priv->usb, pipe_handle,
+ urb->transfer_dma,
+ urb->transfer_buffer_length,
+ octeon_usb_urb_complete_callback,
+ urb);
+ break;
+ case PIPE_CONTROL:
+ dev_dbg(dev, "Submit control to %d.%d\n",
+ usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe));
+ submit_handle = cvmx_usb_submit_control(&priv->usb, pipe_handle,
+ urb->setup_dma,
+ urb->transfer_dma,
+ urb->transfer_buffer_length,
+ octeon_usb_urb_complete_callback,
+ urb);
+ break;
+ case PIPE_BULK:
+ dev_dbg(dev, "Submit bulk to %d.%d\n",
+ usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe));
+ submit_handle = cvmx_usb_submit_bulk(&priv->usb, pipe_handle,
+ urb->transfer_dma,
+ urb->transfer_buffer_length,
+ octeon_usb_urb_complete_callback,
+ urb);
+ break;
+ }
+ if (submit_handle < 0) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ dev_dbg(dev, "Failed to submit\n");
+ return -ENOMEM;
+ }
+ urb->hcpriv = (void *)(long)(((submit_handle & 0xffff) << 16) | pipe_handle);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return 0;
+}
+
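octeon_usb_urb_enqueue() above packs both handles into urb->hcpriv (submit handle in bits 31:16, pipe handle in bits 15:0), and octeon_usb_urb_dequeue_work() below recovers them with the same mask and shift. Spelled out as standalone helpers (names hypothetical, shown only to make the encoding explicit):

/* Illustration only: the urb->hcpriv handle packing used by this driver. */
static inline void *octeon_usb_pack_handles(int submit_handle, int pipe_handle)
{
	return (void *)(long)(((submit_handle & 0xffff) << 16) |
			      (pipe_handle & 0xffff));
}

static inline int octeon_usb_hcpriv_to_pipe(void *hcpriv)
{
	return 0xffff & (long)hcpriv;
}

static inline int octeon_usb_hcpriv_to_submit(void *hcpriv)
{
	return ((long)hcpriv) >> 16;
}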
+static void octeon_usb_urb_dequeue_work(unsigned long arg)
+{
+ unsigned long flags;
+ struct octeon_hcd *priv = (struct octeon_hcd *)arg;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ while (!list_empty(&priv->dequeue_list)) {
+ int pipe_handle;
+ int submit_handle;
+ struct urb *urb = container_of(priv->dequeue_list.next, struct urb, urb_list);
+ list_del(&urb->urb_list);
+ /* not enqueued on dequeue_list */
+ INIT_LIST_HEAD(&urb->urb_list);
+ pipe_handle = 0xffff & (long)urb->hcpriv;
+ submit_handle = ((long)urb->hcpriv) >> 16;
+ cvmx_usb_cancel(&priv->usb, pipe_handle, submit_handle);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int octeon_usb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ struct octeon_hcd *priv = hcd_to_octeon(hcd);
+ unsigned long flags;
+
+ if (!urb->dev)
+ return -EINVAL;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ urb->status = status;
+ list_add_tail(&urb->urb_list, &priv->dequeue_list);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ tasklet_schedule(&priv->dequeue_tasklet);
+
+ return 0;
+}
+
+static void octeon_usb_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+ struct device *dev = hcd->self.controller;
+
+ if (ep->hcpriv) {
+ struct octeon_hcd *priv = hcd_to_octeon(hcd);
+ int pipe_handle = 0xffff & (long)ep->hcpriv;
+ unsigned long flags;
+ spin_lock_irqsave(&priv->lock, flags);
+ cvmx_usb_cancel_all(&priv->usb, pipe_handle);
+ if (cvmx_usb_close_pipe(&priv->usb, pipe_handle))
+ dev_dbg(dev, "Closing pipe %d failed\n", pipe_handle);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ ep->hcpriv = NULL;
+ }
+}
+
+static int octeon_usb_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct octeon_hcd *priv = hcd_to_octeon(hcd);
+ struct cvmx_usb_port_status port_status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ port_status = cvmx_usb_get_status(&priv->usb);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ buf[0] = 0;
+ buf[0] = port_status.connect_change << 1;
+
+ return (buf[0] != 0);
+}
+
+static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength)
+{
+ struct octeon_hcd *priv = hcd_to_octeon(hcd);
+ struct device *dev = hcd->self.controller;
+ struct cvmx_usb_port_status usb_port_status;
+ int port_status;
+ struct usb_hub_descriptor *desc;
+ unsigned long flags;
+
+ switch (typeReq) {
+ case ClearHubFeature:
+ dev_dbg(dev, "ClearHubFeature\n");
+ switch (wValue) {
+ case C_HUB_LOCAL_POWER:
+ case C_HUB_OVER_CURRENT:
+ /* Nothing required here */
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case ClearPortFeature:
+ dev_dbg(dev, "ClearPortFeature\n");
+ if (wIndex != 1) {
+ dev_dbg(dev, " INVALID\n");
+ return -EINVAL;
+ }
+
+ switch (wValue) {
+ case USB_PORT_FEAT_ENABLE:
+ dev_dbg(dev, " ENABLE\n");
+ spin_lock_irqsave(&priv->lock, flags);
+ cvmx_usb_disable(&priv->usb);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ dev_dbg(dev, " SUSPEND\n");
+ /* Not supported on Octeon */
+ break;
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(dev, " POWER\n");
+ /* Not supported on Octeon */
+ break;
+ case USB_PORT_FEAT_INDICATOR:
+ dev_dbg(dev, " INDICATOR\n");
+ /* Port indicator not supported */
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ dev_dbg(dev, " C_CONNECTION\n");
+ /* Clears the driver's internal connect status change flag */
+ spin_lock_irqsave(&priv->lock, flags);
+ cvmx_usb_set_status(&priv->usb, cvmx_usb_get_status(&priv->usb));
+ spin_unlock_irqrestore(&priv->lock, flags);
+ break;
+ case USB_PORT_FEAT_C_RESET:
+ dev_dbg(dev, " C_RESET\n");
+ /*
+ * Clears the driver's internal Port Reset Change flag.
+ */
+ spin_lock_irqsave(&priv->lock, flags);
+ cvmx_usb_set_status(&priv->usb, cvmx_usb_get_status(&priv->usb));
+ spin_unlock_irqrestore(&priv->lock, flags);
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ dev_dbg(dev, " C_ENABLE\n");
+ /*
+ * Clears the driver's internal Port Enable/Disable
+ * Change flag.
+ */
+ spin_lock_irqsave(&priv->lock, flags);
+ cvmx_usb_set_status(&priv->usb, cvmx_usb_get_status(&priv->usb));
+ spin_unlock_irqrestore(&priv->lock, flags);
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ dev_dbg(dev, " C_SUSPEND\n");
+ /*
+ * Clears the driver's internal Port Suspend Change
+ * flag, which is set when resume signaling on the host
+ * port is complete.
+ */
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ dev_dbg(dev, " C_OVER_CURRENT\n");
+ /* Clears the driver's overcurrent Change flag */
+ spin_lock_irqsave(&priv->lock, flags);
+ cvmx_usb_set_status(&priv->usb, cvmx_usb_get_status(&priv->usb));
+ spin_unlock_irqrestore(&priv->lock, flags);
+ break;
+ default:
+ dev_dbg(dev, " UNKNOWN\n");
+ return -EINVAL;
+ }
+ break;
+ case GetHubDescriptor:
+ dev_dbg(dev, "GetHubDescriptor\n");
+ desc = (struct usb_hub_descriptor *)buf;
+ desc->bDescLength = 9;
+ desc->bDescriptorType = 0x29;
+ desc->bNbrPorts = 1;
+ desc->wHubCharacteristics = 0x08;
+ desc->bPwrOn2PwrGood = 1;
+ desc->bHubContrCurrent = 0;
+ desc->u.hs.DeviceRemovable[0] = 0;
+ desc->u.hs.DeviceRemovable[1] = 0xff;
+ break;
+ case GetHubStatus:
+ dev_dbg(dev, "GetHubStatus\n");
+ *(__le32 *) buf = 0;
+ break;
+ case GetPortStatus:
+ dev_dbg(dev, "GetPortStatus\n");
+ if (wIndex != 1) {
+ dev_dbg(dev, " INVALID\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+ usb_port_status = cvmx_usb_get_status(&priv->usb);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ port_status = 0;
+
+ if (usb_port_status.connect_change) {
+ port_status |= (1 << USB_PORT_FEAT_C_CONNECTION);
+ dev_dbg(dev, " C_CONNECTION\n");
+ }
+
+ if (usb_port_status.port_enabled) {
+ port_status |= (1 << USB_PORT_FEAT_C_ENABLE);
+ dev_dbg(dev, " C_ENABLE\n");
+ }
+
+ if (usb_port_status.connected) {
+ port_status |= (1 << USB_PORT_FEAT_CONNECTION);
+ dev_dbg(dev, " CONNECTION\n");
+ }
+
+ if (usb_port_status.port_enabled) {
+ port_status |= (1 << USB_PORT_FEAT_ENABLE);
+ dev_dbg(dev, " ENABLE\n");
+ }
+
+ if (usb_port_status.port_over_current) {
+ port_status |= (1 << USB_PORT_FEAT_OVER_CURRENT);
+ dev_dbg(dev, " OVER_CURRENT\n");
+ }
+
+ if (usb_port_status.port_powered) {
+ port_status |= (1 << USB_PORT_FEAT_POWER);
+ dev_dbg(dev, " POWER\n");
+ }
+
+ if (usb_port_status.port_speed == CVMX_USB_SPEED_HIGH) {
+ port_status |= USB_PORT_STAT_HIGH_SPEED;
+ dev_dbg(dev, " HIGHSPEED\n");
+ } else if (usb_port_status.port_speed == CVMX_USB_SPEED_LOW) {
+ port_status |= (1 << USB_PORT_FEAT_LOWSPEED);
+ dev_dbg(dev, " LOWSPEED\n");
+ }
+
+ *((__le32 *) buf) = cpu_to_le32(port_status);
+ break;
+ case SetHubFeature:
+ dev_dbg(dev, "SetHubFeature\n");
+ /* No HUB features supported */
+ break;
+ case SetPortFeature:
+ dev_dbg(dev, "SetPortFeature\n");
+ if (wIndex != 1) {
+ dev_dbg(dev, " INVALID\n");
+ return -EINVAL;
+ }
+
+ switch (wValue) {
+ case USB_PORT_FEAT_SUSPEND:
+ dev_dbg(dev, " SUSPEND\n");
+ return -EINVAL;
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(dev, " POWER\n");
+ return -EINVAL;
+ case USB_PORT_FEAT_RESET:
+ dev_dbg(dev, " RESET\n");
+ spin_lock_irqsave(&priv->lock, flags);
+ cvmx_usb_disable(&priv->usb);
+ if (cvmx_usb_enable(&priv->usb))
+ dev_dbg(dev, "Failed to enable the port\n");
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return 0;
+ case USB_PORT_FEAT_INDICATOR:
+ dev_dbg(dev, " INDICATOR\n");
+ /* Not supported */
+ break;
+ default:
+ dev_dbg(dev, " UNKNOWN\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_dbg(dev, "Unknown root hub request\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+
+static const struct hc_driver octeon_hc_driver = {
+ .description = "Octeon USB",
+ .product_desc = "Octeon Host Controller",
+ .hcd_priv_size = sizeof(struct octeon_hcd),
+ .irq = octeon_usb_irq,
+ .flags = HCD_MEMORY | HCD_USB2,
+ .start = octeon_usb_start,
+ .stop = octeon_usb_stop,
+ .urb_enqueue = octeon_usb_urb_enqueue,
+ .urb_dequeue = octeon_usb_urb_dequeue,
+ .endpoint_disable = octeon_usb_endpoint_disable,
+ .get_frame_number = octeon_usb_get_frame_number,
+ .hub_status_data = octeon_usb_hub_status_data,
+ .hub_control = octeon_usb_hub_control,
+};
+
+
+static int octeon_usb_driver_probe(struct device *dev)
+{
+ int status;
+ int usb_num = to_platform_device(dev)->id;
+ int irq = platform_get_irq(to_platform_device(dev), 0);
+ struct octeon_hcd *priv;
+ struct usb_hcd *hcd;
+ unsigned long flags;
+
+ /*
+ * Set the DMA mask to 64 bits so we get buffers already translated for
+ * DMA.
+ */
+ dev->coherent_dma_mask = ~0;
+ dev->dma_mask = &dev->coherent_dma_mask;
+
+ hcd = usb_create_hcd(&octeon_hc_driver, dev, dev_name(dev));
+ if (!hcd) {
+ dev_dbg(dev, "Failed to allocate memory for HCD\n");
+ return -1;
+ }
+ hcd->uses_new_polling = 1;
+ priv = (struct octeon_hcd *)hcd->hcd_priv;
+
+ spin_lock_init(&priv->lock);
+
+ tasklet_init(&priv->dequeue_tasklet, octeon_usb_urb_dequeue_work, (unsigned long)priv);
+ INIT_LIST_HEAD(&priv->dequeue_list);
+
+ status = cvmx_usb_initialize(&priv->usb, usb_num, CVMX_USB_INITIALIZE_FLAGS_CLOCK_AUTO);
+ if (status) {
+ dev_dbg(dev, "USB initialization failed with %d\n", status);
+ kfree(hcd);
+ return -1;
+ }
+
+ /* This delay is needed for CN3010, but I don't know why... */
+ mdelay(10);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ cvmx_usb_poll(&priv->usb);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ status = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (status) {
+ dev_dbg(dev, "USB add HCD failed with %d\n", status);
+ kfree(hcd);
+ return -1;
+ }
+
+ dev_dbg(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq);
+
+ return 0;
+}
+
+static int octeon_usb_driver_remove(struct device *dev)
+{
+ int status;
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct octeon_hcd *priv = hcd_to_octeon(hcd);
+ unsigned long flags;
+
+ usb_remove_hcd(hcd);
+ tasklet_kill(&priv->dequeue_tasklet);
+ spin_lock_irqsave(&priv->lock, flags);
+ status = cvmx_usb_shutdown(&priv->usb);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ if (status)
+ dev_dbg(dev, "USB shutdown failed with %d\n", status);
+
+ kfree(hcd);
+
+ return 0;
+}
+
+static struct device_driver octeon_usb_driver = {
+ .name = "OcteonUSB",
+ .bus = &platform_bus_type,
+ .probe = octeon_usb_driver_probe,
+ .remove = octeon_usb_driver_remove,
+};
+
+
+#define MAX_USB_PORTS 10
+static struct platform_device *pdev_glob[MAX_USB_PORTS];
+static int octeon_usb_registered;
+static int __init octeon_usb_module_init(void)
+{
+ int num_devices = cvmx_usb_get_num_ports();
+ int device;
+
+ if (usb_disabled() || num_devices == 0)
+ return -ENODEV;
+
+ if (driver_register(&octeon_usb_driver))
+ return -ENOMEM;
+
+ octeon_usb_registered = 1;
+
+ /*
+ * Only cn52XX and cn56XX have DWC_OTG USB hardware and the
+ * IOB priority registers. Under heavy network load USB
+ * hardware can be starved by the IOB causing a crash. Give
+ * it a priority boost if it has been waiting more than 400
+ * cycles to avoid this situation.
+ *
+ * Testing indicates that a cnt_val of 8192 is not sufficient,
+ * but no failures are seen with 4096. We choose a value of
+ * 400 to give a safety factor of 10.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+ union cvmx_iob_n2c_l2c_pri_cnt pri_cnt;
+
+ pri_cnt.u64 = 0;
+ pri_cnt.s.cnt_enb = 1;
+ pri_cnt.s.cnt_val = 400;
+ cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64);
+ }
+
+ for (device = 0; device < num_devices; device++) {
+ struct resource irq_resource;
+ struct platform_device *pdev;
+ memset(&irq_resource, 0, sizeof(irq_resource));
+ irq_resource.start = (device == 0) ? OCTEON_IRQ_USB0 : OCTEON_IRQ_USB1;
+ irq_resource.end = irq_resource.start;
+ irq_resource.flags = IORESOURCE_IRQ;
+ pdev = platform_device_register_simple((char *)octeon_usb_driver.name, device, &irq_resource, 1);
+ if (IS_ERR(pdev)) {
+ driver_unregister(&octeon_usb_driver);
+ octeon_usb_registered = 0;
+ return PTR_ERR(pdev);
+ }
+ if (device < MAX_USB_PORTS)
+ pdev_glob[device] = pdev;
+
+ }
+ return 0;
+}
+
+static void __exit octeon_usb_module_cleanup(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_USB_PORTS; i++)
+ if (pdev_glob[i]) {
+ platform_device_unregister(pdev_glob[i]);
+ pdev_glob[i] = NULL;
+ }
+ if (octeon_usb_registered)
+ driver_unregister(&octeon_usb_driver);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
+MODULE_DESCRIPTION("Cavium Networks Octeon USB Host driver.");
+module_init(octeon_usb_module_init);
+module_exit(octeon_usb_module_cleanup);
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig
index 9493128..6e1d5f8 100644
--- a/drivers/staging/octeon/Kconfig
+++ b/drivers/staging/octeon/Kconfig
@@ -1,6 +1,6 @@
config OCTEON_ETHERNET
tristate "Cavium Networks Octeon Ethernet support"
- depends on CPU_CAVIUM_OCTEON && NETDEVICES
+ depends on CAVIUM_OCTEON_SOC && NETDEVICES
select PHYLIB
select MDIO_OCTEON
help
diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c
index 78b6cb7..199059d 100644
--- a/drivers/staging/octeon/ethernet-mem.c
+++ b/drivers/staging/octeon/ethernet-mem.c
@@ -48,13 +48,8 @@ static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
while (freed) {
struct sk_buff *skb = dev_alloc_skb(size + 256);
- if (unlikely(skb == NULL)) {
- pr_warning
- ("Failed to allocate skb for hardware pool %d\n",
- pool);
+ if (unlikely(skb == NULL))
break;
- }
-
skb_reserve(skb, 256 - (((unsigned long)skb->data) & 0x7f));
*(struct sk_buff **)(skb->data - sizeof(void *)) = skb;
cvmx_fpa_free(skb->data, pool, DONT_WRITEBACK(size / 128));
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index d8f5f69..ea53af3 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -373,9 +373,7 @@ int cvm_oct_rgmii_init(struct net_device *dev)
* Enable interrupts on inband status changes
* for this port.
*/
- gmx_rx_int_en.u64 =
- cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
- (index, interface));
+ gmx_rx_int_en.u64 = 0;
gmx_rx_int_en.s.phy_dupx = 1;
gmx_rx_int_en.s.phy_link = 1;
gmx_rx_int_en.s.phy_spd = 1;
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 34afc16..e14a1bb 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -303,6 +303,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
if (backlog > budget * cores_in_use && napi != NULL)
cvm_oct_enable_one_cpu();
}
+ rx_count++;
skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1;
if (likely(skb_in_hw)) {
@@ -336,9 +337,6 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
*/
skb = dev_alloc_skb(work->len);
if (!skb) {
- printk_ratelimited("Port %d failed to allocate "
- "skbuff, packet dropped\n",
- work->ipprt);
cvm_oct_free_work(work);
continue;
}
@@ -429,7 +427,6 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
#endif
}
netif_receive_skb(skb);
- rx_count++;
} else {
/* Drop any packet received for a device that isn't up */
/*
diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig
index fe40e0b..2ff015d 100644
--- a/drivers/staging/olpc_dcon/Kconfig
+++ b/drivers/staging/olpc_dcon/Kconfig
@@ -4,9 +4,14 @@ config FB_OLPC_DCON
select I2C
select BACKLIGHT_CLASS_DEVICE
---help---
- Add support for the OLPC XO DCON controller. This controller is
- only available on OLPC platforms. Unless you have one of these
- platforms, you will want to say 'N'.
+ In order to support very low power operation, the XO laptop uses a
+ secondary Display CONtroller, or DCON. This secondary controller
+ is present in the video pipeline between the primary display
+ controller (integrated into the processor or chipset) and the LCD
+ panel. It allows the main processor/display controller to be
+ completely powered off while still retaining an image on the display.
+ This controller is only available on OLPC platforms. Unless you have
+ one of these platforms, you will want to say 'N'.
config FB_OLPC_DCON_1
bool "OLPC XO-1 DCON support"
diff --git a/drivers/staging/olpc_dcon/TODO b/drivers/staging/olpc_dcon/TODO
index 35f9cda..61c2e65 100644
--- a/drivers/staging/olpc_dcon/TODO
+++ b/drivers/staging/olpc_dcon/TODO
@@ -1,16 +1,9 @@
TODO:
- - checkpatch.pl cleanups
- see if vx855 gpio API can be made similar enough to cs5535 so we can
share more code
- allow simultaneous XO-1 and XO-1.5 support
- - console event notifier support
- - drop global variables, use a proper olpc_dcon_priv struct
- - audit code for unnecessary code; old unsupported prototype
- workarounds, ancient variables (noaa?), etc
- - verify sane i2c API usage, update to new stuff if necessary
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
copy:
- Andres Salomon <dilinger@queued.net>
- Chris Ball <cjb@laptop.org>
- Jon Nettleton <jon.nettleton@gmail.com>
+ Daniel Drake <dsd@laptop.org>
+ Jens Frederich <jfrederich@gmail.com>
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index 193e1c6..198595e 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -90,9 +90,10 @@ static int dcon_hw_init(struct dcon_priv *dcon, int is_init)
/* SDRAM setup/hold time */
dcon_write(dcon, 0x3a, 0xc040);
- dcon_write(dcon, 0x41, 0x0000);
- dcon_write(dcon, 0x41, 0x0101);
- dcon_write(dcon, 0x42, 0x0101);
+ dcon_write(dcon, DCON_REG_MEM_OPT_A, 0x0000); /* clear option bits */
+ dcon_write(dcon, DCON_REG_MEM_OPT_A,
+ MEM_DLL_CLOCK_DELAY | MEM_POWER_DOWN);
+ dcon_write(dcon, DCON_REG_MEM_OPT_B, MEM_SOFT_RESET);
/* Colour swizzle, AA, no passthrough, backlight */
if (is_init) {
@@ -121,30 +122,31 @@ err:
static int dcon_bus_stabilize(struct dcon_priv *dcon, int is_powered_down)
{
unsigned long timeout;
+ u8 pm;
int x;
power_up:
if (is_powered_down) {
- x = 1;
- x = olpc_ec_cmd(0x26, (unsigned char *)&x, 1, NULL, 0);
+ pm = 1;
+ x = olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
if (x) {
pr_warn("unable to force dcon to power up: %d!\n", x);
return x;
}
- msleep(10); /* we'll be conservative */
+ usleep_range(10000, 11000); /* we'll be conservative */
}
pdata->bus_stabilize_wiggle();
for (x = -1, timeout = 50; timeout && x < 0; timeout--) {
- msleep(1);
+ usleep_range(1000, 1100);
x = dcon_read(dcon, DCON_REG_ID);
}
if (x < 0) {
pr_err("unable to stabilize dcon's smbus, reasserting power and praying.\n");
BUG_ON(olpc_board_at_least(olpc_board(0xc2)));
- x = 0;
- olpc_ec_cmd(0x26, (unsigned char *)&x, 1, NULL, 0);
+ pm = 0;
+ olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
msleep(100);
is_powered_down = 1;
goto power_up; /* argh, stupid hardware.. */
@@ -207,8 +209,8 @@ static void dcon_sleep(struct dcon_priv *dcon, bool sleep)
return;
if (sleep) {
- x = 0;
- x = olpc_ec_cmd(0x26, (unsigned char *)&x, 1, NULL, 0);
+ u8 pm = 0;
+ x = olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
if (x)
pr_warn("unable to force dcon to power down: %d!\n", x);
else
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
index 997bded..e2663b1 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.h
+++ b/drivers/staging/olpc_dcon/olpc_dcon.h
@@ -22,15 +22,24 @@
#define MODE_DEBUG (1<<14)
#define MODE_SELFTEST (1<<15)
-#define DCON_REG_HRES 2
-#define DCON_REG_HTOTAL 3
-#define DCON_REG_HSYNC_WIDTH 4
-#define DCON_REG_VRES 5
-#define DCON_REG_VTOTAL 6
-#define DCON_REG_VSYNC_WIDTH 7
-#define DCON_REG_TIMEOUT 8
-#define DCON_REG_SCAN_INT 9
-#define DCON_REG_BRIGHT 10
+#define DCON_REG_HRES 0x2
+#define DCON_REG_HTOTAL 0x3
+#define DCON_REG_HSYNC_WIDTH 0x4
+#define DCON_REG_VRES 0x5
+#define DCON_REG_VTOTAL 0x6
+#define DCON_REG_VSYNC_WIDTH 0x7
+#define DCON_REG_TIMEOUT 0x8
+#define DCON_REG_SCAN_INT 0x9
+#define DCON_REG_BRIGHT 0xa
+#define DCON_REG_MEM_OPT_A 0x41
+#define DCON_REG_MEM_OPT_B 0x42
+
+/* Load Delay Locked Loop (DLL) settings for clock delay */
+#define MEM_DLL_CLOCK_DELAY (1<<0)
+/* Memory controller power down function */
+#define MEM_POWER_DOWN (1<<8)
+/* Memory controller software reset */
+#define MEM_SOFT_RESET (1<<0)
/* Status values */
diff --git a/drivers/staging/ozwpan/Kbuild b/drivers/staging/ozwpan/Makefile
index 6cc84cb..29529c1 100644
--- a/drivers/staging/ozwpan/Kbuild
+++ b/drivers/staging/ozwpan/Makefile
@@ -2,6 +2,7 @@
# Copyright (c) 2011 Ozmo Inc
# Released under the GNU General Public License Version 2 (GPLv2).
# -----------------------------------------------------------------------------
+
obj-$(CONFIG_USB_WPAN_HCD) += ozwpan.o
ozwpan-y := \
ozmain.o \
@@ -12,8 +13,4 @@ ozwpan-y := \
ozeltbuf.o \
ozproto.o \
ozcdev.o \
- ozurbparanoia.o \
- oztrace.o \
- ozevent.o
-
-
+ ozurbparanoia.o
diff --git a/drivers/staging/ozwpan/ozappif.h b/drivers/staging/ozwpan/ozappif.h
index 449a6ba..ea1b271 100644
--- a/drivers/staging/ozwpan/ozappif.h
+++ b/drivers/staging/ozwpan/ozappif.h
@@ -6,8 +6,6 @@
#ifndef _OZAPPIF_H
#define _OZAPPIF_H
-#include "ozeventdef.h"
-
#define OZ_IOCTL_MAGIC 0xf4
struct oz_mac_addr {
diff --git a/drivers/staging/ozwpan/ozcdev.c b/drivers/staging/ozwpan/ozcdev.c
index 27d0666..6ccb64f 100644
--- a/drivers/staging/ozwpan/ozcdev.c
+++ b/drivers/staging/ozwpan/ozcdev.c
@@ -11,17 +11,14 @@
#include <linux/etherdevice.h>
#include <linux/poll.h>
#include <linux/sched.h>
-#include "ozconfig.h"
+#include "ozdbg.h"
#include "ozprotocol.h"
-#include "oztrace.h"
#include "ozappif.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
-#include "ozevent.h"
#include "ozcdev.h"
-/*------------------------------------------------------------------------------
- */
+
#define OZ_RD_BUF_SZ 256
struct oz_cdev {
dev_t devnum;
@@ -41,16 +38,17 @@ struct oz_serial_ctx {
int rd_in;
int rd_out;
};
-/*------------------------------------------------------------------------------
- */
+
static struct oz_cdev g_cdev;
static struct class *g_oz_class;
-/*------------------------------------------------------------------------------
+
+/*
* Context: process and softirq
*/
static struct oz_serial_ctx *oz_cdev_claim_ctx(struct oz_pd *pd)
{
struct oz_serial_ctx *ctx;
+
spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
ctx = (struct oz_serial_ctx *)pd->app_ctx[OZ_APPID_SERIAL-1];
if (ctx)
@@ -58,37 +56,40 @@ static struct oz_serial_ctx *oz_cdev_claim_ctx(struct oz_pd *pd)
spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
return ctx;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
static void oz_cdev_release_ctx(struct oz_serial_ctx *ctx)
{
if (atomic_dec_and_test(&ctx->ref_count)) {
- oz_trace("Dealloc serial context.\n");
+ oz_dbg(ON, "Dealloc serial context\n");
kfree(ctx);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static int oz_cdev_open(struct inode *inode, struct file *filp)
{
- struct oz_cdev *dev;
- oz_trace("oz_cdev_open()\n");
- oz_trace("major = %d minor = %d\n", imajor(inode), iminor(inode));
- dev = container_of(inode->i_cdev, struct oz_cdev, cdev);
+ struct oz_cdev *dev = container_of(inode->i_cdev, struct oz_cdev, cdev);
+
+ oz_dbg(ON, "major = %d minor = %d\n", imajor(inode), iminor(inode));
+
filp->private_data = dev;
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static int oz_cdev_release(struct inode *inode, struct file *filp)
{
- oz_trace("oz_cdev_release()\n");
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static ssize_t oz_cdev_read(struct file *filp, char __user *buf, size_t count,
@@ -140,7 +141,8 @@ out2:
oz_pd_put(pd);
return count;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
@@ -159,7 +161,9 @@ static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
oz_pd_get(pd);
spin_unlock_bh(&g_cdev.lock);
if (pd == NULL)
- return -1;
+ return -ENXIO;
+ if (!(pd->state & OZ_PD_S_CONNECTED))
+ return -EAGAIN;
eb = &pd->elt_buff;
ei = oz_elt_info_alloc(eb);
if (ei == NULL) {
@@ -197,7 +201,8 @@ out:
oz_pd_put(pd);
return count;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static int oz_set_active_pd(const u8 *addr)
@@ -205,6 +210,7 @@ static int oz_set_active_pd(const u8 *addr)
int rc = 0;
struct oz_pd *pd;
struct oz_pd *old_pd;
+
pd = oz_pd_find(addr);
if (pd) {
spin_lock_bh(&g_cdev.lock);
@@ -230,13 +236,15 @@ static int oz_set_active_pd(const u8 *addr)
}
return rc;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static long oz_cdev_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
int rc = 0;
+
if (_IOC_TYPE(cmd) != OZ_IOCTL_MAGIC)
return -ENOTTY;
if (_IOC_NR(cmd) > OZ_IOCTL_MAX)
@@ -252,7 +260,7 @@ static long oz_cdev_ioctl(struct file *filp, unsigned int cmd,
switch (cmd) {
case OZ_IOCTL_GET_PD_LIST: {
struct oz_pd_list list;
- oz_trace("OZ_IOCTL_GET_PD_LIST\n");
+ oz_dbg(ON, "OZ_IOCTL_GET_PD_LIST\n");
memset(&list, 0, sizeof(list));
list.count = oz_get_pd_list(list.addr, OZ_MAX_PDS);
if (copy_to_user((void __user *)arg, &list,
@@ -262,7 +270,7 @@ static long oz_cdev_ioctl(struct file *filp, unsigned int cmd,
break;
case OZ_IOCTL_SET_ACTIVE_PD: {
u8 addr[ETH_ALEN];
- oz_trace("OZ_IOCTL_SET_ACTIVE_PD\n");
+ oz_dbg(ON, "OZ_IOCTL_SET_ACTIVE_PD\n");
if (copy_from_user(addr, (void __user *)arg, ETH_ALEN))
return -EFAULT;
rc = oz_set_active_pd(addr);
@@ -270,7 +278,7 @@ static long oz_cdev_ioctl(struct file *filp, unsigned int cmd,
break;
case OZ_IOCTL_GET_ACTIVE_PD: {
u8 addr[ETH_ALEN];
- oz_trace("OZ_IOCTL_GET_ACTIVE_PD\n");
+ oz_dbg(ON, "OZ_IOCTL_GET_ACTIVE_PD\n");
spin_lock_bh(&g_cdev.lock);
memcpy(addr, g_cdev.active_addr, ETH_ALEN);
spin_unlock_bh(&g_cdev.lock);
@@ -296,14 +304,16 @@ static long oz_cdev_ioctl(struct file *filp, unsigned int cmd,
}
return rc;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static unsigned int oz_cdev_poll(struct file *filp, poll_table *wait)
{
unsigned int ret = 0;
struct oz_cdev *dev = filp->private_data;
- oz_trace("Poll called wait = %p\n", wait);
+
+ oz_dbg(ON, "Poll called wait = %p\n", wait);
spin_lock_bh(&dev->lock);
if (dev->active_pd) {
struct oz_serial_ctx *ctx = oz_cdev_claim_ctx(dev->active_pd);
@@ -318,7 +328,8 @@ static unsigned int oz_cdev_poll(struct file *filp, poll_table *wait)
poll_wait(filp, &dev->rdq, wait);
return ret;
}
-/*------------------------------------------------------------------------------
+
+/*
*/
static const struct file_operations oz_fops = {
.owner = THIS_MODULE,
@@ -329,19 +340,21 @@ static const struct file_operations oz_fops = {
.unlocked_ioctl = oz_cdev_ioctl,
.poll = oz_cdev_poll
};
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
int oz_cdev_register(void)
{
int err;
struct device *dev;
+
memset(&g_cdev, 0, sizeof(g_cdev));
err = alloc_chrdev_region(&g_cdev.devnum, 0, 1, "ozwpan");
if (err < 0)
- goto out3;
- oz_trace("Alloc dev number %d:%d\n", MAJOR(g_cdev.devnum),
- MINOR(g_cdev.devnum));
+ return err;
+ oz_dbg(ON, "Alloc dev number %d:%d\n",
+ MAJOR(g_cdev.devnum), MINOR(g_cdev.devnum));
cdev_init(&g_cdev.cdev, &oz_fops);
g_cdev.cdev.owner = THIS_MODULE;
g_cdev.cdev.ops = &oz_fops;
@@ -349,28 +362,31 @@ int oz_cdev_register(void)
init_waitqueue_head(&g_cdev.rdq);
err = cdev_add(&g_cdev.cdev, g_cdev.devnum, 1);
if (err < 0) {
- oz_trace("Failed to add cdev\n");
- goto out2;
+ oz_dbg(ON, "Failed to add cdev\n");
+ goto unregister;
}
g_oz_class = class_create(THIS_MODULE, "ozmo_wpan");
if (IS_ERR(g_oz_class)) {
- oz_trace("Failed to register ozmo_wpan class\n");
- goto out1;
+ oz_dbg(ON, "Failed to register ozmo_wpan class\n");
+ err = PTR_ERR(g_oz_class);
+ goto delete;
}
dev = device_create(g_oz_class, NULL, g_cdev.devnum, NULL, "ozwpan");
if (IS_ERR(dev)) {
- oz_trace("Failed to create sysfs entry for cdev\n");
- goto out1;
+ oz_dbg(ON, "Failed to create sysfs entry for cdev\n");
+ err = PTR_ERR(dev);
+ goto delete;
}
return 0;
-out1:
+
+delete:
cdev_del(&g_cdev.cdev);
-out2:
+unregister:
unregister_chrdev_region(g_cdev.devnum, 1);
-out3:
return err;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
int oz_cdev_deregister(void)
@@ -383,33 +399,34 @@ int oz_cdev_deregister(void)
}
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
int oz_cdev_init(void)
{
- oz_event_log(OZ_EVT_SERVICE, 1, OZ_APPID_SERIAL, NULL, 0);
oz_app_enable(OZ_APPID_SERIAL, 1);
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
void oz_cdev_term(void)
{
- oz_event_log(OZ_EVT_SERVICE, 2, OZ_APPID_SERIAL, NULL, 0);
oz_app_enable(OZ_APPID_SERIAL, 0);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
int oz_cdev_start(struct oz_pd *pd, int resume)
{
struct oz_serial_ctx *ctx;
struct oz_serial_ctx *old_ctx;
- oz_event_log(OZ_EVT_SERVICE, 3, OZ_APPID_SERIAL, NULL, resume);
+
if (resume) {
- oz_trace("Serial service resumed.\n");
+ oz_dbg(ON, "Serial service resumed\n");
return 0;
}
ctx = kzalloc(sizeof(struct oz_serial_ctx), GFP_ATOMIC);
@@ -431,21 +448,22 @@ int oz_cdev_start(struct oz_pd *pd, int resume)
(memcmp(pd->mac_addr, g_cdev.active_addr, ETH_ALEN) == 0)) {
oz_pd_get(pd);
g_cdev.active_pd = pd;
- oz_trace("Active PD arrived.\n");
+ oz_dbg(ON, "Active PD arrived\n");
}
spin_unlock(&g_cdev.lock);
- oz_trace("Serial service started.\n");
+ oz_dbg(ON, "Serial service started\n");
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
void oz_cdev_stop(struct oz_pd *pd, int pause)
{
struct oz_serial_ctx *ctx;
- oz_event_log(OZ_EVT_SERVICE, 4, OZ_APPID_SERIAL, NULL, pause);
+
if (pause) {
- oz_trace("Serial service paused.\n");
+ oz_dbg(ON, "Serial service paused\n");
return;
}
spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
@@ -462,11 +480,12 @@ void oz_cdev_stop(struct oz_pd *pd, int pause)
spin_unlock(&g_cdev.lock);
if (pd) {
oz_pd_put(pd);
- oz_trace("Active PD departed.\n");
+ oz_dbg(ON, "Active PD departed\n");
}
- oz_trace("Serial service stopped.\n");
+ oz_dbg(ON, "Serial service stopped\n");
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt)
@@ -481,7 +500,7 @@ void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt)
ctx = oz_cdev_claim_ctx(pd);
if (ctx == NULL) {
- oz_trace("Cannot claim serial context.\n");
+ oz_dbg(ON, "Cannot claim serial context\n");
return;
}
@@ -491,8 +510,8 @@ void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt)
if (app_hdr->elt_seq_num != 0) {
if (((ctx->rx_seq_num - app_hdr->elt_seq_num) & 0x80) == 0) {
/* Reject duplicate element. */
- oz_trace("Duplicate element:%02x %02x\n",
- app_hdr->elt_seq_num, ctx->rx_seq_num);
+ oz_dbg(ON, "Duplicate element:%02x %02x\n",
+ app_hdr->elt_seq_num, ctx->rx_seq_num);
goto out;
}
}
@@ -505,7 +524,7 @@ void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt)
if (space < 0)
space += OZ_RD_BUF_SZ;
if (len > space) {
- oz_trace("Not enough space:%d %d\n", len, space);
+ oz_dbg(ON, "Not enough space:%d %d\n", len, space);
len = space;
}
ix = ctx->rd_in;
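
The write path above now distinguishes two failure modes instead of returning -1. A hypothetical userspace sketch (the /dev/ozwpan node name is assumed from the device_create() call later in this file) showing how a caller could tell them apart:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/ozwpan", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "ping", 4) < 0) {
		if (errno == ENXIO)		/* no active PD selected */
			fprintf(stderr, "no active PD\n");
		else if (errno == EAGAIN)	/* PD known but not connected */
			fprintf(stderr, "PD not connected yet\n");
	}
	close(fd);
	return 0;
}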
diff --git a/drivers/staging/ozwpan/ozconfig.h b/drivers/staging/ozwpan/ozconfig.h
deleted file mode 100644
index 43e6373..0000000
--- a/drivers/staging/ozwpan/ozconfig.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * ---------------------------------------------------------------------------*/
-#ifndef _OZCONFIG_H
-#define _OZCONFIG_H
-
-/* #define WANT_TRACE */
-#ifdef WANT_TRACE
-#define WANT_VERBOSE_TRACE
-#endif /* #ifdef WANT_TRACE */
-/* #define WANT_URB_PARANOIA */
-
-/* #define WANT_PRE_2_6_39 */
-#define WANT_EVENT_TRACE
-
-/* These defines determine what verbose trace is displayed. */
-#ifdef WANT_VERBOSE_TRACE
-/* #define WANT_TRACE_STREAM */
-/* #define WANT_TRACE_URB */
-/* #define WANT_TRACE_CTRL_DETAIL */
-#define WANT_TRACE_HUB
-/* #define WANT_TRACE_RX_FRAMES */
-/* #define WANT_TRACE_TX_FRAMES */
-#endif /* WANT_VERBOSE_TRACE */
-
-#endif /* _OZCONFIG_H */
diff --git a/drivers/staging/ozwpan/ozdbg.h b/drivers/staging/ozwpan/ozdbg.h
new file mode 100644
index 0000000..b86a2b7
--- /dev/null
+++ b/drivers/staging/ozwpan/ozdbg.h
@@ -0,0 +1,54 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * ---------------------------------------------------------------------------*/
+
+#ifndef _OZDBG_H
+#define _OZDBG_H
+
+#define OZ_WANT_DBG 0
+#define OZ_WANT_VERBOSE_DBG 1
+
+#define OZ_DBG_ON 0x0
+#define OZ_DBG_STREAM 0x1
+#define OZ_DBG_URB 0x2
+#define OZ_DBG_CTRL_DETAIL 0x4
+#define OZ_DBG_HUB 0x8
+#define OZ_DBG_RX_FRAMES 0x10
+#define OZ_DBG_TX_FRAMES 0x20
+
+#define OZ_DEFAULT_DBG_MASK \
+ ( \
+ /* OZ_DBG_STREAM | */ \
+ /* OZ_DBG_URB | */ \
+ /* OZ_DBG_CTRL_DETAIL | */ \
+ OZ_DBG_HUB | \
+ /* OZ_DBG_RX_FRAMES | */ \
+ /* OZ_DBG_TX_FRAMES | */ \
+ 0)
+
+extern unsigned int oz_dbg_mask;
+
+#define oz_want_dbg(mask) \
+ ((OZ_WANT_DBG && (OZ_DBG_##mask == OZ_DBG_ON)) || \
+ (OZ_WANT_VERBOSE_DBG && (OZ_DBG_##mask & oz_dbg_mask)))
+
+#define oz_dbg(mask, fmt, ...) \
+do { \
+ if (oz_want_dbg(mask)) \
+ pr_debug(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define oz_cdev_dbg(cdev, mask, fmt, ...) \
+do { \
+ if (oz_want_dbg(mask)) \
+ netdev_dbg((cdev)->dev, fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define oz_pd_dbg(pd, mask, fmt, ...) \
+do { \
+ if (oz_want_dbg(mask)) \
+ pr_debug(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#endif /* _OZDBG_H */
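
A short usage sketch for the new header (hypothetical caller; oz_dbg_mask is normally defined once elsewhere in the driver): oz_dbg(ON, ...) reaches pr_debug() only when OZ_WANT_DBG is non-zero, while category messages such as STREAM print when OZ_WANT_VERBOSE_DBG is set and the corresponding OZ_DBG_* bit is present in oz_dbg_mask (STREAM is commented out of OZ_DEFAULT_DBG_MASK above, which enables only HUB).

#include <linux/printk.h>
#include "ozdbg.h"

unsigned int oz_dbg_mask = OZ_DEFAULT_DBG_MASK;	/* defined once in the real driver */

static void oz_dbg_example(int buf_count, int len)
{
	/* Silent with the defaults above (OZ_WANT_DBG == 0). */
	oz_dbg(ON, "element freed\n");
	/* Prints only if OZ_DBG_STREAM is added to oz_dbg_mask. */
	oz_dbg(STREAM, "Stream up: %d %d\n", buf_count, len);
}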
diff --git a/drivers/staging/ozwpan/ozeltbuf.c b/drivers/staging/ozwpan/ozeltbuf.c
index ac90fc7..9b86486 100644
--- a/drivers/staging/ozwpan/ozeltbuf.c
+++ b/drivers/staging/ozwpan/ozeltbuf.c
@@ -6,16 +6,15 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
-#include "ozconfig.h"
+#include "ozdbg.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
-#include "oztrace.h"
-/*------------------------------------------------------------------------------
- */
+
#define OZ_ELT_INFO_MAGIC_USED 0x35791057
#define OZ_ELT_INFO_MAGIC_FREE 0x78940102
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
int oz_elt_buf_init(struct oz_elt_buf *buf)
@@ -28,13 +27,15 @@ int oz_elt_buf_init(struct oz_elt_buf *buf)
spin_lock_init(&buf->lock);
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
void oz_elt_buf_term(struct oz_elt_buf *buf)
{
struct list_head *e;
int i;
+
/* Free any elements in the order or isoc lists. */
for (i = 0; i < 2; i++) {
struct list_head *list;
@@ -59,12 +60,14 @@ void oz_elt_buf_term(struct oz_elt_buf *buf)
}
buf->free_elts = 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
struct oz_elt_info *oz_elt_info_alloc(struct oz_elt_buf *buf)
{
- struct oz_elt_info *ei = NULL;
+ struct oz_elt_info *ei;
+
spin_lock_bh(&buf->lock);
if (buf->free_elts && buf->elt_pool) {
ei = container_of(buf->elt_pool, struct oz_elt_info, link);
@@ -72,8 +75,8 @@ struct oz_elt_info *oz_elt_info_alloc(struct oz_elt_buf *buf)
buf->free_elts--;
spin_unlock_bh(&buf->lock);
if (ei->magic != OZ_ELT_INFO_MAGIC_FREE) {
- oz_trace("oz_elt_info_alloc: ei with bad magic: 0x%x\n",
- ei->magic);
+ oz_dbg(ON, "%s: ei with bad magic: 0x%x\n",
+ __func__, ei->magic);
}
} else {
spin_unlock_bh(&buf->lock);
@@ -91,7 +94,8 @@ struct oz_elt_info *oz_elt_info_alloc(struct oz_elt_buf *buf)
}
return ei;
}
-/*------------------------------------------------------------------------------
+
+/*
* Precondition: oz_elt_buf.lock must be held.
* Context: softirq or process
*/
@@ -104,18 +108,19 @@ void oz_elt_info_free(struct oz_elt_buf *buf, struct oz_elt_info *ei)
buf->elt_pool = &ei->link;
ei->magic = OZ_ELT_INFO_MAGIC_FREE;
} else {
- oz_trace("oz_elt_info_free: bad magic ei: %p"
- " magic: 0x%x\n",
- ei, ei->magic);
+ oz_dbg(ON, "%s: bad magic ei: %p magic: 0x%x\n",
+ __func__, ei, ei->magic);
}
}
}
+
/*------------------------------------------------------------------------------
* Context: softirq
*/
void oz_elt_info_free_chain(struct oz_elt_buf *buf, struct list_head *list)
{
struct list_head *e;
+
e = list->next;
spin_lock_bh(&buf->lock);
while (e != list) {
@@ -126,13 +131,12 @@ void oz_elt_info_free_chain(struct oz_elt_buf *buf, struct list_head *list)
}
spin_unlock_bh(&buf->lock);
}
-/*------------------------------------------------------------------------------
- */
+
int oz_elt_stream_create(struct oz_elt_buf *buf, u8 id, int max_buf_count)
{
struct oz_elt_stream *st;
- oz_trace("oz_elt_stream_create(0x%x)\n", id);
+ oz_dbg(ON, "%s: (0x%x)\n", __func__, id);
st = kzalloc(sizeof(struct oz_elt_stream), GFP_ATOMIC | __GFP_ZERO);
if (st == NULL)
@@ -146,13 +150,13 @@ int oz_elt_stream_create(struct oz_elt_buf *buf, u8 id, int max_buf_count)
spin_unlock_bh(&buf->lock);
return 0;
}
-/*------------------------------------------------------------------------------
- */
+
int oz_elt_stream_delete(struct oz_elt_buf *buf, u8 id)
{
struct list_head *e;
struct oz_elt_stream *st = NULL;
- oz_trace("oz_elt_stream_delete(0x%x)\n", id);
+
+ oz_dbg(ON, "%s: (0x%x)\n", __func__, id);
spin_lock_bh(&buf->lock);
e = buf->stream_list.next;
while (e != &buf->stream_list) {
@@ -175,9 +179,8 @@ int oz_elt_stream_delete(struct oz_elt_buf *buf, u8 id)
list_del_init(&ei->link);
list_del_init(&ei->link_order);
st->buf_count -= ei->length;
- oz_trace2(OZ_TRACE_STREAM, "Stream down: %d %d %d\n",
- st->buf_count,
- ei->length, atomic_read(&st->ref_count));
+ oz_dbg(STREAM, "Stream down: %d %d %d\n",
+ st->buf_count, ei->length, atomic_read(&st->ref_count));
oz_elt_stream_put(st);
oz_elt_info_free(buf, ei);
}
@@ -185,22 +188,21 @@ int oz_elt_stream_delete(struct oz_elt_buf *buf, u8 id)
oz_elt_stream_put(st);
return 0;
}
-/*------------------------------------------------------------------------------
- */
+
void oz_elt_stream_get(struct oz_elt_stream *st)
{
atomic_inc(&st->ref_count);
}
-/*------------------------------------------------------------------------------
- */
+
void oz_elt_stream_put(struct oz_elt_stream *st)
{
if (atomic_dec_and_test(&st->ref_count)) {
- oz_trace("Stream destroyed\n");
+ oz_dbg(ON, "Stream destroyed\n");
kfree(st);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Precondition: Element buffer lock must be held.
* If this function fails the caller is responsible for deallocating the elt
* info structure.
@@ -210,6 +212,7 @@ int oz_queue_elt_info(struct oz_elt_buf *buf, u8 isoc, u8 id,
{
struct oz_elt_stream *st = NULL;
struct list_head *e;
+
if (id) {
list_for_each(e, &buf->stream_list) {
st = container_of(e, struct oz_elt_stream, link);
@@ -242,8 +245,7 @@ int oz_queue_elt_info(struct oz_elt_buf *buf, u8 isoc, u8 id,
st->buf_count += ei->length;
/* Add to list in stream. */
list_add_tail(&ei->link, &st->elt_list);
- oz_trace2(OZ_TRACE_STREAM, "Stream up: %d %d\n",
- st->buf_count, ei->length);
+ oz_dbg(STREAM, "Stream up: %d %d\n", st->buf_count, ei->length);
/* Check if we have too much buffered for this stream. If so
* start dropping elements until we are back in bounds.
*/
@@ -263,8 +265,7 @@ int oz_queue_elt_info(struct oz_elt_buf *buf, u8 isoc, u8 id,
&buf->isoc_list : &buf->order_list);
return 0;
}
-/*------------------------------------------------------------------------------
- */
+
int oz_select_elts_for_tx(struct oz_elt_buf *buf, u8 isoc, unsigned *len,
unsigned max_len, struct list_head *list)
{
@@ -272,6 +273,7 @@ int oz_select_elts_for_tx(struct oz_elt_buf *buf, u8 isoc, unsigned *len,
struct list_head *e;
struct list_head *el;
struct oz_elt_info *ei;
+
spin_lock_bh(&buf->lock);
if (isoc)
el = &buf->isoc_list;
@@ -293,9 +295,8 @@ int oz_select_elts_for_tx(struct oz_elt_buf *buf, u8 isoc, unsigned *len,
list_del(&ei->link_order);
if (ei->stream) {
ei->stream->buf_count -= ei->length;
- oz_trace2(OZ_TRACE_STREAM,
- "Stream down: %d %d\n",
- ei->stream->buf_count, ei->length);
+ oz_dbg(STREAM, "Stream down: %d %d\n",
+ ei->stream->buf_count, ei->length);
oz_elt_stream_put(ei->stream);
ei->stream = NULL;
}
@@ -309,18 +310,17 @@ int oz_select_elts_for_tx(struct oz_elt_buf *buf, u8 isoc, unsigned *len,
spin_unlock_bh(&buf->lock);
return count;
}
-/*------------------------------------------------------------------------------
- */
+
int oz_are_elts_available(struct oz_elt_buf *buf)
{
return buf->order_list.next != &buf->order_list;
}
-/*------------------------------------------------------------------------------
- */
+
void oz_trim_elt_pool(struct oz_elt_buf *buf)
{
struct list_head *free = NULL;
struct list_head *e;
+
spin_lock_bh(&buf->lock);
while (buf->free_elts > buf->max_free_elts) {
e = buf->elt_pool;
diff --git a/drivers/staging/ozwpan/ozevent.c b/drivers/staging/ozwpan/ozevent.c
deleted file mode 100644
index 77e8675..0000000
--- a/drivers/staging/ozwpan/ozevent.c
+++ /dev/null
@@ -1,195 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#include "ozconfig.h"
-#ifdef WANT_EVENT_TRACE
-#include <linux/module.h>
-#include <linux/debugfs.h>
-#include <linux/jiffies.h>
-#include <linux/uaccess.h>
-#include "oztrace.h"
-#include "ozevent.h"
-#include "ozappif.h"
-/*------------------------------------------------------------------------------
- * Although the event mask is logically part of the oz_evtdev structure, it is
- * needed outside of this file so define it separately to avoid the need to
- * export definition of struct oz_evtdev.
- */
-u32 g_evt_mask;
-/*------------------------------------------------------------------------------
- */
-#define OZ_MAX_EVTS 2048 /* Must be power of 2 */
-struct oz_evtdev {
- struct dentry *root_dir;
- int evt_in;
- int evt_out;
- int missed_events;
- int present;
- atomic_t users;
- spinlock_t lock;
- struct oz_event evts[OZ_MAX_EVTS];
-};
-
-static struct oz_evtdev g_evtdev;
-
-/*------------------------------------------------------------------------------
- * Context: process
- */
-void oz_event_init(void)
-{
- /* Because g_evtdev is static external all fields initially zero so no
- * need to reinitialized those.
- */
- oz_trace("Event tracing initialized\n");
- spin_lock_init(&g_evtdev.lock);
- atomic_set(&g_evtdev.users, 0);
-}
-/*------------------------------------------------------------------------------
- * Context: process
- */
-void oz_event_term(void)
-{
- oz_trace("Event tracing terminated\n");
-}
-/*------------------------------------------------------------------------------
- * Context: any
- */
-void oz_event_log2(u8 evt, u8 ctx1, u16 ctx2, void *ctx3, unsigned ctx4)
-{
- unsigned long irqstate;
- int ix;
- spin_lock_irqsave(&g_evtdev.lock, irqstate);
- ix = (g_evtdev.evt_in + 1) & (OZ_MAX_EVTS - 1);
- if (ix != g_evtdev.evt_out) {
- struct oz_event *e = &g_evtdev.evts[g_evtdev.evt_in];
- e->jiffies = jiffies;
- e->evt = evt;
- e->ctx1 = ctx1;
- e->ctx2 = ctx2;
- e->ctx3 = (__u32)(unsigned long)ctx3;
- e->ctx4 = ctx4;
- g_evtdev.evt_in = ix;
- } else {
- g_evtdev.missed_events++;
- }
- spin_unlock_irqrestore(&g_evtdev.lock, irqstate);
-}
-/*------------------------------------------------------------------------------
- * Context: process
- */
-#ifdef CONFIG_DEBUG_FS
-static void oz_events_clear(struct oz_evtdev *dev)
-{
- unsigned long irqstate;
- oz_trace("Clearing events\n");
- spin_lock_irqsave(&dev->lock, irqstate);
- dev->evt_in = dev->evt_out = 0;
- dev->missed_events = 0;
- spin_unlock_irqrestore(&dev->lock, irqstate);
-}
-/*------------------------------------------------------------------------------
- * Context: process
- */
-static int oz_events_open(struct inode *inode, struct file *filp)
-{
- oz_trace("oz_evt_open()\n");
- oz_trace("Open flags: 0x%x\n", filp->f_flags);
- if (atomic_add_return(1, &g_evtdev.users) == 1) {
- oz_events_clear(&g_evtdev);
- return nonseekable_open(inode, filp);
- } else {
- atomic_dec(&g_evtdev.users);
- return -EBUSY;
- }
-}
-/*------------------------------------------------------------------------------
- * Context: process
- */
-static int oz_events_release(struct inode *inode, struct file *filp)
-{
- oz_events_clear(&g_evtdev);
- atomic_dec(&g_evtdev.users);
- g_evt_mask = 0;
- oz_trace("oz_evt_release()\n");
- return 0;
-}
-/*------------------------------------------------------------------------------
- * Context: process
- */
-static ssize_t oz_events_read(struct file *filp, char __user *buf, size_t count,
- loff_t *fpos)
-{
- struct oz_evtdev *dev = &g_evtdev;
- int rc = 0;
- int nb_evts = count / sizeof(struct oz_event);
- int n;
- int sz;
-
- n = dev->evt_in - dev->evt_out;
- if (n < 0)
- n += OZ_MAX_EVTS;
- if (nb_evts > n)
- nb_evts = n;
- if (nb_evts == 0)
- goto out;
- n = OZ_MAX_EVTS - dev->evt_out;
- if (n > nb_evts)
- n = nb_evts;
- sz = n * sizeof(struct oz_event);
- if (copy_to_user(buf, &dev->evts[dev->evt_out], sz)) {
- rc = -EFAULT;
- goto out;
- }
- if (n == nb_evts)
- goto out2;
- n = nb_evts - n;
- if (copy_to_user(buf + sz, dev->evts, n * sizeof(struct oz_event))) {
- rc = -EFAULT;
- goto out;
- }
-out2:
- dev->evt_out = (dev->evt_out + nb_evts) & (OZ_MAX_EVTS - 1);
- rc = nb_evts * sizeof(struct oz_event);
-out:
- return rc;
-}
-/*------------------------------------------------------------------------------
- */
-static const struct file_operations oz_events_fops = {
- .owner = THIS_MODULE,
- .open = oz_events_open,
- .release = oz_events_release,
- .read = oz_events_read,
-};
-/*------------------------------------------------------------------------------
- * Context: process
- */
-void oz_debugfs_init(void)
-{
- struct dentry *parent;
-
- parent = debugfs_create_dir("ozwpan", NULL);
- if (parent == NULL) {
- oz_trace("Failed to create debugfs directory ozmo\n");
- return;
- } else {
- g_evtdev.root_dir = parent;
- if (debugfs_create_file("events", S_IRUSR, parent, NULL,
- &oz_events_fops) == NULL)
- oz_trace("Failed to create file ozmo/events\n");
- if (debugfs_create_x32("event_mask", S_IRUSR | S_IWUSR, parent,
- &g_evt_mask) == NULL)
- oz_trace("Failed to create file ozmo/event_mask\n");
- }
-}
-/*------------------------------------------------------------------------------
- * Context: process
- */
-void oz_debugfs_remove(void)
-{
- debugfs_remove_recursive(g_evtdev.root_dir);
-}
-#endif /* CONFIG_DEBUG_FS */
-#endif /* WANT_EVENT_TRACE */
diff --git a/drivers/staging/ozwpan/ozevent.h b/drivers/staging/ozwpan/ozevent.h
deleted file mode 100644
index 32f6f98..0000000
--- a/drivers/staging/ozwpan/ozevent.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#ifndef _OZEVENT_H
-#define _OZEVENT_H
-#include "ozconfig.h"
-#include "ozeventdef.h"
-
-#ifdef WANT_EVENT_TRACE
-extern u32 g_evt_mask;
-void oz_event_init(void);
-void oz_event_term(void);
-void oz_event_log2(u8 evt, u8 ctx1, u16 ctx2, void *ctx3, unsigned ctx4);
-void oz_debugfs_init(void);
-void oz_debugfs_remove(void);
-#define oz_event_log(__evt, __ctx1, __ctx2, __ctx3, __ctx4) \
- do { \
- if ((1<<(__evt)) & g_evt_mask) \
- oz_event_log2(__evt, __ctx1, __ctx2, __ctx3, __ctx4); \
- } while (0)
-
-#else
-#define oz_event_init()
-#define oz_event_term()
-#define oz_event_log(__evt, __ctx1, __ctx2, __ctx3, __ctx4)
-#define oz_debugfs_init()
-#define oz_debugfs_remove()
-#endif /* WANT_EVENT_TRACE */
-
-#endif /* _OZEVENT_H */
diff --git a/drivers/staging/ozwpan/ozeventdef.h b/drivers/staging/ozwpan/ozeventdef.h
deleted file mode 100644
index 4b93898..0000000
--- a/drivers/staging/ozwpan/ozeventdef.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#ifndef _OZEVENTDEF_H
-#define _OZEVENTDEF_H
-
-#define OZ_EVT_RX_FRAME 0
-#define OZ_EVT_RX_PROCESS 1
-#define OZ_EVT_TX_FRAME 2
-#define OZ_EVT_TX_ISOC 3
-#define OZ_EVT_URB_SUBMIT 4
-#define OZ_EVT_URB_DONE 5
-#define OZ_EVT_URB_CANCEL 6
-#define OZ_EVT_CTRL_REQ 7
-#define OZ_EVT_CTRL_CNF 8
-#define OZ_EVT_CTRL_LOCAL 9
-#define OZ_EVT_CONNECT_REQ 10
-#define OZ_EVT_CONNECT_RSP 11
-#define OZ_EVT_EP_CREDIT 12
-#define OZ_EVT_EP_BUFFERING 13
-#define OZ_EVT_TX_ISOC_DONE 14
-#define OZ_EVT_TX_ISOC_DROP 15
-#define OZ_EVT_TIMER_CTRL 16
-#define OZ_EVT_TIMER 17
-#define OZ_EVT_PD_STATE 18
-#define OZ_EVT_SERVICE 19
-#define OZ_EVT_DEBUG 20
-
-struct oz_event {
- __u32 jiffies;
- __u8 evt;
- __u8 ctx1;
- __u16 ctx2;
- __u32 ctx3;
- __u32 ctx4;
-};
-
-#endif /* _OZEVENTDEF_H */
diff --git a/drivers/staging/ozwpan/ozhcd.c b/drivers/staging/ozwpan/ozhcd.c
index 8ac26f5..d9c43c3 100644
--- a/drivers/staging/ozwpan/ozhcd.c
+++ b/drivers/staging/ozwpan/ozhcd.c
@@ -26,32 +26,42 @@
*/
#include <linux/platform_device.h>
#include <linux/usb.h>
-#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "linux/usb/hcd.h"
#include <asm/unaligned.h>
-#include "ozconfig.h"
+#include "ozdbg.h"
#include "ozusbif.h"
-#include "oztrace.h"
#include "ozurbparanoia.h"
-#include "ozevent.h"
#include "ozhcd.h"
-/*------------------------------------------------------------------------------
+
+/*
* Number of units of buffering to capture for an isochronous IN endpoint before
* allowing data to be indicated up.
*/
-#define OZ_IN_BUFFERING_UNITS 50
+#define OZ_IN_BUFFERING_UNITS 100
+
/* Name of our platform device.
*/
#define OZ_PLAT_DEV_NAME "ozwpan"
+
/* Maximum number of free urb links that can be kept in the pool.
*/
#define OZ_MAX_LINK_POOL_SIZE 16
+
/* Get endpoint object from the containing link.
*/
#define ep_from_link(__e) container_of((__e), struct oz_endpoint, link)
-/*------------------------------------------------------------------------------
+
+/*EP0 timeout before ep0 request is again added to TX queue. (13*8 = 98mSec)
+ */
+#define EP0_TIMEOUT_COUNTER 13
+
+/* Debounce time HCD driver should wait before unregistering.
+ */
+#define OZ_HUB_DEBOUNCE_TIMEOUT 1500
+
+/*
* Used to link urbs together and also store some status information for each
* urb.
* A cache of these are kept in a pool to reduce number of calls to kmalloc.
@@ -62,16 +72,18 @@ struct oz_urb_link {
struct oz_port *port;
u8 req_id;
u8 ep_num;
- unsigned long submit_jiffies;
+ unsigned submit_counter;
};
/* Holds state information about a USB endpoint.
*/
+#define OZ_EP_BUFFER_SIZE_ISOC (1024 * 24)
+#define OZ_EP_BUFFER_SIZE_INT 512
struct oz_endpoint {
struct list_head urb_list; /* List of oz_urb_link items. */
struct list_head link; /* For isoc ep, links in to isoc
lists of oz_port. */
- unsigned long last_jiffies;
+ struct timespec timestamp;
int credit;
int credit_ceiling;
u8 ep_num;
@@ -84,6 +96,7 @@ struct oz_endpoint {
unsigned flags;
int start_frame;
};
+
/* Bits in the flags field. */
#define OZ_F_EP_BUFFERING 0x1
#define OZ_F_EP_HAVE_STREAM 0x2
@@ -114,6 +127,7 @@ struct oz_port {
struct list_head isoc_out_ep;
struct list_head isoc_in_ep;
};
+
#define OZ_PORT_F_PRESENT 0x1
#define OZ_PORT_F_CHANGED 0x2
#define OZ_PORT_F_DYING 0x4
@@ -131,11 +145,12 @@ struct oz_hcd {
uint flags;
struct usb_hcd *hcd;
};
+
/* Bits in flags field.
*/
#define OZ_HDC_F_SUSPENDED 0x1
-/*------------------------------------------------------------------------------
+/*
* Static function prototypes.
*/
static int oz_hcd_start(struct usb_hcd *hcd);
@@ -175,7 +190,8 @@ static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
struct urb *urb);
static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status);
-/*------------------------------------------------------------------------------
+
+/*
* Static external variables.
*/
static struct platform_device *g_plat_dev;
@@ -189,6 +205,7 @@ static DEFINE_SPINLOCK(g_tasklet_lock);
static struct tasklet_struct g_urb_process_tasklet;
static struct tasklet_struct g_urb_cancel_tasklet;
static atomic_t g_pending_urbs = ATOMIC_INIT(0);
+static atomic_t g_usb_frame_number = ATOMIC_INIT(0);
static const struct hc_driver g_oz_hc_drv = {
.description = g_hcd_name,
.product_desc = "Ozmo Devices WPAN",
@@ -219,7 +236,8 @@ static struct platform_driver g_oz_plat_drv = {
.owner = THIS_MODULE,
},
};
-/*------------------------------------------------------------------------------
+
+/*
* Gets our private context area (which is of type struct oz_hcd) from the
* usb_hcd structure.
* Context: any
@@ -228,7 +246,8 @@ static inline struct oz_hcd *oz_hcd_private(struct usb_hcd *hcd)
{
return (struct oz_hcd *)hcd->hcd_priv;
}
-/*------------------------------------------------------------------------------
+
+/*
* Searches list of ports to find the index of the one with a specified USB
* bus address. If none of the ports has the bus address then the connection
* port is returned, if there is one or -1 otherwise.
@@ -237,13 +256,15 @@ static inline struct oz_hcd *oz_hcd_private(struct usb_hcd *hcd)
static int oz_get_port_from_addr(struct oz_hcd *ozhcd, u8 bus_addr)
{
int i;
+
for (i = 0; i < OZ_NB_PORTS; i++) {
if (ozhcd->ports[i].bus_addr == bus_addr)
return i;
}
return ozhcd->conn_port;
}
-/*------------------------------------------------------------------------------
+
+/*
* Allocates an urb link, first trying the pool but going to heap if empty.
* Context: any
*/
@@ -251,6 +272,7 @@ static struct oz_urb_link *oz_alloc_urb_link(void)
{
struct oz_urb_link *urbl = NULL;
unsigned long irq_state;
+
spin_lock_irqsave(&g_link_lock, irq_state);
if (g_link_pool) {
urbl = container_of(g_link_pool, struct oz_urb_link, link);
@@ -262,7 +284,8 @@ static struct oz_urb_link *oz_alloc_urb_link(void)
urbl = kmalloc(sizeof(struct oz_urb_link), GFP_ATOMIC);
return urbl;
}
-/*------------------------------------------------------------------------------
+
+/*
* Frees an urb link by putting it in the pool if there is enough space or
* deallocating it to heap otherwise.
* Context: any
@@ -282,7 +305,8 @@ static void oz_free_urb_link(struct oz_urb_link *urbl)
kfree(urbl);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Deallocates all the urb links in the pool.
* Context: unknown
*/
@@ -290,6 +314,7 @@ static void oz_empty_link_pool(void)
{
struct list_head *e;
unsigned long irq_state;
+
spin_lock_irqsave(&g_link_lock, irq_state);
e = g_link_pool;
g_link_pool = NULL;
@@ -302,12 +327,13 @@ static void oz_empty_link_pool(void)
kfree(urbl);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Allocates endpoint structure and optionally a buffer. If a buffer is
* allocated it immediately follows the endpoint structure.
* Context: softirq
*/
-static struct oz_endpoint *oz_ep_alloc(gfp_t mem_flags, int buffer_size)
+static struct oz_endpoint *oz_ep_alloc(int buffer_size, gfp_t mem_flags)
{
struct oz_endpoint *ep =
kzalloc(sizeof(struct oz_endpoint)+buffer_size, mem_flags);
@@ -322,7 +348,8 @@ static struct oz_endpoint *oz_ep_alloc(gfp_t mem_flags, int buffer_size)
}
return ep;
}
-/*------------------------------------------------------------------------------
+
+/*
* Pre-condition: Must be called with g_tasklet_lock held and interrupts
* disabled.
* Context: softirq or process
@@ -331,6 +358,7 @@ static struct oz_urb_link *oz_uncancel_urb(struct oz_hcd *ozhcd, struct urb *urb
{
struct oz_urb_link *urbl;
struct list_head *e;
+
list_for_each(e, &ozhcd->urb_cancel_list) {
urbl = container_of(e, struct oz_urb_link, link);
if (urb == urbl->urb) {
@@ -340,17 +368,19 @@ static struct oz_urb_link *oz_uncancel_urb(struct oz_hcd *ozhcd, struct urb *urb
}
return NULL;
}
-/*------------------------------------------------------------------------------
+
+/*
* This is called when we have finished processing an urb. It unlinks it from
* the ep and returns it to the core.
* Context: softirq or process
*/
static void oz_complete_urb(struct usb_hcd *hcd, struct urb *urb,
- int status, unsigned long submit_jiffies)
+ int status)
{
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
unsigned long irq_state;
- struct oz_urb_link *cancel_urbl = NULL;
+ struct oz_urb_link *cancel_urbl;
+
spin_lock_irqsave(&g_tasklet_lock, irq_state);
usb_hcd_unlink_urb_from_ep(hcd, urb);
/* Clear hcpriv which will prevent it being put in the cancel list
@@ -372,16 +402,9 @@ static void oz_complete_urb(struct usb_hcd *hcd, struct urb *urb,
*/
spin_unlock(&g_tasklet_lock);
if (oz_forget_urb(urb)) {
- oz_trace("OZWPAN: ERROR Unknown URB %p\n", urb);
+ oz_dbg(ON, "ERROR Unknown URB %p\n", urb);
} else {
- static unsigned long last_time;
atomic_dec(&g_pending_urbs);
- oz_trace2(OZ_TRACE_URB,
- "%lu: giveback_urb(%p,%x) %lu %lu pending:%d\n",
- jiffies, urb, status, jiffies-submit_jiffies,
- jiffies-last_time, atomic_read(&g_pending_urbs));
- last_time = jiffies;
- oz_event_log(OZ_EVT_URB_DONE, 0, 0, urb, status);
usb_hcd_giveback_urb(hcd, urb, status);
}
spin_lock(&g_tasklet_lock);
@@ -389,14 +412,14 @@ static void oz_complete_urb(struct usb_hcd *hcd, struct urb *urb,
if (cancel_urbl)
oz_free_urb_link(cancel_urbl);
}
-/*------------------------------------------------------------------------------
+
+/*
* Deallocates an endpoint including deallocating any associated stream and
* returning any queued urbs to the core.
* Context: softirq
*/
static void oz_ep_free(struct oz_port *port, struct oz_endpoint *ep)
{
- oz_trace("oz_ep_free()\n");
if (port) {
struct list_head list;
struct oz_hcd *ozhcd = port->ozhcd;
@@ -411,19 +434,20 @@ static void oz_ep_free(struct oz_port *port, struct oz_endpoint *ep)
list_splice_tail(&list, &ozhcd->orphanage);
spin_unlock_bh(&ozhcd->hcd_lock);
}
- oz_trace("Freeing endpoint memory\n");
+ oz_dbg(ON, "Freeing endpoint memory\n");
kfree(ep);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static void oz_complete_buffered_urb(struct oz_port *port,
struct oz_endpoint *ep,
struct urb *urb)
{
- u8 data_len, available_space, copy_len;
+ int data_len, available_space, copy_len;
- memcpy(&data_len, &ep->buffer[ep->out_ix], sizeof(u8));
+ data_len = ep->buffer[ep->out_ix];
if (data_len <= urb->transfer_buffer_length)
available_space = data_len;
else
@@ -448,28 +472,29 @@ static void oz_complete_buffered_urb(struct oz_port *port,
ep->out_ix = 0;
ep->buffered_units--;
- oz_trace("Trying to give back buffered frame of size=%d\n",
- available_space);
- oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
+ oz_dbg(ON, "Trying to give back buffered frame of size=%d\n",
+ available_space);
+ oz_complete_urb(port->ozhcd->hcd, urb, 0);
}
-/*------------------------------------------------------------------------------
+/*
* Context: softirq
*/
static int oz_enqueue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
struct urb *urb, u8 req_id)
{
struct oz_urb_link *urbl;
- struct oz_endpoint *ep;
+ struct oz_endpoint *ep = NULL;
int err = 0;
+
if (ep_addr >= OZ_NB_ENDPOINTS) {
- oz_trace("Invalid endpoint number in oz_enqueue_ep_urb().\n");
+ oz_dbg(ON, "%s: Invalid endpoint number\n", __func__);
return -EINVAL;
}
urbl = oz_alloc_urb_link();
if (!urbl)
return -ENOMEM;
- urbl->submit_jiffies = jiffies;
+ urbl->submit_counter = 0;
urbl->urb = urb;
urbl->req_id = req_id;
urbl->ep_num = ep_addr;
@@ -482,15 +507,20 @@ static int oz_enqueue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
*/
if (urb->unlinked) {
spin_unlock_bh(&port->ozhcd->hcd_lock);
- oz_trace("urb %p unlinked so complete immediately\n", urb);
- oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
+ oz_dbg(ON, "urb %p unlinked so complete immediately\n", urb);
+ oz_complete_urb(port->ozhcd->hcd, urb, 0);
oz_free_urb_link(urbl);
return 0;
}
+
if (in_dir)
ep = port->in_ep[ep_addr];
else
ep = port->out_ep[ep_addr];
+ if (!ep) {
+ err = -ENOMEM;
+ goto out;
+ }
/*For interrupt endpoint check for buffered data
* & complete urb
@@ -503,23 +533,23 @@ static int oz_enqueue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
return 0;
}
- if (ep && port->hpd) {
+ if (port->hpd) {
list_add_tail(&urbl->link, &ep->urb_list);
if (!in_dir && ep_addr && (ep->credit < 0)) {
- ep->last_jiffies = jiffies;
+ getrawmonotonic(&ep->timestamp);
ep->credit = 0;
- oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num,
- 0, NULL, ep->credit);
}
} else {
err = -EPIPE;
}
+out:
spin_unlock_bh(&port->ozhcd->hcd_lock);
if (err)
oz_free_urb_link(urbl);
return err;
}
-/*------------------------------------------------------------------------------
+
+/*
* Removes an urb from the queue in the endpoint.
* Returns 0 if it is found and -EIDRM otherwise.
* Context: softirq
@@ -529,6 +559,7 @@ static int oz_dequeue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
{
struct oz_urb_link *urbl = NULL;
struct oz_endpoint *ep;
+
spin_lock_bh(&port->ozhcd->hcd_lock);
if (in_dir)
ep = port->in_ep[ep_addr];
@@ -550,7 +581,8 @@ static int oz_dequeue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
oz_free_urb_link(urbl);
return urbl ? 0 : -EIDRM;
}
-/*------------------------------------------------------------------------------
+
+/*
* Finds an urb given its request id.
* Context: softirq
*/
@@ -559,7 +591,7 @@ static struct urb *oz_find_urb_by_id(struct oz_port *port, int ep_ix,
{
struct oz_hcd *ozhcd = port->ozhcd;
struct urb *urb = NULL;
- struct oz_urb_link *urbl = NULL;
+ struct oz_urb_link *urbl;
struct oz_endpoint *ep;
spin_lock_bh(&ozhcd->hcd_lock);
@@ -582,7 +614,8 @@ static struct urb *oz_find_urb_by_id(struct oz_port *port, int ep_ix,
oz_free_urb_link(urbl);
return urb;
}
-/*------------------------------------------------------------------------------
+
+/*
* Pre-condition: Port lock must be held.
* Context: softirq
*/
@@ -596,12 +629,14 @@ static void oz_acquire_port(struct oz_port *port, void *hpd)
oz_usb_get(hpd);
port->hpd = hpd;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static struct oz_hcd *oz_hcd_claim(void)
{
struct oz_hcd *ozhcd;
+
spin_lock_bh(&g_hcdlock);
ozhcd = g_ozhcd;
if (ozhcd)
@@ -609,7 +644,8 @@ static struct oz_hcd *oz_hcd_claim(void)
spin_unlock_bh(&g_hcdlock);
return ozhcd;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static inline void oz_hcd_put(struct oz_hcd *ozhcd)
@@ -617,7 +653,8 @@ static inline void oz_hcd_put(struct oz_hcd *ozhcd)
if (ozhcd)
usb_put_hcd(ozhcd->hcd);
}
-/*------------------------------------------------------------------------------
+
+/*
* This is called by the protocol handler to notify that a PD has arrived.
* We allocate a port to associate with the PD and create a structure for
* endpoint 0. This port is made the connection port.
@@ -629,75 +666,74 @@ static inline void oz_hcd_put(struct oz_hcd *ozhcd)
* probably very rare indeed.
* Context: softirq
*/
-void *oz_hcd_pd_arrived(void *hpd)
+struct oz_port *oz_hcd_pd_arrived(void *hpd)
{
int i;
- void *hport = NULL;
- struct oz_hcd *ozhcd = NULL;
+ struct oz_port *hport;
+ struct oz_hcd *ozhcd;
struct oz_endpoint *ep;
- oz_trace("oz_hcd_pd_arrived()\n");
+
ozhcd = oz_hcd_claim();
- if (ozhcd == NULL)
+ if (!ozhcd)
return NULL;
/* Allocate an endpoint object in advance (before holding hcd lock) to
* use for out endpoint 0.
*/
- ep = oz_ep_alloc(GFP_ATOMIC, 0);
+ ep = oz_ep_alloc(0, GFP_ATOMIC);
+ if (!ep)
+ goto err_put;
+
spin_lock_bh(&ozhcd->hcd_lock);
- if (ozhcd->conn_port >= 0) {
- spin_unlock_bh(&ozhcd->hcd_lock);
- oz_trace("conn_port >= 0\n");
- goto out;
- }
+ if (ozhcd->conn_port >= 0)
+ goto err_unlock;
+
for (i = 0; i < OZ_NB_PORTS; i++) {
struct oz_port *port = &ozhcd->ports[i];
+
spin_lock(&port->port_lock);
- if ((port->flags & OZ_PORT_F_PRESENT) == 0) {
+ if (!(port->flags & (OZ_PORT_F_PRESENT | OZ_PORT_F_CHANGED))) {
oz_acquire_port(port, hpd);
spin_unlock(&port->port_lock);
break;
}
spin_unlock(&port->port_lock);
}
- if (i < OZ_NB_PORTS) {
- oz_trace("Setting conn_port = %d\n", i);
- ozhcd->conn_port = i;
- /* Attach out endpoint 0.
- */
- ozhcd->ports[i].out_ep[0] = ep;
- ep = NULL;
- hport = &ozhcd->ports[i];
- spin_unlock_bh(&ozhcd->hcd_lock);
- if (ozhcd->flags & OZ_HDC_F_SUSPENDED) {
- oz_trace("Resuming root hub\n");
- usb_hcd_resume_root_hub(ozhcd->hcd);
- }
- usb_hcd_poll_rh_status(ozhcd->hcd);
- } else {
- spin_unlock_bh(&ozhcd->hcd_lock);
- }
-out:
- if (ep) /* ep is non-null if not used. */
- oz_ep_free(NULL, ep);
+ if (i == OZ_NB_PORTS)
+ goto err_unlock;
+
+ ozhcd->conn_port = i;
+ hport = &ozhcd->ports[i];
+ hport->out_ep[0] = ep;
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ if (ozhcd->flags & OZ_HDC_F_SUSPENDED)
+ usb_hcd_resume_root_hub(ozhcd->hcd);
+ usb_hcd_poll_rh_status(ozhcd->hcd);
oz_hcd_put(ozhcd);
+
return hport;
+
+err_unlock:
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ oz_ep_free(NULL, ep);
+err_put:
+ oz_hcd_put(ozhcd);
+ return NULL;
}
-/*------------------------------------------------------------------------------
+
+/*
* This is called by the protocol handler to notify that the PD has gone away.
* We need to deallocate all resources and then request that the root hub is
* polled. We release the reference we hold on the PD.
* Context: softirq
*/
-void oz_hcd_pd_departed(void *hport)
+void oz_hcd_pd_departed(struct oz_port *port)
{
- struct oz_port *port = (struct oz_port *)hport;
struct oz_hcd *ozhcd;
void *hpd;
struct oz_endpoint *ep = NULL;
- oz_trace("oz_hcd_pd_departed()\n");
if (port == NULL) {
- oz_trace("oz_hcd_pd_departed() port = 0\n");
+ oz_dbg(ON, "%s: port = 0\n", __func__);
return;
}
ozhcd = port->ozhcd;
@@ -708,7 +744,7 @@ void oz_hcd_pd_departed(void *hport)
spin_lock_bh(&ozhcd->hcd_lock);
if ((ozhcd->conn_port >= 0) &&
(port == &ozhcd->ports[ozhcd->conn_port])) {
- oz_trace("Clearing conn_port\n");
+ oz_dbg(ON, "Clearing conn_port\n");
ozhcd->conn_port = -1;
}
spin_lock(&port->port_lock);
@@ -721,9 +757,10 @@ void oz_hcd_pd_departed(void *hport)
hpd = port->hpd;
port->hpd = NULL;
port->bus_addr = 0xff;
+ port->config_num = 0;
port->flags &= ~(OZ_PORT_F_PRESENT | OZ_PORT_F_DYING);
port->flags |= OZ_PORT_F_CHANGED;
- port->status &= ~USB_PORT_STAT_CONNECTION;
+ port->status &= ~(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE);
port->status |= (USB_PORT_STAT_C_CONNECTION << 16);
/* If there is an endpont 0 then clear the pointer while we hold
* the spinlock be we deallocate it after releasing the lock.
@@ -738,7 +775,8 @@ void oz_hcd_pd_departed(void *hport)
usb_hcd_poll_rh_status(ozhcd->hcd);
oz_usb_put(hpd);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
void oz_hcd_pd_reset(void *hpd, void *hport)
@@ -747,7 +785,8 @@ void oz_hcd_pd_reset(void *hpd, void *hport)
*/
struct oz_port *port = (struct oz_port *)hport;
struct oz_hcd *ozhcd = port->ozhcd;
- oz_trace("PD Reset\n");
+
+ oz_dbg(ON, "PD Reset\n");
spin_lock_bh(&port->port_lock);
port->flags |= OZ_PORT_F_CHANGED;
port->status |= USB_PORT_STAT_RESET;
@@ -756,7 +795,8 @@ void oz_hcd_pd_reset(void *hpd, void *hport)
oz_clean_endpoints_for_config(ozhcd->hcd, port);
usb_hcd_poll_rh_status(ozhcd->hcd);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
@@ -766,9 +806,8 @@ void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
struct urb *urb;
int err = 0;
- oz_event_log(OZ_EVT_CTRL_CNF, 0, req_id, NULL, status);
- oz_trace("oz_hcd_get_desc_cnf length = %d offs = %d tot_size = %d\n",
- length, offset, total_size);
+ oz_dbg(ON, "oz_hcd_get_desc_cnf length = %d offs = %d tot_size = %d\n",
+ length, offset, total_size);
urb = oz_find_urb_by_id(port, 0, req_id);
if (!urb)
return;
@@ -800,54 +839,52 @@ void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
}
}
urb->actual_length = total_size;
- oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
+ oz_complete_urb(port->ozhcd->hcd, urb, 0);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
-#ifdef WANT_TRACE
static void oz_display_conf_type(u8 t)
{
switch (t) {
case USB_REQ_GET_STATUS:
- oz_trace("USB_REQ_GET_STATUS - cnf\n");
+ oz_dbg(ON, "USB_REQ_GET_STATUS - cnf\n");
break;
case USB_REQ_CLEAR_FEATURE:
- oz_trace("USB_REQ_CLEAR_FEATURE - cnf\n");
+ oz_dbg(ON, "USB_REQ_CLEAR_FEATURE - cnf\n");
break;
case USB_REQ_SET_FEATURE:
- oz_trace("USB_REQ_SET_FEATURE - cnf\n");
+ oz_dbg(ON, "USB_REQ_SET_FEATURE - cnf\n");
break;
case USB_REQ_SET_ADDRESS:
- oz_trace("USB_REQ_SET_ADDRESS - cnf\n");
+ oz_dbg(ON, "USB_REQ_SET_ADDRESS - cnf\n");
break;
case USB_REQ_GET_DESCRIPTOR:
- oz_trace("USB_REQ_GET_DESCRIPTOR - cnf\n");
+ oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n");
break;
case USB_REQ_SET_DESCRIPTOR:
- oz_trace("USB_REQ_SET_DESCRIPTOR - cnf\n");
+ oz_dbg(ON, "USB_REQ_SET_DESCRIPTOR - cnf\n");
break;
case USB_REQ_GET_CONFIGURATION:
- oz_trace("USB_REQ_GET_CONFIGURATION - cnf\n");
+ oz_dbg(ON, "USB_REQ_GET_CONFIGURATION - cnf\n");
break;
case USB_REQ_SET_CONFIGURATION:
- oz_trace("USB_REQ_SET_CONFIGURATION - cnf\n");
+ oz_dbg(ON, "USB_REQ_SET_CONFIGURATION - cnf\n");
break;
case USB_REQ_GET_INTERFACE:
- oz_trace("USB_REQ_GET_INTERFACE - cnf\n");
+ oz_dbg(ON, "USB_REQ_GET_INTERFACE - cnf\n");
break;
case USB_REQ_SET_INTERFACE:
- oz_trace("USB_REQ_SET_INTERFACE - cnf\n");
+ oz_dbg(ON, "USB_REQ_SET_INTERFACE - cnf\n");
break;
case USB_REQ_SYNCH_FRAME:
- oz_trace("USB_REQ_SYNCH_FRAME - cnf\n");
+ oz_dbg(ON, "USB_REQ_SYNCH_FRAME - cnf\n");
break;
}
}
-#else
-#define oz_display_conf_type(__x)
-#endif /* WANT_TRACE */
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static void oz_hcd_complete_set_config(struct oz_port *port, struct urb *urb,
@@ -855,6 +892,7 @@ static void oz_hcd_complete_set_config(struct oz_port *port, struct urb *urb,
{
int rc = 0;
struct usb_hcd *hcd = port->ozhcd->hcd;
+
if (rcode == 0) {
port->config_num = config_num;
oz_clean_endpoints_for_config(hcd, port);
@@ -865,9 +903,10 @@ static void oz_hcd_complete_set_config(struct oz_port *port, struct urb *urb,
} else {
rc = -ENOMEM;
}
- oz_complete_urb(hcd, urb, rc, 0);
+ oz_complete_urb(hcd, urb, rc);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static void oz_hcd_complete_set_interface(struct oz_port *port, struct urb *urb,
@@ -875,10 +914,11 @@ static void oz_hcd_complete_set_interface(struct oz_port *port, struct urb *urb,
{
struct usb_hcd *hcd = port->ozhcd->hcd;
int rc = 0;
- if (rcode == 0) {
+
+ if ((rcode == 0) && (port->config_num > 0)) {
struct usb_host_config *config;
struct usb_host_interface *intf;
- oz_trace("Set interface %d alt %d\n", if_num, alt);
+ oz_dbg(ON, "Set interface %d alt %d\n", if_num, alt);
oz_clean_endpoints_for_interface(hcd, port, if_num);
config = &urb->dev->config[port->config_num-1];
intf = &config->intf_cache[if_num]->altsetting[alt];
@@ -890,9 +930,10 @@ static void oz_hcd_complete_set_interface(struct oz_port *port, struct urb *urb,
} else {
rc = -ENOMEM;
}
- oz_complete_urb(hcd, urb, rc, 0);
+ oz_complete_urb(hcd, urb, rc);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, const u8 *data,
@@ -905,11 +946,10 @@ void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, const u8 *data,
unsigned windex;
unsigned wvalue;
- oz_event_log(OZ_EVT_CTRL_CNF, 0, req_id, NULL, rcode);
- oz_trace("oz_hcd_control_cnf rcode=%u len=%d\n", rcode, data_len);
+ oz_dbg(ON, "oz_hcd_control_cnf rcode=%u len=%d\n", rcode, data_len);
urb = oz_find_urb_by_id(port, 0, req_id);
if (!urb) {
- oz_trace("URB not found\n");
+ oz_dbg(ON, "URB not found\n");
return;
}
setup = (struct usb_ctrlrequest *)urb->setup_packet;
@@ -928,12 +968,12 @@ void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, const u8 *data,
(u8)windex, (u8)wvalue);
break;
default:
- oz_complete_urb(hcd, urb, 0, 0);
+ oz_complete_urb(hcd, urb, 0);
}
} else {
int copy_len;
- oz_trace("VENDOR-CLASS - cnf\n");
+ oz_dbg(ON, "VENDOR-CLASS - cnf\n");
if (data_len) {
if (data_len <= urb->transfer_buffer_length)
copy_len = data_len;
@@ -942,10 +982,11 @@ void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, const u8 *data,
memcpy(urb->transfer_buffer, data, copy_len);
urb->actual_length = copy_len;
}
- oz_complete_urb(hcd, urb, 0, 0);
+ oz_complete_urb(hcd, urb, 0);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
static int oz_hcd_buffer_data(struct oz_endpoint *ep, const u8 *data,
@@ -953,13 +994,14 @@ static int oz_hcd_buffer_data(struct oz_endpoint *ep, const u8 *data,
{
int space;
int copy_len;
+
if (!ep->buffer)
return -1;
space = ep->out_ix-ep->in_ix-1;
if (space < 0)
space += ep->buffer_size;
if (space < (data_len+1)) {
- oz_trace("Buffer full\n");
+ oz_dbg(ON, "Buffer full\n");
return -1;
}
ep->buffer[ep->in_ix] = (u8)data_len;
@@ -981,7 +1023,8 @@ static int oz_hcd_buffer_data(struct oz_endpoint *ep, const u8 *data,
ep->buffered_units++;
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
void oz_hcd_data_ind(void *hport, u8 endpoint, const u8 *data, int data_len)
@@ -989,6 +1032,7 @@ void oz_hcd_data_ind(void *hport, u8 endpoint, const u8 *data, int data_len)
struct oz_port *port = (struct oz_port *)hport;
struct oz_endpoint *ep;
struct oz_hcd *ozhcd = port->ozhcd;
+
spin_lock_bh(&ozhcd->hcd_lock);
ep = port->in_ep[endpoint & USB_ENDPOINT_NUMBER_MASK];
if (ep == NULL)
@@ -1012,10 +1056,10 @@ void oz_hcd_data_ind(void *hport, u8 endpoint, const u8 *data, int data_len)
copy_len = urb->transfer_buffer_length;
memcpy(urb->transfer_buffer, data, copy_len);
urb->actual_length = copy_len;
- oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
+ oz_complete_urb(port->ozhcd->hcd, urb, 0);
return;
} else {
- oz_trace("buffering frame as URB is not available\n");
+ oz_dbg(ON, "buffering frame as URB is not available\n");
oz_hcd_buffer_data(ep, data, data_len);
}
break;
@@ -1026,14 +1070,16 @@ void oz_hcd_data_ind(void *hport, u8 endpoint, const u8 *data, int data_len)
done:
spin_unlock_bh(&ozhcd->hcd_lock);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: unknown
*/
static inline int oz_usb_get_frame_number(void)
{
- return jiffies_to_msecs(get_jiffies_64());
+ return atomic_inc_return(&g_usb_frame_number);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
int oz_hcd_heartbeat(void *hport)
@@ -1047,7 +1093,9 @@ int oz_hcd_heartbeat(void *hport)
struct list_head *n;
struct urb *urb;
struct oz_endpoint *ep;
- unsigned long now = jiffies;
+ struct timespec ts, delta;
+
+ getrawmonotonic(&ts);
INIT_LIST_HEAD(&xfr_list);
/* Check the OUT isoc endpoints to see if any URB data can be sent.
*/
@@ -1056,12 +1104,11 @@ int oz_hcd_heartbeat(void *hport)
ep = ep_from_link(e);
if (ep->credit < 0)
continue;
- ep->credit += jiffies_to_msecs(now - ep->last_jiffies);
+ delta = timespec_sub(ts, ep->timestamp);
+ ep->credit += div_u64(timespec_to_ns(&delta), NSEC_PER_MSEC);
if (ep->credit > ep->credit_ceiling)
ep->credit = ep->credit_ceiling;
- oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num, 0, NULL,
- ep->credit);
- ep->last_jiffies = now;
+ ep->timestamp = ts;
while (ep->credit && !list_empty(&ep->urb_list)) {
urbl = list_first_entry(&ep->urb_list,
struct oz_urb_link, link);
@@ -1069,8 +1116,8 @@ int oz_hcd_heartbeat(void *hport)
if ((ep->credit + 1) < urb->number_of_packets)
break;
ep->credit -= urb->number_of_packets;
- oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num, 0, NULL,
- ep->credit);
+ if (ep->credit < 0)
+ ep->credit = 0;
list_move_tail(&urbl->link, &xfr_list);
}
}
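
The credit accounting above (and the IN-endpoint variant further down) now accrues milliseconds from a raw monotonic timestamp rather than from jiffies. A condensed sketch of the accrual step, assuming the usual <linux/time.h> and <linux/math64.h> helpers; oz_ep_add_credit() is a hypothetical name, not a function in the driver.

#include <linux/time.h>
#include <linux/math64.h>

static void oz_ep_add_credit(struct oz_endpoint *ep)
{
	struct timespec now, delta;

	getrawmonotonic(&now);
	delta = timespec_sub(now, ep->timestamp);
	/* Credit one unit per elapsed millisecond, capped at the ceiling. */
	ep->credit += div_u64(timespec_to_ns(&delta), NSEC_PER_MSEC);
	if (ep->credit > ep->credit_ceiling)
		ep->credit = ep->credit_ceiling;
	ep->timestamp = now;
}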
@@ -1078,16 +1125,14 @@ int oz_hcd_heartbeat(void *hport)
/* Send to PD and complete URBs.
*/
list_for_each_safe(e, n, &xfr_list) {
- unsigned long t;
urbl = container_of(e, struct oz_urb_link, link);
urb = urbl->urb;
- t = urbl->submit_jiffies;
list_del_init(e);
urb->error_count = 0;
urb->start_frame = oz_usb_get_frame_number();
oz_usb_send_isoc(port->hpd, urbl->ep_num, urb);
oz_free_urb_link(urbl);
- oz_complete_urb(port->ozhcd->hcd, urb, 0, t);
+ oz_complete_urb(port->ozhcd->hcd, urb, 0);
}
/* Check the IN isoc endpoints to see if any URBs can be completed.
*/
@@ -1098,20 +1143,14 @@ int oz_hcd_heartbeat(void *hport)
if (ep->buffered_units >= OZ_IN_BUFFERING_UNITS) {
ep->flags &= ~OZ_F_EP_BUFFERING;
ep->credit = 0;
- oz_event_log(OZ_EVT_EP_CREDIT,
- ep->ep_num | USB_DIR_IN,
- 0, NULL, ep->credit);
- ep->last_jiffies = now;
+ ep->timestamp = ts;
ep->start_frame = 0;
- oz_event_log(OZ_EVT_EP_BUFFERING,
- ep->ep_num | USB_DIR_IN, 0, NULL, 0);
}
continue;
}
- ep->credit += jiffies_to_msecs(now - ep->last_jiffies);
- oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num | USB_DIR_IN,
- 0, NULL, ep->credit);
- ep->last_jiffies = now;
+ delta = timespec_sub(ts, ep->timestamp);
+ ep->credit += div_u64(timespec_to_ns(&delta), NSEC_PER_MSEC);
+ ep->timestamp = ts;
while (!list_empty(&ep->urb_list)) {
struct oz_urb_link *urbl =
list_first_entry(&ep->urb_list,
@@ -1120,7 +1159,7 @@ int oz_hcd_heartbeat(void *hport)
int len = 0;
int copy_len;
int i;
- if ((ep->credit + 1) < urb->number_of_packets)
+ if (ep->credit < urb->number_of_packets)
break;
if (ep->buffered_units < urb->number_of_packets)
break;
@@ -1154,8 +1193,6 @@ int oz_hcd_heartbeat(void *hport)
ep->start_frame += urb->number_of_packets;
list_move_tail(&urbl->link, &xfr_list);
ep->credit -= urb->number_of_packets;
- oz_event_log(OZ_EVT_EP_CREDIT, ep->ep_num | USB_DIR_IN,
- 0, NULL, ep->credit);
}
}
if (!list_empty(&port->isoc_out_ep) || !list_empty(&port->isoc_in_ep))
@@ -1168,7 +1205,7 @@ int oz_hcd_heartbeat(void *hport)
urb = urbl->urb;
list_del_init(e);
oz_free_urb_link(urbl);
- oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
+ oz_complete_urb(port->ozhcd->hcd, urb, 0);
}
/* Check if there are any ep0 requests that have timed out.
* If so resend to PD.
@@ -1180,11 +1217,12 @@ int oz_hcd_heartbeat(void *hport)
spin_lock_bh(&ozhcd->hcd_lock);
list_for_each_safe(e, n, &ep->urb_list) {
urbl = container_of(e, struct oz_urb_link, link);
- if (time_after(now, urbl->submit_jiffies+HZ/2)) {
- oz_trace("%ld: Request 0x%p timeout\n",
- now, urbl->urb);
- urbl->submit_jiffies = now;
+ if (urbl->submit_counter > EP0_TIMEOUT_COUNTER) {
+ oz_dbg(ON, "Request 0x%p timeout\n", urbl->urb);
list_move_tail(e, &xfr_list);
+ urbl->submit_counter = 0;
+ } else {
+ urbl->submit_counter++;
}
}
if (!list_empty(&ep->urb_list))
@@ -1194,14 +1232,15 @@ int oz_hcd_heartbeat(void *hport)
while (e != &xfr_list) {
urbl = container_of(e, struct oz_urb_link, link);
e = e->next;
- oz_trace("Resending request to PD.\n");
+ oz_dbg(ON, "Resending request to PD\n");
oz_process_ep0_urb(ozhcd, urbl->urb, GFP_ATOMIC);
oz_free_urb_link(urbl);
}
}
return rc;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
@@ -1212,7 +1251,10 @@ static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
int i;
int if_ix = intf->desc.bInterfaceNumber;
int request_heartbeat = 0;
- oz_trace("interface[%d] = %p\n", if_ix, intf);
+
+ oz_dbg(ON, "interface[%d] = %p\n", if_ix, intf);
+ if (if_ix >= port->num_iface || port->iface == NULL)
+ return -ENOMEM;
for (i = 0; i < intf->desc.bNumEndpoints; i++) {
struct usb_host_endpoint *hep = &intf->endpoint[i];
u8 ep_addr = hep->desc.bEndpointAddress;
@@ -1220,20 +1262,20 @@ static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
struct oz_endpoint *ep;
int buffer_size = 0;
- oz_trace("%d bEndpointAddress = %x\n", i, ep_addr);
+ oz_dbg(ON, "%d bEndpointAddress = %x\n", i, ep_addr);
if (ep_addr & USB_ENDPOINT_DIR_MASK) {
switch (hep->desc.bmAttributes &
USB_ENDPOINT_XFERTYPE_MASK) {
case USB_ENDPOINT_XFER_ISOC:
- buffer_size = 24*1024;
+ buffer_size = OZ_EP_BUFFER_SIZE_ISOC;
break;
case USB_ENDPOINT_XFER_INT:
- buffer_size = 128;
+ buffer_size = OZ_EP_BUFFER_SIZE_INT;
break;
}
}
- ep = oz_ep_alloc(mem_flags, buffer_size);
+ ep = oz_ep_alloc(buffer_size, mem_flags);
if (!ep) {
oz_clean_endpoints_for_interface(hcd, port, if_ix);
return -ENOMEM;
@@ -1242,13 +1284,11 @@ static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
ep->ep_num = ep_num;
if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
== USB_ENDPOINT_XFER_ISOC) {
- oz_trace("wMaxPacketSize = %d\n",
- hep->desc.wMaxPacketSize);
+ oz_dbg(ON, "wMaxPacketSize = %d\n",
+ usb_endpoint_maxp(&hep->desc));
ep->credit_ceiling = 200;
if (ep_addr & USB_ENDPOINT_DIR_MASK) {
ep->flags |= OZ_F_EP_BUFFERING;
- oz_event_log(OZ_EVT_EP_BUFFERING,
- ep->ep_num | USB_DIR_IN, 1, NULL, 0);
} else {
ep->flags |= OZ_F_EP_HAVE_STREAM;
if (oz_usb_stream_create(port->hpd, ep_num))
@@ -1280,7 +1320,8 @@ static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
}
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static void oz_clean_endpoints_for_interface(struct usb_hcd *hcd,
@@ -1291,7 +1332,7 @@ static void oz_clean_endpoints_for_interface(struct usb_hcd *hcd,
int i;
struct list_head ep_list;
- oz_trace("Deleting endpoints for interface %d\n", if_ix);
+ oz_dbg(ON, "Deleting endpoints for interface %d\n", if_ix);
if (if_ix >= port->num_iface)
return;
INIT_LIST_HEAD(&ep_list);
@@ -1325,7 +1366,8 @@ static void oz_clean_endpoints_for_interface(struct usb_hcd *hcd,
oz_ep_free(port, ep);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static int oz_build_endpoints_for_config(struct usb_hcd *hcd,
@@ -1335,6 +1377,7 @@ static int oz_build_endpoints_for_config(struct usb_hcd *hcd,
struct oz_hcd *ozhcd = port->ozhcd;
int i;
int num_iface = config->desc.bNumInterfaces;
+
if (num_iface) {
struct oz_interface *iface;
@@ -1359,7 +1402,8 @@ fail:
oz_clean_endpoints_for_config(hcd, port);
return -1;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static void oz_clean_endpoints_for_config(struct usb_hcd *hcd,
@@ -1367,25 +1411,28 @@ static void oz_clean_endpoints_for_config(struct usb_hcd *hcd,
{
struct oz_hcd *ozhcd = port->ozhcd;
int i;
- oz_trace("Deleting endpoints for configuration.\n");
+
+ oz_dbg(ON, "Deleting endpoints for configuration\n");
for (i = 0; i < port->num_iface; i++)
oz_clean_endpoints_for_interface(hcd, port, i);
spin_lock_bh(&ozhcd->hcd_lock);
if (port->iface) {
- oz_trace("Freeing interfaces object.\n");
+ oz_dbg(ON, "Freeing interfaces object\n");
kfree(port->iface);
port->iface = NULL;
}
port->num_iface = 0;
spin_unlock_bh(&ozhcd->hcd_lock);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: tasklet
*/
static void *oz_claim_hpd(struct oz_port *port)
{
- void *hpd = NULL;
+ void *hpd;
struct oz_hcd *ozhcd = port->ozhcd;
+
spin_lock_bh(&ozhcd->hcd_lock);
hpd = port->hpd;
if (hpd)
@@ -1393,7 +1440,8 @@ static void *oz_claim_hpd(struct oz_port *port)
spin_unlock_bh(&ozhcd->hcd_lock);
return hpd;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: tasklet
*/
static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
@@ -1403,7 +1451,7 @@ static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
unsigned windex;
unsigned wvalue;
unsigned wlength;
- void *hpd = NULL;
+ void *hpd;
u8 req_id;
int rc = 0;
unsigned complete = 0;
@@ -1411,7 +1459,7 @@ static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
int port_ix = -1;
struct oz_port *port = NULL;
- oz_trace2(OZ_TRACE_URB, "%lu: oz_process_ep0_urb(%p)\n", jiffies, urb);
+ oz_dbg(URB, "[%s]:(%p)\n", __func__, urb);
port_ix = oz_get_port_from_addr(ozhcd, urb->dev->devnum);
if (port_ix < 0) {
rc = -EPIPE;
@@ -1420,8 +1468,8 @@ static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
port = &ozhcd->ports[port_ix];
if (((port->flags & OZ_PORT_F_PRESENT) == 0)
|| (port->flags & OZ_PORT_F_DYING)) {
- oz_trace("Refusing URB port_ix = %d devnum = %d\n",
- port_ix, urb->dev->devnum);
+ oz_dbg(ON, "Refusing URB port_ix = %d devnum = %d\n",
+ port_ix, urb->dev->devnum);
rc = -EPIPE;
goto out;
}
@@ -1432,17 +1480,16 @@ static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
windex = le16_to_cpu(setup->wIndex);
wvalue = le16_to_cpu(setup->wValue);
wlength = le16_to_cpu(setup->wLength);
- oz_trace2(OZ_TRACE_CTRL_DETAIL, "bRequestType = %x\n",
- setup->bRequestType);
- oz_trace2(OZ_TRACE_CTRL_DETAIL, "bRequest = %x\n", setup->bRequest);
- oz_trace2(OZ_TRACE_CTRL_DETAIL, "wValue = %x\n", wvalue);
- oz_trace2(OZ_TRACE_CTRL_DETAIL, "wIndex = %x\n", windex);
- oz_trace2(OZ_TRACE_CTRL_DETAIL, "wLength = %x\n", wlength);
+ oz_dbg(CTRL_DETAIL, "bRequestType = %x\n", setup->bRequestType);
+ oz_dbg(CTRL_DETAIL, "bRequest = %x\n", setup->bRequest);
+ oz_dbg(CTRL_DETAIL, "wValue = %x\n", wvalue);
+ oz_dbg(CTRL_DETAIL, "wIndex = %x\n", windex);
+ oz_dbg(CTRL_DETAIL, "wLength = %x\n", wlength);
req_id = port->next_req_id++;
hpd = oz_claim_hpd(port);
if (hpd == NULL) {
- oz_trace("Cannot claim port\n");
+ oz_dbg(ON, "Cannot claim port\n");
rc = -EPIPE;
goto out;
}
@@ -1452,34 +1499,31 @@ static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
*/
switch (setup->bRequest) {
case USB_REQ_GET_DESCRIPTOR:
- oz_trace("USB_REQ_GET_DESCRIPTOR - req\n");
+ oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - req\n");
break;
case USB_REQ_SET_ADDRESS:
- oz_event_log(OZ_EVT_CTRL_LOCAL, setup->bRequest,
- 0, NULL, setup->bRequestType);
- oz_trace("USB_REQ_SET_ADDRESS - req\n");
- oz_trace("Port %d address is 0x%x\n", ozhcd->conn_port,
- (u8)le16_to_cpu(setup->wValue));
+ oz_dbg(ON, "USB_REQ_SET_ADDRESS - req\n");
+ oz_dbg(ON, "Port %d address is 0x%x\n",
+ ozhcd->conn_port,
+ (u8)le16_to_cpu(setup->wValue));
spin_lock_bh(&ozhcd->hcd_lock);
if (ozhcd->conn_port >= 0) {
ozhcd->ports[ozhcd->conn_port].bus_addr =
(u8)le16_to_cpu(setup->wValue);
- oz_trace("Clearing conn_port\n");
+ oz_dbg(ON, "Clearing conn_port\n");
ozhcd->conn_port = -1;
}
spin_unlock_bh(&ozhcd->hcd_lock);
complete = 1;
break;
case USB_REQ_SET_CONFIGURATION:
- oz_trace("USB_REQ_SET_CONFIGURATION - req\n");
+ oz_dbg(ON, "USB_REQ_SET_CONFIGURATION - req\n");
break;
case USB_REQ_GET_CONFIGURATION:
/* We short circuit this case and reply directly since
* we have the selected configuration number cached.
*/
- oz_event_log(OZ_EVT_CTRL_LOCAL, setup->bRequest, 0,
- NULL, setup->bRequestType);
- oz_trace("USB_REQ_GET_CONFIGURATION - reply now\n");
+ oz_dbg(ON, "USB_REQ_GET_CONFIGURATION - reply now\n");
if (urb->transfer_buffer_length >= 1) {
urb->actual_length = 1;
*((u8 *)urb->transfer_buffer) =
@@ -1493,22 +1537,20 @@ static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
/* We short circuit this case and reply directly since
* we have the selected interface alternative cached.
*/
- oz_event_log(OZ_EVT_CTRL_LOCAL, setup->bRequest, 0,
- NULL, setup->bRequestType);
- oz_trace("USB_REQ_GET_INTERFACE - reply now\n");
+ oz_dbg(ON, "USB_REQ_GET_INTERFACE - reply now\n");
if (urb->transfer_buffer_length >= 1) {
urb->actual_length = 1;
*((u8 *)urb->transfer_buffer) =
port->iface[(u8)windex].alt;
- oz_trace("interface = %d alt = %d\n",
- windex, port->iface[(u8)windex].alt);
+ oz_dbg(ON, "interface = %d alt = %d\n",
+ windex, port->iface[(u8)windex].alt);
complete = 1;
} else {
rc = -EPIPE;
}
break;
case USB_REQ_SET_INTERFACE:
- oz_trace("USB_REQ_SET_INTERFACE - req\n");
+ oz_dbg(ON, "USB_REQ_SET_INTERFACE - req\n");
break;
}
}
@@ -1539,13 +1581,14 @@ static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
oz_usb_put(hpd);
out:
if (rc || complete) {
- oz_trace("Completing request locally\n");
- oz_complete_urb(ozhcd->hcd, urb, rc, 0);
+ oz_dbg(ON, "Completing request locally\n");
+ oz_complete_urb(ozhcd->hcd, urb, rc);
} else {
oz_usb_request_heartbeat(port->hpd);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: tasklet
*/
static int oz_urb_process(struct oz_hcd *ozhcd, struct urb *urb)
@@ -1553,6 +1596,7 @@ static int oz_urb_process(struct oz_hcd *ozhcd, struct urb *urb)
int rc = 0;
struct oz_port *port = urb->hcpriv;
u8 ep_addr;
+
/* When we are paranoid we keep a list of urbs which we check against
* before handing one back. This is just for debugging during
* development and should be turned off in the released driver.
@@ -1578,7 +1622,8 @@ static int oz_urb_process(struct oz_hcd *ozhcd, struct urb *urb)
}
return rc;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: tasklet
*/
static void oz_urb_process_tasklet(unsigned long unused)
@@ -1587,6 +1632,7 @@ static void oz_urb_process_tasklet(unsigned long unused)
struct urb *urb;
struct oz_hcd *ozhcd = oz_hcd_claim();
int rc = 0;
+
if (ozhcd == NULL)
return;
/* This is called from a tasklet so is in softirq context but the urb
@@ -1604,13 +1650,14 @@ static void oz_urb_process_tasklet(unsigned long unused)
oz_free_urb_link(urbl);
rc = oz_urb_process(ozhcd, urb);
if (rc)
- oz_complete_urb(ozhcd->hcd, urb, rc, 0);
+ oz_complete_urb(ozhcd->hcd, urb, rc);
spin_lock_irqsave(&g_tasklet_lock, irq_state);
}
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
oz_hcd_put(ozhcd);
}
-/*------------------------------------------------------------------------------
+
+/*
* This function searches for the urb in any of the lists it could be in.
* If it is found it is removed from the list and completed. If the urb is
* being processed then it won't be in a list so won't be found. However, the
@@ -1626,13 +1673,14 @@ static void oz_urb_cancel(struct oz_port *port, u8 ep_num, struct urb *urb)
struct oz_hcd *ozhcd;
unsigned long irq_state;
u8 ix;
+
if (port == NULL) {
- oz_trace("ERRORERROR: oz_urb_cancel(%p) port is null\n", urb);
+ oz_dbg(ON, "%s: ERROR: (%p) port is null\n", __func__, urb);
return;
}
ozhcd = port->ozhcd;
if (ozhcd == NULL) {
- oz_trace("ERRORERROR: oz_urb_cancel(%p) ozhcd is null\n", urb);
+ oz_dbg(ON, "%s; ERROR: (%p) ozhcd is null\n", __func__, urb);
return;
}
@@ -1657,7 +1705,7 @@ static void oz_urb_cancel(struct oz_port *port, u8 ep_num, struct urb *urb)
urbl = container_of(e, struct oz_urb_link, link);
if (urbl->urb == urb) {
list_del(e);
- oz_trace("Found urb in orphanage\n");
+ oz_dbg(ON, "Found urb in orphanage\n");
goto out;
}
}
@@ -1673,10 +1721,11 @@ out2:
if (urbl) {
urb->actual_length = 0;
oz_free_urb_link(urbl);
- oz_complete_urb(ozhcd->hcd, urb, -EPIPE, 0);
+ oz_complete_urb(ozhcd->hcd, urb, -EPIPE);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: tasklet
*/
static void oz_urb_cancel_tasklet(unsigned long unused)
@@ -1684,6 +1733,7 @@ static void oz_urb_cancel_tasklet(unsigned long unused)
unsigned long irq_state;
struct urb *urb;
struct oz_hcd *ozhcd = oz_hcd_claim();
+
if (ozhcd == NULL)
return;
spin_lock_irqsave(&g_tasklet_lock, irq_state);
@@ -1702,7 +1752,8 @@ static void oz_urb_cancel_tasklet(unsigned long unused)
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
oz_hcd_put(ozhcd);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: unknown
*/
static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status)
@@ -1713,51 +1764,38 @@ static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status)
urbl = list_first_entry(&ozhcd->orphanage,
struct oz_urb_link, link);
list_del(&urbl->link);
- oz_complete_urb(ozhcd->hcd, urbl->urb, status, 0);
+ oz_complete_urb(ozhcd->hcd, urbl->urb, status);
oz_free_urb_link(urbl);
}
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: unknown
*/
static int oz_hcd_start(struct usb_hcd *hcd)
{
- oz_trace("oz_hcd_start()\n");
hcd->power_budget = 200;
hcd->state = HC_STATE_RUNNING;
hcd->uses_new_polling = 1;
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: unknown
*/
static void oz_hcd_stop(struct usb_hcd *hcd)
{
- oz_trace("oz_hcd_stop()\n");
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: unknown
*/
static void oz_hcd_shutdown(struct usb_hcd *hcd)
{
- oz_trace("oz_hcd_shutdown()\n");
-}
-/*------------------------------------------------------------------------------
- * Context: any
- */
-#ifdef WANT_EVENT_TRACE
-static u8 oz_get_irq_ctx(void)
-{
- u8 irq_info = 0;
- if (in_interrupt())
- irq_info |= 1;
- if (in_irq())
- irq_info |= 2;
- return irq_info;
}
-#endif /* WANT_EVENT_TRACE */
-/*------------------------------------------------------------------------------
+
+/*
* Called to queue an urb for the device.
* This function should return a non-zero error code if it fails the urb but
* should not call usb_hcd_giveback_urb().
@@ -1767,23 +1805,19 @@ static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
- int rc = 0;
+ int rc;
int port_ix;
struct oz_port *port;
unsigned long irq_state;
struct oz_urb_link *urbl;
- oz_trace2(OZ_TRACE_URB, "%lu: oz_hcd_urb_enqueue(%p)\n",
- jiffies, urb);
- oz_event_log(OZ_EVT_URB_SUBMIT, oz_get_irq_ctx(),
- (u16)urb->number_of_packets, urb, urb->pipe);
+
+ oz_dbg(URB, "%s: (%p)\n", __func__, urb);
if (unlikely(ozhcd == NULL)) {
- oz_trace2(OZ_TRACE_URB, "%lu: Refused urb(%p) not ozhcd.\n",
- jiffies, urb);
+ oz_dbg(URB, "Refused urb(%p) not ozhcd\n", urb);
return -EPIPE;
}
if (unlikely(hcd->state != HC_STATE_RUNNING)) {
- oz_trace2(OZ_TRACE_URB, "%lu: Refused urb(%p) not running.\n",
- jiffies, urb);
+ oz_dbg(URB, "Refused urb(%p) not running\n", urb);
return -EPIPE;
}
port_ix = oz_get_port_from_addr(ozhcd, urb->dev->devnum);
@@ -1792,9 +1826,10 @@ static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
port = &ozhcd->ports[port_ix];
if (port == NULL)
return -EPIPE;
- if ((port->flags & OZ_PORT_F_PRESENT) == 0) {
- oz_trace("Refusing URB port_ix = %d devnum = %d\n",
- port_ix, urb->dev->devnum);
+ if (!(port->flags & OZ_PORT_F_PRESENT) ||
+ (port->flags & OZ_PORT_F_CHANGED)) {
+ oz_dbg(ON, "Refusing URB port_ix = %d devnum = %d\n",
+ port_ix, urb->dev->devnum);
return -EPIPE;
}
urb->hcpriv = port;
@@ -1817,14 +1852,16 @@ static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
atomic_inc(&g_pending_urbs);
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: tasklet
*/
static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
struct urb *urb)
{
- struct oz_urb_link *urbl = NULL;
+ struct oz_urb_link *urbl;
struct list_head *e;
+
if (unlikely(ep == NULL))
return NULL;
list_for_each(e, &ep->urb_list) {
@@ -1835,27 +1872,25 @@ static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
ep->credit -= urb->number_of_packets;
if (ep->credit < 0)
ep->credit = 0;
- oz_event_log(OZ_EVT_EP_CREDIT,
- usb_pipein(urb->pipe) ?
- (ep->ep_num | USB_DIR_IN) : ep->ep_num,
- 0, NULL, ep->credit);
}
return urbl;
}
}
return NULL;
}
-/*------------------------------------------------------------------------------
+
+/*
* Called to dequeue a previously submitted urb for the device.
* Context: any
*/
static int oz_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
- struct oz_urb_link *urbl = NULL;
+ struct oz_urb_link *urbl;
int rc;
unsigned long irq_state;
- oz_trace2(OZ_TRACE_URB, "%lu: oz_hcd_urb_dequeue(%p)\n", jiffies, urb);
+
+ oz_dbg(URB, "%s: (%p)\n", __func__, urb);
urbl = oz_alloc_urb_link();
if (unlikely(urbl == NULL))
return -ENOMEM;
@@ -1885,31 +1920,33 @@ static int oz_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
}
return rc;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: unknown
*/
static void oz_hcd_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
- oz_trace("oz_hcd_endpoint_disable\n");
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: unknown
*/
static void oz_hcd_endpoint_reset(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
- oz_trace("oz_hcd_endpoint_reset\n");
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: unknown
*/
static int oz_hcd_get_frame_number(struct usb_hcd *hcd)
{
- oz_trace("oz_hcd_get_frame_number\n");
+ oz_dbg(ON, "oz_hcd_get_frame_number\n");
return oz_usb_get_frame_number();
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
* This is called as a consequence of us calling usb_hcd_poll_rh_status() and we
* always do that in softirq context.
@@ -1919,27 +1956,33 @@ static int oz_hcd_hub_status_data(struct usb_hcd *hcd, char *buf)
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
int i;
- oz_trace2(OZ_TRACE_HUB, "oz_hcd_hub_status_data()\n");
buf[0] = 0;
+ buf[1] = 0;
spin_lock_bh(&ozhcd->hcd_lock);
for (i = 0; i < OZ_NB_PORTS; i++) {
if (ozhcd->ports[i].flags & OZ_PORT_F_CHANGED) {
- oz_trace2(OZ_TRACE_HUB, "Port %d changed\n", i);
+ oz_dbg(HUB, "Port %d changed\n", i);
ozhcd->ports[i].flags &= ~OZ_PORT_F_CHANGED;
- buf[0] |= 1<<(i+1);
+ if (i < 7)
+ buf[0] |= 1 << (i + 1);
+ else
+ buf[1] |= 1 << (i - 7);
}
}
spin_unlock_bh(&ozhcd->hcd_lock);
- return buf[0] ? 1 : 0;
+ if (buf[0] != 0 || buf[1] != 0)
+ return 2;
+ else
+ return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static void oz_get_hub_descriptor(struct usb_hcd *hcd,
struct usb_hub_descriptor *desc)
{
- oz_trace2(OZ_TRACE_HUB, "GetHubDescriptor\n");
memset(desc, 0, sizeof(*desc));
desc->bDescriptorType = 0x29;
desc->bDescLength = 9;
@@ -1947,7 +1990,8 @@ static void oz_get_hub_descriptor(struct usb_hcd *hcd,
__constant_cpu_to_le16(0x0001);
desc->bNbrPorts = OZ_NB_PORTS;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static int oz_set_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
@@ -1958,59 +2002,59 @@ static int oz_set_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
unsigned set_bits = 0;
unsigned clear_bits = 0;
- oz_trace2(OZ_TRACE_HUB, "SetPortFeature\n");
+
if ((port_id < 1) || (port_id > OZ_NB_PORTS))
return -EPIPE;
port = &ozhcd->ports[port_id-1];
switch (wvalue) {
case USB_PORT_FEAT_CONNECTION:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_CONNECTION\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_CONNECTION\n");
break;
case USB_PORT_FEAT_ENABLE:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_ENABLE\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_ENABLE\n");
break;
case USB_PORT_FEAT_SUSPEND:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_SUSPEND\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_SUSPEND\n");
break;
case USB_PORT_FEAT_OVER_CURRENT:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_OVER_CURRENT\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_OVER_CURRENT\n");
break;
case USB_PORT_FEAT_RESET:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_RESET\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_RESET\n");
set_bits = USB_PORT_STAT_ENABLE | (USB_PORT_STAT_C_RESET<<16);
clear_bits = USB_PORT_STAT_RESET;
ozhcd->ports[port_id-1].bus_addr = 0;
break;
case USB_PORT_FEAT_POWER:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_POWER\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_POWER\n");
set_bits |= USB_PORT_STAT_POWER;
break;
case USB_PORT_FEAT_LOWSPEED:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_LOWSPEED\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_LOWSPEED\n");
break;
case USB_PORT_FEAT_C_CONNECTION:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_CONNECTION\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_C_CONNECTION\n");
break;
case USB_PORT_FEAT_C_ENABLE:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_ENABLE\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_C_ENABLE\n");
break;
case USB_PORT_FEAT_C_SUSPEND:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_SUSPEND\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_C_SUSPEND\n");
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n");
break;
case USB_PORT_FEAT_C_RESET:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_RESET\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_C_RESET\n");
break;
case USB_PORT_FEAT_TEST:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_TEST\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_TEST\n");
break;
case USB_PORT_FEAT_INDICATOR:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_INDICATOR\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_INDICATOR\n");
break;
default:
- oz_trace2(OZ_TRACE_HUB, "Other %d\n", wvalue);
+ oz_dbg(HUB, "Other %d\n", wvalue);
break;
}
if (set_bits || clear_bits) {
@@ -2019,11 +2063,11 @@ static int oz_set_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
port->status |= set_bits;
spin_unlock_bh(&port->port_lock);
}
- oz_trace2(OZ_TRACE_HUB, "Port[%d] status = 0x%x\n", port_id,
- port->status);
+ oz_dbg(HUB, "Port[%d] status = 0x%x\n", port_id, port->status);
return err;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static int oz_clear_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
@@ -2033,60 +2077,60 @@ static int oz_clear_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
u8 port_id = (u8)windex;
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
unsigned clear_bits = 0;
- oz_trace2(OZ_TRACE_HUB, "ClearPortFeature\n");
+
if ((port_id < 1) || (port_id > OZ_NB_PORTS))
return -EPIPE;
port = &ozhcd->ports[port_id-1];
switch (wvalue) {
case USB_PORT_FEAT_CONNECTION:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_CONNECTION\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_CONNECTION\n");
break;
case USB_PORT_FEAT_ENABLE:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_ENABLE\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_ENABLE\n");
clear_bits = USB_PORT_STAT_ENABLE;
break;
case USB_PORT_FEAT_SUSPEND:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_SUSPEND\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_SUSPEND\n");
break;
case USB_PORT_FEAT_OVER_CURRENT:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_OVER_CURRENT\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_OVER_CURRENT\n");
break;
case USB_PORT_FEAT_RESET:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_RESET\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_RESET\n");
break;
case USB_PORT_FEAT_POWER:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_POWER\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_POWER\n");
clear_bits |= USB_PORT_STAT_POWER;
break;
case USB_PORT_FEAT_LOWSPEED:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_LOWSPEED\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_LOWSPEED\n");
break;
case USB_PORT_FEAT_C_CONNECTION:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_CONNECTION\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_C_CONNECTION\n");
clear_bits = (USB_PORT_STAT_C_CONNECTION << 16);
break;
case USB_PORT_FEAT_C_ENABLE:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_ENABLE\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_C_ENABLE\n");
clear_bits = (USB_PORT_STAT_C_ENABLE << 16);
break;
case USB_PORT_FEAT_C_SUSPEND:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_SUSPEND\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_C_SUSPEND\n");
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n");
break;
case USB_PORT_FEAT_C_RESET:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_RESET\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_C_RESET\n");
clear_bits = (USB_PORT_FEAT_C_RESET << 16);
break;
case USB_PORT_FEAT_TEST:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_TEST\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_TEST\n");
break;
case USB_PORT_FEAT_INDICATOR:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_INDICATOR\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_INDICATOR\n");
break;
default:
- oz_trace2(OZ_TRACE_HUB, "Other %d\n", wvalue);
+ oz_dbg(HUB, "Other %d\n", wvalue);
break;
}
if (clear_bits) {
@@ -2094,37 +2138,40 @@ static int oz_clear_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
port->status &= ~clear_bits;
spin_unlock_bh(&port->port_lock);
}
- oz_trace2(OZ_TRACE_HUB, "Port[%d] status = 0x%x\n", port_id,
- ozhcd->ports[port_id-1].status);
+ oz_dbg(HUB, "Port[%d] status = 0x%x\n",
+ port_id, ozhcd->ports[port_id-1].status);
return err;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static int oz_get_port_status(struct usb_hcd *hcd, u16 windex, char *buf)
{
struct oz_hcd *ozhcd;
- u32 status = 0;
+ u32 status;
+
if ((windex < 1) || (windex > OZ_NB_PORTS))
return -EPIPE;
ozhcd = oz_hcd_private(hcd);
- oz_trace2(OZ_TRACE_HUB, "GetPortStatus windex = %d\n", windex);
+ oz_dbg(HUB, "GetPortStatus windex = %d\n", windex);
status = ozhcd->ports[windex-1].status;
put_unaligned(cpu_to_le32(status), (__le32 *)buf);
- oz_trace2(OZ_TRACE_HUB, "Port[%d] status = %x\n", windex, status);
+ oz_dbg(HUB, "Port[%d] status = %x\n", windex, status);
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue,
u16 windex, char *buf, u16 wlength)
{
int err = 0;
- oz_trace2(OZ_TRACE_HUB, "oz_hcd_hub_control()\n");
+
switch (req_type) {
case ClearHubFeature:
- oz_trace2(OZ_TRACE_HUB, "ClearHubFeature: %d\n", req_type);
+ oz_dbg(HUB, "ClearHubFeature: %d\n", req_type);
break;
case ClearPortFeature:
err = oz_clear_port_feature(hcd, wvalue, windex);
@@ -2133,32 +2180,32 @@ static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue,
oz_get_hub_descriptor(hcd, (struct usb_hub_descriptor *)buf);
break;
case GetHubStatus:
- oz_trace2(OZ_TRACE_HUB, "GetHubStatus: req_type = 0x%x\n",
- req_type);
+ oz_dbg(HUB, "GetHubStatus: req_type = 0x%x\n", req_type);
put_unaligned(__constant_cpu_to_le32(0), (__le32 *)buf);
break;
case GetPortStatus:
err = oz_get_port_status(hcd, windex, buf);
break;
case SetHubFeature:
- oz_trace2(OZ_TRACE_HUB, "SetHubFeature: %d\n", req_type);
+ oz_dbg(HUB, "SetHubFeature: %d\n", req_type);
break;
case SetPortFeature:
err = oz_set_port_feature(hcd, wvalue, windex);
break;
default:
- oz_trace2(OZ_TRACE_HUB, "Other: %d\n", req_type);
+ oz_dbg(HUB, "Other: %d\n", req_type);
break;
}
return err;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static int oz_hcd_bus_suspend(struct usb_hcd *hcd)
{
struct oz_hcd *ozhcd;
- oz_trace2(OZ_TRACE_HUB, "oz_hcd_hub_suspend()\n");
+
ozhcd = oz_hcd_private(hcd);
spin_lock_bh(&ozhcd->hcd_lock);
hcd->state = HC_STATE_SUSPENDED;
@@ -2166,13 +2213,14 @@ static int oz_hcd_bus_suspend(struct usb_hcd *hcd)
spin_unlock_bh(&ozhcd->hcd_lock);
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static int oz_hcd_bus_resume(struct usb_hcd *hcd)
{
struct oz_hcd *ozhcd;
- oz_trace2(OZ_TRACE_HUB, "oz_hcd_hub_resume()\n");
+
ozhcd = oz_hcd_private(hcd);
spin_lock_bh(&ozhcd->hcd_lock);
ozhcd->flags &= ~OZ_HDC_F_SUSPENDED;
@@ -2180,13 +2228,12 @@ static int oz_hcd_bus_resume(struct usb_hcd *hcd)
spin_unlock_bh(&ozhcd->hcd_lock);
return 0;
}
-/*------------------------------------------------------------------------------
- */
+
static void oz_plat_shutdown(struct platform_device *dev)
{
- oz_trace("oz_plat_shutdown()\n");
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static int oz_plat_probe(struct platform_device *dev)
@@ -2195,10 +2242,10 @@ static int oz_plat_probe(struct platform_device *dev)
int err;
struct usb_hcd *hcd;
struct oz_hcd *ozhcd;
- oz_trace("oz_plat_probe()\n");
+
hcd = usb_create_hcd(&g_oz_hc_drv, &dev->dev, dev_name(&dev->dev));
if (hcd == NULL) {
- oz_trace("Failed to created hcd object OK\n");
+ oz_dbg(ON, "Failed to created hcd object OK\n");
return -ENOMEM;
}
ozhcd = oz_hcd_private(hcd);
@@ -2219,7 +2266,7 @@ static int oz_plat_probe(struct platform_device *dev)
}
err = usb_add_hcd(hcd, 0, 0);
if (err) {
- oz_trace("Failed to add hcd object OK\n");
+ oz_dbg(ON, "Failed to add hcd object OK\n");
usb_put_hcd(hcd);
return -1;
}
@@ -2228,14 +2275,15 @@ static int oz_plat_probe(struct platform_device *dev)
spin_unlock_bh(&g_hcdlock);
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: unknown
*/
static int oz_plat_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct oz_hcd *ozhcd;
- oz_trace("oz_plat_remove()\n");
+
if (hcd == NULL)
return -1;
ozhcd = oz_hcd_private(hcd);
@@ -2243,42 +2291,45 @@ static int oz_plat_remove(struct platform_device *dev)
if (ozhcd == g_ozhcd)
g_ozhcd = NULL;
spin_unlock_bh(&g_hcdlock);
- oz_trace("Clearing orphanage\n");
+ oz_dbg(ON, "Clearing orphanage\n");
oz_hcd_clear_orphanage(ozhcd, -EPIPE);
- oz_trace("Removing hcd\n");
+ oz_dbg(ON, "Removing hcd\n");
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
oz_empty_link_pool();
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: unknown
*/
static int oz_plat_suspend(struct platform_device *dev, pm_message_t msg)
{
- oz_trace("oz_plat_suspend()\n");
return 0;
}
-/*------------------------------------------------------------------------------
+
+
+/*
* Context: unknown
*/
static int oz_plat_resume(struct platform_device *dev)
{
- oz_trace("oz_plat_resume()\n");
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
int oz_hcd_init(void)
{
int err;
+
if (usb_disabled())
return -ENODEV;
tasklet_init(&g_urb_process_tasklet, oz_urb_process_tasklet, 0);
tasklet_init(&g_urb_cancel_tasklet, oz_urb_cancel_tasklet, 0);
err = platform_driver_register(&g_oz_plat_drv);
- oz_trace("platform_driver_register() returned %d\n", err);
+ oz_dbg(ON, "platform_driver_register() returned %d\n", err);
if (err)
goto error;
g_plat_dev = platform_device_alloc(OZ_PLAT_DEV_NAME, -1);
@@ -2286,11 +2337,11 @@ int oz_hcd_init(void)
err = -ENOMEM;
goto error1;
}
- oz_trace("platform_device_alloc() succeeded\n");
+ oz_dbg(ON, "platform_device_alloc() succeeded\n");
err = platform_device_add(g_plat_dev);
if (err)
goto error2;
- oz_trace("platform_device_add() succeeded\n");
+ oz_dbg(ON, "platform_device_add() succeeded\n");
return 0;
error2:
platform_device_put(g_plat_dev);
@@ -2299,17 +2350,19 @@ error1:
error:
tasklet_disable(&g_urb_process_tasklet);
tasklet_disable(&g_urb_cancel_tasklet);
- oz_trace("oz_hcd_init() failed %d\n", err);
+ oz_dbg(ON, "oz_hcd_init() failed %d\n", err);
return err;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
void oz_hcd_term(void)
{
+ msleep(OZ_HUB_DEBOUNCE_TIMEOUT);
tasklet_kill(&g_urb_process_tasklet);
tasklet_kill(&g_urb_cancel_tasklet);
platform_device_unregister(g_plat_dev);
platform_driver_unregister(&g_oz_plat_drv);
- oz_trace("Pending urbs:%d\n", atomic_read(&g_pending_urbs));
+ oz_dbg(ON, "Pending urbs:%d\n", atomic_read(&g_pending_urbs));
}
diff --git a/drivers/staging/ozwpan/ozhcd.h b/drivers/staging/ozwpan/ozhcd.h
index 9b30dfd..55e97b1 100644
--- a/drivers/staging/ozwpan/ozhcd.h
+++ b/drivers/staging/ozwpan/ozhcd.h
@@ -7,8 +7,8 @@
int oz_hcd_init(void);
void oz_hcd_term(void);
-void *oz_hcd_pd_arrived(void *ctx);
-void oz_hcd_pd_departed(void *ctx);
+struct oz_port *oz_hcd_pd_arrived(void *ctx);
+void oz_hcd_pd_departed(struct oz_port *hport);
void oz_hcd_pd_reset(void *hpd, void *hport);
#endif /* _OZHCD_H */
diff --git a/drivers/staging/ozwpan/ozmain.c b/drivers/staging/ozwpan/ozmain.c
index 57a0cbd..d1a5b7a 100644
--- a/drivers/staging/ozwpan/ozmain.c
+++ b/drivers/staging/ozwpan/ozmain.c
@@ -3,6 +3,7 @@
* Released under the GNU General Public License Version 2 (GPLv2).
* -----------------------------------------------------------------------------
*/
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
@@ -10,35 +11,34 @@
#include <linux/netdevice.h>
#include <linux/errno.h>
#include <linux/ieee80211.h>
-#include "ozconfig.h"
+#include "ozdbg.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozcdev.h"
-#include "oztrace.h"
-#include "ozevent.h"
-/*------------------------------------------------------------------------------
+
+unsigned int oz_dbg_mask = OZ_DEFAULT_DBG_MASK;
+
+/*
* The name of the 802.11 mac device. Empty string is the default value but a
* value can be supplied as a parameter to the module. An empty string means
* bind to nothing. '*' means bind to all netcards - this includes non-802.11
* netcards. Bindings can be added later using an IOCTL.
*/
static char *g_net_dev = "";
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static int __init ozwpan_init(void)
{
- oz_event_init();
oz_cdev_register();
oz_protocol_init(g_net_dev);
oz_app_enable(OZ_APPID_USB, 1);
oz_apps_init();
-#ifdef CONFIG_DEBUG_FS
- oz_debugfs_init();
-#endif
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static void __exit ozwpan_exit(void)
@@ -46,13 +46,8 @@ static void __exit ozwpan_exit(void)
oz_protocol_term();
oz_apps_term();
oz_cdev_deregister();
- oz_event_term();
-#ifdef CONFIG_DEBUG_FS
- oz_debugfs_remove();
-#endif
}
-/*------------------------------------------------------------------------------
- */
+
module_param(g_net_dev, charp, S_IRUGO);
module_init(ozwpan_init);
module_exit(ozwpan_exit);
diff --git a/drivers/staging/ozwpan/ozpd.c b/drivers/staging/ozwpan/ozpd.c
index f8b9da0..ab85a72 100644
--- a/drivers/staging/ozwpan/ozpd.c
+++ b/drivers/staging/ozwpan/ozpd.c
@@ -3,29 +3,26 @@
* Released under the GNU General Public License Version 2 (GPLv2).
* -----------------------------------------------------------------------------
*/
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/errno.h>
-#include "ozconfig.h"
+#include "ozdbg.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
-#include "oztrace.h"
-#include "ozevent.h"
#include "ozcdev.h"
#include "ozusbsvc.h"
#include <asm/unaligned.h>
#include <linux/uaccess.h>
#include <net/psnap.h>
-/*------------------------------------------------------------------------------
- */
+
#define OZ_MAX_TX_POOL_SIZE 6
-/*------------------------------------------------------------------------------
- */
+
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f);
@@ -40,10 +37,12 @@ static void oz_def_app_term(void);
static int oz_def_app_start(struct oz_pd *pd, int resume);
static void oz_def_app_stop(struct oz_pd *pd, int pause);
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt);
-/*------------------------------------------------------------------------------
+
+/*
* Counts the uncompleted isoc frames submitted to netcard.
*/
static atomic_t g_submitted_isoc = ATOMIC_INIT(0);
+
/* Application handler functions.
*/
static const struct oz_app_if g_app_if[OZ_APPID_MAX] = {
@@ -83,70 +82,75 @@ static const struct oz_app_if g_app_if[OZ_APPID_MAX] = {
NULL,
OZ_APPID_SERIAL},
};
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static int oz_def_app_init(void)
{
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static void oz_def_app_term(void)
{
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static int oz_def_app_start(struct oz_pd *pd, int resume)
{
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static void oz_def_app_stop(struct oz_pd *pd, int pause)
{
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt)
{
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
void oz_pd_set_state(struct oz_pd *pd, unsigned state)
{
pd->state = state;
- oz_event_log(OZ_EVT_PD_STATE, 0, 0, NULL, state);
-#ifdef WANT_TRACE
switch (state) {
case OZ_PD_S_IDLE:
- oz_trace("PD State: OZ_PD_S_IDLE\n");
+ oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_IDLE\n");
break;
case OZ_PD_S_CONNECTED:
- oz_trace("PD State: OZ_PD_S_CONNECTED\n");
+ oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_CONNECTED\n");
break;
case OZ_PD_S_STOPPED:
- oz_trace("PD State: OZ_PD_S_STOPPED\n");
+ oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_STOPPED\n");
break;
case OZ_PD_S_SLEEP:
- oz_trace("PD State: OZ_PD_S_SLEEP\n");
+ oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_SLEEP\n");
break;
}
-#endif /* WANT_TRACE */
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
void oz_pd_get(struct oz_pd *pd)
{
atomic_inc(&pd->ref_count);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
void oz_pd_put(struct oz_pd *pd)
@@ -154,12 +158,14 @@ void oz_pd_put(struct oz_pd *pd)
if (atomic_dec_and_test(&pd->ref_count))
oz_pd_destroy(pd);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
{
struct oz_pd *pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);
+
if (pd) {
int i;
atomic_set(&pd->ref_count, 2);
@@ -179,19 +185,34 @@ struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
pd->last_sent_frame = &pd->tx_queue;
spin_lock_init(&pd->stream_lock);
INIT_LIST_HEAD(&pd->stream_list);
+ tasklet_init(&pd->heartbeat_tasklet, oz_pd_heartbeat_handler,
+ (unsigned long)pd);
+ tasklet_init(&pd->timeout_tasklet, oz_pd_timeout_handler,
+ (unsigned long)pd);
+ hrtimer_init(&pd->heartbeat, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(&pd->timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ pd->heartbeat.function = oz_pd_heartbeat_event;
+ pd->timeout.function = oz_pd_timeout_event;
}
return pd;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
-void oz_pd_destroy(struct oz_pd *pd)
+static void oz_pd_free(struct work_struct *work)
{
struct list_head *e;
struct oz_tx_frame *f;
struct oz_isoc_stream *st;
struct oz_farewell *fwell;
- oz_trace("Destroying PD\n");
+ struct oz_pd *pd;
+
+ oz_pd_dbg(pd, ON, "Destroying PD\n");
+ pd = container_of(work, struct oz_pd, workitem);
+ /*Disable timer tasklets*/
+ tasklet_kill(&pd->heartbeat_tasklet);
+ tasklet_kill(&pd->timeout_tasklet);
/* Delete any streams.
*/
e = pd->stream_list.next;
@@ -230,20 +251,38 @@ void oz_pd_destroy(struct oz_pd *pd)
dev_put(pd->net_dev);
kfree(pd);
}
-/*------------------------------------------------------------------------------
+
+/*
+ * Context: softirq or process
+ */
+void oz_pd_destroy(struct oz_pd *pd)
+{
+ if (hrtimer_active(&pd->timeout))
+ hrtimer_cancel(&pd->timeout);
+ if (hrtimer_active(&pd->heartbeat))
+ hrtimer_cancel(&pd->heartbeat);
+
+ INIT_WORK(&pd->workitem, oz_pd_free);
+ if (!schedule_work(&pd->workitem))
+ oz_pd_dbg(pd, ON, "failed to schedule workitem\n");
+}
+
+/*
* Context: softirq-serialized
*/
int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
{
const struct oz_app_if *ai;
int rc = 0;
- oz_trace("oz_services_start(0x%x) resume(%d)\n", apps, resume);
+
+ oz_pd_dbg(pd, ON, "%s: (0x%x) resume(%d)\n", __func__, apps, resume);
for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
if (apps & (1<<ai->app_id)) {
if (ai->start(pd, resume)) {
rc = -1;
- oz_trace("Unabled to start service %d\n",
- ai->app_id);
+ oz_pd_dbg(pd, ON,
+ "Unable to start service %d\n",
+ ai->app_id);
break;
}
oz_polling_lock_bh();
@@ -255,13 +294,15 @@ int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
}
return rc;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
{
const struct oz_app_if *ai;
- oz_trace("oz_stop_services(0x%x) pause(%d)\n", apps, pause);
+
+ oz_pd_dbg(pd, ON, "%s: (0x%x) pause(%d)\n", __func__, apps, pause);
for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
if (apps & (1<<ai->app_id)) {
oz_polling_lock_bh();
@@ -276,34 +317,38 @@ void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
}
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
{
const struct oz_app_if *ai;
int more = 0;
+
for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
if (ai->heartbeat && (apps & (1<<ai->app_id))) {
if (ai->heartbeat(pd))
more = 1;
}
}
- if (more)
- oz_pd_request_heartbeat(pd);
+ if ((!more) && (hrtimer_active(&pd->heartbeat)))
+ hrtimer_cancel(&pd->heartbeat);
if (pd->mode & OZ_F_ISOC_ANYTIME) {
int count = 8;
while (count-- && (oz_send_isoc_frame(pd) >= 0))
;
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
void oz_pd_stop(struct oz_pd *pd)
{
- u16 stop_apps = 0;
- oz_trace("oz_pd_stop() State = 0x%x\n", pd->state);
+ u16 stop_apps;
+
+ oz_dbg(ON, "oz_pd_stop() State = 0x%x\n", pd->state);
oz_pd_indicate_farewells(pd);
oz_polling_lock_bh();
stop_apps = pd->total_apps;
@@ -316,46 +361,46 @@ void oz_pd_stop(struct oz_pd *pd)
/* Remove from PD list.*/
list_del(&pd->link);
oz_polling_unlock_bh();
- oz_trace("pd ref count = %d\n", atomic_read(&pd->ref_count));
- oz_timer_delete(pd, 0);
+ oz_dbg(ON, "pd ref count = %d\n", atomic_read(&pd->ref_count));
oz_pd_put(pd);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
int oz_pd_sleep(struct oz_pd *pd)
{
int do_stop = 0;
- u16 stop_apps = 0;
+ u16 stop_apps;
+
oz_polling_lock_bh();
if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
oz_polling_unlock_bh();
return 0;
}
- if (pd->keep_alive_j && pd->session_id) {
+ if (pd->keep_alive && pd->session_id)
oz_pd_set_state(pd, OZ_PD_S_SLEEP);
- pd->pulse_time_j = jiffies + pd->keep_alive_j;
- oz_trace("Sleep Now %lu until %lu\n",
- jiffies, pd->pulse_time_j);
- } else {
+ else
do_stop = 1;
- }
+
stop_apps = pd->total_apps;
oz_polling_unlock_bh();
if (do_stop) {
oz_pd_stop(pd);
} else {
oz_services_stop(pd, stop_apps, 1);
- oz_timer_add(pd, OZ_TIMER_STOP, jiffies + pd->keep_alive_j, 1);
+ oz_timer_add(pd, OZ_TIMER_STOP, pd->keep_alive);
}
return do_stop;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
{
struct oz_tx_frame *f = NULL;
+
spin_lock_bh(&pd->tx_frame_lock);
if (pd->tx_pool) {
f = container_of(pd->tx_pool, struct oz_tx_frame, link);
@@ -372,7 +417,8 @@ static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
}
return f;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f)
@@ -386,10 +432,11 @@ static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f)
} else {
kfree(f);
}
- oz_trace2(OZ_TRACE_TX_FRAMES, "Releasing ISOC Frame isoc_nb= %d\n",
- pd->nb_queued_isoc_frames);
+ oz_dbg(TX_FRAMES, "Releasing ISOC Frame isoc_nb= %d\n",
+ pd->nb_queued_isoc_frames);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
@@ -404,28 +451,34 @@ static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
spin_unlock_bh(&pd->tx_frame_lock);
kfree(f);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
static void oz_set_more_bit(struct sk_buff *skb)
{
struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
+
oz_hdr->control |= OZ_F_MORE_DATA;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
static void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb)
{
struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
+
oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
int oz_prepare_frame(struct oz_pd *pd, int empty)
{
struct oz_tx_frame *f;
+
if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
return -1;
if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
@@ -450,7 +503,8 @@ int oz_prepare_frame(struct oz_pd *pd, int empty)
spin_unlock(&pd->tx_frame_lock);
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
@@ -460,6 +514,7 @@ static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
struct oz_hdr *oz_hdr;
struct oz_elt *elt;
struct list_head *e;
+
/* Allocate skb with enough space for the lower layers as well
* as the space we need.
*/
@@ -494,13 +549,15 @@ fail:
kfree_skb(skb);
return NULL;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
struct list_head *e;
struct oz_elt_info *ei;
+
e = f->elt_list.next;
while (e != &f->elt_list) {
ei = container_of(e, struct oz_elt_info, link);
@@ -516,7 +573,8 @@ static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
if (pd->elt_buff.free_elts > pd->elt_buff.max_free_elts)
oz_trim_elt_pool(&pd->elt_buff);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
@@ -524,6 +582,7 @@ static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
struct sk_buff *skb;
struct oz_tx_frame *f;
struct list_head *e;
+
spin_lock(&pd->tx_frame_lock);
e = pd->last_sent_frame->next;
if (e == &pd->tx_queue) {
@@ -542,20 +601,16 @@ static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
if ((int)atomic_read(&g_submitted_isoc) <
OZ_MAX_SUBMITTED_ISOC) {
if (dev_queue_xmit(skb) < 0) {
- oz_trace2(OZ_TRACE_TX_FRAMES,
- "Dropping ISOC Frame\n");
- oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, NULL, 0);
+ oz_dbg(TX_FRAMES, "Dropping ISOC Frame\n");
return -1;
}
atomic_inc(&g_submitted_isoc);
- oz_trace2(OZ_TRACE_TX_FRAMES,
- "Sending ISOC Frame, nb_isoc= %d\n",
- pd->nb_queued_isoc_frames);
+ oz_dbg(TX_FRAMES, "Sending ISOC Frame, nb_isoc= %d\n",
+ pd->nb_queued_isoc_frames);
return 0;
} else {
kfree_skb(skb);
- oz_trace2(OZ_TRACE_TX_FRAMES, "Dropping ISOC Frame>\n");
- oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, NULL, 0);
+ oz_dbg(TX_FRAMES, "Dropping ISOC Frame>\n");
return -1;
}
}
@@ -563,21 +618,18 @@ static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
pd->last_sent_frame = e;
skb = oz_build_frame(pd, f);
spin_unlock(&pd->tx_frame_lock);
+ if (!skb)
+ return -1;
if (more_data)
oz_set_more_bit(skb);
- oz_trace2(OZ_TRACE_TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
- if (skb) {
- oz_event_log(OZ_EVT_TX_FRAME,
- 0,
- (((u16)f->hdr.control)<<8)|f->hdr.last_pkt_num,
- NULL, f->hdr.pkt_num);
- if (dev_queue_xmit(skb) < 0)
- return -1;
+ oz_dbg(TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
+ if (dev_queue_xmit(skb) < 0)
+ return -1;
- }
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
void oz_send_queued_frames(struct oz_pd *pd, int backlog)
@@ -615,7 +667,8 @@ void oz_send_queued_frames(struct oz_pd *pd, int backlog)
out: oz_prepare_frame(pd, 1);
oz_send_next_queued_frame(pd, 0);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static int oz_send_isoc_frame(struct oz_pd *pd)
@@ -627,6 +680,7 @@ static int oz_send_isoc_frame(struct oz_pd *pd)
struct list_head *e;
struct list_head list;
int total_size = sizeof(struct oz_hdr);
+
INIT_LIST_HEAD(&list);
oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
@@ -635,7 +689,7 @@ static int oz_send_isoc_frame(struct oz_pd *pd)
return 0;
skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
if (skb == NULL) {
- oz_trace("Cannot alloc skb\n");
+ oz_dbg(ON, "Cannot alloc skb\n");
oz_elt_info_free_chain(&pd->elt_buff, &list);
return -1;
}
@@ -659,12 +713,12 @@ static int oz_send_isoc_frame(struct oz_pd *pd)
memcpy(elt, ei->data, ei->length);
elt = oz_next_elt(elt);
}
- oz_event_log(OZ_EVT_TX_ISOC, 0, 0, NULL, 0);
dev_queue_xmit(skb);
oz_elt_info_free_chain(&pd->elt_buff, &list);
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
@@ -684,8 +738,8 @@ void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
if ((diff > OZ_LAST_PN_HALF_CYCLE) || (pkt_num == 0))
break;
- oz_trace2(OZ_TRACE_TX_FRAMES, "Releasing pkt_num= %u, nb= %d\n",
- pkt_num, pd->nb_queued_frames);
+ oz_dbg(TX_FRAMES, "Releasing pkt_num= %u, nb= %d\n",
+ pkt_num, pd->nb_queued_frames);
if (first == NULL)
first = e;
last = e;
@@ -705,7 +759,8 @@ void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
oz_retire_frame(pd, f);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Precondition: stream_lock must be held.
* Context: softirq
*/
@@ -713,6 +768,7 @@ static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
{
struct list_head *e;
struct oz_isoc_stream *st;
+
list_for_each(e, &pd->stream_list) {
st = container_of(e, struct oz_isoc_stream, link);
if (st->ep_num == ep_num)
@@ -720,7 +776,8 @@ static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
}
return NULL;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
@@ -739,7 +796,8 @@ int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
kfree(st);
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
static void oz_isoc_stream_free(struct oz_isoc_stream *st)
@@ -747,12 +805,14 @@ static void oz_isoc_stream_free(struct oz_isoc_stream *st)
kfree_skb(st->skb);
kfree(st);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
{
struct oz_isoc_stream *st;
+
spin_lock_bh(&pd->stream_lock);
st = pd_stream_find(pd, ep_num);
if (st)
@@ -762,16 +822,16 @@ int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
oz_isoc_stream_free(st);
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: any
*/
static void oz_isoc_destructor(struct sk_buff *skb)
{
atomic_dec(&g_submitted_isoc);
- oz_event_log(OZ_EVT_TX_ISOC_DONE, atomic_read(&g_submitted_isoc),
- 0, skb, 0);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
@@ -782,6 +842,7 @@ int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
struct sk_buff *skb = NULL;
struct oz_hdr *oz_hdr = NULL;
int size = 0;
+
spin_lock_bh(&pd->stream_lock);
st = pd_stream_find(pd, ep_num);
if (st) {
@@ -846,10 +907,20 @@ int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
struct oz_tx_frame *isoc_unit = NULL;
int nb = pd->nb_queued_isoc_frames;
if (nb >= pd->isoc_latency) {
- oz_trace2(OZ_TRACE_TX_FRAMES,
- "Dropping ISOC Unit nb= %d\n",
- nb);
- goto out;
+ struct list_head *e;
+ struct oz_tx_frame *f;
+ oz_dbg(TX_FRAMES, "Dropping ISOC Unit nb= %d\n",
+ nb);
+ spin_lock(&pd->tx_frame_lock);
+ list_for_each(e, &pd->tx_queue) {
+ f = container_of(e, struct oz_tx_frame,
+ link);
+ if (f->skb != NULL) {
+ oz_tx_isoc_free(pd, f);
+ break;
+ }
+ }
+ spin_unlock(&pd->tx_frame_lock);
}
isoc_unit = oz_tx_frame_alloc(pd);
if (isoc_unit == NULL)
@@ -860,72 +931,74 @@ int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
list_add_tail(&isoc_unit->link, &pd->tx_queue);
pd->nb_queued_isoc_frames++;
spin_unlock_bh(&pd->tx_frame_lock);
- oz_trace2(OZ_TRACE_TX_FRAMES,
- "Added ISOC Frame to Tx Queue isoc_nb= %d, nb= %d\n",
- pd->nb_queued_isoc_frames, pd->nb_queued_frames);
- oz_event_log(OZ_EVT_TX_ISOC, nb_units, iso.frame_number,
- skb, atomic_read(&g_submitted_isoc));
+ oz_dbg(TX_FRAMES,
+ "Added ISOC Frame to Tx Queue isoc_nb= %d, nb= %d\n",
+ pd->nb_queued_isoc_frames, pd->nb_queued_frames);
return 0;
}
/*In ANYTIME mode Xmit unit immediately*/
if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
atomic_inc(&g_submitted_isoc);
- oz_event_log(OZ_EVT_TX_ISOC, nb_units, iso.frame_number,
- skb, atomic_read(&g_submitted_isoc));
- if (dev_queue_xmit(skb) < 0) {
- oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, NULL, 0);
+ if (dev_queue_xmit(skb) < 0)
return -1;
- } else
+ else
return 0;
}
-out: oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, NULL, 0);
- kfree_skb(skb);
+out: kfree_skb(skb);
return -1;
}
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
void oz_apps_init(void)
{
int i;
+
for (i = 0; i < OZ_APPID_MAX; i++)
if (g_app_if[i].init)
g_app_if[i].init();
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
void oz_apps_term(void)
{
int i;
+
/* Terminate all the apps. */
for (i = 0; i < OZ_APPID_MAX; i++)
if (g_app_if[i].term)
g_app_if[i].term();
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
{
const struct oz_app_if *ai;
+
if (app_id == 0 || app_id > OZ_APPID_MAX)
return;
ai = &g_app_if[app_id-1];
ai->rx(pd, elt);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
void oz_pd_indicate_farewells(struct oz_pd *pd)
{
struct oz_farewell *f;
const struct oz_app_if *ai = &g_app_if[OZ_APPID_USB-1];
+
while (1) {
oz_polling_lock_bh();
if (list_empty(&pd->farewell_list)) {
diff --git a/drivers/staging/ozwpan/ozpd.h b/drivers/staging/ozwpan/ozpd.h
index fbf47cb..12c7129 100644
--- a/drivers/staging/ozwpan/ozpd.h
+++ b/drivers/staging/ozwpan/ozpd.h
@@ -6,6 +6,7 @@
#ifndef _OZPD_H_
#define _OZPD_H_
+#include <linux/interrupt.h>
#include "ozeltbuf.h"
/* PD state
@@ -47,8 +48,8 @@ struct oz_farewell {
struct list_head link;
u8 ep_num;
u8 index;
- u8 report[1];
u8 len;
+ u8 report[0];
};
/* Data structure that holds information on a specific peripheral device (PD).
@@ -68,18 +69,16 @@ struct oz_pd {
u8 isoc_sent;
u32 last_rx_pkt_num;
u32 last_tx_pkt_num;
+ struct timespec last_rx_timestamp;
u32 trigger_pkt_num;
- unsigned long pulse_time_j;
- unsigned long timeout_time_j;
- unsigned long pulse_period_j;
- unsigned long presleep_j;
- unsigned long keep_alive_j;
- unsigned long last_rx_time_j;
+ unsigned long pulse_time;
+ unsigned long pulse_period;
+ unsigned long presleep;
+ unsigned long keep_alive;
struct oz_elt_buf elt_buff;
void *app_ctx[OZ_APPID_MAX];
spinlock_t app_lock[OZ_APPID_MAX];
int max_tx_size;
- u8 heartbeat_requested;
u8 mode;
u8 ms_per_isoc;
unsigned isoc_latency;
@@ -95,6 +94,12 @@ struct oz_pd {
spinlock_t stream_lock;
struct list_head stream_list;
struct net_device *net_dev;
+ struct hrtimer heartbeat;
+ struct hrtimer timeout;
+ u8 timeout_type;
+ struct tasklet_struct heartbeat_tasklet;
+ struct tasklet_struct timeout_tasklet;
+ struct work_struct workitem;
};
#define OZ_MAX_QUEUED_FRAMES 4
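The struct oz_farewell hunk in ozpd.h above replaces the old one-byte report[1] placeholder with an explicit len field and a trailing zero-length array, so the header and the variable-length report share a single allocation and the kmalloc() site no longer needs a "- 1" adjustment. A minimal sketch of the allocation side under that layout; the oz_farewell_alloc() helper is illustrative only and not part of the patch:

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/types.h>

	struct oz_farewell {
		struct list_head link;
		u8 ep_num;
		u8 index;
		u8 len;
		u8 report[0];		/* payload bytes follow the header */
	};

	static struct oz_farewell *oz_farewell_alloc(u8 ep_num, u8 index,
						     const u8 *report, u8 len)
	{
		/* One allocation covers the fixed header plus len payload bytes. */
		struct oz_farewell *f = kmalloc(sizeof(*f) + len, GFP_ATOMIC);

		if (!f)
			return NULL;
		f->ep_num = ep_num;
		f->index = index;
		f->len = len;
		memcpy(f->report, report, len);
		return f;
	}

The oz_add_farewell() hunk further down in ozproto.c follows this same pattern when it switches to kmalloc(sizeof(struct oz_farewell) + len, GFP_ATOMIC).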
diff --git a/drivers/staging/ozwpan/ozproto.c b/drivers/staging/ozwpan/ozproto.c
index 3badf15..88714ec 100644
--- a/drivers/staging/ozwpan/ozproto.c
+++ b/drivers/staging/ozwpan/ozproto.c
@@ -3,6 +3,7 @@
* Released under the GNU General Public License Version 2 (GPLv2).
* -----------------------------------------------------------------------------
*/
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
@@ -10,69 +11,45 @@
#include <linux/netdevice.h>
#include <linux/errno.h>
#include <linux/ieee80211.h>
-#include "ozconfig.h"
+#include "ozdbg.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozusbsvc.h"
-#include "oztrace.h"
+
#include "ozappif.h"
-#include "ozevent.h"
#include <asm/unaligned.h>
#include <linux/uaccess.h>
#include <net/psnap.h>
-/*------------------------------------------------------------------------------
- */
+
#define OZ_CF_CONN_SUCCESS 1
#define OZ_CF_CONN_FAILURE 2
#define OZ_DO_STOP 1
#define OZ_DO_SLEEP 2
-/* States of the timer.
- */
-#define OZ_TIMER_IDLE 0
-#define OZ_TIMER_SET 1
-#define OZ_TIMER_IN_HANDLER 2
-
#define OZ_MAX_TIMER_POOL_SIZE 16
-/*------------------------------------------------------------------------------
- */
struct oz_binding {
struct packet_type ptype;
char name[OZ_MAX_BINDING_LEN];
- struct oz_binding *next;
-};
-
-struct oz_timer {
struct list_head link;
- struct oz_pd *pd;
- unsigned long due_time;
- int type;
};
-/*------------------------------------------------------------------------------
+
+/*
* Static external variables.
*/
static DEFINE_SPINLOCK(g_polling_lock);
static LIST_HEAD(g_pd_list);
-static struct oz_binding *g_binding ;
+static LIST_HEAD(g_binding);
static DEFINE_SPINLOCK(g_binding_lock);
static struct sk_buff_head g_rx_queue;
static u8 g_session_id;
static u16 g_apps = 0x1;
static int g_processing_rx;
-static struct timer_list g_timer;
-static struct oz_timer *g_cur_timer;
-static struct list_head *g_timer_pool;
-static int g_timer_pool_count;
-static int g_timer_state = OZ_TIMER_IDLE;
-static LIST_HEAD(g_timer_list);
-/*------------------------------------------------------------------------------
- */
-static void oz_protocol_timer_start(void);
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
static u8 oz_get_new_session_id(u8 exclude)
@@ -85,7 +62,8 @@ static u8 oz_get_new_session_id(u8 exclude)
}
return g_session_id;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
static void oz_send_conn_rsp(struct oz_pd *pd, u8 status)
@@ -95,6 +73,7 @@ static void oz_send_conn_rsp(struct oz_pd *pd, u8 status)
struct oz_hdr *oz_hdr;
struct oz_elt *elt;
struct oz_elt_connect_rsp *body;
+
int sz = sizeof(struct oz_hdr) + sizeof(struct oz_elt) +
sizeof(struct oz_elt_connect_rsp);
skb = alloc_skb(sz + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
@@ -116,7 +95,6 @@ static void oz_send_conn_rsp(struct oz_pd *pd, u8 status)
oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT);
oz_hdr->last_pkt_num = 0;
put_unaligned(0, &oz_hdr->pkt_num);
- oz_event_log(OZ_EVT_CONNECT_RSP, 0, 0, NULL, 0);
elt->type = OZ_ELT_CONNECT_RSP;
elt->length = sizeof(struct oz_elt_connect_rsp);
memset(body, 0, sizeof(struct oz_elt_connect_rsp));
@@ -126,11 +104,12 @@ static void oz_send_conn_rsp(struct oz_pd *pd, u8 status)
body->session_id = pd->session_id;
put_unaligned(cpu_to_le16(pd->total_apps), &body->apps);
}
- oz_trace("TX: OZ_ELT_CONNECT_RSP %d", status);
+ oz_dbg(ON, "TX: OZ_ELT_CONNECT_RSP %d", status);
dev_queue_xmit(skb);
return;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
static void pd_set_keepalive(struct oz_pd *pd, u8 kalive)
@@ -139,35 +118,41 @@ static void pd_set_keepalive(struct oz_pd *pd, u8 kalive)
switch (kalive & OZ_KALIVE_TYPE_MASK) {
case OZ_KALIVE_SPECIAL:
- pd->keep_alive_j =
- oz_ms_to_jiffies(keep_alive * 1000*60*60*24*20);
+ pd->keep_alive = keep_alive * 1000*60*60*24*20;
break;
case OZ_KALIVE_SECS:
- pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000);
+ pd->keep_alive = keep_alive*1000;
break;
case OZ_KALIVE_MINS:
- pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000*60);
+ pd->keep_alive = keep_alive*1000*60;
break;
case OZ_KALIVE_HOURS:
- pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000*60*60);
+ pd->keep_alive = keep_alive*1000*60*60;
break;
default:
- pd->keep_alive_j = 0;
+ pd->keep_alive = 0;
}
- oz_trace("Keepalive = %lu jiffies\n", pd->keep_alive_j);
+ oz_dbg(ON, "Keepalive = %lu mSec\n", pd->keep_alive);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
-static void pd_set_presleep(struct oz_pd *pd, u8 presleep)
+static void pd_set_presleep(struct oz_pd *pd, u8 presleep, u8 start_timer)
{
if (presleep)
- pd->presleep_j = oz_ms_to_jiffies(presleep*100);
+ pd->presleep = presleep*100;
else
- pd->presleep_j = OZ_PRESLEEP_TOUT_J;
- oz_trace("Presleep time = %lu jiffies\n", pd->presleep_j);
+ pd->presleep = OZ_PRESLEEP_TOUT;
+ if (start_timer) {
+ spin_unlock(&g_polling_lock);
+ oz_timer_add(pd, OZ_TIMER_TOUT, pd->presleep);
+ spin_lock(&g_polling_lock);
+ }
+ oz_dbg(ON, "Presleep time = %lu mSec\n", pd->presleep);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
@@ -181,6 +166,7 @@ static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
u16 new_apps = g_apps;
struct net_device *old_net_dev = NULL;
struct oz_pd *free_pd = NULL;
+
if (cur_pd) {
pd = cur_pd;
spin_lock_bh(&g_polling_lock);
@@ -190,7 +176,7 @@ static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
pd = oz_pd_alloc(pd_addr);
if (pd == NULL)
return NULL;
- pd->last_rx_time_j = jiffies;
+ getnstimeofday(&pd->last_rx_timestamp);
spin_lock_bh(&g_polling_lock);
list_for_each(e, &g_pd_list) {
pd2 = container_of(e, struct oz_pd, link);
@@ -212,7 +198,7 @@ static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
dev_hold(net_dev);
pd->net_dev = net_dev;
}
- oz_trace("Host vendor: %d\n", body->host_vendor);
+ oz_dbg(ON, "Host vendor: %d\n", body->host_vendor);
pd->max_tx_size = OZ_MAX_TX_SIZE;
pd->mode = body->mode;
pd->pd_info = body->pd_info;
@@ -236,12 +222,11 @@ static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
}
if (body->max_len_div16)
pd->max_tx_size = ((u16)body->max_len_div16)<<4;
- oz_trace("Max frame:%u Ms per isoc:%u\n",
- pd->max_tx_size, pd->ms_per_isoc);
+ oz_dbg(ON, "Max frame:%u Ms per isoc:%u\n",
+ pd->max_tx_size, pd->ms_per_isoc);
pd->max_stream_buffering = 3*1024;
- pd->timeout_time_j = jiffies + OZ_CONNECTION_TOUT_J;
- pd->pulse_period_j = OZ_QUANTUM_J;
- pd_set_presleep(pd, body->presleep);
+ pd->pulse_period = OZ_QUANTUM;
+ pd_set_presleep(pd, body->presleep, 0);
pd_set_keepalive(pd, body->keep_alive);
new_apps &= le16_to_cpu(get_unaligned(&body->apps));
@@ -273,9 +258,8 @@ done:
u16 resume_apps = new_apps & pd->paused_apps & ~0x1;
spin_unlock_bh(&g_polling_lock);
oz_pd_set_state(pd, OZ_PD_S_CONNECTED);
- oz_timer_delete(pd, OZ_TIMER_STOP);
- oz_trace("new_apps=0x%x total_apps=0x%x paused_apps=0x%x\n",
- new_apps, pd->total_apps, pd->paused_apps);
+ oz_dbg(ON, "new_apps=0x%x total_apps=0x%x paused_apps=0x%x\n",
+ new_apps, pd->total_apps, pd->paused_apps);
if (start_apps) {
if (oz_services_start(pd, start_apps, 0))
rsp_status = OZ_STATUS_TOO_MANY_PDS;
@@ -302,7 +286,8 @@ done:
oz_pd_destroy(free_pd);
return pd;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
static void oz_add_farewell(struct oz_pd *pd, u8 ep_num, u8 index,
@@ -311,13 +296,15 @@ static void oz_add_farewell(struct oz_pd *pd, u8 ep_num, u8 index,
struct oz_farewell *f;
struct oz_farewell *f2;
int found = 0;
- f = kmalloc(sizeof(struct oz_farewell) + len - 1, GFP_ATOMIC);
+
+ f = kmalloc(sizeof(struct oz_farewell) + len, GFP_ATOMIC);
if (!f)
return;
f->ep_num = ep_num;
f->index = index;
+ f->len = len;
memcpy(f->report, report, len);
- oz_trace("RX: Adding farewell report\n");
+ oz_dbg(ON, "RX: Adding farewell report\n");
spin_lock(&g_polling_lock);
list_for_each_entry(f2, &pd->farewell_list, link) {
if ((f2->ep_num == ep_num) && (f2->index == index)) {
@@ -331,7 +318,8 @@ static void oz_add_farewell(struct oz_pd *pd, u8 ep_num, u8 index,
if (found)
kfree(f2);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
static void oz_rx_frame(struct sk_buff *skb)
@@ -342,23 +330,20 @@ static void oz_rx_frame(struct sk_buff *skb)
int length;
struct oz_pd *pd = NULL;
struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
+ struct timespec current_time;
int dup = 0;
u32 pkt_num;
- oz_event_log(OZ_EVT_RX_PROCESS, 0,
- (((u16)oz_hdr->control)<<8)|oz_hdr->last_pkt_num,
- NULL, oz_hdr->pkt_num);
- oz_trace2(OZ_TRACE_RX_FRAMES,
- "RX frame PN=0x%x LPN=0x%x control=0x%x\n",
- oz_hdr->pkt_num, oz_hdr->last_pkt_num, oz_hdr->control);
+ oz_dbg(RX_FRAMES, "RX frame PN=0x%x LPN=0x%x control=0x%x\n",
+ oz_hdr->pkt_num, oz_hdr->last_pkt_num, oz_hdr->control);
mac_hdr = skb_mac_header(skb);
src_addr = &mac_hdr[ETH_ALEN] ;
length = skb->len;
/* Check the version field */
if (oz_get_prot_ver(oz_hdr->control) != OZ_PROTOCOL_VERSION) {
- oz_trace("Incorrect protocol version: %d\n",
- oz_get_prot_ver(oz_hdr->control));
+ oz_dbg(ON, "Incorrect protocol version: %d\n",
+ oz_get_prot_ver(oz_hdr->control));
goto done;
}
@@ -366,19 +351,24 @@ static void oz_rx_frame(struct sk_buff *skb)
pd = oz_pd_find(src_addr);
if (pd) {
- pd->last_rx_time_j = jiffies;
- oz_timer_add(pd, OZ_TIMER_TOUT,
- pd->last_rx_time_j + pd->presleep_j, 1);
+ if (!(pd->state & OZ_PD_S_CONNECTED))
+ oz_pd_set_state(pd, OZ_PD_S_CONNECTED);
+ getnstimeofday(&current_time);
+ if ((current_time.tv_sec != pd->last_rx_timestamp.tv_sec) ||
+ (pd->presleep < MSEC_PER_SEC)) {
+ oz_timer_add(pd, OZ_TIMER_TOUT, pd->presleep);
+ pd->last_rx_timestamp = current_time;
+ }
if (pkt_num != pd->last_rx_pkt_num) {
pd->last_rx_pkt_num = pkt_num;
} else {
dup = 1;
- oz_trace("Duplicate frame\n");
+ oz_dbg(ON, "Duplicate frame\n");
}
}
if (pd && !dup && ((pd->mode & OZ_MODE_MASK) == OZ_MODE_TRIGGERED)) {
- oz_trace2(OZ_TRACE_RX_FRAMES, "Received TRIGGER Frame\n");
+ oz_dbg(RX_FRAMES, "Received TRIGGER Frame\n");
pd->last_sent_frame = &pd->tx_queue;
if (oz_hdr->control & OZ_F_ACK) {
/* Retire completed frames */
@@ -402,23 +392,22 @@ static void oz_rx_frame(struct sk_buff *skb)
break;
switch (elt->type) {
case OZ_ELT_CONNECT_REQ:
- oz_event_log(OZ_EVT_CONNECT_REQ, 0, 0, NULL, 0);
- oz_trace("RX: OZ_ELT_CONNECT_REQ\n");
+ oz_dbg(ON, "RX: OZ_ELT_CONNECT_REQ\n");
pd = oz_connect_req(pd, elt, src_addr, skb->dev);
break;
case OZ_ELT_DISCONNECT:
- oz_trace("RX: OZ_ELT_DISCONNECT\n");
+ oz_dbg(ON, "RX: OZ_ELT_DISCONNECT\n");
if (pd)
oz_pd_sleep(pd);
break;
case OZ_ELT_UPDATE_PARAM_REQ: {
struct oz_elt_update_param *body =
(struct oz_elt_update_param *)(elt + 1);
- oz_trace("RX: OZ_ELT_UPDATE_PARAM_REQ\n");
+ oz_dbg(ON, "RX: OZ_ELT_UPDATE_PARAM_REQ\n");
if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
spin_lock(&g_polling_lock);
pd_set_keepalive(pd, body->keepalive);
- pd_set_presleep(pd, body->presleep);
+ pd_set_presleep(pd, body->presleep, 1);
spin_unlock(&g_polling_lock);
}
}
@@ -426,7 +415,7 @@ static void oz_rx_frame(struct sk_buff *skb)
case OZ_ELT_FAREWELL_REQ: {
struct oz_elt_farewell *body =
(struct oz_elt_farewell *)(elt + 1);
- oz_trace("RX: OZ_ELT_FAREWELL_REQ\n");
+ oz_dbg(ON, "RX: OZ_ELT_FAREWELL_REQ\n");
oz_add_farewell(pd, body->ep_num,
body->index, body->report,
elt->length + 1 - sizeof(*body));
@@ -442,7 +431,7 @@ static void oz_rx_frame(struct sk_buff *skb)
}
break;
default:
- oz_trace("RX: Unknown elt %02x\n", elt->type);
+ oz_dbg(ON, "RX: Unknown elt %02x\n", elt->type);
}
elt = oz_next_elt(elt);
}
@@ -451,19 +440,19 @@ done:
oz_pd_put(pd);
consume_skb(skb);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
void oz_protocol_term(void)
{
- struct list_head *chain;
- del_timer_sync(&g_timer);
+ struct oz_binding *b, *t;
+
/* Walk the list of bindings and remove each one.
*/
spin_lock_bh(&g_binding_lock);
- while (g_binding) {
- struct oz_binding *b = g_binding;
- g_binding = b->next;
+ list_for_each_entry_safe(b, t, &g_binding, link) {
+ list_del(&b->link);
spin_unlock_bh(&g_binding_lock);
dev_remove_pack(&b->ptype);
if (b->ptype.dev)
@@ -486,21 +475,38 @@ void oz_protocol_term(void)
oz_pd_put(pd);
spin_lock_bh(&g_polling_lock);
}
- chain = g_timer_pool;
- g_timer_pool = NULL;
spin_unlock_bh(&g_polling_lock);
- while (chain) {
- struct oz_timer *t = container_of(chain, struct oz_timer, link);
- chain = chain->next;
- kfree(t);
- }
- oz_trace("Protocol stopped\n");
+ oz_dbg(ON, "Protocol stopped\n");
+}
+
+/*
+ * Context: softirq
+ */
+void oz_pd_heartbeat_handler(unsigned long data)
+{
+ struct oz_pd *pd = (struct oz_pd *)data;
+ u16 apps = 0;
+
+ spin_lock_bh(&g_polling_lock);
+ if (pd->state & OZ_PD_S_CONNECTED)
+ apps = pd->total_apps;
+ spin_unlock_bh(&g_polling_lock);
+ if (apps)
+ oz_pd_heartbeat(pd, apps);
+ oz_pd_put(pd);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
-static void oz_pd_handle_timer(struct oz_pd *pd, int type)
+void oz_pd_timeout_handler(unsigned long data)
{
+ int type;
+ struct oz_pd *pd = (struct oz_pd *)data;
+
+ spin_lock_bh(&g_polling_lock);
+ type = pd->timeout_type;
+ spin_unlock_bh(&g_polling_lock);
switch (type) {
case OZ_TIMER_TOUT:
oz_pd_sleep(pd);
@@ -508,236 +514,86 @@ static void oz_pd_handle_timer(struct oz_pd *pd, int type)
case OZ_TIMER_STOP:
oz_pd_stop(pd);
break;
- case OZ_TIMER_HEARTBEAT: {
- u16 apps = 0;
- spin_lock_bh(&g_polling_lock);
- pd->heartbeat_requested = 0;
- if (pd->state & OZ_PD_S_CONNECTED)
- apps = pd->total_apps;
- spin_unlock_bh(&g_polling_lock);
- if (apps)
- oz_pd_heartbeat(pd, apps);
- }
- break;
}
+ oz_pd_put(pd);
}
-/*------------------------------------------------------------------------------
- * Context: softirq
+
+/*
+ * Context: Interrupt
*/
-static void oz_protocol_timer(unsigned long arg)
+enum hrtimer_restart oz_pd_heartbeat_event(struct hrtimer *timer)
{
- struct oz_timer *t;
- struct oz_timer *t2;
struct oz_pd *pd;
- spin_lock_bh(&g_polling_lock);
- if (!g_cur_timer) {
- /* This happens if we remove the current timer but can't stop
- * the timer from firing. In this case just get out.
- */
- oz_event_log(OZ_EVT_TIMER, 0, 0, NULL, 0);
- spin_unlock_bh(&g_polling_lock);
- return;
- }
- g_timer_state = OZ_TIMER_IN_HANDLER;
- t = g_cur_timer;
- g_cur_timer = NULL;
- list_del(&t->link);
- spin_unlock_bh(&g_polling_lock);
- do {
- pd = t->pd;
- oz_event_log(OZ_EVT_TIMER, 0, t->type, NULL, 0);
- oz_pd_handle_timer(pd, t->type);
- spin_lock_bh(&g_polling_lock);
- if (g_timer_pool_count < OZ_MAX_TIMER_POOL_SIZE) {
- t->link.next = g_timer_pool;
- g_timer_pool = &t->link;
- g_timer_pool_count++;
- t = NULL;
- }
- if (!list_empty(&g_timer_list)) {
- t2 = container_of(g_timer_list.next,
- struct oz_timer, link);
- if (time_before_eq(t2->due_time, jiffies))
- list_del(&t2->link);
- else
- t2 = NULL;
- } else {
- t2 = NULL;
- }
- spin_unlock_bh(&g_polling_lock);
- oz_pd_put(pd);
- kfree(t);
- t = t2;
- } while (t);
- g_timer_state = OZ_TIMER_IDLE;
- oz_protocol_timer_start();
+
+ pd = container_of(timer, struct oz_pd, heartbeat);
+ hrtimer_forward_now(timer, ktime_set(pd->pulse_period /
+ MSEC_PER_SEC, (pd->pulse_period % MSEC_PER_SEC) * NSEC_PER_MSEC));
+ oz_pd_get(pd);
+ tasklet_schedule(&pd->heartbeat_tasklet);
+ return HRTIMER_RESTART;
}
-/*------------------------------------------------------------------------------
- * Context: softirq
+
+/*
+ * Context: Interrupt
*/
-static void oz_protocol_timer_start(void)
+enum hrtimer_restart oz_pd_timeout_event(struct hrtimer *timer)
{
- spin_lock_bh(&g_polling_lock);
- if (!list_empty(&g_timer_list)) {
- g_cur_timer =
- container_of(g_timer_list.next, struct oz_timer, link);
- if (g_timer_state == OZ_TIMER_SET) {
- oz_event_log(OZ_EVT_TIMER_CTRL, 3,
- (u16)g_cur_timer->type, NULL,
- (unsigned)g_cur_timer->due_time);
- mod_timer(&g_timer, g_cur_timer->due_time);
- } else {
- oz_event_log(OZ_EVT_TIMER_CTRL, 4,
- (u16)g_cur_timer->type, NULL,
- (unsigned)g_cur_timer->due_time);
- g_timer.expires = g_cur_timer->due_time;
- g_timer.function = oz_protocol_timer;
- g_timer.data = 0;
- add_timer(&g_timer);
- }
- g_timer_state = OZ_TIMER_SET;
- } else {
- oz_trace("No queued timers\n");
- }
- spin_unlock_bh(&g_polling_lock);
+ struct oz_pd *pd;
+
+ pd = container_of(timer, struct oz_pd, timeout);
+ oz_pd_get(pd);
+ tasklet_schedule(&pd->timeout_tasklet);
+ return HRTIMER_NORESTART;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
-void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time,
- int remove)
+void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time)
{
- struct list_head *e;
- struct oz_timer *t = NULL;
- int restart_needed = 0;
- oz_event_log(OZ_EVT_TIMER_CTRL, 1, (u16)type, NULL, (unsigned)due_time);
- spin_lock(&g_polling_lock);
- if (remove) {
- list_for_each(e, &g_timer_list) {
- t = container_of(e, struct oz_timer, link);
- if ((t->pd == pd) && (t->type == type)) {
- if (g_cur_timer == t) {
- restart_needed = 1;
- g_cur_timer = NULL;
- }
- list_del(e);
- break;
- }
- t = NULL;
- }
- }
- if (!t) {
- if (g_timer_pool) {
- t = container_of(g_timer_pool, struct oz_timer, link);
- g_timer_pool = g_timer_pool->next;
- g_timer_pool_count--;
+ spin_lock_bh(&g_polling_lock);
+ switch (type) {
+ case OZ_TIMER_TOUT:
+ case OZ_TIMER_STOP:
+ if (hrtimer_active(&pd->timeout)) {
+ hrtimer_set_expires(&pd->timeout, ktime_set(due_time /
+ MSEC_PER_SEC, (due_time % MSEC_PER_SEC) *
+ NSEC_PER_MSEC));
+ hrtimer_start_expires(&pd->timeout, HRTIMER_MODE_REL);
} else {
- t = kmalloc(sizeof(struct oz_timer), GFP_ATOMIC);
- }
- if (t) {
- t->pd = pd;
- t->type = type;
- oz_pd_get(pd);
- }
- }
- if (t) {
- struct oz_timer *t2;
- t->due_time = due_time;
- list_for_each(e, &g_timer_list) {
- t2 = container_of(e, struct oz_timer, link);
- if (time_before(due_time, t2->due_time)) {
- if (t2 == g_cur_timer) {
- g_cur_timer = NULL;
- restart_needed = 1;
- }
- break;
- }
+ hrtimer_start(&pd->timeout, ktime_set(due_time /
+ MSEC_PER_SEC, (due_time % MSEC_PER_SEC) *
+ NSEC_PER_MSEC), HRTIMER_MODE_REL);
}
- list_add_tail(&t->link, e);
- }
- if (g_timer_state == OZ_TIMER_IDLE)
- restart_needed = 1;
- else if (g_timer_state == OZ_TIMER_IN_HANDLER)
- restart_needed = 0;
- spin_unlock(&g_polling_lock);
- if (restart_needed)
- oz_protocol_timer_start();
-}
-/*------------------------------------------------------------------------------
- * Context: softirq or process
- */
-void oz_timer_delete(struct oz_pd *pd, int type)
-{
- struct list_head *chain = NULL;
- struct oz_timer *t;
- struct oz_timer *n;
- int restart_needed = 0;
- int release = 0;
- oz_event_log(OZ_EVT_TIMER_CTRL, 2, (u16)type, NULL, 0);
- spin_lock(&g_polling_lock);
- list_for_each_entry_safe(t, n, &g_timer_list, link) {
- if ((t->pd == pd) && ((type == 0) || (t->type == type))) {
- if (g_cur_timer == t) {
- restart_needed = 1;
- g_cur_timer = NULL;
- del_timer(&g_timer);
- }
- list_del(&t->link);
- release++;
- if (g_timer_pool_count < OZ_MAX_TIMER_POOL_SIZE) {
- t->link.next = g_timer_pool;
- g_timer_pool = &t->link;
- g_timer_pool_count++;
- } else {
- t->link.next = chain;
- chain = &t->link;
- }
- if (type)
- break;
- }
- }
- if (g_timer_state == OZ_TIMER_IN_HANDLER)
- restart_needed = 0;
- else if (restart_needed)
- g_timer_state = OZ_TIMER_IDLE;
- spin_unlock(&g_polling_lock);
- if (restart_needed)
- oz_protocol_timer_start();
- while (release--)
- oz_pd_put(pd);
- while (chain) {
- t = container_of(chain, struct oz_timer, link);
- chain = chain->next;
- kfree(t);
+ pd->timeout_type = type;
+ break;
+ case OZ_TIMER_HEARTBEAT:
+ if (!hrtimer_active(&pd->heartbeat))
+ hrtimer_start(&pd->heartbeat, ktime_set(due_time /
+ MSEC_PER_SEC, (due_time % MSEC_PER_SEC) *
+ NSEC_PER_MSEC), HRTIMER_MODE_REL);
+ break;
}
+ spin_unlock_bh(&g_polling_lock);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
void oz_pd_request_heartbeat(struct oz_pd *pd)
{
- unsigned long now = jiffies;
- unsigned long t;
- spin_lock(&g_polling_lock);
- if (pd->heartbeat_requested) {
- spin_unlock(&g_polling_lock);
- return;
- }
- if (pd->pulse_period_j)
- t = ((now / pd->pulse_period_j) + 1) * pd->pulse_period_j;
- else
- t = now + 1;
- pd->heartbeat_requested = 1;
- spin_unlock(&g_polling_lock);
- oz_timer_add(pd, OZ_TIMER_HEARTBEAT, t, 0);
+ oz_timer_add(pd, OZ_TIMER_HEARTBEAT, pd->pulse_period > 0 ?
+ pd->pulse_period : OZ_QUANTUM);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
struct oz_pd *oz_pd_find(const u8 *mac_addr)
{
struct oz_pd *pd;
struct list_head *e;
+
spin_lock_bh(&g_polling_lock);
list_for_each(e, &g_pd_list) {
pd = container_of(e, struct oz_pd, link);
@@ -750,7 +606,8 @@ struct oz_pd *oz_pd_find(const u8 *mac_addr)
spin_unlock_bh(&g_polling_lock);
return NULL;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
void oz_app_enable(int app_id, int enable)
@@ -764,13 +621,13 @@ void oz_app_enable(int app_id, int enable)
spin_unlock_bh(&g_polling_lock);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static int oz_pkt_recv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
- oz_event_log(OZ_EVT_RX_FRAME, 0, 0, NULL, 0);
skb = skb_share_check(skb, GFP_ATOMIC);
if (skb == NULL)
return 0;
@@ -799,10 +656,11 @@ static int oz_pkt_recv(struct sk_buff *skb, struct net_device *dev,
}
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
-void oz_binding_add(char *net_dev)
+void oz_binding_add(const char *net_dev)
{
struct oz_binding *binding;
@@ -812,43 +670,28 @@ void oz_binding_add(char *net_dev)
binding->ptype.func = oz_pkt_recv;
memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
if (net_dev && *net_dev) {
- oz_trace("Adding binding: %s\n", net_dev);
+ oz_dbg(ON, "Adding binding: %s\n", net_dev);
binding->ptype.dev =
dev_get_by_name(&init_net, net_dev);
if (binding->ptype.dev == NULL) {
- oz_trace("Netdev %s not found\n", net_dev);
+ oz_dbg(ON, "Netdev %s not found\n", net_dev);
kfree(binding);
binding = NULL;
}
} else {
- oz_trace("Binding to all netcards\n");
+ oz_dbg(ON, "Binding to all netcards\n");
binding->ptype.dev = NULL;
}
if (binding) {
dev_add_pack(&binding->ptype);
spin_lock_bh(&g_binding_lock);
- binding->next = g_binding;
- g_binding = binding;
+ list_add_tail(&binding->link, &g_binding);
spin_unlock_bh(&g_binding_lock);
}
}
}
-/*------------------------------------------------------------------------------
- * Context: process
- */
-static int compare_binding_name(char *s1, char *s2)
-{
- int i;
- for (i = 0; i < OZ_MAX_BINDING_LEN; i++) {
- if (*s1 != *s2)
- return 0;
- if (!*s1++)
- return 1;
- s2++;
- }
- return 1;
-}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static void pd_stop_all_for_device(struct net_device *net_dev)
@@ -856,6 +699,7 @@ static void pd_stop_all_for_device(struct net_device *net_dev)
struct list_head h;
struct oz_pd *pd;
struct oz_pd *n;
+
INIT_LIST_HEAD(&h);
spin_lock_bh(&g_polling_lock);
list_for_each_entry_safe(pd, n, &g_pd_list, link) {
@@ -871,38 +715,37 @@ static void pd_stop_all_for_device(struct net_device *net_dev)
oz_pd_put(pd);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
-void oz_binding_remove(char *net_dev)
+void oz_binding_remove(const char *net_dev)
{
struct oz_binding *binding;
- struct oz_binding **link;
- oz_trace("Removing binding: %s\n", net_dev);
+ int found = 0;
+
+ oz_dbg(ON, "Removing binding: %s\n", net_dev);
spin_lock_bh(&g_binding_lock);
- binding = g_binding;
- link = &g_binding;
- while (binding) {
- if (compare_binding_name(binding->name, net_dev)) {
- oz_trace("Binding '%s' found\n", net_dev);
- *link = binding->next;
+ list_for_each_entry(binding, &g_binding, link) {
+ if (strncmp(binding->name, net_dev, OZ_MAX_BINDING_LEN) == 0) {
+ oz_dbg(ON, "Binding '%s' found\n", net_dev);
+ found = 1;
break;
- } else {
- link = &binding;
- binding = binding->next;
}
}
spin_unlock_bh(&g_binding_lock);
- if (binding) {
+ if (found) {
dev_remove_pack(&binding->ptype);
if (binding->ptype.dev) {
dev_put(binding->ptype.dev);
pd_stop_all_for_device(binding->ptype.dev);
}
+ list_del(&binding->link);
kfree(binding);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static char *oz_get_next_device_name(char *s, char *dname, int max_size)
@@ -916,7 +759,8 @@ static char *oz_get_next_device_name(char *s, char *dname, int max_size)
*dname = 0;
return s;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
int oz_protocol_init(char *devs)
@@ -932,10 +776,10 @@ int oz_protocol_init(char *devs)
oz_binding_add(d);
}
}
- init_timer(&g_timer);
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
int oz_get_pd_list(struct oz_mac_addr *addr, int max_count)
@@ -943,6 +787,7 @@ int oz_get_pd_list(struct oz_mac_addr *addr, int max_count)
struct oz_pd *pd;
struct list_head *e;
int count = 0;
+
spin_lock_bh(&g_polling_lock);
list_for_each(e, &g_pd_list) {
if (count >= max_count)
@@ -953,14 +798,12 @@ int oz_get_pd_list(struct oz_mac_addr *addr, int max_count)
spin_unlock_bh(&g_polling_lock);
return count;
}
-/*------------------------------------------------------------------------------
-*/
+
void oz_polling_lock_bh(void)
{
spin_lock_bh(&g_polling_lock);
}
-/*------------------------------------------------------------------------------
-*/
+
void oz_polling_unlock_bh(void)
{
spin_unlock_bh(&g_polling_lock);
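The ozproto.c rework above retires the shared timer list, timer pool and g_timer in favour of per-PD hrtimers whose expiry callbacks only take a reference and schedule a tasklet, leaving the real work to softirq context. The hrtimer_init()/tasklet_init() wiring is not part of the hunks shown here, so the following is a minimal sketch of how the pieces fit together; the my_* names are illustrative, while the millisecond-to-ktime conversion mirrors oz_timer_add():

	#include <linux/hrtimer.h>
	#include <linux/interrupt.h>
	#include <linux/ktime.h>

	struct my_dev {
		struct hrtimer timeout;
		struct tasklet_struct timeout_tasklet;
	};

	/* Hard-irq context: no real work, just hand off to softirq. */
	static enum hrtimer_restart my_timeout_event(struct hrtimer *timer)
	{
		struct my_dev *d = container_of(timer, struct my_dev, timeout);

		/* The patch additionally takes a PD reference here and drops
		 * it in the tasklet handler; omitted in this sketch.
		 */
		tasklet_schedule(&d->timeout_tasklet);
		return HRTIMER_NORESTART;
	}

	/* Softirq context: the actual timeout handling runs here. */
	static void my_timeout_handler(unsigned long data)
	{
		struct my_dev *d = (struct my_dev *)data;

		pr_debug("timeout fired for %p\n", d);
	}

	static void my_timers_init(struct my_dev *d)
	{
		hrtimer_init(&d->timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		d->timeout.function = my_timeout_event;
		tasklet_init(&d->timeout_tasklet, my_timeout_handler,
			     (unsigned long)d);
	}

	static void my_timeout_arm(struct my_dev *d, unsigned long ms)
	{
		/* Milliseconds -> ktime, relative expiry, as in oz_timer_add(). */
		hrtimer_start(&d->timeout,
			      ktime_set(ms / MSEC_PER_SEC,
					(ms % MSEC_PER_SEC) * NSEC_PER_MSEC),
			      HRTIMER_MODE_REL);
	}

The heartbeat path in the hunks above works the same way, except that oz_pd_heartbeat_event() re-arms itself with hrtimer_forward_now() and returns HRTIMER_RESTART.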
diff --git a/drivers/staging/ozwpan/ozproto.h b/drivers/staging/ozwpan/ozproto.h
index 93bb4c0..0c49c8a 100644
--- a/drivers/staging/ozwpan/ozproto.h
+++ b/drivers/staging/ozwpan/ozproto.h
@@ -7,28 +7,19 @@
#define _OZPROTO_H
#include <asm/byteorder.h>
-#include "ozconfig.h"
+#include "ozdbg.h"
#include "ozappif.h"
#define OZ_ALLOCATED_SPACE(__x) (LL_RESERVED_SPACE(__x)+(__x)->needed_tailroom)
-/* Converts millisecs to jiffies.
- */
-#define oz_ms_to_jiffies(__x) msecs_to_jiffies(__x)
-
-/* Quantum milliseconds.
- */
-#define OZ_QUANTUM_MS 8
-/* Quantum jiffies
- */
-#define OZ_QUANTUM_J (oz_ms_to_jiffies(OZ_QUANTUM_MS))
+/* Quantum in MS */
+#define OZ_QUANTUM 8
/* Default timeouts.
*/
-#define OZ_CONNECTION_TOUT_J (2*HZ)
-#define OZ_PRESLEEP_TOUT_J (11*HZ)
+#define OZ_PRESLEEP_TOUT 11
/* Maximum sizes of tx frames. */
-#define OZ_MAX_TX_SIZE 1514
+#define OZ_MAX_TX_SIZE 760
/* Maximum number of uncompleted isoc frames that can be pending in network. */
#define OZ_MAX_SUBMITTED_ISOC 16
@@ -63,13 +54,18 @@ void oz_protocol_term(void);
int oz_get_pd_list(struct oz_mac_addr *addr, int max_count);
void oz_app_enable(int app_id, int enable);
struct oz_pd *oz_pd_find(const u8 *mac_addr);
-void oz_binding_add(char *net_dev);
-void oz_binding_remove(char *net_dev);
-void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time,
- int remove);
+void oz_binding_add(const char *net_dev);
+void oz_binding_remove(const char *net_dev);
+void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time);
void oz_timer_delete(struct oz_pd *pd, int type);
void oz_pd_request_heartbeat(struct oz_pd *pd);
void oz_polling_lock_bh(void);
void oz_polling_unlock_bh(void);
+void oz_pd_heartbeat_handler(unsigned long data);
+void oz_pd_timeout_handler(unsigned long data);
+enum hrtimer_restart oz_pd_heartbeat_event(struct hrtimer *timer);
+enum hrtimer_restart oz_pd_timeout_event(struct hrtimer *timer);
+int oz_get_pd_status_list(char *pd_list, int max_count);
+int oz_get_binding_list(char *buf, int max_if);
#endif /* _OZPROTO_H */
diff --git a/drivers/staging/ozwpan/oztrace.c b/drivers/staging/ozwpan/oztrace.c
deleted file mode 100644
index 353ead2..0000000
--- a/drivers/staging/ozwpan/oztrace.c
+++ /dev/null
@@ -1,36 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#include "ozconfig.h"
-#include "oztrace.h"
-
-#ifdef WANT_VERBOSE_TRACE
-unsigned long trace_flags =
- 0
-#ifdef WANT_TRACE_STREAM
- | OZ_TRACE_STREAM
-#endif /* WANT_TRACE_STREAM */
-#ifdef WANT_TRACE_URB
- | OZ_TRACE_URB
-#endif /* WANT_TRACE_URB */
-
-#ifdef WANT_TRACE_CTRL_DETAIL
- | OZ_TRACE_CTRL_DETAIL
-#endif /* WANT_TRACE_CTRL_DETAIL */
-
-#ifdef WANT_TRACE_HUB
- | OZ_TRACE_HUB
-#endif /* WANT_TRACE_HUB */
-
-#ifdef WANT_TRACE_RX_FRAMES
- | OZ_TRACE_RX_FRAMES
-#endif /* WANT_TRACE_RX_FRAMES */
-
-#ifdef WANT_TRACE_TX_FRAMES
- | OZ_TRACE_TX_FRAMES
-#endif /* WANT_TRACE_TX_FRAMES */
- ;
-#endif /* WANT_VERBOSE_TRACE */
-
diff --git a/drivers/staging/ozwpan/oztrace.h b/drivers/staging/ozwpan/oztrace.h
deleted file mode 100644
index 8293b24..0000000
--- a/drivers/staging/ozwpan/oztrace.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#ifndef _OZTRACE_H_
-#define _OZTRACE_H_
-#include "ozconfig.h"
-
-#define TRACE_PREFIX KERN_ALERT "OZWPAN: "
-
-#ifdef WANT_TRACE
-#define oz_trace(...) printk(TRACE_PREFIX __VA_ARGS__)
-#ifdef WANT_VERBOSE_TRACE
-extern unsigned long trace_flags;
-#define oz_trace2(_flag, ...) \
- do { if (trace_flags & _flag) printk(TRACE_PREFIX __VA_ARGS__); \
- } while (0)
-#else
-#define oz_trace2(...)
-#endif /* #ifdef WANT_VERBOSE_TRACE */
-#else
-#define oz_trace(...)
-#define oz_trace2(...)
-#endif /* #ifdef WANT_TRACE */
-
-#define OZ_TRACE_STREAM 0x1
-#define OZ_TRACE_URB 0x2
-#define OZ_TRACE_CTRL_DETAIL 0x4
-#define OZ_TRACE_HUB 0x8
-#define OZ_TRACE_RX_FRAMES 0x10
-#define OZ_TRACE_TX_FRAMES 0x20
-
-#endif /* Sentry */
-
diff --git a/drivers/staging/ozwpan/ozurbparanoia.c b/drivers/staging/ozwpan/ozurbparanoia.c
index 55b9afb..cf6278a 100644
--- a/drivers/staging/ozwpan/ozurbparanoia.c
+++ b/drivers/staging/ozwpan/ozurbparanoia.c
@@ -4,37 +4,39 @@
* -----------------------------------------------------------------------------
*/
#include <linux/usb.h>
-#include "ozconfig.h"
+#include "ozdbg.h"
+
#ifdef WANT_URB_PARANOIA
+
#include "ozurbparanoia.h"
-#include "oztrace.h"
-/*-----------------------------------------------------------------------------
- */
+
#define OZ_MAX_URBS 1000
struct urb *g_urb_memory[OZ_MAX_URBS];
int g_nb_urbs;
DEFINE_SPINLOCK(g_urb_mem_lock);
-/*-----------------------------------------------------------------------------
- */
+
void oz_remember_urb(struct urb *urb)
{
unsigned long irq_state;
+
spin_lock_irqsave(&g_urb_mem_lock, irq_state);
if (g_nb_urbs < OZ_MAX_URBS) {
g_urb_memory[g_nb_urbs++] = urb;
- oz_trace("%lu: urb up = %d %p\n", jiffies, g_nb_urbs, urb);
+ oz_dbg(ON, "urb up = %d %p\n", g_nb_urbs, urb);
} else {
- oz_trace("ERROR urb buffer full\n");
+ oz_dbg(ON, "ERROR urb buffer full\n");
}
spin_unlock_irqrestore(&g_urb_mem_lock, irq_state);
}
-/*------------------------------------------------------------------------------
+
+/*
*/
int oz_forget_urb(struct urb *urb)
{
unsigned long irq_state;
int i;
int rc = -1;
+
spin_lock_irqsave(&g_urb_mem_lock, irq_state);
for (i = 0; i < g_nb_urbs; i++) {
if (g_urb_memory[i] == urb) {
@@ -42,8 +44,7 @@ int oz_forget_urb(struct urb *urb)
if (--g_nb_urbs > i)
memcpy(&g_urb_memory[i], &g_urb_memory[i+1],
(g_nb_urbs - i) * sizeof(struct urb *));
- oz_trace("%lu: urb down = %d %p\n",
- jiffies, g_nb_urbs, urb);
+ oz_dbg(ON, "urb down = %d %p\n", g_nb_urbs, urb);
}
}
spin_unlock_irqrestore(&g_urb_mem_lock, irq_state);
diff --git a/drivers/staging/ozwpan/ozurbparanoia.h b/drivers/staging/ozwpan/ozurbparanoia.h
index 00f5a3a..5080ea7 100644
--- a/drivers/staging/ozwpan/ozurbparanoia.h
+++ b/drivers/staging/ozwpan/ozurbparanoia.h
@@ -10,8 +10,8 @@
void oz_remember_urb(struct urb *urb);
int oz_forget_urb(struct urb *urb);
#else
-#define oz_remember_urb(__x)
-#define oz_forget_urb(__x) 0
+static inline void oz_remember_urb(struct urb *urb) {}
+static inline int oz_forget_urb(struct urb *urb) { return 0; }
#endif /* WANT_URB_PARANOIA */
diff --git a/drivers/staging/ozwpan/ozusbsvc.c b/drivers/staging/ozwpan/ozusbsvc.c
index 543a941..cf26379 100644
--- a/drivers/staging/ozwpan/ozusbsvc.c
+++ b/drivers/staging/ozwpan/ozusbsvc.c
@@ -10,6 +10,7 @@
* The implementation of this service uses ozhcd.c to implement a USB HCD.
* -----------------------------------------------------------------------------
*/
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
@@ -18,35 +19,34 @@
#include <linux/errno.h>
#include <linux/input.h>
#include <asm/unaligned.h>
-#include "ozconfig.h"
+#include "ozdbg.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozusbif.h"
#include "ozhcd.h"
-#include "oztrace.h"
#include "ozusbsvc.h"
-#include "ozevent.h"
-/*------------------------------------------------------------------------------
+
+/*
* This is called once when the driver is loaded to initialise the USB service.
* Context: process
*/
int oz_usb_init(void)
{
- oz_event_log(OZ_EVT_SERVICE, 1, OZ_APPID_USB, NULL, 0);
return oz_hcd_init();
}
-/*------------------------------------------------------------------------------
+
+/*
* This is called once when the driver is unloaded to terminate the USB service.
* Context: process
*/
void oz_usb_term(void)
{
- oz_event_log(OZ_EVT_SERVICE, 2, OZ_APPID_USB, NULL, 0);
oz_hcd_term();
}
-/*------------------------------------------------------------------------------
+
+/*
* This is called when the USB service is started or resumed for a PD.
* Context: softirq
*/
@@ -55,12 +55,12 @@ int oz_usb_start(struct oz_pd *pd, int resume)
int rc = 0;
struct oz_usb_ctx *usb_ctx;
struct oz_usb_ctx *old_ctx;
- oz_event_log(OZ_EVT_SERVICE, 3, OZ_APPID_USB, NULL, resume);
+
if (resume) {
- oz_trace("USB service resumed.\n");
+ oz_dbg(ON, "USB service resumed\n");
return 0;
}
- oz_trace("USB service started.\n");
+ oz_dbg(ON, "USB service started\n");
/* Create a USB context in case we need one. If we find the PD already
* has a USB context then we will destroy it.
*/
@@ -81,7 +81,7 @@ int oz_usb_start(struct oz_pd *pd, int resume)
oz_usb_get(pd->app_ctx[OZ_APPID_USB-1]);
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
if (old_ctx) {
- oz_trace("Already have USB context.\n");
+ oz_dbg(ON, "Already have USB context\n");
kfree(usb_ctx);
usb_ctx = old_ctx;
} else if (usb_ctx) {
@@ -99,7 +99,7 @@ int oz_usb_start(struct oz_pd *pd, int resume)
} else {
usb_ctx->hport = oz_hcd_pd_arrived(usb_ctx);
if (usb_ctx->hport == NULL) {
- oz_trace("USB hub returned null port.\n");
+ oz_dbg(ON, "USB hub returned null port\n");
spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
pd->app_ctx[OZ_APPID_USB-1] = NULL;
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
@@ -110,16 +110,17 @@ int oz_usb_start(struct oz_pd *pd, int resume)
oz_usb_put(usb_ctx);
return rc;
}
-/*------------------------------------------------------------------------------
+
+/*
* This is called when the USB service is stopped or paused for a PD.
* Context: softirq or process
*/
void oz_usb_stop(struct oz_pd *pd, int pause)
{
struct oz_usb_ctx *usb_ctx;
- oz_event_log(OZ_EVT_SERVICE, 4, OZ_APPID_USB, NULL, pause);
+
if (pause) {
- oz_trace("USB service paused.\n");
+ oz_dbg(ON, "USB service paused\n");
return;
}
spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
@@ -127,8 +128,9 @@ void oz_usb_stop(struct oz_pd *pd, int pause)
pd->app_ctx[OZ_APPID_USB-1] = NULL;
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
if (usb_ctx) {
- unsigned long tout = jiffies + HZ;
- oz_trace("USB service stopping...\n");
+ struct timespec ts, now;
+ getnstimeofday(&ts);
+ oz_dbg(ON, "USB service stopping...\n");
usb_ctx->stopped = 1;
/* At this point the reference count on the usb context should
* be 2 - one from when we created it and one from the hcd
@@ -136,17 +138,21 @@ void oz_usb_stop(struct oz_pd *pd, int pause)
* should get in but someone may already be in. So wait
* until they leave but timeout after 1 second.
*/
- while ((atomic_read(&usb_ctx->ref_count) > 2) &&
- time_before(jiffies, tout))
- ;
- oz_trace("USB service stopped.\n");
+ while ((atomic_read(&usb_ctx->ref_count) > 2)) {
+ getnstimeofday(&now);
+ /* Approximately 1 second; this is not an exact calculation */
+ if (now.tv_sec != ts.tv_sec)
+ break;
+ }
+ oz_dbg(ON, "USB service stopped\n");
oz_hcd_pd_departed(usb_ctx->hport);
/* Release the reference taken in oz_usb_start.
*/
oz_usb_put(usb_ctx);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* This increments the reference count of the context area for a specific PD.
* This ensures this context area does not disappear while still in use.
* Context: softirq
@@ -154,29 +160,34 @@ void oz_usb_stop(struct oz_pd *pd, int pause)
void oz_usb_get(void *hpd)
{
struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+
atomic_inc(&usb_ctx->ref_count);
}
-/*------------------------------------------------------------------------------
+
+/*
* This decrements the reference count of the context area for a specific PD
* and destroys the context area if the reference count becomes zero.
- * Context: softirq or process
+ * Context: irq or process
*/
void oz_usb_put(void *hpd)
{
struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+
if (atomic_dec_and_test(&usb_ctx->ref_count)) {
- oz_trace("Dealloc USB context.\n");
+ oz_dbg(ON, "Dealloc USB context\n");
oz_pd_put(usb_ctx->pd);
kfree(usb_ctx);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
int oz_usb_heartbeat(struct oz_pd *pd)
{
struct oz_usb_ctx *usb_ctx;
int rc = 0;
+
spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB-1];
if (usb_ctx)
@@ -193,14 +204,16 @@ done:
oz_usb_put(usb_ctx);
return rc;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
int oz_usb_stream_create(void *hpd, u8 ep_num)
{
struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
struct oz_pd *pd = usb_ctx->pd;
- oz_trace("oz_usb_stream_create(0x%x)\n", ep_num);
+
+ oz_dbg(ON, "%s: (0x%x)\n", __func__, ep_num);
if (pd->mode & OZ_F_ISOC_NO_ELTS) {
oz_isoc_stream_create(pd, ep_num);
} else {
@@ -213,16 +226,18 @@ int oz_usb_stream_create(void *hpd, u8 ep_num)
}
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
int oz_usb_stream_delete(void *hpd, u8 ep_num)
{
struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+
if (usb_ctx) {
struct oz_pd *pd = usb_ctx->pd;
if (pd) {
- oz_trace("oz_usb_stream_delete(0x%x)\n", ep_num);
+ oz_dbg(ON, "%s: (0x%x)\n", __func__, ep_num);
if (pd->mode & OZ_F_ISOC_NO_ELTS) {
oz_isoc_stream_delete(pd, ep_num);
} else {
@@ -234,12 +249,14 @@ int oz_usb_stream_delete(void *hpd, u8 ep_num)
}
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
void oz_usb_request_heartbeat(void *hpd)
{
struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+
if (usb_ctx && usb_ctx->pd)
oz_pd_request_heartbeat(usb_ctx->pd);
}
diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c
index 4e4b650..228bffa 100644
--- a/drivers/staging/ozwpan/ozusbsvc1.c
+++ b/drivers/staging/ozwpan/ozusbsvc1.c
@@ -13,20 +13,18 @@
#include <linux/errno.h>
#include <linux/input.h>
#include <asm/unaligned.h>
-#include "ozconfig.h"
+#include "ozdbg.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozusbif.h"
#include "ozhcd.h"
-#include "oztrace.h"
#include "ozusbsvc.h"
-#include "ozevent.h"
-/*------------------------------------------------------------------------------
- */
+
#define MAX_ISOC_FIXED_DATA (253-sizeof(struct oz_isoc_fixed))
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static int oz_usb_submit_elt(struct oz_elt_buf *eb, struct oz_elt_info *ei,
@@ -35,6 +33,7 @@ static int oz_usb_submit_elt(struct oz_elt_buf *eb, struct oz_elt_info *ei,
int ret;
struct oz_elt *elt = (struct oz_elt *)ei->data;
struct oz_app_hdr *app_hdr = (struct oz_app_hdr *)(elt+1);
+
elt->type = OZ_ELT_APP_DATA;
ei->app_id = OZ_APPID_USB;
ei->length = elt->length + sizeof(struct oz_elt);
@@ -51,7 +50,8 @@ static int oz_usb_submit_elt(struct oz_elt_buf *eb, struct oz_elt_info *ei,
spin_unlock_bh(&eb->lock);
return ret;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
@@ -63,12 +63,13 @@ int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
struct oz_get_desc_req *body;
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
- oz_trace(" req_type = 0x%x\n", req_type);
- oz_trace(" desc_type = 0x%x\n", desc_type);
- oz_trace(" index = 0x%x\n", index);
- oz_trace(" windex = 0x%x\n", windex);
- oz_trace(" offset = 0x%x\n", offset);
- oz_trace(" len = 0x%x\n", len);
+
+ oz_dbg(ON, " req_type = 0x%x\n", req_type);
+ oz_dbg(ON, " desc_type = 0x%x\n", desc_type);
+ oz_dbg(ON, " index = 0x%x\n", index);
+ oz_dbg(ON, " windex = 0x%x\n", windex);
+ oz_dbg(ON, " offset = 0x%x\n", offset);
+ oz_dbg(ON, " len = 0x%x\n", len);
if (len > 200)
len = 200;
if (ei == NULL)
@@ -86,7 +87,8 @@ int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
body->index = index;
return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: tasklet
*/
static int oz_usb_set_config_req(void *hpd, u8 req_id, u8 index)
@@ -97,6 +99,7 @@ static int oz_usb_set_config_req(void *hpd, u8 req_id, u8 index)
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
struct oz_set_config_req *body;
+
if (ei == NULL)
return -1;
elt = (struct oz_elt *)ei->data;
@@ -107,7 +110,8 @@ static int oz_usb_set_config_req(void *hpd, u8 req_id, u8 index)
body->index = index;
return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: tasklet
*/
static int oz_usb_set_interface_req(void *hpd, u8 req_id, u8 index, u8 alt)
@@ -118,6 +122,7 @@ static int oz_usb_set_interface_req(void *hpd, u8 req_id, u8 index, u8 alt)
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
struct oz_set_interface_req *body;
+
if (ei == NULL)
return -1;
elt = (struct oz_elt *)ei->data;
@@ -129,7 +134,8 @@ static int oz_usb_set_interface_req(void *hpd, u8 req_id, u8 index, u8 alt)
body->alternative = alt;
return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: tasklet
*/
static int oz_usb_set_clear_feature_req(void *hpd, u8 req_id, u8 type,
@@ -141,6 +147,7 @@ static int oz_usb_set_clear_feature_req(void *hpd, u8 req_id, u8 type,
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
struct oz_feature_req *body;
+
if (ei == NULL)
return -1;
elt = (struct oz_elt *)ei->data;
@@ -153,7 +160,8 @@ static int oz_usb_set_clear_feature_req(void *hpd, u8 req_id, u8 type,
put_unaligned(feature, &body->feature);
return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: tasklet
*/
static int oz_usb_vendor_class_req(void *hpd, u8 req_id, u8 req_type,
@@ -165,6 +173,7 @@ static int oz_usb_vendor_class_req(void *hpd, u8 req_id, u8 req_type,
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
struct oz_vendor_class_req *body;
+
if (ei == NULL)
return -1;
elt = (struct oz_elt *)ei->data;
@@ -180,7 +189,8 @@ static int oz_usb_vendor_class_req(void *hpd, u8 req_id, u8 req_type,
memcpy(body->data, data, data_len);
return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: tasklet
*/
int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup,
@@ -190,10 +200,7 @@ int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup,
unsigned windex = le16_to_cpu(setup->wIndex);
unsigned wlength = le16_to_cpu(setup->wLength);
int rc = 0;
- oz_event_log(OZ_EVT_CTRL_REQ, setup->bRequest, req_id,
- (void *)(((unsigned long)(setup->wValue))<<16 |
- ((unsigned long)setup->wIndex)),
- setup->bRequestType);
+
if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
switch (setup->bRequest) {
case USB_REQ_GET_DESCRIPTOR:
@@ -231,7 +238,8 @@ int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup,
}
return rc;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb)
@@ -302,13 +310,15 @@ int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb)
}
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
struct oz_usb_hdr *usb_hdr, int len)
{
struct oz_data *data_hdr = (struct oz_data *)usb_hdr;
+
switch (data_hdr->format) {
case OZ_DATA_F_MULTIPLE_FIXED: {
struct oz_multiple_fixed *body =
@@ -344,7 +354,8 @@ static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
}
}
-/*------------------------------------------------------------------------------
+
+/*
* This is called when the PD has received a USB element. The type of element
* is determined and is then passed to an appropriate handler function.
* Context: softirq-serialized
@@ -381,7 +392,7 @@ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
u16 offs = le16_to_cpu(get_unaligned(&body->offset));
u16 total_size =
le16_to_cpu(get_unaligned(&body->total_size));
- oz_trace("USB_REQ_GET_DESCRIPTOR - cnf\n");
+ oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n");
oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id,
body->rcode, body->data,
data_len, offs, total_size);
@@ -416,12 +427,14 @@ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
done:
oz_usb_put(usb_ctx);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq, process
*/
void oz_usb_farewell(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len)
{
struct oz_usb_ctx *usb_ctx;
+
spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB-1];
if (usb_ctx)
@@ -430,7 +443,7 @@ void oz_usb_farewell(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len)
if (usb_ctx == NULL)
return; /* Context has gone so nothing to do. */
if (!usb_ctx->stopped) {
- oz_trace("Farewell indicated ep = 0x%x\n", ep_num);
+ oz_dbg(ON, "Farewell indicated ep = 0x%x\n", ep_num);
oz_hcd_data_ind(usb_ctx->hport, ep_num, data, len);
}
oz_usb_put(usb_ctx);
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index c54df39..cbc15c1 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -1756,17 +1756,18 @@ static inline int input_state_high(struct logical_input *input)
if (input->high_timer == 0) {
char *press_str = input->u.kbd.press_str;
- if (press_str[0])
- keypad_send_key(press_str,
- sizeof(input->u.kbd.press_str));
+ if (press_str[0]) {
+ int s = sizeof(input->u.kbd.press_str);
+ keypad_send_key(press_str, s);
+ }
}
if (input->u.kbd.repeat_str[0]) {
char *repeat_str = input->u.kbd.repeat_str;
if (input->high_timer >= KEYPAD_REP_START) {
+ int s = sizeof(input->u.kbd.repeat_str);
input->high_timer -= KEYPAD_REP_DELAY;
- keypad_send_key(repeat_str,
- sizeof(input->u.kbd.repeat_str));
+ keypad_send_key(repeat_str, s);
}
/* we will need to come back here soon */
inputs_stable = 0;
@@ -1802,10 +1803,11 @@ static inline void input_state_falling(struct logical_input *input)
if (input->u.kbd.repeat_str[0]) {
char *repeat_str = input->u.kbd.repeat_str;
- if (input->high_timer >= KEYPAD_REP_START)
+ if (input->high_timer >= KEYPAD_REP_START) {
+ int s = sizeof(input->u.kbd.repeat_str);
input->high_timer -= KEYPAD_REP_DELAY;
- keypad_send_key(repeat_str,
- sizeof(input->u.kbd.repeat_str));
+ keypad_send_key(repeat_str, s);
+ }
/* we will need to come back here soon */
inputs_stable = 0;
}
@@ -1822,9 +1824,10 @@ static inline void input_state_falling(struct logical_input *input)
release_fct(input->u.std.release_data);
} else if (input->type == INPUT_TYPE_KBD) {
char *release_str = input->u.kbd.release_str;
- if (release_str[0])
- keypad_send_key(release_str,
- sizeof(input->u.kbd.release_str));
+ if (release_str[0]) {
+ int s = sizeof(input->u.kbd.release_str);
+ keypad_send_key(release_str, s);
+ }
}
input->state = INPUT_ST_LOW;
diff --git a/drivers/staging/quickstart/quickstart.c b/drivers/staging/quickstart/quickstart.c
index adb8da5..4247d60 100644
--- a/drivers/staging/quickstart/quickstart.c
+++ b/drivers/staging/quickstart/quickstart.c
@@ -71,9 +71,8 @@ static struct quickstart_button *pressed;
static struct input_dev *quickstart_input;
/* Platform driver functions */
-static ssize_t quickstart_buttons_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t buttons_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int count = 0;
struct quickstart_button *b;
@@ -94,18 +93,17 @@ static ssize_t quickstart_buttons_show(struct device *dev,
return count;
}
-static ssize_t quickstart_pressed_button_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t pressed_button_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%s\n",
(pressed ? pressed->name : "none"));
}
-static ssize_t quickstart_pressed_button_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t pressed_button_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
if (count < 2)
return -EINVAL;
@@ -319,9 +317,8 @@ static int quickstart_acpi_remove(struct acpi_device *device)
}
/* Platform driver structs */
-static DEVICE_ATTR(pressed_button, 0666, quickstart_pressed_button_show,
- quickstart_pressed_button_store);
-static DEVICE_ATTR(buttons, 0444, quickstart_buttons_show, NULL);
+static DEVICE_ATTR_RW(pressed_button);
+static DEVICE_ATTR_RO(buttons);
static struct platform_device *pf_device;
static struct platform_driver pf_driver = {
.driver = {
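The quickstart.c hunks above rely on the naming contract behind DEVICE_ATTR_RW()/DEVICE_ATTR_RO(): the macros expect callbacks named <attr>_show and <attr>_store and supply conventional permissions themselves (0644 and 0444), which is why the functions are renamed and why the old explicit 0666 mode on pressed_button effectively becomes 0644. A minimal self-contained sketch of the convention; my_value is an illustrative attribute, not something from the patch:

	#include <linux/device.h>
	#include <linux/kernel.h>

	static int my_value;

	/* DEVICE_ATTR_RW(my_value) looks for exactly these two names. */
	static ssize_t my_value_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
	{
		return scnprintf(buf, PAGE_SIZE, "%d\n", my_value);
	}

	static ssize_t my_value_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		if (kstrtoint(buf, 0, &my_value))
			return -EINVAL;
		return count;
	}

	/* Expands to struct device_attribute dev_attr_my_value, mode 0644. */
	static DEVICE_ATTR_RW(my_value);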
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211.h b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
index 8fc9f58..7f01549 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
@@ -14,7 +14,7 @@
* Copyright (c) 2004, Intel Corporation
*
* Modified for Realtek's wi-fi cards by Andrea Merello
- * <andreamrl@tiscali.it>
+ * <andrea.merello@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
index e303159..10b2210 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
@@ -14,7 +14,7 @@
******************************************************************************
Few modifications for Realtek's Wi-Fi drivers by
- Andrea Merello <andreamrl@tiscali.it>
+ Andrea Merello <andrea.merello@gmail.com>
A special thanks goes to Realtek for their support !
@@ -399,8 +399,8 @@ static int is_duplicate_packet(struct ieee80211_device *ieee,
struct ieee_ibss_seq *entry = NULL;
u8 *mac = header->addr2;
int index = mac[5] % IEEE_IBSS_MAC_HASH_SIZE;
- //for (pos = (head)->next; pos != (head); pos = pos->next)
- __list_for_each(p, &ieee->ibss_mac_hash[index]) {
+
+ list_for_each(p, &ieee->ibss_mac_hash[index]) {
entry = list_entry(p, struct ieee_ibss_seq, list);
if (!memcmp(entry->mac, mac, ETH_ALEN))
break;
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
index 00f9af0..b65db54 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
@@ -1,5 +1,5 @@
/* IEEE 802.11 SoftMAC layer
- * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
*
* Mostly extracted from the rtl8180-sa2400 driver for the
* in-kernel generic ieee802.11 stack.
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c
index d9add53..e528206 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c
@@ -1,5 +1,5 @@
/* IEEE 802.11 SoftMAC layer
- * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
*
* Mostly extracted from the rtl8180-sa2400 driver for the
* in-kernel generic ieee802.11 stack.
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
index 89ed86e..b346653 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
@@ -25,7 +25,7 @@
******************************************************************************
Few modifications for Realtek's Wi-Fi drivers by
- Andrea Merello <andreamrl@tiscali.it>
+ Andrea Merello <andrea.merello@gmail.com>
A special thanks goes to Realtek for their support !
diff --git a/drivers/staging/rtl8187se/r8180.h b/drivers/staging/rtl8187se/r8180.h
index edacc80..d052f4a 100644
--- a/drivers/staging/rtl8187se/r8180.h
+++ b/drivers/staging/rtl8187se/r8180.h
@@ -1,6 +1,6 @@
/*
This is part of rtl8180 OpenSource driver.
- Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it>
+ Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
Released under the terms of GPL (General Public Licence)
Parts of this driver are based on the GPL part of the
diff --git a/drivers/staging/rtl8187se/r8180_93cx6.h b/drivers/staging/rtl8187se/r8180_93cx6.h
index 79e7391..b52b5b0 100644
--- a/drivers/staging/rtl8187se/r8180_93cx6.h
+++ b/drivers/staging/rtl8187se/r8180_93cx6.h
@@ -1,6 +1,6 @@
/*
This is part of rtl8180 OpenSource driver
- Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it>
+ Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
Released under the terms of GPL (General Public Licence)
Parts of this driver are based on the GPL part of the official realtek driver
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index ca69155..5947a6f 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -1,6 +1,6 @@
/*
This is part of rtl818x pci OpenSource driver - v 0.1
- Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it>
+ Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
Released under the terms of GPL (General Public License)
Parts of this driver are based on the GPL part of the official
@@ -70,7 +70,7 @@ static int hwwep;
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, rtl8180_pci_id_tbl);
-MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
+MODULE_AUTHOR("Andrea Merello <andrea.merello@gmail.com>");
MODULE_DESCRIPTION("Linux driver for Realtek RTL8187SE WiFi cards");
module_param_string(ifname, ifname, sizeof(ifname), S_IRUGO|S_IWUSR);
@@ -197,7 +197,7 @@ inline void force_pci_posting(struct net_device *dev)
mb();
}
-irqreturn_t rtl8180_interrupt(int irq, void *netdev, struct pt_regs *regs);
+static irqreturn_t rtl8180_interrupt(int irq, void *netdev);
void set_nic_rxring(struct net_device *dev);
void set_nic_txring(struct net_device *dev);
static struct net_device_stats *rtl8180_stats(struct net_device *dev);
@@ -2666,7 +2666,7 @@ short rtl8180_init(struct net_device *dev)
TX_BEACON_RING_ADDR))
return -ENOMEM;
- if (request_irq(dev->irq, (void *)rtl8180_interrupt, IRQF_SHARED, dev->name, dev)) {
+ if (request_irq(dev->irq, rtl8180_interrupt, IRQF_SHARED, dev->name, dev)) {
DMESGE("Error allocating IRQ %d", dev->irq);
return -1;
} else {
@@ -3537,7 +3537,7 @@ void rtl8180_tx_isr(struct net_device *dev, int pri, short error)
spin_unlock_irqrestore(&priv->tx_lock, flag);
}
-irqreturn_t rtl8180_interrupt(int irq, void *netdev, struct pt_regs *regs)
+irqreturn_t rtl8180_interrupt(int irq, void *netdev)
{
struct net_device *dev = (struct net_device *) netdev;
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
diff --git a/drivers/staging/rtl8187se/r8180_hw.h b/drivers/staging/rtl8187se/r8180_hw.h
index 5339381..92c05af 100644
--- a/drivers/staging/rtl8187se/r8180_hw.h
+++ b/drivers/staging/rtl8187se/r8180_hw.h
@@ -1,6 +1,6 @@
/*
This is part of rtl8180 OpenSource driver.
- Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it>
+ Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
Released under the terms of GPL (General Public Licence)
Parts of this driver are based on the GPL part of the
diff --git a/drivers/staging/rtl8187se/r8180_rtl8225.h b/drivers/staging/rtl8187se/r8180_rtl8225.h
index c6f2128..c94ca07 100644
--- a/drivers/staging/rtl8187se/r8180_rtl8225.h
+++ b/drivers/staging/rtl8187se/r8180_rtl8225.h
@@ -1,7 +1,7 @@
/*
This is part of the rtl8180-sa2400 driver
released under the GPL (See file COPYING for details).
- Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it>
+ Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
This files contains programming code for the rtl8225
radio frontend.
diff --git a/drivers/staging/rtl8187se/r8180_rtl8225z2.c b/drivers/staging/rtl8187se/r8180_rtl8225z2.c
index c592f79..9ae96b7 100644
--- a/drivers/staging/rtl8187se/r8180_rtl8225z2.c
+++ b/drivers/staging/rtl8187se/r8180_rtl8225z2.c
@@ -1,7 +1,7 @@
/*
* This is part of the rtl8180-sa2400 driver
* released under the GPL (See file COPYING for details).
- * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
*
* This files contains programming code for the rtl8225
* radio frontend.
diff --git a/drivers/staging/rtl8187se/r8180_wx.c b/drivers/staging/rtl8187se/r8180_wx.c
index 156b758..dab7875 100644
--- a/drivers/staging/rtl8187se/r8180_wx.c
+++ b/drivers/staging/rtl8187se/r8180_wx.c
@@ -2,7 +2,7 @@
This file contains wireless extension handlers.
This is part of rtl8180 OpenSource driver.
- Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it>
+ Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
Released under the terms of GPL (General Public Licence)
Parts of this driver are based on the GPL part
diff --git a/drivers/staging/rtl8187se/r8180_wx.h b/drivers/staging/rtl8187se/r8180_wx.h
index 4081914..d471520 100644
--- a/drivers/staging/rtl8187se/r8180_wx.h
+++ b/drivers/staging/rtl8187se/r8180_wx.h
@@ -1,6 +1,6 @@
/*
This is part of rtl8180 OpenSource driver - v 0.3
- Copyright (C) Andrea Merello 2004 <andreamrl@tiscali.it>
+ Copyright (C) Andrea Merello 2004 <andrea.merello@gmail.com>
Released under the terms of GPL (General Public Licence)
Parts of this driver are based on the GPL part of the official realtek driver
diff --git a/drivers/staging/rtl8188eu/Kconfig b/drivers/staging/rtl8188eu/Kconfig
new file mode 100644
index 0000000..c9c548f
--- /dev/null
+++ b/drivers/staging/rtl8188eu/Kconfig
@@ -0,0 +1,29 @@
+config R8188EU
+ tristate "Realtek RTL8188EU Wireless LAN NIC driver"
+ depends on WLAN && USB
+ select WIRELESS_EXT
+ select WEXT_PRIV
+ default n
+ ---help---
+ This option adds support for Realtek RTL8188EU-based USB Wi-Fi adapters, such as the TP-Link TL-WN725N.
+ If built as a module, it will be called r8188eu.
+
+if R8188EU
+
+config 88EU_AP_MODE
+ bool "Realtek RTL8188EU AP mode"
+ default y
+ ---help---
+ This option enables Access Point mode. Unless you know that your system
+ will never be used as an AP, or the target system has limited memory,
+ "Y" should be selected.
+
+config 88EU_P2P
+ bool "Realtek RTL8188EU Peer-to-peer mode"
+ default y
+ ---help---
+ This option enables peer-to-peer mode for the r8188eu driver. Unless you
+ know that peer-to-peer (P2P) mode will never be used, or the target system has
+ limited memory, "Y" should be selected.
+
+endif
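The two bool options above become CONFIG_88EU_AP_MODE and CONFIG_88EU_P2P at build time and gate whole code paths in the driver (the new rtw_ap.c below is wrapped in #ifdef CONFIG_88EU_AP_MODE). A minimal sketch of that pattern, using a hypothetical helper name:

    /* Sketch only: Kconfig bools turn into CONFIG_* preprocessor symbols. */
    #include <linux/printk.h>

    static void r8188eu_report_features(void)	/* hypothetical helper */
    {
    #ifdef CONFIG_88EU_AP_MODE
    	pr_info("r8188eu: AP mode support compiled in\n");
    #endif
    #ifdef CONFIG_88EU_P2P
    	pr_info("r8188eu: P2P support compiled in\n");
    #endif
    }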
diff --git a/drivers/staging/rtl8188eu/Makefile b/drivers/staging/rtl8188eu/Makefile
new file mode 100644
index 0000000..1639a45
--- /dev/null
+++ b/drivers/staging/rtl8188eu/Makefile
@@ -0,0 +1,70 @@
+EXTRA_CFLAGS += -I$(src)/include
+
+r8188eu-y := \
+ core/rtw_ap.o \
+ core/rtw_br_ext.o \
+ core/rtw_cmd.o \
+ core/rtw_debug.o \
+ core/rtw_efuse.o \
+ core/rtw_ieee80211.o \
+ core/rtw_io.o \
+ core/rtw_ioctl_set.o \
+ core/rtw_iol.o \
+ core/rtw_led.o \
+ core/rtw_mlme.o \
+ core/rtw_mlme_ext.o \
+ core/rtw_mp.o \
+ core/rtw_mp_ioctl.o \
+ core/rtw_pwrctrl.o \
+ core/rtw_p2p.o \
+ core/rtw_recv.o \
+ core/rtw_rf.o \
+ core/rtw_security.o \
+ core/rtw_sreset.o \
+ core/rtw_sta_mgt.o \
+ core/rtw_wlan_util.o \
+ core/rtw_xmit.o \
+ hal/HalHWImg8188E_MAC.o \
+ hal/HalHWImg8188E_BB.o \
+ hal/HalHWImg8188E_RF.o \
+ hal/HalPhyRf.o \
+ hal/HalPhyRf_8188e.o \
+ hal/HalPwrSeqCmd.o \
+ hal/Hal8188EFWImg_CE.o \
+ hal/Hal8188EPwrSeq.o \
+ hal/Hal8188ERateAdaptive.o \
+ hal/hal_intf.o \
+ hal/hal_com.o \
+ hal/odm.o \
+ hal/odm_debug.o \
+ hal/odm_interface.o \
+ hal/odm_HWConfig.o \
+ hal/odm_RegConfig8188E.o\
+ hal/odm_RTL8188E.o \
+ hal/rtl8188e_cmd.o \
+ hal/rtl8188e_dm.o \
+ hal/rtl8188e_hal_init.o \
+ hal/rtl8188e_mp.o \
+ hal/rtl8188e_phycfg.o \
+ hal/rtl8188e_rf6052.o \
+ hal/rtl8188e_rxdesc.o \
+ hal/rtl8188e_sreset.o \
+ hal/rtl8188e_xmit.o \
+ hal/rtl8188eu_led.o \
+ hal/rtl8188eu_recv.o \
+ hal/rtl8188eu_xmit.o \
+ hal/usb_halinit.o \
+ hal/usb_ops_linux.o \
+ os_dep/ioctl_linux.o \
+ os_dep/mlme_linux.o \
+ os_dep/os_intfs.o \
+ os_dep/osdep_service.o \
+ os_dep/recv_linux.o \
+ os_dep/rtw_android.o \
+ os_dep/usb_intf.o \
+ os_dep/usb_ops_linux.o \
+ os_dep/xmit_linux.o
+
+obj-$(CONFIG_R8188EU) := r8188eu.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/staging/rtl8188eu/TODO b/drivers/staging/rtl8188eu/TODO
new file mode 100644
index 0000000..e50aa50
--- /dev/null
+++ b/drivers/staging/rtl8188eu/TODO
@@ -0,0 +1,15 @@
+TODO:
+- find and remove remaining code valid only for 5 GHz. Most of the obvious
+ ones have been removed, but things like channel > 14 still exist.
+- find and remove any code for other chips that is left over
+- convert to external firmware
+- convert any remaining unusual variable types
+- find code that can use %pM and %Nph formatting (see the example after this list)
+- checkpatch.pl fixes - most of the remaining ones are lines too long. Many
+ of them will require refactoring
+- merge Realtek's bugfixes and new features into the driver
+- switch to use LIB80211
+- switch to use MAC80211
+
+Please send any patches to Greg Kroah-Hartman <gregkh@linux.com>,
+and Larry Finger <Larry.Finger@lwfinger.net>.
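For the %pM / %Nph item above: the kernel's printk already supports %pM for colon-separated MAC addresses and %*ph (for example %6ph) for short hex dumps, and the new rtw_ap.c below uses both. A minimal sketch, with a hypothetical function name:

    #include <linux/printk.h>
    #include <linux/types.h>

    static void print_sta_addr_example(const u8 *hwaddr)	/* hypothetical */
    {
    	/* %pM prints "00:1c:bf:01:02:03"; %6ph prints "00 1c bf 01 02 03" */
    	pr_debug("asoc expire %pM\n", hwaddr);
    	pr_debug("auth expire %6ph\n", hwaddr);
    }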
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
new file mode 100644
index 0000000..2c73823
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -0,0 +1,1988 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_AP_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <wifi.h>
+#include <ieee80211.h>
+
+#ifdef CONFIG_88EU_AP_MODE
+
+void init_mlme_ap_info(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+
+
+ _rtw_spinlock_init(&pmlmepriv->bcn_update_lock);
+
+ /* for ACL */
+ _rtw_init_queue(&pacl_list->acl_node_q);
+
+ start_ap_mode(padapter);
+}
+
+void free_mlme_ap_info(struct adapter *padapter)
+{
+ unsigned long irqL;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ pmlmepriv->update_bcn = false;
+ pmlmeext->bstart_bss = false;
+
+ rtw_sta_flush(padapter);
+
+ pmlmeinfo->state = _HW_STATE_NOLINK_;
+
+ /* free_assoc_sta_resources */
+ rtw_free_all_stainfo(padapter);
+
+ /* free bc/mc sta_info */
+ psta = rtw_get_bcmc_stainfo(padapter);
+ _enter_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+ rtw_free_stainfo(padapter, psta);
+ _exit_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+
+ _rtw_spinlock_free(&pmlmepriv->bcn_update_lock);
+}
+
+static void update_BCNTIM(struct adapter *padapter)
+{
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork_mlmeext = &(pmlmeinfo->network);
+ unsigned char *pie = pnetwork_mlmeext->IEs;
+
+ /* update TIM IE */
+ if (true) {
+ u8 *p, *dst_ie, *premainder_ie = NULL;
+ u8 *pbackup_remainder_ie = NULL;
+ __le16 tim_bitmap_le;
+ uint offset, tmp_len, tim_ielen, tim_ie_offset, remainder_ielen;
+
+ tim_bitmap_le = cpu_to_le16(pstapriv->tim_bitmap);
+
+ p = rtw_get_ie(pie + _FIXED_IE_LENGTH_, _TIM_IE_, &tim_ielen, pnetwork_mlmeext->IELength - _FIXED_IE_LENGTH_);
+ if (p != NULL && tim_ielen > 0) {
+ tim_ielen += 2;
+ premainder_ie = p+tim_ielen;
+ tim_ie_offset = (int)(p - pie);
+ remainder_ielen = pnetwork_mlmeext->IELength - tim_ie_offset - tim_ielen;
+ /* append TIM IE from dst_ie offset */
+ dst_ie = p;
+ } else {
+ tim_ielen = 0;
+
+ /* calculate head_len */
+ offset = _FIXED_IE_LENGTH_;
+ offset += pnetwork_mlmeext->Ssid.SsidLength + 2;
+
+ /* get supported rates len */
+ p = rtw_get_ie(pie + _BEACON_IE_OFFSET_, _SUPPORTEDRATES_IE_, &tmp_len, (pnetwork_mlmeext->IELength - _BEACON_IE_OFFSET_));
+ if (p != NULL)
+ offset += tmp_len+2;
+
+ /* DS Parameter Set IE, len = 3 */
+ offset += 3;
+
+ premainder_ie = pie + offset;
+
+ remainder_ielen = pnetwork_mlmeext->IELength - offset - tim_ielen;
+
+ /* append TIM IE from offset */
+ dst_ie = pie + offset;
+ }
+
+ if (remainder_ielen > 0) {
+ pbackup_remainder_ie = rtw_malloc(remainder_ielen);
+ if (pbackup_remainder_ie && premainder_ie)
+ memcpy(pbackup_remainder_ie, premainder_ie, remainder_ielen);
+ }
+ *dst_ie++ = _TIM_IE_;
+
+ if ((pstapriv->tim_bitmap&0xff00) && (pstapriv->tim_bitmap&0x00fc))
+ tim_ielen = 5;
+ else
+ tim_ielen = 4;
+
+ *dst_ie++ = tim_ielen;
+
+ *dst_ie++ = 0;/* DTIM count */
+ *dst_ie++ = 1;/* DTIM period */
+
+ if (pstapriv->tim_bitmap&BIT(0))/* for bc/mc frames */
+ *dst_ie++ = BIT(0);/* bitmap ctrl */
+ else
+ *dst_ie++ = 0;
+
+ if (tim_ielen == 4) {
+ *dst_ie++ = *(u8 *)&tim_bitmap_le;
+ } else if (tim_ielen == 5) {
+ memcpy(dst_ie, &tim_bitmap_le, 2);
+ dst_ie += 2;
+ }
+
+ /* copy remainder IE */
+ if (pbackup_remainder_ie) {
+ memcpy(dst_ie, pbackup_remainder_ie, remainder_ielen);
+
+ kfree(pbackup_remainder_ie);
+ }
+ offset = (uint)(dst_ie - pie);
+ pnetwork_mlmeext->IELength = offset + remainder_ielen;
+ }
+
+ set_tx_beacon_cmd(padapter);
+}
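+
+/* Example of the TIM element built above (assuming _TIM_IE_ == 5, the
+ * 802.11 TIM element ID): with tim_bitmap == 0x0001 (only the bc/mc bit)
+ * the bytes written are 05 04 00 01 01 01, i.e. element ID, length 4,
+ * DTIM count 0, DTIM period 1, bitmap control BIT(0), and one
+ * partial-virtual-bitmap byte.
+ */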
+
+void rtw_add_bcn_ie(struct adapter *padapter, struct wlan_bssid_ex *pnetwork, u8 index, u8 *data, u8 len)
+{
+ struct ndis_802_11_var_ie *pIE;
+ u8 bmatch = false;
+ u8 *pie = pnetwork->IEs;
+ u8 *p = NULL, *dst_ie = NULL, *premainder_ie = NULL;
+ u8 *pbackup_remainder_ie = NULL;
+ u32 i, offset, ielen = 0, ie_offset, remainder_ielen = 0;
+
+ for (i = sizeof(struct ndis_802_11_fixed_ie); i < pnetwork->IELength;) {
+ pIE = (struct ndis_802_11_var_ie *)(pnetwork->IEs + i);
+
+ if (pIE->ElementID > index) {
+ break;
+ } else if (pIE->ElementID == index) { /* the same IE already exists */
+ p = (u8 *)pIE;
+ ielen = pIE->Length;
+ bmatch = true;
+ break;
+ }
+ p = (u8 *)pIE;
+ ielen = pIE->Length;
+ i += (pIE->Length + 2);
+ }
+
+ if (p != NULL && ielen > 0) {
+ ielen += 2;
+
+ premainder_ie = p+ielen;
+
+ ie_offset = (int)(p - pie);
+
+ remainder_ielen = pnetwork->IELength - ie_offset - ielen;
+
+ if (bmatch)
+ dst_ie = p;
+ else
+ dst_ie = (p+ielen);
+ }
+
+ if (remainder_ielen > 0) {
+ pbackup_remainder_ie = rtw_malloc(remainder_ielen);
+ if (pbackup_remainder_ie && premainder_ie)
+ memcpy(pbackup_remainder_ie, premainder_ie, remainder_ielen);
+ }
+
+ *dst_ie++ = index;
+ *dst_ie++ = len;
+
+ memcpy(dst_ie, data, len);
+ dst_ie += len;
+
+ /* copy remainder IE */
+ if (pbackup_remainder_ie) {
+ memcpy(dst_ie, pbackup_remainder_ie, remainder_ielen);
+
+ kfree(pbackup_remainder_ie);
+ }
+
+ offset = (uint)(dst_ie - pie);
+ pnetwork->IELength = offset + remainder_ielen;
+}
+
+void rtw_remove_bcn_ie(struct adapter *padapter, struct wlan_bssid_ex *pnetwork, u8 index)
+{
+ u8 *p, *dst_ie = NULL, *premainder_ie = NULL;
+ u8 *pbackup_remainder_ie = NULL;
+ uint offset, ielen, ie_offset, remainder_ielen = 0;
+ u8 *pie = pnetwork->IEs;
+
+ p = rtw_get_ie(pie + _FIXED_IE_LENGTH_, index, &ielen,
+ pnetwork->IELength - _FIXED_IE_LENGTH_);
+ if (p != NULL && ielen > 0) {
+ ielen += 2;
+
+ premainder_ie = p+ielen;
+
+ ie_offset = (int)(p - pie);
+
+ remainder_ielen = pnetwork->IELength - ie_offset - ielen;
+
+ dst_ie = p;
+ }
+
+ if (remainder_ielen > 0) {
+ pbackup_remainder_ie = rtw_malloc(remainder_ielen);
+ if (pbackup_remainder_ie && premainder_ie)
+ memcpy(pbackup_remainder_ie, premainder_ie, remainder_ielen);
+ }
+
+ /* copy remainder IE */
+ if (pbackup_remainder_ie) {
+ memcpy(dst_ie, pbackup_remainder_ie, remainder_ielen);
+
+ kfree(pbackup_remainder_ie);
+ }
+
+ offset = (uint)(dst_ie - pie);
+ pnetwork->IELength = offset + remainder_ielen;
+}
+
+static u8 chk_sta_is_alive(struct sta_info *psta)
+{
+ u8 ret = false;
+
+ if ((psta->sta_stats.last_rx_data_pkts + psta->sta_stats.last_rx_ctrl_pkts) !=
+ (psta->sta_stats.rx_data_pkts + psta->sta_stats.rx_ctrl_pkts))
+ ret = true;
+
+ sta_update_last_rx_pkts(psta);
+
+ return ret;
+}
+
+void expire_timeout_chk(struct adapter *padapter)
+{
+ unsigned long irqL;
+ struct list_head *phead, *plist;
+ u8 updated = 0;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 chk_alive_num = 0;
+ char chk_alive_list[NUM_STA];
+ int i;
+
+ _enter_critical_bh(&pstapriv->auth_list_lock, &irqL);
+
+ phead = &pstapriv->auth_list;
+ plist = get_next(phead);
+
+ /* check auth_queue */
+ while ((rtw_end_of_queue_search(phead, plist)) == false) {
+ psta = LIST_CONTAINOR(plist, struct sta_info, auth_list);
+ plist = get_next(plist);
+
+ if (psta->expire_to > 0) {
+ psta->expire_to--;
+ if (psta->expire_to == 0) {
+ rtw_list_delete(&psta->auth_list);
+ pstapriv->auth_list_cnt--;
+
+ DBG_88E("auth expire %6ph\n",
+ psta->hwaddr);
+
+ _exit_critical_bh(&pstapriv->auth_list_lock, &irqL);
+
+ _enter_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+ rtw_free_stainfo(padapter, psta);
+ _exit_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+
+ _enter_critical_bh(&pstapriv->auth_list_lock, &irqL);
+ }
+ }
+
+ }
+ _exit_critical_bh(&pstapriv->auth_list_lock, &irqL);
+
+ psta = NULL;
+
+ _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+
+ phead = &pstapriv->asoc_list;
+ plist = get_next(phead);
+
+ /* check asoc_queue */
+ while ((rtw_end_of_queue_search(phead, plist)) == false) {
+ psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+ plist = get_next(plist);
+
+ if (chk_sta_is_alive(psta) || !psta->expire_to) {
+ psta->expire_to = pstapriv->expire_to;
+ psta->keep_alive_trycnt = 0;
+ psta->under_exist_checking = 0;
+ } else {
+ psta->expire_to--;
+ }
+
+ if (psta->expire_to <= 0) {
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ if (padapter->registrypriv.wifi_spec == 1) {
+ psta->expire_to = pstapriv->expire_to;
+ continue;
+ }
+
+ if (psta->state & WIFI_SLEEP_STATE) {
+ if (!(psta->state & WIFI_STA_ALIVE_CHK_STATE)) {
+ /* check if the station is alive by other methods when it is in PS mode. */
+ psta->expire_to = pstapriv->expire_to;
+ psta->state |= WIFI_STA_ALIVE_CHK_STATE;
+
+ /* to update bcn with tim_bitmap for this station */
+ pstapriv->tim_bitmap |= BIT(psta->aid);
+ update_beacon(padapter, _TIM_IE_, NULL, false);
+
+ if (!pmlmeext->active_keep_alive_check)
+ continue;
+ }
+ }
+ if (pmlmeext->active_keep_alive_check) {
+ int stainfo_offset;
+
+ stainfo_offset = rtw_stainfo_offset(pstapriv, psta);
+ if (stainfo_offset_valid(stainfo_offset))
+ chk_alive_list[chk_alive_num++] = stainfo_offset;
+ continue;
+ }
+
+ rtw_list_delete(&psta->asoc_list);
+ pstapriv->asoc_list_cnt--;
+
+ DBG_88E("asoc expire %pM, state = 0x%x\n", (psta->hwaddr), psta->state);
+ updated = ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
+ } else {
+ /* TODO: Aging mechanism to digest frames in sleep_q to avoid running out of xmitframe */
+ if (psta->sleepq_len > (NR_XMITFRAME/pstapriv->asoc_list_cnt) &&
+ padapter->xmitpriv.free_xmitframe_cnt < (NR_XMITFRAME/pstapriv->asoc_list_cnt/2)) {
+ DBG_88E("%s sta:%pM, sleepq_len:%u, free_xmitframe_cnt:%u, asoc_list_cnt:%u, clear sleep_q\n", __func__,
+ (psta->hwaddr), psta->sleepq_len,
+ padapter->xmitpriv.free_xmitframe_cnt,
+ pstapriv->asoc_list_cnt);
+ wakeup_sta_to_xmit(padapter, psta);
+ }
+ }
+ }
+
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+
+ if (chk_alive_num) {
+ u8 backup_oper_channel = 0;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ /* switch to the correct channel of the current network before issuing keep-alive frames */
+ if (rtw_get_oper_ch(padapter) != pmlmeext->cur_channel) {
+ backup_oper_channel = rtw_get_oper_ch(padapter);
+ SelectChannel(padapter, pmlmeext->cur_channel);
+ }
+
+ /* issue null data to check whether the sta is alive */
+ for (i = 0; i < chk_alive_num; i++) {
+ int ret = _FAIL;
+
+ psta = rtw_get_stainfo_by_offset(pstapriv, chk_alive_list[i]);
+
+ if (psta->state & WIFI_SLEEP_STATE)
+ ret = issue_nulldata(padapter, psta->hwaddr, 0, 1, 50);
+ else
+ ret = issue_nulldata(padapter, psta->hwaddr, 0, 3, 50);
+
+ psta->keep_alive_trycnt++;
+ if (ret == _SUCCESS) {
+ DBG_88E("asoc check, sta(%pM) is alive\n", (psta->hwaddr));
+ psta->expire_to = pstapriv->expire_to;
+ psta->keep_alive_trycnt = 0;
+ continue;
+ } else if (psta->keep_alive_trycnt <= 3) {
+ DBG_88E("ack check for asoc expire, keep_alive_trycnt =%d\n", psta->keep_alive_trycnt);
+ psta->expire_to = 1;
+ continue;
+ }
+
+ psta->keep_alive_trycnt = 0;
+
+ DBG_88E("asoc expire %pM, state = 0x%x\n", (psta->hwaddr), psta->state);
+ _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ rtw_list_delete(&psta->asoc_list);
+ pstapriv->asoc_list_cnt--;
+ updated = ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ }
+
+ if (backup_oper_channel > 0) /* back to the original operation channel */
+ SelectChannel(padapter, backup_oper_channel);
+ }
+
+ associated_clients_update(padapter, updated);
+}
+
+void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
+{
+ int i;
+ u8 rf_type;
+ u32 init_rate = 0;
+ unsigned char sta_band = 0, raid, shortGIrate = false;
+ unsigned char limit;
+ unsigned int tx_ra_bitmap = 0;
+ struct ht_priv *psta_ht = NULL;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_bssid_ex *pcur_network = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
+
+ if (psta)
+ psta_ht = &psta->htpriv;
+ else
+ return;
+
+ if (!(psta->state & _FW_LINKED))
+ return;
+
+ /* b/g mode ra_bitmap */
+ for (i = 0; i < sizeof(psta->bssrateset); i++) {
+ if (psta->bssrateset[i])
+ tx_ra_bitmap |= rtw_get_bit_value_from_ieee_value(psta->bssrateset[i]&0x7f);
+ }
+ /* n mode ra_bitmap */
+ if (psta_ht->ht_option) {
+ rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+ if (rf_type == RF_2T2R)
+ limit = 16;/* 2R */
+ else
+ limit = 8;/* 1R */
+
+ for (i = 0; i < limit; i++) {
+ if (psta_ht->ht_cap.supp_mcs_set[i/8] & BIT(i%8))
+ tx_ra_bitmap |= BIT(i+12);
+ }
+
+ /* max short GI rate */
+ shortGIrate = psta_ht->sgi;
+ }
+
+ if (pcur_network->Configuration.DSConfig > 14) {
+ /* 5G band */
+ if (tx_ra_bitmap & 0xffff000)
+ sta_band |= WIRELESS_11_5N | WIRELESS_11A;
+ else
+ sta_band |= WIRELESS_11A;
+ } else {
+ if (tx_ra_bitmap & 0xffff000)
+ sta_band |= WIRELESS_11_24N | WIRELESS_11G | WIRELESS_11B;
+ else if (tx_ra_bitmap & 0xff0)
+ sta_band |= WIRELESS_11G | WIRELESS_11B;
+ else
+ sta_band |= WIRELESS_11B;
+ }
+
+ psta->wireless_mode = sta_band;
+
+ raid = networktype_to_raid(sta_band);
+ init_rate = get_highest_rate_idx(tx_ra_bitmap&0x0fffffff)&0x3f;
+
+ if (psta->aid < NUM_STA) {
+ u8 arg = 0;
+
+ arg = psta->mac_id&0x1f;
+
+ arg |= BIT(7);/* support entry 2~31 */
+
+ if (shortGIrate)
+ arg |= BIT(5);
+
+ tx_ra_bitmap |= ((raid<<28)&0xf0000000);
+
+ DBG_88E("%s => mac_id:%d , raid:%d , bitmap = 0x%x, arg = 0x%x\n",
+ __func__ , psta->mac_id, raid , tx_ra_bitmap, arg);
+
+ /* bitmap[0:27] = tx_rate_bitmap */
+ /* bitmap[28:31]= Rate Adaptive id */
+ /* arg[0:4] = macid */
+ /* arg[5] = Short GI */
+ rtw_hal_add_ra_tid(padapter, tx_ra_bitmap, arg, rssi_level);
+
+ if (shortGIrate)
+ init_rate |= BIT(6);
+
+ /* set ra_id, init_rate */
+ psta->raid = raid;
+ psta->init_rate = init_rate;
+
+ } else {
+ DBG_88E("station aid %d exceed the max number\n", psta->aid);
+ }
+}
+
+static void update_bmc_sta(struct adapter *padapter)
+{
+ unsigned long irqL;
+ u32 init_rate = 0;
+ unsigned char network_type, raid;
+ int i, supportRateNum = 0;
+ unsigned int tx_ra_bitmap = 0;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_bssid_ex *pcur_network = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
+ struct sta_info *psta = rtw_get_bcmc_stainfo(padapter);
+
+ if (psta) {
+ psta->aid = 0;/* default set to 0 */
+ psta->mac_id = psta->aid + 1;
+
+ psta->qos_option = 0;
+ psta->htpriv.ht_option = false;
+
+ psta->ieee8021x_blocked = 0;
+
+ _rtw_memset((void *)&psta->sta_stats, 0, sizeof(struct stainfo_stats));
+
+ /* prepare for add_RATid */
+ supportRateNum = rtw_get_rateset_len((u8 *)&pcur_network->SupportedRates);
+ network_type = rtw_check_network_type((u8 *)&pcur_network->SupportedRates, supportRateNum, 1);
+
+ memcpy(psta->bssrateset, &pcur_network->SupportedRates, supportRateNum);
+ psta->bssratelen = supportRateNum;
+
+ /* b/g mode ra_bitmap */
+ for (i = 0; i < supportRateNum; i++) {
+ if (psta->bssrateset[i])
+ tx_ra_bitmap |= rtw_get_bit_value_from_ieee_value(psta->bssrateset[i]&0x7f);
+ }
+
+ if (pcur_network->Configuration.DSConfig > 14) {
+ /* force to A mode. 5G doesn't support CCK rates */
+ network_type = WIRELESS_11A;
+ tx_ra_bitmap = 0x150; /* 6, 12, 24 Mbps */
+ } else {
+ /* force to b mode */
+ network_type = WIRELESS_11B;
+ tx_ra_bitmap = 0xf;
+ }
+
+ raid = networktype_to_raid(network_type);
+ init_rate = get_highest_rate_idx(tx_ra_bitmap&0x0fffffff)&0x3f;
+
+ /* ap mode */
+ rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, true);
+
+ {
+ u8 arg = 0;
+
+ arg = psta->mac_id&0x1f;
+ arg |= BIT(7);
+ tx_ra_bitmap |= ((raid<<28)&0xf0000000);
+ DBG_88E("update_bmc_sta, mask = 0x%x, arg = 0x%x\n", tx_ra_bitmap, arg);
+
+ /* bitmap[0:27] = tx_rate_bitmap */
+ /* bitmap[28:31]= Rate Adaptive id */
+ /* arg[0:4] = macid */
+ /* arg[5] = Short GI */
+ rtw_hal_add_ra_tid(padapter, tx_ra_bitmap, arg, 0);
+ }
+ /* set ra_id, init_rate */
+ psta->raid = raid;
+ psta->init_rate = init_rate;
+
+ rtw_stassoc_hw_rpt(padapter, psta);
+
+ _enter_critical_bh(&psta->lock, &irqL);
+ psta->state = _FW_LINKED;
+ _exit_critical_bh(&psta->lock, &irqL);
+
+ } else {
+ DBG_88E("add_RATid_bmc_sta error!\n");
+ }
+}
+
+/* notes: */
+/* AID: 1~MAX for sta and 0 for bc/mc in ap/adhoc mode */
+/* MAC_ID = AID+1 for sta in ap/adhoc mode */
+/* MAC_ID = 1 for bc/mc for sta/ap/adhoc */
+/* MAC_ID = 0 for bssid for sta/ap/adhoc */
+/* CAM_ID = 0~3 for default key, cmd_id = macid + 3, macid = aid+1; */
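+/* e.g. in AP mode the bc/mc entry uses AID 0 and MAC_ID 1 (see
+ * update_bmc_sta() above), while an associated station with AID 5 gets
+ * MAC_ID 6 (psta->mac_id = psta->aid + 1 below). */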
+
+void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
+{
+ unsigned long irqL;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct ht_priv *phtpriv_ap = &pmlmepriv->htpriv;
+ struct ht_priv *phtpriv_sta = &psta->htpriv;
+
+ psta->mac_id = psta->aid+1;
+ DBG_88E("%s\n", __func__);
+
+ /* ap mode */
+ rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, true);
+
+ if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
+ psta->ieee8021x_blocked = true;
+ else
+ psta->ieee8021x_blocked = false;
+
+
+ /* update sta's cap */
+
+ /* ERP */
+ VCS_update(padapter, psta);
+ /* HT related cap */
+ if (phtpriv_sta->ht_option) {
+ /* check if sta supports rx ampdu */
+ phtpriv_sta->ampdu_enable = phtpriv_ap->ampdu_enable;
+
+ /* check if sta supports Short GI */
+ if ((phtpriv_sta->ht_cap.cap_info & phtpriv_ap->ht_cap.cap_info) & (IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40))
+ phtpriv_sta->sgi = true;
+
+ /* bwmode */
+ if ((phtpriv_sta->ht_cap.cap_info & phtpriv_ap->ht_cap.cap_info) & IEEE80211_HT_CAP_SUP_WIDTH) {
+ phtpriv_sta->bwmode = pmlmeext->cur_bwmode;
+ phtpriv_sta->ch_offset = pmlmeext->cur_ch_offset;
+ }
+ psta->qos_option = true;
+ } else {
+ phtpriv_sta->ampdu_enable = false;
+ phtpriv_sta->sgi = false;
+ phtpriv_sta->bwmode = HT_CHANNEL_WIDTH_20;
+ phtpriv_sta->ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ }
+
+ /* Rx AMPDU */
+ send_delba(padapter, 0, psta->hwaddr);/* recipient */
+
+ /* TX AMPDU */
+ send_delba(padapter, 1, psta->hwaddr);/* originator */
+ phtpriv_sta->agg_enable_bitmap = 0x0;/* reset */
+ phtpriv_sta->candidate_tid_bitmap = 0x0;/* reset */
+
+ /* todo: init other variables */
+
+ _rtw_memset((void *)&psta->sta_stats, 0, sizeof(struct stainfo_stats));
+
+ _enter_critical_bh(&psta->lock, &irqL);
+ psta->state |= _FW_LINKED;
+ _exit_critical_bh(&psta->lock, &irqL);
+}
+
+static void update_hw_ht_param(struct adapter *padapter)
+{
+ unsigned char max_AMPDU_len;
+ unsigned char min_MPDU_spacing;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ DBG_88E("%s\n", __func__);
+
+ /* handle A-MPDU parameter field */
+ /*
+ AMPDU_para [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
+ AMPDU_para [4:2]:Min MPDU Start Spacing
+ */
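+ /* e.g. AMPDU_para == 0x17: bits[1:0] = 3 (64K max A-MPDU length),
+ * bits[4:2] = (0x17 & 0x1c) >> 2 = 5 (min MPDU start spacing code).
+ */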
+ max_AMPDU_len = pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x03;
+
+ min_MPDU_spacing = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c) >> 2;
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_MIN_SPACE, (u8 *)(&min_MPDU_spacing));
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_FACTOR, (u8 *)(&max_AMPDU_len));
+
+ /* */
+ /* Config SM Power Save setting */
+ /* */
+ pmlmeinfo->SM_PS = (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & 0x0C) >> 2;
+ if (pmlmeinfo->SM_PS == WLAN_HT_CAP_SM_PS_STATIC)
+ DBG_88E("%s(): WLAN_HT_CAP_SM_PS_STATIC\n", __func__);
+}
+
+static void start_bss_network(struct adapter *padapter, u8 *pbuf)
+{
+ u8 *p;
+ u8 val8, cur_channel, cur_bwmode, cur_ch_offset;
+ u16 bcn_interval;
+ u32 acparm;
+ int ie_len;
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct security_priv *psecuritypriv = &(padapter->securitypriv);
+ struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork_mlmeext = &(pmlmeinfo->network);
+ struct HT_info_element *pht_info = NULL;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+#endif /* CONFIG_88EU_P2P */
+
+ bcn_interval = (u16)pnetwork->Configuration.BeaconPeriod;
+ cur_channel = pnetwork->Configuration.DSConfig;
+ cur_bwmode = HT_CHANNEL_WIDTH_20;
+ cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+
+
+ /* check if there is wps ie, */
+ /* if there is a WPS IE in the beacon, hostapd will update the beacon twice when starting, */
+ /* and the first time the security IE (RSN/WPA IE) will not be included in the beacon. */
+ if (!rtw_get_wps_ie(pnetwork->IEs+_FIXED_IE_LENGTH_, pnetwork->IELength-_FIXED_IE_LENGTH_, NULL, NULL))
+ pmlmeext->bstart_bss = true;
+
+ /* todo: update wmm, ht cap */
+ if (pmlmepriv->qospriv.qos_option)
+ pmlmeinfo->WMM_enable = true;
+ if (pmlmepriv->htpriv.ht_option) {
+ pmlmeinfo->WMM_enable = true;
+ pmlmeinfo->HT_enable = true;
+
+ update_hw_ht_param(padapter);
+ }
+
+ if (pmlmepriv->cur_network.join_res != true) { /* setting only at first time */
+ /* WEP Key will be set before this function, do not clear CAM. */
+ if ((psecuritypriv->dot11PrivacyAlgrthm != _WEP40_) &&
+ (psecuritypriv->dot11PrivacyAlgrthm != _WEP104_))
+ flush_all_cam_entry(padapter); /* clear CAM */
+ }
+
+ /* set MSR to AP_Mode */
+ Set_MSR(padapter, _HW_STATE_AP_);
+
+ /* Set BSSID REG */
+ rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, pnetwork->MacAddress);
+
+ /* Set EDCA param reg */
+ acparm = 0x002F3217; /* VO */
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_VO, (u8 *)(&acparm));
+ acparm = 0x005E4317; /* VI */
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_VI, (u8 *)(&acparm));
+ acparm = 0x005ea42b;
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_BE, (u8 *)(&acparm));
+ acparm = 0x0000A444; /* BK */
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_BK, (u8 *)(&acparm));
+
+ /* Set Security */
+ val8 = (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) ? 0xcc : 0xcf;
+ rtw_hal_set_hwreg(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
+
+ /* Beacon Control related register */
+ rtw_hal_set_hwreg(padapter, HW_VAR_BEACON_INTERVAL, (u8 *)(&bcn_interval));
+
+ UpdateBrateTbl(padapter, pnetwork->SupportedRates);
+ rtw_hal_set_hwreg(padapter, HW_VAR_BASIC_RATE, pnetwork->SupportedRates);
+
+ if (!pmlmepriv->cur_network.join_res) { /* setting only at first time */
+ /* turn on all dynamic functions */
+ Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true);
+ }
+ /* set channel, bwmode */
+ p = rtw_get_ie((pnetwork->IEs + sizeof(struct ndis_802_11_fixed_ie)), _HT_ADD_INFO_IE_, &ie_len, (pnetwork->IELength - sizeof(struct ndis_802_11_fixed_ie)));
+ if (p && ie_len) {
+ pht_info = (struct HT_info_element *)(p+2);
+
+ if ((pregpriv->cbw40_enable) && (pht_info->infos[0] & BIT(2))) {
+ /* switch to 40 MHz mode */
+ cur_bwmode = HT_CHANNEL_WIDTH_40;
+ switch (pht_info->infos[0] & 0x3) {
+ case 1:
+ cur_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER;
+ break;
+ case 3:
+ cur_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER;
+ break;
+ default:
+ cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ break;
+ }
+ }
+ }
+ /* TODO: need to judge the phy parameters on concurrent mode for single phy */
+ set_channel_bwmode(padapter, cur_channel, cur_ch_offset, cur_bwmode);
+
+ DBG_88E("CH =%d, BW =%d, offset =%d\n", cur_channel, cur_bwmode, cur_ch_offset);
+
+ /* */
+ pmlmeext->cur_channel = cur_channel;
+ pmlmeext->cur_bwmode = cur_bwmode;
+ pmlmeext->cur_ch_offset = cur_ch_offset;
+ pmlmeext->cur_wireless_mode = pmlmepriv->cur_network.network_type;
+
+ /* update cur_wireless_mode */
+ update_wireless_mode(padapter);
+
+ /* update capability after cur_wireless_mode is updated */
+ update_capinfo(padapter, rtw_get_capability((struct wlan_bssid_ex *)pnetwork));
+
+ /* let pnetwork_mlmeext == pnetwork_mlme. */
+ memcpy(pnetwork_mlmeext, pnetwork, pnetwork->Length);
+
+#ifdef CONFIG_88EU_P2P
+ memcpy(pwdinfo->p2p_group_ssid, pnetwork->Ssid.Ssid, pnetwork->Ssid.SsidLength);
+ pwdinfo->p2p_group_ssid_len = pnetwork->Ssid.SsidLength;
+#endif /* CONFIG_88EU_P2P */
+
+ if (pmlmeext->bstart_bss) {
+ update_beacon(padapter, _TIM_IE_, NULL, false);
+
+ /* issue beacon frame */
+ if (send_beacon(padapter) == _FAIL)
+ DBG_88E("issue_beacon, fail!\n");
+ }
+
+ /* update bc/mc sta_info */
+ update_bmc_sta(padapter);
+}
+
+int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
+{
+ int ret = _SUCCESS;
+ u8 *p;
+ u8 *pHT_caps_ie = NULL;
+ u8 *pHT_info_ie = NULL;
+ struct sta_info *psta = NULL;
+ u16 cap, ht_cap = false;
+ uint ie_len = 0;
+ int group_cipher, pairwise_cipher;
+ u8 channel, network_type, supportRate[NDIS_802_11_LENGTH_RATES_EX];
+ int supportRateNum = 0;
+ u8 OUI1[] = {0x00, 0x50, 0xf2, 0x01};
+ u8 WMM_PARA_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x01, 0x01};
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_bssid_ex *pbss_network = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
+ u8 *ie = pbss_network->IEs;
+
+ /* SSID */
+ /* Supported rates */
+ /* DS Params */
+ /* WLAN_EID_COUNTRY */
+ /* ERP Information element */
+ /* Extended supported rates */
+ /* WPA/WPA2 */
+ /* Wi-Fi Wireless Multimedia Extensions */
+ /* ht_capab, ht_oper */
+ /* WPS IE */
+
+ DBG_88E("%s, len =%d\n", __func__, len);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return _FAIL;
+
+
+ if (len > MAX_IE_SZ)
+ return _FAIL;
+
+ pbss_network->IELength = len;
+
+ _rtw_memset(ie, 0, MAX_IE_SZ);
+
+ memcpy(ie, pbuf, pbss_network->IELength);
+
+
+ if (pbss_network->InfrastructureMode != Ndis802_11APMode)
+ return _FAIL;
+
+ pbss_network->Rssi = 0;
+
+ memcpy(pbss_network->MacAddress, myid(&(padapter->eeprompriv)), ETH_ALEN);
+
+ /* beacon interval */
+ p = rtw_get_beacon_interval_from_ie(ie);/* 8: TimeStamp, 2: Beacon Interval 2:Capability */
+ pbss_network->Configuration.BeaconPeriod = RTW_GET_LE16(p);
+
+ /* capability */
+ cap = RTW_GET_LE16(ie);
+
+ /* SSID */
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SSID_IE_, &ie_len, (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ if (p && ie_len > 0) {
+ _rtw_memset(&pbss_network->Ssid, 0, sizeof(struct ndis_802_11_ssid));
+ memcpy(pbss_network->Ssid.Ssid, (p + 2), ie_len);
+ pbss_network->Ssid.SsidLength = ie_len;
+ }
+
+ /* channel */
+ channel = 0;
+ pbss_network->Configuration.Length = 0;
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _DSSET_IE_, &ie_len, (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ if (p && ie_len > 0)
+ channel = *(p + 2);
+
+ pbss_network->Configuration.DSConfig = channel;
+
+ _rtw_memset(supportRate, 0, NDIS_802_11_LENGTH_RATES_EX);
+ /* get supported rates */
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SUPPORTEDRATES_IE_, &ie_len, (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ if (p != NULL) {
+ memcpy(supportRate, p+2, ie_len);
+ supportRateNum = ie_len;
+ }
+
+ /* get ext_supported rates */
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _EXT_SUPPORTEDRATES_IE_, &ie_len, pbss_network->IELength - _BEACON_IE_OFFSET_);
+ if (p != NULL) {
+ memcpy(supportRate+supportRateNum, p+2, ie_len);
+ supportRateNum += ie_len;
+ }
+
+ network_type = rtw_check_network_type(supportRate, supportRateNum, channel);
+
+ rtw_set_supported_rate(pbss_network->SupportedRates, network_type);
+
+ /* parsing ERP_IE */
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _ERPINFO_IE_, &ie_len, (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ if (p && ie_len > 0)
+ ERP_IE_handler(padapter, (struct ndis_802_11_var_ie *)p);
+
+ /* update privacy/security */
+ if (cap & BIT(4))
+ pbss_network->Privacy = 1;
+ else
+ pbss_network->Privacy = 0;
+
+ psecuritypriv->wpa_psk = 0;
+
+ /* wpa2 */
+ group_cipher = 0;
+ pairwise_cipher = 0;
+ psecuritypriv->wpa2_group_cipher = _NO_PRIVACY_;
+ psecuritypriv->wpa2_pairwise_cipher = _NO_PRIVACY_;
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _RSN_IE_2_, &ie_len, (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ if (p && ie_len > 0) {
+ if (rtw_parse_wpa2_ie(p, ie_len+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
+
+ psecuritypriv->dot8021xalg = 1;/* psk, todo:802.1x */
+ psecuritypriv->wpa_psk |= BIT(1);
+
+ psecuritypriv->wpa2_group_cipher = group_cipher;
+ psecuritypriv->wpa2_pairwise_cipher = pairwise_cipher;
+ }
+ }
+ /* wpa */
+ ie_len = 0;
+ group_cipher = 0;
+ pairwise_cipher = 0;
+ psecuritypriv->wpa_group_cipher = _NO_PRIVACY_;
+ psecuritypriv->wpa_pairwise_cipher = _NO_PRIVACY_;
+ for (p = ie + _BEACON_IE_OFFSET_;; p += (ie_len + 2)) {
+ p = rtw_get_ie(p, _SSN_IE_1_, &ie_len,
+ (pbss_network->IELength - _BEACON_IE_OFFSET_ - (ie_len + 2)));
+ if ((p) && (_rtw_memcmp(p+2, OUI1, 4))) {
+ if (rtw_parse_wpa_ie(p, ie_len+2, &group_cipher,
+ &pairwise_cipher, NULL) == _SUCCESS) {
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
+
+ psecuritypriv->dot8021xalg = 1;/* psk, todo:802.1x */
+
+ psecuritypriv->wpa_psk |= BIT(0);
+
+ psecuritypriv->wpa_group_cipher = group_cipher;
+ psecuritypriv->wpa_pairwise_cipher = pairwise_cipher;
+ }
+ break;
+ }
+ if ((p == NULL) || (ie_len == 0))
+ break;
+ }
+
+ /* wmm */
+ ie_len = 0;
+ pmlmepriv->qospriv.qos_option = 0;
+ if (pregistrypriv->wmm_enable) {
+ for (p = ie + _BEACON_IE_OFFSET_;; p += (ie_len + 2)) {
+ p = rtw_get_ie(p, _VENDOR_SPECIFIC_IE_, &ie_len,
+ (pbss_network->IELength - _BEACON_IE_OFFSET_ - (ie_len + 2)));
+ if ((p) && _rtw_memcmp(p+2, WMM_PARA_IE, 6)) {
+ pmlmepriv->qospriv.qos_option = 1;
+
+ *(p+8) |= BIT(7);/* QoS Info, support U-APSD */
+
+ /* disable all ACM bits since the WMM admission control is not supported */
+ *(p + 10) &= ~BIT(4); /* BE */
+ *(p + 14) &= ~BIT(4); /* BK */
+ *(p + 18) &= ~BIT(4); /* VI */
+ *(p + 22) &= ~BIT(4); /* VO */
+ break;
+ }
+
+ if ((p == NULL) || (ie_len == 0))
+ break;
+ }
+ }
+ /* parsing HT_CAP_IE */
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_CAPABILITY_IE_, &ie_len,
+ (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ if (p && ie_len > 0) {
+ u8 rf_type;
+ struct rtw_ieee80211_ht_cap *pht_cap = (struct rtw_ieee80211_ht_cap *)(p+2);
+
+ pHT_caps_ie = p;
+ ht_cap = true;
+ network_type |= WIRELESS_11_24N;
+
+ rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+
+ if ((psecuritypriv->wpa_pairwise_cipher & WPA_CIPHER_CCMP) ||
+ (psecuritypriv->wpa2_pairwise_cipher & WPA_CIPHER_CCMP))
+ pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY&(0x07<<2));
+ else
+ pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY&0x00);
+
+ /* set Max Rx AMPDU size to 64K */
+ pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_FACTOR & 0x03);
+
+ if (rf_type == RF_1T1R) {
+ pht_cap->supp_mcs_set[0] = 0xff;
+ pht_cap->supp_mcs_set[1] = 0x0;
+ }
+ memcpy(&pmlmepriv->htpriv.ht_cap, p+2, ie_len);
+ }
+
+ /* parsing HT_INFO_IE */
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_ADD_INFO_IE_, &ie_len,
+ (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ if (p && ie_len > 0)
+ pHT_info_ie = p;
+ switch (network_type) {
+ case WIRELESS_11B:
+ pbss_network->NetworkTypeInUse = Ndis802_11DS;
+ break;
+ case WIRELESS_11G:
+ case WIRELESS_11BG:
+ case WIRELESS_11G_24N:
+ case WIRELESS_11BG_24N:
+ pbss_network->NetworkTypeInUse = Ndis802_11OFDM24;
+ break;
+ case WIRELESS_11A:
+ pbss_network->NetworkTypeInUse = Ndis802_11OFDM5;
+ break;
+ default:
+ pbss_network->NetworkTypeInUse = Ndis802_11OFDM24;
+ break;
+ }
+
+ pmlmepriv->cur_network.network_type = network_type;
+
+ pmlmepriv->htpriv.ht_option = false;
+
+ if ((psecuritypriv->wpa2_pairwise_cipher & WPA_CIPHER_TKIP) ||
+ (psecuritypriv->wpa_pairwise_cipher & WPA_CIPHER_TKIP)) {
+ /* todo: */
+ /* ht_cap = false; */
+ }
+
+ /* ht_cap */
+ if (pregistrypriv->ht_enable && ht_cap) {
+ pmlmepriv->htpriv.ht_option = true;
+ pmlmepriv->qospriv.qos_option = 1;
+
+ if (pregistrypriv->ampdu_enable == 1)
+ pmlmepriv->htpriv.ampdu_enable = true;
+ HT_caps_handler(padapter, (struct ndis_802_11_var_ie *)pHT_caps_ie);
+
+ HT_info_handler(padapter, (struct ndis_802_11_var_ie *)pHT_info_ie);
+ }
+
+ pbss_network->Length = get_wlan_bssid_ex_sz((struct wlan_bssid_ex *)pbss_network);
+
+ /* issue beacon to start bss network */
+ start_bss_network(padapter, (u8 *)pbss_network);
+
+ /* alloc sta_info for ap itself */
+ psta = rtw_get_stainfo(&padapter->stapriv, pbss_network->MacAddress);
+ if (!psta) {
+ psta = rtw_alloc_stainfo(&padapter->stapriv, pbss_network->MacAddress);
+ if (psta == NULL)
+ return _FAIL;
+ }
+
+ pmlmepriv->cur_network.join_res = true;/* used to check whether the beacon is already set */
+ return ret;
+}
+
+void rtw_set_macaddr_acl(struct adapter *padapter, int mode)
+{
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+
+ DBG_88E("%s, mode =%d\n", __func__, mode);
+
+ pacl_list->mode = mode;
+}
+
+int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
+{
+ unsigned long irqL;
+ struct list_head *plist, *phead;
+ u8 added = false;
+ int i, ret = 0;
+ struct rtw_wlan_acl_node *paclnode;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+ struct __queue *pacl_node_q = &pacl_list->acl_node_q;
+
+ DBG_88E("%s(acl_num =%d) =%pM\n", __func__, pacl_list->num, (addr));
+
+ if ((NUM_ACL-1) < pacl_list->num)
+ return -1;
+
+ _enter_critical_bh(&(pacl_node_q->lock), &irqL);
+
+ phead = get_list_head(pacl_node_q);
+ plist = get_next(phead);
+
+ while (!rtw_end_of_queue_search(phead, plist)) {
+ paclnode = LIST_CONTAINOR(plist, struct rtw_wlan_acl_node, list);
+ plist = get_next(plist);
+
+ if (_rtw_memcmp(paclnode->addr, addr, ETH_ALEN)) {
+ if (paclnode->valid) {
+ added = true;
+ DBG_88E("%s, sta has been added\n", __func__);
+ break;
+ }
+ }
+ }
+
+ _exit_critical_bh(&(pacl_node_q->lock), &irqL);
+
+ if (added)
+ return ret;
+
+ _enter_critical_bh(&(pacl_node_q->lock), &irqL);
+
+ for (i = 0; i < NUM_ACL; i++) {
+ paclnode = &pacl_list->aclnode[i];
+
+ if (!paclnode->valid) {
+ _rtw_init_listhead(&paclnode->list);
+
+ memcpy(paclnode->addr, addr, ETH_ALEN);
+
+ paclnode->valid = true;
+
+ rtw_list_insert_tail(&paclnode->list, get_list_head(pacl_node_q));
+
+ pacl_list->num++;
+
+ break;
+ }
+ }
+
+ DBG_88E("%s, acl_num =%d\n", __func__, pacl_list->num);
+
+ _exit_critical_bh(&(pacl_node_q->lock), &irqL);
+
+ return ret;
+}
+
+int rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
+{
+ unsigned long irqL;
+ struct list_head *plist, *phead;
+ int ret = 0;
+ struct rtw_wlan_acl_node *paclnode;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+ struct __queue *pacl_node_q = &pacl_list->acl_node_q;
+
+ DBG_88E("%s(acl_num =%d) =%pM\n", __func__, pacl_list->num, (addr));
+
+ _enter_critical_bh(&(pacl_node_q->lock), &irqL);
+
+ phead = get_list_head(pacl_node_q);
+ plist = get_next(phead);
+
+ while (!rtw_end_of_queue_search(phead, plist)) {
+ paclnode = LIST_CONTAINOR(plist, struct rtw_wlan_acl_node, list);
+ plist = get_next(plist);
+
+ if (_rtw_memcmp(paclnode->addr, addr, ETH_ALEN)) {
+ if (paclnode->valid) {
+ paclnode->valid = false;
+
+ rtw_list_delete(&paclnode->list);
+
+ pacl_list->num--;
+ }
+ }
+ }
+
+ _exit_critical_bh(&(pacl_node_q->lock), &irqL);
+
+ DBG_88E("%s, acl_num =%d\n", __func__, pacl_list->num);
+ return ret;
+}
+
+static void update_bcn_fixed_ie(struct adapter *padapter)
+{
+ DBG_88E("%s\n", __func__);
+}
+
+static void update_bcn_erpinfo_ie(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
+ unsigned char *p, *ie = pnetwork->IEs;
+ u32 len = 0;
+
+ DBG_88E("%s, ERP_enable =%d\n", __func__, pmlmeinfo->ERP_enable);
+
+ if (!pmlmeinfo->ERP_enable)
+ return;
+
+ /* parsing ERP_IE */
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _ERPINFO_IE_, &len,
+ (pnetwork->IELength - _BEACON_IE_OFFSET_));
+ if (p && len > 0) {
+ struct ndis_802_11_var_ie *pIE = (struct ndis_802_11_var_ie *)p;
+
+ if (pmlmepriv->num_sta_non_erp == 1)
+ pIE->data[0] |= RTW_ERP_INFO_NON_ERP_PRESENT|RTW_ERP_INFO_USE_PROTECTION;
+ else
+ pIE->data[0] &= ~(RTW_ERP_INFO_NON_ERP_PRESENT|RTW_ERP_INFO_USE_PROTECTION);
+
+ if (pmlmepriv->num_sta_no_short_preamble > 0)
+ pIE->data[0] |= RTW_ERP_INFO_BARKER_PREAMBLE_MODE;
+ else
+ pIE->data[0] &= ~(RTW_ERP_INFO_BARKER_PREAMBLE_MODE);
+
+ ERP_IE_handler(padapter, pIE);
+ }
+}
+
+static void update_bcn_htcap_ie(struct adapter *padapter)
+{
+ DBG_88E("%s\n", __func__);
+}
+
+static void update_bcn_htinfo_ie(struct adapter *padapter)
+{
+ DBG_88E("%s\n", __func__);
+}
+
+static void update_bcn_rsn_ie(struct adapter *padapter)
+{
+ DBG_88E("%s\n", __func__);
+}
+
+static void update_bcn_wpa_ie(struct adapter *padapter)
+{
+ DBG_88E("%s\n", __func__);
+}
+
+static void update_bcn_wmm_ie(struct adapter *padapter)
+{
+ DBG_88E("%s\n", __func__);
+}
+
+static void update_bcn_wps_ie(struct adapter *padapter)
+{
+ u8 *pwps_ie = NULL, *pwps_ie_src;
+ u8 *premainder_ie, *pbackup_remainder_ie = NULL;
+ uint wps_ielen = 0, wps_offset, remainder_ielen;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
+ unsigned char *ie = pnetwork->IEs;
+ u32 ielen = pnetwork->IELength;
+
+ DBG_88E("%s\n", __func__);
+
+ pwps_ie = rtw_get_wps_ie(ie+_FIXED_IE_LENGTH_, ielen-_FIXED_IE_LENGTH_, NULL, &wps_ielen);
+
+ if (pwps_ie == NULL || wps_ielen == 0)
+ return;
+
+ wps_offset = (uint)(pwps_ie-ie);
+
+ premainder_ie = pwps_ie + wps_ielen;
+
+ remainder_ielen = ielen - wps_offset - wps_ielen;
+
+ if (remainder_ielen > 0) {
+ pbackup_remainder_ie = rtw_malloc(remainder_ielen);
+ if (pbackup_remainder_ie)
+ memcpy(pbackup_remainder_ie, premainder_ie, remainder_ielen);
+ }
+
+ pwps_ie_src = pmlmepriv->wps_beacon_ie;
+ if (pwps_ie_src == NULL)
+ return;
+
+ wps_ielen = (uint)pwps_ie_src[1];/* to get ie data len */
+ if ((wps_offset+wps_ielen+2+remainder_ielen) <= MAX_IE_SZ) {
+ memcpy(pwps_ie, pwps_ie_src, wps_ielen+2);
+ pwps_ie += (wps_ielen+2);
+
+ if (pbackup_remainder_ie)
+ memcpy(pwps_ie, pbackup_remainder_ie, remainder_ielen);
+
+ /* update IELength */
+ pnetwork->IELength = wps_offset + (wps_ielen+2) + remainder_ielen;
+ }
+
+ if (pbackup_remainder_ie)
+ kfree(pbackup_remainder_ie);
+}
+
+static void update_bcn_p2p_ie(struct adapter *padapter)
+{
+}
+
+static void update_bcn_vendor_spec_ie(struct adapter *padapter, u8 *oui)
+{
+ DBG_88E("%s\n", __func__);
+
+ if (_rtw_memcmp(RTW_WPA_OUI, oui, 4))
+ update_bcn_wpa_ie(padapter);
+ else if (_rtw_memcmp(WMM_OUI, oui, 4))
+ update_bcn_wmm_ie(padapter);
+ else if (_rtw_memcmp(WPS_OUI, oui, 4))
+ update_bcn_wps_ie(padapter);
+ else if (_rtw_memcmp(P2P_OUI, oui, 4))
+ update_bcn_p2p_ie(padapter);
+ else
+ DBG_88E("unknown OUI type!\n");
+}
+
+void update_beacon(struct adapter *padapter, u8 ie_id, u8 *oui, u8 tx)
+{
+ unsigned long irqL;
+ struct mlme_priv *pmlmepriv;
+ struct mlme_ext_priv *pmlmeext;
+
+ if (!padapter)
+ return;
+
+ pmlmepriv = &(padapter->mlmepriv);
+ pmlmeext = &(padapter->mlmeextpriv);
+
+ if (!pmlmeext->bstart_bss)
+ return;
+
+ _enter_critical_bh(&pmlmepriv->bcn_update_lock, &irqL);
+
+ switch (ie_id) {
+ case 0xFF:
+ update_bcn_fixed_ie(padapter);/* 8: TimeStamp, 2: Beacon Interval 2:Capability */
+ break;
+ case _TIM_IE_:
+ update_BCNTIM(padapter);
+ break;
+ case _ERPINFO_IE_:
+ update_bcn_erpinfo_ie(padapter);
+ break;
+ case _HT_CAPABILITY_IE_:
+ update_bcn_htcap_ie(padapter);
+ break;
+ case _RSN_IE_2_:
+ update_bcn_rsn_ie(padapter);
+ break;
+ case _HT_ADD_INFO_IE_:
+ update_bcn_htinfo_ie(padapter);
+ break;
+ case _VENDOR_SPECIFIC_IE_:
+ update_bcn_vendor_spec_ie(padapter, oui);
+ break;
+ default:
+ break;
+ }
+
+ pmlmepriv->update_bcn = true;
+
+ _exit_critical_bh(&pmlmepriv->bcn_update_lock, &irqL);
+
+ if (tx)
+ set_tx_beacon_cmd(padapter);
+}
+
+/*
+op_mode
+Set to 0 (HT pure) under the following conditions
+ - all STAs in the BSS are 20/40 MHz HT in 20/40 MHz BSS or
+ - all STAs in the BSS are 20 MHz HT in 20 MHz BSS
+Set to 1 (HT non-member protection) if there may be non-HT STAs
+ in both the primary and the secondary channel
+Set to 2 if only HT STAs are associated in the BSS,
+ but at least one 20 MHz HT STA is associated
+Set to 3 (HT mixed mode) when one or more non-HT STAs are associated
+ (currently non-GF HT station is considered as non-HT STA also)
+*/
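+/* e.g. one associated legacy (non-HT) station forces new_op_mode to
+ * OP_MODE_MIXED (3) below, while an all-HT, all-greenfield BSS with no
+ * 20 MHz-only HT stations and no overlapping legacy BSS stays at
+ * OP_MODE_PURE (0).
+ */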
+static int rtw_ht_operation_update(struct adapter *padapter)
+{
+ u16 cur_op_mode, new_op_mode;
+ int op_mode_changes = 0;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct ht_priv *phtpriv_ap = &pmlmepriv->htpriv;
+
+ if (pmlmepriv->htpriv.ht_option)
+ return 0;
+
+ DBG_88E("%s current operation mode = 0x%X\n",
+ __func__, pmlmepriv->ht_op_mode);
+
+ if (!(pmlmepriv->ht_op_mode & HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT) &&
+ pmlmepriv->num_sta_ht_no_gf) {
+ pmlmepriv->ht_op_mode |=
+ HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT;
+ op_mode_changes++;
+ } else if ((pmlmepriv->ht_op_mode &
+ HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT) &&
+ pmlmepriv->num_sta_ht_no_gf == 0) {
+ pmlmepriv->ht_op_mode &=
+ ~HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT;
+ op_mode_changes++;
+ }
+
+ if (!(pmlmepriv->ht_op_mode & HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT) &&
+ (pmlmepriv->num_sta_no_ht || pmlmepriv->olbc_ht)) {
+ pmlmepriv->ht_op_mode |= HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT;
+ op_mode_changes++;
+ } else if ((pmlmepriv->ht_op_mode &
+ HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT) &&
+ (pmlmepriv->num_sta_no_ht == 0 && !pmlmepriv->olbc_ht)) {
+ pmlmepriv->ht_op_mode &=
+ ~HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT;
+ op_mode_changes++;
+ }
+
+ /* Note: currently we switch to the MIXED op mode if HT non-greenfield
+ * station is associated. Probably it's a theoretical case, since
+ * it looks like all known HT STAs support greenfield.
+ */
+ new_op_mode = 0;
+ if (pmlmepriv->num_sta_no_ht ||
+ (pmlmepriv->ht_op_mode & HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT))
+ new_op_mode = OP_MODE_MIXED;
+ else if ((phtpriv_ap->ht_cap.cap_info & IEEE80211_HT_CAP_SUP_WIDTH) &&
+ pmlmepriv->num_sta_ht_20mhz)
+ new_op_mode = OP_MODE_20MHZ_HT_STA_ASSOCED;
+ else if (pmlmepriv->olbc_ht)
+ new_op_mode = OP_MODE_MAY_BE_LEGACY_STAS;
+ else
+ new_op_mode = OP_MODE_PURE;
+
+ cur_op_mode = pmlmepriv->ht_op_mode & HT_INFO_OPERATION_MODE_OP_MODE_MASK;
+ if (cur_op_mode != new_op_mode) {
+ pmlmepriv->ht_op_mode &= ~HT_INFO_OPERATION_MODE_OP_MODE_MASK;
+ pmlmepriv->ht_op_mode |= new_op_mode;
+ op_mode_changes++;
+ }
+
+ DBG_88E("%s new operation mode = 0x%X changes =%d\n",
+ __func__, pmlmepriv->ht_op_mode, op_mode_changes);
+
+ return op_mode_changes;
+}
+
+void associated_clients_update(struct adapter *padapter, u8 updated)
+{
+ /* update associated stations' cap. */
+ if (updated) {
+ unsigned long irqL;
+ struct list_head *phead, *plist;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+
+ phead = &pstapriv->asoc_list;
+ plist = get_next(phead);
+
+ /* check asoc_queue */
+ while ((rtw_end_of_queue_search(phead, plist)) == false) {
+ psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+
+ plist = get_next(plist);
+
+ VCS_update(padapter, psta);
+ }
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ }
+}
+
+/* called > TSR LEVEL for USB or SDIO interface */
+void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta)
+{
+ u8 beacon_updated = false;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+
+ if (!(psta->flags & WLAN_STA_SHORT_PREAMBLE)) {
+ if (!psta->no_short_preamble_set) {
+ psta->no_short_preamble_set = 1;
+
+ pmlmepriv->num_sta_no_short_preamble++;
+
+ if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
+ (pmlmepriv->num_sta_no_short_preamble == 1)) {
+ beacon_updated = true;
+ update_beacon(padapter, 0xFF, NULL, true);
+ }
+ }
+ } else {
+ if (psta->no_short_preamble_set) {
+ psta->no_short_preamble_set = 0;
+
+ pmlmepriv->num_sta_no_short_preamble--;
+
+ if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
+ (pmlmepriv->num_sta_no_short_preamble == 0)) {
+ beacon_updated = true;
+ update_beacon(padapter, 0xFF, NULL, true);
+ }
+ }
+ }
+
+ if (psta->flags & WLAN_STA_NONERP) {
+ if (!psta->nonerp_set) {
+ psta->nonerp_set = 1;
+
+ pmlmepriv->num_sta_non_erp++;
+
+ if (pmlmepriv->num_sta_non_erp == 1) {
+ beacon_updated = true;
+ update_beacon(padapter, _ERPINFO_IE_, NULL, true);
+ }
+ }
+ } else {
+ if (psta->nonerp_set) {
+ psta->nonerp_set = 0;
+
+ pmlmepriv->num_sta_non_erp--;
+
+ if (pmlmepriv->num_sta_non_erp == 0) {
+ beacon_updated = true;
+ update_beacon(padapter, _ERPINFO_IE_, NULL, true);
+ }
+ }
+ }
+
+ if (!(psta->capability & WLAN_CAPABILITY_SHORT_SLOT)) {
+ if (!psta->no_short_slot_time_set) {
+ psta->no_short_slot_time_set = 1;
+
+ pmlmepriv->num_sta_no_short_slot_time++;
+
+ if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
+ (pmlmepriv->num_sta_no_short_slot_time == 1)) {
+ beacon_updated = true;
+ update_beacon(padapter, 0xFF, NULL, true);
+ }
+ }
+ } else {
+ if (psta->no_short_slot_time_set) {
+ psta->no_short_slot_time_set = 0;
+
+ pmlmepriv->num_sta_no_short_slot_time--;
+
+ if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
+ (pmlmepriv->num_sta_no_short_slot_time == 0)) {
+ beacon_updated = true;
+ update_beacon(padapter, 0xFF, NULL, true);
+ }
+ }
+ }
+
+ if (psta->flags & WLAN_STA_HT) {
+ u16 ht_capab = psta->htpriv.ht_cap.cap_info;
+
+ DBG_88E("HT: STA %pM HT Capabilities Info: 0x%04x\n",
+ (psta->hwaddr), ht_capab);
+
+ if (psta->no_ht_set) {
+ psta->no_ht_set = 0;
+ pmlmepriv->num_sta_no_ht--;
+ }
+
+ if ((ht_capab & IEEE80211_HT_CAP_GRN_FLD) == 0) {
+ if (!psta->no_ht_gf_set) {
+ psta->no_ht_gf_set = 1;
+ pmlmepriv->num_sta_ht_no_gf++;
+ }
+ DBG_88E("%s STA %pM - no greenfield, num of non-gf stations %d\n",
+ __func__, (psta->hwaddr),
+ pmlmepriv->num_sta_ht_no_gf);
+ }
+
+ if ((ht_capab & IEEE80211_HT_CAP_SUP_WIDTH) == 0) {
+ if (!psta->ht_20mhz_set) {
+ psta->ht_20mhz_set = 1;
+ pmlmepriv->num_sta_ht_20mhz++;
+ }
+ DBG_88E("%s STA %pM - 20 MHz HT, num of 20MHz HT STAs %d\n",
+ __func__, (psta->hwaddr),
+ pmlmepriv->num_sta_ht_20mhz);
+ }
+ } else {
+ if (!psta->no_ht_set) {
+ psta->no_ht_set = 1;
+ pmlmepriv->num_sta_no_ht++;
+ }
+ if (pmlmepriv->htpriv.ht_option) {
+ DBG_88E("%s STA %pM - no HT, num of non-HT stations %d\n",
+ __func__, (psta->hwaddr),
+ pmlmepriv->num_sta_no_ht);
+ }
+ }
+
+ if (rtw_ht_operation_update(padapter) > 0) {
+ update_beacon(padapter, _HT_CAPABILITY_IE_, NULL, false);
+ update_beacon(padapter, _HT_ADD_INFO_IE_, NULL, true);
+ }
+
+	/* update associated stations' capabilities */
+ associated_clients_update(padapter, beacon_updated);
+
+ DBG_88E("%s, updated =%d\n", __func__, beacon_updated);
+}
+
+u8 bss_cap_update_on_sta_leave(struct adapter *padapter, struct sta_info *psta)
+{
+ u8 beacon_updated = false;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+
+ if (!psta)
+ return beacon_updated;
+
+ if (psta->no_short_preamble_set) {
+ psta->no_short_preamble_set = 0;
+ pmlmepriv->num_sta_no_short_preamble--;
+ if (pmlmeext->cur_wireless_mode > WIRELESS_11B &&
+ pmlmepriv->num_sta_no_short_preamble == 0) {
+ beacon_updated = true;
+ update_beacon(padapter, 0xFF, NULL, true);
+ }
+ }
+
+ if (psta->nonerp_set) {
+ psta->nonerp_set = 0;
+ pmlmepriv->num_sta_non_erp--;
+ if (pmlmepriv->num_sta_non_erp == 0) {
+ beacon_updated = true;
+ update_beacon(padapter, _ERPINFO_IE_, NULL, true);
+ }
+ }
+
+ if (psta->no_short_slot_time_set) {
+ psta->no_short_slot_time_set = 0;
+ pmlmepriv->num_sta_no_short_slot_time--;
+ if (pmlmeext->cur_wireless_mode > WIRELESS_11B &&
+ pmlmepriv->num_sta_no_short_slot_time == 0) {
+ beacon_updated = true;
+ update_beacon(padapter, 0xFF, NULL, true);
+ }
+ }
+
+ if (psta->no_ht_gf_set) {
+ psta->no_ht_gf_set = 0;
+ pmlmepriv->num_sta_ht_no_gf--;
+ }
+
+ if (psta->no_ht_set) {
+ psta->no_ht_set = 0;
+ pmlmepriv->num_sta_no_ht--;
+ }
+
+ if (psta->ht_20mhz_set) {
+ psta->ht_20mhz_set = 0;
+ pmlmepriv->num_sta_ht_20mhz--;
+ }
+
+ if (rtw_ht_operation_update(padapter) > 0) {
+ update_beacon(padapter, _HT_CAPABILITY_IE_, NULL, false);
+ update_beacon(padapter, _HT_ADD_INFO_IE_, NULL, true);
+ }
+
+	/* update associated stations' capabilities */
+
+ DBG_88E("%s, updated =%d\n", __func__, beacon_updated);
+
+ return beacon_updated;
+}
+
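+/* Tear down one associated station: cancel its RX/TX AMPDU sessions,
+ * optionally send a deauth, clear its hardware key, mark it unlinked,
+ * report the removal, update the BSS capability counters and finally
+ * free the sta_info.  Returns whether the beacon was updated. */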
+u8 ap_free_sta(struct adapter *padapter, struct sta_info *psta,
+ bool active, u16 reason)
+{
+ unsigned long irqL;
+ u8 beacon_updated = false;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ if (!psta)
+ return beacon_updated;
+
+ /* tear down Rx AMPDU */
+ send_delba(padapter, 0, psta->hwaddr);/* recipient */
+
+ /* tear down TX AMPDU */
+ send_delba(padapter, 1, psta->hwaddr);/* originator */
+ psta->htpriv.agg_enable_bitmap = 0x0;/* reset */
+ psta->htpriv.candidate_tid_bitmap = 0x0;/* reset */
+
+ if (active)
+ issue_deauth(padapter, psta->hwaddr, reason);
+
+ /* clear cam entry / key */
+ rtw_clearstakey_cmd(padapter, (u8 *)psta, (u8)(psta->mac_id + 3), true);
+
+
+ _enter_critical_bh(&psta->lock, &irqL);
+ psta->state &= ~_FW_LINKED;
+ _exit_critical_bh(&psta->lock, &irqL);
+
+ rtw_indicate_sta_disassoc_event(padapter, psta);
+
+ report_del_sta_event(padapter, psta->hwaddr, reason);
+
+ beacon_updated = bss_cap_update_on_sta_leave(padapter, psta);
+
+ _enter_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+ rtw_free_stainfo(padapter, psta);
+ _exit_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+
+ return beacon_updated;
+}
+
+int rtw_ap_inform_ch_switch(struct adapter *padapter, u8 new_ch, u8 ch_offset)
+{
+ unsigned long irqL;
+ struct list_head *phead, *plist;
+ int ret = 0;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
+ return ret;
+
+ DBG_88E(FUNC_NDEV_FMT" with ch:%u, offset:%u\n",
+ FUNC_NDEV_ARG(padapter->pnetdev), new_ch, ch_offset);
+
+ _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ phead = &pstapriv->asoc_list;
+ plist = get_next(phead);
+
+ /* for each sta in asoc_queue */
+ while (!rtw_end_of_queue_search(phead, plist)) {
+ psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+ plist = get_next(plist);
+
+ issue_action_spct_ch_switch(padapter, psta->hwaddr, new_ch, ch_offset);
+ psta->expire_to = ((pstapriv->expire_to * 2) > 5) ? 5 : (pstapriv->expire_to * 2);
+ }
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+
+ issue_action_spct_ch_switch(padapter, bc_addr, new_ch, ch_offset);
+
+ return ret;
+}
+
+int rtw_sta_flush(struct adapter *padapter)
+{
+ unsigned long irqL;
+ struct list_head *phead, *plist;
+ int ret = 0;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ DBG_88E(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(padapter->pnetdev));
+
+ if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
+ return ret;
+
+ _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ phead = &pstapriv->asoc_list;
+ plist = get_next(phead);
+
+ /* free sta asoc_queue */
+ while ((rtw_end_of_queue_search(phead, plist)) == false) {
+ psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+
+ plist = get_next(plist);
+
+ rtw_list_delete(&psta->asoc_list);
+ pstapriv->asoc_list_cnt--;
+
+ ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
+ }
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+
+
+ issue_deauth(padapter, bc_addr, WLAN_REASON_DEAUTH_LEAVING);
+
+ associated_clients_update(padapter, true);
+
+ return ret;
+}
+
+/* called > TSR LEVEL for USB or SDIO Interface */
+void sta_info_update(struct adapter *padapter, struct sta_info *psta)
+{
+ int flags = psta->flags;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ /* update wmm cap. */
+ if (WLAN_STA_WME&flags)
+ psta->qos_option = 1;
+ else
+ psta->qos_option = 0;
+
+ if (pmlmepriv->qospriv.qos_option == 0)
+ psta->qos_option = 0;
+
+ /* update 802.11n ht cap. */
+ if (WLAN_STA_HT&flags) {
+ psta->htpriv.ht_option = true;
+ psta->qos_option = 1;
+ } else {
+ psta->htpriv.ht_option = false;
+ }
+
+ if (!pmlmepriv->htpriv.ht_option)
+ psta->htpriv.ht_option = false;
+
+ update_sta_info_apmode(padapter, psta);
+}
+
+/* called >= TSR LEVEL for USB or SDIO Interface */
+void ap_sta_info_defer_update(struct adapter *padapter, struct sta_info *psta)
+{
+ if (psta->state & _FW_LINKED) {
+ /* add ratid */
+ add_RATid(padapter, psta, 0);/* DM_RATR_STA_INIT */
+ }
+}
+
+void start_ap_mode(struct adapter *padapter)
+{
+ int i;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+
+ pmlmepriv->update_bcn = false;
+
+ pmlmeext->bstart_bss = false;
+
+ pmlmepriv->num_sta_non_erp = 0;
+
+ pmlmepriv->num_sta_no_short_slot_time = 0;
+
+ pmlmepriv->num_sta_no_short_preamble = 0;
+
+ pmlmepriv->num_sta_ht_no_gf = 0;
+ pmlmepriv->num_sta_no_ht = 0;
+ pmlmepriv->num_sta_ht_20mhz = 0;
+
+ pmlmepriv->olbc = false;
+
+ pmlmepriv->olbc_ht = false;
+
+ pmlmepriv->ht_op_mode = 0;
+
+ for (i = 0; i < NUM_STA; i++)
+ pstapriv->sta_aid[i] = NULL;
+
+ pmlmepriv->wps_beacon_ie = NULL;
+ pmlmepriv->wps_probe_resp_ie = NULL;
+ pmlmepriv->wps_assoc_resp_ie = NULL;
+
+ pmlmepriv->p2p_beacon_ie = NULL;
+ pmlmepriv->p2p_probe_resp_ie = NULL;
+
+ /* for ACL */
+ _rtw_init_listhead(&(pacl_list->acl_node_q.queue));
+ pacl_list->num = 0;
+ pacl_list->mode = 0;
+ for (i = 0; i < NUM_ACL; i++) {
+ _rtw_init_listhead(&pacl_list->aclnode[i].list);
+ pacl_list->aclnode[i].valid = false;
+ }
+}
+
+void stop_ap_mode(struct adapter *padapter)
+{
+ unsigned long irqL;
+ struct list_head *phead, *plist;
+ struct rtw_wlan_acl_node *paclnode;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+ struct __queue *pacl_node_q = &pacl_list->acl_node_q;
+
+ pmlmepriv->update_bcn = false;
+ pmlmeext->bstart_bss = false;
+
+	/* reset and init security priv; this could be refined with rtw_reset_securitypriv */
+ _rtw_memset((unsigned char *)&padapter->securitypriv, 0, sizeof(struct security_priv));
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11WEPDisabled;
+
+ /* for ACL */
+ _enter_critical_bh(&(pacl_node_q->lock), &irqL);
+ phead = get_list_head(pacl_node_q);
+ plist = get_next(phead);
+ while ((rtw_end_of_queue_search(phead, plist)) == false) {
+ paclnode = LIST_CONTAINOR(plist, struct rtw_wlan_acl_node, list);
+ plist = get_next(plist);
+
+ if (paclnode->valid) {
+ paclnode->valid = false;
+
+ rtw_list_delete(&paclnode->list);
+
+ pacl_list->num--;
+ }
+ }
+ _exit_critical_bh(&(pacl_node_q->lock), &irqL);
+
+ DBG_88E("%s, free acl_node_queue, num =%d\n", __func__, pacl_list->num);
+
+ rtw_sta_flush(padapter);
+
+ /* free_assoc_sta_resources */
+ rtw_free_all_stainfo(padapter);
+
+ psta = rtw_get_bcmc_stainfo(padapter);
+ _enter_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+ rtw_free_stainfo(padapter, psta);
+ _exit_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+
+ rtw_init_bcmc_stainfo(padapter);
+
+ rtw_free_mlme_priv_ie_data(pmlmepriv);
+}
+
+#endif /* CONFIG_88EU_AP_MODE */
diff --git a/drivers/staging/rtl8188eu/core/rtw_br_ext.c b/drivers/staging/rtl8188eu/core/rtw_br_ext.c
new file mode 100644
index 0000000..fbca394
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_br_ext.c
@@ -0,0 +1,1199 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_BR_EXT_C_
+
+#include <linux/if_arp.h>
+#include <net/ip.h>
+#include <net/ipx.h>
+#include <linux/atalk.h>
+#include <linux/udp.h>
+#include <linux/if_pppox.h>
+
+#include <drv_types.h>
+#include "rtw_br_ext.h"
+#include <usb_osintf.h>
+#include <recv_osdep.h>
+
+#ifndef csum_ipv6_magic
+#include <net/ip6_checksum.h>
+#endif
+
+#include <linux/ipv6.h>
+#include <linux/icmpv6.h>
+#include <net/ndisc.h>
+#include <net/checksum.h>
+
+#define NAT25_IPV4 01
+#define NAT25_IPV6 02
+#define NAT25_IPX 03
+#define NAT25_APPLE 04
+#define NAT25_PPPOE 05
+
+#define RTL_RELAY_TAG_LEN (ETH_ALEN)
+#define TAG_HDR_LEN 4
+
+#define MAGIC_CODE 0x8186
+#define MAGIC_CODE_LEN 2
+#define WAIT_TIME_PPPOE 5 /* waiting time for pppoe server in sec */
+
+/*-----------------------------------------------------------------
+ How database records network address:
+ 0 1 2 3 4 5 6 7 8 9 10
+ |----|----|----|----|----|----|----|----|----|----|----|
+ IPv4 |type| | IP addr |
+ IPX |type| Net addr | Node addr |
+ IPX |type| Net addr |Sckt addr|
+ Apple |type| Network |node|
+ PPPoE |type| SID | AC MAC |
+-----------------------------------------------------------------*/
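+
+/* Example of the type-tagged keys built by the generator helpers below:
+ *   IPv4  -> byte 0 = NAT25_IPV4,  bytes 7..10 = IP address
+ *   PPPoE -> byte 0 = NAT25_PPPOE, bytes 1..2  = SID, bytes 3..8 = AC MAC
+ *   IPv6  -> byte 0 = NAT25_IPV6,  bytes 1..16 = IPv6 address
+ * (all remaining bytes are zeroed by memset)
+ */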
+
+
+/* Find a tag in pppoe frame and return the pointer */
+static inline unsigned char *__nat25_find_pppoe_tag(struct pppoe_hdr *ph, unsigned short type)
+{
+ unsigned char *cur_ptr, *start_ptr;
+ unsigned short tagLen, tagType;
+
+ start_ptr = cur_ptr = (unsigned char *)ph->tag;
+ while ((cur_ptr - start_ptr) < ntohs(ph->length)) {
+		/* avoid unaligned access */
+ tagType = (unsigned short)((cur_ptr[0] << 8) + cur_ptr[1]);
+ tagLen = (unsigned short)((cur_ptr[2] << 8) + cur_ptr[3]);
+ if (tagType == type)
+ return cur_ptr;
+ cur_ptr = cur_ptr + TAG_HDR_LEN + tagLen;
+ }
+ return NULL;
+}
+
+
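+/* Append a PPPoE tag to a discovery frame: grow the skb, shift the existing
+ * tag area forward, copy the new tag to the front of the tag list and bump
+ * ph->length.  Returns the number of bytes added, or -1 if the skb has no
+ * tailroom. */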
+static inline int __nat25_add_pppoe_tag(struct sk_buff *skb, struct pppoe_tag *tag)
+{
+ struct pppoe_hdr *ph = (struct pppoe_hdr *)(skb->data + ETH_HLEN);
+ int data_len;
+
+ data_len = tag->tag_len + TAG_HDR_LEN;
+ if (skb_tailroom(skb) < data_len) {
+ _DEBUG_ERR("skb_tailroom() failed in add SID tag!\n");
+ return -1;
+ }
+
+ skb_put(skb, data_len);
+	/* make room for the new tag at the front of the tag list */
+ memmove(((unsigned char *)ph->tag + data_len), (unsigned char *)ph->tag, ntohs(ph->length));
+ ph->length = htons(ntohs(ph->length) + data_len);
+ memcpy((unsigned char *)ph->tag, tag, data_len);
+ return data_len;
+}
+
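+/* Remove 'len' bytes starting at 'src' from the middle of the skb by moving
+ * the remaining tail down over them and trimming the skb length.  Returns 0
+ * on success, -1 if the range is out of bounds. */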
+static int skb_pull_and_merge(struct sk_buff *skb, unsigned char *src, int len)
+{
+ int tail_len;
+ unsigned long end, tail;
+
+ if ((src+len) > skb_tail_pointer(skb) || skb->len < len)
+ return -1;
+
+ tail = (unsigned long)skb_tail_pointer(skb);
+ end = (unsigned long)src+len;
+ if (tail < end)
+ return -1;
+
+ tail_len = (int)(tail-end);
+ if (tail_len > 0)
+ memmove(src, src+len, tail_len);
+
+ skb_trim(skb, skb->len-len);
+ return 0;
+}
+
+static inline unsigned long __nat25_timeout(struct adapter *priv)
+{
+ unsigned long timeout;
+
+ timeout = jiffies - NAT25_AGEING_TIME*HZ;
+
+ return timeout;
+}
+
+
+static inline int __nat25_has_expired(struct adapter *priv,
+ struct nat25_network_db_entry *fdb)
+{
+ if (time_before_eq(fdb->ageing_timer, __nat25_timeout(priv)))
+ return 1;
+
+ return 0;
+}
+
+
+static inline void __nat25_generate_ipv4_network_addr(unsigned char *networkAddr,
+ unsigned int *ipAddr)
+{
+ memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
+
+ networkAddr[0] = NAT25_IPV4;
+ memcpy(networkAddr+7, (unsigned char *)ipAddr, 4);
+}
+
+
+static inline void __nat25_generate_ipx_network_addr_with_node(unsigned char *networkAddr,
+ unsigned int *ipxNetAddr, unsigned char *ipxNodeAddr)
+{
+ memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
+
+ networkAddr[0] = NAT25_IPX;
+ memcpy(networkAddr+1, (unsigned char *)ipxNetAddr, 4);
+ memcpy(networkAddr+5, ipxNodeAddr, 6);
+}
+
+
+static inline void __nat25_generate_ipx_network_addr_with_socket(unsigned char *networkAddr,
+ unsigned int *ipxNetAddr, unsigned short *ipxSocketAddr)
+{
+ memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
+
+ networkAddr[0] = NAT25_IPX;
+ memcpy(networkAddr+1, (unsigned char *)ipxNetAddr, 4);
+ memcpy(networkAddr+5, (unsigned char *)ipxSocketAddr, 2);
+}
+
+
+static inline void __nat25_generate_apple_network_addr(unsigned char *networkAddr,
+ unsigned short *network, unsigned char *node)
+{
+ memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
+
+ networkAddr[0] = NAT25_APPLE;
+ memcpy(networkAddr+1, (unsigned char *)network, 2);
+ networkAddr[3] = *node;
+}
+
+static inline void __nat25_generate_pppoe_network_addr(unsigned char *networkAddr,
+ unsigned char *ac_mac, unsigned short *sid)
+{
+ memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
+
+ networkAddr[0] = NAT25_PPPOE;
+ memcpy(networkAddr+1, (unsigned char *)sid, 2);
+ memcpy(networkAddr+3, (unsigned char *)ac_mac, 6);
+}
+
+static void __nat25_generate_ipv6_network_addr(unsigned char *networkAddr,
+ unsigned int *ipAddr)
+{
+ memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
+
+ networkAddr[0] = NAT25_IPV6;
+ memcpy(networkAddr+1, (unsigned char *)ipAddr, 16);
+}
+
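+/* Walk IPv6 neighbour-discovery options (8-octet units of type/len/value)
+ * and return a pointer just past the 2-byte option header of the first
+ * option whose type is 'tag' and whose length field equals 'len8b', or
+ * NULL if none matches. */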
+static unsigned char *scan_tlv(unsigned char *data, int len, unsigned char tag, unsigned char len8b)
+{
+ while (len > 0) {
+ if (*data == tag && *(data+1) == len8b && len >= len8b*8)
+ return data+2;
+
+ len -= (*(data+1))*8;
+ data += (*(data+1))*8;
+ }
+ return NULL;
+}
+
+static int update_nd_link_layer_addr(unsigned char *data, int len, unsigned char *replace_mac)
+{
+ struct icmp6hdr *icmphdr = (struct icmp6hdr *)data;
+ unsigned char *mac;
+
+ if (icmphdr->icmp6_type == NDISC_ROUTER_SOLICITATION) {
+ if (len >= 8) {
+ mac = scan_tlv(&data[8], len-8, 1, 1);
+ if (mac) {
+ _DEBUG_INFO("Router Solicitation, replace MAC From: %02x:%02x:%02x:%02x:%02x:%02x, To: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
+ replace_mac[0], replace_mac[1], replace_mac[2], replace_mac[3], replace_mac[4], replace_mac[5]);
+ memcpy(mac, replace_mac, 6);
+ return 1;
+ }
+ }
+ } else if (icmphdr->icmp6_type == NDISC_ROUTER_ADVERTISEMENT) {
+ if (len >= 16) {
+ mac = scan_tlv(&data[16], len-16, 1, 1);
+ if (mac) {
+ _DEBUG_INFO("Router Advertisement, replace MAC From: %02x:%02x:%02x:%02x:%02x:%02x, To: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
+ replace_mac[0], replace_mac[1], replace_mac[2], replace_mac[3], replace_mac[4], replace_mac[5]);
+ memcpy(mac, replace_mac, 6);
+ return 1;
+ }
+ }
+ } else if (icmphdr->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
+ if (len >= 24) {
+ mac = scan_tlv(&data[24], len-24, 1, 1);
+ if (mac) {
+ _DEBUG_INFO("Neighbor Solicitation, replace MAC From: %02x:%02x:%02x:%02x:%02x:%02x, To: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
+ replace_mac[0], replace_mac[1], replace_mac[2], replace_mac[3], replace_mac[4], replace_mac[5]);
+ memcpy(mac, replace_mac, 6);
+ return 1;
+ }
+ }
+ } else if (icmphdr->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
+ if (len >= 24) {
+ mac = scan_tlv(&data[24], len-24, 2, 1);
+ if (mac) {
+ _DEBUG_INFO("Neighbor Advertisement, replace MAC From: %02x:%02x:%02x:%02x:%02x:%02x, To: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
+ replace_mac[0], replace_mac[1], replace_mac[2], replace_mac[3], replace_mac[4], replace_mac[5]);
+ memcpy(mac, replace_mac, 6);
+ return 1;
+ }
+ }
+ } else if (icmphdr->icmp6_type == NDISC_REDIRECT) {
+ if (len >= 40) {
+ mac = scan_tlv(&data[40], len-40, 2, 1);
+ if (mac) {
+ _DEBUG_INFO("Redirect, replace MAC From: %02x:%02x:%02x:%02x:%02x:%02x, To: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
+ replace_mac[0], replace_mac[1], replace_mac[2], replace_mac[3], replace_mac[4], replace_mac[5]);
+ memcpy(mac, replace_mac, 6);
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
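+/* Hash a type-tagged network address into a NAT25_HASH_SIZE bucket by
+ * XOR-folding the bytes that are meaningful for its protocol family. */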
+static inline int __nat25_network_hash(unsigned char *networkAddr)
+{
+ if (networkAddr[0] == NAT25_IPV4) {
+ unsigned long x;
+
+ x = networkAddr[7] ^ networkAddr[8] ^ networkAddr[9] ^ networkAddr[10];
+
+ return x & (NAT25_HASH_SIZE - 1);
+ } else if (networkAddr[0] == NAT25_IPX) {
+ unsigned long x;
+
+ x = networkAddr[1] ^ networkAddr[2] ^ networkAddr[3] ^ networkAddr[4] ^ networkAddr[5] ^
+ networkAddr[6] ^ networkAddr[7] ^ networkAddr[8] ^ networkAddr[9] ^ networkAddr[10];
+
+ return x & (NAT25_HASH_SIZE - 1);
+ } else if (networkAddr[0] == NAT25_APPLE) {
+ unsigned long x;
+
+ x = networkAddr[1] ^ networkAddr[2] ^ networkAddr[3];
+
+ return x & (NAT25_HASH_SIZE - 1);
+ } else if (networkAddr[0] == NAT25_PPPOE) {
+ unsigned long x;
+
+ x = networkAddr[0] ^ networkAddr[1] ^ networkAddr[2] ^ networkAddr[3] ^ networkAddr[4] ^ networkAddr[5] ^ networkAddr[6] ^ networkAddr[7] ^ networkAddr[8];
+
+ return x & (NAT25_HASH_SIZE - 1);
+ } else if (networkAddr[0] == NAT25_IPV6) {
+ unsigned long x;
+
+ x = networkAddr[1] ^ networkAddr[2] ^ networkAddr[3] ^ networkAddr[4] ^ networkAddr[5] ^
+ networkAddr[6] ^ networkAddr[7] ^ networkAddr[8] ^ networkAddr[9] ^ networkAddr[10] ^
+ networkAddr[11] ^ networkAddr[12] ^ networkAddr[13] ^ networkAddr[14] ^ networkAddr[15] ^
+ networkAddr[16];
+
+ return x & (NAT25_HASH_SIZE - 1);
+ } else {
+ unsigned long x = 0;
+ int i;
+
+ for (i = 0; i < MAX_NETWORK_ADDR_LEN; i++)
+ x ^= networkAddr[i];
+
+ return x & (NAT25_HASH_SIZE - 1);
+ }
+}
+
+static inline void __network_hash_link(struct adapter *priv,
+ struct nat25_network_db_entry *ent, int hash)
+{
+ /* Caller must _enter_critical_bh already! */
+ ent->next_hash = priv->nethash[hash];
+ if (ent->next_hash != NULL)
+ ent->next_hash->pprev_hash = &ent->next_hash;
+ priv->nethash[hash] = ent;
+ ent->pprev_hash = &priv->nethash[hash];
+}
+
+static inline void __network_hash_unlink(struct nat25_network_db_entry *ent)
+{
+ /* Caller must _enter_critical_bh already! */
+ *(ent->pprev_hash) = ent->next_hash;
+ if (ent->next_hash != NULL)
+ ent->next_hash->pprev_hash = ent->pprev_hash;
+ ent->next_hash = NULL;
+ ent->pprev_hash = NULL;
+}
+
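+/* Look up networkAddr in the NAT2.5 hash table.  On a hit that has not yet
+ * expired, rewrite the destination MAC of the skb with the stored MAC and
+ * bump the entry's use count.  Returns 1 if a matching entry exists
+ * (expired or not), 0 otherwise. */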
+static int __nat25_db_network_lookup_and_replace(struct adapter *priv,
+ struct sk_buff *skb, unsigned char *networkAddr)
+{
+ struct nat25_network_db_entry *db;
+ unsigned long irqL;
+ _enter_critical_bh(&priv->br_ext_lock, &irqL);
+
+ db = priv->nethash[__nat25_network_hash(networkAddr)];
+ while (db != NULL) {
+ if (!memcmp(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN)) {
+ if (!__nat25_has_expired(priv, db)) {
+ /* replace the destination mac address */
+ memcpy(skb->data, db->macAddr, ETH_ALEN);
+ atomic_inc(&db->use_count);
+
+ DEBUG_INFO("NAT25: Lookup M:%02x%02x%02x%02x%02x%02x N:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x"
+ "%02x%02x%02x%02x%02x%02x\n",
+ db->macAddr[0],
+ db->macAddr[1],
+ db->macAddr[2],
+ db->macAddr[3],
+ db->macAddr[4],
+ db->macAddr[5],
+ db->networkAddr[0],
+ db->networkAddr[1],
+ db->networkAddr[2],
+ db->networkAddr[3],
+ db->networkAddr[4],
+ db->networkAddr[5],
+ db->networkAddr[6],
+ db->networkAddr[7],
+ db->networkAddr[8],
+ db->networkAddr[9],
+ db->networkAddr[10],
+ db->networkAddr[11],
+ db->networkAddr[12],
+ db->networkAddr[13],
+ db->networkAddr[14],
+ db->networkAddr[15],
+ db->networkAddr[16]);
+ }
+ _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ return 1;
+ }
+ db = db->next_hash;
+ }
+ _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ return 0;
+}
+
+static void __nat25_db_network_insert(struct adapter *priv,
+ unsigned char *macAddr, unsigned char *networkAddr)
+{
+ struct nat25_network_db_entry *db;
+ int hash;
+ unsigned long irqL;
+
+ _enter_critical_bh(&priv->br_ext_lock, &irqL);
+ hash = __nat25_network_hash(networkAddr);
+ db = priv->nethash[hash];
+ while (db != NULL) {
+ if (!memcmp(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN)) {
+ memcpy(db->macAddr, macAddr, ETH_ALEN);
+ db->ageing_timer = jiffies;
+ _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ return;
+ }
+ db = db->next_hash;
+ }
+ db = (struct nat25_network_db_entry *) rtw_malloc(sizeof(*db));
+ if (db == NULL) {
+ _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ return;
+ }
+ memcpy(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN);
+ memcpy(db->macAddr, macAddr, ETH_ALEN);
+ atomic_set(&db->use_count, 1);
+ db->ageing_timer = jiffies;
+
+ __network_hash_link(priv, db, hash);
+
+ _exit_critical_bh(&priv->br_ext_lock, &irqL);
+}
+
+static void __nat25_db_print(struct adapter *priv)
+{
+}
+
+/*
+ * NAT2.5 interface
+ */
+
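+/* Free every entry in the NAT2.5 hash table, invalidating the cached scdb
+ * entry if it happens to be one of them. */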
+void nat25_db_cleanup(struct adapter *priv)
+{
+ int i;
+ unsigned long irqL;
+ _enter_critical_bh(&priv->br_ext_lock, &irqL);
+
+ for (i = 0; i < NAT25_HASH_SIZE; i++) {
+ struct nat25_network_db_entry *f;
+ f = priv->nethash[i];
+ while (f != NULL) {
+ struct nat25_network_db_entry *g;
+
+ g = f->next_hash;
+ if (priv->scdb_entry == f) {
+ memset(priv->scdb_mac, 0, ETH_ALEN);
+ memset(priv->scdb_ip, 0, 4);
+ priv->scdb_entry = NULL;
+ }
+ __network_hash_unlink(f);
+ kfree(f);
+ f = g;
+ }
+ }
+ _exit_critical_bh(&priv->br_ext_lock, &irqL);
+}
+
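+/* Age the database: for every entry older than NAT25_AGEING_TIME, drop one
+ * reference and free it once its use_count reaches zero. */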
+void nat25_db_expire(struct adapter *priv)
+{
+ int i;
+ unsigned long irqL;
+ _enter_critical_bh(&priv->br_ext_lock, &irqL);
+
+ for (i = 0; i < NAT25_HASH_SIZE; i++) {
+ struct nat25_network_db_entry *f;
+ f = priv->nethash[i];
+
+ while (f != NULL) {
+ struct nat25_network_db_entry *g;
+ g = f->next_hash;
+
+ if (__nat25_has_expired(priv, f)) {
+ if (atomic_dec_and_test(&f->use_count)) {
+ if (priv->scdb_entry == f) {
+ memset(priv->scdb_mac, 0, ETH_ALEN);
+ memset(priv->scdb_ip, 0, 4);
+ priv->scdb_entry = NULL;
+ }
+ __network_hash_unlink(f);
+ kfree(f);
+ }
+ }
+ f = g;
+ }
+ }
+ _exit_critical_bh(&priv->br_ext_lock, &irqL);
+}
+
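+/* NAT2.5 dispatcher.  Parses the frame's Ethernet protocol and acts per
+ * 'method': NAT25_INSERT learns the sender's network address into the
+ * database, NAT25_LOOKUP rewrites the destination MAC from it, and
+ * NAT25_CHECK only reports (via the return value) whether the frame is of
+ * interest for the given protocol.  Returns 0 on success, negative
+ * otherwise. */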
+int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
+{
+ unsigned short protocol;
+ unsigned char networkAddr[MAX_NETWORK_ADDR_LEN];
+ unsigned int tmp;
+
+ if (skb == NULL)
+ return -1;
+
+ if ((method <= NAT25_MIN) || (method >= NAT25_MAX))
+ return -1;
+
+ protocol = be16_to_cpu(*((__be16 *)(skb->data + 2 * ETH_ALEN)));
+
+ /*---------------------------------------------------*/
+ /* Handle IP frame */
+ /*---------------------------------------------------*/
+ if (protocol == ETH_P_IP) {
+ struct iphdr *iph = (struct iphdr *)(skb->data + ETH_HLEN);
+
+ if (((unsigned char *)(iph) + (iph->ihl<<2)) >= (skb->data + ETH_HLEN + skb->len)) {
+ DEBUG_WARN("NAT25: malformed IP packet !\n");
+ return -1;
+ }
+
+ switch (method) {
+ case NAT25_CHECK:
+ return -1;
+ case NAT25_INSERT:
+			/* some multicast packets carry an all-zero source IP; other cases may be illegal */
+			/* in class A, B and C networks a host part of all zeros or all ones is illegal */
+ if (iph->saddr == 0)
+ return 0;
+ tmp = be32_to_cpu(iph->saddr);
+ DEBUG_INFO("NAT25: Insert IP, SA =%08x, DA =%08x\n", tmp, iph->daddr);
+ __nat25_generate_ipv4_network_addr(networkAddr, &tmp);
+			/* record the source IP address and source MAC address in the db */
+ __nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
+
+ __nat25_db_print(priv);
+ return 0;
+ case NAT25_LOOKUP:
+ DEBUG_INFO("NAT25: Lookup IP, SA =%08x, DA =%08x\n", iph->saddr, iph->daddr);
+ tmp = be32_to_cpu(iph->daddr);
+ __nat25_generate_ipv4_network_addr(networkAddr, &tmp);
+
+ if (!__nat25_db_network_lookup_and_replace(priv, skb, networkAddr)) {
+ if (*((unsigned char *)&iph->daddr + 3) == 0xff) {
+					/* L2 is unicast but L3 is broadcast, so make L2 broadcast too */
+					DEBUG_INFO("NAT25: Set DA as broadcast\n");
+					memset(skb->data, 0xff, ETH_ALEN);
+				} else {
+					/* forward unknown IP packets to the upper TCP/IP stack */
+ DEBUG_INFO("NAT25: Replace DA with BR's MAC\n");
+ if ((*(u32 *)priv->br_mac) == 0 && (*(u16 *)(priv->br_mac+4)) == 0) {
+ printk("Re-init netdev_br_init() due to br_mac == 0!\n");
+ netdev_br_init(priv->pnetdev);
+ }
+ memcpy(skb->data, priv->br_mac, ETH_ALEN);
+ }
+ }
+ return 0;
+ default:
+ return -1;
+ }
+ } else if (protocol == ETH_P_ARP) {
+ /*---------------------------------------------------*/
+ /* Handle ARP frame */
+ /*---------------------------------------------------*/
+ struct arphdr *arp = (struct arphdr *)(skb->data + ETH_HLEN);
+ unsigned char *arp_ptr = (unsigned char *)(arp + 1);
+ unsigned int *sender, *target;
+
+ if (arp->ar_pro != __constant_htons(ETH_P_IP)) {
+ DEBUG_WARN("NAT25: arp protocol unknown (%4x)!\n", be16_to_cpu(arp->ar_pro));
+ return -1;
+ }
+
+ switch (method) {
+ case NAT25_CHECK:
+			return 0; /* skb_copy for all ARP frames */
+ case NAT25_INSERT:
+ DEBUG_INFO("NAT25: Insert ARP, MAC =%02x%02x%02x%02x%02x%02x\n", arp_ptr[0],
+ arp_ptr[1], arp_ptr[2], arp_ptr[3], arp_ptr[4], arp_ptr[5]);
+
+			/* change the ARP sender MAC address to the wlan STA address */
+ memcpy(arp_ptr, GET_MY_HWADDR(priv), ETH_ALEN);
+ arp_ptr += arp->ar_hln;
+ sender = (unsigned int *)arp_ptr;
+ __nat25_generate_ipv4_network_addr(networkAddr, sender);
+ __nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
+ __nat25_db_print(priv);
+ return 0;
+ case NAT25_LOOKUP:
+ DEBUG_INFO("NAT25: Lookup ARP\n");
+
+ arp_ptr += arp->ar_hln;
+ sender = (unsigned int *)arp_ptr;
+ arp_ptr += (arp->ar_hln + arp->ar_pln);
+ target = (unsigned int *)arp_ptr;
+ __nat25_generate_ipv4_network_addr(networkAddr, target);
+ __nat25_db_network_lookup_and_replace(priv, skb, networkAddr);
+			/* change the ARP target MAC address to the lookup result */
+ arp_ptr = (unsigned char *)(arp + 1);
+ arp_ptr += (arp->ar_hln + arp->ar_pln);
+ memcpy(arp_ptr, skb->data, ETH_ALEN);
+ return 0;
+ default:
+ return -1;
+ }
+ } else if ((protocol == ETH_P_IPX) ||
+ (protocol <= ETH_FRAME_LEN)) {
+ /*---------------------------------------------------*/
+ /* Handle IPX and Apple Talk frame */
+ /*---------------------------------------------------*/
+ unsigned char ipx_header[2] = {0xFF, 0xFF};
+ struct ipxhdr *ipx = NULL;
+ struct elapaarp *ea = NULL;
+ struct ddpehdr *ddp = NULL;
+ unsigned char *framePtr = skb->data + ETH_HLEN;
+
+ if (protocol == ETH_P_IPX) {
+ DEBUG_INFO("NAT25: Protocol = IPX (Ethernet II)\n");
+ ipx = (struct ipxhdr *)framePtr;
+ } else if (protocol <= ETH_FRAME_LEN) {
+ if (!memcmp(ipx_header, framePtr, 2)) {
+ DEBUG_INFO("NAT25: Protocol = IPX (Ethernet 802.3)\n");
+ ipx = (struct ipxhdr *)framePtr;
+ } else {
+ unsigned char ipx_8022_type = 0xE0;
+ unsigned char snap_8022_type = 0xAA;
+
+ if (*framePtr == snap_8022_type) {
+ unsigned char ipx_snap_id[5] = {0x0, 0x0, 0x0, 0x81, 0x37}; /* IPX SNAP ID */
+ unsigned char aarp_snap_id[5] = {0x00, 0x00, 0x00, 0x80, 0xF3}; /* Apple Talk AARP SNAP ID */
+ unsigned char ddp_snap_id[5] = {0x08, 0x00, 0x07, 0x80, 0x9B}; /* Apple Talk DDP SNAP ID */
+
+ framePtr += 3; /* eliminate the 802.2 header */
+
+ if (!memcmp(ipx_snap_id, framePtr, 5)) {
+ framePtr += 5; /* eliminate the SNAP header */
+
+ DEBUG_INFO("NAT25: Protocol = IPX (Ethernet SNAP)\n");
+ ipx = (struct ipxhdr *)framePtr;
+ } else if (!memcmp(aarp_snap_id, framePtr, 5)) {
+ framePtr += 5; /* eliminate the SNAP header */
+
+ ea = (struct elapaarp *)framePtr;
+ } else if (!memcmp(ddp_snap_id, framePtr, 5)) {
+ framePtr += 5; /* eliminate the SNAP header */
+
+ ddp = (struct ddpehdr *)framePtr;
+ } else {
+ DEBUG_WARN("NAT25: Protocol = Ethernet SNAP %02x%02x%02x%02x%02x\n", framePtr[0],
+ framePtr[1], framePtr[2], framePtr[3], framePtr[4]);
+ return -1;
+ }
+ } else if (*framePtr == ipx_8022_type) {
+ framePtr += 3; /* eliminate the 802.2 header */
+
+ if (!memcmp(ipx_header, framePtr, 2)) {
+ DEBUG_INFO("NAT25: Protocol = IPX (Ethernet 802.2)\n");
+ ipx = (struct ipxhdr *)framePtr;
+ } else {
+ return -1;
+ }
+ } else {
+ return -1;
+ }
+ }
+ } else {
+ return -1;
+ }
+
+ /* IPX */
+ if (ipx != NULL) {
+ switch (method) {
+ case NAT25_CHECK:
+				if (!memcmp(skb->data+ETH_ALEN, ipx->ipx_source.node, ETH_ALEN)) {
+					DEBUG_INFO("NAT25: Check IPX skb_copy\n");
+					return 0;
+				}
+				return -1;
+ case NAT25_INSERT:
+ DEBUG_INFO("NAT25: Insert IPX, Dest =%08x,%02x%02x%02x%02x%02x%02x,%04x Source =%08x,%02x%02x%02x%02x%02x%02x,%04x\n",
+ ipx->ipx_dest.net,
+ ipx->ipx_dest.node[0],
+ ipx->ipx_dest.node[1],
+ ipx->ipx_dest.node[2],
+ ipx->ipx_dest.node[3],
+ ipx->ipx_dest.node[4],
+ ipx->ipx_dest.node[5],
+ ipx->ipx_dest.sock,
+ ipx->ipx_source.net,
+ ipx->ipx_source.node[0],
+ ipx->ipx_source.node[1],
+ ipx->ipx_source.node[2],
+ ipx->ipx_source.node[3],
+ ipx->ipx_source.node[4],
+ ipx->ipx_source.node[5],
+ ipx->ipx_source.sock);
+
+ if (!memcmp(skb->data+ETH_ALEN, ipx->ipx_source.node, ETH_ALEN)) {
+					DEBUG_INFO("NAT25: Use IPX Net and Socket as network addr\n");
+
+ __nat25_generate_ipx_network_addr_with_socket(networkAddr, &ipx->ipx_source.net, &ipx->ipx_source.sock);
+
+ /* change IPX source node addr to wlan STA address */
+ memcpy(ipx->ipx_source.node, GET_MY_HWADDR(priv), ETH_ALEN);
+ } else {
+ __nat25_generate_ipx_network_addr_with_node(networkAddr, &ipx->ipx_source.net, ipx->ipx_source.node);
+ }
+ __nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
+ __nat25_db_print(priv);
+ return 0;
+ case NAT25_LOOKUP:
+ if (!memcmp(GET_MY_HWADDR(priv), ipx->ipx_dest.node, ETH_ALEN)) {
+ DEBUG_INFO("NAT25: Lookup IPX, Modify Destination IPX Node addr\n");
+
+ __nat25_generate_ipx_network_addr_with_socket(networkAddr, &ipx->ipx_dest.net, &ipx->ipx_dest.sock);
+
+ __nat25_db_network_lookup_and_replace(priv, skb, networkAddr);
+
+ /* replace IPX destination node addr with Lookup destination MAC addr */
+ memcpy(ipx->ipx_dest.node, skb->data, ETH_ALEN);
+ } else {
+ __nat25_generate_ipx_network_addr_with_node(networkAddr, &ipx->ipx_dest.net, ipx->ipx_dest.node);
+
+ __nat25_db_network_lookup_and_replace(priv, skb, networkAddr);
+ }
+ return 0;
+ default:
+ return -1;
+ }
+ } else if (ea != NULL) {
+ /* Sanity check fields. */
+ if (ea->hw_len != ETH_ALEN || ea->pa_len != AARP_PA_ALEN) {
+ DEBUG_WARN("NAT25: Appletalk AARP Sanity check fail!\n");
+ return -1;
+ }
+
+ switch (method) {
+ case NAT25_CHECK:
+ return 0;
+ case NAT25_INSERT:
+				/* change the AARP source MAC address to the wlan STA address */
+ memcpy(ea->hw_src, GET_MY_HWADDR(priv), ETH_ALEN);
+
+ DEBUG_INFO("NAT25: Insert AARP, Source =%d,%d Destination =%d,%d\n",
+ ea->pa_src_net,
+ ea->pa_src_node,
+ ea->pa_dst_net,
+ ea->pa_dst_node);
+
+ __nat25_generate_apple_network_addr(networkAddr, &ea->pa_src_net, &ea->pa_src_node);
+
+ __nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
+
+ __nat25_db_print(priv);
+ return 0;
+ case NAT25_LOOKUP:
+ DEBUG_INFO("NAT25: Lookup AARP, Source =%d,%d Destination =%d,%d\n",
+ ea->pa_src_net,
+ ea->pa_src_node,
+ ea->pa_dst_net,
+ ea->pa_dst_node);
+
+ __nat25_generate_apple_network_addr(networkAddr, &ea->pa_dst_net, &ea->pa_dst_node);
+
+ __nat25_db_network_lookup_and_replace(priv, skb, networkAddr);
+
+				/* change the AARP destination MAC address to the lookup result */
+ memcpy(ea->hw_dst, skb->data, ETH_ALEN);
+ return 0;
+ default:
+ return -1;
+ }
+ } else if (ddp != NULL) {
+ switch (method) {
+ case NAT25_CHECK:
+ return -1;
+ case NAT25_INSERT:
+ DEBUG_INFO("NAT25: Insert DDP, Source =%d,%d Destination =%d,%d\n",
+ ddp->deh_snet,
+ ddp->deh_snode,
+ ddp->deh_dnet,
+ ddp->deh_dnode);
+
+ __nat25_generate_apple_network_addr(networkAddr, &ddp->deh_snet, &ddp->deh_snode);
+
+ __nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
+
+ __nat25_db_print(priv);
+ return 0;
+ case NAT25_LOOKUP:
+ DEBUG_INFO("NAT25: Lookup DDP, Source =%d,%d Destination =%d,%d\n",
+ ddp->deh_snet,
+ ddp->deh_snode,
+ ddp->deh_dnet,
+ ddp->deh_dnode);
+ __nat25_generate_apple_network_addr(networkAddr, &ddp->deh_dnet, &ddp->deh_dnode);
+ __nat25_db_network_lookup_and_replace(priv, skb, networkAddr);
+ return 0;
+ default:
+ return -1;
+ }
+ }
+
+ return -1;
+ } else if ((protocol == ETH_P_PPP_DISC) ||
+ (protocol == ETH_P_PPP_SES)) {
+ /*---------------------------------------------------*/
+ /* Handle PPPoE frame */
+ /*---------------------------------------------------*/
+ struct pppoe_hdr *ph = (struct pppoe_hdr *)(skb->data + ETH_HLEN);
+ unsigned short *pMagic;
+
+ switch (method) {
+ case NAT25_CHECK:
+ if (ph->sid == 0)
+ return 0;
+ return 1;
+ case NAT25_INSERT:
+ if (ph->sid == 0) { /* Discovery phase according to tag */
+ if (ph->code == PADI_CODE || ph->code == PADR_CODE) {
+ if (priv->ethBrExtInfo.addPPPoETag) {
+ struct pppoe_tag *tag, *pOldTag;
+ unsigned char tag_buf[40];
+ int old_tag_len = 0;
+
+ tag = (struct pppoe_tag *)tag_buf;
+ pOldTag = (struct pppoe_tag *)__nat25_find_pppoe_tag(ph, ntohs(PTT_RELAY_SID));
+						if (pOldTag) { /* if a SID tag already exists, copy the old value and delete it */
+ old_tag_len = ntohs(pOldTag->tag_len);
+ if (old_tag_len+TAG_HDR_LEN+MAGIC_CODE_LEN+RTL_RELAY_TAG_LEN > sizeof(tag_buf)) {
+ DEBUG_ERR("SID tag length too long!\n");
+ return -1;
+ }
+
+ memcpy(tag->tag_data+MAGIC_CODE_LEN+RTL_RELAY_TAG_LEN,
+ pOldTag->tag_data, old_tag_len);
+
+ if (skb_pull_and_merge(skb, (unsigned char *)pOldTag, TAG_HDR_LEN+old_tag_len) < 0) {
+ DEBUG_ERR("call skb_pull_and_merge() failed in PADI/R packet!\n");
+ return -1;
+ }
+ ph->length = htons(ntohs(ph->length)-TAG_HDR_LEN-old_tag_len);
+ }
+
+ tag->tag_type = PTT_RELAY_SID;
+ tag->tag_len = htons(MAGIC_CODE_LEN+RTL_RELAY_TAG_LEN+old_tag_len);
+
+ /* insert the magic_code+client mac in relay tag */
+ pMagic = (unsigned short *)tag->tag_data;
+ *pMagic = htons(MAGIC_CODE);
+ memcpy(tag->tag_data+MAGIC_CODE_LEN, skb->data+ETH_ALEN, ETH_ALEN);
+
+ /* Add relay tag */
+ if (__nat25_add_pppoe_tag(skb, tag) < 0)
+ return -1;
+
+ DEBUG_INFO("NAT25: Insert PPPoE, forward %s packet\n",
+ (ph->code == PADI_CODE ? "PADI" : "PADR"));
+					} else { /* do not add a relay tag */
+ if (priv->pppoe_connection_in_progress &&
+ memcmp(skb->data+ETH_ALEN, priv->pppoe_addr, ETH_ALEN)) {
+							DEBUG_ERR("Discard PPPoE packet because another PPPoE connection is in progress!\n");
+ return -2;
+ }
+
+ if (priv->pppoe_connection_in_progress == 0)
+ memcpy(priv->pppoe_addr, skb->data+ETH_ALEN, ETH_ALEN);
+
+ priv->pppoe_connection_in_progress = WAIT_TIME_PPPOE;
+ }
+ } else {
+ return -1;
+ }
+ } else { /* session phase */
+ DEBUG_INFO("NAT25: Insert PPPoE, insert session packet to %s\n", skb->dev->name);
+
+ __nat25_generate_pppoe_network_addr(networkAddr, skb->data, &(ph->sid));
+
+ __nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
+
+ __nat25_db_print(priv);
+
+ if (!priv->ethBrExtInfo.addPPPoETag &&
+ priv->pppoe_connection_in_progress &&
+ !memcmp(skb->data+ETH_ALEN, priv->pppoe_addr, ETH_ALEN))
+ priv->pppoe_connection_in_progress = 0;
+ }
+ return 0;
+ case NAT25_LOOKUP:
+ if (ph->code == PADO_CODE || ph->code == PADS_CODE) {
+ if (priv->ethBrExtInfo.addPPPoETag) {
+ struct pppoe_tag *tag;
+ unsigned char *ptr;
+ unsigned short tagType, tagLen;
+ int offset = 0;
+
+ ptr = __nat25_find_pppoe_tag(ph, ntohs(PTT_RELAY_SID));
+ if (ptr == NULL) {
+						DEBUG_ERR("Failed to find PTT_RELAY_SID in PADO!\n");
+ return -1;
+ }
+
+ tag = (struct pppoe_tag *)ptr;
+ tagType = (unsigned short)((ptr[0] << 8) + ptr[1]);
+ tagLen = (unsigned short)((ptr[2] << 8) + ptr[3]);
+
+ if ((tagType != ntohs(PTT_RELAY_SID)) || (tagLen < (MAGIC_CODE_LEN+RTL_RELAY_TAG_LEN))) {
+ DEBUG_ERR("Invalid PTT_RELAY_SID tag length [%d]!\n", tagLen);
+ return -1;
+ }
+
+ pMagic = (unsigned short *)tag->tag_data;
+ if (ntohs(*pMagic) != MAGIC_CODE) {
+ DEBUG_ERR("Can't find MAGIC_CODE in %s packet!\n",
+ (ph->code == PADO_CODE ? "PADO" : "PADS"));
+ return -1;
+ }
+
+ memcpy(skb->data, tag->tag_data+MAGIC_CODE_LEN, ETH_ALEN);
+
+ if (tagLen > MAGIC_CODE_LEN+RTL_RELAY_TAG_LEN)
+ offset = TAG_HDR_LEN;
+
+ if (skb_pull_and_merge(skb, ptr+offset, TAG_HDR_LEN+MAGIC_CODE_LEN+RTL_RELAY_TAG_LEN-offset) < 0) {
+ DEBUG_ERR("call skb_pull_and_merge() failed in PADO packet!\n");
+ return -1;
+ }
+ ph->length = htons(ntohs(ph->length)-(TAG_HDR_LEN+MAGIC_CODE_LEN+RTL_RELAY_TAG_LEN-offset));
+ if (offset > 0)
+ tag->tag_len = htons(tagLen-MAGIC_CODE_LEN-RTL_RELAY_TAG_LEN);
+
+ DEBUG_INFO("NAT25: Lookup PPPoE, forward %s Packet from %s\n",
+ (ph->code == PADO_CODE ? "PADO" : "PADS"), skb->dev->name);
+				} else { /* do not add a relay tag */
+ if (!priv->pppoe_connection_in_progress) {
+						DEBUG_ERR("Discard PPPoE packet because no PPPoE connection is in progress!\n");
+ return -1;
+ }
+ memcpy(skb->data, priv->pppoe_addr, ETH_ALEN);
+ priv->pppoe_connection_in_progress = WAIT_TIME_PPPOE;
+ }
+ } else {
+ if (ph->sid != 0) {
+ DEBUG_INFO("NAT25: Lookup PPPoE, lookup session packet from %s\n", skb->dev->name);
+ __nat25_generate_pppoe_network_addr(networkAddr, skb->data+ETH_ALEN, &(ph->sid));
+ __nat25_db_network_lookup_and_replace(priv, skb, networkAddr);
+ __nat25_db_print(priv);
+ } else {
+ return -1;
+ }
+ }
+ return 0;
+ default:
+ return -1;
+ }
+ } else if (protocol == 0x888e) {
+ /*---------------------------------------------------*/
+ /* Handle EAP frame */
+ /*---------------------------------------------------*/
+ switch (method) {
+ case NAT25_CHECK:
+ return -1;
+ case NAT25_INSERT:
+ return 0;
+ case NAT25_LOOKUP:
+ return 0;
+ default:
+ return -1;
+ }
+ } else if ((protocol == 0xe2ae) || (protocol == 0xe2af)) {
+ /*---------------------------------------------------*/
+ /* Handle C-Media proprietary frame */
+ /*---------------------------------------------------*/
+ switch (method) {
+ case NAT25_CHECK:
+ return -1;
+ case NAT25_INSERT:
+ return 0;
+ case NAT25_LOOKUP:
+ return 0;
+ default:
+ return -1;
+ }
+ } else if (protocol == ETH_P_IPV6) {
+ /*------------------------------------------------*/
+ /* Handle IPV6 frame */
+ /*------------------------------------------------*/
+ struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+
+ if (sizeof(*iph) >= (skb->len - ETH_HLEN)) {
+ DEBUG_WARN("NAT25: malformed IPv6 packet !\n");
+ return -1;
+ }
+
+ switch (method) {
+ case NAT25_CHECK:
+ if (skb->data[0] & 1)
+ return 0;
+ return -1;
+ case NAT25_INSERT:
+ DEBUG_INFO("NAT25: Insert IP, SA =%4x:%4x:%4x:%4x:%4x:%4x:%4x:%4x,"
+ " DA =%4x:%4x:%4x:%4x:%4x:%4x:%4x:%4x\n",
+ iph->saddr.s6_addr16[0], iph->saddr.s6_addr16[1], iph->saddr.s6_addr16[2], iph->saddr.s6_addr16[3],
+ iph->saddr.s6_addr16[4], iph->saddr.s6_addr16[5], iph->saddr.s6_addr16[6], iph->saddr.s6_addr16[7],
+ iph->daddr.s6_addr16[0], iph->daddr.s6_addr16[1], iph->daddr.s6_addr16[2], iph->daddr.s6_addr16[3],
+ iph->daddr.s6_addr16[4], iph->daddr.s6_addr16[5], iph->daddr.s6_addr16[6], iph->daddr.s6_addr16[7]);
+
+ if (memcmp(&iph->saddr, "\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0", 16)) {
+ __nat25_generate_ipv6_network_addr(networkAddr, (unsigned int *)&iph->saddr);
+ __nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
+ __nat25_db_print(priv);
+
+ if (iph->nexthdr == IPPROTO_ICMPV6 &&
+ skb->len > (ETH_HLEN + sizeof(*iph) + 4)) {
+ if (update_nd_link_layer_addr(skb->data + ETH_HLEN + sizeof(*iph),
+ skb->len - ETH_HLEN - sizeof(*iph), GET_MY_HWADDR(priv))) {
+ struct icmp6hdr *hdr = (struct icmp6hdr *)(skb->data + ETH_HLEN + sizeof(*iph));
+ hdr->icmp6_cksum = 0;
+ hdr->icmp6_cksum = csum_ipv6_magic(&iph->saddr, &iph->daddr,
+ iph->payload_len,
+ IPPROTO_ICMPV6,
+ csum_partial((__u8 *)hdr, iph->payload_len, 0));
+ }
+ }
+ }
+ return 0;
+ case NAT25_LOOKUP:
+ DEBUG_INFO("NAT25: Lookup IP, SA =%4x:%4x:%4x:%4x:%4x:%4x:%4x:%4x, DA =%4x:%4x:%4x:%4x:%4x:%4x:%4x:%4x\n",
+ iph->saddr.s6_addr16[0], iph->saddr.s6_addr16[1], iph->saddr.s6_addr16[2], iph->saddr.s6_addr16[3],
+ iph->saddr.s6_addr16[4], iph->saddr.s6_addr16[5], iph->saddr.s6_addr16[6], iph->saddr.s6_addr16[7],
+ iph->daddr.s6_addr16[0], iph->daddr.s6_addr16[1], iph->daddr.s6_addr16[2], iph->daddr.s6_addr16[3],
+ iph->daddr.s6_addr16[4], iph->daddr.s6_addr16[5], iph->daddr.s6_addr16[6], iph->daddr.s6_addr16[7]);
+ __nat25_generate_ipv6_network_addr(networkAddr, (unsigned int *)&iph->daddr);
+ __nat25_db_network_lookup_and_replace(priv, skb, networkAddr);
+ return 0;
+ default:
+ return -1;
+ }
+ }
+ return -1;
+}
+
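+/* For unicast frames: temporarily strip any 802.1Q tag, rewrite the
+ * destination MAC (from the single-client scdb short cut or a full
+ * NAT25_LOOKUP when NAT2.5 is enabled; only for traffic addressed to the
+ * bridge itself when it is disabled), then restore the VLAN tag.
+ * Returns -1 if the lookup fails, 0 otherwise. */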
+int nat25_handle_frame(struct adapter *priv, struct sk_buff *skb)
+{
+ if (!(skb->data[0] & 1)) {
+ int is_vlan_tag = 0, i, retval = 0;
+ unsigned short vlan_hdr = 0;
+ unsigned short protocol;
+
+ protocol = be16_to_cpu(*((__be16 *)(skb->data + 2 * ETH_ALEN)));
+ if (protocol == ETH_P_8021Q) {
+ is_vlan_tag = 1;
+ vlan_hdr = *((unsigned short *)(skb->data+ETH_ALEN*2+2));
+ for (i = 0; i < 6; i++)
+ *((unsigned short *)(skb->data+ETH_ALEN*2+2-i*2)) = *((unsigned short *)(skb->data+ETH_ALEN*2-2-i*2));
+ skb_pull(skb, 4);
+ }
+
+ if (!priv->ethBrExtInfo.nat25_disable) {
+ unsigned long irqL;
+ _enter_critical_bh(&priv->br_ext_lock, &irqL);
+			/*
+			 * Look up the destination network address in the NAT2.5
+			 * database.  A return value of -1 means the corresponding
+			 * network protocol is not supported.
+			 */
+ if (!priv->ethBrExtInfo.nat25sc_disable &&
+ (be16_to_cpu(*((__be16 *)(skb->data+ETH_ALEN*2))) == ETH_P_IP) &&
+ !memcmp(priv->scdb_ip, skb->data+ETH_HLEN+16, 4)) {
+ memcpy(skb->data, priv->scdb_mac, ETH_ALEN);
+
+ _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ } else {
+ _exit_critical_bh(&priv->br_ext_lock, &irqL);
+
+ retval = nat25_db_handle(priv, skb, NAT25_LOOKUP);
+ }
+ } else {
+ if (((be16_to_cpu(*((__be16 *)(skb->data+ETH_ALEN*2))) == ETH_P_IP) &&
+ !memcmp(priv->br_ip, skb->data+ETH_HLEN+16, 4)) ||
+ ((be16_to_cpu(*((__be16 *)(skb->data+ETH_ALEN*2))) == ETH_P_ARP) &&
+ !memcmp(priv->br_ip, skb->data+ETH_HLEN+24, 4))) {
+ /* for traffic to upper TCP/IP */
+ retval = nat25_db_handle(priv, skb, NAT25_LOOKUP);
+ }
+ }
+
+ if (is_vlan_tag) {
+ skb_push(skb, 4);
+ for (i = 0; i < 6; i++)
+ *((unsigned short *)(skb->data+i*2)) = *((unsigned short *)(skb->data+4+i*2));
+ *((__be16 *)(skb->data+ETH_ALEN*2)) = __constant_htons(ETH_P_8021Q);
+ *((unsigned short *)(skb->data+ETH_ALEN*2+2)) = vlan_hdr;
+ }
+
+ if (retval == -1) {
+ /* DEBUG_ERR("NAT25: Lookup fail!\n"); */
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+#define SERVER_PORT 67
+#define CLIENT_PORT 68
+#define DHCP_MAGIC 0x63825363
+#define BROADCAST_FLAG 0x8000
+
+struct dhcpMessage {
+ u_int8_t op;
+ u_int8_t htype;
+ u_int8_t hlen;
+ u_int8_t hops;
+ u_int32_t xid;
+ u_int16_t secs;
+ u_int16_t flags;
+ u_int32_t ciaddr;
+ u_int32_t yiaddr;
+ u_int32_t siaddr;
+ u_int32_t giaddr;
+ u_int8_t chaddr[16];
+ u_int8_t sname[64];
+ u_int8_t file[128];
+ u_int32_t cookie;
+ u_int8_t options[308]; /* 312 - cookie */
+};
+
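+/* If the frame is a DHCP request from a bridged client, set the BROADCAST
+ * flag (presumably so the server's reply is broadcast and still reaches
+ * the client behind the MAC-translating bridge) and patch the UDP
+ * checksum accordingly. */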
+void dhcp_flag_bcast(struct adapter *priv, struct sk_buff *skb)
+{
+ if (skb == NULL)
+ return;
+
+ if (!priv->ethBrExtInfo.dhcp_bcst_disable) {
+ __be16 protocol = *((__be16 *)(skb->data + 2 * ETH_ALEN));
+
+ if (protocol == __constant_htons(ETH_P_IP)) { /* IP */
+ struct iphdr *iph = (struct iphdr *)(skb->data + ETH_HLEN);
+
+ if (iph->protocol == IPPROTO_UDP) { /* UDP */
+ struct udphdr *udph = (struct udphdr *)((size_t)iph + (iph->ihl << 2));
+
+ if ((udph->source == __constant_htons(CLIENT_PORT)) &&
+ (udph->dest == __constant_htons(SERVER_PORT))) { /* DHCP request */
+ struct dhcpMessage *dhcph =
+ (struct dhcpMessage *)((size_t)udph + sizeof(struct udphdr));
+ u32 cookie = be32_to_cpu((__be32)dhcph->cookie);
+
+ if (cookie == DHCP_MAGIC) { /* match magic word */
+ if (!(dhcph->flags & htons(BROADCAST_FLAG))) {
+ /* if not broadcast */
+ register int sum = 0;
+
+ DEBUG_INFO("DHCP: change flag of DHCP request to broadcast.\n");
+							/* OR in the BROADCAST flag */
+ dhcph->flags |= htons(BROADCAST_FLAG);
+							/* patch the UDP checksum incrementally: fold the
+							 * new flags word into the one's-complement sum */
+ sum = ~(udph->check) & 0xffff;
+ sum += be16_to_cpu(dhcph->flags);
+ while (sum >> 16)
+ sum = (sum & 0xffff) + (sum >> 16);
+ udph->check = ~sum;
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+
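+/* Find the NAT2.5 entry for the given IPv4 address (macAddr is currently
+ * unused).  Callers presumably cache the result as priv->scdb_entry; note
+ * that the hash chain is walked without taking br_ext_lock, matching the
+ * commented-out locking below.  Returns the entry or NULL. */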
+void *scdb_findEntry(struct adapter *priv, unsigned char *macAddr,
+ unsigned char *ipAddr)
+{
+ unsigned char networkAddr[MAX_NETWORK_ADDR_LEN];
+ struct nat25_network_db_entry *db;
+ int hash;
+ /* unsigned long irqL; */
+ /* _enter_critical_bh(&priv->br_ext_lock, &irqL); */
+
+ __nat25_generate_ipv4_network_addr(networkAddr, (unsigned int *)ipAddr);
+ hash = __nat25_network_hash(networkAddr);
+ db = priv->nethash[hash];
+ while (db != NULL) {
+ if (!memcmp(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN)) {
+ /* _exit_critical_bh(&priv->br_ext_lock, &irqL); */
+ return (void *)db;
+ }
+
+ db = db->next_hash;
+ }
+
+ /* _exit_critical_bh(&priv->br_ext_lock, &irqL); */
+ return NULL;
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
new file mode 100644
index 0000000..9632ef4
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -0,0 +1,2364 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_CMD_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <recv_osdep.h>
+#include <cmd_osdep.h>
+#include <mlme_osdep.h>
+#include <rtw_br_ext.h>
+#include <rtw_mlme_ext.h>
+
+/*
+ * The caller and rtw_cmd_thread protect cmd_q with a plain spin_lock;
+ * no irqsave variant is necessary.
+ */
+
+int _rtw_init_cmd_priv (struct cmd_priv *pcmdpriv)
+{
+ int res = _SUCCESS;
+
+_func_enter_;
+
+ _rtw_init_sema(&(pcmdpriv->cmd_queue_sema), 0);
+ /* _rtw_init_sema(&(pcmdpriv->cmd_done_sema), 0); */
+ _rtw_init_sema(&(pcmdpriv->terminate_cmdthread_sema), 0);
+
+
+ _rtw_init_queue(&(pcmdpriv->cmd_queue));
+
+ /* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
+
+ pcmdpriv->cmd_seq = 1;
+
+ pcmdpriv->cmd_allocated_buf = rtw_zmalloc(MAX_CMDSZ + CMDBUFF_ALIGN_SZ);
+
+ if (pcmdpriv->cmd_allocated_buf == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pcmdpriv->cmd_buf = pcmdpriv->cmd_allocated_buf + CMDBUFF_ALIGN_SZ - ((size_t)(pcmdpriv->cmd_allocated_buf) & (CMDBUFF_ALIGN_SZ-1));
+
+ pcmdpriv->rsp_allocated_buf = rtw_zmalloc(MAX_RSPSZ + 4);
+
+ if (pcmdpriv->rsp_allocated_buf == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 - ((size_t)(pcmdpriv->rsp_allocated_buf) & 3);
+
+ pcmdpriv->cmd_issued_cnt = 0;
+ pcmdpriv->cmd_done_cnt = 0;
+ pcmdpriv->rsp_cnt = 0;
+exit:
+_func_exit_;
+ return res;
+}
+
+static void c2h_wk_callback(struct work_struct *work);
+
+int _rtw_init_evt_priv(struct evt_priv *pevtpriv)
+{
+ int res = _SUCCESS;
+
+_func_enter_;
+
+ /* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
+ ATOMIC_SET(&pevtpriv->event_seq, 0);
+ pevtpriv->evt_done_cnt = 0;
+
+ _init_workitem(&pevtpriv->c2h_wk, c2h_wk_callback, NULL);
+ pevtpriv->c2h_wk_alive = false;
+ pevtpriv->c2h_queue = rtw_cbuf_alloc(C2H_QUEUE_MAX_LEN+1);
+
+_func_exit_;
+
+ return res;
+}
+
+void rtw_free_evt_priv(struct evt_priv *pevtpriv)
+{
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("+rtw_free_evt_priv\n"));
+
+ _cancel_workitem_sync(&pevtpriv->c2h_wk);
+ while (pevtpriv->c2h_wk_alive)
+ rtw_msleep_os(10);
+
+ while (!rtw_cbuf_empty(pevtpriv->c2h_queue)) {
+ void *c2h = rtw_cbuf_pop(pevtpriv->c2h_queue);
+ if (c2h != NULL && c2h != (void *)pevtpriv)
+ kfree(c2h);
+ }
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("-rtw_free_evt_priv\n"));
+
+_func_exit_;
+}
+
+void _rtw_free_cmd_priv (struct cmd_priv *pcmdpriv)
+{
+_func_enter_;
+
+ if (pcmdpriv) {
+ _rtw_spinlock_free(&(pcmdpriv->cmd_queue.lock));
+ _rtw_free_sema(&(pcmdpriv->cmd_queue_sema));
+ _rtw_free_sema(&(pcmdpriv->terminate_cmdthread_sema));
+
+ if (pcmdpriv->cmd_allocated_buf)
+ kfree(pcmdpriv->cmd_allocated_buf);
+
+ if (pcmdpriv->rsp_allocated_buf)
+ kfree(pcmdpriv->rsp_allocated_buf);
+ }
+_func_exit_;
+}
+
+/*
+ * Calling context:
+ *
+ * rtw_enqueue_cmd can only be called from kernel thread context,
+ * since only a plain spin_lock is used.
+ *
+ * ISR/callback functions must not call this sub-function.
+ */
+
+int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj)
+{
+ unsigned long irqL;
+
+_func_enter_;
+
+ if (obj == NULL)
+ goto exit;
+
+ /* _enter_critical_bh(&queue->lock, &irqL); */
+ _enter_critical(&queue->lock, &irqL);
+
+ rtw_list_insert_tail(&obj->list, &queue->queue);
+
+ /* _exit_critical_bh(&queue->lock, &irqL); */
+ _exit_critical(&queue->lock, &irqL);
+
+exit:
+
+_func_exit_;
+
+ return _SUCCESS;
+}
+
+struct cmd_obj *_rtw_dequeue_cmd(struct __queue *queue)
+{
+ unsigned long irqL;
+ struct cmd_obj *obj;
+
+_func_enter_;
+
+ /* _enter_critical_bh(&(queue->lock), &irqL); */
+ _enter_critical(&queue->lock, &irqL);
+ if (rtw_is_list_empty(&(queue->queue))) {
+ obj = NULL;
+ } else {
+ obj = LIST_CONTAINOR(get_next(&(queue->queue)), struct cmd_obj, list);
+ rtw_list_delete(&obj->list);
+ }
+
+ /* _exit_critical_bh(&(queue->lock), &irqL); */
+ _exit_critical(&queue->lock, &irqL);
+
+_func_exit_;
+
+ return obj;
+}
+
+u32 rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
+{
+ u32 res;
+_func_enter_;
+ res = _rtw_init_cmd_priv (pcmdpriv);
+_func_exit_;
+ return res;
+}
+
+u32 rtw_init_evt_priv (struct evt_priv *pevtpriv)
+{
+ int res;
+_func_enter_;
+ res = _rtw_init_evt_priv(pevtpriv);
+_func_exit_;
+ return res;
+}
+
+void rtw_free_cmd_priv(struct cmd_priv *pcmdpriv)
+{
+_func_enter_;
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("rtw_free_cmd_priv\n"));
+ _rtw_free_cmd_priv(pcmdpriv);
+_func_exit_;
+}
+
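+/* Decide whether a command may be enqueued: the command thread must be
+ * running, and the hardware must be initialised unless the command is the
+ * channel-plan command or (with HW power-pin detect on and USB selective
+ * suspend off) the power-saving work item. */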
+int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
+{
+ u8 bAllow = false; /* set to true to allow enqueuing cmd when hw_init_completed is false */
+
+ /* To decide allow or not */
+ if ((pcmdpriv->padapter->pwrctrlpriv.bHWPwrPindetect) &&
+ (!pcmdpriv->padapter->registrypriv.usbss_enable)) {
+ if (cmd_obj->cmdcode == GEN_CMD_CODE(_Set_Drv_Extra)) {
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)cmd_obj->parmbuf;
+ if (pdrvextra_cmd_parm->ec_id == POWER_SAVING_CTRL_WK_CID)
+ bAllow = true;
+ }
+ }
+
+ if (cmd_obj->cmdcode == GEN_CMD_CODE(_SetChannelPlan))
+ bAllow = true;
+
+ if ((!pcmdpriv->padapter->hw_init_completed && !bAllow) ||
+ !pcmdpriv->cmdthd_running) /* com_thread not running */
+ return _FAIL;
+ return _SUCCESS;
+}
+
+u32 rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
+{
+ int res = _FAIL;
+ struct adapter *padapter = pcmdpriv->padapter;
+
+_func_enter_;
+
+ if (cmd_obj == NULL)
+ goto exit;
+
+ cmd_obj->padapter = padapter;
+
+ res = rtw_cmd_filter(pcmdpriv, cmd_obj);
+ if (_FAIL == res) {
+ rtw_free_cmd_obj(cmd_obj);
+ goto exit;
+ }
+
+ res = _rtw_enqueue_cmd(&pcmdpriv->cmd_queue, cmd_obj);
+
+ if (res == _SUCCESS)
+ _rtw_up_sema(&pcmdpriv->cmd_queue_sema);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+struct cmd_obj *rtw_dequeue_cmd(struct cmd_priv *pcmdpriv)
+{
+ struct cmd_obj *cmd_obj;
+
+_func_enter_;
+
+ cmd_obj = _rtw_dequeue_cmd(&pcmdpriv->cmd_queue);
+
+_func_exit_;
+ return cmd_obj;
+}
+
+void rtw_cmd_clr_isr(struct cmd_priv *pcmdpriv)
+{
+_func_enter_;
+ pcmdpriv->cmd_done_cnt++;
+ /* _rtw_up_sema(&(pcmdpriv->cmd_done_sema)); */
+_func_exit_;
+}
+
+void rtw_free_cmd_obj(struct cmd_obj *pcmd)
+{
+_func_enter_;
+
+ if ((pcmd->cmdcode != _JoinBss_CMD_) && (pcmd->cmdcode != _CreateBss_CMD_)) {
+ /* free parmbuf in cmd_obj */
+ kfree(pcmd->parmbuf);
+ }
+
+ if (pcmd->rsp != NULL) {
+ if (pcmd->rspsz != 0) {
+ /* free rsp in cmd_obj */
+ kfree(pcmd->rsp);
+ }
+ }
+
+ /* free cmd_obj */
+ kfree(pcmd);
+
+_func_exit_;
+}
+
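+/* Command thread: block on cmd_queue_sema, dequeue commands, copy their
+ * parameters into the aligned cmd_buf, dispatch through the matching
+ * wlancmds[] handler, then run the rtw_cmd_callback entry (which may free
+ * the cmd_obj) or free it here.  On exit the remaining queue is drained
+ * and terminate_cmdthread_sema is signalled. */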
+int rtw_cmd_thread(void *context)
+{
+ u8 ret;
+ struct cmd_obj *pcmd;
+ u8 *pcmdbuf;
+ u8 (*cmd_hdl)(struct adapter *padapter, u8 *pbuf);
+ void (*pcmd_callback)(struct adapter *dev, struct cmd_obj *pcmd);
+ struct adapter *padapter = (struct adapter *)context;
+ struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
+
+_func_enter_;
+
+ thread_enter("RTW_CMD_THREAD");
+
+ pcmdbuf = pcmdpriv->cmd_buf;
+
+ pcmdpriv->cmdthd_running = true;
+ _rtw_up_sema(&pcmdpriv->terminate_cmdthread_sema);
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("start r871x rtw_cmd_thread !!!!\n"));
+
+ while (1) {
+ if (_rtw_down_sema(&pcmdpriv->cmd_queue_sema) == _FAIL)
+ break;
+
+ if (padapter->bDriverStopped ||
+ padapter->bSurpriseRemoved) {
+ DBG_88E("%s: DriverStopped(%d) SurpriseRemoved(%d) break at line %d\n",
+ __func__, padapter->bDriverStopped, padapter->bSurpriseRemoved, __LINE__);
+ break;
+ }
+_next:
+ if (padapter->bDriverStopped ||
+ padapter->bSurpriseRemoved) {
+ DBG_88E("%s: DriverStopped(%d) SurpriseRemoved(%d) break at line %d\n",
+ __func__, padapter->bDriverStopped, padapter->bSurpriseRemoved, __LINE__);
+ break;
+ }
+
+ pcmd = rtw_dequeue_cmd(pcmdpriv);
+ if (!pcmd)
+ continue;
+
+ if (_FAIL == rtw_cmd_filter(pcmdpriv, pcmd)) {
+ pcmd->res = H2C_DROPPED;
+ goto post_process;
+ }
+
+ pcmdpriv->cmd_issued_cnt++;
+
+ pcmd->cmdsz = _RND4((pcmd->cmdsz));/* _RND4 */
+
+ memcpy(pcmdbuf, pcmd->parmbuf, pcmd->cmdsz);
+
+ if (pcmd->cmdcode < ARRAY_SIZE(wlancmds)) {
+ cmd_hdl = wlancmds[pcmd->cmdcode].h2cfuns;
+
+ if (cmd_hdl) {
+ ret = cmd_hdl(pcmd->padapter, pcmdbuf);
+ pcmd->res = ret;
+ }
+
+ pcmdpriv->cmd_seq++;
+ } else {
+ pcmd->res = H2C_PARAMETERS_ERROR;
+ }
+
+ cmd_hdl = NULL;
+
+post_process:
+
+		/* call the callback function for post-processing */
+ if (pcmd->cmdcode < ARRAY_SIZE(rtw_cmd_callback)) {
+ pcmd_callback = rtw_cmd_callback[pcmd->cmdcode].callback;
+ if (pcmd_callback == NULL) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("mlme_cmd_hdl(): pcmd_callback = 0x%p, cmdcode = 0x%x\n", pcmd_callback, pcmd->cmdcode));
+ rtw_free_cmd_obj(pcmd);
+ } else {
+			/* todo: fill rsp_buf into pcmd->rsp if (pcmd->rsp != NULL) */
+			pcmd_callback(pcmd->padapter, pcmd);/* note: the callback may free cmd_obj itself */
+ }
+ } else {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("%s: cmdcode = 0x%x callback not defined!\n", __func__, pcmd->cmdcode));
+ rtw_free_cmd_obj(pcmd);
+ }
+
+ flush_signals_thread();
+
+ goto _next;
+ }
+ pcmdpriv->cmdthd_running = false;
+
+ /* free all cmd_obj resources */
+ do {
+ pcmd = rtw_dequeue_cmd(pcmdpriv);
+ if (pcmd == NULL)
+ break;
+
+ /* DBG_88E("%s: leaving... drop cmdcode:%u\n", __func__, pcmd->cmdcode); */
+
+ rtw_free_cmd_obj(pcmd);
+ } while (1);
+
+ _rtw_up_sema(&pcmdpriv->terminate_cmdthread_sema);
+
+_func_exit_;
+
+ thread_exit();
+}
+
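+/*
+ * Most of the *_cmd() helpers below follow the same pattern: allocate a
+ * cmd_obj plus a parameter struct, bind them with
+ * init_h2fwcmd_w_parm_no_rsp() and pass the pair to rtw_enqueue_cmd() for the
+ * command thread.  On any allocation failure the partially built objects are
+ * freed and _FAIL is returned.
+ */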
+u8 rtw_setstandby_cmd(struct adapter *padapter, uint action)
+{
+ struct cmd_obj *ph2c;
+ struct usb_suspend_parm *psetusbsuspend;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ u8 ret = _SUCCESS;
+
+_func_enter_;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ psetusbsuspend = (struct usb_suspend_parm *)rtw_zmalloc(sizeof(struct usb_suspend_parm));
+ if (psetusbsuspend == NULL) {
+ kfree(ph2c);
+ ret = _FAIL;
+ goto exit;
+ }
+
+ psetusbsuspend->action = action;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetusbsuspend, GEN_CMD_CODE(_SetUsbSuspend));
+
+ ret = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+
+_func_exit_;
+
+ return ret;
+}
+
+/*
+ * rtw_sitesurvey_cmd()
+ * NOTE: the caller must hold pmlmepriv->lock before calling this function.
+ */
+u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid, int ssid_num,
+ struct rtw_ieee80211_channel *ch, int ch_num)
+{
+ u8 res = _FAIL;
+ struct cmd_obj *ph2c;
+ struct sitesurvey_parm *psurveyPara;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+_func_enter_;
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1);
+ }
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ p2p_ps_wk_cmd(padapter, P2P_PS_SCAN, 1);
+ }
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL)
+ return _FAIL;
+
+ psurveyPara = (struct sitesurvey_parm *)rtw_zmalloc(sizeof(struct sitesurvey_parm));
+ if (psurveyPara == NULL) {
+ kfree(ph2c);
+ return _FAIL;
+ }
+
+ rtw_free_network_queue(padapter, false);
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("%s: flush network queue\n", __func__));
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara, GEN_CMD_CODE(_SiteSurvey));
+
+ /* psurveyPara->bsslimit = 48; */
+ psurveyPara->scan_mode = pmlmepriv->scan_mode;
+
+ /* prepare ssid list */
+ if (ssid) {
+ int i;
+ for (i = 0; i < ssid_num && i < RTW_SSID_SCAN_AMOUNT; i++) {
+ if (ssid[i].SsidLength) {
+ memcpy(&psurveyPara->ssid[i], &ssid[i], sizeof(struct ndis_802_11_ssid));
+ psurveyPara->ssid_num++;
+ if (0)
+ DBG_88E(FUNC_ADPT_FMT" ssid:(%s, %d)\n", FUNC_ADPT_ARG(padapter),
+ psurveyPara->ssid[i].Ssid, psurveyPara->ssid[i].SsidLength);
+ }
+ }
+ }
+
+ /* prepare channel list */
+ if (ch) {
+ int i;
+ for (i = 0; i < ch_num && i < RTW_CHANNEL_SCAN_AMOUNT; i++) {
+ if (ch[i].hw_value && !(ch[i].flags & RTW_IEEE80211_CHAN_DISABLED)) {
+ memcpy(&psurveyPara->ch[i], &ch[i], sizeof(struct rtw_ieee80211_channel));
+ psurveyPara->ch_num++;
+ if (0)
+ DBG_88E(FUNC_ADPT_FMT" ch:%u\n", FUNC_ADPT_ARG(padapter),
+ psurveyPara->ch[i].hw_value);
+ }
+ }
+ }
+
+ set_fwstate(pmlmepriv, _FW_UNDER_SURVEY);
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+ if (res == _SUCCESS) {
+ pmlmepriv->scan_start_time = rtw_get_current_time();
+
+ _set_timer(&pmlmepriv->scan_to_timer, SCANNING_TIMEOUT);
+
+ rtw_led_control(padapter, LED_CTL_SITE_SURVEY);
+
+ pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */
+ } else {
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
+ }
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset)
+{
+ struct cmd_obj *ph2c;
+ struct setdatarate_parm *pbsetdataratepara;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pbsetdataratepara = (struct setdatarate_parm *)rtw_zmalloc(sizeof(struct setdatarate_parm));
+ if (pbsetdataratepara == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pbsetdataratepara, GEN_CMD_CODE(_SetDataRate));
+ pbsetdataratepara->mac_id = 5;
+ memcpy(pbsetdataratepara->datarates, rateset, NumRates);
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_setbasicrate_cmd(struct adapter *padapter, u8 *rateset)
+{
+ struct cmd_obj *ph2c;
+ struct setbasicrate_parm *pssetbasicratepara;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ pssetbasicratepara = (struct setbasicrate_parm *)rtw_zmalloc(sizeof(struct setbasicrate_parm));
+
+ if (pssetbasicratepara == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pssetbasicratepara, _SetBasicRate_CMD_);
+
+ memcpy(pssetbasicratepara->basicrates, rateset, NumRates);
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+/*
+ * rtw_setphy_cmd()
+ *
+ * 1. To be called only after rtw_update_registrypriv_dev_network() or from the MP test program.
+ * 2. For ad-hoc/AP mode or MP mode?
+ */
+u8 rtw_setphy_cmd(struct adapter *padapter, u8 modem, u8 ch)
+{
+ struct cmd_obj *ph2c;
+ struct setphy_parm *psetphypara;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ psetphypara = (struct setphy_parm *)rtw_zmalloc(sizeof(struct setphy_parm));
+
+ if (psetphypara == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetphypara, _SetPhy_CMD_);
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("CH =%d, modem =%d", ch, modem));
+
+ psetphypara->modem = modem;
+ psetphypara->rfchannel = ch;
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+exit:
+_func_exit_;
+ return res;
+}
+
+u8 rtw_setbbreg_cmd(struct adapter *padapter, u8 offset, u8 val)
+{
+ struct cmd_obj *ph2c;
+ struct writeBB_parm *pwritebbparm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ pwritebbparm = (struct writeBB_parm *)rtw_zmalloc(sizeof(struct writeBB_parm));
+
+ if (pwritebbparm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pwritebbparm, GEN_CMD_CODE(_SetBBReg));
+
+ pwritebbparm->offset = offset;
+ pwritebbparm->value = val;
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+exit:
+_func_exit_;
+ return res;
+}
+
+u8 rtw_getbbreg_cmd(struct adapter *padapter, u8 offset, u8 *pval)
+{
+ struct cmd_obj *ph2c;
+ struct readBB_parm *prdbbparm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ prdbbparm = (struct readBB_parm *)rtw_zmalloc(sizeof(struct readBB_parm));
+
+ if (prdbbparm == NULL) {
+ kfree(ph2c);
+		res = _FAIL;
+		goto exit;
+ }
+
+ _rtw_init_listhead(&ph2c->list);
+ ph2c->cmdcode = GEN_CMD_CODE(_GetBBReg);
+ ph2c->parmbuf = (unsigned char *)prdbbparm;
+ ph2c->cmdsz = sizeof(struct readBB_parm);
+ ph2c->rsp = pval;
+ ph2c->rspsz = sizeof(struct readBB_rsp);
+
+ prdbbparm->offset = offset;
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+exit:
+_func_exit_;
+ return res;
+}
+
+u8 rtw_setrfreg_cmd(struct adapter *padapter, u8 offset, u32 val)
+{
+ struct cmd_obj *ph2c;
+ struct writeRF_parm *pwriterfparm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+_func_enter_;
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ pwriterfparm = (struct writeRF_parm *)rtw_zmalloc(sizeof(struct writeRF_parm));
+
+ if (pwriterfparm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pwriterfparm, GEN_CMD_CODE(_SetRFReg));
+
+ pwriterfparm->offset = offset;
+ pwriterfparm->value = val;
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+exit:
+_func_exit_;
+ return res;
+}
+
+u8 rtw_getrfreg_cmd(struct adapter *padapter, u8 offset, u8 *pval)
+{
+ struct cmd_obj *ph2c;
+ struct readRF_parm *prdrfparm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ prdrfparm = (struct readRF_parm *)rtw_zmalloc(sizeof(struct readRF_parm));
+ if (prdrfparm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ _rtw_init_listhead(&ph2c->list);
+ ph2c->cmdcode = GEN_CMD_CODE(_GetRFReg);
+ ph2c->parmbuf = (unsigned char *)prdrfparm;
+ ph2c->cmdsz = sizeof(struct readRF_parm);
+ ph2c->rsp = pval;
+ ph2c->rspsz = sizeof(struct readRF_rsp);
+
+ prdrfparm->offset = offset;
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+void rtw_getbbrfreg_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
+{
+ _func_enter_;
+
+ kfree(pcmd->parmbuf);
+ kfree(pcmd);
+
+ if (padapter->registrypriv.mp_mode == 1)
+ padapter->mppriv.workparam.bcompleted = true;
+_func_exit_;
+}
+
+void rtw_readtssi_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
+{
+ _func_enter_;
+
+ kfree(pcmd->parmbuf);
+ kfree(pcmd);
+
+ if (padapter->registrypriv.mp_mode == 1)
+ padapter->mppriv.workparam.bcompleted = true;
+_func_exit_;
+}
+
+u8 rtw_createbss_cmd(struct adapter *padapter)
+{
+ struct cmd_obj *pcmd;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_bssid_ex *pdev_network = &padapter->registrypriv.dev_network;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ rtw_led_control(padapter, LED_CTL_START_TO_LINK);
+
+ if (pmlmepriv->assoc_ssid.SsidLength == 0)
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for Any SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
+ else
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
+
+ pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ _rtw_init_listhead(&pcmd->list);
+ pcmd->cmdcode = _CreateBss_CMD_;
+ pcmd->parmbuf = (unsigned char *)pdev_network;
+ pcmd->cmdsz = get_wlan_bssid_ex_sz((struct wlan_bssid_ex *)pdev_network);
+ pcmd->rsp = NULL;
+ pcmd->rspsz = 0;
+ pdev_network->Length = pcmd->cmdsz;
+ res = rtw_enqueue_cmd(pcmdpriv, pcmd);
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_createbss_cmd_ex(struct adapter *padapter, unsigned char *pbss, unsigned int sz)
+{
+ struct cmd_obj *pcmd;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ _rtw_init_listhead(&pcmd->list);
+ pcmd->cmdcode = GEN_CMD_CODE(_CreateBss);
+ pcmd->parmbuf = pbss;
+ pcmd->cmdsz = sz;
+ pcmd->rsp = NULL;
+ pcmd->rspsz = 0;
+
+ res = rtw_enqueue_cmd(pcmdpriv, pcmd);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
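+/*
+ * rtw_joinbss_cmd() - build and enqueue a _JoinBss_CMD_.
+ * The target network is copied into securitypriv.sec_bss, its security, WMM
+ * and HT IEs are rebuilt for the association request, the BSSID is recorded
+ * when not associating by bssid, and sec_bss is used as the command's
+ * parameter buffer (which is why rtw_free_cmd_obj() does not free it).
+ */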
+u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
+{
+ u8 res = _SUCCESS;
+ uint t_len = 0;
+ struct wlan_bssid_ex *psecnetwork;
+ struct cmd_obj *pcmd;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct qos_priv *pqospriv = &pmlmepriv->qospriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+ enum ndis_802_11_network_infra ndis_network_mode = pnetwork->network.InfrastructureMode;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+_func_enter_;
+
+ rtw_led_control(padapter, LED_CTL_START_TO_LINK);
+
+ if (pmlmepriv->assoc_ssid.SsidLength == 0) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("+Join cmd: Any SSid\n"));
+ } else {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+Join cmd: SSid =[%s]\n", pmlmepriv->assoc_ssid.Ssid));
+ }
+
+ pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd == NULL) {
+ res = _FAIL;
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("rtw_joinbss_cmd: memory allocate for cmd_obj fail!!!\n"));
+ goto exit;
+ }
+	/* the IEs use a fixed-size buffer */
+	t_len = sizeof(struct wlan_bssid_ex);
+
+ /* for hidden ap to set fw_state here */
+ if (!check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE)) {
+ switch (ndis_network_mode) {
+ case Ndis802_11IBSS:
+ set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
+ break;
+ case Ndis802_11Infrastructure:
+ set_fwstate(pmlmepriv, WIFI_STATION_STATE);
+ break;
+ case Ndis802_11APMode:
+ case Ndis802_11AutoUnknown:
+ case Ndis802_11InfrastructureMax:
+ break;
+ }
+ }
+
+ psecnetwork = (struct wlan_bssid_ex *)&psecuritypriv->sec_bss;
+ if (psecnetwork == NULL) {
+ if (pcmd != NULL)
+ kfree(pcmd);
+
+ res = _FAIL;
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("rtw_joinbss_cmd :psecnetwork == NULL!!!\n"));
+
+ goto exit;
+ }
+
+ _rtw_memset(psecnetwork, 0, t_len);
+
+ memcpy(psecnetwork, &pnetwork->network, get_wlan_bssid_ex_sz(&pnetwork->network));
+
+ psecuritypriv->authenticator_ie[0] = (unsigned char)psecnetwork->IELength;
+
+ if ((psecnetwork->IELength-12) < (256-1)) {
+ memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->IEs[12], psecnetwork->IELength-12);
+ } else {
+ memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->IEs[12], (256-1));
+ }
+
+ psecnetwork->IELength = 0;
+	/* Added by Albert 2009/02/18 */
+	/* If the driver does not use the bssid to create the connection, copy the */
+	/* connecting AP's MAC address so that the driver still has the bssid */
+	/* information for PMKID list searching. */
+
+ if (!pmlmepriv->assoc_by_bssid)
+ memcpy(&pmlmepriv->assoc_bssid[0], &pnetwork->network.MacAddress[0], ETH_ALEN);
+
+ psecnetwork->IELength = rtw_restruct_sec_ie(padapter, &pnetwork->network.IEs[0], &psecnetwork->IEs[0], pnetwork->network.IELength);
+
+
+ pqospriv->qos_option = 0;
+
+ if (pregistrypriv->wmm_enable) {
+ u32 tmp_len;
+
+ tmp_len = rtw_restruct_wmm_ie(padapter, &pnetwork->network.IEs[0], &psecnetwork->IEs[0], pnetwork->network.IELength, psecnetwork->IELength);
+
+ if (psecnetwork->IELength != tmp_len) {
+ psecnetwork->IELength = tmp_len;
+ pqospriv->qos_option = 1; /* There is WMM IE in this corresp. beacon */
+ } else {
+ pqospriv->qos_option = 0;/* There is no WMM IE in this corresp. beacon */
+ }
+ }
+
+ phtpriv->ht_option = false;
+ if (pregistrypriv->ht_enable) {
+ /* Added by Albert 2010/06/23 */
+ /* For the WEP mode, we will use the bg mode to do the connection to avoid some IOT issue. */
+ /* Especially for Realtek 8192u SoftAP. */
+ if ((padapter->securitypriv.dot11PrivacyAlgrthm != _WEP40_) &&
+ (padapter->securitypriv.dot11PrivacyAlgrthm != _WEP104_) &&
+ (padapter->securitypriv.dot11PrivacyAlgrthm != _TKIP_)) {
+ /* rtw_restructure_ht_ie */
+ rtw_restructure_ht_ie(padapter, &pnetwork->network.IEs[0], &psecnetwork->IEs[0],
+ pnetwork->network.IELength, &psecnetwork->IELength);
+ }
+ }
+
+ pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pnetwork->network.IEs, pnetwork->network.IELength);
+
+ if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_TENDA)
+ padapter->pwrctrlpriv.smart_ps = 0;
+ else
+ padapter->pwrctrlpriv.smart_ps = padapter->registrypriv.smart_ps;
+
+ DBG_88E("%s: smart_ps =%d\n", __func__, padapter->pwrctrlpriv.smart_ps);
+
+ pcmd->cmdsz = get_wlan_bssid_ex_sz(psecnetwork);/* get cmdsz before endian conversion */
+
+ _rtw_init_listhead(&pcmd->list);
+ pcmd->cmdcode = _JoinBss_CMD_;/* GEN_CMD_CODE(_JoinBss) */
+ pcmd->parmbuf = (unsigned char *)psecnetwork;
+ pcmd->rsp = NULL;
+ pcmd->rspsz = 0;
+
+ res = rtw_enqueue_cmd(pcmdpriv, pcmd);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueue) /* for sta_mode */
+{
+ struct cmd_obj *cmdobj = NULL;
+ struct disconnect_parm *param = NULL;
+ struct cmd_priv *cmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+rtw_disassoc_cmd\n"));
+
+ /* prepare cmd parameter */
+ param = (struct disconnect_parm *)rtw_zmalloc(sizeof(*param));
+ if (param == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ param->deauth_timeout_ms = deauth_timeout_ms;
+
+ if (enqueue) {
+ /* need enqueue, prepare cmd_obj and enqueue */
+ cmdobj = (struct cmd_obj *)rtw_zmalloc(sizeof(*cmdobj));
+ if (cmdobj == NULL) {
+ res = _FAIL;
+ kfree(param);
+ goto exit;
+ }
+ init_h2fwcmd_w_parm_no_rsp(cmdobj, param, _DisConnect_CMD_);
+ res = rtw_enqueue_cmd(cmdpriv, cmdobj);
+ } else {
+ /* no need to enqueue, do the cmd hdl directly and free cmd parameter */
+ if (H2C_SUCCESS != disconnect_hdl(padapter, (u8 *)param))
+ res = _FAIL;
+ kfree(param);
+ }
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_setopmode_cmd(struct adapter *padapter, enum ndis_802_11_network_infra networktype)
+{
+ struct cmd_obj *ph2c;
+ struct setopmode_parm *psetop;
+
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = false;
+ goto exit;
+ }
+ psetop = (struct setopmode_parm *)rtw_zmalloc(sizeof(struct setopmode_parm));
+
+ if (psetop == NULL) {
+ kfree(ph2c);
+ res = false;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetop, _SetOpMode_CMD_);
+ psetop->mode = (u8)networktype;
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_setstakey_cmd(struct adapter *padapter, u8 *psta, u8 unicast_key)
+{
+ struct cmd_obj *ph2c;
+ struct set_stakey_parm *psetstakey_para;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct set_stakey_rsp *psetstakey_rsp = NULL;
+
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct sta_info *sta = (struct sta_info *)psta;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ psetstakey_para = (struct set_stakey_parm *)rtw_zmalloc(sizeof(struct set_stakey_parm));
+ if (psetstakey_para == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ psetstakey_rsp = (struct set_stakey_rsp *)rtw_zmalloc(sizeof(struct set_stakey_rsp));
+ if (psetstakey_rsp == NULL) {
+ kfree(ph2c);
+ kfree(psetstakey_para);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_);
+ ph2c->rsp = (u8 *)psetstakey_rsp;
+ ph2c->rspsz = sizeof(struct set_stakey_rsp);
+
+ memcpy(psetstakey_para->addr, sta->hwaddr, ETH_ALEN);
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
+ psetstakey_para->algorithm = (unsigned char) psecuritypriv->dot11PrivacyAlgrthm;
+ else
+ GET_ENCRY_ALGO(psecuritypriv, sta, psetstakey_para->algorithm, false);
+
+ if (unicast_key)
+ memcpy(&psetstakey_para->key, &sta->dot118021x_UncstKey, 16);
+ else
+ memcpy(&psetstakey_para->key, &psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey, 16);
+
+	/* jeff: set this because at least the sw key is ready */
+ padapter->securitypriv.busetkipkey = true;
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry, u8 enqueue)
+{
+ struct cmd_obj *ph2c;
+ struct set_stakey_parm *psetstakey_para;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct set_stakey_rsp *psetstakey_rsp = NULL;
+ struct sta_info *sta = (struct sta_info *)psta;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ if (!enqueue) {
+ clear_cam_entry(padapter, entry);
+ } else {
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ psetstakey_para = (struct set_stakey_parm *)rtw_zmalloc(sizeof(struct set_stakey_parm));
+ if (psetstakey_para == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ psetstakey_rsp = (struct set_stakey_rsp *)rtw_zmalloc(sizeof(struct set_stakey_rsp));
+ if (psetstakey_rsp == NULL) {
+ kfree(ph2c);
+ kfree(psetstakey_para);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_);
+ ph2c->rsp = (u8 *)psetstakey_rsp;
+ ph2c->rspsz = sizeof(struct set_stakey_rsp);
+
+ memcpy(psetstakey_para->addr, sta->hwaddr, ETH_ALEN);
+
+ psetstakey_para->algorithm = _NO_PRIVACY_;
+
+ psetstakey_para->id = entry;
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+ }
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_setrttbl_cmd(struct adapter *padapter, struct setratable_parm *prate_table)
+{
+ struct cmd_obj *ph2c;
+ struct setratable_parm *psetrttblparm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+_func_enter_;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ psetrttblparm = (struct setratable_parm *)rtw_zmalloc(sizeof(struct setratable_parm));
+
+ if (psetrttblparm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetrttblparm, GEN_CMD_CODE(_SetRaTable));
+
+ memcpy(psetrttblparm, prate_table, sizeof(struct setratable_parm));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+exit:
+_func_exit_;
+ return res;
+}
+
+u8 rtw_getrttbl_cmd(struct adapter *padapter, struct getratable_rsp *pval)
+{
+ struct cmd_obj *ph2c;
+ struct getratable_parm *pgetrttblparm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+_func_enter_;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ pgetrttblparm = (struct getratable_parm *)rtw_zmalloc(sizeof(struct getratable_parm));
+
+ if (pgetrttblparm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+/* init_h2fwcmd_w_parm_no_rsp(ph2c, psetrttblparm, GEN_CMD_CODE(_SetRaTable)); */
+
+ _rtw_init_listhead(&ph2c->list);
+ ph2c->cmdcode = GEN_CMD_CODE(_GetRaTable);
+ ph2c->parmbuf = (unsigned char *)pgetrttblparm;
+ ph2c->cmdsz = sizeof(struct getratable_parm);
+ ph2c->rsp = (u8 *)pval;
+ ph2c->rspsz = sizeof(struct getratable_rsp);
+
+ pgetrttblparm->rsvd = 0x0;
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+exit:
+_func_exit_;
+ return res;
+}
+
+u8 rtw_setassocsta_cmd(struct adapter *padapter, u8 *mac_addr)
+{
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct cmd_obj *ph2c;
+ struct set_assocsta_parm *psetassocsta_para;
+	struct set_assocsta_rsp *psetassocsta_rsp = NULL;
+
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ psetassocsta_para = (struct set_assocsta_parm *)rtw_zmalloc(sizeof(struct set_assocsta_parm));
+ if (psetassocsta_para == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+	psetassocsta_rsp = (struct set_assocsta_rsp *)rtw_zmalloc(sizeof(struct set_assocsta_rsp));
+ if (psetassocsta_rsp == NULL) {
+ kfree(ph2c);
+ kfree(psetassocsta_para);
+		res = _FAIL;
+		goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetassocsta_para, _SetAssocSta_CMD_);
+ ph2c->rsp = (u8 *)psetassocsta_rsp;
+ ph2c->rspsz = sizeof(struct set_assocsta_rsp);
+
+ memcpy(psetassocsta_para->addr, mac_addr, ETH_ALEN);
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
+{
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct cmd_obj *ph2c;
+ struct addBaReq_parm *paddbareq_parm;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ paddbareq_parm = (struct addBaReq_parm *)rtw_zmalloc(sizeof(struct addBaReq_parm));
+ if (paddbareq_parm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ paddbareq_parm->tid = tid;
+ memcpy(paddbareq_parm->addr, addr, ETH_ALEN);
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, paddbareq_parm, GEN_CMD_CODE(_AddBAReq));
+
+ /* DBG_88E("rtw_addbareq_cmd, tid =%d\n", tid); */
+
+ /* rtw_enqueue_cmd(pcmdpriv, ph2c); */
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = DYNAMIC_CHK_WK_CID;
+ pdrvextra_cmd_parm->type_size = 0;
+ pdrvextra_cmd_parm->pbuf = (u8 *)padapter;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+
+ /* rtw_enqueue_cmd(pcmdpriv, ph2c); */
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+exit:
+_func_exit_;
+ return res;
+}
+
+u8 rtw_set_ch_cmd(struct adapter *padapter, u8 ch, u8 bw, u8 ch_offset, u8 enqueue)
+{
+ struct cmd_obj *pcmdobj;
+ struct set_ch_parm *set_ch_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ DBG_88E(FUNC_NDEV_FMT" ch:%u, bw:%u, ch_offset:%u\n",
+ FUNC_NDEV_ARG(padapter->pnetdev), ch, bw, ch_offset);
+
+ /* check input parameter */
+
+ /* prepare cmd parameter */
+ set_ch_parm = (struct set_ch_parm *)rtw_zmalloc(sizeof(*set_ch_parm));
+ if (set_ch_parm == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ set_ch_parm->ch = ch;
+ set_ch_parm->bw = bw;
+ set_ch_parm->ch_offset = ch_offset;
+
+ if (enqueue) {
+ /* need enqueue, prepare cmd_obj and enqueue */
+ pcmdobj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmdobj == NULL) {
+ kfree(set_ch_parm);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(pcmdobj, set_ch_parm, GEN_CMD_CODE(_SetChannel));
+ res = rtw_enqueue_cmd(pcmdpriv, pcmdobj);
+ } else {
+ /* no need to enqueue, do the cmd hdl directly and free cmd parameter */
+ if (H2C_SUCCESS != set_ch_hdl(padapter, (u8 *)set_ch_parm))
+ res = _FAIL;
+
+ kfree(set_ch_parm);
+ }
+
+ /* do something based on res... */
+
+exit:
+
+ DBG_88E(FUNC_NDEV_FMT" res:%u\n", FUNC_NDEV_ARG(padapter->pnetdev), res);
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue)
+{
+ struct cmd_obj *pcmdobj;
+ struct SetChannelPlan_param *setChannelPlan_param;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+rtw_set_chplan_cmd\n"));
+
+ /* check input parameter */
+ if (!rtw_is_channel_plan_valid(chplan)) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ /* prepare cmd parameter */
+ setChannelPlan_param = (struct SetChannelPlan_param *)rtw_zmalloc(sizeof(struct SetChannelPlan_param));
+ if (setChannelPlan_param == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ setChannelPlan_param->channel_plan = chplan;
+
+ if (enqueue) {
+ /* need enqueue, prepare cmd_obj and enqueue */
+ pcmdobj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmdobj == NULL) {
+ kfree(setChannelPlan_param);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(pcmdobj, setChannelPlan_param, GEN_CMD_CODE(_SetChannelPlan));
+ res = rtw_enqueue_cmd(pcmdpriv, pcmdobj);
+ } else {
+ /* no need to enqueue, do the cmd hdl directly and free cmd parameter */
+ if (H2C_SUCCESS != set_chplan_hdl(padapter, (unsigned char *)setChannelPlan_param))
+ res = _FAIL;
+
+ kfree(setChannelPlan_param);
+ }
+
+ /* do something based on res... */
+ if (res == _SUCCESS)
+ padapter->mlmepriv.ChannelPlan = chplan;
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_led_blink_cmd(struct adapter *padapter, struct LED_871x *pLed)
+{
+ struct cmd_obj *pcmdobj;
+ struct LedBlink_param *ledBlink_param;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+rtw_led_blink_cmd\n"));
+
+ pcmdobj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmdobj == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ ledBlink_param = (struct LedBlink_param *)rtw_zmalloc(sizeof(struct LedBlink_param));
+ if (ledBlink_param == NULL) {
+ kfree(pcmdobj);
+ res = _FAIL;
+ goto exit;
+ }
+
+ ledBlink_param->pLed = pLed;
+
+ init_h2fwcmd_w_parm_no_rsp(pcmdobj, ledBlink_param, GEN_CMD_CODE(_LedBlink));
+ res = rtw_enqueue_cmd(pcmdpriv, pcmdobj);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_set_csa_cmd(struct adapter *padapter, u8 new_ch_no)
+{
+ struct cmd_obj *pcmdobj;
+ struct SetChannelSwitch_param *setChannelSwitch_param;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+rtw_set_csa_cmd\n"));
+
+ pcmdobj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmdobj == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ setChannelSwitch_param = (struct SetChannelSwitch_param *)rtw_zmalloc(sizeof(struct SetChannelSwitch_param));
+ if (setChannelSwitch_param == NULL) {
+ kfree(pcmdobj);
+ res = _FAIL;
+ goto exit;
+ }
+
+ setChannelSwitch_param->new_ch_no = new_ch_no;
+
+ init_h2fwcmd_w_parm_no_rsp(pcmdobj, setChannelSwitch_param, GEN_CMD_CODE(_SetChannelSwitch));
+ res = rtw_enqueue_cmd(pcmdpriv, pcmdobj);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_tdls_cmd(struct adapter *padapter, u8 *addr, u8 option)
+{
+ return _SUCCESS;
+}
+
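+/*
+ * traffic_status_watchdog() - evaluate the traffic of the last period.
+ * More than 100 rx or tx frames marks the link as busy, more than 4000 as
+ * higher-busy; with at most 8 tx + rx-unicast frames and at most 2 rx-unicast
+ * frames LPS is entered, otherwise it is left.  The per-period counters are
+ * cleared at the end.
+ */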
+static void traffic_status_watchdog(struct adapter *padapter)
+{
+ u8 bEnterPS;
+ u8 bBusyTraffic = false, bTxBusyTraffic = false, bRxBusyTraffic = false;
+ u8 bHigherBusyTraffic = false, bHigherBusyRxTraffic = false, bHigherBusyTxTraffic = false;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+	/* Determine if our traffic is busy now. */
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > 100 ||
+ pmlmepriv->LinkDetectInfo.NumTxOkInPeriod > 100) {
+ bBusyTraffic = true;
+
+ if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > pmlmepriv->LinkDetectInfo.NumTxOkInPeriod)
+ bRxBusyTraffic = true;
+ else
+ bTxBusyTraffic = true;
+ }
+
+ /* Higher Tx/Rx data. */
+ if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > 4000 ||
+ pmlmepriv->LinkDetectInfo.NumTxOkInPeriod > 4000) {
+ bHigherBusyTraffic = true;
+
+ if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > pmlmepriv->LinkDetectInfo.NumTxOkInPeriod)
+ bHigherBusyRxTraffic = true;
+ else
+ bHigherBusyTxTraffic = true;
+ }
+
+ /* check traffic for powersaving. */
+ if (((pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod + pmlmepriv->LinkDetectInfo.NumTxOkInPeriod) > 8) ||
+ (pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod > 2))
+ bEnterPS = false;
+ else
+ bEnterPS = true;
+
+		/* LeisurePS only works in infra mode. */
+ if (bEnterPS)
+ LPS_Enter(padapter);
+ else
+ LPS_Leave(padapter);
+ } else {
+ LPS_Leave(padapter);
+ }
+
+ pmlmepriv->LinkDetectInfo.NumRxOkInPeriod = 0;
+ pmlmepriv->LinkDetectInfo.NumTxOkInPeriod = 0;
+ pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod = 0;
+ pmlmepriv->LinkDetectInfo.bBusyTraffic = bBusyTraffic;
+ pmlmepriv->LinkDetectInfo.bTxBusyTraffic = bTxBusyTraffic;
+ pmlmepriv->LinkDetectInfo.bRxBusyTraffic = bRxBusyTraffic;
+ pmlmepriv->LinkDetectInfo.bHigherBusyTraffic = bHigherBusyTraffic;
+ pmlmepriv->LinkDetectInfo.bHigherBusyRxTraffic = bHigherBusyRxTraffic;
+ pmlmepriv->LinkDetectInfo.bHigherBusyTxTraffic = bHigherBusyTxTraffic;
+}
+
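+/*
+ * dynamic_chk_wk_hdl() - periodic housekeeping run from the command thread:
+ * AP-mode station expiry check, silent-reset xmit status check, link status
+ * check, traffic watchdog and the HAL dynamic-mechanism watchdog.
+ */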
+void dynamic_chk_wk_hdl(struct adapter *padapter, u8 *pbuf, int sz)
+{
+ struct mlme_priv *pmlmepriv;
+
+ padapter = (struct adapter *)pbuf;
+ pmlmepriv = &(padapter->mlmepriv);
+
+#ifdef CONFIG_88EU_AP_MODE
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)
+ expire_timeout_chk(padapter);
+#endif
+
+ rtw_hal_sreset_xmit_status_check(padapter);
+
+ linked_status_chk(padapter);
+ traffic_status_watchdog(padapter);
+
+ rtw_hal_dm_watchdog(padapter);
+}
+
+static void lps_ctrl_wk_hdl(struct adapter *padapter, u8 lps_ctrl_type)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ u8 mstatus;
+
+_func_enter_;
+
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true))
+ return;
+
+ switch (lps_ctrl_type) {
+ case LPS_CTRL_SCAN:
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ /* connect */
+ LPS_Leave(padapter);
+ }
+ break;
+ case LPS_CTRL_JOINBSS:
+ LPS_Leave(padapter);
+ break;
+ case LPS_CTRL_CONNECT:
+ mstatus = 1;/* connect */
+ /* Reset LPS Setting */
+ padapter->pwrctrlpriv.LpsIdleCount = 0;
+ rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus));
+ break;
+ case LPS_CTRL_DISCONNECT:
+ mstatus = 0;/* disconnect */
+ LPS_Leave(padapter);
+ rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus));
+ break;
+ case LPS_CTRL_SPECIAL_PACKET:
+ /* DBG_88E("LPS_CTRL_SPECIAL_PACKET\n"); */
+ pwrpriv->DelayLPSLastTimeStamp = rtw_get_current_time();
+ LPS_Leave(padapter);
+ break;
+ case LPS_CTRL_LEAVE:
+ LPS_Leave(padapter);
+ break;
+ default:
+ break;
+ }
+
+_func_exit_;
+}
+
+u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ /* struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv; */
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ /* if (!pwrctrlpriv->bLeisurePs) */
+ /* return res; */
+
+ if (enqueue) {
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = LPS_CTRL_WK_CID;
+ pdrvextra_cmd_parm->type_size = lps_ctrl_type;
+ pdrvextra_cmd_parm->pbuf = NULL;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+ } else {
+ lps_ctrl_wk_hdl(padapter, lps_ctrl_type);
+ }
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+static void rpt_timer_setting_wk_hdl(struct adapter *padapter, u16 min_time)
+{
+ rtw_hal_set_hwreg(padapter, HW_VAR_RPT_TIMER_SETTING, (u8 *)(&min_time));
+}
+
+u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ u8 res = _SUCCESS;
+
+_func_enter_;
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = RTP_TIMER_CFG_WK_CID;
+ pdrvextra_cmd_parm->type_size = min_time;
+ pdrvextra_cmd_parm->pbuf = NULL;
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+static void antenna_select_wk_hdl(struct adapter *padapter, u8 antenna)
+{
+ rtw_hal_set_hwreg(padapter, HW_VAR_ANTENNA_DIVERSITY_SELECT, (u8 *)(&antenna));
+}
+
+u8 rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 support_ant_div;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+ rtw_hal_get_def_var(padapter, HAL_DEF_IS_SUPPORT_ANT_DIV, &support_ant_div);
+ if (!support_ant_div)
+ return res;
+
+ if (enqueue) {
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = ANT_SELECT_WK_CID;
+ pdrvextra_cmd_parm->type_size = antenna;
+ pdrvextra_cmd_parm->pbuf = NULL;
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+ } else {
+ antenna_select_wk_hdl(padapter, antenna);
+ }
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+static void power_saving_wk_hdl(struct adapter *padapter, u8 *pbuf, int sz)
+{
+ rtw_ps_processor(padapter);
+}
+
+#ifdef CONFIG_88EU_P2P
+u8 p2p_protocol_wk_cmd(struct adapter *padapter, int intCmdType)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return res;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = P2P_PROTO_WK_CID;
+	pdrvextra_cmd_parm->type_size = intCmdType;	/* used as the command type */
+ pdrvextra_cmd_parm->pbuf = NULL; /* Must be NULL here */
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+#endif /* CONFIG_88EU_P2P */
+
+u8 rtw_ps_cmd(struct adapter *padapter)
+{
+ struct cmd_obj *ppscmd;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ u8 res = _SUCCESS;
+_func_enter_;
+
+ ppscmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ppscmd == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree(ppscmd);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = POWER_SAVING_CTRL_WK_CID;
+ pdrvextra_cmd_parm->pbuf = NULL;
+ init_h2fwcmd_w_parm_no_rsp(ppscmd, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ppscmd);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+#ifdef CONFIG_88EU_AP_MODE
+
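+/*
+ * rtw_chk_hi_queue_hdl() - wait for the high queue to drain for the bc/mc
+ * station: poll HW_VAR_CHK_HI_QUEUE_EMPTY up to 10 times at 100 ms intervals;
+ * once empty, clear BIT(0) of the TIM/sta_dz bitmaps and update the beacon,
+ * otherwise re-queue the check via rtw_chk_hi_queue_cmd().
+ */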
+static void rtw_chk_hi_queue_hdl(struct adapter *padapter)
+{
+ int cnt = 0;
+ struct sta_info *psta_bmc;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ psta_bmc = rtw_get_bcmc_stainfo(padapter);
+ if (!psta_bmc)
+ return;
+
+ if (psta_bmc->sleepq_len == 0) {
+ u8 val = 0;
+
+ /* while ((rtw_read32(padapter, 0x414)&0x00ffff00)!= 0) */
+ /* while ((rtw_read32(padapter, 0x414)&0x0000ff00)!= 0) */
+
+ rtw_hal_get_hwreg(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &val);
+
+ while (!val) {
+ rtw_msleep_os(100);
+
+ cnt++;
+
+ if (cnt > 10)
+ break;
+
+ rtw_hal_get_hwreg(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &val);
+ }
+
+ if (cnt <= 10) {
+ pstapriv->tim_bitmap &= ~BIT(0);
+ pstapriv->sta_dz_bitmap &= ~BIT(0);
+
+ update_beacon(padapter, _TIM_IE_, NULL, false);
+		} else { /* recheck later */
+ rtw_chk_hi_queue_cmd(padapter);
+ }
+ }
+}
+
+u8 rtw_chk_hi_queue_cmd(struct adapter *padapter)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = CHECK_HIQ_WK_CID;
+ pdrvextra_cmd_parm->type_size = 0;
+ pdrvextra_cmd_parm->pbuf = NULL;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+exit:
+ return res;
+}
+#endif
+
+u8 rtw_c2h_wk_cmd(struct adapter *padapter, u8 *c2h_evt)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = C2H_WK_CID;
+ pdrvextra_cmd_parm->type_size = c2h_evt ? 16 : 0;
+ pdrvextra_cmd_parm->pbuf = c2h_evt;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+
+ return res;
+}
+
+static s32 c2h_evt_hdl(struct adapter *adapter, struct c2h_evt_hdr *c2h_evt, c2h_id_filter filter)
+{
+ s32 ret = _FAIL;
+ u8 buf[16];
+
+ if (!c2h_evt) {
+		/* No c2h event in cmd_obj, read the c2h event before handling */
+ if (c2h_evt_read(adapter, buf) == _SUCCESS) {
+ c2h_evt = (struct c2h_evt_hdr *)buf;
+
+ if (filter && filter(c2h_evt->id) == false)
+ goto exit;
+
+ ret = rtw_hal_c2h_handler(adapter, c2h_evt);
+ }
+ } else {
+ if (filter && filter(c2h_evt->id) == false)
+ goto exit;
+
+ ret = rtw_hal_c2h_handler(adapter, c2h_evt);
+ }
+exit:
+ return ret;
+}
+
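+/*
+ * c2h_wk_callback() - work handler draining evtpriv->c2h_queue.  An entry
+ * popped from the queue holds an event that was already read, so only
+ * c2h_evt_clear() is needed; otherwise a buffer is allocated and the event is
+ * read and cleared here.  The evtpriv pointer itself may be queued as a
+ * sentinel that only triggers the clear.  CCX reports are handled directly,
+ * other events are handed back to the command thread via rtw_c2h_wk_cmd().
+ */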
+static void c2h_wk_callback(struct work_struct *work)
+{
+ struct evt_priv *evtpriv = container_of(work, struct evt_priv, c2h_wk);
+ struct adapter *adapter = container_of(evtpriv, struct adapter, evtpriv);
+ struct c2h_evt_hdr *c2h_evt;
+ c2h_id_filter ccx_id_filter = rtw_hal_c2h_id_filter_ccx(adapter);
+
+ evtpriv->c2h_wk_alive = true;
+
+ while (!rtw_cbuf_empty(evtpriv->c2h_queue)) {
+ if ((c2h_evt = (struct c2h_evt_hdr *)rtw_cbuf_pop(evtpriv->c2h_queue)) != NULL) {
+			/* This C2H event has already been read, just clear it */
+ c2h_evt_clear(adapter);
+ } else if ((c2h_evt = (struct c2h_evt_hdr *)rtw_malloc(16)) != NULL) {
+			/* This C2H event has not been read yet, read & clear it now */
+ if (c2h_evt_read(adapter, (u8 *)c2h_evt) != _SUCCESS)
+ continue;
+ }
+
+ /* Special pointer to trigger c2h_evt_clear only */
+ if ((void *)c2h_evt == (void *)evtpriv)
+ continue;
+
+ if (!c2h_evt_exist(c2h_evt)) {
+ kfree(c2h_evt);
+ continue;
+ }
+
+ if (ccx_id_filter(c2h_evt->id) == true) {
+ /* Handle CCX report here */
+ rtw_hal_c2h_handler(adapter, c2h_evt);
+ kfree(c2h_evt);
+ } else {
+#ifdef CONFIG_88EU_P2P
+ /* Enqueue into cmd_thread for others */
+ rtw_c2h_wk_cmd(adapter, (u8 *)c2h_evt);
+#endif
+ }
+ }
+
+ evtpriv->c2h_wk_alive = false;
+}
+
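+/*
+ * rtw_drvextra_cmd_hdl() - dispatcher for _Set_Drv_Extra commands, switching
+ * on drvextra_cmd_parm->ec_id.  A non-NULL pbuf with type_size > 0 is freed
+ * after the work item has been handled.
+ */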
+u8 rtw_drvextra_cmd_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ struct drvextra_cmd_parm *pdrvextra_cmd;
+
+ if (!pbuf)
+ return H2C_PARAMETERS_ERROR;
+
+ pdrvextra_cmd = (struct drvextra_cmd_parm *)pbuf;
+
+ switch (pdrvextra_cmd->ec_id) {
+ case DYNAMIC_CHK_WK_CID:
+ dynamic_chk_wk_hdl(padapter, pdrvextra_cmd->pbuf, pdrvextra_cmd->type_size);
+ break;
+ case POWER_SAVING_CTRL_WK_CID:
+ power_saving_wk_hdl(padapter, pdrvextra_cmd->pbuf, pdrvextra_cmd->type_size);
+ break;
+ case LPS_CTRL_WK_CID:
+ lps_ctrl_wk_hdl(padapter, (u8)pdrvextra_cmd->type_size);
+ break;
+ case RTP_TIMER_CFG_WK_CID:
+ rpt_timer_setting_wk_hdl(padapter, pdrvextra_cmd->type_size);
+ break;
+ case ANT_SELECT_WK_CID:
+ antenna_select_wk_hdl(padapter, pdrvextra_cmd->type_size);
+ break;
+#ifdef CONFIG_88EU_P2P
+ case P2P_PS_WK_CID:
+ p2p_ps_wk_hdl(padapter, pdrvextra_cmd->type_size);
+ break;
+ case P2P_PROTO_WK_CID:
+		/* Commented by Albert 2011/07/01 */
+		/* type_size carries the command type here */
+ p2p_protocol_wk_hdl(padapter, pdrvextra_cmd->type_size);
+ break;
+#endif
+#ifdef CONFIG_88EU_AP_MODE
+ case CHECK_HIQ_WK_CID:
+ rtw_chk_hi_queue_hdl(padapter);
+ break;
+#endif /* CONFIG_88EU_AP_MODE */
+ case C2H_WK_CID:
+ c2h_evt_hdl(padapter, (struct c2h_evt_hdr *)pdrvextra_cmd->pbuf, NULL);
+ break;
+ default:
+ break;
+ }
+
+ if (pdrvextra_cmd->pbuf && pdrvextra_cmd->type_size > 0)
+ kfree(pdrvextra_cmd->pbuf);
+
+ return H2C_SUCCESS;
+}
+
+void rtw_survey_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+_func_enter_;
+
+ if (pcmd->res == H2C_DROPPED) {
+		/* TODO: cancel the timer and run the timeout handler directly... */
+		/* need to make the timeout handler OS-independent */
+ _set_timer(&pmlmepriv->scan_to_timer, 1);
+ } else if (pcmd->res != H2C_SUCCESS) {
+ _set_timer(&pmlmepriv->scan_to_timer, 1);
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n ********Error: MgntActrtw_set_802_11_bssid_LIST_SCAN Fail ************\n\n."));
+ }
+
+ /* free cmd */
+ rtw_free_cmd_obj(pcmd);
+
+_func_exit_;
+}
+void rtw_disassoc_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
+{
+ unsigned long irqL;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+_func_enter_;
+
+ if (pcmd->res != H2C_SUCCESS) {
+ _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ set_fwstate(pmlmepriv, _FW_LINKED);
+ _exit_critical_bh(&pmlmepriv->lock, &irqL);
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n ***Error: disconnect_cmd_callback Fail ***\n."));
+
+ goto exit;
+ } else /* clear bridge database */
+ nat25_db_cleanup(padapter);
+
+ /* free cmd */
+ rtw_free_cmd_obj(pcmd);
+
+exit:
+
+_func_exit_;
+}
+
+void rtw_joinbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+_func_enter_;
+
+ if (pcmd->res == H2C_DROPPED) {
+		/* TODO: cancel the timer and run the timeout handler directly... */
+		/* need to make the timeout handler OS-independent */
+ _set_timer(&pmlmepriv->assoc_timer, 1);
+ } else if (pcmd->res != H2C_SUCCESS) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("********Error:rtw_select_and_join_from_scanned_queue Wait Sema Fail ************\n"));
+ _set_timer(&pmlmepriv->assoc_timer, 1);
+ }
+
+ rtw_free_cmd_obj(pcmd);
+
+_func_exit_;
+}
+
+void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
+{
+ unsigned long irqL;
+ u8 timer_cancelled;
+ struct sta_info *psta = NULL;
+ struct wlan_network *pwlan = NULL;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)pcmd->parmbuf;
+ struct wlan_network *tgt_network = &(pmlmepriv->cur_network);
+
+_func_enter_;
+
+	if (pcmd->res != H2C_SUCCESS) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n ********Error: rtw_createbss_cmd_callback Fail ************\n\n."));
+ _set_timer(&pmlmepriv->assoc_timer, 1);
+ }
+
+ _cancel_timer(&pmlmepriv->assoc_timer, &timer_cancelled);
+
+ _enter_critical_bh(&pmlmepriv->lock, &irqL);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ psta = rtw_get_stainfo(&padapter->stapriv, pnetwork->MacAddress);
+ if (!psta) {
+ psta = rtw_alloc_stainfo(&padapter->stapriv, pnetwork->MacAddress);
+ if (psta == NULL) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nCan't alloc sta_info when createbss_cmd_callback\n"));
+				goto createbss_cmd_fail;
+ }
+ }
+
+ rtw_indicate_connect(padapter);
+ } else {
+ unsigned long irqL;
+
+ pwlan = _rtw_alloc_network(pmlmepriv);
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ if (pwlan == NULL) {
+ pwlan = rtw_get_oldest_wlan_network(&pmlmepriv->scanned_queue);
+ if (pwlan == NULL) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n Error: can't get pwlan in rtw_joinbss_event_callback\n"));
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ goto createbss_cmd_fail;
+ }
+ pwlan->last_scanned = rtw_get_current_time();
+ } else {
+ rtw_list_insert_tail(&(pwlan->list), &pmlmepriv->scanned_queue.queue);
+ }
+
+ pnetwork->Length = get_wlan_bssid_ex_sz(pnetwork);
+ memcpy(&(pwlan->network), pnetwork, pnetwork->Length);
+
+ memcpy(&tgt_network->network, pnetwork, (get_wlan_bssid_ex_sz(pnetwork)));
+
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+		/* we will set _FW_LINKED when one more sta joins us (rtw_stassoc_event_callback) */
+ }
+
+createbss_cmd_fail:
+
+ _exit_critical_bh(&pmlmepriv->lock, &irqL);
+
+ rtw_free_cmd_obj(pcmd);
+
+_func_exit_;
+}
+
+void rtw_setstaKey_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
+{
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct set_stakey_rsp *psetstakey_rsp = (struct set_stakey_rsp *)(pcmd->rsp);
+ struct sta_info *psta = rtw_get_stainfo(pstapriv, psetstakey_rsp->addr);
+
+_func_enter_;
+
+ if (psta == NULL) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nERROR: rtw_setstaKey_cmdrsp_callback => can't get sta_info\n\n"));
+ goto exit;
+ }
+exit:
+ rtw_free_cmd_obj(pcmd);
+_func_exit_;
+}
+
+void rtw_setassocsta_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
+{
+ unsigned long irqL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct set_assocsta_parm *passocsta_parm = (struct set_assocsta_parm *)(pcmd->parmbuf);
+ struct set_assocsta_rsp *passocsta_rsp = (struct set_assocsta_rsp *)(pcmd->rsp);
+ struct sta_info *psta = rtw_get_stainfo(pstapriv, passocsta_parm->addr);
+
+_func_enter_;
+
+ if (psta == NULL) {
+		RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nERROR: setassocsta_cmdrsp_callback => can't get sta_info\n\n"));
+ goto exit;
+ }
+
+ psta->aid = passocsta_rsp->cam_id;
+ psta->mac_id = passocsta_rsp->cam_id;
+
+ _enter_critical_bh(&pmlmepriv->lock, &irqL);
+
+ if ((check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) && (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true))
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+
+ set_fwstate(pmlmepriv, _FW_LINKED);
+ _exit_critical_bh(&pmlmepriv->lock, &irqL);
+
+exit:
+ rtw_free_cmd_obj(pcmd);
+
+_func_exit_;
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_debug.c b/drivers/staging/rtl8188eu/core/rtw_debug.c
new file mode 100644
index 0000000..0fe5f5d
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_debug.c
@@ -0,0 +1,948 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_DEBUG_C_
+
+#include <rtw_debug.h>
+#include <rtw_version.h>
+
+int proc_get_drv_version(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ int len = 0;
+
+ len += snprintf(page + len, count - len, "%s\n", DRIVERVERSION);
+
+ *eof = 1;
+ return len;
+}
+
+int proc_get_write_reg(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ *eof = 1;
+ return 0;
+}
+
+int proc_set_write_reg(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ char tmp[32];
+ u32 addr, val, len;
+
+ if (count < 3) {
+ DBG_88E("argument size is less than 3\n");
+ return -EFAULT;
+ }
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+ int num = sscanf(tmp, "%x %x %x", &addr, &val, &len);
+
+ if (num != 3) {
+ DBG_88E("invalid write_reg parameter!\n");
+ return count;
+ }
+ switch (len) {
+ case 1:
+ rtw_write8(padapter, addr, (u8)val);
+ break;
+ case 2:
+ rtw_write16(padapter, addr, (u16)val);
+ break;
+ case 4:
+ rtw_write32(padapter, addr, val);
+ break;
+ default:
+ DBG_88E("error write length =%d", len);
+ break;
+ }
+ }
+ return count;
+}
+
+static u32 proc_get_read_addr = 0xeeeeeeee;
+static u32 proc_get_read_len = 0x4;
+
+int proc_get_read_reg(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ int len = 0;
+
+ if (proc_get_read_addr == 0xeeeeeeee) {
+ *eof = 1;
+ return len;
+ }
+
+ switch (proc_get_read_len) {
+ case 1:
+ len += snprintf(page + len, count - len, "rtw_read8(0x%x)=0x%x\n", proc_get_read_addr, rtw_read8(padapter, proc_get_read_addr));
+ break;
+ case 2:
+ len += snprintf(page + len, count - len, "rtw_read16(0x%x)=0x%x\n", proc_get_read_addr, rtw_read16(padapter, proc_get_read_addr));
+ break;
+ case 4:
+ len += snprintf(page + len, count - len, "rtw_read32(0x%x)=0x%x\n", proc_get_read_addr, rtw_read32(padapter, proc_get_read_addr));
+ break;
+ default:
+ len += snprintf(page + len, count - len, "error read length=%d\n", proc_get_read_len);
+ break;
+ }
+
+ *eof = 1;
+ return len;
+}
+
+int proc_set_read_reg(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ char tmp[16];
+ u32 addr, len;
+
+ if (count < 2) {
+ DBG_88E("argument size is less than 2\n");
+ return -EFAULT;
+ }
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+ int num = sscanf(tmp, "%x %x", &addr, &len);
+
+ if (num != 2) {
+ DBG_88E("invalid read_reg parameter!\n");
+ return count;
+ }
+
+ proc_get_read_addr = addr;
+
+ proc_get_read_len = len;
+ }
+
+ return count;
+}
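+
+/*
+ * proc_set_write_reg() expects "<addr> <value> <len>" (hex, len = 1, 2 or 4)
+ * and issues the matching rtw_write8/16/32().  proc_set_read_reg() expects
+ * "<addr> <len>" and stores them in proc_get_read_addr/proc_get_read_len;
+ * a subsequent proc_get_read_reg() then dumps the register via
+ * rtw_read8/16/32().
+ */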
+
+int proc_get_fwstate(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ int len = 0;
+
+ len += snprintf(page + len, count - len, "fwstate=0x%x\n", get_fwstate(pmlmepriv));
+
+ *eof = 1;
+ return len;
+}
+
+int proc_get_sec_info(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+
+ int len = 0;
+
+ len += snprintf(page + len, count - len, "auth_alg=0x%x, enc_alg=0x%x, auth_type=0x%x, enc_type=0x%x\n",
+ psecuritypriv->dot11AuthAlgrthm, psecuritypriv->dot11PrivacyAlgrthm,
+ psecuritypriv->ndisauthtype, psecuritypriv->ndisencryptstatus);
+
+ *eof = 1;
+ return len;
+}
+
+int proc_get_mlmext_state(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ int len = 0;
+
+ len += snprintf(page + len, count - len, "pmlmeinfo->state=0x%x\n", pmlmeinfo->state);
+
+ *eof = 1;
+ return len;
+}
+
+int proc_get_qos_option(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ int len = 0;
+
+ len += snprintf(page + len, count - len, "qos_option=%d\n", pmlmepriv->qospriv.qos_option);
+
+ *eof = 1;
+ return len;
+}
+
+int proc_get_ht_option(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ int len = 0;
+ len += snprintf(page + len, count - len, "ht_option=%d\n", pmlmepriv->htpriv.ht_option);
+ *eof = 1;
+ return len;
+}
+
+int proc_get_rf_info(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ int len = 0;
+
+ len += snprintf(page + len, count - len, "cur_ch=%d, cur_bw=%d, cur_ch_offset=%d\n",
+ pmlmeext->cur_channel, pmlmeext->cur_bwmode, pmlmeext->cur_ch_offset);
+ *eof = 1;
+ return len;
+}
+
+int proc_get_ap_info(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct sta_info *psta;
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct wlan_network *cur_network = &(pmlmepriv->cur_network);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ int len = 0;
+
+ psta = rtw_get_stainfo(pstapriv, cur_network->network.MacAddress);
+ if (psta) {
+ int i;
+ struct recv_reorder_ctrl *preorder_ctrl;
+
+ len += snprintf(page + len, count - len, "SSID=%s\n", cur_network->network.Ssid.Ssid);
+ len += snprintf(page + len, count - len, "sta's macaddr:%pM\n", psta->hwaddr);
+ len += snprintf(page + len, count - len, "cur_channel=%d, cur_bwmode=%d, cur_ch_offset=%d\n", pmlmeext->cur_channel, pmlmeext->cur_bwmode, pmlmeext->cur_ch_offset);
+ len += snprintf(page + len, count - len, "rtsen=%d, cts2self=%d\n", psta->rtsen, psta->cts2self);
+ len += snprintf(page + len, count - len, "state=0x%x, aid=%d, macid=%d, raid=%d\n", psta->state, psta->aid, psta->mac_id, psta->raid);
+ len += snprintf(page + len, count - len, "qos_en=%d, ht_en=%d, init_rate=%d\n", psta->qos_option, psta->htpriv.ht_option, psta->init_rate);
+ len += snprintf(page + len, count - len, "bwmode=%d, ch_offset=%d, sgi=%d\n", psta->htpriv.bwmode, psta->htpriv.ch_offset, psta->htpriv.sgi);
+ len += snprintf(page + len, count - len, "ampdu_enable = %d\n", psta->htpriv.ampdu_enable);
+ len += snprintf(page + len, count - len, "agg_enable_bitmap=%x, candidate_tid_bitmap=%x\n", psta->htpriv.agg_enable_bitmap, psta->htpriv.candidate_tid_bitmap);
+
+ for (i = 0; i < 16; i++) {
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
+ if (preorder_ctrl->enable)
+ len += snprintf(page + len, count - len, "tid=%d, indicate_seq=%d\n", i, preorder_ctrl->indicate_seq);
+ }
+ } else {
+ len += snprintf(page + len, count - len, "can't get sta's macaddr, cur_network's macaddr: %pM\n", cur_network->network.MacAddress);
+ }
+
+ *eof = 1;
+ return len;
+}
+
+int proc_get_adapter_state(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int len = 0;
+
+ len += snprintf(page + len, count - len, "bSurpriseRemoved=%d, bDriverStopped=%d\n",
+ padapter->bSurpriseRemoved, padapter->bDriverStopped);
+
+ *eof = 1;
+ return len;
+}
+
+int proc_get_trx_info(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+ int len = 0;
+
+ len += snprintf(page + len, count - len, "free_xmitbuf_cnt=%d, free_xmitframe_cnt=%d, free_ext_xmitbuf_cnt=%d, free_recvframe_cnt=%d\n",
+ pxmitpriv->free_xmitbuf_cnt, pxmitpriv->free_xmitframe_cnt, pxmitpriv->free_xmit_extbuf_cnt, precvpriv->free_recvframe_cnt);
+ len += snprintf(page + len, count - len, "rx_urb_pending_cnt=%d\n", precvpriv->rx_pending_cnt);
+
+ *eof = 1;
+ return len;
+}
+
+int proc_get_mac_reg_dump1(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int len = 0;
+ int i, j = 1;
+
+ len += snprintf(page + len, count - len, "\n======= MAC REG =======\n");
+
+ for (i = 0x0; i < 0x300; i += 4) {
+ if (j%4 == 1)
+ len += snprintf(page + len, count - len, "0x%02x", i);
+ len += snprintf(page + len, count - len, " 0x%08x ", rtw_read32(padapter, i));
+ if ((j++)%4 == 0)
+ len += snprintf(page + len, count - len, "\n");
+ }
+
+ *eof = 1;
+ return len;
+}
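+
+/*
+ * Note: the dump above prints four 32-bit registers per line, emitting the
+ * start address when (j % 4) == 1 and a newline after every fourth value.
+ * The MAC register space is split across _dump1/_dump2/_dump3
+ * (0x000-0x2fc, 0x300-0x5fc, 0x600-0x7fc), and the BB/RF dumps below follow
+ * the same pattern, presumably so each proc read fits in a single page.
+ */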
+
+int proc_get_mac_reg_dump2(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int len = 0;
+ int i, j = 1;
+
+ len += snprintf(page + len, count - len, "\n======= MAC REG =======\n");
+ for (i = 0x300; i < 0x600; i += 4) {
+ if (j%4 == 1)
+ len += snprintf(page + len, count - len, "0x%02x", i);
+ len += snprintf(page + len, count - len, " 0x%08x ", rtw_read32(padapter, i));
+ if ((j++)%4 == 0)
+ len += snprintf(page + len, count - len, "\n");
+ }
+
+ *eof = 1;
+ return len;
+}
+
+int proc_get_mac_reg_dump3(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int len = 0;
+ int i, j = 1;
+
+ len += snprintf(page + len, count - len, "\n======= MAC REG =======\n");
+
+ for (i = 0x600; i < 0x800; i += 4) {
+ if (j%4 == 1)
+ len += snprintf(page + len, count - len, "0x%02x", i);
+ len += snprintf(page + len, count - len, " 0x%08x ", rtw_read32(padapter, i));
+ if ((j++)%4 == 0)
+ len += snprintf(page + len, count - len, "\n");
+ }
+
+ *eof = 1;
+ return len;
+}
+
+int proc_get_bb_reg_dump1(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int len = 0;
+ int i, j = 1;
+
+ len += snprintf(page + len, count - len, "\n======= BB REG =======\n");
+ for (i = 0x800; i < 0xB00; i += 4) {
+ if (j%4 == 1)
+ len += snprintf(page + len, count - len, "0x%02x", i);
+ len += snprintf(page + len, count - len, " 0x%08x ", rtw_read32(padapter, i));
+ if ((j++)%4 == 0)
+ len += snprintf(page + len, count - len, "\n");
+ }
+ *eof = 1;
+ return len;
+}
+
+int proc_get_bb_reg_dump2(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int len = 0;
+ int i, j = 1;
+
+ len += snprintf(page + len, count - len, "\n======= BB REG =======\n");
+ for (i = 0xB00; i < 0xE00; i += 4) {
+ if (j%4 == 1)
+ len += snprintf(page + len, count - len, "0x%02x", i);
+ len += snprintf(page + len, count - len, " 0x%08x ", rtw_read32(padapter, i));
+ if ((j++)%4 == 0)
+ len += snprintf(page + len, count - len, "\n");
+ }
+ *eof = 1;
+ return len;
+}
+
+int proc_get_bb_reg_dump3(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int len = 0;
+ int i, j = 1;
+
+ len += snprintf(page + len, count - len, "\n======= BB REG =======\n");
+ for (i = 0xE00; i < 0x1000; i += 4) {
+ if (j%4 == 1)
+ len += snprintf(page + len, count - len, "0x%02x", i);
+ len += snprintf(page + len, count - len, " 0x%08x ", rtw_read32(padapter, i));
+ if ((j++)%4 == 0)
+ len += snprintf(page + len, count - len, "\n");
+ }
+ *eof = 1;
+ return len;
+}
+
+int proc_get_rf_reg_dump1(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int len = 0;
+ int i, j = 1, path;
+ u32 value;
+
+ len += snprintf(page + len, count - len, "\n======= RF REG =======\n");
+ path = 1;
+ len += snprintf(page + len, count - len, "\nRF_Path(%x)\n", path);
+ for (i = 0; i < 0xC0; i++) {
+ value = rtw_hal_read_rfreg(padapter, path, i, 0xffffffff);
+ if (j%4 == 1)
+ len += snprintf(page + len, count - len, "0x%02x ", i);
+ len += snprintf(page + len, count - len, " 0x%08x ", value);
+ if ((j++)%4 == 0)
+ len += snprintf(page + len, count - len, "\n");
+ }
+ *eof = 1;
+ return len;
+}
+
+int proc_get_rf_reg_dump2(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int len = 0;
+ int i, j = 1, path;
+ u32 value;
+
+ len += snprintf(page + len, count - len, "\n======= RF REG =======\n");
+ path = 1;
+ len += snprintf(page + len, count - len, "\nRF_Path(%x)\n", path);
+ for (i = 0xC0; i < 0x100; i++) {
+ value = rtw_hal_read_rfreg(padapter, path, i, 0xffffffff);
+ if (j%4 == 1)
+ len += snprintf(page + len, count - len, "0x%02x ", i);
+ len += snprintf(page + len, count - len, " 0x%08x ", value);
+ if ((j++)%4 == 0)
+ len += snprintf(page + len, count - len, "\n");
+ }
+ *eof = 1;
+ return len;
+}
+
+int proc_get_rf_reg_dump3(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int len = 0;
+ int i, j = 1, path;
+ u32 value;
+
+ len += snprintf(page + len, count - len, "\n======= RF REG =======\n");
+ path = 2;
+ len += snprintf(page + len, count - len, "\nRF_Path(%x)\n", path);
+ for (i = 0; i < 0xC0; i++) {
+ value = rtw_hal_read_rfreg(padapter, path, i, 0xffffffff);
+ if (j%4 == 1)
+ len += snprintf(page + len, count - len, "0x%02x ", i);
+ len += snprintf(page + len, count - len, " 0x%08x ", value);
+ if ((j++)%4 == 0)
+ len += snprintf(page + len, count - len, "\n");
+ }
+
+ *eof = 1;
+ return len;
+}
+
+int proc_get_rf_reg_dump4(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int len = 0;
+ int i, j = 1, path;
+ u32 value;
+
+ len += snprintf(page + len, count - len, "\n======= RF REG =======\n");
+ path = 2;
+ len += snprintf(page + len, count - len, "\nRF_Path(%x)\n", path);
+ for (i = 0xC0; i < 0x100; i++) {
+ value = rtw_hal_read_rfreg(padapter, path, i, 0xffffffff);
+ if (j%4 == 1)
+ len += snprintf(page + len, count - len, "0x%02x ", i);
+ len += snprintf(page + len, count - len, " 0x%08x ", value);
+ if ((j++)%4 == 0)
+ len += snprintf(page + len, count - len, "\n");
+ }
+ *eof = 1;
+ return len;
+}
+
+int proc_get_rx_signal(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int len = 0;
+
+ len = snprintf(page + len, count,
+ "rssi:%d\n"
+ "rxpwdb:%d\n"
+ "signal_strength:%u\n"
+ "signal_qual:%u\n"
+ "noise:%u\n",
+ padapter->recvpriv.rssi,
+ padapter->recvpriv.rxpwdb,
+ padapter->recvpriv.signal_strength,
+ padapter->recvpriv.signal_qual,
+ padapter->recvpriv.noise
+ );
+
+ *eof = 1;
+ return len;
+}
+
+int proc_set_rx_signal(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ char tmp[32];
+ u32 is_signal_dbg;
+ s32 signal_strength;
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+ int num = sscanf(tmp, "%u %u", &is_signal_dbg, &signal_strength);
+ is_signal_dbg = is_signal_dbg == 0 ? 0 : 1;
+ if (is_signal_dbg && num != 2)
+ return count;
+
+ signal_strength = signal_strength > 100 ? 100 : signal_strength;
+ signal_strength = signal_strength < 0 ? 0 : signal_strength;
+
+ padapter->recvpriv.is_signal_dbg = is_signal_dbg;
+ padapter->recvpriv.signal_strength_dbg = signal_strength;
+
+ if (is_signal_dbg)
+ DBG_88E("set %s %u\n", "DBG_SIGNAL_STRENGTH", signal_strength);
+ else
+ DBG_88E("set %s\n", "HW_SIGNAL_STRENGTH");
+ }
+ return count;
+}
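+
+/*
+ * Note: proc_set_rx_signal() implements a debug override.  Writing
+ * "1 <0..100>" sets is_signal_dbg and forces signal_strength_dbg to the given
+ * (clamped) value; writing "0" switches back to the hardware-reported signal
+ * strength.  proc_get_rx_signal() above always reports the live recvpriv
+ * statistics.
+ */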
+
+int proc_get_ht_enable(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ int len = 0;
+
+ if (pregpriv)
+ len += snprintf(page + len, count - len,
+ "%d\n",
+ pregpriv->ht_enable
+ );
+ *eof = 1;
+ return len;
+}
+
+int proc_set_ht_enable(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ char tmp[32];
+ s32 mode = 0;
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+ sscanf(tmp, "%d", &mode);
+ if (pregpriv) {
+ pregpriv->ht_enable = mode;
+ pr_info("ht_enable=%d\n", pregpriv->ht_enable);
+ }
+ }
+
+ return count;
+}
+
+int proc_get_cbw40_enable(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+
+ int len = 0;
+
+ if (pregpriv)
+ len += snprintf(page + len, count - len,
+ "%d\n",
+ pregpriv->cbw40_enable
+ );
+
+ *eof = 1;
+ return len;
+}
+
+int proc_set_cbw40_enable(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ char tmp[32];
+ s32 mode = 0;
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+ sscanf(tmp, "%d", &mode);
+ if (pregpriv) {
+ pregpriv->cbw40_enable = mode;
+ pr_info("cbw40_enable=%d\n", mode);
+ }
+ }
+ return count;
+}
+
+int proc_get_ampdu_enable(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+
+ int len = 0;
+
+ if (pregpriv)
+ len += snprintf(page + len, count - len,
+ "%d\n",
+ pregpriv->ampdu_enable
+ );
+
+ *eof = 1;
+ return len;
+}
+
+int proc_set_ampdu_enable(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ char tmp[32];
+ s32 mode = 0;
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+ sscanf(tmp, "%d", &mode);
+ if (pregpriv) {
+ pregpriv->ampdu_enable = mode;
+ pr_info("ampdu_enable=%d\n", mode);
+ }
+ }
+ return count;
+}
+
+int proc_get_two_path_rssi(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ int len = 0;
+
+ if (padapter)
+ len += snprintf(page + len, count - len,
+ "%d %d\n",
+ padapter->recvpriv.RxRssi[0],
+ padapter->recvpriv.RxRssi[1]
+ );
+
+ *eof = 1;
+ return len;
+}
+
+int proc_get_rx_stbc(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+
+ int len = 0;
+
+ if (pregpriv)
+ len += snprintf(page + len, count - len,
+ "%d\n",
+ pregpriv->rx_stbc
+ );
+
+ *eof = 1;
+ return len;
+}
+
+int proc_set_rx_stbc(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ char tmp[32];
+ u32 mode = 0;
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+ sscanf(tmp, "%u", &mode);
+ if (pregpriv) {
+ pregpriv->rx_stbc = mode;
+ printk("rx_stbc=%d\n", mode);
+ }
+ }
+ return count;
+}
+
+int proc_get_rssi_disp(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ *eof = 1;
+ return 0;
+}
+
+int proc_set_rssi_disp(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ char tmp[32];
+ u32 enable = 0;
+
+ if (count < 1) {
+ DBG_88E("argument size is less than 1\n");
+ return -EFAULT;
+ }
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+ int num = sscanf(tmp, "%x", &enable);
+
+ if (num != 1) {
+ DBG_88E("invalid set_rssi_disp parameter!\n");
+ return count;
+ }
+
+ if (enable) {
+ DBG_88E("Turn On Rx RSSI Display Function\n");
+ padapter->bRxRSSIDisplay = enable;
+ } else {
+ DBG_88E("Turn Off Rx RSSI Display Function\n");
+ padapter->bRxRSSIDisplay = 0;
+ }
+ }
+ return count;
+}
+
+#ifdef CONFIG_88EU_AP_MODE
+
+int proc_get_all_sta_info(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ unsigned long irqL;
+ struct sta_info *psta;
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ int i, j;
+ struct list_head *plist, *phead;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ int len = 0;
+
+
+ len += snprintf(page + len, count - len, "sta_dz_bitmap=0x%x, tim_bitmap=0x%x\n", pstapriv->sta_dz_bitmap, pstapriv->tim_bitmap);
+
+ _enter_critical_bh(&pstapriv->sta_hash_lock, &irqL);
+
+ for (i = 0; i < NUM_STA; i++) {
+ phead = &(pstapriv->sta_hash[i]);
+ plist = get_next(phead);
+
+ while ((rtw_end_of_queue_search(phead, plist)) == false) {
+ psta = LIST_CONTAINOR(plist, struct sta_info, hash_list);
+
+ plist = get_next(plist);
+
+ len += snprintf(page + len, count - len, "sta's macaddr: %pM\n", psta->hwaddr);
+ len += snprintf(page + len, count - len, "rtsen=%d, cts2self=%d\n", psta->rtsen, psta->cts2self);
+ len += snprintf(page + len, count - len, "state=0x%x, aid=%d, macid=%d, raid=%d\n", psta->state, psta->aid, psta->mac_id, psta->raid);
+ len += snprintf(page + len, count - len, "qos_en=%d, ht_en=%d, init_rate=%d\n", psta->qos_option, psta->htpriv.ht_option, psta->init_rate);
+ len += snprintf(page + len, count - len, "bwmode=%d, ch_offset=%d, sgi=%d\n", psta->htpriv.bwmode, psta->htpriv.ch_offset, psta->htpriv.sgi);
+ len += snprintf(page + len, count - len, "ampdu_enable = %d\n", psta->htpriv.ampdu_enable);
+ len += snprintf(page + len, count - len, "agg_enable_bitmap=%x, candidate_tid_bitmap=%x\n", psta->htpriv.agg_enable_bitmap, psta->htpriv.candidate_tid_bitmap);
+ len += snprintf(page + len, count - len, "sleepq_len=%d\n", psta->sleepq_len);
+ len += snprintf(page + len, count - len, "capability=0x%x\n", psta->capability);
+ len += snprintf(page + len, count - len, "flags=0x%x\n", psta->flags);
+ len += snprintf(page + len, count - len, "wpa_psk=0x%x\n", psta->wpa_psk);
+ len += snprintf(page + len, count - len, "wpa2_group_cipher=0x%x\n", psta->wpa2_group_cipher);
+ len += snprintf(page + len, count - len, "wpa2_pairwise_cipher=0x%x\n", psta->wpa2_pairwise_cipher);
+ len += snprintf(page + len, count - len, "qos_info=0x%x\n", psta->qos_info);
+ len += snprintf(page + len, count - len, "dot118021XPrivacy=0x%x\n", psta->dot118021XPrivacy);
+
+ for (j = 0; j < 16; j++) {
+ preorder_ctrl = &psta->recvreorder_ctrl[j];
+ if (preorder_ctrl->enable)
+ len += snprintf(page + len, count - len, "tid=%d, indicate_seq=%d\n", j, preorder_ctrl->indicate_seq);
+ }
+ }
+ }
+ _exit_critical_bh(&pstapriv->sta_hash_lock, &irqL);
+
+ *eof = 1;
+ return len;
+}
+#endif
+
+int proc_get_best_channel(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ int len = 0;
+ u32 i, best_channel_24G = 1, best_channel_5G = 36, index_24G = 0, index_5G = 0;
+
+ for (i = 0; pmlmeext->channel_set[i].ChannelNum != 0; i++) {
+ if (pmlmeext->channel_set[i].ChannelNum == 1)
+ index_24G = i;
+ if (pmlmeext->channel_set[i].ChannelNum == 36)
+ index_5G = i;
+ }
+
+ for (i = 0; pmlmeext->channel_set[i].ChannelNum != 0; i++) {
+ /* 2.4G */
+ if (pmlmeext->channel_set[i].ChannelNum == 6) {
+ if (pmlmeext->channel_set[i].rx_count < pmlmeext->channel_set[index_24G].rx_count) {
+ index_24G = i;
+ best_channel_24G = pmlmeext->channel_set[i].ChannelNum;
+ }
+ }
+
+ /* 5G */
+ if (pmlmeext->channel_set[i].ChannelNum >= 36 &&
+ pmlmeext->channel_set[i].ChannelNum < 140) {
+ /* Find primary channel */
+ if (((pmlmeext->channel_set[i].ChannelNum - 36) % 8 == 0) &&
+ (pmlmeext->channel_set[i].rx_count < pmlmeext->channel_set[index_5G].rx_count)) {
+ index_5G = i;
+ best_channel_5G = pmlmeext->channel_set[i].ChannelNum;
+ }
+ }
+
+ if (pmlmeext->channel_set[i].ChannelNum >= 149 &&
+ pmlmeext->channel_set[i].ChannelNum < 165) {
+ /* find primary channel */
+ if (((pmlmeext->channel_set[i].ChannelNum - 149) % 8 == 0) &&
+ (pmlmeext->channel_set[i].rx_count < pmlmeext->channel_set[index_5G].rx_count)) {
+ index_5G = i;
+ best_channel_5G = pmlmeext->channel_set[i].ChannelNum;
+ }
+ }
+ /* debug */
+ len += snprintf(page + len, count - len, "The rx cnt of channel %3d = %d\n",
+ pmlmeext->channel_set[i].ChannelNum, pmlmeext->channel_set[i].rx_count);
+ }
+
+ len += snprintf(page + len, count - len, "best_channel_5G = %d\n", best_channel_5G);
+ len += snprintf(page + len, count - len, "best_channel_24G = %d\n", best_channel_24G);
+
+ *eof = 1;
+ return len;
+}
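+
+/*
+ * Note: the "best channel" heuristic above simply picks the candidate with
+ * the lowest observed rx_count in the channel survey.  As written, the
+ * 2.4 GHz comparison only covers channels 1 and 6, while the 5 GHz comparison
+ * covers the 40 MHz-aligned primaries (36 + 8k below 140 and 149 + 8k below
+ * 165).
+ */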
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
new file mode 100644
index 0000000..869434c
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -0,0 +1,875 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_EFUSE_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <rtw_efuse.h>
+
+/*------------------------Define local variable------------------------------*/
+u8 fakeEfuseBank;
+u32 fakeEfuseUsedBytes;
+u8 fakeEfuseContent[EFUSE_MAX_HW_SIZE] = {0};
+u8 fakeEfuseInitMap[EFUSE_MAX_MAP_LEN] = {0};
+u8 fakeEfuseModifiedMap[EFUSE_MAX_MAP_LEN] = {0};
+
+u32 BTEfuseUsedBytes;
+u8 BTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
+u8 BTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN] = {0};
+u8 BTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN] = {0};
+
+u32 fakeBTEfuseUsedBytes;
+u8 fakeBTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
+u8 fakeBTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN] = {0};
+u8 fakeBTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN] = {0};
+/*------------------------Define local variable------------------------------*/
+
+/* */
+#define REG_EFUSE_CTRL 0x0030
+#define EFUSE_CTRL REG_EFUSE_CTRL /* E-Fuse Control. */
+/* */
+
+bool
+Efuse_Read1ByteFromFakeContent(
+ struct adapter *pAdapter,
+ u16 Offset,
+ u8 *Value);
+bool
+Efuse_Read1ByteFromFakeContent(
+ struct adapter *pAdapter,
+ u16 Offset,
+ u8 *Value)
+{
+ if (Offset >= EFUSE_MAX_HW_SIZE)
+ return false;
+ if (fakeEfuseBank == 0)
+ *Value = fakeEfuseContent[Offset];
+ else
+ *Value = fakeBTEfuseContent[fakeEfuseBank-1][Offset];
+ return true;
+}
+
+static bool
+Efuse_Write1ByteToFakeContent(
+ struct adapter *pAdapter,
+ u16 Offset,
+ u8 Value)
+{
+ if (Offset >= EFUSE_MAX_HW_SIZE)
+ return false;
+ if (fakeEfuseBank == 0) {
+ fakeEfuseContent[Offset] = Value;
+ } else {
+ fakeBTEfuseContent[fakeEfuseBank-1][Offset] = Value;
+ }
+ return true;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: Efuse_PowerSwitch
+ *
+ * Overview: When we want to enable write operation, we should change to
+ * pwr on state. When we stop write, we should switch to 500k mode
+ * and disable LDO 2.5V.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/17/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+void
+Efuse_PowerSwitch(
+ struct adapter *pAdapter,
+ u8 write,
+ u8 PwrState)
+{
+ pAdapter->HalFunc.EfusePowerSwitch(pAdapter, write, PwrState);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: efuse_GetCurrentSize
+ *
+ * Overview: Get current efuse size!!!
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/16/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+u16
+Efuse_GetCurrentSize(
+ struct adapter *pAdapter,
+ u8 efuseType,
+ bool pseudo)
+{
+ u16 ret = 0;
+
+ ret = pAdapter->HalFunc.EfuseGetCurrentSize(pAdapter, efuseType, pseudo);
+
+ return ret;
+}
+
+/* 11/16/2008 MH Add description. Get current efuse area enabled word!!. */
+u8
+Efuse_CalculateWordCnts(u8 word_en)
+{
+ u8 word_cnts = 0;
+ if (!(word_en & BIT(0)))
+ word_cnts++; /* 0 : write enable */
+ if (!(word_en & BIT(1)))
+ word_cnts++;
+ if (!(word_en & BIT(2)))
+ word_cnts++;
+ if (!(word_en & BIT(3)))
+ word_cnts++;
+ return word_cnts;
+}
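+
+/*
+ * Note: word_en is an active-low bitmap, one bit per 2-byte word of an 8-byte
+ * PG packet; a cleared bit means the word is written/valid.  For example,
+ * word_en = 0x9 (0b1001) leaves words 1 and 2 enabled, so
+ * Efuse_CalculateWordCnts() returns 2.
+ */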
+
+/* */
+/* Description: */
+/* Execute E-Fuse read byte operation. */
+/* Referred from SD1 Richard. */
+/* */
+/* Assumption: */
+/* 1. Boot from E-Fuse and successfully auto-load. */
+/* 2. PASSIVE_LEVEL (USB interface) */
+/* */
+/* Created by Roger, 2008.10.21. */
+/* */
+void
+ReadEFuseByte(
+ struct adapter *Adapter,
+ u16 _offset,
+ u8 *pbuf,
+ bool pseudo)
+{
+ u32 value32;
+ u8 readbyte;
+ u16 retry;
+
+ if (pseudo) {
+ Efuse_Read1ByteFromFakeContent(Adapter, _offset, pbuf);
+ return;
+ }
+
+ /* Write Address */
+ rtw_write8(Adapter, EFUSE_CTRL+1, (_offset & 0xff));
+ readbyte = rtw_read8(Adapter, EFUSE_CTRL+2);
+ rtw_write8(Adapter, EFUSE_CTRL+2, ((_offset >> 8) & 0x03) | (readbyte & 0xfc));
+
+ /* Clear bit 31 of EFUSE_CTRL to trigger the read */
+ readbyte = rtw_read8(Adapter, EFUSE_CTRL+3);
+ rtw_write8(Adapter, EFUSE_CTRL+3, (readbyte & 0x7f));
+
+ /* Poll bit 31 for read-ready */
+ retry = 0;
+ value32 = rtw_read32(Adapter, EFUSE_CTRL);
+ while (!(((value32 >> 24) & 0xff) & 0x80) && (retry < 10000)) {
+ value32 = rtw_read32(Adapter, EFUSE_CTRL);
+ retry++;
+ }
+
+ /* 20100205 Joseph: Add delay suggested by SD1 Victor. */
+ /* This fix the problem that Efuse read error in high temperature condition. */
+ /* Designer says that there shall be some delay after ready bit is set, or the */
+ /* result will always stay on last data we read. */
+ rtw_udelay_os(50);
+ value32 = rtw_read32(Adapter, EFUSE_CTRL);
+
+ *pbuf = (u8)(value32 & 0xff);
+}
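+
+/*
+ * Recap of the handshake above: the 10-bit e-fuse address is split between
+ * EFUSE_CTRL+1 (bits 7:0) and the low two bits of EFUSE_CTRL+2; clearing
+ * bit 7 of EFUSE_CTRL+3 (bit 31 of the 32-bit register) starts the read, the
+ * hardware sets that bit again when the data is valid, and the byte is then
+ * taken from the lowest byte of EFUSE_CTRL.
+ */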
+
+/* */
+/* Description: */
+/* 1. Execute E-Fuse read byte operation according as map offset and */
+/* save to E-Fuse table. */
+/* 2. Referred from SD1 Richard. */
+/* */
+/* Assumption: */
+/* 1. Boot from E-Fuse and successfully auto-load. */
+/* 2. PASSIVE_LEVEL (USB interface) */
+/* */
+/* Created by Roger, 2008.10.21. */
+/* */
+/* 2008/12/12 MH 1. Reorganize code flow and reserve bytes. and add description. */
+/* 2. Add efuse utilization collect. */
+/* 2008/12/22 MH Read Efuse must check if we write section 1 data again!!! Sec1 */
+/* write addr must be after sec5. */
+/* */
+
+static void efuse_ReadEFuse(struct adapter *Adapter, u8 efuseType, u16 _offset, u16 _size_byte, u8 *pbuf, bool pseudo)
+{
+ Adapter->HalFunc.ReadEFuse(Adapter, efuseType, _offset, _size_byte, pbuf, pseudo);
+}
+
+void EFUSE_GetEfuseDefinition(struct adapter *pAdapter, u8 efuseType, u8 type, void *pOut, bool pseudo
+ )
+{
+ pAdapter->HalFunc.EFUSEGetEfuseDefinition(pAdapter, efuseType, type, pOut, pseudo);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: EFUSE_Read1Byte
+ *
+ * Overview: Copy from WMAC for EFUSE read 1 byte.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 09/23/2008 MHC Copy from WMAC.
+ *
+ *---------------------------------------------------------------------------*/
+u8 EFUSE_Read1Byte(struct adapter *Adapter, u16 Address)
+{
+ u8 data;
+ u8 Bytetemp = {0x00};
+ u8 temp = {0x00};
+ u32 k = 0;
+ u16 contentLen = 0;
+
+ EFUSE_GetEfuseDefinition(Adapter, EFUSE_WIFI , TYPE_EFUSE_REAL_CONTENT_LEN, (void *)&contentLen, false);
+
+ if (Address < contentLen) { /* E-fuse 512Byte */
+ /* Write E-fuse Register address bit0~7 */
+ temp = Address & 0xFF;
+ rtw_write8(Adapter, EFUSE_CTRL+1, temp);
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+2);
+ /* Write E-fuse Register address bit8~9 */
+ temp = ((Address >> 8) & 0x03) | (Bytetemp & 0xFC);
+ rtw_write8(Adapter, EFUSE_CTRL+2, temp);
+
+ /* Write 0x30[31]= 0 */
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3);
+ temp = Bytetemp & 0x7F;
+ rtw_write8(Adapter, EFUSE_CTRL+3, temp);
+
+ /* Wait Write-ready (0x30[31]= 1) */
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3);
+ while (!(Bytetemp & 0x80)) {
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3);
+ k++;
+ if (k == 1000) {
+ k = 0;
+ break;
+ }
+ }
+ data = rtw_read8(Adapter, EFUSE_CTRL);
+ return data;
+ } else {
+ return 0xFF;
+ }
+
+} /* EFUSE_Read1Byte */
+
+/* 11/16/2008 MH Read one byte from real Efuse. */
+u8 efuse_OneByteRead(struct adapter *pAdapter, u16 addr, u8 *data, bool pseudo)
+{
+ u8 tmpidx = 0;
+ u8 result;
+
+ if (pseudo) {
+ result = Efuse_Read1ByteFromFakeContent(pAdapter, addr, data);
+ return result;
+ }
+ /* -----------------e-fuse reg ctrl --------------------------------- */
+ /* address */
+ rtw_write8(pAdapter, EFUSE_CTRL+1, (u8)(addr & 0xff));
+ rtw_write8(pAdapter, EFUSE_CTRL+2, ((u8)((addr>>8) & 0x03)) |
+ (rtw_read8(pAdapter, EFUSE_CTRL+2) & 0xFC));
+
+ rtw_write8(pAdapter, EFUSE_CTRL+3, 0x72);/* read cmd */
+
+ while (!(0x80 & rtw_read8(pAdapter, EFUSE_CTRL+3)) && (tmpidx < 100))
+ tmpidx++;
+ if (tmpidx < 100) {
+ *data = rtw_read8(pAdapter, EFUSE_CTRL);
+ result = true;
+ } else {
+ *data = 0xff;
+ result = false;
+ }
+ return result;
+}
+
+/* 11/16/2008 MH Write one byte to real Efuse. */
+u8 efuse_OneByteWrite(struct adapter *pAdapter, u16 addr, u8 data, bool pseudo)
+{
+ u8 tmpidx = 0;
+ u8 result;
+
+ if (pseudo) {
+ result = Efuse_Write1ByteToFakeContent(pAdapter, addr, data);
+ return result;
+ }
+
+ /* -----------------e-fuse reg ctrl --------------------------------- */
+ /* address */
+ rtw_write8(pAdapter, EFUSE_CTRL+1, (u8)(addr&0xff));
+ rtw_write8(pAdapter, EFUSE_CTRL+2,
+ (rtw_read8(pAdapter, EFUSE_CTRL+2) & 0xFC) |
+ (u8)((addr>>8) & 0x03));
+ rtw_write8(pAdapter, EFUSE_CTRL, data);/* data */
+
+ rtw_write8(pAdapter, EFUSE_CTRL+3, 0xF2);/* write cmd */
+
+ while ((0x80 & rtw_read8(pAdapter, EFUSE_CTRL+3)) && (tmpidx < 100))
+ tmpidx++;
+
+ if (tmpidx < 100)
+ result = true;
+ else
+ result = false;
+
+ return result;
+}
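+
+/*
+ * Note: efuse_OneByteRead()/efuse_OneByteWrite() differ only in the command
+ * byte written to EFUSE_CTRL+3 (0x72 for read, 0xF2 for write) and in the
+ * polling condition: a read waits for bit 7 to become set, a write waits for
+ * it to clear again.  Both give up after 100 polls and report failure.
+ */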
+
+int Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data, bool pseudo)
+{
+ int ret = 0;
+
+ ret = pAdapter->HalFunc.Efuse_PgPacketRead(pAdapter, offset, data, pseudo);
+
+ return ret;
+}
+
+int Efuse_PgPacketWrite(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *data, bool pseudo)
+{
+ int ret;
+
+ ret = pAdapter->HalFunc.Efuse_PgPacketWrite(pAdapter, offset, word_en, data, pseudo);
+
+ return ret;
+}
+
+static int Efuse_PgPacketWrite_BT(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *data, bool pseudo)
+{
+ int ret;
+
+ ret = pAdapter->HalFunc.Efuse_PgPacketWrite_BT(pAdapter, offset, word_en, data, pseudo);
+
+ return ret;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: efuse_WordEnableDataRead
+ *
+ * Overview: Read allowed word in current efuse section data.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/16/2008 MHC Create Version 0.
+ * 11/21/2008 MHC Fix Write bug when we only enable late word.
+ *
+ *---------------------------------------------------------------------------*/
+void efuse_WordEnableDataRead(u8 word_en, u8 *sourdata, u8 *targetdata)
+{
+ if (!(word_en&BIT(0))) {
+ targetdata[0] = sourdata[0];
+ targetdata[1] = sourdata[1];
+ }
+ if (!(word_en&BIT(1))) {
+ targetdata[2] = sourdata[2];
+ targetdata[3] = sourdata[3];
+ }
+ if (!(word_en&BIT(2))) {
+ targetdata[4] = sourdata[4];
+ targetdata[5] = sourdata[5];
+ }
+ if (!(word_en&BIT(3))) {
+ targetdata[6] = sourdata[6];
+ targetdata[7] = sourdata[7];
+ }
+}
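+
+/*
+ * Example: with word_en = 0x6 (0b0110) only bits 0 and 3 are clear, so
+ * efuse_WordEnableDataRead() copies bytes 0-1 and 6-7 from sourdata to
+ * targetdata and leaves bytes 2-5 untouched.
+ */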
+
+u8 Efuse_WordEnableDataWrite(struct adapter *pAdapter, u16 efuse_addr, u8 word_en, u8 *data, bool pseudo)
+{
+ u8 ret = 0;
+
+ ret = pAdapter->HalFunc.Efuse_WordEnableDataWrite(pAdapter, efuse_addr, word_en, data, pseudo);
+
+ return ret;
+}
+
+static u8 efuse_read8(struct adapter *padapter, u16 address, u8 *value)
+{
+ return efuse_OneByteRead(padapter, address, value, false);
+}
+
+static u8 efuse_write8(struct adapter *padapter, u16 address, u8 *value)
+{
+ return efuse_OneByteWrite(padapter, address, *value, false);
+}
+
+/*
+ * read/write raw efuse data
+ */
+u8 rtw_efuse_access(struct adapter *padapter, u8 write, u16 start_addr, u16 cnts, u8 *data)
+{
+ int i = 0;
+ u16 real_content_len = 0, max_available_size = 0;
+ u8 res = _FAIL;
+ u8 (*rw8)(struct adapter *, u16, u8*);
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_EFUSE_REAL_CONTENT_LEN, (void *)&real_content_len, false);
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
+
+ if (start_addr > real_content_len)
+ return _FAIL;
+
+ if (write) {
+ if ((start_addr + cnts) > max_available_size)
+ return _FAIL;
+ rw8 = &efuse_write8;
+ } else {
+ rw8 = &efuse_read8;
+ }
+
+ Efuse_PowerSwitch(padapter, write, true);
+
+ /* e-fuse one byte read / write */
+ for (i = 0; i < cnts; i++) {
+ if (start_addr >= real_content_len) {
+ res = _FAIL;
+ break;
+ }
+
+ res = rw8(padapter, start_addr++, data++);
+ if (_FAIL == res)
+ break;
+ }
+
+ Efuse_PowerSwitch(padapter, write, false);
+
+ return res;
+}
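+
+/*
+ * Illustrative caller-side sketch (buffer name and size are hypothetical):
+ * reading the first 16 raw e-fuse bytes would look like
+ *
+ *   u8 raw[16];
+ *   if (rtw_efuse_access(padapter, false, 0, 16, raw) == _FAIL)
+ *           // handle the error
+ *
+ * Writes go through the same call with write = true and are additionally
+ * bounded by TYPE_AVAILABLE_EFUSE_BYTES_TOTAL.
+ */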
+/* */
+u16 efuse_GetMaxSize(struct adapter *padapter)
+{
+ u16 max_size;
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI , TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_size, false);
+ return max_size;
+}
+/* */
+u8 efuse_GetCurrentSize(struct adapter *padapter, u16 *size)
+{
+ Efuse_PowerSwitch(padapter, false, true);
+ *size = Efuse_GetCurrentSize(padapter, EFUSE_WIFI, false);
+ Efuse_PowerSwitch(padapter, false, false);
+
+ return _SUCCESS;
+}
+/* */
+u8 rtw_efuse_map_read(struct adapter *padapter, u16 addr, u16 cnts, u8 *data)
+{
+ u16 mapLen = 0;
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, false);
+
+ if ((addr + cnts) > mapLen)
+ return _FAIL;
+
+ Efuse_PowerSwitch(padapter, false, true);
+
+ efuse_ReadEFuse(padapter, EFUSE_WIFI, addr, cnts, data, false);
+
+ Efuse_PowerSwitch(padapter, false, false);
+
+ return _SUCCESS;
+}
+
+u8 rtw_BT_efuse_map_read(struct adapter *padapter, u16 addr, u16 cnts, u8 *data)
+{
+ u16 mapLen = 0;
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_BT, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, false);
+
+ if ((addr + cnts) > mapLen)
+ return _FAIL;
+
+ Efuse_PowerSwitch(padapter, false, true);
+
+ efuse_ReadEFuse(padapter, EFUSE_BT, addr, cnts, data, false);
+
+ Efuse_PowerSwitch(padapter, false, false);
+
+ return _SUCCESS;
+}
+/* */
+u8 rtw_efuse_map_write(struct adapter *padapter, u16 addr, u16 cnts, u8 *data)
+{
+ u8 offset, word_en;
+ u8 *map;
+ u8 newdata[PGPKT_DATA_SIZE];
+ s32 i, idx;
+ u8 ret = _SUCCESS;
+ u16 mapLen = 0;
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, false);
+
+ if ((addr + cnts) > mapLen)
+ return _FAIL;
+
+ map = rtw_zmalloc(mapLen);
+ if (map == NULL)
+ return _FAIL;
+
+ ret = rtw_efuse_map_read(padapter, 0, mapLen, map);
+ if (ret == _FAIL)
+ goto exit;
+
+ Efuse_PowerSwitch(padapter, true, true);
+
+ offset = (addr >> 3);
+ word_en = 0xF;
+ _rtw_memset(newdata, 0xFF, PGPKT_DATA_SIZE);
+ i = addr & 0x7; /* index of one package */
+ idx = 0; /* data index */
+
+ if (i & 0x1) {
+ /* odd start */
+ if (data[idx] != map[addr+idx]) {
+ word_en &= ~BIT(i >> 1);
+ newdata[i-1] = map[addr+idx-1];
+ newdata[i] = data[idx];
+ }
+ i++;
+ idx++;
+ }
+ do {
+ for (; i < PGPKT_DATA_SIZE; i += 2) {
+ if (cnts == idx)
+ break;
+ if ((cnts - idx) == 1) {
+ if (data[idx] != map[addr+idx]) {
+ word_en &= ~BIT(i >> 1);
+ newdata[i] = data[idx];
+ newdata[i+1] = map[addr+idx+1];
+ }
+ idx++;
+ break;
+ } else {
+ if ((data[idx] != map[addr+idx]) ||
+ (data[idx+1] != map[addr+idx+1])) {
+ word_en &= ~BIT(i >> 1);
+ newdata[i] = data[idx];
+ newdata[i+1] = data[idx + 1];
+ }
+ idx += 2;
+ }
+ if (idx == cnts)
+ break;
+ }
+
+ if (word_en != 0xF) {
+ ret = Efuse_PgPacketWrite(padapter, offset, word_en, newdata, false);
+ DBG_88E("offset=%x\n", offset);
+ DBG_88E("word_en=%x\n", word_en);
+
+ for (i = 0; i < PGPKT_DATA_SIZE; i++)
+ DBG_88E("data=%x \t", newdata[i]);
+ if (ret == _FAIL)
+ break;
+ }
+
+ if (idx == cnts)
+ break;
+
+ offset++;
+ i = 0;
+ word_en = 0xF;
+ _rtw_memset(newdata, 0xFF, PGPKT_DATA_SIZE);
+ } while (1);
+
+ Efuse_PowerSwitch(padapter, true, false);
+exit:
+ kfree(map);
+ return ret;
+}
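+
+/*
+ * Summary of rtw_efuse_map_write(): it reads back the whole logical map,
+ * walks the requested range in 8-byte PG-packet sections (offset = addr >> 3),
+ * clears a word_en bit only for the 2-byte words that actually differ from
+ * the current map, and calls Efuse_PgPacketWrite() only when at least one
+ * word changed (word_en != 0xF), so unmodified words never consume e-fuse
+ * space.
+ */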
+
+/* */
+u8 rtw_BT_efuse_map_write(struct adapter *padapter, u16 addr, u16 cnts, u8 *data)
+{
+ u8 offset, word_en;
+ u8 *map;
+ u8 newdata[PGPKT_DATA_SIZE];
+ s32 i, idx;
+ u8 ret = _SUCCESS;
+ u16 mapLen = 0;
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_BT, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, false);
+
+ if ((addr + cnts) > mapLen)
+ return _FAIL;
+
+ map = rtw_zmalloc(mapLen);
+ if (map == NULL)
+ return _FAIL;
+
+ ret = rtw_BT_efuse_map_read(padapter, 0, mapLen, map);
+ if (ret == _FAIL)
+ goto exit;
+
+ Efuse_PowerSwitch(padapter, true, true);
+
+ offset = (addr >> 3);
+ word_en = 0xF;
+ _rtw_memset(newdata, 0xFF, PGPKT_DATA_SIZE);
+ i = addr & 0x7; /* index of one package */
+ idx = 0; /* data index */
+
+ if (i & 0x1) {
+ /* odd start */
+ if (data[idx] != map[addr+idx]) {
+ word_en &= ~BIT(i >> 1);
+ newdata[i-1] = map[addr+idx-1];
+ newdata[i] = data[idx];
+ }
+ i++;
+ idx++;
+ }
+ do {
+ for (; i < PGPKT_DATA_SIZE; i += 2) {
+ if (cnts == idx)
+ break;
+ if ((cnts - idx) == 1) {
+ if (data[idx] != map[addr+idx]) {
+ word_en &= ~BIT(i >> 1);
+ newdata[i] = data[idx];
+ newdata[i+1] = map[addr+idx+1];
+ }
+ idx++;
+ break;
+ } else {
+ if ((data[idx] != map[addr+idx]) ||
+ (data[idx+1] != map[addr+idx+1])) {
+ word_en &= ~BIT(i >> 1);
+ newdata[i] = data[idx];
+ newdata[i+1] = data[idx + 1];
+ }
+ idx += 2;
+ }
+ if (idx == cnts)
+ break;
+ }
+
+ if (word_en != 0xF) {
+ DBG_88E("%s: offset=%#X\n", __func__, offset);
+ DBG_88E("%s: word_en=%#X\n", __func__, word_en);
+ DBG_88E("%s: data=", __func__);
+ for (i = 0; i < PGPKT_DATA_SIZE; i++)
+ DBG_88E("0x%02X ", newdata[i]);
+ DBG_88E("\n");
+
+ ret = Efuse_PgPacketWrite_BT(padapter, offset, word_en, newdata, false);
+ if (ret == _FAIL)
+ break;
+ }
+
+ if (idx == cnts)
+ break;
+
+ offset++;
+ i = 0;
+ word_en = 0xF;
+ _rtw_memset(newdata, 0xFF, PGPKT_DATA_SIZE);
+ } while (1);
+
+ Efuse_PowerSwitch(padapter, true, false);
+
+exit:
+
+ kfree(map);
+
+ return ret;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: efuse_ShadowRead1Byte
+ * efuse_ShadowRead2Byte
+ * efuse_ShadowRead4Byte
+ *
+ * Overview: Read from efuse init map by one/two/four bytes !!!!!
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/12/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+static void
+efuse_ShadowRead1Byte(
+ struct adapter *pAdapter,
+ u16 Offset,
+ u8 *Value)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(pAdapter);
+
+ *Value = pEEPROM->efuse_eeprom_data[Offset];
+
+} /* EFUSE_ShadowRead1Byte */
+
+/* Read Two Bytes */
+static void
+efuse_ShadowRead2Byte(
+ struct adapter *pAdapter,
+ u16 Offset,
+ u16 *Value)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(pAdapter);
+
+ *Value = pEEPROM->efuse_eeprom_data[Offset];
+ *Value |= pEEPROM->efuse_eeprom_data[Offset+1]<<8;
+
+} /* EFUSE_ShadowRead2Byte */
+
+/* Read Four Bytes */
+static void
+efuse_ShadowRead4Byte(
+ struct adapter *pAdapter,
+ u16 Offset,
+ u32 *Value)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(pAdapter);
+
+ *Value = pEEPROM->efuse_eeprom_data[Offset];
+ *Value |= pEEPROM->efuse_eeprom_data[Offset+1]<<8;
+ *Value |= pEEPROM->efuse_eeprom_data[Offset+2]<<16;
+ *Value |= pEEPROM->efuse_eeprom_data[Offset+3]<<24;
+
+} /* efuse_ShadowRead4Byte */
+
+/*-----------------------------------------------------------------------------
+ * Function: Efuse_ReadAllMap
+ *
+ * Overview: Read All Efuse content
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/11/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+static void Efuse_ReadAllMap(struct adapter *pAdapter, u8 efuseType, u8 *Efuse, bool pseudo)
+{
+ u16 mapLen = 0;
+
+ Efuse_PowerSwitch(pAdapter, false, true);
+
+ EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, pseudo);
+
+ efuse_ReadEFuse(pAdapter, efuseType, 0, mapLen, Efuse, pseudo);
+
+ Efuse_PowerSwitch(pAdapter, false, false);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: EFUSE_ShadowMapUpdate
+ *
+ * Overview: Transfer current EFUSE content to shadow init and modify map.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/13/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+void EFUSE_ShadowMapUpdate(
+ struct adapter *pAdapter,
+ u8 efuseType,
+ bool pseudo)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(pAdapter);
+ u16 mapLen = 0;
+
+ EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, pseudo);
+
+ if (pEEPROM->bautoload_fail_flag)
+ _rtw_memset(pEEPROM->efuse_eeprom_data, 0xFF, mapLen);
+ else
+ Efuse_ReadAllMap(pAdapter, efuseType, pEEPROM->efuse_eeprom_data, pseudo);
+} /* EFUSE_ShadowMapUpdate */
+
+/*-----------------------------------------------------------------------------
+ * Function: EFUSE_ShadowRead
+ *
+ * Overview: Read from efuse init map !!!!!
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/12/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+void EFUSE_ShadowRead(struct adapter *pAdapter, u8 Type, u16 Offset, u32 *Value)
+{
+ if (Type == 1)
+ efuse_ShadowRead1Byte(pAdapter, Offset, (u8 *)Value);
+ else if (Type == 2)
+ efuse_ShadowRead2Byte(pAdapter, Offset, (u16 *)Value);
+ else if (Type == 4)
+ efuse_ShadowRead4Byte(pAdapter, Offset, (u32 *)Value);
+
+} /* EFUSE_ShadowRead */
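+
+/*
+ * Example (values are hypothetical): the shadow readers assemble
+ * little-endian values from the cached efuse_eeprom_data[] map, so if
+ * efuse_eeprom_data[0x16] = 0x34 and efuse_eeprom_data[0x17] = 0x12, then
+ * EFUSE_ShadowRead(padapter, 2, 0x16, &val) yields val = 0x1234.
+ */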
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
new file mode 100644
index 0000000..6fc7742
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -0,0 +1,1640 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _IEEE80211_C
+
+#include <drv_types.h>
+#include <ieee80211.h>
+#include <wifi.h>
+#include <osdep_service.h>
+#include <wlan_bssdef.h>
+#include <usb_osintf.h>
+
+u8 RTW_WPA_OUI_TYPE[] = { 0x00, 0x50, 0xf2, 1 };
+u16 RTW_WPA_VERSION = 1;
+u8 WPA_AUTH_KEY_MGMT_NONE[] = { 0x00, 0x50, 0xf2, 0 };
+u8 WPA_AUTH_KEY_MGMT_UNSPEC_802_1X[] = { 0x00, 0x50, 0xf2, 1 };
+u8 WPA_AUTH_KEY_MGMT_PSK_OVER_802_1X[] = { 0x00, 0x50, 0xf2, 2 };
+u8 WPA_CIPHER_SUITE_NONE[] = { 0x00, 0x50, 0xf2, 0 };
+u8 WPA_CIPHER_SUITE_WEP40[] = { 0x00, 0x50, 0xf2, 1 };
+u8 WPA_CIPHER_SUITE_TKIP[] = { 0x00, 0x50, 0xf2, 2 };
+u8 WPA_CIPHER_SUITE_WRAP[] = { 0x00, 0x50, 0xf2, 3 };
+u8 WPA_CIPHER_SUITE_CCMP[] = { 0x00, 0x50, 0xf2, 4 };
+u8 WPA_CIPHER_SUITE_WEP104[] = { 0x00, 0x50, 0xf2, 5 };
+
+u16 RSN_VERSION_BSD = 1;
+u8 RSN_AUTH_KEY_MGMT_UNSPEC_802_1X[] = { 0x00, 0x0f, 0xac, 1 };
+u8 RSN_AUTH_KEY_MGMT_PSK_OVER_802_1X[] = { 0x00, 0x0f, 0xac, 2 };
+u8 RSN_CIPHER_SUITE_NONE[] = { 0x00, 0x0f, 0xac, 0 };
+u8 RSN_CIPHER_SUITE_WEP40[] = { 0x00, 0x0f, 0xac, 1 };
+u8 RSN_CIPHER_SUITE_TKIP[] = { 0x00, 0x0f, 0xac, 2 };
+u8 RSN_CIPHER_SUITE_WRAP[] = { 0x00, 0x0f, 0xac, 3 };
+u8 RSN_CIPHER_SUITE_CCMP[] = { 0x00, 0x0f, 0xac, 4 };
+u8 RSN_CIPHER_SUITE_WEP104[] = { 0x00, 0x0f, 0xac, 5 };
+/* */
+/* for adhoc-master to generate ie and provide supported-rate to fw */
+/* */
+
+static u8 WIFI_CCKRATES[] = {
+ (IEEE80211_CCK_RATE_1MB | IEEE80211_BASIC_RATE_MASK),
+ (IEEE80211_CCK_RATE_2MB | IEEE80211_BASIC_RATE_MASK),
+ (IEEE80211_CCK_RATE_5MB | IEEE80211_BASIC_RATE_MASK),
+ (IEEE80211_CCK_RATE_11MB | IEEE80211_BASIC_RATE_MASK)
+ };
+
+static u8 WIFI_OFDMRATES[] = {
+ (IEEE80211_OFDM_RATE_6MB),
+ (IEEE80211_OFDM_RATE_9MB),
+ (IEEE80211_OFDM_RATE_12MB),
+ (IEEE80211_OFDM_RATE_18MB),
+ (IEEE80211_OFDM_RATE_24MB),
+ IEEE80211_OFDM_RATE_36MB,
+ IEEE80211_OFDM_RATE_48MB,
+ IEEE80211_OFDM_RATE_54MB
+ };
+
+
+int rtw_get_bit_value_from_ieee_value(u8 val)
+{
+ unsigned char dot11_rate_table[] = {
+ 2, 4, 11, 22, 12, 18, 24, 36, 48,
+ 72, 96, 108, 0}; /* last element must be zero!! */
+
+ int i = 0;
+ while (dot11_rate_table[i] != 0) {
+ if (dot11_rate_table[i] == val)
+ return BIT(i);
+ i++;
+ }
+ return 0;
+}
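+
+/*
+ * Example: the table above holds IEEE rate codes in 500 kb/s units, so
+ * val = 22 (11 Mb/s) matches index 3 and the function returns BIT(3); an
+ * unknown value returns 0.
+ */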
+
+uint rtw_is_cckrates_included(u8 *rate)
+{
+ u32 i = 0;
+
+ while (rate[i] != 0) {
+ if ((((rate[i]) & 0x7f) == 2) || (((rate[i]) & 0x7f) == 4) ||
+ (((rate[i]) & 0x7f) == 11) || (((rate[i]) & 0x7f) == 22))
+ return true;
+ i++;
+ }
+ return false;
+}
+
+uint rtw_is_cckratesonly_included(u8 *rate)
+{
+ u32 i = 0;
+
+ while (rate[i] != 0) {
+ if ((((rate[i]) & 0x7f) != 2) && (((rate[i]) & 0x7f) != 4) &&
+ (((rate[i]) & 0x7f) != 11) && (((rate[i]) & 0x7f) != 22))
+ return false;
+ i++;
+ }
+
+ return true;
+}
+
+int rtw_check_network_type(unsigned char *rate, int ratelen, int channel)
+{
+ if (channel > 14) {
+ if ((rtw_is_cckrates_included(rate)) == true)
+ return WIRELESS_INVALID;
+ else
+ return WIRELESS_11A;
+ } else { /* could be pure B, pure G, or B/G */
+ if ((rtw_is_cckratesonly_included(rate)) == true)
+ return WIRELESS_11B;
+ else if ((rtw_is_cckrates_included(rate)) == true)
+ return WIRELESS_11BG;
+ else
+ return WIRELESS_11G;
+ }
+}
+
+u8 *rtw_set_fixed_ie(unsigned char *pbuf, unsigned int len, unsigned char *source,
+ unsigned int *frlen)
+{
+ memcpy((void *)pbuf, (void *)source, len);
+ *frlen = *frlen + len;
+ return pbuf + len;
+}
+
+/* rtw_set_ie will update frame length */
+u8 *rtw_set_ie
+(
+ u8 *pbuf,
+ int index,
+ uint len,
+ u8 *source,
+ uint *frlen /* frame length */
+)
+{
+_func_enter_;
+ *pbuf = (u8)index;
+
+ *(pbuf + 1) = (u8)len;
+
+ if (len > 0)
+ memcpy((void *)(pbuf + 2), (void *)source, len);
+
+ *frlen = *frlen + (len + 2);
+
+_func_exit_;
+ return pbuf + len + 2;
+}
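+
+/*
+ * Example: rtw_set_ie() emits a standard 802.11 TLV.  With index = _SSID_IE_
+ * (element ID 0), len = 4 and source = "demo" it writes the bytes
+ * 00 04 'd' 'e' 'm' 'o', adds 6 to *frlen and returns a pointer just past the
+ * element, ready for the next rtw_set_ie() call.
+ */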
+
+inline u8 *rtw_set_ie_ch_switch (u8 *buf, u32 *buf_len, u8 ch_switch_mode,
+ u8 new_ch, u8 ch_switch_cnt)
+{
+ u8 ie_data[3];
+
+ ie_data[0] = ch_switch_mode;
+ ie_data[1] = new_ch;
+ ie_data[2] = ch_switch_cnt;
+ return rtw_set_ie(buf, WLAN_EID_CHANNEL_SWITCH, 3, ie_data, buf_len);
+}
+
+inline u8 secondary_ch_offset_to_hal_ch_offset(u8 ch_offset)
+{
+ if (ch_offset == SCN)
+ return HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ else if (ch_offset == SCA)
+ return HAL_PRIME_CHNL_OFFSET_UPPER;
+ else if (ch_offset == SCB)
+ return HAL_PRIME_CHNL_OFFSET_LOWER;
+
+ return HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+}
+
+inline u8 hal_ch_offset_to_secondary_ch_offset(u8 ch_offset)
+{
+ if (ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)
+ return SCN;
+ else if (ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
+ return SCB;
+ else if (ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
+ return SCA;
+
+ return SCN;
+}
+
+inline u8 *rtw_set_ie_secondary_ch_offset(u8 *buf, u32 *buf_len, u8 secondary_ch_offset)
+{
+ return rtw_set_ie(buf, WLAN_EID_SECONDARY_CHANNEL_OFFSET, 1, &secondary_ch_offset, buf_len);
+}
+
+inline u8 *rtw_set_ie_mesh_ch_switch_parm(u8 *buf, u32 *buf_len, u8 ttl,
+ u8 flags, u16 reason, u16 precedence)
+{
+ u8 ie_data[6];
+
+ ie_data[0] = ttl;
+ ie_data[1] = flags;
+ RTW_PUT_LE16((u8 *)&ie_data[2], reason);
+ RTW_PUT_LE16((u8 *)&ie_data[4], precedence);
+
+ return rtw_set_ie(buf, 0x118, 6, ie_data, buf_len);
+}
+
+/*----------------------------------------------------------------------------
+index: the information element id index, limit is the limit for search
+-----------------------------------------------------------------------------*/
+u8 *rtw_get_ie(u8 *pbuf, int index, int *len, int limit)
+{
+ int tmp, i;
+ u8 *p;
+_func_enter_;
+ if (limit < 1) {
+ _func_exit_;
+ return NULL;
+ }
+
+ p = pbuf;
+ i = 0;
+ *len = 0;
+ while (1) {
+ if (*p == index) {
+ *len = *(p + 1);
+ return p;
+ } else {
+ tmp = *(p + 1);
+ p += (tmp + 2);
+ i += (tmp + 2);
+ }
+ if (i >= limit)
+ break;
+ }
+_func_exit_;
+ return NULL;
+}
+
+/**
+ * rtw_get_ie_ex - Search specific IE from a series of IEs
+ * @in_ie: Address of IEs to search
+ * @in_len: Length limit from in_ie
+ * @eid: Element ID to match
+ * @oui: OUI to match
+ * @oui_len: OUI length
+ * @ie: If not NULL and the specific IE is found, the IE will be copied to the buf starting from the specific IE
+ * @ielen: If not NULL and the specific IE is found, will set to the length of the entire IE
+ *
+ * Returns: The address of the specific IE found, or NULL
+ */
+u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len, u8 *ie, uint *ielen)
+{
+ uint cnt;
+ u8 *target_ie = NULL;
+
+
+ if (ielen)
+ *ielen = 0;
+
+ if (!in_ie || in_len <= 0)
+ return target_ie;
+
+ cnt = 0;
+
+ while (cnt < in_len) {
+ if (eid == in_ie[cnt] && (!oui || _rtw_memcmp(&in_ie[cnt+2], oui, oui_len))) {
+ target_ie = &in_ie[cnt];
+
+ if (ie)
+ memcpy(ie, &in_ie[cnt], in_ie[cnt+1]+2);
+
+ if (ielen)
+ *ielen = in_ie[cnt+1]+2;
+
+ break;
+ } else {
+ cnt += in_ie[cnt+1]+2; /* goto next */
+ }
+ }
+ return target_ie;
+}
+
+/**
+ * rtw_ies_remove_ie - Find matching IEs and remove
+ * @ies: Address of IEs to search
+ * @ies_len: Pointer of length of ies, will update to new length
+ * @offset: The offset to start search
+ * @eid: Element ID to match
+ * @oui: OUI to match
+ * @oui_len: OUI length
+ *
+ * Returns: _SUCCESS: ies is updated, _FAIL: not updated
+ */
+int rtw_ies_remove_ie(u8 *ies, uint *ies_len, uint offset, u8 eid, u8 *oui, u8 oui_len)
+{
+ int ret = _FAIL;
+ u8 *target_ie;
+ u32 target_ielen;
+ u8 *start;
+ uint search_len;
+
+ if (!ies || !ies_len || *ies_len <= offset)
+ goto exit;
+
+ start = ies + offset;
+ search_len = *ies_len - offset;
+
+ while (1) {
+ target_ie = rtw_get_ie_ex(start, search_len, eid, oui, oui_len, NULL, &target_ielen);
+ if (target_ie && target_ielen) {
+ u8 buf[MAX_IE_SZ] = {0};
+ u8 *remain_ies = target_ie + target_ielen;
+ uint remain_len = search_len - (remain_ies - start);
+
+ memcpy(buf, remain_ies, remain_len);
+ memcpy(target_ie, buf, remain_len);
+ *ies_len = *ies_len - target_ielen;
+ ret = _SUCCESS;
+
+ start = target_ie;
+ search_len = remain_len;
+ } else {
+ break;
+ }
+ }
+exit:
+ return ret;
+}
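+
+/*
+ * Note: rtw_ies_remove_ie() removes a matching element by bouncing the
+ * remaining IEs through a stack buffer and copying them back over the match;
+ * this implicitly assumes the remaining data fits in MAX_IE_SZ bytes.
+ */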
+
+void rtw_set_supported_rate(u8 *SupportedRates, uint mode)
+{
+_func_enter_;
+
+ _rtw_memset(SupportedRates, 0, NDIS_802_11_LENGTH_RATES_EX);
+
+ switch (mode) {
+ case WIRELESS_11B:
+ memcpy(SupportedRates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN);
+ break;
+ case WIRELESS_11G:
+ case WIRELESS_11A:
+ case WIRELESS_11_5N:
+ case WIRELESS_11A_5N:/* Todo: no basic rate for ofdm ? */
+ memcpy(SupportedRates, WIFI_OFDMRATES, IEEE80211_NUM_OFDM_RATESLEN);
+ break;
+ case WIRELESS_11BG:
+ case WIRELESS_11G_24N:
+ case WIRELESS_11_24N:
+ case WIRELESS_11BG_24N:
+ memcpy(SupportedRates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN);
+ memcpy(SupportedRates + IEEE80211_CCK_RATE_LEN, WIFI_OFDMRATES, IEEE80211_NUM_OFDM_RATESLEN);
+ break;
+ }
+_func_exit_;
+}
+
+uint rtw_get_rateset_len(u8 *rateset)
+{
+ uint i = 0;
+_func_enter_;
+ while (1) {
+ if ((rateset[i]) == 0)
+ break;
+ if (i > 12)
+ break;
+ i++;
+ }
+_func_exit_;
+ return i;
+}
+
+int rtw_generate_ie(struct registry_priv *pregistrypriv)
+{
+ u8 wireless_mode;
+ int sz = 0, rateLen;
+ struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
+ u8 *ie = pdev_network->IEs;
+
+_func_enter_;
+
+ /* timestamp will be inserted by hardware */
+ sz += 8;
+ ie += sz;
+
+ /* beacon interval : 2bytes */
+ *(__le16 *)ie = cpu_to_le16((u16)pdev_network->Configuration.BeaconPeriod);/* BCN_INTERVAL; */
+ sz += 2;
+ ie += 2;
+
+ /* capability info */
+ *(u16 *)ie = 0;
+
+ *(__le16 *)ie |= cpu_to_le16(cap_IBSS);
+
+ if (pregistrypriv->preamble == PREAMBLE_SHORT)
+ *(__le16 *)ie |= cpu_to_le16(cap_ShortPremble);
+
+ if (pdev_network->Privacy)
+ *(__le16 *)ie |= cpu_to_le16(cap_Privacy);
+
+ sz += 2;
+ ie += 2;
+
+ /* SSID */
+ ie = rtw_set_ie(ie, _SSID_IE_, pdev_network->Ssid.SsidLength, pdev_network->Ssid.Ssid, &sz);
+
+ /* supported rates */
+ if (pregistrypriv->wireless_mode == WIRELESS_11ABGN) {
+ if (pdev_network->Configuration.DSConfig > 14)
+ wireless_mode = WIRELESS_11A_5N;
+ else
+ wireless_mode = WIRELESS_11BG_24N;
+ } else {
+ wireless_mode = pregistrypriv->wireless_mode;
+ }
+
+ rtw_set_supported_rate(pdev_network->SupportedRates, wireless_mode);
+
+ rateLen = rtw_get_rateset_len(pdev_network->SupportedRates);
+
+ if (rateLen > 8) {
+ ie = rtw_set_ie(ie, _SUPPORTEDRATES_IE_, 8, pdev_network->SupportedRates, &sz);
+ /* ie = rtw_set_ie(ie, _EXT_SUPPORTEDRATES_IE_, (rateLen - 8), (pdev_network->SupportedRates + 8), &sz); */
+ } else {
+ ie = rtw_set_ie(ie, _SUPPORTEDRATES_IE_, rateLen, pdev_network->SupportedRates, &sz);
+ }
+
+ /* DS parameter set */
+ ie = rtw_set_ie(ie, _DSSET_IE_, 1, (u8 *)&(pdev_network->Configuration.DSConfig), &sz);
+
+ /* IBSS Parameter Set */
+
+ ie = rtw_set_ie(ie, _IBSS_PARA_IE_, 2, (u8 *)&(pdev_network->Configuration.ATIMWindow), &sz);
+
+ if (rateLen > 8)
+ ie = rtw_set_ie(ie, _EXT_SUPPORTEDRATES_IE_, (rateLen - 8), (pdev_network->SupportedRates + 8), &sz);
+_func_exit_;
+
+ return sz;
+}
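+
+/*
+ * Summary: rtw_generate_ie() lays out the fixed and tagged fields of an IBSS
+ * beacon/probe response in dev_network->IEs: an 8-byte timestamp placeholder
+ * (filled by hardware), beacon interval, capability (IBSS bit, optional short
+ * preamble and privacy), SSID, up to 8 supported rates, DS parameter set,
+ * IBSS parameter set (ATIM window) and, if more than 8 rates exist, an
+ * extended supported rates element.  The return value is the total length
+ * written.
+ */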
+
+unsigned char *rtw_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit)
+{
+ int len;
+ u16 val16;
+ __le16 le_tmp;
+ unsigned char wpa_oui_type[] = {0x00, 0x50, 0xf2, 0x01};
+ u8 *pbuf = pie;
+ int limit_new = limit;
+
+ while (1) {
+ pbuf = rtw_get_ie(pbuf, _WPA_IE_ID_, &len, limit_new);
+
+ if (pbuf) {
+ /* check if oui matches... */
+ if (_rtw_memcmp((pbuf + 2), wpa_oui_type, sizeof (wpa_oui_type)) == false)
+ goto check_next_ie;
+
+ /* check version... */
+ memcpy((u8 *)&le_tmp, (pbuf + 6), sizeof(val16));
+
+ val16 = le16_to_cpu(le_tmp);
+ if (val16 != 0x0001)
+ goto check_next_ie;
+ *wpa_ie_len = *(pbuf + 1);
+ return pbuf;
+ } else {
+ *wpa_ie_len = 0;
+ return NULL;
+ }
+
+check_next_ie:
+ limit_new = limit - (pbuf - pie) - 2 - len;
+ if (limit_new <= 0)
+ break;
+ pbuf += (2 + len);
+ }
+ *wpa_ie_len = 0;
+ return NULL;
+}
+
+unsigned char *rtw_get_wpa2_ie(unsigned char *pie, int *rsn_ie_len, int limit)
+{
+
+ return rtw_get_ie(pie, _WPA2_IE_ID_, rsn_ie_len, limit);
+}
+
+int rtw_get_wpa_cipher_suite(u8 *s)
+{
+ if (_rtw_memcmp(s, WPA_CIPHER_SUITE_NONE, WPA_SELECTOR_LEN) == true)
+ return WPA_CIPHER_NONE;
+ if (_rtw_memcmp(s, WPA_CIPHER_SUITE_WEP40, WPA_SELECTOR_LEN) == true)
+ return WPA_CIPHER_WEP40;
+ if (_rtw_memcmp(s, WPA_CIPHER_SUITE_TKIP, WPA_SELECTOR_LEN) == true)
+ return WPA_CIPHER_TKIP;
+ if (_rtw_memcmp(s, WPA_CIPHER_SUITE_CCMP, WPA_SELECTOR_LEN) == true)
+ return WPA_CIPHER_CCMP;
+ if (_rtw_memcmp(s, WPA_CIPHER_SUITE_WEP104, WPA_SELECTOR_LEN) == true)
+ return WPA_CIPHER_WEP104;
+
+ return 0;
+}
+
+int rtw_get_wpa2_cipher_suite(u8 *s)
+{
+ if (_rtw_memcmp(s, RSN_CIPHER_SUITE_NONE, RSN_SELECTOR_LEN) == true)
+ return WPA_CIPHER_NONE;
+ if (_rtw_memcmp(s, RSN_CIPHER_SUITE_WEP40, RSN_SELECTOR_LEN) == true)
+ return WPA_CIPHER_WEP40;
+ if (_rtw_memcmp(s, RSN_CIPHER_SUITE_TKIP, RSN_SELECTOR_LEN) == true)
+ return WPA_CIPHER_TKIP;
+ if (_rtw_memcmp(s, RSN_CIPHER_SUITE_CCMP, RSN_SELECTOR_LEN) == true)
+ return WPA_CIPHER_CCMP;
+ if (_rtw_memcmp(s, RSN_CIPHER_SUITE_WEP104, RSN_SELECTOR_LEN) == true)
+ return WPA_CIPHER_WEP104;
+
+ return 0;
+}
+
+
+int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwise_cipher, int *is_8021x)
+{
+ int i, ret = _SUCCESS;
+ int left, count;
+ u8 *pos;
+ u8 SUITE_1X[4] = {0x00, 0x50, 0xf2, 1};
+
+ if (wpa_ie_len <= 0) {
+ /* No WPA IE - fail silently */
+ return _FAIL;
+ }
+
+
+ if ((*wpa_ie != _WPA_IE_ID_) || (*(wpa_ie+1) != (u8)(wpa_ie_len - 2)) ||
+ (_rtw_memcmp(wpa_ie+2, RTW_WPA_OUI_TYPE, WPA_SELECTOR_LEN) != true))
+ return _FAIL;
+
+ pos = wpa_ie;
+
+ pos += 8;
+ left = wpa_ie_len - 8;
+
+
+ /* group_cipher */
+ if (left >= WPA_SELECTOR_LEN) {
+ *group_cipher = rtw_get_wpa_cipher_suite(pos);
+ pos += WPA_SELECTOR_LEN;
+ left -= WPA_SELECTOR_LEN;
+ } else if (left > 0) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie length mismatch, %u too much", __func__, left));
+ return _FAIL;
+ }
+
+ /* pairwise_cipher */
+ if (left >= 2) {
+ count = RTW_GET_LE16(pos);
+ pos += 2;
+ left -= 2;
+
+ if (count == 0 || left < count * WPA_SELECTOR_LEN) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie count botch (pairwise), "
+ "count %u left %u", __func__, count, left));
+ return _FAIL;
+ }
+
+ for (i = 0; i < count; i++) {
+ *pairwise_cipher |= rtw_get_wpa_cipher_suite(pos);
+
+ pos += WPA_SELECTOR_LEN;
+ left -= WPA_SELECTOR_LEN;
+ }
+ } else if (left == 1) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie too short (for key mgmt)", __func__));
+ return _FAIL;
+ }
+
+ if (is_8021x) {
+ if (left >= 6) {
+ pos += 2;
+ if (_rtw_memcmp(pos, SUITE_1X, 4) == 1) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s : there has 802.1x auth\n", __func__));
+ *is_8021x = 1;
+ }
+ }
+ }
+
+ return ret;
+}
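+
+/*
+ * A minimal usage sketch, assuming ies/ie_len describe the variable IEs of a
+ * beacon (i.e. past the 12 fixed octets of timestamp, interval, capability):
+ *
+ *	int wpa_len = 0, group = 0, pairwise = 0, is_8021x = 0;
+ *	u8 *wpa = rtw_get_wpa_ie(ies, &wpa_len, ie_len);
+ *
+ *	if (wpa && wpa_len > 0 &&
+ *	    rtw_parse_wpa_ie(wpa, wpa_len + 2, &group, &pairwise,
+ *			     &is_8021x) == _SUCCESS) {
+ *		// group/pairwise now hold WPA_CIPHER_* flags and is_8021x is
+ *		// set when the 00:50:f2:01 (802.1X) key management suite is
+ *		// advertised
+ *	}
+ *
+ * wpa_len is the IE length field only, so the +2 re-adds the two header
+ * octets that rtw_parse_wpa_ie() checks against.
+ */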
+
+int rtw_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher, int *pairwise_cipher, int *is_8021x)
+{
+ int i, ret = _SUCCESS;
+ int left, count;
+ u8 *pos;
+ u8 SUITE_1X[4] = {0x00, 0x0f, 0xac, 0x01};
+
+ if (rsn_ie_len <= 0) {
+ /* No RSN IE - fail silently */
+ return _FAIL;
+ }
+
+
+ if ((*rsn_ie != _WPA2_IE_ID_) || (*(rsn_ie+1) != (u8)(rsn_ie_len - 2)))
+ return _FAIL;
+
+ pos = rsn_ie;
+ pos += 4;
+ left = rsn_ie_len - 4;
+
+ /* group_cipher */
+ if (left >= RSN_SELECTOR_LEN) {
+ *group_cipher = rtw_get_wpa2_cipher_suite(pos);
+
+ pos += RSN_SELECTOR_LEN;
+ left -= RSN_SELECTOR_LEN;
+
+ } else if (left > 0) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie length mismatch, %u too much", __func__, left));
+ return _FAIL;
+ }
+
+ /* pairwise_cipher */
+ if (left >= 2) {
+ count = RTW_GET_LE16(pos);
+ pos += 2;
+ left -= 2;
+
+ if (count == 0 || left < count * RSN_SELECTOR_LEN) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie count botch (pairwise), "
+ "count %u left %u", __func__, count, left));
+ return _FAIL;
+ }
+
+ for (i = 0; i < count; i++) {
+ *pairwise_cipher |= rtw_get_wpa2_cipher_suite(pos);
+
+ pos += RSN_SELECTOR_LEN;
+ left -= RSN_SELECTOR_LEN;
+ }
+
+ } else if (left == 1) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie too short (for key mgmt)", __func__));
+
+ return _FAIL;
+ }
+
+ if (is_8021x) {
+ if (left >= 6) {
+ pos += 2;
+ if (_rtw_memcmp(pos, SUITE_1X, 4) == 1) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s (): there has 802.1x auth\n", __func__));
+ *is_8021x = 1;
+ }
+ }
+ }
+ return ret;
+}
+
+int rtw_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie, u16 *wpa_len)
+{
+ u8 authmode, sec_idx, i;
+ u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01};
+ uint cnt;
+
+_func_enter_;
+
+ /* Search required WPA or WPA2 IE and copy to sec_ie[] */
+
+ cnt = (_TIMESTAMP_ + _BEACON_ITERVAL_ + _CAPABILITY_);
+
+ sec_idx = 0;
+
+ while (cnt < in_len) {
+ authmode = in_ie[cnt];
+
+ if ((authmode == _WPA_IE_ID_) && (_rtw_memcmp(&in_ie[cnt+2], &wpa_oui[0], 4))) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("\n rtw_get_wpa_ie: sec_idx =%d in_ie[cnt+1]+2 =%d\n",
+ sec_idx, in_ie[cnt+1]+2));
+
+ if (wpa_ie) {
+ memcpy(wpa_ie, &in_ie[cnt], in_ie[cnt+1]+2);
+
+ for (i = 0; i < (in_ie[cnt+1]+2); i += 8) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("\n %2x,%2x,%2x,%2x,%2x,%2x,%2x,%2x\n",
+ wpa_ie[i], wpa_ie[i+1], wpa_ie[i+2], wpa_ie[i+3], wpa_ie[i+4],
+ wpa_ie[i+5], wpa_ie[i+6], wpa_ie[i+7]));
+ }
+ }
+
+ *wpa_len = in_ie[cnt+1]+2;
+ cnt += in_ie[cnt+1]+2; /* get next */
+ } else {
+ if (authmode == _WPA2_IE_ID_) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("\n get_rsn_ie: sec_idx =%d in_ie[cnt+1]+2 =%d\n",
+ sec_idx, in_ie[cnt+1]+2));
+
+ if (rsn_ie) {
+ memcpy(rsn_ie, &in_ie[cnt], in_ie[cnt+1]+2);
+
+ for (i = 0; i < (in_ie[cnt+1]+2); i += 8) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("\n %2x,%2x,%2x,%2x,%2x,%2x,%2x,%2x\n",
+ rsn_ie[i], rsn_ie[i+1], rsn_ie[i+2], rsn_ie[i+3], rsn_ie[i+4],
+ rsn_ie[i+5], rsn_ie[i+6], rsn_ie[i+7]));
+ }
+ }
+
+ *rsn_len = in_ie[cnt+1]+2;
+ cnt += in_ie[cnt+1]+2; /* get next */
+ } else {
+ cnt += in_ie[cnt+1]+2; /* get next */
+ }
+ }
+ }
+
+_func_exit_;
+
+ return *rsn_len + *wpa_len;
+}
+
+u8 rtw_is_wps_ie(u8 *ie_ptr, uint *wps_ielen)
+{
+ u8 match = false;
+ u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
+
+ if (ie_ptr == NULL)
+ return match;
+
+ eid = ie_ptr[0];
+
+ if ((eid == _WPA_IE_ID_) && (_rtw_memcmp(&ie_ptr[2], wps_oui, 4))) {
+ *wps_ielen = ie_ptr[1]+2;
+ match = true;
+ }
+ return match;
+}
+
+/**
+ * rtw_get_wps_ie - Search WPS IE from a series of IEs
+ * @in_ie: Address of IEs to search
+ * @in_len: Length limit from in_ie
+ * @wps_ie: If not NULL and WPS IE is found, WPS IE will be copied to the buf starting from wps_ie
+ * @wps_ielen: If not NULL and WPS IE is found, will set to the length of the entire WPS IE
+ *
+ * Returns: The address of the WPS IE found, or NULL
+ */
+u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen)
+{
+ uint cnt;
+ u8 *wpsie_ptr = NULL;
+ u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
+
+ if (wps_ielen)
+ *wps_ielen = 0;
+
+ if (!in_ie || in_len <= 0)
+ return wpsie_ptr;
+
+ cnt = 0;
+
+ while (cnt < in_len) {
+ eid = in_ie[cnt];
+
+ if ((eid == _WPA_IE_ID_) && (_rtw_memcmp(&in_ie[cnt+2], wps_oui, 4))) {
+ wpsie_ptr = &in_ie[cnt];
+
+ if (wps_ie)
+ memcpy(wps_ie, &in_ie[cnt], in_ie[cnt+1]+2);
+
+ if (wps_ielen)
+ *wps_ielen = in_ie[cnt+1]+2;
+
+ cnt += in_ie[cnt+1]+2;
+
+ break;
+ } else {
+ cnt += in_ie[cnt+1]+2; /* goto next */
+ }
+ }
+ return wpsie_ptr;
+}
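+
+/*
+ * A minimal usage sketch, with ies/ies_len assumed to cover only the variable
+ * IEs of a management frame:
+ *
+ *	uint wps_ielen = 0;
+ *	u8 *wps = rtw_get_wps_ie(ies, ies_len, NULL, &wps_ielen);
+ *
+ *	if (wps)
+ *		DBG_88E("WPS IE found, %u bytes including the 2-byte header\n",
+ *			wps_ielen);
+ */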
+
+/**
+ * rtw_get_wps_attr - Search a specific WPS attribute from a given WPS IE
+ * @wps_ie: Address of WPS IE to search
+ * @wps_ielen: Length limit from wps_ie
+ * @target_attr_id: The attribute ID of WPS attribute to search
+ * @buf_attr: If not NULL and the WPS attribute is found, WPS attribute will be copied to the buf starting from buf_attr
+ * @len_attr: If not NULL and the WPS attribute is found, will set to the length of the entire WPS attribute
+ *
+ * Returns: the address of the specific WPS attribute found, or NULL
+ */
+u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id , u8 *buf_attr, u32 *len_attr)
+{
+ u8 *attr_ptr = NULL;
+ u8 *target_attr_ptr = NULL;
+ u8 wps_oui[4] = {0x00, 0x50, 0xF2, 0x04};
+
+ if (len_attr)
+ *len_attr = 0;
+
+ if ((wps_ie[0] != _VENDOR_SPECIFIC_IE_) ||
+ (_rtw_memcmp(wps_ie + 2, wps_oui , 4) != true))
+ return attr_ptr;
+
+ /* 6 = 1(Element ID) + 1(Length) + 4(WPS OUI) */
+ attr_ptr = wps_ie + 6; /* goto first attr */
+
+ while (attr_ptr - wps_ie < wps_ielen) {
+ /* 4 = 2(Attribute ID) + 2(Length) */
+ u16 attr_id = RTW_GET_BE16(attr_ptr);
+ u16 attr_data_len = RTW_GET_BE16(attr_ptr + 2);
+ u16 attr_len = attr_data_len + 4;
+
+ if (attr_id == target_attr_id) {
+ target_attr_ptr = attr_ptr;
+ if (buf_attr)
+ memcpy(buf_attr, attr_ptr, attr_len);
+ if (len_attr)
+ *len_attr = attr_len;
+ break;
+ } else {
+ attr_ptr += attr_len; /* goto next */
+ }
+ }
+ return target_attr_ptr;
+}
+
+/**
+ * rtw_get_wps_attr_content - Search a specific WPS attribute content from a given WPS IE
+ * @wps_ie: Address of WPS IE to search
+ * @wps_ielen: Length limit from wps_ie
+ * @target_attr_id: The attribute ID of WPS attribute to search
+ * @buf_content: If not NULL and the WPS attribute is found, WPS attribute content will be copied to the buf starting from buf_content
+ * @len_content: If not NULL and the WPS attribute is found, will set to the length of the WPS attribute content
+ *
+ * Returns: the address of the specific WPS attribute content found, or NULL
+ */
+u8 *rtw_get_wps_attr_content(u8 *wps_ie, uint wps_ielen, u16 target_attr_id , u8 *buf_content, uint *len_content)
+{
+ u8 *attr_ptr;
+ u32 attr_len;
+
+ if (len_content)
+ *len_content = 0;
+
+ attr_ptr = rtw_get_wps_attr(wps_ie, wps_ielen, target_attr_id, NULL, &attr_len);
+
+ if (attr_ptr && attr_len) {
+ if (buf_content)
+ memcpy(buf_content, attr_ptr+4, attr_len-4);
+
+ if (len_content)
+ *len_content = attr_len-4;
+
+ return attr_ptr+4;
+ }
+
+ return NULL;
+}
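+
+/*
+ * A minimal sketch of pulling a single attribute out of a WPS IE located with
+ * rtw_get_wps_ie(); the attribute ID used here (0x1023, Model Name) is only an
+ * illustrative value, and buf must be large enough for the attribute content:
+ *
+ *	u8 buf[64];
+ *	uint content_len = 0;
+ *
+ *	if (rtw_get_wps_attr_content(wps, wps_ielen, 0x1023, buf, &content_len))
+ *		DBG_88E("WPS attribute 0x1023 carries %u content bytes\n",
+ *			content_len);
+ */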
+
+static int rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen,
+ struct rtw_ieee802_11_elems *elems,
+ int show_errors)
+{
+ unsigned int oui;
+
+ /* first 3 bytes in vendor specific information element are the IEEE
+ * OUI of the vendor. The following byte is used as a vendor-specific
+ * sub-type. */
+ if (elen < 4) {
+ if (show_errors) {
+ DBG_88E("short vendor specific information element ignored (len=%lu)\n",
+ (unsigned long) elen);
+ }
+ return -1;
+ }
+
+ oui = RTW_GET_BE24(pos);
+ switch (oui) {
+ case OUI_MICROSOFT:
+ /* Microsoft/Wi-Fi information elements are further typed and
+ * subtyped */
+ switch (pos[3]) {
+ case 1:
+ /* Microsoft OUI (00:50:F2) with OUI Type 1:
+ * real WPA information element */
+ elems->wpa_ie = pos;
+ elems->wpa_ie_len = elen;
+ break;
+ case WME_OUI_TYPE: /* this is a Wi-Fi WME info. element */
+ if (elen < 5) {
+ DBG_88E("short WME information element ignored (len=%lu)\n",
+ (unsigned long) elen);
+ return -1;
+ }
+ switch (pos[4]) {
+ case WME_OUI_SUBTYPE_INFORMATION_ELEMENT:
+ case WME_OUI_SUBTYPE_PARAMETER_ELEMENT:
+ elems->wme = pos;
+ elems->wme_len = elen;
+ break;
+ case WME_OUI_SUBTYPE_TSPEC_ELEMENT:
+ elems->wme_tspec = pos;
+ elems->wme_tspec_len = elen;
+ break;
+ default:
+ DBG_88E("unknown WME information element ignored (subtype=%d len=%lu)\n",
+ pos[4], (unsigned long) elen);
+ return -1;
+ }
+ break;
+ case 4:
+ /* Wi-Fi Protected Setup (WPS) IE */
+ elems->wps_ie = pos;
+ elems->wps_ie_len = elen;
+ break;
+ default:
+ DBG_88E("Unknown Microsoft information element ignored (type=%d len=%lu)\n",
+ pos[3], (unsigned long) elen);
+ return -1;
+ }
+ break;
+
+ case OUI_BROADCOM:
+ switch (pos[3]) {
+ case VENDOR_HT_CAPAB_OUI_TYPE:
+ elems->vendor_ht_cap = pos;
+ elems->vendor_ht_cap_len = elen;
+ break;
+ default:
+ DBG_88E("Unknown Broadcom information element ignored (type=%d len=%lu)\n",
+ pos[3], (unsigned long) elen);
+ return -1;
+ }
+ break;
+ default:
+ DBG_88E("unknown vendor specific information element ignored (vendor OUI %02x:%02x:%02x len=%lu)\n",
+ pos[0], pos[1], pos[2], (unsigned long) elen);
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * ieee802_11_parse_elems - Parse information elements in management frames
+ * @start: Pointer to the start of IEs
+ * @len: Length of IE buffer in octets
+ * @elems: Data structure for parsed elements
+ * @show_errors: Whether to show parsing errors in debug log
+ * Returns: Parsing result
+ */
+enum parse_res rtw_ieee802_11_parse_elems(u8 *start, uint len,
+ struct rtw_ieee802_11_elems *elems,
+ int show_errors)
+{
+ uint left = len;
+ u8 *pos = start;
+ int unknown = 0;
+
+ _rtw_memset(elems, 0, sizeof(*elems));
+
+ while (left >= 2) {
+ u8 id, elen;
+
+ id = *pos++;
+ elen = *pos++;
+ left -= 2;
+
+ if (elen > left) {
+ if (show_errors) {
+ DBG_88E("IEEE 802.11 element parse failed (id=%d elen=%d left=%lu)\n",
+ id, elen, (unsigned long) left);
+ }
+ return ParseFailed;
+ }
+
+ switch (id) {
+ case WLAN_EID_SSID:
+ elems->ssid = pos;
+ elems->ssid_len = elen;
+ break;
+ case WLAN_EID_SUPP_RATES:
+ elems->supp_rates = pos;
+ elems->supp_rates_len = elen;
+ break;
+ case WLAN_EID_FH_PARAMS:
+ elems->fh_params = pos;
+ elems->fh_params_len = elen;
+ break;
+ case WLAN_EID_DS_PARAMS:
+ elems->ds_params = pos;
+ elems->ds_params_len = elen;
+ break;
+ case WLAN_EID_CF_PARAMS:
+ elems->cf_params = pos;
+ elems->cf_params_len = elen;
+ break;
+ case WLAN_EID_TIM:
+ elems->tim = pos;
+ elems->tim_len = elen;
+ break;
+ case WLAN_EID_IBSS_PARAMS:
+ elems->ibss_params = pos;
+ elems->ibss_params_len = elen;
+ break;
+ case WLAN_EID_CHALLENGE:
+ elems->challenge = pos;
+ elems->challenge_len = elen;
+ break;
+ case WLAN_EID_ERP_INFO:
+ elems->erp_info = pos;
+ elems->erp_info_len = elen;
+ break;
+ case WLAN_EID_EXT_SUPP_RATES:
+ elems->ext_supp_rates = pos;
+ elems->ext_supp_rates_len = elen;
+ break;
+ case WLAN_EID_VENDOR_SPECIFIC:
+ if (rtw_ieee802_11_parse_vendor_specific(pos, elen, elems, show_errors))
+ unknown++;
+ break;
+ case WLAN_EID_RSN:
+ elems->rsn_ie = pos;
+ elems->rsn_ie_len = elen;
+ break;
+ case WLAN_EID_PWR_CAPABILITY:
+ elems->power_cap = pos;
+ elems->power_cap_len = elen;
+ break;
+ case WLAN_EID_SUPPORTED_CHANNELS:
+ elems->supp_channels = pos;
+ elems->supp_channels_len = elen;
+ break;
+ case WLAN_EID_MOBILITY_DOMAIN:
+ elems->mdie = pos;
+ elems->mdie_len = elen;
+ break;
+ case WLAN_EID_FAST_BSS_TRANSITION:
+ elems->ftie = pos;
+ elems->ftie_len = elen;
+ break;
+ case WLAN_EID_TIMEOUT_INTERVAL:
+ elems->timeout_int = pos;
+ elems->timeout_int_len = elen;
+ break;
+ case WLAN_EID_HT_CAP:
+ elems->ht_capabilities = pos;
+ elems->ht_capabilities_len = elen;
+ break;
+ case WLAN_EID_HT_OPERATION:
+ elems->ht_operation = pos;
+ elems->ht_operation_len = elen;
+ break;
+ default:
+ unknown++;
+ if (!show_errors)
+ break;
+ DBG_88E("IEEE 802.11 element parse ignored unknown element (id=%d elen=%d)\n",
+ id, elen);
+ break;
+ }
+ left -= elen;
+ pos += elen;
+ }
+ if (left)
+ return ParseFailed;
+ return unknown ? ParseUnknown : ParseOK;
+}
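+
+/*
+ * A minimal usage sketch for a beacon or probe response body (ies assumed to
+ * point just past the fixed timestamp/interval/capability fields):
+ *
+ *	struct rtw_ieee802_11_elems elems;
+ *
+ *	if (rtw_ieee802_11_parse_elems(ies, ies_len, &elems, 1) != ParseFailed &&
+ *	    elems.ssid)
+ *		DBG_88E("SSID IE present, len=%u\n", (uint)elems.ssid_len);
+ */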
+
+u8 key_char2num(u8 ch)
+{
+ if ((ch >= '0') && (ch <= '9'))
+ return ch - '0';
+ else if ((ch >= 'a') && (ch <= 'f'))
+ return ch - 'a' + 10;
+ else if ((ch >= 'A') && (ch <= 'F'))
+ return ch - 'A' + 10;
+ else
+ return 0xff;
+}
+
+u8 str_2char2num(u8 hch, u8 lch)
+{
+ return (key_char2num(hch) * 10) + key_char2num(lch);
+}
+
+u8 key_2char2num(u8 hch, u8 lch)
+{
+ return (key_char2num(hch) << 4) | key_char2num(lch);
+}
+
+void rtw_macaddr_cfg(u8 *mac_addr)
+{
+ u8 mac[ETH_ALEN];
+ if (mac_addr == NULL)
+ return;
+
+ if (rtw_initmac) { /* Users specify the mac address */
+ int jj, kk;
+
+ for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
+ mac[jj] = key_2char2num(rtw_initmac[kk], rtw_initmac[kk + 1]);
+ memcpy(mac_addr, mac, ETH_ALEN);
+ } else { /* Use the mac address stored in the Efuse */
+ memcpy(mac, mac_addr, ETH_ALEN);
+ }
+
+ if (((mac[0] == 0xff) && (mac[1] == 0xff) && (mac[2] == 0xff) &&
+ (mac[3] == 0xff) && (mac[4] == 0xff) && (mac[5] == 0xff)) ||
+ ((mac[0] == 0x0) && (mac[1] == 0x0) && (mac[2] == 0x0) &&
+ (mac[3] == 0x0) && (mac[4] == 0x0) && (mac[5] == 0x0))) {
+ mac[0] = 0x00;
+ mac[1] = 0xe0;
+ mac[2] = 0x4c;
+ mac[3] = 0x87;
+ mac[4] = 0x00;
+ mac[5] = 0x00;
+ /* use default mac addresss */
+ memcpy(mac_addr, mac, ETH_ALEN);
+ DBG_88E("MAC Address from efuse error, assign default one !!!\n");
+ }
+
+ DBG_88E("rtw_macaddr_cfg MAC Address = %pM\n", (mac_addr));
+}
+
+void dump_ies(u8 *buf, u32 buf_len)
+{
+ u8 *pos = (u8 *)buf;
+ u8 id, len;
+
+ while (pos-buf <= buf_len) {
+ id = *pos;
+ len = *(pos+1);
+
+ DBG_88E("%s ID:%u, LEN:%u\n", __func__, id, len);
+ #ifdef CONFIG_88EU_P2P
+ dump_p2p_ie(pos, len);
+ #endif
+ dump_wps_ie(pos, len);
+
+ pos += (2 + len);
+ }
+}
+
+void dump_wps_ie(u8 *ie, u32 ie_len)
+{
+ u8 *pos = (u8 *)ie;
+ u16 id;
+ u16 len;
+ u8 *wps_ie;
+ uint wps_ielen;
+
+ wps_ie = rtw_get_wps_ie(ie, ie_len, NULL, &wps_ielen);
+ if (wps_ie != ie || wps_ielen == 0)
+ return;
+
+ pos += 6;
+ while (pos-ie < ie_len) {
+ id = RTW_GET_BE16(pos);
+ len = RTW_GET_BE16(pos + 2);
+ DBG_88E("%s ID:0x%04x, LEN:%u\n", __func__, id, len);
+ pos += (4+len);
+ }
+}
+
+#ifdef CONFIG_88EU_P2P
+void dump_p2p_ie(u8 *ie, u32 ie_len)
+{
+ u8 *pos = (u8 *)ie;
+ u8 id;
+ u16 len;
+ u8 *p2p_ie;
+ uint p2p_ielen;
+
+ p2p_ie = rtw_get_p2p_ie(ie, ie_len, NULL, &p2p_ielen);
+ if (p2p_ie != ie || p2p_ielen == 0)
+ return;
+
+ pos += 6;
+ while (pos-ie < ie_len) {
+ id = *pos;
+ len = RTW_GET_LE16(pos+1);
+ DBG_88E("%s ID:%u, LEN:%u\n", __func__, id, len);
+ pos += (3+len);
+ }
+}
+
+/**
+ * rtw_get_p2p_ie - Search P2P IE from a series of IEs
+ * @in_ie: Address of IEs to search
+ * @in_len: Length limit from in_ie
+ * @p2p_ie: If not NULL and P2P IE is found, P2P IE will be copied to the buf starting from p2p_ie
+ * @p2p_ielen: If not NULL and P2P IE is found, will set to the length of the entire P2P IE
+ *
+ * Returns: The address of the P2P IE found, or NULL
+ */
+u8 *rtw_get_p2p_ie(u8 *in_ie, int in_len, u8 *p2p_ie, uint *p2p_ielen)
+{
+ uint cnt = 0;
+ u8 *p2p_ie_ptr;
+ u8 eid, p2p_oui[4] = {0x50, 0x6F, 0x9A, 0x09};
+
+ if (p2p_ielen != NULL)
+ *p2p_ielen = 0;
+
+ while (cnt < in_len) {
+ eid = in_ie[cnt];
+ if ((in_len < 0) || (cnt > MAX_IE_SZ)) {
+ dump_stack();
+ return NULL;
+ }
+ if ((eid == _VENDOR_SPECIFIC_IE_) && (_rtw_memcmp(&in_ie[cnt+2], p2p_oui, 4) == true)) {
+ p2p_ie_ptr = in_ie + cnt;
+
+ if (p2p_ie != NULL)
+ memcpy(p2p_ie, &in_ie[cnt], in_ie[cnt + 1] + 2);
+ if (p2p_ielen != NULL)
+ *p2p_ielen = in_ie[cnt + 1] + 2;
+ return p2p_ie_ptr;
+ } else {
+ cnt += in_ie[cnt + 1] + 2; /* goto next */
+ }
+ }
+ return NULL;
+}
+
+/**
+ * rtw_get_p2p_attr - Search a specific P2P attribute from a given P2P IE
+ * @p2p_ie: Address of P2P IE to search
+ * @p2p_ielen: Length limit from p2p_ie
+ * @target_attr_id: The attribute ID of P2P attribute to search
+ * @buf_attr: If not NULL and the P2P attribute is found, P2P attribute will be copied to the buf starting from buf_attr
+ * @len_attr: If not NULL and the P2P attribute is found, will set to the length of the entire P2P attribute
+ *
+ * Returns: the address of the specific P2P attribute found, or NULL
+ */
+u8 *rtw_get_p2p_attr(u8 *p2p_ie, uint p2p_ielen, u8 target_attr_id , u8 *buf_attr, u32 *len_attr)
+{
+ u8 *attr_ptr = NULL;
+ u8 *target_attr_ptr = NULL;
+ u8 p2p_oui[4] = {0x50, 0x6F, 0x9A, 0x09};
+
+ if (len_attr)
+ *len_attr = 0;
+
+ if (!p2p_ie || (p2p_ie[0] != _VENDOR_SPECIFIC_IE_) ||
+ (_rtw_memcmp(p2p_ie + 2, p2p_oui , 4) != true))
+ return attr_ptr;
+
+ /* 6 = 1(Element ID) + 1(Length) + 3 (OUI) + 1(OUI Type) */
+ attr_ptr = p2p_ie + 6; /* goto first attr */
+
+ while (attr_ptr - p2p_ie < p2p_ielen) {
+ /* 3 = 1(Attribute ID) + 2(Length) */
+ u8 attr_id = *attr_ptr;
+ u16 attr_data_len = RTW_GET_LE16(attr_ptr + 1);
+ u16 attr_len = attr_data_len + 3;
+
+ if (attr_id == target_attr_id) {
+ target_attr_ptr = attr_ptr;
+
+ if (buf_attr)
+ memcpy(buf_attr, attr_ptr, attr_len);
+ if (len_attr)
+ *len_attr = attr_len;
+ break;
+ } else {
+ attr_ptr += attr_len; /* goto next */
+ }
+ }
+ return target_attr_ptr;
+}
+
+/**
+ * rtw_get_p2p_attr_content - Search a specific P2P attribute content from a given P2P IE
+ * @p2p_ie: Address of P2P IE to search
+ * @p2p_ielen: Length limit from p2p_ie
+ * @target_attr_id: The attribute ID of P2P attribute to search
+ * @buf_content: If not NULL and the P2P attribute is found, P2P attribute content will be copied to the buf starting from buf_content
+ * @len_content: If not NULL and the P2P attribute is found, will set to the length of the P2P attribute content
+ *
+ * Returns: the address of the specific P2P attribute content found, or NULL
+ */
+u8 *rtw_get_p2p_attr_content(u8 *p2p_ie, uint p2p_ielen, u8 target_attr_id , u8 *buf_content, uint *len_content)
+{
+ u8 *attr_ptr;
+ u32 attr_len;
+
+ if (len_content)
+ *len_content = 0;
+
+ attr_ptr = rtw_get_p2p_attr(p2p_ie, p2p_ielen, target_attr_id, NULL, &attr_len);
+
+ if (attr_ptr && attr_len) {
+ if (buf_content)
+ memcpy(buf_content, attr_ptr+3, attr_len-3);
+
+ if (len_content)
+ *len_content = attr_len-3;
+
+ return attr_ptr+3;
+ }
+
+ return NULL;
+}
+
+u32 rtw_set_p2p_attr_content(u8 *pbuf, u8 attr_id, u16 attr_len, u8 *pdata_attr)
+{
+ u32 a_len;
+
+ *pbuf = attr_id;
+
+ /* *(u16 *)(pbuf + 1) = cpu_to_le16(attr_len); */
+ RTW_PUT_LE16(pbuf + 1, attr_len);
+
+ if (pdata_attr)
+ memcpy(pbuf + 3, pdata_attr, attr_len);
+
+ a_len = attr_len + 3;
+
+ return a_len;
+}
+
+static uint rtw_p2p_attr_remove(u8 *ie, uint ielen_ori, u8 attr_id)
+{
+ u8 *target_attr;
+ u32 target_attr_len;
+ uint ielen = ielen_ori;
+
+ while (1) {
+ target_attr = rtw_get_p2p_attr(ie, ielen, attr_id, NULL, &target_attr_len);
+ if (target_attr && target_attr_len) {
+ u8 *next_attr = target_attr+target_attr_len;
+ uint remain_len = ielen-(next_attr-ie);
+
+ _rtw_memset(target_attr, 0, target_attr_len);
+ memcpy(target_attr, next_attr, remain_len);
+ _rtw_memset(target_attr+remain_len, 0, target_attr_len);
+ *(ie+1) -= target_attr_len;
+ ielen -= target_attr_len;
+ } else {
+ break;
+ }
+ }
+ return ielen;
+}
+
+void rtw_wlan_bssid_ex_remove_p2p_attr(struct wlan_bssid_ex *bss_ex, u8 attr_id)
+{
+ u8 *p2p_ie;
+ uint p2p_ielen, p2p_ielen_ori;
+
+ p2p_ie = rtw_get_p2p_ie(bss_ex->IEs+_FIXED_IE_LENGTH_, bss_ex->IELength-_FIXED_IE_LENGTH_, NULL, &p2p_ielen_ori);
+ if (p2p_ie) {
+ p2p_ielen = rtw_p2p_attr_remove(p2p_ie, p2p_ielen_ori, attr_id);
+ if (p2p_ielen != p2p_ielen_ori) {
+ u8 *next_ie_ori = p2p_ie+p2p_ielen_ori;
+ u8 *next_ie = p2p_ie+p2p_ielen;
+ uint remain_len = bss_ex->IELength-(next_ie_ori-bss_ex->IEs);
+
+ memcpy(next_ie, next_ie_ori, remain_len);
+ _rtw_memset(next_ie+remain_len, 0, p2p_ielen_ori-p2p_ielen);
+ bss_ex->IELength -= p2p_ielen_ori-p2p_ielen;
+ }
+ }
+}
+
+#endif /* CONFIG_88EU_P2P */
+
+/* Added by Baron to avoid a FreeBSD warning */
+int ieee80211_is_empty_essid(const char *essid, int essid_len)
+{
+ /* Single white space is for Linksys APs */
+ if (essid_len == 1 && essid[0] == ' ')
+ return 1;
+
+ /* Otherwise, if the entire essid is 0, we assume it is hidden */
+ while (essid_len) {
+ essid_len--;
+ if (essid[essid_len] != '\0')
+ return 0;
+ }
+
+ return 1;
+}
+
+int ieee80211_get_hdrlen(u16 fc)
+{
+ int hdrlen = 24;
+
+ switch (WLAN_FC_GET_TYPE(fc)) {
+ case RTW_IEEE80211_FTYPE_DATA:
+ if (fc & RTW_IEEE80211_STYPE_QOS_DATA)
+ hdrlen += 2;
+ if ((fc & RTW_IEEE80211_FCTL_FROMDS) && (fc & RTW_IEEE80211_FCTL_TODS))
+ hdrlen += 6; /* Addr4 */
+ break;
+ case RTW_IEEE80211_FTYPE_CTL:
+ switch (WLAN_FC_GET_STYPE(fc)) {
+ case RTW_IEEE80211_STYPE_CTS:
+ case RTW_IEEE80211_STYPE_ACK:
+ hdrlen = 10;
+ break;
+ default:
+ hdrlen = 16;
+ break;
+ }
+ break;
+ }
+
+ return hdrlen;
+}
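+
+/*
+ * For example, a QoS data frame with both ToDS and FromDS set gets
+ * 24 + 2 (QoS control) + 6 (Addr4) = 32 octets here, while CTS and ACK
+ * control frames get the short 10-octet header and other control frames 16.
+ */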
+
+static int rtw_get_cipher_info(struct wlan_network *pnetwork)
+{
+ u32 wpa_ielen;
+ unsigned char *pbuf;
+ int group_cipher = 0, pairwise_cipher = 0, is8021x = 0;
+ int ret = _FAIL;
+ pbuf = rtw_get_wpa_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength-12);
+
+ if (pbuf && (wpa_ielen > 0)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_cipher_info: wpa_ielen: %d", wpa_ielen));
+ if (_SUCCESS == rtw_parse_wpa_ie(pbuf, wpa_ielen+2, &group_cipher, &pairwise_cipher, &is8021x)) {
+ pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher;
+ pnetwork->BcnInfo.group_cipher = group_cipher;
+ pnetwork->BcnInfo.is_8021x = is8021x;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s: pnetwork->pairwise_cipher: %d, is_8021x is %d",
+ __func__, pnetwork->BcnInfo.pairwise_cipher, pnetwork->BcnInfo.is_8021x));
+ ret = _SUCCESS;
+ }
+ } else {
+ pbuf = rtw_get_wpa2_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength-12);
+
+ if (pbuf && (wpa_ielen > 0)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("get RSN IE\n"));
+ if (_SUCCESS == rtw_parse_wpa2_ie(pbuf, wpa_ielen+2, &group_cipher, &pairwise_cipher, &is8021x)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("get RSN IE OK!!!\n"));
+ pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher;
+ pnetwork->BcnInfo.group_cipher = group_cipher;
+ pnetwork->BcnInfo.is_8021x = is8021x;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s: pnetwork->pairwise_cipher: %d,"
+ "pnetwork->group_cipher is %d, is_8021x is %d", __func__, pnetwork->BcnInfo.pairwise_cipher,
+ pnetwork->BcnInfo.group_cipher, pnetwork->BcnInfo.is_8021x));
+ ret = _SUCCESS;
+ }
+ }
+ }
+
+ return ret;
+}
+
+void rtw_get_bcn_info(struct wlan_network *pnetwork)
+{
+ unsigned short cap = 0;
+ u8 bencrypt = 0;
+ __le16 le_tmp;
+ u16 wpa_len = 0, rsn_len = 0;
+ struct HT_info_element *pht_info = NULL;
+ struct rtw_ieee80211_ht_cap *pht_cap = NULL;
+ unsigned int len;
+ unsigned char *p;
+
+ memcpy(&le_tmp, rtw_get_capability_from_ie(pnetwork->network.IEs), 2);
+ cap = le16_to_cpu(le_tmp);
+ if (cap & WLAN_CAPABILITY_PRIVACY) {
+ bencrypt = 1;
+ pnetwork->network.Privacy = 1;
+ } else {
+ pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_OPENSYS;
+ }
+ rtw_get_sec_ie(pnetwork->network.IEs , pnetwork->network.IELength, NULL, &rsn_len, NULL, &wpa_len);
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_bcn_info: ssid =%s\n", pnetwork->network.Ssid.Ssid));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_bcn_info: wpa_len =%d rsn_len =%d\n", wpa_len, rsn_len));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_bcn_info: ssid =%s\n", pnetwork->network.Ssid.Ssid));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_bcn_info: wpa_len =%d rsn_len =%d\n", wpa_len, rsn_len));
+
+ if (rsn_len > 0) {
+ pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_WPA2;
+ } else if (wpa_len > 0) {
+ pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_WPA;
+ } else {
+ if (bencrypt)
+ pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_WEP;
+ }
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_bcn_info: pnetwork->encryp_protocol is %x\n",
+ pnetwork->BcnInfo.encryp_protocol));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_bcn_info: pnetwork->encryp_protocol is %x\n",
+ pnetwork->BcnInfo.encryp_protocol));
+ rtw_get_cipher_info(pnetwork);
+
+ /* get bwmode and ch_offset */
+ /* parsing HT_CAP_IE */
+ p = rtw_get_ie(pnetwork->network.IEs + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, pnetwork->network.IELength - _FIXED_IE_LENGTH_);
+ if (p && len > 0) {
+ pht_cap = (struct rtw_ieee80211_ht_cap *)(p + 2);
+ pnetwork->BcnInfo.ht_cap_info = pht_cap->cap_info;
+ } else {
+ pnetwork->BcnInfo.ht_cap_info = 0;
+ }
+ /* parsing HT_INFO_IE */
+ p = rtw_get_ie(pnetwork->network.IEs + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, pnetwork->network.IELength - _FIXED_IE_LENGTH_);
+ if (p && len > 0) {
+ pht_info = (struct HT_info_element *)(p + 2);
+ pnetwork->BcnInfo.ht_info_infos_0 = pht_info->infos[0];
+ } else {
+ pnetwork->BcnInfo.ht_info_infos_0 = 0;
+ }
+}
+
+/* show MCS rate, unit: 100Kbps */
+u16 rtw_mcs_rate(u8 rf_type, u8 bw_40MHz, u8 short_GI_20, u8 short_GI_40, unsigned char *MCS_rate)
+{
+ u16 max_rate = 0;
+
+ if (rf_type == RF_1T1R) {
+ if (MCS_rate[0] & BIT(7))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 1500 : 1350) : ((short_GI_20) ? 722 : 650);
+ else if (MCS_rate[0] & BIT(6))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 1350 : 1215) : ((short_GI_20) ? 650 : 585);
+ else if (MCS_rate[0] & BIT(5))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 1200 : 1080) : ((short_GI_20) ? 578 : 520);
+ else if (MCS_rate[0] & BIT(4))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 900 : 810) : ((short_GI_20) ? 433 : 390);
+ else if (MCS_rate[0] & BIT(3))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 600 : 540) : ((short_GI_20) ? 289 : 260);
+ else if (MCS_rate[0] & BIT(2))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 450 : 405) : ((short_GI_20) ? 217 : 195);
+ else if (MCS_rate[0] & BIT(1))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 300 : 270) : ((short_GI_20) ? 144 : 130);
+ else if (MCS_rate[0] & BIT(0))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 150 : 135) : ((short_GI_20) ? 72 : 65);
+ } else {
+ if (MCS_rate[1]) {
+ if (MCS_rate[1] & BIT(7))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 3000 : 2700) : ((short_GI_20) ? 1444 : 1300);
+ else if (MCS_rate[1] & BIT(6))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 2700 : 2430) : ((short_GI_20) ? 1300 : 1170);
+ else if (MCS_rate[1] & BIT(5))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 2400 : 2160) : ((short_GI_20) ? 1156 : 1040);
+ else if (MCS_rate[1] & BIT(4))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 1800 : 1620) : ((short_GI_20) ? 867 : 780);
+ else if (MCS_rate[1] & BIT(3))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 1200 : 1080) : ((short_GI_20) ? 578 : 520);
+ else if (MCS_rate[1] & BIT(2))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 900 : 810) : ((short_GI_20) ? 433 : 390);
+ else if (MCS_rate[1] & BIT(1))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 600 : 540) : ((short_GI_20) ? 289 : 260);
+ else if (MCS_rate[1] & BIT(0))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 300 : 270) : ((short_GI_20) ? 144 : 130);
+ } else {
+ if (MCS_rate[0] & BIT(7))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 1500 : 1350) : ((short_GI_20) ? 722 : 650);
+ else if (MCS_rate[0] & BIT(6))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 1350 : 1215) : ((short_GI_20) ? 650 : 585);
+ else if (MCS_rate[0] & BIT(5))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 1200 : 1080) : ((short_GI_20) ? 578 : 520);
+ else if (MCS_rate[0] & BIT(4))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 900 : 810) : ((short_GI_20) ? 433 : 390);
+ else if (MCS_rate[0] & BIT(3))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 600 : 540) : ((short_GI_20) ? 289 : 260);
+ else if (MCS_rate[0] & BIT(2))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 450 : 405) : ((short_GI_20) ? 217 : 195);
+ else if (MCS_rate[0] & BIT(1))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 300 : 270) : ((short_GI_20) ? 144 : 130);
+ else if (MCS_rate[0] & BIT(0))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 150 : 135) : ((short_GI_20) ? 72 : 65);
+ }
+ }
+ return max_rate;
+}
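+
+/*
+ * For example, a 1T1R peer advertising MCS0-7 (MCS_rate[0] == 0xff) yields
+ * 1500 (150Mbps) at 40MHz with short GI and 650 (65Mbps) at 20MHz with
+ * long GI, in the 100Kbps unit noted above.
+ */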
+
+int rtw_action_frame_parse(const u8 *frame, u32 frame_len, u8 *category, u8 *action)
+{
+ const u8 *frame_body = frame + sizeof(struct rtw_ieee80211_hdr_3addr);
+ u16 fc;
+ u8 c, a = 0;
+
+ fc = le16_to_cpu(((struct rtw_ieee80211_hdr_3addr *)frame)->frame_ctl);
+
+ if ((fc & (RTW_IEEE80211_FCTL_FTYPE|RTW_IEEE80211_FCTL_STYPE)) !=
+ (RTW_IEEE80211_FTYPE_MGMT|RTW_IEEE80211_STYPE_ACTION))
+ return false;
+
+ c = frame_body[0];
+
+ switch (c) {
+ case RTW_WLAN_CATEGORY_P2P: /* vendor-specific */
+ break;
+ default:
+ a = frame_body[1];
+ }
+
+ if (category)
+ *category = c;
+ if (action)
+ *action = a;
+
+ return true;
+}
+
+static const char *_action_public_str[] = {
+ "ACT_PUB_BSSCOEXIST",
+ "ACT_PUB_DSE_ENABLE",
+ "ACT_PUB_DSE_DEENABLE",
+ "ACT_PUB_DSE_REG_LOCATION",
+ "ACT_PUB_EXT_CHL_SWITCH",
+ "ACT_PUB_DSE_MSR_REQ",
+ "ACT_PUB_DSE_MSR_RPRT",
+ "ACT_PUB_MP",
+ "ACT_PUB_DSE_PWR_CONSTRAINT",
+ "ACT_PUB_VENDOR",
+ "ACT_PUB_GAS_INITIAL_REQ",
+ "ACT_PUB_GAS_INITIAL_RSP",
+ "ACT_PUB_GAS_COMEBACK_REQ",
+ "ACT_PUB_GAS_COMEBACK_RSP",
+ "ACT_PUB_TDLS_DISCOVERY_RSP",
+ "ACT_PUB_LOCATION_TRACK",
+ "ACT_PUB_RSVD",
+};
+
+const char *action_public_str(u8 action)
+{
+ action = (action >= ACT_PUBLIC_MAX) ? ACT_PUBLIC_MAX : action;
+ return _action_public_str[action];
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_io.c b/drivers/staging/rtl8188eu/core/rtw_io.c
new file mode 100644
index 0000000..10c9c65
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_io.c
@@ -0,0 +1,329 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+/*
+ * The purpose of rtw_io.c:
+ *
+ * a. provide the API
+ * b. provide the protocol engine
+ * c. provide the software interface between the caller and the hardware interface
+ *
+ * Compiler flag option:
+ *
+ * USB:
+ *  a. USE_ASYNC_IRP: both sync and async operations are provided;
+ *     without it, only sync read/rtw_write_mem operations are provided.
+ *
+ * jackson@realtek.com.tw
+ */
+
+#define _RTW_IO_C_
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <rtw_io.h>
+#include <osdep_intf.h>
+#include <usb_ops.h>
+
+#define rtw_le16_to_cpu(val) le16_to_cpu(val)
+#define rtw_le32_to_cpu(val) le32_to_cpu(val)
+#define rtw_cpu_to_le16(val) cpu_to_le16(val)
+#define rtw_cpu_to_le32(val) cpu_to_le32(val)
+
+
+u8 _rtw_read8(struct adapter *adapter, u32 addr)
+{
+ u8 r_val;
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ u8 (*_read8)(struct intf_hdl *pintfhdl, u32 addr);
+
+ _func_enter_;
+ _read8 = pintfhdl->io_ops._read8;
+ r_val = _read8(pintfhdl, addr);
+ _func_exit_;
+ return r_val;
+}
+
+u16 _rtw_read16(struct adapter *adapter, u32 addr)
+{
+ u16 r_val;
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ u16 (*_read16)(struct intf_hdl *pintfhdl, u32 addr);
+_func_enter_;
+ _read16 = pintfhdl->io_ops._read16;
+
+ r_val = _read16(pintfhdl, addr);
+_func_exit_;
+ return r_val;
+}
+
+u32 _rtw_read32(struct adapter *adapter, u32 addr)
+{
+ u32 r_val;
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ u32 (*_read32)(struct intf_hdl *pintfhdl, u32 addr);
+_func_enter_;
+ _read32 = pintfhdl->io_ops._read32;
+
+ r_val = _read32(pintfhdl, addr);
+_func_exit_;
+ return r_val;
+}
+
+int _rtw_write8(struct adapter *adapter, u32 addr, u8 val)
+{
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ int (*_write8)(struct intf_hdl *pintfhdl, u32 addr, u8 val);
+ int ret;
+ _func_enter_;
+ _write8 = pintfhdl->io_ops._write8;
+
+ ret = _write8(pintfhdl, addr, val);
+ _func_exit_;
+
+ return RTW_STATUS_CODE(ret);
+}
+
+int _rtw_write16(struct adapter *adapter, u32 addr, u16 val)
+{
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ int (*_write16)(struct intf_hdl *pintfhdl, u32 addr, u16 val);
+ int ret;
+ _func_enter_;
+ _write16 = pintfhdl->io_ops._write16;
+
+ ret = _write16(pintfhdl, addr, val);
+ _func_exit_;
+
+ return RTW_STATUS_CODE(ret);
+}
+int _rtw_write32(struct adapter *adapter, u32 addr, u32 val)
+{
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ int (*_write32)(struct intf_hdl *pintfhdl, u32 addr, u32 val);
+ int ret;
+ _func_enter_;
+ _write32 = pintfhdl->io_ops._write32;
+
+ ret = _write32(pintfhdl, addr, val);
+ _func_exit_;
+
+ return RTW_STATUS_CODE(ret);
+}
+
+int _rtw_writeN(struct adapter *adapter, u32 addr , u32 length , u8 *pdata)
+{
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = (struct intf_hdl *)(&(pio_priv->intf));
+ int (*_writeN)(struct intf_hdl *pintfhdl, u32 addr, u32 length, u8 *pdata);
+ int ret;
+ _func_enter_;
+ _writeN = pintfhdl->io_ops._writeN;
+
+ ret = _writeN(pintfhdl, addr, length, pdata);
+ _func_exit_;
+
+ return RTW_STATUS_CODE(ret);
+}
+int _rtw_write8_async(struct adapter *adapter, u32 addr, u8 val)
+{
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ int (*_write8_async)(struct intf_hdl *pintfhdl, u32 addr, u8 val);
+ int ret;
+ _func_enter_;
+ _write8_async = pintfhdl->io_ops._write8_async;
+
+ ret = _write8_async(pintfhdl, addr, val);
+ _func_exit_;
+
+ return RTW_STATUS_CODE(ret);
+}
+
+int _rtw_write16_async(struct adapter *adapter, u32 addr, u16 val)
+{
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ int (*_write16_async)(struct intf_hdl *pintfhdl, u32 addr, u16 val);
+ int ret;
+
+_func_enter_;
+ _write16_async = pintfhdl->io_ops._write16_async;
+ ret = _write16_async(pintfhdl, addr, val);
+_func_exit_;
+
+ return RTW_STATUS_CODE(ret);
+}
+
+int _rtw_write32_async(struct adapter *adapter, u32 addr, u32 val)
+{
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ int (*_write32_async)(struct intf_hdl *pintfhdl, u32 addr, u32 val);
+ int ret;
+
+_func_enter_;
+ _write32_async = pintfhdl->io_ops._write32_async;
+ ret = _write32_async(pintfhdl, addr, val);
+_func_exit_;
+
+ return RTW_STATUS_CODE(ret);
+}
+
+void _rtw_read_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
+{
+ void (*_read_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+
+ _func_enter_;
+ if (adapter->bDriverStopped || adapter->bSurpriseRemoved) {
+ RT_TRACE(_module_rtl871x_io_c_, _drv_info_,
+ ("rtw_read_mem:bDriverStopped(%d) OR bSurpriseRemoved(%d)",
+ adapter->bDriverStopped, adapter->bSurpriseRemoved));
+ return;
+ }
+ _read_mem = pintfhdl->io_ops._read_mem;
+ _read_mem(pintfhdl, addr, cnt, pmem);
+ _func_exit_;
+}
+
+void _rtw_write_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
+{
+ void (*_write_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+
+ _func_enter_;
+
+ _write_mem = pintfhdl->io_ops._write_mem;
+
+ _write_mem(pintfhdl, addr, cnt, pmem);
+
+ _func_exit_;
+}
+
+void _rtw_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
+{
+ u32 (*_read_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+
+ _func_enter_;
+
+ if (adapter->bDriverStopped || adapter->bSurpriseRemoved) {
+ RT_TRACE(_module_rtl871x_io_c_, _drv_info_,
+ ("rtw_read_port:bDriverStopped(%d) OR bSurpriseRemoved(%d)",
+ adapter->bDriverStopped, adapter->bSurpriseRemoved));
+ return;
+ }
+
+ _read_port = pintfhdl->io_ops._read_port;
+
+ _read_port(pintfhdl, addr, cnt, pmem);
+
+ _func_exit_;
+}
+
+void _rtw_read_port_cancel(struct adapter *adapter)
+{
+ void (*_read_port_cancel)(struct intf_hdl *pintfhdl);
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+
+ _read_port_cancel = pintfhdl->io_ops._read_port_cancel;
+
+ if (_read_port_cancel)
+ _read_port_cancel(pintfhdl);
+}
+
+u32 _rtw_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
+{
+ u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ u32 ret = _SUCCESS;
+
+ _func_enter_;
+
+ _write_port = pintfhdl->io_ops._write_port;
+
+ ret = _write_port(pintfhdl, addr, cnt, pmem);
+
+ _func_exit_;
+
+ return ret;
+}
+
+u32 _rtw_write_port_and_wait(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem, int timeout_ms)
+{
+ int ret = _SUCCESS;
+ struct xmit_buf *pxmitbuf = (struct xmit_buf *)pmem;
+ struct submit_ctx sctx;
+
+ rtw_sctx_init(&sctx, timeout_ms);
+ pxmitbuf->sctx = &sctx;
+
+ ret = _rtw_write_port(adapter, addr, cnt, pmem);
+
+ if (ret == _SUCCESS)
+ ret = rtw_sctx_wait(&sctx);
+
+ return ret;
+}
+
+void _rtw_write_port_cancel(struct adapter *adapter)
+{
+ void (*_write_port_cancel)(struct intf_hdl *pintfhdl);
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+
+ _write_port_cancel = pintfhdl->io_ops._write_port_cancel;
+
+ if (_write_port_cancel)
+ _write_port_cancel(pintfhdl);
+}
+
+int rtw_init_io_priv(struct adapter *padapter, void (*set_intf_ops)(struct _io_ops *pops))
+{
+ struct io_priv *piopriv = &padapter->iopriv;
+ struct intf_hdl *pintf = &piopriv->intf;
+
+ if (set_intf_ops == NULL)
+ return _FAIL;
+
+ piopriv->padapter = padapter;
+ pintf->padapter = padapter;
+ pintf->pintf_dev = adapter_to_dvobj(padapter);
+
+ set_intf_ops(&pintf->io_ops);
+
+ return _SUCCESS;
+}
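+
+/*
+ * A minimal sketch of how an interface layer is expected to hook in; the
+ * usb_set_intf_ops() name is illustrative for whatever helper fills the
+ * _io_ops table for the bus in use:
+ *
+ *	if (rtw_init_io_priv(padapter, usb_set_intf_ops) == _FAIL)
+ *		goto free_adapter;
+ *
+ * Afterwards _rtw_read8()/_rtw_write8() and friends dispatch through
+ * pintfhdl->io_ops as set up here.
+ */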
diff --git a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
new file mode 100644
index 0000000..193f641
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
@@ -0,0 +1,1169 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_IOCTL_SET_C_
+
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <rtw_ioctl_set.h>
+#include <hal_intf.h>
+
+#include <usb_osintf.h>
+#include <usb_ops.h>
+
+extern void indicate_wx_scan_complete_event(struct adapter *padapter);
+
+#define IS_MAC_ADDRESS_BROADCAST(addr) \
+(\
+ ((addr[0] == 0xff) && (addr[1] == 0xff) && \
+ (addr[2] == 0xff) && (addr[3] == 0xff) && \
+ (addr[4] == 0xff) && (addr[5] == 0xff)) ? true : false \
+)
+
+u8 rtw_validate_ssid(struct ndis_802_11_ssid *ssid)
+{
+ u8 i;
+ u8 ret = true;
+
+_func_enter_;
+
+ if (ssid->SsidLength > 32) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("ssid length >32\n"));
+ ret = false;
+ goto exit;
+ }
+
+ for (i = 0; i < ssid->SsidLength; i++) {
+ /* Wi-Fi requires the SSID to consist of printable ASCII characters */
+ if (!((ssid->Ssid[i] >= 0x20) && (ssid->Ssid[i] <= 0x7e))) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("ssid has nonprintabl ascii\n"));
+ ret = false;
+ break;
+ }
+ }
+
+exit:
+
+_func_exit_;
+
+ return ret;
+}
+
+u8 rtw_do_join(struct adapter *padapter)
+{
+ unsigned long irqL;
+ struct list_head *plist, *phead;
+ u8 *pibss = NULL;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ u8 ret = _SUCCESS;
+
+_func_enter_;
+
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("\n rtw_do_join: phead = %p; plist = %p\n\n\n", phead, plist));
+
+ pmlmepriv->cur_network.join_res = -2;
+
+ set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
+
+ pmlmepriv->pscanned = plist;
+
+ pmlmepriv->to_join = true;
+
+ if (_rtw_queue_empty(queue)) {
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+
+ /* when set_ssid/set_bssid is called for rtw_do_join() but the scanned queue is empty, */
+ /* try to issue a site survey first */
+
+ if (!pmlmepriv->LinkDetectInfo.bBusyTraffic ||
+ pmlmepriv->to_roaming > 0) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("rtw_do_join(): site survey if scanned_queue is empty\n."));
+ /* submit site_survey_cmd */
+ ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1, NULL, 0);
+ if (_SUCCESS != ret) {
+ pmlmepriv->to_join = false;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("rtw_do_join(): site survey return error\n."));
+ }
+ } else {
+ pmlmepriv->to_join = false;
+ ret = _FAIL;
+ }
+
+ goto exit;
+ } else {
+ int select_ret;
+
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ select_ret = rtw_select_and_join_from_scanned_queue(pmlmepriv);
+ if (select_ret == _SUCCESS) {
+ pmlmepriv->to_join = false;
+ _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT);
+ } else {
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) {
+ /* submit createbss_cmd to change to a ADHOC_MASTER */
+
+ /* pmlmepriv->lock has been acquired by caller... */
+ struct wlan_bssid_ex *pdev_network = &(padapter->registrypriv.dev_network);
+
+ pmlmepriv->fw_state = WIFI_ADHOC_MASTER_STATE;
+
+ pibss = padapter->registrypriv.dev_network.MacAddress;
+
+ _rtw_memset(&pdev_network->Ssid, 0, sizeof(struct ndis_802_11_ssid));
+ memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
+
+ rtw_update_registrypriv_dev_network(padapter);
+
+ rtw_generate_random_ibss(pibss);
+
+ if (rtw_createbss_cmd(padapter) != _SUCCESS) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("***Error =>do_goin: rtw_createbss_cmd status FAIL***\n "));
+ ret = false;
+ goto exit;
+ }
+ pmlmepriv->to_join = false;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("***Error => rtw_select_and_join_from_scanned_queue FAIL under STA_Mode***\n "));
+ } else {
+ /* can't associate ; reset under-linking */
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+
+ /* when set_ssid/set_bssid is called for rtw_do_join() but the desired bss is not in the scanned queue, */
+ /* try to issue a site survey first */
+ if (!pmlmepriv->LinkDetectInfo.bBusyTraffic ||
+ pmlmepriv->to_roaming > 0) {
+ ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1, NULL, 0);
+ if (_SUCCESS != ret) {
+ pmlmepriv->to_join = false;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("do_join(): site survey return error\n."));
+ }
+ } else {
+ ret = _FAIL;
+ pmlmepriv->to_join = false;
+ }
+ }
+ }
+ }
+
+exit:
+
+_func_exit_;
+
+ return ret;
+}
+
+u8 rtw_set_802_11_bssid(struct adapter *padapter, u8 *bssid)
+{
+ unsigned long irqL;
+ u8 status = _SUCCESS;
+ u32 cur_time = 0;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+_func_enter_;
+
+ DBG_88E_LEVEL(_drv_info_, "set bssid:%pM\n", bssid);
+
+ if ((bssid[0] == 0x00 && bssid[1] == 0x00 && bssid[2] == 0x00 &&
+ bssid[3] == 0x00 && bssid[4] == 0x00 && bssid[5] == 0x00) ||
+ (bssid[0] == 0xFF && bssid[1] == 0xFF && bssid[2] == 0xFF &&
+ bssid[3] == 0xFF && bssid[4] == 0xFF && bssid[5] == 0xFF)) {
+ status = _FAIL;
+ goto exit;
+ }
+
+ _enter_critical_bh(&pmlmepriv->lock, &irqL);
+
+
+ DBG_88E("Set BSSID under fw_state = 0x%08x\n", get_fwstate(pmlmepriv));
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true)
+ goto handle_tkip_countermeasure;
+ else if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
+ goto release_mlme_lock;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE)) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_bssid: _FW_LINKED||WIFI_ADHOC_MASTER_STATE\n"));
+
+ if (_rtw_memcmp(&pmlmepriv->cur_network.network.MacAddress, bssid, ETH_ALEN)) {
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == false)
+ goto release_mlme_lock;/* driver is in WIFI_ADHOC_MASTER_STATE, no need to create the bss again */
+ } else {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("Set BSSID not the same bssid\n"));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_bssid =%pM\n", (bssid)));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("cur_bssid =%pM\n", (pmlmepriv->cur_network.network.MacAddress)));
+
+ rtw_disassoc_cmd(padapter, 0, true);
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ rtw_indicate_disconnect(padapter);
+
+ rtw_free_assoc_resources(padapter, 1);
+
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)) {
+ _clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE);
+ set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
+ }
+ }
+ }
+
+handle_tkip_countermeasure:
+ /* should we add something here...? */
+
+ if (padapter->securitypriv.btkip_countermeasure) {
+ cur_time = rtw_get_current_time();
+
+ if ((cur_time - padapter->securitypriv.btkip_countermeasure_time) > 60 * HZ) {
+ padapter->securitypriv.btkip_countermeasure = false;
+ padapter->securitypriv.btkip_countermeasure_time = 0;
+ } else {
+ status = _FAIL;
+ goto release_mlme_lock;
+ }
+ }
+
+ memcpy(&pmlmepriv->assoc_bssid, bssid, ETH_ALEN);
+ pmlmepriv->assoc_by_bssid = true;
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
+ pmlmepriv->to_join = true;
+ else
+ status = rtw_do_join(padapter);
+
+release_mlme_lock:
+ _exit_critical_bh(&pmlmepriv->lock, &irqL);
+
+exit:
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("rtw_set_802_11_bssid: status=%d\n", status));
+
+_func_exit_;
+
+ return status;
+}
+
+u8 rtw_set_802_11_ssid(struct adapter *padapter, struct ndis_802_11_ssid *ssid)
+{
+ unsigned long irqL;
+ u8 status = _SUCCESS;
+ u32 cur_time = 0;
+
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_network *pnetwork = &pmlmepriv->cur_network;
+
+_func_enter_;
+
+ DBG_88E_LEVEL(_drv_info_, "set ssid [%s] fw_state=0x%08x\n",
+ ssid->Ssid, get_fwstate(pmlmepriv));
+
+ if (!padapter->hw_init_completed) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("set_ssid: hw_init_completed == false =>exit!!!\n"));
+ status = _FAIL;
+ goto exit;
+ }
+
+ _enter_critical_bh(&pmlmepriv->lock, &irqL);
+
+ DBG_88E("Set SSID under fw_state = 0x%08x\n", get_fwstate(pmlmepriv));
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
+ goto handle_tkip_countermeasure;
+ } else if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true) {
+ goto release_mlme_lock;
+ }
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE)) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("set_ssid: _FW_LINKED||WIFI_ADHOC_MASTER_STATE\n"));
+
+ if ((pmlmepriv->assoc_ssid.SsidLength == ssid->SsidLength) &&
+ (_rtw_memcmp(&pmlmepriv->assoc_ssid.Ssid, ssid->Ssid, ssid->SsidLength))) {
+ if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == false)) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("Set SSID is the same ssid, fw_state = 0x%08x\n",
+ get_fwstate(pmlmepriv)));
+
+ if (!rtw_is_same_ibss(padapter, pnetwork)) {
+ /* if in WIFI_ADHOC_MASTER_STATE | WIFI_ADHOC_STATE, create bss or rejoin again */
+ rtw_disassoc_cmd(padapter, 0, true);
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ rtw_indicate_disconnect(padapter);
+
+ rtw_free_assoc_resources(padapter, 1);
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) {
+ _clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE);
+ set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
+ }
+ } else {
+ goto release_mlme_lock;/* driver is in WIFI_ADHOC_MASTER_STATE, no need to create the bss again */
+ }
+ } else {
+ rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_JOINBSS, 1);
+ }
+ } else {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("Set SSID not the same ssid\n"));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_ssid =[%s] len = 0x%x\n", ssid->Ssid, (unsigned int)ssid->SsidLength));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("assoc_ssid =[%s] len = 0x%x\n", pmlmepriv->assoc_ssid.Ssid, (unsigned int)pmlmepriv->assoc_ssid.SsidLength));
+
+ rtw_disassoc_cmd(padapter, 0, true);
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ rtw_indicate_disconnect(padapter);
+
+ rtw_free_assoc_resources(padapter, 1);
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) {
+ _clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE);
+ set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
+ }
+ }
+ }
+
+handle_tkip_countermeasure:
+
+ if (padapter->securitypriv.btkip_countermeasure) {
+ cur_time = rtw_get_current_time();
+
+ if ((cur_time - padapter->securitypriv.btkip_countermeasure_time) > 60 * HZ) {
+ padapter->securitypriv.btkip_countermeasure = false;
+ padapter->securitypriv.btkip_countermeasure_time = 0;
+ } else {
+ status = _FAIL;
+ goto release_mlme_lock;
+ }
+ }
+
+ memcpy(&pmlmepriv->assoc_ssid, ssid, sizeof(struct ndis_802_11_ssid));
+ pmlmepriv->assoc_by_bssid = false;
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
+ pmlmepriv->to_join = true;
+ } else {
+ status = rtw_do_join(padapter);
+ }
+
+release_mlme_lock:
+ _exit_critical_bh(&pmlmepriv->lock, &irqL);
+
+exit:
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("-rtw_set_802_11_ssid: status =%d\n", status));
+_func_exit_;
+ return status;
+}
+
+u8 rtw_set_802_11_infrastructure_mode(struct adapter *padapter,
+ enum ndis_802_11_network_infra networktype)
+{
+ unsigned long irqL;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_network *cur_network = &pmlmepriv->cur_network;
+ enum ndis_802_11_network_infra *pold_state = &(cur_network->network.InfrastructureMode);
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_notice_,
+ ("+rtw_set_802_11_infrastructure_mode: old =%d new =%d fw_state = 0x%08x\n",
+ *pold_state, networktype, get_fwstate(pmlmepriv)));
+
+ if (*pold_state != networktype) {
+ _enter_critical_bh(&pmlmepriv->lock, &irqL);
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, (" change mode!"));
+ /* DBG_88E("change mode, old_mode =%d, new_mode =%d, fw_state = 0x%x\n", *pold_state, networktype, get_fwstate(pmlmepriv)); */
+
+ if (*pold_state == Ndis802_11APMode) {
+ /* change to other mode from Ndis802_11APMode */
+ cur_network->join_res = -1;
+
+#ifdef CONFIG_88EU_AP_MODE
+ stop_ap_mode(padapter);
+#endif
+ }
+
+ if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
+ (*pold_state == Ndis802_11IBSS))
+ rtw_disassoc_cmd(padapter, 0, true);
+
+ if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)))
+ rtw_free_assoc_resources(padapter, 1);
+
+ if ((*pold_state == Ndis802_11Infrastructure) || (*pold_state == Ndis802_11IBSS)) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ rtw_indicate_disconnect(padapter); /* will clear Linked_state; before this function, we must have checked whether to issue disassoc_cmd or not */
+ }
+
+ *pold_state = networktype;
+
+ _clr_fwstate_(pmlmepriv, ~WIFI_NULL_STATE);
+
+ switch (networktype) {
+ case Ndis802_11IBSS:
+ set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
+ break;
+ case Ndis802_11Infrastructure:
+ set_fwstate(pmlmepriv, WIFI_STATION_STATE);
+ break;
+ case Ndis802_11APMode:
+ set_fwstate(pmlmepriv, WIFI_AP_STATE);
+#ifdef CONFIG_88EU_AP_MODE
+ start_ap_mode(padapter);
+#endif
+ break;
+ case Ndis802_11AutoUnknown:
+ case Ndis802_11InfrastructureMax:
+ break;
+ }
+ _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ }
+
+_func_exit_;
+
+ return true;
+}
+
+
+u8 rtw_set_802_11_disassociate(struct adapter *padapter)
+{
+ unsigned long irqL;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+_func_enter_;
+
+ _enter_critical_bh(&pmlmepriv->lock, &irqL);
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("MgntActrtw_set_802_11_disassociate: rtw_indicate_disconnect\n"));
+
+ rtw_disassoc_cmd(padapter, 0, true);
+ rtw_indicate_disconnect(padapter);
+ rtw_free_assoc_resources(padapter, 1);
+ rtw_pwr_wakeup(padapter);
+ }
+
+ _exit_critical_bh(&pmlmepriv->lock, &irqL);
+
+_func_exit_;
+
+ return true;
+}
+
+u8 rtw_set_802_11_bssid_list_scan(struct adapter *padapter, struct ndis_802_11_ssid *pssid, int ssid_max_num)
+{
+ unsigned long irqL;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ u8 res = true;
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("+rtw_set_802_11_bssid_list_scan(), fw_state =%x\n", get_fwstate(pmlmepriv)));
+
+ if (padapter == NULL) {
+ res = false;
+ goto exit;
+ }
+ if (!padapter->hw_init_completed) {
+ res = false;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n === rtw_set_802_11_bssid_list_scan:hw_init_completed == false ===\n"));
+ goto exit;
+ }
+
+ if ((check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING)) ||
+ (pmlmepriv->LinkDetectInfo.bBusyTraffic)) {
+ /* Scan or linking is in progress, do nothing. */
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("rtw_set_802_11_bssid_list_scan fail since fw_state = %x\n", get_fwstate(pmlmepriv)));
+ res = true;
+
+ if (check_fwstate(pmlmepriv, (_FW_UNDER_SURVEY|_FW_UNDER_LINKING)) == true) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n###_FW_UNDER_SURVEY|_FW_UNDER_LINKING\n\n"));
+ } else {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n###pmlmepriv->sitesurveyctrl.traffic_busy == true\n\n"));
+ }
+ } else {
+ if (rtw_is_scan_deny(padapter)) {
+ DBG_88E(FUNC_ADPT_FMT": scan deny\n", FUNC_ADPT_ARG(padapter));
+ indicate_wx_scan_complete_event(padapter);
+ return _SUCCESS;
+ }
+
+ _enter_critical_bh(&pmlmepriv->lock, &irqL);
+
+ res = rtw_sitesurvey_cmd(padapter, pssid, ssid_max_num, NULL, 0);
+
+ _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ }
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_set_802_11_authentication_mode(struct adapter *padapter, enum ndis_802_11_auth_mode authmode)
+{
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ int res;
+ u8 ret;
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_802_11_auth.mode(): mode =%x\n", authmode));
+
+ psecuritypriv->ndisauthtype = authmode;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("rtw_set_802_11_authentication_mode:psecuritypriv->ndisauthtype=%d",
+ psecuritypriv->ndisauthtype));
+
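+	/* ndisauthtype values above Ndis802_11AuthModeWPA imply 802.1X-based key management */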
+ if (psecuritypriv->ndisauthtype > 3)
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
+
+ res = rtw_set_auth(padapter, psecuritypriv);
+
+ if (res == _SUCCESS)
+ ret = true;
+ else
+ ret = false;
+
+_func_exit_;
+
+ return ret;
+}
+
+u8 rtw_set_802_11_add_wep(struct adapter *padapter, struct ndis_802_11_wep *wep)
+{
+ int keyid, res;
+ struct security_priv *psecuritypriv = &(padapter->securitypriv);
+ u8 ret = _SUCCESS;
+
+_func_enter_;
+
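+	/* bits 31-30 of KeyIndex are NDIS flag bits; the remaining bits select one of the four default WEP keys */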
+ keyid = wep->KeyIndex & 0x3fffffff;
+
+ if (keyid >= 4) {
+		RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("MgntActrtw_set_802_11_add_wep:keyid >= 4 => fail\n"));
+ ret = false;
+ goto exit;
+ }
+
+ switch (wep->KeyLength) {
+ case 5:
+ psecuritypriv->dot11PrivacyAlgrthm = _WEP40_;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("MgntActrtw_set_802_11_add_wep:wep->KeyLength = 5\n"));
+ break;
+ case 13:
+ psecuritypriv->dot11PrivacyAlgrthm = _WEP104_;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("MgntActrtw_set_802_11_add_wep:wep->KeyLength = 13\n"));
+ break;
+ default:
+ psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("MgntActrtw_set_802_11_add_wep:wep->KeyLength!= 5 or 13\n"));
+ break;
+ }
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+		 ("rtw_set_802_11_add_wep: before memcpy, wep->KeyLength = 0x%x wep->KeyIndex = 0x%x keyid =%x\n",
+ wep->KeyLength, wep->KeyIndex, keyid));
+
+ memcpy(&(psecuritypriv->dot11DefKey[keyid].skey[0]), &(wep->KeyMaterial), wep->KeyLength);
+
+ psecuritypriv->dot11DefKeylen[keyid] = wep->KeyLength;
+
+ psecuritypriv->dot11PrivacyKeyIndex = keyid;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("rtw_set_802_11_add_wep:security key material : %x %x %x %x %x %x %x %x %x %x %x %x %x\n",
+ psecuritypriv->dot11DefKey[keyid].skey[0],
+ psecuritypriv->dot11DefKey[keyid].skey[1],
+ psecuritypriv->dot11DefKey[keyid].skey[2],
+ psecuritypriv->dot11DefKey[keyid].skey[3],
+ psecuritypriv->dot11DefKey[keyid].skey[4],
+ psecuritypriv->dot11DefKey[keyid].skey[5],
+ psecuritypriv->dot11DefKey[keyid].skey[6],
+ psecuritypriv->dot11DefKey[keyid].skey[7],
+ psecuritypriv->dot11DefKey[keyid].skey[8],
+ psecuritypriv->dot11DefKey[keyid].skey[9],
+ psecuritypriv->dot11DefKey[keyid].skey[10],
+ psecuritypriv->dot11DefKey[keyid].skey[11],
+ psecuritypriv->dot11DefKey[keyid].skey[12]));
+
+ res = rtw_set_key(padapter, psecuritypriv, keyid, 1);
+
+ if (res == _FAIL)
+ ret = false;
+exit:
+_func_exit_;
+ return ret;
+}
+
+u8 rtw_set_802_11_remove_wep(struct adapter *padapter, u32 keyindex)
+{
+ u8 ret = _SUCCESS;
+
+_func_enter_;
+ if (keyindex >= 0x80000000 || padapter == NULL) {
+ ret = false;
+ goto exit;
+ } else {
+ int res;
+ struct security_priv *psecuritypriv = &(padapter->securitypriv);
+ if (keyindex < 4) {
+ _rtw_memset(&psecuritypriv->dot11DefKey[keyindex], 0, 16);
+ res = rtw_set_key(padapter, psecuritypriv, keyindex, 0);
+ psecuritypriv->dot11DefKeylen[keyindex] = 0;
+ if (res == _FAIL)
+ ret = _FAIL;
+ } else {
+ ret = _FAIL;
+ }
+ }
+exit:
+
+_func_exit_;
+ return ret;
+}
+
+u8 rtw_set_802_11_add_key(struct adapter *padapter, struct ndis_802_11_key *key)
+{
+ uint encryptionalgo;
+ u8 *pbssid;
+ struct sta_info *stainfo;
+ u8 bgroup = false;
+ u8 bgrouptkey = false;/* can be removed later */
+ u8 ret = _SUCCESS;
+
+_func_enter_;
+
+ if (((key->KeyIndex & 0x80000000) == 0) && ((key->KeyIndex & 0x40000000) > 0)) {
+ /* It is invalid to clear bit 31 and set bit 30. If the miniport driver encounters this combination, */
+ /* it must fail the request and return NDIS_STATUS_INVALID_DATA. */
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("rtw_set_802_11_add_key: ((key->KeyIndex & 0x80000000)==0)[=%d]",
+ (int)(key->KeyIndex & 0x80000000) == 0));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("rtw_set_802_11_add_key:((key->KeyIndex & 0x40000000)>0)[=%d]",
+ (int)(key->KeyIndex & 0x40000000) > 0));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("rtw_set_802_11_add_key: key->KeyIndex=%d\n",
+ (int)key->KeyIndex));
+ ret = _FAIL;
+ goto exit;
+ }
+
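+	/* NDIS KeyIndex flag bits, as handled below: bit 30 marks a pairwise key, */
+	/* bit 29 means KeyRSC carries a valid sequence counter, bit 28 selects the */
+	/* TX/RX MIC key ordering, and the low bits hold the key index itself. */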
+ if (key->KeyIndex & 0x40000000) {
+ /* Pairwise key */
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("OID_802_11_ADD_KEY: +++++ Pairwise key +++++\n"));
+
+ pbssid = get_bssid(&padapter->mlmepriv);
+ stainfo = rtw_get_stainfo(&padapter->stapriv, pbssid);
+
+ if ((stainfo != NULL) && (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("OID_802_11_ADD_KEY:(stainfo!=NULL)&&(Adapter->securitypriv.dot11AuthAlgrthm==dot11AuthAlgrthm_8021X)\n"));
+ encryptionalgo = stainfo->dot118021XPrivacy;
+ } else {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("OID_802_11_ADD_KEY: stainfo == NULL)||(Adapter->securitypriv.dot11AuthAlgrthm!= dot11AuthAlgrthm_8021X)\n"));
+ encryptionalgo = padapter->securitypriv.dot11PrivacyAlgrthm;
+ }
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("rtw_set_802_11_add_key: (encryptionalgo==%d)!\n",
+ encryptionalgo));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("rtw_set_802_11_add_key: (Adapter->securitypriv.dot11PrivacyAlgrthm==%d)!\n",
+ padapter->securitypriv.dot11PrivacyAlgrthm));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("rtw_set_802_11_add_key: (Adapter->securitypriv.dot11AuthAlgrthm==%d)!\n",
+ padapter->securitypriv.dot11AuthAlgrthm));
+
+		if (stainfo != NULL)
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("rtw_set_802_11_add_key: (stainfo->dot118021XPrivacy==%d)!\n",
+ stainfo->dot118021XPrivacy));
+
+ if (key->KeyIndex & 0x000000FF) {
+ /* The key index is specified in the lower 8 bits by values of zero to 255. */
+ /* The key index should be set to zero for a Pairwise key, and the driver should fail with */
+			/* NDIS_STATUS_INVALID_DATA if the lower 8 bits are not zero */
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, (" key->KeyIndex & 0x000000FF.\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* check BSSID */
+ if (IS_MAC_ADDRESS_BROADCAST(key->BSSID) == true) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("MacAddr_isBcst(key->BSSID)\n"));
+ ret = false;
+ goto exit;
+ }
+
+ /* Check key length for TKIP. */
+ if ((encryptionalgo == _TKIP_) && (key->KeyLength != 32)) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("TKIP KeyLength:0x%x != 32\n", key->KeyLength));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* Check key length for AES. */
+ if ((encryptionalgo == _AES_) && (key->KeyLength != 16)) {
+			/* Our supplicant, EAPPkt9x.vxd, cannot differentiate between the TKIP and AES cases. */
+ if (key->KeyLength == 32) {
+ key->KeyLength = 16;
+ } else {
+ ret = _FAIL;
+ goto exit;
+ }
+ }
+
+ /* Check key length for WEP. For NDTEST, 2005.01.27, by rcnjko. */
+ if ((encryptionalgo == _WEP40_ || encryptionalgo == _WEP104_) &&
+ (key->KeyLength != 5 && key->KeyLength != 13)) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("WEP KeyLength:0x%x != 5 or 13\n", key->KeyLength));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ bgroup = false;
+
+ /* Check the pairwise key. Added by Annie, 2005-07-06. */
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("------------------------------------------\n"));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("[Pairwise Key set]\n"));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("------------------------------------------\n"));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("key index: 0x%8x(0x%8x)\n", key->KeyIndex, (key->KeyIndex&0x3)));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("key Length: %d\n", key->KeyLength));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("------------------------------------------\n"));
+
+ } else {
+ /* Group key - KeyIndex(BIT30 == 0) */
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("OID_802_11_ADD_KEY: +++++ Group key +++++\n"));
+
+
+		/* when a WEP key is added through add_key and no encryption type was assigned beforehand */
+ if ((padapter->securitypriv.ndisauthtype <= 3) &&
+ (padapter->securitypriv.dot118021XGrpPrivacy == 0)) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("keylen =%d(Adapter->securitypriv.dot11PrivacyAlgrthm=%x )padapter->securitypriv.dot118021XGrpPrivacy(%x)\n",
+ key->KeyLength, padapter->securitypriv.dot11PrivacyAlgrthm,
+ padapter->securitypriv.dot118021XGrpPrivacy));
+ switch (key->KeyLength) {
+ case 5:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("Adapter->securitypriv.dot11PrivacyAlgrthm=%x key->KeyLength=%u\n",
+ padapter->securitypriv.dot11PrivacyAlgrthm, key->KeyLength));
+ break;
+ case 13:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("Adapter->securitypriv.dot11PrivacyAlgrthm=%x key->KeyLength=%u\n",
+ padapter->securitypriv.dot11PrivacyAlgrthm, key->KeyLength));
+ break;
+ default:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("Adapter->securitypriv.dot11PrivacyAlgrthm=%x key->KeyLength=%u\n",
+ padapter->securitypriv.dot11PrivacyAlgrthm, key->KeyLength));
+ break;
+ }
+
+ encryptionalgo = padapter->securitypriv.dot11PrivacyAlgrthm;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ (" Adapter->securitypriv.dot11PrivacyAlgrthm=%x\n",
+ padapter->securitypriv.dot11PrivacyAlgrthm));
+
+ } else {
+ encryptionalgo = padapter->securitypriv.dot118021XGrpPrivacy;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("(Adapter->securitypriv.dot11PrivacyAlgrthm=%x)encryptionalgo(%x)=padapter->securitypriv.dot118021XGrpPrivacy(%x)keylen=%d\n",
+ padapter->securitypriv.dot11PrivacyAlgrthm, encryptionalgo,
+ padapter->securitypriv.dot118021XGrpPrivacy, key->KeyLength));
+ }
+
+ if ((check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE) == true) && (IS_MAC_ADDRESS_BROADCAST(key->BSSID) == false)) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ (" IBSS but BSSID is not Broadcast Address.\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* Check key length for TKIP */
+ if ((encryptionalgo == _TKIP_) && (key->KeyLength != 32)) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ (" TKIP GTK KeyLength:%u != 32\n", key->KeyLength));
+ ret = _FAIL;
+ goto exit;
+ } else if (encryptionalgo == _AES_ && (key->KeyLength != 16 && key->KeyLength != 32)) {
+ /* Check key length for AES */
+ /* For NDTEST, we allow keylen = 32 in this case. 2005.01.27, by rcnjko. */
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("<=== SetInfo, OID_802_11_ADD_KEY: AES GTK KeyLength:%u != 16 or 32\n",
+ key->KeyLength));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* Change the key length for EAPPkt9x.vxd. Added by Annie, 2005-11-03. */
+ if ((encryptionalgo == _AES_) && (key->KeyLength == 32)) {
+ key->KeyLength = 16;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("AES key length changed: %u\n", key->KeyLength));
+ }
+
+		if (key->KeyIndex & 0x8000000) {/* error ??? likely meant 0x80000000 (the transmit-key bit) */
+ bgrouptkey = true;
+ }
+
+ if ((check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE)) &&
+ (check_fwstate(&padapter->mlmepriv, _FW_LINKED)))
+ bgrouptkey = true;
+ bgroup = true;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("------------------------------------------\n"));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("[Group Key set]\n"));
+		RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("------------------------------------------\n"));
+		RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("key index: 0x%8x(0x%8x)\n", key->KeyIndex, (key->KeyIndex&0x3)));
+		RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("key Length: %d\n", key->KeyLength));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("------------------------------------------\n"));
+ }
+
+ /* If WEP encryption algorithm, just call rtw_set_802_11_add_wep(). */
+ if ((padapter->securitypriv.dot11AuthAlgrthm != dot11AuthAlgrthm_8021X) &&
+ (encryptionalgo == _WEP40_ || encryptionalgo == _WEP104_)) {
+ u32 keyindex;
+ u32 len = FIELD_OFFSET(struct ndis_802_11_key, KeyMaterial) + key->KeyLength;
+ struct ndis_802_11_wep *wep = &padapter->securitypriv.ndiswep;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("OID_802_11_ADD_KEY: +++++ WEP key +++++\n"));
+
+ wep->Length = len;
+ keyindex = key->KeyIndex&0x7fffffff;
+		wep->KeyIndex = keyindex;
+ wep->KeyLength = key->KeyLength;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("OID_802_11_ADD_KEY:Before memcpy\n"));
+
+ memcpy(wep->KeyMaterial, key->KeyMaterial, key->KeyLength);
+ memcpy(&(padapter->securitypriv.dot11DefKey[keyindex].skey[0]), key->KeyMaterial, key->KeyLength);
+
+ padapter->securitypriv.dot11DefKeylen[keyindex] = key->KeyLength;
+ padapter->securitypriv.dot11PrivacyKeyIndex = keyindex;
+
+ ret = rtw_set_802_11_add_wep(padapter, wep);
+ goto exit;
+ }
+ if (key->KeyIndex & 0x20000000) {
+ /* SetRSC */
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("OID_802_11_ADD_KEY: +++++ SetRSC+++++\n"));
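+		/* keep only the 48-bit packet number portion of KeyRSC as the initial group RX/TX sequence counter */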
+ if (bgroup) {
+ unsigned long long keysrc = key->KeyRSC & 0x00FFFFFFFFFFFFULL;
+ memcpy(&padapter->securitypriv.dot11Grprxpn, &keysrc, 8);
+ } else {
+ unsigned long long keysrc = key->KeyRSC & 0x00FFFFFFFFFFFFULL;
+ memcpy(&padapter->securitypriv.dot11Grptxpn, &keysrc, 8);
+ }
+ }
+
+ /* Indicate this key idx is used for TX */
+ /* Save the key in KeyMaterial */
+ if (bgroup) { /* Group transmit key */
+ int res;
+
+ if (bgrouptkey)
+ padapter->securitypriv.dot118021XGrpKeyid = (u8)key->KeyIndex;
+ if ((key->KeyIndex&0x3) == 0) {
+ ret = _FAIL;
+ goto exit;
+ }
+ _rtw_memset(&padapter->securitypriv.dot118021XGrpKey[(u8)((key->KeyIndex) & 0x03)], 0, 16);
+ _rtw_memset(&padapter->securitypriv.dot118021XGrptxmickey[(u8)((key->KeyIndex) & 0x03)], 0, 16);
+ _rtw_memset(&padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)], 0, 16);
+
+ if ((key->KeyIndex & 0x10000000)) {
+ memcpy(&padapter->securitypriv.dot118021XGrptxmickey[(u8)((key->KeyIndex) & 0x03)], key->KeyMaterial + 16, 8);
+ memcpy(&padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)], key->KeyMaterial + 24, 8);
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("\n rtw_set_802_11_add_key:rx mic :0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+				 padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[0],
+				 padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[1],
+				 padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[2],
+				 padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[3],
+				 padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[4],
+				 padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[5],
+				 padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[6],
+				 padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[7]));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n rtw_set_802_11_add_key:set Group mic key!!!!!!!!\n"));
+ } else {
+ memcpy(&padapter->securitypriv.dot118021XGrptxmickey[(u8)((key->KeyIndex) & 0x03)], key->KeyMaterial + 24, 8);
+ memcpy(&padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)], key->KeyMaterial + 16, 8);
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("\n rtw_set_802_11_add_key:rx mic :0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+				 padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[0],
+				 padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[1],
+				 padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[2],
+				 padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[3],
+				 padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[4],
+				 padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[5],
+				 padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[6],
+				 padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[7]));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("\n rtw_set_802_11_add_key:set Group mic key!!!!!!!!\n"));
+ }
+
+ /* set group key by index */
+ memcpy(&padapter->securitypriv.dot118021XGrpKey[(u8)((key->KeyIndex) & 0x03)], key->KeyMaterial, key->KeyLength);
+
+ key->KeyIndex = key->KeyIndex & 0x03;
+
+ padapter->securitypriv.binstallGrpkey = true;
+
+ padapter->securitypriv.bcheck_grpkey = false;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("reset group key"));
+
+ res = rtw_set_key(padapter, &padapter->securitypriv, key->KeyIndex, 1);
+
+ if (res == _FAIL)
+ ret = _FAIL;
+
+ goto exit;
+
+ } else { /* Pairwise Key */
+ u8 res;
+
+ pbssid = get_bssid(&padapter->mlmepriv);
+ stainfo = rtw_get_stainfo(&padapter->stapriv, pbssid);
+
+ if (stainfo != NULL) {
+ _rtw_memset(&stainfo->dot118021x_UncstKey, 0, 16);/* clear keybuffer */
+
+ memcpy(&stainfo->dot118021x_UncstKey, key->KeyMaterial, 16);
+
+ if (encryptionalgo == _TKIP_) {
+ padapter->securitypriv.busetkipkey = false;
+
+ /* _set_timer(&padapter->securitypriv.tkip_timer, 50); */
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n========== _set_timer\n"));
+
+ /* if TKIP, save the Receive/Transmit MIC key in KeyMaterial[128-255] */
+ if ((key->KeyIndex & 0x10000000)) {
+ memcpy(&stainfo->dot11tkiptxmickey, key->KeyMaterial + 16, 8);
+ memcpy(&stainfo->dot11tkiprxmickey, key->KeyMaterial + 24, 8);
+
+ } else {
+ memcpy(&stainfo->dot11tkiptxmickey, key->KeyMaterial + 24, 8);
+ memcpy(&stainfo->dot11tkiprxmickey, key->KeyMaterial + 16, 8);
+ }
+ }
+
+
+ /* Set key to CAM through H2C command */
+ if (bgrouptkey) { /* never go to here */
+ res = rtw_setstakey_cmd(padapter, (unsigned char *)stainfo, false);
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n rtw_set_802_11_add_key:rtw_setstakey_cmd(group)\n"));
+ } else {
+ res = rtw_setstakey_cmd(padapter, (unsigned char *)stainfo, true);
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n rtw_set_802_11_add_key:rtw_setstakey_cmd(unicast)\n"));
+ }
+ if (!res)
+ ret = _FAIL;
+ }
+ }
+exit:
+
+_func_exit_;
+ return ret;
+}
+
+u8 rtw_set_802_11_remove_key(struct adapter *padapter, struct ndis_802_11_remove_key *key)
+{
+ u8 *pbssid;
+ struct sta_info *stainfo;
+	u8 bgroup = (key->KeyIndex & 0x40000000) > 0 ? false : true;
+ u8 keyIndex = (u8)key->KeyIndex & 0x03;
+ u8 ret = _SUCCESS;
+
+_func_enter_;
+
+ if ((key->KeyIndex & 0xbffffffc) > 0) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ if (bgroup) {
+ /* clear group key by index */
+
+ _rtw_memset(&padapter->securitypriv.dot118021XGrpKey[keyIndex], 0, 16);
+
+ /* \todo Send a H2C Command to Firmware for removing this Key in CAM Entry. */
+ } else {
+ pbssid = get_bssid(&padapter->mlmepriv);
+ stainfo = rtw_get_stainfo(&padapter->stapriv, pbssid);
+ if (stainfo) {
+ /* clear key by BSSID */
+ _rtw_memset(&stainfo->dot118021x_UncstKey, 0, 16);
+
+ /* \todo Send a H2C Command to Firmware for disable this Key in CAM Entry. */
+ } else {
+ ret = _FAIL;
+ goto exit;
+ }
+ }
+exit:
+
+_func_exit_;
+ return ret;
+}
+
+/*
+* rtw_get_cur_max_rate -
+* @adapter: pointer to struct adapter structure
+*
+* Return 0, or the current maximum rate in units of 100 kbps
+*/
+u16 rtw_get_cur_max_rate(struct adapter *adapter)
+{
+ int i = 0;
+ u8 *p;
+ u16 rate = 0, max_rate = 0;
+ struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct registry_priv *pregistrypriv = &adapter->registrypriv;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
+ struct rtw_ieee80211_ht_cap *pht_capie;
+ u8 rf_type = 0;
+ u8 bw_40MHz = 0, short_GI_20 = 0, short_GI_40 = 0;
+ u16 mcs_rate = 0;
+ u32 ht_ielen = 0;
+
+ if (adapter->registrypriv.mp_mode == 1) {
+ if (check_fwstate(pmlmepriv, WIFI_MP_STATE))
+ return 0;
+ }
+
+ if ((!check_fwstate(pmlmepriv, _FW_LINKED)) &&
+ (!check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)))
+ return 0;
+
+ if (pmlmeext->cur_wireless_mode & (WIRELESS_11_24N|WIRELESS_11_5N)) {
+ p = rtw_get_ie(&pcur_bss->IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pcur_bss->IELength-12);
+ if (p && ht_ielen > 0) {
+ pht_capie = (struct rtw_ieee80211_ht_cap *)(p+2);
+
+ memcpy(&mcs_rate, pht_capie->supp_mcs_set, 2);
+
+			/* cur_bwmode is updated by beacon, pmlmeinfo is updated by association response */
+ bw_40MHz = (pmlmeext->cur_bwmode && (HT_INFO_HT_PARAM_REC_TRANS_CHNL_WIDTH & pmlmeinfo->HT_info.infos[0])) ? 1 : 0;
+
+ short_GI_20 = (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & IEEE80211_HT_CAP_SGI_20) ? 1 : 0;
+ short_GI_40 = (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & IEEE80211_HT_CAP_SGI_40) ? 1 : 0;
+
+ rtw_hal_get_hwreg(adapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+ max_rate = rtw_mcs_rate(
+ rf_type,
+ bw_40MHz & (pregistrypriv->cbw40_enable),
+ short_GI_20,
+ short_GI_40,
+ pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate
+ );
+ }
+ } else {
+ while ((pcur_bss->SupportedRates[i] != 0) && (pcur_bss->SupportedRates[i] != 0xFF)) {
+ rate = pcur_bss->SupportedRates[i]&0x7F;
+ if (rate > max_rate)
+ max_rate = rate;
+ i++;
+ }
+
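+		/* SupportedRates entries are in 500 kbps units (bit 7, masked off above, is the basic-rate flag); convert to the 100 kbps unit returned here */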
+ max_rate = max_rate*10/2;
+ }
+
+ return max_rate;
+}
+
+/*
+* rtw_set_scan_mode -
+* @adapter: pointer to struct adapter structure
+* @scan_mode: SCAN_ACTIVE or SCAN_PASSIVE
+*
+* Return _SUCCESS or _FAIL
+*/
+int rtw_set_scan_mode(struct adapter *adapter, enum rt_scan_type scan_mode)
+{
+ if (scan_mode != SCAN_ACTIVE && scan_mode != SCAN_PASSIVE)
+ return _FAIL;
+
+ adapter->mlmepriv.scan_mode = scan_mode;
+
+ return _SUCCESS;
+}
+
+/*
+* rtw_set_channel_plan -
+* @adapter: pointer to struct adapter structure
+* @channel_plan: channel plan to apply (handled by cmd_thread to sync with scan)
+*
+* Return _SUCCESS or _FAIL
+*/
+int rtw_set_channel_plan(struct adapter *adapter, u8 channel_plan)
+{
+ /* handle by cmd_thread to sync with scan operation */
+ return rtw_set_chplan_cmd(adapter, channel_plan, 1);
+}
+
+/*
+* rtw_set_country -
+* @adapter: pointer to struct adapter structure
+* @country_code: string of country code
+*
+* Return _SUCCESS or _FAIL
+*/
+int rtw_set_country(struct adapter *adapter, const char *country_code)
+{
+ int channel_plan = RT_CHANNEL_DOMAIN_WORLD_WIDE_5G;
+
+ DBG_88E("%s country_code:%s\n", __func__, country_code);
+
+ /* TODO: should have a table to match country code and RT_CHANNEL_DOMAIN */
+ /* TODO: should consider 2-character and 3-character country code */
+ if (0 == strcmp(country_code, "US"))
+ channel_plan = RT_CHANNEL_DOMAIN_FCC;
+ else if (0 == strcmp(country_code, "EU"))
+ channel_plan = RT_CHANNEL_DOMAIN_ETSI;
+ else if (0 == strcmp(country_code, "JP"))
+ channel_plan = RT_CHANNEL_DOMAIN_MKK;
+ else if (0 == strcmp(country_code, "CN"))
+ channel_plan = RT_CHANNEL_DOMAIN_CHINA;
+ else
+ DBG_88E("%s unknown country_code:%s\n", __func__, country_code);
+
+ return rtw_set_channel_plan(adapter, channel_plan);
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_iol.c b/drivers/staging/rtl8188eu/core/rtw_iol.c
new file mode 100644
index 0000000..e6fdd32
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_iol.c
@@ -0,0 +1,209 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#include <rtw_iol.h>
+
+struct xmit_frame *rtw_IOL_accquire_xmit_frame(struct adapter *adapter)
+{
+ struct xmit_frame *xmit_frame;
+ struct xmit_buf *xmitbuf;
+ struct pkt_attrib *pattrib;
+ struct xmit_priv *pxmitpriv = &(adapter->xmitpriv);
+
+ xmit_frame = rtw_alloc_xmitframe(pxmitpriv);
+ if (xmit_frame == NULL) {
+ DBG_88E("%s rtw_alloc_xmitframe return null\n", __func__);
+ goto exit;
+ }
+
+ xmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
+ if (xmitbuf == NULL) {
+ DBG_88E("%s rtw_alloc_xmitbuf return null\n", __func__);
+ rtw_free_xmitframe(pxmitpriv, xmit_frame);
+ xmit_frame = NULL;
+ goto exit;
+ }
+
+ xmit_frame->frame_tag = MGNT_FRAMETAG;
+ xmit_frame->pxmitbuf = xmitbuf;
+ xmit_frame->buf_addr = xmitbuf->pbuf;
+ xmitbuf->priv_data = xmit_frame;
+
+ pattrib = &xmit_frame->attrib;
+ update_mgntframe_attrib(adapter, pattrib);
+ pattrib->qsel = 0x10;/* Beacon */
+ pattrib->subtype = WIFI_BEACON;
+ pattrib->pktlen = 0;
+ pattrib->last_txcmdsz = 0;
+exit:
+ return xmit_frame;
+}
+
+int rtw_IOL_append_cmds(struct xmit_frame *xmit_frame, u8 *IOL_cmds, u32 cmd_len)
+{
+ struct pkt_attrib *pattrib = &xmit_frame->attrib;
+ u16 buf_offset;
+ u32 ori_len;
+
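+	/* ori_len = bytes already used in the xmit buffer: the TX descriptor plus any previously appended commands */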
+ buf_offset = TXDESC_OFFSET;
+ ori_len = buf_offset+pattrib->pktlen;
+
+ /* check if the io_buf can accommodate new cmds */
+ if (ori_len + cmd_len + 8 > MAX_XMITBUF_SZ) {
+		DBG_88E("%s %u is larger than MAX_XMITBUF_SZ:%u, can't accommodate new cmds\n",
+			__func__, ori_len + cmd_len + 8, MAX_XMITBUF_SZ);
+ return _FAIL;
+ }
+
+ memcpy(xmit_frame->buf_addr + buf_offset + pattrib->pktlen, IOL_cmds, cmd_len);
+ pattrib->pktlen += cmd_len;
+ pattrib->last_txcmdsz += cmd_len;
+
+ return _SUCCESS;
+}
+
+bool rtw_IOL_applied(struct adapter *adapter)
+{
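+	/* registry fw_iol: 1 = always use IO offload, 2 = use it only when the USB link is not high-speed */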
+ if (1 == adapter->registrypriv.fw_iol)
+ return true;
+
+ if ((2 == adapter->registrypriv.fw_iol) && (!adapter_to_dvobj(adapter)->ishighspeed))
+ return true;
+ return false;
+}
+
+int rtw_IOL_exec_cmds_sync(struct adapter *adapter, struct xmit_frame *xmit_frame, u32 max_wating_ms, u32 bndy_cnt)
+{
+ return rtw_hal_iol_cmd(adapter, xmit_frame, max_wating_ms, bndy_cnt);
+}
+
+int rtw_IOL_append_LLT_cmd(struct xmit_frame *xmit_frame, u8 page_boundary)
+{
+ return _SUCCESS;
+}
+
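+/* The register-write helpers below build an 8-byte ioreg_cfg record */
+/* (length, command id, address, data); when a partial-write mask is given */
+/* the record grows to 12 bytes so the mask can be carried as well. */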
+int _rtw_IOL_append_WB_cmd(struct xmit_frame *xmit_frame, u16 addr, u8 value, u8 mask)
+{
+ struct ioreg_cfg cmd = {8, IOREG_CMD_WB_REG, 0x0, 0x0, 0x0};
+
+ cmd.address = cpu_to_le16(addr);
+ cmd.data = cpu_to_le32(value);
+
+ if (mask != 0xFF) {
+ cmd.length = 12;
+ cmd.mask = cpu_to_le32(mask);
+ }
+ return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, cmd.length);
+}
+
+int _rtw_IOL_append_WW_cmd(struct xmit_frame *xmit_frame, u16 addr, u16 value, u16 mask)
+{
+ struct ioreg_cfg cmd = {8, IOREG_CMD_WW_REG, 0x0, 0x0, 0x0};
+
+ cmd.address = cpu_to_le16(addr);
+ cmd.data = cpu_to_le32(value);
+
+ if (mask != 0xFFFF) {
+ cmd.length = 12;
+ cmd.mask = cpu_to_le32(mask);
+ }
+ return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, cmd.length);
+}
+
+int _rtw_IOL_append_WD_cmd(struct xmit_frame *xmit_frame, u16 addr, u32 value, u32 mask)
+{
+ struct ioreg_cfg cmd = {8, IOREG_CMD_WD_REG, 0x0, 0x0, 0x0};
+
+ cmd.address = cpu_to_le16(addr);
+ cmd.data = cpu_to_le32(value);
+
+ if (mask != 0xFFFFFFFF) {
+ cmd.length = 12;
+ cmd.mask = cpu_to_le32(mask);
+ }
+ return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, cmd.length);
+}
+
+int _rtw_IOL_append_WRF_cmd(struct xmit_frame *xmit_frame, u8 rf_path, u16 addr, u32 value, u32 mask)
+{
+ struct ioreg_cfg cmd = {8, IOREG_CMD_W_RF, 0x0, 0x0, 0x0};
+
+ cmd.address = cpu_to_le16((rf_path<<8) | ((addr) & 0xFF));
+ cmd.data = cpu_to_le32(value);
+
+ if (mask != 0x000FFFFF) {
+ cmd.length = 12;
+ cmd.mask = cpu_to_le32(mask);
+ }
+ return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, cmd.length);
+}
+
+int rtw_IOL_append_DELAY_US_cmd(struct xmit_frame *xmit_frame, u16 us)
+{
+ struct ioreg_cfg cmd = {4, IOREG_CMD_DELAY_US, 0x0, 0x0, 0x0};
+ cmd.address = cpu_to_le16(us);
+
+ return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, 4);
+}
+
+int rtw_IOL_append_DELAY_MS_cmd(struct xmit_frame *xmit_frame, u16 ms)
+{
+ struct ioreg_cfg cmd = {4, IOREG_CMD_DELAY_US, 0x0, 0x0, 0x0};
+
+ cmd.address = cpu_to_le16(ms);
+ return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, 4);
+}
+
+int rtw_IOL_append_END_cmd(struct xmit_frame *xmit_frame)
+{
+ struct ioreg_cfg cmd = {4, IOREG_CMD_END, cpu_to_le16(0xFFFF), cpu_to_le32(0xFF), 0x0};
+
+ return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, 4);
+}
+
+u8 rtw_IOL_cmd_boundary_handle(struct xmit_frame *pxmit_frame)
+{
+ u8 is_cmd_bndy = false;
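+	/* If appending one more command would cross a 256-byte boundary of the */
+	/* IO-offload buffer, close the current block with an END command and round */
+	/* the packet length up to the next 256-byte multiple (the 32-byte offset */
+	/* presumably accounts for the TX descriptor in front of the payload). */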
+ if (((pxmit_frame->attrib.pktlen+32)%256) + 8 >= 256) {
+ rtw_IOL_append_END_cmd(pxmit_frame);
+ pxmit_frame->attrib.pktlen = ((((pxmit_frame->attrib.pktlen+32)/256)+1)*256);
+
+ pxmit_frame->attrib.last_txcmdsz = pxmit_frame->attrib.pktlen;
+ is_cmd_bndy = true;
+ }
+ return is_cmd_bndy;
+}
+
+void rtw_IOL_cmd_buf_dump(struct adapter *Adapter, int buf_len, u8 *pbuf)
+{
+ int i;
+ int j = 1;
+
+ pr_info("###### %s ######\n", __func__);
+ for (i = 0; i < buf_len; i++) {
+ printk("%02x-", *(pbuf+i));
+
+ if (j%32 == 0)
+ printk("\n");
+ j++;
+ }
+ printk("\n");
+ pr_info("=============ioreg_cmd len=%d===============\n", buf_len);
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_led.c b/drivers/staging/rtl8188eu/core/rtw_led.c
new file mode 100644
index 0000000..afac537
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_led.c
@@ -0,0 +1,1692 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#include <drv_types.h>
+#include "rtw_led.h"
+
+/* */
+/* Description: */
+/* Callback function of LED BlinkTimer, */
+/*		it just schedules the corresponding BlinkWorkItem/led_blink_hdl	*/
+/* */
+void BlinkTimerCallback(void *data)
+{
+ struct LED_871x *pLed = (struct LED_871x *)data;
+ struct adapter *padapter = pLed->padapter;
+
+ if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
+ return;
+
+ _set_workitem(&(pLed->BlinkWorkItem));
+}
+
+/* */
+/* Description: */
+/* Callback function of LED BlinkWorkItem. */
+/*		We dispatch the actual LED blink action according to LedStrategy.	*/
+/* */
+void BlinkWorkItemCallback(struct work_struct *work)
+{
+ struct LED_871x *pLed = container_of(work, struct LED_871x, BlinkWorkItem);
+ BlinkHandler(pLed);
+}
+
+/* */
+/* Description: */
+/* Reset status of LED_871x object. */
+/* */
+void ResetLedStatus(struct LED_871x *pLed)
+{
+ pLed->CurrLedState = RTW_LED_OFF; /* Current LED state. */
+ pLed->bLedOn = false; /* true if LED is ON, false if LED is OFF. */
+
+	pLed->bLedBlinkInProgress = false; /* true if it is blinking, false otherwise. */
+ pLed->bLedWPSBlinkInProgress = false;
+
+ pLed->BlinkTimes = 0; /* Number of times to toggle led state for blinking. */
+	pLed->BlinkingLedState = LED_UNKNOWN; /* Next state for blinking, either RTW_LED_ON or RTW_LED_OFF. */
+
+ pLed->bLedNoLinkBlinkInProgress = false;
+ pLed->bLedLinkBlinkInProgress = false;
+ pLed->bLedStartToLinkBlinkInProgress = false;
+ pLed->bLedScanBlinkInProgress = false;
+}
+
+/* Description: */
+/* Initialize an LED_871x object. */
+void InitLed871x(struct adapter *padapter, struct LED_871x *pLed, enum LED_PIN_871x LedPin)
+{
+ pLed->padapter = padapter;
+ pLed->LedPin = LedPin;
+
+ ResetLedStatus(pLed);
+
+ _init_timer(&(pLed->BlinkTimer), padapter->pnetdev, BlinkTimerCallback, pLed);
+
+ _init_workitem(&(pLed->BlinkWorkItem), BlinkWorkItemCallback, pLed);
+}
+
+
+/* */
+/* Description: */
+/* DeInitialize an LED_871x object. */
+/* */
+void DeInitLed871x(struct LED_871x *pLed)
+{
+ _cancel_workitem_sync(&(pLed->BlinkWorkItem));
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ ResetLedStatus(pLed);
+}
+
+/* */
+/* Description: */
+/* Implementation of LED blinking behavior. */
+/*		It toggles the LED and schedules the corresponding timer if necessary.	*/
+/* */
+
+static void SwLedBlink(struct LED_871x *pLed)
+{
+ struct adapter *padapter = pLed->padapter;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ u8 bStopBlinking = false;
+
+ /* Change LED according to BlinkingLedState specified. */
+ if (pLed->BlinkingLedState == RTW_LED_ON) {
+ SwLedOn(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
+ } else {
+ SwLedOff(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
+ }
+
+ /* Determine if we shall change LED state again. */
+ pLed->BlinkTimes--;
+ switch (pLed->CurrLedState) {
+ case LED_BLINK_NORMAL:
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ break;
+ case LED_BLINK_StartToBlink:
+ if (check_fwstate(pmlmepriv, _FW_LINKED) && check_fwstate(pmlmepriv, WIFI_STATION_STATE))
+ bStopBlinking = true;
+ if (check_fwstate(pmlmepriv, _FW_LINKED) &&
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
+ check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)))
+ bStopBlinking = true;
+ else if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ break;
+ case LED_BLINK_WPS:
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ break;
+ default:
+ bStopBlinking = true;
+ break;
+ }
+
+ if (bStopBlinking) {
+ /* if (padapter->pwrctrlpriv.cpwm >= PS_STATE_S2) */
+ if (0) {
+ SwLedOff(padapter, pLed);
+ } else if ((check_fwstate(pmlmepriv, _FW_LINKED)) && (!pLed->bLedOn)) {
+ SwLedOn(padapter, pLed);
+ } else if ((check_fwstate(pmlmepriv, _FW_LINKED)) && pLed->bLedOn) {
+ SwLedOff(padapter, pLed);
+ }
+ pLed->BlinkTimes = 0;
+ pLed->bLedBlinkInProgress = false;
+ } else {
+ /* Assign LED state to toggle. */
+ if (pLed->BlinkingLedState == RTW_LED_ON)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+
+ /* Schedule a timer to toggle LED state. */
+ switch (pLed->CurrLedState) {
+ case LED_BLINK_NORMAL:
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NORMAL_INTERVAL);
+ break;
+ case LED_BLINK_SLOWLY:
+ case LED_BLINK_StartToBlink:
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SLOWLY_INTERVAL);
+ break;
+		case LED_BLINK_WPS:
+			_set_timer(&(pLed->BlinkTimer), LED_BLINK_LONG_INTERVAL);
+			break;
+ default:
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SLOWLY_INTERVAL);
+ break;
+ }
+ }
+}
+
+static void SwLedBlink1(struct LED_871x *pLed)
+{
+ struct adapter *padapter = pLed->padapter;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ u8 bStopBlinking = false;
+
+ /* Change LED according to BlinkingLedState specified. */
+ if (pLed->BlinkingLedState == RTW_LED_ON) {
+ SwLedOn(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
+ } else {
+ SwLedOff(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
+ }
+
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff(padapter, pLed);
+ ResetLedStatus(pLed);
+ return;
+ }
+
+ switch (pLed->CurrLedState) {
+ case LED_BLINK_SLOWLY:
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ break;
+ case LED_BLINK_NORMAL:
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_LINK_INTERVAL_ALPHA);
+ break;
+ case LED_BLINK_SCAN:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ if (bStopBlinking) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->bLedLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_NORMAL;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_LINK_INTERVAL_ALPHA);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ } else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ }
+ pLed->bLedScanBlinkInProgress = false;
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_BLINK_TXRX:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ if (bStopBlinking) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->bLedLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_NORMAL;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_LINK_INTERVAL_ALPHA);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ } else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ }
+ pLed->BlinkTimes = 0;
+ pLed->bLedBlinkInProgress = false;
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_FASTER_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_BLINK_WPS:
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ break;
+ case LED_BLINK_WPS_STOP: /* WPS success */
+ if (pLed->BlinkingLedState == RTW_LED_ON)
+ bStopBlinking = false;
+ else
+ bStopBlinking = true;
+
+ if (bStopBlinking) {
+ pLed->bLedLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_NORMAL;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_LINK_INTERVAL_ALPHA);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+
+ pLed->bLedWPSBlinkInProgress = false;
+ } else {
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void SwLedBlink2(struct LED_871x *pLed)
+{
+ struct adapter *padapter = pLed->padapter;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ u8 bStopBlinking = false;
+
+ /* Change LED according to BlinkingLedState specified. */
+ if (pLed->BlinkingLedState == RTW_LED_ON) {
+ SwLedOn(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
+ } else {
+ SwLedOff(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
+ }
+
+ switch (pLed->CurrLedState) {
+ case LED_BLINK_SCAN:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ if (bStopBlinking) {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff(padapter, pLed);
+ } else if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ SwLedOn(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("stop scan blink CurrLedState %d\n", pLed->CurrLedState));
+
+ } else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ SwLedOff(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("stop scan blink CurrLedState %d\n", pLed->CurrLedState));
+ }
+ pLed->bLedScanBlinkInProgress = false;
+ } else {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff(padapter, pLed);
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ }
+ }
+ break;
+ case LED_BLINK_TXRX:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ if (bStopBlinking) {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff(padapter, pLed);
+ } else if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ SwLedOn(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("stop CurrLedState %d\n", pLed->CurrLedState));
+ } else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ SwLedOff(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("stop CurrLedState %d\n", pLed->CurrLedState));
+ }
+ pLed->bLedBlinkInProgress = false;
+ } else {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff(padapter, pLed);
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_FASTER_INTERVAL_ALPHA);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void SwLedBlink3(struct LED_871x *pLed)
+{
+ struct adapter *padapter = pLed->padapter;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ u8 bStopBlinking = false;
+
+ /* Change LED according to BlinkingLedState specified. */
+ if (pLed->BlinkingLedState == RTW_LED_ON) {
+ SwLedOn(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
+ } else {
+ if (pLed->CurrLedState != LED_BLINK_WPS_STOP)
+ SwLedOff(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
+ }
+
+ switch (pLed->CurrLedState) {
+ case LED_BLINK_SCAN:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ if (bStopBlinking) {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff(padapter, pLed);
+ } else if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ if (!pLed->bLedOn)
+ SwLedOn(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ } else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ if (pLed->bLedOn)
+ SwLedOff(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ }
+ pLed->bLedScanBlinkInProgress = false;
+ } else {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff(padapter, pLed);
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ }
+ }
+ break;
+ case LED_BLINK_TXRX:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ if (bStopBlinking) {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff(padapter, pLed);
+ } else if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ if (!pLed->bLedOn)
+ SwLedOn(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ } else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+
+ if (pLed->bLedOn)
+ SwLedOff(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ }
+ pLed->bLedBlinkInProgress = false;
+ } else {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff(padapter, pLed);
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_FASTER_INTERVAL_ALPHA);
+ }
+ }
+ break;
+ case LED_BLINK_WPS:
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ break;
+ case LED_BLINK_WPS_STOP: /* WPS success */
+ if (pLed->BlinkingLedState == RTW_LED_ON) {
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA);
+ bStopBlinking = false;
+ } else {
+ bStopBlinking = true;
+ }
+ if (bStopBlinking) {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff(padapter, pLed);
+ } else {
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ SwLedOn(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ }
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void SwLedBlink4(struct LED_871x *pLed)
+{
+ struct adapter *padapter = pLed->padapter;
+ struct led_priv *ledpriv = &(padapter->ledpriv);
+ struct LED_871x *pLed1 = &(ledpriv->SwLed1);
+ u8 bStopBlinking = false;
+
+ /* Change LED according to BlinkingLedState specified. */
+ if (pLed->BlinkingLedState == RTW_LED_ON) {
+ SwLedOn(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
+ } else {
+ SwLedOff(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
+ }
+
+ if (!pLed1->bLedWPSBlinkInProgress && pLed1->BlinkingLedState == LED_UNKNOWN) {
+ pLed1->BlinkingLedState = RTW_LED_OFF;
+ pLed1->CurrLedState = RTW_LED_OFF;
+ SwLedOff(padapter, pLed1);
+ }
+
+ switch (pLed->CurrLedState) {
+ case LED_BLINK_SLOWLY:
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ break;
+ case LED_BLINK_StartToBlink:
+ if (pLed->bLedOn) {
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SLOWLY_INTERVAL);
+ } else {
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NORMAL_INTERVAL);
+ }
+ break;
+ case LED_BLINK_SCAN:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0)
+			bStopBlinking = true;
+ if (bStopBlinking) {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
+ SwLedOff(padapter, pLed);
+ } else {
+ pLed->bLedNoLinkBlinkInProgress = false;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ }
+ pLed->bLedScanBlinkInProgress = false;
+ } else {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
+ SwLedOff(padapter, pLed);
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ }
+ }
+ break;
+ case LED_BLINK_TXRX:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ if (bStopBlinking) {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
+ SwLedOff(padapter, pLed);
+ } else {
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ }
+ pLed->bLedBlinkInProgress = false;
+ } else {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
+ SwLedOff(padapter, pLed);
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_FASTER_INTERVAL_ALPHA);
+ }
+ }
+ break;
+ case LED_BLINK_WPS:
+ if (pLed->bLedOn) {
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SLOWLY_INTERVAL);
+ } else {
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NORMAL_INTERVAL);
+ }
+ break;
+ case LED_BLINK_WPS_STOP: /* WPS authentication fail */
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NORMAL_INTERVAL);
+ break;
+ case LED_BLINK_WPS_STOP_OVERLAP: /* WPS session overlap */
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0) {
+ if (pLed->bLedOn)
+ pLed->BlinkTimes = 1;
+ else
+ bStopBlinking = true;
+ }
+
+ if (bStopBlinking) {
+ pLed->BlinkTimes = 10;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_LINK_INTERVAL_ALPHA);
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NORMAL_INTERVAL);
+ }
+ break;
+ default:
+ break;
+ }
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("SwLedBlink4 CurrLedState %d\n", pLed->CurrLedState));
+}
+
+static void SwLedBlink5(struct LED_871x *pLed)
+{
+ struct adapter *padapter = pLed->padapter;
+ u8 bStopBlinking = false;
+
+ /* Change LED according to BlinkingLedState specified. */
+ if (pLed->BlinkingLedState == RTW_LED_ON) {
+ SwLedOn(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
+ } else {
+ SwLedOff(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
+ }
+
+ switch (pLed->CurrLedState) {
+ case LED_BLINK_SCAN:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+
+ if (bStopBlinking) {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ if (pLed->bLedOn)
+ SwLedOff(padapter, pLed);
+ } else {
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ if (!pLed->bLedOn)
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_FASTER_INTERVAL_ALPHA);
+ }
+
+ pLed->bLedScanBlinkInProgress = false;
+ } else {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
+ SwLedOff(padapter, pLed);
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ }
+ }
+ break;
+ case LED_BLINK_TXRX:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+
+ if (bStopBlinking) {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ if (pLed->bLedOn)
+ SwLedOff(padapter, pLed);
+ } else {
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ if (!pLed->bLedOn)
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_FASTER_INTERVAL_ALPHA);
+ }
+
+ pLed->bLedBlinkInProgress = false;
+ } else {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
+ SwLedOff(padapter, pLed);
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_FASTER_INTERVAL_ALPHA);
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("SwLedBlink5 CurrLedState %d\n", pLed->CurrLedState));
+}
+
+static void SwLedBlink6(struct LED_871x *pLed)
+{
+ struct adapter *padapter = pLed->padapter;
+
+ /* Change LED according to BlinkingLedState specified. */
+ if (pLed->BlinkingLedState == RTW_LED_ON) {
+ SwLedOn(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
+ } else {
+ SwLedOff(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
+ }
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("<==== blink6\n"));
+}
+
+ /* ALPHA, added by chiyoko, 20090106 */
+static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAction)
+{
+ struct led_priv *ledpriv = &(padapter->ledpriv);
+ struct LED_871x *pLed = &(ledpriv->SwLed0);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
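+	/* Single-LED (SwLed0) strategy: each control event cancels whatever blink is currently in progress (unless a higher-priority WPS/scan blink is running) and then starts its own pattern. */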
+ switch (LedAction) {
+ case LED_CTL_POWER_ON:
+ case LED_CTL_START_TO_LINK:
+ case LED_CTL_NO_LINK:
+ if (!pLed->bLedNoLinkBlinkInProgress) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
+ return;
+ if (pLed->bLedLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_LINK:
+ if (!pLed->bLedLinkBlinkInProgress) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
+ return;
+ if (pLed->bLedNoLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ pLed->bLedLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_NORMAL;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_LINK_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_SITE_SURVEY:
+ if ((pmlmepriv->LinkDetectInfo.bBusyTraffic) && (check_fwstate(pmlmepriv, _FW_LINKED))) {
+ ;
+ } else if (!pLed->bLedScanBlinkInProgress) {
+ if (IS_LED_WPS_BLINKING(pLed))
+ return;
+ if (pLed->bLedNoLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ pLed->bLedScanBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SCAN;
+ pLed->BlinkTimes = 24;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_TX:
+ case LED_CTL_RX:
+ if (!pLed->bLedBlinkInProgress) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
+ return;
+ if (pLed->bLedNoLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedLinkBlinkInProgress = false;
+ }
+ pLed->bLedBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_TXRX;
+ pLed->BlinkTimes = 2;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_FASTER_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_START_WPS: /* wait until xinpin finish */
+ case LED_CTL_START_WPS_BOTTON:
+ if (!pLed->bLedWPSBlinkInProgress) {
+ if (pLed->bLedNoLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ pLed->bLedWPSBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_WPS;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_STOP_WPS:
+ if (pLed->bLedNoLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ if (pLed->bLedWPSBlinkInProgress)
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ else
+ pLed->bLedWPSBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_WPS_STOP;
+ if (pLed->bLedOn) {
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA);
+ } else {
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), 0);
+ }
+ break;
+ case LED_CTL_STOP_WPS_FAIL:
+ if (pLed->bLedWPSBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ break;
+ case LED_CTL_POWER_OFF:
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ if (pLed->bLedNoLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedWPSBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ SwLedOff(padapter, pLed);
+ break;
+ default:
+ break;
+ }
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Led %d\n", pLed->CurrLedState));
+}
+
+/* Arcadyan/Sitecom, added by chiyoko, 20090216 */
+static void SwLedControlMode2(struct adapter *padapter, enum LED_CTL_MODE LedAction)
+{
+ struct led_priv *ledpriv = &(padapter->ledpriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct LED_871x *pLed = &(ledpriv->SwLed0);
+
+ switch (LedAction) {
+ case LED_CTL_SITE_SURVEY:
+ if (pmlmepriv->LinkDetectInfo.bBusyTraffic) {
+ } else if (!pLed->bLedScanBlinkInProgress) {
+ if (IS_LED_WPS_BLINKING(pLed))
+ return;
+
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ pLed->bLedScanBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SCAN;
+ pLed->BlinkTimes = 24;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_TX:
+ case LED_CTL_RX:
+ if ((!pLed->bLedBlinkInProgress) && (check_fwstate(pmlmepriv, _FW_LINKED))) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
+ return;
+ pLed->bLedBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_TXRX;
+ pLed->BlinkTimes = 2;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_FASTER_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_LINK:
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ _set_timer(&(pLed->BlinkTimer), 0);
+ break;
+ case LED_CTL_START_WPS: /* wait until xinpin finish */
+ case LED_CTL_START_WPS_BOTTON:
+ if (!pLed->bLedWPSBlinkInProgress) {
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ pLed->bLedWPSBlinkInProgress = true;
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), 0);
+ }
+ break;
+ case LED_CTL_STOP_WPS:
+ pLed->bLedWPSBlinkInProgress = false;
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff(padapter, pLed);
+ } else {
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), 0);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ }
+ break;
+ case LED_CTL_STOP_WPS_FAIL:
+ pLed->bLedWPSBlinkInProgress = false;
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff(padapter, pLed);
+ } else {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ _set_timer(&(pLed->BlinkTimer), 0);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ }
+ break;
+ case LED_CTL_START_TO_LINK:
+ case LED_CTL_NO_LINK:
+ if (!IS_LED_BLINKING(pLed)) {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ _set_timer(&(pLed->BlinkTimer), 0);
+ }
+ break;
+ case LED_CTL_POWER_OFF:
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ if (pLed->bLedWPSBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+
+ _set_timer(&(pLed->BlinkTimer), 0);
+ break;
+ default:
+ break;
+ }
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+}
+
+/* COREGA, added by chiyoko, 20090316 */
+static void SwLedControlMode3(struct adapter *padapter, enum LED_CTL_MODE LedAction)
+{
+ struct led_priv *ledpriv = &(padapter->ledpriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct LED_871x *pLed = &(ledpriv->SwLed0);
+
+ switch (LedAction) {
+ case LED_CTL_SITE_SURVEY:
+ if (pmlmepriv->LinkDetectInfo.bBusyTraffic) {
+ } else if (!pLed->bLedScanBlinkInProgress) {
+ if (IS_LED_WPS_BLINKING(pLed))
+ return;
+
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ pLed->bLedScanBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SCAN;
+ pLed->BlinkTimes = 24;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_TX:
+ case LED_CTL_RX:
+ if ((!pLed->bLedBlinkInProgress) && (check_fwstate(pmlmepriv, _FW_LINKED))) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
+ return;
+ pLed->bLedBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_TXRX;
+ pLed->BlinkTimes = 2;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_FASTER_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_LINK:
+ if (IS_LED_WPS_BLINKING(pLed))
+ return;
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = false;
+ }
+
+ _set_timer(&(pLed->BlinkTimer), 0);
+ break;
+ case LED_CTL_START_WPS: /* wait until xinpin finish */
+ case LED_CTL_START_WPS_BOTTON:
+ if (!pLed->bLedWPSBlinkInProgress) {
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ pLed->bLedWPSBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_WPS;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_STOP_WPS:
+ if (pLed->bLedWPSBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = false;
+ } else {
+ pLed->bLedWPSBlinkInProgress = true;
+ }
+
+ pLed->CurrLedState = LED_BLINK_WPS_STOP;
+ if (pLed->bLedOn) {
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA);
+ } else {
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), 0);
+ }
+ break;
+ case LED_CTL_STOP_WPS_FAIL:
+ if (pLed->bLedWPSBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ _set_timer(&(pLed->BlinkTimer), 0);
+ break;
+ case LED_CTL_START_TO_LINK:
+ case LED_CTL_NO_LINK:
+ if (!IS_LED_BLINKING(pLed)) {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ _set_timer(&(pLed->BlinkTimer), 0);
+ }
+ break;
+ case LED_CTL_POWER_OFF:
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ if (pLed->bLedWPSBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+
+ _set_timer(&(pLed->BlinkTimer), 0);
+ break;
+ default:
+ break;
+ }
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
+ ("CurrLedState %d\n", pLed->CurrLedState));
+}
+
+/* Edimax-Belkin, added by chiyoko, 20090413 */
+static void SwLedControlMode4(struct adapter *padapter, enum LED_CTL_MODE LedAction)
+{
+ struct led_priv *ledpriv = &(padapter->ledpriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct LED_871x *pLed = &(ledpriv->SwLed0);
+ struct LED_871x *pLed1 = &(ledpriv->SwLed1);
+
+ switch (LedAction) {
+ case LED_CTL_START_TO_LINK:
+ if (pLed1->bLedWPSBlinkInProgress) {
+ pLed1->bLedWPSBlinkInProgress = false;
+ _cancel_timer_ex(&(pLed1->BlinkTimer));
+
+ pLed1->BlinkingLedState = RTW_LED_OFF;
+ pLed1->CurrLedState = RTW_LED_OFF;
+
+ if (pLed1->bLedOn)
+ _set_timer(&(pLed1->BlinkTimer), 0);
+ }
+
+ if (!pLed->bLedStartToLinkBlinkInProgress) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
+ return;
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedNoLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+
+ pLed->bLedStartToLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_StartToBlink;
+ if (pLed->bLedOn) {
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SLOWLY_INTERVAL);
+ } else {
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NORMAL_INTERVAL);
+ }
+ }
+ break;
+ case LED_CTL_LINK:
+ case LED_CTL_NO_LINK:
+ /* LED1 settings */
+ if (LedAction == LED_CTL_LINK) {
+ if (pLed1->bLedWPSBlinkInProgress) {
+ pLed1->bLedWPSBlinkInProgress = false;
+ _cancel_timer_ex(&(pLed1->BlinkTimer));
+
+ pLed1->BlinkingLedState = RTW_LED_OFF;
+ pLed1->CurrLedState = RTW_LED_OFF;
+
+ if (pLed1->bLedOn)
+ _set_timer(&(pLed1->BlinkTimer), 0);
+ }
+ }
+
+ if (!pLed->bLedNoLinkBlinkInProgress) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
+ return;
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_SITE_SURVEY:
+ if ((pmlmepriv->LinkDetectInfo.bBusyTraffic) && (check_fwstate(pmlmepriv, _FW_LINKED))) {
+ } else if (!pLed->bLedScanBlinkInProgress) {
+ if (IS_LED_WPS_BLINKING(pLed))
+ return;
+
+ if (pLed->bLedNoLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ pLed->bLedScanBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SCAN;
+ pLed->BlinkTimes = 24;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_TX:
+ case LED_CTL_RX:
+ if (!pLed->bLedBlinkInProgress) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
+ return;
+ if (pLed->bLedNoLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ pLed->bLedBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_TXRX;
+ pLed->BlinkTimes = 2;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_FASTER_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_START_WPS: /* wait until xinpin finish */
+ case LED_CTL_START_WPS_BOTTON:
+ if (pLed1->bLedWPSBlinkInProgress) {
+ pLed1->bLedWPSBlinkInProgress = false;
+ _cancel_timer_ex(&(pLed1->BlinkTimer));
+
+ pLed1->BlinkingLedState = RTW_LED_OFF;
+ pLed1->CurrLedState = RTW_LED_OFF;
+
+ if (pLed1->bLedOn)
+ _set_timer(&(pLed1->BlinkTimer), 0);
+ }
+
+ if (!pLed->bLedWPSBlinkInProgress) {
+ if (pLed->bLedNoLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ pLed->bLedWPSBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_WPS;
+ if (pLed->bLedOn) {
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SLOWLY_INTERVAL);
+ } else {
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NORMAL_INTERVAL);
+ }
+ }
+ break;
+ case LED_CTL_STOP_WPS: /* WPS connect success */
+ if (pLed->bLedWPSBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+
+ break;
+ case LED_CTL_STOP_WPS_FAIL: /* WPS authentication fail */
+ if (pLed->bLedWPSBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+
+ /* LED1 settings */
+ if (pLed1->bLedWPSBlinkInProgress)
+ _cancel_timer_ex(&(pLed1->BlinkTimer));
+ else
+ pLed1->bLedWPSBlinkInProgress = true;
+ pLed1->CurrLedState = LED_BLINK_WPS_STOP;
+ if (pLed1->bLedOn)
+ pLed1->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed1->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed1->BlinkTimer), LED_BLINK_NORMAL_INTERVAL);
+ break;
+ case LED_CTL_STOP_WPS_FAIL_OVERLAP: /* WPS session overlap */
+ if (pLed->bLedWPSBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+
+ /* LED1 settings */
+ if (pLed1->bLedWPSBlinkInProgress)
+ _cancel_timer_ex(&(pLed1->BlinkTimer));
+ else
+ pLed1->bLedWPSBlinkInProgress = true;
+ pLed1->CurrLedState = LED_BLINK_WPS_STOP_OVERLAP;
+ pLed1->BlinkTimes = 10;
+ if (pLed1->bLedOn)
+ pLed1->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed1->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed1->BlinkTimer), LED_BLINK_NORMAL_INTERVAL);
+ break;
+ case LED_CTL_POWER_OFF:
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+
+ if (pLed->bLedNoLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedWPSBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ if (pLed->bLedStartToLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedStartToLinkBlinkInProgress = false;
+ }
+ if (pLed1->bLedWPSBlinkInProgress) {
+ _cancel_timer_ex(&(pLed1->BlinkTimer));
+ pLed1->bLedWPSBlinkInProgress = false;
+ }
+ pLed1->BlinkingLedState = LED_UNKNOWN;
+ SwLedOff(padapter, pLed);
+ SwLedOff(padapter, pLed1);
+ break;
+ default:
+ break;
+ }
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Led %d\n", pLed->CurrLedState));
+}
+
+
+
+/* Sercomm-Belkin, added by chiyoko, 20090415 */
+static void
+SwLedControlMode5(
+ struct adapter *padapter,
+ enum LED_CTL_MODE LedAction
+)
+{
+ struct led_priv *ledpriv = &(padapter->ledpriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct LED_871x *pLed = &(ledpriv->SwLed0);
+
+ switch (LedAction) {
+ case LED_CTL_POWER_ON:
+ case LED_CTL_NO_LINK:
+ case LED_CTL_LINK: /* solid blue */
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+
+ _set_timer(&(pLed->BlinkTimer), 0);
+ break;
+ case LED_CTL_SITE_SURVEY:
+ if ((pmlmepriv->LinkDetectInfo.bBusyTraffic) && (check_fwstate(pmlmepriv, _FW_LINKED))) {
+ } else if (!pLed->bLedScanBlinkInProgress) {
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ pLed->bLedScanBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SCAN;
+ pLed->BlinkTimes = 24;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_TX:
+ case LED_CTL_RX:
+ if (!pLed->bLedBlinkInProgress) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN)
+ return;
+ pLed->bLedBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_TXRX;
+ pLed->BlinkTimes = 2;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_FASTER_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_POWER_OFF:
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ SwLedOff(padapter, pLed);
+ break;
+ default:
+ break;
+ }
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Led %d\n", pLed->CurrLedState));
+}
+
+/* WNC-Corega, added by chiyoko, 20090902 */
+static void
+SwLedControlMode6(
+ struct adapter *padapter,
+ enum LED_CTL_MODE LedAction
+)
+{
+ struct led_priv *ledpriv = &(padapter->ledpriv);
+ struct LED_871x *pLed0 = &(ledpriv->SwLed0);
+
+ switch (LedAction) {
+ case LED_CTL_POWER_ON:
+ case LED_CTL_LINK:
+ case LED_CTL_NO_LINK:
+ _cancel_timer_ex(&(pLed0->BlinkTimer));
+ pLed0->CurrLedState = RTW_LED_ON;
+ pLed0->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed0->BlinkTimer), 0);
+ break;
+ case LED_CTL_POWER_OFF:
+ SwLedOff(padapter, pLed0);
+ break;
+ default:
+ break;
+ }
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("ledcontrol 6 Led %d\n", pLed0->CurrLedState));
+}
+
+/*
+ * Description:
+ * Handler function of LED blinking. We dispatch the actual LED blink
+ * action according to LedStrategy.
+ */
+void BlinkHandler(struct LED_871x *pLed)
+{
+ struct adapter *padapter = pLed->padapter;
+ struct led_priv *ledpriv = &(padapter->ledpriv);
+
+ if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
+ return;
+
+ switch (ledpriv->LedStrategy) {
+ case SW_LED_MODE0:
+ SwLedBlink(pLed);
+ break;
+ case SW_LED_MODE1:
+ SwLedBlink1(pLed);
+ break;
+ case SW_LED_MODE2:
+ SwLedBlink2(pLed);
+ break;
+ case SW_LED_MODE3:
+ SwLedBlink3(pLed);
+ break;
+ case SW_LED_MODE4:
+ SwLedBlink4(pLed);
+ break;
+ case SW_LED_MODE5:
+ SwLedBlink5(pLed);
+ break;
+ case SW_LED_MODE6:
+ SwLedBlink6(pLed);
+ break;
+ default:
+ break;
+ }
+}
+
+void LedControl8188eu(struct adapter *padapter, enum LED_CTL_MODE LedAction)
+{
+ struct led_priv *ledpriv = &(padapter->ledpriv);
+
+ if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped) ||
+ (!padapter->hw_init_completed))
+ return;
+
+ if (!ledpriv->bRegUseLed)
+ return;
+
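+ /* When the RF is off for a reason other than power save, ignore LED
+ * actions that would try to light the LED again.
+ */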
+ if ((padapter->pwrctrlpriv.rf_pwrstate != rf_on &&
+ padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) &&
+ (LedAction == LED_CTL_TX || LedAction == LED_CTL_RX ||
+ LedAction == LED_CTL_SITE_SURVEY ||
+ LedAction == LED_CTL_LINK ||
+ LedAction == LED_CTL_NO_LINK ||
+ LedAction == LED_CTL_POWER_ON))
+ return;
+
+ switch (ledpriv->LedStrategy) {
+ case SW_LED_MODE0:
+ break;
+ case SW_LED_MODE1:
+ SwLedControlMode1(padapter, LedAction);
+ break;
+ case SW_LED_MODE2:
+ SwLedControlMode2(padapter, LedAction);
+ break;
+ case SW_LED_MODE3:
+ SwLedControlMode3(padapter, LedAction);
+ break;
+ case SW_LED_MODE4:
+ SwLedControlMode4(padapter, LedAction);
+ break;
+ case SW_LED_MODE5:
+ SwLedControlMode5(padapter, LedAction);
+ break;
+ case SW_LED_MODE6:
+ SwLedControlMode6(padapter, LedAction);
+ break;
+ default:
+ break;
+ }
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
+ ("LedStrategy:%d, LedAction %d\n",
+ ledpriv->LedStrategy, LedAction));
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
new file mode 100644
index 0000000..ea66071
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -0,0 +1,2442 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_MLME_C_
+
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <recv_osdep.h>
+#include <xmit_osdep.h>
+#include <hal_intf.h>
+#include <mlme_osdep.h>
+#include <sta_info.h>
+#include <wifi.h>
+#include <wlan_bssdef.h>
+#include <rtw_ioctl_set.h>
+#include <usb_osintf.h>
+
+extern unsigned char MCS_rate_2R[16];
+extern unsigned char MCS_rate_1R[16];
+
+int _rtw_init_mlme_priv (struct adapter *padapter)
+{
+ int i;
+ u8 *pbuf;
+ struct wlan_network *pnetwork;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ int res = _SUCCESS;
+
+_func_enter_;
+
+ /* We don't need to memset padapter->XXX to zero, because adapter is allocated by rtw_zvmalloc(). */
+
+ pmlmepriv->nic_hdl = (u8 *)padapter;
+
+ pmlmepriv->pscanned = NULL;
+ pmlmepriv->fw_state = 0;
+ pmlmepriv->cur_network.network.InfrastructureMode = Ndis802_11AutoUnknown;
+ pmlmepriv->scan_mode = SCAN_ACTIVE;/* 1: active, 0: passive. Maybe someday we should rename this variable to "active_mode" (Jeff) */
+
+ _rtw_spinlock_init(&(pmlmepriv->lock));
+ _rtw_init_queue(&(pmlmepriv->free_bss_pool));
+ _rtw_init_queue(&(pmlmepriv->scanned_queue));
+
+ set_scanned_network_val(pmlmepriv, 0);
+
+ _rtw_memset(&pmlmepriv->assoc_ssid, 0, sizeof(struct ndis_802_11_ssid));
+
+ pbuf = rtw_zvmalloc(MAX_BSS_CNT * (sizeof(struct wlan_network)));
+
+ if (pbuf == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ pmlmepriv->free_bss_buf = pbuf;
+
+ pnetwork = (struct wlan_network *)pbuf;
+
+ for (i = 0; i < MAX_BSS_CNT; i++) {
+ _rtw_init_listhead(&(pnetwork->list));
+
+ rtw_list_insert_tail(&(pnetwork->list), &(pmlmepriv->free_bss_pool.queue));
+
+ pnetwork++;
+ }
+
+ /* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
+
+ rtw_clear_scan_deny(padapter);
+
+ rtw_init_mlme_timer(padapter);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+static void rtw_mfree_mlme_priv_lock (struct mlme_priv *pmlmepriv)
+{
+ _rtw_spinlock_free(&pmlmepriv->lock);
+ _rtw_spinlock_free(&(pmlmepriv->free_bss_pool.lock));
+ _rtw_spinlock_free(&(pmlmepriv->scanned_queue.lock));
+}
+
+#if defined(CONFIG_88EU_AP_MODE)
+static void rtw_free_mlme_ie_data(u8 **ppie, u32 *plen)
+{
+ kfree(*ppie);
+ *plen = 0;
+ *ppie = NULL;
+}
+
+void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
+{
+ rtw_buf_free(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len);
+ rtw_buf_free(&pmlmepriv->assoc_rsp, &pmlmepriv->assoc_rsp_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->wps_beacon_ie, &pmlmepriv->wps_beacon_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->wps_probe_req_ie, &pmlmepriv->wps_probe_req_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->wps_probe_resp_ie, &pmlmepriv->wps_probe_resp_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->wps_assoc_resp_ie, &pmlmepriv->wps_assoc_resp_ie_len);
+
+ rtw_free_mlme_ie_data(&pmlmepriv->p2p_beacon_ie, &pmlmepriv->p2p_beacon_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->p2p_probe_req_ie, &pmlmepriv->p2p_probe_req_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->p2p_probe_resp_ie, &pmlmepriv->p2p_probe_resp_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->p2p_go_probe_resp_ie, &pmlmepriv->p2p_go_probe_resp_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->p2p_assoc_req_ie, &pmlmepriv->p2p_assoc_req_ie_len);
+}
+#else
+void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
+{
+}
+#endif
+
+void _rtw_free_mlme_priv (struct mlme_priv *pmlmepriv)
+{
+_func_enter_;
+
+ rtw_free_mlme_priv_ie_data(pmlmepriv);
+
+ if (pmlmepriv) {
+ rtw_mfree_mlme_priv_lock (pmlmepriv);
+
+ if (pmlmepriv->free_bss_buf) {
+ rtw_vmfree(pmlmepriv->free_bss_buf, MAX_BSS_CNT * sizeof(struct wlan_network));
+ }
+ }
+_func_exit_;
+}
+
+int _rtw_enqueue_network(struct __queue *queue, struct wlan_network *pnetwork)
+{
+ unsigned long irql;
+
+_func_enter_;
+
+ if (pnetwork == NULL)
+ goto exit;
+
+ _enter_critical_bh(&queue->lock, &irql);
+
+ rtw_list_insert_tail(&pnetwork->list, &queue->queue);
+
+ _exit_critical_bh(&queue->lock, &irql);
+
+exit:
+
+_func_exit_;
+
+ return _SUCCESS;
+}
+
+struct wlan_network *_rtw_dequeue_network(struct __queue *queue)
+{
+ unsigned long irql;
+
+ struct wlan_network *pnetwork;
+
+_func_enter_;
+
+ _enter_critical_bh(&queue->lock, &irql);
+
+ if (_rtw_queue_empty(queue)) {
+ pnetwork = NULL;
+ } else {
+ pnetwork = LIST_CONTAINOR(get_next(&queue->queue), struct wlan_network, list);
+
+ rtw_list_delete(&(pnetwork->list));
+ }
+
+ _exit_critical_bh(&queue->lock, &irql);
+
+_func_exit_;
+
+ return pnetwork;
+}
+
+struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)/* _queue *free_queue) */
+{
+ unsigned long irql;
+ struct wlan_network *pnetwork;
+ struct __queue *free_queue = &pmlmepriv->free_bss_pool;
+ struct list_head *plist = NULL;
+
+_func_enter_;
+
+ _enter_critical_bh(&free_queue->lock, &irql);
+
+ if (_rtw_queue_empty(free_queue) == true) {
+ pnetwork = NULL;
+ goto exit;
+ }
+ plist = get_next(&(free_queue->queue));
+
+ pnetwork = LIST_CONTAINOR(plist , struct wlan_network, list);
+
+ rtw_list_delete(&pnetwork->list);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("_rtw_alloc_network: ptr=%p\n", plist));
+ pnetwork->network_type = 0;
+ pnetwork->fixed = false;
+ pnetwork->last_scanned = rtw_get_current_time();
+ pnetwork->aid = 0;
+ pnetwork->join_res = 0;
+
+ pmlmepriv->num_of_scanned++;
+
+exit:
+ _exit_critical_bh(&free_queue->lock, &irql);
+
+_func_exit_;
+
+ return pnetwork;
+}
+
+void _rtw_free_network(struct mlme_priv *pmlmepriv , struct wlan_network *pnetwork, u8 isfreeall)
+{
+ u32 curr_time, delta_time;
+ u32 lifetime = SCANQUEUE_LIFETIME;
+ unsigned long irql;
+ struct __queue *free_queue = &(pmlmepriv->free_bss_pool);
+
+_func_enter_;
+
+ if (pnetwork == NULL)
+ goto exit;
+
+ if (pnetwork->fixed)
+ goto exit;
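+ /* Age the entry out of the scan cache only after its lifetime has
+ * expired (SCANQUEUE_LIFETIME seconds, or 1 s in ad-hoc modes),
+ * unless the caller asked to free everything.
+ */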
+ curr_time = rtw_get_current_time();
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)))
+ lifetime = 1;
+ if (!isfreeall) {
+ delta_time = (curr_time - pnetwork->last_scanned)/HZ;
+ if (delta_time < lifetime)/* unit:sec */
+ goto exit;
+ }
+ _enter_critical_bh(&free_queue->lock, &irql);
+ rtw_list_delete(&(pnetwork->list));
+ rtw_list_insert_tail(&(pnetwork->list), &(free_queue->queue));
+ pmlmepriv->num_of_scanned--;
+ _exit_critical_bh(&free_queue->lock, &irql);
+
+exit:
+_func_exit_;
+}
+
+void _rtw_free_network_nolock(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork)
+{
+ struct __queue *free_queue = &(pmlmepriv->free_bss_pool);
+
+_func_enter_;
+ if (pnetwork == NULL)
+ goto exit;
+ if (pnetwork->fixed)
+ goto exit;
+ rtw_list_delete(&(pnetwork->list));
+ rtw_list_insert_tail(&(pnetwork->list), get_list_head(free_queue));
+ pmlmepriv->num_of_scanned--;
+exit:
+
+_func_exit_;
+}
+
+/*
+ * Return the wlan_network with the matching addr.
+ *
+ * Must be called in atomic context to avoid possible race conditions.
+ */
+struct wlan_network *_rtw_find_network(struct __queue *scanned_queue, u8 *addr)
+{
+ struct list_head *phead, *plist;
+ struct wlan_network *pnetwork = NULL;
+ u8 zero_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
+
+_func_enter_;
+ if (_rtw_memcmp(zero_addr, addr, ETH_ALEN)) {
+ pnetwork = NULL;
+ goto exit;
+ }
+ phead = get_list_head(scanned_queue);
+ plist = get_next(phead);
+
+ while (plist != phead) {
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network , list);
+ if (_rtw_memcmp(addr, pnetwork->network.MacAddress, ETH_ALEN) == true)
+ break;
+ plist = get_next(plist);
+ }
+ if (plist == phead)
+ pnetwork = NULL;
+exit:
+_func_exit_;
+ return pnetwork;
+}
+
+
+void _rtw_free_network_queue(struct adapter *padapter, u8 isfreeall)
+{
+ unsigned long irql;
+ struct list_head *phead, *plist;
+ struct wlan_network *pnetwork;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct __queue *scanned_queue = &pmlmepriv->scanned_queue;
+
+_func_enter_;
+
+
+ _enter_critical_bh(&scanned_queue->lock, &irql);
+
+ phead = get_list_head(scanned_queue);
+ plist = get_next(phead);
+
+ while (rtw_end_of_queue_search(phead, plist) == false) {
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ plist = get_next(plist);
+
+ _rtw_free_network(pmlmepriv, pnetwork, isfreeall);
+ }
+ _exit_critical_bh(&scanned_queue->lock, &irql);
+_func_exit_;
+}
+
+int rtw_if_up(struct adapter *padapter)
+{
+ int res;
+_func_enter_;
+
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved ||
+ (check_fwstate(&padapter->mlmepriv, _FW_LINKED) == false)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("rtw_if_up:bDriverStopped(%d) OR bSurpriseRemoved(%d)",
+ padapter->bDriverStopped, padapter->bSurpriseRemoved));
+ res = false;
+ } else {
+ res = true;
+ }
+
+_func_exit_;
+ return res;
+}
+
+void rtw_generate_random_ibss(u8 *pibss)
+{
+ u32 curtime = rtw_get_current_time();
+
+_func_enter_;
+ pibss[0] = 0x02; /* in ad-hoc mode bit 1 (locally administered) must be set to 1 */
+ pibss[1] = 0x11;
+ pibss[2] = 0x87;
+ pibss[3] = (u8)(curtime & 0xff);/* p[0]; */
+ pibss[4] = (u8)((curtime>>8) & 0xff);/* p[1]; */
+ pibss[5] = (u8)((curtime>>16) & 0xff);/* p[2]; */
+_func_exit_;
+ return;
+}
+
+u8 *rtw_get_capability_from_ie(u8 *ie)
+{
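+ /* The capability field follows the 8-byte timestamp and the 2-byte beacon interval in the fixed IEs. */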
+ return ie + 8 + 2;
+}
+
+
+u16 rtw_get_capability(struct wlan_bssid_ex *bss)
+{
+ __le16 val;
+_func_enter_;
+
+ memcpy((u8 *)&val, rtw_get_capability_from_ie(bss->IEs), 2);
+
+_func_exit_;
+ return le16_to_cpu(val);
+}
+
+u8 *rtw_get_timestampe_from_ie(u8 *ie)
+{
+ return ie + 0;
+}
+
+u8 *rtw_get_beacon_interval_from_ie(u8 *ie)
+{
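+ /* The beacon interval follows the 8-byte timestamp in the fixed IEs. */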
+ return ie + 8;
+}
+
+int rtw_init_mlme_priv (struct adapter *padapter)/* struct mlme_priv *pmlmepriv) */
+{
+ int res;
+_func_enter_;
+ res = _rtw_init_mlme_priv(padapter);/* (pmlmepriv); */
+_func_exit_;
+ return res;
+}
+
+void rtw_free_mlme_priv (struct mlme_priv *pmlmepriv)
+{
+_func_enter_;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_free_mlme_priv\n"));
+ _rtw_free_mlme_priv (pmlmepriv);
+_func_exit_;
+}
+
+static struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv)
+{
+ struct wlan_network *pnetwork;
+_func_enter_;
+ pnetwork = _rtw_alloc_network(pmlmepriv);
+_func_exit_;
+ return pnetwork;
+}
+
+static void rtw_free_network_nolock(struct mlme_priv *pmlmepriv,
+ struct wlan_network *pnetwork)
+{
+_func_enter_;
+ _rtw_free_network_nolock(pmlmepriv, pnetwork);
+_func_exit_;
+}
+
+
+void rtw_free_network_queue(struct adapter *dev, u8 isfreeall)
+{
+_func_enter_;
+ _rtw_free_network_queue(dev, isfreeall);
+_func_exit_;
+}
+
+/*
+ * Return the wlan_network with the matching addr.
+ *
+ * Must be called in atomic context to avoid possible race conditions.
+ */
+struct wlan_network *rtw_find_network(struct __queue *scanned_queue, u8 *addr)
+{
+ struct wlan_network *pnetwork = _rtw_find_network(scanned_queue, addr);
+
+ return pnetwork;
+}
+
+int rtw_is_same_ibss(struct adapter *adapter, struct wlan_network *pnetwork)
+{
+ int ret = true;
+ struct security_priv *psecuritypriv = &adapter->securitypriv;
+
+ if ((psecuritypriv->dot11PrivacyAlgrthm != _NO_PRIVACY_) &&
+ (pnetwork->network.Privacy == 0))
+ ret = false;
+ else if ((psecuritypriv->dot11PrivacyAlgrthm == _NO_PRIVACY_) &&
+ (pnetwork->network.Privacy == 1))
+ ret = false;
+ else
+ ret = true;
+ return ret;
+}
+
+static int is_same_ess(struct wlan_bssid_ex *a, struct wlan_bssid_ex *b)
+{
+ return (a->Ssid.SsidLength == b->Ssid.SsidLength) &&
+ _rtw_memcmp(a->Ssid.Ssid, b->Ssid.Ssid, a->Ssid.SsidLength);
+}
+
+int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst)
+{
+ u16 s_cap, d_cap;
+ __le16 le_scap, le_dcap;
+
+_func_enter_;
+ memcpy((u8 *)&le_scap, rtw_get_capability_from_ie(src->IEs), 2);
+ memcpy((u8 *)&le_dcap, rtw_get_capability_from_ie(dst->IEs), 2);
+
+
+ s_cap = le16_to_cpu(le_scap);
+ d_cap = le16_to_cpu(le_dcap);
+
+_func_exit_;
+
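+ /* Two BSS descriptors describe the same network when SSID, BSSID,
+ * and the BSS/IBSS capability bits all match.
+ */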
+ return ((src->Ssid.SsidLength == dst->Ssid.SsidLength) &&
+ ((_rtw_memcmp(src->MacAddress, dst->MacAddress, ETH_ALEN)) == true) &&
+ ((_rtw_memcmp(src->Ssid.Ssid, dst->Ssid.Ssid, src->Ssid.SsidLength)) == true) &&
+ ((s_cap & WLAN_CAPABILITY_IBSS) ==
+ (d_cap & WLAN_CAPABILITY_IBSS)) &&
+ ((s_cap & WLAN_CAPABILITY_BSS) ==
+ (d_cap & WLAN_CAPABILITY_BSS)));
+}
+
+struct wlan_network *rtw_get_oldest_wlan_network(struct __queue *scanned_queue)
+{
+ struct list_head *plist, *phead;
+ struct wlan_network *pwlan = NULL;
+ struct wlan_network *oldest = NULL;
+
+_func_enter_;
+ phead = get_list_head(scanned_queue);
+
+ plist = get_next(phead);
+
+ while (1) {
+ if (rtw_end_of_queue_search(phead, plist) == true)
+ break;
+
+ pwlan = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ if (!pwlan->fixed) {
+ if (oldest == NULL || time_after(oldest->last_scanned, pwlan->last_scanned))
+ oldest = pwlan;
+ }
+
+ plist = get_next(plist);
+ }
+_func_exit_;
+ return oldest;
+}
+
+void update_network(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
+ struct adapter *padapter, bool update_ie)
+{
+ long rssi_ori = dst->Rssi;
+ u8 sq_smp = src->PhyInfo.SignalQuality;
+ u8 ss_final;
+ u8 sq_final;
+ long rssi_final;
+
+_func_enter_;
+ rtw_hal_antdiv_rssi_compared(padapter, dst, src); /* this will update src.Rssi; needs to be revisited */
+
+ /* The rule below is 1/5 for sample value, 4/5 for history value */
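+ /* e.g. a new sample of 40 against a history of 60 gives (40 + 60*4)/5 = 56 */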
+ if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) && is_same_network(&(padapter->mlmepriv.cur_network.network), src)) {
+ /* Take the recvpriv's value for the connected AP */
+ ss_final = padapter->recvpriv.signal_strength;
+ sq_final = padapter->recvpriv.signal_qual;
+ /* the rssi value here is undecorated, and will be used for antenna diversity */
+ if (sq_smp != 101) /* from the right channel */
+ rssi_final = (src->Rssi+dst->Rssi*4)/5;
+ else
+ rssi_final = rssi_ori;
+ } else {
+ if (sq_smp != 101) { /* from the right channel */
+ ss_final = ((u32)(src->PhyInfo.SignalStrength)+(u32)(dst->PhyInfo.SignalStrength)*4)/5;
+ sq_final = ((u32)(src->PhyInfo.SignalQuality)+(u32)(dst->PhyInfo.SignalQuality)*4)/5;
+ rssi_final = (src->Rssi+dst->Rssi*4)/5;
+ } else {
+ /* bss info not received from the right channel, use the original RX signal info */
+ ss_final = dst->PhyInfo.SignalStrength;
+ sq_final = dst->PhyInfo.SignalQuality;
+ rssi_final = dst->Rssi;
+ }
+ }
+ if (update_ie)
+ memcpy((u8 *)dst, (u8 *)src, get_wlan_bssid_ex_sz(src));
+ dst->PhyInfo.SignalStrength = ss_final;
+ dst->PhyInfo.SignalQuality = sq_final;
+ dst->Rssi = rssi_final;
+
+_func_exit_;
+}
+
+static void update_current_network(struct adapter *adapter, struct wlan_bssid_ex *pnetwork)
+{
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+
+_func_enter_;
+
+ if ((check_fwstate(pmlmepriv, _FW_LINKED) == true) &&
+ (is_same_network(&(pmlmepriv->cur_network.network), pnetwork))) {
+ update_network(&(pmlmepriv->cur_network.network), pnetwork, adapter, true);
+ rtw_update_protection(adapter, (pmlmepriv->cur_network.network.IEs) + sizeof(struct ndis_802_11_fixed_ie),
+ pmlmepriv->cur_network.network.IELength);
+ }
+_func_exit_;
+}
+
+/*
+Caller must hold pmlmepriv->lock first.
+*/
+void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *target)
+{
+ unsigned long irql;
+ struct list_head *plist, *phead;
+ u32 bssid_ex_sz;
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ struct wlan_network *oldest = NULL;
+
+_func_enter_;
+
+ _enter_critical_bh(&queue->lock, &irql);
+ phead = get_list_head(queue);
+ plist = get_next(phead);
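+ /* Walk the scanned list looking for this BSS, remembering the oldest
+ * entry in case the free pool is exhausted.
+ */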
+
+ while (1) {
+ if (rtw_end_of_queue_search(phead, plist) == true)
+ break;
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ if (is_same_network(&(pnetwork->network), target))
+ break;
+ if ((oldest == ((struct wlan_network *)0)) ||
+ time_after(oldest->last_scanned, pnetwork->last_scanned))
+ oldest = pnetwork;
+ plist = get_next(plist);
+ }
+ /* If we didn't find a match, then get a new network slot to initialize
+ * with this beacon's information */
+ if (rtw_end_of_queue_search(phead, plist) == true) {
+ if (_rtw_queue_empty(&(pmlmepriv->free_bss_pool)) == true) {
+ /* If there are no more slots, expire the oldest */
+ pnetwork = oldest;
+
+ rtw_hal_get_def_var(adapter, HAL_DEF_CURRENT_ANTENNA, &(target->PhyInfo.Optimum_antenna));
+ memcpy(&(pnetwork->network), target, get_wlan_bssid_ex_sz(target));
+ /* variable initialize */
+ pnetwork->fixed = false;
+ pnetwork->last_scanned = rtw_get_current_time();
+
+ pnetwork->network_type = 0;
+ pnetwork->aid = 0;
+ pnetwork->join_res = 0;
+
+ /* bss info not received from the right channel */
+ if (pnetwork->network.PhyInfo.SignalQuality == 101)
+ pnetwork->network.PhyInfo.SignalQuality = 0;
+ } else {
+ /* Otherwise just pull from the free list */
+
+ pnetwork = rtw_alloc_network(pmlmepriv); /* will update scan_time */
+
+ if (pnetwork == NULL) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n\n\nsomething wrong here\n\n\n"));
+ goto exit;
+ }
+
+ bssid_ex_sz = get_wlan_bssid_ex_sz(target);
+ target->Length = bssid_ex_sz;
+ rtw_hal_get_def_var(adapter, HAL_DEF_CURRENT_ANTENNA, &(target->PhyInfo.Optimum_antenna));
+ memcpy(&(pnetwork->network), target, bssid_ex_sz);
+
+ pnetwork->last_scanned = rtw_get_current_time();
+
+ /* bss info not received from the right channel */
+ if (pnetwork->network.PhyInfo.SignalQuality == 101)
+ pnetwork->network.PhyInfo.SignalQuality = 0;
+ rtw_list_insert_tail(&(pnetwork->list), &(queue->queue));
+ }
+ } else {
+ /* We have an entry and we are going to update it. But this entry may
+ * already be expired. In that case we do the same as if we found a new
+ * net and call the new_net handler.
+ */
+ bool update_ie = true;
+
+ pnetwork->last_scanned = rtw_get_current_time();
+
+ /* target.Reserved[0] == 1 means the scanned network comes from a beacon frame. */
+ if ((pnetwork->network.IELength > target->IELength) && (target->Reserved[0] == 1))
+ update_ie = false;
+
+ update_network(&(pnetwork->network), target, adapter, update_ie);
+ }
+
+exit:
+ _exit_critical_bh(&queue->lock, &irql);
+
+_func_exit_;
+}
+
+static void rtw_add_network(struct adapter *adapter,
+ struct wlan_bssid_ex *pnetwork)
+{
+_func_enter_;
+#if defined(CONFIG_88EU_P2P)
+ rtw_wlan_bssid_ex_remove_p2p_attr(pnetwork, P2P_ATTR_GROUP_INFO);
+#endif
+ update_current_network(adapter, pnetwork);
+ rtw_update_scanned_network(adapter, pnetwork);
+_func_exit_;
+}
+
+/* select the desired network based on the capability of the (i)bss. */
+/* check items: (1) security */
+/* (2) network_type */
+/* (3) WMM */
+/* (4) HT */
+/* (5) others */
+static int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *pnetwork)
+{
+ struct security_priv *psecuritypriv = &adapter->securitypriv;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ u32 desired_encmode;
+ u32 privacy;
+
+ /* u8 wps_ie[512]; */
+ uint wps_ielen;
+
+ int bselected = true;
+
+ desired_encmode = psecuritypriv->ndisencryptstatus;
+ privacy = pnetwork->network.Privacy;
+
+ if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) {
+ if (rtw_get_wps_ie(pnetwork->network.IEs+_FIXED_IE_LENGTH_, pnetwork->network.IELength-_FIXED_IE_LENGTH_, NULL, &wps_ielen) != NULL)
+ return true;
+ else
+ return false;
+ }
+ if (adapter->registrypriv.wifi_spec == 1) { /* for correct flow of 8021X to do.... */
+ if ((desired_encmode == Ndis802_11EncryptionDisabled) && (privacy != 0))
+ bselected = false;
+ }
+
+
+ if ((desired_encmode != Ndis802_11EncryptionDisabled) && (privacy == 0)) {
+ DBG_88E("desired_encmode: %d, privacy: %d\n", desired_encmode, privacy);
+ bselected = false;
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) {
+ if (pnetwork->network.InfrastructureMode != pmlmepriv->cur_network.network.InfrastructureMode)
+ bselected = false;
+ }
+
+
+ return bselected;
+}
+
+/* TODO: Perry: For Power Management */
+void rtw_atimdone_event_callback(struct adapter *adapter , u8 *pbuf)
+{
+_func_enter_;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("receive atimdone_evet\n"));
+_func_exit_;
+ return;
+}
+
+
+void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf)
+{
+ unsigned long irql;
+ u32 len;
+ struct wlan_bssid_ex *pnetwork;
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+
+_func_enter_;
+
+ pnetwork = (struct wlan_bssid_ex *)pbuf;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_survey_event_callback, ssid=%s\n", pnetwork->Ssid.Ssid));
+
+ len = get_wlan_bssid_ex_sz(pnetwork);
+ if (len > (sizeof(struct wlan_bssid_ex))) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n****rtw_survey_event_callback: return a wrong bss ***\n"));
+ return;
+ }
+ _enter_critical_bh(&pmlmepriv->lock, &irql);
+
+ /* update IBSS_network's timestamp */
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) == true) {
+ if (_rtw_memcmp(&(pmlmepriv->cur_network.network.MacAddress), pnetwork->MacAddress, ETH_ALEN)) {
+ struct wlan_network *ibss_wlan = NULL;
+ unsigned long irql;
+
+ memcpy(pmlmepriv->cur_network.network.IEs, pnetwork->IEs, 8);
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ ibss_wlan = rtw_find_network(&pmlmepriv->scanned_queue, pnetwork->MacAddress);
+ if (ibss_wlan) {
+ memcpy(ibss_wlan->network.IEs , pnetwork->IEs, 8);
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ goto exit;
+ }
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ }
+ }
+
+ /* lock pmlmepriv->lock when accessing network_q */
+ if ((check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) == false) {
+ if (pnetwork->Ssid.Ssid[0] == 0)
+ pnetwork->Ssid.SsidLength = 0;
+ rtw_add_network(adapter, pnetwork);
+ }
+
+exit:
+
+ _exit_critical_bh(&pmlmepriv->lock, &irql);
+
+_func_exit_;
+
+ return;
+}
+
+
+
+void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
+{
+ unsigned long irql;
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext;
+
+_func_enter_;
+ _enter_critical_bh(&pmlmepriv->lock, &irql);
+
+ if (pmlmepriv->wps_probe_req_ie) {
+ pmlmepriv->wps_probe_req_ie_len = 0;
+ kfree(pmlmepriv->wps_probe_req_ie);
+ pmlmepriv->wps_probe_req_ie = NULL;
+ }
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_surveydone_event_callback: fw_state:%x\n\n", get_fwstate(pmlmepriv)));
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) {
+ u8 timer_cancelled;
+
+ _cancel_timer(&pmlmepriv->scan_to_timer, &timer_cancelled);
+
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
+ } else {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("nic status=%x, survey done event comes too late!\n", get_fwstate(pmlmepriv)));
+ }
+
+ rtw_set_signal_stat_timer(&adapter->recvpriv);
+
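+ /* If a join is pending, try to join the selected BSS from the scan
+ * results; in ad-hoc mode, fall back to creating the IBSS ourselves
+ * when nothing suitable was found.
+ */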
+ if (pmlmepriv->to_join) {
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true)) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == false) {
+ set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
+
+ if (rtw_select_and_join_from_scanned_queue(pmlmepriv) == _SUCCESS) {
+ _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT);
+ } else {
+ struct wlan_bssid_ex *pdev_network = &(adapter->registrypriv.dev_network);
+ u8 *pibss = adapter->registrypriv.dev_network.MacAddress;
+
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("switching to adhoc master\n"));
+
+ _rtw_memset(&pdev_network->Ssid, 0, sizeof(struct ndis_802_11_ssid));
+ memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
+
+ rtw_update_registrypriv_dev_network(adapter);
+ rtw_generate_random_ibss(pibss);
+
+ pmlmepriv->fw_state = WIFI_ADHOC_MASTER_STATE;
+
+ if (rtw_createbss_cmd(adapter) != _SUCCESS)
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Error=>rtw_createbss_cmd status FAIL\n"));
+ pmlmepriv->to_join = false;
+ }
+ }
+ } else {
+ int s_ret;
+ set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
+ pmlmepriv->to_join = false;
+ s_ret = rtw_select_and_join_from_scanned_queue(pmlmepriv);
+ if (_SUCCESS == s_ret) {
+ _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT);
+ } else if (s_ret == 2) { /* there is no need to wait for join */
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+ rtw_indicate_connect(adapter);
+ } else {
+ DBG_88E("try_to_join, but select scanning queue fail, to_roaming:%d\n", pmlmepriv->to_roaming);
+ if (pmlmepriv->to_roaming != 0) {
+ if (--pmlmepriv->to_roaming == 0 ||
+ _SUCCESS != rtw_sitesurvey_cmd(adapter, &pmlmepriv->assoc_ssid, 1, NULL, 0)) {
+ pmlmepriv->to_roaming = 0;
+ rtw_free_assoc_resources(adapter, 1);
+ rtw_indicate_disconnect(adapter);
+ } else {
+ pmlmepriv->to_join = true;
+ }
+ }
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+ }
+ }
+ }
+
+ indicate_wx_scan_complete_event(adapter);
+
+ _exit_critical_bh(&pmlmepriv->lock, &irql);
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ p2p_ps_wk_cmd(adapter, P2P_PS_SCAN_DONE, 0);
+
+ rtw_os_xmit_schedule(adapter);
+
+ pmlmeext = &adapter->mlmeextpriv;
+ if (pmlmeext->sitesurvey_res.bss_cnt == 0)
+ rtw_hal_sreset_reset(adapter);
+_func_exit_;
+}
+
+void rtw_dummy_event_callback(struct adapter *adapter , u8 *pbuf)
+{
+}
+
+void rtw_fwdbg_event_callback(struct adapter *adapter , u8 *pbuf)
+{
+}
+
+static void free_scanqueue(struct mlme_priv *pmlmepriv)
+{
+ unsigned long irql, irql0;
+ struct __queue *free_queue = &pmlmepriv->free_bss_pool;
+ struct __queue *scan_queue = &pmlmepriv->scanned_queue;
+ struct list_head *plist, *phead, *ptemp;
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+free_scanqueue\n"));
+ _enter_critical_bh(&scan_queue->lock, &irql0);
+ _enter_critical_bh(&free_queue->lock, &irql);
+
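+ /* Move every scanned entry back onto the free pool while holding both queue locks. */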
+ phead = get_list_head(scan_queue);
+ plist = get_next(phead);
+
+ while (plist != phead) {
+ ptemp = get_next(plist);
+ rtw_list_delete(plist);
+ rtw_list_insert_tail(plist, &free_queue->queue);
+ plist = ptemp;
+ pmlmepriv->num_of_scanned--;
+ }
+
+ _exit_critical_bh(&free_queue->lock, &irql);
+ _exit_critical_bh(&scan_queue->lock, &irql0);
+
+_func_exit_;
+}
+
+/*
+*rtw_free_assoc_resources: the caller has to lock pmlmepriv->lock
+*/
+void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue)
+{
+ unsigned long irql;
+ struct wlan_network *pwlan = NULL;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ struct sta_priv *pstapriv = &adapter->stapriv;
+ struct wlan_network *tgt_network = &pmlmepriv->cur_network;
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+rtw_free_assoc_resources\n"));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("tgt_network->network.MacAddress=%pM ssid=%s\n",
+ tgt_network->network.MacAddress, tgt_network->network.Ssid.Ssid));
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_AP_STATE)) {
+ struct sta_info *psta;
+
+ psta = rtw_get_stainfo(&adapter->stapriv, tgt_network->network.MacAddress);
+
+ _enter_critical_bh(&(pstapriv->sta_hash_lock), &irql);
+ rtw_free_stainfo(adapter, psta);
+ _exit_critical_bh(&(pstapriv->sta_hash_lock), &irql);
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE | WIFI_AP_STATE)) {
+ struct sta_info *psta;
+
+ rtw_free_all_stainfo(adapter);
+
+ psta = rtw_get_bcmc_stainfo(adapter);
+ _enter_critical_bh(&(pstapriv->sta_hash_lock), &irql);
+ rtw_free_stainfo(adapter, psta);
+ _exit_critical_bh(&(pstapriv->sta_hash_lock), &irql);
+
+ rtw_init_bcmc_stainfo(adapter);
+ }
+
+ if (lock_scanned_queue)
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+
+ pwlan = rtw_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress);
+ if (pwlan)
+ pwlan->fixed = false;
+ else
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_free_assoc_resources:pwlan==NULL\n\n"));
+
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) && (adapter->stapriv.asoc_sta_count == 1)))
+ rtw_free_network_nolock(pmlmepriv, pwlan);
+
+ if (lock_scanned_queue)
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ pmlmepriv->key_mask = 0;
+_func_exit_;
+}
+
+/*
+*rtw_indicate_connect: the caller has to lock pmlmepriv->lock
+*/
+void rtw_indicate_connect(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("+rtw_indicate_connect\n"));
+
+ pmlmepriv->to_join = false;
+
+ if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
+ set_fwstate(pmlmepriv, _FW_LINKED);
+
+ rtw_led_control(padapter, LED_CTL_LINK);
+
+ rtw_os_indicate_connect(padapter);
+ }
+
+ pmlmepriv->to_roaming = 0;
+
+ rtw_set_scan_deny(padapter, 3000);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("-rtw_indicate_connect: fw_state=0x%08x\n", get_fwstate(pmlmepriv)));
+_func_exit_;
+}
+
+/*
+*rtw_indicate_disconnect: the caller has to lock pmlmepriv->lock
+*/
+void rtw_indicate_disconnect(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+_func_enter_;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("+rtw_indicate_disconnect\n"));
+
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING | WIFI_UNDER_WPS);
+
+
+ if (pmlmepriv->to_roaming > 0)
+ _clr_fwstate_(pmlmepriv, _FW_LINKED);
+
+ if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) ||
+ (pmlmepriv->to_roaming <= 0)) {
+ rtw_os_indicate_disconnect(padapter);
+
+ _clr_fwstate_(pmlmepriv, _FW_LINKED);
+ rtw_led_control(padapter, LED_CTL_NO_LINK);
+ rtw_clear_scan_deny(padapter);
+ }
+ p2p_ps_wk_cmd(padapter, P2P_PS_DISABLE, 1);
+
+ rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_DISCONNECT, 1);
+
+_func_exit_;
+}
+
+inline void rtw_indicate_scan_done(struct adapter *padapter, bool aborted)
+{
+ rtw_os_indicate_scan_done(padapter, aborted);
+}
+
+void rtw_scan_abort(struct adapter *adapter)
+{
+ u32 start;
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(adapter->mlmeextpriv);
+
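+ /* Flag the abort and poll for up to 200 ms for the pending survey to
+ * finish before forcing a scan-done indication.
+ */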
+ start = rtw_get_current_time();
+ pmlmeext->scan_abort = true;
+ while (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) &&
+ rtw_get_passing_time_ms(start) <= 200) {
+ if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
+ break;
+ DBG_88E(FUNC_NDEV_FMT"fw_state=_FW_UNDER_SURVEY!\n", FUNC_NDEV_ARG(adapter->pnetdev));
+ rtw_msleep_os(20);
+ }
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) {
+ if (!adapter->bDriverStopped && !adapter->bSurpriseRemoved)
+ DBG_88E(FUNC_NDEV_FMT"waiting for scan_abort time out!\n", FUNC_NDEV_ARG(adapter->pnetdev));
+ rtw_indicate_scan_done(adapter, true);
+ }
+ pmlmeext->scan_abort = false;
+}
+
+static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, struct wlan_network *pnetwork)
+{
+ int i;
+ struct sta_info *bmc_sta, *psta = NULL;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ psta = rtw_get_stainfo(pstapriv, pnetwork->network.MacAddress);
+ if (psta == NULL)
+ psta = rtw_alloc_stainfo(pstapriv, pnetwork->network.MacAddress);
+
+ if (psta) { /* update ptarget_sta */
+ DBG_88E("%s\n", __func__);
+ psta->aid = pnetwork->join_res;
+ psta->mac_id = 0;
+ /* sta mode */
+ rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, true);
+ /* security related */
+ if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) {
+ padapter->securitypriv.binstallGrpkey = false;
+ padapter->securitypriv.busetkipkey = false;
+ padapter->securitypriv.bgrpkey_handshake = false;
+ psta->ieee8021x_blocked = true;
+ psta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
+ _rtw_memset((u8 *)&psta->dot118021x_UncstKey, 0, sizeof(union Keytype));
+ _rtw_memset((u8 *)&psta->dot11tkiprxmickey, 0, sizeof(union Keytype));
+ _rtw_memset((u8 *)&psta->dot11tkiptxmickey, 0, sizeof(union Keytype));
+ _rtw_memset((u8 *)&psta->dot11txpn, 0, sizeof(union pn48));
+ _rtw_memset((u8 *)&psta->dot11rxpn, 0, sizeof(union pn48));
+ }
+ /* Commented by Albert 2012/07/21 */
+ /* When doing WPS, the wps_ie_len won't be 0, */
+ /* and the Wi-Fi driver shouldn't allow the data packet to be transmitted. */
+ if (padapter->securitypriv.wps_ie_len != 0) {
+ psta->ieee8021x_blocked = true;
+ padapter->securitypriv.wps_ie_len = 0;
+ }
+ /* for A-MPDU Rx reordering buffer control for bmc_sta & sta_info */
+ /* if A-MPDU Rx is enabled, reset rx_ordering_ctrl wstart_b(indicate_seq) to the default value 0xffff */
+ /* todo: check if AP can send A-MPDU packets */
+ for (i = 0; i < 16; i++) {
+ /* preorder_ctrl = &precvpriv->recvreorder_ctrl[i]; */
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
+ preorder_ctrl->enable = false;
+ preorder_ctrl->indicate_seq = 0xffff;
+ preorder_ctrl->wend_b = 0xffff;
+ preorder_ctrl->wsize_b = 64;/* max_ampdu_sz; ex. 32(kbytes) -> wsize_b = 32 */
+ }
+ bmc_sta = rtw_get_bcmc_stainfo(padapter);
+ if (bmc_sta) {
+ for (i = 0; i < 16; i++) {
+ /* preorder_ctrl = &precvpriv->recvreorder_ctrl[i]; */
+ preorder_ctrl = &bmc_sta->recvreorder_ctrl[i];
+ preorder_ctrl->enable = false;
+ preorder_ctrl->indicate_seq = 0xffff;
+ preorder_ctrl->wend_b = 0xffff;
+ preorder_ctrl->wsize_b = 64;/* max_ampdu_sz; ex. 32(kbytes) -> wsize_b = 32 */
+ }
+ }
+ /* misc. */
+ update_sta_info(padapter, psta);
+ }
+ return psta;
+}
+
+/* pnetwork: returns from rtw_joinbss_event_callback */
+/* ptarget_wlan: found from scanned_queue */
+static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_network *ptarget_wlan, struct wlan_network *pnetwork)
+{
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_network *cur_network = &(pmlmepriv->cur_network);
+
+ DBG_88E("%s\n", __func__);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("\nfw_state:%x, BSSID:%pM\n",
+ get_fwstate(pmlmepriv), pnetwork->network.MacAddress));
+
+
+ /* why not use ptarget_wlan?? */
+ memcpy(&cur_network->network, &pnetwork->network, pnetwork->network.Length);
+ /* some IEs in pnetwork are wrong, so we should use the ptarget_wlan IEs */
+ cur_network->network.IELength = ptarget_wlan->network.IELength;
+ memcpy(&cur_network->network.IEs[0], &ptarget_wlan->network.IEs[0], MAX_IE_SZ);
+
+ cur_network->aid = pnetwork->join_res;
+
+
+ rtw_set_signal_stat_timer(&padapter->recvpriv);
+ padapter->recvpriv.signal_strength = ptarget_wlan->network.PhyInfo.SignalStrength;
+ padapter->recvpriv.signal_qual = ptarget_wlan->network.PhyInfo.SignalQuality;
+ /* ptarget_wlan->network.Rssi is raw data; use ptarget_wlan->network.PhyInfo.SignalStrength instead (already scaled) */
+ padapter->recvpriv.rssi = translate_percentage_to_dbm(ptarget_wlan->network.PhyInfo.SignalStrength);
+ rtw_set_signal_stat_timer(&padapter->recvpriv);
+
+ /* updating fw_state will clear _FW_UNDER_LINKING here indirectly */
+ switch (pnetwork->network.InfrastructureMode) {
+ case Ndis802_11Infrastructure:
+ if (pmlmepriv->fw_state&WIFI_UNDER_WPS)
+ pmlmepriv->fw_state = WIFI_STATION_STATE|WIFI_UNDER_WPS;
+ else
+ pmlmepriv->fw_state = WIFI_STATION_STATE;
+ break;
+ case Ndis802_11IBSS:
+ pmlmepriv->fw_state = WIFI_ADHOC_STATE;
+ break;
+ default:
+ pmlmepriv->fw_state = WIFI_NULL_STATE;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Invalid network_mode\n"));
+ break;
+ }
+
+ rtw_update_protection(padapter, (cur_network->network.IEs) +
+ sizeof(struct ndis_802_11_fixed_ie),
+ (cur_network->network.IELength));
+ rtw_update_ht_cap(padapter, cur_network->network.IEs, cur_network->network.IELength);
+}
+
+/* Notes: the function could be > passive_level (the same context as the Rx tasklet) */
+/* pnetwork: returns from rtw_joinbss_event_callback */
+/* ptarget_wlan: found from scanned_queue */
+/* if join_res > 0, for (fw_state == WIFI_STATION_STATE), we check if "ptarget_sta" & "ptarget_wlan" exist. */
+/* if join_res > 0, for (fw_state == WIFI_ADHOC_STATE), we only check if "ptarget_wlan" exist. */
+/* if join_res > 0, update "cur_network->network" from "pnetwork->network" if (ptarget_wlan != NULL). */
+
+void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
+{
+ unsigned long irql, irql2;
+ u8 timer_cancelled;
+ struct sta_info *ptarget_sta = NULL, *pcur_sta = NULL;
+ struct sta_priv *pstapriv = &adapter->stapriv;
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+ struct wlan_network *pnetwork = (struct wlan_network *)pbuf;
+ struct wlan_network *cur_network = &(pmlmepriv->cur_network);
+ struct wlan_network *pcur_wlan = NULL, *ptarget_wlan = NULL;
+ unsigned int the_same_macaddr = false;
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("joinbss event call back received with res=%d\n", pnetwork->join_res));
+
+ rtw_get_encrypt_decrypt_from_registrypriv(adapter);
+
+
+ if (pmlmepriv->assoc_ssid.SsidLength == 0)
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("@@@@@ joinbss event call back for Any SSid\n"));
+ else
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("@@@@@ rtw_joinbss_event_callback for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
+
+ the_same_macaddr = _rtw_memcmp(pnetwork->network.MacAddress, cur_network->network.MacAddress, ETH_ALEN);
+
+ pnetwork->network.Length = get_wlan_bssid_ex_sz(&pnetwork->network);
+ if (pnetwork->network.Length > sizeof(struct wlan_bssid_ex)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n\n ***joinbss_evt_callback return a wrong bss ***\n\n"));
+ goto ignore_nolock;
+ }
+
+ _enter_critical_bh(&pmlmepriv->lock, &irql);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("\nrtw_joinbss_event_callback!! _enter_critical\n"));
+
+ if (pnetwork->join_res > 0) {
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) {
+ /* s1. find ptarget_wlan */
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ if (the_same_macaddr) {
+ ptarget_wlan = rtw_find_network(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
+ } else {
+ pcur_wlan = rtw_find_network(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
+ if (pcur_wlan)
+ pcur_wlan->fixed = false;
+
+ pcur_sta = rtw_get_stainfo(pstapriv, cur_network->network.MacAddress);
+ if (pcur_sta) {
+ _enter_critical_bh(&(pstapriv->sta_hash_lock), &irql2);
+ rtw_free_stainfo(adapter, pcur_sta);
+ _exit_critical_bh(&(pstapriv->sta_hash_lock), &irql2);
+ }
+
+ ptarget_wlan = rtw_find_network(&pmlmepriv->scanned_queue, pnetwork->network.MacAddress);
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
+ if (ptarget_wlan)
+ ptarget_wlan->fixed = true;
+ }
+ }
+ } else {
+ ptarget_wlan = rtw_find_network(&pmlmepriv->scanned_queue, pnetwork->network.MacAddress);
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
+ if (ptarget_wlan)
+ ptarget_wlan->fixed = true;
+ }
+ }
+
+ /* s2. update cur_network */
+ if (ptarget_wlan) {
+ rtw_joinbss_update_network(adapter, ptarget_wlan, pnetwork);
+ } else {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't find ptarget_wlan when joinbss_event callback\n"));
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ goto ignore_joinbss_callback;
+ }
+
+
+ /* s3. find ptarget_sta & update ptarget_sta after updating cur_network, only for station mode */
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
+ ptarget_sta = rtw_joinbss_update_stainfo(adapter, pnetwork);
+ if (ptarget_sta == NULL) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't update stainfo when joinbss_event callback\n"));
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ goto ignore_joinbss_callback;
+ }
+ }
+
+ /* s4. indicate connect */
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
+ rtw_indicate_connect(adapter);
+ } else {
+ /* adhoc mode will call rtw_indicate_connect in rtw_stassoc_event_callback */
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("adhoc mode, fw_state:%x", get_fwstate(pmlmepriv)));
+ }
+
+ /* s5. Cancel assoc_timer */
+ _cancel_timer(&pmlmepriv->assoc_timer, &timer_cancelled);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("Cancle assoc_timer\n"));
+
+ } else {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_joinbss_event_callback err: fw_state:%x", get_fwstate(pmlmepriv)));
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ goto ignore_joinbss_callback;
+ }
+
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+
+ } else if (pnetwork->join_res == -4) {
+ rtw_reset_securitypriv(adapter);
+ _set_timer(&pmlmepriv->assoc_timer, 1);
+
+ if ((check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) == true) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("fail! clear _FW_UNDER_LINKING ^^^fw_state=%x\n", get_fwstate(pmlmepriv)));
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+ }
+ } else { /* if join_res < 0 (join fails), then try again */
+ _set_timer(&pmlmepriv->assoc_timer, 1);
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+ }
+
+ignore_joinbss_callback:
+ _exit_critical_bh(&pmlmepriv->lock, &irql);
+ignore_nolock:
+_func_exit_;
+}
+
+void rtw_joinbss_event_callback(struct adapter *adapter, u8 *pbuf)
+{
+ struct wlan_network *pnetwork = (struct wlan_network *)pbuf;
+
+_func_enter_;
+
+ mlmeext_joinbss_event_callback(adapter, pnetwork->join_res);
+
+ rtw_os_xmit_schedule(adapter);
+
+_func_exit_;
+}
+
+static u8 search_max_mac_id(struct adapter *padapter)
+{
+ u8 mac_id;
+#if defined (CONFIG_88EU_AP_MODE)
+ u8 aid;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+#endif
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+#if defined (CONFIG_88EU_AP_MODE)
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ for (aid = (pstapriv->max_num_sta); aid > 0; aid--) {
+ if (pstapriv->sta_aid[aid-1] != NULL)
+ break;
+ }
+ mac_id = aid + 1;
+ } else
+#endif
+ {/* adhoc id = 31~2 */
+ for (mac_id = (NUM_STA-1); mac_id >= IBSS_START_MAC_ID; mac_id--) {
+ if (pmlmeinfo->FW_sta_info[mac_id].status == 1)
+ break;
+ }
+ }
+ return mac_id;
+}
+
+/* FOR AP , AD-HOC mode */
+void rtw_stassoc_hw_rpt(struct adapter *adapter, struct sta_info *psta)
+{
+ u16 media_status;
+ u8 macid;
+
+ if (psta == NULL)
+ return;
+
+ macid = search_max_mac_id(adapter);
+ rtw_hal_set_hwreg(adapter, HW_VAR_TX_RPT_MAX_MACID, (u8 *)&macid);
+ media_status = (psta->mac_id<<8)|1; /* MACID|OPMODE:1 connect */
+ rtw_hal_set_hwreg(adapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status);
+}
+
+void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
+{
+ unsigned long irql;
+ struct sta_info *psta;
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+ struct stassoc_event *pstassoc = (struct stassoc_event *)pbuf;
+ struct wlan_network *cur_network = &(pmlmepriv->cur_network);
+ struct wlan_network *ptarget_wlan = NULL;
+
+_func_enter_;
+
+ if (rtw_access_ctrl(adapter, pstassoc->macaddr) == false)
+ return;
+
+#if defined (CONFIG_88EU_AP_MODE)
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ psta = rtw_get_stainfo(&adapter->stapriv, pstassoc->macaddr);
+ if (psta) {
+ ap_sta_info_defer_update(adapter, psta);
+ rtw_stassoc_hw_rpt(adapter, psta);
+ }
+ goto exit;
+ }
+#endif
+ /* for AD-HOC mode */
+ psta = rtw_get_stainfo(&adapter->stapriv, pstassoc->macaddr);
+ if (psta != NULL) {
+ /* the sta is already in sta_info_queue => do nothing */
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Error: rtw_stassoc_event_callback: sta has been in sta_hash_queue\n"));
+ goto exit; /* the driver has received this event before, but the fw has not yet set the key to CAM_ENTRY */
+ }
+ psta = rtw_alloc_stainfo(&adapter->stapriv, pstassoc->macaddr);
+ if (psta == NULL) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't alloc sta_info when rtw_stassoc_event_callback\n"));
+ goto exit;
+ }
+ /* to do: init sta_info variable */
+ psta->qos_option = 0;
+ psta->mac_id = (uint)pstassoc->cam_id;
+ DBG_88E("%s\n", __func__);
+ /* for ad-hoc mode */
+ rtw_hal_set_odm_var(adapter, HAL_ODM_STA_INFO, psta, true);
+ rtw_stassoc_hw_rpt(adapter, psta);
+ if (adapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
+ psta->dot118021XPrivacy = adapter->securitypriv.dot11PrivacyAlgrthm;
+ psta->ieee8021x_blocked = false;
+ _enter_critical_bh(&pmlmepriv->lock, &irql);
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE))) {
+ if (adapter->stapriv.asoc_sta_count == 2) {
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ ptarget_wlan = rtw_find_network(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
+ if (ptarget_wlan)
+ ptarget_wlan->fixed = true;
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ /* a sta + bc/mc_stainfo (not Ibss_stainfo) */
+ rtw_indicate_connect(adapter);
+ }
+ }
+ _exit_critical_bh(&pmlmepriv->lock, &irql);
+ mlmeext_sta_add_event_callback(adapter, psta);
+exit:
+_func_exit_;
+}
+
+void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
+{
+ unsigned long irql, irql2;
+ int mac_id = -1;
+ struct sta_info *psta;
+ struct wlan_network *pwlan = NULL;
+ struct wlan_bssid_ex *pdev_network = NULL;
+ u8 *pibss = NULL;
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+ struct stadel_event *pstadel = (struct stadel_event *)pbuf;
+ struct sta_priv *pstapriv = &adapter->stapriv;
+ struct wlan_network *tgt_network = &(pmlmepriv->cur_network);
+
+_func_enter_;
+
+ psta = rtw_get_stainfo(&adapter->stapriv, pstadel->macaddr);
+ if (psta)
+ mac_id = psta->mac_id;
+ else
+ mac_id = pstadel->mac_id;
+
+ DBG_88E("%s(mac_id=%d)=%pM\n", __func__, mac_id, pstadel->macaddr);
+
+ if (mac_id >= 0) {
+ u16 media_status;
+ media_status = (mac_id<<8)|0; /* MACID|OPMODE:0 means disconnect */
+ /* for STA, AP, ADHOC mode, report disconnect status to FW */
+ rtw_hal_set_hwreg(adapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status);
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
+ return;
+
+ mlmeext_sta_del_event_callback(adapter);
+
+ _enter_critical_bh(&pmlmepriv->lock, &irql2);
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
+ if (pmlmepriv->to_roaming > 0)
+ pmlmepriv->to_roaming--; /* this stadel_event is caused by roaming, decrease to_roaming */
+ else if (pmlmepriv->to_roaming == 0)
+ pmlmepriv->to_roaming = adapter->registrypriv.max_roaming_times;
+
+ if (*((unsigned short *)(pstadel->rsvd)) != WLAN_REASON_EXPIRATION_CHK)
+ pmlmepriv->to_roaming = 0; /* don't roam */
+
+ rtw_free_uc_swdec_pending_queue(adapter);
+
+ rtw_free_assoc_resources(adapter, 1);
+ rtw_indicate_disconnect(adapter);
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ /* remove the network entry in scanned_queue */
+ pwlan = rtw_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress);
+ if (pwlan) {
+ pwlan->fixed = false;
+ rtw_free_network_nolock(pmlmepriv, pwlan);
+ }
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ _rtw_roaming(adapter, tgt_network);
+ }
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ||
+ check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
+ _enter_critical_bh(&(pstapriv->sta_hash_lock), &irql);
+ rtw_free_stainfo(adapter, psta);
+ _exit_critical_bh(&(pstapriv->sta_hash_lock), &irql);
+
+ if (adapter->stapriv.asoc_sta_count == 1) { /* a sta + bc/mc_stainfo (not Ibss_stainfo) */
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ /* free old ibss network */
+ pwlan = rtw_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress);
+ if (pwlan) {
+ pwlan->fixed = false;
+ rtw_free_network_nolock(pmlmepriv, pwlan);
+ }
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ /* re-create ibss */
+ pdev_network = &(adapter->registrypriv.dev_network);
+ pibss = adapter->registrypriv.dev_network.MacAddress;
+
+ memcpy(pdev_network, &tgt_network->network, get_wlan_bssid_ex_sz(&tgt_network->network));
+
+ _rtw_memset(&pdev_network->Ssid, 0, sizeof(struct ndis_802_11_ssid));
+ memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
+
+ rtw_update_registrypriv_dev_network(adapter);
+
+ rtw_generate_random_ibss(pibss);
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
+ set_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE);
+ _clr_fwstate_(pmlmepriv, WIFI_ADHOC_STATE);
+ }
+
+ if (rtw_createbss_cmd(adapter) != _SUCCESS)
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("***Error=>stadel_event_callback: rtw_createbss_cmd status FAIL***\n "));
+ }
+ }
+ _exit_critical_bh(&pmlmepriv->lock, &irql2);
+_func_exit_;
+}
+
+void rtw_cpwm_event_callback(struct adapter *padapter, u8 *pbuf)
+{
+_func_enter_;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("+rtw_cpwm_event_callback !!!\n"));
+_func_exit_;
+}
+
+/*
+* _rtw_join_timeout_handler - Timeout/failure handler for CMD JoinBss
+* @adapter: pointer to struct adapter structure
+*/
+void _rtw_join_timeout_handler (struct adapter *adapter)
+{
+ unsigned long irql;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ int do_join_r;
+
+_func_enter_;
+
+ DBG_88E("%s, fw_state=%x\n", __func__, get_fwstate(pmlmepriv));
+
+ if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
+ return;
+
+
+ _enter_critical_bh(&pmlmepriv->lock, &irql);
+
+ if (pmlmepriv->to_roaming > 0) { /* join timeout caused by roaming */
+ while (1) {
+ pmlmepriv->to_roaming--;
+ if (pmlmepriv->to_roaming != 0) { /* try another roaming attempt */
+ DBG_88E("%s try another roaming\n", __func__);
+ do_join_r = rtw_do_join(adapter);
+ if (_SUCCESS != do_join_r) {
+ DBG_88E("%s roaming do_join return %d\n", __func__ , do_join_r);
+ continue;
+ }
+ break;
+ } else {
+ DBG_88E("%s We've try roaming but fail\n", __func__);
+ rtw_indicate_disconnect(adapter);
+ break;
+ }
+ }
+ } else {
+ rtw_indicate_disconnect(adapter);
+ free_scanqueue(pmlmepriv);
+ }
+ _exit_critical_bh(&pmlmepriv->lock, &irql);
+_func_exit_;
+}
+
+/*
+* rtw_scan_timeout_handler - Timeout/Failure handler for CMD SiteSurvey
+* @adapter: pointer to struct adapter structure
+*/
+void rtw_scan_timeout_handler (struct adapter *adapter)
+{
+ unsigned long irql;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+
+ DBG_88E(FUNC_ADPT_FMT" fw_state=%x\n", FUNC_ADPT_ARG(adapter), get_fwstate(pmlmepriv));
+ _enter_critical_bh(&pmlmepriv->lock, &irql);
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
+ _exit_critical_bh(&pmlmepriv->lock, &irql);
+ rtw_indicate_scan_done(adapter, true);
+}
+
+static void rtw_auto_scan_handler(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ /* auto site survey per 60sec */
+ if (pmlmepriv->scan_interval > 0) {
+ pmlmepriv->scan_interval--;
+ if (pmlmepriv->scan_interval == 0) {
+ DBG_88E("%s\n", __func__);
+ rtw_set_802_11_bssid_list_scan(padapter, NULL, 0);
+ pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */
+ }
+ }
+}
+
+void rtw_dynamic_check_timer_handlder(struct adapter *adapter)
+{
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ struct registry_priv *pregistrypriv = &adapter->registrypriv;
+
+ if (!adapter)
+ return;
+ if (!adapter->hw_init_completed)
+ return;
+ if ((adapter->bDriverStopped) || (adapter->bSurpriseRemoved))
+ return;
+ if (adapter->net_closed)
+ return;
+ rtw_dynamic_chk_wk_cmd(adapter);
+
+ if (pregistrypriv->wifi_spec == 1) {
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &adapter->wdinfo;
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+#endif
+ {
+ /* auto site survey */
+ rtw_auto_scan_handler(adapter);
+ }
+ }
+
+ rcu_read_lock();
+
+ if (rcu_dereference(adapter->pnetdev->rx_handler_data) &&
+ (check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE) == true)) {
+ /* expire NAT2.5 entry */
+ nat25_db_expire(adapter);
+
+ if (adapter->pppoe_connection_in_progress > 0) {
+ adapter->pppoe_connection_in_progress--;
+ }
+
+ /* rtw_dynamic_check_timer_handlder() is called every 2 seconds, so decrement a second time */
+ if (adapter->pppoe_connection_in_progress > 0) {
+ adapter->pppoe_connection_in_progress--;
+ }
+ }
+
+ rcu_read_unlock();
+}
+
+#define RTW_SCAN_RESULT_EXPIRE 2000
+
+/*
+* Select a new join candidate from the original @param candidate and @param competitor
+* @return true: candidate is updated
+* @return false: candidate is not updated
+*/
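+/* In short: among scanned entries that match the requested BSSID/SSID and pass */
+/* rtw_is_desired_network(), the competitor with the highest RSSI becomes the candidate. */
+/* A minimal usage sketch (matching the call in rtw_select_and_join_from_scanned_queue()): */
+/*	rtw_check_join_candidate(pmlmepriv, &candidate, pnetwork); */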
+static int rtw_check_join_candidate(struct mlme_priv *pmlmepriv
+ , struct wlan_network **candidate, struct wlan_network *competitor)
+{
+ int updated = false;
+ struct adapter *adapter = container_of(pmlmepriv, struct adapter, mlmepriv);
+
+
+ /* check bssid, if needed */
+ if (pmlmepriv->assoc_by_bssid) {
+ if (!_rtw_memcmp(competitor->network.MacAddress, pmlmepriv->assoc_bssid, ETH_ALEN))
+ goto exit;
+ }
+
+ /* check ssid, if needed */
+ if (pmlmepriv->assoc_ssid.Ssid && pmlmepriv->assoc_ssid.SsidLength) {
+ if (competitor->network.Ssid.SsidLength != pmlmepriv->assoc_ssid.SsidLength ||
+ _rtw_memcmp(competitor->network.Ssid.Ssid, pmlmepriv->assoc_ssid.Ssid, pmlmepriv->assoc_ssid.SsidLength) == false)
+ goto exit;
+ }
+
+ if (rtw_is_desired_network(adapter, competitor) == false)
+ goto exit;
+
+ if (pmlmepriv->to_roaming) {
+ if (rtw_get_passing_time_ms((u32)competitor->last_scanned) >= RTW_SCAN_RESULT_EXPIRE ||
+ is_same_ess(&competitor->network, &pmlmepriv->cur_network.network) == false)
+ goto exit;
+ }
+
+ if (*candidate == NULL || (*candidate)->network.Rssi < competitor->network.Rssi) {
+ *candidate = competitor;
+ updated = true;
+ }
+ if (updated) {
+ DBG_88E("[by_bssid:%u][assoc_ssid:%s]new candidate: %s(%pM rssi:%d\n",
+ pmlmepriv->assoc_by_bssid,
+ pmlmepriv->assoc_ssid.Ssid,
+ (*candidate)->network.Ssid.Ssid,
+ (*candidate)->network.MacAddress,
+ (int)(*candidate)->network.Rssi);
+ DBG_88E("[to_roaming:%u]\n", pmlmepriv->to_roaming);
+ }
+
+exit:
+ return updated;
+}
+
+/*
+Calling context:
+The caller of the sub-routine will be in critical section...
+The caller must hold the following spinlock
+pmlmepriv->lock
+*/
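+/* For illustration only, a caller is expected to follow roughly this pattern: */
+/*	_enter_critical_bh(&pmlmepriv->lock, &irql); */
+/*	ret = rtw_select_and_join_from_scanned_queue(pmlmepriv); */
+/*	_exit_critical_bh(&pmlmepriv->lock, &irql); */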
+
+int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
+{
+ unsigned long irql;
+ int ret;
+ struct list_head *phead;
+ struct adapter *adapter;
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ struct wlan_network *candidate = NULL;
+ u8 supp_ant_div = false;
+
+_func_enter_;
+
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ phead = get_list_head(queue);
+ adapter = (struct adapter *)pmlmepriv->nic_hdl;
+ pmlmepriv->pscanned = get_next(phead);
+ while (!rtw_end_of_queue_search(phead, pmlmepriv->pscanned)) {
+ pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned, struct wlan_network, list);
+ if (pnetwork == NULL) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s return _FAIL:(pnetwork==NULL)\n", __func__));
+ ret = _FAIL;
+ goto exit;
+ }
+ pmlmepriv->pscanned = get_next(pmlmepriv->pscanned);
+ rtw_check_join_candidate(pmlmepriv, &candidate, pnetwork);
+ }
+ if (candidate == NULL) {
+ DBG_88E("%s: return _FAIL(candidate==NULL)\n", __func__);
+ ret = _FAIL;
+ goto exit;
+ } else {
+ DBG_88E("%s: candidate: %s(%pM ch:%u)\n", __func__,
+ candidate->network.Ssid.Ssid, candidate->network.MacAddress,
+ candidate->network.Configuration.DSConfig);
+ }
+
+
+ /* check for situation of _FW_LINKED */
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ DBG_88E("%s: _FW_LINKED while ask_for_joinbss!!!\n", __func__);
+
+ rtw_disassoc_cmd(adapter, 0, true);
+ rtw_indicate_disconnect(adapter);
+ rtw_free_assoc_resources(adapter, 0);
+ }
+
+ rtw_hal_get_def_var(adapter, HAL_DEF_IS_SUPPORT_ANT_DIV, &(supp_ant_div));
+ if (supp_ant_div) {
+ u8 cur_ant;
+ rtw_hal_get_def_var(adapter, HAL_DEF_CURRENT_ANTENNA, &(cur_ant));
+ DBG_88E("#### Opt_Ant_(%s), cur_Ant(%s)\n",
+ (2 == candidate->network.PhyInfo.Optimum_antenna) ? "A" : "B",
+ (2 == cur_ant) ? "A" : "B"
+ );
+ }
+
+ ret = rtw_joinbss_cmd(adapter, candidate);
+
+exit:
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+
+_func_exit_;
+
+ return ret;
+}
+
+int rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv)
+{
+ struct cmd_obj *pcmd;
+ struct setauth_parm *psetauthparm;
+ struct cmd_priv *pcmdpriv = &(adapter->cmdpriv);
+ int res = _SUCCESS;
+
+_func_enter_;
+
+ pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd == NULL) {
+ res = _FAIL; /* try again */
+ goto exit;
+ }
+
+ psetauthparm = (struct setauth_parm *)rtw_zmalloc(sizeof(struct setauth_parm));
+ if (psetauthparm == NULL) {
+ kfree(pcmd);
+ res = _FAIL;
+ goto exit;
+ }
+ _rtw_memset(psetauthparm, 0, sizeof(struct setauth_parm));
+ psetauthparm->mode = (unsigned char)psecuritypriv->dot11AuthAlgrthm;
+ pcmd->cmdcode = _SetAuth_CMD_;
+ pcmd->parmbuf = (unsigned char *)psetauthparm;
+ pcmd->cmdsz = (sizeof(struct setauth_parm));
+ pcmd->rsp = NULL;
+ pcmd->rspsz = 0;
+ _rtw_init_listhead(&pcmd->list);
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("after enqueue set_auth_cmd, auth_mode=%x\n",
+ psecuritypriv->dot11AuthAlgrthm));
+ res = rtw_enqueue_cmd(pcmdpriv, pcmd);
+exit:
+_func_exit_;
+ return res;
+}
+
+int rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, int keyid, u8 set_tx)
+{
+ u8 keylen;
+ struct cmd_obj *pcmd;
+ struct setkey_parm *psetkeyparm;
+ struct cmd_priv *pcmdpriv = &(adapter->cmdpriv);
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+ int res = _SUCCESS;
+
+_func_enter_;
+ pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd == NULL) {
+ res = _FAIL; /* try again */
+ goto exit;
+ }
+ psetkeyparm = (struct setkey_parm *)rtw_zmalloc(sizeof(struct setkey_parm));
+ if (psetkeyparm == NULL) {
+ kfree(pcmd);
+ res = _FAIL;
+ goto exit;
+ }
+
+ _rtw_memset(psetkeyparm, 0, sizeof(struct setkey_parm));
+
+ if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) {
+ psetkeyparm->algorithm = (unsigned char)psecuritypriv->dot118021XGrpPrivacy;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("\n rtw_set_key: psetkeyparm->algorithm=(unsigned char)psecuritypriv->dot118021XGrpPrivacy=%d\n",
+ psetkeyparm->algorithm));
+ } else {
+ psetkeyparm->algorithm = (u8)psecuritypriv->dot11PrivacyAlgrthm;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("\n rtw_set_key: psetkeyparm->algorithm=(u8)psecuritypriv->dot11PrivacyAlgrthm=%d\n",
+ psetkeyparm->algorithm));
+ }
+ psetkeyparm->keyid = (u8)keyid;/* 0~3 */
+ psetkeyparm->set_tx = set_tx;
+ pmlmepriv->key_mask |= BIT(psetkeyparm->keyid);
+ DBG_88E("==> rtw_set_key algorithm(%x), keyid(%x), key_mask(%x)\n",
+ psetkeyparm->algorithm, psetkeyparm->keyid, pmlmepriv->key_mask);
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("\n rtw_set_key: psetkeyparm->algorithm=%d psetkeyparm->keyid=(u8)keyid=%d\n",
+ psetkeyparm->algorithm, keyid));
+
+ switch (psetkeyparm->algorithm) {
+ case _WEP40_:
+ keylen = 5;
+ memcpy(&(psetkeyparm->key[0]), &(psecuritypriv->dot11DefKey[keyid].skey[0]), keylen);
+ break;
+ case _WEP104_:
+ keylen = 13;
+ memcpy(&(psetkeyparm->key[0]), &(psecuritypriv->dot11DefKey[keyid].skey[0]), keylen);
+ break;
+ case _TKIP_:
+ keylen = 16;
+ memcpy(&psetkeyparm->key, &psecuritypriv->dot118021XGrpKey[keyid], keylen);
+ psetkeyparm->grpkey = 1;
+ break;
+ case _AES_:
+ keylen = 16;
+ memcpy(&psetkeyparm->key, &psecuritypriv->dot118021XGrpKey[keyid], keylen);
+ psetkeyparm->grpkey = 1;
+ break;
+ default:
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("\n rtw_set_key:psecuritypriv->dot11PrivacyAlgrthm=%x (must be 1 or 2 or 4 or 5)\n",
+ psecuritypriv->dot11PrivacyAlgrthm));
+ res = _FAIL;
+ goto exit;
+ }
+ pcmd->cmdcode = _SetKey_CMD_;
+ pcmd->parmbuf = (u8 *)psetkeyparm;
+ pcmd->cmdsz = (sizeof(struct setkey_parm));
+ pcmd->rsp = NULL;
+ pcmd->rspsz = 0;
+ _rtw_init_listhead(&pcmd->list);
+ res = rtw_enqueue_cmd(pcmdpriv, pcmd);
+exit:
+_func_exit_;
+ return res;
+}
+
+/* adjust IEs for rtw_joinbss_cmd in WMM */
+int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_len, uint initial_out_len)
+{
+ unsigned int ielength = 0;
+ unsigned int i, j;
+
+ i = 12; /* after the fixed IE */
+ while (i < in_len) {
+ ielength = initial_out_len;
+
+ if (in_ie[i] == 0xDD && in_ie[i+2] == 0x00 && in_ie[i+3] == 0x50 && in_ie[i+4] == 0xF2 && in_ie[i+5] == 0x02 && i+5 < in_len) {
+ /* WMM element ID and OUI */
+ /* Append WMM IE to the last index of out_ie */
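+ /* i.e. the leading bytes of a WMM information element look like: */
+ /*	DD <len> 00 50 F2 02 00 01 ...  (vendor IE 0xDD, Microsoft OUI 00:50:F2, OUI type 2 = WMM) */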
+
+ for (j = i; j < i + 9; j++) {
+ out_ie[ielength] = in_ie[j];
+ ielength++;
+ }
+ out_ie[initial_out_len + 1] = 0x07;
+ out_ie[initial_out_len + 6] = 0x00;
+ out_ie[initial_out_len + 8] = 0x00;
+ break;
+ }
+ i += (in_ie[i+1]+2); /* to the next IE element */
+ }
+ return ielength;
+}
+
+/* */
+/* Ported from 8185: IsInPreAuthKeyList(). (Renamed from SecIsInPreAuthKeyList(), 2006-10-13.) */
+/* Added by Annie, 2006-05-07. */
+/* */
+/* Search by BSSID, */
+/* Return Value: */
+/* -1 :if there is no pre-auth key in the table */
+/* >= 0 :if there is pre-auth key, and return the entry id */
+/* */
+/* */
+
+static int SecIsInPMKIDList(struct adapter *Adapter, u8 *bssid)
+{
+ struct security_priv *psecuritypriv = &Adapter->securitypriv;
+ int i = 0;
+
+ do {
+ if ((psecuritypriv->PMKIDList[i].bUsed) &&
+ (_rtw_memcmp(psecuritypriv->PMKIDList[i].Bssid, bssid, ETH_ALEN) == true)) {
+ break;
+ } else {
+ i++;
+ /* continue; */
+ }
+
+ } while (i < NUM_PMKID_CACHE);
+
+ if (i == NUM_PMKID_CACHE) {
+ i = -1;/* Could not find. */
+ } else {
+ /* There is one Pre-Authentication Key for the specific BSSID. */
+ }
+ return i;
+}
+
+/* */
+/* Check the RSN IE length */
+/* If the RSN IE length is <= 20, the RSN IE doesn't include the PMKID information */
+/* elements 0-11 of the array are the fixed IEs */
+/* element 12 of the array is the IE element ID */
+/* element 13 of the array is the IE length */
+/* */
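+/* For example, with one cached PMKID and ie[13] == 20, rtw_append_pmkid() below appends the */
+/* 2-byte PMKID count 0x0001 (stored little endian as 01 00) followed by the 16-byte PMKID, */
+/* and raises ie[13] from 20 to 38 (20 + 2 + 16). */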
+
+static int rtw_append_pmkid(struct adapter *Adapter, int iEntry, u8 *ie, uint ie_len)
+{
+ struct security_priv *psecuritypriv = &Adapter->securitypriv;
+
+ if (ie[13] <= 20) {
+ /* The RSN IE doesn't include the PMKID; append the PMKID information */
+ ie[ie_len] = 1;
+ ie_len++;
+ ie[ie_len] = 0; /* PMKID count = 0x0001 (bytes 01 00, little endian) */
+ ie_len++;
+ memcpy(&ie[ie_len], &psecuritypriv->PMKIDList[iEntry].PMKID, 16);
+
+ ie_len += 16;
+ ie[13] += 18;/* PMKID length = 2+16 */
+ }
+ return ie_len;
+}
+
+int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_len)
+{
+ u8 authmode;
+ uint ielength;
+ int iEntry;
+
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ struct security_priv *psecuritypriv = &adapter->securitypriv;
+ uint ndisauthmode = psecuritypriv->ndisauthtype;
+ uint ndissecuritytype = psecuritypriv->ndisencryptstatus;
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
+ ("+rtw_restruct_sec_ie: ndisauthmode=%d ndissecuritytype=%d\n",
+ ndisauthmode, ndissecuritytype));
+
+ /* copy fixed ie only */
+ memcpy(out_ie, in_ie, 12);
+ ielength = 12;
+ if ((ndisauthmode == Ndis802_11AuthModeWPA) ||
+ (ndisauthmode == Ndis802_11AuthModeWPAPSK))
+ authmode = _WPA_IE_ID_;
+ if ((ndisauthmode == Ndis802_11AuthModeWPA2) ||
+ (ndisauthmode == Ndis802_11AuthModeWPA2PSK))
+ authmode = _WPA2_IE_ID_;
+
+ if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) {
+ memcpy(out_ie+ielength, psecuritypriv->wps_ie, psecuritypriv->wps_ie_len);
+
+ ielength += psecuritypriv->wps_ie_len;
+ } else if ((authmode == _WPA_IE_ID_) || (authmode == _WPA2_IE_ID_)) {
+ /* copy RSN or SSN */
+ memcpy(&out_ie[ielength], &psecuritypriv->supplicant_ie[0], psecuritypriv->supplicant_ie[1]+2);
+ ielength += psecuritypriv->supplicant_ie[1]+2;
+ rtw_report_sec_ie(adapter, authmode, psecuritypriv->supplicant_ie);
+ }
+
+ iEntry = SecIsInPMKIDList(adapter, pmlmepriv->assoc_bssid);
+ if (iEntry < 0) {
+ return ielength;
+ } else {
+ if (authmode == _WPA2_IE_ID_)
+ ielength = rtw_append_pmkid(adapter, iEntry, out_ie, ielength);
+ }
+
+_func_exit_;
+
+ return ielength;
+}
+
+void rtw_init_registrypriv_dev_network(struct adapter *adapter)
+{
+ struct registry_priv *pregistrypriv = &adapter->registrypriv;
+ struct eeprom_priv *peepriv = &adapter->eeprompriv;
+ struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
+ u8 *myhwaddr = myid(peepriv);
+
+_func_enter_;
+
+ memcpy(pdev_network->MacAddress, myhwaddr, ETH_ALEN);
+
+ memcpy(&pdev_network->Ssid, &pregistrypriv->ssid, sizeof(struct ndis_802_11_ssid));
+
+ pdev_network->Configuration.Length = sizeof(struct ndis_802_11_config);
+ pdev_network->Configuration.BeaconPeriod = 100;
+ pdev_network->Configuration.FHConfig.Length = 0;
+ pdev_network->Configuration.FHConfig.HopPattern = 0;
+ pdev_network->Configuration.FHConfig.HopSet = 0;
+ pdev_network->Configuration.FHConfig.DwellTime = 0;
+
+_func_exit_;
+}
+
+void rtw_update_registrypriv_dev_network(struct adapter *adapter)
+{
+ int sz = 0;
+ struct registry_priv *pregistrypriv = &adapter->registrypriv;
+ struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
+ struct security_priv *psecuritypriv = &adapter->securitypriv;
+ struct wlan_network *cur_network = &adapter->mlmepriv.cur_network;
+
+_func_enter_;
+
+ pdev_network->Privacy = (psecuritypriv->dot11PrivacyAlgrthm > 0 ? 1 : 0); /* adhoc no 802.1x */
+
+ pdev_network->Rssi = 0;
+
+ switch (pregistrypriv->wireless_mode) {
+ case WIRELESS_11B:
+ pdev_network->NetworkTypeInUse = (Ndis802_11DS);
+ break;
+ case WIRELESS_11G:
+ case WIRELESS_11BG:
+ case WIRELESS_11_24N:
+ case WIRELESS_11G_24N:
+ case WIRELESS_11BG_24N:
+ pdev_network->NetworkTypeInUse = (Ndis802_11OFDM24);
+ break;
+ case WIRELESS_11A:
+ case WIRELESS_11A_5N:
+ pdev_network->NetworkTypeInUse = (Ndis802_11OFDM5);
+ break;
+ case WIRELESS_11ABGN:
+ if (pregistrypriv->channel > 14)
+ pdev_network->NetworkTypeInUse = (Ndis802_11OFDM5);
+ else
+ pdev_network->NetworkTypeInUse = (Ndis802_11OFDM24);
+ break;
+ default:
+ /* TODO */
+ break;
+ }
+
+ pdev_network->Configuration.DSConfig = (pregistrypriv->channel);
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("pregistrypriv->channel=%d, pdev_network->Configuration.DSConfig=0x%x\n",
+ pregistrypriv->channel, pdev_network->Configuration.DSConfig));
+
+ if (cur_network->network.InfrastructureMode == Ndis802_11IBSS)
+ pdev_network->Configuration.ATIMWindow = (0);
+
+ pdev_network->InfrastructureMode = (cur_network->network.InfrastructureMode);
+
+ /* 1. Supported rates */
+ /* 2. IE */
+
+ sz = rtw_generate_ie(pregistrypriv);
+ pdev_network->IELength = sz;
+ pdev_network->Length = get_wlan_bssid_ex_sz((struct wlan_bssid_ex *)pdev_network);
+
+ /* notes: translate IELength & Length after assigning the Length to cmdsz in createbss_cmd(); */
+ /* pdev_network->IELength = cpu_to_le32(sz); */
+_func_exit_;
+}
+
+void rtw_get_encrypt_decrypt_from_registrypriv(struct adapter *adapter)
+{
+_func_enter_;
+_func_exit_;
+}
+
+/* the function is at passive_level */
+void rtw_joinbss_reset(struct adapter *padapter)
+{
+ u8 threshold;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+
+ /* todo: if you want to do any io/reg/hw setting before join_bss, please add the code here */
+ pmlmepriv->num_FortyMHzIntolerant = 0;
+
+ pmlmepriv->num_sta_no_ht = 0;
+
+ phtpriv->ampdu_enable = false;/* reset to disabled */
+
+ /* TH = 1 => disable usb rx aggregation */
+ /* TH = 0 => enable usb rx aggregation, use the init value. */
+ if (phtpriv->ht_option) {
+ if (padapter->registrypriv.wifi_spec == 1)
+ threshold = 1;
+ else
+ threshold = 0;
+ rtw_hal_set_hwreg(padapter, HW_VAR_RXDMA_AGG_PG_TH, (u8 *)(&threshold));
+ } else {
+ threshold = 1;
+ rtw_hal_set_hwreg(padapter, HW_VAR_RXDMA_AGG_PG_TH, (u8 *)(&threshold));
+ }
+}
+
+/* the function is >= passive_level */
+unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_ie, uint in_len, uint *pout_len)
+{
+ u32 ielen, out_len;
+ enum ht_cap_ampdu_factor max_rx_ampdu_factor;
+ unsigned char *p;
+ struct rtw_ieee80211_ht_cap ht_capie;
+ unsigned char WMM_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01, 0x00};
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct qos_priv *pqospriv = &pmlmepriv->qospriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+ u32 rx_packet_offset, max_recvbuf_sz;
+
+
+ phtpriv->ht_option = false;
+
+ p = rtw_get_ie(in_ie+12, _HT_CAPABILITY_IE_, &ielen, in_len-12);
+
+ if (p && ielen > 0) {
+ if (pqospriv->qos_option == 0) {
+ out_len = *pout_len;
+ rtw_set_ie(out_ie+out_len, _VENDOR_SPECIFIC_IE_,
+ _WMM_IE_Length_, WMM_IE, pout_len);
+
+ pqospriv->qos_option = 1;
+ }
+
+ out_len = *pout_len;
+
+ _rtw_memset(&ht_capie, 0, sizeof(struct rtw_ieee80211_ht_cap));
+
+ ht_capie.cap_info = IEEE80211_HT_CAP_SUP_WIDTH |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_TX_STBC |
+ IEEE80211_HT_CAP_DSSSCCK40;
+
+ rtw_hal_get_def_var(padapter, HAL_DEF_RX_PACKET_OFFSET, &rx_packet_offset);
+ rtw_hal_get_def_var(padapter, HAL_DEF_MAX_RECVBUF_SZ, &max_recvbuf_sz);
+
+ /*
+ AMPDU_para [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
+ AMPDU_para [4:2]:Min MPDU Start Spacing
+ */
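+ /* e.g. a 64k max A-MPDU (factor 3) combined with the AES density set below */
+ /* (7, i.e. 16 us) gives ampdu_params_info = (7 << 2) | 3 = 0x1f */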
+
+ rtw_hal_get_def_var(padapter, HW_VAR_MAX_RX_AMPDU_FACTOR, &max_rx_ampdu_factor);
+ ht_capie.ampdu_params_info = (max_rx_ampdu_factor&0x03);
+
+ if (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)
+ ht_capie.ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY&(0x07<<2));
+ else
+ ht_capie.ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY&0x00);
+
+
+ rtw_set_ie(out_ie+out_len, _HT_CAPABILITY_IE_,
+ sizeof(struct rtw_ieee80211_ht_cap), (unsigned char *)&ht_capie, pout_len);
+
+ phtpriv->ht_option = true;
+
+ p = rtw_get_ie(in_ie+12, _HT_ADD_INFO_IE_, &ielen, in_len-12);
+ if (p && (ielen == sizeof(struct ieee80211_ht_addt_info))) {
+ out_len = *pout_len;
+ rtw_set_ie(out_ie+out_len, _HT_ADD_INFO_IE_, ielen, p+2, pout_len);
+ }
+ }
+ return phtpriv->ht_option;
+}
+
+/* the function is > passive_level (in critical_section) */
+void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len)
+{
+ u8 *p, max_ampdu_sz;
+ int len;
+ struct rtw_ieee80211_ht_cap *pht_capie;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (!phtpriv->ht_option)
+ return;
+
+ if ((!pmlmeinfo->HT_info_enable) || (!pmlmeinfo->HT_caps_enable))
+ return;
+
+ DBG_88E("+rtw_update_ht_cap()\n");
+
+ /* maybe we need to check whether the AP supports rx A-MPDU. */
+ if ((!phtpriv->ampdu_enable) && (pregistrypriv->ampdu_enable == 1)) {
+ if (pregistrypriv->wifi_spec == 1)
+ phtpriv->ampdu_enable = false;
+ else
+ phtpriv->ampdu_enable = true;
+ } else if (pregistrypriv->ampdu_enable == 2) {
+ phtpriv->ampdu_enable = true;
+ }
+
+
+ /* check Max Rx A-MPDU Size */
+ len = 0;
+ p = rtw_get_ie(pie+sizeof(struct ndis_802_11_fixed_ie), _HT_CAPABILITY_IE_, &len, ie_len-sizeof(struct ndis_802_11_fixed_ie));
+ if (p && len > 0) {
+ pht_capie = (struct rtw_ieee80211_ht_cap *)(p+2);
+ max_ampdu_sz = (pht_capie->ampdu_params_info & IEEE80211_HT_CAP_AMPDU_FACTOR);
+ max_ampdu_sz = 1 << (max_ampdu_sz+3); /* max_ampdu_sz (kbytes); */
+ phtpriv->rx_ampdu_maxlen = max_ampdu_sz;
+ }
+ len = 0;
+ p = rtw_get_ie(pie+sizeof(struct ndis_802_11_fixed_ie), _HT_ADD_INFO_IE_, &len, ie_len-sizeof(struct ndis_802_11_fixed_ie));
+
+ /* update cur_bwmode & cur_ch_offset */
+ if ((pregistrypriv->cbw40_enable) &&
+ (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & BIT(1)) &&
+ (pmlmeinfo->HT_info.infos[0] & BIT(2))) {
+ int i;
+ u8 rf_type;
+
+ padapter->HalFunc.GetHwRegHandler(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+
+ /* update the MCS rates */
+ for (i = 0; i < 16; i++) {
+ if ((rf_type == RF_1T1R) || (rf_type == RF_1T2R))
+ pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_1R[i];
+ else
+ pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_2R[i];
+ }
+ /* switch to the 40MHz mode according to the AP */
+ pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_40;
+ switch ((pmlmeinfo->HT_info.infos[0] & 0x3)) {
+ case HT_EXTCHNL_OFFSET_UPPER:
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER;
+ break;
+ case HT_EXTCHNL_OFFSET_LOWER:
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER;
+ break;
+ default:
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ break;
+ }
+ }
+
+ /* Config SM Power Save setting */
+ pmlmeinfo->SM_PS = (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & 0x0C) >> 2;
+ if (pmlmeinfo->SM_PS == WLAN_HT_CAP_SM_PS_STATIC)
+ DBG_88E("%s(): WLAN_HT_CAP_SM_PS_STATIC\n", __func__);
+
+ /* Config current HT Protection mode. */
+ pmlmeinfo->HT_protection = pmlmeinfo->HT_info.infos[1] & 0x3;
+}
+
+void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ u8 issued;
+ int priority;
+ struct sta_info *psta = NULL;
+ struct ht_priv *phtpriv;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ s32 bmcst = IS_MCAST(pattrib->ra);
+
+ if (bmcst || (padapter->mlmepriv.LinkDetectInfo.NumTxOkInPeriod < 100))
+ return;
+
+ priority = pattrib->priority;
+
+ if (pattrib->psta)
+ psta = pattrib->psta;
+ else
+ psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
+
+ if (psta == NULL)
+ return;
+
+ phtpriv = &psta->htpriv;
+
+ if ((phtpriv->ht_option) && (phtpriv->ampdu_enable)) {
+ issued = (phtpriv->agg_enable_bitmap>>priority)&0x1;
+ issued |= (phtpriv->candidate_tid_bitmap>>priority)&0x1;
+
+ if (0 == issued) {
+ DBG_88E("rtw_issue_addbareq_cmd, p=%d\n", priority);
+ psta->htpriv.candidate_tid_bitmap |= BIT((u8)priority);
+ rtw_addbareq_cmd(padapter, (u8) priority, pattrib->ra);
+ }
+ }
+}
+
+void rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
+{
+ unsigned long irql;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ _enter_critical_bh(&pmlmepriv->lock, &irql);
+ _rtw_roaming(padapter, tgt_network);
+ _exit_critical_bh(&pmlmepriv->lock, &irql);
+}
+void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ int do_join_r;
+
+ struct wlan_network *pnetwork;
+
+ if (tgt_network != NULL)
+ pnetwork = tgt_network;
+ else
+ pnetwork = &pmlmepriv->cur_network;
+
+ if (0 < pmlmepriv->to_roaming) {
+ DBG_88E("roaming from %s(%pM length:%d\n",
+ pnetwork->network.Ssid.Ssid, pnetwork->network.MacAddress,
+ pnetwork->network.Ssid.SsidLength);
+ memcpy(&pmlmepriv->assoc_ssid, &pnetwork->network.Ssid, sizeof(struct ndis_802_11_ssid));
+
+ pmlmepriv->assoc_by_bssid = false;
+
+ while (1) {
+ do_join_r = rtw_do_join(padapter);
+ if (_SUCCESS == do_join_r) {
+ break;
+ } else {
+ DBG_88E("roaming do_join return %d\n", do_join_r);
+ pmlmepriv->to_roaming--;
+
+ if (0 < pmlmepriv->to_roaming) {
+ continue;
+ } else {
+ DBG_88E("%s(%d) -to roaming fail, indicate_disconnect\n", __func__, __LINE__);
+ rtw_indicate_disconnect(padapter);
+ break;
+ }
+ }
+ }
+ }
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
new file mode 100644
index 0000000..4b2eb8e
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -0,0 +1,8481 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_MLME_EXT_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <wifi.h>
+#include <rtw_mlme_ext.h>
+#include <wlan_bssdef.h>
+#include <mlme_osdep.h>
+#include <recv_osdep.h>
+
+static struct mlme_handler mlme_sta_tbl[] = {
+ {WIFI_ASSOCREQ, "OnAssocReq", &OnAssocReq},
+ {WIFI_ASSOCRSP, "OnAssocRsp", &OnAssocRsp},
+ {WIFI_REASSOCREQ, "OnReAssocReq", &OnAssocReq},
+ {WIFI_REASSOCRSP, "OnReAssocRsp", &OnAssocRsp},
+ {WIFI_PROBEREQ, "OnProbeReq", &OnProbeReq},
+ {WIFI_PROBERSP, "OnProbeRsp", &OnProbeRsp},
+
+ /*----------------------------------------------------------
+ below 2 are reserved
+ -----------------------------------------------------------*/
+ {0, "DoReserved", &DoReserved},
+ {0, "DoReserved", &DoReserved},
+ {WIFI_BEACON, "OnBeacon", &OnBeacon},
+ {WIFI_ATIM, "OnATIM", &OnAtim},
+ {WIFI_DISASSOC, "OnDisassoc", &OnDisassoc},
+ {WIFI_AUTH, "OnAuth", &OnAuthClient},
+ {WIFI_DEAUTH, "OnDeAuth", &OnDeAuth},
+ {WIFI_ACTION, "OnAction", &OnAction},
+};
+
+static struct action_handler OnAction_tbl[] = {
+ {RTW_WLAN_CATEGORY_SPECTRUM_MGMT, "ACTION_SPECTRUM_MGMT", on_action_spct},
+ {RTW_WLAN_CATEGORY_QOS, "ACTION_QOS", &OnAction_qos},
+ {RTW_WLAN_CATEGORY_DLS, "ACTION_DLS", &OnAction_dls},
+ {RTW_WLAN_CATEGORY_BACK, "ACTION_BACK", &OnAction_back},
+ {RTW_WLAN_CATEGORY_PUBLIC, "ACTION_PUBLIC", on_action_public},
+ {RTW_WLAN_CATEGORY_RADIO_MEASUREMENT, "ACTION_RADIO_MEASUREMENT", &DoReserved},
+ {RTW_WLAN_CATEGORY_FT, "ACTION_FT", &DoReserved},
+ {RTW_WLAN_CATEGORY_HT, "ACTION_HT", &OnAction_ht},
+ {RTW_WLAN_CATEGORY_SA_QUERY, "ACTION_SA_QUERY", &DoReserved},
+ {RTW_WLAN_CATEGORY_WMM, "ACTION_WMM", &OnAction_wmm},
+ {RTW_WLAN_CATEGORY_P2P, "ACTION_P2P", &OnAction_p2p},
+};
+
+
+static u8 null_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
+
+/**************************************************
+OUI definitions for the vendor specific IE
+***************************************************/
+unsigned char RTW_WPA_OUI[] = {0x00, 0x50, 0xf2, 0x01};
+unsigned char WMM_OUI[] = {0x00, 0x50, 0xf2, 0x02};
+unsigned char WPS_OUI[] = {0x00, 0x50, 0xf2, 0x04};
+unsigned char P2P_OUI[] = {0x50, 0x6F, 0x9A, 0x09};
+unsigned char WFD_OUI[] = {0x50, 0x6F, 0x9A, 0x0A};
+
+unsigned char WMM_INFO_OUI[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01};
+unsigned char WMM_PARA_OUI[] = {0x00, 0x50, 0xf2, 0x02, 0x01, 0x01};
+
+unsigned char WPA_TKIP_CIPHER[4] = {0x00, 0x50, 0xf2, 0x02};
+unsigned char RSN_TKIP_CIPHER[4] = {0x00, 0x0f, 0xac, 0x02};
+
+extern unsigned char REALTEK_96B_IE[];
+
+/********************************************************
+MCS rate definitions
+*********************************************************/
+unsigned char MCS_rate_2R[16] = {0xff, 0xff, 0x0, 0x0, 0x01, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
+unsigned char MCS_rate_1R[16] = {0xff, 0x00, 0x0, 0x0, 0x01, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
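+/* These follow the 16-byte HT Rx MCS bitmask layout: byte 0 covers MCS 0-7 (one spatial */
+/* stream), byte 1 covers MCS 8-15 (second stream, hence 0x00 for 1R), and the 0x01 in */
+/* byte 4 is presumably bit 32, i.e. the 40MHz duplicate MCS 32. */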
+
+/********************************************************
+ChannelPlan definitions
+*********************************************************/
+static struct rt_channel_plan_2g RTW_ChannelPlan2G[RT_CHANNEL_DOMAIN_2G_MAX] = {
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13}, /* 0x00, RT_CHANNEL_DOMAIN_2G_WORLD , Passive scan CH 12, 13 */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13}, /* 0x01, RT_CHANNEL_DOMAIN_2G_ETSI1 */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11}, /* 0x02, RT_CHANNEL_DOMAIN_2G_FCC1 */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, 14}, /* 0x03, RT_CHANNEL_DOMAIN_2G_MIKK1 */
+ {{10, 11, 12, 13}, 4}, /* 0x04, RT_CHANNEL_DOMAIN_2G_ETSI2 */
+ {{}, 0}, /* 0x05, RT_CHANNEL_DOMAIN_2G_NULL */
+};
+
+static struct rt_channel_plan_map RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = {
+ /* 0x00 ~ 0x1F , Old Define ===== */
+ {0x02}, /* 0x00, RT_CHANNEL_DOMAIN_FCC */
+ {0x02}, /* 0x01, RT_CHANNEL_DOMAIN_IC */
+ {0x01}, /* 0x02, RT_CHANNEL_DOMAIN_ETSI */
+ {0x01}, /* 0x03, RT_CHANNEL_DOMAIN_SPAIN */
+ {0x01}, /* 0x04, RT_CHANNEL_DOMAIN_FRANCE */
+ {0x03}, /* 0x05, RT_CHANNEL_DOMAIN_MKK */
+ {0x03}, /* 0x06, RT_CHANNEL_DOMAIN_MKK1 */
+ {0x01}, /* 0x07, RT_CHANNEL_DOMAIN_ISRAEL */
+ {0x03}, /* 0x08, RT_CHANNEL_DOMAIN_TELEC */
+ {0x03}, /* 0x09, RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN */
+ {0x00}, /* 0x0A, RT_CHANNEL_DOMAIN_WORLD_WIDE_13 */
+ {0x02}, /* 0x0B, RT_CHANNEL_DOMAIN_TAIWAN */
+ {0x01}, /* 0x0C, RT_CHANNEL_DOMAIN_CHINA */
+ {0x02}, /* 0x0D, RT_CHANNEL_DOMAIN_SINGAPORE_INDIA_MEXICO */
+ {0x02}, /* 0x0E, RT_CHANNEL_DOMAIN_KOREA */
+ {0x02}, /* 0x0F, RT_CHANNEL_DOMAIN_TURKEY */
+ {0x01}, /* 0x10, RT_CHANNEL_DOMAIN_JAPAN */
+ {0x02}, /* 0x11, RT_CHANNEL_DOMAIN_FCC_NO_DFS */
+ {0x01}, /* 0x12, RT_CHANNEL_DOMAIN_JAPAN_NO_DFS */
+ {0x00}, /* 0x13, RT_CHANNEL_DOMAIN_WORLD_WIDE_5G */
+ {0x02}, /* 0x14, RT_CHANNEL_DOMAIN_TAIWAN_NO_DFS */
+ {0x00}, /* 0x15, RT_CHANNEL_DOMAIN_ETSI_NO_DFS */
+ {0x00}, /* 0x16, RT_CHANNEL_DOMAIN_KOREA_NO_DFS */
+ {0x03}, /* 0x17, RT_CHANNEL_DOMAIN_JAPAN_NO_DFS */
+ {0x05}, /* 0x18, RT_CHANNEL_DOMAIN_PAKISTAN_NO_DFS */
+ {0x02}, /* 0x19, RT_CHANNEL_DOMAIN_TAIWAN2_NO_DFS */
+ {0x00}, /* 0x1A, */
+ {0x00}, /* 0x1B, */
+ {0x00}, /* 0x1C, */
+ {0x00}, /* 0x1D, */
+ {0x00}, /* 0x1E, */
+ {0x05}, /* 0x1F, RT_CHANNEL_DOMAIN_WORLD_WIDE_ONLY_5G */
+ /* 0x20 ~ 0x7F , New Define ===== */
+ {0x00}, /* 0x20, RT_CHANNEL_DOMAIN_WORLD_NULL */
+ {0x01}, /* 0x21, RT_CHANNEL_DOMAIN_ETSI1_NULL */
+ {0x02}, /* 0x22, RT_CHANNEL_DOMAIN_FCC1_NULL */
+ {0x03}, /* 0x23, RT_CHANNEL_DOMAIN_MKK1_NULL */
+ {0x04}, /* 0x24, RT_CHANNEL_DOMAIN_ETSI2_NULL */
+ {0x02}, /* 0x25, RT_CHANNEL_DOMAIN_FCC1_FCC1 */
+ {0x00}, /* 0x26, RT_CHANNEL_DOMAIN_WORLD_ETSI1 */
+ {0x03}, /* 0x27, RT_CHANNEL_DOMAIN_MKK1_MKK1 */
+ {0x00}, /* 0x28, RT_CHANNEL_DOMAIN_WORLD_KCC1 */
+ {0x00}, /* 0x29, RT_CHANNEL_DOMAIN_WORLD_FCC2 */
+ {0x00}, /* 0x2A, */
+ {0x00}, /* 0x2B, */
+ {0x00}, /* 0x2C, */
+ {0x00}, /* 0x2D, */
+ {0x00}, /* 0x2E, */
+ {0x00}, /* 0x2F, */
+ {0x00}, /* 0x30, RT_CHANNEL_DOMAIN_WORLD_FCC3 */
+ {0x00}, /* 0x31, RT_CHANNEL_DOMAIN_WORLD_FCC4 */
+ {0x00}, /* 0x32, RT_CHANNEL_DOMAIN_WORLD_FCC5 */
+ {0x00}, /* 0x33, RT_CHANNEL_DOMAIN_WORLD_FCC6 */
+ {0x02}, /* 0x34, RT_CHANNEL_DOMAIN_FCC1_FCC7 */
+ {0x00}, /* 0x35, RT_CHANNEL_DOMAIN_WORLD_ETSI2 */
+ {0x00}, /* 0x36, RT_CHANNEL_DOMAIN_WORLD_ETSI3 */
+ {0x03}, /* 0x37, RT_CHANNEL_DOMAIN_MKK1_MKK2 */
+ {0x03}, /* 0x38, RT_CHANNEL_DOMAIN_MKK1_MKK3 */
+ {0x02}, /* 0x39, RT_CHANNEL_DOMAIN_FCC1_NCC1 */
+ {0x00}, /* 0x3A, */
+ {0x00}, /* 0x3B, */
+ {0x00}, /* 0x3C, */
+ {0x00}, /* 0x3D, */
+ {0x00}, /* 0x3E, */
+ {0x00}, /* 0x3F, */
+ {0x02}, /* 0x40, RT_CHANNEL_DOMAIN_FCC1_NCC2 */
+ {0x03}, /* 0x41, RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G */
+};
+
+static struct rt_channel_plan_map RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03}; /* use the combination for max channel numbers */
+
+/*
+ * Search for the given channel @ch in the given channel set @ch_set
+ * @ch_set: the given channel set
+ * @ch: the given channel number
+ *
+ * return the index of @ch in @ch_set, -1 if not found
+ */
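+/* e.g. rtw_ch_set_search_ch(pmlmeext->channel_set, 6) gives the index of channel 6 in the */
+/* adapter's channel set, or -1 if channel 6 is not present. */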
+int rtw_ch_set_search_ch(struct rt_channel_info *ch_set, const u32 ch)
+{
+ int i;
+ for (i = 0; ch_set[i].ChannelNum != 0; i++) {
+ if (ch == ch_set[i].ChannelNum)
+ break;
+ }
+
+ if (ch_set[i].ChannelNum == 0) /* reached the terminating entry: channel not found */
+ return -1;
+ return i;
+}
+
+/****************************************************************************
+
+Following are the initialization functions for WiFi MLME
+
+*****************************************************************************/
+
+int init_hw_mlme_ext(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+ return _SUCCESS;
+}
+
+static void init_mlme_ext_priv_value(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ unsigned char mixed_datarate[NumRates] = {
+ _1M_RATE_, _2M_RATE_, _5M_RATE_, _11M_RATE_, _6M_RATE_,
+ _9M_RATE_, _12M_RATE_, _18M_RATE_, _24M_RATE_, _36M_RATE_,
+ _48M_RATE_, _54M_RATE_, 0xff
+ };
+ unsigned char mixed_basicrate[NumRates] = {
+ _1M_RATE_, _2M_RATE_, _5M_RATE_, _11M_RATE_, _6M_RATE_,
+ _12M_RATE_, _24M_RATE_, 0xff,
+ };
+
+ ATOMIC_SET(&pmlmeext->event_seq, 0);
+ pmlmeext->mgnt_seq = 0;/* reset to zero when disconnect at client mode */
+
+ pmlmeext->cur_channel = padapter->registrypriv.channel;
+ pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ pmlmeext->oper_channel = pmlmeext->cur_channel;
+ pmlmeext->oper_bwmode = pmlmeext->cur_bwmode;
+ pmlmeext->oper_ch_offset = pmlmeext->cur_ch_offset;
+ pmlmeext->retry = 0;
+
+ pmlmeext->cur_wireless_mode = padapter->registrypriv.wireless_mode;
+
+ memcpy(pmlmeext->datarate, mixed_datarate, NumRates);
+ memcpy(pmlmeext->basicrate, mixed_basicrate, NumRates);
+
+ pmlmeext->tx_rate = IEEE80211_CCK_RATE_1MB;
+
+ pmlmeext->sitesurvey_res.state = SCAN_DISABLE;
+ pmlmeext->sitesurvey_res.channel_idx = 0;
+ pmlmeext->sitesurvey_res.bss_cnt = 0;
+ pmlmeext->scan_abort = false;
+
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ pmlmeinfo->reauth_count = 0;
+ pmlmeinfo->reassoc_count = 0;
+ pmlmeinfo->link_count = 0;
+ pmlmeinfo->auth_seq = 0;
+ pmlmeinfo->auth_algo = dot11AuthAlgrthm_Open;
+ pmlmeinfo->key_index = 0;
+ pmlmeinfo->iv = 0;
+
+ pmlmeinfo->enc_algo = _NO_PRIVACY_;
+ pmlmeinfo->authModeToggle = 0;
+
+ _rtw_memset(pmlmeinfo->chg_txt, 0, 128);
+
+ pmlmeinfo->slotTime = SHORT_SLOT_TIME;
+ pmlmeinfo->preamble_mode = PREAMBLE_AUTO;
+
+ pmlmeinfo->dialogToken = 0;
+
+ pmlmeext->action_public_rxseq = 0xffff;
+ pmlmeext->action_public_dialog_token = 0xff;
+}
+
+static int has_channel(struct rt_channel_info *channel_set,
+ u8 chanset_size,
+ u8 chan) {
+ int i;
+
+ for (i = 0; i < chanset_size; i++) {
+ if (channel_set[i].ChannelNum == chan)
+ return 1;
+ }
+ return 0;
+}
+
+static void init_channel_list(struct adapter *padapter, struct rt_channel_info *channel_set,
+ u8 chanset_size,
+ struct p2p_channels *channel_list) {
+ struct p2p_oper_class_map op_class[] = {
+ { IEEE80211G, 81, 1, 13, 1, BW20 },
+ { IEEE80211G, 82, 14, 14, 1, BW20 },
+ { -1, 0, 0, 0, 0, BW20 }
+ };
+
+ int cla, op;
+
+ cla = 0;
+
+ for (op = 0; op_class[op].op_class; op++) {
+ u8 ch;
+ struct p2p_oper_class_map *o = &op_class[op];
+ struct p2p_reg_class *reg = NULL;
+
+ for (ch = o->min_chan; ch <= o->max_chan; ch += o->inc) {
+ if (!has_channel(channel_set, chanset_size, ch)) {
+ continue;
+ }
+
+ if ((0 == padapter->registrypriv.ht_enable) && (8 == o->inc))
+ continue;
+
+ if ((0 == (padapter->registrypriv.cbw40_enable & BIT(1))) &&
+ ((BW40MINUS == o->bw) || (BW40PLUS == o->bw)))
+ continue;
+
+ if (reg == NULL) {
+ reg = &channel_list->reg_class[cla];
+ cla++;
+ reg->reg_class = o->op_class;
+ reg->channels = 0;
+ }
+ reg->channel[reg->channels] = ch;
+ reg->channels++;
+ }
+ }
+ channel_list->reg_classes = cla;
+}
+
+static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_channel_info *channel_set)
+{
+ u8 index, chanset_size = 0;
+ u8 b2_4GBand = false;
+ u8 Index2G = 0;
+
+ _rtw_memset(channel_set, 0, sizeof(struct rt_channel_info) * MAX_CHANNEL_NUM);
+
+ if (ChannelPlan >= RT_CHANNEL_DOMAIN_MAX && ChannelPlan != RT_CHANNEL_DOMAIN_REALTEK_DEFINE) {
+ DBG_88E("ChannelPlan ID %x error !!!!!\n", ChannelPlan);
+ return chanset_size;
+ }
+
+ if (padapter->registrypriv.wireless_mode & WIRELESS_11G) {
+ b2_4GBand = true;
+ if (RT_CHANNEL_DOMAIN_REALTEK_DEFINE == ChannelPlan)
+ Index2G = RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE.Index2G;
+ else
+ Index2G = RTW_ChannelPlanMap[ChannelPlan].Index2G;
+ }
+
+ if (b2_4GBand) {
+ for (index = 0; index < RTW_ChannelPlan2G[Index2G].Len; index++) {
+ channel_set[chanset_size].ChannelNum = RTW_ChannelPlan2G[Index2G].Channel[index];
+
+ if ((RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN == ChannelPlan) ||/* Channel 1~11 is active, and 12~14 is passive */
+ (RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G == ChannelPlan)) {
+ if (channel_set[chanset_size].ChannelNum >= 1 && channel_set[chanset_size].ChannelNum <= 11)
+ channel_set[chanset_size].ScanType = SCAN_ACTIVE;
+ else if ((channel_set[chanset_size].ChannelNum >= 12 && channel_set[chanset_size].ChannelNum <= 14))
+ channel_set[chanset_size].ScanType = SCAN_PASSIVE;
+ } else if (RT_CHANNEL_DOMAIN_WORLD_WIDE_13 == ChannelPlan ||
+ RT_CHANNEL_DOMAIN_2G_WORLD == Index2G) {/* channel 12~13, passive scan */
+ if (channel_set[chanset_size].ChannelNum <= 11)
+ channel_set[chanset_size].ScanType = SCAN_ACTIVE;
+ else
+ channel_set[chanset_size].ScanType = SCAN_PASSIVE;
+ } else {
+ channel_set[chanset_size].ScanType = SCAN_ACTIVE;
+ }
+
+ chanset_size++;
+ }
+ }
+ return chanset_size;
+}
+
+int init_mlme_ext_priv(struct adapter *padapter)
+{
+ int res = _SUCCESS;
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ pmlmeext->padapter = padapter;
+
+ init_mlme_ext_priv_value(padapter);
+ pmlmeinfo->bAcceptAddbaReq = pregistrypriv->bAcceptAddbaReq;
+
+ init_mlme_ext_timer(padapter);
+
+#ifdef CONFIG_88EU_AP_MODE
+ init_mlme_ap_info(padapter);
+#endif
+
+ pmlmeext->max_chan_nums = init_channel_set(padapter, pmlmepriv->ChannelPlan, pmlmeext->channel_set);
+ init_channel_list(padapter, pmlmeext->channel_set, pmlmeext->max_chan_nums, &pmlmeext->channel_list);
+
+ pmlmeext->chan_scan_time = SURVEY_TO;
+ pmlmeext->mlmeext_init = true;
+
+ pmlmeext->active_keep_alive_check = true;
+
+ return res;
+}
+
+void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext)
+{
+ struct adapter *padapter = pmlmeext->padapter;
+
+ if (!padapter)
+ return;
+
+ if (padapter->bDriverStopped) {
+ _cancel_timer_ex(&pmlmeext->survey_timer);
+ _cancel_timer_ex(&pmlmeext->link_timer);
+ /* _cancel_timer_ex(&pmlmeext->ADDBA_timer); */
+ }
+}
+
+static void _mgt_dispatcher(struct adapter *padapter, struct mlme_handler *ptable, union recv_frame *precv_frame)
+{
+ u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+
+ if (ptable->func) {
+ /* receive the frames that ra(a1) is my address or ra(a1) is bc address. */
+ if (!_rtw_memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
+ !_rtw_memcmp(GetAddr1Ptr(pframe), bc_addr, ETH_ALEN))
+ return;
+ ptable->func(padapter, precv_frame);
+ }
+}
+
+void mgt_dispatcher(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ int index;
+ struct mlme_handler *ptable;
+#ifdef CONFIG_88EU_AP_MODE
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+#endif /* CONFIG_88EU_AP_MODE */
+ u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, GetAddr2Ptr(pframe));
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("+mgt_dispatcher: type(0x%x) subtype(0x%x)\n",
+ GetFrameType(pframe), GetFrameSubType(pframe)));
+
+ if (GetFrameType(pframe) != WIFI_MGT_TYPE) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("mgt_dispatcher: type(0x%x) error!\n", GetFrameType(pframe)));
+ return;
+ }
+
+ /* receive the frames that ra(a1) is my address or ra(a1) is bc address. */
+ if (!_rtw_memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
+ !_rtw_memcmp(GetAddr1Ptr(pframe), bc_addr, ETH_ALEN))
+ return;
+
+ ptable = mlme_sta_tbl;
+
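+ /* mlme_sta_tbl is indexed by the management frame subtype (subtype >> 4); valid entries are 0..13 */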
+ index = GetFrameSubType(pframe) >> 4;
+
+ if (index > 13) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Currently we do not support reserved sub-fr-type=%d\n", index));
+ return;
+ }
+ ptable += index;
+
+ if (psta != NULL) {
+ if (GetRetry(pframe)) {
+ if (precv_frame->u.hdr.attrib.seq_num == psta->RxMgmtFrameSeqNum) {
+ /* drop the duplicate management frame */
+ DBG_88E("Drop duplicate management frame with seq_num=%d.\n", precv_frame->u.hdr.attrib.seq_num);
+ return;
+ }
+ }
+ psta->RxMgmtFrameSeqNum = precv_frame->u.hdr.attrib.seq_num;
+ }
+
+#ifdef CONFIG_88EU_AP_MODE
+ switch (GetFrameSubType(pframe)) {
+ case WIFI_AUTH:
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
+ ptable->func = &OnAuth;
+ else
+ ptable->func = &OnAuthClient;
+ /* fall through */
+ case WIFI_ASSOCREQ:
+ case WIFI_REASSOCREQ:
+ _mgt_dispatcher(padapter, ptable, precv_frame);
+ break;
+ case WIFI_PROBEREQ:
+ _mgt_dispatcher(padapter, ptable, precv_frame);
+ break;
+ case WIFI_BEACON:
+ _mgt_dispatcher(padapter, ptable, precv_frame);
+ break;
+ case WIFI_ACTION:
+ _mgt_dispatcher(padapter, ptable, precv_frame);
+ break;
+ default:
+ _mgt_dispatcher(padapter, ptable, precv_frame);
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
+ rtw_hostapd_mlme_rx(padapter, precv_frame);
+ break;
+ }
+#else
+ _mgt_dispatcher(padapter, ptable, precv_frame);
+#endif
+}
+
+#ifdef CONFIG_88EU_P2P
+static u32 p2p_listen_state_process(struct adapter *padapter, unsigned char *da)
+{
+ bool response = true;
+
+ /* do nothing if the device name is empty */
+ if (!padapter->wdinfo.device_name_len)
+ response = false;
+
+ if (response)
+ issue_probersp_p2p(padapter, da);
+
+ return _SUCCESS;
+}
+#endif /* CONFIG_88EU_P2P */
+
+
+/****************************************************************************
+
+Following are the callback functions for each subtype of the management frames
+
+*****************************************************************************/
+
+unsigned int OnProbeReq(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ unsigned int ielen;
+ unsigned char *p;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur = &(pmlmeinfo->network);
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ uint len = precv_frame->u.hdr.len;
+ u8 is_valid_p2p_probereq = false;
+
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ u8 wifi_test_chk_rate = 1;
+
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) &&
+ !rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE) &&
+ !rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT) &&
+ !rtw_p2p_chk_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH) &&
+ !rtw_p2p_chk_state(pwdinfo, P2P_STATE_SCAN)) {
+ /* mcs_rate = 0 -> CCK 1M rate */
+ /* mcs_rate = 1 -> CCK 2M rate */
+ /* mcs_rate = 2 -> CCK 5.5M rate */
+ /* mcs_rate = 3 -> CCK 11M rate */
+ /* In the P2P mode, the driver should not support the CCK rate */
+
+ /* Commented by Kurt 2012/10/16 */
+ /* IOT issue: the Google Nexus 7 uses the 1M rate to send p2p_probe_req after GO negotiation completes and the Nexus 7 is the client */
+ if (wifi_test_chk_rate == 1) {
+ is_valid_p2p_probereq = process_probe_req_p2p_ie(pwdinfo, pframe, len);
+ if (is_valid_p2p_probereq) {
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE)) {
+ /* FIXME */
+ report_survey_event(padapter, precv_frame);
+ p2p_listen_state_process(padapter, get_sa(pframe));
+
+ return _SUCCESS;
+ }
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO))
+ goto _continue;
+ }
+ }
+ }
+
+_continue:
+#endif /* CONFIG_88EU_P2P */
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
+ return _SUCCESS;
+
+ if (!check_fwstate(pmlmepriv, _FW_LINKED) &&
+ !check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE|WIFI_AP_STATE))
+ return _SUCCESS;
+
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _PROBEREQ_IE_OFFSET_, _SSID_IE_, (int *)&ielen,
+ len - WLAN_HDR_A3_LEN - _PROBEREQ_IE_OFFSET_);
+
+ /* check (wildcard) SSID */
+ if (p != NULL) {
+ if (is_valid_p2p_probereq)
+ goto _issue_probersp;
+
+ if ((ielen != 0 && !_rtw_memcmp((void *)(p+2), (void *)cur->Ssid.Ssid, cur->Ssid.SsidLength)) ||
+ (ielen == 0 && pmlmeinfo->hidden_ssid_mode))
+ return _SUCCESS;
+
+_issue_probersp:
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) &&
+ pmlmepriv->cur_network.join_res)
+ issue_probersp(padapter, get_sa(pframe), is_valid_p2p_probereq);
+ }
+ return _SUCCESS;
+}
+
+unsigned int OnProbeRsp(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+#endif
+
+#ifdef CONFIG_88EU_P2P
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ)) {
+ if (pwdinfo->tx_prov_disc_info.benable) {
+ if (_rtw_memcmp(pwdinfo->tx_prov_disc_info.peerIFAddr, GetAddr2Ptr(pframe), ETH_ALEN)) {
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT)) {
+ pwdinfo->tx_prov_disc_info.benable = false;
+ issue_p2p_provision_request(padapter,
+ pwdinfo->tx_prov_disc_info.ssid.Ssid,
+ pwdinfo->tx_prov_disc_info.ssid.SsidLength,
+ pwdinfo->tx_prov_disc_info.peerDevAddr);
+ } else if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE) || rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ pwdinfo->tx_prov_disc_info.benable = false;
+ issue_p2p_provision_request(padapter, NULL, 0,
+ pwdinfo->tx_prov_disc_info.peerDevAddr);
+ }
+ }
+ }
+ return _SUCCESS;
+ } else if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_ING)) {
+ if (pwdinfo->nego_req_info.benable) {
+ DBG_88E("[%s] P2P State is GONEGO ING!\n", __func__);
+ if (_rtw_memcmp(pwdinfo->nego_req_info.peerDevAddr, GetAddr2Ptr(pframe), ETH_ALEN)) {
+ pwdinfo->nego_req_info.benable = false;
+ issue_p2p_GO_request(padapter, pwdinfo->nego_req_info.peerDevAddr);
+ }
+ }
+ } else if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_INVITE_REQ)) {
+ if (pwdinfo->invitereq_info.benable) {
+ DBG_88E("[%s] P2P_STATE_TX_INVITE_REQ!\n", __func__);
+ if (_rtw_memcmp(pwdinfo->invitereq_info.peer_macaddr, GetAddr2Ptr(pframe), ETH_ALEN)) {
+ pwdinfo->invitereq_info.benable = false;
+ issue_p2p_invitation_request(padapter, pwdinfo->invitereq_info.peer_macaddr);
+ }
+ }
+ }
+#endif
+
+
+ if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
+ report_survey_event(padapter, precv_frame);
+ return _SUCCESS;
+ }
+
+ return _SUCCESS;
+}
+
+unsigned int OnBeacon(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ int cam_idx;
+ struct sta_info *psta;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ uint len = precv_frame->u.hdr.len;
+ struct wlan_bssid_ex *pbss;
+ int ret = _SUCCESS;
+
+ if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
+ report_survey_event(padapter, precv_frame);
+ return _SUCCESS;
+ }
+
+ if (_rtw_memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN)) {
+ if (pmlmeinfo->state & WIFI_FW_AUTH_NULL) {
+ /* we should update current network before auth, or some IE is wrong */
+ pbss = (struct wlan_bssid_ex *)rtw_malloc(sizeof(struct wlan_bssid_ex));
+ if (pbss) {
+ if (collect_bss_info(padapter, precv_frame, pbss) == _SUCCESS) {
+ update_network(&(pmlmepriv->cur_network.network), pbss, padapter, true);
+ rtw_get_bcn_info(&(pmlmepriv->cur_network));
+ }
+ kfree(pbss);
+ }
+
+ /* check the vendor of the assoc AP */
+ pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pframe+sizeof(struct rtw_ieee80211_hdr_3addr), len-sizeof(struct rtw_ieee80211_hdr_3addr));
+
+ /* update TSF Value */
+ update_TSF(pmlmeext, pframe, len);
+
+ /* start auth */
+ start_clnt_auth(padapter);
+
+ return _SUCCESS;
+ }
+
+ if (((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE) && (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)) {
+ psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
+ if (psta != NULL) {
+ ret = rtw_check_bcn_info(padapter, pframe, len);
+ if (!ret) {
+ DBG_88E_LEVEL(_drv_info_, "ap has changed, disconnect now\n ");
+ receive_disconnect(padapter, pmlmeinfo->network.MacAddress, 65535);
+ return _SUCCESS;
+ }
+ /* update WMM, ERP in the beacon */
+ /* todo: the timer is used instead of the number of the beacon received */
+ if ((sta_rx_pkts(psta) & 0xf) == 0)
+ update_beacon_info(padapter, pframe, len, psta);
+ process_p2p_ps_ie(padapter, (pframe + WLAN_HDR_A3_LEN), (len - WLAN_HDR_A3_LEN));
+ }
+ } else if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) {
+ psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
+ if (psta != NULL) {
+ /* update WMM, ERP in the beacon */
+ /* todo: the timer is used instead of the number of the beacon received */
+ if ((sta_rx_pkts(psta) & 0xf) == 0)
+ update_beacon_info(padapter, pframe, len, psta);
+ } else {
+ /* allocate a new CAM entry for IBSS station */
+ cam_idx = allocate_fw_sta_entry(padapter);
+ if (cam_idx == NUM_STA)
+ goto _END_ONBEACON_;
+
+ /* get supported rate */
+ if (update_sta_support_rate(padapter, (pframe + WLAN_HDR_A3_LEN + _BEACON_IE_OFFSET_), (len - WLAN_HDR_A3_LEN - _BEACON_IE_OFFSET_), cam_idx) == _FAIL) {
+ pmlmeinfo->FW_sta_info[cam_idx].status = 0;
+ goto _END_ONBEACON_;
+ }
+
+ /* update TSF Value */
+ update_TSF(pmlmeext, pframe, len);
+
+ /* report sta add event */
+ report_add_sta_event(padapter, GetAddr2Ptr(pframe), cam_idx);
+ }
+ }
+ }
+
+_END_ONBEACON_:
+
+ return _SUCCESS;
+}
+
+unsigned int OnAuth(struct adapter *padapter, union recv_frame *precv_frame)
+{
+#ifdef CONFIG_88EU_AP_MODE
+ unsigned long irqL;
+ unsigned int auth_mode, ie_len;
+ u16 seq;
+ unsigned char *sa, *p;
+ u16 algorithm;
+ int status;
+ static struct sta_info stat;
+ struct sta_info *pstat = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ uint len = precv_frame->u.hdr.len;
+
+ if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
+ return _FAIL;
+
+ DBG_88E("+OnAuth\n");
+
+ sa = GetAddr2Ptr(pframe);
+
+ auth_mode = psecuritypriv->dot11AuthAlgrthm;
+ seq = le16_to_cpu(*(__le16 *)((size_t)pframe + WLAN_HDR_A3_LEN + 2));
+ algorithm = le16_to_cpu(*(__le16 *)((size_t)pframe + WLAN_HDR_A3_LEN));
+
+ DBG_88E("auth alg=%x, seq=%X\n", algorithm, seq);
+
+ if (auth_mode == 2 && psecuritypriv->dot11PrivacyAlgrthm != _WEP40_ &&
+ psecuritypriv->dot11PrivacyAlgrthm != _WEP104_)
+ auth_mode = 0;
+
+ if ((algorithm > 0 && auth_mode == 0) || /* rx a shared-key auth but shared-key is not enabled */
+ (algorithm == 0 && auth_mode == 1)) { /* rx an open-system auth but shared-key is enabled */
+ DBG_88E("auth rejected due to bad alg [alg=%d, auth_mib=%d] %02X%02X%02X%02X%02X%02X\n",
+ algorithm, auth_mode, sa[0], sa[1], sa[2], sa[3], sa[4], sa[5]);
+
+ status = _STATS_NO_SUPP_ALG_;
+
+ goto auth_fail;
+ }
+
+ if (!rtw_access_ctrl(padapter, sa)) {
+ status = _STATS_UNABLE_HANDLE_STA_;
+ goto auth_fail;
+ }
+
+ pstat = rtw_get_stainfo(pstapriv, sa);
+ if (pstat == NULL) {
+ /* allocate a new one */
+ DBG_88E("going to alloc stainfo for sa=%pM\n", sa);
+ pstat = rtw_alloc_stainfo(pstapriv, sa);
+ if (pstat == NULL) {
+ DBG_88E(" Exceed the upper limit of supported clients...\n");
+ status = _STATS_UNABLE_HANDLE_STA_;
+ goto auth_fail;
+ }
+
+ pstat->state = WIFI_FW_AUTH_NULL;
+ pstat->auth_seq = 0;
+ } else {
+ _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ if (!rtw_is_list_empty(&pstat->asoc_list)) {
+ rtw_list_delete(&pstat->asoc_list);
+ pstapriv->asoc_list_cnt--;
+ }
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+
+ if (seq == 1) {
+ /* TODO: STA re_auth and auth timeout */
+ }
+ }
+
+ _enter_critical_bh(&pstapriv->auth_list_lock, &irqL);
+ if (rtw_is_list_empty(&pstat->auth_list)) {
+ rtw_list_insert_tail(&pstat->auth_list, &pstapriv->auth_list);
+ pstapriv->auth_list_cnt++;
+ }
+ _exit_critical_bh(&pstapriv->auth_list_lock, &irqL);
+
+ if (pstat->auth_seq == 0)
+ pstat->expire_to = pstapriv->auth_to;
+
+ if ((pstat->auth_seq + 1) != seq) {
+ DBG_88E("(1)auth rejected because out of seq [rx_seq=%d, exp_seq=%d]!\n",
+ seq, pstat->auth_seq+1);
+ status = _STATS_OUT_OF_AUTH_SEQ_;
+ goto auth_fail;
+ }
+
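+ /* Open System (algorithm 0): a single seq 1/2 exchange. Shared Key: seq 1 -> send the challenge text, seq 3 -> verify the returned challenge text. */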
+ if (algorithm == 0 && (auth_mode == 0 || auth_mode == 2)) {
+ if (seq == 1) {
+ pstat->state &= ~WIFI_FW_AUTH_NULL;
+ pstat->state |= WIFI_FW_AUTH_SUCCESS;
+ pstat->expire_to = pstapriv->assoc_to;
+ pstat->authalg = algorithm;
+ } else {
+ DBG_88E("(2)auth rejected because out of seq [rx_seq=%d, exp_seq=%d]!\n",
+ seq, pstat->auth_seq+1);
+ status = _STATS_OUT_OF_AUTH_SEQ_;
+ goto auth_fail;
+ }
+ } else { /* shared-key or auto authentication */
+ if (seq == 1) {
+ /* prepare the challenge text... */
+
+ pstat->state &= ~WIFI_FW_AUTH_NULL;
+ pstat->state |= WIFI_FW_AUTH_STATE;
+ pstat->authalg = algorithm;
+ pstat->auth_seq = 2;
+ } else if (seq == 3) {
+ /* check the challenge text... */
+ DBG_88E("checking the challenge text...\n");
+
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + 4 + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, (int *)&ie_len,
+ len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_ - 4);
+
+ if ((p == NULL) || (ie_len <= 0)) {
+ DBG_88E("auth rejected because challenge failure!(1)\n");
+ status = _STATS_CHALLENGE_FAIL_;
+ goto auth_fail;
+ }
+
+ if (_rtw_memcmp((void *)(p + 2), pstat->chg_txt, 128)) {
+ pstat->state &= (~WIFI_FW_AUTH_STATE);
+ pstat->state |= WIFI_FW_AUTH_SUCCESS;
+ /* the challenge text is correct... */
+ pstat->expire_to = pstapriv->assoc_to;
+ } else {
+ DBG_88E("auth rejected because challenge failure!\n");
+ status = _STATS_CHALLENGE_FAIL_;
+ goto auth_fail;
+ }
+ } else {
+ DBG_88E("(3)auth rejected because out of seq [rx_seq=%d, exp_seq=%d]!\n",
+ seq, pstat->auth_seq+1);
+ status = _STATS_OUT_OF_AUTH_SEQ_;
+ goto auth_fail;
+ }
+ }
+
+ /* Now, we are going to issue_auth... */
+ pstat->auth_seq = seq + 1;
+
+#ifdef CONFIG_88EU_AP_MODE
+ issue_auth(padapter, pstat, (unsigned short)(_STATS_SUCCESSFUL_));
+#endif
+
+ if (pstat->state & WIFI_FW_AUTH_SUCCESS)
+ pstat->auth_seq = 0;
+
+ return _SUCCESS;
+
+auth_fail:
+
+ if (pstat)
+ rtw_free_stainfo(padapter, pstat);
+
+ pstat = &stat;
+ _rtw_memset((char *)pstat, '\0', sizeof(stat));
+ pstat->auth_seq = 2;
+ memcpy(pstat->hwaddr, sa, 6);
+
+#ifdef CONFIG_88EU_AP_MODE
+ issue_auth(padapter, pstat, (unsigned short)status);
+#endif
+
+#endif
+ return _FAIL;
+}
+
+unsigned int OnAuthClient(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ unsigned int seq, len, status, offset;
+ unsigned char *p;
+ unsigned int go2asoc = 0;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ uint pkt_len = precv_frame->u.hdr.len;
+
+ DBG_88E("%s\n", __func__);
+
+ /* check A1 matches or not */
+ if (!_rtw_memcmp(myid(&(padapter->eeprompriv)), get_da(pframe), ETH_ALEN))
+ return _SUCCESS;
+
+ if (!(pmlmeinfo->state & WIFI_FW_AUTH_STATE))
+ return _SUCCESS;
+
+ offset = (GetPrivacy(pframe)) ? 4 : 0;
+
+ seq = le16_to_cpu(*(__le16 *)((size_t)pframe + WLAN_HDR_A3_LEN + offset + 2));
+ status = le16_to_cpu(*(__le16 *)((size_t)pframe + WLAN_HDR_A3_LEN + offset + 4));
+
+ if (status != 0) {
+ DBG_88E("clnt auth fail, status: %d\n", status);
+ if (status == 13) { /* pmlmeinfo->auth_algo == dot11AuthAlgrthm_Auto) */
+ if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared)
+ pmlmeinfo->auth_algo = dot11AuthAlgrthm_Open;
+ else
+ pmlmeinfo->auth_algo = dot11AuthAlgrthm_Shared;
+ }
+
+ set_link_timer(pmlmeext, 1);
+ goto authclnt_fail;
+ }
+
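+ /* seq 2: open-system result or shared-key challenge; seq 4: final shared-key result */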
+ if (seq == 2) {
+ if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared) {
+ /* legacy shared-key authentication */
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, (int *)&len,
+ pkt_len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_);
+
+ if (p == NULL)
+ goto authclnt_fail;
+
+ memcpy((void *)(pmlmeinfo->chg_txt), (void *)(p + 2), len);
+ pmlmeinfo->auth_seq = 3;
+ issue_auth(padapter, NULL, 0);
+ set_link_timer(pmlmeext, REAUTH_TO);
+
+ return _SUCCESS;
+ } else {
+ /* open system */
+ go2asoc = 1;
+ }
+ } else if (seq == 4) {
+ if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared)
+ go2asoc = 1;
+ else
+ goto authclnt_fail;
+ } else {
+ /* this is also illegal */
+ goto authclnt_fail;
+ }
+
+ if (go2asoc) {
+ DBG_88E_LEVEL(_drv_info_, "auth success, start assoc\n");
+ start_clnt_assoc(padapter);
+ return _SUCCESS;
+ }
+authclnt_fail:
+ return _FAIL;
+}
+
+unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
+{
+#ifdef CONFIG_88EU_AP_MODE
+ unsigned long irqL;
+ u16 capab_info;
+ struct rtw_ieee802_11_elems elems;
+ struct sta_info *pstat;
+ unsigned char reassoc, *p, *pos, *wpa_ie;
+ unsigned char WMM_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01};
+ int i, ie_len, wpa_ie_len, left;
+ unsigned char supportRate[16];
+ int supportRateNum;
+ unsigned short status = _STATS_SUCCESSFUL_;
+ unsigned short frame_type, ie_offset = 0;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur = &(pmlmeinfo->network);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ uint pkt_len = precv_frame->u.hdr.len;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ u8 p2p_status_code = P2P_STATUS_SUCCESS;
+ u8 *p2pie;
+ u32 p2pielen = 0;
+#endif /* CONFIG_88EU_P2P */
+
+ if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
+ return _FAIL;
+
+ frame_type = GetFrameSubType(pframe);
+ if (frame_type == WIFI_ASSOCREQ) {
+ reassoc = 0;
+ ie_offset = _ASOCREQ_IE_OFFSET_;
+ } else { /* WIFI_REASSOCREQ */
+ reassoc = 1;
+ ie_offset = _REASOCREQ_IE_OFFSET_;
+ }
+
+
+ if (pkt_len < IEEE80211_3ADDR_LEN + ie_offset) {
+ DBG_88E("handle_assoc(reassoc=%d) - too short payload (len=%lu)"
+ "\n", reassoc, (unsigned long)pkt_len);
+ return _FAIL;
+ }
+
+ pstat = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
+ if (pstat == NULL) {
+ status = _RSON_CLS2_;
+ goto asoc_class2_error;
+ }
+
+ capab_info = RTW_GET_LE16(pframe + WLAN_HDR_A3_LEN);
+
+ left = pkt_len - (IEEE80211_3ADDR_LEN + ie_offset);
+ pos = pframe + (IEEE80211_3ADDR_LEN + ie_offset);
+
+
+ DBG_88E("%s\n", __func__);
+
+ /* check if this station has been successfully authenticated/associated */
+ if (!((pstat->state) & WIFI_FW_AUTH_SUCCESS)) {
+ if (!((pstat->state) & WIFI_FW_ASSOC_SUCCESS)) {
+ status = _RSON_CLS2_;
+ goto asoc_class2_error;
+ } else {
+ pstat->state &= (~WIFI_FW_ASSOC_SUCCESS);
+ pstat->state |= WIFI_FW_ASSOC_STATE;
+ }
+ } else {
+ pstat->state &= (~WIFI_FW_AUTH_SUCCESS);
+ pstat->state |= WIFI_FW_ASSOC_STATE;
+ }
+ pstat->capability = capab_info;
+ /* now parse all ieee802_11 ie to point to elems */
+ if (rtw_ieee802_11_parse_elems(pos, left, &elems, 1) == ParseFailed ||
+ !elems.ssid) {
+ DBG_88E("STA %pM sent invalid association request\n",
+ pstat->hwaddr);
+ status = _STATS_FAILURE_;
+ goto OnAssocReqFail;
+ }
+
+
+ /* now we should check all the fields... */
+ /* checking SSID */
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _SSID_IE_, &ie_len,
+ pkt_len - WLAN_HDR_A3_LEN - ie_offset);
+ if (p == NULL)
+ status = _STATS_FAILURE_;
+
+ if (ie_len == 0) { /* broadcast ssid, however it is not allowed in assocreq */
+ status = _STATS_FAILURE_;
+ } else {
+ /* check if ssid match */
+ if (!_rtw_memcmp((void *)(p+2), cur->Ssid.Ssid, cur->Ssid.SsidLength))
+ status = _STATS_FAILURE_;
+
+ if (ie_len != cur->Ssid.SsidLength)
+ status = _STATS_FAILURE_;
+ }
+
+ if (_STATS_SUCCESSFUL_ != status)
+ goto OnAssocReqFail;
+
+ /* check if the supported rate is ok */
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _SUPPORTEDRATES_IE_, &ie_len, pkt_len - WLAN_HDR_A3_LEN - ie_offset);
+ if (p == NULL) {
+ DBG_88E("Rx a sta assoc-req which supported rate is empty!\n");
+ /* use our own rate set as statoin used */
+ /* memcpy(supportRate, AP_BSSRATE, AP_BSSRATE_LEN); */
+ /* supportRateNum = AP_BSSRATE_LEN; */
+
+ status = _STATS_FAILURE_;
+ goto OnAssocReqFail;
+ } else {
+ memcpy(supportRate, p+2, ie_len);
+ supportRateNum = ie_len;
+
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _EXT_SUPPORTEDRATES_IE_, &ie_len,
+ pkt_len - WLAN_HDR_A3_LEN - ie_offset);
+ if (p != NULL) {
+ if (supportRateNum + ie_len <= sizeof(supportRate)) { /* don't overflow supportRate[] */
+ memcpy(supportRate+supportRateNum, p+2, ie_len);
+ supportRateNum += ie_len;
+ }
+ }
+ }
+
+ /* todo: mask supportRate between AP & STA -> move to update raid */
+ /* get_matched_rate(pmlmeext, supportRate, &supportRateNum, 0); */
+
+ /* update station supportRate */
+ pstat->bssratelen = supportRateNum;
+ memcpy(pstat->bssrateset, supportRate, supportRateNum);
+ UpdateBrateTblForSoftAP(pstat->bssrateset, pstat->bssratelen);
+
+ /* check RSN/WPA/WPS */
+ pstat->dot8021xalg = 0;
+ pstat->wpa_psk = 0;
+ pstat->wpa_group_cipher = 0;
+ pstat->wpa2_group_cipher = 0;
+ pstat->wpa_pairwise_cipher = 0;
+ pstat->wpa2_pairwise_cipher = 0;
+ _rtw_memset(pstat->wpa_ie, 0, sizeof(pstat->wpa_ie));
+ if ((psecuritypriv->wpa_psk & BIT(1)) && elems.rsn_ie) {
+ int group_cipher = 0, pairwise_cipher = 0;
+
+ wpa_ie = elems.rsn_ie;
+ wpa_ie_len = elems.rsn_ie_len;
+
+ if (rtw_parse_wpa2_ie(wpa_ie-2, wpa_ie_len+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
+ pstat->dot8021xalg = 1;/* psk, todo:802.1x */
+ pstat->wpa_psk |= BIT(1);
+
+ pstat->wpa2_group_cipher = group_cipher&psecuritypriv->wpa2_group_cipher;
+ pstat->wpa2_pairwise_cipher = pairwise_cipher&psecuritypriv->wpa2_pairwise_cipher;
+
+ if (!pstat->wpa2_group_cipher)
+ status = WLAN_STATUS_GROUP_CIPHER_NOT_VALID;
+
+ if (!pstat->wpa2_pairwise_cipher)
+ status = WLAN_STATUS_PAIRWISE_CIPHER_NOT_VALID;
+ } else {
+ status = WLAN_STATUS_INVALID_IE;
+ }
+ } else if ((psecuritypriv->wpa_psk & BIT(0)) && elems.wpa_ie) {
+ int group_cipher = 0, pairwise_cipher = 0;
+
+ wpa_ie = elems.wpa_ie;
+ wpa_ie_len = elems.wpa_ie_len;
+
+ if (rtw_parse_wpa_ie(wpa_ie-2, wpa_ie_len+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
+ pstat->dot8021xalg = 1;/* psk, todo:802.1x */
+ pstat->wpa_psk |= BIT(0);
+
+ pstat->wpa_group_cipher = group_cipher&psecuritypriv->wpa_group_cipher;
+ pstat->wpa_pairwise_cipher = pairwise_cipher&psecuritypriv->wpa_pairwise_cipher;
+
+ if (!pstat->wpa_group_cipher)
+ status = WLAN_STATUS_GROUP_CIPHER_NOT_VALID;
+
+ if (!pstat->wpa_pairwise_cipher)
+ status = WLAN_STATUS_PAIRWISE_CIPHER_NOT_VALID;
+ } else {
+ status = WLAN_STATUS_INVALID_IE;
+ }
+ } else {
+ wpa_ie = NULL;
+ wpa_ie_len = 0;
+ }
+
+ if (_STATS_SUCCESSFUL_ != status)
+ goto OnAssocReqFail;
+
+ pstat->flags &= ~(WLAN_STA_WPS | WLAN_STA_MAYBE_WPS);
+ if (wpa_ie == NULL) {
+ if (elems.wps_ie) {
+ DBG_88E("STA included WPS IE in "
+ "(Re)Association Request - assume WPS is "
+ "used\n");
+ pstat->flags |= WLAN_STA_WPS;
+ /* wpabuf_free(sta->wps_ie); */
+ /* sta->wps_ie = wpabuf_alloc_copy(elems.wps_ie + 4, */
+ /* elems.wps_ie_len - 4); */
+ } else {
+ DBG_88E("STA did not include WPA/RSN IE "
+ "in (Re)Association Request - possible WPS "
+ "use\n");
+ pstat->flags |= WLAN_STA_MAYBE_WPS;
+ }
+
+
+ /* The AP supports WPA/RSN and the STA is going to do WPS, but the AP is not ready, */
+ /* i.e. the selected registrar of the AP is false */
+ if ((psecuritypriv->wpa_psk > 0) && (pstat->flags & (WLAN_STA_WPS|WLAN_STA_MAYBE_WPS))) {
+ if (pmlmepriv->wps_beacon_ie) {
+ u8 selected_registrar = 0;
+
+ rtw_get_wps_attr_content(pmlmepriv->wps_beacon_ie, pmlmepriv->wps_beacon_ie_len, WPS_ATTR_SELECTED_REGISTRAR, &selected_registrar, NULL);
+
+ if (!selected_registrar) {
+ DBG_88E("selected_registrar is false , or AP is not ready to do WPS\n");
+
+ status = _STATS_UNABLE_HANDLE_STA_;
+
+ goto OnAssocReqFail;
+ }
+ }
+ }
+ } else {
+ int copy_len;
+
+ if (psecuritypriv->wpa_psk == 0) {
+ DBG_88E("STA %pM: WPA/RSN IE in association "
+ "request, but AP don't support WPA/RSN\n", pstat->hwaddr);
+
+ status = WLAN_STATUS_INVALID_IE;
+
+ goto OnAssocReqFail;
+ }
+
+ if (elems.wps_ie) {
+ DBG_88E("STA included WPS IE in "
+ "(Re)Association Request - WPS is "
+ "used\n");
+ pstat->flags |= WLAN_STA_WPS;
+ copy_len = 0;
+ } else {
+ copy_len = ((wpa_ie_len+2) > sizeof(pstat->wpa_ie)) ? (sizeof(pstat->wpa_ie)) : (wpa_ie_len+2);
+ }
+ if (copy_len > 0)
+ memcpy(pstat->wpa_ie, wpa_ie-2, copy_len);
+ }
+ /* check if there is a WMM IE & WMM-PS support */
+ pstat->flags &= ~WLAN_STA_WME;
+ pstat->qos_option = 0;
+ pstat->qos_info = 0;
+ pstat->has_legacy_ac = true;
+ pstat->uapsd_vo = 0;
+ pstat->uapsd_vi = 0;
+ pstat->uapsd_be = 0;
+ pstat->uapsd_bk = 0;
+ if (pmlmepriv->qospriv.qos_option) {
+ p = pframe + WLAN_HDR_A3_LEN + ie_offset;
+ ie_len = 0;
+ for (;;) {
+ p = rtw_get_ie(p, _VENDOR_SPECIFIC_IE_, &ie_len, pkt_len - WLAN_HDR_A3_LEN - ie_offset);
+ if (p != NULL) {
+ if (_rtw_memcmp(p+2, WMM_IE, 6)) {
+ pstat->flags |= WLAN_STA_WME;
+
+ pstat->qos_option = 1;
+ pstat->qos_info = *(p+8);
+
+ pstat->max_sp_len = (pstat->qos_info>>5)&0x3;
+
+ pstat->has_legacy_ac = ((pstat->qos_info & 0xf) != 0xf);
+
+ if (pstat->qos_info&0xf) {
+ if (pstat->qos_info&BIT(0))
+ pstat->uapsd_vo = BIT(0)|BIT(1);
+ else
+ pstat->uapsd_vo = 0;
+
+ if (pstat->qos_info&BIT(1))
+ pstat->uapsd_vi = BIT(0)|BIT(1);
+ else
+ pstat->uapsd_vi = 0;
+
+ if (pstat->qos_info&BIT(2))
+ pstat->uapsd_bk = BIT(0)|BIT(1);
+ else
+ pstat->uapsd_bk = 0;
+
+ if (pstat->qos_info&BIT(3))
+ pstat->uapsd_be = BIT(0)|BIT(1);
+ else
+ pstat->uapsd_be = 0;
+ }
+ break;
+ }
+ } else {
+ break;
+ }
+ p = p + ie_len + 2;
+ }
+ }
+
+ /* save HT capabilities in the sta object */
+ _rtw_memset(&pstat->htpriv.ht_cap, 0, sizeof(struct rtw_ieee80211_ht_cap));
+ if (elems.ht_capabilities && elems.ht_capabilities_len >= sizeof(struct rtw_ieee80211_ht_cap)) {
+ pstat->flags |= WLAN_STA_HT;
+
+ pstat->flags |= WLAN_STA_WME;
+
+ memcpy(&pstat->htpriv.ht_cap, elems.ht_capabilities, sizeof(struct rtw_ieee80211_ht_cap));
+ } else {
+ pstat->flags &= ~WLAN_STA_HT;
+ }
+ if ((!pmlmepriv->htpriv.ht_option) && (pstat->flags&WLAN_STA_HT)) {
+ status = _STATS_FAILURE_;
+ goto OnAssocReqFail;
+ }
+
+ if ((pstat->flags & WLAN_STA_HT) &&
+ ((pstat->wpa2_pairwise_cipher&WPA_CIPHER_TKIP) ||
+ (pstat->wpa_pairwise_cipher&WPA_CIPHER_TKIP))) {
+ DBG_88E("HT: %pM tried to "
+ "use TKIP with HT association\n", pstat->hwaddr);
+
+ /* status = WLAN_STATUS_CIPHER_REJECTED_PER_POLICY; */
+ /* goto OnAssocReqFail; */
+ }
+
+ pstat->flags |= WLAN_STA_NONERP;
+ for (i = 0; i < pstat->bssratelen; i++) {
+ if ((pstat->bssrateset[i] & 0x7f) > 22) {
+ pstat->flags &= ~WLAN_STA_NONERP;
+ break;
+ }
+ }
+
+ if (pstat->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
+ pstat->flags |= WLAN_STA_SHORT_PREAMBLE;
+ else
+ pstat->flags &= ~WLAN_STA_SHORT_PREAMBLE;
+
+
+
+ if (status != _STATS_SUCCESSFUL_)
+ goto OnAssocReqFail;
+
+#ifdef CONFIG_88EU_P2P
+ pstat->is_p2p_device = false;
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ p2pie = rtw_get_p2p_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, pkt_len - WLAN_HDR_A3_LEN - ie_offset, NULL, &p2pielen);
+ if (p2pie) {
+ pstat->is_p2p_device = true;
+ p2p_status_code = (u8)process_assoc_req_p2p_ie(pwdinfo, pframe, pkt_len, pstat);
+ if (p2p_status_code > 0) {
+ pstat->p2p_status_code = p2p_status_code;
+ status = _STATS_CAP_FAIL_;
+ goto OnAssocReqFail;
+ }
+ }
+ }
+ pstat->p2p_status_code = p2p_status_code;
+#endif /* CONFIG_88EU_P2P */
+
+ /* TODO: identify_proprietary_vendor_ie(); */
+ /* Realtek proprietary IE */
+ /* identify if this is Broadcom sta */
+ /* identify if this is ralink sta */
+ /* Customer proprietary IE */
+
+ /* get a unique AID */
+ if (pstat->aid > 0) {
+ DBG_88E(" old AID %d\n", pstat->aid);
+ } else {
+ for (pstat->aid = 1; pstat->aid <= NUM_STA; pstat->aid++)
+ if (pstapriv->sta_aid[pstat->aid - 1] == NULL)
+ break;
+
+ /* if (pstat->aid > NUM_STA) { */
+ if (pstat->aid > pstapriv->max_num_sta) {
+ pstat->aid = 0;
+
+ DBG_88E(" no room for more AIDs\n");
+
+ status = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA;
+
+ goto OnAssocReqFail;
+ } else {
+ pstapriv->sta_aid[pstat->aid - 1] = pstat;
+ DBG_88E("allocate new AID=(%d)\n", pstat->aid);
+ }
+ }
+
+ pstat->state &= (~WIFI_FW_ASSOC_STATE);
+ pstat->state |= WIFI_FW_ASSOC_SUCCESS;
+
+ _enter_critical_bh(&pstapriv->auth_list_lock, &irqL);
+ if (!rtw_is_list_empty(&pstat->auth_list)) {
+ rtw_list_delete(&pstat->auth_list);
+ pstapriv->auth_list_cnt--;
+ }
+ _exit_critical_bh(&pstapriv->auth_list_lock, &irqL);
+
+ _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ if (rtw_is_list_empty(&pstat->asoc_list)) {
+ pstat->expire_to = pstapriv->expire_to;
+ rtw_list_insert_tail(&pstat->asoc_list, &pstapriv->asoc_list);
+ pstapriv->asoc_list_cnt++;
+ }
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+
+ /* now the station is qualified to join our BSS... */
+ if (pstat && (pstat->state & WIFI_FW_ASSOC_SUCCESS) && (_STATS_SUCCESSFUL_ == status)) {
+#ifdef CONFIG_88EU_AP_MODE
+ /* 1 bss_cap_update & sta_info_update */
+ bss_cap_update_on_sta_join(padapter, pstat);
+ sta_info_update(padapter, pstat);
+
+ /* issue assoc rsp before notify station join event. */
+ if (frame_type == WIFI_ASSOCREQ)
+ issue_asocrsp(padapter, status, pstat, WIFI_ASSOCRSP);
+ else
+ issue_asocrsp(padapter, status, pstat, WIFI_REASSOCRSP);
+
+ /* 2 - report to upper layer */
+ DBG_88E("indicate_sta_join_event to upper layer - hostapd\n");
+ rtw_indicate_sta_assoc_event(padapter, pstat);
+
+ /* 3-(1) report sta add event */
+ report_add_sta_event(padapter, pstat->hwaddr, pstat->aid);
+#endif
+ }
+
+ return _SUCCESS;
+
+asoc_class2_error:
+
+#ifdef CONFIG_88EU_AP_MODE
+ issue_deauth(padapter, (void *)GetAddr2Ptr(pframe), status);
+#endif
+
+ return _FAIL;
+
+OnAssocReqFail:
+
+
+#ifdef CONFIG_88EU_AP_MODE
+ pstat->aid = 0;
+ if (frame_type == WIFI_ASSOCREQ)
+ issue_asocrsp(padapter, status, pstat, WIFI_ASSOCRSP);
+ else
+ issue_asocrsp(padapter, status, pstat, WIFI_REASSOCRSP);
+#endif
+
+
+#endif /* CONFIG_88EU_AP_MODE */
+
+ return _FAIL;
+}
+
+unsigned int OnAssocRsp(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ uint i;
+ int res;
+ unsigned short status;
+ struct ndis_802_11_var_ie *pIE;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ /* struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network); */
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ uint pkt_len = precv_frame->u.hdr.len;
+
+ DBG_88E("%s\n", __func__);
+
+ /* check A1 matches or not */
+ if (!_rtw_memcmp(myid(&(padapter->eeprompriv)), get_da(pframe), ETH_ALEN))
+ return _SUCCESS;
+
+ if (!(pmlmeinfo->state & (WIFI_FW_AUTH_SUCCESS | WIFI_FW_ASSOC_STATE)))
+ return _SUCCESS;
+
+ if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)
+ return _SUCCESS;
+
+ _cancel_timer_ex(&pmlmeext->link_timer);
+
+ /* status */
+ status = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN + 2));
+ if (status > 0) {
+ DBG_88E("assoc reject, status code: %d\n", status);
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ res = -4;
+ goto report_assoc_result;
+ }
+
+ /* get capabilities */
+ pmlmeinfo->capability = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN));
+
+ /* set slot time */
+ pmlmeinfo->slotTime = (pmlmeinfo->capability & BIT(10)) ? 9 : 20;
+
+ /* AID */
+ pmlmeinfo->aid = (int)(le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN + 4))&0x3fff);
+ res = pmlmeinfo->aid;
+
+ /* following are moved to join event callback function */
+ /* to handle HT, WMM, rate adaptive, update MAC reg */
+ /* for not to handle the synchronous IO in the tasklet */
+ for (i = (6 + WLAN_HDR_A3_LEN); i < pkt_len;) {
+ pIE = (struct ndis_802_11_var_ie *)(pframe + i);
+
+ switch (pIE->ElementID) {
+ case _VENDOR_SPECIFIC_IE_:
+ if (_rtw_memcmp(pIE->data, WMM_PARA_OUI, 6)) /* WMM */
+ WMM_param_handler(padapter, pIE);
+ break;
+ case _HT_CAPABILITY_IE_: /* HT caps */
+ HT_caps_handler(padapter, pIE);
+ break;
+ case _HT_EXTRA_INFO_IE_: /* HT info */
+ HT_info_handler(padapter, pIE);
+ break;
+ case _ERPINFO_IE_:
+ ERP_IE_handler(padapter, pIE);
+ break;
+ default:
+ break;
+ }
+
+ i += (pIE->Length + 2);
+ }
+
+ pmlmeinfo->state &= (~WIFI_FW_ASSOC_STATE);
+ pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
+
+ /* Update Basic Rate Table for spec, 2010-12-28 , by thomas */
+ UpdateBrateTbl(padapter, pmlmeinfo->network.SupportedRates);
+
+report_assoc_result:
+ if (res > 0) {
+ rtw_buf_update(&pmlmepriv->assoc_rsp, &pmlmepriv->assoc_rsp_len, pframe, pkt_len);
+ } else {
+ rtw_buf_free(&pmlmepriv->assoc_rsp, &pmlmepriv->assoc_rsp_len);
+ }
+
+ report_join_res(padapter, res);
+
+ return _SUCCESS;
+}
+
+unsigned int OnDeAuth(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ unsigned short reason;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+#endif /* CONFIG_88EU_P2P */
+
+ /* check A3 */
+ if (!(_rtw_memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN)))
+ return _SUCCESS;
+
+#ifdef CONFIG_88EU_P2P
+ if (pwdinfo->rx_invitereq_info.scan_op_ch_only) {
+ _cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey);
+ _set_timer(&pwdinfo->reset_ch_sitesurvey, 10);
+ }
+#endif /* CONFIG_88EU_P2P */
+
+ reason = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN));
+
+ DBG_88E("%s Reason code(%d)\n", __func__, reason);
+
+#ifdef CONFIG_88EU_AP_MODE
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ unsigned long irqL;
+ struct sta_info *psta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ DBG_88E_LEVEL(_drv_always_, "ap recv deauth reason code(%d) sta:%pM\n",
+ reason, GetAddr2Ptr(pframe));
+
+ psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
+ if (psta) {
+ u8 updated = 0;
+
+ _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ if (!rtw_is_list_empty(&psta->asoc_list)) {
+ rtw_list_delete(&psta->asoc_list);
+ pstapriv->asoc_list_cnt--;
+ updated = ap_free_sta(padapter, psta, false, reason);
+ }
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+
+ associated_clients_update(padapter, updated);
+ }
+
+
+ return _SUCCESS;
+ } else
+#endif
+ {
+ DBG_88E_LEVEL(_drv_always_, "sta recv deauth reason code(%d) sta:%pM\n",
+ reason, GetAddr3Ptr(pframe));
+
+ receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
+ }
+ pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
+ return _SUCCESS;
+}
+
+unsigned int OnDisassoc(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ u16 reason;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+#endif /* CONFIG_88EU_P2P */
+
+ /* check A3 */
+ if (!(_rtw_memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN)))
+ return _SUCCESS;
+
+#ifdef CONFIG_88EU_P2P
+ if (pwdinfo->rx_invitereq_info.scan_op_ch_only) {
+ _cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey);
+ _set_timer(&pwdinfo->reset_ch_sitesurvey, 10);
+ }
+#endif /* CONFIG_88EU_P2P */
+
+ reason = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN));
+
+ DBG_88E("%s Reason code(%d)\n", __func__, reason);
+
+#ifdef CONFIG_88EU_AP_MODE
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ unsigned long irqL;
+ struct sta_info *psta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ /* _enter_critical_bh(&(pstapriv->sta_hash_lock), &irqL); */
+ /* rtw_free_stainfo(padapter, psta); */
+ /* _exit_critical_bh(&(pstapriv->sta_hash_lock), &irqL); */
+
+ DBG_88E_LEVEL(_drv_always_, "ap recv disassoc reason code(%d) sta:%pM\n",
+ reason, GetAddr2Ptr(pframe));
+
+ psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
+ if (psta) {
+ u8 updated = 0;
+
+ _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ if (!rtw_is_list_empty(&psta->asoc_list)) {
+ rtw_list_delete(&psta->asoc_list);
+ pstapriv->asoc_list_cnt--;
+ updated = ap_free_sta(padapter, psta, false, reason);
+ }
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+
+ associated_clients_update(padapter, updated);
+ }
+
+ return _SUCCESS;
+ } else
+#endif
+ {
+ DBG_88E_LEVEL(_drv_always_, "ap recv disassoc reason code(%d) sta:%pM\n",
+ reason, GetAddr3Ptr(pframe));
+
+ receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
+ }
+ pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
+ return _SUCCESS;
+}
+
+unsigned int OnAtim(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ DBG_88E("%s\n", __func__);
+ return _SUCCESS;
+}
+
+unsigned int on_action_spct(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ unsigned int ret = _FAIL;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *frame_body = (u8 *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+ u8 category;
+ u8 action;
+
+ DBG_88E(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(padapter->pnetdev));
+
+ psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
+
+ if (!psta)
+ goto exit;
+
+ category = frame_body[0];
+ if (category != RTW_WLAN_CATEGORY_SPECTRUM_MGMT)
+ goto exit;
+
+ action = frame_body[1];
+ switch (action) {
+ case RTW_WLAN_ACTION_SPCT_MSR_REQ:
+ case RTW_WLAN_ACTION_SPCT_MSR_RPRT:
+ case RTW_WLAN_ACTION_SPCT_TPC_REQ:
+ case RTW_WLAN_ACTION_SPCT_TPC_RPRT:
+ break;
+ case RTW_WLAN_ACTION_SPCT_CHL_SWITCH:
+ break;
+ default:
+ break;
+ }
+
+exit:
+ return ret;
+}
+
+unsigned int OnAction_qos(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ return _SUCCESS;
+}
+
+unsigned int OnAction_dls(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ return _SUCCESS;
+}
+
+unsigned int OnAction_back(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ u8 *addr;
+ struct sta_info *psta = NULL;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ unsigned char *frame_body;
+ unsigned char category, action;
+ unsigned short tid, status, reason_code = 0;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ /* check RA matches or not */
+ if (!_rtw_memcmp(myid(&(padapter->eeprompriv)), GetAddr1Ptr(pframe), ETH_ALEN))/* for if1, sta/ap mode */
+ return _SUCCESS;
+
+ DBG_88E("%s\n", __func__);
+
+ if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
+ if (!(pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS))
+ return _SUCCESS;
+
+ addr = GetAddr2Ptr(pframe);
+ psta = rtw_get_stainfo(pstapriv, addr);
+
+ if (psta == NULL)
+ return _SUCCESS;
+
+ frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+
+ category = frame_body[0];
+ if (category == RTW_WLAN_CATEGORY_BACK) { /* representing Block Ack */
+ if (!pmlmeinfo->HT_enable)
+ return _SUCCESS;
+ action = frame_body[1];
+ DBG_88E("%s, action=%d\n", __func__, action);
+ switch (action) {
+ case RTW_WLAN_ACTION_ADDBA_REQ: /* ADDBA request */
+ memcpy(&(pmlmeinfo->ADDBA_req), &(frame_body[2]), sizeof(struct ADDBA_request));
+ process_addba_req(padapter, (u8 *)&(pmlmeinfo->ADDBA_req), addr);
+
+ if (pmlmeinfo->bAcceptAddbaReq)
+ issue_action_BA(padapter, addr, RTW_WLAN_ACTION_ADDBA_RESP, 0);
+ else
+ issue_action_BA(padapter, addr, RTW_WLAN_ACTION_ADDBA_RESP, 37);/* reject ADDBA Req */
+ break;
+ case RTW_WLAN_ACTION_ADDBA_RESP: /* ADDBA response */
+ status = RTW_GET_LE16(&frame_body[3]);
+ tid = ((frame_body[5] >> 2) & 0x7);
+ if (status == 0) { /* successful */
+ DBG_88E("agg_enable for TID=%d\n", tid);
+ psta->htpriv.agg_enable_bitmap |= 1 << tid;
+ psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
+ } else {
+ psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
+ }
+ break;
+ case RTW_WLAN_ACTION_DELBA: /* DELBA */
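+ /* frame_body[2..3] holds the DELBA parameter set: BIT(3) of the high byte is the initiator flag, bits 4..7 carry the TID */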
+ if ((frame_body[3] & BIT(3)) == 0) {
+ psta->htpriv.agg_enable_bitmap &= ~(1 << ((frame_body[3] >> 4) & 0xf));
+ psta->htpriv.candidate_tid_bitmap &= ~(1 << ((frame_body[3] >> 4) & 0xf));
+ reason_code = RTW_GET_LE16(&frame_body[4]);
+ } else if ((frame_body[3] & BIT(3)) == BIT(3)) {
+ tid = (frame_body[3] >> 4) & 0x0F;
+ preorder_ctrl = &psta->recvreorder_ctrl[tid];
+ preorder_ctrl->enable = false;
+ preorder_ctrl->indicate_seq = 0xffff;
+ }
+ DBG_88E("%s(): DELBA: %x(%x)\n", __func__, pmlmeinfo->agg_enable_bitmap, reason_code);
+ /* todo: how to notify the host while receiving DELETE BA */
+ break;
+ default:
+ break;
+ }
+ }
+ return _SUCCESS;
+}
+
+#ifdef CONFIG_88EU_P2P
+
+static int get_reg_classes_full_count(struct p2p_channels *channel_list)
+{
+ int cnt = 0;
+ int i;
+
+ for (i = 0; i < channel_list->reg_classes; i++) {
+ cnt += channel_list->reg_class[i].channels;
+ }
+
+ return cnt;
+}
+
+void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr)
+{
+ unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ u8 action = P2P_PUB_ACTION_ACTION;
+ __be32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_GO_NEGO_REQ;
+ u8 wpsie[255] = { 0x00 }, p2pie[255] = { 0x00 };
+ u8 wpsielen = 0, p2pielen = 0;
+ u16 len_channellist_attr = 0;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ DBG_88E("[%s] In\n", __func__);
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, myid(&(padapter->eeprompriv)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&(p2poui), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(oui_subtype), &(pattrib->pktlen));
+ pwdinfo->negotiation_dialog_token = 1; /* Initialize the dialog value */
+ pframe = rtw_set_fixed_ie(pframe, 1, &pwdinfo->negotiation_dialog_token, &(pattrib->pktlen));
+
+
+
+ /* WPS Section */
+ wpsielen = 0;
+ /* WPS OUI */
+ *(__be32 *)(wpsie) = cpu_to_be32(WPSOUI);
+ wpsielen += 4;
+
+ /* WPS version */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_VER1);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0001);
+ wpsielen += 2;
+
+ /* Value: */
+ wpsie[wpsielen++] = WPS_VERSION_1; /* Version 1.0 */
+
+ /* Device Password ID */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_DEVICE_PWID);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0002);
+ wpsielen += 2;
+
+ /* Value: */
+
+ if (pwdinfo->ui_got_wps_info == P2P_GOT_WPSINFO_PEER_DISPLAY_PIN)
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_DPID_USER_SPEC);
+ else if (pwdinfo->ui_got_wps_info == P2P_GOT_WPSINFO_SELF_DISPLAY_PIN)
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_DPID_REGISTRAR_SPEC);
+ else if (pwdinfo->ui_got_wps_info == P2P_GOT_WPSINFO_PBC)
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_DPID_PBC);
+
+ wpsielen += 2;
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, wpsielen, (unsigned char *)wpsie, &pattrib->pktlen);
+
+
+ /* P2P IE Section. */
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* Commented by Albert 20110306 */
+ /* According to the P2P Specification, the group negotiation request frame should contain 9 P2P attributes */
+ /* 1. P2P Capability */
+ /* 2. Group Owner Intent */
+ /* 3. Configuration Timeout */
+ /* 4. Listen Channel */
+ /* 5. Extended Listen Timing */
+ /* 6. Intended P2P Interface Address */
+ /* 7. Channel List */
+ /* 8. P2P Device Info */
+ /* 9. Operating Channel */
+
+
+ /* P2P Capability */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CAPABILITY;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Device Capability Bitmap, 1 byte */
+ p2pie[p2pielen++] = DMP_P2P_DEVCAP_SUPPORT;
+
+ /* Group Capability Bitmap, 1 byte */
+ if (pwdinfo->persistent_supported)
+ p2pie[p2pielen++] = P2P_GRPCAP_CROSS_CONN | P2P_GRPCAP_PERSISTENT_GROUP;
+ else
+ p2pie[p2pielen++] = P2P_GRPCAP_CROSS_CONN;
+
+ /* Group Owner Intent */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_GO_INTENT;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0001);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Todo the tie breaker bit. */
+ p2pie[p2pielen++] = ((pwdinfo->intent << 1) | BIT(0));
+
+ /* Configuration Timeout */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CONF_TIMEOUT;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ p2pie[p2pielen++] = 200; /* 2 seconds needed to be the P2P GO */
+ p2pie[p2pielen++] = 200; /* 2 seconds needed to be the P2P Client */
+
+
+ /* Listen Channel */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_LISTEN_CH;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0005);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x51; /* Copy from SD7 */
+
+ /* Channel Number */
+ p2pie[p2pielen++] = pwdinfo->listen_channel; /* listening channel number */
+
+
+ /* Extended Listen Timing ATTR */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_EX_LISTEN_TIMING;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0004);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Availability Period */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0xFFFF);
+ p2pielen += 2;
+
+ /* Availability Interval */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0xFFFF);
+ p2pielen += 2;
+
+
+ /* Intended P2P Interface Address */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_INTENTED_IF_ADDR;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(ETH_ALEN);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv), ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+
+ /* Channel List */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CH_LIST;
+
+ /* Length: */
+ /* Country String(3) */
+ /* + (Operating Class (1) + Number of Channels(1)) * Operation Classes (?) */
+ /* + number of channels in all classes */
+ len_channellist_attr = 3
+ + (1 + 1) * (u16)(pmlmeext->channel_list.reg_classes)
+ + get_reg_classes_full_count(&pmlmeext->channel_list);
+
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Channel Entry List */
+
+ {
+ int i, j;
+ for (j = 0; j < pmlmeext->channel_list.reg_classes; j++) {
+ /* Operating Class */
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].reg_class;
+
+ /* Number of Channels */
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channels;
+
+ /* Channel List */
+ for (i = 0; i < pmlmeext->channel_list.reg_class[j].channels; i++) {
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channel[i];
+ }
+ }
+ }
+
+ /* Device Info */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* 21 -> P2P Device Address (6bytes) + Config Methods (2bytes) + Primary Device Type (8bytes) */
+ /* + NumofSecondDevType (1byte) + WPS Device Name ID field (2bytes) + WPS Device Name Len field (2bytes) */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(21 + pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ /* P2P Device Address */
+ memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv), ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* Config Method */
+ /* This field should be big endian. Noted by P2P specification. */
+
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(pwdinfo->supported_wps_cm);
+
+ p2pielen += 2;
+
+ /* Primary Device Type */
+ /* Category ID */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA);
+ p2pielen += 2;
+
+ /* OUI */
+ *(__be32 *)(p2pie + p2pielen) = cpu_to_be32(WPSOUI);
+ p2pielen += 4;
+
+ /* Sub Category ID */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER);
+ p2pielen += 2;
+
+ /* Number of Secondary Device Types */
+ p2pie[p2pielen++] = 0x00; /* No Secondary Device Type List */
+
+ /* Device Name */
+ /* Type: */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME);
+ p2pielen += 2;
+
+ /* Length: */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, pwdinfo->device_name, pwdinfo->device_name_len);
+ p2pielen += pwdinfo->device_name_len;
+
+
+ /* Operating Channel */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_OPERATING_CH;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0005);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x51;
+
+ /* Channel Number */
+ p2pie[p2pielen++] = pwdinfo->operating_channel; /* operating channel number */
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &pattrib->pktlen);
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+
+ return;
+}
+
+static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame_body, uint len, u8 result)
+{
+ unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ u8 action = P2P_PUB_ACTION_ACTION;
+ __be32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_GO_NEGO_RESP;
+ u8 wpsie[255] = { 0x00 }, p2pie[255] = { 0x00 };
+ u8 p2pielen = 0;
+ uint wpsielen = 0;
+ u16 wps_devicepassword_id = 0x0000;
+ __be16 be_tmp;
+ uint wps_devicepassword_id_len = 0;
+ u16 len_channellist_attr = 0;
+
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ DBG_88E("[%s] In, result=%d\n", __func__, result);
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, myid(&(padapter->eeprompriv)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&(p2poui), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(oui_subtype), &(pattrib->pktlen));
+ pwdinfo->negotiation_dialog_token = frame_body[7]; /* The Dialog Token of the received group negotiation request frame. */
+ pframe = rtw_set_fixed_ie(pframe, 1, &(pwdinfo->negotiation_dialog_token), &(pattrib->pktlen));
+
+ /* Commented by Albert 20110328 */
+ /* Try to get the device password ID from the WPS IE of group negotiation request frame */
+ /* WiFi Direct test plan 5.1.15 */
+ rtw_get_wps_ie(frame_body + _PUBLIC_ACTION_IE_OFFSET_, len - _PUBLIC_ACTION_IE_OFFSET_, wpsie, &wpsielen);
+ rtw_get_wps_attr_content(wpsie, wpsielen, WPS_ATTR_DEVICE_PWID, (u8 *)&be_tmp, &wps_devicepassword_id_len);
+ wps_devicepassword_id = be16_to_cpu(be_tmp);
+
+ _rtw_memset(wpsie, 0x00, 255);
+ wpsielen = 0;
+
+ /* WPS Section */
+ wpsielen = 0;
+ /* WPS OUI */
+ *(__be32 *)(wpsie) = cpu_to_be32(WPSOUI);
+ wpsielen += 4;
+
+ /* WPS version */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_VER1);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0001);
+ wpsielen += 2;
+
+ /* Value: */
+ wpsie[wpsielen++] = WPS_VERSION_1; /* Version 1.0 */
+
+ /* Device Password ID */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_DEVICE_PWID);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0002);
+ wpsielen += 2;
+
+ /* Value: */
+ if (wps_devicepassword_id == WPS_DPID_USER_SPEC)
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_DPID_REGISTRAR_SPEC);
+ else if (wps_devicepassword_id == WPS_DPID_REGISTRAR_SPEC)
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_DPID_USER_SPEC);
+ else
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_DPID_PBC);
+ wpsielen += 2;
+
+ /* Commented by Kurt 20120113 */
+	/* If the peer device starts the P2P handshake without sending prov_disc_req, */
+	/* we have to derive peer_req_cm here. */
+ if (_rtw_memcmp(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "000", 3)) {
+ if (wps_devicepassword_id == WPS_DPID_USER_SPEC)
+ memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "dis", 3);
+ else if (wps_devicepassword_id == WPS_DPID_REGISTRAR_SPEC)
+ memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "pad", 3);
+ else
+ memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "pbc", 3);
+ }
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, wpsielen, (unsigned char *)wpsie, &pattrib->pktlen);
+
+
+ /* P2P IE Section. */
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* Commented by Albert 20100908 */
+	/* According to the P2P Specification, the group negotiation response frame should contain 9 P2P attributes */
+ /* 1. Status */
+ /* 2. P2P Capability */
+ /* 3. Group Owner Intent */
+ /* 4. Configuration Timeout */
+ /* 5. Operating Channel */
+ /* 6. Intended P2P Interface Address */
+ /* 7. Channel List */
+ /* 8. Device Info */
+ /* 9. Group ID (Only GO) */
+
+
+ /* ToDo: */
+
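+	/* Each attribute below follows the same TLV layout used throughout this file: */
+	/* a 1-byte Attribute ID, a 2-byte little-endian Length, then Length bytes of Value. */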
+ /* P2P Status */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_STATUS;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0001);
+ p2pielen += 2;
+
+ /* Value: */
+ p2pie[p2pielen++] = result;
+
+ /* P2P Capability */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CAPABILITY;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Device Capability Bitmap, 1 byte */
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT)) {
+ /* Commented by Albert 2011/03/08 */
+		/* According to the P2P specification, */
+		/* if the sending device will act as the P2P Client, the Device Capability field of the GO negotiation response frame should be reserved (set to 0) */
+ p2pie[p2pielen++] = 0;
+ } else {
+ /* Be group owner or meet the error case */
+ p2pie[p2pielen++] = DMP_P2P_DEVCAP_SUPPORT;
+ }
+
+ /* Group Capability Bitmap, 1 byte */
+ if (pwdinfo->persistent_supported) {
+ p2pie[p2pielen++] = P2P_GRPCAP_CROSS_CONN | P2P_GRPCAP_PERSISTENT_GROUP;
+ } else {
+ p2pie[p2pielen++] = P2P_GRPCAP_CROSS_CONN;
+ }
+
+ /* Group Owner Intent */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_GO_INTENT;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0001);
+ p2pielen += 2;
+
+ /* Value: */
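+	/* The GO Intent value packs the intent (0-15) into bits 7..1 and the tie-breaker into bit 0. */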
+ if (pwdinfo->peer_intent & 0x01) {
+ /* Peer's tie breaker bit is 1, our tie breaker bit should be 0 */
+ p2pie[p2pielen++] = (pwdinfo->intent << 1);
+ } else {
+ /* Peer's tie breaker bit is 0, our tie breaker bit should be 1 */
+ p2pie[p2pielen++] = ((pwdinfo->intent << 1) | BIT(0));
+ }
+
+
+ /* Configuration Timeout */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CONF_TIMEOUT;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ p2pie[p2pielen++] = 200; /* 2 seconds needed to be the P2P GO */
+ p2pie[p2pielen++] = 200; /* 2 seconds needed to be the P2P Client */
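+	/* The Configuration Timeout values are expressed in units of 10 ms, so 200 corresponds to 2000 ms. */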
+
+ /* Operating Channel */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_OPERATING_CH;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0005);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x51;
+
+ /* Channel Number */
+ p2pie[p2pielen++] = pwdinfo->operating_channel; /* operating channel number */
+
+ /* Intended P2P Interface Address */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_INTENTED_IF_ADDR;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(ETH_ALEN);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv), ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* Channel List */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CH_LIST;
+
+ /* Country String(3) */
+ /* + (Operating Class (1) + Number of Channels(1)) * Operation Classes (?) */
+ /* + number of channels in all classes */
+ len_channellist_attr = 3
+ + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes
+ + get_reg_classes_full_count(&pmlmeext->channel_list);
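+	/* For example, two operating classes listing 11 and 2 channels give */
+	/* len_channellist_attr = 3 + (1 + 1) * 2 + 13 = 20 bytes. */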
+
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
+
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Channel Entry List */
+
+ {
+ int i, j;
+ for (j = 0; j < pmlmeext->channel_list.reg_classes; j++) {
+ /* Operating Class */
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].reg_class;
+
+ /* Number of Channels */
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channels;
+
+ /* Channel List */
+ for (i = 0; i < pmlmeext->channel_list.reg_class[j].channels; i++) {
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channel[i];
+ }
+ }
+ }
+
+ /* Device Info */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* 21 -> P2P Device Address (6bytes) + Config Methods (2bytes) + Primary Device Type (8bytes) */
+ /* + NumofSecondDevType (1byte) + WPS Device Name ID field (2bytes) + WPS Device Name Len field (2bytes) */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(21 + pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ /* P2P Device Address */
+ memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv), ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* Config Method */
+	/* This field should be big-endian, as noted in the P2P specification. */
+
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(pwdinfo->supported_wps_cm);
+
+ p2pielen += 2;
+
+ /* Primary Device Type */
+ /* Category ID */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA);
+ p2pielen += 2;
+
+ /* OUI */
+ *(__be32 *)(p2pie + p2pielen) = cpu_to_be32(WPSOUI);
+ p2pielen += 4;
+
+ /* Sub Category ID */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER);
+ p2pielen += 2;
+
+ /* Number of Secondary Device Types */
+ p2pie[p2pielen++] = 0x00; /* No Secondary Device Type List */
+
+ /* Device Name */
+ /* Type: */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME);
+ p2pielen += 2;
+
+ /* Length: */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+	memcpy(p2pie + p2pielen, pwdinfo->device_name, pwdinfo->device_name_len);
+ p2pielen += pwdinfo->device_name_len;
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ /* Group ID Attribute */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_GROUP_ID;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(ETH_ALEN + pwdinfo->nego_ssidlen);
+ p2pielen += 2;
+
+ /* Value: */
+		/* P2P Device Address */
+		memcpy(p2pie + p2pielen, pwdinfo->device_addr, ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* SSID */
+ memcpy(p2pie + p2pielen, pwdinfo->nego_ssid, pwdinfo->nego_ssidlen);
+ p2pielen += pwdinfo->nego_ssidlen;
+ }
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &pattrib->pktlen);
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+ return;
+}
+
+static void issue_p2p_GO_confirm(struct adapter *padapter, u8 *raddr, u8 result)
+{
+ unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ u8 action = P2P_PUB_ACTION_ACTION;
+ __be32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_GO_NEGO_CONF;
+ u8 p2pie[255] = { 0x00 };
+ u8 p2pielen = 0;
+
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ DBG_88E("[%s] In\n", __func__);
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, myid(&(padapter->eeprompriv)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&(p2poui), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(oui_subtype), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(pwdinfo->negotiation_dialog_token), &(pattrib->pktlen));
+
+
+
+ /* P2P IE Section. */
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* Commented by Albert 20110306 */
+	/* According to the P2P Specification, the group negotiation confirmation frame should contain 5 P2P attributes */
+ /* 1. Status */
+ /* 2. P2P Capability */
+ /* 3. Operating Channel */
+ /* 4. Channel List */
+ /* 5. Group ID (if this WiFi is GO) */
+
+ /* P2P Status */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_STATUS;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0001);
+ p2pielen += 2;
+
+ /* Value: */
+ p2pie[p2pielen++] = result;
+
+ /* P2P Capability */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CAPABILITY;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Device Capability Bitmap, 1 byte */
+ p2pie[p2pielen++] = DMP_P2P_DEVCAP_SUPPORT;
+
+ /* Group Capability Bitmap, 1 byte */
+ if (pwdinfo->persistent_supported)
+ p2pie[p2pielen++] = P2P_GRPCAP_CROSS_CONN | P2P_GRPCAP_PERSISTENT_GROUP;
+ else
+ p2pie[p2pielen++] = P2P_GRPCAP_CROSS_CONN;
+
+ /* Operating Channel */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_OPERATING_CH;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0005);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT)) {
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x51;
+ p2pie[p2pielen++] = pwdinfo->peer_operating_ch;
+ } else {
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x51;
+
+ /* Channel Number */
+ p2pie[p2pielen++] = pwdinfo->operating_channel; /* Use the listen channel as the operating channel */
+ }
+
+
+ /* Channel List */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CH_LIST;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(pwdinfo->channel_list_attr_len);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, pwdinfo->channel_list_attr, pwdinfo->channel_list_attr_len);
+ p2pielen += pwdinfo->channel_list_attr_len;
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ /* Group ID Attribute */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_GROUP_ID;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(ETH_ALEN + pwdinfo->nego_ssidlen);
+ p2pielen += 2;
+
+ /* Value: */
+		/* P2P Device Address */
+		memcpy(p2pie + p2pielen, pwdinfo->device_addr, ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* SSID */
+ memcpy(p2pie + p2pielen, pwdinfo->nego_ssid, pwdinfo->nego_ssidlen);
+ p2pielen += pwdinfo->nego_ssidlen;
+ }
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &pattrib->pktlen);
+ pattrib->last_txcmdsz = pattrib->pktlen;
+ dump_mgntframe(padapter, pmgntframe);
+ return;
+}
+
+void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr)
+{
+ unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ u8 action = P2P_PUB_ACTION_ACTION;
+ __be32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_INVIT_REQ;
+ u8 p2pie[255] = { 0x00 };
+ u8 p2pielen = 0;
+ u8 dialogToken = 3;
+ u16 len_channellist_attr = 0;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, raddr, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&(p2poui), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(oui_subtype), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(dialogToken), &(pattrib->pktlen));
+
+ /* P2P IE Section. */
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* Commented by Albert 20101011 */
+ /* According to the P2P Specification, the P2P Invitation request frame should contain 7 P2P attributes */
+ /* 1. Configuration Timeout */
+ /* 2. Invitation Flags */
+ /* 3. Operating Channel (Only GO) */
+ /* 4. P2P Group BSSID (Should be included if I am the GO) */
+ /* 5. Channel List */
+ /* 6. P2P Group ID */
+ /* 7. P2P Device Info */
+
+ /* Configuration Timeout */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CONF_TIMEOUT;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ p2pie[p2pielen++] = 200; /* 2 seconds needed to be the P2P GO */
+ p2pie[p2pielen++] = 200; /* 2 seconds needed to be the P2P Client */
+
+ /* Invitation Flags */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_INVITATION_FLAGS;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0001);
+ p2pielen += 2;
+
+ /* Value: */
+ p2pie[p2pielen++] = P2P_INVITATION_FLAGS_PERSISTENT;
+
+
+ /* Operating Channel */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_OPERATING_CH;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0005);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x51;
+
+ /* Channel Number */
+ p2pie[p2pielen++] = pwdinfo->invitereq_info.operating_ch; /* operating channel number */
+
+ if (_rtw_memcmp(myid(&padapter->eeprompriv), pwdinfo->invitereq_info.go_bssid, ETH_ALEN)) {
+ /* P2P Group BSSID */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_GROUP_BSSID;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(ETH_ALEN);
+ p2pielen += 2;
+
+ /* Value: */
+ /* P2P Device Address for GO */
+ memcpy(p2pie + p2pielen, pwdinfo->invitereq_info.go_bssid, ETH_ALEN);
+ p2pielen += ETH_ALEN;
+ }
+
+ /* Channel List */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CH_LIST;
+
+
+ /* Length: */
+ /* Country String(3) */
+ /* + (Operating Class (1) + Number of Channels(1)) * Operation Classes (?) */
+ /* + number of channels in all classes */
+ len_channellist_attr = 3
+ + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes
+ + get_reg_classes_full_count(&pmlmeext->channel_list);
+
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
+
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Channel Entry List */
+ {
+ int i, j;
+ for (j = 0; j < pmlmeext->channel_list.reg_classes; j++) {
+ /* Operating Class */
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].reg_class;
+
+ /* Number of Channels */
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channels;
+
+ /* Channel List */
+ for (i = 0; i < pmlmeext->channel_list.reg_class[j].channels; i++) {
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channel[i];
+ }
+ }
+ }
+
+
+ /* P2P Group ID */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_GROUP_ID;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(6 + pwdinfo->invitereq_info.ssidlen);
+ p2pielen += 2;
+
+ /* Value: */
+ /* P2P Device Address for GO */
+ memcpy(p2pie + p2pielen, pwdinfo->invitereq_info.go_bssid, ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* SSID */
+ memcpy(p2pie + p2pielen, pwdinfo->invitereq_info.go_ssid, pwdinfo->invitereq_info.ssidlen);
+ p2pielen += pwdinfo->invitereq_info.ssidlen;
+
+
+ /* Device Info */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* 21 -> P2P Device Address (6bytes) + Config Methods (2bytes) + Primary Device Type (8bytes) */
+ /* + NumofSecondDevType (1byte) + WPS Device Name ID field (2bytes) + WPS Device Name Len field (2bytes) */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(21 + pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ /* P2P Device Address */
+ memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv), ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* Config Method */
+	/* This field should be big-endian, as noted in the P2P specification. */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_CONFIG_METHOD_DISPLAY);
+ p2pielen += 2;
+
+ /* Primary Device Type */
+ /* Category ID */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA);
+ p2pielen += 2;
+
+ /* OUI */
+ *(__be32 *)(p2pie + p2pielen) = cpu_to_be32(WPSOUI);
+ p2pielen += 4;
+
+ /* Sub Category ID */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER);
+ p2pielen += 2;
+
+ /* Number of Secondary Device Types */
+ p2pie[p2pielen++] = 0x00; /* No Secondary Device Type List */
+
+ /* Device Name */
+ /* Type: */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME);
+ p2pielen += 2;
+
+ /* Length: */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, pwdinfo->device_name, pwdinfo->device_name_len);
+ p2pielen += pwdinfo->device_name_len;
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &pattrib->pktlen);
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+
+ return;
+}
+
+void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr, u8 dialogToken, u8 status_code)
+{
+ unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ u8 action = P2P_PUB_ACTION_ACTION;
+ __be32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_INVIT_RESP;
+ u8 p2pie[255] = { 0x00 };
+ u8 p2pielen = 0;
+ u16 len_channellist_attr = 0;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, raddr, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&(p2poui), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(oui_subtype), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(dialogToken), &(pattrib->pktlen));
+
+ /* P2P IE Section. */
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* Commented by Albert 20101005 */
+ /* According to the P2P Specification, the P2P Invitation response frame should contain 5 P2P attributes */
+ /* 1. Status */
+ /* 2. Configuration Timeout */
+ /* 3. Operating Channel (Only GO) */
+ /* 4. P2P Group BSSID (Only GO) */
+ /* 5. Channel List */
+
+ /* P2P Status */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_STATUS;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0001);
+ p2pielen += 2;
+
+ /* Value: */
+	/* When the status code is P2P_STATUS_FAIL_INFO_UNAVAILABLE: */
+	/* send the event of receiving the P2P Invitation Request frame to the DMP UI. */
+	/* The DMP UI has to compare the MAC address to find the matching profile. */
+	/* So, the WiFi driver reports P2P_STATUS_FAIL_INFO_UNAVAILABLE to the NB. */
+	/* If the UI finds the corresponding profile, the WiFi driver sends the P2P Invitation Request */
+	/* to the NB to rebuild the persistent group. */
+ p2pie[p2pielen++] = status_code;
+
+ /* Configuration Timeout */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CONF_TIMEOUT;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ p2pie[p2pielen++] = 200; /* 2 seconds needed to be the P2P GO */
+ p2pie[p2pielen++] = 200; /* 2 seconds needed to be the P2P Client */
+
+ if (status_code == P2P_STATUS_SUCCESS) {
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ /* The P2P Invitation request frame asks this Wi-Fi device to be the P2P GO */
+ /* In this case, the P2P Invitation response frame should carry the two more P2P attributes. */
+ /* First one is operating channel attribute. */
+ /* Second one is P2P Group BSSID attribute. */
+
+ /* Operating Channel */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_OPERATING_CH;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0005);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x51; /* Copy from SD7 */
+
+ /* Channel Number */
+ p2pie[p2pielen++] = pwdinfo->operating_channel; /* operating channel number */
+
+
+ /* P2P Group BSSID */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_GROUP_BSSID;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(ETH_ALEN);
+ p2pielen += 2;
+
+ /* Value: */
+ /* P2P Device Address for GO */
+ memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv), ETH_ALEN);
+ p2pielen += ETH_ALEN;
+ }
+
+ /* Channel List */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CH_LIST;
+
+ /* Length: */
+ /* Country String(3) */
+ /* + (Operating Class (1) + Number of Channels(1)) * Operation Classes (?) */
+ /* + number of channels in all classes */
+ len_channellist_attr = 3
+ + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes
+ + get_reg_classes_full_count(&pmlmeext->channel_list);
+
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Channel Entry List */
+ {
+ int i, j;
+ for (j = 0; j < pmlmeext->channel_list.reg_classes; j++) {
+ /* Operating Class */
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].reg_class;
+
+ /* Number of Channels */
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channels;
+
+ /* Channel List */
+ for (i = 0; i < pmlmeext->channel_list.reg_class[j].channels; i++) {
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channel[i];
+ }
+ }
+ }
+ }
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &pattrib->pktlen);
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+
+ return;
+}
+
+void issue_p2p_provision_request(struct adapter *padapter, u8 *pssid, u8 ussidlen, u8 *pdev_raddr)
+{
+ unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ u8 action = P2P_PUB_ACTION_ACTION;
+ u8 dialogToken = 1;
+ u8 oui_subtype = P2P_PROVISION_DISC_REQ;
+ u8 wpsie[100] = { 0x00 };
+ u8 wpsielen = 0;
+ __be32 p2poui = cpu_to_be32(P2POUI);
+ u32 p2pielen = 0;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ DBG_88E("[%s] In\n", __func__);
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, pdev_raddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pdev_raddr, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&(p2poui), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(oui_subtype), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(dialogToken), &(pattrib->pktlen));
+
+ p2pielen = build_prov_disc_request_p2p_ie(pwdinfo, pframe, pssid, ussidlen, pdev_raddr);
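+	/* The P2P IE is built by the helper above; only the WPS IE (version plus the requested config method) is appended inline below. */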
+
+ pframe += p2pielen;
+ pattrib->pktlen += p2pielen;
+
+ wpsielen = 0;
+ /* WPS OUI */
+ *(__be32 *)(wpsie) = cpu_to_be32(WPSOUI);
+ wpsielen += 4;
+
+ /* WPS version */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_VER1);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0001);
+ wpsielen += 2;
+
+ /* Value: */
+ wpsie[wpsielen++] = WPS_VERSION_1; /* Version 1.0 */
+
+ /* Config Method */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_CONF_METHOD);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0002);
+ wpsielen += 2;
+
+ /* Value: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(pwdinfo->tx_prov_disc_info.wps_config_method_request);
+ wpsielen += 2;
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, wpsielen, (unsigned char *)wpsie, &pattrib->pktlen);
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+
+ return;
+}
+
+static u8 is_matched_in_profilelist(u8 *peermacaddr, struct profile_info *profileinfo)
+{
+ u8 i, match_result = 0;
+
+ DBG_88E("[%s] peermac=%.2X %.2X %.2X %.2X %.2X %.2X\n", __func__,
+ peermacaddr[0], peermacaddr[1], peermacaddr[2], peermacaddr[3], peermacaddr[4], peermacaddr[5]);
+
+ for (i = 0; i < P2P_MAX_PERSISTENT_GROUP_NUM; i++, profileinfo++) {
+ DBG_88E("[%s] profileinfo_mac=%.2X %.2X %.2X %.2X %.2X %.2X\n", __func__,
+ profileinfo->peermac[0], profileinfo->peermac[1], profileinfo->peermac[2], profileinfo->peermac[3], profileinfo->peermac[4], profileinfo->peermac[5]);
+ if (_rtw_memcmp(peermacaddr, profileinfo->peermac, ETH_ALEN)) {
+ match_result = 1;
+ DBG_88E("[%s] Match!\n", __func__);
+ break;
+ }
+ }
+ return match_result;
+}
+
+void issue_probersp_p2p(struct adapter *padapter, unsigned char *da)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ unsigned char *mac;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ u16 beacon_interval = 100;
+ u16 capInfo = 0;
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ u8 wpsie[255] = { 0x00 };
+ u32 wpsielen = 0, p2pielen = 0;
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ mac = myid(&(padapter->eeprompriv));
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
+
+ /* Use the device address for BSSID field. */
+ memcpy(pwlanhdr->addr3, mac, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(fctrl, WIFI_PROBERSP);
+
+ pattrib->hdrlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = pattrib->hdrlen;
+ pframe += pattrib->hdrlen;
+
+ /* timestamp will be inserted by hardware */
+ pframe += 8;
+ pattrib->pktlen += 8;
+
+ /* beacon interval: 2 bytes */
+ memcpy(pframe, (unsigned char *)&beacon_interval, 2);
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* capability info: 2 bytes */
+	/* ESS and IBSS bits must be 0 (defined in section 3.1.2.1.1 of the WiFi Direct spec) */
+ capInfo |= cap_ShortPremble;
+ capInfo |= cap_ShortSlot;
+
+ memcpy(pframe, (unsigned char *)&capInfo, 2);
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+
+ /* SSID */
+ pframe = rtw_set_ie(pframe, _SSID_IE_, 7, pwdinfo->p2p_wildcard_ssid, &pattrib->pktlen);
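+	/* The 7-byte SSID is the P2P wildcard SSID (see P2P_WILDCARD_SSID_LEN in _issue_probereq_p2p() below). */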
+
+ /* supported rates... */
+ /* Use the OFDM rate in the P2P probe response frame. (6(B), 9(B), 12, 18, 24, 36, 48, 54) */
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, pwdinfo->support_rate, &pattrib->pktlen);
+
+ /* DS parameter set */
+ pframe = rtw_set_ie(pframe, _DSSET_IE_, 1, (unsigned char *)&pwdinfo->listen_channel, &pattrib->pktlen);
+
+ /* Todo: WPS IE */
+ /* Noted by Albert 20100907 */
+	/* According to the WPS specification, all WPS attributes are encoded in big-endian byte order. */
+
+ wpsielen = 0;
+ /* WPS OUI */
+ *(__be32 *)(wpsie) = cpu_to_be32(WPSOUI);
+ wpsielen += 4;
+
+ /* WPS version */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_VER1);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0001);
+ wpsielen += 2;
+
+ /* Value: */
+ wpsie[wpsielen++] = WPS_VERSION_1; /* Version 1.0 */
+
+ /* WiFi Simple Config State */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_SIMPLE_CONF_STATE);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0001);
+ wpsielen += 2;
+
+ /* Value: */
+ wpsie[wpsielen++] = WPS_WSC_STATE_NOT_CONFIG; /* Not Configured. */
+
+ /* Response Type */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_RESP_TYPE);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0001);
+ wpsielen += 2;
+
+ /* Value: */
+ wpsie[wpsielen++] = WPS_RESPONSE_TYPE_8021X;
+
+ /* UUID-E */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_UUID_E);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0010);
+ wpsielen += 2;
+
+ /* Value: */
+ memcpy(wpsie + wpsielen, myid(&padapter->eeprompriv), ETH_ALEN);
+ wpsielen += 0x10;
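+	/* Only the first ETH_ALEN bytes of the 16-byte UUID-E carry the MAC address; */
+	/* the remaining bytes stay zero from the array initializer. */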
+
+ /* Manufacturer */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_MANUFACTURER);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0007);
+ wpsielen += 2;
+
+ /* Value: */
+ memcpy(wpsie + wpsielen, "Realtek", 7);
+ wpsielen += 7;
+
+ /* Model Name */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_MODEL_NAME);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0006);
+ wpsielen += 2;
+
+ /* Value: */
+ memcpy(wpsie + wpsielen, "8188EU", 6);
+ wpsielen += 6;
+
+ /* Model Number */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_MODEL_NUMBER);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0001);
+ wpsielen += 2;
+
+ /* Value: */
+ wpsie[wpsielen++] = 0x31; /* character 1 */
+
+ /* Serial Number */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_SERIAL_NUMBER);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(ETH_ALEN);
+ wpsielen += 2;
+
+ /* Value: */
+	memcpy(wpsie + wpsielen, "123456", ETH_ALEN);
+ wpsielen += ETH_ALEN;
+
+ /* Primary Device Type */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_PRIMARY_DEV_TYPE);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0008);
+ wpsielen += 2;
+
+ /* Value: */
+ /* Category ID */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA);
+ wpsielen += 2;
+
+ /* OUI */
+ *(__be32 *)(wpsie + wpsielen) = cpu_to_be32(WPSOUI);
+ wpsielen += 4;
+
+ /* Sub Category ID */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER);
+ wpsielen += 2;
+
+ /* Device Name */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(pwdinfo->device_name_len);
+ wpsielen += 2;
+
+ /* Value: */
+ if (pwdinfo->device_name_len) {
+ memcpy(wpsie + wpsielen, pwdinfo->device_name, pwdinfo->device_name_len);
+ wpsielen += pwdinfo->device_name_len;
+ }
+
+ /* Config Method */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_CONF_METHOD);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0002);
+ wpsielen += 2;
+
+ /* Value: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(pwdinfo->supported_wps_cm);
+ wpsielen += 2;
+
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, wpsielen, (unsigned char *)wpsie, &pattrib->pktlen);
+
+
+ p2pielen = build_probe_resp_p2p_ie(pwdinfo, pframe);
+ pframe += p2pielen;
+ pattrib->pktlen += p2pielen;
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+
+ return;
+}
+
+static int _issue_probereq_p2p(struct adapter *padapter, u8 *da, int wait_ack)
+{
+ int ret = _FAIL;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ unsigned char *mac;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ u8 wpsie[255] = { 0x00 }, p2pie[255] = { 0x00 };
+ u16 wpsielen = 0, p2pielen = 0;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ goto exit;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ mac = myid(&(padapter->eeprompriv));
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ if (da) {
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, da, ETH_ALEN);
+ } else {
+ if ((pwdinfo->p2p_info.scan_op_ch_only) || (pwdinfo->rx_invitereq_info.scan_op_ch_only)) {
+			/* These two flags are set only when the device is in P2P Client mode. */
+ memcpy(pwlanhdr->addr1, pwdinfo->p2p_peer_interface_addr, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pwdinfo->p2p_peer_interface_addr, ETH_ALEN);
+ } else {
+ /* broadcast probe request frame */
+ memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, bc_addr, ETH_ALEN);
+ }
+ }
+ memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_PROBEREQ);
+
+	pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+	pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ))
+ pframe = rtw_set_ie(pframe, _SSID_IE_, pwdinfo->tx_prov_disc_info.ssid.SsidLength, pwdinfo->tx_prov_disc_info.ssid.Ssid, &(pattrib->pktlen));
+ else
+ pframe = rtw_set_ie(pframe, _SSID_IE_, P2P_WILDCARD_SSID_LEN, pwdinfo->p2p_wildcard_ssid, &(pattrib->pktlen));
+
+ /* Use the OFDM rate in the P2P probe request frame. (6(B), 9(B), 12(B), 24(B), 36, 48, 54) */
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, pwdinfo->support_rate, &pattrib->pktlen);
+
+
+ /* WPS IE */
+ /* Noted by Albert 20110221 */
+	/* According to the WPS specification, all WPS attributes are encoded in big-endian byte order. */
+
+ wpsielen = 0;
+ /* WPS OUI */
+ *(__be32 *)(wpsie) = cpu_to_be32(WPSOUI);
+ wpsielen += 4;
+
+ /* WPS version */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_VER1);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0001);
+ wpsielen += 2;
+
+ /* Value: */
+ wpsie[wpsielen++] = WPS_VERSION_1; /* Version 1.0 */
+
+ if (pmlmepriv->wps_probe_req_ie == NULL) {
+ /* UUID-E */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_UUID_E);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0010);
+ wpsielen += 2;
+
+ /* Value: */
+ memcpy(wpsie + wpsielen, myid(&padapter->eeprompriv), ETH_ALEN);
+ wpsielen += 0x10;
+
+ /* Config Method */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_CONF_METHOD);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0002);
+ wpsielen += 2;
+
+ /* Value: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(pwdinfo->supported_wps_cm);
+ wpsielen += 2;
+ }
+
+ /* Device Name */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(pwdinfo->device_name_len);
+ wpsielen += 2;
+
+ /* Value: */
+ memcpy(wpsie + wpsielen, pwdinfo->device_name, pwdinfo->device_name_len);
+ wpsielen += pwdinfo->device_name_len;
+
+ /* Primary Device Type */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_PRIMARY_DEV_TYPE);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0008);
+ wpsielen += 2;
+
+ /* Value: */
+ /* Category ID */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_PDT_CID_RTK_WIDI);
+ wpsielen += 2;
+
+ /* OUI */
+ *(__be32 *)(wpsie + wpsielen) = cpu_to_be32(WPSOUI);
+ wpsielen += 4;
+
+ /* Sub Category ID */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_PDT_SCID_RTK_DMP);
+ wpsielen += 2;
+
+ /* Device Password ID */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_DEVICE_PWID);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0002);
+ wpsielen += 2;
+
+ /* Value: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_DPID_REGISTRAR_SPEC); /* Registrar-specified */
+ wpsielen += 2;
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, wpsielen, (unsigned char *)wpsie, &pattrib->pktlen);
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* Commented by Albert 20110221 */
+ /* According to the P2P Specification, the probe request frame should contain 5 P2P attributes */
+ /* 1. P2P Capability */
+ /* 2. P2P Device ID if this probe request wants to find the specific P2P device */
+ /* 3. Listen Channel */
+ /* 4. Extended Listen Timing */
+ /* 5. Operating Channel if this WiFi is working as the group owner now */
+
+ /* P2P Capability */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CAPABILITY;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Device Capability Bitmap, 1 byte */
+ p2pie[p2pielen++] = DMP_P2P_DEVCAP_SUPPORT;
+
+ /* Group Capability Bitmap, 1 byte */
+ if (pwdinfo->persistent_supported)
+ p2pie[p2pielen++] = P2P_GRPCAP_PERSISTENT_GROUP | DMP_P2P_GRPCAP_SUPPORT;
+ else
+ p2pie[p2pielen++] = DMP_P2P_GRPCAP_SUPPORT;
+
+ /* Listen Channel */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_LISTEN_CH;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0005);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x51; /* Copy from SD7 */
+
+ /* Channel Number */
+ p2pie[p2pielen++] = pwdinfo->listen_channel; /* listen channel */
+
+
+ /* Extended Listen Timing */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_EX_LISTEN_TIMING;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0004);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Availability Period */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0xFFFF);
+ p2pielen += 2;
+
+ /* Availability Interval */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0xFFFF);
+ p2pielen += 2;
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ /* Operating Channel (if this WiFi is working as the group owner now) */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_OPERATING_CH;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0005);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x51; /* Copy from SD7 */
+
+ /* Channel Number */
+ p2pie[p2pielen++] = pwdinfo->operating_channel; /* operating channel number */
+ }
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &pattrib->pktlen);
+
+ if (pmlmepriv->wps_probe_req_ie != NULL) {
+ /* WPS IE */
+ memcpy(pframe, pmlmepriv->wps_probe_req_ie, pmlmepriv->wps_probe_req_ie_len);
+ pattrib->pktlen += pmlmepriv->wps_probe_req_ie_len;
+ pframe += pmlmepriv->wps_probe_req_ie_len;
+ }
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("issuing probe_req, tx_len=%d\n", pattrib->last_txcmdsz));
+
+ if (wait_ack) {
+ ret = dump_mgntframe_and_wait_ack(padapter, pmgntframe);
+ } else {
+ dump_mgntframe(padapter, pmgntframe);
+ ret = _SUCCESS;
+ }
+
+exit:
+ return ret;
+}
+
+inline void issue_probereq_p2p(struct adapter *adapter, u8 *da)
+{
+ _issue_probereq_p2p(adapter, da, false);
+}
+
+int issue_probereq_p2p_ex(struct adapter *adapter, u8 *da, int try_cnt, int wait_ms)
+{
+ int ret;
+ int i = 0;
+ u32 start = rtw_get_current_time();
+
+ do {
+ ret = _issue_probereq_p2p(adapter, da, wait_ms > 0 ? true : false);
+
+ i++;
+
+ if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
+ break;
+
+ if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
+ rtw_msleep_os(wait_ms);
+ } while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
+
+ if (ret != _FAIL) {
+ ret = _SUCCESS;
+ goto exit;
+ }
+
+ if (try_cnt && wait_ms) {
+ if (da)
+ DBG_88E(FUNC_ADPT_FMT" to %pM, ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(adapter), da, rtw_get_oper_ch(adapter),
+ ret == _SUCCESS ? ", acked" : "", i, try_cnt, rtw_get_passing_time_ms(start));
+ else
+ DBG_88E(FUNC_ADPT_FMT", ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(adapter), rtw_get_oper_ch(adapter),
+ ret == _SUCCESS ? ", acked" : "", i, try_cnt, rtw_get_passing_time_ms(start));
+ }
+exit:
+ return ret;
+}
+
+#endif /* CONFIG_88EU_P2P */
+
+static s32 rtw_action_public_decache(union recv_frame *recv_frame, s32 token)
+{
+ struct adapter *adapter = recv_frame->u.hdr.adapter;
+ struct mlme_ext_priv *mlmeext = &(adapter->mlmeextpriv);
+ u8 *frame = recv_frame->u.hdr.rx_data;
+ u16 seq_ctrl = ((recv_frame->u.hdr.attrib.seq_num&0xffff) << 4) |
+ (recv_frame->u.hdr.attrib.frag_num & 0xf);
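+	/* seq_ctrl mirrors the 802.11 Sequence Control field: the 12-bit sequence number in the upper bits, the 4-bit fragment number in the lower bits. */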
+
+ if (GetRetry(frame)) {
+ if (token >= 0) {
+ if ((seq_ctrl == mlmeext->action_public_rxseq) && (token == mlmeext->action_public_dialog_token)) {
+ DBG_88E(FUNC_ADPT_FMT" seq_ctrl = 0x%x, rxseq = 0x%x, token:%d\n",
+ FUNC_ADPT_ARG(adapter), seq_ctrl, mlmeext->action_public_rxseq, token);
+ return _FAIL;
+ }
+ } else {
+ if (seq_ctrl == mlmeext->action_public_rxseq) {
+ DBG_88E(FUNC_ADPT_FMT" seq_ctrl = 0x%x, rxseq = 0x%x\n",
+ FUNC_ADPT_ARG(adapter), seq_ctrl, mlmeext->action_public_rxseq);
+ return _FAIL;
+ }
+ }
+ }
+
+ mlmeext->action_public_rxseq = seq_ctrl;
+
+ if (token >= 0)
+ mlmeext->action_public_dialog_token = token;
+
+ return _SUCCESS;
+}
+
+static unsigned int on_action_public_p2p(union recv_frame *precv_frame)
+{
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *frame_body;
+ u8 dialogToken = 0;
+#ifdef CONFIG_88EU_P2P
+ struct adapter *padapter = precv_frame->u.hdr.adapter;
+ uint len = precv_frame->u.hdr.len;
+ u8 *p2p_ie;
+ u32 p2p_ielen;
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ u8 result = P2P_STATUS_SUCCESS;
+ u8 empty_addr[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+#endif /* CONFIG_88EU_P2P */
+
+ frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+
+ dialogToken = frame_body[7];
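+	/* Public action frame body layout: Category(1) + Action(1) + OUI(4) + OUI Subtype(1), so the Dialog Token sits at offset 7. */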
+
+ if (rtw_action_public_decache(precv_frame, dialogToken) == _FAIL)
+ return _FAIL;
+
+#ifdef CONFIG_88EU_P2P
+ _cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey);
+ /* Do nothing if the driver doesn't enable the P2P function. */
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE))
+ return _SUCCESS;
+
+ len -= sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ switch (frame_body[6]) { /* OUI Subtype */
+ case P2P_GO_NEGO_REQ:
+ DBG_88E("[%s] Got GO Nego Req Frame\n", __func__);
+ _rtw_memset(&pwdinfo->groupid_info, 0x00, sizeof(struct group_id_info));
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_RX_PROVISION_DIS_REQ))
+ rtw_p2p_set_state(pwdinfo, rtw_p2p_pre_state(pwdinfo));
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_FAIL)) {
+ /* Commented by Albert 20110526 */
+			/* In this case, the previous negotiation failure state has not been reset yet. */
+ _cancel_timer_ex(&pwdinfo->restore_p2p_state_timer);
+ /* Restore the previous p2p state */
+ rtw_p2p_set_state(pwdinfo, rtw_p2p_pre_state(pwdinfo));
+ DBG_88E("[%s] Restore the previous p2p state to %d\n", __func__, rtw_p2p_state(pwdinfo));
+ }
+
+ /* Commented by Kurt 20110902 */
+		/* Add the if statement to avoid duplicate prov disc req frames overwriting pre_p2p_state. */
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_ING))
+ rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
+
+ /* Commented by Kurt 20120113 */
+ /* Get peer_dev_addr here if peer doesn't issue prov_disc frame. */
+ if (_rtw_memcmp(pwdinfo->rx_prov_disc_info.peerDevAddr, empty_addr, ETH_ALEN))
+ memcpy(pwdinfo->rx_prov_disc_info.peerDevAddr, GetAddr2Ptr(pframe), ETH_ALEN);
+
+ result = process_p2p_group_negotation_req(pwdinfo, frame_body, len);
+ issue_p2p_GO_response(padapter, GetAddr2Ptr(pframe), frame_body, len, result);
+
+ /* Commented by Albert 20110718 */
+ /* No matter negotiating or negotiation failure, the driver should set up the restore P2P state timer. */
+ _set_timer(&pwdinfo->restore_p2p_state_timer, 5000);
+ break;
+ case P2P_GO_NEGO_RESP:
+ DBG_88E("[%s] Got GO Nego Resp Frame\n", __func__);
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_ING)) {
+ /* Commented by Albert 20110425 */
+			/* The restore timer is enabled when issuing the nego request frame in the rtw_p2p_connect function. */
+ _cancel_timer_ex(&pwdinfo->restore_p2p_state_timer);
+ pwdinfo->nego_req_info.benable = false;
+ result = process_p2p_group_negotation_resp(pwdinfo, frame_body, len);
+ issue_p2p_GO_confirm(pwdinfo->padapter, GetAddr2Ptr(pframe), result);
+ if (P2P_STATUS_SUCCESS == result) {
+ if (rtw_p2p_role(pwdinfo) == P2P_ROLE_CLIENT) {
+ pwdinfo->p2p_info.operation_ch[0] = pwdinfo->peer_operating_ch;
+ pwdinfo->p2p_info.scan_op_ch_only = 1;
+ _set_timer(&pwdinfo->reset_ch_sitesurvey2, P2P_RESET_SCAN_CH);
+ }
+ }
+ /* Reset the dialog token for group negotiation frames. */
+ pwdinfo->negotiation_dialog_token = 1;
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_FAIL))
+ _set_timer(&pwdinfo->restore_p2p_state_timer, 5000);
+ } else {
+ DBG_88E("[%s] Skipped GO Nego Resp Frame (p2p_state != P2P_STATE_GONEGO_ING)\n", __func__);
+ }
+ break;
+ case P2P_GO_NEGO_CONF:
+ DBG_88E("[%s] Got GO Nego Confirm Frame\n", __func__);
+ result = process_p2p_group_negotation_confirm(pwdinfo, frame_body, len);
+ if (P2P_STATUS_SUCCESS == result) {
+ if (rtw_p2p_role(pwdinfo) == P2P_ROLE_CLIENT) {
+ pwdinfo->p2p_info.operation_ch[0] = pwdinfo->peer_operating_ch;
+ pwdinfo->p2p_info.scan_op_ch_only = 1;
+ _set_timer(&pwdinfo->reset_ch_sitesurvey2, P2P_RESET_SCAN_CH);
+ }
+ }
+ break;
+ case P2P_INVIT_REQ:
+ /* Added by Albert 2010/10/05 */
+ /* Received the P2P Invite Request frame. */
+
+ DBG_88E("[%s] Got invite request frame!\n", __func__);
+ p2p_ie = rtw_get_p2p_ie(frame_body + _PUBLIC_ACTION_IE_OFFSET_, len - _PUBLIC_ACTION_IE_OFFSET_, NULL, &p2p_ielen);
+ if (p2p_ie) {
+ /* Parse the necessary information from the P2P Invitation Request frame. */
+ /* For example: The MAC address of sending this P2P Invitation Request frame. */
+ u32 attr_contentlen = 0;
+ u8 status_code = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
+ struct group_id_info group_id;
+ u8 invitation_flag = 0;
+
+ rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_INVITATION_FLAGS, &invitation_flag, &attr_contentlen);
+ if (attr_contentlen) {
+ rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_BSSID, pwdinfo->p2p_peer_interface_addr, &attr_contentlen);
+ /* Commented by Albert 20120510 */
+ /* Copy to the pwdinfo->p2p_peer_interface_addr. */
+ /* So that the WFD UI (or Sigma) can get the peer interface address by using the following command. */
+ /* #> iwpriv wlan0 p2p_get peer_ifa */
+ /* After having the peer interface address, the sigma can find the correct conf file for wpa_supplicant. */
+
+ if (attr_contentlen) {
+ DBG_88E("[%s] GO's BSSID = %.2X %.2X %.2X %.2X %.2X %.2X\n", __func__,
+ pwdinfo->p2p_peer_interface_addr[0], pwdinfo->p2p_peer_interface_addr[1],
+ pwdinfo->p2p_peer_interface_addr[2], pwdinfo->p2p_peer_interface_addr[3],
+ pwdinfo->p2p_peer_interface_addr[4], pwdinfo->p2p_peer_interface_addr[5]);
+ }
+
+ if (invitation_flag & P2P_INVITATION_FLAGS_PERSISTENT) {
+ /* Re-invoke the persistent group. */
+
+ _rtw_memset(&group_id, 0x00, sizeof(struct group_id_info));
+ rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_ID, (u8 *)&group_id, &attr_contentlen);
+ if (attr_contentlen) {
+ if (_rtw_memcmp(group_id.go_device_addr, myid(&padapter->eeprompriv), ETH_ALEN)) {
+ /* The p2p device sending this p2p invitation request wants this Wi-Fi device to be the persistent GO. */
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_GO);
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ status_code = P2P_STATUS_SUCCESS;
+ } else {
+ /* The p2p device sending this p2p invitation request wants to be the persistent GO. */
+ if (is_matched_in_profilelist(pwdinfo->p2p_peer_interface_addr, &pwdinfo->profileinfo[0])) {
+ u8 operatingch_info[5] = { 0x00 };
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_OPERATING_CH, operatingch_info, &attr_contentlen)) {
+ if (rtw_ch_set_search_ch(padapter->mlmeextpriv.channel_set, (u32)operatingch_info[4])) {
+ /* The operating channel is acceptable for this device. */
+ pwdinfo->rx_invitereq_info.operation_ch[0] = operatingch_info[4];
+ pwdinfo->rx_invitereq_info.scan_op_ch_only = 1;
+ _set_timer(&pwdinfo->reset_ch_sitesurvey, P2P_RESET_SCAN_CH);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_MATCH);
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ status_code = P2P_STATUS_SUCCESS;
+ } else {
+ /* The operating channel isn't supported by this device. */
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_DISMATCH);
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ status_code = P2P_STATUS_FAIL_NO_COMMON_CH;
+ _set_timer(&pwdinfo->restore_p2p_state_timer, 3000);
+ }
+ } else {
+ /* Commented by Albert 20121130 */
+							/* Intel uses a different P2P IE to store the operating channel information */
+ /* Workaround for Intel WiDi 3.5 */
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_MATCH);
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ status_code = P2P_STATUS_SUCCESS;
+ }
+ } else {
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_DISMATCH);
+ status_code = P2P_STATUS_FAIL_UNKNOWN_P2PGROUP;
+ }
+ }
+ } else {
+ DBG_88E("[%s] P2P Group ID Attribute NOT FOUND!\n", __func__);
+ status_code = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
+ }
+ } else {
+ /* Received the invitation to join a P2P group. */
+
+ _rtw_memset(&group_id, 0x00, sizeof(struct group_id_info));
+ rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_ID, (u8 *)&group_id, &attr_contentlen);
+ if (attr_contentlen) {
+ if (_rtw_memcmp(group_id.go_device_addr, myid(&padapter->eeprompriv), ETH_ALEN)) {
+ /* In this case, the GO can't be myself. */
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_DISMATCH);
+ status_code = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
+ } else {
+ /* The p2p device sending this p2p invitation request wants to join an existing P2P group */
+ /* Commented by Albert 2012/06/28 */
+ /* In this case, this Wi-Fi device should use the iwpriv command to get the peer device address. */
+ /* The peer device address should be the destination address for the provisioning discovery request. */
+ /* Then, this Wi-Fi device should use the iwpriv command to get the peer interface address. */
+						/* The peer interface address should be used as the WPS MAC address. */
+						memcpy(pwdinfo->p2p_peer_device_addr, group_id.go_device_addr, ETH_ALEN);
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_JOIN);
+ status_code = P2P_STATUS_SUCCESS;
+ }
+ } else {
+ DBG_88E("[%s] P2P Group ID Attribute NOT FOUND!\n", __func__);
+ status_code = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
+ }
+ }
+ } else {
+ DBG_88E("[%s] P2P Invitation Flags Attribute NOT FOUND!\n", __func__);
+ status_code = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
+ }
+
+ DBG_88E("[%s] status_code = %d\n", __func__, status_code);
+
+ pwdinfo->inviteresp_info.token = frame_body[7];
+ issue_p2p_invitation_response(padapter, GetAddr2Ptr(pframe), pwdinfo->inviteresp_info.token, status_code);
+ }
+ break;
+ case P2P_INVIT_RESP: {
+ u8 attr_content = 0x00;
+ u32 attr_contentlen = 0;
+
+ DBG_88E("[%s] Got invite response frame!\n", __func__);
+ _cancel_timer_ex(&pwdinfo->restore_p2p_state_timer);
+ p2p_ie = rtw_get_p2p_ie(frame_body + _PUBLIC_ACTION_IE_OFFSET_, len - _PUBLIC_ACTION_IE_OFFSET_, NULL, &p2p_ielen);
+ if (p2p_ie) {
+ rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_STATUS, &attr_content, &attr_contentlen);
+
+ if (attr_contentlen == 1) {
+ DBG_88E("[%s] Status = %d\n", __func__, attr_content);
+ pwdinfo->invitereq_info.benable = false;
+
+ if (attr_content == P2P_STATUS_SUCCESS) {
+ if (_rtw_memcmp(pwdinfo->invitereq_info.go_bssid, myid(&padapter->eeprompriv), ETH_ALEN)) {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ } else {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ }
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_INVITE_RESP_OK);
+ } else {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_INVITE_RESP_FAIL);
+ }
+ } else {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_INVITE_RESP_FAIL);
+ }
+ } else {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_INVITE_RESP_FAIL);
+ }
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_RX_INVITE_RESP_FAIL))
+ _set_timer(&pwdinfo->restore_p2p_state_timer, 5000);
+ break;
+ }
+ case P2P_DEVDISC_REQ:
+ process_p2p_devdisc_req(pwdinfo, pframe, len);
+ break;
+ case P2P_DEVDISC_RESP:
+ process_p2p_devdisc_resp(pwdinfo, pframe, len);
+ break;
+ case P2P_PROVISION_DISC_REQ:
+ DBG_88E("[%s] Got Provisioning Discovery Request Frame\n", __func__);
+ process_p2p_provdisc_req(pwdinfo, pframe, len);
+ memcpy(pwdinfo->rx_prov_disc_info.peerDevAddr, GetAddr2Ptr(pframe), ETH_ALEN);
+
+ /* 20110902 Kurt */
+ /* Add the following check to avoid a duplicate provision discovery request overwriting pre_p2p_state. */
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_RX_PROVISION_DIS_REQ))
+ rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
+
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_PROVISION_DIS_REQ);
+ _set_timer(&pwdinfo->restore_p2p_state_timer, P2P_PROVISION_TIMEOUT);
+ break;
+ case P2P_PROVISION_DISC_RESP:
+ /* Commented by Albert 20110707 */
+ /* Should we check the pwdinfo->tx_prov_disc_info.bsent flag here?? */
+ DBG_88E("[%s] Got Provisioning Discovery Response Frame\n", __func__);
+ /* Commented by Albert 20110426 */
+ /* The restore timer is enabled when issuing the provisioning request frame in the rtw_p2p_prov_disc function. */
+ _cancel_timer_ex(&pwdinfo->restore_p2p_state_timer);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_PROVISION_DIS_RSP);
+ process_p2p_provdisc_resp(pwdinfo, pframe);
+ _set_timer(&pwdinfo->restore_p2p_state_timer, P2P_PROVISION_TIMEOUT);
+ break;
+ }
+#endif /* CONFIG_88EU_P2P */
+
+ return _SUCCESS;
+}
+
+static unsigned int on_action_public_vendor(union recv_frame *precv_frame)
+{
+ unsigned int ret = _FAIL;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ if (_rtw_memcmp(frame_body + 2, P2P_OUI, 4))
+ ret = on_action_public_p2p(precv_frame);
+
+ return ret;
+}
+
+static unsigned int on_action_public_default(union recv_frame *precv_frame, u8 action)
+{
+ unsigned int ret = _FAIL;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
+ u8 token;
+
+ token = frame_body[2];
+
+ if (rtw_action_public_decache(precv_frame, token) == _FAIL)
+ goto exit;
+
+ ret = _SUCCESS;
+
+exit:
+ return ret;
+}
+
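+/*
+ * Public action frame layout handled below: frame_body[0] carries the
+ * category (RTW_WLAN_CATEGORY_PUBLIC), frame_body[1] the action code, and
+ * the payload is either the vendor OUI (P2P_OUI at frame_body + 2 for
+ * ACT_PUBLIC_VENDOR) or a dialog token at frame_body[2] for the other
+ * public action frames.
+ */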
+unsigned int on_action_public(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ unsigned int ret = _FAIL;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
+ u8 category, action;
+
+ /* check whether RA matches */
+ if (!_rtw_memcmp(myid(&(padapter->eeprompriv)), GetAddr1Ptr(pframe), ETH_ALEN))
+ goto exit;
+
+ category = frame_body[0];
+ if (category != RTW_WLAN_CATEGORY_PUBLIC)
+ goto exit;
+
+ action = frame_body[1];
+ switch (action) {
+ case ACT_PUBLIC_VENDOR:
+ ret = on_action_public_vendor(precv_frame);
+ break;
+ default:
+ ret = on_action_public_default(precv_frame, action);
+ break;
+ }
+
+exit:
+ return ret;
+}
+
+unsigned int OnAction_ht(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ return _SUCCESS;
+}
+
+unsigned int OnAction_wmm(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ return _SUCCESS;
+}
+
+unsigned int OnAction_p2p(struct adapter *padapter, union recv_frame *precv_frame)
+{
+#ifdef CONFIG_88EU_P2P
+ u8 *frame_body;
+ u8 category, OUI_Subtype;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ uint len = precv_frame->u.hdr.len;
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+
+ DBG_88E("%s\n", __func__);
+
+ /* check whether RA matches */
+ if (!_rtw_memcmp(myid(&(padapter->eeprompriv)), GetAddr1Ptr(pframe), ETH_ALEN))/* for if1, sta/ap mode */
+ return _SUCCESS;
+
+ frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+
+ category = frame_body[0];
+ if (category != RTW_WLAN_CATEGORY_P2P)
+ return _SUCCESS;
+
+ if (be32_to_cpu(*((__be32 *)(frame_body + 1))) != P2POUI)
+ return _SUCCESS;
+
+ len -= sizeof(struct rtw_ieee80211_hdr_3addr);
+ OUI_Subtype = frame_body[5];
+
+ switch (OUI_Subtype) {
+ case P2P_NOTICE_OF_ABSENCE:
+ break;
+ case P2P_PRESENCE_REQUEST:
+ process_p2p_presence_req(pwdinfo, pframe, len);
+ break;
+ case P2P_PRESENCE_RESPONSE:
+ break;
+ case P2P_GO_DISC_REQUEST:
+ break;
+ default:
+ break;
+ }
+#endif /* CONFIG_88EU_P2P */
+ return _SUCCESS;
+}
+
+unsigned int OnAction(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ int i;
+ unsigned char category;
+ struct action_handler *ptable;
+ unsigned char *frame_body;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+
+ frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+
+ category = frame_body[0];
+
+ for (i = 0; i < sizeof(OnAction_tbl)/sizeof(struct action_handler); i++) {
+ ptable = &OnAction_tbl[i];
+ if (category == ptable->num)
+ ptable->func(padapter, precv_frame);
+ }
+ return _SUCCESS;
+}
+
+unsigned int DoReserved(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ return _SUCCESS;
+}
+
+struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv)
+{
+ struct xmit_frame *pmgntframe;
+ struct xmit_buf *pxmitbuf;
+
+ pmgntframe = rtw_alloc_xmitframe(pxmitpriv);
+ if (pmgntframe == NULL) {
+ DBG_88E("%s, alloc xmitframe fail\n", __func__);
+ return NULL;
+ }
+
+ pxmitbuf = rtw_alloc_xmitbuf_ext(pxmitpriv);
+ if (pxmitbuf == NULL) {
+ DBG_88E("%s, alloc xmitbuf fail\n", __func__);
+ rtw_free_xmitframe(pxmitpriv, pmgntframe);
+ return NULL;
+ }
+ pmgntframe->frame_tag = MGNT_FRAMETAG;
+ pmgntframe->pxmitbuf = pxmitbuf;
+ pmgntframe->buf_addr = pxmitbuf->pbuf;
+ pxmitbuf->priv_data = pmgntframe;
+ return pmgntframe;
+}
+
+/****************************************************************************
+
+Following are some TX functions for WiFi MLME
+
+*****************************************************************************/
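+/*
+ * All of the issue_xxx() helpers below follow the same basic pattern
+ * (sketch only; see issue_beacon() for a complete instance):
+ *
+ *    pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ *    pattrib = &pmgntframe->attrib;
+ *    update_mgntframe_attrib(padapter, pattrib);
+ *    ... build the 802.11 header and IEs into pmgntframe->buf_addr ...
+ *    pattrib->last_txcmdsz = pattrib->pktlen;
+ *    dump_mgntframe(padapter, pmgntframe);    (or a _and_wait variant)
+ */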
+
+void update_mgnt_tx_rate(struct adapter *padapter, u8 rate)
+{
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+
+ pmlmeext->tx_rate = rate;
+ DBG_88E("%s(): rate = %x\n", __func__, rate);
+}
+
+void update_mgntframe_attrib(struct adapter *padapter, struct pkt_attrib *pattrib)
+{
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+
+ _rtw_memset((u8 *)(pattrib), 0, sizeof(struct pkt_attrib));
+
+ pattrib->hdrlen = 24;
+ pattrib->nr_frags = 1;
+ pattrib->priority = 7;
+ pattrib->mac_id = 0;
+ pattrib->qsel = 0x12;
+
+ pattrib->pktlen = 0;
+
+ if (pmlmeext->cur_wireless_mode & WIRELESS_11B)
+ pattrib->raid = 6;/* b mode */
+ else
+ pattrib->raid = 5;/* a/g mode */
+
+ pattrib->encrypt = _NO_PRIVACY_;
+ pattrib->bswenc = false;
+
+ pattrib->qos_en = false;
+ pattrib->ht_en = false;
+ pattrib->bwmode = HT_CHANNEL_WIDTH_20;
+ pattrib->ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ pattrib->sgi = false;
+
+ pattrib->seqnum = pmlmeext->mgnt_seq;
+
+ pattrib->retry_ctrl = true;
+}
+
+void dump_mgntframe(struct adapter *padapter, struct xmit_frame *pmgntframe)
+{
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
+ return;
+
+ rtw_hal_mgnt_xmit(padapter, pmgntframe);
+}
+
+s32 dump_mgntframe_and_wait(struct adapter *padapter, struct xmit_frame *pmgntframe, int timeout_ms)
+{
+ s32 ret = _FAIL;
+ struct xmit_buf *pxmitbuf = pmgntframe->pxmitbuf;
+ struct submit_ctx sctx;
+
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
+ return ret;
+
+ rtw_sctx_init(&sctx, timeout_ms);
+ pxmitbuf->sctx = &sctx;
+
+ ret = rtw_hal_mgnt_xmit(padapter, pmgntframe);
+
+ if (ret == _SUCCESS)
+ ret = rtw_sctx_wait(&sctx);
+
+ return ret;
+}
+
+s32 dump_mgntframe_and_wait_ack(struct adapter *padapter, struct xmit_frame *pmgntframe)
+{
+ s32 ret = _FAIL;
+ u32 timeout_ms = 500;/* 500ms */
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
+ return -1;
+
+ _enter_critical_mutex(&pxmitpriv->ack_tx_mutex, NULL);
+ pxmitpriv->ack_tx = true;
+
+ pmgntframe->ack_report = 1;
+ if (rtw_hal_mgnt_xmit(padapter, pmgntframe) == _SUCCESS) {
+ ret = rtw_ack_tx_wait(pxmitpriv, timeout_ms);
+ }
+
+ pxmitpriv->ack_tx = false;
+ _exit_critical_mutex(&pxmitpriv->ack_tx_mutex, NULL);
+
+ return ret;
+}
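+/*
+ * Note the two wait mechanisms above: dump_mgntframe_and_wait() attaches a
+ * submit_ctx to the xmit_buf and blocks in rtw_sctx_wait() until the buffer
+ * is consumed, while dump_mgntframe_and_wait_ack() serializes on
+ * ack_tx_mutex and blocks in rtw_ack_tx_wait() (500 ms) until the TX ack
+ * report arrives or the timeout expires.
+ */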
+
+static int update_hidden_ssid(u8 *ies, u32 ies_len, u8 hidden_ssid_mode)
+{
+ u8 *ssid_ie;
+ int ssid_len_ori;
+ int len_diff = 0;
+
+ ssid_ie = rtw_get_ie(ies, WLAN_EID_SSID, &ssid_len_ori, ies_len);
+
+ if (ssid_ie && ssid_len_ori > 0) {
+ switch (hidden_ssid_mode) {
+ case 1: {
+ u8 *next_ie = ssid_ie + 2 + ssid_len_ori;
+ u32 remain_len = 0;
+
+ remain_len = ies_len - (next_ie - ies);
+
+ ssid_ie[1] = 0;
+ memcpy(ssid_ie+2, next_ie, remain_len);
+ len_diff -= ssid_len_ori;
+
+ break;
+ }
+ case 2:
+ _rtw_memset(&ssid_ie[2], 0, ssid_len_ori);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return len_diff;
+}
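+/*
+ * Example: with hidden_ssid_mode == 1 an SSID IE carrying "abc" (2 + 3
+ * bytes) is rewritten as a zero-length SSID IE and the trailing IEs are
+ * moved up, so the function returns -3; with hidden_ssid_mode == 2 the
+ * three SSID bytes are zeroed in place and the returned length difference
+ * is 0.
+ */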
+
+void issue_beacon(struct adapter *padapter, int timeout_ms)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ unsigned int rate_len;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+#if defined(CONFIG_88EU_AP_MODE)
+ unsigned long irqL;
+#endif /* if defined (CONFIG_88EU_AP_MODE) */
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+ u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+#endif /* CONFIG_88EU_P2P */
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL) {
+ DBG_88E("%s, alloc mgnt frame fail\n", __func__);
+ return;
+ }
+#if defined(CONFIG_88EU_AP_MODE)
+ _enter_critical_bh(&pmlmepriv->bcn_update_lock, &irqL);
+#endif /* if defined (CONFIG_88EU_AP_MODE) */
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+ pattrib->qsel = 0x10;
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(cur_network), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, 0/*pmlmeext->mgnt_seq*/);
+ /* pmlmeext->mgnt_seq++; */
+ SetFrameSubType(pframe, WIFI_BEACON);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
+#ifdef CONFIG_88EU_P2P
+ /* for P2P : Primary Device Type & Device Name */
+ u32 wpsielen = 0, insert_len = 0;
+ u8 *wpsie = NULL;
+ wpsie = rtw_get_wps_ie(cur_network->IEs+_FIXED_IE_LENGTH_, cur_network->IELength-_FIXED_IE_LENGTH_, NULL, &wpsielen);
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO) && wpsie && wpsielen > 0) {
+ uint wps_offset, remainder_ielen;
+ u8 *premainder_ie, *pframe_wscie;
+
+ wps_offset = (uint)(wpsie - cur_network->IEs);
+ premainder_ie = wpsie + wpsielen;
+ remainder_ielen = cur_network->IELength - wps_offset - wpsielen;
+ pframe_wscie = pframe + wps_offset;
+ memcpy(pframe, cur_network->IEs, wps_offset+wpsielen);
+ pframe += (wps_offset + wpsielen);
+ pattrib->pktlen += (wps_offset + wpsielen);
+
+ /* now pframe is end of wsc ie, insert Primary Device Type & Device Name */
+ /* Primary Device Type */
+ /* Type: */
+ *(__be16 *)(pframe + insert_len) = cpu_to_be16(WPS_ATTR_PRIMARY_DEV_TYPE);
+ insert_len += 2;
+
+ /* Length: */
+ *(__be16 *)(pframe + insert_len) = cpu_to_be16(0x0008);
+ insert_len += 2;
+
+ /* Value: */
+ /* Category ID */
+ *(__be16 *)(pframe + insert_len) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA);
+ insert_len += 2;
+
+ /* OUI */
+ *(__be32 *)(pframe + insert_len) = cpu_to_be32(WPSOUI);
+ insert_len += 4;
+
+ /* Sub Category ID */
+ *(__be16 *)(pframe + insert_len) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER);
+ insert_len += 2;
+
+ /* Device Name */
+ /* Type: */
+ *(__be16 *)(pframe + insert_len) = cpu_to_be16(WPS_ATTR_DEVICE_NAME);
+ insert_len += 2;
+
+ /* Length: */
+ *(__be16 *)(pframe + insert_len) = cpu_to_be16(pwdinfo->device_name_len);
+ insert_len += 2;
+
+ /* Value: */
+ memcpy(pframe + insert_len, pwdinfo->device_name, pwdinfo->device_name_len);
+ insert_len += pwdinfo->device_name_len;
+
+ /* update wsc ie length */
+ *(pframe_wscie+1) = (wpsielen-2) + insert_len;
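+ /* (wpsielen - 2) is the original WSC IE body length (total length minus
+  * the 2-byte element header), so adding insert_len accounts for the
+  * Primary Device Type and Device Name attributes spliced in above. */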
+
+ /* pframe move to end */
+ pframe += insert_len;
+ pattrib->pktlen += insert_len;
+
+ /* copy remainder_ie to pframe */
+ memcpy(pframe, premainder_ie, remainder_ielen);
+ pframe += remainder_ielen;
+ pattrib->pktlen += remainder_ielen;
+ } else
+#endif /* CONFIG_88EU_P2P */
+ {
+ int len_diff;
+ memcpy(pframe, cur_network->IEs, cur_network->IELength);
+ len_diff = update_hidden_ssid(
+ pframe+_BEACON_IE_OFFSET_
+ , cur_network->IELength-_BEACON_IE_OFFSET_
+ , pmlmeinfo->hidden_ssid_mode
+ );
+ pframe += (cur_network->IELength+len_diff);
+ pattrib->pktlen += (cur_network->IELength+len_diff);
+ }
+
+ {
+ u8 *wps_ie;
+ uint wps_ielen;
+ u8 sr = 0;
+ wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr+TXDESC_OFFSET+sizeof(struct rtw_ieee80211_hdr_3addr)+_BEACON_IE_OFFSET_,
+ pattrib->pktlen-sizeof(struct rtw_ieee80211_hdr_3addr)-_BEACON_IE_OFFSET_, NULL, &wps_ielen);
+ if (wps_ie && wps_ielen > 0)
+ rtw_get_wps_attr_content(wps_ie, wps_ielen, WPS_ATTR_SELECTED_REGISTRAR, (u8 *)(&sr), NULL);
+ if (sr != 0)
+ set_fwstate(pmlmepriv, WIFI_UNDER_WPS);
+ else
+ _clr_fwstate_(pmlmepriv, WIFI_UNDER_WPS);
+ }
+
+#ifdef CONFIG_88EU_P2P
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ u32 len;
+ len = build_beacon_p2p_ie(pwdinfo, pframe);
+
+ pframe += len;
+ pattrib->pktlen += len;
+ }
+#endif /* CONFIG_88EU_P2P */
+
+ goto _issue_bcn;
+ }
+
+ /* below for ad-hoc mode */
+
+ /* timestamp will be inserted by hardware */
+ pframe += 8;
+ pattrib->pktlen += 8;
+
+ /* beacon interval: 2 bytes */
+
+ memcpy(pframe, (unsigned char *)(rtw_get_beacon_interval_from_ie(cur_network->IEs)), 2);
+
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* capability info: 2 bytes */
+
+ memcpy(pframe, (unsigned char *)(rtw_get_capability_from_ie(cur_network->IEs)), 2);
+
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* SSID */
+ pframe = rtw_set_ie(pframe, _SSID_IE_, cur_network->Ssid.SsidLength, cur_network->Ssid.Ssid, &pattrib->pktlen);
+
+ /* supported rates... */
+ rate_len = rtw_get_rateset_len(cur_network->SupportedRates);
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, ((rate_len > 8) ? 8 : rate_len), cur_network->SupportedRates, &pattrib->pktlen);
+
+ /* DS parameter set */
+ pframe = rtw_set_ie(pframe, _DSSET_IE_, 1, (unsigned char *)&(cur_network->Configuration.DSConfig), &pattrib->pktlen);
+
+ {
+ u8 erpinfo = 0;
+ u32 ATIMWindow;
+ /* IBSS Parameter Set... */
+ ATIMWindow = 0;
+ pframe = rtw_set_ie(pframe, _IBSS_PARA_IE_, 2, (unsigned char *)(&ATIMWindow), &pattrib->pktlen);
+
+ /* ERP IE */
+ pframe = rtw_set_ie(pframe, _ERPINFO_IE_, 1, &erpinfo, &pattrib->pktlen);
+ }
+
+ /* EXTENDED SUPPORTED RATES */
+ if (rate_len > 8)
+ pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (rate_len - 8), (cur_network->SupportedRates + 8), &pattrib->pktlen);
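+ /* The Supported Rates element holds at most eight rates, so any extra
+  * rates are carried in the Extended Supported Rates element here and in
+  * the probe/association frames below. */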
+ /* todo: HT for ad-hoc */
+_issue_bcn:
+
+#if defined(CONFIG_88EU_AP_MODE)
+ pmlmepriv->update_bcn = false;
+
+ _exit_critical_bh(&pmlmepriv->bcn_update_lock, &irqL);
+#endif /* if defined (CONFIG_88EU_AP_MODE) */
+
+ if ((pattrib->pktlen + TXDESC_SIZE) > 512) {
+ DBG_88E("beacon frame too large\n");
+ return;
+ }
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ /* DBG_88E("issue bcn_sz=%d\n", pattrib->last_txcmdsz); */
+ if (timeout_ms > 0)
+ dump_mgntframe_and_wait(padapter, pmgntframe, timeout_ms);
+ else
+ dump_mgntframe(padapter, pmgntframe);
+}
+
+void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p_probereq)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ unsigned char *mac, *bssid;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+#if defined(CONFIG_88EU_AP_MODE)
+ u8 *pwps_ie;
+ uint wps_ielen;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+#endif /* if defined (CONFIG_88EU_AP_MODE) */
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+ unsigned int rate_len;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+#endif /* CONFIG_88EU_P2P */
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL) {
+ DBG_88E("%s, alloc mgnt frame fail\n", __func__);
+ return;
+ }
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ mac = myid(&(padapter->eeprompriv));
+ bssid = cur_network->MacAddress;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, bssid, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(fctrl, WIFI_PROBERSP);
+
+ pattrib->hdrlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = pattrib->hdrlen;
+ pframe += pattrib->hdrlen;
+
+ if (cur_network->IELength > MAX_IE_SZ) {
+ rtw_free_xmitbuf(pxmitpriv, pmgntframe->pxmitbuf);
+ rtw_free_xmitframe(pxmitpriv, pmgntframe);
+ return;
+ }
+
+#if defined(CONFIG_88EU_AP_MODE)
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
+ pwps_ie = rtw_get_wps_ie(cur_network->IEs+_FIXED_IE_LENGTH_, cur_network->IELength-_FIXED_IE_LENGTH_, NULL, &wps_ielen);
+
+ /* insert & update wps_probe_resp_ie */
+ if ((pmlmepriv->wps_probe_resp_ie != NULL) && pwps_ie && (wps_ielen > 0)) {
+ uint wps_offset, remainder_ielen;
+ u8 *premainder_ie;
+
+ wps_offset = (uint)(pwps_ie - cur_network->IEs);
+
+ premainder_ie = pwps_ie + wps_ielen;
+
+ remainder_ielen = cur_network->IELength - wps_offset - wps_ielen;
+
+ memcpy(pframe, cur_network->IEs, wps_offset);
+ pframe += wps_offset;
+ pattrib->pktlen += wps_offset;
+
+ wps_ielen = (uint)pmlmepriv->wps_probe_resp_ie[1];/* to get ie data len */
+ if ((wps_offset+wps_ielen+2) <= MAX_IE_SZ) {
+ memcpy(pframe, pmlmepriv->wps_probe_resp_ie, wps_ielen+2);
+ pframe += wps_ielen+2;
+ pattrib->pktlen += wps_ielen+2;
+ }
+
+ if ((wps_offset+wps_ielen+2+remainder_ielen) <= MAX_IE_SZ) {
+ memcpy(pframe, premainder_ie, remainder_ielen);
+ pframe += remainder_ielen;
+ pattrib->pktlen += remainder_ielen;
+ }
+ } else {
+ memcpy(pframe, cur_network->IEs, cur_network->IELength);
+ pframe += cur_network->IELength;
+ pattrib->pktlen += cur_network->IELength;
+ }
+ } else
+#endif
+ {
+ /* timestamp will be inserted by hardware */
+ pframe += 8;
+ pattrib->pktlen += 8;
+
+ /* beacon interval: 2 bytes */
+
+ memcpy(pframe, (unsigned char *)(rtw_get_beacon_interval_from_ie(cur_network->IEs)), 2);
+
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* capability info: 2 bytes */
+
+ memcpy(pframe, (unsigned char *)(rtw_get_capability_from_ie(cur_network->IEs)), 2);
+
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* below for ad-hoc mode */
+
+ /* SSID */
+ pframe = rtw_set_ie(pframe, _SSID_IE_, cur_network->Ssid.SsidLength, cur_network->Ssid.Ssid, &pattrib->pktlen);
+
+ /* supported rates... */
+ rate_len = rtw_get_rateset_len(cur_network->SupportedRates);
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, ((rate_len > 8) ? 8 : rate_len), cur_network->SupportedRates, &pattrib->pktlen);
+
+ /* DS parameter set */
+ pframe = rtw_set_ie(pframe, _DSSET_IE_, 1, (unsigned char *)&(cur_network->Configuration.DSConfig), &pattrib->pktlen);
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) {
+ u8 erpinfo = 0;
+ u32 ATIMWindow;
+ /* IBSS Parameter Set... */
+ /* ATIMWindow = cur->Configuration.ATIMWindow; */
+ ATIMWindow = 0;
+ pframe = rtw_set_ie(pframe, _IBSS_PARA_IE_, 2, (unsigned char *)(&ATIMWindow), &pattrib->pktlen);
+
+ /* ERP IE */
+ pframe = rtw_set_ie(pframe, _ERPINFO_IE_, 1, &erpinfo, &pattrib->pktlen);
+ }
+
+
+ /* EXTENDED SUPPORTED RATES */
+ if (rate_len > 8)
+ pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (rate_len - 8), (cur_network->SupportedRates + 8), &pattrib->pktlen);
+ /* todo: HT for ad-hoc */
+ }
+
+#ifdef CONFIG_88EU_P2P
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO) && is_valid_p2p_probereq) {
+ u32 len;
+ len = build_probe_resp_p2p_ie(pwdinfo, pframe);
+
+ pframe += len;
+ pattrib->pktlen += len;
+ }
+#endif /* CONFIG_88EU_P2P */
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+
+ return;
+}
+
+static int _issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da, int wait_ack)
+{
+ int ret = _FAIL;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ unsigned char *mac;
+ unsigned char bssrate[NumRates];
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ int bssrate_len = 0;
+ u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+issue_probereq\n"));
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ goto exit;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ mac = myid(&(padapter->eeprompriv));
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ if (da) {
+ /* unicast probe request frame */
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, da, ETH_ALEN);
+ } else {
+ /* broadcast probe request frame */
+ memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, bc_addr, ETH_ALEN);
+ }
+
+ memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_PROBEREQ);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ if (pssid)
+ pframe = rtw_set_ie(pframe, _SSID_IE_, pssid->SsidLength, pssid->Ssid, &(pattrib->pktlen));
+ else
+ pframe = rtw_set_ie(pframe, _SSID_IE_, 0, NULL, &(pattrib->pktlen));
+
+ get_rate_set(padapter, bssrate, &bssrate_len);
+
+ if (bssrate_len > 8) {
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, bssrate, &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen));
+ } else {
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, bssrate_len, bssrate, &(pattrib->pktlen));
+ }
+
+ /* add wps_ie for WPS 2.0 */
+ if (pmlmepriv->wps_probe_req_ie_len > 0 && pmlmepriv->wps_probe_req_ie) {
+ memcpy(pframe, pmlmepriv->wps_probe_req_ie, pmlmepriv->wps_probe_req_ie_len);
+ pframe += pmlmepriv->wps_probe_req_ie_len;
+ pattrib->pktlen += pmlmepriv->wps_probe_req_ie_len;
+ }
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
+ ("issuing probe_req, tx_len=%d\n", pattrib->last_txcmdsz));
+
+ if (wait_ack) {
+ ret = dump_mgntframe_and_wait_ack(padapter, pmgntframe);
+ } else {
+ dump_mgntframe(padapter, pmgntframe);
+ ret = _SUCCESS;
+ }
+
+exit:
+ return ret;
+}
+
+inline void issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da)
+{
+ _issue_probereq(padapter, pssid, da, false);
+}
+
+int issue_probereq_ex(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da,
+ int try_cnt, int wait_ms)
+{
+ int ret;
+ int i = 0;
+ u32 start = rtw_get_current_time();
+
+ do {
+ ret = _issue_probereq(padapter, pssid, da, wait_ms > 0 ? true : false);
+
+ i++;
+
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
+ break;
+
+ if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
+ rtw_msleep_os(wait_ms);
+
+ } while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
+
+ if (ret != _FAIL) {
+ ret = _SUCCESS;
+ goto exit;
+ }
+
+ if (try_cnt && wait_ms) {
+ if (da)
+ DBG_88E(FUNC_ADPT_FMT" to %pM, ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter), da, rtw_get_oper_ch(padapter),
+ ret == _SUCCESS ? ", acked" : "", i, try_cnt, rtw_get_passing_time_ms(start));
+ else
+ DBG_88E(FUNC_ADPT_FMT", ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter), rtw_get_oper_ch(padapter),
+ ret == _SUCCESS ? ", acked" : "", i, try_cnt, rtw_get_passing_time_ms(start));
+ }
+exit:
+ return ret;
+}
+
+/* if psta == NULL, it indicates we are a station (client) now... */
+void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short status)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ unsigned int val32;
+ u16 val16;
+#ifdef CONFIG_88EU_AP_MODE
+ __le16 le_val16;
+#endif
+ int use_shared_key = 0;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_AUTH);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+
+ if (psta) {/* for AP mode */
+#ifdef CONFIG_88EU_AP_MODE
+
+ memcpy(pwlanhdr->addr1, psta->hwaddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, myid(&(padapter->eeprompriv)), ETH_ALEN);
+
+
+ /* setting auth algo number */
+ val16 = (u16)psta->authalg;
+
+ if (status != _STATS_SUCCESSFUL_)
+ val16 = 0;
+
+ if (val16) {
+ le_val16 = cpu_to_le16(val16);
+ use_shared_key = 1;
+ } else {
+ le_val16 = 0;
+ }
+
+ pframe = rtw_set_fixed_ie(pframe, _AUTH_ALGM_NUM_, (unsigned char *)&le_val16, &(pattrib->pktlen));
+
+ /* setting auth seq number */
+ val16 = (u16)psta->auth_seq;
+ le_val16 = cpu_to_le16(val16);
+ pframe = rtw_set_fixed_ie(pframe, _AUTH_SEQ_NUM_, (unsigned char *)&le_val16, &(pattrib->pktlen));
+
+ /* setting status code... */
+ val16 = status;
+ le_val16 = cpu_to_le16(val16);
+ pframe = rtw_set_fixed_ie(pframe, _STATUS_CODE_, (unsigned char *)&le_val16, &(pattrib->pktlen));
+
+ /* add the challenge text... */
+ if ((psta->auth_seq == 2) && (psta->state & WIFI_FW_AUTH_STATE) && (use_shared_key == 1))
+ pframe = rtw_set_ie(pframe, _CHLGETXT_IE_, 128, psta->chg_txt, &(pattrib->pktlen));
+#endif
+ } else {
+ __le32 le_tmp32;
+ __le16 le_tmp16;
+ memcpy(pwlanhdr->addr1, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
+
+ /* setting auth algo number */
+ val16 = (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared) ? 1 : 0;/* 0:OPEN System, 1:Shared key */
+ if (val16)
+ use_shared_key = 1;
+
+ /* setting IV for auth seq #3 */
+ if ((pmlmeinfo->auth_seq == 3) && (pmlmeinfo->state & WIFI_FW_AUTH_STATE) && (use_shared_key == 1)) {
+ val32 = ((pmlmeinfo->iv++) | (pmlmeinfo->key_index << 30));
+ le_tmp32 = cpu_to_le32(val32);
+ pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&le_tmp32, &(pattrib->pktlen));
+
+ pattrib->iv_len = 4;
+ }
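+ /* The 4-byte field built above follows the WEP IV layout: a 24-bit IV in
+  * the low three bytes and the key index in the top two bits of the
+  * fourth byte, hence the << 30 shift. */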
+
+ le_tmp16 = cpu_to_le16(val16);
+ pframe = rtw_set_fixed_ie(pframe, _AUTH_ALGM_NUM_, (unsigned char *)&le_tmp16, &(pattrib->pktlen));
+
+ /* setting auth seq number */
+ val16 = pmlmeinfo->auth_seq;
+ le_tmp16 = cpu_to_le16(val16);
+ pframe = rtw_set_fixed_ie(pframe, _AUTH_SEQ_NUM_, (unsigned char *)&le_tmp16, &(pattrib->pktlen));
+
+
+ /* setting status code... */
+ le_tmp16 = cpu_to_le16(status);
+ pframe = rtw_set_fixed_ie(pframe, _STATUS_CODE_, (unsigned char *)&le_tmp16, &(pattrib->pktlen));
+
+ /* then check whether to send the challenge text... */
+ if ((pmlmeinfo->auth_seq == 3) && (pmlmeinfo->state & WIFI_FW_AUTH_STATE) && (use_shared_key == 1)) {
+ pframe = rtw_set_ie(pframe, _CHLGETXT_IE_, 128, pmlmeinfo->chg_txt, &(pattrib->pktlen));
+
+ SetPrivacy(fctrl);
+
+ pattrib->hdrlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ pattrib->encrypt = _WEP40_;
+
+ pattrib->icv_len = 4;
+
+ pattrib->pktlen += pattrib->icv_len;
+ }
+ }
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ rtw_wep_encrypt(padapter, (u8 *)pmgntframe);
+ DBG_88E("%s\n", __func__);
+ dump_mgntframe(padapter, pmgntframe);
+
+ return;
+}
+
+
+void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_info *pstat, int pkt_type)
+{
+#ifdef CONFIG_88EU_AP_MODE
+ struct xmit_frame *pmgntframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ struct pkt_attrib *pattrib;
+ unsigned char *pbuf, *pframe;
+ unsigned short val;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
+ u8 *ie = pnetwork->IEs;
+ __le16 lestatus, leval;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+#endif /* CONFIG_88EU_P2P */
+
+ DBG_88E("%s\n", __func__);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy((void *)GetAddr1Ptr(pwlanhdr), pstat->hwaddr, ETH_ALEN);
+ memcpy((void *)GetAddr2Ptr(pwlanhdr), myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy((void *)GetAddr3Ptr(pwlanhdr), get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ if ((pkt_type == WIFI_ASSOCRSP) || (pkt_type == WIFI_REASSOCRSP))
+ SetFrameSubType(pwlanhdr, pkt_type);
+ else
+ return;
+
+ pattrib->hdrlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen += pattrib->hdrlen;
+ pframe += pattrib->hdrlen;
+
+ /* capability */
+ val = *(unsigned short *)rtw_get_capability_from_ie(ie);
+
+ pframe = rtw_set_fixed_ie(pframe, _CAPABILITY_, (unsigned char *)&val, &(pattrib->pktlen));
+
+ lestatus = cpu_to_le16(status);
+ pframe = rtw_set_fixed_ie(pframe, _STATUS_CODE_, (unsigned char *)&lestatus, &(pattrib->pktlen));
+
+ leval = cpu_to_le16(pstat->aid | BIT(14) | BIT(15));
+ pframe = rtw_set_fixed_ie(pframe, _ASOC_ID_, (unsigned char *)&leval, &(pattrib->pktlen));
+
+ if (pstat->bssratelen <= 8) {
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, pstat->bssratelen, pstat->bssrateset, &(pattrib->pktlen));
+ } else {
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, pstat->bssrateset, &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (pstat->bssratelen-8), pstat->bssrateset+8, &(pattrib->pktlen));
+ }
+
+ if ((pstat->flags & WLAN_STA_HT) && (pmlmepriv->htpriv.ht_option)) {
+ uint ie_len = 0;
+
+ /* FILL HT CAP INFO IE */
+ pbuf = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_CAPABILITY_IE_, &ie_len, (pnetwork->IELength - _BEACON_IE_OFFSET_));
+ if (pbuf && ie_len > 0) {
+ memcpy(pframe, pbuf, ie_len+2);
+ pframe += (ie_len+2);
+ pattrib->pktlen += (ie_len+2);
+ }
+
+ /* FILL HT ADD INFO IE */
+ pbuf = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_ADD_INFO_IE_, &ie_len, (pnetwork->IELength - _BEACON_IE_OFFSET_));
+ if (pbuf && ie_len > 0) {
+ memcpy(pframe, pbuf, ie_len+2);
+ pframe += (ie_len+2);
+ pattrib->pktlen += (ie_len+2);
+ }
+ }
+
+ /* FILL WMM IE */
+ if ((pstat->flags & WLAN_STA_WME) && (pmlmepriv->qospriv.qos_option)) {
+ uint ie_len = 0;
+ unsigned char WMM_PARA_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x01, 0x01};
+
+ for (pbuf = ie + _BEACON_IE_OFFSET_;; pbuf += (ie_len + 2)) {
+ pbuf = rtw_get_ie(pbuf, _VENDOR_SPECIFIC_IE_, &ie_len, (pnetwork->IELength - _BEACON_IE_OFFSET_ - (ie_len + 2)));
+ if (pbuf && _rtw_memcmp(pbuf+2, WMM_PARA_IE, 6)) {
+ memcpy(pframe, pbuf, ie_len+2);
+ pframe += (ie_len+2);
+ pattrib->pktlen += (ie_len+2);
+ break;
+ }
+
+ if ((pbuf == NULL) || (ie_len == 0))
+ break;
+ }
+ }
+
+ if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK)
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6, REALTEK_96B_IE, &(pattrib->pktlen));
+
+ /* add WPS IE for WPS 2.0 */
+ if (pmlmepriv->wps_assoc_resp_ie && pmlmepriv->wps_assoc_resp_ie_len > 0) {
+ memcpy(pframe, pmlmepriv->wps_assoc_resp_ie, pmlmepriv->wps_assoc_resp_ie_len);
+
+ pframe += pmlmepriv->wps_assoc_resp_ie_len;
+ pattrib->pktlen += pmlmepriv->wps_assoc_resp_ie_len;
+ }
+
+#ifdef CONFIG_88EU_P2P
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO) && (pstat->is_p2p_device)) {
+ u32 len;
+
+ len = build_assoc_resp_p2p_ie(pwdinfo, pframe, pstat->p2p_status_code);
+
+ pframe += len;
+ pattrib->pktlen += len;
+ }
+#endif /* CONFIG_88EU_P2P */
+ pattrib->last_txcmdsz = pattrib->pktlen;
+ dump_mgntframe(padapter, pmgntframe);
+#endif
+}
+
+void issue_assocreq(struct adapter *padapter)
+{
+ int ret = _FAIL;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe, *p;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ __le16 le_tmp;
+ unsigned int i, j, ie_len, index = 0;
+ unsigned char rf_type, bssrate[NumRates], sta_bssrate[NumRates];
+ struct ndis_802_11_var_ie *pIE;
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ int bssrate_len = 0, sta_bssrate_len = 0;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ u8 p2pie[255] = { 0x00 };
+ u16 p2pielen = 0;
+#endif /* CONFIG_88EU_P2P */
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ goto exit;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+ memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ASSOCREQ);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ /* caps */
+
+ memcpy(pframe, rtw_get_capability_from_ie(pmlmeinfo->network.IEs), 2);
+
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* listen interval */
+ /* todo: listen interval for power saving */
+ le_tmp = cpu_to_le16(3);
+ memcpy(pframe, (unsigned char *)&le_tmp, 2);
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* SSID */
+ pframe = rtw_set_ie(pframe, _SSID_IE_, pmlmeinfo->network.Ssid.SsidLength, pmlmeinfo->network.Ssid.Ssid, &(pattrib->pktlen));
+
+ /* supported rate & extended supported rate */
+
+ /* Check if the AP's supported rates are also supported by STA. */
+ get_rate_set(padapter, sta_bssrate, &sta_bssrate_len);
+
+ if (pmlmeext->cur_channel == 14)/* in Japan, channel 14 can only use B mode (CCK) */
+ sta_bssrate_len = 4;
+
+ for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
+ if (pmlmeinfo->network.SupportedRates[i] == 0)
+ break;
+ DBG_88E("network.SupportedRates[%d]=%02X\n", i, pmlmeinfo->network.SupportedRates[i]);
+ }
+
+ for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
+ if (pmlmeinfo->network.SupportedRates[i] == 0)
+ break;
+
+ /* Check if the AP's supported rates are also supported by STA. */
+ for (j = 0; j < sta_bssrate_len; j++) {
+ /* Avoid the proprietary data rate (22Mbps) of Handlink WSG-4000 AP */
+ if ((pmlmeinfo->network.SupportedRates[i]|IEEE80211_BASIC_RATE_MASK)
+ == (sta_bssrate[j]|IEEE80211_BASIC_RATE_MASK))
+ break;
+ }
+
+ if (j == sta_bssrate_len) {
+ /* the rate is not supported by STA */
+ DBG_88E("%s(): the rate[%d]=%02X is not supported by STA!\n", __func__, i, pmlmeinfo->network.SupportedRates[i]);
+ } else {
+ /* the rate is supported by STA */
+ bssrate[index++] = pmlmeinfo->network.SupportedRates[i];
+ }
+ }
+
+ bssrate_len = index;
+ DBG_88E("bssrate_len=%d\n", bssrate_len);
+
+ if (bssrate_len == 0) {
+ rtw_free_xmitbuf(pxmitpriv, pmgntframe->pxmitbuf);
+ rtw_free_xmitframe(pxmitpriv, pmgntframe);
+ goto exit; /* don't connect to AP if no joint supported rate */
+ }
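+ /* Example of the intersection above: if the AP advertises 1/2/5.5/11/22
+  * Mbps but get_rate_set() only reports the standard CCK rates, the
+  * proprietary 22 Mbps entry is dropped (with a debug message) and
+  * bssrate[] keeps only the four common rates. */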
+
+
+ if (bssrate_len > 8) {
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, bssrate, &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen));
+ } else {
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, bssrate_len, bssrate, &(pattrib->pktlen));
+ }
+
+ /* RSN */
+ p = rtw_get_ie((pmlmeinfo->network.IEs + sizeof(struct ndis_802_11_fixed_ie)), _RSN_IE_2_, &ie_len, (pmlmeinfo->network.IELength - sizeof(struct ndis_802_11_fixed_ie)));
+ if (p != NULL)
+ pframe = rtw_set_ie(pframe, _RSN_IE_2_, ie_len, (p + 2), &(pattrib->pktlen));
+
+ /* HT caps */
+ if (padapter->mlmepriv.htpriv.ht_option) {
+ p = rtw_get_ie((pmlmeinfo->network.IEs + sizeof(struct ndis_802_11_fixed_ie)), _HT_CAPABILITY_IE_, &ie_len, (pmlmeinfo->network.IELength - sizeof(struct ndis_802_11_fixed_ie)));
+ if ((p != NULL) && (!(is_ap_in_tkip(padapter)))) {
+ memcpy(&(pmlmeinfo->HT_caps), (p + 2), sizeof(struct HT_caps_element));
+
+ /* to disable 40MHz support while gd_bw_40MHz_en = 0 */
+ if (pregpriv->cbw40_enable == 0)
+ pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info &= cpu_to_le16(~(BIT(6) | BIT(1)));
+ else
+ pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= cpu_to_le16(BIT(1));
+
+ /* todo: disable SM power save mode */
+ pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= cpu_to_le16(0x000c);
+
+ rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+ switch (rf_type) {
+ case RF_1T1R:
+ if (pregpriv->rx_stbc)
+ pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= cpu_to_le16(0x0100);/* RX STBC One spatial stream */
+ memcpy(pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate, MCS_rate_1R, 16);
+ break;
+ case RF_2T2R:
+ case RF_1T2R:
+ default:
+ if ((pregpriv->rx_stbc == 0x3) ||/* enable for 2.4/5 GHz */
+ ((pmlmeext->cur_wireless_mode & WIRELESS_11_24N) && (pregpriv->rx_stbc == 0x1)) || /* enable for 2.4GHz */
+ (pregpriv->wifi_spec == 1)) {
+ DBG_88E("declare supporting RX STBC\n");
+ pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= cpu_to_le16(0x0200);/* RX STBC two spatial stream */
+ }
+ memcpy(pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate, MCS_rate_2R, 16);
+ break;
+ }
+ pframe = rtw_set_ie(pframe, _HT_CAPABILITY_IE_, ie_len, (u8 *)(&(pmlmeinfo->HT_caps)), &(pattrib->pktlen));
+ }
+ }
+
+ /* vendor specific IE, such as WPA, WMM, WPS */
+ for (i = sizeof(struct ndis_802_11_fixed_ie); i < pmlmeinfo->network.IELength;) {
+ pIE = (struct ndis_802_11_var_ie *)(pmlmeinfo->network.IEs + i);
+
+ switch (pIE->ElementID) {
+ case _VENDOR_SPECIFIC_IE_:
+ if ((_rtw_memcmp(pIE->data, RTW_WPA_OUI, 4)) ||
+ (_rtw_memcmp(pIE->data, WMM_OUI, 4)) ||
+ (_rtw_memcmp(pIE->data, WPS_OUI, 4))) {
+ if (!padapter->registrypriv.wifi_spec) {
+ /* Commented by Kurt 20110629 */
+ /* With some older APs, the WPS handshake */
+ /* would fail if we append vendor extension information to the AP. */
+ if (_rtw_memcmp(pIE->data, WPS_OUI, 4))
+ pIE->Length = 14;
+ }
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, pIE->Length, pIE->data, &(pattrib->pktlen));
+ }
+ break;
+ default:
+ break;
+ }
+ i += (pIE->Length + 2);
+ }
+
+ if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK)
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6, REALTEK_96B_IE, &(pattrib->pktlen));
+
+#ifdef CONFIG_88EU_P2P
+
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) && !rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE)) {
+ /* Should add the P2P IE in the association request frame. */
+ /* P2P OUI */
+
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* Commented by Albert 20101109 */
+ /* According to the P2P Specification, the association request frame should contain 3 P2P attributes */
+ /* 1. P2P Capability */
+ /* 2. Extended Listen Timing */
+ /* 3. Device Info */
+ /* Commented by Albert 20110516 */
+ /* 4. P2P Interface */
+
+ /* P2P Capability */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CAPABILITY;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Device Capability Bitmap, 1 byte */
+ p2pie[p2pielen++] = DMP_P2P_DEVCAP_SUPPORT;
+
+ /* Group Capability Bitmap, 1 byte */
+ if (pwdinfo->persistent_supported)
+ p2pie[p2pielen++] = P2P_GRPCAP_PERSISTENT_GROUP | DMP_P2P_GRPCAP_SUPPORT;
+ else
+ p2pie[p2pielen++] = DMP_P2P_GRPCAP_SUPPORT;
+
+ /* Extended Listen Timing */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_EX_LISTEN_TIMING;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0004);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Availability Period */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0xFFFF);
+ p2pielen += 2;
+
+ /* Availability Interval */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0xFFFF);
+ p2pielen += 2;
+
+ /* Device Info */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* 21 -> P2P Device Address (6bytes) + Config Methods (2bytes) + Primary Device Type (8bytes) */
+ /* + NumofSecondDevType (1byte) + WPS Device Name ID field (2bytes) + WPS Device Name Len field (2bytes) */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(21 + pwdinfo->device_name_len);
+ p2pielen += 2;
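+ /* 21 = 6 (P2P Device Address) + 2 (Config Methods) + 8 (Primary Device
+  * Type) + 1 (Number of Secondary Device Types) + 2 (Device Name attribute
+  * ID) + 2 (Device Name length field); the device name itself follows. */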
+
+ /* Value: */
+ /* P2P Device Address */
+ memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv), ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* Config Method */
+ /* This field should be big endian, as noted in the P2P specification. */
+ if ((pwdinfo->ui_got_wps_info == P2P_GOT_WPSINFO_PEER_DISPLAY_PIN) ||
+ (pwdinfo->ui_got_wps_info == P2P_GOT_WPSINFO_SELF_DISPLAY_PIN))
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_CONFIG_METHOD_DISPLAY);
+ else
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_CONFIG_METHOD_PBC);
+
+ p2pielen += 2;
+
+ /* Primary Device Type */
+ /* Category ID */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA);
+ p2pielen += 2;
+
+ /* OUI */
+ *(__be32 *)(p2pie + p2pielen) = cpu_to_be32(WPSOUI);
+ p2pielen += 4;
+
+ /* Sub Category ID */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER);
+ p2pielen += 2;
+
+ /* Number of Secondary Device Types */
+ p2pie[p2pielen++] = 0x00; /* No Secondary Device Type List */
+
+ /* Device Name */
+ /* Type: */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME);
+ p2pielen += 2;
+
+ /* Length: */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, pwdinfo->device_name, pwdinfo->device_name_len);
+ p2pielen += pwdinfo->device_name_len;
+
+ /* P2P Interface */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_INTERFACE;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x000D);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, pwdinfo->device_addr, ETH_ALEN); /* P2P Device Address */
+ p2pielen += ETH_ALEN;
+
+ p2pie[p2pielen++] = 1; /* P2P Interface Address Count */
+
+ memcpy(p2pie + p2pielen, pwdinfo->device_addr, ETH_ALEN); /* P2P Interface Address List */
+ p2pielen += ETH_ALEN;
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &pattrib->pktlen);
+ }
+
+#endif /* CONFIG_88EU_P2P */
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+ dump_mgntframe(padapter, pmgntframe);
+
+ ret = _SUCCESS;
+
+exit:
+ if (ret == _SUCCESS)
+ rtw_buf_update(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len, (u8 *)pwlanhdr, pattrib->pktlen);
+ else
+ rtw_buf_free(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len);
+
+ return;
+}
+
+/* when wait_ack is true, this function should be called in process context */
+static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int power_mode, int wait_ack)
+{
+ int ret = _FAIL;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv;
+ struct mlme_ext_priv *pmlmeext;
+ struct mlme_ext_info *pmlmeinfo;
+
+ if (!padapter)
+ goto exit;
+
+ pxmitpriv = &(padapter->xmitpriv);
+ pmlmeext = &(padapter->mlmeextpriv);
+ pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ goto exit;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+ pattrib->retry_ctrl = false;
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)
+ SetFrDs(fctrl);
+ else if ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE)
+ SetToDs(fctrl);
+
+ if (power_mode)
+ SetPwrMgt(fctrl);
+
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_DATA_NULL);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ if (wait_ack) {
+ ret = dump_mgntframe_and_wait_ack(padapter, pmgntframe);
+ } else {
+ dump_mgntframe(padapter, pmgntframe);
+ ret = _SUCCESS;
+ }
+
+exit:
+ return ret;
+}
+
+
+/* when wait_ms > 0, this function should be called in process context */
+/* da == NULL for station mode */
+int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int power_mode, int try_cnt, int wait_ms)
+{
+ int ret;
+ int i = 0;
+ u32 start = rtw_get_current_time();
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ /* da == NULL, assume it's a null data frame from STA to AP */
+ if (da == NULL)
+ da = get_my_bssid(&(pmlmeinfo->network));
+
+ do {
+ ret = _issue_nulldata(padapter, da, power_mode, wait_ms > 0 ? true : false);
+
+ i++;
+
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
+ break;
+
+ if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
+ rtw_msleep_os(wait_ms);
+ } while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
+
+ if (ret != _FAIL) {
+ ret = _SUCCESS;
+ goto exit;
+ }
+
+ if (try_cnt && wait_ms) {
+ if (da)
+ DBG_88E(FUNC_ADPT_FMT" to %pM, ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter), da, rtw_get_oper_ch(padapter),
+ ret == _SUCCESS ? ", acked" : "", i, try_cnt, rtw_get_passing_time_ms(start));
+ else
+ DBG_88E(FUNC_ADPT_FMT", ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter), rtw_get_oper_ch(padapter),
+ ret == _SUCCESS ? ", acked" : "", i, try_cnt, rtw_get_passing_time_ms(start));
+ }
+exit:
+ return ret;
+}
+
+/* when wait_ack is true, this function should be called in process context */
+static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int wait_ack)
+{
+ int ret = _FAIL;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl, *qc;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ DBG_88E("%s\n", __func__);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ goto exit;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ pattrib->hdrlen += 2;
+ pattrib->qos_en = true;
+ pattrib->eosp = 1;
+ pattrib->ack_policy = 0;
+ pattrib->mdata = 0;
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)
+ SetFrDs(fctrl);
+ else if ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE)
+ SetToDs(fctrl);
+
+ if (pattrib->mdata)
+ SetMData(fctrl);
+
+ qc = (unsigned short *)(pframe + pattrib->hdrlen - 2);
+
+ SetPriority(qc, tid);
+
+ SetEOSP(qc, pattrib->eosp);
+
+ SetAckpolicy(qc, pattrib->ack_policy);
+
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_QOS_DATA_NULL);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr_qos);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr_qos);
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ if (wait_ack) {
+ ret = dump_mgntframe_and_wait_ack(padapter, pmgntframe);
+ } else {
+ dump_mgntframe(padapter, pmgntframe);
+ ret = _SUCCESS;
+ }
+
+exit:
+ return ret;
+}
+
+/* when wait_ms > 0, this function should be called in process context */
+/* da == NULL for station mode */
+int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int try_cnt, int wait_ms)
+{
+ int ret;
+ int i = 0;
+ u32 start = rtw_get_current_time();
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ /* da == NULL, assume it's a null data frame from STA to AP */
+ if (da == NULL)
+ da = get_my_bssid(&(pmlmeinfo->network));
+
+ do {
+ ret = _issue_qos_nulldata(padapter, da, tid, wait_ms > 0 ? true : false);
+
+ i++;
+
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
+ break;
+
+ if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
+ rtw_msleep_os(wait_ms);
+ } while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
+
+ if (ret != _FAIL) {
+ ret = _SUCCESS;
+ goto exit;
+ }
+
+ if (try_cnt && wait_ms) {
+ if (da)
+ DBG_88E(FUNC_ADPT_FMT" to %pM, ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter), da, rtw_get_oper_ch(padapter),
+ ret == _SUCCESS ? ", acked" : "", i, try_cnt, rtw_get_passing_time_ms(start));
+ else
+ DBG_88E(FUNC_ADPT_FMT", ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter), rtw_get_oper_ch(padapter),
+ ret == _SUCCESS ? ", acked" : "", i, try_cnt, rtw_get_passing_time_ms(start));
+ }
+exit:
+ return ret;
+}
+
+static int _issue_deauth(struct adapter *padapter, unsigned char *da, unsigned short reason, u8 wait_ack)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ int ret = _FAIL;
+ __le16 le_tmp;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+#endif /* CONFIG_88EU_P2P */
+
+#ifdef CONFIG_88EU_P2P
+ if (!(rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) && (pwdinfo->rx_invitereq_info.scan_op_ch_only)) {
+ _cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey);
+ _set_timer(&pwdinfo->reset_ch_sitesurvey, 10);
+ }
+#endif /* CONFIG_88EU_P2P */
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ goto exit;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+ pattrib->retry_ctrl = false;
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_DEAUTH);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ le_tmp = cpu_to_le16(reason);
+ pframe = rtw_set_fixed_ie(pframe, _RSON_CODE_, (unsigned char *)&le_tmp, &(pattrib->pktlen));
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+
+ if (wait_ack) {
+ ret = dump_mgntframe_and_wait_ack(padapter, pmgntframe);
+ } else {
+ dump_mgntframe(padapter, pmgntframe);
+ ret = _SUCCESS;
+ }
+
+exit:
+ return ret;
+}
+
+int issue_deauth(struct adapter *padapter, unsigned char *da, unsigned short reason)
+{
+ DBG_88E("%s to %pM\n", __func__, da);
+ return _issue_deauth(padapter, da, reason, false);
+}
+
+int issue_deauth_ex(struct adapter *padapter, u8 *da, unsigned short reason, int try_cnt,
+ int wait_ms)
+{
+ int ret;
+ int i = 0;
+ u32 start = rtw_get_current_time();
+
+ do {
+ ret = _issue_deauth(padapter, da, reason, wait_ms > 0 ? true : false);
+
+ i++;
+
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
+ break;
+
+ if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
+ rtw_msleep_os(wait_ms);
+ } while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
+
+ if (ret != _FAIL) {
+ ret = _SUCCESS;
+ goto exit;
+ }
+
+ if (try_cnt && wait_ms) {
+ if (da)
+ DBG_88E(FUNC_ADPT_FMT" to %pM, ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter), da, rtw_get_oper_ch(padapter),
+ ret == _SUCCESS ? ", acked" : "", i, try_cnt, rtw_get_passing_time_ms(start));
+ else
+ DBG_88E(FUNC_ADPT_FMT", ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter), rtw_get_oper_ch(padapter),
+ ret == _SUCCESS ? ", acked" : "", i, try_cnt, rtw_get_passing_time_ms(start));
+ }
+exit:
+ return ret;
+}
+
+void issue_action_spct_ch_switch(struct adapter *padapter, u8 *ra, u8 new_ch, u8 ch_offset)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+
+
+ DBG_88E(FUNC_NDEV_FMT" ra =%pM, ch:%u, offset:%u\n",
+ FUNC_NDEV_ARG(padapter->pnetdev), ra, new_ch, ch_offset);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, ra, ETH_ALEN); /* RA */
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN); /* TA */
+ memcpy(pwlanhdr->addr3, ra, ETH_ALEN); /* DA = RA */
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ /* category, action */
+ {
+ u8 category, action;
+ category = RTW_WLAN_CATEGORY_SPECTRUM_MGMT;
+ action = RTW_WLAN_ACTION_SPCT_CHL_SWITCH;
+
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
+ }
+
+	pframe = rtw_set_ie_ch_switch(pframe, &(pattrib->pktlen), 0, new_ch, 0);
+ pframe = rtw_set_ie_secondary_ch_offset(pframe, &(pattrib->pktlen),
+ hal_ch_offset_to_secondary_ch_offset(ch_offset));
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+}
+
+void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned char action, unsigned short status)
+{
+ u8 category = RTW_WLAN_CATEGORY_BACK;
+ u16 start_seq;
+ u16 BA_para_set;
+ u16 reason_code;
+ u16 BA_timeout_value;
+ __le16 le_tmp;
+ u16 BA_starting_seqctrl = 0;
+ enum ht_cap_ampdu_factor max_rx_ampdu_factor;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ u8 *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ u16 *fctrl;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct sta_info *psta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+
+ DBG_88E("%s, category=%d, action=%d, status=%d\n", __func__, category, action, status);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ /* memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN); */
+ memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
+
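+	/* category 3 is Block Ack; action 0/1/2 are ADDBA req/ADDBA rsp/DELBA */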
+ if (category == 3) {
+ switch (action) {
+ case 0: /* ADDBA req */
+ do {
+ pmlmeinfo->dialogToken++;
+ } while (pmlmeinfo->dialogToken == 0);
+ pframe = rtw_set_fixed_ie(pframe, 1, &(pmlmeinfo->dialogToken), &(pattrib->pktlen));
+
+ BA_para_set = (0x1002 | ((status & 0xf) << 2)); /* immediate ack & 64 buffer size */
+ le_tmp = cpu_to_le16(BA_para_set);
+ pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(le_tmp)), &(pattrib->pktlen));
+
+			BA_timeout_value = 5000;/* 5000 TUs (~5 s) */
+ le_tmp = cpu_to_le16(BA_timeout_value);
+ pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(le_tmp)), &(pattrib->pktlen));
+
+ psta = rtw_get_stainfo(pstapriv, raddr);
+ if (psta != NULL) {
+ start_seq = (psta->sta_xmitpriv.txseq_tid[status & 0x07]&0xfff) + 1;
+
+ DBG_88E("BA_starting_seqctrl=%d for TID=%d\n", start_seq, status & 0x07);
+
+ psta->BA_starting_seqctrl[status & 0x07] = start_seq;
+
+ BA_starting_seqctrl = start_seq << 4;
+ }
+ le_tmp = cpu_to_le16(BA_starting_seqctrl);
+ pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(le_tmp)), &(pattrib->pktlen));
+ break;
+ case 1: /* ADDBA rsp */
+ pframe = rtw_set_fixed_ie(pframe, 1, &(pmlmeinfo->ADDBA_req.dialog_token), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&status), &(pattrib->pktlen));
+ rtw_hal_get_def_var(padapter, HW_VAR_MAX_RX_AMPDU_FACTOR, &max_rx_ampdu_factor);
+ if (MAX_AMPDU_FACTOR_64K == max_rx_ampdu_factor)
+ BA_para_set = (((pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x1000); /* 64 buffer size */
+ else if (MAX_AMPDU_FACTOR_32K == max_rx_ampdu_factor)
+ BA_para_set = (((pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0800); /* 32 buffer size */
+ else if (MAX_AMPDU_FACTOR_16K == max_rx_ampdu_factor)
+ BA_para_set = (((pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0400); /* 16 buffer size */
+ else if (MAX_AMPDU_FACTOR_8K == max_rx_ampdu_factor)
+ BA_para_set = (((pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0200); /* 8 buffer size */
+ else
+ BA_para_set = (((pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x1000); /* 64 buffer size */
+
+ if (pregpriv->ampdu_amsdu == 0)/* disabled */
+ BA_para_set = BA_para_set & ~BIT(0);
+ else if (pregpriv->ampdu_amsdu == 1)/* enabled */
+ BA_para_set = BA_para_set | BIT(0);
+ le_tmp = cpu_to_le16(BA_para_set);
+
+ pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(le_tmp)), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(pmlmeinfo->ADDBA_req.BA_timeout_value)), &(pattrib->pktlen));
+ break;
+ case 2:/* DELBA */
+ BA_para_set = (status & 0x1F) << 3;
+ le_tmp = cpu_to_le16(BA_para_set);
+ pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(le_tmp)), &(pattrib->pktlen));
+
+ reason_code = 37;/* Requested from peer STA as it does not want to use the mechanism */
+ le_tmp = cpu_to_le16(reason_code);
+ pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(le_tmp)), &(pattrib->pktlen));
+ break;
+ default:
+ break;
+ }
+ }
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+}
+
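+/* Send a 20/40 BSS coexistence public action frame to the associated AP,
+ * carrying a 20 MHz BSS width request and/or an intolerant channel report
+ * built from the scanned-network queue.
+ */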
+static void issue_action_BSSCoexistPacket(struct adapter *padapter)
+{
+ unsigned long irqL;
+ struct list_head *plist, *phead;
+ unsigned char category, action;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct wlan_network *pnetwork = NULL;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ u8 InfoContent[16] = {0};
+ u8 ICS[8][15];
+ if ((pmlmepriv->num_FortyMHzIntolerant == 0) || (pmlmepriv->num_sta_no_ht == 0))
+ return;
+
+ if (pmlmeinfo->bwmode_updated)
+ return;
+
+
+ DBG_88E("%s\n", __func__);
+
+
+ category = RTW_WLAN_CATEGORY_PUBLIC;
+ action = ACT_PUBLIC_BSSCOEXIST;
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
+
+
+ /* */
+ if (pmlmepriv->num_FortyMHzIntolerant > 0) {
+ u8 iedata = 0;
+
+ iedata |= BIT(2);/* 20 MHz BSS Width Request */
+
+ pframe = rtw_set_ie(pframe, EID_BSSCoexistence, 1, &iedata, &(pattrib->pktlen));
+ }
+
+
+ /* */
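+	/* ICS[class][chan] marks 2.4 GHz channels where a non-HT BSS was seen;
+	 * ICS[class][0] flags that the row is in use (only class 0 is filled here). */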
+ _rtw_memset(ICS, 0, sizeof(ICS));
+ if (pmlmepriv->num_sta_no_ht > 0) {
+ int i;
+
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1) {
+ int len;
+ u8 *p;
+ struct wlan_bssid_ex *pbss_network;
+
+ if (rtw_end_of_queue_search(phead, plist))
+ break;
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ plist = get_next(plist);
+
+ pbss_network = (struct wlan_bssid_ex *)&pnetwork->network;
+
+ p = rtw_get_ie(pbss_network->IEs + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, pbss_network->IELength - _FIXED_IE_LENGTH_);
+ if ((p == NULL) || (len == 0)) { /* non-HT */
+ if ((pbss_network->Configuration.DSConfig <= 0) || (pbss_network->Configuration.DSConfig > 14))
+ continue;
+
+ ICS[0][pbss_network->Configuration.DSConfig] = 1;
+
+ if (ICS[0][0] == 0)
+ ICS[0][0] = 1;
+ }
+ }
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ for (i = 0; i < 8; i++) {
+ if (ICS[i][0] == 1) {
+ int j, k = 0;
+
+ InfoContent[k] = i;
+ /* SET_BSS_INTOLERANT_ELE_REG_CLASS(InfoContent, i); */
+ k++;
+
+ for (j = 1; j <= 14; j++) {
+ if (ICS[i][j] == 1) {
+ if (k < 16) {
+ InfoContent[k] = j; /* channel number */
+ /* SET_BSS_INTOLERANT_ELE_CHANNEL(InfoContent+k, j); */
+ k++;
+ }
+ }
+ }
+
+ pframe = rtw_set_ie(pframe, EID_BSSIntolerantChlReport, k, InfoContent, &(pattrib->pktlen));
+ }
+ }
+ }
+
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+}
+
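+/* Tear down block-ack agreements with 'addr': as recipient (initiator == 0)
+ * disable rx reordering per TID, as originator (initiator == 1) clear the tx
+ * aggregation bitmaps, issuing a DELBA action frame for each active TID.
+ */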
+unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr)
+{
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_info *psta = NULL;
+ /* struct recv_reorder_ctrl *preorder_ctrl; */
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u16 tid;
+
+ if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
+ if (!(pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS))
+ return _SUCCESS;
+
+ psta = rtw_get_stainfo(pstapriv, addr);
+ if (psta == NULL)
+ return _SUCCESS;
+
+ if (initiator == 0) { /* recipient */
+ for (tid = 0; tid < MAXTID; tid++) {
+ if (psta->recvreorder_ctrl[tid].enable) {
+ DBG_88E("rx agg disable tid(%d)\n", tid);
+ issue_action_BA(padapter, addr, RTW_WLAN_ACTION_DELBA, (((tid << 1) | initiator)&0x1F));
+ psta->recvreorder_ctrl[tid].enable = false;
+ psta->recvreorder_ctrl[tid].indicate_seq = 0xffff;
+ }
+ }
+ } else if (initiator == 1) { /* originator */
+ for (tid = 0; tid < MAXTID; tid++) {
+ if (psta->htpriv.agg_enable_bitmap & BIT(tid)) {
+ DBG_88E("tx agg disable tid(%d)\n", tid);
+ issue_action_BA(padapter, addr, RTW_WLAN_ACTION_DELBA, (((tid << 1) | initiator)&0x1F));
+ psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
+ psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
+ }
+ }
+ }
+
+ return _SUCCESS;
+}
+
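+/* Push a beacon to the hardware and poll HW_VAR_BCN_VALID until it is
+ * reported as transmitted, reissuing the beacon up to 100 times.
+ */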
+unsigned int send_beacon(struct adapter *padapter)
+{
+ u8 bxmitok = false;
+ int issue = 0;
+ int poll = 0;
+
+ u32 start = rtw_get_current_time();
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_BCN_VALID, NULL);
+ do {
+ issue_beacon(padapter, 100);
+ issue++;
+ do {
+ rtw_yield_os();
+ rtw_hal_get_hwreg(padapter, HW_VAR_BCN_VALID, (u8 *)(&bxmitok));
+ poll++;
+ } while ((poll%10) != 0 && !bxmitok && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
+ } while (!bxmitok && issue < 100 && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
+
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
+ return _FAIL;
+ if (!bxmitok) {
+ DBG_88E("%s fail! %u ms\n", __func__, rtw_get_passing_time_ms(start));
+ return _FAIL;
+ } else {
+ u32 passing_time = rtw_get_passing_time_ms(start);
+
+ if (passing_time > 100 || issue > 3)
+ DBG_88E("%s success, issue:%d, poll:%d, %u ms\n", __func__, issue, poll, rtw_get_passing_time_ms(start));
+ return _SUCCESS;
+ }
+}
+
+/****************************************************************************
+
+Following are some utility functions for WiFi MLME
+
+*****************************************************************************/
+
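+/* Walk the site-survey channel list one channel per invocation: switch to the
+ * next channel, send probe requests for active scans and rearm the survey
+ * timer; once the list is exhausted, restore the original channel and report
+ * the survey-done event.
+ */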
+void site_survey(struct adapter *padapter)
+{
+ unsigned char survey_channel = 0, val8;
+ enum rt_scan_type ScanType = SCAN_PASSIVE;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u32 initialgain = 0;
+
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ if ((pwdinfo->rx_invitereq_info.scan_op_ch_only) || (pwdinfo->p2p_info.scan_op_ch_only)) {
+ if (pwdinfo->rx_invitereq_info.scan_op_ch_only) {
+ survey_channel = pwdinfo->rx_invitereq_info.operation_ch[pmlmeext->sitesurvey_res.channel_idx];
+ } else {
+ survey_channel = pwdinfo->p2p_info.operation_ch[pmlmeext->sitesurvey_res.channel_idx];
+ }
+ ScanType = SCAN_ACTIVE;
+ } else if (rtw_p2p_findphase_ex_is_social(pwdinfo)) {
+ /* Commented by Albert 2011/06/03 */
+		/* The driver is in the find phase; it should scan the social channels. */
+ int ch_set_idx;
+ survey_channel = pwdinfo->social_chan[pmlmeext->sitesurvey_res.channel_idx];
+ ch_set_idx = rtw_ch_set_search_ch(pmlmeext->channel_set, survey_channel);
+ if (ch_set_idx >= 0)
+ ScanType = pmlmeext->channel_set[ch_set_idx].ScanType;
+ else
+ ScanType = SCAN_ACTIVE;
+ } else
+#endif /* CONFIG_88EU_P2P */
+ {
+ struct rtw_ieee80211_channel *ch;
+ if (pmlmeext->sitesurvey_res.channel_idx < pmlmeext->sitesurvey_res.ch_num) {
+ ch = &pmlmeext->sitesurvey_res.ch[pmlmeext->sitesurvey_res.channel_idx];
+ survey_channel = ch->hw_value;
+ ScanType = (ch->flags & RTW_IEEE80211_CHAN_PASSIVE_SCAN) ? SCAN_PASSIVE : SCAN_ACTIVE;
+ }
+ }
+
+ if (survey_channel != 0) {
+ /* PAUSE 4-AC Queue when site_survey */
+ /* rtw_hal_get_hwreg(padapter, HW_VAR_TXPAUSE, (u8 *)(&val8)); */
+ /* val8 |= 0x0f; */
+ /* rtw_hal_set_hwreg(padapter, HW_VAR_TXPAUSE, (u8 *)(&val8)); */
+ if (pmlmeext->sitesurvey_res.channel_idx == 0)
+ set_channel_bwmode(padapter, survey_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ else
+ SelectChannel(padapter, survey_channel);
+
+ if (ScanType == SCAN_ACTIVE) { /* obey the channel plan setting... */
+ #ifdef CONFIG_88EU_P2P
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_SCAN) ||
+ rtw_p2p_chk_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH)) {
+ issue_probereq_p2p(padapter, NULL);
+ issue_probereq_p2p(padapter, NULL);
+ issue_probereq_p2p(padapter, NULL);
+ } else
+ #endif /* CONFIG_88EU_P2P */
+ {
+ int i;
+ for (i = 0; i < RTW_SSID_SCAN_AMOUNT; i++) {
+ if (pmlmeext->sitesurvey_res.ssid[i].SsidLength) {
+ /* todo: to issue two probe req??? */
+ issue_probereq(padapter, &(pmlmeext->sitesurvey_res.ssid[i]), NULL);
+ /* rtw_msleep_os(SURVEY_TO>>1); */
+ issue_probereq(padapter, &(pmlmeext->sitesurvey_res.ssid[i]), NULL);
+ }
+ }
+
+ if (pmlmeext->sitesurvey_res.scan_mode == SCAN_ACTIVE) {
+ /* todo: to issue two probe req??? */
+ issue_probereq(padapter, NULL, NULL);
+ /* rtw_msleep_os(SURVEY_TO>>1); */
+ issue_probereq(padapter, NULL, NULL);
+ }
+ }
+ }
+
+ set_survey_timer(pmlmeext, pmlmeext->chan_scan_time);
+ } else {
+ /* channel number is 0 or this channel is not valid. */
+
+#ifdef CONFIG_88EU_P2P
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_SCAN) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH)) {
+ if ((pwdinfo->rx_invitereq_info.scan_op_ch_only) || (pwdinfo->p2p_info.scan_op_ch_only)) {
+				/* Set the find_phase_state_exchange_cnt to P2P_FINDPHASE_EX_MAX. */
+				/* This lets the following flow finish the scan. */
+ rtw_p2p_findphase_ex_set(pwdinfo, P2P_FINDPHASE_EX_MAX);
+ }
+ }
+
+ if (rtw_p2p_findphase_ex_is_needed(pwdinfo)) {
+ /* Set the P2P State to the listen state of find phase and set the current channel to the listen channel */
+ set_channel_bwmode(padapter, pwdinfo->listen_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_FIND_PHASE_LISTEN);
+ pmlmeext->sitesurvey_res.state = SCAN_DISABLE;
+
+ initialgain = 0xff; /* restore RX GAIN */
+ rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
+ /* turn on dynamic functions */
+ Restore_DM_Func_Flag(padapter);
+ /* Switch_DM_Func(padapter, DYNAMIC_FUNC_DIG|DYNAMIC_FUNC_HP|DYNAMIC_FUNC_SS, true); */
+
+ _set_timer(&pwdinfo->find_phase_timer, (u32)((u32)(pwdinfo->listen_dwell) * 100));
+ } else
+#endif /* CONFIG_88EU_P2P */
+ {
+			/* 20100721: Interrupt the scan operation here. */
+			/* For SW antenna diversity before link, switch to the other antenna and scan again, */
+			/* then compare the scan results and select the better one for the connection. */
+ if (rtw_hal_antdiv_before_linked(padapter)) {
+ pmlmeext->sitesurvey_res.bss_cnt = 0;
+ pmlmeext->sitesurvey_res.channel_idx = -1;
+ pmlmeext->chan_scan_time = SURVEY_TO / 2;
+ set_survey_timer(pmlmeext, pmlmeext->chan_scan_time);
+ return;
+ }
+#ifdef CONFIG_88EU_P2P
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_SCAN) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH))
+ rtw_p2p_set_state(pwdinfo, rtw_p2p_pre_state(pwdinfo));
+ rtw_p2p_findphase_ex_set(pwdinfo, P2P_FINDPHASE_EX_NONE);
+#endif /* CONFIG_88EU_P2P */
+
+ pmlmeext->sitesurvey_res.state = SCAN_COMPLETE;
+
+ /* switch back to the original channel */
+
+#ifdef CONFIG_88EU_P2P
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_LISTEN))
+ set_channel_bwmode(padapter, pwdinfo->listen_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ else
+ set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+#endif /* CONFIG_88EU_P2P */
+
+ /* flush 4-AC Queue after site_survey */
+ /* val8 = 0; */
+ /* rtw_hal_set_hwreg(padapter, HW_VAR_TXPAUSE, (u8 *)(&val8)); */
+
+ /* config MSR */
+ Set_MSR(padapter, (pmlmeinfo->state & 0x3));
+
+ initialgain = 0xff; /* restore RX GAIN */
+ rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
+ /* turn on dynamic functions */
+ Restore_DM_Func_Flag(padapter);
+ /* Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true); */
+
+ if (is_client_associated_to_ap(padapter))
+ issue_nulldata(padapter, NULL, 0, 3, 500);
+
+ val8 = 0; /* survey done */
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
+
+ report_surveydone_event(padapter);
+
+ pmlmeext->chan_scan_time = SURVEY_TO;
+ pmlmeext->sitesurvey_res.state = SCAN_DISABLE;
+
+ issue_action_BSSCoexistPacket(padapter);
+ issue_action_BSSCoexistPacket(padapter);
+ issue_action_BSSCoexistPacket(padapter);
+ }
+ }
+ return;
+}
+
+/* collect bss info from Beacon and Probe request/response frames. */
+u8 collect_bss_info(struct adapter *padapter, union recv_frame *precv_frame, struct wlan_bssid_ex *bssid)
+{
+ int i;
+ u32 len;
+ u8 *p;
+ u16 val16, subtype;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ u32 packet_len = precv_frame->u.hdr.len;
+ u8 ie_offset;
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ __le32 le32_tmp;
+
+ len = packet_len - sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ if (len > MAX_IE_SZ)
+ return _FAIL;
+
+ _rtw_memset(bssid, 0, sizeof(struct wlan_bssid_ex));
+
+ subtype = GetFrameSubType(pframe);
+
+ if (subtype == WIFI_BEACON) {
+ bssid->Reserved[0] = 1;
+ ie_offset = _BEACON_IE_OFFSET_;
+ } else {
+ /* FIXME : more type */
+ if (subtype == WIFI_PROBEREQ) {
+ ie_offset = _PROBEREQ_IE_OFFSET_;
+ bssid->Reserved[0] = 2;
+ } else if (subtype == WIFI_PROBERSP) {
+ ie_offset = _PROBERSP_IE_OFFSET_;
+ bssid->Reserved[0] = 3;
+ } else {
+ bssid->Reserved[0] = 0;
+ ie_offset = _FIXED_IE_LENGTH_;
+ }
+ }
+
+ bssid->Length = sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + len;
+
+ /* below is to copy the information element */
+ bssid->IELength = len;
+ memcpy(bssid->IEs, (pframe + sizeof(struct rtw_ieee80211_hdr_3addr)), bssid->IELength);
+
+ /* get the signal strength */
+	bssid->Rssi = precv_frame->u.hdr.attrib.phy_info.recvpower; /* in dBm, raw data */
+ bssid->PhyInfo.SignalQuality = precv_frame->u.hdr.attrib.phy_info.SignalQuality;/* in percentage */
+ bssid->PhyInfo.SignalStrength = precv_frame->u.hdr.attrib.phy_info.SignalStrength;/* in percentage */
+ rtw_hal_get_def_var(padapter, HAL_DEF_CURRENT_ANTENNA, &bssid->PhyInfo.Optimum_antenna);
+
+ /* checking SSID */
+ p = rtw_get_ie(bssid->IEs + ie_offset, _SSID_IE_, &len, bssid->IELength - ie_offset);
+ if (p == NULL) {
+ DBG_88E("marc: cannot find SSID for survey event\n");
+ return _FAIL;
+ }
+
+ if (*(p + 1)) {
+ if (len > NDIS_802_11_LENGTH_SSID) {
+ DBG_88E("%s()-%d: IE too long (%d) for survey event\n", __func__, __LINE__, len);
+ return _FAIL;
+ }
+ memcpy(bssid->Ssid.Ssid, (p + 2), *(p + 1));
+ bssid->Ssid.SsidLength = *(p + 1);
+ } else {
+ bssid->Ssid.SsidLength = 0;
+ }
+
+ _rtw_memset(bssid->SupportedRates, 0, NDIS_802_11_LENGTH_RATES_EX);
+
+ /* checking rate info... */
+ i = 0;
+ p = rtw_get_ie(bssid->IEs + ie_offset, _SUPPORTEDRATES_IE_, &len, bssid->IELength - ie_offset);
+ if (p != NULL) {
+ if (len > NDIS_802_11_LENGTH_RATES_EX) {
+ DBG_88E("%s()-%d: IE too long (%d) for survey event\n", __func__, __LINE__, len);
+ return _FAIL;
+ }
+ memcpy(bssid->SupportedRates, (p + 2), len);
+ i = len;
+ }
+
+ p = rtw_get_ie(bssid->IEs + ie_offset, _EXT_SUPPORTEDRATES_IE_, &len, bssid->IELength - ie_offset);
+ if (p != NULL) {
+ if (len > (NDIS_802_11_LENGTH_RATES_EX-i)) {
+ DBG_88E("%s()-%d: IE too long (%d) for survey event\n", __func__, __LINE__, len);
+ return _FAIL;
+ }
+ memcpy(bssid->SupportedRates + i, (p + 2), len);
+ }
+
+ /* todo: */
+ bssid->NetworkTypeInUse = Ndis802_11OFDM24;
+
+ if (bssid->IELength < 12)
+ return _FAIL;
+
+ /* Checking for DSConfig */
+ p = rtw_get_ie(bssid->IEs + ie_offset, _DSSET_IE_, &len, bssid->IELength - ie_offset);
+
+ bssid->Configuration.DSConfig = 0;
+ bssid->Configuration.Length = 0;
+
+ if (p) {
+ bssid->Configuration.DSConfig = *(p + 2);
+	} else {/* In 5G, some APs do not carry the DSSET IE */
+ /* checking HT info for channel */
+ p = rtw_get_ie(bssid->IEs + ie_offset, _HT_ADD_INFO_IE_, &len, bssid->IELength - ie_offset);
+ if (p) {
+ struct HT_info_element *HT_info = (struct HT_info_element *)(p + 2);
+ bssid->Configuration.DSConfig = HT_info->primary_channel;
+ } else { /* use current channel */
+ bssid->Configuration.DSConfig = rtw_get_oper_ch(padapter);
+ }
+ }
+
+ if (subtype == WIFI_PROBEREQ) {
+ /* FIXME */
+ bssid->InfrastructureMode = Ndis802_11Infrastructure;
+ memcpy(bssid->MacAddress, GetAddr2Ptr(pframe), ETH_ALEN);
+ bssid->Privacy = 1;
+ return _SUCCESS;
+ }
+
+	le32_tmp = 0; /* beacon interval is a 2-byte field; clear the upper bytes first */
+	memcpy(&le32_tmp, rtw_get_beacon_interval_from_ie(bssid->IEs), 2);
+	bssid->Configuration.BeaconPeriod = le32_to_cpu(le32_tmp);
+
+ val16 = rtw_get_capability((struct wlan_bssid_ex *)bssid);
+
+ if (val16 & BIT(0)) {
+ bssid->InfrastructureMode = Ndis802_11Infrastructure;
+ memcpy(bssid->MacAddress, GetAddr2Ptr(pframe), ETH_ALEN);
+ } else {
+ bssid->InfrastructureMode = Ndis802_11IBSS;
+ memcpy(bssid->MacAddress, GetAddr3Ptr(pframe), ETH_ALEN);
+ }
+
+ if (val16 & BIT(4))
+ bssid->Privacy = 1;
+ else
+ bssid->Privacy = 0;
+
+ bssid->Configuration.ATIMWindow = 0;
+
+ /* 20/40 BSS Coexistence check */
+ if ((pregistrypriv->wifi_spec == 1) && (!pmlmeinfo->bwmode_updated)) {
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ p = rtw_get_ie(bssid->IEs + ie_offset, _HT_CAPABILITY_IE_, &len, bssid->IELength - ie_offset);
+ if (p && len > 0) {
+ struct HT_caps_element *pHT_caps;
+ pHT_caps = (struct HT_caps_element *)(p + 2);
+
+ if (le16_to_cpu(pHT_caps->u.HT_cap_element.HT_caps_info)&BIT(14))
+ pmlmepriv->num_FortyMHzIntolerant++;
+ } else {
+ pmlmepriv->num_sta_no_ht++;
+ }
+ }
+
+	/* mark bss info received from a nearby channel with SignalQuality 101 */
+ if (bssid->Configuration.DSConfig != rtw_get_oper_ch(padapter))
+ bssid->PhyInfo.SignalQuality = 101;
+ return _SUCCESS;
+}
+
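+/* Start as ad-hoc master: switch to the target channel, program beacon
+ * timing, set the MSR to ad-hoc state and report the join result based on
+ * whether the first beacon could be sent.
+ */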
+void start_create_ibss(struct adapter *padapter)
+{
+ unsigned short caps;
+ u8 val8;
+ u8 join_type;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&(pmlmeinfo->network));
+ pmlmeext->cur_channel = (u8)pnetwork->Configuration.DSConfig;
+ pmlmeinfo->bcn_interval = get_beacon_interval(pnetwork);
+
+ /* update wireless mode */
+ update_wireless_mode(padapter);
+
+	/* update capability */
+ caps = rtw_get_capability((struct wlan_bssid_ex *)pnetwork);
+ update_capinfo(padapter, caps);
+ if (caps&cap_IBSS) {/* adhoc master */
+ val8 = 0xcf;
+ rtw_hal_set_hwreg(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
+
+ /* switch channel */
+ /* SelectChannel(padapter, pmlmeext->cur_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE); */
+ set_channel_bwmode(padapter, pmlmeext->cur_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+
+ beacon_timing_control(padapter);
+
+ /* set msr to WIFI_FW_ADHOC_STATE */
+ pmlmeinfo->state = WIFI_FW_ADHOC_STATE;
+ Set_MSR(padapter, (pmlmeinfo->state & 0x3));
+
+ /* issue beacon */
+ if (send_beacon(padapter) == _FAIL) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("issuing beacon frame fail....\n"));
+
+ report_join_res(padapter, -1);
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ } else {
+ rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, padapter->registrypriv.dev_network.MacAddress);
+ join_type = 0;
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+
+ report_join_res(padapter, 1);
+ pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
+ }
+ } else {
+ DBG_88E("start_create_ibss, invalid cap:%x\n", caps);
+ return;
+ }
+}
+
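+/* Start joining as a client: for an ESS, switch to the BSS channel and arm
+ * the beacon-wait/assoc timers before authentication; for an IBSS, program
+ * beacon timing and report the join result immediately.
+ */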
+void start_clnt_join(struct adapter *padapter)
+{
+ unsigned short caps;
+ u8 val8;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&(pmlmeinfo->network));
+ int beacon_timeout;
+
+ pmlmeext->cur_channel = (u8)pnetwork->Configuration.DSConfig;
+ pmlmeinfo->bcn_interval = get_beacon_interval(pnetwork);
+
+ /* update wireless mode */
+ update_wireless_mode(padapter);
+
+	/* update capability */
+ caps = rtw_get_capability((struct wlan_bssid_ex *)pnetwork);
+ update_capinfo(padapter, caps);
+ if (caps&cap_ESS) {
+ Set_MSR(padapter, WIFI_FW_STATION_STATE);
+
+ val8 = (pmlmeinfo->auth_algo == dot11AuthAlgrthm_8021X) ? 0xcc : 0xcf;
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
+
+ /* switch channel */
+ set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+
+		/* wait here for a beacon to be received before starting auth, */
+		/* and arm a timer */
+ beacon_timeout = decide_wait_for_beacon_timeout(pmlmeinfo->bcn_interval);
+ set_link_timer(pmlmeext, beacon_timeout);
+ _set_timer(&padapter->mlmepriv.assoc_timer,
+ (REAUTH_TO * REAUTH_LIMIT) + (REASSOC_TO*REASSOC_LIMIT) + beacon_timeout);
+
+ pmlmeinfo->state = WIFI_FW_AUTH_NULL | WIFI_FW_STATION_STATE;
+ } else if (caps&cap_IBSS) { /* adhoc client */
+ Set_MSR(padapter, WIFI_FW_ADHOC_STATE);
+
+ val8 = 0xcf;
+ rtw_hal_set_hwreg(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
+
+ /* switch channel */
+ set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+
+ beacon_timing_control(padapter);
+
+ pmlmeinfo->state = WIFI_FW_ADHOC_STATE;
+
+ report_join_res(padapter, 1);
+ } else {
+ return;
+ }
+}
+
+void start_clnt_auth(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ _cancel_timer_ex(&pmlmeext->link_timer);
+
+ pmlmeinfo->state &= (~WIFI_FW_AUTH_NULL);
+ pmlmeinfo->state |= WIFI_FW_AUTH_STATE;
+
+ pmlmeinfo->auth_seq = 1;
+ pmlmeinfo->reauth_count = 0;
+ pmlmeinfo->reassoc_count = 0;
+ pmlmeinfo->link_count = 0;
+ pmlmeext->retry = 0;
+
+
+	/* Because the AP may not have received our previous deauth, it may */
+	/* 1) not respond to auth, or 2) deauth us after the link is complete. */
+	/* Issue a deauth before issuing auth to handle this situation. */
+	/* Commented by Albert 2012/07/21 */
+	/* For Win8 P2P connections, a successful connection is unlikely without this step. */
+ issue_deauth(padapter, (&(pmlmeinfo->network))->MacAddress, WLAN_REASON_DEAUTH_LEAVING);
+
+ DBG_88E_LEVEL(_drv_info_, "start auth\n");
+ issue_auth(padapter, NULL, 0);
+
+ set_link_timer(pmlmeext, REAUTH_TO);
+}
+
+
+void start_clnt_assoc(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ _cancel_timer_ex(&pmlmeext->link_timer);
+
+ pmlmeinfo->state &= (~(WIFI_FW_AUTH_NULL | WIFI_FW_AUTH_STATE));
+ pmlmeinfo->state |= (WIFI_FW_AUTH_SUCCESS | WIFI_FW_ASSOC_STATE);
+
+ issue_assocreq(padapter);
+
+ set_link_timer(pmlmeext, REASSOC_TO);
+}
+
+unsigned int receive_disconnect(struct adapter *padapter, unsigned char *MacAddr, unsigned short reason)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ /* check A3 */
+ if (!(_rtw_memcmp(MacAddr, get_my_bssid(&pmlmeinfo->network), ETH_ALEN)))
+ return _SUCCESS;
+
+ DBG_88E("%s\n", __func__);
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE) {
+ if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ report_del_sta_event(padapter, MacAddr, reason);
+ } else if (pmlmeinfo->state & WIFI_FW_LINKING_STATE) {
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ report_join_res(padapter, -2);
+ }
+ }
+ return _SUCCESS;
+}
+
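+/* Merge the AP's 802.11d country IE into the local channel plan: 2.4 GHz
+ * channels advertised by the AP are scanned actively, local-only 2.4 GHz
+ * channels fall back to passive scan (when 802.11g is enabled), 5 GHz entries
+ * are kept as-is, and the AP's own channel is always forced to active scan.
+ */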
+static void process_80211d(struct adapter *padapter, struct wlan_bssid_ex *bssid)
+{
+ struct registry_priv *pregistrypriv;
+ struct mlme_ext_priv *pmlmeext;
+ struct rt_channel_info *chplan_new;
+ u8 channel;
+ u8 i;
+
+ pregistrypriv = &padapter->registrypriv;
+ pmlmeext = &padapter->mlmeextpriv;
+
+ /* Adjust channel plan by AP Country IE */
+ if (pregistrypriv->enable80211d &&
+ (!pmlmeext->update_channel_plan_by_ap_done)) {
+ u8 *ie, *p;
+ u32 len;
+ struct rt_channel_plan chplan_ap;
+ struct rt_channel_info chplan_sta[MAX_CHANNEL_NUM];
+ u8 country[4];
+ u8 fcn; /* first channel number */
+ u8 noc; /* number of channel */
+ u8 j, k;
+
+ ie = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _COUNTRY_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+ if (!ie)
+ return;
+ if (len < 6)
+ return;
+ ie += 2;
+ p = ie;
+ ie += len;
+
+ _rtw_memset(country, 0, 4);
+ memcpy(country, p, 3);
+ p += 3;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
+ ("%s: 802.11d country =%s\n", __func__, country));
+
+ i = 0;
+ while ((ie - p) >= 3) {
+ fcn = *(p++);
+ noc = *(p++);
+ p++;
+
+ for (j = 0; j < noc; j++) {
+ if (fcn <= 14)
+ channel = fcn + j; /* 2.4 GHz */
+ else
+ channel = fcn + j*4; /* 5 GHz */
+
+ chplan_ap.Channel[i++] = channel;
+ }
+ }
+ chplan_ap.Len = i;
+
+ memcpy(chplan_sta, pmlmeext->channel_set, sizeof(chplan_sta));
+
+ _rtw_memset(pmlmeext->channel_set, 0, sizeof(pmlmeext->channel_set));
+ chplan_new = pmlmeext->channel_set;
+
+ i = 0;
+ j = 0;
+ k = 0;
+ if (pregistrypriv->wireless_mode & WIRELESS_11G) {
+ do {
+ if ((i == MAX_CHANNEL_NUM) ||
+ (chplan_sta[i].ChannelNum == 0) ||
+ (chplan_sta[i].ChannelNum > 14))
+ break;
+
+ if ((j == chplan_ap.Len) || (chplan_ap.Channel[j] > 14))
+ break;
+
+ if (chplan_sta[i].ChannelNum == chplan_ap.Channel[j]) {
+ chplan_new[k].ChannelNum = chplan_ap.Channel[j];
+ chplan_new[k].ScanType = SCAN_ACTIVE;
+ i++;
+ j++;
+ k++;
+ } else if (chplan_sta[i].ChannelNum < chplan_ap.Channel[j]) {
+ chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
+ chplan_new[k].ScanType = SCAN_PASSIVE;
+ i++;
+ k++;
+ } else if (chplan_sta[i].ChannelNum > chplan_ap.Channel[j]) {
+ chplan_new[k].ChannelNum = chplan_ap.Channel[j];
+ chplan_new[k].ScanType = SCAN_ACTIVE;
+ j++;
+ k++;
+ }
+ } while (1);
+
+			/* change channels the AP does not support to passive scan */
+ while ((i < MAX_CHANNEL_NUM) &&
+ (chplan_sta[i].ChannelNum != 0) &&
+ (chplan_sta[i].ChannelNum <= 14)) {
+ chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
+ chplan_new[k].ScanType = SCAN_PASSIVE;
+ i++;
+ k++;
+ }
+
+			/* add channels that the AP supports */
+ while ((j < chplan_ap.Len) && (chplan_ap.Channel[j] <= 14)) {
+ chplan_new[k].ChannelNum = chplan_ap.Channel[j];
+ chplan_new[k].ScanType = SCAN_ACTIVE;
+ j++;
+ k++;
+ }
+ } else {
+ /* keep original STA 2.4G channel plan */
+ while ((i < MAX_CHANNEL_NUM) &&
+ (chplan_sta[i].ChannelNum != 0) &&
+ (chplan_sta[i].ChannelNum <= 14)) {
+ chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
+ chplan_new[k].ScanType = chplan_sta[i].ScanType;
+ i++;
+ k++;
+ }
+
+ /* skip AP 2.4G channel plan */
+ while ((j < chplan_ap.Len) && (chplan_ap.Channel[j] <= 14))
+ j++;
+ }
+
+ /* keep original STA 5G channel plan */
+ while ((i < MAX_CHANNEL_NUM) && (chplan_sta[i].ChannelNum != 0)) {
+ chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
+ chplan_new[k].ScanType = chplan_sta[i].ScanType;
+ i++;
+ k++;
+ }
+
+ pmlmeext->update_channel_plan_by_ap_done = 1;
+ }
+
+ /* If channel is used by AP, set channel scan type to active */
+ channel = bssid->Configuration.DSConfig;
+ chplan_new = pmlmeext->channel_set;
+ i = 0;
+ while ((i < MAX_CHANNEL_NUM) && (chplan_new[i].ChannelNum != 0)) {
+ if (chplan_new[i].ChannelNum == channel) {
+ if (chplan_new[i].ScanType == SCAN_PASSIVE) {
+ chplan_new[i].ScanType = SCAN_ACTIVE;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
+ ("%s: change channel %d scan type from passive to active\n",
+ __func__, channel));
+ }
+ break;
+ }
+ i++;
+ }
+}
+
+/****************************************************************************
+
+Following are the functions to report events
+
+*****************************************************************************/
+
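+/* Wrap the bss info collected from a beacon/probe frame into a C2H survey
+ * event and queue it to the command thread.
+ */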
+void report_survey_event(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ struct cmd_obj *pcmd_obj;
+ u8 *pevtcmd;
+ u32 cmdsz;
+ struct survey_event *psurvey_evt;
+ struct C2HEvent_Header *pc2h_evt_hdr;
+ struct mlme_ext_priv *pmlmeext;
+ struct cmd_priv *pcmdpriv;
+ /* u8 *pframe = precv_frame->u.hdr.rx_data; */
+ /* uint len = precv_frame->u.hdr.len; */
+
+ if (!padapter)
+ return;
+
+ pmlmeext = &padapter->mlmeextpriv;
+ pcmdpriv = &padapter->cmdpriv;
+
+
+ pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd_obj == NULL)
+ return;
+
+ cmdsz = (sizeof(struct survey_event) + sizeof(struct C2HEvent_Header));
+ pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ if (pevtcmd == NULL) {
+ kfree(pcmd_obj);
+ return;
+ }
+
+ _rtw_init_listhead(&pcmd_obj->list);
+
+ pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
+ pcmd_obj->cmdsz = cmdsz;
+ pcmd_obj->parmbuf = pevtcmd;
+
+ pcmd_obj->rsp = NULL;
+ pcmd_obj->rspsz = 0;
+
+ pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct survey_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_Survey);
+ pc2h_evt_hdr->seq = ATOMIC_INC_RETURN(&pmlmeext->event_seq);
+
+ psurvey_evt = (struct survey_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
+
+ if (collect_bss_info(padapter, precv_frame, (struct wlan_bssid_ex *)&psurvey_evt->bss) == _FAIL) {
+ kfree(pcmd_obj);
+ kfree(pevtcmd);
+ return;
+ }
+
+ process_80211d(padapter, &psurvey_evt->bss);
+
+ rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
+
+ pmlmeext->sitesurvey_res.bss_cnt++;
+
+ return;
+}
+
+void report_surveydone_event(struct adapter *padapter)
+{
+ struct cmd_obj *pcmd_obj;
+ u8 *pevtcmd;
+ u32 cmdsz;
+ struct surveydone_event *psurveydone_evt;
+ struct C2HEvent_Header *pc2h_evt_hdr;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd_obj == NULL)
+ return;
+
+ cmdsz = (sizeof(struct surveydone_event) + sizeof(struct C2HEvent_Header));
+ pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ if (pevtcmd == NULL) {
+ kfree(pcmd_obj);
+ return;
+ }
+
+ _rtw_init_listhead(&pcmd_obj->list);
+
+ pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
+ pcmd_obj->cmdsz = cmdsz;
+ pcmd_obj->parmbuf = pevtcmd;
+
+ pcmd_obj->rsp = NULL;
+ pcmd_obj->rspsz = 0;
+
+ pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct surveydone_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_SurveyDone);
+ pc2h_evt_hdr->seq = ATOMIC_INC_RETURN(&pmlmeext->event_seq);
+
+ psurveydone_evt = (struct surveydone_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
+ psurveydone_evt->bss_cnt = pmlmeext->sitesurvey_res.bss_cnt;
+
+ DBG_88E("survey done event(%x)\n", psurveydone_evt->bss_cnt);
+
+ rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
+
+ return;
+}
+
+void report_join_res(struct adapter *padapter, int res)
+{
+ struct cmd_obj *pcmd_obj;
+ u8 *pevtcmd;
+ u32 cmdsz;
+ struct joinbss_event *pjoinbss_evt;
+ struct C2HEvent_Header *pc2h_evt_hdr;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd_obj == NULL)
+ return;
+
+ cmdsz = (sizeof(struct joinbss_event) + sizeof(struct C2HEvent_Header));
+ pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ if (pevtcmd == NULL) {
+ kfree(pcmd_obj);
+ return;
+ }
+
+ _rtw_init_listhead(&pcmd_obj->list);
+
+ pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
+ pcmd_obj->cmdsz = cmdsz;
+ pcmd_obj->parmbuf = pevtcmd;
+
+ pcmd_obj->rsp = NULL;
+ pcmd_obj->rspsz = 0;
+
+ pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct joinbss_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_JoinBss);
+ pc2h_evt_hdr->seq = ATOMIC_INC_RETURN(&pmlmeext->event_seq);
+
+ pjoinbss_evt = (struct joinbss_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
+ memcpy((unsigned char *)(&(pjoinbss_evt->network.network)), &(pmlmeinfo->network), sizeof(struct wlan_bssid_ex));
+ pjoinbss_evt->network.join_res = res;
+ pjoinbss_evt->network.aid = res;
+
+ DBG_88E("report_join_res(%d)\n", res);
+
+
+ rtw_joinbss_event_prehandle(padapter, (u8 *)&pjoinbss_evt->network);
+
+
+ rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
+
+ return;
+}
+
+void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsigned short reason)
+{
+ struct cmd_obj *pcmd_obj;
+ u8 *pevtcmd;
+ u32 cmdsz;
+ struct sta_info *psta;
+ int mac_id;
+ struct stadel_event *pdel_sta_evt;
+ struct C2HEvent_Header *pc2h_evt_hdr;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd_obj == NULL)
+ return;
+
+ cmdsz = (sizeof(struct stadel_event) + sizeof(struct C2HEvent_Header));
+ pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ if (pevtcmd == NULL) {
+ kfree(pcmd_obj);
+ return;
+ }
+
+ _rtw_init_listhead(&pcmd_obj->list);
+
+ pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
+ pcmd_obj->cmdsz = cmdsz;
+ pcmd_obj->parmbuf = pevtcmd;
+
+ pcmd_obj->rsp = NULL;
+ pcmd_obj->rspsz = 0;
+
+ pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct stadel_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_DelSTA);
+ pc2h_evt_hdr->seq = ATOMIC_INC_RETURN(&pmlmeext->event_seq);
+
+ pdel_sta_evt = (struct stadel_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
+ memcpy((unsigned char *)(&(pdel_sta_evt->macaddr)), MacAddr, ETH_ALEN);
+ memcpy((unsigned char *)(pdel_sta_evt->rsvd), (unsigned char *)(&reason), 2);
+
+
+ psta = rtw_get_stainfo(&padapter->stapriv, MacAddr);
+ if (psta)
+ mac_id = (int)psta->mac_id;
+ else
+ mac_id = (-1);
+
+ pdel_sta_evt->mac_id = mac_id;
+
+ DBG_88E("report_del_sta_event: delete STA, mac_id =%d\n", mac_id);
+
+ rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
+
+ return;
+}
+
+void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int cam_idx)
+{
+ struct cmd_obj *pcmd_obj;
+ u8 *pevtcmd;
+ u32 cmdsz;
+ struct stassoc_event *padd_sta_evt;
+ struct C2HEvent_Header *pc2h_evt_hdr;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd_obj == NULL)
+ return;
+
+ cmdsz = (sizeof(struct stassoc_event) + sizeof(struct C2HEvent_Header));
+ pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ if (pevtcmd == NULL) {
+ kfree(pcmd_obj);
+ return;
+ }
+
+ _rtw_init_listhead(&pcmd_obj->list);
+
+ pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
+ pcmd_obj->cmdsz = cmdsz;
+ pcmd_obj->parmbuf = pevtcmd;
+
+ pcmd_obj->rsp = NULL;
+ pcmd_obj->rspsz = 0;
+
+ pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct stassoc_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_AddSTA);
+ pc2h_evt_hdr->seq = ATOMIC_INC_RETURN(&pmlmeext->event_seq);
+
+ padd_sta_evt = (struct stassoc_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
+ memcpy((unsigned char *)(&(padd_sta_evt->macaddr)), MacAddr, ETH_ALEN);
+ padd_sta_evt->cam_id = cam_idx;
+
+ DBG_88E("report_add_sta_event: add STA\n");
+
+ rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
+
+ return;
+}
+
+
+/****************************************************************************
+
+Following are the event callback functions
+
+*****************************************************************************/
+
+/* for sta/adhoc mode */
+void update_sta_info(struct adapter *padapter, struct sta_info *psta)
+{
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ /* ERP */
+ VCS_update(padapter, psta);
+
+ /* HT */
+ if (pmlmepriv->htpriv.ht_option) {
+ psta->htpriv.ht_option = true;
+
+ psta->htpriv.ampdu_enable = pmlmepriv->htpriv.ampdu_enable;
+
+ if (support_short_GI(padapter, &(pmlmeinfo->HT_caps)))
+ psta->htpriv.sgi = true;
+
+ psta->qos_option = true;
+ } else {
+ psta->htpriv.ht_option = false;
+
+ psta->htpriv.ampdu_enable = false;
+
+ psta->htpriv.sgi = false;
+ psta->qos_option = false;
+ }
+ psta->htpriv.bwmode = pmlmeext->cur_bwmode;
+ psta->htpriv.ch_offset = pmlmeext->cur_ch_offset;
+
+ psta->htpriv.agg_enable_bitmap = 0x0;/* reset */
+ psta->htpriv.candidate_tid_bitmap = 0x0;/* reset */
+
+ /* QoS */
+ if (pmlmepriv->qospriv.qos_option)
+ psta->qos_option = true;
+
+
+ psta->state = _FW_LINKED;
+}
+
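+/* MLME-ext join-result callback: on success program basic rates, WMM, HT and
+ * channel/bandwidth settings for the new association; on failure restore the
+ * no-link hardware state.
+ */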
+void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
+{
+ struct sta_info *psta, *psta_bmc;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 join_type;
+ u16 media_status;
+
+ if (join_res < 0) {
+ join_type = 1;
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+ rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, null_addr);
+
+ /* restore to initial setting. */
+ update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
+
+ goto exit_mlmeext_joinbss_event_callback;
+ }
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) {
+ /* for bc/mc */
+ psta_bmc = rtw_get_bcmc_stainfo(padapter);
+ if (psta_bmc) {
+ pmlmeinfo->FW_sta_info[psta_bmc->mac_id].psta = psta_bmc;
+ update_bmc_sta_support_rate(padapter, psta_bmc->mac_id);
+ Update_RA_Entry(padapter, psta_bmc->mac_id);
+ }
+ }
+
+
+ /* turn on dynamic functions */
+ Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true);
+
+	/* update IOT-related info */
+ update_IOT_info(padapter);
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_BASIC_RATE, cur_network->SupportedRates);
+
+ /* BCN interval */
+ rtw_hal_set_hwreg(padapter, HW_VAR_BEACON_INTERVAL, (u8 *)(&pmlmeinfo->bcn_interval));
+
+	/* update capability */
+ update_capinfo(padapter, pmlmeinfo->capability);
+
+ /* WMM, Update EDCA param */
+ WMMOnAssocRsp(padapter);
+
+ /* HT */
+ HTOnAssocRsp(padapter);
+
+ set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+
+ psta = rtw_get_stainfo(pstapriv, cur_network->MacAddress);
+ if (psta) { /* only for infra. mode */
+ pmlmeinfo->FW_sta_info[psta->mac_id].psta = psta;
+
+ psta->wireless_mode = pmlmeext->cur_wireless_mode;
+
+ /* set per sta rate after updating HT cap. */
+ set_sta_rate(padapter, psta);
+ rtw_hal_set_hwreg(padapter, HW_VAR_TX_RPT_MAX_MACID, (u8 *)&psta->mac_id);
+ media_status = (psta->mac_id<<8)|1; /* MACID|OPMODE: 1 means connect */
+ rtw_hal_set_hwreg(padapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status);
+ }
+
+ join_type = 2;
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE) {
+ /* correcting TSF */
+ correct_TSF(padapter, pmlmeext);
+ }
+ rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_CONNECT, 0);
+
+exit_mlmeext_joinbss_event_callback:
+
+ DBG_88E("=>%s\n", __func__);
+}
+
+void mlmeext_sta_add_event_callback(struct adapter *padapter, struct sta_info *psta)
+{
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 join_type;
+
+ DBG_88E("%s\n", __func__);
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) {
+ if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {/* adhoc master or sta_count>1 */
+ /* nothing to do */
+ } else { /* adhoc client */
+ /* correcting TSF */
+ correct_TSF(padapter, pmlmeext);
+
+ /* start beacon */
+ if (send_beacon(padapter) == _FAIL) {
+ pmlmeinfo->FW_sta_info[psta->mac_id].status = 0;
+ pmlmeinfo->state ^= WIFI_FW_ADHOC_STATE;
+ return;
+ }
+ pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
+ }
+
+ join_type = 2;
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+ }
+
+ pmlmeinfo->FW_sta_info[psta->mac_id].psta = psta;
+
+	/* rate adaptive */
+ Update_RA_Entry(padapter, psta->mac_id);
+
+ /* update adhoc sta_info */
+ update_sta_info(padapter, psta);
+}
+
+void mlmeext_sta_del_event_callback(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (is_client_associated_to_ap(padapter) || is_IBSS_empty(padapter)) {
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_DISCONNECT, NULL);
+ rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, null_addr);
+
+ /* restore to initial setting. */
+ update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
+
+		/* switch to 20 MHz mode after disconnect */
+ pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+
+ /* SelectChannel(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset); */
+ set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+
+
+ flush_all_cam_entry(padapter);
+
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+
+ /* set MSR to no link state -> infra. mode */
+ Set_MSR(padapter, _HW_STATE_STATION_);
+
+ _cancel_timer_ex(&pmlmeext->link_timer);
+ }
+}
+
+/****************************************************************************
+
+Following are the functions for the timer handlers
+
+*****************************************************************************/
+void _linked_rx_signal_strehgth_display(struct adapter *padapter);
+void _linked_rx_signal_strehgth_display(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+	u8 mac_id = 0;	/* default to 0 so mac_id is always initialized */
+ int UndecoratedSmoothedPWDB;
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE)
+ mac_id = 0;
+ else if ((pmlmeinfo->state&0x03) == _HW_STATE_AP_)
+ mac_id = 2;
+
+ rtw_hal_get_def_var(padapter, HW_DEF_RA_INFO_DUMP, &mac_id);
+
+ rtw_hal_get_def_var(padapter, HAL_DEF_UNDERCORATEDSMOOTHEDPWDB, &UndecoratedSmoothedPWDB);
+ DBG_88E("UndecoratedSmoothedPWDB:%d\n", UndecoratedSmoothedPWDB);
+}
+
+static u8 chk_ap_is_alive(struct adapter *padapter, struct sta_info *psta)
+{
+ u8 ret = false;
+
+ if ((sta_rx_data_pkts(psta) == sta_last_rx_data_pkts(psta)) &&
+ sta_rx_beacon_pkts(psta) == sta_last_rx_beacon_pkts(psta) &&
+ sta_rx_probersp_pkts(psta) == sta_last_rx_probersp_pkts(psta))
+ ret = false;
+ else
+ ret = true;
+
+ sta_update_last_rx_pkts(psta);
+
+ return ret;
+}
+
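+/* Periodic link watchdog: in station mode probe the AP (probe request / null
+ * data) when no traffic or beacons were seen and disconnect after the retry
+ * limit is reached; in IBSS mode age out peers that stopped sending frames.
+ */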
+void linked_status_chk(struct adapter *padapter)
+{
+ u32 i;
+ struct sta_info *psta;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ if (padapter->bRxRSSIDisplay)
+ _linked_rx_signal_strehgth_display(padapter);
+
+ rtw_hal_sreset_linked_status_check(padapter);
+
+ if (is_client_associated_to_ap(padapter)) {
+ /* linked infrastructure client mode */
+
+ int tx_chk = _SUCCESS, rx_chk = _SUCCESS;
+ int rx_chk_limit;
+
+ rx_chk_limit = 4;
+ psta = rtw_get_stainfo(pstapriv, pmlmeinfo->network.MacAddress);
+ if (psta != NULL) {
+ bool is_p2p_enable = false;
+ #ifdef CONFIG_88EU_P2P
+ is_p2p_enable = !rtw_p2p_chk_state(&padapter->wdinfo, P2P_STATE_NONE);
+ #endif
+
+ if (!chk_ap_is_alive(padapter, psta))
+ rx_chk = _FAIL;
+
+ if (pxmitpriv->last_tx_pkts == pxmitpriv->tx_pkts)
+ tx_chk = _FAIL;
+
+ if (pmlmeext->active_keep_alive_check && (rx_chk == _FAIL || tx_chk == _FAIL)) {
+ u8 backup_oper_channel = 0;
+
+				/* switch to the correct channel of the current network before issuing keep-alive frames */
+ if (rtw_get_oper_ch(padapter) != pmlmeext->cur_channel) {
+ backup_oper_channel = rtw_get_oper_ch(padapter);
+ SelectChannel(padapter, pmlmeext->cur_channel);
+ }
+
+ if (rx_chk != _SUCCESS)
+ issue_probereq_ex(padapter, &pmlmeinfo->network.Ssid, psta->hwaddr, 3, 1);
+
+ if ((tx_chk != _SUCCESS && pmlmeinfo->link_count++ == 0xf) || rx_chk != _SUCCESS) {
+ tx_chk = issue_nulldata(padapter, psta->hwaddr, 0, 3, 1);
+ /* if tx acked and p2p disabled, set rx_chk _SUCCESS to reset retry count */
+ if (tx_chk == _SUCCESS && !is_p2p_enable)
+ rx_chk = _SUCCESS;
+ }
+
+ /* back to the original operation channel */
+ if (backup_oper_channel > 0)
+ SelectChannel(padapter, backup_oper_channel);
+ } else {
+ if (rx_chk != _SUCCESS) {
+ if (pmlmeext->retry == 0) {
+ issue_probereq(padapter, &pmlmeinfo->network.Ssid, pmlmeinfo->network.MacAddress);
+ issue_probereq(padapter, &pmlmeinfo->network.Ssid, pmlmeinfo->network.MacAddress);
+ issue_probereq(padapter, &pmlmeinfo->network.Ssid, pmlmeinfo->network.MacAddress);
+ }
+ }
+
+ if (tx_chk != _SUCCESS && pmlmeinfo->link_count++ == 0xf) {
+ tx_chk = issue_nulldata(padapter, NULL, 0, 1, 0);
+ }
+ }
+
+ if (rx_chk == _FAIL) {
+ pmlmeext->retry++;
+ if (pmlmeext->retry > rx_chk_limit) {
+ DBG_88E_LEVEL(_drv_always_, FUNC_ADPT_FMT" disconnect or roaming\n",
+ FUNC_ADPT_ARG(padapter));
+ receive_disconnect(padapter, pmlmeinfo->network.MacAddress,
+ WLAN_REASON_EXPIRATION_CHK);
+ return;
+ }
+ } else {
+ pmlmeext->retry = 0;
+ }
+
+ if (tx_chk == _FAIL) {
+ pmlmeinfo->link_count &= 0xf;
+ } else {
+ pxmitpriv->last_tx_pkts = pxmitpriv->tx_pkts;
+ pmlmeinfo->link_count = 0;
+ }
+		} /* end of if ((psta = rtw_get_stainfo(pstapriv, pmlmeinfo->network.MacAddress)) != NULL) */
+ } else if (is_client_associated_to_ibss(padapter)) {
+ /* linked IBSS mode */
+ /* for each assoc list entry to check the rx pkt counter */
+ for (i = IBSS_START_MAC_ID; i < NUM_STA; i++) {
+ if (pmlmeinfo->FW_sta_info[i].status == 1) {
+ psta = pmlmeinfo->FW_sta_info[i].psta;
+
+ if (NULL == psta)
+ continue;
+ if (pmlmeinfo->FW_sta_info[i].rx_pkt == sta_rx_pkts(psta)) {
+ if (pmlmeinfo->FW_sta_info[i].retry < 3) {
+ pmlmeinfo->FW_sta_info[i].retry++;
+ } else {
+ pmlmeinfo->FW_sta_info[i].retry = 0;
+ pmlmeinfo->FW_sta_info[i].status = 0;
+ report_del_sta_event(padapter, psta->hwaddr
+ , 65535/* indicate disconnect caused by no rx */
+ );
+ }
+ } else {
+ pmlmeinfo->FW_sta_info[i].retry = 0;
+ pmlmeinfo->FW_sta_info[i].rx_pkt = (u32)sta_rx_pkts(psta);
+ }
+ }
+ }
+ }
+}
+
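+/* Survey timer callback: advance the channel index (or jump to the end of the
+ * list on scan abort) and queue a _SiteSurvey command to continue the scan.
+ */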
+void survey_timer_hdl(struct adapter *padapter)
+{
+ struct cmd_obj *ph2c;
+ struct sitesurvey_parm *psurveyPara;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+#endif
+
+ /* issue rtw_sitesurvey_cmd */
+ if (pmlmeext->sitesurvey_res.state > SCAN_START) {
+ if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS)
+ pmlmeext->sitesurvey_res.channel_idx++;
+
+ if (pmlmeext->scan_abort) {
+ #ifdef CONFIG_88EU_P2P
+ if (!rtw_p2p_chk_state(&padapter->wdinfo, P2P_STATE_NONE)) {
+ rtw_p2p_findphase_ex_set(pwdinfo, P2P_FINDPHASE_EX_MAX);
+ pmlmeext->sitesurvey_res.channel_idx = 3;
+ DBG_88E("%s idx:%d, cnt:%u\n", __func__
+ , pmlmeext->sitesurvey_res.channel_idx
+ , pwdinfo->find_phase_state_exchange_cnt
+ );
+ } else
+ #endif
+ {
+ pmlmeext->sitesurvey_res.channel_idx = pmlmeext->sitesurvey_res.ch_num;
+ DBG_88E("%s idx:%d\n", __func__
+ , pmlmeext->sitesurvey_res.channel_idx
+ );
+ }
+
+ pmlmeext->scan_abort = false;/* reset */
+ }
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL)
+ goto exit_survey_timer_hdl;
+
+ psurveyPara = (struct sitesurvey_parm *)rtw_zmalloc(sizeof(struct sitesurvey_parm));
+ if (psurveyPara == NULL) {
+ kfree(ph2c);
+ goto exit_survey_timer_hdl;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara, GEN_CMD_CODE(_SiteSurvey));
+ rtw_enqueue_cmd(pcmdpriv, ph2c);
+ }
+
+
+exit_survey_timer_hdl:
+ return;
+}
+
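+/* Link timer callback: handle beacon/auth/assoc timeouts during connection
+ * setup, retrying up to REAUTH_LIMIT / REASSOC_LIMIT before reporting a
+ * failed join.
+ */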
+void link_timer_hdl(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (pmlmeinfo->state & WIFI_FW_AUTH_NULL) {
+ DBG_88E("link_timer_hdl:no beacon while connecting\n");
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ report_join_res(padapter, -3);
+ } else if (pmlmeinfo->state & WIFI_FW_AUTH_STATE) {
+ /* re-auth timer */
+ if (++pmlmeinfo->reauth_count > REAUTH_LIMIT) {
+ pmlmeinfo->state = 0;
+ report_join_res(padapter, -1);
+ return;
+ }
+
+ DBG_88E("link_timer_hdl: auth timeout and try again\n");
+ pmlmeinfo->auth_seq = 1;
+ issue_auth(padapter, NULL, 0);
+ set_link_timer(pmlmeext, REAUTH_TO);
+ } else if (pmlmeinfo->state & WIFI_FW_ASSOC_STATE) {
+ /* re-assoc timer */
+ if (++pmlmeinfo->reassoc_count > REASSOC_LIMIT) {
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ report_join_res(padapter, -2);
+ return;
+ }
+
+ DBG_88E("link_timer_hdl: assoc timeout and try again\n");
+ issue_assocreq(padapter);
+ set_link_timer(pmlmeext, REASSOC_TO);
+ }
+ return;
+}
+
+void addba_timer_hdl(struct sta_info *psta)
+{
+ struct ht_priv *phtpriv;
+
+ if (!psta)
+ return;
+
+ phtpriv = &psta->htpriv;
+
+ if ((phtpriv->ht_option) && (phtpriv->ampdu_enable)) {
+ if (phtpriv->candidate_tid_bitmap)
+ phtpriv->candidate_tid_bitmap = 0x0;
+ }
+}
+
+u8 NULL_hdl(struct adapter *padapter, u8 *pbuf)
+{
+ return H2C_SUCCESS;
+}
+
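+/* H2C handler: translate the requested NDIS infrastructure mode into the
+ * driver's firmware state and the hardware port type.
+ */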
+u8 setopmode_hdl(struct adapter *padapter, u8 *pbuf)
+{
+ u8 type;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct setopmode_parm *psetop = (struct setopmode_parm *)pbuf;
+
+ if (psetop->mode == Ndis802_11APMode) {
+ pmlmeinfo->state = WIFI_FW_AP_STATE;
+ type = _HW_STATE_AP_;
+ } else if (psetop->mode == Ndis802_11Infrastructure) {
+ pmlmeinfo->state &= ~(BIT(0)|BIT(1));/* clear state */
+ pmlmeinfo->state |= WIFI_FW_STATION_STATE;/* set to STATION_STATE */
+ type = _HW_STATE_STATION_;
+ } else if (psetop->mode == Ndis802_11IBSS) {
+ type = _HW_STATE_ADHOC_;
+ } else {
+ type = _HW_STATE_NOLINK_;
+ }
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_SET_OPMODE, (u8 *)(&type));
+ /* Set_NETYPE0_MSR(padapter, type); */
+
+ return H2C_SUCCESS;
+}
+
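+/* H2C handler for creating a BSS: for ad-hoc master mode, reset the join
+ * state, copy the target network description and start the IBSS.
+ */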
+u8 createbss_hdl(struct adapter *padapter, u8 *pbuf)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&(pmlmeinfo->network));
+ struct joinbss_parm *pparm = (struct joinbss_parm *)pbuf;
+ /* u32 initialgain; */
+
+
+ if (pparm->network.InfrastructureMode == Ndis802_11APMode) {
+#ifdef CONFIG_88EU_AP_MODE
+
+ if (pmlmeinfo->state == WIFI_FW_AP_STATE) {
+ /* todo: */
+ return H2C_SUCCESS;
+ }
+#endif
+ }
+
+ /* below is for ad-hoc master */
+ if (pparm->network.InfrastructureMode == Ndis802_11IBSS) {
+ rtw_joinbss_reset(padapter);
+
+ pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ pmlmeinfo->ERP_enable = 0;
+ pmlmeinfo->WMM_enable = 0;
+ pmlmeinfo->HT_enable = 0;
+ pmlmeinfo->HT_caps_enable = 0;
+ pmlmeinfo->HT_info_enable = 0;
+ pmlmeinfo->agg_enable_bitmap = 0;
+ pmlmeinfo->candidate_tid_bitmap = 0;
+
+ /* disable dynamic functions, such as high power, DIG */
+ Save_DM_Func_Flag(padapter);
+ Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false);
+
+ /* config the initial gain under linking, need to write the BB registers */
+ /* initialgain = 0x1E; */
+ /* rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain)); */
+
+ /* cancel link timer */
+ _cancel_timer_ex(&pmlmeext->link_timer);
+
+ /* clear CAM */
+ flush_all_cam_entry(padapter);
+
+ memcpy(pnetwork, pbuf, FIELD_OFFSET(struct wlan_bssid_ex, IELength));
+ pnetwork->IELength = ((struct wlan_bssid_ex *)pbuf)->IELength;
+
+ if (pnetwork->IELength > MAX_IE_SZ)/* Check pbuf->IELength */
+ return H2C_PARAMETERS_ERROR;
+
+ memcpy(pnetwork->IEs, ((struct wlan_bssid_ex *)pbuf)->IEs, pnetwork->IELength);
+
+ start_create_ibss(padapter);
+ }
+
+ return H2C_SUCCESS;
+}
+
+u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
+{
+ u8 join_type;
+ struct ndis_802_11_var_ie *pIE;
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&(pmlmeinfo->network));
+ struct joinbss_parm *pparm = (struct joinbss_parm *)pbuf;
+ u32 i;
+
+ /* check already connecting to AP or not */
+ if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {
+ if (pmlmeinfo->state & WIFI_FW_STATION_STATE)
+ issue_deauth_ex(padapter, pnetwork->MacAddress, WLAN_REASON_DEAUTH_LEAVING, 5, 100);
+
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+
+ /* clear CAM */
+ flush_all_cam_entry(padapter);
+
+ _cancel_timer_ex(&pmlmeext->link_timer);
+
+ /* set MSR to nolink -> infra. mode */
+ Set_MSR(padapter, _HW_STATE_STATION_);
+
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_DISCONNECT, NULL);
+ }
+
+ rtw_antenna_select_cmd(padapter, pparm->network.PhyInfo.Optimum_antenna, false);
+
+ rtw_joinbss_reset(padapter);
+
+ pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ pmlmeinfo->ERP_enable = 0;
+ pmlmeinfo->WMM_enable = 0;
+ pmlmeinfo->HT_enable = 0;
+ pmlmeinfo->HT_caps_enable = 0;
+ pmlmeinfo->HT_info_enable = 0;
+ pmlmeinfo->agg_enable_bitmap = 0;
+ pmlmeinfo->candidate_tid_bitmap = 0;
+ pmlmeinfo->bwmode_updated = false;
+
+ memcpy(pnetwork, pbuf, FIELD_OFFSET(struct wlan_bssid_ex, IELength));
+ pnetwork->IELength = ((struct wlan_bssid_ex *)pbuf)->IELength;
+
+ if (pnetwork->IELength > MAX_IE_SZ)/* Check pbuf->IELength */
+ return H2C_PARAMETERS_ERROR;
+
+ memcpy(pnetwork->IEs, ((struct wlan_bssid_ex *)pbuf)->IEs, pnetwork->IELength);
+
+ /* Check AP vendor to move rtw_joinbss_cmd() */
+
+ for (i = sizeof(struct ndis_802_11_fixed_ie); i < pnetwork->IELength;) {
+ pIE = (struct ndis_802_11_var_ie *)(pnetwork->IEs + i);
+
+ switch (pIE->ElementID) {
+ case _VENDOR_SPECIFIC_IE_:/* Get WMM IE. */
+ if (_rtw_memcmp(pIE->data, WMM_OUI, 4))
+ pmlmeinfo->WMM_enable = 1;
+ break;
+ case _HT_CAPABILITY_IE_: /* Get HT Cap IE. */
+ pmlmeinfo->HT_caps_enable = 1;
+ break;
+ case _HT_EXTRA_INFO_IE_: /* Get HT Info IE. */
+ pmlmeinfo->HT_info_enable = 1;
+
+			/* special case only for Cisco APs, because Cisco APs issue the assoc rsp using an MCS rate @40MHz or @20MHz */
+ {
+ struct HT_info_element *pht_info = (struct HT_info_element *)(pIE->data);
+
+ if ((pregpriv->cbw40_enable) && (pht_info->infos[0] & BIT(2))) {
+ /* switch to the 40M Hz mode according to the AP */
+ pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_40;
+ switch (pht_info->infos[0] & 0x3) {
+ case 1:
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER;
+ break;
+ case 3:
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER;
+ break;
+ default:
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ break;
+ }
+
+ DBG_88E("set ch/bw before connected\n");
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ i += (pIE->Length + 2);
+ }
+ /* disable dynamic functions, such as high power, DIG */
+
+ /* config the initial gain under linking, need to write the BB registers */
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, pmlmeinfo->network.MacAddress);
+ join_type = 0;
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+
+ /* cancel link timer */
+ _cancel_timer_ex(&pmlmeext->link_timer);
+
+ start_clnt_join(padapter);
+
+ return H2C_SUCCESS;
+}
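
The IE loop above walks standard 802.11 TLV elements, advancing by Length + 2 each step. A minimal generic sketch of that walk (illustrative only; next_ie() is not a helper in this driver):

	/* advance to the next information element; returns NULL if the buffer is truncated */
	static const u8 *next_ie(const u8 *ie, const u8 *end)
	{
		if (ie + 2 > end || ie + 2 + ie[1] > end)
			return NULL;
		return ie + 2 + ie[1];	/* skip ElementID, Length and payload */
	}
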
+
+u8 disconnect_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ struct disconnect_parm *param = (struct disconnect_parm *)pbuf;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&(pmlmeinfo->network));
+ u8 val8;
+
+ if (is_client_associated_to_ap(padapter))
+ issue_deauth_ex(padapter, pnetwork->MacAddress, WLAN_REASON_DEAUTH_LEAVING, param->deauth_timeout_ms/100, 100);
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_DISCONNECT, NULL);
+ rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, null_addr);
+
+ /* restore to initial setting. */
+ update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
+
+ if (((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) || ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)) {
+ /* Stop BCN */
+ val8 = 0;
+ rtw_hal_set_hwreg(padapter, HW_VAR_BCN_FUNC, (u8 *)(&val8));
+ }
+
+
+ /* set MSR to no link state -> infra. mode */
+ Set_MSR(padapter, _HW_STATE_STATION_);
+
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+
+ /* switch to the 20M Hz mode after disconnect */
+ pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+
+ set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+
+ flush_all_cam_entry(padapter);
+
+ _cancel_timer_ex(&pmlmeext->link_timer);
+
+ rtw_free_uc_swdec_pending_queue(padapter);
+
+ return H2C_SUCCESS;
+}
+
+static int rtw_scan_ch_decision(struct adapter *padapter, struct rtw_ieee80211_channel *out,
+ u32 out_num, struct rtw_ieee80211_channel *in, u32 in_num)
+{
+ int i, j;
+ int set_idx;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ /* clear out first */
+ _rtw_memset(out, 0, sizeof(struct rtw_ieee80211_channel)*out_num);
+
+ /* acquire channels from in */
+ j = 0;
+ for (i = 0; i < in_num; i++) {
+ set_idx = rtw_ch_set_search_ch(pmlmeext->channel_set, in[i].hw_value);
+ if (in[i].hw_value && !(in[i].flags & RTW_IEEE80211_CHAN_DISABLED) &&
+ set_idx >= 0) {
+ memcpy(&out[j], &in[i], sizeof(struct rtw_ieee80211_channel));
+
+ if (pmlmeext->channel_set[set_idx].ScanType == SCAN_PASSIVE)
+ out[j].flags &= RTW_IEEE80211_CHAN_PASSIVE_SCAN;
+
+ j++;
+ }
+ if (j >= out_num)
+ break;
+ }
+
+ /* if out is empty, use channel_set as default */
+ if (j == 0) {
+ for (i = 0; i < pmlmeext->max_chan_nums; i++) {
+ out[i].hw_value = pmlmeext->channel_set[i].ChannelNum;
+
+ if (pmlmeext->channel_set[i].ScanType == SCAN_PASSIVE)
+ out[i].flags &= RTW_IEEE80211_CHAN_PASSIVE_SCAN;
+
+ j++;
+ }
+ }
+
+ return j;
+}
+
+u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct sitesurvey_parm *pparm = (struct sitesurvey_parm *)pbuf;
+ u8 bdelayscan = false;
+ u8 val8;
+ u32 initialgain;
+ u32 i;
+
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+#endif
+
+ if (pmlmeext->sitesurvey_res.state == SCAN_DISABLE) {
+ /* for first time sitesurvey_cmd */
+ rtw_hal_set_hwreg(padapter, HW_VAR_CHECK_TXBUF, NULL);
+
+ pmlmeext->sitesurvey_res.state = SCAN_START;
+ pmlmeext->sitesurvey_res.bss_cnt = 0;
+ pmlmeext->sitesurvey_res.channel_idx = 0;
+
+ for (i = 0; i < RTW_SSID_SCAN_AMOUNT; i++) {
+ if (pparm->ssid[i].SsidLength) {
+ memcpy(pmlmeext->sitesurvey_res.ssid[i].Ssid, pparm->ssid[i].Ssid, IW_ESSID_MAX_SIZE);
+ pmlmeext->sitesurvey_res.ssid[i].SsidLength = pparm->ssid[i].SsidLength;
+ } else {
+ pmlmeext->sitesurvey_res.ssid[i].SsidLength = 0;
+ }
+ }
+
+ pmlmeext->sitesurvey_res.ch_num = rtw_scan_ch_decision(padapter
+ , pmlmeext->sitesurvey_res.ch, RTW_CHANNEL_SCAN_AMOUNT
+ , pparm->ch, pparm->ch_num
+ );
+
+ pmlmeext->sitesurvey_res.scan_mode = pparm->scan_mode;
+
+ /* issue null data if associating to the AP */
+ if (is_client_associated_to_ap(padapter)) {
+ pmlmeext->sitesurvey_res.state = SCAN_TXNULL;
+
+ issue_nulldata(padapter, NULL, 1, 3, 500);
+
+ bdelayscan = true;
+ }
+ if (bdelayscan) {
+ /* delay 50ms to protect nulldata(1). */
+ set_survey_timer(pmlmeext, 50);
+ return H2C_SUCCESS;
+ }
+ }
+
+ if ((pmlmeext->sitesurvey_res.state == SCAN_START) || (pmlmeext->sitesurvey_res.state == SCAN_TXNULL)) {
+ /* disable dynamic functions, such as high power, DIG */
+ Save_DM_Func_Flag(padapter);
+ Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false);
+
+		/* config the initial gain under scanning, need to write the BB registers */
+#ifdef CONFIG_88EU_P2P
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ initialgain = 0x1E;
+ else
+ initialgain = 0x28;
+#else /* CONFIG_88EU_P2P */
+ initialgain = 0x1E;
+#endif /* CONFIG_88EU_P2P */
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
+
+ /* set MSR to no link state */
+ Set_MSR(padapter, _HW_STATE_NOLINK_);
+
+ val8 = 1; /* under site survey */
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
+
+ pmlmeext->sitesurvey_res.state = SCAN_PROCESS;
+ }
+
+ site_survey(padapter);
+
+ return H2C_SUCCESS;
+}
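
sitesurvey_cmd_hdl() above steps a small scan state machine: SCAN_DISABLE starts a scan (sending null-data and deferring 50 ms when associated), and SCAN_START/SCAN_TXNULL then disable dynamic mechanisms, program the initial gain and MSR, and enter SCAN_PROCESS before site_survey() runs. A compressed sketch of that progression (the enum and helper are illustrative, not driver code):

	enum scan_state_sketch { S_DISABLE, S_START, S_TXNULL, S_PROCESS };

	static enum scan_state_sketch next_scan_state(enum scan_state_sketch s, int associated)
	{
		if (s == S_DISABLE)
			return associated ? S_TXNULL : S_START;	/* tx null-data first if linked */
		return S_PROCESS;				/* START and TXNULL both proceed to scanning */
	}
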
+
+u8 setauth_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ struct setauth_parm *pparm = (struct setauth_parm *)pbuf;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (pparm->mode < 4)
+ pmlmeinfo->auth_algo = pparm->mode;
+ return H2C_SUCCESS;
+}
+
+u8 setkey_hdl(struct adapter *padapter, u8 *pbuf)
+{
+ unsigned short ctrl;
+ struct setkey_parm *pparm = (struct setkey_parm *)pbuf;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ unsigned char null_sta[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+ /* main tx key for wep. */
+ if (pparm->set_tx)
+ pmlmeinfo->key_index = pparm->keyid;
+
+ /* write cam */
+ ctrl = BIT(15) | ((pparm->algorithm) << 2) | pparm->keyid;
+
+ DBG_88E_LEVEL(_drv_info_, "set group key to hw: alg:%d(WEP40-1 WEP104-5 TKIP-2 AES-4) "
+ "keyid:%d\n", pparm->algorithm, pparm->keyid);
+ write_cam(padapter, pparm->keyid, ctrl, null_sta, pparm->key);
+
+ return H2C_SUCCESS;
+}
+
+u8 set_stakey_hdl(struct adapter *padapter, u8 *pbuf)
+{
+ u16 ctrl = 0;
+ u8 cam_id;/* cam_entry */
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct set_stakey_parm *pparm = (struct set_stakey_parm *)pbuf;
+
+ /* cam_entry: */
+ /* 0~3 for default key */
+
+ /* for concurrent mode (ap+sta): */
+	/* default key is disabled, using sw encrypt/decrypt */
+ /* cam_entry = 4 for sta mode (macid = 0) */
+ /* cam_entry(macid+3) = 5 ~ N for ap mode (aid = 1~N, macid = 2 ~N) */
+
+ /* for concurrent mode (sta+sta): */
+	/* default key is disabled, using sw encrypt/decrypt */
+ /* cam_entry = 4 mapping to macid = 0 */
+ /* cam_entry = 5 mapping to macid = 2 */
+
+ cam_id = 4;
+
+ DBG_88E_LEVEL(_drv_info_, "set pairwise key to hw: alg:%d(WEP40-1 WEP104-5 TKIP-2 AES-4) camid:%d\n",
+ pparm->algorithm, cam_id);
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
+ struct sta_info *psta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ if (pparm->algorithm == _NO_PRIVACY_) /* clear cam entry */ {
+ clear_cam_entry(padapter, pparm->id);
+ return H2C_SUCCESS_RSP;
+ }
+
+ psta = rtw_get_stainfo(pstapriv, pparm->addr);
+ if (psta) {
+ ctrl = (BIT(15) | ((pparm->algorithm) << 2));
+
+ DBG_88E("r871x_set_stakey_hdl(): enc_algorithm=%d\n", pparm->algorithm);
+
+ if ((psta->mac_id < 1) || (psta->mac_id > (NUM_STA-4))) {
+ DBG_88E("r871x_set_stakey_hdl():set_stakey failed, mac_id(aid)=%d\n", psta->mac_id);
+ return H2C_REJECTED;
+ }
+
+ cam_id = (psta->mac_id + 3);/* 0~3 for default key, cmd_id = macid + 3, macid = aid+1; */
+
+ DBG_88E("Write CAM, mac_addr =%x:%x:%x:%x:%x:%x, cam_entry=%d\n", pparm->addr[0],
+ pparm->addr[1], pparm->addr[2], pparm->addr[3], pparm->addr[4],
+ pparm->addr[5], cam_id);
+
+ write_cam(padapter, cam_id, ctrl, pparm->addr, pparm->key);
+
+ return H2C_SUCCESS_RSP;
+ } else {
+ DBG_88E("r871x_set_stakey_hdl(): sta has been free\n");
+ return H2C_REJECTED;
+ }
+ }
+
+ /* below for sta mode */
+
+ if (pparm->algorithm == _NO_PRIVACY_) { /* clear cam entry */
+ clear_cam_entry(padapter, pparm->id);
+ return H2C_SUCCESS;
+ }
+ ctrl = BIT(15) | ((pparm->algorithm) << 2);
+ write_cam(padapter, cam_id, ctrl, pparm->addr, pparm->key);
+ pmlmeinfo->enc_algo = pparm->algorithm;
+ return H2C_SUCCESS;
+}
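
The CAM layout spelled out in the comments above places pairwise keys after the four default-key slots. A tiny sketch of that mapping (an assumption drawn from those comments, not a driver helper):

	/* entries 0..3: default keys; entry 4: STA mode (macid 0); entry macid+3: AP mode */
	static inline u8 stakey_cam_entry(u8 mac_id)
	{
		return mac_id == 0 ? 4 : mac_id + 3;
	}
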
+
+u8 add_ba_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ struct addBaReq_parm *pparm = (struct addBaReq_parm *)pbuf;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, pparm->addr);
+
+ if (!psta)
+ return H2C_SUCCESS;
+
+ if (((pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) && (pmlmeinfo->HT_enable)) ||
+ ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)) {
+ issue_action_BA(padapter, pparm->addr, RTW_WLAN_ACTION_ADDBA_REQ, (u16)pparm->tid);
+ _set_timer(&psta->addba_retry_timer, ADDBA_TO);
+ } else {
+ psta->htpriv.candidate_tid_bitmap &= ~BIT(pparm->tid);
+ }
+ return H2C_SUCCESS;
+}
+
+u8 set_tx_beacon_cmd(struct adapter *padapter)
+{
+ struct cmd_obj *ph2c;
+ struct Tx_Beacon_param *ptxBeacon_parm;
+ struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 res = _SUCCESS;
+ int len_diff = 0;
+
+_func_enter_;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ ptxBeacon_parm = (struct Tx_Beacon_param *)rtw_zmalloc(sizeof(struct Tx_Beacon_param));
+ if (ptxBeacon_parm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ memcpy(&(ptxBeacon_parm->network), &(pmlmeinfo->network), sizeof(struct wlan_bssid_ex));
+
+ len_diff = update_hidden_ssid(ptxBeacon_parm->network.IEs+_BEACON_IE_OFFSET_,
+ ptxBeacon_parm->network.IELength-_BEACON_IE_OFFSET_,
+ pmlmeinfo->hidden_ssid_mode);
+ ptxBeacon_parm->network.IELength += len_diff;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, ptxBeacon_parm, GEN_CMD_CODE(_TX_Beacon));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 mlme_evt_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ u8 evt_code;
+ u16 evt_sz;
+ uint *peventbuf;
+ void (*event_callback)(struct adapter *dev, u8 *pbuf);
+ struct evt_priv *pevt_priv = &(padapter->evtpriv);
+
+ peventbuf = (uint *)pbuf;
+ evt_sz = (u16)(*peventbuf&0xffff);
+ evt_code = (u8)((*peventbuf>>16)&0xff);
+
+ /* checking if event code is valid */
+ if (evt_code >= MAX_C2HEVT) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nEvent Code(%d) mismatch!\n", evt_code));
+ goto _abort_event_;
+ }
+
+ /* checking if event size match the event parm size */
+ if ((wlanevents[evt_code].parmsize != 0) &&
+ (wlanevents[evt_code].parmsize != evt_sz)) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
+ ("\nEvent(%d) Parm Size mismatch (%d vs %d)!\n",
+ evt_code, wlanevents[evt_code].parmsize, evt_sz));
+ goto _abort_event_;
+ }
+
+ ATOMIC_INC(&pevt_priv->event_seq);
+
+ peventbuf += 2;
+
+ if (peventbuf) {
+ event_callback = wlanevents[evt_code].event_callback;
+ event_callback(padapter, (u8 *)peventbuf);
+
+ pevt_priv->evt_done_cnt++;
+ }
+
+_abort_event_:
+ return H2C_SUCCESS;
+}
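
mlme_evt_hdl() above decodes the leading 32-bit word of a C2H event: the size sits in bits 0-15 and the event code in bits 16-23, with the payload starting two 32-bit words in. A one-line sketch of how such a header word would be composed, mirroring the unpacking above (illustrative only):

	static u32 pack_c2h_event_hdr(u8 evt_code, u16 evt_sz)
	{
		return ((u32)evt_code << 16) | evt_sz;
	}
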
+
+u8 h2c_msg_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ if (!pbuf)
+ return H2C_PARAMETERS_ERROR;
+
+ return H2C_SUCCESS;
+}
+
+u8 tx_beacon_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ if (send_beacon(padapter) == _FAIL) {
+ DBG_88E("issue_beacon, fail!\n");
+ return H2C_PARAMETERS_ERROR;
+ }
+#ifdef CONFIG_88EU_AP_MODE
+ else { /* tx bc/mc frames after update TIM */
+ unsigned long irqL;
+ struct sta_info *psta_bmc;
+ struct list_head *xmitframe_plist, *xmitframe_phead;
+ struct xmit_frame *pxmitframe = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ /* for BC/MC Frames */
+ psta_bmc = rtw_get_bcmc_stainfo(padapter);
+ if (!psta_bmc)
+ return H2C_SUCCESS;
+
+ if ((pstapriv->tim_bitmap&BIT(0)) && (psta_bmc->sleepq_len > 0)) {
+ rtw_msleep_os(10);/* 10ms, ATIM(HIQ) Windows */
+ _enter_critical_bh(&psta_bmc->sleep_q.lock, &irqL);
+
+ xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
+ xmitframe_plist = get_next(xmitframe_phead);
+
+ while (!rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
+ pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+
+ xmitframe_plist = get_next(xmitframe_plist);
+
+ rtw_list_delete(&pxmitframe->list);
+
+ psta_bmc->sleepq_len--;
+ if (psta_bmc->sleepq_len > 0)
+ pxmitframe->attrib.mdata = 1;
+ else
+ pxmitframe->attrib.mdata = 0;
+
+ pxmitframe->attrib.triggered = 1;
+
+ pxmitframe->attrib.qsel = 0x11;/* HIQ */
+
+ _exit_critical_bh(&psta_bmc->sleep_q.lock, &irqL);
+ if (rtw_hal_xmit(padapter, pxmitframe))
+ rtw_os_xmit_complete(padapter, pxmitframe);
+ _enter_critical_bh(&psta_bmc->sleep_q.lock, &irqL);
+ }
+ _exit_critical_bh(&psta_bmc->sleep_q.lock, &irqL);
+ }
+ }
+#endif
+ return H2C_SUCCESS;
+}
+
+u8 set_ch_hdl(struct adapter *padapter, u8 *pbuf)
+{
+ struct set_ch_parm *set_ch_parm;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ if (!pbuf)
+ return H2C_PARAMETERS_ERROR;
+
+ set_ch_parm = (struct set_ch_parm *)pbuf;
+
+ DBG_88E(FUNC_NDEV_FMT" ch:%u, bw:%u, ch_offset:%u\n",
+ FUNC_NDEV_ARG(padapter->pnetdev),
+ set_ch_parm->ch, set_ch_parm->bw, set_ch_parm->ch_offset);
+
+ pmlmeext->cur_channel = set_ch_parm->ch;
+ pmlmeext->cur_ch_offset = set_ch_parm->ch_offset;
+ pmlmeext->cur_bwmode = set_ch_parm->bw;
+
+ set_channel_bwmode(padapter, set_ch_parm->ch, set_ch_parm->ch_offset, set_ch_parm->bw);
+
+ return H2C_SUCCESS;
+}
+
+u8 set_chplan_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ struct SetChannelPlan_param *setChannelPlan_param;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ if (!pbuf)
+ return H2C_PARAMETERS_ERROR;
+
+ setChannelPlan_param = (struct SetChannelPlan_param *)pbuf;
+
+ pmlmeext->max_chan_nums = init_channel_set(padapter, setChannelPlan_param->channel_plan, pmlmeext->channel_set);
+ init_channel_list(padapter, pmlmeext->channel_set, pmlmeext->max_chan_nums, &pmlmeext->channel_list);
+
+ return H2C_SUCCESS;
+}
+
+u8 led_blink_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ if (!pbuf)
+ return H2C_PARAMETERS_ERROR;
+ return H2C_SUCCESS;
+}
+
+u8 set_csa_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ return H2C_REJECTED;
+}
+
+/* TDLS_WRCR : write RCR DATA BIT */
+/* TDLS_SD_PTI : issue peer traffic indication */
+/* TDLS_CS_OFF : go back to the channel linked with AP, terminating channel switch procedure */
+/* TDLS_INIT_CH_SEN : init channel sensing, receive all data and mgnt frame */
+/* TDLS_DONE_CH_SEN: channel sensing and report candidate channel */
+/* TDLS_OFF_CH : first time set channel to off channel */
+/* TDLS_BASE_CH	: go back to the channel linked with AP when the base channel is set as target channel */
+/* TDLS_P_OFF_CH : periodically go to off channel */
+/* TDLS_P_BASE_CH : periodically go back to base channel */
+/* TDLS_RS_RCR : restore RCR */
+/* TDLS_CKALV_PH1 : check alive timer phase1 */
+/* TDLS_CKALV_PH2 : check alive timer phase2 */
+/* TDLS_FREE_STA : free tdls sta */
+u8 tdls_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ return H2C_REJECTED;
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_mp.c b/drivers/staging/rtl8188eu/core/rtw_mp.c
new file mode 100644
index 0000000..9832dcb
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_mp.c
@@ -0,0 +1,997 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_MP_C_
+
+#include <drv_types.h>
+
+#include "odm_precomp.h"
+#include "rtl8188e_hal.h"
+
+u32 read_macreg(struct adapter *padapter, u32 addr, u32 sz)
+{
+ u32 val = 0;
+
+ switch (sz) {
+ case 1:
+ val = rtw_read8(padapter, addr);
+ break;
+ case 2:
+ val = rtw_read16(padapter, addr);
+ break;
+ case 4:
+ val = rtw_read32(padapter, addr);
+ break;
+ default:
+ val = 0xffffffff;
+ break;
+ }
+
+ return val;
+}
+
+void write_macreg(struct adapter *padapter, u32 addr, u32 val, u32 sz)
+{
+ switch (sz) {
+ case 1:
+ rtw_write8(padapter, addr, (u8)val);
+ break;
+ case 2:
+ rtw_write16(padapter, addr, (u16)val);
+ break;
+ case 4:
+ rtw_write32(padapter, addr, val);
+ break;
+ default:
+ break;
+ }
+}
+
+u32 read_bbreg(struct adapter *padapter, u32 addr, u32 bitmask)
+{
+ return rtw_hal_read_bbreg(padapter, addr, bitmask);
+}
+
+void write_bbreg(struct adapter *padapter, u32 addr, u32 bitmask, u32 val)
+{
+ rtw_hal_write_bbreg(padapter, addr, bitmask, val);
+}
+
+u32 _read_rfreg(struct adapter *padapter, u8 rfpath, u32 addr, u32 bitmask)
+{
+ return rtw_hal_read_rfreg(padapter, (enum rf_radio_path)rfpath, addr, bitmask);
+}
+
+void _write_rfreg(struct adapter *padapter, u8 rfpath, u32 addr, u32 bitmask, u32 val)
+{
+ rtw_hal_write_rfreg(padapter, (enum rf_radio_path)rfpath, addr, bitmask, val);
+}
+
+u32 read_rfreg(struct adapter *padapter, u8 rfpath, u32 addr)
+{
+ return _read_rfreg(padapter, (enum rf_radio_path)rfpath, addr, bRFRegOffsetMask);
+}
+
+void write_rfreg(struct adapter *padapter, u8 rfpath, u32 addr, u32 val)
+{
+ _write_rfreg(padapter, (enum rf_radio_path)rfpath, addr, bRFRegOffsetMask, val);
+}
+
+static void _init_mp_priv_(struct mp_priv *pmp_priv)
+{
+ struct wlan_bssid_ex *pnetwork;
+
+ _rtw_memset(pmp_priv, 0, sizeof(struct mp_priv));
+
+ pmp_priv->mode = MP_OFF;
+
+ pmp_priv->channel = 1;
+ pmp_priv->bandwidth = HT_CHANNEL_WIDTH_20;
+ pmp_priv->prime_channel_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ pmp_priv->rateidx = MPT_RATE_1M;
+ pmp_priv->txpoweridx = 0x2A;
+
+ pmp_priv->antenna_tx = ANTENNA_A;
+ pmp_priv->antenna_rx = ANTENNA_AB;
+
+ pmp_priv->check_mp_pkt = 0;
+
+ pmp_priv->tx_pktcount = 0;
+
+ pmp_priv->rx_pktcount = 0;
+ pmp_priv->rx_crcerrpktcount = 0;
+
+ pmp_priv->network_macaddr[0] = 0x00;
+ pmp_priv->network_macaddr[1] = 0xE0;
+ pmp_priv->network_macaddr[2] = 0x4C;
+ pmp_priv->network_macaddr[3] = 0x87;
+ pmp_priv->network_macaddr[4] = 0x66;
+ pmp_priv->network_macaddr[5] = 0x55;
+
+ pnetwork = &pmp_priv->mp_network.network;
+ memcpy(pnetwork->MacAddress, pmp_priv->network_macaddr, ETH_ALEN);
+
+ pnetwork->Ssid.SsidLength = 8;
+ memcpy(pnetwork->Ssid.Ssid, "mp_871x", pnetwork->Ssid.SsidLength);
+}
+
+static void mp_init_xmit_attrib(struct mp_tx *pmptx, struct adapter *padapter)
+{
+ struct pkt_attrib *pattrib;
+ struct tx_desc *desc;
+
+ /* init xmitframe attribute */
+ pattrib = &pmptx->attrib;
+ _rtw_memset(pattrib, 0, sizeof(struct pkt_attrib));
+ desc = &pmptx->desc;
+ _rtw_memset(desc, 0, TXDESC_SIZE);
+
+ pattrib->ether_type = 0x8712;
+ _rtw_memset(pattrib->dst, 0xFF, ETH_ALEN);
+ pattrib->ack_policy = 0;
+ pattrib->hdrlen = WLAN_HDR_A3_LEN;
+ pattrib->subtype = WIFI_DATA;
+ pattrib->priority = 0;
+ pattrib->qsel = pattrib->priority;
+ pattrib->nr_frags = 1;
+ pattrib->encrypt = 0;
+ pattrib->bswenc = false;
+ pattrib->qos_en = false;
+}
+
+s32 init_mp_priv(struct adapter *padapter)
+{
+ struct mp_priv *pmppriv = &padapter->mppriv;
+
+ _init_mp_priv_(pmppriv);
+ pmppriv->papdater = padapter;
+
+ pmppriv->tx.stop = 1;
+ mp_init_xmit_attrib(&pmppriv->tx, padapter);
+
+ switch (padapter->registrypriv.rf_config) {
+ case RF_1T1R:
+ pmppriv->antenna_tx = ANTENNA_A;
+ pmppriv->antenna_rx = ANTENNA_A;
+ break;
+ case RF_1T2R:
+ default:
+ pmppriv->antenna_tx = ANTENNA_A;
+ pmppriv->antenna_rx = ANTENNA_AB;
+ break;
+ case RF_2T2R:
+ case RF_2T2R_GREEN:
+ pmppriv->antenna_tx = ANTENNA_AB;
+ pmppriv->antenna_rx = ANTENNA_AB;
+ break;
+ case RF_2T4R:
+ pmppriv->antenna_tx = ANTENNA_AB;
+ pmppriv->antenna_rx = ANTENNA_ABCD;
+ break;
+ }
+
+ return _SUCCESS;
+}
+
+void free_mp_priv(struct mp_priv *pmp_priv)
+{
+ kfree(pmp_priv->pallocated_mp_xmitframe_buf);
+ pmp_priv->pallocated_mp_xmitframe_buf = NULL;
+ pmp_priv->pmp_xmtframe_buf = NULL;
+}
+
+#define PHY_IQCalibrate(a, b) PHY_IQCalibrate_8188E(a, b)
+#define PHY_LCCalibrate(a) PHY_LCCalibrate_8188E(a)
+#define PHY_SetRFPathSwitch(a, b) PHY_SetRFPathSwitch_8188E(a, b)
+
+s32 MPT_InitializeAdapter(struct adapter *pAdapter, u8 Channel)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+ s32 rtStatus = _SUCCESS;
+ struct mpt_context *pMptCtx = &pAdapter->mppriv.MptCtx;
+ struct mlme_priv *pmlmepriv = &pAdapter->mlmepriv;
+
+ /* HW Initialization for 8190 MPT. */
+ /* SW Initialization for 8190 MP. */
+ pMptCtx->bMptDrvUnload = false;
+ pMptCtx->bMassProdTest = false;
+ pMptCtx->bMptIndexEven = true; /* default gain index is -6.0db */
+ pMptCtx->h2cReqNum = 0x0;
+ /* Init mpt event. */
+ /* init for BT MP */
+
+ pMptCtx->bMptWorkItemInProgress = false;
+ pMptCtx->CurrMptAct = NULL;
+ /* */
+
+ /* Don't accept any packets */
+ rtw_write32(pAdapter, REG_RCR, 0);
+
+ PHY_IQCalibrate(pAdapter, false);
+ dm_CheckTXPowerTracking(&pHalData->odmpriv); /* trigger thermal meter */
+ PHY_LCCalibrate(pAdapter);
+
+ pMptCtx->backup0xc50 = (u8)PHY_QueryBBReg(pAdapter, rOFDM0_XAAGCCore1, bMaskByte0);
+ pMptCtx->backup0xc58 = (u8)PHY_QueryBBReg(pAdapter, rOFDM0_XBAGCCore1, bMaskByte0);
+ pMptCtx->backup0xc30 = (u8)PHY_QueryBBReg(pAdapter, rOFDM0_RxDetector1, bMaskByte0);
+ pMptCtx->backup0x52_RF_A = (u8)PHY_QueryRFReg(pAdapter, RF_PATH_A, RF_0x52, 0x000F0);
+ pMptCtx->backup0x52_RF_B = (u8)PHY_QueryRFReg(pAdapter, RF_PATH_A, RF_0x52, 0x000F0);
+
+ /* set ant to wifi side in mp mode */
+ rtw_write16(pAdapter, 0x870, 0x300);
+ rtw_write16(pAdapter, 0x860, 0x110);
+
+ if (pAdapter->registrypriv.mp_mode == 1)
+ pmlmepriv->fw_state = WIFI_MP_STATE;
+
+ return rtStatus;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: MPT_DeInitAdapter()
+ *
+ * Overview: Extra DeInitialization for Mass Production Test.
+ *
+ * Input: struct adapter * pAdapter
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 05/08/2007 MHC Create Version 0.
+ * 05/18/2007 MHC Add normal driver MPHalt code.
+ *
+ *---------------------------------------------------------------------------*/
+void MPT_DeInitAdapter(struct adapter *pAdapter)
+{
+ struct mpt_context *pMptCtx = &pAdapter->mppriv.MptCtx;
+
+ pMptCtx->bMptDrvUnload = true;
+}
+
+static u8 mpt_ProStartTest(struct adapter *padapter)
+{
+ struct mpt_context *pMptCtx = &padapter->mppriv.MptCtx;
+
+ pMptCtx->bMassProdTest = true;
+ pMptCtx->bStartContTx = false;
+ pMptCtx->bCckContTx = false;
+ pMptCtx->bOfdmContTx = false;
+ pMptCtx->bSingleCarrier = false;
+ pMptCtx->bCarrierSuppression = false;
+ pMptCtx->bSingleTone = false;
+
+ return _SUCCESS;
+}
+
+/*
+ * General use
+ */
+s32 SetPowerTracking(struct adapter *padapter, u8 enable)
+{
+ Hal_SetPowerTracking(padapter, enable);
+ return 0;
+}
+
+void GetPowerTracking(struct adapter *padapter, u8 *enable)
+{
+ Hal_GetPowerTracking(padapter, enable);
+}
+
+static void disable_dm(struct adapter *padapter)
+{
+ u8 v8;
+
+ /* 3 1. disable firmware dynamic mechanism */
+ /* disable Power Training, Rate Adaptive */
+ v8 = rtw_read8(padapter, REG_BCN_CTRL);
+ v8 &= ~EN_BCN_FUNCTION;
+ rtw_write8(padapter, REG_BCN_CTRL, v8);
+
+ /* 3 2. disable driver dynamic mechanism */
+ /* disable Dynamic Initial Gain */
+ /* disable High Power */
+ /* disable Power Tracking */
+ Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false);
+
+ /* enable APK, LCK and IQK but disable power tracking */
+ Switch_DM_Func(padapter, DYNAMIC_RF_CALIBRATION, true);
+}
+
+/* This function initializes the DUT to the MP test mode */
+s32 mp_start_test(struct adapter *padapter)
+{
+ struct wlan_bssid_ex bssid;
+ struct sta_info *psta;
+ u32 length;
+ u8 val8;
+
+ unsigned long irqL;
+ s32 res = _SUCCESS;
+
+ struct mp_priv *pmppriv = &padapter->mppriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_network *tgt_network = &pmlmepriv->cur_network;
+
+ padapter->registrypriv.mp_mode = 1;
+	pmppriv->bSetTxPower = 0; /* flag for manually-set tx power */
+
+ /* 3 disable dynamic mechanism */
+ disable_dm(padapter);
+
+ /* 3 0. update mp_priv */
+
+ if (padapter->registrypriv.rf_config == RF_819X_MAX_TYPE) {
+ switch (GET_RF_TYPE(padapter)) {
+ case RF_1T1R:
+ pmppriv->antenna_tx = ANTENNA_A;
+ pmppriv->antenna_rx = ANTENNA_A;
+ break;
+ case RF_1T2R:
+ default:
+ pmppriv->antenna_tx = ANTENNA_A;
+ pmppriv->antenna_rx = ANTENNA_AB;
+ break;
+ case RF_2T2R:
+ case RF_2T2R_GREEN:
+ pmppriv->antenna_tx = ANTENNA_AB;
+ pmppriv->antenna_rx = ANTENNA_AB;
+ break;
+ case RF_2T4R:
+ pmppriv->antenna_tx = ANTENNA_AB;
+ pmppriv->antenna_rx = ANTENNA_ABCD;
+ break;
+ }
+ }
+
+ mpt_ProStartTest(padapter);
+
+ /* 3 1. initialize a new struct wlan_bssid_ex */
+/* _rtw_memset(&bssid, 0, sizeof(struct wlan_bssid_ex)); */
+ memcpy(bssid.MacAddress, pmppriv->network_macaddr, ETH_ALEN);
+ bssid.Ssid.SsidLength = strlen("mp_pseudo_adhoc");
+ memcpy(bssid.Ssid.Ssid, (u8 *)"mp_pseudo_adhoc", bssid.Ssid.SsidLength);
+ bssid.InfrastructureMode = Ndis802_11IBSS;
+ bssid.NetworkTypeInUse = Ndis802_11DS;
+ bssid.IELength = 0;
+
+ length = get_wlan_bssid_ex_sz(&bssid);
+ if (length % 4)
+ bssid.Length = ((length >> 2) + 1) << 2; /* round up to multiple of 4 bytes. */
+ else
+ bssid.Length = length;
+
+ _enter_critical_bh(&pmlmepriv->lock, &irqL);
+
+ if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == true)
+ goto end_of_mp_start_test;
+
+ /* init mp_start_test status */
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ rtw_disassoc_cmd(padapter, 500, true);
+ rtw_indicate_disconnect(padapter);
+ rtw_free_assoc_resources(padapter, 1);
+ }
+ pmppriv->prev_fw_state = get_fwstate(pmlmepriv);
+ if (padapter->registrypriv.mp_mode == 1)
+ pmlmepriv->fw_state = WIFI_MP_STATE;
+ set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
+
+ /* 3 2. create a new psta for mp driver */
+ /* clear psta in the cur_network, if any */
+ psta = rtw_get_stainfo(&padapter->stapriv, tgt_network->network.MacAddress);
+ if (psta)
+ rtw_free_stainfo(padapter, psta);
+
+ psta = rtw_alloc_stainfo(&padapter->stapriv, bssid.MacAddress);
+ if (psta == NULL) {
+ RT_TRACE(_module_mp_, _drv_err_, ("mp_start_test: Can't alloc sta_info!\n"));
+ pmlmepriv->fw_state = pmppriv->prev_fw_state;
+ res = _FAIL;
+ goto end_of_mp_start_test;
+ }
+
+	/* 3 3. join pseudo AdHoc */
+ tgt_network->join_res = 1;
+ tgt_network->aid = 1;
+ psta->aid = 1;
+ memcpy(&tgt_network->network, &bssid, length);
+
+ rtw_indicate_connect(padapter);
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+
+end_of_mp_start_test:
+
+ _exit_critical_bh(&pmlmepriv->lock, &irqL);
+
+ if (res == _SUCCESS) {
+ /* set MSR to WIFI_FW_ADHOC_STATE */
+ val8 = rtw_read8(padapter, MSR) & 0xFC; /* 0x0102 */
+ val8 |= WIFI_FW_ADHOC_STATE;
+ rtw_write8(padapter, MSR, val8); /* Link in ad hoc network */
+ }
+ return res;
+}
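
The bssid.Length computation inside mp_start_test() rounds the structure size up to the next multiple of 4 bytes; the branch is equivalent to the usual mask form (shown only as an illustration, not as a change to the patch):

	/* round length up to a multiple of 4 bytes */
	bssid.Length = (length + 3) & ~3u;
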
+/* */
+/* This function changes the DUT from the MP test mode back to normal mode */
+void mp_stop_test(struct adapter *padapter)
+{
+ struct mp_priv *pmppriv = &padapter->mppriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_network *tgt_network = &pmlmepriv->cur_network;
+ struct sta_info *psta;
+
+ unsigned long irqL;
+
+ if (pmppriv->mode == MP_ON) {
+ pmppriv->bSetTxPower = 0;
+ _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == false)
+ goto end_of_mp_stop_test;
+
+		/* 3 1. disconnect pseudo AdHoc */
+ rtw_indicate_disconnect(padapter);
+
+ /* 3 2. clear psta used in mp test mode. */
+ psta = rtw_get_stainfo(&padapter->stapriv, tgt_network->network.MacAddress);
+ if (psta)
+ rtw_free_stainfo(padapter, psta);
+
+ /* 3 3. return to normal state (default:station mode) */
+ pmlmepriv->fw_state = pmppriv->prev_fw_state; /* WIFI_STATION_STATE; */
+
+ /* flush the cur_network */
+ _rtw_memset(tgt_network, 0, sizeof(struct wlan_network));
+
+ _clr_fwstate_(pmlmepriv, WIFI_MP_STATE);
+
+end_of_mp_stop_test:
+
+ _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ }
+}
+
+/*---------------------------hal\rtl8192c\MPT_HelperFunc.c---------------------------*/
+/*
+ * SetChannel
+ * Description
+ * Use H2C command to change channel,
+ *	Use the H2C command to change the channel;
+ *	this not only modifies the RF register but also applies the other settings that need to be updated.
+void SetChannel(struct adapter *pAdapter)
+{
+ Hal_SetChannel(pAdapter);
+}
+
+/*
+ * Notice
+ *	Switching bandwidth may change the center frequency (channel)
+ */
+void SetBandwidth(struct adapter *pAdapter)
+{
+ Hal_SetBandwidth(pAdapter);
+}
+
+void SetAntenna(struct adapter *pAdapter)
+{
+ Hal_SetAntenna(pAdapter);
+}
+
+void SetAntennaPathPower(struct adapter *pAdapter)
+{
+ Hal_SetAntennaPathPower(pAdapter);
+}
+
+void SetTxPower(struct adapter *pAdapter)
+{
+ Hal_SetTxPower(pAdapter);
+}
+
+void SetDataRate(struct adapter *pAdapter)
+{
+ Hal_SetDataRate(pAdapter);
+}
+
+void MP_PHY_SetRFPathSwitch(struct adapter *pAdapter, bool bMain)
+{
+ PHY_SetRFPathSwitch(pAdapter, bMain);
+}
+
+s32 SetThermalMeter(struct adapter *pAdapter, u8 target_ther)
+{
+ return Hal_SetThermalMeter(pAdapter, target_ther);
+}
+
+void GetThermalMeter(struct adapter *pAdapter, u8 *value)
+{
+ Hal_GetThermalMeter(pAdapter, value);
+}
+
+void SetSingleCarrierTx(struct adapter *pAdapter, u8 bStart)
+{
+ PhySetTxPowerLevel(pAdapter);
+ Hal_SetSingleCarrierTx(pAdapter, bStart);
+}
+
+void SetSingleToneTx(struct adapter *pAdapter, u8 bStart)
+{
+ PhySetTxPowerLevel(pAdapter);
+ Hal_SetSingleToneTx(pAdapter, bStart);
+}
+
+void SetCarrierSuppressionTx(struct adapter *pAdapter, u8 bStart)
+{
+ PhySetTxPowerLevel(pAdapter);
+ Hal_SetCarrierSuppressionTx(pAdapter, bStart);
+}
+
+void SetContinuousTx(struct adapter *pAdapter, u8 bStart)
+{
+ PhySetTxPowerLevel(pAdapter);
+ Hal_SetContinuousTx(pAdapter, bStart);
+}
+
+
+void PhySetTxPowerLevel(struct adapter *pAdapter)
+{
+ struct mp_priv *pmp_priv = &pAdapter->mppriv;
+
+	if (pmp_priv->bSetTxPower == 0) /* no manually-set power index */
+ PHY_SetTxPowerLevel8188E(pAdapter, pmp_priv->channel);
+}
+
+/* */
+static void dump_mpframe(struct adapter *padapter, struct xmit_frame *pmpframe)
+{
+ rtw_hal_mgnt_xmit(padapter, pmpframe);
+}
+
+static struct xmit_frame *alloc_mp_xmitframe(struct xmit_priv *pxmitpriv)
+{
+ struct xmit_frame *pmpframe;
+ struct xmit_buf *pxmitbuf;
+
+ pmpframe = rtw_alloc_xmitframe(pxmitpriv);
+ if (pmpframe == NULL)
+ return NULL;
+
+ pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
+ if (pxmitbuf == NULL) {
+ rtw_free_xmitframe(pxmitpriv, pmpframe);
+ return NULL;
+ }
+
+ pmpframe->frame_tag = MP_FRAMETAG;
+
+ pmpframe->pxmitbuf = pxmitbuf;
+
+ pmpframe->buf_addr = pxmitbuf->pbuf;
+
+ pxmitbuf->priv_data = pmpframe;
+
+ return pmpframe;
+}
+
+static int mp_xmit_packet_thread(void *context)
+{
+ struct xmit_frame *pxmitframe;
+ struct mp_tx *pmptx;
+ struct mp_priv *pmp_priv;
+ struct xmit_priv *pxmitpriv;
+ struct adapter *padapter;
+
+ pmp_priv = (struct mp_priv *)context;
+ pmptx = &pmp_priv->tx;
+ padapter = pmp_priv->papdater;
+ pxmitpriv = &(padapter->xmitpriv);
+
+ thread_enter("RTW_MP_THREAD");
+
+ /* DBG_88E("%s:pkTx Start\n", __func__); */
+ while (1) {
+ pxmitframe = alloc_mp_xmitframe(pxmitpriv);
+ if (pxmitframe == NULL) {
+ if (pmptx->stop ||
+ padapter->bSurpriseRemoved ||
+ padapter->bDriverStopped) {
+ goto exit;
+ } else {
+ rtw_msleep_os(1);
+ continue;
+ }
+ }
+
+ memcpy((u8 *)(pxmitframe->buf_addr+TXDESC_OFFSET), pmptx->buf, pmptx->write_size);
+ memcpy(&(pxmitframe->attrib), &(pmptx->attrib), sizeof(struct pkt_attrib));
+
+ dump_mpframe(padapter, pxmitframe);
+
+ pmptx->sended++;
+ pmp_priv->tx_pktcount++;
+
+ if (pmptx->stop ||
+ padapter->bSurpriseRemoved ||
+ padapter->bDriverStopped)
+ goto exit;
+ if ((pmptx->count != 0) &&
+ (pmptx->count == pmptx->sended))
+ goto exit;
+
+ flush_signals_thread();
+ }
+
+exit:
+ kfree(pmptx->pallocated_buf);
+ pmptx->pallocated_buf = NULL;
+ pmptx->stop = 1;
+
+ thread_exit();
+}
+
+void fill_txdesc_for_mp(struct adapter *padapter, struct tx_desc *ptxdesc)
+{
+ struct mp_priv *pmp_priv = &padapter->mppriv;
+ memcpy(ptxdesc, &(pmp_priv->tx.desc), TXDESC_SIZE);
+}
+
+void SetPacketTx(struct adapter *padapter)
+{
+ u8 *ptr, *pkt_start, *pkt_end;
+ u32 pkt_size;
+ struct tx_desc *desc;
+ struct rtw_ieee80211_hdr *hdr;
+ u8 payload;
+ s32 bmcast;
+ struct pkt_attrib *pattrib;
+ struct mp_priv *pmp_priv;
+
+
+ pmp_priv = &padapter->mppriv;
+ if (pmp_priv->tx.stop)
+ return;
+ pmp_priv->tx.sended = 0;
+ pmp_priv->tx.stop = 0;
+ pmp_priv->tx_pktcount = 0;
+
+ /* 3 1. update_attrib() */
+ pattrib = &pmp_priv->tx.attrib;
+ memcpy(pattrib->src, padapter->eeprompriv.mac_addr, ETH_ALEN);
+ memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
+ memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
+ bmcast = IS_MCAST(pattrib->ra);
+ if (bmcast) {
+ pattrib->mac_id = 1;
+ pattrib->psta = rtw_get_bcmc_stainfo(padapter);
+ } else {
+ pattrib->mac_id = 0;
+ pattrib->psta = rtw_get_stainfo(&padapter->stapriv, get_bssid(&padapter->mlmepriv));
+ }
+
+ pattrib->last_txcmdsz = pattrib->hdrlen + pattrib->pktlen;
+
+ /* 3 2. allocate xmit buffer */
+ pkt_size = pattrib->last_txcmdsz;
+
+ kfree(pmp_priv->tx.pallocated_buf);
+ pmp_priv->tx.write_size = pkt_size;
+ pmp_priv->tx.buf_size = pkt_size + XMITBUF_ALIGN_SZ;
+ pmp_priv->tx.pallocated_buf = rtw_zmalloc(pmp_priv->tx.buf_size);
+ if (pmp_priv->tx.pallocated_buf == NULL) {
+ DBG_88E("%s: malloc(%d) fail!!\n", __func__, pmp_priv->tx.buf_size);
+ return;
+ }
+ pmp_priv->tx.buf = (u8 *)N_BYTE_ALIGMENT((size_t)(pmp_priv->tx.pallocated_buf), XMITBUF_ALIGN_SZ);
+ ptr = pmp_priv->tx.buf;
+
+ desc = &(pmp_priv->tx.desc);
+ _rtw_memset(desc, 0, TXDESC_SIZE);
+ pkt_start = ptr;
+ pkt_end = pkt_start + pkt_size;
+
+ /* 3 3. init TX descriptor */
+ /* offset 0 */
+ desc->txdw0 |= cpu_to_le32(OWN | FSG | LSG);
+ desc->txdw0 |= cpu_to_le32(pkt_size & 0x0000FFFF); /* packet size */
+ desc->txdw0 |= cpu_to_le32(((TXDESC_SIZE + OFFSET_SZ) << OFFSET_SHT) & 0x00FF0000); /* 32 bytes for TX Desc */
+ if (bmcast)
+ desc->txdw0 |= cpu_to_le32(BMC); /* broadcast packet */
+
+ desc->txdw1 |= cpu_to_le32((0x01 << 26) & 0xff000000);
+ /* offset 4 */
+ desc->txdw1 |= cpu_to_le32((pattrib->mac_id) & 0x3F); /* CAM_ID(MAC_ID) */
+ desc->txdw1 |= cpu_to_le32((pattrib->qsel << QSEL_SHT) & 0x00001F00); /* Queue Select, TID */
+
+ desc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000F0000); /* Rate Adaptive ID */
+ /* offset 8 */
+ /* offset 12 */
+
+ desc->txdw3 |= cpu_to_le32((pattrib->seqnum<<16)&0x0fff0000);
+
+ /* offset 16 */
+ desc->txdw4 |= cpu_to_le32(HW_SSN);
+ desc->txdw4 |= cpu_to_le32(USERATE);
+ desc->txdw4 |= cpu_to_le32(DISDATAFB);
+
+ if (pmp_priv->preamble) {
+ if (pmp_priv->rateidx <= MPT_RATE_54M)
+ desc->txdw4 |= cpu_to_le32(DATA_SHORT); /* CCK Short Preamble */
+ }
+ if (pmp_priv->bandwidth == HT_CHANNEL_WIDTH_40)
+ desc->txdw4 |= cpu_to_le32(DATA_BW);
+
+ /* offset 20 */
+ desc->txdw5 |= cpu_to_le32(pmp_priv->rateidx & 0x0000001F);
+
+ if (pmp_priv->preamble) {
+ if (pmp_priv->rateidx > MPT_RATE_54M)
+ desc->txdw5 |= cpu_to_le32(SGI); /* MCS Short Guard Interval */
+ }
+ desc->txdw5 |= cpu_to_le32(RTY_LMT_EN); /* retry limit enable */
+ desc->txdw5 |= cpu_to_le32(0x00180000); /* DATA/RTS Rate Fallback Limit */
+
+ /* 3 4. make wlan header, make_wlanhdr() */
+ hdr = (struct rtw_ieee80211_hdr *)pkt_start;
+ SetFrameSubType(&hdr->frame_ctl, pattrib->subtype);
+ memcpy(hdr->addr1, pattrib->dst, ETH_ALEN); /* DA */
+ memcpy(hdr->addr2, pattrib->src, ETH_ALEN); /* SA */
+ memcpy(hdr->addr3, get_bssid(&padapter->mlmepriv), ETH_ALEN); /* RA, BSSID */
+
+ /* 3 5. make payload */
+ ptr = pkt_start + pattrib->hdrlen;
+
+ switch (pmp_priv->tx.payload) {
+ case 0:
+ payload = 0x00;
+ break;
+ case 1:
+ payload = 0x5a;
+ break;
+ case 2:
+ payload = 0xa5;
+ break;
+ case 3:
+ payload = 0xff;
+ break;
+ default:
+ payload = 0x00;
+ break;
+ }
+
+ _rtw_memset(ptr, payload, pkt_end - ptr);
+
+ /* 3 6. start thread */
+ pmp_priv->tx.PktTxThread = kthread_run(mp_xmit_packet_thread, pmp_priv, "RTW_MP_THREAD");
+ if (IS_ERR(pmp_priv->tx.PktTxThread))
+ DBG_88E("Create PktTx Thread Fail !!!!!\n");
+}
+
+void SetPacketRx(struct adapter *pAdapter, u8 bStartRx)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+
+ if (bStartRx) {
+ /* Accept CRC error and destination address */
+ pHalData->ReceiveConfig = AAP | APM | AM | AB | APP_ICV | ADF | AMF | HTC_LOC_CTRL | APP_MIC | APP_PHYSTS;
+
+ pHalData->ReceiveConfig |= ACRC32;
+
+ rtw_write32(pAdapter, REG_RCR, pHalData->ReceiveConfig);
+
+ /* Accept all data frames */
+ rtw_write16(pAdapter, REG_RXFLTMAP2, 0xFFFF);
+ } else {
+ rtw_write32(pAdapter, REG_RCR, 0);
+ }
+}
+
+void ResetPhyRxPktCount(struct adapter *pAdapter)
+{
+ u32 i, phyrx_set = 0;
+
+ for (i = 0; i <= 0xF; i++) {
+ phyrx_set = 0;
+ phyrx_set |= _RXERR_RPT_SEL(i); /* select */
+ phyrx_set |= RXERR_RPT_RST; /* set counter to zero */
+ rtw_write32(pAdapter, REG_RXERR_RPT, phyrx_set);
+ }
+}
+
+static u32 GetPhyRxPktCounts(struct adapter *pAdapter, u32 selbit)
+{
+ /* selection */
+ u32 phyrx_set = 0, count = 0;
+
+ phyrx_set = _RXERR_RPT_SEL(selbit & 0xF);
+ rtw_write32(pAdapter, REG_RXERR_RPT, phyrx_set);
+
+ /* Read packet count */
+ count = rtw_read32(pAdapter, REG_RXERR_RPT) & RXERR_COUNTER_MASK;
+
+ return count;
+}
+
+u32 GetPhyRxPktReceived(struct adapter *pAdapter)
+{
+ u32 OFDM_cnt = 0, CCK_cnt = 0, HT_cnt = 0;
+
+ OFDM_cnt = GetPhyRxPktCounts(pAdapter, RXERR_TYPE_OFDM_MPDU_OK);
+ CCK_cnt = GetPhyRxPktCounts(pAdapter, RXERR_TYPE_CCK_MPDU_OK);
+ HT_cnt = GetPhyRxPktCounts(pAdapter, RXERR_TYPE_HT_MPDU_OK);
+
+ return OFDM_cnt + CCK_cnt + HT_cnt;
+}
+
+u32 GetPhyRxPktCRC32Error(struct adapter *pAdapter)
+{
+ u32 OFDM_cnt = 0, CCK_cnt = 0, HT_cnt = 0;
+
+ OFDM_cnt = GetPhyRxPktCounts(pAdapter, RXERR_TYPE_OFDM_MPDU_FAIL);
+ CCK_cnt = GetPhyRxPktCounts(pAdapter, RXERR_TYPE_CCK_MPDU_FAIL);
+ HT_cnt = GetPhyRxPktCounts(pAdapter, RXERR_TYPE_HT_MPDU_FAIL);
+
+ return OFDM_cnt + CCK_cnt + HT_cnt;
+}
+
+/* reg 0x808[9:0]: FFT data x */
+/* reg 0x808[22]: 0 --> 1 to get 1 FFT data y */
+/* reg 0x8B4[15:0]: FFT data y report */
+static u32 rtw_GetPSDData(struct adapter *pAdapter, u32 point)
+{
+ int psd_val;
+
+
+ psd_val = rtw_read32(pAdapter, 0x808);
+ psd_val &= 0xFFBFFC00;
+ psd_val |= point;
+
+ rtw_write32(pAdapter, 0x808, psd_val);
+ rtw_mdelay_os(1);
+ psd_val |= 0x00400000;
+
+ rtw_write32(pAdapter, 0x808, psd_val);
+ rtw_mdelay_os(1);
+ psd_val = rtw_read32(pAdapter, 0x8B4);
+
+ psd_val &= 0x0000FFFF;
+
+ return psd_val;
+}
+
+/*
+ *pts start_point_min stop_point_max
+ * 128 64 64 + 128 = 192
+ * 256 128 128 + 256 = 384
+ * 512 256 256 + 512 = 768
+ * 1024 512 512 + 1024 = 1536
+ *
+ */
+u32 mp_query_psd(struct adapter *pAdapter, u8 *data)
+{
+ u32 i, psd_pts = 0, psd_start = 0, psd_stop = 0;
+ u32 psd_data = 0;
+
+
+ if (!netif_running(pAdapter->pnetdev)) {
+ RT_TRACE(_module_mp_, _drv_warning_, ("mp_query_psd: Fail! interface not opened!\n"));
+ return 0;
+ }
+
+ if (check_fwstate(&pAdapter->mlmepriv, WIFI_MP_STATE) == false) {
+ RT_TRACE(_module_mp_, _drv_warning_, ("mp_query_psd: Fail! not in MP mode!\n"));
+ return 0;
+ }
+
+ if (strlen(data) == 0) { /* default value */
+ psd_pts = 128;
+ psd_start = 64;
+ psd_stop = 128;
+ } else {
+ sscanf(data, "pts =%d, start =%d, stop =%d", &psd_pts, &psd_start, &psd_stop);
+ }
+
+ _rtw_memset(data, '\0', sizeof(*data));
+
+ i = psd_start;
+ while (i < psd_stop) {
+ if (i >= psd_pts) {
+ psd_data = rtw_GetPSDData(pAdapter, i-psd_pts);
+ } else {
+ psd_data = rtw_GetPSDData(pAdapter, i);
+ }
+ sprintf(data, "%s%x ", data, psd_data);
+ i++;
+ }
+
+ rtw_msleep_os(100);
+ return strlen(data)+1;
+}
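
mp_query_psd() above sweeps FFT points from psd_start to psd_stop, wrapping any index at or beyond psd_pts back by psd_pts before handing it to rtw_GetPSDData(); per the table, psd_pts = 256 would allow a sweep from 128 up to 384. A minimal sketch of the index wrap (illustrative helper, not part of the driver):

	/* map a sweep position onto the 0..psd_pts-1 FFT point programmed into 0x808 */
	static u32 psd_fft_point(u32 i, u32 psd_pts)
	{
		return i >= psd_pts ? i - psd_pts : i;
	}
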
+
+void _rtw_mp_xmit_priv(struct xmit_priv *pxmitpriv)
+{
+ int i, res;
+ struct adapter *padapter = pxmitpriv->adapter;
+ struct xmit_buf *pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
+
+ u32 max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ;
+ u32 num_xmit_extbuf = NR_XMIT_EXTBUFF;
+ if (padapter->registrypriv.mp_mode == 0) {
+ max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ;
+ num_xmit_extbuf = NR_XMIT_EXTBUFF;
+ } else {
+ max_xmit_extbuf_size = 20000;
+ num_xmit_extbuf = 1;
+ }
+
+ pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
+ for (i = 0; i < num_xmit_extbuf; i++) {
+ rtw_os_xmit_resource_free(padapter, pxmitbuf, (max_xmit_extbuf_size + XMITBUF_ALIGN_SZ));
+
+ pxmitbuf++;
+ }
+
+ if (pxmitpriv->pallocated_xmit_extbuf)
+ rtw_vmfree(pxmitpriv->pallocated_xmit_extbuf, num_xmit_extbuf * sizeof(struct xmit_buf) + 4);
+
+ if (padapter->registrypriv.mp_mode == 0) {
+ max_xmit_extbuf_size = 20000;
+ num_xmit_extbuf = 1;
+ } else {
+ max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ;
+ num_xmit_extbuf = NR_XMIT_EXTBUFF;
+ }
+
+ /* Init xmit extension buff */
+ _rtw_init_queue(&pxmitpriv->free_xmit_extbuf_queue);
+
+ pxmitpriv->pallocated_xmit_extbuf = rtw_zvmalloc(num_xmit_extbuf * sizeof(struct xmit_buf) + 4);
+
+ if (pxmitpriv->pallocated_xmit_extbuf == NULL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_extbuf fail!\n"));
+ res = _FAIL;
+ goto exit;
+ }
+
+ pxmitpriv->pxmit_extbuf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitpriv->pallocated_xmit_extbuf), 4);
+
+ pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
+
+ for (i = 0; i < num_xmit_extbuf; i++) {
+ _rtw_init_listhead(&pxmitbuf->list);
+
+ pxmitbuf->priv_data = NULL;
+ pxmitbuf->padapter = padapter;
+ pxmitbuf->ext_tag = true;
+
+ res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, max_xmit_extbuf_size + XMITBUF_ALIGN_SZ);
+ if (res == _FAIL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ rtw_list_insert_tail(&pxmitbuf->list, &(pxmitpriv->free_xmit_extbuf_queue.queue));
+ pxmitbuf++;
+ }
+
+ pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
+
+exit:
+ ;
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c b/drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c
new file mode 100644
index 0000000..f06312c
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c
@@ -0,0 +1,1508 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_MP_IOCTL_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <mlme_osdep.h>
+
+/* include <rtw_mp.h> */
+#include <rtw_mp_ioctl.h>
+
+
+/* rtl8188eu_oid_rtl_seg_81_85 section start **************** */
+int rtl8188eu_oid_rt_wireless_mode_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ if (poid_par_priv->information_buf_len < sizeof(u8))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ if (poid_par_priv->type_of_oid == SET_OID) {
+ Adapter->registrypriv.wireless_mode = *(u8 *)poid_par_priv->information_buf;
+ } else if (poid_par_priv->type_of_oid == QUERY_OID) {
+ *(u8 *)poid_par_priv->information_buf = Adapter->registrypriv.wireless_mode;
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
+ RT_TRACE(_module_mp_, _drv_info_, ("-query Wireless Mode=%d\n", Adapter->registrypriv.wireless_mode));
+ } else {
+ status = NDIS_STATUS_NOT_ACCEPTED;
+ }
+
+_func_exit_;
+
+ return status;
+}
+/* rtl8188eu_oid_rtl_seg_81_87_80 section start **************** */
+int rtl8188eu_oid_rt_pro_write_bb_reg_hdl(struct oid_par_priv *poid_par_priv)
+{
+ struct bb_reg_param *pbbreg;
+ u16 offset;
+ u32 value;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_write_bb_reg_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->information_buf_len < sizeof(struct bb_reg_param))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ pbbreg = (struct bb_reg_param *)(poid_par_priv->information_buf);
+
+	offset = (u16)(pbbreg->offset) & 0xFFF; /* offset: 0x800~0xfff */
+ if (offset < BB_REG_BASE_ADDR)
+ offset |= BB_REG_BASE_ADDR;
+
+ value = pbbreg->value;
+
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("rtl8188eu_oid_rt_pro_write_bb_reg_hdl: offset=0x%03X value=0x%08X\n",
+ offset, value));
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ write_bbreg(Adapter, offset, 0xFFFFFFFF, value);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_read_bb_reg_hdl(struct oid_par_priv *poid_par_priv)
+{
+ struct bb_reg_param *pbbreg;
+ u16 offset;
+ u32 value;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_read_bb_reg_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->information_buf_len < sizeof(struct bb_reg_param))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ pbbreg = (struct bb_reg_param *)(poid_par_priv->information_buf);
+
+	offset = (u16)(pbbreg->offset) & 0xFFF; /* offset: 0x800~0xfff */
+ if (offset < BB_REG_BASE_ADDR)
+ offset |= BB_REG_BASE_ADDR;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ value = read_bbreg(Adapter, offset, 0xFFFFFFFF);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+ pbbreg->value = value;
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
+
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("-rtl8188eu_oid_rt_pro_read_bb_reg_hdl: offset=0x%03X value:0x%08X\n",
+ offset, value));
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_write_rf_reg_hdl(struct oid_par_priv *poid_par_priv)
+{
+ struct rf_reg_param *pbbreg;
+ u8 path;
+ u8 offset;
+ u32 value;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_write_rf_reg_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->information_buf_len < sizeof(struct rf_reg_param))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ pbbreg = (struct rf_reg_param *)(poid_par_priv->information_buf);
+
+ if (pbbreg->path >= MAX_RF_PATH_NUMS)
+ return NDIS_STATUS_NOT_ACCEPTED;
+ if (pbbreg->offset > 0xFF)
+ return NDIS_STATUS_NOT_ACCEPTED;
+ if (pbbreg->value > 0xFFFFF)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ path = (u8)pbbreg->path;
+ offset = (u8)pbbreg->offset;
+ value = pbbreg->value;
+
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("rtl8188eu_oid_rt_pro_write_rf_reg_hdl: path=%d offset=0x%02X value=0x%05X\n",
+ path, offset, value));
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ write_rfreg(Adapter, path, offset, value);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_read_rf_reg_hdl(struct oid_par_priv *poid_par_priv)
+{
+ struct rf_reg_param *pbbreg;
+ u8 path;
+ u8 offset;
+ u32 value;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+ int status = NDIS_STATUS_SUCCESS;
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_read_rf_reg_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->information_buf_len < sizeof(struct rf_reg_param))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ pbbreg = (struct rf_reg_param *)(poid_par_priv->information_buf);
+
+ if (pbbreg->path >= MAX_RF_PATH_NUMS)
+ return NDIS_STATUS_NOT_ACCEPTED;
+ if (pbbreg->offset > 0xFF)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ path = (u8)pbbreg->path;
+ offset = (u8)pbbreg->offset;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ value = read_rfreg(Adapter, path, offset);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+ pbbreg->value = value;
+
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
+
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("-rtl8188eu_oid_rt_pro_read_rf_reg_hdl: path=%d offset=0x%02X value=0x%05X\n",
+ path, offset, value));
+
+_func_exit_;
+
+ return status;
+}
+/* rtl8188eu_oid_rtl_seg_81_87_00 section end**************** */
+/* */
+
+/* rtl8188eu_oid_rtl_seg_81_80_00 section start **************** */
+/* */
+int rtl8188eu_oid_rt_pro_set_data_rate_hdl(struct oid_par_priv *poid_par_priv)
+{
+ u32 ratevalue;/* 4 */
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("+rtl8188eu_oid_rt_pro_set_data_rate_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->information_buf_len != sizeof(u32))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ ratevalue = *((u32 *)poid_par_priv->information_buf);/* 4 */
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("rtl8188eu_oid_rt_pro_set_data_rate_hdl: data rate idx=%d\n", ratevalue));
+ if (ratevalue >= MPT_RATE_LAST)
+ return NDIS_STATUS_INVALID_DATA;
+
+ Adapter->mppriv.rateidx = ratevalue;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ SetDataRate(Adapter);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_start_test_hdl(struct oid_par_priv *poid_par_priv)
+{
+ u32 mode;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_start_test_hdl\n"));
+
+ if (Adapter->registrypriv.mp_mode == 0)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+
+ /* IQCalibrateBcut(Adapter); */
+
+ mode = *((u32 *)poid_par_priv->information_buf);
+ Adapter->mppriv.mode = mode;/* 1 for loopback */
+
+ if (mp_start_test(Adapter) == _FAIL) {
+ status = NDIS_STATUS_NOT_ACCEPTED;
+ goto exit;
+ }
+
+exit:
+ _irqlevel_changed_(&oldirql, RAISE);
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("-rtl8188eu_oid_rt_pro_start_test_hdl: mp_mode=%d\n", Adapter->mppriv.mode));
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_stop_test_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+Set OID_RT_PRO_STOP_TEST\n"));
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ mp_stop_test(Adapter);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("-Set OID_RT_PRO_STOP_TEST\n"));
+
+_func_exit_;
+
+ return status;
+}
+/* */
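+/*
+ * Set or query the MP test channel.  A QUERY_OID returns the current
+ * channel; a SET_OID accepts channels up to 14 only (anything above is
+ * rejected here).
+ */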
+int rtl8188eu_oid_rt_pro_set_channel_direct_call_hdl(struct oid_par_priv *poid_par_priv)
+{
+ u32 Channel;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_set_channel_direct_call_hdl\n"));
+
+ if (poid_par_priv->information_buf_len != sizeof(u32))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ if (poid_par_priv->type_of_oid == QUERY_OID) {
+ *((u32 *)poid_par_priv->information_buf) = Adapter->mppriv.channel;
+ return NDIS_STATUS_SUCCESS;
+ }
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ Channel = *((u32 *)poid_par_priv->information_buf);
+ RT_TRACE(_module_mp_, _drv_notice_, ("rtl8188eu_oid_rt_pro_set_channel_direct_call_hdl: Channel=%d\n", Channel));
+ if (Channel > 14)
+ return NDIS_STATUS_NOT_ACCEPTED;
+ Adapter->mppriv.channel = Channel;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ SetChannel(Adapter);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_set_bandwidth_hdl(struct oid_par_priv *poid_par_priv)
+{
+ u16 bandwidth;
+ u16 channel_offset;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *padapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_info_,
+ ("+rtl8188eu_oid_rt_set_bandwidth_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->information_buf_len < sizeof(u32))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ bandwidth = *((u32 *)poid_par_priv->information_buf);/* 4 */
+ channel_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+
+ if (bandwidth != HT_CHANNEL_WIDTH_40)
+ bandwidth = HT_CHANNEL_WIDTH_20;
+ padapter->mppriv.bandwidth = (u8)bandwidth;
+ padapter->mppriv.prime_channel_offset = (u8)channel_offset;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ SetBandwidth(padapter);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("-rtl8188eu_oid_rt_set_bandwidth_hdl: bandwidth=%d channel_offset=%d\n",
+ bandwidth, channel_offset));
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_set_antenna_bb_hdl(struct oid_par_priv *poid_par_priv)
+{
+ u32 antenna;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_set_antenna_bb_hdl\n"));
+
+ if (poid_par_priv->information_buf_len != sizeof(u32))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ if (poid_par_priv->type_of_oid == SET_OID) {
+ antenna = *(u32 *)poid_par_priv->information_buf;
+
+ Adapter->mppriv.antenna_tx = (u16)((antenna & 0xFFFF0000) >> 16);
+ Adapter->mppriv.antenna_rx = (u16)(antenna & 0x0000FFFF);
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("rtl8188eu_oid_rt_pro_set_antenna_bb_hdl: tx_ant=0x%04x rx_ant=0x%04x\n",
+ Adapter->mppriv.antenna_tx, Adapter->mppriv.antenna_rx));
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ SetAntenna(Adapter);
+ _irqlevel_changed_(&oldirql, RAISE);
+ } else {
+ antenna = (Adapter->mppriv.antenna_tx << 16)|Adapter->mppriv.antenna_rx;
+ *(u32 *)poid_par_priv->information_buf = antenna;
+ }
+
+_func_exit_;
+
+ return status;
+}
+
+int rtl8188eu_oid_rt_pro_set_tx_power_control_hdl(struct oid_par_priv *poid_par_priv)
+{
+ u32 tx_pwr_idx;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_info_, ("+rtl8188eu_oid_rt_pro_set_tx_power_control_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->information_buf_len != sizeof(u32))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ tx_pwr_idx = *((u32 *)poid_par_priv->information_buf);
+ if (tx_pwr_idx > MAX_TX_PWR_INDEX_N_MODE)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ Adapter->mppriv.txpoweridx = (u8)tx_pwr_idx;
+
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("rtl8188eu_oid_rt_pro_set_tx_power_control_hdl: idx=0x%2x\n",
+ Adapter->mppriv.txpoweridx));
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ SetTxPower(Adapter);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+
+/* */
+/* rtl8188eu_oid_rtl_seg_81_80_20 section start **************** */
+/* */
+int rtl8188eu_oid_rt_pro_query_tx_packet_sent_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ if (poid_par_priv->type_of_oid != QUERY_OID) {
+ status = NDIS_STATUS_NOT_ACCEPTED;
+ return status;
+ }
+
+ if (poid_par_priv->information_buf_len == sizeof(u32)) {
+ *(u32 *)poid_par_priv->information_buf = Adapter->mppriv.tx_pktcount;
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
+ } else {
+ status = NDIS_STATUS_INVALID_LENGTH;
+ }
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_query_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ if (poid_par_priv->type_of_oid != QUERY_OID) {
+ status = NDIS_STATUS_NOT_ACCEPTED;
+ return status;
+ }
+ RT_TRACE(_module_mp_, _drv_alert_, ("===> rtl8188eu_oid_rt_pro_query_rx_packet_received_hdl.\n"));
+ if (poid_par_priv->information_buf_len == sizeof(u32)) {
+ *(u32 *)poid_par_priv->information_buf = Adapter->mppriv.rx_pktcount;
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
+ RT_TRACE(_module_mp_, _drv_alert_, ("recv_ok:%d\n", Adapter->mppriv.rx_pktcount));
+ } else {
+ status = NDIS_STATUS_INVALID_LENGTH;
+ }
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_query_rx_packet_crc32_error_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ if (poid_par_priv->type_of_oid != QUERY_OID) {
+ status = NDIS_STATUS_NOT_ACCEPTED;
+ return status;
+ }
+ RT_TRACE(_module_mp_, _drv_alert_, ("===> rtl8188eu_oid_rt_pro_query_rx_packet_crc32_error_hdl.\n"));
+ if (poid_par_priv->information_buf_len == sizeof(u32)) {
+ *(u32 *)poid_par_priv->information_buf = Adapter->mppriv.rx_crcerrpktcount;
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
+ RT_TRACE(_module_mp_, _drv_alert_, ("recv_err:%d\n", Adapter->mppriv.rx_crcerrpktcount));
+ } else {
+ status = NDIS_STATUS_INVALID_LENGTH;
+ }
+
+_func_exit_;
+
+ return status;
+}
+/* */
+
+int rtl8188eu_oid_rt_pro_reset_tx_packet_sent_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ if (poid_par_priv->type_of_oid != SET_OID) {
+ status = NDIS_STATUS_NOT_ACCEPTED;
+ return status;
+ }
+
+ RT_TRACE(_module_mp_, _drv_alert_, ("===> rtl8188eu_oid_rt_pro_reset_tx_packet_sent_hdl.\n"));
+ Adapter->mppriv.tx_pktcount = 0;
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_reset_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ if (poid_par_priv->type_of_oid != SET_OID) {
+ status = NDIS_STATUS_NOT_ACCEPTED;
+ return status;
+ }
+
+ if (poid_par_priv->information_buf_len == sizeof(u32)) {
+ Adapter->mppriv.rx_pktcount = 0;
+ Adapter->mppriv.rx_crcerrpktcount = 0;
+ } else {
+ status = NDIS_STATUS_INVALID_LENGTH;
+ }
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_reset_phy_rx_packet_count_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ if (poid_par_priv->type_of_oid != SET_OID) {
+ status = NDIS_STATUS_NOT_ACCEPTED;
+ return status;
+ }
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ ResetPhyRxPktCount(Adapter);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_get_phy_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_info_, ("+rtl8188eu_oid_rt_get_phy_rx_packet_received_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->information_buf_len != sizeof(u32))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ *(u32 *)poid_par_priv->information_buf = GetPhyRxPktReceived(Adapter);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("-rtl8188eu_oid_rt_get_phy_rx_packet_received_hdl: recv_ok=%d\n", *(u32 *)poid_par_priv->information_buf));
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_get_phy_rx_packet_crc32_error_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_info_, ("+rtl8188eu_oid_rt_get_phy_rx_packet_crc32_error_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+
+ if (poid_par_priv->information_buf_len != sizeof(u32))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ *(u32 *)poid_par_priv->information_buf = GetPhyRxPktCRC32Error(Adapter);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
+
+ RT_TRACE(_module_mp_, _drv_info_,
+ ("-rtl8188eu_oid_rt_get_phy_rx_packet_crc32_error_hdl: recv_err =%d\n",
+ *(u32 *)poid_par_priv->information_buf));
+
+_func_exit_;
+
+ return status;
+}
+/* rtl8188eu_oid_rtl_seg_81_80_20 section end **************** */
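+/*
+ * The continuous-tx, single-carrier and carrier-suppression handlers below
+ * share one pattern: enable or disable the PHY test mode and, when starting
+ * a test while packet tx is still running, stop it, wait briefly and then
+ * restart a one-frame transmission.  The single-tone handler only toggles
+ * the PHY mode.
+ */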
+int rtl8188eu_oid_rt_pro_set_continuous_tx_hdl(struct oid_par_priv *poid_par_priv)
+{
+ u32 bStartTest;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_set_continuous_tx_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ bStartTest = *((u32 *)poid_par_priv->information_buf);
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ SetContinuousTx(Adapter, (u8)bStartTest);
+ if (bStartTest) {
+ struct mp_priv *pmp_priv = &Adapter->mppriv;
+ if (pmp_priv->tx.stop == 0) {
+ pmp_priv->tx.stop = 1;
+ DBG_88E("%s: pkt tx is running...\n", __func__);
+ rtw_msleep_os(5);
+ }
+ pmp_priv->tx.stop = 0;
+ pmp_priv->tx.count = 1;
+ SetPacketTx(Adapter);
+ }
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+
+int rtl8188eu_oid_rt_pro_set_single_carrier_tx_hdl(struct oid_par_priv *poid_par_priv)
+{
+ u32 bStartTest;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_alert_, ("+rtl8188eu_oid_rt_pro_set_single_carrier_tx_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ bStartTest = *((u32 *)poid_par_priv->information_buf);
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ SetSingleCarrierTx(Adapter, (u8)bStartTest);
+ if (bStartTest) {
+ struct mp_priv *pmp_priv = &Adapter->mppriv;
+ if (pmp_priv->tx.stop == 0) {
+ pmp_priv->tx.stop = 1;
+ DBG_88E("%s: pkt tx is running...\n", __func__);
+ rtw_msleep_os(5);
+ }
+ pmp_priv->tx.stop = 0;
+ pmp_priv->tx.count = 1;
+ SetPacketTx(Adapter);
+ }
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+
+int rtl8188eu_oid_rt_pro_set_carrier_suppression_tx_hdl(struct oid_par_priv *poid_par_priv)
+{
+ u32 bStartTest;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_set_carrier_suppression_tx_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ bStartTest = *((u32 *)poid_par_priv->information_buf);
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ SetCarrierSuppressionTx(Adapter, (u8)bStartTest);
+ if (bStartTest) {
+ struct mp_priv *pmp_priv = &Adapter->mppriv;
+ if (pmp_priv->tx.stop == 0) {
+ pmp_priv->tx.stop = 1;
+ DBG_88E("%s: pkt tx is running...\n", __func__);
+ rtw_msleep_os(5);
+ }
+ pmp_priv->tx.stop = 0;
+ pmp_priv->tx.count = 1;
+ SetPacketTx(Adapter);
+ }
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+
+int rtl8188eu_oid_rt_pro_set_single_tone_tx_hdl(struct oid_par_priv *poid_par_priv)
+{
+ u32 bStartTest;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_alert_, ("+rtl8188eu_oid_rt_pro_set_single_tone_tx_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ bStartTest = *((u32 *)poid_par_priv->information_buf);
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ SetSingleToneTx(Adapter, (u8)bStartTest);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+
+int rtl8188eu_oid_rt_pro_set_modulation_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+
+int rtl8188eu_oid_rt_pro_trigger_gpio_hdl(struct oid_par_priv *poid_par_priv)
+{
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+ int status = NDIS_STATUS_SUCCESS;
+_func_enter_;
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ rtw_hal_set_hwreg(Adapter, HW_VAR_TRIGGER_GPIO_0, NULL);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+/* rtl8188eu_oid_rtl_seg_81_80_00 section end **************** */
+/* */
+int rtl8188eu_oid_rt_pro8711_join_bss_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
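+/* Read an 8-, 16- or 32-bit MAC register; any other width falls back to a 32-bit read. */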
+int rtl8188eu_oid_rt_pro_read_register_hdl(struct oid_par_priv *poid_par_priv)
+{
+ struct mp_rw_reg *RegRWStruct;
+ u32 offset, width;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_info_,
+ ("+rtl8188eu_oid_rt_pro_read_register_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ RegRWStruct = (struct mp_rw_reg *)poid_par_priv->information_buf;
+ offset = RegRWStruct->offset;
+ width = RegRWStruct->width;
+
+ if (offset > 0xFFF)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+
+ switch (width) {
+ case 1:
+ RegRWStruct->value = rtw_read8(Adapter, offset);
+ break;
+ case 2:
+ RegRWStruct->value = rtw_read16(Adapter, offset);
+ break;
+ default:
+ width = 4;
+ RegRWStruct->value = rtw_read32(Adapter, offset);
+ break;
+ }
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("rtl8188eu_oid_rt_pro_read_register_hdl: offset:0x%04X value:0x%X\n",
+ offset, RegRWStruct->value));
+
+ _irqlevel_changed_(&oldirql, RAISE);
+
+ *poid_par_priv->bytes_rw = width;
+
+_func_exit_;
+
+ return status;
+}
+/* */
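+/* Write an 8-, 16- or 32-bit MAC register; unsupported widths and values that do not fit the width are rejected. */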
+int rtl8188eu_oid_rt_pro_write_register_hdl(struct oid_par_priv *poid_par_priv)
+{
+ struct mp_rw_reg *RegRWStruct;
+ u32 offset, width, value;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *padapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_info_,
+ ("+rtl8188eu_oid_rt_pro_write_register_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ RegRWStruct = (struct mp_rw_reg *)poid_par_priv->information_buf;
+ offset = RegRWStruct->offset;
+ width = RegRWStruct->width;
+ value = RegRWStruct->value;
+
+ if (offset > 0xFFF)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+
+ switch (RegRWStruct->width) {
+ case 1:
+ if (value > 0xFF) {
+ status = NDIS_STATUS_NOT_ACCEPTED;
+ break;
+ }
+ rtw_write8(padapter, offset, (u8)value);
+ break;
+ case 2:
+ if (value > 0xFFFF) {
+ status = NDIS_STATUS_NOT_ACCEPTED;
+ break;
+ }
+ rtw_write16(padapter, offset, (u16)value);
+ break;
+ case 4:
+ rtw_write32(padapter, offset, value);
+ break;
+ default:
+ status = NDIS_STATUS_NOT_ACCEPTED;
+ break;
+ }
+
+ _irqlevel_changed_(&oldirql, RAISE);
+
+ RT_TRACE(_module_mp_, _drv_info_,
+ ("-rtl8188eu_oid_rt_pro_write_register_hdl: offset=0x%08X width=%d value=0x%X\n",
+ offset, width, value));
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_burst_read_register_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_pro_burst_write_register_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_pro_write_txcmd_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+
+/* */
+int rtl8188eu_oid_rt_pro_read16_eeprom_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+
+/* */
+int rtl8188eu_oid_rt_pro_write16_eeprom_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_pro8711_wi_poll_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_pro8711_pkt_loss_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_rd_attrib_mem_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_wr_attrib_mem_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_pro_set_rf_intfs_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_poll_rx_status_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_pro_cfg_debug_message_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_pro_set_data_rate_ex_hdl(struct oid_par_priv *poid_par_priv)
+{
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+ int status = NDIS_STATUS_SUCCESS;
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+OID_RT_PRO_SET_DATA_RATE_EX\n"));
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+
+ if (rtw_setdatarate_cmd(Adapter, poid_par_priv->information_buf) != _SUCCESS)
+ status = NDIS_STATUS_NOT_ACCEPTED;
+
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_get_thermal_meter_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+ u8 thermal = 0;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_get_thermal_meter_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->information_buf_len < sizeof(u32))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ GetThermalMeter(Adapter, &thermal);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+ *(u32 *)poid_par_priv->information_buf = (u32)thermal;
+ *poid_par_priv->bytes_rw = sizeof(u32);
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_read_tssi_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_pro_set_power_tracking_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+
+_func_enter_;
+
+ if (poid_par_priv->information_buf_len < sizeof(u8))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ if (poid_par_priv->type_of_oid == SET_OID) {
+ u8 enable;
+
+ enable = *(u8 *)poid_par_priv->information_buf;
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("+rtl8188eu_oid_rt_pro_set_power_tracking_hdl: enable =%d\n", enable));
+
+ SetPowerTracking(Adapter, enable);
+ } else {
+ GetPowerTracking(Adapter, (u8 *)poid_par_priv->information_buf);
+ }
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_set_basic_rate_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_pro_qry_pwrstate_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_pro_set_pwrstate_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_pro_h2c_set_rate_table_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_pro_h2c_get_rate_table_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+
+/* rtl8188eu_oid_rtl_seg_87_12_00 section start **************** */
+int rtl8188eu_oid_rt_pro_encryption_ctrl_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+
+int rtl8188eu_oid_rt_pro_add_sta_info_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+
+int rtl8188eu_oid_rt_pro_dele_sta_info_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+
+int rtl8188eu_oid_rt_pro_query_dr_variable_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+
+int rtl8188eu_oid_rt_pro_rx_packet_type_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return NDIS_STATUS_SUCCESS;
+}
+/* */
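+/*
+ * Read 'cnts' raw efuse bytes starting at 'start_addr' into the caller's
+ * buffer, after checking the range against the number of available efuse
+ * bytes reported by EFUSE_GetEfuseDefinition().
+ */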
+int rtl8188eu_oid_rt_pro_read_efuse_hdl(struct oid_par_priv *poid_par_priv)
+{
+ struct efuse_access_struct *pefuse;
+ u8 *data;
+ u16 addr = 0, cnts = 0, max_available_size = 0;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->information_buf_len < sizeof(struct efuse_access_struct))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ pefuse = (struct efuse_access_struct *)poid_par_priv->information_buf;
+ addr = pefuse->start_addr;
+ cnts = pefuse->cnts;
+ data = pefuse->data;
+
+ RT_TRACE(_module_mp_, _drv_notice_,
+		 ("+rtl8188eu_oid_rt_pro_read_efuse_hdl: buf_len=%d addr=%d cnts=%d\n",
+ poid_par_priv->information_buf_len, addr, cnts));
+
+ EFUSE_GetEfuseDefinition(Adapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
+
+ if ((addr + cnts) > max_available_size) {
+ RT_TRACE(_module_mp_, _drv_err_, ("!rtl8188eu_oid_rt_pro_read_efuse_hdl: parameter error!\n"));
+ return NDIS_STATUS_NOT_ACCEPTED;
+ }
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ if (rtw_efuse_access(Adapter, false, addr, cnts, data) == _FAIL) {
+ RT_TRACE(_module_mp_, _drv_err_, ("!rtl8188eu_oid_rt_pro_read_efuse_hdl: rtw_efuse_access FAIL!\n"));
+ status = NDIS_STATUS_FAILURE;
+ } else {
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
+ }
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_write_efuse_hdl(struct oid_par_priv *poid_par_priv)
+{
+ struct efuse_access_struct *pefuse;
+ u8 *data;
+ u16 addr = 0, cnts = 0, max_available_size = 0;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+
+_func_enter_;
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ pefuse = (struct efuse_access_struct *)poid_par_priv->information_buf;
+ addr = pefuse->start_addr;
+ cnts = pefuse->cnts;
+ data = pefuse->data;
+
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("+rtl8188eu_oid_rt_pro_write_efuse_hdl: buf_len=%d addr=0x%04x cnts=%d\n",
+ poid_par_priv->information_buf_len, addr, cnts));
+
+ EFUSE_GetEfuseDefinition(Adapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
+
+ if ((addr + cnts) > max_available_size) {
+ RT_TRACE(_module_mp_, _drv_err_, ("!rtl8188eu_oid_rt_pro_write_efuse_hdl: parameter error"));
+ return NDIS_STATUS_NOT_ACCEPTED;
+ }
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ if (rtw_efuse_access(Adapter, true, addr, cnts, data) == _FAIL)
+ status = NDIS_STATUS_FAILURE;
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+/* */
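+/*
+ * Read (QUERY_OID) or write (SET_OID) a single efuse PG packet, switching
+ * efuse power on before the access and off again afterwards.
+ */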
+int rtl8188eu_oid_rt_pro_rw_efuse_pgpkt_hdl(struct oid_par_priv *poid_par_priv)
+{
+ struct pgpkt *ppgpkt;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ *poid_par_priv->bytes_rw = 0;
+
+	if (poid_par_priv->information_buf_len < sizeof(struct pgpkt))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ ppgpkt = (struct pgpkt *)poid_par_priv->information_buf;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+
+ if (poid_par_priv->type_of_oid == QUERY_OID) {
+ RT_TRACE(_module_mp_, _drv_notice_,
+			 ("rtl8188eu_oid_rt_pro_rw_efuse_pgpkt_hdl: Read offset=0x%x\n",
+ ppgpkt->offset));
+
+ Efuse_PowerSwitch(Adapter, false, true);
+ if (Efuse_PgPacketRead(Adapter, ppgpkt->offset, ppgpkt->data, false) == true)
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
+ else
+ status = NDIS_STATUS_FAILURE;
+ Efuse_PowerSwitch(Adapter, false, false);
+ } else {
+ RT_TRACE(_module_mp_, _drv_notice_,
+			 ("rtl8188eu_oid_rt_pro_rw_efuse_pgpkt_hdl: Write offset=0x%x word_en=0x%x\n",
+ ppgpkt->offset, ppgpkt->word_en));
+
+ Efuse_PowerSwitch(Adapter, true, true);
+ if (Efuse_PgPacketWrite(Adapter, ppgpkt->offset, ppgpkt->word_en, ppgpkt->data, false) == true)
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
+ else
+ status = NDIS_STATUS_FAILURE;
+ Efuse_PowerSwitch(Adapter, true, false);
+ }
+
+ _irqlevel_changed_(&oldirql, RAISE);
+
+ RT_TRACE(_module_mp_, _drv_info_,
+ ("-rtl8188eu_oid_rt_pro_rw_efuse_pgpkt_hdl: status=0x%08X\n", status));
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_get_efuse_current_size_hdl(struct oid_par_priv *poid_par_priv)
+{
+ u16 size;
+ u8 ret;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->information_buf_len < sizeof(u32))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ ret = efuse_GetCurrentSize(Adapter, &size);
+ _irqlevel_changed_(&oldirql, RAISE);
+ if (ret == _SUCCESS) {
+ *(u32 *)poid_par_priv->information_buf = size;
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
+ } else {
+ status = NDIS_STATUS_FAILURE;
+ }
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_get_efuse_max_size_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->information_buf_len < sizeof(u32))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ *(u32 *)poid_par_priv->information_buf = efuse_GetMaxSize(Adapter);
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
+
+ RT_TRACE(_module_mp_, _drv_info_,
+ ("-rtl8188eu_oid_rt_get_efuse_max_size_hdl: size=%d status=0x%08X\n",
+ *(int *)poid_par_priv->information_buf, status));
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_efuse_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status;
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_info_, ("+rtl8188eu_oid_rt_pro_efuse_hdl\n"));
+
+ if (poid_par_priv->type_of_oid == QUERY_OID)
+ status = rtl8188eu_oid_rt_pro_read_efuse_hdl(poid_par_priv);
+ else
+ status = rtl8188eu_oid_rt_pro_write_efuse_hdl(poid_par_priv);
+
+ RT_TRACE(_module_mp_, _drv_info_, ("-rtl8188eu_oid_rt_pro_efuse_hdl: status=0x%08X\n", status));
+
+_func_exit_;
+
+ return status;
+}
+/* */
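+/*
+ * Read or write the complete logical efuse map.  The caller's buffer must be
+ * at least the map length reported by EFUSE_GetEfuseDefinition().
+ */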
+int rtl8188eu_oid_rt_pro_efuse_map_hdl(struct oid_par_priv *poid_par_priv)
+{
+ u8 *data;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+ u16 maplen = 0;
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_efuse_map_hdl\n"));
+
+ EFUSE_GetEfuseDefinition(Adapter, EFUSE_WIFI, TYPE_EFUSE_MAP_LEN, (void *)&maplen, false);
+
+ *poid_par_priv->bytes_rw = 0;
+
+ if (poid_par_priv->information_buf_len < maplen)
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ data = (u8 *)poid_par_priv->information_buf;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+
+ if (poid_par_priv->type_of_oid == QUERY_OID) {
+ RT_TRACE(_module_mp_, _drv_info_,
+ ("rtl8188eu_oid_rt_pro_efuse_map_hdl: READ\n"));
+
+ if (rtw_efuse_map_read(Adapter, 0, maplen, data) == _SUCCESS) {
+ *poid_par_priv->bytes_rw = maplen;
+ } else {
+ RT_TRACE(_module_mp_, _drv_err_,
+ ("rtl8188eu_oid_rt_pro_efuse_map_hdl: READ fail\n"));
+ status = NDIS_STATUS_FAILURE;
+ }
+ } else {
+ /* SET_OID */
+ RT_TRACE(_module_mp_, _drv_info_,
+ ("rtl8188eu_oid_rt_pro_efuse_map_hdl: WRITE\n"));
+
+ if (rtw_efuse_map_write(Adapter, 0, maplen, data) == _SUCCESS) {
+ *poid_par_priv->bytes_rw = maplen;
+ } else {
+ RT_TRACE(_module_mp_, _drv_err_,
+ ("rtl8188eu_oid_rt_pro_efuse_map_hdl: WRITE fail\n"));
+ status = NDIS_STATUS_FAILURE;
+ }
+ }
+
+ _irqlevel_changed_(&oldirql, RAISE);
+
+ RT_TRACE(_module_mp_, _drv_info_,
+ ("-rtl8188eu_oid_rt_pro_efuse_map_hdl: status=0x%08X\n", status));
+
+_func_exit_;
+
+ return status;
+}
+
+int rtl8188eu_oid_rt_set_crystal_cap_hdl(struct oid_par_priv *poid_par_priv)
+{
+	return NDIS_STATUS_SUCCESS;
+}
+
+int rtl8188eu_oid_rt_set_rx_packet_type_hdl(struct oid_par_priv *poid_par_priv)
+{
+ u8 rx_pkt_type;
+ int status = NDIS_STATUS_SUCCESS;
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_set_rx_packet_type_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->information_buf_len < sizeof(u8))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+	rx_pkt_type = *((u8 *)poid_par_priv->information_buf);
+
+ RT_TRACE(_module_mp_, _drv_info_, ("rx_pkt_type: %x\n", rx_pkt_type));
+_func_exit_;
+
+ return status;
+}
+
+int rtl8188eu_oid_rt_pro_set_tx_agc_offset_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+
+int rtl8188eu_oid_rt_pro_set_pkt_test_mode_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+
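+/*
+ * Packet-tx control: a QUERY_OID reports whether tx is running and how many
+ * frames have been sent; a SET_OID either stops tx (enable == 0) or, if tx
+ * is currently stopped, starts it with the requested count, payload type,
+ * length and destination address.  Starting while tx is already running is
+ * rejected.
+ */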
+int rtl8188eu_mp_ioctl_xmit_packet_hdl(struct oid_par_priv *poid_par_priv)
+{
+ struct mp_xmit_parm *pparm;
+ struct adapter *padapter;
+ struct mp_priv *pmp_priv;
+ struct pkt_attrib *pattrib;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+%s\n", __func__));
+
+ pparm = (struct mp_xmit_parm *)poid_par_priv->information_buf;
+ padapter = (struct adapter *)poid_par_priv->adapter_context;
+ pmp_priv = &padapter->mppriv;
+
+ if (poid_par_priv->type_of_oid == QUERY_OID) {
+ pparm->enable = !pmp_priv->tx.stop;
+ pparm->count = pmp_priv->tx.sended;
+ } else {
+ if (pparm->enable == 0) {
+ pmp_priv->tx.stop = 1;
+ } else if (pmp_priv->tx.stop == 1) {
+ pmp_priv->tx.stop = 0;
+ pmp_priv->tx.count = pparm->count;
+ pmp_priv->tx.payload = pparm->payload_type;
+ pattrib = &pmp_priv->tx.attrib;
+ pattrib->pktlen = pparm->length;
+ memcpy(pattrib->dst, pparm->da, ETH_ALEN);
+ SetPacketTx(padapter);
+ } else {
+ return NDIS_STATUS_FAILURE;
+ }
+ }
+
+ return NDIS_STATUS_SUCCESS;
+}
+
+/* */
+int rtl8188eu_oid_rt_set_power_down_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+
+_func_enter_;
+
+ if (poid_par_priv->type_of_oid != SET_OID) {
+ status = NDIS_STATUS_NOT_ACCEPTED;
+ return status;
+ }
+
+ RT_TRACE(_module_mp_, _drv_info_,
+		 ("\n ===> Set rtl8188eu_oid_rt_set_power_down_hdl.\n"));
+
+ _irqlevel_changed_(&oldirql, LOWER);
+
+ /* CALL the power_down function */
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_get_power_mode_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_p2p.c b/drivers/staging/rtl8188eu/core/rtw_p2p.c
new file mode 100644
index 0000000..8cf915f
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_p2p.c
@@ -0,0 +1,2064 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_P2P_C_
+
+#include <drv_types.h>
+#include <rtw_p2p.h>
+#include <wifi.h>
+
+#ifdef CONFIG_88EU_P2P
+
+static int rtw_p2p_is_channel_list_ok(u8 desired_ch, u8 *ch_list, u8 ch_cnt)
+{
+ int found = 0, i = 0;
+
+ for (i = 0; i < ch_cnt; i++) {
+ if (ch_list[i] == desired_ch) {
+ found = 1;
+ break;
+ }
+ }
+ return found;
+}
+
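+/*
+ * Build the P2P Group Info attribute for the GO: walk the association list
+ * and serialize one client info descriptor (device/interface address,
+ * capability, config methods, device types and device name) per associated
+ * P2P device.
+ */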
+static u32 go_add_group_info_attr(struct wifidirect_info *pwdinfo, u8 *pbuf)
+{
+ unsigned long irqL;
+ struct list_head *phead, *plist;
+ u32 len = 0;
+ u16 attr_len = 0;
+ u8 tmplen, *pdata_attr, *pstart, *pcur;
+ struct sta_info *psta = NULL;
+ struct adapter *padapter = pwdinfo->padapter;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ DBG_88E("%s\n", __func__);
+
+	pdata_attr = rtw_zmalloc(MAX_P2P_IE_LEN);
+	if (pdata_attr == NULL)
+		return 0;
+
+	pstart = pdata_attr;
+	pcur = pdata_attr;
+
+ _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ phead = &pstapriv->asoc_list;
+ plist = get_next(phead);
+
+ /* look up sta asoc_queue */
+ while ((rtw_end_of_queue_search(phead, plist)) == false) {
+ psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+
+ plist = get_next(plist);
+
+
+ if (psta->is_p2p_device) {
+ tmplen = 0;
+
+ pcur++;
+
+ /* P2P device address */
+ memcpy(pcur, psta->dev_addr, ETH_ALEN);
+ pcur += ETH_ALEN;
+
+ /* P2P interface address */
+ memcpy(pcur, psta->hwaddr, ETH_ALEN);
+ pcur += ETH_ALEN;
+
+ *pcur = psta->dev_cap;
+ pcur++;
+
+ /* u16*)(pcur) = cpu_to_be16(psta->config_methods); */
+ RTW_PUT_BE16(pcur, psta->config_methods);
+ pcur += 2;
+
+ memcpy(pcur, psta->primary_dev_type, 8);
+ pcur += 8;
+
+ *pcur = psta->num_of_secdev_type;
+ pcur++;
+
+ memcpy(pcur, psta->secdev_types_list, psta->num_of_secdev_type*8);
+ pcur += psta->num_of_secdev_type*8;
+
+ if (psta->dev_name_len > 0) {
+ /* u16*)(pcur) = cpu_to_be16(WPS_ATTR_DEVICE_NAME); */
+ RTW_PUT_BE16(pcur, WPS_ATTR_DEVICE_NAME);
+ pcur += 2;
+
+ /* u16*)(pcur) = cpu_to_be16(psta->dev_name_len); */
+ RTW_PUT_BE16(pcur, psta->dev_name_len);
+ pcur += 2;
+
+ memcpy(pcur, psta->dev_name, psta->dev_name_len);
+ pcur += psta->dev_name_len;
+ }
+
+
+ tmplen = (u8)(pcur-pstart);
+
+ *pstart = (tmplen-1);
+
+ attr_len += tmplen;
+
+ /* pstart += tmplen; */
+ pstart = pcur;
+ }
+ }
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+
+ if (attr_len > 0)
+ len = rtw_set_p2p_attr_content(pbuf, P2P_ATTR_GROUP_INFO, attr_len, pdata_attr);
+
+ kfree(pdata_attr);
+ return len;
+}
+
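+/* Send a GO Discoverability Request P2P action frame (action header only, no IEs) to 'da'. */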
+static void issue_group_disc_req(struct wifidirect_info *pwdinfo, u8 *da)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct adapter *padapter = pwdinfo->padapter;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ unsigned char category = RTW_WLAN_CATEGORY_P2P;/* P2P action frame */
+ __be32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_GO_DISC_REQUEST;
+ u8 dialogToken = 0;
+
+ DBG_88E("[%s]\n", __func__);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, pwdinfo->interface_addr, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pwdinfo->interface_addr, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ /* Build P2P action frame header */
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&(p2poui), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(oui_subtype), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(dialogToken), &(pattrib->pktlen));
+
+ /* there is no IE in this P2P action frame */
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+}
+
+static void issue_p2p_devdisc_resp(struct wifidirect_info *pwdinfo, u8 *da, u8 status, u8 dialogToken)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct adapter *padapter = pwdinfo->padapter;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ u8 action = P2P_PUB_ACTION_ACTION;
+ __be32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_DEVDISC_RESP;
+ u8 p2pie[8] = { 0x00 };
+ u32 p2pielen = 0;
+
+ DBG_88E("[%s]\n", __func__);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, pwdinfo->device_addr, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pwdinfo->device_addr, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ /* Build P2P public action frame header */
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&(p2poui), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(oui_subtype), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(dialogToken), &(pattrib->pktlen));
+
+
+ /* Build P2P IE */
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* P2P_ATTR_STATUS */
+ p2pielen += rtw_set_p2p_attr_content(&p2pie[p2pielen], P2P_ATTR_STATUS, 1, &status);
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, p2pie, &pattrib->pktlen);
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+}
+
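+/*
+ * Send a Provision Discovery Response public action frame; the WPS IE echoes
+ * the config method passed by the caller and the dialog token is copied from
+ * the request frame body.
+ */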
+static void issue_p2p_provision_resp(struct wifidirect_info *pwdinfo, u8 *raddr, u8 *frame_body, u16 config_method)
+{
+ struct adapter *padapter = pwdinfo->padapter;
+ unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ u8 action = P2P_PUB_ACTION_ACTION;
+	u8 dialogToken = frame_body[7];	/* The Dialog Token of the provisioning discovery request frame. */
+ __be32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_PROVISION_DISC_RESP;
+ u8 wpsie[100] = { 0x00 };
+ u8 wpsielen = 0;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, myid(&(padapter->eeprompriv)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&(p2poui), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(oui_subtype), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(dialogToken), &(pattrib->pktlen));
+
+ wpsielen = 0;
+ /* WPS OUI */
+ RTW_PUT_BE32(wpsie, WPSOUI);
+ wpsielen += 4;
+
+ /* Config Method */
+ /* Type: */
+ RTW_PUT_BE16(wpsie + wpsielen, WPS_ATTR_CONF_METHOD);
+ wpsielen += 2;
+
+ /* Length: */
+ RTW_PUT_BE16(wpsie + wpsielen, 0x0002);
+ wpsielen += 2;
+
+ /* Value: */
+ RTW_PUT_BE16(wpsie + wpsielen, config_method);
+ wpsielen += 2;
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, wpsielen, (unsigned char *)wpsie, &pattrib->pktlen);
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+}
+
+static void issue_p2p_presence_resp(struct wifidirect_info *pwdinfo, u8 *da, u8 status, u8 dialogToken)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct adapter *padapter = pwdinfo->padapter;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ unsigned char category = RTW_WLAN_CATEGORY_P2P;/* P2P action frame */
+ __be32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_PRESENCE_RESPONSE;
+ u8 p2pie[MAX_P2P_IE_LEN] = { 0x00 };
+ u8 noa_attr_content[32] = { 0x00 };
+ u32 p2pielen = 0;
+
+ DBG_88E("[%s]\n", __func__);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, pwdinfo->interface_addr, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pwdinfo->interface_addr, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ /* Build P2P action frame header */
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&(p2poui), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(oui_subtype), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(dialogToken), &(pattrib->pktlen));
+
+
+ /* Add P2P IE header */
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* Add Status attribute in P2P IE */
+ p2pielen += rtw_set_p2p_attr_content(&p2pie[p2pielen], P2P_ATTR_STATUS, 1, &status);
+
+ /* Add NoA attribute in P2P IE */
+ noa_attr_content[0] = 0x1;/* index */
+ noa_attr_content[1] = 0x0;/* CTWindow and OppPS Parameters */
+
+ /* todo: Notice of Absence Descriptor(s) */
+
+ p2pielen += rtw_set_p2p_attr_content(&p2pie[p2pielen], P2P_ATTR_NOA, 2, noa_attr_content);
+
+
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, p2pie, &(pattrib->pktlen));
+
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+}
+
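+/*
+ * Append the P2P IE for a beacon (P2P Capability and Device ID attributes;
+ * the Notice of Absence attribute is not filled in here) and return the
+ * number of bytes written to pbuf.
+ */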
+u32 build_beacon_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pbuf)
+{
+ u8 p2pie[MAX_P2P_IE_LEN] = { 0x00 };
+ u16 capability = 0;
+ u32 len = 0, p2pielen = 0;
+ __le16 le_tmp;
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+
+ /* According to the P2P Specification, the beacon frame should contain 3 P2P attributes */
+ /* 1. P2P Capability */
+ /* 2. P2P Device ID */
+ /* 3. Notice of Absence (NOA) */
+
+ /* P2P Capability ATTR */
+ /* Type: */
+ /* Length: */
+ /* Value: */
+ /* Device Capability Bitmap, 1 byte */
+ /* Be able to participate in additional P2P Groups and */
+ /* support the P2P Invitation Procedure */
+ /* Group Capability Bitmap, 1 byte */
+ capability = P2P_DEVCAP_INVITATION_PROC|P2P_DEVCAP_CLIENT_DISCOVERABILITY;
+ capability |= ((P2P_GRPCAP_GO | P2P_GRPCAP_INTRABSS) << 8);
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_PROVISIONING_ING))
+ capability |= (P2P_GRPCAP_GROUP_FORMATION<<8);
+
+ le_tmp = cpu_to_le16(capability);
+ p2pielen += rtw_set_p2p_attr_content(&p2pie[p2pielen], P2P_ATTR_CAPABILITY, 2, (u8 *)&le_tmp);
+
+ /* P2P Device ID ATTR */
+ p2pielen += rtw_set_p2p_attr_content(&p2pie[p2pielen], P2P_ATTR_DEVICE_ID, ETH_ALEN, pwdinfo->device_addr);
+
+ /* Notice of Absence ATTR */
+ /* Type: */
+ /* Length: */
+ /* Value: */
+
+ pbuf = rtw_set_ie(pbuf, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &len);
+ return len;
+}
+
+u32 build_probe_resp_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pbuf)
+{
+ u8 p2pie[MAX_P2P_IE_LEN] = { 0x00 };
+ u32 len = 0, p2pielen = 0;
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* Commented by Albert 20100907 */
+ /* According to the P2P Specification, the probe response frame should contain 5 P2P attributes */
+ /* 1. P2P Capability */
+ /* 2. Extended Listen Timing */
+ /* 3. Notice of Absence (NOA) (Only GO needs this) */
+ /* 4. Device Info */
+ /* 5. Group Info (Only GO need this) */
+
+ /* P2P Capability ATTR */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CAPABILITY;
+
+ /* Length: */
+ /* u16*) (p2pie + p2pielen) = cpu_to_le16(0x0002); */
+ RTW_PUT_LE16(p2pie + p2pielen, 0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Device Capability Bitmap, 1 byte */
+ p2pie[p2pielen++] = DMP_P2P_DEVCAP_SUPPORT;
+
+ /* Group Capability Bitmap, 1 byte */
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ p2pie[p2pielen] = (P2P_GRPCAP_GO | P2P_GRPCAP_INTRABSS);
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_PROVISIONING_ING))
+ p2pie[p2pielen] |= P2P_GRPCAP_GROUP_FORMATION;
+
+ p2pielen++;
+ } else if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE)) {
+ /* Group Capability Bitmap, 1 byte */
+ if (pwdinfo->persistent_supported)
+ p2pie[p2pielen++] = P2P_GRPCAP_PERSISTENT_GROUP | DMP_P2P_GRPCAP_SUPPORT;
+ else
+ p2pie[p2pielen++] = DMP_P2P_GRPCAP_SUPPORT;
+ }
+
+ /* Extended Listen Timing ATTR */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_EX_LISTEN_TIMING;
+
+ /* Length: */
+ /* u16*) (p2pie + p2pielen) = cpu_to_le16(0x0004); */
+ RTW_PUT_LE16(p2pie + p2pielen, 0x0004);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Availability Period */
+ /* u16*) (p2pie + p2pielen) = cpu_to_le16(0xFFFF); */
+ RTW_PUT_LE16(p2pie + p2pielen, 0xFFFF);
+ p2pielen += 2;
+
+ /* Availability Interval */
+ /* u16*) (p2pie + p2pielen) = cpu_to_le16(0xFFFF); */
+ RTW_PUT_LE16(p2pie + p2pielen, 0xFFFF);
+ p2pielen += 2;
+
+
+ /* Notice of Absence ATTR */
+ /* Type: */
+ /* Length: */
+ /* Value: */
+
+ /* Device Info ATTR */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* 21 -> P2P Device Address (6bytes) + Config Methods (2bytes) + Primary Device Type (8bytes) */
+ /* + NumofSecondDevType (1byte) + WPS Device Name ID field (2bytes) + WPS Device Name Len field (2bytes) */
+ /* u16*) (p2pie + p2pielen) = cpu_to_le16(21 + pwdinfo->device_name_len); */
+ RTW_PUT_LE16(p2pie + p2pielen, 21 + pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ /* P2P Device Address */
+ memcpy(p2pie + p2pielen, pwdinfo->device_addr, ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* Config Method */
+	/* This field is big endian, as noted in the P2P specification. */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(pwdinfo->supported_wps_cm); */
+ RTW_PUT_BE16(p2pie + p2pielen, pwdinfo->supported_wps_cm);
+ p2pielen += 2;
+
+ /* Primary Device Type */
+ /* Category ID */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA); */
+ RTW_PUT_BE16(p2pie + p2pielen, WPS_PDT_CID_MULIT_MEDIA);
+ p2pielen += 2;
+
+ /* OUI */
+ /* u32*) (p2pie + p2pielen) = cpu_to_be32(WPSOUI); */
+ RTW_PUT_BE32(p2pie + p2pielen, WPSOUI);
+ p2pielen += 4;
+
+ /* Sub Category ID */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER); */
+ RTW_PUT_BE16(p2pie + p2pielen, WPS_PDT_SCID_MEDIA_SERVER);
+ p2pielen += 2;
+
+ /* Number of Secondary Device Types */
+ p2pie[p2pielen++] = 0x00; /* No Secondary Device Type List */
+
+ /* Device Name */
+ /* Type: */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME); */
+ RTW_PUT_BE16(p2pie + p2pielen, WPS_ATTR_DEVICE_NAME);
+ p2pielen += 2;
+
+ /* Length: */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(pwdinfo->device_name_len); */
+ RTW_PUT_BE16(p2pie + p2pielen, pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, pwdinfo->device_name, pwdinfo->device_name_len);
+ p2pielen += pwdinfo->device_name_len;
+
+ /* Group Info ATTR */
+ /* Type: */
+ /* Length: */
+ /* Value: */
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO))
+ p2pielen += go_add_group_info_attr(pwdinfo, p2pie + p2pielen);
+
+
+ pbuf = rtw_set_ie(pbuf, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &len);
+
+
+ return len;
+}
+
+u32 build_prov_disc_request_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pbuf, u8 *pssid, u8 ussidlen, u8 *pdev_raddr)
+{
+ u8 p2pie[MAX_P2P_IE_LEN] = { 0x00 };
+ u32 len = 0, p2pielen = 0;
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* Commented by Albert 20110301 */
+ /* According to the P2P Specification, the provision discovery request frame should contain 3 P2P attributes */
+ /* 1. P2P Capability */
+ /* 2. Device Info */
+ /* 3. Group ID (When joining an operating P2P Group) */
+
+ /* P2P Capability ATTR */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CAPABILITY;
+
+ /* Length: */
+ /* u16*) (p2pie + p2pielen) = cpu_to_le16(0x0002); */
+ RTW_PUT_LE16(p2pie + p2pielen, 0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Device Capability Bitmap, 1 byte */
+ p2pie[p2pielen++] = DMP_P2P_DEVCAP_SUPPORT;
+
+ /* Group Capability Bitmap, 1 byte */
+ if (pwdinfo->persistent_supported)
+ p2pie[p2pielen++] = P2P_GRPCAP_PERSISTENT_GROUP | DMP_P2P_GRPCAP_SUPPORT;
+ else
+ p2pie[p2pielen++] = DMP_P2P_GRPCAP_SUPPORT;
+
+
+ /* Device Info ATTR */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* 21 -> P2P Device Address (6bytes) + Config Methods (2bytes) + Primary Device Type (8bytes) */
+ /* + NumofSecondDevType (1byte) + WPS Device Name ID field (2bytes) + WPS Device Name Len field (2bytes) */
+ /* u16*) (p2pie + p2pielen) = cpu_to_le16(21 + pwdinfo->device_name_len); */
+ RTW_PUT_LE16(p2pie + p2pielen, 21 + pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ /* P2P Device Address */
+ memcpy(p2pie + p2pielen, pwdinfo->device_addr, ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* Config Method */
+	/* This field is big endian, as noted in the P2P specification. */
+ if (pwdinfo->ui_got_wps_info == P2P_GOT_WPSINFO_PBC) {
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_CONFIG_METHOD_PBC); */
+ RTW_PUT_BE16(p2pie + p2pielen, WPS_CONFIG_METHOD_PBC);
+ } else {
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_CONFIG_METHOD_DISPLAY); */
+ RTW_PUT_BE16(p2pie + p2pielen, WPS_CONFIG_METHOD_DISPLAY);
+ }
+
+ p2pielen += 2;
+
+ /* Primary Device Type */
+ /* Category ID */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA); */
+ RTW_PUT_BE16(p2pie + p2pielen, WPS_PDT_CID_MULIT_MEDIA);
+ p2pielen += 2;
+
+ /* OUI */
+ /* u32*) (p2pie + p2pielen) = cpu_to_be32(WPSOUI); */
+ RTW_PUT_BE32(p2pie + p2pielen, WPSOUI);
+ p2pielen += 4;
+
+ /* Sub Category ID */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER); */
+ RTW_PUT_BE16(p2pie + p2pielen, WPS_PDT_SCID_MEDIA_SERVER);
+ p2pielen += 2;
+
+ /* Number of Secondary Device Types */
+ p2pie[p2pielen++] = 0x00; /* No Secondary Device Type List */
+
+ /* Device Name */
+ /* Type: */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME); */
+ RTW_PUT_BE16(p2pie + p2pielen, WPS_ATTR_DEVICE_NAME);
+ p2pielen += 2;
+
+ /* Length: */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(pwdinfo->device_name_len); */
+ RTW_PUT_BE16(p2pie + p2pielen, pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, pwdinfo->device_name, pwdinfo->device_name_len);
+ p2pielen += pwdinfo->device_name_len;
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT)) {
+ /* Added by Albert 2011/05/19 */
+ /* In this case, the pdev_raddr is the device address of the group owner. */
+
+ /* P2P Group ID ATTR */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_GROUP_ID;
+
+ /* Length: */
+ /* u16*) (p2pie + p2pielen) = cpu_to_le16(ETH_ALEN + ussidlen); */
+ RTW_PUT_LE16(p2pie + p2pielen, ETH_ALEN + ussidlen);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, pdev_raddr, ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ memcpy(p2pie + p2pielen, pssid, ussidlen);
+ p2pielen += ussidlen;
+ }
+
+ pbuf = rtw_set_ie(pbuf, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &len);
+
+
+ return len;
+}
+
+
+u32 build_assoc_resp_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pbuf, u8 status_code)
+{
+ u8 p2pie[MAX_P2P_IE_LEN] = { 0x00 };
+ u32 len = 0, p2pielen = 0;
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* According to the P2P Specification, the Association response frame should contain 2 P2P attributes */
+ /* 1. Status */
+ /* 2. Extended Listen Timing (optional) */
+
+
+ /* Status ATTR */
+ p2pielen += rtw_set_p2p_attr_content(&p2pie[p2pielen], P2P_ATTR_STATUS, 1, &status_code);
+
+
+ /* Extended Listen Timing ATTR */
+ /* Type: */
+ /* Length: */
+ /* Value: */
+
+ pbuf = rtw_set_ie(pbuf, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &len);
+
+ return len;
+}
+
+u32 build_deauth_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pbuf)
+{
+ u32 len = 0;
+
+ return len;
+}
+
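+/*
+ * Decide whether a received probe request should be handled as a P2P probe.
+ * Returns true only when the local role is P2P device or GO and the request
+ * carries a P2P IE together with the P2P wildcard SSID or a zero-length SSID.
+ */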
+u32 process_probe_req_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
+{
+ u8 *p;
+ u32 ret = false;
+ u8 *p2pie;
+ u32 p2pielen = 0;
+ int ssid_len = 0, rate_cnt = 0;
+
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _PROBEREQ_IE_OFFSET_, _SUPPORTEDRATES_IE_, (int *)&rate_cnt,
+ len - WLAN_HDR_A3_LEN - _PROBEREQ_IE_OFFSET_);
+
+ if (rate_cnt <= 4) {
+ int i, g_rate = 0;
+
+ for (i = 0; i < rate_cnt; i++) {
+ if (((*(p + 2 + i) & 0xff) != 0x02) &&
+ ((*(p + 2 + i) & 0xff) != 0x04) &&
+ ((*(p + 2 + i) & 0xff) != 0x0B) &&
+ ((*(p + 2 + i) & 0xff) != 0x16))
+ g_rate = 1;
+ }
+
+ if (g_rate == 0) {
+			/* No OFDM rate is included in the SupportedRates IE of this probe request frame, */
+			/* so the driver should respond to this probe request. */
+ return ret;
+ }
+ } else {
+		/* rate_cnt > 4 means the SupportedRates IE contains an OFDM rate, because there are only 4 CCK rates. */
+		/* Proceed with the following checks for this probe request. */
+ }
+
+ /* Added comments by Albert 20100906 */
+ /* There are several items we should check here. */
+ /* 1. This probe request frame must contain the P2P IE. (Done) */
+ /* 2. This probe request frame must contain the wildcard SSID. (Done) */
+ /* 3. Wildcard BSSID. (Todo) */
+ /* 4. Destination Address. (Done in mgt_dispatcher function) */
+ /* 5. Requested Device Type in WSC IE. (Todo) */
+ /* 6. Device ID attribute in P2P IE. (Todo) */
+
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _PROBEREQ_IE_OFFSET_, _SSID_IE_, (int *)&ssid_len,
+ len - WLAN_HDR_A3_LEN - _PROBEREQ_IE_OFFSET_);
+
+ ssid_len &= 0xff; /* Only the last byte is valid as the SSID length of the probe request */
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE) || rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ p2pie = rtw_get_p2p_ie(pframe + WLAN_HDR_A3_LEN + _PROBEREQ_IE_OFFSET_ , len - WLAN_HDR_A3_LEN - _PROBEREQ_IE_OFFSET_ , NULL, &p2pielen);
+ if (p2pie) {
+ if ((p != NULL) && _rtw_memcmp((void *)(p+2), (void *)pwdinfo->p2p_wildcard_ssid , 7)) {
+ /* todo: */
+ /* Check Requested Device Type attributes in WSC IE. */
+ /* Check Device ID attribute in P2P IE */
+
+ ret = true;
+ } else if ((p != NULL) && (ssid_len == 0)) {
+ ret = true;
+ }
+ } else {
+ /* non-P2P device */
+ }
+ }
+
+
+ return ret;
+}
+
+u32 process_assoc_req_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pframe, uint len, struct sta_info *psta)
+{
+ u8 status_code = P2P_STATUS_SUCCESS;
+ u8 *pbuf, *pattr_content = NULL;
+ u32 attr_contentlen = 0;
+ u16 cap_attr = 0;
+ unsigned short frame_type, ie_offset = 0;
+ u8 *ies;
+ u32 ies_len;
+ u8 *p2p_ie;
+ u32 p2p_ielen = 0;
+ __be16 be_tmp;
+ __le16 le_tmp;
+
+ if (!rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO))
+ return P2P_STATUS_FAIL_REQUEST_UNABLE;
+
+ frame_type = GetFrameSubType(pframe);
+ if (frame_type == WIFI_ASSOCREQ)
+ ie_offset = _ASOCREQ_IE_OFFSET_;
+ else /* WIFI_REASSOCREQ */
+ ie_offset = _REASOCREQ_IE_OFFSET_;
+
+ ies = pframe + WLAN_HDR_A3_LEN + ie_offset;
+ ies_len = len - WLAN_HDR_A3_LEN - ie_offset;
+
+ p2p_ie = rtw_get_p2p_ie(ies , ies_len , NULL, &p2p_ielen);
+
+ if (!p2p_ie) {
+ DBG_88E("[%s] P2P IE not Found!!\n", __func__);
+ status_code = P2P_STATUS_FAIL_INVALID_PARAM;
+ } else {
+ DBG_88E("[%s] P2P IE Found!!\n", __func__);
+ }
+
+ while (p2p_ie) {
+ /* Check P2P Capability ATTR */
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_CAPABILITY, (u8 *)&le_tmp, (uint *)&attr_contentlen)) {
+ DBG_88E("[%s] Got P2P Capability Attr!!\n", __func__);
+ cap_attr = le16_to_cpu(le_tmp);
+ psta->dev_cap = cap_attr&0xff;
+ }
+
+ /* Check Extended Listen Timing ATTR */
+
+
+ /* Check P2P Device Info ATTR */
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_DEVICE_INFO, NULL, (uint *)&attr_contentlen)) {
+ DBG_88E("[%s] Got P2P DEVICE INFO Attr!!\n", __func__);
+ pattr_content = rtw_zmalloc(attr_contentlen);
+ pbuf = pattr_content;
+ if (pattr_content) {
+ u8 num_of_secdev_type;
+ u16 dev_name_len;
+
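+ /* P2P Device Info attribute body: Device Address (6), Config Methods (2, BE), */
+ /* Primary Device Type (8), Number of Secondary Device Types (1), */
+ /* Secondary Device Type List (8 * n), then the Device Name as a WPS TLV */
+ /* (Attribute ID (2, BE), Length (2, BE), Name). The parsing below follows this layout. */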
+ rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_DEVICE_INFO , pattr_content, (uint *)&attr_contentlen);
+
+ memcpy(psta->dev_addr, pattr_content, ETH_ALEN);/* P2P Device Address */
+
+ pattr_content += ETH_ALEN;
+
+ memcpy(&be_tmp, pattr_content, 2);/* Config Methods */
+ psta->config_methods = be16_to_cpu(be_tmp);
+
+ pattr_content += 2;
+
+ memcpy(psta->primary_dev_type, pattr_content, 8);
+
+ pattr_content += 8;
+
+ num_of_secdev_type = *pattr_content;
+ pattr_content += 1;
+
+ if (num_of_secdev_type == 0) {
+ psta->num_of_secdev_type = 0;
+ } else {
+ u32 len;
+
+ psta->num_of_secdev_type = num_of_secdev_type;
+
+ len = (sizeof(psta->secdev_types_list) < (num_of_secdev_type*8)) ?
+ (sizeof(psta->secdev_types_list)) : (num_of_secdev_type*8);
+
+ memcpy(psta->secdev_types_list, pattr_content, len);
+
+ pattr_content += (num_of_secdev_type*8);
+ }
+
+
+ psta->dev_name_len = 0;
+ if (WPS_ATTR_DEVICE_NAME == be16_to_cpu(*(__be16 *)pattr_content)) {
+ dev_name_len = be16_to_cpu(*(__be16 *)(pattr_content+2));
+
+ psta->dev_name_len = (sizeof(psta->dev_name) < dev_name_len) ? sizeof(psta->dev_name) : dev_name_len;
+
+ memcpy(psta->dev_name, pattr_content+4, psta->dev_name_len);
+ }
+ kfree(pbuf);
+ }
+ }
+
+ /* Get the next P2P IE */
+ p2p_ie = rtw_get_p2p_ie(p2p_ie+p2p_ielen, ies_len - (p2p_ie - ies + p2p_ielen), NULL, &p2p_ielen);
+ }
+
+ return status_code;
+}
+
+u32 process_p2p_devdisc_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
+{
+ u8 *frame_body;
+ u8 status, dialogToken;
+ struct sta_info *psta = NULL;
+ struct adapter *padapter = pwdinfo->padapter;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 *p2p_ie;
+ u32 p2p_ielen = 0;
+
+ frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+
+ dialogToken = frame_body[7];
+ status = P2P_STATUS_FAIL_UNKNOWN_P2PGROUP;
+
+ p2p_ie = rtw_get_p2p_ie(frame_body + _PUBLIC_ACTION_IE_OFFSET_, len - _PUBLIC_ACTION_IE_OFFSET_, NULL, &p2p_ielen);
+ if (p2p_ie) {
+ u8 groupid[38] = { 0x00 };
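+ /* Group ID attribute: 6-byte GO device address followed by an SSID of up to 32 bytes (6 + 32 = 38). */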
+ u8 dev_addr[ETH_ALEN] = { 0x00 };
+ u32 attr_contentlen = 0;
+
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_ID, groupid, &attr_contentlen)) {
+ if (_rtw_memcmp(pwdinfo->device_addr, groupid, ETH_ALEN) &&
+ _rtw_memcmp(pwdinfo->p2p_group_ssid, groupid+ETH_ALEN, pwdinfo->p2p_group_ssid_len)) {
+ attr_contentlen = 0;
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_DEVICE_ID, dev_addr, &attr_contentlen)) {
+ unsigned long irqL;
+ struct list_head *phead, *plist;
+
+ _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ phead = &pstapriv->asoc_list;
+ plist = get_next(phead);
+
+ /* look up sta asoc_queue */
+ while ((rtw_end_of_queue_search(phead, plist)) == false) {
+ psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+
+ plist = get_next(plist);
+
+ if (psta->is_p2p_device && (psta->dev_cap&P2P_DEVCAP_CLIENT_DISCOVERABILITY) &&
+ _rtw_memcmp(psta->dev_addr, dev_addr, ETH_ALEN)) {
+ /* issue GO Discoverability Request */
+ issue_group_disc_req(pwdinfo, psta->hwaddr);
+ status = P2P_STATUS_SUCCESS;
+ break;
+ } else {
+ status = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
+ }
+ }
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ } else {
+ status = P2P_STATUS_FAIL_INVALID_PARAM;
+ }
+ } else {
+ status = P2P_STATUS_FAIL_INVALID_PARAM;
+ }
+ }
+ }
+
+
+ /* issue Device Discoverability Response */
+ issue_p2p_devdisc_resp(pwdinfo, GetAddr2Ptr(pframe), status, dialogToken);
+
+ return (status == P2P_STATUS_SUCCESS) ? true : false;
+}
+
+u32 process_p2p_devdisc_resp(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
+{
+ return true;
+}
+
+u8 process_p2p_provdisc_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
+{
+ u8 *frame_body;
+ u8 *wpsie;
+ uint wps_ielen = 0, attr_contentlen = 0;
+ u16 uconfig_method = 0;
+ __be16 be_tmp;
+
+ frame_body = (pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+
+ wpsie = rtw_get_wps_ie(frame_body + _PUBLIC_ACTION_IE_OFFSET_, len - _PUBLIC_ACTION_IE_OFFSET_, NULL, &wps_ielen);
+ if (wpsie) {
+ if (rtw_get_wps_attr_content(wpsie, wps_ielen, WPS_ATTR_CONF_METHOD, (u8 *)&be_tmp, &attr_contentlen)) {
+ uconfig_method = be16_to_cpu(be_tmp);
+ switch (uconfig_method) {
+ case WPS_CM_DISPLYA:
+ memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "dis", 3);
+ break;
+ case WPS_CM_LABEL:
+ memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "lab", 3);
+ break;
+ case WPS_CM_PUSH_BUTTON:
+ memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "pbc", 3);
+ break;
+ case WPS_CM_KEYPAD:
+ memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "pad", 3);
+ break;
+ }
+ issue_p2p_provision_resp(pwdinfo, GetAddr2Ptr(pframe), frame_body, uconfig_method);
+ }
+ }
+ DBG_88E("[%s] config method = %s\n", __func__, pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req);
+ return true;
+}
+
+u8 process_p2p_provdisc_resp(struct wifidirect_info *pwdinfo, u8 *pframe)
+{
+ return true;
+}
+
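+/*
+ * Flatten the peer's Channel List attribute into an array of channel numbers.
+ * Per the Wi-Fi P2P specification the attribute body is: Country String (3 octets)
+ * followed by one or more Channel Entries, each consisting of Operating Class (1),
+ * Number of Channels (1) and a Channel List of that many octets.
+ */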
+static u8 rtw_p2p_get_peer_ch_list(struct wifidirect_info *pwdinfo, u8 *ch_content, u8 ch_cnt, u8 *peer_ch_list)
+{
+ u8 i = 0, j = 0;
+ u8 temp = 0;
+ u8 ch_no = 0;
+ ch_content += 3;
+ ch_cnt -= 3;
+
+ while (ch_cnt > 0) {
+ ch_content += 1;
+ ch_cnt -= 1;
+ temp = *ch_content;
+ for (i = 0 ; i < temp ; i++, j++)
+ peer_ch_list[j] = *(ch_content + 1 + i);
+ ch_content += (temp + 1);
+ ch_cnt -= (temp + 1);
+ ch_no += temp;
+ }
+
+ return ch_no;
+}
+
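+/*
+ * Intersect the peer's channel list with our own channel set
+ * (pmlmeext->channel_set); the common channels are written to
+ * ch_list_inclusioned and their count is returned.
+ */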
+static u8 rtw_p2p_ch_inclusion(struct mlme_ext_priv *pmlmeext, u8 *peer_ch_list, u8 peer_ch_num, u8 *ch_list_inclusioned)
+{
+ int i = 0, j = 0, temp = 0;
+ u8 ch_no = 0;
+
+ for (i = 0; i < peer_ch_num; i++) {
+ for (j = temp; j < pmlmeext->max_chan_nums; j++) {
+ if (*(peer_ch_list + i) == pmlmeext->channel_set[j].ChannelNum) {
+ ch_list_inclusioned[ch_no++] = *(peer_ch_list + i);
+ temp = j;
+ break;
+ }
+ }
+ }
+
+ return ch_no;
+}
+
+u8 process_p2p_group_negotation_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
+{
+ struct adapter *padapter = pwdinfo->padapter;
+ u8 result = P2P_STATUS_SUCCESS;
+ u32 p2p_ielen = 0, wps_ielen = 0;
+ u8 *ies;
+ u32 ies_len;
+ u8 *p2p_ie;
+ u8 *wpsie;
+ u16 wps_devicepassword_id = 0x0000;
+ uint wps_devicepassword_id_len = 0;
+ __be16 be_tmp;
+
+ wpsie = rtw_get_wps_ie(pframe + _PUBLIC_ACTION_IE_OFFSET_, len - _PUBLIC_ACTION_IE_OFFSET_, NULL, &wps_ielen);
+ if (wpsie) {
+ /* Commented by Kurt 20120113 */
+ /* If a peer starts the P2P handshake without sending a provision discovery request, */
+ /* we have to derive peer_req_cm here from the WPS Device Password ID. */
+ if (_rtw_memcmp(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "000", 3)) {
+ rtw_get_wps_attr_content(wpsie, wps_ielen, WPS_ATTR_DEVICE_PWID, (u8 *)&be_tmp, &wps_devicepassword_id_len);
+ wps_devicepassword_id = be16_to_cpu(be_tmp);
+
+ if (wps_devicepassword_id == WPS_DPID_USER_SPEC)
+ memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "dis", 3);
+ else if (wps_devicepassword_id == WPS_DPID_REGISTRAR_SPEC)
+ memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "pad", 3);
+ else
+ memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "pbc", 3);
+ }
+ } else {
+ DBG_88E("[%s] WPS IE not Found!!\n", __func__);
+ result = P2P_STATUS_FAIL_INCOMPATIBLE_PARAM;
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ return result;
+ }
+
+ if (pwdinfo->ui_got_wps_info == P2P_NO_WPSINFO) {
+ result = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_TX_INFOR_NOREADY);
+ return result;
+ }
+
+ ies = pframe + _PUBLIC_ACTION_IE_OFFSET_;
+ ies_len = len - _PUBLIC_ACTION_IE_OFFSET_;
+
+ p2p_ie = rtw_get_p2p_ie(ies, ies_len, NULL, &p2p_ielen);
+
+ if (!p2p_ie) {
+ DBG_88E("[%s] P2P IE not Found!!\n", __func__);
+ result = P2P_STATUS_FAIL_INCOMPATIBLE_PARAM;
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ }
+
+ while (p2p_ie) {
+ u8 attr_content = 0x00;
+ u32 attr_contentlen = 0;
+ u8 ch_content[50] = { 0x00 };
+ uint ch_cnt = 0;
+ u8 peer_ch_list[50] = { 0x00 };
+ u8 peer_ch_num = 0;
+ u8 ch_list_inclusioned[50] = { 0x00 };
+ u8 ch_num_inclusioned = 0;
+
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_ING);
+
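+ /* GO Intent attribute value: bits 7-1 carry the intent (0-15), bit 0 is the tie breaker. */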
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GO_INTENT , &attr_content, &attr_contentlen)) {
+ DBG_88E("[%s] GO Intent = %d, tie = %d\n", __func__, attr_content >> 1, attr_content & 0x01);
+ pwdinfo->peer_intent = attr_content; /* include both intent and tie breaker values. */
+
+ if (pwdinfo->intent == (pwdinfo->peer_intent >> 1)) {
+ /* Try to match the tie breaker value */
+ if (pwdinfo->intent == P2P_MAX_INTENT) {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ result = P2P_STATUS_FAIL_BOTH_GOINTENT_15;
+ } else {
+ if (attr_content & 0x01)
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ else
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ }
+ } else if (pwdinfo->intent > (pwdinfo->peer_intent >> 1)) {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ } else {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ }
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ /* Store the group id information. */
+ memcpy(pwdinfo->groupid_info.go_device_addr, pwdinfo->device_addr, ETH_ALEN);
+ memcpy(pwdinfo->groupid_info.ssid, pwdinfo->nego_ssid, pwdinfo->nego_ssidlen);
+ }
+ }
+
+
+ attr_contentlen = 0;
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_INTENTED_IF_ADDR, pwdinfo->p2p_peer_interface_addr, &attr_contentlen)) {
+ if (attr_contentlen != ETH_ALEN)
+ _rtw_memset(pwdinfo->p2p_peer_interface_addr, 0x00, ETH_ALEN);
+ }
+
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_CH_LIST, ch_content, &ch_cnt)) {
+ peer_ch_num = rtw_p2p_get_peer_ch_list(pwdinfo, ch_content, ch_cnt, peer_ch_list);
+ ch_num_inclusioned = rtw_p2p_ch_inclusion(&padapter->mlmeextpriv, peer_ch_list, peer_ch_num, ch_list_inclusioned);
+
+ if (ch_num_inclusioned == 0) {
+ DBG_88E("[%s] No common channel in channel list!\n", __func__);
+ result = P2P_STATUS_FAIL_NO_COMMON_CH;
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ break;
+ }
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ if (!rtw_p2p_is_channel_list_ok(pwdinfo->operating_channel,
+ ch_list_inclusioned, ch_num_inclusioned)) {
+ u8 operatingch_info[5] = { 0x00 }, peer_operating_ch = 0;
+ attr_contentlen = 0;
+
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_OPERATING_CH, operatingch_info, &attr_contentlen))
+ peer_operating_ch = operatingch_info[4];
+
+ if (rtw_p2p_is_channel_list_ok(peer_operating_ch,
+ ch_list_inclusioned, ch_num_inclusioned)) {
+ /* Change our operating channel to the peer's for compatibility. */
+ pwdinfo->operating_channel = peer_operating_ch;
+ DBG_88E("[%s] Change op ch to %02x as peer's\n", __func__, pwdinfo->operating_channel);
+ } else {
+ /* Take first channel of ch_list_inclusioned as operating channel */
+ pwdinfo->operating_channel = ch_list_inclusioned[0];
+ DBG_88E("[%s] Change op ch to %02x\n", __func__, pwdinfo->operating_channel);
+ }
+ }
+ }
+ }
+
+ /* Get the next P2P IE */
+ p2p_ie = rtw_get_p2p_ie(p2p_ie+p2p_ielen, ies_len - (p2p_ie - ies + p2p_ielen), NULL, &p2p_ielen);
+ }
+ return result;
+}
+
+u8 process_p2p_group_negotation_resp(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
+{
+ struct adapter *padapter = pwdinfo->padapter;
+ u8 result = P2P_STATUS_SUCCESS;
+ u32 p2p_ielen, wps_ielen;
+ u8 *ies;
+ u32 ies_len;
+ u8 *p2p_ie;
+
+ ies = pframe + _PUBLIC_ACTION_IE_OFFSET_;
+ ies_len = len - _PUBLIC_ACTION_IE_OFFSET_;
+
+ /* Determine which side is the P2P GO and which is the P2P Client. */
+
+ if (rtw_get_wps_ie(ies, ies_len, NULL, &wps_ielen)) {
+ } else {
+ DBG_88E("[%s] WPS IE not Found!!\n", __func__);
+ result = P2P_STATUS_FAIL_INCOMPATIBLE_PARAM;
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ }
+
+ p2p_ie = rtw_get_p2p_ie(ies, ies_len, NULL, &p2p_ielen);
+ if (!p2p_ie) {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ result = P2P_STATUS_FAIL_INCOMPATIBLE_PARAM;
+ } else {
+ u8 attr_content = 0x00;
+ u32 attr_contentlen = 0;
+ u8 operatingch_info[5] = { 0x00 };
+ u8 groupid[38];
+ u8 peer_ch_list[50] = { 0x00 };
+ u8 peer_ch_num = 0;
+ u8 ch_list_inclusioned[50] = { 0x00 };
+ u8 ch_num_inclusioned = 0;
+
+ while (p2p_ie) { /* Found the P2P IE. */
+ rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_STATUS, &attr_content, &attr_contentlen);
+ if (attr_contentlen == 1) {
+ DBG_88E("[%s] Status = %d\n", __func__, attr_content);
+ if (attr_content == P2P_STATUS_SUCCESS) {
+ /* Do nothing. */
+ } else {
+ if (P2P_STATUS_FAIL_INFO_UNAVAILABLE == attr_content) {
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_INFOR_NOREADY);
+ } else {
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ }
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ result = attr_content;
+ break;
+ }
+ }
+
+ /* Try to get the peer's interface address */
+ attr_contentlen = 0;
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_INTENTED_IF_ADDR, pwdinfo->p2p_peer_interface_addr, &attr_contentlen)) {
+ if (attr_contentlen != ETH_ALEN)
+ _rtw_memset(pwdinfo->p2p_peer_interface_addr, 0x00, ETH_ALEN);
+ }
+
+ /* Try to get the peer's intent and tie breaker value. */
+ attr_content = 0x00;
+ attr_contentlen = 0;
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GO_INTENT , &attr_content, &attr_contentlen)) {
+ DBG_88E("[%s] GO Intent = %d, tie = %d\n", __func__, attr_content >> 1, attr_content & 0x01);
+ pwdinfo->peer_intent = attr_content; /* include both intent and tie breaker values. */
+
+ if (pwdinfo->intent == (pwdinfo->peer_intent >> 1)) {
+ /* Try to match the tie breaker value */
+ if (pwdinfo->intent == P2P_MAX_INTENT) {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ result = P2P_STATUS_FAIL_BOTH_GOINTENT_15;
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ } else {
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ if (attr_content & 0x01)
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ else
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ }
+ } else if (pwdinfo->intent > (pwdinfo->peer_intent >> 1)) {
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ } else {
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ }
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ /* Store the group id information. */
+ memcpy(pwdinfo->groupid_info.go_device_addr, pwdinfo->device_addr, ETH_ALEN);
+ memcpy(pwdinfo->groupid_info.ssid, pwdinfo->nego_ssid, pwdinfo->nego_ssidlen);
+ }
+ }
+
+ /* Try to get the operation channel information */
+
+ attr_contentlen = 0;
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_OPERATING_CH, operatingch_info, &attr_contentlen)) {
+ DBG_88E("[%s] Peer's operating channel = %d\n", __func__, operatingch_info[4]);
+ pwdinfo->peer_operating_ch = operatingch_info[4];
+ }
+
+ /* Try to get the channel list information */
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_CH_LIST, pwdinfo->channel_list_attr, &pwdinfo->channel_list_attr_len)) {
+ DBG_88E("[%s] channel list attribute found, len = %d\n", __func__, pwdinfo->channel_list_attr_len);
+
+ peer_ch_num = rtw_p2p_get_peer_ch_list(pwdinfo, pwdinfo->channel_list_attr, pwdinfo->channel_list_attr_len, peer_ch_list);
+ ch_num_inclusioned = rtw_p2p_ch_inclusion(&padapter->mlmeextpriv, peer_ch_list, peer_ch_num, ch_list_inclusioned);
+
+ if (ch_num_inclusioned == 0) {
+ DBG_88E("[%s] No common channel in channel list!\n", __func__);
+ result = P2P_STATUS_FAIL_NO_COMMON_CH;
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ break;
+ }
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ if (!rtw_p2p_is_channel_list_ok(pwdinfo->operating_channel,
+ ch_list_inclusioned, ch_num_inclusioned)) {
+ u8 operatingch_info[5] = { 0x00 }, peer_operating_ch = 0;
+ attr_contentlen = 0;
+
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_OPERATING_CH, operatingch_info, &attr_contentlen))
+ peer_operating_ch = operatingch_info[4];
+
+ if (rtw_p2p_is_channel_list_ok(peer_operating_ch,
+ ch_list_inclusioned, ch_num_inclusioned)) {
+ /* Change our operating channel to the peer's for compatibility. */
+ pwdinfo->operating_channel = peer_operating_ch;
+ DBG_88E("[%s] Change op ch to %02x as peer's\n", __func__, pwdinfo->operating_channel);
+ } else {
+ /* Take first channel of ch_list_inclusioned as operating channel */
+ pwdinfo->operating_channel = ch_list_inclusioned[0];
+ DBG_88E("[%s] Change op ch to %02x\n", __func__, pwdinfo->operating_channel);
+ }
+ }
+ }
+ } else {
+ DBG_88E("[%s] channel list attribute not found!\n", __func__);
+ }
+
+ /* Try to get the group id information if peer is GO */
+ attr_contentlen = 0;
+ _rtw_memset(groupid, 0x00, 38);
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_ID, groupid, &attr_contentlen)) {
+ memcpy(pwdinfo->groupid_info.go_device_addr, &groupid[0], ETH_ALEN);
+ memcpy(pwdinfo->groupid_info.ssid, &groupid[6], attr_contentlen - ETH_ALEN);
+ }
+
+ /* Get the next P2P IE */
+ p2p_ie = rtw_get_p2p_ie(p2p_ie+p2p_ielen, ies_len - (p2p_ie - ies + p2p_ielen), NULL, &p2p_ielen);
+ }
+ }
+ return result;
+}
+
+u8 process_p2p_group_negotation_confirm(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
+{
+ u8 *ies;
+ u32 ies_len;
+ u8 *p2p_ie;
+ u32 p2p_ielen = 0;
+ u8 result = P2P_STATUS_SUCCESS;
+ ies = pframe + _PUBLIC_ACTION_IE_OFFSET_;
+ ies_len = len - _PUBLIC_ACTION_IE_OFFSET_;
+
+ p2p_ie = rtw_get_p2p_ie(ies, ies_len, NULL, &p2p_ielen);
+ while (p2p_ie) { /* Found the P2P IE. */
+ u8 attr_content = 0x00, operatingch_info[5] = { 0x00 };
+ u8 groupid[38] = { 0x00 };
+ u32 attr_contentlen = 0;
+
+ pwdinfo->negotiation_dialog_token = 1;
+ rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_STATUS, &attr_content, &attr_contentlen);
+ if (attr_contentlen == 1) {
+ DBG_88E("[%s] Status = %d\n", __func__, attr_content);
+ result = attr_content;
+
+ if (attr_content == P2P_STATUS_SUCCESS) {
+ u8 bcancelled = 0;
+
+ _cancel_timer(&pwdinfo->restore_p2p_state_timer, &bcancelled);
+
+ /* Commented by Albert 20100911 */
+ /* Todo: Need to handle the case in which both intents are the same. */
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ if ((pwdinfo->intent) > (pwdinfo->peer_intent >> 1)) {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ } else if ((pwdinfo->intent) < (pwdinfo->peer_intent >> 1)) {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ } else {
+ /* Have to compare the Tie Breaker */
+ if (pwdinfo->peer_intent & 0x01)
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ else
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ }
+ } else {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ break;
+ }
+ }
+
+ /* Try to get the group id information */
+ attr_contentlen = 0;
+ _rtw_memset(groupid, 0x00, 38);
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_ID, groupid, &attr_contentlen)) {
+ DBG_88E("[%s] Ssid = %s, ssidlen = %zu\n", __func__, &groupid[ETH_ALEN], strlen(&groupid[ETH_ALEN]));
+ memcpy(pwdinfo->groupid_info.go_device_addr, &groupid[0], ETH_ALEN);
+ memcpy(pwdinfo->groupid_info.ssid, &groupid[6], attr_contentlen - ETH_ALEN);
+ }
+
+ attr_contentlen = 0;
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_OPERATING_CH, operatingch_info, &attr_contentlen)) {
+ DBG_88E("[%s] Peer's operating channel = %d\n", __func__, operatingch_info[4]);
+ pwdinfo->peer_operating_ch = operatingch_info[4];
+ }
+
+ /* Get the next P2P IE */
+ p2p_ie = rtw_get_p2p_ie(p2p_ie+p2p_ielen, ies_len - (p2p_ie - ies + p2p_ielen), NULL, &p2p_ielen);
+ }
+ return result;
+}
+
+u8 process_p2p_presence_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
+{
+ u8 *frame_body;
+ u8 dialogToken = 0;
+ u8 status = P2P_STATUS_SUCCESS;
+
+ frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+
+ dialogToken = frame_body[6];
+
+ /* todo: check NoA attribute */
+
+ issue_p2p_presence_resp(pwdinfo, GetAddr2Ptr(pframe), status, dialogToken);
+
+ return true;
+}
+
+static void find_phase_handler(struct adapter *padapter)
+{
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ndis_802_11_ssid ssid;
+ unsigned long irqL;
+
+_func_enter_;
+
+ _rtw_memset((unsigned char *)&ssid, 0, sizeof(struct ndis_802_11_ssid));
+ memcpy(ssid.Ssid, pwdinfo->p2p_wildcard_ssid, P2P_WILDCARD_SSID_LEN);
+ ssid.SsidLength = P2P_WILDCARD_SSID_LEN;
+
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH);
+
+ _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ _exit_critical_bh(&pmlmepriv->lock, &irqL);
+
+
+_func_exit_;
+}
+
+void p2p_concurrent_handler(struct adapter *padapter);
+
+static void restore_p2p_state_handler(struct adapter *padapter)
+{
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+
+_func_enter_;
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_ING) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_FAIL))
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ rtw_p2p_set_state(pwdinfo, rtw_p2p_pre_state(pwdinfo));
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE)) {
+ /* Only switch back to the listen channel when acting as a P2P Device; */
+ /* a P2P Client must stay on the P2P GO's operating channel. */
+ set_channel_bwmode(padapter, pwdinfo->listen_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ }
+_func_exit_;
+}
+
+static void pre_tx_invitereq_handler(struct adapter *padapter)
+{
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ u8 val8 = 1;
+_func_enter_;
+
+ set_channel_bwmode(padapter, pwdinfo->invitereq_info.peer_ch, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
+ issue_probereq_p2p(padapter, NULL);
+ _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
+
+_func_exit_;
+}
+
+static void pre_tx_provdisc_handler(struct adapter *padapter)
+{
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ u8 val8 = 1;
+_func_enter_;
+
+ set_channel_bwmode(padapter, pwdinfo->tx_prov_disc_info.peer_channel_num[0], HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
+ issue_probereq_p2p(padapter, NULL);
+ _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
+
+_func_exit_;
+}
+
+static void pre_tx_negoreq_handler(struct adapter *padapter)
+{
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ u8 val8 = 1;
+_func_enter_;
+
+ set_channel_bwmode(padapter, pwdinfo->nego_req_info.peer_channel_num[0], HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
+ issue_probereq_p2p(padapter, NULL);
+ _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
+
+_func_exit_;
+}
+
+void p2p_protocol_wk_hdl(struct adapter *padapter, int intCmdType)
+{
+_func_enter_;
+ switch (intCmdType) {
+ case P2P_FIND_PHASE_WK:
+ find_phase_handler(padapter);
+ break;
+ case P2P_RESTORE_STATE_WK:
+ restore_p2p_state_handler(padapter);
+ break;
+ case P2P_PRE_TX_PROVDISC_PROCESS_WK:
+ pre_tx_provdisc_handler(padapter);
+ break;
+ case P2P_PRE_TX_INVITEREQ_PROCESS_WK:
+ pre_tx_invitereq_handler(padapter);
+ break;
+ case P2P_PRE_TX_NEGOREQ_PROCESS_WK:
+ pre_tx_negoreq_handler(padapter);
+ break;
+ }
+
+_func_exit_;
+}
+
+void process_p2p_ps_ie(struct adapter *padapter, u8 *IEs, u32 IELength)
+{
+ u8 *ies;
+ u32 ies_len;
+ u8 *p2p_ie;
+ u32 p2p_ielen = 0;
+ u8 noa_attr[MAX_P2P_IE_LEN] = { 0x00 };/* NoA length should be n*(13) + 2 */
+ u32 attr_contentlen = 0;
+
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ u8 find_p2p = false, find_p2p_ps = false;
+ u8 noa_offset, noa_num, noa_index;
+
+_func_enter_;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return;
+ if (IELength <= _BEACON_IE_OFFSET_)
+ return;
+
+ ies = IEs + _BEACON_IE_OFFSET_;
+ ies_len = IELength - _BEACON_IE_OFFSET_;
+
+ p2p_ie = rtw_get_p2p_ie(ies, ies_len, NULL, &p2p_ielen);
+
+ while (p2p_ie) {
+ find_p2p = true;
+ /* Get Notice of Absence IE. */
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_NOA, noa_attr, &attr_contentlen)) {
+ find_p2p_ps = true;
+ noa_index = noa_attr[0];
+
+ if ((pwdinfo->p2p_ps_mode == P2P_PS_NONE) ||
+ (noa_index != pwdinfo->noa_index)) { /* if the index changes, the driver should reconfigure the related settings. */
+ pwdinfo->noa_index = noa_index;
+ pwdinfo->opp_ps = noa_attr[1] >> 7;
+ pwdinfo->ctwindow = noa_attr[1] & 0x7F;
+
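+ /* NoA attribute body: Index (1), CTWindow/OppPS (1; bit 7 = OppPS, bits 6-0 = CTWindow), */
+ /* then n descriptors of 13 octets each: Count/Type (1), Duration (4), Interval (4), Start Time (4). */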
+ noa_offset = 2;
+ noa_num = 0;
+ /* NoA length should be n*(13) + 2 */
+ if (attr_contentlen > 2) {
+ while (noa_offset < attr_contentlen) {
+ /* memcpy(&wifidirect_info->noa_count[noa_num], &noa_attr[noa_offset], 1); */
+ pwdinfo->noa_count[noa_num] = noa_attr[noa_offset];
+ noa_offset += 1;
+
+ memcpy(&pwdinfo->noa_duration[noa_num], &noa_attr[noa_offset], 4);
+ noa_offset += 4;
+
+ memcpy(&pwdinfo->noa_interval[noa_num], &noa_attr[noa_offset], 4);
+ noa_offset += 4;
+
+ memcpy(&pwdinfo->noa_start_time[noa_num], &noa_attr[noa_offset], 4);
+ noa_offset += 4;
+
+ noa_num++;
+ }
+ }
+ pwdinfo->noa_num = noa_num;
+
+ if (pwdinfo->opp_ps == 1) {
+ pwdinfo->p2p_ps_mode = P2P_PS_CTWINDOW;
+ /* Only enter CTWindow power save once the firmware is already in LPS. */
+ if (padapter->pwrctrlpriv.bFwCurrentInPSMode)
+ p2p_ps_wk_cmd(padapter, P2P_PS_ENABLE, 1);
+ } else if (pwdinfo->noa_num > 0) {
+ pwdinfo->p2p_ps_mode = P2P_PS_NOA;
+ p2p_ps_wk_cmd(padapter, P2P_PS_ENABLE, 1);
+ } else if (pwdinfo->p2p_ps_mode > P2P_PS_NONE) {
+ p2p_ps_wk_cmd(padapter, P2P_PS_DISABLE, 1);
+ }
+ }
+
+ break; /* find target, just break. */
+ }
+
+ /* Get the next P2P IE */
+ p2p_ie = rtw_get_p2p_ie(p2p_ie+p2p_ielen, ies_len - (p2p_ie - ies + p2p_ielen), NULL, &p2p_ielen);
+ }
+
+ if (find_p2p) {
+ if ((pwdinfo->p2p_ps_mode > P2P_PS_NONE) && !find_p2p_ps)
+ p2p_ps_wk_cmd(padapter, P2P_PS_DISABLE, 1);
+ }
+
+_func_exit_;
+}
+
+void p2p_ps_wk_hdl(struct adapter *padapter, u8 p2p_ps_state)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+_func_enter_;
+
+ /* Pre action for p2p state */
+ switch (p2p_ps_state) {
+ case P2P_PS_DISABLE:
+ pwdinfo->p2p_ps_state = p2p_ps_state;
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, (u8 *)(&p2p_ps_state));
+
+ pwdinfo->noa_index = 0;
+ pwdinfo->ctwindow = 0;
+ pwdinfo->opp_ps = 0;
+ pwdinfo->noa_num = 0;
+ pwdinfo->p2p_ps_mode = P2P_PS_NONE;
+ if (padapter->pwrctrlpriv.bFwCurrentInPSMode) {
+ if (pwrpriv->smart_ps == 0) {
+ pwrpriv->smart_ps = 2;
+ rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&(padapter->pwrctrlpriv.pwr_mode)));
+ }
+ }
+ break;
+ case P2P_PS_ENABLE:
+ if (pwdinfo->p2p_ps_mode > P2P_PS_NONE) {
+ pwdinfo->p2p_ps_state = p2p_ps_state;
+
+ if (pwdinfo->ctwindow > 0) {
+ if (pwrpriv->smart_ps != 0) {
+ pwrpriv->smart_ps = 0;
+ DBG_88E("%s(): Enter CTW, change SmartPS\n", __func__);
+ rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&(padapter->pwrctrlpriv.pwr_mode)));
+ }
+ }
+ rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, (u8 *)(&p2p_ps_state));
+ }
+ break;
+ case P2P_PS_SCAN:
+ case P2P_PS_SCAN_DONE:
+ case P2P_PS_ALLSTASLEEP:
+ if (pwdinfo->p2p_ps_mode > P2P_PS_NONE) {
+ pwdinfo->p2p_ps_state = p2p_ps_state;
+ rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, (u8 *)(&p2p_ps_state));
+ }
+ break;
+ default:
+ break;
+ }
+
+_func_exit_;
+}
+
+u8 p2p_ps_wk_cmd(struct adapter *padapter, u8 p2p_ps_state, u8 enqueue)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return res;
+
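+ /* When enqueue is set, defer the state change to the command thread by queueing */
+ /* a driver-extra command; otherwise handle it directly in the current context. */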
+ if (enqueue) {
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = P2P_PS_WK_CID;
+ pdrvextra_cmd_parm->type_size = p2p_ps_state;
+ pdrvextra_cmd_parm->pbuf = NULL;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+ } else {
+ p2p_ps_wk_hdl(padapter, p2p_ps_state);
+ }
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+static void reset_ch_sitesurvey_timer_process(void *FunctionContext)
+{
+ struct adapter *adapter = (struct adapter *)FunctionContext;
+ struct wifidirect_info *pwdinfo = &adapter->wdinfo;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return;
+
+ DBG_88E("[%s] In\n", __func__);
+ /* Reset the operation channel information */
+ pwdinfo->rx_invitereq_info.operation_ch[0] = 0;
+ pwdinfo->rx_invitereq_info.scan_op_ch_only = 0;
+}
+
+static void reset_ch_sitesurvey_timer_process2(void *FunctionContext)
+{
+ struct adapter *adapter = (struct adapter *)FunctionContext;
+ struct wifidirect_info *pwdinfo = &adapter->wdinfo;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return;
+
+ DBG_88E("[%s] In\n", __func__);
+ /* Reset the operation channel information */
+ pwdinfo->p2p_info.operation_ch[0] = 0;
+ pwdinfo->p2p_info.scan_op_ch_only = 0;
+}
+
+static void restore_p2p_state_timer_process(void *FunctionContext)
+{
+ struct adapter *adapter = (struct adapter *)FunctionContext;
+ struct wifidirect_info *pwdinfo = &adapter->wdinfo;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return;
+
+ p2p_protocol_wk_cmd(adapter, P2P_RESTORE_STATE_WK);
+}
+
+static void pre_tx_scan_timer_process(void *FunctionContext)
+{
+ struct adapter *adapter = (struct adapter *)FunctionContext;
+ struct wifidirect_info *pwdinfo = &adapter->wdinfo;
+ unsigned long irqL;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return;
+
+ _enter_critical_bh(&pmlmepriv->lock, &irqL);
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ)) {
+ if (pwdinfo->tx_prov_disc_info.benable) { /* the provision discovery request frame is trigger to send or not */
+ p2p_protocol_wk_cmd(adapter, P2P_PRE_TX_PROVDISC_PROCESS_WK);
+ /* issue_probereq_p2p(adapter, NULL); */
+ /* _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT); */
+ }
+ } else if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_ING)) {
+ if (pwdinfo->nego_req_info.benable)
+ p2p_protocol_wk_cmd(adapter, P2P_PRE_TX_NEGOREQ_PROCESS_WK);
+ } else if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_INVITE_REQ)) {
+ if (pwdinfo->invitereq_info.benable)
+ p2p_protocol_wk_cmd(adapter, P2P_PRE_TX_INVITEREQ_PROCESS_WK);
+ } else {
+ DBG_88E("[%s] p2p_state is %d, ignore!!\n", __func__, rtw_p2p_state(pwdinfo));
+ }
+
+ _exit_critical_bh(&pmlmepriv->lock, &irqL);
+}
+
+static void find_phase_timer_process(void *FunctionContext)
+{
+ struct adapter *adapter = (struct adapter *)FunctionContext;
+ struct wifidirect_info *pwdinfo = &adapter->wdinfo;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return;
+
+ adapter->wdinfo.find_phase_state_exchange_cnt++;
+
+ p2p_protocol_wk_cmd(adapter, P2P_FIND_PHASE_WK);
+}
+
+void reset_global_wifidirect_info(struct adapter *padapter)
+{
+ struct wifidirect_info *pwdinfo;
+
+ pwdinfo = &padapter->wdinfo;
+ pwdinfo->persistent_supported = 0;
+ pwdinfo->session_available = true;
+ pwdinfo->wfd_tdls_enable = 0;
+ pwdinfo->wfd_tdls_weaksec = 0;
+}
+
+void rtw_init_wifidirect_timers(struct adapter *padapter)
+{
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+
+ _init_timer(&pwdinfo->find_phase_timer, padapter->pnetdev, find_phase_timer_process, padapter);
+ _init_timer(&pwdinfo->restore_p2p_state_timer, padapter->pnetdev, restore_p2p_state_timer_process, padapter);
+ _init_timer(&pwdinfo->pre_tx_scan_timer, padapter->pnetdev, pre_tx_scan_timer_process, padapter);
+ _init_timer(&pwdinfo->reset_ch_sitesurvey, padapter->pnetdev, reset_ch_sitesurvey_timer_process, padapter);
+ _init_timer(&pwdinfo->reset_ch_sitesurvey2, padapter->pnetdev, reset_ch_sitesurvey_timer_process2, padapter);
+}
+
+void rtw_init_wifidirect_addrs(struct adapter *padapter, u8 *dev_addr, u8 *iface_addr)
+{
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+
+ /* init device & interface address */
+ if (dev_addr)
+ memcpy(pwdinfo->device_addr, dev_addr, ETH_ALEN);
+ if (iface_addr)
+ memcpy(pwdinfo->interface_addr, iface_addr, ETH_ALEN);
+#endif
+}
+
+void init_wifidirect_info(struct adapter *padapter, enum P2P_ROLE role)
+{
+ struct wifidirect_info *pwdinfo;
+
+ pwdinfo = &padapter->wdinfo;
+ pwdinfo->padapter = padapter;
+
+ /* 1, 6 and 11 are the social channels defined in the Wi-Fi Direct specification. */
+ pwdinfo->social_chan[0] = 1;
+ pwdinfo->social_chan[1] = 6;
+ pwdinfo->social_chan[2] = 11;
+ pwdinfo->social_chan[3] = 0; /* channel 0 marks the end of scanning in the site survey function. */
+
+ /* Use channel 11 as the listen channel */
+ pwdinfo->listen_channel = 11;
+
+ if (role == P2P_ROLE_DEVICE) {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_LISTEN);
+ pwdinfo->intent = 1;
+ rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_LISTEN);
+ } else if (role == P2P_ROLE_CLIENT) {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ pwdinfo->intent = 1;
+ rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ } else if (role == P2P_ROLE_GO) {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ pwdinfo->intent = 15;
+ rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ }
+
+/* Use the OFDM rates in the P2P probe response frame: 6(B), 9(B), 12, 18, 24, 36, 48, 54. */
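+ /* Supported Rates encoding: rate in 0.5 Mbps units, with bit 7 set for a basic rate, */
+ /* e.g. 0x8c = 0x80 | 0x0c = 6 Mbps (basic), 0x6c = 54 Mbps. */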
+ pwdinfo->support_rate[0] = 0x8c; /* 6(B) */
+ pwdinfo->support_rate[1] = 0x92; /* 9(B) */
+ pwdinfo->support_rate[2] = 0x18; /* 12 */
+ pwdinfo->support_rate[3] = 0x24; /* 18 */
+ pwdinfo->support_rate[4] = 0x30; /* 24 */
+ pwdinfo->support_rate[5] = 0x48; /* 36 */
+ pwdinfo->support_rate[6] = 0x60; /* 48 */
+ pwdinfo->support_rate[7] = 0x6c; /* 54 */
+
+ memcpy(pwdinfo->p2p_wildcard_ssid, "DIRECT-", 7);
+
+ _rtw_memset(pwdinfo->device_name, 0x00, WPS_MAX_DEVICE_NAME_LEN);
+ pwdinfo->device_name_len = 0;
+
+ _rtw_memset(&pwdinfo->invitereq_info, 0x00, sizeof(struct tx_invite_req_info));
+ pwdinfo->invitereq_info.token = 3; /* Token used for P2P invitation request frame. */
+
+ _rtw_memset(&pwdinfo->inviteresp_info, 0x00, sizeof(struct tx_invite_resp_info));
+ pwdinfo->inviteresp_info.token = 0;
+
+ pwdinfo->profileindex = 0;
+ _rtw_memset(&pwdinfo->profileinfo[0], 0x00, sizeof(struct profile_info) * P2P_MAX_PERSISTENT_GROUP_NUM);
+
+ rtw_p2p_findphase_ex_set(pwdinfo, P2P_FINDPHASE_EX_NONE);
+
+ pwdinfo->listen_dwell = (u8) ((rtw_get_current_time() % 3) + 1);
+
+ _rtw_memset(&pwdinfo->tx_prov_disc_info, 0x00, sizeof(struct tx_provdisc_req_info));
+ pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_NONE;
+
+ _rtw_memset(&pwdinfo->nego_req_info, 0x00, sizeof(struct tx_nego_req_info));
+
+ pwdinfo->device_password_id_for_nego = WPS_DPID_PBC;
+ pwdinfo->negotiation_dialog_token = 1;
+
+ _rtw_memset(pwdinfo->nego_ssid, 0x00, WLAN_SSID_MAXLEN);
+ pwdinfo->nego_ssidlen = 0;
+
+ pwdinfo->ui_got_wps_info = P2P_NO_WPSINFO;
+ pwdinfo->supported_wps_cm = WPS_CONFIG_METHOD_DISPLAY | WPS_CONFIG_METHOD_PBC | WPS_CONFIG_METHOD_KEYPAD;
+ pwdinfo->channel_list_attr_len = 0;
+ _rtw_memset(pwdinfo->channel_list_attr, 0x00, 100);
+
+ _rtw_memset(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, 0x00, 4);
+ _rtw_memset(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, '0', 3);
+ _rtw_memset(&pwdinfo->groupid_info, 0x00, sizeof(struct group_id_info));
+ pwdinfo->wfd_tdls_enable = 0;
+ _rtw_memset(pwdinfo->p2p_peer_interface_addr, 0x00, ETH_ALEN);
+ _rtw_memset(pwdinfo->p2p_peer_device_addr, 0x00, ETH_ALEN);
+
+ pwdinfo->rx_invitereq_info.operation_ch[0] = 0;
+ pwdinfo->rx_invitereq_info.operation_ch[1] = 0; /* Used to indicate the scan end in site survey function */
+ pwdinfo->rx_invitereq_info.scan_op_ch_only = 0;
+ pwdinfo->p2p_info.operation_ch[0] = 0;
+ pwdinfo->p2p_info.operation_ch[1] = 0; /* Used to indicate the scan end in site survey function */
+ pwdinfo->p2p_info.scan_op_ch_only = 0;
+}
+
+int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role)
+{
+ int ret = _SUCCESS;
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ if (role == P2P_ROLE_DEVICE || role == P2P_ROLE_CLIENT || role == P2P_ROLE_GO) {
+ /* leave IPS/Autosuspend */
+ if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* Added by Albert 2011/03/22 */
+ /* In P2P mode the driver must not operate in 802.11b-only mode, */
+ /* so Tx packets should not use CCK rates. */
+ update_tx_basic_rate(padapter, WIRELESS_11AGN);
+
+ /* Enable P2P function */
+ init_wifidirect_info(padapter, role);
+
+ rtw_hal_set_odm_var(padapter, HAL_ODM_P2P_STATE, NULL, true);
+ } else if (role == P2P_ROLE_DISABLE) {
+ if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* Disable P2P function */
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
+ _cancel_timer_ex(&pwdinfo->find_phase_timer);
+ _cancel_timer_ex(&pwdinfo->restore_p2p_state_timer);
+ _cancel_timer_ex(&pwdinfo->pre_tx_scan_timer);
+ _cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey);
+ _cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey2);
+ reset_ch_sitesurvey_timer_process(padapter);
+ reset_ch_sitesurvey_timer_process2(padapter);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_NONE);
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DISABLE);
+ _rtw_memset(&pwdinfo->rx_prov_disc_info, 0x00, sizeof(struct rx_provdisc_req_info));
+ }
+
+ rtw_hal_set_odm_var(padapter, HAL_ODM_P2P_STATE, NULL, false);
+
+ /* Restore to initial setting. */
+ update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
+ }
+
+exit:
+ return ret;
+}
+
+#else
+u8 p2p_ps_wk_cmd(struct adapter *padapter, u8 p2p_ps_state, u8 enqueue)
+{
+ return _FAIL;
+}
+
+void process_p2p_ps_ie(struct adapter *padapter, u8 *IEs, u32 IELength)
+{
+}
+
+#endif /* CONFIG_88EU_P2P */
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
new file mode 100644
index 0000000..58a1661
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -0,0 +1,662 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_PWRCTRL_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <osdep_intf.h>
+#include <linux/usb.h>
+
+void ips_enter(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct xmit_priv *pxmit_priv = &padapter->xmitpriv;
+
+ if (padapter->registrypriv.mp_mode == 1)
+ return;
+
+ if (pxmit_priv->free_xmitbuf_cnt != NR_XMITBUFF ||
+ pxmit_priv->free_xmit_extbuf_cnt != NR_XMIT_EXTBUFF) {
+ DBG_88E_LEVEL(_drv_info_, "There are some pkts to transmit\n");
+ DBG_88E_LEVEL(_drv_info_, "free_xmitbuf_cnt: %d, free_xmit_extbuf_cnt: %d\n",
+ pxmit_priv->free_xmitbuf_cnt, pxmit_priv->free_xmit_extbuf_cnt);
+ return;
+ }
+
+ _enter_pwrlock(&pwrpriv->lock);
+
+ pwrpriv->bips_processing = true;
+
+ /* sync ips_mode with the requested mode */
+ pwrpriv->ips_mode = pwrpriv->ips_mode_req;
+
+ pwrpriv->ips_enter_cnts++;
+ DBG_88E("==>ips_enter cnts:%d\n", pwrpriv->ips_enter_cnts);
+ if (rf_off == pwrpriv->change_rfpwrstate) {
+ pwrpriv->bpower_saving = true;
+ DBG_88E_LEVEL(_drv_info_, "nolinked power save enter\n");
+
+ if (pwrpriv->ips_mode == IPS_LEVEL_2)
+ pwrpriv->bkeepfwalive = true;
+
+ rtw_ips_pwr_down(padapter);
+ pwrpriv->rf_pwrstate = rf_off;
+ }
+ pwrpriv->bips_processing = false;
+
+ _exit_pwrlock(&pwrpriv->lock);
+}
+
+int ips_leave(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct security_priv *psecuritypriv = &(padapter->securitypriv);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ int result = _SUCCESS;
+ int keyid;
+
+
+ _enter_pwrlock(&pwrpriv->lock);
+
+ if ((pwrpriv->rf_pwrstate == rf_off) && (!pwrpriv->bips_processing)) {
+ pwrpriv->bips_processing = true;
+ pwrpriv->change_rfpwrstate = rf_on;
+ pwrpriv->ips_leave_cnts++;
+ DBG_88E("==>ips_leave cnts:%d\n", pwrpriv->ips_leave_cnts);
+
+ result = rtw_ips_pwr_up(padapter);
+ if (result == _SUCCESS) {
+ pwrpriv->rf_pwrstate = rf_on;
+ }
+ DBG_88E_LEVEL(_drv_info_, "nolinked power save leave\n");
+
+ if ((_WEP40_ == psecuritypriv->dot11PrivacyAlgrthm) || (_WEP104_ == psecuritypriv->dot11PrivacyAlgrthm)) {
+ DBG_88E("==>%s, channel(%d), processing(%x)\n", __func__, padapter->mlmeextpriv.cur_channel, pwrpriv->bips_processing);
+ set_channel_bwmode(padapter, padapter->mlmeextpriv.cur_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ for (keyid = 0; keyid < 4; keyid++) {
+ if (pmlmepriv->key_mask & BIT(keyid)) {
+ if (keyid == psecuritypriv->dot11PrivacyKeyIndex)
+ result = rtw_set_key(padapter, psecuritypriv, keyid, 1);
+ else
+ result = rtw_set_key(padapter, psecuritypriv, keyid, 0);
+ }
+ }
+ }
+
+ DBG_88E("==> ips_leave.....LED(0x%08x)...\n", rtw_read32(padapter, 0x4c));
+ pwrpriv->bips_processing = false;
+
+ pwrpriv->bkeepfwalive = false;
+ pwrpriv->bpower_saving = false;
+ }
+
+ _exit_pwrlock(&pwrpriv->lock);
+
+ return result;
+}
+
+static bool rtw_pwr_unassociated_idle(struct adapter *adapter)
+{
+ struct adapter *buddy = adapter->pbuddy_adapter;
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(adapter->wdinfo);
+#endif
+
+ bool ret = false;
+
+ if (adapter->pwrctrlpriv.ips_deny_time >= rtw_get_current_time())
+ goto exit;
+
+ if (check_fwstate(pmlmepriv, WIFI_ASOC_STATE|WIFI_SITE_MONITOR) ||
+ check_fwstate(pmlmepriv, WIFI_UNDER_LINKING|WIFI_UNDER_WPS) ||
+ check_fwstate(pmlmepriv, WIFI_AP_STATE) ||
+ check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE|WIFI_ADHOC_STATE) ||
+#if defined(CONFIG_88EU_P2P)
+ !rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+#else
+ 0)
+#endif
+ goto exit;
+
+ /* consider buddy, if exist */
+ if (buddy) {
+ struct mlme_priv *b_pmlmepriv = &(buddy->mlmepriv);
+ #ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *b_pwdinfo = &(buddy->wdinfo);
+ #endif
+
+ if (check_fwstate(b_pmlmepriv, WIFI_ASOC_STATE|WIFI_SITE_MONITOR) ||
+ check_fwstate(b_pmlmepriv, WIFI_UNDER_LINKING|WIFI_UNDER_WPS) ||
+ check_fwstate(b_pmlmepriv, WIFI_AP_STATE) ||
+ check_fwstate(b_pmlmepriv, WIFI_ADHOC_MASTER_STATE|WIFI_ADHOC_STATE) ||
+#if defined(CONFIG_88EU_P2P)
+ !rtw_p2p_chk_state(b_pwdinfo, P2P_STATE_NONE))
+#else
+ 0)
+#endif
+ goto exit;
+ }
+ ret = true;
+
+exit:
+ return ret;
+}
+
+void rtw_ps_processor(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ enum rt_rf_power_state rfpwrstate;
+
+ pwrpriv->ps_processing = true;
+
+ if (pwrpriv->bips_processing)
+ goto exit;
+
+ if (padapter->pwrctrlpriv.bHWPwrPindetect) {
+ rfpwrstate = RfOnOffDetect(padapter);
+ DBG_88E("@@@@- #2 %s==> rfstate:%s\n", __func__, (rfpwrstate == rf_on) ? "rf_on" : "rf_off");
+
+ if (rfpwrstate != pwrpriv->rf_pwrstate) {
+ if (rfpwrstate == rf_off) {
+ pwrpriv->change_rfpwrstate = rf_off;
+ pwrpriv->brfoffbyhw = true;
+ padapter->bCardDisableWOHSM = true;
+ rtw_hw_suspend(padapter);
+ } else {
+ pwrpriv->change_rfpwrstate = rf_on;
+ rtw_hw_resume(padapter);
+ }
+ DBG_88E("current rf_pwrstate(%s)\n", (pwrpriv->rf_pwrstate == rf_off) ? "rf_off" : "rf_on");
+ }
+ pwrpriv->pwr_state_check_cnts++;
+ }
+
+ if (pwrpriv->ips_mode_req == IPS_NONE)
+ goto exit;
+
+ if (rtw_pwr_unassociated_idle(padapter) == false)
+ goto exit;
+
+ if ((pwrpriv->rf_pwrstate == rf_on) && ((pwrpriv->pwr_state_check_cnts%4) == 0)) {
+ DBG_88E("==>%s .fw_state(%x)\n", __func__, get_fwstate(pmlmepriv));
+ pwrpriv->change_rfpwrstate = rf_off;
+
+ ips_enter(padapter);
+ }
+exit:
+ rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
+ pwrpriv->ps_processing = false;
+ return;
+}
+
+static void pwr_state_check_handler(void *FunctionContext)
+{
+ struct adapter *padapter = (struct adapter *)FunctionContext;
+ rtw_ps_cmd(padapter);
+}
+
+/*
+ *
+ * Parameters
+ * padapter
+ * pslv power state level, only could be PS_STATE_S0 ~ PS_STATE_S4
+ *
+ */
+void rtw_set_rpwm(struct adapter *padapter, u8 pslv)
+{
+ u8 rpwm;
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+
+_func_enter_;
+
+ pslv = PS_STATE(pslv);
+
+
+ if (pwrpriv->btcoex_rfon) {
+ if (pslv < PS_STATE_S4)
+ pslv = PS_STATE_S3;
+ }
+
+ if (pwrpriv->rpwm == pslv) {
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
+ ("%s: Already set rpwm[0x%02X], new=0x%02X!\n", __func__, pwrpriv->rpwm, pslv));
+ return;
+ }
+
+ if ((padapter->bSurpriseRemoved) ||
+ (!padapter->hw_init_completed)) {
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
+ ("%s: SurpriseRemoved(%d) hw_init_completed(%d)\n",
+ __func__, padapter->bSurpriseRemoved, padapter->hw_init_completed));
+
+ pwrpriv->cpwm = PS_STATE_S4;
+
+ return;
+ }
+
+ if (padapter->bDriverStopped) {
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
+ ("%s: change power state(0x%02X) when DriverStopped\n", __func__, pslv));
+
+ if (pslv < PS_STATE_S2) {
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
+ ("%s: Reject to enter PS_STATE(0x%02X) lower than S2 when DriverStopped!!\n", __func__, pslv));
+ return;
+ }
+ }
+
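+ /* Bit 7 of RPWM acts as a toggle (tog alternates between 0x00 and 0x80 below), */
+ /* so the firmware can detect a new request even when the requested level is unchanged. */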
+ rpwm = pslv | pwrpriv->tog;
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_notice_,
+ ("rtw_set_rpwm: rpwm=0x%02x cpwm=0x%02x\n", rpwm, pwrpriv->cpwm));
+
+ pwrpriv->rpwm = pslv;
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_SET_RPWM, (u8 *)(&rpwm));
+
+ pwrpriv->tog += 0x80;
+ pwrpriv->cpwm = pslv;
+
+_func_exit_;
+}
+
+static u8 PS_RDY_CHECK(struct adapter *padapter)
+{
+ u32 curr_time, delta_time;
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+
+ curr_time = rtw_get_current_time();
+ delta_time = curr_time - pwrpriv->DelayLPSLastTimeStamp;
+
+ if (delta_time < LPS_DELAY_TIME)
+ return false;
+
+ if ((check_fwstate(pmlmepriv, _FW_LINKED) == false) ||
+ (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) ||
+ (check_fwstate(pmlmepriv, WIFI_AP_STATE)) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)))
+ return false;
+ if (pwrpriv->bInSuspend)
+ return false;
+ if ((padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) && (padapter->securitypriv.binstallGrpkey == false)) {
+ DBG_88E("Group handshake still in progress !!!\n");
+ return false;
+ }
+ return true;
+}
+
+void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_ant_mode)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+#endif /* CONFIG_88EU_P2P */
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_notice_,
+ ("%s: PowerMode=%d Smart_PS=%d\n",
+ __func__, ps_mode, smart_ps));
+
+ if (ps_mode > PM_Card_Disable) {
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_, ("ps_mode:%d error\n", ps_mode));
+ return;
+ }
+
+ if (pwrpriv->pwr_mode == ps_mode) {
+ if (PS_MODE_ACTIVE == ps_mode)
+ return;
+
+ if ((pwrpriv->smart_ps == smart_ps) &&
+ (pwrpriv->bcn_ant_mode == bcn_ant_mode))
+ return;
+ }
+
+ /* if (pwrpriv->pwr_mode == PS_MODE_ACTIVE) */
+ if (ps_mode == PS_MODE_ACTIVE) {
+#ifdef CONFIG_88EU_P2P
+ if (pwdinfo->opp_ps == 0) {
+ DBG_88E("rtw_set_ps_mode: Leave 802.11 power save\n");
+ pwrpriv->pwr_mode = ps_mode;
+ rtw_set_rpwm(padapter, PS_STATE_S4);
+ rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&ps_mode));
+ pwrpriv->bFwCurrentInPSMode = false;
+ }
+ } else {
+#endif /* CONFIG_88EU_P2P */
+ if (PS_RDY_CHECK(padapter)) {
+ DBG_88E("%s: Enter 802.11 power save\n", __func__);
+ pwrpriv->bFwCurrentInPSMode = true;
+ pwrpriv->pwr_mode = ps_mode;
+ pwrpriv->smart_ps = smart_ps;
+ pwrpriv->bcn_ant_mode = bcn_ant_mode;
+ rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&ps_mode));
+
+#ifdef CONFIG_88EU_P2P
+ /* Set CTWindow after LPS */
+ if (pwdinfo->opp_ps == 1)
+ p2p_ps_wk_cmd(padapter, P2P_PS_ENABLE, 0);
+#endif /* CONFIG_88EU_P2P */
+
+ rtw_set_rpwm(padapter, PS_STATE_S2);
+ }
+ }
+
+_func_exit_;
+}
+
+/*
+ * Return:
+ * 0: Leave OK
+ * -1: Timeout
+ * -2: Other error
+ */
+s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
+{
+ u32 start_time;
+ u8 bAwake = false;
+ s32 err = 0;
+
+
+ start_time = rtw_get_current_time();
+ while (1) {
+ rtw_hal_get_hwreg(padapter, HW_VAR_FWLPS_RF_ON, &bAwake);
+ if (bAwake)
+ break;
+
+ if (padapter->bSurpriseRemoved) {
+ err = -2;
+ DBG_88E("%s: device surprise removed!!\n", __func__);
+ break;
+ }
+
+ if (rtw_get_passing_time_ms(start_time) > delay_ms) {
+ err = -1;
+ DBG_88E("%s: Wait for FW LPS leave more than %u ms!!!\n", __func__, delay_ms);
+ break;
+ }
+ rtw_usleep_os(100);
+ }
+
+ return err;
+}
+
+/* */
+/* Description: */
+/* Enter the leisure power save mode. */
+/* */
+void LPS_Enter(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+
+_func_enter_;
+
+ if (PS_RDY_CHECK(padapter) == false)
+ return;
+
+ if (pwrpriv->bLeisurePs) {
+ /* Idle for a while if we connected to the AP only a short time ago. */
+ if (pwrpriv->LpsIdleCount >= 2) { /* 4 Sec */
+ if (pwrpriv->pwr_mode == PS_MODE_ACTIVE) {
+ pwrpriv->bpower_saving = true;
+ DBG_88E("%s smart_ps:%d\n", __func__, pwrpriv->smart_ps);
+ /* For Tenda W311R IOT issue */
+ rtw_set_ps_mode(padapter, pwrpriv->power_mgnt, pwrpriv->smart_ps, 0);
+ }
+ } else {
+ pwrpriv->LpsIdleCount++;
+ }
+ }
+
+_func_exit_;
+}
+
+#define LPS_LEAVE_TIMEOUT_MS 100
+
+/* Description: */
+/* Leave the leisure power save mode. */
+void LPS_Leave(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+
+_func_enter_;
+
+ if (pwrpriv->bLeisurePs) {
+ if (pwrpriv->pwr_mode != PS_MODE_ACTIVE) {
+ rtw_set_ps_mode(padapter, PS_MODE_ACTIVE, 0, 0);
+
+ if (pwrpriv->pwr_mode == PS_MODE_ACTIVE)
+ LPS_RF_ON_check(padapter, LPS_LEAVE_TIMEOUT_MS);
+ }
+ }
+
+ pwrpriv->bpower_saving = false;
+
+_func_exit_;
+}
+
+/* */
+/* Description: Leave all power save mode: LPS, FwLPS, IPS if needed. */
+/* Move code to function by tynli. 2010.03.26. */
+/* */
+void LeaveAllPowerSaveMode(struct adapter *Adapter)
+{
+ struct mlme_priv *pmlmepriv = &(Adapter->mlmepriv);
+ u8 enqueue = 0;
+
+_func_enter_;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) { /* connect */
+ p2p_ps_wk_cmd(Adapter, P2P_PS_DISABLE, enqueue);
+
+ rtw_lps_ctrl_wk_cmd(Adapter, LPS_CTRL_LEAVE, enqueue);
+ }
+
+_func_exit_;
+}
+
+void rtw_init_pwrctrl_priv(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
+
+_func_enter_;
+
+ _init_pwrlock(&pwrctrlpriv->lock);
+ pwrctrlpriv->rf_pwrstate = rf_on;
+ pwrctrlpriv->ips_enter_cnts = 0;
+ pwrctrlpriv->ips_leave_cnts = 0;
+ pwrctrlpriv->bips_processing = false;
+
+ pwrctrlpriv->ips_mode = padapter->registrypriv.ips_mode;
+ pwrctrlpriv->ips_mode_req = padapter->registrypriv.ips_mode;
+
+ pwrctrlpriv->pwr_state_check_interval = RTW_PWR_STATE_CHK_INTERVAL;
+ pwrctrlpriv->pwr_state_check_cnts = 0;
+ pwrctrlpriv->bInternalAutoSuspend = false;
+ pwrctrlpriv->bInSuspend = false;
+ pwrctrlpriv->bkeepfwalive = false;
+
+ pwrctrlpriv->LpsIdleCount = 0;
+ if (padapter->registrypriv.mp_mode == 1)
+ pwrctrlpriv->power_mgnt = PS_MODE_ACTIVE;
+ else
+ pwrctrlpriv->power_mgnt = padapter->registrypriv.power_mgnt;/* PS_MODE_MIN; */
+ pwrctrlpriv->bLeisurePs = (PS_MODE_ACTIVE != pwrctrlpriv->power_mgnt) ? true : false;
+
+ pwrctrlpriv->bFwCurrentInPSMode = false;
+
+ pwrctrlpriv->rpwm = 0;
+ pwrctrlpriv->cpwm = PS_STATE_S4;
+
+ pwrctrlpriv->pwr_mode = PS_MODE_ACTIVE;
+ pwrctrlpriv->smart_ps = padapter->registrypriv.smart_ps;
+ pwrctrlpriv->bcn_ant_mode = 0;
+
+ pwrctrlpriv->tog = 0x80;
+
+ pwrctrlpriv->btcoex_rfon = false;
+
+ _init_timer(&(pwrctrlpriv->pwr_state_check_timer), padapter->pnetdev, pwr_state_check_handler, (u8 *)padapter);
+
+_func_exit_;
+}
+
+void rtw_free_pwrctrl_priv(struct adapter *adapter)
+{
+ struct pwrctrl_priv *pwrctrlpriv = &adapter->pwrctrlpriv;
+
+_func_enter_;
+
+ _free_pwrlock(&pwrctrlpriv->lock);
+
+_func_exit_;
+}
+
+u8 rtw_interface_ps_func(struct adapter *padapter, enum hal_intf_ps_func efunc_id, u8 *val)
+{
+ u8 bResult = true;
+ rtw_hal_intf_ps_func(padapter, efunc_id, val);
+
+ return bResult;
+}
+
+
+inline void rtw_set_ips_deny(struct adapter *padapter, u32 ms)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ pwrpriv->ips_deny_time = rtw_get_current_time() + rtw_ms_to_systime(ms);
+}
+
+/*
+* rtw_pwr_wakeup - Wake the NIC up from (1) IPS or (2) USB autosuspend
+* @adapter: pointer to the struct adapter
+* @ips_deffer_ms: time in ms during which falling back into IPS is prevented after wakeup
+* Return: _SUCCESS or _FAIL
+*/
+
+int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *caller)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ int ret = _SUCCESS;
+
+ if (pwrpriv->ips_deny_time < rtw_get_current_time() + rtw_ms_to_systime(ips_deffer_ms))
+ pwrpriv->ips_deny_time = rtw_get_current_time() + rtw_ms_to_systime(ips_deffer_ms);
+
+{
+ u32 start = rtw_get_current_time();
+ if (pwrpriv->ps_processing) {
+ DBG_88E("%s wait ps_processing...\n", __func__);
+ while (pwrpriv->ps_processing && rtw_get_passing_time_ms(start) <= 3000)
+ rtw_msleep_os(10);
+ if (pwrpriv->ps_processing)
+ DBG_88E("%s wait ps_processing timeout\n", __func__);
+ else
+ DBG_88E("%s wait ps_processing done\n", __func__);
+ }
+}
+
+	/* Wakeup is not allowed while the system is suspended */
+ if ((!pwrpriv->bInternalAutoSuspend) && (pwrpriv->bInSuspend)) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* block??? */
+ if ((pwrpriv->bInternalAutoSuspend) && (padapter->net_closed)) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+	/* I think this should be checked in the IPS, LPS, and autosuspend functions... */
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ ret = _SUCCESS;
+ goto exit;
+ }
+ if (rf_off == pwrpriv->rf_pwrstate) {
+ DBG_88E("%s call ips_leave....\n", __func__);
+ if (_FAIL == ips_leave(padapter)) {
+ DBG_88E("======> ips_leave fail.............\n");
+ ret = _FAIL;
+ goto exit;
+ }
+ }
+
+	/* TODO: the following checks need to be merged... */
+ if (padapter->bDriverStopped || !padapter->bup ||
+ !padapter->hw_init_completed) {
+ DBG_88E("%s: bDriverStopped=%d, bup=%d, hw_init_completed =%u\n"
+ , caller
+ , padapter->bDriverStopped
+ , padapter->bup
+ , padapter->hw_init_completed);
+		ret = _FAIL;
+ goto exit;
+ }
+
+exit:
+ if (pwrpriv->ips_deny_time < rtw_get_current_time() + rtw_ms_to_systime(ips_deffer_ms))
+ pwrpriv->ips_deny_time = rtw_get_current_time() + rtw_ms_to_systime(ips_deffer_ms);
+ return ret;
+}
+
+int rtw_pm_set_lps(struct adapter *padapter, u8 mode)
+{
+ int ret = 0;
+ struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
+
+ if (mode < PS_MODE_NUM) {
+ if (pwrctrlpriv->power_mgnt != mode) {
+ if (PS_MODE_ACTIVE == mode)
+ LeaveAllPowerSaveMode(padapter);
+ else
+ pwrctrlpriv->LpsIdleCount = 2;
+ pwrctrlpriv->power_mgnt = mode;
+			pwrctrlpriv->bLeisurePs = (pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE);
+ }
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int rtw_pm_set_ips(struct adapter *padapter, u8 mode)
+{
+ struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
+
+ if (mode == IPS_NORMAL || mode == IPS_LEVEL_2) {
+ rtw_ips_mode_req(pwrctrlpriv, mode);
+ DBG_88E("%s %s\n", __func__, mode == IPS_NORMAL ? "IPS_NORMAL" : "IPS_LEVEL_2");
+ return 0;
+ } else if (mode == IPS_NONE) {
+ rtw_ips_mode_req(pwrctrlpriv, mode);
+ DBG_88E("%s %s\n", __func__, "IPS_NONE");
+ if ((padapter->bSurpriseRemoved == 0) && (_FAIL == rtw_pwr_wakeup(padapter)))
+ return -EFAULT;
+ } else {
+ return -EINVAL;
+ }
+ return 0;
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
new file mode 100644
index 0000000..2011657
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -0,0 +1,2299 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_RECV_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <recv_osdep.h>
+#include <mlme_osdep.h>
+#include <ip.h>
+#include <if_ether.h>
+#include <ethernet.h>
+#include <usb_ops.h>
+#include <wifi.h>
+
+static u8 SNAP_ETH_TYPE_IPX[2] = {0x81, 0x37};
+static u8 SNAP_ETH_TYPE_APPLETALK_AARP[2] = {0x80, 0xf3};
+
+/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
+static u8 rtw_bridge_tunnel_header[] = {
+ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8
+};
+
+static u8 rtw_rfc1042_header[] = {
+ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00
+};
+
+void rtw_signal_stat_timer_hdl(RTW_TIMER_HDL_ARGS);
+
+void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv)
+{
+_func_enter_;
+
+	_rtw_memset((u8 *)psta_recvpriv, 0, sizeof(struct sta_recv_priv));
+
+ _rtw_spinlock_init(&psta_recvpriv->lock);
+
+ _rtw_init_queue(&psta_recvpriv->defrag_q);
+
+_func_exit_;
+}
+
+int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
+{
+ int i;
+
+ union recv_frame *precvframe;
+
+ int res = _SUCCESS;
+
+_func_enter_;
+ _rtw_spinlock_init(&precvpriv->lock);
+
+ _rtw_init_queue(&precvpriv->free_recv_queue);
+ _rtw_init_queue(&precvpriv->recv_pending_queue);
+ _rtw_init_queue(&precvpriv->uc_swdec_pending_queue);
+
+ precvpriv->adapter = padapter;
+
+ precvpriv->free_recvframe_cnt = NR_RECVFRAME;
+
+ rtw_os_recv_resource_init(precvpriv, padapter);
+
+ precvpriv->pallocated_frame_buf = rtw_zvmalloc(NR_RECVFRAME * sizeof(union recv_frame) + RXFRAME_ALIGN_SZ);
+
+ if (precvpriv->pallocated_frame_buf == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ precvpriv->precv_frame_buf = (u8 *)N_BYTE_ALIGMENT((size_t)(precvpriv->pallocated_frame_buf), RXFRAME_ALIGN_SZ);
+
+ precvframe = (union recv_frame *)precvpriv->precv_frame_buf;
+
+ for (i = 0; i < NR_RECVFRAME; i++) {
+ _rtw_init_listhead(&(precvframe->u.list));
+
+ rtw_list_insert_tail(&(precvframe->u.list), &(precvpriv->free_recv_queue.queue));
+
+ res = rtw_os_recv_resource_alloc(padapter, precvframe);
+
+ precvframe->u.hdr.len = 0;
+
+ precvframe->u.hdr.adapter = padapter;
+ precvframe++;
+ }
+ precvpriv->rx_pending_cnt = 1;
+
+ _rtw_init_sema(&precvpriv->allrxreturnevt, 0);
+
+ res = rtw_hal_init_recv_priv(padapter);
+
+ _init_timer(&precvpriv->signal_stat_timer, padapter->pnetdev, RTW_TIMER_HDL_NAME(signal_stat), padapter);
+
+ precvpriv->signal_stat_sampling_interval = 1000; /* ms */
+
+ rtw_set_signal_stat_timer(precvpriv);
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+static void rtw_mfree_recv_priv_lock(struct recv_priv *precvpriv)
+{
+ _rtw_spinlock_free(&precvpriv->lock);
+ _rtw_spinlock_free(&precvpriv->free_recv_queue.lock);
+ _rtw_spinlock_free(&precvpriv->recv_pending_queue.lock);
+
+ _rtw_spinlock_free(&precvpriv->free_recv_buf_queue.lock);
+}
+
+void _rtw_free_recv_priv (struct recv_priv *precvpriv)
+{
+ struct adapter *padapter = precvpriv->adapter;
+
+_func_enter_;
+
+ rtw_free_uc_swdec_pending_queue(padapter);
+
+ rtw_mfree_recv_priv_lock(precvpriv);
+
+ rtw_os_recv_resource_free(precvpriv);
+
+ if (precvpriv->pallocated_frame_buf) {
+ rtw_vmfree(precvpriv->pallocated_frame_buf, NR_RECVFRAME * sizeof(union recv_frame) + RXFRAME_ALIGN_SZ);
+ }
+
+ rtw_hal_free_recv_priv(padapter);
+
+_func_exit_;
+}
+
+union recv_frame *_rtw_alloc_recvframe (struct __queue *pfree_recv_queue)
+{
+ union recv_frame *precvframe;
+ struct list_head *plist, *phead;
+ struct adapter *padapter;
+ struct recv_priv *precvpriv;
+_func_enter_;
+
+ if (_rtw_queue_empty(pfree_recv_queue)) {
+ precvframe = NULL;
+ } else {
+ phead = get_list_head(pfree_recv_queue);
+
+ plist = get_next(phead);
+
+ precvframe = LIST_CONTAINOR(plist, union recv_frame, u);
+
+ rtw_list_delete(&precvframe->u.hdr.list);
+ padapter = precvframe->u.hdr.adapter;
+ if (padapter != NULL) {
+ precvpriv = &padapter->recvpriv;
+ if (pfree_recv_queue == &precvpriv->free_recv_queue)
+ precvpriv->free_recvframe_cnt--;
+ }
+ }
+
+_func_exit_;
+
+ return precvframe;
+}
+
+union recv_frame *rtw_alloc_recvframe (struct __queue *pfree_recv_queue)
+{
+ unsigned long irqL;
+ union recv_frame *precvframe;
+
+ _enter_critical_bh(&pfree_recv_queue->lock, &irqL);
+
+ precvframe = _rtw_alloc_recvframe(pfree_recv_queue);
+
+ _exit_critical_bh(&pfree_recv_queue->lock, &irqL);
+
+ return precvframe;
+}
+
+void rtw_init_recvframe(union recv_frame *precvframe, struct recv_priv *precvpriv)
+{
+ /* Perry: This can be removed */
+ _rtw_init_listhead(&precvframe->u.hdr.list);
+
+ precvframe->u.hdr.len = 0;
+}
+
+int rtw_free_recvframe(union recv_frame *precvframe, struct __queue *pfree_recv_queue)
+{
+ unsigned long irqL;
+ struct adapter *padapter = precvframe->u.hdr.adapter;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+
+_func_enter_;
+
+ if (precvframe->u.hdr.pkt) {
+ dev_kfree_skb_any(precvframe->u.hdr.pkt);/* free skb by driver */
+ precvframe->u.hdr.pkt = NULL;
+ }
+
+ _enter_critical_bh(&pfree_recv_queue->lock, &irqL);
+
+ rtw_list_delete(&(precvframe->u.hdr.list));
+
+ precvframe->u.hdr.len = 0;
+
+ rtw_list_insert_tail(&(precvframe->u.hdr.list), get_list_head(pfree_recv_queue));
+
+ if (padapter != NULL) {
+ if (pfree_recv_queue == &precvpriv->free_recv_queue)
+ precvpriv->free_recvframe_cnt++;
+ }
+
+ _exit_critical_bh(&pfree_recv_queue->lock, &irqL);
+
+_func_exit_;
+
+ return _SUCCESS;
+}
+
+int _rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue)
+{
+ struct adapter *padapter = precvframe->u.hdr.adapter;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+
+_func_enter_;
+
+ rtw_list_delete(&(precvframe->u.hdr.list));
+ rtw_list_insert_tail(&(precvframe->u.hdr.list), get_list_head(queue));
+
+ if (padapter != NULL) {
+ if (queue == &precvpriv->free_recv_queue)
+ precvpriv->free_recvframe_cnt++;
+ }
+
+_func_exit_;
+
+ return _SUCCESS;
+}
+
+int rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue)
+{
+ int ret;
+ unsigned long irqL;
+
+ _enter_critical_bh(&queue->lock, &irqL);
+ ret = _rtw_enqueue_recvframe(precvframe, queue);
+ _exit_critical_bh(&queue->lock, &irqL);
+
+ return ret;
+}
+
+/*
+ * Caller: defrag; recvframe_chk_defrag in recv_thread (passive).
+ * pframequeue: defrag_queue; will be accessed in recv_thread (passive).
+ *
+ * The queue is protected by its spinlock.
+ */
+
+void rtw_free_recvframe_queue(struct __queue *pframequeue, struct __queue *pfree_recv_queue)
+{
+ union recv_frame *precvframe;
+ struct list_head *plist, *phead;
+
+_func_enter_;
+ spin_lock(&pframequeue->lock);
+
+ phead = get_list_head(pframequeue);
+ plist = get_next(phead);
+
+ while (rtw_end_of_queue_search(phead, plist) == false) {
+ precvframe = LIST_CONTAINOR(plist, union recv_frame, u);
+
+ plist = get_next(plist);
+
+ rtw_free_recvframe(precvframe, pfree_recv_queue);
+ }
+
+ spin_unlock(&pframequeue->lock);
+
+_func_exit_;
+}
+
+u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
+{
+ u32 cnt = 0;
+ union recv_frame *pending_frame;
+ while ((pending_frame = rtw_alloc_recvframe(&adapter->recvpriv.uc_swdec_pending_queue))) {
+ rtw_free_recvframe(pending_frame, &adapter->recvpriv.free_recv_queue);
+ DBG_88E("%s: dequeue uc_swdec_pending_queue\n", __func__);
+ cnt++;
+ }
+
+ return cnt;
+}
+
+int rtw_enqueue_recvbuf_to_head(struct recv_buf *precvbuf, struct __queue *queue)
+{
+ unsigned long irqL;
+
+ _enter_critical_bh(&queue->lock, &irqL);
+
+ rtw_list_delete(&precvbuf->list);
+ rtw_list_insert_head(&precvbuf->list, get_list_head(queue));
+
+ _exit_critical_bh(&queue->lock, &irqL);
+
+ return _SUCCESS;
+}
+
+int rtw_enqueue_recvbuf(struct recv_buf *precvbuf, struct __queue *queue)
+{
+ unsigned long irqL;
+ _enter_critical_ex(&queue->lock, &irqL);
+
+ rtw_list_delete(&precvbuf->list);
+
+ rtw_list_insert_tail(&precvbuf->list, get_list_head(queue));
+ _exit_critical_ex(&queue->lock, &irqL);
+ return _SUCCESS;
+}
+
+struct recv_buf *rtw_dequeue_recvbuf (struct __queue *queue)
+{
+ unsigned long irqL;
+ struct recv_buf *precvbuf;
+ struct list_head *plist, *phead;
+
+ _enter_critical_ex(&queue->lock, &irqL);
+
+ if (_rtw_queue_empty(queue)) {
+ precvbuf = NULL;
+ } else {
+ phead = get_list_head(queue);
+
+ plist = get_next(phead);
+
+ precvbuf = LIST_CONTAINOR(plist, struct recv_buf, list);
+
+ rtw_list_delete(&precvbuf->list);
+ }
+
+ _exit_critical_ex(&queue->lock, &irqL);
+
+ return precvbuf;
+}
+
+static int recvframe_chkmic(struct adapter *adapter, union recv_frame *precvframe)
+{
+ int i, res = _SUCCESS;
+ u32 datalen;
+ u8 miccode[8];
+ u8 bmic_err = false, brpt_micerror = true;
+ u8 *pframe, *payload, *pframemic;
+ u8 *mickey;
+ struct sta_info *stainfo;
+ struct rx_pkt_attrib *prxattrib = &precvframe->u.hdr.attrib;
+ struct security_priv *psecuritypriv = &adapter->securitypriv;
+
+ struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+_func_enter_;
+
+ stainfo = rtw_get_stainfo(&adapter->stapriv, &prxattrib->ta[0]);
+
+ if (prxattrib->encrypt == _TKIP_) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n recvframe_chkmic:prxattrib->encrypt==_TKIP_\n"));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n recvframe_chkmic:da=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+ prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2], prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5]));
+
+ /* calculate mic code */
+ if (stainfo != NULL) {
+ if (IS_MCAST(prxattrib->ra)) {
+ mickey = &psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0];
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n recvframe_chkmic: bcmc key\n"));
+
+ if (!psecuritypriv) {
+ res = _FAIL;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n recvframe_chkmic:didn't install group key!!!!!!!!!!\n"));
+ DBG_88E("\n recvframe_chkmic:didn't install group key!!!!!!!!!!\n");
+ goto exit;
+ }
+ } else {
+ mickey = &stainfo->dot11tkiprxmickey.skey[0];
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n recvframe_chkmic: unicast key\n"));
+ }
+
+			datalen = precvframe->u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len-prxattrib->icv_len-8;/* icv_len includes the mic code */
+ pframe = precvframe->u.hdr.rx_data;
+ payload = pframe+prxattrib->hdrlen+prxattrib->iv_len;
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n prxattrib->iv_len=%d prxattrib->icv_len=%d\n", prxattrib->iv_len, prxattrib->icv_len));
+ rtw_seccalctkipmic(mickey, pframe, payload, datalen, &miccode[0],
+ (unsigned char)prxattrib->priority); /* care the length of the data */
+
+ pframemic = payload+datalen;
+
+ bmic_err = false;
+
+ for (i = 0; i < 8; i++) {
+ if (miccode[i] != *(pframemic+i)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("recvframe_chkmic:miccode[%d](%02x)!=*(pframemic+%d)(%02x) ",
+ i, miccode[i], i, *(pframemic+i)));
+ bmic_err = true;
+ }
+ }
+
+ if (bmic_err) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("\n *(pframemic-8)-*(pframemic-1)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+ *(pframemic-8), *(pframemic-7), *(pframemic-6),
+ *(pframemic-5), *(pframemic-4), *(pframemic-3),
+ *(pframemic-2), *(pframemic-1)));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("\n *(pframemic-16)-*(pframemic-9)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+ *(pframemic-16), *(pframemic-15), *(pframemic-14),
+ *(pframemic-13), *(pframemic-12), *(pframemic-11),
+ *(pframemic-10), *(pframemic-9)));
+ {
+ uint i;
+					RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n ======dump packet (len=%d)======\n", precvframe->u.hdr.len));
+ for (i = 0; i < precvframe->u.hdr.len; i = i+8) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x",
+ *(precvframe->u.hdr.rx_data+i), *(precvframe->u.hdr.rx_data+i+1),
+ *(precvframe->u.hdr.rx_data+i+2), *(precvframe->u.hdr.rx_data+i+3),
+ *(precvframe->u.hdr.rx_data+i+4), *(precvframe->u.hdr.rx_data+i+5),
+ *(precvframe->u.hdr.rx_data+i+6), *(precvframe->u.hdr.rx_data+i+7)));
+ }
+					RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n ====== dump packet end [len=%d]======\n", precvframe->u.hdr.len));
+					RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n hdrlen=%d,\n", prxattrib->hdrlen));
+ }
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("ra=0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x psecuritypriv->binstallGrpkey=%d ",
+ prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
+ prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5], psecuritypriv->binstallGrpkey));
+
+				/* double check key_index because of a timing issue; */
+				/* comparing with psecuritypriv->dot118021XGrpKeyid would also hit the timing issue */
+ if ((IS_MCAST(prxattrib->ra) == true) && (prxattrib->key_index != pmlmeinfo->key_index))
+ brpt_micerror = false;
+
+ if ((prxattrib->bdecrypted) && (brpt_micerror)) {
+ rtw_handle_tkip_mic_err(adapter, (u8)IS_MCAST(prxattrib->ra));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted));
+ DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted);
+ } else {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted));
+ DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted);
+ }
+ res = _FAIL;
+ } else {
+ /* mic checked ok */
+ if ((!psecuritypriv->bcheck_grpkey) && (IS_MCAST(prxattrib->ra))) {
+ psecuritypriv->bcheck_grpkey = true;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("psecuritypriv->bcheck_grpkey = true"));
+ }
+ }
+ } else {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("recvframe_chkmic: rtw_get_stainfo==NULL!!!\n"));
+ }
+
+ recvframe_pull_tail(precvframe, 8);
+ }
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+/* decrypt and set the ivlen, icvlen of the recv_frame */
+static union recv_frame *decryptor(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ struct rx_pkt_attrib *prxattrib = &precv_frame->u.hdr.attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ union recv_frame *return_packet = precv_frame;
+ u32 res = _SUCCESS;
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("prxstat->decrypted=%x prxattrib->encrypt=0x%03x\n", prxattrib->bdecrypted, prxattrib->encrypt));
+
+ if (prxattrib->encrypt > 0) {
+ u8 *iv = precv_frame->u.hdr.rx_data+prxattrib->hdrlen;
+ prxattrib->key_index = (((iv[3])>>6)&0x3);
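+		/* e.g. (hypothetical IV) iv[3] = 0x40 selects key_index 1 */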
+
+ if (prxattrib->key_index > WEP_KEYS) {
+ DBG_88E("prxattrib->key_index(%d)>WEP_KEYS\n", prxattrib->key_index);
+
+ switch (prxattrib->encrypt) {
+ case _WEP40_:
+ case _WEP104_:
+ prxattrib->key_index = psecuritypriv->dot11PrivacyKeyIndex;
+ break;
+ case _TKIP_:
+ case _AES_:
+ default:
+ prxattrib->key_index = psecuritypriv->dot118021XGrpKeyid;
+ break;
+ }
+ }
+ }
+
+ if ((prxattrib->encrypt > 0) && ((prxattrib->bdecrypted == 0) || (psecuritypriv->sw_decrypt))) {
+ psecuritypriv->hw_decrypted = false;
+
+ switch (prxattrib->encrypt) {
+ case _WEP40_:
+ case _WEP104_:
+ rtw_wep_decrypt(padapter, (u8 *)precv_frame);
+ break;
+ case _TKIP_:
+ res = rtw_tkip_decrypt(padapter, (u8 *)precv_frame);
+ break;
+ case _AES_:
+ res = rtw_aes_decrypt(padapter, (u8 *)precv_frame);
+ break;
+ default:
+ break;
+ }
+ } else if (prxattrib->bdecrypted == 1 && prxattrib->encrypt > 0 &&
+ (psecuritypriv->busetkipkey == 1 || prxattrib->encrypt != _TKIP_))
+ psecuritypriv->hw_decrypted = true;
+
+ if (res == _FAIL) {
+ rtw_free_recvframe(return_packet, &padapter->recvpriv.free_recv_queue);
+ return_packet = NULL;
+ }
+
+_func_exit_;
+
+ return return_packet;
+}
+
+/* set the security information in the recv_frame */
+static union recv_frame *portctrl(struct adapter *adapter, union recv_frame *precv_frame)
+{
+ u8 *psta_addr = NULL, *ptr;
+ uint auth_alg;
+ struct recv_frame_hdr *pfhdr;
+ struct sta_info *psta;
+ struct sta_priv *pstapriv;
+ union recv_frame *prtnframe;
+ u16 ether_type = 0;
+ u16 eapol_type = 0x888e;/* for Funia BD's WPA issue */
+ struct rx_pkt_attrib *pattrib;
+ __be16 be_tmp;
+
+_func_enter_;
+
+	pstapriv = &adapter->stapriv;
+
+	auth_alg = adapter->securitypriv.dot11AuthAlgrthm;
+
+	ptr = get_recvframe_data(precv_frame);
+	pfhdr = &precv_frame->u.hdr;
+	pattrib = &pfhdr->attrib;
+	psta_addr = pattrib->ta;
+	psta = rtw_get_stainfo(pstapriv, psta_addr);
+
+	prtnframe = NULL;
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("########portctrl:adapter->securitypriv.dot11AuthAlgrthm=%d\n", adapter->securitypriv.dot11AuthAlgrthm));
+
+ if (auth_alg == 2) {
+ if ((psta != NULL) && (psta->ieee8021x_blocked)) {
+ /* blocked */
+ /* only accept EAPOL frame */
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("########portctrl:psta->ieee8021x_blocked==1\n"));
+
+ prtnframe = precv_frame;
+
+ /* get ether_type */
+ ptr = ptr+pfhdr->attrib.hdrlen+pfhdr->attrib.iv_len+LLC_HEADER_SIZE;
+ memcpy(&be_tmp, ptr, 2);
+ ether_type = ntohs(be_tmp);
+
+ if (ether_type == eapol_type) {
+ prtnframe = precv_frame;
+ } else {
+ /* free this frame */
+ rtw_free_recvframe(precv_frame, &adapter->recvpriv.free_recv_queue);
+ prtnframe = NULL;
+ }
+ } else {
+ /* allowed */
+ /* check decryption status, and decrypt the frame if needed */
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("########portctrl:psta->ieee8021x_blocked==0\n"));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("portctrl:precv_frame->hdr.attrib.privacy=%x\n", precv_frame->u.hdr.attrib.privacy));
+
+ if (pattrib->bdecrypted == 0)
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("portctrl:prxstat->decrypted=%x\n", pattrib->bdecrypted));
+
+ prtnframe = precv_frame;
+ /* check is the EAPOL frame or not (Rekey) */
+ if (ether_type == eapol_type) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("########portctrl:ether_type==0x888e\n"));
+ /* check Rekey */
+
+ prtnframe = precv_frame;
+ } else {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("########portctrl:ether_type=0x%04x\n", ether_type));
+ }
+ }
+ } else {
+ prtnframe = precv_frame;
+ }
+
+_func_exit_;
+
+ return prtnframe;
+}
+
+static int recv_decache(union recv_frame *precv_frame, u8 bretry, struct stainfo_rxcache *prxcache)
+{
+ int tid = precv_frame->u.hdr.attrib.priority;
+
+ u16 seq_ctrl = ((precv_frame->u.hdr.attrib.seq_num&0xffff) << 4) |
+ (precv_frame->u.hdr.attrib.frag_num & 0xf);
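+	/*
+	 * Illustration (hypothetical values): seq_num 0x123 and frag_num 0 give
+	 * seq_ctrl 0x1230; a retransmission of the same MPDU produces the same
+	 * seq_ctrl, matches the cached tid_rxseq[tid] below, and is dropped.
+	 */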
+
+_func_enter_;
+
+ if (tid > 15) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("recv_decache, (tid>15)! seq_ctrl=0x%x, tid=0x%x\n", seq_ctrl, tid));
+
+ return _FAIL;
+ }
+
+ if (1) {/* if (bretry) */
+ if (seq_ctrl == prxcache->tid_rxseq[tid]) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("recv_decache, seq_ctrl=0x%x, tid=0x%x, tid_rxseq=0x%x\n", seq_ctrl, tid, prxcache->tid_rxseq[tid]));
+
+ return _FAIL;
+ }
+ }
+
+ prxcache->tid_rxseq[tid] = seq_ctrl;
+
+_func_exit_;
+
+ return _SUCCESS;
+}
+
+void process_pwrbit_data(struct adapter *padapter, union recv_frame *precv_frame);
+void process_pwrbit_data(struct adapter *padapter, union recv_frame *precv_frame)
+{
+#ifdef CONFIG_88EU_AP_MODE
+ unsigned char pwrbit;
+ u8 *ptr = precv_frame->u.hdr.rx_data;
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_info *psta = NULL;
+
+ psta = rtw_get_stainfo(pstapriv, pattrib->src);
+
+ pwrbit = GetPwrMgt(ptr);
+
+ if (psta) {
+ if (pwrbit) {
+ if (!(psta->state & WIFI_SLEEP_STATE))
+ stop_sta_xmit(padapter, psta);
+ } else {
+ if (psta->state & WIFI_SLEEP_STATE)
+ wakeup_sta_to_xmit(padapter, psta);
+ }
+ }
+
+#endif
+}
+
+static void process_wmmps_data(struct adapter *padapter, union recv_frame *precv_frame)
+{
+#ifdef CONFIG_88EU_AP_MODE
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_info *psta = NULL;
+
+ psta = rtw_get_stainfo(pstapriv, pattrib->src);
+
+ if (!psta)
+ return;
+
+ if (!psta->qos_option)
+ return;
+
+ if (!(psta->qos_info&0xf))
+ return;
+
+ if (psta->state&WIFI_SLEEP_STATE) {
+ u8 wmmps_ac = 0;
+
+ switch (pattrib->priority) {
+ case 1:
+ case 2:
+ wmmps_ac = psta->uapsd_bk&BIT(1);
+ break;
+ case 4:
+ case 5:
+ wmmps_ac = psta->uapsd_vi&BIT(1);
+ break;
+ case 6:
+ case 7:
+ wmmps_ac = psta->uapsd_vo&BIT(1);
+ break;
+ case 0:
+ case 3:
+ default:
+ wmmps_ac = psta->uapsd_be&BIT(1);
+ break;
+ }
+
+ if (wmmps_ac) {
+ if (psta->sleepq_ac_len > 0) {
+ /* process received triggered frame */
+ xmit_delivery_enabled_frames(padapter, psta);
+ } else {
+ /* issue one qos null frame with More data bit = 0 and the EOSP bit set (= 1) */
+ issue_qos_nulldata(padapter, psta->hwaddr, (u16)pattrib->priority, 0, 0);
+ }
+ }
+ }
+
+#endif
+}
+
+static void count_rx_stats(struct adapter *padapter, union recv_frame *prframe, struct sta_info *sta)
+{
+ int sz;
+ struct sta_info *psta = NULL;
+ struct stainfo_stats *pstats = NULL;
+ struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+
+ sz = get_recvframe_len(prframe);
+ precvpriv->rx_bytes += sz;
+
+ padapter->mlmepriv.LinkDetectInfo.NumRxOkInPeriod++;
+
+ if ((!MacAddr_isBcst(pattrib->dst)) && (!IS_MCAST(pattrib->dst)))
+ padapter->mlmepriv.LinkDetectInfo.NumRxUnicastOkInPeriod++;
+
+ if (sta)
+ psta = sta;
+ else
+ psta = prframe->u.hdr.psta;
+
+ if (psta) {
+ pstats = &psta->sta_stats;
+
+ pstats->rx_data_pkts++;
+ pstats->rx_bytes += sz;
+ }
+}
+
+int sta2sta_data_frame(
+ struct adapter *adapter,
+ union recv_frame *precv_frame,
+ struct sta_info **psta
+);
+
+int sta2sta_data_frame(struct adapter *adapter, union recv_frame *precv_frame, struct sta_info **psta)
+{
+ u8 *ptr = precv_frame->u.hdr.rx_data;
+ int ret = _SUCCESS;
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct sta_priv *pstapriv = &adapter->stapriv;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ u8 *mybssid = get_bssid(pmlmepriv);
+ u8 *myhwaddr = myid(&adapter->eeprompriv);
+ u8 *sta_addr = NULL;
+ int bmcast = IS_MCAST(pattrib->dst);
+
+_func_enter_;
+
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)) {
+		/* filter out packets whose SA is myself, multicast, or broadcast */
+ if (_rtw_memcmp(myhwaddr, pattrib->src, ETH_ALEN)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" SA==myself\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ if ((!_rtw_memcmp(myhwaddr, pattrib->dst, ETH_ALEN)) && (!bmcast)) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ if (_rtw_memcmp(pattrib->bssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
+ _rtw_memcmp(mybssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
+ !_rtw_memcmp(pattrib->bssid, mybssid, ETH_ALEN)) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ sta_addr = pattrib->src;
+ } else if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
+ /* For Station mode, sa and bssid should always be BSSID, and DA is my mac-address */
+ if (!_rtw_memcmp(pattrib->bssid, pattrib->src, ETH_ALEN)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("bssid!=TA under STATION_MODE; drop pkt\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+ sta_addr = pattrib->bssid;
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ if (bmcast) {
+ /* For AP mode, if DA == MCAST, then BSSID should be also MCAST */
+ if (!IS_MCAST(pattrib->bssid)) {
+ ret = _FAIL;
+ goto exit;
+ }
+ } else { /* not mc-frame */
+ /* For AP mode, if DA is non-MCAST, then it must be BSSID, and bssid == BSSID */
+ if (!_rtw_memcmp(pattrib->bssid, pattrib->dst, ETH_ALEN)) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ sta_addr = pattrib->src;
+ }
+ } else if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) {
+ memcpy(pattrib->dst, GetAddr1Ptr(ptr), ETH_ALEN);
+ memcpy(pattrib->src, GetAddr2Ptr(ptr), ETH_ALEN);
+ memcpy(pattrib->bssid, GetAddr3Ptr(ptr), ETH_ALEN);
+ memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
+ memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
+
+ sta_addr = mybssid;
+ } else {
+ ret = _FAIL;
+ }
+
+ if (bmcast)
+ *psta = rtw_get_bcmc_stainfo(adapter);
+ else
+ *psta = rtw_get_stainfo(pstapriv, sta_addr); /* get ap_info */
+
+ if (*psta == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("can't get psta under sta2sta_data_frame ; drop pkt\n"));
+ if (adapter->registrypriv.mp_mode == 1) {
+ if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == true)
+ adapter->mppriv.rx_pktloss++;
+ }
+ ret = _FAIL;
+ goto exit;
+ }
+
+exit:
+_func_exit_;
+ return ret;
+}
+
+static int ap2sta_data_frame (
+ struct adapter *adapter,
+ union recv_frame *precv_frame,
+ struct sta_info **psta)
+{
+ u8 *ptr = precv_frame->u.hdr.rx_data;
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ int ret = _SUCCESS;
+ struct sta_priv *pstapriv = &adapter->stapriv;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ u8 *mybssid = get_bssid(pmlmepriv);
+ u8 *myhwaddr = myid(&adapter->eeprompriv);
+ int bmcast = IS_MCAST(pattrib->dst);
+
+_func_enter_;
+
+ if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) &&
+ (check_fwstate(pmlmepriv, _FW_LINKED) == true ||
+ check_fwstate(pmlmepriv, _FW_UNDER_LINKING))) {
+		/* filter out packets whose SA is myself, multicast, or broadcast */
+ if (_rtw_memcmp(myhwaddr, pattrib->src, ETH_ALEN)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" SA==myself\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* da should be for me */
+ if ((!_rtw_memcmp(myhwaddr, pattrib->dst, ETH_ALEN)) && (!bmcast)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ (" ap2sta_data_frame: compare DA fail; DA=%pM\n", (pattrib->dst)));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* check BSSID */
+ if (_rtw_memcmp(pattrib->bssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
+ _rtw_memcmp(mybssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
+ (!_rtw_memcmp(pattrib->bssid, mybssid, ETH_ALEN))) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ (" ap2sta_data_frame: compare BSSID fail ; BSSID=%pM\n", (pattrib->bssid)));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("mybssid=%pM\n", (mybssid)));
+
+ if (!bmcast) {
+ DBG_88E("issue_deauth to the nonassociated ap=%pM for the reason(7)\n", (pattrib->bssid));
+ issue_deauth(adapter, pattrib->bssid, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
+ }
+
+ ret = _FAIL;
+ goto exit;
+ }
+
+ if (bmcast)
+ *psta = rtw_get_bcmc_stainfo(adapter);
+ else
+ *psta = rtw_get_stainfo(pstapriv, pattrib->bssid); /* get ap_info */
+
+ if (*psta == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("ap2sta: can't get psta under STATION_MODE ; drop pkt\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* if ((GetFrameSubType(ptr) & WIFI_QOS_DATA_TYPE) == WIFI_QOS_DATA_TYPE) { */
+ /* */
+
+ if (GetFrameSubType(ptr) & BIT(6)) {
+			/* No data, will not indicate to the upper layer; temporarily count it here */
+ count_rx_stats(adapter, precv_frame, *psta);
+ ret = RTW_RX_HANDLED;
+ goto exit;
+ }
+ } else if ((check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) &&
+ (check_fwstate(pmlmepriv, _FW_LINKED) == true)) {
+ memcpy(pattrib->dst, GetAddr1Ptr(ptr), ETH_ALEN);
+ memcpy(pattrib->src, GetAddr2Ptr(ptr), ETH_ALEN);
+ memcpy(pattrib->bssid, GetAddr3Ptr(ptr), ETH_ALEN);
+ memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
+ memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
+
+ /* */
+ memcpy(pattrib->bssid, mybssid, ETH_ALEN);
+
+ *psta = rtw_get_stainfo(pstapriv, pattrib->bssid); /* get sta_info */
+ if (*psta == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("can't get psta under MP_MODE ; drop pkt\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ /* Special case */
+ ret = RTW_RX_HANDLED;
+ goto exit;
+ } else {
+ if (_rtw_memcmp(myhwaddr, pattrib->dst, ETH_ALEN) && (!bmcast)) {
+ *psta = rtw_get_stainfo(pstapriv, pattrib->bssid); /* get sta_info */
+ if (*psta == NULL) {
+ DBG_88E("issue_deauth to the ap =%pM for the reason(7)\n", (pattrib->bssid));
+
+ issue_deauth(adapter, pattrib->bssid, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
+ }
+ }
+
+ ret = _FAIL;
+ }
+
+exit:
+
+_func_exit_;
+
+ return ret;
+}
+
+static int sta2ap_data_frame(struct adapter *adapter,
+ union recv_frame *precv_frame,
+ struct sta_info **psta)
+{
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct sta_priv *pstapriv = &adapter->stapriv;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ u8 *ptr = precv_frame->u.hdr.rx_data;
+ unsigned char *mybssid = get_bssid(pmlmepriv);
+ int ret = _SUCCESS;
+
+_func_enter_;
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
+ /* For AP mode, RA = BSSID, TX = STA(SRC_ADDR), A3 = DST_ADDR */
+ if (!_rtw_memcmp(pattrib->bssid, mybssid, ETH_ALEN)) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ *psta = rtw_get_stainfo(pstapriv, pattrib->src);
+ if (*psta == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("can't get psta under AP_MODE; drop pkt\n"));
+ DBG_88E("issue_deauth to sta=%pM for the reason(7)\n", (pattrib->src));
+
+ issue_deauth(adapter, pattrib->src, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
+
+ ret = RTW_RX_HANDLED;
+ goto exit;
+ }
+
+ process_pwrbit_data(adapter, precv_frame);
+
+ if ((GetFrameSubType(ptr) & WIFI_QOS_DATA_TYPE) == WIFI_QOS_DATA_TYPE) {
+ process_wmmps_data(adapter, precv_frame);
+ }
+
+ if (GetFrameSubType(ptr) & BIT(6)) {
+			/* No data, will not indicate to the upper layer; temporarily count it here */
+ count_rx_stats(adapter, precv_frame, *psta);
+ ret = RTW_RX_HANDLED;
+ goto exit;
+ }
+ } else {
+ u8 *myhwaddr = myid(&adapter->eeprompriv);
+ if (!_rtw_memcmp(pattrib->ra, myhwaddr, ETH_ALEN)) {
+ ret = RTW_RX_HANDLED;
+ goto exit;
+ }
+ DBG_88E("issue_deauth to sta=%pM for the reason(7)\n", (pattrib->src));
+ issue_deauth(adapter, pattrib->src, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
+ ret = RTW_RX_HANDLED;
+ goto exit;
+ }
+
+exit:
+
+_func_exit_;
+
+ return ret;
+}
+
+static int validate_recv_ctrl_frame(struct adapter *padapter,
+ union recv_frame *precv_frame)
+{
+#ifdef CONFIG_88EU_AP_MODE
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ /* uint len = precv_frame->u.hdr.len; */
+
+ if (GetFrameType(pframe) != WIFI_CTRL_TYPE)
+ return _FAIL;
+
+ /* receive the frames that ra(a1) is my address */
+ if (!_rtw_memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN))
+ return _FAIL;
+
+ /* only handle ps-poll */
+ if (GetFrameSubType(pframe) == WIFI_PSPOLL) {
+ u16 aid;
+ u8 wmmps_ac = 0;
+ struct sta_info *psta = NULL;
+
+ aid = GetAid(pframe);
+ psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
+
+ if ((psta == NULL) || (psta->aid != aid))
+ return _FAIL;
+
+ /* for rx pkt statistics */
+ psta->sta_stats.rx_ctrl_pkts++;
+
+ switch (pattrib->priority) {
+ case 1:
+ case 2:
+ wmmps_ac = psta->uapsd_bk&BIT(0);
+ break;
+ case 4:
+ case 5:
+ wmmps_ac = psta->uapsd_vi&BIT(0);
+ break;
+ case 6:
+ case 7:
+ wmmps_ac = psta->uapsd_vo&BIT(0);
+ break;
+ case 0:
+ case 3:
+ default:
+ wmmps_ac = psta->uapsd_be&BIT(0);
+ break;
+ }
+
+ if (wmmps_ac)
+ return _FAIL;
+
+ if (psta->state & WIFI_STA_ALIVE_CHK_STATE) {
+ DBG_88E("%s alive check-rx ps-poll\n", __func__);
+ psta->expire_to = pstapriv->expire_to;
+ psta->state ^= WIFI_STA_ALIVE_CHK_STATE;
+ }
+
+ if ((psta->state&WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap&BIT(psta->aid))) {
+ unsigned long irqL;
+ struct list_head *xmitframe_plist, *xmitframe_phead;
+ struct xmit_frame *pxmitframe = NULL;
+
+ _enter_critical_bh(&psta->sleep_q.lock, &irqL);
+
+ xmitframe_phead = get_list_head(&psta->sleep_q);
+ xmitframe_plist = get_next(xmitframe_phead);
+
+ if ((rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) == false) {
+ pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+
+ xmitframe_plist = get_next(xmitframe_plist);
+
+ rtw_list_delete(&pxmitframe->list);
+
+ psta->sleepq_len--;
+
+ if (psta->sleepq_len > 0)
+ pxmitframe->attrib.mdata = 1;
+ else
+ pxmitframe->attrib.mdata = 0;
+
+ pxmitframe->attrib.triggered = 1;
+
+ _exit_critical_bh(&psta->sleep_q.lock, &irqL);
+ if (rtw_hal_xmit(padapter, pxmitframe) == true)
+ rtw_os_xmit_complete(padapter, pxmitframe);
+ _enter_critical_bh(&psta->sleep_q.lock, &irqL);
+
+ if (psta->sleepq_len == 0) {
+ pstapriv->tim_bitmap &= ~BIT(psta->aid);
+
+					/* update BCN for TIM IE */
+ /* update_BCNTIM(padapter); */
+ update_beacon(padapter, _TIM_IE_, NULL, false);
+ }
+ } else {
+ if (pstapriv->tim_bitmap&BIT(psta->aid)) {
+ if (psta->sleepq_len == 0) {
+ DBG_88E("no buffered packets to xmit\n");
+
+ /* issue nulldata with More data bit = 0 to indicate we have no buffered packets */
+ issue_nulldata(padapter, psta->hwaddr, 0, 0, 0);
+ } else {
+ DBG_88E("error!psta->sleepq_len=%d\n", psta->sleepq_len);
+ psta->sleepq_len = 0;
+ }
+
+ pstapriv->tim_bitmap &= ~BIT(psta->aid);
+
+					/* update BCN for TIM IE */
+ /* update_BCNTIM(padapter); */
+ update_beacon(padapter, _TIM_IE_, NULL, false);
+ }
+ }
+
+ _exit_critical_bh(&psta->sleep_q.lock, &irqL);
+ }
+ }
+
+#endif
+
+ return _FAIL;
+}
+
+union recv_frame *recvframe_chk_defrag(struct adapter *padapter, union recv_frame *precv_frame);
+
+static int validate_recv_mgnt_frame(struct adapter *padapter,
+ union recv_frame *precv_frame)
+{
+ struct sta_info *psta;
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("+validate_recv_mgnt_frame\n"));
+
+ precv_frame = recvframe_chk_defrag(padapter, precv_frame);
+ if (precv_frame == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("%s: fragment packet\n", __func__));
+ return _SUCCESS;
+ }
+
+ /* for rx pkt statistics */
+ psta = rtw_get_stainfo(&padapter->stapriv, GetAddr2Ptr(precv_frame->u.hdr.rx_data));
+ if (psta) {
+ psta->sta_stats.rx_mgnt_pkts++;
+ if (GetFrameSubType(precv_frame->u.hdr.rx_data) == WIFI_BEACON) {
+ psta->sta_stats.rx_beacon_pkts++;
+ } else if (GetFrameSubType(precv_frame->u.hdr.rx_data) == WIFI_PROBEREQ) {
+ psta->sta_stats.rx_probereq_pkts++;
+ } else if (GetFrameSubType(precv_frame->u.hdr.rx_data) == WIFI_PROBERSP) {
+ if (_rtw_memcmp(padapter->eeprompriv.mac_addr, GetAddr1Ptr(precv_frame->u.hdr.rx_data), ETH_ALEN) == true)
+ psta->sta_stats.rx_probersp_pkts++;
+ else if (is_broadcast_mac_addr(GetAddr1Ptr(precv_frame->u.hdr.rx_data)) ||
+ is_multicast_mac_addr(GetAddr1Ptr(precv_frame->u.hdr.rx_data)))
+ psta->sta_stats.rx_probersp_bm_pkts++;
+ else
+ psta->sta_stats.rx_probersp_uo_pkts++;
+ }
+ }
+
+ mgt_dispatcher(padapter, precv_frame);
+
+ return _SUCCESS;
+}
+
+static int validate_recv_data_frame(struct adapter *adapter,
+ union recv_frame *precv_frame)
+{
+ u8 bretry;
+ u8 *psa, *pda, *pbssid;
+ struct sta_info *psta = NULL;
+ u8 *ptr = precv_frame->u.hdr.rx_data;
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct security_priv *psecuritypriv = &adapter->securitypriv;
+ int ret = _SUCCESS;
+
+_func_enter_;
+
+ bretry = GetRetry(ptr);
+ pda = get_da(ptr);
+ psa = get_sa(ptr);
+ pbssid = get_hdr_bssid(ptr);
+
+ if (pbssid == NULL) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ memcpy(pattrib->dst, pda, ETH_ALEN);
+ memcpy(pattrib->src, psa, ETH_ALEN);
+
+ memcpy(pattrib->bssid, pbssid, ETH_ALEN);
+
+ switch (pattrib->to_fr_ds) {
+ case 0:
+ memcpy(pattrib->ra, pda, ETH_ALEN);
+ memcpy(pattrib->ta, psa, ETH_ALEN);
+ ret = sta2sta_data_frame(adapter, precv_frame, &psta);
+ break;
+ case 1:
+ memcpy(pattrib->ra, pda, ETH_ALEN);
+ memcpy(pattrib->ta, pbssid, ETH_ALEN);
+ ret = ap2sta_data_frame(adapter, precv_frame, &psta);
+ break;
+ case 2:
+ memcpy(pattrib->ra, pbssid, ETH_ALEN);
+ memcpy(pattrib->ta, psa, ETH_ALEN);
+ ret = sta2ap_data_frame(adapter, precv_frame, &psta);
+ break;
+ case 3:
+ memcpy(pattrib->ra, GetAddr1Ptr(ptr), ETH_ALEN);
+ memcpy(pattrib->ta, GetAddr2Ptr(ptr), ETH_ALEN);
+ ret = _FAIL;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" case 3\n"));
+ break;
+ default:
+ ret = _FAIL;
+ break;
+ }
+
+ if (ret == _FAIL) {
+ goto exit;
+ } else if (ret == RTW_RX_HANDLED) {
+ goto exit;
+ }
+
+ if (psta == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" after to_fr_ds_chk; psta==NULL\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* psta->rssi = prxcmd->rssi; */
+ /* psta->signal_quality = prxcmd->sq; */
+ precv_frame->u.hdr.psta = psta;
+
+ pattrib->amsdu = 0;
+ pattrib->ack_policy = 0;
+ /* parsing QC field */
+ if (pattrib->qos == 1) {
+ pattrib->priority = GetPriority((ptr + 24));
+ pattrib->ack_policy = GetAckpolicy((ptr + 24));
+ pattrib->amsdu = GetAMsdu((ptr + 24));
+ pattrib->hdrlen = pattrib->to_fr_ds == 3 ? 32 : 26;
+
+ if (pattrib->priority != 0 && pattrib->priority != 3)
+ adapter->recvpriv.bIsAnyNonBEPkts = true;
+ } else {
+ pattrib->priority = 0;
+ pattrib->hdrlen = pattrib->to_fr_ds == 3 ? 30 : 24;
+ }
+
+ if (pattrib->order)/* HT-CTRL 11n */
+ pattrib->hdrlen += 4;
+
+ precv_frame->u.hdr.preorder_ctrl = &psta->recvreorder_ctrl[pattrib->priority];
+
+ /* decache, drop duplicate recv packets */
+ if (recv_decache(precv_frame, bretry, &psta->sta_recvpriv.rxcache) == _FAIL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("decache : drop pkt\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ if (pattrib->privacy) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("validate_recv_data_frame:pattrib->privacy=%x\n", pattrib->privacy));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n ^^^^^^^^^^^IS_MCAST(pattrib->ra(0x%02x))=%d^^^^^^^^^^^^^^^6\n", pattrib->ra[0], IS_MCAST(pattrib->ra)));
+
+ GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, IS_MCAST(pattrib->ra));
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n pattrib->encrypt=%d\n", pattrib->encrypt));
+
+ SET_ICE_IV_LEN(pattrib->iv_len, pattrib->icv_len, pattrib->encrypt);
+ } else {
+ pattrib->encrypt = 0;
+ pattrib->iv_len = 0;
+ pattrib->icv_len = 0;
+ }
+
+exit:
+
+_func_exit_;
+
+ return ret;
+}
+
+static int validate_recv_frame(struct adapter *adapter, union recv_frame *precv_frame)
+{
+	/* shall check frame subtype, to/from ds, da, bssid */
+
+	/* then check whether the rx seq/frag is duplicated */
+
+ u8 type;
+ u8 subtype;
+ int retval = _SUCCESS;
+ u8 bDumpRxPkt;
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ u8 *ptr = precv_frame->u.hdr.rx_data;
+ u8 ver = (unsigned char) (*ptr)&0x3;
+ struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
+
+_func_enter_;
+
+ if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
+ int ch_set_idx = rtw_ch_set_search_ch(pmlmeext->channel_set, rtw_get_oper_ch(adapter));
+ if (ch_set_idx >= 0)
+ pmlmeext->channel_set[ch_set_idx].rx_count++;
+ }
+
+ /* add version chk */
+ if (ver != 0) {
+		RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("validate_recv_frame fail! (ver!=0)\n"));
+ retval = _FAIL;
+ goto exit;
+ }
+
+ type = GetFrameType(ptr);
+ subtype = GetFrameSubType(ptr); /* bit(7)~bit(2) */
+
+ pattrib->to_fr_ds = get_tofr_ds(ptr);
+
+ pattrib->frag_num = GetFragNum(ptr);
+ pattrib->seq_num = GetSequence(ptr);
+
+ pattrib->pw_save = GetPwrMgt(ptr);
+ pattrib->mfrag = GetMFrag(ptr);
+ pattrib->mdata = GetMData(ptr);
+ pattrib->privacy = GetPrivacy(ptr);
+ pattrib->order = GetOrder(ptr);
+
+ /* Dump rx packets */
+ rtw_hal_get_def_var(adapter, HAL_DEF_DBG_DUMP_RXPKT, &(bDumpRxPkt));
+ if (bDumpRxPkt == 1) {/* dump all rx packets */
+ int i;
+ DBG_88E("#############################\n");
+
+ for (i = 0; i < 64; i = i+8)
+ DBG_88E("%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X:\n", *(ptr+i),
+ *(ptr+i+1), *(ptr+i+2), *(ptr+i+3), *(ptr+i+4), *(ptr+i+5), *(ptr+i+6), *(ptr+i+7));
+ DBG_88E("#############################\n");
+ } else if (bDumpRxPkt == 2) {
+ if (type == WIFI_MGT_TYPE) {
+ int i;
+ DBG_88E("#############################\n");
+
+ for (i = 0; i < 64; i = i+8)
+ DBG_88E("%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X:\n", *(ptr+i),
+ *(ptr+i+1), *(ptr+i+2), *(ptr+i+3), *(ptr+i+4), *(ptr+i+5), *(ptr+i+6), *(ptr+i+7));
+ DBG_88E("#############################\n");
+ }
+ } else if (bDumpRxPkt == 3) {
+ if (type == WIFI_DATA_TYPE) {
+ int i;
+ DBG_88E("#############################\n");
+
+ for (i = 0; i < 64; i = i+8)
+ DBG_88E("%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X:\n", *(ptr+i),
+ *(ptr+i+1), *(ptr+i+2), *(ptr+i+3), *(ptr+i+4), *(ptr+i+5), *(ptr+i+6), *(ptr+i+7));
+ DBG_88E("#############################\n");
+ }
+ }
+ switch (type) {
+ case WIFI_MGT_TYPE: /* mgnt */
+ retval = validate_recv_mgnt_frame(adapter, precv_frame);
+ if (retval == _FAIL)
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("validate_recv_mgnt_frame fail\n"));
+ retval = _FAIL; /* only data frame return _SUCCESS */
+ break;
+ case WIFI_CTRL_TYPE: /* ctrl */
+ retval = validate_recv_ctrl_frame(adapter, precv_frame);
+ if (retval == _FAIL)
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("validate_recv_ctrl_frame fail\n"));
+ retval = _FAIL; /* only data frame return _SUCCESS */
+ break;
+ case WIFI_DATA_TYPE: /* data */
+ rtw_led_control(adapter, LED_CTL_RX);
+ pattrib->qos = (subtype & BIT(7)) ? 1 : 0;
+ retval = validate_recv_data_frame(adapter, precv_frame);
+ if (retval == _FAIL) {
+ struct recv_priv *precvpriv = &adapter->recvpriv;
+ precvpriv->rx_drop++;
+ }
+ break;
+ default:
+		RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("validate_recv_frame fail! type=0x%x\n", type));
+ retval = _FAIL;
+ break;
+ }
+
+exit:
+
+_func_exit_;
+
+ return retval;
+}
+
+/* remove the wlanhdr and add the eth_hdr */
+
+static int wlanhdr_to_ethhdr (union recv_frame *precvframe)
+{
+ int rmv_len;
+ u16 eth_type, len;
+ __be16 be_tmp;
+ u8 bsnaphdr;
+ u8 *psnap_type;
+ struct ieee80211_snap_hdr *psnap;
+
+ int ret = _SUCCESS;
+ struct adapter *adapter = precvframe->u.hdr.adapter;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+
+ u8 *ptr = get_recvframe_data(precvframe); /* point to frame_ctrl field */
+ struct rx_pkt_attrib *pattrib = &precvframe->u.hdr.attrib;
+
+_func_enter_;
+
+ if (pattrib->encrypt)
+ recvframe_pull_tail(precvframe, pattrib->icv_len);
+
+ psnap = (struct ieee80211_snap_hdr *)(ptr+pattrib->hdrlen + pattrib->iv_len);
+ psnap_type = ptr+pattrib->hdrlen + pattrib->iv_len+SNAP_SIZE;
+ /* convert hdr + possible LLC headers into Ethernet header */
+ if ((_rtw_memcmp(psnap, rtw_rfc1042_header, SNAP_SIZE) &&
+ (_rtw_memcmp(psnap_type, SNAP_ETH_TYPE_IPX, 2) == false) &&
+ (_rtw_memcmp(psnap_type, SNAP_ETH_TYPE_APPLETALK_AARP, 2) == false)) ||
+ _rtw_memcmp(psnap, rtw_bridge_tunnel_header, SNAP_SIZE)) {
+ /* remove RFC1042 or Bridge-Tunnel encapsulation and replace EtherType */
+ bsnaphdr = true;
+ } else {
+ /* Leave Ethernet header part of hdr and full payload */
+ bsnaphdr = false;
+ }
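+	/*
+	 * Example (hypothetical frame): an RFC1042-encapsulated IPv4 payload carries
+	 * LLC/SNAP bytes AA AA 03 00 00 00 followed by EtherType 08 00; bsnaphdr is
+	 * true, the SNAP header is removed together with the 802.11 header below,
+	 * and DA/SA plus the original EtherType end up in front of the payload.
+	 */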
+
+ rmv_len = pattrib->hdrlen + pattrib->iv_len + (bsnaphdr ? SNAP_SIZE : 0);
+ len = precvframe->u.hdr.len - rmv_len;
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("\n===pattrib->hdrlen: %x, pattrib->iv_len:%x===\n\n", pattrib->hdrlen, pattrib->iv_len));
+
+ memcpy(&be_tmp, ptr+rmv_len, 2);
+ eth_type = ntohs(be_tmp); /* pattrib->ether_type */
+ pattrib->eth_type = eth_type;
+
+ if ((check_fwstate(pmlmepriv, WIFI_MP_STATE))) {
+ ptr += rmv_len;
+ *ptr = 0x87;
+ *(ptr+1) = 0x12;
+
+ eth_type = 0x8712;
+ /* append rx status for mp test packets */
+ ptr = recvframe_pull(precvframe, (rmv_len-sizeof(struct ethhdr)+2)-24);
+ memcpy(ptr, get_rxmem(precvframe), 24);
+ ptr += 24;
+ } else {
+ ptr = recvframe_pull(precvframe, (rmv_len-sizeof(struct ethhdr) + (bsnaphdr ? 2 : 0)));
+ }
+
+ memcpy(ptr, pattrib->dst, ETH_ALEN);
+ memcpy(ptr+ETH_ALEN, pattrib->src, ETH_ALEN);
+
+ if (!bsnaphdr) {
+ be_tmp = htons(len);
+ memcpy(ptr+12, &be_tmp, 2);
+ }
+
+_func_exit_;
+ return ret;
+}
+
+/* perform defrag */
+static union recv_frame *recvframe_defrag(struct adapter *adapter, struct __queue *defrag_q)
+{
+ struct list_head *plist, *phead;
+ u8 wlanhdr_offset;
+ u8 curfragnum;
+ struct recv_frame_hdr *pfhdr, *pnfhdr;
+ union recv_frame *prframe, *pnextrframe;
+ struct __queue *pfree_recv_queue;
+
+_func_enter_;
+
+ curfragnum = 0;
+ pfree_recv_queue = &adapter->recvpriv.free_recv_queue;
+
+ phead = get_list_head(defrag_q);
+ plist = get_next(phead);
+ prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ pfhdr = &prframe->u.hdr;
+ rtw_list_delete(&(prframe->u.list));
+
+ if (curfragnum != pfhdr->attrib.frag_num) {
+ /* the first fragment number must be 0 */
+ /* free the whole queue */
+ rtw_free_recvframe(prframe, pfree_recv_queue);
+ rtw_free_recvframe_queue(defrag_q, pfree_recv_queue);
+
+ return NULL;
+ }
+
+ curfragnum++;
+
+ plist = get_list_head(defrag_q);
+
+ plist = get_next(plist);
+
+ while (rtw_end_of_queue_search(phead, plist) == false) {
+ pnextrframe = LIST_CONTAINOR(plist, union recv_frame , u);
+ pnfhdr = &pnextrframe->u.hdr;
+
+ /* check the fragment sequence (2nd ~n fragment frame) */
+
+ if (curfragnum != pnfhdr->attrib.frag_num) {
+ /* the fragment number must be increasing (after decache) */
+ /* release the defrag_q & prframe */
+ rtw_free_recvframe(prframe, pfree_recv_queue);
+ rtw_free_recvframe_queue(defrag_q, pfree_recv_queue);
+ return NULL;
+ }
+
+ curfragnum++;
+
+ /* copy the 2nd~n fragment frame's payload to the first fragment */
+ /* get the 2nd~last fragment frame's payload */
+
+ wlanhdr_offset = pnfhdr->attrib.hdrlen + pnfhdr->attrib.iv_len;
+
+ recvframe_pull(pnextrframe, wlanhdr_offset);
+
+ /* append to first fragment frame's tail (if privacy frame, pull the ICV) */
+ recvframe_pull_tail(prframe, pfhdr->attrib.icv_len);
+
+ /* memcpy */
+ memcpy(pfhdr->rx_tail, pnfhdr->rx_data, pnfhdr->len);
+
+ recvframe_put(prframe, pnfhdr->len);
+
+ pfhdr->attrib.icv_len = pnfhdr->attrib.icv_len;
+ plist = get_next(plist);
+	}
+
+ /* free the defrag_q queue and return the prframe */
+ rtw_free_recvframe_queue(defrag_q, pfree_recv_queue);
+
+	RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("Performed defrag!!!!!\n"));
+
+_func_exit_;
+
+ return prframe;
+}
+
+/* check if need to defrag, if needed queue the frame to defrag_q */
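+/*
+ * Summary of the flow implemented below: a non-fragmented frame
+ * (mfrag == 0, fragnum == 0) is returned as-is; a first or middle fragment
+ * (mfrag == 1) is queued on the TA's defrag_q; the last fragment
+ * (mfrag == 0, fragnum != 0) is queued and then reassembled via
+ * recvframe_defrag(), with a TKIP MIC check on the reassembled frame.
+ */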
+union recv_frame *recvframe_chk_defrag(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ u8 ismfrag;
+ u8 fragnum;
+ u8 *psta_addr;
+ struct recv_frame_hdr *pfhdr;
+ struct sta_info *psta;
+ struct sta_priv *pstapriv;
+ struct list_head *phead;
+ union recv_frame *prtnframe = NULL;
+ struct __queue *pfree_recv_queue, *pdefrag_q;
+
+_func_enter_;
+
+ pstapriv = &padapter->stapriv;
+
+ pfhdr = &precv_frame->u.hdr;
+
+ pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
+
+ /* need to define struct of wlan header frame ctrl */
+ ismfrag = pfhdr->attrib.mfrag;
+ fragnum = pfhdr->attrib.frag_num;
+
+ psta_addr = pfhdr->attrib.ta;
+ psta = rtw_get_stainfo(pstapriv, psta_addr);
+ if (psta == NULL) {
+ u8 type = GetFrameType(pfhdr->rx_data);
+ if (type != WIFI_DATA_TYPE) {
+ psta = rtw_get_bcmc_stainfo(padapter);
+ pdefrag_q = &psta->sta_recvpriv.defrag_q;
+ } else {
+ pdefrag_q = NULL;
+ }
+ } else {
+ pdefrag_q = &psta->sta_recvpriv.defrag_q;
+ }
+
+ if ((ismfrag == 0) && (fragnum == 0))
+ prtnframe = precv_frame;/* isn't a fragment frame */
+
+ if (ismfrag == 1) {
+ /* 0~(n-1) fragment frame */
+		/* enqueue to defrag_q */
+ if (pdefrag_q != NULL) {
+ if (fragnum == 0) {
+ /* the first fragment */
+ if (_rtw_queue_empty(pdefrag_q) == false) {
+ /* free current defrag_q */
+ rtw_free_recvframe_queue(pdefrag_q, pfree_recv_queue);
+ }
+ }
+
+ /* Then enqueue the 0~(n-1) fragment into the defrag_q */
+
+ phead = get_list_head(pdefrag_q);
+ rtw_list_insert_tail(&pfhdr->list, phead);
+
+			RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("Enqueue: ismfrag=%d, fragnum=%d\n", ismfrag, fragnum));
+
+ prtnframe = NULL;
+ } else {
+ /* can't find this ta's defrag_queue, so free this recv_frame */
+ rtw_free_recvframe(precv_frame, pfree_recv_queue);
+ prtnframe = NULL;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("Free because pdefrag_q==NULL: ismfrag=%d, fragnum=%d\n", ismfrag, fragnum));
+ }
+ }
+
+ if ((ismfrag == 0) && (fragnum != 0)) {
+ /* the last fragment frame */
+ /* enqueue the last fragment */
+ if (pdefrag_q != NULL) {
+ phead = get_list_head(pdefrag_q);
+ rtw_list_insert_tail(&pfhdr->list, phead);
+
+ /* call recvframe_defrag to defrag */
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("defrag: ismfrag=%d, fragnum=%d\n", ismfrag, fragnum));
+ precv_frame = recvframe_defrag(padapter, pdefrag_q);
+ prtnframe = precv_frame;
+ } else {
+ /* can't find this ta's defrag_queue, so free this recv_frame */
+ rtw_free_recvframe(precv_frame, pfree_recv_queue);
+ prtnframe = NULL;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("Free because pdefrag_q==NULL: ismfrag=%d, fragnum=%d\n", ismfrag, fragnum));
+ }
+ }
+
+ if ((prtnframe != NULL) && (prtnframe->u.hdr.attrib.privacy)) {
+ /* after defrag we must check tkip mic code */
+ if (recvframe_chkmic(padapter, prtnframe) == _FAIL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("recvframe_chkmic(padapter, prtnframe)==_FAIL\n"));
+ rtw_free_recvframe(prtnframe, pfree_recv_queue);
+ prtnframe = NULL;
+ }
+ }
+
+_func_exit_;
+
+ return prtnframe;
+}
+
+static int amsdu_to_msdu(struct adapter *padapter, union recv_frame *prframe)
+{
+ int a_len, padding_len;
+ u16 eth_type, nSubframe_Length;
+ u8 nr_subframes, i;
+ unsigned char *pdata;
+ struct rx_pkt_attrib *pattrib;
+ unsigned char *data_ptr;
+ struct sk_buff *sub_skb, *subframes[MAX_SUBFRAME_COUNT];
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+ struct __queue *pfree_recv_queue = &(precvpriv->free_recv_queue);
+ int ret = _SUCCESS;
+ nr_subframes = 0;
+
+ pattrib = &prframe->u.hdr.attrib;
+
+ recvframe_pull(prframe, prframe->u.hdr.attrib.hdrlen);
+
+ if (prframe->u.hdr.attrib.iv_len > 0)
+ recvframe_pull(prframe, prframe->u.hdr.attrib.iv_len);
+
+ a_len = prframe->u.hdr.len;
+
+ pdata = prframe->u.hdr.rx_data;
+
+ while (a_len > ETH_HLEN) {
+ /* Offset 12 denote 2 mac address */
+		/* Offset 12 denotes the 2 MAC addresses */
+
+ if (a_len < (ETHERNET_HEADER_SIZE + nSubframe_Length)) {
+ DBG_88E("nRemain_Length is %d and nSubframe_Length is : %d\n", a_len, nSubframe_Length);
+ goto exit;
+ }
+
+		/* move the data pointer to the data content */
+ pdata += ETH_HLEN;
+ a_len -= ETH_HLEN;
+
+ /* Allocate new skb for releasing to upper layer */
+ sub_skb = dev_alloc_skb(nSubframe_Length + 12);
+ if (sub_skb) {
+ skb_reserve(sub_skb, 12);
+ data_ptr = (u8 *)skb_put(sub_skb, nSubframe_Length);
+ memcpy(data_ptr, pdata, nSubframe_Length);
+ } else {
+ sub_skb = skb_clone(prframe->u.hdr.pkt, GFP_ATOMIC);
+ if (sub_skb) {
+ sub_skb->data = pdata;
+ sub_skb->len = nSubframe_Length;
+ skb_set_tail_pointer(sub_skb, nSubframe_Length);
+ } else {
+ DBG_88E("skb_clone() Fail!!! , nr_subframes=%d\n", nr_subframes);
+ break;
+ }
+ }
+
+ subframes[nr_subframes++] = sub_skb;
+
+ if (nr_subframes >= MAX_SUBFRAME_COUNT) {
+ DBG_88E("ParseSubframe(): Too many Subframes! Packets dropped!\n");
+ break;
+ }
+
+ pdata += nSubframe_Length;
+ a_len -= nSubframe_Length;
+ if (a_len != 0) {
+ padding_len = 4 - ((nSubframe_Length + ETH_HLEN) & (4-1));
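+			/*
+			 * Worked example (hypothetical lengths): nSubframe_Length 39 plus
+			 * ETH_HLEN 14 gives 53, 53 & 3 = 1, so padding_len = 3; an already
+			 * 4-byte-aligned subframe yields padding_len = 4, reset to 0 below.
+			 */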
+ if (padding_len == 4) {
+ padding_len = 0;
+ }
+
+ if (a_len < padding_len) {
+ goto exit;
+ }
+ pdata += padding_len;
+ a_len -= padding_len;
+ }
+ }
+
+ for (i = 0; i < nr_subframes; i++) {
+ sub_skb = subframes[i];
+ /* convert hdr + possible LLC headers into Ethernet header */
+ eth_type = RTW_GET_BE16(&sub_skb->data[6]);
+ if (sub_skb->len >= 8 &&
+ ((_rtw_memcmp(sub_skb->data, rtw_rfc1042_header, SNAP_SIZE) &&
+ eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) ||
+ _rtw_memcmp(sub_skb->data, rtw_bridge_tunnel_header, SNAP_SIZE))) {
+ /* remove RFC1042 or Bridge-Tunnel encapsulation and replace EtherType */
+ skb_pull(sub_skb, SNAP_SIZE);
+ memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src, ETH_ALEN);
+ memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->dst, ETH_ALEN);
+ } else {
+ __be16 len;
+ /* Leave Ethernet header part of hdr and full payload */
+ len = htons(sub_skb->len);
+ memcpy(skb_push(sub_skb, 2), &len, 2);
+ memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src, ETH_ALEN);
+ memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->dst, ETH_ALEN);
+ }
+
+		/* Indicate the packets to the upper layer */
+ if (sub_skb) {
+ /* Insert NAT2.5 RX here! */
+ sub_skb->protocol = eth_type_trans(sub_skb, padapter->pnetdev);
+ sub_skb->dev = padapter->pnetdev;
+
+ sub_skb->ip_summed = CHECKSUM_NONE;
+
+ netif_rx(sub_skb);
+ }
+ }
+
+exit:
+
+ prframe->u.hdr.len = 0;
+ rtw_free_recvframe(prframe, pfree_recv_queue);/* free this recv_frame */
+
+ return ret;
+}
+
+static int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_num)
+{
+ u8 wsize = preorder_ctrl->wsize_b;
+ u16 wend = (preorder_ctrl->indicate_seq + wsize - 1) & 0xFFF;/* 4096; */
+
+ /* Rx Reorder initialize condition. */
+ if (preorder_ctrl->indicate_seq == 0xFFFF)
+ preorder_ctrl->indicate_seq = seq_num;
+
+ /* Drop out the packet which SeqNum is smaller than WinStart */
+ if (SN_LESS(seq_num, preorder_ctrl->indicate_seq))
+ return false;
+
+	/* */
+	/* Sliding window manipulation. Conditions include: */
+	/* 1. Incoming SeqNum is equal to WinStart => window shifts by 1 */
+	/* 2. Incoming SeqNum is larger than WinEnd => window shifts by N */
+	/* */
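+	/*
+	 * Worked example (hypothetical values): with wsize = 64 and
+	 * indicate_seq = 100, wend = 163.  An incoming seq_num of 100 shifts the
+	 * window by one (indicate_seq becomes 101); an incoming seq_num of 200 is
+	 * beyond wend, so indicate_seq becomes 200 + 1 - 64 = 137.
+	 */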
+ if (SN_EQUAL(seq_num, preorder_ctrl->indicate_seq)) {
+ preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) & 0xFFF;
+ } else if (SN_LESS(wend, seq_num)) {
+ if (seq_num >= (wsize - 1))
+ preorder_ctrl->indicate_seq = seq_num + 1 - wsize;
+ else
+ preorder_ctrl->indicate_seq = 0xFFF - (wsize - (seq_num + 1)) + 1;
+ }
+
+ return true;
+}
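+/*
+ * Worked example for check_indicate_seq() (illustrative numbers): with
+ * wsize = 64 and indicate_seq = 100, wend = 163. seq_num 99 is dropped
+ * (below WinStart), seq_num 100 advances indicate_seq to 101, and
+ * seq_num 200 lies beyond WinEnd, so indicate_seq jumps to
+ * 200 + 1 - 64 = 137. All arithmetic is modulo 4096.
+ */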
+
+int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl, union recv_frame *prframe);
+int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl, union recv_frame *prframe)
+{
+ struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
+ struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
+ struct list_head *phead, *plist;
+ union recv_frame *pnextrframe;
+ struct rx_pkt_attrib *pnextattrib;
+
+ phead = get_list_head(ppending_recvframe_queue);
+ plist = get_next(phead);
+
+ while (rtw_end_of_queue_search(phead, plist) == false) {
+ pnextrframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ pnextattrib = &pnextrframe->u.hdr.attrib;
+
+ if (SN_LESS(pnextattrib->seq_num, pattrib->seq_num))
+ plist = get_next(plist);
+ else if (SN_EQUAL(pnextattrib->seq_num, pattrib->seq_num))
+ return false;
+ else
+ break;
+ }
+
+ rtw_list_delete(&(prframe->u.hdr.list));
+
+ rtw_list_insert_tail(&(prframe->u.hdr.list), plist);
+ return true;
+}
+
+static int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reorder_ctrl *preorder_ctrl, int bforced)
+{
+ struct list_head *phead, *plist;
+ union recv_frame *prframe;
+ struct rx_pkt_attrib *pattrib;
+ int bPktInBuf = false;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+ struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
+
+ phead = get_list_head(ppending_recvframe_queue);
+ plist = get_next(phead);
+
+ /* Handling some condition for forced indicate case. */
+ if (bforced) {
+ if (rtw_is_list_empty(phead))
+ return true;
+
+ prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ pattrib = &prframe->u.hdr.attrib;
+ preorder_ctrl->indicate_seq = pattrib->seq_num;
+ }
+
+	/* Prepare the indication list and do the indication. */
+	/* Check whether any packet needs to be indicated. */
+ while (!rtw_is_list_empty(phead)) {
+ prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ pattrib = &prframe->u.hdr.attrib;
+
+ if (!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
+ ("recv_indicatepkts_in_order: indicate=%d seq=%d amsdu=%d\n",
+ preorder_ctrl->indicate_seq, pattrib->seq_num, pattrib->amsdu));
+ plist = get_next(plist);
+ rtw_list_delete(&(prframe->u.hdr.list));
+
+ if (SN_EQUAL(preorder_ctrl->indicate_seq, pattrib->seq_num))
+ preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) & 0xFFF;
+
+			/* Set this as a lock to make sure that only one thread is indicating packets. */
+
+ /* indicate this recv_frame */
+ if (!pattrib->amsdu) {
+ if ((!padapter->bDriverStopped) &&
+ (!padapter->bSurpriseRemoved))
+ rtw_recv_indicatepkt(padapter, prframe);/* indicate this recv_frame */
+ } else if (pattrib->amsdu == 1) {
+ if (amsdu_to_msdu(padapter, prframe) != _SUCCESS)
+ rtw_free_recvframe(prframe, &precvpriv->free_recv_queue);
+ } else {
+ /* error condition; */
+ }
+
+ /* Update local variables. */
+ bPktInBuf = false;
+ } else {
+ bPktInBuf = true;
+ break;
+ }
+ }
+ return bPktInBuf;
+}
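+/*
+ * Note: the reordering timer calls this with bforced = true, which moves
+ * indicate_seq to the sequence number at the head of the pending queue so
+ * that every buffered frame up to the first missing sequence number is
+ * flushed to the upper layer.
+ */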
+
+static int recv_indicatepkt_reorder(struct adapter *padapter, union recv_frame *prframe)
+{
+ unsigned long irql;
+ int retval = _SUCCESS;
+ struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
+ struct recv_reorder_ctrl *preorder_ctrl = prframe->u.hdr.preorder_ctrl;
+ struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
+
+ if (!pattrib->amsdu) {
+ /* s1. */
+ wlanhdr_to_ethhdr(prframe);
+
+ if ((pattrib->qos != 1) || (pattrib->eth_type == 0x0806) ||
+ (pattrib->ack_policy != 0)) {
+ if ((!padapter->bDriverStopped) &&
+ (!padapter->bSurpriseRemoved)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("@@@@ recv_indicatepkt_reorder -recv_func recv_indicatepkt\n"));
+
+ rtw_recv_indicatepkt(padapter, prframe);
+ return _SUCCESS;
+ }
+
+ return _FAIL;
+ }
+
+ if (!preorder_ctrl->enable) {
+ /* indicate this recv_frame */
+ preorder_ctrl->indicate_seq = pattrib->seq_num;
+ rtw_recv_indicatepkt(padapter, prframe);
+
+ preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1)%4096;
+ return _SUCCESS;
+ }
+	} else if (pattrib->amsdu == 1) { /* temporary filter: A-MSDU inside an A-MPDU is not supported yet */
+ if (!preorder_ctrl->enable) {
+ preorder_ctrl->indicate_seq = pattrib->seq_num;
+ retval = amsdu_to_msdu(padapter, prframe);
+
+ preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1)%4096;
+ return retval;
+ }
+ }
+
+ _enter_critical_bh(&ppending_recvframe_queue->lock, &irql);
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
+ ("recv_indicatepkt_reorder: indicate=%d seq=%d\n",
+ preorder_ctrl->indicate_seq, pattrib->seq_num));
+
+	/* s2. check if winstart_b(indicate_seq) needs to be updated */
+ if (!check_indicate_seq(preorder_ctrl, pattrib->seq_num)) {
+ rtw_recv_indicatepkt(padapter, prframe);
+
+ _exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
+
+ goto _success_exit;
+ }
+
+	/* s3. Insert the packet into the reorder queue to maintain its ordering. */
+ if (!enqueue_reorder_recvframe(preorder_ctrl, prframe))
+ goto _err_exit;
+
+ /* s4. */
+ /* Indication process. */
+ /* After Packet dropping and Sliding Window shifting as above, we can now just indicate the packets */
+ /* with the SeqNum smaller than latest WinStart and buffer other packets. */
+ /* */
+ /* For Rx Reorder condition: */
+ /* 1. All packets with SeqNum smaller than WinStart => Indicate */
+ /* 2. All packets with SeqNum larger than or equal to WinStart => Buffer it. */
+ /* */
+
+ /* recv_indicatepkts_in_order(padapter, preorder_ctrl, true); */
+ if (recv_indicatepkts_in_order(padapter, preorder_ctrl, false)) {
+ _set_timer(&preorder_ctrl->reordering_ctrl_timer, REORDER_WAIT_TIME);
+ _exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
+ } else {
+ _exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
+ _cancel_timer_ex(&preorder_ctrl->reordering_ctrl_timer);
+ }
+
+_success_exit:
+
+ return _SUCCESS;
+
+_err_exit:
+
+ _exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
+
+ return _FAIL;
+}
+
+void rtw_reordering_ctrl_timeout_handler(void *pcontext)
+{
+ unsigned long irql;
+ struct recv_reorder_ctrl *preorder_ctrl = (struct recv_reorder_ctrl *)pcontext;
+ struct adapter *padapter = preorder_ctrl->padapter;
+ struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
+
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
+ return;
+
+ _enter_critical_bh(&ppending_recvframe_queue->lock, &irql);
+
+ if (recv_indicatepkts_in_order(padapter, preorder_ctrl, true) == true)
+ _set_timer(&preorder_ctrl->reordering_ctrl_timer, REORDER_WAIT_TIME);
+
+ _exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
+}
+
+static int process_recv_indicatepkts(struct adapter *padapter, union recv_frame *prframe)
+{
+ int retval = _SUCCESS;
+ /* struct recv_priv *precvpriv = &padapter->recvpriv; */
+ /* struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib; */
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+
+ if (phtpriv->ht_option) { /* B/G/N Mode */
+ /* prframe->u.hdr.preorder_ctrl = &precvpriv->recvreorder_ctrl[pattrib->priority]; */
+
+ if (recv_indicatepkt_reorder(padapter, prframe) != _SUCCESS) {
+ /* including perform A-MPDU Rx Ordering Buffer Control */
+ if ((!padapter->bDriverStopped) &&
+ (!padapter->bSurpriseRemoved)) {
+ retval = _FAIL;
+ return retval;
+ }
+ }
+ } else { /* B/G mode */
+		retval = wlanhdr_to_ethhdr(prframe);
+ if (retval != _SUCCESS) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("wlanhdr_to_ethhdr: drop pkt\n"));
+ return retval;
+ }
+
+ if ((!padapter->bDriverStopped) &&
+ (!padapter->bSurpriseRemoved)) {
+ /* indicate this recv_frame */
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("@@@@ process_recv_indicatepkts- recv_func recv_indicatepkt\n"));
+ rtw_recv_indicatepkt(padapter, prframe);
+ } else {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("@@@@ process_recv_indicatepkts- recv_func free_indicatepkt\n"));
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("recv_func:bDriverStopped(%d) OR bSurpriseRemoved(%d)", padapter->bDriverStopped, padapter->bSurpriseRemoved));
+ retval = _FAIL;
+ return retval;
+ }
+ }
+
+ return retval;
+}
+
+static int recv_func_prehandle(struct adapter *padapter, union recv_frame *rframe)
+{
+ int ret = _SUCCESS;
+ struct rx_pkt_attrib *pattrib = &rframe->u.hdr.attrib;
+ struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ if (padapter->registrypriv.mp_mode == 1) {
+ if ((check_fwstate(pmlmepriv, WIFI_MP_STATE) == true)) { /* padapter->mppriv.check_mp_pkt == 0)) */
+ if (pattrib->crc_err == 1)
+ padapter->mppriv.rx_crcerrpktcount++;
+ else
+ padapter->mppriv.rx_pktcount++;
+
+ if (check_fwstate(pmlmepriv, WIFI_MP_LPBK_STATE) == false) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_alert_, ("MP - Not in loopback mode , drop pkt\n"));
+ ret = _FAIL;
+ rtw_free_recvframe(rframe, pfree_recv_queue);/* free this recv_frame */
+ goto exit;
+ }
+ }
+ }
+
+	/* check the frame ctrl field and decache */
+ ret = validate_recv_frame(padapter, rframe);
+ if (ret != _SUCCESS) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("recv_func: validate_recv_frame fail! drop pkt\n"));
+ rtw_free_recvframe(rframe, pfree_recv_queue);/* free this recv_frame */
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static int recv_func_posthandle(struct adapter *padapter, union recv_frame *prframe)
+{
+ int ret = _SUCCESS;
+ union recv_frame *orig_prframe = prframe;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+ struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
+
+ /* DATA FRAME */
+ rtw_led_control(padapter, LED_CTL_RX);
+
+ prframe = decryptor(padapter, prframe);
+ if (prframe == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("decryptor: drop pkt\n"));
+ ret = _FAIL;
+ goto _recv_data_drop;
+ }
+
+ prframe = recvframe_chk_defrag(padapter, prframe);
+ if (prframe == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("recvframe_chk_defrag: drop pkt\n"));
+ goto _recv_data_drop;
+ }
+
+ prframe = portctrl(padapter, prframe);
+ if (prframe == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("portctrl: drop pkt\n"));
+ ret = _FAIL;
+ goto _recv_data_drop;
+ }
+
+ count_rx_stats(padapter, prframe, NULL);
+
+ ret = process_recv_indicatepkts(padapter, prframe);
+ if (ret != _SUCCESS) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("recv_func: process_recv_indicatepkts fail!\n"));
+ rtw_free_recvframe(orig_prframe, pfree_recv_queue);/* free this recv_frame */
+ goto _recv_data_drop;
+ }
+ return ret;
+
+_recv_data_drop:
+ precvpriv->rx_drop++;
+ return ret;
+}
+
+static int recv_func(struct adapter *padapter, union recv_frame *rframe)
+{
+ int ret;
+ struct rx_pkt_attrib *prxattrib = &rframe->u.hdr.attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_priv *mlmepriv = &padapter->mlmepriv;
+
+	/* check whether the uc_swdec_pending_queue needs to be handled */
+ if (check_fwstate(mlmepriv, WIFI_STATION_STATE) && psecuritypriv->busetkipkey) {
+ union recv_frame *pending_frame;
+
+ while ((pending_frame = rtw_alloc_recvframe(&padapter->recvpriv.uc_swdec_pending_queue))) {
+ if (recv_func_posthandle(padapter, pending_frame) == _SUCCESS)
+ DBG_88E("%s: dequeue uc_swdec_pending_queue\n", __func__);
+ }
+ }
+
+ ret = recv_func_prehandle(padapter, rframe);
+
+ if (ret == _SUCCESS) {
+		/* check whether the frame needs to be enqueued into uc_swdec_pending_queue */
+ if (check_fwstate(mlmepriv, WIFI_STATION_STATE) &&
+ !IS_MCAST(prxattrib->ra) && prxattrib->encrypt > 0 &&
+ (prxattrib->bdecrypted == 0 || psecuritypriv->sw_decrypt) &&
+ !is_wep_enc(psecuritypriv->dot11PrivacyAlgrthm) &&
+ !psecuritypriv->busetkipkey) {
+ rtw_enqueue_recvframe(rframe, &padapter->recvpriv.uc_swdec_pending_queue);
+ DBG_88E("%s: no key, enqueue uc_swdec_pending_queue\n", __func__);
+ goto exit;
+ }
+
+ ret = recv_func_posthandle(padapter, rframe);
+ }
+
+exit:
+ return ret;
+}
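+/*
+ * Note on the pending-queue handling above: unicast frames that arrive
+ * encrypted before the pairwise TKIP key is installed (busetkipkey not yet
+ * set) are parked on uc_swdec_pending_queue and replayed through
+ * recv_func_posthandle() once the key becomes available.
+ */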
+
+s32 rtw_recv_entry(union recv_frame *precvframe)
+{
+ struct adapter *padapter;
+ struct recv_priv *precvpriv;
+ s32 ret = _SUCCESS;
+
+_func_enter_;
+
+ padapter = precvframe->u.hdr.adapter;
+
+ precvpriv = &padapter->recvpriv;
+
+ ret = recv_func(padapter, precvframe);
+ if (ret == _FAIL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("rtw_recv_entry: recv_func return fail!!!\n"));
+ goto _recv_entry_drop;
+ }
+
+ precvpriv->rx_pkts++;
+
+_func_exit_;
+
+ return ret;
+
+_recv_entry_drop:
+
+ if (padapter->registrypriv.mp_mode == 1)
+ padapter->mppriv.rx_pktloss = precvpriv->rx_drop;
+
+_func_exit_;
+
+ return ret;
+}
+
+void rtw_signal_stat_timer_hdl(RTW_TIMER_HDL_ARGS)
+{
+ struct adapter *adapter = (struct adapter *)FunctionContext;
+ struct recv_priv *recvpriv = &adapter->recvpriv;
+
+ u32 tmp_s, tmp_q;
+ u8 avg_signal_strength = 0;
+ u8 avg_signal_qual = 0;
+ u8 _alpha = 3; /* this value is based on converging_constant = 5000 and sampling_interval = 1000 */
+
+ if (adapter->recvpriv.is_signal_dbg) {
+		/* propagate the user-specified debug value, signal_strength_dbg, to signal_strength and rssi */
+ adapter->recvpriv.signal_strength = adapter->recvpriv.signal_strength_dbg;
+ adapter->recvpriv.rssi = (s8)translate_percentage_to_dbm((u8)adapter->recvpriv.signal_strength_dbg);
+ } else {
+ if (recvpriv->signal_strength_data.update_req == 0) {/* update_req is clear, means we got rx */
+ avg_signal_strength = recvpriv->signal_strength_data.avg_val;
+			/* after the avg_vals are acquired, we can re-gather the signal statistics */
+ recvpriv->signal_strength_data.update_req = 1;
+ }
+
+ if (recvpriv->signal_qual_data.update_req == 0) {/* update_req is clear, means we got rx */
+ avg_signal_qual = recvpriv->signal_qual_data.avg_val;
+			/* after the avg_vals are acquired, we can re-gather the signal statistics */
+ recvpriv->signal_qual_data.update_req = 1;
+ }
+
+ /* update value of signal_strength, rssi, signal_qual */
+ if (check_fwstate(&adapter->mlmepriv, _FW_UNDER_SURVEY) == false) {
+ tmp_s = (avg_signal_strength+(_alpha-1)*recvpriv->signal_strength);
+ if (tmp_s % _alpha)
+ tmp_s = tmp_s/_alpha + 1;
+ else
+ tmp_s = tmp_s/_alpha;
+ if (tmp_s > 100)
+ tmp_s = 100;
+
+ tmp_q = (avg_signal_qual+(_alpha-1)*recvpriv->signal_qual);
+ if (tmp_q % _alpha)
+ tmp_q = tmp_q/_alpha + 1;
+ else
+ tmp_q = tmp_q/_alpha;
+ if (tmp_q > 100)
+ tmp_q = 100;
+
+ recvpriv->signal_strength = tmp_s;
+ recvpriv->rssi = (s8)translate_percentage_to_dbm(tmp_s);
+ recvpriv->signal_qual = tmp_q;
+ }
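+		/*
+		 * The update above is an exponential moving average with weight
+		 * 1/_alpha. Example (illustrative numbers): with _alpha = 3, a
+		 * previous signal_strength of 60 and an avg_signal_strength of
+		 * 90, tmp_s = 90 + 2 * 60 = 210, which divides evenly to 70.
+		 */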
+ }
+ rtw_set_signal_stat_timer(recvpriv);
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_rf.c b/drivers/staging/rtl8188eu/core/rtw_rf.c
new file mode 100644
index 0000000..1170dd0
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_rf.c
@@ -0,0 +1,89 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_RF_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <recv_osdep.h>
+#include <xmit_osdep.h>
+
+
+struct ch_freq {
+ u32 channel;
+ u32 frequency;
+};
+
+static struct ch_freq ch_freq_map[] = {
+ {1, 2412}, {2, 2417}, {3, 2422}, {4, 2427}, {5, 2432},
+ {6, 2437}, {7, 2442}, {8, 2447}, {9, 2452}, {10, 2457},
+ {11, 2462}, {12, 2467}, {13, 2472}, {14, 2484},
+ /* UNII */
+ {36, 5180}, {40, 5200}, {44, 5220}, {48, 5240}, {52, 5260},
+ {56, 5280}, {60, 5300}, {64, 5320}, {149, 5745}, {153, 5765},
+ {157, 5785}, {161, 5805}, {165, 5825}, {167, 5835}, {169, 5845},
+ {171, 5855}, {173, 5865},
+ /* HiperLAN2 */
+ {100, 5500}, {104, 5520}, {108, 5540}, {112, 5560}, {116, 5580},
+ {120, 5600}, {124, 5620}, {128, 5640}, {132, 5660}, {136, 5680},
+ {140, 5700},
+ /* Japan MMAC */
+ {34, 5170}, {38, 5190}, {42, 5210}, {46, 5230},
+ /* Japan */
+ {184, 4920}, {188, 4940}, {192, 4960}, {196, 4980},
+ {208, 5040},/* Japan, means J08 */
+ {212, 5060},/* Japan, means J12 */
+ {216, 5080},/* Japan, means J16 */
+};
+
+static int ch_freq_map_num = (sizeof(ch_freq_map) / sizeof(struct ch_freq));
+
+u32 rtw_ch2freq(u32 channel)
+{
+ u8 i;
+ u32 freq = 0;
+
+ for (i = 0; i < ch_freq_map_num; i++) {
+ if (channel == ch_freq_map[i].channel) {
+ freq = ch_freq_map[i].frequency;
+ break;
+ }
+ }
+ if (i == ch_freq_map_num)
+ freq = 2412;
+
+ return freq;
+}
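+/*
+ * Example: rtw_ch2freq(6) returns 2437 (2.4 GHz channel 6) and
+ * rtw_ch2freq(36) returns 5180; an unknown channel falls back to 2412.
+ */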
+
+u32 rtw_freq2ch(u32 freq)
+{
+ u8 i;
+ u32 ch = 0;
+
+ for (i = 0; i < ch_freq_map_num; i++) {
+ if (freq == ch_freq_map[i].frequency) {
+ ch = ch_freq_map[i].channel;
+ break;
+ }
+ }
+ if (i == ch_freq_map_num)
+ ch = 1;
+
+ return ch;
+}
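+/*
+ * Example: rtw_freq2ch(5180) returns 36 and rtw_freq2ch(2484) returns 14;
+ * an unknown frequency falls back to channel 1.
+ */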
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
new file mode 100644
index 0000000..0f076d0
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -0,0 +1,1779 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_SECURITY_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <wifi.h>
+#include <osdep_intf.h>
+
+/* WEP related ===== */
+
+#define CRC32_POLY 0x04c11db7
+
+struct arc4context {
+ u32 x;
+ u32 y;
+ u8 state[256];
+};
+
+static void arcfour_init(struct arc4context *parc4ctx, u8 *key, u32 key_len)
+{
+ u32 t, u;
+ u32 keyindex;
+ u32 stateindex;
+ u8 *state;
+ u32 counter;
+_func_enter_;
+ state = parc4ctx->state;
+ parc4ctx->x = 0;
+ parc4ctx->y = 0;
+ for (counter = 0; counter < 256; counter++)
+ state[counter] = (u8)counter;
+ keyindex = 0;
+ stateindex = 0;
+ for (counter = 0; counter < 256; counter++) {
+ t = state[counter];
+ stateindex = (stateindex + key[keyindex] + t) & 0xff;
+ u = state[stateindex];
+ state[stateindex] = (u8)t;
+ state[counter] = (u8)u;
+ if (++keyindex >= key_len)
+ keyindex = 0;
+ }
+_func_exit_;
+}
+
+static u32 arcfour_byte(struct arc4context *parc4ctx)
+{
+ u32 x;
+ u32 y;
+ u32 sx, sy;
+ u8 *state;
+_func_enter_;
+ state = parc4ctx->state;
+ x = (parc4ctx->x + 1) & 0xff;
+ sx = state[x];
+ y = (sx + parc4ctx->y) & 0xff;
+ sy = state[y];
+ parc4ctx->x = x;
+ parc4ctx->y = y;
+ state[y] = (u8)sx;
+ state[x] = (u8)sy;
+_func_exit_;
+ return state[(sx + sy) & 0xff];
+}
+
+static void arcfour_encrypt(struct arc4context *parc4ctx, u8 *dest, u8 *src, u32 len)
+{
+ u32 i;
+_func_enter_;
+ for (i = 0; i < len; i++)
+ dest[i] = src[i] ^ (unsigned char)arcfour_byte(parc4ctx);
+_func_exit_;
+}
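+/*
+ * RC4 is a symmetric stream cipher: running arcfour_encrypt() again with a
+ * context initialized from the same seed recovers the plaintext, which is
+ * why the decrypt paths below reuse arcfour_encrypt() instead of needing a
+ * separate decrypt routine.
+ */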
+
+static int bcrc32initialized;
+static u32 crc32_table[256];
+
+static u8 crc32_reverseBit(u8 data)
+{
+ return (u8)((data<<7)&0x80) | ((data<<5)&0x40) | ((data<<3)&0x20) |
+ ((data<<1)&0x10) | ((data>>1)&0x08) | ((data>>3)&0x04) |
+ ((data>>5)&0x02) | ((data>>7)&0x01);
+}
+
+static void crc32_init(void)
+{
+_func_enter_;
+ if (bcrc32initialized == 1) {
+ goto exit;
+ } else {
+ int i, j;
+ u32 c;
+ u8 *p = (u8 *)&c, *p1;
+ u8 k;
+
+ c = 0x12340000;
+
+ for (i = 0; i < 256; ++i) {
+ k = crc32_reverseBit((u8)i);
+ for (c = ((u32)k) << 24, j = 8; j > 0; --j)
+ c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY : (c << 1);
+ p1 = (u8 *)&crc32_table[i];
+
+ p1[0] = crc32_reverseBit(p[3]);
+ p1[1] = crc32_reverseBit(p[2]);
+ p1[2] = crc32_reverseBit(p[1]);
+ p1[3] = crc32_reverseBit(p[0]);
+ }
+ bcrc32initialized = 1;
+ }
+exit:
+_func_exit_;
+}
+
+static __le32 getcrc32(u8 *buf, int len)
+{
+ u8 *p;
+ u32 crc;
+_func_enter_;
+ if (bcrc32initialized == 0)
+ crc32_init();
+
+ crc = 0xffffffff; /* preload shift register, per CRC-32 spec */
+
+ for (p = buf; len > 0; ++p, --len)
+ crc = crc32_table[(crc ^ *p) & 0xff] ^ (crc >> 8);
+_func_exit_;
+ return cpu_to_le32(~crc); /* transmit complement, per CRC-32 spec */
+}
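+/*
+ * getcrc32() computes the standard reflected CRC-32 used for the WEP/TKIP
+ * ICV; as a sanity check, the well-known check value for the ASCII string
+ * "123456789" is 0xcbf43926 (returned here as a little-endian __le32).
+ */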
+
+/*
+ * Fragmentation has to be taken into account: each fragment below gets its
+ * own CRC-32 ICV and its own RC4 pass.
+ */
+void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe)
+{ /* exclude ICV */
+
+ unsigned char crc[4];
+ struct arc4context mycontext;
+
+ int curfragnum, length;
+ u32 keylength;
+
+ u8 *pframe, *payload, *iv; /* wepkey */
+ u8 wepkey[16];
+ u8 hw_hdr_offset = 0;
+ struct pkt_attrib *pattrib = &((struct xmit_frame *)pxmitframe)->attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+_func_enter_;
+
+ if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL)
+ return;
+
+ hw_hdr_offset = TXDESC_SIZE +
+ (((struct xmit_frame *)pxmitframe)->pkt_offset * PACKET_OFFSET_SZ);
+
+ pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;
+
+ /* start to encrypt each fragment */
+ if ((pattrib->encrypt == _WEP40_) || (pattrib->encrypt == _WEP104_)) {
+ keylength = psecuritypriv->dot11DefKeylen[psecuritypriv->dot11PrivacyKeyIndex];
+
+ for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
+ iv = pframe+pattrib->hdrlen;
+ memcpy(&wepkey[0], iv, 3);
+ memcpy(&wepkey[3], &psecuritypriv->dot11DefKey[psecuritypriv->dot11PrivacyKeyIndex].skey[0], keylength);
+ payload = pframe+pattrib->iv_len+pattrib->hdrlen;
+
+ if ((curfragnum+1) == pattrib->nr_frags) { /* the last fragment */
+ length = pattrib->last_txcmdsz-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
+
+ *((__le32 *)crc) = getcrc32(payload, length);
+
+ arcfour_init(&mycontext, wepkey, 3+keylength);
+ arcfour_encrypt(&mycontext, payload, payload, length);
+ arcfour_encrypt(&mycontext, payload+length, crc, 4);
+ } else {
+ length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
+ *((__le32 *)crc) = getcrc32(payload, length);
+ arcfour_init(&mycontext, wepkey, 3+keylength);
+ arcfour_encrypt(&mycontext, payload, payload, length);
+ arcfour_encrypt(&mycontext, payload+length, crc, 4);
+
+ pframe += pxmitpriv->frag_len;
+ pframe = (u8 *)RND4((size_t)(pframe));
+ }
+ }
+ }
+
+_func_exit_;
+}
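+/*
+ * In the loop above the per-fragment RC4 seed is the 3-byte IV taken from
+ * the frame followed by the default key, e.g. 3 + 5 = 8 bytes for WEP-40
+ * and 3 + 13 = 16 bytes for WEP-104; the 4-byte CRC-32 ICV is appended and
+ * encrypted together with the payload.
+ */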
+
+void rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe)
+{
+ /* exclude ICV */
+ u8 crc[4];
+ struct arc4context mycontext;
+ int length;
+ u32 keylength;
+ u8 *pframe, *payload, *iv, wepkey[16];
+ u8 keyindex;
+ struct rx_pkt_attrib *prxattrib = &(((union recv_frame *)precvframe)->u.hdr.attrib);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+
+_func_enter_;
+
+ pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
+
+ /* start to decrypt recvframe */
+ if ((prxattrib->encrypt == _WEP40_) || (prxattrib->encrypt == _WEP104_)) {
+ iv = pframe+prxattrib->hdrlen;
+ keyindex = prxattrib->key_index;
+ keylength = psecuritypriv->dot11DefKeylen[keyindex];
+ memcpy(&wepkey[0], iv, 3);
+ memcpy(&wepkey[3], &psecuritypriv->dot11DefKey[keyindex].skey[0], keylength);
+ length = ((union recv_frame *)precvframe)->u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len;
+
+ payload = pframe+prxattrib->iv_len+prxattrib->hdrlen;
+
+ /* decrypt payload include icv */
+ arcfour_init(&mycontext, wepkey, 3+keylength);
+ arcfour_encrypt(&mycontext, payload, payload, length);
+
+ /* calculate icv and compare the icv */
+ *((__le32 *)crc) = getcrc32(payload, length - 4);
+
+ if (crc[3] != payload[length-1] ||
+ crc[2] != payload[length-2] ||
+ crc[1] != payload[length-3] ||
+ crc[0] != payload[length-4]) {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
+ ("rtw_wep_decrypt:icv error crc (%4ph)!=payload (%4ph)\n",
+ &crc, &payload[length-4]));
+ }
+ }
+_func_exit_;
+ return;
+}
+
+/* 3 ===== TKIP related ===== */
+
+static u32 secmicgetuint32(u8 *p)
+/* Convert from a byte array to a u32 in a portable (little-endian) way */
+{
+ s32 i;
+ u32 res = 0;
+_func_enter_;
+ for (i = 0; i < 4; i++)
+ res |= ((u32)(*p++)) << (8*i);
+_func_exit_;
+ return res;
+}
+
+static void secmicputuint32(u8 *p, u32 val)
+/* Convert from a u32 to a byte array in a portable (little-endian) way */
+{
+ long i;
+_func_enter_;
+ for (i = 0; i < 4; i++) {
+ *p++ = (u8) (val & 0xff);
+ val >>= 8;
+ }
+_func_exit_;
+}
+
+static void secmicclear(struct mic_data *pmicdata)
+{
+/* Reset the state to the empty message. */
+_func_enter_;
+ pmicdata->L = pmicdata->K0;
+ pmicdata->R = pmicdata->K1;
+ pmicdata->nBytesInM = 0;
+ pmicdata->M = 0;
+_func_exit_;
+}
+
+void rtw_secmicsetkey(struct mic_data *pmicdata, u8 *key)
+{
+ /* Set the key */
+_func_enter_;
+ pmicdata->K0 = secmicgetuint32(key);
+ pmicdata->K1 = secmicgetuint32(key + 4);
+ /* and reset the message */
+ secmicclear(pmicdata);
+_func_exit_;
+}
+
+void rtw_secmicappendbyte(struct mic_data *pmicdata, u8 b)
+{
+_func_enter_;
+ /* Append the byte to our word-sized buffer */
+ pmicdata->M |= ((unsigned long)b) << (8*pmicdata->nBytesInM);
+ pmicdata->nBytesInM++;
+ /* Process the word if it is full. */
+ if (pmicdata->nBytesInM >= 4) {
+ pmicdata->L ^= pmicdata->M;
+ pmicdata->R ^= ROL32(pmicdata->L, 17);
+ pmicdata->L += pmicdata->R;
+ pmicdata->R ^= ((pmicdata->L & 0xff00ff00) >> 8) | ((pmicdata->L & 0x00ff00ff) << 8);
+ pmicdata->L += pmicdata->R;
+ pmicdata->R ^= ROL32(pmicdata->L, 3);
+ pmicdata->L += pmicdata->R;
+ pmicdata->R ^= ROR32(pmicdata->L, 2);
+ pmicdata->L += pmicdata->R;
+ /* Clear the buffer */
+ pmicdata->M = 0;
+ pmicdata->nBytesInM = 0;
+ }
+_func_exit_;
+}
+
+void rtw_secmicappend(struct mic_data *pmicdata, u8 *src, u32 nbytes)
+{
+_func_enter_;
+ /* This is simple */
+ while (nbytes > 0) {
+ rtw_secmicappendbyte(pmicdata, *src++);
+ nbytes--;
+ }
+_func_exit_;
+}
+
+void rtw_secgetmic(struct mic_data *pmicdata, u8 *dst)
+{
+_func_enter_;
+ /* Append the minimum padding */
+ rtw_secmicappendbyte(pmicdata, 0x5a);
+ rtw_secmicappendbyte(pmicdata, 0);
+ rtw_secmicappendbyte(pmicdata, 0);
+ rtw_secmicappendbyte(pmicdata, 0);
+ rtw_secmicappendbyte(pmicdata, 0);
+ /* and then zeroes until the length is a multiple of 4 */
+ while (pmicdata->nBytesInM != 0)
+ rtw_secmicappendbyte(pmicdata, 0);
+ /* The appendByte function has already computed the result. */
+ secmicputuint32(dst, pmicdata->L);
+ secmicputuint32(dst+4, pmicdata->R);
+ /* Reset to the empty message. */
+ secmicclear(pmicdata);
+_func_exit_;
+}
+
+void rtw_seccalctkipmic(u8 *key, u8 *header, u8 *data, u32 data_len, u8 *mic_code, u8 pri)
+{
+ struct mic_data micdata;
+ u8 priority[4] = {0x0, 0x0, 0x0, 0x0};
+_func_enter_;
+ rtw_secmicsetkey(&micdata, key);
+ priority[0] = pri;
+
+	/* Michael MIC pseudo header: DA, SA, priority, 3 reserved zero bytes */
+ if (header[1]&1) { /* ToDS == 1 */
+ rtw_secmicappend(&micdata, &header[16], 6); /* DA */
+ if (header[1]&2) /* From Ds == 1 */
+ rtw_secmicappend(&micdata, &header[24], 6);
+ else
+ rtw_secmicappend(&micdata, &header[10], 6);
+ } else { /* ToDS == 0 */
+ rtw_secmicappend(&micdata, &header[4], 6); /* DA */
+ if (header[1]&2) /* From Ds == 1 */
+ rtw_secmicappend(&micdata, &header[16], 6);
+ else
+ rtw_secmicappend(&micdata, &header[10], 6);
+ }
+ rtw_secmicappend(&micdata, &priority[0], 4);
+
+ rtw_secmicappend(&micdata, data, data_len);
+
+ rtw_secgetmic(&micdata, mic_code);
+_func_exit_;
+}
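+/*
+ * Minimal usage sketch (hypothetical buffers): given the 8-byte TX or RX
+ * Michael key, the 802.11 header and the plaintext MSDU,
+ *
+ *	u8 mic[8];
+ *	rtw_seccalctkipmic(mic_key, wlan_hdr, msdu, msdu_len, mic, priority);
+ *
+ * produces the 8-byte MIC that TKIP appends to the MSDU before fragmentation.
+ */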
+
+
+
+/* macros for extraction/creation of byte/16-bit word values */
+#define RotR1(v16) ((((v16) >> 1) & 0x7FFF) ^ (((v16) & 1) << 15))
+#define Lo8(v16) ((u8)((v16) & 0x00FF))
+#define Hi8(v16) ((u8)(((v16) >> 8) & 0x00FF))
+#define Lo16(v32) ((u16)((v32) & 0xFFFF))
+#define Hi16(v32) ((u16)(((v32) >> 16) & 0xFFFF))
+#define Mk16(hi, lo) ((lo) ^ (((u16)(hi)) << 8))
+
+/* select the Nth 16-bit word of the temporal key unsigned char array TK[] */
+#define TK16(N) Mk16(tk[2*(N)+1], tk[2*(N)])
+
+/* S-box lookup: 16 bits --> 16 bits */
+#define _S_(v16) (Sbox1[0][Lo8(v16)] ^ Sbox1[1][Hi8(v16)])
+
+/* fixed algorithm "parameters" */
+#define PHASE1_LOOP_CNT 8 /* this needs to be "big enough" */
+#define TA_SIZE 6 /* 48-bit transmitter address */
+#define TK_SIZE 16 /* 128-bit temporal key */
+#define P1K_SIZE 10 /* 80-bit Phase1 key */
+#define RC4_KEY_SIZE 16 /* 128-bit RC4KEY (104 bits unknown) */
+
+/* 2-byte by 2-byte subset of the full AES S-box table */
+static const unsigned short Sbox1[2][256] = { /* Sbox for hash (can be in ROM) */
+{
+ 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
+ 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A,
+ 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B,
+ 0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B,
+ 0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F,
+ 0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F,
+ 0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5,
+ 0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F,
+ 0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB,
+ 0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397,
+ 0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED,
+ 0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A,
+ 0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194,
+ 0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3,
+ 0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104,
+ 0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D,
+ 0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39,
+ 0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695,
+ 0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83,
+ 0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76,
+ 0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4,
+ 0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B,
+ 0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0,
+ 0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018,
+ 0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751,
+ 0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85,
+ 0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12,
+ 0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9,
+ 0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7,
+ 0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A,
+ 0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8,
+ 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A,
+ },
+
+	{ /* second half of the table is the byte-swapped version of the first! */
+ 0xA5C6, 0x84F8, 0x99EE, 0x8DF6, 0x0DFF, 0xBDD6, 0xB1DE, 0x5491,
+ 0x5060, 0x0302, 0xA9CE, 0x7D56, 0x19E7, 0x62B5, 0xE64D, 0x9AEC,
+ 0x458F, 0x9D1F, 0x4089, 0x87FA, 0x15EF, 0xEBB2, 0xC98E, 0x0BFB,
+ 0xEC41, 0x67B3, 0xFD5F, 0xEA45, 0xBF23, 0xF753, 0x96E4, 0x5B9B,
+ 0xC275, 0x1CE1, 0xAE3D, 0x6A4C, 0x5A6C, 0x417E, 0x02F5, 0x4F83,
+ 0x5C68, 0xF451, 0x34D1, 0x08F9, 0x93E2, 0x73AB, 0x5362, 0x3F2A,
+ 0x0C08, 0x5295, 0x6546, 0x5E9D, 0x2830, 0xA137, 0x0F0A, 0xB52F,
+ 0x090E, 0x3624, 0x9B1B, 0x3DDF, 0x26CD, 0x694E, 0xCD7F, 0x9FEA,
+ 0x1B12, 0x9E1D, 0x7458, 0x2E34, 0x2D36, 0xB2DC, 0xEEB4, 0xFB5B,
+ 0xF6A4, 0x4D76, 0x61B7, 0xCE7D, 0x7B52, 0x3EDD, 0x715E, 0x9713,
+ 0xF5A6, 0x68B9, 0x0000, 0x2CC1, 0x6040, 0x1FE3, 0xC879, 0xEDB6,
+ 0xBED4, 0x468D, 0xD967, 0x4B72, 0xDE94, 0xD498, 0xE8B0, 0x4A85,
+ 0x6BBB, 0x2AC5, 0xE54F, 0x16ED, 0xC586, 0xD79A, 0x5566, 0x9411,
+ 0xCF8A, 0x10E9, 0x0604, 0x81FE, 0xF0A0, 0x4478, 0xBA25, 0xE34B,
+ 0xF3A2, 0xFE5D, 0xC080, 0x8A05, 0xAD3F, 0xBC21, 0x4870, 0x04F1,
+ 0xDF63, 0xC177, 0x75AF, 0x6342, 0x3020, 0x1AE5, 0x0EFD, 0x6DBF,
+ 0x4C81, 0x1418, 0x3526, 0x2FC3, 0xE1BE, 0xA235, 0xCC88, 0x392E,
+ 0x5793, 0xF255, 0x82FC, 0x477A, 0xACC8, 0xE7BA, 0x2B32, 0x95E6,
+ 0xA0C0, 0x9819, 0xD19E, 0x7FA3, 0x6644, 0x7E54, 0xAB3B, 0x830B,
+ 0xCA8C, 0x29C7, 0xD36B, 0x3C28, 0x79A7, 0xE2BC, 0x1D16, 0x76AD,
+ 0x3BDB, 0x5664, 0x4E74, 0x1E14, 0xDB92, 0x0A0C, 0x6C48, 0xE4B8,
+ 0x5D9F, 0x6EBD, 0xEF43, 0xA6C4, 0xA839, 0xA431, 0x37D3, 0x8BF2,
+ 0x32D5, 0x438B, 0x596E, 0xB7DA, 0x8C01, 0x64B1, 0xD29C, 0xE049,
+ 0xB4D8, 0xFAAC, 0x07F3, 0x25CF, 0xAFCA, 0x8EF4, 0xE947, 0x1810,
+ 0xD56F, 0x88F0, 0x6F4A, 0x725C, 0x2438, 0xF157, 0xC773, 0x5197,
+ 0x23CB, 0x7CA1, 0x9CE8, 0x213E, 0xDD96, 0xDC61, 0x860D, 0x850F,
+ 0x90E0, 0x427C, 0xC471, 0xAACC, 0xD890, 0x0506, 0x01F7, 0x121C,
+ 0xA3C2, 0x5F6A, 0xF9AE, 0xD069, 0x9117, 0x5899, 0x273A, 0xB927,
+ 0x38D9, 0x13EB, 0xB32B, 0x3322, 0xBBD2, 0x70A9, 0x8907, 0xA733,
+ 0xB62D, 0x223C, 0x9215, 0x20C9, 0x4987, 0xFFAA, 0x7850, 0x7AA5,
+ 0x8F03, 0xF859, 0x8009, 0x171A, 0xDA65, 0x31D7, 0xC684, 0xB8D0,
+ 0xC382, 0xB029, 0x775A, 0x111E, 0xCB7B, 0xFCA8, 0xD66D, 0x3A2C,
+ }
+};
+
+ /*
+**********************************************************************
+* Routine: Phase 1 -- generate P1K, given TA, TK, IV32
+*
+* Inputs:
+* tk[] = temporal key [128 bits]
+* ta[] = transmitter's MAC address [ 48 bits]
+* iv32 = upper 32 bits of IV [ 32 bits]
+* Output:
+* p1k[] = Phase 1 key [ 80 bits]
+*
+* Note:
+* This function only needs to be called every 2**16 packets,
+* although in theory it could be called every packet.
+*
+**********************************************************************
+*/
+static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32)
+{
+ int i;
+_func_enter_;
+ /* Initialize the 80 bits of P1K[] from IV32 and TA[0..5] */
+ p1k[0] = Lo16(iv32);
+ p1k[1] = Hi16(iv32);
+ p1k[2] = Mk16(ta[1], ta[0]); /* use TA[] as little-endian */
+ p1k[3] = Mk16(ta[3], ta[2]);
+ p1k[4] = Mk16(ta[5], ta[4]);
+
+ /* Now compute an unbalanced Feistel cipher with 80-bit block */
+ /* size on the 80-bit block P1K[], using the 128-bit key TK[] */
+ for (i = 0; i < PHASE1_LOOP_CNT; i++) { /* Each add operation here is mod 2**16 */
+ p1k[0] += _S_(p1k[4] ^ TK16((i&1)+0));
+ p1k[1] += _S_(p1k[0] ^ TK16((i&1)+2));
+ p1k[2] += _S_(p1k[1] ^ TK16((i&1)+4));
+ p1k[3] += _S_(p1k[2] ^ TK16((i&1)+6));
+ p1k[4] += _S_(p1k[3] ^ TK16((i&1)+0));
+ p1k[4] += (unsigned short)i; /* avoid "slide attacks" */
+ }
+_func_exit_;
+}
+
+/*
+**********************************************************************
+* Routine: Phase 2 -- generate RC4KEY, given TK, P1K, IV16
+*
+* Inputs:
+* tk[] = Temporal key [128 bits]
+* p1k[] = Phase 1 output key [ 80 bits]
+* iv16 = low 16 bits of IV counter [ 16 bits]
+* Output:
+* rc4key[] = the key used to encrypt the packet [128 bits]
+*
+* Note:
+* The value {TA, IV32, IV16} for Phase1/Phase2 must be unique
+* across all packets using the same key TK value. Then, for a
+* given value of TK[], this TKIP48 construction guarantees that
+* the final RC4KEY value is unique across all packets.
+*
+* Suggested implementation optimization: if PPK[] is "overlaid"
+* appropriately on RC4KEY[], there is no need for the final
+* for loop below that copies the PPK[] result into RC4KEY[].
+*
+**********************************************************************
+*/
+static void phase2(u8 *rc4key, const u8 *tk, const u16 *p1k, u16 iv16)
+{
+ int i;
+ u16 PPK[6]; /* temporary key for mixing */
+_func_enter_;
+ /* Note: all adds in the PPK[] equations below are mod 2**16 */
+ for (i = 0; i < 5; i++)
+ PPK[i] = p1k[i]; /* first, copy P1K to PPK */
+ PPK[5] = p1k[4] + iv16; /* next, add in IV16 */
+
+ /* Bijective non-linear mixing of the 96 bits of PPK[0..5] */
+ PPK[0] += _S_(PPK[5] ^ TK16(0)); /* Mix key in each "round" */
+ PPK[1] += _S_(PPK[0] ^ TK16(1));
+ PPK[2] += _S_(PPK[1] ^ TK16(2));
+ PPK[3] += _S_(PPK[2] ^ TK16(3));
+ PPK[4] += _S_(PPK[3] ^ TK16(4));
+ PPK[5] += _S_(PPK[4] ^ TK16(5)); /* Total # S-box lookups == 6 */
+
+ /* Final sweep: bijective, "linear". Rotates kill LSB correlations */
+ PPK[0] += RotR1(PPK[5] ^ TK16(6));
+ PPK[1] += RotR1(PPK[0] ^ TK16(7)); /* Use all of TK[] in Phase2 */
+ PPK[2] += RotR1(PPK[1]);
+ PPK[3] += RotR1(PPK[2]);
+ PPK[4] += RotR1(PPK[3]);
+ PPK[5] += RotR1(PPK[4]);
+ /* Note: At this point, for a given key TK[0..15], the 96-bit output */
+ /* value PPK[0..5] is guaranteed to be unique, as a function */
+ /* of the 96-bit "input" value {TA, IV32, IV16}. That is, P1K */
+ /* is now a keyed permutation of {TA, IV32, IV16}. */
+
+ /* Set RC4KEY[0..3], which includes "cleartext" portion of RC4 key */
+ rc4key[0] = Hi8(iv16); /* RC4KEY[0..2] is the WEP IV */
+ rc4key[1] = (Hi8(iv16) | 0x20) & 0x7F; /* Help avoid weak (FMS) keys */
+ rc4key[2] = Lo8(iv16);
+ rc4key[3] = Lo8((PPK[5] ^ TK16(0)) >> 1);
+
+ /* Copy 96 bits of PPK[0..5] to RC4KEY[4..15] (little-endian) */
+ for (i = 0; i < 6; i++) {
+ rc4key[4+2*i] = Lo8(PPK[i]);
+ rc4key[5+2*i] = Hi8(PPK[i]);
+ }
+_func_exit_;
+}
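+/*
+ * Putting the two phases together (sketch): for a 128-bit temporal key TK,
+ * transmitter address TA and a 48-bit TSC split into iv32/iv16, the callers
+ * below do
+ *
+ *	phase1(p1k, TK, TA, iv32);
+ *	phase2(rc4key, TK, p1k, iv16);
+ *
+ * and feed the resulting 16-byte rc4key to arcfour_init(); its first three
+ * bytes double as the WEP-style IV carried in the frame.
+ */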
+
+/* The hlen doesn't include the IV */
+u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
+{ /* exclude ICV */
+ u16 pnl;
+ u32 pnh;
+ u8 rc4key[16];
+ u8 ttkey[16];
+ u8 crc[4];
+ u8 hw_hdr_offset = 0;
+ struct arc4context mycontext;
+ int curfragnum, length;
+
+ u8 *pframe, *payload, *iv, *prwskey;
+ union pn48 dot11txpn;
+ struct sta_info *stainfo;
+ struct pkt_attrib *pattrib = &((struct xmit_frame *)pxmitframe)->attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ u32 res = _SUCCESS;
+_func_enter_;
+
+ if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL)
+ return _FAIL;
+
+ hw_hdr_offset = TXDESC_SIZE +
+ (((struct xmit_frame *)pxmitframe)->pkt_offset * PACKET_OFFSET_SZ);
+ pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;
+ /* 4 start to encrypt each fragment */
+ if (pattrib->encrypt == _TKIP_) {
+ if (pattrib->psta)
+ stainfo = pattrib->psta;
+ else
+ stainfo = rtw_get_stainfo(&padapter->stapriv, &pattrib->ra[0]);
+
+ if (stainfo != NULL) {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_encrypt: stainfo!= NULL!!!\n"));
+
+ if (IS_MCAST(pattrib->ra))
+ prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey;
+ else
+ prwskey = &stainfo->dot118021x_UncstKey.skey[0];
+
+ for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
+ iv = pframe+pattrib->hdrlen;
+ payload = pframe+pattrib->iv_len+pattrib->hdrlen;
+
+ GET_TKIP_PN(iv, dot11txpn);
+
+ pnl = (u16)(dot11txpn.val);
+ pnh = (u32)(dot11txpn.val>>16);
+ phase1((u16 *)&ttkey[0], prwskey, &pattrib->ta[0], pnh);
+ phase2(&rc4key[0], prwskey, (u16 *)&ttkey[0], pnl);
+
+ if ((curfragnum+1) == pattrib->nr_frags) { /* 4 the last fragment */
+ length = pattrib->last_txcmdsz-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
+ RT_TRACE(_module_rtl871x_security_c_, _drv_info_,
+ ("pattrib->iv_len=%x, pattrib->icv_len=%x\n",
+ pattrib->iv_len, pattrib->icv_len));
+ *((__le32 *)crc) = getcrc32(payload, length);/* modified by Amy*/
+
+ arcfour_init(&mycontext, rc4key, 16);
+ arcfour_encrypt(&mycontext, payload, payload, length);
+ arcfour_encrypt(&mycontext, payload+length, crc, 4);
+ } else {
+					length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
+ *((__le32 *)crc) = getcrc32(payload, length);/* modified by Amy*/
+ arcfour_init(&mycontext, rc4key, 16);
+ arcfour_encrypt(&mycontext, payload, payload, length);
+ arcfour_encrypt(&mycontext, payload+length, crc, 4);
+
+ pframe += pxmitpriv->frag_len;
+ pframe = (u8 *)RND4((size_t)(pframe));
+ }
+ }
+ } else {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_encrypt: stainfo==NULL!!!\n"));
+ res = _FAIL;
+ }
+ }
+_func_exit_;
+ return res;
+}
+
+/* The hlen doesn't include the IV */
+u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
+{ /* exclude ICV */
+ u16 pnl;
+ u32 pnh;
+ u8 rc4key[16];
+ u8 ttkey[16];
+ u8 crc[4];
+ struct arc4context mycontext;
+ int length;
+
+ u8 *pframe, *payload, *iv, *prwskey;
+ union pn48 dot11txpn;
+ struct sta_info *stainfo;
+ struct rx_pkt_attrib *prxattrib = &((union recv_frame *)precvframe)->u.hdr.attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ u32 res = _SUCCESS;
+
+_func_enter_;
+
+ pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
+
+ /* 4 start to decrypt recvframe */
+ if (prxattrib->encrypt == _TKIP_) {
+ stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
+ if (stainfo != NULL) {
+ if (IS_MCAST(prxattrib->ra)) {
+ if (!psecuritypriv->binstallGrpkey) {
+ res = _FAIL;
+ DBG_88E("%s:rx bc/mc packets, but didn't install group key!!!!!!!!!!\n", __func__);
+ goto exit;
+ }
+ prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
+ } else {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_decrypt: stainfo!= NULL!!!\n"));
+ prwskey = &stainfo->dot118021x_UncstKey.skey[0];
+ }
+
+ iv = pframe+prxattrib->hdrlen;
+ payload = pframe+prxattrib->iv_len+prxattrib->hdrlen;
+ length = ((union recv_frame *)precvframe)->u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len;
+
+ GET_TKIP_PN(iv, dot11txpn);
+
+ pnl = (u16)(dot11txpn.val);
+ pnh = (u32)(dot11txpn.val>>16);
+
+ phase1((u16 *)&ttkey[0], prwskey, &prxattrib->ta[0], pnh);
+ phase2(&rc4key[0], prwskey, (unsigned short *)&ttkey[0], pnl);
+
+ /* 4 decrypt payload include icv */
+
+ arcfour_init(&mycontext, rc4key, 16);
+ arcfour_encrypt(&mycontext, payload, payload, length);
+
+ *((__le32 *)crc) = getcrc32(payload, length-4);
+
+ if (crc[3] != payload[length-1] ||
+ crc[2] != payload[length-2] ||
+ crc[1] != payload[length-3] ||
+ crc[0] != payload[length-4]) {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
+ ("rtw_wep_decrypt:icv error crc (%4ph)!=payload (%4ph)\n",
+ &crc, &payload[length-4]));
+ res = _FAIL;
+ }
+ } else {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_decrypt: stainfo==NULL!!!\n"));
+ res = _FAIL;
+ }
+ }
+_func_exit_;
+exit:
+ return res;
+}
+
+/* 3 ===== AES related ===== */
+
+
+#define MAX_MSG_SIZE 2048
+/*****************************/
+/******** SBOX Table *********/
+/*****************************/
+
+static u8 sbox_table[256] = {
+ 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
+ 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
+ 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
+ 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
+ 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
+ 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
+ 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
+ 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
+ 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
+ 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
+ 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
+ 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
+ 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
+ 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
+ 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
+ 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
+ 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
+ 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
+ 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
+ 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
+ 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
+ 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
+ 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
+ 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
+ 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
+ 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
+ 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
+ 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
+ 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
+ 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
+ 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
+ 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+};
+
+/*****************************/
+/**** Function Prototypes ****/
+/*****************************/
+
+static void bitwise_xor(u8 *ina, u8 *inb, u8 *out);
+static void construct_mic_iv(u8 *mic_header1, int qc_exists, int a4_exists, u8 *mpdu, uint payload_length, u8 *pn_vector);
+static void construct_mic_header1(u8 *mic_header1, int header_length, u8 *mpdu);
+static void construct_mic_header2(u8 *mic_header2, u8 *mpdu, int a4_exists, int qc_exists);
+static void construct_ctr_preload(u8 *ctr_preload, int a4_exists, int qc_exists, u8 *mpdu, u8 *pn_vector, int c);
+static void xor_128(u8 *a, u8 *b, u8 *out);
+static void xor_32(u8 *a, u8 *b, u8 *out);
+static u8 sbox(u8 a);
+static void next_key(u8 *key, int round);
+static void byte_sub(u8 *in, u8 *out);
+static void shift_row(u8 *in, u8 *out);
+static void mix_column(u8 *in, u8 *out);
+static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext);
+
+/****************************************/
+/* aes128k128d() */
+/* Performs a 128 bit AES encrypt with */
+/* 128 bit data. */
+/****************************************/
+static void xor_128(u8 *a, u8 *b, u8 *out)
+{
+ int i;
+_func_enter_;
+ for (i = 0; i < 16; i++)
+ out[i] = a[i] ^ b[i];
+_func_exit_;
+}
+
+static void xor_32(u8 *a, u8 *b, u8 *out)
+{
+ int i;
+_func_enter_;
+ for (i = 0; i < 4; i++)
+ out[i] = a[i] ^ b[i];
+_func_exit_;
+}
+
+static u8 sbox(u8 a)
+{
+ return sbox_table[(int)a];
+}
+
+static void next_key(u8 *key, int round)
+{
+ u8 rcon;
+ u8 sbox_key[4];
+ u8 rcon_table[12] = {
+ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
+ 0x1b, 0x36, 0x36, 0x36
+ };
+_func_enter_;
+ sbox_key[0] = sbox(key[13]);
+ sbox_key[1] = sbox(key[14]);
+ sbox_key[2] = sbox(key[15]);
+ sbox_key[3] = sbox(key[12]);
+
+ rcon = rcon_table[round];
+
+ xor_32(&key[0], sbox_key, &key[0]);
+ key[0] = key[0] ^ rcon;
+
+ xor_32(&key[4], &key[0], &key[4]);
+ xor_32(&key[8], &key[4], &key[8]);
+ xor_32(&key[12], &key[8], &key[12]);
+_func_exit_;
+}
+
+static void byte_sub(u8 *in, u8 *out)
+{
+ int i;
+_func_enter_;
+ for (i = 0; i < 16; i++)
+ out[i] = sbox(in[i]);
+_func_exit_;
+}
+
+static void shift_row(u8 *in, u8 *out)
+{
+_func_enter_;
+ out[0] = in[0];
+ out[1] = in[5];
+ out[2] = in[10];
+ out[3] = in[15];
+ out[4] = in[4];
+ out[5] = in[9];
+ out[6] = in[14];
+ out[7] = in[3];
+ out[8] = in[8];
+ out[9] = in[13];
+ out[10] = in[2];
+ out[11] = in[7];
+ out[12] = in[12];
+ out[13] = in[1];
+ out[14] = in[6];
+ out[15] = in[11];
+_func_exit_;
+}
+
+static void mix_column(u8 *in, u8 *out)
+{
+ int i;
+ u8 add1b[4];
+ u8 add1bf7[4];
+ u8 rotl[4];
+ u8 swap_halfs[4];
+ u8 andf7[4];
+ u8 rotr[4];
+ u8 temp[4];
+ u8 tempb[4];
+_func_enter_;
+ for (i = 0 ; i < 4; i++) {
+ if ((in[i] & 0x80) == 0x80)
+ add1b[i] = 0x1b;
+ else
+ add1b[i] = 0x00;
+ }
+
+	swap_halfs[0] = in[2]; /* Swap halves */
+ swap_halfs[1] = in[3];
+ swap_halfs[2] = in[0];
+ swap_halfs[3] = in[1];
+
+ rotl[0] = in[3]; /* Rotate left 8 bits */
+ rotl[1] = in[0];
+ rotl[2] = in[1];
+ rotl[3] = in[2];
+
+ andf7[0] = in[0] & 0x7f;
+ andf7[1] = in[1] & 0x7f;
+ andf7[2] = in[2] & 0x7f;
+ andf7[3] = in[3] & 0x7f;
+
+ for (i = 3; i > 0; i--) { /* logical shift left 1 bit */
+ andf7[i] = andf7[i] << 1;
+ if ((andf7[i-1] & 0x80) == 0x80)
+ andf7[i] = (andf7[i] | 0x01);
+ }
+ andf7[0] = andf7[0] << 1;
+ andf7[0] = andf7[0] & 0xfe;
+
+ xor_32(add1b, andf7, add1bf7);
+
+ xor_32(in, add1bf7, rotr);
+
+ temp[0] = rotr[0]; /* Rotate right 8 bits */
+ rotr[0] = rotr[1];
+ rotr[1] = rotr[2];
+ rotr[2] = rotr[3];
+ rotr[3] = temp[0];
+
+ xor_32(add1bf7, rotr, temp);
+ xor_32(swap_halfs, rotl, tempb);
+ xor_32(temp, tempb, out);
+_func_exit_;
+}
+
+static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext)
+{
+ int round;
+ int i;
+ u8 intermediatea[16];
+ u8 intermediateb[16];
+ u8 round_key[16];
+_func_enter_;
+ for (i = 0; i < 16; i++)
+ round_key[i] = key[i];
+ for (round = 0; round < 11; round++) {
+ if (round == 0) {
+ xor_128(round_key, data, ciphertext);
+ next_key(round_key, round);
+ } else if (round == 10) {
+ byte_sub(ciphertext, intermediatea);
+ shift_row(intermediatea, intermediateb);
+ xor_128(intermediateb, round_key, ciphertext);
+ } else { /* 1 - 9 */
+ byte_sub(ciphertext, intermediatea);
+ shift_row(intermediatea, intermediateb);
+ mix_column(&intermediateb[0], &intermediatea[0]);
+ mix_column(&intermediateb[4], &intermediatea[4]);
+ mix_column(&intermediateb[8], &intermediatea[8]);
+ mix_column(&intermediateb[12], &intermediatea[12]);
+ xor_128(intermediatea, round_key, ciphertext);
+ next_key(round_key, round);
+ }
+ }
+_func_exit_;
+}
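+/*
+ * aes128k128d() is a straight single-block AES-128 encryption; as a sanity
+ * check, an all-zero key and all-zero data block should produce the familiar
+ * ciphertext 66 e9 4b d4 ef 8a 2c 3b 88 4c fa 59 ca 34 2b 2e.
+ */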
+
+/************************************************/
+/* construct_mic_iv() */
+/* Builds the MIC IV from header fields and PN */
+/************************************************/
+static void construct_mic_iv(u8 *mic_iv, int qc_exists, int a4_exists, u8 *mpdu,
+ uint payload_length, u8 *pn_vector)
+{
+ int i;
+_func_enter_;
+ mic_iv[0] = 0x59;
+ if (qc_exists && a4_exists)
+ mic_iv[1] = mpdu[30] & 0x0f; /* QoS_TC */
+ if (qc_exists && !a4_exists)
+ mic_iv[1] = mpdu[24] & 0x0f; /* mute bits 7-4 */
+ if (!qc_exists)
+ mic_iv[1] = 0x00;
+ for (i = 2; i < 8; i++)
+ mic_iv[i] = mpdu[i + 8]; /* mic_iv[2:7] = A2[0:5] = mpdu[10:15] */
+ for (i = 8; i < 14; i++)
+ mic_iv[i] = pn_vector[13 - i]; /* mic_iv[8:13] = PN[5:0] */
+ mic_iv[14] = (unsigned char) (payload_length / 256);
+ mic_iv[15] = (unsigned char) (payload_length % 256);
+_func_exit_;
+}
+
+/************************************************/
+/* construct_mic_header1() */
+/* Builds the first MIC header block from */
+/* header fields. */
+/************************************************/
+static void construct_mic_header1(u8 *mic_header1, int header_length, u8 *mpdu)
+{
+_func_enter_;
+ mic_header1[0] = (u8)((header_length - 2) / 256);
+ mic_header1[1] = (u8)((header_length - 2) % 256);
+ mic_header1[2] = mpdu[0] & 0xcf; /* Mute CF poll & CF ack bits */
+ mic_header1[3] = mpdu[1] & 0xc7; /* Mute retry, more data and pwr mgt bits */
+ mic_header1[4] = mpdu[4]; /* A1 */
+ mic_header1[5] = mpdu[5];
+ mic_header1[6] = mpdu[6];
+ mic_header1[7] = mpdu[7];
+ mic_header1[8] = mpdu[8];
+ mic_header1[9] = mpdu[9];
+ mic_header1[10] = mpdu[10]; /* A2 */
+ mic_header1[11] = mpdu[11];
+ mic_header1[12] = mpdu[12];
+ mic_header1[13] = mpdu[13];
+ mic_header1[14] = mpdu[14];
+ mic_header1[15] = mpdu[15];
+_func_exit_;
+}
+
+/************************************************/
+/* construct_mic_header2() */
+/* Builds the last MIC header block from */
+/* header fields. */
+/************************************************/
+static void construct_mic_header2(u8 *mic_header2, u8 *mpdu, int a4_exists, int qc_exists)
+{
+ int i;
+_func_enter_;
+ for (i = 0; i < 16; i++)
+ mic_header2[i] = 0x00;
+
+ mic_header2[0] = mpdu[16]; /* A3 */
+ mic_header2[1] = mpdu[17];
+ mic_header2[2] = mpdu[18];
+ mic_header2[3] = mpdu[19];
+ mic_header2[4] = mpdu[20];
+ mic_header2[5] = mpdu[21];
+
+ mic_header2[6] = 0x00;
+ mic_header2[7] = 0x00; /* mpdu[23]; */
+
+ if (!qc_exists && a4_exists) {
+ for (i = 0; i < 6; i++)
+ mic_header2[8+i] = mpdu[24+i]; /* A4 */
+ }
+
+ if (qc_exists && !a4_exists) {
+ mic_header2[8] = mpdu[24] & 0x0f; /* mute bits 15 - 4 */
+ mic_header2[9] = mpdu[25] & 0x00;
+ }
+
+ if (qc_exists && a4_exists) {
+ for (i = 0; i < 6; i++)
+ mic_header2[8+i] = mpdu[24+i]; /* A4 */
+
+ mic_header2[14] = mpdu[30] & 0x0f;
+ mic_header2[15] = mpdu[31] & 0x00;
+ }
+
+_func_exit_;
+}
+
+/************************************************/
+/* construct_ctr_preload()                      */
+/* Builds the CCM counter-mode preload block    */
+/* from header fields and the packet number.    */
+/************************************************/
+static void construct_ctr_preload(u8 *ctr_preload, int a4_exists, int qc_exists, u8 *mpdu, u8 *pn_vector, int c)
+{
+ int i;
+_func_enter_;
+ for (i = 0; i < 16; i++)
+ ctr_preload[i] = 0x00;
+ i = 0;
+
+ ctr_preload[0] = 0x01; /* flag */
+ if (qc_exists && a4_exists)
+		ctr_preload[1] = mpdu[30] & 0x0f; /* QoS_Control */
+ if (qc_exists && !a4_exists)
+ ctr_preload[1] = mpdu[24] & 0x0f;
+
+ for (i = 2; i < 8; i++)
+ ctr_preload[i] = mpdu[i + 8]; /* ctr_preload[2:7] = A2[0:5] = mpdu[10:15] */
+ for (i = 8; i < 14; i++)
+ ctr_preload[i] = pn_vector[13 - i]; /* ctr_preload[8:13] = PN[5:0] */
+ ctr_preload[14] = (unsigned char) (c / 256); /* Ctr */
+ ctr_preload[15] = (unsigned char) (c % 256);
+_func_exit_;
+}
+
+/************************************/
+/* bitwise_xor() */
+/* A 128 bit, bitwise exclusive or */
+/************************************/
+static void bitwise_xor(u8 *ina, u8 *inb, u8 *out)
+{
+ int i;
+_func_enter_;
+ for (i = 0; i < 16; i++)
+ out[i] = ina[i] ^ inb[i];
+_func_exit_;
+}
+
+static int aes_cipher(u8 *key, uint hdrlen, u8 *pframe, uint plen)
+{
+ uint qc_exists, a4_exists, i, j, payload_remainder,
+ num_blocks, payload_index;
+
+ u8 pn_vector[6];
+ u8 mic_iv[16];
+ u8 mic_header1[16];
+ u8 mic_header2[16];
+ u8 ctr_preload[16];
+
+ /* Intermediate Buffers */
+ u8 chain_buffer[16];
+ u8 aes_out[16];
+ u8 padded_buffer[16];
+ u8 mic[8];
+ uint frtype = GetFrameType(pframe);
+ uint frsubtype = GetFrameSubType(pframe);
+
+_func_enter_;
+ frsubtype = frsubtype>>4;
+
+ _rtw_memset((void *)mic_iv, 0, 16);
+ _rtw_memset((void *)mic_header1, 0, 16);
+ _rtw_memset((void *)mic_header2, 0, 16);
+ _rtw_memset((void *)ctr_preload, 0, 16);
+ _rtw_memset((void *)chain_buffer, 0, 16);
+ _rtw_memset((void *)aes_out, 0, 16);
+ _rtw_memset((void *)padded_buffer, 0, 16);
+
+ if ((hdrlen == WLAN_HDR_A3_LEN) || (hdrlen == WLAN_HDR_A3_QOS_LEN))
+ a4_exists = 0;
+ else
+ a4_exists = 1;
+
+ if ((frtype == WIFI_DATA_CFACK) || (frtype == WIFI_DATA_CFPOLL) || (frtype == WIFI_DATA_CFACKPOLL)) {
+ qc_exists = 1;
+ if (hdrlen != WLAN_HDR_A3_QOS_LEN)
+ hdrlen += 2;
+ } else if ((frsubtype == 0x08) || (frsubtype == 0x09) || (frsubtype == 0x0a) || (frsubtype == 0x0b)) {
+ if (hdrlen != WLAN_HDR_A3_QOS_LEN)
+ hdrlen += 2;
+ qc_exists = 1;
+ } else {
+ qc_exists = 0;
+ }
+
+ pn_vector[0] = pframe[hdrlen];
+ pn_vector[1] = pframe[hdrlen+1];
+ pn_vector[2] = pframe[hdrlen+4];
+ pn_vector[3] = pframe[hdrlen+5];
+ pn_vector[4] = pframe[hdrlen+6];
+ pn_vector[5] = pframe[hdrlen+7];
+
+ construct_mic_iv(mic_iv, qc_exists, a4_exists, pframe, plen, pn_vector);
+
+ construct_mic_header1(mic_header1, hdrlen, pframe);
+ construct_mic_header2(mic_header2, pframe, a4_exists, qc_exists);
+
+ payload_remainder = plen % 16;
+ num_blocks = plen / 16;
+
+ /* Find start of payload */
+ payload_index = (hdrlen + 8);
+
+ /* Calculate MIC */
+ aes128k128d(key, mic_iv, aes_out);
+ bitwise_xor(aes_out, mic_header1, chain_buffer);
+ aes128k128d(key, chain_buffer, aes_out);
+ bitwise_xor(aes_out, mic_header2, chain_buffer);
+ aes128k128d(key, chain_buffer, aes_out);
+
+ for (i = 0; i < num_blocks; i++) {
+ bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);/* bitwise_xor(aes_out, &message[payload_index], chain_buffer); */
+
+ payload_index += 16;
+ aes128k128d(key, chain_buffer, aes_out);
+ }
+
+ /* Add on the final payload block if it needs padding */
+ if (payload_remainder > 0) {
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < payload_remainder; j++)
+ padded_buffer[j] = pframe[payload_index++];/* padded_buffer[j] = message[payload_index++]; */
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ aes128k128d(key, chain_buffer, aes_out);
+ }
+
+ for (j = 0; j < 8; j++)
+ mic[j] = aes_out[j];
+
+ /* Insert MIC into payload */
+ for (j = 0; j < 8; j++)
+ pframe[payload_index+j] = mic[j]; /* message[payload_index+j] = mic[j]; */
+
+ payload_index = hdrlen + 8;
+ for (i = 0; i < num_blocks; i++) {
+ construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe, pn_vector, i+1);
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
+ for (j = 0; j < 16; j++)
+ pframe[payload_index++] = chain_buffer[j];
+ }
+
+ if (payload_remainder > 0) { /* If there is a short final block, then pad it,*/
+ /* encrypt it and copy the unpadded part back */
+ construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe, pn_vector, num_blocks+1);
+
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < payload_remainder; j++)
+ padded_buffer[j] = pframe[payload_index+j];
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ for (j = 0; j < payload_remainder; j++)
+ pframe[payload_index++] = chain_buffer[j];
+ }
+ /* Encrypt the MIC */
+ construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe, pn_vector, 0);
+
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < 8; j++)
+ padded_buffer[j] = pframe[j+hdrlen+8+plen];
+
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ for (j = 0; j < 8; j++)
+ pframe[payload_index++] = chain_buffer[j];
+_func_exit_;
+ return _SUCCESS;
+}
+
+u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe)
+{ /* exclude ICV */
+
+ /* Intermediate Buffers */
+ int curfragnum, length;
+ u8 *pframe, *prwskey; /* *payload,*iv */
+ u8 hw_hdr_offset = 0;
+ struct sta_info *stainfo;
+ struct pkt_attrib *pattrib = &((struct xmit_frame *)pxmitframe)->attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ u32 res = _SUCCESS;
+_func_enter_;
+
+ if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL)
+ return _FAIL;
+
+ hw_hdr_offset = TXDESC_SIZE +
+ (((struct xmit_frame *)pxmitframe)->pkt_offset * PACKET_OFFSET_SZ);
+
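+	/* the 802.11 frame starts after the TX descriptor and any per-packet */
+	/* offset reserved in the xmit buffer */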
+ pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;
+
+	/* start to encrypt each fragment */
+	if (pattrib->encrypt == _AES_) {
+ if (pattrib->psta)
+ stainfo = pattrib->psta;
+ else
+ stainfo = rtw_get_stainfo(&padapter->stapriv, &pattrib->ra[0]);
+
+ if (stainfo != NULL) {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_aes_encrypt: stainfo!= NULL!!!\n"));
+
+ if (IS_MCAST(pattrib->ra))
+ prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey;
+ else
+ prwskey = &stainfo->dot118021x_UncstKey.skey[0];
+ for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
+				if ((curfragnum+1) == pattrib->nr_frags) { /* the last fragment */
+ length = pattrib->last_txcmdsz-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
+
+ aes_cipher(prwskey, pattrib->hdrlen, pframe, length);
+				} else {
+					length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
+
+ aes_cipher(prwskey, pattrib->hdrlen, pframe, length);
+ pframe += pxmitpriv->frag_len;
+ pframe = (u8 *)RND4((size_t)(pframe));
+ }
+ }
+		} else {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_aes_encrypt: stainfo==NULL!!!\n"));
+ res = _FAIL;
+ }
+ }
+
+
+_func_exit_;
+ return res;
+}
+
+static int aes_decipher(u8 *key, uint hdrlen,
+ u8 *pframe, uint plen)
+{
+ static u8 message[MAX_MSG_SIZE];
+ uint qc_exists, a4_exists, i, j, payload_remainder,
+ num_blocks, payload_index;
+ int res = _SUCCESS;
+ u8 pn_vector[6];
+ u8 mic_iv[16];
+ u8 mic_header1[16];
+ u8 mic_header2[16];
+ u8 ctr_preload[16];
+
+ /* Intermediate Buffers */
+ u8 chain_buffer[16];
+ u8 aes_out[16];
+ u8 padded_buffer[16];
+ u8 mic[8];
+
+ uint frtype = GetFrameType(pframe);
+ uint frsubtype = GetFrameSubType(pframe);
+_func_enter_;
+ frsubtype = frsubtype>>4;
+
+ _rtw_memset((void *)mic_iv, 0, 16);
+ _rtw_memset((void *)mic_header1, 0, 16);
+ _rtw_memset((void *)mic_header2, 0, 16);
+ _rtw_memset((void *)ctr_preload, 0, 16);
+ _rtw_memset((void *)chain_buffer, 0, 16);
+ _rtw_memset((void *)aes_out, 0, 16);
+ _rtw_memset((void *)padded_buffer, 0, 16);
+
+ /* start to decrypt the payload */
+
+	num_blocks = (plen-8) / 16; /* plen includes LLC, payload and MIC */
+
+ payload_remainder = (plen-8) % 16;
+
+ pn_vector[0] = pframe[hdrlen];
+ pn_vector[1] = pframe[hdrlen+1];
+ pn_vector[2] = pframe[hdrlen+4];
+ pn_vector[3] = pframe[hdrlen+5];
+ pn_vector[4] = pframe[hdrlen+6];
+ pn_vector[5] = pframe[hdrlen+7];
+
+ if ((hdrlen == WLAN_HDR_A3_LEN) || (hdrlen == WLAN_HDR_A3_QOS_LEN))
+ a4_exists = 0;
+ else
+ a4_exists = 1;
+
+ if ((frtype == WIFI_DATA_CFACK) || (frtype == WIFI_DATA_CFPOLL) ||
+ (frtype == WIFI_DATA_CFACKPOLL)) {
+ qc_exists = 1;
+ if (hdrlen != WLAN_HDR_A3_QOS_LEN)
+ hdrlen += 2;
+ } else if ((frsubtype == 0x08) || (frsubtype == 0x09) ||
+ (frsubtype == 0x0a) || (frsubtype == 0x0b)) {
+ if (hdrlen != WLAN_HDR_A3_QOS_LEN)
+ hdrlen += 2;
+ qc_exists = 1;
+ } else {
+ qc_exists = 0;
+ }
+
+ /* now, decrypt pframe with hdrlen offset and plen long */
+
+ payload_index = hdrlen + 8; /* 8 is for extiv */
+
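+	/* CTR-decrypt the payload in place, using the same counter values */
+	/* (1..num_blocks) that the sender used */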
+ for (i = 0; i < num_blocks; i++) {
+ construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe, pn_vector, i+1);
+
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
+
+ for (j = 0; j < 16; j++)
+ pframe[payload_index++] = chain_buffer[j];
+ }
+
+ if (payload_remainder > 0) { /* If there is a short final block, then pad it,*/
+ /* encrypt it and copy the unpadded part back */
+ construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe, pn_vector, num_blocks+1);
+
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < payload_remainder; j++)
+ padded_buffer[j] = pframe[payload_index+j];
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ for (j = 0; j < payload_remainder; j++)
+ pframe[payload_index++] = chain_buffer[j];
+ }
+
+ /* start to calculate the mic */
+ if ((hdrlen+plen+8) <= MAX_MSG_SIZE)
+ memcpy(message, pframe, (hdrlen + plen+8)); /* 8 is for ext iv len */
+
+ pn_vector[0] = pframe[hdrlen];
+ pn_vector[1] = pframe[hdrlen+1];
+ pn_vector[2] = pframe[hdrlen+4];
+ pn_vector[3] = pframe[hdrlen+5];
+ pn_vector[4] = pframe[hdrlen+6];
+ pn_vector[5] = pframe[hdrlen+7];
+ construct_mic_iv(mic_iv, qc_exists, a4_exists, message, plen-8, pn_vector);
+
+ construct_mic_header1(mic_header1, hdrlen, message);
+ construct_mic_header2(mic_header2, message, a4_exists, qc_exists);
+
+ payload_remainder = (plen-8) % 16;
+ num_blocks = (plen-8) / 16;
+
+ /* Find start of payload */
+ payload_index = (hdrlen + 8);
+
+ /* Calculate MIC */
+ aes128k128d(key, mic_iv, aes_out);
+ bitwise_xor(aes_out, mic_header1, chain_buffer);
+ aes128k128d(key, chain_buffer, aes_out);
+ bitwise_xor(aes_out, mic_header2, chain_buffer);
+ aes128k128d(key, chain_buffer, aes_out);
+
+ for (i = 0; i < num_blocks; i++) {
+ bitwise_xor(aes_out, &message[payload_index], chain_buffer);
+
+ payload_index += 16;
+ aes128k128d(key, chain_buffer, aes_out);
+ }
+
+ /* Add on the final payload block if it needs padding */
+ if (payload_remainder > 0) {
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < payload_remainder; j++)
+ padded_buffer[j] = message[payload_index++];
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ aes128k128d(key, chain_buffer, aes_out);
+ }
+
+ for (j = 0 ; j < 8; j++)
+ mic[j] = aes_out[j];
+
+ /* Insert MIC into payload */
+ for (j = 0; j < 8; j++)
+ message[payload_index+j] = mic[j];
+
+ payload_index = hdrlen + 8;
+ for (i = 0; i < num_blocks; i++) {
+ construct_ctr_preload(ctr_preload, a4_exists, qc_exists, message, pn_vector, i+1);
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, &message[payload_index], chain_buffer);
+ for (j = 0; j < 16; j++)
+ message[payload_index++] = chain_buffer[j];
+ }
+
+ if (payload_remainder > 0) { /* If there is a short final block, then pad it,*/
+ /* encrypt it and copy the unpadded part back */
+ construct_ctr_preload(ctr_preload, a4_exists, qc_exists, message, pn_vector, num_blocks+1);
+
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < payload_remainder; j++)
+ padded_buffer[j] = message[payload_index+j];
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ for (j = 0; j < payload_remainder; j++)
+ message[payload_index++] = chain_buffer[j];
+ }
+
+ /* Encrypt the MIC */
+ construct_ctr_preload(ctr_preload, a4_exists, qc_exists, message, pn_vector, 0);
+
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < 8; j++)
+ padded_buffer[j] = message[j+hdrlen+8+plen-8];
+
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ for (j = 0; j < 8; j++)
+ message[payload_index++] = chain_buffer[j];
+
+ /* compare the mic */
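+	/* the computed MIC was CTR-encrypted with counter 0 above, so it can be */
+	/* compared directly with the still-encrypted MIC at the tail of pframe */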
+ for (i = 0; i < 8; i++) {
+ if (pframe[hdrlen+8+plen-8+i] != message[hdrlen+8+plen-8+i]) {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
+ ("aes_decipher:mic check error mic[%d]: pframe(%x)!=message(%x)\n",
+ i, pframe[hdrlen+8+plen-8+i], message[hdrlen+8+plen-8+i]));
+ DBG_88E("aes_decipher:mic check error mic[%d]: pframe(%x)!=message(%x)\n",
+ i, pframe[hdrlen+8+plen-8+i], message[hdrlen+8+plen-8+i]);
+ res = _FAIL;
+ }
+ }
+_func_exit_;
+ return res;
+}
+
+u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
+{ /* exclude ICV */
+ /* Intermediate Buffers */
+ int length;
+ u8 *pframe, *prwskey; /* *payload,*iv */
+ struct sta_info *stainfo;
+ struct rx_pkt_attrib *prxattrib = &((union recv_frame *)precvframe)->u.hdr.attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ u32 res = _SUCCESS;
+_func_enter_;
+ pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
+	/* start to decrypt each fragment */
+	if (prxattrib->encrypt == _AES_) {
+ stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
+ if (stainfo != NULL) {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_aes_decrypt: stainfo!= NULL!!!\n"));
+
+ if (IS_MCAST(prxattrib->ra)) {
+				/* in concurrent mode we should use software decrypt for the group key, so this message was removed */
+ if (!psecuritypriv->binstallGrpkey) {
+ res = _FAIL;
+ DBG_88E("%s:rx bc/mc packets, but didn't install group key!!!!!!!!!!\n", __func__);
+ goto exit;
+ }
+ prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
+ if (psecuritypriv->dot118021XGrpKeyid != prxattrib->key_index) {
+ DBG_88E("not match packet_index=%d, install_index=%d\n",
+ prxattrib->key_index, psecuritypriv->dot118021XGrpKeyid);
+ res = _FAIL;
+ goto exit;
+ }
+ } else {
+ prwskey = &stainfo->dot118021x_UncstKey.skey[0];
+ }
+ length = ((union recv_frame *)precvframe)->u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len;
+ res = aes_decipher(prwskey, prxattrib->hdrlen, pframe, length);
+ } else {
+			RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_aes_decrypt: stainfo==NULL!!!\n"));
+ res = _FAIL;
+ }
+ }
+exit:
+_func_exit_;
+ return res;
+}
+
+/* AES tables*/
+const u32 Te0[256] = {
+ 0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU,
+ 0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U,
+ 0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU,
+ 0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU,
+ 0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U,
+ 0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU,
+ 0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU,
+ 0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU,
+ 0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU,
+ 0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU,
+ 0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U,
+ 0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU,
+ 0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU,
+ 0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U,
+ 0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU,
+ 0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU,
+ 0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU,
+ 0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU,
+ 0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU,
+ 0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U,
+ 0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU,
+ 0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU,
+ 0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU,
+ 0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU,
+ 0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U,
+ 0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U,
+ 0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U,
+ 0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U,
+ 0xa25151f3U, 0x5da3a3feU, 0x804040c0U, 0x058f8f8aU,
+ 0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U,
+ 0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U,
+ 0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU,
+ 0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU,
+ 0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, 0x2e171739U,
+ 0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U,
+ 0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U,
+ 0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU,
+ 0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U,
+ 0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU,
+ 0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U,
+ 0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU,
+ 0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U,
+ 0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U,
+ 0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU,
+ 0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U,
+ 0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U,
+ 0xd86c6cb4U, 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U,
+ 0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U,
+ 0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U,
+ 0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U,
+ 0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U,
+ 0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U,
+ 0xe0707090U, 0x7c3e3e42U, 0x71b5b5c4U, 0xcc6666aaU,
+ 0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U,
+ 0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U,
+ 0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U,
+ 0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U,
+ 0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U,
+ 0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U,
+ 0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU,
+ 0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U,
+ 0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U,
+ 0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U,
+ 0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU,
+};
+
+const u32 Td0[256] = {
+ 0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U,
+ 0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U,
+ 0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U,
+ 0x4fe5d7fcU, 0xc52acbd7U, 0x26354480U, 0xb562a38fU,
+ 0xdeb15a49U, 0x25ba1b67U, 0x45ea0e98U, 0x5dfec0e1U,
+ 0xc32f7502U, 0x814cf012U, 0x8d4697a3U, 0x6bd3f9c6U,
+ 0x038f5fe7U, 0x15929c95U, 0xbf6d7aebU, 0x955259daU,
+ 0xd4be832dU, 0x587421d3U, 0x49e06929U, 0x8ec9c844U,
+ 0x75c2896aU, 0xf48e7978U, 0x99583e6bU, 0x27b971ddU,
+ 0xbee14fb6U, 0xf088ad17U, 0xc920ac66U, 0x7dce3ab4U,
+ 0x63df4a18U, 0xe51a3182U, 0x97513360U, 0x62537f45U,
+ 0xb16477e0U, 0xbb6bae84U, 0xfe81a01cU, 0xf9082b94U,
+ 0x70486858U, 0x8f45fd19U, 0x94de6c87U, 0x527bf8b7U,
+ 0xab73d323U, 0x724b02e2U, 0xe31f8f57U, 0x6655ab2aU,
+ 0xb2eb2807U, 0x2fb5c203U, 0x86c57b9aU, 0xd33708a5U,
+ 0x302887f2U, 0x23bfa5b2U, 0x02036abaU, 0xed16825cU,
+ 0x8acf1c2bU, 0xa779b492U, 0xf307f2f0U, 0x4e69e2a1U,
+ 0x65daf4cdU, 0x0605bed5U, 0xd134621fU, 0xc4a6fe8aU,
+ 0x342e539dU, 0xa2f355a0U, 0x058ae132U, 0xa4f6eb75U,
+ 0x0b83ec39U, 0x4060efaaU, 0x5e719f06U, 0xbd6e1051U,
+ 0x3e218af9U, 0x96dd063dU, 0xdd3e05aeU, 0x4de6bd46U,
+ 0x91548db5U, 0x71c45d05U, 0x0406d46fU, 0x605015ffU,
+ 0x1998fb24U, 0xd6bde997U, 0x894043ccU, 0x67d99e77U,
+ 0xb0e842bdU, 0x07898b88U, 0xe7195b38U, 0x79c8eedbU,
+ 0xa17c0a47U, 0x7c420fe9U, 0xf8841ec9U, 0x00000000U,
+ 0x09808683U, 0x322bed48U, 0x1e1170acU, 0x6c5a724eU,
+ 0xfd0efffbU, 0x0f853856U, 0x3daed51eU, 0x362d3927U,
+ 0x0a0fd964U, 0x685ca621U, 0x9b5b54d1U, 0x24362e3aU,
+ 0x0c0a67b1U, 0x9357e70fU, 0xb4ee96d2U, 0x1b9b919eU,
+ 0x80c0c54fU, 0x61dc20a2U, 0x5a774b69U, 0x1c121a16U,
+ 0xe293ba0aU, 0xc0a02ae5U, 0x3c22e043U, 0x121b171dU,
+ 0x0e090d0bU, 0xf28bc7adU, 0x2db6a8b9U, 0x141ea9c8U,
+ 0x57f11985U, 0xaf75074cU, 0xee99ddbbU, 0xa37f60fdU,
+ 0xf701269fU, 0x5c72f5bcU, 0x44663bc5U, 0x5bfb7e34U,
+ 0x8b432976U, 0xcb23c6dcU, 0xb6edfc68U, 0xb8e4f163U,
+ 0xd731dccaU, 0x42638510U, 0x13972240U, 0x84c61120U,
+ 0x854a247dU, 0xd2bb3df8U, 0xaef93211U, 0xc729a16dU,
+ 0x1d9e2f4bU, 0xdcb230f3U, 0x0d8652ecU, 0x77c1e3d0U,
+ 0x2bb3166cU, 0xa970b999U, 0x119448faU, 0x47e96422U,
+ 0xa8fc8cc4U, 0xa0f03f1aU, 0x567d2cd8U, 0x223390efU,
+ 0x87494ec7U, 0xd938d1c1U, 0x8ccaa2feU, 0x98d40b36U,
+ 0xa6f581cfU, 0xa57ade28U, 0xdab78e26U, 0x3fadbfa4U,
+ 0x2c3a9de4U, 0x5078920dU, 0x6a5fcc9bU, 0x547e4662U,
+ 0xf68d13c2U, 0x90d8b8e8U, 0x2e39f75eU, 0x82c3aff5U,
+ 0x9f5d80beU, 0x69d0937cU, 0x6fd52da9U, 0xcf2512b3U,
+ 0xc8ac993bU, 0x10187da7U, 0xe89c636eU, 0xdb3bbb7bU,
+ 0xcd267809U, 0x6e5918f4U, 0xec9ab701U, 0x834f9aa8U,
+ 0xe6956e65U, 0xaaffe67eU, 0x21bccf08U, 0xef15e8e6U,
+ 0xbae79bd9U, 0x4a6f36ceU, 0xea9f09d4U, 0x29b07cd6U,
+ 0x31a4b2afU, 0x2a3f2331U, 0xc6a59430U, 0x35a266c0U,
+ 0x744ebc37U, 0xfc82caa6U, 0xe090d0b0U, 0x33a7d815U,
+ 0xf104984aU, 0x41ecdaf7U, 0x7fcd500eU, 0x1791f62fU,
+ 0x764dd68dU, 0x43efb04dU, 0xccaa4d54U, 0xe49604dfU,
+ 0x9ed1b5e3U, 0x4c6a881bU, 0xc12c1fb8U, 0x4665517fU,
+ 0x9d5eea04U, 0x018c355dU, 0xfa877473U, 0xfb0b412eU,
+ 0xb3671d5aU, 0x92dbd252U, 0xe9105633U, 0x6dd64713U,
+ 0x9ad7618cU, 0x37a10c7aU, 0x59f8148eU, 0xeb133c89U,
+ 0xcea927eeU, 0xb761c935U, 0xe11ce5edU, 0x7a47b13cU,
+ 0x9cd2df59U, 0x55f2733fU, 0x1814ce79U, 0x73c737bfU,
+ 0x53f7cdeaU, 0x5ffdaa5bU, 0xdf3d6f14U, 0x7844db86U,
+ 0xcaaff381U, 0xb968c43eU, 0x3824342cU, 0xc2a3405fU,
+ 0x161dc372U, 0xbce2250cU, 0x283c498bU, 0xff0d9541U,
+ 0x39a80171U, 0x080cb3deU, 0xd8b4e49cU, 0x6456c190U,
+ 0x7bcb8461U, 0xd532b670U, 0x486c5c74U, 0xd0b85742U,
+};
+
+const u8 Td4s[256] = {
+ 0x52U, 0x09U, 0x6aU, 0xd5U, 0x30U, 0x36U, 0xa5U, 0x38U,
+ 0xbfU, 0x40U, 0xa3U, 0x9eU, 0x81U, 0xf3U, 0xd7U, 0xfbU,
+ 0x7cU, 0xe3U, 0x39U, 0x82U, 0x9bU, 0x2fU, 0xffU, 0x87U,
+ 0x34U, 0x8eU, 0x43U, 0x44U, 0xc4U, 0xdeU, 0xe9U, 0xcbU,
+ 0x54U, 0x7bU, 0x94U, 0x32U, 0xa6U, 0xc2U, 0x23U, 0x3dU,
+ 0xeeU, 0x4cU, 0x95U, 0x0bU, 0x42U, 0xfaU, 0xc3U, 0x4eU,
+ 0x08U, 0x2eU, 0xa1U, 0x66U, 0x28U, 0xd9U, 0x24U, 0xb2U,
+ 0x76U, 0x5bU, 0xa2U, 0x49U, 0x6dU, 0x8bU, 0xd1U, 0x25U,
+ 0x72U, 0xf8U, 0xf6U, 0x64U, 0x86U, 0x68U, 0x98U, 0x16U,
+ 0xd4U, 0xa4U, 0x5cU, 0xccU, 0x5dU, 0x65U, 0xb6U, 0x92U,
+ 0x6cU, 0x70U, 0x48U, 0x50U, 0xfdU, 0xedU, 0xb9U, 0xdaU,
+ 0x5eU, 0x15U, 0x46U, 0x57U, 0xa7U, 0x8dU, 0x9dU, 0x84U,
+ 0x90U, 0xd8U, 0xabU, 0x00U, 0x8cU, 0xbcU, 0xd3U, 0x0aU,
+ 0xf7U, 0xe4U, 0x58U, 0x05U, 0xb8U, 0xb3U, 0x45U, 0x06U,
+ 0xd0U, 0x2cU, 0x1eU, 0x8fU, 0xcaU, 0x3fU, 0x0fU, 0x02U,
+ 0xc1U, 0xafU, 0xbdU, 0x03U, 0x01U, 0x13U, 0x8aU, 0x6bU,
+ 0x3aU, 0x91U, 0x11U, 0x41U, 0x4fU, 0x67U, 0xdcU, 0xeaU,
+ 0x97U, 0xf2U, 0xcfU, 0xceU, 0xf0U, 0xb4U, 0xe6U, 0x73U,
+ 0x96U, 0xacU, 0x74U, 0x22U, 0xe7U, 0xadU, 0x35U, 0x85U,
+ 0xe2U, 0xf9U, 0x37U, 0xe8U, 0x1cU, 0x75U, 0xdfU, 0x6eU,
+ 0x47U, 0xf1U, 0x1aU, 0x71U, 0x1dU, 0x29U, 0xc5U, 0x89U,
+ 0x6fU, 0xb7U, 0x62U, 0x0eU, 0xaaU, 0x18U, 0xbeU, 0x1bU,
+ 0xfcU, 0x56U, 0x3eU, 0x4bU, 0xc6U, 0xd2U, 0x79U, 0x20U,
+ 0x9aU, 0xdbU, 0xc0U, 0xfeU, 0x78U, 0xcdU, 0x5aU, 0xf4U,
+ 0x1fU, 0xddU, 0xa8U, 0x33U, 0x88U, 0x07U, 0xc7U, 0x31U,
+ 0xb1U, 0x12U, 0x10U, 0x59U, 0x27U, 0x80U, 0xecU, 0x5fU,
+ 0x60U, 0x51U, 0x7fU, 0xa9U, 0x19U, 0xb5U, 0x4aU, 0x0dU,
+ 0x2dU, 0xe5U, 0x7aU, 0x9fU, 0x93U, 0xc9U, 0x9cU, 0xefU,
+ 0xa0U, 0xe0U, 0x3bU, 0x4dU, 0xaeU, 0x2aU, 0xf5U, 0xb0U,
+ 0xc8U, 0xebU, 0xbbU, 0x3cU, 0x83U, 0x53U, 0x99U, 0x61U,
+ 0x17U, 0x2bU, 0x04U, 0x7eU, 0xbaU, 0x77U, 0xd6U, 0x26U,
+ 0xe1U, 0x69U, 0x14U, 0x63U, 0x55U, 0x21U, 0x0cU, 0x7dU,
+};
+const u8 rcons[] = {
+ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36
+ /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
+};
+
+/*
+ * ROUND - one full AES encryption round over the four 32-bit state words,
+ * using the Te lookup tables and the round key words rk[4*i]..rk[4*i+3].
+ */
+#define ROUND(i, d, s) \
+do { \
+ d##0 = TE0(s##0) ^ TE1(s##1) ^ TE2(s##2) ^ TE3(s##3) ^ rk[4 * i]; \
+ d##1 = TE0(s##1) ^ TE1(s##2) ^ TE2(s##3) ^ TE3(s##0) ^ rk[4 * i + 1]; \
+ d##2 = TE0(s##2) ^ TE1(s##3) ^ TE2(s##0) ^ TE3(s##1) ^ rk[4 * i + 2]; \
+ d##3 = TE0(s##3) ^ TE1(s##0) ^ TE2(s##1) ^ TE3(s##2) ^ rk[4 * i + 3]; \
+} while (0)
+
+void rtw_use_tkipkey_handler(void *FunctionContext)
+{
+ struct adapter *padapter = (struct adapter *)FunctionContext;
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("^^^rtw_use_tkipkey_handler ^^^\n"));
+
+ padapter->securitypriv.busetkipkey = true;
+
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("^^^rtw_use_tkipkey_handler padapter->securitypriv.busetkipkey=%d^^^\n", padapter->securitypriv.busetkipkey));
+
+_func_exit_;
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_sreset.c b/drivers/staging/rtl8188eu/core/rtw_sreset.c
new file mode 100644
index 0000000..298f754
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_sreset.c
@@ -0,0 +1,79 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#include <rtw_sreset.h>
+
+void sreset_init_value(struct adapter *padapter)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct sreset_priv *psrtpriv = &pHalData->srestpriv;
+
+ _rtw_mutex_init(&psrtpriv->silentreset_mutex);
+ psrtpriv->silent_reset_inprogress = false;
+ psrtpriv->Wifi_Error_Status = WIFI_STATUS_SUCCESS;
+ psrtpriv->last_tx_time = 0;
+ psrtpriv->last_tx_complete_time = 0;
+}
+
+void sreset_reset_value(struct adapter *padapter)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct sreset_priv *psrtpriv = &pHalData->srestpriv;
+
+ psrtpriv->silent_reset_inprogress = false;
+ psrtpriv->Wifi_Error_Status = WIFI_STATUS_SUCCESS;
+ psrtpriv->last_tx_time = 0;
+ psrtpriv->last_tx_complete_time = 0;
+}
+
+u8 sreset_get_wifi_status(struct adapter *padapter)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct sreset_priv *psrtpriv = &pHalData->srestpriv;
+
+ u8 status = WIFI_STATUS_SUCCESS;
+ u32 val32 = 0;
+
+ if (psrtpriv->silent_reset_inprogress)
+ return status;
+ val32 = rtw_read32(padapter, REG_TXDMA_STATUS);
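+	/* a read-back of 0xeaeaeaea means the device gave no response (interface gone); */
+	/* any other non-zero value is treated as a TXDMA error */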
+ if (val32 == 0xeaeaeaea) {
+ psrtpriv->Wifi_Error_Status = WIFI_IF_NOT_EXIST;
+ } else if (val32 != 0) {
+		DBG_88E("txdma status(%x)\n", val32);
+ psrtpriv->Wifi_Error_Status = WIFI_MAC_TXDMA_ERROR;
+ }
+
+	if (psrtpriv->Wifi_Error_Status != WIFI_STATUS_SUCCESS) {
+ DBG_88E("==>%s error_status(0x%x)\n", __func__, psrtpriv->Wifi_Error_Status);
+ status = (psrtpriv->Wifi_Error_Status & (~(USB_READ_PORT_FAIL|USB_WRITE_PORT_FAIL)));
+ }
+ DBG_88E("==> %s wifi_status(0x%x)\n", __func__, status);
+
+ /* status restore */
+ psrtpriv->Wifi_Error_Status = WIFI_STATUS_SUCCESS;
+
+ return status;
+}
+
+void sreset_set_wifi_error_status(struct adapter *padapter, u32 status)
+{
+	struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+
+	pHalData->srestpriv.Wifi_Error_Status = status;
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
new file mode 100644
index 0000000..c2977be
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -0,0 +1,655 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_STA_MGT_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <recv_osdep.h>
+#include <xmit_osdep.h>
+#include <mlme_osdep.h>
+#include <sta_info.h>
+
+static void _rtw_init_stainfo(struct sta_info *psta)
+{
+_func_enter_;
+	_rtw_memset((u8 *)psta, 0, sizeof(struct sta_info));
+
+ _rtw_spinlock_init(&psta->lock);
+ _rtw_init_listhead(&psta->list);
+ _rtw_init_listhead(&psta->hash_list);
+ _rtw_init_queue(&psta->sleep_q);
+ psta->sleepq_len = 0;
+
+ _rtw_init_sta_xmit_priv(&psta->sta_xmitpriv);
+ _rtw_init_sta_recv_priv(&psta->sta_recvpriv);
+
+#ifdef CONFIG_88EU_AP_MODE
+
+ _rtw_init_listhead(&psta->asoc_list);
+
+ _rtw_init_listhead(&psta->auth_list);
+
+ psta->expire_to = 0;
+
+ psta->flags = 0;
+
+ psta->capability = 0;
+
+ psta->bpairwise_key_installed = false;
+
+#ifdef CONFIG_88EU_AP_MODE
+ psta->nonerp_set = 0;
+ psta->no_short_slot_time_set = 0;
+ psta->no_short_preamble_set = 0;
+ psta->no_ht_gf_set = 0;
+ psta->no_ht_set = 0;
+ psta->ht_20mhz_set = 0;
+#endif
+
+ psta->under_exist_checking = 0;
+
+ psta->keep_alive_trycnt = 0;
+
+#endif /* CONFIG_88EU_AP_MODE */
+
+_func_exit_;
+}
+
+u32 _rtw_init_sta_priv(struct sta_priv *pstapriv)
+{
+ struct sta_info *psta;
+ s32 i;
+
+_func_enter_;
+
+ pstapriv->pallocated_stainfo_buf = rtw_zvmalloc(sizeof(struct sta_info) * NUM_STA + 4);
+
+ if (!pstapriv->pallocated_stainfo_buf)
+ return _FAIL;
+
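+	/* align the sta_info array to a 4-byte boundary within the allocation */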
+ pstapriv->pstainfo_buf = pstapriv->pallocated_stainfo_buf + 4 -
+ ((size_t)(pstapriv->pallocated_stainfo_buf) & 3);
+
+ _rtw_init_queue(&pstapriv->free_sta_queue);
+
+ _rtw_spinlock_init(&pstapriv->sta_hash_lock);
+
+ pstapriv->asoc_sta_count = 0;
+ _rtw_init_queue(&pstapriv->sleep_q);
+ _rtw_init_queue(&pstapriv->wakeup_q);
+
+ psta = (struct sta_info *)(pstapriv->pstainfo_buf);
+
+ for (i = 0; i < NUM_STA; i++) {
+ _rtw_init_stainfo(psta);
+
+ _rtw_init_listhead(&(pstapriv->sta_hash[i]));
+
+ rtw_list_insert_tail(&psta->list, get_list_head(&pstapriv->free_sta_queue));
+
+ psta++;
+ }
+
+#ifdef CONFIG_88EU_AP_MODE
+
+ pstapriv->sta_dz_bitmap = 0;
+ pstapriv->tim_bitmap = 0;
+
+ _rtw_init_listhead(&pstapriv->asoc_list);
+ _rtw_init_listhead(&pstapriv->auth_list);
+ _rtw_spinlock_init(&pstapriv->asoc_list_lock);
+ _rtw_spinlock_init(&pstapriv->auth_list_lock);
+ pstapriv->asoc_list_cnt = 0;
+ pstapriv->auth_list_cnt = 0;
+
+ pstapriv->auth_to = 3; /* 3*2 = 6 sec */
+ pstapriv->assoc_to = 3;
+ pstapriv->expire_to = 3; /* 3*2 = 6 sec */
+ pstapriv->max_num_sta = NUM_STA;
+#endif
+
+_func_exit_;
+
+ return _SUCCESS;
+}
+
+inline int rtw_stainfo_offset(struct sta_priv *stapriv, struct sta_info *sta)
+{
+ int offset = (((u8 *)sta) - stapriv->pstainfo_buf)/sizeof(struct sta_info);
+
+ if (!stainfo_offset_valid(offset))
+ DBG_88E("%s invalid offset(%d), out of range!!!", __func__, offset);
+
+ return offset;
+}
+
+inline struct sta_info *rtw_get_stainfo_by_offset(struct sta_priv *stapriv, int offset)
+{
+ if (!stainfo_offset_valid(offset))
+ DBG_88E("%s invalid offset(%d), out of range!!!", __func__, offset);
+
+ return (struct sta_info *)(stapriv->pstainfo_buf + offset * sizeof(struct sta_info));
+}
+
+void _rtw_free_sta_xmit_priv_lock(struct sta_xmit_priv *psta_xmitpriv);
+void _rtw_free_sta_xmit_priv_lock(struct sta_xmit_priv *psta_xmitpriv)
+{
+_func_enter_;
+
+ _rtw_spinlock_free(&psta_xmitpriv->lock);
+
+ _rtw_spinlock_free(&(psta_xmitpriv->be_q.sta_pending.lock));
+ _rtw_spinlock_free(&(psta_xmitpriv->bk_q.sta_pending.lock));
+ _rtw_spinlock_free(&(psta_xmitpriv->vi_q.sta_pending.lock));
+ _rtw_spinlock_free(&(psta_xmitpriv->vo_q.sta_pending.lock));
+_func_exit_;
+}
+
+static void _rtw_free_sta_recv_priv_lock(struct sta_recv_priv *psta_recvpriv)
+{
+_func_enter_;
+
+ _rtw_spinlock_free(&psta_recvpriv->lock);
+
+ _rtw_spinlock_free(&(psta_recvpriv->defrag_q.lock));
+
+_func_exit_;
+}
+
+void rtw_mfree_stainfo(struct sta_info *psta);
+void rtw_mfree_stainfo(struct sta_info *psta)
+{
+_func_enter_;
+
+	_rtw_spinlock_free(&psta->lock);
+
+ _rtw_free_sta_xmit_priv_lock(&psta->sta_xmitpriv);
+ _rtw_free_sta_recv_priv_lock(&psta->sta_recvpriv);
+
+_func_exit_;
+}
+
+/* this function is used to free the memory of lock || sema for all stainfos */
+void rtw_mfree_all_stainfo(struct sta_priv *pstapriv);
+void rtw_mfree_all_stainfo(struct sta_priv *pstapriv)
+{
+ unsigned long irql;
+ struct list_head *plist, *phead;
+ struct sta_info *psta = NULL;
+
+_func_enter_;
+
+ _enter_critical_bh(&pstapriv->sta_hash_lock, &irql);
+
+ phead = get_list_head(&pstapriv->free_sta_queue);
+ plist = get_next(phead);
+
+ while ((rtw_end_of_queue_search(phead, plist)) == false) {
+		psta = LIST_CONTAINOR(plist, struct sta_info, list);
+ plist = get_next(plist);
+
+ rtw_mfree_stainfo(psta);
+ }
+
+ _exit_critical_bh(&pstapriv->sta_hash_lock, &irql);
+
+_func_exit_;
+}
+
+static void rtw_mfree_sta_priv_lock(struct sta_priv *pstapriv)
+{
+#ifdef CONFIG_88EU_AP_MODE
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+#endif
+
+ rtw_mfree_all_stainfo(pstapriv); /* be done before free sta_hash_lock */
+
+ _rtw_spinlock_free(&pstapriv->free_sta_queue.lock);
+
+ _rtw_spinlock_free(&pstapriv->sta_hash_lock);
+ _rtw_spinlock_free(&pstapriv->wakeup_q.lock);
+ _rtw_spinlock_free(&pstapriv->sleep_q.lock);
+
+#ifdef CONFIG_88EU_AP_MODE
+ _rtw_spinlock_free(&pstapriv->asoc_list_lock);
+ _rtw_spinlock_free(&pstapriv->auth_list_lock);
+ _rtw_spinlock_free(&pacl_list->acl_node_q.lock);
+#endif
+}
+
+u32 _rtw_free_sta_priv(struct sta_priv *pstapriv)
+{
+ unsigned long irql;
+ struct list_head *phead, *plist;
+ struct sta_info *psta = NULL;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ int index;
+
+_func_enter_;
+ if (pstapriv) {
+ /* delete all reordering_ctrl_timer */
+ _enter_critical_bh(&pstapriv->sta_hash_lock, &irql);
+ for (index = 0; index < NUM_STA; index++) {
+ phead = &(pstapriv->sta_hash[index]);
+ plist = get_next(phead);
+
+ while ((rtw_end_of_queue_search(phead, plist)) == false) {
+ int i;
+				psta = LIST_CONTAINOR(plist, struct sta_info, hash_list);
+ plist = get_next(plist);
+
+ for (i = 0; i < 16; i++) {
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
+ _cancel_timer_ex(&preorder_ctrl->reordering_ctrl_timer);
+ }
+ }
+ }
+ _exit_critical_bh(&pstapriv->sta_hash_lock, &irql);
+ /*===============================*/
+
+ rtw_mfree_sta_priv_lock(pstapriv);
+
+		if (pstapriv->pallocated_stainfo_buf)
+			rtw_vmfree(pstapriv->pallocated_stainfo_buf, sizeof(struct sta_info)*NUM_STA+4);
+ }
+
+_func_exit_;
+ return _SUCCESS;
+}
+
+struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
+{
+ unsigned long irql, irql2;
+ s32 index;
+ struct list_head *phash_list;
+ struct sta_info *psta;
+ struct __queue *pfree_sta_queue;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ int i = 0;
+ u16 wRxSeqInitialValue = 0xffff;
+
+_func_enter_;
+
+ pfree_sta_queue = &pstapriv->free_sta_queue;
+
+ _enter_critical_bh(&(pfree_sta_queue->lock), &irql);
+
+ if (_rtw_queue_empty(pfree_sta_queue) == true) {
+ _exit_critical_bh(&(pfree_sta_queue->lock), &irql);
+ psta = NULL;
+ } else {
+ psta = LIST_CONTAINOR(get_next(&pfree_sta_queue->queue), struct sta_info, list);
+ rtw_list_delete(&(psta->list));
+ _exit_critical_bh(&(pfree_sta_queue->lock), &irql);
+ _rtw_init_stainfo(psta);
+ memcpy(psta->hwaddr, hwaddr, ETH_ALEN);
+ index = wifi_mac_hash(hwaddr);
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_info_, ("rtw_alloc_stainfo: index=%x", index));
+ if (index >= NUM_STA) {
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_, ("ERROR => rtw_alloc_stainfo: index >= NUM_STA"));
+ psta = NULL;
+ goto exit;
+ }
+ phash_list = &(pstapriv->sta_hash[index]);
+
+ _enter_critical_bh(&(pstapriv->sta_hash_lock), &irql2);
+
+ rtw_list_insert_tail(&psta->hash_list, phash_list);
+
+		pstapriv->asoc_sta_count++;
+
+ _exit_critical_bh(&(pstapriv->sta_hash_lock), &irql2);
+
+/* Commented by Albert 2009/08/13 */
+/* For the SMC router, the sequence number of the first packet of the WPS handshake will be 0. */
+/* In that case, the packet would be dropped by recv_decache() if we used 0x00 as the default value for tid_rxseq. */
+/* So initialize tid_rxseq to 0xffff. */
+
+ for (i = 0; i < 16; i++)
+ memcpy(&psta->sta_recvpriv.rxcache.tid_rxseq[i], &wRxSeqInitialValue, 2);
+
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_info_,
+ ("alloc number_%d stainfo with hwaddr = %pM\n",
+			 pstapriv->asoc_sta_count, hwaddr));
+
+ init_addba_retry_timer(pstapriv->padapter, psta);
+
+ /* for A-MPDU Rx reordering buffer control */
+ for (i = 0; i < 16; i++) {
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
+
+ preorder_ctrl->padapter = pstapriv->padapter;
+
+ preorder_ctrl->enable = false;
+
+ preorder_ctrl->indicate_seq = 0xffff;
+ preorder_ctrl->wend_b = 0xffff;
+ preorder_ctrl->wsize_b = 64;/* 64; */
+
+ _rtw_init_queue(&preorder_ctrl->pending_recvframe_queue);
+
+ rtw_init_recv_timer(preorder_ctrl);
+ }
+
+ /* init for DM */
+ psta->rssi_stat.UndecoratedSmoothedPWDB = (-1);
+ psta->rssi_stat.UndecoratedSmoothedCCK = (-1);
+
+ /* init for the sequence number of received management frame */
+ psta->RxMgmtFrameSeqNum = 0xffff;
+ }
+
+exit:
+
+_func_exit_;
+
+ return psta;
+}
+
+/* using pstapriv->sta_hash_lock to protect */
+u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
+{
+ int i;
+ unsigned long irql0;
+ struct __queue *pfree_sta_queue;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ struct sta_xmit_priv *pstaxmitpriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+_func_enter_;
+
+ if (psta == NULL)
+ goto exit;
+
+ pfree_sta_queue = &pstapriv->free_sta_queue;
+
+ pstaxmitpriv = &psta->sta_xmitpriv;
+
+ _enter_critical_bh(&pxmitpriv->lock, &irql0);
+
+ rtw_free_xmitframe_queue(pxmitpriv, &psta->sleep_q);
+ psta->sleepq_len = 0;
+
+ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vo_q.sta_pending);
+
+ rtw_list_delete(&(pstaxmitpriv->vo_q.tx_pending));
+
+ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vi_q.sta_pending);
+
+ rtw_list_delete(&(pstaxmitpriv->vi_q.tx_pending));
+
+ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->bk_q.sta_pending);
+
+ rtw_list_delete(&(pstaxmitpriv->bk_q.tx_pending));
+
+ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->be_q.sta_pending);
+
+ rtw_list_delete(&(pstaxmitpriv->be_q.tx_pending));
+
+ _exit_critical_bh(&pxmitpriv->lock, &irql0);
+
+ rtw_list_delete(&psta->hash_list);
+	RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_, ("\n free number_%d stainfo with hwaddr=0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x\n", pstapriv->asoc_sta_count, psta->hwaddr[0], psta->hwaddr[1], psta->hwaddr[2], psta->hwaddr[3], psta->hwaddr[4], psta->hwaddr[5]));
+ pstapriv->asoc_sta_count--;
+
+ /* re-init sta_info; 20061114 */
+ _rtw_init_sta_xmit_priv(&psta->sta_xmitpriv);
+ _rtw_init_sta_recv_priv(&psta->sta_recvpriv);
+
+ _cancel_timer_ex(&psta->addba_retry_timer);
+
+ /* for A-MPDU Rx reordering buffer control, cancel reordering_ctrl_timer */
+	for (i = 0; i < 16; i++) {
+ unsigned long irql;
+ struct list_head *phead, *plist;
+ union recv_frame *prframe;
+ struct __queue *ppending_recvframe_queue;
+ struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
+
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
+
+ _cancel_timer_ex(&preorder_ctrl->reordering_ctrl_timer);
+
+ ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
+
+ _enter_critical_bh(&ppending_recvframe_queue->lock, &irql);
+
+ phead = get_list_head(ppending_recvframe_queue);
+ plist = get_next(phead);
+
+ while (!rtw_is_list_empty(phead)) {
+ prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+
+ plist = get_next(plist);
+
+ rtw_list_delete(&(prframe->u.hdr.list));
+
+ rtw_free_recvframe(prframe, pfree_recv_queue);
+ }
+
+ _exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
+ }
+
+ if (!(psta->state & WIFI_AP_STATE))
+ rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, false);
+
+#ifdef CONFIG_88EU_AP_MODE
+
+ _enter_critical_bh(&pstapriv->auth_list_lock, &irql0);
+ if (!rtw_is_list_empty(&psta->auth_list)) {
+ rtw_list_delete(&psta->auth_list);
+ pstapriv->auth_list_cnt--;
+ }
+ _exit_critical_bh(&pstapriv->auth_list_lock, &irql0);
+
+ psta->expire_to = 0;
+
+ psta->sleepq_ac_len = 0;
+ psta->qos_info = 0;
+
+ psta->max_sp_len = 0;
+ psta->uapsd_bk = 0;
+ psta->uapsd_be = 0;
+ psta->uapsd_vi = 0;
+ psta->uapsd_vo = 0;
+ psta->has_legacy_ac = 0;
+
+ pstapriv->sta_dz_bitmap &= ~BIT(psta->aid);
+ pstapriv->tim_bitmap &= ~BIT(psta->aid);
+
+ if ((psta->aid > 0) && (pstapriv->sta_aid[psta->aid - 1] == psta)) {
+ pstapriv->sta_aid[psta->aid - 1] = NULL;
+ psta->aid = 0;
+ }
+
+ psta->under_exist_checking = 0;
+
+#endif /* CONFIG_88EU_AP_MODE */
+
+ _enter_critical_bh(&(pfree_sta_queue->lock), &irql0);
+ rtw_list_insert_tail(&psta->list, get_list_head(pfree_sta_queue));
+ _exit_critical_bh(&(pfree_sta_queue->lock), &irql0);
+
+exit:
+
+_func_exit_;
+
+ return _SUCCESS;
+}
+
+/* free all stainfo which in sta_hash[all] */
+void rtw_free_all_stainfo(struct adapter *padapter)
+{
+ unsigned long irql;
+ struct list_head *plist, *phead;
+ s32 index;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_info *pbcmc_stainfo = rtw_get_bcmc_stainfo(padapter);
+
+_func_enter_;
+
+ if (pstapriv->asoc_sta_count == 1)
+ goto exit;
+
+ _enter_critical_bh(&pstapriv->sta_hash_lock, &irql);
+
+ for (index = 0; index < NUM_STA; index++) {
+ phead = &(pstapriv->sta_hash[index]);
+ plist = get_next(phead);
+
+ while ((!rtw_end_of_queue_search(phead, plist))) {
+			psta = LIST_CONTAINOR(plist, struct sta_info, hash_list);
+
+ plist = get_next(plist);
+
+ if (pbcmc_stainfo != psta)
+				rtw_free_stainfo(padapter, psta);
+ }
+ }
+
+ _exit_critical_bh(&pstapriv->sta_hash_lock, &irql);
+
+exit:
+
+_func_exit_;
+}
+
+/* any station allocated can be searched by hash list */
+struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
+{
+ unsigned long irql;
+ struct list_head *plist, *phead;
+ struct sta_info *psta = NULL;
+ u32 index;
+ u8 *addr;
+ u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+_func_enter_;
+
+ if (hwaddr == NULL)
+ return NULL;
+
+ if (IS_MCAST(hwaddr))
+ addr = bc_addr;
+ else
+ addr = hwaddr;
+
+ index = wifi_mac_hash(addr);
+
+ _enter_critical_bh(&pstapriv->sta_hash_lock, &irql);
+
+ phead = &(pstapriv->sta_hash[index]);
+ plist = get_next(phead);
+
+ while ((!rtw_end_of_queue_search(phead, plist))) {
+ psta = LIST_CONTAINOR(plist, struct sta_info, hash_list);
+
+ if ((_rtw_memcmp(psta->hwaddr, addr, ETH_ALEN)) == true) {
+ /* if found the matched address */
+ break;
+ }
+ psta = NULL;
+ plist = get_next(plist);
+ }
+
+ _exit_critical_bh(&pstapriv->sta_hash_lock, &irql);
+_func_exit_;
+ return psta;
+}
+
+u32 rtw_init_bcmc_stainfo(struct adapter *padapter)
+{
+ struct sta_info *psta;
+ u32 res = _SUCCESS;
+ unsigned char bcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+_func_enter_;
+
+ psta = rtw_alloc_stainfo(pstapriv, bcast_addr);
+
+ if (psta == NULL) {
+ res = _FAIL;
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_, ("rtw_alloc_stainfo fail"));
+ goto exit;
+ }
+
+ /* default broadcast & multicast use macid 1 */
+ psta->mac_id = 1;
+
+exit:
+_func_exit_;
+ return res;
+}
+
+struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter)
+{
+ struct sta_info *psta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+_func_enter_;
+ psta = rtw_get_stainfo(pstapriv, bc_addr);
+_func_exit_;
+ return psta;
+}
+
+u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
+{
+ u8 res = true;
+#ifdef CONFIG_88EU_AP_MODE
+ unsigned long irql;
+ struct list_head *plist, *phead;
+ struct rtw_wlan_acl_node *paclnode;
+ u8 match = false;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+ struct __queue *pacl_node_q = &pacl_list->acl_node_q;
+
+ _enter_critical_bh(&(pacl_node_q->lock), &irql);
+ phead = get_list_head(pacl_node_q);
+ plist = get_next(phead);
+ while ((!rtw_end_of_queue_search(phead, plist))) {
+ paclnode = LIST_CONTAINOR(plist, struct rtw_wlan_acl_node, list);
+ plist = get_next(plist);
+
+ if (_rtw_memcmp(paclnode->addr, mac_addr, ETH_ALEN)) {
+ if (paclnode->valid) {
+ match = true;
+ break;
+ }
+ }
+ }
+ _exit_critical_bh(&(pacl_node_q->lock), &irql);
+
+	if (pacl_list->mode == 1) /* accept unless in deny list */
+		res = !match;
+	else if (pacl_list->mode == 2) /* deny unless in accept list */
+		res = match;
+	else
+		res = true;
+
+#endif
+
+ return res;
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
new file mode 100644
index 0000000..8018edd
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -0,0 +1,1689 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_WLAN_UTIL_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <wifi.h>
+
+static unsigned char ARTHEROS_OUI1[] = {0x00, 0x03, 0x7f};
+static unsigned char ARTHEROS_OUI2[] = {0x00, 0x13, 0x74};
+
+static unsigned char BROADCOM_OUI1[] = {0x00, 0x10, 0x18};
+static unsigned char BROADCOM_OUI2[] = {0x00, 0x0a, 0xf7};
+
+static unsigned char CISCO_OUI[] = {0x00, 0x40, 0x96};
+static unsigned char MARVELL_OUI[] = {0x00, 0x50, 0x43};
+static unsigned char RALINK_OUI[] = {0x00, 0x0c, 0x43};
+static unsigned char REALTEK_OUI[] = {0x00, 0xe0, 0x4c};
+static unsigned char AIRGOCAP_OUI[] = {0x00, 0x0a, 0xf5};
+static unsigned char EPIGRAM_OUI[] = {0x00, 0x90, 0x4c};
+
+unsigned char REALTEK_96B_IE[] = {0x00, 0xe0, 0x4c, 0x02, 0x01, 0x20};
+
+#define R2T_PHY_DELAY (0)
+
+/* define WAIT_FOR_BCN_TO_M (3000) */
+#define WAIT_FOR_BCN_TO_MIN (6000)
+#define WAIT_FOR_BCN_TO_MAX (20000)
+
+static u8 rtw_basic_rate_cck[4] = {
+ IEEE80211_CCK_RATE_1MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_CCK_RATE_2MB|IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_CCK_RATE_5MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_CCK_RATE_11MB|IEEE80211_BASIC_RATE_MASK
+};
+
+static u8 rtw_basic_rate_ofdm[3] = {
+ IEEE80211_OFDM_RATE_6MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_OFDM_RATE_12MB|IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_OFDM_RATE_24MB|IEEE80211_BASIC_RATE_MASK
+};
+
+static u8 rtw_basic_rate_mix[7] = {
+ IEEE80211_CCK_RATE_1MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_CCK_RATE_2MB|IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_CCK_RATE_5MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_CCK_RATE_11MB|IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_OFDM_RATE_6MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_OFDM_RATE_12MB|IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_OFDM_RATE_24MB|IEEE80211_BASIC_RATE_MASK
+};
+
+int cckrates_included(unsigned char *rate, int ratelen)
+{
+ int i;
+
+ for (i = 0; i < ratelen; i++) {
+ if ((((rate[i]) & 0x7f) == 2) || (((rate[i]) & 0x7f) == 4) ||
+ (((rate[i]) & 0x7f) == 11) || (((rate[i]) & 0x7f) == 22))
+ return true;
+ }
+ return false;
+}
+
+int cckratesonly_included(unsigned char *rate, int ratelen)
+{
+ int i;
+
+ for (i = 0; i < ratelen; i++) {
+ if ((((rate[i]) & 0x7f) != 2) && (((rate[i]) & 0x7f) != 4) &&
+ (((rate[i]) & 0x7f) != 11) && (((rate[i]) & 0x7f) != 22))
+ return false;
+ }
+
+ return true;
+}
+
+unsigned char networktype_to_raid(unsigned char network_type)
+{
+ unsigned char raid;
+
+ switch (network_type) {
+ case WIRELESS_11B:
+ raid = RATR_INX_WIRELESS_B;
+ break;
+ case WIRELESS_11A:
+ case WIRELESS_11G:
+ raid = RATR_INX_WIRELESS_G;
+ break;
+ case WIRELESS_11BG:
+ raid = RATR_INX_WIRELESS_GB;
+ break;
+ case WIRELESS_11_24N:
+ case WIRELESS_11_5N:
+ raid = RATR_INX_WIRELESS_N;
+ break;
+ case WIRELESS_11A_5N:
+ case WIRELESS_11G_24N:
+ raid = RATR_INX_WIRELESS_NG;
+ break;
+ case WIRELESS_11BG_24N:
+ raid = RATR_INX_WIRELESS_NGB;
+ break;
+ default:
+ raid = RATR_INX_WIRELESS_GB;
+ break;
+ }
+ return raid;
+}
+
+u8 judge_network_type(struct adapter *padapter, unsigned char *rate, int ratelen)
+{
+ u8 network_type = 0;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (pmlmeext->cur_channel > 14) {
+ if (pmlmeinfo->HT_enable)
+ network_type = WIRELESS_11_5N;
+
+ network_type |= WIRELESS_11A;
+ } else {
+ if (pmlmeinfo->HT_enable)
+ network_type = WIRELESS_11_24N;
+
+ if ((cckratesonly_included(rate, ratelen)) == true)
+ network_type |= WIRELESS_11B;
+ else if ((cckrates_included(rate, ratelen)) == true)
+ network_type |= WIRELESS_11BG;
+ else
+ network_type |= WIRELESS_11G;
+ }
+ return network_type;
+}
+
+static unsigned char ratetbl_val_2wifirate(unsigned char rate)
+{
+ unsigned char val = 0;
+
+ switch (rate & 0x7f) {
+ case 0:
+ val = IEEE80211_CCK_RATE_1MB;
+ break;
+ case 1:
+ val = IEEE80211_CCK_RATE_2MB;
+ break;
+ case 2:
+ val = IEEE80211_CCK_RATE_5MB;
+ break;
+ case 3:
+ val = IEEE80211_CCK_RATE_11MB;
+ break;
+ case 4:
+ val = IEEE80211_OFDM_RATE_6MB;
+ break;
+ case 5:
+ val = IEEE80211_OFDM_RATE_9MB;
+ break;
+ case 6:
+ val = IEEE80211_OFDM_RATE_12MB;
+ break;
+ case 7:
+ val = IEEE80211_OFDM_RATE_18MB;
+ break;
+ case 8:
+ val = IEEE80211_OFDM_RATE_24MB;
+ break;
+ case 9:
+ val = IEEE80211_OFDM_RATE_36MB;
+ break;
+ case 10:
+ val = IEEE80211_OFDM_RATE_48MB;
+ break;
+ case 11:
+ val = IEEE80211_OFDM_RATE_54MB;
+ break;
+ }
+ return val;
+}
+
+static int is_basicrate(struct adapter *padapter, unsigned char rate)
+{
+ int i;
+ unsigned char val;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ for (i = 0; i < NumRates; i++) {
+ val = pmlmeext->basicrate[i];
+
+ if ((val != 0xff) && (val != 0xfe)) {
+ if (rate == ratetbl_val_2wifirate(val))
+ return true;
+ }
+ }
+ return false;
+}
+
+static unsigned int ratetbl2rateset(struct adapter *padapter, unsigned char *rateset)
+{
+ int i;
+ unsigned char rate;
+ unsigned int len = 0;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ for (i = 0; i < NumRates; i++) {
+ rate = pmlmeext->datarate[i];
+
+ switch (rate) {
+ case 0xff:
+ return len;
+ case 0xfe:
+ continue;
+ default:
+ rate = ratetbl_val_2wifirate(rate);
+
+ if (is_basicrate(padapter, rate) == true)
+ rate |= IEEE80211_BASIC_RATE_MASK;
+
+ rateset[len] = rate;
+ len++;
+ break;
+ }
+ }
+ return len;
+}
+
+void get_rate_set(struct adapter *padapter, unsigned char *pbssrate, int *bssrate_len)
+{
+ unsigned char supportedrates[NumRates];
+
+ _rtw_memset(supportedrates, 0, NumRates);
+ *bssrate_len = ratetbl2rateset(padapter, supportedrates);
+ memcpy(pbssrate, supportedrates, *bssrate_len);
+}
+
+void UpdateBrateTbl(struct adapter *Adapter, u8 *mbrate)
+{
+ u8 i;
+ u8 rate;
+
+ /* 1M, 2M, 5.5M, 11M, 6M, 12M, 24M are mandatory. */
+ for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
+ rate = mbrate[i] & 0x7f;
+ switch (rate) {
+ case IEEE80211_CCK_RATE_1MB:
+ case IEEE80211_CCK_RATE_2MB:
+ case IEEE80211_CCK_RATE_5MB:
+ case IEEE80211_CCK_RATE_11MB:
+ case IEEE80211_OFDM_RATE_6MB:
+ case IEEE80211_OFDM_RATE_12MB:
+ case IEEE80211_OFDM_RATE_24MB:
+ mbrate[i] |= IEEE80211_BASIC_RATE_MASK;
+ break;
+ }
+ }
+}
+
+void UpdateBrateTblForSoftAP(u8 *bssrateset, u32 bssratelen)
+{
+ u8 i;
+ u8 rate;
+
+ for (i = 0; i < bssratelen; i++) {
+ rate = bssrateset[i] & 0x7f;
+ switch (rate) {
+ case IEEE80211_CCK_RATE_1MB:
+ case IEEE80211_CCK_RATE_2MB:
+ case IEEE80211_CCK_RATE_5MB:
+ case IEEE80211_CCK_RATE_11MB:
+ bssrateset[i] |= IEEE80211_BASIC_RATE_MASK;
+ break;
+ }
+ }
+}
+
+void Save_DM_Func_Flag(struct adapter *padapter)
+{
+ u8 saveflag = true;
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_DM_FUNC_OP, (u8 *)(&saveflag));
+}
+
+void Restore_DM_Func_Flag(struct adapter *padapter)
+{
+ u8 saveflag = false;
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_DM_FUNC_OP, (u8 *)(&saveflag));
+}
+
+void Switch_DM_Func(struct adapter *padapter, u32 mode, u8 enable)
+{
+ if (enable)
+ rtw_hal_set_hwreg(padapter, HW_VAR_DM_FUNC_SET, (u8 *)(&mode));
+ else
+ rtw_hal_set_hwreg(padapter, HW_VAR_DM_FUNC_CLR, (u8 *)(&mode));
+}
+
+static void Set_NETYPE0_MSR(struct adapter *padapter, u8 type)
+{
+ rtw_hal_set_hwreg(padapter, HW_VAR_MEDIA_STATUS, (u8 *)(&type));
+}
+
+void Set_MSR(struct adapter *padapter, u8 type)
+{
+ Set_NETYPE0_MSR(padapter, type);
+}
+
+inline u8 rtw_get_oper_ch(struct adapter *adapter)
+{
+ return adapter->mlmeextpriv.oper_channel;
+}
+
+inline void rtw_set_oper_ch(struct adapter *adapter, u8 ch)
+{
+ adapter->mlmeextpriv.oper_channel = ch;
+}
+
+inline u8 rtw_get_oper_bw(struct adapter *adapter)
+{
+ return adapter->mlmeextpriv.oper_bwmode;
+}
+
+inline void rtw_set_oper_bw(struct adapter *adapter, u8 bw)
+{
+ adapter->mlmeextpriv.oper_bwmode = bw;
+}
+
+inline u8 rtw_get_oper_choffset(struct adapter *adapter)
+{
+ return adapter->mlmeextpriv.oper_ch_offset;
+}
+
+inline void rtw_set_oper_choffset(struct adapter *adapter, u8 offset)
+{
+ adapter->mlmeextpriv.oper_ch_offset = offset;
+}
+
+void SelectChannel(struct adapter *padapter, unsigned char channel)
+{
+ /* saved channel info */
+ rtw_set_oper_ch(padapter, channel);
+ rtw_hal_set_chan(padapter, channel);
+}
+
+void SetBWMode(struct adapter *padapter, unsigned short bwmode,
+ unsigned char channel_offset)
+{
+ /* saved bw info */
+ rtw_set_oper_bw(padapter, bwmode);
+ rtw_set_oper_choffset(padapter, channel_offset);
+
+ rtw_hal_set_bwmode(padapter, (enum ht_channel_width)bwmode, channel_offset);
+}
+
+void set_channel_bwmode(struct adapter *padapter, unsigned char channel, unsigned char channel_offset, unsigned short bwmode)
+{
+ u8 center_ch;
+
+ if (padapter->bNotifyChannelChange)
+ DBG_88E("[%s] ch = %d, offset = %d, bwmode = %d\n", __func__, channel, channel_offset, bwmode);
+
+ if ((bwmode == HT_CHANNEL_WIDTH_20) ||
+ (channel_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)) {
+ /* SelectChannel(padapter, channel); */
+ center_ch = channel;
+ } else {
+ /* switch to the proper channel */
+ if (channel_offset == HAL_PRIME_CHNL_OFFSET_LOWER) {
+ /* SelectChannel(padapter, channel + 2); */
+ center_ch = channel + 2;
+ } else {
+ /* SelectChannel(padapter, channel - 2); */
+ center_ch = channel - 2;
+ }
+ }
+
+ /* set Channel */
+ /* saved channel/bw info */
+ rtw_set_oper_ch(padapter, channel);
+ rtw_set_oper_bw(padapter, bwmode);
+ rtw_set_oper_choffset(padapter, channel_offset);
+
+ rtw_hal_set_chan(padapter, center_ch); /* set center channel */
+ SetBWMode(padapter, bwmode, channel_offset);
+}
+
+int get_bsstype(unsigned short capability)
+{
+ if (capability & BIT(0))
+ return WIFI_FW_AP_STATE;
+ else if (capability & BIT(1))
+ return WIFI_FW_ADHOC_STATE;
+ else
+ return 0;
+}
+
+__inline u8 *get_my_bssid(struct wlan_bssid_ex *pnetwork)
+{
+ return pnetwork->MacAddress;
+}
+
+u16 get_beacon_interval(struct wlan_bssid_ex *bss)
+{
+	__le16 val;
+
+	memcpy((unsigned char *)&val, rtw_get_beacon_interval_from_ie(bss->IEs), 2);
+
+ return le16_to_cpu(val);
+}
+
+int is_client_associated_to_ap(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext;
+ struct mlme_ext_info *pmlmeinfo;
+
+ if (!padapter)
+ return _FAIL;
+
+ pmlmeext = &padapter->mlmeextpriv;
+ pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if ((pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) && ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE))
+ return true;
+ else
+ return _FAIL;
+}
+
+int is_client_associated_to_ibss(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if ((pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) && ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE))
+ return true;
+ else
+ return _FAIL;
+}
+
+int is_IBSS_empty(struct adapter *padapter)
+{
+ unsigned int i;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ for (i = IBSS_START_MAC_ID; i < NUM_STA; i++) {
+ if (pmlmeinfo->FW_sta_info[i].status == 1)
+ return _FAIL;
+ }
+ return true;
+}
+
+unsigned int decide_wait_for_beacon_timeout(unsigned int bcn_interval)
+{
+ if ((bcn_interval << 2) < WAIT_FOR_BCN_TO_MIN)
+ return WAIT_FOR_BCN_TO_MIN;
+ else if ((bcn_interval << 2) > WAIT_FOR_BCN_TO_MAX)
+ return WAIT_FOR_BCN_TO_MAX;
+ else
+ return bcn_interval << 2;
+}
+
+void CAM_empty_entry(struct adapter *Adapter, u8 ucIndex)
+{
+ rtw_hal_set_hwreg(Adapter, HW_VAR_CAM_EMPTY_ENTRY, (u8 *)(&ucIndex));
+}
+
+void invalidate_cam_all(struct adapter *padapter)
+{
+ rtw_hal_set_hwreg(padapter, HW_VAR_CAM_INVALID_ALL, NULL);
+}
+
+void write_cam(struct adapter *padapter, u8 entry, u16 ctrl, u8 *mac, u8 *key)
+{
+ unsigned int i, val, addr;
+ int j;
+ u32 cam_val[2];
+
+ addr = entry << 3;
+
+ for (j = 5; j >= 0; j--) {
+ switch (j) {
+ case 0:
+ val = (ctrl | (mac[0] << 16) | (mac[1] << 24));
+ break;
+ case 1:
+ val = (mac[2] | (mac[3] << 8) | (mac[4] << 16) | (mac[5] << 24));
+ break;
+ default:
+ i = (j - 2) << 2;
+ val = (key[i] | (key[i+1] << 8) | (key[i+2] << 16) | (key[i+3] << 24));
+ break;
+ }
+
+ cam_val[0] = val;
+ cam_val[1] = addr + (unsigned int)j;
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_CAM_WRITE, (u8 *)cam_val);
+ }
+}
+
+void clear_cam_entry(struct adapter *padapter, u8 entry)
+{
+ unsigned char null_sta[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ unsigned char null_key[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+ write_cam(padapter, entry, 0, null_sta, null_key);
+}
+
+int allocate_fw_sta_entry(struct adapter *padapter)
+{
+ unsigned int mac_id;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ for (mac_id = IBSS_START_MAC_ID; mac_id < NUM_STA; mac_id++) {
+ if (pmlmeinfo->FW_sta_info[mac_id].status == 0) {
+ pmlmeinfo->FW_sta_info[mac_id].status = 1;
+ pmlmeinfo->FW_sta_info[mac_id].retry = 0;
+ break;
+ }
+ }
+
+ return mac_id;
+}
+
+void flush_all_cam_entry(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_CAM_INVALID_ALL, NULL);
+
+ _rtw_memset((u8 *)(pmlmeinfo->FW_sta_info), 0, sizeof(pmlmeinfo->FW_sta_info));
+}
+
+int WMM_param_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
+{
+ /* struct registry_priv *pregpriv = &padapter->registrypriv; */
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (pmlmepriv->qospriv.qos_option == 0) {
+ pmlmeinfo->WMM_enable = 0;
+ return _FAIL;
+ }
+
+ pmlmeinfo->WMM_enable = 1;
+ memcpy(&(pmlmeinfo->WMM_param), (pIE->data + 6), sizeof(struct WMM_para_element));
+ return true;
+}
+
+void WMMOnAssocRsp(struct adapter *padapter)
+{
+ u8 ACI, ACM, AIFS, ECWMin, ECWMax, aSifsTime;
+ u8 acm_mask;
+ u16 TXOP;
+ u32 acParm, i;
+ u32 edca[4], inx[4];
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+
+ if (pmlmeinfo->WMM_enable == 0) {
+ padapter->mlmepriv.acm_mask = 0;
+ return;
+ }
+
+ acm_mask = 0;
+
+ if (pmlmeext->cur_wireless_mode == WIRELESS_11B)
+ aSifsTime = 10;
+ else
+ aSifsTime = 16;
+
+ for (i = 0; i < 4; i++) {
+ ACI = (pmlmeinfo->WMM_param.ac_param[i].ACI_AIFSN >> 5) & 0x03;
+ ACM = (pmlmeinfo->WMM_param.ac_param[i].ACI_AIFSN >> 4) & 0x01;
+
+ /* AIFS = AIFSN * slot time + SIFS - r2t phy delay */
+ AIFS = (pmlmeinfo->WMM_param.ac_param[i].ACI_AIFSN & 0x0f) * pmlmeinfo->slotTime + aSifsTime;
+
+ ECWMin = (pmlmeinfo->WMM_param.ac_param[i].CW & 0x0f);
+ ECWMax = (pmlmeinfo->WMM_param.ac_param[i].CW & 0xf0) >> 4;
+ TXOP = le16_to_cpu(pmlmeinfo->WMM_param.ac_param[i].TXOP_limit);
+
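+ /*
+ * Pack one EDCA parameter word: bits[7:0] AIFS (us), bits[11:8] ECWmin,
+ * bits[15:12] ECWmax, bits[31:16] TXOP limit.
+ */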
+ acParm = AIFS | (ECWMin << 8) | (ECWMax << 12) | (TXOP << 16);
+
+ switch (ACI) {
+ case 0x0:
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_BE, (u8 *)(&acParm));
+ acm_mask |= (ACM ? BIT(1) : 0);
+ edca[XMIT_BE_QUEUE] = acParm;
+ break;
+ case 0x1:
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_BK, (u8 *)(&acParm));
+ edca[XMIT_BK_QUEUE] = acParm;
+ break;
+ case 0x2:
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_VI, (u8 *)(&acParm));
+ acm_mask |= (ACM ? BIT(2) : 0);
+ edca[XMIT_VI_QUEUE] = acParm;
+ break;
+ case 0x3:
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_VO, (u8 *)(&acParm));
+ acm_mask |= (ACM ? BIT(3) : 0);
+ edca[XMIT_VO_QUEUE] = acParm;
+ break;
+ }
+
+ DBG_88E("WMM(%x): %x, %x\n", ACI, ACM, acParm);
+ }
+
+ if (padapter->registrypriv.acm_method == 1)
+ rtw_hal_set_hwreg(padapter, HW_VAR_ACM_CTRL, (u8 *)(&acm_mask));
+ else
+ padapter->mlmepriv.acm_mask = acm_mask;
+
+ inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3;
+
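+ /*
+ * For WMM certification (wifi_spec), sort the four ACs by ascending
+ * CW/AIFS (low 16 bits of edca[]), breaking ties in favour of the larger
+ * TXOP; the resulting inx[] order is stored as wmm_para_seq[] below.
+ */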
+ if (pregpriv->wifi_spec == 1) {
+ u32 j, tmp, change_inx = false;
+
+ /* entry indx: 0->vo, 1->vi, 2->be, 3->bk. */
+ for (i = 0; i < 4; i++) {
+ for (j = i+1; j < 4; j++) {
+ /* compare CW and AIFS */
+ if ((edca[j] & 0xFFFF) < (edca[i] & 0xFFFF)) {
+ change_inx = true;
+ } else if ((edca[j] & 0xFFFF) == (edca[i] & 0xFFFF)) {
+ /* compare TXOP */
+ if ((edca[j] >> 16) > (edca[i] >> 16))
+ change_inx = true;
+ }
+
+ if (change_inx) {
+ tmp = edca[i];
+ edca[i] = edca[j];
+ edca[j] = tmp;
+
+ tmp = inx[i];
+ inx[i] = inx[j];
+ inx[j] = tmp;
+
+ change_inx = false;
+ }
+ }
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ pxmitpriv->wmm_para_seq[i] = inx[i];
+ DBG_88E("wmm_para_seq(%d): %d\n", i, pxmitpriv->wmm_para_seq[i]);
+ }
+}
+
+static void bwmode_update_check(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
+{
+ unsigned char new_bwmode;
+ unsigned char new_ch_offset;
+ struct HT_info_element *pHT_info;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+
+ if (!pIE)
+ return;
+
+ if (!phtpriv)
+ return;
+
+ if (pIE->Length > sizeof(struct HT_info_element))
+ return;
+
+ pHT_info = (struct HT_info_element *)pIE->data;
+
+ if ((pHT_info->infos[0] & BIT(2)) && pregistrypriv->cbw40_enable) {
+ new_bwmode = HT_CHANNEL_WIDTH_40;
+
+ switch (pHT_info->infos[0] & 0x3) {
+ case 1:
+ new_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER;
+ break;
+ case 3:
+ new_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER;
+ break;
+ default:
+ new_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ break;
+ }
+ } else {
+ new_bwmode = HT_CHANNEL_WIDTH_20;
+ new_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ }
+
+ if ((new_bwmode != pmlmeext->cur_bwmode) ||
+ (new_ch_offset != pmlmeext->cur_ch_offset)) {
+ pmlmeinfo->bwmode_updated = true;
+
+ pmlmeext->cur_bwmode = new_bwmode;
+ pmlmeext->cur_ch_offset = new_ch_offset;
+
+ /* update HT info also */
+ HT_info_handler(padapter, pIE);
+ } else {
+ pmlmeinfo->bwmode_updated = false;
+ }
+
+ if (pmlmeinfo->bwmode_updated) {
+ struct sta_info *psta;
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ /* set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode); */
+
+ /* update ap's stainfo */
+ psta = rtw_get_stainfo(pstapriv, cur_network->MacAddress);
+ if (psta) {
+ struct ht_priv *phtpriv_sta = &psta->htpriv;
+
+ if (phtpriv_sta->ht_option) {
+ /* bwmode */
+ phtpriv_sta->bwmode = pmlmeext->cur_bwmode;
+ phtpriv_sta->ch_offset = pmlmeext->cur_ch_offset;
+ } else {
+ phtpriv_sta->bwmode = HT_CHANNEL_WIDTH_20;
+ phtpriv_sta->ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ }
+ }
+ }
+}
+
+void HT_caps_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
+{
+ unsigned int i;
+ u8 rf_type;
+ u8 max_AMPDU_len, min_MPDU_spacing;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+
+ if (pIE == NULL)
+ return;
+
+ if (!phtpriv->ht_option)
+ return;
+
+ pmlmeinfo->HT_caps_enable = 1;
+
+ for (i = 0; i < (pIE->Length); i++) {
+ if (i != 2) {
+ /* Got the endian issue here. */
+ pmlmeinfo->HT_caps.u.HT_cap[i] &= (pIE->data[i]);
+ } else {
+ /* modify from fw by Thomas 2010/11/17 */
+ if ((pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x3) > (pIE->data[i] & 0x3))
+ max_AMPDU_len = (pIE->data[i] & 0x3);
+ else
+ max_AMPDU_len = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x3);
+
+ if ((pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c) > (pIE->data[i] & 0x1c))
+ min_MPDU_spacing = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c);
+ else
+ min_MPDU_spacing = (pIE->data[i] & 0x1c);
+
+ pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para = max_AMPDU_len | min_MPDU_spacing;
+ }
+ }
+
+ rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+
+ /* update the MCS rates */
+ for (i = 0; i < 16; i++) {
+ if ((rf_type == RF_1T1R) || (rf_type == RF_1T2R))
+ pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_1R[i];
+ else
+ pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_2R[i];
+ }
+}
+
+void HT_info_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+
+ if (pIE == NULL)
+ return;
+
+ if (!phtpriv->ht_option)
+ return;
+
+ if (pIE->Length > sizeof(struct HT_info_element))
+ return;
+
+ pmlmeinfo->HT_info_enable = 1;
+ memcpy(&(pmlmeinfo->HT_info), pIE->data, pIE->Length);
+}
+
+void HTOnAssocRsp(struct adapter *padapter)
+{
+ unsigned char max_AMPDU_len;
+ unsigned char min_MPDU_spacing;
+ /* struct registry_priv *pregpriv = &padapter->registrypriv; */
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ DBG_88E("%s\n", __func__);
+
+ if ((pmlmeinfo->HT_info_enable) && (pmlmeinfo->HT_caps_enable)) {
+ pmlmeinfo->HT_enable = 1;
+ } else {
+ pmlmeinfo->HT_enable = 0;
+ return;
+ }
+
+ /* handle A-MPDU parameter field */
+ /*
+ AMPDU_para [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
+ AMPDU_para [4:2]:Min MPDU Start Spacing
+ */
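+ /* e.g. AMPDU_para = 0x17 -> max_AMPDU_len = 3 (64K A-MPDU), min_MPDU_spacing = 5 (4 us) */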
+ max_AMPDU_len = pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x03;
+
+ min_MPDU_spacing = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c) >> 2;
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_MIN_SPACE, (u8 *)(&min_MPDU_spacing));
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_FACTOR, (u8 *)(&max_AMPDU_len));
+}
+
+void ERP_IE_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (pIE->Length > 1)
+ return;
+
+ pmlmeinfo->ERP_enable = 1;
+ memcpy(&(pmlmeinfo->ERP_IE), pIE->data, pIE->Length);
+}
+
+void VCS_update(struct adapter *padapter, struct sta_info *psta)
+{
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ switch (pregpriv->vrtl_carrier_sense) { /* 0:off 1:on 2:auto */
+ case 0: /* off */
+ psta->rtsen = 0;
+ psta->cts2self = 0;
+ break;
+ case 1: /* on */
+ if (pregpriv->vcs_type == 1) { /* 1:RTS/CTS 2:CTS to self */
+ psta->rtsen = 1;
+ psta->cts2self = 0;
+ } else {
+ psta->rtsen = 0;
+ psta->cts2self = 1;
+ }
+ break;
+ case 2: /* auto */
+ default:
+ if ((pmlmeinfo->ERP_enable) && (pmlmeinfo->ERP_IE & BIT(1))) {
+ if (pregpriv->vcs_type == 1) {
+ psta->rtsen = 1;
+ psta->cts2self = 0;
+ } else {
+ psta->rtsen = 0;
+ psta->cts2self = 1;
+ }
+ } else {
+ psta->rtsen = 0;
+ psta->cts2self = 0;
+ }
+ break;
+ }
+}
+
+int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
+{
+ unsigned int len;
+ unsigned char *p;
+ unsigned short val16, subtype;
+ struct wlan_network *cur_network = &(Adapter->mlmepriv.cur_network);
+ /* u8 wpa_ie[255], rsn_ie[255]; */
+ u16 wpa_len = 0, rsn_len = 0;
+ u8 encryp_protocol = 0;
+ struct wlan_bssid_ex *bssid;
+ int group_cipher = 0, pairwise_cipher = 0, is_8021x = 0;
+ unsigned char *pbuf;
+ u32 wpa_ielen = 0;
+ u8 *pbssid = GetAddr3Ptr(pframe);
+ u32 hidden_ssid = 0;
+ struct HT_info_element *pht_info = NULL;
+ struct rtw_ieee80211_ht_cap *pht_cap = NULL;
+ u32 bcn_channel;
+ unsigned short ht_cap_info;
+ unsigned char ht_info_infos_0;
+
+ if (is_client_associated_to_ap(Adapter) == false)
+ return true;
+
+ len = packet_len - sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ if (len > MAX_IE_SZ) {
+ DBG_88E("%s IE too long for survey event\n", __func__);
+ return _FAIL;
+ }
+
+ if (_rtw_memcmp(cur_network->network.MacAddress, pbssid, 6) == false) {
+ DBG_88E("Oops: rtw_check_network_encrypt linked but recv other bssid bcn\n%pM %pM\n",
+ (pbssid), (cur_network->network.MacAddress));
+ return true;
+ }
+
+ bssid = (struct wlan_bssid_ex *)rtw_zmalloc(sizeof(struct wlan_bssid_ex));
+ if (!bssid)
+ return _FAIL;
+
+ subtype = GetFrameSubType(pframe) >> 4;
+
+ if (subtype == WIFI_BEACON)
+ bssid->Reserved[0] = 1;
+
+ bssid->Length = sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + len;
+
+ /* below is to copy the information element */
+ bssid->IELength = len;
+ memcpy(bssid->IEs, (pframe + sizeof(struct rtw_ieee80211_hdr_3addr)), bssid->IELength);
+
+ /* check bw and channel offset */
+ /* parsing HT_CAP_IE */
+ p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+ if (p && len > 0) {
+ pht_cap = (struct rtw_ieee80211_ht_cap *)(p + 2);
+ ht_cap_info = pht_cap->cap_info;
+ } else {
+ ht_cap_info = 0;
+ }
+ /* parsing HT_INFO_IE */
+ p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+ if (p && len > 0) {
+ pht_info = (struct HT_info_element *)(p + 2);
+ ht_info_infos_0 = pht_info->infos[0];
+ } else {
+ ht_info_infos_0 = 0;
+ }
+ if (ht_cap_info != cur_network->BcnInfo.ht_cap_info ||
+ ((ht_info_infos_0&0x03) != (cur_network->BcnInfo.ht_info_infos_0&0x03))) {
+ DBG_88E("%s bcn now: ht_cap_info:%x ht_info_infos_0:%x\n", __func__,
+ ht_cap_info, ht_info_infos_0);
+ DBG_88E("%s bcn link: ht_cap_info:%x ht_info_infos_0:%x\n", __func__,
+ cur_network->BcnInfo.ht_cap_info, cur_network->BcnInfo.ht_info_infos_0);
+ DBG_88E("%s bw mode change, disconnect\n", __func__);
+ /* bcn_info_update */
+ cur_network->BcnInfo.ht_cap_info = ht_cap_info;
+ cur_network->BcnInfo.ht_info_infos_0 = ht_info_infos_0;
+ /* to do : need to check that whether modify related register of BB or not */
+ /* goto _mismatch; */
+ }
+
+ /* Checking for channel */
+ p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _DSSET_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+ if (p) {
+ bcn_channel = *(p + 2);
+ } else {
+ /* In 5G, some APs do not carry a DSSET IE; fall back to the HT info IE parsed above */
+ if (pht_info) {
+ bcn_channel = pht_info->primary_channel;
+ } else {
+ /* no channel IE found at all, so skip the channel check */
+ DBG_88E("%s: no channel IE found, skipping channel check\n", __func__);
+ bcn_channel = Adapter->mlmeextpriv.cur_channel;
+ }
+ }
+ if (bcn_channel != Adapter->mlmeextpriv.cur_channel) {
+ DBG_88E("%s beacon channel:%d cur channel:%d disconnect\n", __func__,
+ bcn_channel, Adapter->mlmeextpriv.cur_channel);
+ goto _mismatch;
+ }
+
+ /* checking SSID */
+ p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _SSID_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+ if (p == NULL) {
+ DBG_88E("%s marc: cannot find SSID for survey event\n", __func__);
+ hidden_ssid = true;
+ } else {
+ hidden_ssid = false;
+ }
+
+ if (p && !hidden_ssid && *(p + 1)) {
+ memcpy(bssid->Ssid.Ssid, (p + 2), *(p + 1));
+ bssid->Ssid.SsidLength = *(p + 1);
+ } else {
+ bssid->Ssid.SsidLength = 0;
+ bssid->Ssid.Ssid[0] = '\0';
+ }
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s bssid.Ssid.Ssid:%s bssid.Ssid.SsidLength:%d "
+ "cur_network->network.Ssid.Ssid:%s len:%d\n", __func__, bssid->Ssid.Ssid,
+ bssid->Ssid.SsidLength, cur_network->network.Ssid.Ssid,
+ cur_network->network.Ssid.SsidLength));
+
+ if (!_rtw_memcmp(bssid->Ssid.Ssid, cur_network->network.Ssid.Ssid, 32) ||
+ bssid->Ssid.SsidLength != cur_network->network.Ssid.SsidLength) {
+ if (bssid->Ssid.Ssid[0] != '\0' && bssid->Ssid.SsidLength != 0) { /* not hidden ssid */
+ DBG_88E("%s(), SSID is not match return FAIL\n", __func__);
+ goto _mismatch;
+ }
+ }
+
+ /* check encryption info */
+ val16 = rtw_get_capability((struct wlan_bssid_ex *)bssid);
+
+ if (val16 & BIT(4))
+ bssid->Privacy = 1;
+ else
+ bssid->Privacy = 0;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("%s(): cur_network->network.Privacy is %d, bssid.Privacy is %d\n",
+ __func__, cur_network->network.Privacy, bssid->Privacy));
+ if (cur_network->network.Privacy != bssid->Privacy) {
+ DBG_88E("%s(), privacy is not match return FAIL\n", __func__);
+ goto _mismatch;
+ }
+
+ rtw_get_sec_ie(bssid->IEs, bssid->IELength, NULL, &rsn_len, NULL, &wpa_len);
+
+ if (rsn_len > 0) {
+ encryp_protocol = ENCRYP_PROTOCOL_WPA2;
+ } else if (wpa_len > 0) {
+ encryp_protocol = ENCRYP_PROTOCOL_WPA;
+ } else {
+ if (bssid->Privacy)
+ encryp_protocol = ENCRYP_PROTOCOL_WEP;
+ }
+
+ if (cur_network->BcnInfo.encryp_protocol != encryp_protocol) {
+ DBG_88E("%s(): enctyp is not match , return FAIL\n", __func__);
+ goto _mismatch;
+ }
+
+ if (encryp_protocol == ENCRYP_PROTOCOL_WPA || encryp_protocol == ENCRYP_PROTOCOL_WPA2) {
+ pbuf = rtw_get_wpa_ie(&bssid->IEs[12], &wpa_ielen, bssid->IELength-12);
+ if (pbuf && (wpa_ielen > 0)) {
+ if (_SUCCESS == rtw_parse_wpa_ie(pbuf, wpa_ielen+2, &group_cipher, &pairwise_cipher, &is_8021x)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("%s pnetwork->pairwise_cipher: %d, group_cipher is %d, is_8021x is %d\n", __func__,
+ pairwise_cipher, group_cipher, is_8021x));
+ }
+ } else {
+ pbuf = rtw_get_wpa2_ie(&bssid->IEs[12], &wpa_ielen, bssid->IELength-12);
+
+ if (pbuf && (wpa_ielen > 0)) {
+ if (_SUCCESS == rtw_parse_wpa2_ie(pbuf, wpa_ielen+2, &group_cipher, &pairwise_cipher, &is_8021x)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("%s pnetwork->pairwise_cipher: %d, pnetwork->group_cipher is %d, is_802x is %d\n",
+ __func__, pairwise_cipher, group_cipher, is_8021x));
+ }
+ }
+ }
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("%s cur_network->group_cipher is %d: %d\n", __func__, cur_network->BcnInfo.group_cipher, group_cipher));
+ if (pairwise_cipher != cur_network->BcnInfo.pairwise_cipher || group_cipher != cur_network->BcnInfo.group_cipher) {
+ DBG_88E("%s pairwise_cipher(%x:%x) or group_cipher(%x:%x) is not match , return FAIL\n", __func__,
+ pairwise_cipher, cur_network->BcnInfo.pairwise_cipher,
+ group_cipher, cur_network->BcnInfo.group_cipher);
+ goto _mismatch;
+ }
+
+ if (is_8021x != cur_network->BcnInfo.is_8021x) {
+ DBG_88E("%s authentication is not match , return FAIL\n", __func__);
+ goto _mismatch;
+ }
+ }
+
+ kfree(bssid);
+ return _SUCCESS;
+
+_mismatch:
+ kfree(bssid);
+ return _FAIL;
+}
+
+void update_beacon_info(struct adapter *padapter, u8 *pframe, uint pkt_len, struct sta_info *psta)
+{
+ unsigned int i;
+ unsigned int len;
+ struct ndis_802_11_var_ie *pIE;
+
+ len = pkt_len - (_BEACON_IE_OFFSET_ + WLAN_HDR_A3_LEN);
+
+ for (i = 0; i < len;) {
+ pIE = (struct ndis_802_11_var_ie *)(pframe + (_BEACON_IE_OFFSET_ + WLAN_HDR_A3_LEN) + i);
+
+ switch (pIE->ElementID) {
+ case _HT_EXTRA_INFO_IE_: /* HT info */
+ /* HT_info_handler(padapter, pIE); */
+ bwmode_update_check(padapter, pIE);
+ break;
+ case _ERPINFO_IE_:
+ ERP_IE_handler(padapter, pIE);
+ VCS_update(padapter, psta);
+ break;
+ default:
+ break;
+ }
+
+ i += (pIE->Length + 2);
+ }
+}
+
+unsigned int is_ap_in_tkip(struct adapter *padapter)
+{
+ u32 i;
+ struct ndis_802_11_var_ie *pIE;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+
+ if (rtw_get_capability((struct wlan_bssid_ex *)cur_network) & WLAN_CAPABILITY_PRIVACY) {
+ for (i = sizeof(struct ndis_802_11_fixed_ie); i < pmlmeinfo->network.IELength;) {
+ pIE = (struct ndis_802_11_var_ie *)(pmlmeinfo->network.IEs + i);
+
+ switch (pIE->ElementID) {
+ case _VENDOR_SPECIFIC_IE_:
+ if ((_rtw_memcmp(pIE->data, RTW_WPA_OUI, 4)) && (_rtw_memcmp((pIE->data + 12), WPA_TKIP_CIPHER, 4)))
+ return true;
+ break;
+ case _RSN_IE_2_:
+ if (_rtw_memcmp((pIE->data + 8), RSN_TKIP_CIPHER, 4))
+ return true;
+ break;
+ default:
+ break;
+ }
+
+ i += (pIE->Length + 2);
+ }
+ return false;
+ } else {
+ return false;
+ }
+}
+
+unsigned int should_forbid_n_rate(struct adapter *padapter)
+{
+ u32 i;
+ struct ndis_802_11_var_ie *pIE;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_bssid_ex *cur_network = &pmlmepriv->cur_network.network;
+
+ if (rtw_get_capability((struct wlan_bssid_ex *)cur_network) & WLAN_CAPABILITY_PRIVACY) {
+ for (i = sizeof(struct ndis_802_11_fixed_ie); i < cur_network->IELength;) {
+ pIE = (struct ndis_802_11_var_ie *)(cur_network->IEs + i);
+
+ switch (pIE->ElementID) {
+ case _VENDOR_SPECIFIC_IE_:
+ if (_rtw_memcmp(pIE->data, RTW_WPA_OUI, 4) &&
+ ((_rtw_memcmp((pIE->data + 12), WPA_CIPHER_SUITE_CCMP, 4)) ||
+ (_rtw_memcmp((pIE->data + 16), WPA_CIPHER_SUITE_CCMP, 4))))
+ return false;
+ break;
+ case _RSN_IE_2_:
+ if ((_rtw_memcmp((pIE->data + 8), RSN_CIPHER_SUITE_CCMP, 4)) ||
+ (_rtw_memcmp((pIE->data + 12), RSN_CIPHER_SUITE_CCMP, 4)))
+ return false;
+ break;
+ default:
+ break;
+ }
+
+ i += (pIE->Length + 2);
+ }
+
+ return true;
+ } else {
+ return false;
+ }
+}
+
+unsigned int is_ap_in_wep(struct adapter *padapter)
+{
+ u32 i;
+ struct ndis_802_11_var_ie *pIE;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+
+ if (rtw_get_capability((struct wlan_bssid_ex *)cur_network) & WLAN_CAPABILITY_PRIVACY) {
+ for (i = sizeof(struct ndis_802_11_fixed_ie); i < pmlmeinfo->network.IELength;) {
+ pIE = (struct ndis_802_11_var_ie *)(pmlmeinfo->network.IEs + i);
+
+ switch (pIE->ElementID) {
+ case _VENDOR_SPECIFIC_IE_:
+ if (_rtw_memcmp(pIE->data, RTW_WPA_OUI, 4))
+ return false;
+ break;
+ case _RSN_IE_2_:
+ return false;
+ default:
+ break;
+ }
+ i += (pIE->Length + 2);
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
+
+int wifirate2_ratetbl_inx(unsigned char rate)
+{
+ int inx = 0;
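+ /* Rates are in 500 kbps units (e.g. 108 = 54 Mbps, 11 = 5.5 Mbps); bit 7 flags a basic rate and is stripped below. */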
+ rate = rate & 0x7f;
+
+ switch (rate) {
+ case 54*2:
+ inx = 11;
+ break;
+ case 48*2:
+ inx = 10;
+ break;
+ case 36*2:
+ inx = 9;
+ break;
+ case 24*2:
+ inx = 8;
+ break;
+ case 18*2:
+ inx = 7;
+ break;
+ case 12*2:
+ inx = 6;
+ break;
+ case 9*2:
+ inx = 5;
+ break;
+ case 6*2:
+ inx = 4;
+ break;
+ case 11*2:
+ inx = 3;
+ break;
+ case 11:
+ inx = 2;
+ break;
+ case 2*2:
+ inx = 1;
+ break;
+ case 1*2:
+ inx = 0;
+ break;
+ }
+ return inx;
+}
+
+unsigned int update_basic_rate(unsigned char *ptn, unsigned int ptn_sz)
+{
+ unsigned int i, num_of_rate;
+ unsigned int mask = 0;
+
+ num_of_rate = (ptn_sz > NumRates) ? NumRates : ptn_sz;
+
+ for (i = 0; i < num_of_rate; i++) {
+ if ((*(ptn + i)) & 0x80)
+ mask |= 0x1 << wifirate2_ratetbl_inx(*(ptn + i));
+ }
+ return mask;
+}
+
+unsigned int update_supported_rate(unsigned char *ptn, unsigned int ptn_sz)
+{
+ unsigned int i, num_of_rate;
+ unsigned int mask = 0;
+
+ num_of_rate = (ptn_sz > NumRates) ? NumRates : ptn_sz;
+
+ for (i = 0; i < num_of_rate; i++)
+ mask |= 0x1 << wifirate2_ratetbl_inx(*(ptn + i));
+ return mask;
+}
+
+unsigned int update_MSC_rate(struct HT_caps_element *pHT_caps)
+{
+ unsigned int mask = 0;
+
+ mask = ((pHT_caps->u.HT_cap_element.MCS_rate[0] << 12) | (pHT_caps->u.HT_cap_element.MCS_rate[1] << 20));
+
+ return mask;
+}
+
+int support_short_GI(struct adapter *padapter, struct HT_caps_element *pHT_caps)
+{
+ unsigned char bit_offset;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (!(pmlmeinfo->HT_enable))
+ return _FAIL;
+
+ if ((pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_RALINK))
+ return _FAIL;
+
+ bit_offset = (pmlmeext->cur_bwmode & HT_CHANNEL_WIDTH_40) ? 6 : 5;
+
+ if (__le16_to_cpu(pHT_caps->u.HT_cap_element.HT_caps_info) & (0x1 << bit_offset))
+ return _SUCCESS;
+ else
+ return _FAIL;
+}
+
+unsigned char get_highest_rate_idx(u32 mask)
+{
+ int i;
+ unsigned char rate_idx = 0;
+
+ for (i = 27; i >= 0; i--) {
+ if (mask & BIT(i)) {
+ rate_idx = i;
+ break;
+ }
+ }
+ return rate_idx;
+}
+
+void Update_RA_Entry(struct adapter *padapter, u32 mac_id)
+{
+ rtw_hal_update_ra_mask(padapter, mac_id, 0);
+}
+
+static void enable_rate_adaptive(struct adapter *padapter, u32 mac_id)
+{
+ Update_RA_Entry(padapter, mac_id);
+}
+
+void set_sta_rate(struct adapter *padapter, struct sta_info *psta)
+{
+ /* rate adaptive */
+ enable_rate_adaptive(padapter, psta->mac_id);
+}
+
+/* Update RRSR and Rate for USERATE */
+void update_tx_basic_rate(struct adapter *padapter, u8 wirelessmode)
+{
+ unsigned char supported_rates[NDIS_802_11_LENGTH_RATES_EX];
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+
+ /* Added by Albert 2011/03/22 */
+ /* In the P2P mode, the driver should not support the b mode. */
+ /* So, the Tx packet shouldn't use the CCK rate */
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return;
+#endif /* CONFIG_88EU_P2P */
+ _rtw_memset(supported_rates, 0, NDIS_802_11_LENGTH_RATES_EX);
+
+ if ((wirelessmode & WIRELESS_11B) && (wirelessmode == WIRELESS_11B)) {
+ memcpy(supported_rates, rtw_basic_rate_cck, 4);
+ } else if (wirelessmode & WIRELESS_11B) {
+ memcpy(supported_rates, rtw_basic_rate_mix, 7);
+ } else {
+ memcpy(supported_rates, rtw_basic_rate_ofdm, 3);
+ }
+
+ if (wirelessmode & WIRELESS_11B)
+ update_mgnt_tx_rate(padapter, IEEE80211_CCK_RATE_1MB);
+ else
+ update_mgnt_tx_rate(padapter, IEEE80211_OFDM_RATE_6MB);
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_BASIC_RATE, supported_rates);
+}
+
+unsigned char check_assoc_AP(u8 *pframe, uint len)
+{
+ unsigned int i;
+ struct ndis_802_11_var_ie *pIE;
+ u8 epigram_vendor_flag;
+ u8 ralink_vendor_flag;
+ epigram_vendor_flag = 0;
+ ralink_vendor_flag = 0;
+
+ for (i = sizeof(struct ndis_802_11_fixed_ie); i < len;) {
+ pIE = (struct ndis_802_11_var_ie *)(pframe + i);
+
+ switch (pIE->ElementID) {
+ case _VENDOR_SPECIFIC_IE_:
+ if ((_rtw_memcmp(pIE->data, ARTHEROS_OUI1, 3)) ||
+ (_rtw_memcmp(pIE->data, ARTHEROS_OUI2, 3))) {
+ DBG_88E("link to Artheros AP\n");
+ return HT_IOT_PEER_ATHEROS;
+ } else if ((_rtw_memcmp(pIE->data, BROADCOM_OUI1, 3)) ||
+ (_rtw_memcmp(pIE->data, BROADCOM_OUI2, 3))) {
+ DBG_88E("link to Broadcom AP\n");
+ return HT_IOT_PEER_BROADCOM;
+ } else if (_rtw_memcmp(pIE->data, MARVELL_OUI, 3)) {
+ DBG_88E("link to Marvell AP\n");
+ return HT_IOT_PEER_MARVELL;
+ } else if (_rtw_memcmp(pIE->data, RALINK_OUI, 3)) {
+ if (!ralink_vendor_flag) {
+ ralink_vendor_flag = 1;
+ } else {
+ DBG_88E("link to Ralink AP\n");
+ return HT_IOT_PEER_RALINK;
+ }
+ } else if (_rtw_memcmp(pIE->data, CISCO_OUI, 3)) {
+ DBG_88E("link to Cisco AP\n");
+ return HT_IOT_PEER_CISCO;
+ } else if (_rtw_memcmp(pIE->data, REALTEK_OUI, 3)) {
+ DBG_88E("link to Realtek 96B\n");
+ return HT_IOT_PEER_REALTEK;
+ } else if (_rtw_memcmp(pIE->data, AIRGOCAP_OUI, 3)) {
+ DBG_88E("link to Airgo Cap\n");
+ return HT_IOT_PEER_AIRGO;
+ } else if (_rtw_memcmp(pIE->data, EPIGRAM_OUI, 3)) {
+ epigram_vendor_flag = 1;
+ if (ralink_vendor_flag) {
+ DBG_88E("link to Tenda W311R AP\n");
+ return HT_IOT_PEER_TENDA;
+ } else {
+ DBG_88E("Capture EPIGRAM_OUI\n");
+ }
+ } else {
+ break;
+ }
+
+ default:
+ break;
+ }
+ i += (pIE->Length + 2);
+ }
+
+ if (ralink_vendor_flag && !epigram_vendor_flag) {
+ DBG_88E("link to Ralink AP\n");
+ return HT_IOT_PEER_RALINK;
+ } else if (ralink_vendor_flag && epigram_vendor_flag) {
+ DBG_88E("link to Tenda W311R AP\n");
+ return HT_IOT_PEER_TENDA;
+ } else {
+ DBG_88E("link to new AP\n");
+ return HT_IOT_PEER_UNKNOWN;
+ }
+}
+
+void update_IOT_info(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ switch (pmlmeinfo->assoc_AP_vendor) {
+ case HT_IOT_PEER_MARVELL:
+ pmlmeinfo->turboMode_cts2self = 1;
+ pmlmeinfo->turboMode_rtsen = 0;
+ break;
+ case HT_IOT_PEER_RALINK:
+ pmlmeinfo->turboMode_cts2self = 0;
+ pmlmeinfo->turboMode_rtsen = 1;
+ /* disable high power */
+ Switch_DM_Func(padapter, (~DYNAMIC_BB_DYNAMIC_TXPWR), false);
+ break;
+ case HT_IOT_PEER_REALTEK:
+ /* rtw_write16(padapter, 0x4cc, 0xffff); */
+ /* rtw_write16(padapter, 0x546, 0x01c0); */
+ /* disable high power */
+ Switch_DM_Func(padapter, (~DYNAMIC_BB_DYNAMIC_TXPWR), false);
+ break;
+ default:
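+ /*
+ * CAM entries are spaced 8 dwords apart (entry << 3); dwords 0..5 are
+ * written here, high dword first: dword 0 = ctrl | MAC[0..1],
+ * dword 1 = MAC[2..5], dwords 2..5 = the 16-byte key.
+ */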
+ pmlmeinfo->turboMode_cts2self = 0;
+ pmlmeinfo->turboMode_rtsen = 1;
+ break;
+ }
+}
+
+void update_capinfo(struct adapter *Adapter, u16 updateCap)
+{
+ struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ bool ShortPreamble;
+
+ /* Check preamble mode, 2005.01.06, by rcnjko. */
+ /* Mark to update preamble value forever, 2008.03.18 by lanhsin */
+
+ if (updateCap & cShortPreamble) { /* Short Preamble */
+ if (pmlmeinfo->preamble_mode != PREAMBLE_SHORT) { /* PREAMBLE_LONG or PREAMBLE_AUTO */
+ ShortPreamble = true;
+ pmlmeinfo->preamble_mode = PREAMBLE_SHORT;
+ rtw_hal_set_hwreg(Adapter, HW_VAR_ACK_PREAMBLE, (u8 *)&ShortPreamble);
+ }
+ } else { /* Long Preamble */
+ if (pmlmeinfo->preamble_mode != PREAMBLE_LONG) { /* PREAMBLE_SHORT or PREAMBLE_AUTO */
+ ShortPreamble = false;
+ pmlmeinfo->preamble_mode = PREAMBLE_LONG;
+ rtw_hal_set_hwreg(Adapter, HW_VAR_ACK_PREAMBLE, (u8 *)&ShortPreamble);
+ }
+ }
+
+ if (updateCap & cIBSS) {
+ /* Filen: See 802.11-2007 p.91 */
+ pmlmeinfo->slotTime = NON_SHORT_SLOT_TIME;
+ } else { /* Filen: See 802.11-2007 p.90 */
+ if (pmlmeext->cur_wireless_mode & (WIRELESS_11G | WIRELESS_11_24N)) {
+ if (updateCap & cShortSlotTime) { /* Short Slot Time */
+ if (pmlmeinfo->slotTime != SHORT_SLOT_TIME)
+ pmlmeinfo->slotTime = SHORT_SLOT_TIME;
+ } else { /* Long Slot Time */
+ if (pmlmeinfo->slotTime != NON_SHORT_SLOT_TIME)
+ pmlmeinfo->slotTime = NON_SHORT_SLOT_TIME;
+ }
+ } else if (pmlmeext->cur_wireless_mode & (WIRELESS_11A | WIRELESS_11_5N)) {
+ pmlmeinfo->slotTime = SHORT_SLOT_TIME;
+ } else {
+ /* B Mode */
+ pmlmeinfo->slotTime = NON_SHORT_SLOT_TIME;
+ }
+ }
+
+ rtw_hal_set_hwreg(Adapter, HW_VAR_SLOT_TIME, &pmlmeinfo->slotTime);
+}
+
+void update_wireless_mode(struct adapter *padapter)
+{
+ int ratelen, network_type = 0;
+ u32 SIFS_Timer;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+ unsigned char *rate = cur_network->SupportedRates;
+
+ ratelen = rtw_get_rateset_len(cur_network->SupportedRates);
+
+ if ((pmlmeinfo->HT_info_enable) && (pmlmeinfo->HT_caps_enable))
+ pmlmeinfo->HT_enable = 1;
+
+ if (pmlmeext->cur_channel > 14) {
+ if (pmlmeinfo->HT_enable)
+ network_type = WIRELESS_11_5N;
+
+ network_type |= WIRELESS_11A;
+ } else {
+ if (pmlmeinfo->HT_enable)
+ network_type = WIRELESS_11_24N;
+
+ if ((cckratesonly_included(rate, ratelen)) == true)
+ network_type |= WIRELESS_11B;
+ else if ((cckrates_included(rate, ratelen)) == true)
+ network_type |= WIRELESS_11BG;
+ else
+ network_type |= WIRELESS_11G;
+ }
+
+ pmlmeext->cur_wireless_mode = network_type & padapter->registrypriv.wireless_mode;
+
+ SIFS_Timer = 0x0a0a0808;/* 0x0808 -> for CCK, 0x0a0a -> for OFDM */
+ /* change this value if having IOT issues. */
+
+ padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_RESP_SIFS, (u8 *)&SIFS_Timer);
+
+ if (pmlmeext->cur_wireless_mode & WIRELESS_11B)
+ update_mgnt_tx_rate(padapter, IEEE80211_CCK_RATE_1MB);
+ else
+ update_mgnt_tx_rate(padapter, IEEE80211_OFDM_RATE_6MB);
+}
+
+void update_bmc_sta_support_rate(struct adapter *padapter, u32 mac_id)
+{
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (pmlmeext->cur_wireless_mode & WIRELESS_11B) {
+ /* Only B, B/G, and B/G/N AP could use CCK rate */
+ memcpy((pmlmeinfo->FW_sta_info[mac_id].SupportedRates), rtw_basic_rate_cck, 4);
+ } else {
+ memcpy((pmlmeinfo->FW_sta_info[mac_id].SupportedRates), rtw_basic_rate_ofdm, 3);
+ }
+}
+
+int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_len, int cam_idx)
+{
+ unsigned int ie_len;
+ struct ndis_802_11_var_ie *pIE;
+ int supportRateNum = 0;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ pIE = (struct ndis_802_11_var_ie *)rtw_get_ie(pvar_ie, _SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
+ if (pIE == NULL)
+ return _FAIL;
+
+ memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, pIE->data, ie_len);
+ supportRateNum = ie_len;
+
+ pIE = (struct ndis_802_11_var_ie *)rtw_get_ie(pvar_ie, _EXT_SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
+ if (pIE)
+ memcpy((pmlmeinfo->FW_sta_info[cam_idx].SupportedRates + supportRateNum), pIE->data, ie_len);
+
+ return _SUCCESS;
+}
+
+void process_addba_req(struct adapter *padapter, u8 *paddba_req, u8 *addr)
+{
+ struct sta_info *psta;
+ u16 tid;
+ u16 param;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct ADDBA_request *preq = (struct ADDBA_request *)paddba_req;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ psta = rtw_get_stainfo(pstapriv, addr);
+
+ if (psta) {
+ param = le16_to_cpu(preq->BA_para_set);
+ tid = (param>>2)&0x0f;
+ preorder_ctrl = &psta->recvreorder_ctrl[tid];
+ preorder_ctrl->indicate_seq = 0xffff;
+ preorder_ctrl->enable = (pmlmeinfo->bAcceptAddbaReq) ? true : false;
+ }
+}
+
+void update_TSF(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len)
+{
+ u8 *pIE;
+ __le32 *pbuf;
+
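+ /*
+ * The beacon Timestamp field is an 8-byte little-endian value immediately
+ * following the 3-address 802.11 header; the high dword sits at offset +4.
+ */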
+ pIE = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
+ pbuf = (__le32 *)pIE;
+
+ pmlmeext->TSFValue = le32_to_cpu(*(pbuf+1));
+
+ pmlmeext->TSFValue = pmlmeext->TSFValue << 32;
+
+ pmlmeext->TSFValue |= le32_to_cpu(*pbuf);
+}
+
+void correct_TSF(struct adapter *padapter, struct mlme_ext_priv *pmlmeext)
+{
+ rtw_hal_set_hwreg(padapter, HW_VAR_CORRECT_TSF, NULL);
+}
+
+void beacon_timing_control(struct adapter *padapter)
+{
+ rtw_hal_bcn_related_reg_setting(padapter);
+}
+
+static struct adapter *pbuddy_padapter;
+
+int rtw_handle_dualmac(struct adapter *adapter, bool init)
+{
+ int status = _SUCCESS;
+
+ if (init) {
+ if (pbuddy_padapter == NULL) {
+ pbuddy_padapter = adapter;
+ DBG_88E("%s(): pbuddy_padapter == NULL, Set pbuddy_padapter\n", __func__);
+ } else {
+ adapter->pbuddy_adapter = pbuddy_padapter;
+ pbuddy_padapter->pbuddy_adapter = adapter;
+ /* clear global value */
+ pbuddy_padapter = NULL;
+ DBG_88E("%s(): pbuddy_padapter exist, Exchange Information\n", __func__);
+ }
+ } else {
+ pbuddy_padapter = NULL;
+ }
+ return status;
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
new file mode 100644
index 0000000..bb5cd95
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -0,0 +1,2447 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_XMIT_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <wifi.h>
+#include <osdep_intf.h>
+#include <ip.h>
+#include <usb_ops.h>
+#include <usb_osintf.h>
+
+static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
+static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
+
+static void _init_txservq(struct tx_servq *ptxservq)
+{
+_func_enter_;
+ _rtw_init_listhead(&ptxservq->tx_pending);
+ _rtw_init_queue(&ptxservq->sta_pending);
+ ptxservq->qcnt = 0;
+_func_exit_;
+}
+
+void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv)
+{
+_func_enter_;
+ _rtw_memset((unsigned char *)psta_xmitpriv, 0, sizeof(struct sta_xmit_priv));
+ _rtw_spinlock_init(&psta_xmitpriv->lock);
+ _init_txservq(&psta_xmitpriv->be_q);
+ _init_txservq(&psta_xmitpriv->bk_q);
+ _init_txservq(&psta_xmitpriv->vi_q);
+ _init_txservq(&psta_xmitpriv->vo_q);
+ _rtw_init_listhead(&psta_xmitpriv->legacy_dz);
+ _rtw_init_listhead(&psta_xmitpriv->apsd);
+
+_func_exit_;
+}
+
+s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
+{
+ int i;
+ struct xmit_buf *pxmitbuf;
+ struct xmit_frame *pxframe;
+ int res = _SUCCESS;
+ u32 max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ;
+ u32 num_xmit_extbuf = NR_XMIT_EXTBUFF;
+
+_func_enter_;
+
+ /* We don't need to memset padapter->XXX to zero, because adapter is allocated by rtw_zvmalloc(). */
+
+ _rtw_spinlock_init(&pxmitpriv->lock);
+ _rtw_init_sema(&pxmitpriv->xmit_sema, 0);
+ _rtw_init_sema(&pxmitpriv->terminate_xmitthread_sema, 0);
+
+ /*
+ Please insert all the queue initialization using _rtw_init_queue below.
+ */
+
+ pxmitpriv->adapter = padapter;
+
+ _rtw_init_queue(&pxmitpriv->be_pending);
+ _rtw_init_queue(&pxmitpriv->bk_pending);
+ _rtw_init_queue(&pxmitpriv->vi_pending);
+ _rtw_init_queue(&pxmitpriv->vo_pending);
+ _rtw_init_queue(&pxmitpriv->bm_pending);
+
+ _rtw_init_queue(&pxmitpriv->free_xmit_queue);
+
+ /*
+ Allocate memory of size sizeof(struct xmit_frame) * NR_XMITFRAME,
+ initialize free_xmit_frame below, and link all the xmit_frames
+ into the free queue.
+ */
+
+ pxmitpriv->pallocated_frame_buf = rtw_zvmalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4);
+
+ if (pxmitpriv->pallocated_frame_buf == NULL) {
+ pxmitpriv->pxmit_frame_buf = NULL;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_frame fail!\n"));
+ res = _FAIL;
+ goto exit;
+ }
+ pxmitpriv->pxmit_frame_buf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitpriv->pallocated_frame_buf), 4);
+ /* pxmitpriv->pxmit_frame_buf = pxmitpriv->pallocated_frame_buf + 4 - */
+ /* ((size_t) (pxmitpriv->pallocated_frame_buf) &3); */
+
+ pxframe = (struct xmit_frame *)pxmitpriv->pxmit_frame_buf;
+
+ for (i = 0; i < NR_XMITFRAME; i++) {
+ _rtw_init_listhead(&(pxframe->list));
+
+ pxframe->padapter = padapter;
+ pxframe->frame_tag = NULL_FRAMETAG;
+
+ pxframe->pkt = NULL;
+
+ pxframe->buf_addr = NULL;
+ pxframe->pxmitbuf = NULL;
+
+ rtw_list_insert_tail(&(pxframe->list), &(pxmitpriv->free_xmit_queue.queue));
+
+ pxframe++;
+ }
+
+ pxmitpriv->free_xmitframe_cnt = NR_XMITFRAME;
+
+ pxmitpriv->frag_len = MAX_FRAG_THRESHOLD;
+
+ /* init xmit_buf */
+ _rtw_init_queue(&pxmitpriv->free_xmitbuf_queue);
+ _rtw_init_queue(&pxmitpriv->pending_xmitbuf_queue);
+
+ pxmitpriv->pallocated_xmitbuf = rtw_zvmalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4);
+
+ if (pxmitpriv->pallocated_xmitbuf == NULL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_buf fail!\n"));
+ res = _FAIL;
+ goto exit;
+ }
+
+ pxmitpriv->pxmitbuf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitpriv->pallocated_xmitbuf), 4);
+ /* pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 - */
+ /* ((size_t) (pxmitpriv->pallocated_xmitbuf) &3); */
+
+ pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
+
+ for (i = 0; i < NR_XMITBUFF; i++) {
+ _rtw_init_listhead(&pxmitbuf->list);
+
+ pxmitbuf->priv_data = NULL;
+ pxmitbuf->padapter = padapter;
+ pxmitbuf->ext_tag = false;
+
+ /* Tx buf allocation may fail sometimes, so sleep and retry. */
+ res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
+ if (res == _FAIL) {
+ rtw_msleep_os(10);
+ res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
+ if (res == _FAIL) {
+ goto exit;
+ }
+ }
+
+ pxmitbuf->flags = XMIT_VO_QUEUE;
+
+ rtw_list_insert_tail(&pxmitbuf->list, &(pxmitpriv->free_xmitbuf_queue.queue));
+ pxmitbuf++;
+ }
+
+ pxmitpriv->free_xmitbuf_cnt = NR_XMITBUFF;
+
+ /* Init xmit extension buff */
+ _rtw_init_queue(&pxmitpriv->free_xmit_extbuf_queue);
+
+ pxmitpriv->pallocated_xmit_extbuf = rtw_zvmalloc(num_xmit_extbuf * sizeof(struct xmit_buf) + 4);
+
+ if (pxmitpriv->pallocated_xmit_extbuf == NULL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_extbuf fail!\n"));
+ res = _FAIL;
+ goto exit;
+ }
+
+ pxmitpriv->pxmit_extbuf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitpriv->pallocated_xmit_extbuf), 4);
+
+ pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
+
+ for (i = 0; i < num_xmit_extbuf; i++) {
+ _rtw_init_listhead(&pxmitbuf->list);
+
+ pxmitbuf->priv_data = NULL;
+ pxmitbuf->padapter = padapter;
+ pxmitbuf->ext_tag = true;
+
+ res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, max_xmit_extbuf_size + XMITBUF_ALIGN_SZ);
+ if (res == _FAIL)
+ goto exit;
+
+ rtw_list_insert_tail(&pxmitbuf->list, &(pxmitpriv->free_xmit_extbuf_queue.queue));
+ pxmitbuf++;
+ }
+
+ pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
+
+ rtw_alloc_hwxmits(padapter);
+ rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
+
+ for (i = 0; i < 4; i++)
+ pxmitpriv->wmm_para_seq[i] = i;
+
+ pxmitpriv->txirp_cnt = 1;
+
+ _rtw_init_sema(&(pxmitpriv->tx_retevt), 0);
+
+ /* per AC pending irp */
+ pxmitpriv->beq_cnt = 0;
+ pxmitpriv->bkq_cnt = 0;
+ pxmitpriv->viq_cnt = 0;
+ pxmitpriv->voq_cnt = 0;
+
+ pxmitpriv->ack_tx = false;
+ _rtw_mutex_init(&pxmitpriv->ack_tx_mutex);
+ rtw_sctx_init(&pxmitpriv->ack_tx_ops, 0);
+
+ rtw_hal_init_xmit_priv(padapter);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+static void rtw_mfree_xmit_priv_lock(struct xmit_priv *pxmitpriv)
+{
+ _rtw_spinlock_free(&pxmitpriv->lock);
+ _rtw_free_sema(&pxmitpriv->xmit_sema);
+ _rtw_free_sema(&pxmitpriv->terminate_xmitthread_sema);
+
+ _rtw_spinlock_free(&pxmitpriv->be_pending.lock);
+ _rtw_spinlock_free(&pxmitpriv->bk_pending.lock);
+ _rtw_spinlock_free(&pxmitpriv->vi_pending.lock);
+ _rtw_spinlock_free(&pxmitpriv->vo_pending.lock);
+ _rtw_spinlock_free(&pxmitpriv->bm_pending.lock);
+
+ _rtw_spinlock_free(&pxmitpriv->free_xmit_queue.lock);
+ _rtw_spinlock_free(&pxmitpriv->free_xmitbuf_queue.lock);
+ _rtw_spinlock_free(&pxmitpriv->pending_xmitbuf_queue.lock);
+}
+
+void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
+{
+ int i;
+ struct adapter *padapter = pxmitpriv->adapter;
+ struct xmit_frame *pxmitframe = (struct xmit_frame *)pxmitpriv->pxmit_frame_buf;
+ struct xmit_buf *pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
+ u32 max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ;
+ u32 num_xmit_extbuf = NR_XMIT_EXTBUFF;
+
+ _func_enter_;
+
+ rtw_hal_free_xmit_priv(padapter);
+
+ rtw_mfree_xmit_priv_lock(pxmitpriv);
+
+ if (pxmitpriv->pxmit_frame_buf == NULL)
+ goto out;
+
+ for (i = 0; i < NR_XMITFRAME; i++) {
+ rtw_os_xmit_complete(padapter, pxmitframe);
+
+ pxmitframe++;
+ }
+
+ for (i = 0; i < NR_XMITBUFF; i++) {
+ rtw_os_xmit_resource_free(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
+ pxmitbuf++;
+ }
+
+ if (pxmitpriv->pallocated_frame_buf)
+ rtw_vmfree(pxmitpriv->pallocated_frame_buf, NR_XMITFRAME * sizeof(struct xmit_frame) + 4);
+
+ if (pxmitpriv->pallocated_xmitbuf)
+ rtw_vmfree(pxmitpriv->pallocated_xmitbuf, NR_XMITBUFF * sizeof(struct xmit_buf) + 4);
+
+ /* free xmit extension buff */
+ _rtw_spinlock_free(&pxmitpriv->free_xmit_extbuf_queue.lock);
+
+ pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
+ for (i = 0; i < num_xmit_extbuf; i++) {
+ rtw_os_xmit_resource_free(padapter, pxmitbuf, (max_xmit_extbuf_size + XMITBUF_ALIGN_SZ));
+ pxmitbuf++;
+ }
+
+ if (pxmitpriv->pallocated_xmit_extbuf) {
+ rtw_vmfree(pxmitpriv->pallocated_xmit_extbuf, num_xmit_extbuf * sizeof(struct xmit_buf) + 4);
+ }
+
+ rtw_free_hwxmits(padapter);
+
+ _rtw_mutex_free(&pxmitpriv->ack_tx_mutex);
+
+out:
+
+_func_exit_;
+}
+
+static void update_attrib_vcs_info(struct adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ u32 sz;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct sta_info *psta = pattrib->psta;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (pattrib->nr_frags != 1)
+ sz = padapter->xmitpriv.frag_len;
+ else /* no frag */
+ sz = pattrib->last_txcmdsz;
+
+ /* (1) RTS_Threshold is compared to the MPDU, not MSDU. */
+ /* (2) If there are more than one frag in this MSDU, only the first frag uses protection frame. */
+ /* Other fragments are protected by previous fragment. */
+ /* So we only need to check the length of first fragment. */
+ if (pmlmeext->cur_wireless_mode < WIRELESS_11_24N || padapter->registrypriv.wifi_spec) {
+ if (sz > padapter->registrypriv.rts_thresh) {
+ pattrib->vcs_mode = RTS_CTS;
+ } else {
+ if (psta->rtsen)
+ pattrib->vcs_mode = RTS_CTS;
+ else if (psta->cts2self)
+ pattrib->vcs_mode = CTS_TO_SELF;
+ else
+ pattrib->vcs_mode = NONE_VCS;
+ }
+ } else {
+ while (true) {
+ /* IOT action */
+ if ((pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_ATHEROS) && pattrib->ampdu_en &&
+ (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)) {
+ pattrib->vcs_mode = CTS_TO_SELF;
+ break;
+ }
+
+ /* check ERP protection */
+ if (psta->rtsen || psta->cts2self) {
+ if (psta->rtsen)
+ pattrib->vcs_mode = RTS_CTS;
+ else if (psta->cts2self)
+ pattrib->vcs_mode = CTS_TO_SELF;
+
+ break;
+ }
+
+ /* check HT op mode */
+ if (pattrib->ht_en) {
+ u8 htopmode = pmlmeinfo->HT_protection;
+ if ((pmlmeext->cur_bwmode && (htopmode == 2 || htopmode == 3)) ||
+ (!pmlmeext->cur_bwmode && htopmode == 3)) {
+ pattrib->vcs_mode = RTS_CTS;
+ break;
+ }
+ }
+
+ /* check rts */
+ if (sz > padapter->registrypriv.rts_thresh) {
+ pattrib->vcs_mode = RTS_CTS;
+ break;
+ }
+
+ /* to do list: check MIMO power save condition. */
+
+ /* check AMPDU aggregation for TXOP */
+ if (pattrib->ampdu_en) {
+ pattrib->vcs_mode = RTS_CTS;
+ break;
+ }
+
+ pattrib->vcs_mode = NONE_VCS;
+ break;
+ }
+ }
+}
+
+static void update_attrib_phy_info(struct pkt_attrib *pattrib, struct sta_info *psta)
+{
+ /*if (psta->rtsen)
+ pattrib->vcs_mode = RTS_CTS;
+ else if (psta->cts2self)
+ pattrib->vcs_mode = CTS_TO_SELF;
+ else
+ pattrib->vcs_mode = NONE_VCS;*/
+
+ pattrib->mdata = 0;
+ pattrib->eosp = 0;
+ pattrib->triggered = 0;
+
+ /* qos_en, ht_en, init rate, , bw, ch_offset, sgi */
+ pattrib->qos_en = psta->qos_option;
+
+ pattrib->raid = psta->raid;
+ pattrib->ht_en = psta->htpriv.ht_option;
+ pattrib->bwmode = psta->htpriv.bwmode;
+ pattrib->ch_offset = psta->htpriv.ch_offset;
+ pattrib->sgi = psta->htpriv.sgi;
+ pattrib->ampdu_en = false;
+ pattrib->retry_ctrl = false;
+}
+
+u8 qos_acm(u8 acm_mask, u8 priority)
+{
+ u8 change_priority = priority;
+
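+ /*
+ * If admission control is mandatory (ACM bit set) for an AC, downgrade
+ * the priority to the next lower AC: VO(6,7) -> VI(5), VI(4,5) -> BE(0),
+ * BE(0,3) -> BK(1).
+ */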
+ switch (priority) {
+ case 0:
+ case 3:
+ if (acm_mask & BIT(1))
+ change_priority = 1;
+ break;
+ case 1:
+ case 2:
+ break;
+ case 4:
+ case 5:
+ if (acm_mask & BIT(2))
+ change_priority = 0;
+ break;
+ case 6:
+ case 7:
+ if (acm_mask & BIT(3))
+ change_priority = 5;
+ break;
+ default:
+ DBG_88E("qos_acm(): invalid pattrib->priority: %d!!!\n", priority);
+ break;
+ }
+
+ return change_priority;
+}
+
+static void set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
+{
+ struct ethhdr etherhdr;
+ struct iphdr ip_hdr;
+ s32 user_prio = 0;
+
+ _rtw_open_pktfile(ppktfile->pkt, ppktfile);
+ _rtw_pktfile_read(ppktfile, (unsigned char *)&etherhdr, ETH_HLEN);
+
+ /* get user_prio from IP hdr */
+ if (pattrib->ether_type == 0x0800) {
+ _rtw_pktfile_read(ppktfile, (u8 *)&ip_hdr, sizeof(ip_hdr));
+/* user_prio = (ntohs(ip_hdr.tos) >> 5) & 0x3; */
+ user_prio = ip_hdr.tos >> 5;
+ } else if (pattrib->ether_type == 0x888e) {
+ /* "When priority processing of data frames is supported, */
+ /* a STA's SME should send EAPOL-Key frames at the highest priority." */
+ user_prio = 7;
+ }
+
+ pattrib->priority = user_prio;
+ pattrib->hdrlen = WLAN_HDR_A3_QOS_LEN;
+ pattrib->subtype = WIFI_QOS_DATA_TYPE;
+}
+
+static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct pkt_attrib *pattrib)
+{
+ struct pkt_file pktfile;
+ struct sta_info *psta = NULL;
+ struct ethhdr etherhdr;
+
+ int bmcast;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct qos_priv *pqospriv = &pmlmepriv->qospriv;
+ int res = _SUCCESS;
+
+ _func_enter_;
+
+ _rtw_open_pktfile(pkt, &pktfile);
+ _rtw_pktfile_read(&pktfile, (u8 *)&etherhdr, ETH_HLEN);
+
+ pattrib->ether_type = ntohs(etherhdr.h_proto);
+
+ memcpy(pattrib->dst, &etherhdr.h_dest, ETH_ALEN);
+ memcpy(pattrib->src, &etherhdr.h_source, ETH_ALEN);
+
+ pattrib->pctrl = 0;
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
+ check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
+ memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
+ memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
+ } else if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
+ memcpy(pattrib->ra, get_bssid(pmlmepriv), ETH_ALEN);
+ memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
+ memcpy(pattrib->ta, get_bssid(pmlmepriv), ETH_ALEN);
+ }
+
+ pattrib->pktlen = pktfile.pkt_len;
+
+ if (ETH_P_IP == pattrib->ether_type) {
+ /* For DHCP and ARP packets, transmit at CCK 1M and keep LPS awake for a while */
+ /* so that the DHCP exchange does not fail. */
+ u8 tmp[24];
+ _rtw_pktfile_read(&pktfile, &tmp[0], 24);
+ pattrib->dhcp_pkt = 0;
+ if (pktfile.pkt_len > 282) {/* MINIMUM_DHCP_PACKET_SIZE) { */
+ if (ETH_P_IP == pattrib->ether_type) {/* IP header */
+ if (((tmp[21] == 68) && (tmp[23] == 67)) ||
+ ((tmp[21] == 67) && (tmp[23] == 68))) {
+ /* 68 : UDP BOOTP client */
+ /* 67 : UDP BOOTP server */
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("====================== update_attrib: get DHCP Packet\n"));
+ /* Use low rate to send DHCP packet. */
+ pattrib->dhcp_pkt = 1;
+ }
+ }
+ }
+ } else if (0x888e == pattrib->ether_type) {
+ DBG_88E_LEVEL(_drv_info_, "send eapol packet\n");
+ }
+
+ if ((pattrib->ether_type == 0x888e) || (pattrib->dhcp_pkt == 1))
+ rtw_set_scan_deny(padapter, 3000);
+
+ /* If EAPOL , ARP , OR DHCP packet, driver must be in active mode. */
+ if ((pattrib->ether_type == 0x0806) || (pattrib->ether_type == 0x888e) || (pattrib->dhcp_pkt == 1))
+ rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SPECIAL_PACKET, 1);
+
+ bmcast = IS_MCAST(pattrib->ra);
+
+ /* get sta_info */
+ if (bmcast) {
+ psta = rtw_get_bcmc_stainfo(padapter);
+ } else {
+ psta = rtw_get_stainfo(pstapriv, pattrib->ra);
+ if (psta == NULL) { /* if we cannot get psta => drop the pkt */
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_, ("\nupdate_attrib => get sta_info fail, ra: %pM\n", (pattrib->ra)));
+ res = _FAIL;
+ goto exit;
+ } else if ((check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) && (!(psta->state & _FW_LINKED))) {
+ res = _FAIL;
+ goto exit;
+ }
+ }
+
+ if (psta) {
+ pattrib->mac_id = psta->mac_id;
+ /* DBG_88E("%s ==> mac_id(%d)\n", __func__, pattrib->mac_id); */
+ pattrib->psta = psta;
+ } else {
+ /* if we cannot get psta => drop the pkt */
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_, ("\nupdate_attrib => get sta_info fail, ra:%pM\n", (pattrib->ra)));
+ res = _FAIL;
+ goto exit;
+ }
+
+ pattrib->ack_policy = 0;
+ /* get ether_hdr_len */
+ pattrib->pkt_hdrlen = ETH_HLEN;/* pattrib->ether_type == 0x8100) ? (14 + 4): 14; vlan tag */
+
+ pattrib->hdrlen = WLAN_HDR_A3_LEN;
+ pattrib->subtype = WIFI_DATA_TYPE;
+ pattrib->priority = 0;
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE|WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE)) {
+ if (psta->qos_option)
+ set_qos(&pktfile, pattrib);
+ } else {
+ if (pqospriv->qos_option) {
+ set_qos(&pktfile, pattrib);
+
+ if (pmlmepriv->acm_mask != 0)
+ pattrib->priority = qos_acm(pmlmepriv->acm_mask, pattrib->priority);
+ }
+ }
+
+ if (psta->ieee8021x_blocked) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("\n psta->ieee8021x_blocked == true\n"));
+
+ pattrib->encrypt = 0;
+
+ if ((pattrib->ether_type != 0x888e) && !check_fwstate(pmlmepriv, WIFI_MP_STATE)) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("\npsta->ieee8021x_blocked == true, pattrib->ether_type(%.4x) != 0x888e\n", pattrib->ether_type));
+ res = _FAIL;
+ goto exit;
+ }
+ } else {
+ GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, bmcast);
+
+ switch (psecuritypriv->dot11AuthAlgrthm) {
+ case dot11AuthAlgrthm_Open:
+ case dot11AuthAlgrthm_Shared:
+ case dot11AuthAlgrthm_Auto:
+ pattrib->key_idx = (u8)psecuritypriv->dot11PrivacyKeyIndex;
+ break;
+ case dot11AuthAlgrthm_8021X:
+ if (bmcast)
+ pattrib->key_idx = (u8)psecuritypriv->dot118021XGrpKeyid;
+ else
+ pattrib->key_idx = 0;
+ break;
+ default:
+ pattrib->key_idx = 0;
+ break;
+ }
+ }
+
+ switch (pattrib->encrypt) {
+ case _WEP40_:
+ case _WEP104_:
+ pattrib->iv_len = 4;
+ pattrib->icv_len = 4;
+ break;
+ case _TKIP_:
+ pattrib->iv_len = 8;
+ pattrib->icv_len = 4;
+
+ if (padapter->securitypriv.busetkipkey == _FAIL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
+ ("\npadapter->securitypriv.busetkipkey(%d) == _FAIL drop packet\n",
+ padapter->securitypriv.busetkipkey));
+ res = _FAIL;
+ goto exit;
+ }
+ break;
+ case _AES_:
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("pattrib->encrypt=%d (_AES_)\n", pattrib->encrypt));
+ pattrib->iv_len = 8;
+ pattrib->icv_len = 8;
+ break;
+ default:
+ pattrib->iv_len = 0;
+ pattrib->icv_len = 0;
+ break;
+ }
+
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
+ ("update_attrib: encrypt=%d securitypriv.sw_encrypt=%d\n",
+ pattrib->encrypt, padapter->securitypriv.sw_encrypt));
+
+ if (pattrib->encrypt &&
+ (padapter->securitypriv.sw_encrypt || !psecuritypriv->hw_decrypted)) {
+ pattrib->bswenc = true;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
+ ("update_attrib: encrypt=%d securitypriv.hw_decrypted=%d bswenc = true\n",
+ pattrib->encrypt, padapter->securitypriv.sw_encrypt));
+ } else {
+ pattrib->bswenc = false;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("update_attrib: bswenc = false\n"));
+ }
+
+ rtw_set_tx_chksum_offload(pkt, pattrib);
+
+ update_attrib_phy_info(pattrib, psta);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ int curfragnum, length;
+ u8 *pframe, *payload, mic[8];
+ struct mic_data micdata;
+ struct sta_info *stainfo;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ u8 priority[4] = {0x0, 0x0, 0x0, 0x0};
+ u8 hw_hdr_offset = 0;
+ int bmcst = IS_MCAST(pattrib->ra);
+
+ if (pattrib->psta)
+ stainfo = pattrib->psta;
+ else
+ stainfo = rtw_get_stainfo(&padapter->stapriv , &pattrib->ra[0]);
+
+_func_enter_;
+
+ hw_hdr_offset = TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);
+
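+ /*
+ * TKIP requires a Michael MIC computed over DA, SA, a 4-byte priority
+ * field and the plaintext MSDU; the 8-byte MIC is appended to the last
+ * fragment below before (optional) software encryption.
+ */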
+ if (pattrib->encrypt == _TKIP_) {/* if (psecuritypriv->dot11PrivacyAlgrthm == _TKIP_PRIVACY_) */
+ /* encode mic code */
+ if (stainfo != NULL) {
+ u8 null_key[16] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0};
+
+ pframe = pxmitframe->buf_addr + hw_hdr_offset;
+
+ if (bmcst) {
+ if (_rtw_memcmp(psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey, null_key, 16))
+ return _FAIL;
+ /* start to calculate the mic code */
+ rtw_secmicsetkey(&micdata, psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey);
+ } else {
+ if (_rtw_memcmp(&stainfo->dot11tkiptxmickey.skey[0], null_key, 16) == true) {
+ /* DbgPrint("\nxmitframe_addmic:stainfo->dot11tkiptxmickey == 0\n"); */
+ /* rtw_msleep_os(10); */
+ return _FAIL;
+ }
+ /* start to calculate the mic code */
+ rtw_secmicsetkey(&micdata, &stainfo->dot11tkiptxmickey.skey[0]);
+ }
+
+ if (pframe[1]&1) { /* ToDS == 1 */
+ rtw_secmicappend(&micdata, &pframe[16], 6); /* DA */
+ if (pframe[1]&2) /* From Ds == 1 */
+ rtw_secmicappend(&micdata, &pframe[24], 6);
+ else
+ rtw_secmicappend(&micdata, &pframe[10], 6);
+ } else { /* ToDS == 0 */
+ rtw_secmicappend(&micdata, &pframe[4], 6); /* DA */
+ if (pframe[1]&2) /* From Ds == 1 */
+ rtw_secmicappend(&micdata, &pframe[16], 6);
+ else
+ rtw_secmicappend(&micdata, &pframe[10], 6);
+ }
+
+ if (pattrib->qos_en)
+ priority[0] = (u8)pxmitframe->attrib.priority;
+
+ rtw_secmicappend(&micdata, &priority[0], 4);
+
+ payload = pframe;
+
+ for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
+ payload = (u8 *)RND4((size_t)(payload));
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
+ ("=== curfragnum=%d, pframe = 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x,!!!\n",
+ curfragnum, *payload, *(payload+1),
+ *(payload+2), *(payload+3),
+ *(payload+4), *(payload+5),
+ *(payload+6), *(payload+7)));
+
+ payload = payload+pattrib->hdrlen+pattrib->iv_len;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
+ ("curfragnum=%d pattrib->hdrlen=%d pattrib->iv_len=%d",
+ curfragnum, pattrib->hdrlen, pattrib->iv_len));
+ if ((curfragnum+1) == pattrib->nr_frags) {
+ length = pattrib->last_txcmdsz-pattrib->hdrlen-pattrib->iv_len-((pattrib->bswenc) ? pattrib->icv_len : 0);
+ rtw_secmicappend(&micdata, payload, length);
+ payload = payload+length;
+ } else {
+ length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-((pattrib->bswenc) ? pattrib->icv_len : 0);
+ rtw_secmicappend(&micdata, payload, length);
+ payload = payload+length+pattrib->icv_len;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("curfragnum=%d length=%d pattrib->icv_len=%d", curfragnum, length, pattrib->icv_len));
+ }
+ }
+ rtw_secgetmic(&micdata, &(mic[0]));
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: before add mic code!!!\n"));
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: pattrib->last_txcmdsz=%d!!!\n", pattrib->last_txcmdsz));
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: mic[0]=0x%.2x , mic[1]=0x%.2x , mic[2]= 0x%.2x, mic[3]=0x%.2x\n\
+ mic[4]= 0x%.2x , mic[5]= 0x%.2x , mic[6]= 0x%.2x , mic[7]= 0x%.2x !!!!\n",
+ mic[0], mic[1], mic[2], mic[3], mic[4], mic[5], mic[6], mic[7]));
+ /* add mic code and add the mic code length in last_txcmdsz */
+
+ memcpy(payload, &(mic[0]), 8);
+ pattrib->last_txcmdsz += 8;
+
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("\n ======== last pkt ========\n"));
+ payload = payload-pattrib->last_txcmdsz+8;
+ for (curfragnum = 0; curfragnum < pattrib->last_txcmdsz; curfragnum = curfragnum+8)
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
+ (" %.2x, %.2x, %.2x, %.2x, %.2x, %.2x, %.2x, %.2x ",
+ *(payload+curfragnum), *(payload+curfragnum+1),
+ *(payload+curfragnum+2), *(payload+curfragnum+3),
+ *(payload+curfragnum+4), *(payload+curfragnum+5),
+ *(payload+curfragnum+6), *(payload+curfragnum+7)));
+ } else {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: rtw_get_stainfo==NULL!!!\n"));
+ }
+ }
+
+_func_exit_;
+
+ return _SUCCESS;
+}
+
+static s32 xmitframe_swencrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+
+_func_enter_;
+
+ if (pattrib->bswenc) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_, ("### xmitframe_swencrypt\n"));
+ switch (pattrib->encrypt) {
+ case _WEP40_:
+ case _WEP104_:
+ rtw_wep_encrypt(padapter, (u8 *)pxmitframe);
+ break;
+ case _TKIP_:
+ rtw_tkip_encrypt(padapter, (u8 *)pxmitframe);
+ break;
+ case _AES_:
+ rtw_aes_encrypt(padapter, (u8 *)pxmitframe);
+ break;
+ default:
+ break;
+ }
+ } else {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_notice_, ("### xmitframe_hwencrypt\n"));
+ }
+
+_func_exit_;
+
+ return _SUCCESS;
+}
+
+s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattrib)
+{
+ u16 *qc;
+
+ struct rtw_ieee80211_hdr *pwlanhdr = (struct rtw_ieee80211_hdr *)hdr;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct qos_priv *pqospriv = &pmlmepriv->qospriv;
+ u8 qos_option = false;
+
+ int res = _SUCCESS;
+ u16 *fctrl = &pwlanhdr->frame_ctl;
+
+ struct sta_info *psta;
+
+ int bmcst = IS_MCAST(pattrib->ra);
+
+_func_enter_;
+
+ if (pattrib->psta) {
+ psta = pattrib->psta;
+ } else {
+ if (bmcst) {
+ psta = rtw_get_bcmc_stainfo(padapter);
+ } else {
+ psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
+ }
+ }
+
+ _rtw_memset(hdr, 0, WLANHDR_OFFSET);
+
+ SetFrameSubType(fctrl, pattrib->subtype);
+
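+	/*
+	 * Address field mapping implemented by the branches below:
+	 *   STA  (ToDS = 1): addr1 = BSSID, addr2 = SA,    addr3 = DA
+	 *   AP   (FrDS = 1): addr1 = DA,    addr2 = BSSID, addr3 = SA
+	 *   IBSS:            addr1 = DA,    addr2 = SA,    addr3 = BSSID
+	 */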
+ if (pattrib->subtype & WIFI_DATA_TYPE) {
+		if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
+ /* to_ds = 1, fr_ds = 0; */
+ /* Data transfer to AP */
+ SetToDs(fctrl);
+ memcpy(pwlanhdr->addr1, get_bssid(pmlmepriv), ETH_ALEN);
+ memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pattrib->dst, ETH_ALEN);
+
+ if (pqospriv->qos_option)
+ qos_option = true;
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ /* to_ds = 0, fr_ds = 1; */
+ SetFrDs(fctrl);
+ memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, get_bssid(pmlmepriv), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pattrib->src, ETH_ALEN);
+
+ if (psta->qos_option)
+ qos_option = true;
+ } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
+ check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
+ memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN);
+
+ if (psta->qos_option)
+ qos_option = true;
+ } else {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("fw_state:%x is not allowed to xmit frame\n", get_fwstate(pmlmepriv)));
+ res = _FAIL;
+ goto exit;
+ }
+
+ if (pattrib->mdata)
+ SetMData(fctrl);
+
+ if (pattrib->encrypt)
+ SetPrivacy(fctrl);
+
+ if (qos_option) {
+ qc = (unsigned short *)(hdr + pattrib->hdrlen - 2);
+
+ if (pattrib->priority)
+ SetPriority(qc, pattrib->priority);
+
+ SetEOSP(qc, pattrib->eosp);
+
+ SetAckpolicy(qc, pattrib->ack_policy);
+ }
+
+ /* TODO: fill HT Control Field */
+
+ /* Update Seq Num will be handled by f/w */
+ if (psta) {
+ psta->sta_xmitpriv.txseq_tid[pattrib->priority]++;
+ psta->sta_xmitpriv.txseq_tid[pattrib->priority] &= 0xFFF;
+
+ pattrib->seqnum = psta->sta_xmitpriv.txseq_tid[pattrib->priority];
+
+ SetSeqNum(hdr, pattrib->seqnum);
+
+			/* check whether A-MPDU aggregation is enabled */
+ if (pattrib->ht_en && psta->htpriv.ampdu_enable) {
+ if (psta->htpriv.agg_enable_bitmap & BIT(pattrib->priority))
+ pattrib->ampdu_en = true;
+ }
+
+			/* re-check A-MPDU aggregation against BA_starting_seqctrl */
+ if (pattrib->ampdu_en) {
+ u16 tx_seq;
+
+ tx_seq = psta->BA_starting_seqctrl[pattrib->priority & 0x0f];
+
+ /* check BA_starting_seqctrl */
+ if (SN_LESS(pattrib->seqnum, tx_seq)) {
+ pattrib->ampdu_en = false;/* AGG BK */
+ } else if (SN_EQUAL(pattrib->seqnum, tx_seq)) {
+ psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (tx_seq+1)&0xfff;
+
+ pattrib->ampdu_en = true;/* AGG EN */
+ } else {
+ psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (pattrib->seqnum+1)&0xfff;
+ pattrib->ampdu_en = true;/* AGG EN */
+ }
+ }
+ }
+ }
+exit:
+
+_func_exit_;
+ return res;
+}
+
+s32 rtw_txframes_pending(struct adapter *padapter)
+{
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ return ((_rtw_queue_empty(&pxmitpriv->be_pending) == false) ||
+ (_rtw_queue_empty(&pxmitpriv->bk_pending) == false) ||
+ (_rtw_queue_empty(&pxmitpriv->vi_pending) == false) ||
+ (_rtw_queue_empty(&pxmitpriv->vo_pending) == false));
+}
+
+s32 rtw_txframes_sta_ac_pending(struct adapter *padapter, struct pkt_attrib *pattrib)
+{
+ struct sta_info *psta;
+ struct tx_servq *ptxservq;
+ int priority = pattrib->priority;
+
+ psta = pattrib->psta;
+
+ switch (priority) {
+ case 1:
+ case 2:
+ ptxservq = &(psta->sta_xmitpriv.bk_q);
+ break;
+ case 4:
+ case 5:
+ ptxservq = &(psta->sta_xmitpriv.vi_q);
+ break;
+ case 6:
+ case 7:
+ ptxservq = &(psta->sta_xmitpriv.vo_q);
+ break;
+ case 0:
+ case 3:
+ default:
+ ptxservq = &(psta->sta_xmitpriv.be_q);
+ break;
+ }
+
+ return ptxservq->qcnt;
+}
+
+/*
+ * Calculate the maximum 802.11 packet size from pkt_attrib.
+ * This function does not take fragmentation into account.
+ */
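+/*
+ * Worked example (illustrative values only): a 26-byte QoS data header,
+ * an 8-byte TKIP IV/ext-IV, software encryption and a 1472-byte payload
+ * give 26 + 8 + 8 (LLC/SNAP) + 1472 + 8 (MIC) + 4 (ICV) = 1526 bytes.
+ */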
+u32 rtw_calculate_wlan_pkt_size_by_attribue(struct pkt_attrib *pattrib)
+{
+ u32 len = 0;
+
+ len = pattrib->hdrlen + pattrib->iv_len; /* WLAN Header and IV */
+ len += SNAP_SIZE + sizeof(u16); /* LLC */
+ len += pattrib->pktlen;
+ if (pattrib->encrypt == _TKIP_)
+ len += 8; /* MIC */
+ len += ((pattrib->bswenc) ? pattrib->icv_len : 0); /* ICV */
+
+ return len;
+}
+
+/*
+ * This sub-routine performs all of the following:
+ *
+ * 1. remove the 802.3 header.
+ * 2. create the wlan header, based on the info in pxmitframe.
+ * 3. append the sta's iv/ext-iv.
+ * 4. append the LLC header.
+ * 5. move the frag chunk from pframe to pxmitframe->mem.
+ * 6. apply sw encryption, if necessary.
+ */
+s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct xmit_frame *pxmitframe)
+{
+ struct pkt_file pktfile;
+ s32 frg_inx, frg_len, mpdu_len, llc_sz, mem_sz;
+ size_t addr;
+ u8 *pframe, *mem_start;
+ u8 hw_hdr_offset;
+ struct sta_info *psta;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ u8 *pbuf_start;
+ s32 bmcst = IS_MCAST(pattrib->ra);
+ s32 res = _SUCCESS;
+
+_func_enter_;
+
+ psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
+
+ if (psta == NULL)
+ return _FAIL;
+
+ if (pxmitframe->buf_addr == NULL) {
+ DBG_88E("==> %s buf_addr == NULL\n", __func__);
+ return _FAIL;
+ }
+
+ pbuf_start = pxmitframe->buf_addr;
+
+ hw_hdr_offset = TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);
+
+ mem_start = pbuf_start + hw_hdr_offset;
+
+ if (rtw_make_wlanhdr(padapter, mem_start, pattrib) == _FAIL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("rtw_xmitframe_coalesce: rtw_make_wlanhdr fail; drop pkt\n"));
+ DBG_88E("rtw_xmitframe_coalesce: rtw_make_wlanhdr fail; drop pkt\n");
+ res = _FAIL;
+ goto exit;
+ }
+
+ _rtw_open_pktfile(pkt, &pktfile);
+ _rtw_pktfile_read(&pktfile, NULL, pattrib->pkt_hdrlen);
+
+ frg_inx = 0;
+ frg_len = pxmitpriv->frag_len - 4;/* 2346-4 = 2342 */
+
+ while (1) {
+ llc_sz = 0;
+
+ mpdu_len = frg_len;
+
+ pframe = mem_start;
+
+ SetMFrag(mem_start);
+
+ pframe += pattrib->hdrlen;
+ mpdu_len -= pattrib->hdrlen;
+
+		/* add the IV/ext-IV, if necessary... */
+ if (pattrib->iv_len) {
+ if (psta != NULL) {
+ switch (pattrib->encrypt) {
+ case _WEP40_:
+ case _WEP104_:
+ WEP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
+ break;
+ case _TKIP_:
+ if (bmcst)
+ TKIP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
+ else
+ TKIP_IV(pattrib->iv, psta->dot11txpn, 0);
+ break;
+ case _AES_:
+ if (bmcst)
+ AES_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
+ else
+ AES_IV(pattrib->iv, psta->dot11txpn, 0);
+ break;
+ }
+ }
+
+ memcpy(pframe, pattrib->iv, pattrib->iv_len);
+
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_notice_,
+ ("rtw_xmitframe_coalesce: keyid=%d pattrib->iv[3]=%.2x pframe=%.2x %.2x %.2x %.2x\n",
+ padapter->securitypriv.dot11PrivacyKeyIndex, pattrib->iv[3], *pframe, *(pframe+1), *(pframe+2), *(pframe+3)));
+
+ pframe += pattrib->iv_len;
+
+ mpdu_len -= pattrib->iv_len;
+ }
+
+ if (frg_inx == 0) {
+ llc_sz = rtw_put_snap(pframe, pattrib->ether_type);
+ pframe += llc_sz;
+ mpdu_len -= llc_sz;
+ }
+
+ if ((pattrib->icv_len > 0) && (pattrib->bswenc)) {
+ mpdu_len -= pattrib->icv_len;
+ }
+
+ if (bmcst) {
+			/* do not fragment broadcast/multicast packets */
+ mem_sz = _rtw_pktfile_read(&pktfile, pframe, pattrib->pktlen);
+ } else {
+ mem_sz = _rtw_pktfile_read(&pktfile, pframe, mpdu_len);
+ }
+
+ pframe += mem_sz;
+
+ if ((pattrib->icv_len > 0) && (pattrib->bswenc)) {
+ memcpy(pframe, pattrib->icv, pattrib->icv_len);
+ pframe += pattrib->icv_len;
+ }
+
+ frg_inx++;
+
+ if (bmcst || rtw_endofpktfile(&pktfile)) {
+ pattrib->nr_frags = frg_inx;
+
+ pattrib->last_txcmdsz = pattrib->hdrlen + pattrib->iv_len + ((pattrib->nr_frags == 1) ? llc_sz : 0) +
+ ((pattrib->bswenc) ? pattrib->icv_len : 0) + mem_sz;
+
+ ClearMFrag(mem_start);
+
+ break;
+ } else {
+			RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("%s: there is still data left in the packet!\n", __func__));
+ }
+
+ addr = (size_t)(pframe);
+
+ mem_start = (unsigned char *)RND4(addr) + hw_hdr_offset;
+ memcpy(mem_start, pbuf_start + hw_hdr_offset, pattrib->hdrlen);
+ }
+
+ if (xmitframe_addmic(padapter, pxmitframe) == _FAIL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic(padapter, pxmitframe) == _FAIL\n"));
+ DBG_88E("xmitframe_addmic(padapter, pxmitframe) == _FAIL\n");
+ res = _FAIL;
+ goto exit;
+ }
+
+ xmitframe_swencrypt(padapter, pxmitframe);
+
+ if (!bmcst)
+ update_attrib_vcs_info(padapter, pxmitframe);
+ else
+ pattrib->vcs_mode = NONE_VCS;
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+/* Logical Link Control (LLC) SubNetwork Attachment Point (SNAP) header.
+ * The IEEE LLC/SNAP header contains 8 octets.
+ * The first 3 octets comprise the LLC portion.
+ * The SNAP portion, 5 octets, is divided into two fields:
+ *	Organizationally Unique Identifier (OUI), 3 octets,
+ *	type, defined by that organization, 2 octets.
+ */
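+/*
+ * For example, an IPv4 payload (EtherType 0x0800) encapsulated with the
+ * RFC 1042 OUI (00:00:00) yields the 8 octets AA AA 03 00 00 00 08 00.
+ */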
+s32 rtw_put_snap(u8 *data, u16 h_proto)
+{
+ struct ieee80211_snap_hdr *snap;
+ u8 *oui;
+
+_func_enter_;
+
+ snap = (struct ieee80211_snap_hdr *)data;
+ snap->dsap = 0xaa;
+ snap->ssap = 0xaa;
+ snap->ctrl = 0x03;
+
+ if (h_proto == 0x8137 || h_proto == 0x80f3)
+ oui = P802_1H_OUI;
+ else
+ oui = RFC1042_OUI;
+
+ snap->oui[0] = oui[0];
+ snap->oui[1] = oui[1];
+ snap->oui[2] = oui[2];
+
+ *(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
+
+_func_exit_;
+
+ return SNAP_SIZE + sizeof(u16);
+}
+
+void rtw_update_protection(struct adapter *padapter, u8 *ie, uint ie_len)
+{
+ uint protection;
+ u8 *perp;
+ int erp_len;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+
+_func_enter_;
+
+ switch (pxmitpriv->vcs_setting) {
+ case DISABLE_VCS:
+ pxmitpriv->vcs = NONE_VCS;
+ break;
+ case ENABLE_VCS:
+ break;
+ case AUTO_VCS:
+ default:
+ perp = rtw_get_ie(ie, _ERPINFO_IE_, &erp_len, ie_len);
+ if (perp == NULL) {
+ pxmitpriv->vcs = NONE_VCS;
+ } else {
+ protection = (*(perp + 2)) & BIT(1);
+ if (protection) {
+ if (pregistrypriv->vcs_type == RTS_CTS)
+ pxmitpriv->vcs = RTS_CTS;
+ else
+ pxmitpriv->vcs = CTS_TO_SELF;
+ } else {
+ pxmitpriv->vcs = NONE_VCS;
+ }
+ }
+ break;
+ }
+
+_func_exit_;
+}
+
+void rtw_count_tx_stats(struct adapter *padapter, struct xmit_frame *pxmitframe, int sz)
+{
+ struct sta_info *psta = NULL;
+ struct stainfo_stats *pstats = NULL;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ if ((pxmitframe->frame_tag&0x0f) == DATA_FRAMETAG) {
+ pxmitpriv->tx_bytes += sz;
+ pmlmepriv->LinkDetectInfo.NumTxOkInPeriod += pxmitframe->agg_num;
+
+ psta = pxmitframe->attrib.psta;
+ if (psta) {
+ pstats = &psta->sta_stats;
+ pstats->tx_pkts += pxmitframe->agg_num;
+ pstats->tx_bytes += sz;
+ }
+ }
+}
+
+struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
+{
+ unsigned long irql;
+ struct xmit_buf *pxmitbuf = NULL;
+ struct list_head *plist, *phead;
+ struct __queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue;
+
+_func_enter_;
+
+ _enter_critical(&pfree_queue->lock, &irql);
+
+ if (_rtw_queue_empty(pfree_queue) == true) {
+ pxmitbuf = NULL;
+ } else {
+ phead = get_list_head(pfree_queue);
+
+ plist = get_next(phead);
+
+ pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
+
+ rtw_list_delete(&(pxmitbuf->list));
+ }
+
+ if (pxmitbuf != NULL) {
+ pxmitpriv->free_xmit_extbuf_cnt--;
+
+ pxmitbuf->priv_data = NULL;
+ /* pxmitbuf->ext_tag = true; */
+
+ if (pxmitbuf->sctx) {
+ DBG_88E("%s pxmitbuf->sctx is not NULL\n", __func__);
+ rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
+ }
+ }
+
+ _exit_critical(&pfree_queue->lock, &irql);
+
+_func_exit_;
+
+ return pxmitbuf;
+}
+
+s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
+{
+ unsigned long irql;
+ struct __queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue;
+
+_func_enter_;
+
+ if (pxmitbuf == NULL)
+ return _FAIL;
+
+ _enter_critical(&pfree_queue->lock, &irql);
+
+ rtw_list_delete(&pxmitbuf->list);
+
+ rtw_list_insert_tail(&(pxmitbuf->list), get_list_head(pfree_queue));
+ pxmitpriv->free_xmit_extbuf_cnt++;
+
+ _exit_critical(&pfree_queue->lock, &irql);
+
+_func_exit_;
+
+ return _SUCCESS;
+}
+
+struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv)
+{
+ unsigned long irql;
+ struct xmit_buf *pxmitbuf = NULL;
+ struct list_head *plist, *phead;
+ struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
+
+_func_enter_;
+
+ /* DBG_88E("+rtw_alloc_xmitbuf\n"); */
+
+ _enter_critical(&pfree_xmitbuf_queue->lock, &irql);
+
+ if (_rtw_queue_empty(pfree_xmitbuf_queue) == true) {
+ pxmitbuf = NULL;
+ } else {
+ phead = get_list_head(pfree_xmitbuf_queue);
+
+ plist = get_next(phead);
+
+ pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
+
+ rtw_list_delete(&(pxmitbuf->list));
+ }
+
+ if (pxmitbuf != NULL) {
+ pxmitpriv->free_xmitbuf_cnt--;
+ pxmitbuf->priv_data = NULL;
+ if (pxmitbuf->sctx) {
+ DBG_88E("%s pxmitbuf->sctx is not NULL\n", __func__);
+ rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
+ }
+ }
+ _exit_critical(&pfree_xmitbuf_queue->lock, &irql);
+
+_func_exit_;
+
+ return pxmitbuf;
+}
+
+s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
+{
+ unsigned long irql;
+ struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
+
+_func_enter_;
+ if (pxmitbuf == NULL)
+ return _FAIL;
+
+ if (pxmitbuf->sctx) {
+ DBG_88E("%s pxmitbuf->sctx is not NULL\n", __func__);
+ rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_FREE);
+ }
+
+ if (pxmitbuf->ext_tag) {
+ rtw_free_xmitbuf_ext(pxmitpriv, pxmitbuf);
+ } else {
+ _enter_critical(&pfree_xmitbuf_queue->lock, &irql);
+
+ rtw_list_delete(&pxmitbuf->list);
+
+ rtw_list_insert_tail(&(pxmitbuf->list), get_list_head(pfree_xmitbuf_queue));
+
+ pxmitpriv->free_xmitbuf_cnt++;
+ _exit_critical(&pfree_xmitbuf_queue->lock, &irql);
+ }
+
+_func_exit_;
+
+ return _SUCCESS;
+}
+
+/*
+ * Calling context:
+ * 1. OS_TXENTRY
+ * 2. RXENTRY (rx_thread or RX_ISR/RX_CallBack)
+ *
+ * If USE_RXTHREAD is enabled there is no need for a critical section here;
+ * otherwise, _enter/_exit critical must be used to protect free_xmit_queue.
+ *
+ * Must be very cautious...
+ */
+
+struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)/* _queue *pfree_xmit_queue) */
+{
+	/*
+	 * Remember to use the osdep_service APIs and lock/unlock or
+	 * _enter/_exit critical sections to protect pfree_xmit_queue.
+	 */
+
+ unsigned long irql;
+ struct xmit_frame *pxframe = NULL;
+ struct list_head *plist, *phead;
+ struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
+
+_func_enter_;
+
+ _enter_critical_bh(&pfree_xmit_queue->lock, &irql);
+
+ if (_rtw_queue_empty(pfree_xmit_queue) == true) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe:%d\n", pxmitpriv->free_xmitframe_cnt));
+ pxframe = NULL;
+ } else {
+ phead = get_list_head(pfree_xmit_queue);
+
+ plist = get_next(phead);
+
+ pxframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
+
+ rtw_list_delete(&(pxframe->list));
+ }
+
+ if (pxframe != NULL) { /* default value setting */
+ pxmitpriv->free_xmitframe_cnt--;
+
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe():free_xmitframe_cnt=%d\n", pxmitpriv->free_xmitframe_cnt));
+
+ pxframe->buf_addr = NULL;
+ pxframe->pxmitbuf = NULL;
+
+ _rtw_memset(&pxframe->attrib, 0, sizeof(struct pkt_attrib));
+ /* pxframe->attrib.psta = NULL; */
+
+ pxframe->frame_tag = DATA_FRAMETAG;
+
+ pxframe->pkt = NULL;
+ pxframe->pkt_offset = 1;/* default use pkt_offset to fill tx desc */
+
+ pxframe->agg_num = 1;
+ pxframe->ack_report = 0;
+ }
+
+ _exit_critical_bh(&pfree_xmit_queue->lock, &irql);
+
+_func_exit_;
+
+ return pxframe;
+}
+
+s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitframe)
+{
+ unsigned long irql;
+ struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
+ struct adapter *padapter = pxmitpriv->adapter;
+ struct sk_buff *pndis_pkt = NULL;
+
+_func_enter_;
+
+ if (pxmitframe == NULL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("====== rtw_free_xmitframe():pxmitframe == NULL!!!!!!!!!!\n"));
+ goto exit;
+ }
+
+ _enter_critical_bh(&pfree_xmit_queue->lock, &irql);
+
+ rtw_list_delete(&pxmitframe->list);
+
+ if (pxmitframe->pkt) {
+ pndis_pkt = pxmitframe->pkt;
+ pxmitframe->pkt = NULL;
+ }
+
+ rtw_list_insert_tail(&pxmitframe->list, get_list_head(pfree_xmit_queue));
+
+ pxmitpriv->free_xmitframe_cnt++;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_debug_, ("rtw_free_xmitframe():free_xmitframe_cnt=%d\n", pxmitpriv->free_xmitframe_cnt));
+
+ _exit_critical_bh(&pfree_xmit_queue->lock, &irql);
+
+ if (pndis_pkt)
+ rtw_os_pkt_complete(padapter, pndis_pkt);
+
+exit:
+
+_func_exit_;
+
+ return _SUCCESS;
+}
+
+void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pframequeue)
+{
+ unsigned long irql;
+ struct list_head *plist, *phead;
+ struct xmit_frame *pxmitframe;
+
+_func_enter_;
+
+ _enter_critical_bh(&(pframequeue->lock), &irql);
+
+ phead = get_list_head(pframequeue);
+ plist = get_next(phead);
+
+ while (!rtw_end_of_queue_search(phead, plist)) {
+ pxmitframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
+
+ plist = get_next(plist);
+
+ rtw_free_xmitframe(pxmitpriv, pxmitframe);
+ }
+ _exit_critical_bh(&(pframequeue->lock), &irql);
+
+_func_exit_;
+}
+
+s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ if (rtw_xmit_classifier(padapter, pxmitframe) == _FAIL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
+ ("rtw_xmitframe_enqueue: drop xmit pkt for classifier fail\n"));
+/* pxmitframe->pkt = NULL; */
+ return _FAIL;
+ }
+
+ return _SUCCESS;
+}
+
+static struct xmit_frame *dequeue_one_xmitframe(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit, struct tx_servq *ptxservq, struct __queue *pframe_queue)
+{
+ struct list_head *xmitframe_plist, *xmitframe_phead;
+ struct xmit_frame *pxmitframe = NULL;
+
+ xmitframe_phead = get_list_head(pframe_queue);
+ xmitframe_plist = get_next(xmitframe_phead);
+
+ while (!rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
+ pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+
+ xmitframe_plist = get_next(xmitframe_plist);
+
+ rtw_list_delete(&pxmitframe->list);
+
+ ptxservq->qcnt--;
+
+		break;
+ }
+
+ return pxmitframe;
+}
+
+struct xmit_frame *rtw_dequeue_xframe(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit_i, int entry)
+{
+ unsigned long irql0;
+ struct list_head *sta_plist, *sta_phead;
+ struct hw_xmit *phwxmit;
+ struct tx_servq *ptxservq = NULL;
+ struct __queue *pframe_queue = NULL;
+ struct xmit_frame *pxmitframe = NULL;
+ struct adapter *padapter = pxmitpriv->adapter;
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ int i, inx[4];
+
+_func_enter_;
+
+ inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3;
+
+ if (pregpriv->wifi_spec == 1) {
+ int j;
+
+ for (j = 0; j < 4; j++)
+ inx[j] = pxmitpriv->wmm_para_seq[j];
+ }
+
+ _enter_critical_bh(&pxmitpriv->lock, &irql0);
+
+ for (i = 0; i < entry; i++) {
+ phwxmit = phwxmit_i + inx[i];
+
+ sta_phead = get_list_head(phwxmit->sta_queue);
+ sta_plist = get_next(sta_phead);
+
+ while (!rtw_end_of_queue_search(sta_phead, sta_plist)) {
+ ptxservq = LIST_CONTAINOR(sta_plist, struct tx_servq, tx_pending);
+
+ pframe_queue = &ptxservq->sta_pending;
+
+ pxmitframe = dequeue_one_xmitframe(pxmitpriv, phwxmit, ptxservq, pframe_queue);
+
+ if (pxmitframe) {
+ phwxmit->accnt--;
+
+ /* Remove sta node when there are no pending packets. */
+ if (_rtw_queue_empty(pframe_queue)) /* must be done after get_next and before break */
+ rtw_list_delete(&ptxservq->tx_pending);
+ goto exit;
+ }
+
+ sta_plist = get_next(sta_plist);
+ }
+ }
+exit:
+ _exit_critical_bh(&pxmitpriv->lock, &irql0);
+_func_exit_;
+ return pxmitframe;
+}
+
+struct tx_servq *rtw_get_sta_pending(struct adapter *padapter, struct sta_info *psta, int up, u8 *ac)
+{
+ struct tx_servq *ptxservq;
+
+_func_enter_;
+ switch (up) {
+ case 1:
+ case 2:
+ ptxservq = &(psta->sta_xmitpriv.bk_q);
+ *(ac) = 3;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : BK\n"));
+ break;
+ case 4:
+ case 5:
+ ptxservq = &(psta->sta_xmitpriv.vi_q);
+ *(ac) = 1;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : VI\n"));
+ break;
+ case 6:
+ case 7:
+ ptxservq = &(psta->sta_xmitpriv.vo_q);
+ *(ac) = 0;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : VO\n"));
+ break;
+ case 0:
+ case 3:
+ default:
+ ptxservq = &(psta->sta_xmitpriv.be_q);
+ *(ac) = 2;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : BE\n"));
+ break;
+ }
+
+_func_exit_;
+
+ return ptxservq;
+}
+
+/*
+ * Enqueue pxmitframe into the proper per-sta service queue and link
+ * that queue onto the corresponding xx_pending list.
+ */
+s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ /* unsigned long irql0; */
+ u8 ac_index;
+ struct sta_info *psta;
+ struct tx_servq *ptxservq;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
+ int res = _SUCCESS;
+
+_func_enter_;
+
+ if (pattrib->psta) {
+ psta = pattrib->psta;
+ } else {
+ psta = rtw_get_stainfo(pstapriv, pattrib->ra);
+ }
+
+ if (psta == NULL) {
+ res = _FAIL;
+ DBG_88E("rtw_xmit_classifier: psta == NULL\n");
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("rtw_xmit_classifier: psta == NULL\n"));
+ goto exit;
+ }
+
+ ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index));
+
+ if (rtw_is_list_empty(&ptxservq->tx_pending))
+ rtw_list_insert_tail(&ptxservq->tx_pending, get_list_head(phwxmits[ac_index].sta_queue));
+
+ rtw_list_insert_tail(&pxmitframe->list, get_list_head(&ptxservq->sta_pending));
+ ptxservq->qcnt++;
+ phwxmits[ac_index].accnt++;
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+void rtw_alloc_hwxmits(struct adapter *padapter)
+{
+ struct hw_xmit *hwxmits;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ pxmitpriv->hwxmit_entry = HWXMIT_ENTRY;
+
+ pxmitpriv->hwxmits = (struct hw_xmit *)rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry);
+
+ hwxmits = pxmitpriv->hwxmits;
+
+	if (pxmitpriv->hwxmit_entry == 5) {
+		hwxmits[0].sta_queue = &pxmitpriv->bm_pending;
+		hwxmits[1].sta_queue = &pxmitpriv->vo_pending;
+		hwxmits[2].sta_queue = &pxmitpriv->vi_pending;
+		hwxmits[3].sta_queue = &pxmitpriv->bk_pending;
+		hwxmits[4].sta_queue = &pxmitpriv->be_pending;
+	} else if (pxmitpriv->hwxmit_entry == 4) {
+		hwxmits[0].sta_queue = &pxmitpriv->vo_pending;
+		hwxmits[1].sta_queue = &pxmitpriv->vi_pending;
+		hwxmits[2].sta_queue = &pxmitpriv->be_pending;
+		hwxmits[3].sta_queue = &pxmitpriv->bk_pending;
+	}
+}
+
+void rtw_free_hwxmits(struct adapter *padapter)
+{
+ struct hw_xmit *hwxmits;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ hwxmits = pxmitpriv->hwxmits;
+ kfree(hwxmits);
+}
+
+void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry)
+{
+ int i;
+_func_enter_;
+ for (i = 0; i < entry; i++, phwxmit++)
+ phwxmit->accnt = 0;
+_func_exit_;
+}
+
+static int rtw_br_client_tx(struct adapter *padapter, struct sk_buff **pskb)
+{
+ struct sk_buff *skb = *pskb;
+ unsigned long irql;
+ int res, is_vlan_tag = 0, i, do_nat25 = 1;
+ unsigned short vlan_hdr = 0;
+ void *br_port = NULL;
+
+ rcu_read_lock();
+ br_port = rcu_dereference(padapter->pnetdev->rx_handler_data);
+ rcu_read_unlock();
+ _enter_critical_bh(&padapter->br_ext_lock, &irql);
+ if (!(skb->data[0] & 1) && br_port &&
+ memcmp(skb->data+MACADDRLEN, padapter->br_mac, MACADDRLEN) &&
+ *((__be16 *)(skb->data+MACADDRLEN*2)) != __constant_htons(ETH_P_8021Q) &&
+ *((__be16 *)(skb->data+MACADDRLEN*2)) == __constant_htons(ETH_P_IP) &&
+ !memcmp(padapter->scdb_mac, skb->data+MACADDRLEN, MACADDRLEN) && padapter->scdb_entry) {
+ memcpy(skb->data+MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN);
+ padapter->scdb_entry->ageing_timer = jiffies;
+ _exit_critical_bh(&padapter->br_ext_lock, &irql);
+ } else {
+ if (*((__be16 *)(skb->data+MACADDRLEN*2)) == __constant_htons(ETH_P_8021Q)) {
+ is_vlan_tag = 1;
+ vlan_hdr = *((unsigned short *)(skb->data+MACADDRLEN*2+2));
+ for (i = 0; i < 6; i++)
+ *((unsigned short *)(skb->data+MACADDRLEN*2+2-i*2)) = *((unsigned short *)(skb->data+MACADDRLEN*2-2-i*2));
+ skb_pull(skb, 4);
+ }
+ if (!memcmp(skb->data+MACADDRLEN, padapter->br_mac, MACADDRLEN) &&
+ (*((__be16 *)(skb->data+MACADDRLEN*2)) == __constant_htons(ETH_P_IP)))
+ memcpy(padapter->br_ip, skb->data+WLAN_ETHHDR_LEN+12, 4);
+
+ if (*((__be16 *)(skb->data+MACADDRLEN*2)) == __constant_htons(ETH_P_IP)) {
+ if (memcmp(padapter->scdb_mac, skb->data+MACADDRLEN, MACADDRLEN)) {
+ padapter->scdb_entry = (struct nat25_network_db_entry *)scdb_findEntry(padapter,
+ skb->data+MACADDRLEN, skb->data+WLAN_ETHHDR_LEN+12);
+ if (padapter->scdb_entry) {
+ memcpy(padapter->scdb_mac, skb->data+MACADDRLEN, MACADDRLEN);
+ memcpy(padapter->scdb_ip, skb->data+WLAN_ETHHDR_LEN+12, 4);
+ padapter->scdb_entry->ageing_timer = jiffies;
+ do_nat25 = 0;
+ }
+ } else {
+ if (padapter->scdb_entry) {
+ padapter->scdb_entry->ageing_timer = jiffies;
+ do_nat25 = 0;
+ } else {
+ memset(padapter->scdb_mac, 0, MACADDRLEN);
+ memset(padapter->scdb_ip, 0, 4);
+ }
+ }
+ }
+ _exit_critical_bh(&padapter->br_ext_lock, &irql);
+ if (do_nat25) {
+ if (nat25_db_handle(padapter, skb, NAT25_CHECK) == 0) {
+ struct sk_buff *newskb;
+
+ if (is_vlan_tag) {
+ skb_push(skb, 4);
+ for (i = 0; i < 6; i++)
+ *((unsigned short *)(skb->data+i*2)) = *((unsigned short *)(skb->data+4+i*2));
+ *((__be16 *)(skb->data+MACADDRLEN*2)) = __constant_htons(ETH_P_8021Q);
+ *((unsigned short *)(skb->data+MACADDRLEN*2+2)) = vlan_hdr;
+ }
+
+ newskb = skb_copy(skb, GFP_ATOMIC);
+ if (newskb == NULL) {
+ DEBUG_ERR("TX DROP: skb_copy fail!\n");
+ return -1;
+ }
+ dev_kfree_skb_any(skb);
+
+ *pskb = skb = newskb;
+ if (is_vlan_tag) {
+ vlan_hdr = *((unsigned short *)(skb->data+MACADDRLEN*2+2));
+ for (i = 0; i < 6; i++)
+ *((unsigned short *)(skb->data+MACADDRLEN*2+2-i*2)) = *((unsigned short *)(skb->data+MACADDRLEN*2-2-i*2));
+ skb_pull(skb, 4);
+ }
+ }
+
+ if (skb_is_nonlinear(skb))
+ DEBUG_ERR("%s(): skb_is_nonlinear!!\n", __func__);
+
+ res = skb_linearize(skb);
+ if (res < 0) {
+ DEBUG_ERR("TX DROP: skb_linearize fail!\n");
+ return -1;
+ }
+
+ res = nat25_db_handle(padapter, skb, NAT25_INSERT);
+ if (res < 0) {
+ if (res == -2) {
+ DEBUG_ERR("TX DROP: nat25_db_handle fail!\n");
+ return -1;
+ }
+ return 0;
+ }
+ }
+
+ memcpy(skb->data+MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN);
+
+ dhcp_flag_bcast(padapter, skb);
+
+ if (is_vlan_tag) {
+ skb_push(skb, 4);
+ for (i = 0; i < 6; i++)
+ *((unsigned short *)(skb->data+i*2)) = *((unsigned short *)(skb->data+4+i*2));
+ *((__be16 *)(skb->data+MACADDRLEN*2)) = __constant_htons(ETH_P_8021Q);
+ *((unsigned short *)(skb->data+MACADDRLEN*2+2)) = vlan_hdr;
+ }
+ }
+
+ /* check if SA is equal to our MAC */
+ if (memcmp(skb->data+MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN)) {
+ DEBUG_ERR("TX DROP: untransformed frame SA:%02X%02X%02X%02X%02X%02X!\n",
+ skb->data[6], skb->data[7], skb->data[8], skb->data[9], skb->data[10], skb->data[11]);
+ return -1;
+ }
+ return 0;
+}
+
+u32 rtw_get_ff_hwaddr(struct xmit_frame *pxmitframe)
+{
+ u32 addr;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+
+ switch (pattrib->qsel) {
+ case 0:
+ case 3:
+ addr = BE_QUEUE_INX;
+ break;
+ case 1:
+ case 2:
+ addr = BK_QUEUE_INX;
+ break;
+ case 4:
+ case 5:
+ addr = VI_QUEUE_INX;
+ break;
+ case 6:
+ case 7:
+ addr = VO_QUEUE_INX;
+ break;
+ case 0x10:
+ addr = BCN_QUEUE_INX;
+ break;
+ case 0x11:/* BC/MC in PS (HIQ) */
+ addr = HIGH_QUEUE_INX;
+ break;
+ case 0x12:
+ default:
+ addr = MGT_QUEUE_INX;
+ break;
+ }
+
+ return addr;
+}
+
+static void do_queue_select(struct adapter *padapter, struct pkt_attrib *pattrib)
+{
+ u8 qsel;
+
+ qsel = pattrib->priority;
+	RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("### do_queue_select priority=%d, qsel=%d\n", pattrib->priority, qsel));
+
+ pattrib->qsel = qsel;
+}
+
+/*
+ * The main transmit (tx) entry point.
+ *
+ * Return
+ *	1	enqueue
+ *	0	success, the hardware will handle this xmit frame (packet)
+ *	<0	fail
+ */
+s32 rtw_xmit(struct adapter *padapter, struct sk_buff **ppkt)
+{
+#ifdef CONFIG_88EU_AP_MODE
+ unsigned long irql0;
+#endif
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct xmit_frame *pxmitframe = NULL;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ void *br_port = NULL;
+ s32 res;
+
+ pxmitframe = rtw_alloc_xmitframe(pxmitpriv);
+ if (pxmitframe == NULL) {
+ RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("rtw_xmit: no more pxmitframe\n"));
+ DBG_88E("DBG_TX_DROP_FRAME %s no more pxmitframe\n", __func__);
+ return -1;
+ }
+
+ rcu_read_lock();
+ br_port = rcu_dereference(padapter->pnetdev->rx_handler_data);
+ rcu_read_unlock();
+
+ if (br_port && check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE)) {
+ res = rtw_br_client_tx(padapter, ppkt);
+ if (res == -1) {
+ rtw_free_xmitframe(pxmitpriv, pxmitframe);
+ return -1;
+ }
+ }
+
+ res = update_attrib(padapter, *ppkt, &pxmitframe->attrib);
+
+ if (res == _FAIL) {
+ RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("rtw_xmit: update attrib fail\n"));
+ rtw_free_xmitframe(pxmitpriv, pxmitframe);
+ return -1;
+ }
+ pxmitframe->pkt = *ppkt;
+
+ rtw_led_control(padapter, LED_CTL_TX);
+
+ do_queue_select(padapter, &pxmitframe->attrib);
+
+#ifdef CONFIG_88EU_AP_MODE
+ _enter_critical_bh(&pxmitpriv->lock, &irql0);
+ if (xmitframe_enqueue_for_sleeping_sta(padapter, pxmitframe)) {
+ _exit_critical_bh(&pxmitpriv->lock, &irql0);
+ return 1;
+ }
+ _exit_critical_bh(&pxmitpriv->lock, &irql0);
+#endif
+
+ if (rtw_hal_xmit(padapter, pxmitframe) == false)
+ return 1;
+
+ return 0;
+}
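+/*
+ * Illustrative caller-side handling of the return value (a sketch, not the
+ * actual os-dep transmit entry):
+ *
+ *	res = rtw_xmit(padapter, &skb);
+ *	if (res < 0)
+ *		...	frame dropped: the caller still owns skb and must free it
+ *	else if (res == 1)
+ *		...	frame enqueued (e.g. for a sleeping sta); the driver owns it
+ *	else
+ *		...	res == 0: the hal layer has accepted the frame
+ */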
+
+#if defined(CONFIG_88EU_AP_MODE)
+
+int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ unsigned long irql;
+ int ret = false;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ int bmcst = IS_MCAST(pattrib->ra);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == false)
+ return ret;
+
+ if (pattrib->psta)
+ psta = pattrib->psta;
+ else
+ psta = rtw_get_stainfo(pstapriv, pattrib->ra);
+
+ if (psta == NULL)
+ return ret;
+
+ if (pattrib->triggered == 1) {
+ if (bmcst)
+ pattrib->qsel = 0x11;/* HIQ */
+ return ret;
+ }
+
+ if (bmcst) {
+ _enter_critical_bh(&psta->sleep_q.lock, &irql);
+
+		if (pstapriv->sta_dz_bitmap) {/* if any sta is in ps mode */
+ rtw_list_delete(&pxmitframe->list);
+
+ rtw_list_insert_tail(&pxmitframe->list, get_list_head(&psta->sleep_q));
+
+ psta->sleepq_len++;
+
+			pstapriv->tim_bitmap |= BIT(0);
+			pstapriv->sta_dz_bitmap |= BIT(0);
+
+			update_beacon(padapter, _TIM_IE_, NULL, false);/* tx bc/mc packets after updating the beacon */
+
+ ret = true;
+ }
+
+ _exit_critical_bh(&psta->sleep_q.lock, &irql);
+
+ return ret;
+ }
+
+ _enter_critical_bh(&psta->sleep_q.lock, &irql);
+
+ if (psta->state&WIFI_SLEEP_STATE) {
+ u8 wmmps_ac = 0;
+
+ if (pstapriv->sta_dz_bitmap&BIT(psta->aid)) {
+ rtw_list_delete(&pxmitframe->list);
+
+ rtw_list_insert_tail(&pxmitframe->list, get_list_head(&psta->sleep_q));
+
+ psta->sleepq_len++;
+
+ switch (pattrib->priority) {
+ case 1:
+ case 2:
+ wmmps_ac = psta->uapsd_bk&BIT(0);
+ break;
+ case 4:
+ case 5:
+ wmmps_ac = psta->uapsd_vi&BIT(0);
+ break;
+ case 6:
+ case 7:
+ wmmps_ac = psta->uapsd_vo&BIT(0);
+ break;
+ case 0:
+ case 3:
+ default:
+ wmmps_ac = psta->uapsd_be&BIT(0);
+ break;
+ }
+
+ if (wmmps_ac)
+ psta->sleepq_ac_len++;
+
+ if (((psta->has_legacy_ac) && (!wmmps_ac)) ||
+ ((!psta->has_legacy_ac) && (wmmps_ac))) {
+ pstapriv->tim_bitmap |= BIT(psta->aid);
+
+ if (psta->sleepq_len == 1) {
+					/* update BCN for TIM IE */
+ update_beacon(padapter, _TIM_IE_, NULL, false);
+ }
+ }
+ ret = true;
+ }
+ }
+
+ _exit_critical_bh(&psta->sleep_q.lock, &irql);
+
+ return ret;
+}
+
+static void dequeue_xmitframes_to_sleeping_queue(struct adapter *padapter, struct sta_info *psta, struct __queue *pframequeue)
+{
+ struct list_head *plist, *phead;
+ u8 ac_index;
+ struct tx_servq *ptxservq;
+ struct pkt_attrib *pattrib;
+ struct xmit_frame *pxmitframe;
+ struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
+
+ phead = get_list_head(pframequeue);
+ plist = get_next(phead);
+
+ while (!rtw_end_of_queue_search(phead, plist)) {
+ pxmitframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
+
+ plist = get_next(plist);
+
+ xmitframe_enqueue_for_sleeping_sta(padapter, pxmitframe);
+
+ pattrib = &pxmitframe->attrib;
+
+ ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index));
+
+ ptxservq->qcnt--;
+ phwxmits[ac_index].accnt--;
+ }
+}
+
+void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta)
+{
+ unsigned long irql0;
+ struct sta_info *psta_bmc;
+ struct sta_xmit_priv *pstaxmitpriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ pstaxmitpriv = &psta->sta_xmitpriv;
+
+ /* for BC/MC Frames */
+ psta_bmc = rtw_get_bcmc_stainfo(padapter);
+
+ _enter_critical_bh(&pxmitpriv->lock, &irql0);
+
+ psta->state |= WIFI_SLEEP_STATE;
+
+ pstapriv->sta_dz_bitmap |= BIT(psta->aid);
+
+ dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vo_q.sta_pending);
+ rtw_list_delete(&(pstaxmitpriv->vo_q.tx_pending));
+
+ dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vi_q.sta_pending);
+ rtw_list_delete(&(pstaxmitpriv->vi_q.tx_pending));
+
+ dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->be_q.sta_pending);
+ rtw_list_delete(&(pstaxmitpriv->be_q.tx_pending));
+
+ dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->bk_q.sta_pending);
+ rtw_list_delete(&(pstaxmitpriv->bk_q.tx_pending));
+
+ /* for BC/MC Frames */
+ pstaxmitpriv = &psta_bmc->sta_xmitpriv;
+ dequeue_xmitframes_to_sleeping_queue(padapter, psta_bmc, &pstaxmitpriv->be_q.sta_pending);
+ rtw_list_delete(&(pstaxmitpriv->be_q.tx_pending));
+
+ _exit_critical_bh(&pxmitpriv->lock, &irql0);
+}
+
+void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
+{
+ unsigned long irql;
+ u8 update_mask = 0, wmmps_ac = 0;
+ struct sta_info *psta_bmc;
+ struct list_head *xmitframe_plist, *xmitframe_phead;
+ struct xmit_frame *pxmitframe = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ _enter_critical_bh(&psta->sleep_q.lock, &irql);
+
+ xmitframe_phead = get_list_head(&psta->sleep_q);
+ xmitframe_plist = get_next(xmitframe_phead);
+
+ while (!rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
+ pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+
+ xmitframe_plist = get_next(xmitframe_plist);
+
+ rtw_list_delete(&pxmitframe->list);
+
+ switch (pxmitframe->attrib.priority) {
+ case 1:
+ case 2:
+ wmmps_ac = psta->uapsd_bk&BIT(1);
+ break;
+ case 4:
+ case 5:
+ wmmps_ac = psta->uapsd_vi&BIT(1);
+ break;
+ case 6:
+ case 7:
+ wmmps_ac = psta->uapsd_vo&BIT(1);
+ break;
+ case 0:
+ case 3:
+ default:
+ wmmps_ac = psta->uapsd_be&BIT(1);
+ break;
+ }
+
+ psta->sleepq_len--;
+ if (psta->sleepq_len > 0)
+ pxmitframe->attrib.mdata = 1;
+ else
+ pxmitframe->attrib.mdata = 0;
+
+ if (wmmps_ac) {
+ psta->sleepq_ac_len--;
+ if (psta->sleepq_ac_len > 0) {
+ pxmitframe->attrib.mdata = 1;
+ pxmitframe->attrib.eosp = 0;
+ } else {
+ pxmitframe->attrib.mdata = 0;
+ pxmitframe->attrib.eosp = 1;
+ }
+ }
+
+ pxmitframe->attrib.triggered = 1;
+
+ _exit_critical_bh(&psta->sleep_q.lock, &irql);
+ if (rtw_hal_xmit(padapter, pxmitframe))
+ rtw_os_xmit_complete(padapter, pxmitframe);
+ _enter_critical_bh(&psta->sleep_q.lock, &irql);
+ }
+
+ if (psta->sleepq_len == 0) {
+ pstapriv->tim_bitmap &= ~BIT(psta->aid);
+
+ update_mask = BIT(0);
+
+ if (psta->state&WIFI_SLEEP_STATE)
+ psta->state ^= WIFI_SLEEP_STATE;
+
+ if (psta->state & WIFI_STA_ALIVE_CHK_STATE) {
+ psta->expire_to = pstapriv->expire_to;
+ psta->state ^= WIFI_STA_ALIVE_CHK_STATE;
+ }
+
+ pstapriv->sta_dz_bitmap &= ~BIT(psta->aid);
+ }
+
+ _exit_critical_bh(&psta->sleep_q.lock, &irql);
+
+ /* for BC/MC Frames */
+ psta_bmc = rtw_get_bcmc_stainfo(padapter);
+ if (!psta_bmc)
+ return;
+
+	if ((pstapriv->sta_dz_bitmap&0xfffe) == 0x0) { /* no sta is in ps mode */
+ _enter_critical_bh(&psta_bmc->sleep_q.lock, &irql);
+
+ xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
+ xmitframe_plist = get_next(xmitframe_phead);
+
+ while (!rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
+ pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+
+ xmitframe_plist = get_next(xmitframe_plist);
+
+ rtw_list_delete(&pxmitframe->list);
+
+ psta_bmc->sleepq_len--;
+ if (psta_bmc->sleepq_len > 0)
+ pxmitframe->attrib.mdata = 1;
+ else
+ pxmitframe->attrib.mdata = 0;
+
+ pxmitframe->attrib.triggered = 1;
+
+ _exit_critical_bh(&psta_bmc->sleep_q.lock, &irql);
+ if (rtw_hal_xmit(padapter, pxmitframe))
+ rtw_os_xmit_complete(padapter, pxmitframe);
+ _enter_critical_bh(&psta_bmc->sleep_q.lock, &irql);
+ }
+
+ if (psta_bmc->sleepq_len == 0) {
+ pstapriv->tim_bitmap &= ~BIT(0);
+ pstapriv->sta_dz_bitmap &= ~BIT(0);
+
+ update_mask |= BIT(1);
+ }
+
+ _exit_critical_bh(&psta_bmc->sleep_q.lock, &irql);
+ }
+
+ if (update_mask)
+ update_beacon(padapter, _TIM_IE_, NULL, false);
+}
+
+void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *psta)
+{
+ unsigned long irql;
+ u8 wmmps_ac = 0;
+ struct list_head *xmitframe_plist, *xmitframe_phead;
+ struct xmit_frame *pxmitframe = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ _enter_critical_bh(&psta->sleep_q.lock, &irql);
+
+ xmitframe_phead = get_list_head(&psta->sleep_q);
+ xmitframe_plist = get_next(xmitframe_phead);
+
+ while (!rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
+ pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+
+ xmitframe_plist = get_next(xmitframe_plist);
+
+ switch (pxmitframe->attrib.priority) {
+ case 1:
+ case 2:
+ wmmps_ac = psta->uapsd_bk&BIT(1);
+ break;
+ case 4:
+ case 5:
+ wmmps_ac = psta->uapsd_vi&BIT(1);
+ break;
+ case 6:
+ case 7:
+ wmmps_ac = psta->uapsd_vo&BIT(1);
+ break;
+ case 0:
+ case 3:
+ default:
+ wmmps_ac = psta->uapsd_be&BIT(1);
+ break;
+ }
+
+ if (!wmmps_ac)
+ continue;
+
+ rtw_list_delete(&pxmitframe->list);
+
+ psta->sleepq_len--;
+ psta->sleepq_ac_len--;
+
+ if (psta->sleepq_ac_len > 0) {
+ pxmitframe->attrib.mdata = 1;
+ pxmitframe->attrib.eosp = 0;
+ } else {
+ pxmitframe->attrib.mdata = 0;
+ pxmitframe->attrib.eosp = 1;
+ }
+
+ pxmitframe->attrib.triggered = 1;
+
+ if (rtw_hal_xmit(padapter, pxmitframe) == true)
+ rtw_os_xmit_complete(padapter, pxmitframe);
+
+ if ((psta->sleepq_ac_len == 0) && (!psta->has_legacy_ac) && (wmmps_ac)) {
+ pstapriv->tim_bitmap &= ~BIT(psta->aid);
+
+			/* update BCN for TIM IE */
+ update_beacon(padapter, _TIM_IE_, NULL, false);
+ }
+ }
+
+ _exit_critical_bh(&psta->sleep_q.lock, &irql);
+}
+
+#endif
+
+void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms)
+{
+ sctx->timeout_ms = timeout_ms;
+ sctx->submit_time = rtw_get_current_time();
+ init_completion(&sctx->done);
+ sctx->status = RTW_SCTX_SUBMITTED;
+}
+
+int rtw_sctx_wait(struct submit_ctx *sctx)
+{
+ int ret = _FAIL;
+ unsigned long expire;
+ int status = 0;
+
+ expire = sctx->timeout_ms ? msecs_to_jiffies(sctx->timeout_ms) : MAX_SCHEDULE_TIMEOUT;
+ if (!wait_for_completion_timeout(&sctx->done, expire)) {
+ /* timeout, do something?? */
+ status = RTW_SCTX_DONE_TIMEOUT;
+ DBG_88E("%s timeout\n", __func__);
+ } else {
+ status = sctx->status;
+ }
+
+ if (status == RTW_SCTX_DONE_SUCCESS)
+ ret = _SUCCESS;
+
+ return ret;
+}
+
+static bool rtw_sctx_chk_warning_status(int status)
+{
+ switch (status) {
+ case RTW_SCTX_DONE_UNKNOWN:
+ case RTW_SCTX_DONE_BUF_ALLOC:
+ case RTW_SCTX_DONE_BUF_FREE:
+
+ case RTW_SCTX_DONE_DRV_STOP:
+ case RTW_SCTX_DONE_DEV_REMOVE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void rtw_sctx_done_err(struct submit_ctx **sctx, int status)
+{
+ if (*sctx) {
+		if (rtw_sctx_chk_warning_status(status))
+ DBG_88E("%s status:%d\n", __func__, status);
+ (*sctx)->status = status;
+ complete(&((*sctx)->done));
+ *sctx = NULL;
+ }
+}
+
+void rtw_sctx_done(struct submit_ctx **sctx)
+{
+ rtw_sctx_done_err(sctx, RTW_SCTX_DONE_SUCCESS);
+}
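+/*
+ * Typical submit-context usage (illustrative sketch only):
+ *
+ *	struct submit_ctx sctx;
+ *
+ *	rtw_sctx_init(&sctx, 500);	500 ms timeout
+ *	pxmitbuf->sctx = &sctx;		the completion path will signal it
+ *	... hand pxmitbuf to the hal/usb layer ...
+ *	ret = rtw_sctx_wait(&sctx);	_SUCCESS only on RTW_SCTX_DONE_SUCCESS
+ *
+ * The completion path calls rtw_sctx_done(&pxmitbuf->sctx) on success or
+ * rtw_sctx_done_err(&pxmitbuf->sctx, status) on error; both clear the
+ * pointer so a context is completed at most once.
+ */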
+
+int rtw_ack_tx_wait(struct xmit_priv *pxmitpriv, u32 timeout_ms)
+{
+ struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops;
+
+ pack_tx_ops->submit_time = rtw_get_current_time();
+ pack_tx_ops->timeout_ms = timeout_ms;
+ pack_tx_ops->status = RTW_SCTX_SUBMITTED;
+
+ return rtw_sctx_wait(pack_tx_ops);
+}
+
+void rtw_ack_tx_done(struct xmit_priv *pxmitpriv, int status)
+{
+ struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops;
+
+ if (pxmitpriv->ack_tx)
+ rtw_sctx_done_err(&pack_tx_ops, status);
+ else
+ DBG_88E("%s ack_tx not set\n", __func__);
+}
diff --git a/drivers/staging/rtl8188eu/hal/Hal8188EFWImg_CE.c b/drivers/staging/rtl8188eu/hal/Hal8188EFWImg_CE.c
new file mode 100644
index 0000000..95759be
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/Hal8188EFWImg_CE.c
@@ -0,0 +1,1761 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along with
+* this program; if not, write to the Free Software Foundation, Inc.,
+* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+*
+*
+******************************************************************************/
+#include "odm_precomp.h"
+
+const u8 Rtl8188EFwImgArray[Rtl8188EFWImgArrayLength] = {
+ 0xE1, 0x88, 0x10, 0x00, 0x0B, 0x00, 0x01, 0x00,
+ 0x01, 0x21, 0x11, 0x27, 0x30, 0x36, 0x00, 0x00,
+ 0x2D, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x45, 0x4E, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xC1, 0x6F, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xA1, 0xE6, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x02, 0x56, 0xF7, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC2, 0xAF, 0x80, 0xFE, 0x32, 0x12, 0x42, 0x04,
+ 0x85, 0xD0, 0x0B, 0x75, 0xD0, 0x08, 0xAA, 0xE0,
+ 0xC2, 0x8C, 0xE5, 0x8A, 0x24, 0x67, 0xF5, 0x8A,
+ 0xE5, 0x8C, 0x34, 0x79, 0xF5, 0x8C, 0xD2, 0x8C,
+ 0xEC, 0x24, 0x89, 0xF8, 0xE6, 0xBC, 0x03, 0x02,
+ 0x74, 0xFF, 0xC3, 0x95, 0x81, 0xB4, 0x40, 0x00,
+ 0x40, 0xCE, 0x79, 0x04, 0x78, 0x80, 0x16, 0xE6,
+ 0x08, 0x70, 0x0B, 0xC2, 0xAF, 0xE6, 0x30, 0xE1,
+ 0x03, 0x44, 0x18, 0xF6, 0xD2, 0xAF, 0x08, 0xD9,
+ 0xED, 0xEA, 0x8B, 0xD0, 0x22, 0xE5, 0x0C, 0xFF,
+ 0x23, 0x24, 0x81, 0xF8, 0x0F, 0x08, 0x08, 0xBF,
+ 0x04, 0x04, 0x7F, 0x00, 0x78, 0x81, 0xE6, 0x30,
+ 0xE4, 0xF2, 0x00, 0xE5, 0x0C, 0xC3, 0x9F, 0x50,
+ 0x20, 0x05, 0x0C, 0x74, 0x88, 0x25, 0x0C, 0xF8,
+ 0xE6, 0xFD, 0xA6, 0x81, 0x08, 0xE6, 0xAE, 0x0C,
+ 0xBE, 0x03, 0x02, 0x74, 0xFF, 0xCD, 0xF8, 0xE8,
+ 0x6D, 0x60, 0xE0, 0x08, 0xE6, 0xC0, 0xE0, 0x80,
+ 0xF6, 0xE5, 0x0C, 0xD3, 0x9F, 0x40, 0x27, 0xE5,
+ 0x0C, 0x24, 0x89, 0xF8, 0xE6, 0xAE, 0x0C, 0xBE,
+ 0x03, 0x02, 0x74, 0xFF, 0xFD, 0x18, 0xE6, 0xCD,
+ 0xF8, 0xE5, 0x81, 0x6D, 0x60, 0x06, 0xD0, 0xE0,
+ 0xF6, 0x18, 0x80, 0xF5, 0xE5, 0x0C, 0x24, 0x88,
+ 0xC8, 0xF6, 0x15, 0x0C, 0x80, 0xD3, 0xE5, 0x0C,
+ 0x23, 0x24, 0x81, 0xF8, 0x7F, 0x04, 0xC2, 0xAF,
+ 0xE6, 0x30, 0xE0, 0x03, 0x10, 0xE2, 0x0C, 0x7F,
+ 0x00, 0x30, 0xE1, 0x07, 0x30, 0xE3, 0x04, 0x7F,
+ 0x08, 0x54, 0xF4, 0x54, 0x7C, 0xC6, 0xD2, 0xAF,
+ 0x54, 0x80, 0x42, 0x07, 0x22, 0x78, 0x88, 0xA6,
+ 0x81, 0x74, 0x03, 0x60, 0x06, 0xFF, 0x08, 0x76,
+ 0xFF, 0xDF, 0xFB, 0x7F, 0x04, 0xE4, 0x78, 0x80,
+ 0xF6, 0x08, 0xF6, 0x08, 0xDF, 0xFA, 0x78, 0x81,
+ 0x76, 0x30, 0x90, 0x45, 0xDE, 0x74, 0x01, 0x93,
+ 0xC0, 0xE0, 0xE4, 0x93, 0xC0, 0xE0, 0x43, 0x89,
+ 0x01, 0x75, 0x8A, 0x60, 0x75, 0x8C, 0x79, 0xD2,
+ 0x8C, 0xD2, 0xAF, 0x22, 0x03, 0xEF, 0xD3, 0x94,
+ 0x03, 0x40, 0x03, 0x7F, 0xFF, 0x22, 0x74, 0x81,
+ 0x2F, 0x2F, 0xF8, 0xE6, 0x20, 0xE5, 0xF4, 0xC2,
+ 0xAF, 0xE6, 0x44, 0x30, 0xF6, 0xD2, 0xAF, 0xAE,
+ 0x0C, 0xEE, 0xC3, 0x9F, 0x50, 0x21, 0x0E, 0x74,
+ 0x88, 0x2E, 0xF8, 0xE6, 0xF9, 0x08, 0xE6, 0x18,
+ 0xBE, 0x03, 0x02, 0x74, 0xFF, 0xFD, 0xED, 0x69,
+ 0x60, 0x09, 0x09, 0xE7, 0x19, 0x19, 0xF7, 0x09,
+ 0x09, 0x80, 0xF3, 0x16, 0x16, 0x80, 0xDA, 0xEE,
+ 0xD3, 0x9F, 0x40, 0x04, 0x05, 0x81, 0x05, 0x81,
+ 0xEE, 0xD3, 0x9F, 0x40, 0x22, 0x74, 0x88, 0x2E,
+ 0xF8, 0x08, 0xE6, 0xF9, 0xEE, 0xB5, 0x0C, 0x02,
+ 0xA9, 0x81, 0x18, 0x06, 0x06, 0xE6, 0xFD, 0xED,
+ 0x69, 0x60, 0x09, 0x19, 0x19, 0xE7, 0x09, 0x09,
+ 0xF7, 0x19, 0x80, 0xF3, 0x1E, 0x80, 0xD9, 0xEF,
+ 0x24, 0x88, 0xF8, 0xE6, 0x04, 0xF8, 0xEF, 0x2F,
+ 0x04, 0x90, 0x45, 0xDE, 0x93, 0xF6, 0x08, 0xEF,
+ 0x2F, 0x93, 0xF6, 0x7F, 0x00, 0x22, 0xEF, 0xD3,
+ 0x94, 0x03, 0x40, 0x03, 0x7F, 0xFF, 0x22, 0xEF,
+ 0x23, 0x24, 0x81, 0xF8, 0xE6, 0x30, 0xE5, 0xF4,
+ 0xC2, 0xAF, 0xE6, 0x54, 0x8C, 0xF6, 0xD2, 0xAF,
+ 0xE5, 0x0C, 0xB5, 0x07, 0x0A, 0x74, 0x88, 0x2F,
+ 0xF8, 0xE6, 0xF5, 0x81, 0x02, 0x42, 0x4D, 0x50,
+ 0x2E, 0x74, 0x89, 0x2F, 0xF8, 0xE6, 0xBF, 0x03,
+ 0x02, 0x74, 0xFF, 0xFD, 0x18, 0xE6, 0xF9, 0x74,
+ 0x88, 0x2F, 0xF8, 0xFB, 0xE6, 0xFC, 0xE9, 0x6C,
+ 0x60, 0x08, 0xA8, 0x05, 0xE7, 0xF6, 0x1D, 0x19,
+ 0x80, 0xF4, 0xA8, 0x03, 0xA6, 0x05, 0x1F, 0xE5,
+ 0x0C, 0xB5, 0x07, 0xE3, 0x7F, 0x00, 0x22, 0x74,
+ 0x89, 0x2F, 0xF8, 0xE6, 0xFD, 0x18, 0x86, 0x01,
+ 0x0F, 0x74, 0x88, 0x2F, 0xF8, 0xA6, 0x01, 0x08,
+ 0x86, 0x04, 0xE5, 0x0C, 0xB5, 0x07, 0x02, 0xAC,
+ 0x81, 0xED, 0x6C, 0x60, 0x08, 0x0D, 0x09, 0xA8,
+ 0x05, 0xE6, 0xF7, 0x80, 0xF4, 0xE5, 0x0C, 0xB5,
+ 0x07, 0xDE, 0x89, 0x81, 0x7F, 0x00, 0x22, 0xEF,
+ 0xD3, 0x94, 0x03, 0x40, 0x03, 0x7F, 0xFF, 0x22,
+ 0xEF, 0x23, 0x24, 0x81, 0xF8, 0xC2, 0xAF, 0xE6,
+ 0x30, 0xE5, 0x05, 0x30, 0xE0, 0x02, 0xD2, 0xE4,
+ 0xD2, 0xE2, 0xC6, 0xD2, 0xAF, 0x7F, 0x00, 0x30,
+ 0xE2, 0x01, 0x0F, 0x02, 0x42, 0x4C, 0x8F, 0xF0,
+ 0xE4, 0xFF, 0xFE, 0xE5, 0x0C, 0x23, 0x24, 0x80,
+ 0xF8, 0xC2, 0xA9, 0x30, 0xF7, 0x0D, 0x7F, 0x08,
+ 0xE6, 0x60, 0x0B, 0x2D, 0xF6, 0x60, 0x30, 0x50,
+ 0x2E, 0x80, 0x07, 0x30, 0xF1, 0x06, 0xED, 0xF6,
+ 0x60, 0x25, 0x7E, 0x02, 0x08, 0x30, 0xF0, 0x10,
+ 0xC2, 0xAF, 0xE6, 0x10, 0xE7, 0x23, 0x0E, 0x30,
+ 0xE2, 0x0C, 0xD2, 0xAF, 0x7F, 0x04, 0x80, 0x12,
+ 0xC2, 0xAF, 0xE6, 0x10, 0xE7, 0x13, 0x54, 0xEC,
+ 0x4E, 0xF6, 0xD2, 0xAF, 0x02, 0x42, 0x4D, 0x7F,
+ 0x08, 0x08, 0xEF, 0x44, 0x83, 0xF4, 0xC2, 0xAF,
+ 0x56, 0xC6, 0xD2, 0xAF, 0x54, 0x80, 0x4F, 0xFF,
+ 0x22, 0xC5, 0xF0, 0xF8, 0xA3, 0xE0, 0x28, 0xF0,
+ 0xC5, 0xF0, 0xF8, 0xE5, 0x82, 0x15, 0x82, 0x70,
+ 0x02, 0x15, 0x83, 0xE0, 0x38, 0xF0, 0x22, 0xEF,
+ 0x5B, 0xFF, 0xEE, 0x5A, 0xFE, 0xED, 0x59, 0xFD,
+ 0xEC, 0x58, 0xFC, 0x22, 0xEF, 0x4B, 0xFF, 0xEE,
+ 0x4A, 0xFE, 0xED, 0x49, 0xFD, 0xEC, 0x48, 0xFC,
+ 0x22, 0xE0, 0xFC, 0xA3, 0xE0, 0xFD, 0xA3, 0xE0,
+ 0xFE, 0xA3, 0xE0, 0xFF, 0x22, 0xE2, 0xFC, 0x08,
+ 0xE2, 0xFD, 0x08, 0xE2, 0xFE, 0x08, 0xE2, 0xFF,
+ 0x22, 0xE2, 0xFB, 0x08, 0xE2, 0xF9, 0x08, 0xE2,
+ 0xFA, 0x08, 0xE2, 0xCB, 0xF8, 0x22, 0xEC, 0xF2,
+ 0x08, 0xED, 0xF2, 0x08, 0xEE, 0xF2, 0x08, 0xEF,
+ 0xF2, 0x22, 0xA4, 0x25, 0x82, 0xF5, 0x82, 0xE5,
+ 0xF0, 0x35, 0x83, 0xF5, 0x83, 0x22, 0xE0, 0xFB,
+ 0xA3, 0xE0, 0xFA, 0xA3, 0xE0, 0xF9, 0x22, 0xEB,
+ 0xF0, 0xA3, 0xEA, 0xF0, 0xA3, 0xE9, 0xF0, 0x22,
+ 0xD0, 0x83, 0xD0, 0x82, 0xF8, 0xE4, 0x93, 0x70,
+ 0x12, 0x74, 0x01, 0x93, 0x70, 0x0D, 0xA3, 0xA3,
+ 0x93, 0xF8, 0x74, 0x01, 0x93, 0xF5, 0x82, 0x88,
+ 0x83, 0xE4, 0x73, 0x74, 0x02, 0x93, 0x68, 0x60,
+ 0xEF, 0xA3, 0xA3, 0xA3, 0x80, 0xDF, 0x02, 0x45,
+ 0x8C, 0x02, 0x42, 0xDD, 0xE4, 0x93, 0xA3, 0xF8,
+ 0xE4, 0x93, 0xA3, 0x40, 0x03, 0xF6, 0x80, 0x01,
+ 0xF2, 0x08, 0xDF, 0xF4, 0x80, 0x29, 0xE4, 0x93,
+ 0xA3, 0xF8, 0x54, 0x07, 0x24, 0x0C, 0xC8, 0xC3,
+ 0x33, 0xC4, 0x54, 0x0F, 0x44, 0x20, 0xC8, 0x83,
+ 0x40, 0x04, 0xF4, 0x56, 0x80, 0x01, 0x46, 0xF6,
+ 0xDF, 0xE4, 0x80, 0x0B, 0x01, 0x02, 0x04, 0x08,
+ 0x10, 0x20, 0x40, 0x80, 0x90, 0x45, 0xD1, 0xE4,
+ 0x7E, 0x01, 0x93, 0x60, 0xBC, 0xA3, 0xFF, 0x54,
+ 0x3F, 0x30, 0xE5, 0x09, 0x54, 0x1F, 0xFE, 0xE4,
+ 0x93, 0xA3, 0x60, 0x01, 0x0E, 0xCF, 0x54, 0xC0,
+ 0x25, 0xE0, 0x60, 0xA8, 0x40, 0xB8, 0xE4, 0x93,
+ 0xA3, 0xFA, 0xE4, 0x93, 0xA3, 0xF8, 0xE4, 0x93,
+ 0xA3, 0xC8, 0xC5, 0x82, 0xC8, 0xCA, 0xC5, 0x83,
+ 0xCA, 0xF0, 0xA3, 0xC8, 0xC5, 0x82, 0xC8, 0xCA,
+ 0xC5, 0x83, 0xCA, 0xDF, 0xE9, 0xDE, 0xE7, 0x80,
+ 0xBE, 0x00, 0x41, 0x82, 0x09, 0x00, 0x41, 0x82,
+ 0x0A, 0x00, 0x41, 0x82, 0x17, 0x00, 0x59, 0xE2,
+ 0x5C, 0x24, 0x5E, 0x5D, 0x5F, 0xA1, 0xC0, 0xE0,
+ 0xC0, 0xF0, 0xC0, 0x83, 0xC0, 0x82, 0xC0, 0xD0,
+ 0x75, 0xD0, 0x00, 0xC0, 0x00, 0xC0, 0x01, 0xC0,
+ 0x02, 0xC0, 0x03, 0xC0, 0x04, 0xC0, 0x05, 0xC0,
+ 0x06, 0xC0, 0x07, 0x90, 0x01, 0xC4, 0x74, 0xE6,
+ 0xF0, 0x74, 0x45, 0xA3, 0xF0, 0xD1, 0x35, 0x74,
+ 0xE6, 0x04, 0x90, 0x01, 0xC4, 0xF0, 0x74, 0x45,
+ 0xA3, 0xF0, 0xD0, 0x07, 0xD0, 0x06, 0xD0, 0x05,
+ 0xD0, 0x04, 0xD0, 0x03, 0xD0, 0x02, 0xD0, 0x01,
+ 0xD0, 0x00, 0xD0, 0xD0, 0xD0, 0x82, 0xD0, 0x83,
+ 0xD0, 0xF0, 0xD0, 0xE0, 0x32, 0x90, 0x00, 0x54,
+ 0xE0, 0x55, 0x35, 0xF5, 0x39, 0xA3, 0xE0, 0x55,
+ 0x36, 0xF5, 0x3A, 0xA3, 0xE0, 0x55, 0x37, 0xF5,
+ 0x3B, 0xA3, 0xE0, 0x55, 0x38, 0xF5, 0x3C, 0xAD,
+ 0x39, 0x7F, 0x54, 0x12, 0x32, 0x1E, 0xAD, 0x3A,
+ 0x7F, 0x55, 0x12, 0x32, 0x1E, 0xAD, 0x3B, 0x7F,
+ 0x56, 0x12, 0x32, 0x1E, 0xAD, 0x3C, 0x7F, 0x57,
+ 0x12, 0x32, 0x1E, 0x53, 0x91, 0xEF, 0x22, 0xC0,
+ 0xE0, 0xC0, 0xF0, 0xC0, 0x83, 0xC0, 0x82, 0xC0,
+ 0xD0, 0x75, 0xD0, 0x00, 0xC0, 0x00, 0xC0, 0x01,
+ 0xC0, 0x02, 0xC0, 0x03, 0xC0, 0x04, 0xC0, 0x05,
+ 0xC0, 0x06, 0xC0, 0x07, 0x90, 0x01, 0xC4, 0x74,
+ 0x6F, 0xF0, 0x74, 0x46, 0xA3, 0xF0, 0x12, 0x6C,
+ 0x78, 0xE5, 0x41, 0x30, 0xE4, 0x04, 0x7F, 0x02,
+ 0x91, 0x27, 0xE5, 0x41, 0x30, 0xE6, 0x03, 0x12,
+ 0x6C, 0xD5, 0xE5, 0x43, 0x30, 0xE0, 0x03, 0x12,
+ 0x51, 0xC2, 0xE5, 0x43, 0x30, 0xE1, 0x03, 0x12,
+ 0x4D, 0x0C, 0xE5, 0x43, 0x30, 0xE2, 0x03, 0x12,
+ 0x4C, 0xC1, 0xE5, 0x43, 0x30, 0xE3, 0x03, 0x12,
+ 0x6C, 0xE2, 0xE5, 0x43, 0x30, 0xE4, 0x03, 0x12,
+ 0x6D, 0x04, 0xE5, 0x43, 0x30, 0xE5, 0x03, 0x12,
+ 0x6D, 0x33, 0xE5, 0x43, 0x30, 0xE6, 0x02, 0xF1,
+ 0x0F, 0xE5, 0x44, 0x30, 0xE1, 0x03, 0x12, 0x51,
+ 0x7F, 0x74, 0x6F, 0x04, 0x90, 0x01, 0xC4, 0xF0,
+ 0x74, 0x46, 0xA3, 0xF0, 0xD0, 0x07, 0xD0, 0x06,
+ 0xD0, 0x05, 0xD0, 0x04, 0xD0, 0x03, 0xD0, 0x02,
+ 0xD0, 0x01, 0xD0, 0x00, 0xD0, 0xD0, 0xD0, 0x82,
+ 0xD0, 0x83, 0xD0, 0xF0, 0xD0, 0xE0, 0x32, 0x90,
+ 0x80, 0xDE, 0xE0, 0xB4, 0x01, 0x13, 0x90, 0x81,
+ 0x27, 0xE0, 0x60, 0x0D, 0x90, 0x81, 0x2B, 0xE0,
+ 0x54, 0xFE, 0xF0, 0x54, 0x07, 0x70, 0x02, 0xF1,
+ 0x2A, 0x22, 0x90, 0x81, 0x1F, 0xE0, 0x90, 0x81,
+ 0x29, 0x30, 0xE0, 0x05, 0xE0, 0xFF, 0x02, 0x74,
+ 0x8F, 0xE0, 0xFF, 0x7D, 0x01, 0xD3, 0x10, 0xAF,
+ 0x01, 0xC3, 0xC0, 0xD0, 0x90, 0x82, 0x13, 0xED,
+ 0xF0, 0x90, 0x81, 0x2A, 0xE0, 0x90, 0x82, 0x14,
+ 0xF0, 0x90, 0x81, 0x24, 0xE0, 0xFE, 0xC4, 0x13,
+ 0x13, 0x54, 0x03, 0x30, 0xE0, 0x03, 0x02, 0x48,
+ 0xA0, 0xEE, 0xC4, 0x13, 0x13, 0x13, 0x54, 0x01,
+ 0x30, 0xE0, 0x03, 0x02, 0x48, 0xA0, 0x90, 0x82,
+ 0x14, 0xE0, 0xFE, 0x6F, 0x70, 0x03, 0x02, 0x48,
+ 0xA0, 0xEF, 0x70, 0x03, 0x02, 0x48, 0x17, 0x24,
+ 0xFE, 0x70, 0x03, 0x02, 0x48, 0x50, 0x24, 0xFE,
+ 0x60, 0x51, 0x24, 0xFC, 0x70, 0x03, 0x02, 0x48,
+ 0x8B, 0x24, 0xFC, 0x60, 0x03, 0x02, 0x48, 0xA0,
+ 0xEE, 0xB4, 0x0E, 0x03, 0x12, 0x49, 0x5E, 0x90,
+ 0x82, 0x14, 0xE0, 0x70, 0x05, 0x7F, 0x01, 0x12,
+ 0x49, 0x93, 0x90, 0x82, 0x14, 0xE0, 0xB4, 0x06,
+ 0x03, 0x12, 0x49, 0x34, 0x90, 0x82, 0x14, 0xE0,
+ 0xB4, 0x04, 0x0F, 0x90, 0x82, 0x13, 0xE0, 0xFF,
+ 0x60, 0x05, 0x12, 0x73, 0x75, 0x80, 0x03, 0x12,
+ 0x66, 0x26, 0x90, 0x82, 0x14, 0xE0, 0x64, 0x08,
+ 0x60, 0x03, 0x02, 0x48, 0xA0, 0x12, 0x73, 0xD3,
+ 0x02, 0x48, 0xA0, 0x90, 0x82, 0x14, 0xE0, 0x70,
+ 0x05, 0x7F, 0x01, 0x12, 0x49, 0x93, 0x90, 0x82,
+ 0x14, 0xE0, 0xB4, 0x06, 0x03, 0x12, 0x49, 0x34,
+ 0x90, 0x82, 0x14, 0xE0, 0xB4, 0x0E, 0x09, 0x12,
+ 0x48, 0xA5, 0xBF, 0x01, 0x03, 0x12, 0x49, 0x5E,
+ 0x90, 0x82, 0x14, 0xE0, 0x64, 0x0C, 0x60, 0x02,
+ 0x01, 0xA0, 0x11, 0xA5, 0xEF, 0x64, 0x01, 0x60,
+ 0x02, 0x01, 0xA0, 0x11, 0xFA, 0x01, 0xA0, 0x90,
+ 0x82, 0x14, 0xE0, 0xB4, 0x0E, 0x07, 0x11, 0xA5,
+ 0xBF, 0x01, 0x02, 0x31, 0x5E, 0x90, 0x82, 0x14,
+ 0xE0, 0xB4, 0x06, 0x02, 0x31, 0x34, 0x90, 0x82,
+ 0x14, 0xE0, 0xB4, 0x0C, 0x07, 0x11, 0xA5, 0xBF,
+ 0x01, 0x02, 0x11, 0xFA, 0x90, 0x82, 0x14, 0xE0,
+ 0x64, 0x04, 0x70, 0x5C, 0x12, 0x72, 0xF5, 0xEF,
+ 0x64, 0x01, 0x70, 0x54, 0x31, 0xBE, 0x80, 0x50,
+ 0x90, 0x82, 0x14, 0xE0, 0xB4, 0x0E, 0x07, 0x11,
+ 0xA5, 0xBF, 0x01, 0x02, 0x31, 0x5E, 0x90, 0x82,
+ 0x14, 0xE0, 0xB4, 0x06, 0x02, 0x31, 0x34, 0x90,
+ 0x82, 0x14, 0xE0, 0xB4, 0x0C, 0x07, 0x11, 0xA5,
+ 0xBF, 0x01, 0x02, 0x11, 0xFA, 0x90, 0x82, 0x14,
+ 0xE0, 0x70, 0x04, 0x7F, 0x01, 0x31, 0x93, 0x90,
+ 0x82, 0x14, 0xE0, 0xB4, 0x04, 0x1A, 0x12, 0x73,
+ 0xBB, 0x80, 0x15, 0x90, 0x82, 0x14, 0xE0, 0xB4,
+ 0x0C, 0x0E, 0x90, 0x81, 0x25, 0xE0, 0xFF, 0x13,
+ 0x13, 0x54, 0x3F, 0x30, 0xE0, 0x02, 0x31, 0xB1,
+ 0xD0, 0xD0, 0x92, 0xAF, 0x22, 0xD1, 0xAB, 0xEF,
+ 0x64, 0x01, 0x60, 0x08, 0x90, 0x01, 0xB8, 0x74,
+ 0x01, 0xF0, 0x80, 0x3D, 0x90, 0x81, 0x24, 0xE0,
+ 0xFF, 0x13, 0x13, 0x13, 0x54, 0x1F, 0x30, 0xE0,
+ 0x08, 0x90, 0x01, 0xB8, 0x74, 0x02, 0xF0, 0x80,
+ 0x28, 0xEF, 0xC4, 0x54, 0x0F, 0x30, 0xE0, 0x08,
+ 0x90, 0x01, 0xB8, 0x74, 0x04, 0xF0, 0x80, 0x19,
+ 0x90, 0x81, 0x29, 0xE0, 0xD3, 0x94, 0x04, 0x40,
+ 0x08, 0x90, 0x01, 0xB8, 0x74, 0x08, 0xF0, 0x80,
+ 0x08, 0x90, 0x01, 0xB8, 0xE4, 0xF0, 0x7F, 0x01,
+ 0x22, 0x90, 0x01, 0xB9, 0x74, 0x02, 0xF0, 0x7F,
+ 0x00, 0x22, 0x90, 0x80, 0xDE, 0xE0, 0x64, 0x01,
+ 0x70, 0x31, 0x90, 0x81, 0x25, 0xE0, 0x54, 0xFD,
+ 0xF0, 0x90, 0x05, 0x22, 0x74, 0x6F, 0xF0, 0x7F,
+ 0x01, 0xF1, 0x0D, 0xBF, 0x01, 0x12, 0x90, 0x81,
+ 0x24, 0xE0, 0x44, 0x80, 0xF0, 0x90, 0x81, 0x2A,
+ 0x74, 0x0E, 0xF0, 0x90, 0x81, 0x23, 0xF0, 0x22,
+ 0x90, 0x01, 0xB9, 0x74, 0x01, 0xF0, 0x90, 0x01,
+ 0xB8, 0x04, 0xF0, 0x22, 0x90, 0x81, 0x25, 0xE0,
+ 0x90, 0x06, 0x04, 0x20, 0xE0, 0x0C, 0xE0, 0x44,
+ 0x40, 0xF0, 0x90, 0x81, 0x2A, 0x74, 0x04, 0xF0,
+ 0x80, 0x0E, 0xE0, 0x54, 0x7F, 0xF0, 0x90, 0x81,
+ 0x2A, 0x74, 0x0C, 0xF0, 0x90, 0x81, 0x23, 0xF0,
+ 0x90, 0x05, 0x22, 0xE4, 0xF0, 0x22, 0x90, 0x81,
+ 0x25, 0xE0, 0xC3, 0x13, 0x20, 0xE0, 0x08, 0x90,
+ 0x81, 0x2A, 0x74, 0x0C, 0xF0, 0x80, 0x1E, 0x90,
+ 0x06, 0x04, 0xE0, 0x44, 0x40, 0xF0, 0xE0, 0x44,
+ 0x80, 0xF0, 0x90, 0x81, 0x2A, 0x74, 0x04, 0xF0,
+ 0x90, 0x05, 0x27, 0xE0, 0x44, 0x80, 0xF0, 0x90,
+ 0x81, 0x23, 0x74, 0x04, 0xF0, 0x90, 0x05, 0x22,
+ 0xE4, 0xF0, 0x22, 0x90, 0x82, 0x15, 0xEF, 0xF0,
+ 0x12, 0x54, 0x65, 0x90, 0x82, 0x15, 0xE0, 0x60,
+ 0x05, 0x90, 0x05, 0x22, 0xE4, 0xF0, 0x90, 0x81,
+ 0x2A, 0x74, 0x04, 0xF0, 0x90, 0x81, 0x23, 0xF0,
+ 0x22, 0x31, 0xE3, 0x90, 0x81, 0x2A, 0x74, 0x08,
+ 0xF0, 0x90, 0x81, 0x23, 0xF0, 0x22, 0x90, 0x05,
+ 0x22, 0x74, 0xFF, 0xF0, 0xF1, 0x3A, 0x90, 0x01,
+ 0x37, 0x74, 0x02, 0xF0, 0xFD, 0x7F, 0x03, 0x51,
+ 0x57, 0x31, 0xE3, 0xE4, 0x90, 0x81, 0x2A, 0xF0,
+ 0x90, 0x81, 0x23, 0xF0, 0x22, 0x90, 0x05, 0x22,
+ 0x74, 0xFF, 0xF0, 0xF1, 0x3A, 0x90, 0x85, 0xBB,
+ 0x12, 0x20, 0xDA, 0xCC, 0xF0, 0x00, 0xC0, 0x7F,
+ 0x8C, 0x7E, 0x08, 0x12, 0x2E, 0xA2, 0x90, 0x85,
+ 0xBB, 0x12, 0x20, 0xDA, 0x00, 0x00, 0x00, 0x14,
+ 0x7F, 0x70, 0x7E, 0x0E, 0x12, 0x2E, 0xA2, 0x90,
+ 0x81, 0xF9, 0x12, 0x20, 0xDA, 0x00, 0x00, 0x00,
+ 0x00, 0xE4, 0xFD, 0xFF, 0x12, 0x55, 0x1C, 0x7F,
+ 0x7C, 0x7E, 0x08, 0x12, 0x2D, 0x5C, 0xEC, 0x44,
+ 0x80, 0xFC, 0x90, 0x82, 0x05, 0x12, 0x20, 0xCE,
+ 0x90, 0x82, 0x05, 0x12, 0x44, 0xD9, 0x90, 0x85,
+ 0xBB, 0x12, 0x20, 0xCE, 0x7F, 0x7C, 0x7E, 0x08,
+ 0x12, 0x2E, 0xA2, 0x90, 0x01, 0x00, 0x74, 0x3F,
+ 0xF0, 0xA3, 0xE0, 0x54, 0xFD, 0xF0, 0x90, 0x05,
+ 0x53, 0xE0, 0x44, 0x20, 0xF0, 0x22, 0x90, 0x01,
+ 0x34, 0x74, 0x40, 0xF0, 0xFD, 0xE4, 0xFF, 0x74,
+ 0x3D, 0x2F, 0xF8, 0xE6, 0x4D, 0xFE, 0xF6, 0x74,
+ 0x30, 0x2F, 0xF5, 0x82, 0xE4, 0x34, 0x01, 0xF5,
+ 0x83, 0xEE, 0xF0, 0x22, 0xD3, 0x10, 0xAF, 0x01,
+ 0xC3, 0xC0, 0xD0, 0xE4, 0x90, 0x81, 0xCB, 0xF0,
+ 0x12, 0x1F, 0xA4, 0xFF, 0x54, 0x01, 0xFE, 0x90,
+ 0x81, 0x1F, 0xE0, 0x54, 0xFE, 0x4E, 0xFE, 0xF0,
+ 0xEF, 0x54, 0x02, 0xFF, 0xEE, 0x54, 0xFD, 0x4F,
+ 0xFF, 0xF0, 0x12, 0x1F, 0xA4, 0xFE, 0x54, 0x04,
+ 0xFD, 0xEF, 0x54, 0xFB, 0x4D, 0xFF, 0x90, 0x81,
+ 0x1F, 0xF0, 0xEE, 0x54, 0x08, 0xFE, 0xEF, 0x54,
+ 0xF7, 0x4E, 0xFF, 0xF0, 0x12, 0x1F, 0xA4, 0xFE,
+ 0x54, 0x10, 0xFD, 0xEF, 0x54, 0xEF, 0x4D, 0xFF,
+ 0x90, 0x81, 0x1F, 0xF0, 0xEE, 0x54, 0x20, 0xFE,
+ 0xEF, 0x54, 0xDF, 0x4E, 0xF0, 0x12, 0x1F, 0xA4,
+ 0xC3, 0x13, 0x20, 0xE0, 0x02, 0x61, 0x5E, 0x90,
+ 0x81, 0x1F, 0xE0, 0xFF, 0x30, 0xE0, 0x6D, 0x90,
+ 0x81, 0xCB, 0x74, 0x21, 0xF0, 0xEF, 0x13, 0x13,
+ 0x54, 0x3F, 0x30, 0xE0, 0x0B, 0x51, 0x4E, 0x90,
+ 0x81, 0xCB, 0xE0, 0x44, 0x08, 0xF0, 0x80, 0x0C,
+ 0xE4, 0x90, 0x81, 0x20, 0xF0, 0xA3, 0xF0, 0x7D,
+ 0x40, 0xFF, 0x91, 0x26, 0x90, 0x81, 0x1F, 0xE0,
+ 0xFD, 0x13, 0x13, 0x13, 0x54, 0x1F, 0x30, 0xE0,
+ 0x07, 0x90, 0x81, 0xCB, 0xE0, 0x44, 0x12, 0xF0,
+ 0xED, 0xC4, 0x54, 0x0F, 0x30, 0xE0, 0x07, 0x90,
+ 0x81, 0xCB, 0xE0, 0x44, 0x14, 0xF0, 0x90, 0x81,
+ 0x1F, 0xE0, 0xC4, 0x13, 0x54, 0x07, 0x30, 0xE0,
+ 0x07, 0x90, 0x81, 0xCB, 0xE0, 0x44, 0x80, 0xF0,
+ 0x90, 0x81, 0xCB, 0xE0, 0x90, 0x05, 0x27, 0xF0,
+ 0x90, 0x81, 0x22, 0xE0, 0x60, 0x02, 0x81, 0x17,
+ 0x7F, 0x01, 0x80, 0x15, 0x90, 0x81, 0xCB, 0x74,
+ 0x01, 0xF0, 0x90, 0x05, 0x27, 0xF0, 0x90, 0x81,
+ 0x22, 0xE0, 0x64, 0x04, 0x60, 0x02, 0x81, 0x17,
+ 0xFF, 0x12, 0x53, 0x0E, 0x81, 0x17, 0x90, 0x81,
+ 0x1F, 0xE0, 0xFF, 0x20, 0xE0, 0x02, 0x61, 0xE7,
+ 0x90, 0x81, 0xCB, 0x74, 0x31, 0xF0, 0xEF, 0x13,
+ 0x13, 0x54, 0x3F, 0x30, 0xE0, 0x0B, 0x51, 0x4E,
+ 0x90, 0x81, 0xCB, 0xE0, 0x44, 0x08, 0xF0, 0x80,
+ 0x06, 0x7D, 0x40, 0xE4, 0xFF, 0x91, 0x26, 0x90,
+ 0x81, 0x1F, 0xE0, 0xFD, 0x13, 0x13, 0x13, 0x54,
+ 0x1F, 0x30, 0xE0, 0x07, 0x90, 0x81, 0xCB, 0xE0,
+ 0x44, 0x02, 0xF0, 0xED, 0xC4, 0x54, 0x0F, 0x30,
+ 0xE0, 0x07, 0x90, 0x81, 0xCB, 0xE0, 0x44, 0x04,
+ 0xF0, 0x90, 0x81, 0xCB, 0xE0, 0x90, 0x05, 0x27,
+ 0xF0, 0x90, 0x81, 0x23, 0xE0, 0x64, 0x02, 0x70,
+ 0x1D, 0xFD, 0x7F, 0x04, 0x12, 0x47, 0x3D, 0x12,
+ 0x51, 0x73, 0xBF, 0x01, 0x09, 0x90, 0x81, 0x29,
+ 0xE0, 0xFF, 0x7D, 0x01, 0x80, 0x03, 0xE4, 0xFD,
+ 0xFF, 0x12, 0x47, 0x3D, 0x80, 0x41, 0x90, 0x81,
+ 0x2A, 0xE0, 0x90, 0x81, 0x23, 0xF0, 0x90, 0x05,
+ 0x27, 0xE0, 0x44, 0x40, 0xF0, 0x80, 0x30, 0x90,
+ 0x81, 0xCB, 0x74, 0x01, 0xF0, 0x90, 0x05, 0x27,
+ 0xF0, 0x90, 0x81, 0x23, 0xE0, 0xB4, 0x02, 0x06,
+ 0x7D, 0x01, 0x7F, 0x04, 0x80, 0x0B, 0x90, 0x81,
+ 0x23, 0xE0, 0xB4, 0x08, 0x07, 0x7D, 0x01, 0x7F,
+ 0x0C, 0x12, 0x47, 0x3D, 0xD1, 0x34, 0x90, 0x81,
+ 0x29, 0x12, 0x47, 0x39, 0x12, 0x5A, 0xA7, 0xD0,
+ 0xD0, 0x92, 0xAF, 0x22, 0x7D, 0x02, 0x7F, 0x02,
+ 0x91, 0x26, 0x7D, 0x01, 0x7F, 0x02, 0x74, 0x3D,
+ 0x2F, 0xF8, 0xE6, 0xFE, 0xED, 0xF4, 0x5E, 0xFE,
+ 0xF6, 0x74, 0x30, 0x2F, 0xF5, 0x82, 0xE4, 0x34,
+ 0x01, 0xF5, 0x83, 0xEE, 0xF0, 0x22, 0xEF, 0x70,
+ 0x37, 0x7D, 0x78, 0x7F, 0x02, 0x91, 0x26, 0x7D,
+ 0x02, 0x7F, 0x03, 0x91, 0x26, 0x7D, 0xC8, 0x7F,
+ 0x02, 0x12, 0x71, 0x8F, 0x90, 0x01, 0x57, 0xE4,
+ 0xF0, 0x90, 0x01, 0x3C, 0x74, 0x02, 0xF0, 0x7D,
+ 0x01, 0x7F, 0x0C, 0x12, 0x47, 0x3D, 0x90, 0x81,
+ 0x24, 0xE0, 0x54, 0xF7, 0xF0, 0x54, 0xEF, 0xF0,
+ 0x90, 0x06, 0x0A, 0xE0, 0x54, 0xF8, 0xF0, 0x22,
+ 0x90, 0x01, 0x36, 0x74, 0x78, 0xF0, 0xA3, 0x74,
+ 0x02, 0xF0, 0x7D, 0x78, 0xFF, 0x51, 0x57, 0x7D,
+ 0x02, 0x7F, 0x03, 0x51, 0x57, 0x90, 0x06, 0x0A,
+ 0xE0, 0x44, 0x07, 0xF0, 0x90, 0x81, 0x32, 0xA3,
+ 0xE0, 0x90, 0x05, 0x58, 0xF0, 0x90, 0x80, 0xDE,
+ 0xE0, 0xB4, 0x01, 0x15, 0x90, 0x81, 0x25, 0xE0,
+ 0x54, 0xFB, 0xF0, 0x90, 0x81, 0x2A, 0xE0, 0x20,
+ 0xE2, 0x0E, 0x7D, 0x01, 0x7F, 0x04, 0x02, 0x47,
+ 0x3D, 0x90, 0x81, 0x25, 0xE0, 0x44, 0x04, 0xF0,
+ 0x22, 0x90, 0x81, 0x1F, 0xE0, 0xFF, 0x30, 0xE0,
+ 0x08, 0x90, 0x81, 0x23, 0xE0, 0x64, 0x02, 0x60,
+ 0x3A, 0x90, 0x81, 0x27, 0xE0, 0x70, 0x04, 0xEF,
+ 0x30, 0xE0, 0x0A, 0x90, 0x81, 0x2A, 0xE0, 0x64,
+ 0x02, 0x60, 0x28, 0xB1, 0x83, 0x90, 0x81, 0x25,
+ 0xE0, 0x13, 0x13, 0x13, 0x54, 0x1F, 0x30, 0xE0,
+ 0x14, 0x90, 0x81, 0x2D, 0xE0, 0xFF, 0xA3, 0xE0,
+ 0x6F, 0x70, 0x0A, 0xF1, 0xCD, 0x91, 0x1C, 0x90,
+ 0x81, 0x2E, 0xE0, 0x14, 0xF0, 0x90, 0x01, 0xE6,
+ 0xE0, 0x04, 0xF0, 0x22, 0x90, 0x81, 0x1F, 0xE0,
+ 0x30, 0xE0, 0x06, 0x90, 0x81, 0x21, 0x74, 0x01,
+ 0xF0, 0x90, 0x81, 0x27, 0xE0, 0x60, 0x45, 0x90,
+ 0x81, 0x25, 0xE0, 0xFF, 0x13, 0x13, 0x13, 0x54,
+ 0x1F, 0x30, 0xE0, 0x12, 0x90, 0x01, 0x3B, 0xE0,
+ 0x30, 0xE4, 0x0B, 0x91, 0x1C, 0x90, 0x81, 0x2D,
+ 0xE0, 0x14, 0x90, 0x05, 0x73, 0xF0, 0x90, 0x82,
+ 0x0B, 0xE4, 0x75, 0xF0, 0x01, 0x12, 0x44, 0xA9,
+ 0xC3, 0x90, 0x82, 0x0C, 0xE0, 0x94, 0x80, 0x90,
+ 0x82, 0x0B, 0xE0, 0x64, 0x80, 0x94, 0x80, 0x40,
+ 0x0B, 0x90, 0x01, 0x98, 0xE0, 0x54, 0xFE, 0xF0,
+ 0xE0, 0x44, 0x01, 0xF0, 0x12, 0x75, 0xF8, 0xD1,
+ 0xD6, 0x90, 0x81, 0x3F, 0xE0, 0x30, 0xE0, 0x0C,
+ 0xE4, 0xF5, 0x1D, 0xA3, 0xF1, 0xFB, 0x90, 0x01,
+ 0x57, 0x74, 0x05, 0xF0, 0x90, 0x01, 0xBE, 0xE0,
+ 0x04, 0xF0, 0x22, 0x90, 0x80, 0xDE, 0xE0, 0x64,
+ 0x01, 0x60, 0x02, 0xC1, 0x23, 0x90, 0x81, 0x27,
+ 0xE0, 0x70, 0x02, 0xC1, 0x23, 0x90, 0x81, 0x26,
+ 0xE0, 0xC4, 0x54, 0x0F, 0x64, 0x01, 0x70, 0x22,
+ 0x90, 0x06, 0xAB, 0xE0, 0x90, 0x81, 0x2E, 0xF0,
+ 0x90, 0x06, 0xAA, 0xE0, 0x90, 0x81, 0x2D, 0xF0,
+ 0xA3, 0xE0, 0xFF, 0x70, 0x08, 0x90, 0x81, 0x2D,
+ 0xE0, 0xFE, 0xFF, 0x80, 0x00, 0x90, 0x81, 0x2E,
+ 0xEF, 0xF0, 0x90, 0x81, 0x25, 0xE0, 0x44, 0x04,
+ 0xF0, 0xE4, 0x90, 0x81, 0x30, 0xF0, 0x90, 0x81,
+ 0x32, 0xA3, 0xE0, 0x90, 0x05, 0x58, 0xF0, 0x90,
+ 0x01, 0x57, 0xE4, 0xF0, 0x90, 0x01, 0x3C, 0x74,
+ 0x02, 0xF0, 0x90, 0x81, 0x2B, 0xE0, 0x54, 0xFD,
+ 0xF0, 0x54, 0xEF, 0xF0, 0x90, 0x81, 0x26, 0xE0,
+ 0xFF, 0xC4, 0x54, 0x0F, 0x24, 0xFD, 0x50, 0x02,
+ 0x80, 0x0F, 0x90, 0x81, 0x1F, 0xE0, 0x30, 0xE0,
+ 0x05, 0x12, 0x6D, 0xF2, 0x80, 0x03, 0x12, 0x6E,
+ 0xC9, 0x90, 0x81, 0x25, 0xE0, 0x13, 0x13, 0x13,
+ 0x54, 0x1F, 0x30, 0xE0, 0x0E, 0x90, 0x81, 0x2D,
+ 0xE0, 0xFF, 0xA3, 0xE0, 0xB5, 0x07, 0x04, 0xF1,
+ 0xCD, 0x91, 0x22, 0x90, 0x81, 0x1F, 0xE0, 0xC3,
+ 0x13, 0x20, 0xE0, 0x07, 0x90, 0x81, 0x25, 0xE0,
+ 0x44, 0x04, 0xF0, 0x22, 0xD1, 0xAB, 0xEF, 0x70,
+ 0x02, 0xD1, 0x3C, 0x22, 0x90, 0x81, 0x27, 0xE0,
+ 0x64, 0x01, 0x70, 0x66, 0x90, 0x81, 0x26, 0xE0,
+ 0x54, 0x0F, 0x60, 0x51, 0x90, 0x81, 0x2A, 0xE0,
+ 0x70, 0x03, 0xFF, 0x31, 0x93, 0x90, 0x81, 0x2A,
+ 0xE0, 0x64, 0x0C, 0x60, 0x03, 0x12, 0x66, 0x26,
+ 0x90, 0x01, 0x5B, 0xE4, 0xF0, 0x90, 0x01, 0x3C,
+ 0x74, 0x04, 0xF0, 0xD1, 0xAB, 0xEF, 0x64, 0x01,
+ 0x60, 0x38, 0xE4, 0xF5, 0x1D, 0x90, 0x81, 0x3A,
+ 0xE0, 0xC3, 0x13, 0x54, 0x7F, 0xF5, 0x1E, 0xE4,
+ 0xFB, 0xFD, 0x7F, 0x58, 0x7E, 0x01, 0x12, 0x50,
+ 0x05, 0x90, 0x01, 0x5B, 0x74, 0x05, 0xF0, 0x90,
+ 0x06, 0x92, 0x74, 0x01, 0xF0, 0x90, 0x81, 0x24,
+ 0xE0, 0x44, 0x08, 0xF0, 0x22, 0x90, 0x81, 0x2A,
+ 0xE0, 0x70, 0x07, 0x7D, 0x01, 0x7F, 0x04, 0x12,
+ 0x47, 0x3D, 0x22, 0x90, 0x04, 0x1A, 0xE0, 0xF4,
+ 0x60, 0x03, 0x7F, 0x00, 0x22, 0x90, 0x04, 0x1B,
+ 0xE0, 0x54, 0x07, 0x64, 0x07, 0x7F, 0x01, 0x60,
+ 0x02, 0x7F, 0x00, 0x22, 0x12, 0x50, 0x60, 0x90,
+ 0x81, 0x2D, 0xE0, 0x14, 0x90, 0x05, 0x73, 0xF0,
+ 0x7D, 0x02, 0x7F, 0x02, 0x51, 0x57, 0x90, 0x81,
+ 0x42, 0xE0, 0x30, 0xE0, 0x2D, 0x90, 0x80, 0xDE,
+ 0xE0, 0xB4, 0x01, 0x26, 0x90, 0x82, 0x17, 0xE0,
+ 0x04, 0xF0, 0xE0, 0xB4, 0x0A, 0x0B, 0x90, 0x81,
+ 0x44, 0xE0, 0x04, 0xF0, 0xE4, 0x90, 0x82, 0x17,
+ 0xF0, 0x90, 0x81, 0x44, 0xE0, 0xFF, 0x90, 0x81,
+ 0x43, 0xE0, 0xB5, 0x07, 0x05, 0xE4, 0xA3, 0xF0,
+ 0xF1, 0x0B, 0x22, 0xE4, 0xFF, 0x8F, 0x53, 0x90,
+ 0x04, 0x1D, 0xE0, 0x60, 0x19, 0x90, 0x05, 0x22,
+ 0xE0, 0xF5, 0x56, 0x74, 0xFF, 0xF0, 0xF1, 0x3A,
+ 0xBF, 0x01, 0x03, 0x12, 0x74, 0xFB, 0x90, 0x05,
+ 0x22, 0xE5, 0x56, 0xF0, 0x80, 0x03, 0x12, 0x74,
+ 0xFB, 0x90, 0x04, 0x1F, 0x74, 0x20, 0xF0, 0x7F,
+ 0x01, 0x22, 0xE4, 0x90, 0x82, 0x0F, 0xF0, 0xA3,
+ 0xF0, 0x90, 0x05, 0xF8, 0xE0, 0x70, 0x0F, 0xA3,
+ 0xE0, 0x70, 0x0B, 0xA3, 0xE0, 0x70, 0x07, 0xA3,
+ 0xE0, 0x70, 0x03, 0x7F, 0x01, 0x22, 0xD3, 0x90,
+ 0x82, 0x10, 0xE0, 0x94, 0xE8, 0x90, 0x82, 0x0F,
+ 0xE0, 0x94, 0x03, 0x40, 0x0A, 0x90, 0x01, 0xC0,
+ 0xE0, 0x44, 0x20, 0xF0, 0x7F, 0x00, 0x22, 0x7F,
+ 0x32, 0x7E, 0x00, 0x12, 0x32, 0xAA, 0x90, 0x82,
+ 0x0F, 0xE4, 0x75, 0xF0, 0x01, 0x12, 0x44, 0xA9,
+ 0x80, 0xBF, 0x74, 0x1F, 0x2D, 0xF5, 0x82, 0xE4,
+ 0x34, 0xFC, 0xF5, 0x83, 0xE0, 0x54, 0x3F, 0xF0,
+ 0xEF, 0x60, 0x1D, 0x74, 0x21, 0x2D, 0xF5, 0x82,
+ 0xE4, 0x34, 0xFC, 0xF5, 0x83, 0xE0, 0x44, 0x10,
+ 0xF0, 0x74, 0x1F, 0x2D, 0xF5, 0x82, 0xE4, 0x34,
+ 0xFC, 0xF5, 0x83, 0xE0, 0x44, 0x80, 0xF0, 0x22,
+ 0x74, 0x21, 0x2D, 0xF5, 0x82, 0xE4, 0x34, 0xFC,
+ 0xF5, 0x83, 0xE0, 0x54, 0xEF, 0xF0, 0x74, 0x1F,
+ 0x2D, 0xF5, 0x82, 0xE4, 0x34, 0xFC, 0xF5, 0x83,
+ 0xE0, 0x44, 0x40, 0xF0, 0x22, 0xEF, 0x14, 0x90,
+ 0x05, 0x73, 0xF0, 0x90, 0x01, 0x3F, 0x74, 0x10,
+ 0xF0, 0xFD, 0x7F, 0x03, 0x74, 0x45, 0x2F, 0xF8,
+ 0xE6, 0x4D, 0xFE, 0xF6, 0x74, 0x38, 0x2F, 0xF5,
+ 0x82, 0xE4, 0x34, 0x01, 0xF5, 0x83, 0xEE, 0xF0,
+ 0x22, 0xE0, 0x44, 0x02, 0xF0, 0xE4, 0xF5, 0x1D,
+ 0x90, 0x81, 0x39, 0xE0, 0xF5, 0x1E, 0xE4, 0xFB,
+ 0xFD, 0x7F, 0x54, 0x7E, 0x01, 0x8E, 0x19, 0x8F,
+ 0x1A, 0xE5, 0x1E, 0x54, 0x07, 0xC4, 0x33, 0x54,
+ 0xE0, 0x85, 0x19, 0x83, 0x85, 0x1A, 0x82, 0xF0,
+ 0xE5, 0x1D, 0x54, 0x07, 0xC4, 0x33, 0x54, 0xE0,
+ 0xFF, 0xE5, 0x1E, 0x13, 0x13, 0x13, 0x54, 0x1F,
+ 0x4F, 0xA3, 0xF0, 0xEB, 0x54, 0x07, 0xC4, 0x33,
+ 0x54, 0xE0, 0xFF, 0xE5, 0x1D, 0x13, 0x13, 0x13,
+ 0x54, 0x1F, 0x4F, 0x85, 0x1A, 0x82, 0x85, 0x19,
+ 0x83, 0xA3, 0xA3, 0xF0, 0xBD, 0x01, 0x0C, 0x85,
+ 0x1A, 0x82, 0x8E, 0x83, 0xA3, 0xA3, 0xA3, 0x74,
+ 0x03, 0xF0, 0x22, 0x85, 0x1A, 0x82, 0x85, 0x19,
+ 0x83, 0xA3, 0xA3, 0xA3, 0x74, 0x01, 0xF0, 0x22,
+ 0xE4, 0x90, 0x81, 0x4D, 0xF0, 0x90, 0x81, 0x27,
+ 0xE0, 0x60, 0x58, 0x90, 0x80, 0xDE, 0xE0, 0x64,
+ 0x01, 0x70, 0x50, 0x90, 0x81, 0x4D, 0x04, 0xF0,
+ 0xE4, 0x90, 0x81, 0x2E, 0xF0, 0x90, 0x81, 0x1F,
+ 0xE0, 0x30, 0xE0, 0x15, 0x90, 0x81, 0x23, 0xE0,
+ 0xB4, 0x02, 0x05, 0xE4, 0x90, 0x81, 0x4D, 0xF0,
+ 0x31, 0x73, 0xEF, 0x70, 0x04, 0x90, 0x81, 0x4D,
+ 0xF0, 0x90, 0x81, 0x4D, 0xE0, 0x60, 0x24, 0x90,
+ 0x81, 0x2B, 0xE0, 0x44, 0x10, 0xF0, 0xE4, 0xF5,
+ 0x1D, 0x90, 0x81, 0x2F, 0x12, 0x4F, 0xFB, 0x90,
+ 0x01, 0x57, 0x74, 0x05, 0xF0, 0x90, 0x81, 0x2A,
+ 0xE0, 0x20, 0xE2, 0x07, 0x7D, 0x01, 0x7F, 0x04,
+ 0x12, 0x47, 0x3D, 0x22, 0xE4, 0x90, 0x81, 0x4C,
+ 0xF0, 0x90, 0x81, 0x27, 0xE0, 0x70, 0x02, 0x21,
+ 0x72, 0x90, 0x80, 0xDE, 0xE0, 0x64, 0x01, 0x60,
+ 0x02, 0x21, 0x72, 0x90, 0x81, 0x26, 0xE0, 0xFF,
+ 0xC4, 0x54, 0x0F, 0x60, 0x22, 0x24, 0xFE, 0x60,
+ 0x03, 0x04, 0x70, 0x21, 0x90, 0x81, 0x2E, 0xE0,
+ 0x14, 0xF0, 0xE0, 0xFF, 0x60, 0x06, 0x90, 0x81,
+ 0x30, 0xE0, 0x60, 0x11, 0xEF, 0x70, 0x08, 0x90,
+ 0x81, 0x2D, 0xE0, 0xA3, 0xF0, 0x80, 0x00, 0x90,
+ 0x81, 0x4C, 0x74, 0x01, 0xF0, 0x90, 0x81, 0x1F,
+ 0xE0, 0x30, 0xE0, 0x15, 0x90, 0x81, 0x23, 0xE0,
+ 0xB4, 0x02, 0x05, 0xE4, 0x90, 0x81, 0x4C, 0xF0,
+ 0x31, 0x73, 0xEF, 0x70, 0x04, 0x90, 0x81, 0x4C,
+ 0xF0, 0x90, 0x81, 0x4C, 0xE0, 0x60, 0x43, 0x90,
+ 0x81, 0x2B, 0xE0, 0x44, 0x10, 0xF0, 0x90, 0x81,
+ 0x30, 0xE0, 0x60, 0x03, 0xB4, 0x01, 0x09, 0xE4,
+ 0xF5, 0x1D, 0x90, 0x81, 0x30, 0xE0, 0x80, 0x0D,
+ 0xE4, 0xF5, 0x1D, 0x90, 0x81, 0x30, 0xE0, 0x75,
+ 0xF0, 0x03, 0xA4, 0x24, 0xFE, 0xFF, 0x90, 0x81,
+ 0x2F, 0xE0, 0x2F, 0x12, 0x4F, 0xFC, 0x90, 0x01,
+ 0x57, 0x74, 0x05, 0xF0, 0x90, 0x81, 0x2A, 0xE0,
+ 0x20, 0xE2, 0x07, 0x7D, 0x01, 0x7F, 0x04, 0x12,
+ 0x47, 0x3D, 0x22, 0x90, 0x05, 0x43, 0xE0, 0x7F,
+ 0x00, 0x30, 0xE7, 0x02, 0x7F, 0x01, 0x22, 0x90,
+ 0x81, 0x27, 0xE0, 0x70, 0x07, 0x90, 0x81, 0x1F,
+ 0xE0, 0x30, 0xE0, 0x11, 0x90, 0x81, 0x1F, 0xE0,
+ 0x30, 0xE0, 0x07, 0x31, 0x73, 0xBF, 0x01, 0x05,
+ 0x41, 0x5B, 0x12, 0x4E, 0x3C, 0x22, 0xD3, 0x10,
+ 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x90, 0x81, 0x1E,
+ 0xE0, 0xB4, 0x01, 0x04, 0x7F, 0x04, 0x80, 0x0B,
+ 0x31, 0x73, 0xBF, 0x01, 0x04, 0x7F, 0x01, 0x80,
+ 0x02, 0x7F, 0x02, 0x71, 0x0E, 0xD0, 0xD0, 0x92,
+ 0xAF, 0x22, 0x90, 0x81, 0x4B, 0xE0, 0x60, 0x0F,
+ 0xE4, 0xF0, 0x90, 0x05, 0x53, 0xE0, 0x44, 0x02,
+ 0xF0, 0x90, 0x05, 0xFC, 0xE0, 0x04, 0xF0, 0x90,
+ 0x81, 0x1F, 0xE0, 0x30, 0xE0, 0x10, 0xA3, 0x74,
+ 0x01, 0xF0, 0x90, 0x81, 0x1F, 0xE0, 0xFF, 0xC3,
+ 0x13, 0x30, 0xE0, 0x02, 0x31, 0x9E, 0x11, 0xC4,
+ 0x90, 0x81, 0x3F, 0xE0, 0x30, 0xE0, 0x07, 0x91,
+ 0x65, 0x90, 0x05, 0x22, 0xE4, 0xF0, 0x22, 0x90,
+ 0x81, 0x1F, 0xE0, 0xFF, 0x30, 0xE0, 0x3D, 0x90,
+ 0x81, 0x23, 0xE0, 0x7E, 0x00, 0xB4, 0x02, 0x02,
+ 0x7E, 0x01, 0x90, 0x81, 0x22, 0xE0, 0x7D, 0x00,
+ 0xB4, 0x04, 0x02, 0x7D, 0x01, 0xED, 0x4E, 0x70,
+ 0x23, 0xEF, 0xC3, 0x13, 0x30, 0xE0, 0x02, 0x21,
+ 0x9E, 0x51, 0x45, 0x90, 0x81, 0x23, 0xE0, 0xB4,
+ 0x08, 0x06, 0xE4, 0xFD, 0x7F, 0x0C, 0x80, 0x09,
+ 0x90, 0x81, 0x23, 0xE0, 0x70, 0x06, 0xFD, 0x7F,
+ 0x04, 0x12, 0x47, 0x3D, 0x22, 0x90, 0x81, 0x1E,
+ 0xE0, 0xB4, 0x01, 0x0F, 0x90, 0x81, 0x23, 0xE0,
+ 0x64, 0x02, 0x60, 0x07, 0x7D, 0x01, 0x7F, 0x02,
+ 0x12, 0x47, 0x3D, 0x90, 0x81, 0x27, 0xE0, 0x64,
+ 0x02, 0x60, 0x14, 0x90, 0x81, 0x26, 0xE0, 0x54,
+ 0x0F, 0x60, 0x0C, 0x12, 0x4E, 0xAB, 0xEF, 0x70,
+ 0x06, 0xFD, 0x7F, 0x0C, 0x12, 0x47, 0x3D, 0x22,
+ 0x90, 0x81, 0x1F, 0xE0, 0xFF, 0x30, 0xE0, 0x3F,
+ 0x90, 0x81, 0x23, 0xE0, 0x7E, 0x00, 0xB4, 0x02,
+ 0x02, 0x7E, 0x01, 0x90, 0x81, 0x22, 0xE0, 0x7D,
+ 0x00, 0xB4, 0x04, 0x02, 0x7D, 0x01, 0xED, 0x4E,
+ 0x70, 0x25, 0xEF, 0xC3, 0x13, 0x30, 0xE0, 0x02,
+ 0x21, 0x9E, 0x12, 0x74, 0xAC, 0x90, 0x81, 0x23,
+ 0xE0, 0xB4, 0x0C, 0x06, 0xE4, 0xFD, 0x7F, 0x08,
+ 0x80, 0x0A, 0x90, 0x81, 0x23, 0xE0, 0xB4, 0x04,
+ 0x06, 0xE4, 0xFD, 0xFF, 0x12, 0x47, 0x3D, 0x22,
+ 0xD3, 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x90,
+ 0x81, 0xCB, 0x12, 0x45, 0x1F, 0x12, 0x1F, 0xA4,
+ 0xFF, 0x90, 0x81, 0x1E, 0xF0, 0xBF, 0x01, 0x12,
+ 0x90, 0x81, 0xCB, 0x12, 0x45, 0x16, 0x90, 0x00,
+ 0x01, 0x12, 0x1F, 0xBD, 0x64, 0x01, 0x60, 0x21,
+ 0x80, 0x1D, 0x90, 0x81, 0xCB, 0x12, 0x45, 0x16,
+ 0x90, 0x00, 0x01, 0x12, 0x1F, 0xBD, 0x64, 0x01,
+ 0x60, 0x0F, 0x90, 0x81, 0x1F, 0xE0, 0x20, 0xE0,
+ 0x06, 0xE4, 0xFF, 0x71, 0x0E, 0x80, 0x02, 0x31,
+ 0x9E, 0xD0, 0xD0, 0x92, 0xAF, 0x22, 0xD3, 0x10,
+ 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x90, 0x81, 0x22,
+ 0xE0, 0x90, 0x82, 0x16, 0xF0, 0x6F, 0x70, 0x02,
+ 0x81, 0x04, 0xEF, 0x14, 0x60, 0x3E, 0x14, 0x60,
+ 0x62, 0x14, 0x70, 0x02, 0x61, 0xB8, 0x14, 0x70,
+ 0x02, 0x61, 0xDF, 0x24, 0x04, 0x60, 0x02, 0x81,
+ 0x04, 0x90, 0x82, 0x16, 0xE0, 0xFF, 0xB4, 0x04,
+ 0x04, 0x91, 0x41, 0x81, 0x04, 0xEF, 0xB4, 0x02,
+ 0x04, 0x91, 0x50, 0x81, 0x04, 0x90, 0x82, 0x16,
+ 0xE0, 0xFF, 0xB4, 0x03, 0x04, 0x91, 0x54, 0x81,
+ 0x04, 0xEF, 0x64, 0x01, 0x60, 0x02, 0x81, 0x04,
+ 0x91, 0x43, 0x81, 0x04, 0x90, 0x82, 0x16, 0xE0,
+ 0xFF, 0xB4, 0x04, 0x04, 0x91, 0xF3, 0x81, 0x04,
+ 0xEF, 0xB4, 0x02, 0x04, 0x91, 0x58, 0x81, 0x04,
+ 0x90, 0x82, 0x16, 0xE0, 0xFF, 0xB4, 0x03, 0x04,
+ 0x91, 0xE8, 0x81, 0x04, 0xEF, 0x70, 0x7D, 0x91,
+ 0x2B, 0x80, 0x79, 0x90, 0x82, 0x16, 0xE0, 0xB4,
+ 0x04, 0x05, 0x12, 0x74, 0x60, 0x80, 0x6D, 0x90,
+ 0x82, 0x16, 0xE0, 0xB4, 0x01, 0x04, 0x91, 0x21,
+ 0x80, 0x62, 0x90, 0x82, 0x16, 0xE0, 0xB4, 0x03,
+ 0x05, 0x12, 0x74, 0x71, 0x80, 0x56, 0x90, 0x82,
+ 0x16, 0xE0, 0x70, 0x50, 0x91, 0x1F, 0x80, 0x4C,
+ 0x90, 0x82, 0x16, 0xE0, 0xFF, 0xB4, 0x04, 0x05,
+ 0x12, 0x74, 0x4C, 0x80, 0x3F, 0xEF, 0xB4, 0x01,
+ 0x04, 0x91, 0x34, 0x80, 0x37, 0xEF, 0xB4, 0x02,
+ 0x04, 0x91, 0xDF, 0x80, 0x2F, 0x90, 0x82, 0x16,
+ 0xE0, 0x70, 0x29, 0x91, 0x32, 0x80, 0x25, 0x90,
+ 0x82, 0x16, 0xE0, 0xFF, 0xB4, 0x03, 0x05, 0x12,
+ 0x74, 0x7B, 0x80, 0x18, 0xEF, 0xB4, 0x01, 0x04,
+ 0x91, 0x0B, 0x80, 0x10, 0xEF, 0xB4, 0x02, 0x04,
+ 0xB1, 0x06, 0x80, 0x08, 0x90, 0x82, 0x16, 0xE0,
+ 0x70, 0x02, 0x91, 0x09, 0xD0, 0xD0, 0x92, 0xAF,
+ 0x22, 0x91, 0x2B, 0x90, 0x05, 0x22, 0x74, 0x6F,
+ 0xF0, 0x90, 0x05, 0x27, 0xE0, 0x54, 0xBF, 0xF0,
+ 0x90, 0x81, 0x22, 0x74, 0x04, 0xF0, 0x22, 0x91,
+ 0x2B, 0x12, 0x49, 0xDD, 0x90, 0x81, 0x22, 0x74,
+ 0x02, 0xF0, 0x22, 0x90, 0x81, 0x22, 0x74, 0x01,
+ 0xF0, 0x22, 0x91, 0x2B, 0x90, 0x05, 0x22, 0x74,
+ 0xFF, 0xF0, 0x90, 0x81, 0x22, 0x74, 0x03, 0xF0,
+ 0x22, 0x91, 0xF3, 0x90, 0x05, 0x27, 0xE0, 0x54,
+ 0xBF, 0xF0, 0xE4, 0x90, 0x81, 0x22, 0xF0, 0x22,
+ 0x91, 0x58, 0x80, 0xEF, 0x91, 0xE8, 0x80, 0xEB,
+ 0x91, 0x65, 0x90, 0x05, 0x22, 0xE4, 0xF0, 0x90,
+ 0x81, 0x22, 0x04, 0xF0, 0x22, 0xD3, 0x10, 0xAF,
+ 0x01, 0xC3, 0xC0, 0xD0, 0x90, 0x01, 0x01, 0xE0,
+ 0x44, 0x02, 0xF0, 0x90, 0x01, 0x00, 0x74, 0xFF,
+ 0xF0, 0x90, 0x06, 0xB7, 0x74, 0x09, 0xF0, 0x90,
+ 0x06, 0xB4, 0x74, 0x86, 0xF0, 0x7F, 0x7C, 0x7E,
+ 0x08, 0x12, 0x2D, 0x5C, 0xEC, 0x54, 0x7F, 0xFC,
+ 0x90, 0x82, 0x01, 0x12, 0x20, 0xCE, 0x90, 0x82,
+ 0x01, 0x12, 0x44, 0xD9, 0x90, 0x85, 0xBB, 0x12,
+ 0x20, 0xCE, 0x7F, 0x7C, 0x7E, 0x08, 0x12, 0x2E,
+ 0xA2, 0x90, 0x85, 0xBB, 0x12, 0x20, 0xDA, 0xCC,
+ 0xC0, 0x00, 0xC0, 0x7F, 0x8C, 0x7E, 0x08, 0x12,
+ 0x2E, 0xA2, 0x90, 0x85, 0xBB, 0x12, 0x20, 0xDA,
+ 0x00, 0xC0, 0x00, 0x14, 0x7F, 0x70, 0x7E, 0x0E,
+ 0x12, 0x2E, 0xA2, 0x90, 0x81, 0xF9, 0x12, 0x20,
+ 0xDA, 0x00, 0x03, 0x3E, 0x60, 0xE4, 0xFD, 0xFF,
+ 0xB1, 0x1C, 0xD0, 0xD0, 0x92, 0xAF, 0x22, 0x91,
+ 0x65, 0x90, 0x81, 0x22, 0x74, 0x03, 0xF0, 0x22,
+ 0x90, 0x05, 0x22, 0xE4, 0xF0, 0x90, 0x81, 0x22,
+ 0x04, 0xF0, 0x22, 0x90, 0x05, 0x22, 0xE4, 0xF0,
+ 0x90, 0x05, 0x27, 0xE0, 0x44, 0x40, 0xF0, 0x90,
+ 0x81, 0x22, 0x74, 0x01, 0xF0, 0x22, 0x91, 0x65,
+ 0x90, 0x05, 0x22, 0x74, 0x6F, 0xF0, 0x90, 0x05,
+ 0x27, 0xE0, 0x54, 0xBF, 0xF0, 0x90, 0x81, 0x22,
+ 0x74, 0x04, 0xF0, 0x22, 0xD3, 0x10, 0xAF, 0x01,
+ 0xC3, 0xC0, 0xD0, 0xC0, 0x07, 0xC0, 0x05, 0x90,
+ 0x81, 0xF9, 0x12, 0x44, 0xD9, 0x90, 0x81, 0xE5,
+ 0x12, 0x20, 0xCE, 0xD0, 0x05, 0xD0, 0x07, 0x12,
+ 0x60, 0xF5, 0xD0, 0xD0, 0x92, 0xAF, 0x22, 0x90,
+ 0x81, 0xC8, 0x12, 0x45, 0x1F, 0xEF, 0x12, 0x45,
+ 0x28, 0x55, 0x71, 0x00, 0x55, 0x7A, 0x01, 0x55,
+ 0x83, 0x02, 0x55, 0x8B, 0x03, 0x55, 0x94, 0x04,
+ 0x55, 0x9C, 0x20, 0x55, 0xA4, 0x21, 0x55, 0xAD,
+ 0x23, 0x55, 0xB5, 0x24, 0x55, 0xBE, 0x25, 0x55,
+ 0xC7, 0x26, 0x55, 0xCF, 0xC0, 0x00, 0x00, 0x55,
+ 0xD8, 0x90, 0x81, 0xC8, 0x12, 0x45, 0x16, 0x02,
+ 0x6A, 0xB0, 0x90, 0x81, 0xC8, 0x12, 0x45, 0x16,
+ 0x02, 0x65, 0x81, 0x90, 0x81, 0xC8, 0x12, 0x45,
+ 0x16, 0x41, 0xC0, 0x90, 0x81, 0xC8, 0x12, 0x45,
+ 0x16, 0x02, 0x75, 0xD8, 0x90, 0x81, 0xC8, 0x12,
+ 0x45, 0x16, 0x80, 0x44, 0x90, 0x81, 0xC8, 0x12,
+ 0x45, 0x16, 0xC1, 0x4B, 0x90, 0x81, 0xC8, 0x12,
+ 0x45, 0x16, 0x02, 0x6A, 0xF8, 0x90, 0x81, 0xC8,
+ 0x12, 0x45, 0x16, 0xE1, 0xE1, 0x90, 0x81, 0xC8,
+ 0x12, 0x45, 0x16, 0x02, 0x4A, 0x6C, 0x90, 0x81,
+ 0xC8, 0x12, 0x45, 0x16, 0x02, 0x6B, 0x3E, 0x90,
+ 0x81, 0xC8, 0x12, 0x45, 0x16, 0x80, 0x3E, 0x90,
+ 0x81, 0xC8, 0x12, 0x45, 0x16, 0x02, 0x6B, 0x4E,
+ 0x90, 0x01, 0xC0, 0xE0, 0x44, 0x01, 0xF0, 0x22,
+ 0x12, 0x5A, 0x4B, 0x12, 0x1F, 0xA4, 0xFF, 0x54,
+ 0x01, 0xFE, 0x90, 0x81, 0x45, 0xE0, 0x54, 0xFE,
+ 0x4E, 0xF0, 0xEF, 0xC3, 0x13, 0x30, 0xE0, 0x14,
+ 0x90, 0x00, 0x01, 0x12, 0x1F, 0xBD, 0x90, 0x81,
+ 0x46, 0xF0, 0x90, 0x00, 0x02, 0x12, 0x1F, 0xBD,
+ 0x90, 0x81, 0x47, 0xF0, 0x22, 0x12, 0x1F, 0xA4,
+ 0xFF, 0x54, 0x01, 0xFE, 0x90, 0x81, 0x3F, 0xE0,
+ 0x54, 0xFE, 0x4E, 0xF0, 0x90, 0x00, 0x01, 0x12,
+ 0x1F, 0xBD, 0xFE, 0x90, 0x05, 0x54, 0xE0, 0xC3,
+ 0x9E, 0x90, 0x81, 0x40, 0xF0, 0xEF, 0x20, 0xE0,
+ 0x07, 0x91, 0x65, 0x90, 0x05, 0x22, 0xE4, 0xF0,
+ 0x90, 0x81, 0x3F, 0xE0, 0x54, 0x01, 0x90, 0x01,
+ 0xBC, 0xF0, 0x90, 0x81, 0x40, 0xE0, 0x90, 0x01,
+ 0xBD, 0xF0, 0x22, 0x12, 0x1F, 0xA4, 0xFF, 0x54,
+ 0x7F, 0x90, 0x81, 0x27, 0xF0, 0xEF, 0xC4, 0x13,
+ 0x13, 0x13, 0x54, 0x01, 0xA3, 0xF0, 0x90, 0x00,
+ 0x01, 0x12, 0x1F, 0xBD, 0xFF, 0x54, 0xF0, 0xC4,
+ 0x54, 0x0F, 0xFE, 0x90, 0x81, 0x26, 0xE0, 0x54,
+ 0xF0, 0x4E, 0xF0, 0x90, 0x00, 0x03, 0x12, 0x1F,
+ 0xBD, 0x54, 0x01, 0x25, 0xE0, 0xFE, 0x90, 0x81,
+ 0x24, 0xE0, 0x54, 0xFD, 0x4E, 0xF0, 0xEF, 0x54,
+ 0x0F, 0xC4, 0x54, 0xF0, 0xFF, 0x90, 0x81, 0x26,
+ 0xE0, 0x54, 0x0F, 0x4F, 0xF0, 0x90, 0x00, 0x04,
+ 0x12, 0x1F, 0xBD, 0x90, 0x81, 0x29, 0xF0, 0xD1,
+ 0xC6, 0x90, 0x01, 0xB9, 0x74, 0x01, 0xF0, 0x90,
+ 0x01, 0xB8, 0xF0, 0x90, 0x81, 0x27, 0xE0, 0x90,
+ 0x01, 0xBA, 0xF0, 0x90, 0x81, 0x29, 0xE0, 0x90,
+ 0x01, 0xBB, 0xF0, 0x90, 0x81, 0x26, 0xE0, 0x54,
+ 0x0F, 0x90, 0x01, 0xBE, 0xF0, 0x22, 0x90, 0x81,
+ 0xCB, 0x12, 0x45, 0x1F, 0x12, 0x72, 0xB3, 0x90,
+ 0x81, 0x27, 0xE0, 0xFF, 0x12, 0x4C, 0x3E, 0x90,
+ 0x81, 0x27, 0xE0, 0x60, 0x19, 0x90, 0x81, 0xCB,
+ 0x12, 0x45, 0x16, 0x90, 0x00, 0x01, 0x12, 0x1F,
+ 0xBD, 0x54, 0x0F, 0xFF, 0x90, 0x00, 0x02, 0x12,
+ 0x1F, 0xBD, 0xFD, 0x12, 0x72, 0xC4, 0x22, 0xC0,
+ 0xE0, 0xC0, 0xF0, 0xC0, 0x83, 0xC0, 0x82, 0xC0,
+ 0xD0, 0x75, 0xD0, 0x00, 0xC0, 0x00, 0xC0, 0x01,
+ 0xC0, 0x02, 0xC0, 0x03, 0xC0, 0x04, 0xC0, 0x05,
+ 0xC0, 0x06, 0xC0, 0x07, 0x90, 0x01, 0xC4, 0x74,
+ 0xF7, 0xF0, 0x74, 0x56, 0xA3, 0xF0, 0x12, 0x6C,
+ 0xA5, 0xE5, 0x49, 0x30, 0xE1, 0x03, 0x12, 0x6F,
+ 0x79, 0xE5, 0x49, 0x30, 0xE2, 0x02, 0xF1, 0xA5,
+ 0xE5, 0x49, 0x30, 0xE3, 0x03, 0x12, 0x6F, 0x8D,
+ 0xE5, 0x4A, 0x30, 0xE0, 0x03, 0x12, 0x6F, 0xC9,
+ 0xE5, 0x4A, 0x30, 0xE4, 0x03, 0x12, 0x70, 0x22,
+ 0xE5, 0x4B, 0x30, 0xE1, 0x02, 0x51, 0x78, 0xE5,
+ 0x4B, 0x30, 0xE0, 0x02, 0x31, 0xFF, 0xE5, 0x4B,
+ 0x30, 0xE3, 0x02, 0xF1, 0xE0, 0xE5, 0x4C, 0x30,
+ 0xE1, 0x05, 0x7F, 0x03, 0x12, 0x44, 0x27, 0xE5,
+ 0x4C, 0x30, 0xE4, 0x03, 0x12, 0x4E, 0xC4, 0xE5,
+ 0x4C, 0x30, 0xE5, 0x03, 0x12, 0x70, 0x38, 0xE5,
+ 0x4C, 0x30, 0xE6, 0x03, 0x12, 0x70, 0xCE, 0x74,
+ 0xF7, 0x04, 0x90, 0x01, 0xC4, 0xF0, 0x74, 0x56,
+ 0xA3, 0xF0, 0xD0, 0x07, 0xD0, 0x06, 0xD0, 0x05,
+ 0xD0, 0x04, 0xD0, 0x03, 0xD0, 0x02, 0xD0, 0x01,
+ 0xD0, 0x00, 0xD0, 0xD0, 0xD0, 0x82, 0xD0, 0x83,
+ 0xD0, 0xF0, 0xD0, 0xE0, 0x32, 0x90, 0x81, 0x27,
+ 0xE0, 0x60, 0x34, 0x90, 0x06, 0x92, 0xE0, 0x30,
+ 0xE0, 0x23, 0xE4, 0xF5, 0x1D, 0x90, 0x81, 0x3A,
+ 0xE0, 0xC3, 0x13, 0x54, 0x7F, 0xF5, 0x1E, 0xE4,
+ 0xFB, 0xFD, 0x7F, 0x58, 0x7E, 0x01, 0x11, 0x05,
+ 0x90, 0x01, 0x5B, 0x74, 0x05, 0xF0, 0x90, 0x06,
+ 0x92, 0x74, 0x01, 0xF0, 0x22, 0x90, 0x81, 0x24,
+ 0xE0, 0x54, 0xF7, 0xF0, 0x12, 0x47, 0x2A, 0x22,
+ 0x22, 0x12, 0x1F, 0xA4, 0x90, 0x81, 0x31, 0xF0,
+ 0x22, 0x90, 0x01, 0xC8, 0xE4, 0xF0, 0xA3, 0xF0,
+ 0xA3, 0xF0, 0x7B, 0x01, 0x7A, 0x81, 0x79, 0x51,
+ 0x7F, 0xFF, 0xFE, 0x12, 0x2B, 0x27, 0xBF, 0x01,
+ 0x09, 0x90, 0x81, 0x51, 0xE0, 0x64, 0x03, 0x60,
+ 0x03, 0x22, 0x01, 0xAB, 0xE4, 0x90, 0x81, 0x56,
+ 0xF0, 0x90, 0x81, 0x56, 0xE0, 0xFF, 0xC3, 0x94,
+ 0x02, 0x40, 0x02, 0x01, 0xE6, 0xC3, 0x74, 0xFE,
+ 0x9F, 0xFF, 0xE4, 0x94, 0x00, 0xFE, 0x7B, 0x01,
+ 0x7A, 0x81, 0x79, 0x52, 0x12, 0x2B, 0x27, 0xEF,
+ 0x64, 0x01, 0x70, 0x77, 0x90, 0x81, 0x52, 0xE0,
+ 0xFF, 0x54, 0xC0, 0xFE, 0x60, 0x05, 0xEF, 0x54,
+ 0x0C, 0x70, 0x16, 0x90, 0x81, 0x52, 0xE0, 0xFF,
+ 0x54, 0x30, 0x60, 0x67, 0xEF, 0x54, 0x03, 0x60,
+ 0x62, 0x90, 0x81, 0x53, 0x74, 0x01, 0xF0, 0x80,
+ 0x05, 0xE4, 0x90, 0x81, 0x53, 0xF0, 0x90, 0x81,
+ 0x53, 0xE0, 0x90, 0x81, 0x52, 0x70, 0x16, 0xE0,
+ 0xFF, 0xEE, 0x13, 0x13, 0x54, 0x3F, 0x90, 0x81,
+ 0x54, 0xF0, 0xEF, 0x54, 0x0C, 0x13, 0x13, 0x54,
+ 0x3F, 0xA3, 0xF0, 0x80, 0x0D, 0xE0, 0xFE, 0x54,
+ 0x30, 0x90, 0x81, 0x54, 0xF0, 0xEE, 0x54, 0x03,
+ 0xA3, 0xF0, 0x90, 0x81, 0x54, 0xE0, 0x64, 0x30,
+ 0x70, 0x54, 0xA3, 0xE0, 0x64, 0x02, 0x70, 0x4E,
+ 0x90, 0x00, 0xF5, 0xE0, 0x54, 0x40, 0x90, 0x81,
+ 0x57, 0xF0, 0xE0, 0x70, 0x41, 0xA3, 0x74, 0x02,
+ 0xF0, 0x80, 0x10, 0x90, 0x81, 0x58, 0x74, 0x01,
+ 0xF0, 0x80, 0x08, 0x90, 0x81, 0x56, 0xE0, 0x04,
+ 0xF0, 0x01, 0x11, 0x90, 0x01, 0xC4, 0x74, 0xE9,
+ 0xF0, 0x74, 0x57, 0xA3, 0xF0, 0x90, 0x81, 0x58,
+ 0xE0, 0x90, 0x01, 0xC8, 0xF0, 0x90, 0x81, 0x52,
+ 0xE0, 0x90, 0x01, 0xC9, 0xF0, 0x90, 0x81, 0x53,
+ 0xE0, 0x90, 0x01, 0xCA, 0xF0, 0xE4, 0xFD, 0x7F,
+ 0x1F, 0x12, 0x32, 0x1E, 0x80, 0xD5, 0x22, 0x90,
+ 0x00, 0xF7, 0xE0, 0x20, 0xE7, 0x09, 0xE0, 0x7F,
+ 0x01, 0x20, 0xE6, 0x0C, 0x7F, 0x02, 0x22, 0x90,
+ 0x00, 0xF7, 0xE0, 0x30, 0xE6, 0x02, 0x7F, 0x03,
+ 0x22, 0x11, 0xE7, 0x90, 0x80, 0x3C, 0xEF, 0xF0,
+ 0x31, 0x13, 0x90, 0x01, 0x64, 0x74, 0x01, 0xF0,
+ 0x02, 0x2D, 0xA7, 0x31, 0x81, 0x31, 0xB1, 0x31,
+ 0x40, 0x31, 0x5F, 0xE4, 0xF5, 0x35, 0xF5, 0x36,
+ 0xF5, 0x37, 0xF5, 0x38, 0xAD, 0x35, 0x7F, 0x50,
+ 0x12, 0x32, 0x1E, 0xAD, 0x36, 0x7F, 0x51, 0x12,
+ 0x32, 0x1E, 0xAD, 0x37, 0x7F, 0x52, 0x12, 0x32,
+ 0x1E, 0xAD, 0x38, 0x7F, 0x53, 0x02, 0x32, 0x1E,
+ 0x75, 0x3D, 0x10, 0xE4, 0xF5, 0x3E, 0x75, 0x3F,
+ 0x07, 0x75, 0x40, 0x02, 0x90, 0x01, 0x30, 0xE5,
+ 0x3D, 0xF0, 0xA3, 0xE5, 0x3E, 0xF0, 0xA3, 0xE5,
+ 0x3F, 0xF0, 0xA3, 0xE5, 0x40, 0xF0, 0x22, 0x75,
+ 0x45, 0x0E, 0x75, 0x46, 0x01, 0x43, 0x46, 0x10,
+ 0x75, 0x47, 0x03, 0x75, 0x48, 0x62, 0x90, 0x01,
+ 0x38, 0xE5, 0x45, 0xF0, 0xA3, 0xE5, 0x46, 0xF0,
+ 0xA3, 0xE5, 0x47, 0xF0, 0xA3, 0xE5, 0x48, 0xF0,
+ 0x22, 0x90, 0x01, 0x30, 0xE4, 0xF0, 0xA3, 0xF0,
+ 0xA3, 0xF0, 0xA3, 0xF0, 0x90, 0x01, 0x38, 0xF0,
+ 0xA3, 0xF0, 0xA3, 0xF0, 0xA3, 0xF0, 0xFD, 0x7F,
+ 0x50, 0x12, 0x32, 0x1E, 0xE4, 0xFD, 0x7F, 0x51,
+ 0x12, 0x32, 0x1E, 0xE4, 0xFD, 0x7F, 0x52, 0x12,
+ 0x32, 0x1E, 0xE4, 0xFD, 0x7F, 0x53, 0x02, 0x32,
+ 0x1E, 0x90, 0x01, 0x34, 0x74, 0xFF, 0xF0, 0xA3,
+ 0xF0, 0xA3, 0xF0, 0xA3, 0xF0, 0x90, 0x01, 0x3C,
+ 0xF0, 0xA3, 0xF0, 0xA3, 0xF0, 0xA3, 0xF0, 0xFD,
+ 0x7F, 0x54, 0x12, 0x32, 0x1E, 0x7D, 0xFF, 0x7F,
+ 0x55, 0x12, 0x32, 0x1E, 0x7D, 0xFF, 0x7F, 0x56,
+ 0x12, 0x32, 0x1E, 0x7D, 0xFF, 0x7F, 0x57, 0x02,
+ 0x32, 0x1E, 0x90, 0x00, 0x80, 0xE0, 0x44, 0x80,
+ 0xFD, 0x7F, 0x80, 0x12, 0x32, 0x1E, 0x90, 0xFD,
+ 0x00, 0xE0, 0x54, 0xBF, 0xF0, 0x12, 0x57, 0xE9,
+ 0x51, 0x77, 0x12, 0x32, 0x77, 0x51, 0xC9, 0x51,
+ 0x5E, 0x7F, 0x01, 0x12, 0x43, 0x15, 0x90, 0x81,
+ 0x41, 0x74, 0x02, 0xF0, 0xFF, 0x12, 0x43, 0x15,
+ 0x90, 0x81, 0x41, 0xE0, 0x04, 0xF0, 0x7F, 0x03,
+ 0x12, 0x43, 0x15, 0x90, 0x81, 0x41, 0xE0, 0x04,
+ 0xF0, 0x31, 0x01, 0x51, 0x3F, 0x90, 0x00, 0x80,
+ 0xE0, 0x44, 0x40, 0xFD, 0x7F, 0x80, 0x12, 0x32,
+ 0x1E, 0x75, 0x20, 0xFF, 0x51, 0x68, 0x51, 0xF9,
+ 0x51, 0x7F, 0xE4, 0xFF, 0x02, 0x43, 0x9E, 0x51,
+ 0x62, 0x51, 0x6F, 0x51, 0xA7, 0x71, 0x4F, 0x51,
+ 0x8A, 0x51, 0x95, 0x90, 0x81, 0x45, 0xE0, 0x54,
+ 0xFE, 0xF0, 0xA3, 0x74, 0x03, 0xF0, 0xA3, 0xF0,
+ 0xE4, 0xA3, 0xF0, 0xA3, 0xF0, 0x22, 0xE4, 0xF5,
+ 0x4D, 0x22, 0xE4, 0x90, 0x80, 0xDE, 0xF0, 0x22,
+ 0x75, 0xE8, 0x03, 0x75, 0xA8, 0x84, 0x22, 0xE4,
+ 0x90, 0x80, 0xD8, 0xF0, 0xA3, 0xF0, 0x22, 0x90,
+ 0x01, 0x94, 0xE0, 0x44, 0x01, 0xF0, 0x22, 0x90,
+ 0x01, 0xE4, 0x74, 0x0B, 0xF0, 0xA3, 0x74, 0x01,
+ 0xF0, 0x22, 0x90, 0x81, 0x3F, 0xE0, 0x54, 0xFE,
+ 0xF0, 0xE4, 0xA3, 0xF0, 0x22, 0x90, 0x81, 0x42,
+ 0xE0, 0x54, 0xFE, 0xF0, 0x54, 0x7F, 0xF0, 0xA3,
+ 0x74, 0x0A, 0xF0, 0xE4, 0xA3, 0xF0, 0x22, 0x90,
+ 0x81, 0x1F, 0xE0, 0x54, 0xFE, 0xF0, 0x54, 0xFD,
+ 0xF0, 0x54, 0xFB, 0xF0, 0x54, 0xF7, 0xF0, 0x54,
+ 0xEF, 0xF0, 0x54, 0xDF, 0xF0, 0xE4, 0xA3, 0xF0,
+ 0xA3, 0xF0, 0xA3, 0xF0, 0xA3, 0x74, 0x0C, 0xF0,
+ 0x22, 0x90, 0x01, 0x01, 0xE0, 0x44, 0x04, 0xF0,
+ 0x90, 0x01, 0x9C, 0x74, 0x7E, 0xF0, 0xA3, 0x74,
+ 0x92, 0xF0, 0xA3, 0x74, 0xA0, 0xF0, 0xA3, 0x74,
+ 0x24, 0xF0, 0x90, 0x01, 0x9B, 0x74, 0x49, 0xF0,
+ 0x90, 0x01, 0x9A, 0x74, 0xE0, 0xF0, 0x90, 0x01,
+ 0x99, 0xE4, 0xF0, 0x90, 0x01, 0x98, 0x04, 0xF0,
+ 0x22, 0xE4, 0x90, 0x81, 0x51, 0xF0, 0xA3, 0xF0,
+ 0x90, 0x01, 0x98, 0xE0, 0x7F, 0x00, 0x30, 0xE4,
+ 0x02, 0x7F, 0x01, 0xEF, 0x64, 0x01, 0x60, 0x3E,
+ 0xC3, 0x90, 0x81, 0x52, 0xE0, 0x94, 0x88, 0x90,
+ 0x81, 0x51, 0xE0, 0x94, 0x13, 0x40, 0x08, 0x90,
+ 0x01, 0xC1, 0xE0, 0x44, 0x10, 0xF0, 0x22, 0x90,
+ 0x81, 0x51, 0xE4, 0x75, 0xF0, 0x01, 0x12, 0x44,
+ 0xA9, 0x7F, 0x14, 0x7E, 0x00, 0x12, 0x32, 0xAA,
+ 0xD3, 0x90, 0x81, 0x52, 0xE0, 0x94, 0x32, 0x90,
+ 0x81, 0x51, 0xE0, 0x94, 0x00, 0x40, 0xB9, 0x90,
+ 0x01, 0xC6, 0xE0, 0x30, 0xE3, 0xB2, 0x22, 0xE4,
+ 0x90, 0x81, 0x27, 0xF0, 0xA3, 0xF0, 0x90, 0x81,
+ 0x26, 0xE0, 0x54, 0x0F, 0xF0, 0x54, 0xF0, 0xF0,
+ 0x90, 0x81, 0x24, 0xE0, 0x54, 0xFD, 0xF0, 0x54,
+ 0xF7, 0xF0, 0x54, 0xEF, 0xF0, 0x90, 0x81, 0x2D,
+ 0x74, 0x01, 0xF0, 0xA3, 0xF0, 0x90, 0x81, 0x24,
+ 0xE0, 0x54, 0xFB, 0xF0, 0xA3, 0xE0, 0x54, 0xFB,
+ 0xF0, 0xE4, 0x90, 0x81, 0x30, 0xF0, 0x90, 0x81,
+ 0x2F, 0x74, 0x07, 0xF0, 0x90, 0x81, 0x32, 0xE4,
+ 0xF0, 0xA3, 0x74, 0x02, 0xF0, 0xE4, 0x90, 0x81,
+ 0x2B, 0xF0, 0x90, 0x81, 0x24, 0xE0, 0x54, 0xFE,
+ 0xF0, 0x90, 0x81, 0x29, 0x74, 0x0C, 0xF0, 0x90,
+ 0x81, 0x24, 0xE0, 0x54, 0xDF, 0xF0, 0x90, 0x81,
+ 0x2A, 0x74, 0x0C, 0xF0, 0x90, 0x81, 0x24, 0xE0,
+ 0x54, 0xBF, 0xF0, 0x54, 0x7F, 0xF0, 0xA3, 0xE0,
+ 0x54, 0xFE, 0xF0, 0x54, 0xFD, 0xF0, 0x54, 0xF7,
+ 0xF0, 0x90, 0x81, 0x34, 0x12, 0x20, 0xDA, 0x00,
+ 0x00, 0x00, 0x00, 0x90, 0x80, 0x3C, 0xE0, 0xB4,
+ 0x01, 0x08, 0x90, 0x81, 0x31, 0x74, 0x99, 0xF0,
+ 0x80, 0x12, 0x90, 0x80, 0x3C, 0xE0, 0x90, 0x81,
+ 0x31, 0xB4, 0x03, 0x05, 0x74, 0x90, 0xF0, 0x80,
+ 0x03, 0x74, 0x40, 0xF0, 0x90, 0x81, 0x38, 0x74,
+ 0x01, 0xF0, 0xA3, 0x74, 0x05, 0xF0, 0xA3, 0xE0,
+ 0x54, 0x01, 0x44, 0x28, 0xF0, 0xA3, 0x74, 0x05,
+ 0xF0, 0xE4, 0xA3, 0xF0, 0xA3, 0xE0, 0x54, 0xFD,
+ 0xF0, 0x54, 0xFB, 0xF0, 0x54, 0xF7, 0xF0, 0x54,
+ 0xEF, 0xF0, 0x54, 0xDF, 0xF0, 0x54, 0xBF, 0xF0,
+ 0xE4, 0xA3, 0xF0, 0x22, 0xE4, 0x90, 0x81, 0x59,
+ 0xF0, 0x90, 0x81, 0x59, 0xE0, 0x64, 0x01, 0xF0,
+ 0x24, 0x24, 0x90, 0x01, 0xC4, 0xF0, 0x74, 0x5C,
+ 0xA3, 0xF0, 0x90, 0x81, 0x2A, 0xE0, 0xFF, 0x90,
+ 0x81, 0x29, 0xE0, 0x6F, 0x60, 0x03, 0x12, 0x47,
+ 0x2A, 0xD1, 0x08, 0xBF, 0x01, 0x02, 0x91, 0x5F,
+ 0xB1, 0xF2, 0x12, 0x32, 0x9E, 0xBF, 0x01, 0x02,
+ 0xB1, 0x67, 0x12, 0x42, 0x4D, 0x80, 0xCA, 0xD3,
+ 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x90, 0x81,
+ 0x24, 0xE0, 0x30, 0xE0, 0x24, 0x90, 0x81, 0x1F,
+ 0xE0, 0xFF, 0x30, 0xE0, 0x1A, 0xC3, 0x13, 0x30,
+ 0xE0, 0x07, 0xB1, 0xFB, 0xBF, 0x01, 0x12, 0x80,
+ 0x0A, 0x90, 0x81, 0x23, 0xE0, 0xFF, 0x60, 0x03,
+ 0xB4, 0x08, 0x06, 0x91, 0x96, 0x80, 0x02, 0x91,
+ 0xA6, 0xD0, 0xD0, 0x92, 0xAF, 0x22, 0xD3, 0x10,
+ 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0xB1, 0x22, 0x91,
+ 0xBA, 0xD0, 0xD0, 0x92, 0xAF, 0x22, 0x90, 0x81,
+ 0x2A, 0xE0, 0x70, 0x0D, 0xD1, 0x2F, 0xBF, 0x01,
+ 0x08, 0x91, 0x96, 0x90, 0x01, 0xE5, 0xE0, 0x04,
+ 0xF0, 0x22, 0xB1, 0xF3, 0x90, 0x00, 0x08, 0xE0,
+ 0x54, 0xEF, 0xFD, 0x7F, 0x08, 0x12, 0x32, 0x1E,
+ 0xE4, 0xFF, 0x8F, 0x50, 0xE4, 0x90, 0x81, 0x5A,
+ 0xF0, 0xA3, 0xF0, 0x90, 0x01, 0x09, 0xE0, 0x7F,
+ 0x00, 0x30, 0xE7, 0x02, 0x7F, 0x01, 0xEF, 0x65,
+ 0x50, 0x60, 0x3E, 0xC3, 0x90, 0x81, 0x5B, 0xE0,
+ 0x94, 0x88, 0x90, 0x81, 0x5A, 0xE0, 0x94, 0x13,
+ 0x40, 0x08, 0x90, 0x01, 0xC0, 0xE0, 0x44, 0x10,
+ 0xF0, 0x22, 0x90, 0x81, 0x5A, 0xE4, 0x75, 0xF0,
+ 0x01, 0x12, 0x44, 0xA9, 0x7F, 0x14, 0x7E, 0x00,
+ 0x12, 0x32, 0xAA, 0xD3, 0x90, 0x81, 0x5B, 0xE0,
+ 0x94, 0x32, 0x90, 0x81, 0x5A, 0xE0, 0x94, 0x00,
+ 0x40, 0xB9, 0x90, 0x01, 0xC6, 0xE0, 0x30, 0xE0,
+ 0xB2, 0x22, 0x90, 0x81, 0x31, 0xE0, 0xFD, 0x7F,
+ 0x93, 0x12, 0x32, 0x1E, 0x90, 0x81, 0x28, 0xE0,
+ 0x60, 0x12, 0x90, 0x01, 0x2F, 0xE0, 0x30, 0xE7,
+ 0x05, 0x74, 0x10, 0xF0, 0x80, 0x06, 0x90, 0x01,
+ 0x2F, 0x74, 0x90, 0xF0, 0x90, 0x00, 0x08, 0xE0,
+ 0x44, 0x10, 0xFD, 0x7F, 0x08, 0x12, 0x32, 0x1E,
+ 0x7F, 0x01, 0x91, 0xCA, 0x90, 0x00, 0x90, 0xE0,
+ 0x44, 0x01, 0xFD, 0x7F, 0x90, 0x12, 0x32, 0x1E,
+ 0x7F, 0x14, 0x7E, 0x00, 0x02, 0x32, 0xAA, 0xD3,
+ 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x12, 0x2D,
+ 0xA7, 0xE4, 0xF5, 0x52, 0x12, 0x32, 0x9E, 0xEF,
+ 0x60, 0x73, 0x63, 0x52, 0x01, 0xE5, 0x52, 0x24,
+ 0x67, 0x90, 0x01, 0xC4, 0xF0, 0x74, 0x5D, 0xA3,
+ 0xF0, 0x90, 0x00, 0x88, 0xE0, 0xF5, 0x50, 0xF5,
+ 0x51, 0x54, 0x0F, 0x60, 0xDF, 0xE5, 0x50, 0x30,
+ 0xE0, 0x0B, 0x20, 0xE4, 0x03, 0x12, 0x29, 0xC5,
+ 0x53, 0x51, 0xEE, 0x80, 0x3F, 0xE5, 0x50, 0x30,
+ 0xE1, 0x16, 0x20, 0xE5, 0x0E, 0x12, 0x11, 0xBD,
+ 0xEF, 0x70, 0x03, 0x43, 0x51, 0x20, 0x90, 0x01,
+ 0x06, 0xE4, 0xF0, 0x53, 0x51, 0xFD, 0x80, 0x24,
+ 0xE5, 0x50, 0x30, 0xE2, 0x0B, 0x20, 0xE6, 0x03,
+ 0x12, 0x67, 0x06, 0x53, 0x51, 0xFB, 0x80, 0x14,
+ 0xE5, 0x50, 0x30, 0xE3, 0x0F, 0x20, 0xE7, 0x09,
+ 0x12, 0x61, 0x6E, 0xEF, 0x70, 0x03, 0x43, 0x51,
+ 0x80, 0x53, 0x51, 0xF7, 0xAD, 0x51, 0x7F, 0x88,
+ 0x12, 0x32, 0x1E, 0x80, 0x87, 0xD0, 0xD0, 0x92,
+ 0xAF, 0x22, 0x22, 0x90, 0x00, 0x90, 0xE0, 0x20,
+ 0xE0, 0xF9, 0x22, 0x90, 0x81, 0x22, 0xE0, 0x64,
+ 0x02, 0x7F, 0x01, 0x60, 0x02, 0x7F, 0x00, 0x22,
+ 0x7F, 0x02, 0x90, 0x81, 0x41, 0xE0, 0xFE, 0xEF,
+ 0xC3, 0x9E, 0x50, 0x18, 0xEF, 0x25, 0xE0, 0x24,
+ 0x81, 0xF8, 0xE6, 0x30, 0xE4, 0x0B, 0x90, 0x01,
+ 0xB8, 0x74, 0x08, 0xF0, 0xA3, 0xF0, 0x7F, 0x00,
+ 0x22, 0x0F, 0x80, 0xDE, 0x7F, 0x01, 0x22, 0x90,
+ 0x02, 0x87, 0xE0, 0x60, 0x08, 0x90, 0x01, 0xB8,
+ 0x74, 0x01, 0xF0, 0x80, 0x17, 0x90, 0x02, 0x86,
+ 0xE0, 0x20, 0xE1, 0x08, 0x90, 0x01, 0xB8, 0x74,
+ 0x04, 0xF0, 0x80, 0x08, 0x90, 0x01, 0xB8, 0xE4,
+ 0xF0, 0x7F, 0x01, 0x22, 0x90, 0x01, 0xB9, 0x74,
+ 0x08, 0xF0, 0x7F, 0x00, 0x22, 0xE4, 0xFB, 0xFA,
+ 0xFD, 0x7F, 0x01, 0x12, 0x44, 0x4E, 0x90, 0x81,
+ 0xBD, 0xEF, 0xF0, 0x60, 0xF0, 0xD1, 0x71, 0x80,
+ 0xEC, 0xD3, 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0,
+ 0x90, 0x01, 0xCC, 0xE0, 0x54, 0x0F, 0x90, 0x81,
+ 0xBE, 0xF0, 0x90, 0x81, 0xBE, 0xE0, 0xFD, 0x70,
+ 0x02, 0xE1, 0x9C, 0x90, 0x82, 0x09, 0xE0, 0xFF,
+ 0x74, 0x01, 0x7E, 0x00, 0xA8, 0x07, 0x08, 0x80,
+ 0x05, 0xC3, 0x33, 0xCE, 0x33, 0xCE, 0xD8, 0xF9,
+ 0xFF, 0xEF, 0x5D, 0x70, 0x02, 0xE1, 0x95, 0x90,
+ 0x82, 0x09, 0xE0, 0x75, 0xF0, 0x04, 0x90, 0x01,
+ 0xD0, 0x12, 0x45, 0x0A, 0xE0, 0x90, 0x81, 0xBF,
+ 0xF0, 0x75, 0x13, 0x01, 0x75, 0x14, 0x81, 0x75,
+ 0x15, 0xBF, 0x75, 0x16, 0x01, 0x7B, 0x01, 0x7A,
+ 0x81, 0x79, 0xC0, 0x12, 0x2B, 0xED, 0x90, 0x82,
+ 0x09, 0xE0, 0x75, 0xF0, 0x04, 0x90, 0x01, 0xD1,
+ 0x12, 0x45, 0x0A, 0xE0, 0x90, 0x81, 0xC1, 0xF0,
+ 0x90, 0x82, 0x09, 0xE0, 0x75, 0xF0, 0x04, 0x90,
+ 0x01, 0xD2, 0x12, 0x45, 0x0A, 0xE0, 0x90, 0x81,
+ 0xC2, 0xF0, 0x90, 0x82, 0x09, 0xE0, 0x75, 0xF0,
+ 0x04, 0x90, 0x01, 0xD3, 0x12, 0x45, 0x0A, 0xE0,
+ 0x90, 0x81, 0xC3, 0xF0, 0x90, 0x82, 0x09, 0xE0,
+ 0x75, 0xF0, 0x04, 0x90, 0x01, 0xF0, 0x12, 0x45,
+ 0x0A, 0xE0, 0x90, 0x81, 0xC4, 0xF0, 0x90, 0x82,
+ 0x09, 0xE0, 0x75, 0xF0, 0x04, 0x90, 0x01, 0xF1,
+ 0x12, 0x45, 0x0A, 0xE0, 0x90, 0x81, 0xC5, 0xF0,
+ 0x90, 0x82, 0x09, 0xE0, 0x75, 0xF0, 0x04, 0x90,
+ 0x01, 0xF2, 0x12, 0x45, 0x0A, 0xE0, 0x90, 0x81,
+ 0xC6, 0xF0, 0x90, 0x82, 0x09, 0xE0, 0x75, 0xF0,
+ 0x04, 0x90, 0x01, 0xF3, 0x12, 0x45, 0x0A, 0xE0,
+ 0x90, 0x81, 0xC7, 0xF0, 0x90, 0x81, 0xBE, 0xE0,
+ 0xFF, 0x90, 0x82, 0x09, 0xE0, 0xFE, 0x74, 0x01,
+ 0xA8, 0x06, 0x08, 0x80, 0x02, 0xC3, 0x33, 0xD8,
+ 0xFC, 0xF4, 0x5F, 0x90, 0x81, 0xBE, 0xF0, 0x90,
+ 0x82, 0x09, 0xE0, 0xFF, 0x74, 0x01, 0xA8, 0x07,
+ 0x08, 0x80, 0x02, 0xC3, 0x33, 0xD8, 0xFC, 0x90,
+ 0x01, 0xCC, 0xF0, 0x90, 0x81, 0xC0, 0xE0, 0xFF,
+ 0x7B, 0x01, 0x7A, 0x81, 0x79, 0xC1, 0x12, 0x55,
+ 0x3F, 0x90, 0x82, 0x09, 0xE0, 0x04, 0xF0, 0xE0,
+ 0x54, 0x03, 0xF0, 0xC1, 0x82, 0x90, 0x01, 0xC0,
+ 0xE0, 0x44, 0x02, 0xF0, 0xD0, 0xD0, 0x92, 0xAF,
+ 0x22, 0xE4, 0xFB, 0xFA, 0xFD, 0x7F, 0x01, 0x12,
+ 0x44, 0x4E, 0x90, 0x81, 0xD0, 0xEF, 0xF0, 0x60,
+ 0xF0, 0x12, 0x6C, 0x19, 0x80, 0xEB, 0x90, 0x81,
+ 0xD4, 0xEF, 0xF0, 0xA3, 0xED, 0xF0, 0xA3, 0x12,
+ 0x20, 0xDA, 0x00, 0x00, 0x00, 0x00, 0xE4, 0x90,
+ 0x81, 0xE2, 0xF0, 0x7F, 0x24, 0x7E, 0x08, 0x12,
+ 0x2D, 0x5C, 0x90, 0x81, 0xDA, 0x12, 0x20, 0xCE,
+ 0x90, 0x81, 0xD4, 0xE0, 0xFB, 0x70, 0x08, 0x90,
+ 0x81, 0xDA, 0x12, 0x44, 0xD9, 0x80, 0x16, 0xEB,
+ 0x75, 0xF0, 0x08, 0xA4, 0x24, 0x62, 0xF5, 0x82,
+ 0xE4, 0x34, 0x87, 0xF5, 0x83, 0xE0, 0xFE, 0xA3,
+ 0xE0, 0xFF, 0x12, 0x2D, 0x5C, 0x90, 0x81, 0xDE,
+ 0x12, 0x20, 0xCE, 0x90, 0x81, 0xD5, 0xE0, 0xFF,
+ 0xE4, 0xFC, 0xFD, 0xFE, 0x78, 0x17, 0x12, 0x20,
+ 0xBB, 0xA8, 0x04, 0xA9, 0x05, 0xAA, 0x06, 0xAB,
+ 0x07, 0x90, 0x81, 0xDE, 0x12, 0x44, 0xD9, 0xED,
+ 0x54, 0x7F, 0xFD, 0xEC, 0x54, 0x80, 0xFC, 0x12,
+ 0x44, 0xCC, 0xEC, 0x44, 0x80, 0xFC, 0x90, 0x81,
+ 0xDE, 0x12, 0x20, 0xCE, 0x90, 0x81, 0xDA, 0x12,
+ 0x44, 0xD9, 0xEC, 0x54, 0x7F, 0xFC, 0x90, 0x85,
+ 0xBB, 0x12, 0x20, 0xCE, 0x7F, 0x24, 0x7E, 0x08,
+ 0x12, 0x2E, 0xA2, 0x90, 0x81, 0xD4, 0xE0, 0x75,
+ 0xF0, 0x08, 0xA4, 0x24, 0x62, 0xF5, 0x82, 0xE4,
+ 0x34, 0x87, 0xF5, 0x83, 0xE0, 0xFE, 0xA3, 0xE0,
+ 0xFF, 0xC0, 0x06, 0xC0, 0x07, 0x90, 0x81, 0xDE,
+ 0x12, 0x44, 0xD9, 0x90, 0x85, 0xBB, 0x12, 0x20,
+ 0xCE, 0xD0, 0x07, 0xD0, 0x06, 0x12, 0x2E, 0xA2,
+ 0x90, 0x81, 0xDA, 0x12, 0x44, 0xD9, 0xEC, 0x44,
+ 0x80, 0xFC, 0x90, 0x85, 0xBB, 0x12, 0x20, 0xCE,
+ 0x7F, 0x24, 0x7E, 0x08, 0x12, 0x2E, 0xA2, 0x90,
+ 0x81, 0xD4, 0xE0, 0x70, 0x04, 0x7F, 0x20, 0x80,
+ 0x09, 0x90, 0x81, 0xD4, 0xE0, 0xB4, 0x01, 0x16,
+ 0x7F, 0x28, 0x7E, 0x08, 0x12, 0x2D, 0x5C, 0x78,
+ 0x08, 0x12, 0x20, 0xA8, 0xEF, 0x54, 0x01, 0xFF,
+ 0xE4, 0x90, 0x81, 0xE2, 0xEF, 0xF0, 0x90, 0x81,
+ 0xE2, 0xE0, 0x90, 0x81, 0xD4, 0x60, 0x0E, 0xE0,
+ 0x75, 0xF0, 0x08, 0xA4, 0x24, 0x66, 0xF5, 0x82,
+ 0xE4, 0x34, 0x87, 0x80, 0x0C, 0xE0, 0x75, 0xF0,
+ 0x08, 0xA4, 0x24, 0x64, 0xF5, 0x82, 0xE4, 0x34,
+ 0x87, 0xF5, 0x83, 0xE0, 0xFE, 0xA3, 0xE0, 0xFF,
+ 0x12, 0x2D, 0x5C, 0xED, 0x54, 0x0F, 0xFD, 0xE4,
+ 0xFC, 0x90, 0x81, 0xD6, 0x12, 0x20, 0xCE, 0x90,
+ 0x81, 0xD6, 0x02, 0x44, 0xD9, 0x90, 0x81, 0xE3,
+ 0xEF, 0xF0, 0xAB, 0x05, 0x90, 0x81, 0xE9, 0x12,
+ 0x20, 0xDA, 0x00, 0x00, 0x00, 0x00, 0xAF, 0x03,
+ 0xE4, 0xFC, 0xFD, 0xFE, 0x78, 0x14, 0x12, 0x20,
+ 0xBB, 0xA8, 0x04, 0xA9, 0x05, 0xAA, 0x06, 0xAB,
+ 0x07, 0x90, 0x81, 0xE5, 0x12, 0x44, 0xD9, 0xED,
+ 0x54, 0x0F, 0xFD, 0xE4, 0xFC, 0x12, 0x44, 0xCC,
+ 0xEC, 0x54, 0x0F, 0xFC, 0x90, 0x81, 0xE9, 0x12,
+ 0x20, 0xCE, 0x90, 0x81, 0xE3, 0xE0, 0x75, 0xF0,
+ 0x08, 0xA4, 0x24, 0x60, 0xF5, 0x82, 0xE4, 0x34,
+ 0x87, 0xF5, 0x83, 0xE0, 0xFE, 0xA3, 0xE0, 0xFF,
+ 0xC0, 0x06, 0xC0, 0x07, 0x90, 0x81, 0xE9, 0x12,
+ 0x44, 0xD9, 0x90, 0x85, 0xBB, 0x12, 0x20, 0xCE,
+ 0xD0, 0x07, 0xD0, 0x06, 0x02, 0x2E, 0xA2, 0xD3,
+ 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x12, 0x5F,
+ 0xB6, 0xD0, 0xD0, 0x92, 0xAF, 0x22, 0x78, 0x10,
+ 0x74, 0x01, 0xF2, 0x90, 0x02, 0x09, 0xE0, 0x78,
+ 0x00, 0xF2, 0x08, 0x74, 0x20, 0xF2, 0x18, 0xE2,
+ 0xFF, 0x30, 0xE0, 0x05, 0x08, 0xE2, 0x24, 0x80,
+ 0xF2, 0xEF, 0xC3, 0x13, 0x90, 0xFD, 0x10, 0xF0,
+ 0x78, 0x01, 0xE2, 0x24, 0x00, 0xF5, 0x82, 0xE4,
+ 0x34, 0xFC, 0xF5, 0x83, 0xE0, 0x78, 0x03, 0xF2,
+ 0x64, 0x04, 0x60, 0x0D, 0xE2, 0xFF, 0x64, 0x08,
+ 0x60, 0x07, 0xEF, 0x64, 0x0C, 0x60, 0x02, 0x61,
+ 0xDE, 0xE4, 0x78, 0x02, 0xF2, 0x78, 0x03, 0xE2,
+ 0xFF, 0x18, 0xE2, 0xC3, 0x9F, 0x50, 0x2D, 0xE2,
+ 0xFD, 0x18, 0xE2, 0x2D, 0x90, 0x81, 0x5A, 0xF0,
+ 0xE0, 0xFF, 0x24, 0x00, 0xF5, 0x82, 0xE4, 0x34,
+ 0xFC, 0xF5, 0x83, 0xE0, 0xFE, 0x74, 0x04, 0x2D,
+ 0xF8, 0xEE, 0xF2, 0xEF, 0xB4, 0xFF, 0x06, 0x90,
+ 0xFD, 0x10, 0xE0, 0x04, 0xF0, 0x78, 0x02, 0xE2,
+ 0x04, 0xF2, 0x80, 0xC9, 0x78, 0x04, 0xE2, 0x78,
+ 0x12, 0xF2, 0xFF, 0x78, 0x05, 0xE2, 0x78, 0x11,
+ 0xF2, 0x78, 0x06, 0xE2, 0x78, 0x13, 0xF2, 0x78,
+ 0x07, 0xE2, 0x78, 0x14, 0xF2, 0x78, 0x08, 0xE2,
+ 0x78, 0x33, 0xF2, 0x78, 0x09, 0xE2, 0x78, 0x34,
+ 0xF2, 0x78, 0x0A, 0xE2, 0x78, 0x35, 0xF2, 0x78,
+ 0x0B, 0xE2, 0x78, 0x36, 0xF2, 0x78, 0x0C, 0xE2,
+ 0x78, 0x37, 0xF2, 0x78, 0x0D, 0xE2, 0x78, 0x38,
+ 0xF2, 0x78, 0x0E, 0xE2, 0x78, 0x39, 0xF2, 0x78,
+ 0x0F, 0xE2, 0x78, 0x3A, 0xF2, 0xE4, 0x78, 0x15,
+ 0xF2, 0xEF, 0x24, 0xF8, 0x60, 0x75, 0x24, 0xFC,
+ 0x60, 0x6C, 0x24, 0x08, 0x60, 0x02, 0x61, 0xC0,
+ 0x78, 0x11, 0xE2, 0xB4, 0x01, 0x05, 0x12, 0x29,
+ 0xC5, 0x61, 0xC5, 0x78, 0x11, 0xE2, 0xB4, 0x02,
+ 0x05, 0x12, 0x11, 0xBD, 0x61, 0xC5, 0x78, 0x11,
+ 0xE2, 0xB4, 0x03, 0x04, 0xF1, 0x06, 0x61, 0xC5,
+ 0x78, 0x11, 0xE2, 0xB4, 0x10, 0x17, 0x78, 0x14,
+ 0xE2, 0xFE, 0x18, 0xE2, 0xFD, 0xED, 0xFF, 0x78,
+ 0x16, 0xEE, 0xF2, 0xFE, 0x08, 0xEF, 0xF2, 0xFF,
+ 0x12, 0x32, 0xAA, 0x61, 0xC5, 0x78, 0x11, 0xE2,
+ 0xB4, 0x11, 0x17, 0x78, 0x14, 0xE2, 0xFE, 0x18,
+ 0xE2, 0xFD, 0xED, 0xFF, 0x78, 0x16, 0xEE, 0xF2,
+ 0xFE, 0x08, 0xEF, 0xF2, 0xFF, 0x12, 0x32, 0x06,
+ 0x61, 0xC5, 0x78, 0x11, 0xE2, 0xF4, 0x60, 0x02,
+ 0x61, 0xC5, 0x18, 0xF2, 0x61, 0xC5, 0x78, 0x15,
+ 0x74, 0x01, 0xF2, 0x78, 0x11, 0xE2, 0x64, 0x07,
+ 0x60, 0x02, 0x61, 0xAA, 0x78, 0x34, 0xE2, 0xFF,
+ 0xE4, 0xFC, 0xFD, 0xFE, 0x78, 0x08, 0x12, 0x20,
+ 0xBB, 0xC0, 0x04, 0xA9, 0x05, 0xAA, 0x06, 0xAB,
+ 0x07, 0x78, 0x33, 0xE2, 0xFF, 0xE4, 0xFC, 0xFD,
+ 0xFE, 0xD0, 0x00, 0x12, 0x44, 0xCC, 0xC0, 0x04,
+ 0xC0, 0x05, 0xC0, 0x06, 0xC0, 0x07, 0x78, 0x35,
+ 0xE2, 0xFF, 0xE4, 0xFC, 0xFD, 0xFE, 0x78, 0x10,
+ 0x12, 0x20, 0xBB, 0xD0, 0x03, 0xD0, 0x02, 0xD0,
+ 0x01, 0xD0, 0x00, 0x12, 0x44, 0xCC, 0x78, 0x18,
+ 0x12, 0x44, 0xFE, 0x78, 0x15, 0xE2, 0x70, 0x02,
+ 0x61, 0x93, 0x18, 0xE2, 0xFF, 0x18, 0xE2, 0xFD,
+ 0x31, 0x5F, 0x78, 0x1C, 0x12, 0x44, 0xFE, 0x78,
+ 0x38, 0xE2, 0xFF, 0xE4, 0xFC, 0xFD, 0xFE, 0x78,
+ 0x08, 0x12, 0x20, 0xBB, 0xC0, 0x04, 0xA9, 0x05,
+ 0xAA, 0x06, 0xAB, 0x07, 0x78, 0x37, 0xE2, 0xFF,
+ 0xE4, 0xFC, 0xFD, 0xFE, 0xD0, 0x00, 0x12, 0x44,
+ 0xCC, 0xC0, 0x04, 0xC0, 0x05, 0xC0, 0x06, 0xC0,
+ 0x07, 0x78, 0x39, 0xE2, 0xFF, 0xE4, 0xFC, 0xFD,
+ 0xFE, 0x78, 0x10, 0x12, 0x20, 0xBB, 0xD0, 0x03,
+ 0xD0, 0x02, 0xD0, 0x01, 0xD0, 0x00, 0x12, 0x44,
+ 0xCC, 0x78, 0x20, 0x12, 0x44, 0xFE, 0x78, 0x20,
+ 0x12, 0x44, 0xE5, 0x12, 0x20, 0x9B, 0x78, 0x1C,
+ 0x12, 0x44, 0xF1, 0x12, 0x44, 0xBF, 0xC0, 0x04,
+ 0xC0, 0x05, 0xC0, 0x06, 0xC0, 0x07, 0x78, 0x18,
+ 0x12, 0x44, 0xE5, 0x78, 0x20, 0x12, 0x44, 0xF1,
+ 0x12, 0x44, 0xBF, 0xD0, 0x03, 0xD0, 0x02, 0xD0,
+ 0x01, 0xD0, 0x00, 0x12, 0x44, 0xCC, 0x78, 0x18,
+ 0x12, 0x44, 0xFE, 0x78, 0x18, 0x12, 0x44, 0xE5,
+ 0x90, 0x81, 0xF9, 0x12, 0x20, 0xCE, 0x78, 0x13,
+ 0xE2, 0xFD, 0x08, 0xE2, 0xFF, 0x12, 0x55, 0x1C,
+ 0x80, 0x1B, 0x78, 0x13, 0xE2, 0xFF, 0x08, 0xE2,
+ 0xFD, 0x78, 0x11, 0xE2, 0xFB, 0x78, 0x15, 0xE2,
+ 0x90, 0x81, 0xBC, 0xF0, 0x71, 0xE1, 0x80, 0x05,
+ 0x78, 0x10, 0x74, 0x02, 0xF2, 0x78, 0x10, 0xE2,
+ 0xFF, 0xC3, 0x94, 0x02, 0x50, 0x10, 0xEF, 0x60,
+ 0x0A, 0x78, 0x02, 0xE2, 0xFF, 0x18, 0xE2, 0x2F,
+ 0xF2, 0x21, 0x90, 0x7F, 0x01, 0x22, 0x7F, 0x00,
+ 0x22, 0xAC, 0x07, 0xED, 0xAD, 0x04, 0x78, 0x24,
+ 0xF2, 0xED, 0x08, 0xF2, 0xEB, 0xB4, 0x04, 0x07,
+ 0x78, 0x27, 0x74, 0x01, 0xF2, 0x80, 0x0E, 0xEB,
+ 0x78, 0x27, 0xB4, 0x05, 0x05, 0x74, 0x02, 0xF2,
+ 0x80, 0x03, 0x74, 0x04, 0xF2, 0xD3, 0x78, 0x25,
+ 0xE2, 0x94, 0xFF, 0x18, 0xE2, 0x94, 0x00, 0x50,
+ 0x63, 0xE4, 0x78, 0x26, 0xF2, 0x78, 0x27, 0xE2,
+ 0xFF, 0x18, 0xE2, 0xFE, 0xC3, 0x9F, 0x40, 0x02,
+ 0xA1, 0x7F, 0x74, 0x33, 0x2E, 0xF8, 0xE2, 0x78,
+ 0x28, 0xF2, 0x90, 0x81, 0xBC, 0xE0, 0x60, 0x2D,
+ 0x74, 0x37, 0x2E, 0xF8, 0xE2, 0x78, 0x32, 0xF2,
+ 0xEE, 0xFF, 0x78, 0x25, 0xE2, 0x2F, 0xFF, 0x18,
+ 0xE2, 0x34, 0x00, 0x8F, 0x82, 0xF5, 0x83, 0xE0,
+ 0x78, 0x29, 0xF2, 0x78, 0x32, 0xE2, 0xFF, 0xF4,
+ 0xFE, 0x78, 0x29, 0xE2, 0x5E, 0xFE, 0x18, 0xE2,
+ 0xFD, 0xEF, 0x5D, 0x4E, 0xF2, 0x78, 0x24, 0x08,
+ 0xE2, 0xFF, 0x08, 0xE2, 0x2F, 0xFF, 0x78, 0x28,
+ 0xE2, 0xFD, 0x12, 0x32, 0x1E, 0x78, 0x26, 0xE2,
+ 0x04, 0xF2, 0x80, 0xA1, 0xD3, 0x78, 0x25, 0xE2,
+ 0x94, 0xFF, 0x18, 0xE2, 0x94, 0x07, 0x50, 0x69,
+ 0xE4, 0x78, 0x26, 0xF2, 0x78, 0x27, 0xE2, 0xFF,
+ 0x18, 0xE2, 0xFE, 0xC3, 0x9F, 0x40, 0x02, 0xA1,
+ 0x7F, 0x74, 0x33, 0x2E, 0xF8, 0xE2, 0x78, 0x28,
+ 0xF2, 0x90, 0x81, 0xBC, 0xE0, 0x60, 0x2D, 0x78,
+ 0x26, 0xE2, 0xFF, 0xFD, 0x18, 0xE2, 0x2D, 0xFD,
+ 0x18, 0xE2, 0x34, 0x00, 0x8D, 0x82, 0xF5, 0x83,
+ 0xE0, 0x78, 0x29, 0xF2, 0x74, 0x37, 0x2F, 0xF8,
+ 0xE2, 0x78, 0x32, 0xF2, 0xE2, 0xFF, 0xF4, 0xFE,
+ 0x78, 0x29, 0xE2, 0x5E, 0xFE, 0x18, 0xE2, 0xFD,
+ 0xEF, 0x5D, 0x4E, 0xF2, 0x78, 0x28, 0xE2, 0xFF,
+ 0x78, 0x26, 0xE2, 0xFD, 0x18, 0xE2, 0x2D, 0xFD,
+ 0x18, 0xE2, 0x34, 0x00, 0x8D, 0x82, 0xF5, 0x83,
+ 0xEF, 0xF0, 0x78, 0x26, 0xE2, 0x04, 0xF2, 0x80,
+ 0x9B, 0x90, 0x81, 0xBC, 0xE0, 0x60, 0x0F, 0x78,
+ 0x24, 0xE2, 0xFE, 0x08, 0xE2, 0xFF, 0x12, 0x2D,
+ 0x5C, 0x78, 0x2E, 0x12, 0x44, 0xFE, 0xE4, 0x78,
+ 0x26, 0xF2, 0x78, 0x27, 0xE2, 0xFF, 0x18, 0xE2,
+ 0xFE, 0xC3, 0x9F, 0x50, 0x5D, 0x74, 0x33, 0x2E,
+ 0xF8, 0xE2, 0x78, 0x28, 0xF2, 0x90, 0x81, 0xBC,
+ 0xE0, 0x60, 0x2B, 0x78, 0x2E, 0x12, 0x44, 0xE5,
+ 0x78, 0x26, 0xE2, 0xFB, 0x75, 0xF0, 0x08, 0xA4,
+ 0xF9, 0xF8, 0x12, 0x20, 0xA8, 0x78, 0x29, 0xEF,
+ 0xF2, 0x74, 0x37, 0x2B, 0xF8, 0xE2, 0x78, 0x32,
+ 0xF2, 0xE2, 0xFE, 0xF4, 0x5F, 0xFF, 0x78, 0x28,
+ 0xE2, 0xFD, 0xEE, 0x5D, 0x4F, 0xF2, 0x78, 0x28,
+ 0xE2, 0xFF, 0x78, 0x26, 0xE2, 0xFD, 0xC3, 0x74,
+ 0x03, 0x9D, 0xFD, 0xE4, 0x94, 0x00, 0xFC, 0x7B,
+ 0xFE, 0x74, 0x2A, 0x2D, 0xF9, 0x74, 0x80, 0x3C,
+ 0xFA, 0xEF, 0x12, 0x1F, 0xEA, 0xE2, 0x04, 0xF2,
+ 0x80, 0x98, 0x78, 0x2A, 0x12, 0x44, 0xE5, 0x90,
+ 0x85, 0xBB, 0x12, 0x20, 0xCE, 0x78, 0x24, 0xE2,
+ 0xFE, 0x08, 0xE2, 0xFF, 0x12, 0x2E, 0xA2, 0x22,
+ 0x22, 0x90, 0x81, 0xCB, 0x12, 0x45, 0x1F, 0x90,
+ 0x00, 0x01, 0x12, 0x1F, 0xBD, 0xFF, 0xFE, 0x12,
+ 0x1F, 0xA4, 0xFD, 0xC3, 0x13, 0x30, 0xE0, 0x12,
+ 0x90, 0x81, 0xCB, 0x12, 0x45, 0x16, 0x90, 0x00,
+ 0x02, 0x12, 0x1F, 0xBD, 0x90, 0x81, 0xCF, 0xF0,
+ 0x80, 0x05, 0x90, 0x81, 0xCF, 0xEF, 0xF0, 0x90,
+ 0x81, 0xCE, 0xEE, 0xF0, 0x90, 0x81, 0xCF, 0xE0,
+ 0xFE, 0x90, 0x81, 0xCE, 0xE0, 0xFF, 0xD3, 0x9E,
+ 0x50, 0x38, 0x90, 0x81, 0xCB, 0x12, 0x45, 0x16,
+ 0x12, 0x1F, 0xA4, 0x54, 0x01, 0xFE, 0x74, 0xDE,
+ 0x2F, 0xF5, 0x82, 0xE4, 0x34, 0x80, 0xF5, 0x83,
+ 0xEE, 0xF0, 0x74, 0xDE, 0x2F, 0xF5, 0x82, 0xE4,
+ 0x34, 0x80, 0xF5, 0x83, 0xE0, 0x70, 0x04, 0xD1,
+ 0x25, 0x80, 0x07, 0x90, 0x81, 0xCE, 0xE0, 0xFF,
+ 0xB1, 0x80, 0x90, 0x81, 0xCE, 0xE0, 0x04, 0xF0,
+ 0x80, 0xBA, 0x90, 0x80, 0xDE, 0xE0, 0x70, 0x24,
+ 0x90, 0x81, 0x2A, 0xE0, 0x70, 0x04, 0xFF, 0x12,
+ 0x49, 0x93, 0x90, 0x81, 0x2A, 0xE0, 0x64, 0x0C,
+ 0x60, 0x02, 0xD1, 0x26, 0x90, 0x81, 0x24, 0xE0,
+ 0x54, 0xF7, 0xF0, 0x54, 0xEF, 0xF0, 0x54, 0xBF,
+ 0xF0, 0x54, 0x7F, 0xF0, 0x22, 0x22, 0x90, 0x06,
+ 0x04, 0xE0, 0x54, 0x7F, 0xF0, 0x90, 0x05, 0x22,
+ 0xE4, 0xF0, 0x90, 0x81, 0x2A, 0x74, 0x0C, 0xF0,
+ 0x22, 0x90, 0x81, 0xED, 0xEF, 0xF0, 0xA3, 0xED,
+ 0xF0, 0xAD, 0x03, 0xAC, 0x02, 0xE4, 0x90, 0x81,
+ 0xF5, 0xF0, 0xA3, 0xF0, 0x90, 0x01, 0xC4, 0x74,
+ 0x39, 0xF0, 0x74, 0x66, 0xA3, 0xF0, 0xEC, 0x54,
+ 0x3F, 0xFC, 0x90, 0x01, 0x40, 0xED, 0xF0, 0xAE,
+ 0x04, 0xEE, 0xA3, 0xF0, 0x90, 0x81, 0xED, 0xE0,
+ 0x24, 0x81, 0x60, 0x34, 0x24, 0xDA, 0x60, 0x1C,
+ 0x24, 0x3C, 0x70, 0x41, 0x90, 0x81, 0xEE, 0xE0,
+ 0xC4, 0x33, 0x33, 0x33, 0x54, 0x80, 0x90, 0x81,
+ 0xF2, 0xF0, 0xA3, 0x74, 0x69, 0xF0, 0xA3, 0x74,
+ 0x80, 0xF0, 0x80, 0x2C, 0x90, 0x81, 0xEE, 0xE0,
+ 0x54, 0x01, 0x90, 0x81, 0xF2, 0xF0, 0xA3, 0x74,
+ 0xA5, 0xF0, 0xA3, 0x74, 0x01, 0xF0, 0x80, 0x18,
+ 0x90, 0x81, 0xEE, 0xE0, 0xC4, 0x54, 0x10, 0x90,
+ 0x81, 0xF2, 0xF0, 0xA3, 0x74, 0x7F, 0xF0, 0xA3,
+ 0x74, 0x10, 0xF0, 0x80, 0x03, 0x7F, 0x00, 0x22,
+ 0x90, 0x81, 0xF3, 0xE0, 0x90, 0x01, 0x06, 0xF0,
+ 0x90, 0x81, 0xF2, 0xE0, 0x60, 0x0E, 0x90, 0x01,
+ 0x42, 0xF0, 0x90, 0x81, 0xF1, 0xE0, 0x90, 0x01,
+ 0x43, 0xF0, 0x80, 0x0D, 0x90, 0x01, 0x43, 0xE4,
+ 0xF0, 0x90, 0x81, 0xF2, 0xE0, 0x90, 0x01, 0x42,
+ 0xF0, 0x90, 0x81, 0xF4, 0xE0, 0xFF, 0x90, 0x01,
+ 0x42, 0xE0, 0x5F, 0xFF, 0x90, 0x81, 0xF2, 0xE0,
+ 0x6F, 0x60, 0xEE, 0x74, 0x39, 0x04, 0x90, 0x01,
+ 0xC4, 0xF0, 0x74, 0x66, 0xA3, 0xF0, 0x90, 0x01,
+ 0x43, 0xE4, 0xF0, 0x7F, 0x01, 0x22, 0xE4, 0x90,
+ 0x81, 0x6A, 0xF0, 0x90, 0x87, 0x5F, 0xE0, 0x90,
+ 0x81, 0x69, 0xF0, 0xE4, 0x90, 0x81, 0x76, 0xF0,
+ 0x90, 0x81, 0x66, 0xF0, 0x90, 0x81, 0x66, 0xE0,
+ 0xFF, 0xC3, 0x94, 0x40, 0x50, 0x15, 0x74, 0x79,
+ 0x2F, 0xF5, 0x82, 0xE4, 0x34, 0x81, 0xF5, 0x83,
+ 0x74, 0xFF, 0xF0, 0x90, 0x81, 0x66, 0xE0, 0x04,
+ 0xF0, 0x80, 0xE1, 0xE4, 0x90, 0x81, 0x66, 0xF0,
+ 0x90, 0x81, 0x69, 0xE0, 0xFF, 0x90, 0x81, 0x66,
+ 0xE0, 0xFE, 0xC3, 0x9F, 0x40, 0x03, 0x02, 0x68,
+ 0x12, 0x74, 0xDF, 0x2E, 0xF9, 0xE4, 0x34, 0x86,
+ 0x75, 0x13, 0x01, 0xF5, 0x14, 0x89, 0x15, 0x75,
+ 0x16, 0x0A, 0x7B, 0x01, 0x7A, 0x81, 0x79, 0x5B,
+ 0x12, 0x2B, 0xED, 0x90, 0x81, 0x5C, 0xE0, 0xFF,
+ 0x12, 0x2F, 0x27, 0xEF, 0x04, 0x90, 0x81, 0x76,
+ 0xF0, 0x90, 0x81, 0x5B, 0xE0, 0xFF, 0xA3, 0xE0,
+ 0xFD, 0x12, 0x31, 0xEA, 0xEF, 0x24, 0xC8, 0x90,
+ 0x81, 0x78, 0xF0, 0x75, 0xF0, 0x08, 0xA4, 0xF0,
+ 0x90, 0x81, 0x5C, 0xE0, 0x54, 0x0F, 0x90, 0x81,
+ 0x77, 0xF0, 0xE4, 0x90, 0x81, 0x65, 0xF0, 0x90,
+ 0x81, 0x67, 0xF0, 0x90, 0x81, 0x67, 0xE0, 0xFF,
+ 0xC3, 0x94, 0x04, 0x50, 0x57, 0x90, 0x81, 0x77,
+ 0xE0, 0xFE, 0xA8, 0x07, 0x08, 0x80, 0x02, 0xC3,
+ 0x13, 0xD8, 0xFC, 0x20, 0xE0, 0x3E, 0x90, 0x81,
+ 0x67, 0xE0, 0x25, 0xE0, 0xFF, 0x90, 0x81, 0x78,
+ 0xE0, 0x2F, 0x24, 0x79, 0xF9, 0xE4, 0x34, 0x81,
+ 0xFA, 0x7B, 0x01, 0xC0, 0x03, 0xC0, 0x01, 0x90,
+ 0x81, 0x65, 0xE0, 0x75, 0xF0, 0x02, 0xA4, 0x24,
+ 0x5D, 0xF9, 0x74, 0x81, 0x35, 0xF0, 0x8B, 0x13,
+ 0xF5, 0x14, 0x89, 0x15, 0x75, 0x16, 0x02, 0xD0,
+ 0x01, 0xD0, 0x03, 0x12, 0x2B, 0xED, 0x90, 0x81,
+ 0x65, 0xE0, 0x04, 0xF0, 0x90, 0x81, 0x67, 0xE0,
+ 0x04, 0xF0, 0x80, 0x9F, 0x90, 0x81, 0x76, 0xE0,
+ 0xFF, 0x90, 0x81, 0x66, 0xE0, 0x2F, 0xF0, 0x02,
+ 0x67, 0x40, 0xE4, 0x90, 0x81, 0x6A, 0xF0, 0x90,
+ 0x81, 0x6A, 0xE0, 0xC3, 0x94, 0x40, 0x40, 0x02,
+ 0x41, 0xAF, 0xE0, 0xFF, 0x24, 0x79, 0xF5, 0x82,
+ 0xE4, 0x34, 0x81, 0xF5, 0x83, 0xE0, 0x90, 0x81,
+ 0x6C, 0xF0, 0xE0, 0xFE, 0x54, 0xF0, 0xC4, 0x54,
+ 0x0F, 0xFD, 0x90, 0x81, 0x6B, 0xF0, 0xEE, 0x54,
+ 0x0F, 0xFE, 0xA3, 0xF0, 0x74, 0x7A, 0x2F, 0xF5,
+ 0x82, 0xE4, 0x34, 0x81, 0xF5, 0x83, 0xE0, 0x90,
+ 0x81, 0x6D, 0xF0, 0xFC, 0xEE, 0xFE, 0xEC, 0xFB,
+ 0xEB, 0xFF, 0x90, 0x81, 0x72, 0xEE, 0xF0, 0xA3,
+ 0xEF, 0xF0, 0xED, 0x12, 0x45, 0x28, 0x68, 0x8B,
+ 0x00, 0x68, 0xC2, 0x01, 0x69, 0x73, 0x02, 0x6A,
+ 0xA0, 0x03, 0x69, 0x8E, 0x04, 0x69, 0xAF, 0x05,
+ 0x69, 0xAF, 0x06, 0x69, 0xAF, 0x07, 0x69, 0xAF,
+ 0x08, 0x6A, 0x33, 0x09, 0x6A, 0x69, 0x0A, 0x00,
+ 0x00, 0x6A, 0xAF, 0x90, 0x81, 0x6A, 0xE0, 0xFD,
+ 0x24, 0x7C, 0xF5, 0x82, 0xE4, 0x34, 0x81, 0xF5,
+ 0x83, 0xE0, 0xFE, 0x74, 0x7B, 0x2D, 0xF5, 0x82,
+ 0xE4, 0x34, 0x81, 0xF5, 0x83, 0xE0, 0xFD, 0xED,
+ 0xFF, 0x90, 0x81, 0x74, 0xEE, 0xF0, 0xFC, 0xA3,
+ 0xEF, 0xF0, 0x90, 0x81, 0x6D, 0xE0, 0xFF, 0x12,
+ 0x2F, 0x96, 0x90, 0x81, 0x68, 0x74, 0x02, 0xF0,
+ 0x41, 0xA0, 0x90, 0x81, 0x6A, 0xE0, 0x24, 0x7C,
+ 0xF5, 0x82, 0xE4, 0x34, 0x81, 0xF5, 0x83, 0xE0,
+ 0xFF, 0xE4, 0xFC, 0xFD, 0xFE, 0x78, 0x08, 0x12,
+ 0x20, 0xBB, 0xA8, 0x04, 0xA9, 0x05, 0xAA, 0x06,
+ 0xAB, 0x07, 0x90, 0x81, 0x6A, 0xE0, 0x24, 0x7B,
+ 0xF5, 0x82, 0xE4, 0x34, 0x81, 0xF5, 0x83, 0xE0,
+ 0xFF, 0xE4, 0xFC, 0xFD, 0xFE, 0x12, 0x44, 0xCC,
+ 0xC0, 0x04, 0xC0, 0x05, 0xC0, 0x06, 0xC0, 0x07,
+ 0x90, 0x81, 0x6A, 0xE0, 0x24, 0x7D, 0xF5, 0x82,
+ 0xE4, 0x34, 0x81, 0xF5, 0x83, 0xE0, 0xFF, 0xE4,
+ 0xFC, 0xFD, 0xFE, 0x78, 0x10, 0x12, 0x20, 0xBB,
+ 0xD0, 0x03, 0xD0, 0x02, 0xD0, 0x01, 0xD0, 0x00,
+ 0x12, 0x44, 0xCC, 0xC0, 0x04, 0xC0, 0x05, 0xC0,
+ 0x06, 0xC0, 0x07, 0x90, 0x81, 0x6A, 0xE0, 0x24,
+ 0x7E, 0xF5, 0x82, 0xE4, 0x34, 0x81, 0xF5, 0x83,
+ 0xE0, 0xFF, 0xE4, 0xFC, 0xFD, 0xFE, 0x78, 0x18,
+ 0x12, 0x20, 0xBB, 0xD0, 0x03, 0xD0, 0x02, 0xD0,
+ 0x01, 0xD0, 0x00, 0x12, 0x44, 0xCC, 0x90, 0x81,
+ 0x6E, 0x12, 0x20, 0xCE, 0x90, 0x81, 0x6E, 0x12,
+ 0x44, 0xD9, 0x90, 0x85, 0x96, 0x12, 0x20, 0xCE,
+ 0x90, 0x81, 0x72, 0xE0, 0xFE, 0xA3, 0xE0, 0xFF,
+ 0x12, 0x2E, 0xE4, 0x90, 0x81, 0x68, 0x74, 0x04,
+ 0xF0, 0x41, 0xA0, 0x90, 0x81, 0x6D, 0xE0, 0xFD,
+ 0x90, 0x81, 0x6A, 0xE0, 0x24, 0x7B, 0xF5, 0x82,
+ 0xE4, 0x34, 0x81, 0xF5, 0x83, 0xE0, 0xFB, 0xE4,
+ 0xFF, 0x12, 0x30, 0xC7, 0x80, 0x19, 0x90, 0x81,
+ 0x6D, 0xE0, 0xFD, 0x90, 0x81, 0x6A, 0xE0, 0x24,
+ 0x7B, 0xF5, 0x82, 0xE4, 0x34, 0x81, 0xF5, 0x83,
+ 0xE0, 0xFB, 0xE4, 0xFF, 0x12, 0x30, 0x6A, 0x90,
+ 0x81, 0x68, 0x74, 0x01, 0xF0, 0x41, 0xA0, 0x90,
+ 0x81, 0x68, 0x74, 0x02, 0xF0, 0x90, 0x81, 0x6A,
+ 0xE0, 0x24, 0x7C, 0xF5, 0x82, 0xE4, 0x34, 0x81,
+ 0xF5, 0x83, 0xE0, 0xFF, 0xE4, 0xFC, 0xFD, 0xFE,
+ 0x78, 0x08, 0x12, 0x20, 0xBB, 0xA8, 0x04, 0xA9,
+ 0x05, 0xAA, 0x06, 0xAB, 0x07, 0x90, 0x81, 0x6A,
+ 0xE0, 0x24, 0x7B, 0xF5, 0x82, 0xE4, 0x34, 0x81,
+ 0xF5, 0x83, 0xE0, 0xFF, 0xE4, 0xFC, 0xFD, 0xFE,
+ 0x12, 0x44, 0xCC, 0xC0, 0x04, 0xC0, 0x05, 0xC0,
+ 0x06, 0xC0, 0x07, 0x90, 0x81, 0x6C, 0xE0, 0xFF,
+ 0xE4, 0xFC, 0xFD, 0xFE, 0x78, 0x10, 0x12, 0x20,
+ 0xBB, 0xD0, 0x03, 0xD0, 0x02, 0xD0, 0x01, 0xD0,
+ 0x00, 0x12, 0x44, 0xCC, 0x90, 0x81, 0x6E, 0x12,
+ 0x20, 0xCE, 0x90, 0x81, 0x6B, 0xE0, 0x24, 0xFB,
+ 0xFF, 0xC0, 0x07, 0x90, 0x81, 0x6E, 0x12, 0x44,
+ 0xD9, 0x90, 0x81, 0xF9, 0x12, 0x20, 0xCE, 0x90,
+ 0x81, 0x6D, 0xE0, 0xFD, 0xD0, 0x07, 0x12, 0x55,
+ 0x1C, 0x80, 0x6D, 0x90, 0x81, 0x68, 0x74, 0x01,
+ 0xF0, 0x90, 0x81, 0x6A, 0xE0, 0x24, 0x7B, 0xF9,
+ 0xE4, 0x34, 0x81, 0x75, 0x13, 0x01, 0xF5, 0x14,
+ 0x89, 0x15, 0x75, 0x16, 0x01, 0x7B, 0xFE, 0x7A,
+ 0x80, 0x79, 0x33, 0x12, 0x2B, 0xED, 0x90, 0x81,
+ 0x6D, 0xE0, 0xFF, 0x90, 0x81, 0x6C, 0xE0, 0xFD,
+ 0xE4, 0x90, 0x81, 0xBC, 0xF0, 0x7B, 0x04, 0x80,
+ 0x34, 0x90, 0x81, 0x68, 0x74, 0x04, 0xF0, 0x90,
+ 0x81, 0x6A, 0xE0, 0x24, 0x7B, 0xF9, 0xE4, 0x34,
+ 0x81, 0x75, 0x13, 0x01, 0xF5, 0x14, 0x89, 0x15,
+ 0x75, 0x16, 0x04, 0x7B, 0xFE, 0x7A, 0x80, 0x79,
+ 0x33, 0x12, 0x2B, 0xED, 0x90, 0x81, 0x6D, 0xE0,
+ 0xFF, 0x90, 0x81, 0x6C, 0xE0, 0xFD, 0xE4, 0x90,
+ 0x81, 0xBC, 0xF0, 0x7B, 0x06, 0x12, 0x63, 0xE1,
+ 0x90, 0x81, 0x68, 0xE0, 0x24, 0x02, 0xFF, 0x90,
+ 0x81, 0x6A, 0xE0, 0x2F, 0xF0, 0x01, 0x17, 0x22,
+ 0x90, 0x02, 0x09, 0xE0, 0xFD, 0x12, 0x1F, 0xA4,
+ 0xFE, 0xAF, 0x05, 0xED, 0x2E, 0x90, 0x80, 0x3D,
+ 0xF0, 0x90, 0x00, 0x01, 0x12, 0x1F, 0xBD, 0xFF,
+ 0xED, 0x2F, 0x90, 0x80, 0x3E, 0xF0, 0x90, 0x00,
+ 0x02, 0x12, 0x1F, 0xBD, 0xFF, 0xED, 0x2F, 0x90,
+ 0x80, 0x3F, 0xF0, 0x90, 0x00, 0x03, 0x12, 0x1F,
+ 0xBD, 0xFF, 0xED, 0x2F, 0x90, 0x80, 0x40, 0xF0,
+ 0x90, 0x00, 0x04, 0x12, 0x1F, 0xBD, 0xFF, 0xAE,
+ 0x05, 0xED, 0x2F, 0x90, 0x80, 0x41, 0xF0, 0x22,
+ 0x90, 0x00, 0x02, 0x12, 0x1F, 0xBD, 0xFF, 0x30,
+ 0xE0, 0x26, 0x12, 0x1F, 0xA4, 0x90, 0x81, 0x38,
+ 0xF0, 0x90, 0x00, 0x01, 0x12, 0x1F, 0xBD, 0x90,
+ 0x81, 0x39, 0xF0, 0xEF, 0x54, 0xFE, 0xFF, 0xA3,
+ 0xE0, 0x54, 0x01, 0x4F, 0xF0, 0x90, 0x00, 0x03,
+ 0x12, 0x1F, 0xBD, 0x90, 0x81, 0x3B, 0xF0, 0x22,
+ 0x90, 0x81, 0x38, 0x74, 0x01, 0xF0, 0xA3, 0x74,
+ 0x05, 0xF0, 0xA3, 0xE0, 0x54, 0x01, 0x44, 0x28,
+ 0xF0, 0xA3, 0x74, 0x05, 0xF0, 0x22, 0x12, 0x1F,
+ 0xA4, 0x90, 0x81, 0x3E, 0xF0, 0x90, 0x81, 0x3E,
+ 0xE0, 0x90, 0x01, 0xE7, 0xF0, 0x22, 0x12, 0x1F,
+ 0xA4, 0x90, 0x81, 0x4A, 0xF0, 0x90, 0x00, 0x01,
+ 0x12, 0x1F, 0xBD, 0x90, 0x81, 0x4B, 0xF0, 0x22,
+ 0xD3, 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x90,
+ 0x81, 0xFD, 0xEE, 0xF0, 0xA3, 0xEF, 0xF0, 0xE4,
+ 0xA3, 0xF0, 0xA3, 0xF0, 0x90, 0x81, 0xFD, 0xE0,
+ 0xFE, 0xA3, 0xE0, 0xF5, 0x82, 0x8E, 0x83, 0xE0,
+ 0x60, 0x2D, 0xC3, 0x90, 0x82, 0x00, 0xE0, 0x94,
+ 0xE8, 0x90, 0x81, 0xFF, 0xE0, 0x94, 0x03, 0x40,
+ 0x0B, 0x90, 0x01, 0xC0, 0xE0, 0x44, 0x80, 0xF0,
+ 0x7F, 0x00, 0x80, 0x15, 0x90, 0x81, 0xFF, 0xE4,
+ 0x75, 0xF0, 0x01, 0x12, 0x44, 0xA9, 0x7F, 0x0A,
+ 0x7E, 0x00, 0x12, 0x32, 0xAA, 0x80, 0xC5, 0x7F,
+ 0x01, 0xD0, 0xD0, 0x92, 0xAF, 0x22, 0xD3, 0x10,
+ 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x90, 0x81, 0xD1,
+ 0x12, 0x45, 0x1F, 0x90, 0x82, 0x0A, 0xE0, 0xFF,
+ 0x04, 0xF0, 0x90, 0x00, 0x01, 0xEF, 0x12, 0x1F,
+ 0xFC, 0x7F, 0xAF, 0x7E, 0x01, 0x71, 0x60, 0xEF,
+ 0x60, 0x3A, 0x90, 0x81, 0xD1, 0x12, 0x45, 0x16,
+ 0x8B, 0x13, 0x8A, 0x14, 0x89, 0x15, 0x90, 0x00,
+ 0x0E, 0x12, 0x1F, 0xBD, 0x24, 0x02, 0xF5, 0x16,
+ 0x7B, 0x01, 0x7A, 0x01, 0x79, 0xA0, 0x12, 0x2B,
+ 0xED, 0x90, 0x81, 0xD1, 0x12, 0x45, 0x16, 0x90,
+ 0x00, 0x0E, 0x12, 0x1F, 0xBD, 0x90, 0x01, 0xAE,
+ 0xF0, 0xA3, 0x74, 0xFF, 0xF0, 0x90, 0x01, 0xCB,
+ 0xE0, 0x64, 0x80, 0xF0, 0xD0, 0xD0, 0x92, 0xAF,
+ 0x22, 0xD3, 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0,
+ 0xE4, 0xFF, 0x90, 0x80, 0xD9, 0xE0, 0xFE, 0x90,
+ 0x80, 0xD8, 0xE0, 0xFD, 0xB5, 0x06, 0x04, 0x7E,
+ 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEE, 0x64, 0x01,
+ 0x60, 0x32, 0x90, 0x01, 0xAF, 0xE0, 0x70, 0x13,
+ 0xED, 0x75, 0xF0, 0x0F, 0xA4, 0x24, 0x42, 0xF9,
+ 0x74, 0x80, 0x35, 0xF0, 0xFA, 0x7B, 0x01, 0x71,
+ 0xB6, 0x7F, 0x01, 0xEF, 0x60, 0x16, 0x90, 0x80,
+ 0xD8, 0xE0, 0x04, 0xF0, 0xE0, 0x7F, 0x00, 0xB4,
+ 0x0A, 0x02, 0x7F, 0x01, 0xEF, 0x60, 0x05, 0xE4,
+ 0x90, 0x80, 0xD8, 0xF0, 0xD0, 0xD0, 0x92, 0xAF,
+ 0x22, 0x8F, 0x0D, 0x22, 0x8F, 0x0E, 0x22, 0x22,
+ 0x90, 0x01, 0x34, 0xE0, 0x55, 0x3D, 0xF5, 0x41,
+ 0xA3, 0xE0, 0x55, 0x3E, 0xF5, 0x42, 0xA3, 0xE0,
+ 0x55, 0x3F, 0xF5, 0x43, 0xA3, 0xE0, 0x55, 0x40,
+ 0xF5, 0x44, 0x90, 0x01, 0x34, 0xE5, 0x41, 0xF0,
+ 0xA3, 0xE5, 0x42, 0xF0, 0xA3, 0xE5, 0x43, 0xF0,
+ 0xA3, 0xE5, 0x44, 0xF0, 0x22, 0x90, 0x01, 0x3C,
+ 0xE0, 0x55, 0x45, 0xF5, 0x49, 0xA3, 0xE0, 0x55,
+ 0x46, 0xF5, 0x4A, 0xA3, 0xE0, 0x55, 0x47, 0xF5,
+ 0x4B, 0xA3, 0xE0, 0x55, 0x48, 0xF5, 0x4C, 0x90,
+ 0x01, 0x3C, 0xE5, 0x49, 0xF0, 0xA3, 0xE5, 0x4A,
+ 0xF0, 0xA3, 0xE5, 0x4B, 0xF0, 0xA3, 0xE5, 0x4C,
+ 0xF0, 0x53, 0x91, 0xDF, 0x22, 0x90, 0x81, 0x1F,
+ 0xE0, 0x30, 0xE0, 0x05, 0xE4, 0xA3, 0xF0, 0xA3,
+ 0xF0, 0x22, 0x90, 0x80, 0xDE, 0xE0, 0x64, 0x01,
+ 0x70, 0x19, 0x90, 0x81, 0x27, 0xE0, 0x60, 0x13,
+ 0x90, 0x01, 0x57, 0xE4, 0xF0, 0x90, 0x01, 0x3C,
+ 0x74, 0x02, 0x12, 0x4F, 0xF4, 0x90, 0x01, 0x57,
+ 0x74, 0x05, 0xF0, 0x22, 0x90, 0x80, 0xDE, 0xE0,
+ 0x64, 0x01, 0x70, 0x26, 0x90, 0x81, 0x27, 0xE0,
+ 0x60, 0x20, 0x90, 0x01, 0x57, 0xE4, 0xF0, 0x90,
+ 0x01, 0x3C, 0x74, 0x02, 0xF0, 0x90, 0x81, 0x24,
+ 0xE0, 0x54, 0xFB, 0xF0, 0x90, 0x81, 0x2B, 0xE0,
+ 0x54, 0xFD, 0xF0, 0x54, 0x07, 0x70, 0x03, 0x12,
+ 0x47, 0x2A, 0x22, 0x90, 0x80, 0xDE, 0xE0, 0xB4,
+ 0x01, 0x14, 0x90, 0x81, 0x27, 0xE0, 0x60, 0x0E,
+ 0x90, 0x81, 0x26, 0xE0, 0x54, 0x0F, 0x64, 0x02,
+ 0x60, 0x02, 0x80, 0x03, 0xD1, 0x7F, 0x22, 0x90,
+ 0x04, 0x1D, 0xE0, 0x70, 0x13, 0x90, 0x80, 0x3E,
+ 0xE0, 0xFF, 0xE4, 0xFD, 0xB1, 0x69, 0x8E, 0x4E,
+ 0x8F, 0x4F, 0x90, 0x04, 0x1F, 0x74, 0x20, 0xF0,
+ 0x22, 0xD3, 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0,
+ 0x90, 0x82, 0x0E, 0xED, 0xF0, 0x90, 0x82, 0x0D,
+ 0xEF, 0xF0, 0xE4, 0xFD, 0xFC, 0xF1, 0x37, 0x7C,
+ 0x00, 0xAD, 0x07, 0x90, 0x82, 0x0D, 0xE0, 0x90,
+ 0x04, 0x25, 0xF0, 0x90, 0x82, 0x0E, 0xE0, 0x60,
+ 0x0E, 0x74, 0x0F, 0x2F, 0xF5, 0x82, 0xE4, 0x34,
+ 0xFC, 0xF5, 0x83, 0xE0, 0x44, 0x80, 0xF0, 0xAF,
+ 0x05, 0x74, 0x08, 0x2F, 0xF5, 0x82, 0xE4, 0x34,
+ 0xFC, 0xF5, 0x83, 0xE4, 0xF0, 0x74, 0x09, 0x2F,
+ 0xF5, 0x82, 0xE4, 0x34, 0xFC, 0xF5, 0x83, 0xE0,
+ 0x54, 0xF0, 0xF0, 0x74, 0x21, 0x2D, 0xF5, 0x82,
+ 0xE4, 0x34, 0xFC, 0xF5, 0x83, 0xE0, 0x54, 0xF7,
+ 0xF0, 0xAE, 0x04, 0xAF, 0x05, 0xD0, 0xD0, 0x92,
+ 0xAF, 0x22, 0x8F, 0x4E, 0xF1, 0x4B, 0xBF, 0x01,
+ 0x18, 0x90, 0x80, 0x40, 0xE0, 0xFF, 0x7D, 0x01,
+ 0xB1, 0x69, 0xAD, 0x07, 0xAC, 0x06, 0xAF, 0x4E,
+ 0x12, 0x4F, 0x82, 0x90, 0x04, 0x1F, 0x74, 0x20,
+ 0xF0, 0x22, 0x90, 0x06, 0xA9, 0xE0, 0x90, 0x81,
+ 0x4C, 0xF0, 0xE0, 0xFD, 0x54, 0xC0, 0x70, 0x09,
+ 0x90, 0x81, 0x2B, 0xE0, 0x54, 0xFE, 0xF0, 0x80,
+ 0x72, 0xED, 0x30, 0xE6, 0x4B, 0x90, 0x81, 0x27,
+ 0xE0, 0x64, 0x02, 0x70, 0x2A, 0x90, 0x81, 0x24,
+ 0xE0, 0xFF, 0xC3, 0x13, 0x20, 0xE0, 0x09, 0x90,
+ 0x81, 0x2B, 0xE0, 0x44, 0x01, 0xF0, 0x80, 0x28,
+ 0x90, 0x81, 0x26, 0xE0, 0x54, 0x0F, 0x64, 0x01,
+ 0x70, 0x2D, 0x90, 0x81, 0x2B, 0xE0, 0x44, 0x04,
+ 0xF0, 0x7F, 0x01, 0xB1, 0xD2, 0x80, 0x20, 0x90,
+ 0x81, 0x2B, 0xE0, 0x44, 0x01, 0xF0, 0x90, 0x81,
+ 0x26, 0xE0, 0x54, 0x0F, 0x64, 0x02, 0x60, 0x04,
+ 0xB1, 0x4F, 0x80, 0x0B, 0xD1, 0x7F, 0x80, 0x07,
+ 0x90, 0x81, 0x2B, 0xE0, 0x54, 0xFE, 0xF0, 0x90,
+ 0x81, 0x4C, 0xE0, 0x90, 0x81, 0x2B, 0x30, 0xE7,
+ 0x11, 0x12, 0x4F, 0xF1, 0x90, 0x01, 0x57, 0x74,
+ 0x05, 0xF0, 0x90, 0x81, 0x24, 0xE0, 0x44, 0x04,
+ 0xF0, 0x22, 0xE0, 0x54, 0xFD, 0xF0, 0x22, 0x90,
+ 0x01, 0x5F, 0xE4, 0xF0, 0x90, 0x01, 0x3C, 0x74,
+ 0x08, 0xF0, 0xE4, 0xF5, 0x1D, 0x90, 0x81, 0x3A,
+ 0xE0, 0xC3, 0x13, 0x54, 0x7F, 0xF5, 0x1E, 0xE4,
+ 0xFB, 0xFD, 0x7F, 0x5C, 0x7E, 0x01, 0x12, 0x50,
+ 0x05, 0x90, 0x01, 0x5F, 0x74, 0x05, 0xF0, 0x90,
+ 0x06, 0x92, 0x74, 0x02, 0xF0, 0x90, 0x81, 0x24,
+ 0xE0, 0x44, 0x10, 0xF0, 0x90, 0x81, 0x2A, 0xE0,
+ 0x64, 0x0C, 0x60, 0x0C, 0xE4, 0xFD, 0x7F, 0x0C,
+ 0x12, 0x47, 0x3D, 0xE4, 0xFF, 0x12, 0x4F, 0x0D,
+ 0x22, 0xE4, 0x90, 0x81, 0x4C, 0xF0, 0x90, 0x06,
+ 0xA9, 0xE0, 0x90, 0x81, 0x4C, 0xF0, 0xE0, 0x54,
+ 0xC0, 0x70, 0x0D, 0x90, 0x81, 0x2B, 0xE0, 0x54,
+ 0xFE, 0xF0, 0x54, 0xFD, 0xF0, 0x02, 0x47, 0x2A,
+ 0x90, 0x81, 0x4C, 0xE0, 0x30, 0xE6, 0x21, 0x90,
+ 0x81, 0x27, 0xE0, 0x64, 0x01, 0x70, 0x20, 0x90,
+ 0x81, 0x2B, 0xE0, 0x44, 0x01, 0xF0, 0x90, 0x81,
+ 0x26, 0xE0, 0x54, 0x0F, 0x64, 0x02, 0x60, 0x04,
+ 0xB1, 0x4F, 0x80, 0x0B, 0xD1, 0x7F, 0x80, 0x07,
+ 0x90, 0x81, 0x2B, 0xE0, 0x54, 0xFE, 0xF0, 0x90,
+ 0x81, 0x4C, 0xE0, 0x90, 0x81, 0x2B, 0x30, 0xE7,
+ 0x11, 0x12, 0x4F, 0xF1, 0x90, 0x01, 0x57, 0x74,
+ 0x05, 0xF0, 0x90, 0x81, 0x24, 0xE0, 0x44, 0x04,
+ 0xF0, 0x22, 0xE0, 0x54, 0xFD, 0xF0, 0x22, 0xE4,
+ 0xFE, 0xEF, 0xC3, 0x13, 0xFD, 0xEF, 0x30, 0xE0,
+ 0x02, 0x7E, 0x80, 0x90, 0xFD, 0x10, 0xED, 0xF0,
+ 0xAF, 0x06, 0x22, 0xD3, 0x10, 0xAF, 0x01, 0xC3,
+ 0xC0, 0xD0, 0x90, 0x04, 0x1D, 0xE0, 0x60, 0x1A,
+ 0x90, 0x05, 0x22, 0xE0, 0x54, 0x90, 0x60, 0x07,
+ 0x90, 0x01, 0xC0, 0xE0, 0x44, 0x08, 0xF0, 0x90,
+ 0x01, 0xC6, 0xE0, 0x30, 0xE1, 0xE4, 0x7F, 0x00,
+ 0x80, 0x02, 0x7F, 0x01, 0xD0, 0xD0, 0x92, 0xAF,
+ 0x22, 0x90, 0x81, 0x27, 0xE0, 0x60, 0x03, 0x12,
+ 0x73, 0xE1, 0x90, 0x81, 0x3F, 0xE0, 0x30, 0xE0,
+ 0x03, 0x12, 0x49, 0xDD, 0x22, 0x90, 0x81, 0x27,
+ 0xE0, 0x60, 0x35, 0x90, 0x06, 0x92, 0xE0, 0x30,
+ 0xE1, 0x24, 0xE4, 0xF5, 0x1D, 0x90, 0x81, 0x3A,
+ 0xE0, 0xC3, 0x13, 0x54, 0x7F, 0xF5, 0x1E, 0xE4,
+ 0xFB, 0xFD, 0x7F, 0x5C, 0x7E, 0x01, 0x12, 0x50,
+ 0x05, 0x90, 0x01, 0x5F, 0x74, 0x05, 0xF0, 0x90,
+ 0x06, 0x92, 0x74, 0x02, 0xF0, 0x22, 0x90, 0x81,
+ 0x24, 0xE0, 0x54, 0xEF, 0xF0, 0x12, 0x47, 0x2A,
+ 0x22, 0x12, 0x71, 0x48, 0x90, 0x81, 0x4D, 0xEF,
+ 0xF0, 0x90, 0x81, 0x24, 0x30, 0xE0, 0x06, 0xE0,
+ 0x44, 0x01, 0xF0, 0x80, 0x04, 0xE0, 0x54, 0xFE,
+ 0xF0, 0x90, 0x81, 0x4D, 0xE0, 0x30, 0xE6, 0x11,
+ 0x90, 0x01, 0x2F, 0xE0, 0x30, 0xE7, 0x04, 0xE4,
+ 0xF0, 0x80, 0x06, 0x90, 0x01, 0x2F, 0x74, 0x80,
+ 0xF0, 0x90, 0x81, 0x24, 0xE0, 0x30, 0xE0, 0x1A,
+ 0x90, 0x81, 0x32, 0xE4, 0xF0, 0xA3, 0x74, 0x07,
+ 0xF0, 0x90, 0x81, 0x32, 0xA3, 0xE0, 0x90, 0x05,
+ 0x58, 0xF0, 0x90, 0x04, 0xEC, 0xE0, 0x54, 0xDD,
+ 0xF0, 0x22, 0x90, 0x04, 0xEC, 0xE0, 0x44, 0x22,
+ 0xF0, 0x22, 0x90, 0x81, 0x4A, 0xE0, 0x60, 0x0F,
+ 0xE4, 0xF0, 0x90, 0x05, 0x53, 0xE0, 0x44, 0x01,
+ 0xF0, 0x90, 0x05, 0xFD, 0xE0, 0x04, 0xF0, 0x22,
+ 0x90, 0x81, 0x24, 0xE0, 0xFF, 0xC4, 0x13, 0x13,
+ 0x54, 0x03, 0x30, 0xE0, 0x27, 0xEF, 0x54, 0xBF,
+ 0xF0, 0x90, 0x04, 0xE0, 0xE0, 0x90, 0x81, 0x25,
+ 0x30, 0xE0, 0x06, 0xE0, 0x44, 0x01, 0xF0, 0x80,
+ 0x10, 0xE0, 0x54, 0xFE, 0xF0, 0x90, 0x01, 0xB9,
+ 0x74, 0x01, 0xF0, 0x90, 0x01, 0xB8, 0x74, 0x04,
+ 0xF0, 0x12, 0x47, 0x2A, 0xE4, 0xFF, 0x90, 0x81,
+ 0x45, 0xE0, 0x30, 0xE0, 0x48, 0x90, 0x81, 0x49,
+ 0xE0, 0xFD, 0x60, 0x41, 0x74, 0x01, 0x7E, 0x00,
+ 0xA8, 0x07, 0x08, 0x80, 0x05, 0xC3, 0x33, 0xCE,
+ 0x33, 0xCE, 0xD8, 0xF9, 0xFF, 0x90, 0x04, 0xE0,
+ 0xE0, 0xFB, 0xEF, 0x5B, 0x60, 0x06, 0xE4, 0x90,
+ 0x81, 0x49, 0xF0, 0x22, 0x90, 0x81, 0x47, 0xE0,
+ 0xD3, 0x9D, 0x50, 0x10, 0x90, 0x01, 0xC7, 0x74,
+ 0x10, 0xF0, 0x11, 0xBE, 0x90, 0x81, 0x45, 0xE0,
+ 0x54, 0xFE, 0xF0, 0x22, 0x12, 0x4F, 0x0B, 0x90,
+ 0x81, 0x49, 0xE0, 0x04, 0xF0, 0x22, 0x90, 0x80,
+ 0x3C, 0xE0, 0x64, 0x02, 0x60, 0x07, 0x90, 0x06,
+ 0x90, 0xE0, 0x44, 0x01, 0xF0, 0x22, 0x90, 0x81,
+ 0x24, 0xE0, 0xFF, 0xC4, 0x13, 0x13, 0x13, 0x54,
+ 0x01, 0x30, 0xE0, 0x2C, 0xEF, 0x54, 0x7F, 0xF0,
+ 0x90, 0x04, 0xE0, 0xE0, 0x90, 0x81, 0x25, 0x30,
+ 0xE1, 0x06, 0xE0, 0x44, 0x02, 0xF0, 0x80, 0x0F,
+ 0xE0, 0x54, 0xFD, 0xF0, 0x90, 0x01, 0xB9, 0x74,
+ 0x01, 0xF0, 0x90, 0x01, 0xB8, 0x04, 0xF0, 0x90,
+ 0x81, 0x27, 0xE0, 0x60, 0x03, 0x12, 0x47, 0x2A,
+ 0x7F, 0x01, 0x01, 0x6E, 0xC3, 0xEE, 0x94, 0x01,
+ 0x40, 0x0A, 0x0D, 0xED, 0x13, 0x90, 0xFD, 0x10,
+ 0xF0, 0xE4, 0x2F, 0xFF, 0x22, 0xC3, 0xEE, 0x94,
+ 0x01, 0x40, 0x24, 0x90, 0xFD, 0x11, 0xE0, 0x6D,
+ 0x70, 0x1A, 0x90, 0x01, 0x17, 0xE0, 0xB5, 0x05,
+ 0x0D, 0x90, 0x01, 0xE4, 0x74, 0x77, 0xF0, 0x90,
+ 0xFD, 0x11, 0xE4, 0xF0, 0x80, 0x06, 0xED, 0x04,
+ 0x90, 0xFD, 0x11, 0xF0, 0xE4, 0x2F, 0xFF, 0x22,
+ 0xE4, 0x90, 0x81, 0x4E, 0xF0, 0xA3, 0xF0, 0xA3,
+ 0xF0, 0x90, 0x00, 0x83, 0xE0, 0x90, 0x81, 0x4E,
+ 0xF0, 0x90, 0x00, 0x83, 0xE0, 0xFE, 0x90, 0x81,
+ 0x4E, 0xE0, 0xFF, 0xB5, 0x06, 0x01, 0x22, 0xC3,
+ 0x90, 0x81, 0x50, 0xE0, 0x94, 0x64, 0x90, 0x81,
+ 0x4F, 0xE0, 0x94, 0x00, 0x40, 0x0D, 0x90, 0x01,
+ 0xC0, 0xE0, 0x44, 0x40, 0xF0, 0x90, 0x81, 0x4E,
+ 0xE0, 0xFF, 0x22, 0x90, 0x81, 0x4F, 0xE4, 0x75,
+ 0xF0, 0x01, 0x12, 0x44, 0xA9, 0x80, 0xC2, 0x74,
+ 0x45, 0x2F, 0xF8, 0xE6, 0xFE, 0xED, 0xF4, 0x5E,
+ 0xFE, 0xF6, 0x74, 0x38, 0x2F, 0xF5, 0x82, 0xE4,
+ 0x34, 0x01, 0xF5, 0x83, 0xEE, 0xF0, 0x22, 0xD3,
+ 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x90, 0x82,
+ 0x12, 0xED, 0xF0, 0x90, 0x82, 0x11, 0xEF, 0xF0,
+ 0xD3, 0x94, 0x07, 0x50, 0x70, 0xE0, 0xFF, 0x74,
+ 0x01, 0xA8, 0x07, 0x08, 0x80, 0x02, 0xC3, 0x33,
+ 0xD8, 0xFC, 0xF4, 0xFF, 0x90, 0x00, 0x47, 0xE0,
+ 0x5F, 0xFD, 0x7F, 0x47, 0x12, 0x32, 0x1E, 0x90,
+ 0x82, 0x11, 0xE0, 0xFF, 0x74, 0x01, 0xA8, 0x07,
+ 0x08, 0x80, 0x02, 0xC3, 0x33, 0xD8, 0xFC, 0xFF,
+ 0x90, 0x00, 0x46, 0xE0, 0x4F, 0xFD, 0x7F, 0x46,
+ 0x12, 0x32, 0x1E, 0x90, 0x82, 0x12, 0xE0, 0x60,
+ 0x18, 0x90, 0x82, 0x11, 0xE0, 0xFF, 0x74, 0x01,
+ 0xA8, 0x07, 0x08, 0x80, 0x02, 0xC3, 0x33, 0xD8,
+ 0xFC, 0xFF, 0x90, 0x00, 0x45, 0xE0, 0x4F, 0x80,
+ 0x17, 0x90, 0x82, 0x11, 0xE0, 0xFF, 0x74, 0x01,
+ 0xA8, 0x07, 0x08, 0x80, 0x02, 0xC3, 0x33, 0xD8,
+ 0xFC, 0xF4, 0xFF, 0x90, 0x00, 0x45, 0xE0, 0x5F,
+ 0xFD, 0x7F, 0x45, 0x80, 0x7E, 0x90, 0x82, 0x11,
+ 0xE0, 0x24, 0xF8, 0xF0, 0xE0, 0x24, 0x04, 0xFF,
+ 0x74, 0x01, 0xA8, 0x07, 0x08, 0x80, 0x02, 0xC3,
+ 0x33, 0xD8, 0xFC, 0xF4, 0xFF, 0x90, 0x00, 0x43,
+ 0xE0, 0x5F, 0xFD, 0x7F, 0x43, 0x12, 0x32, 0x1E,
+ 0x90, 0x82, 0x11, 0xE0, 0xFF, 0x74, 0x01, 0xA8,
+ 0x07, 0x08, 0x80, 0x02, 0xC3, 0x33, 0xD8, 0xFC,
+ 0xFF, 0x90, 0x00, 0x43, 0xE0, 0x4F, 0xFD, 0x7F,
+ 0x43, 0x12, 0x32, 0x1E, 0x90, 0x82, 0x12, 0xE0,
+ 0x60, 0x1D, 0x90, 0x82, 0x11, 0xE0, 0x24, 0x04,
+ 0xFF, 0x74, 0x01, 0xA8, 0x07, 0x08, 0x80, 0x02,
+ 0xC3, 0x33, 0xD8, 0xFC, 0xFF, 0x90, 0x00, 0x42,
+ 0xE0, 0x4F, 0xFD, 0x7F, 0x42, 0x80, 0x1C, 0x90,
+ 0x82, 0x11, 0xE0, 0x24, 0x04, 0xFF, 0x74, 0x01,
+ 0xA8, 0x07, 0x08, 0x80, 0x02, 0xC3, 0x33, 0xD8,
+ 0xFC, 0xF4, 0xFF, 0x90, 0x00, 0x42, 0xE0, 0x5F,
+ 0xFD, 0x7F, 0x42, 0x12, 0x32, 0x1E, 0xD0, 0xD0,
+ 0x92, 0xAF, 0x22, 0x90, 0x81, 0x24, 0xE0, 0x54,
+ 0xFB, 0xF0, 0xE4, 0x90, 0x81, 0x30, 0xF0, 0x90,
+ 0x81, 0x2B, 0xF0, 0x22, 0xEF, 0x24, 0xFE, 0x60,
+ 0x0C, 0x04, 0x70, 0x28, 0x90, 0x81, 0x2D, 0x74,
+ 0x01, 0xF0, 0xA3, 0xF0, 0x22, 0xED, 0x70, 0x0A,
+ 0x90, 0x81, 0x3B, 0xE0, 0x90, 0x81, 0x2D, 0xF0,
+ 0x80, 0x05, 0x90, 0x81, 0x2D, 0xED, 0xF0, 0x90,
+ 0x81, 0x2D, 0xE0, 0xA3, 0xF0, 0x90, 0x81, 0x25,
+ 0xE0, 0x44, 0x08, 0xF0, 0x22, 0x12, 0x4E, 0xAB,
+ 0xEF, 0x64, 0x01, 0x60, 0x08, 0x90, 0x01, 0xB8,
+ 0x74, 0x01, 0xF0, 0x80, 0x67, 0x90, 0x81, 0x2B,
+ 0xE0, 0xFF, 0x54, 0x03, 0x60, 0x08, 0x90, 0x01,
+ 0xB8, 0x74, 0x02, 0xF0, 0x80, 0x56, 0x90, 0x81,
+ 0x29, 0xE0, 0xFE, 0xE4, 0xC3, 0x9E, 0x50, 0x08,
+ 0x90, 0x01, 0xB8, 0x74, 0x04, 0xF0, 0x80, 0x44,
+ 0xEF, 0x30, 0xE2, 0x08, 0x90, 0x01, 0xB8, 0x74,
+ 0x08, 0xF0, 0x80, 0x38, 0x90, 0x81, 0x2B, 0xE0,
+ 0x30, 0xE4, 0x08, 0x90, 0x01, 0xB8, 0x74, 0x10,
+ 0xF0, 0x80, 0x29, 0x90, 0x81, 0x25, 0xE0, 0x13,
+ 0x13, 0x54, 0x3F, 0x20, 0xE0, 0x08, 0x90, 0x01,
+ 0xB8, 0x74, 0x20, 0xF0, 0x80, 0x16, 0x90, 0x81,
+ 0x3E, 0xE0, 0x60, 0x08, 0x90, 0x01, 0xB8, 0x74,
+ 0x80, 0xF0, 0x80, 0x08, 0x90, 0x01, 0xB8, 0xE4,
+ 0xF0, 0x7F, 0x01, 0x22, 0x90, 0x01, 0xB9, 0x74,
+ 0x04, 0xF0, 0x7F, 0x00, 0x22, 0xEF, 0x60, 0x42,
+ 0x90, 0x80, 0xDE, 0xE0, 0x64, 0x01, 0x70, 0x3A,
+ 0x90, 0x81, 0x25, 0xE0, 0x54, 0xFE, 0xF0, 0x90,
+ 0x05, 0x22, 0x74, 0x0F, 0xF0, 0x90, 0x06, 0x04,
+ 0xE0, 0x54, 0xBF, 0xF0, 0xE4, 0xFF, 0x12, 0x4F,
+ 0x0D, 0xBF, 0x01, 0x12, 0x90, 0x81, 0x24, 0xE0,
+ 0x44, 0x40, 0xF0, 0x90, 0x81, 0x2A, 0x74, 0x06,
+ 0xF0, 0x90, 0x81, 0x23, 0xF0, 0x22, 0x90, 0x01,
+ 0xB9, 0x74, 0x01, 0xF0, 0x90, 0x01, 0xB8, 0x74,
+ 0x08, 0xF0, 0x22, 0x90, 0x05, 0x22, 0x74, 0x6F,
+ 0xF0, 0x90, 0x05, 0x27, 0xE0, 0x54, 0xBF, 0xF0,
+ 0x90, 0x81, 0x2A, 0x74, 0x02, 0xF0, 0x90, 0x81,
+ 0x23, 0xF0, 0x22, 0x12, 0x54, 0x65, 0x90, 0x81,
+ 0x2A, 0x74, 0x0C, 0xF0, 0x90, 0x81, 0x23, 0xF0,
+ 0x22, 0x90, 0x81, 0x24, 0xE0, 0xFF, 0x13, 0x13,
+ 0x54, 0x3F, 0x30, 0xE0, 0x11, 0xEF, 0x54, 0xFB,
+ 0xF0, 0x90, 0x81, 0x2B, 0xE0, 0x54, 0xFD, 0xF0,
+ 0x54, 0x07, 0x70, 0x42, 0x80, 0x3D, 0x90, 0x81,
+ 0x30, 0xE0, 0x04, 0xF0, 0x90, 0x81, 0x2B, 0xE0,
+ 0x54, 0xEF, 0xF0, 0x90, 0x81, 0x30, 0xE0, 0xFF,
+ 0xB4, 0x01, 0x02, 0x80, 0x04, 0xEF, 0xB4, 0x02,
+ 0x06, 0x90, 0x05, 0x58, 0xE0, 0x04, 0xF0, 0x90,
+ 0x81, 0x38, 0xE0, 0xFF, 0x90, 0x81, 0x30, 0xE0,
+ 0xD3, 0x9F, 0x40, 0x0F, 0x90, 0x80, 0xDE, 0xE0,
+ 0xB4, 0x01, 0x0B, 0x90, 0x81, 0x25, 0xE0, 0x54,
+ 0xFB, 0xF0, 0x22, 0x12, 0x47, 0x2A, 0x22, 0x22,
+ 0x90, 0x05, 0x2B, 0xE0, 0x7F, 0x00, 0x30, 0xE7,
+ 0x02, 0x7F, 0x01, 0x22, 0x90, 0x05, 0x22, 0x74,
+ 0xFF, 0xF0, 0x90, 0x05, 0x27, 0xE0, 0x44, 0x40,
+ 0xF0, 0x90, 0x81, 0x22, 0x74, 0x03, 0xF0, 0x22,
+ 0x90, 0x05, 0x27, 0xE0, 0x44, 0x40, 0xF0, 0x12,
+ 0x49, 0xDD, 0x90, 0x81, 0x22, 0x74, 0x02, 0xF0,
+ 0x22, 0x12, 0x49, 0xE3, 0x90, 0x81, 0x22, 0x74,
+ 0x02, 0xF0, 0x22, 0x90, 0x05, 0x22, 0x74, 0x6F,
+ 0xF0, 0x90, 0x05, 0x27, 0xE0, 0x54, 0xBF, 0xF0,
+ 0x90, 0x81, 0x22, 0x74, 0x04, 0xF0, 0x22, 0xAE,
+ 0x07, 0x12, 0x51, 0x73, 0xBF, 0x01, 0x12, 0x90,
+ 0x81, 0x23, 0xE0, 0x64, 0x02, 0x60, 0x0A, 0xAF,
+ 0x06, 0x7D, 0x01, 0x12, 0x47, 0x3D, 0x7F, 0x01,
+ 0x22, 0x7F, 0x00, 0x22, 0x90, 0x01, 0x57, 0xE0,
+ 0x60, 0x48, 0xE4, 0xF0, 0x90, 0x01, 0x3C, 0x74,
+ 0x02, 0xF0, 0x90, 0x81, 0x24, 0xE0, 0xFF, 0x13,
+ 0x13, 0x54, 0x3F, 0x30, 0xE0, 0x0C, 0xEF, 0x54,
+ 0xFB, 0xF0, 0x90, 0x81, 0x2B, 0xE0, 0x54, 0xFD,
+ 0xF0, 0x22, 0x90, 0x81, 0x30, 0xE0, 0x04, 0xF0,
+ 0x90, 0x81, 0x2B, 0xE0, 0x54, 0xEF, 0xF0, 0x90,
+ 0x81, 0x38, 0xE0, 0xFF, 0x90, 0x81, 0x30, 0xE0,
+ 0xD3, 0x9F, 0x40, 0x0E, 0x90, 0x80, 0xDE, 0xE0,
+ 0xB4, 0x01, 0x07, 0x90, 0x81, 0x25, 0xE0, 0x54,
+ 0xFB, 0xF0, 0x22, 0x90, 0x80, 0x3F, 0xE0, 0xFF,
+ 0x7D, 0x01, 0x12, 0x6D, 0x69, 0x8E, 0x54, 0x8F,
+ 0x55, 0xAD, 0x55, 0xAC, 0x54, 0xAF, 0x53, 0x12,
+ 0x4F, 0x82, 0xAF, 0x55, 0xAE, 0x54, 0x90, 0x04,
+ 0x80, 0xE0, 0x54, 0x0F, 0xFD, 0xAC, 0x07, 0x74,
+ 0x11, 0x2C, 0xF5, 0x82, 0xE4, 0x34, 0xFC, 0xF5,
+ 0x83, 0xE0, 0x44, 0x01, 0xF0, 0x74, 0x11, 0x2C,
+ 0xF5, 0x82, 0xE4, 0x34, 0xFC, 0xF5, 0x83, 0xE0,
+ 0x54, 0xFB, 0xF0, 0xAC, 0x07, 0x74, 0x16, 0x2C,
+ 0xF5, 0x82, 0xE4, 0x34, 0xFC, 0xF5, 0x83, 0xE0,
+ 0x44, 0xFA, 0xF0, 0x74, 0x15, 0x2C, 0xF5, 0x82,
+ 0xE4, 0x34, 0xFC, 0xF5, 0x83, 0xE0, 0x44, 0x1F,
+ 0xF0, 0xAC, 0x07, 0x74, 0x06, 0x2C, 0xF5, 0x82,
+ 0xE4, 0x34, 0xFC, 0xF5, 0x83, 0xE0, 0x44, 0x0F,
+ 0xF0, 0x90, 0x04, 0x53, 0xE4, 0xF0, 0x90, 0x04,
+ 0x52, 0xF0, 0x90, 0x04, 0x51, 0x74, 0xFF, 0xF0,
+ 0x90, 0x04, 0x50, 0x74, 0xFD, 0xF0, 0x74, 0x14,
+ 0x2C, 0xF5, 0x82, 0xE4, 0x34, 0xFC, 0xF5, 0x83,
+ 0xE0, 0x54, 0xC0, 0x4D, 0xFD, 0x74, 0x14, 0x2F,
+ 0xF5, 0x82, 0xE4, 0x34, 0xFC, 0xF5, 0x83, 0xED,
+ 0xF0, 0x22, 0xAB, 0x07, 0xAA, 0x06, 0xED, 0x2B,
+ 0xFB, 0xE4, 0x3A, 0xFA, 0xC3, 0x90, 0x80, 0xDB,
+ 0xE0, 0x9B, 0x90, 0x80, 0xDA, 0xE0, 0x9A, 0x50,
+ 0x13, 0xA3, 0xE0, 0x24, 0x01, 0xFF, 0x90, 0x80,
+ 0xDA, 0xE0, 0x34, 0x00, 0xFE, 0xC3, 0xEB, 0x9F,
+ 0xFB, 0xEA, 0x9E, 0xFA, 0xEA, 0x90, 0xFD, 0x11,
+ 0xF0, 0xAF, 0x03, 0x74, 0x00, 0x2F, 0xF5, 0x82,
+ 0xE4, 0x34, 0xFB, 0xF5, 0x83, 0xE0, 0xFF, 0x22,
+ 0x12, 0x1F, 0xA4, 0xFF, 0x54, 0x01, 0xFE, 0x90,
+ 0x81, 0x42, 0xE0, 0x54, 0xFE, 0x4E, 0xF0, 0xEF,
+ 0xC3, 0x13, 0x30, 0xE0, 0x0A, 0x90, 0x00, 0x01,
+ 0x12, 0x1F, 0xBD, 0x90, 0x81, 0x43, 0xF0, 0x22,
+ 0x90, 0x81, 0x45, 0xE0, 0x30, 0xE0, 0x2D, 0x90,
+ 0x81, 0x48, 0xE0, 0x04, 0xF0, 0xE0, 0xFF, 0x90,
+ 0x81, 0x46, 0xE0, 0xB5, 0x07, 0x1E, 0x90, 0x06,
+ 0x92, 0xE0, 0x54, 0x1C, 0x70, 0x0B, 0x12, 0x4F,
+ 0x0B, 0x90, 0x81, 0x49, 0xE0, 0x04, 0xF0, 0x80,
+ 0x06, 0x90, 0x06, 0x92, 0x74, 0x1C, 0xF0, 0xE4,
+ 0x90, 0x81, 0x48, 0xF0, 0x22, 0x00, 0xBB, 0x8E,
+};
diff --git a/drivers/staging/rtl8188eu/hal/Hal8188EPwrSeq.c b/drivers/staging/rtl8188eu/hal/Hal8188EPwrSeq.c
new file mode 100644
index 0000000..fc23bf1
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/Hal8188EPwrSeq.c
@@ -0,0 +1,86 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#include "Hal8188EPwrSeq.h"
+#include <rtl8188e_hal.h>
+
+/*
+ * Drivers should parse the arrays below and perform the corresponding actions.
+ */
+/* 3 Power on Array */
+struct wl_pwr_cfg rtl8188E_power_on_flow[RTL8188E_TRANS_CARDEMU_TO_ACT_STEPS + RTL8188E_TRANS_END_STEPS] = {
+ RTL8188E_TRANS_CARDEMU_TO_ACT
+ RTL8188E_TRANS_END
+};
+
+/* 3Radio off Array */
+struct wl_pwr_cfg rtl8188E_radio_off_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + RTL8188E_TRANS_END_STEPS] = {
+ RTL8188E_TRANS_ACT_TO_CARDEMU
+ RTL8188E_TRANS_END
+};
+
+/* 3Card Disable Array */
+struct wl_pwr_cfg rtl8188E_card_disable_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS + RTL8188E_TRANS_END_STEPS] = {
+ RTL8188E_TRANS_ACT_TO_CARDEMU
+ RTL8188E_TRANS_CARDEMU_TO_CARDDIS
+ RTL8188E_TRANS_END
+};
+
+/* 3 Card Enable Array */
+struct wl_pwr_cfg rtl8188E_card_enable_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS + RTL8188E_TRANS_END_STEPS] = {
+ RTL8188E_TRANS_CARDDIS_TO_CARDEMU
+ RTL8188E_TRANS_CARDEMU_TO_ACT
+ RTL8188E_TRANS_END
+};
+
+/* 3Suspend Array */
+struct wl_pwr_cfg rtl8188E_suspend_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS + RTL8188E_TRANS_END_STEPS] = {
+ RTL8188E_TRANS_ACT_TO_CARDEMU
+ RTL8188E_TRANS_CARDEMU_TO_SUS
+ RTL8188E_TRANS_END
+};
+
+/* 3 Resume Array */
+struct wl_pwr_cfg rtl8188E_resume_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS + RTL8188E_TRANS_END_STEPS] = {
+ RTL8188E_TRANS_SUS_TO_CARDEMU
+ RTL8188E_TRANS_CARDEMU_TO_ACT
+ RTL8188E_TRANS_END
+};
+
+/* 3HWPDN Array */
+struct wl_pwr_cfg rtl8188E_hwpdn_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS + RTL8188E_TRANS_END_STEPS] = {
+ RTL8188E_TRANS_ACT_TO_CARDEMU
+ RTL8188E_TRANS_CARDEMU_TO_PDN
+ RTL8188E_TRANS_END
+};
+
+/* 3 Enter LPS */
+struct wl_pwr_cfg rtl8188E_enter_lps_flow[RTL8188E_TRANS_ACT_TO_LPS_STEPS + RTL8188E_TRANS_END_STEPS] = {
+ /* FW behavior */
+ RTL8188E_TRANS_ACT_TO_LPS
+ RTL8188E_TRANS_END
+};
+
+/* 3 Leave LPS */
+struct wl_pwr_cfg rtl8188E_leave_lps_flow[RTL8188E_TRANS_LPS_TO_ACT_STEPS + RTL8188E_TRANS_END_STEPS] = {
+ /* FW behavior */
+ RTL8188E_TRANS_LPS_TO_ACT
+ RTL8188E_TRANS_END
+};
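(Note: the flows above are plain data -- arrays of transition steps terminated by the RTL8188E_TRANS_END marker -- and the driver walks each flow step by step, applying it to the hardware. The C sketch below illustrates that walk under assumed, simplified definitions: struct pwr_step, the PWR_CMD_* values and run_pwr_flow() are stand-ins for illustration only, not the real layout from Hal8188EPwrSeq.h.)

/* Hypothetical, simplified step layout for illustration only. */
struct pwr_step {
	unsigned short offset;	/* register address */
	unsigned char  cmd;	/* PWR_CMD_* action */
	unsigned char  msk;	/* bits affected */
	unsigned char  value;	/* bits to program (under msk) */
};

enum { PWR_CMD_WRITE, PWR_CMD_POLLING, PWR_CMD_DELAY, PWR_CMD_END };

/* Walk one flow until its END step, using caller-supplied register I/O. */
static int run_pwr_flow(const struct pwr_step *flow,
			unsigned char (*rd8)(unsigned short),
			void (*wr8)(unsigned short, unsigned char))
{
	const struct pwr_step *step;

	for (step = flow; step->cmd != PWR_CMD_END; step++) {
		switch (step->cmd) {
		case PWR_CMD_WRITE:
			/* read-modify-write the masked bits */
			wr8(step->offset,
			    (rd8(step->offset) & ~step->msk) |
			    (step->value & step->msk));
			break;
		case PWR_CMD_POLLING: {
			int tries = 1000;

			/* wait for the masked bits to reach 'value' */
			while (--tries &&
			       (rd8(step->offset) & step->msk) !=
			       (step->value & step->msk))
				;
			if (!tries)
				return -1;
			break;
		}
		case PWR_CMD_DELAY:
			/* value would encode the delay; omitted here */
			break;
		}
	}
	return 0;
}

Keeping the sequences as tables lets the power-on, suspend, card-disable and other paths share a single interpreter instead of open-coding the register writes in each path.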
diff --git a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
new file mode 100644
index 0000000..aaa2617
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
@@ -0,0 +1,760 @@
+/*++
+Copyright (c) Realtek Semiconductor Corp. All rights reserved.
+
+Module Name:
+ RateAdaptive.c
+
+Abstract:
+ Implement Rate Adaptive functions for common operations.
+
+Major Change History:
+ When Who What
+ ---------- --------------- -------------------------------
+ 2011-08-12 Page Create.
+
+--*/
+#include "odm_precomp.h"
+
+/* Rate adaptive parameters */
+
+static u8 RETRY_PENALTY[PERENTRY][RETRYSIZE+1] = {
+ {5, 4, 3, 2, 0, 3}, /* 92 , idx = 0 */
+ {6, 5, 4, 3, 0, 4}, /* 86 , idx = 1 */
+ {6, 5, 4, 2, 0, 4}, /* 81 , idx = 2 */
+ {8, 7, 6, 4, 0, 6}, /* 75 , idx = 3 */
+ {10, 9, 8, 6, 0, 8}, /* 71 , idx = 4 */
+ {10, 9, 8, 4, 0, 8}, /* 66 , idx = 5 */
+ {10, 9, 8, 2, 0, 8}, /* 62 , idx = 6 */
+ {10, 9, 8, 0, 0, 8}, /* 59 , idx = 7 */
+ {18, 17, 16, 8, 0, 16}, /* 53 , idx = 8 */
+ {26, 25, 24, 16, 0, 24}, /* 50 , idx = 9 */
+ {34, 33, 32, 24, 0, 32}, /* 47 , idx = 0x0a */
+ {34, 31, 28, 20, 0, 32}, /* 43 , idx = 0x0b */
+ {34, 31, 27, 18, 0, 32}, /* 40 , idx = 0x0c */
+ {34, 31, 26, 16, 0, 32}, /* 37 , idx = 0x0d */
+ {34, 30, 22, 16, 0, 32}, /* 32 , idx = 0x0e */
+ {34, 30, 24, 16, 0, 32}, /* 26 , idx = 0x0f */
+ {49, 46, 40, 16, 0, 48}, /* 20 , idx = 0x10 */
+ {49, 45, 32, 0, 0, 48}, /* 17 , idx = 0x11 */
+ {49, 45, 22, 18, 0, 48}, /* 15 , idx = 0x12 */
+ {49, 40, 24, 16, 0, 48}, /* 12 , idx = 0x13 */
+ {49, 32, 18, 12, 0, 48}, /* 9 , idx = 0x14 */
+ {49, 22, 18, 14, 0, 48}, /* 6 , idx = 0x15 */
+ {49, 16, 16, 0, 0, 48}
+ }; /* 3, idx = 0x16 */
+
+static u8 PT_PENALTY[RETRYSIZE+1] = {34, 31, 30, 24, 0, 32};
+
+/* wilson modify */
+static u8 RETRY_PENALTY_IDX[2][RATESIZE] = {
+ {4, 4, 4, 5, 4, 4, 5, 7, 7, 7, 8, 0x0a, /* SS>TH */
+ 4, 4, 4, 4, 6, 0x0a, 0x0b, 0x0d,
+ 5, 5, 7, 7, 8, 0x0b, 0x0d, 0x0f}, /* 0329 R01 */
+ {0x0a, 0x0a, 0x0b, 0x0c, 0x0a,
+ 0x0a, 0x0b, 0x0c, 0x0d, 0x10, 0x13, 0x14, /* SS<TH */
+ 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x11, 0x13, 0x15,
+ 9, 9, 9, 9, 0x0c, 0x0e, 0x11, 0x13}
+ };
+
+static u8 RETRY_PENALTY_UP_IDX[RATESIZE] = {
+ 0x0c, 0x0d, 0x0d, 0x0f, 0x0d, 0x0e,
+ 0x0f, 0x0f, 0x10, 0x12, 0x13, 0x14, /* SS>TH */
+ 0x0f, 0x10, 0x10, 0x12, 0x12, 0x13, 0x14, 0x15,
+ 0x11, 0x11, 0x12, 0x13, 0x13, 0x13, 0x14, 0x15};
+
+static u8 RSSI_THRESHOLD[RATESIZE] = {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0x24, 0x26, 0x2a,
+ 0x18, 0x1a, 0x1d, 0x1f, 0x21, 0x27, 0x29, 0x2a,
+ 0, 0, 0, 0x1f, 0x23, 0x28, 0x2a, 0x2c};
+
+static u16 N_THRESHOLD_HIGH[RATESIZE] = {
+ 4, 4, 8, 16,
+ 24, 36, 48, 72, 96, 144, 192, 216,
+ 60, 80, 100, 160, 240, 400, 560, 640,
+ 300, 320, 480, 720, 1000, 1200, 1600, 2000};
+static u16 N_THRESHOLD_LOW[RATESIZE] = {
+ 2, 2, 4, 8,
+ 12, 18, 24, 36, 48, 72, 96, 108,
+ 30, 40, 50, 80, 120, 200, 280, 320,
+ 150, 160, 240, 360, 500, 600, 800, 1000};
+
+static u8 DROPING_NECESSARY[RATESIZE] = {
+ 1, 1, 1, 1,
+ 1, 2, 3, 4, 5, 6, 7, 8,
+ 1, 2, 3, 4, 5, 6, 7, 8,
+ 5, 6, 7, 8, 9, 10, 11, 12};
+
+static u8 PendingForRateUpFail[5] = {2, 10, 24, 40, 60};
+static u16 DynamicTxRPTTiming[6] = {
+ 0x186a, 0x30d4, 0x493e, 0x61a8, 0x7a12, 0x927c}; /* 200ms-1200ms */
+
+/* End Rate adaptive parameters */
+
+static void odm_SetTxRPTTiming_8188E(
+ struct odm_dm_struct *dm_odm,
+ struct odm_ra_info *pRaInfo,
+ u8 extend
+ )
+{
+ u8 idx = 0;
+
+ for (idx = 0; idx < 5; idx++)
+ if (DynamicTxRPTTiming[idx] == pRaInfo->RptTime)
+ break;
+
+ if (extend == 0) { /* back to default timing */
+ idx = 0; /* 200ms */
+ } else if (extend == 1) {/* increase the timing */
+ idx += 1;
+ if (idx > 5)
+ idx = 5;
+ } else if (extend == 2) {/* decrease the timing */
+ if (idx != 0)
+ idx -= 1;
+ }
+ pRaInfo->RptTime = DynamicTxRPTTiming[idx];
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, ("pRaInfo->RptTime = 0x%x\n", pRaInfo->RptTime));
+}
+
+static int odm_RateDown_8188E(struct odm_dm_struct *dm_odm, struct odm_ra_info *pRaInfo)
+{
+ u8 RateID, LowestRate, HighestRate;
+ u8 i;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE, ("=====>odm_RateDown_8188E()\n"));
+ if (NULL == pRaInfo) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, ("odm_RateDown_8188E(): pRaInfo is NULL\n"));
+ return -1;
+ }
+ RateID = pRaInfo->PreRate;
+ LowestRate = pRaInfo->LowestRate;
+ HighestRate = pRaInfo->HighestRate;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
+ (" RateID =%d LowestRate =%d HighestRate =%d RateSGI =%d\n",
+ RateID, LowestRate, HighestRate, pRaInfo->RateSGI));
+ if (RateID > HighestRate) {
+ RateID = HighestRate;
+ } else if (pRaInfo->RateSGI) {
+ pRaInfo->RateSGI = 0;
+ } else if (RateID > LowestRate) {
+ if (RateID > 0) {
+ for (i = RateID-1; i > LowestRate; i--) {
+ if (pRaInfo->RAUseRate & BIT(i)) {
+ RateID = i;
+ goto RateDownFinish;
+ }
+ }
+ }
+ } else if (RateID <= LowestRate) {
+ RateID = LowestRate;
+ }
+RateDownFinish:
+ if (pRaInfo->RAWaitingCounter == 1) {
+ pRaInfo->RAWaitingCounter += 1;
+ pRaInfo->RAPendingCounter += 1;
+ } else if (pRaInfo->RAWaitingCounter == 0) {
+ ;
+ } else {
+ pRaInfo->RAWaitingCounter = 0;
+ pRaInfo->RAPendingCounter = 0;
+ }
+
+ if (pRaInfo->RAPendingCounter >= 4)
+ pRaInfo->RAPendingCounter = 4;
+
+ pRaInfo->DecisionRate = RateID;
+ odm_SetTxRPTTiming_8188E(dm_odm, pRaInfo, 2);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, ("Rate down, RPT Timing default\n"));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE, ("RAWaitingCounter %d, RAPendingCounter %d", pRaInfo->RAWaitingCounter, pRaInfo->RAPendingCounter));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, ("Rate down to RateID %d RateSGI %d\n", RateID, pRaInfo->RateSGI));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE, ("<===== odm_RateDown_8188E()\n"));
+ return 0;
+}
+
+static int odm_RateUp_8188E(
+ struct odm_dm_struct *dm_odm,
+ struct odm_ra_info *pRaInfo
+ )
+{
+ u8 RateID, HighestRate;
+ u8 i;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE, ("=====>odm_RateUp_8188E()\n"));
+ if (NULL == pRaInfo) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, ("odm_RateUp_8188E(): pRaInfo is NULL\n"));
+ return -1;
+ }
+ RateID = pRaInfo->PreRate;
+ HighestRate = pRaInfo->HighestRate;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
+ (" RateID =%d HighestRate =%d\n",
+ RateID, HighestRate));
+ if (pRaInfo->RAWaitingCounter == 1) {
+ pRaInfo->RAWaitingCounter = 0;
+ pRaInfo->RAPendingCounter = 0;
+ } else if (pRaInfo->RAWaitingCounter > 1) {
+ pRaInfo->PreRssiStaRA = pRaInfo->RssiStaRA;
+ goto RateUpfinish;
+ }
+ odm_SetTxRPTTiming_8188E(dm_odm, pRaInfo, 0);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, ("odm_RateUp_8188E():Decrease RPT Timing\n"));
+
+ if (RateID < HighestRate) {
+ for (i = RateID+1; i <= HighestRate; i++) {
+ if (pRaInfo->RAUseRate & BIT(i)) {
+ RateID = i;
+ goto RateUpfinish;
+ }
+ }
+ } else if (RateID == HighestRate) {
+ if (pRaInfo->SGIEnable && (pRaInfo->RateSGI != 1))
+ pRaInfo->RateSGI = 1;
+ else if ((pRaInfo->SGIEnable) != 1)
+ pRaInfo->RateSGI = 0;
+ } else {
+ RateID = HighestRate;
+ }
+RateUpfinish:
+ if (pRaInfo->RAWaitingCounter == (4+PendingForRateUpFail[pRaInfo->RAPendingCounter]))
+ pRaInfo->RAWaitingCounter = 0;
+ else
+ pRaInfo->RAWaitingCounter++;
+
+ pRaInfo->DecisionRate = RateID;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, ("Rate up to RateID %d\n", RateID));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE, ("RAWaitingCounter %d, RAPendingCounter %d", pRaInfo->RAWaitingCounter, pRaInfo->RAPendingCounter));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE, ("<===== odm_RateUp_8188E()\n"));
+ return 0;
+}
+
+static void odm_ResetRaCounter_8188E(struct odm_ra_info *pRaInfo)
+{
+ u8 RateID;
+
+ RateID = pRaInfo->DecisionRate;
+ pRaInfo->NscUp = (N_THRESHOLD_HIGH[RateID]+N_THRESHOLD_LOW[RateID])>>1;
+ pRaInfo->NscDown = (N_THRESHOLD_HIGH[RateID]+N_THRESHOLD_LOW[RateID])>>1;
+}
+
+static void odm_RateDecision_8188E(struct odm_dm_struct *dm_odm,
+ struct odm_ra_info *pRaInfo
+ )
+{
+ u8 RateID = 0, RtyPtID = 0, PenaltyID1 = 0, PenaltyID2 = 0;
+ /* u32 pool_retry; */
+ static u8 DynamicTxRPTTimingCounter;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE, ("=====>odm_RateDecision_8188E()\n"));
+
+ if (pRaInfo->Active && (pRaInfo->TOTAL > 0)) { /* STA is in use and data packets exist */
+ if ((pRaInfo->RssiStaRA < (pRaInfo->PreRssiStaRA - 3)) ||
+ (pRaInfo->RssiStaRA > (pRaInfo->PreRssiStaRA + 3))) {
+ pRaInfo->RAWaitingCounter = 0;
+ pRaInfo->RAPendingCounter = 0;
+ }
+ /* Start RA decision */
+ if (pRaInfo->PreRate > pRaInfo->HighestRate)
+ RateID = pRaInfo->HighestRate;
+ else
+ RateID = pRaInfo->PreRate;
+ if (pRaInfo->RssiStaRA > RSSI_THRESHOLD[RateID])
+ RtyPtID = 0;
+ else
+ RtyPtID = 1;
+ PenaltyID1 = RETRY_PENALTY_IDX[RtyPtID][RateID]; /* TODO by page */
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
+ (" NscDown init is %d\n", pRaInfo->NscDown));
+ pRaInfo->NscDown += pRaInfo->RTY[0] * RETRY_PENALTY[PenaltyID1][0];
+ pRaInfo->NscDown += pRaInfo->RTY[1] * RETRY_PENALTY[PenaltyID1][1];
+ pRaInfo->NscDown += pRaInfo->RTY[2] * RETRY_PENALTY[PenaltyID1][2];
+ pRaInfo->NscDown += pRaInfo->RTY[3] * RETRY_PENALTY[PenaltyID1][3];
+ pRaInfo->NscDown += pRaInfo->RTY[4] * RETRY_PENALTY[PenaltyID1][4];
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
+ (" NscDown is %d, total*penalty[5] is %d\n",
+ pRaInfo->NscDown, (pRaInfo->TOTAL * RETRY_PENALTY[PenaltyID1][5])));
+ if (pRaInfo->NscDown > (pRaInfo->TOTAL * RETRY_PENALTY[PenaltyID1][5]))
+ pRaInfo->NscDown -= pRaInfo->TOTAL * RETRY_PENALTY[PenaltyID1][5];
+ else
+ pRaInfo->NscDown = 0;
+
+ /* rate up */
+ PenaltyID2 = RETRY_PENALTY_UP_IDX[RateID];
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
+ (" NscUp init is %d\n", pRaInfo->NscUp));
+ pRaInfo->NscUp += pRaInfo->RTY[0] * RETRY_PENALTY[PenaltyID2][0];
+ pRaInfo->NscUp += pRaInfo->RTY[1] * RETRY_PENALTY[PenaltyID2][1];
+ pRaInfo->NscUp += pRaInfo->RTY[2] * RETRY_PENALTY[PenaltyID2][2];
+ pRaInfo->NscUp += pRaInfo->RTY[3] * RETRY_PENALTY[PenaltyID2][3];
+ pRaInfo->NscUp += pRaInfo->RTY[4] * RETRY_PENALTY[PenaltyID2][4];
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
+ ("NscUp is %d, total*up[5] is %d\n",
+ pRaInfo->NscUp, (pRaInfo->TOTAL * RETRY_PENALTY[PenaltyID2][5])));
+ if (pRaInfo->NscUp > (pRaInfo->TOTAL * RETRY_PENALTY[PenaltyID2][5]))
+ pRaInfo->NscUp -= pRaInfo->TOTAL * RETRY_PENALTY[PenaltyID2][5];
+ else
+ pRaInfo->NscUp = 0;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE|ODM_COMP_INIT, ODM_DBG_LOUD,
+ (" RssiStaRa = %d RtyPtID =%d PenaltyID1 = 0x%x PenaltyID2 = 0x%x RateID =%d NscDown =%d NscUp =%d SGI =%d\n",
+ pRaInfo->RssiStaRA, RtyPtID, PenaltyID1, PenaltyID2, RateID, pRaInfo->NscDown, pRaInfo->NscUp, pRaInfo->RateSGI));
+ if ((pRaInfo->NscDown < N_THRESHOLD_LOW[RateID]) ||
+ (pRaInfo->DROP > DROPING_NECESSARY[RateID]))
+ odm_RateDown_8188E(dm_odm, pRaInfo);
+ else if (pRaInfo->NscUp > N_THRESHOLD_HIGH[RateID])
+ odm_RateUp_8188E(dm_odm, pRaInfo);
+
+ if (pRaInfo->DecisionRate > pRaInfo->HighestRate)
+ pRaInfo->DecisionRate = pRaInfo->HighestRate;
+
+ if ((pRaInfo->DecisionRate) == (pRaInfo->PreRate))
+ DynamicTxRPTTimingCounter += 1;
+ else
+ DynamicTxRPTTimingCounter = 0;
+
+ if (DynamicTxRPTTimingCounter >= 4) {
+ odm_SetTxRPTTiming_8188E(dm_odm, pRaInfo, 1);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE,
+ ODM_DBG_LOUD, ("<===== Rate unchanged for 4 reports, extend RPT Timing\n"));
+ DynamicTxRPTTimingCounter = 0;
+ }
+
+ pRaInfo->PreRate = pRaInfo->DecisionRate; /* YJ, add, 120120 */
+
+ odm_ResetRaCounter_8188E(pRaInfo);
+ }
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE, ("<===== odm_RateDecision_8188E()\n"));
+}
+
+static int odm_ARFBRefresh_8188E(struct odm_dm_struct *dm_odm, struct odm_ra_info *pRaInfo)
+{ /* Wilson 2011/10/26 */
+ u32 MaskFromReg;
+ s8 i;
+
+ switch (pRaInfo->RateID) {
+ case RATR_INX_WIRELESS_NGB:
+ pRaInfo->RAUseRate = (pRaInfo->RateMask)&0x0f8ff015;
+ break;
+ case RATR_INX_WIRELESS_NG:
+ pRaInfo->RAUseRate = (pRaInfo->RateMask)&0x0f8ff010;
+ break;
+ case RATR_INX_WIRELESS_NB:
+ pRaInfo->RAUseRate = (pRaInfo->RateMask)&0x0f8ff005;
+ break;
+ case RATR_INX_WIRELESS_N:
+ pRaInfo->RAUseRate = (pRaInfo->RateMask)&0x0f8ff000;
+ break;
+ case RATR_INX_WIRELESS_GB:
+ pRaInfo->RAUseRate = (pRaInfo->RateMask)&0x00000ff5;
+ break;
+ case RATR_INX_WIRELESS_G:
+ pRaInfo->RAUseRate = (pRaInfo->RateMask)&0x00000ff0;
+ break;
+ case RATR_INX_WIRELESS_B:
+ pRaInfo->RAUseRate = (pRaInfo->RateMask)&0x0000000d;
+ break;
+ case 12:
+ MaskFromReg = ODM_Read4Byte(dm_odm, REG_ARFR0);
+ pRaInfo->RAUseRate = (pRaInfo->RateMask)&MaskFromReg;
+ break;
+ case 13:
+ MaskFromReg = ODM_Read4Byte(dm_odm, REG_ARFR1);
+ pRaInfo->RAUseRate = (pRaInfo->RateMask)&MaskFromReg;
+ break;
+ case 14:
+ MaskFromReg = ODM_Read4Byte(dm_odm, REG_ARFR2);
+ pRaInfo->RAUseRate = (pRaInfo->RateMask)&MaskFromReg;
+ break;
+ case 15:
+ MaskFromReg = ODM_Read4Byte(dm_odm, REG_ARFR3);
+ pRaInfo->RAUseRate = (pRaInfo->RateMask)&MaskFromReg;
+ break;
+ default:
+ pRaInfo->RAUseRate = (pRaInfo->RateMask);
+ break;
+ }
+ /* Highest rate */
+ if (pRaInfo->RAUseRate) {
+ for (i = RATESIZE; i >= 0; i--) {
+ if ((pRaInfo->RAUseRate)&BIT(i)) {
+ pRaInfo->HighestRate = i;
+ break;
+ }
+ }
+ } else {
+ pRaInfo->HighestRate = 0;
+ }
+ /* Lowest rate */
+ if (pRaInfo->RAUseRate) {
+ for (i = 0; i < RATESIZE; i++) {
+ if ((pRaInfo->RAUseRate) & BIT(i)) {
+ pRaInfo->LowestRate = i;
+ break;
+ }
+ }
+ } else {
+ pRaInfo->LowestRate = 0;
+ }
+ if (pRaInfo->HighestRate > 0x13)
+ pRaInfo->PTModeSS = 3;
+ else if (pRaInfo->HighestRate > 0x0b)
+ pRaInfo->PTModeSS = 2;
+ else if (pRaInfo->HighestRate > 0x03)
+ pRaInfo->PTModeSS = 1;
+ else
+ pRaInfo->PTModeSS = 0;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
+ ("ODM_ARFBRefresh_8188E(): PTModeSS =%d\n", pRaInfo->PTModeSS));
+
+ if (pRaInfo->DecisionRate > pRaInfo->HighestRate)
+ pRaInfo->DecisionRate = pRaInfo->HighestRate;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
+ ("ODM_ARFBRefresh_8188E(): RateID =%d RateMask =%8.8x RAUseRate =%8.8x HighestRate =%d, DecisionRate =%d\n",
+ pRaInfo->RateID, pRaInfo->RateMask, pRaInfo->RAUseRate, pRaInfo->HighestRate, pRaInfo->DecisionRate));
+ return 0;
+}
+
+static void odm_PTTryState_8188E(struct odm_ra_info *pRaInfo)
+{
+ pRaInfo->PTTryState = 0;
+ switch (pRaInfo->PTModeSS) {
+ case 3:
+ if (pRaInfo->DecisionRate >= 0x19)
+ pRaInfo->PTTryState = 1;
+ break;
+ case 2:
+ if (pRaInfo->DecisionRate >= 0x11)
+ pRaInfo->PTTryState = 1;
+ break;
+ case 1:
+ if (pRaInfo->DecisionRate >= 0x0a)
+ pRaInfo->PTTryState = 1;
+ break;
+ case 0:
+ if (pRaInfo->DecisionRate >= 0x03)
+ pRaInfo->PTTryState = 1;
+ break;
+ default:
+ pRaInfo->PTTryState = 0;
+ break;
+ }
+
+ if (pRaInfo->RssiStaRA < 48) {
+ pRaInfo->PTStage = 0;
+ } else if (pRaInfo->PTTryState == 1) {
+ if ((pRaInfo->PTStopCount >= 10) ||
+ (pRaInfo->PTPreRssi > pRaInfo->RssiStaRA + 5) ||
+ (pRaInfo->PTPreRssi < pRaInfo->RssiStaRA - 5) ||
+ (pRaInfo->DecisionRate != pRaInfo->PTPreRate)) {
+ if (pRaInfo->PTStage == 0)
+ pRaInfo->PTStage = 1;
+ else if (pRaInfo->PTStage == 1)
+ pRaInfo->PTStage = 3;
+ else
+ pRaInfo->PTStage = 5;
+
+ pRaInfo->PTPreRssi = pRaInfo->RssiStaRA;
+ pRaInfo->PTStopCount = 0;
+ } else {
+ pRaInfo->RAstage = 0;
+ pRaInfo->PTStopCount++;
+ }
+ } else {
+ pRaInfo->PTStage = 0;
+ pRaInfo->RAstage = 0;
+ }
+ pRaInfo->PTPreRate = pRaInfo->DecisionRate;
+}
+
+static void odm_PTDecision_8188E(struct odm_ra_info *pRaInfo)
+{
+ u8 j;
+ u8 temp_stage;
+ u32 numsc;
+ u32 num_total;
+ u8 stage_id;
+
+ numsc = 0;
+ num_total = pRaInfo->TOTAL * PT_PENALTY[5];
+ for (j = 0; j <= 4; j++) {
+ numsc += pRaInfo->RTY[j] * PT_PENALTY[j];
+ if (numsc > num_total)
+ break;
+ }
+
+ j = j >> 1;
+ temp_stage = (pRaInfo->PTStage + 1) >> 1;
+ if (temp_stage > j)
+ stage_id = temp_stage-j;
+ else
+ stage_id = 0;
+
+ pRaInfo->PTSmoothFactor = (pRaInfo->PTSmoothFactor>>1) + (pRaInfo->PTSmoothFactor>>2) + stage_id*16+2;
+ if (pRaInfo->PTSmoothFactor > 192)
+ pRaInfo->PTSmoothFactor = 192;
+ stage_id = pRaInfo->PTSmoothFactor >> 6;
+ temp_stage = stage_id*2;
+ if (temp_stage != 0)
+ temp_stage -= 1;
+ if (pRaInfo->DROP > 3)
+ temp_stage = 0;
+ pRaInfo->PTStage = temp_stage;
+}
+
+static void
+odm_RATxRPTTimerSetting(
+ struct odm_dm_struct *dm_odm,
+ u16 minRptTime
+)
+{
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE, (" =====>odm_RATxRPTTimerSetting()\n"));
+
+ if (dm_odm->CurrminRptTime != minRptTime) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
+ (" CurrminRptTime = 0x%04x minRptTime = 0x%04x\n", dm_odm->CurrminRptTime, minRptTime));
+ rtw_rpt_timer_cfg_cmd(dm_odm->Adapter, minRptTime);
+ dm_odm->CurrminRptTime = minRptTime;
+ }
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE, (" <===== odm_RATxRPTTimerSetting()\n"));
+}
+
+void
+ODM_RASupport_Init(
+ struct odm_dm_struct *dm_odm
+ )
+{
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, ("=====>ODM_RASupport_Init()\n"));
+
+ /* 2012/02/14 MH Be noticed, the init must be after IC type is recognized!!!!! */
+ if (dm_odm->SupportICType == ODM_RTL8188E)
+ dm_odm->RaSupport88E = true;
+}
+
+int ODM_RAInfo_Init(struct odm_dm_struct *dm_odm, u8 macid)
+{
+ struct odm_ra_info *pRaInfo = &dm_odm->RAInfo[macid];
+ u8 WirelessMode = 0xFF; /* invalid value */
+ u8 max_rate_idx = 0x13; /* MCS7 */
+ if (dm_odm->pWirelessMode != NULL)
+ WirelessMode = *(dm_odm->pWirelessMode);
+
+ if (WirelessMode != 0xFF) {
+ if (WirelessMode & ODM_WM_N24G)
+ max_rate_idx = 0x13;
+ else if (WirelessMode & ODM_WM_G)
+ max_rate_idx = 0x0b;
+ else if (WirelessMode & ODM_WM_B)
+ max_rate_idx = 0x03;
+ }
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
+ ("ODM_RAInfo_Init(): WirelessMode:0x%08x, max_rate_idx:0x%02x\n",
+ WirelessMode, max_rate_idx));
+
+ pRaInfo->DecisionRate = max_rate_idx;
+ pRaInfo->PreRate = max_rate_idx;
+ pRaInfo->HighestRate = max_rate_idx;
+ pRaInfo->LowestRate = 0;
+ pRaInfo->RateID = 0;
+ pRaInfo->RateMask = 0xffffffff;
+ pRaInfo->RssiStaRA = 0;
+ pRaInfo->PreRssiStaRA = 0;
+ pRaInfo->SGIEnable = 0;
+ pRaInfo->RAUseRate = 0xffffffff;
+ pRaInfo->NscDown = (N_THRESHOLD_HIGH[0x13]+N_THRESHOLD_LOW[0x13])/2;
+ pRaInfo->NscUp = (N_THRESHOLD_HIGH[0x13]+N_THRESHOLD_LOW[0x13])/2;
+ pRaInfo->RateSGI = 0;
+ pRaInfo->Active = 1; /* Active is not used at present. by page, 110819 */
+ pRaInfo->RptTime = 0x927c;
+ pRaInfo->DROP = 0;
+ pRaInfo->RTY[0] = 0;
+ pRaInfo->RTY[1] = 0;
+ pRaInfo->RTY[2] = 0;
+ pRaInfo->RTY[3] = 0;
+ pRaInfo->RTY[4] = 0;
+ pRaInfo->TOTAL = 0;
+ pRaInfo->RAWaitingCounter = 0;
+ pRaInfo->RAPendingCounter = 0;
+ pRaInfo->PTActive = 1; /* Active when this STA is use */
+ pRaInfo->PTTryState = 0;
+ pRaInfo->PTStage = 5; /* Need to fill into HW_PWR_STATUS */
+ pRaInfo->PTSmoothFactor = 192;
+ pRaInfo->PTStopCount = 0;
+ pRaInfo->PTPreRate = 0;
+ pRaInfo->PTPreRssi = 0;
+ pRaInfo->PTModeSS = 0;
+ pRaInfo->RAstage = 0;
+ return 0;
+}
+
+int ODM_RAInfo_Init_all(struct odm_dm_struct *dm_odm)
+{
+ u8 macid = 0;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, ("=====>\n"));
+ dm_odm->CurrminRptTime = 0;
+
+ for (macid = 0; macid < ODM_ASSOCIATE_ENTRY_NUM; macid++)
+ ODM_RAInfo_Init(dm_odm, macid);
+
+ return 0;
+}
+
+u8 ODM_RA_GetShortGI_8188E(struct odm_dm_struct *dm_odm, u8 macid)
+{
+ if ((NULL == dm_odm) || (macid >= ASSOCIATE_ENTRY_NUM))
+ return 0;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
+ ("macid =%d SGI =%d\n", macid, dm_odm->RAInfo[macid].RateSGI));
+ return dm_odm->RAInfo[macid].RateSGI;
+}
+
+u8 ODM_RA_GetDecisionRate_8188E(struct odm_dm_struct *dm_odm, u8 macid)
+{
+ u8 DecisionRate = 0;
+
+ if ((NULL == dm_odm) || (macid >= ASSOCIATE_ENTRY_NUM))
+ return 0;
+ DecisionRate = (dm_odm->RAInfo[macid].DecisionRate);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
+ (" macid =%d DecisionRate = 0x%x\n", macid, DecisionRate));
+ return DecisionRate;
+}
+
+u8 ODM_RA_GetHwPwrStatus_8188E(struct odm_dm_struct *dm_odm, u8 macid)
+{
+ u8 PTStage = 5;
+
+ if ((NULL == dm_odm) || (macid >= ASSOCIATE_ENTRY_NUM))
+ return 0;
+ PTStage = (dm_odm->RAInfo[macid].PTStage);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
+ ("macid =%d PTStage = 0x%x\n", macid, PTStage));
+ return PTStage;
+}
+
+void ODM_RA_UpdateRateInfo_8188E(struct odm_dm_struct *dm_odm, u8 macid, u8 RateID, u32 RateMask, u8 SGIEnable)
+{
+ struct odm_ra_info *pRaInfo = NULL;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
+ ("macid =%d RateID = 0x%x RateMask = 0x%x SGIEnable =%d\n",
+ macid, RateID, RateMask, SGIEnable));
+ if ((NULL == dm_odm) || (macid >= ASSOCIATE_ENTRY_NUM))
+ return;
+
+ pRaInfo = &(dm_odm->RAInfo[macid]);
+ pRaInfo->RateID = RateID;
+ pRaInfo->RateMask = RateMask;
+ pRaInfo->SGIEnable = SGIEnable;
+ odm_ARFBRefresh_8188E(dm_odm, pRaInfo);
+}
+
+void ODM_RA_SetRSSI_8188E(struct odm_dm_struct *dm_odm, u8 macid, u8 Rssi)
+{
+ struct odm_ra_info *pRaInfo = NULL;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
+ (" macid =%d Rssi =%d\n", macid, Rssi));
+ if ((NULL == dm_odm) || (macid >= ASSOCIATE_ENTRY_NUM))
+ return;
+
+ pRaInfo = &(dm_odm->RAInfo[macid]);
+ pRaInfo->RssiStaRA = Rssi;
+}
+
+void ODM_RA_Set_TxRPT_Time(struct odm_dm_struct *dm_odm, u16 minRptTime)
+{
+ ODM_Write2Byte(dm_odm, REG_TX_RPT_TIME, minRptTime);
+}
+
+void ODM_RA_TxRPT2Handle_8188E(struct odm_dm_struct *dm_odm, u8 *TxRPT_Buf, u16 TxRPT_Len, u32 macid_entry0, u32 macid_entry1)
+{
+ struct odm_ra_info *pRAInfo = NULL;
+ u8 MacId = 0;
+ u8 *pBuffer = NULL;
+ u32 valid = 0, ItemNum = 0;
+ u16 minRptTime = 0x927c;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
+ ("=====>ODM_RA_TxRPT2Handle_8188E(): valid0 =%d valid1 =%d BufferLength =%d\n",
+ macid_entry0, macid_entry1, TxRPT_Len));
+
+ ItemNum = TxRPT_Len >> 3;
+ pBuffer = TxRPT_Buf;
+
+ do {
+ if (MacId >= ASSOCIATE_ENTRY_NUM)
+ valid = 0;
+ else if (MacId >= 32)
+ valid = (1 << (MacId - 32)) & macid_entry1;
+ else
+ valid = (1 << MacId) & macid_entry0;
+
+ pRAInfo = &(dm_odm->RAInfo[MacId]);
+ if (valid) {
+ pRAInfo->RTY[0] = (u16)GET_TX_REPORT_TYPE1_RERTY_0(pBuffer);
+ pRAInfo->RTY[1] = (u16)GET_TX_REPORT_TYPE1_RERTY_1(pBuffer);
+ pRAInfo->RTY[2] = (u16)GET_TX_REPORT_TYPE1_RERTY_2(pBuffer);
+ pRAInfo->RTY[3] = (u16)GET_TX_REPORT_TYPE1_RERTY_3(pBuffer);
+ pRAInfo->RTY[4] = (u16)GET_TX_REPORT_TYPE1_RERTY_4(pBuffer);
+ pRAInfo->DROP = (u16)GET_TX_REPORT_TYPE1_DROP_0(pBuffer);
+ pRAInfo->TOTAL = pRAInfo->RTY[0] + pRAInfo->RTY[1] +
+ pRAInfo->RTY[2] + pRAInfo->RTY[3] +
+ pRAInfo->RTY[4] + pRAInfo->DROP;
+ if (pRAInfo->TOTAL != 0) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
+ ("macid =%d Total =%d R0 =%d R1 =%d R2 =%d R3 =%d R4 =%d D0 =%d valid0 =%x valid1 =%x\n",
+ MacId, pRAInfo->TOTAL,
+ pRAInfo->RTY[0], pRAInfo->RTY[1],
+ pRAInfo->RTY[2], pRAInfo->RTY[3],
+ pRAInfo->RTY[4], pRAInfo->DROP,
+ macid_entry0, macid_entry1));
+ if (pRAInfo->PTActive) {
+ if (pRAInfo->RAstage < 5)
+ odm_RateDecision_8188E(dm_odm, pRAInfo);
+ else if (pRAInfo->RAstage == 5) /* Power training try state */
+ odm_PTTryState_8188E(pRAInfo);
+ else /* RAstage == 6 */
+ odm_PTDecision_8188E(pRAInfo);
+
+ /* Stage_RA counter */
+ if (pRAInfo->RAstage <= 5)
+ pRAInfo->RAstage++;
+ else
+ pRAInfo->RAstage = 0;
+ } else {
+ odm_RateDecision_8188E(dm_odm, pRAInfo);
+ }
+ ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD,
+ ("macid =%d R0 =%d R1 =%d R2 =%d R3 =%d R4 =%d drop =%d valid0 =%x RateID =%d SGI =%d\n",
+ MacId,
+ pRAInfo->RTY[0],
+ pRAInfo->RTY[1],
+ pRAInfo->RTY[2],
+ pRAInfo->RTY[3],
+ pRAInfo->RTY[4],
+ pRAInfo->DROP,
+ macid_entry0,
+ pRAInfo->DecisionRate,
+ pRAInfo->RateSGI));
+ } else {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, (" TOTAL = 0!!!!\n"));
+ }
+ }
+
+ if (minRptTime > pRAInfo->RptTime)
+ minRptTime = pRAInfo->RptTime;
+
+ pBuffer += TX_RPT2_ITEM_SIZE;
+ MacId++;
+ } while (MacId < ItemNum);
+
+ odm_RATxRPTTimerSetting(dm_odm, minRptTime);
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, ("<===== ODM_RA_TxRPT2Handle_8188E()\n"));
+}
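(Note: the core of odm_RateDecision_8188E() above is a pair of saturating counters, NscDown and NscUp. Each TX report adds the five retry buckets weighted by one RETRY_PENALTY[] row, then subtracts a per-packet baseline (entry [5] of the row); the rate moves down when NscDown ends up below N_THRESHOLD_LOW[RateID] or the drop count exceeds DROPING_NECESSARY[RateID], and moves up when NscUp exceeds N_THRESHOLD_HIGH[RateID]. The helper below condenses that bookkeeping as an illustration; it does not exist in the driver, and the parameter types are stand-ins.)

/* Illustration only: the NscDown/NscUp update from odm_RateDecision_8188E(),
 * folded into one helper.  'penalty' is one 6-entry RETRY_PENALTY[] row,
 * 'rty' the five retry buckets and 'total' the report total. */
static unsigned int accumulate_nsc(unsigned int nsc,
				   const unsigned char penalty[6],
				   const unsigned short rty[5],
				   unsigned int total)
{
	int i;

	/* weight each retry bucket by its penalty */
	for (i = 0; i < 5; i++)
		nsc += rty[i] * penalty[i];

	/* penalty[5] is the per-packet baseline: a clean report
	 * (few retries) drags the counter back toward zero */
	if (nsc > total * penalty[5])
		nsc -= total * penalty[5];
	else
		nsc = 0;

	return nsc;
}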
diff --git a/drivers/staging/rtl8188eu/hal/HalHWImg8188E_BB.c b/drivers/staging/rtl8188eu/hal/HalHWImg8188E_BB.c
new file mode 100644
index 0000000..787e8f1
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/HalHWImg8188E_BB.c
@@ -0,0 +1,721 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along with
+* this program; if not, write to the Free Software Foundation, Inc.,
+* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+*
+*
+******************************************************************************/
+
+#include "odm_precomp.h"
+
+#include <rtw_iol.h>
+
+#define read_next_pair(array, v1, v2, i) \
+ do { \
+ i += 2; \
+ v1 = array[i]; \
+ v2 = array[i+1]; \
+ } while (0)
+
+static bool CheckCondition(const u32 condition, const u32 hex)
+{
+ u32 _board = (hex & 0x000000FF);
+ u32 _interface = (hex & 0x0000FF00) >> 8;
+ u32 _platform = (hex & 0x00FF0000) >> 16;
+ u32 cond = condition;
+
+ if (condition == 0xCDCDCDCD)
+ return true;
+
+ cond = condition & 0x000000FF;
+ if ((_board == cond) && cond != 0x00)
+ return false;
+
+ cond = condition & 0x0000FF00;
+ cond = cond >> 8;
+ if ((_interface & cond) == 0 && cond != 0x07)
+ return false;
+
+ cond = condition & 0x00FF0000;
+ cond = cond >> 16;
+ if ((_platform & cond) == 0 && cond != 0x0F)
+ return false;
+ return true;
+}
+
+
+/******************************************************************************
+* AGC_TAB_1T.TXT
+******************************************************************************/
+
+static u32 array_agc_tab_1t_8188e[] = {
+ 0xC78, 0xFB000001,
+ 0xC78, 0xFB010001,
+ 0xC78, 0xFB020001,
+ 0xC78, 0xFB030001,
+ 0xC78, 0xFB040001,
+ 0xC78, 0xFB050001,
+ 0xC78, 0xFA060001,
+ 0xC78, 0xF9070001,
+ 0xC78, 0xF8080001,
+ 0xC78, 0xF7090001,
+ 0xC78, 0xF60A0001,
+ 0xC78, 0xF50B0001,
+ 0xC78, 0xF40C0001,
+ 0xC78, 0xF30D0001,
+ 0xC78, 0xF20E0001,
+ 0xC78, 0xF10F0001,
+ 0xC78, 0xF0100001,
+ 0xC78, 0xEF110001,
+ 0xC78, 0xEE120001,
+ 0xC78, 0xED130001,
+ 0xC78, 0xEC140001,
+ 0xC78, 0xEB150001,
+ 0xC78, 0xEA160001,
+ 0xC78, 0xE9170001,
+ 0xC78, 0xE8180001,
+ 0xC78, 0xE7190001,
+ 0xC78, 0xE61A0001,
+ 0xC78, 0xE51B0001,
+ 0xC78, 0xE41C0001,
+ 0xC78, 0xE31D0001,
+ 0xC78, 0xE21E0001,
+ 0xC78, 0xE11F0001,
+ 0xC78, 0x8A200001,
+ 0xC78, 0x89210001,
+ 0xC78, 0x88220001,
+ 0xC78, 0x87230001,
+ 0xC78, 0x86240001,
+ 0xC78, 0x85250001,
+ 0xC78, 0x84260001,
+ 0xC78, 0x83270001,
+ 0xC78, 0x82280001,
+ 0xC78, 0x6B290001,
+ 0xC78, 0x6A2A0001,
+ 0xC78, 0x692B0001,
+ 0xC78, 0x682C0001,
+ 0xC78, 0x672D0001,
+ 0xC78, 0x662E0001,
+ 0xC78, 0x652F0001,
+ 0xC78, 0x64300001,
+ 0xC78, 0x63310001,
+ 0xC78, 0x62320001,
+ 0xC78, 0x61330001,
+ 0xC78, 0x46340001,
+ 0xC78, 0x45350001,
+ 0xC78, 0x44360001,
+ 0xC78, 0x43370001,
+ 0xC78, 0x42380001,
+ 0xC78, 0x41390001,
+ 0xC78, 0x403A0001,
+ 0xC78, 0x403B0001,
+ 0xC78, 0x403C0001,
+ 0xC78, 0x403D0001,
+ 0xC78, 0x403E0001,
+ 0xC78, 0x403F0001,
+ 0xC78, 0xFB400001,
+ 0xC78, 0xFB410001,
+ 0xC78, 0xFB420001,
+ 0xC78, 0xFB430001,
+ 0xC78, 0xFB440001,
+ 0xC78, 0xFB450001,
+ 0xC78, 0xFB460001,
+ 0xC78, 0xFB470001,
+ 0xC78, 0xFB480001,
+ 0xC78, 0xFA490001,
+ 0xC78, 0xF94A0001,
+ 0xC78, 0xF84B0001,
+ 0xC78, 0xF74C0001,
+ 0xC78, 0xF64D0001,
+ 0xC78, 0xF54E0001,
+ 0xC78, 0xF44F0001,
+ 0xC78, 0xF3500001,
+ 0xC78, 0xF2510001,
+ 0xC78, 0xF1520001,
+ 0xC78, 0xF0530001,
+ 0xC78, 0xEF540001,
+ 0xC78, 0xEE550001,
+ 0xC78, 0xED560001,
+ 0xC78, 0xEC570001,
+ 0xC78, 0xEB580001,
+ 0xC78, 0xEA590001,
+ 0xC78, 0xE95A0001,
+ 0xC78, 0xE85B0001,
+ 0xC78, 0xE75C0001,
+ 0xC78, 0xE65D0001,
+ 0xC78, 0xE55E0001,
+ 0xC78, 0xE45F0001,
+ 0xC78, 0xE3600001,
+ 0xC78, 0xE2610001,
+ 0xC78, 0xC3620001,
+ 0xC78, 0xC2630001,
+ 0xC78, 0xC1640001,
+ 0xC78, 0x8B650001,
+ 0xC78, 0x8A660001,
+ 0xC78, 0x89670001,
+ 0xC78, 0x88680001,
+ 0xC78, 0x87690001,
+ 0xC78, 0x866A0001,
+ 0xC78, 0x856B0001,
+ 0xC78, 0x846C0001,
+ 0xC78, 0x676D0001,
+ 0xC78, 0x666E0001,
+ 0xC78, 0x656F0001,
+ 0xC78, 0x64700001,
+ 0xC78, 0x63710001,
+ 0xC78, 0x62720001,
+ 0xC78, 0x61730001,
+ 0xC78, 0x60740001,
+ 0xC78, 0x46750001,
+ 0xC78, 0x45760001,
+ 0xC78, 0x44770001,
+ 0xC78, 0x43780001,
+ 0xC78, 0x42790001,
+ 0xC78, 0x417A0001,
+ 0xC78, 0x407B0001,
+ 0xC78, 0x407C0001,
+ 0xC78, 0x407D0001,
+ 0xC78, 0x407E0001,
+ 0xC78, 0x407F0001,
+};
+
+enum HAL_STATUS ODM_ReadAndConfig_AGC_TAB_1T_8188E(struct odm_dm_struct *dm_odm)
+{
+ u32 hex = 0;
+ u32 i = 0;
+ u8 platform = dm_odm->SupportPlatform;
+ u8 interfaceValue = dm_odm->SupportInterface;
+ u8 board = dm_odm->BoardType;
+ u32 arraylen = sizeof(array_agc_tab_1t_8188e)/sizeof(u32);
+ u32 *array = array_agc_tab_1t_8188e;
+ bool biol = false;
+ struct adapter *adapter = dm_odm->Adapter;
+ struct xmit_frame *pxmit_frame = NULL;
+ u8 bndy_cnt = 1;
+ enum HAL_STATUS rst = HAL_STATUS_SUCCESS;
+
+ hex += board;
+ hex += interfaceValue << 8;
+ hex += platform << 16;
+ hex += 0xFF000000;
+ biol = rtw_IOL_applied(adapter);
+
+ if (biol) {
+ pxmit_frame = rtw_IOL_accquire_xmit_frame(adapter);
+ if (pxmit_frame == NULL) {
+ pr_info("rtw_IOL_accquire_xmit_frame failed\n");
+ return HAL_STATUS_FAILURE;
+ }
+ }
+
+ for (i = 0; i < arraylen; i += 2) {
+ u32 v1 = array[i];
+ u32 v2 = array[i+1];
+
+ /* This (offset, data) pair meets the condition. */
+ if (v1 < 0xCDCDCDCD) {
+ if (biol) {
+ if (rtw_IOL_cmd_boundary_handle(pxmit_frame))
+ bndy_cnt++;
+ rtw_IOL_append_WD_cmd(pxmit_frame, (u16)v1, v2, bMaskDWord);
+ } else {
+ odm_ConfigBB_AGC_8188E(dm_odm, v1, bMaskDWord, v2);
+ }
+ continue;
+ } else {
+ /* This line is the start line of branch. */
+ if (!CheckCondition(array[i], hex)) {
+ /* Discard the following (offset, data) pairs. */
+ read_next_pair(array, v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylen - 2)
+ read_next_pair(array, v1, v2, i);
+ i -= 2; /* prevent from for-loop += 2 */
+ } else { /* Configure matched pairs and skip to end of if-else. */
+ read_next_pair(array, v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylen - 2) {
+ if (biol) {
+ if (rtw_IOL_cmd_boundary_handle(pxmit_frame))
+ bndy_cnt++;
+ rtw_IOL_append_WD_cmd(pxmit_frame, (u16)v1, v2, bMaskDWord);
+ } else {
+ odm_ConfigBB_AGC_8188E(dm_odm, v1, bMaskDWord, v2);
+ }
+ read_next_pair(array, v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < arraylen - 2)
+ read_next_pair(array, v1, v2, i);
+ }
+ }
+ }
+ if (biol) {
+ if (!rtw_IOL_exec_cmds_sync(dm_odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
+ printk("~~~ %s IOL_exec_cmds Failed !!!\n", __func__);
+ rst = HAL_STATUS_FAILURE;
+ }
+ }
+ return rst;
+}
+
+/******************************************************************************
+* PHY_REG_1T.TXT
+******************************************************************************/
+
+static u32 array_phy_reg_1t_8188e[] = {
+ 0x800, 0x80040000,
+ 0x804, 0x00000003,
+ 0x808, 0x0000FC00,
+ 0x80C, 0x0000000A,
+ 0x810, 0x10001331,
+ 0x814, 0x020C3D10,
+ 0x818, 0x02200385,
+ 0x81C, 0x00000000,
+ 0x820, 0x01000100,
+ 0x824, 0x00390204,
+ 0x828, 0x00000000,
+ 0x82C, 0x00000000,
+ 0x830, 0x00000000,
+ 0x834, 0x00000000,
+ 0x838, 0x00000000,
+ 0x83C, 0x00000000,
+ 0x840, 0x00010000,
+ 0x844, 0x00000000,
+ 0x848, 0x00000000,
+ 0x84C, 0x00000000,
+ 0x850, 0x00000000,
+ 0x854, 0x00000000,
+ 0x858, 0x569A11A9,
+ 0x85C, 0x01000014,
+ 0x860, 0x66F60110,
+ 0x864, 0x061F0649,
+ 0x868, 0x00000000,
+ 0x86C, 0x27272700,
+ 0x870, 0x07000760,
+ 0x874, 0x25004000,
+ 0x878, 0x00000808,
+ 0x87C, 0x00000000,
+ 0x880, 0xB0000C1C,
+ 0x884, 0x00000001,
+ 0x888, 0x00000000,
+ 0x88C, 0xCCC000C0,
+ 0x890, 0x00000800,
+ 0x894, 0xFFFFFFFE,
+ 0x898, 0x40302010,
+ 0x89C, 0x00706050,
+ 0x900, 0x00000000,
+ 0x904, 0x00000023,
+ 0x908, 0x00000000,
+ 0x90C, 0x81121111,
+ 0x910, 0x00000002,
+ 0x914, 0x00000201,
+ 0xA00, 0x00D047C8,
+ 0xA04, 0x80FF000C,
+ 0xA08, 0x8C838300,
+ 0xA0C, 0x2E7F120F,
+ 0xA10, 0x9500BB78,
+ 0xA14, 0x1114D028,
+ 0xA18, 0x00881117,
+ 0xA1C, 0x89140F00,
+ 0xA20, 0x1A1B0000,
+ 0xA24, 0x090E1317,
+ 0xA28, 0x00000204,
+ 0xA2C, 0x00D30000,
+ 0xA70, 0x101FBF00,
+ 0xA74, 0x00000007,
+ 0xA78, 0x00000900,
+ 0xA7C, 0x225B0606,
+ 0xA80, 0x218075B1,
+ 0xB2C, 0x80000000,
+ 0xC00, 0x48071D40,
+ 0xC04, 0x03A05611,
+ 0xC08, 0x000000E4,
+ 0xC0C, 0x6C6C6C6C,
+ 0xC10, 0x08800000,
+ 0xC14, 0x40000100,
+ 0xC18, 0x08800000,
+ 0xC1C, 0x40000100,
+ 0xC20, 0x00000000,
+ 0xC24, 0x00000000,
+ 0xC28, 0x00000000,
+ 0xC2C, 0x00000000,
+ 0xC30, 0x69E9AC47,
+ 0xC34, 0x469652AF,
+ 0xC38, 0x49795994,
+ 0xC3C, 0x0A97971C,
+ 0xC40, 0x1F7C403F,
+ 0xC44, 0x000100B7,
+ 0xC48, 0xEC020107,
+ 0xC4C, 0x007F037F,
+ 0xC50, 0x69553420,
+ 0xC54, 0x43BC0094,
+ 0xC58, 0x00013169,
+ 0xC5C, 0x00250492,
+ 0xC60, 0x00000000,
+ 0xC64, 0x7112848B,
+ 0xC68, 0x47C00BFF,
+ 0xC6C, 0x00000036,
+ 0xC70, 0x2C7F000D,
+ 0xC74, 0x020610DB,
+ 0xC78, 0x0000001F,
+ 0xC7C, 0x00B91612,
+ 0xC80, 0x390000E4,
+ 0xC84, 0x20F60000,
+ 0xC88, 0x40000100,
+ 0xC8C, 0x20200000,
+ 0xC90, 0x00091521,
+ 0xC94, 0x00000000,
+ 0xC98, 0x00121820,
+ 0xC9C, 0x00007F7F,
+ 0xCA0, 0x00000000,
+ 0xCA4, 0x000300A0,
+ 0xCA8, 0x00000000,
+ 0xCAC, 0x00000000,
+ 0xCB0, 0x00000000,
+ 0xCB4, 0x00000000,
+ 0xCB8, 0x00000000,
+ 0xCBC, 0x28000000,
+ 0xCC0, 0x00000000,
+ 0xCC4, 0x00000000,
+ 0xCC8, 0x00000000,
+ 0xCCC, 0x00000000,
+ 0xCD0, 0x00000000,
+ 0xCD4, 0x00000000,
+ 0xCD8, 0x64B22427,
+ 0xCDC, 0x00766932,
+ 0xCE0, 0x00222222,
+ 0xCE4, 0x00000000,
+ 0xCE8, 0x37644302,
+ 0xCEC, 0x2F97D40C,
+ 0xD00, 0x00000740,
+ 0xD04, 0x00020401,
+ 0xD08, 0x0000907F,
+ 0xD0C, 0x20010201,
+ 0xD10, 0xA0633333,
+ 0xD14, 0x3333BC43,
+ 0xD18, 0x7A8F5B6F,
+ 0xD2C, 0xCC979975,
+ 0xD30, 0x00000000,
+ 0xD34, 0x80608000,
+ 0xD38, 0x00000000,
+ 0xD3C, 0x00127353,
+ 0xD40, 0x00000000,
+ 0xD44, 0x00000000,
+ 0xD48, 0x00000000,
+ 0xD4C, 0x00000000,
+ 0xD50, 0x6437140A,
+ 0xD54, 0x00000000,
+ 0xD58, 0x00000282,
+ 0xD5C, 0x30032064,
+ 0xD60, 0x4653DE68,
+ 0xD64, 0x04518A3C,
+ 0xD68, 0x00002101,
+ 0xD6C, 0x2A201C16,
+ 0xD70, 0x1812362E,
+ 0xD74, 0x322C2220,
+ 0xD78, 0x000E3C24,
+ 0xE00, 0x2D2D2D2D,
+ 0xE04, 0x2D2D2D2D,
+ 0xE08, 0x0390272D,
+ 0xE10, 0x2D2D2D2D,
+ 0xE14, 0x2D2D2D2D,
+ 0xE18, 0x2D2D2D2D,
+ 0xE1C, 0x2D2D2D2D,
+ 0xE28, 0x00000000,
+ 0xE30, 0x1000DC1F,
+ 0xE34, 0x10008C1F,
+ 0xE38, 0x02140102,
+ 0xE3C, 0x681604C2,
+ 0xE40, 0x01007C00,
+ 0xE44, 0x01004800,
+ 0xE48, 0xFB000000,
+ 0xE4C, 0x000028D1,
+ 0xE50, 0x1000DC1F,
+ 0xE54, 0x10008C1F,
+ 0xE58, 0x02140102,
+ 0xE5C, 0x28160D05,
+ 0xE60, 0x00000008,
+ 0xE68, 0x001B25A4,
+ 0xE6C, 0x00C00014,
+ 0xE70, 0x00C00014,
+ 0xE74, 0x01000014,
+ 0xE78, 0x01000014,
+ 0xE7C, 0x01000014,
+ 0xE80, 0x01000014,
+ 0xE84, 0x00C00014,
+ 0xE88, 0x01000014,
+ 0xE8C, 0x00C00014,
+ 0xED0, 0x00C00014,
+ 0xED4, 0x00C00014,
+ 0xED8, 0x00C00014,
+ 0xEDC, 0x00000014,
+ 0xEE0, 0x00000014,
+ 0xEEC, 0x01C00014,
+ 0xF14, 0x00000003,
+ 0xF4C, 0x00000000,
+ 0xF00, 0x00000300,
+};
+
+enum HAL_STATUS ODM_ReadAndConfig_PHY_REG_1T_8188E(struct odm_dm_struct *dm_odm)
+{
+ u32 hex = 0;
+ u32 i = 0;
+ u8 platform = dm_odm->SupportPlatform;
+ u8 interfaceValue = dm_odm->SupportInterface;
+ u8 board = dm_odm->BoardType;
+ u32 arraylen = sizeof(array_phy_reg_1t_8188e)/sizeof(u32);
+ u32 *array = array_phy_reg_1t_8188e;
+ bool biol = false;
+ struct adapter *adapter = dm_odm->Adapter;
+ struct xmit_frame *pxmit_frame = NULL;
+ u8 bndy_cnt = 1;
+ enum HAL_STATUS rst = HAL_STATUS_SUCCESS;
+ hex += board;
+ hex += interfaceValue << 8;
+ hex += platform << 16;
+ hex += 0xFF000000;
+ biol = rtw_IOL_applied(adapter);
+
+ if (biol) {
+ pxmit_frame = rtw_IOL_accquire_xmit_frame(adapter);
+ if (pxmit_frame == NULL) {
+ pr_info("rtw_IOL_accquire_xmit_frame failed\n");
+ return HAL_STATUS_FAILURE;
+ }
+ }
+
+ for (i = 0; i < arraylen; i += 2) {
+ u32 v1 = array[i];
+ u32 v2 = array[i+1];
+
+ /* This (offset, data) pair meets the condition. */
+ if (v1 < 0xCDCDCDCD) {
+ if (biol) {
+ if (rtw_IOL_cmd_boundary_handle(pxmit_frame))
+ bndy_cnt++;
+ if (v1 == 0xfe) {
+ rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 50);
+ } else if (v1 == 0xfd) {
+ rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 5);
+ } else if (v1 == 0xfc) {
+ rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 1);
+ } else if (v1 == 0xfb) {
+ rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 50);
+ } else if (v1 == 0xfa) {
+ rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 5);
+ } else if (v1 == 0xf9) {
+ rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 1);
+ } else {
+ if (v1 == 0xa24)
+ dm_odm->RFCalibrateInfo.RegA24 = v2;
+ rtw_IOL_append_WD_cmd(pxmit_frame, (u16)v1, v2, bMaskDWord);
+ }
+ } else {
+ odm_ConfigBB_PHY_8188E(dm_odm, v1, bMaskDWord, v2);
+ }
+ continue;
+ } else { /* This line is the start line of branch. */
+ if (!CheckCondition(array[i], hex)) {
+ /* Discard the following (offset, data) pairs. */
+ read_next_pair(array, v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylen - 2)
+ read_next_pair(array, v1, v2, i);
+ i -= 2; /* prevent from for-loop += 2 */
+ } else { /* Configure matched pairs and skip to end of if-else. */
+ read_next_pair(array, v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylen - 2) {
+ if (biol) {
+ if (rtw_IOL_cmd_boundary_handle(pxmit_frame))
+ bndy_cnt++;
+ if (v1 == 0xfe) {
+ rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 50);
+ } else if (v1 == 0xfd) {
+ rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 5);
+ } else if (v1 == 0xfc) {
+ rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 1);
+ } else if (v1 == 0xfb) {
+ rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 50);
+ } else if (v1 == 0xfa) {
+ rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 5);
+ } else if (v1 == 0xf9) {
+ rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 1);
+ } else {
+ if (v1 == 0xa24)
+ dm_odm->RFCalibrateInfo.RegA24 = v2;
+
+ rtw_IOL_append_WD_cmd(pxmit_frame, (u16)v1, v2, bMaskDWord);
+ }
+ } else {
+ odm_ConfigBB_PHY_8188E(dm_odm, v1, bMaskDWord, v2);
+ }
+ read_next_pair(array, v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < arraylen - 2)
+ read_next_pair(array, v1, v2, i);
+ }
+ }
+ }
+ if (biol) {
+ if (!rtw_IOL_exec_cmds_sync(dm_odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
+ rst = HAL_STATUS_FAILURE;
+ pr_info("~~~ IOL Config %s Failed !!!\n", __func__);
+ }
+ }
+ return rst;
+}
+
+/******************************************************************************
+* PHY_REG_PG.TXT
+******************************************************************************/
+
+static u32 array_phy_reg_pg_8188e[] = {
+ 0xE00, 0xFFFFFFFF, 0x06070809,
+ 0xE04, 0xFFFFFFFF, 0x02020405,
+ 0xE08, 0x0000FF00, 0x00000006,
+ 0x86C, 0xFFFFFF00, 0x00020400,
+ 0xE10, 0xFFFFFFFF, 0x08090A0B,
+ 0xE14, 0xFFFFFFFF, 0x01030607,
+ 0xE18, 0xFFFFFFFF, 0x08090A0B,
+ 0xE1C, 0xFFFFFFFF, 0x01030607,
+ 0xE00, 0xFFFFFFFF, 0x00000000,
+ 0xE04, 0xFFFFFFFF, 0x00000000,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x00000000,
+ 0xE14, 0xFFFFFFFF, 0x00000000,
+ 0xE18, 0xFFFFFFFF, 0x00000000,
+ 0xE1C, 0xFFFFFFFF, 0x00000000,
+ 0xE00, 0xFFFFFFFF, 0x02020202,
+ 0xE04, 0xFFFFFFFF, 0x00020202,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x04040404,
+ 0xE14, 0xFFFFFFFF, 0x00020404,
+ 0xE18, 0xFFFFFFFF, 0x00000000,
+ 0xE1C, 0xFFFFFFFF, 0x00000000,
+ 0xE00, 0xFFFFFFFF, 0x02020202,
+ 0xE04, 0xFFFFFFFF, 0x00020202,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x04040404,
+ 0xE14, 0xFFFFFFFF, 0x00020404,
+ 0xE18, 0xFFFFFFFF, 0x00000000,
+ 0xE1C, 0xFFFFFFFF, 0x00000000,
+ 0xE00, 0xFFFFFFFF, 0x00000000,
+ 0xE04, 0xFFFFFFFF, 0x00000000,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x00000000,
+ 0xE14, 0xFFFFFFFF, 0x00000000,
+ 0xE18, 0xFFFFFFFF, 0x00000000,
+ 0xE1C, 0xFFFFFFFF, 0x00000000,
+ 0xE00, 0xFFFFFFFF, 0x02020202,
+ 0xE04, 0xFFFFFFFF, 0x00020202,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x04040404,
+ 0xE14, 0xFFFFFFFF, 0x00020404,
+ 0xE18, 0xFFFFFFFF, 0x00000000,
+ 0xE1C, 0xFFFFFFFF, 0x00000000,
+ 0xE00, 0xFFFFFFFF, 0x00000000,
+ 0xE04, 0xFFFFFFFF, 0x00000000,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x00000000,
+ 0xE14, 0xFFFFFFFF, 0x00000000,
+ 0xE18, 0xFFFFFFFF, 0x00000000,
+ 0xE1C, 0xFFFFFFFF, 0x00000000,
+ 0xE00, 0xFFFFFFFF, 0x00000000,
+ 0xE04, 0xFFFFFFFF, 0x00000000,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x00000000,
+ 0xE14, 0xFFFFFFFF, 0x00000000,
+ 0xE18, 0xFFFFFFFF, 0x00000000,
+ 0xE1C, 0xFFFFFFFF, 0x00000000,
+ 0xE00, 0xFFFFFFFF, 0x00000000,
+ 0xE04, 0xFFFFFFFF, 0x00000000,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x00000000,
+ 0xE14, 0xFFFFFFFF, 0x00000000,
+ 0xE18, 0xFFFFFFFF, 0x00000000,
+ 0xE1C, 0xFFFFFFFF, 0x00000000,
+ 0xE00, 0xFFFFFFFF, 0x00000000,
+ 0xE04, 0xFFFFFFFF, 0x00000000,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x00000000,
+ 0xE14, 0xFFFFFFFF, 0x00000000,
+ 0xE18, 0xFFFFFFFF, 0x00000000,
+ 0xE1C, 0xFFFFFFFF, 0x00000000,
+ 0xE00, 0xFFFFFFFF, 0x00000000,
+ 0xE04, 0xFFFFFFFF, 0x00000000,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x00000000,
+ 0xE14, 0xFFFFFFFF, 0x00000000,
+ 0xE18, 0xFFFFFFFF, 0x00000000,
+ 0xE1C, 0xFFFFFFFF, 0x00000000,
+
+};
+
+void ODM_ReadAndConfig_PHY_REG_PG_8188E(struct odm_dm_struct *dm_odm)
+{
+ u32 hex;
+ u32 i = 0;
+ u8 platform = dm_odm->SupportPlatform;
+ u8 interfaceValue = dm_odm->SupportInterface;
+ u8 board = dm_odm->BoardType;
+ u32 arraylen = sizeof(array_phy_reg_pg_8188e) / sizeof(u32);
+ u32 *array = array_phy_reg_pg_8188e;
+
+ hex = board + (interfaceValue << 8);
+ hex += (platform << 16) + 0xFF000000;
+
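+ /* The PG table is consumed as (address, bitmask, value) triples, hence the
+ * step of three and the three-argument configuration call below.
+ */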
+ for (i = 0; i < arraylen; i += 3) {
+ u32 v1 = array[i];
+ u32 v2 = array[i+1];
+ u32 v3 = array[i+2];
+
+ /* this line is a line of pure_body */
+ if (v1 < 0xCDCDCDCD) {
+ odm_ConfigBB_PHY_REG_PG_8188E(dm_odm, v1, v2, v3);
+ continue;
+ } else { /* this line is the start of branch */
+ if (!CheckCondition(array[i], hex)) {
+ /* don't need the hw_body */
+ i += 2; /* skip the pair of expression */
+ v1 = array[i];
+ v2 = array[i+1];
+ v3 = array[i+2];
+ while (v2 != 0xDEAD) {
+ i += 3;
+ v1 = array[i];
+ v2 = array[i+1];
+ v3 = array[i+2];
+ }
+ }
+ }
+ }
+}
diff --git a/drivers/staging/rtl8188eu/hal/HalHWImg8188E_MAC.c b/drivers/staging/rtl8188eu/hal/HalHWImg8188E_MAC.c
new file mode 100644
index 0000000..b49b5ab
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/HalHWImg8188E_MAC.c
@@ -0,0 +1,231 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along with
+* this program; if not, write to the Free Software Foundation, Inc.,
+* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+*
+*
+******************************************************************************/
+
+#include "odm_precomp.h"
+#include <rtw_iol.h>
+
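+/*
+ * Each conditional block in the register table starts with a marker word
+ * whose board (bits 7:0), interface (bits 15:8) and platform (bits 23:16)
+ * fields are matched against the packed "hex" value built by the caller;
+ * 0xCDCDCDCD always matches.
+ */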
+static bool Checkcondition(const u32 condition, const u32 hex)
+{
+ u32 _board = (hex & 0x000000FF);
+ u32 _interface = (hex & 0x0000FF00) >> 8;
+ u32 _platform = (hex & 0x00FF0000) >> 16;
+ u32 cond = condition;
+
+ if (condition == 0xCDCDCDCD)
+ return true;
+
+ cond = condition & 0x000000FF;
+ if ((_board == cond) && cond != 0x00)
+ return false;
+
+ cond = condition & 0x0000FF00;
+ cond = cond >> 8;
+ if ((_interface & cond) == 0 && cond != 0x07)
+ return false;
+
+ cond = condition & 0x00FF0000;
+ cond = cond >> 16;
+ if ((_platform & cond) == 0 && cond != 0x0F)
+ return false;
+ return true;
+}
+
+
+/******************************************************************************
+* MAC_REG.TXT
+******************************************************************************/
+
+static u32 array_MAC_REG_8188E[] = {
+ 0x026, 0x00000041,
+ 0x027, 0x00000035,
+ 0x428, 0x0000000A,
+ 0x429, 0x00000010,
+ 0x430, 0x00000000,
+ 0x431, 0x00000001,
+ 0x432, 0x00000002,
+ 0x433, 0x00000004,
+ 0x434, 0x00000005,
+ 0x435, 0x00000006,
+ 0x436, 0x00000007,
+ 0x437, 0x00000008,
+ 0x438, 0x00000000,
+ 0x439, 0x00000000,
+ 0x43A, 0x00000001,
+ 0x43B, 0x00000002,
+ 0x43C, 0x00000004,
+ 0x43D, 0x00000005,
+ 0x43E, 0x00000006,
+ 0x43F, 0x00000007,
+ 0x440, 0x0000005D,
+ 0x441, 0x00000001,
+ 0x442, 0x00000000,
+ 0x444, 0x00000015,
+ 0x445, 0x000000F0,
+ 0x446, 0x0000000F,
+ 0x447, 0x00000000,
+ 0x458, 0x00000041,
+ 0x459, 0x000000A8,
+ 0x45A, 0x00000072,
+ 0x45B, 0x000000B9,
+ 0x460, 0x00000066,
+ 0x461, 0x00000066,
+ 0x480, 0x00000008,
+ 0x4C8, 0x000000FF,
+ 0x4C9, 0x00000008,
+ 0x4CC, 0x000000FF,
+ 0x4CD, 0x000000FF,
+ 0x4CE, 0x00000001,
+ 0x4D3, 0x00000001,
+ 0x500, 0x00000026,
+ 0x501, 0x000000A2,
+ 0x502, 0x0000002F,
+ 0x503, 0x00000000,
+ 0x504, 0x00000028,
+ 0x505, 0x000000A3,
+ 0x506, 0x0000005E,
+ 0x507, 0x00000000,
+ 0x508, 0x0000002B,
+ 0x509, 0x000000A4,
+ 0x50A, 0x0000005E,
+ 0x50B, 0x00000000,
+ 0x50C, 0x0000004F,
+ 0x50D, 0x000000A4,
+ 0x50E, 0x00000000,
+ 0x50F, 0x00000000,
+ 0x512, 0x0000001C,
+ 0x514, 0x0000000A,
+ 0x516, 0x0000000A,
+ 0x525, 0x0000004F,
+ 0x550, 0x00000010,
+ 0x551, 0x00000010,
+ 0x559, 0x00000002,
+ 0x55D, 0x000000FF,
+ 0x605, 0x00000030,
+ 0x608, 0x0000000E,
+ 0x609, 0x0000002A,
+ 0x620, 0x000000FF,
+ 0x621, 0x000000FF,
+ 0x622, 0x000000FF,
+ 0x623, 0x000000FF,
+ 0x624, 0x000000FF,
+ 0x625, 0x000000FF,
+ 0x626, 0x000000FF,
+ 0x627, 0x000000FF,
+ 0x652, 0x00000020,
+ 0x63C, 0x0000000A,
+ 0x63D, 0x0000000A,
+ 0x63E, 0x0000000E,
+ 0x63F, 0x0000000E,
+ 0x640, 0x00000040,
+ 0x66E, 0x00000005,
+ 0x700, 0x00000021,
+ 0x701, 0x00000043,
+ 0x702, 0x00000065,
+ 0x703, 0x00000087,
+ 0x708, 0x00000021,
+ 0x709, 0x00000043,
+ 0x70A, 0x00000065,
+ 0x70B, 0x00000087,
+};
+
+enum HAL_STATUS ODM_ReadAndConfig_MAC_REG_8188E(struct odm_dm_struct *dm_odm)
+{
+ #define READ_NEXT_PAIR(v1, v2, i) do { i += 2; v1 = array[i]; v2 = array[i+1]; } while (0)
+
+ u32 hex = 0;
+ u32 i;
+ u8 platform = dm_odm->SupportPlatform;
+ u8 interface_val = dm_odm->SupportInterface;
+ u8 board = dm_odm->BoardType;
+ u32 array_len = sizeof(array_MAC_REG_8188E)/sizeof(u32);
+ u32 *array = array_MAC_REG_8188E;
+ bool biol = false;
+
+ struct adapter *adapt = dm_odm->Adapter;
+ struct xmit_frame *pxmit_frame = NULL;
+ u8 bndy_cnt = 1;
+ enum HAL_STATUS rst = HAL_STATUS_SUCCESS;
+ hex += board;
+ hex += interface_val << 8;
+ hex += platform << 16;
+ hex += 0xFF000000;
+
+ biol = rtw_IOL_applied(adapt);
+
+ if (biol) {
+ pxmit_frame = rtw_IOL_accquire_xmit_frame(adapt);
+ if (pxmit_frame == NULL) {
+ pr_info("rtw_IOL_accquire_xmit_frame failed\n");
+ return HAL_STATUS_FAILURE;
+ }
+ }
+
+ for (i = 0; i < array_len; i += 2) {
+ u32 v1 = array[i];
+ u32 v2 = array[i+1];
+
+ /* This (offset, data) pair meets the condition. */
+ if (v1 < 0xCDCDCDCD) {
+ if (biol) {
+ if (rtw_IOL_cmd_boundary_handle(pxmit_frame))
+ bndy_cnt++;
+ rtw_IOL_append_WB_cmd(pxmit_frame, (u16)v1, (u8)v2, 0xFF);
+ } else {
+ odm_ConfigMAC_8188E(dm_odm, v1, (u8)v2);
+ }
+ continue;
+ } else { /* This line is the start line of branch. */
+ if (!Checkcondition(array[i], hex)) {
+ /* Discard the following (offset, data) pairs. */
+ READ_NEXT_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < array_len - 2) {
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+ i -= 2; /* prevent from for-loop += 2 */
+ } else { /* Configure matched pairs and skip to end of if-else. */
+ READ_NEXT_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < array_len - 2) {
+ if (biol) {
+ if (rtw_IOL_cmd_boundary_handle(pxmit_frame))
+ bndy_cnt++;
+ rtw_IOL_append_WB_cmd(pxmit_frame, (u16)v1, (u8)v2, 0xFF);
+ } else {
+ odm_ConfigMAC_8188E(dm_odm, v1, (u8)v2);
+ }
+
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+ while (v2 != 0xDEAD && i < array_len - 2)
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+ }
+ }
+ if (biol) {
+ if (!rtw_IOL_exec_cmds_sync(dm_odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
+ pr_info("~~~ MAC IOL_exec_cmds Failed !!!\n");
+ rst = HAL_STATUS_FAILURE;
+ }
+ }
+ return rst;
+}
diff --git a/drivers/staging/rtl8188eu/hal/HalHWImg8188E_RF.c b/drivers/staging/rtl8188eu/hal/HalHWImg8188E_RF.c
new file mode 100644
index 0000000..480c810
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/HalHWImg8188E_RF.c
@@ -0,0 +1,269 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along with
+* this program; if not, write to the Free Software Foundation, Inc.,
+* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+*
+*
+******************************************************************************/
+
+#include "odm_precomp.h"
+
+#include <rtw_iol.h>
+
+static bool CheckCondition(const u32 Condition, const u32 Hex)
+{
+ u32 _board = (Hex & 0x000000FF);
+ u32 _interface = (Hex & 0x0000FF00) >> 8;
+ u32 _platform = (Hex & 0x00FF0000) >> 16;
+ u32 cond = Condition;
+
+ if (Condition == 0xCDCDCDCD)
+ return true;
+
+ cond = Condition & 0x000000FF;
+ if ((_board == cond) && cond != 0x00)
+ return false;
+
+ cond = Condition & 0x0000FF00;
+ cond = cond >> 8;
+ if ((_interface & cond) == 0 && cond != 0x07)
+ return false;
+
+ cond = Condition & 0x00FF0000;
+ cond = cond >> 16;
+ if ((_platform & cond) == 0 && cond != 0x0F)
+ return false;
+ return true;
+}
+
+
+/******************************************************************************
+* RadioA_1T.TXT
+******************************************************************************/
+
+static u32 Array_RadioA_1T_8188E[] = {
+ 0x000, 0x00030000,
+ 0x008, 0x00084000,
+ 0x018, 0x00000407,
+ 0x019, 0x00000012,
+ 0x01E, 0x00080009,
+ 0x01F, 0x00000880,
+ 0x02F, 0x0001A060,
+ 0x03F, 0x00000000,
+ 0x042, 0x000060C0,
+ 0x057, 0x000D0000,
+ 0x058, 0x000BE180,
+ 0x067, 0x00001552,
+ 0x083, 0x00000000,
+ 0x0B0, 0x000FF8FC,
+ 0x0B1, 0x00054400,
+ 0x0B2, 0x000CCC19,
+ 0x0B4, 0x00043003,
+ 0x0B6, 0x0004953E,
+ 0x0B7, 0x0001C718,
+ 0x0B8, 0x000060FF,
+ 0x0B9, 0x00080001,
+ 0x0BA, 0x00040000,
+ 0x0BB, 0x00000400,
+ 0x0BF, 0x000C0000,
+ 0x0C2, 0x00002400,
+ 0x0C3, 0x00000009,
+ 0x0C4, 0x00040C91,
+ 0x0C5, 0x00099999,
+ 0x0C6, 0x000000A3,
+ 0x0C7, 0x00088820,
+ 0x0C8, 0x00076C06,
+ 0x0C9, 0x00000000,
+ 0x0CA, 0x00080000,
+ 0x0DF, 0x00000180,
+ 0x0EF, 0x000001A0,
+ 0x051, 0x0006B27D,
+ 0xFF0F041F, 0xABCD,
+ 0x052, 0x0007E4DD,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x052, 0x0007E49D,
+ 0xFF0F041F, 0xDEAD,
+ 0x053, 0x00000073,
+ 0x056, 0x00051FF3,
+ 0x035, 0x00000086,
+ 0x035, 0x00000186,
+ 0x035, 0x00000286,
+ 0x036, 0x00001C25,
+ 0x036, 0x00009C25,
+ 0x036, 0x00011C25,
+ 0x036, 0x00019C25,
+ 0x0B6, 0x00048538,
+ 0x018, 0x00000C07,
+ 0x05A, 0x0004BD00,
+ 0x019, 0x000739D0,
+ 0x034, 0x0000ADF3,
+ 0x034, 0x00009DF0,
+ 0x034, 0x00008DED,
+ 0x034, 0x00007DEA,
+ 0x034, 0x00006DE7,
+ 0x034, 0x000054EE,
+ 0x034, 0x000044EB,
+ 0x034, 0x000034E8,
+ 0x034, 0x0000246B,
+ 0x034, 0x00001468,
+ 0x034, 0x0000006D,
+ 0x000, 0x00030159,
+ 0x084, 0x00068200,
+ 0x086, 0x000000CE,
+ 0x087, 0x00048A00,
+ 0x08E, 0x00065540,
+ 0x08F, 0x00088000,
+ 0x0EF, 0x000020A0,
+ 0x03B, 0x000F02B0,
+ 0x03B, 0x000EF7B0,
+ 0x03B, 0x000D4FB0,
+ 0x03B, 0x000CF060,
+ 0x03B, 0x000B0090,
+ 0x03B, 0x000A0080,
+ 0x03B, 0x00090080,
+ 0x03B, 0x0008F780,
+ 0x03B, 0x000722B0,
+ 0x03B, 0x0006F7B0,
+ 0x03B, 0x00054FB0,
+ 0x03B, 0x0004F060,
+ 0x03B, 0x00030090,
+ 0x03B, 0x00020080,
+ 0x03B, 0x00010080,
+ 0x03B, 0x0000F780,
+ 0x0EF, 0x000000A0,
+ 0x000, 0x00010159,
+ 0x018, 0x0000F407,
+ 0xFFE, 0x00000000,
+ 0xFFE, 0x00000000,
+ 0x01F, 0x00080003,
+ 0xFFE, 0x00000000,
+ 0xFFE, 0x00000000,
+ 0x01E, 0x00000001,
+ 0x01F, 0x00080000,
+ 0x000, 0x00033E60,
+};
+
+enum HAL_STATUS ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *pDM_Odm)
+{
+ #define READ_NEXT_PAIR(v1, v2, i) do { i += 2; v1 = Array[i]; v2 = Array[i+1]; } while (0)
+
+ u32 hex = 0;
+ u32 i = 0;
+ u8 platform = pDM_Odm->SupportPlatform;
+ u8 interfaceValue = pDM_Odm->SupportInterface;
+ u8 board = pDM_Odm->BoardType;
+ u32 ArrayLen = sizeof(Array_RadioA_1T_8188E)/sizeof(u32);
+ u32 *Array = Array_RadioA_1T_8188E;
+ bool biol = false;
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ struct xmit_frame *pxmit_frame = NULL;
+ u8 bndy_cnt = 1;
+ enum HAL_STATUS rst = HAL_STATUS_SUCCESS;
+
+ hex += board;
+ hex += interfaceValue << 8;
+ hex += platform << 16;
+ hex += 0xFF000000;
+ biol = rtw_IOL_applied(Adapter);
+
+ if (biol) {
+ pxmit_frame = rtw_IOL_accquire_xmit_frame(Adapter);
+ if (pxmit_frame == NULL) {
+ pr_info("rtw_IOL_accquire_xmit_frame failed\n");
+ return HAL_STATUS_FAILURE;
+ }
+ }
+
+ for (i = 0; i < ArrayLen; i += 2) {
+ u32 v1 = Array[i];
+ u32 v2 = Array[i+1];
+
+ /* This (offset, data) pair meets the condition. */
+ if (v1 < 0xCDCDCDCD) {
+ if (biol) {
+ if (rtw_IOL_cmd_boundary_handle(pxmit_frame))
+ bndy_cnt++;
+
+ if (v1 == 0xffe)
+ rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 50);
+ else if (v1 == 0xfd)
+ rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 5);
+ else if (v1 == 0xfc)
+ rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 1);
+ else if (v1 == 0xfb)
+ rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 50);
+ else if (v1 == 0xfa)
+ rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 5);
+ else if (v1 == 0xf9)
+ rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 1);
+ else
+ rtw_IOL_append_WRF_cmd(pxmit_frame, ODM_RF_PATH_A, (u16)v1, v2, bRFRegOffsetMask);
+ } else {
+ odm_ConfigRF_RadioA_8188E(pDM_Odm, v1, v2);
+ }
+ continue;
+ } else { /* This line is the start line of branch. */
+ if (!CheckCondition(Array[i], hex)) {
+ /* Discard the following (offset, data) pairs. */
+ READ_NEXT_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < ArrayLen - 2)
+ READ_NEXT_PAIR(v1, v2, i);
+ i -= 2; /* prevent from for-loop += 2 */
+ } else { /* Configure matched pairs and skip to end of if-else. */
+ READ_NEXT_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < ArrayLen - 2) {
+ if (biol) {
+ if (rtw_IOL_cmd_boundary_handle(pxmit_frame))
+ bndy_cnt++;
+
+ if (v1 == 0xffe)
+ rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 50);
+ else if (v1 == 0xfd)
+ rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 5);
+ else if (v1 == 0xfc)
+ rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 1);
+ else if (v1 == 0xfb)
+ rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 50);
+ else if (v1 == 0xfa)
+ rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 5);
+ else if (v1 == 0xf9)
+ rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 1);
+ else
+ rtw_IOL_append_WRF_cmd(pxmit_frame, ODM_RF_PATH_A, (u16)v1, v2, bRFRegOffsetMask);
+ } else {
+ odm_ConfigRF_RadioA_8188E(pDM_Odm, v1, v2);
+ }
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < ArrayLen - 2)
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+ }
+ }
+ if (biol) {
+ if (!rtw_IOL_exec_cmds_sync(pDM_Odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
+ rst = HAL_STATUS_FAILURE;
+ pr_info("~~~ IOL Config %s Failed !!!\n", __func__);
+ }
+ }
+ return rst;
+}
diff --git a/drivers/staging/rtl8188eu/hal/HalPhyRf.c b/drivers/staging/rtl8188eu/hal/HalPhyRf.c
new file mode 100644
index 0000000..980f7da
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/HalPhyRf.c
@@ -0,0 +1,49 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#include "odm_precomp.h"
+
+/* 3============================================================ */
+/* 3 IQ Calibration */
+/* 3============================================================ */
+
+void ODM_ResetIQKResult(struct odm_dm_struct *pDM_Odm)
+{
+}
+
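+/*
+ * Map a channel number to the slot used for IQK results: channels above 14
+ * return a 1-based index into the 5 GHz part of channel_all[] (e.g.
+ * channel 36 -> 1); 2.4 GHz and unknown channels return 0.
+ */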
+u8 ODM_GetRightChnlPlaceforIQK(u8 chnl)
+{
+ u8 channel_all[ODM_TARGET_CHNL_NUM_2G_5G] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64,
+ 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122,
+ 124, 126, 128, 130, 132, 134, 136, 138, 140, 149, 151, 153,
+ 155, 157, 159, 161, 163, 165
+ };
+ u8 place = chnl;
+
+ if (chnl > 14) {
+ for (place = 14; place < sizeof(channel_all); place++) {
+ if (channel_all[place] == chnl)
+ return place-13;
+ }
+ }
+ return 0;
+}
diff --git a/drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c b/drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c
new file mode 100644
index 0000000..e4f20da
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c
@@ -0,0 +1,1928 @@
+
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#include "odm_precomp.h"
+
+/*---------------------------Define Local Constant---------------------------*/
+/* 2010/04/25 MH Define the max tx power tracking tx agc power. */
+#define ODM_TXPWRTRACK_MAX_IDX_88E 6
+
+/*---------------------------Define Local Constant---------------------------*/
+
+/* 3============================================================ */
+/* 3 Tx Power Tracking */
+/* 3============================================================ */
+/*-----------------------------------------------------------------------------
+ * Function: ODM_TxPwrTrackAdjust88E()
+ *
+ * Overview: On the 88E we cannot write 0xc80/c94/c4c/0xa2x directly, so the
+ * TX AGC is adjusted instead. OFDM and CCK both use the same method.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 04/23/2012 MHC Create Version 0.
+ * 04/23/2012 MHC Adjust TX AGC directly rather than through the BB digital path.
+ *
+ *---------------------------------------------------------------------------*/
+void ODM_TxPwrTrackAdjust88E(struct odm_dm_struct *dm_odm, u8 Type,/* 0 = OFDM, 1 = CCK */
+ u8 *pDirection, /* 1 = +(increase) 2 = -(decrease) */
+ u32 *pOutWriteVal /* Tx tracking CCK/OFDM BB swing index adjust */
+ )
+{
+ u8 pwr_value = 0;
+ /* Tx power tracking BB swing table. */
+ /* The base index is 12: index n below 12 raises TX power by ((12-n)/2) dB
+ * (e.g. index 10 gives +1 dB), while index n above 12 lowers it by
+ * ((n-12)/2) dB.
+ */
+ if (Type == 0) { /* For OFDM adjust */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
+ ("BbSwingIdxOfdm = %d BbSwingFlagOfdm=%d\n",
+ dm_odm->BbSwingIdxOfdm, dm_odm->BbSwingFlagOfdm));
+
+ if (dm_odm->BbSwingIdxOfdm <= dm_odm->BbSwingIdxOfdmBase) {
+ *pDirection = 1;
+ pwr_value = (dm_odm->BbSwingIdxOfdmBase - dm_odm->BbSwingIdxOfdm);
+ } else {
+ *pDirection = 2;
+ pwr_value = (dm_odm->BbSwingIdxOfdm - dm_odm->BbSwingIdxOfdmBase);
+ }
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
+ ("BbSwingIdxOfdm = %d BbSwingFlagOfdm=%d\n",
+ dm_odm->BbSwingIdxOfdm, dm_odm->BbSwingFlagOfdm));
+ } else if (Type == 1) { /* For CCK adjust. */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
+ ("dm_odm->BbSwingIdxCck = %d dm_odm->BbSwingIdxCckBase = %d\n",
+ dm_odm->BbSwingIdxCck, dm_odm->BbSwingIdxCckBase));
+
+ if (dm_odm->BbSwingIdxCck <= dm_odm->BbSwingIdxCckBase) {
+ *pDirection = 1;
+ pwr_value = (dm_odm->BbSwingIdxCckBase - dm_odm->BbSwingIdxCck);
+ } else {
+ *pDirection = 2;
+ pwr_value = (dm_odm->BbSwingIdxCck - dm_odm->BbSwingIdxCckBase);
+ }
+ }
+
+ /* 2012/04/25 MH Per the Ed/Luke.Lees EVM estimate, the maximum TX power
+ * tracking adjustment must stay below 6 power index steps for the 88E.
+ */
+ if (pwr_value >= ODM_TXPWRTRACK_MAX_IDX_88E && *pDirection == 1)
+ pwr_value = ODM_TXPWRTRACK_MAX_IDX_88E;
+
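+ /* Pack the same adjustment value into all four bytes of the output dword. */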
+ *pOutWriteVal = pwr_value | (pwr_value<<8) | (pwr_value<<16) | (pwr_value<<24);
+} /* ODM_TxPwrTrackAdjust88E */
+
+/*-----------------------------------------------------------------------------
+ * Function: odm_TxPwrTrackSetPwr88E()
+ *
+ * Overview: 88E changes the TX power of all channels according to the flags.
+ * OFDM and CCK are handled separately.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 04/23/2012 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+static void odm_TxPwrTrackSetPwr88E(struct odm_dm_struct *dm_odm)
+{
+ if (dm_odm->BbSwingFlagOfdm || dm_odm->BbSwingFlagCck) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD, ("odm_TxPwrTrackSetPwr88E CH=%d\n", *(dm_odm->pChannel)));
+ PHY_SetTxPowerLevel8188E(dm_odm->Adapter, *(dm_odm->pChannel));
+ dm_odm->BbSwingFlagOfdm = false;
+ dm_odm->BbSwingFlagCck = false;
+ }
+} /* odm_TxPwrTrackSetPwr88E */
+
+/* 091212 chiyokolin */
+void
+odm_TXPowerTrackingCallback_ThermalMeter_8188E(
+ struct adapter *Adapter
+ )
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ u8 ThermalValue = 0, delta, delta_LCK, delta_IQK, offset;
+ u8 ThermalValue_AVG_count = 0;
+ u32 ThermalValue_AVG = 0;
+ s32 ele_A = 0, ele_D, TempCCk, X, value32;
+ s32 Y, ele_C = 0;
+ s8 OFDM_index[2], CCK_index = 0;
+ s8 OFDM_index_old[2] = {0, 0}, CCK_index_old = 0;
+ u32 i = 0, j = 0;
+ bool is2t = false;
+
+ u8 OFDM_min_index = 6, rf; /* OFDM BB Swing should be less than +3.0dB, which is required by Arthur */
+ u8 Indexforchannel = 0/*GetRightChnlPlaceforIQK(pHalData->CurrentChannel)*/;
+ s8 OFDM_index_mapping[2][index_mapping_NUM_88E] = {
+ {0, 0, 2, 3, 4, 4, /* 2.4G, decrease power */
+ 5, 6, 7, 7, 8, 9,
+ 10, 10, 11}, /* For lower temperature; updated on 20120220. */
+ {0, 0, -1, -2, -3, -4, /* 2.4G, increase power */
+ -4, -4, -4, -5, -7, -8,
+ -9, -9, -10},
+ };
+ u8 Thermal_mapping[2][index_mapping_NUM_88E] = {
+ {0, 2, 4, 6, 8, 10, /* 2.4G, decrease power */
+ 12, 14, 16, 18, 20, 22,
+ 24, 26, 27},
+ {0, 2, 4, 6, 8, 10, /* 2.4G, increase power */
+ 12, 14, 16, 18, 20, 22,
+ 25, 25, 25},
+ };
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+
+ /* 2012/04/25 MH Add for tx power tracking to set tx power in tx agc for 88E. */
+ odm_TxPwrTrackSetPwr88E(dm_odm);
+
+ dm_odm->RFCalibrateInfo.TXPowerTrackingCallbackCnt++; /* cosa add for debug */
+ dm_odm->RFCalibrateInfo.bTXPowerTrackingInit = true;
+
+ /* <Kordan> RFCalibrateInfo.RegA24 will be initialized when ODM HW configuring, but MP configures with para files. */
+ dm_odm->RFCalibrateInfo.RegA24 = 0x090e1317;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("===>dm_TXPowerTrackingCallback_ThermalMeter_8188E txpowercontrol %d\n",
+ dm_odm->RFCalibrateInfo.TxPowerTrackControl));
+
+ ThermalValue = (u8)ODM_GetRFReg(dm_odm, RF_PATH_A, RF_T_METER_88E, 0xfc00); /* 0x42: RF Reg[15:10] 88E */
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x EEPROMthermalmeter 0x%x\n",
+ ThermalValue, dm_odm->RFCalibrateInfo.ThermalValue, pHalData->EEPROMThermalMeter));
+
+ if (is2t)
+ rf = 2;
+ else
+ rf = 1;
+
+ if (ThermalValue) {
+ /* Query OFDM path A default setting */
+ ele_D = ODM_GetBBReg(dm_odm, rOFDM0_XATxIQImbalance, bMaskDWord)&bMaskOFDM_D;
+ for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) { /* find the index */
+ if (ele_D == (OFDMSwingTable[i]&bMaskOFDM_D)) {
+ OFDM_index_old[0] = (u8)i;
+ dm_odm->BbSwingIdxOfdmBase = (u8)i;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("Initial pathA ele_D reg0x%x = 0x%x, OFDM_index=0x%x\n",
+ rOFDM0_XATxIQImbalance, ele_D, OFDM_index_old[0]));
+ break;
+ }
+ }
+
+ /* Query OFDM path B default setting */
+ if (is2t) {
+ ele_D = ODM_GetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, bMaskDWord)&bMaskOFDM_D;
+ for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) { /* find the index */
+ if (ele_D == (OFDMSwingTable[i]&bMaskOFDM_D)) {
+ OFDM_index_old[1] = (u8)i;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("Initial pathB ele_D reg0x%x = 0x%x, OFDM_index=0x%x\n",
+ rOFDM0_XBTxIQImbalance, ele_D, OFDM_index_old[1]));
+ break;
+ }
+ }
+ }
+
+ /* Query CCK default setting From 0xa24 */
+ TempCCk = dm_odm->RFCalibrateInfo.RegA24;
+
+ for (i = 0; i < CCK_TABLE_SIZE; i++) {
+ if (dm_odm->RFCalibrateInfo.bCCKinCH14) {
+ if (ODM_CompareMemory(dm_odm, (void *)&TempCCk, (void *)&CCKSwingTable_Ch14[i][2], 4) == 0) {
+ CCK_index_old = (u8)i;
+ dm_odm->BbSwingIdxCckBase = (u8)i;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("Initial reg0x%x = 0x%x, CCK_index=0x%x, ch 14 %d\n",
+ rCCK0_TxFilter2, TempCCk, CCK_index_old, dm_odm->RFCalibrateInfo.bCCKinCH14));
+ break;
+ }
+ } else {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("RegA24: 0x%X, CCKSwingTable_Ch1_Ch13[%d][2]: CCKSwingTable_Ch1_Ch13[i][2]: 0x%X\n",
+ TempCCk, i, CCKSwingTable_Ch1_Ch13[i][2]));
+ if (ODM_CompareMemory(dm_odm, (void *)&TempCCk, (void *)&CCKSwingTable_Ch1_Ch13[i][2], 4) == 0) {
+ CCK_index_old = (u8)i;
+ dm_odm->BbSwingIdxCckBase = (u8)i;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("Initial reg0x%x = 0x%x, CCK_index=0x%x, ch14 %d\n",
+ rCCK0_TxFilter2, TempCCk, CCK_index_old, dm_odm->RFCalibrateInfo.bCCKinCH14));
+ break;
+ }
+ }
+ }
+
+ if (!dm_odm->RFCalibrateInfo.ThermalValue) {
+ dm_odm->RFCalibrateInfo.ThermalValue = pHalData->EEPROMThermalMeter;
+ dm_odm->RFCalibrateInfo.ThermalValue_LCK = ThermalValue;
+ dm_odm->RFCalibrateInfo.ThermalValue_IQK = ThermalValue;
+
+ for (i = 0; i < rf; i++)
+ dm_odm->RFCalibrateInfo.OFDM_index[i] = OFDM_index_old[i];
+ dm_odm->RFCalibrateInfo.CCK_index = CCK_index_old;
+ }
+
+ if (dm_odm->RFCalibrateInfo.bReloadtxpowerindex)
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("reload ofdm index for band switch\n"));
+
+ /* calculate average thermal meter */
+ dm_odm->RFCalibrateInfo.ThermalValue_AVG[dm_odm->RFCalibrateInfo.ThermalValue_AVG_index] = ThermalValue;
+ dm_odm->RFCalibrateInfo.ThermalValue_AVG_index++;
+ if (dm_odm->RFCalibrateInfo.ThermalValue_AVG_index == AVG_THERMAL_NUM_88E)
+ dm_odm->RFCalibrateInfo.ThermalValue_AVG_index = 0;
+
+ for (i = 0; i < AVG_THERMAL_NUM_88E; i++) {
+ if (dm_odm->RFCalibrateInfo.ThermalValue_AVG[i]) {
+ ThermalValue_AVG += dm_odm->RFCalibrateInfo.ThermalValue_AVG[i];
+ ThermalValue_AVG_count++;
+ }
+ }
+
+ if (ThermalValue_AVG_count) {
+ ThermalValue = (u8)(ThermalValue_AVG / ThermalValue_AVG_count);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("AVG Thermal Meter = 0x%x\n", ThermalValue));
+ }
+
+ if (dm_odm->RFCalibrateInfo.bReloadtxpowerindex) {
+ delta = ThermalValue > pHalData->EEPROMThermalMeter ?
+ (ThermalValue - pHalData->EEPROMThermalMeter) :
+ (pHalData->EEPROMThermalMeter - ThermalValue);
+ dm_odm->RFCalibrateInfo.bReloadtxpowerindex = false;
+ dm_odm->RFCalibrateInfo.bDoneTxpower = false;
+ } else if (dm_odm->RFCalibrateInfo.bDoneTxpower) {
+ delta = (ThermalValue > dm_odm->RFCalibrateInfo.ThermalValue) ?
+ (ThermalValue - dm_odm->RFCalibrateInfo.ThermalValue) :
+ (dm_odm->RFCalibrateInfo.ThermalValue - ThermalValue);
+ } else {
+ delta = ThermalValue > pHalData->EEPROMThermalMeter ?
+ (ThermalValue - pHalData->EEPROMThermalMeter) :
+ (pHalData->EEPROMThermalMeter - ThermalValue);
+ }
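+ /* Until a TX power adjustment has actually been applied (bDoneTxpower),
+ * delta is measured against the EEPROM calibration value; afterwards it
+ * tracks the last thermal reading that was acted on.
+ */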
+ delta_LCK = (ThermalValue > dm_odm->RFCalibrateInfo.ThermalValue_LCK) ?
+ (ThermalValue - dm_odm->RFCalibrateInfo.ThermalValue_LCK) :
+ (dm_odm->RFCalibrateInfo.ThermalValue_LCK - ThermalValue);
+ delta_IQK = (ThermalValue > dm_odm->RFCalibrateInfo.ThermalValue_IQK) ?
+ (ThermalValue - dm_odm->RFCalibrateInfo.ThermalValue_IQK) :
+ (dm_odm->RFCalibrateInfo.ThermalValue_IQK - ThermalValue);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x EEPROMthermalmeter 0x%x delta 0x%x delta_LCK 0x%x delta_IQK 0x%x\n",
+ ThermalValue, dm_odm->RFCalibrateInfo.ThermalValue,
+ pHalData->EEPROMThermalMeter, delta, delta_LCK, delta_IQK));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("pre thermal meter LCK 0x%x pre thermal meter IQK 0x%x delta_LCK_bound 0x%x delta_IQK_bound 0x%x\n",
+ dm_odm->RFCalibrateInfo.ThermalValue_LCK,
+ dm_odm->RFCalibrateInfo.ThermalValue_IQK,
+ dm_odm->RFCalibrateInfo.Delta_LCK,
+ dm_odm->RFCalibrateInfo.Delta_IQK));
+
+ if (delta_LCK >= 8) { /* Delta temperature is equal to or larger than 20 centigrade. */
+ dm_odm->RFCalibrateInfo.ThermalValue_LCK = ThermalValue;
+ PHY_LCCalibrate_8188E(Adapter);
+ }
+
+ if (delta > 0 && dm_odm->RFCalibrateInfo.TxPowerTrackControl) {
+ delta = ThermalValue > pHalData->EEPROMThermalMeter ?
+ (ThermalValue - pHalData->EEPROMThermalMeter) :
+ (pHalData->EEPROMThermalMeter - ThermalValue);
+ /* calculate new OFDM / CCK offset */
+ if (ThermalValue > pHalData->EEPROMThermalMeter)
+ j = 1;
+ else
+ j = 0;
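+ /* j selects the decrease (0) / increase (1) row; the loop locates the
+ * Thermal_mapping bucket containing delta, and the matching
+ * OFDM_index_mapping entry is added to both the OFDM and CCK swing indexes.
+ */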
+ for (offset = 0; offset < index_mapping_NUM_88E; offset++) {
+ if (delta < Thermal_mapping[j][offset]) {
+ if (offset != 0)
+ offset--;
+ break;
+ }
+ }
+ if (offset >= index_mapping_NUM_88E)
+ offset = index_mapping_NUM_88E-1;
+ for (i = 0; i < rf; i++)
+ OFDM_index[i] = dm_odm->RFCalibrateInfo.OFDM_index[i] + OFDM_index_mapping[j][offset];
+ CCK_index = dm_odm->RFCalibrateInfo.CCK_index + OFDM_index_mapping[j][offset];
+
+ if (is2t) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("temp OFDM_A_index=0x%x, OFDM_B_index=0x%x, CCK_index=0x%x\n",
+ dm_odm->RFCalibrateInfo.OFDM_index[0],
+ dm_odm->RFCalibrateInfo.OFDM_index[1],
+ dm_odm->RFCalibrateInfo.CCK_index));
+ } else {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("temp OFDM_A_index=0x%x, CCK_index=0x%x\n",
+ dm_odm->RFCalibrateInfo.OFDM_index[0],
+ dm_odm->RFCalibrateInfo.CCK_index));
+ }
+
+ for (i = 0; i < rf; i++) {
+ if (OFDM_index[i] > OFDM_TABLE_SIZE_92D-1)
+ OFDM_index[i] = OFDM_TABLE_SIZE_92D-1;
+ else if (OFDM_index[i] < OFDM_min_index)
+ OFDM_index[i] = OFDM_min_index;
+ }
+
+ if (CCK_index > CCK_TABLE_SIZE-1)
+ CCK_index = CCK_TABLE_SIZE-1;
+ else if (CCK_index < 0)
+ CCK_index = 0;
+
+ if (is2t) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("new OFDM_A_index=0x%x, OFDM_B_index=0x%x, CCK_index=0x%x\n",
+ OFDM_index[0], OFDM_index[1], CCK_index));
+ } else {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("new OFDM_A_index=0x%x, CCK_index=0x%x\n",
+ OFDM_index[0], CCK_index));
+ }
+
+ /* 2 temporarily remove bNOPG */
+ /* Config by SwingTable */
+ if (dm_odm->RFCalibrateInfo.TxPowerTrackControl) {
+ dm_odm->RFCalibrateInfo.bDoneTxpower = true;
+
+ /* Adjust OFDM Ant_A according to IQK result */
+ ele_D = (OFDMSwingTable[(u8)OFDM_index[0]] & 0xFFC00000)>>22;
+ X = dm_odm->RFCalibrateInfo.IQKMatrixRegSetting[Indexforchannel].Value[0][0];
+ Y = dm_odm->RFCalibrateInfo.IQKMatrixRegSetting[Indexforchannel].Value[0][1];
+
+ /* Revise TX power table. */
+ dm_odm->BbSwingIdxOfdm = (u8)OFDM_index[0];
+ dm_odm->BbSwingIdxCck = (u8)CCK_index;
+
+ if (dm_odm->BbSwingIdxOfdmCurrent != dm_odm->BbSwingIdxOfdm) {
+ dm_odm->BbSwingIdxOfdmCurrent = dm_odm->BbSwingIdxOfdm;
+ dm_odm->BbSwingFlagOfdm = true;
+ }
+
+ if (dm_odm->BbSwingIdxCckCurrent != dm_odm->BbSwingIdxCck) {
+ dm_odm->BbSwingIdxCckCurrent = dm_odm->BbSwingIdxCck;
+ dm_odm->BbSwingFlagCck = true;
+ }
+
+ if (X != 0) {
+ if ((X & 0x00000200) != 0)
+ X = X | 0xFFFFFC00;
+ ele_A = ((X * ele_D)>>8)&0x000003FF;
+
+ /* new element C = element D x Y */
+ if ((Y & 0x00000200) != 0)
+ Y = Y | 0xFFFFFC00;
+ ele_C = ((Y * ele_D)>>8)&0x000003FF;
+
+ /* 2012/04/23 MH According to Luke's suggestion, we must not write the BB */
+ /* digital path to increase TX power; otherwise EVM degrades. */
+ }
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("TxPwrTracking for path A: X=0x%x, Y=0x%x ele_A=0x%x ele_C=0x%x ele_D=0x%x 0xe94=0x%x 0xe9c=0x%x\n",
+ (u32)X, (u32)Y, (u32)ele_A, (u32)ele_C, (u32)ele_D, (u32)X, (u32)Y));
+
+ if (is2t) {
+ ele_D = (OFDMSwingTable[(u8)OFDM_index[1]] & 0xFFC00000)>>22;
+
+ /* new element A = element D x X */
+ X = dm_odm->RFCalibrateInfo.IQKMatrixRegSetting[Indexforchannel].Value[0][4];
+ Y = dm_odm->RFCalibrateInfo.IQKMatrixRegSetting[Indexforchannel].Value[0][5];
+
+ if ((X != 0) && (*(dm_odm->pBandType) == ODM_BAND_2_4G)) {
+ if ((X & 0x00000200) != 0) /* consider minus */
+ X = X | 0xFFFFFC00;
+ ele_A = ((X * ele_D)>>8)&0x000003FF;
+
+ /* new element C = element D x Y */
+ if ((Y & 0x00000200) != 0)
+ Y = Y | 0xFFFFFC00;
+ ele_C = ((Y * ele_D)>>8)&0x000003FF;
+
+ /* write new elements A, C, D to regC88 and regC9C, element B is always 0 */
+ value32 = (ele_D<<22) | ((ele_C&0x3F)<<16) | ele_A;
+ ODM_SetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, bMaskDWord, value32);
+
+ value32 = (ele_C&0x000003C0)>>6;
+ ODM_SetBBReg(dm_odm, rOFDM0_XDTxAFE, bMaskH4Bits, value32);
+
+ value32 = ((X * ele_D)>>7)&0x01;
+ ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT28, value32);
+ } else {
+ ODM_SetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, bMaskDWord, OFDMSwingTable[(u8)OFDM_index[1]]);
+ ODM_SetBBReg(dm_odm, rOFDM0_XDTxAFE, bMaskH4Bits, 0x00);
+ ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT28, 0x00);
+ }
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("TxPwrTracking path B: X=0x%x, Y=0x%x ele_A=0x%x ele_C=0x%x ele_D=0x%x 0xeb4=0x%x 0xebc=0x%x\n",
+ (u32)X, (u32)Y, (u32)ele_A,
+ (u32)ele_C, (u32)ele_D, (u32)X, (u32)Y));
+ }
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("TxPwrTracking 0xc80 = 0x%x, 0xc94 = 0x%x RF 0x24 = 0x%x\n",
+ ODM_GetBBReg(dm_odm, 0xc80, bMaskDWord), ODM_GetBBReg(dm_odm,
+ 0xc94, bMaskDWord), ODM_GetRFReg(dm_odm, RF_PATH_A, 0x24, bRFRegOffsetMask)));
+ }
+ }
+
+ if (delta_IQK >= 8) { /* Delta temperature is equal to or larger than 20 centigrade. */
+ ODM_ResetIQKResult(dm_odm);
+
+ dm_odm->RFCalibrateInfo.ThermalValue_IQK = ThermalValue;
+ PHY_IQCalibrate_8188E(Adapter, false);
+ }
+ /* update thermal meter value */
+ if (dm_odm->RFCalibrateInfo.TxPowerTrackControl)
+ dm_odm->RFCalibrateInfo.ThermalValue = ThermalValue;
+ }
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("<===dm_TXPowerTrackingCallback_ThermalMeter_8188E\n"));
+ dm_odm->RFCalibrateInfo.TXPowercount = 0;
+}
+
+/* 1 7. IQK */
+#define MAX_TOLERANCE 5
+#define IQK_DELAY_TIME 1 /* ms */
+
+static u8 /* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
+phy_PathA_IQK_8188E(struct adapter *adapt, bool configPathB)
+{
+ u32 regeac, regE94, regE9C, regEA4;
+ u8 result = 0x00;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A IQK!\n"));
+
+ /* 1 Tx IQK */
+ /* path-A IQK setting */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A IQK setting!\n"));
+ ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x10008c1c);
+ ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x30008c1c);
+ ODM_SetBBReg(dm_odm, rTx_IQK_PI_A, bMaskDWord, 0x8214032a);
+ ODM_SetBBReg(dm_odm, rRx_IQK_PI_A, bMaskDWord, 0x28160000);
+
+ /* LO calibration setting */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n"));
+ ODM_SetBBReg(dm_odm, rIQK_AGC_Rsp, bMaskDWord, 0x00462911);
+
+ /* One shot, path A LOK & IQK */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path A LOK & IQK!\n"));
+ ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
+ ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+
+ /* delay x ms */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Delay %d ms for One shot, path A LOK & IQK.\n", IQK_DELAY_TIME_88E));
+ /* PlatformStallExecution(IQK_DELAY_TIME_88E*1000); */
+ ODM_delay_ms(IQK_DELAY_TIME_88E);
+
+ /* Check failed */
+ regeac = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xeac = 0x%x\n", regeac));
+ regE94 = ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_A, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe94 = 0x%x\n", regE94));
+ regE9C = ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_A, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe9c = 0x%x\n", regE9C));
+ regEA4 = ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_A_2, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xea4 = 0x%x\n", regEA4));
+
+ if (!(regeac & BIT28) &&
+ (((regE94 & 0x03FF0000)>>16) != 0x142) &&
+ (((regE9C & 0x03FF0000)>>16) != 0x42))
+ result |= 0x01;
+ return result;
+}
+
+static u8 /* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
+phy_PathA_RxIQK(struct adapter *adapt, bool configPathB)
+{
+ u32 regeac, regE94, regE9C, regEA4, u4tmp;
+ u8 result = 0x00;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Rx IQK!\n"));
+
+ /* 1 Get TXIMR setting */
+ /* modify RXIQK mode table */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A Rx IQK modify RXIQK mode table!\n"));
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_WE_LUT, bRFRegOffsetMask, 0x800a0);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x30000);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0000f);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf117B);
+
+ /* PA,PAD off */
+ ODM_SetRFReg(dm_odm, RF_PATH_A, 0xdf, bRFRegOffsetMask, 0x980);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, 0x56, bRFRegOffsetMask, 0x51000);
+
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
+
+ /* IQK setting */
+ ODM_SetBBReg(dm_odm, rTx_IQK, bMaskDWord, 0x01007c00);
+ ODM_SetBBReg(dm_odm, rRx_IQK, bMaskDWord, 0x81004800);
+
+ /* path-A IQK setting */
+ ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x10008c1c);
+ ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x30008c1c);
+ ODM_SetBBReg(dm_odm, rTx_IQK_PI_A, bMaskDWord, 0x82160c1f);
+ ODM_SetBBReg(dm_odm, rRx_IQK_PI_A, bMaskDWord, 0x28160000);
+
+ /* LO calibration setting */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n"));
+ ODM_SetBBReg(dm_odm, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911);
+
+ /* One shot, path A LOK & IQK */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path A LOK & IQK!\n"));
+ ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
+ ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+
+ /* delay x ms */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("Delay %d ms for One shot, path A LOK & IQK.\n",
+ IQK_DELAY_TIME_88E));
+ ODM_delay_ms(IQK_DELAY_TIME_88E);
+
+ /* Check failed */
+ regeac = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("0xeac = 0x%x\n", regeac));
+ regE94 = ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_A, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("0xe94 = 0x%x\n", regE94));
+ regE9C = ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_A, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("0xe9c = 0x%x\n", regE9C));
+
+ if (!(regeac & BIT28) &&
+ (((regE94 & 0x03FF0000)>>16) != 0x142) &&
+ (((regE9C & 0x03FF0000)>>16) != 0x42))
+ result |= 0x01;
+ else /* if Tx not OK, ignore Rx */
+ return result;
+
+ u4tmp = 0x80007C00 | (regE94&0x3FF0000) | ((regE9C&0x3FF0000) >> 16);
+ ODM_SetBBReg(dm_odm, rTx_IQK, bMaskDWord, u4tmp);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe40 = 0x%x u4tmp = 0x%x\n", ODM_GetBBReg(dm_odm, rTx_IQK, bMaskDWord), u4tmp));
+
+ /* 1 RX IQK */
+ /* modify RXIQK mode table */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A Rx IQK modify RXIQK mode table 2!\n"));
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_WE_LUT, bRFRegOffsetMask, 0x800a0);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x30000);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0000f);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf7ffa);
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
+
+ /* IQK setting */
+ ODM_SetBBReg(dm_odm, rRx_IQK, bMaskDWord, 0x01004800);
+
+ /* path-A IQK setting */
+ ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x38008c1c);
+ ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x18008c1c);
+ ODM_SetBBReg(dm_odm, rTx_IQK_PI_A, bMaskDWord, 0x82160c05);
+ ODM_SetBBReg(dm_odm, rRx_IQK_PI_A, bMaskDWord, 0x28160c1f);
+
+ /* LO calibration setting */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n"));
+ ODM_SetBBReg(dm_odm, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911);
+
+ /* One shot, path A LOK & IQK */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path A LOK & IQK!\n"));
+ ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
+ ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+
+ /* delay x ms */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Delay %d ms for One shot, path A LOK & IQK.\n", IQK_DELAY_TIME_88E));
+ /* PlatformStallExecution(IQK_DELAY_TIME_88E*1000); */
+ ODM_delay_ms(IQK_DELAY_TIME_88E);
+
+ /* Check failed */
+ regeac = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xeac = 0x%x\n", regeac));
+ regE94 = ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_A, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe94 = 0x%x\n", regE94));
+ regE9C = ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_A, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe9c = 0x%x\n", regE9C));
+ regEA4 = ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_A_2, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xea4 = 0x%x\n", regEA4));
+
+ /* reload RF 0xdf */
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, 0xdf, bRFRegOffsetMask, 0x180);
+
+ if (!(regeac & BIT27) && /* if Tx is OK, check whether Rx is OK */
+ (((regEA4 & 0x03FF0000)>>16) != 0x132) &&
+ (((regeac & 0x03FF0000)>>16) != 0x36))
+ result |= 0x02;
+ else
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Rx IQK fail!!\n"));
+
+ return result;
+}
+
+static u8 /* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
+phy_PathB_IQK_8188E(struct adapter *adapt)
+{
+ u32 regeac, regeb4, regebc, regec4, regecc;
+ u8 result = 0x00;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B IQK!\n"));
+
+ /* One shot, path B LOK & IQK */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path A LOK & IQK!\n"));
+ ODM_SetBBReg(dm_odm, rIQK_AGC_Cont, bMaskDWord, 0x00000002);
+ ODM_SetBBReg(dm_odm, rIQK_AGC_Cont, bMaskDWord, 0x00000000);
+
+ /* delay x ms */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("Delay %d ms for One shot, path B LOK & IQK.\n",
+ IQK_DELAY_TIME_88E));
+ ODM_delay_ms(IQK_DELAY_TIME_88E);
+
+ /* Check failed */
+ regeac = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("0xeac = 0x%x\n", regeac));
+ regeb4 = ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_B, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("0xeb4 = 0x%x\n", regeb4));
+ regebc = ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_B, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("0xebc = 0x%x\n", regebc));
+ regec4 = ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_B_2, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("0xec4 = 0x%x\n", regec4));
+ regecc = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_B_2, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("0xecc = 0x%x\n", regecc));
+
+ if (!(regeac & BIT31) &&
+ (((regeb4 & 0x03FF0000)>>16) != 0x142) &&
+ (((regebc & 0x03FF0000)>>16) != 0x42))
+ result |= 0x01;
+ else
+ return result;
+
+ if (!(regeac & BIT30) &&
+ (((regec4 & 0x03FF0000)>>16) != 0x132) &&
+ (((regecc & 0x03FF0000)>>16) != 0x36))
+ result |= 0x02;
+ else
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B Rx IQK fail!!\n"));
+ return result;
+}
+
+static void patha_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8], u8 final_candidate, bool txonly)
+{
+ u32 Oldval_0, X, TX0_A, reg;
+ s32 Y, TX0_C;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("Path A IQ Calibration %s !\n",
+ (iqkok) ? "Success" : "Failed"));
+
+ if (final_candidate == 0xFF) {
+ return;
+ } else if (iqkok) {
+ Oldval_0 = (ODM_GetBBReg(dm_odm, rOFDM0_XATxIQImbalance, bMaskDWord) >> 22) & 0x3FF;
+
+ X = result[final_candidate][0];
+ if ((X & 0x00000200) != 0)
+ X = X | 0xFFFFFC00;
+ TX0_A = (X * Oldval_0) >> 8;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("X = 0x%x, TX0_A = 0x%x, Oldval_0 0x%x\n",
+ X, TX0_A, Oldval_0));
+ ODM_SetBBReg(dm_odm, rOFDM0_XATxIQImbalance, 0x3FF, TX0_A);
+
+ ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT(31), ((X * Oldval_0>>7) & 0x1));
+
+ Y = result[final_candidate][1];
+ if ((Y & 0x00000200) != 0)
+ Y = Y | 0xFFFFFC00;
+
+ TX0_C = (Y * Oldval_0) >> 8;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Y = 0x%x, TX = 0x%x\n", Y, TX0_C));
+ ODM_SetBBReg(dm_odm, rOFDM0_XCTxAFE, 0xF0000000, ((TX0_C&0x3C0)>>6));
+ ODM_SetBBReg(dm_odm, rOFDM0_XATxIQImbalance, 0x003F0000, (TX0_C&0x3F));
+
+ ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT(29), ((Y * Oldval_0>>7) & 0x1));
+
+ if (txonly) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("patha_fill_iqk only Tx OK\n"));
+ return;
+ }
+
+ reg = result[final_candidate][2];
+ ODM_SetBBReg(dm_odm, rOFDM0_XARxIQImbalance, 0x3FF, reg);
+
+ reg = result[final_candidate][3] & 0x3F;
+ ODM_SetBBReg(dm_odm, rOFDM0_XARxIQImbalance, 0xFC00, reg);
+
+ reg = (result[final_candidate][3] >> 6) & 0xF;
+ ODM_SetBBReg(dm_odm, rOFDM0_RxIQExtAnta, 0xF0000000, reg);
+ }
+}
+
+static void pathb_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8], u8 final_candidate, bool txonly)
+{
+ u32 Oldval_1, X, TX1_A, reg;
+ s32 Y, TX1_C;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("Path B IQ Calibration %s !\n",
+ (iqkok) ? "Success" : "Failed"));
+
+ if (final_candidate == 0xFF) {
+ return;
+ } else if (iqkok) {
+ Oldval_1 = (ODM_GetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, bMaskDWord) >> 22) & 0x3FF;
+
+ X = result[final_candidate][4];
+ if ((X & 0x00000200) != 0)
+ X = X | 0xFFFFFC00;
+ TX1_A = (X * Oldval_1) >> 8;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("X = 0x%x, TX1_A = 0x%x\n", X, TX1_A));
+ ODM_SetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, 0x3FF, TX1_A);
+
+ ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT(27), ((X * Oldval_1>>7) & 0x1));
+
+ Y = result[final_candidate][5];
+ if ((Y & 0x00000200) != 0)
+ Y = Y | 0xFFFFFC00;
+
+ TX1_C = (Y * Oldval_1) >> 8;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Y = 0x%x, TX1_C = 0x%x\n", Y, TX1_C));
+ ODM_SetBBReg(dm_odm, rOFDM0_XDTxAFE, 0xF0000000, ((TX1_C&0x3C0)>>6));
+ ODM_SetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, 0x003F0000, (TX1_C&0x3F));
+
+ ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT(25), ((Y * Oldval_1>>7) & 0x1));
+
+ if (txonly)
+ return;
+
+ reg = result[final_candidate][6];
+ ODM_SetBBReg(dm_odm, rOFDM0_XBRxIQImbalance, 0x3FF, reg);
+
+ reg = result[final_candidate][7] & 0x3F;
+ ODM_SetBBReg(dm_odm, rOFDM0_XBRxIQImbalance, 0xFC00, reg);
+
+ reg = (result[final_candidate][7] >> 6) & 0xF;
+ ODM_SetBBReg(dm_odm, rOFDM0_AGCRSSITable, 0x0000F000, reg);
+ }
+}
+
+/* */
+/* 2011/07/26 MH Add an API for testing IQK fail case. */
+/* */
+/* MP: already declared in odm.c. */
+static bool ODM_CheckPowerStatus(struct adapter *Adapter)
+{
+ return true;
+}
+
+void _PHY_SaveADDARegisters(struct adapter *adapt, u32 *ADDAReg, u32 *ADDABackup, u32 RegisterNum)
+{
+ u32 i;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+
+ if (!ODM_CheckPowerStatus(adapt))
+ return;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Save ADDA parameters.\n"));
+ for (i = 0; i < RegisterNum; i++) {
+ ADDABackup[i] = ODM_GetBBReg(dm_odm, ADDAReg[i], bMaskDWord);
+ }
+}
+
+static void _PHY_SaveMACRegisters(
+ struct adapter *adapt,
+ u32 *MACReg,
+ u32 *MACBackup
+ )
+{
+ u32 i;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Save MAC parameters.\n"));
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) {
+ MACBackup[i] = ODM_Read1Byte(dm_odm, MACReg[i]);
+ }
+ MACBackup[i] = ODM_Read4Byte(dm_odm, MACReg[i]);
+}
+
+static void reload_adda_reg(struct adapter *adapt, u32 *ADDAReg, u32 *ADDABackup, u32 RegiesterNum)
+{
+ u32 i;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Reload ADDA power saving parameters !\n"));
+ for (i = 0; i < RegiesterNum; i++)
+ ODM_SetBBReg(dm_odm, ADDAReg[i], bMaskDWord, ADDABackup[i]);
+}
+
+static void
+_PHY_ReloadMACRegisters(
+ struct adapter *adapt,
+ u32 *MACReg,
+ u32 *MACBackup
+ )
+{
+ u32 i;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Reload MAC parameters !\n"));
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) {
+ ODM_Write1Byte(dm_odm, MACReg[i], (u8)MACBackup[i]);
+ }
+ ODM_Write4Byte(dm_odm, MACReg[i], MACBackup[i]);
+}
+
+void
+_PHY_PathADDAOn(
+ struct adapter *adapt,
+ u32 *ADDAReg,
+ bool isPathAOn,
+ bool is2t
+ )
+{
+ u32 pathOn;
+ u32 i;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("ADDA ON.\n"));
+
+ pathOn = isPathAOn ? 0x04db25a4 : 0x0b1b25a4;
+ if (!is2t) {
+ pathOn = 0x0bdb25a0;
+ ODM_SetBBReg(dm_odm, ADDAReg[0], bMaskDWord, 0x0b1b25a0);
+ } else {
+ ODM_SetBBReg(dm_odm, ADDAReg[0], bMaskDWord, pathOn);
+ }
+
+ for (i = 1; i < IQK_ADDA_REG_NUM; i++)
+ ODM_SetBBReg(dm_odm, ADDAReg[i], bMaskDWord, pathOn);
+}
+
+void
+_PHY_MACSettingCalibration(
+ struct adapter *adapt,
+ u32 *MACReg,
+ u32 *MACBackup
+ )
+{
+ u32 i = 0;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("MAC settings for Calibration.\n"));
+
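+ /* Force the first MAC register to 0x3F, then rewrite the remaining backups
+ * with BIT3 cleared (BIT5 cleared for the final register).
+ */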
+ ODM_Write1Byte(dm_odm, MACReg[i], 0x3F);
+
+ for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++) {
+ ODM_Write1Byte(dm_odm, MACReg[i], (u8)(MACBackup[i]&(~BIT3)));
+ }
+ ODM_Write1Byte(dm_odm, MACReg[i], (u8)(MACBackup[i]&(~BIT5)));
+}
+
+void
+_PHY_PathAStandBy(
+ struct adapter *adapt
+ )
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A standby mode!\n"));
+
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x0);
+ ODM_SetBBReg(dm_odm, 0x840, bMaskDWord, 0x00010000);
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
+}
+
+static void _PHY_PIModeSwitch(struct adapter *adapt, bool PIMode)
+{
+ u32 mode;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("BB Switch to %s mode!\n", (PIMode ? "PI" : "SI")));
+
+ mode = PIMode ? 0x01000100 : 0x01000000;
+ ODM_SetBBReg(dm_odm, rFPGA0_XA_HSSIParameter1, bMaskDWord, mode);
+ ODM_SetBBReg(dm_odm, rFPGA0_XB_HSSIParameter1, bMaskDWord, mode);
+}
+
+static bool phy_SimularityCompare_8188E(struct adapter *adapt, s32 resulta[][8], u8 c1, u8 c2)
+{
+ u32 i, j, diff, sim_bitmap, bound = 0;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+ u8 final_candidate[2] = {0xFF, 0xFF}; /* for path A and path B */
+ bool result = true;
+ bool is2t;
+ s32 tmp1 = 0, tmp2 = 0;
+
+ if ((dm_odm->RFType == ODM_2T2R) || (dm_odm->RFType == ODM_2T3R) || (dm_odm->RFType == ODM_2T4R))
+ is2t = true;
+ else
+ is2t = false;
+
+ if (is2t)
+ bound = 8;
+ else
+ bound = 4;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("===> IQK:phy_SimularityCompare_8188E c1 %d c2 %d!!!\n", c1, c2));
+
+ sim_bitmap = 0;
+
+ for (i = 0; i < bound; i++) {
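+ /* Odd-indexed entries hold signed 10-bit IQK results; sign-extend bit 9 before comparing. */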
+ if ((i == 1) || (i == 3) || (i == 5) || (i == 7)) {
+ if ((resulta[c1][i] & 0x00000200) != 0)
+ tmp1 = resulta[c1][i] | 0xFFFFFC00;
+ else
+ tmp1 = resulta[c1][i];
+
+ if ((resulta[c2][i] & 0x00000200) != 0)
+ tmp2 = resulta[c2][i] | 0xFFFFFC00;
+ else
+ tmp2 = resulta[c2][i];
+ } else {
+ tmp1 = resulta[c1][i];
+ tmp2 = resulta[c2][i];
+ }
+
+ diff = (tmp1 > tmp2) ? (tmp1 - tmp2) : (tmp2 - tmp1);
+
+ if (diff > MAX_TOLERANCE) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("IQK:phy_SimularityCompare_8188E difference overflow index %d compare1 0x%x compare2 0x%x!!!\n",
+ i, resulta[c1][i], resulta[c2][i]));
+
+ if ((i == 2 || i == 6) && !sim_bitmap) {
+ if (resulta[c1][i] + resulta[c1][i+1] == 0)
+ final_candidate[(i/4)] = c2;
+ else if (resulta[c2][i] + resulta[c2][i+1] == 0)
+ final_candidate[(i/4)] = c1;
+ else
+ sim_bitmap = sim_bitmap | (1<<i);
+ } else {
+ sim_bitmap = sim_bitmap | (1<<i);
+ }
+ }
+ }
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK:phy_SimularityCompare_8188E sim_bitmap %d !!!\n", sim_bitmap));
+
+ if (sim_bitmap == 0) {
+ for (i = 0; i < (bound/4); i++) {
+ if (final_candidate[i] != 0xFF) {
+ for (j = i*4; j < (i+1)*4-2; j++)
+ resulta[3][j] = resulta[final_candidate[i]][j];
+ result = false;
+ }
+ }
+ return result;
+ } else {
+ if (!(sim_bitmap & 0x03)) { /* path A TX OK */
+ for (i = 0; i < 2; i++)
+ resulta[3][i] = resulta[c1][i];
+ }
+ if (!(sim_bitmap & 0x0c)) { /* path A RX OK */
+ for (i = 2; i < 4; i++)
+ resulta[3][i] = resulta[c1][i];
+ }
+
+ if (!(sim_bitmap & 0x30)) { /* path B TX OK */
+ for (i = 4; i < 6; i++)
+ resulta[3][i] = resulta[c1][i];
+ }
+
+ if (!(sim_bitmap & 0xc0)) { /* path B RX OK */
+ for (i = 6; i < 8; i++)
+ resulta[3][i] = resulta[c1][i];
+ }
+ return false;
+ }
+}
+
+static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t, bool is2t)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+ u32 i;
+ u8 PathAOK, PathBOK;
+ u32 ADDA_REG[IQK_ADDA_REG_NUM] = {
+ rFPGA0_XCD_SwitchControl, rBlue_Tooth,
+ rRx_Wait_CCA, rTx_CCK_RFON,
+ rTx_CCK_BBON, rTx_OFDM_RFON,
+ rTx_OFDM_BBON, rTx_To_Rx,
+ rTx_To_Tx, rRx_CCK,
+ rRx_OFDM, rRx_Wait_RIFS,
+ rRx_TO_Rx, rStandby,
+ rSleep, rPMPD_ANAEN };
+ u32 IQK_MAC_REG[IQK_MAC_REG_NUM] = {
+ REG_TXPAUSE, REG_BCN_CTRL,
+ REG_BCN_CTRL_1, REG_GPIO_MUXCFG};
+
+ /* 92C and 92D use different definitions of IQK_BB_REG */
+ u32 IQK_BB_REG_92C[IQK_BB_REG_NUM] = {
+ rOFDM0_TRxPathEnable, rOFDM0_TRMuxPar,
+ rFPGA0_XCD_RFInterfaceSW, rConfig_AntA, rConfig_AntB,
+ rFPGA0_XAB_RFInterfaceSW, rFPGA0_XA_RFInterfaceOE,
+ rFPGA0_XB_RFInterfaceOE, rFPGA0_RFMOD
+ };
+
+ u32 retryCount;
+
+ if (*(dm_odm->mp_mode) == 1)
+ retryCount = 9;
+ else
+ retryCount = 2;
+ /* Note: IQ calibration must be performed after loading */
+ /* PHY_REG.txt , and radio_a, radio_b.txt */
+
+ if (t == 0) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQ Calibration for %s for %d times\n", (is2t ? "2T2R" : "1T1R"), t));
+
+ /* Save ADDA parameters, turn Path A ADDA on */
+ _PHY_SaveADDARegisters(adapt, ADDA_REG, dm_odm->RFCalibrateInfo.ADDA_backup, IQK_ADDA_REG_NUM);
+ _PHY_SaveMACRegisters(adapt, IQK_MAC_REG, dm_odm->RFCalibrateInfo.IQK_MAC_backup);
+ _PHY_SaveADDARegisters(adapt, IQK_BB_REG_92C, dm_odm->RFCalibrateInfo.IQK_BB_backup, IQK_BB_REG_NUM);
+ }
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQ Calibration for %s for %d times\n", (is2t ? "2T2R" : "1T1R"), t));
+
+ _PHY_PathADDAOn(adapt, ADDA_REG, true, is2t);
+ if (t == 0)
+ dm_odm->RFCalibrateInfo.bRfPiEnable = (u8)ODM_GetBBReg(dm_odm, rFPGA0_XA_HSSIParameter1, BIT(8));
+
+ if (!dm_odm->RFCalibrateInfo.bRfPiEnable) {
+ /* Switch BB to PI mode to do IQ Calibration. */
+ _PHY_PIModeSwitch(adapt, true);
+ }
+
+ /* BB setting */
+ ODM_SetBBReg(dm_odm, rFPGA0_RFMOD, BIT24, 0x00);
+ ODM_SetBBReg(dm_odm, rOFDM0_TRxPathEnable, bMaskDWord, 0x03a05600);
+ ODM_SetBBReg(dm_odm, rOFDM0_TRMuxPar, bMaskDWord, 0x000800e4);
+ ODM_SetBBReg(dm_odm, rFPGA0_XCD_RFInterfaceSW, bMaskDWord, 0x22204000);
+
+ ODM_SetBBReg(dm_odm, rFPGA0_XAB_RFInterfaceSW, BIT10, 0x01);
+ ODM_SetBBReg(dm_odm, rFPGA0_XAB_RFInterfaceSW, BIT26, 0x01);
+ ODM_SetBBReg(dm_odm, rFPGA0_XA_RFInterfaceOE, BIT10, 0x00);
+ ODM_SetBBReg(dm_odm, rFPGA0_XB_RFInterfaceOE, BIT10, 0x00);
+
+ if (is2t) {
+ ODM_SetBBReg(dm_odm, rFPGA0_XA_LSSIParameter, bMaskDWord, 0x00010000);
+ ODM_SetBBReg(dm_odm, rFPGA0_XB_LSSIParameter, bMaskDWord, 0x00010000);
+ }
+
+ /* MAC settings */
+ _PHY_MACSettingCalibration(adapt, IQK_MAC_REG, dm_odm->RFCalibrateInfo.IQK_MAC_backup);
+
+ /* Page B init */
+ /* AP or IQK */
+ ODM_SetBBReg(dm_odm, rConfig_AntA, bMaskDWord, 0x0f600000);
+
+ if (is2t)
+ ODM_SetBBReg(dm_odm, rConfig_AntB, bMaskDWord, 0x0f600000);
+
+ /* IQ calibration setting */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK setting!\n"));
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
+ ODM_SetBBReg(dm_odm, rTx_IQK, bMaskDWord, 0x01007c00);
+ ODM_SetBBReg(dm_odm, rRx_IQK, bMaskDWord, 0x81004800);
+
+ for (i = 0; i < retryCount; i++) {
+ PathAOK = phy_PathA_IQK_8188E(adapt, is2t);
+ if (PathAOK == 0x01) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Tx IQK Success!!\n"));
+ result[t][0] = (ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_A, bMaskDWord)&0x3FF0000)>>16;
+ result[t][1] = (ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_A, bMaskDWord)&0x3FF0000)>>16;
+ break;
+ }
+ }
+
+ for (i = 0; i < retryCount; i++) {
+ PathAOK = phy_PathA_RxIQK(adapt, is2t);
+ if (PathAOK == 0x03) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Rx IQK Success!!\n"));
+ result[t][2] = (ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_A_2, bMaskDWord)&0x3FF0000)>>16;
+ result[t][3] = (ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord)&0x3FF0000)>>16;
+ break;
+ } else {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Rx IQK Fail!!\n"));
+ }
+ }
+
+ if (PathAOK == 0x00) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A IQK failed!!\n"));
+ }
+
+ if (is2t) {
+ _PHY_PathAStandBy(adapt);
+
+ /* Turn Path B ADDA on */
+ _PHY_PathADDAOn(adapt, ADDA_REG, false, is2t);
+
+ for (i = 0; i < retryCount; i++) {
+ PathBOK = phy_PathB_IQK_8188E(adapt);
+ if (PathBOK == 0x03) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B IQK Success!!\n"));
+ result[t][4] = (ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_B, bMaskDWord)&0x3FF0000)>>16;
+ result[t][5] = (ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_B, bMaskDWord)&0x3FF0000)>>16;
+ result[t][6] = (ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_B_2, bMaskDWord)&0x3FF0000)>>16;
+ result[t][7] = (ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_B_2, bMaskDWord)&0x3FF0000)>>16;
+ break;
+ } else if (i == (retryCount - 1) && PathBOK == 0x01) { /* Tx IQK OK */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B Only Tx IQK Success!!\n"));
+ result[t][4] = (ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_B, bMaskDWord)&0x3FF0000)>>16;
+ result[t][5] = (ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_B, bMaskDWord)&0x3FF0000)>>16;
+ }
+ }
+
+ if (PathBOK == 0x00) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B IQK failed!!\n"));
+ }
+ }
+
+ /* Back to BB mode, load original value */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK:Back to BB mode, load original value!\n"));
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0);
+
+ if (t != 0) {
+ if (!dm_odm->RFCalibrateInfo.bRfPiEnable) {
+ /* Switch back BB to SI mode after finish IQ Calibration. */
+ _PHY_PIModeSwitch(adapt, false);
+ }
+
+ /* Reload ADDA power saving parameters */
+ reload_adda_reg(adapt, ADDA_REG, dm_odm->RFCalibrateInfo.ADDA_backup, IQK_ADDA_REG_NUM);
+
+ /* Reload MAC parameters */
+ _PHY_ReloadMACRegisters(adapt, IQK_MAC_REG, dm_odm->RFCalibrateInfo.IQK_MAC_backup);
+
+ reload_adda_reg(adapt, IQK_BB_REG_92C, dm_odm->RFCalibrateInfo.IQK_BB_backup, IQK_BB_REG_NUM);
+
+ /* Restore RX initial gain */
+ ODM_SetBBReg(dm_odm, rFPGA0_XA_LSSIParameter, bMaskDWord, 0x00032ed3);
+ if (is2t)
+ ODM_SetBBReg(dm_odm, rFPGA0_XB_LSSIParameter, bMaskDWord, 0x00032ed3);
+
+ /* load 0xe30 IQC default value */
+ ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x01008c00);
+ ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x01008c00);
+ }
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_IQCalibrate_8188E() <==\n"));
+}
+
+static void phy_LCCalibrate_8188E(struct adapter *adapt, bool is2t)
+{
+ u8 tmpreg;
+ u32 RF_Amode = 0, RF_Bmode = 0, LC_Cal;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+
+ /* Check continuous TX and Packet TX */
+ tmpreg = ODM_Read1Byte(dm_odm, 0xd03);
+
+ if ((tmpreg&0x70) != 0) /* Deal with continuous TX case */
+ ODM_Write1Byte(dm_odm, 0xd03, tmpreg&0x8F); /* disable all continuous TX */
+ else /* Deal with Packet TX case */
+ ODM_Write1Byte(dm_odm, REG_TXPAUSE, 0xFF); /* block all queues */
+
+ if ((tmpreg&0x70) != 0) {
+ /* 1. Read original RF mode */
+ /* Path-A */
+ RF_Amode = PHY_QueryRFReg(adapt, RF_PATH_A, RF_AC, bMask12Bits);
+
+ /* Path-B */
+ if (is2t)
+ RF_Bmode = PHY_QueryRFReg(adapt, RF_PATH_B, RF_AC, bMask12Bits);
+
+ /* 2. Set RF mode = standby mode */
+ /* Path-A */
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_AC, bMask12Bits, (RF_Amode&0x8FFFF)|0x10000);
+
+ /* Path-B */
+ if (is2t)
+ ODM_SetRFReg(dm_odm, RF_PATH_B, RF_AC, bMask12Bits, (RF_Bmode&0x8FFFF)|0x10000);
+ }
+
+ /* 3. Read RF reg18 */
+ LC_Cal = PHY_QueryRFReg(adapt, RF_PATH_A, RF_CHNLBW, bMask12Bits);
+
+ /* 4. Set LC calibration begin bit15 */
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_CHNLBW, bMask12Bits, LC_Cal|0x08000);
+
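+ /* Give the LC calibration time to complete before restoring the original TX/RF state. */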
+ ODM_sleep_ms(100);
+
+ /* Restore original situation */
+ if ((tmpreg&0x70) != 0) {
+ /* Deal with continuous TX case */
+ /* Path-A */
+ ODM_Write1Byte(dm_odm, 0xd03, tmpreg);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_AC, bMask12Bits, RF_Amode);
+
+ /* Path-B */
+ if (is2t)
+ ODM_SetRFReg(dm_odm, RF_PATH_B, RF_AC, bMask12Bits, RF_Bmode);
+ } else {
+ /* Deal with Packet TX case */
+ ODM_Write1Byte(dm_odm, REG_TXPAUSE, 0x00);
+ }
+}
+
+/* Analog Pre-distortion calibration */
+#define APK_BB_REG_NUM 8
+#define APK_CURVE_REG_NUM 4
+#define PATH_NUM 2
+
+static void phy_APCalibrate_8188E(struct adapter *adapt, s8 delta, bool is2t)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+ u32 regD[PATH_NUM];
+ u32 tmpreg, index, offset, apkbound;
+ u8 path, i, pathbound = PATH_NUM;
+ u32 BB_backup[APK_BB_REG_NUM];
+ u32 BB_REG[APK_BB_REG_NUM] = {
+ rFPGA1_TxBlock, rOFDM0_TRxPathEnable,
+ rFPGA0_RFMOD, rOFDM0_TRMuxPar,
+ rFPGA0_XCD_RFInterfaceSW, rFPGA0_XAB_RFInterfaceSW,
+ rFPGA0_XA_RFInterfaceOE, rFPGA0_XB_RFInterfaceOE };
+ u32 BB_AP_MODE[APK_BB_REG_NUM] = {
+ 0x00000020, 0x00a05430, 0x02040000,
+ 0x000800e4, 0x00204000 };
+ u32 BB_normal_AP_MODE[APK_BB_REG_NUM] = {
+ 0x00000020, 0x00a05430, 0x02040000,
+ 0x000800e4, 0x22204000 };
+
+ u32 AFE_backup[IQK_ADDA_REG_NUM];
+ u32 AFE_REG[IQK_ADDA_REG_NUM] = {
+ rFPGA0_XCD_SwitchControl, rBlue_Tooth,
+ rRx_Wait_CCA, rTx_CCK_RFON,
+ rTx_CCK_BBON, rTx_OFDM_RFON,
+ rTx_OFDM_BBON, rTx_To_Rx,
+ rTx_To_Tx, rRx_CCK,
+ rRx_OFDM, rRx_Wait_RIFS,
+ rRx_TO_Rx, rStandby,
+ rSleep, rPMPD_ANAEN };
+
+ u32 MAC_backup[IQK_MAC_REG_NUM];
+ u32 MAC_REG[IQK_MAC_REG_NUM] = {
+ REG_TXPAUSE, REG_BCN_CTRL,
+ REG_BCN_CTRL_1, REG_GPIO_MUXCFG};
+
+ u32 APK_RF_init_value[PATH_NUM][APK_BB_REG_NUM] = {
+ {0x0852c, 0x1852c, 0x5852c, 0x1852c, 0x5852c},
+ {0x2852e, 0x0852e, 0x3852e, 0x0852e, 0x0852e}
+ };
+
+ u32 APK_normal_RF_init_value[PATH_NUM][APK_BB_REG_NUM] = {
+ {0x0852c, 0x0a52c, 0x3a52c, 0x5a52c, 0x5a52c}, /* path A settings equal to path B settings */
+ {0x0852c, 0x0a52c, 0x5a52c, 0x5a52c, 0x5a52c}
+ };
+
+ u32 APK_RF_value_0[PATH_NUM][APK_BB_REG_NUM] = {
+ {0x52019, 0x52014, 0x52013, 0x5200f, 0x5208d},
+ {0x5201a, 0x52019, 0x52016, 0x52033, 0x52050}
+ };
+
+ u32 APK_normal_RF_value_0[PATH_NUM][APK_BB_REG_NUM] = {
+ {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a}, /* path A settings equal to path B settings */
+ {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a}
+ };
+
+ u32 AFE_on_off[PATH_NUM] = {
+ 0x04db25a4, 0x0b1b25a4}; /* path A on path B off / path A off path B on */
+
+ u32 APK_offset[PATH_NUM] = {
+ rConfig_AntA, rConfig_AntB};
+
+ u32 APK_normal_offset[PATH_NUM] = {
+ rConfig_Pmpd_AntA, rConfig_Pmpd_AntB};
+
+ u32 APK_value[PATH_NUM] = {
+ 0x92fc0000, 0x12fc0000};
+
+ u32 APK_normal_value[PATH_NUM] = {
+ 0x92680000, 0x12680000};
+
+ s8 APK_delta_mapping[APK_BB_REG_NUM][13] = {
+ {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
+ {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
+ {-6, -4, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
+ {-1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6},
+ {-11, -9, -7, -5, -3, -1, 0, 0, 0, 0, 0, 0, 0}
+ };
+
+ u32 APK_normal_setting_value_1[13] = {
+ 0x01017018, 0xf7ed8f84, 0x1b1a1816, 0x2522201e, 0x322e2b28,
+ 0x433f3a36, 0x5b544e49, 0x7b726a62, 0xa69a8f84, 0xdfcfc0b3,
+ 0x12680000, 0x00880000, 0x00880000
+ };
+
+ u32 APK_normal_setting_value_2[16] = {
+ 0x01c7021d, 0x01670183, 0x01000123, 0x00bf00e2, 0x008d00a3,
+ 0x0068007b, 0x004d0059, 0x003a0042, 0x002b0031, 0x001f0025,
+ 0x0017001b, 0x00110014, 0x000c000f, 0x0009000b, 0x00070008,
+ 0x00050006
+ };
+
+ u32 APK_result[PATH_NUM][APK_BB_REG_NUM]; /* val_1_1a, val_1_2a, val_2a, val_3a, val_4a */
+ s32 BB_offset, delta_V, delta_offset;
+
+ if (*(dm_odm->mp_mode) == 1) {
+ struct mpt_context *pMptCtx = &(adapt->mppriv.MptCtx);
+ pMptCtx->APK_bound[0] = 45;
+ pMptCtx->APK_bound[1] = 52;
+ }
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("==>phy_APCalibrate_8188E() delta %d\n", delta));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("AP Calibration for %s\n", (is2t ? "2T2R" : "1T1R")));
+ if (!is2t)
+ pathbound = 1;
+
+ /* 2 FOR NORMAL CHIP SETTINGS */
+
+/* Temporarily do not allow the normal driver to apply the following settings,
+ * because these offsets and values can cause the RF internal PA to be
+ * unpredictably disabled by the HW, so that the RF Tx signal disappears
+ * after disabling/enabling the card many times on 88CU. RF SD and DD have
+ * not found the root cause yet, so these actions are removed temporarily.
+ */
+ if (*(dm_odm->mp_mode) != 1)
+ return;
+ /* settings adjust for normal chip */
+ for (index = 0; index < PATH_NUM; index++) {
+ APK_offset[index] = APK_normal_offset[index];
+ APK_value[index] = APK_normal_value[index];
+ AFE_on_off[index] = 0x6fdb25a4;
+ }
+
+ for (index = 0; index < APK_BB_REG_NUM; index++) {
+ for (path = 0; path < pathbound; path++) {
+ APK_RF_init_value[path][index] = APK_normal_RF_init_value[path][index];
+ APK_RF_value_0[path][index] = APK_normal_RF_value_0[path][index];
+ }
+ BB_AP_MODE[index] = BB_normal_AP_MODE[index];
+ }
+
+ apkbound = 6;
+
+ /* save BB default value */
+ for (index = 0; index < APK_BB_REG_NUM; index++) {
+ if (index == 0) /* skip */
+ continue;
+ BB_backup[index] = ODM_GetBBReg(dm_odm, BB_REG[index], bMaskDWord);
+ }
+
+ /* save MAC default value */
+ _PHY_SaveMACRegisters(adapt, MAC_REG, MAC_backup);
+
+ /* save AFE default value */
+ _PHY_SaveADDARegisters(adapt, AFE_REG, AFE_backup, IQK_ADDA_REG_NUM);
+
+ for (path = 0; path < pathbound; path++) {
+ if (path == RF_PATH_A) {
+ /* path A APK */
+ /* load APK setting */
+ /* path-A */
+ offset = rPdp_AntA;
+ for (index = 0; index < 11; index++) {
+ ODM_SetBBReg(dm_odm, offset, bMaskDWord, APK_normal_setting_value_1[index]);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n",
+ offset, ODM_GetBBReg(dm_odm, offset, bMaskDWord)));
+ offset += 0x04;
+ }
+
+ ODM_SetBBReg(dm_odm, rConfig_Pmpd_AntB, bMaskDWord, 0x12680000);
+
+ offset = rConfig_AntA;
+ for (; index < 13; index++) {
+ ODM_SetBBReg(dm_odm, offset, bMaskDWord, APK_normal_setting_value_1[index]);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n",
+ offset, ODM_GetBBReg(dm_odm, offset, bMaskDWord)));
+ offset += 0x04;
+ }
+
+ /* page-B1 */
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x40000000);
+
+ /* path A */
+ offset = rPdp_AntA;
+ for (index = 0; index < 16; index++) {
+ ODM_SetBBReg(dm_odm, offset, bMaskDWord, APK_normal_setting_value_2[index]);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n",
+ offset, ODM_GetBBReg(dm_odm, offset, bMaskDWord)));
+
+ offset += 0x04;
+ }
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
+ } else if (path == RF_PATH_B) {
+ /* path B APK */
+ /* load APK setting */
+ /* path-B */
+ offset = rPdp_AntB;
+ for (index = 0; index < 10; index++) {
+ ODM_SetBBReg(dm_odm, offset, bMaskDWord, APK_normal_setting_value_1[index]);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n",
+ offset, ODM_GetBBReg(dm_odm, offset, bMaskDWord)));
+
+ offset += 0x04;
+ }
+ ODM_SetBBReg(dm_odm, rConfig_Pmpd_AntA, bMaskDWord, 0x12680000);
+ PHY_SetBBReg(adapt, rConfig_Pmpd_AntB, bMaskDWord, 0x12680000);
+
+ offset = rConfig_AntA;
+ index = 11;
+ for (; index < 13; index++) { /* offset 0xb68, 0xb6c */
+ ODM_SetBBReg(dm_odm, offset, bMaskDWord, APK_normal_setting_value_1[index]);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n",
+ offset, ODM_GetBBReg(dm_odm, offset, bMaskDWord)));
+ offset += 0x04;
+ }
+
+ /* page-B1 */
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x40000000);
+
+ /* path B */
+ offset = 0xb60;
+ for (index = 0; index < 16; index++) {
+ ODM_SetBBReg(dm_odm, offset, bMaskDWord, APK_normal_setting_value_2[index]);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n",
+ offset, ODM_GetBBReg(dm_odm, offset, bMaskDWord)));
+
+ offset += 0x04;
+ }
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0);
+ }
+
+ /* save RF default value */
+ regD[path] = PHY_QueryRFReg(adapt, path, RF_TXBIAS_A, bMaskDWord);
+
+ /* Path A AFE all on, path B AFE all off, or vice versa */
+ for (index = 0; index < IQK_ADDA_REG_NUM; index++)
+ ODM_SetBBReg(dm_odm, AFE_REG[index], bMaskDWord, AFE_on_off[path]);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("phy_APCalibrate_8188E() offset 0xe70 %x\n",
+ ODM_GetBBReg(dm_odm, rRx_Wait_CCA, bMaskDWord)));
+
+ /* BB to AP mode */
+ if (path == 0) {
+ for (index = 0; index < APK_BB_REG_NUM; index++) {
+ if (index == 0) /* skip */
+ continue;
+ else if (index < 5)
+ ODM_SetBBReg(dm_odm, BB_REG[index], bMaskDWord, BB_AP_MODE[index]);
+ else if (BB_REG[index] == 0x870)
+ ODM_SetBBReg(dm_odm, BB_REG[index], bMaskDWord, BB_backup[index]|BIT10|BIT26);
+ else
+ ODM_SetBBReg(dm_odm, BB_REG[index], BIT10, 0x0);
+ }
+
+ ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x01008c00);
+ ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x01008c00);
+ } else {
+ /* path B */
+ ODM_SetBBReg(dm_odm, rTx_IQK_Tone_B, bMaskDWord, 0x01008c00);
+ ODM_SetBBReg(dm_odm, rRx_IQK_Tone_B, bMaskDWord, 0x01008c00);
+ }
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("phy_APCalibrate_8188E() offset 0x800 %x\n",
+ ODM_GetBBReg(dm_odm, 0x800, bMaskDWord)));
+
+ /* MAC settings */
+ _PHY_MACSettingCalibration(adapt, MAC_REG, MAC_backup);
+
+ if (path == RF_PATH_A) {
+ /* Path B to standby mode */
+ ODM_SetRFReg(dm_odm, RF_PATH_B, RF_AC, bMaskDWord, 0x10000);
+ } else {
+ /* Path A to standby mode */
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_AC, bMaskDWord, 0x10000);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_MODE1, bMaskDWord, 0x1000f);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_MODE2, bMaskDWord, 0x20103);
+ }
+
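+ /* Map the signed thermal delta onto the 13-entry APK_delta_mapping table (index 0..12); e.g. delta = -3 gives (-3 + 14) / 2 = 5. */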
+ delta_offset = ((delta+14)/2);
+ if (delta_offset < 0)
+ delta_offset = 0;
+ else if (delta_offset > 12)
+ delta_offset = 12;
+
+ /* AP calibration */
+ for (index = 0; index < APK_BB_REG_NUM; index++) {
+ if (index != 1) /* only DO PA11+PAD01001, AP RF setting */
+ continue;
+
+ tmpreg = APK_RF_init_value[path][index];
+ if (!dm_odm->RFCalibrateInfo.bAPKThermalMeterIgnore) {
+ BB_offset = (tmpreg & 0xF0000) >> 16;
+
+ if (!(tmpreg & BIT15)) /* sign bit 0 */
+ BB_offset = -BB_offset;
+
+ delta_V = APK_delta_mapping[index][delta_offset];
+
+ BB_offset += delta_V;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("phy_APCalibrate_8188E() APK index %d tmpreg 0x%x delta_V %d delta_offset %d\n",
+ index, tmpreg, delta_V, delta_offset));
+
+ if (BB_offset < 0) {
+ tmpreg = tmpreg & (~BIT15);
+ BB_offset = -BB_offset;
+ } else {
+ tmpreg = tmpreg | BIT15;
+ }
+ tmpreg = (tmpreg & 0xFFF0FFFF) | (BB_offset << 16);
+ }
+
+ ODM_SetRFReg(dm_odm, path, RF_IPA_A, bMaskDWord, 0x8992e);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0xc %x\n", PHY_QueryRFReg(adapt, path, RF_IPA_A, bMaskDWord)));
+ ODM_SetRFReg(dm_odm, path, RF_AC, bMaskDWord, APK_RF_value_0[path][index]);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0x0 %x\n", PHY_QueryRFReg(adapt, path, RF_AC, bMaskDWord)));
+ ODM_SetRFReg(dm_odm, path, RF_TXBIAS_A, bMaskDWord, tmpreg);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0xd %x\n", PHY_QueryRFReg(adapt, path, RF_TXBIAS_A, bMaskDWord)));
+ /* PA11+PAD01111, one shot */
+ i = 0;
+ do {
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80000000);
+ ODM_SetBBReg(dm_odm, APK_offset[path], bMaskDWord, APK_value[0]);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n", APK_offset[path], ODM_GetBBReg(dm_odm, APK_offset[path], bMaskDWord)));
+ ODM_delay_ms(3);
+ ODM_SetBBReg(dm_odm, APK_offset[path], bMaskDWord, APK_value[1]);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n", APK_offset[path], ODM_GetBBReg(dm_odm, APK_offset[path], bMaskDWord)));
+
+ ODM_delay_ms(20);
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
+
+ if (path == RF_PATH_A)
+ tmpreg = ODM_GetBBReg(dm_odm, rAPK, 0x03E00000);
+ else
+ tmpreg = ODM_GetBBReg(dm_odm, rAPK, 0xF8000000);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0xbd8[25:21] %x\n", tmpreg));
+
+ i++;
+ } while (tmpreg > apkbound && i < 4);
+
+ APK_result[path][index] = tmpreg;
+ }
+ }
+
+ /* reload MAC default value */
+ _PHY_ReloadMACRegisters(adapt, MAC_REG, MAC_backup);
+
+ /* reload BB default value */
+ for (index = 0; index < APK_BB_REG_NUM; index++) {
+ if (index == 0) /* skip */
+ continue;
+ ODM_SetBBReg(dm_odm, BB_REG[index], bMaskDWord, BB_backup[index]);
+ }
+
+ /* reload AFE default value */
+ reload_adda_reg(adapt, AFE_REG, AFE_backup, IQK_ADDA_REG_NUM);
+
+ /* reload RF path default value */
+ for (path = 0; path < pathbound; path++) {
+ ODM_SetRFReg(dm_odm, path, 0xd, bMaskDWord, regD[path]);
+ if (path == RF_PATH_B) {
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_MODE1, bMaskDWord, 0x1000f);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_MODE2, bMaskDWord, 0x20101);
+ }
+
+ /* note: index 0 is never used */
+ if (APK_result[path][1] > 6)
+ APK_result[path][1] = 6;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("apk path %d result %d 0x%x \t", path, 1, APK_result[path][1]));
+ }
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("\n"));
+
+ for (path = 0; path < pathbound; path++) {
+ ODM_SetRFReg(dm_odm, path, 0x3, bMaskDWord,
+ ((APK_result[path][1] << 15) | (APK_result[path][1] << 10) | (APK_result[path][1] << 5) | APK_result[path][1]));
+ if (path == RF_PATH_A)
+ ODM_SetRFReg(dm_odm, path, 0x4, bMaskDWord,
+ ((APK_result[path][1] << 15) | (APK_result[path][1] << 10) | (0x00 << 5) | 0x05));
+ else
+ ODM_SetRFReg(dm_odm, path, 0x4, bMaskDWord,
+ ((APK_result[path][1] << 15) | (APK_result[path][1] << 10) | (0x02 << 5) | 0x05));
+ ODM_SetRFReg(dm_odm, path, RF_BS_PA_APSET_G9_G11, bMaskDWord,
+ ((0x08 << 15) | (0x08 << 10) | (0x08 << 5) | 0x08));
+ }
+
+ dm_odm->RFCalibrateInfo.bAPKdone = true;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("<==phy_APCalibrate_8188E()\n"));
+}
+
+#define DP_BB_REG_NUM 7
+#define DP_RF_REG_NUM 1
+#define DP_RETRY_LIMIT 10
+#define DP_PATH_NUM 2
+#define DP_DPK_NUM 3
+#define DP_DPK_VALUE_NUM 2
+
+void PHY_IQCalibrate_8188E(struct adapter *adapt, bool recovery)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+ struct mpt_context *pMptCtx = &(adapt->mppriv.MptCtx);
+ s32 result[4][8]; /* last is final result */
+ u8 i, final_candidate, Indexforchannel;
+ bool pathaok, pathbok;
+ s32 RegE94, RegE9C, RegEA4, RegEAC, RegEB4, RegEBC, RegEC4, RegECC;
+ bool is12simular, is13simular, is23simular;
+ bool singletone = false, carrier_sup = false;
+ u32 IQK_BB_REG_92C[IQK_BB_REG_NUM] = {
+ rOFDM0_XARxIQImbalance, rOFDM0_XBRxIQImbalance,
+ rOFDM0_ECCAThreshold, rOFDM0_AGCRSSITable,
+ rOFDM0_XATxIQImbalance, rOFDM0_XBTxIQImbalance,
+ rOFDM0_XCTxAFE, rOFDM0_XDTxAFE,
+ rOFDM0_RxIQExtAnta};
+ bool is2t;
+
+ is2t = (dm_odm->RFType == ODM_2T2R);
+ if (!ODM_CheckPowerStatus(adapt))
+ return;
+
+ if (!(dm_odm->SupportAbility & ODM_RF_CALIBRATION))
+ return;
+
+ if (*(dm_odm->mp_mode) == 1) {
+ singletone = pMptCtx->bSingleTone;
+ carrier_sup = pMptCtx->bCarrierSuppression;
+ }
+
+ /* 20120213 <Kordan> Skip IQK during continuous Tx (single tone or carrier suppression) to pass lab testing. (required by Edlu) */
+ if (singletone || carrier_sup)
+ return;
+
+ if (recovery) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD, ("PHY_IQCalibrate_8188E: Return due to recovery!\n"));
+ reload_adda_reg(adapt, IQK_BB_REG_92C, dm_odm->RFCalibrateInfo.IQK_BB_backup_recover, 9);
+ return;
+ }
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK:Start!!!\n"));
+
+ for (i = 0; i < 8; i++) {
+ result[0][i] = 0;
+ result[1][i] = 0;
+ result[2][i] = 0;
+ if ((i == 0) || (i == 2) || (i == 4) || (i == 6))
+ result[3][i] = 0x100;
+ else
+ result[3][i] = 0;
+ }
+ final_candidate = 0xff;
+ pathaok = false;
+ pathbok = false;
+ is12simular = false;
+ is23simular = false;
+ is13simular = false;
+
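+ /* Run IQK up to three times; stop early once two runs agree, otherwise use row 3, which holds the merged/default values. */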
+ for (i = 0; i < 3; i++) {
+ phy_IQCalibrate_8188E(adapt, result, i, is2t);
+
+ if (i == 1) {
+ is12simular = phy_SimularityCompare_8188E(adapt, result, 0, 1);
+ if (is12simular) {
+ final_candidate = 0;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: is12simular final_candidate is %x\n", final_candidate));
+ break;
+ }
+ }
+
+ if (i == 2) {
+ is13simular = phy_SimularityCompare_8188E(adapt, result, 0, 2);
+ if (is13simular) {
+ final_candidate = 0;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: is13simular final_candidate is %x\n", final_candidate));
+
+ break;
+ }
+ is23simular = phy_SimularityCompare_8188E(adapt, result, 1, 2);
+ if (is23simular) {
+ final_candidate = 1;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: is23simular final_candidate is %x\n", final_candidate));
+ } else {
+ final_candidate = 3;
+ }
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ RegE94 = result[i][0];
+ RegE9C = result[i][1];
+ RegEA4 = result[i][2];
+ RegEAC = result[i][3];
+ RegEB4 = result[i][4];
+ RegEBC = result[i][5];
+ RegEC4 = result[i][6];
+ RegECC = result[i][7];
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("IQK: RegE94=%x RegE9C=%x RegEA4=%x RegEAC=%x RegEB4=%x RegEBC=%x RegEC4=%x RegECC=%x\n",
+ RegE94, RegE9C, RegEA4, RegEAC, RegEB4, RegEBC, RegEC4, RegECC));
+ }
+
+ if (final_candidate != 0xff) {
+ RegE94 = result[final_candidate][0];
+ RegE9C = result[final_candidate][1];
+ RegEA4 = result[final_candidate][2];
+ RegEAC = result[final_candidate][3];
+ RegEB4 = result[final_candidate][4];
+ RegEBC = result[final_candidate][5];
+ dm_odm->RFCalibrateInfo.RegE94 = RegE94;
+ dm_odm->RFCalibrateInfo.RegE9C = RegE9C;
+ dm_odm->RFCalibrateInfo.RegEB4 = RegEB4;
+ dm_odm->RFCalibrateInfo.RegEBC = RegEBC;
+ RegEC4 = result[final_candidate][6];
+ RegECC = result[final_candidate][7];
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("IQK: final_candidate is %x\n", final_candidate));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("IQK: RegE94=%x RegE9C=%x RegEA4=%x RegEAC=%x RegEB4=%x RegEBC=%x RegEC4=%x RegECC=%x\n",
+ RegE94, RegE9C, RegEA4, RegEAC, RegEB4, RegEBC, RegEC4, RegECC));
+ pathaok = true;
+ pathbok = true;
+ } else {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: FAIL use default value\n"));
+ dm_odm->RFCalibrateInfo.RegE94 = 0x100;
+ dm_odm->RFCalibrateInfo.RegEB4 = 0x100; /* X default value */
+ dm_odm->RFCalibrateInfo.RegE9C = 0x0;
+ dm_odm->RFCalibrateInfo.RegEBC = 0x0; /* Y default value */
+ }
+ if (RegE94 != 0)
+ patha_fill_iqk(adapt, pathaok, result, final_candidate, (RegEA4 == 0));
+ if (is2t) {
+ if (RegEB4 != 0)
+ pathb_fill_iqk(adapt, pathbok, result, final_candidate, (RegEC4 == 0));
+ }
+
+ Indexforchannel = ODM_GetRightChnlPlaceforIQK(pHalData->CurrentChannel);
+
+/* To fix BSOD when final_candidate is 0xff (by sherry, 20120321) */
+ if (final_candidate < 4) {
+ for (i = 0; i < IQK_Matrix_REG_NUM; i++)
+ dm_odm->RFCalibrateInfo.IQKMatrixRegSetting[Indexforchannel].Value[0][i] = result[final_candidate][i];
+ dm_odm->RFCalibrateInfo.IQKMatrixRegSetting[Indexforchannel].bIQKDone = true;
+ }
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("\nIQK OK Indexforchannel %d.\n", Indexforchannel));
+
+ _PHY_SaveADDARegisters(adapt, IQK_BB_REG_92C, dm_odm->RFCalibrateInfo.IQK_BB_backup_recover, 9);
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK finished\n"));
+}
+
+void PHY_LCCalibrate_8188E(struct adapter *adapt)
+{
+ bool singletone = false, carrier_sup = false;
+ u32 timeout = 2000, timecount = 0;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+ struct mpt_context *pMptCtx = &(adapt->mppriv.MptCtx);
+
+ if (*(dm_odm->mp_mode) == 1) {
+ singletone = pMptCtx->bSingleTone;
+ carrier_sup = pMptCtx->bCarrierSuppression;
+ }
+ if (!(dm_odm->SupportAbility & ODM_RF_CALIBRATION))
+ return;
+ /* 20120213 <Kordan> Skip LCK during continuous Tx (single tone or carrier suppression) to pass lab testing. (required by Edlu) */
+ if (singletone || carrier_sup)
+ return;
+
+ while (*(dm_odm->pbScanInProcess) && timecount < timeout) {
+ ODM_delay_ms(50);
+ timecount += 50;
+ }
+
+ dm_odm->RFCalibrateInfo.bLCKInProgress = true;
+
+ if (dm_odm->RFType == ODM_2T2R) {
+ phy_LCCalibrate_8188E(adapt, true);
+ } else {
+ /* For 88C 1T1R */
+ phy_LCCalibrate_8188E(adapt, false);
+ }
+
+ dm_odm->RFCalibrateInfo.bLCKInProgress = false;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("LCK:Finish!!!interface %d\n", dm_odm->InterfaceIndex));
+}
+
+void PHY_APCalibrate_8188E(struct adapter *adapt, s8 delta)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+
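+ /* APK is currently disabled for the 8188E: return before any calibration is attempted. */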
+ return;
+ if (!(dm_odm->SupportAbility & ODM_RF_CALIBRATION))
+ return;
+
+#if FOR_BRAZIL_PRETEST != 1
+ if (dm_odm->RFCalibrateInfo.bAPKdone)
+#endif
+ return;
+
+ if (dm_odm->RFType == ODM_2T2R) {
+ phy_APCalibrate_8188E(adapt, delta, true);
+ } else {
+ /* For 88C 1T1R */
+ phy_APCalibrate_8188E(adapt, delta, false);
+ }
+}
+
+static void phy_setrfpathswitch_8188e(struct adapter *adapt, bool main, bool is2t)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+
+ if (!adapt->hw_init_completed) {
+ u8 u1btmp;
+ u1btmp = ODM_Read1Byte(dm_odm, REG_LEDCFG2) | BIT7;
+ ODM_Write1Byte(dm_odm, REG_LEDCFG2, u1btmp);
+ ODM_SetBBReg(dm_odm, rFPGA0_XAB_RFParameter, BIT13, 0x01);
+ }
+
+ if (is2t) { /* 92C */
+ if (main)
+ ODM_SetBBReg(dm_odm, rFPGA0_XB_RFInterfaceOE, BIT5|BIT6, 0x1); /* 92C_Path_A */
+ else
+ ODM_SetBBReg(dm_odm, rFPGA0_XB_RFInterfaceOE, BIT5|BIT6, 0x2); /* BT */
+ } else { /* 88C */
+ if (main)
+ ODM_SetBBReg(dm_odm, rFPGA0_XA_RFInterfaceOE, BIT8|BIT9, 0x2); /* Main */
+ else
+ ODM_SetBBReg(dm_odm, rFPGA0_XA_RFInterfaceOE, BIT8|BIT9, 0x1); /* Aux */
+ }
+}
+
+void PHY_SetRFPathSwitch_8188E(struct adapter *adapt, bool main)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+
+ if (dm_odm->RFType == ODM_2T2R) {
+ phy_setrfpathswitch_8188e(adapt, main, true);
+ } else {
+ /* For 88C 1T1R */
+ phy_setrfpathswitch_8188e(adapt, main, false);
+ }
+}
diff --git a/drivers/staging/rtl8188eu/hal/HalPwrSeqCmd.c b/drivers/staging/rtl8188eu/hal/HalPwrSeqCmd.c
new file mode 100644
index 0000000..e913a22
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/HalPwrSeqCmd.c
@@ -0,0 +1,132 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+/*++
+Copyright (c) Realtek Semiconductor Corp. All rights reserved.
+
+Module Name:
+ HalPwrSeqCmd.c
+
+Abstract:
+ Implement HW Power sequence configuration CMD handling routine for Realtek devices.
+
+Major Change History:
+ When Who What
+ ---------- --------------- -------------------------------
+ 2011-10-26 Lucas Modify to be compatible with SD4-CE driver.
+ 2011-07-07 Roger Create.
+
+--*/
+
+#include <HalPwrSeqCmd.h>
+
+/*
+ * Description:
+ * This routine deals with the power configuration CMD parsing
+ * for RTL8723/RTL8188E series ICs.
+ *
+ * Assumption:
+ * We should follow the specific format released from HW SD.
+ */
+u8 HalPwrSeqCmdParsing(struct adapter *padapter, u8 cut_vers, u8 fab_vers,
+ u8 ifacetype, struct wl_pwr_cfg pwrseqcmd[])
+{
+ struct wl_pwr_cfg pwrcfgcmd = {0};
+ u8 poll_bit = false;
+ u32 aryidx = 0;
+ u8 value = 0;
+ u32 offset = 0;
+ u32 poll_count = 0; /* number of polling attempts */
+ u32 max_poll_count = 5000;
+
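+ /* Walk the power sequence array until a PWR_CMD_END entry ends it, or a polling timeout aborts it. */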
+ do {
+ pwrcfgcmd = pwrseqcmd[aryidx];
+
+ RT_TRACE(_module_hal_init_c_, _drv_info_,
+ ("HalPwrSeqCmdParsing: offset(%#x) cut_msk(%#x) fab_msk(%#x) interface_msk(%#x) base(%#x) cmd(%#x) msk(%#x) value(%#x)\n",
+ GET_PWR_CFG_OFFSET(pwrcfgcmd),
+ GET_PWR_CFG_CUT_MASK(pwrcfgcmd),
+ GET_PWR_CFG_FAB_MASK(pwrcfgcmd),
+ GET_PWR_CFG_INTF_MASK(pwrcfgcmd),
+ GET_PWR_CFG_BASE(pwrcfgcmd),
+ GET_PWR_CFG_CMD(pwrcfgcmd),
+ GET_PWR_CFG_MASK(pwrcfgcmd),
+ GET_PWR_CFG_VALUE(pwrcfgcmd)));
+
+ /* 2 Only handle commands whose FAB, CUT and interface masks match */
+ if ((GET_PWR_CFG_FAB_MASK(pwrcfgcmd) & fab_vers) &&
+ (GET_PWR_CFG_CUT_MASK(pwrcfgcmd) & cut_vers) &&
+ (GET_PWR_CFG_INTF_MASK(pwrcfgcmd) & ifacetype)) {
+ switch (GET_PWR_CFG_CMD(pwrcfgcmd)) {
+ case PWR_CMD_READ:
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("HalPwrSeqCmdParsing: PWR_CMD_READ\n"));
+ break;
+ case PWR_CMD_WRITE:
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("HalPwrSeqCmdParsing: PWR_CMD_WRITE\n"));
+ offset = GET_PWR_CFG_OFFSET(pwrcfgcmd);
+
+ /* Read the value from system register */
+ value = rtw_read8(padapter, offset);
+
+ value &= ~(GET_PWR_CFG_MASK(pwrcfgcmd));
+ value |= (GET_PWR_CFG_VALUE(pwrcfgcmd) & GET_PWR_CFG_MASK(pwrcfgcmd));
+
+ /* Write the value back to the system register */
+ rtw_write8(padapter, offset, value);
+ break;
+ case PWR_CMD_POLLING:
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("HalPwrSeqCmdParsing: PWR_CMD_POLLING\n"));
+
+ poll_bit = false;
+ offset = GET_PWR_CFG_OFFSET(pwrcfgcmd);
+ do {
+ value = rtw_read8(padapter, offset);
+
+ value &= GET_PWR_CFG_MASK(pwrcfgcmd);
+ if (value == (GET_PWR_CFG_VALUE(pwrcfgcmd) & GET_PWR_CFG_MASK(pwrcfgcmd)))
+ poll_bit = true;
+ else
+ rtw_udelay_os(10);
+
+ if (poll_count++ > max_poll_count) {
+ DBG_88E("Fail to polling Offset[%#x]\n", offset);
+ return false;
+ }
+ } while (!poll_bit);
+ break;
+ case PWR_CMD_DELAY:
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("HalPwrSeqCmdParsing: PWR_CMD_DELAY\n"));
+ if (GET_PWR_CFG_VALUE(pwrcfgcmd) == PWRSEQ_DELAY_US)
+ rtw_udelay_os(GET_PWR_CFG_OFFSET(pwrcfgcmd));
+ else
+ rtw_udelay_os(GET_PWR_CFG_OFFSET(pwrcfgcmd)*1000);
+ break;
+ case PWR_CMD_END:
+ /* When this command is parsed, end the process */
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("HalPwrSeqCmdParsing: PWR_CMD_END\n"));
+ return true;
+ default:
+ RT_TRACE(_module_hal_init_c_, _drv_err_, ("HalPwrSeqCmdParsing: Unknown CMD!!\n"));
+ break;
+ }
+ }
+
+ aryidx++; /* advance to the next command entry */
+ } while (1);
+ return true;
+}
diff --git a/drivers/staging/rtl8188eu/hal/hal_com.c b/drivers/staging/rtl8188eu/hal/hal_com.c
new file mode 100644
index 0000000..829b900
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/hal_com.c
@@ -0,0 +1,381 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#include <hal_intf.h>
+#include <hal_com.h>
+#include <rtl8188e_hal.h>
+
+#define _HAL_INIT_C_
+
+void dump_chip_info(struct HAL_VERSION chip_vers)
+{
+ uint cnt = 0;
+ char buf[128];
+
+ if (IS_81XXC(chip_vers)) {
+ cnt += sprintf((buf+cnt), "Chip Version Info: %s_",
+ IS_92C_SERIAL(chip_vers) ?
+ "CHIP_8192C" : "CHIP_8188C");
+ } else if (IS_92D(chip_vers)) {
+ cnt += sprintf((buf+cnt), "Chip Version Info: CHIP_8192D_");
+ } else if (IS_8723_SERIES(chip_vers)) {
+ cnt += sprintf((buf+cnt), "Chip Version Info: CHIP_8723A_");
+ } else if (IS_8188E(chip_vers)) {
+ cnt += sprintf((buf+cnt), "Chip Version Info: CHIP_8188E_");
+ }
+
+ cnt += sprintf((buf+cnt), "%s_", IS_NORMAL_CHIP(chip_vers) ?
+ "Normal_Chip" : "Test_Chip");
+ cnt += sprintf((buf+cnt), "%s_", IS_CHIP_VENDOR_TSMC(chip_vers) ?
+ "TSMC" : "UMC");
+ if (IS_A_CUT(chip_vers))
+ cnt += sprintf((buf+cnt), "A_CUT_");
+ else if (IS_B_CUT(chip_vers))
+ cnt += sprintf((buf+cnt), "B_CUT_");
+ else if (IS_C_CUT(chip_vers))
+ cnt += sprintf((buf+cnt), "C_CUT_");
+ else if (IS_D_CUT(chip_vers))
+ cnt += sprintf((buf+cnt), "D_CUT_");
+ else if (IS_E_CUT(chip_vers))
+ cnt += sprintf((buf+cnt), "E_CUT_");
+ else
+ cnt += sprintf((buf+cnt), "UNKNOWN_CUT(%d)_",
+ chip_vers.CUTVersion);
+
+ if (IS_1T1R(chip_vers))
+ cnt += sprintf((buf+cnt), "1T1R_");
+ else if (IS_1T2R(chip_vers))
+ cnt += sprintf((buf+cnt), "1T2R_");
+ else if (IS_2T2R(chip_vers))
+ cnt += sprintf((buf+cnt), "2T2R_");
+ else
+ cnt += sprintf((buf+cnt), "UNKNOWN_RFTYPE(%d)_",
+ chip_vers.RFType);
+
+ cnt += sprintf((buf+cnt), "RomVer(%d)\n", chip_vers.ROMVer);
+
+ pr_info("%s", buf);
+}
+
+#define CHAN_PLAN_HW 0x80
+
+/* Return the final channel plan decision */
+u8 hal_com_get_channel_plan(struct adapter *padapter, u8 hw_channel_plan,
+ u8 sw_channel_plan, u8 def_channel_plan,
+ bool load_fail)
+{
+ u8 sw_cfg;
+ u8 chnlplan;
+
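+ /* Prefer the SW channel plan unless it is invalid or the HW plan sets its override bit (CHAN_PLAN_HW). */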
+ sw_cfg = true;
+ if (!load_fail) {
+ if (!rtw_is_channel_plan_valid(sw_channel_plan))
+ sw_cfg = false;
+ if (hw_channel_plan & CHAN_PLAN_HW)
+ sw_cfg = false;
+ }
+
+ if (sw_cfg)
+ chnlplan = sw_channel_plan;
+ else
+ chnlplan = hw_channel_plan & (~CHAN_PLAN_HW);
+
+ if (!rtw_is_channel_plan_valid(chnlplan))
+ chnlplan = def_channel_plan;
+
+ return chnlplan;
+}
+
+u8 MRateToHwRate(u8 rate)
+{
+ u8 ret = DESC_RATE1M;
+
+ switch (rate) {
+ /* CCK and OFDM non-HT rates */
+ case IEEE80211_CCK_RATE_1MB:
+ ret = DESC_RATE1M;
+ break;
+ case IEEE80211_CCK_RATE_2MB:
+ ret = DESC_RATE2M;
+ break;
+ case IEEE80211_CCK_RATE_5MB:
+ ret = DESC_RATE5_5M;
+ break;
+ case IEEE80211_CCK_RATE_11MB:
+ ret = DESC_RATE11M;
+ break;
+ case IEEE80211_OFDM_RATE_6MB:
+ ret = DESC_RATE6M;
+ break;
+ case IEEE80211_OFDM_RATE_9MB:
+ ret = DESC_RATE9M;
+ break;
+ case IEEE80211_OFDM_RATE_12MB:
+ ret = DESC_RATE12M;
+ break;
+ case IEEE80211_OFDM_RATE_18MB:
+ ret = DESC_RATE18M;
+ break;
+ case IEEE80211_OFDM_RATE_24MB:
+ ret = DESC_RATE24M;
+ break;
+ case IEEE80211_OFDM_RATE_36MB:
+ ret = DESC_RATE36M;
+ break;
+ case IEEE80211_OFDM_RATE_48MB:
+ ret = DESC_RATE48M;
+ break;
+ case IEEE80211_OFDM_RATE_54MB:
+ ret = DESC_RATE54M;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+void HalSetBrateCfg(struct adapter *adapt, u8 *brates, u16 *rate_cfg)
+{
+ u8 i, is_brate, brate;
+
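+ /* Translate each basic rate in the 802.11 rate set into the corresponding RATE_* bit of the HW rate config. */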
+ for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
+ is_brate = brates[i] & IEEE80211_BASIC_RATE_MASK;
+ brate = brates[i] & 0x7f;
+
+ if (is_brate) {
+ switch (brate) {
+ case IEEE80211_CCK_RATE_1MB:
+ *rate_cfg |= RATE_1M;
+ break;
+ case IEEE80211_CCK_RATE_2MB:
+ *rate_cfg |= RATE_2M;
+ break;
+ case IEEE80211_CCK_RATE_5MB:
+ *rate_cfg |= RATE_5_5M;
+ break;
+ case IEEE80211_CCK_RATE_11MB:
+ *rate_cfg |= RATE_11M;
+ break;
+ case IEEE80211_OFDM_RATE_6MB:
+ *rate_cfg |= RATE_6M;
+ break;
+ case IEEE80211_OFDM_RATE_9MB:
+ *rate_cfg |= RATE_9M;
+ break;
+ case IEEE80211_OFDM_RATE_12MB:
+ *rate_cfg |= RATE_12M;
+ break;
+ case IEEE80211_OFDM_RATE_18MB:
+ *rate_cfg |= RATE_18M;
+ break;
+ case IEEE80211_OFDM_RATE_24MB:
+ *rate_cfg |= RATE_24M;
+ break;
+ case IEEE80211_OFDM_RATE_36MB:
+ *rate_cfg |= RATE_36M;
+ break;
+ case IEEE80211_OFDM_RATE_48MB:
+ *rate_cfg |= RATE_48M;
+ break;
+ case IEEE80211_OFDM_RATE_54MB:
+ *rate_cfg |= RATE_54M;
+ break;
+ }
+ }
+ }
+}
+
+static void one_out_pipe(struct adapter *adapter)
+{
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapter);
+
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
+ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0];/* VI */
+ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[0];/* BE */
+ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[0];/* BK */
+
+ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
+ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
+ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
+ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
+}
+
+static void two_out_pipe(struct adapter *adapter, bool wifi_cfg)
+{
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapter);
+
+ if (wifi_cfg) { /* WMM */
+ /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
+ /* 0, 1, 0, 1, 0, 0, 0, 0, 0}; */
+ /* 0:H, 1:L */
+
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[1];/* VO */
+ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0];/* VI */
+ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[1];/* BE */
+ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[0];/* BK */
+
+ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
+ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
+ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
+ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
+
+ } else {/* typical setting */
+ /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
+ /* 1, 1, 0, 0, 0, 0, 0, 0, 0}; */
+ /* 0:H, 1:L */
+
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
+ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0];/* VI */
+ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[1];/* BE */
+ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[1];/* BK */
+
+ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
+ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
+ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
+ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
+ }
+}
+
+static void three_out_pipe(struct adapter *adapter, bool wifi_cfg)
+{
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapter);
+
+ if (wifi_cfg) {/* for WMM */
+ /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
+ /* 1, 2, 1, 0, 0, 0, 0, 0, 0}; */
+ /* 0:H, 1:N, 2:L */
+
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
+ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[1];/* VI */
+ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[2];/* BE */
+ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[1];/* BK */
+
+ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
+ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
+ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
+ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
+
+ } else {/* typical setting */
+ /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
+ /* 2, 2, 1, 0, 0, 0, 0, 0, 0}; */
+ /* 0:H, 1:N, 2:L */
+
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
+ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[1];/* VI */
+ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[2];/* BE */
+ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[2];/* BK */
+
+ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
+ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
+ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
+ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
+ }
+}
+
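+/* Map the HW priority queues (VO/VI/BE/BK/BCN/MGT/HIGH/TXCMD) onto the available USB out endpoints. */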
+bool Hal_MappingOutPipe(struct adapter *adapter, u8 numoutpipe)
+{
+ struct registry_priv *pregistrypriv = &adapter->registrypriv;
+ bool wifi_cfg = (pregistrypriv->wifi_spec) ? true : false;
+ bool result = true;
+
+ switch (numoutpipe) {
+ case 2:
+ two_out_pipe(adapter, wifi_cfg);
+ break;
+ case 3:
+ three_out_pipe(adapter, wifi_cfg);
+ break;
+ case 1:
+ one_out_pipe(adapter);
+ break;
+ default:
+ result = false;
+ break;
+ }
+ return result;
+}
+
+void hal_init_macaddr(struct adapter *adapter)
+{
+ rtw_hal_set_hwreg(adapter, HW_VAR_MAC_ADDR,
+ adapter->eeprompriv.mac_addr);
+}
+
+/*
+ * C2H event format:
+ * Field   TRIGGER    CONTENT    CMD_SEQ   CMD_LEN   CMD_ID
+ * BITS    [127:120]  [119:16]   [15:8]    [7:4]     [3:0]
+ */
+
+void c2h_evt_clear(struct adapter *adapter)
+{
+ rtw_write8(adapter, REG_C2HEVT_CLEAR, C2H_EVT_HOST_CLOSE);
+}
+
+s32 c2h_evt_read(struct adapter *adapter, u8 *buf)
+{
+ s32 ret = _FAIL;
+ struct c2h_evt_hdr *c2h_evt;
+ int i;
+ u8 trigger;
+
+ if (!buf)
+ goto exit;
+
+ trigger = rtw_read8(adapter, REG_C2HEVT_CLEAR);
+
+ if (trigger == C2H_EVT_HOST_CLOSE)
+ goto exit; /* Not ready */
+ else if (trigger != C2H_EVT_FW_CLOSE)
+ goto clear_evt; /* Not a valid value */
+
+ c2h_evt = (struct c2h_evt_hdr *)buf;
+
+ _rtw_memset(c2h_evt, 0, 16);
+
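+ /* The first two bytes at REG_C2HEVT_MSG_NORMAL carry the event header: cmd id (bits 3:0), length (7:4) and sequence (15:8), per the format above. */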
+ *buf = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL);
+ *(buf+1) = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL + 1);
+
+ RT_PRINT_DATA(_module_hal_init_c_, _drv_info_, "c2h_evt_read(): ",
+ &c2h_evt, sizeof(c2h_evt));
+
+ /* Read the content */
+ for (i = 0; i < c2h_evt->plen; i++)
+ c2h_evt->payload[i] = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL +
+ sizeof(*c2h_evt) + i);
+
+ RT_PRINT_DATA(_module_hal_init_c_, _drv_info_,
+ "c2h_evt_read(): Command Content:\n",
+ c2h_evt->payload, c2h_evt->plen);
+
+ ret = _SUCCESS;
+
+clear_evt:
+ /*
+ * Clear event to notify FW we have read the command.
+ * If this field isn't clear, the FW won't update the next
+ * command message.
+ */
+ c2h_evt_clear(adapter);
+exit:
+ return ret;
+}
diff --git a/drivers/staging/rtl8188eu/hal/hal_intf.c b/drivers/staging/rtl8188eu/hal/hal_intf.c
new file mode 100644
index 0000000..5981404
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/hal_intf.c
@@ -0,0 +1,464 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#define _HAL_INTF_C_
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <hal_intf.h>
+#include <usb_hal.h>
+
+void rtw_hal_chip_configure(struct adapter *adapt)
+{
+ if (adapt->HalFunc.intf_chip_configure)
+ adapt->HalFunc.intf_chip_configure(adapt);
+}
+
+void rtw_hal_read_chip_info(struct adapter *adapt)
+{
+ if (adapt->HalFunc.read_adapter_info)
+ adapt->HalFunc.read_adapter_info(adapt);
+}
+
+void rtw_hal_read_chip_version(struct adapter *adapt)
+{
+ if (adapt->HalFunc.read_chip_version)
+ adapt->HalFunc.read_chip_version(adapt);
+}
+
+void rtw_hal_def_value_init(struct adapter *adapt)
+{
+ if (adapt->HalFunc.init_default_value)
+ adapt->HalFunc.init_default_value(adapt);
+}
+
+void rtw_hal_free_data(struct adapter *adapt)
+{
+ if (adapt->HalFunc.free_hal_data)
+ adapt->HalFunc.free_hal_data(adapt);
+}
+
+void rtw_hal_dm_init(struct adapter *adapt)
+{
+ if (adapt->HalFunc.dm_init)
+ adapt->HalFunc.dm_init(adapt);
+}
+
+void rtw_hal_dm_deinit(struct adapter *adapt)
+{
+ /* cancel dm timer */
+ if (adapt->HalFunc.dm_deinit)
+ adapt->HalFunc.dm_deinit(adapt);
+}
+
+void rtw_hal_sw_led_init(struct adapter *adapt)
+{
+ if (adapt->HalFunc.InitSwLeds)
+ adapt->HalFunc.InitSwLeds(adapt);
+}
+
+void rtw_hal_sw_led_deinit(struct adapter *adapt)
+{
+ if (adapt->HalFunc.DeInitSwLeds)
+ adapt->HalFunc.DeInitSwLeds(adapt);
+}
+
+u32 rtw_hal_power_on(struct adapter *adapt)
+{
+ if (adapt->HalFunc.hal_power_on)
+ return adapt->HalFunc.hal_power_on(adapt);
+ return _FAIL;
+}
+
+uint rtw_hal_init(struct adapter *adapt)
+{
+ uint status = _SUCCESS;
+
+ adapt->hw_init_completed = false;
+
+ status = adapt->HalFunc.hal_init(adapt);
+
+ if (status == _SUCCESS) {
+ adapt->hw_init_completed = true;
+
+ if (adapt->registrypriv.notch_filter == 1)
+ rtw_hal_notch_filter(adapt, 1);
+
+ rtw_hal_reset_security_engine(adapt);
+ } else {
+ adapt->hw_init_completed = false;
+ DBG_88E("rtw_hal_init: hal_init fail\n");
+ }
+
+ RT_TRACE(_module_hal_init_c_, _drv_err_,
+ ("-rtl871x_hal_init:status=0x%x\n", status));
+
+ return status;
+}
+
+uint rtw_hal_deinit(struct adapter *adapt)
+{
+ uint status = _SUCCESS;
+
+_func_enter_;
+
+ status = adapt->HalFunc.hal_deinit(adapt);
+
+ if (status == _SUCCESS)
+ adapt->hw_init_completed = false;
+ else
+ DBG_88E("\n rtw_hal_deinit: hal_deinit fail\n");
+
+_func_exit_;
+
+ return status;
+}
+
+void rtw_hal_set_hwreg(struct adapter *adapt, u8 variable, u8 *val)
+{
+ if (adapt->HalFunc.SetHwRegHandler)
+ adapt->HalFunc.SetHwRegHandler(adapt, variable, val);
+}
+
+void rtw_hal_get_hwreg(struct adapter *adapt, u8 variable, u8 *val)
+{
+ if (adapt->HalFunc.GetHwRegHandler)
+ adapt->HalFunc.GetHwRegHandler(adapt, variable, val);
+}
+
+u8 rtw_hal_set_def_var(struct adapter *adapt, enum hal_def_variable var,
+ void *val)
+{
+ if (adapt->HalFunc.SetHalDefVarHandler)
+ return adapt->HalFunc.SetHalDefVarHandler(adapt, var, val);
+ return _FAIL;
+}
+
+u8 rtw_hal_get_def_var(struct adapter *adapt,
+ enum hal_def_variable var, void *val)
+{
+ if (adapt->HalFunc.GetHalDefVarHandler)
+ return adapt->HalFunc.GetHalDefVarHandler(adapt, var, val);
+ return _FAIL;
+}
+
+void rtw_hal_set_odm_var(struct adapter *adapt,
+ enum hal_odm_variable var, void *val1,
+ bool set)
+{
+ if (adapt->HalFunc.SetHalODMVarHandler)
+ adapt->HalFunc.SetHalODMVarHandler(adapt, var,
+ val1, set);
+}
+
+void rtw_hal_get_odm_var(struct adapter *adapt,
+ enum hal_odm_variable var, void *val1,
+ bool set)
+{
+ if (adapt->HalFunc.GetHalODMVarHandler)
+ adapt->HalFunc.GetHalODMVarHandler(adapt, var,
+ val1, set);
+}
+
+void rtw_hal_enable_interrupt(struct adapter *adapt)
+{
+ if (adapt->HalFunc.enable_interrupt)
+ adapt->HalFunc.enable_interrupt(adapt);
+ else
+ DBG_88E("%s: HalFunc.enable_interrupt is NULL!\n", __func__);
+}
+
+void rtw_hal_disable_interrupt(struct adapter *adapt)
+{
+ if (adapt->HalFunc.disable_interrupt)
+ adapt->HalFunc.disable_interrupt(adapt);
+ else
+ DBG_88E("%s: HalFunc.disable_interrupt is NULL!\n", __func__);
+}
+
+u32 rtw_hal_inirp_init(struct adapter *adapt)
+{
+ u32 rst = _FAIL;
+
+ if (adapt->HalFunc.inirp_init)
+ rst = adapt->HalFunc.inirp_init(adapt);
+ else
+ DBG_88E(" %s HalFunc.inirp_init is NULL!!!\n", __func__);
+ return rst;
+}
+
+u32 rtw_hal_inirp_deinit(struct adapter *adapt)
+{
+ if (adapt->HalFunc.inirp_deinit)
+ return adapt->HalFunc.inirp_deinit(adapt);
+
+ return _FAIL;
+}
+
+u8 rtw_hal_intf_ps_func(struct adapter *adapt,
+ enum hal_intf_ps_func efunc_id, u8 *val)
+{
+ if (adapt->HalFunc.interface_ps_func)
+ return adapt->HalFunc.interface_ps_func(adapt, efunc_id,
+ val);
+ return _FAIL;
+}
+
+s32 rtw_hal_xmit(struct adapter *adapt, struct xmit_frame *pxmitframe)
+{
+ if (adapt->HalFunc.hal_xmit)
+ return adapt->HalFunc.hal_xmit(adapt, pxmitframe);
+
+ return false;
+}
+
+s32 rtw_hal_mgnt_xmit(struct adapter *adapt, struct xmit_frame *pmgntframe)
+{
+ s32 ret = _FAIL;
+ if (adapt->HalFunc.mgnt_xmit)
+ ret = adapt->HalFunc.mgnt_xmit(adapt, pmgntframe);
+ return ret;
+}
+
+s32 rtw_hal_init_xmit_priv(struct adapter *adapt)
+{
+ if (adapt->HalFunc.init_xmit_priv != NULL)
+ return adapt->HalFunc.init_xmit_priv(adapt);
+ return _FAIL;
+}
+
+void rtw_hal_free_xmit_priv(struct adapter *adapt)
+{
+ if (adapt->HalFunc.free_xmit_priv != NULL)
+ adapt->HalFunc.free_xmit_priv(adapt);
+}
+
+s32 rtw_hal_init_recv_priv(struct adapter *adapt)
+{
+ if (adapt->HalFunc.init_recv_priv)
+ return adapt->HalFunc.init_recv_priv(adapt);
+
+ return _FAIL;
+}
+
+void rtw_hal_free_recv_priv(struct adapter *adapt)
+{
+ if (adapt->HalFunc.free_recv_priv)
+ adapt->HalFunc.free_recv_priv(adapt);
+}
+
+void rtw_hal_update_ra_mask(struct adapter *adapt, u32 mac_id, u8 rssi_level)
+{
+ struct mlme_priv *pmlmepriv = &(adapt->mlmepriv);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+#ifdef CONFIG_88EU_AP_MODE
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &adapt->stapriv;
+ if ((mac_id-1) > 0)
+ psta = pstapriv->sta_aid[(mac_id-1) - 1];
+ if (psta)
+ add_RATid(adapt, psta, 0);/* todo: based on rssi_level*/
+#endif
+ } else {
+ if (adapt->HalFunc.UpdateRAMaskHandler)
+ adapt->HalFunc.UpdateRAMaskHandler(adapt, mac_id,
+ rssi_level);
+ }
+}
+
+void rtw_hal_add_ra_tid(struct adapter *adapt, u32 bitmap, u8 arg,
+ u8 rssi_level)
+{
+ if (adapt->HalFunc.Add_RateATid)
+ adapt->HalFunc.Add_RateATid(adapt, bitmap, arg,
+ rssi_level);
+}
+
+/* Start the interface-specific thread */
+void rtw_hal_start_thread(struct adapter *adapt)
+{
+ if (adapt->HalFunc.run_thread)
+ adapt->HalFunc.run_thread(adapt);
+}
+
+/* Stop specific interface thread */
+void rtw_hal_stop_thread(struct adapter *adapt)
+{
+ if (adapt->HalFunc.cancel_thread)
+ adapt->HalFunc.cancel_thread(adapt);
+}
+
+u32 rtw_hal_read_bbreg(struct adapter *adapt, u32 regaddr, u32 bitmask)
+{
+ u32 data = 0;
+
+ if (adapt->HalFunc.read_bbreg)
+ data = adapt->HalFunc.read_bbreg(adapt, regaddr, bitmask);
+ return data;
+}
+
+void rtw_hal_write_bbreg(struct adapter *adapt, u32 regaddr, u32 bitmask,
+ u32 data)
+{
+ if (adapt->HalFunc.write_bbreg)
+ adapt->HalFunc.write_bbreg(adapt, regaddr, bitmask, data);
+}
+
+u32 rtw_hal_read_rfreg(struct adapter *adapt, enum rf_radio_path rfpath,
+ u32 regaddr, u32 bitmask)
+{
+ u32 data = 0;
+
+ if (adapt->HalFunc.read_rfreg)
+ data = adapt->HalFunc.read_rfreg(adapt, rfpath, regaddr,
+ bitmask);
+ return data;
+}
+
+void rtw_hal_write_rfreg(struct adapter *adapt, enum rf_radio_path rfpath,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ if (adapt->HalFunc.write_rfreg)
+ adapt->HalFunc.write_rfreg(adapt, rfpath, regaddr,
+ bitmask, data);
+}
+
+s32 rtw_hal_interrupt_handler(struct adapter *adapt)
+{
+ if (adapt->HalFunc.interrupt_handler)
+ return adapt->HalFunc.interrupt_handler(adapt);
+ return _FAIL;
+}
+
+void rtw_hal_set_bwmode(struct adapter *adapt,
+ enum ht_channel_width bandwidth, u8 offset)
+{
+ if (adapt->HalFunc.set_bwmode_handler)
+ adapt->HalFunc.set_bwmode_handler(adapt, bandwidth,
+ offset);
+}
+
+void rtw_hal_set_chan(struct adapter *adapt, u8 channel)
+{
+ if (adapt->HalFunc.set_channel_handler)
+ adapt->HalFunc.set_channel_handler(adapt, channel);
+}
+
+void rtw_hal_dm_watchdog(struct adapter *adapt)
+{
+ if (adapt->HalFunc.hal_dm_watchdog)
+ adapt->HalFunc.hal_dm_watchdog(adapt);
+}
+
+void rtw_hal_bcn_related_reg_setting(struct adapter *adapt)
+{
+ if (adapt->HalFunc.SetBeaconRelatedRegistersHandler)
+ adapt->HalFunc.SetBeaconRelatedRegistersHandler(adapt);
+}
+
+u8 rtw_hal_antdiv_before_linked(struct adapter *adapt)
+{
+ if (adapt->HalFunc.AntDivBeforeLinkHandler)
+ return adapt->HalFunc.AntDivBeforeLinkHandler(adapt);
+ return false;
+}
+
+void rtw_hal_antdiv_rssi_compared(struct adapter *adapt,
+ struct wlan_bssid_ex *dst,
+ struct wlan_bssid_ex *src)
+{
+ if (adapt->HalFunc.AntDivCompareHandler)
+ adapt->HalFunc.AntDivCompareHandler(adapt, dst, src);
+}
+
+void rtw_hal_sreset_init(struct adapter *adapt)
+{
+ if (adapt->HalFunc.sreset_init_value)
+ adapt->HalFunc.sreset_init_value(adapt);
+}
+
+void rtw_hal_sreset_reset(struct adapter *adapt)
+{
+ if (adapt->HalFunc.silentreset)
+ adapt->HalFunc.silentreset(adapt);
+}
+
+void rtw_hal_sreset_reset_value(struct adapter *adapt)
+{
+ if (adapt->HalFunc.sreset_reset_value)
+ adapt->HalFunc.sreset_reset_value(adapt);
+}
+
+void rtw_hal_sreset_xmit_status_check(struct adapter *adapt)
+{
+ if (adapt->HalFunc.sreset_xmit_status_check)
+ adapt->HalFunc.sreset_xmit_status_check(adapt);
+}
+
+void rtw_hal_sreset_linked_status_check(struct adapter *adapt)
+{
+ if (adapt->HalFunc.sreset_linked_status_check)
+ adapt->HalFunc.sreset_linked_status_check(adapt);
+}
+
+u8 rtw_hal_sreset_get_wifi_status(struct adapter *adapt)
+{
+ u8 status = 0;
+
+ if (adapt->HalFunc.sreset_get_wifi_status)
+ status = adapt->HalFunc.sreset_get_wifi_status(adapt);
+ return status;
+}
+
+int rtw_hal_iol_cmd(struct adapter *adapter, struct xmit_frame *xmit_frame,
+ u32 max_wating_ms, u32 bndy_cnt)
+{
+ if (adapter->HalFunc.IOL_exec_cmds_sync)
+ return adapter->HalFunc.IOL_exec_cmds_sync(adapter, xmit_frame,
+ max_wating_ms,
+ bndy_cnt);
+ return _FAIL;
+}
+
+void rtw_hal_notch_filter(struct adapter *adapter, bool enable)
+{
+ if (adapter->HalFunc.hal_notch_filter)
+ adapter->HalFunc.hal_notch_filter(adapter, enable);
+}
+
+void rtw_hal_reset_security_engine(struct adapter *adapter)
+{
+ if (adapter->HalFunc.hal_reset_security_engine)
+ adapter->HalFunc.hal_reset_security_engine(adapter);
+}
+
+s32 rtw_hal_c2h_handler(struct adapter *adapter, struct c2h_evt_hdr *c2h_evt)
+{
+ s32 ret = _FAIL;
+
+ if (adapter->HalFunc.c2h_handler)
+ ret = adapter->HalFunc.c2h_handler(adapter, c2h_evt);
+ return ret;
+}
+
+c2h_id_filter rtw_hal_c2h_id_filter_ccx(struct adapter *adapter)
+{
+ return adapter->HalFunc.c2h_id_filter_ccx;
+}
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
new file mode 100644
index 0000000..285475f
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -0,0 +1,2171 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+/* include files */
+
+#include "odm_precomp.h"
+
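+/*
+ * This table appears to map a dB value n (counting 1..96 row-major across the
+ * rows) to the linear amplitude round(10^(n/20)), with the final entry
+ * saturated at the u16 maximum.
+ */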
+static const u16 dB_Invert_Table[8][12] = {
+ {1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4},
+ {4, 5, 6, 6, 7, 8, 9, 10, 11, 13, 14, 16},
+ {18, 20, 22, 25, 28, 32, 35, 40, 45, 50, 56, 63},
+ {71, 79, 89, 100, 112, 126, 141, 158, 178, 200, 224, 251},
+ {282, 316, 355, 398, 447, 501, 562, 631, 708, 794, 891, 1000},
+ {1122, 1259, 1413, 1585, 1778, 1995, 2239, 2512, 2818, 3162, 3548, 3981},
+ {4467, 5012, 5623, 6310, 7079, 7943, 8913, 10000, 11220, 12589, 14125, 15849},
+ {17783, 19953, 22387, 25119, 28184, 31623, 35481, 39811, 44668, 50119, 56234, 65535}
+};
+
+/* avoid compiler warnings on FreeBSD ==> TODO: modify */
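+/*
+ * EDCA parameter values selected per detected AP vendor, presumably written
+ * to the AC_BE EDCA register by the EDCA turbo logic; the first two columns
+ * appear to be the uplink/downlink-dominant settings noted below.
+ */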
+static u32 EDCAParam[HT_IOT_PEER_MAX][3] = {
+ /* UL DL */
+ {0x5ea42b, 0x5ea42b, 0x5ea42b}, /* 0:unknown AP */
+ {0xa44f, 0x5ea44f, 0x5e431c}, /* 1:realtek AP */
+ {0x5ea42b, 0x5ea42b, 0x5ea42b}, /* 2:unknown AP => realtek_92SE */
+ {0x5ea32b, 0x5ea42b, 0x5e4322}, /* 3:broadcom AP */
+ {0x5ea422, 0x00a44f, 0x00a44f}, /* 4:ralink AP */
+ {0x5ea322, 0x00a630, 0x00a44f}, /* 5:atheros AP */
+	{0x5e4322, 0x5e4322, 0x5e4322}, /* 6:cisco AP */
+ {0x5ea44f, 0x00a44f, 0x5ea42b}, /* 8:marvell AP */
+ {0x5ea42b, 0x5ea42b, 0x5ea42b}, /* 10:unknown AP=> 92U AP */
+ {0x5ea42b, 0xa630, 0x5e431c}, /* 11:airgocap AP */
+};
+
+/* Global var */
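+/*
+ * Each entry appears to pack the same linear gain word X twice, in bits [9:0]
+ * and bits [31:22], where X is roughly round(256 * 10^(dB/20)); 0x100 is the
+ * 0 dB reference at index 12, in 0.5 dB steps.
+ */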
+u32 OFDMSwingTable[OFDM_TABLE_SIZE_92D] = {
+ 0x7f8001fe, /* 0, +6.0dB */
+ 0x788001e2, /* 1, +5.5dB */
+ 0x71c001c7, /* 2, +5.0dB */
+ 0x6b8001ae, /* 3, +4.5dB */
+ 0x65400195, /* 4, +4.0dB */
+ 0x5fc0017f, /* 5, +3.5dB */
+ 0x5a400169, /* 6, +3.0dB */
+ 0x55400155, /* 7, +2.5dB */
+ 0x50800142, /* 8, +2.0dB */
+ 0x4c000130, /* 9, +1.5dB */
+ 0x47c0011f, /* 10, +1.0dB */
+ 0x43c0010f, /* 11, +0.5dB */
+ 0x40000100, /* 12, +0dB */
+ 0x3c8000f2, /* 13, -0.5dB */
+ 0x390000e4, /* 14, -1.0dB */
+ 0x35c000d7, /* 15, -1.5dB */
+ 0x32c000cb, /* 16, -2.0dB */
+ 0x300000c0, /* 17, -2.5dB */
+ 0x2d4000b5, /* 18, -3.0dB */
+ 0x2ac000ab, /* 19, -3.5dB */
+ 0x288000a2, /* 20, -4.0dB */
+ 0x26000098, /* 21, -4.5dB */
+ 0x24000090, /* 22, -5.0dB */
+ 0x22000088, /* 23, -5.5dB */
+ 0x20000080, /* 24, -6.0dB */
+ 0x1e400079, /* 25, -6.5dB */
+ 0x1c800072, /* 26, -7.0dB */
+	0x1b00006c, /* 27, -7.5dB */
+ 0x19800066, /* 28, -8.0dB */
+ 0x18000060, /* 29, -8.5dB */
+ 0x16c0005b, /* 30, -9.0dB */
+ 0x15800056, /* 31, -9.5dB */
+ 0x14400051, /* 32, -10.0dB */
+ 0x1300004c, /* 33, -10.5dB */
+ 0x12000048, /* 34, -11.0dB */
+ 0x11000044, /* 35, -11.5dB */
+ 0x10000040, /* 36, -12.0dB */
+	0x0f00003c, /* 37, -12.5dB */
+	0x0e400039, /* 38, -13.0dB */
+	0x0d800036, /* 39, -13.5dB */
+	0x0cc00033, /* 40, -14.0dB */
+	0x0c000030, /* 41, -14.5dB */
+	0x0b40002d, /* 42, -15.0dB */
+};
+
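+/*
+ * Each row presumably holds the eight CCK TX filter coefficients for one
+ * 0.5 dB step of attenuation (row 0 = 0 dB). The channel 14 table below
+ * zeroes the trailing taps, presumably to meet the tighter channel 14
+ * spectral mask.
+ */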
+u8 CCKSwingTable_Ch1_Ch13[CCK_TABLE_SIZE][8] = {
+ {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0dB */
+ {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 1, -0.5dB */
+ {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 2, -1.0dB */
+ {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 3, -1.5dB */
+ {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 4, -2.0dB */
+ {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 5, -2.5dB */
+ {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 6, -3.0dB */
+ {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 7, -3.5dB */
+ {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 8, -4.0dB */
+ {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 9, -4.5dB */
+ {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 10, -5.0dB */
+ {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 11, -5.5dB */
+ {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 12, -6.0dB */
+ {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 13, -6.5dB */
+ {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 14, -7.0dB */
+ {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 15, -7.5dB */
+ {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
+ {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 17, -8.5dB */
+ {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 18, -9.0dB */
+ {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 19, -9.5dB */
+ {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 20, -10.0dB */
+ {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 21, -10.5dB */
+ {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 22, -11.0dB */
+ {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 23, -11.5dB */
+ {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 24, -12.0dB */
+ {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 25, -12.5dB */
+ {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 26, -13.0dB */
+ {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 27, -13.5dB */
+ {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 28, -14.0dB */
+ {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 29, -14.5dB */
+ {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 30, -15.0dB */
+ {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 31, -15.5dB */
+ {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB */
+};
+
+u8 CCKSwingTable_Ch14[CCK_TABLE_SIZE][8] = {
+ {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, /* 0, +0dB */
+ {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 1, -0.5dB */
+ {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 2, -1.0dB */
+ {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 3, -1.5dB */
+ {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 4, -2.0dB */
+ {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 5, -2.5dB */
+ {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 6, -3.0dB */
+ {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 7, -3.5dB */
+ {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 8, -4.0dB */
+ {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 9, -4.5dB */
+ {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 10, -5.0dB */
+ {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 11, -5.5dB */
+ {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 12, -6.0dB */
+ {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 13, -6.5dB */
+ {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 14, -7.0dB */
+ {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 15, -7.5dB */
+ {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
+ {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 17, -8.5dB */
+ {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 18, -9.0dB */
+ {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 19, -9.5dB */
+ {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 20, -10.0dB */
+ {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 21, -10.5dB */
+ {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 22, -11.0dB */
+ {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 23, -11.5dB */
+ {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 24, -12.0dB */
+ {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 25, -12.5dB */
+ {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 26, -13.0dB */
+ {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 27, -13.5dB */
+ {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 28, -14.0dB */
+ {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 29, -14.5dB */
+ {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 30, -15.0dB */
+ {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 31, -15.5dB */
+ {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */
+};
+
+
+#define RxDefaultAnt1 0x65a9
+#define RxDefaultAnt2 0x569a
+
+/* 3 Export Interface */
+
+/* 2011/09/21 MH Added to describe the resources each team needs to allocate?? */
+void ODM_DMInit(struct odm_dm_struct *pDM_Odm)
+{
+ /* 2012.05.03 Luke: For all IC series */
+ odm_CommonInfoSelfInit(pDM_Odm);
+ odm_CmnInfoInit_Debug(pDM_Odm);
+ odm_DIGInit(pDM_Odm);
+ odm_RateAdaptiveMaskInit(pDM_Odm);
+
+ if (pDM_Odm->SupportICType & ODM_IC_11AC_SERIES) {
+ ;
+ } else if (pDM_Odm->SupportICType & ODM_IC_11N_SERIES) {
+ odm_PrimaryCCA_Init(pDM_Odm); /* Gary */
+ odm_DynamicBBPowerSavingInit(pDM_Odm);
+ odm_DynamicTxPowerInit(pDM_Odm);
+ odm_TXPowerTrackingInit(pDM_Odm);
+ ODM_EdcaTurboInit(pDM_Odm);
+ ODM_RAInfo_Init_all(pDM_Odm);
+ if ((pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) ||
+ (pDM_Odm->AntDivType == CGCS_RX_HW_ANTDIV) ||
+ (pDM_Odm->AntDivType == CG_TRX_SMART_ANTDIV))
+ odm_InitHybridAntDiv(pDM_Odm);
+ else if (pDM_Odm->AntDivType == CGCS_RX_SW_ANTDIV)
+ odm_SwAntDivInit(pDM_Odm);
+ }
+}
+
+/* 2011/09/20 MH This is the entry point for all teams to execute HW outsourced DM. */
+/* You cannot add any dummy functions here; be careful, you may only use the DM structure */
+/* to perform any new ODM_DM. */
+void ODM_DMWatchdog(struct odm_dm_struct *pDM_Odm)
+{
+ /* 2012.05.03 Luke: For all IC series */
+ odm_GlobalAdapterCheck();
+ odm_CmnInfoHook_Debug(pDM_Odm);
+ odm_CmnInfoUpdate_Debug(pDM_Odm);
+ odm_CommonInfoSelfUpdate(pDM_Odm);
+ odm_FalseAlarmCounterStatistics(pDM_Odm);
+ odm_RSSIMonitorCheck(pDM_Odm);
+
+ /* For CE Platform(SPRD or Tablet) */
+ /* 8723A or 8189ES platform */
+ /* NeilChen--2012--08--24-- */
+ /* Fix Leave LPS issue */
+ if ((pDM_Odm->Adapter->pwrctrlpriv.pwr_mode != PS_MODE_ACTIVE) &&/* in LPS mode */
+ ((pDM_Odm->SupportICType & (ODM_RTL8723A)) ||
+ (pDM_Odm->SupportICType & (ODM_RTL8188E) &&
+ ((pDM_Odm->SupportInterface == ODM_ITRF_SDIO))))) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("----Step1: odm_DIG is in LPS mode\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("---Step2: 8723AS is in LPS mode\n"));
+ odm_DIGbyRSSI_LPS(pDM_Odm);
+ } else {
+ odm_DIG(pDM_Odm);
+ }
+ odm_CCKPacketDetectionThresh(pDM_Odm);
+
+ if (*(pDM_Odm->pbPowerSaving))
+ return;
+
+ odm_RefreshRateAdaptiveMask(pDM_Odm);
+
+ odm_DynamicBBPowerSaving(pDM_Odm);
+ odm_DynamicPrimaryCCA(pDM_Odm);
+ if ((pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) ||
+ (pDM_Odm->AntDivType == CGCS_RX_HW_ANTDIV) ||
+ (pDM_Odm->AntDivType == CG_TRX_SMART_ANTDIV))
+ odm_HwAntDiv(pDM_Odm);
+ else if (pDM_Odm->AntDivType == CGCS_RX_SW_ANTDIV)
+ odm_SwAntDivChkAntSwitch(pDM_Odm, SWAW_STEP_PEAK);
+
+ if (pDM_Odm->SupportICType & ODM_IC_11AC_SERIES) {
+ ;
+ } else if (pDM_Odm->SupportICType & ODM_IC_11N_SERIES) {
+ ODM_TXPowerTrackingCheck(pDM_Odm);
+ odm_EdcaTurboCheck(pDM_Odm);
+ odm_DynamicTxPower(pDM_Odm);
+ }
+ odm_dtc(pDM_Odm);
+}
+
+/* Init fixed HW values; used only at init time. */
+void ODM_CmnInfoInit(struct odm_dm_struct *pDM_Odm, enum odm_common_info_def CmnInfo, u32 Value)
+{
+ /* This section is used for init value */
+ switch (CmnInfo) {
+ /* Fixed ODM value. */
+ case ODM_CMNINFO_ABILITY:
+ pDM_Odm->SupportAbility = (u32)Value;
+ break;
+ case ODM_CMNINFO_PLATFORM:
+ pDM_Odm->SupportPlatform = (u8)Value;
+ break;
+ case ODM_CMNINFO_INTERFACE:
+ pDM_Odm->SupportInterface = (u8)Value;
+ break;
+ case ODM_CMNINFO_MP_TEST_CHIP:
+ pDM_Odm->bIsMPChip = (u8)Value;
+ break;
+ case ODM_CMNINFO_IC_TYPE:
+ pDM_Odm->SupportICType = Value;
+ break;
+ case ODM_CMNINFO_CUT_VER:
+ pDM_Odm->CutVersion = (u8)Value;
+ break;
+ case ODM_CMNINFO_FAB_VER:
+ pDM_Odm->FabVersion = (u8)Value;
+ break;
+ case ODM_CMNINFO_RF_TYPE:
+ pDM_Odm->RFType = (u8)Value;
+ break;
+ case ODM_CMNINFO_RF_ANTENNA_TYPE:
+ pDM_Odm->AntDivType = (u8)Value;
+ break;
+ case ODM_CMNINFO_BOARD_TYPE:
+ pDM_Odm->BoardType = (u8)Value;
+ break;
+ case ODM_CMNINFO_EXT_LNA:
+ pDM_Odm->ExtLNA = (u8)Value;
+ break;
+ case ODM_CMNINFO_EXT_PA:
+ pDM_Odm->ExtPA = (u8)Value;
+ break;
+ case ODM_CMNINFO_EXT_TRSW:
+ pDM_Odm->ExtTRSW = (u8)Value;
+ break;
+ case ODM_CMNINFO_PATCH_ID:
+ pDM_Odm->PatchID = (u8)Value;
+ break;
+ case ODM_CMNINFO_BINHCT_TEST:
+ pDM_Odm->bInHctTest = (bool)Value;
+ break;
+ case ODM_CMNINFO_BWIFI_TEST:
+ pDM_Odm->bWIFITest = (bool)Value;
+ break;
+ case ODM_CMNINFO_SMART_CONCURRENT:
+ pDM_Odm->bDualMacSmartConcurrent = (bool)Value;
+ break;
+	/* To remove the compiler warning, an empty default statement must be added to handle the other values. */
+ default:
+ /* do nothing */
+ break;
+ }
+
+	/* Tx power tracking BB swing table. */
+	/* The base index is 12 (0 dB): index n below 12 raises tx pwr by ((12-n)/2) dB; 13 and above lower it by ((n-12)/2) dB. */
+	pDM_Odm->BbSwingIdxOfdm = 12; /* Set default value to index 12. */
+ pDM_Odm->BbSwingIdxOfdmCurrent = 12;
+ pDM_Odm->BbSwingFlagOfdm = false;
+}
+
+void ODM_CmnInfoHook(struct odm_dm_struct *pDM_Odm, enum odm_common_info_def CmnInfo, void *pValue)
+{
+ /* */
+ /* Hook call by reference pointer. */
+ /* */
+ switch (CmnInfo) {
+ /* Dynamic call by reference pointer. */
+ case ODM_CMNINFO_MAC_PHY_MODE:
+ pDM_Odm->pMacPhyMode = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_TX_UNI:
+ pDM_Odm->pNumTxBytesUnicast = (u64 *)pValue;
+ break;
+ case ODM_CMNINFO_RX_UNI:
+ pDM_Odm->pNumRxBytesUnicast = (u64 *)pValue;
+ break;
+ case ODM_CMNINFO_WM_MODE:
+ pDM_Odm->pWirelessMode = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_BAND:
+ pDM_Odm->pBandType = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_SEC_CHNL_OFFSET:
+ pDM_Odm->pSecChOffset = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_SEC_MODE:
+ pDM_Odm->pSecurity = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_BW:
+ pDM_Odm->pBandWidth = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_CHNL:
+ pDM_Odm->pChannel = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_DMSP_GET_VALUE:
+ pDM_Odm->pbGetValueFromOtherMac = (bool *)pValue;
+ break;
+ case ODM_CMNINFO_BUDDY_ADAPTOR:
+ pDM_Odm->pBuddyAdapter = (struct adapter **)pValue;
+ break;
+ case ODM_CMNINFO_DMSP_IS_MASTER:
+ pDM_Odm->pbMasterOfDMSP = (bool *)pValue;
+ break;
+ case ODM_CMNINFO_SCAN:
+ pDM_Odm->pbScanInProcess = (bool *)pValue;
+ break;
+ case ODM_CMNINFO_POWER_SAVING:
+ pDM_Odm->pbPowerSaving = (bool *)pValue;
+ break;
+ case ODM_CMNINFO_ONE_PATH_CCA:
+ pDM_Odm->pOnePathCCA = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_DRV_STOP:
+ pDM_Odm->pbDriverStopped = (bool *)pValue;
+ break;
+ case ODM_CMNINFO_PNP_IN:
+ pDM_Odm->pbDriverIsGoingToPnpSetPowerSleep = (bool *)pValue;
+ break;
+ case ODM_CMNINFO_INIT_ON:
+ pDM_Odm->pinit_adpt_in_progress = (bool *)pValue;
+ break;
+ case ODM_CMNINFO_ANT_TEST:
+ pDM_Odm->pAntennaTest = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_NET_CLOSED:
+ pDM_Odm->pbNet_closed = (bool *)pValue;
+ break;
+ case ODM_CMNINFO_MP_MODE:
+ pDM_Odm->mp_mode = (u8 *)pValue;
+ break;
+	/* To remove the compiler warning, an empty default statement must be added to handle the other values. */
+ default:
+ /* do nothing */
+ break;
+ }
+}
+
+void ODM_CmnInfoPtrArrayHook(struct odm_dm_struct *pDM_Odm, enum odm_common_info_def CmnInfo, u16 Index, void *pValue)
+{
+ /* Hook call by reference pointer. */
+ switch (CmnInfo) {
+ /* Dynamic call by reference pointer. */
+ case ODM_CMNINFO_STA_STATUS:
+ pDM_Odm->pODM_StaInfo[Index] = (struct sta_info *)pValue;
+ break;
+	/* To remove the compiler warning, an empty default statement must be added to handle the other values. */
+ default:
+ /* do nothing */
+ break;
+ }
+}
+
+/* Update Band/Channel/etc. The values are dynamic but not per-packet. */
+void ODM_CmnInfoUpdate(struct odm_dm_struct *pDM_Odm, u32 CmnInfo, u64 Value)
+{
+ /* */
+ /* This init variable may be changed in run time. */
+ /* */
+ switch (CmnInfo) {
+ case ODM_CMNINFO_ABILITY:
+ pDM_Odm->SupportAbility = (u32)Value;
+ break;
+ case ODM_CMNINFO_RF_TYPE:
+ pDM_Odm->RFType = (u8)Value;
+ break;
+ case ODM_CMNINFO_WIFI_DIRECT:
+ pDM_Odm->bWIFI_Direct = (bool)Value;
+ break;
+ case ODM_CMNINFO_WIFI_DISPLAY:
+ pDM_Odm->bWIFI_Display = (bool)Value;
+ break;
+ case ODM_CMNINFO_LINK:
+ pDM_Odm->bLinked = (bool)Value;
+ break;
+ case ODM_CMNINFO_RSSI_MIN:
+ pDM_Odm->RSSI_Min = (u8)Value;
+ break;
+ case ODM_CMNINFO_DBG_COMP:
+ pDM_Odm->DebugComponents = Value;
+ break;
+ case ODM_CMNINFO_DBG_LEVEL:
+ pDM_Odm->DebugLevel = (u32)Value;
+ break;
+ case ODM_CMNINFO_RA_THRESHOLD_HIGH:
+ pDM_Odm->RateAdaptive.HighRSSIThresh = (u8)Value;
+ break;
+ case ODM_CMNINFO_RA_THRESHOLD_LOW:
+ pDM_Odm->RateAdaptive.LowRSSIThresh = (u8)Value;
+ break;
+ }
+}
+
+void odm_CommonInfoSelfInit(struct odm_dm_struct *pDM_Odm)
+{
+ pDM_Odm->bCckHighPower = (bool) ODM_GetBBReg(pDM_Odm, 0x824, BIT9);
+ pDM_Odm->RFPathRxEnable = (u8) ODM_GetBBReg(pDM_Odm, 0xc04, 0x0F);
+ if (pDM_Odm->SupportICType & (ODM_RTL8192C|ODM_RTL8192D))
+ pDM_Odm->AntDivType = CG_TRX_HW_ANTDIV;
+ if (pDM_Odm->SupportICType & (ODM_RTL8723A))
+ pDM_Odm->AntDivType = CGCS_RX_SW_ANTDIV;
+
+ ODM_InitDebugSetting(pDM_Odm);
+}
+
+void odm_CommonInfoSelfUpdate(struct odm_dm_struct *pDM_Odm)
+{
+ u8 EntryCnt = 0;
+ u8 i;
+ struct sta_info *pEntry;
+
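+	/*
+	 * In 40 MHz operation *pChannel appears to be the centre channel; the
+	 * 20 MHz control channel sits 2 channel numbers (10 MHz) below or above
+	 * it depending on the secondary-channel offset.
+	 */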
+ if (*(pDM_Odm->pBandWidth) == ODM_BW40M) {
+ if (*(pDM_Odm->pSecChOffset) == 1)
+ pDM_Odm->ControlChannel = *(pDM_Odm->pChannel) - 2;
+ else if (*(pDM_Odm->pSecChOffset) == 2)
+ pDM_Odm->ControlChannel = *(pDM_Odm->pChannel) + 2;
+ } else {
+ pDM_Odm->ControlChannel = *(pDM_Odm->pChannel);
+ }
+
+ for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
+ pEntry = pDM_Odm->pODM_StaInfo[i];
+ if (IS_STA_VALID(pEntry))
+ EntryCnt++;
+ }
+ if (EntryCnt == 1)
+ pDM_Odm->bOneEntryOnly = true;
+ else
+ pDM_Odm->bOneEntryOnly = false;
+}
+
+void odm_CmnInfoInit_Debug(struct odm_dm_struct *pDM_Odm)
+{
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("odm_CmnInfoInit_Debug==>\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportPlatform=%d\n", pDM_Odm->SupportPlatform));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportAbility=0x%x\n", pDM_Odm->SupportAbility));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportInterface=%d\n", pDM_Odm->SupportInterface));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportICType=0x%x\n", pDM_Odm->SupportICType));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("CutVersion=%d\n", pDM_Odm->CutVersion));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("FabVersion=%d\n", pDM_Odm->FabVersion));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("RFType=%d\n", pDM_Odm->RFType));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("BoardType=%d\n", pDM_Odm->BoardType));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("ExtLNA=%d\n", pDM_Odm->ExtLNA));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("ExtPA=%d\n", pDM_Odm->ExtPA));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("ExtTRSW=%d\n", pDM_Odm->ExtTRSW));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("PatchID=%d\n", pDM_Odm->PatchID));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bInHctTest=%d\n", pDM_Odm->bInHctTest));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bWIFITest=%d\n", pDM_Odm->bWIFITest));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bDualMacSmartConcurrent=%d\n", pDM_Odm->bDualMacSmartConcurrent));
+}
+
+void odm_CmnInfoHook_Debug(struct odm_dm_struct *pDM_Odm)
+{
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("odm_CmnInfoHook_Debug==>\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pNumTxBytesUnicast=%llu\n", *(pDM_Odm->pNumTxBytesUnicast)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pNumRxBytesUnicast=%llu\n", *(pDM_Odm->pNumRxBytesUnicast)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pWirelessMode=0x%x\n", *(pDM_Odm->pWirelessMode)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pSecChOffset=%d\n", *(pDM_Odm->pSecChOffset)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pSecurity=%d\n", *(pDM_Odm->pSecurity)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pBandWidth=%d\n", *(pDM_Odm->pBandWidth)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pChannel=%d\n", *(pDM_Odm->pChannel)));
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pbScanInProcess=%d\n", *(pDM_Odm->pbScanInProcess)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pbPowerSaving=%d\n", *(pDM_Odm->pbPowerSaving)));
+
+ if (pDM_Odm->SupportPlatform & (ODM_AP|ODM_ADSL))
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pOnePathCCA=%d\n", *(pDM_Odm->pOnePathCCA)));
+}
+
+void odm_CmnInfoUpdate_Debug(struct odm_dm_struct *pDM_Odm)
+{
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("odm_CmnInfoUpdate_Debug==>\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bWIFI_Direct=%d\n", pDM_Odm->bWIFI_Direct));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bWIFI_Display=%d\n", pDM_Odm->bWIFI_Display));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bLinked=%d\n", pDM_Odm->bLinked));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("RSSI_Min=%d\n", pDM_Odm->RSSI_Min));
+}
+
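+/*
+ * When one-path CCA is active, ODM_Write_DIG() uses this helper to derive the
+ * IGI for the other RF path: values below ONERCCA_LOW_TH are raised by up to
+ * ONERCCA_LOW_DIFF but never above the threshold; higher values pass through
+ * unchanged.
+ */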
+static int getIGIForDiff(int value_IGI)
+{
+ #define ONERCCA_LOW_TH 0x30
+ #define ONERCCA_LOW_DIFF 8
+
+ if (value_IGI < ONERCCA_LOW_TH) {
+ if ((ONERCCA_LOW_TH - value_IGI) < ONERCCA_LOW_DIFF)
+ return ONERCCA_LOW_TH;
+ else
+ return value_IGI + ONERCCA_LOW_DIFF;
+ } else {
+ return value_IGI;
+ }
+}
+
+void ODM_Write_DIG(struct odm_dm_struct *pDM_Odm, u8 CurrentIGI)
+{
+ struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
+ ("ODM_REG(IGI_A,pDM_Odm)=0x%x, ODM_BIT(IGI,pDM_Odm)=0x%x\n",
+ ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm)));
+
+ if (pDM_DigTable->CurIGValue != CurrentIGI) {
+ if (pDM_Odm->SupportPlatform & (ODM_CE|ODM_MP)) {
+ ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
+ if (pDM_Odm->SupportICType != ODM_RTL8188E)
+ ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_B, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
+ } else if (pDM_Odm->SupportPlatform & (ODM_AP|ODM_ADSL)) {
+ switch (*(pDM_Odm->pOnePathCCA)) {
+ case ODM_CCA_2R:
+ ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
+ if (pDM_Odm->SupportICType != ODM_RTL8188E)
+ ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_B, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
+ break;
+ case ODM_CCA_1R_A:
+ ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
+ if (pDM_Odm->SupportICType != ODM_RTL8188E)
+ ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_B, pDM_Odm), ODM_BIT(IGI, pDM_Odm), getIGIForDiff(CurrentIGI));
+ break;
+ case ODM_CCA_1R_B:
+ ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm), getIGIForDiff(CurrentIGI));
+ if (pDM_Odm->SupportICType != ODM_RTL8188E)
+ ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_B, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
+ break;
+ }
+ }
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("CurrentIGI(0x%02x).\n", CurrentIGI));
+ /* pDM_DigTable->PreIGValue = pDM_DigTable->CurIGValue; */
+ pDM_DigTable->CurIGValue = CurrentIGI;
+ }
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("ODM_Write_DIG():CurrentIGI=0x%x\n", CurrentIGI));
+
+/* Added by Neil Chen to enable EDCCA on the MP platform */
+}
+
+/* Need LPS mode for CE platform --2012--08--24--- */
+/* 8723AS/8189ES */
+void odm_DIGbyRSSI_LPS(struct odm_dm_struct *pDM_Odm)
+{
+ struct adapter *pAdapter = pDM_Odm->Adapter;
+ struct false_alarm_stats *pFalseAlmCnt = &pDM_Odm->FalseAlmCnt;
+
+ u8 RSSI_Lower = DM_DIG_MIN_NIC; /* 0x1E or 0x1C */
+ u8 bFwCurrentInPSMode = false;
+ u8 CurrentIGI = pDM_Odm->RSSI_Min;
+
+ if (!(pDM_Odm->SupportICType & (ODM_RTL8723A | ODM_RTL8188E)))
+ return;
+
+ CurrentIGI = CurrentIGI + RSSI_OFFSET_DIG;
+ bFwCurrentInPSMode = pAdapter->pwrctrlpriv.bFwCurrentInPSMode;
+
+ /* Using FW PS mode to make IGI */
+ if (bFwCurrentInPSMode) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("---Neil---odm_DIG is in LPS mode\n"));
+ /* Adjust by FA in LPS MODE */
+ if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH2_LPS)
+ CurrentIGI = CurrentIGI+2;
+ else if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH1_LPS)
+ CurrentIGI = CurrentIGI+1;
+ else if (pFalseAlmCnt->Cnt_all < DM_DIG_FA_TH0_LPS)
+ CurrentIGI = CurrentIGI-1;
+ } else {
+ CurrentIGI = RSSI_Lower;
+ }
+
+ /* Lower bound checking */
+
+ /* RSSI Lower bound check */
+ if ((pDM_Odm->RSSI_Min-10) > DM_DIG_MIN_NIC)
+ RSSI_Lower = (pDM_Odm->RSSI_Min-10);
+ else
+ RSSI_Lower = DM_DIG_MIN_NIC;
+
+ /* Upper and Lower Bound checking */
+ if (CurrentIGI > DM_DIG_MAX_NIC)
+ CurrentIGI = DM_DIG_MAX_NIC;
+ else if (CurrentIGI < RSSI_Lower)
+ CurrentIGI = RSSI_Lower;
+
+ ODM_Write_DIG(pDM_Odm, CurrentIGI);/* ODM_Write_DIG(pDM_Odm, pDM_DigTable->CurIGValue); */
+}
+
+void odm_DIGInit(struct odm_dm_struct *pDM_Odm)
+{
+ struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
+
+ pDM_DigTable->CurIGValue = (u8) ODM_GetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm));
+ pDM_DigTable->RssiLowThresh = DM_DIG_THRESH_LOW;
+ pDM_DigTable->RssiHighThresh = DM_DIG_THRESH_HIGH;
+ pDM_DigTable->FALowThresh = DM_false_ALARM_THRESH_LOW;
+ pDM_DigTable->FAHighThresh = DM_false_ALARM_THRESH_HIGH;
+	/* Both board types currently use the same NIC DIG gain range. */
+	pDM_DigTable->rx_gain_range_max = DM_DIG_MAX_NIC;
+	pDM_DigTable->rx_gain_range_min = DM_DIG_MIN_NIC;
+ pDM_DigTable->BackoffVal = DM_DIG_BACKOFF_DEFAULT;
+ pDM_DigTable->BackoffVal_range_max = DM_DIG_BACKOFF_MAX;
+ pDM_DigTable->BackoffVal_range_min = DM_DIG_BACKOFF_MIN;
+ pDM_DigTable->PreCCK_CCAThres = 0xFF;
+ pDM_DigTable->CurCCK_CCAThres = 0x83;
+ pDM_DigTable->ForbiddenIGI = DM_DIG_MIN_NIC;
+ pDM_DigTable->LargeFAHit = 0;
+ pDM_DigTable->Recover_cnt = 0;
+ pDM_DigTable->DIG_Dynamic_MIN_0 = DM_DIG_MIN_NIC;
+ pDM_DigTable->DIG_Dynamic_MIN_1 = DM_DIG_MIN_NIC;
+ pDM_DigTable->bMediaConnect_0 = false;
+ pDM_DigTable->bMediaConnect_1 = false;
+
+	/* bDMInitialGainEnable gates odm_DIG() (see the PSD check there); enable it by default. */
+ pDM_Odm->bDMInitialGainEnable = true;
+}
+
+void odm_DIG(struct odm_dm_struct *pDM_Odm)
+{
+ struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
+ struct false_alarm_stats *pFalseAlmCnt = &pDM_Odm->FalseAlmCnt;
+ u8 DIG_Dynamic_MIN;
+ u8 DIG_MaxOfMin;
+ bool FirstConnect, FirstDisConnect;
+ u8 dm_dig_max, dm_dig_min;
+ u8 CurrentIGI = pDM_DigTable->CurIGValue;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG()==>\n"));
+ if ((!(pDM_Odm->SupportAbility&ODM_BB_DIG)) || (!(pDM_Odm->SupportAbility&ODM_BB_FA_CNT))) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
+ ("odm_DIG() Return: SupportAbility ODM_BB_DIG or ODM_BB_FA_CNT is disabled\n"));
+ return;
+ }
+
+ if (*(pDM_Odm->pbScanInProcess)) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG() Return: In Scan Progress\n"));
+ return;
+ }
+
+	/* Added by Neil Chen: skip DIG while PSD is in progress */
+ if (pDM_Odm->bDMInitialGainEnable == false) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG() Return: PSD is Processing\n"));
+ return;
+ }
+
+ if (pDM_Odm->SupportICType == ODM_RTL8192D) {
+ if (*(pDM_Odm->pMacPhyMode) == ODM_DMSP) {
+ if (*(pDM_Odm->pbMasterOfDMSP)) {
+ DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_0;
+ FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_0);
+ FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_0);
+ } else {
+ DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_1;
+ FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_1);
+ FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_1);
+ }
+ } else {
+ if (*(pDM_Odm->pBandType) == ODM_BAND_5G) {
+ DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_0;
+ FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_0);
+ FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_0);
+ } else {
+ DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_1;
+ FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_1);
+ FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_1);
+ }
+ }
+ } else {
+ DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_0;
+ FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_0);
+ FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_0);
+ }
+
+ /* 1 Boundary Decision */
+ if ((pDM_Odm->SupportICType & (ODM_RTL8192C|ODM_RTL8723A)) &&
+ ((pDM_Odm->BoardType == ODM_BOARD_HIGHPWR) || pDM_Odm->ExtLNA)) {
+ if (pDM_Odm->SupportPlatform & (ODM_AP|ODM_ADSL)) {
+ dm_dig_max = DM_DIG_MAX_AP_HP;
+ dm_dig_min = DM_DIG_MIN_AP_HP;
+ } else {
+ dm_dig_max = DM_DIG_MAX_NIC_HP;
+ dm_dig_min = DM_DIG_MIN_NIC_HP;
+ }
+ DIG_MaxOfMin = DM_DIG_MAX_AP_HP;
+ } else {
+ if (pDM_Odm->SupportPlatform & (ODM_AP|ODM_ADSL)) {
+ dm_dig_max = DM_DIG_MAX_AP;
+ dm_dig_min = DM_DIG_MIN_AP;
+ DIG_MaxOfMin = dm_dig_max;
+ } else {
+ dm_dig_max = DM_DIG_MAX_NIC;
+ dm_dig_min = DM_DIG_MIN_NIC;
+ DIG_MaxOfMin = DM_DIG_MAX_AP;
+ }
+ }
+ if (pDM_Odm->bLinked) {
+ /* 2 8723A Series, offset need to be 10 */
+ if (pDM_Odm->SupportICType == (ODM_RTL8723A)) {
+ /* 2 Upper Bound */
+ if ((pDM_Odm->RSSI_Min + 10) > DM_DIG_MAX_NIC)
+ pDM_DigTable->rx_gain_range_max = DM_DIG_MAX_NIC;
+ else if ((pDM_Odm->RSSI_Min + 10) < DM_DIG_MIN_NIC)
+ pDM_DigTable->rx_gain_range_max = DM_DIG_MIN_NIC;
+ else
+ pDM_DigTable->rx_gain_range_max = pDM_Odm->RSSI_Min + 10;
+ /* 2 If BT is Concurrent, need to set Lower Bound */
+ DIG_Dynamic_MIN = DM_DIG_MIN_NIC;
+ } else {
+ /* 2 Modify DIG upper bound */
+ if ((pDM_Odm->RSSI_Min + 20) > dm_dig_max)
+ pDM_DigTable->rx_gain_range_max = dm_dig_max;
+ else if ((pDM_Odm->RSSI_Min + 20) < dm_dig_min)
+ pDM_DigTable->rx_gain_range_max = dm_dig_min;
+ else
+ pDM_DigTable->rx_gain_range_max = pDM_Odm->RSSI_Min + 20;
+ /* 2 Modify DIG lower bound */
+ if (pDM_Odm->bOneEntryOnly) {
+ if (pDM_Odm->RSSI_Min < dm_dig_min)
+ DIG_Dynamic_MIN = dm_dig_min;
+ else if (pDM_Odm->RSSI_Min > DIG_MaxOfMin)
+ DIG_Dynamic_MIN = DIG_MaxOfMin;
+ else
+ DIG_Dynamic_MIN = pDM_Odm->RSSI_Min;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
+ ("odm_DIG() : bOneEntryOnly=true, DIG_Dynamic_MIN=0x%x\n",
+ DIG_Dynamic_MIN));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
+ ("odm_DIG() : pDM_Odm->RSSI_Min=%d\n",
+ pDM_Odm->RSSI_Min));
+ } else if ((pDM_Odm->SupportICType == ODM_RTL8188E) &&
+ (pDM_Odm->SupportAbility & ODM_BB_ANT_DIV)) {
+ /* 1 Lower Bound for 88E AntDiv */
+ if (pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) {
+ DIG_Dynamic_MIN = (u8) pDM_DigTable->AntDiv_RSSI_max;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
+ ("odm_DIG(): pDM_DigTable->AntDiv_RSSI_max=%d\n",
+ pDM_DigTable->AntDiv_RSSI_max));
+ }
+ } else {
+ DIG_Dynamic_MIN = dm_dig_min;
+ }
+ }
+ } else {
+ pDM_DigTable->rx_gain_range_max = dm_dig_max;
+ DIG_Dynamic_MIN = dm_dig_min;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG() : No Link\n"));
+ }
+
+ /* 1 Modify DIG lower bound, deal with abnormally large false alarm */
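+	/*
+	 * If the false alarm count stays above 10000 for three watchdog periods,
+	 * pin the IGI lower bound just above the offending IGI (ForbiddenIGI + 1)
+	 * and only start stepping it back down after Recover_cnt watchdog ticks
+	 * (3600, noted below as roughly two hours).
+	 */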
+ if (pFalseAlmCnt->Cnt_all > 10000) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("dm_DIG(): Abnornally false alarm case.\n"));
+
+ if (pDM_DigTable->LargeFAHit != 3)
+ pDM_DigTable->LargeFAHit++;
+ if (pDM_DigTable->ForbiddenIGI < CurrentIGI) {
+ pDM_DigTable->ForbiddenIGI = CurrentIGI;
+ pDM_DigTable->LargeFAHit = 1;
+ }
+
+ if (pDM_DigTable->LargeFAHit >= 3) {
+ if ((pDM_DigTable->ForbiddenIGI+1) > pDM_DigTable->rx_gain_range_max)
+ pDM_DigTable->rx_gain_range_min = pDM_DigTable->rx_gain_range_max;
+ else
+ pDM_DigTable->rx_gain_range_min = (pDM_DigTable->ForbiddenIGI + 1);
+ pDM_DigTable->Recover_cnt = 3600; /* 3600=2hr */
+ }
+
+ } else {
+ /* Recovery mechanism for IGI lower bound */
+ if (pDM_DigTable->Recover_cnt != 0) {
+ pDM_DigTable->Recover_cnt--;
+ } else {
+ if (pDM_DigTable->LargeFAHit < 3) {
+ if ((pDM_DigTable->ForbiddenIGI-1) < DIG_Dynamic_MIN) { /* DM_DIG_MIN) */
+ pDM_DigTable->ForbiddenIGI = DIG_Dynamic_MIN; /* DM_DIG_MIN; */
+ pDM_DigTable->rx_gain_range_min = DIG_Dynamic_MIN; /* DM_DIG_MIN; */
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Normal Case: At Lower Bound\n"));
+ } else {
+ pDM_DigTable->ForbiddenIGI--;
+ pDM_DigTable->rx_gain_range_min = (pDM_DigTable->ForbiddenIGI + 1);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Normal Case: Approach Lower Bound\n"));
+ }
+ } else {
+ pDM_DigTable->LargeFAHit = 0;
+ }
+ }
+ }
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
+ ("odm_DIG(): pDM_DigTable->LargeFAHit=%d\n",
+ pDM_DigTable->LargeFAHit));
+
+ /* 1 Adjust initial gain by false alarm */
+ if (pDM_Odm->bLinked) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): DIG AfterLink\n"));
+ if (FirstConnect) {
+ CurrentIGI = pDM_Odm->RSSI_Min;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("DIG: First Connect\n"));
+ } else {
+ if (pDM_Odm->SupportICType == ODM_RTL8192D) {
+ if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH2_92D)
+ CurrentIGI = CurrentIGI + 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+2; */
+ else if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH1_92D)
+ CurrentIGI = CurrentIGI + 1; /* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
+ else if (pFalseAlmCnt->Cnt_all < DM_DIG_FA_TH0_92D)
+ CurrentIGI = CurrentIGI - 1;/* pDM_DigTable->CurIGValue =pDM_DigTable->PreIGValue-1; */
+ } else {
+ if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH2)
+ CurrentIGI = CurrentIGI + 4;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+2; */
+ else if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH1)
+ CurrentIGI = CurrentIGI + 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
+ else if (pFalseAlmCnt->Cnt_all < DM_DIG_FA_TH0)
+ CurrentIGI = CurrentIGI - 2;/* pDM_DigTable->CurIGValue =pDM_DigTable->PreIGValue-1; */
+ }
+ }
+ } else {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): DIG BeforeLink\n"));
+ if (FirstDisConnect) {
+ CurrentIGI = pDM_DigTable->rx_gain_range_min;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): First DisConnect\n"));
+ } else {
+ /* 2012.03.30 LukeLee: enable DIG before link but with very high thresholds */
+ if (pFalseAlmCnt->Cnt_all > 10000)
+ CurrentIGI = CurrentIGI + 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+2; */
+ else if (pFalseAlmCnt->Cnt_all > 8000)
+ CurrentIGI = CurrentIGI + 1;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
+ else if (pFalseAlmCnt->Cnt_all < 500)
+ CurrentIGI = CurrentIGI - 1;/* pDM_DigTable->CurIGValue =pDM_DigTable->PreIGValue-1; */
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): England DIG\n"));
+ }
+ }
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): DIG End Adjust IGI\n"));
+ /* 1 Check initial gain by upper/lower bound */
+ if (CurrentIGI > pDM_DigTable->rx_gain_range_max)
+ CurrentIGI = pDM_DigTable->rx_gain_range_max;
+ if (CurrentIGI < pDM_DigTable->rx_gain_range_min)
+ CurrentIGI = pDM_DigTable->rx_gain_range_min;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
+ ("odm_DIG(): rx_gain_range_max=0x%x, rx_gain_range_min=0x%x\n",
+ pDM_DigTable->rx_gain_range_max, pDM_DigTable->rx_gain_range_min));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): TotalFA=%d\n", pFalseAlmCnt->Cnt_all));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): CurIGValue=0x%x\n", CurrentIGI));
+
+ /* 2 High power RSSI threshold */
+
+ ODM_Write_DIG(pDM_Odm, CurrentIGI);/* ODM_Write_DIG(pDM_Odm, pDM_DigTable->CurIGValue); */
+ pDM_DigTable->bMediaConnect_0 = pDM_Odm->bLinked;
+ pDM_DigTable->DIG_Dynamic_MIN_0 = DIG_Dynamic_MIN;
+}
+
+/* 3============================================================ */
+/* 3 FALSE ALARM CHECK */
+/* 3============================================================ */
+
+void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm)
+{
+ u32 ret_value;
+ struct false_alarm_stats *FalseAlmCnt = &(pDM_Odm->FalseAlmCnt);
+
+ if (!(pDM_Odm->SupportAbility & ODM_BB_FA_CNT))
+ return;
+
+ if (pDM_Odm->SupportICType & ODM_IC_11N_SERIES) {
+ /* hold ofdm counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_HOLDC_11N, BIT31, 1); /* hold page C counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT31, 1); /* hold page D counter */
+
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE1_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_Fast_Fsync = (ret_value&0xffff);
+ FalseAlmCnt->Cnt_SB_Search_fail = ((ret_value&0xffff0000)>>16);
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE2_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_OFDM_CCA = (ret_value&0xffff);
+ FalseAlmCnt->Cnt_Parity_Fail = ((ret_value&0xffff0000)>>16);
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE3_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_Rate_Illegal = (ret_value&0xffff);
+ FalseAlmCnt->Cnt_Crc8_fail = ((ret_value&0xffff0000)>>16);
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE4_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_Mcs_fail = (ret_value&0xffff);
+
+ FalseAlmCnt->Cnt_Ofdm_fail = FalseAlmCnt->Cnt_Parity_Fail + FalseAlmCnt->Cnt_Rate_Illegal +
+ FalseAlmCnt->Cnt_Crc8_fail + FalseAlmCnt->Cnt_Mcs_fail +
+ FalseAlmCnt->Cnt_Fast_Fsync + FalseAlmCnt->Cnt_SB_Search_fail;
+
+ if (pDM_Odm->SupportICType == ODM_RTL8188E) {
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_SC_CNT_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_BW_LSC = (ret_value&0xffff);
+ FalseAlmCnt->Cnt_BW_USC = ((ret_value&0xffff0000)>>16);
+ }
+
+ /* hold cck counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT12, 1);
+ ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT14, 1);
+
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_FA_LSB_11N, bMaskByte0);
+ FalseAlmCnt->Cnt_Cck_fail = ret_value;
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_FA_MSB_11N, bMaskByte3);
+ FalseAlmCnt->Cnt_Cck_fail += (ret_value & 0xff)<<8;
+
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_CCA_CNT_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_CCK_CCA = ((ret_value&0xFF)<<8) | ((ret_value&0xFF00)>>8);
+
+ FalseAlmCnt->Cnt_all = (FalseAlmCnt->Cnt_Fast_Fsync +
+ FalseAlmCnt->Cnt_SB_Search_fail +
+ FalseAlmCnt->Cnt_Parity_Fail +
+ FalseAlmCnt->Cnt_Rate_Illegal +
+ FalseAlmCnt->Cnt_Crc8_fail +
+ FalseAlmCnt->Cnt_Mcs_fail +
+ FalseAlmCnt->Cnt_Cck_fail);
+
+ FalseAlmCnt->Cnt_CCA_all = FalseAlmCnt->Cnt_OFDM_CCA + FalseAlmCnt->Cnt_CCK_CCA;
+
+ if (pDM_Odm->SupportICType >= ODM_RTL8723A) {
+ /* reset false alarm counter registers */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTC_11N, BIT31, 1);
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTC_11N, BIT31, 0);
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT27, 1);
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT27, 0);
+ /* update ofdm counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_HOLDC_11N, BIT31, 0); /* update page C counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT31, 0); /* update page D counter */
+
+ /* reset CCK CCA counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT13|BIT12, 0);
+ ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT13|BIT12, 2);
+ /* reset CCK FA counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT15|BIT14, 0);
+ ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT15|BIT14, 2);
+ }
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Enter odm_FalseAlarmCounterStatistics\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
+ ("Cnt_Fast_Fsync=%d, Cnt_SB_Search_fail=%d\n",
+ FalseAlmCnt->Cnt_Fast_Fsync, FalseAlmCnt->Cnt_SB_Search_fail));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
+ ("Cnt_Parity_Fail=%d, Cnt_Rate_Illegal=%d\n",
+ FalseAlmCnt->Cnt_Parity_Fail, FalseAlmCnt->Cnt_Rate_Illegal));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
+ ("Cnt_Crc8_fail=%d, Cnt_Mcs_fail=%d\n",
+ FalseAlmCnt->Cnt_Crc8_fail, FalseAlmCnt->Cnt_Mcs_fail));
+ } else { /* FOR ODM_IC_11AC_SERIES */
+ /* read OFDM FA counter */
+ FalseAlmCnt->Cnt_Ofdm_fail = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_11AC, bMaskLWord);
+ FalseAlmCnt->Cnt_Cck_fail = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_FA_11AC, bMaskLWord);
+ FalseAlmCnt->Cnt_all = FalseAlmCnt->Cnt_Ofdm_fail + FalseAlmCnt->Cnt_Cck_fail;
+
+		/* reset OFDM FA counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RST_11AC, BIT17, 1);
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RST_11AC, BIT17, 0);
+ /* reset CCK FA counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11AC, BIT15, 0);
+ ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11AC, BIT15, 1);
+ }
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Cnt_Cck_fail=%d\n", FalseAlmCnt->Cnt_Cck_fail));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Cnt_Ofdm_fail=%d\n", FalseAlmCnt->Cnt_Ofdm_fail));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Total False Alarm=%d\n", FalseAlmCnt->Cnt_all));
+}
+
+/* 3============================================================ */
+/* 3 CCK Packet Detect Threshold */
+/* 3============================================================ */
+
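+/*
+ * Pick the CCK CCA/packet-detection threshold from RSSI and the CCK false
+ * alarm count: 0xcd when the link is strong (RSSI_Min > 25), 0x83 for medium
+ * RSSI or when CCK false alarms exceed 1000, and 0x40 otherwise. A larger
+ * value presumably makes CCK carrier sense less sensitive.
+ */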
+void odm_CCKPacketDetectionThresh(struct odm_dm_struct *pDM_Odm)
+{
+ u8 CurCCK_CCAThres;
+ struct false_alarm_stats *FalseAlmCnt = &(pDM_Odm->FalseAlmCnt);
+
+ if (!(pDM_Odm->SupportAbility & (ODM_BB_CCK_PD|ODM_BB_FA_CNT)))
+ return;
+ if (pDM_Odm->ExtLNA)
+ return;
+ if (pDM_Odm->bLinked) {
+ if (pDM_Odm->RSSI_Min > 25) {
+ CurCCK_CCAThres = 0xcd;
+ } else if ((pDM_Odm->RSSI_Min <= 25) && (pDM_Odm->RSSI_Min > 10)) {
+ CurCCK_CCAThres = 0x83;
+ } else {
+ if (FalseAlmCnt->Cnt_Cck_fail > 1000)
+ CurCCK_CCAThres = 0x83;
+ else
+ CurCCK_CCAThres = 0x40;
+ }
+ } else {
+ if (FalseAlmCnt->Cnt_Cck_fail > 1000)
+ CurCCK_CCAThres = 0x83;
+ else
+ CurCCK_CCAThres = 0x40;
+ }
+ ODM_Write_CCK_CCA_Thres(pDM_Odm, CurCCK_CCAThres);
+}
+
+void ODM_Write_CCK_CCA_Thres(struct odm_dm_struct *pDM_Odm, u8 CurCCK_CCAThres)
+{
+ struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
+
+	if (pDM_DigTable->CurCCK_CCAThres != CurCCK_CCAThres) /* modified by Guo.Mingzhi 2012-01-03 */
+ ODM_Write1Byte(pDM_Odm, ODM_REG(CCK_CCA, pDM_Odm), CurCCK_CCAThres);
+ pDM_DigTable->PreCCK_CCAThres = pDM_DigTable->CurCCK_CCAThres;
+ pDM_DigTable->CurCCK_CCAThres = CurCCK_CCAThres;
+}
+
+/* 3============================================================ */
+/* 3 BB Power Save */
+/* 3============================================================ */
+void odm_DynamicBBPowerSavingInit(struct odm_dm_struct *pDM_Odm)
+{
+ struct rtl_ps *pDM_PSTable = &pDM_Odm->DM_PSTable;
+
+ pDM_PSTable->PreCCAState = CCA_MAX;
+ pDM_PSTable->CurCCAState = CCA_MAX;
+ pDM_PSTable->PreRFState = RF_MAX;
+ pDM_PSTable->CurRFState = RF_MAX;
+ pDM_PSTable->Rssi_val_min = 0;
+ pDM_PSTable->initialize = 0;
+}
+
+void odm_DynamicBBPowerSaving(struct odm_dm_struct *pDM_Odm)
+{
+ if ((pDM_Odm->SupportICType != ODM_RTL8192C) && (pDM_Odm->SupportICType != ODM_RTL8723A))
+ return;
+ if (!(pDM_Odm->SupportAbility & ODM_BB_PWR_SAVE))
+ return;
+ if (!(pDM_Odm->SupportPlatform & (ODM_MP|ODM_CE)))
+ return;
+
+ /* 1 2.Power Saving for 92C */
+ if ((pDM_Odm->SupportICType == ODM_RTL8192C) && (pDM_Odm->RFType == ODM_2T2R)) {
+ odm_1R_CCA(pDM_Odm);
+ } else {
+		/* 20100628 Joseph: Turn off BB power save for 88CE because it makes throughput unstable. */
+		/* 20100831 Joseph: Turn ON BB power save again after modifying AGC delay from 900 ns to 600 ns. */
+ /* 1 3.Power Saving for 88C */
+ ODM_RF_Saving(pDM_Odm, false);
+ }
+}
+
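+/*
+ * 1R CCA: once RSSI_Min reaches 35 the driver switches to single-path CCA
+ * (0xc04 low byte = 0x13 for 2T2R, 0x23 otherwise) and returns to two-path
+ * CCA (0x33) when RSSI_Min falls to 30 or below, i.e. ~5 dB of hysteresis.
+ */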
+void odm_1R_CCA(struct odm_dm_struct *pDM_Odm)
+{
+ struct rtl_ps *pDM_PSTable = &pDM_Odm->DM_PSTable;
+
+ if (pDM_Odm->RSSI_Min != 0xFF) {
+ if (pDM_PSTable->PreCCAState == CCA_2R) {
+ if (pDM_Odm->RSSI_Min >= 35)
+ pDM_PSTable->CurCCAState = CCA_1R;
+ else
+ pDM_PSTable->CurCCAState = CCA_2R;
+ } else {
+ if (pDM_Odm->RSSI_Min <= 30)
+ pDM_PSTable->CurCCAState = CCA_2R;
+ else
+ pDM_PSTable->CurCCAState = CCA_1R;
+ }
+ } else {
+ pDM_PSTable->CurCCAState = CCA_MAX;
+ }
+
+ if (pDM_PSTable->PreCCAState != pDM_PSTable->CurCCAState) {
+ if (pDM_PSTable->CurCCAState == CCA_1R) {
+ if (pDM_Odm->RFType == ODM_2T2R)
+ ODM_SetBBReg(pDM_Odm, 0xc04, bMaskByte0, 0x13);
+ else
+ ODM_SetBBReg(pDM_Odm, 0xc04, bMaskByte0, 0x23);
+ } else {
+ ODM_SetBBReg(pDM_Odm, 0xc04, bMaskByte0, 0x33);
+ }
+ pDM_PSTable->PreCCAState = pDM_PSTable->CurCCAState;
+ }
+}
+
+void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal)
+{
+ struct rtl_ps *pDM_PSTable = &pDM_Odm->DM_PSTable;
+ u8 Rssi_Up_bound = 30;
+ u8 Rssi_Low_bound = 25;
+
+ if (pDM_Odm->PatchID == 40) { /* RT_CID_819x_FUNAI_TV */
+ Rssi_Up_bound = 50;
+ Rssi_Low_bound = 45;
+ }
+ if (pDM_PSTable->initialize == 0) {
+ pDM_PSTable->Reg874 = (ODM_GetBBReg(pDM_Odm, 0x874, bMaskDWord)&0x1CC000)>>14;
+ pDM_PSTable->RegC70 = (ODM_GetBBReg(pDM_Odm, 0xc70, bMaskDWord)&BIT3)>>3;
+ pDM_PSTable->Reg85C = (ODM_GetBBReg(pDM_Odm, 0x85c, bMaskDWord)&0xFF000000)>>24;
+ pDM_PSTable->RegA74 = (ODM_GetBBReg(pDM_Odm, 0xa74, bMaskDWord)&0xF000)>>12;
+ pDM_PSTable->initialize = 1;
+ }
+
+ if (!bForceInNormal) {
+ if (pDM_Odm->RSSI_Min != 0xFF) {
+ if (pDM_PSTable->PreRFState == RF_Normal) {
+ if (pDM_Odm->RSSI_Min >= Rssi_Up_bound)
+ pDM_PSTable->CurRFState = RF_Save;
+ else
+ pDM_PSTable->CurRFState = RF_Normal;
+ } else {
+ if (pDM_Odm->RSSI_Min <= Rssi_Low_bound)
+ pDM_PSTable->CurRFState = RF_Normal;
+ else
+ pDM_PSTable->CurRFState = RF_Save;
+ }
+ } else {
+ pDM_PSTable->CurRFState = RF_MAX;
+ }
+ } else {
+ pDM_PSTable->CurRFState = RF_Normal;
+ }
+
+ if (pDM_PSTable->PreRFState != pDM_PSTable->CurRFState) {
+ if (pDM_PSTable->CurRFState == RF_Save) {
+			/* <tynli_note> 8723 RSSI report will be wrong. Set 0x874[5]=1 when entering BB power-saving mode. */
+ /* Suggested by SD3 Yu-Nan. 2011.01.20. */
+ if (pDM_Odm->SupportICType == ODM_RTL8723A)
+				ODM_SetBBReg(pDM_Odm, 0x874, BIT5, 0x1); /* Reg874[5]=1b'1 */
+			ODM_SetBBReg(pDM_Odm, 0x874, 0x1C0000, 0x2); /* Reg874[20:18]=3'b010 */
+ ODM_SetBBReg(pDM_Odm, 0xc70, BIT3, 0); /* RegC70[3]=1'b0 */
+ ODM_SetBBReg(pDM_Odm, 0x85c, 0xFF000000, 0x63); /* Reg85C[31:24]=0x63 */
+ ODM_SetBBReg(pDM_Odm, 0x874, 0xC000, 0x2); /* Reg874[15:14]=2'b10 */
+ ODM_SetBBReg(pDM_Odm, 0xa74, 0xF000, 0x3); /* RegA75[7:4]=0x3 */
+ ODM_SetBBReg(pDM_Odm, 0x818, BIT28, 0x0); /* Reg818[28]=1'b0 */
+ ODM_SetBBReg(pDM_Odm, 0x818, BIT28, 0x1); /* Reg818[28]=1'b1 */
+ } else {
+			ODM_SetBBReg(pDM_Odm, 0x874, 0x1CC000, pDM_PSTable->Reg874);
+ ODM_SetBBReg(pDM_Odm, 0xc70, BIT3, pDM_PSTable->RegC70);
+ ODM_SetBBReg(pDM_Odm, 0x85c, 0xFF000000, pDM_PSTable->Reg85C);
+ ODM_SetBBReg(pDM_Odm, 0xa74, 0xF000, pDM_PSTable->RegA74);
+ ODM_SetBBReg(pDM_Odm, 0x818, BIT28, 0x0);
+
+ if (pDM_Odm->SupportICType == ODM_RTL8723A)
+ ODM_SetBBReg(pDM_Odm, 0x874, BIT5, 0x0); /* Reg874[5]=1b'0 */
+ }
+ pDM_PSTable->PreRFState = pDM_PSTable->CurRFState;
+ }
+}
+
+/* 3============================================================ */
+/* 3 RATR MASK */
+/* 3============================================================ */
+/* 3============================================================ */
+/* 3 Rate Adaptive */
+/* 3============================================================ */
+
+void odm_RateAdaptiveMaskInit(struct odm_dm_struct *pDM_Odm)
+{
+ struct odm_rate_adapt *pOdmRA = &pDM_Odm->RateAdaptive;
+
+ pOdmRA->Type = DM_Type_ByDriver;
+ if (pOdmRA->Type == DM_Type_ByDriver)
+ pDM_Odm->bUseRAMask = true;
+ else
+ pDM_Odm->bUseRAMask = false;
+
+ pOdmRA->RATRState = DM_RATR_STA_INIT;
+ pOdmRA->HighRSSIThresh = 50;
+ pOdmRA->LowRSSIThresh = 20;
+}
+
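+/*
+ * The rate bitmap bits appear to follow the usual Realtek layout: bits 0-3
+ * are the CCK rates 1/2/5.5/11M (see the 0x0000000c check below), bits 4-11
+ * the OFDM rates 6M-54M, and bits 12-27 MCS0-MCS15.
+ */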
+u32 ODM_Get_Rate_Bitmap(struct odm_dm_struct *pDM_Odm, u32 macid, u32 ra_mask, u8 rssi_level)
+{
+ struct sta_info *pEntry;
+ u32 rate_bitmap = 0x0fffffff;
+ u8 WirelessMode;
+
+ pEntry = pDM_Odm->pODM_StaInfo[macid];
+ if (!IS_STA_VALID(pEntry))
+ return ra_mask;
+
+ WirelessMode = pEntry->wireless_mode;
+
+ switch (WirelessMode) {
+ case ODM_WM_B:
+ if (ra_mask & 0x0000000c) /* 11M or 5.5M enable */
+ rate_bitmap = 0x0000000d;
+ else
+ rate_bitmap = 0x0000000f;
+ break;
+ case (ODM_WM_A|ODM_WM_G):
+ if (rssi_level == DM_RATR_STA_HIGH)
+ rate_bitmap = 0x00000f00;
+ else
+ rate_bitmap = 0x00000ff0;
+ break;
+ case (ODM_WM_B|ODM_WM_G):
+ if (rssi_level == DM_RATR_STA_HIGH)
+ rate_bitmap = 0x00000f00;
+ else if (rssi_level == DM_RATR_STA_MIDDLE)
+ rate_bitmap = 0x00000ff0;
+ else
+ rate_bitmap = 0x00000ff5;
+ break;
+ case (ODM_WM_B|ODM_WM_G|ODM_WM_N24G):
+ case (ODM_WM_A|ODM_WM_B|ODM_WM_G|ODM_WM_N24G):
+ if (pDM_Odm->RFType == ODM_1T2R || pDM_Odm->RFType == ODM_1T1R) {
+ if (rssi_level == DM_RATR_STA_HIGH) {
+ rate_bitmap = 0x000f0000;
+ } else if (rssi_level == DM_RATR_STA_MIDDLE) {
+ rate_bitmap = 0x000ff000;
+ } else {
+ if (*(pDM_Odm->pBandWidth) == ODM_BW40M)
+ rate_bitmap = 0x000ff015;
+ else
+ rate_bitmap = 0x000ff005;
+ }
+ } else {
+ if (rssi_level == DM_RATR_STA_HIGH) {
+ rate_bitmap = 0x0f8f0000;
+ } else if (rssi_level == DM_RATR_STA_MIDDLE) {
+ rate_bitmap = 0x0f8ff000;
+ } else {
+ if (*(pDM_Odm->pBandWidth) == ODM_BW40M)
+ rate_bitmap = 0x0f8ff015;
+ else
+ rate_bitmap = 0x0f8ff005;
+ }
+ }
+ break;
+ default:
+ /* case WIRELESS_11_24N: */
+ /* case WIRELESS_11_5N: */
+ if (pDM_Odm->RFType == RF_1T2R)
+ rate_bitmap = 0x000fffff;
+ else
+ rate_bitmap = 0x0fffffff;
+ break;
+ }
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD,
+ (" ==> rssi_level:0x%02x, WirelessMode:0x%02x, rate_bitmap:0x%08x\n",
+ rssi_level, WirelessMode, rate_bitmap));
+
+ return rate_bitmap;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: odm_RefreshRateAdaptiveMask()
+ *
+ * Overview: Update rate table mask according to rssi
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 05/27/2009 hpfan Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+void odm_RefreshRateAdaptiveMask(struct odm_dm_struct *pDM_Odm)
+{
+ if (!(pDM_Odm->SupportAbility & ODM_BB_RA_MASK))
+ return;
+ /* */
+	/* 2011/09/29 MH In the first stage of HW integration, we provide 4 different handlers to operate */
+	/* at the same time. In stage 2/3, we need to provide a universal interface and merge all */
+	/* HW dynamic mechanisms. */
+ /* */
+ switch (pDM_Odm->SupportPlatform) {
+ case ODM_MP:
+ odm_RefreshRateAdaptiveMaskMP(pDM_Odm);
+ break;
+ case ODM_CE:
+ odm_RefreshRateAdaptiveMaskCE(pDM_Odm);
+ break;
+ case ODM_AP:
+ case ODM_ADSL:
+ odm_RefreshRateAdaptiveMaskAPADSL(pDM_Odm);
+ break;
+ }
+}
+
+void odm_RefreshRateAdaptiveMaskMP(struct odm_dm_struct *pDM_Odm)
+{
+}
+
+void odm_RefreshRateAdaptiveMaskCE(struct odm_dm_struct *pDM_Odm)
+{
+ u8 i;
+ struct adapter *pAdapter = pDM_Odm->Adapter;
+
+ if (pAdapter->bDriverStopped) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_TRACE, ("<---- odm_RefreshRateAdaptiveMask(): driver is going to unload\n"));
+ return;
+ }
+
+ if (!pDM_Odm->bUseRAMask) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD, ("<---- odm_RefreshRateAdaptiveMask(): driver does not control rate adaptive mask\n"));
+ return;
+ }
+
+ for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
+ struct sta_info *pstat = pDM_Odm->pODM_StaInfo[i];
+ if (IS_STA_VALID(pstat)) {
+			if (ODM_RAStateCheck(pDM_Odm, pstat->rssi_stat.UndecoratedSmoothedPWDB, false, &pstat->rssi_level)) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD,
+ ("RSSI:%d, RSSI_LEVEL:%d\n",
+ pstat->rssi_stat.UndecoratedSmoothedPWDB, pstat->rssi_level));
+ rtw_hal_update_ra_mask(pAdapter, i, pstat->rssi_level);
+ }
+ }
+ }
+}
+
+void odm_RefreshRateAdaptiveMaskAPADSL(struct odm_dm_struct *pDM_Odm)
+{
+}
+
+/* Return Value: bool */
+/* - true: RATRState is changed. */
+bool ODM_RAStateCheck(struct odm_dm_struct *pDM_Odm, s32 RSSI, bool bForceUpdate, u8 *pRATRState)
+{
+ struct odm_rate_adapt *pRA = &pDM_Odm->RateAdaptive;
+ const u8 GoUpGap = 5;
+ u8 HighRSSIThreshForRA = pRA->HighRSSIThresh;
+ u8 LowRSSIThreshForRA = pRA->LowRSSIThresh;
+ u8 RATRState;
+
+ /* Threshold Adjustment: */
+ /* when RSSI state trends to go up one or two levels, make sure RSSI is high enough. */
+ /* Here GoUpGap is added to solve the boundary's level alternation issue. */
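+	/*
+	 * Example with the defaults from odm_RateAdaptiveMaskInit (High = 50,
+	 * Low = 20): a station in the MIDDLE state is promoted to HIGH only once
+	 * RSSI exceeds 55, while a station already in HIGH is demoted only when
+	 * RSSI drops to 50 or below, so the state cannot oscillate around 50.
+	 */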
+ switch (*pRATRState) {
+ case DM_RATR_STA_INIT:
+ case DM_RATR_STA_HIGH:
+ break;
+ case DM_RATR_STA_MIDDLE:
+ HighRSSIThreshForRA += GoUpGap;
+ break;
+ case DM_RATR_STA_LOW:
+ HighRSSIThreshForRA += GoUpGap;
+ LowRSSIThreshForRA += GoUpGap;
+ break;
+ default:
+ ODM_RT_ASSERT(pDM_Odm, false, ("wrong rssi level setting %d !", *pRATRState));
+ break;
+ }
+
+ /* Decide RATRState by RSSI. */
+ if (RSSI > HighRSSIThreshForRA)
+ RATRState = DM_RATR_STA_HIGH;
+ else if (RSSI > LowRSSIThreshForRA)
+ RATRState = DM_RATR_STA_MIDDLE;
+ else
+ RATRState = DM_RATR_STA_LOW;
+
+ if (*pRATRState != RATRState || bForceUpdate) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD, ("RSSI Level %d -> %d\n", *pRATRState, RATRState));
+ *pRATRState = RATRState;
+ return true;
+ }
+ return false;
+}
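+
+/*
+ * Example (illustrative, assuming HighRSSIThresh = 50, LowRSSIThresh = 20):
+ * with GoUpGap = 5, a STA currently in DM_RATR_STA_LOW must report RSSI above
+ * 55 to reach HIGH and above 25 to reach MIDDLE, while a STA already in HIGH
+ * leaves that level as soon as RSSI drops to 50 or below. The extra gap on the
+ * way up keeps the level from toggling when RSSI hovers around a threshold.
+ */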
+
+/* 3============================================================ */
+/* 3 Dynamic Tx Power */
+/* 3============================================================ */
+
+void odm_DynamicTxPowerInit(struct odm_dm_struct *pDM_Odm)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ pdmpriv->bDynamicTxPowerEnable = false;
+ pdmpriv->LastDTPLvl = TxHighPwrLevel_Normal;
+ pdmpriv->DynamicTxHighPowerLvl = TxHighPwrLevel_Normal;
+}
+
+void odm_DynamicTxPower(struct odm_dm_struct *pDM_Odm)
+{
+ /* For AP/ADSL use struct rtl8192cd_priv * */
+ /* For CE/NIC use struct adapter * */
+
+ if (!(pDM_Odm->SupportAbility & ODM_BB_DYNAMIC_TXPWR))
+ return;
+
+ /* 2012/01/12 MH According to Luke's suggestion, only high power will support the feature. */
+ if (!pDM_Odm->ExtPA)
+ return;
+
+	/* 2011/09/29 MH In the first stage of HW integration we provide 4 different handlers */
+	/* that operate at the same time. In stage 2/3 we need to provide a universal interface */
+	/* and merge all HW dynamic mechanisms. */
+ switch (pDM_Odm->SupportPlatform) {
+ case ODM_MP:
+ case ODM_CE:
+ odm_DynamicTxPowerNIC(pDM_Odm);
+ break;
+ case ODM_AP:
+ odm_DynamicTxPowerAP(pDM_Odm);
+ break;
+ case ODM_ADSL:
+ break;
+ }
+}
+
+void odm_DynamicTxPowerNIC(struct odm_dm_struct *pDM_Odm)
+{
+ if (!(pDM_Odm->SupportAbility & ODM_BB_DYNAMIC_TXPWR))
+ return;
+
+ if (pDM_Odm->SupportICType == ODM_RTL8188E) {
+		/* This part needs to be redefined. */
+ }
+}
+
+void odm_DynamicTxPowerAP(struct odm_dm_struct *pDM_Odm)
+{
+}
+
+/* 3============================================================ */
+/* 3 RSSI Monitor */
+/* 3============================================================ */
+
+void odm_RSSIMonitorCheck(struct odm_dm_struct *pDM_Odm)
+{
+ if (!(pDM_Odm->SupportAbility & ODM_BB_RSSI_MONITOR))
+ return;
+
+ /* */
+	/* 2011/09/29 MH In the first stage of HW integration we provide 4 different handlers */
+	/* that operate at the same time. In stage 2/3 we need to provide a universal interface */
+	/* and merge all HW dynamic mechanisms. */
+ /* */
+ switch (pDM_Odm->SupportPlatform) {
+ case ODM_MP:
+ odm_RSSIMonitorCheckMP(pDM_Odm);
+ break;
+ case ODM_CE:
+ odm_RSSIMonitorCheckCE(pDM_Odm);
+ break;
+ case ODM_AP:
+ odm_RSSIMonitorCheckAP(pDM_Odm);
+ break;
+ case ODM_ADSL:
+ /* odm_DIGAP(pDM_Odm); */
+ break;
+ }
+
+} /* odm_RSSIMonitorCheck */
+
+void odm_RSSIMonitorCheckMP(struct odm_dm_struct *pDM_Odm)
+{
+}
+
+static void FindMinimumRSSI(struct adapter *pAdapter)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ struct mlme_priv *pmlmepriv = &pAdapter->mlmepriv;
+
+	/* 1 1. Determine the minimum RSSI */
+ if ((check_fwstate(pmlmepriv, _FW_LINKED) == false) &&
+ (pdmpriv->EntryMinUndecoratedSmoothedPWDB == 0))
+ pdmpriv->MinUndecoratedPWDBForDM = 0;
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) /* Default port */
+ pdmpriv->MinUndecoratedPWDBForDM = pdmpriv->EntryMinUndecoratedSmoothedPWDB;
+ else /* associated entry pwdb */
+ pdmpriv->MinUndecoratedPWDBForDM = pdmpriv->EntryMinUndecoratedSmoothedPWDB;
+}
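+
+/*
+ * Note: both branches of the final if/else above assign
+ * EntryMinUndecoratedSmoothedPWDB, so MinUndecoratedPWDBForDM always ends up
+ * tracking the minimum PWDB of the associated entries; the comments only
+ * distinguish the default-port and associated-entry cases.
+ */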
+
+void odm_RSSIMonitorCheckCE(struct odm_dm_struct *pDM_Odm)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ int i;
+ int tmpEntryMaxPWDB = 0, tmpEntryMinPWDB = 0xff;
+ u8 sta_cnt = 0;
+	u32 PWDB_rssi[NUM_STA] = {0}; /* [0~15]: MACID, [16~31]: PWDB_rssi */
+ struct sta_info *psta;
+ u8 bcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ if (!check_fwstate(&Adapter->mlmepriv, _FW_LINKED))
+ return;
+
+ for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
+ psta = pDM_Odm->pODM_StaInfo[i];
+ if (IS_STA_VALID(psta) &&
+ (psta->state & WIFI_ASOC_STATE) &&
+ !_rtw_memcmp(psta->hwaddr, bcast_addr, ETH_ALEN) &&
+ !_rtw_memcmp(psta->hwaddr, myid(&Adapter->eeprompriv), ETH_ALEN)) {
+ if (psta->rssi_stat.UndecoratedSmoothedPWDB < tmpEntryMinPWDB)
+ tmpEntryMinPWDB = psta->rssi_stat.UndecoratedSmoothedPWDB;
+
+ if (psta->rssi_stat.UndecoratedSmoothedPWDB > tmpEntryMaxPWDB)
+ tmpEntryMaxPWDB = psta->rssi_stat.UndecoratedSmoothedPWDB;
+ if (psta->rssi_stat.UndecoratedSmoothedPWDB != (-1))
+ PWDB_rssi[sta_cnt++] = (psta->mac_id | (psta->rssi_stat.UndecoratedSmoothedPWDB<<16));
+ }
+ }
+
+ for (i = 0; i < sta_cnt; i++) {
+ if (PWDB_rssi[i] != (0)) {
+ if (pHalData->fw_ractrl) {
+ /* Report every sta's RSSI to FW */
+ } else {
+ ODM_RA_SetRSSI_8188E(
+ &(pHalData->odmpriv), (PWDB_rssi[i]&0xFF), (u8)((PWDB_rssi[i]>>16) & 0xFF));
+ }
+ }
+ }
+
+ if (tmpEntryMaxPWDB != 0) /* If associated entry is found */
+ pdmpriv->EntryMaxUndecoratedSmoothedPWDB = tmpEntryMaxPWDB;
+ else
+ pdmpriv->EntryMaxUndecoratedSmoothedPWDB = 0;
+
+ if (tmpEntryMinPWDB != 0xff) /* If associated entry is found */
+ pdmpriv->EntryMinUndecoratedSmoothedPWDB = tmpEntryMinPWDB;
+ else
+ pdmpriv->EntryMinUndecoratedSmoothedPWDB = 0;
+
+ FindMinimumRSSI(Adapter);
+	ODM_CmnInfoUpdate(&pHalData->odmpriv, ODM_CMNINFO_RSSI_MIN, pdmpriv->MinUndecoratedPWDBForDM);
+}
+
+void odm_RSSIMonitorCheckAP(struct odm_dm_struct *pDM_Odm)
+{
+}
+
+void ODM_InitAllTimers(struct odm_dm_struct *pDM_Odm)
+{
+ ODM_InitializeTimer(pDM_Odm, &pDM_Odm->DM_SWAT_Table.SwAntennaSwitchTimer,
+ (void *)odm_SwAntDivChkAntSwitchCallback, NULL, "SwAntennaSwitchTimer");
+}
+
+void ODM_CancelAllTimers(struct odm_dm_struct *pDM_Odm)
+{
+ ODM_CancelTimer(pDM_Odm, &pDM_Odm->DM_SWAT_Table.SwAntennaSwitchTimer);
+}
+
+void ODM_ReleaseAllTimers(struct odm_dm_struct *pDM_Odm)
+{
+ ODM_ReleaseTimer(pDM_Odm, &pDM_Odm->DM_SWAT_Table.SwAntennaSwitchTimer);
+
+ ODM_ReleaseTimer(pDM_Odm, &pDM_Odm->FastAntTrainingTimer);
+}
+
+/* 3============================================================ */
+/* 3 Tx Power Tracking */
+/* 3============================================================ */
+
+void odm_TXPowerTrackingInit(struct odm_dm_struct *pDM_Odm)
+{
+ odm_TXPowerTrackingThermalMeterInit(pDM_Odm);
+}
+
+void odm_TXPowerTrackingThermalMeterInit(struct odm_dm_struct *pDM_Odm)
+{
+ pDM_Odm->RFCalibrateInfo.bTXPowerTracking = true;
+ pDM_Odm->RFCalibrateInfo.TXPowercount = 0;
+ pDM_Odm->RFCalibrateInfo.bTXPowerTrackingInit = false;
+ if (*(pDM_Odm->mp_mode) != 1)
+ pDM_Odm->RFCalibrateInfo.TxPowerTrackControl = true;
+ MSG_88E("pDM_Odm TxPowerTrackControl = %d\n", pDM_Odm->RFCalibrateInfo.TxPowerTrackControl);
+
+ pDM_Odm->RFCalibrateInfo.TxPowerTrackControl = true;
+}
+
+void ODM_TXPowerTrackingCheck(struct odm_dm_struct *pDM_Odm)
+{
+	/* 2011/09/29 MH In the first stage of HW integration we provide 4 different handlers */
+	/* that operate at the same time. In stage 2/3 we need to provide a universal interface */
+	/* and merge all HW dynamic mechanisms. */
+ switch (pDM_Odm->SupportPlatform) {
+ case ODM_MP:
+ odm_TXPowerTrackingCheckMP(pDM_Odm);
+ break;
+ case ODM_CE:
+ odm_TXPowerTrackingCheckCE(pDM_Odm);
+ break;
+ case ODM_AP:
+ odm_TXPowerTrackingCheckAP(pDM_Odm);
+ break;
+ case ODM_ADSL:
+ break;
+ }
+}
+
+void odm_TXPowerTrackingCheckCE(struct odm_dm_struct *pDM_Odm)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+
+ if (!(pDM_Odm->SupportAbility & ODM_RF_TX_PWR_TRACK))
+ return;
+
+	if (!pDM_Odm->RFCalibrateInfo.TM_Trigger) { /* delay at least 1 second */
+		PHY_SetRFReg(Adapter, RF_PATH_A, RF_T_METER_88E, BIT17 | BIT16, 0x03);
+		pDM_Odm->RFCalibrateInfo.TM_Trigger = 1;
+		return;
+	}
+
+	odm_TXPowerTrackingCallback_ThermalMeter_8188E(Adapter);
+	pDM_Odm->RFCalibrateInfo.TM_Trigger = 0;
+}
+
+void odm_TXPowerTrackingCheckMP(struct odm_dm_struct *pDM_Odm)
+{
+}
+
+void odm_TXPowerTrackingCheckAP(struct odm_dm_struct *pDM_Odm)
+{
+}
+
+/* antenna mapping info */
+/* 1: right-side antenna */
+/* 2/0: left-side antenna */
+/* PDM_SWAT_Table->CCK_Ant1_Cnt /OFDM_Ant1_Cnt: for right-side antenna: Ant:1 RxDefaultAnt1 */
+/* PDM_SWAT_Table->CCK_Ant2_Cnt /OFDM_Ant2_Cnt: for left-side antenna: Ant:0 RxDefaultAnt2 */
+/* We select left antenna as default antenna in initial process, modify it as needed */
+/* */
+
+/* 3============================================================ */
+/* 3 SW Antenna Diversity */
+/* 3============================================================ */
+void odm_SwAntDivInit(struct odm_dm_struct *pDM_Odm)
+{
+}
+
+void ODM_SwAntDivChkPerPktRssi(struct odm_dm_struct *pDM_Odm, u8 StationID, struct odm_phy_status_info *pPhyInfo)
+{
+}
+
+void odm_SwAntDivChkAntSwitch(struct odm_dm_struct *pDM_Odm, u8 Step)
+{
+}
+
+void ODM_SwAntDivRestAfterLink(struct odm_dm_struct *pDM_Odm)
+{
+}
+
+void odm_SwAntDivChkAntSwitchCallback(void *FunctionContext)
+{
+}
+
+/* 3============================================================ */
+/* 3 HW/Hybrid Antenna Diversity */
+/* 3============================================================ */
+
+void odm_InitHybridAntDiv(struct odm_dm_struct *pDM_Odm)
+{
+ if (!(pDM_Odm->SupportAbility & ODM_BB_ANT_DIV)) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Return: Not Support HW AntDiv\n"));
+ return;
+ }
+
+ if (pDM_Odm->SupportICType & (ODM_RTL8192C | ODM_RTL8192D))
+ ;
+ else if (pDM_Odm->SupportICType == ODM_RTL8188E)
+ ODM_AntennaDiversityInit_88E(pDM_Odm);
+}
+
+void ODM_AntselStatistics_88C(struct odm_dm_struct *pDM_Odm, u8 MacId, u32 PWDBAll, bool isCCKrate)
+{
+ struct sw_ant_switch *pDM_SWAT_Table = &pDM_Odm->DM_SWAT_Table;
+
+ if (pDM_SWAT_Table->antsel == 1) {
+ if (isCCKrate) {
+ pDM_SWAT_Table->CCK_Ant1_Cnt[MacId]++;
+ } else {
+ pDM_SWAT_Table->OFDM_Ant1_Cnt[MacId]++;
+ pDM_SWAT_Table->RSSI_Ant1_Sum[MacId] += PWDBAll;
+ }
+ } else {
+ if (isCCKrate) {
+ pDM_SWAT_Table->CCK_Ant2_Cnt[MacId]++;
+ } else {
+ pDM_SWAT_Table->OFDM_Ant2_Cnt[MacId]++;
+ pDM_SWAT_Table->RSSI_Ant2_Sum[MacId] += PWDBAll;
+ }
+ }
+}
+
+void odm_HwAntDiv(struct odm_dm_struct *pDM_Odm)
+{
+ if (!(pDM_Odm->SupportAbility & ODM_BB_ANT_DIV)) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Return: Not Support HW AntDiv\n"));
+ return;
+ }
+
+ if (pDM_Odm->SupportICType == ODM_RTL8188E)
+ ODM_AntennaDiversity_88E(pDM_Odm);
+}
+
+/* EDCA Turbo */
+void ODM_EdcaTurboInit(struct odm_dm_struct *pDM_Odm)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = false;
+ pDM_Odm->DM_EDCA_Table.bIsCurRDLState = false;
+ Adapter->recvpriv.bIsAnyNonBEPkts = false;
+
+	ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Original VO PARAM: 0x%x\n", ODM_Read4Byte(pDM_Odm, ODM_EDCA_VO_PARAM)));
+	ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Original VI PARAM: 0x%x\n", ODM_Read4Byte(pDM_Odm, ODM_EDCA_VI_PARAM)));
+	ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Original BE PARAM: 0x%x\n", ODM_Read4Byte(pDM_Odm, ODM_EDCA_BE_PARAM)));
+	ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Original BK PARAM: 0x%x\n", ODM_Read4Byte(pDM_Odm, ODM_EDCA_BK_PARAM)));
+} /* ODM_InitEdcaTurbo */
+
+void odm_EdcaTurboCheck(struct odm_dm_struct *pDM_Odm)
+{
+	/* 2011/09/29 MH In the first stage of HW integration we provide 4 different handlers */
+	/* that operate at the same time. In stage 2/3 we need to provide a universal interface */
+	/* and merge all HW dynamic mechanisms. */
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("odm_EdcaTurboCheck========================>\n"));
+
+ if (!(pDM_Odm->SupportAbility & ODM_MAC_EDCA_TURBO))
+ return;
+
+ switch (pDM_Odm->SupportPlatform) {
+ case ODM_MP:
+ break;
+ case ODM_CE:
+ odm_EdcaTurboCheckCE(pDM_Odm);
+ break;
+ case ODM_AP:
+ case ODM_ADSL:
+ break;
+ }
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("<========================odm_EdcaTurboCheck\n"));
+} /* odm_CheckEdcaTurbo */
+
+void odm_EdcaTurboCheckCE(struct odm_dm_struct *pDM_Odm)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ u32 trafficIndex;
+ u32 edca_param;
+ u64 cur_tx_bytes = 0;
+ u64 cur_rx_bytes = 0;
+ u8 bbtchange = false;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct xmit_priv *pxmitpriv = &(Adapter->xmitpriv);
+ struct recv_priv *precvpriv = &(Adapter->recvpriv);
+ struct registry_priv *pregpriv = &Adapter->registrypriv;
+ struct mlme_ext_priv *pmlmeext = &(Adapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+	if (pregpriv->wifi_spec == 1) /* (pmlmeinfo->HT_enable == 0) */
+ goto dm_CheckEdcaTurbo_EXIT;
+
+ if (pmlmeinfo->assoc_AP_vendor >= HT_IOT_PEER_MAX)
+ goto dm_CheckEdcaTurbo_EXIT;
+
+ /* Check if the status needs to be changed. */
+ if ((bbtchange) || (!precvpriv->bIsAnyNonBEPkts)) {
+ cur_tx_bytes = pxmitpriv->tx_bytes - pxmitpriv->last_tx_bytes;
+ cur_rx_bytes = precvpriv->rx_bytes - precvpriv->last_rx_bytes;
+
+ /* traffic, TX or RX */
+ if ((pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_RALINK) ||
+ (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_ATHEROS)) {
+ if (cur_tx_bytes > (cur_rx_bytes << 2)) {
+ /* Uplink TP is present. */
+ trafficIndex = UP_LINK;
+ } else {
+ /* Balance TP is present. */
+ trafficIndex = DOWN_LINK;
+ }
+ } else {
+ if (cur_rx_bytes > (cur_tx_bytes << 2)) {
+ /* Downlink TP is present. */
+ trafficIndex = DOWN_LINK;
+ } else {
+ /* Balance TP is present. */
+ trafficIndex = UP_LINK;
+ }
+ }
+
+ if ((pDM_Odm->DM_EDCA_Table.prv_traffic_idx != trafficIndex) || (!pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA)) {
+ if ((pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_CISCO) && (pmlmeext->cur_wireless_mode & WIRELESS_11_24N))
+ edca_param = EDCAParam[pmlmeinfo->assoc_AP_vendor][trafficIndex];
+ else
+ edca_param = EDCAParam[HT_IOT_PEER_UNKNOWN][trafficIndex];
+
+ rtw_write32(Adapter, REG_EDCA_BE_PARAM, edca_param);
+
+ pDM_Odm->DM_EDCA_Table.prv_traffic_idx = trafficIndex;
+ }
+
+ pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = true;
+ } else {
+ /* Turn Off EDCA turbo here. */
+ /* Restore original EDCA according to the declaration of AP. */
+ if (pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA) {
+ rtw_write32(Adapter, REG_EDCA_BE_PARAM, pHalData->AcParam_BE);
+ pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = false;
+ }
+ }
+
+dm_CheckEdcaTurbo_EXIT:
+ /* Set variables for next time. */
+ precvpriv->bIsAnyNonBEPkts = false;
+ pxmitpriv->last_tx_bytes = pxmitpriv->tx_bytes;
+ precvpriv->last_rx_bytes = precvpriv->rx_bytes;
+}
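+
+/*
+ * Heuristic used above: for Ralink/Atheros APs the link is treated as uplink
+ * only when tx_bytes exceeds four times rx_bytes, while for other vendors it
+ * is treated as downlink only when rx_bytes exceeds four times tx_bytes. The
+ * BE EDCA parameter is rewritten only when the direction changes or turbo EDCA
+ * was previously off, and is restored from AcParam_BE once non-BE traffic is
+ * seen.
+ */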
+
+/* Needed by the ODM CE platform. */
+/* Moved here for use by the antenna detection mechanism. */
+
+u32 GetPSDData(struct odm_dm_struct *pDM_Odm, unsigned int point, u8 initial_gain_psd)
+{
+ u32 psd_report;
+
+ /* Set DCO frequency index, offset=(40MHz/SamplePts)*point */
+ ODM_SetBBReg(pDM_Odm, 0x808, 0x3FF, point);
+
+ /* Start PSD calculation, Reg808[22]=0->1 */
+ ODM_SetBBReg(pDM_Odm, 0x808, BIT22, 1);
+ /* Need to wait for HW PSD report */
+ ODM_StallExecution(30);
+ ODM_SetBBReg(pDM_Odm, 0x808, BIT22, 0);
+ /* Read PSD report, Reg8B4[15:0] */
+ psd_report = ODM_GetBBReg(pDM_Odm, 0x8B4, bMaskDWord) & 0x0000FFFF;
+
+	psd_report = (u32)ConvertTo_dB(psd_report) + (u32)(initial_gain_psd - 0x1c);
+
+ return psd_report;
+}
+
+u32 ConvertTo_dB(u32 Value)
+{
+ u8 i;
+ u8 j;
+ u32 dB;
+
+ Value = Value & 0xFFFF;
+ for (i = 0; i < 8; i++) {
+ if (Value <= dB_Invert_Table[i][11])
+ break;
+ }
+
+ if (i >= 8)
+ return 96; /* maximum 96 dB */
+
+ for (j = 0; j < 12; j++) {
+ if (Value <= dB_Invert_Table[i][j])
+ break;
+ }
+
+ dB = i*12 + j + 1;
+
+ return dB;
+}
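+
+/*
+ * Example (illustrative): dB_Invert_Table is treated as 8 rows of 12
+ * monotonically increasing linear values, so a raw value that first fits at
+ * row i, column j maps to i*12 + j + 1 dB, and anything larger than the last
+ * entry is clamped to 96 dB.
+ */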
+
+/* 2011/09/22 MH Add for 92D global spin lock utilization. */
+void odm_GlobalAdapterCheck(void)
+{
+} /* odm_GlobalAdapterCheck */
+
+/* Description: */
+/* Set Single/Dual Antenna default setting for products that do not do detection in advance. */
+/* Added by Joseph, 2012.03.22 */
+void ODM_SingleDualAntennaDefaultSetting(struct odm_dm_struct *pDM_Odm)
+{
+ struct sw_ant_switch *pDM_SWAT_Table = &pDM_Odm->DM_SWAT_Table;
+
+ pDM_SWAT_Table->ANTA_ON = true;
+ pDM_SWAT_Table->ANTB_ON = true;
+}
+
+
+/* 2 8723A ANT DETECT */
+
+static void odm_PHY_SaveAFERegisters(struct odm_dm_struct *pDM_Odm, u32 *AFEReg, u32 *AFEBackup, u32 RegisterNum)
+{
+ u32 i;
+
+ /* RTPRINT(FINIT, INIT_IQK, ("Save ADDA parameters.\n")); */
+ for (i = 0; i < RegisterNum; i++)
+ AFEBackup[i] = ODM_GetBBReg(pDM_Odm, AFEReg[i], bMaskDWord);
+}
+
+static void odm_PHY_ReloadAFERegisters(struct odm_dm_struct *pDM_Odm, u32 *AFEReg, u32 *AFEBackup, u32 RegisterNum)
+{
+ u32 i;
+
+	for (i = 0; i < RegisterNum; i++)
+ ODM_SetBBReg(pDM_Odm, AFEReg[i], bMaskDWord, AFEBackup[i]);
+}
+
+/* 2 8723A ANT DETECT */
+/* Description: */
+/* Implement IQK single tone for RF DPK loopback and BB PSD scanning. */
+/* This function was developed in cooperation with BB team (Neil). */
+bool ODM_SingleDualAntennaDetection(struct odm_dm_struct *pDM_Odm, u8 mode)
+{
+ struct sw_ant_switch *pDM_SWAT_Table = &pDM_Odm->DM_SWAT_Table;
+ u32 CurrentChannel, RfLoopReg;
+ u8 n;
+ u32 Reg88c, Regc08, Reg874, Regc50;
+ u8 initial_gain = 0x5a;
+ u32 PSD_report_tmp;
+ u32 AntA_report = 0x0, AntB_report = 0x0, AntO_report = 0x0;
+ bool bResult = true;
+ u32 AFE_Backup[16];
+ u32 AFE_REG_8723A[16] = {
+ rRx_Wait_CCA, rTx_CCK_RFON,
+ rTx_CCK_BBON, rTx_OFDM_RFON,
+ rTx_OFDM_BBON, rTx_To_Rx,
+ rTx_To_Tx, rRx_CCK,
+ rRx_OFDM, rRx_Wait_RIFS,
+ rRx_TO_Rx, rStandby,
+ rSleep, rPMPD_ANAEN,
+ rFPGA0_XCD_SwitchControl, rBlue_Tooth};
+
+ if (!(pDM_Odm->SupportICType & (ODM_RTL8723A|ODM_RTL8192C)))
+ return bResult;
+
+ if (!(pDM_Odm->SupportAbility&ODM_BB_ANT_DIV))
+ return bResult;
+
+ if (pDM_Odm->SupportICType == ODM_RTL8192C) {
+		/* Which path in ADC/DAC is turned on for PSD: both I/Q */
+ ODM_SetBBReg(pDM_Odm, 0x808, BIT10|BIT11, 0x3);
+		/* Averaged number: 8 */
+ ODM_SetBBReg(pDM_Odm, 0x808, BIT12|BIT13, 0x1);
+ /* pts = 128; */
+ ODM_SetBBReg(pDM_Odm, 0x808, BIT14|BIT15, 0x0);
+ }
+
+ /* 1 Backup Current RF/BB Settings */
+
+ CurrentChannel = ODM_GetRFReg(pDM_Odm, RF_PATH_A, ODM_CHANNEL, bRFRegOffsetMask);
+ RfLoopReg = ODM_GetRFReg(pDM_Odm, RF_PATH_A, 0x00, bRFRegOffsetMask);
+ ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, ODM_DPDT, Antenna_A); /* change to Antenna A */
+	/* Step 1: Use IQK to transmit a single tone */
+
+ ODM_StallExecution(10);
+
+ /* Store A Path Register 88c, c08, 874, c50 */
+ Reg88c = ODM_GetBBReg(pDM_Odm, rFPGA0_AnalogParameter4, bMaskDWord);
+ Regc08 = ODM_GetBBReg(pDM_Odm, rOFDM0_TRMuxPar, bMaskDWord);
+ Reg874 = ODM_GetBBReg(pDM_Odm, rFPGA0_XCD_RFInterfaceSW, bMaskDWord);
+ Regc50 = ODM_GetBBReg(pDM_Odm, rOFDM0_XAAGCCore1, bMaskDWord);
+
+ /* Store AFE Registers */
+ odm_PHY_SaveAFERegisters(pDM_Odm, AFE_REG_8723A, AFE_Backup, 16);
+
+ /* Set PSD 128 pts */
+ ODM_SetBBReg(pDM_Odm, rFPGA0_PSDFunction, BIT14|BIT15, 0x0); /* 128 pts */
+
+ /* To SET CH1 to do */
+ ODM_SetRFReg(pDM_Odm, RF_PATH_A, ODM_CHANNEL, bRFRegOffsetMask, 0x01); /* Channel 1 */
+
+ /* AFE all on step */
+ ODM_SetBBReg(pDM_Odm, rRx_Wait_CCA, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rTx_CCK_RFON, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rTx_CCK_BBON, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rTx_OFDM_RFON, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rTx_OFDM_BBON, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rTx_To_Rx, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rTx_To_Tx, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rRx_CCK, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rRx_OFDM, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rRx_Wait_RIFS, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rRx_TO_Rx, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rStandby, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rSleep, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rPMPD_ANAEN, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rFPGA0_XCD_SwitchControl, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rBlue_Tooth, bMaskDWord, 0x6FDB25A4);
+
+	/* 3-wire disable */
+ ODM_SetBBReg(pDM_Odm, rFPGA0_AnalogParameter4, bMaskDWord, 0xCCF000C0);
+
+ /* BB IQK Setting */
+ ODM_SetBBReg(pDM_Odm, rOFDM0_TRMuxPar, bMaskDWord, 0x000800E4);
+ ODM_SetBBReg(pDM_Odm, rFPGA0_XCD_RFInterfaceSW, bMaskDWord, 0x22208000);
+
+	/* IQK setting: single tone @ 4.34 MHz */
+ ODM_SetBBReg(pDM_Odm, rTx_IQK_Tone_A, bMaskDWord, 0x10008C1C);
+ ODM_SetBBReg(pDM_Odm, rTx_IQK, bMaskDWord, 0x01007c00);
+
+
+ /* Page B init */
+ ODM_SetBBReg(pDM_Odm, rConfig_AntA, bMaskDWord, 0x00080000);
+ ODM_SetBBReg(pDM_Odm, rConfig_AntA, bMaskDWord, 0x0f600000);
+ ODM_SetBBReg(pDM_Odm, rRx_IQK, bMaskDWord, 0x01004800);
+ ODM_SetBBReg(pDM_Odm, rRx_IQK_Tone_A, bMaskDWord, 0x10008c1f);
+ ODM_SetBBReg(pDM_Odm, rTx_IQK_PI_A, bMaskDWord, 0x82150008);
+ ODM_SetBBReg(pDM_Odm, rRx_IQK_PI_A, bMaskDWord, 0x28150008);
+ ODM_SetBBReg(pDM_Odm, rIQK_AGC_Rsp, bMaskDWord, 0x001028d0);
+
+ /* RF loop Setting */
+ ODM_SetRFReg(pDM_Odm, RF_PATH_A, 0x0, 0xFFFFF, 0x50008);
+
+ /* IQK Single tone start */
+ ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
+ ODM_SetBBReg(pDM_Odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+ ODM_StallExecution(1000);
+ PSD_report_tmp = 0x0;
+
+ for (n = 0; n < 2; n++) {
+ PSD_report_tmp = GetPSDData(pDM_Odm, 14, initial_gain);
+ if (PSD_report_tmp > AntA_report)
+ AntA_report = PSD_report_tmp;
+ }
+
+ PSD_report_tmp = 0x0;
+
+ ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, Antenna_B); /* change to Antenna B */
+ ODM_StallExecution(10);
+
+
+ for (n = 0; n < 2; n++) {
+ PSD_report_tmp = GetPSDData(pDM_Odm, 14, initial_gain);
+ if (PSD_report_tmp > AntB_report)
+ AntB_report = PSD_report_tmp;
+ }
+
+ /* change to open case */
+ ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, 0); /* change to Ant A and B all open case */
+ ODM_StallExecution(10);
+
+ for (n = 0; n < 2; n++) {
+ PSD_report_tmp = GetPSDData(pDM_Odm, 14, initial_gain);
+ if (PSD_report_tmp > AntO_report)
+ AntO_report = PSD_report_tmp;
+ }
+
+ /* Close IQK Single Tone function */
+ ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
+ PSD_report_tmp = 0x0;
+
+	/* 1 Return to antenna A */
+ ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, Antenna_A);
+ ODM_SetBBReg(pDM_Odm, rFPGA0_AnalogParameter4, bMaskDWord, Reg88c);
+ ODM_SetBBReg(pDM_Odm, rOFDM0_TRMuxPar, bMaskDWord, Regc08);
+ ODM_SetBBReg(pDM_Odm, rFPGA0_XCD_RFInterfaceSW, bMaskDWord, Reg874);
+ ODM_SetBBReg(pDM_Odm, rOFDM0_XAAGCCore1, 0x7F, 0x40);
+ ODM_SetBBReg(pDM_Odm, rOFDM0_XAAGCCore1, bMaskDWord, Regc50);
+ ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, CurrentChannel);
+ ODM_SetRFReg(pDM_Odm, RF_PATH_A, 0x00, bRFRegOffsetMask, RfLoopReg);
+
+ /* Reload AFE Registers */
+ odm_PHY_ReloadAFERegisters(pDM_Odm, AFE_REG_8723A, AFE_Backup, 16);
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("psd_report_A[%d]= %d\n", 2416, AntA_report));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("psd_report_B[%d]= %d\n", 2416, AntB_report));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("psd_report_O[%d]= %d\n", 2416, AntO_report));
+
+
+ if (pDM_Odm->SupportICType == ODM_RTL8723A) {
+ /* 2 Test Ant B based on Ant A is ON */
+ if (mode == ANTTESTB) {
+ if (AntA_report >= 100) {
+ if (AntB_report > (AntA_report+1)) {
+ pDM_SWAT_Table->ANTB_ON = false;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Single Antenna A\n"));
+ } else {
+ pDM_SWAT_Table->ANTB_ON = true;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Dual Antenna is A and B\n"));
+ }
+ } else {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Need to check again\n"));
+ pDM_SWAT_Table->ANTB_ON = false; /* Set Antenna B off as default */
+ bResult = false;
+ }
+ } else if (mode == ANTTESTALL) {
+ /* 2 Test Ant A and B based on DPDT Open */
+			if ((AntO_report >= 100) && (AntO_report < 118)) {
+ if (AntA_report > (AntO_report+1)) {
+ pDM_SWAT_Table->ANTA_ON = false;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Ant A is OFF"));
+ } else {
+ pDM_SWAT_Table->ANTA_ON = true;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Ant A is ON"));
+ }
+
+ if (AntB_report > (AntO_report+2)) {
+ pDM_SWAT_Table->ANTB_ON = false;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Ant B is OFF"));
+ } else {
+ pDM_SWAT_Table->ANTB_ON = true;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Ant B is ON"));
+ }
+ }
+ }
+ } else if (pDM_Odm->SupportICType == ODM_RTL8192C) {
+ if (AntA_report >= 100) {
+ if (AntB_report > (AntA_report+2)) {
+ pDM_SWAT_Table->ANTA_ON = false;
+ pDM_SWAT_Table->ANTB_ON = true;
+ ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, Antenna_B);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Single Antenna B\n"));
+ } else if (AntA_report > (AntB_report+2)) {
+ pDM_SWAT_Table->ANTA_ON = true;
+ pDM_SWAT_Table->ANTB_ON = false;
+ ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, Antenna_A);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Single Antenna A\n"));
+ } else {
+ pDM_SWAT_Table->ANTA_ON = true;
+ pDM_SWAT_Table->ANTB_ON = true;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
+ ("ODM_SingleDualAntennaDetection(): Dual Antenna\n"));
+ }
+ } else {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Need to check again\n"));
+ pDM_SWAT_Table->ANTA_ON = true; /* Set Antenna A on as default */
+ pDM_SWAT_Table->ANTB_ON = false; /* Set Antenna B off as default */
+ bResult = false;
+ }
+ }
+ return bResult;
+}
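+
+/*
+ * Decision summary for the detection above: a result is only trusted when the
+ * reference PSD report (antenna A, or the open/DPDT case) is at least 100; an
+ * antenna is declared absent when its PSD report exceeds the reference by the
+ * small margins used above, and bResult returning false means the measurement
+ * should be repeated.
+ */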
+
+/* Justin: Adjust response frame TX power according to the current RSSI, 2012/11/05 */
+void odm_dtc(struct odm_dm_struct *pDM_Odm)
+{
+}
diff --git a/drivers/staging/rtl8188eu/hal/odm_HWConfig.c b/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
new file mode 100644
index 0000000..19c509a
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
@@ -0,0 +1,596 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+/* include files */
+
+#include "odm_precomp.h"
+
+#define READ_AND_CONFIG READ_AND_CONFIG_MP
+
+#define READ_AND_CONFIG_MP(ic, txt) (ODM_ReadAndConfig##txt##ic(dm_odm))
+#define READ_AND_CONFIG_TC(ic, txt) (ODM_ReadAndConfig_TC##txt##ic(dm_odm))
+
+static u8 odm_QueryRxPwrPercentage(s8 AntPower)
+{
+ if ((AntPower <= -100) || (AntPower >= 20))
+ return 0;
+ else if (AntPower >= 0)
+ return 100;
+ else
+ return 100+AntPower;
+}
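+
+/* Example (illustrative): an antenna power of -60 dBm maps to 100 + (-60) = 40%. */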
+
+/* 2012/01/12 MH Move some signal strength smoothing methods to the MP HAL layer. */
+/* If other SW teams do not support the feature, remove this section. */
+static s32 odm_sig_patch_lenove(struct odm_dm_struct *dm_odm, s32 CurrSig)
+{
+ return 0;
+}
+
+static s32 odm_sig_patch_netcore(struct odm_dm_struct *dm_odm, s32 CurrSig)
+{
+ return 0;
+}
+
+static s32 odm_SignalScaleMapping_92CSeries(struct odm_dm_struct *dm_odm, s32 CurrSig)
+{
+ s32 RetSig = 0;
+
+ if ((dm_odm->SupportInterface == ODM_ITRF_USB) ||
+ (dm_odm->SupportInterface == ODM_ITRF_SDIO)) {
+ if (CurrSig >= 51 && CurrSig <= 100)
+ RetSig = 100;
+ else if (CurrSig >= 41 && CurrSig <= 50)
+ RetSig = 80 + ((CurrSig - 40)*2);
+ else if (CurrSig >= 31 && CurrSig <= 40)
+ RetSig = 66 + (CurrSig - 30);
+ else if (CurrSig >= 21 && CurrSig <= 30)
+ RetSig = 54 + (CurrSig - 20);
+ else if (CurrSig >= 10 && CurrSig <= 20)
+ RetSig = 42 + (((CurrSig - 10) * 2) / 3);
+ else if (CurrSig >= 5 && CurrSig <= 9)
+ RetSig = 22 + (((CurrSig - 5) * 3) / 2);
+ else if (CurrSig >= 1 && CurrSig <= 4)
+ RetSig = 6 + (((CurrSig - 1) * 3) / 2);
+ else
+ RetSig = CurrSig;
+ }
+ return RetSig;
+}
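+
+/*
+ * Example (illustrative): on a USB or SDIO interface a raw signal of 35 falls
+ * in the 31..40 band and is remapped to 66 + (35 - 30) = 71, while any raw
+ * value of 51 or more is reported as 100.
+ */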
+
+static s32 odm_SignalScaleMapping(struct odm_dm_struct *dm_odm, s32 CurrSig)
+{
+ if ((dm_odm->SupportPlatform == ODM_MP) &&
+ (dm_odm->SupportInterface != ODM_ITRF_PCIE) && /* USB & SDIO */
+ (dm_odm->PatchID == 10))
+ return odm_sig_patch_netcore(dm_odm, CurrSig);
+ else if ((dm_odm->SupportPlatform == ODM_MP) &&
+ (dm_odm->SupportInterface == ODM_ITRF_PCIE) &&
+ (dm_odm->PatchID == 19))
+ return odm_sig_patch_lenove(dm_odm, CurrSig);
+ else
+ return odm_SignalScaleMapping_92CSeries(dm_odm, CurrSig);
+}
+
+/* pMgntInfo->CustomerID == RT_CID_819x_Lenovo */
+static u8 odm_SQ_process_patch_RT_CID_819x_Lenovo(struct odm_dm_struct *dm_odm,
+ u8 isCCKrate, u8 PWDB_ALL, u8 path, u8 RSSI)
+{
+ return 0;
+}
+
+static u8 odm_EVMdbToPercentage(s8 Value)
+{
+ /* -33dB~0dB to 0%~99% */
+ s8 ret_val;
+
+ ret_val = Value;
+
+ if (ret_val >= 0)
+ ret_val = 0;
+ if (ret_val <= -33)
+ ret_val = -33;
+
+ ret_val = 0 - ret_val;
+ ret_val *= 3;
+
+ if (ret_val == 99)
+ ret_val = 100;
+ return ret_val;
+}
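+
+/* Example (illustrative): an EVM report of -25 dB becomes 25 * 3 = 75%, */
+/* and -33 dB maps to 99, which is then reported as 100%. */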
+
+static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
+ struct odm_phy_status_info *pPhyInfo,
+ u8 *pPhyStatus,
+ struct odm_per_pkt_info *pPktinfo)
+{
+ struct sw_ant_switch *pDM_SWAT_Table = &dm_odm->DM_SWAT_Table;
+ u8 i, Max_spatial_stream;
+ s8 rx_pwr[4], rx_pwr_all = 0;
+ u8 EVM, PWDB_ALL = 0, PWDB_ALL_BT;
+ u8 RSSI, total_rssi = 0;
+ u8 isCCKrate = 0;
+ u8 rf_rx_num = 0;
+ u8 cck_highpwr = 0;
+ u8 LNA_idx, VGA_idx;
+
+ struct phy_status_rpt *pPhyStaRpt = (struct phy_status_rpt *)pPhyStatus;
+
+ isCCKrate = ((pPktinfo->Rate >= DESC92C_RATE1M) && (pPktinfo->Rate <= DESC92C_RATE11M)) ? true : false;
+
+ pPhyInfo->RxMIMOSignalQuality[ODM_RF_PATH_A] = -1;
+ pPhyInfo->RxMIMOSignalQuality[ODM_RF_PATH_B] = -1;
+
+ if (isCCKrate) {
+ u8 report;
+ u8 cck_agc_rpt;
+
+ dm_odm->PhyDbgInfo.NumQryPhyStatusCCK++;
+ /* (1)Hardware does not provide RSSI for CCK */
+		/* (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */
+
+ cck_highpwr = dm_odm->bCckHighPower;
+
+		cck_agc_rpt = pPhyStaRpt->cck_agc_rpt_ofdm_cfosho_a;
+
+		/* 2011.11.28 LukeLee: 88E uses a different LNA & VGA gain table */
+ /* The RSSI formula should be modified according to the gain table */
+ /* In 88E, cck_highpwr is always set to 1 */
+ if (dm_odm->SupportICType & (ODM_RTL8188E|ODM_RTL8812)) {
+ LNA_idx = ((cck_agc_rpt & 0xE0) >> 5);
+ VGA_idx = (cck_agc_rpt & 0x1F);
+ switch (LNA_idx) {
+ case 7:
+ if (VGA_idx <= 27)
+ rx_pwr_all = -100 + 2*(27-VGA_idx); /* VGA_idx = 27~2 */
+ else
+ rx_pwr_all = -100;
+ break;
+ case 6:
+ rx_pwr_all = -48 + 2*(2-VGA_idx); /* VGA_idx = 2~0 */
+ break;
+ case 5:
+ rx_pwr_all = -42 + 2*(7-VGA_idx); /* VGA_idx = 7~5 */
+ break;
+ case 4:
+ rx_pwr_all = -36 + 2*(7-VGA_idx); /* VGA_idx = 7~4 */
+ break;
+ case 3:
+ rx_pwr_all = -24 + 2*(7-VGA_idx); /* VGA_idx = 7~0 */
+ break;
+ case 2:
+ if (cck_highpwr)
+ rx_pwr_all = -12 + 2*(5-VGA_idx); /* VGA_idx = 5~0 */
+ else
+ rx_pwr_all = -6 + 2*(5-VGA_idx);
+ break;
+ case 1:
+ rx_pwr_all = 8-2*VGA_idx;
+ break;
+ case 0:
+ rx_pwr_all = 14-2*VGA_idx;
+ break;
+ default:
+ break;
+ }
+ rx_pwr_all += 6;
+ PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
+ if (!cck_highpwr) {
+ if (PWDB_ALL >= 80)
+ PWDB_ALL = ((PWDB_ALL-80)<<1)+((PWDB_ALL-80)>>1)+80;
+ else if ((PWDB_ALL <= 78) && (PWDB_ALL >= 20))
+ PWDB_ALL += 3;
+ if (PWDB_ALL > 100)
+ PWDB_ALL = 100;
+ }
+ } else {
+ if (!cck_highpwr) {
+ report = (cck_agc_rpt & 0xc0)>>6;
+ switch (report) {
+ /* 03312009 modified by cosa */
+				/* Modify the RF LNA gain value to -40, -20, -2, 14 by Jenyu's suggestion */
+				/* Note: different RF chips have different LNA gains. */
+ case 0x3:
+ rx_pwr_all = -46 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x2:
+ rx_pwr_all = -26 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x1:
+ rx_pwr_all = -12 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x0:
+ rx_pwr_all = 16 - (cck_agc_rpt & 0x3e);
+ break;
+ }
+ } else {
+ report = (cck_agc_rpt & 0x60)>>5;
+ switch (report) {
+ case 0x3:
+					rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f)<<1);
+ break;
+ case 0x2:
+ rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f)<<1);
+ break;
+ case 0x1:
+ rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f)<<1);
+ break;
+ case 0x0:
+ rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f)<<1);
+ break;
+ }
+ }
+
+ PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
+
+ /* Modification for ext-LNA board */
+ if (dm_odm->BoardType == ODM_BOARD_HIGHPWR) {
+ if ((cck_agc_rpt>>7) == 0) {
+ PWDB_ALL = (PWDB_ALL > 94) ? 100 : (PWDB_ALL+6);
+ } else {
+ if (PWDB_ALL > 38)
+ PWDB_ALL -= 16;
+ else
+ PWDB_ALL = (PWDB_ALL <= 16) ? (PWDB_ALL>>2) : (PWDB_ALL-12);
+ }
+
+ /* CCK modification */
+ if (PWDB_ALL > 25 && PWDB_ALL <= 60)
+ PWDB_ALL += 6;
+ } else {/* Modification for int-LNA board */
+ if (PWDB_ALL > 99)
+ PWDB_ALL -= 8;
+ else if (PWDB_ALL > 50 && PWDB_ALL <= 68)
+ PWDB_ALL += 4;
+ }
+ }
+
+ pPhyInfo->RxPWDBAll = PWDB_ALL;
+ pPhyInfo->BTRxRSSIPercentage = PWDB_ALL;
+ pPhyInfo->RecvSignalPower = rx_pwr_all;
+ /* (3) Get Signal Quality (EVM) */
+ if (pPktinfo->bPacketMatchBSSID) {
+ u8 SQ, SQ_rpt;
+
+ if ((dm_odm->SupportPlatform == ODM_MP) && (dm_odm->PatchID == 19)) {
+ SQ = odm_SQ_process_patch_RT_CID_819x_Lenovo(dm_odm, isCCKrate, PWDB_ALL, 0, 0);
+ } else if (pPhyInfo->RxPWDBAll > 40 && !dm_odm->bInHctTest) {
+ SQ = 100;
+ } else {
+ SQ_rpt = pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all;
+
+ if (SQ_rpt > 64)
+ SQ = 0;
+ else if (SQ_rpt < 20)
+ SQ = 100;
+ else
+ SQ = ((64-SQ_rpt) * 100) / 44;
+ }
+ pPhyInfo->SignalQuality = SQ;
+ pPhyInfo->RxMIMOSignalQuality[ODM_RF_PATH_A] = SQ;
+ pPhyInfo->RxMIMOSignalQuality[ODM_RF_PATH_B] = -1;
+ }
+ } else { /* is OFDM rate */
+ dm_odm->PhyDbgInfo.NumQryPhyStatusOFDM++;
+
+ /* (1)Get RSSI for HT rate */
+
+ for (i = ODM_RF_PATH_A; i < ODM_RF_PATH_MAX; i++) {
+ /* 2008/01/30 MH we will judge RF RX path now. */
+ if (dm_odm->RFPathRxEnable & BIT(i))
+ rf_rx_num++;
+
+ rx_pwr[i] = ((pPhyStaRpt->path_agc[i].gain & 0x3F)*2) - 110;
+
+ pPhyInfo->RxPwr[i] = rx_pwr[i];
+
+			/* Translate dBm to percentage. */
+ RSSI = odm_QueryRxPwrPercentage(rx_pwr[i]);
+ total_rssi += RSSI;
+
+ /* Modification for ext-LNA board */
+ if (dm_odm->BoardType == ODM_BOARD_HIGHPWR) {
+ if ((pPhyStaRpt->path_agc[i].trsw) == 1)
+ RSSI = (RSSI > 94) ? 100 : (RSSI + 6);
+ else
+ RSSI = (RSSI <= 16) ? (RSSI >> 3) : (RSSI - 16);
+
+ if ((RSSI <= 34) && (RSSI >= 4))
+ RSSI -= 4;
+ }
+
+ pPhyInfo->RxMIMOSignalStrength[i] = (u8)RSSI;
+
+			/* Get RX SNR value in dB */
+ pPhyInfo->RxSNR[i] = (s32)(pPhyStaRpt->path_rxsnr[i]/2);
+ dm_odm->PhyDbgInfo.RxSNRdB[i] = (s32)(pPhyStaRpt->path_rxsnr[i]/2);
+
+ /* Record Signal Strength for next packet */
+ if (pPktinfo->bPacketMatchBSSID) {
+ if ((dm_odm->SupportPlatform == ODM_MP) && (dm_odm->PatchID == 19)) {
+ if (i == ODM_RF_PATH_A)
+ pPhyInfo->SignalQuality = odm_SQ_process_patch_RT_CID_819x_Lenovo(dm_odm, isCCKrate, PWDB_ALL, i, RSSI);
+ }
+ }
+ }
+		/* (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */
+ rx_pwr_all = (((pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all) >> 1) & 0x7f) - 110;
+
+ PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
+ PWDB_ALL_BT = PWDB_ALL;
+
+ pPhyInfo->RxPWDBAll = PWDB_ALL;
+ pPhyInfo->BTRxRSSIPercentage = PWDB_ALL_BT;
+ pPhyInfo->RxPower = rx_pwr_all;
+ pPhyInfo->RecvSignalPower = rx_pwr_all;
+
+ if ((dm_odm->SupportPlatform == ODM_MP) && (dm_odm->PatchID == 19)) {
+ /* do nothing */
+ } else {
+ /* (3)EVM of HT rate */
+ if (pPktinfo->Rate >= DESC92C_RATEMCS8 && pPktinfo->Rate <= DESC92C_RATEMCS15)
+				Max_spatial_stream = 2; /* both spatial streams make sense */
+ else
+ Max_spatial_stream = 1; /* only spatial stream 1 makes sense */
+
+ for (i = 0; i < Max_spatial_stream; i++) {
+				/* Do not use a shift operation like "rx_evmX >>= 1", because the compiler of the free build */
+				/* environment fills the most significant bit with zero when shifting, which may turn a negative */
+				/* value into a positive one; the dBm value (which is supposed to be negative) would then be wrong. */
+ EVM = odm_EVMdbToPercentage((pPhyStaRpt->stream_rxevm[i])); /* dbm */
+
+ if (pPktinfo->bPacketMatchBSSID) {
+ if (i == ODM_RF_PATH_A) /* Fill value in RFD, Get the first spatial stream only */
+ pPhyInfo->SignalQuality = (u8)(EVM & 0xff);
+ pPhyInfo->RxMIMOSignalQuality[i] = (u8)(EVM & 0xff);
+ }
+ }
+ }
+ }
+	/* UI BSS list signal strength (in percentage); make it look good, range 0~100. */
+ /* It is assigned to the BSS List in GetValueFromBeaconOrProbeRsp(). */
+ if (isCCKrate) {
+ pPhyInfo->SignalStrength = (u8)(odm_SignalScaleMapping(dm_odm, PWDB_ALL));/* PWDB_ALL; */
+ } else {
+ if (rf_rx_num != 0)
+ pPhyInfo->SignalStrength = (u8)(odm_SignalScaleMapping(dm_odm, total_rssi /= rf_rx_num));
+ }
+
+ /* For 92C/92D HW (Hybrid) Antenna Diversity */
+ pDM_SWAT_Table->antsel = pPhyStaRpt->ant_sel;
+ /* For 88E HW Antenna Diversity */
+ dm_odm->DM_FatTable.antsel_rx_keep_0 = pPhyStaRpt->ant_sel;
+ dm_odm->DM_FatTable.antsel_rx_keep_1 = pPhyStaRpt->ant_sel_b;
+ dm_odm->DM_FatTable.antsel_rx_keep_2 = pPhyStaRpt->antsel_rx_keep_2;
+}
+
+void odm_Init_RSSIForDM(struct odm_dm_struct *dm_odm)
+{
+}
+
+static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm,
+ struct odm_phy_status_info *pPhyInfo,
+ struct odm_per_pkt_info *pPktinfo)
+{
+ s32 UndecoratedSmoothedPWDB, UndecoratedSmoothedCCK;
+ s32 UndecoratedSmoothedOFDM, RSSI_Ave;
+ u8 isCCKrate = 0;
+ u8 RSSI_max, RSSI_min, i;
+ u32 OFDM_pkt = 0;
+ u32 Weighting = 0;
+ struct sta_info *pEntry;
+
+ if (pPktinfo->StationID == 0xFF)
+ return;
+ pEntry = dm_odm->pODM_StaInfo[pPktinfo->StationID];
+ if (!IS_STA_VALID(pEntry))
+ return;
+ if ((!pPktinfo->bPacketMatchBSSID))
+ return;
+
+ isCCKrate = ((pPktinfo->Rate >= DESC92C_RATE1M) && (pPktinfo->Rate <= DESC92C_RATE11M)) ? true : false;
+
+ /* Smart Antenna Debug Message------------------ */
+ if (dm_odm->SupportICType == ODM_RTL8188E) {
+ u8 antsel_tr_mux;
+ struct fast_ant_train *pDM_FatTable = &dm_odm->DM_FatTable;
+
+ if (dm_odm->AntDivType == CG_TRX_SMART_ANTDIV) {
+ if (pDM_FatTable->FAT_State == FAT_TRAINING_STATE) {
+ if (pPktinfo->bPacketToSelf) {
+ antsel_tr_mux = (pDM_FatTable->antsel_rx_keep_2<<2) |
+ (pDM_FatTable->antsel_rx_keep_1<<1) |
+ pDM_FatTable->antsel_rx_keep_0;
+ pDM_FatTable->antSumRSSI[antsel_tr_mux] += pPhyInfo->RxPWDBAll;
+ pDM_FatTable->antRSSIcnt[antsel_tr_mux]++;
+ }
+ }
+ } else if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) || (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV)) {
+ if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon) {
+ antsel_tr_mux = (pDM_FatTable->antsel_rx_keep_2<<2) |
+ (pDM_FatTable->antsel_rx_keep_1<<1) | pDM_FatTable->antsel_rx_keep_0;
+ ODM_AntselStatistics_88E(dm_odm, antsel_tr_mux, pPktinfo->StationID, pPhyInfo->RxPWDBAll);
+ }
+ }
+ }
+ /* Smart Antenna Debug Message------------------ */
+
+ UndecoratedSmoothedCCK = pEntry->rssi_stat.UndecoratedSmoothedCCK;
+ UndecoratedSmoothedOFDM = pEntry->rssi_stat.UndecoratedSmoothedOFDM;
+ UndecoratedSmoothedPWDB = pEntry->rssi_stat.UndecoratedSmoothedPWDB;
+
+ if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon) {
+ if (!isCCKrate) { /* ofdm rate */
+ if (pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_B] == 0) {
+ RSSI_Ave = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_A];
+ } else {
+ if (pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_A] > pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_B]) {
+ RSSI_max = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_A];
+ RSSI_min = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_B];
+ } else {
+ RSSI_max = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_B];
+ RSSI_min = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_A];
+ }
+ if ((RSSI_max - RSSI_min) < 3)
+ RSSI_Ave = RSSI_max;
+ else if ((RSSI_max - RSSI_min) < 6)
+ RSSI_Ave = RSSI_max - 1;
+ else if ((RSSI_max - RSSI_min) < 10)
+ RSSI_Ave = RSSI_max - 2;
+ else
+ RSSI_Ave = RSSI_max - 3;
+ }
+
+ /* 1 Process OFDM RSSI */
+ if (UndecoratedSmoothedOFDM <= 0) { /* initialize */
+ UndecoratedSmoothedOFDM = pPhyInfo->RxPWDBAll;
+ } else {
+ if (pPhyInfo->RxPWDBAll > (u32)UndecoratedSmoothedOFDM) {
+ UndecoratedSmoothedOFDM =
+ (((UndecoratedSmoothedOFDM)*(Rx_Smooth_Factor-1)) +
+ (RSSI_Ave)) / (Rx_Smooth_Factor);
+ UndecoratedSmoothedOFDM = UndecoratedSmoothedOFDM + 1;
+ } else {
+ UndecoratedSmoothedOFDM =
+ (((UndecoratedSmoothedOFDM)*(Rx_Smooth_Factor-1)) +
+ (RSSI_Ave)) / (Rx_Smooth_Factor);
+ }
+ }
+
+ pEntry->rssi_stat.PacketMap = (pEntry->rssi_stat.PacketMap<<1) | BIT0;
+
+ } else {
+ RSSI_Ave = pPhyInfo->RxPWDBAll;
+
+ /* 1 Process CCK RSSI */
+ if (UndecoratedSmoothedCCK <= 0) { /* initialize */
+ UndecoratedSmoothedCCK = pPhyInfo->RxPWDBAll;
+ } else {
+ if (pPhyInfo->RxPWDBAll > (u32)UndecoratedSmoothedCCK) {
+ UndecoratedSmoothedCCK =
+ ((UndecoratedSmoothedCCK * (Rx_Smooth_Factor-1)) +
+ pPhyInfo->RxPWDBAll) / Rx_Smooth_Factor;
+ UndecoratedSmoothedCCK = UndecoratedSmoothedCCK + 1;
+ } else {
+ UndecoratedSmoothedCCK =
+ ((UndecoratedSmoothedCCK * (Rx_Smooth_Factor-1)) +
+ pPhyInfo->RxPWDBAll) / Rx_Smooth_Factor;
+ }
+ }
+ pEntry->rssi_stat.PacketMap = pEntry->rssi_stat.PacketMap<<1;
+ }
+ /* 2011.07.28 LukeLee: modified to prevent unstable CCK RSSI */
+ if (pEntry->rssi_stat.ValidBit >= 64)
+ pEntry->rssi_stat.ValidBit = 64;
+ else
+ pEntry->rssi_stat.ValidBit++;
+
+ for (i = 0; i < pEntry->rssi_stat.ValidBit; i++)
+ OFDM_pkt += (u8)(pEntry->rssi_stat.PacketMap>>i)&BIT0;
+
+ if (pEntry->rssi_stat.ValidBit == 64) {
+ Weighting = ((OFDM_pkt<<4) > 64) ? 64 : (OFDM_pkt<<4);
+ UndecoratedSmoothedPWDB = (Weighting*UndecoratedSmoothedOFDM+(64-Weighting)*UndecoratedSmoothedCCK)>>6;
+ } else {
+ if (pEntry->rssi_stat.ValidBit != 0)
+ UndecoratedSmoothedPWDB = (OFDM_pkt * UndecoratedSmoothedOFDM +
+ (pEntry->rssi_stat.ValidBit-OFDM_pkt) *
+ UndecoratedSmoothedCCK)/pEntry->rssi_stat.ValidBit;
+ else
+ UndecoratedSmoothedPWDB = 0;
+ }
+ pEntry->rssi_stat.UndecoratedSmoothedCCK = UndecoratedSmoothedCCK;
+ pEntry->rssi_stat.UndecoratedSmoothedOFDM = UndecoratedSmoothedOFDM;
+ pEntry->rssi_stat.UndecoratedSmoothedPWDB = UndecoratedSmoothedPWDB;
+ }
+}
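+
+/*
+ * The per-entry RSSI above is tracked with an exponential moving average,
+ * new = (old * (Rx_Smooth_Factor - 1) + sample) / Rx_Smooth_Factor, kept
+ * separately for CCK and OFDM frames; PacketMap/ValidBit then weight the two
+ * averages by how many of the last (up to 64) packets were OFDM.
+ */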
+
+/* Endianness should be handled before calling this API. */
+static void ODM_PhyStatusQuery_92CSeries(struct odm_dm_struct *dm_odm,
+ struct odm_phy_status_info *pPhyInfo,
+ u8 *pPhyStatus,
+ struct odm_per_pkt_info *pPktinfo)
+{
+ odm_RxPhyStatus92CSeries_Parsing(dm_odm, pPhyInfo, pPhyStatus,
+ pPktinfo);
+ if (dm_odm->RSSI_test) {
+ /* Select the packets to do RSSI checking for antenna switching. */
+ if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon)
+ ODM_SwAntDivChkPerPktRssi(dm_odm, pPktinfo->StationID, pPhyInfo);
+ } else {
+ odm_Process_RSSIForDM(dm_odm, pPhyInfo, pPktinfo);
+ }
+}
+
+void ODM_PhyStatusQuery(struct odm_dm_struct *dm_odm,
+ struct odm_phy_status_info *pPhyInfo,
+ u8 *pPhyStatus, struct odm_per_pkt_info *pPktinfo)
+{
+ ODM_PhyStatusQuery_92CSeries(dm_odm, pPhyInfo, pPhyStatus, pPktinfo);
+}
+
+/* For future use. */
+void ODM_MacStatusQuery(struct odm_dm_struct *dm_odm, u8 *mac_stat,
+ u8 macid, bool pkt_match_bssid,
+ bool pkttoself, bool pkt_beacon)
+{
+ /* 2011/10/19 Driver team will handle in the future. */
+}
+
+enum HAL_STATUS ODM_ConfigRFWithHeaderFile(struct odm_dm_struct *dm_odm,
+ enum ODM_RF_RADIO_PATH content,
+ enum ODM_RF_RADIO_PATH rfpath)
+{
+ ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD, ("===>ODM_ConfigRFWithHeaderFile\n"));
+ if (dm_odm->SupportICType == ODM_RTL8188E) {
+ if (rfpath == ODM_RF_PATH_A)
+ READ_AND_CONFIG(8188E, _RadioA_1T_);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD, (" ===> ODM_ConfigRFWithHeaderFile() Radio_A:Rtl8188ERadioA_1TArray\n"));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD, (" ===> ODM_ConfigRFWithHeaderFile() Radio_B:Rtl8188ERadioB_1TArray\n"));
+ }
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_TRACE, ("ODM_ConfigRFWithHeaderFile: Radio No %x\n", rfpath));
+ return HAL_STATUS_SUCCESS;
+}
+
+enum HAL_STATUS ODM_ConfigBBWithHeaderFile(struct odm_dm_struct *dm_odm,
+ enum odm_bb_config_type config_tp)
+{
+ if (dm_odm->SupportICType == ODM_RTL8188E) {
+ if (config_tp == CONFIG_BB_PHY_REG) {
+ READ_AND_CONFIG(8188E, _PHY_REG_1T_);
+ } else if (config_tp == CONFIG_BB_AGC_TAB) {
+ READ_AND_CONFIG(8188E, _AGC_TAB_1T_);
+ } else if (config_tp == CONFIG_BB_PHY_REG_PG) {
+ READ_AND_CONFIG(8188E, _PHY_REG_PG_);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD,
+ (" ===> phy_ConfigBBWithHeaderFile() agc:Rtl8188EPHY_REG_PGArray\n"));
+ }
+ }
+ return HAL_STATUS_SUCCESS;
+}
+
+enum HAL_STATUS ODM_ConfigMACWithHeaderFile(struct odm_dm_struct *dm_odm)
+{
+ u8 result = HAL_STATUS_SUCCESS;
+ if (dm_odm->SupportICType == ODM_RTL8188E)
+ result = READ_AND_CONFIG(8188E, _MAC_REG_);
+ return result;
+}
diff --git a/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c b/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
new file mode 100644
index 0000000..58410f3
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
@@ -0,0 +1,399 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#include "odm_precomp.h"
+
+void ODM_DIG_LowerBound_88E(struct odm_dm_struct *dm_odm)
+{
+ struct rtw_dig *pDM_DigTable = &dm_odm->DM_DigTable;
+
+ if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) {
+ pDM_DigTable->rx_gain_range_min = (u8) pDM_DigTable->AntDiv_RSSI_max;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
+ ("ODM_DIG_LowerBound_88E(): pDM_DigTable->AntDiv_RSSI_max=%d\n", pDM_DigTable->AntDiv_RSSI_max));
+ }
+ /* If only one Entry connected */
+}
+
+static void odm_RX_HWAntDivInit(struct odm_dm_struct *dm_odm)
+{
+ u32 value32;
+
+ if (*(dm_odm->mp_mode) == 1) {
+ dm_odm->AntDivType = CGCS_RX_SW_ANTDIV;
+ ODM_SetBBReg(dm_odm, ODM_REG_IGI_A_11N, BIT7, 0); /* disable HW AntDiv */
+ ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT31, 1); /* 1:CG, 0:CS */
+ return;
+ }
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("odm_RX_HWAntDivInit()\n"));
+
+ /* MAC Setting */
+ value32 = ODM_GetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord);
+ ODM_SetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord, value32|(BIT23|BIT25)); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
+ /* Pin Settings */
+ ODM_SetBBReg(dm_odm, ODM_REG_PIN_CTRL_11N, BIT9|BIT8, 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
+ ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT10, 0); /* Reg864[10]=1'b0 antsel2 by HW */
+ ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT22, 1); /* Regb2c[22]=1'b0 disable CS/CG switch */
+ ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT31, 1); /* Regb2c[31]=1'b1 output at CG only */
+ /* OFDM Settings */
+ ODM_SetBBReg(dm_odm, ODM_REG_ANTDIV_PARA1_11N, bMaskDWord, 0x000000a0);
+ /* CCK Settings */
+ ODM_SetBBReg(dm_odm, ODM_REG_BB_PWR_SAV4_11N, BIT7, 1); /* Fix CCK PHY status report issue */
+ ODM_SetBBReg(dm_odm, ODM_REG_CCK_ANTDIV_PARA2_11N, BIT4, 1); /* CCK complete HW AntDiv within 64 samples */
+ ODM_UpdateRxIdleAnt_88E(dm_odm, MAIN_ANT);
+ ODM_SetBBReg(dm_odm, ODM_REG_ANT_MAPPING1_11N, 0xFFFF, 0x0201); /* antenna mapping table */
+}
+
+static void odm_TRX_HWAntDivInit(struct odm_dm_struct *dm_odm)
+{
+ u32 value32;
+
+ if (*(dm_odm->mp_mode) == 1) {
+ dm_odm->AntDivType = CGCS_RX_SW_ANTDIV;
+ ODM_SetBBReg(dm_odm, ODM_REG_IGI_A_11N, BIT7, 0); /* disable HW AntDiv */
+ ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT5|BIT4|BIT3, 0); /* Default RX (0/1) */
+ return;
+ }
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("odm_TRX_HWAntDivInit()\n"));
+
+ /* MAC Setting */
+ value32 = ODM_GetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord);
+ ODM_SetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord, value32|(BIT23|BIT25)); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
+ /* Pin Settings */
+ ODM_SetBBReg(dm_odm, ODM_REG_PIN_CTRL_11N, BIT9|BIT8, 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
+ ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT10, 0); /* Reg864[10]=1'b0 antsel2 by HW */
+ ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT22, 0); /* Regb2c[22]=1'b0 disable CS/CG switch */
+ ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT31, 1); /* Regb2c[31]=1'b1 output at CG only */
+ /* OFDM Settings */
+ ODM_SetBBReg(dm_odm, ODM_REG_ANTDIV_PARA1_11N, bMaskDWord, 0x000000a0);
+ /* CCK Settings */
+ ODM_SetBBReg(dm_odm, ODM_REG_BB_PWR_SAV4_11N, BIT7, 1); /* Fix CCK PHY status report issue */
+ ODM_SetBBReg(dm_odm, ODM_REG_CCK_ANTDIV_PARA2_11N, BIT4, 1); /* CCK complete HW AntDiv within 64 samples */
+ /* Tx Settings */
+ ODM_SetBBReg(dm_odm, ODM_REG_TX_ANT_CTRL_11N, BIT21, 0); /* Reg80c[21]=1'b0 from TX Reg */
+ ODM_UpdateRxIdleAnt_88E(dm_odm, MAIN_ANT);
+
+ /* antenna mapping table */
+ if (!dm_odm->bIsMPChip) { /* testchip */
+ ODM_SetBBReg(dm_odm, ODM_REG_RX_DEFUALT_A_11N, BIT10|BIT9|BIT8, 1); /* Reg858[10:8]=3'b001 */
+ ODM_SetBBReg(dm_odm, ODM_REG_RX_DEFUALT_A_11N, BIT13|BIT12|BIT11, 2); /* Reg858[13:11]=3'b010 */
+ } else { /* MPchip */
+ ODM_SetBBReg(dm_odm, ODM_REG_ANT_MAPPING1_11N, bMaskDWord, 0x0201); /* Reg914=3'b010, Reg915=3'b001 */
+ }
+}
+
+static void odm_FastAntTrainingInit(struct odm_dm_struct *dm_odm)
+{
+ u32 value32, i;
+ struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
+ u32 AntCombination = 2;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("odm_FastAntTrainingInit()\n"));
+
+ if (*(dm_odm->mp_mode) == 1) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD, ("dm_odm->AntDivType: %d\n", dm_odm->AntDivType));
+ return;
+ }
+
+ for (i = 0; i < 6; i++) {
+ dm_fat_tbl->Bssid[i] = 0;
+ dm_fat_tbl->antSumRSSI[i] = 0;
+ dm_fat_tbl->antRSSIcnt[i] = 0;
+ dm_fat_tbl->antAveRSSI[i] = 0;
+ }
+ dm_fat_tbl->TrainIdx = 0;
+ dm_fat_tbl->FAT_State = FAT_NORMAL_STATE;
+
+ /* MAC Setting */
+ value32 = ODM_GetMACReg(dm_odm, 0x4c, bMaskDWord);
+ ODM_SetMACReg(dm_odm, 0x4c, bMaskDWord, value32|(BIT23|BIT25)); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
+ value32 = ODM_GetMACReg(dm_odm, 0x7B4, bMaskDWord);
+ ODM_SetMACReg(dm_odm, 0x7b4, bMaskDWord, value32|(BIT16|BIT17)); /* Reg7B4[16]=1 enable antenna training, Reg7B4[17]=1 enable A2 match */
+
+ /* Match MAC ADDR */
+ ODM_SetMACReg(dm_odm, 0x7b4, 0xFFFF, 0);
+ ODM_SetMACReg(dm_odm, 0x7b0, bMaskDWord, 0);
+
+ ODM_SetBBReg(dm_odm, 0x870, BIT9|BIT8, 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
+ ODM_SetBBReg(dm_odm, 0x864, BIT10, 0); /* Reg864[10]=1'b0 antsel2 by HW */
+ ODM_SetBBReg(dm_odm, 0xb2c, BIT22, 0); /* Regb2c[22]=1'b0 disable CS/CG switch */
+ ODM_SetBBReg(dm_odm, 0xb2c, BIT31, 1); /* Regb2c[31]=1'b1 output at CG only */
+ ODM_SetBBReg(dm_odm, 0xca4, bMaskDWord, 0x000000a0);
+
+ /* antenna mapping table */
+ if (AntCombination == 2) {
+ if (!dm_odm->bIsMPChip) { /* testchip */
+ ODM_SetBBReg(dm_odm, 0x858, BIT10|BIT9|BIT8, 1); /* Reg858[10:8]=3'b001 */
+ ODM_SetBBReg(dm_odm, 0x858, BIT13|BIT12|BIT11, 2); /* Reg858[13:11]=3'b010 */
+ } else { /* MPchip */
+ ODM_SetBBReg(dm_odm, 0x914, bMaskByte0, 1);
+ ODM_SetBBReg(dm_odm, 0x914, bMaskByte1, 2);
+ }
+ } else if (AntCombination == 7) {
+ if (!dm_odm->bIsMPChip) { /* testchip */
+ ODM_SetBBReg(dm_odm, 0x858, BIT10|BIT9|BIT8, 0); /* Reg858[10:8]=3'b000 */
+ ODM_SetBBReg(dm_odm, 0x858, BIT13|BIT12|BIT11, 1); /* Reg858[13:11]=3'b001 */
+ ODM_SetBBReg(dm_odm, 0x878, BIT16, 0);
+ ODM_SetBBReg(dm_odm, 0x858, BIT15|BIT14, 2); /* Reg878[0],Reg858[14:15])=3'b010 */
+ ODM_SetBBReg(dm_odm, 0x878, BIT19|BIT18|BIT17, 3);/* Reg878[3:1]=3b'011 */
+ ODM_SetBBReg(dm_odm, 0x878, BIT22|BIT21|BIT20, 4);/* Reg878[6:4]=3b'100 */
+ ODM_SetBBReg(dm_odm, 0x878, BIT25|BIT24|BIT23, 5);/* Reg878[9:7]=3b'101 */
+ ODM_SetBBReg(dm_odm, 0x878, BIT28|BIT27|BIT26, 6);/* Reg878[12:10]=3b'110 */
+ ODM_SetBBReg(dm_odm, 0x878, BIT31|BIT30|BIT29, 7);/* Reg878[15:13]=3b'111 */
+ } else { /* MPchip */
+ ODM_SetBBReg(dm_odm, 0x914, bMaskByte0, 0);
+ ODM_SetBBReg(dm_odm, 0x914, bMaskByte1, 1);
+ ODM_SetBBReg(dm_odm, 0x914, bMaskByte2, 2);
+ ODM_SetBBReg(dm_odm, 0x914, bMaskByte3, 3);
+ ODM_SetBBReg(dm_odm, 0x918, bMaskByte0, 4);
+ ODM_SetBBReg(dm_odm, 0x918, bMaskByte1, 5);
+ ODM_SetBBReg(dm_odm, 0x918, bMaskByte2, 6);
+ ODM_SetBBReg(dm_odm, 0x918, bMaskByte3, 7);
+ }
+ }
+
+ /* Default Ant Setting when no fast training */
+ ODM_SetBBReg(dm_odm, 0x80c, BIT21, 1); /* Reg80c[21]=1'b1 from TX Info */
+ ODM_SetBBReg(dm_odm, 0x864, BIT5|BIT4|BIT3, 0); /* Default RX */
+ ODM_SetBBReg(dm_odm, 0x864, BIT8|BIT7|BIT6, 1); /* Optional RX */
+
+	/* Enter training state */
+ ODM_SetBBReg(dm_odm, 0x864, BIT2|BIT1|BIT0, (AntCombination-1)); /* Reg864[2:0]=3'd6 ant combination=reg864[2:0]+1 */
+ ODM_SetBBReg(dm_odm, 0xc50, BIT7, 1); /* RegC50[7]=1'b1 enable HW AntDiv */
+}
+
+void ODM_AntennaDiversityInit_88E(struct odm_dm_struct *dm_odm)
+{
+ if (dm_odm->SupportICType != ODM_RTL8188E)
+ return;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("dm_odm->AntDivType=%d\n", dm_odm->AntDivType));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("dm_odm->bIsMPChip=%s\n", (dm_odm->bIsMPChip ? "true" : "false")));
+
+ if (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV)
+ odm_RX_HWAntDivInit(dm_odm);
+ else if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
+ odm_TRX_HWAntDivInit(dm_odm);
+ else if (dm_odm->AntDivType == CG_TRX_SMART_ANTDIV)
+ odm_FastAntTrainingInit(dm_odm);
+}
+
+void ODM_UpdateRxIdleAnt_88E(struct odm_dm_struct *dm_odm, u8 Ant)
+{
+ struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
+ u32 DefaultAnt, OptionalAnt;
+
+ if (dm_fat_tbl->RxIdleAnt != Ant) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Need to Update Rx Idle Ant\n"));
+ if (Ant == MAIN_ANT) {
+ DefaultAnt = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ? MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX;
+ OptionalAnt = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ? AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX;
+ } else {
+ DefaultAnt = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ? AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX;
+ OptionalAnt = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ? MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX;
+ }
+
+ if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) {
+ ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT5|BIT4|BIT3, DefaultAnt); /* Default RX */
+ ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT8|BIT7|BIT6, OptionalAnt); /* Optional RX */
+ ODM_SetBBReg(dm_odm, ODM_REG_ANTSEL_CTRL_11N, BIT14|BIT13|BIT12, DefaultAnt); /* Default TX */
+ ODM_SetMACReg(dm_odm, ODM_REG_RESP_TX_11N, BIT6|BIT7, DefaultAnt); /* Resp Tx */
+ } else if (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV) {
+ ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT5|BIT4|BIT3, DefaultAnt); /* Default RX */
+ ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT8|BIT7|BIT6, OptionalAnt); /* Optional RX */
+ }
+ }
+ dm_fat_tbl->RxIdleAnt = Ant;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("RxIdleAnt=%s\n", (Ant == MAIN_ANT) ? "MAIN_ANT" : "AUX_ANT"));
+ pr_info("RxIdleAnt=%s\n", (Ant == MAIN_ANT) ? "MAIN_ANT" : "AUX_ANT");
+}
+
+static void odm_UpdateTxAnt_88E(struct odm_dm_struct *dm_odm, u8 Ant, u32 MacId)
+{
+ struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
+ u8 TargetAnt;
+
+ if (Ant == MAIN_ANT)
+ TargetAnt = MAIN_ANT_CG_TRX;
+ else
+ TargetAnt = AUX_ANT_CG_TRX;
+ dm_fat_tbl->antsel_a[MacId] = TargetAnt&BIT0;
+ dm_fat_tbl->antsel_b[MacId] = (TargetAnt&BIT1)>>1;
+ dm_fat_tbl->antsel_c[MacId] = (TargetAnt&BIT2)>>2;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
+ ("Tx from TxInfo, TargetAnt=%s\n",
+ (Ant == MAIN_ANT) ? "MAIN_ANT" : "AUX_ANT"));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
+ ("antsel_tr_mux=3'b%d%d%d\n",
+ dm_fat_tbl->antsel_c[MacId], dm_fat_tbl->antsel_b[MacId], dm_fat_tbl->antsel_a[MacId]));
+}
+
+void ODM_SetTxAntByTxInfo_88E(struct odm_dm_struct *dm_odm, u8 *pDesc, u8 macId)
+{
+ struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
+
+ if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) || (dm_odm->AntDivType == CG_TRX_SMART_ANTDIV)) {
+ SET_TX_DESC_ANTSEL_A_88E(pDesc, dm_fat_tbl->antsel_a[macId]);
+ SET_TX_DESC_ANTSEL_B_88E(pDesc, dm_fat_tbl->antsel_b[macId]);
+ SET_TX_DESC_ANTSEL_C_88E(pDesc, dm_fat_tbl->antsel_c[macId]);
+ }
+}
+
+void ODM_AntselStatistics_88E(struct odm_dm_struct *dm_odm, u8 antsel_tr_mux, u32 MacId, u8 RxPWDBAll)
+{
+ struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
+ if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) {
+ if (antsel_tr_mux == MAIN_ANT_CG_TRX) {
+ dm_fat_tbl->MainAnt_Sum[MacId] += RxPWDBAll;
+ dm_fat_tbl->MainAnt_Cnt[MacId]++;
+ } else {
+ dm_fat_tbl->AuxAnt_Sum[MacId] += RxPWDBAll;
+ dm_fat_tbl->AuxAnt_Cnt[MacId]++;
+ }
+ } else if (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV) {
+ if (antsel_tr_mux == MAIN_ANT_CGCS_RX) {
+ dm_fat_tbl->MainAnt_Sum[MacId] += RxPWDBAll;
+ dm_fat_tbl->MainAnt_Cnt[MacId]++;
+ } else {
+ dm_fat_tbl->AuxAnt_Sum[MacId] += RxPWDBAll;
+ dm_fat_tbl->AuxAnt_Cnt[MacId]++;
+ }
+ }
+}
+
+static void odm_HWAntDiv(struct odm_dm_struct *dm_odm)
+{
+ u32 i, MinRSSI = 0xFF, AntDivMaxRSSI = 0, MaxRSSI = 0, LocalMinRSSI, LocalMaxRSSI;
+ u32 Main_RSSI, Aux_RSSI;
+ u8 RxIdleAnt = 0, TargetAnt = 7;
+ struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
+ struct rtw_dig *pDM_DigTable = &dm_odm->DM_DigTable;
+ struct sta_info *pEntry;
+
+ for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
+ pEntry = dm_odm->pODM_StaInfo[i];
+ if (IS_STA_VALID(pEntry)) {
+ /* 2 Calculate RSSI per Antenna */
+ Main_RSSI = (dm_fat_tbl->MainAnt_Cnt[i] != 0) ? (dm_fat_tbl->MainAnt_Sum[i]/dm_fat_tbl->MainAnt_Cnt[i]) : 0;
+ Aux_RSSI = (dm_fat_tbl->AuxAnt_Cnt[i] != 0) ? (dm_fat_tbl->AuxAnt_Sum[i]/dm_fat_tbl->AuxAnt_Cnt[i]) : 0;
+ TargetAnt = (Main_RSSI >= Aux_RSSI) ? MAIN_ANT : AUX_ANT;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
+ ("MacID=%d, MainAnt_Sum=%d, MainAnt_Cnt=%d\n",
+ i, dm_fat_tbl->MainAnt_Sum[i],
+ dm_fat_tbl->MainAnt_Cnt[i]));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
+ ("MacID=%d, AuxAnt_Sum=%d, AuxAnt_Cnt=%d\n",
+ i, dm_fat_tbl->AuxAnt_Sum[i], dm_fat_tbl->AuxAnt_Cnt[i]));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
+ ("MacID=%d, Main_RSSI= %d, Aux_RSSI= %d\n",
+ i, Main_RSSI, Aux_RSSI));
+ /* 2 Select MaxRSSI for DIG */
+ LocalMaxRSSI = (Main_RSSI > Aux_RSSI) ? Main_RSSI : Aux_RSSI;
+ if ((LocalMaxRSSI > AntDivMaxRSSI) && (LocalMaxRSSI < 40))
+ AntDivMaxRSSI = LocalMaxRSSI;
+ if (LocalMaxRSSI > MaxRSSI)
+ MaxRSSI = LocalMaxRSSI;
+
+ /* 2 Select RX Idle Antenna */
+ if ((dm_fat_tbl->RxIdleAnt == MAIN_ANT) && (Main_RSSI == 0))
+ Main_RSSI = Aux_RSSI;
+ else if ((dm_fat_tbl->RxIdleAnt == AUX_ANT) && (Aux_RSSI == 0))
+ Aux_RSSI = Main_RSSI;
+
+ LocalMinRSSI = (Main_RSSI > Aux_RSSI) ? Aux_RSSI : Main_RSSI;
+ if (LocalMinRSSI < MinRSSI) {
+ MinRSSI = LocalMinRSSI;
+ RxIdleAnt = TargetAnt;
+ }
+ /* 2 Select TRX Antenna */
+ if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
+ odm_UpdateTxAnt_88E(dm_odm, TargetAnt, i);
+ }
+ dm_fat_tbl->MainAnt_Sum[i] = 0;
+ dm_fat_tbl->AuxAnt_Sum[i] = 0;
+ dm_fat_tbl->MainAnt_Cnt[i] = 0;
+ dm_fat_tbl->AuxAnt_Cnt[i] = 0;
+ }
+
+ /* 2 Set RX Idle Antenna */
+ ODM_UpdateRxIdleAnt_88E(dm_odm, RxIdleAnt);
+
+ pDM_DigTable->AntDiv_RSSI_max = AntDivMaxRSSI;
+ pDM_DigTable->RSSI_max = MaxRSSI;
+}
+
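ODM_AntselStatistics_88E accumulates a per-station RSSI sum and count for each antenna, and odm_HWAntDiv above averages them on every watchdog pass, keeps the stronger antenna and resets the accumulators. A minimal standalone sketch of that bookkeeping (struct and function names are local to this example):

#include <stdint.h>

/* Accumulate per-antenna RSSI the way ODM_AntselStatistics_88E does,
 * then average and pick the stronger antenna as in odm_HWAntDiv. */
struct ant_stats {
	uint32_t main_sum, main_cnt;
	uint32_t aux_sum, aux_cnt;
};

static void ant_stats_add(struct ant_stats *s, int on_main_ant, uint8_t rx_pwdb)
{
	if (on_main_ant) {
		s->main_sum += rx_pwdb;
		s->main_cnt++;
	} else {
		s->aux_sum += rx_pwdb;
		s->aux_cnt++;
	}
}

/* Returns 1 if the main antenna is at least as strong as the aux antenna,
 * 0 otherwise, and clears the accumulators for the next round. */
static int ant_stats_pick_and_reset(struct ant_stats *s)
{
	uint32_t main_rssi = s->main_cnt ? s->main_sum / s->main_cnt : 0;
	uint32_t aux_rssi = s->aux_cnt ? s->aux_sum / s->aux_cnt : 0;
	int use_main = main_rssi >= aux_rssi;

	s->main_sum = s->aux_sum = 0;
	s->main_cnt = s->aux_cnt = 0;
	return use_main;
}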
+void ODM_AntennaDiversity_88E(struct odm_dm_struct *dm_odm)
+{
+ struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
+ if ((dm_odm->SupportICType != ODM_RTL8188E) || (!(dm_odm->SupportAbility & ODM_BB_ANT_DIV)))
+ return;
+ if (!dm_odm->bLinked) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_AntennaDiversity_88E(): No Link.\n"));
+ if (dm_fat_tbl->bBecomeLinked) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Need to Turn off HW AntDiv\n"));
+ ODM_SetBBReg(dm_odm, ODM_REG_IGI_A_11N, BIT7, 0); /* RegC50[7]=1'b0 disable HW AntDiv */
+ ODM_SetBBReg(dm_odm, ODM_REG_CCK_ANTDIV_PARA1_11N, BIT15, 0); /* Disable CCK AntDiv */
+ if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
+ ODM_SetBBReg(dm_odm, ODM_REG_TX_ANT_CTRL_11N, BIT21, 0); /* Reg80c[21]=1'b0 from TX Reg */
+ dm_fat_tbl->bBecomeLinked = dm_odm->bLinked;
+ }
+ return;
+ } else {
+ if (!dm_fat_tbl->bBecomeLinked) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Need to Turn on HW AntDiv\n"));
+ /* Because HW AntDiv is disabled before Link, we enable HW AntDiv after link */
+ ODM_SetBBReg(dm_odm, ODM_REG_IGI_A_11N, BIT7, 1); /* RegC50[7]=1'b1 enable HW AntDiv */
+ ODM_SetBBReg(dm_odm, ODM_REG_CCK_ANTDIV_PARA1_11N, BIT15, 1); /* Enable CCK AntDiv */
+ if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
+ ODM_SetBBReg(dm_odm, ODM_REG_TX_ANT_CTRL_11N, BIT21, 1); /* Reg80c[21]=1'b1 from TX Info */
+ dm_fat_tbl->bBecomeLinked = dm_odm->bLinked;
+ }
+ }
+ if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) || (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV))
+ odm_HWAntDiv(dm_odm);
+}
+
+/* 3============================================================ */
+/* 3 Dynamic Primary CCA */
+/* 3============================================================ */
+
+void odm_PrimaryCCA_Init(struct odm_dm_struct *dm_odm)
+{
+ struct dyn_primary_cca *PrimaryCCA = &(dm_odm->DM_PriCCA);
+
+ PrimaryCCA->DupRTS_flag = 0;
+ PrimaryCCA->intf_flag = 0;
+ PrimaryCCA->intf_type = 0;
+ PrimaryCCA->Monitor_flag = 0;
+ PrimaryCCA->PriCCA_flag = 0;
+}
+
+bool ODM_DynamicPrimaryCCA_DupRTS(struct odm_dm_struct *dm_odm)
+{
+ struct dyn_primary_cca *PrimaryCCA = &(dm_odm->DM_PriCCA);
+
+ return PrimaryCCA->DupRTS_flag;
+}
+
+void odm_DynamicPrimaryCCA(struct odm_dm_struct *dm_odm)
+{
+ return;
+}
diff --git a/drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c b/drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c
new file mode 100644
index 0000000..18c0533
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c
@@ -0,0 +1,130 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#include "odm_precomp.h"
+
+void odm_ConfigRFReg_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
+ u32 Data, enum ODM_RF_RADIO_PATH RF_PATH,
+ u32 RegAddr)
+{
+ if (Addr == 0xffe) {
+ ODM_sleep_ms(50);
+ } else if (Addr == 0xfd) {
+ ODM_delay_ms(5);
+ } else if (Addr == 0xfc) {
+ ODM_delay_ms(1);
+ } else if (Addr == 0xfb) {
+ ODM_delay_us(50);
+ } else if (Addr == 0xfa) {
+ ODM_delay_us(5);
+ } else if (Addr == 0xf9) {
+ ODM_delay_us(1);
+ } else {
+ ODM_SetRFReg(pDM_Odm, RF_PATH, RegAddr, bRFRegOffsetMask, Data);
+ /* Add 1us delay between BB/RF register setting. */
+ ODM_delay_us(1);
+ }
+}
+
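In the config-table writer above (and in the BB variants later in this file), a handful of "addresses" are not registers at all: 0xffe (0xfe in the BB paths) requests a 50 ms sleep, while 0xfd/0xfc/0xfb/0xfa/0xf9 encode progressively shorter delays. A standalone sketch of that convention, with an illustrative helper name:

#include <stdint.h>

/* Map the delay pseudo-addresses used in the RF/BB config tables to a
 * delay in microseconds; 0 means "real register, no delay encoded".
 * In the driver the 50 ms case is a sleep rather than a busy delay. */
static uint32_t delay_us_for_pseudo_addr(uint32_t addr)
{
	switch (addr) {
	case 0xffe:
	case 0xfe:
		return 50000;
	case 0xfd:
		return 5000;
	case 0xfc:
		return 1000;
	case 0xfb:
		return 50;
	case 0xfa:
		return 5;
	case 0xf9:
		return 1;
	default:
		return 0; /* ordinary register write */
	}
}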
+void odm_ConfigRF_RadioA_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Data)
+{
+ u32 content = 0x1000; /* RF_Content: radioa_txt */
+ u32 maskforPhySet = (u32)(content&0xE000);
+
+ odm_ConfigRFReg_8188E(pDM_Odm, Addr, Data, ODM_RF_PATH_A, Addr|maskforPhySet);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE, ("===> ODM_ConfigRFWithHeaderFile: [RadioA] %08X %08X\n", Addr, Data));
+}
+
+void odm_ConfigRF_RadioB_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Data)
+{
+ u32 content = 0x1001; /* RF_Content: radiob_txt */
+ u32 maskforPhySet = (u32)(content&0xE000);
+
+ odm_ConfigRFReg_8188E(pDM_Odm, Addr, Data, ODM_RF_PATH_B, Addr|maskforPhySet);
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE, ("===> ODM_ConfigRFWithHeaderFile: [RadioB] %08X %08X\n", Addr, Data));
+}
+
+void odm_ConfigMAC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u8 Data)
+{
+ ODM_Write1Byte(pDM_Odm, Addr, Data);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE, ("===> ODM_ConfigMACWithHeaderFile: [MAC_REG] %08X %08X\n", Addr, Data));
+}
+
+void odm_ConfigBB_AGC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Bitmask, u32 Data)
+{
+ ODM_SetBBReg(pDM_Odm, Addr, Bitmask, Data);
+ /* Add 1us delay between BB/RF register setting. */
+ ODM_delay_us(1);
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE,
+ ("===> ODM_ConfigBBWithHeaderFile: [AGC_TAB] %08X %08X\n",
+ Addr, Data));
+}
+
+void odm_ConfigBB_PHY_REG_PG_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
+ u32 Bitmask, u32 Data)
+{
+ if (Addr == 0xfe) {
+ ODM_sleep_ms(50);
+ } else if (Addr == 0xfd) {
+ ODM_delay_ms(5);
+ } else if (Addr == 0xfc) {
+ ODM_delay_ms(1);
+ } else if (Addr == 0xfb) {
+ ODM_delay_us(50);
+ } else if (Addr == 0xfa) {
+ ODM_delay_us(5);
+ } else if (Addr == 0xf9) {
+ ODM_delay_us(1);
+ } else {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
+ ("===> @@@@@@@ ODM_ConfigBBWithHeaderFile: [PHY_REG] %08X %08X %08X\n",
+ Addr, Bitmask, Data));
+ storePwrIndexDiffRateOffset(pDM_Odm->Adapter, Addr, Bitmask, Data);
+ }
+}
+
+void odm_ConfigBB_PHY_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Bitmask, u32 Data)
+{
+ if (Addr == 0xfe) {
+ ODM_sleep_ms(50);
+ } else if (Addr == 0xfd) {
+ ODM_delay_ms(5);
+ } else if (Addr == 0xfc) {
+ ODM_delay_ms(1);
+ } else if (Addr == 0xfb) {
+ ODM_delay_us(50);
+ } else if (Addr == 0xfa) {
+ ODM_delay_us(5);
+ } else if (Addr == 0xf9) {
+ ODM_delay_us(1);
+ } else {
+ if (Addr == 0xa24)
+ pDM_Odm->RFCalibrateInfo.RegA24 = Data;
+ ODM_SetBBReg(pDM_Odm, Addr, Bitmask, Data);
+
+ /* Add 1us delay between BB/RF register setting. */
+ ODM_delay_us(1);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE,
+ ("===> ODM_ConfigBBWithHeaderFile: [PHY_REG] %08X %08X\n",
+ Addr, Data));
+ }
+}
diff --git a/drivers/staging/rtl8188eu/hal/odm_debug.c b/drivers/staging/rtl8188eu/hal/odm_debug.c
new file mode 100644
index 0000000..84caadd
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/odm_debug.c
@@ -0,0 +1,32 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+/* include files */
+
+#include "odm_precomp.h"
+
+void ODM_InitDebugSetting(struct odm_dm_struct *pDM_Odm)
+{
+ pDM_Odm->DebugLevel = ODM_DBG_TRACE;
+
+ pDM_Odm->DebugComponents = 0;
+}
+
+u32 GlobalDebugLevel;
diff --git a/drivers/staging/rtl8188eu/hal/odm_interface.c b/drivers/staging/rtl8188eu/hal/odm_interface.c
new file mode 100644
index 0000000..59ad5bf
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/odm_interface.c
@@ -0,0 +1,203 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#include "odm_precomp.h"
+/* ODM IO Relative API. */
+
+u8 ODM_Read1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ return rtw_read8(Adapter, RegAddr);
+}
+
+u16 ODM_Read2Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ return rtw_read16(Adapter, RegAddr);
+}
+
+u32 ODM_Read4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ return rtw_read32(Adapter, RegAddr);
+}
+
+void ODM_Write1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u8 Data)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ rtw_write8(Adapter, RegAddr, Data);
+}
+
+void ODM_Write2Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u16 Data)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ rtw_write16(Adapter, RegAddr, Data);
+}
+
+void ODM_Write4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 Data)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ rtw_write32(Adapter, RegAddr, Data);
+}
+
+void ODM_SetMACReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask, u32 Data)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ PHY_SetBBReg(Adapter, RegAddr, BitMask, Data);
+}
+
+u32 ODM_GetMACReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ return PHY_QueryBBReg(Adapter, RegAddr, BitMask);
+}
+
+void ODM_SetBBReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask, u32 Data)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ PHY_SetBBReg(Adapter, RegAddr, BitMask, Data);
+}
+
+u32 ODM_GetBBReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ return PHY_QueryBBReg(Adapter, RegAddr, BitMask);
+}
+
+void ODM_SetRFReg(struct odm_dm_struct *pDM_Odm, enum ODM_RF_RADIO_PATH eRFPath, u32 RegAddr, u32 BitMask, u32 Data)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ PHY_SetRFReg(Adapter, (enum rf_radio_path)eRFPath, RegAddr, BitMask, Data);
+}
+
+u32 ODM_GetRFReg(struct odm_dm_struct *pDM_Odm, enum ODM_RF_RADIO_PATH eRFPath, u32 RegAddr, u32 BitMask)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ return PHY_QueryRFReg(Adapter, (enum rf_radio_path)eRFPath, RegAddr, BitMask);
+}
+
+/* ODM Memory relative API. */
+void ODM_AllocateMemory(struct odm_dm_struct *pDM_Odm, void **pPtr, u32 length)
+{
+ *pPtr = rtw_zvmalloc(length);
+}
+
+/* length could be ignored, used to detect memory leakage. */
+void ODM_FreeMemory(struct odm_dm_struct *pDM_Odm, void *pPtr, u32 length)
+{
+ rtw_vmfree(pPtr, length);
+}
+
+s32 ODM_CompareMemory(struct odm_dm_struct *pDM_Odm, void *pBuf1, void *pBuf2, u32 length)
+{
+ return _rtw_memcmp(pBuf1, pBuf2, length);
+}
+
+/* ODM MISC relative API. */
+void ODM_AcquireSpinLock(struct odm_dm_struct *pDM_Odm, enum RT_SPINLOCK_TYPE type)
+{
+}
+
+void ODM_ReleaseSpinLock(struct odm_dm_struct *pDM_Odm, enum RT_SPINLOCK_TYPE type)
+{
+}
+
+/* Work item relative API. For MP driver only! */
+void ODM_InitializeWorkItem(struct odm_dm_struct *pDM_Odm, void *pRtWorkItem,
+ RT_WORKITEM_CALL_BACK RtWorkItemCallback,
+ void *pContext, const char *szID)
+{
+}
+
+void ODM_StartWorkItem(void *pRtWorkItem)
+{
+}
+
+void ODM_StopWorkItem(void *pRtWorkItem)
+{
+}
+
+void ODM_FreeWorkItem(void *pRtWorkItem)
+{
+}
+
+void ODM_ScheduleWorkItem(void *pRtWorkItem)
+{
+}
+
+void ODM_IsWorkItemScheduled(void *pRtWorkItem)
+{
+}
+
+/* ODM Timer relative API. */
+void ODM_StallExecution(u32 usDelay)
+{
+ rtw_udelay_os(usDelay);
+}
+
+void ODM_delay_ms(u32 ms)
+{
+ rtw_mdelay_os(ms);
+}
+
+void ODM_delay_us(u32 us)
+{
+ rtw_udelay_os(us);
+}
+
+void ODM_sleep_ms(u32 ms)
+{
+ rtw_msleep_os(ms);
+}
+
+void ODM_sleep_us(u32 us)
+{
+ rtw_usleep_os(us);
+}
+
+void ODM_SetTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer, u32 msDelay)
+{
+ _set_timer(pTimer, msDelay); /* ms */
+}
+
+void ODM_InitializeTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer,
+ void *CallBackFunc, void *pContext,
+ const char *szID)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ _init_timer(pTimer, Adapter->pnetdev, CallBackFunc, pDM_Odm);
+}
+
+void ODM_CancelTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer)
+{
+ _cancel_timer_ex(pTimer);
+}
+
+void ODM_ReleaseTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer)
+{
+}
+
+/* ODM FW relative API. */
+u32 ODM_FillH2CCmd(u8 *pH2CBuffer, u32 H2CBufferLen, u32 CmdNum,
+ u32 *pElementID, u32 *pCmdLen,
+ u8 **pCmbBuffer, u8 *CmdStartSeq)
+{
+ return true;
+}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
new file mode 100644
index 0000000..8c858775
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -0,0 +1,779 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTL8188E_CMD_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <recv_osdep.h>
+#include <cmd_osdep.h>
+#include <mlme_osdep.h>
+#include <rtw_ioctl_set.h>
+
+#include <rtl8188e_hal.h>
+
+#define RTL88E_MAX_H2C_BOX_NUMS 4
+#define RTL88E_MAX_CMD_LEN 7
+#define RTL88E_MESSAGE_BOX_SIZE 4
+#define RTL88E_EX_MESSAGE_BOX_SIZE 4
+
+static u8 _is_fw_read_cmd_down(struct adapter *adapt, u8 msgbox_num)
+{
+ u8 read_down = false;
+ int retry_cnts = 100;
+
+ u8 valid;
+
+ do {
+ valid = rtw_read8(adapt, REG_HMETFR) & BIT(msgbox_num);
+ if (0 == valid)
+ read_down = true;
+ } while ((!read_down) && (retry_cnts--));
+
+ return read_down;
+}
+
+/*****************************************
+* H2C Msg format:
+* 0x1DF - 0x1D0
+* | 31 - 8  | 7 - 5    | 4 - 0  |
+* | h2c_msg | Class_ID | CMD_ID |
+*
+* Extension 0x1FF - 0x1F0
+* | 31 - 0  |
+* | ext_msg |
+******************************************/
+static s32 FillH2CCmd_88E(struct adapter *adapt, u8 ElementID, u32 CmdLen, u8 *pCmdBuffer)
+{
+ u8 bcmd_down = false;
+ s32 retry_cnts = 100;
+ u8 h2c_box_num;
+ u32 msgbox_addr;
+ u32 msgbox_ex_addr;
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ u8 cmd_idx, ext_cmd_len;
+ u32 h2c_cmd = 0;
+ u32 h2c_cmd_ex = 0;
+ s32 ret = _FAIL;
+
+_func_enter_;
+
+ if (!adapt->bFWReady) {
+ DBG_88E("FillH2CCmd_88E(): return H2C cmd because fw is not ready\n");
+ return ret;
+ }
+
+ if (!pCmdBuffer)
+ goto exit;
+ if (CmdLen > RTL88E_MAX_CMD_LEN)
+ goto exit;
+ if (adapt->bSurpriseRemoved)
+ goto exit;
+
+ /* Pay attention to whether a race condition happens in H2C cmd setting. */
+ do {
+ h2c_box_num = haldata->LastHMEBoxNum;
+
+ if (!_is_fw_read_cmd_down(adapt, h2c_box_num)) {
+ DBG_88E(" fw read cmd failed...\n");
+ goto exit;
+ }
+
+ *(u8 *)(&h2c_cmd) = ElementID;
+
+ if (CmdLen <= 3) {
+ memcpy((u8 *)(&h2c_cmd)+1, pCmdBuffer, CmdLen);
+ } else {
+ memcpy((u8 *)(&h2c_cmd)+1, pCmdBuffer, 3);
+ ext_cmd_len = CmdLen-3;
+ memcpy((u8 *)(&h2c_cmd_ex), pCmdBuffer+3, ext_cmd_len);
+
+ /* Write Ext command */
+ msgbox_ex_addr = REG_HMEBOX_EXT_0 + (h2c_box_num * RTL88E_EX_MESSAGE_BOX_SIZE);
+ for (cmd_idx = 0; cmd_idx < ext_cmd_len; cmd_idx++) {
+ rtw_write8(adapt, msgbox_ex_addr+cmd_idx, *((u8 *)(&h2c_cmd_ex)+cmd_idx));
+ }
+ }
+ /* Write command */
+ msgbox_addr = REG_HMEBOX_0 + (h2c_box_num * RTL88E_MESSAGE_BOX_SIZE);
+ for (cmd_idx = 0; cmd_idx < RTL88E_MESSAGE_BOX_SIZE; cmd_idx++) {
+ rtw_write8(adapt, msgbox_addr+cmd_idx, *((u8 *)(&h2c_cmd)+cmd_idx));
+ }
+ bcmd_down = true;
+
+ haldata->LastHMEBoxNum = (h2c_box_num+1) % RTL88E_MAX_H2C_BOX_NUMS;
+
+ } while ((!bcmd_down) && (retry_cnts--));
+
+ ret = _SUCCESS;
+
+exit:
+
+_func_exit_;
+
+ return ret;
+}
+
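The message-box layout described in the comment above FillH2CCmd_88E works out to: byte 0 of the 4-byte box carries the element (class/cmd) ID, bytes 1-3 carry the first part of the payload, and up to four more payload bytes spill into the extension box. A standalone sketch of that packing (buffer names are local to this example, not driver API):

#include <stdint.h>
#include <string.h>

/* Pack an H2C command the way FillH2CCmd_88E fills h2c_cmd/h2c_cmd_ex:
 * element ID in byte 0, payload bytes 1..3 in the main box, the rest
 * (cmd_len <= 7) in the 4-byte extension box. */
static void pack_h2c(uint8_t element_id, const uint8_t *cmd, uint8_t cmd_len,
		     uint8_t box[4], uint8_t ext_box[4])
{
	memset(box, 0, 4);
	memset(ext_box, 0, 4);

	box[0] = element_id;
	if (cmd_len <= 3) {
		memcpy(&box[1], cmd, cmd_len);
	} else {
		memcpy(&box[1], cmd, 3);
		memcpy(ext_box, cmd + 3, cmd_len - 3);
	}
}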
+u8 rtl8188e_set_rssi_cmd(struct adapter *adapt, u8 *param)
+{
+ u8 res = _SUCCESS;
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+_func_enter_;
+
+ if (haldata->fw_ractrl) {
+ ;
+ } else {
+ DBG_88E("==>%s fw dont support RA\n", __func__);
+ res = _FAIL;
+ }
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtl8188e_set_raid_cmd(struct adapter *adapt, u32 mask)
+{
+ u8 buf[3];
+ u8 res = _SUCCESS;
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+
+_func_enter_;
+ if (haldata->fw_ractrl) {
+ __le32 lmask;
+
+ _rtw_memset(buf, 0, 3);
+ lmask = cpu_to_le32(mask);
+ memcpy(buf, &lmask, 3);
+
+ FillH2CCmd_88E(adapt, H2C_DM_MACID_CFG, 3, buf);
+ } else {
+ DBG_88E("==>%s fw dont support RA\n", __func__);
+ res = _FAIL;
+ }
+
+_func_exit_;
+
+ return res;
+}
+
+/* bitmap[0:27] = tx_rate_bitmap */
+/* bitmap[28:31]= Rate Adaptive id */
+/* arg[0:4] = macid */
+/* arg[5] = Short GI */
+void rtl8188e_Add_RateATid(struct adapter *pAdapter, u32 bitmap, u8 arg, u8 rssi_level)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(pAdapter);
+
+ u8 macid, init_rate, raid, shortGIrate = false;
+
+ macid = arg&0x1f;
+
+ raid = (bitmap>>28) & 0x0f;
+ bitmap &= 0x0fffffff;
+
+ if (rssi_level != DM_RATR_STA_INIT)
+ bitmap = ODM_Get_Rate_Bitmap(&haldata->odmpriv, macid, bitmap, rssi_level);
+
+ bitmap |= ((raid<<28)&0xf0000000);
+
+ init_rate = get_highest_rate_idx(bitmap&0x0fffffff)&0x3f;
+
+ shortGIrate = (arg&BIT(5)) ? true : false;
+
+ if (shortGIrate)
+ init_rate |= BIT(6);
+
+ raid = (bitmap>>28) & 0x0f;
+
+ bitmap &= 0x0fffffff;
+
+ DBG_88E("%s=> mac_id:%d, raid:%d, ra_bitmap=0x%x, shortGIrate=0x%02x\n",
+ __func__, macid, raid, bitmap, shortGIrate);
+
+ ODM_RA_UpdateRateInfo_8188E(&(haldata->odmpriv), macid, raid, bitmap, shortGIrate);
+}
+
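The packed arguments described above rtl8188e_Add_RateATid decode as: bits 27..0 of bitmap are the TX rate bitmap, bits 31..28 the rate-adaptive ID (raid), arg bits 4..0 the MAC ID and arg bit 5 the short-GI flag. A standalone sketch of that decode (struct and field names are local to this example):

#include <stdint.h>
#include <stdio.h>

struct ra_args {
	uint8_t macid;        /* arg[4:0] */
	uint8_t short_gi;     /* arg[5] */
	uint8_t raid;         /* bitmap[31:28] */
	uint32_t rate_bitmap; /* bitmap[27:0] */
};

static struct ra_args decode_ra(uint32_t bitmap, uint8_t arg)
{
	struct ra_args out;

	out.macid = arg & 0x1f;
	out.short_gi = (arg >> 5) & 0x1;
	out.raid = (bitmap >> 28) & 0x0f;
	out.rate_bitmap = bitmap & 0x0fffffff;
	return out;
}

int main(void)
{
	/* raid 3, macid 1, short GI set */
	struct ra_args a = decode_ra(0x300fffff, 0x21);

	printf("macid=%u raid=%u sgi=%u bitmap=0x%07x\n",
	       a.macid, a.raid, a.short_gi, a.rate_bitmap);
	return 0;
}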
+void rtl8188e_set_FwPwrMode_cmd(struct adapter *adapt, u8 Mode)
+{
+ struct setpwrmode_parm H2CSetPwrMode;
+ struct pwrctrl_priv *pwrpriv = &adapt->pwrctrlpriv;
+ u8 RLBM = 0; /* 0:Min, 1:Max, 2:User define */
+_func_enter_;
+
+ DBG_88E("%s: Mode=%d SmartPS=%d UAPSD=%d\n", __func__,
+ Mode, pwrpriv->smart_ps, adapt->registrypriv.uapsd_enable);
+
+ switch (Mode) {
+ case PS_MODE_ACTIVE:
+ H2CSetPwrMode.Mode = 0;
+ break;
+ case PS_MODE_MIN:
+ H2CSetPwrMode.Mode = 1;
+ break;
+ case PS_MODE_MAX:
+ RLBM = 1;
+ H2CSetPwrMode.Mode = 1;
+ break;
+ case PS_MODE_DTIM:
+ RLBM = 2;
+ H2CSetPwrMode.Mode = 1;
+ break;
+ case PS_MODE_UAPSD_WMM:
+ H2CSetPwrMode.Mode = 2;
+ break;
+ default:
+ H2CSetPwrMode.Mode = 0;
+ break;
+ }
+
+ H2CSetPwrMode.SmartPS_RLBM = (((pwrpriv->smart_ps<<4)&0xf0) | (RLBM & 0x0f));
+
+ H2CSetPwrMode.AwakeInterval = 1;
+
+ H2CSetPwrMode.bAllQueueUAPSD = adapt->registrypriv.uapsd_enable;
+
+ if (Mode > 0)
+ H2CSetPwrMode.PwrState = 0x00;/* AllON(0x0C), RFON(0x04), RFOFF(0x00) */
+ else
+ H2CSetPwrMode.PwrState = 0x0C;/* AllON(0x0C), RFON(0x04), RFOFF(0x00) */
+
+ FillH2CCmd_88E(adapt, H2C_PS_PWR_MODE, sizeof(H2CSetPwrMode), (u8 *)&H2CSetPwrMode);
+
+_func_exit_;
+}
+
+void rtl8188e_set_FwMediaStatus_cmd(struct adapter *adapt, __le16 mstatus_rpt)
+{
+ u8 opmode, macid;
+ u16 mst_rpt = le16_to_cpu(mstatus_rpt);
+ opmode = (u8) mst_rpt;
+ macid = (u8)(mst_rpt >> 8);
+
+ DBG_88E("### %s: MStatus=%x MACID=%d\n", __func__, opmode, macid);
+ FillH2CCmd_88E(adapt, H2C_COM_MEDIA_STATUS_RPT, sizeof(mst_rpt), (u8 *)&mst_rpt);
+}
+
+static void ConstructBeacon(struct adapter *adapt, u8 *pframe, u32 *pLength)
+{
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ u16 *fctrl;
+ u32 rate_len, pktlen;
+ struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+ u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(adapt->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(cur_network), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, 0/*pmlmeext->mgnt_seq*/);
+ SetFrameSubType(pframe, WIFI_BEACON);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ /* timestamp will be inserted by hardware */
+ pframe += 8;
+ pktlen += 8;
+
+ /* beacon interval: 2 bytes */
+ memcpy(pframe, (unsigned char *)(rtw_get_beacon_interval_from_ie(cur_network->IEs)), 2);
+
+ pframe += 2;
+ pktlen += 2;
+
+ /* capability info: 2 bytes */
+ memcpy(pframe, (unsigned char *)(rtw_get_capability_from_ie(cur_network->IEs)), 2);
+
+ pframe += 2;
+ pktlen += 2;
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
+ pktlen += cur_network->IELength - sizeof(struct ndis_802_11_fixed_ie);
+ memcpy(pframe, cur_network->IEs+sizeof(struct ndis_802_11_fixed_ie), pktlen);
+
+ goto _ConstructBeacon;
+ }
+
+ /* below for ad-hoc mode */
+
+ /* SSID */
+ pframe = rtw_set_ie(pframe, _SSID_IE_, cur_network->Ssid.SsidLength, cur_network->Ssid.Ssid, &pktlen);
+
+ /* supported rates... */
+ rate_len = rtw_get_rateset_len(cur_network->SupportedRates);
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, ((rate_len > 8) ? 8 : rate_len), cur_network->SupportedRates, &pktlen);
+
+ /* DS parameter set */
+ pframe = rtw_set_ie(pframe, _DSSET_IE_, 1, (unsigned char *)&(cur_network->Configuration.DSConfig), &pktlen);
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) {
+ u32 ATIMWindow;
+ /* IBSS Parameter Set... */
+ ATIMWindow = 0;
+ pframe = rtw_set_ie(pframe, _IBSS_PARA_IE_, 2, (unsigned char *)(&ATIMWindow), &pktlen);
+ }
+
+ /* todo: ERP IE */
+
+ /* EXTENDED SUPPORTED RATES */
+ if (rate_len > 8)
+ pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (rate_len - 8), (cur_network->SupportedRates + 8), &pktlen);
+
+ /* todo:HT for adhoc */
+
+_ConstructBeacon:
+
+ if ((pktlen + TXDESC_SIZE) > 512) {
+ DBG_88E("beacon frame too large\n");
+ return;
+ }
+
+ *pLength = pktlen;
+}
+
+static void ConstructPSPoll(struct adapter *adapt, u8 *pframe, u32 *pLength)
+{
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u16 *fctrl;
+
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ /* Frame control. */
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+ SetPwrMgt(fctrl);
+ SetFrameSubType(pframe, WIFI_PSPOLL);
+
+ /* AID. */
+ SetDuration(pframe, (pmlmeinfo->aid | 0xc000));
+
+ /* BSSID. */
+ memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+ /* TA. */
+ memcpy(pwlanhdr->addr2, myid(&(adapt->eeprompriv)), ETH_ALEN);
+
+ *pLength = 16;
+}
+
+static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
+ u32 *pLength,
+ u8 *StaAddr,
+ u8 bQoS,
+ u8 AC,
+ u8 bEosp,
+ u8 bForcePowerSave)
+{
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ u16 *fctrl;
+ u32 pktlen;
+ struct mlme_priv *pmlmepriv = &adapt->mlmepriv;
+ struct wlan_network *cur_network = &pmlmepriv->cur_network;
+ struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &pwlanhdr->frame_ctl;
+ *(fctrl) = 0;
+ if (bForcePowerSave)
+ SetPwrMgt(fctrl);
+
+ switch (cur_network->network.InfrastructureMode) {
+ case Ndis802_11Infrastructure:
+ SetToDs(fctrl);
+ memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(adapt->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, StaAddr, ETH_ALEN);
+ break;
+ case Ndis802_11APMode:
+ SetFrDs(fctrl);
+ memcpy(pwlanhdr->addr1, StaAddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, myid(&(adapt->eeprompriv)), ETH_ALEN);
+ break;
+ case Ndis802_11IBSS:
+ default:
+ memcpy(pwlanhdr->addr1, StaAddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(adapt->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ break;
+ }
+
+ SetSeqNum(pwlanhdr, 0);
+
+ if (bQoS) {
+ struct rtw_ieee80211_hdr_3addr_qos *pwlanqoshdr;
+
+ SetFrameSubType(pframe, WIFI_QOS_DATA_NULL);
+
+ pwlanqoshdr = (struct rtw_ieee80211_hdr_3addr_qos *)pframe;
+ SetPriority(&pwlanqoshdr->qc, AC);
+ SetEOSP(&pwlanqoshdr->qc, bEosp);
+
+ pktlen = sizeof(struct rtw_ieee80211_hdr_3addr_qos);
+ } else {
+ SetFrameSubType(pframe, WIFI_DATA_NULL);
+
+ pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ }
+
+ *pLength = pktlen;
+}
+
+static void ConstructProbeRsp(struct adapter *adapt, u8 *pframe, u32 *pLength, u8 *StaAddr, bool bHideSSID)
+{
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ u16 *fctrl;
+ u8 *mac, *bssid;
+ u32 pktlen;
+ struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ mac = myid(&(adapt->eeprompriv));
+ bssid = cur_network->MacAddress;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+ memcpy(pwlanhdr->addr1, StaAddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, bssid, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, 0);
+ SetFrameSubType(fctrl, WIFI_PROBERSP);
+
+ pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += pktlen;
+
+ if (cur_network->IELength > MAX_IE_SZ)
+ return;
+
+ memcpy(pframe, cur_network->IEs, cur_network->IELength);
+ pframe += cur_network->IELength;
+ pktlen += cur_network->IELength;
+
+ *pLength = pktlen;
+}
+
+/* Check if the reserved page content is destroyed by the beacon because the beacon is too large. */
+/* 2010.06.23. Added by tynli. */
+void CheckFwRsvdPageContent(struct adapter *Adapter)
+{
+}
+
+/* */
+/* Description: Fill the reserved page (RSVD page) with the packets the FW will use. */
+/* For now we download four packet types to the reserved page: */
+/* (1) Beacon, (2) PS-Poll, (3) Null data, (4) Probe response. */
+/* Input: */
+/* bDLFinished - false: the first time, all the packets are sent to HW as one large packet, */
+/* so the packet length must be set to the total length. */
+/* true: the second time, the first packet (default: beacon) is sent to HW */
+/* again with the descriptor length set to the real beacon length. */
+/* 2009.10.15 by tynli. */
+static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
+{
+ struct hal_data_8188e *haldata;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ struct xmit_priv *pxmitpriv;
+ struct mlme_ext_priv *pmlmeext;
+ struct mlme_ext_info *pmlmeinfo;
+ u32 BeaconLength = 0, ProbeRspLength = 0, PSPollLength;
+ u32 NullDataLength, QosNullLength;
+ u8 *ReservedPagePacket;
+ u8 PageNum, PageNeed, TxDescLen;
+ u16 BufIndex;
+ u32 TotalPacketLen;
+ struct rsvdpage_loc RsvdPageLoc;
+
+ DBG_88E("%s\n", __func__);
+ ReservedPagePacket = (u8 *)rtw_zmalloc(1000);
+ if (ReservedPagePacket == NULL) {
+ DBG_88E("%s: alloc ReservedPagePacket fail!\n", __func__);
+ return;
+ }
+
+ haldata = GET_HAL_DATA(adapt);
+ pxmitpriv = &adapt->xmitpriv;
+ pmlmeext = &adapt->mlmeextpriv;
+ pmlmeinfo = &pmlmeext->mlmext_info;
+
+ TxDescLen = TXDESC_SIZE;
+ PageNum = 0;
+
+ /* 3 (1) beacon * 2 pages */
+ BufIndex = TXDESC_OFFSET;
+ ConstructBeacon(adapt, &ReservedPagePacket[BufIndex], &BeaconLength);
+
+ /* When counting the first page size, reserve room for the TX descriptor of the RSVD */
+ /* packet, which will be filled in front of the packet in TXPKTBUF. */
+ PageNeed = (u8)PageNum_128(TxDescLen + BeaconLength);
+ /* Reserve 2 pages for the beacon buffer. 2010.06.24. */
+ if (PageNeed == 1)
+ PageNeed += 1;
+ PageNum += PageNeed;
+ haldata->FwRsvdPageStartOffset = PageNum;
+
+ BufIndex += PageNeed*128;
+
+ /* 3 (2) PS-Poll * 1 page */
+ RsvdPageLoc.LocPsPoll = PageNum;
+ ConstructPSPoll(adapt, &ReservedPagePacket[BufIndex], &PSPollLength);
+ rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex-TxDescLen], PSPollLength, true, false);
+
+ PageNeed = (u8)PageNum_128(TxDescLen + PSPollLength);
+ PageNum += PageNeed;
+
+ BufIndex += PageNeed*128;
+
+ /* 3 (3) null data * 1 page */
+ RsvdPageLoc.LocNullData = PageNum;
+ ConstructNullFunctionData(adapt, &ReservedPagePacket[BufIndex], &NullDataLength, get_my_bssid(&pmlmeinfo->network), false, 0, 0, false);
+ rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex-TxDescLen], NullDataLength, false, false);
+
+ PageNeed = (u8)PageNum_128(TxDescLen + NullDataLength);
+ PageNum += PageNeed;
+
+ BufIndex += PageNeed*128;
+
+ /* 3 (4) probe response * 1page */
+ RsvdPageLoc.LocProbeRsp = PageNum;
+ ConstructProbeRsp(adapt, &ReservedPagePacket[BufIndex], &ProbeRspLength, get_my_bssid(&pmlmeinfo->network), false);
+ rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex-TxDescLen], ProbeRspLength, false, false);
+
+ PageNeed = (u8)PageNum_128(TxDescLen + ProbeRspLength);
+ PageNum += PageNeed;
+
+ BufIndex += PageNeed*128;
+
+ /* 3 (5) Qos null data */
+ RsvdPageLoc.LocQosNull = PageNum;
+ ConstructNullFunctionData(adapt, &ReservedPagePacket[BufIndex],
+ &QosNullLength, get_my_bssid(&pmlmeinfo->network), true, 0, 0, false);
+ rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex-TxDescLen], QosNullLength, false, false);
+
+ PageNeed = (u8)PageNum_128(TxDescLen + QosNullLength);
+ PageNum += PageNeed;
+
+ TotalPacketLen = BufIndex + QosNullLength;
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ goto exit;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(adapt, pattrib);
+ pattrib->qsel = 0x10;
+ pattrib->last_txcmdsz = TotalPacketLen - TXDESC_OFFSET;
+ pattrib->pktlen = pattrib->last_txcmdsz;
+ memcpy(pmgntframe->buf_addr, ReservedPagePacket, TotalPacketLen);
+
+ rtw_hal_mgnt_xmit(adapt, pmgntframe);
+
+ DBG_88E("%s: Set RSVD page location to Fw\n", __func__);
+ FillH2CCmd_88E(adapt, H2C_COM_RSVD_PAGE, sizeof(RsvdPageLoc), (u8 *)&RsvdPageLoc);
+
+exit:
+ kfree(ReservedPagePacket);
+}
+
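SetFwRsvdPagePkt above lays each reserved packet down on a 128-byte page boundary and records its page index in the rsvdpage_loc H2C. A minimal standalone sketch of that accounting, assuming PageNum_128() rounds a length up to whole 128-byte pages (the macro itself is defined elsewhere in the driver); names here are local to the example:

#include <stdint.h>

#define RSVD_PAGE_SIZE 128

/* Round a packet length (including its TX descriptor) up to 128-byte pages. */
static uint8_t pages_needed(uint32_t txdesc_len, uint32_t pkt_len)
{
	uint32_t total = txdesc_len + pkt_len;

	return (uint8_t)((total + RSVD_PAGE_SIZE - 1) / RSVD_PAGE_SIZE);
}

/* Record the packet's location (in pages), then advance the page counter
 * and the buffer index, as the BufIndex/PageNum bookkeeping above does. */
static void place_packet(uint8_t *page_num, uint16_t *buf_index,
			 uint32_t txdesc_len, uint32_t pkt_len, uint8_t *loc)
{
	uint8_t need = pages_needed(txdesc_len, pkt_len);

	*loc = *page_num;
	*page_num += need;
	*buf_index += need * RSVD_PAGE_SIZE;
}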
+void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ bool bSendBeacon = false;
+ bool bcn_valid = false;
+ u8 DLBcnCount = 0;
+ u32 poll = 0;
+
+_func_enter_;
+
+ DBG_88E("%s mstatus(%x)\n", __func__, mstatus);
+
+ if (mstatus == 1) {
+ /* We should set AID, correct TSF and enable HW seq before setting JoinBssReport to Fw in 88/92C. */
+ /* Suggested by filen. Added by tynli. */
+ rtw_write16(adapt, REG_BCN_PSR_RPT, (0xC000|pmlmeinfo->aid));
+ /* Do not set TSF again here or vWiFi beacon DMA INT will not work. */
+
+ /* Set REG_CR bit 8. DMA beacon by SW. */
+ haldata->RegCR_1 |= BIT0;
+ rtw_write8(adapt, REG_CR+1, haldata->RegCR_1);
+
+ /* Disable HW protection for the period reserved for HW beacon transmission. */
+ /* Fixes reserved page download failures caused by access collisions with the protection time. */
+ /* 2010.05.11. Added by tynli. */
+ rtw_write8(adapt, REG_BCN_CTRL, rtw_read8(adapt, REG_BCN_CTRL)&(~BIT(3)));
+ rtw_write8(adapt, REG_BCN_CTRL, rtw_read8(adapt, REG_BCN_CTRL)|BIT(4));
+
+ if (haldata->RegFwHwTxQCtrl&BIT6) {
+ DBG_88E("HalDownloadRSVDPage(): There is an Adapter is sending beacon.\n");
+ bSendBeacon = true;
+ }
+
+ /* Set FWHW_TXQ_CTRL 0x422[6]=0 to tell Hw the packet is not a real beacon frame. */
+ rtw_write8(adapt, REG_FWHW_TXQ_CTRL+2, (haldata->RegFwHwTxQCtrl&(~BIT6)));
+ haldata->RegFwHwTxQCtrl &= (~BIT6);
+
+ /* Clear beacon valid check bit. */
+ rtw_hal_set_hwreg(adapt, HW_VAR_BCN_VALID, NULL);
+ DLBcnCount = 0;
+ poll = 0;
+ do {
+ /* download rsvd page. */
+ SetFwRsvdPagePkt(adapt, false);
+ DLBcnCount++;
+ do {
+ rtw_yield_os();
+ /* rtw_mdelay_os(10); */
+ /* check rsvd page download OK. */
+ rtw_hal_get_hwreg(adapt, HW_VAR_BCN_VALID, (u8 *)(&bcn_valid));
+ poll++;
+ } while (!bcn_valid && (poll%10) != 0 && !adapt->bSurpriseRemoved && !adapt->bDriverStopped);
+ } while (!bcn_valid && DLBcnCount <= 100 && !adapt->bSurpriseRemoved && !adapt->bDriverStopped);
+
+ if (adapt->bSurpriseRemoved || adapt->bDriverStopped)
+ ;
+ else if (!bcn_valid)
+ DBG_88E("%s: 1 Download RSVD page failed! DLBcnCount:%u, poll:%u\n", __func__, DLBcnCount, poll);
+ else
+ DBG_88E("%s: 1 Download RSVD success! DLBcnCount:%u, poll:%u\n", __func__, DLBcnCount, poll);
+ /* */
+ /* We can only send the reserved page twice while the Tx thread is stopped (e.g. pnpsetpower) */
+ /* because we need to free the Tx BCN Desc used by the first reserved page packet. */
+ /* At run time we cannot get the Tx Desc until it is released in TxHandleInterrupt(), so we return */
+ /* the beacon TCB in the following code. 2011.11.23. by tynli. */
+ /* */
+
+ /* Enable Bcn */
+ rtw_write8(adapt, REG_BCN_CTRL, rtw_read8(adapt, REG_BCN_CTRL)|BIT(3));
+ rtw_write8(adapt, REG_BCN_CTRL, rtw_read8(adapt, REG_BCN_CTRL)&(~BIT(4)));
+
+ /* Check whether another adapter still wants to send beacons. */
+ /* If so, the original value of 0x422[6] will be 1, and we must not leave */
+ /* 0x422[6] set to 0 after downloading the reserved page, or HW will no longer */
+ /* be able to send the beacon. */
+ /* 2010.06.23. Added by tynli. */
+ if (bSendBeacon) {
+ rtw_write8(adapt, REG_FWHW_TXQ_CTRL+2, (haldata->RegFwHwTxQCtrl|BIT6));
+ haldata->RegFwHwTxQCtrl |= BIT6;
+ }
+
+ /* Update RSVD page location H2C to Fw. */
+ if (bcn_valid) {
+ rtw_hal_set_hwreg(adapt, HW_VAR_BCN_VALID, NULL);
+ DBG_88E("Set RSVD page location to Fw.\n");
+ }
+
+ /* Do not enable HW DMA BCN or it will cause Pcie interface hang by timing issue. 2011.11.24. by tynli. */
+ /* Clear CR[8] or the beacon packet will not be sent to TxBuf anymore. */
+ haldata->RegCR_1 &= (~BIT0);
+ rtw_write8(adapt, REG_CR+1, haldata->RegCR_1);
+ }
+_func_exit_;
+}
+
+void rtl8188e_set_p2p_ps_offload_cmd(struct adapter *adapt, u8 p2p_ps_state)
+{
+#ifdef CONFIG_88EU_P2P
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct wifidirect_info *pwdinfo = &(adapt->wdinfo);
+ struct P2P_PS_Offload_t *p2p_ps_offload = &haldata->p2p_ps_offload;
+ u8 i;
+
+_func_enter_;
+
+ switch (p2p_ps_state) {
+ case P2P_PS_DISABLE:
+ DBG_88E("P2P_PS_DISABLE\n");
+ _rtw_memset(p2p_ps_offload, 0, 1);
+ break;
+ case P2P_PS_ENABLE:
+ DBG_88E("P2P_PS_ENABLE\n");
+ /* update CTWindow value. */
+ if (pwdinfo->ctwindow > 0) {
+ p2p_ps_offload->CTWindow_En = 1;
+ rtw_write8(adapt, REG_P2P_CTWIN, pwdinfo->ctwindow);
+ }
+
+ /* hw only support 2 set of NoA */
+ for (i = 0; i < pwdinfo->noa_num; i++) {
+ /* To control the register setting for which NOA */
+ rtw_write8(adapt, REG_NOA_DESC_SEL, (i << 4));
+ if (i == 0)
+ p2p_ps_offload->NoA0_En = 1;
+ else
+ p2p_ps_offload->NoA1_En = 1;
+
+ /* config P2P NoA Descriptor Register */
+ rtw_write32(adapt, REG_NOA_DESC_DURATION, pwdinfo->noa_duration[i]);
+ rtw_write32(adapt, REG_NOA_DESC_INTERVAL, pwdinfo->noa_interval[i]);
+ rtw_write32(adapt, REG_NOA_DESC_START, pwdinfo->noa_start_time[i]);
+ rtw_write8(adapt, REG_NOA_DESC_COUNT, pwdinfo->noa_count[i]);
+ }
+
+ if ((pwdinfo->opp_ps == 1) || (pwdinfo->noa_num > 0)) {
+ /* rst p2p circuit */
+ rtw_write8(adapt, REG_DUAL_TSF_RST, BIT(4));
+
+ p2p_ps_offload->Offload_En = 1;
+
+ if (pwdinfo->role == P2P_ROLE_GO) {
+ p2p_ps_offload->role = 1;
+ p2p_ps_offload->AllStaSleep = 0;
+ } else {
+ p2p_ps_offload->role = 0;
+ }
+
+ p2p_ps_offload->discovery = 0;
+ }
+ break;
+ case P2P_PS_SCAN:
+ DBG_88E("P2P_PS_SCAN\n");
+ p2p_ps_offload->discovery = 1;
+ break;
+ case P2P_PS_SCAN_DONE:
+ DBG_88E("P2P_PS_SCAN_DONE\n");
+ p2p_ps_offload->discovery = 0;
+ pwdinfo->p2p_ps_state = P2P_PS_ENABLE;
+ break;
+ default:
+ break;
+ }
+
+ FillH2CCmd_88E(adapt, H2C_PS_P2P_OFFLOAD, 1, (u8 *)p2p_ps_offload);
+#endif
+
+_func_exit_;
+}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
new file mode 100644
index 0000000..ec0028d
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -0,0 +1,268 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+/* */
+/* Description: */
+/* */
+/* This file is for the RTL8188E dynamic mechanism only */
+/* */
+/* */
+/* */
+#define _RTL8188E_DM_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#include <rtl8188e_hal.h>
+
+static void dm_CheckStatistics(struct adapter *Adapter)
+{
+}
+
+/* Initialize GPIO setting registers */
+static void dm_InitGPIOSetting(struct adapter *Adapter)
+{
+ u8 tmp1byte;
+
+ tmp1byte = rtw_read8(Adapter, REG_GPIO_MUXCFG);
+ tmp1byte &= (GPIOSEL_GPIO | ~GPIOSEL_ENBT);
+
+ rtw_write8(Adapter, REG_GPIO_MUXCFG, tmp1byte);
+}
+
+/* */
+/* functions */
+/* */
+static void Init_ODM_ComInfo_88E(struct adapter *Adapter)
+{
+ struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &hal_data->dmpriv;
+ struct odm_dm_struct *dm_odm = &(hal_data->odmpriv);
+ u8 cut_ver, fab_ver;
+
+ /* Init Value */
+ _rtw_memset(dm_odm, 0, sizeof(*dm_odm));
+
+ dm_odm->Adapter = Adapter;
+
+ ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_PLATFORM, ODM_CE);
+
+ if (Adapter->interface_type == RTW_GSPI)
+ ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_INTERFACE, ODM_ITRF_SDIO);
+ else
+ ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_INTERFACE, Adapter->interface_type);/* RTL871X_HCI_TYPE */
+
+ ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_IC_TYPE, ODM_RTL8188E);
+
+ fab_ver = ODM_TSMC;
+ cut_ver = ODM_CUT_A;
+
+ ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_FAB_VER, fab_ver);
+ ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_CUT_VER, cut_ver);
+
+ ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_MP_TEST_CHIP, IS_NORMAL_CHIP(hal_data->VersionID));
+
+ ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_PATCH_ID, hal_data->CustomerID);
+ ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_BWIFI_TEST, Adapter->registrypriv.wifi_spec);
+
+
+ if (hal_data->rf_type == RF_1T1R)
+ ODM_CmnInfoUpdate(dm_odm, ODM_CMNINFO_RF_TYPE, ODM_1T1R);
+ else if (hal_data->rf_type == RF_2T2R)
+ ODM_CmnInfoUpdate(dm_odm, ODM_CMNINFO_RF_TYPE, ODM_2T2R);
+ else if (hal_data->rf_type == RF_1T2R)
+ ODM_CmnInfoUpdate(dm_odm, ODM_CMNINFO_RF_TYPE, ODM_1T2R);
+
+ ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_RF_ANTENNA_TYPE, hal_data->TRxAntDivType);
+
+ pdmpriv->InitODMFlag = ODM_RF_CALIBRATION |
+ ODM_RF_TX_PWR_TRACK;
+
+ ODM_CmnInfoUpdate(dm_odm, ODM_CMNINFO_ABILITY, pdmpriv->InitODMFlag);
+}
+
+static void Update_ODM_ComInfo_88E(struct adapter *Adapter)
+{
+ struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
+ struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
+ struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
+ struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct odm_dm_struct *dm_odm = &(hal_data->odmpriv);
+ struct dm_priv *pdmpriv = &hal_data->dmpriv;
+ int i;
+
+ pdmpriv->InitODMFlag = ODM_BB_DIG |
+ ODM_BB_RA_MASK |
+ ODM_BB_DYNAMIC_TXPWR |
+ ODM_BB_FA_CNT |
+ ODM_BB_RSSI_MONITOR |
+ ODM_BB_CCK_PD |
+ ODM_BB_PWR_SAVE |
+ ODM_MAC_EDCA_TURBO |
+ ODM_RF_CALIBRATION |
+ ODM_RF_TX_PWR_TRACK;
+ if (hal_data->AntDivCfg)
+ pdmpriv->InitODMFlag |= ODM_BB_ANT_DIV;
+
+ if (Adapter->registrypriv.mp_mode == 1) {
+ pdmpriv->InitODMFlag = ODM_RF_CALIBRATION |
+ ODM_RF_TX_PWR_TRACK;
+ }
+
+ ODM_CmnInfoUpdate(dm_odm, ODM_CMNINFO_ABILITY, pdmpriv->InitODMFlag);
+
+ ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_TX_UNI, &(Adapter->xmitpriv.tx_bytes));
+ ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_RX_UNI, &(Adapter->recvpriv.rx_bytes));
+ ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_WM_MODE, &(pmlmeext->cur_wireless_mode));
+ ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_SEC_CHNL_OFFSET, &(hal_data->nCur40MhzPrimeSC));
+ ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_SEC_MODE, &(Adapter->securitypriv.dot11PrivacyAlgrthm));
+ ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_BW, &(hal_data->CurrentChannelBW));
+ ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_CHNL, &(hal_data->CurrentChannel));
+ ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_NET_CLOSED, &(Adapter->net_closed));
+ ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_MP_MODE, &(Adapter->registrypriv.mp_mode));
+ ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_SCAN, &(pmlmepriv->bScanInProcess));
+ ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_POWER_SAVING, &(pwrctrlpriv->bpower_saving));
+ ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_RF_ANTENNA_TYPE, hal_data->TRxAntDivType);
+
+ for (i = 0; i < NUM_STA; i++)
+ ODM_CmnInfoPtrArrayHook(dm_odm, ODM_CMNINFO_STA_STATUS, i, NULL);
+}
+
+void rtl8188e_InitHalDm(struct adapter *Adapter)
+{
+ struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &hal_data->dmpriv;
+ struct odm_dm_struct *dm_odm = &(hal_data->odmpriv);
+
+ dm_InitGPIOSetting(Adapter);
+ pdmpriv->DM_Type = DM_Type_ByDriver;
+ pdmpriv->DMFlag = DYNAMIC_FUNC_DISABLE;
+ Update_ODM_ComInfo_88E(Adapter);
+ ODM_DMInit(dm_odm);
+ Adapter->fix_rate = 0xFF;
+}
+
+void rtl8188e_HalDmWatchDog(struct adapter *Adapter)
+{
+ bool fw_cur_in_ps = false;
+ bool fw_ps_awake = true;
+ u8 hw_init_completed = false;
+ struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+
+ _func_enter_;
+ hw_init_completed = Adapter->hw_init_completed;
+
+ if (!hw_init_completed)
+ goto skip_dm;
+
+ fw_cur_in_ps = Adapter->pwrctrlpriv.bFwCurrentInPSMode;
+ rtw_hal_get_hwreg(Adapter, HW_VAR_FWLPS_RF_ON, (u8 *)(&fw_ps_awake));
+
+ /* Fw is in p2p power-saving mode; the driver should stop the dynamic mechanism. */
+ /* modified by thomas. 2011.06.11. */
+ if (Adapter->wdinfo.p2p_ps_mode)
+ fw_ps_awake = false;
+
+ if (hw_init_completed && ((!fw_cur_in_ps) && fw_ps_awake)) {
+ /* Calculate Tx/Rx statistics. */
+ dm_CheckStatistics(Adapter);
+
+ _func_exit_;
+ }
+
+ /* ODM */
+ if (hw_init_completed) {
+ struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
+ u8 bLinked = false;
+
+ if ((check_fwstate(pmlmepriv, WIFI_AP_STATE)) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE))) {
+ if (Adapter->stapriv.asoc_sta_count > 2)
+ bLinked = true;
+ } else {/* Station mode */
+ if (check_fwstate(pmlmepriv, _FW_LINKED))
+ bLinked = true;
+ }
+
+ ODM_CmnInfoUpdate(&hal_data->odmpriv, ODM_CMNINFO_LINK, bLinked);
+ ODM_DMWatchdog(&hal_data->odmpriv);
+ }
+skip_dm:
+ /* Check GPIO to determine current RF on/off and Pbc status. */
+ /* Check Hardware Radio ON/OFF or not */
+ return;
+}
+
+void rtl8188e_init_dm_priv(struct adapter *Adapter)
+{
+ struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &hal_data->dmpriv;
+ struct odm_dm_struct *podmpriv = &hal_data->odmpriv;
+
+ _rtw_memset(pdmpriv, 0, sizeof(struct dm_priv));
+ Init_ODM_ComInfo_88E(Adapter);
+ ODM_InitDebugSetting(podmpriv);
+}
+
+void rtl8188e_deinit_dm_priv(struct adapter *Adapter)
+{
+}
+
+/* Add new function to reset the state of antenna diversity before link. */
+/* Compare RSSI for deciding antenna */
+void AntDivCompare8188E(struct adapter *Adapter, struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src)
+{
+ struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+
+ if (0 != hal_data->AntDivCfg) {
+ /* select optimum_antenna for before linked =>For antenna diversity */
+ if (dst->Rssi >= src->Rssi) {/* keep org parameter */
+ src->Rssi = dst->Rssi;
+ src->PhyInfo.Optimum_antenna = dst->PhyInfo.Optimum_antenna;
+ }
+ }
+}
+
+/* Add new function to reset the state of antenna diversity before link. */
+u8 AntDivBeforeLink8188E(struct adapter *Adapter)
+{
+ struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
+ struct sw_ant_switch *dm_swat_tbl = &dm_odm->DM_SWAT_Table;
+ struct mlme_priv *pmlmepriv = &(Adapter->mlmepriv);
+
+ /* Condition that does not need to use antenna diversity. */
+ if (hal_data->AntDivCfg == 0)
+ return false;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED))
+ return false;
+
+ if (dm_swat_tbl->SWAS_NoLink_State == 0) {
+ /* switch channel */
+ dm_swat_tbl->SWAS_NoLink_State = 1;
+ dm_swat_tbl->CurAntenna = (dm_swat_tbl->CurAntenna == Antenna_A) ? Antenna_B : Antenna_A;
+
+ rtw_antenna_select_cmd(Adapter, dm_swat_tbl->CurAntenna, false);
+ return true;
+ } else {
+ dm_swat_tbl->SWAS_NoLink_State = 0;
+ return false;
+ }
+}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
new file mode 100644
index 0000000..292ba62
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -0,0 +1,2378 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _HAL_INIT_C_
+
+#include <drv_types.h>
+#include <rtw_efuse.h>
+
+#include <rtl8188e_hal.h>
+
+#include <rtw_iol.h>
+
+#include <usb_ops.h>
+
+static void iol_mode_enable(struct adapter *padapter, u8 enable)
+{
+ u8 reg_0xf0 = 0;
+
+ if (enable) {
+ /* Enable initial offload */
+ reg_0xf0 = rtw_read8(padapter, REG_SYS_CFG);
+ rtw_write8(padapter, REG_SYS_CFG, reg_0xf0|SW_OFFLOAD_EN);
+
+ if (!padapter->bFWReady) {
+ DBG_88E("bFWReady == false call reset 8051...\n");
+ _8051Reset88E(padapter);
+ }
+
+ } else {
+ /* disable initial offload */
+ reg_0xf0 = rtw_read8(padapter, REG_SYS_CFG);
+ rtw_write8(padapter, REG_SYS_CFG, reg_0xf0 & ~SW_OFFLOAD_EN);
+ }
+}
+
+static s32 iol_execute(struct adapter *padapter, u8 control)
+{
+ s32 status = _FAIL;
+ u8 reg_0x88 = 0;
+ u32 start = 0, passing_time = 0;
+
+ control = control&0x0f;
+ reg_0x88 = rtw_read8(padapter, REG_HMEBOX_E0);
+ rtw_write8(padapter, REG_HMEBOX_E0, reg_0x88|control);
+
+ start = rtw_get_current_time();
+ while ((reg_0x88 = rtw_read8(padapter, REG_HMEBOX_E0)) & control &&
+ (passing_time = rtw_get_passing_time_ms(start)) < 1000) {
+ ;
+ }
+
+ reg_0x88 = rtw_read8(padapter, REG_HMEBOX_E0);
+ status = (reg_0x88 & control) ? _FAIL : _SUCCESS;
+ if (reg_0x88 & control<<4)
+ status = _FAIL;
+ return status;
+}
+
+static s32 iol_InitLLTTable(struct adapter *padapter, u8 txpktbuf_bndy)
+{
+ s32 rst = _SUCCESS;
+ iol_mode_enable(padapter, 1);
+ rtw_write8(padapter, REG_TDECTRL+1, txpktbuf_bndy);
+ rst = iol_execute(padapter, CMD_INIT_LLT);
+ iol_mode_enable(padapter, 0);
+ return rst;
+}
+
+static void
+efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf)
+{
+ u8 *efuseTbl = NULL;
+ u8 rtemp8;
+ u16 eFuse_Addr = 0;
+ u8 offset, wren;
+ u16 i, j;
+ u16 **eFuseWord = NULL;
+ u16 efuse_utilized = 0;
+ u8 u1temp = 0;
+
+ efuseTbl = (u8 *)rtw_zmalloc(EFUSE_MAP_LEN_88E);
+ if (efuseTbl == NULL) {
+ DBG_88E("%s: alloc efuseTbl fail!\n", __func__);
+ goto exit;
+ }
+
+ eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(u16));
+ if (eFuseWord == NULL) {
+ DBG_88E("%s: alloc eFuseWord fail!\n", __func__);
+ goto exit;
+ }
+
+ /* 0. Refresh efuse init map as all 0xFF. */
+ for (i = 0; i < EFUSE_MAX_SECTION_88E; i++)
+ for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++)
+ eFuseWord[i][j] = 0xFFFF;
+
+ /* */
+ /* 1. Read the first byte to check if efuse is empty!!! */
+ /* */
+ /* */
+ rtemp8 = *(phymap+eFuse_Addr);
+ if (rtemp8 != 0xFF) {
+ efuse_utilized++;
+ eFuse_Addr++;
+ } else {
+ DBG_88E("EFUSE is empty efuse_Addr-%d efuse_data =%x\n", eFuse_Addr, rtemp8);
+ goto exit;
+ }
+
+ /* */
+ /* 2. Read real efuse content. Filter PG header and every section data. */
+ /* */
+ while ((rtemp8 != 0xFF) && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN_88E)) {
+ /* Check PG header for section num. */
+ if ((rtemp8 & 0x1F) == 0x0F) { /* extended header */
+ u1temp = ((rtemp8 & 0xE0) >> 5);
+ rtemp8 = *(phymap+eFuse_Addr);
+ if ((rtemp8 & 0x0F) == 0x0F) {
+ eFuse_Addr++;
+ rtemp8 = *(phymap+eFuse_Addr);
+
+ if (rtemp8 != 0xFF && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN_88E))
+ eFuse_Addr++;
+ continue;
+ } else {
+ offset = ((rtemp8 & 0xF0) >> 1) | u1temp;
+ wren = (rtemp8 & 0x0F);
+ eFuse_Addr++;
+ }
+ } else {
+ offset = ((rtemp8 >> 4) & 0x0f);
+ wren = (rtemp8 & 0x0f);
+ }
+
+ if (offset < EFUSE_MAX_SECTION_88E) {
+ /* Get word enable value from PG header */
+ for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
+ /* Check word enable condition in the section */
+ if (!(wren & 0x01)) {
+ rtemp8 = *(phymap+eFuse_Addr);
+ eFuse_Addr++;
+ efuse_utilized++;
+ eFuseWord[offset][i] = (rtemp8 & 0xff);
+ if (eFuse_Addr >= EFUSE_REAL_CONTENT_LEN_88E)
+ break;
+ rtemp8 = *(phymap+eFuse_Addr);
+ eFuse_Addr++;
+ efuse_utilized++;
+ eFuseWord[offset][i] |= (((u16)rtemp8 << 8) & 0xff00);
+
+ if (eFuse_Addr >= EFUSE_REAL_CONTENT_LEN_88E)
+ break;
+ }
+ wren >>= 1;
+ }
+ }
+ /* Read next PG header */
+ rtemp8 = *(phymap+eFuse_Addr);
+
+ if (rtemp8 != 0xFF && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN_88E)) {
+ efuse_utilized++;
+ eFuse_Addr++;
+ }
+ }
+
+ /* */
+ /* 3. Collect 16 sections and 4 word unit into Efuse map. */
+ /* */
+ for (i = 0; i < EFUSE_MAX_SECTION_88E; i++) {
+ for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++) {
+ efuseTbl[(i*8)+(j*2)] = (eFuseWord[i][j] & 0xff);
+ efuseTbl[(i*8)+((j*2)+1)] = ((eFuseWord[i][j] >> 8) & 0xff);
+ }
+ }
+
+ /* */
+ /* 4. Copy from Efuse map to output pointer memory!!! */
+ /* */
+ for (i = 0; i < _size_byte; i++)
+ pbuf[i] = efuseTbl[_offset+i];
+
+ /* */
+ /* 5. Calculate Efuse utilization. */
+ /* */
+
+exit:
+ kfree(efuseTbl);
+
+ if (eFuseWord)
+ rtw_mfree2d((void *)eFuseWord, EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(u16));
+}
+
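efuse_phymap_to_logical above walks the physical map one PG header at a time: a normal header packs the section offset in the high nibble and the (active-low) word-enable bits in the low nibble, while a header whose low five bits are 0x0F announces an extended header whose second byte supplies the upper offset bits. A standalone decode sketch (struct and function names are local to this example):

#include <stdint.h>

struct pg_header {
	uint8_t extended; /* set when a second header byte was consumed */
	uint8_t offset;   /* efuse section number */
	uint8_t word_en;  /* active-low word-enable bits */
};

/* Decode a PG header; 'ext' is only examined when the first byte flags an
 * extended header, matching the parsing in efuse_phymap_to_logical. */
static struct pg_header decode_pg_header(uint8_t hdr, uint8_t ext)
{
	struct pg_header out = { 0, 0, 0 };

	if ((hdr & 0x1F) == 0x0F) {
		out.extended = 1;
		out.offset = ((ext & 0xF0) >> 1) | ((hdr & 0xE0) >> 5);
		out.word_en = ext & 0x0F;
	} else {
		out.offset = (hdr >> 4) & 0x0F;
		out.word_en = hdr & 0x0F;
	}
	return out;
}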
+static void efuse_read_phymap_from_txpktbuf(
+ struct adapter *adapter,
+ int bcnhead, /* beacon head, where FW stores the length (2 bytes) and the efuse physical map. */
+ u8 *content, /* buffer to store efuse physical map */
+ u16 *size /* for efuse content: max bytes to read; updated to the number of bytes read */
+ )
+{
+ u16 dbg_addr = 0;
+ u32 start = 0, passing_time = 0;
+ u8 reg_0x143 = 0;
+ u32 lo32 = 0, hi32 = 0;
+ u16 len = 0, count = 0;
+ int i = 0;
+ u16 limit = *size;
+
+ u8 *pos = content;
+
+ if (bcnhead < 0) /* if not valid */
+ bcnhead = rtw_read8(adapter, REG_TDECTRL+1);
+
+ DBG_88E("%s bcnhead:%d\n", __func__, bcnhead);
+
+ rtw_write8(adapter, REG_PKT_BUFF_ACCESS_CTRL, TXPKT_BUF_SELECT);
+
+ dbg_addr = bcnhead*128/8; /* 8-bytes addressing */
+
+ while (1) {
+ rtw_write16(adapter, REG_PKTBUF_DBG_ADDR, dbg_addr+i);
+
+ rtw_write8(adapter, REG_TXPKTBUF_DBG, 0);
+ start = rtw_get_current_time();
+ while (!(reg_0x143 = rtw_read8(adapter, REG_TXPKTBUF_DBG)) &&
+ (passing_time = rtw_get_passing_time_ms(start)) < 1000) {
+ DBG_88E("%s polling reg_0x143:0x%02x, reg_0x106:0x%02x\n", __func__, reg_0x143, rtw_read8(adapter, 0x106));
+ rtw_usleep_os(100);
+ }
+
+ lo32 = rtw_read32(adapter, REG_PKTBUF_DBG_DATA_L);
+ hi32 = rtw_read32(adapter, REG_PKTBUF_DBG_DATA_H);
+
+ if (i == 0) {
+ u8 lenc[2];
+ u16 lenbak, aaabak;
+ u16 aaa;
+ lenc[0] = rtw_read8(adapter, REG_PKTBUF_DBG_DATA_L);
+ lenc[1] = rtw_read8(adapter, REG_PKTBUF_DBG_DATA_L+1);
+
+ aaabak = le16_to_cpup((__le16 *)lenc);
+ lenbak = le16_to_cpu(*((__le16 *)lenc));
+ aaa = le16_to_cpup((__le16 *)&lo32);
+ len = le16_to_cpu(*((__le16 *)&lo32));
+
+ limit = (len-2 < limit) ? len-2 : limit;
+
+ DBG_88E("%s len:%u, lenbak:%u, aaa:%u, aaabak:%u\n", __func__, len, lenbak, aaa, aaabak);
+
+ memcpy(pos, ((u8 *)&lo32)+2, (limit >= count+2) ? 2 : limit-count);
+ count += (limit >= count+2) ? 2 : limit-count;
+ pos = content+count;
+
+ } else {
+ memcpy(pos, ((u8 *)&lo32), (limit >= count+4) ? 4 : limit-count);
+ count += (limit >= count+4) ? 4 : limit-count;
+ pos = content+count;
+ }
+
+ if (limit > count && len-2 > count) {
+ memcpy(pos, (u8 *)&hi32, (limit >= count+4) ? 4 : limit-count);
+ count += (limit >= count+4) ? 4 : limit-count;
+ pos = content+count;
+ }
+
+ if (limit <= count || len-2 <= count)
+ break;
+ i++;
+ }
+ rtw_write8(adapter, REG_PKT_BUFF_ACCESS_CTRL, DISABLE_TRXPKT_BUF_ACCESS);
+ DBG_88E("%s read count:%u\n", __func__, count);
+ *size = count;
+}
+
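+/*
+ * Read the efuse logical map via IO-Load: ask the FW to dump the physical
+ * efuse map into the TX packet buffer (CMD_READ_EFUSE_MAP), pull it back out
+ * through the packet-buffer debug port, then translate it into the logical
+ * map layout.
+ */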
+static s32 iol_read_efuse(struct adapter *padapter, u8 txpktbuf_bndy, u16 offset, u16 size_byte, u8 *logical_map)
+{
+ s32 status = _FAIL;
+ u8 physical_map[512];
+ u16 size = 512;
+
+ rtw_write8(padapter, REG_TDECTRL+1, txpktbuf_bndy);
+ _rtw_memset(physical_map, 0xFF, 512);
+ rtw_write8(padapter, REG_PKT_BUFF_ACCESS_CTRL, TXPKT_BUF_SELECT);
+ status = iol_execute(padapter, CMD_READ_EFUSE_MAP);
+ if (status == _SUCCESS)
+ efuse_read_phymap_from_txpktbuf(padapter, txpktbuf_bndy, physical_map, &size);
+ efuse_phymap_to_logical(physical_map, offset, size_byte, logical_map);
+ return status;
+}
+
+s32 rtl8188e_iol_efuse_patch(struct adapter *padapter)
+{
+ s32 result = _SUCCESS;
+
+ DBG_88E("==> %s\n", __func__);
+ if (rtw_IOL_applied(padapter)) {
+ iol_mode_enable(padapter, 1);
+ result = iol_execute(padapter, CMD_READ_EFUSE_MAP);
+ if (result == _SUCCESS)
+ result = iol_execute(padapter, CMD_EFUSE_PATCH);
+
+ iol_mode_enable(padapter, 0);
+ }
+ return result;
+}
+
+static s32 iol_ioconfig(struct adapter *padapter, u8 iocfg_bndy)
+{
+ s32 rst = _SUCCESS;
+
+ rtw_write8(padapter, REG_TDECTRL+1, iocfg_bndy);
+ rst = iol_execute(padapter, CMD_IOCONFIG);
+ return rst;
+}
+
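+/*
+ * Send a queued batch of IOL commands to the FW: append an END command
+ * (twice if the frame would land exactly on a USB bulk-size boundary),
+ * transmit the management frame, then apply each IO-config boundary
+ * (two pages per boundary) via CMD_IOCONFIG.
+ */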
+static int rtl8188e_IOL_exec_cmds_sync(struct adapter *adapter, struct xmit_frame *xmit_frame, u32 max_wating_ms, u32 bndy_cnt)
+{
+ struct pkt_attrib *pattrib = &xmit_frame->attrib;
+ u8 i;
+ int ret = _FAIL;
+
+ if (rtw_IOL_append_END_cmd(xmit_frame) != _SUCCESS)
+ goto exit;
+ if (rtw_usb_bulk_size_boundary(adapter, TXDESC_SIZE+pattrib->last_txcmdsz)) {
+ if (rtw_IOL_append_END_cmd(xmit_frame) != _SUCCESS)
+ goto exit;
+ }
+
+ dump_mgntframe_and_wait(adapter, xmit_frame, max_wating_ms);
+
+ iol_mode_enable(adapter, 1);
+ for (i = 0; i < bndy_cnt; i++) {
+ u8 page_no = 0;
+ page_no = i*2;
+ ret = iol_ioconfig(adapter, page_no);
+ if (ret != _SUCCESS)
+ break;
+ }
+ iol_mode_enable(adapter, 0);
+exit:
+ /* restore BCN_HEAD */
+ rtw_write8(adapter, REG_TDECTRL+1, 0);
+ return ret;
+}
+
+void rtw_IOL_cmd_tx_pkt_buf_dump(struct adapter *Adapter, int data_len)
+{
+ u32 fifo_data, reg_140;
+ u32 addr, rstatus, loop = 0;
+ u16 data_cnts = (data_len/8)+1;
+ u8 *pbuf = rtw_zvmalloc(data_len+10);
+ DBG_88E("###### %s ######\n", __func__);
+
+ rtw_write8(Adapter, REG_PKT_BUFF_ACCESS_CTRL, TXPKT_BUF_SELECT);
+ if (pbuf) {
+ for (addr = 0; addr < data_cnts; addr++) {
+ rtw_write32(Adapter, 0x140, addr);
+ rtw_usleep_os(2);
+ loop = 0;
+ do {
+ rstatus = (reg_140 = rtw_read32(Adapter, REG_PKTBUF_DBG_CTRL)&BIT24);
+ if (rstatus) {
+ fifo_data = rtw_read32(Adapter, REG_PKTBUF_DBG_DATA_L);
+ memcpy(pbuf+(addr*8), &fifo_data, 4);
+
+ fifo_data = rtw_read32(Adapter, REG_PKTBUF_DBG_DATA_H);
+ memcpy(pbuf+(addr*8+4), &fifo_data, 4);
+ }
+ rtw_usleep_os(2);
+ } while (!rstatus && (loop++ < 10));
+ }
+ rtw_IOL_cmd_buf_dump(Adapter, data_len, pbuf);
+ rtw_vmfree(pbuf, data_len+10);
+ }
+ DBG_88E("###### %s ######\n", __func__);
+}
+
+static void _FWDownloadEnable(struct adapter *padapter, bool enable)
+{
+ u8 tmp;
+
+ if (enable) {
+ /* MCU firmware download enable. */
+ tmp = rtw_read8(padapter, REG_MCUFWDL);
+ rtw_write8(padapter, REG_MCUFWDL, tmp | 0x01);
+
+ /* 8051 reset */
+ tmp = rtw_read8(padapter, REG_MCUFWDL+2);
+ rtw_write8(padapter, REG_MCUFWDL+2, tmp&0xf7);
+ } else {
+ /* MCU firmware download disable. */
+ tmp = rtw_read8(padapter, REG_MCUFWDL);
+ rtw_write8(padapter, REG_MCUFWDL, tmp&0xfe);
+
+ /* Reserved for fw extension. */
+ rtw_write8(padapter, REG_MCUFWDL+1, 0x00);
+ }
+}
+
+#define MAX_REG_BOLCK_SIZE 196
+
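+/*
+ * Write the firmware image to FW_8188E_START_ADDRESS in up to three phases:
+ * large MAX_REG_BOLCK_SIZE chunks first, then 8-byte chunks, then single
+ * bytes for whatever remains.
+ */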
+static int _BlockWrite(struct adapter *padapter, void *buffer, u32 buffSize)
+{
+ int ret = _SUCCESS;
+	u32	blockSize_p1 = 4;	/* (Default) Phase #1 : PCI must use 4-byte writes to download FW */
+	u32	blockSize_p2 = 8;	/* Phase #2 : Use 8-byte writes if Phase #1 used a larger block size */
+	u32	blockSize_p3 = 1;	/* Phase #3 : Use 1-byte writes for the remainder of the FW image */
+ u32 blockCount_p1 = 0, blockCount_p2 = 0, blockCount_p3 = 0;
+ u32 remainSize_p1 = 0, remainSize_p2 = 0;
+ u8 *bufferPtr = (u8 *)buffer;
+ u32 i = 0, offset = 0;
+
+ blockSize_p1 = MAX_REG_BOLCK_SIZE;
+
+ /* 3 Phase #1 */
+ blockCount_p1 = buffSize / blockSize_p1;
+ remainSize_p1 = buffSize % blockSize_p1;
+
+ if (blockCount_p1) {
+ RT_TRACE(_module_hal_init_c_, _drv_notice_,
+ ("_BlockWrite: [P1] buffSize(%d) blockSize_p1(%d) blockCount_p1(%d) remainSize_p1(%d)\n",
+ buffSize, blockSize_p1, blockCount_p1, remainSize_p1));
+ }
+
+ for (i = 0; i < blockCount_p1; i++) {
+ ret = rtw_writeN(padapter, (FW_8188E_START_ADDRESS + i * blockSize_p1), blockSize_p1, (bufferPtr + i * blockSize_p1));
+ if (ret == _FAIL)
+ goto exit;
+ }
+
+ /* 3 Phase #2 */
+ if (remainSize_p1) {
+ offset = blockCount_p1 * blockSize_p1;
+
+ blockCount_p2 = remainSize_p1/blockSize_p2;
+ remainSize_p2 = remainSize_p1%blockSize_p2;
+
+ if (blockCount_p2) {
+ RT_TRACE(_module_hal_init_c_, _drv_notice_,
+ ("_BlockWrite: [P2] buffSize_p2(%d) blockSize_p2(%d) blockCount_p2(%d) remainSize_p2(%d)\n",
+ (buffSize-offset), blockSize_p2 , blockCount_p2, remainSize_p2));
+ }
+
+ for (i = 0; i < blockCount_p2; i++) {
+ ret = rtw_writeN(padapter, (FW_8188E_START_ADDRESS + offset + i*blockSize_p2), blockSize_p2, (bufferPtr + offset + i*blockSize_p2));
+
+ if (ret == _FAIL)
+ goto exit;
+ }
+ }
+
+ /* 3 Phase #3 */
+ if (remainSize_p2) {
+ offset = (blockCount_p1 * blockSize_p1) + (blockCount_p2 * blockSize_p2);
+
+ blockCount_p3 = remainSize_p2 / blockSize_p3;
+
+ RT_TRACE(_module_hal_init_c_, _drv_notice_,
+ ("_BlockWrite: [P3] buffSize_p3(%d) blockSize_p3(%d) blockCount_p3(%d)\n",
+ (buffSize-offset), blockSize_p3, blockCount_p3));
+
+ for (i = 0; i < blockCount_p3; i++) {
+ ret = rtw_write8(padapter, (FW_8188E_START_ADDRESS + offset + i), *(bufferPtr + offset + i));
+
+ if (ret == _FAIL)
+ goto exit;
+ }
+ }
+
+exit:
+ return ret;
+}
+
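+/*
+ * Select the firmware download page by programming the low 3 bits of
+ * REG_MCUFWDL+2, then block-write the page contents.
+ */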
+static int _PageWrite(struct adapter *padapter, u32 page, void *buffer, u32 size)
+{
+ u8 value8;
+ u8 u8Page = (u8)(page & 0x07);
+
+ value8 = (rtw_read8(padapter, REG_MCUFWDL+2) & 0xF8) | u8Page;
+ rtw_write8(padapter, REG_MCUFWDL+2, value8);
+
+ return _BlockWrite(padapter, buffer, size);
+}
+
+static int _WriteFW(struct adapter *padapter, void *buffer, u32 size)
+{
+	/* Since we need to decide the FW download method dynamically, we call this function to get the chip version. */
+	/* We can remove _ReadChipVersion from ReadpadapterInfo8192C later. */
+ int ret = _SUCCESS;
+ u32 pageNums, remainSize;
+ u32 page, offset;
+ u8 *bufferPtr = (u8 *)buffer;
+
+ pageNums = size / MAX_PAGE_SIZE;
+ remainSize = size % MAX_PAGE_SIZE;
+
+ for (page = 0; page < pageNums; page++) {
+ offset = page * MAX_PAGE_SIZE;
+ ret = _PageWrite(padapter, page, bufferPtr+offset, MAX_PAGE_SIZE);
+
+ if (ret == _FAIL)
+ goto exit;
+ }
+ if (remainSize) {
+ offset = pageNums * MAX_PAGE_SIZE;
+ page = pageNums;
+ ret = _PageWrite(padapter, page, bufferPtr+offset, remainSize);
+
+ if (ret == _FAIL)
+ goto exit;
+ }
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("_WriteFW Done- for Normal chip.\n"));
+exit:
+ return ret;
+}
+
+void _8051Reset88E(struct adapter *padapter)
+{
+ u8 u1bTmp;
+
+ u1bTmp = rtw_read8(padapter, REG_SYS_FUNC_EN+1);
+ rtw_write8(padapter, REG_SYS_FUNC_EN+1, u1bTmp&(~BIT2));
+ rtw_write8(padapter, REG_SYS_FUNC_EN+1, u1bTmp|(BIT2));
+	DBG_88E("=====> _8051Reset88E(): 8051 reset success.\n");
+}
+
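+/*
+ * After download: poll for the checksum report, flag the image as ready
+ * (set MCUFWDL_RDY, clear WINTINI_RDY), reset the 8051, then poll
+ * WINTINI_RDY until the firmware reports that it is running.
+ */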
+static s32 _FWFreeToGo(struct adapter *padapter)
+{
+ u32 counter = 0;
+ u32 value32;
+
+ /* polling CheckSum report */
+ do {
+ value32 = rtw_read32(padapter, REG_MCUFWDL);
+ if (value32 & FWDL_ChkSum_rpt)
+ break;
+ } while (counter++ < POLLING_READY_TIMEOUT_COUNT);
+
+ if (counter >= POLLING_READY_TIMEOUT_COUNT) {
+ DBG_88E("%s: chksum report fail! REG_MCUFWDL:0x%08x\n", __func__, value32);
+ return _FAIL;
+ }
+ DBG_88E("%s: Checksum report OK! REG_MCUFWDL:0x%08x\n", __func__, value32);
+
+ value32 = rtw_read32(padapter, REG_MCUFWDL);
+ value32 |= MCUFWDL_RDY;
+ value32 &= ~WINTINI_RDY;
+ rtw_write32(padapter, REG_MCUFWDL, value32);
+
+ _8051Reset88E(padapter);
+
+ /* polling for FW ready */
+ counter = 0;
+ do {
+ value32 = rtw_read32(padapter, REG_MCUFWDL);
+ if (value32 & WINTINI_RDY) {
+ DBG_88E("%s: Polling FW ready success!! REG_MCUFWDL:0x%08x\n", __func__, value32);
+ return _SUCCESS;
+ }
+ rtw_udelay_os(5);
+ } while (counter++ < POLLING_READY_TIMEOUT_COUNT);
+
+ DBG_88E("%s: Polling FW ready fail!! REG_MCUFWDL:0x%08x\n", __func__, value32);
+ return _FAIL;
+}
+
+#define IS_FW_81xxC(padapter) (((GET_HAL_DATA(padapter))->FirmwareSignature & 0xFFF0) == 0x88C0)
+
+s32 rtl8188e_FirmwareDownload(struct adapter *padapter)
+{
+ s32 rtStatus = _SUCCESS;
+ u8 writeFW_retry = 0;
+ u32 fwdl_start_time;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+
+ u8 *FwImage;
+ u32 FwImageLen;
+ struct rt_firmware *pFirmware = NULL;
+ struct rt_firmware_hdr *pFwHdr = NULL;
+ u8 *pFirmwareBuf;
+ u32 FirmwareLen;
+
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("+%s\n", __func__));
+ pFirmware = (struct rt_firmware *)rtw_zmalloc(sizeof(struct rt_firmware));
+ if (!pFirmware) {
+ rtStatus = _FAIL;
+ goto Exit;
+ }
+
+ FwImage = (u8 *)Rtl8188E_FwImageArray;
+ FwImageLen = Rtl8188E_FWImgArrayLength;
+
+ pFirmware->eFWSource = FW_SOURCE_HEADER_FILE;
+
+ switch (pFirmware->eFWSource) {
+ case FW_SOURCE_IMG_FILE:
+ break;
+ case FW_SOURCE_HEADER_FILE:
+ if (FwImageLen > FW_8188E_SIZE) {
+ rtStatus = _FAIL;
+ RT_TRACE(_module_hal_init_c_, _drv_err_, ("Firmware size exceed 0x%X. Check it.\n", FW_8188E_SIZE));
+ goto Exit;
+ }
+
+ pFirmware->szFwBuffer = FwImage;
+ pFirmware->ulFwLength = FwImageLen;
+ break;
+ }
+ pFirmwareBuf = pFirmware->szFwBuffer;
+ FirmwareLen = pFirmware->ulFwLength;
+	DBG_88E_LEVEL(_drv_info_, "+%s: !bUsedWoWLANFw, FirmwareLen:%d+\n", __func__, FirmwareLen);
+
+ /* To Check Fw header. Added by tynli. 2009.12.04. */
+ pFwHdr = (struct rt_firmware_hdr *)pFirmware->szFwBuffer;
+
+ pHalData->FirmwareVersion = le16_to_cpu(pFwHdr->Version);
+ pHalData->FirmwareSubVersion = pFwHdr->Subversion;
+ pHalData->FirmwareSignature = le16_to_cpu(pFwHdr->Signature);
+
+ DBG_88E("%s: fw_ver =%d fw_subver =%d sig = 0x%x\n",
+ __func__, pHalData->FirmwareVersion, pHalData->FirmwareSubVersion, pHalData->FirmwareSignature);
+
+ if (IS_FW_HEADER_EXIST(pFwHdr)) {
+ /* Shift 32 bytes for FW header */
+ pFirmwareBuf = pFirmwareBuf + 32;
+ FirmwareLen = FirmwareLen - 32;
+ }
+
+	/* Suggested by Filen. If the 8051 is running RAM code, the driver should tell the FW to reset itself, */
+	/* otherwise the FW download will fail. 2010.02.01. by tynli. */
+ if (rtw_read8(padapter, REG_MCUFWDL) & RAM_DL_SEL) { /* 8051 RAM code */
+ rtw_write8(padapter, REG_MCUFWDL, 0x00);
+ _8051Reset88E(padapter);
+ }
+
+ _FWDownloadEnable(padapter, true);
+ fwdl_start_time = rtw_get_current_time();
+ while (1) {
+ /* reset the FWDL chksum */
+ rtw_write8(padapter, REG_MCUFWDL, rtw_read8(padapter, REG_MCUFWDL) | FWDL_ChkSum_rpt);
+
+ rtStatus = _WriteFW(padapter, pFirmwareBuf, FirmwareLen);
+
+ if (rtStatus == _SUCCESS ||
+ (rtw_get_passing_time_ms(fwdl_start_time) > 500 && writeFW_retry++ >= 3))
+ break;
+
+ DBG_88E("%s writeFW_retry:%u, time after fwdl_start_time:%ums\n",
+ __func__, writeFW_retry, rtw_get_passing_time_ms(fwdl_start_time)
+ );
+ }
+ _FWDownloadEnable(padapter, false);
+ if (_SUCCESS != rtStatus) {
+ DBG_88E("DL Firmware failed!\n");
+ goto Exit;
+ }
+
+ rtStatus = _FWFreeToGo(padapter);
+ if (_SUCCESS != rtStatus) {
+ DBG_88E("DL Firmware failed!\n");
+ goto Exit;
+ }
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("Firmware is ready to run!\n"));
+
+Exit:
+
+ kfree(pFirmware);
+ return rtStatus;
+}
+
+void rtl8188e_InitializeFirmwareVars(struct adapter *padapter)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+
+ /* Init Fw LPS related. */
+ padapter->pwrctrlpriv.bFwCurrentInPSMode = false;
+
+ /* Init H2C counter. by tynli. 2009.12.09. */
+ pHalData->LastHMEBoxNum = 0;
+}
+
+static void rtl8188e_free_hal_data(struct adapter *padapter)
+{
+_func_enter_;
+ kfree(padapter->HalData);
+ padapter->HalData = NULL;
+_func_exit_;
+}
+
+/* */
+/* Efuse related code */
+/* */
+enum {
+	VOLTAGE_V25 = 0x03,
+	LDOE25_SHIFT = 28,
+};
+
+static bool
+hal_EfusePgPacketWrite2ByteHeader(
+ struct adapter *pAdapter,
+ u8 efuseType,
+ u16 *pAddr,
+ struct pgpkt *pTargetPkt,
+ bool bPseudoTest);
+static bool
+hal_EfusePgPacketWrite1ByteHeader(
+ struct adapter *pAdapter,
+ u8 efuseType,
+ u16 *pAddr,
+ struct pgpkt *pTargetPkt,
+ bool bPseudoTest);
+static bool
+hal_EfusePgPacketWriteData(
+ struct adapter *pAdapter,
+ u8 efuseType,
+ u16 *pAddr,
+ struct pgpkt *pTargetPkt,
+ bool bPseudoTest);
+
+static void
+hal_EfusePowerSwitch_RTL8188E(
+ struct adapter *pAdapter,
+ u8 bWrite,
+ u8 PwrState)
+{
+ u8 tempval;
+ u16 tmpV16;
+
+ if (PwrState) {
+ rtw_write8(pAdapter, REG_EFUSE_ACCESS, EFUSE_ACCESS_ON);
+
+		/* 1.2V Power: From VDDON with Power Cut (0x0000h[15]), default valid */
+ tmpV16 = rtw_read16(pAdapter, REG_SYS_ISO_CTRL);
+ if (!(tmpV16 & PWC_EV12V)) {
+ tmpV16 |= PWC_EV12V;
+ rtw_write16(pAdapter, REG_SYS_ISO_CTRL, tmpV16);
+ }
+ /* Reset: 0x0000h[28], default valid */
+ tmpV16 = rtw_read16(pAdapter, REG_SYS_FUNC_EN);
+ if (!(tmpV16 & FEN_ELDR)) {
+ tmpV16 |= FEN_ELDR;
+ rtw_write16(pAdapter, REG_SYS_FUNC_EN, tmpV16);
+ }
+
+ /* Clock: Gated(0x0008h[5]) 8M(0x0008h[1]) clock from ANA, default valid */
+ tmpV16 = rtw_read16(pAdapter, REG_SYS_CLKR);
+ if ((!(tmpV16 & LOADER_CLK_EN)) || (!(tmpV16 & ANA8M))) {
+ tmpV16 |= (LOADER_CLK_EN | ANA8M);
+ rtw_write16(pAdapter, REG_SYS_CLKR, tmpV16);
+ }
+
+ if (bWrite) {
+ /* Enable LDO 2.5V before read/write action */
+ tempval = rtw_read8(pAdapter, EFUSE_TEST+3);
+ tempval &= 0x0F;
+ tempval |= (VOLTAGE_V25 << 4);
+ rtw_write8(pAdapter, EFUSE_TEST+3, (tempval | 0x80));
+ }
+ } else {
+ rtw_write8(pAdapter, REG_EFUSE_ACCESS, EFUSE_ACCESS_OFF);
+
+ if (bWrite) {
+ /* Disable LDO 2.5V after read/write action */
+ tempval = rtw_read8(pAdapter, EFUSE_TEST+3);
+ rtw_write8(pAdapter, EFUSE_TEST+3, (tempval & 0x7F));
+ }
+ }
+}
+
+static void
+rtl8188e_EfusePowerSwitch(
+ struct adapter *pAdapter,
+ u8 bWrite,
+ u8 PwrState)
+{
+ hal_EfusePowerSwitch_RTL8188E(pAdapter, bWrite, PwrState);
+}
+
+
+static void Hal_EfuseReadEFuse88E(struct adapter *Adapter,
+ u16 _offset,
+ u16 _size_byte,
+ u8 *pbuf,
+ bool bPseudoTest
+ )
+{
+ u8 *efuseTbl = NULL;
+ u8 rtemp8[1];
+ u16 eFuse_Addr = 0;
+ u8 offset, wren;
+ u16 i, j;
+ u16 **eFuseWord = NULL;
+ u16 efuse_utilized = 0;
+ u8 u1temp = 0;
+
+ /* */
+	/* Do NOT exceed the total size of the EFuse table. Added by Roger, 2008.11.10. */
+	/* */
+	if ((_offset + _size_byte) > EFUSE_MAP_LEN_88E) {/* total E-Fuse table is 512 bytes */
+ DBG_88E("Hal_EfuseReadEFuse88E(): Invalid offset(%#x) with read bytes(%#x)!!\n", _offset, _size_byte);
+ goto exit;
+ }
+
+ efuseTbl = (u8 *)rtw_zmalloc(EFUSE_MAP_LEN_88E);
+ if (efuseTbl == NULL) {
+ DBG_88E("%s: alloc efuseTbl fail!\n", __func__);
+ goto exit;
+ }
+
+ eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(u16));
+ if (eFuseWord == NULL) {
+ DBG_88E("%s: alloc eFuseWord fail!\n", __func__);
+ goto exit;
+ }
+
+	/* 0. Refresh the efuse init map as all 0xFF. */
+ for (i = 0; i < EFUSE_MAX_SECTION_88E; i++)
+ for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++)
+ eFuseWord[i][j] = 0xFFFF;
+
+ /* */
+ /* 1. Read the first byte to check if efuse is empty!!! */
+	/* */
+ ReadEFuseByte(Adapter, eFuse_Addr, rtemp8, bPseudoTest);
+ if (*rtemp8 != 0xFF) {
+ efuse_utilized++;
+ eFuse_Addr++;
+ } else {
+ DBG_88E("EFUSE is empty efuse_Addr-%d efuse_data =%x\n", eFuse_Addr, *rtemp8);
+ goto exit;
+ }
+
+ /* */
+ /* 2. Read real efuse content. Filter PG header and every section data. */
+ /* */
+ while ((*rtemp8 != 0xFF) && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN_88E)) {
+ /* Check PG header for section num. */
+ if ((*rtemp8 & 0x1F) == 0x0F) { /* extended header */
+ u1temp = ((*rtemp8 & 0xE0) >> 5);
+
+ ReadEFuseByte(Adapter, eFuse_Addr, rtemp8, bPseudoTest);
+
+ if ((*rtemp8 & 0x0F) == 0x0F) {
+ eFuse_Addr++;
+ ReadEFuseByte(Adapter, eFuse_Addr, rtemp8, bPseudoTest);
+
+ if (*rtemp8 != 0xFF && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN_88E))
+ eFuse_Addr++;
+ continue;
+ } else {
+ offset = ((*rtemp8 & 0xF0) >> 1) | u1temp;
+ wren = (*rtemp8 & 0x0F);
+ eFuse_Addr++;
+ }
+ } else {
+ offset = ((*rtemp8 >> 4) & 0x0f);
+ wren = (*rtemp8 & 0x0f);
+ }
+
+ if (offset < EFUSE_MAX_SECTION_88E) {
+ /* Get word enable value from PG header */
+
+ for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
+ /* Check word enable condition in the section */
+ if (!(wren & 0x01)) {
+ ReadEFuseByte(Adapter, eFuse_Addr, rtemp8, bPseudoTest);
+ eFuse_Addr++;
+ efuse_utilized++;
+ eFuseWord[offset][i] = (*rtemp8 & 0xff);
+ if (eFuse_Addr >= EFUSE_REAL_CONTENT_LEN_88E)
+ break;
+ ReadEFuseByte(Adapter, eFuse_Addr, rtemp8, bPseudoTest);
+ eFuse_Addr++;
+ efuse_utilized++;
+ eFuseWord[offset][i] |= (((u16)*rtemp8 << 8) & 0xff00);
+ if (eFuse_Addr >= EFUSE_REAL_CONTENT_LEN_88E)
+ break;
+ }
+ wren >>= 1;
+ }
+ }
+
+ /* Read next PG header */
+ ReadEFuseByte(Adapter, eFuse_Addr, rtemp8, bPseudoTest);
+
+ if (*rtemp8 != 0xFF && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN_88E)) {
+ efuse_utilized++;
+ eFuse_Addr++;
+ }
+ }
+
+ /* 3. Collect 16 sections and 4 word unit into Efuse map. */
+ for (i = 0; i < EFUSE_MAX_SECTION_88E; i++) {
+ for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++) {
+ efuseTbl[(i*8)+(j*2)] = (eFuseWord[i][j] & 0xff);
+ efuseTbl[(i*8)+((j*2)+1)] = ((eFuseWord[i][j] >> 8) & 0xff);
+ }
+ }
+
+ /* 4. Copy from Efuse map to output pointer memory!!! */
+ for (i = 0; i < _size_byte; i++)
+ pbuf[i] = efuseTbl[_offset+i];
+
+ /* 5. Calculate Efuse utilization. */
+ rtw_hal_set_hwreg(Adapter, HW_VAR_EFUSE_BYTES, (u8 *)&eFuse_Addr);
+
+exit:
+ kfree(efuseTbl);
+
+ if (eFuseWord)
+ rtw_mfree2d((void *)eFuseWord, EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(u16));
+}
+
+static void ReadEFuseByIC(struct adapter *Adapter, u8 efuseType, u16 _offset, u16 _size_byte, u8 *pbuf, bool bPseudoTest)
+{
+ if (!bPseudoTest) {
+ int ret = _FAIL;
+ if (rtw_IOL_applied(Adapter)) {
+ rtw_hal_power_on(Adapter);
+
+ iol_mode_enable(Adapter, 1);
+ ret = iol_read_efuse(Adapter, 0, _offset, _size_byte, pbuf);
+ iol_mode_enable(Adapter, 0);
+
+ if (_SUCCESS == ret)
+ goto exit;
+ }
+ }
+ Hal_EfuseReadEFuse88E(Adapter, _offset, _size_byte, pbuf, bPseudoTest);
+
+exit:
+ return;
+}
+
+static void ReadEFuse_Pseudo(struct adapter *Adapter, u8 efuseType, u16 _offset, u16 _size_byte, u8 *pbuf, bool bPseudoTest)
+{
+ Hal_EfuseReadEFuse88E(Adapter, _offset, _size_byte, pbuf, bPseudoTest);
+}
+
+static void rtl8188e_ReadEFuse(struct adapter *Adapter, u8 efuseType,
+ u16 _offset, u16 _size_byte, u8 *pbuf,
+ bool bPseudoTest)
+{
+ if (bPseudoTest)
+		ReadEFuse_Pseudo(Adapter, efuseType, _offset, _size_byte, pbuf, bPseudoTest);
+ else
+ ReadEFuseByIC(Adapter, efuseType, _offset, _size_byte, pbuf, bPseudoTest);
+}
+
+/* Do not support BT */
+static void Hal_EFUSEGetEfuseDefinition88E(struct adapter *pAdapter, u8 efuseType, u8 type, void *pOut)
+{
+ switch (type) {
+ case TYPE_EFUSE_MAX_SECTION:
+ {
+ u8 *pMax_section;
+ pMax_section = (u8 *)pOut;
+ *pMax_section = EFUSE_MAX_SECTION_88E;
+ }
+ break;
+ case TYPE_EFUSE_REAL_CONTENT_LEN:
+ {
+ u16 *pu2Tmp;
+ pu2Tmp = (u16 *)pOut;
+ *pu2Tmp = EFUSE_REAL_CONTENT_LEN_88E;
+ }
+ break;
+ case TYPE_EFUSE_CONTENT_LEN_BANK:
+ {
+ u16 *pu2Tmp;
+ pu2Tmp = (u16 *)pOut;
+ *pu2Tmp = EFUSE_REAL_CONTENT_LEN_88E;
+ }
+ break;
+ case TYPE_AVAILABLE_EFUSE_BYTES_BANK:
+ {
+ u16 *pu2Tmp;
+ pu2Tmp = (u16 *)pOut;
+ *pu2Tmp = (u16)(EFUSE_REAL_CONTENT_LEN_88E-EFUSE_OOB_PROTECT_BYTES_88E);
+ }
+ break;
+ case TYPE_AVAILABLE_EFUSE_BYTES_TOTAL:
+ {
+ u16 *pu2Tmp;
+ pu2Tmp = (u16 *)pOut;
+ *pu2Tmp = (u16)(EFUSE_REAL_CONTENT_LEN_88E-EFUSE_OOB_PROTECT_BYTES_88E);
+ }
+ break;
+ case TYPE_EFUSE_MAP_LEN:
+ {
+ u16 *pu2Tmp;
+ pu2Tmp = (u16 *)pOut;
+ *pu2Tmp = (u16)EFUSE_MAP_LEN_88E;
+ }
+ break;
+ case TYPE_EFUSE_PROTECT_BYTES_BANK:
+ {
+ u8 *pu1Tmp;
+ pu1Tmp = (u8 *)pOut;
+ *pu1Tmp = (u8)(EFUSE_OOB_PROTECT_BYTES_88E);
+ }
+ break;
+ default:
+ {
+ u8 *pu1Tmp;
+ pu1Tmp = (u8 *)pOut;
+ *pu1Tmp = 0;
+ }
+ break;
+ }
+}
+
+static void Hal_EFUSEGetEfuseDefinition_Pseudo88E(struct adapter *pAdapter, u8 efuseType, u8 type, void *pOut)
+{
+ switch (type) {
+ case TYPE_EFUSE_MAX_SECTION:
+ {
+ u8 *pMax_section;
+ pMax_section = (u8 *)pOut;
+ *pMax_section = EFUSE_MAX_SECTION_88E;
+ }
+ break;
+ case TYPE_EFUSE_REAL_CONTENT_LEN:
+ {
+ u16 *pu2Tmp;
+ pu2Tmp = (u16 *)pOut;
+ *pu2Tmp = EFUSE_REAL_CONTENT_LEN_88E;
+ }
+ break;
+ case TYPE_EFUSE_CONTENT_LEN_BANK:
+ {
+ u16 *pu2Tmp;
+ pu2Tmp = (u16 *)pOut;
+ *pu2Tmp = EFUSE_REAL_CONTENT_LEN_88E;
+ }
+ break;
+ case TYPE_AVAILABLE_EFUSE_BYTES_BANK:
+ {
+ u16 *pu2Tmp;
+ pu2Tmp = (u16 *)pOut;
+ *pu2Tmp = (u16)(EFUSE_REAL_CONTENT_LEN_88E-EFUSE_OOB_PROTECT_BYTES_88E);
+ }
+ break;
+ case TYPE_AVAILABLE_EFUSE_BYTES_TOTAL:
+ {
+ u16 *pu2Tmp;
+ pu2Tmp = (u16 *)pOut;
+ *pu2Tmp = (u16)(EFUSE_REAL_CONTENT_LEN_88E-EFUSE_OOB_PROTECT_BYTES_88E);
+ }
+ break;
+ case TYPE_EFUSE_MAP_LEN:
+ {
+ u16 *pu2Tmp;
+ pu2Tmp = (u16 *)pOut;
+ *pu2Tmp = (u16)EFUSE_MAP_LEN_88E;
+ }
+ break;
+ case TYPE_EFUSE_PROTECT_BYTES_BANK:
+ {
+ u8 *pu1Tmp;
+ pu1Tmp = (u8 *)pOut;
+ *pu1Tmp = (u8)(EFUSE_OOB_PROTECT_BYTES_88E);
+ }
+ break;
+ default:
+ {
+ u8 *pu1Tmp;
+ pu1Tmp = (u8 *)pOut;
+ *pu1Tmp = 0;
+ }
+ break;
+ }
+}
+
+static void rtl8188e_EFUSE_GetEfuseDefinition(struct adapter *pAdapter, u8 efuseType, u8 type, void *pOut, bool bPseudoTest)
+{
+ if (bPseudoTest)
+ Hal_EFUSEGetEfuseDefinition_Pseudo88E(pAdapter, efuseType, type, pOut);
+ else
+ Hal_EFUSEGetEfuseDefinition88E(pAdapter, efuseType, type, pOut);
+}
+
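+/*
+ * word_en bits are active low: for every enabled word, write its two bytes
+ * and read them back, clearing the matching bit in badworden on a mismatch.
+ * A return value of 0x0F therefore means every written word verified OK.
+ */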
+static u8 Hal_EfuseWordEnableDataWrite(struct adapter *pAdapter, u16 efuse_addr, u8 word_en, u8 *data, bool bPseudoTest)
+{
+ u16 tmpaddr = 0;
+ u16 start_addr = efuse_addr;
+ u8 badworden = 0x0F;
+ u8 tmpdata[8];
+
+ _rtw_memset((void *)tmpdata, 0xff, PGPKT_DATA_SIZE);
+
+ if (!(word_en&BIT0)) {
+ tmpaddr = start_addr;
+ efuse_OneByteWrite(pAdapter, start_addr++, data[0], bPseudoTest);
+ efuse_OneByteWrite(pAdapter, start_addr++, data[1], bPseudoTest);
+
+ efuse_OneByteRead(pAdapter, tmpaddr, &tmpdata[0], bPseudoTest);
+ efuse_OneByteRead(pAdapter, tmpaddr+1, &tmpdata[1], bPseudoTest);
+ if ((data[0] != tmpdata[0]) || (data[1] != tmpdata[1]))
+ badworden &= (~BIT0);
+ }
+ if (!(word_en&BIT1)) {
+ tmpaddr = start_addr;
+ efuse_OneByteWrite(pAdapter, start_addr++, data[2], bPseudoTest);
+ efuse_OneByteWrite(pAdapter, start_addr++, data[3], bPseudoTest);
+
+ efuse_OneByteRead(pAdapter, tmpaddr , &tmpdata[2], bPseudoTest);
+ efuse_OneByteRead(pAdapter, tmpaddr+1, &tmpdata[3], bPseudoTest);
+ if ((data[2] != tmpdata[2]) || (data[3] != tmpdata[3]))
+ badworden &= (~BIT1);
+ }
+ if (!(word_en&BIT2)) {
+ tmpaddr = start_addr;
+ efuse_OneByteWrite(pAdapter, start_addr++, data[4], bPseudoTest);
+ efuse_OneByteWrite(pAdapter, start_addr++, data[5], bPseudoTest);
+
+ efuse_OneByteRead(pAdapter, tmpaddr, &tmpdata[4], bPseudoTest);
+ efuse_OneByteRead(pAdapter, tmpaddr+1, &tmpdata[5], bPseudoTest);
+ if ((data[4] != tmpdata[4]) || (data[5] != tmpdata[5]))
+ badworden &= (~BIT2);
+ }
+ if (!(word_en&BIT3)) {
+ tmpaddr = start_addr;
+ efuse_OneByteWrite(pAdapter, start_addr++, data[6], bPseudoTest);
+ efuse_OneByteWrite(pAdapter, start_addr++, data[7], bPseudoTest);
+
+ efuse_OneByteRead(pAdapter, tmpaddr, &tmpdata[6], bPseudoTest);
+ efuse_OneByteRead(pAdapter, tmpaddr+1, &tmpdata[7], bPseudoTest);
+ if ((data[6] != tmpdata[6]) || (data[7] != tmpdata[7]))
+ badworden &= (~BIT3);
+ }
+ return badworden;
+}
+
+static u8 Hal_EfuseWordEnableDataWrite_Pseudo(struct adapter *pAdapter, u16 efuse_addr, u8 word_en, u8 *data, bool bPseudoTest)
+{
+ u8 ret;
+
+ ret = Hal_EfuseWordEnableDataWrite(pAdapter, efuse_addr, word_en, data, bPseudoTest);
+ return ret;
+}
+
+static u8 rtl8188e_Efuse_WordEnableDataWrite(struct adapter *pAdapter, u16 efuse_addr, u8 word_en, u8 *data, bool bPseudoTest)
+{
+ u8 ret = 0;
+
+ if (bPseudoTest)
+		ret = Hal_EfuseWordEnableDataWrite_Pseudo(pAdapter, efuse_addr, word_en, data, bPseudoTest);
+ else
+ ret = Hal_EfuseWordEnableDataWrite(pAdapter, efuse_addr, word_en, data, bPseudoTest);
+ return ret;
+}
+
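+/*
+ * Compute the number of physical efuse bytes already used: start from the
+ * last known used offset, then walk the PG headers (skipping one extra byte
+ * for extended headers) and the word_cnts*2 data bytes of each section until
+ * a 0xFF header is found, and store the result back.
+ */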
+static u16 hal_EfuseGetCurrentSize_8188e(struct adapter *pAdapter, bool bPseudoTest)
+{
+ int bContinual = true;
+ u16 efuse_addr = 0;
+ u8 hoffset = 0, hworden = 0;
+ u8 efuse_data, word_cnts = 0;
+
+ if (bPseudoTest)
+ efuse_addr = (u16)(fakeEfuseUsedBytes);
+ else
+ rtw_hal_get_hwreg(pAdapter, HW_VAR_EFUSE_BYTES, (u8 *)&efuse_addr);
+
+ while (bContinual &&
+ efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data, bPseudoTest) &&
+ AVAILABLE_EFUSE_ADDR(efuse_addr)) {
+ if (efuse_data != 0xFF) {
+ if ((efuse_data&0x1F) == 0x0F) { /* extended header */
+ hoffset = efuse_data;
+ efuse_addr++;
+ efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data, bPseudoTest);
+ if ((efuse_data & 0x0F) == 0x0F) {
+ efuse_addr++;
+ continue;
+ } else {
+ hoffset = ((hoffset & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1);
+ hworden = efuse_data & 0x0F;
+ }
+ } else {
+ hoffset = (efuse_data>>4) & 0x0F;
+ hworden = efuse_data & 0x0F;
+ }
+ word_cnts = Efuse_CalculateWordCnts(hworden);
+ /* read next header */
+ efuse_addr = efuse_addr + (word_cnts*2)+1;
+ } else {
+ bContinual = false;
+ }
+ }
+
+ if (bPseudoTest)
+ fakeEfuseUsedBytes = efuse_addr;
+ else
+ rtw_hal_set_hwreg(pAdapter, HW_VAR_EFUSE_BYTES, (u8 *)&efuse_addr);
+
+ return efuse_addr;
+}
+
+static u16 Hal_EfuseGetCurrentSize_Pseudo(struct adapter *pAdapter, bool bPseudoTest)
+{
+ u16 ret = 0;
+
+ ret = hal_EfuseGetCurrentSize_8188e(pAdapter, bPseudoTest);
+ return ret;
+}
+
+static u16 rtl8188e_EfuseGetCurrentSize(struct adapter *pAdapter, u8 efuseType, bool bPseudoTest)
+{
+ u16 ret = 0;
+
+ if (bPseudoTest)
+ ret = Hal_EfuseGetCurrentSize_Pseudo(pAdapter, bPseudoTest);
+ else
+ ret = hal_EfuseGetCurrentSize_8188e(pAdapter, bPseudoTest);
+ return ret;
+}
+
+static int hal_EfusePgPacketRead_8188e(struct adapter *pAdapter, u8 offset, u8 *data, bool bPseudoTest)
+{
+ u8 ReadState = PG_STATE_HEADER;
+ int bContinual = true;
+ int bDataEmpty = true;
+ u8 efuse_data, word_cnts = 0;
+ u16 efuse_addr = 0;
+ u8 hoffset = 0, hworden = 0;
+ u8 tmpidx = 0;
+ u8 tmpdata[8];
+ u8 max_section = 0;
+ u8 tmp_header = 0;
+
+ EFUSE_GetEfuseDefinition(pAdapter, EFUSE_WIFI, TYPE_EFUSE_MAX_SECTION, (void *)&max_section, bPseudoTest);
+
+ if (data == NULL)
+ return false;
+ if (offset > max_section)
+ return false;
+
+ _rtw_memset((void *)data, 0xff, sizeof(u8)*PGPKT_DATA_SIZE);
+ _rtw_memset((void *)tmpdata, 0xff, sizeof(u8)*PGPKT_DATA_SIZE);
+
+	/* <Roger_TODO> The Efuse has been pre-programmed with 5 dummy bytes at its end by CP. */
+	/* Skip the dummy parts to prevent unexpected data being read from the Efuse. */
+	/* Bypassed for now. 2009.02.19. */
+ while (bContinual && AVAILABLE_EFUSE_ADDR(efuse_addr)) {
+ /* Header Read ------------- */
+ if (ReadState & PG_STATE_HEADER) {
+ if (efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data, bPseudoTest) && (efuse_data != 0xFF)) {
+ if (EXT_HEADER(efuse_data)) {
+ tmp_header = efuse_data;
+ efuse_addr++;
+ efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data, bPseudoTest);
+ if (!ALL_WORDS_DISABLED(efuse_data)) {
+ hoffset = ((tmp_header & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1);
+ hworden = efuse_data & 0x0F;
+ } else {
+ DBG_88E("Error, All words disabled\n");
+ efuse_addr++;
+ continue;
+ }
+ } else {
+ hoffset = (efuse_data>>4) & 0x0F;
+ hworden = efuse_data & 0x0F;
+ }
+ word_cnts = Efuse_CalculateWordCnts(hworden);
+ bDataEmpty = true;
+
+ if (hoffset == offset) {
+ for (tmpidx = 0; tmpidx < word_cnts*2; tmpidx++) {
+ if (efuse_OneByteRead(pAdapter, efuse_addr+1+tmpidx, &efuse_data, bPseudoTest)) {
+ tmpdata[tmpidx] = efuse_data;
+ if (efuse_data != 0xff)
+ bDataEmpty = false;
+ }
+ }
+ if (bDataEmpty == false) {
+ ReadState = PG_STATE_DATA;
+ } else {/* read next header */
+ efuse_addr = efuse_addr + (word_cnts*2)+1;
+ ReadState = PG_STATE_HEADER;
+ }
+ } else {/* read next header */
+ efuse_addr = efuse_addr + (word_cnts*2)+1;
+ ReadState = PG_STATE_HEADER;
+ }
+ } else {
+ bContinual = false;
+ }
+ } else if (ReadState & PG_STATE_DATA) {
+ /* Data section Read ------------- */
+ efuse_WordEnableDataRead(hworden, tmpdata, data);
+ efuse_addr = efuse_addr + (word_cnts*2)+1;
+ ReadState = PG_STATE_HEADER;
+ }
+
+ }
+
+ if ((data[0] == 0xff) && (data[1] == 0xff) && (data[2] == 0xff) && (data[3] == 0xff) &&
+ (data[4] == 0xff) && (data[5] == 0xff) && (data[6] == 0xff) && (data[7] == 0xff))
+ return false;
+ else
+ return true;
+}
+
+static int Hal_EfusePgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data, bool bPseudoTest)
+{
+ int ret;
+
+ ret = hal_EfusePgPacketRead_8188e(pAdapter, offset, data, bPseudoTest);
+ return ret;
+}
+
+static int Hal_EfusePgPacketRead_Pseudo(struct adapter *pAdapter, u8 offset, u8 *data, bool bPseudoTest)
+{
+ int ret;
+
+ ret = hal_EfusePgPacketRead_8188e(pAdapter, offset, data, bPseudoTest);
+ return ret;
+}
+
+static int rtl8188e_Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data, bool bPseudoTest)
+{
+ int ret;
+
+ if (bPseudoTest)
+		ret = Hal_EfusePgPacketRead_Pseudo(pAdapter, offset, data, bPseudoTest);
+ else
+ ret = Hal_EfusePgPacketRead(pAdapter, offset, data, bPseudoTest);
+ return ret;
+}
+
+static bool hal_EfuseFixHeaderProcess(struct adapter *pAdapter, u8 efuseType, struct pgpkt *pFixPkt, u16 *pAddr, bool bPseudoTest)
+{
+ u8 originaldata[8], badworden = 0;
+ u16 efuse_addr = *pAddr;
+ u32 PgWriteSuccess = 0;
+
+ _rtw_memset((void *)originaldata, 0xff, 8);
+
+ if (Efuse_PgPacketRead(pAdapter, pFixPkt->offset, originaldata, bPseudoTest)) {
+ /* check if data exist */
+ badworden = Efuse_WordEnableDataWrite(pAdapter, efuse_addr+1, pFixPkt->word_en, originaldata, bPseudoTest);
+
+ if (badworden != 0xf) { /* write fail */
+ PgWriteSuccess = Efuse_PgPacketWrite(pAdapter, pFixPkt->offset, badworden, originaldata, bPseudoTest);
+
+ if (!PgWriteSuccess)
+ return false;
+ else
+ efuse_addr = Efuse_GetCurrentSize(pAdapter, efuseType, bPseudoTest);
+ } else {
+ efuse_addr = efuse_addr + (pFixPkt->word_cnts*2) + 1;
+ }
+ } else {
+ efuse_addr = efuse_addr + (pFixPkt->word_cnts*2) + 1;
+ }
+ *pAddr = efuse_addr;
+ return true;
+}
+
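+/*
+ * Program a two-byte (extended) PG header: the first byte carries the low
+ * 3 bits of the section offset in bits 7:5 plus the 0x0F extension marker,
+ * the second byte carries the remaining offset bits in bits 7:4 plus the
+ * word_en nibble.  Each byte is written and read back, retrying or fixing
+ * up a clashing header when the read-back does not match.
+ */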
+static bool hal_EfusePgPacketWrite2ByteHeader(struct adapter *pAdapter, u8 efuseType, u16 *pAddr, struct pgpkt *pTargetPkt, bool bPseudoTest)
+{
+ bool bRet = false;
+ u16 efuse_addr = *pAddr, efuse_max_available_len = 0;
+ u8 pg_header = 0, tmp_header = 0, pg_header_temp = 0;
+ u8 repeatcnt = 0;
+
+ EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_AVAILABLE_EFUSE_BYTES_BANK, (void *)&efuse_max_available_len, bPseudoTest);
+
+ while (efuse_addr < efuse_max_available_len) {
+ pg_header = ((pTargetPkt->offset & 0x07) << 5) | 0x0F;
+ efuse_OneByteWrite(pAdapter, efuse_addr, pg_header, bPseudoTest);
+ efuse_OneByteRead(pAdapter, efuse_addr, &tmp_header, bPseudoTest);
+
+ while (tmp_header == 0xFF) {
+ if (repeatcnt++ > EFUSE_REPEAT_THRESHOLD_)
+ return false;
+
+ efuse_OneByteWrite(pAdapter, efuse_addr, pg_header, bPseudoTest);
+ efuse_OneByteRead(pAdapter, efuse_addr, &tmp_header, bPseudoTest);
+ }
+
+ /* to write ext_header */
+ if (tmp_header == pg_header) {
+ efuse_addr++;
+ pg_header_temp = pg_header;
+ pg_header = ((pTargetPkt->offset & 0x78) << 1) | pTargetPkt->word_en;
+
+ efuse_OneByteWrite(pAdapter, efuse_addr, pg_header, bPseudoTest);
+ efuse_OneByteRead(pAdapter, efuse_addr, &tmp_header, bPseudoTest);
+
+ while (tmp_header == 0xFF) {
+ if (repeatcnt++ > EFUSE_REPEAT_THRESHOLD_)
+ return false;
+
+ efuse_OneByteWrite(pAdapter, efuse_addr, pg_header, bPseudoTest);
+ efuse_OneByteRead(pAdapter, efuse_addr, &tmp_header, bPseudoTest);
+ }
+
+ if ((tmp_header & 0x0F) == 0x0F) { /* word_en PG fail */
+ if (repeatcnt++ > EFUSE_REPEAT_THRESHOLD_) {
+ return false;
+ } else {
+ efuse_addr++;
+ continue;
+ }
+ } else if (pg_header != tmp_header) { /* offset PG fail */
+ struct pgpkt fixPkt;
+ fixPkt.offset = ((pg_header_temp & 0xE0) >> 5) | ((tmp_header & 0xF0) >> 1);
+ fixPkt.word_en = tmp_header & 0x0F;
+ fixPkt.word_cnts = Efuse_CalculateWordCnts(fixPkt.word_en);
+ if (!hal_EfuseFixHeaderProcess(pAdapter, efuseType, &fixPkt, &efuse_addr, bPseudoTest))
+ return false;
+ } else {
+ bRet = true;
+ break;
+ }
+ } else if ((tmp_header & 0x1F) == 0x0F) { /* wrong extended header */
+ efuse_addr += 2;
+ continue;
+ }
+ }
+
+ *pAddr = efuse_addr;
+ return bRet;
+}
+
+static bool hal_EfusePgPacketWrite1ByteHeader(struct adapter *pAdapter, u8 efuseType, u16 *pAddr, struct pgpkt *pTargetPkt, bool bPseudoTest)
+{
+ bool bRet = false;
+ u8 pg_header = 0, tmp_header = 0;
+ u16 efuse_addr = *pAddr;
+ u8 repeatcnt = 0;
+
+ pg_header = ((pTargetPkt->offset << 4) & 0xf0) | pTargetPkt->word_en;
+
+ efuse_OneByteWrite(pAdapter, efuse_addr, pg_header, bPseudoTest);
+ efuse_OneByteRead(pAdapter, efuse_addr, &tmp_header, bPseudoTest);
+
+ while (tmp_header == 0xFF) {
+ if (repeatcnt++ > EFUSE_REPEAT_THRESHOLD_)
+ return false;
+ efuse_OneByteWrite(pAdapter, efuse_addr, pg_header, bPseudoTest);
+ efuse_OneByteRead(pAdapter, efuse_addr, &tmp_header, bPseudoTest);
+ }
+
+ if (pg_header == tmp_header) {
+ bRet = true;
+ } else {
+ struct pgpkt fixPkt;
+ fixPkt.offset = (tmp_header>>4) & 0x0F;
+ fixPkt.word_en = tmp_header & 0x0F;
+ fixPkt.word_cnts = Efuse_CalculateWordCnts(fixPkt.word_en);
+ if (!hal_EfuseFixHeaderProcess(pAdapter, efuseType, &fixPkt, &efuse_addr, bPseudoTest))
+ return false;
+ }
+
+ *pAddr = efuse_addr;
+ return bRet;
+}
+
+static bool hal_EfusePgPacketWriteData(struct adapter *pAdapter, u8 efuseType, u16 *pAddr, struct pgpkt *pTargetPkt, bool bPseudoTest)
+{
+ bool bRet = false;
+ u16 efuse_addr = *pAddr;
+ u8 badworden = 0;
+ u32 PgWriteSuccess = 0;
+
+ badworden = 0x0f;
+ badworden = Efuse_WordEnableDataWrite(pAdapter, efuse_addr+1, pTargetPkt->word_en, pTargetPkt->data, bPseudoTest);
+ if (badworden == 0x0F) {
+ /* write ok */
+ return true;
+ } else {
+ /* reorganize other pg packet */
+ PgWriteSuccess = Efuse_PgPacketWrite(pAdapter, pTargetPkt->offset, badworden, pTargetPkt->data, bPseudoTest);
+ if (!PgWriteSuccess)
+ return false;
+ else
+ return true;
+ }
+ return bRet;
+}
+
+static bool
+hal_EfusePgPacketWriteHeader(
+ struct adapter *pAdapter,
+ u8 efuseType,
+ u16 *pAddr,
+ struct pgpkt *pTargetPkt,
+ bool bPseudoTest)
+{
+ bool bRet = false;
+
+ if (pTargetPkt->offset >= EFUSE_MAX_SECTION_BASE)
+ bRet = hal_EfusePgPacketWrite2ByteHeader(pAdapter, efuseType, pAddr, pTargetPkt, bPseudoTest);
+ else
+ bRet = hal_EfusePgPacketWrite1ByteHeader(pAdapter, efuseType, pAddr, pTargetPkt, bPseudoTest);
+
+ return bRet;
+}
+
+static bool wordEnMatched(struct pgpkt *pTargetPkt, struct pgpkt *pCurPkt,
+ u8 *pWden)
+{
+ u8 match_word_en = 0x0F; /* default all words are disabled */
+
+	/* check whether the same words are enabled in both the target and the current PG packet */
+ if (((pTargetPkt->word_en & BIT0) == 0) &&
+ ((pCurPkt->word_en & BIT0) == 0))
+ match_word_en &= ~BIT0; /* enable word 0 */
+ if (((pTargetPkt->word_en & BIT1) == 0) &&
+ ((pCurPkt->word_en & BIT1) == 0))
+ match_word_en &= ~BIT1; /* enable word 1 */
+ if (((pTargetPkt->word_en & BIT2) == 0) &&
+ ((pCurPkt->word_en & BIT2) == 0))
+ match_word_en &= ~BIT2; /* enable word 2 */
+ if (((pTargetPkt->word_en & BIT3) == 0) &&
+ ((pCurPkt->word_en & BIT3) == 0))
+ match_word_en &= ~BIT3; /* enable word 3 */
+
+ *pWden = match_word_en;
+
+ if (match_word_en != 0xf)
+ return true;
+ else
+ return false;
+}
+
+static bool hal_EfuseCheckIfDatafollowed(struct adapter *pAdapter, u8 word_cnts, u16 startAddr, bool bPseudoTest)
+{
+ bool bRet = false;
+ u8 i, efuse_data;
+
+ for (i = 0; i < (word_cnts*2); i++) {
+ if (efuse_OneByteRead(pAdapter, (startAddr+i), &efuse_data, bPseudoTest) && (efuse_data != 0xFF))
+ bRet = true;
+ }
+ return bRet;
+}
+
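+/*
+ * Scan the efuse from the current used size looking for an already written
+ * header with the same offset that has no data behind it; if one is found
+ * and some requested words overlap, write those words there and shrink the
+ * target packet.  On success, *pAddr points at the first free (0xFF) header
+ * position.
+ */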
+static bool hal_EfusePartialWriteCheck(struct adapter *pAdapter, u8 efuseType, u16 *pAddr, struct pgpkt *pTargetPkt, bool bPseudoTest)
+{
+ bool bRet = false;
+ u8 i, efuse_data = 0, cur_header = 0;
+ u8 matched_wden = 0, badworden = 0;
+ u16 startAddr = 0, efuse_max_available_len = 0, efuse_max = 0;
+ struct pgpkt curPkt;
+
+ EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_AVAILABLE_EFUSE_BYTES_BANK, (void *)&efuse_max_available_len, bPseudoTest);
+ EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_EFUSE_REAL_CONTENT_LEN, (void *)&efuse_max, bPseudoTest);
+
+ if (efuseType == EFUSE_WIFI) {
+ if (bPseudoTest) {
+ startAddr = (u16)(fakeEfuseUsedBytes%EFUSE_REAL_CONTENT_LEN);
+ } else {
+ rtw_hal_get_hwreg(pAdapter, HW_VAR_EFUSE_BYTES, (u8 *)&startAddr);
+ startAddr %= EFUSE_REAL_CONTENT_LEN;
+ }
+ } else {
+ if (bPseudoTest)
+ startAddr = (u16)(fakeBTEfuseUsedBytes%EFUSE_REAL_CONTENT_LEN);
+ else
+ startAddr = (u16)(BTEfuseUsedBytes%EFUSE_REAL_CONTENT_LEN);
+ }
+
+ while (1) {
+ if (startAddr >= efuse_max_available_len) {
+ bRet = false;
+ break;
+ }
+
+ if (efuse_OneByteRead(pAdapter, startAddr, &efuse_data, bPseudoTest) && (efuse_data != 0xFF)) {
+ if (EXT_HEADER(efuse_data)) {
+ cur_header = efuse_data;
+ startAddr++;
+ efuse_OneByteRead(pAdapter, startAddr, &efuse_data, bPseudoTest);
+ if (ALL_WORDS_DISABLED(efuse_data)) {
+ bRet = false;
+ break;
+ } else {
+ curPkt.offset = ((cur_header & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1);
+ curPkt.word_en = efuse_data & 0x0F;
+ }
+ } else {
+ cur_header = efuse_data;
+ curPkt.offset = (cur_header>>4) & 0x0F;
+ curPkt.word_en = cur_header & 0x0F;
+ }
+
+ curPkt.word_cnts = Efuse_CalculateWordCnts(curPkt.word_en);
+			/* If the same header is found but no data follows it, */
+			/* write part of the data right after that header. */
+ if ((curPkt.offset == pTargetPkt->offset) &&
+ (!hal_EfuseCheckIfDatafollowed(pAdapter, curPkt.word_cnts, startAddr+1, bPseudoTest)) &&
+ wordEnMatched(pTargetPkt, &curPkt, &matched_wden)) {
+ /* Here to write partial data */
+ badworden = Efuse_WordEnableDataWrite(pAdapter, startAddr+1, matched_wden, pTargetPkt->data, bPseudoTest);
+ if (badworden != 0x0F) {
+ u32 PgWriteSuccess = 0;
+ /* if write fail on some words, write these bad words again */
+
+ PgWriteSuccess = Efuse_PgPacketWrite(pAdapter, pTargetPkt->offset, badworden, pTargetPkt->data, bPseudoTest);
+
+ if (!PgWriteSuccess) {
+ bRet = false; /* write fail, return */
+ break;
+ }
+ }
+ /* partial write ok, update the target packet for later use */
+ for (i = 0; i < 4; i++) {
+ if ((matched_wden & (0x1<<i)) == 0) /* this word has been written */
+ pTargetPkt->word_en |= (0x1<<i); /* disable the word */
+ }
+ pTargetPkt->word_cnts = Efuse_CalculateWordCnts(pTargetPkt->word_en);
+ }
+ /* read from next header */
+ startAddr = startAddr + (curPkt.word_cnts*2) + 1;
+ } else {
+ /* not used header, 0xff */
+ *pAddr = startAddr;
+ bRet = true;
+ break;
+ }
+ }
+ return bRet;
+}
+
+static bool
+hal_EfusePgCheckAvailableAddr(
+ struct adapter *pAdapter,
+ u8 efuseType,
+ bool bPseudoTest
+ )
+{
+ u16 efuse_max_available_len = 0;
+
+	/* Check TYPE_EFUSE_MAP_LEN because, on the 8188E, the raw efuse is 256 bytes while the logical map is larger than 256 bytes. */
+ EFUSE_GetEfuseDefinition(pAdapter, EFUSE_WIFI, TYPE_EFUSE_MAP_LEN, (void *)&efuse_max_available_len, false);
+
+ if (Efuse_GetCurrentSize(pAdapter, efuseType, bPseudoTest) >= efuse_max_available_len)
+ return false;
+ return true;
+}
+
+static void hal_EfuseConstructPGPkt(u8 offset, u8 word_en, u8 *pData, struct pgpkt *pTargetPkt)
+{
+ _rtw_memset((void *)pTargetPkt->data, 0xFF, sizeof(u8)*8);
+ pTargetPkt->offset = offset;
+ pTargetPkt->word_en = word_en;
+ efuse_WordEnableDataRead(word_en, pData, pTargetPkt->data);
+ pTargetPkt->word_cnts = Efuse_CalculateWordCnts(pTargetPkt->word_en);
+}
+
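+/*
+ * Full PG packet write sequence: check that free space remains, build the
+ * target packet, fold it into any matching partially-written section, then
+ * write the header followed by the data words.
+ */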
+static bool hal_EfusePgPacketWrite_8188e(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *pData, bool bPseudoTest)
+{
+ struct pgpkt targetPkt;
+ u16 startAddr = 0;
+ u8 efuseType = EFUSE_WIFI;
+
+ if (!hal_EfusePgCheckAvailableAddr(pAdapter, efuseType, bPseudoTest))
+ return false;
+
+ hal_EfuseConstructPGPkt(offset, word_en, pData, &targetPkt);
+
+ if (!hal_EfusePartialWriteCheck(pAdapter, efuseType, &startAddr, &targetPkt, bPseudoTest))
+ return false;
+
+ if (!hal_EfusePgPacketWriteHeader(pAdapter, efuseType, &startAddr, &targetPkt, bPseudoTest))
+ return false;
+
+ if (!hal_EfusePgPacketWriteData(pAdapter, efuseType, &startAddr, &targetPkt, bPseudoTest))
+ return false;
+
+ return true;
+}
+
+static int Hal_EfusePgPacketWrite_Pseudo(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *data, bool bPseudoTest)
+{
+ int ret;
+
+ ret = hal_EfusePgPacketWrite_8188e(pAdapter, offset, word_en, data, bPseudoTest);
+ return ret;
+}
+
+static int Hal_EfusePgPacketWrite(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *data, bool bPseudoTest)
+{
+ int ret = 0;
+ ret = hal_EfusePgPacketWrite_8188e(pAdapter, offset, word_en, data, bPseudoTest);
+
+ return ret;
+}
+
+static int rtl8188e_Efuse_PgPacketWrite(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *data, bool bPseudoTest)
+{
+ int ret;
+
+ if (bPseudoTest)
+		ret = Hal_EfusePgPacketWrite_Pseudo(pAdapter, offset, word_en, data, bPseudoTest);
+ else
+ ret = Hal_EfusePgPacketWrite(pAdapter, offset, word_en, data, bPseudoTest);
+ return ret;
+}
+
+static struct HAL_VERSION ReadChipVersion8188E(struct adapter *padapter)
+{
+ u32 value32;
+ struct HAL_VERSION ChipVersion;
+ struct hal_data_8188e *pHalData;
+
+ pHalData = GET_HAL_DATA(padapter);
+
+ value32 = rtw_read32(padapter, REG_SYS_CFG);
+ ChipVersion.ICType = CHIP_8188E;
+ ChipVersion.ChipType = ((value32 & RTL_ID) ? TEST_CHIP : NORMAL_CHIP);
+
+ ChipVersion.RFType = RF_TYPE_1T1R;
+ ChipVersion.VendorType = ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : CHIP_VENDOR_TSMC);
+ ChipVersion.CUTVersion = (value32 & CHIP_VER_RTL_MASK)>>CHIP_VER_RTL_SHIFT; /* IC version (CUT) */
+
+ /* For regulator mode. by tynli. 2011.01.14 */
+ pHalData->RegulatorMode = ((value32 & TRP_BT_EN) ? RT_LDO_REGULATOR : RT_SWITCHING_REGULATOR);
+
+ ChipVersion.ROMVer = 0; /* ROM code version. */
+ pHalData->MultiFunc = RT_MULTI_FUNC_NONE;
+
+ dump_chip_info(ChipVersion);
+
+ pHalData->VersionID = ChipVersion;
+
+ if (IS_1T2R(ChipVersion)) {
+ pHalData->rf_type = RF_1T2R;
+ pHalData->NumTotalRFPath = 2;
+ } else if (IS_2T2R(ChipVersion)) {
+ pHalData->rf_type = RF_2T2R;
+ pHalData->NumTotalRFPath = 2;
+	} else {
+ pHalData->rf_type = RF_1T1R;
+ pHalData->NumTotalRFPath = 1;
+ }
+
+ MSG_88E("RF_Type is %x!!\n", pHalData->rf_type);
+
+ return ChipVersion;
+}
+
+static void rtl8188e_read_chip_version(struct adapter *padapter)
+{
+ ReadChipVersion8188E(padapter);
+}
+
+static void rtl8188e_GetHalODMVar(struct adapter *Adapter, enum hal_odm_variable eVariable, void *pValue1, bool bSet)
+{
+}
+
+static void rtl8188e_SetHalODMVar(struct adapter *Adapter, enum hal_odm_variable eVariable, void *pValue1, bool bSet)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct odm_dm_struct *podmpriv = &pHalData->odmpriv;
+ switch (eVariable) {
+ case HAL_ODM_STA_INFO:
+ {
+ struct sta_info *psta = (struct sta_info *)pValue1;
+ if (bSet) {
+ DBG_88E("### Set STA_(%d) info\n", psta->mac_id);
+ ODM_CmnInfoPtrArrayHook(podmpriv, ODM_CMNINFO_STA_STATUS, psta->mac_id, psta);
+ ODM_RAInfo_Init(podmpriv, psta->mac_id);
+ } else {
+ DBG_88E("### Clean STA_(%d) info\n", psta->mac_id);
+ ODM_CmnInfoPtrArrayHook(podmpriv, ODM_CMNINFO_STA_STATUS, psta->mac_id, NULL);
+ }
+ }
+ break;
+ case HAL_ODM_P2P_STATE:
+ ODM_CmnInfoUpdate(podmpriv, ODM_CMNINFO_WIFI_DIRECT, bSet);
+ break;
+ case HAL_ODM_WIFI_DISPLAY_STATE:
+ ODM_CmnInfoUpdate(podmpriv, ODM_CMNINFO_WIFI_DISPLAY, bSet);
+ break;
+ default:
+ break;
+ }
+}
+
+void rtl8188e_clone_haldata(struct adapter *dst_adapter, struct adapter *src_adapter)
+{
+ memcpy(dst_adapter->HalData, src_adapter->HalData, dst_adapter->hal_data_sz);
+}
+
+void rtl8188e_start_thread(struct adapter *padapter)
+{
+}
+
+void rtl8188e_stop_thread(struct adapter *padapter)
+{
+}
+
+static void hal_notch_filter_8188e(struct adapter *adapter, bool enable)
+{
+ if (enable) {
+ DBG_88E("Enable notch filter\n");
+ rtw_write8(adapter, rOFDM0_RxDSP+1, rtw_read8(adapter, rOFDM0_RxDSP+1) | BIT1);
+ } else {
+ DBG_88E("Disable notch filter\n");
+ rtw_write8(adapter, rOFDM0_RxDSP+1, rtw_read8(adapter, rOFDM0_RxDSP+1) & ~BIT1);
+ }
+}
+void rtl8188e_set_hal_ops(struct hal_ops *pHalFunc)
+{
+ pHalFunc->free_hal_data = &rtl8188e_free_hal_data;
+
+ pHalFunc->dm_init = &rtl8188e_init_dm_priv;
+ pHalFunc->dm_deinit = &rtl8188e_deinit_dm_priv;
+
+ pHalFunc->read_chip_version = &rtl8188e_read_chip_version;
+
+ pHalFunc->set_bwmode_handler = &PHY_SetBWMode8188E;
+ pHalFunc->set_channel_handler = &PHY_SwChnl8188E;
+
+ pHalFunc->hal_dm_watchdog = &rtl8188e_HalDmWatchDog;
+
+ pHalFunc->Add_RateATid = &rtl8188e_Add_RateATid;
+ pHalFunc->run_thread = &rtl8188e_start_thread;
+ pHalFunc->cancel_thread = &rtl8188e_stop_thread;
+
+ pHalFunc->AntDivBeforeLinkHandler = &AntDivBeforeLink8188E;
+ pHalFunc->AntDivCompareHandler = &AntDivCompare8188E;
+ pHalFunc->read_bbreg = &rtl8188e_PHY_QueryBBReg;
+ pHalFunc->write_bbreg = &rtl8188e_PHY_SetBBReg;
+ pHalFunc->read_rfreg = &rtl8188e_PHY_QueryRFReg;
+ pHalFunc->write_rfreg = &rtl8188e_PHY_SetRFReg;
+
+ /* Efuse related function */
+ pHalFunc->EfusePowerSwitch = &rtl8188e_EfusePowerSwitch;
+ pHalFunc->ReadEFuse = &rtl8188e_ReadEFuse;
+ pHalFunc->EFUSEGetEfuseDefinition = &rtl8188e_EFUSE_GetEfuseDefinition;
+ pHalFunc->EfuseGetCurrentSize = &rtl8188e_EfuseGetCurrentSize;
+ pHalFunc->Efuse_PgPacketRead = &rtl8188e_Efuse_PgPacketRead;
+ pHalFunc->Efuse_PgPacketWrite = &rtl8188e_Efuse_PgPacketWrite;
+ pHalFunc->Efuse_WordEnableDataWrite = &rtl8188e_Efuse_WordEnableDataWrite;
+
+ pHalFunc->sreset_init_value = &sreset_init_value;
+ pHalFunc->sreset_reset_value = &sreset_reset_value;
+ pHalFunc->silentreset = &rtl8188e_silentreset_for_specific_platform;
+ pHalFunc->sreset_xmit_status_check = &rtl8188e_sreset_xmit_status_check;
+ pHalFunc->sreset_linked_status_check = &rtl8188e_sreset_linked_status_check;
+ pHalFunc->sreset_get_wifi_status = &sreset_get_wifi_status;
+
+ pHalFunc->GetHalODMVarHandler = &rtl8188e_GetHalODMVar;
+ pHalFunc->SetHalODMVarHandler = &rtl8188e_SetHalODMVar;
+
+ pHalFunc->IOL_exec_cmds_sync = &rtl8188e_IOL_exec_cmds_sync;
+
+ pHalFunc->hal_notch_filter = &hal_notch_filter_8188e;
+}
+
+u8 GetEEPROMSize8188E(struct adapter *padapter)
+{
+ u8 size = 0;
+ u32 cr;
+
+ cr = rtw_read16(padapter, REG_9346CR);
+ /* 6: EEPROM used is 93C46, 4: boot from E-Fuse. */
+ size = (cr & BOOT_FROM_EEPROM) ? 6 : 4;
+
+ MSG_88E("EEPROM type is %s\n", size == 4 ? "E-FUSE" : "93C46");
+
+ return size;
+}
+
+/* */
+/* */
+/* LLT R/W/Init function */
+/* */
+/* */
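+/*
+ * Write one Link List Table entry: pack the address, data and write opcode
+ * into REG_LLT_INIT, then poll the opcode field until the hardware reports
+ * _LLT_NO_ACTIVE (or the polling threshold is hit).
+ */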
+static s32 _LLTWrite(struct adapter *padapter, u32 address, u32 data)
+{
+ s32 status = _SUCCESS;
+ s32 count = 0;
+ u32 value = _LLT_INIT_ADDR(address) | _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS);
+ u16 LLTReg = REG_LLT_INIT;
+
+ rtw_write32(padapter, LLTReg, value);
+
+ /* polling */
+ do {
+ value = rtw_read32(padapter, LLTReg);
+ if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
+ break;
+
+ if (count > POLLING_LLT_THRESHOLD) {
+ RT_TRACE(_module_hal_init_c_, _drv_err_, ("Failed to polling write LLT done at address %d!\n", address));
+ status = _FAIL;
+ break;
+ }
+ } while (count++);
+
+ return status;
+}
+
+s32 InitLLTTable(struct adapter *padapter, u8 txpktbuf_bndy)
+{
+ s32 status = _FAIL;
+ u32 i;
+ u32 Last_Entry_Of_TxPktBuf = LAST_ENTRY_OF_TX_PKT_BUFFER;/* 176, 22k */
+
+ if (rtw_IOL_applied(padapter)) {
+ status = iol_InitLLTTable(padapter, txpktbuf_bndy);
+ } else {
+ for (i = 0; i < (txpktbuf_bndy - 1); i++) {
+ status = _LLTWrite(padapter, i, i + 1);
+ if (_SUCCESS != status)
+ return status;
+ }
+
+ /* end of list */
+ status = _LLTWrite(padapter, (txpktbuf_bndy - 1), 0xFF);
+ if (_SUCCESS != status)
+ return status;
+
+ /* Make the other pages as ring buffer */
+		/* This ring buffer is used as the beacon buffer if we configure this MAC for two-MAC transfer. */
+ /* Otherwise used as local loopback buffer. */
+ for (i = txpktbuf_bndy; i < Last_Entry_Of_TxPktBuf; i++) {
+ status = _LLTWrite(padapter, i, (i + 1));
+ if (_SUCCESS != status)
+ return status;
+ }
+
+ /* Let last entry point to the start entry of ring buffer */
+ status = _LLTWrite(padapter, Last_Entry_Of_TxPktBuf, txpktbuf_bndy);
+ if (_SUCCESS != status) {
+ return status;
+ }
+ }
+
+ return status;
+}
+
+void
+Hal_InitPGData88E(struct adapter *padapter)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
+
+ if (!pEEPROM->bautoload_fail_flag) { /* autoload OK. */
+ if (!is_boot_from_eeprom(padapter)) {
+ /* Read EFUSE real map to shadow. */
+ EFUSE_ShadowMapUpdate(padapter, EFUSE_WIFI, false);
+ }
+ } else {/* autoload fail */
+ RT_TRACE(_module_hci_hal_init_c_, _drv_notice_, ("AutoLoad Fail reported from CR9346!!\n"));
+ /* update to default value 0xFF */
+ if (!is_boot_from_eeprom(padapter))
+ EFUSE_ShadowMapUpdate(padapter, EFUSE_WIFI, false);
+ }
+}
+
+void
+Hal_EfuseParseIDCode88E(
+ struct adapter *padapter,
+ u8 *hwinfo
+ )
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
+ u16 EEPROMId;
+
+	/* Check 0x8129 again to make sure of the autoload status!! */
+ EEPROMId = le16_to_cpu(*((__le16 *)hwinfo));
+ if (EEPROMId != RTL_EEPROM_ID) {
+ DBG_88E("EEPROM ID(%#x) is invalid!!\n", EEPROMId);
+ pEEPROM->bautoload_fail_flag = true;
+ } else {
+ pEEPROM->bautoload_fail_flag = false;
+ }
+
+ DBG_88E("EEPROM ID = 0x%04x\n", EEPROMId);
+}
+
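+/*
+ * Parse the 2.4G TX power block starting at EEPROM_TX_PWR_INX_88E: per-group
+ * CCK and BW40 base indexes first, then per-TX-count diff nibbles (BW20/OFDM
+ * for index 0; BW40/BW20 and OFDM/CCK pairs for the rest).  A 0xFF byte falls
+ * back to the default value and negative 4-bit diffs are sign-extended to
+ * 8 bits.
+ */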
+static void Hal_ReadPowerValueFromPROM_8188E(struct txpowerinfo24g *pwrInfo24G, u8 *PROMContent, bool AutoLoadFail)
+{
+ u32 rfPath, eeAddr = EEPROM_TX_PWR_INX_88E, group, TxCount = 0;
+
+ _rtw_memset(pwrInfo24G, 0, sizeof(struct txpowerinfo24g));
+
+ if (AutoLoadFail) {
+ for (rfPath = 0; rfPath < MAX_RF_PATH; rfPath++) {
+ /* 2.4G default value */
+ for (group = 0; group < MAX_CHNL_GROUP_24G; group++) {
+ pwrInfo24G->IndexCCK_Base[rfPath][group] = EEPROM_DEFAULT_24G_INDEX;
+ pwrInfo24G->IndexBW40_Base[rfPath][group] = EEPROM_DEFAULT_24G_INDEX;
+ }
+ for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
+ if (TxCount == 0) {
+ pwrInfo24G->BW20_Diff[rfPath][0] = EEPROM_DEFAULT_24G_HT20_DIFF;
+ pwrInfo24G->OFDM_Diff[rfPath][0] = EEPROM_DEFAULT_24G_OFDM_DIFF;
+ } else {
+ pwrInfo24G->BW20_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
+ pwrInfo24G->BW40_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
+ pwrInfo24G->CCK_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
+ pwrInfo24G->OFDM_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
+ }
+ }
+ }
+ return;
+ }
+
+ for (rfPath = 0; rfPath < MAX_RF_PATH; rfPath++) {
+		/* 2.4G base power indexes from PROM (0xFF falls back to the default) */
+ for (group = 0; group < MAX_CHNL_GROUP_24G; group++) {
+ pwrInfo24G->IndexCCK_Base[rfPath][group] = PROMContent[eeAddr++];
+ if (pwrInfo24G->IndexCCK_Base[rfPath][group] == 0xFF)
+ pwrInfo24G->IndexCCK_Base[rfPath][group] = EEPROM_DEFAULT_24G_INDEX;
+ }
+ for (group = 0; group < MAX_CHNL_GROUP_24G-1; group++) {
+ pwrInfo24G->IndexBW40_Base[rfPath][group] = PROMContent[eeAddr++];
+ if (pwrInfo24G->IndexBW40_Base[rfPath][group] == 0xFF)
+ pwrInfo24G->IndexBW40_Base[rfPath][group] = EEPROM_DEFAULT_24G_INDEX;
+ }
+ for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
+ if (TxCount == 0) {
+ pwrInfo24G->BW40_Diff[rfPath][TxCount] = 0;
+ if (PROMContent[eeAddr] == 0xFF) {
+ pwrInfo24G->BW20_Diff[rfPath][TxCount] = EEPROM_DEFAULT_24G_HT20_DIFF;
+ } else {
+ pwrInfo24G->BW20_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0xf0)>>4;
+ if (pwrInfo24G->BW20_Diff[rfPath][TxCount] & BIT3) /* 4bit sign number to 8 bit sign number */
+ pwrInfo24G->BW20_Diff[rfPath][TxCount] |= 0xF0;
+ }
+
+ if (PROMContent[eeAddr] == 0xFF) {
+ pwrInfo24G->OFDM_Diff[rfPath][TxCount] = EEPROM_DEFAULT_24G_OFDM_DIFF;
+ } else {
+ pwrInfo24G->OFDM_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0x0f);
+ if (pwrInfo24G->OFDM_Diff[rfPath][TxCount] & BIT3) /* 4bit sign number to 8 bit sign number */
+ pwrInfo24G->OFDM_Diff[rfPath][TxCount] |= 0xF0;
+ }
+ pwrInfo24G->CCK_Diff[rfPath][TxCount] = 0;
+ eeAddr++;
+ } else {
+ if (PROMContent[eeAddr] == 0xFF) {
+ pwrInfo24G->BW40_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
+ } else {
+ pwrInfo24G->BW40_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0xf0)>>4;
+ if (pwrInfo24G->BW40_Diff[rfPath][TxCount] & BIT3) /* 4bit sign number to 8 bit sign number */
+ pwrInfo24G->BW40_Diff[rfPath][TxCount] |= 0xF0;
+ }
+
+ if (PROMContent[eeAddr] == 0xFF) {
+ pwrInfo24G->BW20_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
+ } else {
+ pwrInfo24G->BW20_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0x0f);
+ if (pwrInfo24G->BW20_Diff[rfPath][TxCount] & BIT3) /* 4bit sign number to 8 bit sign number */
+ pwrInfo24G->BW20_Diff[rfPath][TxCount] |= 0xF0;
+ }
+ eeAddr++;
+
+ if (PROMContent[eeAddr] == 0xFF) {
+ pwrInfo24G->OFDM_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
+ } else {
+ pwrInfo24G->OFDM_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0xf0)>>4;
+ if (pwrInfo24G->OFDM_Diff[rfPath][TxCount] & BIT3) /* 4bit sign number to 8 bit sign number */
+ pwrInfo24G->OFDM_Diff[rfPath][TxCount] |= 0xF0;
+ }
+
+ if (PROMContent[eeAddr] == 0xFF) {
+ pwrInfo24G->CCK_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
+ } else {
+ pwrInfo24G->CCK_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0x0f);
+ if (pwrInfo24G->CCK_Diff[rfPath][TxCount] & BIT3) /* 4bit sign number to 8 bit sign number */
+ pwrInfo24G->CCK_Diff[rfPath][TxCount] |= 0xF0;
+ }
+ eeAddr++;
+ }
+ }
+ }
+}
+
+static u8 Hal_GetChnlGroup88E(u8 chnl, u8 *pGroup)
+{
+ u8 bIn24G = true;
+
+ if (chnl <= 14) {
+ bIn24G = true;
+
+		if (chnl < 3)			/* Channel 1-2 */
+ *pGroup = 0;
+ else if (chnl < 6) /* Channel 3-5 */
+ *pGroup = 1;
+ else if (chnl < 9) /* Channel 6-8 */
+ *pGroup = 2;
+ else if (chnl < 12) /* Channel 9-11 */
+ *pGroup = 3;
+ else if (chnl < 14) /* Channel 12-13 */
+ *pGroup = 4;
+ else if (chnl == 14) /* Channel 14 */
+ *pGroup = 5;
+ } else {
+ bIn24G = false;
+
+ if (chnl <= 40)
+ *pGroup = 0;
+ else if (chnl <= 48)
+ *pGroup = 1;
+ else if (chnl <= 56)
+ *pGroup = 2;
+ else if (chnl <= 64)
+ *pGroup = 3;
+ else if (chnl <= 104)
+ *pGroup = 4;
+ else if (chnl <= 112)
+ *pGroup = 5;
+ else if (chnl <= 120)
+ *pGroup = 5;
+ else if (chnl <= 128)
+ *pGroup = 6;
+ else if (chnl <= 136)
+ *pGroup = 7;
+ else if (chnl <= 144)
+ *pGroup = 8;
+ else if (chnl <= 153)
+ *pGroup = 9;
+ else if (chnl <= 161)
+ *pGroup = 10;
+ else if (chnl <= 177)
+ *pGroup = 11;
+ }
+ return bIn24G;
+}
+
+void Hal_ReadPowerSavingMode88E(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail)
+{
+ if (AutoLoadFail) {
+ padapter->pwrctrlpriv.bHWPowerdown = false;
+ padapter->pwrctrlpriv.bSupportRemoteWakeup = false;
+ } else {
+		/* hw power down mode selection, 0: rf-off / 1: power down */
+
+ if (padapter->registrypriv.hwpdn_mode == 2)
+ padapter->pwrctrlpriv.bHWPowerdown = (hwinfo[EEPROM_RF_FEATURE_OPTION_88E] & BIT4);
+ else
+ padapter->pwrctrlpriv.bHWPowerdown = padapter->registrypriv.hwpdn_mode;
+
+		/* decide whether the hw supports the remote wakeup function */
+		/* if the hw supports it, the 8051 (SIE) will generate a WakeUp signal (D+/D- toggle) on autoresume */
+ padapter->pwrctrlpriv.bSupportRemoteWakeup = (hwinfo[EEPROM_USB_OPTIONAL_FUNCTION0] & BIT1) ? true : false;
+
+ DBG_88E("%s...bHWPwrPindetect(%x)-bHWPowerdown(%x) , bSupportRemoteWakeup(%x)\n", __func__,
+ padapter->pwrctrlpriv.bHWPwrPindetect, padapter->pwrctrlpriv.bHWPowerdown , padapter->pwrctrlpriv.bSupportRemoteWakeup);
+
+ DBG_88E("### PS params => power_mgnt(%x), usbss_enable(%x) ###\n", padapter->registrypriv.power_mgnt, padapter->registrypriv.usbss_enable);
+ }
+}
+
+void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *PROMContent, bool AutoLoadFail)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct txpowerinfo24g pwrInfo24G;
+ u8 rfPath, ch, group;
+ u8 bIn24G, TxCount;
+
+ Hal_ReadPowerValueFromPROM_8188E(&pwrInfo24G, PROMContent, AutoLoadFail);
+
+ if (!AutoLoadFail)
+ pHalData->bTXPowerDataReadFromEEPORM = true;
+
+ for (rfPath = 0; rfPath < pHalData->NumTotalRFPath; rfPath++) {
+ for (ch = 0; ch <= CHANNEL_MAX_NUMBER; ch++) {
+ bIn24G = Hal_GetChnlGroup88E(ch, &group);
+ if (bIn24G) {
+ pHalData->Index24G_CCK_Base[rfPath][ch] = pwrInfo24G.IndexCCK_Base[rfPath][group];
+ if (ch == 14)
+ pHalData->Index24G_BW40_Base[rfPath][ch] = pwrInfo24G.IndexBW40_Base[rfPath][4];
+ else
+ pHalData->Index24G_BW40_Base[rfPath][ch] = pwrInfo24G.IndexBW40_Base[rfPath][group];
+ }
+ if (bIn24G) {
+ DBG_88E("======= Path %d, Channel %d =======\n", rfPath, ch);
+				DBG_88E("Index24G_CCK_Base[%d][%d] = 0x%x\n", rfPath, ch, pHalData->Index24G_CCK_Base[rfPath][ch]);
+				DBG_88E("Index24G_BW40_Base[%d][%d] = 0x%x\n", rfPath, ch, pHalData->Index24G_BW40_Base[rfPath][ch]);
+ }
+ }
+ for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
+ pHalData->CCK_24G_Diff[rfPath][TxCount] = pwrInfo24G.CCK_Diff[rfPath][TxCount];
+ pHalData->OFDM_24G_Diff[rfPath][TxCount] = pwrInfo24G.OFDM_Diff[rfPath][TxCount];
+ pHalData->BW20_24G_Diff[rfPath][TxCount] = pwrInfo24G.BW20_Diff[rfPath][TxCount];
+ pHalData->BW40_24G_Diff[rfPath][TxCount] = pwrInfo24G.BW40_Diff[rfPath][TxCount];
+ DBG_88E("======= TxCount %d =======\n", TxCount);
+ DBG_88E("CCK_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->CCK_24G_Diff[rfPath][TxCount]);
+ DBG_88E("OFDM_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->OFDM_24G_Diff[rfPath][TxCount]);
+ DBG_88E("BW20_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->BW20_24G_Diff[rfPath][TxCount]);
+ DBG_88E("BW40_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->BW40_24G_Diff[rfPath][TxCount]);
+ }
+ }
+
+ /* 2010/10/19 MH Add Regulator recognize for CU. */
+ if (!AutoLoadFail) {
+ pHalData->EEPROMRegulatory = (PROMContent[EEPROM_RF_BOARD_OPTION_88E]&0x7); /* bit0~2 */
+ if (PROMContent[EEPROM_RF_BOARD_OPTION_88E] == 0xFF)
+ pHalData->EEPROMRegulatory = (EEPROM_DEFAULT_BOARD_OPTION&0x7); /* bit0~2 */
+ } else {
+ pHalData->EEPROMRegulatory = 0;
+ }
+ DBG_88E("EEPROMRegulatory = 0x%x\n", pHalData->EEPROMRegulatory);
+}
+
+void Hal_EfuseParseXtal_8188E(struct adapter *pAdapter, u8 *hwinfo, bool AutoLoadFail)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+
+ if (!AutoLoadFail) {
+ pHalData->CrystalCap = hwinfo[EEPROM_XTAL_88E];
+ if (pHalData->CrystalCap == 0xFF)
+ pHalData->CrystalCap = EEPROM_Default_CrystalCap_88E;
+ } else {
+ pHalData->CrystalCap = EEPROM_Default_CrystalCap_88E;
+ }
+ DBG_88E("CrystalCap: 0x%2x\n", pHalData->CrystalCap);
+}
+
+void Hal_EfuseParseBoardType88E(struct adapter *pAdapter, u8 *hwinfo, bool AutoLoadFail)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+
+ if (!AutoLoadFail)
+ pHalData->BoardType = ((hwinfo[EEPROM_RF_BOARD_OPTION_88E]&0xE0)>>5);
+ else
+ pHalData->BoardType = 0;
+ DBG_88E("Board Type: 0x%2x\n", pHalData->BoardType);
+}
+
+void Hal_EfuseParseEEPROMVer88E(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+
+ if (!AutoLoadFail) {
+ pHalData->EEPROMVersion = hwinfo[EEPROM_VERSION_88E];
+ if (pHalData->EEPROMVersion == 0xFF)
+ pHalData->EEPROMVersion = EEPROM_Default_Version;
+ } else {
+ pHalData->EEPROMVersion = 1;
+ }
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
+ ("Hal_EfuseParseEEPROMVer(), EEVer = %d\n",
+ pHalData->EEPROMVersion));
+}
+
+void rtl8188e_EfuseParseChnlPlan(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail)
+{
+ padapter->mlmepriv.ChannelPlan =
+ hal_com_get_channel_plan(padapter,
+ hwinfo ? hwinfo[EEPROM_ChannelPlan_88E] : 0xFF,
+ padapter->registrypriv.channel_plan,
+ RT_CHANNEL_DOMAIN_WORLD_WIDE_13, AutoLoadFail);
+
+ DBG_88E("mlmepriv.ChannelPlan = 0x%02x\n", padapter->mlmepriv.ChannelPlan);
+}
+
+void Hal_EfuseParseCustomerID88E(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+
+ if (!AutoLoadFail) {
+ pHalData->EEPROMCustomerID = hwinfo[EEPROM_CUSTOMERID_88E];
+ } else {
+ pHalData->EEPROMCustomerID = 0;
+ pHalData->EEPROMSubCustomerID = 0;
+ }
+ DBG_88E("EEPROM Customer ID: 0x%2x\n", pHalData->EEPROMCustomerID);
+}
+
+void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter, u8 *PROMContent, bool AutoLoadFail)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+ struct registry_priv *registry_par = &pAdapter->registrypriv;
+
+ if (!AutoLoadFail) {
+ /* Antenna Diversity setting. */
+ if (registry_par->antdiv_cfg == 2) { /* 2:By EFUSE */
+ pHalData->AntDivCfg = (PROMContent[EEPROM_RF_BOARD_OPTION_88E]&0x18)>>3;
+ if (PROMContent[EEPROM_RF_BOARD_OPTION_88E] == 0xFF)
+				pHalData->AntDivCfg = (EEPROM_DEFAULT_BOARD_OPTION&0x18)>>3;
+ } else {
+ pHalData->AntDivCfg = registry_par->antdiv_cfg; /* 0:OFF , 1:ON, 2:By EFUSE */
+ }
+
+ if (registry_par->antdiv_type == 0) {
+ /* If TRxAntDivType is AUTO in advanced setting, use EFUSE value instead. */
+ pHalData->TRxAntDivType = PROMContent[EEPROM_RF_ANTENNA_OPT_88E];
+ if (pHalData->TRxAntDivType == 0xFF)
+ pHalData->TRxAntDivType = CG_TRX_HW_ANTDIV; /* For 88EE, 1Tx and 1RxCG are fixed.(1Ant, Tx and RxCG are both on aux port) */
+ } else {
+ pHalData->TRxAntDivType = registry_par->antdiv_type;
+ }
+
+ if (pHalData->TRxAntDivType == CG_TRX_HW_ANTDIV || pHalData->TRxAntDivType == CGCS_RX_HW_ANTDIV)
+ pHalData->AntDivCfg = 1; /* 0xC1[3] is ignored. */
+ } else {
+ pHalData->AntDivCfg = 0;
+		/* Keep TRxAntDivType unchanged: it holds the value from the driver setting (device manager). */
+ }
+ DBG_88E("EEPROM : AntDivCfg = %x, TRxAntDivType = %x\n", pHalData->AntDivCfg, pHalData->TRxAntDivType);
+}
+
+void Hal_ReadThermalMeter_88E(struct adapter *Adapter, u8 *PROMContent, bool AutoloadFail)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+
+ /* ThermalMeter from EEPROM */
+ if (!AutoloadFail)
+ pHalData->EEPROMThermalMeter = PROMContent[EEPROM_THERMAL_METER_88E];
+ else
+ pHalData->EEPROMThermalMeter = EEPROM_Default_ThermalMeter_88E;
+
+ if (pHalData->EEPROMThermalMeter == 0xff || AutoloadFail) {
+ pHalData->bAPKThermalMeterIgnore = true;
+ pHalData->EEPROMThermalMeter = EEPROM_Default_ThermalMeter_88E;
+ }
+ DBG_88E("ThermalMeter = 0x%x\n", pHalData->EEPROMThermalMeter);
+}
+
+void Hal_InitChannelPlan(struct adapter *padapter)
+{
+}
+
+bool HalDetectPwrDownMode88E(struct adapter *Adapter)
+{
+ u8 tmpvalue = 0;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
+
+ EFUSE_ShadowRead(Adapter, 1, EEPROM_RF_FEATURE_OPTION_88E, (u32 *)&tmpvalue);
+
+ /* 2010/08/25 MH INF priority > PDN Efuse value. */
+ if (tmpvalue & BIT(4) && pwrctrlpriv->reg_pdnmode)
+ pHalData->pwrdown = true;
+ else
+ pHalData->pwrdown = false;
+
+ DBG_88E("HalDetectPwrDownMode(): PDN =%d\n", pHalData->pwrdown);
+
+ return pHalData->pwrdown;
+} /* HalDetectPwrDownMode */
+
+/* This function is used only for 92C to set the REG_BCN_CTRL(0x550) register. */
+/* We just reserve the value of the register in the variable pHalData->RegBcnCtrlVal and then operate */
+/* on the value of the register via atomic operation. */
+/* This prevents a race condition when setting this register. */
+/* The value of pHalData->RegBcnCtrlVal is initialized in the HwConfigureRTL8192CE() function. */
+
+void SetBcnCtrlReg(struct adapter *padapter, u8 SetBits, u8 ClearBits)
+{
+ struct hal_data_8188e *pHalData;
+
+ pHalData = GET_HAL_DATA(padapter);
+
+ pHalData->RegBcnCtrlVal |= SetBits;
+ pHalData->RegBcnCtrlVal &= ~ClearBits;
+
+ rtw_write8(padapter, REG_BCN_CTRL, (u8)pHalData->RegBcnCtrlVal);
+}
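+
+/*
+ * Editorial note: illustrative usage sketch only, not part of the original
+ * patch. Callers pass the bits to set and the bits to clear; the shadow copy
+ * in pHalData->RegBcnCtrlVal keeps the read-modify-write updates consistent,
+ * e.g.:
+ *
+ *	SetBcnCtrlReg(padapter, BIT(3), 0);	set bit 3 in the shadow and the register
+ *	SetBcnCtrlReg(padapter, 0, BIT(3));	clear it again
+ *
+ * (BIT(3) is only an arbitrary example bit here.)
+ */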
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_mp.c b/drivers/staging/rtl8188eu/hal/rtl8188e_mp.c
new file mode 100644
index 0000000..e97ba02
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_mp.c
@@ -0,0 +1,860 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTL8188E_MP_C_
+
+#include <drv_types.h>
+#include <rtw_mp.h>
+#include <rtl8188e_hal.h>
+#include <rtl8188e_dm.h>
+
+s32 Hal_SetPowerTracking(struct adapter *padapter, u8 enable)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct odm_dm_struct *pDM_Odm = &(pHalData->odmpriv);
+
+ if (!netif_running(padapter->pnetdev)) {
+ RT_TRACE(_module_mp_, _drv_warning_,
+ ("SetPowerTracking! Fail: interface not opened!\n"));
+ return _FAIL;
+ }
+
+ if (!check_fwstate(&padapter->mlmepriv, WIFI_MP_STATE)) {
+ RT_TRACE(_module_mp_, _drv_warning_,
+ ("SetPowerTracking! Fail: not in MP mode!\n"));
+ return _FAIL;
+ }
+
+ if (enable)
+ pDM_Odm->RFCalibrateInfo.bTXPowerTracking = true;
+ else
+ pDM_Odm->RFCalibrateInfo.bTXPowerTrackingInit = false;
+
+ return _SUCCESS;
+}
+
+void Hal_GetPowerTracking(struct adapter *padapter, u8 *enable)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct odm_dm_struct *pDM_Odm = &(pHalData->odmpriv);
+
+ *enable = pDM_Odm->RFCalibrateInfo.TxPowerTrackControl;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: mpt_SwitchRfSetting
+ *
+ * Overview:	Change RF setting when we switch channel/rate/BW for MP.
+ *
+ * Input: struct adapter * pAdapter
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 01/08/2009 MHC Suggestion from SD3 Willis for 92S series.
+ * 01/09/2009 MHC Add CCK modification for 40MHZ. Suggestion from SD3.
+ *
+ *---------------------------------------------------------------------------*/
+void Hal_mpt_SwitchRfSetting(struct adapter *pAdapter)
+{
+ struct mp_priv *pmp = &pAdapter->mppriv;
+
+ /* <20120525, Kordan> Dynamic mechanism for APK, asked by Dennis. */
+ pmp->MptCtx.backup0x52_RF_A = (u8)PHY_QueryRFReg(pAdapter, RF_PATH_A, RF_0x52, 0x000F0);
+ pmp->MptCtx.backup0x52_RF_B = (u8)PHY_QueryRFReg(pAdapter, RF_PATH_B, RF_0x52, 0x000F0);
+ PHY_SetRFReg(pAdapter, RF_PATH_A, RF_0x52, 0x000F0, 0xD);
+ PHY_SetRFReg(pAdapter, RF_PATH_B, RF_0x52, 0x000F0, 0xD);
+}
+/*---------------------------hal\rtl8192c\MPT_Phy.c---------------------------*/
+
+/*---------------------------hal\rtl8192c\MPT_HelperFunc.c---------------------------*/
+void Hal_MPT_CCKTxPowerAdjust(struct adapter *Adapter, bool bInCH14)
+{
+ u32 TempVal = 0, TempVal2 = 0, TempVal3 = 0;
+ u32 CurrCCKSwingVal = 0, CCKSwingIndex = 12;
+ u8 i;
+
+ /* get current cck swing value and check 0xa22 & 0xa23 later to match the table. */
+ CurrCCKSwingVal = read_bbreg(Adapter, rCCK0_TxFilter1, bMaskHWord);
+
+ if (!bInCH14) {
+ /* Readback the current bb cck swing value and compare with the table to */
+ /* get the current swing index */
+ for (i = 0; i < CCK_TABLE_SIZE; i++) {
+ if (((CurrCCKSwingVal&0xff) == (u32)CCKSwingTable_Ch1_Ch13[i][0]) &&
+ (((CurrCCKSwingVal&0xff00)>>8) == (u32)CCKSwingTable_Ch1_Ch13[i][1])) {
+ CCKSwingIndex = i;
+ break;
+ }
+ }
+
+ /* Write 0xa22 0xa23 */
+ TempVal = CCKSwingTable_Ch1_Ch13[CCKSwingIndex][0] +
+ (CCKSwingTable_Ch1_Ch13[CCKSwingIndex][1]<<8);
+
+
+ /* Write 0xa24 ~ 0xa27 */
+ TempVal2 = 0;
+ TempVal2 = CCKSwingTable_Ch1_Ch13[CCKSwingIndex][2] +
+ (CCKSwingTable_Ch1_Ch13[CCKSwingIndex][3]<<8) +
+ (CCKSwingTable_Ch1_Ch13[CCKSwingIndex][4]<<16)+
+ (CCKSwingTable_Ch1_Ch13[CCKSwingIndex][5]<<24);
+
+ /* Write 0xa28 0xa29 */
+ TempVal3 = 0;
+ TempVal3 = CCKSwingTable_Ch1_Ch13[CCKSwingIndex][6] +
+ (CCKSwingTable_Ch1_Ch13[CCKSwingIndex][7]<<8);
+ } else {
+ for (i = 0; i < CCK_TABLE_SIZE; i++) {
+ if (((CurrCCKSwingVal&0xff) == (u32)CCKSwingTable_Ch14[i][0]) &&
+ (((CurrCCKSwingVal&0xff00)>>8) == (u32)CCKSwingTable_Ch14[i][1])) {
+ CCKSwingIndex = i;
+ break;
+ }
+ }
+
+ /* Write 0xa22 0xa23 */
+ TempVal = CCKSwingTable_Ch14[CCKSwingIndex][0] +
+ (CCKSwingTable_Ch14[CCKSwingIndex][1]<<8);
+
+ /* Write 0xa24 ~ 0xa27 */
+ TempVal2 = 0;
+ TempVal2 = CCKSwingTable_Ch14[CCKSwingIndex][2] +
+ (CCKSwingTable_Ch14[CCKSwingIndex][3]<<8) +
+ (CCKSwingTable_Ch14[CCKSwingIndex][4]<<16)+
+ (CCKSwingTable_Ch14[CCKSwingIndex][5]<<24);
+
+ /* Write 0xa28 0xa29 */
+ TempVal3 = 0;
+ TempVal3 = CCKSwingTable_Ch14[CCKSwingIndex][6] +
+ (CCKSwingTable_Ch14[CCKSwingIndex][7]<<8);
+ }
+
+ write_bbreg(Adapter, rCCK0_TxFilter1, bMaskHWord, TempVal);
+ write_bbreg(Adapter, rCCK0_TxFilter2, bMaskDWord, TempVal2);
+ write_bbreg(Adapter, rCCK0_DebugPort, bMaskLWord, TempVal3);
+}
+
+void Hal_MPT_CCKTxPowerAdjustbyIndex(struct adapter *pAdapter, bool beven)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+ struct mpt_context *pMptCtx = &pAdapter->mppriv.MptCtx;
+ struct odm_dm_struct *pDM_Odm = &(pHalData->odmpriv);
+ s32 TempCCk;
+ u8 CCK_index, CCK_index_old = 0;
+ u8 Action = 0; /* 0: no action, 1: even->odd, 2:odd->even */
+ s32 i = 0;
+
+
+ if (!IS_92C_SERIAL(pHalData->VersionID))
+ return;
+ if (beven && !pMptCtx->bMptIndexEven) {
+ /* odd->even */
+ Action = 2;
+ pMptCtx->bMptIndexEven = true;
+ } else if (!beven && pMptCtx->bMptIndexEven) {
+ /* even->odd */
+ Action = 1;
+ pMptCtx->bMptIndexEven = false;
+ }
+
+ if (Action != 0) {
+ /* Query CCK default setting From 0xa24 */
+ TempCCk = read_bbreg(pAdapter, rCCK0_TxFilter2, bMaskDWord) & bMaskCCK;
+ for (i = 0; i < CCK_TABLE_SIZE; i++) {
+ if (pDM_Odm->RFCalibrateInfo.bCCKinCH14) {
+ if (_rtw_memcmp((void *)&TempCCk, (void *)&CCKSwingTable_Ch14[i][2], 4)) {
+ CCK_index_old = (u8)i;
+ break;
+ }
+ } else {
+ if (_rtw_memcmp((void *)&TempCCk, (void *)&CCKSwingTable_Ch1_Ch13[i][2], 4)) {
+ CCK_index_old = (u8)i;
+ break;
+ }
+ }
+ }
+
+ if (Action == 1)
+ CCK_index = CCK_index_old - 1;
+ else
+ CCK_index = CCK_index_old + 1;
+
+ /* Adjust CCK according to gain index */
+ if (!pDM_Odm->RFCalibrateInfo.bCCKinCH14) {
+ rtw_write8(pAdapter, 0xa22, CCKSwingTable_Ch1_Ch13[CCK_index][0]);
+ rtw_write8(pAdapter, 0xa23, CCKSwingTable_Ch1_Ch13[CCK_index][1]);
+ rtw_write8(pAdapter, 0xa24, CCKSwingTable_Ch1_Ch13[CCK_index][2]);
+ rtw_write8(pAdapter, 0xa25, CCKSwingTable_Ch1_Ch13[CCK_index][3]);
+ rtw_write8(pAdapter, 0xa26, CCKSwingTable_Ch1_Ch13[CCK_index][4]);
+ rtw_write8(pAdapter, 0xa27, CCKSwingTable_Ch1_Ch13[CCK_index][5]);
+ rtw_write8(pAdapter, 0xa28, CCKSwingTable_Ch1_Ch13[CCK_index][6]);
+ rtw_write8(pAdapter, 0xa29, CCKSwingTable_Ch1_Ch13[CCK_index][7]);
+ } else {
+ rtw_write8(pAdapter, 0xa22, CCKSwingTable_Ch14[CCK_index][0]);
+ rtw_write8(pAdapter, 0xa23, CCKSwingTable_Ch14[CCK_index][1]);
+ rtw_write8(pAdapter, 0xa24, CCKSwingTable_Ch14[CCK_index][2]);
+ rtw_write8(pAdapter, 0xa25, CCKSwingTable_Ch14[CCK_index][3]);
+ rtw_write8(pAdapter, 0xa26, CCKSwingTable_Ch14[CCK_index][4]);
+ rtw_write8(pAdapter, 0xa27, CCKSwingTable_Ch14[CCK_index][5]);
+ rtw_write8(pAdapter, 0xa28, CCKSwingTable_Ch14[CCK_index][6]);
+ rtw_write8(pAdapter, 0xa29, CCKSwingTable_Ch14[CCK_index][7]);
+ }
+ }
+}
+/*---------------------------hal\rtl8192c\MPT_HelperFunc.c---------------------------*/
+
+/*
+ * SetChannel
+ * Description
+ * Use H2C command to change channel,
+ * not only modify rf register, but also other setting need to be done.
+ */
+void Hal_SetChannel(struct adapter *pAdapter)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+ struct mp_priv *pmp = &pAdapter->mppriv;
+ struct odm_dm_struct *pDM_Odm = &(pHalData->odmpriv);
+ u8 eRFPath;
+ u8 channel = pmp->channel;
+
+ /* set RF channel register */
+ for (eRFPath = 0; eRFPath < pHalData->NumTotalRFPath; eRFPath++)
+ _write_rfreg(pAdapter, eRFPath, ODM_CHANNEL, 0x3FF, channel);
+ Hal_mpt_SwitchRfSetting(pAdapter);
+
+ SelectChannel(pAdapter, channel);
+
+ if (pHalData->CurrentChannel == 14 && !pDM_Odm->RFCalibrateInfo.bCCKinCH14) {
+ pDM_Odm->RFCalibrateInfo.bCCKinCH14 = true;
+ Hal_MPT_CCKTxPowerAdjust(pAdapter, pDM_Odm->RFCalibrateInfo.bCCKinCH14);
+ } else if (pHalData->CurrentChannel != 14 && pDM_Odm->RFCalibrateInfo.bCCKinCH14) {
+ pDM_Odm->RFCalibrateInfo.bCCKinCH14 = false;
+ Hal_MPT_CCKTxPowerAdjust(pAdapter, pDM_Odm->RFCalibrateInfo.bCCKinCH14);
+ }
+}
+
+/*
+ * Notice
+ *	Switching bandwidth may change the center frequency (channel)
+ */
+void Hal_SetBandwidth(struct adapter *pAdapter)
+{
+ struct mp_priv *pmp = &pAdapter->mppriv;
+
+
+ SetBWMode(pAdapter, pmp->bandwidth, pmp->prime_channel_offset);
+ Hal_mpt_SwitchRfSetting(pAdapter);
+}
+
+void Hal_SetCCKTxPower(struct adapter *pAdapter, u8 *TxPower)
+{
+ u32 tmpval = 0;
+
+
+ /* rf-A cck tx power */
+ write_bbreg(pAdapter, rTxAGC_A_CCK1_Mcs32, bMaskByte1, TxPower[RF_PATH_A]);
+ tmpval = (TxPower[RF_PATH_A]<<16) | (TxPower[RF_PATH_A]<<8) | TxPower[RF_PATH_A];
+ write_bbreg(pAdapter, rTxAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
+
+ /* rf-B cck tx power */
+ write_bbreg(pAdapter, rTxAGC_B_CCK11_A_CCK2_11, bMaskByte0, TxPower[RF_PATH_B]);
+ tmpval = (TxPower[RF_PATH_B]<<16) | (TxPower[RF_PATH_B]<<8) | TxPower[RF_PATH_B];
+ write_bbreg(pAdapter, rTxAGC_B_CCK1_55_Mcs32, 0xffffff00, tmpval);
+
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("-SetCCKTxPower: A[0x%02x] B[0x%02x]\n",
+ TxPower[RF_PATH_A], TxPower[RF_PATH_B]));
+}
+
+void Hal_SetOFDMTxPower(struct adapter *pAdapter, u8 *TxPower)
+{
+ u32 TxAGC = 0;
+ u8 tmpval = 0;
+
+ /* HT Tx-rf(A) */
+ tmpval = TxPower[RF_PATH_A];
+ TxAGC = (tmpval<<24) | (tmpval<<16) | (tmpval<<8) | tmpval;
+
+ write_bbreg(pAdapter, rTxAGC_A_Rate18_06, bMaskDWord, TxAGC);
+ write_bbreg(pAdapter, rTxAGC_A_Rate54_24, bMaskDWord, TxAGC);
+ write_bbreg(pAdapter, rTxAGC_A_Mcs03_Mcs00, bMaskDWord, TxAGC);
+ write_bbreg(pAdapter, rTxAGC_A_Mcs07_Mcs04, bMaskDWord, TxAGC);
+ write_bbreg(pAdapter, rTxAGC_A_Mcs11_Mcs08, bMaskDWord, TxAGC);
+ write_bbreg(pAdapter, rTxAGC_A_Mcs15_Mcs12, bMaskDWord, TxAGC);
+
+ /* HT Tx-rf(B) */
+ tmpval = TxPower[RF_PATH_B];
+ TxAGC = (tmpval<<24) | (tmpval<<16) | (tmpval<<8) | tmpval;
+
+ write_bbreg(pAdapter, rTxAGC_B_Rate18_06, bMaskDWord, TxAGC);
+ write_bbreg(pAdapter, rTxAGC_B_Rate54_24, bMaskDWord, TxAGC);
+ write_bbreg(pAdapter, rTxAGC_B_Mcs03_Mcs00, bMaskDWord, TxAGC);
+ write_bbreg(pAdapter, rTxAGC_B_Mcs07_Mcs04, bMaskDWord, TxAGC);
+ write_bbreg(pAdapter, rTxAGC_B_Mcs11_Mcs08, bMaskDWord, TxAGC);
+ write_bbreg(pAdapter, rTxAGC_B_Mcs15_Mcs12, bMaskDWord, TxAGC);
+}
+
+void Hal_SetAntennaPathPower(struct adapter *pAdapter)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+ u8 TxPowerLevel[MAX_RF_PATH_NUMS];
+ u8 rfPath;
+
+ TxPowerLevel[RF_PATH_A] = pAdapter->mppriv.txpoweridx;
+ TxPowerLevel[RF_PATH_B] = pAdapter->mppriv.txpoweridx_b;
+
+ switch (pAdapter->mppriv.antenna_tx) {
+ case ANTENNA_A:
+ default:
+ rfPath = RF_PATH_A;
+ break;
+ case ANTENNA_B:
+ rfPath = RF_PATH_B;
+ break;
+ case ANTENNA_C:
+ rfPath = RF_PATH_C;
+ break;
+ }
+
+ switch (pHalData->rf_chip) {
+ case RF_8225:
+ case RF_8256:
+ case RF_6052:
+ Hal_SetCCKTxPower(pAdapter, TxPowerLevel);
+ if (pAdapter->mppriv.rateidx < MPT_RATE_6M) /* CCK rate */
+ Hal_MPT_CCKTxPowerAdjustbyIndex(pAdapter, TxPowerLevel[rfPath]%2 == 0);
+ Hal_SetOFDMTxPower(pAdapter, TxPowerLevel);
+ break;
+ default:
+ break;
+ }
+}
+
+void Hal_SetTxPower(struct adapter *pAdapter)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+ u8 TxPower = pAdapter->mppriv.txpoweridx;
+ u8 TxPowerLevel[MAX_RF_PATH_NUMS];
+ u8 rf, rfPath;
+
+ for (rf = 0; rf < MAX_RF_PATH_NUMS; rf++)
+ TxPowerLevel[rf] = TxPower;
+
+ switch (pAdapter->mppriv.antenna_tx) {
+ case ANTENNA_A:
+ default:
+ rfPath = RF_PATH_A;
+ break;
+ case ANTENNA_B:
+ rfPath = RF_PATH_B;
+ break;
+ case ANTENNA_C:
+ rfPath = RF_PATH_C;
+ break;
+ }
+
+ switch (pHalData->rf_chip) {
+ /* 2008/09/12 MH Test only !! We enable the TX power tracking for MP!!!!! */
+ /* We should call normal driver API later!! */
+ case RF_8225:
+ case RF_8256:
+ case RF_6052:
+ Hal_SetCCKTxPower(pAdapter, TxPowerLevel);
+ if (pAdapter->mppriv.rateidx < MPT_RATE_6M) /* CCK rate */
+ Hal_MPT_CCKTxPowerAdjustbyIndex(pAdapter, TxPowerLevel[rfPath]%2 == 0);
+ Hal_SetOFDMTxPower(pAdapter, TxPowerLevel);
+ break;
+ default:
+ break;
+ }
+}
+
+void Hal_SetDataRate(struct adapter *pAdapter)
+{
+ Hal_mpt_SwitchRfSetting(pAdapter);
+}
+
+void Hal_SetAntenna(struct adapter *pAdapter)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+
+ struct ant_sel_ofdm *p_ofdm_tx; /* OFDM Tx register */
+ struct ant_sel_cck *p_cck_txrx;
+ u8 r_rx_antenna_ofdm = 0, r_ant_select_cck_val = 0;
+ u8 chgTx = 0, chgRx = 0;
+ u32 r_ant_select_ofdm_val = 0, r_ofdm_tx_en_val = 0;
+
+
+ p_ofdm_tx = (struct ant_sel_ofdm *)&r_ant_select_ofdm_val;
+ p_cck_txrx = (struct ant_sel_cck *)&r_ant_select_cck_val;
+
+ p_ofdm_tx->r_ant_ht1 = 0x1;
+ p_ofdm_tx->r_ant_ht2 = 0x2; /* Second TX RF path is A */
+ p_ofdm_tx->r_ant_non_ht = 0x3; /* 0x1+0x2=0x3 */
+
+ switch (pAdapter->mppriv.antenna_tx) {
+ case ANTENNA_A:
+ p_ofdm_tx->r_tx_antenna = 0x1;
+ r_ofdm_tx_en_val = 0x1;
+ p_ofdm_tx->r_ant_l = 0x1;
+ p_ofdm_tx->r_ant_ht_s1 = 0x1;
+ p_ofdm_tx->r_ant_non_ht_s1 = 0x1;
+ p_cck_txrx->r_ccktx_enable = 0x8;
+ chgTx = 1;
+
+ /* From SD3 Willis suggestion !!! Set RF A=TX and B as standby */
+ write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter2, 0xe, 2);
+ write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter2, 0xe, 1);
+ r_ofdm_tx_en_val = 0x3;
+
+ /* Power save */
+
+ /* We need to close RFB by SW control */
+ if (pHalData->rf_type == RF_2T2R) {
+ PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT10, 0);
+ PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT26, 1);
+ PHY_SetBBReg(pAdapter, rFPGA0_XB_RFInterfaceOE, BIT10, 0);
+ PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFParameter, BIT1, 1);
+ PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFParameter, BIT17, 0);
+ }
+ break;
+ case ANTENNA_B:
+ p_ofdm_tx->r_tx_antenna = 0x2;
+ r_ofdm_tx_en_val = 0x2;
+ p_ofdm_tx->r_ant_l = 0x2;
+ p_ofdm_tx->r_ant_ht_s1 = 0x2;
+ p_ofdm_tx->r_ant_non_ht_s1 = 0x2;
+ p_cck_txrx->r_ccktx_enable = 0x4;
+ chgTx = 1;
+ /* From SD3 Willis suggestion !!! Set RF A as standby */
+ PHY_SetBBReg(pAdapter, rFPGA0_XA_HSSIParameter2, 0xe, 1);
+ PHY_SetBBReg(pAdapter, rFPGA0_XB_HSSIParameter2, 0xe, 2);
+
+ /* Power save */
+ /* cosa r_ant_select_ofdm_val = 0x22222222; */
+
+ /* 2008/10/31 MH From SD3 Willi's suggestion. We must read RF 1T table. */
+ /* 2009/01/08 MH From Sd3 Willis. We need to close RFA by SW control */
+ if (pHalData->rf_type == RF_2T2R || pHalData->rf_type == RF_1T2R) {
+ PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT10, 1);
+ PHY_SetBBReg(pAdapter, rFPGA0_XA_RFInterfaceOE, BIT10, 0);
+ PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT26, 0);
+ PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFParameter, BIT1, 0);
+ PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFParameter, BIT17, 1);
+ }
+ break;
+ case ANTENNA_AB: /* For 8192S */
+ p_ofdm_tx->r_tx_antenna = 0x3;
+ r_ofdm_tx_en_val = 0x3;
+ p_ofdm_tx->r_ant_l = 0x3;
+ p_ofdm_tx->r_ant_ht_s1 = 0x3;
+ p_ofdm_tx->r_ant_non_ht_s1 = 0x3;
+ p_cck_txrx->r_ccktx_enable = 0xC;
+ chgTx = 1;
+
+ /* From SD3 Willis suggestion !!! Set RF B as standby */
+ PHY_SetBBReg(pAdapter, rFPGA0_XA_HSSIParameter2, 0xe, 2);
+ PHY_SetBBReg(pAdapter, rFPGA0_XB_HSSIParameter2, 0xe, 2);
+
+ /* Disable Power save */
+ /* cosa r_ant_select_ofdm_val = 0x3321333; */
+ /* 2009/01/08 MH From Sd3 Willis. We need to enable RFA/B by SW control */
+ if (pHalData->rf_type == RF_2T2R) {
+ PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT10, 0);
+ PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT26, 0);
+ PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFParameter, BIT1, 1);
+ PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFParameter, BIT17, 1);
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* r_rx_antenna_ofdm, bit0=A, bit1=B, bit2=C, bit3=D */
+ /* r_cckrx_enable : CCK default, 0=A, 1=B, 2=C, 3=D */
+ /* r_cckrx_enable_2 : CCK option, 0=A, 1=B, 2=C, 3=D */
+ switch (pAdapter->mppriv.antenna_rx) {
+ case ANTENNA_A:
+ r_rx_antenna_ofdm = 0x1; /* A */
+ p_cck_txrx->r_cckrx_enable = 0x0; /* default: A */
+ p_cck_txrx->r_cckrx_enable_2 = 0x0; /* option: A */
+ chgRx = 1;
+ break;
+ case ANTENNA_B:
+ r_rx_antenna_ofdm = 0x2; /* B */
+ p_cck_txrx->r_cckrx_enable = 0x1; /* default: B */
+ p_cck_txrx->r_cckrx_enable_2 = 0x1; /* option: B */
+ chgRx = 1;
+ break;
+ case ANTENNA_AB:
+ r_rx_antenna_ofdm = 0x3; /* AB */
+ p_cck_txrx->r_cckrx_enable = 0x0; /* default:A */
+ p_cck_txrx->r_cckrx_enable_2 = 0x1; /* option:B */
+ chgRx = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (chgTx && chgRx) {
+ switch (pHalData->rf_chip) {
+ case RF_8225:
+ case RF_8256:
+ case RF_6052:
+ /* r_ant_sel_cck_val = r_ant_select_cck_val; */
+ PHY_SetBBReg(pAdapter, rFPGA1_TxInfo, 0x7fffffff, r_ant_select_ofdm_val); /* OFDM Tx */
+ PHY_SetBBReg(pAdapter, rFPGA0_TxInfo, 0x0000000f, r_ofdm_tx_en_val); /* OFDM Tx */
+ PHY_SetBBReg(pAdapter, rOFDM0_TRxPathEnable, 0x0000000f, r_rx_antenna_ofdm); /* OFDM Rx */
+ PHY_SetBBReg(pAdapter, rOFDM1_TRxPathEnable, 0x0000000f, r_rx_antenna_ofdm); /* OFDM Rx */
+ PHY_SetBBReg(pAdapter, rCCK0_AFESetting, bMaskByte3, r_ant_select_cck_val); /* CCK TxRx */
+
+ break;
+ default:
+ break;
+ }
+ }
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("-SwitchAntenna: finished\n"));
+}
+
+s32 Hal_SetThermalMeter(struct adapter *pAdapter, u8 target_ther)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+
+
+ if (!netif_running(pAdapter->pnetdev)) {
+ RT_TRACE(_module_mp_, _drv_warning_, ("SetThermalMeter! Fail: interface not opened!\n"));
+ return _FAIL;
+ }
+
+	if (!check_fwstate(&pAdapter->mlmepriv, WIFI_MP_STATE)) {
+ RT_TRACE(_module_mp_, _drv_warning_, ("SetThermalMeter: Fail! not in MP mode!\n"));
+ return _FAIL;
+ }
+
+ target_ther &= 0xff;
+ if (target_ther < 0x07)
+ target_ther = 0x07;
+ else if (target_ther > 0x1d)
+ target_ther = 0x1d;
+
+ pHalData->EEPROMThermalMeter = target_ther;
+
+ return _SUCCESS;
+}
+
+void Hal_TriggerRFThermalMeter(struct adapter *pAdapter)
+{
+	_write_rfreg(pAdapter, RF_PATH_A, RF_T_METER_88E, BIT17 | BIT16, 0x03);
+}
+
+u8 Hal_ReadRFThermalMeter(struct adapter *pAdapter)
+{
+ u32 ThermalValue = 0;
+
+ ThermalValue = _read_rfreg(pAdapter, RF_PATH_A, RF_T_METER_88E, 0xfc00);
+ return (u8)ThermalValue;
+}
+
+void Hal_GetThermalMeter(struct adapter *pAdapter, u8 *value)
+{
+ Hal_TriggerRFThermalMeter(pAdapter);
+ rtw_msleep_os(1000);
+ *value = Hal_ReadRFThermalMeter(pAdapter);
+}
+
+void Hal_SetSingleCarrierTx(struct adapter *pAdapter, u8 bStart)
+{
+ pAdapter->mppriv.MptCtx.bSingleCarrier = bStart;
+ if (bStart) {
+ /* Start Single Carrier. */
+ RT_TRACE(_module_mp_, _drv_alert_, ("SetSingleCarrierTx: test start\n"));
+ /* 1. if OFDM block on? */
+ if (!read_bbreg(pAdapter, rFPGA0_RFMOD, bOFDMEn))
+ write_bbreg(pAdapter, rFPGA0_RFMOD, bOFDMEn, bEnable);/* set OFDM block on */
+
+ /* 2. set CCK test mode off, set to CCK normal mode */
+ write_bbreg(pAdapter, rCCK0_System, bCCKBBMode, bDisable);
+ /* 3. turn on scramble setting */
+ write_bbreg(pAdapter, rCCK0_System, bCCKScramble, bEnable);
+ /* 4. Turn On Single Carrier Tx and turn off the other test modes. */
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMContinueTx, bDisable);
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bEnable);
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
+ /* for dynamic set Power index. */
+ write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000500);
+ write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000500);
+ } else {
+ /* Stop Single Carrier. */
+ RT_TRACE(_module_mp_, _drv_alert_, ("SetSingleCarrierTx: test stop\n"));
+
+ /* Turn off all test modes. */
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMContinueTx, bDisable);
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bDisable);
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
+ rtw_msleep_os(10);
+
+ /* BB Reset */
+ write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x0);
+ write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x1);
+
+ /* Stop for dynamic set Power index. */
+ write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000100);
+ write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000100);
+ }
+}
+
+
+void Hal_SetSingleToneTx(struct adapter *pAdapter, u8 bStart)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+ bool is92C = IS_92C_SERIAL(pHalData->VersionID);
+
+ u8 rfPath;
+	u32 reg58 = 0x0;
+
+ switch (pAdapter->mppriv.antenna_tx) {
+ case ANTENNA_A:
+ default:
+ rfPath = RF_PATH_A;
+ break;
+ case ANTENNA_B:
+ rfPath = RF_PATH_B;
+ break;
+ case ANTENNA_C:
+ rfPath = RF_PATH_C;
+ break;
+ }
+
+ pAdapter->mppriv.MptCtx.bSingleTone = bStart;
+ if (bStart) {
+ /* Start Single Tone. */
+ RT_TRACE(_module_mp_, _drv_alert_, ("SetSingleToneTx: test start\n"));
+ /* <20120326, Kordan> To amplify the power of tone for Xtal calibration. (asked by Edlu) */
+ if (IS_HARDWARE_TYPE_8188E(pAdapter)) {
+ reg58 = PHY_QueryRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask);
+ reg58 &= 0xFFFFFFF0;
+ reg58 += 2;
+ PHY_SetRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask, reg58);
+ }
+ PHY_SetBBReg(pAdapter, rFPGA0_RFMOD, bCCKEn, 0x0);
+ PHY_SetBBReg(pAdapter, rFPGA0_RFMOD, bOFDMEn, 0x0);
+
+ if (is92C) {
+ _write_rfreg(pAdapter, RF_PATH_A, 0x21, BIT19, 0x01);
+ rtw_usleep_os(100);
+ if (rfPath == RF_PATH_A)
+ write_rfreg(pAdapter, RF_PATH_B, 0x00, 0x10000); /* PAD all on. */
+ else if (rfPath == RF_PATH_B)
+ write_rfreg(pAdapter, RF_PATH_A, 0x00, 0x10000); /* PAD all on. */
+ write_rfreg(pAdapter, rfPath, 0x00, 0x2001f); /* PAD all on. */
+ rtw_usleep_os(100);
+ } else {
+ write_rfreg(pAdapter, rfPath, 0x21, 0xd4000);
+ rtw_usleep_os(100);
+ write_rfreg(pAdapter, rfPath, 0x00, 0x2001f); /* PAD all on. */
+ rtw_usleep_os(100);
+ }
+
+ /* for dynamic set Power index. */
+ write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000500);
+ write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000500);
+
+ } else {
+ /* Stop Single Tone. */
+ RT_TRACE(_module_mp_, _drv_alert_, ("SetSingleToneTx: test stop\n"));
+
+ /* <20120326, Kordan> To amplify the power of tone for Xtal calibration. (asked by Edlu) */
+ /* <20120326, Kordan> Only in single tone mode. (asked by Edlu) */
+ if (IS_HARDWARE_TYPE_8188E(pAdapter)) {
+ reg58 = PHY_QueryRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask);
+ reg58 &= 0xFFFFFFF0;
+ PHY_SetRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask, reg58);
+ }
+ write_bbreg(pAdapter, rFPGA0_RFMOD, bCCKEn, 0x1);
+ write_bbreg(pAdapter, rFPGA0_RFMOD, bOFDMEn, 0x1);
+ if (is92C) {
+ _write_rfreg(pAdapter, RF_PATH_A, 0x21, BIT19, 0x00);
+ rtw_usleep_os(100);
+ write_rfreg(pAdapter, RF_PATH_A, 0x00, 0x32d75); /* PAD all on. */
+ write_rfreg(pAdapter, RF_PATH_B, 0x00, 0x32d75); /* PAD all on. */
+ rtw_usleep_os(100);
+ } else {
+ write_rfreg(pAdapter, rfPath, 0x21, 0x54000);
+ rtw_usleep_os(100);
+ write_rfreg(pAdapter, rfPath, 0x00, 0x30000); /* PAD all on. */
+ rtw_usleep_os(100);
+ }
+
+ /* Stop for dynamic set Power index. */
+ write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000100);
+ write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000100);
+ }
+}
+
+
+
+void Hal_SetCarrierSuppressionTx(struct adapter *pAdapter, u8 bStart)
+{
+ pAdapter->mppriv.MptCtx.bCarrierSuppression = bStart;
+ if (bStart) {
+ /* Start Carrier Suppression. */
+ RT_TRACE(_module_mp_, _drv_alert_, ("SetCarrierSuppressionTx: test start\n"));
+ if (pAdapter->mppriv.rateidx <= MPT_RATE_11M) {
+ /* 1. if CCK block on? */
+ if (!read_bbreg(pAdapter, rFPGA0_RFMOD, bCCKEn))
+ write_bbreg(pAdapter, rFPGA0_RFMOD, bCCKEn, bEnable);/* set CCK block on */
+
+ /* Turn Off All Test Mode */
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMContinueTx, bDisable);
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bDisable);
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
+
+ write_bbreg(pAdapter, rCCK0_System, bCCKBBMode, 0x2); /* transmit mode */
+ write_bbreg(pAdapter, rCCK0_System, bCCKScramble, 0x0); /* turn off scramble setting */
+
+ /* Set CCK Tx Test Rate */
+ write_bbreg(pAdapter, rCCK0_System, bCCKTxRate, 0x0); /* Set FTxRate to 1Mbps */
+ }
+
+ /* for dynamic set Power index. */
+ write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000500);
+ write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000500);
+ } else {
+ /* Stop Carrier Suppression. */
+ RT_TRACE(_module_mp_, _drv_alert_, ("SetCarrierSuppressionTx: test stop\n"));
+ if (pAdapter->mppriv.rateidx <= MPT_RATE_11M) {
+ write_bbreg(pAdapter, rCCK0_System, bCCKBBMode, 0x0); /* normal mode */
+ write_bbreg(pAdapter, rCCK0_System, bCCKScramble, 0x1); /* turn on scramble setting */
+
+ /* BB Reset */
+ write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x0);
+ write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x1);
+ }
+
+ /* Stop for dynamic set Power index. */
+ write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000100);
+ write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000100);
+ }
+}
+
+void Hal_SetCCKContinuousTx(struct adapter *pAdapter, u8 bStart)
+{
+ u32 cckrate;
+
+ if (bStart) {
+ RT_TRACE(_module_mp_, _drv_alert_,
+ ("SetCCKContinuousTx: test start\n"));
+
+ /* 1. if CCK block on? */
+ if (!read_bbreg(pAdapter, rFPGA0_RFMOD, bCCKEn))
+ write_bbreg(pAdapter, rFPGA0_RFMOD, bCCKEn, bEnable);/* set CCK block on */
+
+ /* Turn Off All Test Mode */
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMContinueTx, bDisable);
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bDisable);
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
+ /* Set CCK Tx Test Rate */
+ cckrate = pAdapter->mppriv.rateidx;
+ write_bbreg(pAdapter, rCCK0_System, bCCKTxRate, cckrate);
+ write_bbreg(pAdapter, rCCK0_System, bCCKBBMode, 0x2); /* transmit mode */
+ write_bbreg(pAdapter, rCCK0_System, bCCKScramble, bEnable); /* turn on scramble setting */
+
+ /* for dynamic set Power index. */
+ write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000500);
+ write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000500);
+ } else {
+ RT_TRACE(_module_mp_, _drv_info_,
+ ("SetCCKContinuousTx: test stop\n"));
+
+ write_bbreg(pAdapter, rCCK0_System, bCCKBBMode, 0x0); /* normal mode */
+ write_bbreg(pAdapter, rCCK0_System, bCCKScramble, bEnable); /* turn on scramble setting */
+
+ /* BB Reset */
+ write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x0);
+ write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x1);
+
+ /* Stop for dynamic set Power index. */
+ write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000100);
+ write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000100);
+ }
+
+ pAdapter->mppriv.MptCtx.bCckContTx = bStart;
+ pAdapter->mppriv.MptCtx.bOfdmContTx = false;
+} /* mpt_StartCckContTx */
+
+void Hal_SetOFDMContinuousTx(struct adapter *pAdapter, u8 bStart)
+{
+ if (bStart) {
+ RT_TRACE(_module_mp_, _drv_info_, ("SetOFDMContinuousTx: test start\n"));
+ /* 1. if OFDM block on? */
+ if (!read_bbreg(pAdapter, rFPGA0_RFMOD, bOFDMEn))
+ write_bbreg(pAdapter, rFPGA0_RFMOD, bOFDMEn, bEnable);/* set OFDM block on */
+
+ /* 2. set CCK test mode off, set to CCK normal mode */
+ write_bbreg(pAdapter, rCCK0_System, bCCKBBMode, bDisable);
+
+ /* 3. turn on scramble setting */
+ write_bbreg(pAdapter, rCCK0_System, bCCKScramble, bEnable);
+ /* 4. Turn On Continue Tx and turn off the other test modes. */
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMContinueTx, bEnable);
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bDisable);
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
+
+ /* for dynamic set Power index. */
+ write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000500);
+ write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000500);
+
+ } else {
+ RT_TRACE(_module_mp_, _drv_info_, ("SetOFDMContinuousTx: test stop\n"));
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMContinueTx, bDisable);
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bDisable);
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
+ /* Delay 10 ms */
+ rtw_msleep_os(10);
+ /* BB Reset */
+ write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x0);
+ write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x1);
+
+ /* Stop for dynamic set Power index. */
+ write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000100);
+ write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000100);
+ }
+
+ pAdapter->mppriv.MptCtx.bCckContTx = false;
+ pAdapter->mppriv.MptCtx.bOfdmContTx = bStart;
+} /* mpt_StartOfdmContTx */
+
+void Hal_SetContinuousTx(struct adapter *pAdapter, u8 bStart)
+{
+ RT_TRACE(_module_mp_, _drv_info_,
+ ("SetContinuousTx: rate:%d\n", pAdapter->mppriv.rateidx));
+
+ pAdapter->mppriv.MptCtx.bStartContTx = bStart;
+ if (pAdapter->mppriv.rateidx <= MPT_RATE_11M)
+ Hal_SetCCKContinuousTx(pAdapter, bStart);
+ else if ((pAdapter->mppriv.rateidx >= MPT_RATE_6M) &&
+ (pAdapter->mppriv.rateidx <= MPT_RATE_MCS15))
+ Hal_SetOFDMContinuousTx(pAdapter, bStart);
+}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_phycfg.c b/drivers/staging/rtl8188eu/hal/rtl8188e_phycfg.c
new file mode 100644
index 0000000..ff468a68
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_phycfg.c
@@ -0,0 +1,1144 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTL8188E_PHYCFG_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <rtw_iol.h>
+#include <rtl8188e_hal.h>
+
+/*---------------------------Define Local Constant---------------------------*/
+/* Channel switch:The size of command tables for switch channel*/
+#define MAX_PRECMD_CNT 16
+#define MAX_RFDEPENDCMD_CNT 16
+#define MAX_POSTCMD_CNT 16
+
+#define MAX_DOZE_WAITING_TIMES_9x 64
+
+/*---------------------------Define Local Constant---------------------------*/
+
+
+/*------------------------Define global variable-----------------------------*/
+
+/*------------------------Define local variable------------------------------*/
+
+
+/*--------------------Define export function prototype-----------------------*/
+/* Please refer to header file */
+/*--------------------Define export function prototype-----------------------*/
+
+/*----------------------------Function Body----------------------------------*/
+/* */
+/* 1. BB register R/W API */
+/* */
+
+/**
+* Function: phy_CalculateBitShift
+*
+* OverView: Get shifted position of the BitMask
+*
+* Input:
+* u32 BitMask,
+*
+* Output: none
+* Return:		u32		Return the shift bit position of the mask
+*/
+static u32 phy_CalculateBitShift(u32 BitMask)
+{
+ u32 i;
+
+ for (i = 0; i <= 31; i++) {
+ if (((BitMask>>i) & 0x1) == 1)
+ break;
+ }
+ return i;
+}
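+
+/*
+ * Editorial note: illustrative sketch only, not part of the original patch.
+ * phy_CalculateBitShift() returns the position of the least significant set
+ * bit of the mask, so a masked field can be extracted or inserted. For
+ * BitMask = 0x00F0 the shift is 4:
+ *
+ *	value = (reg & 0x00F0) >> 4;			field read
+ *	reg = (reg & ~0x00F0) | (value << 4);		field write
+ *
+ * This is exactly the pattern used by the BB/RF query and set helpers below.
+ */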
+
+/**
+* Function: PHY_QueryBBReg
+*
+* OverView:	Read "specific bits" from BB register
+*
+* Input:
+* struct adapter *Adapter,
+*			u32 RegAddr,		The target address to be read back
+*			u32 BitMask		The target bit position in the target address
+*						to be read back
+* Output: None
+* Return: u32 Data The readback register value
+* Note: This function is equal to "GetRegSetting" in PHY programming guide
+*/
+u32
+rtl8188e_PHY_QueryBBReg(
+ struct adapter *Adapter,
+ u32 RegAddr,
+ u32 BitMask
+ )
+{
+ u32 ReturnValue = 0, OriginalValue, BitShift;
+
+ OriginalValue = rtw_read32(Adapter, RegAddr);
+ BitShift = phy_CalculateBitShift(BitMask);
+ ReturnValue = (OriginalValue & BitMask) >> BitShift;
+ return ReturnValue;
+}
+
+
+/**
+* Function: PHY_SetBBReg
+*
+* OverView: Write "Specific bits" to BB register (page 8~)
+*
+* Input:
+* struct adapter *Adapter,
+* u32 RegAddr, The target address to be modified
+* u32 BitMask The target bit position in the target address
+* to be modified
+* u32 Data The new register value in the target bit position
+* of the target address
+*
+* Output: None
+* Return: None
+* Note: This function is equal to "PutRegSetting" in PHY programming guide
+*/
+
+void rtl8188e_PHY_SetBBReg(struct adapter *Adapter, u32 RegAddr, u32 BitMask, u32 Data)
+{
+ u32 OriginalValue, BitShift;
+
+ if (BitMask != bMaskDWord) { /* if not "double word" write */
+ OriginalValue = rtw_read32(Adapter, RegAddr);
+ BitShift = phy_CalculateBitShift(BitMask);
+ Data = ((OriginalValue & (~BitMask)) | (Data << BitShift));
+ }
+
+ rtw_write32(Adapter, RegAddr, Data);
+}
+
+
+/* */
+/* 2. RF register R/W API */
+/* */
+/**
+* Function: phy_RFSerialRead
+*
+* OverView:	Read register from RF chips
+*
+* Input:
+* struct adapter *Adapter,
+* enum rf_radio_path eRFPath, Radio path of A/B/C/D
+* u32 Offset, The target address to be read
+*
+* Output: None
+* Return:		u32			readback value
+* Note:		There are three types of serial operations:
+*			1. Software serial write
+*			2. Hardware LSSI-Low Speed Serial Interface
+*			3. Hardware HSSI-High speed
+*			serial write. The driver needs to implement (1) and (2).
+* This function is equal to the combination of RF_ReadReg() and RFLSSIRead()
+*/
+static u32
+phy_RFSerialRead(
+ struct adapter *Adapter,
+ enum rf_radio_path eRFPath,
+ u32 Offset
+ )
+{
+ u32 retValue = 0;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct bb_reg_def *pPhyReg = &pHalData->PHYRegDef[eRFPath];
+ u32 NewOffset;
+ u32 tmplong, tmplong2;
+ u8 RfPiEnable = 0;
+ /* */
+ /* Make sure RF register offset is correct */
+ /* */
+ Offset &= 0xff;
+
+ /* */
+ /* Switch page for 8256 RF IC */
+ /* */
+ NewOffset = Offset;
+
+ /* For 92S LSSI Read RFLSSIRead */
+	/* For RF A/B, write 0x824/0x82c (this may not work on future chips) */
+ /* We must use 0x824 for RF A and B to execute read trigger */
+ tmplong = PHY_QueryBBReg(Adapter, rFPGA0_XA_HSSIParameter2, bMaskDWord);
+ if (eRFPath == RF_PATH_A)
+ tmplong2 = tmplong;
+ else
+ tmplong2 = PHY_QueryBBReg(Adapter, pPhyReg->rfHSSIPara2, bMaskDWord);
+
+ tmplong2 = (tmplong2 & (~bLSSIReadAddress)) | (NewOffset<<23) | bLSSIReadEdge; /* T65 RF */
+
+ PHY_SetBBReg(Adapter, rFPGA0_XA_HSSIParameter2, bMaskDWord, tmplong&(~bLSSIReadEdge));
+ rtw_udelay_os(10);/* PlatformStallExecution(10); */
+
+ PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, bMaskDWord, tmplong2);
+ rtw_udelay_os(100);/* PlatformStallExecution(100); */
+
+ rtw_udelay_os(10);/* PlatformStallExecution(10); */
+
+ if (eRFPath == RF_PATH_A)
+ RfPiEnable = (u8)PHY_QueryBBReg(Adapter, rFPGA0_XA_HSSIParameter1, BIT8);
+ else if (eRFPath == RF_PATH_B)
+ RfPiEnable = (u8)PHY_QueryBBReg(Adapter, rFPGA0_XB_HSSIParameter1, BIT8);
+
+ if (RfPiEnable) { /* Read from BBreg8b8, 12 bits for 8190, 20bits for T65 RF */
+ retValue = PHY_QueryBBReg(Adapter, pPhyReg->rfLSSIReadBackPi, bLSSIReadBackData);
+ } else { /* Read from BBreg8a0, 12 bits for 8190, 20 bits for T65 RF */
+ retValue = PHY_QueryBBReg(Adapter, pPhyReg->rfLSSIReadBack, bLSSIReadBackData);
+ }
+ return retValue;
+}
+
+/**
+* Function: phy_RFSerialWrite
+*
+* OverView: Write data to RF register (page 8~)
+*
+* Input:
+* struct adapter *Adapter,
+* enum rf_radio_path eRFPath, Radio path of A/B/C/D
+*			u32 Offset,		The target address to be written
+*			u32 Data		The new register Data in the target bit position
+*						of the target address
+*
+* Output: None
+* Return: None
+* Note:		There are three types of serial operations:
+*			1. Software serial write
+*			2. Hardware LSSI-Low Speed Serial Interface
+*			3. Hardware HSSI-High speed
+*			serial write. The driver needs to implement (1) and (2).
+* This function is equal to the combination of RF_ReadReg() and RFLSSIRead()
+ *
+ * Note: For RF8256 only
+ * The total count of RTL8256 (Zebra4) registers is around 36, but it only employs a
+ * 4-bit RF address. RTL8256 uses "register mode control bit" (Reg00[12], Reg00[10])
+ * to access register addresses bigger than 0xf. See "Appendix-4 in PHY Configuration
+ * programming guide" for more details.
+ * Thus, we define a sub-function for RTL8256 register address conversion
+ * ===========================================================
+ * Register Mode RegCTL[1] RegCTL[0] Note
+ * (Reg00[12]) (Reg00[10])
+ * ===========================================================
+ * Reg_Mode0 0 x Reg 0 ~15(0x0 ~ 0xf)
+ * ------------------------------------------------------------------
+ * Reg_Mode1 1 0 Reg 16 ~30(0x1 ~ 0xf)
+ * ------------------------------------------------------------------
+ * Reg_Mode2 1 1 Reg 31 ~ 45(0x1 ~ 0xf)
+ * ------------------------------------------------------------------
+ *
+ * 2008/09/02 MH Add 92S RF definition
+ *
+ *
+ *
+*/
+static void
+phy_RFSerialWrite(
+ struct adapter *Adapter,
+ enum rf_radio_path eRFPath,
+ u32 Offset,
+ u32 Data
+ )
+{
+ u32 DataAndAddr = 0;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct bb_reg_def *pPhyReg = &pHalData->PHYRegDef[eRFPath];
+ u32 NewOffset;
+
+
+ /* 2009/06/17 MH We can not execute IO for power save or other accident mode. */
+
+ Offset &= 0xff;
+
+ /* */
+ /* Switch page for 8256 RF IC */
+ /* */
+ NewOffset = Offset;
+
+ /* */
+	/* Put the write address in [27:20] and the write data in [19:0] (T65 RF) */
+ /* */
+ DataAndAddr = ((NewOffset<<20) | (Data&0x000fffff)) & 0x0fffffff; /* T65 RF */
+
+ /* */
+ /* Write Operation */
+ /* */
+ PHY_SetBBReg(Adapter, pPhyReg->rf3wireOffset, bMaskDWord, DataAndAddr);
+}
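+
+/*
+ * Editorial note: illustrative sketch only, not part of the original patch.
+ * For the T65 RF the 3-wire frame built above packs the register offset into
+ * bits [27:20] and the 20-bit data into bits [19:0]. For example, writing
+ * 0x2001f to RF register 0x00 (as rtl8188e_mp.c does) yields:
+ *
+ *	DataAndAddr = ((0x00 << 20) | (0x2001f & 0x000fffff)) & 0x0fffffff
+ *	            = 0x0002001f
+ */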
+
+/**
+* Function: PHY_QueryRFReg
+*
+* OverView:	Query "Specific bits" from RF register (page 8~)
+*
+* Input:
+* struct adapter *Adapter,
+* enum rf_radio_path eRFPath, Radio path of A/B/C/D
+* u32 RegAddr, The target address to be read
+* u32 BitMask The target bit position in the target address
+* to be read
+*
+* Output: None
+* Return: u32 Readback value
+* Note: This function is equal to "GetRFRegSetting" in PHY programming guide
+*/
+u32 rtl8188e_PHY_QueryRFReg(struct adapter *Adapter, enum rf_radio_path eRFPath,
+ u32 RegAddr, u32 BitMask)
+{
+ u32 Original_Value, Readback_Value, BitShift;
+
+ Original_Value = phy_RFSerialRead(Adapter, eRFPath, RegAddr);
+
+ BitShift = phy_CalculateBitShift(BitMask);
+ Readback_Value = (Original_Value & BitMask) >> BitShift;
+ return Readback_Value;
+}
+
+/**
+* Function: PHY_SetRFReg
+*
+* OverView: Write "Specific bits" to RF register (page 8~)
+*
+* Input:
+* struct adapter *Adapter,
+* enum rf_radio_path eRFPath, Radio path of A/B/C/D
+* u32 RegAddr, The target address to be modified
+* u32 BitMask The target bit position in the target address
+* to be modified
+* u32 Data The new register Data in the target bit position
+* of the target address
+*
+* Output: None
+* Return: None
+* Note: This function is equal to "PutRFRegSetting" in PHY programming guide
+*/
+void
+rtl8188e_PHY_SetRFReg(
+ struct adapter *Adapter,
+ enum rf_radio_path eRFPath,
+ u32 RegAddr,
+ u32 BitMask,
+ u32 Data
+ )
+{
+ u32 Original_Value, BitShift;
+
+ /* RF data is 12 bits only */
+ if (BitMask != bRFRegOffsetMask) {
+ Original_Value = phy_RFSerialRead(Adapter, eRFPath, RegAddr);
+ BitShift = phy_CalculateBitShift(BitMask);
+ Data = ((Original_Value & (~BitMask)) | (Data << BitShift));
+ }
+
+ phy_RFSerialWrite(Adapter, eRFPath, RegAddr, Data);
+}
+
+/* */
+/* 3. Initial MAC/BB/RF config by reading MAC/BB/RF txt. */
+/* */
+
+/*-----------------------------------------------------------------------------
+ * Function: PHY_MACConfig8192C
+ *
+ * Overview:	Config MAC by header file or parameter file.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 08/12/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+s32 PHY_MACConfig8188E(struct adapter *Adapter)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ int rtStatus = _SUCCESS;
+
+ /* */
+ /* Config MAC */
+ /* */
+ if (HAL_STATUS_FAILURE == ODM_ConfigMACWithHeaderFile(&pHalData->odmpriv))
+ rtStatus = _FAIL;
+
+ /* 2010.07.13 AMPDU aggregation number B */
+ rtw_write16(Adapter, REG_MAX_AGGR_NUM, MAX_AGGR_NUM);
+
+ return rtStatus;
+}
+
+/**
+* Function: phy_InitBBRFRegisterDefinition
+*
+* OverView: Initialize Register definition offset for Radio Path A/B/C/D
+*
+* Input:
+* struct adapter *Adapter,
+*
+* Output: None
+* Return: None
+* Note: The initialization value is constant and it should never be changed
+*/
+static void
+phy_InitBBRFRegisterDefinition(
+ struct adapter *Adapter
+)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+
+	/* RF Interface Software Control */
+ pHalData->PHYRegDef[RF_PATH_A].rfintfs = rFPGA0_XAB_RFInterfaceSW; /* 16 LSBs if read 32-bit from 0x870 */
+ pHalData->PHYRegDef[RF_PATH_B].rfintfs = rFPGA0_XAB_RFInterfaceSW; /* 16 MSBs if read 32-bit from 0x870 (16-bit for 0x872) */
+ pHalData->PHYRegDef[RF_PATH_C].rfintfs = rFPGA0_XCD_RFInterfaceSW;/* 16 LSBs if read 32-bit from 0x874 */
+ pHalData->PHYRegDef[RF_PATH_D].rfintfs = rFPGA0_XCD_RFInterfaceSW;/* 16 MSBs if read 32-bit from 0x874 (16-bit for 0x876) */
+
+ /* RF Interface Readback Value */
+ pHalData->PHYRegDef[RF_PATH_A].rfintfi = rFPGA0_XAB_RFInterfaceRB; /* 16 LSBs if read 32-bit from 0x8E0 */
+ pHalData->PHYRegDef[RF_PATH_B].rfintfi = rFPGA0_XAB_RFInterfaceRB;/* 16 MSBs if read 32-bit from 0x8E0 (16-bit for 0x8E2) */
+ pHalData->PHYRegDef[RF_PATH_C].rfintfi = rFPGA0_XCD_RFInterfaceRB;/* 16 LSBs if read 32-bit from 0x8E4 */
+ pHalData->PHYRegDef[RF_PATH_D].rfintfi = rFPGA0_XCD_RFInterfaceRB;/* 16 MSBs if read 32-bit from 0x8E4 (16-bit for 0x8E6) */
+
+ /* RF Interface Output (and Enable) */
+ pHalData->PHYRegDef[RF_PATH_A].rfintfo = rFPGA0_XA_RFInterfaceOE; /* 16 LSBs if read 32-bit from 0x860 */
+ pHalData->PHYRegDef[RF_PATH_B].rfintfo = rFPGA0_XB_RFInterfaceOE; /* 16 LSBs if read 32-bit from 0x864 */
+
+ /* RF Interface (Output and) Enable */
+ pHalData->PHYRegDef[RF_PATH_A].rfintfe = rFPGA0_XA_RFInterfaceOE; /* 16 MSBs if read 32-bit from 0x860 (16-bit for 0x862) */
+ pHalData->PHYRegDef[RF_PATH_B].rfintfe = rFPGA0_XB_RFInterfaceOE; /* 16 MSBs if read 32-bit from 0x864 (16-bit for 0x866) */
+
+	/* Addr of LSSI. Write RF register by driver */
+ pHalData->PHYRegDef[RF_PATH_A].rf3wireOffset = rFPGA0_XA_LSSIParameter; /* LSSI Parameter */
+ pHalData->PHYRegDef[RF_PATH_B].rf3wireOffset = rFPGA0_XB_LSSIParameter;
+
+ /* RF parameter */
+ pHalData->PHYRegDef[RF_PATH_A].rfLSSI_Select = rFPGA0_XAB_RFParameter; /* BB Band Select */
+ pHalData->PHYRegDef[RF_PATH_B].rfLSSI_Select = rFPGA0_XAB_RFParameter;
+ pHalData->PHYRegDef[RF_PATH_C].rfLSSI_Select = rFPGA0_XCD_RFParameter;
+ pHalData->PHYRegDef[RF_PATH_D].rfLSSI_Select = rFPGA0_XCD_RFParameter;
+
+ /* Tx AGC Gain Stage (same for all path. Should we remove this?) */
+ pHalData->PHYRegDef[RF_PATH_A].rfTxGainStage = rFPGA0_TxGainStage; /* Tx gain stage */
+ pHalData->PHYRegDef[RF_PATH_B].rfTxGainStage = rFPGA0_TxGainStage; /* Tx gain stage */
+ pHalData->PHYRegDef[RF_PATH_C].rfTxGainStage = rFPGA0_TxGainStage; /* Tx gain stage */
+ pHalData->PHYRegDef[RF_PATH_D].rfTxGainStage = rFPGA0_TxGainStage; /* Tx gain stage */
+
+	/* Transceiver A~D HSSI Parameter-1 */
+ pHalData->PHYRegDef[RF_PATH_A].rfHSSIPara1 = rFPGA0_XA_HSSIParameter1; /* wire control parameter1 */
+ pHalData->PHYRegDef[RF_PATH_B].rfHSSIPara1 = rFPGA0_XB_HSSIParameter1; /* wire control parameter1 */
+
+	/* Transceiver A~D HSSI Parameter-2 */
+ pHalData->PHYRegDef[RF_PATH_A].rfHSSIPara2 = rFPGA0_XA_HSSIParameter2; /* wire control parameter2 */
+ pHalData->PHYRegDef[RF_PATH_B].rfHSSIPara2 = rFPGA0_XB_HSSIParameter2; /* wire control parameter2 */
+
+ /* RF switch Control */
+ pHalData->PHYRegDef[RF_PATH_A].rfSwitchControl = rFPGA0_XAB_SwitchControl; /* TR/Ant switch control */
+ pHalData->PHYRegDef[RF_PATH_B].rfSwitchControl = rFPGA0_XAB_SwitchControl;
+ pHalData->PHYRegDef[RF_PATH_C].rfSwitchControl = rFPGA0_XCD_SwitchControl;
+ pHalData->PHYRegDef[RF_PATH_D].rfSwitchControl = rFPGA0_XCD_SwitchControl;
+
+ /* AGC control 1 */
+ pHalData->PHYRegDef[RF_PATH_A].rfAGCControl1 = rOFDM0_XAAGCCore1;
+ pHalData->PHYRegDef[RF_PATH_B].rfAGCControl1 = rOFDM0_XBAGCCore1;
+ pHalData->PHYRegDef[RF_PATH_C].rfAGCControl1 = rOFDM0_XCAGCCore1;
+ pHalData->PHYRegDef[RF_PATH_D].rfAGCControl1 = rOFDM0_XDAGCCore1;
+
+ /* AGC control 2 */
+ pHalData->PHYRegDef[RF_PATH_A].rfAGCControl2 = rOFDM0_XAAGCCore2;
+ pHalData->PHYRegDef[RF_PATH_B].rfAGCControl2 = rOFDM0_XBAGCCore2;
+ pHalData->PHYRegDef[RF_PATH_C].rfAGCControl2 = rOFDM0_XCAGCCore2;
+ pHalData->PHYRegDef[RF_PATH_D].rfAGCControl2 = rOFDM0_XDAGCCore2;
+
+ /* RX AFE control 1 */
+ pHalData->PHYRegDef[RF_PATH_A].rfRxIQImbalance = rOFDM0_XARxIQImbalance;
+ pHalData->PHYRegDef[RF_PATH_B].rfRxIQImbalance = rOFDM0_XBRxIQImbalance;
+ pHalData->PHYRegDef[RF_PATH_C].rfRxIQImbalance = rOFDM0_XCRxIQImbalance;
+ pHalData->PHYRegDef[RF_PATH_D].rfRxIQImbalance = rOFDM0_XDRxIQImbalance;
+
+ /* RX AFE control 1 */
+ pHalData->PHYRegDef[RF_PATH_A].rfRxAFE = rOFDM0_XARxAFE;
+ pHalData->PHYRegDef[RF_PATH_B].rfRxAFE = rOFDM0_XBRxAFE;
+ pHalData->PHYRegDef[RF_PATH_C].rfRxAFE = rOFDM0_XCRxAFE;
+ pHalData->PHYRegDef[RF_PATH_D].rfRxAFE = rOFDM0_XDRxAFE;
+
+ /* Tx AFE control 1 */
+ pHalData->PHYRegDef[RF_PATH_A].rfTxIQImbalance = rOFDM0_XATxIQImbalance;
+ pHalData->PHYRegDef[RF_PATH_B].rfTxIQImbalance = rOFDM0_XBTxIQImbalance;
+ pHalData->PHYRegDef[RF_PATH_C].rfTxIQImbalance = rOFDM0_XCTxIQImbalance;
+ pHalData->PHYRegDef[RF_PATH_D].rfTxIQImbalance = rOFDM0_XDTxIQImbalance;
+
+ /* Tx AFE control 2 */
+ pHalData->PHYRegDef[RF_PATH_A].rfTxAFE = rOFDM0_XATxAFE;
+ pHalData->PHYRegDef[RF_PATH_B].rfTxAFE = rOFDM0_XBTxAFE;
+ pHalData->PHYRegDef[RF_PATH_C].rfTxAFE = rOFDM0_XCTxAFE;
+ pHalData->PHYRegDef[RF_PATH_D].rfTxAFE = rOFDM0_XDTxAFE;
+
+	/* Transceiver LSSI Readback SI mode */
+ pHalData->PHYRegDef[RF_PATH_A].rfLSSIReadBack = rFPGA0_XA_LSSIReadBack;
+ pHalData->PHYRegDef[RF_PATH_B].rfLSSIReadBack = rFPGA0_XB_LSSIReadBack;
+ pHalData->PHYRegDef[RF_PATH_C].rfLSSIReadBack = rFPGA0_XC_LSSIReadBack;
+ pHalData->PHYRegDef[RF_PATH_D].rfLSSIReadBack = rFPGA0_XD_LSSIReadBack;
+
+	/* Transceiver LSSI Readback PI mode */
+ pHalData->PHYRegDef[RF_PATH_A].rfLSSIReadBackPi = TransceiverA_HSPI_Readback;
+ pHalData->PHYRegDef[RF_PATH_B].rfLSSIReadBackPi = TransceiverB_HSPI_Readback;
+}
+
+void storePwrIndexDiffRateOffset(struct adapter *Adapter, u32 RegAddr, u32 BitMask, u32 Data)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+
+ if (RegAddr == rTxAGC_A_Rate18_06)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][0] = Data;
+ if (RegAddr == rTxAGC_A_Rate54_24)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][1] = Data;
+ if (RegAddr == rTxAGC_A_CCK1_Mcs32)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][6] = Data;
+ if (RegAddr == rTxAGC_B_CCK11_A_CCK2_11 && BitMask == 0xffffff00)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][7] = Data;
+ if (RegAddr == rTxAGC_A_Mcs03_Mcs00)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][2] = Data;
+ if (RegAddr == rTxAGC_A_Mcs07_Mcs04)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][3] = Data;
+ if (RegAddr == rTxAGC_A_Mcs11_Mcs08)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][4] = Data;
+ if (RegAddr == rTxAGC_A_Mcs15_Mcs12) {
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][5] = Data;
+ if (pHalData->rf_type == RF_1T1R)
+ pHalData->pwrGroupCnt++;
+ }
+ if (RegAddr == rTxAGC_B_Rate18_06)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][8] = Data;
+ if (RegAddr == rTxAGC_B_Rate54_24)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][9] = Data;
+ if (RegAddr == rTxAGC_B_CCK1_55_Mcs32)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][14] = Data;
+ if (RegAddr == rTxAGC_B_CCK11_A_CCK2_11 && BitMask == 0x000000ff)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][15] = Data;
+ if (RegAddr == rTxAGC_B_Mcs03_Mcs00)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][10] = Data;
+ if (RegAddr == rTxAGC_B_Mcs07_Mcs04)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][11] = Data;
+ if (RegAddr == rTxAGC_B_Mcs11_Mcs08)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][12] = Data;
+ if (RegAddr == rTxAGC_B_Mcs15_Mcs12) {
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][13] = Data;
+ if (pHalData->rf_type != RF_1T1R)
+ pHalData->pwrGroupCnt++;
+ }
+}
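+
+/*
+ * Slot layout filled in above, summarized for reference (derived from the
+ * register checks; indices are into MCSTxPowerLevelOriginalOffset[group][]):
+ *   path A: Rate18_06 -> [0], Rate54_24 -> [1], Mcs03_00 -> [2],
+ *           Mcs07_04 -> [3], Mcs11_08 -> [4], Mcs15_12 -> [5],
+ *           CCK1_Mcs32 -> [6], B_CCK11_A_CCK2_11 (mask 0xffffff00) -> [7]
+ *   path B: Rate18_06 -> [8], Rate54_24 -> [9], Mcs03_00 -> [10],
+ *           Mcs07_04 -> [11], Mcs11_08 -> [12], Mcs15_12 -> [13],
+ *           CCK1_55_Mcs32 -> [14], B_CCK11_A_CCK2_11 (mask 0x000000ff) -> [15]
+ * pwrGroupCnt advances after the last register of the active configuration
+ * (A_Mcs15_Mcs12 for 1T1R, B_Mcs15_Mcs12 otherwise).
+ */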
+
+static int phy_BB8188E_Config_ParaFile(struct adapter *Adapter)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(Adapter);
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ int rtStatus = _SUCCESS;
+
+ /* */
+ /* 1. Read PHY_REG.TXT BB INIT!! */
+	/* We will separate into 88C / 92C according to chip version */
+ /* */
+ if (HAL_STATUS_FAILURE == ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_PHY_REG))
+ rtStatus = _FAIL;
+ if (rtStatus != _SUCCESS)
+ goto phy_BB8190_Config_ParaFile_Fail;
+
+	/* 2. If EEPROM or EFUSE autoload OK, we must configure from PHY_REG_PG.txt */
+ if (!pEEPROM->bautoload_fail_flag) {
+ pHalData->pwrGroupCnt = 0;
+
+ if (HAL_STATUS_FAILURE == ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_PHY_REG_PG))
+ rtStatus = _FAIL;
+ }
+
+ if (rtStatus != _SUCCESS)
+ goto phy_BB8190_Config_ParaFile_Fail;
+
+ /* 3. BB AGC table Initialization */
+ if (HAL_STATUS_FAILURE == ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_AGC_TAB))
+ rtStatus = _FAIL;
+
+ if (rtStatus != _SUCCESS)
+ goto phy_BB8190_Config_ParaFile_Fail;
+
+phy_BB8190_Config_ParaFile_Fail:
+
+ return rtStatus;
+}
+
+int
+PHY_BBConfig8188E(
+ struct adapter *Adapter
+ )
+{
+ int rtStatus = _SUCCESS;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ u32 RegVal;
+ u8 CrystalCap;
+
+ phy_InitBBRFRegisterDefinition(Adapter);
+
+
+ /* Enable BB and RF */
+ RegVal = rtw_read16(Adapter, REG_SYS_FUNC_EN);
+ rtw_write16(Adapter, REG_SYS_FUNC_EN, (u16)(RegVal|BIT13|BIT0|BIT1));
+
+ /* 20090923 Joseph: Advised by Steven and Jenyu. Power sequence before init RF. */
+
+ rtw_write8(Adapter, REG_RF_CTRL, RF_EN|RF_RSTB|RF_SDMRSTB);
+
+ rtw_write8(Adapter, REG_SYS_FUNC_EN, FEN_USBA | FEN_USBD | FEN_BB_GLB_RSTn | FEN_BBRSTB);
+
+ /* Config BB and AGC */
+ rtStatus = phy_BB8188E_Config_ParaFile(Adapter);
+
+ /* write 0x24[16:11] = 0x24[22:17] = CrystalCap */
+ CrystalCap = pHalData->CrystalCap & 0x3F;
+ PHY_SetBBReg(Adapter, REG_AFE_XTAL_CTRL, 0x7ff800, (CrystalCap | (CrystalCap << 6)));
+
+ return rtStatus;
+}
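+
+/*
+ * Worked example for the crystal cap write above (value illustrative):
+ * with CrystalCap = 0x20, (CrystalCap | (CrystalCap << 6)) = 0x820; the
+ * 0x7ff800 mask places this into REG_AFE_XTAL_CTRL bits [22:11], so the
+ * same 6-bit value lands in both [16:11] and [22:17].
+ */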
+
+int PHY_RFConfig8188E(struct adapter *Adapter)
+{
+ int rtStatus = _SUCCESS;
+
+ /* RF config */
+ rtStatus = PHY_RF6052_Config8188E(Adapter);
+ return rtStatus;
+}
+
+
+/*-----------------------------------------------------------------------------
+ * Function: rtl8188e_PHY_ConfigRFWithParaFile()
+ *
+ * Overview: This function reads RF parameters from the general file format and performs the RF 3-wire configuration
+ *
+ * Input: struct adapter *Adapter
+ * ps8 pFileName
+ * enum rf_radio_path eRFPath
+ *
+ * Output: NONE
+ *
+ * Return: RT_STATUS_SUCCESS: configuration file exists
+ *
+ * Note: Delay may be required for RF configuration
+ *---------------------------------------------------------------------------*/
+int rtl8188e_PHY_ConfigRFWithParaFile(struct adapter *Adapter, u8 *pFileName, enum rf_radio_path eRFPath)
+{
+ return _SUCCESS;
+}
+
+void
+rtl8192c_PHY_GetHWRegOriginalValue(
+ struct adapter *Adapter
+ )
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+
+ /* read rx initial gain */
+ pHalData->DefaultInitialGain[0] = (u8)PHY_QueryBBReg(Adapter, rOFDM0_XAAGCCore1, bMaskByte0);
+ pHalData->DefaultInitialGain[1] = (u8)PHY_QueryBBReg(Adapter, rOFDM0_XBAGCCore1, bMaskByte0);
+ pHalData->DefaultInitialGain[2] = (u8)PHY_QueryBBReg(Adapter, rOFDM0_XCAGCCore1, bMaskByte0);
+ pHalData->DefaultInitialGain[3] = (u8)PHY_QueryBBReg(Adapter, rOFDM0_XDAGCCore1, bMaskByte0);
+
+ /* read framesync */
+ pHalData->framesync = (u8)PHY_QueryBBReg(Adapter, rOFDM0_RxDetector3, bMaskByte0);
+ pHalData->framesyncC34 = PHY_QueryBBReg(Adapter, rOFDM0_RxDetector2, bMaskDWord);
+}
+
+/* */
+/* Description: */
+/* Map dBm into Tx power index according to */
+/* current HW model, for example, RF and PA, and */
+/* current wireless mode. */
+/* By Bruce, 2008-01-29. */
+/* */
+static u8 phy_DbmToTxPwrIdx(struct adapter *Adapter, enum wireless_mode WirelessMode, int PowerInDbm)
+{
+ u8 TxPwrIdx = 0;
+ int Offset = 0;
+
+
+ /* */
+	/* Tested by MP, we found that CCK index 0 equals 8 dBm, OFDM legacy equals */
+	/* 3 dBm, and OFDM HT equals 0 dBm, respectively. */
+	/* Note: */
+	/* The mapping may differ between NICs. Do not use this formula where an accurate result is needed. */
+ /* By Bruce, 2008-01-29. */
+ /* */
+ switch (WirelessMode) {
+ case WIRELESS_MODE_B:
+ Offset = -7;
+ break;
+
+ case WIRELESS_MODE_G:
+ case WIRELESS_MODE_N_24G:
+ default:
+ Offset = -8;
+ break;
+ }
+
+ if ((PowerInDbm - Offset) > 0)
+ TxPwrIdx = (u8)((PowerInDbm - Offset) * 2);
+ else
+ TxPwrIdx = 0;
+
+ /* Tx Power Index is too large. */
+ if (TxPwrIdx > MAX_TXPWR_IDX_NMODE_92S)
+ TxPwrIdx = MAX_TXPWR_IDX_NMODE_92S;
+
+ return TxPwrIdx;
+}
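+
+/*
+ * Worked example (illustrative): in WIRELESS_MODE_B the offset is -7, so a
+ * request for 16 dBm yields (16 - (-7)) * 2 = 46; in G/N_24G mode the offset
+ * is -8, yielding (16 - (-8)) * 2 = 48.  The result is capped at
+ * MAX_TXPWR_IDX_NMODE_92S if it exceeds that limit.
+ */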
+
+/* */
+/* Description: */
+/* Map Tx power index into dBm according to */
+/* current HW model, for example, RF and PA, and */
+/* current wireless mode. */
+/* By Bruce, 2008-01-29. */
+/* */
+static int phy_TxPwrIdxToDbm(struct adapter *Adapter, enum wireless_mode WirelessMode, u8 TxPwrIdx)
+{
+ int Offset = 0;
+ int PwrOutDbm = 0;
+
+ /* */
+	/* Tested by MP, we found that CCK index 0 equals -7 dBm and OFDM legacy equals -8 dBm. */
+	/* Note: */
+	/* The mapping may differ between NICs. Do not use this formula where an accurate result is needed. */
+ /* By Bruce, 2008-01-29. */
+ /* */
+ switch (WirelessMode) {
+ case WIRELESS_MODE_B:
+ Offset = -7;
+ break;
+ case WIRELESS_MODE_G:
+ case WIRELESS_MODE_N_24G:
+ default:
+ Offset = -8;
+ break;
+ }
+
+ PwrOutDbm = TxPwrIdx / 2 + Offset; /* Discard the decimal part. */
+
+ return PwrOutDbm;
+}
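+
+/*
+ * Continuing the illustrative example: the reverse mapping discards the
+ * half-dB step, so in B mode both index 46 and index 47 map back to
+ * 46 / 2 + (-7) = 16 dBm.
+ */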
+
+
+/*-----------------------------------------------------------------------------
+ * Function: PHY_GetTxPowerLevel8188E()
+ *
+ * Overview: This function is exported to the "common" module
+ *
+ * Input: struct adapter *Adapter
+ * u32 *powerlevel
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ *---------------------------------------------------------------------------*/
+void PHY_GetTxPowerLevel8188E(struct adapter *Adapter, u32 *powerlevel)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ u8 TxPwrLevel = 0;
+ int TxPwrDbm;
+
+ /* */
+ /* Because the Tx power indexes are different, we report the maximum of them to */
+ /* meet the CCX TPC request. By Bruce, 2008-01-31. */
+ /* */
+
+ /* CCK */
+ TxPwrLevel = pHalData->CurrentCckTxPwrIdx;
+ TxPwrDbm = phy_TxPwrIdxToDbm(Adapter, WIRELESS_MODE_B, TxPwrLevel);
+
+ /* Legacy OFDM */
+ TxPwrLevel = pHalData->CurrentOfdm24GTxPwrIdx + pHalData->LegacyHTTxPowerDiff;
+
+ /* Compare with Legacy OFDM Tx power. */
+ if (phy_TxPwrIdxToDbm(Adapter, WIRELESS_MODE_G, TxPwrLevel) > TxPwrDbm)
+ TxPwrDbm = phy_TxPwrIdxToDbm(Adapter, WIRELESS_MODE_G, TxPwrLevel);
+
+ /* HT OFDM */
+ TxPwrLevel = pHalData->CurrentOfdm24GTxPwrIdx;
+
+ /* Compare with HT OFDM Tx power. */
+ if (phy_TxPwrIdxToDbm(Adapter, WIRELESS_MODE_N_24G, TxPwrLevel) > TxPwrDbm)
+ TxPwrDbm = phy_TxPwrIdxToDbm(Adapter, WIRELESS_MODE_N_24G, TxPwrLevel);
+
+ *powerlevel = TxPwrDbm;
+}
+
+static void getTxPowerIndex88E(struct adapter *Adapter, u8 channel, u8 *cckPowerLevel,
+ u8 *ofdmPowerLevel, u8 *BW20PowerLevel,
+ u8 *BW40PowerLevel)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ u8 index = (channel - 1);
+ u8 TxCount = 0, path_nums;
+
+ if ((RF_1T2R == pHalData->rf_type) || (RF_1T1R == pHalData->rf_type))
+ path_nums = 1;
+ else
+ path_nums = 2;
+
+ for (TxCount = 0; TxCount < path_nums; TxCount++) {
+ if (TxCount == RF_PATH_A) {
+ /* 1. CCK */
+ cckPowerLevel[TxCount] = pHalData->Index24G_CCK_Base[TxCount][index];
+ /* 2. OFDM */
+ ofdmPowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index]+
+ pHalData->OFDM_24G_Diff[TxCount][RF_PATH_A];
+ /* 1. BW20 */
+ BW20PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index]+
+ pHalData->BW20_24G_Diff[TxCount][RF_PATH_A];
+ /* 2. BW40 */
+ BW40PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[TxCount][index];
+ } else if (TxCount == RF_PATH_B) {
+ /* 1. CCK */
+ cckPowerLevel[TxCount] = pHalData->Index24G_CCK_Base[TxCount][index];
+ /* 2. OFDM */
+ ofdmPowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index]+
+ pHalData->BW20_24G_Diff[RF_PATH_A][index]+
+ pHalData->BW20_24G_Diff[TxCount][index];
+ /* 1. BW20 */
+ BW20PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index]+
+ pHalData->BW20_24G_Diff[TxCount][RF_PATH_A]+
+ pHalData->BW20_24G_Diff[TxCount][index];
+ /* 2. BW40 */
+ BW40PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[TxCount][index];
+ } else if (TxCount == RF_PATH_C) {
+ /* 1. CCK */
+ cckPowerLevel[TxCount] = pHalData->Index24G_CCK_Base[TxCount][index];
+ /* 2. OFDM */
+ ofdmPowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index]+
+ pHalData->BW20_24G_Diff[RF_PATH_A][index]+
+ pHalData->BW20_24G_Diff[RF_PATH_B][index]+
+ pHalData->BW20_24G_Diff[TxCount][index];
+ /* 1. BW20 */
+ BW20PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index]+
+ pHalData->BW20_24G_Diff[RF_PATH_A][index]+
+ pHalData->BW20_24G_Diff[RF_PATH_B][index]+
+ pHalData->BW20_24G_Diff[TxCount][index];
+ /* 2. BW40 */
+ BW40PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[TxCount][index];
+ } else if (TxCount == RF_PATH_D) {
+ /* 1. CCK */
+ cckPowerLevel[TxCount] = pHalData->Index24G_CCK_Base[TxCount][index];
+ /* 2. OFDM */
+ ofdmPowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index]+
+ pHalData->BW20_24G_Diff[RF_PATH_A][index]+
+ pHalData->BW20_24G_Diff[RF_PATH_B][index]+
+ pHalData->BW20_24G_Diff[RF_PATH_C][index]+
+ pHalData->BW20_24G_Diff[TxCount][index];
+
+ /* 1. BW20 */
+ BW20PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index]+
+ pHalData->BW20_24G_Diff[RF_PATH_A][index]+
+ pHalData->BW20_24G_Diff[RF_PATH_B][index]+
+ pHalData->BW20_24G_Diff[RF_PATH_C][index]+
+ pHalData->BW20_24G_Diff[TxCount][index];
+
+ /* 2. BW40 */
+ BW40PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[TxCount][index];
+ }
+ }
+}
+
+static void phy_PowerIndexCheck88E(struct adapter *Adapter, u8 channel, u8 *cckPowerLevel,
+ u8 *ofdmPowerLevel, u8 *BW20PowerLevel, u8 *BW40PowerLevel)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+
+ pHalData->CurrentCckTxPwrIdx = cckPowerLevel[0];
+ pHalData->CurrentOfdm24GTxPwrIdx = ofdmPowerLevel[0];
+ pHalData->CurrentBW2024GTxPwrIdx = BW20PowerLevel[0];
+ pHalData->CurrentBW4024GTxPwrIdx = BW40PowerLevel[0];
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: PHY_SetTxPowerLevel8188E()
+ *
+ * Overview: This function is exported to the "HalCommon" module.
+ * We must consider the RF path later.
+ *
+ * Input: struct adapter *Adapter
+ * u8 channel
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ * 2008/11/04 MHC We remove EEPROM_93C56.
+ * We need to move CCX-related code to an independent file.
+ * 2009/01/21 MHC Support new EEPROM format from SD3 requirement.
+ *
+ *---------------------------------------------------------------------------*/
+void
+PHY_SetTxPowerLevel8188E(
+ struct adapter *Adapter,
+ u8 channel
+ )
+{
+ u8 cckPowerLevel[MAX_TX_COUNT] = {0};
+ u8 ofdmPowerLevel[MAX_TX_COUNT] = {0};/* [0]:RF-A, [1]:RF-B */
+ u8 BW20PowerLevel[MAX_TX_COUNT] = {0};
+ u8 BW40PowerLevel[MAX_TX_COUNT] = {0};
+
+ getTxPowerIndex88E(Adapter, channel, &cckPowerLevel[0], &ofdmPowerLevel[0], &BW20PowerLevel[0], &BW40PowerLevel[0]);
+
+ phy_PowerIndexCheck88E(Adapter, channel, &cckPowerLevel[0], &ofdmPowerLevel[0], &BW20PowerLevel[0], &BW40PowerLevel[0]);
+
+ rtl8188e_PHY_RF6052SetCckTxPower(Adapter, &cckPowerLevel[0]);
+ rtl8188e_PHY_RF6052SetOFDMTxPower(Adapter, &ofdmPowerLevel[0], &BW20PowerLevel[0], &BW40PowerLevel[0], channel);
+}
+
+/* */
+/* Description: */
+/* Update transmit power level of all channel supported. */
+/* */
+/* TODO: */
+/* A mode. */
+/* By Bruce, 2008-02-04. */
+/* */
+bool
+PHY_UpdateTxPowerDbm8188E(
+ struct adapter *Adapter,
+ int powerInDbm
+ )
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ u8 idx;
+ u8 rf_path;
+
+ /* TODO: A mode Tx power. */
+ u8 CckTxPwrIdx = phy_DbmToTxPwrIdx(Adapter, WIRELESS_MODE_B, powerInDbm);
+ u8 OfdmTxPwrIdx = phy_DbmToTxPwrIdx(Adapter, WIRELESS_MODE_N_24G, powerInDbm);
+
+ if (OfdmTxPwrIdx - pHalData->LegacyHTTxPowerDiff > 0)
+ OfdmTxPwrIdx -= pHalData->LegacyHTTxPowerDiff;
+ else
+ OfdmTxPwrIdx = 0;
+
+ for (idx = 0; idx < 14; idx++) {
+ for (rf_path = 0; rf_path < 2; rf_path++) {
+ pHalData->TxPwrLevelCck[rf_path][idx] = CckTxPwrIdx;
+ pHalData->TxPwrLevelHT40_1S[rf_path][idx] =
+ pHalData->TxPwrLevelHT40_2S[rf_path][idx] = OfdmTxPwrIdx;
+ }
+ }
+ return true;
+}
+
+void
+PHY_ScanOperationBackup8188E(
+ struct adapter *Adapter,
+ u8 Operation
+ )
+{
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: _PHY_SetBWMode92C()
+ *
+ * Overview: Worker function for PHY_SetBWMode8188E()
+ *
+ * Input: struct adapter *Adapter
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Note: (1) We do not take j mode into consideration now
+ * (2) Will the two work items "switch channel" and "switch channel bandwidth" run
+ * concurrently?
+ *---------------------------------------------------------------------------*/
+static void
+_PHY_SetBWMode92C(
+ struct adapter *Adapter
+)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ u8 regBwOpMode;
+ u8 regRRSR_RSC;
+
+ if (pHalData->rf_chip == RF_PSEUDO_11N)
+ return;
+
+ /* There is no 40MHz mode in RF_8225. */
+ if (pHalData->rf_chip == RF_8225)
+ return;
+
+ if (Adapter->bDriverStopped)
+ return;
+
+ /* 3 */
+ /* 3<1>Set MAC register */
+ /* 3 */
+
+ regBwOpMode = rtw_read8(Adapter, REG_BWOPMODE);
+ regRRSR_RSC = rtw_read8(Adapter, REG_RRSR+2);
+
+ switch (pHalData->CurrentChannelBW) {
+ case HT_CHANNEL_WIDTH_20:
+ regBwOpMode |= BW_OPMODE_20MHZ;
+		/* 2007/02/07 Marked by Emily because we have not verified whether this register works */
+ rtw_write8(Adapter, REG_BWOPMODE, regBwOpMode);
+ break;
+ case HT_CHANNEL_WIDTH_40:
+ regBwOpMode &= ~BW_OPMODE_20MHZ;
+		/* 2007/02/07 Marked by Emily because we have not verified whether this register works */
+ rtw_write8(Adapter, REG_BWOPMODE, regBwOpMode);
+ regRRSR_RSC = (regRRSR_RSC&0x90) | (pHalData->nCur40MhzPrimeSC<<5);
+ rtw_write8(Adapter, REG_RRSR+2, regRRSR_RSC);
+ break;
+ default:
+ break;
+ }
+
+ /* 3 */
+ /* 3 <2>Set PHY related register */
+ /* 3 */
+ switch (pHalData->CurrentChannelBW) {
+ /* 20 MHz channel*/
+ case HT_CHANNEL_WIDTH_20:
+ PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bRFMOD, 0x0);
+ PHY_SetBBReg(Adapter, rFPGA1_RFMOD, bRFMOD, 0x0);
+ break;
+ /* 40 MHz channel*/
+ case HT_CHANNEL_WIDTH_40:
+ PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bRFMOD, 0x1);
+ PHY_SetBBReg(Adapter, rFPGA1_RFMOD, bRFMOD, 0x1);
+ /* Set Control channel to upper or lower. These settings are required only for 40MHz */
+ PHY_SetBBReg(Adapter, rCCK0_System, bCCKSideBand, (pHalData->nCur40MhzPrimeSC>>1));
+ PHY_SetBBReg(Adapter, rOFDM1_LSTF, 0xC00, pHalData->nCur40MhzPrimeSC);
+ PHY_SetBBReg(Adapter, 0x818, (BIT26 | BIT27),
+ (pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
+ break;
+ default:
+ break;
+ }
+ /* Skip over setting of J-mode in BB register here. Default value is "None J mode". Emily 20070315 */
+
+ /* 3<3>Set RF related register */
+ switch (pHalData->rf_chip) {
+ case RF_8225:
+ break;
+ case RF_8256:
+ /* Please implement this function in Hal8190PciPhy8256.c */
+ break;
+ case RF_8258:
+ /* Please implement this function in Hal8190PciPhy8258.c */
+ break;
+ case RF_PSEUDO_11N:
+ break;
+ case RF_6052:
+ rtl8188e_PHY_RF6052SetBandwidth(Adapter, pHalData->CurrentChannelBW);
+ break;
+ default:
+ break;
+ }
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: PHY_SetBWMode8188E()
+ *
+ * Overview: This function is exported to the "HalCommon" module
+ *
+ * Input: struct adapter *Adapter
+ * enum ht_channel_width Bandwidth 20M or 40M
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Note: We do not take j mode into consideration now
+ *---------------------------------------------------------------------------*/
+void PHY_SetBWMode8188E(struct adapter *Adapter, enum ht_channel_width Bandwidth, /* 20M or 40M */
+ unsigned char Offset) /* Upper, Lower, or Don't care */
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ enum ht_channel_width tmpBW = pHalData->CurrentChannelBW;
+
+ pHalData->CurrentChannelBW = Bandwidth;
+
+ pHalData->nCur40MhzPrimeSC = Offset;
+
+ if ((!Adapter->bDriverStopped) && (!Adapter->bSurpriseRemoved))
+ _PHY_SetBWMode92C(Adapter);
+ else
+ pHalData->CurrentChannelBW = tmpBW;
+}
+
+static void _PHY_SwChnl8192C(struct adapter *Adapter, u8 channel)
+{
+ u8 eRFPath;
+ u32 param1, param2;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+
+ if (Adapter->bNotifyChannelChange)
+ DBG_88E("[%s] ch = %d\n", __func__, channel);
+
+ /* s1. pre common command - CmdID_SetTxPowerLevel */
+ PHY_SetTxPowerLevel8188E(Adapter, channel);
+
+ /* s2. RF dependent command - CmdID_RF_WriteReg, param1=RF_CHNLBW, param2=channel */
+ param1 = RF_CHNLBW;
+ param2 = channel;
+ for (eRFPath = 0; eRFPath < pHalData->NumTotalRFPath; eRFPath++) {
+ pHalData->RfRegChnlVal[eRFPath] = ((pHalData->RfRegChnlVal[eRFPath] & 0xfffffc00) | param2);
+ PHY_SetRFReg(Adapter, (enum rf_radio_path)eRFPath, param1, bRFRegOffsetMask, pHalData->RfRegChnlVal[eRFPath]);
+ }
+}
+
+void PHY_SwChnl8188E(struct adapter *Adapter, u8 channel)
+{
+ /* Call after initialization */
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ u8 tmpchannel = pHalData->CurrentChannel;
+ bool bResult = true;
+
+ if (pHalData->rf_chip == RF_PSEUDO_11N)
+		return; /* return immediately if it is pseudo-phy */
+
+ if (channel == 0)
+ channel = 1;
+
+ pHalData->CurrentChannel = channel;
+
+ if ((!Adapter->bDriverStopped) && (!Adapter->bSurpriseRemoved)) {
+ _PHY_SwChnl8192C(Adapter, channel);
+
+		if (!bResult)
+			pHalData->CurrentChannel = tmpchannel;
+
+ } else {
+ pHalData->CurrentChannel = tmpchannel;
+ }
+}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_rf6052.c b/drivers/staging/rtl8188eu/hal/rtl8188e_rf6052.c
new file mode 100644
index 0000000..bfdf9b3
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_rf6052.c
@@ -0,0 +1,572 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+/******************************************************************************
+ *
+ *
+ * Module: rtl8188e_rf6052.c (Source C File)
+ *
+ * Note: Provide RF 6052 series relative API.
+ *
+ * Function:
+ *
+ * Export:
+ *
+ * Abbrev:
+ *
+ * History:
+ * Date Who Remark
+ *
+ * 09/25/2008 MHC Create initial version.
+ * 11/05/2008 MHC Add API for tx power setting.
+ *
+ *
+******************************************************************************/
+
+#define _RTL8188E_RF6052_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#include <rtl8188e_hal.h>
+
+/*---------------------------Define Local Constant---------------------------*/
+/* Define local structure for debug!!!!! */
+struct rf_shadow {
+ /* Shadow register value */
+ u32 Value;
+ /* Compare or not flag */
+ u8 Compare;
+	/* Record if it has ever been modified unexpectedly */
+ u8 ErrorOrNot;
+	/* Recover flag */
+ u8 Recorver;
+ /* */
+ u8 Driver_Write;
+};
+
+/*---------------------------Define Local Constant---------------------------*/
+
+
+/*------------------------Define global variable-----------------------------*/
+
+/*------------------------Define local variable------------------------------*/
+
+/*-----------------------------------------------------------------------------
+ * Function: RF_ChangeTxPath
+ *
+ * Overview: For RF6052, we must change some RF settings for 1T or 2T.
+ *
+ * Input: u16 DataRate 0x80-8f, 0x90-9f
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 09/25/2008 MHC Create Version 0.
+ * Firmware will support the utility later.
+ *
+ *---------------------------------------------------------------------------*/
+void rtl8188e_RF_ChangeTxPath(struct adapter *Adapter, u16 DataRate)
+{
+/* We do not support gain table change in A-CUT now. Delete later. */
+} /* RF_ChangeTxPath */
+
+
+/*-----------------------------------------------------------------------------
+ * Function: PHY_RF6052SetBandwidth()
+ *
+ * Overview: This function is called by _PHY_SetBWMode92C() only
+ *
+ * Input: struct adapter *Adapter
+ * WIRELESS_BANDWIDTH_E Bandwidth 20M or 40M
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Note: For RF type 0222D
+ *---------------------------------------------------------------------------*/
+void rtl8188e_PHY_RF6052SetBandwidth(struct adapter *Adapter,
+ enum ht_channel_width Bandwidth)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+
+ switch (Bandwidth) {
+ case HT_CHANNEL_WIDTH_20:
+ pHalData->RfRegChnlVal[0] = ((pHalData->RfRegChnlVal[0] & 0xfffff3ff) | BIT(10) | BIT(11));
+ PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]);
+ break;
+ case HT_CHANNEL_WIDTH_40:
+ pHalData->RfRegChnlVal[0] = ((pHalData->RfRegChnlVal[0] & 0xfffff3ff) | BIT(10));
+ PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]);
+ break;
+ default:
+ break;
+ }
+}
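+
+/*
+ * Bandwidth encoding used above, for reference: RF_CHNLBW bits [11:10] are
+ * set to 0b11 for 20 MHz and 0b01 for 40 MHz, while the 0xfffff3ff mask
+ * preserves the rest of the cached RfRegChnlVal[0] (including the channel
+ * number kept in the low bits).
+ */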
+
+/*-----------------------------------------------------------------------------
+ * Function: PHY_RF6052SetCckTxPower
+ *
+ * Overview:
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/05/2008 MHC Simulate 8192 series.
+ *
+ *---------------------------------------------------------------------------*/
+
+void
+rtl8188e_PHY_RF6052SetCckTxPower(
+ struct adapter *Adapter,
+ u8 *pPowerlevel)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
+ u32 TxAGC[2] = {0, 0}, tmpval = 0, pwrtrac_value;
+ bool TurboScanOff = false;
+ u8 idx1, idx2;
+ u8 *ptr;
+ u8 direction;
+	/* For CE, we must disable turbo scan */
+ TurboScanOff = true;
+
+
+ if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
+ TxAGC[RF_PATH_A] = 0x3f3f3f3f;
+ TxAGC[RF_PATH_B] = 0x3f3f3f3f;
+
+ TurboScanOff = true;/* disable turbo scan */
+
+ if (TurboScanOff) {
+ for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
+ TxAGC[idx1] =
+ pPowerlevel[idx1] | (pPowerlevel[idx1]<<8) |
+ (pPowerlevel[idx1]<<16) | (pPowerlevel[idx1]<<24);
+ /* 2010/10/18 MH For external PA module. We need to limit power index to be less than 0x20. */
+ if (TxAGC[idx1] > 0x20 && pHalData->ExternalPA)
+ TxAGC[idx1] = 0x20;
+ }
+ }
+ } else {
+		/* Driver dynamic Tx power shall not affect Tx power.
+		 * It shall be determined by the power training mechanism.
+		 * Currently, we cannot fully disable the driver dynamic
+		 * tx power mechanism because it is referenced by the BT
+		 * coexist mechanism.
+		 * In the future, the two mechanisms shall be separated from
+		 * each other and maintained independently. */
+ if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level1) {
+ TxAGC[RF_PATH_A] = 0x10101010;
+ TxAGC[RF_PATH_B] = 0x10101010;
+ } else if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level2) {
+ TxAGC[RF_PATH_A] = 0x00000000;
+ TxAGC[RF_PATH_B] = 0x00000000;
+ } else {
+ for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
+ TxAGC[idx1] =
+ pPowerlevel[idx1] | (pPowerlevel[idx1]<<8) |
+ (pPowerlevel[idx1]<<16) | (pPowerlevel[idx1]<<24);
+ }
+ if (pHalData->EEPROMRegulatory == 0) {
+ tmpval = (pHalData->MCSTxPowerLevelOriginalOffset[0][6]) +
+ (pHalData->MCSTxPowerLevelOriginalOffset[0][7]<<8);
+ TxAGC[RF_PATH_A] += tmpval;
+
+ tmpval = (pHalData->MCSTxPowerLevelOriginalOffset[0][14]) +
+ (pHalData->MCSTxPowerLevelOriginalOffset[0][15]<<24);
+ TxAGC[RF_PATH_B] += tmpval;
+ }
+ }
+ }
+ for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
+ ptr = (u8 *)(&(TxAGC[idx1]));
+ for (idx2 = 0; idx2 < 4; idx2++) {
+ if (*ptr > RF6052_MAX_TX_PWR)
+ *ptr = RF6052_MAX_TX_PWR;
+ ptr++;
+ }
+ }
+ ODM_TxPwrTrackAdjust88E(&pHalData->odmpriv, 1, &direction, &pwrtrac_value);
+
+ if (direction == 1) {
+		/* Increase TX power */
+ TxAGC[0] += pwrtrac_value;
+ TxAGC[1] += pwrtrac_value;
+ } else if (direction == 2) {
+		/* Decrease TX power */
+ TxAGC[0] -= pwrtrac_value;
+ TxAGC[1] -= pwrtrac_value;
+ }
+
+ /* rf-A cck tx power */
+ tmpval = TxAGC[RF_PATH_A]&0xff;
+ PHY_SetBBReg(Adapter, rTxAGC_A_CCK1_Mcs32, bMaskByte1, tmpval);
+ tmpval = TxAGC[RF_PATH_A]>>8;
+ PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
+
+ /* rf-B cck tx power */
+ tmpval = TxAGC[RF_PATH_B]>>24;
+ PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, bMaskByte0, tmpval);
+ tmpval = TxAGC[RF_PATH_B]&0x00ffffff;
+ PHY_SetBBReg(Adapter, rTxAGC_B_CCK1_55_Mcs32, 0xffffff00, tmpval);
+} /* PHY_RF6052SetCckTxPower */
+
+/* */
+/* powerbase0 for OFDM rates */
+/* powerbase1 for HT MCS rates */
+/* */
+static void getpowerbase88e(struct adapter *Adapter, u8 *pPowerLevelOFDM,
+ u8 *pPowerLevelBW20, u8 *pPowerLevelBW40, u8 Channel, u32 *OfdmBase, u32 *MCSBase)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ u32 powerBase0, powerBase1;
+ u8 i, powerlevel[2];
+
+ for (i = 0; i < 2; i++) {
+ powerBase0 = pPowerLevelOFDM[i];
+
+ powerBase0 = (powerBase0<<24) | (powerBase0<<16) | (powerBase0<<8) | powerBase0;
+ *(OfdmBase+i) = powerBase0;
+ }
+ for (i = 0; i < pHalData->NumTotalRFPath; i++) {
+ /* Check HT20 to HT40 diff */
+ if (pHalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
+ powerlevel[i] = pPowerLevelBW20[i];
+ else
+ powerlevel[i] = pPowerLevelBW40[i];
+ powerBase1 = powerlevel[i];
+ powerBase1 = (powerBase1<<24) | (powerBase1<<16) | (powerBase1<<8) | powerBase1;
+ *(MCSBase+i) = powerBase1;
+ }
+}
+static void get_rx_power_val_by_reg(struct adapter *Adapter, u8 Channel,
+ u8 index, u32 *powerBase0, u32 *powerBase1,
+ u32 *pOutWriteVal)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ u8 i, chnlGroup = 0, pwr_diff_limit[4], customer_pwr_limit;
+ s8 pwr_diff = 0;
+ u32 writeVal, customer_limit, rf;
+ u8 Regulatory = pHalData->EEPROMRegulatory;
+
+ /* Index 0 & 1= legacy OFDM, 2-5=HT_MCS rate */
+
+ for (rf = 0; rf < 2; rf++) {
+ switch (Regulatory) {
+ case 0: /* Realtek better performance */
+ /* increase power diff defined by Realtek for large power */
+ chnlGroup = 0;
+ writeVal = pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index+(rf ? 8 : 0)] +
+ ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+ break;
+ case 1: /* Realtek regulatory */
+ /* increase power diff defined by Realtek for regulatory */
+ if (pHalData->pwrGroupCnt == 1)
+ chnlGroup = 0;
+ if (pHalData->pwrGroupCnt >= pHalData->PGMaxGroup) {
+				if (Channel < 3) /* Channel 1-2 */
+ chnlGroup = 0;
+ else if (Channel < 6) /* Channel 3-5 */
+ chnlGroup = 1;
+ else if (Channel < 9) /* Channel 6-8 */
+ chnlGroup = 2;
+ else if (Channel < 12) /* Channel 9-11 */
+ chnlGroup = 3;
+ else if (Channel < 14) /* Channel 12-13 */
+ chnlGroup = 4;
+ else if (Channel == 14) /* Channel 14 */
+ chnlGroup = 5;
+ }
+ writeVal = pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index+(rf ? 8 : 0)] +
+ ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+ break;
+ case 2: /* Better regulatory */
+ /* don't increase any power diff */
+ writeVal = ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+ break;
+ case 3: /* Customer defined power diff. */
+ /* increase power diff defined by customer. */
+ chnlGroup = 0;
+
+ if (index < 2)
+ pwr_diff = pHalData->TxPwrLegacyHtDiff[rf][Channel-1];
+ else if (pHalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
+ pwr_diff = pHalData->TxPwrHt20Diff[rf][Channel-1];
+
+ if (pHalData->CurrentChannelBW == HT_CHANNEL_WIDTH_40)
+ customer_pwr_limit = pHalData->PwrGroupHT40[rf][Channel-1];
+ else
+ customer_pwr_limit = pHalData->PwrGroupHT20[rf][Channel-1];
+
+ if (pwr_diff >= customer_pwr_limit)
+ pwr_diff = 0;
+ else
+ pwr_diff = customer_pwr_limit - pwr_diff;
+
+ for (i = 0; i < 4; i++) {
+ pwr_diff_limit[i] = (u8)((pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index+(rf ? 8 : 0)]&(0x7f<<(i*8)))>>(i*8));
+
+ if (pwr_diff_limit[i] > pwr_diff)
+ pwr_diff_limit[i] = pwr_diff;
+ }
+ customer_limit = (pwr_diff_limit[3]<<24) | (pwr_diff_limit[2]<<16) |
+ (pwr_diff_limit[1]<<8) | (pwr_diff_limit[0]);
+ writeVal = customer_limit + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+ break;
+ default:
+ chnlGroup = 0;
+ writeVal = pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index+(rf ? 8 : 0)] +
+ ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+ break;
+ }
+/* 20100427 Joseph: Driver dynamic Tx power shall not affect Tx power. It shall be determined by power training mechanism. */
+/* Currently, we cannot fully disable driver dynamic tx power mechanism because it is referenced by BT coexist mechanism. */
+/* In the future, the two mechanisms shall be separated from each other and maintained independently. Thanks for Lanhsin's reminder. */
+		/* 92d does not need this */
+ if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level1)
+ writeVal = 0x14141414;
+ else if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level2)
+ writeVal = 0x00000000;
+
+ /* 20100628 Joseph: High power mode for BT-Coexist mechanism. */
+ /* This mechanism is only applied when Driver-Highpower-Mechanism is OFF. */
+ if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_BT1)
+ writeVal = writeVal - 0x06060606;
+ else if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_BT2)
+ writeVal = writeVal;
+ *(pOutWriteVal+rf) = writeVal;
+ }
+}
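+
+/*
+ * Channel-to-group mapping used by the "Realtek regulatory" case in
+ * get_rx_power_val_by_reg() above, for reference: channels 1-2 -> group 0,
+ * 3-5 -> 1, 6-8 -> 2, 9-11 -> 3, 12-13 -> 4, 14 -> 5.  The mapping is
+ * applied only once pwrGroupCnt has reached PGMaxGroup; otherwise group 0
+ * is used.
+ */
+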
+static void writeOFDMPowerReg88E(struct adapter *Adapter, u8 index, u32 *pValue)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ u16 regoffset_a[6] = {
+ rTxAGC_A_Rate18_06, rTxAGC_A_Rate54_24,
+ rTxAGC_A_Mcs03_Mcs00, rTxAGC_A_Mcs07_Mcs04,
+ rTxAGC_A_Mcs11_Mcs08, rTxAGC_A_Mcs15_Mcs12};
+ u16 regoffset_b[6] = {
+ rTxAGC_B_Rate18_06, rTxAGC_B_Rate54_24,
+ rTxAGC_B_Mcs03_Mcs00, rTxAGC_B_Mcs07_Mcs04,
+ rTxAGC_B_Mcs11_Mcs08, rTxAGC_B_Mcs15_Mcs12};
+ u8 i, rf, pwr_val[4];
+ u32 writeVal;
+ u16 regoffset;
+
+ for (rf = 0; rf < 2; rf++) {
+ writeVal = pValue[rf];
+ for (i = 0; i < 4; i++) {
+ pwr_val[i] = (u8)((writeVal & (0x7f<<(i*8)))>>(i*8));
+ if (pwr_val[i] > RF6052_MAX_TX_PWR)
+ pwr_val[i] = RF6052_MAX_TX_PWR;
+ }
+ writeVal = (pwr_val[3]<<24) | (pwr_val[2]<<16) | (pwr_val[1]<<8) | pwr_val[0];
+
+ if (rf == 0)
+ regoffset = regoffset_a[index];
+ else
+ regoffset = regoffset_b[index];
+
+ PHY_SetBBReg(Adapter, regoffset, bMaskDWord, writeVal);
+
+ /* 201005115 Joseph: Set Tx Power diff for Tx power training mechanism. */
+ if (((pHalData->rf_type == RF_2T2R) &&
+ (regoffset == rTxAGC_A_Mcs15_Mcs12 || regoffset == rTxAGC_B_Mcs15_Mcs12)) ||
+ ((pHalData->rf_type != RF_2T2R) &&
+ (regoffset == rTxAGC_A_Mcs07_Mcs04 || regoffset == rTxAGC_B_Mcs07_Mcs04))) {
+ writeVal = pwr_val[3];
+ if (regoffset == rTxAGC_A_Mcs15_Mcs12 || regoffset == rTxAGC_A_Mcs07_Mcs04)
+ regoffset = 0xc90;
+ if (regoffset == rTxAGC_B_Mcs15_Mcs12 || regoffset == rTxAGC_B_Mcs07_Mcs04)
+ regoffset = 0xc98;
+ for (i = 0; i < 3; i++) {
+ if (i != 2)
+ writeVal = (writeVal > 8) ? (writeVal-8) : 0;
+ else
+ writeVal = (writeVal > 6) ? (writeVal-6) : 0;
+ rtw_write8(Adapter, (u32)(regoffset+i), (u8)writeVal);
+ }
+ }
+ }
+}
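+
+/*
+ * Note on the power-training block above: for the highest MCS register of
+ * the active configuration, the top power byte (pwr_val[3]) is also written
+ * to three consecutive bytes at 0xc90 (path A) or 0xc98 (path B),
+ * cumulatively reduced by 8, then another 8, then another 6, and floored
+ * at zero at each step.
+ */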
+
+/*-----------------------------------------------------------------------------
+ * Function: PHY_RF6052SetOFDMTxPower
+ *
+ * Overview: For legacy and HT OFDM, we must read the EEPROM TX power index for
+ * each channel and read the original value in the TX power register area from
+ * 0xe00. We add the offset and the original value to obtain the correct tx power.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/05/2008 MHC Simulate 8192 series method.
+ * 01/06/2009 MHC 1. Prevent Path B tx power overflow or underflow due to
+ * A/B pwr difference or legacy/HT pwr diff.
+ * 2. We concern with path B legacy/HT OFDM difference.
+ * 01/22/2009 MHC Support new EEPROM format from SD3.
+ *
+ *---------------------------------------------------------------------------*/
+
+void
+rtl8188e_PHY_RF6052SetOFDMTxPower(
+ struct adapter *Adapter,
+ u8 *pPowerLevelOFDM,
+ u8 *pPowerLevelBW20,
+ u8 *pPowerLevelBW40,
+ u8 Channel)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ u32 writeVal[2], powerBase0[2], powerBase1[2], pwrtrac_value;
+ u8 direction;
+ u8 index = 0;
+
+ getpowerbase88e(Adapter, pPowerLevelOFDM, pPowerLevelBW20, pPowerLevelBW40, Channel, &powerBase0[0], &powerBase1[0]);
+
+ /* 2012/04/23 MH According to power tracking value, we need to revise OFDM tx power. */
+	/* This is used to fix the unstable power tracking mode. */
+ ODM_TxPwrTrackAdjust88E(&pHalData->odmpriv, 0, &direction, &pwrtrac_value);
+
+ for (index = 0; index < 6; index++) {
+ get_rx_power_val_by_reg(Adapter, Channel, index,
+ &powerBase0[0], &powerBase1[0],
+ &writeVal[0]);
+
+ if (direction == 1) {
+ writeVal[0] += pwrtrac_value;
+ writeVal[1] += pwrtrac_value;
+ } else if (direction == 2) {
+ writeVal[0] -= pwrtrac_value;
+ writeVal[1] -= pwrtrac_value;
+ }
+ writeOFDMPowerReg88E(Adapter, index, &writeVal[0]);
+ }
+}
+
+static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
+{
+ struct bb_reg_def *pPhyReg;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ u32 u4RegValue = 0;
+ u8 eRFPath;
+ int rtStatus = _SUCCESS;
+
+ /* 3----------------------------------------------------------------- */
+ /* 3 <2> Initialize RF */
+ /* 3----------------------------------------------------------------- */
+ for (eRFPath = 0; eRFPath < pHalData->NumTotalRFPath; eRFPath++) {
+ pPhyReg = &pHalData->PHYRegDef[eRFPath];
+
+ /*----Store original RFENV control type----*/
+ switch (eRFPath) {
+ case RF_PATH_A:
+ case RF_PATH_C:
+ u4RegValue = PHY_QueryBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV);
+ break;
+ case RF_PATH_B:
+ case RF_PATH_D:
+ u4RegValue = PHY_QueryBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV<<16);
+ break;
+ }
+ /*----Set RF_ENV enable----*/
+ PHY_SetBBReg(Adapter, pPhyReg->rfintfe, bRFSI_RFENV<<16, 0x1);
+ rtw_udelay_os(1);/* PlatformStallExecution(1); */
+
+ /*----Set RF_ENV output high----*/
+ PHY_SetBBReg(Adapter, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);
+ rtw_udelay_os(1);/* PlatformStallExecution(1); */
+
+ /* Set bit number of Address and Data for RF register */
+ PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0); /* Set 1 to 4 bits for 8255 */
+ rtw_udelay_os(1);/* PlatformStallExecution(1); */
+
+ PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireDataLength, 0x0); /* Set 0 to 12 bits for 8255 */
+ rtw_udelay_os(1);/* PlatformStallExecution(1); */
+
+		/*----Initialize RF from configuration file----*/
+ switch (eRFPath) {
+ case RF_PATH_A:
+ if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, (enum ODM_RF_RADIO_PATH)eRFPath, (enum ODM_RF_RADIO_PATH)eRFPath))
+ rtStatus = _FAIL;
+ break;
+ case RF_PATH_B:
+ if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, (enum ODM_RF_RADIO_PATH)eRFPath, (enum ODM_RF_RADIO_PATH)eRFPath))
+ rtStatus = _FAIL;
+ break;
+ case RF_PATH_C:
+ break;
+ case RF_PATH_D:
+ break;
+ }
+		/*----Restore RFENV control type----*/
+ switch (eRFPath) {
+ case RF_PATH_A:
+ case RF_PATH_C:
+ PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV, u4RegValue);
+ break;
+ case RF_PATH_B:
+ case RF_PATH_D:
+ PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV<<16, u4RegValue);
+ break;
+ }
+ if (rtStatus != _SUCCESS)
+ goto phy_RF6052_Config_ParaFile_Fail;
+ }
+ return rtStatus;
+
+phy_RF6052_Config_ParaFile_Fail:
+ return rtStatus;
+}
+
+int PHY_RF6052_Config8188E(struct adapter *Adapter)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ int rtStatus = _SUCCESS;
+
+ /* */
+ /* Initialize general global value */
+ /* */
+ /* TODO: Extend RF_PATH_C and RF_PATH_D in the future */
+ if (pHalData->rf_type == RF_1T1R)
+ pHalData->NumTotalRFPath = 1;
+ else
+ pHalData->NumTotalRFPath = 2;
+
+ /* */
+ /* Config BB and RF */
+ /* */
+ rtStatus = phy_RF6052_Config_ParaFile(Adapter);
+ return rtStatus;
+}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
new file mode 100644
index 0000000..05e2475
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
@@ -0,0 +1,202 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTL8188E_REDESC_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <rtl8188e_hal.h>
+
+static void process_rssi(struct adapter *padapter, union recv_frame *prframe)
+{
+ struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
+ struct signal_stat *signal_stat = &padapter->recvpriv.signal_strength_data;
+
+ if (signal_stat->update_req) {
+ signal_stat->total_num = 0;
+ signal_stat->total_val = 0;
+ signal_stat->update_req = 0;
+ }
+
+ signal_stat->total_num++;
+ signal_stat->total_val += pattrib->phy_info.SignalStrength;
+ signal_stat->avg_val = signal_stat->total_val / signal_stat->total_num;
+} /* Process_UI_RSSI_8192C */
+
+static void process_link_qual(struct adapter *padapter, union recv_frame *prframe)
+{
+ struct rx_pkt_attrib *pattrib;
+ struct signal_stat *signal_stat;
+
+ if (prframe == NULL || padapter == NULL)
+ return;
+
+ pattrib = &prframe->u.hdr.attrib;
+ signal_stat = &padapter->recvpriv.signal_qual_data;
+
+ if (signal_stat->update_req) {
+ signal_stat->total_num = 0;
+ signal_stat->total_val = 0;
+ signal_stat->update_req = 0;
+ }
+
+ signal_stat->total_num++;
+ signal_stat->total_val += pattrib->phy_info.SignalQuality;
+ signal_stat->avg_val = signal_stat->total_val / signal_stat->total_num;
+}
+
+void rtl8188e_process_phy_info(struct adapter *padapter, void *prframe)
+{
+ union recv_frame *precvframe = (union recv_frame *)prframe;
+
+ /* Check RSSI */
+ process_rssi(padapter, precvframe);
+ /* Check EVM */
+ process_link_qual(padapter, precvframe);
+}
+
+void update_recvframe_attrib_88e(union recv_frame *precvframe, struct recv_stat *prxstat)
+{
+ struct rx_pkt_attrib *pattrib;
+ struct recv_stat report;
+
+ report.rxdw0 = prxstat->rxdw0;
+ report.rxdw1 = prxstat->rxdw1;
+ report.rxdw2 = prxstat->rxdw2;
+ report.rxdw3 = prxstat->rxdw3;
+ report.rxdw4 = prxstat->rxdw4;
+ report.rxdw5 = prxstat->rxdw5;
+
+ pattrib = &precvframe->u.hdr.attrib;
+ _rtw_memset(pattrib, 0, sizeof(struct rx_pkt_attrib));
+
+	pattrib->crc_err = (u8)((le32_to_cpu(report.rxdw0) >> 14) & 0x1); /* u8)prxreport->crc32; */
+
+ /* update rx report to recv_frame attribute */
+ pattrib->pkt_rpt_type = (u8)((le32_to_cpu(report.rxdw3) >> 14) & 0x3);/* prxreport->rpt_sel; */
+
+ if (pattrib->pkt_rpt_type == NORMAL_RX) { /* Normal rx packet */
+ pattrib->pkt_len = (u16)(le32_to_cpu(report.rxdw0) & 0x00003fff);/* u16)prxreport->pktlen; */
+ pattrib->drvinfo_sz = (u8)((le32_to_cpu(report.rxdw0) >> 16) & 0xf) * 8;/* u8)(prxreport->drvinfosize << 3); */
+
+ pattrib->physt = (u8)((le32_to_cpu(report.rxdw0) >> 26) & 0x1);/* u8)prxreport->physt; */
+
+ pattrib->bdecrypted = (le32_to_cpu(report.rxdw0) & BIT(27)) ? 0 : 1;/* u8)(prxreport->swdec ? 0 : 1); */
+ pattrib->encrypt = (u8)((le32_to_cpu(report.rxdw0) >> 20) & 0x7);/* u8)prxreport->security; */
+
+ pattrib->qos = (u8)((le32_to_cpu(report.rxdw0) >> 23) & 0x1);/* u8)prxreport->qos; */
+ pattrib->priority = (u8)((le32_to_cpu(report.rxdw1) >> 8) & 0xf);/* u8)prxreport->tid; */
+
+ pattrib->amsdu = (u8)((le32_to_cpu(report.rxdw1) >> 13) & 0x1);/* u8)prxreport->amsdu; */
+
+ pattrib->seq_num = (u16)(le32_to_cpu(report.rxdw2) & 0x00000fff);/* u16)prxreport->seq; */
+ pattrib->frag_num = (u8)((le32_to_cpu(report.rxdw2) >> 12) & 0xf);/* u8)prxreport->frag; */
+ pattrib->mfrag = (u8)((le32_to_cpu(report.rxdw1) >> 27) & 0x1);/* u8)prxreport->mf; */
+ pattrib->mdata = (u8)((le32_to_cpu(report.rxdw1) >> 26) & 0x1);/* u8)prxreport->md; */
+
+ pattrib->mcs_rate = (u8)(le32_to_cpu(report.rxdw3) & 0x3f);/* u8)prxreport->rxmcs; */
+ pattrib->rxht = (u8)((le32_to_cpu(report.rxdw3) >> 6) & 0x1);/* u8)prxreport->rxht; */
+
+ pattrib->icv_err = (u8)((le32_to_cpu(report.rxdw0) >> 15) & 0x1);/* u8)prxreport->icverr; */
+ pattrib->shift_sz = (u8)((le32_to_cpu(report.rxdw0) >> 24) & 0x3);
+ } else if (pattrib->pkt_rpt_type == TX_REPORT1) { /* CCX */
+ pattrib->pkt_len = TX_RPT1_PKT_LEN;
+ pattrib->drvinfo_sz = 0;
+ } else if (pattrib->pkt_rpt_type == TX_REPORT2) { /* TX RPT */
+ pattrib->pkt_len = (u16)(le32_to_cpu(report.rxdw0) & 0x3FF);/* Rx length[9:0] */
+ pattrib->drvinfo_sz = 0;
+
+ /* */
+ /* Get TX report MAC ID valid. */
+ /* */
+ pattrib->MacIDValidEntry[0] = le32_to_cpu(report.rxdw4);
+ pattrib->MacIDValidEntry[1] = le32_to_cpu(report.rxdw5);
+
+ } else if (pattrib->pkt_rpt_type == HIS_REPORT) { /* USB HISR RPT */
+ pattrib->pkt_len = (u16)(le32_to_cpu(report.rxdw0) & 0x00003fff);/* u16)prxreport->pktlen; */
+ }
+}
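+
+/*
+ * rxdw0 layout for NORMAL_RX frames, as decoded above: [13:0] pkt_len,
+ * [14] crc_err, [15] icv_err, [19:16] drvinfo_sz (in 8-byte units),
+ * [22:20] encrypt, [23] qos, [25:24] shift_sz, [26] physt, [27] swdec
+ * (bdecrypted is its inverse).
+ */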
+
+/*
+ * Notice:
+ * Before calling this function,
+ * precvframe->u.hdr.rx_data should be ready!
+ */
+void update_recvframe_phyinfo_88e(union recv_frame *precvframe, struct phy_stat *pphy_status)
+{
+ struct adapter *padapter = precvframe->u.hdr.adapter;
+ struct rx_pkt_attrib *pattrib = &precvframe->u.hdr.attrib;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct odm_phy_status_info *pPHYInfo = (struct odm_phy_status_info *)(&pattrib->phy_info);
+ u8 *wlanhdr;
+ struct odm_per_pkt_info pkt_info;
+ u8 *sa = NULL;
+ struct sta_priv *pstapriv;
+ struct sta_info *psta;
+
+ pkt_info.bPacketMatchBSSID = false;
+ pkt_info.bPacketToSelf = false;
+ pkt_info.bPacketBeacon = false;
+
+ wlanhdr = get_recvframe_data(precvframe);
+
+ pkt_info.bPacketMatchBSSID = ((!IsFrameTypeCtrl(wlanhdr)) &&
+ !pattrib->icv_err && !pattrib->crc_err &&
+ _rtw_memcmp(get_hdr_bssid(wlanhdr),
+ get_bssid(&padapter->mlmepriv), ETH_ALEN));
+
+ pkt_info.bPacketToSelf = pkt_info.bPacketMatchBSSID &&
+ (_rtw_memcmp(get_da(wlanhdr),
+ myid(&padapter->eeprompriv), ETH_ALEN));
+
+ pkt_info.bPacketBeacon = pkt_info.bPacketMatchBSSID &&
+ (GetFrameSubType(wlanhdr) == WIFI_BEACON);
+
+ if (pkt_info.bPacketBeacon) {
+ if (check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE))
+ sa = padapter->mlmepriv.cur_network.network.MacAddress;
+ /* to do Ad-hoc */
+ } else {
+ sa = get_sa(wlanhdr);
+ }
+
+ pstapriv = &padapter->stapriv;
+ pkt_info.StationID = 0xFF;
+ psta = rtw_get_stainfo(pstapriv, sa);
+ if (psta)
+ pkt_info.StationID = psta->mac_id;
+ pkt_info.Rate = pattrib->mcs_rate;
+
+ ODM_PhyStatusQuery(&pHalData->odmpriv, pPHYInfo, (u8 *)pphy_status, &(pkt_info));
+
+ precvframe->u.hdr.psta = NULL;
+ if (pkt_info.bPacketMatchBSSID &&
+ (check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE))) {
+ if (psta) {
+ precvframe->u.hdr.psta = psta;
+ rtl8188e_process_phy_info(padapter, precvframe);
+ }
+ } else if (pkt_info.bPacketToSelf || pkt_info.bPacketBeacon) {
+ if (check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE)) {
+ if (psta)
+ precvframe->u.hdr.psta = psta;
+ }
+ rtl8188e_process_phy_info(padapter, precvframe);
+ }
+}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_sreset.c b/drivers/staging/rtl8188eu/hal/rtl8188e_sreset.c
new file mode 100644
index 0000000..96d698e
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_sreset.c
@@ -0,0 +1,80 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTL8188E_SRESET_C_
+
+#include <rtl8188e_sreset.h>
+#include <rtl8188e_hal.h>
+
+void rtl8188e_silentreset_for_specific_platform(struct adapter *padapter)
+{
+}
+
+void rtl8188e_sreset_xmit_status_check(struct adapter *padapter)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct sreset_priv *psrtpriv = &pHalData->srestpriv;
+
+ unsigned long current_time;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ unsigned int diff_time;
+ u32 txdma_status;
+
+ txdma_status = rtw_read32(padapter, REG_TXDMA_STATUS);
+ if (txdma_status != 0x00) {
+ DBG_88E("%s REG_TXDMA_STATUS:0x%08x\n", __func__, txdma_status);
+ rtw_write32(padapter, REG_TXDMA_STATUS, txdma_status);
+ rtl8188e_silentreset_for_specific_platform(padapter);
+ }
+ /* total xmit irp = 4 */
+ current_time = rtw_get_current_time();
+ if (0 == pxmitpriv->free_xmitbuf_cnt) {
+ diff_time = jiffies_to_msecs(current_time - psrtpriv->last_tx_time);
+
+ if (diff_time > 2000) {
+ if (psrtpriv->last_tx_complete_time == 0) {
+ psrtpriv->last_tx_complete_time = current_time;
+ } else {
+ diff_time = jiffies_to_msecs(current_time - psrtpriv->last_tx_complete_time);
+ if (diff_time > 4000) {
+ DBG_88E("%s tx hang\n", __func__);
+ rtl8188e_silentreset_for_specific_platform(padapter);
+ }
+ }
+ }
+ }
+}
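+
+/*
+ * Hang heuristic above, summarized: when no free xmit buffers remain, a
+ * silent reset is requested once more than 2 s have passed since the last
+ * transmit and more than 4 s since the last recorded transmit completion.
+ */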
+
+void rtl8188e_sreset_linked_status_check(struct adapter *padapter)
+{
+ u32 rx_dma_status = 0;
+ u8 fw_status = 0;
+ rx_dma_status = rtw_read32(padapter, REG_RXDMA_STATUS);
+ if (rx_dma_status != 0x00) {
+ DBG_88E("%s REG_RXDMA_STATUS:0x%08x\n", __func__, rx_dma_status);
+ rtw_write32(padapter, REG_RXDMA_STATUS, rx_dma_status);
+ }
+ fw_status = rtw_read8(padapter, REG_FMETHR);
+ if (fw_status != 0x00) {
+ if (fw_status == 1)
+ DBG_88E("%s REG_FW_STATUS (0x%02x), Read_Efuse_Fail !!\n", __func__, fw_status);
+ else if (fw_status == 2)
+ DBG_88E("%s REG_FW_STATUS (0x%02x), Condition_No_Match !!\n", __func__, fw_status);
+ }
+}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
new file mode 100644
index 0000000..7ecbcf7
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
@@ -0,0 +1,91 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTL8188E_XMIT_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <rtl8188e_hal.h>
+
+void dump_txrpt_ccx_88e(void *buf)
+{
+ struct txrpt_ccx_88e *txrpt_ccx = (struct txrpt_ccx_88e *)buf;
+
+ DBG_88E("%s:\n"
+ "tag1:%u, pkt_num:%u, txdma_underflow:%u, int_bt:%u, int_tri:%u, int_ccx:%u\n"
+ "mac_id:%u, pkt_ok:%u, bmc:%u\n"
+ "retry_cnt:%u, lifetime_over:%u, retry_over:%u\n"
+ "ccx_qtime:%u\n"
+ "final_data_rate:0x%02x\n"
+ "qsel:%u, sw:0x%03x\n",
+ __func__, txrpt_ccx->tag1, txrpt_ccx->pkt_num,
+ txrpt_ccx->txdma_underflow, txrpt_ccx->int_bt,
+ txrpt_ccx->int_tri, txrpt_ccx->int_ccx,
+ txrpt_ccx->mac_id, txrpt_ccx->pkt_ok, txrpt_ccx->bmc,
+ txrpt_ccx->retry_cnt, txrpt_ccx->lifetime_over,
+ txrpt_ccx->retry_over, txrpt_ccx_qtime_88e(txrpt_ccx),
+ txrpt_ccx->final_data_rate, txrpt_ccx->qsel,
+ txrpt_ccx_sw_88e(txrpt_ccx)
+ );
+}
+
+void handle_txrpt_ccx_88e(struct adapter *adapter, u8 *buf)
+{
+ struct txrpt_ccx_88e *txrpt_ccx = (struct txrpt_ccx_88e *)buf;
+
+ if (txrpt_ccx->int_ccx) {
+ if (txrpt_ccx->pkt_ok)
+ rtw_ack_tx_done(&adapter->xmitpriv,
+ RTW_SCTX_DONE_SUCCESS);
+ else
+ rtw_ack_tx_done(&adapter->xmitpriv,
+ RTW_SCTX_DONE_CCX_PKT_FAIL);
+ }
+}
+
+void _dbg_dump_tx_info(struct adapter *padapter, int frame_tag,
+ struct tx_desc *ptxdesc)
+{
+ u8 dmp_txpkt;
+ bool dump_txdesc = false;
+ rtw_hal_get_def_var(padapter, HAL_DEF_DBG_DUMP_TXPKT, &(dmp_txpkt));
+
+ if (dmp_txpkt == 1) {/* dump txdesc for data frame */
+ DBG_88E("dump tx_desc for data frame\n");
+ if ((frame_tag & 0x0f) == DATA_FRAMETAG)
+ dump_txdesc = true;
+ } else if (dmp_txpkt == 2) {/* dump txdesc for mgnt frame */
+ DBG_88E("dump tx_desc for mgnt frame\n");
+ if ((frame_tag & 0x0f) == MGNT_FRAMETAG)
+ dump_txdesc = true;
+ }
+
+ if (dump_txdesc) {
+ DBG_88E("=====================================\n");
+ DBG_88E("txdw0(0x%08x)\n", ptxdesc->txdw0);
+ DBG_88E("txdw1(0x%08x)\n", ptxdesc->txdw1);
+ DBG_88E("txdw2(0x%08x)\n", ptxdesc->txdw2);
+ DBG_88E("txdw3(0x%08x)\n", ptxdesc->txdw3);
+ DBG_88E("txdw4(0x%08x)\n", ptxdesc->txdw4);
+ DBG_88E("txdw5(0x%08x)\n", ptxdesc->txdw5);
+ DBG_88E("txdw6(0x%08x)\n", ptxdesc->txdw6);
+ DBG_88E("txdw7(0x%08x)\n", ptxdesc->txdw7);
+ DBG_88E("=====================================\n");
+ }
+}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
new file mode 100644
index 0000000..08dfd94
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
@@ -0,0 +1,111 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <rtl8188e_hal.h>
+#include <rtl8188e_led.h>
+
+/* LED object. */
+
+/* LED_819xUsb routines. */
+/* Description: */
+/* Turn on LED according to LedPin specified. */
+void SwLedOn(struct adapter *padapter, struct LED_871x *pLed)
+{
+ u8 LedCfg;
+
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
+ return;
+ LedCfg = rtw_read8(padapter, REG_LEDCFG2);
+ switch (pLed->LedPin) {
+ case LED_PIN_LED0:
+ rtw_write8(padapter, REG_LEDCFG2, (LedCfg&0xf0)|BIT5|BIT6); /* SW control led0 on. */
+ break;
+ case LED_PIN_LED1:
+ rtw_write8(padapter, REG_LEDCFG2, (LedCfg&0x0f)|BIT5); /* SW control led1 on. */
+ break;
+ default:
+ break;
+ }
+ pLed->bLedOn = true;
+}
+
+/* Description: */
+/* Turn off LED according to LedPin specified. */
+void SwLedOff(struct adapter *padapter, struct LED_871x *pLed)
+{
+ u8 LedCfg;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
+ goto exit;
+
+ LedCfg = rtw_read8(padapter, REG_LEDCFG2);/* 0x4E */
+
+ switch (pLed->LedPin) {
+ case LED_PIN_LED0:
+ if (pHalData->bLedOpenDrain) {
+			/* Open-drain arrangement for controlling the LED */
+ LedCfg &= 0x90; /* Set to software control. */
+ rtw_write8(padapter, REG_LEDCFG2, (LedCfg|BIT3));
+ LedCfg = rtw_read8(padapter, REG_MAC_PINMUX_CFG);
+ LedCfg &= 0xFE;
+ rtw_write8(padapter, REG_MAC_PINMUX_CFG, LedCfg);
+ } else {
+ rtw_write8(padapter, REG_LEDCFG2, (LedCfg|BIT3|BIT5|BIT6));
+ }
+ break;
+ case LED_PIN_LED1:
+ LedCfg &= 0x0f; /* Set to software control. */
+ rtw_write8(padapter, REG_LEDCFG2, (LedCfg|BIT3));
+ break;
+ default:
+ break;
+ }
+exit:
+ pLed->bLedOn = false;
+}
+
+/* Interface to manipulate LED objects. */
+/* Default LED behavior. */
+
+/* Description: */
+/* Initialize all LED_871x objects. */
+void rtl8188eu_InitSwLeds(struct adapter *padapter)
+{
+ struct led_priv *pledpriv = &(padapter->ledpriv);
+
+ pledpriv->LedControlHandler = LedControl8188eu;
+
+ InitLed871x(padapter, &(pledpriv->SwLed0), LED_PIN_LED0);
+
+ InitLed871x(padapter, &(pledpriv->SwLed1), LED_PIN_LED1);
+}
+
+/* Description: */
+/* DeInitialize all LED_819xUsb objects. */
+void rtl8188eu_DeInitSwLeds(struct adapter *padapter)
+{
+ struct led_priv *ledpriv = &(padapter->ledpriv);
+
+ DeInitLed871x(&(ledpriv->SwLed0));
+ DeInitLed871x(&(ledpriv->SwLed1));
+}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
new file mode 100644
index 0000000..0f47b89
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
@@ -0,0 +1,138 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTL8188EU_RECV_C_
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <recv_osdep.h>
+#include <mlme_osdep.h>
+#include <ip.h>
+#include <if_ether.h>
+#include <ethernet.h>
+
+#include <usb_ops.h>
+#include <wifi.h>
+
+#include <rtl8188e_hal.h>
+
+void rtl8188eu_init_recvbuf(struct adapter *padapter, struct recv_buf *precvbuf)
+{
+ precvbuf->transfer_len = 0;
+
+ precvbuf->len = 0;
+
+ precvbuf->ref_cnt = 0;
+
+ if (precvbuf->pbuf) {
+ precvbuf->pdata = precvbuf->pbuf;
+ precvbuf->phead = precvbuf->pbuf;
+ precvbuf->ptail = precvbuf->pbuf;
+ precvbuf->pend = precvbuf->pdata + MAX_RECVBUF_SZ;
+ }
+}
+
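+/* Allocate NR_RECVBUFF recv_buf structures (4-byte aligned) and pre-allocate */
+/* NR_PREALLOC_RECV_SKB skbs whose data pointers are aligned to RECVBUFF_ALIGN_SZ; */
+/* the skbs are parked on free_recv_skb_queue for reuse by the rx path. */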
+int rtl8188eu_init_recv_priv(struct adapter *padapter)
+{
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+ int i, res = _SUCCESS;
+ struct recv_buf *precvbuf;
+
+ tasklet_init(&precvpriv->recv_tasklet,
+ (void(*)(unsigned long))rtl8188eu_recv_tasklet,
+ (unsigned long)padapter);
+
+ /* init recv_buf */
+ _rtw_init_queue(&precvpriv->free_recv_buf_queue);
+
+ precvpriv->pallocated_recv_buf = rtw_zmalloc(NR_RECVBUFF * sizeof(struct recv_buf) + 4);
+ if (precvpriv->pallocated_recv_buf == NULL) {
+ res = _FAIL;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("alloc recv_buf fail!\n"));
+ goto exit;
+ }
+ _rtw_memset(precvpriv->pallocated_recv_buf, 0, NR_RECVBUFF * sizeof(struct recv_buf) + 4);
+
+ precvpriv->precv_buf = (u8 *)N_BYTE_ALIGMENT((size_t)(precvpriv->pallocated_recv_buf), 4);
+
+
+ precvbuf = (struct recv_buf *)precvpriv->precv_buf;
+
+ for (i = 0; i < NR_RECVBUFF; i++) {
+ _rtw_init_listhead(&precvbuf->list);
+ _rtw_spinlock_init(&precvbuf->recvbuf_lock);
+ precvbuf->alloc_sz = MAX_RECVBUF_SZ;
+ res = rtw_os_recvbuf_resource_alloc(padapter, precvbuf);
+ if (res == _FAIL)
+ break;
+ precvbuf->ref_cnt = 0;
+ precvbuf->adapter = padapter;
+ precvbuf++;
+ }
+ precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF;
+ skb_queue_head_init(&precvpriv->rx_skb_queue);
+ {
+ int i;
+ size_t tmpaddr = 0;
+ size_t alignment = 0;
+ struct sk_buff *pskb = NULL;
+
+ skb_queue_head_init(&precvpriv->free_recv_skb_queue);
+
+ for (i = 0; i < NR_PREALLOC_RECV_SKB; i++) {
+ pskb = __netdev_alloc_skb(padapter->pnetdev, MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ, GFP_KERNEL);
+ if (pskb) {
+ pskb->dev = padapter->pnetdev;
+ tmpaddr = (size_t)pskb->data;
+ alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
+ skb_reserve(pskb, (RECVBUFF_ALIGN_SZ - alignment));
+
+ skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);
+ }
+ pskb = NULL;
+ }
+ }
+exit:
+ return res;
+}
+
+void rtl8188eu_free_recv_priv(struct adapter *padapter)
+{
+ int i;
+ struct recv_buf *precvbuf;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+
+ precvbuf = (struct recv_buf *)precvpriv->precv_buf;
+
+ for (i = 0; i < NR_RECVBUFF; i++) {
+ rtw_os_recvbuf_resource_free(padapter, precvbuf);
+ precvbuf++;
+ }
+
+ kfree(precvpriv->pallocated_recv_buf);
+
+ if (skb_queue_len(&precvpriv->rx_skb_queue))
+ DBG_88E(KERN_WARNING "rx_skb_queue not empty\n");
+ skb_queue_purge(&precvpriv->rx_skb_queue);
+
+
+ if (skb_queue_len(&precvpriv->free_recv_skb_queue))
+ DBG_88E(KERN_WARNING "free_recv_skb_queue not empty, %d\n", skb_queue_len(&precvpriv->free_recv_skb_queue));
+
+ skb_queue_purge(&precvpriv->free_recv_skb_queue);
+}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
new file mode 100644
index 0000000..bd8a9ae
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
@@ -0,0 +1,706 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTL8188E_XMIT_C_
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <wifi.h>
+#include <osdep_intf.h>
+#include <usb_ops.h>
+#include <rtl8188e_hal.h>
+
+s32 rtl8188eu_init_xmit_priv(struct adapter *adapt)
+{
+ struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
+
+ tasklet_init(&pxmitpriv->xmit_tasklet,
+ (void(*)(unsigned long))rtl8188eu_xmit_tasklet,
+ (unsigned long)adapt);
+ return _SUCCESS;
+}
+
+void rtl8188eu_free_xmit_priv(struct adapter *adapt)
+{
+}
+
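+/* Returns 1 when (sz + TXDESC_SIZE) is an exact multiple of the USB bulk-out */
+/* size; update_txdesc() then shifts the descriptor by PACKET_OFFSET_SZ, */
+/* presumably so the bulk transfer does not end on a max-packet boundary. */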
+static u8 urb_zero_packet_chk(struct adapter *adapt, int sz)
+{
+ u8 set_tx_desc_offset;
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ set_tx_desc_offset = (((sz + TXDESC_SIZE) % haldata->UsbBulkOutSize) == 0) ? 1 : 0;
+
+ return set_tx_desc_offset;
+}
+
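+/* XOR the first 16 little-endian half-words (32 bytes) of the descriptor and */
+/* store the result in the low 16 bits of txdw7; the USB interface drops */
+/* packets whose descriptor checksum does not match (see rtl8188e_fill_fake_txdesc()). */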
+static void rtl8188eu_cal_txdesc_chksum(struct tx_desc *ptxdesc)
+{
+ u16 *usptr = (u16 *)ptxdesc;
+ u32 count = 16; /* (32 bytes / 2 bytes per XOR) => 16 times */
+ u32 index;
+ u16 checksum = 0;
+
+ /* Clear first */
+ ptxdesc->txdw7 &= cpu_to_le32(0xffff0000);
+
+ for (index = 0; index < count; index++)
+ checksum = checksum ^ le16_to_cpu(*(__le16 *)(usptr + index));
+ ptxdesc->txdw7 |= cpu_to_le32(0x0000ffff & checksum);
+}
+
+/* Description: In the normal chip, we should send some packets to Hw which will be used by Fw */
+/* in FW LPS mode. This function fills the Tx descriptors of these packets, so that */
+/* Fw can tell Hw to send them directly. */
+void rtl8188e_fill_fake_txdesc(struct adapter *adapt, u8 *desc, u32 BufferLen, u8 ispspoll, u8 is_btqosnull)
+{
+ struct tx_desc *ptxdesc;
+
+ /* Clear all status */
+ ptxdesc = (struct tx_desc *)desc;
+ _rtw_memset(desc, 0, TXDESC_SIZE);
+
+ /* offset 0 */
+ ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG); /* own, bFirstSeg, bLastSeg; */
+
+ ptxdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE+OFFSET_SZ)<<OFFSET_SHT)&0x00ff0000); /* 32 bytes for TX Desc */
+
+ ptxdesc->txdw0 |= cpu_to_le32(BufferLen&0x0000ffff); /* Buffer size + command header */
+
+ /* offset 4 */
+ ptxdesc->txdw1 |= cpu_to_le32((QSLT_MGNT<<QSEL_SHT)&0x00001f00); /* Fixed queue of Mgnt queue */
+
+ /* Set NAVUSEHDR to prevent the PS-Poll AID field from being changed to an erroneous value by Hw. */
+ if (ispspoll) {
+ ptxdesc->txdw1 |= cpu_to_le32(NAVUSEHDR);
+ } else {
+ ptxdesc->txdw4 |= cpu_to_le32(BIT(7)); /* Hw set sequence number */
+ ptxdesc->txdw3 |= cpu_to_le32((8 << 28)); /* set bit3 to 1. Suggested by TimChen. 2009.12.29. */
+ }
+
+ if (is_btqosnull)
+ ptxdesc->txdw2 |= cpu_to_le32(BIT(23)); /* BT NULL */
+
+ /* offset 16 */
+ ptxdesc->txdw4 |= cpu_to_le32(BIT(8));/* driver uses rate */
+
+ /* The USB interface drops the packet if the descriptor checksum isn't correct. */
+ /* Using this checksum lets the hardware recover from bulk-out errors (e.g. cancelled URB, bulk-out error). */
+ rtl8188eu_cal_txdesc_chksum(ptxdesc);
+}
+
+static void fill_txdesc_sectype(struct pkt_attrib *pattrib, struct tx_desc *ptxdesc)
+{
+ if ((pattrib->encrypt > 0) && !pattrib->bswenc) {
+ switch (pattrib->encrypt) {
+ /* SEC_TYPE : 0:NO_ENC,1:WEP40/TKIP,2:WAPI,3:AES */
+ case _WEP40_:
+ case _WEP104_:
+ ptxdesc->txdw1 |= cpu_to_le32((0x01<<SEC_TYPE_SHT)&0x00c00000);
+ ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
+ break;
+ case _TKIP_:
+ case _TKIP_WTMIC_:
+ ptxdesc->txdw1 |= cpu_to_le32((0x01<<SEC_TYPE_SHT)&0x00c00000);
+ ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
+ break;
+ case _AES_:
+ ptxdesc->txdw1 |= cpu_to_le32((0x03<<SEC_TYPE_SHT)&0x00c00000);
+ ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
+ break;
+ case _NO_PRIVACY_:
+ default:
+ break;
+ }
+ }
+}
+
+static void fill_txdesc_vcs(struct pkt_attrib *pattrib, __le32 *pdw)
+{
+ switch (pattrib->vcs_mode) {
+ case RTS_CTS:
+ *pdw |= cpu_to_le32(RTS_EN);
+ break;
+ case CTS_TO_SELF:
+ *pdw |= cpu_to_le32(CTS_2_SELF);
+ break;
+ case NONE_VCS:
+ default:
+ break;
+ }
+ if (pattrib->vcs_mode) {
+ *pdw |= cpu_to_le32(HW_RTS_EN);
+ /* Set RTS BW */
+ if (pattrib->ht_en) {
+ *pdw |= (pattrib->bwmode&HT_CHANNEL_WIDTH_40) ? cpu_to_le32(BIT(27)) : 0;
+
+ if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
+ *pdw |= cpu_to_le32((0x01 << 28) & 0x30000000);
+ else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
+ *pdw |= cpu_to_le32((0x02 << 28) & 0x30000000);
+ else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)
+ *pdw |= 0;
+ else
+ *pdw |= cpu_to_le32((0x03 << 28) & 0x30000000);
+ }
+ }
+}
+
+static void fill_txdesc_phy(struct pkt_attrib *pattrib, __le32 *pdw)
+{
+ if (pattrib->ht_en) {
+ *pdw |= (pattrib->bwmode&HT_CHANNEL_WIDTH_40) ? cpu_to_le32(BIT(25)) : 0;
+
+ if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
+ *pdw |= cpu_to_le32((0x01 << DATA_SC_SHT) & 0x003f0000);
+ else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
+ *pdw |= cpu_to_le32((0x02 << DATA_SC_SHT) & 0x003f0000);
+ else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)
+ *pdw |= 0;
+ else
+ *pdw |= cpu_to_le32((0x03 << DATA_SC_SHT) & 0x003f0000);
+ }
+}
+
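+/* Fill the tx descriptor in front of the frame at pmem.  For a non-aggregated */
+/* frame whose length is an exact bulk-out multiple, the descriptor is written */
+/* PACKET_OFFSET_SZ bytes further in ("pull"); the return value tells the */
+/* caller whether that shift was applied. */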
+static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bagg_pkt)
+{
+ int pull = 0;
+ uint qsel;
+ u8 data_rate, pwr_status, offset;
+ struct adapter *adapt = pxmitframe->padapter;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct tx_desc *ptxdesc = (struct tx_desc *)pmem;
+ struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ int bmcst = IS_MCAST(pattrib->ra);
+
+ if (adapt->registrypriv.mp_mode == 0) {
+ if ((!bagg_pkt) && (urb_zero_packet_chk(adapt, sz) == 0)) {
+ ptxdesc = (struct tx_desc *)(pmem+PACKET_OFFSET_SZ);
+ pull = 1;
+ }
+ }
+
+ _rtw_memset(ptxdesc, 0, sizeof(struct tx_desc));
+
+ /* 4 offset 0 */
+ ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG);
+ ptxdesc->txdw0 |= cpu_to_le32(sz & 0x0000ffff);/* update TXPKTSIZE */
+
+ offset = TXDESC_SIZE + OFFSET_SZ;
+
+ ptxdesc->txdw0 |= cpu_to_le32(((offset) << OFFSET_SHT) & 0x00ff0000);/* 32 bytes for TX Desc */
+
+ if (bmcst)
+ ptxdesc->txdw0 |= cpu_to_le32(BMC);
+
+ if (adapt->registrypriv.mp_mode == 0) {
+ if (!bagg_pkt) {
+ if ((pull) && (pxmitframe->pkt_offset > 0))
+ pxmitframe->pkt_offset = pxmitframe->pkt_offset - 1;
+ }
+ }
+
+ /* pkt_offset, unit:8 bytes padding */
+ if (pxmitframe->pkt_offset > 0)
+ ptxdesc->txdw1 |= cpu_to_le32((pxmitframe->pkt_offset << 26) & 0x7c000000);
+
+ /* driver uses rate */
+ ptxdesc->txdw4 |= cpu_to_le32(USERATE);/* rate control always by driver */
+
+ if ((pxmitframe->frame_tag & 0x0f) == DATA_FRAMETAG) {
+ /* offset 4 */
+ ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id & 0x3F);
+
+ qsel = (uint)(pattrib->qsel & 0x0000001f);
+ ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);
+
+ ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000F0000);
+
+ fill_txdesc_sectype(pattrib, ptxdesc);
+
+ if (pattrib->ampdu_en) {
+ ptxdesc->txdw2 |= cpu_to_le32(AGG_EN);/* AGG EN */
+ ptxdesc->txdw6 = cpu_to_le32(0x6666f800);
+ } else {
+ ptxdesc->txdw2 |= cpu_to_le32(AGG_BK);/* AGG BK */
+ }
+
+ /* offset 8 */
+
+ /* offset 12 */
+ ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum << SEQ_SHT) & 0x0FFF0000);
+
+ /* offset 16 , offset 20 */
+ if (pattrib->qos_en)
+ ptxdesc->txdw4 |= cpu_to_le32(QOS);/* QoS */
+
+ /* offset 20 */
+ if (pxmitframe->agg_num > 1)
+ ptxdesc->txdw5 |= cpu_to_le32((pxmitframe->agg_num << USB_TXAGG_NUM_SHT) & 0xFF000000);
+
+ if ((pattrib->ether_type != 0x888e) &&
+ (pattrib->ether_type != 0x0806) &&
+ (pattrib->ether_type != 0x88b4) &&
+ (pattrib->dhcp_pkt != 1)) {
+ /* Not an EAP, ARP or DHCP data packet */
+
+ fill_txdesc_vcs(pattrib, &ptxdesc->txdw4);
+ fill_txdesc_phy(pattrib, &ptxdesc->txdw4);
+
+ ptxdesc->txdw4 |= cpu_to_le32(0x00000008);/* RTS Rate=24M */
+ ptxdesc->txdw5 |= cpu_to_le32(0x0001ff00);/* DATA/RTS Rate FB LMT */
+
+ if (pattrib->ht_en) {
+ if (ODM_RA_GetShortGI_8188E(&haldata->odmpriv, pattrib->mac_id))
+ ptxdesc->txdw5 |= cpu_to_le32(SGI);/* SGI */
+ }
+ data_rate = ODM_RA_GetDecisionRate_8188E(&haldata->odmpriv, pattrib->mac_id);
+ ptxdesc->txdw5 |= cpu_to_le32(data_rate & 0x3F);
+ pwr_status = ODM_RA_GetHwPwrStatus_8188E(&haldata->odmpriv, pattrib->mac_id);
+ ptxdesc->txdw4 |= cpu_to_le32((pwr_status & 0x7) << PWR_STATUS_SHT);
+ } else {
+ /* EAP, ARP and DHCP packets. */
+ /* Use the 1M data rate to send the EAP/ARP packet. */
+ /* This may make the handshake smoother. */
+ ptxdesc->txdw2 |= cpu_to_le32(AGG_BK);/* AGG BK */
+ if (pmlmeinfo->preamble_mode == PREAMBLE_SHORT)
+ ptxdesc->txdw4 |= cpu_to_le32(BIT(24));/* DATA_SHORT */
+ ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
+ }
+ } else if ((pxmitframe->frame_tag&0x0f) == MGNT_FRAMETAG) {
+ /* offset 4 */
+ ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id & 0x3f);
+
+ qsel = (uint)(pattrib->qsel&0x0000001f);
+ ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);
+
+ ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000f0000);
+
+ /* offset 8 */
+ /* CCX-TXRPT ack for xmit mgmt frames. */
+ if (pxmitframe->ack_report)
+ ptxdesc->txdw2 |= cpu_to_le32(BIT(19));
+
+ /* offset 12 */
+ ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<SEQ_SHT)&0x0FFF0000);
+
+ /* offset 20 */
+ ptxdesc->txdw5 |= cpu_to_le32(RTY_LMT_EN);/* retry limit enable */
+ if (pattrib->retry_ctrl)
+ ptxdesc->txdw5 |= cpu_to_le32(0x00180000);/* retry limit = 6 */
+ else
+ ptxdesc->txdw5 |= cpu_to_le32(0x00300000);/* retry limit = 12 */
+
+ ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
+ } else if ((pxmitframe->frame_tag&0x0f) == TXAGG_FRAMETAG) {
+ DBG_88E("pxmitframe->frame_tag == TXAGG_FRAMETAG\n");
+ } else if (((pxmitframe->frame_tag&0x0f) == MP_FRAMETAG) &&
+ (adapt->registrypriv.mp_mode == 1)) {
+ fill_txdesc_for_mp(adapt, ptxdesc);
+ } else {
+ DBG_88E("pxmitframe->frame_tag = %d\n", pxmitframe->frame_tag);
+
+ /* offset 4 */
+ ptxdesc->txdw1 |= cpu_to_le32((4) & 0x3f);/* CAM_ID(MAC_ID) */
+
+ ptxdesc->txdw1 |= cpu_to_le32((6 << RATE_ID_SHT) & 0x000f0000);/* raid */
+
+ /* offset 8 */
+
+ /* offset 12 */
+ ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<SEQ_SHT)&0x0fff0000);
+
+ /* offset 20 */
+ ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
+ }
+
+ /* 2009.11.05. tynli_test. Suggested by SD4 Filen for FW LPS. */
+ /* (1) The sequence number of each non-Qos frame / broadcast / multicast / */
+ /* mgnt frame should be controlled by Hw because Fw will also send null data */
+ /* which we cannot control when Fw LPS enable. */
+ /* --> default enable non-Qos data sequence number. 2010.06.23. by tynli. */
+ /* (2) Enable HW SEQ control for beacon packet, because we use Hw beacon. */
+ /* (3) Use HW Qos SEQ to control the seq num of Ext port non-Qos packets. */
+ /* 2010.06.23. Added by tynli. */
+ if (!pattrib->qos_en) {
+ ptxdesc->txdw3 |= cpu_to_le32(EN_HWSEQ); /* Hw set sequence number */
+ ptxdesc->txdw4 |= cpu_to_le32(HW_SSN); /* Hw set sequence number */
+ }
+
+ ODM_SetTxAntByTxInfo_88E(&haldata->odmpriv, pmem, pattrib->mac_id);
+
+ rtl8188eu_cal_txdesc_chksum(ptxdesc);
+ _dbg_dump_tx_info(adapt, pxmitframe->frame_tag, ptxdesc);
+ return pull;
+}
+
+/* for non-agg data frame or management frame */
+static s32 rtw_dump_xframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
+{
+ s32 ret = _SUCCESS;
+ s32 inner_ret = _SUCCESS;
+ int t, sz, w_sz, pull = 0;
+ u8 *mem_addr;
+ u32 ff_hwaddr;
+ struct xmit_buf *pxmitbuf = pxmitframe->pxmitbuf;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
+ struct security_priv *psecuritypriv = &adapt->securitypriv;
+ if ((pxmitframe->frame_tag == DATA_FRAMETAG) &&
+ (pxmitframe->attrib.ether_type != 0x0806) &&
+ (pxmitframe->attrib.ether_type != 0x888e) &&
+ (pxmitframe->attrib.ether_type != 0x88b4) &&
+ (pxmitframe->attrib.dhcp_pkt != 1))
+ rtw_issue_addbareq_cmd(adapt, pxmitframe);
+ mem_addr = pxmitframe->buf_addr;
+
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_dump_xframe()\n"));
+
+ for (t = 0; t < pattrib->nr_frags; t++) {
+ if (inner_ret != _SUCCESS && ret == _SUCCESS)
+ ret = _FAIL;
+
+ if (t != (pattrib->nr_frags - 1)) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("pattrib->nr_frags=%d\n", pattrib->nr_frags));
+
+ sz = pxmitpriv->frag_len;
+ sz = sz - 4 - (psecuritypriv->sw_encrypt ? 0 : pattrib->icv_len);
+ } else {
+ /* no frag */
+ sz = pattrib->last_txcmdsz;
+ }
+
+ pull = update_txdesc(pxmitframe, mem_addr, sz, false);
+
+ if (pull) {
+ mem_addr += PACKET_OFFSET_SZ; /* pull txdesc head */
+ pxmitframe->buf_addr = mem_addr;
+ w_sz = sz + TXDESC_SIZE;
+ } else {
+ w_sz = sz + TXDESC_SIZE + PACKET_OFFSET_SZ;
+ }
+ ff_hwaddr = rtw_get_ff_hwaddr(pxmitframe);
+
+ inner_ret = rtw_write_port(adapt, ff_hwaddr, w_sz, (unsigned char *)pxmitbuf);
+
+ rtw_count_tx_stats(adapt, pxmitframe, sz);
+
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_write_port, w_sz=%d\n", w_sz));
+
+ mem_addr += w_sz;
+
+ mem_addr = (u8 *)RND4(((size_t)(mem_addr)));
+ }
+
+ rtw_free_xmitframe(pxmitpriv, pxmitframe);
+
+ if (ret != _SUCCESS)
+ rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_UNKNOWN);
+
+ return ret;
+}
+
+static u32 xmitframe_need_length(struct xmit_frame *pxmitframe)
+{
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+
+ u32 len = 0;
+
+ /* fragmentation is not considered */
+ len = pattrib->hdrlen + pattrib->iv_len +
+ SNAP_SIZE + sizeof(u16) +
+ pattrib->pktlen +
+ ((pattrib->bswenc) ? pattrib->icv_len : 0);
+
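+ /* the extra 8 bytes for TKIP presumably cover the Michael MIC, which is not included in pktlen */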
+ if (pattrib->encrypt == _TKIP_)
+ len += 8;
+
+ return len;
+}
+
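+/* USB TX aggregation: dequeue a first frame, then keep appending frames of the */
+/* same priority and destination into the same xmit buffer (each 8-byte aligned, */
+/* with its own tx descriptor) until the buffer, the per-bulk descriptor budget */
+/* or MAX_TX_AGG_PACKET_NUMBER is exhausted, then push the whole buffer to the */
+/* USB FIFO with a single rtw_write_port(). */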
+s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct xmit_frame *pxmitframe = NULL;
+ struct xmit_frame *pfirstframe = NULL;
+
+ /* aggregate variable */
+ struct hw_xmit *phwxmit;
+ struct sta_info *psta = NULL;
+ struct tx_servq *ptxservq = NULL;
+
+ unsigned long irql;
+ struct list_head *xmitframe_plist = NULL, *xmitframe_phead = NULL;
+
+ u32 pbuf; /* next pkt address */
+ u32 pbuf_tail; /* last pkt tail */
+ u32 len; /* packet length, except TXDESC_SIZE and PKT_OFFSET */
+
+ u32 bulksize = haldata->UsbBulkOutSize;
+ u8 desc_cnt;
+ u32 bulkptr;
+
+ /* dump frame variable */
+ u32 ff_hwaddr;
+
+ RT_TRACE(_module_rtl8192c_xmit_c_, _drv_info_, ("+xmitframe_complete\n"));
+
+ /* check xmitbuffer is ok */
+ if (pxmitbuf == NULL) {
+ pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
+ if (pxmitbuf == NULL)
+ return false;
+ }
+
+ /* 3 1. pick up first frame */
+ do {
+ rtw_free_xmitframe(pxmitpriv, pxmitframe);
+
+ pxmitframe = rtw_dequeue_xframe(pxmitpriv, pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
+ if (pxmitframe == NULL) {
+ /* no more xmit frame, release xmit buffer */
+ rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
+ return false;
+ }
+
+ pxmitframe->pxmitbuf = pxmitbuf;
+ pxmitframe->buf_addr = pxmitbuf->pbuf;
+ pxmitbuf->priv_data = pxmitframe;
+
+ pxmitframe->agg_num = 1; /* a newly allocated xmitframe starts with agg_num = 1 */
+ pxmitframe->pkt_offset = 1; /* first frame of aggregation, reserve offset */
+
+ rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
+
+ /* always return ndis_packet after rtw_xmitframe_coalesce */
+ rtw_os_xmit_complete(adapt, pxmitframe);
+
+ break;
+ } while (1);
+
+ /* 3 2. aggregate same priority and same DA(AP or STA) frames */
+ pfirstframe = pxmitframe;
+ len = xmitframe_need_length(pfirstframe) + TXDESC_SIZE + (pfirstframe->pkt_offset*PACKET_OFFSET_SZ);
+ pbuf_tail = len;
+ pbuf = _RND8(pbuf_tail);
+
+ /* check pkt amount in one bulk */
+ desc_cnt = 0;
+ bulkptr = bulksize;
+ if (pbuf < bulkptr) {
+ desc_cnt++;
+ } else {
+ desc_cnt = 0;
+ bulkptr = ((pbuf / bulksize) + 1) * bulksize; /* round to next bulksize */
+ }
+
+ /* dequeue same priority packet from station tx queue */
+ psta = pfirstframe->attrib.psta;
+ switch (pfirstframe->attrib.priority) {
+ case 1:
+ case 2:
+ ptxservq = &(psta->sta_xmitpriv.bk_q);
+ phwxmit = pxmitpriv->hwxmits + 3;
+ break;
+ case 4:
+ case 5:
+ ptxservq = &(psta->sta_xmitpriv.vi_q);
+ phwxmit = pxmitpriv->hwxmits + 1;
+ break;
+ case 6:
+ case 7:
+ ptxservq = &(psta->sta_xmitpriv.vo_q);
+ phwxmit = pxmitpriv->hwxmits;
+ break;
+ case 0:
+ case 3:
+ default:
+ ptxservq = &(psta->sta_xmitpriv.be_q);
+ phwxmit = pxmitpriv->hwxmits + 2;
+ break;
+ }
+ _enter_critical_bh(&pxmitpriv->lock, &irql);
+
+ xmitframe_phead = get_list_head(&ptxservq->sta_pending);
+ xmitframe_plist = get_next(xmitframe_phead);
+
+ while (!rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
+ pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+ xmitframe_plist = get_next(xmitframe_plist);
+
+ pxmitframe->agg_num = 0; /* not first frame of aggregation */
+ pxmitframe->pkt_offset = 0; /* not first frame of aggregation, no need to reserve offset */
+
+ len = xmitframe_need_length(pxmitframe) + TXDESC_SIZE + (pxmitframe->pkt_offset*PACKET_OFFSET_SZ);
+
+ if (_RND8(pbuf + len) > MAX_XMITBUF_SZ) {
+ pxmitframe->agg_num = 1;
+ pxmitframe->pkt_offset = 1;
+ break;
+ }
+ rtw_list_delete(&pxmitframe->list);
+ ptxservq->qcnt--;
+ phwxmit->accnt--;
+
+ pxmitframe->buf_addr = pxmitbuf->pbuf + pbuf;
+
+ rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
+ /* always return ndis_packet after rtw_xmitframe_coalesce */
+ rtw_os_xmit_complete(adapt, pxmitframe);
+
+ /* (len - TXDESC_SIZE) == pxmitframe->attrib.last_txcmdsz */
+ update_txdesc(pxmitframe, pxmitframe->buf_addr, pxmitframe->attrib.last_txcmdsz, true);
+
+ /* don't need xmitframe any more */
+ rtw_free_xmitframe(pxmitpriv, pxmitframe);
+
+ /* handle pointer and stop condition */
+ pbuf_tail = pbuf + len;
+ pbuf = _RND8(pbuf_tail);
+
+ pfirstframe->agg_num++;
+ if (MAX_TX_AGG_PACKET_NUMBER == pfirstframe->agg_num)
+ break;
+
+ if (pbuf < bulkptr) {
+ desc_cnt++;
+ if (desc_cnt == haldata->UsbTxAggDescNum)
+ break;
+ } else {
+ desc_cnt = 0;
+ bulkptr = ((pbuf / bulksize) + 1) * bulksize;
+ }
+ } /* end while (aggregate same priority and same DA(AP or STA) frames) */
+
+ if (_rtw_queue_empty(&ptxservq->sta_pending) == true)
+ rtw_list_delete(&ptxservq->tx_pending);
+
+ _exit_critical_bh(&pxmitpriv->lock, &irql);
+ if ((pfirstframe->attrib.ether_type != 0x0806) &&
+ (pfirstframe->attrib.ether_type != 0x888e) &&
+ (pfirstframe->attrib.ether_type != 0x88b4) &&
+ (pfirstframe->attrib.dhcp_pkt != 1))
+ rtw_issue_addbareq_cmd(adapt, pfirstframe);
+ /* 3 3. update first frame txdesc */
+ if ((pbuf_tail % bulksize) == 0) {
+ /* remove pkt_offset */
+ pbuf_tail -= PACKET_OFFSET_SZ;
+ pfirstframe->buf_addr += PACKET_OFFSET_SZ;
+ pfirstframe->pkt_offset--;
+ }
+
+ update_txdesc(pfirstframe, pfirstframe->buf_addr, pfirstframe->attrib.last_txcmdsz, true);
+
+ /* 3 4. write xmit buffer to USB FIFO */
+ ff_hwaddr = rtw_get_ff_hwaddr(pfirstframe);
+ rtw_write_port(adapt, ff_hwaddr, pbuf_tail, (u8 *)pxmitbuf);
+
+ /* 3 5. update statistics */
+ pbuf_tail -= (pfirstframe->agg_num * TXDESC_SIZE);
+ pbuf_tail -= (pfirstframe->pkt_offset * PACKET_OFFSET_SZ);
+
+ rtw_count_tx_stats(adapt, pfirstframe, pbuf_tail);
+
+ rtw_free_xmitframe(pxmitpriv, pfirstframe);
+
+ return true;
+}
+
+static s32 xmitframe_direct(struct adapter *adapt, struct xmit_frame *pxmitframe)
+{
+ s32 res = _SUCCESS;
+
+ res = rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
+ if (res == _SUCCESS)
+ rtw_dump_xframe(adapt, pxmitframe);
+ else
+ DBG_88E("==> %s xmitframe_coalesce failed\n", __func__);
+ return res;
+}
+
+/*
+ * Return
+ * true dump packet directly
+ * false enqueue packet
+ */
+static s32 pre_xmitframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
+{
+ unsigned long irql;
+ s32 res;
+ struct xmit_buf *pxmitbuf = NULL;
+ struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct mlme_priv *pmlmepriv = &adapt->mlmepriv;
+
+ _enter_critical_bh(&pxmitpriv->lock, &irql);
+
+ if (rtw_txframes_sta_ac_pending(adapt, pattrib) > 0)
+ goto enqueue;
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING) == true)
+ goto enqueue;
+
+ pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
+ if (pxmitbuf == NULL)
+ goto enqueue;
+
+ _exit_critical_bh(&pxmitpriv->lock, &irql);
+
+ pxmitframe->pxmitbuf = pxmitbuf;
+ pxmitframe->buf_addr = pxmitbuf->pbuf;
+ pxmitbuf->priv_data = pxmitframe;
+
+ if (xmitframe_direct(adapt, pxmitframe) != _SUCCESS) {
+ rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
+ rtw_free_xmitframe(pxmitpriv, pxmitframe);
+ }
+
+ return true;
+
+enqueue:
+ res = rtw_xmitframe_enqueue(adapt, pxmitframe);
+ _exit_critical_bh(&pxmitpriv->lock, &irql);
+
+ if (res != _SUCCESS) {
+ RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("pre_xmitframe: enqueue xmitframe fail\n"));
+ rtw_free_xmitframe(pxmitpriv, pxmitframe);
+
+ /* Trick, make the statistics correct */
+ pxmitpriv->tx_pkts--;
+ pxmitpriv->tx_drop++;
+ return true;
+ }
+
+ return false;
+}
+
+s32 rtl8188eu_mgnt_xmit(struct adapter *adapt, struct xmit_frame *pmgntframe)
+{
+ return rtw_dump_xframe(adapt, pmgntframe);
+}
+
+/*
+ * Return
+ * true packet dumped directly, OK
+ * false temporarily unable to transmit packets to hardware
+ */
+s32 rtl8188eu_hal_xmit(struct adapter *adapt, struct xmit_frame *pxmitframe)
+{
+ return pre_xmitframe(adapt, pxmitframe);
+}
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
new file mode 100644
index 0000000..5e656ce
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -0,0 +1,2346 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _HCI_HAL_INIT_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <rtw_efuse.h>
+
+#include <rtl8188e_hal.h>
+#include <rtl8188e_led.h>
+#include <rtw_iol.h>
+#include <usb_ops.h>
+#include <usb_hal.h>
+#include <usb_osintf.h>
+
+#define HAL_MAC_ENABLE 1
+#define HAL_BB_ENABLE 1
+#define HAL_RF_ENABLE 1
+
+static void _ConfigNormalChipOutEP_8188E(struct adapter *adapt, u8 NumOutPipe)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+
+ switch (NumOutPipe) {
+ case 3:
+ haldata->OutEpQueueSel = TX_SELE_HQ | TX_SELE_LQ | TX_SELE_NQ;
+ haldata->OutEpNumber = 3;
+ break;
+ case 2:
+ haldata->OutEpQueueSel = TX_SELE_HQ | TX_SELE_NQ;
+ haldata->OutEpNumber = 2;
+ break;
+ case 1:
+ haldata->OutEpQueueSel = TX_SELE_HQ;
+ haldata->OutEpNumber = 1;
+ break;
+ default:
+ break;
+ }
+ DBG_88E("%s OutEpQueueSel(0x%02x), OutEpNumber(%d)\n", __func__, haldata->OutEpQueueSel, haldata->OutEpNumber);
+}
+
+static bool HalUsbSetQueuePipeMapping8188EUsb(struct adapter *adapt, u8 NumInPipe, u8 NumOutPipe)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ bool result = false;
+
+ _ConfigNormalChipOutEP_8188E(adapt, NumOutPipe);
+
+ /* Normal chip with one IN and one OUT doesn't have interrupt IN EP. */
+ if (1 == haldata->OutEpNumber) {
+ if (1 != NumInPipe)
+ return result;
+ }
+
+ /* All configurations other than the above support one Bulk IN and one Interrupt IN. */
+
+ result = Hal_MappingOutPipe(adapt, NumOutPipe);
+
+ return result;
+}
+
+static void rtl8188eu_interface_configure(struct adapter *adapt)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapt);
+
+ if (pdvobjpriv->ishighspeed)
+ haldata->UsbBulkOutSize = USB_HIGH_SPEED_BULK_SIZE;/* 512 bytes */
+ else
+ haldata->UsbBulkOutSize = USB_FULL_SPEED_BULK_SIZE;/* 64 bytes */
+
+ haldata->interfaceIndex = pdvobjpriv->InterfaceNumber;
+
+ haldata->UsbTxAggMode = 1;
+ haldata->UsbTxAggDescNum = 0x6; /* only 4 bits */
+
+ haldata->UsbRxAggMode = USB_RX_AGG_DMA;/* USB_RX_AGG_DMA; */
+ haldata->UsbRxAggBlockCount = 8; /* unit : 512b */
+ haldata->UsbRxAggBlockTimeout = 0x6;
+ haldata->UsbRxAggPageCount = 48; /* unit: 128 B; 0x0A; 10 = MAX_RX_DMA_BUFFER_SIZE/2/haldata->UsbBulkOutSize */
+ haldata->UsbRxAggPageTimeout = 0x4; /* 6, absolute time = 34ms/(2^6) */
+
+ HalUsbSetQueuePipeMapping8188EUsb(adapt,
+ pdvobjpriv->RtNumInPipes, pdvobjpriv->RtNumOutPipes);
+}
+
+static u32 rtl8188eu_InitPowerOn(struct adapter *adapt)
+{
+ u16 value16;
+ /* HW Power on sequence */
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ if (haldata->bMacPwrCtrlOn)
+ return _SUCCESS;
+
+ if (!HalPwrSeqCmdParsing(adapt, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, Rtl8188E_NIC_PWR_ON_FLOW)) {
+ DBG_88E(KERN_ERR "%s: run power on flow fail\n", __func__);
+ return _FAIL;
+ }
+
+ /* Enable MAC DMA/WMAC/SCHEDULE/SEC block */
+ /* Set CR bit10 to enable 32k calibration. Suggested by SD1 Gimmy. Added by tynli. 2011.08.31. */
+ rtw_write16(adapt, REG_CR, 0x00); /* suggested by zhouzhou, by page, 20111230 */
+
+ /* Enable MAC DMA/WMAC/SCHEDULE/SEC block */
+ value16 = rtw_read16(adapt, REG_CR);
+ value16 |= (HCI_TXDMA_EN | HCI_RXDMA_EN | TXDMA_EN | RXDMA_EN
+ | PROTOCOL_EN | SCHEDULE_EN | ENSEC | CALTMR_EN);
+ /* for SDIO - Set CR bit10 to enable 32k calibration. Suggested by SD1 Gimmy. Added by tynli. 2011.08.31. */
+
+ rtw_write16(adapt, REG_CR, value16);
+ haldata->bMacPwrCtrlOn = true;
+
+ return _SUCCESS;
+}
+
+/* Shall USB interface init this? */
+static void _InitInterrupt(struct adapter *Adapter)
+{
+ u32 imr, imr_ex;
+ u8 usb_opt;
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+
+ /* HISR write one to clear */
+ rtw_write32(Adapter, REG_HISR_88E, 0xFFFFFFFF);
+ /* HIMR - */
+ imr = IMR_PSTIMEOUT_88E | IMR_TBDER_88E | IMR_CPWM_88E | IMR_CPWM2_88E;
+ rtw_write32(Adapter, REG_HIMR_88E, imr);
+ haldata->IntrMask[0] = imr;
+
+ imr_ex = IMR_TXERR_88E | IMR_RXERR_88E | IMR_TXFOVW_88E | IMR_RXFOVW_88E;
+ rtw_write32(Adapter, REG_HIMRE_88E, imr_ex);
+ haldata->IntrMask[1] = imr_ex;
+
+ /* REG_USB_SPECIAL_OPTION - BIT(4) */
+ /* 0: Use interrupt endpoint to upload interrupt pkt */
+ /* 1: Use bulk endpoint to upload interrupt pkt */
+ usb_opt = rtw_read8(Adapter, REG_USB_SPECIAL_OPTION);
+
+ if (!adapter_to_dvobj(Adapter)->ishighspeed)
+ usb_opt = usb_opt & (~INT_BULK_SEL);
+ else
+ usb_opt = usb_opt | (INT_BULK_SEL);
+
+ rtw_write8(Adapter, REG_USB_SPECIAL_OPTION, usb_opt);
+}
+
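+/* In wifi_spec mode, split the 0xA8-page TX buffer budget between the high/low/ */
+/* normal queues that are mapped to out-endpoints and give the remainder to the */
+/* public pool; otherwise fixed register defaults are written. */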
+static void _InitQueueReservedPage(struct adapter *Adapter)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct registry_priv *pregistrypriv = &Adapter->registrypriv;
+ u32 numHQ = 0;
+ u32 numLQ = 0;
+ u32 numNQ = 0;
+ u32 numPubQ;
+ u32 value32;
+ u8 value8;
+ bool bWiFiConfig = pregistrypriv->wifi_spec;
+
+ if (bWiFiConfig) {
+ if (haldata->OutEpQueueSel & TX_SELE_HQ)
+ numHQ = 0x29;
+
+ if (haldata->OutEpQueueSel & TX_SELE_LQ)
+ numLQ = 0x1C;
+
+ /* NOTE: This step shall be done before writing REG_RQPN. */
+ if (haldata->OutEpQueueSel & TX_SELE_NQ)
+ numNQ = 0x1C;
+ value8 = (u8)_NPQ(numNQ);
+ rtw_write8(Adapter, REG_RQPN_NPQ, value8);
+
+ numPubQ = 0xA8 - numHQ - numLQ - numNQ;
+
+ /* TX DMA */
+ value32 = _HPQ(numHQ) | _LPQ(numLQ) | _PUBQ(numPubQ) | LD_RQPN;
+ rtw_write32(Adapter, REG_RQPN, value32);
+ } else {
+ rtw_write16(Adapter, REG_RQPN_NPQ, 0x0000);/* Just follow MP Team,??? Georgia 03/28 */
+ rtw_write16(Adapter, REG_RQPN_NPQ, 0x0d);
+ rtw_write32(Adapter, REG_RQPN, 0x808E000d);/* reserve 7 page for LPS */
+ }
+}
+
+static void _InitTxBufferBoundary(struct adapter *Adapter, u8 txpktbuf_bndy)
+{
+ rtw_write8(Adapter, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
+ rtw_write8(Adapter, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
+ rtw_write8(Adapter, REG_TXPKTBUF_WMAC_LBK_BF_HD, txpktbuf_bndy);
+ rtw_write8(Adapter, REG_TRXFF_BNDY, txpktbuf_bndy);
+ rtw_write8(Adapter, REG_TDECTRL+1, txpktbuf_bndy);
+}
+
+static void _InitPageBoundary(struct adapter *Adapter)
+{
+ /* RX Page Boundary */
+ /* */
+ u16 rxff_bndy = MAX_RX_DMA_BUFFER_SIZE_88E-1;
+
+ rtw_write16(Adapter, (REG_TRXFF_BNDY + 2), rxff_bndy);
+}
+
+static void _InitNormalChipRegPriority(struct adapter *Adapter, u16 beQ,
+ u16 bkQ, u16 viQ, u16 voQ, u16 mgtQ,
+ u16 hiQ)
+{
+ u16 value16 = (rtw_read16(Adapter, REG_TRXDMA_CTRL) & 0x7);
+
+ value16 |= _TXDMA_BEQ_MAP(beQ) | _TXDMA_BKQ_MAP(bkQ) |
+ _TXDMA_VIQ_MAP(viQ) | _TXDMA_VOQ_MAP(voQ) |
+ _TXDMA_MGQ_MAP(mgtQ) | _TXDMA_HIQ_MAP(hiQ);
+
+ rtw_write16(Adapter, REG_TRXDMA_CTRL, value16);
+}
+
+static void _InitNormalChipOneOutEpPriority(struct adapter *Adapter)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+
+ u16 value = 0;
+ switch (haldata->OutEpQueueSel) {
+ case TX_SELE_HQ:
+ value = QUEUE_HIGH;
+ break;
+ case TX_SELE_LQ:
+ value = QUEUE_LOW;
+ break;
+ case TX_SELE_NQ:
+ value = QUEUE_NORMAL;
+ break;
+ default:
+ break;
+ }
+ _InitNormalChipRegPriority(Adapter, value, value, value, value,
+ value, value);
+}
+
+static void _InitNormalChipTwoOutEpPriority(struct adapter *Adapter)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct registry_priv *pregistrypriv = &Adapter->registrypriv;
+ u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
+ u16 valueHi = 0;
+ u16 valueLow = 0;
+
+ switch (haldata->OutEpQueueSel) {
+ case (TX_SELE_HQ | TX_SELE_LQ):
+ valueHi = QUEUE_HIGH;
+ valueLow = QUEUE_LOW;
+ break;
+ case (TX_SELE_NQ | TX_SELE_LQ):
+ valueHi = QUEUE_NORMAL;
+ valueLow = QUEUE_LOW;
+ break;
+ case (TX_SELE_HQ | TX_SELE_NQ):
+ valueHi = QUEUE_HIGH;
+ valueLow = QUEUE_NORMAL;
+ break;
+ default:
+ break;
+ }
+
+ if (!pregistrypriv->wifi_spec) {
+ beQ = valueLow;
+ bkQ = valueLow;
+ viQ = valueHi;
+ voQ = valueHi;
+ mgtQ = valueHi;
+ hiQ = valueHi;
+ } else {/* for WMM ,CONFIG_OUT_EP_WIFI_MODE */
+ beQ = valueLow;
+ bkQ = valueHi;
+ viQ = valueHi;
+ voQ = valueLow;
+ mgtQ = valueHi;
+ hiQ = valueHi;
+ }
+ _InitNormalChipRegPriority(Adapter, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
+}
+
+static void _InitNormalChipThreeOutEpPriority(struct adapter *Adapter)
+{
+ struct registry_priv *pregistrypriv = &Adapter->registrypriv;
+ u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
+
+ if (!pregistrypriv->wifi_spec) {/* typical setting */
+ beQ = QUEUE_LOW;
+ bkQ = QUEUE_LOW;
+ viQ = QUEUE_NORMAL;
+ voQ = QUEUE_HIGH;
+ mgtQ = QUEUE_HIGH;
+ hiQ = QUEUE_HIGH;
+ } else {/* for WMM */
+ beQ = QUEUE_LOW;
+ bkQ = QUEUE_NORMAL;
+ viQ = QUEUE_NORMAL;
+ voQ = QUEUE_HIGH;
+ mgtQ = QUEUE_HIGH;
+ hiQ = QUEUE_HIGH;
+ }
+ _InitNormalChipRegPriority(Adapter, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
+}
+
+static void _InitQueuePriority(struct adapter *Adapter)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+
+ switch (haldata->OutEpNumber) {
+ case 1:
+ _InitNormalChipOneOutEpPriority(Adapter);
+ break;
+ case 2:
+ _InitNormalChipTwoOutEpPriority(Adapter);
+ break;
+ case 3:
+ _InitNormalChipThreeOutEpPriority(Adapter);
+ break;
+ default:
+ break;
+ }
+}
+
+static void _InitNetworkType(struct adapter *Adapter)
+{
+ u32 value32;
+
+ value32 = rtw_read32(Adapter, REG_CR);
+ /* TODO: use the other function to set network type */
+ value32 = (value32 & ~MASK_NETTYPE) | _NETTYPE(NT_LINK_AP);
+
+ rtw_write32(Adapter, REG_CR, value32);
+}
+
+static void _InitTransferPageSize(struct adapter *Adapter)
+{
+ /* Tx page size is always 128. */
+
+ u8 value8;
+ value8 = _PSRX(PBP_128) | _PSTX(PBP_128);
+ rtw_write8(Adapter, REG_PBP, value8);
+}
+
+static void _InitDriverInfoSize(struct adapter *Adapter, u8 drvInfoSize)
+{
+ rtw_write8(Adapter, REG_RX_DRVINFO_SZ, drvInfoSize);
+}
+
+static void _InitWMACSetting(struct adapter *Adapter)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+
+ haldata->ReceiveConfig = RCR_AAP | RCR_APM | RCR_AM | RCR_AB |
+ RCR_CBSSID_DATA | RCR_CBSSID_BCN |
+ RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL |
+ RCR_APP_MIC | RCR_APP_PHYSTS;
+
+ /* some REG_RCR will be modified later by phy_ConfigMACWithHeaderFile() */
+ rtw_write32(Adapter, REG_RCR, haldata->ReceiveConfig);
+
+ /* Accept all multicast address */
+ rtw_write32(Adapter, REG_MAR, 0xFFFFFFFF);
+ rtw_write32(Adapter, REG_MAR + 4, 0xFFFFFFFF);
+}
+
+static void _InitAdaptiveCtrl(struct adapter *Adapter)
+{
+ u16 value16;
+ u32 value32;
+
+ /* Response Rate Set */
+ value32 = rtw_read32(Adapter, REG_RRSR);
+ value32 &= ~RATE_BITMAP_ALL;
+ value32 |= RATE_RRSR_CCK_ONLY_1M;
+ rtw_write32(Adapter, REG_RRSR, value32);
+
+ /* CF-END Threshold */
+
+ /* SIFS (used in NAV) */
+ value16 = _SPEC_SIFS_CCK(0x10) | _SPEC_SIFS_OFDM(0x10);
+ rtw_write16(Adapter, REG_SPEC_SIFS, value16);
+
+ /* Retry Limit */
+ value16 = _LRL(0x30) | _SRL(0x30);
+ rtw_write16(Adapter, REG_RL, value16);
+}
+
+static void _InitEDCA(struct adapter *Adapter)
+{
+ /* Set Spec SIFS (used in NAV) */
+ rtw_write16(Adapter, REG_SPEC_SIFS, 0x100a);
+ rtw_write16(Adapter, REG_MAC_SPEC_SIFS, 0x100a);
+
+ /* Set SIFS for CCK */
+ rtw_write16(Adapter, REG_SIFS_CTX, 0x100a);
+
+ /* Set SIFS for OFDM */
+ rtw_write16(Adapter, REG_SIFS_TRX, 0x100a);
+
+ /* TXOP */
+ rtw_write32(Adapter, REG_EDCA_BE_PARAM, 0x005EA42B);
+ rtw_write32(Adapter, REG_EDCA_BK_PARAM, 0x0000A44F);
+ rtw_write32(Adapter, REG_EDCA_VI_PARAM, 0x005EA324);
+ rtw_write32(Adapter, REG_EDCA_VO_PARAM, 0x002FA226);
+}
+
+static void _InitBeaconMaxError(struct adapter *Adapter, bool InfraMode)
+{
+}
+
+static void _InitHWLed(struct adapter *Adapter)
+{
+ struct led_priv *pledpriv = &(Adapter->ledpriv);
+
+ if (pledpriv->LedStrategy != HW_LED)
+ return;
+
+/* HW led control */
+/* to do .... */
+/* must consider cases of antenna diversity/combo card/solo card/mini card */
+}
+
+static void _InitRDGSetting(struct adapter *Adapter)
+{
+ rtw_write8(Adapter, REG_RD_CTRL, 0xFF);
+ rtw_write16(Adapter, REG_RD_NAV_NXT, 0x200);
+ rtw_write8(Adapter, REG_RD_RESP_PKT_TH, 0x05);
+}
+
+static void _InitRxSetting(struct adapter *Adapter)
+{
+ rtw_write32(Adapter, REG_MACID, 0x87654321);
+ rtw_write32(Adapter, 0x0700, 0x87654321);
+}
+
+static void _InitRetryFunction(struct adapter *Adapter)
+{
+ u8 value8;
+
+ value8 = rtw_read8(Adapter, REG_FWHW_TXQ_CTRL);
+ value8 |= EN_AMPDU_RTY_NEW;
+ rtw_write8(Adapter, REG_FWHW_TXQ_CTRL, value8);
+
+ /* Set ACK timeout */
+ rtw_write8(Adapter, REG_ACKTO, 0x40);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: usb_AggSettingTxUpdate()
+ *
+ * Overview: Separate TX/RX parameter updates, independent of TP detection and
+ * dynamic TX/RX aggregation parameter updates.
+ *
+ * Input: struct adapter *
+ *
+ * Output/Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 12/10/2010 MHC Separate into smaller functions.
+ *
+ *---------------------------------------------------------------------------*/
+static void usb_AggSettingTxUpdate(struct adapter *Adapter)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ u32 value32;
+
+ if (Adapter->registrypriv.wifi_spec)
+ haldata->UsbTxAggMode = false;
+
+ if (haldata->UsbTxAggMode) {
+ value32 = rtw_read32(Adapter, REG_TDECTRL);
+ value32 = value32 & ~(BLK_DESC_NUM_MASK << BLK_DESC_NUM_SHIFT);
+ value32 |= ((haldata->UsbTxAggDescNum & BLK_DESC_NUM_MASK) << BLK_DESC_NUM_SHIFT);
+
+ rtw_write32(Adapter, REG_TDECTRL, value32);
+ }
+} /* usb_AggSettingTxUpdate */
+
+/*-----------------------------------------------------------------------------
+ * Function: usb_AggSettingRxUpdate()
+ *
+ * Overview: Separate TX/RX parameter updates, independent of TP detection and
+ * dynamic TX/RX aggregation parameter updates.
+ *
+ * Input: struct adapter *
+ *
+ * Output/Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 12/10/2010 MHC Separate into smaller functions.
+ *
+ *---------------------------------------------------------------------------*/
+static void
+usb_AggSettingRxUpdate(
+ struct adapter *Adapter
+ )
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ u8 valueDMA;
+ u8 valueUSB;
+
+ valueDMA = rtw_read8(Adapter, REG_TRXDMA_CTRL);
+ valueUSB = rtw_read8(Adapter, REG_USB_SPECIAL_OPTION);
+
+ switch (haldata->UsbRxAggMode) {
+ case USB_RX_AGG_DMA:
+ valueDMA |= RXDMA_AGG_EN;
+ valueUSB &= ~USB_AGG_EN;
+ break;
+ case USB_RX_AGG_USB:
+ valueDMA &= ~RXDMA_AGG_EN;
+ valueUSB |= USB_AGG_EN;
+ break;
+ case USB_RX_AGG_MIX:
+ valueDMA |= RXDMA_AGG_EN;
+ valueUSB |= USB_AGG_EN;
+ break;
+ case USB_RX_AGG_DISABLE:
+ default:
+ valueDMA &= ~RXDMA_AGG_EN;
+ valueUSB &= ~USB_AGG_EN;
+ break;
+ }
+
+ rtw_write8(Adapter, REG_TRXDMA_CTRL, valueDMA);
+ rtw_write8(Adapter, REG_USB_SPECIAL_OPTION, valueUSB);
+
+ switch (haldata->UsbRxAggMode) {
+ case USB_RX_AGG_DMA:
+ rtw_write8(Adapter, REG_RXDMA_AGG_PG_TH, haldata->UsbRxAggPageCount);
+ rtw_write8(Adapter, REG_RXDMA_AGG_PG_TH+1, haldata->UsbRxAggPageTimeout);
+ break;
+ case USB_RX_AGG_USB:
+ rtw_write8(Adapter, REG_USB_AGG_TH, haldata->UsbRxAggBlockCount);
+ rtw_write8(Adapter, REG_USB_AGG_TO, haldata->UsbRxAggBlockTimeout);
+ break;
+ case USB_RX_AGG_MIX:
+ rtw_write8(Adapter, REG_RXDMA_AGG_PG_TH, haldata->UsbRxAggPageCount);
+ rtw_write8(Adapter, REG_RXDMA_AGG_PG_TH+1, (haldata->UsbRxAggPageTimeout & 0x1F));/* 0x280[12:8] */
+ rtw_write8(Adapter, REG_USB_AGG_TH, haldata->UsbRxAggBlockCount);
+ rtw_write8(Adapter, REG_USB_AGG_TO, haldata->UsbRxAggBlockTimeout);
+ break;
+ case USB_RX_AGG_DISABLE:
+ default:
+ /* TODO: */
+ break;
+ }
+
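+ /* Record the Rx page size; the switch is on the constant PBP_128, which */
+ /* matches the value programmed by _InitTransferPageSize(); the other */
+ /* cases are kept for completeness. */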
+ switch (PBP_128) {
+ case PBP_128:
+ haldata->HwRxPageSize = 128;
+ break;
+ case PBP_64:
+ haldata->HwRxPageSize = 64;
+ break;
+ case PBP_256:
+ haldata->HwRxPageSize = 256;
+ break;
+ case PBP_512:
+ haldata->HwRxPageSize = 512;
+ break;
+ case PBP_1024:
+ haldata->HwRxPageSize = 1024;
+ break;
+ default:
+ break;
+ }
+} /* usb_AggSettingRxUpdate */
+
+static void InitUsbAggregationSetting(struct adapter *Adapter)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+
+ /* Tx aggregation setting */
+ usb_AggSettingTxUpdate(Adapter);
+
+ /* Rx aggregation setting */
+ usb_AggSettingRxUpdate(Adapter);
+
+ /* 201/12/10 MH Add for USB agg mode dynamic switch. */
+ haldata->UsbRxHighSpeedMode = false;
+}
+
+static void _InitOperationMode(struct adapter *Adapter)
+{
+}
+
+static void _InitBeaconParameters(struct adapter *Adapter)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+
+ rtw_write16(Adapter, REG_BCN_CTRL, 0x1010);
+
+ /* TODO: Remove these magic numbers */
+ rtw_write16(Adapter, REG_TBTT_PROHIBIT, 0x6404);/* ms */
+ rtw_write8(Adapter, REG_DRVERLYINT, DRIVER_EARLY_INT_TIME);/* 5ms */
+ rtw_write8(Adapter, REG_BCNDMATIM, BCN_DMA_ATIME_INT_TIME); /* 2ms */
+
+ /* Suggested by designer timchen. Change beacon AIFS to the largest number */
+ /* because the test chip does not contend before sending a beacon. by tynli. 2009.11.03 */
+ rtw_write16(Adapter, REG_BCNTCFG, 0x660F);
+
+ haldata->RegBcnCtrlVal = rtw_read8(Adapter, REG_BCN_CTRL);
+ haldata->RegTxPause = rtw_read8(Adapter, REG_TXPAUSE);
+ haldata->RegFwHwTxQCtrl = rtw_read8(Adapter, REG_FWHW_TXQ_CTRL+2);
+ haldata->RegReg542 = rtw_read8(Adapter, REG_TBTT_PROHIBIT+2);
+ haldata->RegCR_1 = rtw_read8(Adapter, REG_CR+1);
+}
+
+static void _BeaconFunctionEnable(struct adapter *Adapter,
+ bool Enable, bool Linked)
+{
+ rtw_write8(Adapter, REG_BCN_CTRL, (BIT4 | BIT3 | BIT1));
+
+ rtw_write8(Adapter, REG_RD_CTRL+1, 0x6F);
+}
+
+/* Set CCK and OFDM Block "ON" */
+static void _BBTurnOnBlock(struct adapter *Adapter)
+{
+ PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bCCKEn, 0x1);
+ PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bOFDMEn, 0x1);
+}
+
+enum {
+ Antenna_Lfet = 1,
+ Antenna_Right = 2,
+};
+
+static void _InitAntenna_Selection(struct adapter *Adapter)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+
+ if (haldata->AntDivCfg == 0)
+ return;
+ DBG_88E("==> %s ....\n", __func__);
+
+ rtw_write32(Adapter, REG_LEDCFG0, rtw_read32(Adapter, REG_LEDCFG0)|BIT23);
+ PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter, BIT13, 0x01);
+
+ if (PHY_QueryBBReg(Adapter, rFPGA0_XA_RFInterfaceOE, 0x300) == Antenna_A)
+ haldata->CurAntenna = Antenna_A;
+ else
+ haldata->CurAntenna = Antenna_B;
+ DBG_88E("%s,Cur_ant:(%x)%s\n", __func__, haldata->CurAntenna, (haldata->CurAntenna == Antenna_A) ? "Antenna_A" : "Antenna_B");
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: HwSuspendModeEnable92Cu()
+ *
+ * Overview: HW suspend mode switch.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 08/23/2010 MHC HW suspend mode switch test..
+ *---------------------------------------------------------------------------*/
+enum rt_rf_power_state RfOnOffDetect(struct adapter *adapt)
+{
+ u8 val8;
+ enum rt_rf_power_state rfpowerstate = rf_off;
+
+ if (adapt->pwrctrlpriv.bHWPowerdown) {
+ val8 = rtw_read8(adapt, REG_HSISR);
+ DBG_88E("pwrdown, 0x5c(BIT7)=%02x\n", val8);
+ rfpowerstate = (val8 & BIT7) ? rf_off : rf_on;
+ } else { /* rf on/off */
+ rtw_write8(adapt, REG_MAC_PINMUX_CFG, rtw_read8(adapt, REG_MAC_PINMUX_CFG)&~(BIT3));
+ val8 = rtw_read8(adapt, REG_GPIO_IO_SEL);
+ DBG_88E("GPIO_IN=%02x\n", val8);
+ rfpowerstate = (val8 & BIT3) ? rf_on : rf_off;
+ }
+ return rfpowerstate;
+} /* HalDetectPwrDownMode */
+
+static u32 rtl8188eu_hal_init(struct adapter *Adapter)
+{
+ u8 value8 = 0;
+ u16 value16;
+ u8 txpktbuf_bndy;
+ u32 status = _SUCCESS;
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
+ struct registry_priv *pregistrypriv = &Adapter->registrypriv;
+ u32 init_start_time = rtw_get_current_time();
+
+ #define HAL_INIT_PROFILE_TAG(stage) do {} while (0)
+
+_func_enter_;
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_BEGIN);
+
+ if (Adapter->pwrctrlpriv.bkeepfwalive) {
+ _ps_open_RF(Adapter);
+
+ if (haldata->odmpriv.RFCalibrateInfo.bIQKInitialized) {
+ PHY_IQCalibrate_8188E(Adapter, true);
+ } else {
+ PHY_IQCalibrate_8188E(Adapter, false);
+ haldata->odmpriv.RFCalibrateInfo.bIQKInitialized = true;
+ }
+
+ ODM_TXPowerTrackingCheck(&haldata->odmpriv);
+ PHY_LCCalibrate_8188E(Adapter);
+
+ goto exit;
+ }
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_PW_ON);
+ status = rtl8188eu_InitPowerOn(Adapter);
+ if (status == _FAIL) {
+ RT_TRACE(_module_hci_hal_init_c_, _drv_err_, ("Failed to init power on!\n"));
+ goto exit;
+ }
+
+ /* Save target channel */
+ haldata->CurrentChannel = 6;/* default set to 6 */
+
+ if (pwrctrlpriv->reg_rfoff) {
+ pwrctrlpriv->rf_pwrstate = rf_off;
+ }
+
+ /* 2010/08/09 MH We need to check whether to turn the RF on or off after detecting */
+ /* the HW GPIO pin. Before PHY_RFConfig8192C. */
+ /* 2010/08/26 MH If Efuse does not support selective suspend then disable the function. */
+
+ if (!pregistrypriv->wifi_spec) {
+ txpktbuf_bndy = TX_PAGE_BOUNDARY_88E;
+ } else {
+ /* for WMM */
+ txpktbuf_bndy = WMM_NORMAL_TX_PAGE_BOUNDARY_88E;
+ }
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_MISC01);
+ _InitQueueReservedPage(Adapter);
+ _InitQueuePriority(Adapter);
+ _InitPageBoundary(Adapter);
+ _InitTransferPageSize(Adapter);
+
+ _InitTxBufferBoundary(Adapter, 0);
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_DOWNLOAD_FW);
+ if (Adapter->registrypriv.mp_mode == 1) {
+ _InitRxSetting(Adapter);
+ Adapter->bFWReady = false;
+ haldata->fw_ractrl = false;
+ } else {
+ status = rtl8188e_FirmwareDownload(Adapter);
+
+ if (status != _SUCCESS) {
+ DBG_88E("%s: Download Firmware failed!!\n", __func__);
+ Adapter->bFWReady = false;
+ haldata->fw_ractrl = false;
+ return status;
+ } else {
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("Initializeadapt8192CSdio(): Download Firmware Success!!\n"));
+ Adapter->bFWReady = true;
+ haldata->fw_ractrl = false;
+ }
+ }
+ rtl8188e_InitializeFirmwareVars(Adapter);
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_MAC);
+#if (HAL_MAC_ENABLE == 1)
+ status = PHY_MACConfig8188E(Adapter);
+ if (status == _FAIL) {
+ DBG_88E(" ### Failed to init MAC ......\n ");
+ goto exit;
+ }
+#endif
+
+ /* */
+ /* d. Initialize BB related configurations. */
+ /* */
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_BB);
+#if (HAL_BB_ENABLE == 1)
+ status = PHY_BBConfig8188E(Adapter);
+ if (status == _FAIL) {
+ DBG_88E(" ### Failed to init BB ......\n ");
+ goto exit;
+ }
+#endif
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_RF);
+#if (HAL_RF_ENABLE == 1)
+ status = PHY_RFConfig8188E(Adapter);
+ if (status == _FAIL) {
+ DBG_88E(" ### Failed to init RF ......\n ");
+ goto exit;
+ }
+#endif
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_EFUSE_PATCH);
+ status = rtl8188e_iol_efuse_patch(Adapter);
+ if (status == _FAIL) {
+ DBG_88E("%s rtl8188e_iol_efuse_patch failed\n", __func__);
+ goto exit;
+ }
+
+ _InitTxBufferBoundary(Adapter, txpktbuf_bndy);
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_LLTT);
+ status = InitLLTTable(Adapter, txpktbuf_bndy);
+ if (status == _FAIL) {
+ RT_TRACE(_module_hci_hal_init_c_, _drv_err_, ("Failed to init LLT table\n"));
+ goto exit;
+ }
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_MISC02);
+ /* Get Rx PHY status in order to report RSSI and others. */
+ _InitDriverInfoSize(Adapter, DRVINFO_SZ);
+
+ _InitInterrupt(Adapter);
+ hal_init_macaddr(Adapter);/* set mac_address */
+ _InitNetworkType(Adapter);/* set msr */
+ _InitWMACSetting(Adapter);
+ _InitAdaptiveCtrl(Adapter);
+ _InitEDCA(Adapter);
+ _InitRetryFunction(Adapter);
+ InitUsbAggregationSetting(Adapter);
+ _InitOperationMode(Adapter);/* todo */
+ _InitBeaconParameters(Adapter);
+ _InitBeaconMaxError(Adapter, true);
+
+ /* */
+ /* Init CR MACTXEN, MACRXEN after setting the RxFF boundary REG_TRXFF_BNDY to patch a */
+ /* Hw bug in which Hw initializes the RxFF boundary size to a value larger than the real Rx buffer size in 88E. */
+ /* */
+ /* Enable MACTXEN/MACRXEN block */
+ value16 = rtw_read16(Adapter, REG_CR);
+ value16 |= (MACTXEN | MACRXEN);
+ rtw_write8(Adapter, REG_CR, value16);
+
+ if (haldata->bRDGEnable)
+ _InitRDGSetting(Adapter);
+
+ /* Enable TX Report */
+ /* Enable Tx Report Timer */
+ value8 = rtw_read8(Adapter, REG_TX_RPT_CTRL);
+ rtw_write8(Adapter, REG_TX_RPT_CTRL, (value8|BIT1|BIT0));
+ /* Set MAX RPT MACID */
+ rtw_write8(Adapter, REG_TX_RPT_CTRL+1, 2);/* FOR sta mode ,0: bc/mc ,1:AP */
+ /* Tx RPT Timer. Unit: 32us */
+ rtw_write16(Adapter, REG_TX_RPT_TIME, 0xCdf0);
+
+ rtw_write8(Adapter, REG_EARLY_MODE_CONTROL, 0);
+
+ rtw_write16(Adapter, REG_PKT_VO_VI_LIFE_TIME, 0x0400); /* unit: 256us. 256ms */
+ rtw_write16(Adapter, REG_PKT_BE_BK_LIFE_TIME, 0x0400); /* unit: 256us. 256ms */
+
+ _InitHWLed(Adapter);
+
+ /* Keep RfRegChnlVal for later use. */
+ haldata->RfRegChnlVal[0] = PHY_QueryRFReg(Adapter, (enum rf_radio_path)0, RF_CHNLBW, bRFRegOffsetMask);
+ haldata->RfRegChnlVal[1] = PHY_QueryRFReg(Adapter, (enum rf_radio_path)1, RF_CHNLBW, bRFRegOffsetMask);
+
+HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_TURN_ON_BLOCK);
+ _BBTurnOnBlock(Adapter);
+
+HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_SECURITY);
+ invalidate_cam_all(Adapter);
+
+HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_MISC11);
+ /* 2010/12/17 MH We need to set TX power according to EFUSE content at first. */
+ PHY_SetTxPowerLevel8188E(Adapter, haldata->CurrentChannel);
+
+/* Moved by Neo for USB SS to the step below */
+/* _RfPowerSave(Adapter); */
+
+ _InitAntenna_Selection(Adapter);
+
+ /* */
+ /* Disable BAR, suggested by Scott */
+ /* 2010.04.09 add by hpfan */
+ /* */
+ rtw_write32(Adapter, REG_BAR_MODE_CTRL, 0x0201ffff);
+
+ /* HW SEQ CTRL */
+ /* set 0x0 to 0xFF by tynli. Default enable HW SEQ NUM. */
+ rtw_write8(Adapter, REG_HWSEQ_CTRL, 0xFF);
+
+ if (pregistrypriv->wifi_spec)
+ rtw_write16(Adapter, REG_FAST_EDCA_CTRL, 0);
+
+ /* Nav limit, suggested by scott */
+ rtw_write8(Adapter, 0x652, 0x0);
+
+HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_HAL_DM);
+ rtl8188e_InitHalDm(Adapter);
+
+ if (Adapter->registrypriv.mp_mode == 1) {
+ Adapter->mppriv.channel = haldata->CurrentChannel;
+ MPT_InitializeAdapter(Adapter, Adapter->mppriv.channel);
+ } else {
+ /* 2010/08/11 MH Merge from 8192SE for Minicard init. We need to confirm current radio status */
+ /* and then decide whether to enable RF. For Selective suspend mode, we may not */
+ /* call init on struct adapter. This may cause some problems. */
+ /* Fix the bug that Hw/Sw radio off before S3/S4, the RF off action will not be executed */
+ /* in MgntActSet_RF_State() after wake up, because the value of haldata->eRFPowerState */
+ /* is the same as eRfOff, we should change it to eRfOn after we config RF parameters. */
+ /* Added by tynli. 2010.03.30. */
+ pwrctrlpriv->rf_pwrstate = rf_on;
+
+ /* enable Tx report. */
+ rtw_write8(Adapter, REG_FWHW_TXQ_CTRL+1, 0x0F);
+
+ /* Suggested by SD1 pisa. Added by tynli. 2011.10.21. */
+ rtw_write8(Adapter, REG_EARLY_MODE_CONTROL+3, 0x01);/* Pretx_en, for WEP/TKIP SEC */
+
+ /* tynli_test_tx_report. */
+ rtw_write16(Adapter, REG_TX_RPT_TIME, 0x3DF0);
+
+ /* enable tx DMA to drop the redundant data of the packet */
+ rtw_write16(Adapter, REG_TXDMA_OFFSET_CHK, (rtw_read16(Adapter, REG_TXDMA_OFFSET_CHK) | DROP_DATA_EN));
+
+HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_IQK);
+ /* 2010/08/26 MH Merge from 8192CE. */
+ if (pwrctrlpriv->rf_pwrstate == rf_on) {
+ if (haldata->odmpriv.RFCalibrateInfo.bIQKInitialized) {
+ PHY_IQCalibrate_8188E(Adapter, true);
+ } else {
+ PHY_IQCalibrate_8188E(Adapter, false);
+ haldata->odmpriv.RFCalibrateInfo.bIQKInitialized = true;
+ }
+
+HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_PW_TRACK);
+
+ ODM_TXPowerTrackingCheck(&haldata->odmpriv);
+
+HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_LCK);
+ PHY_LCCalibrate_8188E(Adapter);
+ }
+ }
+
+/* HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_PABIAS); */
+/* _InitPABias(Adapter); */
+ rtw_write8(Adapter, REG_USB_HRPWM, 0);
+
+ /* ack for xmit mgmt frames. */
+ rtw_write32(Adapter, REG_FWHW_TXQ_CTRL, rtw_read32(Adapter, REG_FWHW_TXQ_CTRL)|BIT(12));
+
+exit:
+HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_END);
+
+ DBG_88E("%s in %dms\n", __func__, rtw_get_passing_time_ms(init_start_time));
+
+_func_exit_;
+
+ return status;
+}
+
+void _ps_open_RF(struct adapter *adapt)
+{
+ /* here call with bRegSSPwrLvl 1, bRegSSPwrLvl 2 needs to be verified */
+ /* phy_SsPwrSwitch92CU(adapt, rf_on, 1); */
+}
+
+static void _ps_close_RF(struct adapter *adapt)
+{
+ /* here call with bRegSSPwrLvl 1, bRegSSPwrLvl 2 needs to be verified */
+ /* phy_SsPwrSwitch92CU(adapt, rf_off, 1); */
+}
+
+static void CardDisableRTL8188EU(struct adapter *Adapter)
+{
+ u8 val8;
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("CardDisableRTL8188EU\n"));
+
+ /* Stop Tx Report Timer. 0x4EC[Bit1]=b'0 */
+ val8 = rtw_read8(Adapter, REG_TX_RPT_CTRL);
+ rtw_write8(Adapter, REG_TX_RPT_CTRL, val8&(~BIT1));
+
+ /* stop rx */
+ rtw_write8(Adapter, REG_CR, 0x0);
+
+ /* Run LPS WL RFOFF flow */
+ HalPwrSeqCmdParsing(Adapter, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, Rtl8188E_NIC_LPS_ENTER_FLOW);
+
+ /* 2. 0x1F[7:0] = 0 turn off RF */
+
+ val8 = rtw_read8(Adapter, REG_MCUFWDL);
+ if ((val8 & RAM_DL_SEL) && Adapter->bFWReady) { /* 8051 RAM code */
+ /* Reset MCU 0x2[10]=0. */
+ val8 = rtw_read8(Adapter, REG_SYS_FUNC_EN+1);
+ val8 &= ~BIT(2); /* 0x2[10], FEN_CPUEN */
+ rtw_write8(Adapter, REG_SYS_FUNC_EN+1, val8);
+ }
+
+ /* reset MCU ready status */
+ rtw_write8(Adapter, REG_MCUFWDL, 0);
+
+ /* YJ,add,111212 */
+ /* Disable 32k */
+ val8 = rtw_read8(Adapter, REG_32K_CTRL);
+ rtw_write8(Adapter, REG_32K_CTRL, val8&(~BIT0));
+
+ /* Card disable power action flow */
+ HalPwrSeqCmdParsing(Adapter, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, Rtl8188E_NIC_DISABLE_FLOW);
+
+ /* Reset MCU IO Wrapper */
+ val8 = rtw_read8(Adapter, REG_RSV_CTRL+1);
+ rtw_write8(Adapter, REG_RSV_CTRL+1, (val8&(~BIT3)));
+ val8 = rtw_read8(Adapter, REG_RSV_CTRL+1);
+ rtw_write8(Adapter, REG_RSV_CTRL+1, val8|BIT3);
+
+ /* YJ,test add, 111207. For Power Consumption. */
+ val8 = rtw_read8(Adapter, GPIO_IN);
+ rtw_write8(Adapter, GPIO_OUT, val8);
+ rtw_write8(Adapter, GPIO_IO_SEL, 0xFF);/* Reg0x46 */
+
+ val8 = rtw_read8(Adapter, REG_GPIO_IO_SEL);
+ rtw_write8(Adapter, REG_GPIO_IO_SEL, (val8<<4));
+ val8 = rtw_read8(Adapter, REG_GPIO_IO_SEL+1);
+ rtw_write8(Adapter, REG_GPIO_IO_SEL+1, val8|0x0F);/* Reg0x43 */
+ rtw_write32(Adapter, REG_BB_PAD_CTRL, 0x00080808);/* set LNA ,TRSW,EX_PA Pin to output mode */
+ haldata->bMacPwrCtrlOn = false;
+ Adapter->bFWReady = false;
+}
+static void rtl8192cu_hw_power_down(struct adapter *adapt)
+{
+ /* 2010/-8/09 MH For the power down module, we need to enable the register block control reg at 0x1c. */
+ /* Then enable power down control bit of register 0x04 BIT4 and BIT15 as 1. */
+
+ /* Enable register area 0x0-0xc. */
+ rtw_write8(adapt, REG_RSV_CTRL, 0x0);
+ rtw_write16(adapt, REG_APS_FSMCO, 0x8812);
+}
+
+static u32 rtl8188eu_hal_deinit(struct adapter *Adapter)
+{
+
+ DBG_88E("==> %s\n", __func__);
+
+ rtw_write32(Adapter, REG_HIMR_88E, IMR_DISABLED_88E);
+ rtw_write32(Adapter, REG_HIMRE_88E, IMR_DISABLED_88E);
+
+ DBG_88E("bkeepfwalive(%x)\n", Adapter->pwrctrlpriv.bkeepfwalive);
+ if (Adapter->pwrctrlpriv.bkeepfwalive) {
+ _ps_close_RF(Adapter);
+ if ((Adapter->pwrctrlpriv.bHWPwrPindetect) && (Adapter->pwrctrlpriv.bHWPowerdown))
+ rtl8192cu_hw_power_down(Adapter);
+ } else {
+ if (Adapter->hw_init_completed) {
+ CardDisableRTL8188EU(Adapter);
+
+ if ((Adapter->pwrctrlpriv.bHWPwrPindetect) && (Adapter->pwrctrlpriv.bHWPowerdown))
+ rtl8192cu_hw_power_down(Adapter);
+ }
+ }
+	return _SUCCESS;
+}
+
+static unsigned int rtl8188eu_inirp_init(struct adapter *Adapter)
+{
+ u8 i;
+ struct recv_buf *precvbuf;
+ uint status;
+ struct intf_hdl *pintfhdl = &Adapter->iopriv.intf;
+ struct recv_priv *precvpriv = &(Adapter->recvpriv);
+ u32 (*_read_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
+
+_func_enter_;
+
+ _read_port = pintfhdl->io_ops._read_port;
+
+ status = _SUCCESS;
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
+ ("===> usb_inirp_init\n"));
+
+ precvpriv->ff_hwaddr = RECV_BULK_IN_ADDR;
+
+ /* issue Rx irp to receive data */
+ precvbuf = (struct recv_buf *)precvpriv->precv_buf;
+ for (i = 0; i < NR_RECVBUFF; i++) {
+ if (_read_port(pintfhdl, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf) == false) {
+ RT_TRACE(_module_hci_hal_init_c_, _drv_err_, ("usb_rx_init: usb_read_port error\n"));
+ status = _FAIL;
+ goto exit;
+ }
+
+ precvbuf++;
+ precvpriv->free_recv_buf_queue_cnt--;
+ }
+
+exit:
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("<=== usb_inirp_init\n"));
+
+_func_exit_;
+
+ return status;
+}
+
+static unsigned int rtl8188eu_inirp_deinit(struct adapter *Adapter)
+{
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("\n ===> usb_rx_deinit\n"));
+
+ rtw_read_port_cancel(Adapter);
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("\n <=== usb_rx_deinit\n"));
+
+ return _SUCCESS;
+}
+
+/* */
+/* */
+/* EEPROM/EFUSE Content Parsing */
+/* */
+/* */
+static void _ReadLEDSetting(struct adapter *Adapter, u8 *PROMContent, bool AutoloadFail)
+{
+ struct led_priv *pledpriv = &(Adapter->ledpriv);
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+
+ pledpriv->bRegUseLed = true;
+ pledpriv->LedStrategy = SW_LED_MODE1;
+ haldata->bLedOpenDrain = true;/* Support Open-drain arrangement for controlling the LED. */
+}
+
+static void Hal_EfuseParsePIDVID_8188EU(struct adapter *adapt, u8 *hwinfo, bool AutoLoadFail)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+
+ if (!AutoLoadFail) {
+ /* VID, PID */
+ haldata->EEPROMVID = EF2BYTE(*(__le16 *)&hwinfo[EEPROM_VID_88EU]);
+ haldata->EEPROMPID = EF2BYTE(*(__le16 *)&hwinfo[EEPROM_PID_88EU]);
+
+ /* Customer ID, 0x00 and 0xff are reserved for Realtek. */
+ haldata->EEPROMCustomerID = *(u8 *)&hwinfo[EEPROM_CUSTOMERID_88E];
+ haldata->EEPROMSubCustomerID = EEPROM_Default_SubCustomerID;
+ } else {
+ haldata->EEPROMVID = EEPROM_Default_VID;
+ haldata->EEPROMPID = EEPROM_Default_PID;
+
+ /* Customer ID, 0x00 and 0xff are reserved for Realtek. */
+ haldata->EEPROMCustomerID = EEPROM_Default_CustomerID;
+ haldata->EEPROMSubCustomerID = EEPROM_Default_SubCustomerID;
+ }
+
+ DBG_88E("VID = 0x%04X, PID = 0x%04X\n", haldata->EEPROMVID, haldata->EEPROMPID);
+ DBG_88E("Customer ID: 0x%02X, SubCustomer ID: 0x%02X\n", haldata->EEPROMCustomerID, haldata->EEPROMSubCustomerID);
+}
+
+static void Hal_EfuseParseMACAddr_8188EU(struct adapter *adapt, u8 *hwinfo, bool AutoLoadFail)
+{
+ u16 i;
+ u8 sMacAddr[6] = {0x00, 0xE0, 0x4C, 0x81, 0x88, 0x02};
+ struct eeprom_priv *eeprom = GET_EEPROM_EFUSE_PRIV(adapt);
+
+ if (AutoLoadFail) {
+ for (i = 0; i < 6; i++)
+ eeprom->mac_addr[i] = sMacAddr[i];
+ } else {
+ /* Read Permanent MAC address */
+ memcpy(eeprom->mac_addr, &hwinfo[EEPROM_MAC_ADDR_88EU], ETH_ALEN);
+ }
+ RT_TRACE(_module_hci_hal_init_c_, _drv_notice_,
+ ("Hal_EfuseParseMACAddr_8188EU: Permanent Address = %02x-%02x-%02x-%02x-%02x-%02x\n",
+ eeprom->mac_addr[0], eeprom->mac_addr[1],
+ eeprom->mac_addr[2], eeprom->mac_addr[3],
+ eeprom->mac_addr[4], eeprom->mac_addr[5]));
+}
+
+static void Hal_CustomizeByCustomerID_8188EU(struct adapter *adapt)
+{
+}
+
+static void
+readAdapterInfo_8188EU(
+ struct adapter *adapt
+ )
+{
+ struct eeprom_priv *eeprom = GET_EEPROM_EFUSE_PRIV(adapt);
+
+ /* parse the eeprom/efuse content */
+ Hal_EfuseParseIDCode88E(adapt, eeprom->efuse_eeprom_data);
+ Hal_EfuseParsePIDVID_8188EU(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ Hal_EfuseParseMACAddr_8188EU(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+
+ Hal_ReadPowerSavingMode88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ Hal_ReadTxPowerInfo88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ Hal_EfuseParseEEPROMVer88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ rtl8188e_EfuseParseChnlPlan(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ Hal_EfuseParseXtal_8188E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ Hal_EfuseParseCustomerID88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ Hal_ReadAntennaDiversity88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ Hal_EfuseParseBoardType88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ Hal_ReadThermalMeter_88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+
+ /* */
+	/* The following part initializes some vars from PG info. */
+ /* */
+ Hal_InitChannelPlan(adapt);
+ Hal_CustomizeByCustomerID_8188EU(adapt);
+
+ _ReadLEDSetting(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+}
+
+static void _ReadPROMContent(
+ struct adapter *Adapter
+ )
+{
+ struct eeprom_priv *eeprom = GET_EEPROM_EFUSE_PRIV(Adapter);
+ u8 eeValue;
+
+ /* check system boot selection */
+ eeValue = rtw_read8(Adapter, REG_9346CR);
+ eeprom->EepromOrEfuse = (eeValue & BOOT_FROM_EEPROM) ? true : false;
+ eeprom->bautoload_fail_flag = (eeValue & EEPROM_EN) ? false : true;
+
+ DBG_88E("Boot from %s, Autoload %s !\n", (eeprom->EepromOrEfuse ? "EEPROM" : "EFUSE"),
+ (eeprom->bautoload_fail_flag ? "Fail" : "OK"));
+
+ Hal_InitPGData88E(Adapter);
+ readAdapterInfo_8188EU(Adapter);
+}
+
+static void _ReadRFType(struct adapter *Adapter)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+
+ haldata->rf_chip = RF_6052;
+}
+
+static int _ReadAdapterInfo8188EU(struct adapter *Adapter)
+{
+ u32 start = rtw_get_current_time();
+
+ MSG_88E("====> %s\n", __func__);
+
+ _ReadRFType(Adapter);/* rf_chip -> _InitRFType() */
+ _ReadPROMContent(Adapter);
+
+ MSG_88E("<==== %s in %d ms\n", __func__, rtw_get_passing_time_ms(start));
+
+ return _SUCCESS;
+}
+
+static void ReadAdapterInfo8188EU(struct adapter *Adapter)
+{
+ /* Read EEPROM size before call any EEPROM function */
+ Adapter->EepromAddressSize = GetEEPROMSize8188E(Adapter);
+
+ _ReadAdapterInfo8188EU(Adapter);
+}
+
+#define GPIO_DEBUG_PORT_NUM 0
+static void rtl8192cu_trigger_gpio_0(struct adapter *adapt)
+{
+}
+
+static void ResumeTxBeacon(struct adapter *adapt)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+
+	/* 2010.03.01. Marked by tynli. No need to call workitem because we record the value */
+ /* which should be read from register to a global variable. */
+
+ rtw_write8(adapt, REG_FWHW_TXQ_CTRL+2, (haldata->RegFwHwTxQCtrl) | BIT6);
+ haldata->RegFwHwTxQCtrl |= BIT6;
+ rtw_write8(adapt, REG_TBTT_PROHIBIT+1, 0xff);
+ haldata->RegReg542 |= BIT0;
+ rtw_write8(adapt, REG_TBTT_PROHIBIT+2, haldata->RegReg542);
+}
+
+static void StopTxBeacon(struct adapter *adapt)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+
+	/* 2010.03.01. Marked by tynli. No need to call workitem because we record the value */
+ /* which should be read from register to a global variable. */
+
+ rtw_write8(adapt, REG_FWHW_TXQ_CTRL+2, (haldata->RegFwHwTxQCtrl) & (~BIT6));
+ haldata->RegFwHwTxQCtrl &= (~BIT6);
+ rtw_write8(adapt, REG_TBTT_PROHIBIT+1, 0x64);
+ haldata->RegReg542 &= ~(BIT0);
+ rtw_write8(adapt, REG_TBTT_PROHIBIT+2, haldata->RegReg542);
+
+ /* todo: CheckFwRsvdPageContent(Adapter); 2010.06.23. Added by tynli. */
+}
+
+static void hw_var_set_opmode(struct adapter *Adapter, u8 variable, u8 *val)
+{
+ u8 val8;
+ u8 mode = *((u8 *)val);
+
+ /* disable Port0 TSF update */
+ rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL)|BIT(4));
+
+ /* set net_type */
+ val8 = rtw_read8(Adapter, MSR)&0x0c;
+ val8 |= mode;
+ rtw_write8(Adapter, MSR, val8);
+
+ DBG_88E("%s()-%d mode = %d\n", __func__, __LINE__, mode);
+
+ if ((mode == _HW_STATE_STATION_) || (mode == _HW_STATE_NOLINK_)) {
+ StopTxBeacon(Adapter);
+
+ rtw_write8(Adapter, REG_BCN_CTRL, 0x19);/* disable atim wnd */
+ } else if ((mode == _HW_STATE_ADHOC_)) {
+ ResumeTxBeacon(Adapter);
+ rtw_write8(Adapter, REG_BCN_CTRL, 0x1a);
+ } else if (mode == _HW_STATE_AP_) {
+ ResumeTxBeacon(Adapter);
+
+ rtw_write8(Adapter, REG_BCN_CTRL, 0x12);
+
+ /* Set RCR */
+		rtw_write32(Adapter, REG_RCR, 0x7000208e);/* CBSSID_DATA must be set to 0, reject ICV_ERR packets */
+ /* enable to rx data frame */
+ rtw_write16(Adapter, REG_RXFLTMAP2, 0xFFFF);
+ /* enable to rx ps-poll */
+ rtw_write16(Adapter, REG_RXFLTMAP1, 0x0400);
+
+ /* Beacon Control related register for first time */
+ rtw_write8(Adapter, REG_BCNDMATIM, 0x02); /* 2ms */
+
+ rtw_write8(Adapter, REG_ATIMWND, 0x0a); /* 10ms */
+ rtw_write16(Adapter, REG_BCNTCFG, 0x00);
+ rtw_write16(Adapter, REG_TBTT_PROHIBIT, 0xff04);
+ rtw_write16(Adapter, REG_TSFTR_SYN_OFFSET, 0x7fff);/* +32767 (~32ms) */
+
+ /* reset TSF */
+ rtw_write8(Adapter, REG_DUAL_TSF_RST, BIT(0));
+
+		/* BIT3 - If set to 0, hw will clear bcnq when tx beacon ok/fail or port 0 */
+ rtw_write8(Adapter, REG_MBID_NUM, rtw_read8(Adapter, REG_MBID_NUM) | BIT(3) | BIT(4));
+
+ /* enable BCN0 Function for if1 */
+ /* don't enable update TSF0 for if1 (due to TSF update when beacon/probe rsp are received) */
+ rtw_write8(Adapter, REG_BCN_CTRL, (DIS_TSF_UDT0_NORMAL_CHIP|EN_BCN_FUNCTION | BIT(1)));
+
+ /* dis BCN1 ATIM WND if if2 is station */
+ rtw_write8(Adapter, REG_BCN_CTRL_1, rtw_read8(Adapter, REG_BCN_CTRL_1) | BIT(0));
+ }
+}
+
+static void hw_var_set_macaddr(struct adapter *Adapter, u8 variable, u8 *val)
+{
+ u8 idx = 0;
+ u32 reg_macid;
+
+ reg_macid = REG_MACID;
+
+ for (idx = 0; idx < 6; idx++)
+ rtw_write8(Adapter, (reg_macid+idx), val[idx]);
+}
+
+static void hw_var_set_bssid(struct adapter *Adapter, u8 variable, u8 *val)
+{
+ u8 idx = 0;
+ u32 reg_bssid;
+
+ reg_bssid = REG_BSSID;
+
+ for (idx = 0; idx < 6; idx++)
+ rtw_write8(Adapter, (reg_bssid+idx), val[idx]);
+}
+
+static void hw_var_set_bcn_func(struct adapter *Adapter, u8 variable, u8 *val)
+{
+ u32 bcn_ctrl_reg;
+
+ bcn_ctrl_reg = REG_BCN_CTRL;
+
+ if (*((u8 *)val))
+ rtw_write8(Adapter, bcn_ctrl_reg, (EN_BCN_FUNCTION | EN_TXBCN_RPT));
+ else
+ rtw_write8(Adapter, bcn_ctrl_reg, rtw_read8(Adapter, bcn_ctrl_reg)&(~(EN_BCN_FUNCTION | EN_TXBCN_RPT)));
+}
+
+static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &haldata->dmpriv;
+ struct odm_dm_struct *podmpriv = &haldata->odmpriv;
+_func_enter_;
+
+ switch (variable) {
+ case HW_VAR_MEDIA_STATUS:
+ {
+ u8 val8;
+
+ val8 = rtw_read8(Adapter, MSR)&0x0c;
+ val8 |= *((u8 *)val);
+ rtw_write8(Adapter, MSR, val8);
+ }
+ break;
+ case HW_VAR_MEDIA_STATUS1:
+ {
+ u8 val8;
+
+ val8 = rtw_read8(Adapter, MSR) & 0x03;
+ val8 |= *((u8 *)val) << 2;
+ rtw_write8(Adapter, MSR, val8);
+ }
+ break;
+ case HW_VAR_SET_OPMODE:
+ hw_var_set_opmode(Adapter, variable, val);
+ break;
+ case HW_VAR_MAC_ADDR:
+ hw_var_set_macaddr(Adapter, variable, val);
+ break;
+ case HW_VAR_BSSID:
+ hw_var_set_bssid(Adapter, variable, val);
+ break;
+ case HW_VAR_BASIC_RATE:
+ {
+ u16 BrateCfg = 0;
+ u8 RateIndex = 0;
+
+ /* 2007.01.16, by Emily */
+ /* Select RRSR (in Legacy-OFDM and CCK) */
+ /* For 8190, we select only 24M, 12M, 6M, 11M, 5.5M, 2M, and 1M from the Basic rate. */
+ /* We do not use other rates. */
+ HalSetBrateCfg(Adapter, val, &BrateCfg);
+ DBG_88E("HW_VAR_BASIC_RATE: BrateCfg(%#x)\n", BrateCfg);
+
+ /* 2011.03.30 add by Luke Lee */
+ /* CCK 2M ACK should be disabled for some BCM and Atheros AP IOT */
+ /* because CCK 2M has poor TXEVM */
+ /* CCK 5.5M & 11M ACK should be enabled for better performance */
+
+ BrateCfg = (BrateCfg | 0xd) & 0x15d;
+ haldata->BasicRateSet = BrateCfg;
+
+ BrateCfg |= 0x01; /* default enable 1M ACK rate */
+ /* Set RRSR rate table. */
+ rtw_write8(Adapter, REG_RRSR, BrateCfg & 0xff);
+ rtw_write8(Adapter, REG_RRSR+1, (BrateCfg >> 8) & 0xff);
+ rtw_write8(Adapter, REG_RRSR+2, rtw_read8(Adapter, REG_RRSR+2)&0xf0);
+
+ /* Set RTS initial rate */
+ while (BrateCfg > 0x1) {
+ BrateCfg = (BrateCfg >> 1);
+ RateIndex++;
+ }
+ /* Ziv - Check */
+ rtw_write8(Adapter, REG_INIRTS_RATE_SEL, RateIndex);
+ }
+ break;
+ case HW_VAR_TXPAUSE:
+ rtw_write8(Adapter, REG_TXPAUSE, *((u8 *)val));
+ break;
+ case HW_VAR_BCN_FUNC:
+ hw_var_set_bcn_func(Adapter, variable, val);
+ break;
+ case HW_VAR_CORRECT_TSF:
+ {
+ u64 tsf;
+ struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
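+			/* Roll the local TSF back to the most recent beacon-interval */
+			/* boundary minus one TU (1024 us) before writing it to REG_TSFTR. */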
+ tsf = pmlmeext->TSFValue - rtw_modular64(pmlmeext->TSFValue, (pmlmeinfo->bcn_interval*1024)) - 1024; /* us */
+
+ if (((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) || ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE))
+ StopTxBeacon(Adapter);
+
+ /* disable related TSF function */
+ rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL)&(~BIT(3)));
+
+ rtw_write32(Adapter, REG_TSFTR, tsf);
+ rtw_write32(Adapter, REG_TSFTR+4, tsf>>32);
+
+ /* enable related TSF function */
+ rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL)|BIT(3));
+
+ if (((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) || ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE))
+ ResumeTxBeacon(Adapter);
+ }
+ break;
+ case HW_VAR_CHECK_BSSID:
+ if (*((u8 *)val)) {
+ rtw_write32(Adapter, REG_RCR, rtw_read32(Adapter, REG_RCR)|RCR_CBSSID_DATA|RCR_CBSSID_BCN);
+ } else {
+ u32 val32;
+
+ val32 = rtw_read32(Adapter, REG_RCR);
+
+ val32 &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+
+ rtw_write32(Adapter, REG_RCR, val32);
+ }
+ break;
+ case HW_VAR_MLME_DISCONNECT:
+		/* Set RCR not to receive data frames in NO LINK state */
+ /* reject all data frames */
+ rtw_write16(Adapter, REG_RXFLTMAP2, 0x00);
+
+ /* reset TSF */
+ rtw_write8(Adapter, REG_DUAL_TSF_RST, (BIT(0)|BIT(1)));
+
+ /* disable update TSF */
+ rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL)|BIT(4));
+ break;
+ case HW_VAR_MLME_SITESURVEY:
+ if (*((u8 *)val)) { /* under sitesurvey */
+ /* config RCR to receive different BSSID & not to receive data frame */
+ u32 v = rtw_read32(Adapter, REG_RCR);
+ v &= ~(RCR_CBSSID_BCN);
+ rtw_write32(Adapter, REG_RCR, v);
+ /* reject all data frame */
+ rtw_write16(Adapter, REG_RXFLTMAP2, 0x00);
+
+ /* disable update TSF */
+ rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL)|BIT(4));
+ } else { /* sitesurvey done */
+ struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if ((is_client_associated_to_ap(Adapter)) ||
+ ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE)) {
+ /* enable to rx data frame */
+ rtw_write16(Adapter, REG_RXFLTMAP2, 0xFFFF);
+
+ /* enable update TSF */
+ rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL)&(~BIT(4)));
+ } else if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
+ rtw_write16(Adapter, REG_RXFLTMAP2, 0xFFFF);
+ /* enable update TSF */
+ rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL)&(~BIT(4)));
+ }
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
+ rtw_write32(Adapter, REG_RCR, rtw_read32(Adapter, REG_RCR)|RCR_CBSSID_BCN);
+ } else {
+ if (Adapter->in_cta_test) {
+ u32 v = rtw_read32(Adapter, REG_RCR);
+ v &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);/* RCR_ADF */
+ rtw_write32(Adapter, REG_RCR, v);
+ } else {
+ rtw_write32(Adapter, REG_RCR, rtw_read32(Adapter, REG_RCR)|RCR_CBSSID_BCN);
+ }
+ }
+ }
+ break;
+ case HW_VAR_MLME_JOIN:
+ {
+ u8 RetryLimit = 0x30;
+ u8 type = *((u8 *)val);
+ struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
+
+ if (type == 0) { /* prepare to join */
+				/* enable rx of data frames. Accept all data frames */
+ rtw_write16(Adapter, REG_RXFLTMAP2, 0xFFFF);
+
+ if (Adapter->in_cta_test) {
+ u32 v = rtw_read32(Adapter, REG_RCR);
+ v &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);/* RCR_ADF */
+ rtw_write32(Adapter, REG_RCR, v);
+ } else {
+ rtw_write32(Adapter, REG_RCR, rtw_read32(Adapter, REG_RCR)|RCR_CBSSID_DATA|RCR_CBSSID_BCN);
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
+ RetryLimit = (haldata->CustomerID == RT_CID_CCX) ? 7 : 48;
+ else /* Ad-hoc Mode */
+ RetryLimit = 0x7;
+ } else if (type == 1) {
+ /* joinbss_event call back when join res < 0 */
+ rtw_write16(Adapter, REG_RXFLTMAP2, 0x00);
+ } else if (type == 2) {
+ /* sta add event call back */
+ /* enable update TSF */
+ rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL)&(~BIT(4)));
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE))
+ RetryLimit = 0x7;
+ }
+ rtw_write16(Adapter, REG_RL, RetryLimit << RETRY_LIMIT_SHORT_SHIFT | RetryLimit << RETRY_LIMIT_LONG_SHIFT);
+ }
+ break;
+ case HW_VAR_BEACON_INTERVAL:
+ rtw_write16(Adapter, REG_BCN_INTERVAL, *((u16 *)val));
+ break;
+ case HW_VAR_SLOT_TIME:
+ {
+ u8 u1bAIFS, aSifsTime;
+ struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ rtw_write8(Adapter, REG_SLOT, val[0]);
+
+ if (pmlmeinfo->WMM_enable == 0) {
+ if (pmlmeext->cur_wireless_mode == WIRELESS_11B)
+ aSifsTime = 10;
+ else
+ aSifsTime = 16;
+
+ u1bAIFS = aSifsTime + (2 * pmlmeinfo->slotTime);
+
+ /* <Roger_EXP> Temporary removed, 2008.06.20. */
+ rtw_write8(Adapter, REG_EDCA_VO_PARAM, u1bAIFS);
+ rtw_write8(Adapter, REG_EDCA_VI_PARAM, u1bAIFS);
+ rtw_write8(Adapter, REG_EDCA_BE_PARAM, u1bAIFS);
+ rtw_write8(Adapter, REG_EDCA_BK_PARAM, u1bAIFS);
+ }
+ }
+ break;
+ case HW_VAR_RESP_SIFS:
+ /* RESP_SIFS for CCK */
+ rtw_write8(Adapter, REG_R2T_SIFS, val[0]); /* SIFS_T2T_CCK (0x08) */
+ rtw_write8(Adapter, REG_R2T_SIFS+1, val[1]); /* SIFS_R2T_CCK(0x08) */
+ /* RESP_SIFS for OFDM */
+ rtw_write8(Adapter, REG_T2T_SIFS, val[2]); /* SIFS_T2T_OFDM (0x0a) */
+ rtw_write8(Adapter, REG_T2T_SIFS+1, val[3]); /* SIFS_R2T_OFDM(0x0a) */
+ break;
+ case HW_VAR_ACK_PREAMBLE:
+ {
+ u8 regTmp;
+ u8 bShortPreamble = *((bool *)val);
+ /* Joseph marked out for Netgear 3500 TKIP channel 7 issue.(Temporarily) */
+ regTmp = (haldata->nCur40MhzPrimeSC)<<5;
+ if (bShortPreamble)
+ regTmp |= 0x80;
+
+ rtw_write8(Adapter, REG_RRSR+2, regTmp);
+ }
+ break;
+ case HW_VAR_SEC_CFG:
+ rtw_write8(Adapter, REG_SECCFG, *((u8 *)val));
+ break;
+ case HW_VAR_DM_FLAG:
+ podmpriv->SupportAbility = *((u8 *)val);
+ break;
+ case HW_VAR_DM_FUNC_OP:
+ if (val[0])
+ podmpriv->BK_SupportAbility = podmpriv->SupportAbility;
+ else
+ podmpriv->SupportAbility = podmpriv->BK_SupportAbility;
+ break;
+ case HW_VAR_DM_FUNC_SET:
+ if (*((u32 *)val) == DYNAMIC_ALL_FUNC_ENABLE) {
+ pdmpriv->DMFlag = pdmpriv->InitDMFlag;
+ podmpriv->SupportAbility = pdmpriv->InitODMFlag;
+ } else {
+ podmpriv->SupportAbility |= *((u32 *)val);
+ }
+ break;
+ case HW_VAR_DM_FUNC_CLR:
+ podmpriv->SupportAbility &= *((u32 *)val);
+ break;
+ case HW_VAR_CAM_EMPTY_ENTRY:
+ {
+ u8 ucIndex = *((u8 *)val);
+ u8 i;
+ u32 ulCommand = 0;
+ u32 ulContent = 0;
+ u32 ulEncAlgo = CAM_AES;
+
+ for (i = 0; i < CAM_CONTENT_COUNT; i++) {
+				/* fill id in CAM config, 2 bytes */
+ if (i == 0)
+ ulContent |= (ucIndex & 0x03) | ((u16)(ulEncAlgo)<<2);
+ else
+ ulContent = 0;
+ /* polling bit, and No Write enable, and address */
+ ulCommand = CAM_CONTENT_COUNT*ucIndex+i;
+ ulCommand = ulCommand | CAM_POLLINIG|CAM_WRITE;
+				/* writing content 0 is equal to marking it invalid */
+ rtw_write32(Adapter, WCAMI, ulContent); /* delay_ms(40); */
+ rtw_write32(Adapter, RWCAM, ulCommand); /* delay_ms(40); */
+ }
+ }
+ break;
+ case HW_VAR_CAM_INVALID_ALL:
+ rtw_write32(Adapter, RWCAM, BIT(31)|BIT(30));
+ break;
+ case HW_VAR_CAM_WRITE:
+ {
+ u32 cmd;
+ u32 *cam_val = (u32 *)val;
+ rtw_write32(Adapter, WCAMI, cam_val[0]);
+
+ cmd = CAM_POLLINIG | CAM_WRITE | cam_val[1];
+ rtw_write32(Adapter, RWCAM, cmd);
+ }
+ break;
+ case HW_VAR_AC_PARAM_VO:
+ rtw_write32(Adapter, REG_EDCA_VO_PARAM, ((u32 *)(val))[0]);
+ break;
+ case HW_VAR_AC_PARAM_VI:
+ rtw_write32(Adapter, REG_EDCA_VI_PARAM, ((u32 *)(val))[0]);
+ break;
+ case HW_VAR_AC_PARAM_BE:
+ haldata->AcParam_BE = ((u32 *)(val))[0];
+ rtw_write32(Adapter, REG_EDCA_BE_PARAM, ((u32 *)(val))[0]);
+ break;
+ case HW_VAR_AC_PARAM_BK:
+ rtw_write32(Adapter, REG_EDCA_BK_PARAM, ((u32 *)(val))[0]);
+ break;
+ case HW_VAR_ACM_CTRL:
+ {
+ u8 acm_ctrl = *((u8 *)val);
+ u8 AcmCtrl = rtw_read8(Adapter, REG_ACMHWCTRL);
+
+ if (acm_ctrl > 1)
+ AcmCtrl = AcmCtrl | 0x1;
+
+ if (acm_ctrl & BIT(3))
+ AcmCtrl |= AcmHw_VoqEn;
+ else
+ AcmCtrl &= (~AcmHw_VoqEn);
+
+ if (acm_ctrl & BIT(2))
+ AcmCtrl |= AcmHw_ViqEn;
+ else
+ AcmCtrl &= (~AcmHw_ViqEn);
+
+ if (acm_ctrl & BIT(1))
+ AcmCtrl |= AcmHw_BeqEn;
+ else
+ AcmCtrl &= (~AcmHw_BeqEn);
+
+ DBG_88E("[HW_VAR_ACM_CTRL] Write 0x%X\n", AcmCtrl);
+ rtw_write8(Adapter, REG_ACMHWCTRL, AcmCtrl);
+ }
+ break;
+ case HW_VAR_AMPDU_MIN_SPACE:
+ {
+ u8 MinSpacingToSet;
+ u8 SecMinSpace;
+
+ MinSpacingToSet = *((u8 *)val);
+ if (MinSpacingToSet <= 7) {
+ switch (Adapter->securitypriv.dot11PrivacyAlgrthm) {
+ case _NO_PRIVACY_:
+ case _AES_:
+ SecMinSpace = 0;
+ break;
+ case _WEP40_:
+ case _WEP104_:
+ case _TKIP_:
+ case _TKIP_WTMIC_:
+ SecMinSpace = 6;
+ break;
+ default:
+ SecMinSpace = 7;
+ break;
+ }
+ if (MinSpacingToSet < SecMinSpace)
+ MinSpacingToSet = SecMinSpace;
+ rtw_write8(Adapter, REG_AMPDU_MIN_SPACE, (rtw_read8(Adapter, REG_AMPDU_MIN_SPACE) & 0xf8) | MinSpacingToSet);
+ }
+ }
+ break;
+ case HW_VAR_AMPDU_FACTOR:
+ {
+ u8 RegToSet_Normal[4] = {0x41, 0xa8, 0x72, 0xb9};
+ u8 FactorToSet;
+ u8 *pRegToSet;
+ u8 index = 0;
+
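+			/* Convert the AMPDU factor to a length limit (1 << (factor + 2), capped at 0xf) */
+			/* and clamp both nibbles of each REG_AGGLEN_LMT byte to that limit. */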
+ pRegToSet = RegToSet_Normal; /* 0xb972a841; */
+ FactorToSet = *((u8 *)val);
+ if (FactorToSet <= 3) {
+ FactorToSet = (1<<(FactorToSet + 2));
+ if (FactorToSet > 0xf)
+ FactorToSet = 0xf;
+
+ for (index = 0; index < 4; index++) {
+ if ((pRegToSet[index] & 0xf0) > (FactorToSet<<4))
+ pRegToSet[index] = (pRegToSet[index] & 0x0f) | (FactorToSet<<4);
+
+ if ((pRegToSet[index] & 0x0f) > FactorToSet)
+ pRegToSet[index] = (pRegToSet[index] & 0xf0) | (FactorToSet);
+
+ rtw_write8(Adapter, (REG_AGGLEN_LMT+index), pRegToSet[index]);
+ }
+ }
+ }
+ break;
+ case HW_VAR_RXDMA_AGG_PG_TH:
+ {
+ u8 threshold = *((u8 *)val);
+ if (threshold == 0)
+ threshold = haldata->UsbRxAggPageCount;
+ rtw_write8(Adapter, REG_RXDMA_AGG_PG_TH, threshold);
+ }
+ break;
+ case HW_VAR_SET_RPWM:
+ break;
+ case HW_VAR_H2C_FW_PWRMODE:
+ {
+ u8 psmode = (*(u8 *)val);
+
+			/* Force leaving RF low power mode for 1T1R to prevent conflicting settings in the Fw power */
+ /* saving sequence. 2010.06.07. Added by tynli. Suggested by SD3 yschang. */
+ if ((psmode != PS_MODE_ACTIVE) && (!IS_92C_SERIAL(haldata->VersionID)))
+ ODM_RF_Saving(podmpriv, true);
+ rtl8188e_set_FwPwrMode_cmd(Adapter, psmode);
+ }
+ break;
+ case HW_VAR_H2C_FW_JOINBSSRPT:
+ {
+ u8 mstatus = (*(u8 *)val);
+ rtl8188e_set_FwJoinBssReport_cmd(Adapter, mstatus);
+ }
+ break;
+#ifdef CONFIG_88EU_P2P
+ case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
+ {
+ u8 p2p_ps_state = (*(u8 *)val);
+ rtl8188e_set_p2p_ps_offload_cmd(Adapter, p2p_ps_state);
+ }
+ break;
+#endif
+ case HW_VAR_INITIAL_GAIN:
+ {
+ struct rtw_dig *pDigTable = &podmpriv->DM_DigTable;
+ u32 rx_gain = ((u32 *)(val))[0];
+
+ if (rx_gain == 0xff) {/* restore rx gain */
+ ODM_Write_DIG(podmpriv, pDigTable->BackupIGValue);
+ } else {
+ pDigTable->BackupIGValue = pDigTable->CurIGValue;
+ ODM_Write_DIG(podmpriv, rx_gain);
+ }
+ }
+ break;
+ case HW_VAR_TRIGGER_GPIO_0:
+ rtl8192cu_trigger_gpio_0(Adapter);
+ break;
+ case HW_VAR_RPT_TIMER_SETTING:
+ {
+ u16 min_rpt_time = (*(u16 *)val);
+ ODM_RA_Set_TxRPT_Time(podmpriv, min_rpt_time);
+ }
+ break;
+ case HW_VAR_ANTENNA_DIVERSITY_SELECT:
+ {
+ u8 Optimum_antenna = (*(u8 *)val);
+ u8 Ant;
+ /* switch antenna to Optimum_antenna */
+ if (haldata->CurAntenna != Optimum_antenna) {
+ Ant = (Optimum_antenna == 2) ? MAIN_ANT : AUX_ANT;
+ ODM_UpdateRxIdleAnt_88E(&haldata->odmpriv, Ant);
+
+ haldata->CurAntenna = Optimum_antenna;
+ }
+ }
+ break;
+ case HW_VAR_EFUSE_BYTES: /* To set EFUE total used bytes, added by Roger, 2008.12.22. */
+ haldata->EfuseUsedBytes = *((u16 *)val);
+ break;
+ case HW_VAR_FIFO_CLEARN_UP:
+ {
+ struct pwrctrl_priv *pwrpriv = &Adapter->pwrctrlpriv;
+ u8 trycnt = 100;
+
+ /* pause tx */
+ rtw_write8(Adapter, REG_TXPAUSE, 0xff);
+
+ /* keep sn */
+ Adapter->xmitpriv.nqos_ssn = rtw_read16(Adapter, REG_NQOS_SEQ);
+
+ if (!pwrpriv->bkeepfwalive) {
+ /* RX DMA stop */
+ rtw_write32(Adapter, REG_RXPKT_NUM, (rtw_read32(Adapter, REG_RXPKT_NUM)|RW_RELEASE_EN));
+ do {
+ if (!(rtw_read32(Adapter, REG_RXPKT_NUM)&RXDMA_IDLE))
+ break;
+ } while (trycnt--);
+ if (trycnt == 0)
+ DBG_88E("Stop RX DMA failed......\n");
+
+ /* RQPN Load 0 */
+ rtw_write16(Adapter, REG_RQPN_NPQ, 0x0);
+ rtw_write32(Adapter, REG_RQPN, 0x80000000);
+ rtw_mdelay_os(10);
+ }
+ }
+ break;
+ case HW_VAR_CHECK_TXBUF:
+ break;
+ case HW_VAR_APFM_ON_MAC:
+ haldata->bMacPwrCtrlOn = *val;
+ DBG_88E("%s: bMacPwrCtrlOn=%d\n", __func__, haldata->bMacPwrCtrlOn);
+ break;
+ case HW_VAR_TX_RPT_MAX_MACID:
+ {
+ u8 maxMacid = *val;
+ DBG_88E("### MacID(%d),Set Max Tx RPT MID(%d)\n", maxMacid, maxMacid+1);
+ rtw_write8(Adapter, REG_TX_RPT_CTRL+1, maxMacid+1);
+ }
+ break;
+ case HW_VAR_H2C_MEDIA_STATUS_RPT:
+		rtl8188e_set_FwMediaStatus_cmd(Adapter, (*(__le16 *)val));
+ break;
+ case HW_VAR_BCN_VALID:
+ /* BCN_VALID, BIT16 of REG_TDECTRL = BIT0 of REG_TDECTRL+2, write 1 to clear, Clear by sw */
+ rtw_write8(Adapter, REG_TDECTRL+2, rtw_read8(Adapter, REG_TDECTRL+2) | BIT0);
+ break;
+ default:
+ break;
+ }
+_func_exit_;
+}
+
+static void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct odm_dm_struct *podmpriv = &haldata->odmpriv;
+_func_enter_;
+
+ switch (variable) {
+ case HW_VAR_BASIC_RATE:
+		*((u16 *)(val)) = haldata->BasicRateSet;
+		break;
+ case HW_VAR_TXPAUSE:
+ val[0] = rtw_read8(Adapter, REG_TXPAUSE);
+ break;
+ case HW_VAR_BCN_VALID:
+ /* BCN_VALID, BIT16 of REG_TDECTRL = BIT0 of REG_TDECTRL+2 */
+ val[0] = (BIT0 & rtw_read8(Adapter, REG_TDECTRL+2)) ? true : false;
+ break;
+ case HW_VAR_DM_FLAG:
+ val[0] = podmpriv->SupportAbility;
+ break;
+ case HW_VAR_RF_TYPE:
+ val[0] = haldata->rf_type;
+ break;
+ case HW_VAR_FWLPS_RF_ON:
+ {
+			/* When we halt the NIC, we should check whether FW LPS has been left. */
+ if (Adapter->pwrctrlpriv.rf_pwrstate == rf_off) {
+ /* If it is in HW/SW Radio OFF or IPS state, we do not check Fw LPS Leave, */
+				/* because Fw is unloaded. */
+ val[0] = true;
+ } else {
+ u32 valRCR;
+ valRCR = rtw_read32(Adapter, REG_RCR);
+ valRCR &= 0x00070000;
+ if (valRCR)
+ val[0] = false;
+ else
+ val[0] = true;
+ }
+ }
+ break;
+ case HW_VAR_CURRENT_ANTENNA:
+ val[0] = haldata->CurAntenna;
+ break;
+ case HW_VAR_EFUSE_BYTES: /* To get EFUE total used bytes, added by Roger, 2008.12.22. */
+ *((u16 *)(val)) = haldata->EfuseUsedBytes;
+ break;
+ case HW_VAR_APFM_ON_MAC:
+ *val = haldata->bMacPwrCtrlOn;
+ break;
+ case HW_VAR_CHK_HI_QUEUE_EMPTY:
+ *val = ((rtw_read32(Adapter, REG_HGQ_INFORMATION)&0x0000ff00) == 0) ? true : false;
+ break;
+ default:
+ break;
+ }
+
+_func_exit_;
+}
+
+/* */
+/* Description: */
+/* Query setting of specified variable. */
+/* */
+static u8
+GetHalDefVar8188EUsb(
+ struct adapter *Adapter,
+ enum hal_def_variable eVariable,
+ void *pValue
+ )
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ u8 bResult = _SUCCESS;
+
+ switch (eVariable) {
+ case HAL_DEF_UNDERCORATEDSMOOTHEDPWDB:
+ {
+ struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
+ struct sta_priv *pstapriv = &Adapter->stapriv;
+ struct sta_info *psta;
+ psta = rtw_get_stainfo(pstapriv, pmlmepriv->cur_network.network.MacAddress);
+ if (psta)
+ *((int *)pValue) = psta->rssi_stat.UndecoratedSmoothedPWDB;
+ }
+ break;
+ case HAL_DEF_IS_SUPPORT_ANT_DIV:
+ *((u8 *)pValue) = (haldata->AntDivCfg == 0) ? false : true;
+ break;
+ case HAL_DEF_CURRENT_ANTENNA:
+ *((u8 *)pValue) = haldata->CurAntenna;
+ break;
+ case HAL_DEF_DRVINFO_SZ:
+ *((u32 *)pValue) = DRVINFO_SZ;
+ break;
+ case HAL_DEF_MAX_RECVBUF_SZ:
+ *((u32 *)pValue) = MAX_RECVBUF_SZ;
+ break;
+ case HAL_DEF_RX_PACKET_OFFSET:
+ *((u32 *)pValue) = RXDESC_SIZE + DRVINFO_SZ;
+ break;
+ case HAL_DEF_DBG_DM_FUNC:
+ *((u32 *)pValue) = haldata->odmpriv.SupportAbility;
+ break;
+ case HAL_DEF_RA_DECISION_RATE:
+ {
+ u8 MacID = *((u8 *)pValue);
+ *((u8 *)pValue) = ODM_RA_GetDecisionRate_8188E(&(haldata->odmpriv), MacID);
+ }
+ break;
+ case HAL_DEF_RA_SGI:
+ {
+ u8 MacID = *((u8 *)pValue);
+ *((u8 *)pValue) = ODM_RA_GetShortGI_8188E(&(haldata->odmpriv), MacID);
+ }
+ break;
+ case HAL_DEF_PT_PWR_STATUS:
+ {
+ u8 MacID = *((u8 *)pValue);
+ *((u8 *)pValue) = ODM_RA_GetHwPwrStatus_8188E(&(haldata->odmpriv), MacID);
+ }
+ break;
+ case HW_VAR_MAX_RX_AMPDU_FACTOR:
+ *((u32 *)pValue) = MAX_AMPDU_FACTOR_64K;
+ break;
+ case HW_DEF_RA_INFO_DUMP:
+ {
+ u8 entry_id = *((u8 *)pValue);
+ if (check_fwstate(&Adapter->mlmepriv, _FW_LINKED)) {
+ DBG_88E("============ RA status check ===================\n");
+ DBG_88E("Mac_id:%d , RateID = %d, RAUseRate = 0x%08x, RateSGI = %d, DecisionRate = 0x%02x ,PTStage = %d\n",
+ entry_id,
+ haldata->odmpriv.RAInfo[entry_id].RateID,
+ haldata->odmpriv.RAInfo[entry_id].RAUseRate,
+ haldata->odmpriv.RAInfo[entry_id].RateSGI,
+ haldata->odmpriv.RAInfo[entry_id].DecisionRate,
+ haldata->odmpriv.RAInfo[entry_id].PTStage);
+ }
+ }
+ break;
+ case HW_DEF_ODM_DBG_FLAG:
+ {
+ struct odm_dm_struct *dm_ocm = &(haldata->odmpriv);
+ pr_info("dm_ocm->DebugComponents = 0x%llx\n", dm_ocm->DebugComponents);
+ }
+ break;
+ case HAL_DEF_DBG_DUMP_RXPKT:
+ *((u8 *)pValue) = haldata->bDumpRxPkt;
+ break;
+ case HAL_DEF_DBG_DUMP_TXPKT:
+ *((u8 *)pValue) = haldata->bDumpTxPkt;
+ break;
+ default:
+ bResult = _FAIL;
+ break;
+ }
+
+ return bResult;
+}
+
+/* */
+/* Description: */
+/* Change default setting of specified variable. */
+/* */
+static u8 SetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable, void *pValue)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ u8 bResult = _SUCCESS;
+
+ switch (eVariable) {
+ case HAL_DEF_DBG_DM_FUNC:
+ {
+ u8 dm_func = *((u8 *)pValue);
+ struct odm_dm_struct *podmpriv = &haldata->odmpriv;
+
+ if (dm_func == 0) { /* disable all dynamic func */
+ podmpriv->SupportAbility = DYNAMIC_FUNC_DISABLE;
+ DBG_88E("==> Disable all dynamic function...\n");
+ } else if (dm_func == 1) {/* disable DIG */
+ podmpriv->SupportAbility &= (~DYNAMIC_BB_DIG);
+ DBG_88E("==> Disable DIG...\n");
+ } else if (dm_func == 2) {/* disable High power */
+ podmpriv->SupportAbility &= (~DYNAMIC_BB_DYNAMIC_TXPWR);
+ } else if (dm_func == 3) {/* disable tx power tracking */
+ podmpriv->SupportAbility &= (~DYNAMIC_RF_CALIBRATION);
+ DBG_88E("==> Disable tx power tracking...\n");
+ } else if (dm_func == 5) {/* disable antenna diversity */
+ podmpriv->SupportAbility &= (~DYNAMIC_BB_ANT_DIV);
+ } else if (dm_func == 6) {/* turn on all dynamic func */
+ if (!(podmpriv->SupportAbility & DYNAMIC_BB_DIG)) {
+ struct rtw_dig *pDigTable = &podmpriv->DM_DigTable;
+ pDigTable->CurIGValue = rtw_read8(Adapter, 0xc50);
+ }
+ podmpriv->SupportAbility = DYNAMIC_ALL_FUNC_ENABLE;
+ DBG_88E("==> Turn on all dynamic function...\n");
+ }
+ }
+ break;
+ case HAL_DEF_DBG_DUMP_RXPKT:
+ haldata->bDumpRxPkt = *((u8 *)pValue);
+ break;
+ case HAL_DEF_DBG_DUMP_TXPKT:
+ haldata->bDumpTxPkt = *((u8 *)pValue);
+ break;
+ case HW_DEF_FA_CNT_DUMP:
+ {
+ u8 bRSSIDump = *((u8 *)pValue);
+ struct odm_dm_struct *dm_ocm = &(haldata->odmpriv);
+ if (bRSSIDump)
+				dm_ocm->DebugComponents = ODM_COMP_DIG | ODM_COMP_FA_CNT;
+ else
+ dm_ocm->DebugComponents = 0;
+ }
+ break;
+ case HW_DEF_ODM_DBG_FLAG:
+ {
+ u64 DebugComponents = *((u64 *)pValue);
+ struct odm_dm_struct *dm_ocm = &(haldata->odmpriv);
+ dm_ocm->DebugComponents = DebugComponents;
+ }
+ break;
+ default:
+ bResult = _FAIL;
+ break;
+ }
+
+ return bResult;
+}
+
+static void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level)
+{
+ u8 init_rate = 0;
+ u8 networkType, raid;
+ u32 mask, rate_bitmap;
+ u8 shortGIrate = false;
+ int supportRateNum = 0;
+ struct sta_info *psta;
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+
+ if (mac_id >= NUM_STA) /* CAM_SIZE */
+ return;
+ psta = pmlmeinfo->FW_sta_info[mac_id].psta;
+ if (psta == NULL)
+ return;
+ switch (mac_id) {
+ case 0:/* for infra mode */
+ supportRateNum = rtw_get_rateset_len(cur_network->SupportedRates);
+ networkType = judge_network_type(adapt, cur_network->SupportedRates, supportRateNum) & 0xf;
+ raid = networktype_to_raid(networkType);
+ mask = update_supported_rate(cur_network->SupportedRates, supportRateNum);
+ mask |= (pmlmeinfo->HT_enable) ? update_MSC_rate(&(pmlmeinfo->HT_caps)) : 0;
+ if (support_short_GI(adapt, &(pmlmeinfo->HT_caps)))
+ shortGIrate = true;
+ break;
+ case 1:/* for broadcast/multicast */
+ supportRateNum = rtw_get_rateset_len(pmlmeinfo->FW_sta_info[mac_id].SupportedRates);
+ if (pmlmeext->cur_wireless_mode & WIRELESS_11B)
+ networkType = WIRELESS_11B;
+ else
+ networkType = WIRELESS_11G;
+ raid = networktype_to_raid(networkType);
+ mask = update_basic_rate(cur_network->SupportedRates, supportRateNum);
+ break;
+ default: /* for each sta in IBSS */
+ supportRateNum = rtw_get_rateset_len(pmlmeinfo->FW_sta_info[mac_id].SupportedRates);
+ networkType = judge_network_type(adapt, pmlmeinfo->FW_sta_info[mac_id].SupportedRates, supportRateNum) & 0xf;
+ raid = networktype_to_raid(networkType);
+ mask = update_supported_rate(cur_network->SupportedRates, supportRateNum);
+
+ /* todo: support HT in IBSS */
+ break;
+ }
+
+ rate_bitmap = 0x0fffffff;
+ rate_bitmap = ODM_Get_Rate_Bitmap(&haldata->odmpriv, mac_id, mask, rssi_level);
+ DBG_88E("%s => mac_id:%d, networkType:0x%02x, mask:0x%08x\n\t ==> rssi_level:%d, rate_bitmap:0x%08x\n",
+ __func__, mac_id, networkType, mask, rssi_level, rate_bitmap);
+
+ mask &= rate_bitmap;
+
+ init_rate = get_highest_rate_idx(mask)&0x3f;
+
+ if (haldata->fw_ractrl) {
+ u8 arg;
+
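+		/* Pack the H2C rate-adaptive argument: MACID in bits 0-4, BIT(7) always set, */
+		/* BIT(5) when short GI is used; the RAID rides in the top nibble of the mask. */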
+ arg = mac_id & 0x1f;/* MACID */
+ arg |= BIT(7);
+ if (shortGIrate)
+ arg |= BIT(5);
+		mask |= ((raid << 28) & 0xf0000000);
+		DBG_88E("update raid entry, mask=0x%x, arg=0x%x\n", mask, arg);
+		psta->ra_mask = mask;
+
+ /* to do ,for 8188E-SMIC */
+ rtl8188e_set_raid_cmd(adapt, mask);
+ } else {
+ ODM_RA_UpdateRateInfo_8188E(&(haldata->odmpriv),
+ mac_id,
+ raid,
+ mask,
+ shortGIrate
+ );
+ }
+ /* set ra_id */
+ psta->raid = raid;
+ psta->init_rate = init_rate;
+}
+
+static void SetBeaconRelatedRegisters8188EUsb(struct adapter *adapt)
+{
+ u32 value32;
+ struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u32 bcn_ctrl_reg = REG_BCN_CTRL;
+ /* reset TSF, enable update TSF, correcting TSF On Beacon */
+
+ /* BCN interval */
+ rtw_write16(adapt, REG_BCN_INTERVAL, pmlmeinfo->bcn_interval);
+ rtw_write8(adapt, REG_ATIMWND, 0x02);/* 2ms */
+
+ _InitBeaconParameters(adapt);
+
+ rtw_write8(adapt, REG_SLOT, 0x09);
+
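+	/* Toggle TSFRST in REG_TCR (clear, then set) to reset the TSF timer. */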
+ value32 = rtw_read32(adapt, REG_TCR);
+ value32 &= ~TSFRST;
+ rtw_write32(adapt, REG_TCR, value32);
+
+ value32 |= TSFRST;
+ rtw_write32(adapt, REG_TCR, value32);
+
+ /* NOTE: Fix test chip's bug (about contention windows's randomness) */
+ rtw_write8(adapt, REG_RXTSF_OFFSET_CCK, 0x50);
+ rtw_write8(adapt, REG_RXTSF_OFFSET_OFDM, 0x50);
+
+ _BeaconFunctionEnable(adapt, true, true);
+
+ ResumeTxBeacon(adapt);
+
+ rtw_write8(adapt, bcn_ctrl_reg, rtw_read8(adapt, bcn_ctrl_reg)|BIT(1));
+}
+
+static void rtl8188eu_init_default_value(struct adapter *adapt)
+{
+ struct hal_data_8188e *haldata;
+ struct pwrctrl_priv *pwrctrlpriv;
+ u8 i;
+
+ haldata = GET_HAL_DATA(adapt);
+ pwrctrlpriv = &adapt->pwrctrlpriv;
+
+ /* init default value */
+ haldata->fw_ractrl = false;
+ if (!pwrctrlpriv->bkeepfwalive)
+ haldata->LastHMEBoxNum = 0;
+
+ /* init dm default value */
+ haldata->odmpriv.RFCalibrateInfo.bIQKInitialized = false;
+ haldata->odmpriv.RFCalibrateInfo.TM_Trigger = 0;/* for IQK */
+ haldata->pwrGroupCnt = 0;
+ haldata->PGMaxGroup = 13;
+ haldata->odmpriv.RFCalibrateInfo.ThermalValue_HP_index = 0;
+ for (i = 0; i < HP_THERMAL_NUM; i++)
+ haldata->odmpriv.RFCalibrateInfo.ThermalValue_HP[i] = 0;
+}
+
+static u8 rtl8188eu_ps_func(struct adapter *Adapter, enum hal_intf_ps_func efunc_id, u8 *val)
+{
+ u8 bResult = true;
+ return bResult;
+}
+
+void rtl8188eu_set_hal_ops(struct adapter *adapt)
+{
+ struct hal_ops *halfunc = &adapt->HalFunc;
+
+_func_enter_;
+
+ adapt->HalData = rtw_zmalloc(sizeof(struct hal_data_8188e));
+ if (adapt->HalData == NULL)
+		DBG_88E("cannot alloc memory for HAL DATA\n");
+ adapt->hal_data_sz = sizeof(struct hal_data_8188e);
+
+ halfunc->hal_power_on = rtl8188eu_InitPowerOn;
+ halfunc->hal_init = &rtl8188eu_hal_init;
+ halfunc->hal_deinit = &rtl8188eu_hal_deinit;
+
+ halfunc->inirp_init = &rtl8188eu_inirp_init;
+ halfunc->inirp_deinit = &rtl8188eu_inirp_deinit;
+
+ halfunc->init_xmit_priv = &rtl8188eu_init_xmit_priv;
+ halfunc->free_xmit_priv = &rtl8188eu_free_xmit_priv;
+
+ halfunc->init_recv_priv = &rtl8188eu_init_recv_priv;
+ halfunc->free_recv_priv = &rtl8188eu_free_recv_priv;
+ halfunc->InitSwLeds = &rtl8188eu_InitSwLeds;
+ halfunc->DeInitSwLeds = &rtl8188eu_DeInitSwLeds;
+
+ halfunc->init_default_value = &rtl8188eu_init_default_value;
+ halfunc->intf_chip_configure = &rtl8188eu_interface_configure;
+ halfunc->read_adapter_info = &ReadAdapterInfo8188EU;
+
+ halfunc->SetHwRegHandler = &SetHwReg8188EU;
+ halfunc->GetHwRegHandler = &GetHwReg8188EU;
+ halfunc->GetHalDefVarHandler = &GetHalDefVar8188EUsb;
+ halfunc->SetHalDefVarHandler = &SetHalDefVar8188EUsb;
+
+ halfunc->UpdateRAMaskHandler = &UpdateHalRAMask8188EUsb;
+ halfunc->SetBeaconRelatedRegistersHandler = &SetBeaconRelatedRegisters8188EUsb;
+
+ halfunc->hal_xmit = &rtl8188eu_hal_xmit;
+ halfunc->mgnt_xmit = &rtl8188eu_mgnt_xmit;
+
+ halfunc->interface_ps_func = &rtl8188eu_ps_func;
+
+ rtl8188e_set_hal_ops(halfunc);
+_func_exit_;
+}
diff --git a/drivers/staging/rtl8188eu/hal/usb_ops_linux.c b/drivers/staging/rtl8188eu/hal/usb_ops_linux.c
new file mode 100644
index 0000000..bc56416
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/usb_ops_linux.c
@@ -0,0 +1,726 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _HCI_OPS_OS_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <osdep_intf.h>
+#include <usb_ops.h>
+#include <recv_osdep.h>
+#include <rtl8188e_hal.h>
+
+static int usbctrl_vendorreq(struct intf_hdl *pintfhdl, u8 request, u16 value, u16 index, void *pdata, u16 len, u8 requesttype)
+{
+ struct adapter *adapt = pintfhdl->padapter;
+ struct dvobj_priv *dvobjpriv = adapter_to_dvobj(adapt);
+ struct usb_device *udev = dvobjpriv->pusbdev;
+ unsigned int pipe;
+ int status = 0;
+ u8 reqtype;
+ u8 *pIo_buf;
+ int vendorreq_times = 0;
+
+ if ((adapt->bSurpriseRemoved) || (adapt->pwrctrlpriv.pnp_bstop_trx)) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usbctrl_vendorreq:(adapt->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n"));
+ status = -EPERM;
+ goto exit;
+ }
+
+ if (len > MAX_VENDOR_REQ_CMD_SIZE) {
+ DBG_88E("[%s] Buffer len error ,vendor request failed\n", __func__);
+ status = -EINVAL;
+ goto exit;
+ }
+
+ _enter_critical_mutex(&dvobjpriv->usb_vendor_req_mutex, NULL);
+
+ /* Acquire IO memory for vendorreq */
+ pIo_buf = dvobjpriv->usb_vendor_req_buf;
+
+ if (pIo_buf == NULL) {
+ DBG_88E("[%s] pIo_buf == NULL\n", __func__);
+ status = -ENOMEM;
+ goto release_mutex;
+ }
+
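+	/* Retry the control transfer up to MAX_USBCTRL_VENDORREQ_TIMES; firmware */
+	/* download writes are never retried (see the final break in this loop). */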
+ while (++vendorreq_times <= MAX_USBCTRL_VENDORREQ_TIMES) {
+ _rtw_memset(pIo_buf, 0, len);
+
+ if (requesttype == 0x01) {
+ pipe = usb_rcvctrlpipe(udev, 0);/* read_in */
+ reqtype = REALTEK_USB_VENQT_READ;
+ } else {
+ pipe = usb_sndctrlpipe(udev, 0);/* write_out */
+ reqtype = REALTEK_USB_VENQT_WRITE;
+ memcpy(pIo_buf, pdata, len);
+ }
+
+ status = rtw_usb_control_msg(udev, pipe, request, reqtype, value, index, pIo_buf, len, RTW_USB_CONTROL_MSG_TIMEOUT);
+
+ if (status == len) { /* Success this control transfer. */
+ rtw_reset_continual_urb_error(dvobjpriv);
+ if (requesttype == 0x01)
+ memcpy(pdata, pIo_buf, len);
+ } else { /* error cases */
+ DBG_88E("reg 0x%x, usb %s %u fail, status:%d value=0x%x, vendorreq_times:%d\n",
+ value, (requesttype == 0x01) ? "read" : "write",
+ len, status, *(u32 *)pdata, vendorreq_times);
+
+ if (status < 0) {
+ if (status == (-ESHUTDOWN) || status == -ENODEV) {
+ adapt->bSurpriseRemoved = true;
+ } else {
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ haldata->srestpriv.Wifi_Error_Status = USB_VEN_REQ_CMD_FAIL;
+ }
+ } else { /* status != len && status >= 0 */
+ if (status > 0) {
+ if (requesttype == 0x01) {
+ /* For Control read transfer, we have to copy the read data from pIo_buf to pdata. */
+ memcpy(pdata, pIo_buf, len);
+ }
+ }
+ }
+
+ if (rtw_inc_and_chk_continual_urb_error(dvobjpriv)) {
+ adapt->bSurpriseRemoved = true;
+ break;
+ }
+
+ }
+
+		/* firmware download is checksummed, don't retry */
+ if ((value >= FW_8188E_START_ADDRESS && value <= FW_8188E_END_ADDRESS) || status == len)
+ break;
+ }
+release_mutex:
+ _exit_critical_mutex(&dvobjpriv->usb_vendor_req_mutex, NULL);
+exit:
+ return status;
+}
+
+static u8 usb_read8(struct intf_hdl *pintfhdl, u32 addr)
+{
+ u8 request;
+ u8 requesttype;
+ u16 wvalue;
+ u16 index;
+ u16 len;
+ u8 data = 0;
+
+ _func_enter_;
+
+ request = 0x05;
+ requesttype = 0x01;/* read_in */
+ index = 0;/* n/a */
+
+ wvalue = (u16)(addr&0x0000ffff);
+ len = 1;
+
+ usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
+
+ _func_exit_;
+
+ return data;
+
+}
+
+static u16 usb_read16(struct intf_hdl *pintfhdl, u32 addr)
+{
+ u8 request;
+ u8 requesttype;
+ u16 wvalue;
+ u16 index;
+ u16 len;
+ __le32 data;
+
+_func_enter_;
+ request = 0x05;
+ requesttype = 0x01;/* read_in */
+ index = 0;/* n/a */
+ wvalue = (u16)(addr&0x0000ffff);
+ len = 2;
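+	/* Read two bytes into a little-endian 32-bit buffer and return the low 16 bits. */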
+ usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
+_func_exit_;
+
+ return (u16)(le32_to_cpu(data)&0xffff);
+}
+
+static u32 usb_read32(struct intf_hdl *pintfhdl, u32 addr)
+{
+ u8 request;
+ u8 requesttype;
+ u16 wvalue;
+ u16 index;
+ u16 len;
+ __le32 data;
+
+_func_enter_;
+
+ request = 0x05;
+ requesttype = 0x01;/* read_in */
+ index = 0;/* n/a */
+
+ wvalue = (u16)(addr&0x0000ffff);
+ len = 4;
+
+ usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
+
+_func_exit_;
+
+ return le32_to_cpu(data);
+}
+
+static int usb_write8(struct intf_hdl *pintfhdl, u32 addr, u8 val)
+{
+ u8 request;
+ u8 requesttype;
+ u16 wvalue;
+ u16 index;
+ u16 len;
+ u8 data;
+ int ret;
+
+ _func_enter_;
+ request = 0x05;
+ requesttype = 0x00;/* write_out */
+ index = 0;/* n/a */
+ wvalue = (u16)(addr&0x0000ffff);
+ len = 1;
+ data = val;
+ ret = usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
+ _func_exit_;
+ return ret;
+}
+
+static int usb_write16(struct intf_hdl *pintfhdl, u32 addr, u16 val)
+{
+ u8 request;
+ u8 requesttype;
+ u16 wvalue;
+ u16 index;
+ u16 len;
+ __le32 data;
+ int ret;
+
+ _func_enter_;
+
+ request = 0x05;
+ requesttype = 0x00;/* write_out */
+ index = 0;/* n/a */
+
+ wvalue = (u16)(addr&0x0000ffff);
+ len = 2;
+
+ data = cpu_to_le32(val & 0x0000ffff);
+
+ ret = usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
+
+ _func_exit_;
+
+ return ret;
+}
+
+static int usb_write32(struct intf_hdl *pintfhdl, u32 addr, u32 val)
+{
+ u8 request;
+ u8 requesttype;
+ u16 wvalue;
+ u16 index;
+ u16 len;
+ __le32 data;
+ int ret;
+
+ _func_enter_;
+
+ request = 0x05;
+ requesttype = 0x00;/* write_out */
+ index = 0;/* n/a */
+
+ wvalue = (u16)(addr&0x0000ffff);
+ len = 4;
+ data = cpu_to_le32(val);
+
+ ret = usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
+
+ _func_exit_;
+
+ return ret;
+}
+
+static int usb_writeN(struct intf_hdl *pintfhdl, u32 addr, u32 length, u8 *pdata)
+{
+ u8 request;
+ u8 requesttype;
+ u16 wvalue;
+ u16 index;
+ u16 len;
+ u8 buf[VENDOR_CMD_MAX_DATA_LEN] = {0};
+ int ret;
+
+ _func_enter_;
+
+ request = 0x05;
+ requesttype = 0x00;/* write_out */
+ index = 0;/* n/a */
+
+ wvalue = (u16)(addr&0x0000ffff);
+ len = length;
+ memcpy(buf, pdata, len);
+
+ ret = usbctrl_vendorreq(pintfhdl, request, wvalue, index, buf, len, requesttype);
+
+ _func_exit_;
+
+ return ret;
+}
+
+static void interrupt_handler_8188eu(struct adapter *adapt, u16 pkt_len, u8 *pbuf)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+
+ if (pkt_len != INTERRUPT_MSG_FORMAT_LEN) {
+ DBG_88E("%s Invalid interrupt content length (%d)!\n", __func__, pkt_len);
+ return;
+ }
+
+ /* HISR */
+ memcpy(&(haldata->IntArray[0]), &(pbuf[USB_INTR_CONTENT_HISR_OFFSET]), 4);
+ memcpy(&(haldata->IntArray[1]), &(pbuf[USB_INTR_CONTENT_HISRE_OFFSET]), 4);
+
+ /* C2H Event */
+ if (pbuf[0] != 0)
+ memcpy(&(haldata->C2hArray[0]), &(pbuf[USB_INTR_CONTENT_C2H_OFFSET]), 16);
+}
+
+static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
+{
+ u8 *pbuf;
+ u8 shift_sz = 0;
+ u16 pkt_cnt;
+ u32 pkt_offset, skb_len, alloc_sz;
+ s32 transfer_len;
+ struct recv_stat *prxstat;
+ struct phy_stat *pphy_status = NULL;
+ struct sk_buff *pkt_copy = NULL;
+ union recv_frame *precvframe = NULL;
+ struct rx_pkt_attrib *pattrib = NULL;
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct recv_priv *precvpriv = &adapt->recvpriv;
+ struct __queue *pfree_recv_queue = &precvpriv->free_recv_queue;
+
+ transfer_len = (s32)pskb->len;
+ pbuf = pskb->data;
+
+ prxstat = (struct recv_stat *)pbuf;
+ pkt_cnt = (le32_to_cpu(prxstat->rxdw2) >> 16) & 0xff;
+
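+	/* Walk every packet aggregated in this USB transfer: each pass parses one rx */
+	/* descriptor, copies (or clones) the payload into a recv_frame and dispatches it. */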
+ do {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+			 ("recvbuf2recvframe: rxdesc=offset 0:0x%08x, 4:0x%08x, 8:0x%08x, C:0x%08x\n",
+ prxstat->rxdw0, prxstat->rxdw1, prxstat->rxdw2, prxstat->rxdw4));
+
+ prxstat = (struct recv_stat *)pbuf;
+
+ precvframe = rtw_alloc_recvframe(pfree_recv_queue);
+ if (precvframe == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("recvbuf2recvframe: precvframe==NULL\n"));
+ DBG_88E("%s()-%d: rtw_alloc_recvframe() failed! RX Drop!\n", __func__, __LINE__);
+ goto _exit_recvbuf2recvframe;
+ }
+
+ _rtw_init_listhead(&precvframe->u.hdr.list);
+ precvframe->u.hdr.precvbuf = NULL; /* can't access the precvbuf for new arch. */
+ precvframe->u.hdr.len = 0;
+
+ update_recvframe_attrib_88e(precvframe, prxstat);
+
+ pattrib = &precvframe->u.hdr.attrib;
+
+ if ((pattrib->crc_err) || (pattrib->icv_err)) {
+ DBG_88E("%s: RX Warning! crc_err=%d icv_err=%d, skip!\n", __func__, pattrib->crc_err, pattrib->icv_err);
+
+ rtw_free_recvframe(precvframe, pfree_recv_queue);
+ goto _exit_recvbuf2recvframe;
+ }
+
+ if ((pattrib->physt) && (pattrib->pkt_rpt_type == NORMAL_RX))
+ pphy_status = (struct phy_stat *)(pbuf + RXDESC_OFFSET);
+
+ pkt_offset = RXDESC_SIZE + pattrib->drvinfo_sz + pattrib->shift_sz + pattrib->pkt_len;
+
+ if ((pattrib->pkt_len <= 0) || (pkt_offset > transfer_len)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("recvbuf2recvframe: pkt_len<=0\n"));
+			DBG_88E("%s()-%d: RX Warning! pkt_len<=0 or pkt_offset > transfer_len\n", __func__, __LINE__);
+ rtw_free_recvframe(precvframe, pfree_recv_queue);
+ goto _exit_recvbuf2recvframe;
+ }
+
+ /* Modified by Albert 20101213 */
+ /* For 8 bytes IP header alignment. */
+ if (pattrib->qos) /* Qos data, wireless lan header length is 26 */
+ shift_sz = 6;
+ else
+ shift_sz = 0;
+
+ skb_len = pattrib->pkt_len;
+
+		/* for the first fragment packet, the driver needs to allocate 1536+drvinfo_sz+RXDESC_SIZE to defrag the packet. */
+		/* modified alloc_sz to receive crc error packets, by thomas 2011-06-02 */
+ if ((pattrib->mfrag == 1) && (pattrib->frag_num == 0)) {
+ if (skb_len <= 1650)
+ alloc_sz = 1664;
+ else
+ alloc_sz = skb_len + 14;
+ } else {
+ alloc_sz = skb_len;
+ /* 6 is for IP header 8 bytes alignment in QoS packet case. */
+ /* 8 is for skb->data 4 bytes alignment. */
+ alloc_sz += 14;
+ }
+
+ pkt_copy = netdev_alloc_skb(adapt->pnetdev, alloc_sz);
+ if (pkt_copy) {
+ pkt_copy->dev = adapt->pnetdev;
+ precvframe->u.hdr.pkt = pkt_copy;
+ precvframe->u.hdr.rx_head = pkt_copy->data;
+ precvframe->u.hdr.rx_end = pkt_copy->data + alloc_sz;
+ skb_reserve(pkt_copy, 8 - ((size_t)(pkt_copy->data) & 7));/* force pkt_copy->data at 8-byte alignment address */
+ skb_reserve(pkt_copy, shift_sz);/* force ip_hdr at 8-byte alignment address according to shift_sz. */
+ memcpy(pkt_copy->data, (pbuf + pattrib->drvinfo_sz + RXDESC_SIZE), skb_len);
+ precvframe->u.hdr.rx_tail = pkt_copy->data;
+ precvframe->u.hdr.rx_data = pkt_copy->data;
+ } else {
+ if ((pattrib->mfrag == 1) && (pattrib->frag_num == 0)) {
+				DBG_88E("recvbuf2recvframe: alloc_skb fail, drop frag frame\n");
+ rtw_free_recvframe(precvframe, pfree_recv_queue);
+ goto _exit_recvbuf2recvframe;
+ }
+ precvframe->u.hdr.pkt = skb_clone(pskb, GFP_ATOMIC);
+ if (precvframe->u.hdr.pkt) {
+ precvframe->u.hdr.rx_tail = pbuf + pattrib->drvinfo_sz + RXDESC_SIZE;
+ precvframe->u.hdr.rx_head = precvframe->u.hdr.rx_tail;
+ precvframe->u.hdr.rx_data = precvframe->u.hdr.rx_tail;
+ precvframe->u.hdr.rx_end = pbuf + pattrib->drvinfo_sz + RXDESC_SIZE + alloc_sz;
+ } else {
+ DBG_88E("recvbuf2recvframe: skb_clone fail\n");
+ rtw_free_recvframe(precvframe, pfree_recv_queue);
+ goto _exit_recvbuf2recvframe;
+ }
+ }
+
+ recvframe_put(precvframe, skb_len);
+
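+		/* Aggregated payloads are padded: round the offset to 128 bytes for DMA/mixed */
+		/* aggregation and to 4 bytes for USB aggregation before advancing to the next packet. */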
+ switch (haldata->UsbRxAggMode) {
+ case USB_RX_AGG_DMA:
+ case USB_RX_AGG_MIX:
+ pkt_offset = (u16)_RND128(pkt_offset);
+ break;
+ case USB_RX_AGG_USB:
+ pkt_offset = (u16)_RND4(pkt_offset);
+ break;
+ case USB_RX_AGG_DISABLE:
+ default:
+ break;
+ }
+ if (pattrib->pkt_rpt_type == NORMAL_RX) { /* Normal rx packet */
+ if (pattrib->physt)
+ update_recvframe_phyinfo_88e(precvframe, (struct phy_stat *)pphy_status);
+ if (rtw_recv_entry(precvframe) != _SUCCESS) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("recvbuf2recvframe: rtw_recv_entry(precvframe) != _SUCCESS\n"));
+ }
+ } else {
+ /* enqueue recvframe to txrtp queue */
+ if (pattrib->pkt_rpt_type == TX_REPORT1) {
+ /* CCX-TXRPT ack for xmit mgmt frames. */
+ handle_txrpt_ccx_88e(adapt, precvframe->u.hdr.rx_data);
+ } else if (pattrib->pkt_rpt_type == TX_REPORT2) {
+ ODM_RA_TxRPT2Handle_8188E(
+ &haldata->odmpriv,
+ precvframe->u.hdr.rx_data,
+ pattrib->pkt_len,
+ pattrib->MacIDValidEntry[0],
+ pattrib->MacIDValidEntry[1]
+ );
+ } else if (pattrib->pkt_rpt_type == HIS_REPORT) {
+ interrupt_handler_8188eu(adapt, pattrib->pkt_len, precvframe->u.hdr.rx_data);
+ }
+ rtw_free_recvframe(precvframe, pfree_recv_queue);
+ }
+ pkt_cnt--;
+ transfer_len -= pkt_offset;
+ pbuf += pkt_offset;
+ precvframe = NULL;
+ pkt_copy = NULL;
+
+ if (transfer_len > 0 && pkt_cnt == 0)
+ pkt_cnt = (le32_to_cpu(prxstat->rxdw2)>>16) & 0xff;
+
+ } while ((transfer_len > 0) && (pkt_cnt > 0));
+
+_exit_recvbuf2recvframe:
+
+ return _SUCCESS;
+}
+
+void rtl8188eu_recv_tasklet(void *priv)
+{
+ struct sk_buff *pskb;
+ struct adapter *adapt = (struct adapter *)priv;
+ struct recv_priv *precvpriv = &adapt->recvpriv;
+
+ while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) {
+ if ((adapt->bDriverStopped) || (adapt->bSurpriseRemoved)) {
+ DBG_88E("recv_tasklet => bDriverStopped or bSurpriseRemoved\n");
+ dev_kfree_skb_any(pskb);
+ break;
+ }
+ recvbuf2recvframe(adapt, pskb);
+ skb_reset_tail_pointer(pskb);
+ pskb->len = 0;
+ skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);
+ }
+}
+
+static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
+{
+ struct recv_buf *precvbuf = (struct recv_buf *)purb->context;
+ struct adapter *adapt = (struct adapter *)precvbuf->adapter;
+ struct recv_priv *precvpriv = &adapt->recvpriv;
+
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete!!!\n"));
+
+ precvpriv->rx_pending_cnt--;
+
+ if (adapt->bSurpriseRemoved || adapt->bDriverStopped || adapt->bReadPortCancel) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_read_port_complete:bDriverStopped(%d) OR bSurpriseRemoved(%d)\n",
+ adapt->bDriverStopped, adapt->bSurpriseRemoved));
+
+ precvbuf->reuse = true;
+ DBG_88E("%s() RX Warning! bDriverStopped(%d) OR bSurpriseRemoved(%d) bReadPortCancel(%d)\n",
+ __func__, adapt->bDriverStopped,
+ adapt->bSurpriseRemoved, adapt->bReadPortCancel);
+ goto exit;
+ }
+
+ if (purb->status == 0) { /* SUCCESS */
+ if ((purb->actual_length > MAX_RECVBUF_SZ) || (purb->actual_length < RXDESC_SIZE)) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_read_port_complete: (purb->actual_length > MAX_RECVBUF_SZ) || (purb->actual_length < RXDESC_SIZE)\n"));
+ precvbuf->reuse = true;
+ rtw_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
+ DBG_88E("%s()-%d: RX Warning!\n", __func__, __LINE__);
+ } else {
+ rtw_reset_continual_urb_error(adapter_to_dvobj(adapt));
+
+ precvbuf->transfer_len = purb->actual_length;
+ skb_put(precvbuf->pskb, purb->actual_length);
+ skb_queue_tail(&precvpriv->rx_skb_queue, precvbuf->pskb);
+
+ if (skb_queue_len(&precvpriv->rx_skb_queue) <= 1)
+ tasklet_schedule(&precvpriv->recv_tasklet);
+
+ precvbuf->pskb = NULL;
+ precvbuf->reuse = false;
+ rtw_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
+ }
+ } else {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete : purb->status(%d) != 0\n", purb->status));
+
+ DBG_88E("###=> usb_read_port_complete => urb status(%d)\n", purb->status);
+
+ if (rtw_inc_and_chk_continual_urb_error(adapter_to_dvobj(adapt)))
+ adapt->bSurpriseRemoved = true;
+
+ switch (purb->status) {
+ case -EINVAL:
+ case -EPIPE:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete:bSurpriseRemoved=true\n"));
+ case -ENOENT:
+ adapt->bDriverStopped = true;
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete:bDriverStopped=true\n"));
+ break;
+ case -EPROTO:
+ case -EOVERFLOW:
+ {
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ haldata->srestpriv.Wifi_Error_Status = USB_READ_PORT_FAIL;
+ }
+ precvbuf->reuse = true;
+ rtw_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
+ break;
+ case -EINPROGRESS:
+			DBG_88E("ERROR: URB IS IN PROGRESS!\n");
+ break;
+ default:
+ break;
+ }
+ }
+
+exit:
+_func_exit_;
+}
+
+static u32 usb_read_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem)
+{
+ struct urb *purb = NULL;
+ struct recv_buf *precvbuf = (struct recv_buf *)rmem;
+ struct adapter *adapter = pintfhdl->padapter;
+ struct dvobj_priv *pdvobj = adapter_to_dvobj(adapter);
+ struct recv_priv *precvpriv = &adapter->recvpriv;
+ struct usb_device *pusbd = pdvobj->pusbdev;
+ int err;
+ unsigned int pipe;
+ size_t tmpaddr = 0;
+ size_t alignment = 0;
+ u32 ret = _SUCCESS;
+
+_func_enter_;
+
+ if (adapter->bDriverStopped || adapter->bSurpriseRemoved ||
+ adapter->pwrctrlpriv.pnp_bstop_trx) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_read_port:(adapt->bDriverStopped ||adapt->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n"));
+ return _FAIL;
+ }
+
+ if ((!precvbuf->reuse) || (precvbuf->pskb == NULL)) {
+ precvbuf->pskb = skb_dequeue(&precvpriv->free_recv_skb_queue);
+ if (NULL != precvbuf->pskb)
+ precvbuf->reuse = true;
+ }
+
+ if (precvbuf != NULL) {
+ rtl8188eu_init_recvbuf(adapter, precvbuf);
+
+ /* re-assign for linux based on skb */
+ if ((!precvbuf->reuse) || (precvbuf->pskb == NULL)) {
+ precvbuf->pskb = netdev_alloc_skb(adapter->pnetdev, MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
+ if (precvbuf->pskb == NULL) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("init_recvbuf(): alloc_skb fail!\n"));
+ DBG_88E("#### usb_read_port() alloc_skb fail!#####\n");
+ return _FAIL;
+ }
+
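+			/* Align skb->data to a RECVBUFF_ALIGN_SZ boundary for the bulk-in buffer. */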
+ tmpaddr = (size_t)precvbuf->pskb->data;
+ alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
+ skb_reserve(precvbuf->pskb, (RECVBUFF_ALIGN_SZ - alignment));
+
+ precvbuf->phead = precvbuf->pskb->head;
+ precvbuf->pdata = precvbuf->pskb->data;
+ precvbuf->ptail = skb_tail_pointer(precvbuf->pskb);
+ precvbuf->pend = skb_end_pointer(precvbuf->pskb);
+ precvbuf->pbuf = precvbuf->pskb->data;
+ } else { /* reuse skb */
+ precvbuf->phead = precvbuf->pskb->head;
+ precvbuf->pdata = precvbuf->pskb->data;
+ precvbuf->ptail = skb_tail_pointer(precvbuf->pskb);
+ precvbuf->pend = skb_end_pointer(precvbuf->pskb);
+ precvbuf->pbuf = precvbuf->pskb->data;
+
+ precvbuf->reuse = false;
+ }
+
+ precvpriv->rx_pending_cnt++;
+
+ purb = precvbuf->purb;
+
+ /* translate DMA FIFO addr to pipehandle */
+ pipe = ffaddr2pipehdl(pdvobj, addr);
+
+ usb_fill_bulk_urb(purb, pusbd, pipe,
+ precvbuf->pbuf,
+ MAX_RECVBUF_SZ,
+ usb_read_port_complete,
+ precvbuf);/* context is precvbuf */
+
+ err = usb_submit_urb(purb, GFP_ATOMIC);
+ if ((err) && (err != (-EPERM))) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("cannot submit rx in-token(err=0x%.8x), URB_STATUS =0x%.8x",
+ err, purb->status));
+ DBG_88E("cannot submit rx in-token(err = 0x%08x),urb_status = %d\n",
+ err, purb->status);
+ ret = _FAIL;
+ }
+ } else {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_read_port:precvbuf ==NULL\n"));
+ ret = _FAIL;
+ }
+
+_func_exit_;
+ return ret;
+}
+
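+/*
+ * Tx bottom half: unless a scan is in progress, keep flushing pending xmit
+ * frames until none remain or the adapter is stopped/removed.
+ */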
+void rtl8188eu_xmit_tasklet(void *priv)
+{
+ int ret = false;
+ struct adapter *adapt = (struct adapter *)priv;
+ struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
+
+ if (check_fwstate(&adapt->mlmepriv, _FW_UNDER_SURVEY))
+ return;
+
+ while (1) {
+ if ((adapt->bDriverStopped) ||
+ (adapt->bSurpriseRemoved) ||
+ (adapt->bWritePortCancel)) {
+ DBG_88E("xmit_tasklet => bDriverStopped or bSurpriseRemoved or bWritePortCancel\n");
+ break;
+ }
+
+ ret = rtl8188eu_xmitframe_complete(adapt, pxmitpriv, NULL);
+
+ if (!ret)
+ break;
+ }
+}
+
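+/* Populate the generic _io_ops table with the USB register/port accessors. */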
+void rtl8188eu_set_intf_ops(struct _io_ops *pops)
+{
+ _func_enter_;
+ _rtw_memset((u8 *)pops, 0, sizeof(struct _io_ops));
+ pops->_read8 = &usb_read8;
+ pops->_read16 = &usb_read16;
+ pops->_read32 = &usb_read32;
+ pops->_read_mem = &usb_read_mem;
+ pops->_read_port = &usb_read_port;
+ pops->_write8 = &usb_write8;
+ pops->_write16 = &usb_write16;
+ pops->_write32 = &usb_write32;
+ pops->_writeN = &usb_writeN;
+ pops->_write_mem = &usb_write_mem;
+ pops->_write_port = &usb_write_port;
+ pops->_read_port_cancel = &usb_read_port_cancel;
+ pops->_write_port_cancel = &usb_write_port_cancel;
+ _func_exit_;
+}
+
+void rtl8188eu_set_hw_type(struct adapter *adapt)
+{
+ adapt->chip_type = RTL8188E;
+ adapt->HardwareType = HARDWARE_TYPE_RTL8188EU;
+ DBG_88E("CHIP TYPE: RTL8188E\n");
+}
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EFWImg_CE.h b/drivers/staging/rtl8188eu/include/Hal8188EFWImg_CE.h
new file mode 100644
index 0000000..949c33b
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/Hal8188EFWImg_CE.h
@@ -0,0 +1,28 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along with
+* this program; if not, write to the Free Software Foundation, Inc.,
+* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+*
+*
+******************************************************************************/
+#ifndef __INC_HAL8188E_FW_IMG_H
+#define __INC_HAL8188E_FW_IMG_H
+
+/* V10(1641) */
+#define Rtl8188EFWImgArrayLength 13904
+
+extern const u8 Rtl8188EFwImgArray[Rtl8188EFWImgArrayLength];
+
+#endif /* __INC_HAL8188E_FW_IMG_H */
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
new file mode 100644
index 0000000..c4769e2
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
@@ -0,0 +1,276 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __INC_HAL8188EPHYCFG_H__
+#define __INC_HAL8188EPHYCFG_H__
+
+
+/*--------------------------Define Parameters-------------------------------*/
+#define LOOP_LIMIT 5
+#define MAX_STALL_TIME 50 /* us */
+#define AntennaDiversityValue 0x80
+#define MAX_TXPWR_IDX_NMODE_92S 63
+#define Reset_Cnt_Limit 3
+
+#define IQK_MAC_REG_NUM 4
+#define IQK_ADDA_REG_NUM 16
+#define IQK_BB_REG_NUM 9
+#define HP_THERMAL_NUM 8
+
+#define MAX_AGGR_NUM 0x07
+
+
+/*--------------------------Define Parameters-------------------------------*/
+
+
+/*------------------------------Define structure----------------------------*/
+enum sw_chnl_cmd_id {
+ CmdID_End,
+ CmdID_SetTxPowerLevel,
+ CmdID_BBRegWrite10,
+ CmdID_WritePortUlong,
+ CmdID_WritePortUshort,
+ CmdID_WritePortUchar,
+ CmdID_RF_WriteReg,
+};
+
+/* 1. Switch channel related */
+struct sw_chnl_cmd {
+ enum sw_chnl_cmd_id CmdID;
+ u32 Para1;
+ u32 Para2;
+ u32 msDelay;
+};
+
+enum hw90_block {
+ HW90_BLOCK_MAC = 0,
+ HW90_BLOCK_PHY0 = 1,
+ HW90_BLOCK_PHY1 = 2,
+ HW90_BLOCK_RF = 3,
+ HW90_BLOCK_MAXIMUM = 4, /* Never use this */
+};
+
+enum rf_radio_path {
+ RF_PATH_A = 0, /* Radio Path A */
+ RF_PATH_B = 1, /* Radio Path B */
+ RF_PATH_C = 2, /* Radio Path C */
+ RF_PATH_D = 3, /* Radio Path D */
+};
+
+#define MAX_PG_GROUP 13
+
+#define RF_PATH_MAX 2
+#define MAX_RF_PATH RF_PATH_MAX
+#define MAX_TX_COUNT 4 /* path numbers */
+
+#define CHANNEL_MAX_NUMBER 14 /* 14 is the max chnl number */
+#define MAX_CHNL_GROUP_24G 6 /* ch1~2, ch3~5, ch6~8,
+				 * ch9~11, ch12~13, CH 14:
+				 * six groups in total */
+#define CHANNEL_GROUP_MAX_88E 6
+
+enum wireless_mode {
+ WIRELESS_MODE_UNKNOWN = 0x00,
+ WIRELESS_MODE_A = BIT2,
+ WIRELESS_MODE_B = BIT0,
+ WIRELESS_MODE_G = BIT1,
+ WIRELESS_MODE_AUTO = BIT5,
+ WIRELESS_MODE_N_24G = BIT3,
+ WIRELESS_MODE_N_5G = BIT4,
+ WIRELESS_MODE_AC = BIT6
+};
+
+enum phy_rate_tx_offset_area {
+ RA_OFFSET_LEGACY_OFDM1,
+ RA_OFFSET_LEGACY_OFDM2,
+ RA_OFFSET_HT_OFDM1,
+ RA_OFFSET_HT_OFDM2,
+ RA_OFFSET_HT_OFDM3,
+ RA_OFFSET_HT_OFDM4,
+ RA_OFFSET_HT_CCK,
+};
+
+/* BB/RF related */
+enum RF_TYPE_8190P {
+ RF_TYPE_MIN, /* 0 */
+ RF_8225 = 1, /* 1 11b/g RF for verification only */
+ RF_8256 = 2, /* 2 11b/g/n */
+ RF_8258 = 3, /* 3 11a/b/g/n RF */
+ RF_6052 = 4, /* 4 11b/g/n RF */
+	/* TODO: We should remove this pseudo PHY RF after we get new RF. */
+	RF_PSEUDO_11N = 5, /* 5, It is a temporary RF. */
+};
+
+struct bb_reg_def {
+ u32 rfintfs; /* set software control: */
+ /* 0x870~0x877[8 bytes] */
+ u32 rfintfi; /* readback data: */
+ /* 0x8e0~0x8e7[8 bytes] */
+ u32 rfintfo; /* output data: */
+ /* 0x860~0x86f [16 bytes] */
+ u32 rfintfe; /* output enable: */
+ /* 0x860~0x86f [16 bytes] */
+ u32 rf3wireOffset; /* LSSI data: */
+ /* 0x840~0x84f [16 bytes] */
+ u32 rfLSSI_Select; /* BB Band Select: */
+ /* 0x878~0x87f [8 bytes] */
+ u32 rfTxGainStage; /* Tx gain stage: */
+ /* 0x80c~0x80f [4 bytes] */
+ u32 rfHSSIPara1; /* wire parameter control1 : */
+ /* 0x820~0x823,0x828~0x82b,
+ * 0x830~0x833, 0x838~0x83b [16 bytes] */
+ u32 rfHSSIPara2; /* wire parameter control2 : */
+ /* 0x824~0x827,0x82c~0x82f, 0x834~0x837,
+ * 0x83c~0x83f [16 bytes] */
+ u32 rfSwitchControl; /* Tx Rx antenna control : */
+ /* 0x858~0x85f [16 bytes] */
+ u32 rfAGCControl1; /* AGC parameter control1 : */
+ /* 0xc50~0xc53,0xc58~0xc5b, 0xc60~0xc63,
+ * 0xc68~0xc6b [16 bytes] */
+ u32 rfAGCControl2; /* AGC parameter control2 : */
+ /* 0xc54~0xc57,0xc5c~0xc5f, 0xc64~0xc67,
+ * 0xc6c~0xc6f [16 bytes] */
+ u32 rfRxIQImbalance; /* OFDM Rx IQ imbalance matrix : */
+ /* 0xc14~0xc17,0xc1c~0xc1f, 0xc24~0xc27,
+ * 0xc2c~0xc2f [16 bytes] */
+	u32 rfRxAFE;		/* Rx IQ DC offset and Rx digital filter,
+ * Rx DC notch filter : */
+ /* 0xc10~0xc13,0xc18~0xc1b, 0xc20~0xc23,
+ * 0xc28~0xc2b [16 bytes] */
+ u32 rfTxIQImbalance; /* OFDM Tx IQ imbalance matrix */
+ /* 0xc80~0xc83,0xc88~0xc8b, 0xc90~0xc93,
+ * 0xc98~0xc9b [16 bytes] */
+ u32 rfTxAFE; /* Tx IQ DC Offset and Tx DFIR type */
+ /* 0xc84~0xc87,0xc8c~0xc8f, 0xc94~0xc97,
+ * 0xc9c~0xc9f [16 bytes] */
+ u32 rfLSSIReadBack; /* LSSI RF readback data SI mode */
+ /* 0x8a0~0x8af [16 bytes] */
+ u32 rfLSSIReadBackPi; /* LSSI RF readback data PI mode 0x8b8-8bc for
+ * Path A and B */
+};
+
+struct ant_sel_ofdm {
+ u32 r_tx_antenna:4;
+ u32 r_ant_l:4;
+ u32 r_ant_non_ht:4;
+ u32 r_ant_ht1:4;
+ u32 r_ant_ht2:4;
+ u32 r_ant_ht_s1:4;
+ u32 r_ant_non_ht_s1:4;
+ u32 OFDM_TXSC:2;
+ u32 reserved:2;
+};
+
+struct ant_sel_cck {
+ u8 r_cckrx_enable_2:2;
+ u8 r_cckrx_enable:2;
+ u8 r_ccktx_enable:4;
+};
+
+/*------------------------------Define structure----------------------------*/
+
+
+/*------------------------Export global variable----------------------------*/
+/*------------------------Export global variable----------------------------*/
+
+
+/*------------------------Export Macro Definition---------------------------*/
+/*------------------------Export Macro Definition---------------------------*/
+
+
+/*--------------------------Exported Function prototype---------------------*/
+/* */
+/* BB and RF register read/write */
+/* */
+u32 rtl8188e_PHY_QueryBBReg(struct adapter *adapter, u32 regaddr, u32 mask);
+void rtl8188e_PHY_SetBBReg(struct adapter *Adapter, u32 RegAddr,
+ u32 mask, u32 data);
+u32 rtl8188e_PHY_QueryRFReg(struct adapter *adapter, enum rf_radio_path rfpath,
+ u32 regaddr, u32 mask);
+void rtl8188e_PHY_SetRFReg(struct adapter *adapter, enum rf_radio_path rfpath,
+ u32 regaddr, u32 mask, u32 data);
+
+/* Initialization related function */
+/* MAC/BB/RF HAL config */
+int PHY_MACConfig8188E(struct adapter *adapter);
+int PHY_BBConfig8188E(struct adapter *adapter);
+int PHY_RFConfig8188E(struct adapter *adapter);
+
+/* RF config */
+int rtl8188e_PHY_ConfigRFWithParaFile(struct adapter *adapter, u8 *filename,
+ enum rf_radio_path rfpath);
+int rtl8188e_PHY_ConfigRFWithHeaderFile(struct adapter *adapter,
+ enum rf_radio_path rfpath);
+
+/* Read initial reg value for tx power setting. */
+void rtl8192c_PHY_GetHWRegOriginalValue(struct adapter *adapter);
+
+/* BB TX Power R/W */
+void PHY_GetTxPowerLevel8188E(struct adapter *adapter, u32 *powerlevel);
+void PHY_SetTxPowerLevel8188E(struct adapter *adapter, u8 channel);
+bool PHY_UpdateTxPowerDbm8188E(struct adapter *adapter, int power);
+
+void PHY_ScanOperationBackup8188E(struct adapter *Adapter, u8 Operation);
+
+/* Switch bandwidth for 8192S */
+void PHY_SetBWMode8188E(struct adapter *adapter,
+ enum ht_channel_width chnlwidth, unsigned char offset);
+
+/* channel switch related function */
+void PHY_SwChnl8188E(struct adapter *adapter, u8 channel);
+/* Call after initialization */
+void ChkFwCmdIoDone(struct adapter *adapter);
+
+/* BB/MAC/RF other monitor API */
+void PHY_SetRFPathSwitch_8188E(struct adapter *adapter, bool main);
+
+void PHY_SwitchEphyParameter(struct adapter *adapter);
+
+void PHY_EnableHostClkReq(struct adapter *adapter);
+
+bool SetAntennaConfig92C(struct adapter *adapter, u8 defaultant);
+
+void storePwrIndexDiffRateOffset(struct adapter *adapter, u32 regaddr,
+ u32 mask, u32 data);
+/*--------------------------Exported Function prototype---------------------*/
+
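+/*
+ * Map the generic PHY_* accessor macros onto the 8188E-specific
+ * implementations declared above.
+ */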
+#define PHY_QueryBBReg(adapt, regaddr, mask) \
+ rtl8188e_PHY_QueryBBReg((adapt), (regaddr), (mask))
+#define PHY_SetBBReg(adapt, regaddr, bitmask, data) \
+ rtl8188e_PHY_SetBBReg((adapt), (regaddr), (bitmask), (data))
+#define PHY_QueryRFReg(adapt, rfpath, regaddr, bitmask) \
+ rtl8188e_PHY_QueryRFReg((adapt), (rfpath), (regaddr), (bitmask))
+#define PHY_SetRFReg(adapt, rfpath, regaddr, bitmask, data) \
+ rtl8188e_PHY_SetRFReg((adapt), (rfpath), (regaddr), (bitmask), (data))
+
+#define PHY_SetMacReg PHY_SetBBReg
+
+#define SIC_HW_SUPPORT 0
+
+#define SIC_MAX_POLL_CNT 5
+
+#define SIC_CMD_READY 0
+#define SIC_CMD_WRITE 1
+#define SIC_CMD_READ 2
+
+#define SIC_CMD_REG 0x1EB /* 1byte */
+#define SIC_ADDR_REG 0x1E8 /* 1b9~1ba, 2 bytes */
+#define SIC_DATA_REG 0x1EC /* 1bc~1bf */
+
+#endif /* __INC_HAL8188EPHYCFG_H__ */
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
new file mode 100644
index 0000000..0e06d29
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
@@ -0,0 +1,1094 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __INC_HAL8188EPHYREG_H__
+#define __INC_HAL8188EPHYREG_H__
+/*--------------------------Define Parameters-------------------------------*/
+/* */
+/* BB-PHY register PMAC 0x100 PHY 0x800 - 0xEFF */
+/* 1. PMAC duplicate register due to connection: RF_Mode, TRxRN, NumOf L-STF */
+/* 2. 0x800/0x900/0xA00/0xC00/0xD00/0xE00 */
+/* 3. RF register 0x00-2E */
+/* 4. Bit Mask for BB/RF register */
+/* 5. Other definition for BB/RF R/W */
+/* */
+
+
+/* */
+/* 1. PMAC duplicate register due to connection: RF_Mode, TRxRN, NumOf L-STF */
+/* 1. Page1(0x100) */
+/* */
+#define rPMAC_Reset 0x100
+#define rPMAC_TxStart 0x104
+#define rPMAC_TxLegacySIG 0x108
+#define rPMAC_TxHTSIG1 0x10c
+#define rPMAC_TxHTSIG2 0x110
+#define rPMAC_PHYDebug 0x114
+#define rPMAC_TxPacketNum 0x118
+#define rPMAC_TxIdle 0x11c
+#define rPMAC_TxMACHeader0 0x120
+#define rPMAC_TxMACHeader1 0x124
+#define rPMAC_TxMACHeader2 0x128
+#define rPMAC_TxMACHeader3 0x12c
+#define rPMAC_TxMACHeader4 0x130
+#define rPMAC_TxMACHeader5 0x134
+#define rPMAC_TxDataType 0x138
+#define rPMAC_TxRandomSeed 0x13c
+#define rPMAC_CCKPLCPPreamble 0x140
+#define rPMAC_CCKPLCPHeader 0x144
+#define rPMAC_CCKCRC16 0x148
+#define rPMAC_OFDMRxCRC32OK 0x170
+#define rPMAC_OFDMRxCRC32Er 0x174
+#define rPMAC_OFDMRxParityEr 0x178
+#define rPMAC_OFDMRxCRC8Er 0x17c
+#define rPMAC_CCKCRxRC16Er 0x180
+#define rPMAC_CCKCRxRC32Er 0x184
+#define rPMAC_CCKCRxRC32OK 0x188
+#define rPMAC_TxStatus 0x18c
+
+/* 2. Page2(0x200) */
+/* The following two definition are only used for USB interface. */
+#define RF_BB_CMD_ADDR 0x02c0 /* RF/BB r/w cmd address. */
+#define RF_BB_CMD_DATA 0x02c4 /* RF/BB r/w cmd data. */
+
+/* 3. Page8(0x800) */
+#define rFPGA0_RFMOD 0x800 /* RF mode & CCK TxSC RF BW Setting */
+
+#define rFPGA0_TxInfo 0x804 /* Status report?? */
+#define rFPGA0_PSDFunction 0x808
+
+#define rFPGA0_TxGainStage 0x80c /* Set TX PWR init gain? */
+
+#define rFPGA0_RFTiming1 0x810 /* Useless now */
+#define rFPGA0_RFTiming2 0x814
+
+#define rFPGA0_XA_HSSIParameter1 0x820 /* RF 3 wire register */
+#define rFPGA0_XA_HSSIParameter2 0x824
+#define rFPGA0_XB_HSSIParameter1 0x828
+#define rFPGA0_XB_HSSIParameter2 0x82c
+
+#define rFPGA0_XA_LSSIParameter 0x840
+#define rFPGA0_XB_LSSIParameter 0x844
+
+#define rFPGA0_RFWakeUpParameter 0x850 /* Useless now */
+#define rFPGA0_RFSleepUpParameter 0x854
+
+#define rFPGA0_XAB_SwitchControl 0x858 /* RF Channel switch */
+#define rFPGA0_XCD_SwitchControl 0x85c
+
+#define rFPGA0_XA_RFInterfaceOE 0x860 /* RF Channel switch */
+#define rFPGA0_XB_RFInterfaceOE 0x864
+
+#define rFPGA0_XAB_RFInterfaceSW 0x870 /* RF Iface Software Control */
+#define rFPGA0_XCD_RFInterfaceSW 0x874
+
+#define rFPGA0_XAB_RFParameter 0x878 /* RF Parameter */
+#define rFPGA0_XCD_RFParameter 0x87c
+
+/* Crystal cap setting RF-R/W protection for parameter4?? */
+#define rFPGA0_AnalogParameter1 0x880
+#define rFPGA0_AnalogParameter2 0x884
+#define rFPGA0_AnalogParameter3 0x888
+/* enable ad/da clock1 for dual-phy */
+#define rFPGA0_AdDaClockEn 0x888
+#define rFPGA0_AnalogParameter4 0x88c
+
+#define rFPGA0_XA_LSSIReadBack	0x8a0 /* Transceiver LSSI Readback */
+#define rFPGA0_XB_LSSIReadBack 0x8a4
+#define rFPGA0_XC_LSSIReadBack 0x8a8
+#define rFPGA0_XD_LSSIReadBack 0x8ac
+
+#define rFPGA0_PSDReport 0x8b4 /* Useless now */
+/* Transceiver A HSPI Readback */
+#define TransceiverA_HSPI_Readback 0x8b8
+/* Transceiver B HSPI Readback */
+#define TransceiverB_HSPI_Readback 0x8bc
+/* Useless now RF Interface Readback Value */
+#define rFPGA0_XAB_RFInterfaceRB 0x8e0
+#define rFPGA0_XCD_RFInterfaceRB 0x8e4 /* Useless now */
+
+/* 4. Page9(0x900) */
+/* RF mode & OFDM TxSC RF BW Setting?? */
+#define rFPGA1_RFMOD 0x900
+
+#define rFPGA1_TxBlock 0x904 /* Useless now */
+#define rFPGA1_DebugSelect 0x908 /* Useless now */
+#define rFPGA1_TxInfo 0x90c /* Useless now Status report */
+
+/* 5. PageA(0xA00) */
+/* Set Control channel to upper or lower - required only for 40MHz */
+#define rCCK0_System 0xa00
+
+/* Disable init gain now Select RX path by RSSI */
+#define rCCK0_AFESetting 0xa04
+/* Disable init gain now Init gain */
+#define rCCK0_CCA 0xa08
+
+/* AGC default value, saturation level Antenna Diversity, RX AGC, LNA Threshold,
+ * RX LNA Threshold useless now. Not the same as 90 series */
+#define rCCK0_RxAGC1 0xa0c
+#define rCCK0_RxAGC2 0xa10 /* AGC & DAGC */
+
+#define rCCK0_RxHP 0xa14
+
+/* Timing recovery & Channel estimation threshold */
+#define rCCK0_DSPParameter1 0xa18
+#define rCCK0_DSPParameter2 0xa1c /* SQ threshold */
+
+#define rCCK0_TxFilter1 0xa20
+#define rCCK0_TxFilter2 0xa24
+#define rCCK0_DebugPort 0xa28 /* debug port and Tx filter3 */
+#define rCCK0_FalseAlarmReport 0xa2c /* 0xa2d useless now */
+#define rCCK0_TRSSIReport 0xa50
+#define rCCK0_RxReport 0xa54 /* 0xa57 */
+#define rCCK0_FACounterLower 0xa5c /* 0xa5b */
+#define rCCK0_FACounterUpper 0xa58 /* 0xa5c */
+
+/* */
+/* PageB(0xB00) */
+/* */
+#define rPdp_AntA 0xb00
+#define rPdp_AntA_4 0xb04
+#define rConfig_Pmpd_AntA 0xb28
+#define rConfig_AntA 0xb68
+#define rConfig_AntB 0xb6c
+#define rPdp_AntB 0xb70
+#define rPdp_AntB_4 0xb74
+#define rConfig_Pmpd_AntB 0xb98
+#define rAPK 0xbd8
+
+/* */
+/* 6. PageC(0xC00) */
+/* */
+#define rOFDM0_LSTF 0xc00
+
+#define rOFDM0_TRxPathEnable 0xc04
+#define rOFDM0_TRMuxPar 0xc08
+#define rOFDM0_TRSWIsolation 0xc0c
+
+/* RxIQ DC offset, Rx digital filter, DC notch filter */
+#define rOFDM0_XARxAFE 0xc10
+#define rOFDM0_XARxIQImbalance	0xc14 /* RxIQ imbalance matrix */
+#define rOFDM0_XBRxAFE 0xc18
+#define rOFDM0_XBRxIQImbalance 0xc1c
+#define rOFDM0_XCRxAFE 0xc20
+#define rOFDM0_XCRxIQImbalance 0xc24
+#define rOFDM0_XDRxAFE 0xc28
+#define rOFDM0_XDRxIQImbalance 0xc2c
+
+#define rOFDM0_RxDetector1 0xc30 /*PD,BW & SBD DM tune init gain*/
+#define rOFDM0_RxDetector2	0xc34 /* SBD & Frame Sync. */
+#define rOFDM0_RxDetector3 0xc38 /* Frame Sync. */
+#define rOFDM0_RxDetector4 0xc3c /* PD, SBD, Frame Sync & Short-GI */
+
+#define rOFDM0_RxDSP 0xc40 /* Rx Sync Path */
+#define rOFDM0_CFOandDAGC 0xc44 /* CFO & DAGC */
+#define rOFDM0_CCADropThreshold 0xc48 /* CCA Drop threshold */
+#define rOFDM0_ECCAThreshold 0xc4c /* energy CCA */
+
+#define rOFDM0_XAAGCCore1 0xc50 /* DIG */
+#define rOFDM0_XAAGCCore2 0xc54
+#define rOFDM0_XBAGCCore1 0xc58
+#define rOFDM0_XBAGCCore2 0xc5c
+#define rOFDM0_XCAGCCore1 0xc60
+#define rOFDM0_XCAGCCore2 0xc64
+#define rOFDM0_XDAGCCore1 0xc68
+#define rOFDM0_XDAGCCore2 0xc6c
+
+#define rOFDM0_AGCParameter1 0xc70
+#define rOFDM0_AGCParameter2 0xc74
+#define rOFDM0_AGCRSSITable 0xc78
+#define rOFDM0_HTSTFAGC 0xc7c
+
+#define rOFDM0_XATxIQImbalance 0xc80 /* TX PWR TRACK and DIG */
+#define rOFDM0_XATxAFE 0xc84
+#define rOFDM0_XBTxIQImbalance 0xc88
+#define rOFDM0_XBTxAFE 0xc8c
+#define rOFDM0_XCTxIQImbalance 0xc90
+#define rOFDM0_XCTxAFE 0xc94
+#define rOFDM0_XDTxIQImbalance 0xc98
+#define rOFDM0_XDTxAFE 0xc9c
+
+#define rOFDM0_RxIQExtAnta 0xca0
+#define rOFDM0_TxCoeff1 0xca4
+#define rOFDM0_TxCoeff2 0xca8
+#define rOFDM0_TxCoeff3 0xcac
+#define rOFDM0_TxCoeff4 0xcb0
+#define rOFDM0_TxCoeff5 0xcb4
+#define rOFDM0_TxCoeff6 0xcb8
+#define rOFDM0_RxHPParameter 0xce0
+#define rOFDM0_TxPseudoNoiseWgt 0xce4
+#define rOFDM0_FrameSync 0xcf0
+#define rOFDM0_DFSReport 0xcf4
+
+
+/* */
+/* 7. PageD(0xD00) */
+/* */
+#define rOFDM1_LSTF 0xd00
+#define rOFDM1_TRxPathEnable 0xd04
+
+#define rOFDM1_CFO 0xd08 /* No setting now */
+#define rOFDM1_CSI1 0xd10
+#define rOFDM1_SBD 0xd14
+#define rOFDM1_CSI2 0xd18
+#define rOFDM1_CFOTracking 0xd2c
+#define rOFDM1_TRxMesaure1 0xd34
+#define rOFDM1_IntfDet 0xd3c
+#define rOFDM1_PseudoNoiseStateAB 0xd50
+#define rOFDM1_PseudoNoiseStateCD 0xd54
+#define rOFDM1_RxPseudoNoiseWgt 0xd58
+
+#define rOFDM_PHYCounter1 0xda0 /* cca, parity fail */
+#define rOFDM_PHYCounter2 0xda4 /* rate illegal, crc8 fail */
+#define rOFDM_PHYCounter3 0xda8 /* MCS not support */
+
+#define rOFDM_ShortCFOAB 0xdac /* No setting now */
+#define rOFDM_ShortCFOCD 0xdb0
+#define rOFDM_LongCFOAB 0xdb4
+#define rOFDM_LongCFOCD 0xdb8
+#define rOFDM_TailCFOAB 0xdbc
+#define rOFDM_TailCFOCD 0xdc0
+#define rOFDM_PWMeasure1 0xdc4
+#define rOFDM_PWMeasure2 0xdc8
+#define rOFDM_BWReport 0xdcc
+#define rOFDM_AGCReport 0xdd0
+#define rOFDM_RxSNR 0xdd4
+#define rOFDM_RxEVMCSI 0xdd8
+#define rOFDM_SIGReport 0xddc
+
+
+/* */
+/* 8. PageE(0xE00) */
+/* */
+#define rTxAGC_A_Rate18_06 0xe00
+#define rTxAGC_A_Rate54_24 0xe04
+#define rTxAGC_A_CCK1_Mcs32 0xe08
+#define rTxAGC_A_Mcs03_Mcs00 0xe10
+#define rTxAGC_A_Mcs07_Mcs04 0xe14
+#define rTxAGC_A_Mcs11_Mcs08 0xe18
+#define rTxAGC_A_Mcs15_Mcs12 0xe1c
+
+#define rTxAGC_B_Rate18_06 0x830
+#define rTxAGC_B_Rate54_24 0x834
+#define rTxAGC_B_CCK1_55_Mcs32 0x838
+#define rTxAGC_B_Mcs03_Mcs00 0x83c
+#define rTxAGC_B_Mcs07_Mcs04 0x848
+#define rTxAGC_B_Mcs11_Mcs08 0x84c
+#define rTxAGC_B_Mcs15_Mcs12 0x868
+#define rTxAGC_B_CCK11_A_CCK2_11 0x86c
+
+#define rFPGA0_IQK 0xe28
+#define rTx_IQK_Tone_A 0xe30
+#define rRx_IQK_Tone_A 0xe34
+#define rTx_IQK_PI_A 0xe38
+#define rRx_IQK_PI_A 0xe3c
+
+#define rTx_IQK 0xe40
+#define rRx_IQK 0xe44
+#define rIQK_AGC_Pts 0xe48
+#define rIQK_AGC_Rsp 0xe4c
+#define rTx_IQK_Tone_B 0xe50
+#define rRx_IQK_Tone_B 0xe54
+#define rTx_IQK_PI_B 0xe58
+#define rRx_IQK_PI_B 0xe5c
+#define rIQK_AGC_Cont 0xe60
+
+#define rBlue_Tooth 0xe6c
+#define rRx_Wait_CCA 0xe70
+#define rTx_CCK_RFON 0xe74
+#define rTx_CCK_BBON 0xe78
+#define rTx_OFDM_RFON 0xe7c
+#define rTx_OFDM_BBON 0xe80
+#define rTx_To_Rx 0xe84
+#define rTx_To_Tx 0xe88
+#define rRx_CCK 0xe8c
+
+#define rTx_Power_Before_IQK_A 0xe94
+#define rTx_Power_After_IQK_A 0xe9c
+
+#define rRx_Power_Before_IQK_A 0xea0
+#define rRx_Power_Before_IQK_A_2 0xea4
+#define rRx_Power_After_IQK_A 0xea8
+#define rRx_Power_After_IQK_A_2 0xeac
+
+#define rTx_Power_Before_IQK_B 0xeb4
+#define rTx_Power_After_IQK_B 0xebc
+
+#define rRx_Power_Before_IQK_B 0xec0
+#define rRx_Power_Before_IQK_B_2 0xec4
+#define rRx_Power_After_IQK_B 0xec8
+#define rRx_Power_After_IQK_B_2 0xecc
+
+#define rRx_OFDM 0xed0
+#define rRx_Wait_RIFS 0xed4
+#define rRx_TO_Rx 0xed8
+#define rStandby 0xedc
+#define rSleep 0xee0
+#define rPMPD_ANAEN 0xeec
+
+/* */
+/* 7. RF Register 0x00-0x2E (RF 8256) */
+/* RF-0222D 0x00-3F */
+/* */
+/* Zebra1 */
+#define rZebra1_HSSIEnable 0x0 /* Useless now */
+#define rZebra1_TRxEnable1 0x1
+#define rZebra1_TRxEnable2 0x2
+#define rZebra1_AGC 0x4
+#define rZebra1_ChargePump 0x5
+#define rZebra1_Channel 0x7 /* RF channel switch */
+
+/* endif */
+#define rZebra1_TxGain 0x8 /* Useless now */
+#define rZebra1_TxLPF 0x9
+#define rZebra1_RxLPF 0xb
+#define rZebra1_RxHPFCorner 0xc
+
+/* Zebra4 */
+#define rGlobalCtrl 0 /* Useless now */
+#define rRTL8256_TxLPF 19
+#define rRTL8256_RxLPF 11
+
+/* RTL8258 */
+#define rRTL8258_TxLPF 0x11 /* Useless now */
+#define rRTL8258_RxLPF 0x13
+#define rRTL8258_RSSILPF 0xa
+
+/* */
+/* RL6052 Register definition */
+/* */
+#define RF_AC 0x00 /* */
+
+#define RF_IQADJ_G1 0x01 /* */
+#define RF_IQADJ_G2 0x02 /* */
+
+#define RF_POW_TRSW 0x05 /* */
+
+#define RF_GAIN_RX 0x06 /* */
+#define RF_GAIN_TX 0x07 /* */
+
+#define RF_TXM_IDAC 0x08 /* */
+#define RF_IPA_G 0x09 /* */
+#define RF_TXBIAS_G 0x0A
+#define RF_TXPA_AG 0x0B
+#define RF_IPA_A 0x0C /* */
+#define RF_TXBIAS_A 0x0D
+#define RF_BS_PA_APSET_G9_G11 0x0E
+#define RF_BS_IQGEN 0x0F /* */
+
+#define RF_MODE1 0x10 /* */
+#define RF_MODE2 0x11 /* */
+
+#define RF_RX_AGC_HP 0x12 /* */
+#define RF_TX_AGC 0x13 /* */
+#define RF_BIAS 0x14 /* */
+#define RF_IPA 0x15 /* */
+#define RF_TXBIAS 0x16
+#define RF_POW_ABILITY 0x17 /* */
+#define RF_CHNLBW 0x18 /* RF channel and BW switch */
+#define RF_TOP 0x19 /* */
+
+#define RF_RX_G1 0x1A /* */
+#define RF_RX_G2 0x1B /* */
+
+#define RF_RX_BB2 0x1C /* */
+#define RF_RX_BB1 0x1D /* */
+
+#define RF_RCK1 0x1E /* */
+#define RF_RCK2 0x1F /* */
+
+#define RF_TX_G1 0x20 /* */
+#define RF_TX_G2 0x21 /* */
+#define RF_TX_G3 0x22 /* */
+
+#define RF_TX_BB1 0x23 /* */
+
+#define RF_T_METER_92D 0x42 /* */
+#define RF_T_METER_88E 0x42 /* */
+#define RF_T_METER 0x24 /* */
+
+#define RF_SYN_G1 0x25 /* RF TX Power control */
+#define RF_SYN_G2 0x26 /* RF TX Power control */
+#define RF_SYN_G3 0x27 /* RF TX Power control */
+#define RF_SYN_G4 0x28 /* RF TX Power control */
+#define RF_SYN_G5 0x29 /* RF TX Power control */
+#define RF_SYN_G6 0x2A /* RF TX Power control */
+#define RF_SYN_G7 0x2B /* RF TX Power control */
+#define RF_SYN_G8 0x2C /* RF TX Power control */
+
+#define RF_RCK_OS 0x30 /* RF TX PA control */
+#define RF_TXPA_G1 0x31 /* RF TX PA control */
+#define RF_TXPA_G2 0x32 /* RF TX PA control */
+#define RF_TXPA_G3 0x33 /* RF TX PA control */
+#define RF_TX_BIAS_A 0x35
+#define RF_TX_BIAS_D 0x36
+#define RF_LOBF_9 0x38
+#define RF_RXRF_A3 0x3C /* */
+#define RF_TRSW 0x3F
+
+#define RF_TXRF_A2 0x41
+#define RF_TXPA_G4 0x46
+#define RF_TXPA_A4 0x4B
+#define RF_0x52 0x52
+#define RF_WE_LUT 0xEF
+
+
+/* */
+/* Bit Mask */
+/* */
+/* 1. Page1(0x100) */
+#define bBBResetB 0x100 /* Useless now? */
+#define bGlobalResetB 0x200
+#define bOFDMTxStart 0x4
+#define bCCKTxStart 0x8
+#define bCRC32Debug 0x100
+#define bPMACLoopback 0x10
+#define bTxLSIG 0xffffff
+#define bOFDMTxRate 0xf
+#define bOFDMTxReserved 0x10
+#define bOFDMTxLength 0x1ffe0
+#define bOFDMTxParity 0x20000
+#define bTxHTSIG1 0xffffff
+#define bTxHTMCSRate 0x7f
+#define bTxHTBW 0x80
+#define bTxHTLength 0xffff00
+#define bTxHTSIG2 0xffffff
+#define bTxHTSmoothing 0x1
+#define bTxHTSounding 0x2
+#define bTxHTReserved 0x4
+#define bTxHTAggreation 0x8
+#define bTxHTSTBC 0x30
+#define bTxHTAdvanceCoding 0x40
+#define bTxHTShortGI 0x80
+#define bTxHTNumberHT_LTF 0x300
+#define bTxHTCRC8 0x3fc00
+#define bCounterReset 0x10000
+#define bNumOfOFDMTx 0xffff
+#define bNumOfCCKTx 0xffff0000
+#define bTxIdleInterval 0xffff
+#define bOFDMService 0xffff0000
+#define bTxMACHeader 0xffffffff
+#define bTxDataInit 0xff
+#define bTxHTMode 0x100
+#define bTxDataType 0x30000
+#define bTxRandomSeed 0xffffffff
+#define bCCKTxPreamble 0x1
+#define bCCKTxSFD 0xffff0000
+#define bCCKTxSIG 0xff
+#define bCCKTxService 0xff00
+#define bCCKLengthExt 0x8000
+#define bCCKTxLength 0xffff0000
+#define bCCKTxCRC16 0xffff
+#define bCCKTxStatus 0x1
+#define bOFDMTxStatus 0x2
+
+#define IS_BB_REG_OFFSET_92S(_Offset) \
+ ((_Offset >= 0x800) && (_Offset <= 0xfff))
+
+/* 2. Page8(0x800) */
+#define bRFMOD 0x1 /* Reg 0x800 rFPGA0_RFMOD */
+#define bJapanMode 0x2
+#define bCCKTxSC 0x30
+#define bCCKEn 0x1000000
+#define bOFDMEn 0x2000000
+
+#define bOFDMRxADCPhase 0x10000 /* Useless now */
+#define bOFDMTxDACPhase 0x40000
+#define bXATxAGC 0x3f
+
+#define bAntennaSelect 0x0300
+
+#define bXBTxAGC 0xf00 /* Reg 80c rFPGA0_TxGainStage */
+#define bXCTxAGC 0xf000
+#define bXDTxAGC 0xf0000
+
+#define bPAStart 0xf0000000 /* Useless now */
+#define bTRStart 0x00f00000
+#define bRFStart 0x0000f000
+#define bBBStart 0x000000f0
+#define bBBCCKStart 0x0000000f
+#define bPAEnd 0xf /* Reg0x814 */
+#define bTREnd 0x0f000000
+#define bRFEnd 0x000f0000
+#define bCCAMask 0x000000f0 /* T2R */
+#define bR2RCCAMask 0x00000f00
+#define bHSSI_R2TDelay 0xf8000000
+#define bHSSI_T2RDelay 0xf80000
+#define bContTxHSSI 0x400 /* change gain at continue Tx */
+#define bIGFromCCK 0x200
+#define bAGCAddress 0x3f
+#define bRxHPTx 0x7000
+#define bRxHPT2R 0x38000
+#define bRxHPCCKIni 0xc0000
+#define bAGCTxCode 0xc00000
+#define bAGCRxCode 0x300000
+
+/* Reg 0x820~84f rFPGA0_XA_HSSIParameter1 */
+#define b3WireDataLength 0x800
+#define b3WireAddressLength 0x400
+
+#define b3WireRFPowerDown 0x1 /* Useless now */
+#define b5GPAPEPolarity 0x40000000
+#define b2GPAPEPolarity 0x80000000
+#define bRFSW_TxDefaultAnt 0x3
+#define bRFSW_TxOptionAnt 0x30
+#define bRFSW_RxDefaultAnt 0x300
+#define bRFSW_RxOptionAnt 0x3000
+#define bRFSI_3WireData 0x1
+#define bRFSI_3WireClock 0x2
+#define bRFSI_3WireLoad 0x4
+#define bRFSI_3WireRW 0x8
+#define bRFSI_3Wire 0xf
+
+#define bRFSI_RFENV 0x10 /* Reg 0x870 rFPGA0_XAB_RFInterfaceSW */
+
+#define bRFSI_TRSW 0x20 /* Useless now */
+#define bRFSI_TRSWB 0x40
+#define bRFSI_ANTSW 0x100
+#define bRFSI_ANTSWB 0x200
+#define bRFSI_PAPE 0x400
+#define bRFSI_PAPE5G 0x800
+#define bBandSelect 0x1
+#define bHTSIG2_GI 0x80
+#define bHTSIG2_Smoothing 0x01
+#define bHTSIG2_Sounding 0x02
+#define bHTSIG2_Aggreaton 0x08
+#define bHTSIG2_STBC 0x30
+#define bHTSIG2_AdvCoding 0x40
+#define bHTSIG2_NumOfHTLTF 0x300
+#define bHTSIG2_CRC8 0x3fc
+#define bHTSIG1_MCS 0x7f
+#define bHTSIG1_BandWidth 0x80
+#define bHTSIG1_HTLength 0xffff
+#define bLSIG_Rate 0xf
+#define bLSIG_Reserved 0x10
+#define bLSIG_Length 0x1fffe
+#define bLSIG_Parity 0x20
+#define bCCKRxPhase 0x4
+
+#define bLSSIReadAddress 0x7f800000 /* T65 RF */
+
+#define bLSSIReadEdge 0x80000000 /* LSSI "Read" edge signal */
+
+#define bLSSIReadBackData 0xfffff /* T65 RF */
+
+#define bLSSIReadOKFlag 0x1000 /* Useless now */
+#define bCCKSampleRate 0x8 /* 0: 44MHz, 1:88MHz */
+#define bRegulator0Standby 0x1
+#define bRegulatorPLLStandby 0x2
+#define bRegulator1Standby 0x4
+#define bPLLPowerUp 0x8
+#define bDPLLPowerUp 0x10
+#define bDA10PowerUp 0x20
+#define bAD7PowerUp 0x200
+#define bDA6PowerUp 0x2000
+#define bXtalPowerUp 0x4000
+#define b40MDClkPowerUP 0x8000
+#define bDA6DebugMode 0x20000
+#define bDA6Swing 0x380000
+
+/* Reg 0x880 rFPGA0_AnalogParameter1 20/40 CCK support switch 40/80 BB MHZ */
+#define bADClkPhase 0x4000000
+
+#define b80MClkDelay 0x18000000 /* Useless */
+#define bAFEWatchDogEnable 0x20000000
+
+/* Reg 0x884 rFPGA0_AnalogParameter2 Crystal cap */
+#define bXtalCap01 0xc0000000
+#define bXtalCap23 0x3
+#define bXtalCap92x 0x0f000000
+#define bXtalCap 0x0f000000
+
+#define bIntDifClkEnable 0x400 /* Useless */
+#define bExtSigClkEnable 0x800
+#define bBandgapMbiasPowerUp 0x10000
+#define bAD11SHGain 0xc0000
+#define bAD11InputRange 0x700000
+#define bAD11OPCurrent 0x3800000
+#define bIPathLoopback 0x4000000
+#define bQPathLoopback 0x8000000
+#define bAFELoopback 0x10000000
+#define bDA10Swing 0x7e0
+#define bDA10Reverse 0x800
+#define bDAClkSource 0x1000
+#define bAD7InputRange 0x6000
+#define bAD7Gain 0x38000
+#define bAD7OutputCMMode 0x40000
+#define bAD7InputCMMode 0x380000
+#define bAD7Current 0xc00000
+#define bRegulatorAdjust 0x7000000
+#define bAD11PowerUpAtTx 0x1
+#define bDA10PSAtTx 0x10
+#define bAD11PowerUpAtRx 0x100
+#define bDA10PSAtRx 0x1000
+#define bCCKRxAGCFormat 0x200
+#define bPSDFFTSamplepPoint 0xc000
+#define bPSDAverageNum 0x3000
+#define bIQPathControl 0xc00
+#define bPSDFreq 0x3ff
+#define bPSDAntennaPath 0x30
+#define bPSDIQSwitch 0x40
+#define bPSDRxTrigger 0x400000
+#define bPSDTxTrigger 0x80000000
+#define bPSDSineToneScale 0x7f000000
+#define bPSDReport 0xffff
+
+/* 3. Page9(0x900) */
+#define bOFDMTxSC 0x30000000 /* Useless */
+#define bCCKTxOn 0x1
+#define bOFDMTxOn 0x2
+#define bDebugPage 0xfff /* reset debug page and HWord, LWord */
+#define bDebugItem 0xff /* reset debug page and LWord */
+#define bAntL 0x10
+#define bAntNonHT 0x100
+#define bAntHT1 0x1000
+#define bAntHT2 0x10000
+#define bAntHT1S1 0x100000
+#define bAntNonHTS1 0x1000000
+
+/* 4. PageA(0xA00) */
+#define bCCKBBMode 0x3 /* Useless */
+#define bCCKTxPowerSaving 0x80
+#define bCCKRxPowerSaving 0x40
+
+#define bCCKSideBand 0x10 /* Reg 0xa00 rCCK0_System 20/40 */
+
+#define bCCKScramble 0x8 /* Useless */
+#define bCCKAntDiversity 0x8000
+#define bCCKCarrierRecovery 0x4000
+#define bCCKTxRate 0x3000
+#define bCCKDCCancel 0x0800
+#define bCCKISICancel 0x0400
+#define bCCKMatchFilter 0x0200
+#define bCCKEqualizer 0x0100
+#define bCCKPreambleDetect 0x800000
+#define bCCKFastFalseCCA 0x400000
+#define bCCKChEstStart 0x300000
+#define bCCKCCACount 0x080000
+#define bCCKcs_lim 0x070000
+#define bCCKBistMode 0x80000000
+#define bCCKCCAMask 0x40000000
+#define bCCKTxDACPhase 0x4
+#define bCCKRxADCPhase 0x20000000 /* r_rx_clk */
+#define bCCKr_cp_mode0 0x0100
+#define bCCKTxDCOffset 0xf0
+#define bCCKRxDCOffset 0xf
+#define bCCKCCAMode 0xc000
+#define bCCKFalseCS_lim 0x3f00
+#define bCCKCS_ratio 0xc00000
+#define bCCKCorgBit_sel 0x300000
+#define bCCKPD_lim 0x0f0000
+#define bCCKNewCCA 0x80000000
+#define bCCKRxHPofIG 0x8000
+#define bCCKRxIG 0x7f00
+#define bCCKLNAPolarity 0x800000
+#define bCCKRx1stGain 0x7f0000
+#define bCCKRFExtend	0x20000000 /* CCK Rx initial gain polarity */
+#define bCCKRxAGCSatLevel 0x1f000000
+#define bCCKRxAGCSatCount 0xe0
+#define bCCKRxRFSettle 0x1f /* AGCsamp_dly */
+#define bCCKFixedRxAGC 0x8000
+#define bCCKAntennaPolarity 0x2000
+#define bCCKTxFilterType 0x0c00
+#define bCCKRxAGCReportType 0x0300
+#define bCCKRxDAGCEn 0x80000000
+#define bCCKRxDAGCPeriod 0x20000000
+#define bCCKRxDAGCSatLevel 0x1f000000
+#define bCCKTimingRecovery 0x800000
+#define bCCKTxC0 0x3f0000
+#define bCCKTxC1 0x3f000000
+#define bCCKTxC2 0x3f
+#define bCCKTxC3 0x3f00
+#define bCCKTxC4 0x3f0000
+#define bCCKTxC5 0x3f000000
+#define bCCKTxC6 0x3f
+#define bCCKTxC7 0x3f00
+#define bCCKDebugPort 0xff0000
+#define bCCKDACDebug 0x0f000000
+#define bCCKFalseAlarmEnable 0x8000
+#define bCCKFalseAlarmRead 0x4000
+#define bCCKTRSSI 0x7f
+#define bCCKRxAGCReport 0xfe
+#define bCCKRxReport_AntSel 0x80000000
+#define bCCKRxReport_MFOff 0x40000000
+#define bCCKRxRxReport_SQLoss 0x20000000
+#define bCCKRxReport_Pktloss 0x10000000
+#define bCCKRxReport_Lockedbit 0x08000000
+#define bCCKRxReport_RateError 0x04000000
+#define bCCKRxReport_RxRate 0x03000000
+#define bCCKRxFACounterLower 0xff
+#define bCCKRxFACounterUpper 0xff000000
+#define bCCKRxHPAGCStart 0xe000
+#define bCCKRxHPAGCFinal 0x1c00
+#define bCCKRxFalseAlarmEnable 0x8000
+#define bCCKFACounterFreeze 0x4000
+#define bCCKTxPathSel 0x10000000
+#define bCCKDefaultRxPath 0xc000000
+#define bCCKOptionRxPath 0x3000000
+
+/* 5. PageC(0xC00) */
+#define bNumOfSTF 0x3 /* Useless */
+#define bShift_L 0xc0
+#define bGI_TH 0xc
+#define bRxPathA 0x1
+#define bRxPathB 0x2
+#define bRxPathC 0x4
+#define bRxPathD 0x8
+#define bTxPathA 0x1
+#define bTxPathB 0x2
+#define bTxPathC 0x4
+#define bTxPathD 0x8
+#define bTRSSIFreq 0x200
+#define bADCBackoff 0x3000
+#define bDFIRBackoff 0xc000
+#define bTRSSILatchPhase 0x10000
+#define bRxIDCOffset 0xff
+#define bRxQDCOffset 0xff00
+#define bRxDFIRMode 0x1800000
+#define bRxDCNFType 0xe000000
+#define bRXIQImb_A 0x3ff
+#define bRXIQImb_B 0xfc00
+#define bRXIQImb_C 0x3f0000
+#define bRXIQImb_D 0xffc00000
+#define bDC_dc_Notch 0x60000
+#define bRxNBINotch 0x1f000000
+#define bPD_TH 0xf
+#define bPD_TH_Opt2 0xc000
+#define bPWED_TH 0x700
+#define bIfMF_Win_L 0x800
+#define bPD_Option 0x1000
+#define bMF_Win_L 0xe000
+#define bBW_Search_L 0x30000
+#define bwin_enh_L 0xc0000
+#define bBW_TH 0x700000
+#define bED_TH2 0x3800000
+#define bBW_option 0x4000000
+#define bRatio_TH 0x18000000
+#define bWindow_L 0xe0000000
+#define bSBD_Option 0x1
+#define bFrame_TH 0x1c
+#define bFS_Option 0x60
+#define bDC_Slope_check 0x80
+#define bFGuard_Counter_DC_L 0xe00
+#define bFrame_Weight_Short 0x7000
+#define bSub_Tune 0xe00000
+#define bFrame_DC_Length 0xe000000
+#define bSBD_start_offset 0x30000000
+#define bFrame_TH_2 0x7
+#define bFrame_GI2_TH 0x38
+#define bGI2_Sync_en 0x40
+#define bSarch_Short_Early 0x300
+#define bSarch_Short_Late 0xc00
+#define bSarch_GI2_Late 0x70000
+#define bCFOAntSum 0x1
+#define bCFOAcc 0x2
+#define bCFOStartOffset 0xc
+#define bCFOLookBack 0x70
+#define bCFOSumWeight 0x80
+#define bDAGCEnable 0x10000
+#define bTXIQImb_A 0x3ff
+#define bTXIQImb_B 0xfc00
+#define bTXIQImb_C 0x3f0000
+#define bTXIQImb_D 0xffc00000
+#define bTxIDCOffset 0xff
+#define bTxQDCOffset 0xff00
+#define bTxDFIRMode 0x10000
+#define bTxPesudoNoiseOn 0x4000000
+#define bTxPesudoNoise_A 0xff
+#define bTxPesudoNoise_B 0xff00
+#define bTxPesudoNoise_C 0xff0000
+#define bTxPesudoNoise_D 0xff000000
+#define bCCADropOption 0x20000
+#define bCCADropThres 0xfff00000
+#define bEDCCA_H 0xf
+#define bEDCCA_L 0xf0
+#define bLambda_ED 0x300
+#define bRxInitialGain 0x7f
+#define bRxAntDivEn 0x80
+#define bRxAGCAddressForLNA 0x7f00
+#define bRxHighPowerFlow 0x8000
+#define bRxAGCFreezeThres 0xc0000
+#define bRxFreezeStep_AGC1 0x300000
+#define bRxFreezeStep_AGC2 0xc00000
+#define bRxFreezeStep_AGC3 0x3000000
+#define bRxFreezeStep_AGC0 0xc000000
+#define bRxRssi_Cmp_En 0x10000000
+#define bRxQuickAGCEn 0x20000000
+#define bRxAGCFreezeThresMode 0x40000000
+#define bRxOverFlowCheckType 0x80000000
+#define bRxAGCShift 0x7f
+#define bTRSW_Tri_Only 0x80
+#define bPowerThres 0x300
+#define bRxAGCEn 0x1
+#define bRxAGCTogetherEn 0x2
+#define bRxAGCMin 0x4
+#define bRxHP_Ini 0x7
+#define bRxHP_TRLNA 0x70
+#define bRxHP_RSSI 0x700
+#define bRxHP_BBP1 0x7000
+#define bRxHP_BBP2 0x70000
+#define bRxHP_BBP3 0x700000
+#define bRSSI_H 0x7f0000 /* threshold for high power */
+#define bRSSI_Gen 0x7f000000 /* threshold for ant diversity */
+#define bRxSettle_TRSW 0x7
+#define bRxSettle_LNA 0x38
+#define bRxSettle_RSSI 0x1c0
+#define bRxSettle_BBP 0xe00
+#define bRxSettle_RxHP 0x7000
+#define bRxSettle_AntSW_RSSI 0x38000
+#define bRxSettle_AntSW 0xc0000
+#define bRxProcessTime_DAGC 0x300000
+#define bRxSettle_HSSI 0x400000
+#define bRxProcessTime_BBPPW 0x800000
+#define bRxAntennaPowerShift 0x3000000
+#define bRSSITableSelect 0xc000000
+#define bRxHP_Final 0x7000000
+#define bRxHTSettle_BBP 0x7
+#define bRxHTSettle_HSSI 0x8
+#define bRxHTSettle_RxHP 0x70
+#define bRxHTSettle_BBPPW 0x80
+#define bRxHTSettle_Idle 0x300
+#define bRxHTSettle_Reserved 0x1c00
+#define bRxHTRxHPEn 0x8000
+#define bRxHTAGCFreezeThres 0x30000
+#define bRxHTAGCTogetherEn 0x40000
+#define bRxHTAGCMin 0x80000
+#define bRxHTAGCEn 0x100000
+#define bRxHTDAGCEn 0x200000
+#define bRxHTRxHP_BBP 0x1c00000
+#define bRxHTRxHP_Final 0xe0000000
+#define bRxPWRatioTH 0x3
+#define bRxPWRatioEn 0x4
+#define bRxMFHold 0x3800
+#define bRxPD_Delay_TH1 0x38
+#define bRxPD_Delay_TH2 0x1c0
+#define bRxPD_DC_COUNT_MAX 0x600
+#define bRxPD_Delay_TH 0x8000
+#define bRxProcess_Delay 0xf0000
+#define bRxSearchrange_GI2_Early 0x700000
+#define bRxFrame_Guard_Counter_L 0x3800000
+#define bRxSGI_Guard_L 0xc000000
+#define bRxSGI_Search_L 0x30000000
+#define bRxSGI_TH 0xc0000000
+#define bDFSCnt0 0xff
+#define bDFSCnt1 0xff00
+#define bDFSFlag 0xf0000
+#define bMFWeightSum 0x300000
+#define bMinIdxTH 0x7f000000
+#define bDAFormat 0x40000
+#define bTxChEmuEnable 0x01000000
+#define bTRSWIsolation_A 0x7f
+#define bTRSWIsolation_B 0x7f00
+#define bTRSWIsolation_C 0x7f0000
+#define bTRSWIsolation_D 0x7f000000
+#define bExtLNAGain 0x7c00
+
+/* 6. PageE(0xE00) */
+#define bSTBCEn 0x4 /* Useless */
+#define bAntennaMapping 0x10
+#define bNss 0x20
+#define bCFOAntSumD 0x200
+#define bPHYCounterReset 0x8000000
+#define bCFOReportGet 0x4000000
+#define bOFDMContinueTx 0x10000000
+#define bOFDMSingleCarrier 0x20000000
+#define bOFDMSingleTone 0x40000000
+#define bHTDetect 0x100
+#define bCFOEn 0x10000
+#define bCFOValue 0xfff00000
+#define bSigTone_Re 0x3f
+#define bSigTone_Im 0x7f00
+#define bCounter_CCA 0xffff
+#define bCounter_ParityFail 0xffff0000
+#define bCounter_RateIllegal 0xffff
+#define bCounter_CRC8Fail 0xffff0000
+#define bCounter_MCSNoSupport 0xffff
+#define bCounter_FastSync 0xffff
+#define bShortCFO 0xfff
+#define bShortCFOTLength 12 /* total */
+#define bShortCFOFLength 11 /* fraction */
+#define bLongCFO 0x7ff
+#define bLongCFOTLength 11
+#define bLongCFOFLength 11
+#define bTailCFO 0x1fff
+#define bTailCFOTLength 13
+#define bTailCFOFLength 12
+#define bmax_en_pwdB 0xffff
+#define bCC_power_dB 0xffff0000
+#define bnoise_pwdB 0xffff
+#define bPowerMeasTLength 10
+#define bPowerMeasFLength 3
+#define bRx_HT_BW 0x1
+#define bRxSC 0x6
+#define bRx_HT 0x8
+#define bNB_intf_det_on 0x1
+#define bIntf_win_len_cfg 0x30
+#define bNB_Intf_TH_cfg 0x1c0
+#define bRFGain 0x3f
+#define bTableSel 0x40
+#define bTRSW 0x80
+#define bRxSNR_A 0xff
+#define bRxSNR_B 0xff00
+#define bRxSNR_C 0xff0000
+#define bRxSNR_D 0xff000000
+#define bSNREVMTLength 8
+#define bSNREVMFLength 1
+#define bCSI1st 0xff
+#define bCSI2nd 0xff00
+#define bRxEVM1st 0xff0000
+#define bRxEVM2nd 0xff000000
+#define bSIGEVM 0xff
+#define bPWDB 0xff00
+#define bSGIEN 0x10000
+
+#define bSFactorQAM1 0xf /* Useless */
+#define bSFactorQAM2 0xf0
+#define bSFactorQAM3 0xf00
+#define bSFactorQAM4 0xf000
+#define bSFactorQAM5 0xf0000
+#define bSFactorQAM6 0xf0000
+#define bSFactorQAM7 0xf00000
+#define bSFactorQAM8 0xf000000
+#define bSFactorQAM9 0xf0000000
+#define bCSIScheme 0x100000
+
+#define bNoiseLvlTopSet 0x3 /* Useless */
+#define bChSmooth 0x4
+#define bChSmoothCfg1 0x38
+#define bChSmoothCfg2 0x1c0
+#define bChSmoothCfg3 0xe00
+#define bChSmoothCfg4 0x7000
+#define bMRCMode 0x800000
+#define bTHEVMCfg 0x7000000
+
+#define bLoopFitType 0x1 /* Useless */
+#define bUpdCFO 0x40
+#define bUpdCFOOffData 0x80
+#define bAdvUpdCFO 0x100
+#define bAdvTimeCtrl 0x800
+#define bUpdClko 0x1000
+#define bFC 0x6000
+#define bTrackingMode 0x8000
+#define bPhCmpEnable 0x10000
+#define bUpdClkoLTF 0x20000
+#define bComChCFO 0x40000
+#define bCSIEstiMode 0x80000
+#define bAdvUpdEqz 0x100000
+#define bUChCfg 0x7000000
+#define bUpdEqz 0x8000000
+
+/* Rx Pseudo noise */
+#define bRxPesudoNoiseOn 0x20000000 /* Useless */
+#define bRxPesudoNoise_A 0xff
+#define bRxPesudoNoise_B 0xff00
+#define bRxPesudoNoise_C 0xff0000
+#define bRxPesudoNoise_D 0xff000000
+#define bPesudoNoiseState_A 0xffff
+#define bPesudoNoiseState_B 0xffff0000
+#define bPesudoNoiseState_C 0xffff
+#define bPesudoNoiseState_D 0xffff0000
+
+/* 7. RF Register */
+/* Zebra1 */
+#define bZebra1_HSSIEnable 0x8 /* Useless */
+#define bZebra1_TRxControl 0xc00
+#define bZebra1_TRxGainSetting 0x07f
+#define bZebra1_RxCorner 0xc00
+#define bZebra1_TxChargePump 0x38
+#define bZebra1_RxChargePump 0x7
+#define bZebra1_ChannelNum 0xf80
+#define bZebra1_TxLPFBW 0x400
+#define bZebra1_RxLPFBW 0x600
+
+/* Zebra4 */
+#define bRTL8256RegModeCtrl1 0x100 /* Useless */
+#define bRTL8256RegModeCtrl0 0x40
+#define bRTL8256_TxLPFBW 0x18
+#define bRTL8256_RxLPFBW 0x600
+
+/* RTL8258 */
+#define bRTL8258_TxLPFBW 0xc /* Useless */
+#define bRTL8258_RxLPFBW 0xc00
+#define bRTL8258_RSSILPFBW 0xc0
+
+
+/* */
+/* Other Definition */
+/* */
+
+/* byte enable for sb_write */
+#define bByte0 0x1 /* Useless */
+#define bByte1 0x2
+#define bByte2 0x4
+#define bByte3 0x8
+#define bWord0 0x3
+#define bWord1 0xc
+#define bDWord 0xf
+
+/* for PutRegsetting & GetRegSetting BitMask */
+#define bMaskByte0 0xff /* Reg 0xc50 rOFDM0_XAAGCCore~0xC6f */
+#define bMaskByte1 0xff00
+#define bMaskByte2 0xff0000
+#define bMaskByte3 0xff000000
+#define bMaskHWord 0xffff0000
+#define bMaskLWord 0x0000ffff
+#define bMaskDWord 0xffffffff
+#define bMask12Bits 0xfff
+#define bMaskH4Bits 0xf0000000
+#define bMaskOFDM_D 0xffc00000
+#define bMaskCCK 0x3f3f3f3f
+
+/* for PutRFRegsetting & GetRFRegSetting BitMask */
+#define bRFRegOffsetMask 0xfffff
+
+#define bEnable 0x1 /* Useless */
+#define bDisable 0x0
+
+#define LeftAntenna 0x0 /* Useless */
+#define RightAntenna 0x1
+
+#define tCheckTxStatus 500 /* 500ms Useless */
+#define tUpdateRxCounter 100 /* 100ms */
+
+#define rateCCK 0 /* Useless */
+#define rateOFDM 1
+#define rateHT 2
+
+/* define Register-End */
+#define bPMAC_End 0x1ff /* Useless */
+#define bFPGAPHY0_End 0x8ff
+#define bFPGAPHY1_End 0x9ff
+#define bCCKPHY0_End 0xaff
+#define bOFDMPHY0_End 0xcff
+#define bOFDMPHY1_End 0xdff
+
+#define bPMACControl 0x0 /* Useless */
+#define bWMACControl 0x1
+#define bWNICControl 0x2
+
+#define PathA 0x0 /* Useless */
+#define PathB 0x1
+#define PathC 0x2
+#define PathD 0x3
+
+/*--------------------------Define Parameters-------------------------------*/
+
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPwrSeq.h b/drivers/staging/rtl8188eu/include/Hal8188EPwrSeq.h
new file mode 100644
index 0000000..20d0b3e
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPwrSeq.h
@@ -0,0 +1,176 @@
+
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#ifndef __HAL8188EPWRSEQ_H__
+#define __HAL8188EPWRSEQ_H__
+
+#include "HalPwrSeqCmd.h"
+
+/*
+ Check document WM-20110607-Paul-RTL8188E_Power_Architecture-R02.vsd
+ There are 6 HW Power States:
+ 0: POFF--Power Off
+ 1: PDN--Power Down
+ 2: CARDEMU--Card Emulation
+ 3: ACT--Active Mode
+ 4: LPS--Low Power State
+ 5: SUS--Suspend
+
+	The transitions between different states are defined below
+ TRANS_CARDEMU_TO_ACT
+ TRANS_ACT_TO_CARDEMU
+ TRANS_CARDEMU_TO_SUS
+ TRANS_SUS_TO_CARDEMU
+ TRANS_CARDEMU_TO_PDN
+ TRANS_ACT_TO_LPS
+ TRANS_LPS_TO_ACT
+
+ TRANS_END
+
+ PWR SEQ Version: rtl8188E_PwrSeq_V09.h
+*/
+#define RTL8188E_TRANS_CARDEMU_TO_ACT_STEPS 10
+#define RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS 10
+#define RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS 10
+#define RTL8188E_TRANS_SUS_TO_CARDEMU_STEPS 10
+#define RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS 10
+#define RTL8188E_TRANS_PDN_TO_CARDEMU_STEPS 10
+#define RTL8188E_TRANS_ACT_TO_LPS_STEPS 15
+#define RTL8188E_TRANS_LPS_TO_ACT_STEPS 15
+#define RTL8188E_TRANS_END_STEPS 1
+
+
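+/*
+ * Each RTL8188E_TRANS_* macro below expands to a list of wl_pwr_cfg entry
+ * initializers; the *_STEPS counts above bound the sizes of the power flow
+ * arrays declared at the end of this file.
+ */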
+#define RTL8188E_TRANS_CARDEMU_TO_ACT \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT1, BIT1},/* wait till 0x04[17] = 1 power ready*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0|BIT1, 0}, /* 0x02[1:0] = 0 reset BB*/ \
+	{0x0026, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, BIT7}, /*0x24[23] = 2b'01 Schmitt trigger */ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, 0}, /* 0x04[15] = 0 disable HWPDN (control by DRV)*/\
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4|BIT3, 0}, /*0x04[12:11] = 2b'00 disable WL suspend*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0}, /*0x04[8] = 1 polling until return 0*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT0, 0}, /*wait till 0x04[8] = 0*/ \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, 0}, /*LDO normal mode*/ \
+ {0x0074, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, BIT4}, /*SDIO Driving*/ \
+
+#define RTL8188E_TRANS_ACT_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0},/*0x1F[7:0] = 0 turn off RF*/ \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, BIT4}, /*LDO Sleep mode*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, BIT1}, /*0x04[9] = 1 turn off MAC by HW state machine*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT1, 0}, /*wait till 0x04[9] = 0 polling until return 0 to disable*/ \
+
+#define RTL8188E_TRANS_CARDEMU_TO_SUS \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, BIT3}, /*0x04[12:11] = 2b'01enable WL suspend*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, BIT3|BIT4}, /*0x04[12:11] = 2b'11enable WL suspend for PCIe*/ \
+	{0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, BIT7}, /* 0x04[31:30] = 2b'10 enable bandgap mbias in suspend */ \
+ {0x0041, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, 0}, /*Clear SIC_EN register 0x40[12] = 1'b0 */ \
+ {0xfe10, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, BIT4}, /*Set USB suspend enable local register 0xfe10[4]=1 */ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT0, BIT0}, /*Set SDIO suspend local register*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT1, 0}, /*wait power state to suspend*/
+
+#define RTL8188E_TRANS_SUS_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT0, 0}, /*Set SDIO suspend local register*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT1, BIT1}, /*wait power state to suspend*/\
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, 0}, /*0x04[12:11] = 2b'01enable WL suspend*/
+
+#define RTL8188E_TRANS_CARDEMU_TO_CARDDIS \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+	{0x0026, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, BIT7}, /*0x24[23] = 2b'01 Schmitt trigger */ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, BIT3}, /*0x04[12:11] = 2b'01 enable WL suspend*/ \
+	{0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, /* 0x04[31:30] = 2b'10 enable bandgap mbias in suspend */ \
+ {0x0041, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, 0}, /*Clear SIC_EN register 0x40[12] = 1'b0 */ \
+ {0xfe10, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, BIT4}, /*Set USB suspend enable local register 0xfe10[4]=1 */ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT0, BIT0}, /*Set SDIO suspend local register*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT1, 0}, /*wait power state to suspend*/
+
+#define RTL8188E_TRANS_CARDDIS_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT0, 0}, /*Set SDIO suspend local register*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT1, BIT1}, /*wait power state to suspend*/\
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, 0}, /*0x04[12:11] = 2b'01enable WL suspend*/
+
+#define RTL8188E_TRANS_CARDEMU_TO_PDN \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 0},/* 0x04[16] = 0*/\
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, BIT7},/* 0x04[15] = 1*/
+
+#define RTL8188E_TRANS_PDN_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here */ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, 0},/* 0x04[15] = 0*/
+
+/* This is used by driver for LPSRadioOff Procedure, not for FW LPS Step */
+#define RTL8188E_TRANS_ACT_TO_LPS \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here */ \
+ {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x7F},/*Tx Pause*/ \
+ {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 0},/*CCK and OFDM are disabled,and clock are gated*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US},/*Delay 1us*/ \
+ {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x3F},/*Reset MAC TRX*/ \
+ {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, 0},/*check if removed later*/ \
+ {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT5, BIT5},/*Respond TxOK to scheduler*/ \
+
+
+#define RTL8188E_TRANS_LPS_TO_ACT \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here */ \
+ {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, 0xFF, 0x84}, /*SDIO RPWM*/\
+ {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, /*USB RPWM*/\
+ {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, /*PCIe RPWM*/\
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, /*Delay*/\
+ {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, 0}, /*. 0x08[4] = 0 switch TSF to 40M*/\
+ {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT7, 0}, /*Polling 0x109[7]=0 TSF in 40M*/\
+ {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT6|BIT7, 0}, /*. 0x29[7:6] = 2b'00 enable BB clock*/\
+ {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, BIT1}, /*. 0x101[1] = 1*/\
+ {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, /*. 0x100[7:0] = 0xFF enable WMAC TRX*/\
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1|BIT0, BIT1|BIT0}, /*. 0x02[1:0] = 2b'11 enable BB macro*/\
+ {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, /*. 0x522 = 0*/
+
+#define RTL8188E_TRANS_END \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, 0, PWR_CMD_END, 0, 0}, /* */
+
+
+extern struct wl_pwr_cfg rtl8188E_power_on_flow[RTL8188E_TRANS_CARDEMU_TO_ACT_STEPS+RTL8188E_TRANS_END_STEPS];
+extern struct wl_pwr_cfg rtl8188E_radio_off_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS+RTL8188E_TRANS_END_STEPS];
+extern struct wl_pwr_cfg rtl8188E_card_disable_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS+RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS+RTL8188E_TRANS_END_STEPS];
+extern struct wl_pwr_cfg rtl8188E_card_enable_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS+RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS+RTL8188E_TRANS_END_STEPS];
+extern struct wl_pwr_cfg rtl8188E_suspend_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS+RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS+RTL8188E_TRANS_END_STEPS];
+extern struct wl_pwr_cfg rtl8188E_resume_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS+RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS+RTL8188E_TRANS_END_STEPS];
+extern struct wl_pwr_cfg rtl8188E_hwpdn_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS+RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS+RTL8188E_TRANS_END_STEPS];
+extern struct wl_pwr_cfg rtl8188E_enter_lps_flow[RTL8188E_TRANS_ACT_TO_LPS_STEPS+RTL8188E_TRANS_END_STEPS];
+extern struct wl_pwr_cfg rtl8188E_leave_lps_flow[RTL8188E_TRANS_LPS_TO_ACT_STEPS+RTL8188E_TRANS_END_STEPS];
+
+#endif /* __HAL8188EPWRSEQ_H__ */
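The flow tables declared above are filled in by pasting the transition macros end to end in a companion source file that is not part of this hunk. Below is a minimal sketch of that pattern, assuming the RTL8188E_TRANS_CARDEMU_TO_ACT macro defined earlier in this header; it is illustrative only and not the patch's own code.

/* Sketch: each transition macro expands to a comma-terminated list of entries. */
struct wl_pwr_cfg rtl8188E_power_on_flow[RTL8188E_TRANS_CARDEMU_TO_ACT_STEPS +
                                         RTL8188E_TRANS_END_STEPS] = {
        RTL8188E_TRANS_CARDEMU_TO_ACT   /* power-on steps */
        RTL8188E_TRANS_END              /* sentinel entry: offset 0xFFFF, cmd PWR_CMD_END */
};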
diff --git a/drivers/staging/rtl8188eu/include/Hal8188ERateAdaptive.h b/drivers/staging/rtl8188eu/include/Hal8188ERateAdaptive.h
new file mode 100644
index 0000000..21996a1
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/Hal8188ERateAdaptive.h
@@ -0,0 +1,75 @@
+#ifndef __INC_RA_H
+#define __INC_RA_H
+/*++
+Copyright (c) Realtek Semiconductor Corp. All rights reserved.
+
+Module Name:
+ RateAdaptive.h
+
+Abstract:
+ Prototype of RA and related data structure.
+
+Major Change History:
+ When Who What
+ ---------- --------------- -------------------------------
+ 2011-08-12 Page Create.
+--*/
+
+/* Rate adaptive define */
+#define PERENTRY 23
+#define RETRYSIZE 5
+#define RATESIZE 28
+#define TX_RPT2_ITEM_SIZE 8
+
+/* */
+/* TX report 2 format in Rx desc */
+/* */
+#define GET_TX_RPT2_DESC_PKT_LEN_88E(__pRxStatusDesc) \
+ LE_BITS_TO_4BYTE(__pRxStatusDesc, 0, 9)
+#define GET_TX_RPT2_DESC_MACID_VALID_1_88E(__pRxStatusDesc) \
+ LE_BITS_TO_4BYTE(__pRxStatusDesc+16, 0, 32)
+#define GET_TX_RPT2_DESC_MACID_VALID_2_88E(__pRxStatusDesc) \
+ LE_BITS_TO_4BYTE(__pRxStatusDesc+20, 0, 32)
+
+#define GET_TX_REPORT_TYPE1_RERTY_0(__pAddr) \
+ LE_BITS_TO_4BYTE(__pAddr, 0, 16)
+#define GET_TX_REPORT_TYPE1_RERTY_1(__pAddr) \
+ LE_BITS_TO_1BYTE(__pAddr+2, 0, 8)
+#define GET_TX_REPORT_TYPE1_RERTY_2(__pAddr) \
+ LE_BITS_TO_1BYTE(__pAddr+3, 0, 8)
+#define GET_TX_REPORT_TYPE1_RERTY_3(__pAddr) \
+ LE_BITS_TO_1BYTE(__pAddr+4, 0, 8)
+#define GET_TX_REPORT_TYPE1_RERTY_4(__pAddr) \
+ LE_BITS_TO_1BYTE(__pAddr+4+1, 0, 8)
+#define GET_TX_REPORT_TYPE1_DROP_0(__pAddr) \
+ LE_BITS_TO_1BYTE(__pAddr+4+2, 0, 8)
+#define GET_TX_REPORT_TYPE1_DROP_1(__pAddr) \
+ LE_BITS_TO_1BYTE(__pAddr+4+3, 0, 8)
+
+/* End rate adaptive define */
+
+void ODM_RASupport_Init(struct odm_dm_struct *dm_odm);
+
+int ODM_RAInfo_Init_all(struct odm_dm_struct *dm_odm);
+
+int ODM_RAInfo_Init(struct odm_dm_struct *dm_odm, u8 MacID);
+
+u8 ODM_RA_GetShortGI_8188E(struct odm_dm_struct *dm_odm, u8 MacID);
+
+u8 ODM_RA_GetDecisionRate_8188E(struct odm_dm_struct *dm_odm, u8 MacID);
+
+u8 ODM_RA_GetHwPwrStatus_8188E(struct odm_dm_struct *dm_odm, u8 MacID);
+void ODM_RA_UpdateRateInfo_8188E(struct odm_dm_struct *dm_odm, u8 MacID,
+ u8 RateID, u32 RateMask,
+ u8 SGIEnable);
+
+void ODM_RA_SetRSSI_8188E(struct odm_dm_struct *dm_odm, u8 macid,
+ u8 rssi);
+
+void ODM_RA_TxRPT2Handle_8188E(struct odm_dm_struct *dm_odm,
+ u8 *txrpt_buf, u16 txrpt_len,
+ u32 validentry0, u32 validentry1);
+
+void ODM_RA_Set_TxRPT_Time(struct odm_dm_struct *dm_odm, u16 minRptTime);
+
+#endif
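The GET_TX_RPT2_DESC_* accessors pull the TX report 2 length and the two MACID-valid bitmaps out of the RX status descriptor before the payload is handed to ODM_RA_TxRPT2Handle_8188E(). A hedged sketch of that hand-off follows; the function and variable names are illustrative, not taken from the patch.

/* Illustrative only: feed a TX report 2 from an RX status descriptor to the RA code. */
static void handle_txrpt2_sketch(struct odm_dm_struct *dm_odm,
                                 u8 *rx_desc, u8 *payload)
{
        u16 len = (u16)GET_TX_RPT2_DESC_PKT_LEN_88E(rx_desc);
        u32 macid_valid_lo = GET_TX_RPT2_DESC_MACID_VALID_1_88E(rx_desc);
        u32 macid_valid_hi = GET_TX_RPT2_DESC_MACID_VALID_2_88E(rx_desc);

        /* payload holds TX_RPT2_ITEM_SIZE-byte per-MACID retry/drop entries */
        ODM_RA_TxRPT2Handle_8188E(dm_odm, payload, len,
                                  macid_valid_lo, macid_valid_hi);
}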
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EReg.h b/drivers/staging/rtl8188eu/include/Hal8188EReg.h
new file mode 100644
index 0000000..d880b0c
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/Hal8188EReg.h
@@ -0,0 +1,46 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+/* */
+/* File Name: Hal8188EReg.h */
+/* */
+/* Description: */
+/* */
+/* This file is for RTL8188E register definition. */
+/* */
+/* */
+/* */
+#ifndef __HAL_8188E_REG_H__
+#define __HAL_8188E_REG_H__
+
+/* */
+/* Register Definition */
+/* */
+#define TRX_ANTDIV_PATH 0x860
+#define RX_ANTDIV_PATH 0xb2c
+#define ODM_R_A_AGC_CORE1_8188E 0xc50
+
+
+/* */
+/* Bitmap Definition */
+/* */
+#define BIT_FA_RESET_8188E BIT0
+
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/HalHWImg8188E_BB.h b/drivers/staging/rtl8188eu/include/HalHWImg8188E_BB.h
new file mode 100644
index 0000000..e574521
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/HalHWImg8188E_BB.h
@@ -0,0 +1,44 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along with
+* this program; if not, write to the Free Software Foundation, Inc.,
+* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+*
+*
+******************************************************************************/
+
+#ifndef __INC_BB_8188E_HW_IMG_H
+#define __INC_BB_8188E_HW_IMG_H
+
+/* static bool CheckCondition(const u32 Condition, const u32 Hex); */
+
+/******************************************************************************
+* AGC_TAB_1T.TXT
+******************************************************************************/
+
+enum HAL_STATUS ODM_ReadAndConfig_AGC_TAB_1T_8188E(struct odm_dm_struct *odm);
+
+/******************************************************************************
+* PHY_REG_1T.TXT
+******************************************************************************/
+
+enum HAL_STATUS ODM_ReadAndConfig_PHY_REG_1T_8188E(struct odm_dm_struct *odm);
+
+/******************************************************************************
+* PHY_REG_PG.TXT
+******************************************************************************/
+
+void ODM_ReadAndConfig_PHY_REG_PG_8188E(struct odm_dm_struct *dm_odm);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h b/drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h
new file mode 100644
index 0000000..1bf9bc7
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h
@@ -0,0 +1,34 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along with
+* this program; if not, write to the Free Software Foundation, Inc.,
+* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+*
+*
+******************************************************************************/
+
+#ifndef __INC_FW_8188E_HW_IMG_H
+#define __INC_FW_8188E_HW_IMG_H
+
+
+/******************************************************************************
+* FW_AP.TXT
+******************************************************************************/
+/******************************************************************************
+* FW_WoWLAN.TXT
+******************************************************************************/
+#define ArrayLength_8188E_FW_WoWLAN 15764
+extern const u8 Array_8188E_FW_WoWLAN[ArrayLength_8188E_FW_WoWLAN];
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/HalHWImg8188E_MAC.h b/drivers/staging/rtl8188eu/include/HalHWImg8188E_MAC.h
new file mode 100644
index 0000000..acf78b9
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/HalHWImg8188E_MAC.h
@@ -0,0 +1,30 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along with
+* this program; if not, write to the Free Software Foundation, Inc.,
+* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+*
+*
+******************************************************************************/
+
+#ifndef __INC_MAC_8188E_HW_IMG_H
+#define __INC_MAC_8188E_HW_IMG_H
+
+/******************************************************************************
+* MAC_REG.TXT
+******************************************************************************/
+
+enum HAL_STATUS ODM_ReadAndConfig_MAC_REG_8188E(struct odm_dm_struct *pDM_Odm);
+
+#endif /* end of HWIMG_SUPPORT */
diff --git a/drivers/staging/rtl8188eu/include/HalHWImg8188E_RF.h b/drivers/staging/rtl8188eu/include/HalHWImg8188E_RF.h
new file mode 100644
index 0000000..8ecb40d
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/HalHWImg8188E_RF.h
@@ -0,0 +1,30 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along with
+* this program; if not, write to the Free Software Foundation, Inc.,
+* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+*
+*
+******************************************************************************/
+
+#ifndef __INC_RF_8188E_HW_IMG_H
+#define __INC_RF_8188E_HW_IMG_H
+
+/******************************************************************************
+ * RadioA_1T.TXT
+ ******************************************************************************/
+
+enum HAL_STATUS ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *odm);
+
+#endif /* end of HWIMG_SUPPORT */
diff --git a/drivers/staging/rtl8188eu/include/HalPhyRf.h b/drivers/staging/rtl8188eu/include/HalPhyRf.h
new file mode 100644
index 0000000..1ec4971
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/HalPhyRf.h
@@ -0,0 +1,30 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+ #ifndef __HAL_PHY_RF_H__
+ #define __HAL_PHY_RF_H__
+
+#define ODM_TARGET_CHNL_NUM_2G_5G 59
+
+void ODM_ResetIQKResult(struct odm_dm_struct *pDM_Odm);
+
+u8 ODM_GetRightChnlPlaceforIQK(u8 chnl);
+
+#endif /* #ifndef __HAL_PHY_RF_H__ */
diff --git a/drivers/staging/rtl8188eu/include/HalPhyRf_8188e.h b/drivers/staging/rtl8188eu/include/HalPhyRf_8188e.h
new file mode 100644
index 0000000..fa583f2
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/HalPhyRf_8188e.h
@@ -0,0 +1,63 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#ifndef __HAL_PHY_RF_8188E_H__
+#define __HAL_PHY_RF_8188E_H__
+
+/*--------------------------Define Parameters-------------------------------*/
+#define IQK_DELAY_TIME_88E 10 /* ms */
+#define index_mapping_NUM_88E 15
+#define AVG_THERMAL_NUM_88E 4
+
+
+void ODM_TxPwrTrackAdjust88E(struct odm_dm_struct *pDM_Odm,
+ u8 Type, /* 0 = OFDM, 1 = CCK */
+ u8 *pDirection,/* 1 = +(incr) 2 = -(decr) */
+ u32 *pOutWriteVal); /* Tx tracking CCK/OFDM BB
+ * swing index adjust */
+
+
+void odm_TXPowerTrackingCallback_ThermalMeter_8188E(struct adapter *Adapter);
+
+
+/* 1 7. IQK */
+
+void PHY_IQCalibrate_8188E(struct adapter *Adapter, bool ReCovery);
+
+/* LC calibrate */
+void PHY_LCCalibrate_8188E(struct adapter *pAdapter);
+
+/* AP calibrate */
+void PHY_APCalibrate_8188E(struct adapter *pAdapter, s8 delta);
+
+void PHY_DigitalPredistortion_8188E(struct adapter *pAdapter);
+
+void _PHY_SaveADDARegisters(struct adapter *pAdapter, u32 *ADDAReg,
+ u32 *ADDABackup, u32 RegisterNum);
+
+void _PHY_PathADDAOn(struct adapter *pAdapter, u32 *ADDAReg,
+ bool isPathAOn, bool is2T);
+
+void _PHY_MACSettingCalibration(struct adapter *pAdapter, u32 *MACReg,
+ u32 *MACBackup);
+
+void _PHY_PathAStandBy(struct adapter *pAdapter);
+
+#endif /* #ifndef __HAL_PHY_RF_8188E_H__ */
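The parameter comments on ODM_TxPwrTrackAdjust88E() encode its contract: Type selects OFDM (0) or CCK (1), and the function returns a direction plus a BB swing index adjustment through the two pointers. A hedged call sketch, with caller-side names invented for illustration:

/* Illustrative only: query the thermal-tracking adjustment for CCK. */
static void txpwr_track_sketch(struct odm_dm_struct *dm_odm)
{
        u8 direction;   /* 1 = increase, 2 = decrease */
        u32 write_val;  /* CCK/OFDM BB swing index adjustment */

        ODM_TxPwrTrackAdjust88E(dm_odm, 1 /* CCK */, &direction, &write_val);
        /* the caller would then apply write_val in the given direction */
}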
diff --git a/drivers/staging/rtl8188eu/include/HalPwrSeqCmd.h b/drivers/staging/rtl8188eu/include/HalPwrSeqCmd.h
new file mode 100644
index 0000000..d945784
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/HalPwrSeqCmd.h
@@ -0,0 +1,128 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __HALPWRSEQCMD_H__
+#define __HALPWRSEQCMD_H__
+
+#include <drv_types.h>
+
+/*---------------------------------------------*/
+/* 3 The value of cmd: 4 bits */
+/*---------------------------------------------*/
+#define PWR_CMD_READ 0x00
+ /* offset: the read register offset */
+ /* msk: the mask of the read value */
+ /* value: N/A, left by 0 */
+ /* note: driver shall implement this function by read & msk */
+
+#define PWR_CMD_WRITE 0x01
+ /* offset: the read register offset */
+ /* msk: the mask of the write bits */
+ /* value: write value */
+ /* note: driver shall implement this cmd by read & msk after write */
+
+#define PWR_CMD_POLLING 0x02
+ /* offset: the read register offset */
+ /* msk: the mask of the polled value */
+ /* value: the value to be polled, masked by the msk field. */
+ /* note: driver shall implement this cmd by */
+ /* do{ */
+ /* if ( (Read(offset) & msk) == (value & msk) ) */
+ /* break; */
+ /* } while (not timeout); */
+
+#define PWR_CMD_DELAY 0x03
+ /* offset: the value to delay */
+ /* msk: N/A */
+ /* value: the unit of delay, 0: us, 1: ms */
+
+#define PWR_CMD_END 0x04
+ /* offset: N/A */
+ /* msk: N/A */
+ /* value: N/A */
+
+/*---------------------------------------------*/
+/* 3 The value of base: 4 bits */
+/*---------------------------------------------*/
+ /* define the base address of each block */
+#define PWR_BASEADDR_MAC 0x00
+#define PWR_BASEADDR_USB 0x01
+#define PWR_BASEADDR_PCIE 0x02
+#define PWR_BASEADDR_SDIO 0x03
+
+/*---------------------------------------------*/
+/* 3 The value of interface_msk: 4 bits */
+/*---------------------------------------------*/
+#define PWR_INTF_SDIO_MSK BIT(0)
+#define PWR_INTF_USB_MSK BIT(1)
+#define PWR_INTF_PCI_MSK BIT(2)
+#define PWR_INTF_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
+
+/*---------------------------------------------*/
+/* 3 The value of fab_msk: 4 bits */
+/*---------------------------------------------*/
+#define PWR_FAB_TSMC_MSK BIT(0)
+#define PWR_FAB_UMC_MSK BIT(1)
+#define PWR_FAB_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
+
+/*---------------------------------------------*/
+/* 3 The value of cut_msk: 8 bits */
+/*---------------------------------------------*/
+#define PWR_CUT_TESTCHIP_MSK BIT(0)
+#define PWR_CUT_A_MSK BIT(1)
+#define PWR_CUT_B_MSK BIT(2)
+#define PWR_CUT_C_MSK BIT(3)
+#define PWR_CUT_D_MSK BIT(4)
+#define PWR_CUT_E_MSK BIT(5)
+#define PWR_CUT_F_MSK BIT(6)
+#define PWR_CUT_G_MSK BIT(7)
+#define PWR_CUT_ALL_MSK 0xFF
+
+
+enum pwrseq_cmd_delat_unit {
+ PWRSEQ_DELAY_US,
+ PWRSEQ_DELAY_MS,
+};
+
+struct wl_pwr_cfg {
+ u16 offset;
+ u8 cut_msk;
+ u8 fab_msk:4;
+ u8 interface_msk:4;
+ u8 base:4;
+ u8 cmd:4;
+ u8 msk;
+ u8 value;
+};
+
+#define GET_PWR_CFG_OFFSET(__PWR_CMD) __PWR_CMD.offset
+#define GET_PWR_CFG_CUT_MASK(__PWR_CMD) __PWR_CMD.cut_msk
+#define GET_PWR_CFG_FAB_MASK(__PWR_CMD) __PWR_CMD.fab_msk
+#define GET_PWR_CFG_INTF_MASK(__PWR_CMD) __PWR_CMD.interface_msk
+#define GET_PWR_CFG_BASE(__PWR_CMD) __PWR_CMD.base
+#define GET_PWR_CFG_CMD(__PWR_CMD) __PWR_CMD.cmd
+#define GET_PWR_CFG_MASK(__PWR_CMD) __PWR_CMD.msk
+#define GET_PWR_CFG_VALUE(__PWR_CMD) __PWR_CMD.value
+
+
+/* Prototype of protected function. */
+u8 HalPwrSeqCmdParsing(struct adapter *padapter, u8 CutVersion, u8 FabVersion,
+ u8 InterfaceType, struct wl_pwr_cfg PwrCfgCmd[]);
+
+#endif
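The command semantics spelled out in the comments above are what HalPwrSeqCmdParsing() (implemented in a separate source file) acts on. A condensed, hedged sketch of that table walk is shown below, using only the accessor macros from this header; rtw_read8()/rtw_write8() and udelay()/mdelay() are assumed from the rest of the driver and the kernel, and polling timeouts are omitted.

/* Illustrative only: walk a wl_pwr_cfg table until PWR_CMD_END. */
static u8 pwr_seq_parse_sketch(struct adapter *padapter, u8 cut, u8 fab,
                               u8 intf, struct wl_pwr_cfg cfg[])
{
        u32 i = 0;

        for (;;) {
                struct wl_pwr_cfg cmd = cfg[i++];
                u8 val;

                /* skip entries that do not match this cut/fab/interface */
                if (!(GET_PWR_CFG_CUT_MASK(cmd) & cut) ||
                    !(GET_PWR_CFG_FAB_MASK(cmd) & fab) ||
                    !(GET_PWR_CFG_INTF_MASK(cmd) & intf))
                        continue;

                switch (GET_PWR_CFG_CMD(cmd)) {
                case PWR_CMD_WRITE:     /* read-modify-write of the masked bits */
                        val = rtw_read8(padapter, GET_PWR_CFG_OFFSET(cmd));
                        val &= ~GET_PWR_CFG_MASK(cmd);
                        val |= GET_PWR_CFG_VALUE(cmd) & GET_PWR_CFG_MASK(cmd);
                        rtw_write8(padapter, GET_PWR_CFG_OFFSET(cmd), val);
                        break;
                case PWR_CMD_POLLING:   /* loop until (reg & msk) == (value & msk) */
                        break;          /* timeout handling omitted in this sketch */
                case PWR_CMD_DELAY:     /* offset holds the delay count */
                        if (GET_PWR_CFG_VALUE(cmd) == PWRSEQ_DELAY_US)
                                udelay(GET_PWR_CFG_OFFSET(cmd));
                        else
                                mdelay(GET_PWR_CFG_OFFSET(cmd));
                        break;
                case PWR_CMD_END:
                        return true;
                }
        }
}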
diff --git a/drivers/staging/rtl8188eu/include/HalVerDef.h b/drivers/staging/rtl8188eu/include/HalVerDef.h
new file mode 100644
index 0000000..97047cf
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/HalVerDef.h
@@ -0,0 +1,167 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __HAL_VERSION_DEF_H__
+#define __HAL_VERSION_DEF_H__
+
+enum HAL_IC_TYPE {
+ CHIP_8192S = 0,
+ CHIP_8188C = 1,
+ CHIP_8192C = 2,
+ CHIP_8192D = 3,
+ CHIP_8723A = 4,
+ CHIP_8188E = 5,
+ CHIP_8881A = 6,
+ CHIP_8812A = 7,
+ CHIP_8821A = 8,
+ CHIP_8723B = 9,
+ CHIP_8192E = 10,
+};
+
+enum HAL_CHIP_TYPE {
+ TEST_CHIP = 0,
+ NORMAL_CHIP = 1,
+ FPGA = 2,
+};
+
+enum HAL_CUT_VERSION {
+ A_CUT_VERSION = 0,
+ B_CUT_VERSION = 1,
+ C_CUT_VERSION = 2,
+ D_CUT_VERSION = 3,
+ E_CUT_VERSION = 4,
+ F_CUT_VERSION = 5,
+ G_CUT_VERSION = 6,
+};
+
+enum HAL_VENDOR {
+ CHIP_VENDOR_TSMC = 0,
+ CHIP_VENDOR_UMC = 1,
+};
+
+enum HAL_RF_TYPE {
+ RF_TYPE_1T1R = 0,
+ RF_TYPE_1T2R = 1,
+ RF_TYPE_2T2R = 2,
+ RF_TYPE_2T3R = 3,
+ RF_TYPE_2T4R = 4,
+ RF_TYPE_3T3R = 5,
+ RF_TYPE_3T4R = 6,
+ RF_TYPE_4T4R = 7,
+};
+
+struct HAL_VERSION {
+ enum HAL_IC_TYPE ICType;
+ enum HAL_CHIP_TYPE ChipType;
+ enum HAL_CUT_VERSION CUTVersion;
+ enum HAL_VENDOR VendorType;
+ enum HAL_RF_TYPE RFType;
+ u8 ROMVer;
+};
+
+/* Get element */
+#define GET_CVID_IC_TYPE(version) (((version).ICType))
+#define GET_CVID_CHIP_TYPE(version) (((version).ChipType))
+#define GET_CVID_RF_TYPE(version) (((version).RFType))
+#define GET_CVID_MANUFACTUER(version) (((version).VendorType))
+#define GET_CVID_CUT_VERSION(version) (((version).CUTVersion))
+#define GET_CVID_ROM_VERSION(version) (((version).ROMVer) & ROM_VERSION_MASK)
+
+/* Common Macro. -- */
+/* HAL_VERSION VersionID */
+
+/* HAL_IC_TYPE_E */
+#define IS_81XXC(version) \
+ (((GET_CVID_IC_TYPE(version) == CHIP_8192C) || \
+ (GET_CVID_IC_TYPE(version) == CHIP_8188C)) ? true : false)
+#define IS_8723_SERIES(version) \
+ ((GET_CVID_IC_TYPE(version) == CHIP_8723A) ? true : false)
+#define IS_92D(version) \
+ ((GET_CVID_IC_TYPE(version) == CHIP_8192D) ? true : false)
+#define IS_8188E(version) \
+ ((GET_CVID_IC_TYPE(version) == CHIP_8188E) ? true : false)
+
+/* HAL_CHIP_TYPE_E */
+#define IS_TEST_CHIP(version) \
+ ((GET_CVID_CHIP_TYPE(version) == TEST_CHIP) ? true : false)
+#define IS_NORMAL_CHIP(version) \
+ ((GET_CVID_CHIP_TYPE(version) == NORMAL_CHIP) ? true : false)
+
+/* HAL_CUT_VERSION_E */
+#define IS_A_CUT(version) \
+ ((GET_CVID_CUT_VERSION(version) == A_CUT_VERSION) ? true : false)
+#define IS_B_CUT(version) \
+ ((GET_CVID_CUT_VERSION(version) == B_CUT_VERSION) ? true : false)
+#define IS_C_CUT(version) \
+ ((GET_CVID_CUT_VERSION(version) == C_CUT_VERSION) ? true : false)
+#define IS_D_CUT(version) \
+ ((GET_CVID_CUT_VERSION(version) == D_CUT_VERSION) ? true : false)
+#define IS_E_CUT(version) \
+ ((GET_CVID_CUT_VERSION(version) == E_CUT_VERSION) ? true : false)
+
+
+/* HAL_VENDOR_E */
+#define IS_CHIP_VENDOR_TSMC(version) \
+ ((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_TSMC) ? true : false)
+#define IS_CHIP_VENDOR_UMC(version) \
+ ((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_UMC) ? true : false)
+
+/* HAL_RF_TYPE_E */
+#define IS_1T1R(version) \
+ ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T1R) ? true : false)
+#define IS_1T2R(version) \
+ ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T2R) ? true : false)
+#define IS_2T2R(version) \
+ ((GET_CVID_RF_TYPE(version) == RF_TYPE_2T2R) ? true : false)
+
+/* Chip version Macro. -- */
+#define IS_81XXC_TEST_CHIP(version) \
+ ((IS_81XXC(version) && (!IS_NORMAL_CHIP(version))) ? true : false)
+
+#define IS_92C_SERIAL(version) \
+ ((IS_81XXC(version) && IS_2T2R(version)) ? true : false)
+#define IS_81xxC_VENDOR_UMC_A_CUT(version) \
+ (IS_81XXC(version) ? (IS_CHIP_VENDOR_UMC(version) ? \
+ (IS_A_CUT(version) ? true : false) : false) : false)
+#define IS_81xxC_VENDOR_UMC_B_CUT(version) \
+ (IS_81XXC(version) ? (IS_CHIP_VENDOR_UMC(version) ? \
+ (IS_B_CUT(version) ? true : false) : false) : false)
+#define IS_81xxC_VENDOR_UMC_C_CUT(version) \
+ (IS_81XXC(version) ? (IS_CHIP_VENDOR_UMC(version) ? \
+ (IS_C_CUT(version) ? true : false) : false) : false)
+
+#define IS_NORMAL_CHIP92D(version) \
+ ((IS_92D(version)) ? \
+ ((GET_CVID_CHIP_TYPE(version) == NORMAL_CHIP) ? true : false) : false)
+
+#define IS_92D_SINGLEPHY(version) \
+ ((IS_92D(version)) ? (IS_2T2R(version) ? true : false) : false)
+#define IS_92D_C_CUT(version) \
+ ((IS_92D(version)) ? (IS_C_CUT(version) ? true : false) : false)
+#define IS_92D_D_CUT(version) \
+ ((IS_92D(version)) ? (IS_D_CUT(version) ? true : false) : false)
+#define IS_92D_E_CUT(version) \
+ ((IS_92D(version)) ? (IS_E_CUT(version) ? true : false) : false)
+
+#define IS_8723A_A_CUT(version) \
+ ((IS_8723_SERIES(version)) ? (IS_A_CUT(version) ? true : false) : false)
+#define IS_8723A_B_CUT(version) \
+ ((IS_8723_SERIES(version)) ? (IS_B_CUT(version) ? true : false) : false)
+
+#endif
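The IS_* helpers above are plain predicates over a populated struct HAL_VERSION. A short hedged example of the usual usage pattern (pr_info() stands in for the driver's own debug macro):

/* Illustrative only: branch on chip/cut/vendor from a HAL_VERSION descriptor. */
static void report_version_sketch(struct HAL_VERSION ver)
{
        if (IS_8188E(ver) && IS_NORMAL_CHIP(ver))
                pr_info("RTL8188E %s cut, %s fab\n",
                        IS_A_CUT(ver) ? "A" : "later",
                        IS_CHIP_VENDOR_TSMC(ver) ? "TSMC" : "UMC");
}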
diff --git a/drivers/staging/rtl8188eu/include/basic_types.h b/drivers/staging/rtl8188eu/include/basic_types.h
new file mode 100644
index 0000000..8a7ca99
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/basic_types.h
@@ -0,0 +1,184 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __BASIC_TYPES_H__
+#define __BASIC_TYPES_H__
+
+#define SUCCESS 0
+#define FAIL (-1)
+
+#include <linux/types.h>
+#define NDIS_OID uint
+
+typedef void (*proc_t)(void *);
+
+#define FIELD_OFFSET(s, field) ((ssize_t)&((s *)(0))->field)
+
+#define MEM_ALIGNMENT_OFFSET (sizeof(size_t))
+#define MEM_ALIGNMENT_PADDING (sizeof(size_t) - 1)
+
+/* port from fw */
+/* TODO: The macros below are synced from the SD7 driver; their
+ * correctness still needs to be verified. */
+
+/*
+ * Call endian free function when
+ * 1. Read/write packet content.
+ * 2. Before write integer to IO.
+ * 3. After read integer from IO.
+*/
+
+/* Convert little-endian data to host ordering */
+#define EF1BYTE(_val) \
+ ((u8)(_val))
+#define EF2BYTE(_val) \
+ (le16_to_cpu(_val))
+#define EF4BYTE(_val) \
+ (le32_to_cpu(_val))
+
+/* Read data from memory */
+#define READEF1BYTE(_ptr) \
+ EF1BYTE(*((u8 *)(_ptr)))
+/* Read le16 data from memory and convert to host ordering */
+#define READEF2BYTE(_ptr) \
+ EF2BYTE(*(_ptr))
+#define READEF4BYTE(_ptr) \
+ EF4BYTE(*(_ptr))
+
+/* Write data to memory */
+#define WRITEEF1BYTE(_ptr, _val) \
+ do { \
+ (*((u8 *)(_ptr))) = EF1BYTE(_val); \
+ } while (0)
+/* Write le data to memory in host ordering */
+#define WRITEEF2BYTE(_ptr, _val) \
+ do { \
+ (*((u16 *)(_ptr))) = EF2BYTE(_val); \
+ } while (0)
+
+#define WRITEEF4BYTE(_ptr, _val) \
+ do { \
+ (*((u32 *)(_ptr))) = EF4BYTE(_val); \
+ } while (0)
+
+/* Create a bit mask
+ * Examples:
+ * BIT_LEN_MASK_32(0) => 0x00000000
+ * BIT_LEN_MASK_32(1) => 0x00000001
+ * BIT_LEN_MASK_32(2) => 0x00000003
+ * BIT_LEN_MASK_32(32) => 0xFFFFFFFF
+ */
+#define BIT_LEN_MASK_32(__bitlen) \
+ (0xFFFFFFFF >> (32 - (__bitlen)))
+#define BIT_LEN_MASK_16(__bitlen) \
+ (0xFFFF >> (16 - (__bitlen)))
+#define BIT_LEN_MASK_8(__bitlen) \
+ (0xFF >> (8 - (__bitlen)))
+
+/* Create an offset bit mask
+ * Examples:
+ * BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
+ * BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000
+ */
+#define BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) \
+ (BIT_LEN_MASK_32(__bitlen) << (__bitoffset))
+#define BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) \
+ (BIT_LEN_MASK_16(__bitlen) << (__bitoffset))
+#define BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen) \
+ (BIT_LEN_MASK_8(__bitlen) << (__bitoffset))
+
+/*Description:
+ * Return 4-byte value in host byte ordering from
+ * 4-byte pointer in little-endian system.
+ */
+#define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \
+ (EF4BYTE(*((__le32 *)(__pstart))))
+#define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \
+ (EF2BYTE(*((__le16 *)(__pstart))))
+#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
+ (EF1BYTE(*((u8 *)(__pstart))))
+
+/*Description:
+Translate subfield (continuous bits in little-endian) of 4-byte
+value to host byte ordering.*/
+#define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ (LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset)) & \
+ BIT_LEN_MASK_32(__bitlen) \
+ )
+#define LE_BITS_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ (LE_P2BYTE_TO_HOST_2BYTE(__pstart) >> (__bitoffset)) & \
+ BIT_LEN_MASK_16(__bitlen) \
+ )
+#define LE_BITS_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ (LE_P1BYTE_TO_HOST_1BYTE(__pstart) >> (__bitoffset)) & \
+ BIT_LEN_MASK_8(__bitlen) \
+ )
+
+/* Description:
+ * Mask subfield (continuous bits in little-endian) of 4-byte value
+ * and return the result in 4-byte value in host byte ordering.
+ */
+#define LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ LE_P4BYTE_TO_HOST_4BYTE(__pstart) & \
+ (~BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen)) \
+ )
+#define LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ LE_P2BYTE_TO_HOST_2BYTE(__pstart) & \
+ (~BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen)) \
+ )
+#define LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ LE_P1BYTE_TO_HOST_1BYTE(__pstart) & \
+ (~BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen)) \
+ )
+
+/* Description:
+ * Set subfield of little-endian 4-byte value to specified value.
+ */
+#define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \
+ *((u32 *)(__pstart)) = \
+ ( \
+ LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \
+ ((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \
+ )
+
+#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \
+ *((u16 *)(__pstart)) = \
+ ( \
+ LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \
+ ((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \
+ )
+
+#define SET_BITS_TO_LE_1BYTE(__pstart, __bitoffset, __bitlen, __val) \
+ *((u8 *)(__pstart)) = EF1BYTE \
+ ( \
+ LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) | \
+ ((((u8)__val) & BIT_LEN_MASK_8(__bitlen)) << (__bitoffset)) \
+ )
+
+/* Get the N-bytes alignment offset from the current length */
+#define N_BYTE_ALIGMENT(__value, __aligment) ((__aligment == 1) ? \
+ (__value) : (((__value + __aligment - 1) / __aligment) * __aligment))
+
+#endif /* __BASIC_TYPES_H__ */
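The LE_BITS_TO_* and SET_BITS_TO_LE_* helpers go through an explicit little-endian conversion on the load, which is why they are used on descriptor memory rather than plain shifts and masks. A hedged example with a made-up 6-bit field at bits [9:4] of a descriptor dword:

/* Illustrative only: read and rewrite a hypothetical 6-bit field of an LE dword. */
static void le_bits_sketch(u8 *desc)
{
        u32 field = LE_BITS_TO_4BYTE(desc, 4, 6);       /* bits 9..4 */

        SET_BITS_TO_LE_4BYTE(desc, 4, 6, field + 1);    /* write back field + 1 */
}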
diff --git a/drivers/staging/rtl8188eu/include/cmd_osdep.h b/drivers/staging/rtl8188eu/include/cmd_osdep.h
new file mode 100644
index 0000000..5a8465e
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/cmd_osdep.h
@@ -0,0 +1,32 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __CMD_OSDEP_H_
+#define __CMD_OSDEP_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+extern int _rtw_init_cmd_priv(struct cmd_priv *pcmdpriv);
+extern int _rtw_init_evt_priv(struct evt_priv *pevtpriv);
+extern void _rtw_free_cmd_priv(struct cmd_priv *pcmdpriv);
+extern int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj);
+extern struct cmd_obj *_rtw_dequeue_cmd(struct __queue *queue);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/drv_types.h b/drivers/staging/rtl8188eu/include/drv_types.h
new file mode 100644
index 0000000..ad073c8
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/drv_types.h
@@ -0,0 +1,334 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+/*-----------------------------------------------------------------------------
+
+ For type defines and data structure defines
+
+------------------------------------------------------------------------------*/
+
+
+#ifndef __DRV_TYPES_H__
+#define __DRV_TYPES_H__
+
+#define DRV_NAME "r8188eu"
+
+#include <osdep_service.h>
+#include <wlan_bssdef.h>
+#include <drv_types_linux.h>
+#include <rtw_ht.h>
+#include <rtw_cmd.h>
+#include <rtw_xmit.h>
+#include <rtw_recv.h>
+#include <hal_intf.h>
+#include <hal_com.h>
+#include <rtw_qos.h>
+#include <rtw_security.h>
+#include <rtw_pwrctrl.h>
+#include <rtw_io.h>
+#include <rtw_eeprom.h>
+#include <sta_info.h>
+#include <rtw_mlme.h>
+#include <rtw_debug.h>
+#include <rtw_rf.h>
+#include <rtw_event.h>
+#include <rtw_led.h>
+#include <rtw_mlme_ext.h>
+#include <rtw_p2p.h>
+#include <rtw_ap.h>
+#include <rtw_mp.h>
+#include <rtw_br_ext.h>
+
+enum _NIC_VERSION {
+ RTL8711_NIC,
+ RTL8712_NIC,
+ RTL8713_NIC,
+ RTL8716_NIC
+};
+
+#define SPEC_DEV_ID_NONE BIT(0)
+#define SPEC_DEV_ID_DISABLE_HT BIT(1)
+#define SPEC_DEV_ID_ENABLE_PS BIT(2)
+#define SPEC_DEV_ID_RF_CONFIG_1T1R BIT(3)
+#define SPEC_DEV_ID_RF_CONFIG_2T2R BIT(4)
+#define SPEC_DEV_ID_ASSIGN_IFNAME BIT(5)
+
+struct specific_device_id {
+ u32 flags;
+ u16 idVendor;
+ u16 idProduct;
+};
+
+struct registry_priv {
+ u8 chip_version;
+ u8 rfintfs;
+ u8 lbkmode;
+ u8 hci;
+ struct ndis_802_11_ssid ssid;
+ u8 network_mode; /* infra, ad-hoc, auto */
+ u8 channel;/* ad-hoc support requirement */
+ u8 wireless_mode;/* A, B, G, auto */
+ u8 scan_mode;/* active, passive */
+ u8 radio_enable;
+ u8 preamble;/* long, short, auto */
+ u8 vrtl_carrier_sense;/* Enable, Disable, Auto */
+ u8 vcs_type;/* RTS/CTS, CTS-to-self */
+ u16 rts_thresh;
+ u16 frag_thresh;
+ u8 adhoc_tx_pwr;
+ u8 soft_ap;
+ u8 power_mgnt;
+ u8 ips_mode;
+ u8 smart_ps;
+ u8 long_retry_lmt;
+ u8 short_retry_lmt;
+ u16 busy_thresh;
+ u8 ack_policy;
+ u8 mp_mode;
+ u8 software_encrypt;
+ u8 software_decrypt;
+ u8 acm_method;
+ /* UAPSD */
+ u8 wmm_enable;
+ u8 uapsd_enable;
+ u8 uapsd_max_sp;
+ u8 uapsd_acbk_en;
+ u8 uapsd_acbe_en;
+ u8 uapsd_acvi_en;
+ u8 uapsd_acvo_en;
+
+ struct wlan_bssid_ex dev_network;
+
+ u8 ht_enable;
+ u8 cbw40_enable;
+ u8 ampdu_enable;/* for tx */
+ u8 rx_stbc;
+ u8 ampdu_amsdu;/* A-MSDU in A-MPDU is permitted */
+ u8 lowrate_two_xmit;
+
+ u8 rf_config;
+ u8 low_power;
+
+ u8 wifi_spec;/* !turbo_mode */
+
+ u8 channel_plan;
+ bool bAcceptAddbaReq;
+
+ u8 antdiv_cfg;
+ u8 antdiv_type;
+
+ u8 usbss_enable;/* 0:disable,1:enable */
+ u8 hwpdn_mode;/* 0:disable,1:enable,2:decide by EFUSE config */
+ u8 hwpwrp_detect;/* 0:disable,1:enable */
+
+ u8 hw_wps_pbc;/* 0:disable,1:enable */
+
+ u8 max_roaming_times; /* the max number driver will try */
+
+ u8 fw_iol; /* enable iol without other concern */
+
+ u8 enable80211d;
+
+ u8 ifname[16];
+ u8 if2name[16];
+
+ u8 notch_filter;
+};
+
+/* For registry parameters */
+#define RGTRY_OFT(field) ((u32)FIELD_OFFSET(struct registry_priv, field))
+#define RGTRY_SZ(field) sizeof(((struct registry_priv *)0)->field)
+#define BSSID_OFT(field) ((u32)FIELD_OFFSET(struct wlan_bssid_ex, field))
+#define BSSID_SZ(field) sizeof(((struct wlan_bssid_ex *)0)->field)
+
+#define MAX_CONTINUAL_URB_ERR 4
+
+struct dvobj_priv {
+ struct adapter *if1;
+ struct adapter *if2;
+
+ /* For 92D, DMDP has 2 interfaces. */
+ u8 InterfaceNumber;
+ u8 NumInterfaces;
+
+ /* In/Out Pipe information */
+ int RtInPipe[2];
+ int RtOutPipe[3];
+ u8 Queue2Pipe[HW_QUEUE_ENTRY];/* for out pipe mapping */
+
+ u8 irq_alloc;
+
+/*-------- below is for USB INTERFACE --------*/
+
+ u8 nr_endpoint;
+ u8 ishighspeed;
+ u8 RtNumInPipes;
+ u8 RtNumOutPipes;
+ int ep_num[5]; /* endpoint number */
+ int RegUsbSS;
+ struct semaphore usb_suspend_sema;
+ struct mutex usb_vendor_req_mutex;
+
+ u8 *usb_alloc_vendor_req_buf;
+ u8 *usb_vendor_req_buf;
+
+ struct usb_interface *pusbintf;
+ struct usb_device *pusbdev;
+
+ ATOMIC_T continual_urb_error;
+};
+
+static inline struct device *dvobj_to_dev(struct dvobj_priv *dvobj)
+{
+ /* todo: get interface type from dvobj and then return
+ * the dev accordingly */
+ return &dvobj->pusbintf->dev;
+};
+
+enum _IFACE_TYPE {
+ IFACE_PORT0, /* mapping to port0 for C/D series chips */
+ IFACE_PORT1, /* mapping to port1 for C/D series chip */
+ MAX_IFACE_PORT,
+};
+
+enum _ADAPTER_TYPE {
+ PRIMARY_ADAPTER,
+ SECONDARY_ADAPTER,
+ MAX_ADAPTER,
+};
+
+enum driver_state {
+ DRIVER_NORMAL = 0,
+ DRIVER_DISAPPEAR = 1,
+ DRIVER_REPLACE_DONGLE = 2,
+};
+
+struct adapter {
+ int DriverState;/* for disabling driver using module; use dongle to
+ * replace module. */
+ int pid[3];/* process id from UI, 0:wps, 1:hostapd, 2:dhcpcd */
+ int bDongle;/* built-in module or external dongle */
+ u16 chip_type;
+ u16 HardwareType;
+ u16 interface_type;/* USB,SDIO,SPI,PCI */
+
+ struct dvobj_priv *dvobj;
+ struct mlme_priv mlmepriv;
+ struct mlme_ext_priv mlmeextpriv;
+ struct cmd_priv cmdpriv;
+ struct evt_priv evtpriv;
+ struct io_priv iopriv;
+ struct xmit_priv xmitpriv;
+ struct recv_priv recvpriv;
+ struct sta_priv stapriv;
+ struct security_priv securitypriv;
+ struct registry_priv registrypriv;
+ struct pwrctrl_priv pwrctrlpriv;
+ struct eeprom_priv eeprompriv;
+ struct led_priv ledpriv;
+ struct mp_priv mppriv;
+
+#ifdef CONFIG_88EU_AP_MODE
+ struct hostapd_priv *phostapdpriv;
+#endif
+
+ struct wifidirect_info wdinfo;
+
+ void *HalData;
+ u32 hal_data_sz;
+ struct hal_ops HalFunc;
+
+ s32 bDriverStopped;
+ s32 bSurpriseRemoved;
+ s32 bCardDisableWOHSM;
+
+ u32 IsrContent;
+ u32 ImrContent;
+
+ u8 EepromAddressSize;
+ u8 hw_init_completed;
+ u8 bDriverIsGoingToUnload;
+ u8 init_adpt_in_progress;
+ u8 bHaltInProgress;
+
+ void *cmdThread;
+ void *evtThread;
+ void *xmitThread;
+ void *recvThread;
+ void (*intf_start)(struct adapter *adapter);
+ void (*intf_stop)(struct adapter *adapter);
+ struct net_device *pnetdev;
+
+ /* used by rtw_rereg_nd_name related function */
+ struct rereg_nd_name_data {
+ struct net_device *old_pnetdev;
+ char old_ifname[IFNAMSIZ];
+ u8 old_ips_mode;
+ u8 old_bRegUseLed;
+ } rereg_nd_name_priv;
+
+ int bup;
+ struct net_device_stats stats;
+ struct iw_statistics iwstats;
+ struct proc_dir_entry *dir_dev;/* for proc directory */
+
+ int net_closed;
+ u8 bFWReady;
+ u8 bBTFWReady;
+ u8 bReadPortCancel;
+ u8 bWritePortCancel;
+ u8 bRxRSSIDisplay;
+ /* The driver will show the desired channel number
+ * when this flag is 1. */
+ u8 bNotifyChannelChange;
+#ifdef CONFIG_88EU_P2P
+ /* The driver will show the current P2P status when the
+ * upper application reads it. */
+ u8 bShowGetP2PState;
+#endif
+ struct adapter *pbuddy_adapter;
+
+ struct mutex *hw_init_mutex;
+
+ spinlock_t br_ext_lock;
+ struct nat25_network_db_entry *nethash[NAT25_HASH_SIZE];
+ int pppoe_connection_in_progress;
+ unsigned char pppoe_addr[MACADDRLEN];
+ unsigned char scdb_mac[MACADDRLEN];
+ unsigned char scdb_ip[4];
+ struct nat25_network_db_entry *scdb_entry;
+ unsigned char br_mac[MACADDRLEN];
+ unsigned char br_ip[4];
+ struct br_ext_info ethBrExtInfo;
+
+ u8 fix_rate;
+
+ unsigned char in_cta_test;
+};
+
+#define adapter_to_dvobj(adapter) (adapter->dvobj)
+
+int rtw_handle_dualmac(struct adapter *adapter, bool init);
+
+static inline u8 *myid(struct eeprom_priv *peepriv)
+{
+ return peepriv->mac_addr;
+}
+
+#endif /* __DRV_TYPES_H__ */
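adapter_to_dvobj() and dvobj_to_dev() are the usual bridge from the driver objects to the struct device used for logging and USB calls. A hedged one-line example of the chaining (helper name invented for illustration):

/* Illustrative only: struct device backing an adapter (USB-only in this driver). */
static struct device *adapter_dev_sketch(struct adapter *padapter)
{
        return dvobj_to_dev(adapter_to_dvobj(padapter));
}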
diff --git a/drivers/staging/rtl8188eu/include/drv_types_linux.h b/drivers/staging/rtl8188eu/include/drv_types_linux.h
new file mode 100644
index 0000000..812b744
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/drv_types_linux.h
@@ -0,0 +1,24 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __DRV_TYPES_LINUX_H__
+#define __DRV_TYPES_LINUX_H__
+
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/ethernet.h b/drivers/staging/rtl8188eu/include/ethernet.h
new file mode 100644
index 0000000..a59f912
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/ethernet.h
@@ -0,0 +1,42 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+/*! \file */
+#ifndef __INC_ETHERNET_H
+#define __INC_ETHERNET_H
+
+#define ETHERNET_ADDRESS_LENGTH 6 /* Ethernet Address Length */
+#define ETHERNET_HEADER_SIZE 14 /* Ethernet Header Length */
+#define LLC_HEADER_SIZE 6 /* LLC Header Length */
+#define TYPE_LENGTH_FIELD_SIZE 2 /* Type/Length Size */
+#define MINIMUM_ETHERNET_PACKET_SIZE 60 /* Min Ethernet Packet Size */
+#define MAXIMUM_ETHERNET_PACKET_SIZE 1514 /* Max Ethernet Packet Size */
+
+/* Is Multicast Address? */
+#define RT_ETH_IS_MULTICAST(_addr) ((((u8 *)(_addr))[0]&0x01) != 0)
+#define RT_ETH_IS_BROADCAST(_addr) ( \
+ ((u8 *)(_addr))[0] == 0xff && \
+ ((u8 *)(_addr))[1] == 0xff && \
+ ((u8 *)(_addr))[2] == 0xff && \
+ ((u8 *)(_addr))[3] == 0xff && \
+ ((u8 *)(_addr))[4] == 0xff && \
+ ((u8 *)(_addr))[5] == 0xff) /* Is Broadcast Address? */
+
+
+#endif /* #ifndef __INC_ETHERNET_H */
diff --git a/drivers/staging/rtl8188eu/include/h2clbk.h b/drivers/staging/rtl8188eu/include/h2clbk.h
new file mode 100644
index 0000000..e595030
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/h2clbk.h
@@ -0,0 +1,35 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+
+#define _H2CLBK_H_
+
+
+#include <rtl8711_spec.h>
+#include <TypeDef.h>
+
+
+void _lbk_cmd(struct adapter *adapter);
+
+void _lbk_rsp(struct adapter *adapter);
+
+void _lbk_evt(IN struct adapter *adapter);
+
+void h2c_event_callback(unsigned char *dev, unsigned char *pbuf);
diff --git a/drivers/staging/rtl8188eu/include/hal_com.h b/drivers/staging/rtl8188eu/include/hal_com.h
new file mode 100644
index 0000000..81c2709
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/hal_com.h
@@ -0,0 +1,173 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __HAL_COMMON_H__
+#define __HAL_COMMON_H__
+
+/* */
+/* Rate Definition */
+/* */
+/* CCK */
+#define RATR_1M 0x00000001
+#define RATR_2M 0x00000002
+#define RATR_55M 0x00000004
+#define RATR_11M 0x00000008
+/* OFDM */
+#define RATR_6M 0x00000010
+#define RATR_9M 0x00000020
+#define RATR_12M 0x00000040
+#define RATR_18M 0x00000080
+#define RATR_24M 0x00000100
+#define RATR_36M 0x00000200
+#define RATR_48M 0x00000400
+#define RATR_54M 0x00000800
+/* MCS 1 Spatial Stream */
+#define RATR_MCS0 0x00001000
+#define RATR_MCS1 0x00002000
+#define RATR_MCS2 0x00004000
+#define RATR_MCS3 0x00008000
+#define RATR_MCS4 0x00010000
+#define RATR_MCS5 0x00020000
+#define RATR_MCS6 0x00040000
+#define RATR_MCS7 0x00080000
+/* MCS 2 Spatial Stream */
+#define RATR_MCS8 0x00100000
+#define RATR_MCS9 0x00200000
+#define RATR_MCS10 0x00400000
+#define RATR_MCS11 0x00800000
+#define RATR_MCS12 0x01000000
+#define RATR_MCS13 0x02000000
+#define RATR_MCS14 0x04000000
+#define RATR_MCS15 0x08000000
+
+/* CCK */
+#define RATE_1M BIT(0)
+#define RATE_2M BIT(1)
+#define RATE_5_5M BIT(2)
+#define RATE_11M BIT(3)
+/* OFDM */
+#define RATE_6M BIT(4)
+#define RATE_9M BIT(5)
+#define RATE_12M BIT(6)
+#define RATE_18M BIT(7)
+#define RATE_24M BIT(8)
+#define RATE_36M BIT(9)
+#define RATE_48M BIT(10)
+#define RATE_54M BIT(11)
+/* MCS 1 Spatial Stream */
+#define RATE_MCS0 BIT(12)
+#define RATE_MCS1 BIT(13)
+#define RATE_MCS2 BIT(14)
+#define RATE_MCS3 BIT(15)
+#define RATE_MCS4 BIT(16)
+#define RATE_MCS5 BIT(17)
+#define RATE_MCS6 BIT(18)
+#define RATE_MCS7 BIT(19)
+/* MCS 2 Spatial Stream */
+#define RATE_MCS8 BIT(20)
+#define RATE_MCS9 BIT(21)
+#define RATE_MCS10 BIT(22)
+#define RATE_MCS11 BIT(23)
+#define RATE_MCS12 BIT(24)
+#define RATE_MCS13 BIT(25)
+#define RATE_MCS14 BIT(26)
+#define RATE_MCS15 BIT(27)
+
+/* ALL CCK Rate */
+#define RATE_ALL_CCK (RATR_1M | RATR_2M | RATR_55M | RATR_11M)
+#define RATE_ALL_OFDM_AG (RATR_6M | RATR_9M | RATR_12M | RATR_18M | \
+ RATR_24M | RATR_36M | RATR_48M | RATR_54M)
+#define RATE_ALL_OFDM_1SS (RATR_MCS0 | RATR_MCS1 | RATR_MCS2 | \
+ RATR_MCS3 | RATR_MCS4 | RATR_MCS5|RATR_MCS6 | \
+ RATR_MCS7)
+#define RATE_ALL_OFDM_2SS (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 | \
+ RATR_MCS11 | RATR_MCS12 | RATR_MCS13 | \
+ RATR_MCS14 | RATR_MCS15)
+
+/*------------------------------ Tx Desc definition Macro --------------------*/
+/* pragma mark -- Tx Desc related definition. -- */
+/* Rate */
+/* CCK Rates, TxHT = 0 */
+#define DESC_RATE1M 0x00
+#define DESC_RATE2M 0x01
+#define DESC_RATE5_5M 0x02
+#define DESC_RATE11M 0x03
+
+/* OFDM Rates, TxHT = 0 */
+#define DESC_RATE6M 0x04
+#define DESC_RATE9M 0x05
+#define DESC_RATE12M 0x06
+#define DESC_RATE18M 0x07
+#define DESC_RATE24M 0x08
+#define DESC_RATE36M 0x09
+#define DESC_RATE48M 0x0a
+#define DESC_RATE54M 0x0b
+
+/* MCS Rates, TxHT = 1 */
+#define DESC_RATEMCS0 0x0c
+#define DESC_RATEMCS1 0x0d
+#define DESC_RATEMCS2 0x0e
+#define DESC_RATEMCS3 0x0f
+#define DESC_RATEMCS4 0x10
+#define DESC_RATEMCS5 0x11
+#define DESC_RATEMCS6 0x12
+#define DESC_RATEMCS7 0x13
+#define DESC_RATEMCS8 0x14
+#define DESC_RATEMCS9 0x15
+#define DESC_RATEMCS10 0x16
+#define DESC_RATEMCS11 0x17
+#define DESC_RATEMCS12 0x18
+#define DESC_RATEMCS13 0x19
+#define DESC_RATEMCS14 0x1a
+#define DESC_RATEMCS15 0x1b
+#define DESC_RATEMCS15_SG 0x1c
+#define DESC_RATEMCS32 0x20
+
+/* 1 Byte long (in unit of TU) */
+#define REG_P2P_CTWIN 0x0572
+#define REG_NOA_DESC_SEL 0x05CF
+#define REG_NOA_DESC_DURATION 0x05E0
+#define REG_NOA_DESC_INTERVAL 0x05E4
+#define REG_NOA_DESC_START 0x05E8
+#define REG_NOA_DESC_COUNT 0x05EC
+
+#include "HalVerDef.h"
+void dump_chip_info(struct HAL_VERSION ChipVersion);
+
+
+/* return the final channel plan decision */
+u8 hal_com_get_channel_plan(struct adapter *padapter,
+ u8 hw_channel_plan,
+ u8 sw_channel_plan,
+ u8 def_channel_plan,
+ bool AutoLoadFail
+);
+
+u8 MRateToHwRate(u8 rate);
+
+void HalSetBrateCfg(struct adapter *Adapter, u8 *mBratesOS, u16 *pBrateCfg);
+
+bool Hal_MappingOutPipe(struct adapter *pAdapter, u8 NumOutPipe);
+
+void hal_init_macaddr(struct adapter *adapter);
+
+void c2h_evt_clear(struct adapter *adapter);
+s32 c2h_evt_read(struct adapter *adapter, u8 *buf);
+
+#endif /* __HAL_COMMON_H__ */
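The RATR_*/RATE_ALL_* masks are OR-ed together into the rate bitmaps that get fed to the rate-adaptation code (for instance through ODM_RA_UpdateRateInfo_8188E() declared in Hal8188ERateAdaptive.h). A hedged sketch of composing a legacy-plus-1SS-HT bitmap (helper name is illustrative):

/* Illustrative only: CCK + OFDM + (optionally) MCS0-7 rate mask for a peer. */
static u32 build_ra_mask_sketch(bool ht_1ss)
{
        u32 mask = RATE_ALL_CCK | RATE_ALL_OFDM_AG;

        if (ht_1ss)
                mask |= RATE_ALL_OFDM_1SS;
        return mask;
}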
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
new file mode 100644
index 0000000..439c3c9
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -0,0 +1,426 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __HAL_INTF_H__
+#define __HAL_INTF_H__
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <Hal8188EPhyCfg.h>
+
+enum RTL871X_HCI_TYPE {
+ RTW_PCIE = BIT0,
+ RTW_USB = BIT1,
+ RTW_SDIO = BIT2,
+ RTW_GSPI = BIT3,
+};
+
+enum _CHIP_TYPE {
+ NULL_CHIP_TYPE,
+ RTL8712_8188S_8191S_8192S,
+ RTL8188C_8192C,
+ RTL8192D,
+ RTL8723A,
+ RTL8188E,
+ MAX_CHIP_TYPE
+};
+
+enum hw_variables {
+ HW_VAR_MEDIA_STATUS,
+ HW_VAR_MEDIA_STATUS1,
+ HW_VAR_SET_OPMODE,
+ HW_VAR_MAC_ADDR,
+ HW_VAR_BSSID,
+ HW_VAR_INIT_RTS_RATE,
+ HW_VAR_BASIC_RATE,
+ HW_VAR_TXPAUSE,
+ HW_VAR_BCN_FUNC,
+ HW_VAR_CORRECT_TSF,
+ HW_VAR_CHECK_BSSID,
+ HW_VAR_MLME_DISCONNECT,
+ HW_VAR_MLME_SITESURVEY,
+ HW_VAR_MLME_JOIN,
+ HW_VAR_BEACON_INTERVAL,
+ HW_VAR_SLOT_TIME,
+ HW_VAR_RESP_SIFS,
+ HW_VAR_ACK_PREAMBLE,
+ HW_VAR_SEC_CFG,
+ HW_VAR_BCN_VALID,
+ HW_VAR_RF_TYPE,
+ HW_VAR_DM_FLAG,
+ HW_VAR_DM_FUNC_OP,
+ HW_VAR_DM_FUNC_SET,
+ HW_VAR_DM_FUNC_CLR,
+ HW_VAR_CAM_EMPTY_ENTRY,
+ HW_VAR_CAM_INVALID_ALL,
+ HW_VAR_CAM_WRITE,
+ HW_VAR_CAM_READ,
+ HW_VAR_AC_PARAM_VO,
+ HW_VAR_AC_PARAM_VI,
+ HW_VAR_AC_PARAM_BE,
+ HW_VAR_AC_PARAM_BK,
+ HW_VAR_ACM_CTRL,
+ HW_VAR_AMPDU_MIN_SPACE,
+ HW_VAR_AMPDU_FACTOR,
+ HW_VAR_RXDMA_AGG_PG_TH,
+ HW_VAR_SET_RPWM,
+ HW_VAR_H2C_FW_PWRMODE,
+ HW_VAR_H2C_FW_JOINBSSRPT,
+ HW_VAR_FWLPS_RF_ON,
+ HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
+ HW_VAR_TDLS_WRCR,
+ HW_VAR_TDLS_INIT_CH_SEN,
+ HW_VAR_TDLS_RS_RCR,
+ HW_VAR_TDLS_DONE_CH_SEN,
+ HW_VAR_INITIAL_GAIN,
+ HW_VAR_TRIGGER_GPIO_0,
+ HW_VAR_BT_SET_COEXIST,
+ HW_VAR_BT_ISSUE_DELBA,
+ HW_VAR_CURRENT_ANTENNA,
+ HW_VAR_ANTENNA_DIVERSITY_LINK,
+ HW_VAR_ANTENNA_DIVERSITY_SELECT,
+ HW_VAR_SWITCH_EPHY_WoWLAN,
+ HW_VAR_EFUSE_USAGE,
+ HW_VAR_EFUSE_BYTES,
+ HW_VAR_EFUSE_BT_USAGE,
+ HW_VAR_EFUSE_BT_BYTES,
+ HW_VAR_FIFO_CLEARN_UP,
+ HW_VAR_CHECK_TXBUF,
+	HW_VAR_APFM_ON_MAC, /* Auto FSM to turn on, including clock, isolation
+			     * and power control, for MAC only */
+	/* The valid upper NAV range for HW updates; if the actual value is
+	 * larger than this upper bound, the HW will not update it. */
+	/* Unit: microseconds. 0 means this function is disabled. */
+ HW_VAR_NAV_UPPER,
+ HW_VAR_RPT_TIMER_SETTING,
+ HW_VAR_TX_RPT_MAX_MACID,
+ HW_VAR_H2C_MEDIA_STATUS_RPT,
+ HW_VAR_CHK_HI_QUEUE_EMPTY,
+};
+
+enum hal_def_variable {
+ HAL_DEF_UNDERCORATEDSMOOTHEDPWDB,
+ HAL_DEF_IS_SUPPORT_ANT_DIV,
+ HAL_DEF_CURRENT_ANTENNA,
+ HAL_DEF_DRVINFO_SZ,
+ HAL_DEF_MAX_RECVBUF_SZ,
+ HAL_DEF_RX_PACKET_OFFSET,
+ HAL_DEF_DBG_DUMP_RXPKT,/* for dbg */
+ HAL_DEF_DBG_DM_FUNC,/* for dbg */
+ HAL_DEF_RA_DECISION_RATE,
+ HAL_DEF_RA_SGI,
+ HAL_DEF_PT_PWR_STATUS,
+ HW_VAR_MAX_RX_AMPDU_FACTOR,
+ HW_DEF_RA_INFO_DUMP,
+ HAL_DEF_DBG_DUMP_TXPKT,
+ HW_DEF_FA_CNT_DUMP,
+ HW_DEF_ODM_DBG_FLAG,
+};
+
+enum hal_odm_variable {
+ HAL_ODM_STA_INFO,
+ HAL_ODM_P2P_STATE,
+ HAL_ODM_WIFI_DISPLAY_STATE,
+};
+
+enum hal_intf_ps_func {
+ HAL_USB_SELECT_SUSPEND,
+ HAL_MAX_ID,
+};
+
+typedef s32 (*c2h_id_filter)(u8 id);
+
+struct hal_ops {
+ u32 (*hal_power_on)(struct adapter *padapter);
+ u32 (*hal_init)(struct adapter *padapter);
+ u32 (*hal_deinit)(struct adapter *padapter);
+
+ void (*free_hal_data)(struct adapter *padapter);
+
+ u32 (*inirp_init)(struct adapter *padapter);
+ u32 (*inirp_deinit)(struct adapter *padapter);
+
+ s32 (*init_xmit_priv)(struct adapter *padapter);
+ void (*free_xmit_priv)(struct adapter *padapter);
+
+ s32 (*init_recv_priv)(struct adapter *padapter);
+ void (*free_recv_priv)(struct adapter *padapter);
+
+ void (*InitSwLeds)(struct adapter *padapter);
+ void (*DeInitSwLeds)(struct adapter *padapter);
+
+ void (*dm_init)(struct adapter *padapter);
+ void (*dm_deinit)(struct adapter *padapter);
+ void (*read_chip_version)(struct adapter *padapter);
+
+ void (*init_default_value)(struct adapter *padapter);
+
+ void (*intf_chip_configure)(struct adapter *padapter);
+
+ void (*read_adapter_info)(struct adapter *padapter);
+
+ void (*enable_interrupt)(struct adapter *padapter);
+ void (*disable_interrupt)(struct adapter *padapter);
+ s32 (*interrupt_handler)(struct adapter *padapter);
+
+ void (*set_bwmode_handler)(struct adapter *padapter,
+ enum ht_channel_width Bandwidth,
+ u8 Offset);
+ void (*set_channel_handler)(struct adapter *padapter, u8 channel);
+
+ void (*hal_dm_watchdog)(struct adapter *padapter);
+
+ void (*SetHwRegHandler)(struct adapter *padapter, u8 variable,
+ u8 *val);
+ void (*GetHwRegHandler)(struct adapter *padapter, u8 variable,
+ u8 *val);
+
+ u8 (*GetHalDefVarHandler)(struct adapter *padapter,
+ enum hal_def_variable eVariable,
+ void *pValue);
+ u8 (*SetHalDefVarHandler)(struct adapter *padapter,
+ enum hal_def_variable eVariable,
+ void *pValue);
+
+ void (*GetHalODMVarHandler)(struct adapter *padapter,
+ enum hal_odm_variable eVariable,
+ void *pValue1, bool bSet);
+ void (*SetHalODMVarHandler)(struct adapter *padapter,
+ enum hal_odm_variable eVariable,
+ void *pValue1, bool bSet);
+
+ void (*UpdateRAMaskHandler)(struct adapter *padapter,
+ u32 mac_id, u8 rssi_level);
+ void (*SetBeaconRelatedRegistersHandler)(struct adapter *padapter);
+
+ void (*Add_RateATid)(struct adapter *adapter, u32 bitmap, u8 arg,
+ u8 rssi_level);
+ void (*run_thread)(struct adapter *adapter);
+ void (*cancel_thread)(struct adapter *adapter);
+
+ u8 (*AntDivBeforeLinkHandler)(struct adapter *adapter);
+ void (*AntDivCompareHandler)(struct adapter *adapter,
+ struct wlan_bssid_ex *dst,
+ struct wlan_bssid_ex *src);
+ u8 (*interface_ps_func)(struct adapter *padapter,
+ enum hal_intf_ps_func efunc_id, u8 *val);
+
+ s32 (*hal_xmit)(struct adapter *padapter,
+ struct xmit_frame *pxmitframe);
+ s32 (*mgnt_xmit)(struct adapter *padapter,
+ struct xmit_frame *pmgntframe);
+
+ u32 (*read_bbreg)(struct adapter *padapter, u32 RegAddr,
+ u32 BitMask);
+ void (*write_bbreg)(struct adapter *padapter, u32 RegAddr,
+ u32 BitMask, u32 Data);
+ u32 (*read_rfreg)(struct adapter *padapter,
+ enum rf_radio_path eRFPath, u32 RegAddr,
+ u32 BitMask);
+ void (*write_rfreg)(struct adapter *padapter,
+ enum rf_radio_path eRFPath, u32 RegAddr,
+ u32 BitMask, u32 Data);
+
+ void (*EfusePowerSwitch)(struct adapter *padapter, u8 bWrite,
+ u8 PwrState);
+ void (*ReadEFuse)(struct adapter *padapter, u8 efuseType, u16 _offset,
+ u16 _size_byte, u8 *pbuf, bool bPseudoTest);
+ void (*EFUSEGetEfuseDefinition)(struct adapter *padapter, u8 efuseType,
+ u8 type, void *pOut, bool bPseudoTest);
+ u16 (*EfuseGetCurrentSize)(struct adapter *padapter, u8 efuseType,
+ bool bPseudoTest);
+ int (*Efuse_PgPacketRead)(struct adapter *adapter, u8 offset,
+ u8 *data, bool bPseudoTest);
+ int (*Efuse_PgPacketWrite)(struct adapter *padapter, u8 offset,
+ u8 word_en, u8 *data, bool bPseudoTest);
+ u8 (*Efuse_WordEnableDataWrite)(struct adapter *padapter,
+ u16 efuse_addr, u8 word_en,
+ u8 *data, bool bPseudoTest);
+ bool (*Efuse_PgPacketWrite_BT)(struct adapter *padapter, u8 offset,
+ u8 word_en, u8 *data, bool test);
+
+ void (*sreset_init_value)(struct adapter *padapter);
+ void (*sreset_reset_value)(struct adapter *padapter);
+ void (*silentreset)(struct adapter *padapter);
+ void (*sreset_xmit_status_check)(struct adapter *padapter);
+ void (*sreset_linked_status_check) (struct adapter *padapter);
+ u8 (*sreset_get_wifi_status)(struct adapter *padapter);
+
+ int (*IOL_exec_cmds_sync)(struct adapter *padapter,
+ struct xmit_frame *frame, u32 max_wait,
+ u32 bndy_cnt);
+
+ void (*hal_notch_filter)(struct adapter *adapter, bool enable);
+ void (*hal_reset_security_engine)(struct adapter *adapter);
+ s32 (*c2h_handler)(struct adapter *padapter,
+ struct c2h_evt_hdr *c2h_evt);
+ c2h_id_filter c2h_id_filter_ccx;
+};
+
+enum rt_eeprom_type {
+ EEPROM_93C46,
+ EEPROM_93C56,
+ EEPROM_BOOT_EFUSE,
+};
+
+#define RF_CHANGE_BY_INIT 0
+#define RF_CHANGE_BY_IPS BIT28
+#define RF_CHANGE_BY_PS BIT29
+#define RF_CHANGE_BY_HW BIT30
+#define RF_CHANGE_BY_SW BIT31
+
+enum hardware_type {
+ HARDWARE_TYPE_RTL8180,
+ HARDWARE_TYPE_RTL8185,
+ HARDWARE_TYPE_RTL8187,
+ HARDWARE_TYPE_RTL8188,
+ HARDWARE_TYPE_RTL8190P,
+ HARDWARE_TYPE_RTL8192E,
+ HARDWARE_TYPE_RTL819xU,
+ HARDWARE_TYPE_RTL8192SE,
+ HARDWARE_TYPE_RTL8192SU,
+ HARDWARE_TYPE_RTL8192CE,
+ HARDWARE_TYPE_RTL8192CU,
+ HARDWARE_TYPE_RTL8192DE,
+ HARDWARE_TYPE_RTL8192DU,
+ HARDWARE_TYPE_RTL8723AE,
+ HARDWARE_TYPE_RTL8723AU,
+ HARDWARE_TYPE_RTL8723AS,
+ HARDWARE_TYPE_RTL8188EE,
+ HARDWARE_TYPE_RTL8188EU,
+ HARDWARE_TYPE_RTL8188ES,
+ HARDWARE_TYPE_MAX,
+};
+
+/* RTL8188E Series */
+#define IS_HARDWARE_TYPE_8188EE(_Adapter) \
+(((struct adapter *)_Adapter)->HardwareType == HARDWARE_TYPE_RTL8188EE)
+#define IS_HARDWARE_TYPE_8188EU(_Adapter) \
+(((struct adapter *)_Adapter)->HardwareType == HARDWARE_TYPE_RTL8188EU)
+#define IS_HARDWARE_TYPE_8188ES(_Adapter) \
+(((struct adapter *)_Adapter)->HardwareType == HARDWARE_TYPE_RTL8188ES)
+#define IS_HARDWARE_TYPE_8188E(_Adapter) \
+(IS_HARDWARE_TYPE_8188EE(_Adapter) || IS_HARDWARE_TYPE_8188EU(_Adapter) || \
+ IS_HARDWARE_TYPE_8188ES(_Adapter))
+
+#define GET_EEPROM_EFUSE_PRIV(adapter) (&adapter->eeprompriv)
+
+#define is_boot_from_eeprom(adapter) (adapter->eeprompriv.EepromOrEfuse)
+
+void rtw_hal_def_value_init(struct adapter *padapter);
+
+void rtw_hal_free_data(struct adapter *padapter);
+
+void rtw_hal_dm_init(struct adapter *padapter);
+void rtw_hal_dm_deinit(struct adapter *padapter);
+void rtw_hal_sw_led_init(struct adapter *padapter);
+void rtw_hal_sw_led_deinit(struct adapter *padapter);
+
+u32 rtw_hal_power_on(struct adapter *padapter);
+uint rtw_hal_init(struct adapter *padapter);
+uint rtw_hal_deinit(struct adapter *padapter);
+void rtw_hal_stop(struct adapter *padapter);
+void rtw_hal_set_hwreg(struct adapter *padapter, u8 variable, u8 *val);
+void rtw_hal_get_hwreg(struct adapter *padapter, u8 variable, u8 *val);
+
+void rtw_hal_chip_configure(struct adapter *padapter);
+void rtw_hal_read_chip_info(struct adapter *padapter);
+void rtw_hal_read_chip_version(struct adapter *padapter);
+
+u8 rtw_hal_set_def_var(struct adapter *padapter,
+ enum hal_def_variable eVariable, void *pValue);
+u8 rtw_hal_get_def_var(struct adapter *padapter,
+ enum hal_def_variable eVariable, void *pValue);
+
+void rtw_hal_set_odm_var(struct adapter *padapter,
+ enum hal_odm_variable eVariable, void *pValue1,
+ bool bSet);
+void rtw_hal_get_odm_var(struct adapter *padapter,
+ enum hal_odm_variable eVariable,
+ void *pValue1, bool bSet);
+
+void rtw_hal_enable_interrupt(struct adapter *padapter);
+void rtw_hal_disable_interrupt(struct adapter *padapter);
+
+u32 rtw_hal_inirp_init(struct adapter *padapter);
+u32 rtw_hal_inirp_deinit(struct adapter *padapter);
+
+u8 rtw_hal_intf_ps_func(struct adapter *padapter,
+ enum hal_intf_ps_func efunc_id, u8 *val);
+
+s32 rtw_hal_xmit(struct adapter *padapter, struct xmit_frame *pxmitframe);
+s32 rtw_hal_mgnt_xmit(struct adapter *padapter,
+ struct xmit_frame *pmgntframe);
+
+s32 rtw_hal_init_xmit_priv(struct adapter *padapter);
+void rtw_hal_free_xmit_priv(struct adapter *padapter);
+
+s32 rtw_hal_init_recv_priv(struct adapter *padapter);
+void rtw_hal_free_recv_priv(struct adapter *padapter);
+
+void rtw_hal_update_ra_mask(struct adapter *padapter, u32 mac_id, u8 level);
+void rtw_hal_add_ra_tid(struct adapter *adapt, u32 bitmap, u8 arg, u8 level);
+void rtw_hal_clone_data(struct adapter *dst_adapt,
+ struct adapter *src_adapt);
+void rtw_hal_start_thread(struct adapter *padapter);
+void rtw_hal_stop_thread(struct adapter *padapter);
+
+void rtw_hal_bcn_related_reg_setting(struct adapter *padapter);
+
+u32 rtw_hal_read_bbreg(struct adapter *padapter, u32 RegAddr, u32 BitMask);
+void rtw_hal_write_bbreg(struct adapter *padapter, u32 RegAddr, u32 BitMask,
+ u32 Data);
+u32 rtw_hal_read_rfreg(struct adapter *padapter, enum rf_radio_path eRFPath,
+ u32 RegAddr, u32 BitMask);
+void rtw_hal_write_rfreg(struct adapter *padapter,
+ enum rf_radio_path eRFPath, u32 RegAddr,
+ u32 BitMask, u32 Data);
+
+s32 rtw_hal_interrupt_handler(struct adapter *padapter);
+
+void rtw_hal_set_bwmode(struct adapter *padapter,
+ enum ht_channel_width Bandwidth, u8 Offset);
+void rtw_hal_set_chan(struct adapter *padapter, u8 channel);
+void rtw_hal_dm_watchdog(struct adapter *padapter);
+
+u8 rtw_hal_antdiv_before_linked(struct adapter *padapter);
+void rtw_hal_antdiv_rssi_compared(struct adapter *padapter,
+ struct wlan_bssid_ex *dst,
+ struct wlan_bssid_ex *src);
+
+void rtw_hal_sreset_init(struct adapter *padapter);
+void rtw_hal_sreset_reset(struct adapter *padapter);
+void rtw_hal_sreset_reset_value(struct adapter *padapter);
+void rtw_hal_sreset_xmit_status_check(struct adapter *padapter);
+void rtw_hal_sreset_linked_status_check(struct adapter *padapter);
+u8 rtw_hal_sreset_get_wifi_status(struct adapter *padapter);
+
+int rtw_hal_iol_cmd(struct adapter *adapter, struct xmit_frame *xmit_frame,
+ u32 max_wating_ms, u32 bndy_cnt);
+
+void rtw_hal_notch_filter(struct adapter *adapter, bool enable);
+void rtw_hal_reset_security_engine(struct adapter *adapter);
+
+s32 rtw_hal_c2h_handler(struct adapter *adapter,
+ struct c2h_evt_hdr *c2h_evt);
+c2h_id_filter rtw_hal_c2h_id_filter_ccx(struct adapter *adapter);
+void indicate_wx_scan_complete_event(struct adapter *padapter);
+u8 rtw_do_join(struct adapter *padapter);
+
+#endif /* __HAL_INTF_H__ */
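The hal_intf layer is a thin indirection: the chip-specific code fills in a struct hal_ops at probe time, and the generic rtw_hal_* entry points declared above simply forward to the matching function pointer. A minimal sketch of that dispatch pattern follows; it assumes the ops table lives in an adapter field named HalFunc, which is defined elsewhere in the driver and is used here purely for illustration.

/* Dispatch sketch only; the HalFunc field is assumed, not shown in this header. */
void rtw_hal_set_hwreg(struct adapter *padapter, u8 variable, u8 *val)
{
	if (padapter->HalFunc.SetHwRegHandler)
		padapter->HalFunc.SetHwRegHandler(padapter, variable, val);
}

u32 rtw_hal_read_bbreg(struct adapter *padapter, u32 regaddr, u32 bitmask)
{
	if (!padapter->HalFunc.read_bbreg)
		return 0;
	return padapter->HalFunc.read_bbreg(padapter, regaddr, bitmask);
}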
diff --git a/drivers/staging/rtl8188eu/include/ieee80211.h b/drivers/staging/rtl8188eu/include/ieee80211.h
new file mode 100644
index 0000000..cd37ea4
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/ieee80211.h
@@ -0,0 +1,1274 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __IEEE80211_H
+#define __IEEE80211_H
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include "wifi.h"
+#include <linux/wireless.h>
+
+#define MGMT_QUEUE_NUM 5
+
+#define ETH_ALEN 6
+#define ETH_TYPE_LEN 2
+#define PAYLOAD_TYPE_LEN 1
+
+#ifdef CONFIG_88EU_AP_MODE
+
+#define RTL_IOCTL_HOSTAPD (SIOCIWFIRSTPRIV + 28)
+
+/* RTL871X_IOCTL_HOSTAPD ioctl() cmd: */
+enum {
+ RTL871X_HOSTAPD_FLUSH = 1,
+ RTL871X_HOSTAPD_ADD_STA = 2,
+ RTL871X_HOSTAPD_REMOVE_STA = 3,
+ RTL871X_HOSTAPD_GET_INFO_STA = 4,
+ /* REMOVED: PRISM2_HOSTAPD_RESET_TXEXC_STA = 5, */
+ RTL871X_HOSTAPD_GET_WPAIE_STA = 5,
+ RTL871X_SET_ENCRYPTION = 6,
+ RTL871X_GET_ENCRYPTION = 7,
+ RTL871X_HOSTAPD_SET_FLAGS_STA = 8,
+ RTL871X_HOSTAPD_GET_RID = 9,
+ RTL871X_HOSTAPD_SET_RID = 10,
+ RTL871X_HOSTAPD_SET_ASSOC_AP_ADDR = 11,
+ RTL871X_HOSTAPD_SET_GENERIC_ELEMENT = 12,
+ RTL871X_HOSTAPD_MLME = 13,
+ RTL871X_HOSTAPD_SCAN_REQ = 14,
+ RTL871X_HOSTAPD_STA_CLEAR_STATS = 15,
+ RTL871X_HOSTAPD_SET_BEACON = 16,
+ RTL871X_HOSTAPD_SET_WPS_BEACON = 17,
+ RTL871X_HOSTAPD_SET_WPS_PROBE_RESP = 18,
+ RTL871X_HOSTAPD_SET_WPS_ASSOC_RESP = 19,
+ RTL871X_HOSTAPD_SET_HIDDEN_SSID = 20,
+ RTL871X_HOSTAPD_SET_MACADDR_ACL = 21,
+ RTL871X_HOSTAPD_ACL_ADD_STA = 22,
+ RTL871X_HOSTAPD_ACL_REMOVE_STA = 23,
+};
+
+/* STA flags */
+#define WLAN_STA_AUTH BIT(0)
+#define WLAN_STA_ASSOC BIT(1)
+#define WLAN_STA_PS BIT(2)
+#define WLAN_STA_TIM BIT(3)
+#define WLAN_STA_PERM BIT(4)
+#define WLAN_STA_AUTHORIZED BIT(5)
+#define WLAN_STA_PENDING_POLL BIT(6) /* pending activity poll not ACKed */
+#define WLAN_STA_SHORT_PREAMBLE BIT(7)
+#define WLAN_STA_PREAUTH BIT(8)
+#define WLAN_STA_WME BIT(9)
+#define WLAN_STA_MFP BIT(10)
+#define WLAN_STA_HT BIT(11)
+#define WLAN_STA_WPS BIT(12)
+#define WLAN_STA_MAYBE_WPS BIT(13)
+#define WLAN_STA_NONERP BIT(31)
+
+#endif
+
+#define IEEE_CMD_SET_WPA_PARAM 1
+#define IEEE_CMD_SET_WPA_IE 2
+#define IEEE_CMD_SET_ENCRYPTION 3
+#define IEEE_CMD_MLME 4
+
+#define IEEE_PARAM_WPA_ENABLED 1
+#define IEEE_PARAM_TKIP_COUNTERMEASURES 2
+#define IEEE_PARAM_DROP_UNENCRYPTED 3
+#define IEEE_PARAM_PRIVACY_INVOKED 4
+#define IEEE_PARAM_AUTH_ALGS 5
+#define IEEE_PARAM_IEEE_802_1X 6
+#define IEEE_PARAM_WPAX_SELECT 7
+
+#define AUTH_ALG_OPEN_SYSTEM 0x1
+#define AUTH_ALG_SHARED_KEY 0x2
+#define AUTH_ALG_LEAP 0x00000004
+
+#define IEEE_MLME_STA_DEAUTH 1
+#define IEEE_MLME_STA_DISASSOC 2
+
+#define IEEE_CRYPT_ERR_UNKNOWN_ALG 2
+#define IEEE_CRYPT_ERR_UNKNOWN_ADDR 3
+#define IEEE_CRYPT_ERR_CRYPT_INIT_FAILED 4
+#define IEEE_CRYPT_ERR_KEY_SET_FAILED 5
+#define IEEE_CRYPT_ERR_TX_KEY_SET_FAILED 6
+#define IEEE_CRYPT_ERR_CARD_CONF_FAILED 7
+
+
+#define IEEE_CRYPT_ALG_NAME_LEN 16
+
+#define WPA_CIPHER_NONE BIT(0)
+#define WPA_CIPHER_WEP40 BIT(1)
+#define WPA_CIPHER_WEP104 BIT(2)
+#define WPA_CIPHER_TKIP BIT(3)
+#define WPA_CIPHER_CCMP BIT(4)
+
+
+
+#define WPA_SELECTOR_LEN 4
+extern u8 RTW_WPA_OUI_TYPE[];
+extern u16 RTW_WPA_VERSION;
+extern u8 WPA_AUTH_KEY_MGMT_NONE[];
+extern u8 WPA_AUTH_KEY_MGMT_UNSPEC_802_1X[];
+extern u8 WPA_AUTH_KEY_MGMT_PSK_OVER_802_1X[];
+extern u8 WPA_CIPHER_SUITE_NONE[];
+extern u8 WPA_CIPHER_SUITE_WEP40[];
+extern u8 WPA_CIPHER_SUITE_TKIP[];
+extern u8 WPA_CIPHER_SUITE_WRAP[];
+extern u8 WPA_CIPHER_SUITE_CCMP[];
+extern u8 WPA_CIPHER_SUITE_WEP104[];
+
+
+#define RSN_HEADER_LEN 4
+#define RSN_SELECTOR_LEN 4
+
+extern u16 RSN_VERSION_BSD;
+extern u8 RSN_AUTH_KEY_MGMT_UNSPEC_802_1X[];
+extern u8 RSN_AUTH_KEY_MGMT_PSK_OVER_802_1X[];
+extern u8 RSN_CIPHER_SUITE_NONE[];
+extern u8 RSN_CIPHER_SUITE_WEP40[];
+extern u8 RSN_CIPHER_SUITE_TKIP[];
+extern u8 RSN_CIPHER_SUITE_WRAP[];
+extern u8 RSN_CIPHER_SUITE_CCMP[];
+extern u8 RSN_CIPHER_SUITE_WEP104[];
+
+enum ratr_table_mode {
+	RATR_INX_WIRELESS_NGB = 0,	/* BGN 40 MHz 2SS 1SS */
+	RATR_INX_WIRELESS_NG = 1,	/* GN or N */
+	RATR_INX_WIRELESS_NB = 2,	/* BGN 20 MHz 2SS 1SS or BN */
+ RATR_INX_WIRELESS_N = 3,
+ RATR_INX_WIRELESS_GB = 4,
+ RATR_INX_WIRELESS_G = 5,
+ RATR_INX_WIRELESS_B = 6,
+ RATR_INX_WIRELESS_MC = 7,
+ RATR_INX_WIRELESS_AC_N = 8,
+};
+
+enum NETWORK_TYPE {
+ WIRELESS_INVALID = 0,
+ /* Sub-Element */
+ WIRELESS_11B = BIT(0), /* tx:cck only, rx:cck only, hw: cck */
+ WIRELESS_11G = BIT(1), /* tx:ofdm only, rx:ofdm & cck, hw:cck & ofdm*/
+ WIRELESS_11A = BIT(2), /* tx:ofdm only, rx: ofdm only, hw:ofdm only */
+ WIRELESS_11_24N = BIT(3), /* tx:MCS only, rx:MCS & cck, hw:MCS & cck */
+ WIRELESS_11_5N = BIT(4), /* tx:MCS only, rx:MCS & ofdm, hw:ofdm only */
+ WIRELESS_AC = BIT(6),
+
+ /* Combination */
+ /* tx: cck & ofdm, rx: cck & ofdm & MCS, hw: cck & ofdm */
+ WIRELESS_11BG = (WIRELESS_11B | WIRELESS_11G),
+ /* tx: ofdm & MCS, rx: ofdm & cck & MCS, hw: cck & ofdm */
+ WIRELESS_11G_24N = (WIRELESS_11G | WIRELESS_11_24N),
+ /* tx: ofdm & MCS, rx: ofdm & MCS, hw: ofdm only */
+ WIRELESS_11A_5N = (WIRELESS_11A | WIRELESS_11_5N),
+ /* tx: ofdm & cck & MCS, rx: ofdm & cck & MCS, hw: ofdm & cck */
+ WIRELESS_11BG_24N = (WIRELESS_11B | WIRELESS_11G | WIRELESS_11_24N),
+ /* tx: ofdm & MCS, rx: ofdm & MCS, hw: ofdm only */
+ WIRELESS_11AGN = (WIRELESS_11A | WIRELESS_11G | WIRELESS_11_24N |
+ WIRELESS_11_5N),
+ WIRELESS_11ABGN = (WIRELESS_11A | WIRELESS_11B | WIRELESS_11G |
+ WIRELESS_11_24N | WIRELESS_11_5N),
+};
+
+#define SUPPORTED_24G_NETTYPE_MSK \
+ (WIRELESS_11B | WIRELESS_11G | WIRELESS_11_24N)
+#define SUPPORTED_5G_NETTYPE_MSK \
+ (WIRELESS_11A | WIRELESS_11_5N)
+
+#define IsSupported24G(NetType) \
+ ((NetType) & SUPPORTED_24G_NETTYPE_MSK ? true : false)
+#define IsSupported5G(NetType) \
+ ((NetType) & SUPPORTED_5G_NETTYPE_MSK ? true : false)
+
+#define IsEnableHWCCK(NetType) \
+ IsSupported24G(NetType)
+#define IsEnableHWOFDM(NetType) \
+ ((NetType) & (WIRELESS_11G | WIRELESS_11_24N | \
+ SUPPORTED_5G_NETTYPE_MSK) ? true : false)
+
+#define IsSupportedRxCCK(NetType) IsEnableHWCCK(NetType)
+#define IsSupportedRxOFDM(NetType) IsEnableHWOFDM(NetType)
+#define IsSupportedRxMCS(NetType) IsEnableHWOFDM(NetType)
+
+#define IsSupportedTxCCK(NetType) \
+ ((NetType) & (WIRELESS_11B) ? true : false)
+#define IsSupportedTxOFDM(NetType) \
+ ((NetType) & (WIRELESS_11G|WIRELESS_11A) ? true : false)
+#define IsSupportedTxMCS(NetType) \
+ ((NetType) & (WIRELESS_11_24N|WIRELESS_11_5N) ? true : false)
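Because the NETWORK_TYPE values are single bits that compose into the combination entries, the IsSupported*/IsSupportedTx* helpers above reduce to plain mask tests. A standalone check of a 2.4 GHz b/g/n network type, with the bit layout restated locally so the example compiles on its own:

#include <assert.h>

/* Bit layout restated from the enum above. */
#define W11B    (1 << 0)
#define W11G    (1 << 1)
#define W11A    (1 << 2)
#define W11_24N (1 << 3)
#define W11_5N  (1 << 4)
#define MSK_24G (W11B | W11G | W11_24N)
#define MSK_5G  (W11A | W11_5N)

int main(void)
{
	unsigned int nettype = W11B | W11G | W11_24N;	/* a 2.4 GHz b/g/n peer */

	assert(nettype & MSK_24G);		/* IsSupported24G(nettype)   -> true  */
	assert(!(nettype & MSK_5G));		/* IsSupported5G(nettype)    -> false */
	assert(nettype & W11B);			/* IsSupportedTxCCK(nettype) -> true  */
	assert(nettype & (W11_24N | W11_5N));	/* IsSupportedTxMCS(nettype) -> true  */
	return 0;
}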
+
+
+struct ieee_param {
+ u32 cmd;
+ u8 sta_addr[ETH_ALEN];
+ union {
+ struct {
+ u8 name;
+ u32 value;
+ } wpa_param;
+ struct {
+ u32 len;
+ u8 reserved[32];
+ u8 data[0];
+ } wpa_ie;
+ struct {
+ int command;
+ int reason_code;
+ } mlme;
+ struct {
+ u8 alg[IEEE_CRYPT_ALG_NAME_LEN];
+ u8 set_tx;
+ u32 err;
+ u8 idx;
+ u8 seq[8]; /* sequence counter (set: RX, get: TX) */
+ u16 key_len;
+ u8 key[0];
+ } crypt;
+#ifdef CONFIG_88EU_AP_MODE
+ struct {
+ u16 aid;
+ u16 capability;
+ int flags;
+ u8 tx_supp_rates[16];
+ struct rtw_ieee80211_ht_cap ht_cap;
+ } add_sta;
+ struct {
+ u8 reserved[2];/* for set max_num_sta */
+ u8 buf[0];
+ } bcn_ie;
+#endif
+
+ } u;
+};
+
+#ifdef CONFIG_88EU_AP_MODE
+struct ieee_param_ex {
+ u32 cmd;
+ u8 sta_addr[ETH_ALEN];
+ u8 data[0];
+};
+
+struct sta_data {
+ u16 aid;
+ u16 capability;
+ int flags;
+ u32 sta_set;
+ u8 tx_supp_rates[16];
+ u32 tx_supp_rates_len;
+ struct rtw_ieee80211_ht_cap ht_cap;
+ u64 rx_pkts;
+ u64 rx_bytes;
+ u64 rx_drops;
+ u64 tx_pkts;
+ u64 tx_bytes;
+ u64 tx_drops;
+};
+#endif
+
+#define IEEE80211_DATA_LEN 2304
+/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
+ 6.2.1.1.2.
+
+ The figure in section 7.1.2 suggests a body size of up to 2312
+   bytes is allowed, which is a bit confusing; I suspect this
+ represents the 2304 bytes of real data, plus a possible 8 bytes of
+ WEP IV and ICV. (this interpretation suggested by Ramiro Barreiro) */
+
+
+#define IEEE80211_HLEN 30
+#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
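The arithmetic behind these two constants: the 2304-byte maximum MSDU plus the 30-byte 4-address header gives the 2334-byte frame buffer bound, while the 2312-byte figure quoted from the standard is the same MSDU plus 8 bytes of WEP IV and ICV. Restated as compile-time checks:

#include <assert.h>

/* Same numbers as the defines above, restated to show where they come from. */
static_assert(2304 + 30 == 2334,
	      "IEEE80211_DATA_LEN + IEEE80211_HLEN == IEEE80211_FRAME_LEN");
static_assert(2304 + 4 + 4 == 2312,
	      "MSDU plus WEP IV and ICV, the body size in 802.11 section 7.1.2");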
+
+
+/* this is stolen from ipw2200 driver */
+#define IEEE_IBSS_MAC_HASH_SIZE 31
+
+struct ieee_ibss_seq {
+ u8 mac[ETH_ALEN];
+ u16 seq_num;
+ u16 frag_num;
+ unsigned long packet_time;
+ struct list_head list;
+};
+
+struct rtw_ieee80211_hdr {
+ u16 frame_ctl;
+ u16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ u16 seq_ctl;
+ u8 addr4[ETH_ALEN];
+} __packed;
+
+struct rtw_ieee80211_hdr_3addr {
+ u16 frame_ctl;
+ u16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ u16 seq_ctl;
+} __packed;
+
+struct rtw_ieee80211_hdr_qos {
+ u16 frame_ctl;
+ u16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ u16 seq_ctl;
+ u8 addr4[ETH_ALEN];
+ u16 qc;
+} __packed;
+
+struct rtw_ieee80211_hdr_3addr_qos {
+ u16 frame_ctl;
+ u16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ u16 seq_ctl;
+ u16 qc;
+} __packed;
+
+struct eapol {
+ u8 snap[6];
+ u16 ethertype;
+ u8 version;
+ u8 type;
+ u16 length;
+} __packed;
+
+enum eap_type {
+ EAP_PACKET = 0,
+ EAPOL_START,
+ EAPOL_LOGOFF,
+ EAPOL_KEY,
+ EAPOL_ENCAP_ASF_ALERT
+};
+
+#define IEEE80211_3ADDR_LEN 24
+#define IEEE80211_4ADDR_LEN 30
+#define IEEE80211_FCS_LEN 4
+
+#define MIN_FRAG_THRESHOLD 256U
+#define MAX_FRAG_THRESHOLD 2346U
+
+/* Frame control field constants */
+#define RTW_IEEE80211_FCTL_VERS 0x0003
+#define RTW_IEEE80211_FCTL_FTYPE 0x000c
+#define RTW_IEEE80211_FCTL_STYPE 0x00f0
+#define RTW_IEEE80211_FCTL_TODS 0x0100
+#define RTW_IEEE80211_FCTL_FROMDS 0x0200
+#define RTW_IEEE80211_FCTL_MOREFRAGS 0x0400
+#define RTW_IEEE80211_FCTL_RETRY 0x0800
+#define RTW_IEEE80211_FCTL_PM 0x1000
+#define RTW_IEEE80211_FCTL_MOREDATA 0x2000
+#define RTW_IEEE80211_FCTL_PROTECTED 0x4000
+#define RTW_IEEE80211_FCTL_ORDER 0x8000
+#define RTW_IEEE80211_FCTL_CTL_EXT 0x0f00
+
+#define RTW_IEEE80211_FTYPE_MGMT 0x0000
+#define RTW_IEEE80211_FTYPE_CTL 0x0004
+#define RTW_IEEE80211_FTYPE_DATA 0x0008
+#define RTW_IEEE80211_FTYPE_EXT 0x000c
+
+/* management */
+#define RTW_IEEE80211_STYPE_ASSOC_REQ 0x0000
+#define RTW_IEEE80211_STYPE_ASSOC_RESP 0x0010
+#define RTW_IEEE80211_STYPE_REASSOC_REQ 0x0020
+#define RTW_IEEE80211_STYPE_REASSOC_RESP 0x0030
+#define RTW_IEEE80211_STYPE_PROBE_REQ 0x0040
+#define RTW_IEEE80211_STYPE_PROBE_RESP 0x0050
+#define RTW_IEEE80211_STYPE_BEACON 0x0080
+#define RTW_IEEE80211_STYPE_ATIM 0x0090
+#define RTW_IEEE80211_STYPE_DISASSOC 0x00A0
+#define RTW_IEEE80211_STYPE_AUTH 0x00B0
+#define RTW_IEEE80211_STYPE_DEAUTH 0x00C0
+#define RTW_IEEE80211_STYPE_ACTION 0x00D0
+
+/* control */
+#define RTW_IEEE80211_STYPE_CTL_EXT 0x0060
+#define RTW_IEEE80211_STYPE_BACK_REQ 0x0080
+#define RTW_IEEE80211_STYPE_BACK 0x0090
+#define RTW_IEEE80211_STYPE_PSPOLL 0x00A0
+#define RTW_IEEE80211_STYPE_RTS 0x00B0
+#define RTW_IEEE80211_STYPE_CTS 0x00C0
+#define RTW_IEEE80211_STYPE_ACK 0x00D0
+#define RTW_IEEE80211_STYPE_CFEND 0x00E0
+#define RTW_IEEE80211_STYPE_CFENDACK 0x00F0
+
+/* data */
+#define RTW_IEEE80211_STYPE_DATA 0x0000
+#define RTW_IEEE80211_STYPE_DATA_CFACK 0x0010
+#define RTW_IEEE80211_STYPE_DATA_CFPOLL 0x0020
+#define RTW_IEEE80211_STYPE_DATA_CFACKPOLL 0x0030
+#define RTW_IEEE80211_STYPE_NULLFUNC 0x0040
+#define RTW_IEEE80211_STYPE_CFACK 0x0050
+#define RTW_IEEE80211_STYPE_CFPOLL 0x0060
+#define RTW_IEEE80211_STYPE_CFACKPOLL 0x0070
+#define RTW_IEEE80211_STYPE_QOS_DATA 0x0080
+#define RTW_IEEE80211_STYPE_QOS_DATA_CFACK 0x0090
+#define RTW_IEEE80211_STYPE_QOS_DATA_CFPOLL 0x00A0
+#define RTW_IEEE80211_STYPE_QOS_DATA_CFACKPOLL 0x00B0
+#define RTW_IEEE80211_STYPE_QOS_NULLFUNC 0x00C0
+#define RTW_IEEE80211_STYPE_QOS_CFACK 0x00D0
+#define RTW_IEEE80211_STYPE_QOS_CFPOLL 0x00E0
+#define RTW_IEEE80211_STYPE_QOS_CFACKPOLL 0x00F0
+
+/* sequence control field */
+#define RTW_IEEE80211_SCTL_FRAG 0x000F
+#define RTW_IEEE80211_SCTL_SEQ 0xFFF0
+
+
+#define RTW_ERP_INFO_NON_ERP_PRESENT BIT(0)
+#define RTW_ERP_INFO_USE_PROTECTION BIT(1)
+#define RTW_ERP_INFO_BARKER_PREAMBLE_MODE BIT(2)
+
+/* QoS, QOS */
+#define NORMAL_ACK 0
+#define NO_ACK 1
+#define NON_EXPLICIT_ACK 2
+#define BLOCK_ACK 3
+
+#ifndef ETH_P_PAE
+#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
+#endif /* ETH_P_PAE */
+
+#define ETH_P_PREAUTH 0x88C7 /* IEEE 802.11i pre-authentication */
+
+#define ETH_P_ECONET 0x0018
+
+#ifndef ETH_P_80211_RAW
+#define ETH_P_80211_RAW (ETH_P_ECONET + 1)
+#endif
+
+/* IEEE 802.11 defines */
+
+#define P80211_OUI_LEN 3
+
+struct ieee80211_snap_hdr {
+ u8 dsap; /* always 0xAA */
+ u8 ssap; /* always 0xAA */
+ u8 ctrl; /* always 0x03 */
+	u8 oui[P80211_OUI_LEN]; /* organizationally unique identifier */
+} __packed;
+
+#define SNAP_SIZE sizeof(struct ieee80211_snap_hdr)
+
+#define WLAN_FC_GET_TYPE(fc) ((fc) & RTW_IEEE80211_FCTL_FTYPE)
+#define WLAN_FC_GET_STYPE(fc) ((fc) & RTW_IEEE80211_FCTL_STYPE)
+
+#define WLAN_QC_GET_TID(qc) ((qc) & 0x0f)
+
+#define WLAN_GET_SEQ_FRAG(seq) ((seq) & RTW_IEEE80211_SCTL_FRAG)
+#define WLAN_GET_SEQ_SEQ(seq) ((seq) & RTW_IEEE80211_SCTL_SEQ)
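These masks operate on the host-order 16-bit frame control and sequence control words; type, subtype, fragment number and sequence number all fall out with a mask and, for the sequence number, a 4-bit shift. A standalone decode of a beacon frame header using the same constant values:

#include <stdint.h>
#include <stdio.h>

#define FCTL_FTYPE   0x000c
#define FCTL_STYPE   0x00f0
#define FTYPE_MGMT   0x0000
#define STYPE_BEACON 0x0080
#define SCTL_FRAG    0x000F
#define SCTL_SEQ     0xFFF0

int main(void)
{
	uint16_t fc  = 0x0080;	/* version 0, management type, beacon subtype */
	uint16_t seq = 0x1230;	/* sequence number 0x123, fragment 0 */

	if ((fc & FCTL_FTYPE) == FTYPE_MGMT && (fc & FCTL_STYPE) == STYPE_BEACON)
		printf("beacon, seq=%u frag=%u\n",
		       (unsigned)((seq & SCTL_SEQ) >> 4),
		       (unsigned)(seq & SCTL_FRAG));
	return 0;
}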
+
+/* Authentication algorithms */
+#define WLAN_AUTH_OPEN 0
+#define WLAN_AUTH_SHARED_KEY 1
+
+#define WLAN_AUTH_CHALLENGE_LEN 128
+
+#define WLAN_CAPABILITY_BSS (1<<0)
+#define WLAN_CAPABILITY_IBSS (1<<1)
+#define WLAN_CAPABILITY_CF_POLLABLE (1<<2)
+#define WLAN_CAPABILITY_CF_POLL_REQUEST (1<<3)
+#define WLAN_CAPABILITY_PRIVACY (1<<4)
+#define WLAN_CAPABILITY_SHORT_PREAMBLE (1<<5)
+#define WLAN_CAPABILITY_PBCC (1<<6)
+#define WLAN_CAPABILITY_CHANNEL_AGILITY (1<<7)
+#define WLAN_CAPABILITY_SHORT_SLOT (1<<10)
+
+/* Status codes */
+#define WLAN_STATUS_SUCCESS 0
+#define WLAN_STATUS_UNSPECIFIED_FAILURE 1
+#define WLAN_STATUS_CAPS_UNSUPPORTED 10
+#define WLAN_STATUS_REASSOC_NO_ASSOC 11
+#define WLAN_STATUS_ASSOC_DENIED_UNSPEC 12
+#define WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG 13
+#define WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION 14
+#define WLAN_STATUS_CHALLENGE_FAIL 15
+#define WLAN_STATUS_AUTH_TIMEOUT 16
+#define WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA 17
+#define WLAN_STATUS_ASSOC_DENIED_RATES 18
+/* 802.11b */
+#define WLAN_STATUS_ASSOC_DENIED_NOSHORT 19
+#define WLAN_STATUS_ASSOC_DENIED_NOPBCC 20
+#define WLAN_STATUS_ASSOC_DENIED_NOAGILITY 21
+
+/* Reason codes */
+#define WLAN_REASON_UNSPECIFIED 1
+#define WLAN_REASON_PREV_AUTH_NOT_VALID 2
+#define WLAN_REASON_DEAUTH_LEAVING 3
+#define WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY 4
+#define WLAN_REASON_DISASSOC_AP_BUSY 5
+#define WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA 6
+#define WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA 7
+#define WLAN_REASON_DISASSOC_STA_HAS_LEFT 8
+#define WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH 9
+#define WLAN_REASON_JOIN_WRONG_CHANNEL 65534
+#define WLAN_REASON_EXPIRATION_CHK 65535
+
+/* Information Element IDs */
+#define WLAN_EID_SSID 0
+#define WLAN_EID_SUPP_RATES 1
+#define WLAN_EID_FH_PARAMS 2
+#define WLAN_EID_DS_PARAMS 3
+#define WLAN_EID_CF_PARAMS 4
+#define WLAN_EID_TIM 5
+#define WLAN_EID_IBSS_PARAMS 6
+#define WLAN_EID_CHALLENGE 16
+/* EIDs defined by IEEE 802.11h - START */
+#define WLAN_EID_PWR_CONSTRAINT 32
+#define WLAN_EID_PWR_CAPABILITY 33
+#define WLAN_EID_TPC_REQUEST 34
+#define WLAN_EID_TPC_REPORT 35
+#define WLAN_EID_SUPPORTED_CHANNELS 36
+#define WLAN_EID_CHANNEL_SWITCH 37
+#define WLAN_EID_MEASURE_REQUEST 38
+#define WLAN_EID_MEASURE_REPORT 39
+#define WLAN_EID_QUITE 40
+#define WLAN_EID_IBSS_DFS 41
+/* EIDs defined by IEEE 802.11h - END */
+#define WLAN_EID_ERP_INFO 42
+#define WLAN_EID_HT_CAP 45
+#define WLAN_EID_RSN 48
+#define WLAN_EID_EXT_SUPP_RATES 50
+#define WLAN_EID_MOBILITY_DOMAIN 54
+#define WLAN_EID_FAST_BSS_TRANSITION 55
+#define WLAN_EID_TIMEOUT_INTERVAL 56
+#define WLAN_EID_RIC_DATA 57
+#define WLAN_EID_HT_OPERATION 61
+#define WLAN_EID_SECONDARY_CHANNEL_OFFSET 62
+#define WLAN_EID_20_40_BSS_COEXISTENCE 72
+#define WLAN_EID_20_40_BSS_INTOLERANT 73
+#define WLAN_EID_OVERLAPPING_BSS_SCAN_PARAMS 74
+#define WLAN_EID_MMIE 76
+#define WLAN_EID_VENDOR_SPECIFIC 221
+#define WLAN_EID_GENERIC (WLAN_EID_VENDOR_SPECIFIC)
+
+#define IEEE80211_MGMT_HDR_LEN 24
+#define IEEE80211_DATA_HDR3_LEN 24
+#define IEEE80211_DATA_HDR4_LEN 30
+
+
+#define IEEE80211_STATMASK_SIGNAL (1<<0)
+#define IEEE80211_STATMASK_RSSI (1<<1)
+#define IEEE80211_STATMASK_NOISE (1<<2)
+#define IEEE80211_STATMASK_RATE (1<<3)
+#define IEEE80211_STATMASK_WEMASK 0x7
+
+
+#define IEEE80211_CCK_MODULATION (1<<0)
+#define IEEE80211_OFDM_MODULATION (1<<1)
+
+#define IEEE80211_24GHZ_BAND (1<<0)
+#define IEEE80211_52GHZ_BAND (1<<1)
+
+#define IEEE80211_CCK_RATE_LEN 4
+#define IEEE80211_NUM_OFDM_RATESLEN 8
+
+
+#define IEEE80211_CCK_RATE_1MB 0x02
+#define IEEE80211_CCK_RATE_2MB 0x04
+#define IEEE80211_CCK_RATE_5MB 0x0B
+#define IEEE80211_CCK_RATE_11MB 0x16
+#define IEEE80211_OFDM_RATE_LEN 8
+#define IEEE80211_OFDM_RATE_6MB 0x0C
+#define IEEE80211_OFDM_RATE_9MB 0x12
+#define IEEE80211_OFDM_RATE_12MB 0x18
+#define IEEE80211_OFDM_RATE_18MB 0x24
+#define IEEE80211_OFDM_RATE_24MB 0x30
+#define IEEE80211_OFDM_RATE_36MB 0x48
+#define IEEE80211_OFDM_RATE_48MB 0x60
+#define IEEE80211_OFDM_RATE_54MB 0x6C
+#define IEEE80211_BASIC_RATE_MASK 0x80
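The rate bytes above use the 802.11 supported-rates encoding: the low 7 bits give the rate in 500 kb/s units and bit 7 (IEEE80211_BASIC_RATE_MASK) marks a basic rate, which is why CCK 11 Mb/s is 0x16 and OFDM 54 Mb/s is 0x6C. A quick standalone check of that conversion:

#include <assert.h>

#define BASIC_RATE_MASK 0x80	/* same flag bit as IEEE80211_BASIC_RATE_MASK */

/* Supported-rates byte -> rate in units of 100 kb/s, ignoring the basic flag. */
static unsigned int rate_to_100kbps(unsigned char r)
{
	return (r & ~BASIC_RATE_MASK) * 5;
}

int main(void)
{
	assert(rate_to_100kbps(0x16) == 110);	/* CCK 11 Mb/s                 */
	assert(rate_to_100kbps(0x6C) == 540);	/* OFDM 54 Mb/s                */
	assert(rate_to_100kbps(0x96) == 110);	/* 11 Mb/s, basic-rate bit set */
	return 0;
}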
+
+#define IEEE80211_CCK_RATE_1MB_MASK (1<<0)
+#define IEEE80211_CCK_RATE_2MB_MASK (1<<1)
+#define IEEE80211_CCK_RATE_5MB_MASK (1<<2)
+#define IEEE80211_CCK_RATE_11MB_MASK (1<<3)
+#define IEEE80211_OFDM_RATE_6MB_MASK (1<<4)
+#define IEEE80211_OFDM_RATE_9MB_MASK (1<<5)
+#define IEEE80211_OFDM_RATE_12MB_MASK (1<<6)
+#define IEEE80211_OFDM_RATE_18MB_MASK (1<<7)
+#define IEEE80211_OFDM_RATE_24MB_MASK (1<<8)
+#define IEEE80211_OFDM_RATE_36MB_MASK (1<<9)
+#define IEEE80211_OFDM_RATE_48MB_MASK (1<<10)
+#define IEEE80211_OFDM_RATE_54MB_MASK (1<<11)
+
+#define IEEE80211_CCK_RATES_MASK 0x0000000F
+#define IEEE80211_CCK_BASIC_RATES_MASK (IEEE80211_CCK_RATE_1MB_MASK | \
+ IEEE80211_CCK_RATE_2MB_MASK)
+#define IEEE80211_CCK_DEFAULT_RATES_MASK \
+ (IEEE80211_CCK_BASIC_RATES_MASK | \
+ IEEE80211_CCK_RATE_5MB_MASK | \
+ IEEE80211_CCK_RATE_11MB_MASK)
+
+#define IEEE80211_OFDM_RATES_MASK 0x00000FF0
+#define IEEE80211_OFDM_BASIC_RATES_MASK (IEEE80211_OFDM_RATE_6MB_MASK | \
+ IEEE80211_OFDM_RATE_12MB_MASK | \
+ IEEE80211_OFDM_RATE_24MB_MASK)
+#define IEEE80211_OFDM_DEFAULT_RATES_MASK \
+ (IEEE80211_OFDM_BASIC_RATES_MASK | \
+ IEEE80211_OFDM_RATE_9MB_MASK | \
+ IEEE80211_OFDM_RATE_18MB_MASK | \
+ IEEE80211_OFDM_RATE_36MB_MASK | \
+ IEEE80211_OFDM_RATE_48MB_MASK | \
+ IEEE80211_OFDM_RATE_54MB_MASK)
+#define IEEE80211_DEFAULT_RATES_MASK \
+ (IEEE80211_OFDM_DEFAULT_RATES_MASK | \
+ IEEE80211_CCK_DEFAULT_RATES_MASK)
+
+#define IEEE80211_NUM_OFDM_RATES 8
+#define IEEE80211_NUM_CCK_RATES 4
+#define IEEE80211_OFDM_SHIFT_MASK_A 4
+
+/* NOTE: This data is for statistical purposes; not all hardware provides this
+ * information for frames received. Not setting these will not cause
+ * any adverse effects. */
+struct ieee80211_rx_stats {
+ /* u32 mac_time[2]; */
+ s8 rssi;
+ u8 signal;
+ u8 noise;
+ u8 received_channel;
+ u16 rate; /* in 100 kbps */
+ /* u8 control; */
+ u8 mask;
+ u8 freq;
+ u16 len;
+};
+
+/* IEEE 802.11 requires that STA supports concurrent reception of at least
+ * three fragmented frames. This define can be increased to support more
+ * concurrent frames, but it should be noted that each entry can consume about
+ * 2 kB of RAM and increasing cache size will slow down frame reassembly. */
+#define IEEE80211_FRAG_CACHE_LEN 4
+
+struct ieee80211_frag_entry {
+ u32 first_frag_time;
+ uint seq;
+ uint last_frag;
+ uint qos; /* jackson */
+ uint tid; /* jackson */
+ struct sk_buff *skb;
+ u8 src_addr[ETH_ALEN];
+ u8 dst_addr[ETH_ALEN];
+};
+
+struct ieee80211_stats {
+ uint tx_unicast_frames;
+ uint tx_multicast_frames;
+ uint tx_fragments;
+ uint tx_unicast_octets;
+ uint tx_multicast_octets;
+ uint tx_deferred_transmissions;
+ uint tx_single_retry_frames;
+ uint tx_multiple_retry_frames;
+ uint tx_retry_limit_exceeded;
+ uint tx_discards;
+ uint rx_unicast_frames;
+ uint rx_multicast_frames;
+ uint rx_fragments;
+ uint rx_unicast_octets;
+ uint rx_multicast_octets;
+ uint rx_fcs_errors;
+ uint rx_discards_no_buffer;
+ uint tx_discards_wrong_sa;
+ uint rx_discards_undecryptable;
+ uint rx_message_in_msg_fragments;
+ uint rx_message_in_bad_msg_fragments;
+};
+
+struct ieee80211_softmac_stats {
+ uint rx_ass_ok;
+ uint rx_ass_err;
+ uint rx_probe_rq;
+ uint tx_probe_rs;
+ uint tx_beacons;
+ uint rx_auth_rq;
+ uint rx_auth_rs_ok;
+ uint rx_auth_rs_err;
+ uint tx_auth_rq;
+ uint no_auth_rs;
+ uint no_ass_rs;
+ uint tx_ass_rq;
+ uint rx_ass_rq;
+ uint tx_probe_rq;
+ uint reassoc;
+ uint swtxstop;
+ uint swtxawake;
+};
+
+#define SEC_KEY_1 (1<<0)
+#define SEC_KEY_2 (1<<1)
+#define SEC_KEY_3 (1<<2)
+#define SEC_KEY_4 (1<<3)
+#define SEC_ACTIVE_KEY (1<<4)
+#define SEC_AUTH_MODE (1<<5)
+#define SEC_UNICAST_GROUP (1<<6)
+#define SEC_LEVEL (1<<7)
+#define SEC_ENABLED (1<<8)
+
+#define SEC_LEVEL_0 0 /* None */
+#define SEC_LEVEL_1 1 /* WEP 40 and 104 bit */
+#define SEC_LEVEL_2 2 /* Level 1 + TKIP */
+#define SEC_LEVEL_2_CKIP 3 /* Level 1 + CKIP */
+#define SEC_LEVEL_3 4 /* Level 2 + CCMP */
+
+#define WEP_KEYS 4
+#define WEP_KEY_LEN 13
+
+struct ieee80211_security {
+ u16 active_key:2,
+ enabled:1,
+ auth_mode:2,
+ auth_algo:4,
+ unicast_uses_group:1;
+ u8 key_sizes[WEP_KEYS];
+ u8 keys[WEP_KEYS][WEP_KEY_LEN];
+ u8 level;
+ u16 flags;
+} __packed;
+
+/*
+
+ 802.11 data frame from AP
+
+ ,-------------------------------------------------------------------.
+Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 |
+ |------|------|---------|---------|---------|------|---------|------|
+Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | frame | fcs |
+ | | tion | (BSSID) | | | ence | data | |
+ `-------------------------------------------------------------------'
+
+Total: 28-2340 bytes
+
+*/
+
+struct ieee80211_header_data {
+ u16 frame_ctl;
+ u16 duration_id;
+ u8 addr1[6];
+ u8 addr2[6];
+ u8 addr3[6];
+ u16 seq_ctrl;
+};
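The diagram above shows the AP-to-station (FromDS) case; which of the three addresses holds the DA and SA depends on the ToDS/FromDS bits in frame_ctl. A hedged standalone helper for the three-address cases (the four-address WDS combination is deliberately left out):

#include <stdint.h>

#define FCTL_TODS   0x0100
#define FCTL_FROMDS 0x0200

struct hdr3 {
	uint16_t fc, dur;
	uint8_t a1[6], a2[6], a3[6];
	uint16_t seq;
};

/* Three-address DA/SA selection per the ToDS/FromDS bits; the ToDS+FromDS
 * (4-address WDS) combination is not handled here. */
static const uint8_t *hdr_da(const struct hdr3 *h)
{
	return (h->fc & FCTL_TODS) ? h->a3 : h->a1;
}

static const uint8_t *hdr_sa(const struct hdr3 *h)
{
	return (h->fc & FCTL_FROMDS) ? h->a3 : h->a2;
}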
+
+#define BEACON_PROBE_SSID_ID_POSITION 12
+
+/* Management Frame Information Element Types */
+#define MFIE_TYPE_SSID 0
+#define MFIE_TYPE_RATES 1
+#define MFIE_TYPE_FH_SET 2
+#define MFIE_TYPE_DS_SET 3
+#define MFIE_TYPE_CF_SET 4
+#define MFIE_TYPE_TIM 5
+#define MFIE_TYPE_IBSS_SET 6
+#define MFIE_TYPE_CHALLENGE 16
+#define MFIE_TYPE_ERP 42
+#define MFIE_TYPE_RSN 48
+#define MFIE_TYPE_RATES_EX 50
+#define MFIE_TYPE_GENERIC 221
+
+struct ieee80211_info_element_hdr {
+ u8 id;
+ u8 len;
+} __packed;
+
+struct ieee80211_info_element {
+ u8 id;
+ u8 len;
+ u8 data[0];
+} __packed;
+
+/*
+ * These are the data types that can make up management packets
+ *
+ u16 auth_algorithm;
+ u16 auth_sequence;
+ u16 beacon_interval;
+ u16 capability;
+ u8 current_ap[ETH_ALEN];
+ u16 listen_interval;
+ struct {
+ u16 association_id:14, reserved:2;
+ } __packed;
+ u32 time_stamp[2];
+ u16 reason;
+ u16 status;
+*/
+
+#define IEEE80211_DEFAULT_TX_ESSID "Penguin"
+#define IEEE80211_DEFAULT_BASIC_RATE 10
+
+struct ieee80211_authentication {
+ struct ieee80211_header_data header;
+ u16 algorithm;
+ u16 transaction;
+ u16 status;
+ /* struct ieee80211_info_element_hdr info_element; */
+} __packed;
+
+struct ieee80211_probe_response {
+ struct ieee80211_header_data header;
+ u32 time_stamp[2];
+ u16 beacon_interval;
+ u16 capability;
+ struct ieee80211_info_element info_element;
+} __packed;
+
+struct ieee80211_probe_request {
+ struct ieee80211_header_data header;
+} __packed;
+
+struct ieee80211_assoc_request_frame {
+ struct rtw_ieee80211_hdr_3addr header;
+ u16 capability;
+ u16 listen_interval;
+ struct ieee80211_info_element_hdr info_element;
+} __packed;
+
+struct ieee80211_assoc_response_frame {
+ struct rtw_ieee80211_hdr_3addr header;
+ u16 capability;
+ u16 status;
+ u16 aid;
+} __packed;
+
+struct ieee80211_txb {
+ u8 nr_frags;
+ u8 encrypted;
+ u16 reserved;
+ u16 frag_size;
+ u16 payload_size;
+ struct sk_buff *fragments[0];
+};
+
+
+/* SWEEP TABLE ENTRIES NUMBER */
+#define MAX_SWEEP_TAB_ENTRIES 42
+#define MAX_SWEEP_TAB_ENTRIES_PER_PACKET 7
+/* MAX_RATES_LENGTH needs to be 12. The spec says 8, and many APs
+ * only use 8, and then use extended rates for the remaining supported
+ * rates. Other APs, however, stick all of their supported rates on the
+ * main rates information element... */
+#define MAX_RATES_LENGTH ((u8)12)
+#define MAX_RATES_EX_LENGTH ((u8)16)
+#define MAX_NETWORK_COUNT 128
+#define MAX_CHANNEL_NUMBER 161
+#define IEEE80211_SOFTMAC_SCAN_TIME 400
+/* (HZ / 2) */
+#define IEEE80211_SOFTMAC_ASSOC_RETRY_TIME (HZ * 2)
+
+#define CRC_LENGTH 4U
+
+#define MAX_WPA_IE_LEN (256)
+#define MAX_WPS_IE_LEN (512)
+#define MAX_P2P_IE_LEN (256)
+#define MAX_WFD_IE_LEN (128)
+
+#define NETWORK_EMPTY_ESSID (1<<0)
+#define NETWORK_HAS_OFDM (1<<1)
+#define NETWORK_HAS_CCK (1<<2)
+
+#define IEEE80211_DTIM_MBCAST 4
+#define IEEE80211_DTIM_UCAST 2
+#define IEEE80211_DTIM_VALID 1
+#define IEEE80211_DTIM_INVALID 0
+
+#define IEEE80211_PS_DISABLED 0
+#define IEEE80211_PS_UNICAST IEEE80211_DTIM_UCAST
+#define IEEE80211_PS_MBCAST IEEE80211_DTIM_MBCAST
+#define IW_ESSID_MAX_SIZE 32
+/*
+join_res:
+-1: authentication fail
+-2: association fail
+> 0: TID
+*/
+
+enum ieee80211_state {
+ /* the card is not linked at all */
+ IEEE80211_NOLINK = 0,
+
+	/* IEEE80211_ASSOCIATING* are for BSS client mode;
+	 * the driver shall not perform RX filtering unless
+	 * the state is LINKED.
+	 * The driver shall just check for the state LINKED and
+	 * default to NOLINK for ALL the other states (including
+	 * LINKED_SCANNING).
+	 */
+
+ /* the association procedure will start (wq scheduling)*/
+ IEEE80211_ASSOCIATING,
+ IEEE80211_ASSOCIATING_RETRY,
+
+ /* the association procedure is sending AUTH request*/
+ IEEE80211_ASSOCIATING_AUTHENTICATING,
+
+	/* the association procedure has successfully authenticated
+ * and is sending association request
+ */
+ IEEE80211_ASSOCIATING_AUTHENTICATED,
+
+ /* the link is ok. the card associated to a BSS or linked
+ * to a ibss cell or acting as an AP and creating the bss
+ */
+ IEEE80211_LINKED,
+
+	/* same as LINKED, but the driver shall apply RX filter
+	 * rules as if it were in NOLINK mode. The card is still
+	 * logically linked, but it is performing a synchronous site
+	 * survey, after which it returns to the LINKED state.
+	 */
+ IEEE80211_LINKED_SCANNING,
+
+};
+
+#define DEFAULT_MAX_SCAN_AGE (15 * HZ)
+#define DEFAULT_FTS 2346
+
+static inline int is_multicast_mac_addr(const u8 *addr)
+{
+ return ((addr[0] != 0xff) && (0x01 & addr[0]));
+}
+
+static inline int is_broadcast_mac_addr(const u8 *addr)
+{
+ return (addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) &&
+ (addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff);
+}
+
+#define CFG_IEEE80211_RESERVE_FCS (1<<0)
+#define CFG_IEEE80211_COMPUTE_FCS (1<<1)
+
+struct tx_pending {
+ int frag;
+ struct ieee80211_txb *txb;
+};
+
+#define MAXTID 16
+
+#define IEEE_A (1<<0)
+#define IEEE_B (1<<1)
+#define IEEE_G (1<<2)
+#define IEEE_MODE_MASK (IEEE_A|IEEE_B|IEEE_G)
+
+/* Baron move to ieee80211.c */
+int ieee80211_is_empty_essid(const char *essid, int essid_len);
+int ieee80211_get_hdrlen(u16 fc);
+
+/* Action category code */
+enum rtw_ieee80211_category {
+ RTW_WLAN_CATEGORY_SPECTRUM_MGMT = 0,
+ RTW_WLAN_CATEGORY_QOS = 1,
+ RTW_WLAN_CATEGORY_DLS = 2,
+ RTW_WLAN_CATEGORY_BACK = 3,
+ RTW_WLAN_CATEGORY_PUBLIC = 4, /* IEEE 802.11 public action frames */
+ RTW_WLAN_CATEGORY_RADIO_MEASUREMENT = 5,
+ RTW_WLAN_CATEGORY_FT = 6,
+ RTW_WLAN_CATEGORY_HT = 7,
+ RTW_WLAN_CATEGORY_SA_QUERY = 8,
+ RTW_WLAN_CATEGORY_TDLS = 12,
+ RTW_WLAN_CATEGORY_WMM = 17,
+ RTW_WLAN_CATEGORY_P2P = 0x7f,/* P2P action frames */
+};
+
+/* SPECTRUM_MGMT action code */
+enum rtw_ieee80211_spectrum_mgmt_actioncode {
+ RTW_WLAN_ACTION_SPCT_MSR_REQ = 0,
+ RTW_WLAN_ACTION_SPCT_MSR_RPRT = 1,
+ RTW_WLAN_ACTION_SPCT_TPC_REQ = 2,
+ RTW_WLAN_ACTION_SPCT_TPC_RPRT = 3,
+ RTW_WLAN_ACTION_SPCT_CHL_SWITCH = 4,
+ RTW_WLAN_ACTION_SPCT_EXT_CHL_SWITCH = 5,
+};
+
+enum _PUBLIC_ACTION {
+ ACT_PUBLIC_BSSCOEXIST = 0, /* 20/40 BSS Coexistence */
+ ACT_PUBLIC_DSE_ENABLE = 1,
+ ACT_PUBLIC_DSE_DEENABLE = 2,
+ ACT_PUBLIC_DSE_REG_LOCATION = 3,
+ ACT_PUBLIC_EXT_CHL_SWITCH = 4,
+ ACT_PUBLIC_DSE_MSR_REQ = 5,
+ ACT_PUBLIC_DSE_MSR_RPRT = 6,
+ ACT_PUBLIC_MP = 7, /* Measurement Pilot */
+ ACT_PUBLIC_DSE_PWR_CONSTRAINT = 8,
+ ACT_PUBLIC_VENDOR = 9, /* for WIFI_DIRECT */
+ ACT_PUBLIC_GAS_INITIAL_REQ = 10,
+ ACT_PUBLIC_GAS_INITIAL_RSP = 11,
+ ACT_PUBLIC_GAS_COMEBACK_REQ = 12,
+ ACT_PUBLIC_GAS_COMEBACK_RSP = 13,
+ ACT_PUBLIC_TDLS_DISCOVERY_RSP = 14,
+ ACT_PUBLIC_LOCATION_TRACK = 15,
+ ACT_PUBLIC_MAX
+};
+
+/* BACK action code */
+enum rtw_ieee80211_back_actioncode {
+ RTW_WLAN_ACTION_ADDBA_REQ = 0,
+ RTW_WLAN_ACTION_ADDBA_RESP = 1,
+ RTW_WLAN_ACTION_DELBA = 2,
+};
+
+/* HT features action code */
+enum rtw_ieee80211_ht_actioncode {
+ RTW_WLAN_ACTION_NOTIFY_CH_WIDTH = 0,
+ RTW_WLAN_ACTION_SM_PS = 1,
+ RTW_WLAN_ACTION_PSPM = 2,
+ RTW_WLAN_ACTION_PCO_PHASE = 3,
+ RTW_WLAN_ACTION_MIMO_CSI_MX = 4,
+ RTW_WLAN_ACTION_MIMO_NONCP_BF = 5,
+ RTW_WLAN_ACTION_MIMP_CP_BF = 6,
+ RTW_WLAN_ACTION_ASEL_INDICATES_FB = 7,
+ RTW_WLAN_ACTION_HI_INFO_EXCHG = 8,
+};
+
+/* BACK (block-ack) parties */
+enum rtw_ieee80211_back_parties {
+ RTW_WLAN_BACK_RECIPIENT = 0,
+ RTW_WLAN_BACK_INITIATOR = 1,
+ RTW_WLAN_BACK_TIMER = 2,
+};
+
+#define OUI_MICROSOFT 0x0050f2 /* Microsoft (also used in Wi-Fi specs)
+ * 00:50:F2 */
+#define WME_OUI_TYPE 2
+#define WME_OUI_SUBTYPE_INFORMATION_ELEMENT 0
+#define WME_OUI_SUBTYPE_PARAMETER_ELEMENT 1
+#define WME_OUI_SUBTYPE_TSPEC_ELEMENT 2
+#define WME_VERSION 1
+
+#define WME_ACTION_CODE_SETUP_REQUEST 0
+#define WME_ACTION_CODE_SETUP_RESPONSE 1
+#define WME_ACTION_CODE_TEARDOWN 2
+
+#define WME_SETUP_RESPONSE_STATUS_ADMISSION_ACCEPTED 0
+#define WME_SETUP_RESPONSE_STATUS_INVALID_PARAMETERS 1
+#define WME_SETUP_RESPONSE_STATUS_REFUSED 3
+
+#define WME_TSPEC_DIRECTION_UPLINK 0
+#define WME_TSPEC_DIRECTION_DOWNLINK 1
+#define WME_TSPEC_DIRECTION_BI_DIRECTIONAL 3
+
+
+#define OUI_BROADCOM 0x00904c /* Broadcom (Epigram) */
+
+#define VENDOR_HT_CAPAB_OUI_TYPE 0x33 /* 00-90-4c:0x33 */
+
+/**
+ * enum rtw_ieee80211_channel_flags - channel flags
+ *
+ * Channel flags set by the regulatory control code.
+ *
+ * @RTW_IEEE80211_CHAN_DISABLED: This channel is disabled.
+ * @RTW_IEEE80211_CHAN_PASSIVE_SCAN: Only passive scanning is permitted
+ * on this channel.
+ * @RTW_IEEE80211_CHAN_NO_IBSS: IBSS is not allowed on this channel.
+ * @RTW_IEEE80211_CHAN_RADAR: Radar detection is required on this channel.
+ * @RTW_IEEE80211_CHAN_NO_HT40PLUS: extension channel above this channel
+ * is not permitted.
+ * @RTW_IEEE80211_CHAN_NO_HT40MINUS: extension channel below this channel
+ * is not permitted.
+ */
+enum rtw_ieee80211_channel_flags {
+ RTW_IEEE80211_CHAN_DISABLED = 1<<0,
+ RTW_IEEE80211_CHAN_PASSIVE_SCAN = 1<<1,
+ RTW_IEEE80211_CHAN_NO_IBSS = 1<<2,
+ RTW_IEEE80211_CHAN_RADAR = 1<<3,
+ RTW_IEEE80211_CHAN_NO_HT40PLUS = 1<<4,
+ RTW_IEEE80211_CHAN_NO_HT40MINUS = 1<<5,
+};
+
+#define RTW_IEEE80211_CHAN_NO_HT40 \
+ (RTW_IEEE80211_CHAN_NO_HT40PLUS | RTW_IEEE80211_CHAN_NO_HT40MINUS)
+
+/* Represent channel details, subset of ieee80211_channel */
+struct rtw_ieee80211_channel {
+ u16 hw_value;
+ u32 flags;
+};
+
+#define CHAN_FMT \
+	"hw_value:%u, " \
+	"flags:0x%08x"
+
+#define CHAN_ARG(channel) \
+	(channel)->hw_value \
+	, (channel)->flags
+
+/* Parsed Information Elements */
+struct rtw_ieee802_11_elems {
+ u8 *ssid;
+ u8 ssid_len;
+ u8 *supp_rates;
+ u8 supp_rates_len;
+ u8 *fh_params;
+ u8 fh_params_len;
+ u8 *ds_params;
+ u8 ds_params_len;
+ u8 *cf_params;
+ u8 cf_params_len;
+ u8 *tim;
+ u8 tim_len;
+ u8 *ibss_params;
+ u8 ibss_params_len;
+ u8 *challenge;
+ u8 challenge_len;
+ u8 *erp_info;
+ u8 erp_info_len;
+ u8 *ext_supp_rates;
+ u8 ext_supp_rates_len;
+ u8 *wpa_ie;
+ u8 wpa_ie_len;
+ u8 *rsn_ie;
+ u8 rsn_ie_len;
+ u8 *wme;
+ u8 wme_len;
+ u8 *wme_tspec;
+ u8 wme_tspec_len;
+ u8 *wps_ie;
+ u8 wps_ie_len;
+ u8 *power_cap;
+ u8 power_cap_len;
+ u8 *supp_channels;
+ u8 supp_channels_len;
+ u8 *mdie;
+ u8 mdie_len;
+ u8 *ftie;
+ u8 ftie_len;
+ u8 *timeout_int;
+ u8 timeout_int_len;
+ u8 *ht_capabilities;
+ u8 ht_capabilities_len;
+ u8 *ht_operation;
+ u8 ht_operation_len;
+ u8 *vendor_ht_cap;
+ u8 vendor_ht_cap_len;
+};
+
+enum parse_res {
+ ParseOK = 0,
+ ParseUnknown = 1,
+ ParseFailed = -1
+};
+
+enum parse_res rtw_ieee802_11_parse_elems(u8 *start, uint len,
+ struct rtw_ieee802_11_elems *elems,
+ int show_errors);
+
+u8 *rtw_set_fixed_ie(unsigned char *pbuf, unsigned int len,
+ unsigned char *source, unsigned int *frlen);
+u8 *rtw_set_ie(u8 *pbuf, int index, uint len, u8 *source, uint *frlen);
+
+enum secondary_ch_offset {
+ SCN = 0, /* no secondary channel */
+ SCA = 1, /* secondary channel above */
+ SCB = 3, /* secondary channel below */
+};
+u8 secondary_ch_offset_to_hal_ch_offset(u8 ch_offset);
+u8 hal_ch_offset_to_secondary_ch_offset(u8 ch_offset);
+u8 *rtw_set_ie_ch_switch(u8 *buf, u32 *buf_len, u8 ch_switch_mode,
+ u8 new_ch, u8 ch_switch_cnt);
+u8 *rtw_set_ie_secondary_ch_offset(u8 *buf, u32 *buf_len,
+ u8 secondary_ch_offset);
+u8 *rtw_set_ie_mesh_ch_switch_parm(u8 *buf, u32 *buf_len, u8 ttl,
+ u8 flags, u16 reason, u16 precedence);
+
+u8 *rtw_get_ie(u8 *pbuf, int index, int *len, int limit);
+u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui,
+ u8 oui_len, u8 *ie, uint *ielen);
+int rtw_ies_remove_ie(u8 *ies, uint *ies_len, uint offset,
+ u8 eid, u8 *oui, u8 oui_len);
+
+void rtw_set_supported_rate(u8 *SupportedRates, uint mode);
+
+unsigned char *rtw_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit);
+unsigned char *rtw_get_wpa2_ie(unsigned char *pie, int *rsn_ie_len, int limit);
+int rtw_get_wpa_cipher_suite(u8 *s);
+int rtw_get_wpa2_cipher_suite(u8 *s);
+int rtw_get_wapi_ie(u8 *in_ie, uint in_len, u8 *wapi_ie, u16 *wapi_len);
+int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher,
+ int *pairwise_cipher, int *is_8021x);
+int rtw_parse_wpa2_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher,
+ int *pairwise_cipher, int *is_8021x);
+
+int rtw_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len,
+ u8 *wpa_ie, u16 *wpa_len);
+
+u8 rtw_is_wps_ie(u8 *ie_ptr, uint *wps_ielen);
+u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen);
+u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id,
+ u8 *buf_attr, u32 *len_attr);
+u8 *rtw_get_wps_attr_content(u8 *wps_ie, uint wps_ielen, u16 target_attr_id,
+ u8 *buf_content, uint *len_content);
+
+/**
+ * for_each_ie - iterate over contiguous information elements
+ * @ie: iterator; points at the current IE (u8 id, u8 len, payload)
+ * @buf: buffer holding the IEs
+ * @buf_len: length of @buf in bytes
+ */
+#define for_each_ie(ie, buf, buf_len) \
+ for (ie = (void *)buf; (((u8 *)ie) - ((u8 *)buf) + 1) < buf_len; \
+ ie = (void *)(((u8 *)ie) + *(((u8 *)ie)+1) + 2))
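for_each_ie walks a buffer of TLV-encoded elements: byte 0 is the element ID, byte 1 the payload length, and the iterator advances by len + 2 until fewer than two bytes remain. A standalone usage sketch, with the macro restated so the example builds outside the driver:

#include <stdint.h>
#include <stdio.h>

/* Restated from the driver macro so this builds on its own. */
#define for_each_ie(ie, buf, buf_len) \
	for (ie = (void *)(buf); (((uint8_t *)ie) - ((uint8_t *)(buf)) + 1) < (buf_len); \
	     ie = (void *)(((uint8_t *)ie) + *(((uint8_t *)ie) + 1) + 2))

int main(void)
{
	/* SSID "ab" (ID 0) followed by one supported rate (ID 1). */
	uint8_t ies[] = { 0x00, 0x02, 'a', 'b', 0x01, 0x01, 0x82 };
	uint8_t *ie;

	for_each_ie(ie, ies, sizeof(ies))
		printf("id=%u len=%u\n", (unsigned)ie[0], (unsigned)ie[1]);
	return 0;
}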
+
+void dump_ies(u8 *buf, u32 buf_len);
+void dump_wps_ie(u8 *ie, u32 ie_len);
+
+#ifdef CONFIG_88EU_P2P
+void dump_p2p_ie(u8 *ie, u32 ie_len);
+u8 *rtw_get_p2p_ie(u8 *in_ie, int in_len, u8 *p2p_ie, uint *p2p_ielen);
+u8 *rtw_get_p2p_attr(u8 *p2p_ie, uint p2p_ielen, u8 target_attr_id,
+ u8 *buf_attr, u32 *len_attr);
+u8 *rtw_get_p2p_attr_content(u8 *p2p_ie, uint p2p_ielen, u8 target_attr_id,
+ u8 *buf_content, uint *len_content);
+u32 rtw_set_p2p_attr_content(u8 *pbuf, u8 attr_id, u16 attr_len,
+ u8 *pdata_attr);
+void rtw_wlan_bssid_ex_remove_p2p_attr(struct wlan_bssid_ex *bss_ex,
+ u8 attr_id);
+#endif
+
+uint rtw_get_rateset_len(u8 *rateset);
+
+struct registry_priv;
+int rtw_generate_ie(struct registry_priv *pregistrypriv);
+
+
+int rtw_get_bit_value_from_ieee_value(u8 val);
+
+uint rtw_is_cckrates_included(u8 *rate);
+
+uint rtw_is_cckratesonly_included(u8 *rate);
+
+int rtw_check_network_type(unsigned char *rate, int ratelen, int channel);
+
+void rtw_get_bcn_info(struct wlan_network *pnetwork);
+
+void rtw_macaddr_cfg(u8 *mac_addr);
+
+u16 rtw_mcs_rate(u8 rf_type, u8 bw_40MHz, u8 short_GI_20, u8 short_GI_40,
+ unsigned char *MCS_rate);
+
+int rtw_action_frame_parse(const u8 *frame, u32 frame_len, u8 *category,
+ u8 *action);
+const char *action_public_str(u8 action);
+
+#endif /* IEEE80211_H */
diff --git a/drivers/staging/rtl8188eu/include/ieee80211_ext.h b/drivers/staging/rtl8188eu/include/ieee80211_ext.h
new file mode 100644
index 0000000..1052d18
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/ieee80211_ext.h
@@ -0,0 +1,290 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __IEEE80211_EXT_H
+#define __IEEE80211_EXT_H
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#define WMM_OUI_TYPE 2
+#define WMM_OUI_SUBTYPE_INFORMATION_ELEMENT 0
+#define WMM_OUI_SUBTYPE_PARAMETER_ELEMENT 1
+#define WMM_OUI_SUBTYPE_TSPEC_ELEMENT 2
+#define WMM_VERSION 1
+
+#define WPA_PROTO_WPA BIT(0)
+#define WPA_PROTO_RSN BIT(1)
+
+#define WPA_KEY_MGMT_IEEE8021X BIT(0)
+#define WPA_KEY_MGMT_PSK BIT(1)
+#define WPA_KEY_MGMT_NONE BIT(2)
+#define WPA_KEY_MGMT_IEEE8021X_NO_WPA BIT(3)
+#define WPA_KEY_MGMT_WPA_NONE BIT(4)
+
+
+#define WPA_CAPABILITY_PREAUTH BIT(0)
+#define WPA_CAPABILITY_MGMT_FRAME_PROTECTION BIT(6)
+#define WPA_CAPABILITY_PEERKEY_ENABLED BIT(9)
+
+
+#define PMKID_LEN 16
+
+
+struct wpa_ie_hdr {
+ u8 elem_id;
+ u8 len;
+ u8 oui[4]; /* 24-bit OUI followed by 8-bit OUI type */
+ u8 version[2]; /* little endian */
+} __packed;
+
+struct rsn_ie_hdr {
+ u8 elem_id; /* WLAN_EID_RSN */
+ u8 len;
+ u8 version[2]; /* little endian */
+} __packed;
+
+struct wme_ac_parameter {
+#if defined(__LITTLE_ENDIAN)
+ /* byte 1 */
+ u8 aifsn:4,
+ acm:1,
+ aci:2,
+ reserved:1;
+
+ /* byte 2 */
+ u8 eCWmin:4,
+ eCWmax:4;
+#elif defined(__BIG_ENDIAN)
+ /* byte 1 */
+ u8 reserved:1,
+ aci:2,
+ acm:1,
+ aifsn:4;
+
+ /* byte 2 */
+ u8 eCWmax:4,
+ eCWmin:4;
+#else
+#error "Please fix <endian.h>"
+#endif
+
+ /* bytes 3 & 4 */
+ u16 txopLimit;
+} __packed;
+
+struct wme_parameter_element {
+ /* required fields for WME version 1 */
+ u8 oui[3];
+ u8 oui_type;
+ u8 oui_subtype;
+ u8 version;
+ u8 acInfo;
+ u8 reserved;
+ struct wme_ac_parameter ac[4];
+
+} __packed;
+
+#define WPA_PUT_LE16(a, val) \
+ do { \
+ (a)[1] = ((u16) (val)) >> 8; \
+ (a)[0] = ((u16) (val)) & 0xff; \
+ } while (0)
+
+#define WPA_PUT_BE32(a, val) \
+ do { \
+ (a)[0] = (u8) ((((u32) (val)) >> 24) & 0xff); \
+ (a)[1] = (u8) ((((u32) (val)) >> 16) & 0xff); \
+ (a)[2] = (u8) ((((u32) (val)) >> 8) & 0xff); \
+ (a)[3] = (u8) (((u32) (val)) & 0xff); \
+ } while (0)
+
+#define WPA_PUT_LE32(a, val) \
+ do { \
+ (a)[3] = (u8) ((((u32) (val)) >> 24) & 0xff); \
+ (a)[2] = (u8) ((((u32) (val)) >> 16) & 0xff); \
+ (a)[1] = (u8) ((((u32) (val)) >> 8) & 0xff); \
+ (a)[0] = (u8) (((u32) (val)) & 0xff); \
+ } while (0)
+
+#define RSN_SELECTOR_PUT(a, val) WPA_PUT_BE32((u8 *)(a), (val))
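The WPA_PUT_* helpers serialize multi-byte fields byte by byte, so the on-air layout is independent of host endianness: WPA/RSN version numbers are little-endian while suite selectors are big-endian OUI-plus-type values. A standalone check, using the standard RSN CCMP selector 00-0F-AC:4 as the example value:

#include <assert.h>
#include <stdint.h>

/* Restated from the macros above. */
#define WPA_PUT_LE16(a, val)                       \
	do {                                       \
		(a)[1] = ((uint16_t)(val)) >> 8;   \
		(a)[0] = ((uint16_t)(val)) & 0xff; \
	} while (0)

#define WPA_PUT_BE32(a, val)                                          \
	do {                                                          \
		(a)[0] = (uint8_t)((((uint32_t)(val)) >> 24) & 0xff); \
		(a)[1] = (uint8_t)((((uint32_t)(val)) >> 16) & 0xff); \
		(a)[2] = (uint8_t)((((uint32_t)(val)) >> 8) & 0xff);  \
		(a)[3] = (uint8_t)(((uint32_t)(val)) & 0xff);         \
	} while (0)

int main(void)
{
	uint8_t buf[6];

	WPA_PUT_LE16(buf, 1);			/* RSN version 1 -> 01 00      */
	WPA_PUT_BE32(buf + 2, 0x000FAC04);	/* RSN CCMP suite selector     */
	assert(buf[0] == 0x01 && buf[1] == 0x00);
	assert(buf[2] == 0x00 && buf[3] == 0x0F && buf[4] == 0xAC && buf[5] == 0x04);
	return 0;
}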
+
+/* Action category code */
+enum ieee80211_category {
+ WLAN_CATEGORY_SPECTRUM_MGMT = 0,
+ WLAN_CATEGORY_QOS = 1,
+ WLAN_CATEGORY_DLS = 2,
+ WLAN_CATEGORY_BACK = 3,
+ WLAN_CATEGORY_HT = 7,
+ WLAN_CATEGORY_WMM = 17,
+};
+
+/* SPECTRUM_MGMT action code */
+enum ieee80211_spectrum_mgmt_actioncode {
+ WLAN_ACTION_SPCT_MSR_REQ = 0,
+ WLAN_ACTION_SPCT_MSR_RPRT = 1,
+ WLAN_ACTION_SPCT_TPC_REQ = 2,
+ WLAN_ACTION_SPCT_TPC_RPRT = 3,
+ WLAN_ACTION_SPCT_CHL_SWITCH = 4,
+ WLAN_ACTION_SPCT_EXT_CHL_SWITCH = 5,
+};
+
+/* BACK action code */
+enum ieee80211_back_actioncode {
+ WLAN_ACTION_ADDBA_REQ = 0,
+ WLAN_ACTION_ADDBA_RESP = 1,
+ WLAN_ACTION_DELBA = 2,
+};
+
+/* HT features action code */
+enum ieee80211_ht_actioncode {
+ WLAN_ACTION_NOTIFY_CH_WIDTH = 0,
+ WLAN_ACTION_SM_PS = 1,
+ WLAN_ACTION_PSPM = 2,
+ WLAN_ACTION_PCO_PHASE = 3,
+ WLAN_ACTION_MIMO_CSI_MX = 4,
+ WLAN_ACTION_MIMO_NONCP_BF = 5,
+ WLAN_ACTION_MIMP_CP_BF = 6,
+ WLAN_ACTION_ASEL_INDICATES_FB = 7,
+ WLAN_ACTION_HI_INFO_EXCHG = 8,
+};
+
+/* BACK (block-ack) parties */
+enum ieee80211_back_parties {
+ WLAN_BACK_RECIPIENT = 0,
+ WLAN_BACK_INITIATOR = 1,
+ WLAN_BACK_TIMER = 2,
+};
+
+struct ieee80211_mgmt {
+ u16 frame_control;
+ u16 duration;
+ u8 da[6];
+ u8 sa[6];
+ u8 bssid[6];
+ u16 seq_ctrl;
+ union {
+ struct {
+ u16 auth_alg;
+ u16 auth_transaction;
+ u16 status_code;
+ /* possibly followed by Challenge text */
+ u8 variable[0];
+ } __packed auth;
+ struct {
+ u16 reason_code;
+ } __packed deauth;
+ struct {
+ u16 capab_info;
+ u16 listen_interval;
+ /* followed by SSID and Supported rates */
+ u8 variable[0];
+ } __packed assoc_req;
+ struct {
+ u16 capab_info;
+ u16 status_code;
+ u16 aid;
+ /* followed by Supported rates */
+ u8 variable[0];
+ } __packed assoc_resp, reassoc_resp;
+ struct {
+ u16 capab_info;
+ u16 listen_interval;
+ u8 current_ap[6];
+ /* followed by SSID and Supported rates */
+ u8 variable[0];
+ } __packed reassoc_req;
+ struct {
+ u16 reason_code;
+ } __packed disassoc;
+ struct {
+ __le64 timestamp;
+ u16 beacon_int;
+ u16 capab_info;
+ /* followed by some of SSID, Supported rates,
+ * FH Params, DS Params, CF Params, IBSS Params, TIM */
+ u8 variable[0];
+ } __packed beacon;
+ struct {
+ /* only variable items: SSID, Supported rates */
+ u8 variable[0];
+ } __packed probe_req;
+ struct {
+ __le64 timestamp;
+ u16 beacon_int;
+ u16 capab_info;
+ /* followed by some of SSID, Supported rates,
+ * FH Params, DS Params, CF Params, IBSS Params */
+ u8 variable[0];
+ } __packed probe_resp;
+ struct {
+ u8 category;
+ union {
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 status_code;
+ u8 variable[0];
+ } __packed wme_action;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u16 capab;
+ u16 timeout;
+ u16 start_seq_num;
+ } __packed addba_req;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u16 status;
+ u16 capab;
+ u16 timeout;
+ } __packed addba_resp;
+ struct {
+ u8 action_code;
+ u16 params;
+ u16 reason_code;
+ } __packed delba;
+			struct {
+ u8 action_code;
+ /* capab_info for open and confirm,
+ * reason for close
+ */
+ u16 aux;
+ /* Followed in plink_confirm by status
+ * code, AID and supported rates,
+ * and directly by supported rates in
+ * plink_open and plink_close
+ */
+ u8 variable[0];
+ } __packed plink_action;
+			struct {
+ u8 action_code;
+ u8 variable[0];
+ } __packed mesh_action;
+ } __packed u;
+ } __packed action;
+ } __packed u;
+} __packed;
+
+/* mgmt header + 1 byte category code */
+#define IEEE80211_MIN_ACTION_SIZE \
+ FIELD_OFFSET(struct ieee80211_mgmt, u.action.u)
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/if_ether.h b/drivers/staging/rtl8188eu/include/if_ether.h
new file mode 100644
index 0000000..db15771
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/if_ether.h
@@ -0,0 +1,111 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#ifndef _LINUX_IF_ETHER_H
+#define _LINUX_IF_ETHER_H
+
+/*
+ * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble
+ * and FCS/CRC (frame check sequence).
+ */
+
+#define ETH_ALEN 6 /* Octets in one ethernet addr */
+#define ETH_HLEN 14 /* Total octets in header. */
+#define ETH_ZLEN 60 /* Min. octets in frame sans FCS */
+#define ETH_DATA_LEN 1500 /* Max. octets in payload */
+#define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */
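A quick way to read these constants together: the maximum frame length is just the header plus the maximum payload, 14 + 1500 = 1514. A minimal sketch of that relationship as a compile-time check, assuming the kernel's BUILD_BUG_ON macro from <linux/bug.h> is available in the including translation unit (the helper name is illustrative, not part of this header):

/* Illustrative only: restates the arithmetic implied by the constants above. */
static inline void eth_len_sanity_check(void)
{
        BUILD_BUG_ON(ETH_HLEN + ETH_DATA_LEN != ETH_FRAME_LEN); /* 14 + 1500 == 1514 */
}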
+
+/*
+ * These are the defined Ethernet Protocol ID's.
+ */
+
+#define ETH_P_LOOP 0x0060 /* Ethernet Loopback packet */
+#define ETH_P_PUP 0x0200 /* Xerox PUP packet */
+#define ETH_P_PUPAT 0x0201 /* Xerox PUP Addr Trans packet */
+#define ETH_P_IP 0x0800 /* Internet Protocol packet */
+#define ETH_P_X25 0x0805 /* CCITT X.25 */
+#define ETH_P_ARP 0x0806 /* Address Resolution packet */
+#define ETH_P_BPQ 0x08FF /* G8BPQ AX.25 Ethernet Packet */
+#define ETH_P_IEEEPUP 0x0a00 /* Xerox IEEE802.3 PUP packet */
+#define ETH_P_IEEEPUPAT 0x0a01 /* Xerox IEEE802.3 PUP */
+#define ETH_P_DEC 0x6000 /* DEC Assigned proto */
+#define ETH_P_DNA_DL 0x6001 /* DEC DNA Dump/Load */
+#define ETH_P_DNA_RC 0x6002 /* DEC DNA Remote Console */
+#define ETH_P_DNA_RT 0x6003 /* DEC DNA Routing */
+#define ETH_P_LAT 0x6004 /* DEC LAT */
+#define ETH_P_DIAG 0x6005 /* DEC Diagnostics */
+#define ETH_P_CUST 0x6006 /* DEC Customer use */
+#define ETH_P_SCA 0x6007 /* DEC Systems Comms Arch */
+#define ETH_P_RARP 0x8035 /* Reverse Addr Res packet */
+#define ETH_P_ATALK 0x809B /* Appletalk DDP */
+#define ETH_P_AARP 0x80F3 /* Appletalk AARP */
+#define ETH_P_8021Q 0x8100 /* 802.1Q VLAN Extended Header */
+#define ETH_P_IPX 0x8137 /* IPX over DIX */
+#define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */
+#define ETH_P_PPP_DISC 0x8863 /* PPPoE discovery messages */
+#define ETH_P_PPP_SES 0x8864 /* PPPoE session messages */
+#define ETH_P_ATMMPOA 0x884c /* MultiProtocol Over ATM */
+#define ETH_P_ATMFATE 0x8884 /* Frame-based ATM Transport
+ * over Ethernet
+ */
+
+/*
+ * Non DIX types. Won't clash for 1500 types.
+ */
+
+#define ETH_P_802_3 0x0001 /* Dummy type for 802.3 frames */
+#define ETH_P_AX25 0x0002 /* Dummy protocol id for AX.25 */
+#define ETH_P_ALL 0x0003 /* Every packet (be careful!!!) */
+#define ETH_P_802_2 0x0004 /* 802.2 frames */
+#define ETH_P_SNAP 0x0005 /* Internal only */
+#define ETH_P_DDCMP 0x0006 /* DEC DDCMP: Internal only */
+#define ETH_P_WAN_PPP 0x0007 /* Dummy type for WAN PPP frames*/
+#define ETH_P_PPP_MP 0x0008 /* Dummy type for PPP MP frames */
+#define ETH_P_LOCALTALK 0x0009 /* Localtalk pseudo type */
+#define ETH_P_PPPTALK 0x0010 /* Dummy type for Atalk over PPP*/
+#define ETH_P_TR_802_2 0x0011 /* 802.2 frames */
+#define ETH_P_MOBITEX 0x0015 /* Mobitex (kaz@cafe.net) */
+#define ETH_P_CONTROL 0x0016 /* Card specific control frames */
+#define ETH_P_IRDA 0x0017 /* Linux-IrDA */
+#define ETH_P_ECONET 0x0018 /* Acorn Econet */
+
+/*
+ * This is an Ethernet frame header.
+ */
+
+struct ethhdr {
+ unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
+ unsigned char h_source[ETH_ALEN]; /* source ether addr */
+ unsigned short h_proto; /* packet type ID field */
+};
+
+struct _vlan {
+ unsigned short h_vlan_TCI; /* Encap prio and VLAN ID */
+ unsigned short h_vlan_encapsulated_proto;
+};
+
+#define get_vlan_id(pvlan) \
+ ((ntohs((unsigned short)pvlan->h_vlan_TCI)) & 0xfff)
+#define get_vlan_priority(pvlan) \
+ ((ntohs((unsigned short)pvlan->h_vlan_TCI))>>13)
+#define get_vlan_encap_proto(pvlan) \
+ (ntohs((unsigned short)pvlan->h_vlan_encapsulated_proto))
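A minimal usage sketch for the accessors above, assuming frame points at an Ethernet frame whose h_proto is ETH_P_8021Q, so the 802.1Q tag (TCI plus encapsulated protocol) sits right after the 14-byte header; the function name is illustrative, not part of the driver:

static inline void parse_vlan_tag(const unsigned char *frame)
{
        const struct _vlan *pvlan = (const struct _vlan *)(frame + ETH_HLEN);
        unsigned short vid   = get_vlan_id(pvlan);           /* low 12 bits of TCI */
        unsigned short prio  = get_vlan_priority(pvlan);      /* top 3 bits of TCI */
        unsigned short proto = get_vlan_encap_proto(pvlan);   /* e.g. ETH_P_IP */

        (void)vid; (void)prio; (void)proto;                   /* illustration only */
}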
+
+#endif /* _LINUX_IF_ETHER_H */
diff --git a/drivers/staging/rtl8188eu/include/ioctl_cfg80211.h b/drivers/staging/rtl8188eu/include/ioctl_cfg80211.h
new file mode 100644
index 0000000..037e9a5
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/ioctl_cfg80211.h
@@ -0,0 +1,107 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __IOCTL_CFG80211_H__
+#define __IOCTL_CFG80211_H__
+
+struct rtw_wdev_invit_info {
+ u8 token;
+ u8 flags;
+ u8 status;
+ u8 req_op_ch;
+ u8 rsp_op_ch;
+};
+
+#define rtw_wdev_invit_info_init(invit_info) \
+ do { \
+ (invit_info)->token = 0; \
+ (invit_info)->flags = 0x00; \
+ (invit_info)->status = 0xff; \
+ (invit_info)->req_op_ch = 0; \
+ (invit_info)->rsp_op_ch = 0; \
+ } while (0)
+
+struct rtw_wdev_priv {
+ struct wireless_dev *rtw_wdev;
+
+ struct adapter *padapter;
+
+ struct cfg80211_scan_request *scan_request;
+ spinlock_t scan_req_lock;
+
+ struct net_device *pmon_ndev;/* for monitor interface */
+ char ifname_mon[IFNAMSIZ + 1]; /* name of monitor interface */
+
+ u8 p2p_enabled;
+
+ u8 provdisc_req_issued;
+
+ struct rtw_wdev_invit_info invit_info;
+
+ u8 bandroid_scan;
+ bool block;
+ bool power_mgmt;
+};
+
+#define wdev_to_priv(w) ((struct rtw_wdev_priv *)(wdev_priv(w)))
+
+#define wiphy_to_wdev(x) \
+((struct wireless_dev *)(((struct rtw_wdev_priv *)wiphy_priv(x))->rtw_wdev))
+
+int rtw_wdev_alloc(struct adapter *padapter, struct device *dev);
+void rtw_wdev_free(struct wireless_dev *wdev);
+void rtw_wdev_unregister(struct wireless_dev *wdev);
+
+void rtw_cfg80211_init_wiphy(struct adapter *padapter);
+
+void rtw_cfg80211_surveydone_event_callback(struct adapter *padapter);
+
+void rtw_cfg80211_indicate_connect(struct adapter *padapter);
+void rtw_cfg80211_indicate_disconnect(struct adapter *padapter);
+void rtw_cfg80211_indicate_scan_done(struct rtw_wdev_priv *pwdev_priv,
+ bool aborted);
+
+#ifdef CONFIG_88EU_AP_MODE
+void rtw_cfg80211_indicate_sta_assoc(struct adapter *padapter,
+ u8 *pmgmt_frame, uint frame_len);
+void rtw_cfg80211_indicate_sta_disassoc(struct adapter *padapter,
+ unsigned char *da,
+ unsigned short reason);
+#endif /* CONFIG_88EU_AP_MODE */
+
+void rtw_cfg80211_issue_p2p_provision_request(struct adapter *padapter,
+ const u8 *buf, size_t len);
+void rtw_cfg80211_rx_p2p_action_public(struct adapter *padapter,
+ u8 *pmgmt_frame, uint frame_len);
+void rtw_cfg80211_rx_action_p2p(struct adapter *padapter, u8 *pmgmt_frame,
+ uint frame_len);
+void rtw_cfg80211_rx_action(struct adapter *adapter, u8 *frame,
+ uint frame_len, const char *msg);
+
+int rtw_cfg80211_set_mgnt_wpsp2pie(struct net_device *net,
+ char *buf, int len, int type);
+
+bool rtw_cfg80211_pwr_mgmt(struct adapter *adapter);
+
+#define rtw_cfg80211_rx_mgmt(dev, freq, sig_dbm, buf, len, gfp) \
+ cfg80211_rx_mgmt(dev, freq, sig_dbm, buf, len, gfp)
+#define rtw_cfg80211_send_rx_assoc(dev, bss, buf, len) \
+ cfg80211_send_rx_assoc(dev, bss, buf, len)
+
+#endif /* __IOCTL_CFG80211_H__ */
diff --git a/drivers/staging/rtl8188eu/include/ip.h b/drivers/staging/rtl8188eu/include/ip.h
new file mode 100644
index 0000000..9fdac6d
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/ip.h
@@ -0,0 +1,126 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _LINUX_IP_H
+#define _LINUX_IP_H
+
+/* SOL_IP socket options */
+
+#define IPTOS_TOS_MASK 0x1E
+#define IPTOS_TOS(tos) ((tos)&IPTOS_TOS_MASK)
+#define IPTOS_LOWDELAY 0x10
+#define IPTOS_THROUGHPUT 0x08
+#define IPTOS_RELIABILITY 0x04
+#define IPTOS_MINCOST 0x02
+
+#define IPTOS_PREC_MASK 0xE0
+#define IPTOS_PREC(tos) ((tos)&IPTOS_PREC_MASK)
+#define IPTOS_PREC_NETCONTROL 0xe0
+#define IPTOS_PREC_INTERNETCONTROL 0xc0
+#define IPTOS_PREC_CRITIC_ECP 0xa0
+#define IPTOS_PREC_FLASHOVERRIDE 0x80
+#define IPTOS_PREC_FLASH 0x60
+#define IPTOS_PREC_IMMEDIATE 0x40
+#define IPTOS_PREC_PRIORITY 0x20
+#define IPTOS_PREC_ROUTINE 0x00
+
+
+/* IP options */
+#define IPOPT_COPY 0x80
+#define IPOPT_CLASS_MASK 0x60
+#define IPOPT_NUMBER_MASK 0x1f
+
+#define IPOPT_COPIED(o) ((o)&IPOPT_COPY)
+#define IPOPT_CLASS(o) ((o)&IPOPT_CLASS_MASK)
+#define IPOPT_NUMBER(o) ((o)&IPOPT_NUMBER_MASK)
+
+#define IPOPT_CONTROL 0x00
+#define IPOPT_RESERVED1 0x20
+#define IPOPT_MEASUREMENT 0x40
+#define IPOPT_RESERVED2 0x60
+
+#define IPOPT_END (0 | IPOPT_CONTROL)
+#define IPOPT_NOOP (1 | IPOPT_CONTROL)
+#define IPOPT_SEC (2 | IPOPT_CONTROL | IPOPT_COPY)
+#define IPOPT_LSRR (3 | IPOPT_CONTROL | IPOPT_COPY)
+#define IPOPT_TIMESTAMP (4 | IPOPT_MEASUREMENT)
+#define IPOPT_RR (7 | IPOPT_CONTROL)
+#define IPOPT_SID (8 | IPOPT_CONTROL | IPOPT_COPY)
+#define IPOPT_SSRR (9 | IPOPT_CONTROL | IPOPT_COPY)
+#define IPOPT_RA (20 | IPOPT_CONTROL | IPOPT_COPY)
+
+#define IPVERSION 4
+#define MAXTTL 255
+#define IPDEFTTL 64
+#define IPOPT_OPTVAL 0
+#define IPOPT_OLEN 1
+#define IPOPT_OFFSET 2
+#define IPOPT_MINOFF 4
+#define MAX_IPOPTLEN 40
+#define IPOPT_NOP IPOPT_NOOP
+#define IPOPT_EOL IPOPT_END
+#define IPOPT_TS IPOPT_TIMESTAMP
+
+#define IPOPT_TS_TSONLY 0 /* timestamps only */
+#define IPOPT_TS_TSANDADDR 1 /* timestamps and addresses */
+#define IPOPT_TS_PRESPEC 3 /* specified modules only */
+
+struct ip_options {
+ __u32 faddr; /* Saved first hop address */
+ unsigned char optlen;
+ unsigned char srr;
+ unsigned char rr;
+ unsigned char ts;
+ unsigned char is_setbyuser:1, /* Set by setsockopt? */
+ is_data:1, /* Options in __data, rather than skb*/
+ is_strictroute:1,/* Strict source route */
+ srr_is_hit:1, /* Packet destn addr was ours */
+ is_changed:1, /* IP checksum no longer valid */
+ rr_needaddr:1, /* Need to record addr of out dev*/
+ ts_needtime:1, /* Need to record timestamp */
+ ts_needaddr:1; /* Need to record addr of out dev */
+ unsigned char router_alert;
+ unsigned char __pad1;
+ unsigned char __pad2;
+ unsigned char __data[0];
+};
+
+#define optlength(opt) (sizeof(struct ip_options) + (opt)->optlen)
+
+struct iphdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 ihl:4,
+ version:4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 version:4,
+ ihl:4;
+#endif
+ __u8 tos;
+ __u16 tot_len;
+ __u16 id;
+ __u16 frag_off;
+ __u8 ttl;
+ __u8 protocol;
+ __u16 check;
+ __u32 saddr;
+ __u32 daddr;
+ /*The options start here. */
+};
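Because the version/ihl bitfield order flips with host endianness, code that only has a raw byte pointer often skips the bitfields and masks the first octet directly; a small equivalent sketch (helper names are illustrative, not part of the driver):

/* The first octet of an IPv4 header always holds the version in its high
 * nibble and the header length (in 32-bit words) in its low nibble. */
static inline unsigned int iph_version(const unsigned char *buf)
{
        return buf[0] >> 4;             /* 4 for IPv4 */
}

static inline unsigned int iph_hdr_len_bytes(const unsigned char *buf)
{
        return (buf[0] & 0x0f) * 4;     /* IHL is counted in 32-bit words */
}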
+
+#endif /* _LINUX_IP_H */
diff --git a/drivers/staging/rtl8188eu/include/mlme_osdep.h b/drivers/staging/rtl8188eu/include/mlme_osdep.h
new file mode 100644
index 0000000..ae1722c
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/mlme_osdep.h
@@ -0,0 +1,35 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __MLME_OSDEP_H_
+#define __MLME_OSDEP_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+void rtw_init_mlme_timer(struct adapter *padapter);
+void rtw_os_indicate_disconnect(struct adapter *adapter);
+void rtw_os_indicate_connect(struct adapter *adapter);
+void rtw_os_indicate_scan_done(struct adapter *padapter, bool aborted);
+void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie);
+
+void rtw_reset_securitypriv(struct adapter *adapter);
+void indicate_wx_scan_complete_event(struct adapter *padapter);
+
+#endif /* __MLME_OSDEP_H_ */
diff --git a/drivers/staging/rtl8188eu/include/mp_custom_oid.h b/drivers/staging/rtl8188eu/include/mp_custom_oid.h
new file mode 100644
index 0000000..6fa52cf
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/mp_custom_oid.h
@@ -0,0 +1,352 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __CUSTOM_OID_H
+#define __CUSTOM_OID_H
+
+/* by Owen */
+/* 0xFF818000 - 0xFF81802F RTL8180 Mass Production Kit */
+/* 0xFF818500 - 0xFF81850F RTL8185 Setup Utility */
+/* 0xFF818580 - 0xFF81858F RTL8185 Phy Status Utility */
+
+/* */
+
+/* by Owen for Production Kit */
+/* For Production Kit with Agilent Equipments */
+/* in order to make our custom oids hopefully somewhat unique */
+/* we will use 0xFF (indicating implementation specific OID) */
+/* 81(first byte of non zero Realtek unique identifier) */
+/* 80 (second byte of non zero Realtek unique identifier) */
+/* XX (the custom OID number - providing 255 possible custom oids) */
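To make that encoding concrete: each OID in the production-kit block that follows is the 0xFF8180 prefix with an 8-bit index appended. A small helper that reproduces the layout (purely illustrative, not part of the driver):

/* RTK_MP_OID(0x00) == OID_RT_PRO_RESET_DUT, RTK_MP_OID(0x01) == OID_RT_PRO_SET_DATA_RATE, ... */
#define RTK_MP_OID(idx)         (0xFF818000UL | ((idx) & 0xFFUL))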
+
+#define OID_RT_PRO_RESET_DUT 0xFF818000
+#define OID_RT_PRO_SET_DATA_RATE 0xFF818001
+#define OID_RT_PRO_START_TEST 0xFF818002
+#define OID_RT_PRO_STOP_TEST 0xFF818003
+#define OID_RT_PRO_SET_PREAMBLE 0xFF818004
+#define OID_RT_PRO_SET_SCRAMBLER 0xFF818005
+#define OID_RT_PRO_SET_FILTER_BB 0xFF818006
+#define OID_RT_PRO_SET_MANUAL_DIVERSITY_BB 0xFF818007
+#define OID_RT_PRO_SET_CHANNEL_DIRECT_CALL 0xFF818008
+#define OID_RT_PRO_SET_SLEEP_MODE_DIRECT_CALL 0xFF818009
+#define OID_RT_PRO_SET_WAKE_MODE_DIRECT_CALL 0xFF81800A
+
+#define OID_RT_PRO_SET_TX_ANTENNA_BB 0xFF81800D
+#define OID_RT_PRO_SET_ANTENNA_BB 0xFF81800E
+#define OID_RT_PRO_SET_CR_SCRAMBLER 0xFF81800F
+#define OID_RT_PRO_SET_CR_NEW_FILTER 0xFF818010
+#define OID_RT_PRO_SET_TX_POWER_CONTROL 0xFF818011
+#define OID_RT_PRO_SET_CR_TX_CONFIG 0xFF818012
+#define OID_RT_PRO_GET_TX_POWER_CONTROL 0xFF818013
+#define OID_RT_PRO_GET_CR_SIGNAL_QUALITY 0xFF818014
+#define OID_RT_PRO_SET_CR_SETPOINT 0xFF818015
+#define OID_RT_PRO_SET_INTEGRATOR 0xFF818016
+#define OID_RT_PRO_SET_SIGNAL_QUALITY 0xFF818017
+#define OID_RT_PRO_GET_INTEGRATOR 0xFF818018
+#define OID_RT_PRO_GET_SIGNAL_QUALITY 0xFF818019
+#define OID_RT_PRO_QUERY_EEPROM_TYPE 0xFF81801A
+#define OID_RT_PRO_WRITE_MAC_ADDRESS 0xFF81801B
+#define OID_RT_PRO_READ_MAC_ADDRESS 0xFF81801C
+#define OID_RT_PRO_WRITE_CIS_DATA 0xFF81801D
+#define OID_RT_PRO_READ_CIS_DATA 0xFF81801E
+#define OID_RT_PRO_WRITE_POWER_CONTROL 0xFF81801F
+#define OID_RT_PRO_READ_POWER_CONTROL 0xFF818020
+#define OID_RT_PRO_WRITE_EEPROM 0xFF818021
+#define OID_RT_PRO_READ_EEPROM 0xFF818022
+#define OID_RT_PRO_RESET_TX_PACKET_SENT 0xFF818023
+#define OID_RT_PRO_QUERY_TX_PACKET_SENT 0xFF818024
+#define OID_RT_PRO_RESET_RX_PACKET_RECEIVED 0xFF818025
+#define OID_RT_PRO_QUERY_RX_PACKET_RECEIVED 0xFF818026
+#define OID_RT_PRO_QUERY_RX_PACKET_CRC32_ERROR 0xFF818027
+#define OID_RT_PRO_QUERY_CURRENT_ADDRESS 0xFF818028
+#define OID_RT_PRO_QUERY_PERMANENT_ADDRESS 0xFF818029
+#define OID_RT_PRO_SET_PHILIPS_RF_PARAMETERS 0xFF81802A
+#define OID_RT_PRO_RECEIVE_PACKET 0xFF81802C
+/* added by Owen on 04/08/03 for Cameo's request */
+#define OID_RT_PRO_WRITE_EEPROM_BYTE 0xFF81802D
+#define OID_RT_PRO_READ_EEPROM_BYTE 0xFF81802E
+#define OID_RT_PRO_SET_MODULATION 0xFF81802F
+/* */
+
+/* Sean */
+#define OID_RT_DRIVER_OPTION 0xFF818080
+#define OID_RT_RF_OFF 0xFF818081
+#define OID_RT_AUTH_STATUS 0xFF818082
+
+/* */
+#define OID_RT_PRO_SET_CONTINUOUS_TX 0xFF81800B
+#define OID_RT_PRO_SET_SINGLE_CARRIER_TX 0xFF81800C
+#define OID_RT_PRO_SET_CARRIER_SUPPRESSION_TX 0xFF81802B
+#define OID_RT_PRO_SET_SINGLE_TONE_TX 0xFF818043
+/* */
+
+
+/* by Owen for RTL8185 Phy Status Report Utility */
+#define OID_RT_UTILITY_false_ALARM_COUNTERS 0xFF818580
+#define OID_RT_UTILITY_SELECT_DEBUG_MODE 0xFF818581
+#define OID_RT_UTILITY_SELECT_SUBCARRIER_NUMBER 0xFF818582
+#define OID_RT_UTILITY_GET_RSSI_STATUS 0xFF818583
+#define OID_RT_UTILITY_GET_FRAME_DETECTION_STATUS 0xFF818584
+#define OID_RT_UTILITY_GET_AGC_AND_FREQUENCY_OFFSET_ESTIMATION_STATUS \
+ 0xFF818585
+#define OID_RT_UTILITY_GET_CHANNEL_ESTIMATION_STATUS 0xFF818586
+/* */
+
+/* by Owen on 03/09/19-03/09/22 for RTL8185 */
+#define OID_RT_WIRELESS_MODE 0xFF818500
+#define OID_RT_SUPPORTED_RATES 0xFF818501
+#define OID_RT_DESIRED_RATES 0xFF818502
+#define OID_RT_WIRELESS_MODE_STARTING_ADHOC 0xFF818503
+/* */
+
+#define OID_RT_GET_CONNECT_STATE 0xFF030001
+#define OID_RT_RESCAN 0xFF030002
+#define OID_RT_SET_KEY_LENGTH 0xFF030003
+#define OID_RT_SET_DEFAULT_KEY_ID 0xFF030004
+
+#define OID_RT_SET_CHANNEL 0xFF010182
+#define OID_RT_SET_SNIFFER_MODE 0xFF010183
+#define OID_RT_GET_SIGNAL_QUALITY 0xFF010184
+#define OID_RT_GET_SMALL_PACKET_CRC 0xFF010185
+#define OID_RT_GET_MIDDLE_PACKET_CRC 0xFF010186
+#define OID_RT_GET_LARGE_PACKET_CRC 0xFF010187
+#define OID_RT_GET_TX_RETRY 0xFF010188
+#define OID_RT_GET_RX_RETRY 0xFF010189
+#define OID_RT_PRO_SET_FW_DIG_STATE 0xFF01018A/* S */
+#define OID_RT_PRO_SET_FW_RA_STATE 0xFF01018B/* S */
+
+#define OID_RT_GET_RX_TOTAL_PACKET 0xFF010190
+#define OID_RT_GET_TX_BEACON_OK 0xFF010191
+#define OID_RT_GET_TX_BEACON_ERR 0xFF010192
+#define OID_RT_GET_RX_ICV_ERR 0xFF010193
+#define OID_RT_SET_ENCRYPTION_ALGORITHM 0xFF010194
+#define OID_RT_SET_NO_AUTO_RESCAN 0xFF010195
+#define OID_RT_GET_PREAMBLE_MODE 0xFF010196
+#define OID_RT_GET_DRIVER_UP_DELTA_TIME 0xFF010197
+#define OID_RT_GET_AP_IP 0xFF010198
+#define OID_RT_GET_CHANNELPLAN 0xFF010199
+#define OID_RT_SET_PREAMBLE_MODE 0xFF01019A
+#define OID_RT_SET_BCN_INTVL 0xFF01019B
+#define OID_RT_GET_RF_VENDER 0xFF01019C
+#define OID_RT_DEDICATE_PROBE 0xFF01019D
+#define OID_RT_PRO_RX_FILTER_PATTERN 0xFF01019E
+
+#define OID_RT_GET_DCST_CURRENT_THRESHOLD 0xFF01019F
+
+#define OID_RT_GET_CCA_ERR 0xFF0101A0
+#define OID_RT_GET_CCA_UPGRADE_THRESHOLD 0xFF0101A1
+#define OID_RT_GET_CCA_FALLBACK_THRESHOLD 0xFF0101A2
+
+#define OID_RT_GET_CCA_UPGRADE_EVALUATE_TIMES 0xFF0101A3
+#define OID_RT_GET_CCA_FALLBACK_EVALUATE_TIMES 0xFF0101A4
+
+/* by Owen on 03/31/03 for Cameo's request */
+#define OID_RT_SET_RATE_ADAPTIVE 0xFF0101A5
+/* */
+#define OID_RT_GET_DCST_EVALUATE_PERIOD 0xFF0101A5
+#define OID_RT_GET_DCST_TIME_UNIT_INDEX 0xFF0101A6
+#define OID_RT_GET_TOTAL_TX_BYTES 0xFF0101A7
+#define OID_RT_GET_TOTAL_RX_BYTES 0xFF0101A8
+#define OID_RT_CURRENT_TX_POWER_LEVEL 0xFF0101A9
+#define OID_RT_GET_ENC_KEY_MISMATCH_COUNT 0xFF0101AA
+#define OID_RT_GET_ENC_KEY_MATCH_COUNT 0xFF0101AB
+#define OID_RT_GET_CHANNEL 0xFF0101AC
+
+#define OID_RT_SET_CHANNELPLAN 0xFF0101AD
+#define OID_RT_GET_HARDWARE_RADIO_OFF 0xFF0101AE
+#define OID_RT_CHANNELPLAN_BY_COUNTRY 0xFF0101AF
+#define OID_RT_SCAN_AVAILABLE_BSSID 0xFF0101B0
+#define OID_RT_GET_HARDWARE_VERSION 0xFF0101B1
+#define OID_RT_GET_IS_ROAMING 0xFF0101B2
+#define OID_RT_GET_IS_PRIVACY 0xFF0101B3
+#define OID_RT_GET_KEY_MISMATCH 0xFF0101B4
+#define OID_RT_SET_RSSI_ROAM_TRAFFIC_TH 0xFF0101B5
+#define OID_RT_SET_RSSI_ROAM_SIGNAL_TH 0xFF0101B6
+#define OID_RT_RESET_LOG 0xFF0101B7
+#define OID_RT_GET_LOG 0xFF0101B8
+#define OID_RT_SET_INDICATE_HIDDEN_AP 0xFF0101B9
+#define OID_RT_GET_HEADER_FAIL 0xFF0101BA
+#define OID_RT_SUPPORTED_WIRELESS_MODE 0xFF0101BB
+#define OID_RT_GET_CHANNEL_LIST 0xFF0101BC
+#define OID_RT_GET_SCAN_IN_PROGRESS 0xFF0101BD
+#define OID_RT_GET_TX_INFO 0xFF0101BE
+#define OID_RT_RF_READ_WRITE_OFFSET 0xFF0101BF
+#define OID_RT_RF_READ_WRITE 0xFF0101C0
+
+/* For Netgear request. 2005.01.13, by rcnjko. */
+#define OID_RT_FORCED_DATA_RATE 0xFF0101C1
+#define OID_RT_WIRELESS_MODE_FOR_SCAN_LIST 0xFF0101C2
+/* For Netgear request. 2005.02.17, by rcnjko. */
+#define OID_RT_GET_BSS_WIRELESS_MODE 0xFF0101C3
+/* For AZ project. 2005.06.27, by rcnjko. */
+#define OID_RT_SCAN_WITH_MAGIC_PACKET 0xFF0101C4
+
+/* Vincent 8185MP */
+#define OID_RT_PRO_RX_FILTER 0xFF0111C0
+
+#define OID_CE_USB_WRITE_REGISTRY 0xFF0111C1
+#define OID_CE_USB_READ_REGISTRY 0xFF0111C2
+
+#define OID_RT_PRO_SET_INITIAL_GA 0xFF0111C3
+#define OID_RT_PRO_SET_BB_RF_STANDBY_MODE 0xFF0111C4
+#define OID_RT_PRO_SET_BB_RF_SHUTDOWN_MODE 0xFF0111C5
+#define OID_RT_PRO_SET_TX_CHARGE_PUMP 0xFF0111C6
+#define OID_RT_PRO_SET_RX_CHARGE_PUMP 0xFF0111C7
+#define OID_RT_PRO_RF_WRITE_REGISTRY 0xFF0111C8
+#define OID_RT_PRO_RF_READ_REGISTRY 0xFF0111C9
+#define OID_RT_PRO_QUERY_RF_TYPE 0xFF0111CA
+
+/* AP OID */
+#define OID_RT_AP_GET_ASSOCIATED_STATION_LIST 0xFF010300
+#define OID_RT_AP_GET_CURRENT_TIME_STAMP 0xFF010301
+#define OID_RT_AP_SWITCH_INTO_AP_MODE 0xFF010302
+#define OID_RT_AP_SET_DTIM_PERIOD 0xFF010303
+/* Determine if driver supports AP mode. */
+#define OID_RT_AP_SUPPORTED 0xFF010304
+/* Set WPA-PSK passphrase into authenticator. */
+#define OID_RT_AP_SET_PASSPHRASE 0xFF010305
+
+/* 8187MP. 2004.09.06, by rcnjko. */
+#define OID_RT_PRO8187_WI_POLL 0xFF818780
+#define OID_RT_PRO_WRITE_BB_REG 0xFF818781
+#define OID_RT_PRO_READ_BB_REG 0xFF818782
+#define OID_RT_PRO_WRITE_RF_REG 0xFF818783
+#define OID_RT_PRO_READ_RF_REG 0xFF818784
+
+/* Meeting House. added by Annie, 2005-07-20. */
+#define OID_RT_MH_VENDER_ID 0xFFEDC100
+
+/* 8711 MP OID added 20051230. */
+#define OID_RT_PRO8711_JOIN_BSS 0xFF871100/* S */
+
+#define OID_RT_PRO_READ_REGISTER 0xFF871101 /* Q */
+#define OID_RT_PRO_WRITE_REGISTER 0xFF871102 /* S */
+
+#define OID_RT_PRO_BURST_READ_REGISTER 0xFF871103 /* Q */
+#define OID_RT_PRO_BURST_WRITE_REGISTER 0xFF871104 /* S */
+
+#define OID_RT_PRO_WRITE_TXCMD 0xFF871105 /* S */
+
+#define OID_RT_PRO_READ16_EEPROM 0xFF871106 /* Q */
+#define OID_RT_PRO_WRITE16_EEPROM 0xFF871107 /* S */
+
+#define OID_RT_PRO_H2C_SET_COMMAND 0xFF871108 /* S */
+#define OID_RT_PRO_H2C_QUERY_RESULT 0xFF871109 /* Q */
+
+#define OID_RT_PRO8711_WI_POLL 0xFF87110A /* Q */
+#define OID_RT_PRO8711_PKT_LOSS 0xFF87110B /* Q */
+#define OID_RT_RD_ATTRIB_MEM 0xFF87110C/* Q */
+#define OID_RT_WR_ATTRIB_MEM 0xFF87110D/* S */
+
+
+/* Method 2 for H2C/C2H */
+#define OID_RT_PRO_H2C_CMD_MODE 0xFF871110 /* S */
+#define OID_RT_PRO_H2C_CMD_RSP_MODE 0xFF871111 /* Q */
+#define OID_RT_PRO_H2C_CMD_EVENT_MODE 0xFF871112 /* S */
+#define OID_RT_PRO_WAIT_C2H_EVENT 0xFF871113 /* Q */
+#define OID_RT_PRO_RW_ACCESS_PROTOCOL_TEST 0xFF871114/* Q */
+
+#define OID_RT_PRO_SCSI_ACCESS_TEST 0xFF871115 /* Q, S */
+
+#define OID_RT_PRO_SCSI_TCPIPOFFLOAD_OUT 0xFF871116 /* S */
+#define OID_RT_PRO_SCSI_TCPIPOFFLOAD_IN 0xFF871117 /* Q,S */
+#define OID_RT_RRO_RX_PKT_VIA_IOCTRL 0xFF871118 /* Q */
+#define OID_RT_RRO_RX_PKTARRAY_VIA_IOCTRL 0xFF871119 /* Q */
+
+#define OID_RT_RPO_SET_PWRMGT_TEST 0xFF87111A /* S */
+#define OID_RT_PRO_QRY_PWRMGT_TEST 0xFF87111B /* Q */
+#define OID_RT_RPO_ASYNC_RWIO_TEST 0xFF87111C /* S */
+#define OID_RT_RPO_ASYNC_RWIO_POLL 0xFF87111D /* Q */
+#define OID_RT_PRO_SET_RF_INTFS 0xFF87111E /* S */
+#define OID_RT_POLL_RX_STATUS 0xFF87111F /* Q */
+
+#define OID_RT_PRO_CFG_DEBUG_MESSAGE 0xFF871120 /* Q,S */
+#define OID_RT_PRO_SET_DATA_RATE_EX 0xFF871121/* S */
+#define OID_RT_PRO_SET_BASIC_RATE 0xFF871122/* S */
+#define OID_RT_PRO_READ_TSSI 0xFF871123/* S */
+#define OID_RT_PRO_SET_POWER_TRACKING 0xFF871124/* S */
+
+
+#define OID_RT_PRO_QRY_PWRSTATE 0xFF871150 /* Q */
+#define OID_RT_PRO_SET_PWRSTATE 0xFF871151 /* S */
+
+/* Method 2 , using workitem */
+#define OID_RT_SET_READ_REG 0xFF871181 /* S */
+#define OID_RT_SET_WRITE_REG 0xFF871182 /* S */
+#define OID_RT_SET_BURST_READ_REG 0xFF871183 /* S */
+#define OID_RT_SET_BURST_WRITE_REG 0xFF871184 /* S */
+#define OID_RT_SET_WRITE_TXCMD 0xFF871185 /* S */
+#define OID_RT_SET_READ16_EEPROM 0xFF871186 /* S */
+#define OID_RT_SET_WRITE16_EEPROM 0xFF871187 /* S */
+#define OID_RT_QRY_POLL_WKITEM 0xFF871188 /* Q */
+
+/* For SDIO INTERFACE only */
+#define OID_RT_PRO_SYNCPAGERW_SRAM 0xFF8711A0 /* Q, S */
+#define OID_RT_PRO_871X_DRV_EXT 0xFF8711A1
+
+/* For USB INTERFACE only */
+#define OID_RT_PRO_USB_VENDOR_REQ 0xFF8711B0 /* Q, S */
+#define OID_RT_PRO_SCSI_AUTO_TEST 0xFF8711B1 /* S */
+#define OID_RT_PRO_USB_MAC_AC_FIFO_WRITE 0xFF8711B2 /* S */
+#define OID_RT_PRO_USB_MAC_RX_FIFO_READ 0xFF8711B3 /* Q */
+#define OID_RT_PRO_USB_MAC_RX_FIFO_POLLING 0xFF8711B4 /* Q */
+
+#define OID_RT_PRO_H2C_SET_RATE_TABLE 0xFF8711FB /* S */
+#define OID_RT_PRO_H2C_GET_RATE_TABLE 0xFF8711FC /* S */
+#define OID_RT_PRO_H2C_C2H_LBK_TEST 0xFF8711FE
+
+#define OID_RT_PRO_ENCRYPTION_CTRL 0xFF871200 /* Q, S */
+#define OID_RT_PRO_ADD_STA_INFO 0xFF871201 /* S */
+#define OID_RT_PRO_DELE_STA_INFO 0xFF871202 /* S */
+#define OID_RT_PRO_QUERY_DR_VARIABLE 0xFF871203 /* Q */
+
+#define OID_RT_PRO_RX_PACKET_TYPE 0xFF871204 /* Q, S */
+
+#define OID_RT_PRO_READ_EFUSE 0xFF871205 /* Q */
+#define OID_RT_PRO_WRITE_EFUSE 0xFF871206 /* S */
+#define OID_RT_PRO_RW_EFUSE_PGPKT 0xFF871207 /* Q, S */
+#define OID_RT_GET_EFUSE_CURRENT_SIZE 0xFF871208 /* Q */
+
+#define OID_RT_SET_BANDWIDTH 0xFF871209 /* S */
+#define OID_RT_SET_CRYSTAL_CAP 0xFF87120A /* S */
+
+#define OID_RT_SET_RX_PACKET_TYPE 0xFF87120B /* S */
+
+#define OID_RT_GET_EFUSE_MAX_SIZE 0xFF87120C /* Q */
+
+#define OID_RT_PRO_SET_TX_AGC_OFFSET 0xFF87120D /* S */
+
+#define OID_RT_PRO_SET_PKT_TEST_MODE 0xFF87120E /* S */
+
+#define OID_RT_PRO_FOR_EVM_TEST_SETTING 0xFF87120F /* S */
+
+#define OID_RT_PRO_GET_THERMAL_METER 0xFF871210 /* Q */
+
+#define OID_RT_RESET_PHY_RX_PACKET_COUNT 0xFF871211 /* S */
+#define OID_RT_GET_PHY_RX_PACKET_RECEIVED 0xFF871212 /* Q */
+#define OID_RT_GET_PHY_RX_PACKET_CRC32_ERROR 0xFF871213 /* Q */
+
+#define OID_RT_SET_POWER_DOWN 0xFF871214 /* S */
+
+#define OID_RT_GET_POWER_MODE 0xFF871215 /* Q */
+
+#define OID_RT_PRO_EFUSE 0xFF871216 /* Q, S */
+#define OID_RT_PRO_EFUSE_MAP 0xFF871217 /* Q, S */
+
+#endif /* ifndef __CUSTOM_OID_H */
diff --git a/drivers/staging/rtl8188eu/include/nic_spec.h b/drivers/staging/rtl8188eu/include/nic_spec.h
new file mode 100644
index 0000000..d422447
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/nic_spec.h
@@ -0,0 +1,44 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+
+#ifndef __NIC_SPEC_H__
+#define __NIC_SPEC_H__
+
+#define RTL8711_MCTRL_ (0x20000)
+#define RTL8711_UART_ (0x30000)
+#define RTL8711_TIMER_ (0x40000)
+#define RTL8711_FINT_ (0x50000)
+#define RTL8711_HINT_ (0x50000)
+#define RTL8711_GPIO_ (0x60000)
+#define RTL8711_WLANCTRL_ (0x200000)
+#define RTL8711_WLANFF_ (0xe00000)
+#define RTL8711_HCICTRL_ (0x600000)
+#define RTL8711_SYSCFG_ (0x620000)
+#define RTL8711_SYSCTRL_ (0x620000)
+#define RTL8711_MCCTRL_ (0x020000)
+
+
+#include <rtl8711_regdef.h>
+
+#include <rtl8711_bitdef.h>
+
+
+#endif /* __NIC_SPEC_H__ */
diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h
new file mode 100644
index 0000000..4787bac
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/odm.h
@@ -0,0 +1,1198 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+
+#ifndef __HALDMOUTSRC_H__
+#define __HALDMOUTSRC_H__
+
+/* Definition */
+/* Define all team support ability. */
+
+/* Define for all teams. Please Define the constant in your precomp header. */
+
+/* define DM_ODM_SUPPORT_AP 0 */
+/* define DM_ODM_SUPPORT_ADSL 0 */
+/* define DM_ODM_SUPPORT_CE 0 */
+/* define DM_ODM_SUPPORT_MP 1 */
+
+/* Define ODM SW team support flag. */
+
+/* Antenna Switch Relative Definition. */
+
+/* Add new function SwAntDivCheck8192C(). */
+/* This is the main function of Antenna diversity function before link. */
+/* Mainly, it just retains last scan result and scan again. */
+/* After that, it compares the scan results to see which one gets the better
+ * RSSI. It selects the antenna with the better receive power and returns the
+ * better scan result. */
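A compressed sketch of that compare-and-keep logic; the helper below is illustrative, not the driver's SwAntDivCheck8192C itself:

/* Keep whichever antenna observed the stronger RSSI during the two scans. */
static u8 pick_better_antenna(s32 rssi_ant_a, s32 rssi_ant_b)
{
        return (rssi_ant_a >= rssi_ant_b) ? 1 : 2;      /* 1 = Antenna A, 2 = Antenna B */
}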
+
+#define TP_MODE 0
+#define RSSI_MODE 1
+#define TRAFFIC_LOW 0
+#define TRAFFIC_HIGH 1
+
+/* 3 Tx Power Tracking */
+/* 3============================================================ */
+#define DPK_DELTA_MAPPING_NUM 13
+#define index_mapping_HP_NUM 15
+
+
+/* */
+/* 3 PSD Handler */
+/* 3============================================================ */
+
+#define AFH_PSD 1 /* 0:normal PSD scan, 1: only do 20 pts PSD */
+#define MODE_40M 0 /* 0:20M, 1:40M */
+#define PSD_TH2 3
+#define PSD_CHM 20 /* Minimum channel number for BT AFH */
+#define SIR_STEP_SIZE 3
+#define Smooth_Size_1 5
+#define Smooth_TH_1 3
+#define Smooth_Size_2 10
+#define Smooth_TH_2 4
+#define Smooth_Size_3 20
+#define Smooth_TH_3 4
+#define Smooth_Step_Size 5
+#define Adaptive_SIR 1
+#define PSD_RESCAN 4
+#define PSD_SCAN_INTERVAL 700 /* ms */
+
+/* 8723A High Power IGI Setting */
+#define DM_DIG_HIGH_PWR_IGI_LOWER_BOUND 0x22
+#define DM_DIG_Gmode_HIGH_PWR_IGI_LOWER_BOUND 0x28
+#define DM_DIG_HIGH_PWR_THRESHOLD 0x3a
+
+/* LPS define */
+#define DM_DIG_FA_TH0_LPS 4 /* 4 in lps */
+#define DM_DIG_FA_TH1_LPS 15 /* 15 lps */
+#define DM_DIG_FA_TH2_LPS 30 /* 30 lps */
+#define RSSI_OFFSET_DIG 0x05
+
+/* ANT Test */
+#define ANTTESTALL 0x00 /* Ant A or B will be Testing */
+#define ANTTESTA 0x01 /* Ant A will be Testing */
+#define ANTTESTB 0x02 /* Ant B will be testing */
+
+/* structure and define */
+
+/* Add for AP/ADSL pseudo DM structure requirement. */
+/* Do we need to move this to another position? */
+struct rtl8192cd_priv {
+ u8 temp;
+};
+
+struct rtw_dig {
+ u8 Dig_Enable_Flag;
+ u8 Dig_Ext_Port_Stage;
+
+ int RssiLowThresh;
+ int RssiHighThresh;
+
+ u32 FALowThresh;
+ u32 FAHighThresh;
+
+ u8 CurSTAConnectState;
+ u8 PreSTAConnectState;
+ u8 CurMultiSTAConnectState;
+
+ u8 PreIGValue;
+ u8 CurIGValue;
+ u8 BackupIGValue;
+
+ s8 BackoffVal;
+ s8 BackoffVal_range_max;
+ s8 BackoffVal_range_min;
+ u8 rx_gain_range_max;
+ u8 rx_gain_range_min;
+ u8 Rssi_val_min;
+
+ u8 PreCCK_CCAThres;
+ u8 CurCCK_CCAThres;
+ u8 PreCCKPDState;
+ u8 CurCCKPDState;
+
+ u8 LargeFAHit;
+ u8 ForbiddenIGI;
+ u32 Recover_cnt;
+
+ u8 DIG_Dynamic_MIN_0;
+ u8 DIG_Dynamic_MIN_1;
+ bool bMediaConnect_0;
+ bool bMediaConnect_1;
+
+ u32 AntDiv_RSSI_max;
+ u32 RSSI_max;
+};
+
+struct rtl_ps {
+ u8 PreCCAState;
+ u8 CurCCAState;
+
+ u8 PreRFState;
+ u8 CurRFState;
+
+ int Rssi_val_min;
+
+ u8 initialize;
+ u32 Reg874, RegC70, Reg85C, RegA74;
+
+};
+
+struct false_alarm_stats {
+ u32 Cnt_Parity_Fail;
+ u32 Cnt_Rate_Illegal;
+ u32 Cnt_Crc8_fail;
+ u32 Cnt_Mcs_fail;
+ u32 Cnt_Ofdm_fail;
+ u32 Cnt_Cck_fail;
+ u32 Cnt_all;
+ u32 Cnt_Fast_Fsync;
+ u32 Cnt_SB_Search_fail;
+ u32 Cnt_OFDM_CCA;
+ u32 Cnt_CCK_CCA;
+ u32 Cnt_CCA_all;
+ u32 Cnt_BW_USC; /* Gary */
+ u32 Cnt_BW_LSC; /* Gary */
+};
+
+struct dyn_primary_cca {
+ u8 PriCCA_flag;
+ u8 intf_flag;
+ u8 intf_type;
+ u8 DupRTS_flag;
+ u8 Monitor_flag;
+};
+
+struct rx_hpc {
+ u8 RXHP_flag;
+ u8 PSD_func_trigger;
+ u8 PSD_bitmap_RXHP[80];
+ u8 Pre_IGI;
+ u8 Cur_IGI;
+ u8 Pre_pw_th;
+ u8 Cur_pw_th;
+ bool First_time_enter;
+ bool RXHP_enable;
+ u8 TP_Mode;
+ struct timer_list PSDTimer;
+};
+
+#define ASSOCIATE_ENTRY_NUM 32 /* Max size of AsocEntry[]. */
+#define ODM_ASSOCIATE_ENTRY_NUM ASSOCIATE_ENTRY_NUM
+
+/* This indicates two different steps. */
+/* In SWAW_STEP_PEAK, driver needs to switch antenna and listen to
+ * the signal on the air. */
+/* In SWAW_STEP_DETERMINE, driver just compares the signal captured in
+ * SWAW_STEP_PEAK with original RSSI to determine if it is necessary to
+ * switch antenna. */
+
+#define SWAW_STEP_PEAK 0
+#define SWAW_STEP_DETERMINE 1
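The two steps read naturally as a tiny state machine; a hedged sketch with invented field and function names (the real logic lives in the driver's software antenna-diversity code):

struct swaw_probe {
        u8      step;           /* SWAW_STEP_PEAK or SWAW_STEP_DETERMINE */
        s32     rssi_original;  /* RSSI measured on the original antenna */
        s32     rssi_trial;     /* RSSI captured after switching antennas */
};

/* Returns true when the trial antenna should be kept. */
static bool swaw_keep_trial_antenna(struct swaw_probe *p, s32 cur_rssi)
{
        if (p->step == SWAW_STEP_PEAK) {
                p->rssi_trial = cur_rssi;       /* listen on the trial antenna */
                p->step = SWAW_STEP_DETERMINE;
                return false;                   /* decision happens next pass */
        }
        /* SWAW_STEP_DETERMINE: compare against the original antenna's RSSI */
        return p->rssi_trial > p->rssi_original;
}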
+
+#define TP_MODE 0
+#define RSSI_MODE 1
+#define TRAFFIC_LOW 0
+#define TRAFFIC_HIGH 1
+
+struct sw_ant_switch {
+ u8 try_flag;
+ s32 PreRSSI;
+ u8 CurAntenna;
+ u8 PreAntenna;
+ u8 RSSI_Trying;
+ u8 TestMode;
+ u8 bTriggerAntennaSwitch;
+ u8 SelectAntennaMap;
+ u8 RSSI_target;
+
+ /* Before link Antenna Switch check */
+ u8 SWAS_NoLink_State;
+ u32 SWAS_NoLink_BK_Reg860;
+ bool ANTA_ON; /* To indicate Ant A is on or not */
+ bool ANTB_ON; /* To indicate Ant B is on or not */
+
+ s32 RSSI_sum_A;
+ s32 RSSI_sum_B;
+ s32 RSSI_cnt_A;
+ s32 RSSI_cnt_B;
+ u64 lastTxOkCnt;
+ u64 lastRxOkCnt;
+ u64 TXByteCnt_A;
+ u64 TXByteCnt_B;
+ u64 RXByteCnt_A;
+ u64 RXByteCnt_B;
+ u8 TrafficLoad;
+ struct timer_list SwAntennaSwitchTimer;
+ /* Hybrid Antenna Diversity */
+ u32 CCK_Ant1_Cnt[ASSOCIATE_ENTRY_NUM];
+ u32 CCK_Ant2_Cnt[ASSOCIATE_ENTRY_NUM];
+ u32 OFDM_Ant1_Cnt[ASSOCIATE_ENTRY_NUM];
+ u32 OFDM_Ant2_Cnt[ASSOCIATE_ENTRY_NUM];
+ u32 RSSI_Ant1_Sum[ASSOCIATE_ENTRY_NUM];
+ u32 RSSI_Ant2_Sum[ASSOCIATE_ENTRY_NUM];
+ u8 TxAnt[ASSOCIATE_ENTRY_NUM];
+ u8 TargetSTA;
+ u8 antsel;
+ u8 RxIdleAnt;
+};
+
+struct edca_turbo {
+ bool bCurrentTurboEDCA;
+ bool bIsCurRDLState;
+ u32 prv_traffic_idx; /* edca turbo */
+};
+
+struct odm_rate_adapt {
+ u8 Type; /* DM_Type_ByFW/DM_Type_ByDriver */
+ u8 HighRSSIThresh; /* if RSSI > HighRSSIThresh => RATRState is DM_RATR_STA_HIGH */
+ u8 LowRSSIThresh; /* if RSSI <= LowRSSIThresh => RATRState is DM_RATR_STA_LOW */
+ u8 RATRState; /* Current RSSI level, DM_RATR_STA_HIGH/DM_RATR_STA_MIDDLE/DM_RATR_STA_LOW */
+ u32 LastRATR; /* RATR Register Content */
+};
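The two thresholds implement simple hysteresis on the reported RSSI; a minimal sketch of how RATRState could be derived from them (this mirrors the intent of ODM_RAStateCheck declared later in this header, not its exact code, and the numeric return values stand in for the DM_RATR_STA_* defines further below):

static u8 ra_state_from_rssi(const struct odm_rate_adapt *ra, s32 rssi)
{
        if (rssi > ra->HighRSSIThresh)
                return 1;       /* DM_RATR_STA_HIGH */
        if (rssi <= ra->LowRSSIThresh)
                return 3;       /* DM_RATR_STA_LOW */
        return 2;               /* DM_RATR_STA_MIDDLE */
}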
+
+#define IQK_MAC_REG_NUM 4
+#define IQK_ADDA_REG_NUM 16
+#define IQK_BB_REG_NUM_MAX 10
+#define IQK_BB_REG_NUM 9
+#define HP_THERMAL_NUM 8
+
+#define AVG_THERMAL_NUM 8
+#define IQK_Matrix_REG_NUM 8
+#define IQK_Matrix_Settings_NUM (1 + 24 + 21)
+
+#define DM_Type_ByFWi 0
+#define DM_Type_ByDriver 1
+
+/* Declare for common info */
+
+#define MAX_PATH_NUM_92CS 2
+
+struct odm_phy_status_info {
+ u8 RxPWDBAll;
+ u8 SignalQuality; /* in 0-100 index. */
+ u8 RxMIMOSignalQuality[MAX_PATH_NUM_92CS]; /* EVM */
+ u8 RxMIMOSignalStrength[MAX_PATH_NUM_92CS];/* in 0~100 index */
+ s8 RxPower; /* in dBm Translate from PWdB */
+ s8 RecvSignalPower;/* Real power in dBm for this packet, no
+ * beautification and aggregation. Keep this raw
+ * info to be used for the other procedures. */
+ u8 BTRxRSSIPercentage;
+ u8 SignalStrength; /* in 0-100 index. */
+ u8 RxPwr[MAX_PATH_NUM_92CS];/* per-path's pwdb */
+ u8 RxSNR[MAX_PATH_NUM_92CS];/* per-path's SNR */
+};
+
+struct odm_phy_dbg_info {
+ /* ODM Write,debug info */
+ s8 RxSNRdB[MAX_PATH_NUM_92CS];
+ u64 NumQryPhyStatus;
+ u64 NumQryPhyStatusCCK;
+ u64 NumQryPhyStatusOFDM;
+ /* Others */
+ s32 RxEVM[MAX_PATH_NUM_92CS];
+};
+
+struct odm_per_pkt_info {
+ s8 Rate;
+ u8 StationID;
+ bool bPacketMatchBSSID;
+ bool bPacketToSelf;
+ bool bPacketBeacon;
+};
+
+struct odm_mac_status_info {
+ u8 test;
+};
+
+enum odm_ability {
+ /* BB Team */
+ ODM_DIG = 0x00000001,
+ ODM_HIGH_POWER = 0x00000002,
+ ODM_CCK_CCA_TH = 0x00000004,
+ ODM_FA_STATISTICS = 0x00000008,
+ ODM_RAMASK = 0x00000010,
+ ODM_RSSI_MONITOR = 0x00000020,
+ ODM_SW_ANTDIV = 0x00000040,
+ ODM_HW_ANTDIV = 0x00000080,
+ ODM_BB_PWRSV = 0x00000100,
+ ODM_2TPATHDIV = 0x00000200,
+ ODM_1TPATHDIV = 0x00000400,
+ ODM_PSD2AFH = 0x00000800
+};
+
+/* 2011/10/20 MH For MP driver RT_WLAN_STA = struct sta_info */
+/* Please declare below ODM relative info in your STA info structure. */
+
+struct odm_sta_info {
+ /* Driver Write */
+ bool bUsed; /* record whether the STA is linked or not */
+ u8 IOTPeer; /* Enum value. HT_IOT_PEER_E */
+
+ /* ODM Write */
+ /* 1 PHY_STATUS_INFO */
+ u8 RSSI_Path[4]; /* */
+ u8 RSSI_Ave;
+ u8 RXEVM[4];
+ u8 RXSNR[4];
+};
+
+/* 2011/10/20 MH Define Common info enum for all team. */
+
+enum odm_common_info_def {
+ /* Fixed value: */
+
+ /* HOOK BEFORE REG INIT----------- */
+ ODM_CMNINFO_PLATFORM = 0,
+ ODM_CMNINFO_ABILITY, /* ODM_ABILITY_E */
+ ODM_CMNINFO_INTERFACE, /* ODM_INTERFACE_E */
+ ODM_CMNINFO_MP_TEST_CHIP,
+ ODM_CMNINFO_IC_TYPE, /* ODM_IC_TYPE_E */
+ ODM_CMNINFO_CUT_VER, /* ODM_CUT_VERSION_E */
+ ODM_CMNINFO_FAB_VER, /* ODM_FAB_E */
+ ODM_CMNINFO_RF_TYPE, /* ODM_RF_PATH_E or ODM_RF_TYPE_E? */
+ ODM_CMNINFO_BOARD_TYPE, /* ODM_BOARD_TYPE_E */
+ ODM_CMNINFO_EXT_LNA, /* true */
+ ODM_CMNINFO_EXT_PA,
+ ODM_CMNINFO_EXT_TRSW,
+ ODM_CMNINFO_PATCH_ID, /* CUSTOMER ID */
+ ODM_CMNINFO_BINHCT_TEST,
+ ODM_CMNINFO_BWIFI_TEST,
+ ODM_CMNINFO_SMART_CONCURRENT,
+ /* HOOK BEFORE REG INIT----------- */
+
+ /* Dynamic value: */
+/* POINTER REFERENCE----------- */
+ ODM_CMNINFO_MAC_PHY_MODE, /* ODM_MAC_PHY_MODE_E */
+ ODM_CMNINFO_TX_UNI,
+ ODM_CMNINFO_RX_UNI,
+ ODM_CMNINFO_WM_MODE, /* ODM_WIRELESS_MODE_E */
+ ODM_CMNINFO_BAND, /* ODM_BAND_TYPE_E */
+ ODM_CMNINFO_SEC_CHNL_OFFSET, /* ODM_SEC_CHNL_OFFSET_E */
+ ODM_CMNINFO_SEC_MODE, /* ODM_SECURITY_E */
+ ODM_CMNINFO_BW, /* ODM_BW_E */
+ ODM_CMNINFO_CHNL,
+
+ ODM_CMNINFO_DMSP_GET_VALUE,
+ ODM_CMNINFO_BUDDY_ADAPTOR,
+ ODM_CMNINFO_DMSP_IS_MASTER,
+ ODM_CMNINFO_SCAN,
+ ODM_CMNINFO_POWER_SAVING,
+ ODM_CMNINFO_ONE_PATH_CCA, /* ODM_CCA_PATH_E */
+ ODM_CMNINFO_DRV_STOP,
+ ODM_CMNINFO_PNP_IN,
+ ODM_CMNINFO_INIT_ON,
+ ODM_CMNINFO_ANT_TEST,
+ ODM_CMNINFO_NET_CLOSED,
+ ODM_CMNINFO_MP_MODE,
+/* POINTER REFERENCE----------- */
+
+/* CALL BY VALUE------------- */
+ ODM_CMNINFO_WIFI_DIRECT,
+ ODM_CMNINFO_WIFI_DISPLAY,
+ ODM_CMNINFO_LINK,
+ ODM_CMNINFO_RSSI_MIN,
+ ODM_CMNINFO_DBG_COMP, /* u64 */
+ ODM_CMNINFO_DBG_LEVEL, /* u32 */
+ ODM_CMNINFO_RA_THRESHOLD_HIGH, /* u8 */
+ ODM_CMNINFO_RA_THRESHOLD_LOW, /* u8 */
+ ODM_CMNINFO_RF_ANTENNA_TYPE, /* u8 */
+ ODM_CMNINFO_BT_DISABLED,
+ ODM_CMNINFO_BT_OPERATION,
+ ODM_CMNINFO_BT_DIG,
+ ODM_CMNINFO_BT_BUSY, /* Check Bt is using or not */
+ ODM_CMNINFO_BT_DISABLE_EDCA,
+/* CALL BY VALUE-------------*/
+
+ /* Dynamic ptr array hook itms. */
+ ODM_CMNINFO_STA_STATUS,
+ ODM_CMNINFO_PHY_STATUS,
+ ODM_CMNINFO_MAC_STATUS,
+ ODM_CMNINFO_MAX,
+};
+
+/* 2011/10/20 MH Define ODM support ability. ODM_CMNINFO_ABILITY */
+
+enum odm_ability_def {
+ /* BB ODM section BIT 0-15 */
+ ODM_BB_DIG = BIT0,
+ ODM_BB_RA_MASK = BIT1,
+ ODM_BB_DYNAMIC_TXPWR = BIT2,
+ ODM_BB_FA_CNT = BIT3,
+ ODM_BB_RSSI_MONITOR = BIT4,
+ ODM_BB_CCK_PD = BIT5,
+ ODM_BB_ANT_DIV = BIT6,
+ ODM_BB_PWR_SAVE = BIT7,
+ ODM_BB_PWR_TRA = BIT8,
+ ODM_BB_RATE_ADAPTIVE = BIT9,
+ ODM_BB_PATH_DIV = BIT10,
+ ODM_BB_PSD = BIT11,
+ ODM_BB_RXHP = BIT12,
+
+ /* MAC DM section BIT 16-23 */
+ ODM_MAC_EDCA_TURBO = BIT16,
+ ODM_MAC_EARLY_MODE = BIT17,
+
+ /* RF ODM section BIT 24-31 */
+ ODM_RF_TX_PWR_TRACK = BIT24,
+ ODM_RF_RX_GAIN_TRACK = BIT25,
+ ODM_RF_CALIBRATION = BIT26,
+};
+
+/* ODM_CMNINFO_INTERFACE */
+enum odm_interface_def {
+ ODM_ITRF_PCIE = 0x1,
+ ODM_ITRF_USB = 0x2,
+ ODM_ITRF_SDIO = 0x4,
+ ODM_ITRF_ALL = 0x7,
+};
+
+/* ODM_CMNINFO_IC_TYPE */
+enum odm_ic_type {
+ ODM_RTL8192S = BIT0,
+ ODM_RTL8192C = BIT1,
+ ODM_RTL8192D = BIT2,
+ ODM_RTL8723A = BIT3,
+ ODM_RTL8188E = BIT4,
+ ODM_RTL8812 = BIT5,
+ ODM_RTL8821 = BIT6,
+};
+
+#define ODM_IC_11N_SERIES \
+ (ODM_RTL8192S | ODM_RTL8192C | ODM_RTL8192D | \
+ ODM_RTL8723A | ODM_RTL8188E)
+#define ODM_IC_11AC_SERIES (ODM_RTL8812)
+
+/* ODM_CMNINFO_CUT_VER */
+enum odm_cut_version {
+ ODM_CUT_A = 1,
+ ODM_CUT_B = 2,
+ ODM_CUT_C = 3,
+ ODM_CUT_D = 4,
+ ODM_CUT_E = 5,
+ ODM_CUT_F = 6,
+ ODM_CUT_TEST = 7,
+};
+
+/* ODM_CMNINFO_FAB_VER */
+enum odm_fab_Version {
+ ODM_TSMC = 0,
+ ODM_UMC = 1,
+};
+
+/* ODM_CMNINFO_RF_TYPE */
+/* For example 1T2R (A+AB = BIT0|BIT4|BIT5) */
+enum odm_rf_path {
+ ODM_RF_TX_A = BIT0,
+ ODM_RF_TX_B = BIT1,
+ ODM_RF_TX_C = BIT2,
+ ODM_RF_TX_D = BIT3,
+ ODM_RF_RX_A = BIT4,
+ ODM_RF_RX_B = BIT5,
+ ODM_RF_RX_C = BIT6,
+ ODM_RF_RX_D = BIT7,
+};
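The path bits compose by OR-ing one TX mask with the wanted RX masks, matching the 1T2R example in the comment above; the variable below is purely illustrative:

/* 1T2R: transmit on path A, receive on paths A and B (BIT0 | BIT4 | BIT5). */
static const u8 rf_path_1t2r = ODM_RF_TX_A | ODM_RF_RX_A | ODM_RF_RX_B;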
+
+enum odm_rf_type {
+ ODM_1T1R = 0,
+ ODM_1T2R = 1,
+ ODM_2T2R = 2,
+ ODM_2T3R = 3,
+ ODM_2T4R = 4,
+ ODM_3T3R = 5,
+ ODM_3T4R = 6,
+ ODM_4T4R = 7,
+};
+
+/* ODM Dynamic common info value definition */
+
+enum odm_mac_phy_mode {
+ ODM_SMSP = 0,
+ ODM_DMSP = 1,
+ ODM_DMDP = 2,
+};
+
+enum odm_bt_coexist {
+ ODM_BT_BUSY = 1,
+ ODM_BT_ON = 2,
+ ODM_BT_OFF = 3,
+ ODM_BT_NONE = 4,
+};
+
+/* ODM_CMNINFO_OP_MODE */
+enum odm_operation_mode {
+ ODM_NO_LINK = BIT0,
+ ODM_LINK = BIT1,
+ ODM_SCAN = BIT2,
+ ODM_POWERSAVE = BIT3,
+ ODM_AP_MODE = BIT4,
+ ODM_CLIENT_MODE = BIT5,
+ ODM_AD_HOC = BIT6,
+ ODM_WIFI_DIRECT = BIT7,
+ ODM_WIFI_DISPLAY = BIT8,
+};
+
+/* ODM_CMNINFO_WM_MODE */
+enum odm_wireless_mode {
+ ODM_WM_UNKNOW = 0x0,
+ ODM_WM_B = BIT0,
+ ODM_WM_G = BIT1,
+ ODM_WM_A = BIT2,
+ ODM_WM_N24G = BIT3,
+ ODM_WM_N5G = BIT4,
+ ODM_WM_AUTO = BIT5,
+ ODM_WM_AC = BIT6,
+};
+
+/* ODM_CMNINFO_BAND */
+enum odm_band_type {
+ ODM_BAND_2_4G = BIT0,
+ ODM_BAND_5G = BIT1,
+};
+
+/* ODM_CMNINFO_SEC_CHNL_OFFSET */
+enum odm_sec_chnl_offset {
+ ODM_DONT_CARE = 0,
+ ODM_BELOW = 1,
+ ODM_ABOVE = 2
+};
+
+/* ODM_CMNINFO_SEC_MODE */
+enum odm_security {
+ ODM_SEC_OPEN = 0,
+ ODM_SEC_WEP40 = 1,
+ ODM_SEC_TKIP = 2,
+ ODM_SEC_RESERVE = 3,
+ ODM_SEC_AESCCMP = 4,
+ ODM_SEC_WEP104 = 5,
+ ODM_WEP_WPA_MIXED = 6, /* WEP + WPA */
+ ODM_SEC_SMS4 = 7,
+};
+
+/* ODM_CMNINFO_BW */
+enum odm_bw {
+ ODM_BW20M = 0,
+ ODM_BW40M = 1,
+ ODM_BW80M = 2,
+ ODM_BW160M = 3,
+ ODM_BW10M = 4,
+};
+
+/* ODM_CMNINFO_BOARD_TYPE */
+enum odm_board_type {
+ ODM_BOARD_NORMAL = 0,
+ ODM_BOARD_HIGHPWR = 1,
+ ODM_BOARD_MINICARD = 2,
+ ODM_BOARD_SLIM = 3,
+ ODM_BOARD_COMBO = 4,
+};
+
+/* ODM_CMNINFO_ONE_PATH_CCA */
+enum odm_cca_path {
+ ODM_CCA_2R = 0,
+ ODM_CCA_1R_A = 1,
+ ODM_CCA_1R_B = 2,
+};
+
+struct odm_ra_info {
+ u8 RateID;
+ u32 RateMask;
+ u32 RAUseRate;
+ u8 RateSGI;
+ u8 RssiStaRA;
+ u8 PreRssiStaRA;
+ u8 SGIEnable;
+ u8 DecisionRate;
+ u8 PreRate;
+ u8 HighestRate;
+ u8 LowestRate;
+ u32 NscUp;
+ u32 NscDown;
+ u16 RTY[5];
+ u32 TOTAL;
+ u16 DROP;
+ u8 Active;
+ u16 RptTime;
+ u8 RAWaitingCounter;
+ u8 RAPendingCounter;
+ u8 PTActive; /* on or off */
+ u8 PTTryState; /* 0 trying state, 1 for decision state */
+ u8 PTStage; /* 0~6 */
+ u8 PTStopCount; /* Stop PT counter */
+ u8 PTPreRate; /* if rate change do PT */
+ u8 PTPreRssi; /* if RSSI change 5% do PT */
+ u8 PTModeSS; /* decide which rate should do PT */
+ u8 RAstage; /* StageRA, decide how many times RA will be done
+ * between PT */
+ u8 PTSmoothFactor;
+};
+
+struct ijk_matrix_regs_set {
+ bool bIQKDone;
+ s32 Value[1][IQK_Matrix_REG_NUM];
+};
+
+struct odm_rf_cal {
+ /* for tx power tracking */
+ u32 RegA24; /* for TempCCK */
+ s32 RegE94;
+ s32 RegE9C;
+ s32 RegEB4;
+ s32 RegEBC;
+
+ u8 TXPowercount;
+ bool bTXPowerTrackingInit;
+ bool bTXPowerTracking;
+ u8 TxPowerTrackControl; /* for mp mode, turn off txpwrtracking
+ * as default */
+ u8 TM_Trigger;
+ u8 InternalPA5G[2]; /* pathA / pathB */
+
+ u8 ThermalMeter[2]; /* ThermalMeter, index 0 for RFIC0,
+ * and 1 for RFIC1 */
+ u8 ThermalValue;
+ u8 ThermalValue_LCK;
+ u8 ThermalValue_IQK;
+ u8 ThermalValue_DPK;
+ u8 ThermalValue_AVG[AVG_THERMAL_NUM];
+ u8 ThermalValue_AVG_index;
+ u8 ThermalValue_RxGain;
+ u8 ThermalValue_Crystal;
+ u8 ThermalValue_DPKstore;
+ u8 ThermalValue_DPKtrack;
+ bool TxPowerTrackingInProgress;
+ bool bDPKenable;
+
+ bool bReloadtxpowerindex;
+ u8 bRfPiEnable;
+ u32 TXPowerTrackingCallbackCnt; /* cosa add for debug */
+
+ u8 bCCKinCH14;
+ u8 CCK_index;
+ u8 OFDM_index[2];
+ bool bDoneTxpower;
+
+ u8 ThermalValue_HP[HP_THERMAL_NUM];
+ u8 ThermalValue_HP_index;
+ struct ijk_matrix_regs_set IQKMatrixRegSetting[IQK_Matrix_Settings_NUM];
+
+ u8 Delta_IQK;
+ u8 Delta_LCK;
+
+ /* for IQK */
+ u32 RegC04;
+ u32 Reg874;
+ u32 RegC08;
+ u32 RegB68;
+ u32 RegB6C;
+ u32 Reg870;
+ u32 Reg860;
+ u32 Reg864;
+
+ bool bIQKInitialized;
+ bool bLCKInProgress;
+ bool bAntennaDetected;
+ u32 ADDA_backup[IQK_ADDA_REG_NUM];
+ u32 IQK_MAC_backup[IQK_MAC_REG_NUM];
+ u32 IQK_BB_backup_recover[9];
+ u32 IQK_BB_backup[IQK_BB_REG_NUM];
+
+ /* for APK */
+ u32 APKoutput[2][2]; /* path A/B; output1_1a/output1_2a */
+ u8 bAPKdone;
+ u8 bAPKThermalMeterIgnore;
+ u8 bDPdone;
+ u8 bDPPathAOK;
+ u8 bDPPathBOK;
+};
+
+/* ODM Dynamic common info value definition */
+
+struct fast_ant_train {
+ u8 Bssid[6];
+ u8 antsel_rx_keep_0;
+ u8 antsel_rx_keep_1;
+ u8 antsel_rx_keep_2;
+ u32 antSumRSSI[7];
+ u32 antRSSIcnt[7];
+ u32 antAveRSSI[7];
+ u8 FAT_State;
+ u32 TrainIdx;
+ u8 antsel_a[ODM_ASSOCIATE_ENTRY_NUM];
+ u8 antsel_b[ODM_ASSOCIATE_ENTRY_NUM];
+ u8 antsel_c[ODM_ASSOCIATE_ENTRY_NUM];
+ u32 MainAnt_Sum[ODM_ASSOCIATE_ENTRY_NUM];
+ u32 AuxAnt_Sum[ODM_ASSOCIATE_ENTRY_NUM];
+ u32 MainAnt_Cnt[ODM_ASSOCIATE_ENTRY_NUM];
+ u32 AuxAnt_Cnt[ODM_ASSOCIATE_ENTRY_NUM];
+ u8 RxIdleAnt;
+ bool bBecomeLinked;
+};
+
+enum fat_state {
+ FAT_NORMAL_STATE = 0,
+ FAT_TRAINING_STATE = 1,
+};
+
+enum ant_div_type {
+ NO_ANTDIV = 0xFF,
+ CG_TRX_HW_ANTDIV = 0x01,
+ CGCS_RX_HW_ANTDIV = 0x02,
+ FIXED_HW_ANTDIV = 0x03,
+ CG_TRX_SMART_ANTDIV = 0x04,
+ CGCS_RX_SW_ANTDIV = 0x05,
+};
+
+/* Copy from SD4 defined structure. We use to support PHY DM integration. */
+struct odm_dm_struct {
+ /* Add for different team use temporarily */
+ struct adapter *Adapter; /* For CE/NIC team */
+ struct rtl8192cd_priv *priv; /* For AP/ADSL team */
+ /* When you use the above pointers, they must be initialized. */
+ bool odm_ready;
+
+ struct rtl8192cd_priv *fake_priv;
+ u64 DebugComponents;
+ u32 DebugLevel;
+
+/* ODM HANDLE, DRIVER NEEDS NOT TO HOOK------ */
+ bool bCckHighPower;
+ u8 RFPathRxEnable; /* ODM_CMNINFO_RFPATH_ENABLE */
+ u8 ControlChannel;
+/* ODM HANDLE, DRIVER NEEDS NOT TO HOOK------ */
+
+/* 1 COMMON INFORMATION */
+ /* Init Value */
+/* HOOK BEFORE REG INIT----------- */
+ /* ODM Platform info AP/ADSL/CE/MP = 1/2/3/4 */
+ u8 SupportPlatform;
+ /* ODM Support Ability DIG/RATR/TX_PWR_TRACK/... = 1/2/3/... */
+ u32 SupportAbility;
+ /* ODM PCIE/USB/SDIO/GSPI = 0/1/2/3 */
+ u8 SupportInterface;
+ /* ODM composite or independent. Bit oriented/ 92C+92D+ .... or any
+ * other type = 1/2/3/... */
+ u32 SupportICType;
+ /* Cut Version TestChip/A-cut/B-cut... = 0/1/2/3/... */
+ u8 CutVersion;
+ /* Fab Version TSMC/UMC = 0/1 */
+ u8 FabVersion;
+ /* RF Type 4T4R/3T3R/2T2R/1T2R/1T1R/... */
+ u8 RFType;
+ /* Board Type Normal/HighPower/MiniCard/SLIM/Combo/. = 0/1/2/3/4/. */
+ u8 BoardType;
+ /* with external LNA NO/Yes = 0/1 */
+ u8 ExtLNA;
+ /* with external PA NO/Yes = 0/1 */
+ u8 ExtPA;
+ /* with external TRSW NO/Yes = 0/1 */
+ u8 ExtTRSW;
+ u8 PatchID; /* Customer ID */
+ bool bInHctTest;
+ bool bWIFITest;
+
+ bool bDualMacSmartConcurrent;
+ u32 BK_SupportAbility;
+ u8 AntDivType;
+/* HOOK BEFORE REG INIT----------- */
+
+ /* Dynamic Value */
+/* POINTER REFERENCE----------- */
+
+ u8 u8_temp;
+ bool bool_temp;
+ struct adapter *adapter_temp;
+
+ /* MAC PHY Mode SMSP/DMSP/DMDP = 0/1/2 */
+ u8 *pMacPhyMode;
+ /* TX Unicast byte count */
+ u64 *pNumTxBytesUnicast;
+ /* RX Unicast byte count */
+ u64 *pNumRxBytesUnicast;
+ /* Wireless mode B/G/A/N = BIT0/BIT1/BIT2/BIT3 */
+ u8 *pWirelessMode; /* ODM_WIRELESS_MODE_E */
+ /* Frequence band 2.4G/5G = 0/1 */
+ u8 *pBandType;
+ /* Secondary channel offset don't_care/below/above = 0/1/2 */
+ u8 *pSecChOffset;
+ /* Security mode Open/WEP/AES/TKIP = 0/1/2/3 */
+ u8 *pSecurity;
+ /* BW info 20M/40M/80M = 0/1/2 */
+ u8 *pBandWidth;
+ /* Central channel location Ch1/Ch2/.... */
+ u8 *pChannel; /* central channel number */
+ /* Common info for 92D DMSP */
+
+ bool *pbGetValueFromOtherMac;
+ struct adapter **pBuddyAdapter;
+ bool *pbMasterOfDMSP; /* MAC0: master, MAC1: slave */
+ /* Common info for Status */
+ bool *pbScanInProcess;
+ bool *pbPowerSaving;
+ /* CCA Path 2-path/path-A/path-B = 0/1/2; using ODM_CCA_PATH_E. */
+ u8 *pOnePathCCA;
+ /* pMgntInfo->AntennaTest */
+ u8 *pAntennaTest;
+ bool *pbNet_closed;
+/* POINTER REFERENCE----------- */
+ /* */
+/* CALL BY VALUE------------- */
+ bool bWIFI_Direct;
+ bool bWIFI_Display;
+ bool bLinked;
+ u8 RSSI_Min;
+ u8 InterfaceIndex; /* Add for 92D dual MAC: 0--Mac0 1--Mac1 */
+ bool bIsMPChip;
+ bool bOneEntryOnly;
+ /* Common info for BTDM */
+ bool bBtDisabled; /* BT is disabled */
+ bool bBtHsOperation; /* BT HS mode is under progress */
+ u8 btHsDigVal; /* use BT rssi to decide the DIG value */
+ bool bBtDisableEdcaTurbo;/* Under some condition, don't enable the
+ * EDCA Turbo */
+ bool bBtBusy; /* BT is busy. */
+/* CALL BY VALUE------------- */
+
+ /* 2 Define STA info. */
+ /* _ODM_STA_INFO */
+ /* For MP, do we need to reduce one array pointer for the default port? */
+ struct sta_info *pODM_StaInfo[ODM_ASSOCIATE_ENTRY_NUM];
+
+ u16 CurrminRptTime;
+ struct odm_ra_info RAInfo[ODM_ASSOCIATE_ENTRY_NUM]; /* Use MacID as
+ * array index. STA MacID=0,
+ * VWiFi Client MacID={1, ODM_ASSOCIATE_ENTRY_NUM-1} */
+ /* */
+ /* 2012/02/14 MH Add to share 88E ra with other SW team. */
+ /* We need to collect all support ability into a proper area. */
+ /* */
+ bool RaSupport88E;
+
+ /* Define ........... */
+
+ /* Latest packet phy info (ODM write) */
+ struct odm_phy_dbg_info PhyDbgInfo;
+
+ /* Latest packet phy info (ODM write) */
+ struct odm_mac_status_info *pMacInfo;
+
+ /* Different Team independent structure?? */
+
+ /* ODM Structure */
+ struct fast_ant_train DM_FatTable;
+ struct rtw_dig DM_DigTable;
+ struct rtl_ps DM_PSTable;
+ struct dyn_primary_cca DM_PriCCA;
+ struct rx_hpc DM_RXHP_Table;
+ struct false_alarm_stats FalseAlmCnt;
+ struct false_alarm_stats FlaseAlmCntBuddyAdapter;
+ struct sw_ant_switch DM_SWAT_Table;
+ bool RSSI_test;
+
+ struct edca_turbo DM_EDCA_Table;
+ u32 WMMEDCA_BE;
+ /* Copy from SD4 structure */
+ /* */
+ /* ================================================== */
+ /* */
+
+ bool *pbDriverStopped;
+ bool *pbDriverIsGoingToPnpSetPowerSleep;
+ bool *pinit_adpt_in_progress;
+
+ /* PSD */
+ bool bUserAssignLevel;
+ struct timer_list PSDTimer;
+ u8 RSSI_BT; /* come from BT */
+ bool bPSDinProcess;
+ bool bDMInitialGainEnable;
+
+ /* for rate adaptive, in fact, 88c/92c fw will handle this */
+ u8 bUseRAMask;
+
+ struct odm_rate_adapt RateAdaptive;
+
+ struct odm_rf_cal RFCalibrateInfo;
+
+ /* TX power tracking */
+ u8 BbSwingIdxOfdm;
+ u8 BbSwingIdxOfdmCurrent;
+ u8 BbSwingIdxOfdmBase;
+ bool BbSwingFlagOfdm;
+ u8 BbSwingIdxCck;
+ u8 BbSwingIdxCckCurrent;
+ u8 BbSwingIdxCckBase;
+ bool BbSwingFlagCck;
+ u8 *mp_mode;
+ /* ODM system resource. */
+
+ /* ODM relative time. */
+ struct timer_list PathDivSwitchTimer;
+ /* 2011.09.27 add for Path Diversity */
+ struct timer_list CCKPathDiversityTimer;
+ struct timer_list FastAntTrainingTimer;
+}; /* DM_Dynamic_Mechanism_Structure */
+
+#define ODM_RF_PATH_MAX 2
+
+enum ODM_RF_RADIO_PATH {
+ ODM_RF_PATH_A = 0, /* Radio Path A */
+ ODM_RF_PATH_B = 1, /* Radio Path B */
+ ODM_RF_PATH_C = 2, /* Radio Path C */
+ ODM_RF_PATH_D = 3, /* Radio Path D */
+};
+
+enum ODM_RF_CONTENT {
+ odm_radioa_txt = 0x1000,
+ odm_radiob_txt = 0x1001,
+ odm_radioc_txt = 0x1002,
+ odm_radiod_txt = 0x1003
+};
+
+enum odm_bb_config_type {
+ CONFIG_BB_PHY_REG,
+ CONFIG_BB_AGC_TAB,
+ CONFIG_BB_AGC_TAB_2G,
+ CONFIG_BB_AGC_TAB_5G,
+ CONFIG_BB_PHY_REG_PG,
+};
+
+/* Status code */
+enum rt_status {
+ RT_STATUS_SUCCESS,
+ RT_STATUS_FAILURE,
+ RT_STATUS_PENDING,
+ RT_STATUS_RESOURCE,
+ RT_STATUS_INVALID_CONTEXT,
+ RT_STATUS_INVALID_PARAMETER,
+ RT_STATUS_NOT_SUPPORT,
+ RT_STATUS_OS_API_FAILED,
+};
+
+/* 3=========================================================== */
+/* 3 DIG */
+/* 3=========================================================== */
+
+enum dm_dig_op {
+ RT_TYPE_THRESH_HIGH = 0,
+ RT_TYPE_THRESH_LOW = 1,
+ RT_TYPE_BACKOFF = 2,
+ RT_TYPE_RX_GAIN_MIN = 3,
+ RT_TYPE_RX_GAIN_MAX = 4,
+ RT_TYPE_ENABLE = 5,
+ RT_TYPE_DISABLE = 6,
+ DIG_OP_TYPE_MAX
+};
+
+#define DM_DIG_THRESH_HIGH 40
+#define DM_DIG_THRESH_LOW 35
+
+#define DM_SCAN_RSSI_TH 0x14 /* scan return issue for LC */
+
+
+#define DM_false_ALARM_THRESH_LOW 400
+#define DM_false_ALARM_THRESH_HIGH 1000
+
+#define DM_DIG_MAX_NIC 0x4e
+#define DM_DIG_MIN_NIC 0x1e /* 0x22/0x1c */
+
+#define DM_DIG_MAX_AP 0x32
+#define DM_DIG_MIN_AP 0x20
+
+#define DM_DIG_MAX_NIC_HP 0x46
+#define DM_DIG_MIN_NIC_HP 0x2e
+
+#define DM_DIG_MAX_AP_HP 0x42
+#define DM_DIG_MIN_AP_HP 0x30
+
+/* vivi 92c&92d has different definition, 20110504 */
+/* this is for 92c */
+#define DM_DIG_FA_TH0 0x200/* 0x20 */
+#define DM_DIG_FA_TH1 0x300/* 0x100 */
+#define DM_DIG_FA_TH2 0x400/* 0x200 */
+/* this is for 92d */
+#define DM_DIG_FA_TH0_92D 0x100
+#define DM_DIG_FA_TH1_92D 0x400
+#define DM_DIG_FA_TH2_92D 0x600
+
+#define DM_DIG_BACKOFF_MAX 12
+#define DM_DIG_BACKOFF_MIN -4
+#define DM_DIG_BACKOFF_DEFAULT 10
+
+/* 3=========================================================== */
+/* 3 AGC RX High Power Mode */
+/* 3=========================================================== */
+#define LNA_Low_Gain_1 0x64
+#define LNA_Low_Gain_2 0x5A
+#define LNA_Low_Gain_3 0x58
+
+#define FA_RXHP_TH1 5000
+#define FA_RXHP_TH2 1500
+#define FA_RXHP_TH3 800
+#define FA_RXHP_TH4 600
+#define FA_RXHP_TH5 500
+
+/* 3=========================================================== */
+/* 3 EDCA */
+/* 3=========================================================== */
+
+/* 3=========================================================== */
+/* 3 Dynamic Tx Power */
+/* 3=========================================================== */
+/* Dynamic Tx Power Control Threshold */
+#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
+#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
+#define TX_POWER_NEAR_FIELD_THRESH_AP 0x3F
+
+#define TxHighPwrLevel_Normal 0
+#define TxHighPwrLevel_Level1 1
+#define TxHighPwrLevel_Level2 2
+#define TxHighPwrLevel_BT1 3
+#define TxHighPwrLevel_BT2 4
+#define TxHighPwrLevel_15 5
+#define TxHighPwrLevel_35 6
+#define TxHighPwrLevel_50 7
+#define TxHighPwrLevel_70 8
+#define TxHighPwrLevel_100 9
+
+/* 3=========================================================== */
+/* 3 Rate Adaptive */
+/* 3=========================================================== */
+#define DM_RATR_STA_INIT 0
+#define DM_RATR_STA_HIGH 1
+#define DM_RATR_STA_MIDDLE 2
+#define DM_RATR_STA_LOW 3
+
+/* 3=========================================================== */
+/* 3 BB Power Save */
+/* 3=========================================================== */
+
+
+enum dm_1r_cca {
+ CCA_1R = 0,
+ CCA_2R = 1,
+ CCA_MAX = 2,
+};
+
+enum dm_rf {
+ RF_Save = 0,
+ RF_Normal = 1,
+ RF_MAX = 2,
+};
+
+/* 3=========================================================== */
+/* 3 Antenna Diversity */
+/* 3=========================================================== */
+enum dm_swas {
+ Antenna_A = 1,
+ Antenna_B = 2,
+ Antenna_MAX = 3,
+};
+
+/* Maximum number of times the antenna detection mechanism needs to run. */
+#define MAX_ANTENNA_DETECTION_CNT 10
+
+/* Extern Global Variables. */
+#define OFDM_TABLE_SIZE_92C 37
+#define OFDM_TABLE_SIZE_92D 43
+#define CCK_TABLE_SIZE 33
+
+extern u32 OFDMSwingTable[OFDM_TABLE_SIZE_92D];
+extern u8 CCKSwingTable_Ch1_Ch13[CCK_TABLE_SIZE][8];
+extern u8 CCKSwingTable_Ch14[CCK_TABLE_SIZE][8];
+
+/* Check whether the Sta pointer is valid. */
+#define IS_STA_VALID(pSta) (pSta)
+/* 20100514 Joseph: Add definition for antenna switching test after link. */
+/* This indicates two different steps. */
+/* In SWAW_STEP_PEAK, the driver switches the antenna and listens to the
+ * signal on the air. */
+/* In SWAW_STEP_DETERMINE, the driver compares the signal captured in
+ * SWAW_STEP_PEAK with the original RSSI to decide whether an antenna
+ * switch is necessary. */
+#define SWAW_STEP_PEAK 0
+#define SWAW_STEP_DETERMINE 1
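+
+/* Usage sketch (illustrative only, not taken from the driver flow): the two
+ * steps are passed as the Step argument of odm_SwAntDivChkAntSwitch(),
+ * declared in odm_precomp.h, e.g.
+ *
+ *   odm_SwAntDivChkAntSwitch(pDM_Odm, SWAW_STEP_PEAK);
+ *   ... let the PHY collect RSSI on the switched antenna ...
+ *   odm_SwAntDivChkAntSwitch(pDM_Odm, SWAW_STEP_DETERMINE);
+ */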
+
+void ODM_Write_DIG(struct odm_dm_struct *pDM_Odm, u8 CurrentIGI);
+void ODM_Write_CCK_CCA_Thres(struct odm_dm_struct *pDM_Odm, u8 CurCCK_CCAThres);
+
+void ODM_SetAntenna(struct odm_dm_struct *pDM_Odm, u8 Antenna);
+
+
+#define dm_RF_Saving ODM_RF_Saving
+void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal);
+
+#define SwAntDivRestAfterLink ODM_SwAntDivRestAfterLink
+void ODM_SwAntDivRestAfterLink(struct odm_dm_struct *pDM_Odm);
+
+#define dm_CheckTXPowerTracking ODM_TXPowerTrackingCheck
+void ODM_TXPowerTrackingCheck(struct odm_dm_struct *pDM_Odm);
+
+bool ODM_RAStateCheck(struct odm_dm_struct *pDM_Odm, s32 RSSI,
+ bool bForceUpdate, u8 *pRATRState);
+
+#define dm_SWAW_RSSI_Check ODM_SwAntDivChkPerPktRssi
+void ODM_SwAntDivChkPerPktRssi(struct odm_dm_struct *pDM_Odm, u8 StationID,
+ struct odm_phy_status_info *pPhyInfo);
+
+u32 ConvertTo_dB(u32 Value);
+
+u32 GetPSDData(struct odm_dm_struct *pDM_Odm, unsigned int point,
+ u8 initial_gain_psd);
+
+void odm_DIGbyRSSI_LPS(struct odm_dm_struct *pDM_Odm);
+
+u32 ODM_Get_Rate_Bitmap(struct odm_dm_struct *pDM_Odm, u32 macid,
+ u32 ra_mask, u8 rssi_level);
+
+void ODM_DMInit(struct odm_dm_struct *pDM_Odm);
+
+void ODM_DMWatchdog(struct odm_dm_struct *pDM_Odm);
+
+void ODM_CmnInfoInit(struct odm_dm_struct *pDM_Odm,
+ enum odm_common_info_def CmnInfo, u32 Value);
+
+void ODM_CmnInfoHook(struct odm_dm_struct *pDM_Odm,
+ enum odm_common_info_def CmnInfo, void *pValue);
+
+void ODM_CmnInfoPtrArrayHook(struct odm_dm_struct *pDM_Odm,
+ enum odm_common_info_def CmnInfo,
+ u16 Index, void *pValue);
+
+void ODM_CmnInfoUpdate(struct odm_dm_struct *pDM_Odm, u32 CmnInfo, u64 Value);
+
+void ODM_InitAllTimers(struct odm_dm_struct *pDM_Odm);
+
+void ODM_CancelAllTimers(struct odm_dm_struct *pDM_Odm);
+
+void ODM_ReleaseAllTimers(struct odm_dm_struct *pDM_Odm);
+
+void ODM_ResetIQKResult(struct odm_dm_struct *pDM_Odm);
+
+void ODM_AntselStatistics_88C(struct odm_dm_struct *pDM_Odm, u8 MacId,
+ u32 PWDBAll, bool isCCKrate);
+
+void ODM_SingleDualAntennaDefaultSetting(struct odm_dm_struct *pDM_Odm);
+
+bool ODM_SingleDualAntennaDetection(struct odm_dm_struct *pDM_Odm, u8 mode);
+
+void odm_dtc(struct odm_dm_struct *pDM_Odm);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/odm_HWConfig.h b/drivers/staging/rtl8188eu/include/odm_HWConfig.h
new file mode 100644
index 0000000..63779f5
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/odm_HWConfig.h
@@ -0,0 +1,132 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#ifndef __HALHWOUTSRC_H__
+#define __HALHWOUTSRC_H__
+
+/* Definition */
+/* CCK Rates, TxHT = 0 */
+#define DESC92C_RATE1M 0x00
+#define DESC92C_RATE2M 0x01
+#define DESC92C_RATE5_5M 0x02
+#define DESC92C_RATE11M 0x03
+
+/* OFDM Rates, TxHT = 0 */
+#define DESC92C_RATE6M 0x04
+#define DESC92C_RATE9M 0x05
+#define DESC92C_RATE12M 0x06
+#define DESC92C_RATE18M 0x07
+#define DESC92C_RATE24M 0x08
+#define DESC92C_RATE36M 0x09
+#define DESC92C_RATE48M 0x0a
+#define DESC92C_RATE54M 0x0b
+
+/* MCS Rates, TxHT = 1 */
+#define DESC92C_RATEMCS0 0x0c
+#define DESC92C_RATEMCS1 0x0d
+#define DESC92C_RATEMCS2 0x0e
+#define DESC92C_RATEMCS3 0x0f
+#define DESC92C_RATEMCS4 0x10
+#define DESC92C_RATEMCS5 0x11
+#define DESC92C_RATEMCS6 0x12
+#define DESC92C_RATEMCS7 0x13
+#define DESC92C_RATEMCS8 0x14
+#define DESC92C_RATEMCS9 0x15
+#define DESC92C_RATEMCS10 0x16
+#define DESC92C_RATEMCS11 0x17
+#define DESC92C_RATEMCS12 0x18
+#define DESC92C_RATEMCS13 0x19
+#define DESC92C_RATEMCS14 0x1a
+#define DESC92C_RATEMCS15 0x1b
+#define DESC92C_RATEMCS15_SG 0x1c
+#define DESC92C_RATEMCS32 0x20
+
+/* structure and define */
+
+struct phy_rx_agc_info {
+ #ifdef __LITTLE_ENDIAN
+ u8 gain:7, trsw:1;
+ #else
+ u8 trsw:1, gain:7;
+ #endif
+};
+
+struct phy_status_rpt {
+ struct phy_rx_agc_info path_agc[2];
+ u8 ch_corr[2];
+ u8 cck_sig_qual_ofdm_pwdb_all;
+ u8 cck_agc_rpt_ofdm_cfosho_a;
+ u8 cck_rpt_b_ofdm_cfosho_b;
+ u8 rsvd_1;/* ch_corr_msb; */
+ u8 noise_power_db_msb;
+ u8 path_cfotail[2];
+ u8 pcts_mask[2];
+ s8 stream_rxevm[2];
+ u8 path_rxsnr[2];
+ u8 noise_power_db_lsb;
+ u8 rsvd_2[3];
+ u8 stream_csi[2];
+ u8 stream_target_csi[2];
+ s8 sig_evm;
+ u8 rsvd_3;
+
+#ifdef __LITTLE_ENDIAN
+ u8 antsel_rx_keep_2:1; /* ex_intf_flg:1; */
+ u8 sgi_en:1;
+ u8 rxsc:2;
+ u8 idle_long:1;
+ u8 r_ant_train_en:1;
+ u8 ant_sel_b:1;
+ u8 ant_sel:1;
+#else /* _BIG_ENDIAN_ */
+ u8 ant_sel:1;
+ u8 ant_sel_b:1;
+ u8 r_ant_train_en:1;
+ u8 idle_long:1;
+ u8 rxsc:2;
+ u8 sgi_en:1;
+ u8 antsel_rx_keep_2:1; /* ex_intf_flg:1; */
+#endif
+};
+
+void odm_Init_RSSIForDM(struct odm_dm_struct *pDM_Odm);
+
+void ODM_PhyStatusQuery(struct odm_dm_struct *pDM_Odm,
+ struct odm_phy_status_info *pPhyInfo,
+ u8 *pPhyStatus,
+ struct odm_per_pkt_info *pPktinfo);
+
+void ODM_MacStatusQuery(struct odm_dm_struct *pDM_Odm,
+ u8 *pMacStatus,
+ u8 MacID,
+ bool bPacketMatchBSSID,
+ bool bPacketToSelf,
+ bool bPacketBeacon);
+
+enum HAL_STATUS ODM_ConfigRFWithHeaderFile(struct odm_dm_struct *pDM_Odm,
+ enum ODM_RF_RADIO_PATH Content,
+ enum ODM_RF_RADIO_PATH eRFPath);
+
+enum HAL_STATUS ODM_ConfigBBWithHeaderFile(struct odm_dm_struct *pDM_Odm,
+ enum odm_bb_config_type ConfigType);
+
+enum HAL_STATUS ODM_ConfigMACWithHeaderFile(struct odm_dm_struct *pDM_Odm);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/odm_RTL8188E.h b/drivers/staging/rtl8188eu/include/odm_RTL8188E.h
new file mode 100644
index 0000000..f96ad5a
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/odm_RTL8188E.h
@@ -0,0 +1,56 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __ODM_RTL8188E_H__
+#define __ODM_RTL8188E_H__
+
+#define MAIN_ANT 0
+#define AUX_ANT 1
+#define MAIN_ANT_CG_TRX 1
+#define AUX_ANT_CG_TRX 0
+#define MAIN_ANT_CGCS_RX 0
+#define AUX_ANT_CGCS_RX 1
+
+void ODM_DIG_LowerBound_88E(struct odm_dm_struct *pDM_Odm);
+
+void ODM_AntennaDiversityInit_88E(struct odm_dm_struct *pDM_Odm);
+
+void ODM_AntennaDiversity_88E(struct odm_dm_struct *pDM_Odm);
+
+void ODM_SetTxAntByTxInfo_88E(struct odm_dm_struct *pDM_Odm, u8 *pDesc,
+ u8 macId);
+
+void ODM_UpdateRxIdleAnt_88E(struct odm_dm_struct *pDM_Odm, u8 Ant);
+
+void ODM_AntselStatistics_88E(struct odm_dm_struct *pDM_Odm, u8 antsel_tr_mux,
+ u32 MacId, u8 RxPWDBAll);
+
+void odm_FastAntTraining(struct odm_dm_struct *pDM_Odm);
+
+void odm_FastAntTrainingCallback(struct odm_dm_struct *pDM_Odm);
+
+void odm_FastAntTrainingWorkItemCallback(struct odm_dm_struct *pDM_Odm);
+
+void odm_PrimaryCCA_Init(struct odm_dm_struct *pDM_Odm);
+
+bool ODM_DynamicPrimaryCCA_DupRTS(struct odm_dm_struct *pDM_Odm);
+
+void odm_DynamicPrimaryCCA(struct odm_dm_struct *pDM_Odm);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/odm_RegConfig8188E.h b/drivers/staging/rtl8188eu/include/odm_RegConfig8188E.h
new file mode 100644
index 0000000..727e6b2
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/odm_RegConfig8188E.h
@@ -0,0 +1,43 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __INC_ODM_REGCONFIG_H_8188E
+#define __INC_ODM_REGCONFIG_H_8188E
+
+void odm_ConfigRFReg_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Data,
+ enum ODM_RF_RADIO_PATH RF_PATH, u32 RegAddr);
+
+void odm_ConfigRF_RadioA_8188E(struct odm_dm_struct *pDM_Odm,
+ u32 Addr, u32 Data);
+
+void odm_ConfigRF_RadioB_8188E(struct odm_dm_struct *pDM_Odm,
+ u32 Addr, u32 Data);
+
+void odm_ConfigMAC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u8 Data);
+
+void odm_ConfigBB_AGC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
+ u32 Bitmask, u32 Data);
+
+void odm_ConfigBB_PHY_REG_PG_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
+ u32 Bitmask, u32 Data);
+
+void odm_ConfigBB_PHY_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
+ u32 Bitmask, u32 Data);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/odm_RegDefine11AC.h b/drivers/staging/rtl8188eu/include/odm_RegDefine11AC.h
new file mode 100644
index 0000000..f08775c
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/odm_RegDefine11AC.h
@@ -0,0 +1,54 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#ifndef __ODM_REGDEFINE11AC_H__
+#define __ODM_REGDEFINE11AC_H__
+
+/* 2 RF REG LIST */
+
+
+
+/* 2 BB REG LIST */
+/* PAGE 8 */
+/* PAGE 9 */
+#define ODM_REG_OFDM_FA_RST_11AC 0x9A4
+/* PAGE A */
+#define ODM_REG_CCK_CCA_11AC 0xA0A
+#define ODM_REG_CCK_FA_RST_11AC 0xA2C
+#define ODM_REG_CCK_FA_11AC 0xA5C
+/* PAGE C */
+#define ODM_REG_IGI_A_11AC 0xC50
+/* PAGE E */
+#define ODM_REG_IGI_B_11AC 0xE50
+/* PAGE F */
+#define ODM_REG_OFDM_FA_11AC 0xF48
+
+
+/* 2 MAC REG LIST */
+
+
+
+
+/* DIG Related */
+#define ODM_BIT_IGI_11AC 0xFFFFFFFF
+
+
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h b/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h
new file mode 100644
index 0000000..5a61f90
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h
@@ -0,0 +1,171 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#ifndef __ODM_REGDEFINE11N_H__
+#define __ODM_REGDEFINE11N_H__
+
+
+/* 2 RF REG LIST */
+#define ODM_REG_RF_MODE_11N 0x00
+#define ODM_REG_RF_0B_11N 0x0B
+#define ODM_REG_CHNBW_11N 0x18
+#define ODM_REG_T_METER_11N 0x24
+#define ODM_REG_RF_25_11N 0x25
+#define ODM_REG_RF_26_11N 0x26
+#define ODM_REG_RF_27_11N 0x27
+#define ODM_REG_RF_2B_11N 0x2B
+#define ODM_REG_RF_2C_11N 0x2C
+#define ODM_REG_RXRF_A3_11N 0x3C
+#define ODM_REG_T_METER_92D_11N 0x42
+#define ODM_REG_T_METER_88E_11N 0x42
+
+
+
+/* 2 BB REG LIST */
+/* PAGE 8 */
+#define ODM_REG_BB_CTRL_11N 0x800
+#define ODM_REG_RF_PIN_11N 0x804
+#define ODM_REG_PSD_CTRL_11N 0x808
+#define ODM_REG_TX_ANT_CTRL_11N 0x80C
+#define ODM_REG_BB_PWR_SAV5_11N 0x818
+#define ODM_REG_CCK_RPT_FORMAT_11N 0x824
+#define ODM_REG_RX_DEFUALT_A_11N 0x858
+#define ODM_REG_RX_DEFUALT_B_11N 0x85A
+#define ODM_REG_BB_PWR_SAV3_11N 0x85C
+#define ODM_REG_ANTSEL_CTRL_11N 0x860
+#define ODM_REG_RX_ANT_CTRL_11N 0x864
+#define ODM_REG_PIN_CTRL_11N 0x870
+#define ODM_REG_BB_PWR_SAV1_11N 0x874
+#define ODM_REG_ANTSEL_PATH_11N 0x878
+#define ODM_REG_BB_3WIRE_11N 0x88C
+#define ODM_REG_SC_CNT_11N 0x8C4
+#define ODM_REG_PSD_DATA_11N 0x8B4
+/* PAGE 9 */
+#define ODM_REG_ANT_MAPPING1_11N 0x914
+#define ODM_REG_ANT_MAPPING2_11N 0x918
+/* PAGE A */
+#define ODM_REG_CCK_ANTDIV_PARA1_11N 0xA00
+#define ODM_REG_CCK_CCA_11N 0xA0A
+#define ODM_REG_CCK_ANTDIV_PARA2_11N 0xA0C
+#define ODM_REG_CCK_ANTDIV_PARA3_11N 0xA10
+#define ODM_REG_CCK_ANTDIV_PARA4_11N 0xA14
+#define ODM_REG_CCK_FILTER_PARA1_11N 0xA22
+#define ODM_REG_CCK_FILTER_PARA2_11N 0xA23
+#define ODM_REG_CCK_FILTER_PARA3_11N 0xA24
+#define ODM_REG_CCK_FILTER_PARA4_11N 0xA25
+#define ODM_REG_CCK_FILTER_PARA5_11N 0xA26
+#define ODM_REG_CCK_FILTER_PARA6_11N 0xA27
+#define ODM_REG_CCK_FILTER_PARA7_11N 0xA28
+#define ODM_REG_CCK_FILTER_PARA8_11N 0xA29
+#define ODM_REG_CCK_FA_RST_11N 0xA2C
+#define ODM_REG_CCK_FA_MSB_11N 0xA58
+#define ODM_REG_CCK_FA_LSB_11N 0xA5C
+#define ODM_REG_CCK_CCA_CNT_11N 0xA60
+#define ODM_REG_BB_PWR_SAV4_11N 0xA74
+/* PAGE B */
+#define ODM_REG_LNA_SWITCH_11N 0xB2C
+#define ODM_REG_PATH_SWITCH_11N 0xB30
+#define ODM_REG_RSSI_CTRL_11N 0xB38
+#define ODM_REG_CONFIG_ANTA_11N 0xB68
+#define ODM_REG_RSSI_BT_11N 0xB9C
+/* PAGE C */
+#define ODM_REG_OFDM_FA_HOLDC_11N 0xC00
+#define ODM_REG_RX_PATH_11N 0xC04
+#define ODM_REG_TRMUX_11N 0xC08
+#define ODM_REG_OFDM_FA_RSTC_11N 0xC0C
+#define ODM_REG_RXIQI_MATRIX_11N 0xC14
+#define ODM_REG_TXIQK_MATRIX_LSB1_11N 0xC4C
+#define ODM_REG_IGI_A_11N 0xC50
+#define ODM_REG_ANTDIV_PARA2_11N 0xC54
+#define ODM_REG_IGI_B_11N 0xC58
+#define ODM_REG_ANTDIV_PARA3_11N 0xC5C
+#define ODM_REG_BB_PWR_SAV2_11N 0xC70
+#define ODM_REG_RX_OFF_11N 0xC7C
+#define ODM_REG_TXIQK_MATRIXA_11N 0xC80
+#define ODM_REG_TXIQK_MATRIXB_11N 0xC88
+#define ODM_REG_TXIQK_MATRIXA_LSB2_11N 0xC94
+#define ODM_REG_TXIQK_MATRIXB_LSB2_11N 0xC9C
+#define ODM_REG_RXIQK_MATRIX_LSB_11N 0xCA0
+#define ODM_REG_ANTDIV_PARA1_11N 0xCA4
+#define ODM_REG_OFDM_FA_TYPE1_11N 0xCF0
+/* PAGE D */
+#define ODM_REG_OFDM_FA_RSTD_11N 0xD00
+#define ODM_REG_OFDM_FA_TYPE2_11N 0xDA0
+#define ODM_REG_OFDM_FA_TYPE3_11N 0xDA4
+#define ODM_REG_OFDM_FA_TYPE4_11N 0xDA8
+/* PAGE E */
+#define ODM_REG_TXAGC_A_6_18_11N 0xE00
+#define ODM_REG_TXAGC_A_24_54_11N 0xE04
+#define ODM_REG_TXAGC_A_1_MCS32_11N 0xE08
+#define ODM_REG_TXAGC_A_MCS0_3_11N 0xE10
+#define ODM_REG_TXAGC_A_MCS4_7_11N 0xE14
+#define ODM_REG_TXAGC_A_MCS8_11_11N 0xE18
+#define ODM_REG_TXAGC_A_MCS12_15_11N 0xE1C
+#define ODM_REG_FPGA0_IQK_11N 0xE28
+#define ODM_REG_TXIQK_TONE_A_11N 0xE30
+#define ODM_REG_RXIQK_TONE_A_11N 0xE34
+#define ODM_REG_TXIQK_PI_A_11N 0xE38
+#define ODM_REG_RXIQK_PI_A_11N 0xE3C
+#define ODM_REG_TXIQK_11N 0xE40
+#define ODM_REG_RXIQK_11N 0xE44
+#define ODM_REG_IQK_AGC_PTS_11N 0xE48
+#define ODM_REG_IQK_AGC_RSP_11N 0xE4C
+#define ODM_REG_BLUETOOTH_11N 0xE6C
+#define ODM_REG_RX_WAIT_CCA_11N 0xE70
+#define ODM_REG_TX_CCK_RFON_11N 0xE74
+#define ODM_REG_TX_CCK_BBON_11N 0xE78
+#define ODM_REG_OFDM_RFON_11N 0xE7C
+#define ODM_REG_OFDM_BBON_11N 0xE80
+#define ODM_REG_TX2RX_11N 0xE84
+#define ODM_REG_TX2TX_11N 0xE88
+#define ODM_REG_RX_CCK_11N 0xE8C
+#define ODM_REG_RX_OFDM_11N 0xED0
+#define ODM_REG_RX_WAIT_RIFS_11N 0xED4
+#define ODM_REG_RX2RX_11N 0xED8
+#define ODM_REG_STANDBY_11N 0xEDC
+#define ODM_REG_SLEEP_11N 0xEE0
+#define ODM_REG_PMPD_ANAEN_11N 0xEEC
+
+
+
+
+
+
+
+/* 2 MAC REG LIST */
+#define ODM_REG_BB_RST_11N 0x02
+#define ODM_REG_ANTSEL_PIN_11N 0x4C
+#define ODM_REG_EARLY_MODE_11N 0x4D0
+#define ODM_REG_RSSI_MONITOR_11N 0x4FE
+#define ODM_REG_EDCA_VO_11N 0x500
+#define ODM_REG_EDCA_VI_11N 0x504
+#define ODM_REG_EDCA_BE_11N 0x508
+#define ODM_REG_EDCA_BK_11N 0x50C
+#define ODM_REG_TXPAUSE_11N 0x522
+#define ODM_REG_RESP_TX_11N 0x6D8
+#define ODM_REG_ANT_TRAIN_PARA1_11N 0x7b0
+#define ODM_REG_ANT_TRAIN_PARA2_11N 0x7b4
+
+
+/* DIG Related */
+#define ODM_BIT_IGI_11N 0x0000007F
+
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/odm_debug.h b/drivers/staging/rtl8188eu/include/odm_debug.h
new file mode 100644
index 0000000..a9ba6df
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/odm_debug.h
@@ -0,0 +1,145 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+
+#ifndef __ODM_DBG_H__
+#define __ODM_DBG_H__
+
+
+/* */
+/* Define the debug levels */
+/* */
+/* 1. DBG_TRACE and DBG_LOUD are used for normal cases. */
+/* They help SW engineers develop or trace state changes, */
+/* and also help HW engineers trace every operation to and from HW, */
+/* e.g. IO, Tx, Rx. */
+/* */
+/* 2. DBG_WARNING and DBG_SERIOUS are used for unusual or error cases, */
+/* which help us debug SW or HW. */
+
+/* Never used in a call to ODM_RT_TRACE()! */
+#define ODM_DBG_OFF 1
+
+/* Fatal bug. */
+/* For example, Tx/Rx/IO locked up, OS hangs, memory access violation, */
+/* resource allocation failed, unexpected HW behavior, HW BUG and so on. */
+#define ODM_DBG_SERIOUS 2
+
+/* Abnormal, rare, or unexpected cases. */
+/* For example, IRP/Packet/OID canceled, device surprisingly removed and so on. */
+#define ODM_DBG_WARNING 3
+
+/* Normal case with useful information about current SW or HW state. */
+/* For example, Tx/Rx descriptor to fill, Tx/Rx descr. completed status, */
+/* SW protocol state change, dynamic mechanism state change and so on. */
+/* */
+#define ODM_DBG_LOUD 4
+
+/* Normal case with detail execution flow or information. */
+#define ODM_DBG_TRACE 5
+
+/* Define the tracing components */
+/* BB Functions */
+#define ODM_COMP_DIG BIT0
+#define ODM_COMP_RA_MASK BIT1
+#define ODM_COMP_DYNAMIC_TXPWR BIT2
+#define ODM_COMP_FA_CNT BIT3
+#define ODM_COMP_RSSI_MONITOR BIT4
+#define ODM_COMP_CCK_PD BIT5
+#define ODM_COMP_ANT_DIV BIT6
+#define ODM_COMP_PWR_SAVE BIT7
+#define ODM_COMP_PWR_TRA BIT8
+#define ODM_COMP_RATE_ADAPTIVE BIT9
+#define ODM_COMP_PATH_DIV BIT10
+#define ODM_COMP_PSD BIT11
+#define ODM_COMP_DYNAMIC_PRICCA BIT12
+#define ODM_COMP_RXHP BIT13
+/* MAC Functions */
+#define ODM_COMP_EDCA_TURBO BIT16
+#define ODM_COMP_EARLY_MODE BIT17
+/* RF Functions */
+#define ODM_COMP_TX_PWR_TRACK BIT24
+#define ODM_COMP_RX_GAIN_TRACK BIT25
+#define ODM_COMP_CALIBRATION BIT26
+/* Common Functions */
+#define ODM_COMP_COMMON BIT30
+#define ODM_COMP_INIT BIT31
+
+/*------------------------Export Macro Definition---------------------------*/
+#define DbgPrint pr_info
+#define RT_PRINTK(fmt, args...) \
+ DbgPrint("%s(): " fmt, __func__, ## args);
+
+#ifndef ASSERT
+ #define ASSERT(expr)
+#endif
+
+#define ODM_RT_TRACE(pDM_Odm, comp, level, fmt) \
+ if (((comp) & pDM_Odm->DebugComponents) && \
+ (level <= pDM_Odm->DebugLevel)) { \
+ if (pDM_Odm->SupportICType == ODM_RTL8192C) \
+ DbgPrint("[ODM-92C] "); \
+ else if (pDM_Odm->SupportICType == ODM_RTL8192D) \
+ DbgPrint("[ODM-92D] "); \
+ else if (pDM_Odm->SupportICType == ODM_RTL8723A) \
+ DbgPrint("[ODM-8723A] "); \
+ else if (pDM_Odm->SupportICType == ODM_RTL8188E) \
+ DbgPrint("[ODM-8188E] "); \
+ else if (pDM_Odm->SupportICType == ODM_RTL8812) \
+ DbgPrint("[ODM-8812] "); \
+ else if (pDM_Odm->SupportICType == ODM_RTL8821) \
+ DbgPrint("[ODM-8821] "); \
+ RT_PRINTK fmt; \
+ }
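+
+/* Calling convention (illustrative, not taken from the driver sources): the
+ * fmt argument must be a fully parenthesized printf-style argument list,
+ * because the macro expands it as "RT_PRINTK fmt", e.g.
+ *
+ *   ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
+ *                ("CurrentIGI=0x%x\n", CurrentIGI));
+ */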
+
+#define ODM_RT_TRACE_F(pDM_Odm, comp, level, fmt) \
+ if (((comp) & pDM_Odm->DebugComponents) && \
+ (level <= pDM_Odm->DebugLevel)) { \
+ RT_PRINTK fmt; \
+ }
+
+#define ODM_RT_ASSERT(pDM_Odm, expr, fmt) \
+ if (!(expr)) { \
+ DbgPrint( "Assertion failed! %s at ......\n", #expr); \
+ DbgPrint( " ......%s,%s,line=%d\n", __FILE__, \
+ __func__, __LINE__); \
+ RT_PRINTK fmt; \
+ ASSERT(false); \
+ }
+#define ODM_dbg_enter() { DbgPrint("==> %s\n", __func__); }
+#define ODM_dbg_exit() { DbgPrint("<== %s\n", __func__); }
+#define ODM_dbg_trace(str) { DbgPrint("%s:%s\n", __func__, str); }
+
+#define ODM_PRINT_ADDR(pDM_Odm, comp, level, title_str, ptr) \
+ if (((comp) & pDM_Odm->DebugComponents) && \
+ (level <= pDM_Odm->DebugLevel)) { \
+ int __i; \
+ u8 *__ptr = (u8 *)ptr; \
+ DbgPrint("[ODM] "); \
+ DbgPrint(title_str); \
+ DbgPrint(" "); \
+ for (__i = 0; __i < 6; __i++) \
+ DbgPrint("%02X%s", __ptr[__i], (__i==5)?"":"-");\
+ DbgPrint("\n"); \
+ }
+
+void ODM_InitDebugSetting(struct odm_dm_struct *pDM_Odm);
+
+#endif /* __ODM_DBG_H__ */
diff --git a/drivers/staging/rtl8188eu/include/odm_interface.h b/drivers/staging/rtl8188eu/include/odm_interface.h
new file mode 100644
index 0000000..e5c8704
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/odm_interface.h
@@ -0,0 +1,164 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#ifndef __ODM_INTERFACE_H__
+#define __ODM_INTERFACE_H__
+
+/* */
+/* =========== Constant/Structure/Enum/... Define */
+/* */
+
+/* */
+/* =========== Macro Define */
+/* */
+
+#define _reg_all(_name) ODM_##_name
+#define _reg_ic(_name, _ic) ODM_##_name##_ic
+#define _bit_all(_name) BIT_##_name
+#define _bit_ic(_name, _ic) BIT_##_name##_ic
+
+/* _cat: implemented with the token-pasting operator (##). */
+
+/*===================================
+
+#define ODM_REG_DIG_11N 0xC50
+#define ODM_REG_DIG_11AC 0xDDD
+
+ODM_REG(DIG,_pDM_Odm)
+=====================================*/
+
+#define _reg_11N(_name) ODM_REG_##_name##_11N
+#define _reg_11AC(_name) ODM_REG_##_name##_11AC
+#define _bit_11N(_name) ODM_BIT_##_name##_11N
+#define _bit_11AC(_name) ODM_BIT_##_name##_11AC
+
+#define _cat(_name, _ic_type, _func) \
+ ( \
+ ((_ic_type) & ODM_IC_11N_SERIES) ? _func##_11N(_name) : \
+ _func##_11AC(_name) \
+ )
+
+/* _name: name of register or bit. */
+/* Example: "ODM_REG(R_A_AGC_CORE1, pDM_Odm)" */
+/* gets "ODM_R_A_AGC_CORE1" or "ODM_R_A_AGC_CORE1_8192C",
+ * depends on SupportICType. */
+#define ODM_REG(_name, _pDM_Odm) _cat(_name, _pDM_Odm->SupportICType, _reg)
+#define ODM_BIT(_name, _pDM_Odm) _cat(_name, _pDM_Odm->SupportICType, _bit)
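+
+/* Expansion sketch (illustrative): for an 11N-series chip the macros paste
+ * the *_11N suffix onto the name, so
+ *
+ *   ODM_GetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm))
+ *
+ * resolves to ODM_REG_IGI_A_11N (0xC50) and ODM_BIT_IGI_11N (0x0000007F)
+ * from odm_RegDefine11N.h, while 11AC-series chips pick the *_11AC variants.
+ */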
+
+enum odm_h2c_cmd {
+ ODM_H2C_RSSI_REPORT = 0,
+ ODM_H2C_PSD_RESULT = 1,
+ ODM_H2C_PathDiv = 2,
+ ODM_MAX_H2CCMD
+};
+
+/* 2012/02/17 MH For non-MP compile pass only. Linux does not support workitems. */
+/* Suggest the HW team use a thread instead of a workitem; Windows also supports the feature. */
+typedef void (*RT_WORKITEM_CALL_BACK)(void *pContext);
+
+/* =========== Extern Variable ??? It should be forbidden. */
+
+/* =========== Extern Function Prototypes */
+
+u8 ODM_Read1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr);
+
+u16 ODM_Read2Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr);
+
+u32 ODM_Read4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr);
+
+void ODM_Write1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u8 Data);
+
+void ODM_Write2Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u16 Data);
+
+void ODM_Write4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 Data);
+
+void ODM_SetMACReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr,
+ u32 BitMask, u32 Data);
+
+u32 ODM_GetMACReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask);
+
+void ODM_SetBBReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr,
+ u32 BitMask, u32 Data);
+
+u32 ODM_GetBBReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask);
+
+void ODM_SetRFReg(struct odm_dm_struct *pDM_Odm, enum ODM_RF_RADIO_PATH eRFPath,
+ u32 RegAddr, u32 BitMask, u32 Data);
+
+u32 ODM_GetRFReg(struct odm_dm_struct *pDM_Odm, enum ODM_RF_RADIO_PATH eRFPath,
+ u32 RegAddr, u32 BitMask);
+
+/* Memory Relative Function. */
+void ODM_AllocateMemory(struct odm_dm_struct *pDM_Odm, void **pPtr, u32 length);
+void ODM_FreeMemory(struct odm_dm_struct *pDM_Odm, void *pPtr, u32 length);
+
+s32 ODM_CompareMemory(struct odm_dm_struct *pDM_Odm, void *pBuf1, void *pBuf2,
+ u32 length);
+
+/* ODM MISC-spin lock relative API. */
+void ODM_AcquireSpinLock(struct odm_dm_struct *pDM_Odm,
+ enum RT_SPINLOCK_TYPE type);
+
+void ODM_ReleaseSpinLock(struct odm_dm_struct *pDM_Odm,
+ enum RT_SPINLOCK_TYPE type);
+
+/* ODM MISC-workitem relative API. */
+void ODM_InitializeWorkItem(struct odm_dm_struct *pDM_Odm, void *pRtWorkItem,
+ RT_WORKITEM_CALL_BACK RtWorkItemCallback,
+ void *pContext, const char *szID);
+
+void ODM_StartWorkItem(void *pRtWorkItem);
+
+void ODM_StopWorkItem(void *pRtWorkItem);
+
+void ODM_FreeWorkItem(void *pRtWorkItem);
+
+void ODM_ScheduleWorkItem(void *pRtWorkItem);
+
+void ODM_IsWorkItemScheduled(void *pRtWorkItem);
+
+/* ODM Timer relative API. */
+void ODM_StallExecution(u32 usDelay);
+
+void ODM_delay_ms(u32 ms);
+
+void ODM_delay_us(u32 us);
+
+void ODM_sleep_ms(u32 ms);
+
+void ODM_sleep_us(u32 us);
+
+void ODM_SetTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer,
+ u32 msDelay);
+
+void ODM_InitializeTimer(struct odm_dm_struct *pDM_Odm,
+ struct timer_list *pTimer, void *CallBackFunc,
+ void *pContext, const char *szID);
+
+void ODM_CancelTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer);
+
+void ODM_ReleaseTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer);
+
+/* ODM FW relative API. */
+u32 ODM_FillH2CCmd(u8 *pH2CBuffer, u32 H2CBufferLen, u32 CmdNum,
+ u32 *pElementID, u32 *pCmdLen, u8 **pCmbBuffer,
+ u8 *CmdStartSeq);
+
+#endif /* __ODM_INTERFACE_H__ */
diff --git a/drivers/staging/rtl8188eu/include/odm_precomp.h b/drivers/staging/rtl8188eu/include/odm_precomp.h
new file mode 100644
index 0000000..520cbba
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/odm_precomp.h
@@ -0,0 +1,104 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#ifndef __ODM_PRECOMP_H__
+#define __ODM_PRECOMP_H__
+
+#include "odm_types.h"
+
+#define TEST_FALG___ 1
+
+/* 2 Config Flags and Structs - defined by each ODM Type */
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <hal_intf.h>
+
+/* 2 Hardware Parameter Files */
+
+#include "Hal8188EFWImg_CE.h"
+
+
+/* 2 OutSrc Header Files */
+
+#include "odm.h"
+#include "odm_HWConfig.h"
+#include "odm_debug.h"
+#include "odm_RegDefine11AC.h"
+#include "odm_RegDefine11N.h"
+
+#include "HalPhyRf.h"
+#include "HalPhyRf_8188e.h"/* for IQK,LCK,Power-tracking */
+#include "Hal8188ERateAdaptive.h"/* for RA,Power training */
+#include "rtl8188e_hal.h"
+
+#include "odm_interface.h"
+#include "odm_reg.h"
+
+#include "HalHWImg8188E_MAC.h"
+#include "HalHWImg8188E_RF.h"
+#include "HalHWImg8188E_BB.h"
+#include "Hal8188EReg.h"
+
+#include "odm_RegConfig8188E.h"
+#include "odm_RTL8188E.h"
+
+void odm_CmnInfoHook_Debug(struct odm_dm_struct *pDM_Odm);
+void odm_CmnInfoInit_Debug(struct odm_dm_struct *pDM_Odm);
+void odm_DIGInit(struct odm_dm_struct *pDM_Odm);
+void odm_RateAdaptiveMaskInit(struct odm_dm_struct *pDM_Odm);
+void odm_DynamicBBPowerSavingInit(struct odm_dm_struct *pDM_Odm);
+void odm_DynamicTxPowerInit(struct odm_dm_struct *pDM_Odm);
+void odm_TXPowerTrackingInit(struct odm_dm_struct *pDM_Odm);
+void ODM_EdcaTurboInit(struct odm_dm_struct *pDM_Odm);
+void odm_SwAntDivInit_NIC(struct odm_dm_struct *pDM_Odm);
+void odm_GlobalAdapterCheck(void);
+void odm_CmnInfoUpdate_Debug(struct odm_dm_struct *pDM_Odm);
+void odm_CommonInfoSelfUpdate(struct odm_dm_struct *pDM_Odm);
+void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm);
+void odm_DIG(struct odm_dm_struct *pDM_Odm);
+void odm_CCKPacketDetectionThresh(struct odm_dm_struct *pDM_Odm);
+void odm_RefreshRateAdaptiveMaskMP(struct odm_dm_struct *pDM_Odm);
+void odm_DynamicBBPowerSaving(struct odm_dm_struct *pDM_Odm);
+void odm_SwAntDivChkAntSwitch(struct odm_dm_struct *pDM_Odm, u8 Step);
+void odm_EdcaTurboCheck(struct odm_dm_struct *pDM_Odm);
+void odm_DynamicTxPower(struct odm_dm_struct *pDM_Odm);
+void odm_CommonInfoSelfInit(struct odm_dm_struct *pDM_Odm);
+void odm_SwAntDivInit(struct odm_dm_struct *pDM_Odm);
+void odm_RSSIMonitorCheck(struct odm_dm_struct *pDM_Odm);
+void odm_RefreshRateAdaptiveMask(struct odm_dm_struct *pDM_Odm);
+void odm_1R_CCA(struct odm_dm_struct *pDM_Odm);
+void odm_RefreshRateAdaptiveMaskCE(struct odm_dm_struct *pDM_Odm);
+void odm_RefreshRateAdaptiveMaskAPADSL(struct odm_dm_struct *pDM_Odm);
+void odm_DynamicTxPowerNIC(struct odm_dm_struct *pDM_Odm);
+void odm_DynamicTxPowerAP(struct odm_dm_struct *pDM_Odm);
+void odm_RSSIMonitorCheckMP(struct odm_dm_struct *pDM_Odm);
+void odm_RSSIMonitorCheckCE(struct odm_dm_struct *pDM_Odm);
+void odm_RSSIMonitorCheckAP(struct odm_dm_struct *pDM_Odm);
+void odm_TXPowerTrackingThermalMeterInit(struct odm_dm_struct *pDM_Odm);
+void odm_EdcaTurboCheckCE(struct odm_dm_struct *pDM_Odm);
+void odm_TXPowerTrackingCheckCE(struct odm_dm_struct *pDM_Odm);
+void odm_TXPowerTrackingCheckMP(struct odm_dm_struct *pDM_Odm);
+void odm_TXPowerTrackingCheckAP(struct odm_dm_struct *pDM_Odm);
+void odm_SwAntDivChkAntSwitchCallback(void *FunctionContext);
+void odm_InitHybridAntDiv(struct odm_dm_struct *pDM_Odm);
+void odm_HwAntDiv(struct odm_dm_struct *pDM_Odm);
+
+#endif /* __ODM_PRECOMP_H__ */
diff --git a/drivers/staging/rtl8188eu/include/odm_reg.h b/drivers/staging/rtl8188eu/include/odm_reg.h
new file mode 100644
index 0000000..89bc46b
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/odm_reg.h
@@ -0,0 +1,119 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+/* */
+/* File Name: odm_reg.h */
+/* */
+/* Description: */
+/* */
+/* This file is for general register definition. */
+/* */
+/* */
+/* */
+#ifndef __HAL_ODM_REG_H__
+#define __HAL_ODM_REG_H__
+
+/* */
+/* Register Definition */
+/* */
+
+/* MAC REG */
+#define ODM_BB_RESET 0x002
+#define ODM_DUMMY 0x4fe
+#define ODM_EDCA_VO_PARAM 0x500
+#define ODM_EDCA_VI_PARAM 0x504
+#define ODM_EDCA_BE_PARAM 0x508
+#define ODM_EDCA_BK_PARAM 0x50C
+#define ODM_TXPAUSE 0x522
+
+/* BB REG */
+#define ODM_FPGA_PHY0_PAGE8 0x800
+#define ODM_PSD_SETTING 0x808
+#define ODM_AFE_SETTING 0x818
+#define ODM_TXAGC_B_6_18 0x830
+#define ODM_TXAGC_B_24_54 0x834
+#define ODM_TXAGC_B_MCS32_5 0x838
+#define ODM_TXAGC_B_MCS0_MCS3 0x83c
+#define ODM_TXAGC_B_MCS4_MCS7 0x848
+#define ODM_TXAGC_B_MCS8_MCS11 0x84c
+#define ODM_ANALOG_REGISTER 0x85c
+#define ODM_RF_INTERFACE_OUTPUT 0x860
+#define ODM_TXAGC_B_MCS12_MCS15 0x868
+#define ODM_TXAGC_B_11_A_2_11 0x86c
+#define ODM_AD_DA_LSB_MASK 0x874
+#define ODM_ENABLE_3_WIRE 0x88c
+#define ODM_PSD_REPORT 0x8b4
+#define ODM_R_ANT_SELECT 0x90c
+#define ODM_CCK_ANT_SELECT 0xa07
+#define ODM_CCK_PD_THRESH 0xa0a
+#define ODM_CCK_RF_REG1 0xa11
+#define ODM_CCK_MATCH_FILTER 0xa20
+#define ODM_CCK_RAKE_MAC 0xa2e
+#define ODM_CCK_CNT_RESET 0xa2d
+#define ODM_CCK_TX_DIVERSITY 0xa2f
+#define ODM_CCK_FA_CNT_MSB 0xa5b
+#define ODM_CCK_FA_CNT_LSB 0xa5c
+#define ODM_CCK_NEW_FUNCTION 0xa75
+#define ODM_OFDM_PHY0_PAGE_C 0xc00
+#define ODM_OFDM_RX_ANT 0xc04
+#define ODM_R_A_RXIQI 0xc14
+#define ODM_R_A_AGC_CORE1 0xc50
+#define ODM_R_A_AGC_CORE2 0xc54
+#define ODM_R_B_AGC_CORE1 0xc58
+#define ODM_R_AGC_PAR 0xc70
+#define ODM_R_HTSTF_AGC_PAR 0xc7c
+#define ODM_TX_PWR_TRAINING_A 0xc90
+#define ODM_TX_PWR_TRAINING_B 0xc98
+#define ODM_OFDM_FA_CNT1 0xcf0
+#define ODM_OFDM_PHY0_PAGE_D 0xd00
+#define ODM_OFDM_FA_CNT2 0xda0
+#define ODM_OFDM_FA_CNT3 0xda4
+#define ODM_OFDM_FA_CNT4 0xda8
+#define ODM_TXAGC_A_6_18 0xe00
+#define ODM_TXAGC_A_24_54 0xe04
+#define ODM_TXAGC_A_1_MCS32 0xe08
+#define ODM_TXAGC_A_MCS0_MCS3 0xe10
+#define ODM_TXAGC_A_MCS4_MCS7 0xe14
+#define ODM_TXAGC_A_MCS8_MCS11 0xe18
+#define ODM_TXAGC_A_MCS12_MCS15 0xe1c
+
+/* RF REG */
+#define ODM_GAIN_SETTING 0x00
+#define ODM_CHANNEL 0x18
+
+/* Ant Detect Reg */
+#define ODM_DPDT 0x300
+
+/* PSD Init */
+#define ODM_PSDREG 0x808
+
+/* 92D Path Div */
+#define PATHDIV_REG 0xB30
+#define PATHDIV_TRI 0xBA0
+
+
+/* */
+/* Bitmap Definition */
+/* */
+
+#define BIT_FA_RESET BIT0
+
+
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/odm_types.h b/drivers/staging/rtl8188eu/include/odm_types.h
new file mode 100644
index 0000000..78ee2ba
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/odm_types.h
@@ -0,0 +1,62 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __ODM_TYPES_H__
+#define __ODM_TYPES_H__
+
+/* */
+/* Define Different SW team support */
+/* */
+#define ODM_AP 0x01 /* BIT0 */
+#define ODM_ADSL 0x02 /* BIT1 */
+#define ODM_CE 0x04 /* BIT2 */
+#define ODM_MP 0x08 /* BIT3 */
+
+#define RT_PCI_INTERFACE 1
+#define RT_USB_INTERFACE 2
+#define RT_SDIO_INTERFACE 3
+
+enum HAL_STATUS {
+ HAL_STATUS_SUCCESS,
+ HAL_STATUS_FAILURE,
+};
+
+enum RT_SPINLOCK_TYPE {
+ RT_TEMP = 1,
+};
+
+#include <basic_types.h>
+
+#define DEV_BUS_TYPE RT_USB_INTERFACE
+
+#define SET_TX_DESC_ANTSEL_A_88E(__pTxDesc, __Value) \
+ SET_BITS_TO_LE_4BYTE(__pTxDesc+8, 24, 1, __Value)
+#define SET_TX_DESC_ANTSEL_B_88E(__pTxDesc, __Value) \
+ SET_BITS_TO_LE_4BYTE(__pTxDesc+8, 25, 1, __Value)
+#define SET_TX_DESC_ANTSEL_C_88E(__pTxDesc, __Value) \
+ SET_BITS_TO_LE_4BYTE(__pTxDesc+28, 29, 1, __Value)
+
+/* Define unused flags to avoid compile warnings. */
+#define USE_WORKITEM 0
+#define FOR_BRAZIL_PRETEST 0
+#define BT_30_SUPPORT 0
+#define FPGA_TWO_MAC_VERIFICATION 0
+
+
+#endif /* __ODM_TYPES_H__ */
diff --git a/drivers/staging/rtl8188eu/include/osdep_intf.h b/drivers/staging/rtl8188eu/include/osdep_intf.h
new file mode 100644
index 0000000..c4599c5
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/osdep_intf.h
@@ -0,0 +1,83 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#ifndef __OSDEP_INTF_H_
+#define __OSDEP_INTF_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+struct intf_priv {
+ u8 *intf_dev;
+ u32 max_iosz; /* USB2.0: 128, USB1.1: 64, SDIO:64 */
+ u32 max_xmitsz; /* USB2.0: unlimited, SDIO:512 */
+ u32 max_recvsz; /* USB2.0: unlimited, SDIO:512 */
+
+ u8 *io_rwmem;
+ u8 *allocated_io_rwmem;
+ u32 io_wsz; /* unit: 4 bytes */
+ u32 io_rsz; /* unit: 4 bytes */
+ u8 intf_status;
+
+ void (*_bus_io)(u8 *priv);
+
+/*
+Under synchronous IRP (SDIO/USB),
+a protection mechanism is necessary for io_rwmem (the read/write protocol).
+
+Under asynchronous IRP (SDIO/USB),
+the protection is provided through the pending queue.
+*/
+ struct mutex ioctl_mutex;
+ /* when in USB, IO is through interrupt in/out endpoints */
+ struct usb_device *udev;
+ struct urb *piorw_urb;
+ u8 io_irp_cnt;
+ u8 bio_irp_pending;
+ struct semaphore io_retevt;
+ struct timer_list io_timer;
+ u8 bio_irp_timeout;
+ u8 bio_timer_cancel;
+};
+
+u8 rtw_init_drv_sw(struct adapter *padapter);
+u8 rtw_free_drv_sw(struct adapter *padapter);
+u8 rtw_reset_drv_sw(struct adapter *padapter);
+
+u32 rtw_start_drv_threads(struct adapter *padapter);
+void rtw_stop_drv_threads(struct adapter *padapter);
+void rtw_cancel_all_timer(struct adapter *padapter);
+
+int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+
+int rtw_init_netdev_name(struct net_device *pnetdev, const char *ifname);
+struct net_device *rtw_init_netdev(struct adapter *padapter);
+u16 rtw_recv_select_queue(struct sk_buff *skb);
+void rtw_proc_init_one(struct net_device *dev);
+void rtw_proc_remove_one(struct net_device *dev);
+
+void rtw_ips_dev_unload(struct adapter *padapter);
+
+int rtw_ips_pwr_up(struct adapter *padapter);
+void rtw_ips_pwr_down(struct adapter *padapter);
+int rtw_hw_suspend(struct adapter *padapter);
+int rtw_hw_resume(struct adapter *padapter);
+
+#endif /* _OSDEP_INTF_H_ */
diff --git a/drivers/staging/rtl8188eu/include/osdep_service.h b/drivers/staging/rtl8188eu/include/osdep_service.h
new file mode 100644
index 0000000..44f24fa
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/osdep_service.h
@@ -0,0 +1,547 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __OSDEP_SERVICE_H_
+#define __OSDEP_SERVICE_H_
+
+#include <basic_types.h>
+
+#define _FAIL 0
+#define _SUCCESS 1
+#define RTW_RX_HANDLED 2
+
+#include <linux/spinlock.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kref.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/circ_buf.h>
+#include <linux/uaccess.h>
+#include <asm/byteorder.h>
+#include <linux/atomic.h>
+#include <linux/io.h>
+#include <linux/semaphore.h>
+#include <linux/sem.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <net/iw_handler.h>
+#include <linux/if_arp.h>
+#include <linux/rtnetlink.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h> /* Necessary because we use the proc fs */
+#include <linux/interrupt.h> /* for struct tasklet_struct */
+#include <linux/ip.h>
+#include <linux/kthread.h>
+
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+
+struct __queue {
+ struct list_head queue;
+ spinlock_t lock;
+};
+
+#define thread_exit() complete_and_exit(NULL, 0)
+
+static inline struct list_head *get_next(struct list_head *list)
+{
+ return list->next;
+}
+
+static inline struct list_head *get_list_head(struct __queue *queue)
+{
+ return (&(queue->queue));
+}
+
+
+#define LIST_CONTAINOR(ptr, type, member) \
+ ((type *)((char *)(ptr)-(size_t)(&((type *)0)->member)))
+
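+/* Usage sketch (illustrative; the struct and field names are hypothetical):
+ * LIST_CONTAINOR() recovers the enclosing object from an embedded
+ * struct list_head, like the kernel's container_of(), e.g.
+ *
+ *   struct my_frame { struct list_head list; ... };
+ *
+ *   plist = get_next(get_list_head(pqueue));
+ *   pframe = LIST_CONTAINOR(plist, struct my_frame, list);
+ */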
+
+static inline void _enter_critical(spinlock_t *plock, unsigned long *pirqL)
+{
+ spin_lock_irqsave(plock, *pirqL);
+}
+
+static inline void _exit_critical(spinlock_t *plock, unsigned long *pirqL)
+{
+ spin_unlock_irqrestore(plock, *pirqL);
+}
+
+static inline void _enter_critical_ex(spinlock_t *plock, unsigned long *pirqL)
+{
+ spin_lock_irqsave(plock, *pirqL);
+}
+
+static inline void _exit_critical_ex(spinlock_t *plock, unsigned long *pirqL)
+{
+ spin_unlock_irqrestore(plock, *pirqL);
+}
+
+static inline void _enter_critical_bh(spinlock_t *plock, unsigned long *pirqL)
+{
+ spin_lock_bh(plock);
+}
+
+static inline void _exit_critical_bh(spinlock_t *plock, unsigned long *pirqL)
+{
+ spin_unlock_bh(plock);
+}
+
+static inline int _enter_critical_mutex(struct mutex *pmutex, unsigned long *pirqL)
+{
+ int ret;
+
+ ret = mutex_lock_interruptible(pmutex);
+ return ret;
+}
+
+
+static inline void _exit_critical_mutex(struct mutex *pmutex, unsigned long *pirqL)
+{
+ mutex_unlock(pmutex);
+}
+
+static inline void rtw_list_delete(struct list_head *plist)
+{
+ list_del_init(plist);
+}
+
+static inline void _init_timer(struct timer_list *ptimer, struct net_device *nic_hdl, void *pfunc, void *cntx)
+{
+ ptimer->function = pfunc;
+ ptimer->data = (unsigned long)cntx;
+ init_timer(ptimer);
+}
+
+static inline void _set_timer(struct timer_list *ptimer, u32 delay_time)
+{
+ mod_timer(ptimer, jiffies + (delay_time * HZ / 1000));
+}
+
+static inline void _cancel_timer(struct timer_list *ptimer, u8 *bcancelled)
+{
+ del_timer_sync(ptimer);
+ *bcancelled = true; /* true == 1; false == 0 */
+}
+
+#define RTW_TIMER_HDL_ARGS void *FunctionContext
+#define RTW_TIMER_HDL_NAME(name) rtw_##name##_timer_hdl
+#define RTW_DECLARE_TIMER_HDL(name) void RTW_TIMER_HDL_NAME(name)(RTW_TIMER_HDL_ARGS)
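+
+/* Usage sketch (illustrative; the timer and handler names are hypothetical):
+ *
+ *   RTW_DECLARE_TIMER_HDL(survey);   declares rtw_survey_timer_hdl()
+ *
+ *   _init_timer(&priv->survey_timer, padapter->pnetdev,
+ *               RTW_TIMER_HDL_NAME(survey), padapter);
+ *   _set_timer(&priv->survey_timer, 100);   fires in roughly 100 ms
+ */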
+
+static inline void _init_workitem(struct work_struct *pwork, void *pfunc, void *cntx)
+{
+ INIT_WORK(pwork, pfunc);
+}
+
+static inline void _set_workitem(struct work_struct *pwork)
+{
+ schedule_work(pwork);
+}
+
+static inline void _cancel_workitem_sync(struct work_struct *pwork)
+{
+ cancel_work_sync(pwork);
+}
+/* */
+/* Global Mutex: can only be used at PASSIVE level. */
+/* */
+
+#define ACQUIRE_GLOBAL_MUTEX(_MutexCounter) \
+{ \
+ while (atomic_inc_return((atomic_t *)&(_MutexCounter)) != 1)\
+ { \
+ atomic_dec((atomic_t *)&(_MutexCounter)); \
+ msleep(10); \
+ } \
+}
+
+#define RELEASE_GLOBAL_MUTEX(_MutexCounter) \
+{ \
+ atomic_dec((atomic_t *)&(_MutexCounter)); \
+}
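+
+/* Usage sketch (illustrative; the counter name is hypothetical): the macro
+ * spin-waits with msleep() on a plain int used as an atomic counter, so it
+ * may sleep and must only be used from process context (PASSIVE level):
+ *
+ *   static int drv_global_mutex;
+ *
+ *   ACQUIRE_GLOBAL_MUTEX(drv_global_mutex);
+ *   ... critical section ...
+ *   RELEASE_GLOBAL_MUTEX(drv_global_mutex);
+ */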
+
+static inline int rtw_netif_queue_stopped(struct net_device *pnetdev)
+{
+ return netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 0)) &&
+ netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 1)) &&
+ netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 2)) &&
+ netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 3));
+}
+
+static inline void rtw_netif_wake_queue(struct net_device *pnetdev)
+{
+ netif_tx_wake_all_queues(pnetdev);
+}
+
+static inline void rtw_netif_start_queue(struct net_device *pnetdev)
+{
+ netif_tx_start_all_queues(pnetdev);
+}
+
+static inline void rtw_netif_stop_queue(struct net_device *pnetdev)
+{
+ netif_tx_stop_all_queues(pnetdev);
+}
+
+#ifndef BIT
+ #define BIT(x) (1 << (x))
+#endif
+
+#define BIT0 0x00000001
+#define BIT1 0x00000002
+#define BIT2 0x00000004
+#define BIT3 0x00000008
+#define BIT4 0x00000010
+#define BIT5 0x00000020
+#define BIT6 0x00000040
+#define BIT7 0x00000080
+#define BIT8 0x00000100
+#define BIT9 0x00000200
+#define BIT10 0x00000400
+#define BIT11 0x00000800
+#define BIT12 0x00001000
+#define BIT13 0x00002000
+#define BIT14 0x00004000
+#define BIT15 0x00008000
+#define BIT16 0x00010000
+#define BIT17 0x00020000
+#define BIT18 0x00040000
+#define BIT19 0x00080000
+#define BIT20 0x00100000
+#define BIT21 0x00200000
+#define BIT22 0x00400000
+#define BIT23 0x00800000
+#define BIT24 0x01000000
+#define BIT25 0x02000000
+#define BIT26 0x04000000
+#define BIT27 0x08000000
+#define BIT28 0x10000000
+#define BIT29 0x20000000
+#define BIT30 0x40000000
+#define BIT31 0x80000000
+#define BIT32 0x0100000000
+#define BIT33 0x0200000000
+#define BIT34 0x0400000000
+#define BIT35 0x0800000000
+#define BIT36 0x1000000000
+
+extern int RTW_STATUS_CODE(int error_code);
+
+/* flags used for rtw_update_mem_stat() */
+enum {
+ MEM_STAT_VIR_ALLOC_SUCCESS,
+ MEM_STAT_VIR_ALLOC_FAIL,
+ MEM_STAT_VIR_FREE,
+ MEM_STAT_PHY_ALLOC_SUCCESS,
+ MEM_STAT_PHY_ALLOC_FAIL,
+ MEM_STAT_PHY_FREE,
+ MEM_STAT_TX, /* used to distinguish TX/RX, assigned by the caller */
+ MEM_STAT_TX_ALLOC_SUCCESS,
+ MEM_STAT_TX_ALLOC_FAIL,
+ MEM_STAT_TX_FREE,
+ MEM_STAT_RX, /* used to distinguish TX/RX, assigned by the caller */
+ MEM_STAT_RX_ALLOC_SUCCESS,
+ MEM_STAT_RX_ALLOC_FAIL,
+ MEM_STAT_RX_FREE
+};
+
+extern unsigned char MCS_rate_2R[16];
+extern unsigned char MCS_rate_1R[16];
+extern unsigned char RTW_WPA_OUI[];
+extern unsigned char WPA_TKIP_CIPHER[4];
+extern unsigned char RSN_TKIP_CIPHER[4];
+
+#define rtw_update_mem_stat(flag, sz) do {} while (0)
+u8 *_rtw_vmalloc(u32 sz);
+u8 *_rtw_zvmalloc(u32 sz);
+void _rtw_vmfree(u8 *pbuf, u32 sz);
+u8 *_rtw_zmalloc(u32 sz);
+u8 *_rtw_malloc(u32 sz);
+void _rtw_mfree(u8 *pbuf, u32 sz);
+#define rtw_vmalloc(sz) _rtw_vmalloc((sz))
+#define rtw_zvmalloc(sz) _rtw_zvmalloc((sz))
+#define rtw_vmfree(pbuf, sz) _rtw_vmfree((pbuf), (sz))
+#define rtw_malloc(sz) _rtw_malloc((sz))
+#define rtw_zmalloc(sz) _rtw_zmalloc((sz))
+#define rtw_mfree(pbuf, sz) _rtw_mfree((pbuf), (sz))
+
+void *rtw_malloc2d(int h, int w, int size);
+void rtw_mfree2d(void *pbuf, int h, int w, int size);
+
+void _rtw_memcpy(void *dst, void *src, u32 sz);
+int _rtw_memcmp(void *dst, void *src, u32 sz);
+void _rtw_memset(void *pbuf, int c, u32 sz);
+
+void _rtw_init_listhead(struct list_head *list);
+u32 rtw_is_list_empty(struct list_head *phead);
+void rtw_list_insert_head(struct list_head *plist, struct list_head *phead);
+void rtw_list_insert_tail(struct list_head *plist, struct list_head *phead);
+void rtw_list_delete(struct list_head *plist);
+
+void _rtw_init_sema(struct semaphore *sema, int init_val);
+void _rtw_free_sema(struct semaphore *sema);
+void _rtw_up_sema(struct semaphore *sema);
+u32 _rtw_down_sema(struct semaphore *sema);
+void _rtw_mutex_init(struct mutex *pmutex);
+void _rtw_mutex_free(struct mutex *pmutex);
+void _rtw_spinlock_init(spinlock_t *plock);
+void _rtw_spinlock_free(spinlock_t *plock);
+
+void _rtw_init_queue(struct __queue *pqueue);
+u32 _rtw_queue_empty(struct __queue *pqueue);
+u32 rtw_end_of_queue_search(struct list_head *queue, struct list_head *pelement);
+
+u32 rtw_get_current_time(void);
+u32 rtw_systime_to_ms(u32 systime);
+u32 rtw_ms_to_systime(u32 ms);
+s32 rtw_get_passing_time_ms(u32 start);
+s32 rtw_get_time_interval_ms(u32 start, u32 end);
+
+void rtw_sleep_schedulable(int ms);
+
+void rtw_msleep_os(int ms);
+void rtw_usleep_os(int us);
+
+u32 rtw_atoi(u8 *s);
+
+void rtw_mdelay_os(int ms);
+void rtw_udelay_os(int us);
+
+void rtw_yield_os(void);
+
+static inline unsigned char _cancel_timer_ex(struct timer_list *ptimer)
+{
+ return del_timer_sync(ptimer);
+}
+
+static __inline void thread_enter(char *name)
+{
+#ifdef daemonize
+ daemonize("%s", name);
+#endif
+ allow_signal(SIGTERM);
+}
+
+static inline void flush_signals_thread(void)
+{
+ if (signal_pending(current))
+ flush_signals(current);
+}
+
+static inline int res_to_status(int res)
+{
+ return res;
+}
+
+#define _RND(sz, r) ((((sz)+((r)-1))/(r))*(r))
+#define RND4(x) (((x >> 2) + (((x & 3) == 0) ? 0: 1)) << 2)
+
+static inline u32 _RND4(u32 sz)
+{
+ u32 val;
+
+ val = ((sz >> 2) + ((sz & 3) ? 1: 0)) << 2;
+ return val;
+}
+
+static inline u32 _RND8(u32 sz)
+{
+ u32 val;
+
+ val = ((sz >> 3) + ((sz & 7) ? 1: 0)) << 3;
+ return val;
+}
+
+static inline u32 _RND128(u32 sz)
+{
+ u32 val;
+
+ val = ((sz >> 7) + ((sz & 127) ? 1: 0)) << 7;
+ return val;
+}
+
+static inline u32 _RND256(u32 sz)
+{
+ u32 val;
+
+ val = ((sz >> 8) + ((sz & 255) ? 1: 0)) << 8;
+ return val;
+}
+
+static inline u32 _RND512(u32 sz)
+{
+ u32 val;
+
+ val = ((sz >> 9) + ((sz & 511) ? 1: 0)) << 9;
+ return val;
+}
+
+static inline u32 bitshift(u32 bitmask)
+{
+ u32 i;
+
+ for (i = 0; i <= 31; i++)
+ if (((bitmask>>i) & 0x1) == 1) break;
+ return i;
+}
+
+/* limitation of path length */
+#define PATH_LENGTH_MAX PATH_MAX
+
+void rtw_suspend_lock_init(void);
+void rtw_suspend_lock_uninit(void);
+void rtw_lock_suspend(void);
+void rtw_unlock_suspend(void);
+
+/* Atomic integer operations */
+#define ATOMIC_T atomic_t
+
+void ATOMIC_SET(ATOMIC_T *v, int i);
+int ATOMIC_READ(ATOMIC_T *v);
+void ATOMIC_ADD(ATOMIC_T *v, int i);
+void ATOMIC_SUB(ATOMIC_T *v, int i);
+void ATOMIC_INC(ATOMIC_T *v);
+void ATOMIC_DEC(ATOMIC_T *v);
+int ATOMIC_ADD_RETURN(ATOMIC_T *v, int i);
+int ATOMIC_SUB_RETURN(ATOMIC_T *v, int i);
+int ATOMIC_INC_RETURN(ATOMIC_T *v);
+int ATOMIC_DEC_RETURN(ATOMIC_T *v);
+
+/* File operation APIs, just for linux now */
+int rtw_is_file_readable(char *path);
+int rtw_retrive_from_file(char *path, u8 __user *buf, u32 sz);
+int rtw_store_to_file(char *path, u8 __user *buf, u32 sz);
+
+struct rtw_netdev_priv_indicator {
+ void *priv;
+ u32 sizeof_priv;
+};
+struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv,
+ void *old_priv);
+struct net_device *rtw_alloc_etherdev(int sizeof_priv);
+
+#define rtw_netdev_priv(netdev) \
+ (((struct rtw_netdev_priv_indicator *)netdev_priv(netdev))->priv)
+void rtw_free_netdev(struct net_device *netdev);
+
+#define NDEV_FMT "%s"
+#define NDEV_ARG(ndev) ndev->name
+#define ADPT_FMT "%s"
+#define ADPT_ARG(adapter) adapter->pnetdev->name
+#define FUNC_NDEV_FMT "%s(%s)"
+#define FUNC_NDEV_ARG(ndev) __func__, ndev->name
+#define FUNC_ADPT_FMT "%s(%s)"
+#define FUNC_ADPT_ARG(adapter) __func__, adapter->pnetdev->name
+
+#define rtw_signal_process(pid, sig) kill_pid(find_vpid((pid)),(sig), 1)
+
+u64 rtw_modular64(u64 x, u64 y);
+u64 rtw_division64(u64 x, u64 y);
+
+/* Macros for handling unaligned memory accesses */
+
+#define RTW_GET_BE16(a) ((u16) (((a)[0] << 8) | (a)[1]))
+#define RTW_PUT_BE16(a, val) \
+ do { \
+ (a)[0] = ((u16) (val)) >> 8; \
+ (a)[1] = ((u16) (val)) & 0xff; \
+ } while (0)
+
+#define RTW_GET_LE16(a) ((u16) (((a)[1] << 8) | (a)[0]))
+#define RTW_PUT_LE16(a, val) \
+ do { \
+ (a)[1] = ((u16) (val)) >> 8; \
+ (a)[0] = ((u16) (val)) & 0xff; \
+ } while (0)
+
+#define RTW_GET_BE24(a) ((((u32) (a)[0]) << 16) | (((u32) (a)[1]) << 8) | \
+ ((u32) (a)[2]))
+#define RTW_PUT_BE24(a, val) \
+ do { \
+ (a)[0] = (u8) ((((u32) (val)) >> 16) & 0xff); \
+ (a)[1] = (u8) ((((u32) (val)) >> 8) & 0xff); \
+ (a)[2] = (u8) (((u32) (val)) & 0xff); \
+ } while (0)
+
+#define RTW_GET_BE32(a) ((((u32) (a)[0]) << 24) | (((u32) (a)[1]) << 16) | \
+ (((u32) (a)[2]) << 8) | ((u32) (a)[3]))
+#define RTW_PUT_BE32(a, val) \
+ do { \
+ (a)[0] = (u8) ((((u32) (val)) >> 24) & 0xff); \
+ (a)[1] = (u8) ((((u32) (val)) >> 16) & 0xff); \
+ (a)[2] = (u8) ((((u32) (val)) >> 8) & 0xff); \
+ (a)[3] = (u8) (((u32) (val)) & 0xff); \
+ } while (0)
+
+#define RTW_GET_LE32(a) ((((u32) (a)[3]) << 24) | (((u32) (a)[2]) << 16) | \
+ (((u32) (a)[1]) << 8) | ((u32) (a)[0]))
+#define RTW_PUT_LE32(a, val) \
+ do { \
+ (a)[3] = (u8) ((((u32) (val)) >> 24) & 0xff); \
+ (a)[2] = (u8) ((((u32) (val)) >> 16) & 0xff); \
+ (a)[1] = (u8) ((((u32) (val)) >> 8) & 0xff); \
+ (a)[0] = (u8) (((u32) (val)) & 0xff); \
+ } while (0)
+
+#define RTW_GET_BE64(a) ((((u64) (a)[0]) << 56) | (((u64) (a)[1]) << 48) | \
+ (((u64) (a)[2]) << 40) | (((u64) (a)[3]) << 32) | \
+ (((u64) (a)[4]) << 24) | (((u64) (a)[5]) << 16) | \
+ (((u64) (a)[6]) << 8) | ((u64) (a)[7]))
+#define RTW_PUT_BE64(a, val) \
+ do { \
+ (a)[0] = (u8) (((u64) (val)) >> 56); \
+ (a)[1] = (u8) (((u64) (val)) >> 48); \
+ (a)[2] = (u8) (((u64) (val)) >> 40); \
+ (a)[3] = (u8) (((u64) (val)) >> 32); \
+ (a)[4] = (u8) (((u64) (val)) >> 24); \
+ (a)[5] = (u8) (((u64) (val)) >> 16); \
+ (a)[6] = (u8) (((u64) (val)) >> 8); \
+ (a)[7] = (u8) (((u64) (val)) & 0xff); \
+ } while (0)
+
+#define RTW_GET_LE64(a) ((((u64) (a)[7]) << 56) | (((u64) (a)[6]) << 48) | \
+ (((u64) (a)[5]) << 40) | (((u64) (a)[4]) << 32) | \
+ (((u64) (a)[3]) << 24) | (((u64) (a)[2]) << 16) | \
+ (((u64) (a)[1]) << 8) | ((u64) (a)[0]))
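These accessors read and write multi-byte, possibly unaligned fields one octet at a time; the kernel's generic helpers from <asm/unaligned.h> do the same job. A small illustration (not part of the patch) of the little-endian 16-bit pair:

    u8 buf[2];
    u16 v;

    RTW_PUT_LE16(buf, 0x1234);        /* buf[0] = 0x34, buf[1] = 0x12 */
    v = RTW_GET_LE16(buf);            /* v == 0x1234 */

    /* equivalent generic kernel helpers */
    put_unaligned_le16(0x1234, buf);
    v = get_unaligned_le16(buf);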
+
+void rtw_buf_free(u8 **buf, u32 *buf_len);
+void rtw_buf_update(u8 **buf, u32 *buf_len, u8 *src, u32 src_len);
+
+struct rtw_cbuf {
+ u32 write;
+ u32 read;
+ u32 size;
+ void *bufs[0];
+};
+
+bool rtw_cbuf_full(struct rtw_cbuf *cbuf);
+bool rtw_cbuf_empty(struct rtw_cbuf *cbuf);
+bool rtw_cbuf_push(struct rtw_cbuf *cbuf, void *buf);
+void *rtw_cbuf_pop(struct rtw_cbuf *cbuf);
+struct rtw_cbuf *rtw_cbuf_alloc(u32 size);
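rtw_cbuf is a simple circular buffer of pointers; the entries live in the zero-length bufs[] array allocated right after the header. A hedged sketch of the intended push/pop pattern (some_buf is a placeholder, and the exact full/empty convention is defined in the corresponding .c file):

    struct rtw_cbuf *q = rtw_cbuf_alloc(8);
    void *item;

    if (q && !rtw_cbuf_full(q))
        rtw_cbuf_push(q, some_buf);        /* producer side */

    while (q && !rtw_cbuf_empty(q)) {
        item = rtw_cbuf_pop(q);            /* consumer side */
        /* ... process item ... */
    }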
+int wifirate2_ratetbl_inx(unsigned char rate);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/recv_osdep.h b/drivers/staging/rtl8188eu/include/recv_osdep.h
new file mode 100644
index 0000000..6912380
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/recv_osdep.h
@@ -0,0 +1,56 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RECV_OSDEP_H_
+#define __RECV_OSDEP_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+
+int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter);
+void _rtw_free_recv_priv(struct recv_priv *precvpriv);
+
+
+s32 rtw_recv_entry(union recv_frame *precv_frame);
+int rtw_recv_indicatepkt(struct adapter *adapter, union recv_frame *recv_frame);
+void rtw_recv_returnpacket(struct net_device *cnxt, struct sk_buff *retpkt);
+
+void rtw_hostapd_mlme_rx(struct adapter *padapter, union recv_frame *recv_fr);
+void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup);
+
+int rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter);
+void rtw_free_recv_priv(struct recv_priv *precvpriv);
+
+int rtw_os_recv_resource_init(struct recv_priv *recvpr, struct adapter *adapt);
+int rtw_os_recv_resource_alloc(struct adapter *adapt, union recv_frame *recvfr);
+void rtw_os_recv_resource_free(struct recv_priv *precvpriv);
+
+int rtw_os_recvbuf_resource_alloc(struct adapter *adapt, struct recv_buf *buf);
+int rtw_os_recvbuf_resource_free(struct adapter *adapt, struct recv_buf *buf);
+
+void rtw_os_read_port(struct adapter *padapter, struct recv_buf *precvbuf);
+
+void rtw_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl);
+int nat25_handle_frame(struct adapter *priv, struct sk_buff *skb);
+int _netdev_open(struct net_device *pnetdev);
+int netdev_open(struct net_device *pnetdev);
+int netdev_close(struct net_device *pnetdev);
+
+#endif /* __RECV_OSDEP_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h b/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
new file mode 100644
index 0000000..b32bc28
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
@@ -0,0 +1,122 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTL8188E_CMD_H__
+#define __RTL8188E_CMD_H__
+
+enum RTL8188E_H2C_CMD_ID {
+ /* Class Common */
+ H2C_COM_RSVD_PAGE = 0x00,
+ H2C_COM_MEDIA_STATUS_RPT = 0x01,
+ H2C_COM_SCAN = 0x02,
+ H2C_COM_KEEP_ALIVE = 0x03,
+ H2C_COM_DISCNT_DECISION = 0x04,
+ H2C_COM_INIT_OFFLOAD = 0x06,
+ H2C_COM_REMOTE_WAKE_CTL = 0x07,
+ H2C_COM_AP_OFFLOAD = 0x08,
+ H2C_COM_BCN_RSVD_PAGE = 0x09,
+ H2C_COM_PROB_RSP_RSVD_PAGE = 0x0A,
+
+ /* Class PS */
+ H2C_PS_PWR_MODE = 0x20,
+ H2C_PS_TUNE_PARA = 0x21,
+ H2C_PS_TUNE_PARA_2 = 0x22,
+ H2C_PS_LPS_PARA = 0x23,
+ H2C_PS_P2P_OFFLOAD = 0x24,
+
+ /* Class DM */
+ H2C_DM_MACID_CFG = 0x40,
+ H2C_DM_TXBF = 0x41,
+
+ /* Class BT */
+ H2C_BT_COEX_MASK = 0x60,
+ H2C_BT_COEX_GPIO_MODE = 0x61,
+ H2C_BT_DAC_SWING_VAL = 0x62,
+ H2C_BT_PSD_RST = 0x63,
+
+ /* Class */
+ H2C_RESET_TSF = 0xc0,
+};
+
+struct cmd_msg_parm {
+ u8 eid; /* element id */
+ u8 sz; /* sz */
+ u8 buf[6];
+};
+
+enum {
+ PWRS
+};
+
+struct setpwrmode_parm {
+ u8 Mode;/* 0:Active,1:LPS,2:WMMPS */
+ u8 SmartPS_RLBM;/* LPS= 0:PS_Poll,1:PS_Poll,2:NullData,WMM= 0:PS_Poll,1:NullData */
+ u8 AwakeInterval; /* unit: beacon interval */
+ u8 bAllQueueUAPSD;
+ u8 PwrState;/* AllON(0x0c),RFON(0x04),RFOFF(0x00) */
+};
+
+struct H2C_SS_RFOFF_PARAM {
+ u8 ROFOn; /* 1: on, 0:off */
+ u16 gpio_period; /* unit: 1024 us */
+} __packed;
+
+struct joinbssrpt_parm {
+ u8 OpMode; /* RT_MEDIA_STATUS */
+};
+
+struct rsvdpage_loc {
+ u8 LocProbeRsp;
+ u8 LocPsPoll;
+ u8 LocNullData;
+ u8 LocQosNull;
+ u8 LocBTQosNull;
+};
+
+struct P2P_PS_Offload_t {
+ u8 Offload_En:1;
+ u8 role:1; /* 1: Owner, 0: Client */
+ u8 CTWindow_En:1;
+ u8 NoA0_En:1;
+ u8 NoA1_En:1;
+ u8 AllStaSleep:1; /* Only valid in Owner */
+ u8 discovery:1;
+ u8 rsvd:1;
+};
+
+struct P2P_PS_CTWPeriod_t {
+ u8 CTWPeriod; /* TU */
+};
+
+/* host message to firmware cmd */
+void rtl8188e_set_FwPwrMode_cmd(struct adapter *padapter, u8 Mode);
+void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *padapter, u8 mstatus);
+u8 rtl8188e_set_rssi_cmd(struct adapter *padapter, u8 *param);
+u8 rtl8188e_set_raid_cmd(struct adapter *padapter, u32 mask);
+void rtl8188e_Add_RateATid(struct adapter *padapter, u32 bitmap, u8 arg,
+ u8 rssi_level);
+
+#ifdef CONFIG_88EU_P2P
+void rtl8188e_set_p2p_ps_offload_cmd(struct adapter *adapt, u8 p2p_ps_state);
+#endif /* CONFIG_88EU_P2P */
+
+void CheckFwRsvdPageContent(struct adapter *adapt);
+void rtl8188e_set_FwMediaStatus_cmd(struct adapter *adapt, __le16 mstatus_rpt);
+
+#endif/* __RTL8188E_CMD_H__ */
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_dm.h b/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
new file mode 100644
index 0000000..97a3175
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
@@ -0,0 +1,62 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTL8188E_DM_H__
+#define __RTL8188E_DM_H__
+enum {
+ UP_LINK,
+ DOWN_LINK,
+};
+/* duplicate code, will move to ODM ######### */
+#define IQK_MAC_REG_NUM 4
+#define IQK_ADDA_REG_NUM 16
+#define IQK_BB_REG_NUM 9
+#define HP_THERMAL_NUM 8
+/* duplicate code, will move to ODM ######### */
+struct dm_priv {
+ u8 DM_Type;
+ u8 DMFlag;
+ u8 InitDMFlag;
+ u32 InitODMFlag;
+
+ /* Upper and Lower Signal threshold for Rate Adaptive*/
+ int UndecoratedSmoothedPWDB;
+ int UndecoratedSmoothedCCK;
+ int EntryMinUndecoratedSmoothedPWDB;
+ int EntryMaxUndecoratedSmoothedPWDB;
+ int MinUndecoratedPWDBForDM;
+ int LastMinUndecoratedPWDBForDM;
+
+ /* for High Power */
+ u8 bDynamicTxPowerEnable;
+ u8 LastDTPLvl;
+ u8 DynamicTxHighPowerLvl;/* Tx Power Control for Near/Far Range */
+ u8 PowerIndex_backup[6];
+};
+
+void rtl8188e_init_dm_priv(struct adapter *adapt);
+void rtl8188e_deinit_dm_priv(struct adapter *adapt);
+void rtl8188e_InitHalDm(struct adapter *adapt);
+void rtl8188e_HalDmWatchDog(struct adapter *adapt);
+
+void AntDivCompare8188E(struct adapter *adapt, struct wlan_bssid_ex *dst,
+ struct wlan_bssid_ex *src);
+u8 AntDivBeforeLink8188E(struct adapter *adapt);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
new file mode 100644
index 0000000..555c801
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -0,0 +1,487 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTL8188E_HAL_H__
+#define __RTL8188E_HAL_H__
+
+
+/* include HAL Related header after HAL Related compiling flags */
+#include "rtl8188e_spec.h"
+#include "Hal8188EPhyReg.h"
+#include "Hal8188EPhyCfg.h"
+#include "rtl8188e_rf.h"
+#include "rtl8188e_dm.h"
+#include "rtl8188e_recv.h"
+#include "rtl8188e_xmit.h"
+#include "rtl8188e_cmd.h"
+#include "Hal8188EPwrSeq.h"
+#include "rtl8188e_sreset.h"
+#include "rtw_efuse.h"
+
+#include "odm_precomp.h"
+
+/* Fw Array */
+#define Rtl8188E_FwImageArray Rtl8188EFwImgArray
+#define Rtl8188E_FWImgArrayLength Rtl8188EFWImgArrayLength
+
+#define RTL8188E_FW_UMC_IMG "rtl8188E\\rtl8188efw.bin"
+#define RTL8188E_PHY_REG "rtl8188E\\PHY_REG_1T.txt"
+#define RTL8188E_PHY_RADIO_A "rtl8188E\\radio_a_1T.txt"
+#define RTL8188E_PHY_RADIO_B "rtl8188E\\radio_b_1T.txt"
+#define RTL8188E_AGC_TAB "rtl8188E\\AGC_TAB_1T.txt"
+#define RTL8188E_PHY_MACREG "rtl8188E\\MAC_REG.txt"
+#define RTL8188E_PHY_REG_PG "rtl8188E\\PHY_REG_PG.txt"
+#define RTL8188E_PHY_REG_MP "rtl8188E\\PHY_REG_MP.txt"
+
+/* RTL8188E Power Configuration CMDs for USB/SDIO interfaces */
+#define Rtl8188E_NIC_PWR_ON_FLOW rtl8188E_power_on_flow
+#define Rtl8188E_NIC_RF_OFF_FLOW rtl8188E_radio_off_flow
+#define Rtl8188E_NIC_DISABLE_FLOW rtl8188E_card_disable_flow
+#define Rtl8188E_NIC_ENABLE_FLOW rtl8188E_card_enable_flow
+#define Rtl8188E_NIC_SUSPEND_FLOW rtl8188E_suspend_flow
+#define Rtl8188E_NIC_RESUME_FLOW rtl8188E_resume_flow
+#define Rtl8188E_NIC_PDN_FLOW rtl8188E_hwpdn_flow
+#define Rtl8188E_NIC_LPS_ENTER_FLOW rtl8188E_enter_lps_flow
+#define Rtl8188E_NIC_LPS_LEAVE_FLOW rtl8188E_leave_lps_flow
+
+#define DRVINFO_SZ 4 /* unit is 8bytes */
+#define PageNum_128(_Len) (u32)(((_Len)>>7) + ((_Len) & 0x7F ? 1 : 0))
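PageNum_128() converts a byte length into the number of 128-byte TX buffer pages, rounding up; a few worked values for illustration:

    PageNum_128(128);   /* == 1                     */
    PageNum_128(129);   /* == 2                     */
    PageNum_128(300);   /* == 3  (300 = 2*128 + 44) */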
+
+/* download firmware related data structure */
+#define FW_8188E_SIZE 0x4000 /* 16384,16k */
+#define FW_8188E_START_ADDRESS 0x1000
+#define FW_8188E_END_ADDRESS 0x1FFF /* 0x5FFF */
+
+#define MAX_PAGE_SIZE 4096 /* @ page : 4k bytes */
+
+#define IS_FW_HEADER_EXIST(_pFwHdr) \
+ ((le16_to_cpu(_pFwHdr->Signature)&0xFFF0) == 0x92C0 || \
+ (le16_to_cpu(_pFwHdr->Signature)&0xFFF0) == 0x88C0 || \
+ (le16_to_cpu(_pFwHdr->Signature)&0xFFF0) == 0x2300 || \
+ (le16_to_cpu(_pFwHdr->Signature)&0xFFF0) == 0x88E0)
+
+enum firmware_source {
+ FW_SOURCE_IMG_FILE = 0,
+ FW_SOURCE_HEADER_FILE = 1, /* from header file */
+};
+
+struct rt_firmware {
+ enum firmware_source eFWSource;
+ u8 *szFwBuffer;
+ u32 ulFwLength;
+};
+
+/* Byte ordering must be handled carefully in this structure */
+
+struct rt_firmware_hdr {
+ /* 8-byte alignment required */
+ /* LONG WORD 0 ---- */
+ __le16 Signature; /* 92C0: test chip; 92C,
+ * 88C0: test chip; 88C1: MP A-cut;
+ * 92C1: MP A-cut */
+ u8 Category; /* AP/NIC and USB/PCI */
+ u8 Function; /* Reserved for different FW function
+ * indication, for further use when
+ * driver needs to download different
+ * FW for different conditions */
+ __le16 Version; /* FW Version */
+ u8 Subversion; /* FW Subversion, default 0x00 */
+ u16 Rsvd1;
+
+ /* LONG WORD 1 ---- */
+ u8 Month; /* Release time Month field */
+ u8 Date; /* Release time Date field */
+ u8 Hour; /* Release time Hour field */
+ u8 Minute; /* Release time Minute field */
+ __le16 RamCodeSize; /* The size of RAM code */
+ u8 Foundry;
+ u8 Rsvd2;
+
+ /* LONG WORD 2 ---- */
+ __le32 SvnIdx; /* The SVN entry index */
+ u32 Rsvd3;
+
+ /* LONG WORD 3 ---- */
+ u32 Rsvd4;
+ u32 Rsvd5;
+};
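The byte-ordering note matters because the download path compares the header signature against the constants in IS_FW_HEADER_EXIST() and then skips the header before writing the RAM code. A minimal sketch, assuming fw/fwlen describe the loaded image (hypothetical names, illustrative only):

    struct rt_firmware_hdr *hdr = (struct rt_firmware_hdr *)fw;

    if (IS_FW_HEADER_EXIST(hdr)) {
        pr_info("88E fw v%d.%d\n",
                le16_to_cpu(hdr->Version), hdr->Subversion);
        fw    += sizeof(*hdr);     /* skip the header        */
        fwlen -= sizeof(*hdr);     /* download RAM code only */
    }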
+
+#define DRIVER_EARLY_INT_TIME 0x05
+#define BCN_DMA_ATIME_INT_TIME 0x02
+
+enum usb_rx_agg_mode {
+ USB_RX_AGG_DISABLE,
+ USB_RX_AGG_DMA,
+ USB_RX_AGG_USB,
+ USB_RX_AGG_MIX
+};
+
+#define MAX_RX_DMA_BUFFER_SIZE_88E \
+ 0x2400 /* 9k for 88E normal chip, MaxRxBuff=10k-max(TxReportSize(64*8),
+ * WOLPattern(16*24)) */
+
+#define MAX_TX_REPORT_BUFFER_SIZE 0x0400 /* 1k */
+
+
+/* BK, BE, VI, VO, HCCA, MANAGEMENT, COMMAND, HIGH, BEACON. */
+#define MAX_TX_QUEUE 9
+
+#define TX_SELE_HQ BIT(0) /* High Queue */
+#define TX_SELE_LQ BIT(1) /* Low Queue */
+#define TX_SELE_NQ BIT(2) /* Normal Queue */
+
+/* Note: We will divide the number of pages equally for each queue other
+ * than the public queue! */
+/* 22k = 22528 bytes = 176 pages (@page = 128 bytes) */
+/* must reserve about 7 pages for LPS => 176-7 = 169 (0xA9) */
+/* 2*BCN / 1*ps-poll / 1*null-data /1*prob_rsp /1*QOS null-data /1*BT QOS
+ * null-data */
+
+#define TX_TOTAL_PAGE_NUMBER_88E 0xA9/* 169 (21632=> 21k) */
+
+#define TX_PAGE_BOUNDARY_88E (TX_TOTAL_PAGE_NUMBER_88E + 1)
+
+/* Note: For Normal Chip Setting ,modify later */
+#define WMM_NORMAL_TX_TOTAL_PAGE_NUMBER \
+ TX_TOTAL_PAGE_NUMBER_88E /* 0xA9 , 0xb0=>176=>22k */
+#define WMM_NORMAL_TX_PAGE_BOUNDARY_88E \
+ (WMM_NORMAL_TX_TOTAL_PAGE_NUMBER + 1) /* 0xA9 */
+
+/* Chip specific */
+#define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3)
+#define CHIP_BONDING_92C_1T2R 0x1
+#define CHIP_BONDING_88C_USB_MCARD 0x2
+#define CHIP_BONDING_88C_USB_HP 0x1
+#include "HalVerDef.h"
+#include "hal_com.h"
+
+/* Channel Plan */
+enum ChannelPlan {
+ CHPL_FCC = 0,
+ CHPL_IC = 1,
+ CHPL_ETSI = 2,
+ CHPL_SPA = 3,
+ CHPL_FRANCE = 4,
+ CHPL_MKK = 5,
+ CHPL_MKK1 = 6,
+ CHPL_ISRAEL = 7,
+ CHPL_TELEC = 8,
+ CHPL_GLOBAL = 9,
+ CHPL_WORLD = 10,
+};
+
+struct txpowerinfo24g {
+ u8 IndexCCK_Base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
+ u8 IndexBW40_Base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
+ /* If only one tx, only BW20 and OFDM are used. */
+ s8 CCK_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+ s8 OFDM_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+ s8 BW20_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+ s8 BW40_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+};
+
+#define EFUSE_REAL_CONTENT_LEN 512
+#define EFUSE_MAX_SECTION 16
+#define EFUSE_IC_ID_OFFSET 506 /* For some inferior IC purpose*/
+#define AVAILABLE_EFUSE_ADDR(addr) (addr < EFUSE_REAL_CONTENT_LEN)
+/* To prevent out of boundary programming case, */
+/* leave 1byte and program full section */
+/* 9 bytes + 1 byte + 5 bytes and pre 1 byte. */
+/* For worst case: */
+/* | 1byte|----8bytes----|1byte|--5bytes--| */
+/* | | Reserved(14bytes) | */
+
+/* PG data excludes the header, dummy 6 bytes from CP test and reserved 1 byte. */
+#define EFUSE_OOB_PROTECT_BYTES 15
+
+#define HWSET_MAX_SIZE_88E 512
+
+#define EFUSE_REAL_CONTENT_LEN_88E 256
+#define EFUSE_MAP_LEN_88E 512
+#define EFUSE_MAP_LEN EFUSE_MAP_LEN_88E
+#define EFUSE_MAX_SECTION_88E 64
+#define EFUSE_MAX_WORD_UNIT_88E 4
+#define EFUSE_IC_ID_OFFSET_88E 506
+#define AVAILABLE_EFUSE_ADDR_88E(addr) \
+ (addr < EFUSE_REAL_CONTENT_LEN_88E)
+/* To prevent out of boundary programming case, leave 1byte and program
+ * full section */
+/* 9 bytes + 1 byte + 5 bytes and pre 1 byte. */
+/* For worst case: */
+/* | 2byte|----8bytes----|1byte|--7bytes--| 92D */
+/* PG data excludes the header, dummy 7 bytes from CP test and reserved 1 byte. */
+#define EFUSE_OOB_PROTECT_BYTES_88E 18
+#define EFUSE_PROTECT_BYTES_BANK_88E 16
+
+/* EFUSE for BT definition */
+#define EFUSE_BT_REAL_CONTENT_LEN 1536 /* 512*3 */
+#define EFUSE_BT_MAP_LEN 1024 /* 1k bytes */
+#define EFUSE_BT_MAX_SECTION 128 /* 1024/8 */
+
+#define EFUSE_PROTECT_BYTES_BANK 16
+
+/* For RTL8723 WiFi/BT/GPS multi-function configuration. */
+enum rt_multi_func {
+ RT_MULTI_FUNC_NONE = 0x00,
+ RT_MULTI_FUNC_WIFI = 0x01,
+ RT_MULTI_FUNC_BT = 0x02,
+ RT_MULTI_FUNC_GPS = 0x04,
+};
+
+/* For RTL8723 regulator mode. */
+enum rt_regulator_mode {
+ RT_SWITCHING_REGULATOR = 0,
+ RT_LDO_REGULATOR = 1,
+};
+
+struct hal_data_8188e {
+ struct HAL_VERSION VersionID;
+ enum rt_multi_func MultiFunc; /* For multi-function consideration. */
+ enum rt_regulator_mode RegulatorMode; /* switching regulator or LDO */
+ u16 CustomerID;
+
+ u16 FirmwareVersion;
+ u16 FirmwareVersionRev;
+ u16 FirmwareSubVersion;
+ u16 FirmwareSignature;
+ u8 PGMaxGroup;
+ /* current WIFI_PHY values */
+ u32 ReceiveConfig;
+ enum wireless_mode CurrentWirelessMode;
+ enum ht_channel_width CurrentChannelBW;
+ u8 CurrentChannel;
+ u8 nCur40MhzPrimeSC;/* Control channel sub-carrier */
+
+ u16 BasicRateSet;
+
+ /* rf_ctrl */
+ u8 rf_chip;
+ u8 rf_type;
+ u8 NumTotalRFPath;
+
+ u8 BoardType;
+
+ /* EEPROM setting. */
+ u16 EEPROMVID;
+ u16 EEPROMPID;
+ u16 EEPROMSVID;
+ u16 EEPROMSDID;
+ u8 EEPROMCustomerID;
+ u8 EEPROMSubCustomerID;
+ u8 EEPROMVersion;
+ u8 EEPROMRegulatory;
+
+ u8 bTXPowerDataReadFromEEPORM;
+ u8 EEPROMThermalMeter;
+ u8 bAPKThermalMeterIgnore;
+
+ bool EepromOrEfuse;
+ /* 92C:256bytes, 88E:512bytes, we use union set (512bytes) */
+ u8 EfuseMap[2][HWSET_MAX_SIZE_512];
+ u8 EfuseUsedPercentage;
+ struct efuse_hal EfuseHal;
+
+ u8 Index24G_CCK_Base[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ u8 Index24G_BW40_Base[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ /* If only one tx, only BW20 and OFDM are used. */
+ s8 CCK_24G_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+ s8 OFDM_24G_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+ s8 BW20_24G_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+ s8 BW40_24G_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+
+ u8 TxPwrLevelCck[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
+ /* For HT 40MHZ pwr */
+ u8 TxPwrLevelHT40_1S[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
+ /* For HT 40MHZ pwr */
+ u8 TxPwrLevelHT40_2S[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
+ /* HT 20<->40 Pwr diff */
+ u8 TxPwrHt20Diff[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
+ /* For HT<->legacy pwr diff */
+ u8 TxPwrLegacyHtDiff[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
+ /* For power group */
+ u8 PwrGroupHT20[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
+ u8 PwrGroupHT40[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
+
+ u8 LegacyHTTxPowerDiff;/* Legacy to HT rate power diff */
+ /* The current Tx Power Level */
+ u8 CurrentCckTxPwrIdx;
+ u8 CurrentOfdm24GTxPwrIdx;
+ u8 CurrentBW2024GTxPwrIdx;
+ u8 CurrentBW4024GTxPwrIdx;
+
+
+ /* Read/write are allow for following hardware information variables */
+ u8 framesync;
+ u32 framesyncC34;
+ u8 framesyncMonitor;
+ u8 DefaultInitialGain[4];
+ u8 pwrGroupCnt;
+ u32 MCSTxPowerLevelOriginalOffset[MAX_PG_GROUP][16];
+ u32 CCKTxPowerLevelOriginalOffset;
+
+ u8 CrystalCap;
+ u32 AntennaTxPath; /* Antenna path Tx */
+ u32 AntennaRxPath; /* Antenna path Rx */
+ u8 BluetoothCoexist;
+ u8 ExternalPA;
+
+ u8 bLedOpenDrain; /* Open-drain support for controlling the LED.*/
+
+ u8 b1x1RecvCombine; /* for 1T1R receive combining */
+
+ u32 AcParam_BE; /* Original parameter for BE, use for EDCA turbo. */
+
+ struct bb_reg_def PHYRegDef[4]; /* Radio A/B/C/D */
+
+ u32 RfRegChnlVal[2];
+
+ /* RDG enable */
+ bool bRDGEnable;
+
+ /* for host message to fw */
+ u8 LastHMEBoxNum;
+
+ u8 fw_ractrl;
+ u8 RegTxPause;
+ /* Beacon function related global variable. */
+ u32 RegBcnCtrlVal;
+ u8 RegFwHwTxQCtrl;
+ u8 RegReg542;
+ u8 RegCR_1;
+
+ struct dm_priv dmpriv;
+ struct odm_dm_struct odmpriv;
+ struct sreset_priv srestpriv;
+
+ u8 CurAntenna;
+ u8 AntDivCfg;
+ u8 TRxAntDivType;
+
+
+ u8 bDumpRxPkt;/* for debug */
+ u8 bDumpTxPkt;/* for debug */
+ u8 FwRsvdPageStartOffset; /* Reserve page start offset except
+ * beacon in TxQ. */
+
+ /* 2010/08/09 MH Add CU power down mode. */
+ bool pwrdown;
+
+ /* Add for dual MAC 0--Mac0 1--Mac1 */
+ u32 interfaceIndex;
+
+ u8 OutEpQueueSel;
+ u8 OutEpNumber;
+
+ /* Add for USB aggregation mode dynamic scheme. */
+ bool UsbRxHighSpeedMode;
+
+ /* 2010/11/22 MH Add for slim combo debug mode selection. */
+ /* This is used to fix the drawback of the CU TSMC-A/UMC-A cut:
+ * the HW auto suspend ability closes the BT clock. */
+ bool SlimComboDbg;
+
+ u16 EfuseUsedBytes;
+
+#ifdef CONFIG_88EU_P2P
+ struct P2P_PS_Offload_t p2p_ps_offload;
+#endif
+
+ /* Auto FSM to turn on, including clock, isolation, and power control
+ * for MAC only */
+ u8 bMacPwrCtrlOn;
+
+ u32 UsbBulkOutSize;
+
+ /* Interrupt related register information. */
+ u32 IntArray[3];/* HISR0,HISR1,HSISR */
+ u32 IntrMask[3];
+ u8 C2hArray[16];
+ u8 UsbTxAggMode;
+ u8 UsbTxAggDescNum;
+ u16 HwRxPageSize; /* Hardware setting */
+ u32 MaxUsbRxAggBlock;
+
+ enum usb_rx_agg_mode UsbRxAggMode;
+ u8 UsbRxAggBlockCount; /* USB Block count. Block size is
+ * 512-byte in high speed and 64-byte
+ * in full speed */
+ u8 UsbRxAggBlockTimeout;
+ u8 UsbRxAggPageCount; /* 8192C DMA page count */
+ u8 UsbRxAggPageTimeout;
+};
+
+#define GET_HAL_DATA(__pAdapter) \
+ ((struct hal_data_8188e *)((__pAdapter)->HalData))
+#define GET_RF_TYPE(priv) (GET_HAL_DATA(priv)->rf_type)
+
+#define INCLUDE_MULTI_FUNC_BT(_Adapter) \
+ (GET_HAL_DATA(_Adapter)->MultiFunc & RT_MULTI_FUNC_BT)
+#define INCLUDE_MULTI_FUNC_GPS(_Adapter) \
+ (GET_HAL_DATA(_Adapter)->MultiFunc & RT_MULTI_FUNC_GPS)
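A brief usage sketch of the accessor macros above, assuming padapter is an initialised struct adapter (illustrative only, not part of this patch):

    struct hal_data_8188e *haldata = GET_HAL_DATA(padapter);
    u8 rf = GET_RF_TYPE(padapter);                 /* haldata->rf_type      */
    bool bt = INCLUDE_MULTI_FUNC_BT(padapter);     /* nonzero if BT present */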
+
+/* rtl8188e_hal_init.c */
+s32 rtl8188e_FirmwareDownload(struct adapter *padapter);
+void _8051Reset88E(struct adapter *padapter);
+void rtl8188e_InitializeFirmwareVars(struct adapter *padapter);
+
+
+s32 InitLLTTable(struct adapter *padapter, u8 txpktbuf_bndy);
+
+/* EFuse */
+u8 GetEEPROMSize8188E(struct adapter *padapter);
+void Hal_InitPGData88E(struct adapter *padapter);
+void Hal_EfuseParseIDCode88E(struct adapter *padapter, u8 *hwinfo);
+void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *hwinfo,
+ bool AutoLoadFail);
+
+void Hal_EfuseParseEEPROMVer88E(struct adapter *padapter, u8 *hwinfo,
+ bool AutoLoadFail);
+void rtl8188e_EfuseParseChnlPlan(struct adapter *padapter, u8 *hwinfo,
+ bool AutoLoadFail);
+void Hal_EfuseParseCustomerID88E(struct adapter *padapter, u8 *hwinfo,
+ bool AutoLoadFail);
+void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter, u8 *PROMContent,
+ bool AutoLoadFail);
+void Hal_ReadThermalMeter_88E(struct adapter *padapter, u8 *PROMContent,
+ bool AutoloadFail);
+void Hal_EfuseParseXtal_8188E(struct adapter *pAdapter, u8 *hwinfo,
+ bool AutoLoadFail);
+void Hal_EfuseParseBoardType88E(struct adapter *pAdapter, u8 *hwinfo,
+ bool AutoLoadFail);
+void Hal_ReadPowerSavingMode88E(struct adapter *pAdapter, u8 *hwinfo,
+ bool AutoLoadFail);
+
+bool HalDetectPwrDownMode88E(struct adapter *Adapter);
+
+void Hal_InitChannelPlan(struct adapter *padapter);
+void rtl8188e_set_hal_ops(struct hal_ops *pHalFunc);
+
+/* register */
+void SetBcnCtrlReg(struct adapter *padapter, u8 SetBits, u8 ClearBits);
+
+void rtl8188e_clone_haldata(struct adapter *dst, struct adapter *src);
+void rtl8188e_start_thread(struct adapter *padapter);
+void rtl8188e_stop_thread(struct adapter *padapter);
+
+void rtw_IOL_cmd_tx_pkt_buf_dump(struct adapter *Adapter, int len);
+s32 rtl8188e_iol_efuse_patch(struct adapter *padapter);
+void rtw_cancel_all_timer(struct adapter *padapter);
+void _ps_open_RF(struct adapter *adapt);
+
+#endif /* __RTL8188E_HAL_H__ */
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_led.h b/drivers/staging/rtl8188eu/include/rtl8188e_led.h
new file mode 100644
index 0000000..c0147e7
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_led.h
@@ -0,0 +1,35 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTL8188E_LED_H__
+#define __RTL8188E_LED_H__
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+
+/* */
+/* Interface to manipulate LED objects. */
+/* */
+void rtl8188eu_InitSwLeds(struct adapter *padapter);
+void rtl8188eu_DeInitSwLeds(struct adapter *padapter);
+void SwLedOn(struct adapter *padapter, struct LED_871x *pLed);
+void SwLedOff(struct adapter *padapter, struct LED_871x *pLed);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
new file mode 100644
index 0000000..02ccb40
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
@@ -0,0 +1,69 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTL8188E_RECV_H__
+#define __RTL8188E_RECV_H__
+
+#define TX_RPT1_PKT_LEN 8
+
+#define RECV_BLK_SZ 512
+#define RECV_BLK_CNT 16
+#define RECV_BLK_TH RECV_BLK_CNT
+#define RECV_BULK_IN_ADDR 0x80
+#define RECV_INT_IN_ADDR 0x81
+
+#define NR_PREALLOC_RECV_SKB (8)
+
+#define NR_RECVBUFF (4)
+
+#define MAX_RECVBUF_SZ (15360) /* 15k < 16k */
+
+struct phy_stat {
+ unsigned int phydw0;
+ unsigned int phydw1;
+ unsigned int phydw2;
+ unsigned int phydw3;
+ unsigned int phydw4;
+ unsigned int phydw5;
+ unsigned int phydw6;
+ unsigned int phydw7;
+};
+
+/* Rx smooth factor */
+#define Rx_Smooth_Factor (20)
+
+enum rx_packet_type {
+ NORMAL_RX,/* Normal rx packet */
+ TX_REPORT1,/* CCX */
+ TX_REPORT2,/* TX RPT */
+ HIS_REPORT,/* USB HISR RPT */
+};
+
+#define INTERRUPT_MSG_FORMAT_LEN 60
+void rtl8188eu_init_recvbuf(struct adapter *padapter, struct recv_buf *buf);
+s32 rtl8188eu_init_recv_priv(struct adapter *padapter);
+void rtl8188eu_free_recv_priv(struct adapter *padapter);
+void rtl8188eu_recv_hdl(struct adapter *padapter, struct recv_buf *precvbuf);
+void rtl8188eu_recv_tasklet(void *priv);
+void rtl8188e_query_rx_phy_status(union recv_frame *fr, struct phy_stat *phy);
+void rtl8188e_process_phy_info(struct adapter *padapter, void *prframe);
+void update_recvframe_phyinfo_88e(union recv_frame *fra, struct phy_stat *phy);
+void update_recvframe_attrib_88e(union recv_frame *fra, struct recv_stat *stat);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_rf.h b/drivers/staging/rtl8188eu/include/rtl8188e_rf.h
new file mode 100644
index 0000000..10fc356
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_rf.h
@@ -0,0 +1,36 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTL8188E_RF_H__
+#define __RTL8188E_RF_H__
+
+#define RF6052_MAX_TX_PWR 0x3F
+#define RF6052_MAX_REG 0x3F
+#define RF6052_MAX_PATH 2
+
+
+int PHY_RF6052_Config8188E(struct adapter *Adapter);
+void rtl8188e_RF_ChangeTxPath(struct adapter *Adapter, u16 DataRate);
+void rtl8188e_PHY_RF6052SetBandwidth(struct adapter *Adapter,
+ enum ht_channel_width Bandwidth);
+void rtl8188e_PHY_RF6052SetCckTxPower(struct adapter *Adapter, u8 *level);
+void rtl8188e_PHY_RF6052SetOFDMTxPower(struct adapter *Adapter, u8 *ofdm,
+ u8 *pwrbw20, u8 *pwrbw40, u8 channel);
+
+#endif/* __RTL8188E_RF_H__ */
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
new file mode 100644
index 0000000..c12c56b9
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
@@ -0,0 +1,1439 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *******************************************************************************/
+#ifndef __RTL8188E_SPEC_H__
+#define __RTL8188E_SPEC_H__
+
+#ifndef BIT
+#define BIT(x) (1 << (x))
+#endif
+
+#define BIT0 0x00000001
+#define BIT1 0x00000002
+#define BIT2 0x00000004
+#define BIT3 0x00000008
+#define BIT4 0x00000010
+#define BIT5 0x00000020
+#define BIT6 0x00000040
+#define BIT7 0x00000080
+#define BIT8 0x00000100
+#define BIT9 0x00000200
+#define BIT10 0x00000400
+#define BIT11 0x00000800
+#define BIT12 0x00001000
+#define BIT13 0x00002000
+#define BIT14 0x00004000
+#define BIT15 0x00008000
+#define BIT16 0x00010000
+#define BIT17 0x00020000
+#define BIT18 0x00040000
+#define BIT19 0x00080000
+#define BIT20 0x00100000
+#define BIT21 0x00200000
+#define BIT22 0x00400000
+#define BIT23 0x00800000
+#define BIT24 0x01000000
+#define BIT25 0x02000000
+#define BIT26 0x04000000
+#define BIT27 0x08000000
+#define BIT28 0x10000000
+#define BIT29 0x20000000
+#define BIT30 0x40000000
+#define BIT31 0x80000000
+
+/* 8192C Register offset definition */
+
+#define HAL_PS_TIMER_INT_DELAY 50 /* 50 microseconds */
+#define HAL_92C_NAV_UPPER_UNIT 128 /* micro-second */
+
+#define MAC_ADDR_LEN 6
+/* 8188E PKT_BUFF_ACCESS_CTRL value */
+#define TXPKT_BUF_SELECT 0x69
+#define RXPKT_BUF_SELECT 0xA5
+#define DISABLE_TRXPKT_BUF_ACCESS 0x0
+
+
+/* 0x0000h ~ 0x00FFh System Configuration */
+#define REG_SYS_ISO_CTRL 0x0000
+#define REG_SYS_FUNC_EN 0x0002
+#define REG_APS_FSMCO 0x0004
+#define REG_SYS_CLKR 0x0008
+#define REG_9346CR 0x000A
+#define REG_EE_VPD 0x000C
+#define REG_AFE_MISC 0x0010
+#define REG_SPS0_CTRL 0x0011
+#define REG_SPS_OCP_CFG 0x0018
+#define REG_RSV_CTRL 0x001C
+#define REG_RF_CTRL 0x001F
+#define REG_LDOA15_CTRL 0x0020
+#define REG_LDOV12D_CTRL 0x0021
+#define REG_LDOHCI12_CTRL 0x0022
+#define REG_LPLDO_CTRL 0x0023
+#define REG_AFE_XTAL_CTRL 0x0024
+#define REG_AFE_PLL_CTRL 0x0028
+#define REG_APE_PLL_CTRL_EXT 0x002c
+#define REG_EFUSE_CTRL 0x0030
+#define REG_EFUSE_TEST 0x0034
+#define REG_GPIO_MUXCFG 0x0040
+#define REG_GPIO_IO_SEL 0x0042
+#define REG_MAC_PINMUX_CFG 0x0043
+#define REG_GPIO_PIN_CTRL 0x0044
+#define REG_GPIO_INTM 0x0048
+#define REG_LEDCFG0 0x004C
+#define REG_LEDCFG1 0x004D
+#define REG_LEDCFG2 0x004E
+#define REG_LEDCFG3 0x004F
+#define REG_FSIMR 0x0050
+#define REG_FSISR 0x0054
+#define REG_HSIMR 0x0058
+#define REG_HSISR 0x005c
+#define REG_GPIO_PIN_CTRL_2 0x0060 /* RTL8723 WIFI/BT/GPS
+ * Multi-Function GPIO Pin Control. */
+#define REG_GPIO_IO_SEL_2 0x0062 /* RTL8723 WIFI/BT/GPS
+ * Multi-Function GPIO Select. */
+#define REG_BB_PAD_CTRL 0x0064
+#define REG_MULTI_FUNC_CTRL 0x0068 /* RTL8723 WIFI/BT/GPS
+ * Multi-Function control source. */
+#define REG_GPIO_OUTPUT 0x006c
+#define REG_AFE_XTAL_CTRL_EXT 0x0078 /* RTL8188E */
+#define REG_XCK_OUT_CTRL 0x007c /* RTL8188E */
+#define REG_MCUFWDL 0x0080
+#define REG_WOL_EVENT 0x0081 /* RTL8188E */
+#define REG_MCUTSTCFG 0x0084
+#define REG_HMEBOX_E0 0x0088
+#define REG_HMEBOX_E1 0x008A
+#define REG_HMEBOX_E2 0x008C
+#define REG_HMEBOX_E3 0x008E
+#define REG_HMEBOX_EXT_0 0x01F0
+#define REG_HMEBOX_EXT_1 0x01F4
+#define REG_HMEBOX_EXT_2 0x01F8
+#define REG_HMEBOX_EXT_3 0x01FC
+#define REG_HIMR_88E 0x00B0
+#define REG_HISR_88E 0x00B4
+#define REG_HIMRE_88E 0x00B8
+#define REG_HISRE_88E 0x00BC
+#define REG_EFUSE_ACCESS 0x00CF /* Efuse access protection
+ * for RTL8723 */
+#define REG_BIST_SCAN 0x00D0
+#define REG_BIST_RPT 0x00D4
+#define REG_BIST_ROM_RPT 0x00D8
+#define REG_USB_SIE_INTF 0x00E0
+#define REG_PCIE_MIO_INTF 0x00E4
+#define REG_PCIE_MIO_INTD 0x00E8
+#define REG_HPON_FSM 0x00EC
+#define REG_SYS_CFG 0x00F0
+#define REG_GPIO_OUTSTS 0x00F4 /* For RTL8723 only. */
+#define REG_TYPE_ID 0x00FC
+
+#define REG_MAC_PHY_CTRL_NORMAL 0x00f8
+
+/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
+#define REG_CR 0x0100
+#define REG_PBP 0x0104
+#define REG_PKT_BUFF_ACCESS_CTRL 0x0106
+#define REG_TRXDMA_CTRL 0x010C
+#define REG_TRXFF_BNDY 0x0114
+#define REG_TRXFF_STATUS 0x0118
+#define REG_RXFF_PTR 0x011C
+/* define REG_HIMR 0x0120 */
+/* define REG_HISR 0x0124 */
+#define REG_HIMRE 0x0128
+#define REG_HISRE 0x012C
+#define REG_CPWM 0x012F
+#define REG_FWIMR 0x0130
+#define REG_FTIMR 0x0138
+#define REG_FWISR 0x0134
+#define REG_PKTBUF_DBG_CTRL 0x0140
+#define REG_PKTBUF_DBG_ADDR (REG_PKTBUF_DBG_CTRL)
+#define REG_RXPKTBUF_DBG (REG_PKTBUF_DBG_CTRL+2)
+#define REG_TXPKTBUF_DBG (REG_PKTBUF_DBG_CTRL+3)
+#define REG_RXPKTBUF_CTRL (REG_PKTBUF_DBG_CTRL+2)
+#define REG_PKTBUF_DBG_DATA_L 0x0144
+#define REG_PKTBUF_DBG_DATA_H 0x0148
+
+#define REG_TC0_CTRL 0x0150
+#define REG_TC1_CTRL 0x0154
+#define REG_TC2_CTRL 0x0158
+#define REG_TC3_CTRL 0x015C
+#define REG_TC4_CTRL 0x0160
+#define REG_TCUNIT_BASE 0x0164
+#define REG_MBIST_START 0x0174
+#define REG_MBIST_DONE 0x0178
+#define REG_MBIST_FAIL 0x017C
+#define REG_32K_CTRL 0x0194 /* RTL8188E */
+#define REG_C2HEVT_MSG_NORMAL 0x01A0
+#define REG_C2HEVT_CLEAR 0x01AF
+#define REG_MCUTST_1 0x01c0
+#define REG_FMETHR 0x01C8
+#define REG_HMETFR 0x01CC
+#define REG_HMEBOX_0 0x01D0
+#define REG_HMEBOX_1 0x01D4
+#define REG_HMEBOX_2 0x01D8
+#define REG_HMEBOX_3 0x01DC
+
+#define REG_LLT_INIT 0x01E0
+
+/* 0x0200h ~ 0x027Fh TXDMA Configuration */
+#define REG_RQPN 0x0200
+#define REG_FIFOPAGE 0x0204
+#define REG_TDECTRL 0x0208
+#define REG_TXDMA_OFFSET_CHK 0x020C
+#define REG_TXDMA_STATUS 0x0210
+#define REG_RQPN_NPQ 0x0214
+
+/* 0x0280h ~ 0x02FFh RXDMA Configuration */
+#define REG_RXDMA_AGG_PG_TH 0x0280
+#define REG_RXPKT_NUM 0x0284
+#define REG_RXDMA_STATUS 0x0288
+
+/* 0x0300h ~ 0x03FFh PCIe */
+#define REG_PCIE_CTRL_REG 0x0300
+#define REG_INT_MIG 0x0304 /* Interrupt Migration */
+#define REG_BCNQ_DESA 0x0308 /* TX Beacon Descr Address */
+#define REG_HQ_DESA 0x0310 /* TX High Queue Descr Addr */
+#define REG_MGQ_DESA 0x0318 /* TX Manage Queue Descr Addr*/
+#define REG_VOQ_DESA 0x0320 /* TX VO Queue Descr Addr */
+#define REG_VIQ_DESA 0x0328 /* TX VI Queue Descr Addr */
+#define REG_BEQ_DESA 0x0330 /* TX BE Queue Descr Addr */
+#define REG_BKQ_DESA 0x0338 /* TX BK Queue Descr Addr */
+#define REG_RX_DESA 0x0340 /* RX Queue Descr Addr */
+#define REG_MDIO 0x0354 /* MDIO for Access PCIE PHY */
+#define REG_DBG_SEL 0x0360 /* Debug Selection Register */
+#define REG_PCIE_HRPWM 0x0361 /* PCIe RPWM */
+#define REG_PCIE_HCPWM 0x0363 /* PCIe CPWM */
+#define REG_WATCH_DOG 0x0368
+
+/* RTL8723 series ------------------------------ */
+#define REG_PCIE_HISR 0x03A0
+
+/* spec version 11 */
+/* 0x0400h ~ 0x047Fh Protocol Configuration */
+#define REG_VOQ_INFORMATION 0x0400
+#define REG_VIQ_INFORMATION 0x0404
+#define REG_BEQ_INFORMATION 0x0408
+#define REG_BKQ_INFORMATION 0x040C
+#define REG_MGQ_INFORMATION 0x0410
+#define REG_HGQ_INFORMATION 0x0414
+#define REG_BCNQ_INFORMATION 0x0418
+#define REG_TXPKT_EMPTY 0x041A
+
+#define REG_CPU_MGQ_INFORMATION 0x041C
+#define REG_FWHW_TXQ_CTRL 0x0420
+#define REG_HWSEQ_CTRL 0x0423
+#define REG_TXPKTBUF_BCNQ_BDNY 0x0424
+#define REG_TXPKTBUF_MGQ_BDNY 0x0425
+#define REG_LIFETIME_EN 0x0426
+#define REG_MULTI_BCNQ_OFFSET 0x0427
+#define REG_SPEC_SIFS 0x0428
+#define REG_RL 0x042A
+#define REG_DARFRC 0x0430
+#define REG_RARFRC 0x0438
+#define REG_RRSR 0x0440
+#define REG_ARFR0 0x0444
+#define REG_ARFR1 0x0448
+#define REG_ARFR2 0x044C
+#define REG_ARFR3 0x0450
+#define REG_AGGLEN_LMT 0x0458
+#define REG_AMPDU_MIN_SPACE 0x045C
+#define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045D
+#define REG_FAST_EDCA_CTRL 0x0460
+#define REG_RD_RESP_PKT_TH 0x0463
+#define REG_INIRTS_RATE_SEL 0x0480
+/* define REG_INIDATA_RATE_SEL 0x0484 */
+#define REG_POWER_STATUS 0x04A4
+#define REG_POWER_STAGE1 0x04B4
+#define REG_POWER_STAGE2 0x04B8
+#define REG_PKT_VO_VI_LIFE_TIME 0x04C0
+#define REG_PKT_BE_BK_LIFE_TIME 0x04C2
+#define REG_STBC_SETTING 0x04C4
+#define REG_PROT_MODE_CTRL 0x04C8
+#define REG_MAX_AGGR_NUM 0x04CA
+#define REG_RTS_MAX_AGGR_NUM 0x04CB
+#define REG_BAR_MODE_CTRL 0x04CC
+#define REG_RA_TRY_RATE_AGG_LMT 0x04CF
+#define REG_EARLY_MODE_CONTROL 0x4D0
+#define REG_NQOS_SEQ 0x04DC
+#define REG_QOS_SEQ 0x04DE
+#define REG_NEED_CPU_HANDLE 0x04E0
+#define REG_PKT_LOSE_RPT 0x04E1
+#define REG_PTCL_ERR_STATUS 0x04E2
+#define REG_TX_RPT_CTRL 0x04EC
+#define REG_TX_RPT_TIME 0x04F0 /* 2 byte */
+#define REG_DUMMY 0x04FC
+
+/* 0x0500h ~ 0x05FFh EDCA Configuration */
+#define REG_EDCA_VO_PARAM 0x0500
+#define REG_EDCA_VI_PARAM 0x0504
+#define REG_EDCA_BE_PARAM 0x0508
+#define REG_EDCA_BK_PARAM 0x050C
+#define REG_BCNTCFG 0x0510
+#define REG_PIFS 0x0512
+#define REG_RDG_PIFS 0x0513
+#define REG_SIFS_CTX 0x0514
+#define REG_SIFS_TRX 0x0516
+#define REG_TSFTR_SYN_OFFSET 0x0518
+#define REG_AGGR_BREAK_TIME 0x051A
+#define REG_SLOT 0x051B
+#define REG_TX_PTCL_CTRL 0x0520
+#define REG_TXPAUSE 0x0522
+#define REG_DIS_TXREQ_CLR 0x0523
+#define REG_RD_CTRL 0x0524
+/* Format for offset 540h-542h: */
+/* [3:0]: TBTT prohibit setup in unit of 32us. The time for HW getting
+ * beacon content before TBTT. */
+/* [7:4]: Reserved. */
+/* [19:8]: TBTT prohibit hold in unit of 32us. The time for HW holding
+ * to send the beacon packet. */
+/* [23:20]: Reserved */
+/* Description: */
+/* | */
+/* |<--Setup--|--Hold------------>| */
+/* --------------|---------------------- */
+/* | */
+/* TBTT */
+/* Note: We cannot update beacon content to HW or send any AC packets during
+ * the time between Setup and Hold. */
+#define REG_TBTT_PROHIBIT 0x0540
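A hedged sketch of how the setup/hold layout described above composes a value for REG_TBTT_PROHIBIT; setup_32us and hold_32us are hypothetical variables already expressed in 32 us units, and the actual write would go through the driver's register I/O helpers:

    u32 tbtt = ((setup_32us & 0x00f)) |        /* bits [3:0]:  setup */
               ((hold_32us  & 0xfff) << 8);    /* bits [19:8]: hold  */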
+#define REG_RD_NAV_NXT 0x0544
+#define REG_NAV_PROT_LEN 0x0546
+#define REG_BCN_CTRL 0x0550
+#define REG_BCN_CTRL_1 0x0551
+#define REG_MBID_NUM 0x0552
+#define REG_DUAL_TSF_RST 0x0553
+#define REG_BCN_INTERVAL 0x0554
+#define REG_DRVERLYINT 0x0558
+#define REG_BCNDMATIM 0x0559
+#define REG_ATIMWND 0x055A
+#define REG_BCN_MAX_ERR 0x055D
+#define REG_RXTSF_OFFSET_CCK 0x055E
+#define REG_RXTSF_OFFSET_OFDM 0x055F
+#define REG_TSFTR 0x0560
+#define REG_TSFTR1 0x0568
+#define REG_ATIMWND_1 0x0570
+#define REG_PSTIMER 0x0580
+#define REG_TIMER0 0x0584
+#define REG_TIMER1 0x0588
+#define REG_ACMHWCTRL 0x05C0
+
+/* define REG_FW_TSF_SYNC_CNT 0x04A0 */
+#define REG_FW_RESET_TSF_CNT_1 0x05FC
+#define REG_FW_RESET_TSF_CNT_0 0x05FD
+#define REG_FW_BCN_DIS_CNT 0x05FE
+
+/* 0x0600h ~ 0x07FFh WMAC Configuration */
+#define REG_APSD_CTRL 0x0600
+#define REG_BWOPMODE 0x0603
+#define REG_TCR 0x0604
+#define REG_RCR 0x0608
+#define REG_RX_PKT_LIMIT 0x060C
+#define REG_RX_DLK_TIME 0x060D
+#define REG_RX_DRVINFO_SZ 0x060F
+
+#define REG_MACID 0x0610
+#define REG_BSSID 0x0618
+#define REG_MAR 0x0620
+#define REG_MBIDCAMCFG 0x0628
+
+#define REG_USTIME_EDCA 0x0638
+#define REG_MAC_SPEC_SIFS 0x063A
+
+/* 20100719 Joseph: Hardware register definition change. (HW datasheet v54) */
+/* [15:8]SIFS_R2T_OFDM, [7:0]SIFS_R2T_CCK */
+#define REG_R2T_SIFS 0x063C
+/* [15:8]SIFS_T2T_OFDM, [7:0]SIFS_T2T_CCK */
+#define REG_T2T_SIFS 0x063E
+#define REG_ACKTO 0x0640
+#define REG_CTS2TO 0x0641
+#define REG_EIFS 0x0642
+
+/* RXERR_RPT */
+#define RXERR_TYPE_OFDM_PPDU 0
+#define RXERR_TYPE_OFDM_false_ALARM 1
+#define RXERR_TYPE_OFDM_MPDU_OK 2
+#define RXERR_TYPE_OFDM_MPDU_FAIL 3
+#define RXERR_TYPE_CCK_PPDU 4
+#define RXERR_TYPE_CCK_false_ALARM 5
+#define RXERR_TYPE_CCK_MPDU_OK 6
+#define RXERR_TYPE_CCK_MPDU_FAIL 7
+#define RXERR_TYPE_HT_PPDU 8
+#define RXERR_TYPE_HT_false_ALARM 9
+#define RXERR_TYPE_HT_MPDU_TOTAL 10
+#define RXERR_TYPE_HT_MPDU_OK 11
+#define RXERR_TYPE_HT_MPDU_FAIL 12
+#define RXERR_TYPE_RX_FULL_DROP 15
+
+#define RXERR_COUNTER_MASK 0xFFFFF
+#define RXERR_RPT_RST BIT(27)
+#define _RXERR_RPT_SEL(type) ((type) << 28)
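A brief sketch of how these fields compose an RXERR_RPT control value: the type select goes into bits [31:28], RXERR_RPT_RST clears the counter, and the count itself sits in bits [19:0] of the register (illustrative only; register access goes through the driver's I/O helpers):

    u32 ctrl = _RXERR_RPT_SEL(RXERR_TYPE_OFDM_MPDU_OK) | RXERR_RPT_RST;
    /* after writing ctrl to REG_RXERR_RPT, the counter is:         */
    /*     count = read_value & RXERR_COUNTER_MASK;   (bits [19:0]) */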
+
+/* Note: */
+/* The NAV upper value is very important to the WiFi 11n 5.2.3 NAV test.
+ * The default value is always too small, but the WiFi TestPlan tests
+ * with 25,000 microseconds of NAV by sending CTS in the air.
+ * We must update this value to more than 25,000 microseconds to pass
+ * the item. The offset of NAV_UPPER in the 8192C Spec is incorrect;
+ * the offset should be 0x0652. */
+#define REG_NAV_UPPER 0x0652 /* unit of 128 */
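Since REG_NAV_UPPER is programmed in units of 128 microseconds, covering the 25,000 us NAV mentioned above means rounding up; a small worked example:

    u8 nav_upper = DIV_ROUND_UP(25000, 128);   /* 196 -> 196 * 128 = 25088 us */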
+
+/* WMA, BA, CCX */
+/* define REG_NAV_CTRL 0x0650 */
+#define REG_BACAMCMD 0x0654
+#define REG_BACAMCONTENT 0x0658
+#define REG_LBDLY 0x0660
+#define REG_FWDLY 0x0661
+#define REG_RXERR_RPT 0x0664
+#define REG_WMAC_TRXPTCL_CTL 0x0668
+
+/* Security */
+#define REG_CAMCMD 0x0670
+#define REG_CAMWRITE 0x0674
+#define REG_CAMREAD 0x0678
+#define REG_CAMDBG 0x067C
+#define REG_SECCFG 0x0680
+
+/* Power */
+#define REG_WOW_CTRL 0x0690
+#define REG_PS_RX_INFO 0x0692
+#define REG_UAPSD_TID 0x0693
+#define REG_WKFMCAM_CMD 0x0698
+#define REG_WKFMCAM_NUM_88E 0x698
+#define REG_RXFLTMAP0 0x06A0
+#define REG_RXFLTMAP1 0x06A2
+#define REG_RXFLTMAP2 0x06A4
+#define REG_BCN_PSR_RPT 0x06A8
+#define REG_BT_COEX_TABLE 0x06C0
+
+/* Hardware Port 2 */
+#define REG_MACID1 0x0700
+#define REG_BSSID1 0x0708
+
+/* 0xFE00h ~ 0xFE55h USB Configuration */
+#define REG_USB_INFO 0xFE17
+#define REG_USB_SPECIAL_OPTION 0xFE55
+#define REG_USB_DMA_AGG_TO 0xFE5B
+#define REG_USB_AGG_TO 0xFE5C
+#define REG_USB_AGG_TH 0xFE5D
+
+/* For normal chip */
+#define REG_NORMAL_SIE_VID 0xFE60 /* 0xFE60~0xFE61 */
+#define REG_NORMAL_SIE_PID 0xFE62 /* 0xFE62~0xFE63 */
+#define REG_NORMAL_SIE_OPTIONAL 0xFE64
+#define REG_NORMAL_SIE_EP 0xFE65 /* 0xFE65~0xFE67 */
+#define REG_NORMAL_SIE_PHY 0xFE68 /* 0xFE68~0xFE6B */
+#define REG_NORMAL_SIE_OPTIONAL2 0xFE6C
+#define REG_NORMAL_SIE_GPS_EP 0xFE6D /* 0xFE6D, for RTL8723 only. */
+#define REG_NORMAL_SIE_MAC_ADDR 0xFE70 /* 0xFE70~0xFE75 */
+#define REG_NORMAL_SIE_STRING 0xFE80 /* 0xFE80~0xFEDF */
+
+/* TODO: use these definition when using REG_xxx naming rule. */
+/* NOTE: DO NOT Remove these definition. Use later. */
+
+#define EFUSE_CTRL REG_EFUSE_CTRL /* E-Fuse Control. */
+#define EFUSE_TEST REG_EFUSE_TEST /* E-Fuse Test. */
+#define MSR (REG_CR + 2) /* Media Status reg */
+#define ISR REG_HISR_88E
+/* Timing Sync Function Timer Register. */
+#define TSFR REG_TSFTR
+
+#define PBP REG_PBP
+
+/* Redefine the MACID register, to be compatible with prior ICs. */
+/* MAC ID Register, Offset 0x0050-0x0053 */
+#define IDR0 REG_MACID
+/* MAC ID Register, Offset 0x0054-0x0055 */
+#define IDR4 (REG_MACID + 4)
+
+/* 9. Security Control Registers (Offset: ) */
+/* IN 8190 Data Sheet is called CAMcmd */
+#define RWCAM REG_CAMCMD
+/* Software write CAM input content */
+#define WCAMI REG_CAMWRITE
+/* Software read/write CAM config */
+#define RCAMO REG_CAMREAD
+#define CAMDBG REG_CAMDBG
+/* Security Configuration Register */
+#define SECR REG_SECCFG
+
+/* Unused register */
+#define UnusedRegister 0x1BF
+#define DCAM UnusedRegister
+#define PSR UnusedRegister
+#define BBAddr UnusedRegister
+#define PhyDataR UnusedRegister
+
+/* Min Spacing related settings. */
+#define MAX_MSS_DENSITY_2T 0x13
+#define MAX_MSS_DENSITY_1T 0x0A
+
+/* EEPROM enable when set 1 */
+#define CmdEEPROM_En BIT5
+/* System EEPROM select, 0: boot from E-FUSE, 1: The EEPROM used is 9346 */
+#define CmdEERPOMSEL BIT4
+#define Cmd9346CR_9356SEL BIT4
+
+/* 8192C GPIO MUX Configuration Register (offset 0x40, 4 byte) */
+#define GPIOSEL_GPIO 0
+#define GPIOSEL_ENBT BIT5
+
+/* 8192C GPIO PIN Control Register (offset 0x44, 4 byte) */
+/* GPIO pins input value */
+#define GPIO_IN REG_GPIO_PIN_CTRL
+/* GPIO pins output value */
+#define GPIO_OUT (REG_GPIO_PIN_CTRL+1)
+/* GPIO pins output enable when a bit is set to "1"; otherwise,
+ * input is configured. */
+#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL+2)
+#define GPIO_MOD (REG_GPIO_PIN_CTRL+3)
+
+/* 8723/8188E Host System Interrupt Mask Register (offset 0x58, 32 byte) */
+#define HSIMR_GPIO12_0_INT_EN BIT0
+#define HSIMR_SPS_OCP_INT_EN BIT5
+#define HSIMR_RON_INT_EN BIT6
+#define HSIMR_PDN_INT_EN BIT7
+#define HSIMR_GPIO9_INT_EN BIT25
+
+/* 8723/8188E Host System Interrupt Status Register (offset 0x5C, 32 byte) */
+#define HSISR_GPIO12_0_INT BIT0
+#define HSISR_SPS_OCP_INT BIT5
+#define HSISR_RON_INT_EN BIT6
+#define HSISR_PDNINT BIT7
+#define HSISR_GPIO9_INT BIT25
+
+/* 8192C (MSR) Media Status Register (Offset 0x4C, 8 bits) */
+/*
+Network Type
+00: No link
+01: Link in ad hoc network
+10: Link in infrastructure network
+11: AP mode
+Default: 00b.
+*/
+#define MSR_NOLINK 0x00
+#define MSR_ADHOC 0x01
+#define MSR_INFRA 0x02
+#define MSR_AP 0x03
+
+/* 88EU (MSR) Media Status Register (Offset 0x4C, 8 bits) */
+#define USB_INTR_CONTENT_C2H_OFFSET 0
+#define USB_INTR_CONTENT_CPWM1_OFFSET 16
+#define USB_INTR_CONTENT_CPWM2_OFFSET 20
+#define USB_INTR_CONTENT_HISR_OFFSET 48
+#define USB_INTR_CONTENT_HISRE_OFFSET 52
+
+/* 88E Driver Initialization Offload REG_FDHM0(Offset 0x88, 8 bits) */
+/* IOL config for REG_FDHM0(Reg0x88) */
+#define CMD_INIT_LLT BIT0
+#define CMD_READ_EFUSE_MAP BIT1
+#define CMD_EFUSE_PATCH BIT2
+#define CMD_IOCONFIG BIT3
+#define CMD_INIT_LLT_ERR BIT4
+#define CMD_READ_EFUSE_MAP_ERR BIT5
+#define CMD_EFUSE_PATCH_ERR BIT6
+#define CMD_IOCONFIG_ERR BIT7
+
+/* 6. Adaptive Control Registers (Offset: 0x0160 - 0x01CF) */
+/* 8192C Response Rate Set Register (offset 0x181, 24bits) */
+#define RRSR_1M BIT0
+#define RRSR_2M BIT1
+#define RRSR_5_5M BIT2
+#define RRSR_11M BIT3
+#define RRSR_6M BIT4
+#define RRSR_9M BIT5
+#define RRSR_12M BIT6
+#define RRSR_18M BIT7
+#define RRSR_24M BIT8
+#define RRSR_36M BIT9
+#define RRSR_48M BIT10
+#define RRSR_54M BIT11
+#define RRSR_MCS0 BIT12
+#define RRSR_MCS1 BIT13
+#define RRSR_MCS2 BIT14
+#define RRSR_MCS3 BIT15
+#define RRSR_MCS4 BIT16
+#define RRSR_MCS5 BIT17
+#define RRSR_MCS6 BIT18
+#define RRSR_MCS7 BIT19
+
+/* 8192C Response Rate Set Register (offset 0x1BF, 8bits) */
+/* WOL bit information */
+#define HAL92C_WOL_PTK_UPDATE_EVENT BIT0
+#define HAL92C_WOL_GTK_UPDATE_EVENT BIT1
+
+/* 8192C BW_OPMODE bits (Offset 0x203, 8bit) */
+#define BW_OPMODE_20MHZ BIT2
+#define BW_OPMODE_5G BIT1
+
+/* 8192C CAM Config Setting (offset 0x250, 1 byte) */
+#define CAM_VALID BIT15
+#define CAM_NOTVALID 0x0000
+#define CAM_USEDK BIT5
+
+#define CAM_CONTENT_COUNT 8
+
+#define CAM_NONE 0x0
+#define CAM_WEP40 0x01
+#define CAM_TKIP 0x02
+#define CAM_AES 0x04
+#define CAM_WEP104 0x05
+#define CAM_SMS4 0x6
+
+#define TOTAL_CAM_ENTRY 32
+#define HALF_CAM_ENTRY 16
+
+#define CAM_CONFIG_USEDK true
+#define CAM_CONFIG_NO_USEDK false
+
+#define CAM_WRITE BIT16
+#define CAM_READ 0x00000000
+#define CAM_POLLINIG BIT31
+
+#define SCR_UseDK 0x01
+#define SCR_TxSecEnable 0x02
+#define SCR_RxSecEnable 0x04
+
+/* 10. Power Save Control Registers (Offset: 0x0260 - 0x02DF) */
+#define WOW_PMEN BIT0 /* Power management Enable. */
+#define WOW_WOMEN BIT1 /* WoW function on or off. */
+#define WOW_MAGIC BIT2 /* Magic packet */
+#define WOW_UWF BIT3 /* Unicast Wakeup frame. */
+
+/* 12. Host Interrupt Status Registers (Offset: 0x0300 - 0x030F) */
+/* 8188 IMR/ISR bits */
+#define IMR_DISABLED_88E 0x0
+/* IMR DW0(0x0060-0063) Bit 0-31 */
+#define IMR_TXCCK_88E BIT30 /* TXRPT interrupt when CCX bit of the packet is set */
+#define IMR_PSTIMEOUT_88E BIT29 /* Power Save Time Out Interrupt */
+#define IMR_GTINT4_88E BIT28 /* When GTIMER4 expires, this bit is set to 1 */
+#define IMR_GTINT3_88E BIT27 /* When GTIMER3 expires, this bit is set to 1 */
+#define IMR_TBDER_88E BIT26 /* Transmit Beacon0 Error */
+#define IMR_TBDOK_88E BIT25 /* Transmit Beacon0 OK */
+#define IMR_TSF_BIT32_TOGGLE_88E BIT24 /* TSF Timer BIT32 toggle indication interrupt */
+#define IMR_BCNDMAINT0_88E BIT20 /* Beacon DMA Interrupt 0 */
+#define IMR_BCNDERR0_88E BIT16 /* Beacon Queue DMA Error 0 */
+#define IMR_HSISR_IND_ON_INT_88E BIT15 /* HSISR Indicator (HSIMR & HSISR is true, this bit is set to 1) */
+#define IMR_BCNDMAINT_E_88E BIT14 /* Beacon DMA Interrupt Extension for Win7 */
+#define IMR_ATIMEND_88E BIT12 /* CTWindow End or ATIM Window End */
+#define IMR_HISR1_IND_INT_88E BIT11 /* HISR1 Indicator (HISR1 & HIMR1 is true, this bit is set to 1) */
+#define IMR_C2HCMD_88E BIT10 /* CPU to Host Command INT Status, Write 1 clear */
+#define IMR_CPWM2_88E BIT9 /* CPU power Mode exchange INT Status, Write 1 clear */
+#define IMR_CPWM_88E BIT8 /* CPU power Mode exchange INT Status, Write 1 clear */
+#define IMR_HIGHDOK_88E BIT7 /* High Queue DMA OK */
+#define IMR_MGNTDOK_88E BIT6 /* Management Queue DMA OK */
+#define IMR_BKDOK_88E BIT5 /* AC_BK DMA OK */
+#define IMR_BEDOK_88E BIT4 /* AC_BE DMA OK */
+#define IMR_VIDOK_88E BIT3 /* AC_VI DMA OK */
+#define IMR_VODOK_88E BIT2 /* AC_VO DMA OK */
+#define IMR_RDU_88E BIT1 /* Rx Descriptor Unavailable */
+#define IMR_ROK_88E BIT0 /* Receive DMA OK */
+
+/* IMR DW1(0x00B4-00B7) Bit 0-31 */
+#define IMR_BCNDMAINT7_88E BIT27 /* Beacon DMA Interrupt 7 */
+#define IMR_BCNDMAINT6_88E BIT26 /* Beacon DMA Interrupt 6 */
+#define IMR_BCNDMAINT5_88E BIT25 /* Beacon DMA Interrupt 5 */
+#define IMR_BCNDMAINT4_88E BIT24 /* Beacon DMA Interrupt 4 */
+#define IMR_BCNDMAINT3_88E BIT23 /* Beacon DMA Interrupt 3 */
+#define IMR_BCNDMAINT2_88E BIT22 /* Beacon DMA Interrupt 2 */
+#define IMR_BCNDMAINT1_88E BIT21 /* Beacon DMA Interrupt 1 */
+#define IMR_BCNDERR7_88E BIT20 /* Beacon DMA Error Int 7 */
+#define IMR_BCNDERR6_88E BIT19 /* Beacon DMA Error Int 6 */
+#define IMR_BCNDERR5_88E BIT18 /* Beacon DMA Error Int 5 */
+#define IMR_BCNDERR4_88E BIT17 /* Beacon DMA Error Int 4 */
+#define IMR_BCNDERR3_88E BIT16 /* Beacon DMA Error Int 3 */
+#define IMR_BCNDERR2_88E BIT15 /* Beacon DMA Error Int 2 */
+#define IMR_BCNDERR1_88E BIT14 /* Beacon DMA Error Int 1 */
+#define IMR_ATIMEND_E_88E BIT13 /* ATIM Window End Ext for Win7 */
+#define IMR_TXERR_88E BIT11 /* Tx Err Flag Int Status, write 1 clear. */
+#define IMR_RXERR_88E BIT10 /* Rx Err Flag INT Status, Write 1 clear */
+#define IMR_TXFOVW_88E BIT9 /* Transmit FIFO Overflow */
+#define IMR_RXFOVW_88E BIT8 /* Receive FIFO Overflow */
+
+#define HAL_NIC_UNPLUG_ISR 0xFFFFFFFF /* The value when the NIC is unplugged for PCI. */
+
+/* 8192C EFUSE */
+#define HWSET_MAX_SIZE 256
+#define HWSET_MAX_SIZE_88E 512
+
+/*===================================================================
+=====================================================================
+Here the register defines are for 92C. When a define is the same as in 92C,
+we will use the 92C define for consistency.
+So the following defines for 92C are not complete!
+=====================================================================
+=====================================================================*/
+/*
+Based on Datasheet V33---090401
+Register Summary
+Current IOREG MAP
+0x0000h ~ 0x00FFh System Configuration (256 Bytes)
+0x0100h ~ 0x01FFh MACTOP General Configuration (256 Bytes)
+0x0200h ~ 0x027Fh TXDMA Configuration (128 Bytes)
+0x0280h ~ 0x02FFh RXDMA Configuration (128 Bytes)
+0x0300h ~ 0x03FFh PCIE EMAC Reserved Region (256 Bytes)
+0x0400h ~ 0x04FFh Protocol Configuration (256 Bytes)
+0x0500h ~ 0x05FFh EDCA Configuration (256 Bytes)
+0x0600h ~ 0x07FFh WMAC Configuration (512 Bytes)
+0x2000h ~ 0x3FFFh 8051 FW Download Region (8196 Bytes)
+*/
+/* 8192C (TXPAUSE) transmission pause (Offset 0x522, 8 bits) */
+/* Note: */
+/* The bits for stopping the AC (VO/VI/BE/BK) queues in the
+ * RTL8192S/RTL8192C datasheet are wrong, */
+/* the correct arrangement is VO - Bit0, VI - Bit1, BE - Bit2,
+ * and BK - Bit3. */
+/* 8723 and 88E may not be correct either in the earlier version. */
+#define StopBecon BIT6
+#define StopHigh BIT5
+#define StopMgt BIT4
+#define StopBK BIT3
+#define StopBE BIT2
+#define StopVI BIT1
+#define StopVO BIT0
+
+/* 8192C (RCR) Receive Configuration Register(Offset 0x608, 32 bits) */
+#define RCR_APPFCS BIT31 /* WMAC append FCS after payload */
+#define RCR_APP_MIC BIT30
+#define RCR_APP_PHYSTS BIT28
+#define RCR_APP_ICV BIT29
+#define RCR_APP_PHYST_RXFF BIT28
+#define RCR_APP_BA_SSN BIT27 /* Accept BA SSN */
+#define RCR_ENMBID BIT24 /* Enable Multiple BssId. */
+#define RCR_LSIGEN BIT23
+#define RCR_MFBEN BIT22
+#define RCR_HTC_LOC_CTRL BIT14 /* MFC<--HTC=1 MFC-->HTC=0 */
+#define RCR_AMF BIT13 /* Accept management type frame */
+#define RCR_ACF BIT12 /* Accept control type frame */
+#define RCR_ADF BIT11 /* Accept data type frame */
+#define RCR_AICV BIT9 /* Accept ICV error packet */
+#define RCR_ACRC32 BIT8 /* Accept CRC32 error packet */
+#define RCR_CBSSID_BCN BIT7 /* Accept BSSID match packet
+ * (Rx beacon, probe rsp) */
+#define RCR_CBSSID_DATA BIT6 /* Accept BSSID match (Data)*/
+#define RCR_CBSSID RCR_CBSSID_DATA /* Accept BSSID match */
+#define RCR_APWRMGT BIT5 /* Accept power management pkt*/
+#define RCR_ADD3 BIT4 /* Accept address 3 match pkt */
+#define RCR_AB BIT3 /* Accept broadcast packet */
+#define RCR_AM BIT2 /* Accept multicast packet */
+#define RCR_APM BIT1 /* Accept physical match pkt */
+#define RCR_AAP BIT0 /* Accept all unicast packet */
+#define RCR_MXDMA_OFFSET 8
+#define RCR_FIFO_OFFSET 13
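+
+/* A minimal sketch of one plausible receive filter built from the bits
+ * above (illustrative, not a recommended setting): accept physically
+ * matched unicast, multicast and broadcast frames of the management,
+ * control and data types, and let the MAC append PHY status, ICV and MIC
+ * information to the RX buffer.
+ *
+ *	u32 rcr = RCR_APM | RCR_AM | RCR_AB | RCR_AMF | RCR_ACF | RCR_ADF |
+ *		  RCR_APP_PHYST_RXFF | RCR_APP_ICV | RCR_APP_MIC;
+ */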
+
+/* 0xFE00h ~ 0xFE55h USB Configuration */
+#define REG_USB_INFO 0xFE17
+#define REG_USB_SPECIAL_OPTION 0xFE55
+#define REG_USB_DMA_AGG_TO 0xFE5B
+#define REG_USB_AGG_TO 0xFE5C
+#define REG_USB_AGG_TH 0xFE5D
+
+#define REG_USB_HRPWM 0xFE58
+#define REG_USB_HCPWM 0xFE57
+/* 8192C Register Bit and Content definition */
+/* 0x0000h ~ 0x00FFh System Configuration */
+
+/* 2 SYS_ISO_CTRL */
+#define ISO_MD2PP BIT(0)
+#define ISO_UA2USB BIT(1)
+#define ISO_UD2CORE BIT(2)
+#define ISO_PA2PCIE BIT(3)
+#define ISO_PD2CORE BIT(4)
+#define ISO_IP2MAC BIT(5)
+#define ISO_DIOP BIT(6)
+#define ISO_DIOE BIT(7)
+#define ISO_EB2CORE BIT(8)
+#define ISO_DIOR BIT(9)
+#define PWC_EV12V BIT(15)
+
+/* 2 SYS_FUNC_EN */
+#define FEN_BBRSTB BIT(0)
+#define FEN_BB_GLB_RSTn BIT(1)
+#define FEN_USBA BIT(2)
+#define FEN_UPLL BIT(3)
+#define FEN_USBD BIT(4)
+#define FEN_DIO_PCIE BIT(5)
+#define FEN_PCIEA BIT(6)
+#define FEN_PPLL BIT(7)
+#define FEN_PCIED BIT(8)
+#define FEN_DIOE BIT(9)
+#define FEN_CPUEN BIT(10)
+#define FEN_DCORE BIT(11)
+#define FEN_ELDR BIT(12)
+#define FEN_DIO_RF BIT(13)
+#define FEN_HWPDN BIT(14)
+#define FEN_MREGEN BIT(15)
+
+/* 2 APS_FSMCO */
+#define PFM_LDALL BIT(0)
+#define PFM_ALDN BIT(1)
+#define PFM_LDKP BIT(2)
+#define PFM_WOWL BIT(3)
+#define EnPDN BIT(4)
+#define PDN_PL BIT(5)
+#define APFM_ONMAC BIT(8)
+#define APFM_OFF BIT(9)
+#define APFM_RSM BIT(10)
+#define AFSM_HSUS BIT(11)
+#define AFSM_PCIE BIT(12)
+#define APDM_MAC BIT(13)
+#define APDM_HOST BIT(14)
+#define APDM_HPDN BIT(15)
+#define RDY_MACON BIT(16)
+#define SUS_HOST BIT(17)
+#define ROP_ALD BIT(20)
+#define ROP_PWR BIT(21)
+#define ROP_SPS BIT(22)
+#define SOP_MRST BIT(25)
+#define SOP_FUSE BIT(26)
+#define SOP_ABG BIT(27)
+#define SOP_AMB BIT(28)
+#define SOP_RCK BIT(29)
+#define SOP_A8M BIT(30)
+#define XOP_BTCK BIT(31)
+
+/* 2 SYS_CLKR */
+#define ANAD16V_EN BIT(0)
+#define ANA8M BIT(1)
+#define MACSLP BIT(4)
+#define LOADER_CLK_EN BIT(5)
+
+/* 2 9346CR */
+
+#define BOOT_FROM_EEPROM BIT(4)
+#define EEPROM_EN BIT(5)
+
+/* 2 SPS0_CTRL */
+
+/* 2 SPS_OCP_CFG */
+
+/* 2 RF_CTRL */
+#define RF_EN BIT(0)
+#define RF_RSTB BIT(1)
+#define RF_SDMRSTB BIT(2)
+
+/* 2 LDOV12D_CTRL */
+#define LDV12_EN BIT(0)
+#define LDV12_SDBY BIT(1)
+#define LPLDO_HSM BIT(2)
+#define LPLDO_LSM_DIS BIT(3)
+#define _LDV12_VADJ(x) (((x) & 0xF) << 4)
+
+/* 2EFUSE_CTRL */
+#define ALD_EN BIT(18)
+#define EF_PD BIT(19)
+#define EF_FLAG BIT(31)
+
+/* 2 EFUSE_TEST (For RTL8723 partially) */
+#define EF_TRPT BIT(7)
+/* 00: Wifi Efuse, 01: BT Efuse0, 10: BT Efuse1, 11: BT Efuse2 */
+#define EF_CELL_SEL (BIT(8)|BIT(9))
+#define LDOE25_EN BIT(31)
+#define EFUSE_SEL(x) (((x) & 0x3) << 8)
+#define EFUSE_SEL_MASK 0x300
+#define EFUSE_WIFI_SEL_0 0x0
+#define EFUSE_BT_SEL_0 0x1
+#define EFUSE_BT_SEL_1 0x2
+#define EFUSE_BT_SEL_2 0x3
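+
+/* A minimal worked example: EFUSE_SEL(EFUSE_BT_SEL_1) places 0x2 into the
+ * EF_CELL_SEL field (bits [9:8]), i.e. 0x200, selecting BT efuse bank 1;
+ * masking the control word with EFUSE_SEL_MASK recovers the selected cell. */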
+
+#define EFUSE_ACCESS_ON 0x69 /* For RTL8723 only. */
+#define EFUSE_ACCESS_OFF 0x00 /* For RTL8723 only. */
+
+/* 2 8051FWDL */
+/* 2 MCUFWDL */
+#define MCUFWDL_EN BIT(0)
+#define MCUFWDL_RDY BIT(1)
+#define FWDL_ChkSum_rpt BIT(2)
+#define MACINI_RDY BIT(3)
+#define BBINI_RDY BIT(4)
+#define RFINI_RDY BIT(5)
+#define WINTINI_RDY BIT(6)
+#define RAM_DL_SEL BIT(7) /* 1:RAM, 0:ROM */
+#define ROM_DLEN BIT(19)
+#define CPRST BIT(23)
+
+/* 2 REG_SYS_CFG */
+#define XCLK_VLD BIT(0)
+#define ACLK_VLD BIT(1)
+#define UCLK_VLD BIT(2)
+#define PCLK_VLD BIT(3)
+#define PCIRSTB BIT(4)
+#define V15_VLD BIT(5)
+#define SW_OFFLOAD_EN BIT(7)
+#define SIC_IDLE BIT(8)
+#define BD_MAC2 BIT(9)
+#define BD_MAC1 BIT(10)
+#define IC_MACPHY_MODE BIT(11)
+#define CHIP_VER (BIT(12)|BIT(13)|BIT(14)|BIT(15))
+#define BT_FUNC BIT(16)
+#define VENDOR_ID BIT(19)
+#define PAD_HWPD_IDN BIT(22)
+#define TRP_VAUX_EN BIT(23) /* RTL ID */
+#define TRP_BT_EN BIT(24)
+#define BD_PKG_SEL BIT(25)
+#define BD_HCI_SEL BIT(26)
+#define TYPE_ID BIT(27)
+
+#define CHIP_VER_RTL_MASK 0xF000 /* Bit 12 ~ 15 */
+#define CHIP_VER_RTL_SHIFT 12
+
+/* 2REG_GPIO_OUTSTS (For RTL8723 only) */
+#define EFS_HCI_SEL (BIT(0)|BIT(1))
+#define PAD_HCI_SEL (BIT(2)|BIT(3))
+#define HCI_SEL (BIT(4)|BIT(5))
+#define PKG_SEL_HCI BIT(6)
+#define FEN_GPS BIT(7)
+#define FEN_BT BIT(8)
+#define FEN_WL BIT(9)
+#define FEN_PCI BIT(10)
+#define FEN_USB BIT(11)
+#define BTRF_HWPDN_N BIT(12)
+#define WLRF_HWPDN_N BIT(13)
+#define PDN_BT_N BIT(14)
+#define PDN_GPS_N BIT(15)
+#define BT_CTL_HWPDN BIT(16)
+#define GPS_CTL_HWPDN BIT(17)
+#define PPHY_SUSB BIT(20)
+#define UPHY_SUSB BIT(21)
+#define PCI_SUSEN BIT(22)
+#define USB_SUSEN BIT(23)
+#define RF_RL_ID (BIT(31)|BIT(30)|BIT(29)|BIT(28))
+
+/* 2SYS_CFG */
+#define RTL_ID BIT(23) /* TestChip ID, 1:Test(RLE); 0:MP(RL) */
+
+/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
+
+/* 2 Function Enable Registers */
+/* 2 CR */
+
+#define HCI_TXDMA_EN BIT(0)
+#define HCI_RXDMA_EN BIT(1)
+#define TXDMA_EN BIT(2)
+#define RXDMA_EN BIT(3)
+#define PROTOCOL_EN BIT(4)
+#define SCHEDULE_EN BIT(5)
+#define MACTXEN BIT(6)
+#define MACRXEN BIT(7)
+#define ENSWBCN BIT(8)
+#define ENSEC BIT(9)
+#define CALTMR_EN BIT(10) /* 32k CAL TMR enable */
+
+/* Network type */
+#define _NETTYPE(x) (((x) & 0x3) << 16)
+#define MASK_NETTYPE 0x30000
+#define NT_NO_LINK 0x0
+#define NT_LINK_AD_HOC 0x1
+#define NT_LINK_AP 0x2
+#define NT_AS_AP 0x3
+
+/* 2 PBP - Page Size Register */
+#define GET_RX_PAGE_SIZE(value) ((value) & 0xF)
+#define GET_TX_PAGE_SIZE(value) (((value) & 0xF0) >> 4)
+#define _PSRX_MASK 0xF
+#define _PSTX_MASK 0xF0
+#define _PSRX(x) (x)
+#define _PSTX(x) ((x) << 4)
+
+#define PBP_64 0x0
+#define PBP_128 0x1
+#define PBP_256 0x2
+#define PBP_512 0x3
+#define PBP_1024 0x4
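+
+/* A minimal worked example: a PBP value of (_PSRX(PBP_128) | _PSTX(PBP_128))
+ * equals 0x11, and GET_RX_PAGE_SIZE(0x11) and GET_TX_PAGE_SIZE(0x11) both
+ * return PBP_128, i.e. 128-byte RX and TX buffer pages. */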
+
+/* 2 TX/RXDMA */
+#define RXDMA_ARBBW_EN BIT(0)
+#define RXSHFT_EN BIT(1)
+#define RXDMA_AGG_EN BIT(2)
+#define QS_VO_QUEUE BIT(8)
+#define QS_VI_QUEUE BIT(9)
+#define QS_BE_QUEUE BIT(10)
+#define QS_BK_QUEUE BIT(11)
+#define QS_MANAGER_QUEUE BIT(12)
+#define QS_HIGH_QUEUE BIT(13)
+
+#define HQSEL_VOQ BIT(0)
+#define HQSEL_VIQ BIT(1)
+#define HQSEL_BEQ BIT(2)
+#define HQSEL_BKQ BIT(3)
+#define HQSEL_MGTQ BIT(4)
+#define HQSEL_HIQ BIT(5)
+
+/* For normal driver, 0x10C */
+#define _TXDMA_HIQ_MAP(x) (((x) & 0x3) << 14)
+#define _TXDMA_MGQ_MAP(x) (((x) & 0x3) << 12)
+#define _TXDMA_BKQ_MAP(x) (((x) & 0x3) << 10)
+#define _TXDMA_BEQ_MAP(x) (((x) & 0x3) << 8)
+#define _TXDMA_VIQ_MAP(x) (((x) & 0x3) << 6)
+#define _TXDMA_VOQ_MAP(x) (((x) & 0x3) << 4)
+
+#define QUEUE_LOW 1
+#define QUEUE_NORMAL 2
+#define QUEUE_HIGH 3
+
+/* 2 TRXFF_BNDY */
+
+/* 2 LLT_INIT */
+#define _LLT_NO_ACTIVE 0x0
+#define _LLT_WRITE_ACCESS 0x1
+#define _LLT_READ_ACCESS 0x2
+
+#define _LLT_INIT_DATA(x) ((x) & 0xFF)
+#define _LLT_INIT_ADDR(x) (((x) & 0xFF) << 8)
+#define _LLT_OP(x) (((x) & 0x3) << 30)
+#define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3)
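+
+/* A minimal illustrative sketch built from the helpers above: one LLT
+ * write command word, with the op code in bits [31:30], the entry address
+ * in bits [15:8] and the data byte in bits [7:0].  _LLT_OP_VALUE() extracts
+ * the op field back when polling for completion. */
+#define _LLT_WRITE_CMD(addr, data) \
+	(_LLT_OP(_LLT_WRITE_ACCESS) | _LLT_INIT_ADDR(addr) | _LLT_INIT_DATA(data))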
+
+/* 0x0200h ~ 0x027Fh TXDMA Configuration */
+/* 2RQPN */
+#define _HPQ(x) ((x) & 0xFF)
+#define _LPQ(x) (((x) & 0xFF) << 8)
+#define _PUBQ(x) (((x) & 0xFF) << 16)
+/* NOTE: in RQPN_NPQ register */
+#define _NPQ(x) ((x) & 0xFF)
+
+#define HPQ_PUBLIC_DIS BIT(24)
+#define LPQ_PUBLIC_DIS BIT(25)
+#define LD_RQPN BIT(31)
+
+/* 2TDECTRL */
+#define BCN_VALID BIT(16)
+#define BCN_HEAD(x) (((x) & 0xFF) << 8)
+#define BCN_HEAD_MASK 0xFF00
+
+/* 2 TDECTL */
+#define BLK_DESC_NUM_SHIFT 4
+#define BLK_DESC_NUM_MASK 0xF
+
+/* 2 TXDMA_OFFSET_CHK */
+#define DROP_DATA_EN BIT(9)
+
+/* 0x0280h ~ 0x028Bh RX DMA Configuration */
+
+/* REG_RXDMA_CONTROL, 0x0286h */
+
+/* 2 REG_RXPKT_NUM, 0x0284 */
+#define RXPKT_RELEASE_POLL BIT(16)
+#define RXDMA_IDLE BIT(17)
+#define RW_RELEASE_EN BIT(18)
+
+/* 0x0400h ~ 0x047Fh Protocol Configuration */
+/* 2 FWHW_TXQ_CTRL */
+#define EN_AMPDU_RTY_NEW BIT(7)
+
+/* 2 SPEC SIFS */
+#define _SPEC_SIFS_CCK(x) ((x) & 0xFF)
+#define _SPEC_SIFS_OFDM(x) (((x) & 0xFF) << 8)
+
+/* 2 RL */
+#define RETRY_LIMIT_SHORT_SHIFT 8
+#define RETRY_LIMIT_LONG_SHIFT 0
+
+/* 0x0500h ~ 0x05FFh EDCA Configuration */
+
+/* 2 EDCA setting */
+#define AC_PARAM_TXOP_LIMIT_OFFSET 16
+#define AC_PARAM_ECW_MAX_OFFSET 12
+#define AC_PARAM_ECW_MIN_OFFSET 8
+#define AC_PARAM_AIFS_OFFSET 0
+
+#define _LRL(x) ((x) & 0x3F)
+#define _SRL(x) (((x) & 0x3F) << 8)
+
+/* 2 BCN_CTRL */
+#define EN_MBSSID BIT(1)
+#define EN_TXBCN_RPT BIT(2)
+#define EN_BCN_FUNCTION BIT(3)
+#define DIS_TSF_UPDATE BIT(3)
+
+/* The same function but different bit field. */
+#define DIS_TSF_UDT0_NORMAL_CHIP BIT(4)
+#define DIS_TSF_UDT0_TEST_CHIP BIT(5)
+#define STOP_BCNQ BIT(6)
+
+/* 2 ACMHWCTRL */
+#define AcmHw_HwEn BIT(0)
+#define AcmHw_BeqEn BIT(1)
+#define AcmHw_ViqEn BIT(2)
+#define AcmHw_VoqEn BIT(3)
+#define AcmHw_BeqStatus BIT(4)
+#define AcmHw_ViqStatus BIT(5)
+#define AcmHw_VoqStatus BIT(6)
+
+/* 0x0600h ~ 0x07FFh WMAC Configuration */
+/* 2APSD_CTRL */
+#define APSDOFF BIT(6)
+#define APSDOFF_STATUS BIT(7)
+
+#define RATE_BITMAP_ALL 0xFFFFF
+
+/* Only use CCK 1M rate for ACK */
+#define RATE_RRSR_CCK_ONLY_1M 0xFFFF1
+
+/* 2 TCR */
+#define TSFRST BIT(0)
+#define DIS_GCLK BIT(1)
+#define PAD_SEL BIT(2)
+#define PWR_ST BIT(6)
+#define PWRBIT_OW_EN BIT(7)
+#define ACRC BIT(8)
+#define CFENDFORM BIT(9)
+#define ICV BIT(10)
+
+/* 2 RCR */
+#define AAP BIT(0)
+#define APM BIT(1)
+#define AM BIT(2)
+#define AB BIT(3)
+#define ADD3 BIT(4)
+#define APWRMGT BIT(5)
+#define CBSSID BIT(6)
+#define CBSSID_DATA BIT(6)
+#define CBSSID_BCN BIT(7)
+#define ACRC32 BIT(8)
+#define AICV BIT(9)
+#define ADF BIT(11)
+#define ACF BIT(12)
+#define AMF BIT(13)
+#define HTC_LOC_CTRL BIT(14)
+#define UC_DATA_EN BIT(16)
+#define BM_DATA_EN BIT(17)
+#define MFBEN BIT(22)
+#define LSIGEN BIT(23)
+#define EnMBID BIT(24)
+#define APP_BASSN BIT(27)
+#define APP_PHYSTS BIT(28)
+#define APP_ICV BIT(29)
+#define APP_MIC BIT(30)
+#define APP_FCS BIT(31)
+
+/* 2 SECCFG */
+#define SCR_TxUseDK BIT(0) /* Force Tx Use Default Key */
+#define SCR_RxUseDK BIT(1) /* Force Rx Use Default Key */
+#define SCR_TxEncEnable BIT(2) /* Enable Tx Encryption */
+#define SCR_RxDecEnable BIT(3) /* Enable Rx Decryption */
+#define SCR_SKByA2 BIT(4) /* Search key by A2 */
+#define SCR_NoSKMC BIT(5) /* No Key Search Multicast */
+#define SCR_TXBCUSEDK BIT(6) /* Force Tx Bcast pkt Use Default Key */
+#define SCR_RXBCUSEDK BIT(7) /* Force Rx Bcast pkt Use Default Key */
+
+/* RTL8188E SDIO Configuration */
+
+/* I/O bus domain address mapping */
+#define SDIO_LOCAL_BASE 0x10250000
+#define WLAN_IOREG_BASE 0x10260000
+#define FIRMWARE_FIFO_BASE 0x10270000
+#define TX_HIQ_BASE 0x10310000
+#define TX_MIQ_BASE 0x10320000
+#define TX_LOQ_BASE 0x10330000
+#define RX_RX0FF_BASE 0x10340000
+
+/* SDIO host local register space mapping. */
+#define SDIO_LOCAL_MSK 0x0FFF
+#define WLAN_IOREG_MSK 0x7FFF
+#define WLAN_FIFO_MSK 0x1FFF /* Aggregation Length[12:0] */
+#define WLAN_RX0FF_MSK 0x0003
+
+/* Without ref to the SDIO Device ID */
+#define SDIO_WITHOUT_REF_DEVICE_ID 0
+#define SDIO_LOCAL_DEVICE_ID 0 /* 0b[16], 000b[15:13] */
+#define WLAN_TX_HIQ_DEVICE_ID 4 /* 0b[16], 100b[15:13] */
+#define WLAN_TX_MIQ_DEVICE_ID 5 /* 0b[16], 101b[15:13] */
+#define WLAN_TX_LOQ_DEVICE_ID 6 /* 0b[16], 110b[15:13] */
+#define WLAN_RX0FF_DEVICE_ID 7 /* 0b[16], 111b[15:13] */
+#define WLAN_IOREG_DEVICE_ID 8 /* 1b[16] */
+
+/* SDIO Tx Free Page Index */
+#define HI_QUEUE_IDX 0
+#define MID_QUEUE_IDX 1
+#define LOW_QUEUE_IDX 2
+#define PUBLIC_QUEUE_IDX 3
+
+#define SDIO_MAX_TX_QUEUE 3 /* HIQ, MIQ and LOQ */
+#define SDIO_MAX_RX_QUEUE 1
+
+/* SDIO Tx Control */
+#define SDIO_REG_TX_CTRL 0x0000
+/* SDIO Host Interrupt Mask */
+#define SDIO_REG_HIMR 0x0014
+/* SDIO Host Interrupt Status Register */
+#define SDIO_REG_HISR 0x0018
+/* HCI Current Power Mode */
+#define SDIO_REG_HCPWM 0x0019
+/* RXDMA Request Length */
+#define SDIO_REG_RX0_REQ_LEN 0x001C
+/* Free Tx Buffer Page */
+#define SDIO_REG_FREE_TXPG 0x0020
+/* HCI Current Power Mode 1 */
+#define SDIO_REG_HCPWM1 0x0024
+/* HCI Current Power Mode 2 */
+#define SDIO_REG_HCPWM2 0x0026
+/* HTSF Information */
+#define SDIO_REG_HTSFR_INFO 0x0030
+/* HCI Request Power Mode 1 */
+#define SDIO_REG_HRPWM1 0x0080
+/* HCI Request Power Mode 2 */
+#define SDIO_REG_HRPWM2 0x0082
+/* HCI Power Save Clock */
+#define SDIO_REG_HPS_CLKR 0x0084
+/* SDIO HCI Suspend Control */
+#define SDIO_REG_HSUS_CTRL 0x0086
+/* SDIO Host Extension Interrupt Mask Always */
+#define SDIO_REG_HIMR_ON 0x0090
+/* SDIO Host Extension Interrupt Status Always */
+#define SDIO_REG_HISR_ON 0x0091
+
+#define SDIO_HIMR_DISABLED 0
+
+/* RTL8188E SDIO Host Interrupt Mask Register */
+#define SDIO_HIMR_RX_REQUEST_MSK BIT0
+#define SDIO_HIMR_AVAL_MSK BIT1
+#define SDIO_HIMR_TXERR_MSK BIT2
+#define SDIO_HIMR_RXERR_MSK BIT3
+#define SDIO_HIMR_TXFOVW_MSK BIT4
+#define SDIO_HIMR_RXFOVW_MSK BIT5
+#define SDIO_HIMR_TXBCNOK_MSK BIT6
+#define SDIO_HIMR_TXBCNERR_MSK BIT7
+#define SDIO_HIMR_BCNERLY_INT_MSK BIT16
+#define SDIO_HIMR_C2HCMD_MSK BIT17
+#define SDIO_HIMR_CPWM1_MSK BIT18
+#define SDIO_HIMR_CPWM2_MSK BIT19
+#define SDIO_HIMR_HSISR_IND_MSK BIT20
+#define SDIO_HIMR_GTINT3_IND_MSK BIT21
+#define SDIO_HIMR_GTINT4_IND_MSK BIT22
+#define SDIO_HIMR_PSTIMEOUT_MSK BIT23
+#define SDIO_HIMR_OCPINT_MSK BIT24
+#define SDIO_HIMR_ATIMEND_MSK BIT25
+#define SDIO_HIMR_ATIMEND_E_MSK BIT26
+#define SDIO_HIMR_CTWEND_MSK BIT27
+
+/* RTL8188E SDIO Specific */
+#define SDIO_HIMR_MCU_ERR_MSK BIT28
+#define SDIO_HIMR_TSF_BIT32_TOGGLE_MSK BIT29
+
+/* SDIO Host Interrupt Status Register bits */
+#define SDIO_HISR_RX_REQUEST BIT0
+#define SDIO_HISR_AVAL BIT1
+#define SDIO_HISR_TXERR BIT2
+#define SDIO_HISR_RXERR BIT3
+#define SDIO_HISR_TXFOVW BIT4
+#define SDIO_HISR_RXFOVW BIT5
+#define SDIO_HISR_TXBCNOK BIT6
+#define SDIO_HISR_TXBCNERR BIT7
+#define SDIO_HISR_BCNERLY_INT BIT16
+#define SDIO_HISR_C2HCMD BIT17
+#define SDIO_HISR_CPWM1 BIT18
+#define SDIO_HISR_CPWM2 BIT19
+#define SDIO_HISR_HSISR_IND BIT20
+#define SDIO_HISR_GTINT3_IND BIT21
+#define SDIO_HISR_GTINT4_IND BIT22
+#define SDIO_HISR_PSTIMEOUT BIT23
+#define SDIO_HISR_OCPINT BIT24
+#define SDIO_HISR_ATIMEND BIT25
+#define SDIO_HISR_ATIMEND_E BIT26
+#define SDIO_HISR_CTWEND BIT27
+
+/* RTL8188E SDIO Specific */
+#define SDIO_HISR_MCU_ERR BIT28
+#define SDIO_HISR_TSF_BIT32_TOGGLE BIT29
+
+#define MASK_SDIO_HISR_CLEAR \
+ (SDIO_HISR_TXERR | SDIO_HISR_RXERR | SDIO_HISR_TXFOVW |\
+ SDIO_HISR_RXFOVW | SDIO_HISR_TXBCNOK | SDIO_HISR_TXBCNERR |\
+ SDIO_HISR_C2HCMD | SDIO_HISR_CPWM1 | SDIO_HISR_CPWM2 |\
+ SDIO_HISR_HSISR_IND | SDIO_HISR_GTINT3_IND | SDIO_HISR_GTINT4_IND |\
+ SDIO_HISR_PSTIMEOUT | SDIO_HISR_OCPINT)
+
+/* SDIO HCI Suspend Control Register */
+#define HCI_RESUME_PWR_RDY BIT1
+#define HCI_SUS_CTRL BIT0
+
+/* SDIO Tx FIFO related */
+/* The number of Tx FIFO free page */
+#define SDIO_TX_FREE_PG_QUEUE 4
+#define SDIO_TX_FIFO_PAGE_SZ 128
+
+/* 0xFE00h ~ 0xFE55h USB Configuration */
+
+/* 2 USB Information (0xFE17) */
+#define USB_IS_HIGH_SPEED 0
+#define USB_IS_FULL_SPEED 1
+#define USB_SPEED_MASK BIT(5)
+
+#define USB_NORMAL_SIE_EP_MASK 0xF
+#define USB_NORMAL_SIE_EP_SHIFT 4
+
+/* 2 Special Option */
+#define USB_AGG_EN BIT(3)
+
+/* 0: Use the interrupt endpoint to upload interrupt pkts. */
+/* 1: Use the bulk endpoint to upload interrupt pkts. */
+#define INT_BULK_SEL BIT(4)
+
+/* 2REG_C2HEVT_CLEAR */
+/* Set by the driver to notify FW that the driver has read
+ * the C2H command message. */
+#define C2H_EVT_HOST_CLOSE 0x00
+/* Set by FW to indicate that FW has written the C2H command
+ * message and the driver has not yet read it. */
+#define C2H_EVT_FW_CLOSE 0xFF
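+
+/* A minimal handshake sketch (assuming the driver's rtw_read8()/rtw_write8()
+ * accessors and the REG_C2HEVT_CLEAR register defined elsewhere in this
+ * file): the host polls for C2H_EVT_FW_CLOSE, consumes the C2H message, then
+ * writes C2H_EVT_HOST_CLOSE so the firmware may post the next event.
+ *
+ *	if (rtw_read8(adapt, REG_C2HEVT_CLEAR) == C2H_EVT_FW_CLOSE) {
+ *		(handle the C2H command message here)
+ *		rtw_write8(adapt, REG_C2HEVT_CLEAR, C2H_EVT_HOST_CLOSE);
+ *	}
+ */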
+
+/* 2REG_MULTI_FUNC_CTRL(For RTL8723 Only) */
+/* Enable GPIO[9] as WiFi HW PDn source */
+#define WL_HWPDN_EN BIT0
+/* WiFi HW PDn polarity control */
+#define WL_HWPDN_SL BIT1
+/* WiFi function enable */
+#define WL_FUNC_EN BIT2
+/* Enable GPIO[9] as WiFi RF HW PDn source */
+#define WL_HWROF_EN BIT3
+/* Enable GPIO[11] as BT HW PDn source */
+#define BT_HWPDN_EN BIT16
+/* BT HW PDn polarity control */
+#define BT_HWPDN_SL BIT17
+/* BT function enable */
+#define BT_FUNC_EN BIT18
+/* Enable GPIO[11] as BT/GPS RF HW PDn source */
+#define BT_HWROF_EN BIT19
+/* Enable GPIO[10] as GPS HW PDn source */
+#define GPS_HWPDN_EN BIT20
+/* GPS HW PDn polarity control */
+#define GPS_HWPDN_SL BIT21
+/* GPS function enable */
+#define GPS_FUNC_EN BIT22
+
+/* 3 REG_LIFECTRL_CTRL */
+#define HAL92C_EN_PKT_LIFE_TIME_BK BIT3
+#define HAL92C_EN_PKT_LIFE_TIME_BE BIT2
+#define HAL92C_EN_PKT_LIFE_TIME_VI BIT1
+#define HAL92C_EN_PKT_LIFE_TIME_VO BIT0
+
+#define HAL92C_MSDU_LIFE_TIME_UNIT 128 /* in us */
+
+/* General definitions */
+#define LAST_ENTRY_OF_TX_PKT_BUFFER 176 /* 22k 22528 bytes */
+
+#define POLLING_LLT_THRESHOLD 20
+#define POLLING_READY_TIMEOUT_COUNT 1000
+/* GPIO BIT */
+#define HAL_8192C_HW_GPIO_WPS_BIT BIT2
+
+/* 8192C EEPROM/EFUSE share register definition. */
+
+/* EEPROM/Efuse PG Offset for 88EE/88EU/88ES */
+#define EEPROM_TX_PWR_INX_88E 0x10
+
+#define EEPROM_ChannelPlan_88E 0xB8
+#define EEPROM_XTAL_88E 0xB9
+#define EEPROM_THERMAL_METER_88E 0xBA
+#define EEPROM_IQK_LCK_88E 0xBB
+
+#define EEPROM_RF_BOARD_OPTION_88E 0xC1
+#define EEPROM_RF_FEATURE_OPTION_88E 0xC2
+#define EEPROM_RF_BT_SETTING_88E 0xC3
+#define EEPROM_VERSION_88E 0xC4
+#define EEPROM_CUSTOMERID_88E 0xC5
+#define EEPROM_RF_ANTENNA_OPT_88E 0xC9
+
+/* RTL88EE */
+#define EEPROM_MAC_ADDR_88EE 0xD0
+#define EEPROM_VID_88EE 0xD6
+#define EEPROM_DID_88EE 0xD8
+#define EEPROM_SVID_88EE 0xDA
+#define EEPROM_SMID_88EE 0xDC
+
+/* RTL88EU */
+#define EEPROM_MAC_ADDR_88EU 0xD7
+#define EEPROM_VID_88EU 0xD0
+#define EEPROM_PID_88EU 0xD2
+#define EEPROM_USB_OPTIONAL_FUNCTION0 0xD4
+
+/* RTL88ES */
+#define EEPROM_MAC_ADDR_88ES 0x11A
+
+/* EEPROM/Efuse Value Type */
+#define EETYPE_TX_PWR 0x0
+
+/* Default Value for EEPROM or EFUSE!!! */
+#define EEPROM_Default_TSSI 0x0
+#define EEPROM_Default_TxPowerDiff 0x0
+#define EEPROM_Default_CrystalCap 0x5
+/* Default: 2X2, RTL8192CE(QFPN68) */
+#define EEPROM_Default_BoardType 0x02
+#define EEPROM_Default_TxPower 0x1010
+#define EEPROM_Default_HT2T_TxPwr 0x10
+
+#define EEPROM_Default_LegacyHTTxPowerDiff 0x3
+#define EEPROM_Default_ThermalMeter 0x12
+
+#define EEPROM_Default_AntTxPowerDiff 0x0
+#define EEPROM_Default_TxPwDiff_CrystalCap 0x5
+#define EEPROM_Default_TxPowerLevel 0x2A
+
+#define EEPROM_Default_HT40_2SDiff 0x0
+/* HT20<->40 default Tx Power Index Difference */
+#define EEPROM_Default_HT20_Diff 2
+#define EEPROM_Default_LegacyHTTxPowerDiff 0x3
+#define EEPROM_Default_HT40_PwrMaxOffset 0
+#define EEPROM_Default_HT20_PwrMaxOffset 0
+
+#define EEPROM_Default_CrystalCap_88E 0x20
+#define EEPROM_Default_ThermalMeter_88E 0x18
+
+/* New EFUSE default values */
+#define EEPROM_DEFAULT_24G_INDEX 0x2D
+#define EEPROM_DEFAULT_24G_HT20_DIFF 0x02
+#define EEPROM_DEFAULT_24G_OFDM_DIFF 0x04
+
+#define EEPROM_DEFAULT_5G_INDEX 0x2A
+#define EEPROM_DEFAULT_5G_HT20_DIFF 0x00
+#define EEPROM_DEFAULT_5G_OFDM_DIFF 0x04
+
+#define EEPROM_DEFAULT_DIFF 0xFE
+#define EEPROM_DEFAULT_CHANNEL_PLAN 0x7F
+#define EEPROM_DEFAULT_BOARD_OPTION 0x00
+#define EEPROM_DEFAULT_FEATURE_OPTION 0x00
+#define EEPROM_DEFAULT_BT_OPTION 0x10
+
+/* For debug */
+#define EEPROM_Default_PID 0x1234
+#define EEPROM_Default_VID 0x5678
+#define EEPROM_Default_CustomerID 0xAB
+#define EEPROM_Default_CustomerID_8188E 0x00
+#define EEPROM_Default_SubCustomerID 0xCD
+#define EEPROM_Default_Version 0
+
+#define EEPROM_CHANNEL_PLAN_FCC 0x0
+#define EEPROM_CHANNEL_PLAN_IC 0x1
+#define EEPROM_CHANNEL_PLAN_ETSI 0x2
+#define EEPROM_CHANNEL_PLAN_SPA 0x3
+#define EEPROM_CHANNEL_PLAN_FRANCE 0x4
+#define EEPROM_CHANNEL_PLAN_MKK 0x5
+#define EEPROM_CHANNEL_PLAN_MKK1 0x6
+#define EEPROM_CHANNEL_PLAN_ISRAEL 0x7
+#define EEPROM_CHANNEL_PLAN_TELEC 0x8
+#define EEPROM_CHANNEL_PLAN_GLOBAL_DOMA 0x9
+#define EEPROM_CHANNEL_PLAN_WORLD_WIDE_13 0xA
+#define EEPROM_CHANNEL_PLAN_NCC 0xB
+#define EEPROM_USB_OPTIONAL1 0xE
+#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80
+
+#define EEPROM_CID_DEFAULT 0x0
+#define EEPROM_CID_TOSHIBA 0x4
+#define EEPROM_CID_CCX 0x10 /* CCX test. */
+#define EEPROM_CID_QMI 0x0D
+#define EEPROM_CID_WHQL 0xFE
+#define RTL_EEPROM_ID 0x8129
+
+#endif /* __RTL8188E_SPEC_H__ */
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_sreset.h b/drivers/staging/rtl8188eu/include/rtl8188e_sreset.h
new file mode 100644
index 0000000..a29e695
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_sreset.h
@@ -0,0 +1,31 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _RTL8188E_SRESET_H_
+#define _RTL8188E_SRESET_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <rtw_sreset.h>
+
+void rtl8188e_silentreset_for_specific_platform(struct adapter *padapter);
+void rtl8188e_sreset_xmit_status_check(struct adapter *padapter);
+void rtl8188e_sreset_linked_status_check(struct adapter *padapter);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
new file mode 100644
index 0000000..cf7267a
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
@@ -0,0 +1,178 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTL8188E_XMIT_H__
+#define __RTL8188E_XMIT_H__
+
+#define MAX_TX_AGG_PACKET_NUMBER 0xFF
+/* */
+/* Queue Select Value in TxDesc */
+/* */
+#define QSLT_BK 0x2/* 0x01 */
+#define QSLT_BE 0x0
+#define QSLT_VI 0x5/* 0x4 */
+#define QSLT_VO 0x7/* 0x6 */
+#define QSLT_BEACON 0x10
+#define QSLT_HIGH 0x11
+#define QSLT_MGNT 0x12
+#define QSLT_CMD 0x13
+
+/* For 88e early mode */
+#define SET_EARLYMODE_PKTNUM(__pAddr, __Value) \
+ SET_BITS_TO_LE_4BYTE(__pAddr, 0, 3, __Value)
+#define SET_EARLYMODE_LEN0(__pAddr, __Value) \
+ SET_BITS_TO_LE_4BYTE(__pAddr, 4, 12, __Value)
+#define SET_EARLYMODE_LEN1(__pAddr, __Value) \
+ SET_BITS_TO_LE_4BYTE(__pAddr, 16, 12, __Value)
+#define SET_EARLYMODE_LEN2_1(__pAddr, __Value) \
+ SET_BITS_TO_LE_4BYTE(__pAddr, 28, 4, __Value)
+#define SET_EARLYMODE_LEN2_2(__pAddr, __Value) \
+ SET_BITS_TO_LE_4BYTE(__pAddr+4, 0, 8, __Value)
+#define SET_EARLYMODE_LEN3(__pAddr, __Value) \
+ SET_BITS_TO_LE_4BYTE(__pAddr+4, 8, 12, __Value)
+#define SET_EARLYMODE_LEN4(__pAddr, __Value) \
+ SET_BITS_TO_LE_4BYTE(__pAddr+4, 20, 12, __Value)
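+
+/* A minimal usage sketch (assuming SET_BITS_TO_LE_4BYTE() from the driver's
+ * bit helpers and an 8-byte early-mode header placed in front of the first
+ * frame of an aggregate): report two pending frames and their lengths so the
+ * MAC can pre-fetch them.
+ *
+ *	u8 em_hdr[8] = {0};
+ *
+ *	SET_EARLYMODE_PKTNUM(em_hdr, 2);
+ *	SET_EARLYMODE_LEN0(em_hdr, len0);
+ *	SET_EARLYMODE_LEN1(em_hdr, len1);
+ */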
+
+/* */
+/* defined for TX DESC Operation */
+/* */
+
+#define MAX_TID (15)
+
+/* OFFSET 0 */
+#define OFFSET_SZ 0
+#define OFFSET_SHT 16
+#define BMC BIT(24)
+#define LSG BIT(26)
+#define FSG BIT(27)
+#define OWN BIT(31)
+
+
+/* OFFSET 4 */
+#define PKT_OFFSET_SZ 0
+#define QSEL_SHT 8
+#define RATE_ID_SHT 16
+#define NAVUSEHDR BIT(20)
+#define SEC_TYPE_SHT 22
+#define PKT_OFFSET_SHT 26
+
+/* OFFSET 8 */
+#define AGG_EN BIT(12)
+#define AGG_BK BIT(16)
+#define AMPDU_DENSITY_SHT 20
+#define ANTSEL_A BIT(24)
+#define ANTSEL_B BIT(25)
+#define TX_ANT_CCK_SHT 26
+#define TX_ANTL_SHT 28
+#define TX_ANT_HT_SHT 30
+
+/* OFFSET 12 */
+#define SEQ_SHT 16
+#define EN_HWSEQ BIT(31)
+
+/* OFFSET 16 */
+#define QOS BIT(6)
+#define HW_SSN BIT(7)
+#define USERATE BIT(8)
+#define DISDATAFB BIT(10)
+#define CTS_2_SELF BIT(11)
+#define RTS_EN BIT(12)
+#define HW_RTS_EN BIT(13)
+#define DATA_SHORT BIT(24)
+#define PWR_STATUS_SHT 15
+#define DATA_SC_SHT 20
+#define DATA_BW BIT(25)
+
+/* OFFSET 20 */
+#define RTY_LMT_EN BIT(17)
+
+enum TXDESC_SC {
+ SC_DONT_CARE = 0x00,
+ SC_UPPER = 0x01,
+ SC_LOWER = 0x02,
+ SC_DUPLICATE = 0x03
+};
+/* OFFSET 20 */
+#define SGI BIT(6)
+#define USB_TXAGG_NUM_SHT 24
+
+#define txdesc_set_ccx_sw_88e(txdesc, value) \
+ do { \
+ ((struct txdesc_88e *)(txdesc))->sw1 = (((value)>>8) & 0x0f); \
+ ((struct txdesc_88e *)(txdesc))->sw0 = ((value) & 0xff); \
+ } while (0)
+
+struct txrpt_ccx_88e {
+ /* offset 0 */
+ u8 tag1:1;
+ u8 pkt_num:3;
+ u8 txdma_underflow:1;
+ u8 int_bt:1;
+ u8 int_tri:1;
+ u8 int_ccx:1;
+
+ /* offset 1 */
+ u8 mac_id:6;
+ u8 pkt_ok:1;
+ u8 bmc:1;
+
+ /* offset 2 */
+ u8 retry_cnt:6;
+ u8 lifetime_over:1;
+ u8 retry_over:1;
+
+ /* offset 3 */
+ u8 ccx_qtime0;
+ u8 ccx_qtime1;
+
+ /* offset 5 */
+ u8 final_data_rate;
+
+ /* offset 6 */
+ u8 sw1:4;
+ u8 qsel:4;
+
+ /* offset 7 */
+ u8 sw0;
+};
+
+#define txrpt_ccx_sw_88e(txrpt_ccx) ((txrpt_ccx)->sw0 + ((txrpt_ccx)->sw1<<8))
+#define txrpt_ccx_qtime_88e(txrpt_ccx) \
+ ((txrpt_ccx)->ccx_qtime0+((txrpt_ccx)->ccx_qtime1<<8))
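+
+/* A minimal decode sketch: given a TX report buffer from the firmware, the
+ * helpers above recover the 12-bit software tag written by
+ * txdesc_set_ccx_sw_88e() and the reported queuing time.
+ *
+ *	struct txrpt_ccx_88e *rpt = (struct txrpt_ccx_88e *)buf;
+ *	u16 sw_tag = txrpt_ccx_sw_88e(rpt);
+ *	u16 qtime = txrpt_ccx_qtime_88e(rpt);
+ *	bool ok = rpt->pkt_ok && !rpt->retry_over && !rpt->lifetime_over;
+ */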
+
+void rtl8188e_fill_fake_txdesc(struct adapter *padapter, u8 *pDesc,
+ u32 BufferLen, u8 IsPsPoll, u8 IsBTQosNull);
+s32 rtl8188eu_init_xmit_priv(struct adapter *padapter);
+void rtl8188eu_free_xmit_priv(struct adapter *padapter);
+s32 rtl8188eu_hal_xmit(struct adapter *padapter, struct xmit_frame *frame);
+s32 rtl8188eu_mgnt_xmit(struct adapter *padapter, struct xmit_frame *frame);
+s32 rtl8188eu_xmit_buf_handler(struct adapter *padapter);
+#define hal_xmit_handler rtl8188eu_xmit_buf_handler
+void rtl8188eu_xmit_tasklet(void *priv);
+s32 rtl8188eu_xmitframe_complete(struct adapter *padapter,
+ struct xmit_priv *pxmitpriv,
+ struct xmit_buf *pxmitbuf);
+
+void dump_txrpt_ccx_88e(void *buf);
+void handle_txrpt_ccx_88e(struct adapter *adapter, u8 *buf);
+
+void _dbg_dump_tx_info(struct adapter *padapter, int frame_tag,
+ struct tx_desc *ptxdesc);
+
+#endif /* __RTL8188E_XMIT_H__ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_android.h b/drivers/staging/rtl8188eu/include/rtw_android.h
new file mode 100644
index 0000000..e85bf1f
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_android.h
@@ -0,0 +1,64 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#ifndef __RTW_ANDROID_H__
+#define __RTW_ANDROID_H__
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+
+enum ANDROID_WIFI_CMD {
+ ANDROID_WIFI_CMD_START,
+ ANDROID_WIFI_CMD_STOP,
+ ANDROID_WIFI_CMD_SCAN_ACTIVE,
+ ANDROID_WIFI_CMD_SCAN_PASSIVE,
+ ANDROID_WIFI_CMD_RSSI,
+ ANDROID_WIFI_CMD_LINKSPEED,
+ ANDROID_WIFI_CMD_RXFILTER_START,
+ ANDROID_WIFI_CMD_RXFILTER_STOP,
+ ANDROID_WIFI_CMD_RXFILTER_ADD,
+ ANDROID_WIFI_CMD_RXFILTER_REMOVE,
+ ANDROID_WIFI_CMD_BTCOEXSCAN_START,
+ ANDROID_WIFI_CMD_BTCOEXSCAN_STOP,
+ ANDROID_WIFI_CMD_BTCOEXMODE,
+ ANDROID_WIFI_CMD_SETSUSPENDOPT,
+ ANDROID_WIFI_CMD_P2P_DEV_ADDR,
+ ANDROID_WIFI_CMD_SETFWPATH,
+ ANDROID_WIFI_CMD_SETBAND,
+ ANDROID_WIFI_CMD_GETBAND,
+ ANDROID_WIFI_CMD_COUNTRY,
+ ANDROID_WIFI_CMD_P2P_SET_NOA,
+ ANDROID_WIFI_CMD_P2P_GET_NOA,
+ ANDROID_WIFI_CMD_P2P_SET_PS,
+ ANDROID_WIFI_CMD_SET_AP_WPS_P2P_IE,
+ ANDROID_WIFI_CMD_MACADDR,
+ ANDROID_WIFI_CMD_BLOCK,
+ ANDROID_WIFI_CMD_WFD_ENABLE,
+ ANDROID_WIFI_CMD_WFD_DISABLE,
+ ANDROID_WIFI_CMD_WFD_SET_TCPPORT,
+ ANDROID_WIFI_CMD_WFD_SET_MAX_TPUT,
+ ANDROID_WIFI_CMD_WFD_SET_DEVTYPE,
+ ANDROID_WIFI_CMD_MAX
+};
+
+int rtw_android_cmdstr_to_num(char *cmdstr);
+int rtw_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd);
+
+#endif /* __RTW_ANDROID_H__ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_ap.h b/drivers/staging/rtl8188eu/include/rtw_ap.h
new file mode 100644
index 0000000..9233401
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_ap.h
@@ -0,0 +1,65 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_AP_H_
+#define __RTW_AP_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#ifdef CONFIG_88EU_AP_MODE
+
+/* external function */
+void rtw_indicate_sta_assoc_event(struct adapter *padapter,
+ struct sta_info *psta);
+void rtw_indicate_sta_disassoc_event(struct adapter *padapter,
+ struct sta_info *psta);
+void init_mlme_ap_info(struct adapter *padapter);
+void free_mlme_ap_info(struct adapter *padapter);
+void rtw_add_bcn_ie(struct adapter *padapter, struct wlan_bssid_ex *pnetwork,
+ u8 index, u8 *data, u8 len);
+void rtw_remove_bcn_ie(struct adapter *padapter,
+ struct wlan_bssid_ex *pnetwork, u8 index);
+void update_beacon(struct adapter *padapter, u8 ie_id,
+ u8 *oui, u8 tx);
+void add_RATid(struct adapter *padapter, struct sta_info *psta,
+ u8 rssi_level);
+void expire_timeout_chk(struct adapter *padapter);
+void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta);
+int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len);
+void rtw_set_macaddr_acl(struct adapter *padapter, int mode);
+int rtw_acl_add_sta(struct adapter *padapter, u8 *addr);
+int rtw_acl_remove_sta(struct adapter *padapter, u8 *addr);
+
+#ifdef CONFIG_88EU_AP_MODE
+void associated_clients_update(struct adapter *padapter, u8 updated);
+void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta);
+u8 bss_cap_update_on_sta_leave(struct adapter *padapter, struct sta_info *psta);
+void sta_info_update(struct adapter *padapter, struct sta_info *psta);
+void ap_sta_info_defer_update(struct adapter *padapter, struct sta_info *psta);
+u8 ap_free_sta(struct adapter *padapter, struct sta_info *psta,
+ bool active, u16 reason);
+int rtw_sta_flush(struct adapter *padapter);
+int rtw_ap_inform_ch_switch(struct adapter *padapter, u8 new_ch, u8 ch_offset);
+void start_ap_mode(struct adapter *padapter);
+void stop_ap_mode(struct adapter *padapter);
+#endif
+#endif /* end of CONFIG_88EU_AP_MODE */
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/rtw_br_ext.h b/drivers/staging/rtl8188eu/include/rtw_br_ext.h
new file mode 100644
index 0000000..f21e7a4
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_br_ext.h
@@ -0,0 +1,66 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _RTW_BR_EXT_H_
+#define _RTW_BR_EXT_H_
+
+#define MACADDRLEN 6
+#define _DEBUG_ERR DBG_88E
+#define _DEBUG_INFO DBG_88E
+#define DEBUG_WARN DBG_88E
+#define DEBUG_INFO DBG_88E
+#define DEBUG_ERR DBG_88E
+#define GET_MY_HWADDR(padapter) ((padapter)->eeprompriv.mac_addr)
+
+#define NAT25_HASH_BITS 4
+#define NAT25_HASH_SIZE (1 << NAT25_HASH_BITS)
+#define NAT25_AGEING_TIME 300
+
+#define MAX_NETWORK_ADDR_LEN 17
+
+struct nat25_network_db_entry {
+ struct nat25_network_db_entry *next_hash;
+ struct nat25_network_db_entry **pprev_hash;
+ atomic_t use_count;
+ unsigned char macAddr[6];
+ unsigned long ageing_timer;
+ unsigned char networkAddr[MAX_NETWORK_ADDR_LEN];
+};
+
+enum NAT25_METHOD {
+ NAT25_MIN,
+ NAT25_CHECK,
+ NAT25_INSERT,
+ NAT25_LOOKUP,
+ NAT25_PARSE,
+ NAT25_MAX
+};
+
+struct br_ext_info {
+ unsigned int nat25_disable;
+ unsigned int macclone_enable;
+ unsigned int dhcp_bcst_disable;
+ int addPPPoETag; /* 1: Add PPPoE relay-SID, 0: disable */
+ unsigned char nat25_dmzMac[MACADDRLEN];
+ unsigned int nat25sc_disable;
+};
+
+void nat25_db_cleanup(struct adapter *priv);
+
+#endif /* _RTW_BR_EXT_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_cmd.h b/drivers/staging/rtl8188eu/include/rtw_cmd.h
new file mode 100644
index 0000000..819285b
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_cmd.h
@@ -0,0 +1,991 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_CMD_H_
+#define __RTW_CMD_H_
+
+#include <wlan_bssdef.h>
+#include <rtw_rf.h>
+#include <rtw_led.h>
+
+#define C2H_MEM_SZ (16*1024)
+
+#include <osdep_service.h>
+#include <ieee80211.h> /* <ieee80211/ieee80211.h> */
+
+#define FREE_CMDOBJ_SZ 128
+
+#define MAX_CMDSZ 1024
+#define MAX_RSPSZ 512
+#define MAX_EVTSZ 1024
+
+#define CMDBUFF_ALIGN_SZ 512
+
+struct cmd_obj {
+ struct adapter *padapter;
+ u16 cmdcode;
+ u8 res;
+ u8 *parmbuf;
+ u32 cmdsz;
+ u8 *rsp;
+ u32 rspsz;
+ struct list_head list;
+};
+
+struct cmd_priv {
+ struct semaphore cmd_queue_sema;
+ struct semaphore terminate_cmdthread_sema;
+ struct __queue cmd_queue;
+ u8 cmd_seq;
+ u8 *cmd_buf; /* shall be non-paged, and 4 bytes aligned */
+ u8 *cmd_allocated_buf;
+ u8 *rsp_buf; /* shall be non-paged, and 4 bytes aligned */
+ u8 *rsp_allocated_buf;
+ u32 cmd_issued_cnt;
+ u32 cmd_done_cnt;
+ u32 rsp_cnt;
+ u8 cmdthd_running;
+ struct adapter *padapter;
+};
+
+struct evt_priv {
+ struct work_struct c2h_wk;
+ bool c2h_wk_alive;
+ struct rtw_cbuf *c2h_queue;
+ #define C2H_QUEUE_MAX_LEN 10
+ ATOMIC_T event_seq;
+ u8 *evt_buf; /* shall be non-paged, and 4 bytes aligned */
+ u8 *evt_allocated_buf;
+ u32 evt_done_cnt;
+};
+
+#define init_h2fwcmd_w_parm_no_rsp(pcmd, pparm, code) \
+do {\
+ _rtw_init_listhead(&pcmd->list);\
+ pcmd->cmdcode = code;\
+ pcmd->parmbuf = (u8 *)(pparm);\
+ pcmd->cmdsz = sizeof(*pparm);\
+ pcmd->rsp = NULL;\
+ pcmd->rspsz = 0;\
+} while (0)
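+
+/* A minimal usage sketch (error handling omitted; assumes the adapter's
+ * cmd_priv instance is the cmdpriv member): a command object and its
+ * parameter block are allocated, bound together by the macro above and
+ * handed to the command thread via rtw_enqueue_cmd().
+ *
+ *	struct cmd_obj *cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ *	struct disconnect_parm *parm = kzalloc(sizeof(*parm), GFP_KERNEL);
+ *
+ *	parm->deauth_timeout_ms = 500;
+ *	init_h2fwcmd_w_parm_no_rsp(cmd, parm, GEN_CMD_CODE(_DisConnect));
+ *	rtw_enqueue_cmd(&padapter->cmdpriv, cmd);
+ */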
+
+struct c2h_evt_hdr {
+ u8 id:4;
+ u8 plen:4;
+ u8 seq;
+ u8 payload[0];
+};
+
+#define c2h_evt_exist(c2h_evt) ((c2h_evt)->id || (c2h_evt)->plen)
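+
+/* A minimal parse sketch (handle_c2h() is a hypothetical consumer): an event
+ * buffer read from the firmware starts with this header, is considered valid
+ * when either id or plen is non-zero, and carries plen payload bytes right
+ * after the header.
+ *
+ *	struct c2h_evt_hdr *evt = (struct c2h_evt_hdr *)buf;
+ *
+ *	if (c2h_evt_exist(evt))
+ *		handle_c2h(evt->id, evt->payload, evt->plen);
+ */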
+
+u32 rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *obj);
+struct cmd_obj *rtw_dequeue_cmd(struct cmd_priv *pcmdpriv);
+void rtw_free_cmd_obj(struct cmd_obj *pcmd);
+
+int rtw_cmd_thread(void *context);
+
+u32 rtw_init_cmd_priv(struct cmd_priv *pcmdpriv);
+void rtw_free_cmd_priv(struct cmd_priv *pcmdpriv);
+
+u32 rtw_init_evt_priv(struct evt_priv *pevtpriv);
+void rtw_free_evt_priv(struct evt_priv *pevtpriv);
+void rtw_cmd_clr_isr(struct cmd_priv *pcmdpriv);
+void rtw_evt_notify_isr(struct evt_priv *pevtpriv);
+#ifdef CONFIG_88EU_P2P
+u8 p2p_protocol_wk_cmd(struct adapter *padapter, int intCmdType);
+#endif /* CONFIG_88EU_P2P */
+
+enum rtw_drvextra_cmd_id {
+ NONE_WK_CID,
+ DYNAMIC_CHK_WK_CID,
+ DM_CTRL_WK_CID,
+ PBC_POLLING_WK_CID,
+ POWER_SAVING_CTRL_WK_CID,/* IPS,AUTOSuspend */
+ LPS_CTRL_WK_CID,
+ ANT_SELECT_WK_CID,
+ P2P_PS_WK_CID,
+ P2P_PROTO_WK_CID,
+ CHECK_HIQ_WK_CID,/* for softap mode, check whether the hi queue is empty */
+ INTEl_WIDI_WK_CID,
+ C2H_WK_CID,
+ RTP_TIMER_CFG_WK_CID,
+ MAX_WK_CID
+};
+
+enum LPS_CTRL_TYPE {
+ LPS_CTRL_SCAN = 0,
+ LPS_CTRL_JOINBSS = 1,
+ LPS_CTRL_CONNECT = 2,
+ LPS_CTRL_DISCONNECT = 3,
+ LPS_CTRL_SPECIAL_PACKET = 4,
+ LPS_CTRL_LEAVE = 5,
+};
+
+enum RFINTFS {
+ SWSI,
+ HWSI,
+ HWPI,
+};
+
+/*
+Caller Mode: Infra, Ad-HoC(C)
+
+Notes: To enter USB suspend mode
+
+Command Mode
+
+*/
+struct usb_suspend_parm {
+ u32 action;/* 1: sleep, 0:resume */
+};
+
+/*
+Caller Mode: Infra, Ad-HoC
+
+Notes: To join a known BSS.
+
+Command-Event Mode
+
+*/
+
+/*
+Caller Mode: Infra, Ad-Hoc
+
+Notes: To join the specified bss
+
+Command Event Mode
+
+*/
+struct joinbss_parm {
+ struct wlan_bssid_ex network;
+};
+
+/*
+Caller Mode: Infra, Ad-HoC(C)
+
+Notes: To disconnect the current associated BSS
+
+Command Mode
+
+*/
+struct disconnect_parm {
+ u32 deauth_timeout_ms;
+};
+
+/*
+Caller Mode: AP, Ad-HoC(M)
+
+Notes: To create a BSS
+
+Command Mode
+*/
+struct createbss_parm {
+ struct wlan_bssid_ex network;
+};
+
+struct setopmode_parm {
+ u8 mode;
+ u8 rsvd[3];
+};
+
+/*
+Caller Mode: AP, Ad-HoC, Infra
+
+Notes: To ask the RTL8711 to perform a site survey
+
+Command-Event Mode
+
+*/
+
+#define RTW_SSID_SCAN_AMOUNT 9 /* for WEXT_CSCAN_AMOUNT 9 */
+#define RTW_CHANNEL_SCAN_AMOUNT (14+37)
+struct sitesurvey_parm {
+ int scan_mode; /* active: 1, passive: 0 */
+ u8 ssid_num;
+ u8 ch_num;
+ struct ndis_802_11_ssid ssid[RTW_SSID_SCAN_AMOUNT];
+ struct rtw_ieee80211_channel ch[RTW_CHANNEL_SCAN_AMOUNT];
+};
+
+/*
+Caller Mode: Any
+
+Notes: To set the auth type of RTL8711. open/shared/802.1x
+
+Command Mode
+
+*/
+struct setauth_parm {
+ u8 mode; /* 0: legacy open, 1: legacy shared 2: 802.1x */
+ u8 _1x; /* 0: PSK, 1: TLS */
+ u8 rsvd[2];
+};
+
+/*
+Caller Mode: Infra
+
+a. algorithm: wep40, wep104, tkip & aes
+b. keytype: grp key/unicast key
+c. key contents
+
+when shared key ==> keyid is the camid
+when 802.1x ==> keyid [0:1] ==> grp key
+when 802.1x ==> keyid > 2 ==> unicast key
+
+*/
+struct setkey_parm {
+ u8 algorithm; /* could be none, wep40, TKIP, CCMP, wep104 */
+ u8 keyid;
+ u8 grpkey; /* 1: this is the grpkey for 802.1x.
+ * 0: this is the unicast key for 802.1x */
+ u8 set_tx; /* 1: main tx key for wep. 0: other key. */
+ u8 key[16]; /* this could be 40 or 104 */
+};
+
+/*
+When in AP or Ad-Hoc mode, this is used to
+allocate a sw/hw entry for a newly associated sta.
+
+Command
+
+when shared key ==> algorithm/keyid
+
+*/
+struct set_stakey_parm {
+ u8 addr[ETH_ALEN];
+ u8 algorithm;
+ u8 id;/* currently for erasing cam entry if
+ * algorithm == _NO_PRIVACY_ */
+ u8 key[16];
+};
+
+struct set_stakey_rsp {
+ u8 addr[ETH_ALEN];
+ u8 keyid;
+ u8 rsvd;
+};
+
+/*
+Caller Ad-Hoc/AP
+
+Command -Rsp(AID == CAMID) mode
+
+This is to force the fw to add a sta_data entry per the driver's request.
+
+FW will write a cam entry associated with it.
+
+*/
+struct set_assocsta_parm {
+ u8 addr[ETH_ALEN];
+};
+
+struct set_assocsta_rsp {
+ u8 cam_id;
+ u8 rsvd[3];
+};
+
+/*
+ Caller Ad-Hoc/AP
+
+ Command mode
+
+ This is to force the fw to delete a sta_data entry per the driver's request.
+
+ FW will invalidate the cam entry associated with it.
+
+*/
+struct del_assocsta_parm {
+ u8 addr[ETH_ALEN];
+};
+
+/*
+Caller Mode: AP/Ad-HoC(M)
+
+Notes: To notify the fw that the given staid has changed its power state
+
+Command Mode
+
+*/
+struct setstapwrstate_parm {
+ u8 staid;
+ u8 status;
+ u8 hwaddr[6];
+};
+
+/*
+Caller Mode: Any
+
+Notes: To setup the basic rate of RTL8711
+
+Command Mode
+
+*/
+struct setbasicrate_parm {
+ u8 basicrates[NumRates];
+};
+
+/*
+Caller Mode: Any
+
+Notes: To read the current basic rate
+
+Command-Rsp Mode
+
+*/
+struct getbasicrate_parm {
+ u32 rsvd;
+};
+
+struct getbasicrate_rsp {
+ u8 basicrates[NumRates];
+};
+
+/*
+Caller Mode: Any
+
+Notes: To setup the data rate of RTL8711
+
+Command Mode
+
+*/
+struct setdatarate_parm {
+ u8 mac_id;
+ u8 datarates[NumRates];
+};
+
+/*
+Caller Mode: Any
+
+Notes: To read the current data rate
+
+Command-Rsp Mode
+
+*/
+struct getdatarate_parm {
+ u32 rsvd;
+
+};
+struct getdatarate_rsp {
+ u8 datarates[NumRates];
+};
+
+/*
+Caller Mode: Any
+AP: AP can use the info for the contents of beacon frame
+Infra: STA can use the info when sitesurveying
+Ad-HoC(M): Like AP
+Ad-HoC(C): Like STA
+
+Notes: To set the phy capability of the NIC
+
+Command Mode
+
+*/
+
+struct setphyinfo_parm {
+ struct regulatory_class class_sets[NUM_REGULATORYS];
+ u8 status;
+};
+
+struct getphyinfo_parm {
+ u32 rsvd;
+};
+
+struct getphyinfo_rsp {
+ struct regulatory_class class_sets[NUM_REGULATORYS];
+ u8 status;
+};
+
+/*
+Caller Mode: Any
+
+Notes: To set the channel/modem/band
+This command will be used when channel/modem/band is changed.
+
+Command Mode
+
+*/
+struct setphy_parm {
+ u8 rfchannel;
+ u8 modem;
+};
+
+/*
+Caller Mode: Any
+
+Notes: To get the current setting of channel/modem/band
+
+Command-Rsp Mode
+
+*/
+struct getphy_parm {
+ u32 rsvd;
+
+};
+struct getphy_rsp {
+ u8 rfchannel;
+ u8 modem;
+};
+
+struct readBB_parm {
+ u8 offset;
+};
+struct readBB_rsp {
+ u8 value;
+};
+
+struct readTSSI_parm {
+ u8 offset;
+};
+struct readTSSI_rsp {
+ u8 value;
+};
+
+struct writeBB_parm {
+ u8 offset;
+ u8 value;
+};
+
+struct readRF_parm {
+ u8 offset;
+};
+struct readRF_rsp {
+ u32 value;
+};
+
+struct writeRF_parm {
+ u32 offset;
+ u32 value;
+};
+
+struct getrfintfs_parm {
+ u8 rfintfs;
+};
+
+struct Tx_Beacon_param {
+ struct wlan_bssid_ex network;
+};
+
+/*
+ Notes: This command is used for H2C/C2H loopback testing
+
+ mac[0] == 0
+ ==> CMD mode, return H2C_SUCCESS.
+ The following conditions must be true under CMD mode:
+ mac[1] == mac[4], mac[2] == mac[3], mac[0]=mac[5]= 0;
+ s0 == 0x1234, s1 == 0xabcd, w0 == 0x78563412, w1 == 0x5aa5def7;
+ s2 == (b1 << 8 | b0);
+
+ mac[0] == 1
+ ==> CMD_RSP mode, return H2C_SUCCESS_RSP
+
+ The rsp layout shall be:
+ rsp: parm:
+ mac[0] = mac[5];
+ mac[1] = mac[4];
+ mac[2] = mac[3];
+ mac[3] = mac[2];
+ mac[4] = mac[1];
+ mac[5] = mac[0];
+ s0 = s1;
+ s1 = swap16(s0);
+ w0 = swap32(w1);
+ b0 = b1
+ s2 = s0 + s1
+ b1 = b0
+ w1 = w0
+
+ mac[0] == 2
+ ==> CMD_EVENT mode, return H2C_SUCCESS
+ The event layout shall be:
+ event: parm:
+ mac[0] = mac[5];
+ mac[1] = mac[4];
+ mac[2] = event's seq no, starting from 1 to parm's mac[3]
+ mac[3] = mac[2];
+ mac[4] = mac[1];
+ mac[5] = mac[0];
+ s0 = swap16(s0) - event.mac[2];
+ s1 = s1 + event.mac[2];
+ w0 = swap32(w0);
+ b0 = b1
+ s2 = s0 + event.mac[2]
+ b1 = b0
+ w1 = swap32(w1) - event.mac[2];
+
+ parm->mac[3] is the total number of events that the host requested.
+ Each event carries the same content as the cmd's parm.
+*/
+
+/* CMD param Format for driver extra cmd handler */
+struct drvextra_cmd_parm {
+ int ec_id; /* extra cmd id */
+ int type_size; /* Can use this field as the type id or command size */
+ unsigned char *pbuf;
+};
+
+/*------------------- Below are used for RF/BB tuning ---------------------*/
+
+struct setantenna_parm {
+ u8 tx_antset;
+ u8 rx_antset;
+ u8 tx_antenna;
+ u8 rx_antenna;
+};
+
+struct enrateadaptive_parm {
+ u32 en;
+};
+
+struct settxagctbl_parm {
+ u32 txagc[MAX_RATES_LENGTH];
+};
+
+struct gettxagctbl_parm {
+ u32 rsvd;
+};
+struct gettxagctbl_rsp {
+ u32 txagc[MAX_RATES_LENGTH];
+};
+
+struct setagcctrl_parm {
+ u32 agcctrl; /* 0: pure hw, 1: fw */
+};
+
+struct setssup_parm {
+ u32 ss_ForceUp[MAX_RATES_LENGTH];
+};
+
+struct getssup_parm {
+ u32 rsvd;
+};
+
+struct getssup_rsp {
+ u8 ss_ForceUp[MAX_RATES_LENGTH];
+};
+
+struct setssdlevel_parm {
+ u8 ss_DLevel[MAX_RATES_LENGTH];
+};
+
+struct getssdlevel_parm {
+ u32 rsvd;
+};
+
+struct getssdlevel_rsp {
+ u8 ss_DLevel[MAX_RATES_LENGTH];
+};
+
+struct setssulevel_parm {
+ u8 ss_ULevel[MAX_RATES_LENGTH];
+};
+
+struct getssulevel_parm {
+ u32 rsvd;
+};
+
+struct getssulevel_rsp {
+ u8 ss_ULevel[MAX_RATES_LENGTH];
+};
+
+struct setcountjudge_parm {
+ u8 count_judge[MAX_RATES_LENGTH];
+};
+
+struct getcountjudge_parm {
+ u32 rsvd;
+};
+
+struct getcountjudge_rsp {
+ u8 count_judge[MAX_RATES_LENGTH];
+};
+
+struct setratable_parm {
+ u8 ss_ForceUp[NumRates];
+ u8 ss_ULevel[NumRates];
+ u8 ss_DLevel[NumRates];
+ u8 count_judge[NumRates];
+};
+
+struct getratable_parm {
+ uint rsvd;
+};
+
+struct getratable_rsp {
+ u8 ss_ForceUp[NumRates];
+ u8 ss_ULevel[NumRates];
+ u8 ss_DLevel[NumRates];
+ u8 count_judge[NumRates];
+};
+
+/* to get TX,RX retry count */
+
+struct gettxretrycnt_parm {
+ unsigned int rsvd;
+};
+
+struct gettxretrycnt_rsp {
+ unsigned long tx_retrycnt;
+};
+
+struct getrxretrycnt_parm {
+ unsigned int rsvd;
+};
+
+struct getrxretrycnt_rsp {
+ unsigned long rx_retrycnt;
+};
+
+/* to get BCNOK,BCNERR count */
+struct getbcnokcnt_parm {
+ unsigned int rsvd;
+};
+
+struct getbcnokcnt_rsp {
+ unsigned long bcnokcnt;
+};
+
+struct getbcnerrcnt_parm {
+ unsigned int rsvd;
+};
+
+struct getbcnerrcnt_rsp {
+ unsigned long bcnerrcnt;
+};
+
+/* to get current TX power level */
+struct getcurtxpwrlevel_parm {
+ unsigned int rsvd;
+};
+struct getcurtxpwrlevel_rspi {
+ unsigned short tx_power;
+};
+
+struct setprobereqextraie_parm {
+ unsigned char e_id;
+ unsigned char ie_len;
+ unsigned char ie[0];
+};
+
+struct setassocreqextraie_parm {
+ unsigned char e_id;
+ unsigned char ie_len;
+ unsigned char ie[0];
+};
+
+struct setproberspextraie_parm {
+ unsigned char e_id;
+ unsigned char ie_len;
+ unsigned char ie[0];
+};
+
+struct setassocrspextraie_parm {
+ unsigned char e_id;
+ unsigned char ie_len;
+ unsigned char ie[0];
+};
+
+struct addBaReq_parm {
+ unsigned int tid;
+ u8 addr[ETH_ALEN];
+};
+
+/*H2C Handler index: 46 */
+struct set_ch_parm {
+ u8 ch;
+ u8 bw;
+ u8 ch_offset;
+};
+
+/*H2C Handler index: 59 */
+struct SetChannelPlan_param {
+ u8 channel_plan;
+};
+
+/*H2C Handler index: 60 */
+struct LedBlink_param {
+ struct LED_871x *pLed;
+};
+
+/*H2C Handler index: 61 */
+struct SetChannelSwitch_param {
+ u8 new_ch_no;
+};
+
+/*H2C Handler index: 62 */
+struct TDLSoption_param {
+ u8 addr[ETH_ALEN];
+ u8 option;
+};
+
+#define GEN_CMD_CODE(cmd) cmd ## _CMD_
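+
+/* A minimal expansion example: GEN_CMD_CODE(_JoinBss) pastes to _JoinBss_CMD_,
+ * which is both the enum rtw_h2c_cmd value (14) stored in cmd_obj.cmdcode and
+ * the index of the matching entry in the rtw_cmd_callback[] table below. */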
+
+/*
+
+Result:
+0x00: success
+0x01: success, and check the response.
+0x02: cmd ignored due to a duplicated sequence number
+0x03: cmd dropped due to invalid cmd code
+0x04: reserved.
+
+*/
+
+#define H2C_RSP_OFFSET 512
+
+#define H2C_SUCCESS 0x00
+#define H2C_SUCCESS_RSP 0x01
+#define H2C_DUPLICATED 0x02
+#define H2C_DROPPED 0x03
+#define H2C_PARAMETERS_ERROR 0x04
+#define H2C_REJECTED 0x05
+#define H2C_CMD_OVERFLOW 0x06
+#define H2C_RESERVED 0x07
+
+u8 rtw_setassocsta_cmd(struct adapter *padapter, u8 *mac_addr);
+u8 rtw_setstandby_cmd(struct adapter *padapter, uint action);
+u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
+ int ssid_num, struct rtw_ieee80211_channel *ch,
+ int ch_num);
+u8 rtw_createbss_cmd(struct adapter *padapter);
+u8 rtw_createbss_cmd_ex(struct adapter *padapter, unsigned char *pbss,
+ unsigned int sz);
+u8 rtw_setphy_cmd(struct adapter *padapter, u8 modem, u8 ch);
+u8 rtw_setstakey_cmd(struct adapter *padapter, u8 *psta, u8 unicast_key);
+u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry, u8 enqueue);
+u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork);
+u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueue);
+u8 rtw_setopmode_cmd(struct adapter *padapter, enum ndis_802_11_network_infra networktype);
+u8 rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset);
+u8 rtw_setbasicrate_cmd(struct adapter *padapter, u8 *rateset);
+u8 rtw_setbbreg_cmd(struct adapter *padapter, u8 offset, u8 val);
+u8 rtw_setrfreg_cmd(struct adapter *padapter, u8 offset, u32 val);
+u8 rtw_getbbreg_cmd(struct adapter *padapter, u8 offset, u8 *pval);
+u8 rtw_getrfreg_cmd(struct adapter *padapter, u8 offset, u8 *pval);
+u8 rtw_setrfintfs_cmd(struct adapter *padapter, u8 mode);
+u8 rtw_setrttbl_cmd(struct adapter *padapter, struct setratable_parm *prate_table);
+u8 rtw_getrttbl_cmd(struct adapter *padapter, struct getratable_rsp *pval);
+
+u8 rtw_gettssi_cmd(struct adapter *padapter, u8 offset, u8 *pval);
+u8 rtw_setfwdig_cmd(struct adapter *padapter, u8 type);
+u8 rtw_setfwra_cmd(struct adapter *padapter, u8 type);
+
+u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr);
+
+u8 rtw_dynamic_chk_wk_cmd(struct adapter *adapter);
+
+u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue);
+u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 minRptTime);
+
+u8 rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue);
+u8 rtw_ps_cmd(struct adapter *padapter);
+
+#ifdef CONFIG_88EU_AP_MODE
+u8 rtw_chk_hi_queue_cmd(struct adapter *padapter);
+#endif
+
+u8 rtw_set_ch_cmd(struct adapter *padapter, u8 ch, u8 bw, u8 ch_offset, u8 enqueue);
+u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue);
+u8 rtw_led_blink_cmd(struct adapter *padapter, struct LED_871x *pLed);
+u8 rtw_set_csa_cmd(struct adapter *padapter, u8 new_ch_no);
+u8 rtw_tdls_cmd(struct adapter *padapter, u8 *addr, u8 option);
+
+u8 rtw_c2h_wk_cmd(struct adapter *padapter, u8 *c2h_evt);
+
+u8 rtw_drvextra_cmd_hdl(struct adapter *padapter, unsigned char *pbuf);
+
+void rtw_survey_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd);
+void rtw_disassoc_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd);
+void rtw_joinbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd);
+void rtw_createbss_cmd_callback(struct adapter *adapt, struct cmd_obj *pcmd);
+void rtw_getbbrfreg_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd);
+void rtw_readtssi_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd);
+
+void rtw_setstaKey_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd);
+void rtw_setassocsta_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cm);
+void rtw_getrttbl_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd);
+
+struct _cmd_callback {
+ u32 cmd_code;
+ void (*callback)(struct adapter *padapter, struct cmd_obj *cmd);
+};
+
+enum rtw_h2c_cmd {
+ GEN_CMD_CODE(_Read_MACREG), /*0*/
+ GEN_CMD_CODE(_Write_MACREG),
+ GEN_CMD_CODE(_Read_BBREG),
+ GEN_CMD_CODE(_Write_BBREG),
+ GEN_CMD_CODE(_Read_RFREG),
+ GEN_CMD_CODE(_Write_RFREG), /*5*/
+ GEN_CMD_CODE(_Read_EEPROM),
+ GEN_CMD_CODE(_Write_EEPROM),
+ GEN_CMD_CODE(_Read_EFUSE),
+ GEN_CMD_CODE(_Write_EFUSE),
+
+ GEN_CMD_CODE(_Read_CAM), /*10*/
+ GEN_CMD_CODE(_Write_CAM),
+ GEN_CMD_CODE(_setBCNITV),
+ GEN_CMD_CODE(_setMBIDCFG),
+ GEN_CMD_CODE(_JoinBss), /*14*/
+ GEN_CMD_CODE(_DisConnect), /*15*/
+ GEN_CMD_CODE(_CreateBss),
+ GEN_CMD_CODE(_SetOpMode),
+ GEN_CMD_CODE(_SiteSurvey), /*18*/
+ GEN_CMD_CODE(_SetAuth),
+
+ GEN_CMD_CODE(_SetKey), /*20*/
+ GEN_CMD_CODE(_SetStaKey),
+ GEN_CMD_CODE(_SetAssocSta),
+ GEN_CMD_CODE(_DelAssocSta),
+ GEN_CMD_CODE(_SetStaPwrState),
+ GEN_CMD_CODE(_SetBasicRate), /*25*/
+ GEN_CMD_CODE(_GetBasicRate),
+ GEN_CMD_CODE(_SetDataRate),
+ GEN_CMD_CODE(_GetDataRate),
+ GEN_CMD_CODE(_SetPhyInfo),
+
+ GEN_CMD_CODE(_GetPhyInfo), /*30*/
+ GEN_CMD_CODE(_SetPhy),
+ GEN_CMD_CODE(_GetPhy),
+ GEN_CMD_CODE(_readRssi),
+ GEN_CMD_CODE(_readGain),
+ GEN_CMD_CODE(_SetAtim), /*35*/
+ GEN_CMD_CODE(_SetPwrMode),
+ GEN_CMD_CODE(_JoinbssRpt),
+ GEN_CMD_CODE(_SetRaTable),
+ GEN_CMD_CODE(_GetRaTable),
+
+ GEN_CMD_CODE(_GetCCXReport), /*40*/
+ GEN_CMD_CODE(_GetDTMReport),
+ GEN_CMD_CODE(_GetTXRateStatistics),
+ GEN_CMD_CODE(_SetUsbSuspend),
+ GEN_CMD_CODE(_SetH2cLbk),
+ GEN_CMD_CODE(_AddBAReq), /*45*/
+ GEN_CMD_CODE(_SetChannel), /*46*/
+ GEN_CMD_CODE(_SetTxPower),
+ GEN_CMD_CODE(_SwitchAntenna),
+ GEN_CMD_CODE(_SetCrystalCap),
+ GEN_CMD_CODE(_SetSingleCarrierTx), /*50*/
+
+ GEN_CMD_CODE(_SetSingleToneTx),/*51*/
+ GEN_CMD_CODE(_SetCarrierSuppressionTx),
+ GEN_CMD_CODE(_SetContinuousTx),
+ GEN_CMD_CODE(_SwitchBandwidth), /*54*/
+ GEN_CMD_CODE(_TX_Beacon), /*55*/
+
+ GEN_CMD_CODE(_Set_MLME_EVT), /*56*/
+ GEN_CMD_CODE(_Set_Drv_Extra), /*57*/
+ GEN_CMD_CODE(_Set_H2C_MSG), /*58*/
+
+ GEN_CMD_CODE(_SetChannelPlan), /*59*/
+ GEN_CMD_CODE(_LedBlink), /*60*/
+
+ GEN_CMD_CODE(_SetChannelSwitch), /*61*/
+ GEN_CMD_CODE(_TDLS), /*62*/
+
+ MAX_H2CCMD
+};
+
+#define _GetBBReg_CMD_ _Read_BBREG_CMD_
+#define _SetBBReg_CMD_ _Write_BBREG_CMD_
+#define _GetRFReg_CMD_ _Read_RFREG_CMD_
+#define _SetRFReg_CMD_ _Write_RFREG_CMD_
+
+#ifdef _RTW_CMD_C_
+static struct _cmd_callback rtw_cmd_callback[] = {
+ {GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/
+ {GEN_CMD_CODE(_Write_MACREG), NULL},
+ {GEN_CMD_CODE(_Read_BBREG), &rtw_getbbrfreg_cmdrsp_callback},
+ {GEN_CMD_CODE(_Write_BBREG), NULL},
+ {GEN_CMD_CODE(_Read_RFREG), &rtw_getbbrfreg_cmdrsp_callback},
+ {GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/
+ {GEN_CMD_CODE(_Read_EEPROM), NULL},
+ {GEN_CMD_CODE(_Write_EEPROM), NULL},
+ {GEN_CMD_CODE(_Read_EFUSE), NULL},
+ {GEN_CMD_CODE(_Write_EFUSE), NULL},
+
+ {GEN_CMD_CODE(_Read_CAM), NULL}, /*10*/
+ {GEN_CMD_CODE(_Write_CAM), NULL},
+ {GEN_CMD_CODE(_setBCNITV), NULL},
+ {GEN_CMD_CODE(_setMBIDCFG), NULL},
+ {GEN_CMD_CODE(_JoinBss), &rtw_joinbss_cmd_callback}, /*14*/
+ {GEN_CMD_CODE(_DisConnect), &rtw_disassoc_cmd_callback}, /*15*/
+ {GEN_CMD_CODE(_CreateBss), &rtw_createbss_cmd_callback},
+ {GEN_CMD_CODE(_SetOpMode), NULL},
+ {GEN_CMD_CODE(_SiteSurvey), &rtw_survey_cmd_callback}, /*18*/
+ {GEN_CMD_CODE(_SetAuth), NULL},
+
+ {GEN_CMD_CODE(_SetKey), NULL}, /*20*/
+ {GEN_CMD_CODE(_SetStaKey), &rtw_setstaKey_cmdrsp_callback},
+ {GEN_CMD_CODE(_SetAssocSta), &rtw_setassocsta_cmdrsp_callback},
+ {GEN_CMD_CODE(_DelAssocSta), NULL},
+ {GEN_CMD_CODE(_SetStaPwrState), NULL},
+ {GEN_CMD_CODE(_SetBasicRate), NULL}, /*25*/
+ {GEN_CMD_CODE(_GetBasicRate), NULL},
+ {GEN_CMD_CODE(_SetDataRate), NULL},
+ {GEN_CMD_CODE(_GetDataRate), NULL},
+ {GEN_CMD_CODE(_SetPhyInfo), NULL},
+
+ {GEN_CMD_CODE(_GetPhyInfo), NULL}, /*30*/
+ {GEN_CMD_CODE(_SetPhy), NULL},
+ {GEN_CMD_CODE(_GetPhy), NULL},
+ {GEN_CMD_CODE(_readRssi), NULL},
+ {GEN_CMD_CODE(_readGain), NULL},
+ {GEN_CMD_CODE(_SetAtim), NULL}, /*35*/
+ {GEN_CMD_CODE(_SetPwrMode), NULL},
+ {GEN_CMD_CODE(_JoinbssRpt), NULL},
+ {GEN_CMD_CODE(_SetRaTable), NULL},
+ {GEN_CMD_CODE(_GetRaTable), NULL},
+
+ {GEN_CMD_CODE(_GetCCXReport), NULL}, /*40*/
+ {GEN_CMD_CODE(_GetDTMReport), NULL},
+ {GEN_CMD_CODE(_GetTXRateStatistics), NULL},
+ {GEN_CMD_CODE(_SetUsbSuspend), NULL},
+ {GEN_CMD_CODE(_SetH2cLbk), NULL},
+ {GEN_CMD_CODE(_AddBAReq), NULL}, /*45*/
+ {GEN_CMD_CODE(_SetChannel), NULL}, /*46*/
+ {GEN_CMD_CODE(_SetTxPower), NULL},
+ {GEN_CMD_CODE(_SwitchAntenna), NULL},
+ {GEN_CMD_CODE(_SetCrystalCap), NULL},
+ {GEN_CMD_CODE(_SetSingleCarrierTx), NULL}, /*50*/
+
+ {GEN_CMD_CODE(_SetSingleToneTx), NULL}, /*51*/
+ {GEN_CMD_CODE(_SetCarrierSuppressionTx), NULL},
+ {GEN_CMD_CODE(_SetContinuousTx), NULL},
+ {GEN_CMD_CODE(_SwitchBandwidth), NULL}, /*54*/
+ {GEN_CMD_CODE(_TX_Beacon), NULL},/*55*/
+
+ {GEN_CMD_CODE(_Set_MLME_EVT), NULL},/*56*/
+ {GEN_CMD_CODE(_Set_Drv_Extra), NULL},/*57*/
+ {GEN_CMD_CODE(_Set_H2C_MSG), NULL},/*58*/
+ {GEN_CMD_CODE(_SetChannelPlan), NULL},/*59*/
+ {GEN_CMD_CODE(_LedBlink), NULL},/*60*/
+
+ {GEN_CMD_CODE(_SetChannelSwitch), NULL},/*61*/
+ {GEN_CMD_CODE(_TDLS), NULL},/*62*/
+};
+#endif
+
+#endif /* _CMD_H_ */
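[Editor's note, not part of the patch: the command enum above and the rtw_cmd_callback[] table are kept in the same order, so a command code doubles as an index into its callback entry, with NULL meaning no response handling. A minimal standalone sketch of that pattern only, using made-up command names rather than the driver's types:]

#include <stdio.h>

/* Illustrative only: mirror the "enum order == table index" pattern. */
enum cmd_code {
	CMD_READ_REG,   /* 0 */
	CMD_WRITE_REG,  /* 1 */
	CMD_JOIN_BSS,   /* 2 */
	MAX_CMD
};

struct cmd_callback {
	enum cmd_code code;
	void (*callback)(void *rsp); /* NULL: no response handling needed */
};

static void join_bss_done(void *rsp)
{
	printf("join response handled: %s\n", (const char *)rsp);
}

/* The table must stay in the same order as the enum. */
static const struct cmd_callback callbacks[MAX_CMD] = {
	{ CMD_READ_REG,  NULL },
	{ CMD_WRITE_REG, NULL },
	{ CMD_JOIN_BSS,  join_bss_done },
};

static void dispatch(enum cmd_code code, void *rsp)
{
	if (code < MAX_CMD && callbacks[code].callback)
		callbacks[code].callback(rsp);
}

int main(void)
{
	dispatch(CMD_WRITE_REG, NULL);        /* no callback registered */
	dispatch(CMD_JOIN_BSS, "bss joined"); /* runs join_bss_done() */
	return 0;
}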
diff --git a/drivers/staging/rtl8188eu/include/rtw_debug.h b/drivers/staging/rtl8188eu/include/rtw_debug.h
new file mode 100644
index 0000000..c6b193a
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_debug.h
@@ -0,0 +1,290 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_DEBUG_H__
+#define __RTW_DEBUG_H__
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+
+#define _drv_always_ 1
+#define _drv_emerg_ 2
+#define _drv_alert_ 3
+#define _drv_crit_ 4
+#define _drv_err_ 5
+#define _drv_warning_ 6
+#define _drv_notice_ 7
+#define _drv_info_ 8
+#define _drv_debug_ 9
+
+
+#define _module_rtl871x_xmit_c_ BIT(0)
+#define _module_xmit_osdep_c_ BIT(1)
+#define _module_rtl871x_recv_c_ BIT(2)
+#define _module_recv_osdep_c_ BIT(3)
+#define _module_rtl871x_mlme_c_ BIT(4)
+#define _module_mlme_osdep_c_ BIT(5)
+#define _module_rtl871x_sta_mgt_c_ BIT(6)
+#define _module_rtl871x_cmd_c_ BIT(7)
+#define _module_cmd_osdep_c_ BIT(8)
+#define _module_rtl871x_io_c_ BIT(9)
+#define _module_io_osdep_c_ BIT(10)
+#define _module_os_intfs_c_ BIT(11)
+#define _module_rtl871x_security_c_ BIT(12)
+#define _module_rtl871x_eeprom_c_ BIT(13)
+#define _module_hal_init_c_ BIT(14)
+#define _module_hci_hal_init_c_ BIT(15)
+#define _module_rtl871x_ioctl_c_ BIT(16)
+#define _module_rtl871x_ioctl_set_c_ BIT(17)
+#define _module_rtl871x_ioctl_query_c_ BIT(18)
+#define _module_rtl871x_pwrctrl_c_ BIT(19)
+#define _module_hci_intfs_c_ BIT(20)
+#define _module_hci_ops_c_ BIT(21)
+#define _module_osdep_service_c_ BIT(22)
+#define _module_mp_ BIT(23)
+#define _module_hci_ops_os_c_ BIT(24)
+#define _module_rtl871x_ioctl_os_c BIT(25)
+#define _module_rtl8712_cmd_c_ BIT(26)
+#define _module_rtl8192c_xmit_c_ BIT(27)
+#define _module_hal_xmit_c_ BIT(28)
+#define _module_efuse_ BIT(29)
+#define _module_rtl8712_recv_c_ BIT(30)
+#define _module_rtl8712_led_c_ BIT(31)
+
+#define DRIVER_PREFIX "R8188EU: "
+
+extern u32 GlobalDebugLevel;
+
+#define DBG_88E_LEVEL(_level, fmt, arg...) \
+ do { \
+ if (_level <= GlobalDebugLevel) \
+ pr_info(DRIVER_PREFIX"ERROR " fmt, ##arg); \
+ } while (0)
+
+#define DBG_88E(...) \
+ do { \
+ if (_drv_err_ <= GlobalDebugLevel) \
+ pr_info(DRIVER_PREFIX __VA_ARGS__); \
+ } while (0)
+
+#define MSG_88E(...) \
+ do { \
+ if (_drv_err_ <= GlobalDebugLevel) \
+ pr_info(DRIVER_PREFIX __VA_ARGS__); \
+ } while (0)
+
+#define RT_TRACE(_comp, _level, fmt) \
+ do { \
+ if (_level <= GlobalDebugLevel) { \
+ pr_info("%s [0x%08x,%d]", DRIVER_PREFIX, \
+ (unsigned int)_comp, _level); \
+ pr_info fmt; \
+ } \
+ } while (0)
+
+#define _func_enter_ \
+ do { \
+ if (GlobalDebugLevel >= _drv_debug_) \
+ pr_info("%s : %s enters at %d\n", \
+ DRIVER_PREFIX, __func__, __LINE__); \
+ } while (0)
+
+#define _func_exit_ \
+ do { \
+ if (GlobalDebugLevel >= _drv_debug_) \
+ pr_info("%s : %s exits at %d\n", \
+ DRIVER_PREFIX, __func__, __LINE__); \
+ } while (0)
+
+#define RT_PRINT_DATA(_comp, _level, _titlestring, _hexdata, _hexdatalen)\
+ do { \
+ if (_level <= GlobalDebugLevel) { \
+ int __i; \
+ u8 *ptr = (u8 *)_hexdata; \
+ pr_info("%s", DRIVER_PREFIX); \
+ pr_info(_titlestring); \
+ for (__i = 0; __i < (int)_hexdatalen; __i++ ) { \
+ pr_info("%02X%s", ptr[__i], \
+ (((__i + 1) % 4) == 0) ? \
+ " " : " "); \
+ if (((__i + 1) % 16) == 0) \
+ printk("\n"); \
+ } \
+ printk("\n"); \
+ } \
+ } while (0)
+
+int proc_get_drv_version(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_write_reg(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_set_write_reg(struct file *file, const char __user *buffer,
+ unsigned long count, void *data);
+int proc_get_read_reg(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_set_read_reg(struct file *file, const char __user *buffer,
+ unsigned long count, void *data);
+
+int proc_get_fwstate(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+int proc_get_sec_info(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+int proc_get_mlmext_state(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_qos_option(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+int proc_get_ht_option(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+int proc_get_rf_info(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+int proc_get_ap_info(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_adapter_state(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_trx_info(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_mac_reg_dump1(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_mac_reg_dump2(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_mac_reg_dump3(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_bb_reg_dump1(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_bb_reg_dump2(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_bb_reg_dump3(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_rf_reg_dump1(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_rf_reg_dump2(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_rf_reg_dump3(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_rf_reg_dump4(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+#ifdef CONFIG_88EU_AP_MODE
+
+int proc_get_all_sta_info(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+#endif
+
+int proc_get_best_channel(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_rx_signal(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_set_rx_signal(struct file *file, const char __user *buffer,
+ unsigned long count, void *data);
+
+int proc_get_ht_enable(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_set_ht_enable(struct file *file, const char __user *buffer,
+ unsigned long count, void *data);
+
+int proc_get_cbw40_enable(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_set_cbw40_enable(struct file *file, const char __user *buffer,
+ unsigned long count, void *data);
+
+int proc_get_ampdu_enable(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_set_ampdu_enable(struct file *file, const char __user *buffer,
+ unsigned long count, void *data);
+
+int proc_get_rx_stbc(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_set_rx_stbc(struct file *file, const char __user *buffer,
+ unsigned long count, void *data);
+
+int proc_get_two_path_rssi(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_rssi_disp(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_set_rssi_disp(struct file *file, const char __user *buffer,
+ unsigned long count, void *data);
+
+#ifdef CONFIG_BT_COEXIST
+int proc_get_btcoex_dbg(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_set_btcoex_dbg(struct file *file, const char *buffer,
+ signed long count, void *data);
+
+#endif /* CONFIG_BT_COEXIST */
+
+#endif /* __RTW_DEBUG_H__ */
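[Editor's note, not part of the patch: the macros above gate every message on GlobalDebugLevel, so raising or lowering one integer controls how chatty the driver is. A self-contained sketch of the same level-gating idea, with invented names and printf() standing in for pr_info():]

#include <stdio.h>

/* Illustrative levels; they mirror the _drv_*_ ordering (lower = more severe). */
#define LVL_ERR    5
#define LVL_INFO   8
#define LVL_DEBUG  9

static unsigned int global_debug_level = LVL_INFO;

/* A message is emitted only when its level is at or below the threshold. */
#define DBG_PRINT(level, fmt, ...) \
	do { \
		if ((level) <= global_debug_level) \
			printf("DEMO: " fmt, ##__VA_ARGS__); \
	} while (0)

int main(void)
{
	DBG_PRINT(LVL_ERR, "shown: %d\n", 1);    /* 5 <= 8, printed */
	DBG_PRINT(LVL_DEBUG, "hidden: %d\n", 2); /* 9 >  8, suppressed */
	return 0;
}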
diff --git a/drivers/staging/rtl8188eu/include/rtw_eeprom.h b/drivers/staging/rtl8188eu/include/rtw_eeprom.h
new file mode 100644
index 0000000..b2672c3
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_eeprom.h
@@ -0,0 +1,130 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_EEPROM_H__
+#define __RTW_EEPROM_H__
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#define RTL8712_EEPROM_ID 0x8712
+
+#define HWSET_MAX_SIZE_512 512
+#define EEPROM_MAX_SIZE HWSET_MAX_SIZE_512
+
+#define CLOCK_RATE 50 /* 100us */
+
+/* EEPROM opcodes */
+#define EEPROM_READ_OPCODE 06
+#define EEPROM_WRITE_OPCODE 05
+#define EEPROM_ERASE_OPCODE 07
+#define EEPROM_EWEN_OPCODE 19 /* Erase/write enable */
+#define EEPROM_EWDS_OPCODE 16 /* Erase/write disable */
+
+/* Country codes */
+#define USA 0x555320
+#define EUROPE 0x1 /* temp, should be provided later */
+#define JAPAN 0x2 /* temp, should be provided later */
+
+#define EEPROM_CID_DEFAULT 0x0
+#define EEPROM_CID_ALPHA 0x1
+#define EEPROM_CID_Senao 0x3
+#define EEPROM_CID_NetCore 0x5
+#define EEPROM_CID_CAMEO 0X8
+#define EEPROM_CID_SITECOM 0x9
+#define EEPROM_CID_COREGA 0xB
+#define EEPROM_CID_EDIMAX_BELK 0xC
+#define EEPROM_CID_SERCOMM_BELK 0xE
+#define EEPROM_CID_CAMEO1 0xF
+#define EEPROM_CID_WNC_COREGA 0x12
+#define EEPROM_CID_CLEVO 0x13
+#define EEPROM_CID_WHQL 0xFE
+
+/* Customer ID, note that: */
+/* This variable is initialized through EEPROM or the registry; */
+/* however, its definition may differ from that in EEPROM for */
+/* EEPROM size considerations, so we have to perform proper translation
+ * between them. */
+/* Besides, the CustomerID from the registry takes precedence over that
+ * of EEPROM. */
+/* Defined below. 060703, by rcnjko. */
+enum RT_CUSTOMER_ID {
+ RT_CID_DEFAULT = 0,
+ RT_CID_8187_ALPHA0 = 1,
+ RT_CID_8187_SERCOMM_PS = 2,
+ RT_CID_8187_HW_LED = 3,
+ RT_CID_8187_NETGEAR = 4,
+ RT_CID_WHQL = 5,
+ RT_CID_819x_CAMEO = 6,
+ RT_CID_819x_RUNTOP = 7,
+ RT_CID_819x_Senao = 8,
+ RT_CID_TOSHIBA = 9, /* Merge by Jacken, 2008/01/31. */
+ RT_CID_819x_Netcore = 10,
+ RT_CID_Nettronix = 11,
+ RT_CID_DLINK = 12,
+ RT_CID_PRONET = 13,
+ RT_CID_COREGA = 14,
+ RT_CID_CHINA_MOBILE = 15,
+ RT_CID_819x_ALPHA = 16,
+ RT_CID_819x_Sitecom = 17,
+ RT_CID_CCX = 18, /* It is set under the CCX logo test and is not required
+ * for CCX functions, but for test behavior such as
+ * retry limit and tx report. By Bruce, 2009-02-17. */
+ RT_CID_819x_Lenovo = 19,
+ RT_CID_819x_QMI = 20,
+ RT_CID_819x_Edimax_Belkin = 21,
+ RT_CID_819x_Sercomm_Belkin = 22,
+ RT_CID_819x_CAMEO1 = 23,
+ RT_CID_819x_MSI = 24,
+ RT_CID_819x_Acer = 25,
+ RT_CID_819x_AzWave_ASUS = 26,
+ RT_CID_819x_AzWave = 27, /* For AzWave in PCIe.
+ * The ID is used by AzWave, not only ASUS. */
+ RT_CID_819x_HP = 28,
+ RT_CID_819x_WNC_COREGA = 29,
+ RT_CID_819x_Arcadyan_Belkin = 30,
+ RT_CID_819x_SAMSUNG = 31,
+ RT_CID_819x_CLEVO = 32,
+ RT_CID_819x_DELL = 33,
+ RT_CID_819x_PRONETS = 34,
+ RT_CID_819x_Edimax_ASUS = 35,
+ RT_CID_819x_CAMEO_NETGEAR = 36,
+ RT_CID_PLANEX = 37,
+ RT_CID_CC_C = 38,
+ RT_CID_819x_Xavi = 39,
+ RT_CID_819x_FUNAI_TV = 40,
+ RT_CID_819x_ALPHA_WD = 41,
+};
+
+struct eeprom_priv {
+ u8 bautoload_fail_flag;
+ u8 bloadfile_fail_flag;
+ u8 bloadmac_fail_flag;
+ u8 mac_addr[6]; /* PermanentAddress */
+ u16 channel_plan;
+ u8 EepromOrEfuse;
+ u8 efuse_eeprom_data[HWSET_MAX_SIZE_512];
+};
+
+void eeprom_write16(struct adapter *padapter, u16 reg, u16 data);
+u16 eeprom_read16(struct adapter *padapter, u16 reg);
+void read_eeprom_content(struct adapter *padapter);
+void eeprom_read_sz(struct adapter *adapt, u16 reg, u8 *data, u32 sz);
+void read_eeprom_content_by_attrib(struct adapter *padapter);
+
+#endif /* __RTL871X_EEPROM_H__ */
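[Editor's note, not part of the patch: the comment above says the customer ID stored in EEPROM has to be translated into the driver's RT_CUSTOMER_ID space. A standalone sketch of such a translation; the mapping below is hypothetical and only shows the shape of the lookup, not the driver's actual table:]

#include <stdio.h>

/* Hypothetical target IDs for illustration only. */
enum customer_id { CID_DEFAULT, CID_ALPHA, CID_SITECOM, CID_UNKNOWN };

static enum customer_id translate_eeprom_cid(unsigned char eeprom_cid)
{
	switch (eeprom_cid) {
	case 0x0: return CID_DEFAULT; /* EEPROM_CID_DEFAULT */
	case 0x1: return CID_ALPHA;   /* EEPROM_CID_ALPHA   */
	case 0x9: return CID_SITECOM; /* EEPROM_CID_SITECOM */
	default:  return CID_UNKNOWN; /* fall back to a safe default */
	}
}

int main(void)
{
	printf("EEPROM CID 0x9 -> customer id %d\n", translate_eeprom_cid(0x9));
	return 0;
}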
diff --git a/drivers/staging/rtl8188eu/include/rtw_efuse.h b/drivers/staging/rtl8188eu/include/rtw_efuse.h
new file mode 100644
index 0000000..cee6b5e
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_efuse.h
@@ -0,0 +1,150 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_EFUSE_H__
+#define __RTW_EFUSE_H__
+
+#include <osdep_service.h>
+
+#define EFUSE_ERROE_HANDLE 1
+
+#define PG_STATE_HEADER 0x01
+#define PG_STATE_WORD_0 0x02
+#define PG_STATE_WORD_1 0x04
+#define PG_STATE_WORD_2 0x08
+#define PG_STATE_WORD_3 0x10
+#define PG_STATE_DATA 0x20
+
+#define PG_SWBYTE_H 0x01
+#define PG_SWBYTE_L 0x02
+
+#define PGPKT_DATA_SIZE 8
+
+#define EFUSE_WIFI 0
+#define EFUSE_BT 1
+
+enum _EFUSE_DEF_TYPE {
+ TYPE_EFUSE_MAX_SECTION = 0,
+ TYPE_EFUSE_REAL_CONTENT_LEN = 1,
+ TYPE_AVAILABLE_EFUSE_BYTES_BANK = 2,
+ TYPE_AVAILABLE_EFUSE_BYTES_TOTAL = 3,
+ TYPE_EFUSE_MAP_LEN = 4,
+ TYPE_EFUSE_PROTECT_BYTES_BANK = 5,
+ TYPE_EFUSE_CONTENT_LEN_BANK = 6,
+};
+
+/* E-Fuse */
+#define EFUSE_MAP_SIZE 512
+#define EFUSE_MAX_SIZE 256
+/* end of E-Fuse */
+
+#define EFUSE_MAX_MAP_LEN 512
+#define EFUSE_MAX_HW_SIZE 512
+#define EFUSE_MAX_SECTION_BASE 16
+
+#define EXT_HEADER(header) ((header & 0x1F) == 0x0F)
+#define ALL_WORDS_DISABLED(wde) ((wde & 0x0F) == 0x0F)
+#define GET_HDR_OFFSET_2_0(header) ((header & 0xE0) >> 5)
+
+#define EFUSE_REPEAT_THRESHOLD_ 3
+
+/* The following is for BT Efuse definition */
+#define EFUSE_BT_MAX_MAP_LEN 1024
+#define EFUSE_MAX_BANK 4
+#define EFUSE_MAX_BT_BANK (EFUSE_MAX_BANK-1)
+/*--------------------------Define Parameters-------------------------------*/
+#define EFUSE_MAX_WORD_UNIT 4
+
+/*------------------------------Define structure----------------------------*/
+struct pgpkt {
+ u8 offset;
+ u8 word_en;
+ u8 data[8];
+ u8 word_cnts;
+};
+
+/*------------------------------Define structure----------------------------*/
+struct efuse_hal {
+ u8 fakeEfuseBank;
+ u32 fakeEfuseUsedBytes;
+ u8 fakeEfuseContent[EFUSE_MAX_HW_SIZE];
+ u8 fakeEfuseInitMap[EFUSE_MAX_MAP_LEN];
+ u8 fakeEfuseModifiedMap[EFUSE_MAX_MAP_LEN];
+
+ u16 BTEfuseUsedBytes;
+ u8 BTEfuseUsedPercentage;
+ u8 BTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
+ u8 BTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN];
+ u8 BTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN];
+
+ u16 fakeBTEfuseUsedBytes;
+ u8 fakeBTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
+ u8 fakeBTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN];
+ u8 fakeBTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN];
+};
+
+/*------------------------Export global variable----------------------------*/
+extern u8 fakeEfuseBank;
+extern u32 fakeEfuseUsedBytes;
+extern u8 fakeEfuseContent[];
+extern u8 fakeEfuseInitMap[];
+extern u8 fakeEfuseModifiedMap[];
+
+extern u32 BTEfuseUsedBytes;
+extern u8 BTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
+extern u8 BTEfuseInitMap[];
+extern u8 BTEfuseModifiedMap[];
+
+extern u32 fakeBTEfuseUsedBytes;
+extern u8 fakeBTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
+extern u8 fakeBTEfuseInitMap[];
+extern u8 fakeBTEfuseModifiedMap[];
+/*------------------------Export global variable----------------------------*/
+
+u8 efuse_GetCurrentSize(struct adapter *adapter, u16 *size);
+u16 efuse_GetMaxSize(struct adapter *adapter);
+u8 rtw_efuse_access(struct adapter *adapter, u8 read, u16 start_addr,
+ u16 cnts, u8 *data);
+u8 rtw_efuse_map_read(struct adapter *adapter, u16 addr, u16 cnts, u8 *data);
+u8 rtw_efuse_map_write(struct adapter *adapter, u16 addr, u16 cnts, u8 *data);
+u8 rtw_BT_efuse_map_read(struct adapter *adapter, u16 addr,
+ u16 cnts, u8 *data);
+u8 rtw_BT_efuse_map_write(struct adapter *adapter, u16 addr,
+ u16 cnts, u8 *data);
+u16 Efuse_GetCurrentSize(struct adapter *adapter, u8 efusetype, bool test);
+u8 Efuse_CalculateWordCnts(u8 word_en);
+void ReadEFuseByte(struct adapter *adapter, u16 _offset, u8 *pbuf, bool test);
+void EFUSE_GetEfuseDefinition(struct adapter *adapt, u8 type, u8 type1,
+ void *out, bool bPseudoTest);
+u8 efuse_OneByteRead(struct adapter *adapter, u16 addr, u8 *data, bool test);
+u8 efuse_OneByteWrite(struct adapter *adapter, u16 addr, u8 data, bool test);
+
+void Efuse_PowerSwitch(struct adapter *adapt,u8 bWrite,u8 PwrState);
+int Efuse_PgPacketRead(struct adapter *adapt, u8 offset, u8 *data, bool test);
+int Efuse_PgPacketWrite(struct adapter *adapter, u8 offset, u8 word, u8 *data,
+ bool test);
+void efuse_WordEnableDataRead(u8 word_en, u8 *sourdata, u8 *targetdata);
+u8 Efuse_WordEnableDataWrite(struct adapter *adapter, u16 efuse_addr,
+ u8 word_en, u8 *data, bool test);
+
+u8 EFUSE_Read1Byte(struct adapter *adapter, u16 address);
+void EFUSE_ShadowMapUpdate(struct adapter *adapter, u8 efusetype, bool test);
+void EFUSE_ShadowRead(struct adapter *adapt, u8 type, u16 offset, u32 *val);
+
+#endif
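[Editor's note, not part of the patch: the header/word_en macros describe how efuse packets are parsed — a header byte whose low five bits equal 0x0F marks an extended header, and the low nibble of word_en selects which 2-byte words of a section are present. A standalone sketch using the same bit tests; the word-count helper assumes the usual convention in this driver family that a cleared bit marks an enabled word:]

#include <stdio.h>

/* Same bit tests as rtw_efuse.h, with the arguments parenthesized. */
#define EXT_HEADER(header)         (((header) & 0x1F) == 0x0F)
#define ALL_WORDS_DISABLED(wde)    (((wde) & 0x0F) == 0x0F)
#define GET_HDR_OFFSET_2_0(header) (((header) & 0xE0) >> 5)

/* Assumption: a cleared bit in the low nibble of word_en marks an enabled
 * 2-byte word, which is the usual convention in this driver family. */
static unsigned int word_cnts(unsigned char word_en)
{
	unsigned int i, n = 0;

	for (i = 0; i < 4; i++)
		if (!(word_en & (1 << i)))
			n++;
	return n;
}

int main(void)
{
	unsigned char hdr = 0x2F; /* low 5 bits == 0x0F: extended header */

	if (EXT_HEADER(hdr))
		printf("extended header, offset bits [2:0] = %u\n",
		       (unsigned int)GET_HDR_OFFSET_2_0(hdr));

	printf("word_en 0x0C enables %u words\n", word_cnts(0x0C));
	printf("all words disabled? %d\n", ALL_WORDS_DISABLED(0x0F));
	return 0;
}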
diff --git a/drivers/staging/rtl8188eu/include/rtw_event.h b/drivers/staging/rtl8188eu/include/rtw_event.h
new file mode 100644
index 0000000..52151dc
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_event.h
@@ -0,0 +1,115 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _RTW_EVENT_H_
+#define _RTW_EVENT_H_
+
+#include <osdep_service.h>
+
+#include <wlan_bssdef.h>
+#include <linux/semaphore.h>
+#include <linux/sem.h>
+
+/*
+Used to report that a BSS has been scanned.
+*/
+struct survey_event {
+ struct wlan_bssid_ex bss;
+};
+
+/*
+Used to report that the requested site survey has been done.
+
+bss_cnt indicates the number of BSSs that have been reported.
+*/
+struct surveydone_event {
+ unsigned int bss_cnt;
+
+};
+
+/*
+Used to report the link result of joining the given BSS.
+
+join_res:
+-1: authentication failed
+-2: association failed
+> 0: TID
+*/
+struct joinbss_event {
+ struct wlan_network network;
+};
+
+/*
+Used to report that a given STA has joined the created BSS.
+It is used in AP/Ad-Hoc(M) mode.
+*/
+
+struct stassoc_event {
+ unsigned char macaddr[6];
+ unsigned char rsvd[2];
+ int cam_id;
+};
+
+struct stadel_event {
+ unsigned char macaddr[6];
+ unsigned char rsvd[2]; /* for reason */
+ int mac_id;
+};
+
+struct addba_event {
+ unsigned int tid;
+};
+
+#define GEN_EVT_CODE(event) event ## _EVT_
+
+struct fwevent {
+ u32 parmsize;
+ void (*event_callback)(struct adapter *dev, u8 *pbuf);
+};
+
+#define C2HEVENT_SZ 32
+
+struct event_node {
+ unsigned char *node;
+ unsigned char evt_code;
+ unsigned short evt_sz;
+ int *caller_ff_tail;
+ int caller_ff_sz;
+};
+
+struct c2hevent_queue {
+ int head;
+ int tail;
+ struct event_node nodes[C2HEVENT_SZ];
+ unsigned char seq;
+};
+
+#define NETWORK_QUEUE_SZ 4
+
+struct network_queue {
+ int head;
+ int tail;
+ struct wlan_bssid_ex networks[NETWORK_QUEUE_SZ];
+};
+
+#endif /* _WLANEVENT_H_ */
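[Editor's note, not part of the patch: c2hevent_queue and network_queue are small fixed-size queues driven by head/tail indices. The header does not show the indexing code, so the following is only a generic head/tail ring sketch with invented names, not the driver's implementation:]

#include <stdio.h>

#define QUEUE_SZ 4 /* small on purpose, like C2HEVENT_SZ / NETWORK_QUEUE_SZ */

struct ring {
	int head;
	int tail;
	int slots[QUEUE_SZ];
};

/* Classic head/tail ring: tail is where the next entry goes, head is where
 * the next entry is consumed from; one slot stays empty so "full" and
 * "empty" can be told apart. */
static int ring_put(struct ring *r, int v)
{
	int next = (r->tail + 1) % QUEUE_SZ;

	if (next == r->head)
		return -1; /* full */
	r->slots[r->tail] = v;
	r->tail = next;
	return 0;
}

static int ring_get(struct ring *r, int *v)
{
	if (r->head == r->tail)
		return -1; /* empty */
	*v = r->slots[r->head];
	r->head = (r->head + 1) % QUEUE_SZ;
	return 0;
}

int main(void)
{
	struct ring r = { 0, 0, { 0 } };
	int v;

	ring_put(&r, 10);
	ring_put(&r, 20);
	while (!ring_get(&r, &v))
		printf("%d\n", v);
	return 0;
}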
diff --git a/drivers/staging/rtl8188eu/include/rtw_ht.h b/drivers/staging/rtl8188eu/include/rtw_ht.h
new file mode 100644
index 0000000..beb210b
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_ht.h
@@ -0,0 +1,44 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _RTW_HT_H_
+#define _RTW_HT_H_
+
+#include <osdep_service.h>
+#include "wifi.h"
+
+struct ht_priv {
+ u32 ht_option;
+ u32 ampdu_enable;/* for enable Tx A-MPDU */
+ u32 tx_amsdu_enable;/* for enable Tx A-MSDU */
+ u32 tx_amdsu_maxlen; /* 1: 8k, 0: 4k; default: 8k, for tx */
+ u32 rx_ampdu_maxlen; /* for rx reordering ctrl win_sz,
+ * updated in join_callback. */
+ u8 bwmode;/* */
+ u8 ch_offset;/* PRIME_CHNL_OFFSET */
+ u8 sgi;/* short GI */
+
+ /* for processing Tx A-MPDU */
+ u8 agg_enable_bitmap;
+ u8 candidate_tid_bitmap;
+
+ struct rtw_ieee80211_ht_cap ht_cap;
+};
+
+#endif /* _RTL871X_HT_H_ */
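[Editor's note, not part of the patch: agg_enable_bitmap and candidate_tid_bitmap track Tx A-MPDU state with one bit per TID. A standalone sketch of that per-TID bitmap idea, using illustrative helper names that are not part of the driver:]

#include <stdio.h>

/* 8 TIDs, one bit each; mirrors the idea behind agg_enable_bitmap. */
static unsigned char agg_enable_bitmap;

static void enable_ampdu_for_tid(unsigned int tid)
{
	if (tid < 8)
		agg_enable_bitmap |= 1u << tid;
}

static int ampdu_enabled_for_tid(unsigned int tid)
{
	return tid < 8 && (agg_enable_bitmap & (1u << tid));
}

int main(void)
{
	enable_ampdu_for_tid(0); /* best-effort traffic */
	enable_ampdu_for_tid(6); /* voice */
	printf("TID 6: %d, TID 3: %d\n",
	       ampdu_enabled_for_tid(6), ampdu_enabled_for_tid(3));
	return 0;
}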
diff --git a/drivers/staging/rtl8188eu/include/rtw_io.h b/drivers/staging/rtl8188eu/include/rtw_io.h
new file mode 100644
index 0000000..eb6f0e5
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_io.h
@@ -0,0 +1,387 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#ifndef _RTW_IO_H_
+#define _RTW_IO_H_
+
+#include <osdep_service.h>
+#include <osdep_intf.h>
+
+#include <asm/byteorder.h>
+#include <linux/semaphore.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+
+#define rtw_usb_buffer_alloc(dev, size, dma) \
+ usb_alloc_coherent((dev), (size), (in_interrupt() ? \
+ GFP_ATOMIC : GFP_KERNEL), (dma))
+#define rtw_usb_buffer_free(dev, size, addr, dma) \
+ usb_free_coherent((dev), (size), (addr), (dma))
+
+#define NUM_IOREQ 8
+
+#define MAX_PROT_SZ (64-16)
+
+#define _IOREADY 0
+#define _IO_WAIT_COMPLETE 1
+#define _IO_WAIT_RSP 2
+
+/* IO COMMAND TYPE */
+#define _IOSZ_MASK_ (0x7F)
+#define _IO_WRITE_ BIT(7)
+#define _IO_FIXED_ BIT(8)
+#define _IO_BURST_ BIT(9)
+#define _IO_BYTE_ BIT(10)
+#define _IO_HW_ BIT(11)
+#define _IO_WORD_ BIT(12)
+#define _IO_SYNC_ BIT(13)
+#define _IO_CMDMASK_ (0x1F80)
+
+/*
+ For prompt-mode access, the caller shall free the io_req;
+ otherwise, the io_handler will free it.
+*/
+
+/* IO STATUS TYPE */
+#define _IO_ERR_ BIT(2)
+#define _IO_SUCCESS_ BIT(1)
+#define _IO_DONE_ BIT(0)
+
+#define IO_RD32 (_IO_SYNC_ | _IO_WORD_)
+#define IO_RD16 (_IO_SYNC_ | _IO_HW_)
+#define IO_RD8 (_IO_SYNC_ | _IO_BYTE_)
+
+#define IO_RD32_ASYNC (_IO_WORD_)
+#define IO_RD16_ASYNC (_IO_HW_)
+#define IO_RD8_ASYNC (_IO_BYTE_)
+
+#define IO_WR32 (_IO_WRITE_ | _IO_SYNC_ | _IO_WORD_)
+#define IO_WR16 (_IO_WRITE_ | _IO_SYNC_ | _IO_HW_)
+#define IO_WR8 (_IO_WRITE_ | _IO_SYNC_ | _IO_BYTE_)
+
+#define IO_WR32_ASYNC (_IO_WRITE_ | _IO_WORD_)
+#define IO_WR16_ASYNC (_IO_WRITE_ | _IO_HW_)
+#define IO_WR8_ASYNC (_IO_WRITE_ | _IO_BYTE_)
+
+/*
+ Only synchronous burst access is provided.
+*/
+
+#define IO_WR_BURST(x) \
+ (_IO_WRITE_ | _IO_SYNC_ | _IO_BURST_ | ((x) & _IOSZ_MASK_))
+#define IO_RD_BURST(x) \
+ (_IO_SYNC_ | _IO_BURST_ | ((x) & _IOSZ_MASK_))
+
+/* below is for the intf_option bit definition... */
+
+#define _INTF_ASYNC_ BIT(0) /* support async io */
+
+struct intf_priv;
+struct intf_hdl;
+struct io_queue;
+
+struct _io_ops {
+ u8 (*_read8)(struct intf_hdl *pintfhdl, u32 addr);
+ u16 (*_read16)(struct intf_hdl *pintfhdl, u32 addr);
+ u32 (*_read32)(struct intf_hdl *pintfhdl, u32 addr);
+ int (*_write8)(struct intf_hdl *pintfhdl, u32 addr, u8 val);
+ int (*_write16)(struct intf_hdl *pintfhdl, u32 addr, u16 val);
+ int (*_write32)(struct intf_hdl *pintfhdl, u32 addr, u32 val);
+ int (*_writeN)(struct intf_hdl *pintfhdl, u32 addr, u32 length,
+ u8 *pdata);
+ int (*_write8_async)(struct intf_hdl *pintfhdl, u32 addr, u8 val);
+ int (*_write16_async)(struct intf_hdl *pintfhdl, u32 addr, u16 val);
+ int (*_write32_async)(struct intf_hdl *pintfhdl, u32 addr, u32 val);
+ void (*_read_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
+ u8 *pmem);
+ void (*_write_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
+ u8 *pmem);
+ void (*_sync_irp_protocol_rw)(struct io_queue *pio_q);
+ u32 (*_read_interrupt)(struct intf_hdl *pintfhdl, u32 addr);
+ u32 (*_read_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
+ u8 *pmem);
+ u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
+ u8 *pmem);
+ u32 (*_write_scsi)(struct intf_hdl *pintfhdl, u32 cnt, u8 *pmem);
+ void (*_read_port_cancel)(struct intf_hdl *pintfhdl);
+ void (*_write_port_cancel)(struct intf_hdl *pintfhdl);
+};
+
+struct io_req {
+ struct list_head list;
+ u32 addr;
+ u32 val;
+ u32 command;
+ u32 status;
+ u8 *pbuf;
+ struct semaphore sema;
+
+ void (*_async_io_callback)(struct adapter *padater,
+ struct io_req *pio_req, u8 *cnxt);
+ u8 *cnxt;
+};
+
+struct intf_hdl {
+ struct adapter *padapter;
+ struct dvobj_priv *pintf_dev;
+ struct _io_ops io_ops;
+};
+
+struct reg_protocol_rd {
+#ifdef __LITTLE_ENDIAN
+ /* DW1 */
+ u32 NumOfTrans:4;
+ u32 Reserved1:4;
+ u32 Reserved2:24;
+ /* DW2 */
+ u32 ByteCount:7;
+ u32 WriteEnable:1; /* 0:read, 1:write */
+ u32 FixOrContinuous:1; /* 0:continuous, 1: Fix */
+ u32 BurstMode:1;
+ u32 Byte1Access:1;
+ u32 Byte2Access:1;
+ u32 Byte4Access:1;
+ u32 Reserved3:3;
+ u32 Reserved4:16;
+ /* DW3 */
+ u32 BusAddress;
+ /* DW4 */
+ /* u32 Value; */
+#else
+/* DW1 */
+ u32 Reserved1:4;
+ u32 NumOfTrans:4;
+ u32 Reserved2:24;
+ /* DW2 */
+ u32 WriteEnable:1;
+ u32 ByteCount:7;
+ u32 Reserved3:3;
+ u32 Byte4Access:1;
+
+ u32 Byte2Access:1;
+ u32 Byte1Access:1;
+ u32 BurstMode:1;
+ u32 FixOrContinuous:1;
+ u32 Reserved4:16;
+ /* DW3 */
+ u32 BusAddress;
+
+ /* DW4 */
+#endif
+};
+
+struct reg_protocol_wt {
+#ifdef __LITTLE_ENDIAN
+ /* DW1 */
+ u32 NumOfTrans:4;
+ u32 Reserved1:4;
+ u32 Reserved2:24;
+ /* DW2 */
+ u32 ByteCount:7;
+ u32 WriteEnable:1; /* 0:read, 1:write */
+ u32 FixOrContinuous:1; /* 0:continuous, 1: Fix */
+ u32 BurstMode:1;
+ u32 Byte1Access:1;
+ u32 Byte2Access:1;
+ u32 Byte4Access:1;
+ u32 Reserved3:3;
+ u32 Reserved4:16;
+ /* DW3 */
+ u32 BusAddress;
+ /* DW4 */
+ u32 Value;
+#else
+ /* DW1 */
+ u32 Reserved1 :4;
+ u32 NumOfTrans:4;
+ u32 Reserved2:24;
+ /* DW2 */
+ u32 WriteEnable:1;
+ u32 ByteCount:7;
+ u32 Reserved3:3;
+ u32 Byte4Access:1;
+ u32 Byte2Access:1;
+ u32 Byte1Access:1;
+ u32 BurstMode:1;
+ u32 FixOrContinuous:1;
+ u32 Reserved4:16;
+ /* DW3 */
+ u32 BusAddress;
+ /* DW4 */
+ u32 Value;
+#endif
+};
+
+/*
+Below is the data structure used by _io_handler
+*/
+
+struct io_queue {
+ spinlock_t lock;
+ struct list_head free_ioreqs;
+ struct list_head pending; /* The io_req list that will be served
+ * in the single protocol read/write.*/
+ struct list_head processing;
+ u8 *free_ioreqs_buf; /* 4-byte aligned */
+ u8 *pallocated_free_ioreqs_buf;
+ struct intf_hdl intf;
+};
+
+struct io_priv {
+ struct adapter *padapter;
+ struct intf_hdl intf;
+};
+
+uint ioreq_flush(struct adapter *adapter, struct io_queue *ioqueue);
+void sync_ioreq_enqueue(struct io_req *preq,struct io_queue *ioqueue);
+uint sync_ioreq_flush(struct adapter *adapter, struct io_queue *ioqueue);
+uint free_ioreq(struct io_req *preq, struct io_queue *pio_queue);
+struct io_req *alloc_ioreq(struct io_queue *pio_q);
+
+uint register_intf_hdl(u8 *dev, struct intf_hdl *pintfhdl);
+void unregister_intf_hdl(struct intf_hdl *pintfhdl);
+
+void _rtw_attrib_read(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+void _rtw_attrib_write(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+
+u8 _rtw_read8(struct adapter *adapter, u32 addr);
+u16 _rtw_read16(struct adapter *adapter, u32 addr);
+u32 _rtw_read32(struct adapter *adapter, u32 addr);
+void _rtw_read_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+void _rtw_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+void _rtw_read_port_cancel(struct adapter *adapter);
+
+int _rtw_write8(struct adapter *adapter, u32 addr, u8 val);
+int _rtw_write16(struct adapter *adapter, u32 addr, u16 val);
+int _rtw_write32(struct adapter *adapter, u32 addr, u32 val);
+int _rtw_writeN(struct adapter *adapter, u32 addr, u32 length, u8 *pdata);
+
+int _rtw_write8_async(struct adapter *adapter, u32 addr, u8 val);
+int _rtw_write16_async(struct adapter *adapter, u32 addr, u16 val);
+int _rtw_write32_async(struct adapter *adapter, u32 addr, u32 val);
+
+void _rtw_write_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+u32 _rtw_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+u32 _rtw_write_port_and_wait(struct adapter *adapter, u32 addr, u32 cnt,
+ u8 *pmem, int timeout_ms);
+void _rtw_write_port_cancel(struct adapter *adapter);
+
+#define rtw_read8(adapter, addr) _rtw_read8((adapter), (addr))
+#define rtw_read16(adapter, addr) _rtw_read16((adapter), (addr))
+#define rtw_read32(adapter, addr) _rtw_read32((adapter), (addr))
+#define rtw_read_mem(adapter, addr, cnt, mem) \
+ _rtw_read_mem((adapter), (addr), (cnt), (mem))
+#define rtw_read_port(adapter, addr, cnt, mem) \
+ _rtw_read_port((adapter), (addr), (cnt), (mem))
+#define rtw_read_port_cancel(adapter) _rtw_read_port_cancel((adapter))
+
+#define rtw_write8(adapter, addr, val) \
+ _rtw_write8((adapter), (addr), (val))
+#define rtw_write16(adapter, addr, val) \
+ _rtw_write16((adapter), (addr), (val))
+#define rtw_write32(adapter, addr, val) \
+ _rtw_write32((adapter), (addr), (val))
+#define rtw_writeN(adapter, addr, length, data) \
+ _rtw_writeN((adapter), (addr), (length), (data))
+#define rtw_write8_async(adapter, addr, val) \
+ _rtw_write8_async((adapter), (addr), (val))
+#define rtw_write16_async(adapter, addr, val) \
+ _rtw_write16_async((adapter), (addr), (val))
+#define rtw_write32_async(adapter, addr, val) \
+ _rtw_write32_async((adapter), (addr), (val))
+#define rtw_write_mem(adapter, addr, cnt, mem) \
+ _rtw_write_mem((adapter), (addr), (cnt), (mem))
+#define rtw_write_port(adapter, addr, cnt, mem) \
+ _rtw_write_port((adapter), (addr), (cnt), (mem))
+#define rtw_write_port_and_wait(adapter, addr, cnt, mem, timeout_ms) \
+ _rtw_write_port_and_wait((adapter), (addr), (cnt), (mem), (timeout_ms))
+#define rtw_write_port_cancel(adapter) _rtw_write_port_cancel((adapter))
+
+void rtw_write_scsi(struct adapter *adapter, u32 cnt, u8 *pmem);
+
+/* ioreq */
+void ioreq_read8(struct adapter *adapter, u32 addr, u8 *pval);
+void ioreq_read16(struct adapter *adapter, u32 addr, u16 *pval);
+void ioreq_read32(struct adapter *adapter, u32 addr, u32 *pval);
+void ioreq_write8(struct adapter *adapter, u32 addr, u8 val);
+void ioreq_write16(struct adapter *adapter, u32 addr, u16 val);
+void ioreq_write32(struct adapter *adapter, u32 addr, u32 val);
+
+uint async_read8(struct adapter *adapter, u32 addr, u8 *pbuff,
+ void (*_async_io_callback)(struct adapter *padater,
+ struct io_req *pio_req,
+ u8 *cnxt), u8 *cnxt);
+uint async_read16(struct adapter *adapter, u32 addr, u8 *pbuff,
+ void (*_async_io_callback)(struct adapter *padater,
+ struct io_req *pio_req,
+ u8 *cnxt), u8 *cnxt);
+uint async_read32(struct adapter *adapter, u32 addr, u8 *pbuff,
+ void (*_async_io_callback)(struct adapter *padater,
+ struct io_req *pio_req,
+ u8 *cnxt), u8 *cnxt);
+
+void async_read_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+void async_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+
+void async_write8(struct adapter *adapter, u32 addr, u8 val,
+ void (*_async_io_callback)(struct adapter *padater,
+ struct io_req *pio_req,
+ u8 *cnxt), u8 *cnxt);
+void async_write16(struct adapter *adapter, u32 addr, u16 val,
+ void (*_async_io_callback)(struct adapter *padater,
+ struct io_req *pio_req,
+ u8 *cnxt), u8 *cnxt);
+void async_write32(struct adapter *adapter, u32 addr, u32 val,
+ void (*_async_io_callback)(struct adapter *padater,
+ struct io_req *pio_req,
+ u8 *cnxt), u8 *cnxt);
+
+void async_write_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+void async_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+
+int rtw_init_io_priv(struct adapter *padapter,
+ void (*set_intf_ops)(struct _io_ops *pops));
+
+uint alloc_io_queue(struct adapter *adapter);
+void free_io_queue(struct adapter *adapter);
+void async_bus_io(struct io_queue *pio_q);
+void bus_sync_io(struct io_queue *pio_q);
+u32 _ioreq2rwmem(struct io_queue *pio_q);
+void dev_power_down(struct adapter * Adapter, u8 bpwrup);
+
+#define PlatformEFIOWrite1Byte(_a,_b,_c) \
+ rtw_write8(_a,_b,_c)
+#define PlatformEFIOWrite2Byte(_a,_b,_c) \
+ rtw_write16(_a,_b,_c)
+#define PlatformEFIOWrite4Byte(_a,_b,_c) \
+ rtw_write32(_a,_b,_c)
+
+#define PlatformEFIORead1Byte(_a,_b) \
+ rtw_read8(_a,_b)
+#define PlatformEFIORead2Byte(_a,_b) \
+ rtw_read16(_a,_b)
+#define PlatformEFIORead4Byte(_a,_b) \
+ rtw_read32(_a,_b)
+
+#endif /* _RTL8711_IO_H_ */
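[Editor's note, not part of the patch: the IO command word packs a transfer size into the low seven bits (_IOSZ_MASK_) and ORs flag bits above it, which is how IO_WR_BURST() and friends are built. A self-contained demonstration using the same bit definitions copied from the header:]

#include <stdio.h>

/* Same IO command bit layout as rtw_io.h. */
#define BIT(x)       (1u << (x))
#define _IOSZ_MASK_  (0x7F)
#define _IO_WRITE_   BIT(7)
#define _IO_BURST_   BIT(9)
#define _IO_SYNC_    BIT(13)

#define IO_WR_BURST(x) \
	(_IO_WRITE_ | _IO_SYNC_ | _IO_BURST_ | ((x) & _IOSZ_MASK_))

int main(void)
{
	unsigned int cmd = IO_WR_BURST(24);

	/* The low 7 bits carry the transfer size; the upper bits are flags. */
	printf("cmd=0x%04x size=%u write=%d sync=%d burst=%d\n",
	       cmd, cmd & _IOSZ_MASK_,
	       !!(cmd & _IO_WRITE_), !!(cmd & _IO_SYNC_), !!(cmd & _IO_BURST_));
	return 0;
}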
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl.h b/drivers/staging/rtl8188eu/include/rtw_ioctl.h
new file mode 100644
index 0000000..8772d1d
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_ioctl.h
@@ -0,0 +1,124 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _RTW_IOCTL_H_
+#define _RTW_IOCTL_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+
+#ifndef OID_802_11_CAPABILITY
+ #define OID_802_11_CAPABILITY 0x0d010122
+#endif
+
+#ifndef OID_802_11_PMKID
+ #define OID_802_11_PMKID 0x0d010123
+#endif
+
+
+/* For DDK-defined OIDs */
+#define OID_NDIS_SEG1 0x00010100
+#define OID_NDIS_SEG2 0x00010200
+#define OID_NDIS_SEG3 0x00020100
+#define OID_NDIS_SEG4 0x01010100
+#define OID_NDIS_SEG5 0x01020100
+#define OID_NDIS_SEG6 0x01020200
+#define OID_NDIS_SEG7 0xFD010100
+#define OID_NDIS_SEG8 0x0D010100
+#define OID_NDIS_SEG9 0x0D010200
+#define OID_NDIS_SEG10 0x0D020200
+
+#define SZ_OID_NDIS_SEG1 23
+#define SZ_OID_NDIS_SEG2 3
+#define SZ_OID_NDIS_SEG3 6
+#define SZ_OID_NDIS_SEG4 6
+#define SZ_OID_NDIS_SEG5 4
+#define SZ_OID_NDIS_SEG6 8
+#define SZ_OID_NDIS_SEG7 7
+#define SZ_OID_NDIS_SEG8 36
+#define SZ_OID_NDIS_SEG9 24
+#define SZ_OID_NDIS_SEG10 19
+
+/* For Realtek-defined OIDs */
+#define OID_MP_SEG1 0xFF871100
+#define OID_MP_SEG2 0xFF818000
+
+#define OID_MP_SEG3 0xFF818700
+#define OID_MP_SEG4 0xFF011100
+
+#define DEBUG_OID(dbg, str) \
+ if ((!dbg)) { \
+ RT_TRACE(_module_rtl871x_ioctl_c_, _drv_info_, \
+ ("%s(%d): %s", __func__, __line__, str)); \
+ }
+
+enum oid_type {
+ QUERY_OID,
+ SET_OID
+};
+
+struct oid_funs_node {
+ unsigned int oid_start; /* the starting number for OID */
+ unsigned int oid_end; /* the ending number for OID */
+ struct oid_obj_priv *node_array;
+ unsigned int array_sz; /* the size of node_array */
+ int query_counter; /* count the number of query hits for this segment */
+ int set_counter; /* count the number of set hits for this segment */
+};
+
+struct oid_par_priv {
+ void *adapter_context;
+ NDIS_OID oid;
+ void *information_buf;
+ u32 information_buf_len;
+ u32 *bytes_rw;
+ u32 *bytes_needed;
+ enum oid_type type_of_oid;
+ u32 dbg;
+};
+
+struct oid_obj_priv {
+ unsigned char dbg; /* 0: without OID debug message
+ * 1: with OID debug message */
+ int (*oidfuns)(struct oid_par_priv *poid_par_priv);
+};
+
+#if defined(_RTW_MP_IOCTL_C_)
+static int oid_null_function(struct oid_par_priv *poid_par_priv) {
+ _func_enter_;
+ _func_exit_;
+ return NDIS_STATUS_SUCCESS;
+}
+#endif
+
+extern struct iw_handler_def rtw_handlers_def;
+
+int drv_query_info(struct net_device *miniportadaptercontext, NDIS_OID oid,
+ void *informationbuffer, u32 informationbufferlength,
+ u32 *byteswritten, u32 *bytesneeded);
+
+int drv_set_info(struct net_device *MiniportAdapterContext,
+ NDIS_OID oid, void *informationbuffer,
+ u32 informationbufferlength, u32 *bytesread,
+ u32 *bytesneeded);
+
+extern int ui_pid[3];
+
+#endif /* #ifndef __INC_CEINFO_ */
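[Editor's note, not part of the patch: oid_funs_node describes a segment of the OID space by a start/end pair, so dispatch amounts to a range check against each segment. A standalone sketch of that lookup; the segment ranges below are made up for illustration:]

#include <stdio.h>

/* Illustrative only: route an OID to a handler table by numeric range,
 * the way oid_funs_node describes a segment with oid_start/oid_end. */
struct oid_segment {
	unsigned int oid_start;
	unsigned int oid_end;
	const char *name;
};

static const struct oid_segment segments[] = {
	{ 0x00010100, 0x00010117, "NDIS seg1" }, /* end values are made up */
	{ 0x0D010100, 0x0D010124, "NDIS seg8" },
};

static const char *lookup(unsigned int oid)
{
	unsigned int i;

	for (i = 0; i < sizeof(segments) / sizeof(segments[0]); i++)
		if (oid >= segments[i].oid_start && oid <= segments[i].oid_end)
			return segments[i].name;
	return "unhandled";
}

int main(void)
{
	printf("OID 0x0d010123 -> %s\n", lookup(0x0d010123));
	return 0;
}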
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h b/drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h
new file mode 100644
index 0000000..8fa3858
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h
@@ -0,0 +1,79 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _RTW_IOCTL_RTL_H_
+#define _RTW_IOCTL_RTL_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+/* oid_rtl_seg_01_01 ************** */
+int oid_rt_get_signal_quality_hdl(struct oid_par_priv *poid_par_priv);/* 84 */
+int oid_rt_get_small_packet_crc_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_middle_packet_crc_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_large_packet_crc_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_tx_retry_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_rx_retry_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_rx_total_packet_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_tx_beacon_ok_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_tx_beacon_err_hdl(struct oid_par_priv *poid_par_priv);
+
+int oid_rt_pro_set_fw_dig_state_hdl(struct oid_par_priv *poid_par_priv);/* 8a */
+int oid_rt_pro_set_fw_ra_state_hdl(struct oid_par_priv *poid_par_priv); /* 8b */
+
+int oid_rt_get_rx_icv_err_hdl(struct oid_par_priv *poid_par_priv);/* 93 */
+int oid_rt_set_encryption_algorithm_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_preamble_mode_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_ap_ip_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_channelplan_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_set_channelplan_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_set_preamble_mode_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_set_bcn_intvl_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_dedicate_probe_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_total_tx_bytes_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_total_rx_bytes_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_current_tx_power_level_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_enc_key_mismatch_count_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_enc_key_match_count_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_channel_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_hardware_radio_off_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_key_mismatch_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_supported_wireless_mode_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_channel_list_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_scan_in_progress_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_forced_data_rate_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_wireless_mode_for_scan_list_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_bss_wireless_mode_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_scan_with_magic_packet_hdl(struct oid_par_priv *poid_par_priv);
+
+/* oid_rtl_seg_01_03 section start ************** */
+int oid_rt_ap_get_associated_station_list_hdl(struct oid_par_priv *priv);
+int oid_rt_ap_switch_into_ap_mode_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_ap_supported_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_ap_set_passphrase_hdl(struct oid_par_priv *poid_par_priv);
+
+/* oid_rtl_seg_01_11 */
+int oid_rt_pro_rf_write_registry_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_pro_rf_read_registry_hdl(struct oid_par_priv *poid_par_priv);
+
+/* oid_rtl_seg_03_00 section start ************** */
+int oid_rt_get_connect_state_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_set_default_key_id_hdl(struct oid_par_priv *poid_par_priv);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h b/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h
new file mode 100644
index 0000000..49efb23
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h
@@ -0,0 +1,50 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_IOCTL_SET_H_
+#define __RTW_IOCTL_SET_H_
+
+#include <drv_types.h>
+
+
+typedef u8 NDIS_802_11_PMKID_VALUE[16];
+
+u8 rtw_set_802_11_add_key(struct adapter *adapt, struct ndis_802_11_key *key);
+u8 rtw_set_802_11_authentication_mode(struct adapter *adapt,
+ enum ndis_802_11_auth_mode authmode);
+u8 rtw_set_802_11_bssid(struct adapter*adapter, u8 *bssid);
+u8 rtw_set_802_11_add_wep(struct adapter *adapter, struct ndis_802_11_wep *wep);
+u8 rtw_set_802_11_disassociate(struct adapter *adapter);
+u8 rtw_set_802_11_bssid_list_scan(struct adapter*adapter,
+ struct ndis_802_11_ssid *pssid,
+ int ssid_max_num);
+u8 rtw_set_802_11_infrastructure_mode(struct adapter *adapter,
+ enum ndis_802_11_network_infra type);
+u8 rtw_set_802_11_remove_wep(struct adapter *adapter, u32 keyindex);
+u8 rtw_set_802_11_ssid(struct adapter *adapt, struct ndis_802_11_ssid *ssid);
+u8 rtw_set_802_11_remove_key(struct adapter *adapt,
+ struct ndis_802_11_remove_key *key);
+u8 rtw_validate_ssid(struct ndis_802_11_ssid *ssid);
+u16 rtw_get_cur_max_rate(struct adapter *adapter);
+int rtw_set_scan_mode(struct adapter *adapter, enum rt_scan_type scan_mode);
+int rtw_set_channel_plan(struct adapter *adapter, u8 channel_plan);
+int rtw_set_country(struct adapter *adapter, const char *country_code);
+int rtw_change_ifname(struct adapter *padapter, const char *ifname);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/rtw_iol.h b/drivers/staging/rtl8188eu/include/rtw_iol.h
new file mode 100644
index 0000000..6949922
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_iol.h
@@ -0,0 +1,84 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_IOL_H_
+#define __RTW_IOL_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#define IOREG_CMD_END_LEN 4
+
+struct ioreg_cfg {
+ u8 length;
+ u8 cmd_id;
+ __le16 address;
+ __le32 data;
+ __le32 mask;
+};
+
+enum ioreg_cmd {
+ IOREG_CMD_LLT = 0x01,
+ IOREG_CMD_REFUSE = 0x02,
+ IOREG_CMD_EFUSE_PATH = 0x03,
+ IOREG_CMD_WB_REG = 0x04,
+ IOREG_CMD_WW_REG = 0x05,
+ IOREG_CMD_WD_REG = 0x06,
+ IOREG_CMD_W_RF = 0x07,
+ IOREG_CMD_DELAY_US = 0x10,
+ IOREG_CMD_DELAY_MS = 0x11,
+ IOREG_CMD_END = 0xFF,
+};
+
+struct xmit_frame *rtw_IOL_accquire_xmit_frame(struct adapter *adapter);
+int rtw_IOL_append_cmds(struct xmit_frame *xmit_frame, u8 *IOL_cmds,
+ u32 cmd_len);
+int rtw_IOL_append_LLT_cmd(struct xmit_frame *xmit_frame, u8 page_boundary);
+int rtw_IOL_exec_cmds_sync(struct adapter *adapter,
+ struct xmit_frame *xmit_frame, u32 max_wating_ms,
+ u32 bndy_cnt);
+bool rtw_IOL_applied(struct adapter *adapter);
+int rtw_IOL_append_DELAY_US_cmd(struct xmit_frame *xmit_frame, u16 us);
+int rtw_IOL_append_DELAY_MS_cmd(struct xmit_frame *xmit_frame, u16 ms);
+int rtw_IOL_append_END_cmd(struct xmit_frame *xmit_frame);
+
+void read_efuse_from_txpktbuf(struct adapter *adapter, int bcnhead,
+ u8 *content, u16 *size);
+
+int _rtw_IOL_append_WB_cmd(struct xmit_frame *xmit_frame, u16 addr,
+ u8 value, u8 mask);
+int _rtw_IOL_append_WW_cmd(struct xmit_frame *xmit_frame, u16 addr,
+ u16 value, u16 mask);
+int _rtw_IOL_append_WD_cmd(struct xmit_frame *xmit_frame, u16 addr,
+ u32 value, u32 mask);
+int _rtw_IOL_append_WRF_cmd(struct xmit_frame *xmit_frame, u8 rf_path,
+ u16 addr, u32 value, u32 mask);
+#define rtw_IOL_append_WB_cmd(xmit_frame, addr, value, mask) \
+ _rtw_IOL_append_WB_cmd((xmit_frame), (addr), (value) ,(mask))
+#define rtw_IOL_append_WW_cmd(xmit_frame, addr, value, mask) \
+ _rtw_IOL_append_WW_cmd((xmit_frame), (addr), (value),(mask))
+#define rtw_IOL_append_WD_cmd(xmit_frame, addr, value, mask) \
+ _rtw_IOL_append_WD_cmd((xmit_frame), (addr), (value), (mask))
+#define rtw_IOL_append_WRF_cmd(xmit_frame, rf_path, addr, value, mask) \
+ _rtw_IOL_append_WRF_cmd((xmit_frame),(rf_path), (addr), (value), (mask))
+
+u8 rtw_IOL_cmd_boundary_handle(struct xmit_frame *pxmit_frame);
+void rtw_IOL_cmd_buf_dump(struct adapter *Adapter,int buf_len,u8 *pbuf);
+
+#endif /* __RTW_IOL_H_ */
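[Editor's note, not part of the patch: struct ioreg_cfg lays out one IOL command entry as a length byte, a command id, and little-endian address/data/mask fields. The sketch below packs one hypothetical write-dword entry by hand; the length value and the assumption that entries are serialized exactly in struct order are illustrative, not taken from the driver:]

#include <stdio.h>
#include <stdint.h>

#define IOREG_CMD_WD_REG 0x06 /* write-dword command id from rtw_iol.h */

static size_t put_le16(uint8_t *p, uint16_t v)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
	return 2;
}

static size_t put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
	return 4;
}

int main(void)
{
	uint8_t buf[16];
	size_t n = 0;

	buf[n++] = 12;                       /* entry length (example value) */
	buf[n++] = IOREG_CMD_WD_REG;         /* command id */
	n += put_le16(buf + n, 0x0102);      /* register address */
	n += put_le32(buf + n, 0xdeadbeef);  /* data */
	n += put_le32(buf + n, 0xffffffff);  /* mask: write all bits */

	for (size_t i = 0; i < n; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}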
diff --git a/drivers/staging/rtl8188eu/include/rtw_led.h b/drivers/staging/rtl8188eu/include/rtw_led.h
new file mode 100644
index 0000000..2e61804
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_led.h
@@ -0,0 +1,197 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_LED_H_
+#define __RTW_LED_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#define MSECS(t) (HZ * ((t) / 1000) + (HZ * ((t) % 1000)) / 1000)
+
+#define LED_BLINK_NORMAL_INTERVAL 100
+#define LED_BLINK_SLOWLY_INTERVAL 200
+#define LED_BLINK_LONG_INTERVAL 400
+
+#define LED_BLINK_NO_LINK_INTERVAL_ALPHA 1000
+#define LED_BLINK_LINK_INTERVAL_ALPHA 500 /* 500 */
+#define LED_BLINK_SCAN_INTERVAL_ALPHA 180 /* 150 */
+#define LED_BLINK_FASTER_INTERVAL_ALPHA 50
+#define LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA 5000
+
+#define LED_BLINK_NORMAL_INTERVAL_NETTRONIX 100
+#define LED_BLINK_SLOWLY_INTERVAL_NETTRONIX 2000
+
+#define LED_BLINK_SLOWLY_INTERVAL_PORNET 1000
+#define LED_BLINK_NORMAL_INTERVAL_PORNET 100
+
+#define LED_BLINK_FAST_INTERVAL_BITLAND 30
+
+/* 060403, rcnjko: Customized for AzWave. */
+#define LED_CM2_BLINK_ON_INTERVAL 250
+#define LED_CM2_BLINK_OFF_INTERVAL 4750
+
+#define LED_CM8_BLINK_INTERVAL 500 /* for QMI */
+#define LED_CM8_BLINK_OFF_INTERVAL 3750 /* for QMI */
+
+/* 080124, lanhsin: Customized for RunTop */
+#define LED_RunTop_BLINK_INTERVAL 300
+
+/* 060421, rcnjko: Customized for Sercomm Printer Server case. */
+#define LED_CM3_BLINK_INTERVAL 1500
+
+enum LED_CTL_MODE {
+ LED_CTL_POWER_ON = 1,
+ LED_CTL_LINK = 2,
+ LED_CTL_NO_LINK = 3,
+ LED_CTL_TX = 4,
+ LED_CTL_RX = 5,
+ LED_CTL_SITE_SURVEY = 6,
+ LED_CTL_POWER_OFF = 7,
+ LED_CTL_START_TO_LINK = 8,
+ LED_CTL_START_WPS = 9,
+ LED_CTL_STOP_WPS = 10,
+ LED_CTL_START_WPS_BOTTON = 11, /* added for runtop */
+ LED_CTL_STOP_WPS_FAIL = 12, /* added for ALPHA */
+ LED_CTL_STOP_WPS_FAIL_OVERLAP = 13, /* added for BELKIN */
+ LED_CTL_CONNECTION_NO_TRANSFER = 14,
+};
+
+enum LED_STATE_871x {
+ LED_UNKNOWN = 0,
+ RTW_LED_ON = 1,
+ RTW_LED_OFF = 2,
+ LED_BLINK_NORMAL = 3,
+ LED_BLINK_SLOWLY = 4,
+ LED_BLINK_POWER_ON = 5,
+ LED_BLINK_SCAN = 6, /* LED is blinking during the scanning period;
+ * the number of blinks depends on the time
+ * spent scanning. */
+ LED_BLINK_NO_LINK = 7, /* LED is blinking during no link state. */
+ LED_BLINK_StartToBlink = 8, /* Customized for Sercomm Printer
+ * Server case */
+ LED_BLINK_TXRX = 9,
+ LED_BLINK_WPS = 10, /* LED is blinking during WPS communication */
+ LED_BLINK_WPS_STOP = 11, /* for ALPHA */
+ LED_BLINK_WPS_STOP_OVERLAP = 12, /* for BELKIN */
+ LED_BLINK_RUNTOP = 13, /* Customized for RunTop */
+ LED_BLINK_CAMEO = 14,
+ LED_BLINK_XAVI = 15,
+ LED_BLINK_ALWAYS_ON = 16,
+};
+
+enum LED_PIN_871x {
+ LED_PIN_NULL = 0,
+ LED_PIN_LED0 = 1,
+ LED_PIN_LED1 = 2,
+ LED_PIN_LED2 = 3,
+ LED_PIN_GPIO0 = 4,
+};
+
+struct LED_871x {
+ struct adapter *padapter;
+
+ enum LED_PIN_871x LedPin; /* Identify how to implement this
+ * SW led. */
+ enum LED_STATE_871x CurrLedState; /* Current LED state. */
+ enum LED_STATE_871x BlinkingLedState; /* Next state for blinking,
+ * either RTW_LED_ON or RTW_LED_OFF. */
+
+ u8 bLedOn; /* true if LED is ON, false if LED is OFF. */
+
+ u8 bLedBlinkInProgress; /* true if it is blinking, false otherwise. */
+
+ u8 bLedWPSBlinkInProgress;
+
+ u32 BlinkTimes; /* Number of times to toggle led state for blinking. */
+
+ struct timer_list BlinkTimer; /* Timer object for led blinking. */
+
+ u8 bSWLedCtrl;
+
+ /* ALPHA, added by chiyoko, 20090106 */
+ u8 bLedNoLinkBlinkInProgress;
+ u8 bLedLinkBlinkInProgress;
+ u8 bLedStartToLinkBlinkInProgress;
+ u8 bLedScanBlinkInProgress;
+ struct work_struct BlinkWorkItem; /* Workitem used by BlinkTimer to
+ * manipulate H/W to blink LED. */
+};
+
+#define IS_LED_WPS_BLINKING(_LED_871x) \
+ (((struct LED_871x *)_LED_871x)->CurrLedState == LED_BLINK_WPS || \
+ ((struct LED_871x *)_LED_871x)->CurrLedState == LED_BLINK_WPS_STOP || \
+ ((struct LED_871x *)_LED_871x)->bLedWPSBlinkInProgress)
+
+#define IS_LED_BLINKING(_LED_871x) \
+ (((struct LED_871x *)_LED_871x)->bLedWPSBlinkInProgress || \
+ ((struct LED_871x *)_LED_871x)->bLedScanBlinkInProgress)
+
+/* LED customization. */
+
+enum LED_STRATEGY_871x {
+ SW_LED_MODE0 = 0, /* SW control 1 LED via GPIO0. It is the default option. */
+ SW_LED_MODE1 = 1, /* 2 LEDs, through LED0 and LED1. For ALPHA. */
+ SW_LED_MODE2 = 2, /* SW control 1 LED via GPIO0, customized for AzWave
+ * 8187 minicard. */
+ SW_LED_MODE3 = 3, /* SW control 1 LED via GPIO0, customized for Sercomm
+ * Printer Server case. */
+ SW_LED_MODE4 = 4, /* for Edimax / Belkin */
+ SW_LED_MODE5 = 5, /* for Sercomm / Belkin */
+ SW_LED_MODE6 = 6, /* for 88CU minicard, porting from ce SW_LED_MODE7 */
+ HW_LED = 50, /* HW control 2 LEDs, LED0 and LED1 (there are 4
+ * different control modes, see MAC.CONFIG1 for details.)*/
+ LED_ST_NONE = 99,
+};
+
+void LedControl8188eu(struct adapter *padapter, enum LED_CTL_MODE LedAction);
+
+struct led_priv {
+ /* added for LED control */
+ struct LED_871x SwLed0;
+ struct LED_871x SwLed1;
+ enum LED_STRATEGY_871x LedStrategy;
+ u8 bRegUseLed;
+ void (*LedControlHandler)(struct adapter *padapter,
+ enum LED_CTL_MODE LedAction);
+ /* added for LED control */
+};
+
+#define rtw_led_control(adapt, action) \
+ do { \
+ if ((adapt)->ledpriv.LedControlHandler) \
+ (adapt)->ledpriv.LedControlHandler((adapt), (action)); \
+ } while (0)
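+
+/*
+ * Usage sketch (editor's illustration, not part of the original header):
+ * callers signal LED events through this macro instead of calling the
+ * handler directly, e.g. from a transmit path with a valid adapter
+ * pointer "padapter" (assumed here):
+ *
+ * rtw_led_control(padapter, LED_CTL_TX);
+ *
+ * The do/while (0) wrapper plus the NULL check on LedControlHandler make
+ * the call safe even when no handler has been registered in led_priv.
+ */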
+
+void BlinkTimerCallback(void *data);
+void BlinkWorkItemCallback(struct work_struct *work);
+
+void ResetLedStatus(struct LED_871x * pLed);
+
+void InitLed871x(struct adapter *padapter, struct LED_871x *pLed,
+ enum LED_PIN_871x LedPin);
+
+void DeInitLed871x(struct LED_871x *pLed);
+
+/* hal... */
+void BlinkHandler(struct LED_871x * pLed);
+void SwLedOn(struct adapter *padapter, struct LED_871x *pLed);
+void SwLedOff(struct adapter *padapter, struct LED_871x *pLed);
+
+#endif /* __RTW_LED_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme.h b/drivers/staging/rtl8188eu/include/rtw_mlme.h
new file mode 100644
index 0000000..22538e6
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme.h
@@ -0,0 +1,655 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_MLME_H_
+#define __RTW_MLME_H_
+
+#include <osdep_service.h>
+#include <mlme_osdep.h>
+#include <drv_types.h>
+#include <wlan_bssdef.h>
+
+#define MAX_BSS_CNT 128
+#define MAX_JOIN_TIMEOUT 6500
+
+/* Increase the scanning timeout because of increasing the SURVEY_TO value. */
+
+#define SCANNING_TIMEOUT 8000
+
+#define SCAN_INTERVAL (30) /* unit:2sec, 30*2=60sec */
+
+#define SCANQUEUE_LIFETIME 20 /* unit:sec */
+
+#define WIFI_NULL_STATE 0x00000000
+
+#define WIFI_ASOC_STATE 0x00000001 /* Under Linked state */
+#define WIFI_REASOC_STATE 0x00000002
+#define WIFI_SLEEP_STATE 0x00000004
+#define WIFI_STATION_STATE 0x00000008
+
+#define WIFI_AP_STATE 0x00000010
+#define WIFI_ADHOC_STATE 0x00000020
+#define WIFI_ADHOC_MASTER_STATE 0x00000040
+#define WIFI_UNDER_LINKING 0x00000080
+
+#define WIFI_UNDER_WPS 0x00000100
+#define WIFI_STA_ALIVE_CHK_STATE 0x00000400
+#define WIFI_SITE_MONITOR 0x00000800 /* to indicate the station is under site surveying */
+
+#define WIFI_MP_STATE 0x00010000
+#define WIFI_MP_CTX_BACKGROUND 0x00020000 /* in continuous tx background */
+#define WIFI_MP_CTX_ST 0x00040000 /* in continuous tx with single-tone */
+#define WIFI_MP_CTX_BACKGROUND_PENDING 0x00080000 /* pending in continuous tx background due to running out of skbs */
+#define WIFI_MP_CTX_CCK_HW 0x00100000 /* in continuous tx */
+#define WIFI_MP_CTX_CCK_CS 0x00200000 /* in continuous tx with carrier suppression */
+#define WIFI_MP_LPBK_STATE 0x00400000
+
+#define _FW_UNDER_LINKING WIFI_UNDER_LINKING
+#define _FW_LINKED WIFI_ASOC_STATE
+#define _FW_UNDER_SURVEY WIFI_SITE_MONITOR
+
+enum dot11AuthAlgrthmNum {
+ dot11AuthAlgrthm_Open = 0,
+ dot11AuthAlgrthm_Shared,
+ dot11AuthAlgrthm_8021X,
+ dot11AuthAlgrthm_Auto,
+ dot11AuthAlgrthm_WAPI,
+ dot11AuthAlgrthm_MaxNum
+};
+
+/* Scan type including active and passive scan. */
+enum rt_scan_type {
+ SCAN_PASSIVE,
+ SCAN_ACTIVE,
+ SCAN_MIX,
+};
+
+enum SCAN_RESULT_TYPE {
+ SCAN_RESULT_P2P_ONLY = 0, /* Will return all the P2P devices. */
+ SCAN_RESULT_ALL = 1, /* Will return all scanned devices,
+ * including APs. */
+ SCAN_RESULT_WFD_TYPE = 2 /* Will just return the correct WFD
+ * device. */
+ /* If this device is Miracast sink
+ * device, it will just return all the
+ * Miracast source devices. */
+};
+
+/*
+ * There are several "locks" in mlme_priv,
+ * since mlme_priv is a shared resource between many threads,
+ * such as ISR/callback functions, the OID handlers, and even timer functions.
+ *
+ * Each _queue already has its own lock.
+ * Other items are protected by mlme_priv.lock.
+ *
+ * To avoid possible deadlock, any thread trying to modify mlme_priv
+ * SHALL NOT hold more than one lock at a time!
+ */
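+
+/*
+ * Illustrative pattern (editor's sketch, not from the original source):
+ * updating a field guarded by mlme_priv.lock, e.g. num_of_scanned, looks
+ * like the helpers further down in this header:
+ *
+ * _enter_critical_bh(&pmlmepriv->lock, &irql);
+ * pmlmepriv->num_of_scanned = 0;
+ * _exit_critical_bh(&pmlmepriv->lock, &irql);
+ *
+ * and, per the rule above, no second lock (such as a _queue lock) should
+ * be taken inside that critical section.
+ */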
+
+#define traffic_threshold 10
+#define traffic_scan_period 500
+
+struct sitesurvey_ctrl {
+ u64 last_tx_pkts;
+ uint last_rx_pkts;
+ int traffic_busy;
+ struct timer_list sitesurvey_ctrl_timer;
+};
+
+struct rt_link_detect {
+ u32 NumTxOkInPeriod;
+ u32 NumRxOkInPeriod;
+ u32 NumRxUnicastOkInPeriod;
+ bool bBusyTraffic;
+ bool bTxBusyTraffic;
+ bool bRxBusyTraffic;
+ bool bHigherBusyTraffic; /* For interrupt migration purpose. */
+ bool bHigherBusyRxTraffic; /* We may disable Tx interrupt according
+ * to Rx traffic. */
+ bool bHigherBusyTxTraffic; /* We may disable Tx interrupt according
+ * to Tx traffic. */
+};
+
+struct profile_info {
+ u8 ssidlen;
+ u8 ssid[WLAN_SSID_MAXLEN];
+ u8 peermac[ETH_ALEN];
+};
+
+struct tx_invite_req_info {
+ u8 token;
+ u8 benable;
+ u8 go_ssid[WLAN_SSID_MAXLEN];
+ u8 ssidlen;
+ u8 go_bssid[ETH_ALEN];
+ u8 peer_macaddr[ETH_ALEN];
+ u8 operating_ch; /* This information will be set by using the
+ * p2p_set op_ch=x */
+ u8 peer_ch; /* The listen channel for peer P2P device */
+};
+
+struct tx_invite_resp_info {
+ u8 token; /* Used to record the dialog token of p2p invitation
+ * request frame. */
+};
+
+struct tx_provdisc_req_info {
+ u16 wps_config_method_request; /* Used when sending the
+ * provisioning request frame*/
+ u16 peer_channel_num[2]; /* The channel number on which the
+ * receiver is operating. */
+ struct ndis_802_11_ssid ssid;
+ u8 peerDevAddr[ETH_ALEN]; /* Peer device address */
+ u8 peerIFAddr[ETH_ALEN]; /* Peer interface address */
+ u8 benable; /* Whether this provision
+ * discovery request frame is
+ * to be sent or not */
+};
+
+/* When the peer device issues prov_disc_req first, we should store the
+ * following information. */
+/* The UI must know this information to know which config method the
+ * remote p2p device needs. */
+struct rx_provdisc_req_info {
+ u8 peerDevAddr[ETH_ALEN]; /* Peer device address */
+ u8 strconfig_method_desc_of_prov_disc_req[4]; /* description
+ * for the config method located in the provisioning
+ * discovery request frame. */
+};
+
+struct tx_nego_req_info {
+ u16 peer_channel_num[2]; /* The channel number. */
+ u8 peerDevAddr[ETH_ALEN]; /* Peer device address */
+ u8 benable; /* Whether this negotiation request
+ * frame is to be sent or not */
+};
+
+struct group_id_info {
+ u8 go_device_addr[ETH_ALEN]; /* The GO's device address of
+ * this P2P group */
+ u8 ssid[WLAN_SSID_MAXLEN]; /* The SSID of this P2P group */
+};
+
+struct scan_limit_info {
+ u8 scan_op_ch_only; /* When this flag is set, the driver
+ * should only scan the op. channel */
+ u8 operation_ch[2]; /* Store the op. chan of invitation */
+};
+
+struct wifidirect_info {
+ struct adapter *padapter;
+ struct timer_list find_phase_timer;
+ struct timer_list restore_p2p_state_timer;
+
+ /* Used to do the scanning. After confirming the peer is available,
+ * the driver transmits the P2P frame to the peer. */
+ struct timer_list pre_tx_scan_timer;
+ struct timer_list reset_ch_sitesurvey;
+ struct timer_list reset_ch_sitesurvey2; /* Just for resetting the scan
+ * limit function by using p2p nego */
+ struct tx_provdisc_req_info tx_prov_disc_info;
+ struct rx_provdisc_req_info rx_prov_disc_info;
+ struct tx_invite_req_info invitereq_info;
+ /* Store the profile information of persistent group */
+ struct profile_info profileinfo[P2P_MAX_PERSISTENT_GROUP_NUM];
+ struct tx_invite_resp_info inviteresp_info;
+ struct tx_nego_req_info nego_req_info;
+ /* Store the group id info when doing the group negotiation handshake. */
+ struct group_id_info groupid_info;
+ /* Used to get the limited scan channel from the invitation procedure */
+ struct scan_limit_info rx_invitereq_info;
+ /* Used to get the limited scan channel from the P2P negotiation handshake */
+ struct scan_limit_info p2p_info;
+ enum P2P_ROLE role;
+ enum P2P_STATE pre_p2p_state;
+ enum P2P_STATE p2p_state;
+ /* The device address should be the mac address of this device. */
+ u8 device_addr[ETH_ALEN];
+ u8 interface_addr[ETH_ALEN];
+ u8 social_chan[4];
+ u8 listen_channel;
+ u8 operating_channel;
+ u8 listen_dwell; /* This value should be between 1 and 3 */
+ u8 support_rate[8];
+ u8 p2p_wildcard_ssid[P2P_WILDCARD_SSID_LEN];
+ u8 intent; /* should only include the intent value. */
+ u8 p2p_peer_interface_addr[ETH_ALEN];
+ u8 p2p_peer_device_addr[ETH_ALEN];
+ u8 peer_intent; /* Includes the intent value and tie breaker value. */
+ /* Device name for displaying on searching device screen */
+ u8 device_name[WPS_MAX_DEVICE_NAME_LEN];
+ u8 device_name_len;
+ u8 profileindex; /* Used to point to the index of profileinfo array */
+ u8 peer_operating_ch;
+ u8 find_phase_state_exchange_cnt;
+ /* The device password ID for group negotiation */
+ u16 device_password_id_for_nego;
+ u8 negotiation_dialog_token;
+ /* SSID information for group negotiation */
+ u8 nego_ssid[WLAN_SSID_MAXLEN];
+ u8 nego_ssidlen;
+ u8 p2p_group_ssid[WLAN_SSID_MAXLEN];
+ u8 p2p_group_ssid_len;
+ /* Flag to know if the persistent function should be supported or not.*/
+ u8 persistent_supported;
+ /* In the Sigma test, the Sigma will provide this enable from the
+ * sta_set_p2p CAPI. */
+ /* 0: disable */
+ /* 1: enable */
+ u8 session_available; /* Flag to set the WFD session available to
+ * enable or disable "by Sigma" */
+ /* In the Sigma test, the Sigma will disable the session available
+ * by using the sta_preset CAPI. */
+ /* 0: disable */
+ /* 1: enable */
+ u8 wfd_tdls_enable; /* Flag to enable or disable the TDLS by WFD Sigma*/
+ /* 0: disable */
+ /* 1: enable */
+ u8 wfd_tdls_weaksec; /* Flag to enable or disable the weak security
+ * function for TDLS by WFD Sigma */
+ /* 0: disable */
+ /* In this case, the driver can't issue the tdls
+ * setup request frame. */
+ /* 1: enable */
+ /* In this case, the driver can issue the tdls
+ * setup request frame */
+ /* even the current security is weak security. */
+
+ /* This field will store the WPS value (PIN value or PBC) that the UI
+ * got from the user. */
+ enum P2P_WPSINFO ui_got_wps_info;
+ u16 supported_wps_cm; /* This field describes the WPS config methods
+ * which this driver supports. */
+ /* The value should be a combination of the config
+ * methods defined on page 104 of the WPS v2.0 spec. */
+ /* This field will contain the length of body of P2P Channel List
+ * attribute of group negotiation response frame. */
+ uint channel_list_attr_len;
+ /* This field will contain the body of P2P Channel List attribute of
+ * group negotiation response frame. */
+ /* We will use the channel_cnt and channel_list fields when constructing
+ * the group negotiation confirm frame. */
+ u8 channel_list_attr[100];
+ enum P2P_PS_MODE p2p_ps_mode; /* indicate p2p ps mode */
+ enum P2P_PS_STATE p2p_ps_state; /* indicate p2p ps state */
+ u8 noa_index; /* Identifies an instance of Notice of Absence timing. */
+ u8 ctwindow; /* Client traffic window. A period of time in TU after TBTT. */
+ u8 opp_ps; /* opportunistic power save. */
+ u8 noa_num; /* number of NoA descriptor in P2P IE. */
+ u8 noa_count[P2P_MAX_NOA_NUM]; /* Count for owner, Type of client. */
+ /* Max duration for owner, preferred or min acceptable duration for
+ * client. */
+ u32 noa_duration[P2P_MAX_NOA_NUM];
+ /* Length of interval for owner, preferred or max acceptable interval
+ * of client. */
+ u32 noa_interval[P2P_MAX_NOA_NUM];
+ /* schedule expressed in terms of the lower 4 bytes of the TSF timer. */
+ u32 noa_start_time[P2P_MAX_NOA_NUM];
+};
+
+struct tdls_ss_record { /* signal strength record */
+ u8 macaddr[ETH_ALEN];
+ u8 RxPWDBAll;
+ u8 is_tdls_sta; /* true: direct link sta, false: else */
+};
+
+struct tdls_info {
+ u8 ap_prohibited;
+ uint setup_state;
+ u8 sta_cnt;
+ u8 sta_maximum; /* 1: number of TDLS stations equals (NUM_STA-1), i.e. the max direct link count is reached; 0: otherwise */
+ struct tdls_ss_record ss_record;
+ u8 macid_index; /* macid entry that is ready to write */
+ u8 clear_cam; /* cam entry that is trying to clear, using it in direct link teardown */
+ u8 ch_sensing;
+ u8 cur_channel;
+ u8 candidate_ch;
+ u8 collect_pkt_num[MAX_CHANNEL_NUM];
+ spinlock_t cmd_lock;
+ spinlock_t hdl_lock;
+ u8 watchdog_count;
+ u8 dev_discovered; /* WFD_TDLS: for sigma test */
+ u8 enable;
+};
+
+struct mlme_priv {
+ spinlock_t lock;
+ int fw_state; /* shall we protect this variable? maybe not necessarily... */
+ u8 bScanInProcess;
+ u8 to_join; /* flag */
+ u8 to_roaming; /* number of roaming attempts */
+
+ u8 *nic_hdl;
+
+ u8 not_indic_disco;
+ struct list_head *pscanned;
+ struct __queue free_bss_pool;
+ struct __queue scanned_queue;
+ u8 *free_bss_buf;
+ u32 num_of_scanned;
+
+ struct ndis_802_11_ssid assoc_ssid;
+ u8 assoc_bssid[6];
+
+ struct wlan_network cur_network;
+
+ u32 scan_interval;
+
+ struct timer_list assoc_timer;
+
+ uint assoc_by_bssid;
+ uint assoc_by_rssi;
+
+ struct timer_list scan_to_timer; /* driver itself handles scan_timeout status. */
+ u32 scan_start_time; /* used to evaluate the time spent in scanning */
+
+ struct qos_priv qospriv;
+
+ /* Number of non-HT AP/stations */
+ int num_sta_no_ht;
+
+ /* Number of HT AP/stations 20 MHz */
+ /* int num_sta_ht_20mhz; */
+
+ int num_FortyMHzIntolerant;
+ struct ht_priv htpriv;
+ struct rt_link_detect LinkDetectInfo;
+ struct timer_list dynamic_chk_timer; /* dynamic/periodic check timer */
+
+ u8 key_mask; /* use for ips to set wep key after ips_leave */
+ u8 acm_mask; /* for wmm acm mask */
+ u8 ChannelPlan;
+ enum rt_scan_type scan_mode; /* active: 1, passive: 0 */
+
+ /* u8 probereq_wpsie[MAX_WPS_IE_LEN];added in probe req */
+ /* int probereq_wpsie_len; */
+ u8 *wps_probe_req_ie;
+ u32 wps_probe_req_ie_len;
+
+ u8 *assoc_req;
+ u32 assoc_req_len;
+ u8 *assoc_rsp;
+ u32 assoc_rsp_len;
+
+#if defined(CONFIG_88EU_AP_MODE)
+ /* Number of associated Non-ERP stations (i.e., stations using 802.11b
+ * in 802.11g BSS) */
+ int num_sta_non_erp;
+
+ /* Number of associated stations that do not support Short Slot Time */
+ int num_sta_no_short_slot_time;
+
+ /* Number of associated stations that do not support Short Preamble */
+ int num_sta_no_short_preamble;
+
+ int olbc; /* Overlapping Legacy BSS Condition */
+
+ /* Number of HT assoc sta that do not support greenfield */
+ int num_sta_ht_no_gf;
+
+ /* Number of associated non-HT stations */
+ /* int num_sta_no_ht; */
+
+ /* Number of HT associated stations 20 MHz */
+ int num_sta_ht_20mhz;
+
+ /* Overlapping BSS information */
+ int olbc_ht;
+
+ u16 ht_op_mode;
+
+ u8 *wps_beacon_ie;
+ /* u8 *wps_probe_req_ie; */
+ u8 *wps_probe_resp_ie;
+ u8 *wps_assoc_resp_ie;
+
+ u32 wps_beacon_ie_len;
+ u32 wps_probe_resp_ie_len;
+ u32 wps_assoc_resp_ie_len;
+
+ u8 *p2p_beacon_ie;
+ u8 *p2p_probe_req_ie;
+ u8 *p2p_probe_resp_ie;
+ u8 *p2p_go_probe_resp_ie; /* for GO */
+ u8 *p2p_assoc_req_ie;
+
+ u32 p2p_beacon_ie_len;
+ u32 p2p_probe_req_ie_len;
+ u32 p2p_probe_resp_ie_len;
+ u32 p2p_go_probe_resp_ie_len; /* for GO */
+ u32 p2p_assoc_req_ie_len;
+ spinlock_t bcn_update_lock;
+ u8 update_bcn;
+#endif /* if defined(CONFIG_88EU_AP_MODE) */
+};
+
+#ifdef CONFIG_88EU_AP_MODE
+
+struct hostapd_priv {
+ struct adapter *padapter;
+};
+
+int hostapd_mode_init(struct adapter *padapter);
+void hostapd_mode_unload(struct adapter *padapter);
+#endif
+
+extern unsigned char WPA_TKIP_CIPHER[4];
+extern unsigned char RSN_TKIP_CIPHER[4];
+extern unsigned char REALTEK_96B_IE[];
+extern unsigned char MCS_rate_2R[16];
+extern unsigned char MCS_rate_1R[16];
+
+void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf);
+void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf);
+void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf);
+void rtw_joinbss_event_callback(struct adapter *adapter, u8 *pbuf);
+void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf);
+void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf);
+void rtw_atimdone_event_callback(struct adapter *adapter, u8 *pbuf);
+void rtw_cpwm_event_callback(struct adapter *adapter, u8 *pbuf);
+void indicate_wx_scan_complete_event(struct adapter *padapter);
+void rtw_indicate_wx_assoc_event(struct adapter *padapter);
+void rtw_indicate_wx_disassoc_event(struct adapter *padapter);
+int event_thread(void *context);
+void rtw_join_timeout_handler(void *FunctionContext);
+void _rtw_scan_timeout_handler(void *FunctionContext);
+void rtw_free_network_queue(struct adapter *adapter, u8 isfreeall);
+int rtw_init_mlme_priv(struct adapter *adapter);
+void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv);
+int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv);
+int rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv,
+ int keyid, u8 set_tx);
+int rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv);
+
+static inline u8 *get_bssid(struct mlme_priv *pmlmepriv)
+{ /* if sta_mode:pmlmepriv->cur_network.network.MacAddress=> bssid */
+ /* if adhoc_mode:pmlmepriv->cur_network.network.MacAddress=> ibss mac address */
+ return pmlmepriv->cur_network.network.MacAddress;
+}
+
+static inline int check_fwstate(struct mlme_priv *pmlmepriv, int state)
+{
+ if (pmlmepriv->fw_state & state)
+ return true;
+
+ return false;
+}
+
+static inline int get_fwstate(struct mlme_priv *pmlmepriv)
+{
+ return pmlmepriv->fw_state;
+}
+
+/*
+ * No limit on the calling context,
+ * therefore treat it as a critical section...
+ *
+ * ### NOTE: ### (!!!!)
+ * The caller MUST hold pmlmepriv->lock before calling this function.
+ */
+static inline void set_fwstate(struct mlme_priv *pmlmepriv, int state)
+{
+ pmlmepriv->fw_state |= state;
+ /* FOR HW integration */
+ if (_FW_UNDER_SURVEY == state)
+ pmlmepriv->bScanInProcess = true;
+}
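+
+/*
+ * Caller sketch (editor's illustration, not from the original source):
+ * set_fwstate() takes no lock itself, so a caller is expected to wrap it
+ * in the mlme_priv lock, e.g.
+ *
+ * _enter_critical_bh(&pmlmepriv->lock, &irql);
+ * set_fwstate(pmlmepriv, _FW_UNDER_SURVEY);
+ * _exit_critical_bh(&pmlmepriv->lock, &irql);
+ *
+ * clr_fwstate() below already does this internally, which is why it may
+ * be called from any context.
+ */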
+
+static inline void _clr_fwstate_(struct mlme_priv *pmlmepriv, int state)
+{
+ pmlmepriv->fw_state &= ~state;
+ /* FOR HW integration */
+ if (_FW_UNDER_SURVEY == state)
+ pmlmepriv->bScanInProcess = false;
+}
+
+/*
+ * No limit on the calling context,
+ * therefore treat it as a critical section...
+ */
+static inline void clr_fwstate(struct mlme_priv *pmlmepriv, int state)
+{
+ unsigned long irql;
+
+ _enter_critical_bh(&pmlmepriv->lock, &irql);
+ if (check_fwstate(pmlmepriv, state) == true)
+ pmlmepriv->fw_state ^= state;
+ _exit_critical_bh(&pmlmepriv->lock, &irql);
+}
+
+static inline void clr_fwstate_ex(struct mlme_priv *pmlmepriv, int state)
+{
+ unsigned long irql;
+
+ _enter_critical_bh(&pmlmepriv->lock, &irql);
+ _clr_fwstate_(pmlmepriv, state);
+ _exit_critical_bh(&pmlmepriv->lock, &irql);
+}
+
+static inline void up_scanned_network(struct mlme_priv *pmlmepriv)
+{
+ unsigned long irql;
+
+ _enter_critical_bh(&pmlmepriv->lock, &irql);
+ pmlmepriv->num_of_scanned++;
+ _exit_critical_bh(&pmlmepriv->lock, &irql);
+}
+
+static inline void down_scanned_network(struct mlme_priv *pmlmepriv)
+{
+ unsigned long irql;
+
+ _enter_critical_bh(&pmlmepriv->lock, &irql);
+ pmlmepriv->num_of_scanned--;
+ _exit_critical_bh(&pmlmepriv->lock, &irql);
+}
+
+static inline void set_scanned_network_val(struct mlme_priv *pmlmepriv, int val)
+{
+ unsigned long irql;
+
+ _enter_critical_bh(&pmlmepriv->lock, &irql);
+ pmlmepriv->num_of_scanned = val;
+ _exit_critical_bh(&pmlmepriv->lock, &irql);
+}
+
+u16 rtw_get_capability(struct wlan_bssid_ex *bss);
+void rtw_update_scanned_network(struct adapter *adapter,
+ struct wlan_bssid_ex *target);
+void rtw_disconnect_hdl_under_linked(struct adapter *adapter,
+ struct sta_info *psta, u8 free_assoc);
+void rtw_generate_random_ibss(u8 *pibss);
+struct wlan_network *rtw_find_network(struct __queue *scanned_queue, u8 *addr);
+struct wlan_network *rtw_get_oldest_wlan_network(struct __queue *scanned_queue);
+
+void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue);
+void rtw_indicate_disconnect(struct adapter *adapter);
+void rtw_indicate_connect(struct adapter *adapter);
+void rtw_indicate_scan_done(struct adapter *padapter, bool aborted);
+void rtw_scan_abort(struct adapter *adapter);
+
+int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie,
+ uint in_len);
+int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie,
+ uint in_len, uint initial_out_len);
+void rtw_init_registrypriv_dev_network(struct adapter *adapter);
+
+void rtw_update_registrypriv_dev_network(struct adapter *adapter);
+
+void rtw_get_encrypt_decrypt_from_registrypriv(struct adapter *adapter);
+
+void _rtw_join_timeout_handler(struct adapter *adapter);
+void rtw_scan_timeout_handler(struct adapter *adapter);
+
+void rtw_dynamic_check_timer_handlder(struct adapter *adapter);
+#define rtw_is_scan_deny(adapter) false
+#define rtw_clear_scan_deny(adapter) do {} while (0)
+#define rtw_set_scan_deny_timer_hdl(adapter) do {} while (0)
+#define rtw_set_scan_deny(adapter, ms) do {} while (0)
+
+
+int _rtw_init_mlme_priv(struct adapter *padapter);
+
+void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv);
+
+void _rtw_free_mlme_priv(struct mlme_priv *pmlmepriv);
+
+int _rtw_enqueue_network(struct __queue *queue, struct wlan_network *pnetwork);
+
+struct wlan_network *_rtw_dequeue_network(struct __queue *queue);
+
+struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv);
+
+
+void _rtw_free_network(struct mlme_priv *pmlmepriv,
+ struct wlan_network *pnetwork, u8 isfreeall);
+void _rtw_free_network_nolock(struct mlme_priv *pmlmepriv,
+ struct wlan_network *pnetwork);
+
+
+struct wlan_network *_rtw_find_network(struct __queue *scanned_queue, u8 *addr);
+
+void _rtw_free_network_queue(struct adapter *padapter, u8 isfreeall);
+
+int rtw_if_up(struct adapter *padapter);
+
+
+u8 *rtw_get_capability_from_ie(u8 *ie);
+u8 *rtw_get_timestampe_from_ie(u8 *ie);
+u8 *rtw_get_beacon_interval_from_ie(u8 *ie);
+
+
+void rtw_joinbss_reset(struct adapter *padapter);
+
+unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie,
+ u8 *out_ie, uint in_len, uint *pout_len);
+void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len);
+void rtw_issue_addbareq_cmd(struct adapter *padapter,
+ struct xmit_frame *pxmitframe);
+
+int rtw_is_same_ibss(struct adapter *adapter, struct wlan_network *pnetwork);
+int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst);
+
+void rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network);
+void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network);
+
+void rtw_stassoc_hw_rpt(struct adapter *adapter, struct sta_info *psta);
+
+#endif /* __RTW_MLME_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
new file mode 100644
index 0000000..853ab80
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -0,0 +1,878 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_MLME_EXT_H_
+#define __RTW_MLME_EXT_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <wlan_bssdef.h>
+
+
+/* Commented by Albert 20101105 */
+/* Increase the SURVEY_TO value from 100 to 150 ( 100ms to 150ms ) */
+/* The Realtek 8188CE SoftAP will spend around 100ms to send the probe response after receiving the probe request. */
+/* So, this driver tried to extend the dwell time for each scanning channel. */
+/* This will increase the chance to receive the probe response from SoftAP. */
+
+#define SURVEY_TO (100)
+#define REAUTH_TO (300) /* 50) */
+#define REASSOC_TO (300) /* 50) */
+/* define DISCONNECT_TO (3000) */
+#define ADDBA_TO (2000)
+
+#define LINKED_TO (1) /* unit:2 sec, 1x2=2 sec */
+
+#define REAUTH_LIMIT (4)
+#define REASSOC_LIMIT (4)
+#define READDBA_LIMIT (2)
+
+#define ROAMING_LIMIT 8
+
+#define DYNAMIC_FUNC_DISABLE (0x0)
+
+/* ====== ODM_ABILITY_E ======== */
+/* BB ODM section BIT 0-15 */
+#define DYNAMIC_BB_DIG BIT(0)
+#define DYNAMIC_BB_RA_MASK BIT(1)
+#define DYNAMIC_BB_DYNAMIC_TXPWR BIT(2)
+#define DYNAMIC_BB_BB_FA_CNT BIT(3)
+
+#define DYNAMIC_BB_RSSI_MONITOR BIT(4)
+#define DYNAMIC_BB_CCK_PD BIT(5)
+#define DYNAMIC_BB_ANT_DIV BIT(6)
+#define DYNAMIC_BB_PWR_SAVE BIT(7)
+#define DYNAMIC_BB_PWR_TRA BIT(8)
+#define DYNAMIC_BB_RATE_ADAPTIVE BIT(9)
+#define DYNAMIC_BB_PATH_DIV BIT(10)
+#define DYNAMIC_BB_PSD BIT(11)
+
+/* MAC DM section BIT 16-23 */
+#define DYNAMIC_MAC_EDCA_TURBO BIT(16)
+#define DYNAMIC_MAC_EARLY_MODE BIT(17)
+
+/* RF ODM section BIT 24-31 */
+#define DYNAMIC_RF_TX_PWR_TRACK BIT(24)
+#define DYNAMIC_RF_RX_GAIN_TRACK BIT(25)
+#define DYNAMIC_RF_CALIBRATION BIT(26)
+
+#define DYNAMIC_ALL_FUNC_ENABLE 0xFFFFFFF
+
+#define _HW_STATE_NOLINK_ 0x00
+#define _HW_STATE_ADHOC_ 0x01
+#define _HW_STATE_STATION_ 0x02
+#define _HW_STATE_AP_ 0x03
+
+
+#define _1M_RATE_ 0
+#define _2M_RATE_ 1
+#define _5M_RATE_ 2
+#define _11M_RATE_ 3
+#define _6M_RATE_ 4
+#define _9M_RATE_ 5
+#define _12M_RATE_ 6
+#define _18M_RATE_ 7
+#define _24M_RATE_ 8
+#define _36M_RATE_ 9
+#define _48M_RATE_ 10
+#define _54M_RATE_ 11
+
+
+extern unsigned char RTW_WPA_OUI[];
+extern unsigned char WMM_OUI[];
+extern unsigned char WPS_OUI[];
+extern unsigned char WFD_OUI[];
+extern unsigned char P2P_OUI[];
+
+extern unsigned char WMM_INFO_OUI[];
+extern unsigned char WMM_PARA_OUI[];
+
+/* Channel Plan Type. */
+/* Note: */
+/* We only add a new channel plan when it is different
+ * from any of the following channel plans. */
+/* If you just want to customize the actions (scan period or join actions)
+ * of one of the channel plans, */
+/* customize them in struct rt_channel_info in the RT_CHANNEL_LIST. */
+enum RT_CHANNEL_DOMAIN {
+ /* old channel plan mapping ===== */
+ RT_CHANNEL_DOMAIN_FCC = 0x00,
+ RT_CHANNEL_DOMAIN_IC = 0x01,
+ RT_CHANNEL_DOMAIN_ETSI = 0x02,
+ RT_CHANNEL_DOMAIN_SPAIN = 0x03,
+ RT_CHANNEL_DOMAIN_FRANCE = 0x04,
+ RT_CHANNEL_DOMAIN_MKK = 0x05,
+ RT_CHANNEL_DOMAIN_MKK1 = 0x06,
+ RT_CHANNEL_DOMAIN_ISRAEL = 0x07,
+ RT_CHANNEL_DOMAIN_TELEC = 0x08,
+ RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN = 0x09,
+ RT_CHANNEL_DOMAIN_WORLD_WIDE_13 = 0x0A,
+ RT_CHANNEL_DOMAIN_TAIWAN = 0x0B,
+ RT_CHANNEL_DOMAIN_CHINA = 0x0C,
+ RT_CHANNEL_DOMAIN_SINGAPORE_INDIA_MEXICO = 0x0D,
+ RT_CHANNEL_DOMAIN_KOREA = 0x0E,
+ RT_CHANNEL_DOMAIN_TURKEY = 0x0F,
+ RT_CHANNEL_DOMAIN_JAPAN = 0x10,
+ RT_CHANNEL_DOMAIN_FCC_NO_DFS = 0x11,
+ RT_CHANNEL_DOMAIN_JAPAN_NO_DFS = 0x12,
+ RT_CHANNEL_DOMAIN_WORLD_WIDE_5G = 0x13,
+ RT_CHANNEL_DOMAIN_TAIWAN_NO_DFS = 0x14,
+
+ /* new channel plan mapping, (2GDOMAIN_5GDOMAIN) ===== */
+ RT_CHANNEL_DOMAIN_WORLD_NULL = 0x20,
+ RT_CHANNEL_DOMAIN_ETSI1_NULL = 0x21,
+ RT_CHANNEL_DOMAIN_FCC1_NULL = 0x22,
+ RT_CHANNEL_DOMAIN_MKK1_NULL = 0x23,
+ RT_CHANNEL_DOMAIN_ETSI2_NULL = 0x24,
+ RT_CHANNEL_DOMAIN_FCC1_FCC1 = 0x25,
+ RT_CHANNEL_DOMAIN_WORLD_ETSI1 = 0x26,
+ RT_CHANNEL_DOMAIN_MKK1_MKK1 = 0x27,
+ RT_CHANNEL_DOMAIN_WORLD_KCC1 = 0x28,
+ RT_CHANNEL_DOMAIN_WORLD_FCC2 = 0x29,
+ RT_CHANNEL_DOMAIN_WORLD_FCC3 = 0x30,
+ RT_CHANNEL_DOMAIN_WORLD_FCC4 = 0x31,
+ RT_CHANNEL_DOMAIN_WORLD_FCC5 = 0x32,
+ RT_CHANNEL_DOMAIN_WORLD_FCC6 = 0x33,
+ RT_CHANNEL_DOMAIN_FCC1_FCC7 = 0x34,
+ RT_CHANNEL_DOMAIN_WORLD_ETSI2 = 0x35,
+ RT_CHANNEL_DOMAIN_WORLD_ETSI3 = 0x36,
+ RT_CHANNEL_DOMAIN_MKK1_MKK2 = 0x37,
+ RT_CHANNEL_DOMAIN_MKK1_MKK3 = 0x38,
+ RT_CHANNEL_DOMAIN_FCC1_NCC1 = 0x39,
+ RT_CHANNEL_DOMAIN_FCC1_NCC2 = 0x40,
+ RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G = 0x41,
+ /* Add new channel plan above this line=============== */
+ RT_CHANNEL_DOMAIN_MAX,
+ RT_CHANNEL_DOMAIN_REALTEK_DEFINE = 0x7F,
+};
+
+enum RT_CHANNEL_DOMAIN_2G {
+ RT_CHANNEL_DOMAIN_2G_WORLD = 0x00, /* Worldwide 13 */
+ RT_CHANNEL_DOMAIN_2G_ETSI1 = 0x01, /* Europe */
+ RT_CHANNEL_DOMAIN_2G_FCC1 = 0x02, /* US */
+ RT_CHANNEL_DOMAIN_2G_MKK1 = 0x03, /* Japan */
+ RT_CHANNEL_DOMAIN_2G_ETSI2 = 0x04, /* France */
+ RT_CHANNEL_DOMAIN_2G_NULL = 0x05,
+ /* Add new channel plan above this line=============== */
+ RT_CHANNEL_DOMAIN_2G_MAX,
+};
+
+#define rtw_is_channel_plan_valid(chplan) \
+ (chplan < RT_CHANNEL_DOMAIN_MAX || \
+ chplan == RT_CHANNEL_DOMAIN_REALTEK_DEFINE)
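+
+/*
+ * Note (editor's addition, not from the original source):
+ * rtw_is_channel_plan_valid(chplan) accepts any index in
+ * [0, RT_CHANNEL_DOMAIN_MAX) plus the special value
+ * RT_CHANNEL_DOMAIN_REALTEK_DEFINE (0x7F), e.g.
+ *
+ * if (rtw_is_channel_plan_valid(registry_chplan))
+ * ...use the plan...
+ *
+ * where "registry_chplan" is just an illustrative variable name.
+ */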
+
+struct rt_channel_plan {
+ unsigned char Channel[MAX_CHANNEL_NUM];
+ unsigned char Len;
+};
+
+struct rt_channel_plan_2g {
+ unsigned char Channel[MAX_CHANNEL_NUM_2G];
+ unsigned char Len;
+};
+
+struct rt_channel_plan_map {
+ unsigned char Index2G;
+};
+
+enum Associated_AP {
+ atherosAP = 0,
+ broadcomAP = 1,
+ ciscoAP = 2,
+ marvellAP = 3,
+ ralinkAP = 4,
+ realtekAP = 5,
+ airgocapAP = 6,
+ unknownAP = 7,
+ maxAP,
+};
+
+enum HT_IOT_PEER {
+ HT_IOT_PEER_UNKNOWN = 0,
+ HT_IOT_PEER_REALTEK = 1,
+ HT_IOT_PEER_REALTEK_92SE = 2,
+ HT_IOT_PEER_BROADCOM = 3,
+ HT_IOT_PEER_RALINK = 4,
+ HT_IOT_PEER_ATHEROS = 5,
+ HT_IOT_PEER_CISCO = 6,
+ HT_IOT_PEER_MERU = 7,
+ HT_IOT_PEER_MARVELL = 8,
+ HT_IOT_PEER_REALTEK_SOFTAP = 9,/* peer is RealTek SOFT_AP */
+ HT_IOT_PEER_SELF_SOFTAP = 10, /* Self is SoftAP */
+ HT_IOT_PEER_AIRGO = 11,
+ HT_IOT_PEER_INTEL = 12,
+ HT_IOT_PEER_RTK_APCLIENT = 13,
+ HT_IOT_PEER_REALTEK_81XX = 14,
+ HT_IOT_PEER_REALTEK_WOW = 15,
+ HT_IOT_PEER_TENDA = 16,
+ HT_IOT_PEER_MAX = 17
+};
+
+enum SCAN_STATE {
+ SCAN_DISABLE = 0,
+ SCAN_START = 1,
+ SCAN_TXNULL = 2,
+ SCAN_PROCESS = 3,
+ SCAN_COMPLETE = 4,
+ SCAN_STATE_MAX,
+};
+
+struct mlme_handler {
+ unsigned int num;
+ char *str;
+ unsigned int (*func)(struct adapter *adapt, union recv_frame *frame);
+};
+
+struct action_handler {
+ unsigned int num;
+ char *str;
+ unsigned int (*func)(struct adapter *adapt, union recv_frame *frame);
+};
+
+struct ss_res {
+ int state;
+ int bss_cnt;
+ int channel_idx;
+ int scan_mode;
+ u8 ssid_num;
+ u8 ch_num;
+ struct ndis_802_11_ssid ssid[RTW_SSID_SCAN_AMOUNT];
+ struct rtw_ieee80211_channel ch[RTW_CHANNEL_SCAN_AMOUNT];
+};
+
+/* define AP_MODE 0x0C */
+/* define STATION_MODE 0x08 */
+/* define AD_HOC_MODE 0x04 */
+/* define NO_LINK_MODE 0x00 */
+
+#define WIFI_FW_NULL_STATE _HW_STATE_NOLINK_
+#define WIFI_FW_STATION_STATE _HW_STATE_STATION_
+#define WIFI_FW_AP_STATE _HW_STATE_AP_
+#define WIFI_FW_ADHOC_STATE _HW_STATE_ADHOC_
+
+#define WIFI_FW_AUTH_NULL 0x00000100
+#define WIFI_FW_AUTH_STATE 0x00000200
+#define WIFI_FW_AUTH_SUCCESS 0x00000400
+
+#define WIFI_FW_ASSOC_STATE 0x00002000
+#define WIFI_FW_ASSOC_SUCCESS 0x00004000
+
+#define WIFI_FW_LINKING_STATE (WIFI_FW_AUTH_NULL | \
+ WIFI_FW_AUTH_STATE | \
+ WIFI_FW_AUTH_SUCCESS | \
+ WIFI_FW_ASSOC_STATE)
+
+struct FW_Sta_Info {
+ struct sta_info *psta;
+ u32 status;
+ u32 rx_pkt;
+ u32 retry;
+ unsigned char SupportedRates[NDIS_802_11_LENGTH_RATES_EX];
+};
+
+/*
+ * Usage:
+ * When one interface is acting in AP mode and the other interface is in STA
+ * mode and scanning, it should switch back to the AP's operating channel
+ * periodically.
+ * Parameter info:
+ * After the driver has scanned RTW_SCAN_NUM_OF_CH channels, it switches back
+ * to the AP's operating channel for
+ * RTW_STAY_AP_CH_MILLISECOND * SURVEY_TO milliseconds.
+ * Example:
+ * For a chip that supports 2.4 GHz + 5 GHz, with AP mode operating on
+ * channel 1, RTW_SCAN_NUM_OF_CH is 8, RTW_STAY_AP_CH_MS is 3 and SURVEY_TO
+ * is 100. When its STA mode gets a set_scan command, it will:
+ * 1. Do the scan on channels 1, 2, 3, 4, 5, 6, 7, 8
+ * 2. Go back to channel 1 for 300 milliseconds
+ * 3. Continue the site survey on channels 9, 10, 11, 36, 40, 44, 48, 52
+ * 4. Go back to channel 1 for 300 milliseconds
+ * 5. ... and so on, until the survey is done.
+ */
+
+struct mlme_ext_info {
+ u32 state;
+ u32 reauth_count;
+ u32 reassoc_count;
+ u32 link_count;
+ u32 auth_seq;
+ u32 auth_algo; /* 802.11 auth, could be open, shared, auto */
+ u32 authModeToggle;
+ u32 enc_algo;/* encrypt algorithm; */
+ u32 key_index; /* this is only valid for legacy wep,
+ * 0~3 for key id. */
+ u32 iv;
+ u8 chg_txt[128];
+ u16 aid;
+ u16 bcn_interval;
+ u16 capability;
+ u8 assoc_AP_vendor;
+ u8 slotTime;
+ u8 preamble_mode;
+ u8 WMM_enable;
+ u8 ERP_enable;
+ u8 ERP_IE;
+ u8 HT_enable;
+ u8 HT_caps_enable;
+ u8 HT_info_enable;
+ u8 HT_protection;
+ u8 turboMode_cts2self;
+ u8 turboMode_rtsen;
+ u8 SM_PS;
+ u8 agg_enable_bitmap;
+ u8 ADDBA_retry_count;
+ u8 candidate_tid_bitmap;
+ u8 dialogToken;
+ /* Accept ADDBA Request */
+ bool bAcceptAddbaReq;
+ u8 bwmode_updated;
+ u8 hidden_ssid_mode;
+
+ struct ADDBA_request ADDBA_req;
+ struct WMM_para_element WMM_param;
+ struct HT_caps_element HT_caps;
+ struct HT_info_element HT_info;
+ struct wlan_bssid_ex network;/* join network or bss_network,
+ * if in ap mode, it is the same
+ * as cur_network.network */
+ struct FW_Sta_Info FW_sta_info[NUM_STA];
+};
+
+/* The channel information about this channel including joining,
+ * scanning, and power constraints. */
+struct rt_channel_info {
+ u8 ChannelNum; /* The channel number. */
+ enum rt_scan_type ScanType; /* Scan type such as passive
+ * or active scan. */
+ u32 rx_count;
+};
+
+int rtw_ch_set_search_ch(struct rt_channel_info *ch_set, const u32 ch);
+
+/* P2P_MAX_REG_CLASSES - Maximum number of regulatory classes */
+#define P2P_MAX_REG_CLASSES 10
+
+/* P2P_MAX_REG_CLASS_CHANNELS - Maximum number of chan per regulatory class */
+#define P2P_MAX_REG_CLASS_CHANNELS 20
+
+/* struct p2p_channels - List of supported channels */
+struct p2p_channels {
+ /* struct p2p_reg_class - Supported regulatory class */
+ struct p2p_reg_class {
+ /* reg_class - Regulatory class (IEEE 802.11-2007, Annex J) */
+ u8 reg_class;
+
+ /* channel - Supported channels */
+ u8 channel[P2P_MAX_REG_CLASS_CHANNELS];
+
+ /* channels - Number of channel entries in use */
+ size_t channels;
+ } reg_class[P2P_MAX_REG_CLASSES];
+
+ /* reg_classes - Number of reg_class entries in use */
+ size_t reg_classes;
+};
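+
+/*
+ * Iteration sketch (editor's illustration, not from the original source):
+ * consumers walk the per-class channel lists like
+ *
+ * for (i = 0; i < chans->reg_classes; i++)
+ * for (j = 0; j < chans->reg_class[i].channels; j++)
+ * use(chans->reg_class[i].reg_class,
+ * chans->reg_class[i].channel[j]);
+ *
+ * where "chans" is a struct p2p_channels pointer and "use" is only a
+ * placeholder for whatever the caller does with each (class, channel) pair.
+ */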
+
+struct p2p_oper_class_map {
+ enum hw_mode {IEEE80211G} mode;
+ u8 op_class;
+ u8 min_chan;
+ u8 max_chan;
+ u8 inc;
+ enum {BW20, BW40PLUS, BW40MINUS} bw;
+};
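+
+/*
+ * Illustrative entry (editor's sketch; the values are an assumption and are
+ * not taken from this driver): a table of struct p2p_oper_class_map could
+ * describe global operating class 81 (2.4 GHz, 20 MHz channels 1-13) as
+ *
+ * { IEEE80211G, 81, 1, 13, 1, BW20 },
+ *
+ * i.e. mode, op_class, min_chan, max_chan, channel increment and bandwidth.
+ */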
+
+struct mlme_ext_priv {
+ struct adapter *padapter;
+ u8 mlmeext_init;
+ ATOMIC_T event_seq;
+ u16 mgnt_seq;
+
+ unsigned char cur_channel;
+ unsigned char cur_bwmode;
+ unsigned char cur_ch_offset;/* PRIME_CHNL_OFFSET */
+ unsigned char cur_wireless_mode; /* NETWORK_TYPE */
+
+ unsigned char oper_channel; /* saved chan info when call
+ * set_channel_bw */
+ unsigned char oper_bwmode;
+ unsigned char oper_ch_offset;/* PRIME_CHNL_OFFSET */
+
+ unsigned char max_chan_nums;
+ struct rt_channel_info channel_set[MAX_CHANNEL_NUM];
+ struct p2p_channels channel_list;
+ unsigned char basicrate[NumRates];
+ unsigned char datarate[NumRates];
+
+ struct ss_res sitesurvey_res;
+ struct mlme_ext_info mlmext_info;/* for sta/adhoc mode, including
+ * current scan/connecting/connected
+ * related info. For ap mode,
+ * network includes ap's cap_info*/
+ struct timer_list survey_timer;
+ struct timer_list link_timer;
+ u16 chan_scan_time;
+
+ u8 scan_abort;
+ u8 tx_rate; /* TXRATE when USERATE is set. */
+
+ u32 retry; /* retry for issue probereq */
+
+ u64 TSFValue;
+
+#ifdef CONFIG_88EU_AP_MODE
+ unsigned char bstart_bss;
+#endif
+ u8 update_channel_plan_by_ap_done;
+ /* recv_decache check for Action_public frame */
+ u8 action_public_dialog_token;
+ u16 action_public_rxseq;
+ u8 active_keep_alive_check;
+};
+
+int init_mlme_ext_priv(struct adapter *adapter);
+int init_hw_mlme_ext(struct adapter *padapter);
+void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext);
+extern void init_mlme_ext_timer(struct adapter *padapter);
+extern void init_addba_retry_timer(struct adapter *adapt, struct sta_info *sta);
+extern struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv);
+
+unsigned char networktype_to_raid(unsigned char network_type);
+u8 judge_network_type(struct adapter *padapter, unsigned char *rate, int len);
+void get_rate_set(struct adapter *padapter, unsigned char *pbssrate, int *len);
+void UpdateBrateTbl(struct adapter *padapter, u8 *mBratesOS);
+void UpdateBrateTblForSoftAP(u8 *bssrateset, u32 bssratelen);
+
+void Save_DM_Func_Flag(struct adapter *padapter);
+void Restore_DM_Func_Flag(struct adapter *padapter);
+void Switch_DM_Func(struct adapter *padapter, u32 mode, u8 enable);
+
+void Set_MSR(struct adapter *padapter, u8 type);
+
+u8 rtw_get_oper_ch(struct adapter *adapter);
+void rtw_set_oper_ch(struct adapter *adapter, u8 ch);
+u8 rtw_get_oper_bw(struct adapter *adapter);
+void rtw_set_oper_bw(struct adapter *adapter, u8 bw);
+u8 rtw_get_oper_choffset(struct adapter *adapter);
+void rtw_set_oper_choffset(struct adapter *adapter, u8 offset);
+
+void set_channel_bwmode(struct adapter *padapter, unsigned char channel,
+ unsigned char channel_offset, unsigned short bwmode);
+void SelectChannel(struct adapter *padapter, unsigned char channel);
+void SetBWMode(struct adapter *padapter, unsigned short bwmode,
+ unsigned char channel_offset);
+
+unsigned int decide_wait_for_beacon_timeout(unsigned int bcn_interval);
+
+void write_cam(struct adapter *padapter, u8 entry, u16 ctrl, u8 *mac, u8 *key);
+void clear_cam_entry(struct adapter *padapter, u8 entry);
+
+void invalidate_cam_all(struct adapter *padapter);
+void CAM_empty_entry(struct adapter *Adapter, u8 ucIndex);
+
+int allocate_fw_sta_entry(struct adapter *padapter);
+void flush_all_cam_entry(struct adapter *padapter);
+
+void site_survey(struct adapter *padapter);
+u8 collect_bss_info(struct adapter *padapter, union recv_frame *precv_frame,
+ struct wlan_bssid_ex *bssid);
+void update_network(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
+ struct adapter *adapter, bool update_ie);
+
+int get_bsstype(unsigned short capability);
+u8 *get_my_bssid(struct wlan_bssid_ex *pnetwork);
+u16 get_beacon_interval(struct wlan_bssid_ex *bss);
+
+int is_client_associated_to_ap(struct adapter *padapter);
+int is_client_associated_to_ibss(struct adapter *padapter);
+int is_IBSS_empty(struct adapter *padapter);
+
+unsigned char check_assoc_AP(u8 *pframe, uint len);
+
+int WMM_param_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE);
+void WMMOnAssocRsp(struct adapter *padapter);
+
+void HT_caps_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE);
+void HT_info_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE);
+void HTOnAssocRsp(struct adapter *padapter);
+
+void ERP_IE_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE);
+void VCS_update(struct adapter *padapter, struct sta_info *psta);
+
+void update_beacon_info(struct adapter *padapter, u8 *pframe, uint len,
+ struct sta_info *psta);
+int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len);
+void update_IOT_info(struct adapter *padapter);
+void update_capinfo(struct adapter *adapter, u16 updatecap);
+void update_wireless_mode(struct adapter *padapter);
+void update_tx_basic_rate(struct adapter *padapter, u8 modulation);
+void update_bmc_sta_support_rate(struct adapter *padapter, u32 mac_id);
+int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie,
+ uint var_ie_len, int cam_idx);
+
+/* for sta/adhoc mode */
+void update_sta_info(struct adapter *padapter, struct sta_info *psta);
+unsigned int update_basic_rate(unsigned char *ptn, unsigned int ptn_sz);
+unsigned int update_supported_rate(unsigned char *ptn, unsigned int ptn_sz);
+unsigned int update_MSC_rate(struct HT_caps_element *pHT_caps);
+void Update_RA_Entry(struct adapter *padapter, u32 mac_id);
+void set_sta_rate(struct adapter *padapter, struct sta_info *psta);
+
+unsigned int receive_disconnect(struct adapter *padapter,
+ unsigned char *macaddr, unsigned short reason);
+
+unsigned char get_highest_rate_idx(u32 mask);
+int support_short_GI(struct adapter *padapter, struct HT_caps_element *caps);
+unsigned int is_ap_in_tkip(struct adapter *padapter);
+unsigned int is_ap_in_wep(struct adapter *padapter);
+unsigned int should_forbid_n_rate(struct adapter *padapter);
+
+void report_join_res(struct adapter *padapter, int res);
+void report_survey_event(struct adapter *padapter, union recv_frame *precv_frame);
+void report_surveydone_event(struct adapter *padapter);
+void report_del_sta_event(struct adapter *padapter,
+ unsigned char *addr, unsigned short reason);
+void report_add_sta_event(struct adapter *padapter, unsigned char *addr,
+ int cam_idx);
+
+void beacon_timing_control(struct adapter *padapter);
+extern u8 set_tx_beacon_cmd(struct adapter *padapter);
+unsigned int setup_beacon_frame(struct adapter *padapter,
+ unsigned char *beacon_frame);
+void update_mgnt_tx_rate(struct adapter *padapter, u8 rate);
+void update_mgntframe_attrib(struct adapter *padapter,
+ struct pkt_attrib *pattrib);
+void dump_mgntframe(struct adapter *padapter, struct xmit_frame *pmgntframe);
+s32 dump_mgntframe_and_wait(struct adapter *padapter,
+ struct xmit_frame *pmgntframe, int timeout_ms);
+s32 dump_mgntframe_and_wait_ack(struct adapter *padapter,
+ struct xmit_frame *pmgntframe);
+
+#ifdef CONFIG_88EU_P2P
+void issue_probersp_p2p(struct adapter *padapter, unsigned char *da);
+void issue_p2p_provision_request(struct adapter *padapter, u8 *pssid,
+ u8 ussidlen, u8 *pdev_raddr);
+void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr);
+void issue_probereq_p2p(struct adapter *padapter, u8 *da);
+int issue_probereq_p2p_ex(struct adapter *adapter, u8 *da, int try_cnt,
+ int wait_ms);
+void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr,
+ u8 dialogToken, u8 success);
+void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr);
+#endif /* CONFIG_88EU_P2P */
+void issue_beacon(struct adapter *padapter, int timeout_ms);
+void issue_probersp(struct adapter *padapter, unsigned char *da,
+ u8 is_valid_p2p_probereq);
+void issue_assocreq(struct adapter *padapter);
+void issue_asocrsp(struct adapter *padapter, unsigned short status,
+ struct sta_info *pstat, int pkt_type);
+void issue_auth(struct adapter *padapter, struct sta_info *psta,
+ unsigned short status);
+void issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pssid,
+ u8 *da);
+s32 issue_probereq_ex(struct adapter *adapter, struct ndis_802_11_ssid *pssid,
+ u8 *da, int try_cnt, int wait_ms);
+int issue_nulldata(struct adapter *padapter, unsigned char *da,
+ unsigned int power_mode, int try_cnt, int wait_ms);
+int issue_qos_nulldata(struct adapter *padapter, unsigned char *da,
+ u16 tid, int try_cnt, int wait_ms);
+int issue_deauth(struct adapter *padapter, unsigned char *da,
+ unsigned short reason);
+int issue_deauth_ex(struct adapter *padapter, u8 *da, unsigned short reason,
+ int try_cnt, int wait_ms);
+void issue_action_spct_ch_switch(struct adapter *padapter, u8 *ra, u8 new_ch,
+ u8 ch_offset);
+void issue_action_BA(struct adapter *padapter, unsigned char *raddr,
+ unsigned char action, unsigned short status);
+unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr);
+unsigned int send_beacon(struct adapter *padapter);
+
+void start_clnt_assoc(struct adapter *padapter);
+void start_clnt_auth(struct adapter *padapter);
+void start_clnt_join(struct adapter *padapter);
+void start_create_ibss(struct adapter *padapter);
+
+unsigned int OnAssocReq(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnAssocRsp(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnProbeReq(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnProbeRsp(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int DoReserved(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnBeacon(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnAtim(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnDisassoc(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnAuth(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnAuthClient(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnDeAuth(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnAction(struct adapter *padapter,
+ union recv_frame *precv_frame);
+
+unsigned int on_action_spct(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnAction_qos(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnAction_dls(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnAction_back(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int on_action_public(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnAction_ht(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnAction_wmm(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnAction_p2p(struct adapter *padapter,
+ union recv_frame *precv_frame);
+
+void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res);
+void mlmeext_sta_del_event_callback(struct adapter *padapter);
+void mlmeext_sta_add_event_callback(struct adapter *padapter,
+ struct sta_info *psta);
+
+void linked_status_chk(struct adapter *padapter);
+
+void survey_timer_hdl(struct adapter *padapter);
+void link_timer_hdl(struct adapter *padapter);
+void addba_timer_hdl(struct sta_info *psta);
+
+#define set_survey_timer(mlmeext, ms) \
+ do { \
+ _set_timer(&(mlmeext)->survey_timer, (ms)); \
+ } while (0)
+
+#define set_link_timer(mlmeext, ms) \
+ do { \
+ _set_timer(&(mlmeext)->link_timer, (ms)); \
+ } while (0)
+
+int cckrates_included(unsigned char *rate, int ratelen);
+int cckratesonly_included(unsigned char *rate, int ratelen);
+
+void process_addba_req(struct adapter *padapter, u8 *paddba_req, u8 *addr);
+
+void update_TSF(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len);
+void correct_TSF(struct adapter *padapter, struct mlme_ext_priv *pmlmeext);
+
+struct cmd_hdl {
+ uint parmsize;
+ u8 (*h2cfuns)(struct adapter *padapter, u8 *pbuf);
+};
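+
+/*
+ * Dispatch sketch (editor's illustration; the real dispatcher lives in the
+ * command thread implementation, not in this header): the wlancmds[] table
+ * below is indexed by command code, so handling a queued command amounts to
+ * something like
+ *
+ * struct cmd_hdl *hdl = &wlancmds[pcmd->cmdcode];
+ *
+ * if (hdl->h2cfuns)
+ * res = hdl->h2cfuns(padapter, pcmd->parmbuf);
+ *
+ * with parmsize available for validating the parameter buffer length.
+ * The field names pcmd->cmdcode and pcmd->parmbuf are assumptions here.
+ */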
+
+u8 read_macreg_hdl(struct adapter *padapter, u8 *pbuf);
+u8 write_macreg_hdl(struct adapter *padapter, u8 *pbuf);
+u8 read_bbreg_hdl(struct adapter *padapter, u8 *pbuf);
+u8 write_bbreg_hdl(struct adapter *padapter, u8 *pbuf);
+u8 read_rfreg_hdl(struct adapter *padapter, u8 *pbuf);
+u8 write_rfreg_hdl(struct adapter *padapter, u8 *pbuf);
+u8 NULL_hdl(struct adapter *padapter, u8 *pbuf);
+u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf);
+u8 disconnect_hdl(struct adapter *padapter, u8 *pbuf);
+u8 createbss_hdl(struct adapter *padapter, u8 *pbuf);
+u8 setopmode_hdl(struct adapter *padapter, u8 *pbuf);
+u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf);
+u8 setauth_hdl(struct adapter *padapter, u8 *pbuf);
+u8 setkey_hdl(struct adapter *padapter, u8 *pbuf);
+u8 set_stakey_hdl(struct adapter *padapter, u8 *pbuf);
+u8 set_assocsta_hdl(struct adapter *padapter, u8 *pbuf);
+u8 del_assocsta_hdl(struct adapter *padapter, u8 *pbuf);
+u8 add_ba_hdl(struct adapter *padapter, unsigned char *pbuf);
+
+u8 mlme_evt_hdl(struct adapter *padapter, unsigned char *pbuf);
+u8 h2c_msg_hdl(struct adapter *padapter, unsigned char *pbuf);
+u8 tx_beacon_hdl(struct adapter *padapter, unsigned char *pbuf);
+u8 set_ch_hdl(struct adapter *padapter, u8 *pbuf);
+u8 set_chplan_hdl(struct adapter *padapter, unsigned char *pbuf);
+u8 led_blink_hdl(struct adapter *padapter, unsigned char *pbuf);
+/* Handles the DFS channel switch announcement IE. */
+u8 set_csa_hdl(struct adapter *padapter, unsigned char *pbuf);
+u8 tdls_hdl(struct adapter *padapter, unsigned char *pbuf);
+
+#define GEN_DRV_CMD_HANDLER(size, cmd) {size, &cmd ## _hdl},
+#define GEN_MLME_EXT_HANDLER(size, cmd) {size, cmd},
+
+#ifdef _RTW_CMD_C_
+
+static struct cmd_hdl wlancmds[] = {
+ GEN_DRV_CMD_HANDLER(0, NULL) /*0*/
+ GEN_DRV_CMD_HANDLER(0, NULL)
+ GEN_DRV_CMD_HANDLER(0, NULL)
+ GEN_DRV_CMD_HANDLER(0, NULL)
+ GEN_DRV_CMD_HANDLER(0, NULL)
+ GEN_DRV_CMD_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL) /*10*/
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(sizeof (struct joinbss_parm), join_cmd_hdl) /*14*/
+ GEN_MLME_EXT_HANDLER(sizeof (struct disconnect_parm), disconnect_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof (struct createbss_parm), createbss_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof (struct setopmode_parm), setopmode_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof (struct sitesurvey_parm),
+ sitesurvey_cmd_hdl) /*18*/
+ GEN_MLME_EXT_HANDLER(sizeof (struct setauth_parm), setauth_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof (struct setkey_parm), setkey_hdl) /*20*/
+ GEN_MLME_EXT_HANDLER(sizeof (struct set_stakey_parm), set_stakey_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof (struct set_assocsta_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof (struct del_assocsta_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof (struct setstapwrstate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof (struct setbasicrate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof (struct getbasicrate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof (struct setdatarate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof (struct getdatarate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof (struct setphyinfo_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof (struct getphyinfo_parm), NULL) /*30*/
+ GEN_MLME_EXT_HANDLER(sizeof (struct setphy_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof (struct getphy_parm), NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL) /*40*/
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct addBaReq_parm), add_ba_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct set_ch_parm), set_ch_hdl) /* 46 */
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL) /*50*/
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct Tx_Beacon_param),
+ tx_beacon_hdl) /*55*/
+
+ GEN_MLME_EXT_HANDLER(0, mlme_evt_hdl) /*56*/
+ GEN_MLME_EXT_HANDLER(0, rtw_drvextra_cmd_hdl) /*57*/
+
+ GEN_MLME_EXT_HANDLER(0, h2c_msg_hdl) /*58*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct SetChannelPlan_param),
+ set_chplan_hdl) /*59*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct LedBlink_param),
+ led_blink_hdl) /*60*/
+
+ GEN_MLME_EXT_HANDLER(sizeof(struct SetChannelSwitch_param),
+ set_csa_hdl) /*61*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct TDLSoption_param),
+ tdls_hdl) /*62*/
+};
+
+#endif
+
+struct C2HEvent_Header {
+#ifdef __LITTLE_ENDIAN
+ unsigned int len:16;
+ unsigned int ID:8;
+ unsigned int seq:8;
+#elif defined(__BIG_ENDIAN)
+ unsigned int seq:8;
+ unsigned int ID:8;
+ unsigned int len:16;
+#endif
+ unsigned int rsvd;
+};
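+
+/*
+ * Layout note (editor's illustration, not from the original source): on a
+ * little-endian host the first 32-bit word packs len into bits 0-15, ID into
+ * bits 16-23 and seq into bits 24-31, so a raw event word of 0x05010010
+ * would be read as len = 0x0010, ID = 0x01 (_Read_BBREG in the enum below)
+ * and seq = 0x05. The __BIG_ENDIAN branch reverses the bit-field order so
+ * the same wire layout is preserved.
+ */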
+
+void rtw_dummy_event_callback(struct adapter *adapter, u8 *pbuf);
+void rtw_fwdbg_event_callback(struct adapter *adapter, u8 *pbuf);
+
+enum rtw_c2h_event {
+ GEN_EVT_CODE(_Read_MACREG) = 0, /*0*/
+ GEN_EVT_CODE(_Read_BBREG),
+ GEN_EVT_CODE(_Read_RFREG),
+ GEN_EVT_CODE(_Read_EEPROM),
+ GEN_EVT_CODE(_Read_EFUSE),
+ GEN_EVT_CODE(_Read_CAM), /*5*/
+ GEN_EVT_CODE(_Get_BasicRate),
+ GEN_EVT_CODE(_Get_DataRate),
+ GEN_EVT_CODE(_Survey), /*8*/
+ GEN_EVT_CODE(_SurveyDone), /*9*/
+
+ GEN_EVT_CODE(_JoinBss), /*10*/
+ GEN_EVT_CODE(_AddSTA),
+ GEN_EVT_CODE(_DelSTA),
+ GEN_EVT_CODE(_AtimDone),
+ GEN_EVT_CODE(_TX_Report),
+ GEN_EVT_CODE(_CCX_Report), /*15*/
+ GEN_EVT_CODE(_DTM_Report),
+ GEN_EVT_CODE(_TX_Rate_Statistics),
+ GEN_EVT_CODE(_C2HLBK),
+ GEN_EVT_CODE(_FWDBG),
+ GEN_EVT_CODE(_C2HFEEDBACK), /*20*/
+ GEN_EVT_CODE(_ADDBA),
+ GEN_EVT_CODE(_C2HBCN),
+ GEN_EVT_CODE(_ReportPwrState), /* filen: only for PCIE, USB */
+ GEN_EVT_CODE(_CloseRF), /* filen: only for PCIE,
+ * work around ASPM */
+ MAX_C2HEVT
+};
+
+
+#ifdef _RTW_MLME_EXT_C_
+
+static struct fwevent wlanevents[] = {
+ {0, rtw_dummy_event_callback}, /*0*/
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, &rtw_survey_event_callback}, /*8*/
+ {sizeof (struct surveydone_event), &rtw_surveydone_event_callback},/*9*/
+ {0, &rtw_joinbss_event_callback}, /*10*/
+ {sizeof(struct stassoc_event), &rtw_stassoc_event_callback},
+ {sizeof(struct stadel_event), &rtw_stadel_event_callback},
+ {0, &rtw_atimdone_event_callback},
+ {0, rtw_dummy_event_callback},
+ {0, NULL}, /*15*/
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, rtw_fwdbg_event_callback},
+ {0, NULL}, /*20*/
+ {0, NULL},
+ {0, NULL},
+ {0, &rtw_cpwm_event_callback},
+ {0, NULL},
+};
+
+#endif /* _RTW_MLME_EXT_C_ */
+
+#endif /* __RTW_MLME_EXT_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_mp.h b/drivers/staging/rtl8188eu/include/rtw_mp.h
new file mode 100644
index 0000000..59bdbb5
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_mp.h
@@ -0,0 +1,495 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _RTW_MP_H_
+#define _RTW_MP_H_
+
+/* 00 - Success */
+/* 11 - Error */
+#define STATUS_SUCCESS (0x00000000L)
+#define STATUS_PENDING (0x00000103L)
+
+#define STATUS_UNSUCCESSFUL (0xC0000001L)
+#define STATUS_INSUFFICIENT_RESOURCES (0xC000009AL)
+#define STATUS_NOT_SUPPORTED (0xC00000BBL)
+
+#define NDIS_STATUS_SUCCESS ((int)STATUS_SUCCESS)
+#define NDIS_STATUS_PENDING ((int)STATUS_PENDING)
+#define NDIS_STATUS_NOT_RECOGNIZED ((int)0x00010001L)
+#define NDIS_STATUS_NOT_COPIED ((int)0x00010002L)
+#define NDIS_STATUS_NOT_ACCEPTED ((int)0x00010003L)
+#define NDIS_STATUS_CALL_ACTIVE ((int)0x00010007L)
+
+#define NDIS_STATUS_FAILURE ((int)STATUS_UNSUCCESSFUL)
+#define NDIS_STATUS_RESOURCES ((int)STATUS_INSUFFICIENT_RESOURCES)
+#define NDIS_STATUS_CLOSING ((int)0xC0010002L)
+#define NDIS_STATUS_BAD_VERSION ((int)0xC0010004L)
+#define NDIS_STATUS_BAD_CHARACTERISTICS ((int)0xC0010005L)
+#define NDIS_STATUS_ADAPTER_NOT_FOUND ((int)0xC0010006L)
+#define NDIS_STATUS_OPEN_FAILED ((int)0xC0010007L)
+#define NDIS_STATUS_DEVICE_FAILED ((int)0xC0010008L)
+#define NDIS_STATUS_MULTICAST_FULL ((int)0xC0010009L)
+#define NDIS_STATUS_MULTICAST_EXISTS ((int)0xC001000AL)
+#define NDIS_STATUS_MULTICAST_NOT_FOUND ((int)0xC001000BL)
+#define NDIS_STATUS_REQUEST_ABORTED ((int)0xC001000CL)
+#define NDIS_STATUS_RESET_IN_PROGRESS ((int)0xC001000DL)
+#define NDIS_STATUS_CLOSING_INDICATING ((int)0xC001000EL)
+#define NDIS_STATUS_NOT_SUPPORTED ((int)STATUS_NOT_SUPPORTED)
+#define NDIS_STATUS_INVALID_PACKET ((int)0xC001000FL)
+#define NDIS_STATUS_OPEN_LIST_FULL ((int)0xC0010010L)
+#define NDIS_STATUS_ADAPTER_NOT_READY ((int)0xC0010011L)
+#define NDIS_STATUS_ADAPTER_NOT_OPEN ((int)0xC0010012L)
+#define NDIS_STATUS_NOT_INDICATING ((int)0xC0010013L)
+#define NDIS_STATUS_INVALID_LENGTH ((int)0xC0010014L)
+#define NDIS_STATUS_INVALID_DATA ((int)0xC0010015L)
+#define NDIS_STATUS_BUFFER_TOO_SHORT ((int)0xC0010016L)
+#define NDIS_STATUS_INVALID_OID ((int)0xC0010017L)
+#define NDIS_STATUS_ADAPTER_REMOVED ((int)0xC0010018L)
+#define NDIS_STATUS_UNSUPPORTED_MEDIA ((int)0xC0010019L)
+#define NDIS_STATUS_GROUP_ADDRESS_IN_USE ((int)0xC001001AL)
+#define NDIS_STATUS_FILE_NOT_FOUND ((int)0xC001001BL)
+#define NDIS_STATUS_ERROR_READING_FILE ((int)0xC001001CL)
+#define NDIS_STATUS_ALREADY_MAPPED ((int)0xC001001DL)
+#define NDIS_STATUS_RESOURCE_CONFLICT ((int)0xC001001EL)
+#define NDIS_STATUS_NO_CABLE ((int)0xC001001FL)
+
+#define NDIS_STATUS_INVALID_SAP ((int)0xC0010020L)
+#define NDIS_STATUS_SAP_IN_USE ((int)0xC0010021L)
+#define NDIS_STATUS_INVALID_ADDRESS ((int)0xC0010022L)
+#define NDIS_STATUS_VC_NOT_ACTIVATED ((int)0xC0010023L)
+#define NDIS_STATUS_DEST_OUT_OF_ORDER ((int)0xC0010024L) /*cause 27*/
+#define NDIS_STATUS_VC_NOT_AVAILABLE ((int)0xC0010025L) /*cause 35,45 */
+#define NDIS_STATUS_CELLRATE_NOT_AVAILABLE ((int)0xC0010026L) /*cause 37*/
+#define NDIS_STATUS_INCOMPATABLE_QOS ((int)0xC0010027L) /*cause 49*/
+#define NDIS_STATUS_AAL_PARAMS_UNSUPPORTED ((int)0xC0010028L) /*cause 93*/
+#define NDIS_STATUS_NO_ROUTE_TO_DESTINATION ((int)0xC0010029L) /*cause 3 */
+
+enum antenna_path {
+ ANTENNA_NONE = 0x00,
+ ANTENNA_D,
+ ANTENNA_C,
+ ANTENNA_CD,
+ ANTENNA_B,
+ ANTENNA_BD,
+ ANTENNA_BC,
+ ANTENNA_BCD,
+ ANTENNA_A,
+ ANTENNA_AD,
+ ANTENNA_AC,
+ ANTENNA_ACD,
+ ANTENNA_AB,
+ ANTENNA_ABD,
+ ANTENNA_ABC,
+ ANTENNA_ABCD
+};
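Because the enumerators above are assigned consecutively from 0, each value is effectively a 4-bit mask with D in bit 0, C in bit 1, B in bit 2 and A in bit 3, so every combined entry equals the OR of its single-path entries (for example ANTENNA_AB == ANTENNA_A | ANTENNA_B == 12). A short sketch relying only on that implied layout (the helper name is hypothetical):

    /* Sketch: treat enum antenna_path as a 4-bit A/B/C/D selection mask. */
    static inline bool antenna_has_path_a(enum antenna_path sel)
    {
        return (sel & ANTENNA_A) != 0;  /* bit 3 set => path A selected */
    }

    /* Likewise ANTENNA_ABCD == (ANTENNA_A | ANTENNA_B | ANTENNA_C | ANTENNA_D). */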
+
+
+#define MAX_MP_XMITBUF_SZ 2048
+#define NR_MP_XMITFRAME 8
+
+struct mp_xmit_frame {
+ struct list_head list;
+ struct pkt_attrib attrib;
+ struct sk_buff *pkt;
+ int frame_tag;
+ struct adapter *padapter;
+ struct urb *pxmit_urb[8];
+ /* insert urb, irp, and irpcnt info below... */
+ u8 *mem_addr;
+ u32 sz[8];
+ u8 bpending[8];
+ int ac_tag[8];
+ int last[8];
+ uint irpcnt;
+ uint fragcnt;
+ uint mem[(MAX_MP_XMITBUF_SZ >> 2)];
+};
+
+struct mp_wiparam {
+ u32 bcompleted;
+ u32 act_type;
+ u32 io_offset;
+ u32 io_value;
+};
+
+typedef void (*wi_act_func)(void *padapter);
+
+struct mp_tx {
+ u8 stop;
+ u32 count, sended;
+ u8 payload;
+ struct pkt_attrib attrib;
+ struct tx_desc desc;
+ u8 *pallocated_buf;
+ u8 *buf;
+ u32 buf_size, write_size;
+ void *PktTxThread;
+};
+
+#include <Hal8188EPhyCfg.h>
+
+#define MP_MAX_LINES 1000
+#define MP_MAX_LINES_BYTES 256
+
+typedef void (*MPT_WORK_ITEM_HANDLER)(void *Adapter);
+
+struct mpt_context {
+ /* Indicate if we have started Mass Production Test. */
+ bool bMassProdTest;
+
+ /* Indicate if the driver is unloading or unloaded. */
+ bool bMptDrvUnload;
+
+ struct semaphore MPh2c_Sema;
+ struct timer_list MPh2c_timeout_timer;
+ /* Event used to sync H2C for BT control */
+
+ bool MptH2cRspEvent;
+ bool MptBtC2hEvent;
+ bool bMPh2c_timeout;
+
+ /* 8190 PCI does not support NDIS_WORK_ITEM. */
+ /* Work Item for Mass Production Test. */
+ /* Event used to sync the case where the driver is unloading while
+ * MptWorkItem is still in progress. */
+ /* Indicates that an MptWorkItem is scheduled and not yet finished. */
+ bool bMptWorkItemInProgress;
+ /* An instance which implements function and context of MptWorkItem. */
+ MPT_WORK_ITEM_HANDLER CurrMptAct;
+
+ /* 1=Start, 0=Stop from UI. */
+ u32 MptTestStart;
+ /* _TEST_MODE, defined in MPT_Req2.h */
+ u32 MptTestItem;
+ /* Variable needed in each implementation of CurrMptAct. */
+ u32 MptActType; /* Type of action performed in CurrMptAct. */
+ /* The offset of the IO operation depends on MptActType. */
+ u32 MptIoOffset;
+ /* The value of the IO operation depends on MptActType. */
+ u32 MptIoValue;
+ /* The RF path of the IO operation depends on MptActType. */
+ u32 MptRfPath;
+
+ enum wireless_mode MptWirelessModeToSw; /* Wireless mode to switch. */
+ u8 MptChannelToSw; /* Channel to switch. */
+ u8 MptInitGainToSet; /* Initial gain to set. */
+ u32 MptBandWidth; /* bandwidth to switch. */
+ u32 MptRateIndex; /* rate index. */
+ /* Register value kept for Single Carrier Tx test. */
+ u8 btMpCckTxPower;
+ /* Register value kept for Single Carrier Tx test. */
+ u8 btMpOfdmTxPower;
+ /* For MP Tx Power index */
+ u8 TxPwrLevel[2]; /* rf-A, rf-B */
+
+ /* Content of RCR Register for Mass Production Test. */
+ u32 MptRCR;
+ /* true if we only receive packets with specific pattern. */
+ bool bMptFilterPattern;
+ /* Rx OK count, statistics used in Mass Production Test. */
+ u32 MptRxOkCnt;
+ /* Rx CRC32 error count, statistics used in Mass Production Test. */
+ u32 MptRxCrcErrCnt;
+
+ bool bCckContTx; /* true if we are in CCK Continuous Tx test. */
+ bool bOfdmContTx; /* true if we are in OFDM Continuous Tx test. */
+ bool bStartContTx; /* true if we have started the Continuous Tx test. */
+ /* true if we are in Single Carrier Tx test. */
+ bool bSingleCarrier;
+ /* true if we are in Carrier Suppression Tx Test. */
+ bool bCarrierSuppression;
+ /* true if we are in Single Tone Tx test. */
+ bool bSingleTone;
+
+ /* ACK counter requested by K.Y. */
+ bool bMptEnableAckCounter;
+ u32 MptAckCounter;
+
+ u8 APK_bound[2]; /* for APK path A/path B */
+ bool bMptIndexEven;
+
+ u8 backup0xc50;
+ u8 backup0xc58;
+ u8 backup0xc30;
+ u8 backup0x52_RF_A;
+ u8 backup0x52_RF_B;
+
+ u8 h2cReqNum;
+ u8 c2hBuf[20];
+
+ u8 btInBuf[100];
+ u32 mptOutLen;
+ u8 mptOutBuf[100];
+};
+
+enum {
+ WRITE_REG = 1,
+ READ_REG,
+ WRITE_RF,
+ READ_RF,
+ MP_START,
+ MP_STOP,
+ MP_RATE,
+ MP_CHANNEL,
+ MP_BANDWIDTH,
+ MP_TXPOWER,
+ MP_ANT_TX,
+ MP_ANT_RX,
+ MP_CTX,
+ MP_QUERY,
+ MP_ARX,
+ MP_PSD,
+ MP_PWRTRK,
+ MP_THER,
+ MP_IOCTL,
+ EFUSE_GET,
+ EFUSE_SET,
+ MP_RESET_STATS,
+ MP_DUMP,
+ MP_PHYPARA,
+ MP_SetRFPathSwh,
+ MP_QueryDrvStats,
+ MP_SetBT,
+ CTA_TEST,
+ MP_NULL,
+};
+
+struct mp_priv {
+ struct adapter *papdater;
+
+ /* Testing Flag */
+ /* 0 for a normal type packet, 1 for a loopback packet (16-byte TXCMD) */
+ u32 mode;
+
+ u32 prev_fw_state;
+
+ /* OID cmd handler */
+ struct mp_wiparam workparam;
+
+ /* Tx Section */
+ u8 TID;
+ u32 tx_pktcount;
+ struct mp_tx tx;
+
+ /* Rx Section */
+ u32 rx_pktcount;
+ u32 rx_crcerrpktcount;
+ u32 rx_pktloss;
+
+ struct recv_stat rxstat;
+
+ /* RF/BB relative */
+ u8 channel;
+ u8 bandwidth;
+ u8 prime_channel_offset;
+ u8 txpoweridx;
+ u8 txpoweridx_b;
+ u8 rateidx;
+ u32 preamble;
+ u32 CrystalCap;
+
+ u16 antenna_tx;
+ u16 antenna_rx;
+
+ u8 check_mp_pkt;
+
+ u8 bSetTxPower;
+
+ struct wlan_network mp_network;
+ unsigned char network_macaddr[ETH_ALEN];
+
+ u8 *pallocated_mp_xmitframe_buf;
+ u8 *pmp_xmtframe_buf;
+ struct __queue free_mp_xmitqueue;
+ u32 free_mp_xmitframe_cnt;
+
+ struct mpt_context MptCtx;
+};
+
+struct iocmd_struct {
+ u8 cmdclass;
+ u16 value;
+ u8 index;
+};
+
+struct rf_reg_param {
+ u32 path;
+ u32 offset;
+ u32 value;
+};
+
+struct bb_reg_param {
+ u32 offset;
+ u32 value;
+};
+/* */
+
+#define LOWER true
+#define RAISE false
+
+/* Hardware Registers */
+#define BB_REG_BASE_ADDR 0x800
+
+/* MP variables */
+enum mp_mode_ {
+ MP_OFF,
+ MP_ON,
+ MP_ERR,
+ MP_CONTINUOUS_TX,
+ MP_SINGLE_CARRIER_TX,
+ MP_CARRIER_SUPPRISSION_TX,
+ MP_SINGLE_TONE_TX,
+ MP_PACKET_TX,
+ MP_PACKET_RX
+};
+
+#define MAX_RF_PATH_NUMS RF_PATH_MAX
+
+extern u8 mpdatarate[NumRates];
+
+/* MP sets the forced data rate based on the definitions below. */
+enum mpt_rate_index {
+ /* CCK rate. */
+ MPT_RATE_1M, /* 0 */
+ MPT_RATE_2M,
+ MPT_RATE_55M,
+ MPT_RATE_11M, /* 3 */
+
+ /* OFDM rate. */
+ MPT_RATE_6M, /* 4 */
+ MPT_RATE_9M,
+ MPT_RATE_12M,
+ MPT_RATE_18M,
+ MPT_RATE_24M,
+ MPT_RATE_36M,
+ MPT_RATE_48M,
+ MPT_RATE_54M, /* 11 */
+
+ /* HT rate. */
+ MPT_RATE_MCS0, /* 12 */
+ MPT_RATE_MCS1,
+ MPT_RATE_MCS2,
+ MPT_RATE_MCS3,
+ MPT_RATE_MCS4,
+ MPT_RATE_MCS5,
+ MPT_RATE_MCS6,
+ MPT_RATE_MCS7, /* 19 */
+ MPT_RATE_MCS8,
+ MPT_RATE_MCS9,
+ MPT_RATE_MCS10,
+ MPT_RATE_MCS11,
+ MPT_RATE_MCS12,
+ MPT_RATE_MCS13,
+ MPT_RATE_MCS14,
+ MPT_RATE_MCS15, /* 27 */
+ MPT_RATE_LAST
+};
+
+#define MAX_TX_PWR_INDEX_N_MODE 64 /* 0x3F */
+
+enum power_mode {
+ POWER_LOW = 0,
+ POWER_NORMAL
+};
+
+#define RX_PKT_BROADCAST 1
+#define RX_PKT_DEST_ADDR 2
+#define RX_PKT_PHY_MATCH 3
+
+enum encry_ctrl_state {
+ HW_CONTROL, /* hw encryption& decryption */
+ SW_CONTROL, /* sw encryption& decryption */
+ HW_ENCRY_SW_DECRY, /* hw encryption & sw decryption */
+ SW_ENCRY_HW_DECRY /* sw encryption & hw decryption */
+};
+
+s32 init_mp_priv(struct adapter *padapter);
+void free_mp_priv(struct mp_priv *pmp_priv);
+s32 MPT_InitializeAdapter(struct adapter *padapter, u8 Channel);
+void MPT_DeInitAdapter(struct adapter *padapter);
+s32 mp_start_test(struct adapter *padapter);
+void mp_stop_test(struct adapter *padapter);
+
+u32 _read_rfreg(struct adapter *padapter, u8 rfpath, u32 addr, u32 bitmask);
+void _write_rfreg(struct adapter *padapter, u8 rfpath, u32 addr, u32 bitmask, u32 val);
+
+u32 read_macreg(struct adapter *padapter, u32 addr, u32 sz);
+void write_macreg(struct adapter *padapter, u32 addr, u32 val, u32 sz);
+u32 read_bbreg(struct adapter *padapter, u32 addr, u32 bitmask);
+void write_bbreg(struct adapter *padapter, u32 addr, u32 bitmask, u32 val);
+u32 read_rfreg(struct adapter *padapter, u8 rfpath, u32 addr);
+void write_rfreg(struct adapter *padapter, u8 rfpath, u32 addr, u32 val);
+
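Every accessor above takes a bit mask that selects the field of interest; bMaskDWord and the bMaskByte* values defined in rtw_mp_phy_regdef.h later in this patch are the usual arguments for whole-register and single-byte accesses. A hedged usage sketch, assuming write_bbreg() performs a read-modify-write of only the masked bits (register 0xc50 and the value written to it are purely examples):

    /* Sketch only: typical use of the masked register accessors. */
    static void mp_example_reg_access(struct adapter *padapter)
    {
        u32 agc;

        /* Read the whole 32-bit BB register at 0xc50 (path-A AGC core). */
        agc = read_bbreg(padapter, rOFDM0_XAAGCCore1, bMaskDWord);

        /* Rewrite only the low byte, leaving the other bits untouched
         * (assumes read-modify-write semantics for partial masks). */
        write_bbreg(padapter, rOFDM0_XAAGCCore1, bMaskByte0, agc & 0x7f);

        /* RF side: read the channel/bandwidth register on RF path 0. */
        (void)read_rfreg(padapter, 0 /* path A */, RF_CHNLBW);
    }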
+void SetChannel(struct adapter *pAdapter);
+void SetBandwidth(struct adapter *pAdapter);
+void SetTxPower(struct adapter *pAdapter);
+void SetAntennaPathPower(struct adapter *pAdapter);
+void SetDataRate(struct adapter *pAdapter);
+
+void SetAntenna(struct adapter *pAdapter);
+
+s32 SetThermalMeter(struct adapter *pAdapter, u8 target_ther);
+void GetThermalMeter(struct adapter *pAdapter, u8 *value);
+
+void SetContinuousTx(struct adapter *pAdapter, u8 bStart);
+void SetSingleCarrierTx(struct adapter *pAdapter, u8 bStart);
+void SetSingleToneTx(struct adapter *pAdapter, u8 bStart);
+void SetCarrierSuppressionTx(struct adapter *pAdapter, u8 bStart);
+void PhySetTxPowerLevel(struct adapter *pAdapter);
+
+void fill_txdesc_for_mp(struct adapter *padapter, struct tx_desc *ptxdesc);
+void SetPacketTx(struct adapter *padapter);
+void SetPacketRx(struct adapter *pAdapter, u8 bStartRx);
+
+void ResetPhyRxPktCount(struct adapter *pAdapter);
+u32 GetPhyRxPktReceived(struct adapter *pAdapter);
+u32 GetPhyRxPktCRC32Error(struct adapter *pAdapter);
+
+s32 SetPowerTracking(struct adapter *padapter, u8 enable);
+void GetPowerTracking(struct adapter *padapter, u8 *enable);
+u32 mp_query_psd(struct adapter *pAdapter, u8 *data);
+void Hal_SetAntenna(struct adapter *pAdapter);
+void Hal_SetBandwidth(struct adapter *pAdapter);
+void Hal_SetTxPower(struct adapter *pAdapter);
+void Hal_SetCarrierSuppressionTx(struct adapter *pAdapter, u8 bStart);
+void Hal_SetSingleToneTx(struct adapter *pAdapter, u8 bStart);
+void Hal_SetSingleCarrierTx(struct adapter *pAdapter, u8 bStart);
+void Hal_SetContinuousTx(struct adapter *pAdapter, u8 bStart);
+void Hal_SetBandwidth(struct adapter *pAdapter);
+void Hal_SetDataRate(struct adapter *pAdapter);
+void Hal_SetChannel(struct adapter *pAdapter);
+void Hal_SetAntennaPathPower(struct adapter *pAdapter);
+s32 Hal_SetThermalMeter(struct adapter *pAdapter, u8 target_ther);
+s32 Hal_SetPowerTracking(struct adapter *padapter, u8 enable);
+void Hal_GetPowerTracking(struct adapter *padapter, u8 *enable);
+void Hal_GetThermalMeter(struct adapter *pAdapter, u8 *value);
+void Hal_mpt_SwitchRfSetting(struct adapter *pAdapter);
+void Hal_MPT_CCKTxPowerAdjust(struct adapter *Adapter, bool bInCH14);
+void Hal_MPT_CCKTxPowerAdjustbyIndex(struct adapter *pAdapter, bool beven);
+void Hal_SetCCKTxPower(struct adapter *pAdapter, u8 *TxPower);
+void Hal_SetOFDMTxPower(struct adapter *pAdapter, u8 *TxPower);
+void Hal_TriggerRFThermalMeter(struct adapter *pAdapter);
+u8 Hal_ReadRFThermalMeter(struct adapter *pAdapter);
+void Hal_SetCCKContinuousTx(struct adapter *pAdapter, u8 bStart);
+void Hal_SetOFDMContinuousTx(struct adapter *pAdapter, u8 bStart);
+void Hal_ProSetCrystalCap(struct adapter *pAdapter, u32 CrystalCapVal);
+void _rtw_mp_xmit_priv(struct xmit_priv *pxmitpriv);
+void MP_PHY_SetRFPathSwitch(struct adapter *pAdapter, bool bMain);
+
+#endif /* _RTW_MP_H_ */
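Taken together, the prototypes above suggest the usual mass-production flow: enter MP mode, program channel/bandwidth/rate/power from the values staged in struct mp_priv, kick off a Tx or Rx test, and tear it down afterwards. A hedged sketch of that sequence using only functions declared in this header; the success value of mp_start_test() and the assumption that the Set* helpers read their parameters from mp_priv are not confirmed by this patch:

    /* Sketch of an MP Tx test session; ordering, error handling and the
     * return-code convention of mp_start_test() are assumed. */
    static void mp_example_tx_session(struct adapter *padapter)
    {
        if (mp_start_test(padapter) != 0)   /* assumed: 0 on success */
            return;

        SetChannel(padapter);               /* channel, bandwidth, rate and   */
        SetBandwidth(padapter);             /* power are presumed to be taken */
        SetDataRate(padapter);              /* from the adapter's mp_priv     */
        SetTxPower(padapter);
        SetAntenna(padapter);

        SetPacketTx(padapter);              /* start the packet Tx test */
        /* ... let it run, gather tx_pktcount, then ... */
        mp_stop_test(padapter);
    }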
diff --git a/drivers/staging/rtl8188eu/include/rtw_mp_ioctl.h b/drivers/staging/rtl8188eu/include/rtw_mp_ioctl.h
new file mode 100644
index 0000000..494e90e
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_mp_ioctl.h
@@ -0,0 +1,340 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _RTW_MP_IOCTL_H_
+#define _RTW_MP_IOCTL_H_
+
+#include <drv_types.h>
+#include <mp_custom_oid.h>
+#include <rtw_ioctl.h>
+#include <rtw_ioctl_rtl.h>
+#include <rtw_efuse.h>
+#include <rtw_mp.h>
+
+/* */
+struct cfg_dbg_msg_struct {
+ u32 DebugLevel;
+ u32 DebugComponent_H32;
+ u32 DebugComponent_L32;
+};
+
+struct mp_rw_reg {
+ u32 offset;
+ u32 width;
+ u32 value;
+};
+
+struct efuse_access_struct {
+ u16 start_addr;
+ u16 cnts;
+ u8 data[0];
+};
+
+struct burst_rw_reg {
+ u32 offset;
+ u32 len;
+ u8 Data[256];
+};
+
+struct usb_vendor_req {
+ u8 bRequest;
+ u16 wValue;
+ u16 wIndex;
+ u16 wLength;
+ u8 u8Dir; /* 0: OUT, 1: IN */
+ u8 u8InData;
+};
+
+struct dr_variable_struct {
+ u8 offset;
+ u32 variable;
+};
+
+#define _irqlevel_changed_(a, b)
+
+/* rtl8188eu_oid_rtl_seg_81_80_00 */
+int rtl8188eu_oid_rt_pro_set_data_rate_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_start_test_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_stop_test_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_set_channel_direct_call_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_set_antenna_bb_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_set_tx_power_control_hdl(struct oid_par_priv *poid_par_priv);
+
+/* rtl8188eu_oid_rtl_seg_81_80_20 */
+int rtl8188eu_oid_rt_pro_query_tx_packet_sent_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_query_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_query_rx_packet_crc32_error_hdl(struct oid_par_priv *par_priv);
+int rtl8188eu_oid_rt_pro_reset_tx_packet_sent_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_reset_rx_packet_received_hdl(struct oid_par_priv *par_priv);
+int rtl8188eu_oid_rt_pro_set_modulation_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_set_continuous_tx_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_set_single_carrier_tx_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_set_carrier_suppression_tx_hdl(struct oid_par_priv *par_priv);
+int rtl8188eu_oid_rt_pro_set_single_tone_tx_hdl(struct oid_par_priv *poid_par_priv);
+
+/* rtl8188eu_oid_rtl_seg_81_87 */
+int rtl8188eu_oid_rt_pro_write_bb_reg_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_read_bb_reg_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_write_rf_reg_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_read_rf_reg_hdl(struct oid_par_priv *poid_par_priv);
+
+/* rtl8188eu_oid_rtl_seg_81_85 */
+int rtl8188eu_oid_rt_wireless_mode_hdl(struct oid_par_priv *poid_par_priv);
+
+/* rtl8188eu_oid_rtl_seg_87_11_00 */
+int rtl8188eu_oid_rt_pro8711_join_bss_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_read_register_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_write_register_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_burst_read_register_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_burst_write_register_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_write_txcmd_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_read16_eeprom_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_write16_eeprom_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro8711_wi_poll_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro8711_pkt_loss_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_rd_attrib_mem_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_wr_attrib_mem_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_set_rf_intfs_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_poll_rx_status_hdl(struct oid_par_priv *poid_par_priv);
+/* rtl8188eu_oid_rtl_seg_87_11_20 */
+int rtl8188eu_oid_rt_pro_cfg_debug_message_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_set_data_rate_ex_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_set_basic_rate_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_read_tssi_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_set_power_tracking_hdl(struct oid_par_priv *poid_par_priv);
+/* rtl8188eu_oid_rtl_seg_87_11_50 */
+int rtl8188eu_oid_rt_pro_qry_pwrstate_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_set_pwrstate_hdl(struct oid_par_priv *poid_par_priv);
+/* rtl8188eu_oid_rtl_seg_87_11_F0 */
+int rtl8188eu_oid_rt_pro_h2c_set_rate_table_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_h2c_get_rate_table_hdl(struct oid_par_priv *poid_par_priv);
+
+/* rtl8188eu_oid_rtl_seg_87_12_00 */
+int rtl8188eu_oid_rt_pro_encryption_ctrl_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_add_sta_info_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_dele_sta_info_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_query_dr_variable_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_rx_packet_type_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_read_efuse_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_write_efuse_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_rw_efuse_pgpkt_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_get_efuse_current_size_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_efuse_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_efuse_map_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_set_bandwidth_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_set_crystal_cap_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_set_rx_packet_type_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_get_efuse_max_size_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_set_tx_agc_offset_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_set_pkt_test_mode_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_get_thermal_meter_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_reset_phy_rx_packet_count_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_get_phy_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_get_phy_rx_packet_crc32_error_hdl(struct oid_par_priv *par_priv);
+int rtl8188eu_oid_rt_set_power_down_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_get_power_mode_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_trigger_gpio_hdl(struct oid_par_priv *poid_par_priv);
+
+#ifdef _RTW_MP_IOCTL_C_
+
+static const struct oid_obj_priv rtl8188eu_oid_rtl_seg_81_80_00[] = {
+ {1, &oid_null_function}, /* 0x00 OID_RT_PRO_RESET_DUT */
+ {1, &rtl8188eu_oid_rt_pro_set_data_rate_hdl}, /* 0x01 */
+ {1, &rtl8188eu_oid_rt_pro_start_test_hdl}, /* 0x02 */
+ {1, &rtl8188eu_oid_rt_pro_stop_test_hdl}, /* 0x03 */
+ {1, &oid_null_function}, /* 0x04 OID_RT_PRO_SET_PREAMBLE */
+ {1, &oid_null_function}, /* 0x05 OID_RT_PRO_SET_SCRAMBLER */
+ {1, &oid_null_function}, /* 0x06 OID_RT_PRO_SET_FILTER_BB */
+ {1, &oid_null_function},/* 0x07 OID_RT_PRO_SET_MANUAL_DIVERSITY_BB */
+ {1, &rtl8188eu_oid_rt_pro_set_channel_direct_call_hdl}, /* 0x08 */
+ {1, &oid_null_function},/* 0x09 OID_RT_PRO_SET_SLEEP_MODE_DIRECT_CALL */
+ {1, &oid_null_function},/* 0x0A OID_RT_PRO_SET_WAKE_MODE_DIRECT_CALL */
+ {1, &rtl8188eu_oid_rt_pro_set_continuous_tx_hdl}, /* 0x0B OID_RT_PRO_SET_TX_CONTINUOUS_DIRECT_CALL */
+ {1, &rtl8188eu_oid_rt_pro_set_single_carrier_tx_hdl},/* 0x0C OID_RT_PRO_SET_SINGLE_CARRIER_TX_CONTINUOUS */
+ {1, &oid_null_function}, /* 0x0D OID_RT_PRO_SET_TX_ANTENNA_BB */
+ {1, &rtl8188eu_oid_rt_pro_set_antenna_bb_hdl}, /* 0x0E */
+ {1, &oid_null_function}, /* 0x0F OID_RT_PRO_SET_CR_SCRAMBLER */
+ {1, &oid_null_function}, /* 0x10 OID_RT_PRO_SET_CR_NEW_FILTER */
+ {1, &rtl8188eu_oid_rt_pro_set_tx_power_control_hdl},/* 0x11 OID_RT_PRO_SET_TX_POWER_CONTROL */
+ {1, &oid_null_function}, /* 0x12 OID_RT_PRO_SET_CR_TX_CONFIG */
+ {1, &oid_null_function}, /* 0x13 OID_RT_PRO_GET_TX_POWER_CONTROL */
+ {1, &oid_null_function}, /* 0x14 OID_RT_PRO_GET_CR_SIGNAL_QUALITY */
+ {1, &oid_null_function}, /* 0x15 OID_RT_PRO_SET_CR_SETPOINT */
+ {1, &oid_null_function}, /* 0x16 OID_RT_PRO_SET_INTEGRATOR */
+ {1, &oid_null_function}, /* 0x17 OID_RT_PRO_SET_SIGNAL_QUALITY */
+ {1, &oid_null_function}, /* 0x18 OID_RT_PRO_GET_INTEGRATOR */
+ {1, &oid_null_function}, /* 0x19 OID_RT_PRO_GET_SIGNAL_QUALITY */
+ {1, &oid_null_function}, /* 0x1A OID_RT_PRO_QUERY_EEPROM_TYPE */
+ {1, &oid_null_function}, /* 0x1B OID_RT_PRO_WRITE_MAC_ADDRESS */
+ {1, &oid_null_function}, /* 0x1C OID_RT_PRO_READ_MAC_ADDRESS */
+ {1, &oid_null_function}, /* 0x1D OID_RT_PRO_WRITE_CIS_DATA */
+ {1, &oid_null_function}, /* 0x1E OID_RT_PRO_READ_CIS_DATA */
+ {1, &oid_null_function} /* 0x1F OID_RT_PRO_WRITE_POWER_CONTROL */
+};
+
+static const struct oid_obj_priv rtl8188eu_oid_rtl_seg_81_80_20[] = {
+ {1, &oid_null_function}, /* 0x20 OID_RT_PRO_READ_POWER_CONTROL */
+ {1, &oid_null_function}, /* 0x21 OID_RT_PRO_WRITE_EEPROM */
+ {1, &oid_null_function}, /* 0x22 OID_RT_PRO_READ_EEPROM */
+ {1, &rtl8188eu_oid_rt_pro_reset_tx_packet_sent_hdl}, /* 0x23 */
+ {1, &rtl8188eu_oid_rt_pro_query_tx_packet_sent_hdl}, /* 0x24 */
+ {1, &rtl8188eu_oid_rt_pro_reset_rx_packet_received_hdl}, /* 0x25 */
+ {1, &rtl8188eu_oid_rt_pro_query_rx_packet_received_hdl}, /* 0x26 */
+ {1, &rtl8188eu_oid_rt_pro_query_rx_packet_crc32_error_hdl}, /* 0x27 */
+ {1, &oid_null_function}, /* 0x28 OID_RT_PRO_QUERY_CURRENT_ADDRESS */
+ {1, &oid_null_function}, /* 0x29 OID_RT_PRO_QUERY_PERMANENT_ADDRESS */
+ {1, &oid_null_function}, /* 0x2A OID_RT_PRO_SET_PHILIPS_RF_PARAMETERS */
+ {1, &rtl8188eu_oid_rt_pro_set_carrier_suppression_tx_hdl},/* 0x2B OID_RT_PRO_SET_CARRIER_SUPPRESSION_TX */
+ {1, &oid_null_function}, /* 0x2C OID_RT_PRO_RECEIVE_PACKET */
+ {1, &oid_null_function}, /* 0x2D OID_RT_PRO_WRITE_EEPROM_BYTE */
+ {1, &oid_null_function}, /* 0x2E OID_RT_PRO_READ_EEPROM_BYTE */
+ {1, &rtl8188eu_oid_rt_pro_set_modulation_hdl} /* 0x2F */
+};
+
+static const struct oid_obj_priv rtl8188eu_oid_rtl_seg_81_80_40[] = {
+ {1, &oid_null_function}, /* 0x40 */
+ {1, &oid_null_function}, /* 0x41 */
+ {1, &oid_null_function}, /* 0x42 */
+ {1, &rtl8188eu_oid_rt_pro_set_single_tone_tx_hdl}, /* 0x43 */
+ {1, &oid_null_function}, /* 0x44 */
+ {1, &oid_null_function} /* 0x45 */
+};
+
+static const struct oid_obj_priv rtl8188eu_oid_rtl_seg_81_80_80[] = {
+ {1, &oid_null_function}, /* 0x80 OID_RT_DRIVER_OPTION */
+ {1, &oid_null_function}, /* 0x81 OID_RT_RF_OFF */
+ {1, &oid_null_function} /* 0x82 OID_RT_AUTH_STATUS */
+};
+
+static const struct oid_obj_priv rtl8188eu_oid_rtl_seg_81_85[] = {
+ {1, &rtl8188eu_oid_rt_wireless_mode_hdl} /* 0x00 OID_RT_WIRELESS_MODE */
+};
+
+#endif /* _RTW_MP_IOCTL_C_ */
+
+struct rwreg_param {
+ u32 offset;
+ u32 width;
+ u32 value;
+};
+
+struct bbreg_param {
+ u32 offset;
+ u32 phymask;
+ u32 value;
+};
+
+struct txpower_param {
+ u32 pwr_index;
+};
+
+struct datarate_param {
+ u32 rate_index;
+};
+
+struct rfintfs_parm {
+ u32 rfintfs;
+};
+
+struct mp_xmit_parm {
+ u8 enable;
+ u32 count;
+ u16 length;
+ u8 payload_type;
+ u8 da[ETH_ALEN];
+};
+
+struct mp_xmit_packet {
+ u32 len;
+ u32 mem[MAX_MP_XMITBUF_SZ >> 2];
+};
+
+struct psmode_param {
+ u32 ps_mode;
+ u32 smart_ps;
+};
+
+/* for OID_RT_PRO_READ16_EEPROM & OID_RT_PRO_WRITE16_EEPROM */
+struct eeprom_rw_param {
+ u32 offset;
+ u16 value;
+};
+
+struct mp_ioctl_handler {
+ u32 paramsize;
+ s32 (*handler)(struct oid_par_priv *poid_par_priv);
+ u32 oid;
+};
+
+struct mp_ioctl_param {
+ u32 subcode;
+ u32 len;
+ u8 data[0];
+};
+
+#define GEN_MP_IOCTL_SUBCODE(code) _MP_IOCTL_ ## code ## _CMD_
+
+enum RTL871X_MP_IOCTL_SUBCODE {
+ GEN_MP_IOCTL_SUBCODE(MP_START), /*0*/
+ GEN_MP_IOCTL_SUBCODE(MP_STOP),
+ GEN_MP_IOCTL_SUBCODE(READ_REG),
+ GEN_MP_IOCTL_SUBCODE(WRITE_REG),
+ GEN_MP_IOCTL_SUBCODE(READ_BB_REG),
+ GEN_MP_IOCTL_SUBCODE(WRITE_BB_REG), /*5*/
+ GEN_MP_IOCTL_SUBCODE(READ_RF_REG),
+ GEN_MP_IOCTL_SUBCODE(WRITE_RF_REG),
+ GEN_MP_IOCTL_SUBCODE(SET_CHANNEL),
+ GEN_MP_IOCTL_SUBCODE(SET_TXPOWER),
+ GEN_MP_IOCTL_SUBCODE(SET_DATARATE), /*10*/
+ GEN_MP_IOCTL_SUBCODE(SET_BANDWIDTH),
+ GEN_MP_IOCTL_SUBCODE(SET_ANTENNA),
+ GEN_MP_IOCTL_SUBCODE(CNTU_TX),
+ GEN_MP_IOCTL_SUBCODE(SC_TX),
+ GEN_MP_IOCTL_SUBCODE(CS_TX), /*15*/
+ GEN_MP_IOCTL_SUBCODE(ST_TX),
+ GEN_MP_IOCTL_SUBCODE(IOCTL_XMIT_PACKET),
+ GEN_MP_IOCTL_SUBCODE(SET_RX_PKT_TYPE),
+ GEN_MP_IOCTL_SUBCODE(RESET_PHY_RX_PKT_CNT),
+ GEN_MP_IOCTL_SUBCODE(GET_PHY_RX_PKT_RECV), /*20*/
+ GEN_MP_IOCTL_SUBCODE(GET_PHY_RX_PKT_ERROR),
+ GEN_MP_IOCTL_SUBCODE(READ16_EEPROM),
+ GEN_MP_IOCTL_SUBCODE(WRITE16_EEPROM),
+ GEN_MP_IOCTL_SUBCODE(EFUSE),
+ GEN_MP_IOCTL_SUBCODE(EFUSE_MAP), /*25*/
+ GEN_MP_IOCTL_SUBCODE(GET_EFUSE_MAX_SIZE),
+ GEN_MP_IOCTL_SUBCODE(GET_EFUSE_CURRENT_SIZE),
+ GEN_MP_IOCTL_SUBCODE(GET_THERMAL_METER),
+ GEN_MP_IOCTL_SUBCODE(SET_PTM),
+ GEN_MP_IOCTL_SUBCODE(SET_POWER_DOWN), /*30*/
+ GEN_MP_IOCTL_SUBCODE(TRIGGER_GPIO),
+ GEN_MP_IOCTL_SUBCODE(SET_DM_BT), /*32*/
+ GEN_MP_IOCTL_SUBCODE(DEL_BA), /*33*/
+ GEN_MP_IOCTL_SUBCODE(GET_WIFI_STATUS), /*34*/
+ MAX_MP_IOCTL_SUBCODE,
+};
+
+s32 rtl8188eu_mp_ioctl_xmit_packet_hdl(struct oid_par_priv *poid_par_priv);
+
+#define GEN_HANDLER(sz, hdl, oid) {sz, hdl, oid},
+
+#define EXT_MP_IOCTL_HANDLER(sz, subcode, oid) \
+ {sz, rtl8188eu_mp_ioctl_##subcode##_hdl, oid},
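GEN_MP_IOCTL_SUBCODE() simply token-pastes the name, so GEN_MP_IOCTL_SUBCODE(MP_START) expands to _MP_IOCTL_MP_START_CMD_ (value 0 in the enum above), and GEN_HANDLER/EXT_MP_IOCTL_HANDLER each emit one struct mp_ioctl_handler initializer, trailing comma included. An illustrative fragment of a subcode-indexed table built with them; the paramsize and oid values are placeholders, not the driver's real table:

    /* Illustrative fragment only; sizes and oid values are placeholders. */
    static struct mp_ioctl_handler mp_ioctl_hdl[] = {
        GEN_HANDLER(0, rtl8188eu_oid_rt_pro_start_test_hdl, 0) /* MP_START */
        GEN_HANDLER(0, rtl8188eu_oid_rt_pro_stop_test_hdl, 0)  /* MP_STOP */
        EXT_MP_IOCTL_HANDLER(sizeof(struct mp_xmit_parm), xmit_packet, 0)
    };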
+
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h b/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
new file mode 100644
index 0000000..3ad2207
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
@@ -0,0 +1,1084 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+/*****************************************************************************
+ *
+ * Module: __RTW_MP_PHY_REGDEF_H_
+ *
+ *
+ * Note: 1. Define PMAC/BB register map
+ * 2. Define RF register map
+ * 3. PMAC/BB register bit mask.
+ * 4. RF reg bit mask.
+ * 5. Other BB/RF relative definition.
+ *
+ *
+ * Export: Constants, macro, functions(API), global variables(None).
+ *
+ * Abbrev:
+ *
+ * History:
+ * Date Who Remark
+ * 08/07/2007 MHC 1. Porting from 9x series PHYCFG.h.
+ * 2. Reorganize code architecture.
+ * 09/25/2008 MH 1. Add RL6052 register definition
+ *
+ *****************************************************************************/
+#ifndef __RTW_MP_PHY_REGDEF_H_
+#define __RTW_MP_PHY_REGDEF_H_
+
+
+/*--------------------------Define Parameters-------------------------------*/
+
+/* */
+/* 8192S Register offset definition */
+/* */
+
+/* */
+/* BB-PHY register PMAC 0x100 PHY 0x800 - 0xEFF */
+/* 1. PMAC duplicate register due to connection: RF_Mode, TRxRN, NumOf L-STF */
+/* 2. 0x800/0x900/0xA00/0xC00/0xD00/0xE00 */
+/* 3. RF register 0x00-2E */
+/* 4. Bit Mask for BB/RF register */
+/* 5. Other definition for BB/RF R/W */
+/* */
+
+
+/* */
+/* 1. PMAC duplicate register due to connection: RF_Mode, TRxRN, NumOf L-STF */
+/* 1. Page1(0x100) */
+/* */
+#define rPMAC_Reset 0x100
+#define rPMAC_TxStart 0x104
+#define rPMAC_TxLegacySIG 0x108
+#define rPMAC_TxHTSIG1 0x10c
+#define rPMAC_TxHTSIG2 0x110
+#define rPMAC_PHYDebug 0x114
+#define rPMAC_TxPacketNum 0x118
+#define rPMAC_TxIdle 0x11c
+#define rPMAC_TxMACHeader0 0x120
+#define rPMAC_TxMACHeader1 0x124
+#define rPMAC_TxMACHeader2 0x128
+#define rPMAC_TxMACHeader3 0x12c
+#define rPMAC_TxMACHeader4 0x130
+#define rPMAC_TxMACHeader5 0x134
+#define rPMAC_TxDataType 0x138
+#define rPMAC_TxRandomSeed 0x13c
+#define rPMAC_CCKPLCPPreamble 0x140
+#define rPMAC_CCKPLCPHeader 0x144
+#define rPMAC_CCKCRC16 0x148
+#define rPMAC_OFDMRxCRC32OK 0x170
+#define rPMAC_OFDMRxCRC32Er 0x174
+#define rPMAC_OFDMRxParityEr 0x178
+#define rPMAC_OFDMRxCRC8Er 0x17c
+#define rPMAC_CCKCRxRC16Er 0x180
+#define rPMAC_CCKCRxRC32Er 0x184
+#define rPMAC_CCKCRxRC32OK 0x188
+#define rPMAC_TxStatus 0x18c
+
+/* */
+/* 2. Page2(0x200) */
+/* */
+/* The following two definition are only used for USB interface. */
+/* define RF_BB_CMD_ADDR 0x02c0 RF/BB read/write command address. */
+/* define RF_BB_CMD_DATA 0x02c4 RF/BB read/write command data. */
+
+/* */
+/* 3. Page8(0x800) */
+/* */
+#define rFPGA0_RFMOD 0x800 /* RF mode & CCK TxSC RF BW Setting?? */
+
+#define rFPGA0_TxInfo 0x804 /* Status report?? */
+#define rFPGA0_PSDFunction 0x808
+
+#define rFPGA0_TxGainStage 0x80c /* Set TX PWR init gain? */
+
+#define rFPGA0_RFTiming1 0x810 /* Useless now */
+#define rFPGA0_RFTiming2 0x814
+/* define rFPGA0_XC_RFTiming 0x818 */
+/* define rFPGA0_XD_RFTiming 0x81c */
+
+#define rFPGA0_XA_HSSIParameter1 0x820 /* RF 3 wire register */
+#define rFPGA0_XA_HSSIParameter2 0x824
+#define rFPGA0_XB_HSSIParameter1 0x828
+#define rFPGA0_XB_HSSIParameter2 0x82c
+#define rFPGA0_XC_HSSIParameter1 0x830
+#define rFPGA0_XC_HSSIParameter2 0x834
+#define rFPGA0_XD_HSSIParameter1 0x838
+#define rFPGA0_XD_HSSIParameter2 0x83c
+#define rFPGA0_XA_LSSIParameter 0x840
+#define rFPGA0_XB_LSSIParameter 0x844
+#define rFPGA0_XC_LSSIParameter 0x848
+#define rFPGA0_XD_LSSIParameter 0x84c
+
+#define rFPGA0_RFWakeUpParameter 0x850 /* Useless now */
+#define rFPGA0_RFSleepUpParameter 0x854
+
+#define rFPGA0_XAB_SwitchControl 0x858 /* RF Channel switch */
+#define rFPGA0_XCD_SwitchControl 0x85c
+
+#define rFPGA0_XA_RFInterfaceOE 0x860 /* RF Channel switch */
+#define rFPGA0_XB_RFInterfaceOE 0x864
+#define rFPGA0_XC_RFInterfaceOE 0x868
+#define rFPGA0_XD_RFInterfaceOE 0x86c
+
+#define rFPGA0_XAB_RFInterfaceSW 0x870 /* RF Interface Software Control */
+#define rFPGA0_XCD_RFInterfaceSW 0x874
+
+#define rFPGA0_XAB_RFParameter 0x878 /* RF Parameter */
+#define rFPGA0_XCD_RFParameter 0x87c
+
+#define rFPGA0_AnalogParameter1 0x880 /* Crystal cap setting RF-R/W protection for parameter4?? */
+#define rFPGA0_AnalogParameter2 0x884
+#define rFPGA0_AnalogParameter3 0x888 /* Useless now */
+#define rFPGA0_AnalogParameter4 0x88c
+
+#define rFPGA0_XA_LSSIReadBack 0x8a0 /* Transceiver LSSI Readback */
+#define rFPGA0_XB_LSSIReadBack 0x8a4
+#define rFPGA0_XC_LSSIReadBack 0x8a8
+#define rFPGA0_XD_LSSIReadBack 0x8ac
+
+#define rFPGA0_PSDReport 0x8b4 /* Useless now */
+#define rFPGA0_XAB_RFInterfaceRB 0x8e0 /* Useless now RF Interface Readback Value */
+#define rFPGA0_XCD_RFInterfaceRB 0x8e4 /* Useless now */
+
+/* */
+/* 4. Page9(0x900) */
+/* */
+#define rFPGA1_RFMOD 0x900 /* RF mode & OFDM TxSC RF BW Setting?? */
+
+#define rFPGA1_TxBlock 0x904 /* Useless now */
+#define rFPGA1_DebugSelect 0x908 /* Useless now */
+#define rFPGA1_TxInfo 0x90c /* Useless now Status report?? */
+
+/* */
+/* 5. PageA(0xA00) */
+/* */
+/* Set Control channel to upper or lower. These settings are required only for 40MHz */
+#define rCCK0_System 0xa00
+
+#define rCCK0_AFESetting 0xa04 /* Disable init gain now Select RX path by RSSI */
+#define rCCK0_CCA 0xa08 /* Disable init gain now Init gain */
+
+#define rCCK0_RxAGC1 0xa0c /* AGC default value, saturation level Antenna Diversity, RX AGC, LNA Threshold, RX LNA Threshold useless now. Not the same as 90 series */
+#define rCCK0_RxAGC2 0xa10 /* AGC & DAGC */
+
+#define rCCK0_RxHP 0xa14
+
+#define rCCK0_DSPParameter1 0xa18 /* Timing recovery & Channel estimation threshold */
+#define rCCK0_DSPParameter2 0xa1c /* SQ threshold */
+
+#define rCCK0_TxFilter1 0xa20
+#define rCCK0_TxFilter2 0xa24
+#define rCCK0_DebugPort 0xa28 /* debug port and Tx filter3 */
+#define rCCK0_FalseAlarmReport 0xa2c /* 0xa2d useless now 0xa30-a4f channel report */
+#define rCCK0_TRSSIReport 0xa50
+#define rCCK0_RxReport 0xa54 /* 0xa57 */
+#define rCCK0_FACounterLower 0xa5c /* 0xa5b */
+#define rCCK0_FACounterUpper 0xa58 /* 0xa5c */
+
+/* */
+/* 6. PageC(0xC00) */
+/* */
+#define rOFDM0_LSTF 0xc00
+
+#define rOFDM0_TRxPathEnable 0xc04
+#define rOFDM0_TRMuxPar 0xc08
+#define rOFDM0_TRSWIsolation 0xc0c
+
+#define rOFDM0_XARxAFE 0xc10 /* RxIQ DC offset, Rx digital filter, DC notch filter */
+#define rOFDM0_XARxIQImbalance 0xc14 /* RxIQ imbalance matrix */
+#define rOFDM0_XBRxAFE 0xc18
+#define rOFDM0_XBRxIQImbalance 0xc1c
+#define rOFDM0_XCRxAFE 0xc20
+#define rOFDM0_XCRxIQImbalance 0xc24
+#define rOFDM0_XDRxAFE 0xc28
+#define rOFDM0_XDRxIQImbalance 0xc2c
+
+#define rOFDM0_RxDetector1 0xc30 /* PD,BW & SBD DM tune init gain */
+#define rOFDM0_RxDetector2 0xc34 /* SBD & Frame Sync. */
+#define rOFDM0_RxDetector3 0xc38 /* Frame Sync. */
+#define rOFDM0_RxDetector4 0xc3c /* PD, SBD, Frame Sync & Short-GI */
+
+#define rOFDM0_RxDSP 0xc40 /* Rx Sync Path */
+#define rOFDM0_CFOandDAGC 0xc44 /* CFO & DAGC */
+#define rOFDM0_CCADropThreshold 0xc48 /* CCA Drop threshold */
+#define rOFDM0_ECCAThreshold 0xc4c /* energy CCA */
+
+#define rOFDM0_XAAGCCore1 0xc50 /* DIG */
+#define rOFDM0_XAAGCCore2 0xc54
+#define rOFDM0_XBAGCCore1 0xc58
+#define rOFDM0_XBAGCCore2 0xc5c
+#define rOFDM0_XCAGCCore1 0xc60
+#define rOFDM0_XCAGCCore2 0xc64
+#define rOFDM0_XDAGCCore1 0xc68
+#define rOFDM0_XDAGCCore2 0xc6c
+
+#define rOFDM0_AGCParameter1 0xc70
+#define rOFDM0_AGCParameter2 0xc74
+#define rOFDM0_AGCRSSITable 0xc78
+#define rOFDM0_HTSTFAGC 0xc7c
+
+#define rOFDM0_XATxIQImbalance 0xc80 /* TX PWR TRACK and DIG */
+#define rOFDM0_XATxAFE 0xc84
+#define rOFDM0_XBTxIQImbalance 0xc88
+#define rOFDM0_XBTxAFE 0xc8c
+#define rOFDM0_XCTxIQImbalance 0xc90
+#define rOFDM0_XCTxAFE 0xc94
+#define rOFDM0_XDTxIQImbalance 0xc98
+#define rOFDM0_XDTxAFE 0xc9c
+#define rOFDM0_RxIQExtAnta 0xca0
+
+#define rOFDM0_RxHPParameter 0xce0
+#define rOFDM0_TxPseudoNoiseWgt 0xce4
+#define rOFDM0_FrameSync 0xcf0
+#define rOFDM0_DFSReport 0xcf4
+#define rOFDM0_TxCoeff1 0xca4
+#define rOFDM0_TxCoeff2 0xca8
+#define rOFDM0_TxCoeff3 0xcac
+#define rOFDM0_TxCoeff4 0xcb0
+#define rOFDM0_TxCoeff5 0xcb4
+#define rOFDM0_TxCoeff6 0xcb8
+
+/* 7. PageD(0xD00) */
+#define rOFDM1_LSTF 0xd00
+#define rOFDM1_TRxPathEnable 0xd04
+
+#define rOFDM1_CFO 0xd08 /* No setting now */
+#define rOFDM1_CSI1 0xd10
+#define rOFDM1_SBD 0xd14
+#define rOFDM1_CSI2 0xd18
+#define rOFDM1_CFOTracking 0xd2c
+#define rOFDM1_TRxMesaure1 0xd34
+#define rOFDM1_IntfDet 0xd3c
+#define rOFDM1_PseudoNoiseStateAB 0xd50
+#define rOFDM1_PseudoNoiseStateCD 0xd54
+#define rOFDM1_RxPseudoNoiseWgt 0xd58
+
+#define rOFDM_PHYCounter1 0xda0 /* cca, parity fail */
+#define rOFDM_PHYCounter2 0xda4 /* rate illegal, crc8 fail */
+#define rOFDM_PHYCounter3 0xda8 /* MCS not support */
+
+#define rOFDM_ShortCFOAB 0xdac /* No setting now */
+#define rOFDM_ShortCFOCD 0xdb0
+#define rOFDM_LongCFOAB 0xdb4
+#define rOFDM_LongCFOCD 0xdb8
+#define rOFDM_TailCFOAB 0xdbc
+#define rOFDM_TailCFOCD 0xdc0
+#define rOFDM_PWMeasure1 0xdc4
+#define rOFDM_PWMeasure2 0xdc8
+#define rOFDM_BWReport 0xdcc
+#define rOFDM_AGCReport 0xdd0
+#define rOFDM_RxSNR 0xdd4
+#define rOFDM_RxEVMCSI 0xdd8
+#define rOFDM_SIGReport 0xddc
+
+
+/* */
+/* 8. PageE(0xE00) */
+/* */
+#define rTxAGC_Rate18_06 0xe00
+#define rTxAGC_Rate54_24 0xe04
+#define rTxAGC_CCK_Mcs32 0xe08
+#define rTxAGC_Mcs03_Mcs00 0xe10
+#define rTxAGC_Mcs07_Mcs04 0xe14
+#define rTxAGC_Mcs11_Mcs08 0xe18
+#define rTxAGC_Mcs15_Mcs12 0xe1c
+
+/* Analog- control in RX_WAIT_CCA : REG: EE0 [Analog- Power & Control Register] */
+#define rRx_Wait_CCCA 0xe70
+#define rAnapar_Ctrl_BB 0xee0
+
+/* */
+/* 9. RF Register 0x00-0x2E (RF 8256) */
+/* RF-0222D 0x00-3F */
+/* */
+/* Zebra1 */
+#define RTL92SE_FPGA_VERIFY 0
+#define rZebra1_HSSIEnable 0x0 /* Useless now */
+#define rZebra1_TRxEnable1 0x1
+#define rZebra1_TRxEnable2 0x2
+#define rZebra1_AGC 0x4
+#define rZebra1_ChargePump 0x5
+/* if (RTL92SE_FPGA_VERIFY == 1) */
+#define rZebra1_Channel 0x7 /* RF channel switch */
+/* else */
+
+/* endif */
+#define rZebra1_TxGain 0x8 /* Useless now */
+#define rZebra1_TxLPF 0x9
+#define rZebra1_RxLPF 0xb
+#define rZebra1_RxHPFCorner 0xc
+
+/* Zebra4 */
+#define rGlobalCtrl 0 /* Useless now */
+#define rRTL8256_TxLPF 19
+#define rRTL8256_RxLPF 11
+
+/* RTL8258 */
+#define rRTL8258_TxLPF 0x11 /* Useless now */
+#define rRTL8258_RxLPF 0x13
+#define rRTL8258_RSSILPF 0xa
+
+/* */
+/* RL6052 Register definition */
+#define RF_AC 0x00 /* */
+
+#define RF_IQADJ_G1 0x01 /* */
+#define RF_IQADJ_G2 0x02 /* */
+#define RF_POW_TRSW 0x05 /* */
+
+#define RF_GAIN_RX 0x06 /* */
+#define RF_GAIN_TX 0x07 /* */
+
+#define RF_TXM_IDAC 0x08 /* */
+#define RF_BS_IQGEN 0x0F /* */
+
+#define RF_MODE1 0x10 /* */
+#define RF_MODE2 0x11 /* */
+
+#define RF_RX_AGC_HP 0x12 /* */
+#define RF_TX_AGC 0x13 /* */
+#define RF_BIAS 0x14 /* */
+#define RF_IPA 0x15 /* */
+#define RF_TXBIAS 0x16 /* */
+#define RF_POW_ABILITY 0x17 /* */
+#define RF_MODE_AG 0x18 /* */
+#define rRfChannel 0x18 /* RF channel and BW switch */
+#define RF_CHNLBW 0x18 /* RF channel and BW switch */
+#define RF_TOP 0x19 /* */
+
+#define RF_RX_G1 0x1A /* */
+#define RF_RX_G2 0x1B /* */
+
+#define RF_RX_BB2 0x1C /* */
+#define RF_RX_BB1 0x1D /* */
+
+#define RF_RCK1 0x1E /* */
+#define RF_RCK2 0x1F /* */
+
+#define RF_TX_G1 0x20 /* */
+#define RF_TX_G2 0x21 /* */
+#define RF_TX_G3 0x22 /* */
+
+#define RF_TX_BB1 0x23 /* */
+
+#define RF_T_METER 0x24 /* */
+
+#define RF_SYN_G1 0x25 /* RF TX Power control */
+#define RF_SYN_G2 0x26 /* RF TX Power control */
+#define RF_SYN_G3 0x27 /* RF TX Power control */
+#define RF_SYN_G4 0x28 /* RF TX Power control */
+#define RF_SYN_G5 0x29 /* RF TX Power control */
+#define RF_SYN_G6 0x2A /* RF TX Power control */
+#define RF_SYN_G7 0x2B /* RF TX Power control */
+#define RF_SYN_G8 0x2C /* RF TX Power control */
+
+#define RF_RCK_OS 0x30 /* RF TX PA control */
+#define RF_TXPA_G1 0x31 /* RF TX PA control */
+#define RF_TXPA_G2 0x32 /* RF TX PA control */
+#define RF_TXPA_G3 0x33 /* RF TX PA control */
+
+/* */
+/* Bit Mask */
+/* */
+/* 1. Page1(0x100) */
+#define bBBResetB 0x100 /* Useless now? */
+#define bGlobalResetB 0x200
+#define bOFDMTxStart 0x4
+#define bCCKTxStart 0x8
+#define bCRC32Debug 0x100
+#define bPMACLoopback 0x10
+#define bTxLSIG 0xffffff
+#define bOFDMTxRate 0xf
+#define bOFDMTxReserved 0x10
+#define bOFDMTxLength 0x1ffe0
+#define bOFDMTxParity 0x20000
+#define bTxHTSIG1 0xffffff
+#define bTxHTMCSRate 0x7f
+#define bTxHTBW 0x80
+#define bTxHTLength 0xffff00
+#define bTxHTSIG2 0xffffff
+#define bTxHTSmoothing 0x1
+#define bTxHTSounding 0x2
+#define bTxHTReserved 0x4
+#define bTxHTAggreation 0x8
+#define bTxHTSTBC 0x30
+#define bTxHTAdvanceCoding 0x40
+#define bTxHTShortGI 0x80
+#define bTxHTNumberHT_LTF 0x300
+#define bTxHTCRC8 0x3fc00
+#define bCounterReset 0x10000
+#define bNumOfOFDMTx 0xffff
+#define bNumOfCCKTx 0xffff0000
+#define bTxIdleInterval 0xffff
+#define bOFDMService 0xffff0000
+#define bTxMACHeader 0xffffffff
+#define bTxDataInit 0xff
+#define bTxHTMode 0x100
+#define bTxDataType 0x30000
+#define bTxRandomSeed 0xffffffff
+#define bCCKTxPreamble 0x1
+#define bCCKTxSFD 0xffff0000
+#define bCCKTxSIG 0xff
+#define bCCKTxService 0xff00
+#define bCCKLengthExt 0x8000
+#define bCCKTxLength 0xffff0000
+#define bCCKTxCRC16 0xffff
+#define bCCKTxStatus 0x1
+#define bOFDMTxStatus 0x2
+
+#define IS_BB_REG_OFFSET_92S(_Offset) (((_Offset) >= 0x800) && ((_Offset) <= 0xfff))
+
+/* 2. Page8(0x800) */
+#define bRFMOD 0x1 /* Reg 0x800 rFPGA0_RFMOD */
+#define bJapanMode 0x2
+#define bCCKTxSC 0x30
+#define bCCKEn 0x1000000
+#define bOFDMEn 0x2000000
+
+#define bOFDMRxADCPhase 0x10000 /* Useless now */
+#define bOFDMTxDACPhase 0x40000
+#define bXATxAGC 0x3f
+
+#define bXBTxAGC 0xf00 /* Reg 80c rFPGA0_TxGainStage */
+#define bXCTxAGC 0xf000
+#define bXDTxAGC 0xf0000
+
+#define bPAStart 0xf0000000 /* Useless now */
+#define bTRStart 0x00f00000
+#define bRFStart 0x0000f000
+#define bBBStart 0x000000f0
+#define bBBCCKStart 0x0000000f
+#define bPAEnd 0xf /* Reg0x814 */
+#define bTREnd 0x0f000000
+#define bRFEnd 0x000f0000
+#define bCCAMask 0x000000f0 /* T2R */
+#define bR2RCCAMask 0x00000f00
+#define bHSSI_R2TDelay 0xf8000000
+#define bHSSI_T2RDelay 0xf80000
+#define bContTxHSSI 0x400 /* change gain at continuous Tx */
+#define bIGFromCCK 0x200
+#define bAGCAddress 0x3f
+#define bRxHPTx 0x7000
+#define bRxHPT2R 0x38000
+#define bRxHPCCKIni 0xc0000
+#define bAGCTxCode 0xc00000
+#define bAGCRxCode 0x300000
+
+#define b3WireDataLength 0x800 /* Reg 0x820~84f rFPGA0_XA_HSSIParameter1 */
+#define b3WireAddressLength 0x400
+
+#define b3WireRFPowerDown 0x1 /* Useless now */
+/* define bHWSISelect 0x8 */
+#define b5GPAPEPolarity 0x40000000
+#define b2GPAPEPolarity 0x80000000
+#define bRFSW_TxDefaultAnt 0x3
+#define bRFSW_TxOptionAnt 0x30
+#define bRFSW_RxDefaultAnt 0x300
+#define bRFSW_RxOptionAnt 0x3000
+#define bRFSI_3WireData 0x1
+#define bRFSI_3WireClock 0x2
+#define bRFSI_3WireLoad 0x4
+#define bRFSI_3WireRW 0x8
+#define bRFSI_3Wire 0xf
+
+#define bRFSI_RFENV 0x10 /* Reg 0x870 rFPGA0_XAB_RFInterfaceSW */
+
+#define bRFSI_TRSW 0x20 /* Useless now */
+#define bRFSI_TRSWB 0x40
+#define bRFSI_ANTSW 0x100
+#define bRFSI_ANTSWB 0x200
+#define bRFSI_PAPE 0x400
+#define bRFSI_PAPE5G 0x800
+#define bBandSelect 0x1
+#define bHTSIG2_GI 0x80
+#define bHTSIG2_Smoothing 0x01
+#define bHTSIG2_Sounding 0x02
+#define bHTSIG2_Aggreaton 0x08
+#define bHTSIG2_STBC 0x30
+#define bHTSIG2_AdvCoding 0x40
+#define bHTSIG2_NumOfHTLTF 0x300
+#define bHTSIG2_CRC8 0x3fc
+#define bHTSIG1_MCS 0x7f
+#define bHTSIG1_BandWidth 0x80
+#define bHTSIG1_HTLength 0xffff
+#define bLSIG_Rate 0xf
+#define bLSIG_Reserved 0x10
+#define bLSIG_Length 0x1fffe
+#define bLSIG_Parity 0x20
+#define bCCKRxPhase 0x4
+#if (RTL92SE_FPGA_VERIFY == 1)
+#define bLSSIReadAddress 0x3f000000 /* LSSI "Read" Address
+ Reg 0x824 rFPGA0_XA_HSSIParameter2 */
+#else
+#define bLSSIReadAddress 0x7f800000 /* T65 RF */
+#endif
+#define bLSSIReadEdge 0x80000000 /* LSSI "Read" edge signal */
+#if (RTL92SE_FPGA_VERIFY == 1)
+#define bLSSIReadBackData 0xfff /* Reg 0x8a0
+ rFPGA0_XA_LSSIReadBack */
+#else
+#define bLSSIReadBackData 0xfffff /* T65 RF */
+#endif
+#define bLSSIReadOKFlag 0x1000 /* Useless now */
+#define bCCKSampleRate 0x8 /* 0: 44 MHz, 1: 88 MHz */
+#define bRegulator0Standby 0x1
+#define bRegulatorPLLStandby 0x2
+#define bRegulator1Standby 0x4
+#define bPLLPowerUp 0x8
+#define bDPLLPowerUp 0x10
+#define bDA10PowerUp 0x20
+#define bAD7PowerUp 0x200
+#define bDA6PowerUp 0x2000
+#define bXtalPowerUp 0x4000
+#define b40MDClkPowerUP 0x8000
+#define bDA6DebugMode 0x20000
+#define bDA6Swing 0x380000
+
+#define bADClkPhase 0x4000000 /* Reg 0x880
+ rFPGA0_AnalogParameter1 20/40 CCK support switch 40/80 BB MHZ */
+
+#define b80MClkDelay 0x18000000 /* Useless */
+#define bAFEWatchDogEnable 0x20000000
+
+#define bXtalCap01 0xc0000000 /* Reg 0x884
+ rFPGA0_AnalogParameter2 Crystal cap */
+#define bXtalCap23 0x3
+#define bXtalCap92x 0x0f000000
+#define bXtalCap 0x0f000000
+
+#define bIntDifClkEnable 0x400 /* Useless */
+#define bExtSigClkEnable 0x800
+#define bBandgapMbiasPowerUp 0x10000
+#define bAD11SHGain 0xc0000
+#define bAD11InputRange 0x700000
+#define bAD11OPCurrent 0x3800000
+#define bIPathLoopback 0x4000000
+#define bQPathLoopback 0x8000000
+#define bAFELoopback 0x10000000
+#define bDA10Swing 0x7e0
+#define bDA10Reverse 0x800
+#define bDAClkSource 0x1000
+#define bAD7InputRange 0x6000
+#define bAD7Gain 0x38000
+#define bAD7OutputCMMode 0x40000
+#define bAD7InputCMMode 0x380000
+#define bAD7Current 0xc00000
+#define bRegulatorAdjust 0x7000000
+#define bAD11PowerUpAtTx 0x1
+#define bDA10PSAtTx 0x10
+#define bAD11PowerUpAtRx 0x100
+#define bDA10PSAtRx 0x1000
+#define bCCKRxAGCFormat 0x200
+#define bPSDFFTSamplepPoint 0xc000
+#define bPSDAverageNum 0x3000
+#define bIQPathControl 0xc00
+#define bPSDFreq 0x3ff
+#define bPSDAntennaPath 0x30
+#define bPSDIQSwitch 0x40
+#define bPSDRxTrigger 0x400000
+#define bPSDTxTrigger 0x80000000
+#define bPSDSineToneScale 0x7f000000
+#define bPSDReport 0xffff
+
+/* 3. Page9(0x900) */
+#define bOFDMTxSC 0x30000000 /* Useless */
+#define bCCKTxOn 0x1
+#define bOFDMTxOn 0x2
+#define bDebugPage 0xfff /* reset debug page and HWord,
+ * LWord */
+#define bDebugItem 0xff /* reset debug page and LWord */
+#define bAntL 0x10
+#define bAntNonHT 0x100
+#define bAntHT1 0x1000
+#define bAntHT2 0x10000
+#define bAntHT1S1 0x100000
+#define bAntNonHTS1 0x1000000
+
+/* 4. PageA(0xA00) */
+#define bCCKBBMode 0x3 /* Useless */
+#define bCCKTxPowerSaving 0x80
+#define bCCKRxPowerSaving 0x40
+
+#define bCCKSideBand 0x10 /* Reg 0xa00 rCCK0 20/40 sw */
+
+#define bCCKScramble 0x8 /* Useless */
+#define bCCKAntDiversity 0x8000
+#define bCCKCarrierRecovery 0x4000
+#define bCCKTxRate 0x3000
+#define bCCKDCCancel 0x0800
+#define bCCKISICancel 0x0400
+#define bCCKMatchFilter 0x0200
+#define bCCKEqualizer 0x0100
+#define bCCKPreambleDetect 0x800000
+#define bCCKFastFalseCCA 0x400000
+#define bCCKChEstStart 0x300000
+#define bCCKCCACount 0x080000
+#define bCCKcs_lim 0x070000
+#define bCCKBistMode 0x80000000
+#define bCCKCCAMask 0x40000000
+#define bCCKTxDACPhase 0x4
+#define bCCKRxADCPhase 0x20000000 /* r_rx_clk */
+#define bCCKr_cp_mode0 0x0100
+#define bCCKTxDCOffset 0xf0
+#define bCCKRxDCOffset 0xf
+#define bCCKCCAMode 0xc000
+#define bCCKFalseCS_lim 0x3f00
+#define bCCKCS_ratio 0xc00000
+#define bCCKCorgBit_sel 0x300000
+#define bCCKPD_lim 0x0f0000
+#define bCCKNewCCA 0x80000000
+#define bCCKRxHPofIG 0x8000
+#define bCCKRxIG 0x7f00
+#define bCCKLNAPolarity 0x800000
+#define bCCKRx1stGain 0x7f0000
+#define bCCKRFExtend 0x20000000 /* CCK Rx init gain polar */
+#define bCCKRxAGCSatLevel 0x1f000000
+#define bCCKRxAGCSatCount 0xe0
+#define bCCKRxRFSettle 0x1f /* AGCsamp_dly */
+#define bCCKFixedRxAGC 0x8000
+#define bCCKAntennaPolarity 0x2000
+#define bCCKTxFilterType 0x0c00
+#define bCCKRxAGCReportType 0x0300
+#define bCCKRxDAGCEn 0x80000000
+#define bCCKRxDAGCPeriod 0x20000000
+#define bCCKRxDAGCSatLevel 0x1f000000
+#define bCCKTimingRecovery 0x800000
+#define bCCKTxC0 0x3f0000
+#define bCCKTxC1 0x3f000000
+#define bCCKTxC2 0x3f
+#define bCCKTxC3 0x3f00
+#define bCCKTxC4 0x3f0000
+#define bCCKTxC5 0x3f000000
+#define bCCKTxC6 0x3f
+#define bCCKTxC7 0x3f00
+#define bCCKDebugPort 0xff0000
+#define bCCKDACDebug 0x0f000000
+#define bCCKFalseAlarmEnable 0x8000
+#define bCCKFalseAlarmRead 0x4000
+#define bCCKTRSSI 0x7f
+#define bCCKRxAGCReport 0xfe
+#define bCCKRxReport_AntSel 0x80000000
+#define bCCKRxReport_MFOff 0x40000000
+#define bCCKRxRxReport_SQLoss 0x20000000
+#define bCCKRxReport_Pktloss 0x10000000
+#define bCCKRxReport_Lockedbit 0x08000000
+#define bCCKRxReport_RateError 0x04000000
+#define bCCKRxReport_RxRate 0x03000000
+#define bCCKRxFACounterLower 0xff
+#define bCCKRxFACounterUpper 0xff000000
+#define bCCKRxHPAGCStart 0xe000
+#define bCCKRxHPAGCFinal 0x1c00
+#define bCCKRxFalseAlarmEnable 0x8000
+#define bCCKFACounterFreeze 0x4000
+#define bCCKTxPathSel 0x10000000
+#define bCCKDefaultRxPath 0xc000000
+#define bCCKOptionRxPath 0x3000000
+
+/* 5. PageC(0xC00) */
+#define bNumOfSTF 0x3 /* Useless */
+#define bShift_L 0xc0
+#define bGI_TH 0xc
+#define bRxPathA 0x1
+#define bRxPathB 0x2
+#define bRxPathC 0x4
+#define bRxPathD 0x8
+#define bTxPathA 0x1
+#define bTxPathB 0x2
+#define bTxPathC 0x4
+#define bTxPathD 0x8
+#define bTRSSIFreq 0x200
+#define bADCBackoff 0x3000
+#define bDFIRBackoff 0xc000
+#define bTRSSILatchPhase 0x10000
+#define bRxIDCOffset 0xff
+#define bRxQDCOffset 0xff00
+#define bRxDFIRMode 0x1800000
+#define bRxDCNFType 0xe000000
+#define bRXIQImb_A 0x3ff
+#define bRXIQImb_B 0xfc00
+#define bRXIQImb_C 0x3f0000
+#define bRXIQImb_D 0xffc00000
+#define bDC_dc_Notch 0x60000
+#define bRxNBINotch 0x1f000000
+#define bPD_TH 0xf
+#define bPD_TH_Opt2 0xc000
+#define bPWED_TH 0x700
+#define bIfMF_Win_L 0x800
+#define bPD_Option 0x1000
+#define bMF_Win_L 0xe000
+#define bBW_Search_L 0x30000
+#define bwin_enh_L 0xc0000
+#define bBW_TH 0x700000
+#define bED_TH2 0x3800000
+#define bBW_option 0x4000000
+#define bRatio_TH 0x18000000
+#define bWindow_L 0xe0000000
+#define bSBD_Option 0x1
+#define bFrame_TH 0x1c
+#define bFS_Option 0x60
+#define bDC_Slope_check 0x80
+#define bFGuard_Counter_DC_L 0xe00
+#define bFrame_Weight_Short 0x7000
+#define bSub_Tune 0xe00000
+#define bFrame_DC_Length 0xe000000
+#define bSBD_start_offset 0x30000000
+#define bFrame_TH_2 0x7
+#define bFrame_GI2_TH 0x38
+#define bGI2_Sync_en 0x40
+#define bSarch_Short_Early 0x300
+#define bSarch_Short_Late 0xc00
+#define bSarch_GI2_Late 0x70000
+#define bCFOAntSum 0x1
+#define bCFOAcc 0x2
+#define bCFOStartOffset 0xc
+#define bCFOLookBack 0x70
+#define bCFOSumWeight 0x80
+#define bDAGCEnable 0x10000
+#define bTXIQImb_A 0x3ff
+#define bTXIQImb_B 0xfc00
+#define bTXIQImb_C 0x3f0000
+#define bTXIQImb_D 0xffc00000
+#define bTxIDCOffset 0xff
+#define bTxQDCOffset 0xff00
+#define bTxDFIRMode 0x10000
+#define bTxPesudoNoiseOn 0x4000000
+#define bTxPesudoNoise_A 0xff
+#define bTxPesudoNoise_B 0xff00
+#define bTxPesudoNoise_C 0xff0000
+#define bTxPesudoNoise_D 0xff000000
+#define bCCADropOption 0x20000
+#define bCCADropThres 0xfff00000
+#define bEDCCA_H 0xf
+#define bEDCCA_L 0xf0
+#define bLambda_ED 0x300
+#define bRxInitialGain 0x7f
+#define bRxAntDivEn 0x80
+#define bRxAGCAddressForLNA 0x7f00
+#define bRxHighPowerFlow 0x8000
+#define bRxAGCFreezeThres 0xc0000
+#define bRxFreezeStep_AGC1 0x300000
+#define bRxFreezeStep_AGC2 0xc00000
+#define bRxFreezeStep_AGC3 0x3000000
+#define bRxFreezeStep_AGC0 0xc000000
+#define bRxRssi_Cmp_En 0x10000000
+#define bRxQuickAGCEn 0x20000000
+#define bRxAGCFreezeThresMode 0x40000000
+#define bRxOverFlowCheckType 0x80000000
+#define bRxAGCShift 0x7f
+#define bTRSW_Tri_Only 0x80
+#define bPowerThres 0x300
+#define bRxAGCEn 0x1
+#define bRxAGCTogetherEn 0x2
+#define bRxAGCMin 0x4
+#define bRxHP_Ini 0x7
+#define bRxHP_TRLNA 0x70
+#define bRxHP_RSSI 0x700
+#define bRxHP_BBP1 0x7000
+#define bRxHP_BBP2 0x70000
+#define bRxHP_BBP3 0x700000
+#define bRSSI_H 0x7f0000 /* thresh for hi power */
+#define bRSSI_Gen 0x7f000000 /* thresh for ant div */
+#define bRxSettle_TRSW 0x7
+#define bRxSettle_LNA 0x38
+#define bRxSettle_RSSI 0x1c0
+#define bRxSettle_BBP 0xe00
+#define bRxSettle_RxHP 0x7000
+#define bRxSettle_AntSW_RSSI 0x38000
+#define bRxSettle_AntSW 0xc0000
+#define bRxProcessTime_DAGC 0x300000
+#define bRxSettle_HSSI 0x400000
+#define bRxProcessTime_BBPPW 0x800000
+#define bRxAntennaPowerShift 0x3000000
+#define bRSSITableSelect 0xc000000
+#define bRxHP_Final 0x7000000
+#define bRxHTSettle_BBP 0x7
+#define bRxHTSettle_HSSI 0x8
+#define bRxHTSettle_RxHP 0x70
+#define bRxHTSettle_BBPPW 0x80
+#define bRxHTSettle_Idle 0x300
+#define bRxHTSettle_Reserved 0x1c00
+#define bRxHTRxHPEn 0x8000
+#define bRxHTAGCFreezeThres 0x30000
+#define bRxHTAGCTogetherEn 0x40000
+#define bRxHTAGCMin 0x80000
+#define bRxHTAGCEn 0x100000
+#define bRxHTDAGCEn 0x200000
+#define bRxHTRxHP_BBP 0x1c00000
+#define bRxHTRxHP_Final 0xe0000000
+#define bRxPWRatioTH 0x3
+#define bRxPWRatioEn 0x4
+#define bRxMFHold 0x3800
+#define bRxPD_Delay_TH1 0x38
+#define bRxPD_Delay_TH2 0x1c0
+#define bRxPD_DC_COUNT_MAX 0x600
+/* define bRxMF_Hold 0x3800 */
+#define bRxPD_Delay_TH 0x8000
+#define bRxProcess_Delay 0xf0000
+#define bRxSearchrange_GI2_Early 0x700000
+#define bRxFrame_Guard_Counter_L 0x3800000
+#define bRxSGI_Guard_L 0xc000000
+#define bRxSGI_Search_L 0x30000000
+#define bRxSGI_TH 0xc0000000
+#define bDFSCnt0 0xff
+#define bDFSCnt1 0xff00
+#define bDFSFlag 0xf0000
+#define bMFWeightSum 0x300000
+#define bMinIdxTH 0x7f000000
+#define bDAFormat 0x40000
+#define bTxChEmuEnable 0x01000000
+#define bTRSWIsolation_A 0x7f
+#define bTRSWIsolation_B 0x7f00
+#define bTRSWIsolation_C 0x7f0000
+#define bTRSWIsolation_D 0x7f000000
+#define bExtLNAGain 0x7c00
+
+/* 6. PageE(0xE00) */
+#define bSTBCEn 0x4 /* Useless */
+#define bAntennaMapping 0x10
+#define bNss 0x20
+#define bCFOAntSumD 0x200
+#define bPHYCounterReset 0x8000000
+#define bCFOReportGet 0x4000000
+#define bOFDMContinueTx 0x10000000
+#define bOFDMSingleCarrier 0x20000000
+#define bOFDMSingleTone 0x40000000
+/* define bRxPath1 0x01 */
+/* define bRxPath2 0x02 */
+/* define bRxPath3 0x04 */
+/* define bRxPath4 0x08 */
+/* define bTxPath1 0x10 */
+/* define bTxPath2 0x20 */
+#define bHTDetect 0x100
+#define bCFOEn 0x10000
+#define bCFOValue 0xfff00000
+#define bSigTone_Re 0x3f
+#define bSigTone_Im 0x7f00
+#define bCounter_CCA 0xffff
+#define bCounter_ParityFail 0xffff0000
+#define bCounter_RateIllegal 0xffff
+#define bCounter_CRC8Fail 0xffff0000
+#define bCounter_MCSNoSupport 0xffff
+#define bCounter_FastSync 0xffff
+#define bShortCFO 0xfff
+#define bShortCFOTLength 12 /* total */
+#define bShortCFOFLength 11 /* fraction */
+#define bLongCFO 0x7ff
+#define bLongCFOTLength 11
+#define bLongCFOFLength 11
+#define bTailCFO 0x1fff
+#define bTailCFOTLength 13
+#define bTailCFOFLength 12
+#define bmax_en_pwdB 0xffff
+#define bCC_power_dB 0xffff0000
+#define bnoise_pwdB 0xffff
+#define bPowerMeasTLength 10
+#define bPowerMeasFLength 3
+#define bRx_HT_BW 0x1
+#define bRxSC 0x6
+#define bRx_HT 0x8
+#define bNB_intf_det_on 0x1
+#define bIntf_win_len_cfg 0x30
+#define bNB_Intf_TH_cfg 0x1c0
+#define bRFGain 0x3f
+#define bTableSel 0x40
+#define bTRSW 0x80
+#define bRxSNR_A 0xff
+#define bRxSNR_B 0xff00
+#define bRxSNR_C 0xff0000
+#define bRxSNR_D 0xff000000
+#define bSNREVMTLength 8
+#define bSNREVMFLength 1
+#define bCSI1st 0xff
+#define bCSI2nd 0xff00
+#define bRxEVM1st 0xff0000
+#define bRxEVM2nd 0xff000000
+#define bSIGEVM 0xff
+#define bPWDB 0xff00
+#define bSGIEN 0x10000
+
+#define bSFactorQAM1 0xf /* Useless */
+#define bSFactorQAM2 0xf0
+#define bSFactorQAM3 0xf00
+#define bSFactorQAM4 0xf000
+#define bSFactorQAM5 0xf0000
+#define bSFactorQAM6 0xf0000
+#define bSFactorQAM7 0xf00000
+#define bSFactorQAM8 0xf000000
+#define bSFactorQAM9 0xf0000000
+#define bCSIScheme 0x100000
+
+#define bNoiseLvlTopSet 0x3 /* Useless */
+#define bChSmooth 0x4
+#define bChSmoothCfg1 0x38
+#define bChSmoothCfg2 0x1c0
+#define bChSmoothCfg3 0xe00
+#define bChSmoothCfg4 0x7000
+#define bMRCMode 0x800000
+#define bTHEVMCfg 0x7000000
+
+#define bLoopFitType 0x1 /* Useless */
+#define bUpdCFO 0x40
+#define bUpdCFOOffData 0x80
+#define bAdvUpdCFO 0x100
+#define bAdvTimeCtrl 0x800
+#define bUpdClko 0x1000
+#define bFC 0x6000
+#define bTrackingMode 0x8000
+#define bPhCmpEnable 0x10000
+#define bUpdClkoLTF 0x20000
+#define bComChCFO 0x40000
+#define bCSIEstiMode 0x80000
+#define bAdvUpdEqz 0x100000
+#define bUChCfg 0x7000000
+#define bUpdEqz 0x8000000
+
+#define bTxAGCRate18_06 0x7f7f7f7f /* Useless */
+#define bTxAGCRate54_24 0x7f7f7f7f
+#define bTxAGCRateMCS32 0x7f
+#define bTxAGCRateCCK 0x7f00
+#define bTxAGCRateMCS3_MCS0 0x7f7f7f7f
+#define bTxAGCRateMCS7_MCS4 0x7f7f7f7f
+#define bTxAGCRateMCS11_MCS8 0x7f7f7f7f
+#define bTxAGCRateMCS15_MCS12 0x7f7f7f7f
+
+/* Rx Pseudo noise */
+#define bRxPesudoNoiseOn 0x20000000 /* Useless */
+#define bRxPesudoNoise_A 0xff
+#define bRxPesudoNoise_B 0xff00
+#define bRxPesudoNoise_C 0xff0000
+#define bRxPesudoNoise_D 0xff000000
+#define bPesudoNoiseState_A 0xffff
+#define bPesudoNoiseState_B 0xffff0000
+#define bPesudoNoiseState_C 0xffff
+#define bPesudoNoiseState_D 0xffff0000
+
+/* 7. RF Register */
+/* Zebra1 */
+#define bZebra1_HSSIEnable 0x8 /* Useless */
+#define bZebra1_TRxControl 0xc00
+#define bZebra1_TRxGainSetting 0x07f
+#define bZebra1_RxCorner 0xc00
+#define bZebra1_TxChargePump 0x38
+#define bZebra1_RxChargePump 0x7
+#define bZebra1_ChannelNum 0xf80
+#define bZebra1_TxLPFBW 0x400
+#define bZebra1_RxLPFBW 0x600
+
+/* Zebra4 */
+#define bRTL8256RegModeCtrl1 0x100 /* Useless */
+#define bRTL8256RegModeCtrl0 0x40
+#define bRTL8256_TxLPFBW 0x18
+#define bRTL8256_RxLPFBW 0x600
+
+/* RTL8258 */
+#define bRTL8258_TxLPFBW 0xc /* Useless */
+#define bRTL8258_RxLPFBW 0xc00
+#define bRTL8258_RSSILPFBW 0xc0
+
+
+/* */
+/* Other Definition */
+/* */
+
+/* byte enable for sb_write */
+#define bByte0 0x1 /* Useless */
+#define bByte1 0x2
+#define bByte2 0x4
+#define bByte3 0x8
+#define bWord0 0x3
+#define bWord1 0xc
+#define bDWord 0xf
+
+/* for PutRegsetting & GetRegSetting BitMask */
+#define bMaskByte0 0xff /* Reg 0xc50 rOFDM0_XAAGCCore~0xC6f */
+#define bMaskByte1 0xff00
+#define bMaskByte2 0xff0000
+#define bMaskByte3 0xff000000
+#define bMaskHWord 0xffff0000
+#define bMaskLWord 0x0000ffff
+#define bMaskDWord 0xffffffff
+#define bMaskH4Bits 0xf0000000
+#define bMaskOFDM_D 0xffc00000
+#define bMaskCCK 0x3f3f3f3f
+#define bMask12Bits 0xfff
+
+/* for PutRFRegsetting & GetRFRegSetting BitMask */
+#if (RTL92SE_FPGA_VERIFY == 1)
+#define bRFRegOffsetMask 0xfff
+#else
+#define bRFRegOffsetMask 0xfffff
+#endif
+#define bEnable 0x1 /* Useless */
+#define bDisabl 0x0
+
+#define LeftAntenna 0x0 /* Useless */
+#define RightAntenna 0x1
+
+#define tCheckTxStatus 500 /* 500ms Useless */
+#define tUpdateRxCounter 100 /* 100ms */
+
+#define rateCCK 0 /* Useless */
+#define rateOFDM 1
+#define rateHT 2
+
+/* define Register-End */
+#define bPMAC_End 0x1ff /* Useless */
+#define bFPGAPHY0_End 0x8ff
+#define bFPGAPHY1_End 0x9ff
+#define bCCKPHY0_End 0xaff
+#define bOFDMPHY0_End 0xcff
+#define bOFDMPHY1_End 0xdff
+
+/* define max debug item in each debug page */
+/* define bMaxItem_FPGA_PHY0 0x9 */
+/* define bMaxItem_FPGA_PHY1 0x3 */
+/* define bMaxItem_PHY_11B 0x16 */
+/* define bMaxItem_OFDM_PHY0 0x29 */
+/* define bMaxItem_OFDM_PHY1 0x0 */
+
+#define bPMACControl 0x0 /* Useless */
+#define bWMACControl 0x1
+#define bWNICControl 0x2
+
+#define RCR_AAP BIT(0) /* accept all physical address */
+#define RCR_APM BIT(1) /* accept physical match */
+#define RCR_AM BIT(2) /* accept multicast */
+#define RCR_AB BIT(3) /* accept broadcast */
+#define RCR_ACRC32 BIT(5) /* accept error packet */
+#define RCR_9356SEL BIT(6)
+#define RCR_AICV BIT(12) /* Accept ICV error packet */
+#define RCR_RXFTH0 (BIT(13)|BIT(14)|BIT(15)) /* Rx FIFO threshold */
+#define RCR_ADF BIT(18) /* Accept Data(frame type) frame */
+#define RCR_ACF BIT(19) /* Accept control frame */
+#define RCR_AMF BIT(20) /* Accept management frame */
+#define RCR_ADD3 BIT(21)
+#define RCR_APWRMGT BIT(22) /* Accept power management packet */
+#define RCR_CBSSID BIT(23) /* Accept BSSID match packet */
+#define RCR_ENMARP BIT(28) /* enable mac auto reset phy */
+#define RCR_EnCS1 BIT(29) /* enable carrier sense method 1 */
+#define RCR_EnCS2 BIT(30) /* enable carrier sense method 2 */
+#define RCR_OnlyErlPkt BIT(31) /* Rx Early mode is performed for
+ * packet size greater than 1536 */
+
+/*--------------------------Define Parameters-------------------------------*/
+
+
+#endif /* __INC_HAL8192SPHYREG_H */
diff --git a/drivers/staging/rtl8188eu/include/rtw_p2p.h b/drivers/staging/rtl8188eu/include/rtw_p2p.h
new file mode 100644
index 0000000..a3e3adc
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_p2p.h
@@ -0,0 +1,135 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_P2P_H_
+#define __RTW_P2P_H_
+
+#include <drv_types.h>
+
+u32 build_beacon_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pbuf);
+u32 build_probe_resp_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pbuf);
+u32 build_prov_disc_request_p2p_ie(struct wifidirect_info *pwdinfo,
+ u8 *pbuf, u8 *pssid, u8 ussidlen,
+ u8 *pdev_raddr);
+u32 build_assoc_resp_p2p_ie(struct wifidirect_info *pwdinfo,
+ u8 *pbuf, u8 status_code);
+u32 build_deauth_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pbuf);
+u32 process_probe_req_p2p_ie(struct wifidirect_info *pwdinfo,
+ u8 *pframe, uint len);
+u32 process_assoc_req_p2p_ie(struct wifidirect_info *pwdinfo,
+ u8 *pframe, uint len, struct sta_info *psta);
+u32 process_p2p_devdisc_req(struct wifidirect_info *pwdinfo,
+ u8 *pframe, uint len);
+u32 process_p2p_devdisc_resp(struct wifidirect_info *pwdinfo,
+ u8 *pframe, uint len);
+u8 process_p2p_provdisc_req(struct wifidirect_info *pwdinfo,
+ u8 *pframe, uint len);
+u8 process_p2p_provdisc_resp(struct wifidirect_info *pwdinfo, u8 *pframe);
+u8 process_p2p_group_negotation_req(struct wifidirect_info *pwdinfo,
+ u8 *pframe, uint len);
+u8 process_p2p_group_negotation_resp(struct wifidirect_info *pwdinfo,
+ u8 *pframe, uint len);
+u8 process_p2p_group_negotation_confirm(struct wifidirect_info *pwdinfo,
+ u8 *pframe, uint len);
+u8 process_p2p_presence_req(struct wifidirect_info *pwdinfo, u8 *pframe,
+ uint len);
+void p2p_protocol_wk_hdl(struct adapter *padapter, int intcmdtype);
+void process_p2p_ps_ie(struct adapter *padapter, u8 *ies, u32 ielength);
+void p2p_ps_wk_hdl(struct adapter *padapter, u8 p2p_ps_state);
+u8 p2p_ps_wk_cmd(struct adapter *padapter, u8 p2p_ps_state, u8 enqueue);
+void reset_global_wifidirect_info(struct adapter *padapter);
+int rtw_init_wifi_display_info(struct adapter *padapter);
+void rtw_init_wifidirect_timers(struct adapter *padapter);
+void rtw_init_wifidirect_addrs(struct adapter *padapter, u8 *dev_addr,
+ u8 *iface_addr);
+void init_wifidirect_info(struct adapter *padapter, enum P2P_ROLE role);
+int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role);
+
+static inline void _rtw_p2p_set_state(struct wifidirect_info *wdinfo,
+ enum P2P_STATE state)
+{
+ if (wdinfo->p2p_state != state)
+ wdinfo->p2p_state = state;
+}
+
+static inline void _rtw_p2p_set_pre_state(struct wifidirect_info *wdinfo,
+ enum P2P_STATE state)
+{
+ if (wdinfo->pre_p2p_state != state)
+ wdinfo->pre_p2p_state = state;
+}
+
+static inline void _rtw_p2p_set_role(struct wifidirect_info *wdinfo,
+ enum P2P_ROLE role)
+{
+ if (wdinfo->role != role)
+ wdinfo->role = role;
+}
+
+static inline int _rtw_p2p_state(struct wifidirect_info *wdinfo)
+{
+ return wdinfo->p2p_state;
+}
+
+static inline int _rtw_p2p_pre_state(struct wifidirect_info *wdinfo)
+{
+ return wdinfo->pre_p2p_state;
+}
+
+static inline int _rtw_p2p_role(struct wifidirect_info *wdinfo)
+{
+ return wdinfo->role;
+}
+
+static inline bool _rtw_p2p_chk_state(struct wifidirect_info *wdinfo,
+ enum P2P_STATE state)
+{
+ return wdinfo->p2p_state == state;
+}
+
+static inline bool _rtw_p2p_chk_role(struct wifidirect_info *wdinfo,
+ enum P2P_ROLE role)
+{
+ return wdinfo->role == role;
+}
+
+#define rtw_p2p_set_state(wdinfo, state) _rtw_p2p_set_state(wdinfo, state)
+#define rtw_p2p_set_pre_state(wdinfo, state) \
+ _rtw_p2p_set_pre_state(wdinfo, state)
+#define rtw_p2p_set_role(wdinfo, role) _rtw_p2p_set_role(wdinfo, role)
+
+#define rtw_p2p_state(wdinfo) _rtw_p2p_state(wdinfo)
+#define rtw_p2p_pre_state(wdinfo) _rtw_p2p_pre_state(wdinfo)
+#define rtw_p2p_role(wdinfo) _rtw_p2p_role(wdinfo)
+#define rtw_p2p_chk_state(wdinfo, state) _rtw_p2p_chk_state(wdinfo, state)
+#define rtw_p2p_chk_role(wdinfo, role) _rtw_p2p_chk_role(wdinfo, role)
+
+#define rtw_p2p_findphase_ex_set(wdinfo, value) \
+ ((wdinfo)->find_phase_state_exchange_cnt = (value))
+
+/* is this find phase exchange for social channel scan? */
+#define rtw_p2p_findphase_ex_is_social(wdinfo) \
+((wdinfo)->find_phase_state_exchange_cnt >= P2P_FINDPHASE_EX_SOCIAL_FIRST)
+
+/* do we still need a find phase exchange? */
+#define rtw_p2p_findphase_ex_is_needed(wdinfo) \
+ ((wdinfo)->find_phase_state_exchange_cnt < P2P_FINDPHASE_EX_MAX && \
+ (wdinfo)->find_phase_state_exchange_cnt != P2P_FINDPHASE_EX_NONE)
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
new file mode 100644
index 0000000..d4b8acb
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
@@ -0,0 +1,283 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_PWRCTRL_H_
+#define __RTW_PWRCTRL_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#define FW_PWR0 0
+#define FW_PWR1 1
+#define FW_PWR2 2
+#define FW_PWR3 3
+#define HW_PWR0 7
+#define HW_PWR1 6
+#define HW_PWR2 2
+#define HW_PWR3 0
+#define HW_PWR4 8
+
+#define FW_PWRMSK 0x7
+
+#define XMIT_ALIVE BIT(0)
+#define RECV_ALIVE BIT(1)
+#define CMD_ALIVE BIT(2)
+#define EVT_ALIVE BIT(3)
+
+enum power_mgnt {
+ PS_MODE_ACTIVE = 0,
+ PS_MODE_MIN,
+ PS_MODE_MAX,
+ PS_MODE_DTIM,
+ PS_MODE_VOIP,
+ PS_MODE_UAPSD_WMM,
+ PS_MODE_UAPSD,
+ PS_MODE_IBSS,
+ PS_MODE_WWLAN,
+ PM_Radio_Off,
+ PM_Card_Disable,
+ PS_MODE_NUM
+};
+
+/*
+ BIT[2:0] = HW state
+ BIT[3] = Protocol PS state, 0: register active state,
+ 1: register sleep state
+ BIT[4] = sub-state
+*/
+
+#define PS_DPS BIT(0)
+#define PS_LCLK (PS_DPS)
+#define PS_RF_OFF BIT(1)
+#define PS_ALL_ON BIT(2)
+#define PS_ST_ACTIVE BIT(3)
+
+#define PS_ISR_ENABLE BIT(4)
+#define PS_IMR_ENABLE BIT(5)
+#define PS_ACK BIT(6)
+#define PS_TOGGLE BIT(7)
+
+#define PS_STATE_MASK (0x0F)
+#define PS_STATE_HW_MASK (0x07)
+#define PS_SEQ_MASK (0xc0)
+
+#define PS_STATE(x) (PS_STATE_MASK & (x))
+#define PS_STATE_HW(x) (PS_STATE_HW_MASK & (x))
+#define PS_SEQ(x) (PS_SEQ_MASK & (x))
+
+#define PS_STATE_S0 (PS_DPS)
+#define PS_STATE_S1 (PS_LCLK)
+#define PS_STATE_S2 (PS_RF_OFF)
+#define PS_STATE_S3 (PS_ALL_ON)
+#define PS_STATE_S4 ((PS_ST_ACTIVE) | (PS_ALL_ON))
+
+#define PS_IS_RF_ON(x) ((x) & (PS_ALL_ON))
+#define PS_IS_ACTIVE(x) ((x) & (PS_ST_ACTIVE))
+#define CLR_PS_STATE(x) ((x) = ((x) & (0xF0)))
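+
+/*
+ * Hypothetical helper, added only to illustrate how the masks above carve up
+ * a raw CPWM/RPWM byte; it is not used elsewhere in this driver. For an
+ * assumed value of 0xCC: PS_STATE() == 0x0C (state S4), PS_STATE_HW() ==
+ * 0x04 (PS_ALL_ON) and PS_SEQ() == 0xC0 (PS_ACK | PS_TOGGLE).
+ */
+static inline bool ps_value_is_rf_active(u8 pwr)
+{
+	/* RF is powered and the protocol state is "register active" */
+	return PS_IS_RF_ON(PS_STATE(pwr)) && PS_IS_ACTIVE(PS_STATE(pwr));
+}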
+
+struct reportpwrstate_parm {
+ unsigned char mode;
+ unsigned char state; /* the CPWM value */
+ unsigned short rsvd;
+};
+
+static inline void _init_pwrlock(struct semaphore *plock)
+{
+ _rtw_init_sema(plock, 1);
+}
+
+static inline void _free_pwrlock(struct semaphore *plock)
+{
+ _rtw_free_sema(plock);
+}
+
+static inline void _enter_pwrlock(struct semaphore *plock)
+{
+ _rtw_down_sema(plock);
+}
+
+static inline void _exit_pwrlock(struct semaphore *plock)
+{
+ _rtw_up_sema(plock);
+}
+
+#define LPS_DELAY_TIME 1*HZ /* 1 sec */
+
+#define EXE_PWR_NONE 0x01
+#define EXE_PWR_IPS 0x02
+#define EXE_PWR_LPS 0x04
+
+/* RF state. */
+enum rt_rf_power_state {
+ rf_on, /* RF is on after RFSleep or RFOff */
+ rf_sleep, /* 802.11 Power Save mode */
+ rf_off, /* HW/SW Radio OFF or Inactive Power Save */
+ /* Add the new RF state above this line===== */
+ rf_max
+};
+
+/* RF Off Level for IPS or HW/SW radio off */
+#define RT_RF_OFF_LEVL_ASPM BIT(0) /* PCI ASPM */
+#define RT_RF_OFF_LEVL_CLK_REQ BIT(1) /* PCI clock request */
+#define RT_RF_OFF_LEVL_PCI_D3 BIT(2) /* PCI D3 mode */
+#define RT_RF_OFF_LEVL_HALT_NIC BIT(3) /* NIC halt, re-init hw param*/
+#define RT_RF_OFF_LEVL_FREE_FW BIT(4) /* FW free, re-download the FW*/
+#define RT_RF_OFF_LEVL_FW_32K BIT(5) /* FW in 32k */
+#define RT_RF_PS_LEVEL_ALWAYS_ASPM BIT(6) /* Always enable ASPM and Clock
+ * Req in initialization. */
+#define RT_RF_LPS_DISALBE_2R BIT(30) /* When LPS is on, disable 2R
+ * if no packet is RX or TX. */
+#define RT_RF_LPS_LEVEL_ASPM BIT(31) /* LPS with ASPM */
+
+#define RT_IN_PS_LEVEL(ppsc, _PS_FLAG) \
+ ((ppsc->cur_ps_level & _PS_FLAG) ? true : false)
+#define RT_CLEAR_PS_LEVEL(ppsc, _PS_FLAG) \
+ (ppsc->cur_ps_level &= (~(_PS_FLAG)))
+#define RT_SET_PS_LEVEL(ppsc, _PS_FLAG) \
+ (ppsc->cur_ps_level |= _PS_FLAG)
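+
+/*
+ * Usage sketch only (the flag chosen here is an assumed example): the three
+ * helpers above simply test, clear and set bits in ppsc->cur_ps_level, e.g.
+ *
+ *	if (!RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC))
+ *		RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ *	...
+ *	RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ */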
+
+enum _PS_BBRegBackup_ {
+ PSBBREG_RF0 = 0,
+ PSBBREG_RF1,
+ PSBBREG_RF2,
+ PSBBREG_AFE0,
+ PSBBREG_TOTALCNT
+};
+
+enum { /* for ips_mode */
+ IPS_NONE = 0,
+ IPS_NORMAL,
+ IPS_LEVEL_2,
+};
+
+struct pwrctrl_priv {
+ struct semaphore lock;
+ volatile u8 rpwm; /* requested power state for fw */
+ volatile u8 cpwm; /* fw current power state. updated when
+ * 1. read from HCPWM 2. driver lowers power level */
+ volatile u8 tog; /* toggling */
+ volatile u8 cpwm_tog; /* toggling */
+
+ u8 pwr_mode;
+ u8 smart_ps;
+ u8 bcn_ant_mode;
+
+ u32 alives;
+ struct work_struct cpwm_event;
+ u8 bpower_saving;
+
+ u8 b_hw_radio_off;
+ u8 reg_rfoff;
+ u8 reg_pdnmode; /* powerdown mode */
+ u32 rfoff_reason;
+
+ /* RF OFF Level */
+ u32 cur_ps_level;
+ u32 reg_rfps_level;
+ uint ips_enter_cnts;
+ uint ips_leave_cnts;
+
+ u8 ips_mode;
+ u8 ips_mode_req; /* used to accept the mode setting request,
+ * will update to ipsmode later */
+ uint bips_processing;
+	u32 ips_deny_time; /* IPS is denied while the system time is less
+			    * than this */
+	u8 ps_processing; /* temporarily marks that we are inside
+			   * rtw_ps_processor */
+
+ u8 bLeisurePs;
+ u8 LpsIdleCount;
+ u8 power_mgnt;
+ u8 bFwCurrentInPSMode;
+ u32 DelayLPSLastTimeStamp;
+ u8 btcoex_rfon;
+ s32 pnp_current_pwr_state;
+ u8 pnp_bstop_trx;
+
+ u8 bInternalAutoSuspend;
+ u8 bInSuspend;
+#ifdef CONFIG_BT_COEXIST
+ u8 bAutoResume;
+ u8 autopm_cnt;
+#endif
+ u8 bSupportRemoteWakeup;
+ struct timer_list pwr_state_check_timer;
+ int pwr_state_check_interval;
+ u8 pwr_state_check_cnts;
+
+ int ps_flag;
+
+ enum rt_rf_power_state rf_pwrstate;/* cur power state */
+ enum rt_rf_power_state change_rfpwrstate;
+
+ u8 wepkeymask;
+ u8 bHWPowerdown;/* if support hw power down */
+ u8 bHWPwrPindetect;
+ u8 bkeepfwalive;
+ u8 brfoffbyhw;
+ unsigned long PS_BBRegBackup[PSBBREG_TOTALCNT];
+};
+
+#define rtw_get_ips_mode_req(pwrctrlpriv) \
+ (pwrctrlpriv)->ips_mode_req
+
+#define rtw_ips_mode_req(pwrctrlpriv, ips_mode) \
+ ((pwrctrlpriv)->ips_mode_req = (ips_mode))
+
+#define RTW_PWR_STATE_CHK_INTERVAL 2000
+
+#define _rtw_set_pwr_state_check_timer(pwrctrlpriv, ms) \
+ do { \
+ _set_timer(&(pwrctrlpriv)->pwr_state_check_timer, (ms)); \
+ } while (0)
+
+#define rtw_set_pwr_state_check_timer(pwrctrl) \
+ _rtw_set_pwr_state_check_timer((pwrctrl), \
+ (pwrctrl)->pwr_state_check_interval)
+
+void rtw_init_pwrctrl_priv(struct adapter *adapter);
+void rtw_free_pwrctrl_priv(struct adapter *adapter);
+
+void rtw_set_ps_mode(struct adapter *adapter, u8 ps_mode, u8 smart_ps,
+ u8 bcn_ant_mode);
+void rtw_set_rpwm(struct adapter *adapter, u8 val8);
+void LeaveAllPowerSaveMode(struct adapter *adapter);
+void ips_enter(struct adapter *padapter);
+int ips_leave(struct adapter *padapter);
+
+void rtw_ps_processor(struct adapter *padapter);
+
+enum rt_rf_power_state RfOnOffDetect(struct adapter *iadapter);
+
+s32 LPS_RF_ON_check(struct adapter *adapter, u32 delay_ms);
+void LPS_Enter(struct adapter *adapter);
+void LPS_Leave(struct adapter *adapter);
+
+u8 rtw_interface_ps_func(struct adapter *adapter,
+ enum hal_intf_ps_func efunc_id, u8 *val);
+void rtw_set_ips_deny(struct adapter *adapter, u32 ms);
+int _rtw_pwr_wakeup(struct adapter *adapter, u32 ips_defer_ms,
+ const char *caller);
+#define rtw_pwr_wakeup(adapter) \
+ _rtw_pwr_wakeup(adapter, RTW_PWR_STATE_CHK_INTERVAL, __func__)
+#define rtw_pwr_wakeup_ex(adapter, ips_deffer_ms) \
+ _rtw_pwr_wakeup(adapter, ips_deffer_ms, __func__)
+int rtw_pm_set_ips(struct adapter *adapter, u8 mode);
+int rtw_pm_set_lps(struct adapter *adapter, u8 mode);
+
+#endif /* __RTL871X_PWRCTRL_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_qos.h b/drivers/staging/rtl8188eu/include/rtw_qos.h
new file mode 100644
index 0000000..bbee1dd
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_qos.h
@@ -0,0 +1,30 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _RTW_QOS_H_
+#define _RTW_QOS_H_
+
+#include <osdep_service.h>
+
+struct qos_priv {
+ unsigned int qos_option; /* bit mask option: u-apsd,
+ * s-apsd, ts, block ack... */
+};
+
+#endif /* _RTL871X_QOS_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_recv.h b/drivers/staging/rtl8188eu/include/rtw_recv.h
new file mode 100644
index 0000000..bae8885
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_recv.h
@@ -0,0 +1,485 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _RTW_RECV_H_
+#define _RTW_RECV_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+
+#define NR_RECVFRAME 256
+
+#define RXFRAME_ALIGN 8
+#define RXFRAME_ALIGN_SZ (1<<RXFRAME_ALIGN)
+
+#define MAX_RXFRAME_CNT 512
+#define MAX_RX_NUMBLKS (32)
+#define RECVFRAME_HDR_ALIGN 128
+
+#define SNAP_SIZE sizeof(struct ieee80211_snap_hdr)
+
+#define MAX_SUBFRAME_COUNT 64
+
+/* for Rx reordering buffer control */
+struct recv_reorder_ctrl {
+ struct adapter *padapter;
+ u8 enable;
+ u16 indicate_seq;/* wstart_b, init_value=0xffff */
+ u16 wend_b;
+ u8 wsize_b;
+ struct __queue pending_recvframe_queue;
+ struct timer_list reordering_ctrl_timer;
+};
+
+struct stainfo_rxcache {
+ u16 tid_rxseq[16];
+/*
+ unsigned short tid0_rxseq;
+ unsigned short tid1_rxseq;
+ unsigned short tid2_rxseq;
+ unsigned short tid3_rxseq;
+ unsigned short tid4_rxseq;
+ unsigned short tid5_rxseq;
+ unsigned short tid6_rxseq;
+ unsigned short tid7_rxseq;
+ unsigned short tid8_rxseq;
+ unsigned short tid9_rxseq;
+ unsigned short tid10_rxseq;
+ unsigned short tid11_rxseq;
+ unsigned short tid12_rxseq;
+ unsigned short tid13_rxseq;
+ unsigned short tid14_rxseq;
+ unsigned short tid15_rxseq;
+*/
+};
+
+struct smooth_rssi_data {
+ u32 elements[100]; /* array to store values */
+ u32 index; /* index to current array to store */
+ u32 total_num; /* num of valid elements */
+ u32 total_val; /* sum of valid elements */
+};
+
+struct signal_stat {
+ u8 update_req; /* used to indicate */
+ u8 avg_val; /* avg of valid elements */
+ u32 total_num; /* num of valid elements */
+ u32 total_val; /* sum of valid elements */
+};
+#define MAX_PATH_NUM_92CS 2
+struct phy_info {
+ u8 RxPWDBAll;
+ u8 SignalQuality; /* in 0-100 index. */
+ u8 RxMIMOSignalQuality[MAX_PATH_NUM_92CS]; /* EVM */
+ u8 RxMIMOSignalStrength[MAX_PATH_NUM_92CS];/* in 0~100 index */
+ s8 RxPower; /* in dBm Translate from PWdB */
+/* Real power in dBm for this packet, no beautification and aggregation.
+ * Keep this raw info to be used for the other procedures. */
+ s8 recvpower;
+ u8 BTRxRSSIPercentage;
+ u8 SignalStrength; /* in 0-100 index. */
+ u8 RxPwr[MAX_PATH_NUM_92CS];/* per-path's pwdb */
+ u8 RxSNR[MAX_PATH_NUM_92CS];/* per-path's SNR */
+};
+
+struct rx_pkt_attrib {
+ u16 pkt_len;
+ u8 physt;
+ u8 drvinfo_sz;
+ u8 shift_sz;
+ u8 hdrlen; /* the WLAN Header Len */
+ u8 to_fr_ds;
+ u8 amsdu;
+ u8 qos;
+ u8 priority;
+ u8 pw_save;
+ u8 mdata;
+ u16 seq_num;
+ u8 frag_num;
+ u8 mfrag;
+ u8 order;
+ u8 privacy; /* in frame_ctrl field */
+ u8 bdecrypted;
+	u8 encrypt; /* when 0, no encryption; when non-zero, it
+		     * indicates the encryption algorithm */
+ u8 iv_len;
+ u8 icv_len;
+ u8 crc_err;
+ u8 icv_err;
+
+ u16 eth_type;
+
+ u8 dst[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ u8 ta[ETH_ALEN];
+ u8 ra[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+
+ u8 ack_policy;
+
+ u8 key_index;
+
+ u8 mcs_rate;
+ u8 rxht;
+ u8 sgi;
+ u8 pkt_rpt_type;
+ u32 MacIDValidEntry[2]; /* 64 bits present 64 entry. */
+
+ struct phy_info phy_info;
+};
+
+
+/* These definitions are used for Rx packet reordering. */
+#define SN_LESS(a, b) (((a - b) & 0x800) != 0)
+#define SN_EQUAL(a, b) (a == b)
+#define REORDER_WAIT_TIME (50) /* (ms) */
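+
+/*
+ * Worked example (illustration only): sequence numbers are compared modulo
+ * 4096, so SN_LESS() also treats a just-wrapped value as newer:
+ *	SN_LESS(10, 20)  -> (10 - 20) & 0x800 = 0x800 -> true  (10 precedes 20)
+ *	SN_LESS(4095, 1) -> (4094)    & 0x800 = 0x800 -> true  (4095 precedes 1
+ *	                    across the wrap)
+ *	SN_LESS(20, 10)  -> (10)      & 0x800 = 0     -> false
+ */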
+
+#define RECVBUFF_ALIGN_SZ 8
+
+#define RXDESC_SIZE 24
+#define RXDESC_OFFSET RXDESC_SIZE
+
+struct recv_stat {
+ __le32 rxdw0;
+ __le32 rxdw1;
+ __le32 rxdw2;
+ __le32 rxdw3;
+ __le32 rxdw4;
+ __le32 rxdw5;
+};
+
+#define EOR BIT(30)
+
+/*
+ * accessors of recv_priv: rtw_recv_entry (dispatch / passive level),
+ * recv_thread (passive), returnpkt (dispatch) and halt (passive);
+ * protected with enter_critical sections
+ */
+struct recv_priv {
+ spinlock_t lock;
+ struct __queue free_recv_queue;
+ struct __queue recv_pending_queue;
+ struct __queue uc_swdec_pending_queue;
+ u8 *pallocated_frame_buf;
+ u8 *precv_frame_buf;
+ uint free_recvframe_cnt;
+ struct adapter *adapter;
+ u32 bIsAnyNonBEPkts;
+ u64 rx_bytes;
+ u64 rx_pkts;
+ u64 rx_drop;
+ u64 last_rx_bytes;
+
+ uint rx_icv_err;
+ uint rx_largepacket_crcerr;
+ uint rx_smallpacket_crcerr;
+ uint rx_middlepacket_crcerr;
+ struct semaphore allrxreturnevt;
+ uint ff_hwaddr;
+ u8 rx_pending_cnt;
+
+ struct tasklet_struct irq_prepare_beacon_tasklet;
+ struct tasklet_struct recv_tasklet;
+ struct sk_buff_head free_recv_skb_queue;
+ struct sk_buff_head rx_skb_queue;
+ u8 *pallocated_recv_buf;
+ u8 *precv_buf; /* 4 alignment */
+ struct __queue free_recv_buf_queue;
+ u32 free_recv_buf_queue_cnt;
+	/* For displaying the PHY information */
+ u8 is_signal_dbg; /* for debug */
+ u8 signal_strength_dbg; /* for debug */
+ s8 rssi;
+ s8 rxpwdb;
+ u8 signal_strength;
+ u8 signal_qual;
+ u8 noise;
+ int RxSNRdB[2];
+ s8 RxRssi[2];
+ int FalseAlmCnt_all;
+
+ struct timer_list signal_stat_timer;
+ u32 signal_stat_sampling_interval;
+ struct signal_stat signal_qual_data;
+ struct signal_stat signal_strength_data;
+};
+
+#define rtw_set_signal_stat_timer(recvpriv) \
+ _set_timer(&(recvpriv)->signal_stat_timer, \
+ (recvpriv)->signal_stat_sampling_interval)
+
+struct sta_recv_priv {
+ spinlock_t lock;
+ int option;
+ struct __queue defrag_q; /* keeping the fragment frame until defrag */
+ struct stainfo_rxcache rxcache;
+};
+
+struct recv_buf {
+ struct list_head list;
+ spinlock_t recvbuf_lock;
+ u32 ref_cnt;
+ struct adapter *adapter;
+ u8 *pbuf;
+ u8 *pallocated_buf;
+ u32 len;
+ u8 *phead;
+ u8 *pdata;
+ u8 *ptail;
+ u8 *pend;
+ struct urb *purb;
+ dma_addr_t dma_transfer_addr; /* (in) dma addr for transfer_buffer */
+ u32 alloc_sz;
+ u8 irp_pending;
+ int transfer_len;
+ struct sk_buff *pskb;
+ u8 reuse;
+};
+
+/*
+ head ----->
+
+ data ----->
+
+ payload
+
+ tail ----->
+
+
+ end ----->
+
+ len = (unsigned int )(tail - data);
+
+*/
+struct recv_frame_hdr {
+ struct list_head list;
+ struct sk_buff *pkt;
+ struct sk_buff *pkt_newalloc;
+ struct adapter *adapter;
+ u8 fragcnt;
+ int frame_tag;
+ struct rx_pkt_attrib attrib;
+ uint len;
+ u8 *rx_head;
+ u8 *rx_data;
+ u8 *rx_tail;
+ u8 *rx_end;
+ void *precvbuf;
+ struct sta_info *psta;
+ /* for A-MPDU Rx reordering buffer control */
+ struct recv_reorder_ctrl *preorder_ctrl;
+};
+
+union recv_frame {
+ union {
+ struct list_head list;
+ struct recv_frame_hdr hdr;
+ uint mem[RECVFRAME_HDR_ALIGN>>2];
+ } u;
+};
+
+union recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue);
+union recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue);
+void rtw_init_recvframe(union recv_frame *precvframe,
+ struct recv_priv *precvpriv);
+int rtw_free_recvframe(union recv_frame *precvframe,
+ struct __queue *pfree_recv_queue);
+#define rtw_dequeue_recvframe(queue) rtw_alloc_recvframe(queue)
+int _rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue);
+int rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue);
+void rtw_free_recvframe_queue(struct __queue *pframequeue,
+ struct __queue *pfree_recv_queue);
+u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter);
+int rtw_enqueue_recvbuf_to_head(struct recv_buf *buf, struct __queue *queue);
+int rtw_enqueue_recvbuf(struct recv_buf *precvbuf, struct __queue *queue);
+struct recv_buf *rtw_dequeue_recvbuf(struct __queue *queue);
+
+void rtw_reordering_ctrl_timeout_handler(void *pcontext);
+
+static inline u8 *get_rxmem(union recv_frame *precvframe)
+{
+ /* always return rx_head... */
+ if (precvframe == NULL)
+ return NULL;
+ return precvframe->u.hdr.rx_head;
+}
+
+static inline u8 *get_rx_status(union recv_frame *precvframe)
+{
+ return get_rxmem(precvframe);
+}
+
+static inline u8 *get_recvframe_data(union recv_frame *precvframe)
+{
+ /* always return rx_data */
+ if (precvframe == NULL)
+ return NULL;
+
+ return precvframe->u.hdr.rx_data;
+}
+
+static inline u8 *recvframe_push(union recv_frame *precvframe, int sz)
+{
+ /* append data before rx_data */
+
+ /* add data to the start of recv_frame
+ *
+	 * This function extends the used data area of the recv_frame at the
+	 * buffer start. rx_data must not fall below rx_head after pushing.
+	 */
+	if (precvframe == NULL)
+		return NULL;
+	precvframe->u.hdr.rx_data -= sz;
+ if (precvframe->u.hdr.rx_data < precvframe->u.hdr.rx_head) {
+ precvframe->u.hdr.rx_data += sz;
+ return NULL;
+ }
+ precvframe->u.hdr.len += sz;
+ return precvframe->u.hdr.rx_data;
+}
+
+static inline u8 *recvframe_pull(union recv_frame *precvframe, int sz)
+{
+ /* rx_data += sz; move rx_data sz bytes hereafter */
+
+	/* used to extract sz bytes from rx_data; updates rx_data and
+	 * returns the updated rx_data to the caller */
+
+ if (precvframe == NULL)
+ return NULL;
+ precvframe->u.hdr.rx_data += sz;
+ if (precvframe->u.hdr.rx_data > precvframe->u.hdr.rx_tail) {
+ precvframe->u.hdr.rx_data -= sz;
+ return NULL;
+ }
+ precvframe->u.hdr.len -= sz;
+ return precvframe->u.hdr.rx_data;
+}
+
+static inline u8 *recvframe_put(union recv_frame *precvframe, int sz)
+{
+	/* used to append sz bytes at rx_tail; updates rx_tail and
+	 * returns the updated rx_tail to the caller */
+	/* after putting, rx_tail must not exceed rx_end. */
+
+ if (precvframe == NULL)
+ return NULL;
+
+ precvframe->u.hdr.rx_tail += sz;
+
+ if (precvframe->u.hdr.rx_tail > precvframe->u.hdr.rx_end) {
+ precvframe->u.hdr.rx_tail -= sz;
+ return NULL;
+ }
+ precvframe->u.hdr.len += sz;
+ return precvframe->u.hdr.rx_tail;
+}
+
+static inline u8 *recvframe_pull_tail(union recv_frame *precvframe, int sz)
+{
+	/* remove data from rx_tail (by yitsen) */
+
+	/* used to trim sz bytes from the tail; updates rx_tail and
+	 * returns the updated rx_tail to the caller */
+	/* after pulling, rx_tail must not fall below rx_data. */
+
+ if (precvframe == NULL)
+ return NULL;
+ precvframe->u.hdr.rx_tail -= sz;
+ if (precvframe->u.hdr.rx_tail < precvframe->u.hdr.rx_data) {
+ precvframe->u.hdr.rx_tail += sz;
+ return NULL;
+ }
+ precvframe->u.hdr.len -= sz;
+ return precvframe->u.hdr.rx_tail;
+}
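+
+/*
+ * Hypothetical usage sketch (not part of the original interface): the four
+ * accessors above mirror skb_push/skb_pull/skb_put/skb_trim. Trimming a
+ * frame at both ends could look like this; head_sz/tail_sz are whatever the
+ * caller needs to discard (e.g. a descriptor at the front, an ICV at the
+ * tail).
+ */
+static inline u8 *recvframe_trim(union recv_frame *precvframe,
+				 int head_sz, int tail_sz)
+{
+	/* drop head_sz bytes from the front of the data area ... */
+	if (recvframe_pull(precvframe, head_sz) == NULL)
+		return NULL;
+	/* ... and tail_sz bytes from the tail */
+	return recvframe_pull_tail(precvframe, tail_sz);
+}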
+
+static inline unsigned char *get_rxbuf_desc(union recv_frame *precvframe)
+{
+	/* no rx buffer descriptor is kept with the frame in this layout */
+	unsigned char *buf_desc = NULL;
+
+	if (precvframe == NULL)
+		return NULL;
+	return buf_desc;
+}
+
+static inline union recv_frame *rxmem_to_recvframe(u8 *rxmem)
+{
+	/* because each recv_frame is aligned to RXFRAME_ALIGN_SZ bytes,
+	 * the enclosing union recv_frame can be recovered from any
+	 * member address inside it. */
+	/* rxmem is any member/address within the recv_frame */
+
+ return (union recv_frame *)(((size_t)rxmem >> RXFRAME_ALIGN) << RXFRAME_ALIGN);
+}
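+
+/*
+ * Illustration of the address arithmetic above: the shift pair clears the
+ * low RXFRAME_ALIGN (8) bits, i.e. rounds down to a 256 byte
+ * (RXFRAME_ALIGN_SZ) boundary, so e.g. an address ending in ...c1a4 maps
+ * back to the frame base ending in ...c100 (example addresses assumed).
+ */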
+
+static inline union recv_frame *pkt_to_recvframe(struct sk_buff *pkt)
+{
+	/* assumes the skb data area lies inside the aligned recv_frame
+	 * buffer, so the frame header can be recovered from pkt->data */
+	u8 *buf_start = pkt->data;
+	union recv_frame *precv_frame;
+
+	precv_frame = rxmem_to_recvframe((unsigned char *)buf_start);
+
+	return precv_frame;
+}
+
+static inline u8 *pkt_to_recvmem(struct sk_buff *pkt)
+{
+ /* return the rx_head */
+
+ union recv_frame *precv_frame = pkt_to_recvframe(pkt);
+
+ return precv_frame->u.hdr.rx_head;
+}
+
+static inline u8 *pkt_to_recvdata(struct sk_buff *pkt)
+{
+ /* return the rx_data */
+
+ union recv_frame *precv_frame = pkt_to_recvframe(pkt);
+
+ return precv_frame->u.hdr.rx_data;
+}
+
+static inline int get_recvframe_len(union recv_frame *precvframe)
+{
+ return precvframe->u.hdr.len;
+}
+
+static inline s32 translate_percentage_to_dbm(u32 sig_stren_index)
+{
+ s32 power; /* in dBm. */
+
+ /* Translate to dBm (x=0.5y-95). */
+ power = (s32)((sig_stren_index + 1) >> 1);
+ power -= 95;
+
+ return power;
+}
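+
+/*
+ * Worked example of the mapping above (x = 0.5y - 95): a signal strength
+ * index of 60 gives (60 + 1) >> 1 = 30, i.e. 30 - 95 = -65 dBm; an index
+ * of 100 gives 50 - 95 = -45 dBm.
+ */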
+
+
+struct sta_info;
+
+void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv);
+
+void mgt_dispatcher(struct adapter *padapter, union recv_frame *precv_frame);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/rtw_rf.h b/drivers/staging/rtl8188eu/include/rtw_rf.h
new file mode 100644
index 0000000..089ecee
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_rf.h
@@ -0,0 +1,146 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_RF_H_
+#define __RTW_RF_H_
+
+#include <rtw_cmd.h>
+
+#define OFDM_PHY 1
+#define MIXED_PHY 2
+#define CCK_PHY 3
+
+#define NumRates (13)
+
+/* slot time for 11g */
+#define SHORT_SLOT_TIME 9
+#define NON_SHORT_SLOT_TIME 20
+
+#define RTL8711_RF_MAX_SENS 6
+#define RTL8711_RF_DEF_SENS 4
+
+/* We now define the following channels as the max channels in each
+ * channel plan. */
+/* 2G, total 14 chnls */
+/* {1,2,3,4,5,6,7,8,9,10,11,12,13,14} */
+#define MAX_CHANNEL_NUM_2G 14
+#define MAX_CHANNEL_NUM 14 /* 2.4 GHz only */
+
+#define NUM_REGULATORYS 1
+
+/* Country codes */
+#define USA 0x555320
+#define EUROPE 0x1 /* temp, should be provided later */
+#define JAPAN 0x2 /* temp, should be provided later */
+
+struct regulatory_class {
+ u32 starting_freq; /* MHz, */
+ u8 channel_set[MAX_CHANNEL_NUM];
+ u8 channel_cck_power[MAX_CHANNEL_NUM]; /* dbm */
+ u8 channel_ofdm_power[MAX_CHANNEL_NUM]; /* dbm */
+ u8 txpower_limit; /* dbm */
+ u8 channel_spacing; /* MHz */
+ u8 modem;
+};
+
+enum capability {
+ cESS = 0x0001,
+ cIBSS = 0x0002,
+ cPollable = 0x0004,
+ cPollReq = 0x0008,
+ cPrivacy = 0x0010,
+ cShortPreamble = 0x0020,
+ cPBCC = 0x0040,
+ cChannelAgility = 0x0080,
+ cSpectrumMgnt = 0x0100,
+ cQos = 0x0200, /* For HCCA, use with CF-Pollable
+ * and CF-PollReq */
+ cShortSlotTime = 0x0400,
+ cAPSD = 0x0800,
+	cRM = 0x1000, /* RRM (Radio Resource Measurement) */
+ cDSSS_OFDM = 0x2000,
+ cDelayedBA = 0x4000,
+ cImmediateBA = 0x8000,
+};
+
+enum _REG_PREAMBLE_MODE {
+ PREAMBLE_LONG = 1,
+ PREAMBLE_AUTO = 2,
+ PREAMBLE_SHORT = 3,
+};
+
+enum _RTL8712_RF_MIMO_CONFIG_ {
+ RTL8712_RFCONFIG_1T = 0x10,
+ RTL8712_RFCONFIG_2T = 0x20,
+ RTL8712_RFCONFIG_1R = 0x01,
+ RTL8712_RFCONFIG_2R = 0x02,
+ RTL8712_RFCONFIG_1T1R = 0x11,
+ RTL8712_RFCONFIG_1T2R = 0x12,
+ RTL8712_RFCONFIG_TURBO = 0x92,
+ RTL8712_RFCONFIG_2T2R = 0x22
+};
+
+enum rf90_radio_path {
+ RF90_PATH_A = 0, /* Radio Path A */
+ RF90_PATH_B = 1, /* Radio Path B */
+ RF90_PATH_C = 2, /* Radio Path C */
+ RF90_PATH_D = 3 /* Radio Path D */
+};
+
+/* Bandwidth Offset */
+#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
+#define HAL_PRIME_CHNL_OFFSET_LOWER 1
+#define HAL_PRIME_CHNL_OFFSET_UPPER 2
+
+/* Represent Channel Width in HT Capabilities */
+/* */
+enum ht_channel_width {
+ HT_CHANNEL_WIDTH_20 = 0,
+ HT_CHANNEL_WIDTH_40 = 1,
+ HT_CHANNEL_WIDTH_80 = 2,
+ HT_CHANNEL_WIDTH_160 = 3,
+ HT_CHANNEL_WIDTH_10 = 4,
+};
+
+/* */
+/* Represent Extension Channel Offset in HT Capabilities */
+/* This is available only in 40 MHz mode. */
+/* */
+enum ht_extchnl_offset {
+ HT_EXTCHNL_OFFSET_NO_EXT = 0,
+ HT_EXTCHNL_OFFSET_UPPER = 1,
+ HT_EXTCHNL_OFFSET_NO_DEF = 2,
+ HT_EXTCHNL_OFFSET_LOWER = 3,
+};
+
+/* 2007/11/15 MH Define different RF type. */
+enum rt_rf_type_def {
+ RF_1T2R = 0,
+ RF_2T4R = 1,
+ RF_2T2R = 2,
+ RF_1T1R = 3,
+ RF_2T2R_GREEN = 4,
+ RF_819X_MAX_TYPE = 5,
+};
+
+u32 rtw_ch2freq(u32 ch);
+u32 rtw_freq2ch(u32 freq);
+
+
+#endif /* _RTL8711_RF_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_security.h b/drivers/staging/rtl8188eu/include/rtw_security.h
new file mode 100644
index 0000000..23c7814
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_security.h
@@ -0,0 +1,383 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_SECURITY_H_
+#define __RTW_SECURITY_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#define _NO_PRIVACY_ 0x0
+#define _WEP40_ 0x1
+#define _TKIP_ 0x2
+#define _TKIP_WTMIC_ 0x3
+#define _AES_ 0x4
+#define _WEP104_ 0x5
+#define _WEP_WPA_MIXED_ 0x07 /* WEP + WPA */
+#define _SMS4_ 0x06
+
+#define is_wep_enc(alg) (((alg) == _WEP40_) || ((alg) == _WEP104_))
+
+#define _WPA_IE_ID_ 0xdd
+#define _WPA2_IE_ID_ 0x30
+
+#define SHA256_MAC_LEN 32
+#define AES_BLOCK_SIZE 16
+#define AES_PRIV_SIZE (4 * 44)
+
+enum {
+ ENCRYP_PROTOCOL_OPENSYS, /* open system */
+ ENCRYP_PROTOCOL_WEP, /* WEP */
+ ENCRYP_PROTOCOL_WPA, /* WPA */
+ ENCRYP_PROTOCOL_WPA2, /* WPA2 */
+	ENCRYP_PROTOCOL_WAPI, /* WAPI: not supported in this version */
+ ENCRYP_PROTOCOL_MAX
+};
+
+
+#ifndef Ndis802_11AuthModeWPA2
+#define Ndis802_11AuthModeWPA2 (Ndis802_11AuthModeWPANone + 1)
+#endif
+
+#ifndef Ndis802_11AuthModeWPA2PSK
+#define Ndis802_11AuthModeWPA2PSK (Ndis802_11AuthModeWPANone + 2)
+#endif
+
+union pn48 {
+ u64 val;
+
+#ifdef __LITTLE_ENDIAN
+ struct {
+ u8 TSC0;
+ u8 TSC1;
+ u8 TSC2;
+ u8 TSC3;
+ u8 TSC4;
+ u8 TSC5;
+ u8 TSC6;
+ u8 TSC7;
+ } _byte_;
+
+#elif defined(__BIG_ENDIAN)
+
+ struct {
+ u8 TSC7;
+ u8 TSC6;
+ u8 TSC5;
+ u8 TSC4;
+ u8 TSC3;
+ u8 TSC2;
+ u8 TSC1;
+ u8 TSC0;
+ } _byte_;
+#endif
+};
+
+union Keytype {
+ u8 skey[16];
+ u32 lkey[4];
+};
+
+struct rt_pmkid_list {
+ u8 bUsed;
+ u8 Bssid[6];
+ u8 PMKID[16];
+ u8 SsidBuf[33];
+ u8 *ssid_octet;
+ u16 ssid_length;
+};
+
+struct security_priv {
+ u32 dot11AuthAlgrthm; /* 802.11 auth, could be open,
+ * shared, 8021x and authswitch */
+	u32	  dot11PrivacyAlgrthm;	/* This specifies the privacy
+					 * algorithm used for shared auth. */
+	/* WEP */
+	u32	  dot11PrivacyKeyIndex;	/* this is only valid for legacy
+					 * WEP, 0~3 for key id (tx key index) */
+ union Keytype dot11DefKey[4]; /* this is only valid for def. key */
+ u32 dot11DefKeylen[4];
+	u32	dot118021XGrpPrivacy;	/* This specifies the privacy
+					 * algorithm used for the group key */
+	u32	dot118021XGrpKeyid;	/* key id used for the group key
+					 * (tx key index) */
+ union Keytype dot118021XGrpKey[4]; /* 802.1x Group Key,
+ * for inx0 and inx1 */
+ union Keytype dot118021XGrptxmickey[4];
+ union Keytype dot118021XGrprxmickey[4];
+ union pn48 dot11Grptxpn; /* PN48 used for Grp Key xmit.*/
+ union pn48 dot11Grprxpn; /* PN48 used for Grp Key recv.*/
+#ifdef CONFIG_88EU_AP_MODE
+ /* extend security capabilities for AP_MODE */
+ unsigned int dot8021xalg;/* 0:disable, 1:psk, 2:802.1x */
+ unsigned int wpa_psk;/* 0:disable, bit(0): WPA, bit(1):WPA2 */
+ unsigned int wpa_group_cipher;
+ unsigned int wpa2_group_cipher;
+ unsigned int wpa_pairwise_cipher;
+ unsigned int wpa2_pairwise_cipher;
+#endif
+ u8 wps_ie[MAX_WPS_IE_LEN];/* added in assoc req */
+ int wps_ie_len;
+ u8 binstallGrpkey;
+ u8 busetkipkey;
+ u8 bcheck_grpkey;
+ u8 bgrpkey_handshake;
+ s32 sw_encrypt;/* from registry_priv */
+ s32 sw_decrypt;/* from registry_priv */
+	s32 hw_decrypted;/* if an rx packet has hw_decrypted == false,
+			  * it means the hw was not ready yet. */
+
+ /* keeps the auth_type & enc_status from upper layer
+ * ioctl(wpa_supplicant or wzc) */
+ u32 ndisauthtype; /* NDIS_802_11_AUTHENTICATION_MODE */
+ u32 ndisencryptstatus; /* NDIS_802_11_ENCRYPTION_STATUS */
+ struct wlan_bssid_ex sec_bss; /* for joinbss (h2c buffer) usage */
+ struct ndis_802_11_wep ndiswep;
+ u8 assoc_info[600];
+ u8 szofcapability[256]; /* for wpa2 usage */
+ u8 oidassociation[512]; /* for wpa/wpa2 usage */
+ u8 authenticator_ie[256]; /* store ap security information element */
+ u8 supplicant_ie[256]; /* store sta security information element */
+
+ /* for tkip countermeasure */
+ u32 last_mic_err_time;
+ u8 btkip_countermeasure;
+ u8 btkip_wait_report;
+ u32 btkip_countermeasure_time;
+
+ /* */
+ /* For WPA2 Pre-Authentication. */
+ /* */
+ struct rt_pmkid_list PMKIDList[NUM_PMKID_CACHE];
+ u8 PMKIDIndex;
+ u8 bWepDefaultKeyIdxSet;
+};
+
+struct sha256_state {
+ u64 length;
+ u32 state[8], curlen;
+ u8 buf[64];
+};
+
+#define GET_ENCRY_ALGO(psecuritypriv, psta, encry_algo, bmcst) \
+do { \
+ switch (psecuritypriv->dot11AuthAlgrthm) { \
+ case dot11AuthAlgrthm_Open: \
+ case dot11AuthAlgrthm_Shared: \
+ case dot11AuthAlgrthm_Auto: \
+ encry_algo = (u8)psecuritypriv->dot11PrivacyAlgrthm; \
+ break; \
+ case dot11AuthAlgrthm_8021X: \
+ if (bmcst) \
+ encry_algo = (u8)psecuritypriv->dot118021XGrpPrivacy;\
+ else \
+ encry_algo = (u8)psta->dot118021XPrivacy; \
+ break; \
+ case dot11AuthAlgrthm_WAPI: \
+ encry_algo = (u8)psecuritypriv->dot11PrivacyAlgrthm; \
+ break; \
+ } \
+} while (0)
+
+#define SET_ICE_IV_LEN(iv_len, icv_len, encrypt) \
+do { \
+ switch (encrypt) { \
+ case _WEP40_: \
+ case _WEP104_: \
+ iv_len = 4; \
+ icv_len = 4; \
+ break; \
+ case _TKIP_: \
+ iv_len = 8; \
+ icv_len = 4; \
+ break; \
+ case _AES_: \
+ iv_len = 8; \
+ icv_len = 8; \
+ break; \
+ case _SMS4_: \
+ iv_len = 18; \
+ icv_len = 16; \
+ break; \
+ default: \
+ iv_len = 0; \
+ icv_len = 0; \
+ break; \
+ } \
+} while (0)
+
+
+#define GET_TKIP_PN(iv, dot11txpn) \
+do { \
+ dot11txpn._byte_.TSC0 = iv[2]; \
+ dot11txpn._byte_.TSC1 = iv[0]; \
+ dot11txpn._byte_.TSC2 = iv[4]; \
+ dot11txpn._byte_.TSC3 = iv[5]; \
+ dot11txpn._byte_.TSC4 = iv[6]; \
+ dot11txpn._byte_.TSC5 = iv[7]; \
+} while (0)
+
+
+#define ROL32(A, n) (((A) << (n)) | (((A)>>(32-(n))) & ((1UL << (n)) - 1)))
+#define ROR32(A, n) ROL32((A), 32-(n))
+
+struct mic_data {
+ u32 K0, K1; /* Key */
+ u32 L, R; /* Current state */
+ u32 M; /* Message accumulator (single word) */
+ u32 nBytesInM; /* # bytes in M */
+};
+
+extern const u32 Te0[256];
+extern const u32 Te1[256];
+extern const u32 Te2[256];
+extern const u32 Te3[256];
+extern const u32 Te4[256];
+extern const u32 Td0[256];
+extern const u32 Td1[256];
+extern const u32 Td2[256];
+extern const u32 Td3[256];
+extern const u32 Td4[256];
+extern const u32 rcon[10];
+extern const u8 Td4s[256];
+extern const u8 rcons[10];
+
+#define RCON(i) (rcons[(i)] << 24)
+
+static inline u32 rotr(u32 val, int bits)
+{
+ return (val >> bits) | (val << (32 - bits));
+}
+
+#define TE0(i) Te0[((i) >> 24) & 0xff]
+#define TE1(i) rotr(Te0[((i) >> 16) & 0xff], 8)
+#define TE2(i) rotr(Te0[((i) >> 8) & 0xff], 16)
+#define TE3(i) rotr(Te0[(i) & 0xff], 24)
+#define TE41(i) ((Te0[((i) >> 24) & 0xff] << 8) & 0xff000000)
+#define TE42(i) (Te0[((i) >> 16) & 0xff] & 0x00ff0000)
+#define TE43(i) (Te0[((i) >> 8) & 0xff] & 0x0000ff00)
+#define TE44(i) ((Te0[(i) & 0xff] >> 8) & 0x000000ff)
+#define TE421(i) ((Te0[((i) >> 16) & 0xff] << 8) & 0xff000000)
+#define TE432(i) (Te0[((i) >> 8) & 0xff] & 0x00ff0000)
+#define TE443(i) (Te0[(i) & 0xff] & 0x0000ff00)
+#define TE414(i) ((Te0[((i) >> 24) & 0xff] >> 8) & 0x000000ff)
+#define TE4(i) ((Te0[(i)] >> 8) & 0x000000ff)
+
+#define TD0(i) Td0[((i) >> 24) & 0xff]
+#define TD1(i) rotr(Td0[((i) >> 16) & 0xff], 8)
+#define TD2(i) rotr(Td0[((i) >> 8) & 0xff], 16)
+#define TD3(i) rotr(Td0[(i) & 0xff], 24)
+#define TD41(i) (Td4s[((i) >> 24) & 0xff] << 24)
+#define TD42(i) (Td4s[((i) >> 16) & 0xff] << 16)
+#define TD43(i) (Td4s[((i) >> 8) & 0xff] << 8)
+#define TD44(i) (Td4s[(i) & 0xff])
+#define TD0_(i) Td0[(i) & 0xff]
+#define TD1_(i) rotr(Td0[(i) & 0xff], 8)
+#define TD2_(i) rotr(Td0[(i) & 0xff], 16)
+#define TD3_(i) rotr(Td0[(i) & 0xff], 24)
+
+#define GETU32(pt) (((u32)(pt)[0] << 24) ^ ((u32)(pt)[1] << 16) ^ \
+ ((u32)(pt)[2] << 8) ^ ((u32)(pt)[3]))
+
+#define PUTU32(ct, st) { \
+(ct)[0] = (u8)((st) >> 24); (ct)[1] = (u8)((st) >> 16); \
+(ct)[2] = (u8)((st) >> 8); (ct)[3] = (u8)(st); }
+
+#define WPA_GET_BE32(a) ((((u32)(a)[0]) << 24) | (((u32)(a)[1]) << 16) | \
+ (((u32)(a)[2]) << 8) | ((u32)(a)[3]))
+
+#define WPA_PUT_LE16(a, val) \
+ do { \
+ (a)[1] = ((u16)(val)) >> 8; \
+ (a)[0] = ((u16)(val)) & 0xff; \
+ } while (0)
+
+#define WPA_PUT_BE32(a, val) \
+ do { \
+ (a)[0] = (u8)((((u32)(val)) >> 24) & 0xff); \
+ (a)[1] = (u8)((((u32)(val)) >> 16) & 0xff); \
+ (a)[2] = (u8)((((u32)(val)) >> 8) & 0xff); \
+ (a)[3] = (u8)(((u32)(val)) & 0xff); \
+ } while (0)
+
+#define WPA_PUT_BE64(a, val) \
+ do { \
+ (a)[0] = (u8)(((u64)(val)) >> 56); \
+ (a)[1] = (u8)(((u64)(val)) >> 48); \
+ (a)[2] = (u8)(((u64)(val)) >> 40); \
+ (a)[3] = (u8)(((u64)(val)) >> 32); \
+ (a)[4] = (u8)(((u64)(val)) >> 24); \
+ (a)[5] = (u8)(((u64)(val)) >> 16); \
+ (a)[6] = (u8)(((u64)(val)) >> 8); \
+ (a)[7] = (u8)(((u64)(val)) & 0xff); \
+ } while (0)
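+
+/*
+ * Example (illustrative values): WPA_PUT_BE32(buf, 0x12345678) stores the
+ * bytes 12 34 56 78 in that order, and WPA_GET_BE32() on the same buffer
+ * returns 0x12345678 again; WPA_PUT_LE16(buf, 0x1234) stores 34 12.
+ */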
+
+/* ===== start - public domain SHA256 implementation ===== */
+
+/* This is based on SHA256 implementation in LibTomCrypt that was released into
+ * public domain by Tom St Denis. */
+
+/* the K array */
+static const unsigned long K[64] = {
+ 0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL, 0x3956c25bUL,
+ 0x59f111f1UL, 0x923f82a4UL, 0xab1c5ed5UL, 0xd807aa98UL, 0x12835b01UL,
+ 0x243185beUL, 0x550c7dc3UL, 0x72be5d74UL, 0x80deb1feUL, 0x9bdc06a7UL,
+ 0xc19bf174UL, 0xe49b69c1UL, 0xefbe4786UL, 0x0fc19dc6UL, 0x240ca1ccUL,
+ 0x2de92c6fUL, 0x4a7484aaUL, 0x5cb0a9dcUL, 0x76f988daUL, 0x983e5152UL,
+ 0xa831c66dUL, 0xb00327c8UL, 0xbf597fc7UL, 0xc6e00bf3UL, 0xd5a79147UL,
+ 0x06ca6351UL, 0x14292967UL, 0x27b70a85UL, 0x2e1b2138UL, 0x4d2c6dfcUL,
+ 0x53380d13UL, 0x650a7354UL, 0x766a0abbUL, 0x81c2c92eUL, 0x92722c85UL,
+ 0xa2bfe8a1UL, 0xa81a664bUL, 0xc24b8b70UL, 0xc76c51a3UL, 0xd192e819UL,
+ 0xd6990624UL, 0xf40e3585UL, 0x106aa070UL, 0x19a4c116UL, 0x1e376c08UL,
+ 0x2748774cUL, 0x34b0bcb5UL, 0x391c0cb3UL, 0x4ed8aa4aUL, 0x5b9cca4fUL,
+ 0x682e6ff3UL, 0x748f82eeUL, 0x78a5636fUL, 0x84c87814UL, 0x8cc70208UL,
+ 0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL
+};
+
+/* Various logical functions */
+#define RORc(x, y) \
+ (((((unsigned long)(x) & 0xFFFFFFFFUL) >> (unsigned long)((y)&31)) | \
+ ((unsigned long)(x) << (unsigned long)(32-((y)&31)))) & 0xFFFFFFFFUL)
+#define Ch(x, y, z) (z ^ (x & (y ^ z)))
+#define Maj(x, y, z) (((x | y) & z) | (x & y))
+#define S(x, n) RORc((x), (n))
+#define R(x, n) (((x)&0xFFFFFFFFUL)>>(n))
+#define Sigma0(x) (S(x, 2) ^ S(x, 13) ^ S(x, 22))
+#define Sigma1(x) (S(x, 6) ^ S(x, 11) ^ S(x, 25))
+#define Gamma0(x) (S(x, 7) ^ S(x, 18) ^ R(x, 3))
+#define Gamma1(x) (S(x, 17) ^ S(x, 19) ^ R(x, 10))
+#ifndef MIN
+#define MIN(x, y) (((x) < (y)) ? (x) : (y))
+#endif
+
+void rtw_secmicsetkey(struct mic_data *pmicdata, u8 *key);
+void rtw_secmicappendbyte(struct mic_data *pmicdata, u8 b);
+void rtw_secmicappend(struct mic_data *pmicdata, u8 *src, u32 nBytes);
+void rtw_secgetmic(struct mic_data *pmicdata, u8 *dst);
+void rtw_seccalctkipmic(u8 *key, u8 *header, u8 *data, u32 data_len,
+ u8 *Miccode, u8 priority);
+u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe);
+u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe);
+void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe);
+u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe);
+u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe);
+void rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe);
+void rtw_use_tkipkey_handler(void *FunctionContext);
+
+#endif /* __RTL871X_SECURITY_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_sreset.h b/drivers/staging/rtl8188eu/include/rtw_sreset.h
new file mode 100644
index 0000000..2a1244f
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_sreset.h
@@ -0,0 +1,50 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _RTW_SRESET_C_
+#define _RTW_SRESET_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+struct sreset_priv {
+ struct mutex silentreset_mutex;
+ u8 silent_reset_inprogress;
+ u8 Wifi_Error_Status;
+ unsigned long last_tx_time;
+ unsigned long last_tx_complete_time;
+};
+
+#include <rtl8188e_hal.h>
+
+#define WIFI_STATUS_SUCCESS 0
+#define USB_VEN_REQ_CMD_FAIL BIT0
+#define USB_READ_PORT_FAIL BIT1
+#define USB_WRITE_PORT_FAIL BIT2
+#define WIFI_MAC_TXDMA_ERROR BIT3
+#define WIFI_TX_HANG BIT4
+#define WIFI_RX_HANG BIT5
+#define WIFI_IF_NOT_EXIST BIT6
+
+void sreset_init_value(struct adapter *padapter);
+void sreset_reset_value(struct adapter *padapter);
+u8 sreset_get_wifi_status(struct adapter *padapter);
+void sreset_set_wifi_error_status(struct adapter *padapter, u32 status);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/rtw_version.h b/drivers/staging/rtl8188eu/include/rtw_version.h
new file mode 100644
index 0000000..6d2d52c
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_version.h
@@ -0,0 +1 @@
+#define DRIVERVERSION "v4.1.4_6773.20130222"
diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h
new file mode 100644
index 0000000..1ac1dd3
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h
@@ -0,0 +1,384 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _RTW_XMIT_H_
+#define _RTW_XMIT_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#define MAX_XMITBUF_SZ (20480) /* 20k */
+#define NR_XMITBUFF (4)
+
+#define XMITBUF_ALIGN_SZ 4
+
+/* xmit extension buffer definition */
+#define MAX_XMIT_EXTBUF_SZ (1536)
+#define NR_XMIT_EXTBUFF (32)
+
+#define MAX_NUMBLKS (1)
+
+#define XMIT_VO_QUEUE (0)
+#define XMIT_VI_QUEUE (1)
+#define XMIT_BE_QUEUE (2)
+#define XMIT_BK_QUEUE (3)
+
+#define VO_QUEUE_INX 0
+#define VI_QUEUE_INX 1
+#define BE_QUEUE_INX 2
+#define BK_QUEUE_INX 3
+#define BCN_QUEUE_INX 4
+#define MGT_QUEUE_INX 5
+#define HIGH_QUEUE_INX 6
+#define TXCMD_QUEUE_INX 7
+
+#define HW_QUEUE_ENTRY 8
+
+#define WEP_IV(pattrib_iv, dot11txpn, keyidx)\
+do {\
+ pattrib_iv[0] = dot11txpn._byte_.TSC0;\
+ pattrib_iv[1] = dot11txpn._byte_.TSC1;\
+ pattrib_iv[2] = dot11txpn._byte_.TSC2;\
+ pattrib_iv[3] = ((keyidx & 0x3)<<6);\
+ dot11txpn.val = (dot11txpn.val == 0xffffff) ? 0 : (dot11txpn.val+1);\
+} while (0)
+
+
+#define TKIP_IV(pattrib_iv, dot11txpn, keyidx)\
+do {\
+ pattrib_iv[0] = dot11txpn._byte_.TSC1;\
+ pattrib_iv[1] = (dot11txpn._byte_.TSC1 | 0x20) & 0x7f;\
+ pattrib_iv[2] = dot11txpn._byte_.TSC0;\
+ pattrib_iv[3] = BIT(5) | ((keyidx & 0x3)<<6);\
+ pattrib_iv[4] = dot11txpn._byte_.TSC2;\
+ pattrib_iv[5] = dot11txpn._byte_.TSC3;\
+ pattrib_iv[6] = dot11txpn._byte_.TSC4;\
+ pattrib_iv[7] = dot11txpn._byte_.TSC5;\
+ dot11txpn.val = dot11txpn.val == 0xffffffffffffULL ? 0 : (dot11txpn.val+1);\
+} while (0)
+
+#define AES_IV(pattrib_iv, dot11txpn, keyidx)\
+do { \
+ pattrib_iv[0] = dot11txpn._byte_.TSC0; \
+ pattrib_iv[1] = dot11txpn._byte_.TSC1; \
+ pattrib_iv[2] = 0; \
+ pattrib_iv[3] = BIT(5) | ((keyidx & 0x3)<<6); \
+ pattrib_iv[4] = dot11txpn._byte_.TSC2; \
+ pattrib_iv[5] = dot11txpn._byte_.TSC3; \
+ pattrib_iv[6] = dot11txpn._byte_.TSC4; \
+ pattrib_iv[7] = dot11txpn._byte_.TSC5; \
+ dot11txpn.val = dot11txpn.val == 0xffffffffffffULL ? 0 : (dot11txpn.val+1);\
+} while (0)
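+
+/*
+ * Illustration (assumed values): with keyidx 0 and dot11txpn.val ==
+ * 0x010203040506 (so TSC0..TSC5 = 06 05 04 03 02 01 on little endian),
+ * AES_IV() builds the 8-byte CCMP-style header
+ *	06 05 00 20 04 03 02 01
+ * where byte 3 carries the ExtIV bit (BIT(5)) plus the key id, and the PN
+ * is then incremented modulo 2^48.
+ */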
+
+#define HWXMIT_ENTRY 4
+
+#define TXDESC_SIZE 32
+
+#define PACKET_OFFSET_SZ (8)
+#define TXDESC_OFFSET (TXDESC_SIZE + PACKET_OFFSET_SZ)
+
+struct tx_desc {
+ /* DWORD 0 */
+ __le32 txdw0;
+ __le32 txdw1;
+ __le32 txdw2;
+ __le32 txdw3;
+ __le32 txdw4;
+ __le32 txdw5;
+ __le32 txdw6;
+ __le32 txdw7;
+};
+
+union txdesc {
+ struct tx_desc txdesc;
+ unsigned int value[TXDESC_SIZE>>2];
+};
+
+struct hw_xmit {
+ struct __queue *sta_queue;
+ int accnt;
+};
+
+/* reduce size */
+struct pkt_attrib {
+ u8 type;
+ u8 subtype;
+ u8 bswenc;
+ u8 dhcp_pkt;
+ u16 ether_type;
+ u16 seqnum;
+ u16 pkt_hdrlen; /* the original 802.3 pkt header len */
+ u16 hdrlen; /* the WLAN Header Len */
+	u32 pktlen; /* the original 802.3 pkt raw_data len (not including
+		     * ether_hdr data) */
+ u32 last_txcmdsz;
+ u8 nr_frags;
+	u8 encrypt; /* when 0, no encryption; when non-zero, it
+		     * indicates the encryption algorithm */
+ u8 iv_len;
+ u8 icv_len;
+ u8 iv[18];
+ u8 icv[16];
+ u8 priority;
+ u8 ack_policy;
+ u8 mac_id;
+ u8 vcs_mode; /* virtual carrier sense method */
+ u8 dst[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ u8 ta[ETH_ALEN];
+ u8 ra[ETH_ALEN];
+ u8 key_idx;
+ u8 qos_en;
+ u8 ht_en;
+	u8 raid;/* rate adaptive id */
+ u8 bwmode;
+ u8 ch_offset;/* PRIME_CHNL_OFFSET */
+ u8 sgi;/* short GI */
+ u8 ampdu_en;/* tx ampdu enable */
+ u8 mdata;/* more data bit */
+ u8 pctrl;/* per packet txdesc control enable */
+ u8 triggered;/* for ap mode handling Power Saving sta */
+ u8 qsel;
+ u8 eosp;
+ u8 rate;
+ u8 intel_proxim;
+ u8 retry_ctrl;
+ struct sta_info *psta;
+};
+
+#define WLANHDR_OFFSET 64
+
+#define NULL_FRAMETAG (0x0)
+#define DATA_FRAMETAG 0x01
+#define L2_FRAMETAG 0x02
+#define MGNT_FRAMETAG 0x03
+#define AMSDU_FRAMETAG 0x04
+
+#define EII_FRAMETAG 0x05
+#define IEEE8023_FRAMETAG 0x06
+
+#define MP_FRAMETAG 0x07
+
+#define TXAGG_FRAMETAG 0x08
+
+struct submit_ctx {
+ u32 submit_time; /* */
+ u32 timeout_ms; /* <0: not synchronous, 0: wait forever, >0: up to ms waiting */
+ int status; /* status for operation */
+ struct completion done;
+};
+
+enum {
+ RTW_SCTX_SUBMITTED = -1,
+ RTW_SCTX_DONE_SUCCESS = 0,
+ RTW_SCTX_DONE_UNKNOWN,
+ RTW_SCTX_DONE_TIMEOUT,
+ RTW_SCTX_DONE_BUF_ALLOC,
+ RTW_SCTX_DONE_BUF_FREE,
+ RTW_SCTX_DONE_WRITE_PORT_ERR,
+ RTW_SCTX_DONE_TX_DESC_NA,
+ RTW_SCTX_DONE_TX_DENY,
+ RTW_SCTX_DONE_CCX_PKT_FAIL,
+ RTW_SCTX_DONE_DRV_STOP,
+ RTW_SCTX_DONE_DEV_REMOVE,
+};
+
+void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms);
+int rtw_sctx_wait(struct submit_ctx *sctx);
+void rtw_sctx_done_err(struct submit_ctx **sctx, int status);
+void rtw_sctx_done(struct submit_ctx **sctx);
+
+struct xmit_buf {
+ struct list_head list;
+ struct adapter *padapter;
+ u8 *pallocated_buf;
+ u8 *pbuf;
+ void *priv_data;
+ u16 ext_tag; /* 0: Normal xmitbuf, 1: extension xmitbuf. */
+ u16 flags;
+ u32 alloc_sz;
+ u32 len;
+ struct submit_ctx *sctx;
+ u32 ff_hwaddr;
+ struct urb *pxmit_urb[8];
+ dma_addr_t dma_transfer_addr; /* (in) dma addr for transfer_buffer */
+ u8 bpending[8];
+ int last[8];
+};
+
+struct xmit_frame {
+ struct list_head list;
+ struct pkt_attrib attrib;
+ struct sk_buff *pkt;
+ int frame_tag;
+ struct adapter *padapter;
+ u8 *buf_addr;
+ struct xmit_buf *pxmitbuf;
+
+ u8 agg_num;
+ s8 pkt_offset;
+ u8 ack_report;
+};
+
+struct tx_servq {
+ struct list_head tx_pending;
+ struct __queue sta_pending;
+ int qcnt;
+};
+
+struct sta_xmit_priv {
+ spinlock_t lock;
+ int option;
+ int apsd_setting; /* When bit mask is on, the associated edca
+ * queue supports APSD. */
+ struct tx_servq be_q; /* priority == 0,3 */
+ struct tx_servq bk_q; /* priority == 1,2 */
+ struct tx_servq vi_q; /* priority == 4,5 */
+ struct tx_servq vo_q; /* priority == 6,7 */
+ struct list_head legacy_dz;
+ struct list_head apsd;
+ u16 txseq_tid[16];
+};
+
+struct hw_txqueue {
+ volatile int head;
+ volatile int tail;
+ volatile int free_sz; /* in units of 64 bytes */
+ volatile int free_cmdsz;
+ volatile int txsz[8];
+ uint ff_hwaddr;
+ uint cmd_hwaddr;
+ int ac_tag;
+};
+
+struct agg_pkt_info {
+ u16 offset;
+ u16 pkt_len;
+};
+
+struct xmit_priv {
+ spinlock_t lock;
+ struct semaphore xmit_sema;
+ struct semaphore terminate_xmitthread_sema;
+ struct __queue be_pending;
+ struct __queue bk_pending;
+ struct __queue vi_pending;
+ struct __queue vo_pending;
+ struct __queue bm_pending;
+ u8 *pallocated_frame_buf;
+ u8 *pxmit_frame_buf;
+ uint free_xmitframe_cnt;
+ struct __queue free_xmit_queue;
+ uint frag_len;
+ struct adapter *adapter;
+ u8 vcs_setting;
+ u8 vcs;
+ u8 vcs_type;
+ u64 tx_bytes;
+ u64 tx_pkts;
+ u64 tx_drop;
+ u64 last_tx_bytes;
+ u64 last_tx_pkts;
+ struct hw_xmit *hwxmits;
+ u8 hwxmit_entry;
+	u8	wmm_para_seq[4];/* sequence of wmm ac parameter strength
+				 * from large to small. Its value is 0->vo,
+				 * 1->vi, 2->be, 3->bk. */
+ struct semaphore tx_retevt;/* all tx return event; */
+ u8 txirp_cnt;/* */
+ struct tasklet_struct xmit_tasklet;
+ /* per AC pending irp */
+ int beq_cnt;
+ int bkq_cnt;
+ int viq_cnt;
+ int voq_cnt;
+ struct __queue free_xmitbuf_queue;
+ struct __queue pending_xmitbuf_queue;
+ u8 *pallocated_xmitbuf;
+ u8 *pxmitbuf;
+ uint free_xmitbuf_cnt;
+ struct __queue free_xmit_extbuf_queue;
+ u8 *pallocated_xmit_extbuf;
+ u8 *pxmit_extbuf;
+ uint free_xmit_extbuf_cnt;
+ u16 nqos_ssn;
+ int ack_tx;
+ struct mutex ack_tx_mutex;
+ struct submit_ctx ack_tx_ops;
+};
+
+struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv);
+s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv,
+ struct xmit_buf *pxmitbuf);
+struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv);
+s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv,
+ struct xmit_buf *pxmitbuf);
+void rtw_count_tx_stats(struct adapter *padapter,
+ struct xmit_frame *pxmitframe, int sz);
+void rtw_update_protection(struct adapter *padapter, u8 *ie, uint ie_len);
+s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr,
+ struct pkt_attrib *pattrib);
+s32 rtw_put_snap(u8 *data, u16 h_proto);
+
+struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv);
+s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv,
+ struct xmit_frame *pxmitframe);
+void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv,
+ struct __queue *pframequeue);
+struct tx_servq *rtw_get_sta_pending(struct adapter *padapter,
+ struct sta_info *psta, int up, u8 *ac);
+s32 rtw_xmitframe_enqueue(struct adapter *padapter,
+ struct xmit_frame *pxmitframe);
+struct xmit_frame *rtw_dequeue_xframe(struct xmit_priv *pxmitpriv,
+ struct hw_xmit *phwxmit_i, int entry);
+
+s32 rtw_xmit_classifier(struct adapter *padapter,
+ struct xmit_frame *pxmitframe);
+u32 rtw_calculate_wlan_pkt_size_by_attribue(struct pkt_attrib *pattrib);
+#define rtw_wlan_pkt_size(f) rtw_calculate_wlan_pkt_size_by_attribue(&f->attrib)
+s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt,
+ struct xmit_frame *pxmitframe);
+s32 _rtw_init_hw_txqueue(struct hw_txqueue *phw_txqueue, u8 ac_tag);
+void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv);
+s32 rtw_txframes_pending(struct adapter *padapter);
+s32 rtw_txframes_sta_ac_pending(struct adapter *padapter,
+ struct pkt_attrib *pattrib);
+void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry);
+s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
+void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv);
+void rtw_alloc_hwxmits(struct adapter *padapter);
+void rtw_free_hwxmits(struct adapter *padapter);
+s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt);
+
+#if defined(CONFIG_88EU_AP_MODE)
+int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_frame *pxmitframe);
+void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta);
+void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta);
+void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *psta);
+#endif
+
+u8 qos_acm(u8 acm_mask, u8 priority);
+u32 rtw_get_ff_hwaddr(struct xmit_frame *pxmitframe);
+int rtw_ack_tx_wait(struct xmit_priv *pxmitpriv, u32 timeout_ms);
+void rtw_ack_tx_done(struct xmit_priv *pxmitpriv, int status);
+
+/* include after declaring struct xmit_buf, in order to avoid warning */
+#include <xmit_osdep.h>
+
+#endif /* _RTL871X_XMIT_H_ */
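The submit_ctx declarations above (rtw_sctx_init(), rtw_sctx_wait(), rtw_sctx_done_err()) form a small wait/complete wrapper around a struct completion for asynchronous TX submissions. A minimal usage sketch, assuming only the call pattern implied by the prototypes and by xmit_buf->sctx (the real callers live in the driver's .c files, which are not part of this hunk); example_issue_and_wait() and example_complete() are illustrative names only:

    /* Sketch: issuer initialises and waits; the completing side reports a status. */
    static int example_issue_and_wait(struct xmit_buf *pxmitbuf)
    {
    	struct submit_ctx sctx;

    	rtw_sctx_init(&sctx, 2000);	/* wait up to 2000 ms */
    	pxmitbuf->sctx = &sctx;		/* hand the context to the TX path */
    	/* ... submit pxmitbuf to the hardware queue here ... */
    	return rtw_sctx_wait(&sctx);	/* RTW_SCTX_DONE_SUCCESS (0) on success */
    }

    static void example_complete(struct xmit_buf *pxmitbuf, int ok)
    {
    	/* the double pointer presumably lets the callee clear the reference */
    	rtw_sctx_done_err(&pxmitbuf->sctx,
    			  ok ? RTW_SCTX_DONE_SUCCESS
    			     : RTW_SCTX_DONE_WRITE_PORT_ERR);
    }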
diff --git a/drivers/staging/rtl8188eu/include/sta_info.h b/drivers/staging/rtl8188eu/include/sta_info.h
new file mode 100644
index 0000000..3ed2a39
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/sta_info.h
@@ -0,0 +1,384 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __STA_INFO_H_
+#define __STA_INFO_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <wifi.h>
+
+#define IBSS_START_MAC_ID 2
+#define NUM_STA 32
+#define NUM_ACL 16
+
+/* If mode == 0, the sta is allowed once its addr is hit. */
+/* If mode == 1, the sta is rejected once its addr is not hit. */
+struct rtw_wlan_acl_node {
+ struct list_head list;
+ u8 addr[ETH_ALEN];
+ u8 valid;
+};
+
+/* mode=0, disable */
+/* mode=1, accept unless in deny list */
+/* mode=2, deny unless in accept list */
+struct wlan_acl_pool {
+ int mode;
+ int num;
+ struct rtw_wlan_acl_node aclnode[NUM_ACL];
+ struct __queue acl_node_q;
+};
+
+struct rssi_sta {
+ s32 UndecoratedSmoothedPWDB;
+ s32 UndecoratedSmoothedCCK;
+ s32 UndecoratedSmoothedOFDM;
+ u64 PacketMap;
+ u8 ValidBit;
+};
+
+struct stainfo_stats {
+ u64 rx_mgnt_pkts;
+ u64 rx_beacon_pkts;
+ u64 rx_probereq_pkts;
+ u64 rx_probersp_pkts;
+ u64 rx_probersp_bm_pkts;
+ u64 rx_probersp_uo_pkts;
+ u64 rx_ctrl_pkts;
+ u64 rx_data_pkts;
+
+ u64 last_rx_mgnt_pkts;
+ u64 last_rx_beacon_pkts;
+ u64 last_rx_probereq_pkts;
+ u64 last_rx_probersp_pkts;
+ u64 last_rx_probersp_bm_pkts;
+ u64 last_rx_probersp_uo_pkts;
+ u64 last_rx_ctrl_pkts;
+ u64 last_rx_data_pkts;
+ u64 rx_bytes;
+ u64 rx_drops;
+ u64 tx_pkts;
+ u64 tx_bytes;
+ u64 tx_drops;
+};
+
+struct sta_info {
+ spinlock_t lock;
+ struct list_head list; /* free_sta_queue */
+ struct list_head hash_list; /* sta_hash */
+
+ struct sta_xmit_priv sta_xmitpriv;
+ struct sta_recv_priv sta_recvpriv;
+
+ struct __queue sleep_q;
+ unsigned int sleepq_len;
+
+ uint state;
+ uint aid;
+ uint mac_id;
+ uint qos_option;
+ u8 hwaddr[ETH_ALEN];
+
+ uint ieee8021x_blocked; /* 0: allowed, 1:blocked */
+ uint dot118021XPrivacy; /* aes, tkip... */
+ union Keytype dot11tkiptxmickey;
+ union Keytype dot11tkiprxmickey;
+ union Keytype dot118021x_UncstKey;
+ union pn48 dot11txpn; /* PN48 used for Unicast xmit. */
+ union pn48 dot11rxpn; /* PN48 used for Unicast recv. */
+ u8 bssrateset[16];
+ u32 bssratelen;
+ s32 rssi;
+ s32 signal_quality;
+
+ u8 cts2self;
+ u8 rtsen;
+
+ u8 raid;
+ u8 init_rate;
+ u32 ra_mask;
+ u8 wireless_mode; /* NETWORK_TYPE */
+ struct stainfo_stats sta_stats;
+
+ /* for A-MPDU TX, ADDBA timeout check */
+ struct timer_list addba_retry_timer;
+
+ /* for A-MPDU Rx reordering buffer control */
+ struct recv_reorder_ctrl recvreorder_ctrl[16];
+
+ /* for A-MPDU Tx */
+ /* unsigned char ampdu_txen_bitmap; */
+ u16 BA_starting_seqctrl[16];
+
+ struct ht_priv htpriv;
+
+ /* Notes: */
+ /* STA_Mode: */
+ /* curr_network(mlme_priv/security_priv/qos/ht) +
+ * sta_info: (STA & AP) CAP/INFO */
+ /* scan_q: AP CAP/INFO */
+
+ /* AP_Mode: */
+ /* curr_network(mlme_priv/security_priv/qos/ht) : AP CAP/INFO */
+ /* sta_info: (AP & STA) CAP/INFO */
+
+ struct list_head asoc_list;
+#ifdef CONFIG_88EU_AP_MODE
+ struct list_head auth_list;
+
+ unsigned int expire_to;
+ unsigned int auth_seq;
+ unsigned int authalg;
+ unsigned char chg_txt[128];
+
+ u16 capability;
+ int flags;
+
+ int dot8021xalg;/* 0:disable, 1:psk, 2:802.1x */
+ int wpa_psk;/* 0:disable, bit(0): WPA, bit(1):WPA2 */
+ int wpa_group_cipher;
+ int wpa2_group_cipher;
+ int wpa_pairwise_cipher;
+ int wpa2_pairwise_cipher;
+
+ u8 bpairwise_key_installed;
+
+ u8 wpa_ie[32];
+
+ u8 nonerp_set;
+ u8 no_short_slot_time_set;
+ u8 no_short_preamble_set;
+ u8 no_ht_gf_set;
+ u8 no_ht_set;
+ u8 ht_20mhz_set;
+
+ unsigned int tx_ra_bitmap;
+ u8 qos_info;
+
+ u8 max_sp_len;
+ u8 uapsd_bk;/* BIT(0): Delivery enabled, BIT(1): Trigger enabled */
+ u8 uapsd_be;
+ u8 uapsd_vi;
+ u8 uapsd_vo;
+
+ u8 has_legacy_ac;
+ unsigned int sleepq_ac_len;
+#endif /* CONFIG_88EU_AP_MODE */
+
+#ifdef CONFIG_88EU_P2P
+ /* p2p priv data */
+ u8 is_p2p_device;
+ u8 p2p_status_code;
+
+ /* p2p client info */
+ u8 dev_addr[ETH_ALEN];
+ u8 dev_cap;
+ u16 config_methods;
+ u8 primary_dev_type[8];
+ u8 num_of_secdev_type;
+ u8 secdev_types_list[32];/* 32/8 == 4; */
+ u16 dev_name_len;
+ u8 dev_name[32];
+#endif /* CONFIG_88EU_P2P */
+ u8 under_exist_checking;
+ u8 keep_alive_trycnt;
+
+ /* for DM */
+ struct rssi_sta rssi_stat;
+
+	/* ================ODM-Related Info======================= */
+	/* Please be careful not to declare too many members here;
+	 * the memory cost is multiplied by the number of supported STAs. */
+ /* 2011/10/20 MH Add for ODM STA info. */
+ /* Driver Write */
+	u8 bValid;		/* record whether the sta is linked or not */
+ u8 IOTPeer; /* Enum value. HT_IOT_PEER_E */
+ u8 rssi_level; /* for Refresh RA mask */
+ /* ODM Write */
+ /* 1 PHY_STATUS_INFO */
+ u8 RSSI_Path[4]; /* */
+ u8 RSSI_Ave;
+ u8 RXEVM[4];
+ u8 RXSNR[4];
+
+	/* ================ODM-Related Info======================= */
+ /* */
+
+ /* To store the sequence number of received management frame */
+ u16 RxMgmtFrameSeqNum;
+};
+
+#define sta_rx_pkts(sta) \
+ (sta->sta_stats.rx_mgnt_pkts \
+ + sta->sta_stats.rx_ctrl_pkts \
+ + sta->sta_stats.rx_data_pkts)
+
+#define sta_last_rx_pkts(sta) \
+ (sta->sta_stats.last_rx_mgnt_pkts \
+ + sta->sta_stats.last_rx_ctrl_pkts \
+ + sta->sta_stats.last_rx_data_pkts)
+
+#define sta_rx_data_pkts(sta) \
+ (sta->sta_stats.rx_data_pkts)
+
+#define sta_last_rx_data_pkts(sta) \
+ (sta->sta_stats.last_rx_data_pkts)
+
+#define sta_rx_mgnt_pkts(sta) \
+ (sta->sta_stats.rx_mgnt_pkts)
+
+#define sta_last_rx_mgnt_pkts(sta) \
+ (sta->sta_stats.last_rx_mgnt_pkts)
+
+#define sta_rx_beacon_pkts(sta) \
+ (sta->sta_stats.rx_beacon_pkts)
+
+#define sta_last_rx_beacon_pkts(sta) \
+ (sta->sta_stats.last_rx_beacon_pkts)
+
+#define sta_rx_probereq_pkts(sta) \
+ (sta->sta_stats.rx_probereq_pkts)
+
+#define sta_last_rx_probereq_pkts(sta) \
+ (sta->sta_stats.last_rx_probereq_pkts)
+
+#define sta_rx_probersp_pkts(sta) \
+ (sta->sta_stats.rx_probersp_pkts)
+
+#define sta_last_rx_probersp_pkts(sta) \
+ (sta->sta_stats.last_rx_probersp_pkts)
+
+#define sta_rx_probersp_bm_pkts(sta) \
+ (sta->sta_stats.rx_probersp_bm_pkts)
+
+#define sta_last_rx_probersp_bm_pkts(sta) \
+ (sta->sta_stats.last_rx_probersp_bm_pkts)
+
+#define sta_rx_probersp_uo_pkts(sta) \
+ (sta->sta_stats.rx_probersp_uo_pkts)
+
+#define sta_last_rx_probersp_uo_pkts(sta) \
+ (sta->sta_stats.last_rx_probersp_uo_pkts)
+
+#define sta_update_last_rx_pkts(sta) \
+do { \
+ sta->sta_stats.last_rx_mgnt_pkts = sta->sta_stats.rx_mgnt_pkts; \
+ sta->sta_stats.last_rx_beacon_pkts = sta->sta_stats.rx_beacon_pkts; \
+ sta->sta_stats.last_rx_probereq_pkts = sta->sta_stats.rx_probereq_pkts; \
+ sta->sta_stats.last_rx_probersp_pkts = sta->sta_stats.rx_probersp_pkts; \
+ sta->sta_stats.last_rx_probersp_bm_pkts = sta->sta_stats.rx_probersp_bm_pkts; \
+ sta->sta_stats.last_rx_probersp_uo_pkts = sta->sta_stats.rx_probersp_uo_pkts; \
+ sta->sta_stats.last_rx_ctrl_pkts = sta->sta_stats.rx_ctrl_pkts; \
+ sta->sta_stats.last_rx_data_pkts = sta->sta_stats.rx_data_pkts; \
+} while (0)
+
+#define STA_RX_PKTS_ARG(sta) \
+ sta->sta_stats.rx_mgnt_pkts \
+ , sta->sta_stats.rx_ctrl_pkts \
+ , sta->sta_stats.rx_data_pkts
+
+#define STA_LAST_RX_PKTS_ARG(sta) \
+ sta->sta_stats.last_rx_mgnt_pkts \
+ , sta->sta_stats.last_rx_ctrl_pkts \
+ , sta->sta_stats.last_rx_data_pkts
+
+#define STA_RX_PKTS_DIFF_ARG(sta) \
+ sta->sta_stats.rx_mgnt_pkts - sta->sta_stats.last_rx_mgnt_pkts \
+ , sta->sta_stats.rx_ctrl_pkts - sta->sta_stats.last_rx_ctrl_pkts \
+ , sta->sta_stats.rx_data_pkts - sta->sta_stats.last_rx_data_pkts
+
+#define STA_PKTS_FMT "(m:%llu, c:%llu, d:%llu)"
+
+struct sta_priv {
+ u8 *pallocated_stainfo_buf;
+ u8 *pstainfo_buf;
+ struct __queue free_sta_queue;
+
+ spinlock_t sta_hash_lock;
+ struct list_head sta_hash[NUM_STA];
+ int asoc_sta_count;
+ struct __queue sleep_q;
+ struct __queue wakeup_q;
+
+ struct adapter *padapter;
+
+ spinlock_t asoc_list_lock;
+ struct list_head asoc_list;
+
+#ifdef CONFIG_88EU_AP_MODE
+ struct list_head auth_list;
+ spinlock_t auth_list_lock;
+ u8 asoc_list_cnt;
+ u8 auth_list_cnt;
+
+ unsigned int auth_to; /* sec, time to expire in authenticating. */
+ unsigned int assoc_to; /* sec, time to expire before associating. */
+ unsigned int expire_to; /* sec , time to expire after associated. */
+
+	/* pointers to STA info, indexed by allocated AID, or NULL if the AID
+	 * is free. AID is in the range 1-2007, so sta_aid[0] corresponds to
+	 * AID 1, and so on
+ */
+ struct sta_info *sta_aid[NUM_STA];
+
+	u16 sta_dz_bitmap;/* only supports 15 stations; station aid bitmap
+			   * for sleeping stas. */
+	u16 tim_bitmap;	/* only supports 15 stations; aid 0~15 maps to
+			 * bit0~bit15 */
+
+ u16 max_num_sta;
+
+ struct wlan_acl_pool acl_list;
+#endif
+
+};
+
+static inline u32 wifi_mac_hash(u8 *mac)
+{
+ u32 x;
+
+ x = mac[0];
+ x = (x << 2) ^ mac[1];
+ x = (x << 2) ^ mac[2];
+ x = (x << 2) ^ mac[3];
+ x = (x << 2) ^ mac[4];
+ x = (x << 2) ^ mac[5];
+
+ x ^= x >> 8;
+ x = x & (NUM_STA - 1);
+ return x;
+}
+
+extern u32 _rtw_init_sta_priv(struct sta_priv *pstapriv);
+extern u32 _rtw_free_sta_priv(struct sta_priv *pstapriv);
+
+#define stainfo_offset_valid(offset) (offset < NUM_STA && offset >= 0)
+int rtw_stainfo_offset(struct sta_priv *stapriv, struct sta_info *sta);
+struct sta_info *rtw_get_stainfo_by_offset(struct sta_priv *stapriv, int off);
+
+extern struct sta_info *rtw_alloc_stainfo(struct sta_priv *stapriv, u8 *hwaddr);
+extern u32 rtw_free_stainfo(struct adapter *adapt, struct sta_info *psta);
+extern void rtw_free_all_stainfo(struct adapter *adapt);
+extern struct sta_info *rtw_get_stainfo(struct sta_priv *stapriv, u8 *hwaddr);
+extern u32 rtw_init_bcmc_stainfo(struct adapter *adapt);
+extern struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter);
+extern u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr);
+
+#endif /* _STA_INFO_H_ */
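The wlan_acl_pool comments above describe three ACL modes (0: disabled, 1: accept unless the address is in the list, 2: deny unless the address is in the list), with rtw_access_ctrl() declared as the checking entry point. A minimal sketch of how such a check could honour those modes, assuming for simplicity that the active entries sit in the aclnode[] array rather than being walked through acl_node_q (the real implementation is in the corresponding .c file and may differ); example_access_ctrl() is an illustrative name only:

    /* Sketch: returns true when mac_addr passes the ACL policy. */
    static u8 example_access_ctrl(struct wlan_acl_pool *acl, u8 *mac_addr)
    {
    	u8 match = false;
    	int i;

    	for (i = 0; i < NUM_ACL; i++) {
    		if (acl->aclnode[i].valid &&
    		    !memcmp(acl->aclnode[i].addr, mac_addr, ETH_ALEN)) {
    			match = true;
    			break;
    		}
    	}

    	switch (acl->mode) {
    	case 1:		/* accept unless listed (deny list) */
    		return !match;
    	case 2:		/* deny unless listed (accept list) */
    		return match;
    	default:	/* mode 0: ACL disabled, accept everyone */
    		return true;
    	}
    }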
diff --git a/drivers/staging/rtl8188eu/include/usb_hal.h b/drivers/staging/rtl8188eu/include/usb_hal.h
new file mode 100644
index 0000000..8a65995
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/usb_hal.h
@@ -0,0 +1,26 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __USB_HAL_H__
+#define __USB_HAL_H__
+
+void rtl8188eu_set_hal_ops(struct adapter *padapter);
+#define hal_set_hal_ops rtl8188eu_set_hal_ops
+
+#endif /* __USB_HAL_H__ */
diff --git a/drivers/staging/rtl8188eu/include/usb_ops.h b/drivers/staging/rtl8188eu/include/usb_ops.h
new file mode 100644
index 0000000..df34237
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/usb_ops.h
@@ -0,0 +1,115 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __USB_OPS_H_
+#define __USB_OPS_H_
+
+#include <linux/version.h>
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <osdep_intf.h>
+
+#define REALTEK_USB_VENQT_READ 0xC0
+#define REALTEK_USB_VENQT_WRITE 0x40
+#define REALTEK_USB_VENQT_CMD_REQ 0x05
+#define REALTEK_USB_VENQT_CMD_IDX 0x00
+
+enum{
+ VENDOR_WRITE = 0x00,
+ VENDOR_READ = 0x01,
+};
+#define ALIGNMENT_UNIT 16
+#define MAX_VENDOR_REQ_CMD_SIZE 254 /* 8188cu SIE Support */
+#define MAX_USB_IO_CTL_SIZE (MAX_VENDOR_REQ_CMD_SIZE + ALIGNMENT_UNIT)
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 12))
+#define rtw_usb_control_msg(dev, pipe, request, requesttype, \
+ value, index, data, size, timeout_ms) \
+ usb_control_msg((dev), (pipe), (request), (requesttype), (value),\
+ (index), (data), (size), (timeout_ms))
+#define rtw_usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout_ms) \
+ usb_bulk_msg((usb_dev), (pipe), (data), (len), \
+ (actual_length), (timeout_ms))
+#else
+#define rtw_usb_control_msg(dev, pipe, request, requesttype, \
+ value, index, data, size, timeout_ms) \
+ usb_control_msg((dev), (pipe), (request), (requesttype), \
+ (value), (index), (data), (size), \
+ ((timeout_ms) == 0) || \
+ ((timeout_ms)*HZ/1000 > 0) ? \
+ ((timeout_ms)*HZ/1000) : 1)
+#define rtw_usb_bulk_msg(usb_dev, pipe, data, len, \
+ actual_length, timeout_ms) \
+ usb_bulk_msg((usb_dev), (pipe), (data), (len), (actual_length), \
+ ((timeout_ms) == 0) || ((timeout_ms)*HZ/1000 > 0) ?\
+ ((timeout_ms)*HZ/1000) : 1)
+#endif
+#include <usb_ops_linux.h>
+
+void rtl8188eu_set_hw_type(struct adapter *padapter);
+#define hal_set_hw_type rtl8188eu_set_hw_type
+void rtl8188eu_set_intf_ops(struct _io_ops *pops);
+#define usb_set_intf_ops rtl8188eu_set_intf_ops
+
+/*
+ * Increase and check whether the continual_urb_error of this @param dvobj
+ * is larger than MAX_CONTINUAL_URB_ERR
+ * @return true: the error count now exceeds MAX_CONTINUAL_URB_ERR
+ * @return false: the error count is still within the limit
+ */
+static inline int rtw_inc_and_chk_continual_urb_error(struct dvobj_priv *dvobj)
+{
+ int ret = false;
+ int value;
+ value = ATOMIC_INC_RETURN(&dvobj->continual_urb_error);
+ if (value > MAX_CONTINUAL_URB_ERR) {
+ DBG_88E("[dvobj:%p][ERROR] continual_urb_error:%d > %d\n",
+ dvobj, value, MAX_CONTINUAL_URB_ERR);
+ ret = true;
+ }
+ return ret;
+}
+
+/*
+ * Set the continual_urb_error of this @param dvobj to 0
+ */
+static inline void rtw_reset_continual_urb_error(struct dvobj_priv *dvobj)
+{
+ ATOMIC_SET(&dvobj->continual_urb_error, 0);
+}
+
+#define USB_HIGH_SPEED_BULK_SIZE 512
+#define USB_FULL_SPEED_BULK_SIZE 64
+
+static inline u8 rtw_usb_bulk_size_boundary(struct adapter *padapter,
+ int buf_len)
+{
+ u8 rst = true;
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
+
+ if (pdvobjpriv->ishighspeed)
+ rst = (0 == (buf_len) % USB_HIGH_SPEED_BULK_SIZE) ?
+ true : false;
+ else
+ rst = (0 == (buf_len) % USB_FULL_SPEED_BULK_SIZE) ?
+ true : false;
+ return rst;
+}
+
+#endif /* __USB_OPS_H_ */
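For kernels older than 2.6.12 the rtw_usb_control_msg()/rtw_usb_bulk_msg() wrappers above convert the millisecond timeout into jiffies (the older usb_control_msg()/usb_bulk_msg() API took its timeout in jiffies) and clamp the result to at least one jiffy, so a small non-zero timeout is not truncated to 0, which the USB core treats as "wait indefinitely". A short sketch of the same conversion; the numbers in the comment assume HZ = 100 purely for illustration:

    /* Sketch: mirrors the ms -> jiffies clamp used by the pre-2.6.12 macros. */
    static inline int example_ms_to_jiffies_floor(int timeout_ms)
    {
    	if (timeout_ms == 0)
    		return 0;	/* 0 keeps its "no timeout" meaning */
    	/* e.g. HZ = 100, timeout_ms = 5: 5 * 100 / 1000 == 0, so return 1 */
    	return (timeout_ms * HZ / 1000 > 0) ? timeout_ms * HZ / 1000 : 1;
    }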
diff --git a/drivers/staging/rtl8188eu/include/usb_ops_linux.h b/drivers/staging/rtl8188eu/include/usb_ops_linux.h
new file mode 100644
index 0000000..e5b758a
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/usb_ops_linux.h
@@ -0,0 +1,55 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __USB_OPS_LINUX_H__
+#define __USB_OPS_LINUX_H__
+
+#define VENDOR_CMD_MAX_DATA_LEN 254
+
+#define RTW_USB_CONTROL_MSG_TIMEOUT_TEST 10/* ms */
+#define RTW_USB_CONTROL_MSG_TIMEOUT 500/* ms */
+
+#define MAX_USBCTRL_VENDORREQ_TIMES 10
+
+#define RTW_USB_BULKOUT_TIME 5000/* ms */
+
+#define _usbctrl_vendorreq_async_callback(urb, regs) \
+ _usbctrl_vendorreq_async_callback(urb)
+#define usb_bulkout_zero_complete(purb, regs) \
+ usb_bulkout_zero_complete(purb)
+#define usb_write_mem_complete(purb, regs) \
+ usb_write_mem_complete(purb)
+#define usb_write_port_complete(purb, regs) \
+ usb_write_port_complete(purb)
+#define usb_read_port_complete(purb, regs) \
+ usb_read_port_complete(purb)
+#define usb_read_interrupt_complete(purb, regs) \
+ usb_read_interrupt_complete(purb)
+
+unsigned int ffaddr2pipehdl(struct dvobj_priv *pdvobj, u32 addr);
+
+void usb_read_mem(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem);
+void usb_write_mem(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem);
+
+void usb_read_port_cancel(struct intf_hdl *pintfhdl);
+
+u32 usb_write_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem);
+void usb_write_port_cancel(struct intf_hdl *pintfhdl);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/usb_osintf.h b/drivers/staging/rtl8188eu/include/usb_osintf.h
new file mode 100644
index 0000000..9de99ca
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/usb_osintf.h
@@ -0,0 +1,45 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __USB_OSINTF_H
+#define __USB_OSINTF_H
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <usb_vendor_req.h>
+
+extern char *rtw_initmac;
+extern int rtw_mc2u_disable;
+
+#define USBD_HALTED(Status) ((u32)(Status) >> 30 == 3)
+
+u8 usbvendorrequest(struct dvobj_priv *pdvobjpriv, enum bt_usb_request brequest,
+ enum rt_usb_wvalue wvalue, u8 windex, void *data,
+ u8 datalen, u8 isdirectionin);
+int pm_netdev_open(struct net_device *pnetdev, u8 bnormal);
+void netdev_br_init(struct net_device *netdev);
+void dhcp_flag_bcast(struct adapter *priv, struct sk_buff *skb);
+void *scdb_findEntry(struct adapter *priv, unsigned char *macAddr,
+ unsigned char *ipAddr);
+void nat25_db_expire(struct adapter *priv);
+int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method);
+
+int rtw_resume_process(struct adapter *padapter);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/usb_vendor_req.h b/drivers/staging/rtl8188eu/include/usb_vendor_req.h
new file mode 100644
index 0000000..7f26c8f
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/usb_vendor_req.h
@@ -0,0 +1,52 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _USB_VENDOR_REQUEST_H_
+#define _USB_VENDOR_REQUEST_H_
+
+/* 4 Set/Get Register related wIndex/Data */
+#define RT_USB_RESET_MASK_OFF 0
+#define RT_USB_RESET_MASK_ON 1
+#define RT_USB_SLEEP_MASK_OFF 0
+#define RT_USB_SLEEP_MASK_ON 1
+#define RT_USB_LDO_ON 1
+#define RT_USB_LDO_OFF 0
+
+/* 4 Set/Get SYSCLK related wValue or Data */
+#define RT_USB_SYSCLK_32KHZ 0
+#define RT_USB_SYSCLK_40MHZ 1
+#define RT_USB_SYSCLK_60MHZ 2
+
+
+enum bt_usb_request {
+ RT_USB_SET_REGISTER = 1,
+ RT_USB_SET_SYSCLK = 2,
+ RT_USB_GET_SYSCLK = 3,
+ RT_USB_GET_REGISTER = 4
+};
+
+enum rt_usb_wvalue {
+ RT_USB_RESET_MASK = 1,
+ RT_USB_SLEEP_MASK = 2,
+ RT_USB_USB_HRCPWM = 3,
+ RT_USB_LDO = 4,
+ RT_USB_BOOT_TYPE = 5
+};
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h
new file mode 100644
index 0000000..a615659
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/wifi.h
@@ -0,0 +1,1127 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _WIFI_H_
+#define _WIFI_H_
+
+
+#ifdef BIT
+/* error "BIT define occurred earlier elsewhere!\n" */
+#undef BIT
+#endif
+#define BIT(x) (1 << (x))
+
+
+#define WLAN_ETHHDR_LEN 14
+#define WLAN_ETHADDR_LEN 6
+#define WLAN_IEEE_OUI_LEN 3
+#define WLAN_ADDR_LEN 6
+#define WLAN_CRC_LEN 4
+#define WLAN_BSSID_LEN 6
+#define WLAN_BSS_TS_LEN 8
+#define WLAN_HDR_A3_LEN 24
+#define WLAN_HDR_A4_LEN 30
+#define WLAN_HDR_A3_QOS_LEN 26
+#define WLAN_HDR_A4_QOS_LEN 32
+#define WLAN_SSID_MAXLEN 32
+#define WLAN_DATA_MAXLEN 2312
+
+#define WLAN_A3_PN_OFFSET 24
+#define WLAN_A4_PN_OFFSET 30
+
+#define WLAN_MIN_ETHFRM_LEN 60
+#define WLAN_MAX_ETHFRM_LEN 1514
+#define WLAN_ETHHDR_LEN 14
+
+#define P80211CAPTURE_VERSION 0x80211001
+
+/* This value is tested by WiFi 11n Test Plan 5.2.3. */
+/* This test verifies the WLAN NIC can update the NAV through sending
+ * the CTS with large duration. */
+#define WiFiNavUpperUs 30000 /* 30 ms */
+
+enum WIFI_FRAME_TYPE {
+ WIFI_MGT_TYPE = (0),
+ WIFI_CTRL_TYPE = (BIT(2)),
+ WIFI_DATA_TYPE = (BIT(3)),
+ WIFI_QOS_DATA_TYPE = (BIT(7)|BIT(3)), /* QoS Data */
+};
+
+enum WIFI_FRAME_SUBTYPE {
+ /* below is for mgt frame */
+ WIFI_ASSOCREQ = (0 | WIFI_MGT_TYPE),
+ WIFI_ASSOCRSP = (BIT(4) | WIFI_MGT_TYPE),
+ WIFI_REASSOCREQ = (BIT(5) | WIFI_MGT_TYPE),
+ WIFI_REASSOCRSP = (BIT(5) | BIT(4) | WIFI_MGT_TYPE),
+ WIFI_PROBEREQ = (BIT(6) | WIFI_MGT_TYPE),
+ WIFI_PROBERSP = (BIT(6) | BIT(4) | WIFI_MGT_TYPE),
+ WIFI_BEACON = (BIT(7) | WIFI_MGT_TYPE),
+ WIFI_ATIM = (BIT(7) | BIT(4) | WIFI_MGT_TYPE),
+ WIFI_DISASSOC = (BIT(7) | BIT(5) | WIFI_MGT_TYPE),
+ WIFI_AUTH = (BIT(7) | BIT(5) | BIT(4) | WIFI_MGT_TYPE),
+ WIFI_DEAUTH = (BIT(7) | BIT(6) | WIFI_MGT_TYPE),
+ WIFI_ACTION = (BIT(7) | BIT(6) | BIT(4) | WIFI_MGT_TYPE),
+
+ /* below is for control frame */
+ WIFI_PSPOLL = (BIT(7) | BIT(5) | WIFI_CTRL_TYPE),
+ WIFI_RTS = (BIT(7) | BIT(5) | BIT(4) | WIFI_CTRL_TYPE),
+ WIFI_CTS = (BIT(7) | BIT(6) | WIFI_CTRL_TYPE),
+ WIFI_ACK = (BIT(7) | BIT(6) | BIT(4) | WIFI_CTRL_TYPE),
+ WIFI_CFEND = (BIT(7) | BIT(6) | BIT(5) | WIFI_CTRL_TYPE),
+ WIFI_CFEND_CFACK = (BIT(7) | BIT(6) | BIT(5) | BIT(4) |
+ WIFI_CTRL_TYPE),
+
+ /* below is for data frame */
+ WIFI_DATA = (0 | WIFI_DATA_TYPE),
+ WIFI_DATA_CFACK = (BIT(4) | WIFI_DATA_TYPE),
+ WIFI_DATA_CFPOLL = (BIT(5) | WIFI_DATA_TYPE),
+ WIFI_DATA_CFACKPOLL = (BIT(5) | BIT(4) | WIFI_DATA_TYPE),
+ WIFI_DATA_NULL = (BIT(6) | WIFI_DATA_TYPE),
+ WIFI_CF_ACK = (BIT(6) | BIT(4) | WIFI_DATA_TYPE),
+ WIFI_CF_POLL = (BIT(6) | BIT(5) | WIFI_DATA_TYPE),
+ WIFI_CF_ACKPOLL = (BIT(6) | BIT(5) | BIT(4) | WIFI_DATA_TYPE),
+ WIFI_QOS_DATA_NULL = (BIT(6) | WIFI_QOS_DATA_TYPE),
+};
+
+enum WIFI_REASON_CODE {
+ _RSON_RESERVED_ = 0,
+ _RSON_UNSPECIFIED_ = 1,
+ _RSON_AUTH_NO_LONGER_VALID_ = 2,
+ _RSON_DEAUTH_STA_LEAVING_ = 3,
+ _RSON_INACTIVITY_ = 4,
+ _RSON_UNABLE_HANDLE_ = 5,
+ _RSON_CLS2_ = 6,
+ _RSON_CLS3_ = 7,
+ _RSON_DISAOC_STA_LEAVING_ = 8,
+ _RSON_ASOC_NOT_AUTH_ = 9,
+
+ /* WPA reason */
+ _RSON_INVALID_IE_ = 13,
+ _RSON_MIC_FAILURE_ = 14,
+ _RSON_4WAY_HNDSHK_TIMEOUT_ = 15,
+ _RSON_GROUP_KEY_UPDATE_TIMEOUT_ = 16,
+ _RSON_DIFF_IE_ = 17,
+ _RSON_MLTCST_CIPHER_NOT_VALID_ = 18,
+ _RSON_UNICST_CIPHER_NOT_VALID_ = 19,
+ _RSON_AKMP_NOT_VALID_ = 20,
+ _RSON_UNSUPPORT_RSNE_VER_ = 21,
+ _RSON_INVALID_RSNE_CAP_ = 22,
+ _RSON_IEEE_802DOT1X_AUTH_FAIL_ = 23,
+
+	/* below are Realtek definitions */
+ _RSON_PMK_NOT_AVAILABLE_ = 24,
+ _RSON_TDLS_TEAR_TOOFAR_ = 25,
+ _RSON_TDLS_TEAR_UN_RSN_ = 26,
+};
+
+/* Reason codes (IEEE 802.11-2007, 7.3.1.7, Table 7-22)
+
+#define WLAN_REASON_UNSPECIFIED 1
+#define WLAN_REASON_PREV_AUTH_NOT_VALID 2
+#define WLAN_REASON_DEAUTH_LEAVING 3
+#define WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY 4
+#define WLAN_REASON_DISASSOC_AP_BUSY 5
+#define WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA 6
+#define WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA 7
+#define WLAN_REASON_DISASSOC_STA_HAS_LEFT 8
+#define WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH 9 */
+/* IEEE 802.11h */
+#define WLAN_REASON_PWR_CAPABILITY_NOT_VALID 10
+#define WLAN_REASON_SUPPORTED_CHANNEL_NOT_VALID 11
+
+/* IEEE 802.11i
+#define WLAN_REASON_INVALID_IE 13
+#define WLAN_REASON_MICHAEL_MIC_FAILURE 14
+#define WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT 15
+#define WLAN_REASON_GROUP_KEY_UPDATE_TIMEOUT 16
+#define WLAN_REASON_IE_IN_4WAY_DIFFERS 17
+#define WLAN_REASON_GROUP_CIPHER_NOT_VALID 18
+#define WLAN_REASON_PAIRWISE_CIPHER_NOT_VALID 19
+#define WLAN_REASON_AKMP_NOT_VALID 20
+#define WLAN_REASON_UNSUPPORTED_RSN_IE_VERSION 21
+#define WLAN_REASON_INVALID_RSN_IE_CAPAB 22
+#define WLAN_REASON_IEEE_802_1X_AUTH_FAILED 23
+#define WLAN_REASON_CIPHER_SUITE_REJECTED 24 */
+
+enum WIFI_STATUS_CODE {
+ _STATS_SUCCESSFUL_ = 0,
+ _STATS_FAILURE_ = 1,
+ _STATS_CAP_FAIL_ = 10,
+ _STATS_NO_ASOC_ = 11,
+ _STATS_OTHER_ = 12,
+ _STATS_NO_SUPP_ALG_ = 13,
+ _STATS_OUT_OF_AUTH_SEQ_ = 14,
+ _STATS_CHALLENGE_FAIL_ = 15,
+ _STATS_AUTH_TIMEOUT_ = 16,
+ _STATS_UNABLE_HANDLE_STA_ = 17,
+ _STATS_RATE_FAIL_ = 18,
+};
+
+/* Status codes (IEEE 802.11-2007, 7.3.1.9, Table 7-23)
+#define WLAN_STATUS_SUCCESS 0
+#define WLAN_STATUS_UNSPECIFIED_FAILURE 1
+#define WLAN_STATUS_CAPS_UNSUPPORTED 10
+#define WLAN_STATUS_REASSOC_NO_ASSOC 11
+#define WLAN_STATUS_ASSOC_DENIED_UNSPEC 12
+#define WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG 13
+#define WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION 14
+#define WLAN_STATUS_CHALLENGE_FAIL 15
+#define WLAN_STATUS_AUTH_TIMEOUT 16
+#define WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA 17
+#define WLAN_STATUS_ASSOC_DENIED_RATES 18 */
+
+/* extended */
+/* IEEE 802.11b */
+#define WLAN_STATUS_ASSOC_DENIED_NOSHORT 19
+#define WLAN_STATUS_ASSOC_DENIED_NOPBCC 20
+#define WLAN_STATUS_ASSOC_DENIED_NOAGILITY 21
+/* IEEE 802.11h */
+#define WLAN_STATUS_SPEC_MGMT_REQUIRED 22
+#define WLAN_STATUS_PWR_CAPABILITY_NOT_VALID 23
+#define WLAN_STATUS_SUPPORTED_CHANNEL_NOT_VALID 24
+/* IEEE 802.11g */
+#define WLAN_STATUS_ASSOC_DENIED_NO_SHORT_SLOT_TIME 25
+#define WLAN_STATUS_ASSOC_DENIED_NO_ER_PBCC 26
+#define WLAN_STATUS_ASSOC_DENIED_NO_DSSS_OFDM 27
+/* IEEE 802.11w */
+#define WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY 30
+#define WLAN_STATUS_ROBUST_MGMT_FRAME_POLICY_VIOLATION 31
+/* IEEE 802.11i */
+#define WLAN_STATUS_INVALID_IE 40
+#define WLAN_STATUS_GROUP_CIPHER_NOT_VALID 41
+#define WLAN_STATUS_PAIRWISE_CIPHER_NOT_VALID 42
+#define WLAN_STATUS_AKMP_NOT_VALID 43
+#define WLAN_STATUS_UNSUPPORTED_RSN_IE_VERSION 44
+#define WLAN_STATUS_INVALID_RSN_IE_CAPAB 45
+#define WLAN_STATUS_CIPHER_REJECTED_PER_POLICY 46
+#define WLAN_STATUS_TS_NOT_CREATED 47
+#define WLAN_STATUS_DIRECT_LINK_NOT_ALLOWED 48
+#define WLAN_STATUS_DEST_STA_NOT_PRESENT 49
+#define WLAN_STATUS_DEST_STA_NOT_QOS_STA 50
+#define WLAN_STATUS_ASSOC_DENIED_LISTEN_INT_TOO_LARGE 51
+/* IEEE 802.11r */
+#define WLAN_STATUS_INVALID_FT_ACTION_FRAME_COUNT 52
+#define WLAN_STATUS_INVALID_PMKID 53
+#define WLAN_STATUS_INVALID_MDIE 54
+#define WLAN_STATUS_INVALID_FTIE 55
+
+enum WIFI_REG_DOMAIN {
+ DOMAIN_FCC = 1,
+ DOMAIN_IC = 2,
+ DOMAIN_ETSI = 3,
+ DOMAIN_SPA = 4,
+ DOMAIN_FRANCE = 5,
+ DOMAIN_MKK = 6,
+ DOMAIN_ISRAEL = 7,
+ DOMAIN_MKK1 = 8,
+ DOMAIN_MKK2 = 9,
+ DOMAIN_MKK3 = 10,
+ DOMAIN_MAX
+};
+
+#define _TO_DS_ BIT(8)
+#define _FROM_DS_ BIT(9)
+#define _MORE_FRAG_ BIT(10)
+#define _RETRY_ BIT(11)
+#define _PWRMGT_ BIT(12)
+#define _MORE_DATA_ BIT(13)
+#define _PRIVACY_ BIT(14)
+#define _ORDER_ BIT(15)
+
+#define SetToDs(pbuf) \
+ *(__le16 *)(pbuf) |= cpu_to_le16(_TO_DS_)
+
+#define GetToDs(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_TO_DS_)) != 0)
+
+#define ClearToDs(pbuf) \
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(_TO_DS_))
+
+#define SetFrDs(pbuf) \
+ *(__le16 *)(pbuf) |= cpu_to_le16(_FROM_DS_)
+
+#define GetFrDs(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_FROM_DS_)) != 0)
+
+#define ClearFrDs(pbuf) \
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(_FROM_DS_))
+
+#define get_tofr_ds(pframe) ((GetToDs(pframe) << 1) | GetFrDs(pframe))
+
+
+#define SetMFrag(pbuf) \
+ *(__le16 *)(pbuf) |= cpu_to_le16(_MORE_FRAG_)
+
+#define GetMFrag(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_MORE_FRAG_)) != 0)
+
+#define ClearMFrag(pbuf) \
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(_MORE_FRAG_))
+
+#define SetRetry(pbuf) \
+ *(__le16 *)(pbuf) |= cpu_to_le16(_RETRY_)
+
+#define GetRetry(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_RETRY_)) != 0)
+
+#define ClearRetry(pbuf) \
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(_RETRY_))
+
+#define SetPwrMgt(pbuf) \
+ *(__le16 *)(pbuf) |= cpu_to_le16(_PWRMGT_)
+
+#define GetPwrMgt(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_PWRMGT_)) != 0)
+
+#define ClearPwrMgt(pbuf) \
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(_PWRMGT_))
+
+#define SetMData(pbuf) \
+ *(__le16 *)(pbuf) |= cpu_to_le16(_MORE_DATA_)
+
+#define GetMData(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_MORE_DATA_)) != 0)
+
+#define ClearMData(pbuf) \
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(_MORE_DATA_))
+
+#define SetPrivacy(pbuf) \
+ *(__le16 *)(pbuf) |= cpu_to_le16(_PRIVACY_)
+
+#define GetPrivacy(pbuf) \
+ (((*(__le16 *)(pbuf)) & cpu_to_le16(_PRIVACY_)) != 0)
+
+#define ClearPrivacy(pbuf) \
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(_PRIVACY_))
+
+
+#define GetOrder(pbuf) \
+ (((*(__le16 *)(pbuf)) & cpu_to_le16(_ORDER_)) != 0)
+
+#define GetFrameType(pbuf) \
+ (le16_to_cpu(*(__le16 *)(pbuf)) & (BIT(3) | BIT(2)))
+
+#define SetFrameType(pbuf, type) \
+ do { \
+ *(unsigned short *)(pbuf) &= __constant_cpu_to_le16(~(BIT(3) | BIT(2))); \
+ *(unsigned short *)(pbuf) |= __constant_cpu_to_le16(type); \
+ } while (0)
+
+#define GetFrameSubType(pbuf) (le16_to_cpu(*(__le16 *)(pbuf)) & (BIT(7) |\
+ BIT(6) | BIT(5) | BIT(4) | BIT(3) | BIT(2)))
+
+#define SetFrameSubType(pbuf, type) \
+ do { \
+ *(__le16 *)(pbuf) &= cpu_to_le16(~(BIT(7) | BIT(6) | \
+ BIT(5) | BIT(4) | BIT(3) | BIT(2))); \
+ *(__le16 *)(pbuf) |= cpu_to_le16(type); \
+ } while (0)
+
+#define GetSequence(pbuf) \
+ (le16_to_cpu(*(__le16 *)((size_t)(pbuf) + 22)) >> 4)
+
+#define GetFragNum(pbuf) \
+ (le16_to_cpu(*(__le16 *)((size_t)(pbuf) + 22)) & 0x0f)
+
+#define GetTupleCache(pbuf) \
+ (cpu_to_le16(*(unsigned short *)((size_t)(pbuf) + 22)))
+
+#define SetFragNum(pbuf, num) \
+ do { \
+ *(unsigned short *)((size_t)(pbuf) + 22) = \
+ ((*(unsigned short *)((size_t)(pbuf) + 22)) & \
+ le16_to_cpu(~(0x000f))) | \
+ cpu_to_le16(0x0f & (num)); \
+ } while (0)
+
+#define SetSeqNum(pbuf, num) \
+ do { \
+ *(__le16 *)((size_t)(pbuf) + 22) = \
+ ((*(__le16 *)((size_t)(pbuf) + 22)) & cpu_to_le16((unsigned short)0x000f)) | \
+ cpu_to_le16((unsigned short)(0xfff0 & (num << 4))); \
+ } while (0)
+
+#define SetDuration(pbuf, dur) \
+ *(__le16 *)((size_t)(pbuf) + 2) = cpu_to_le16(0xffff & (dur))
+
+
+#define SetPriority(pbuf, tid) \
+ *(__le16 *)(pbuf) |= cpu_to_le16(tid & 0xf)
+
+#define GetPriority(pbuf) ((le16_to_cpu(*(__le16 *)(pbuf))) & 0xf)
+
+#define SetEOSP(pbuf, eosp) \
+ *(__le16 *)(pbuf) |= cpu_to_le16((eosp & 1) << 4)
+
+#define SetAckpolicy(pbuf, ack) \
+ *(__le16 *)(pbuf) |= cpu_to_le16((ack & 3) << 5)
+
+#define GetAckpolicy(pbuf) (((le16_to_cpu(*(__le16 *)pbuf)) >> 5) & 0x3)
+
+#define GetAMsdu(pbuf) (((le16_to_cpu(*(__le16 *)pbuf)) >> 7) & 0x1)
+
+#define SetAMsdu(pbuf, amsdu) \
+ *(__le16 *)(pbuf) |= cpu_to_le16((amsdu & 1) << 7)
+
+#define GetAid(pbuf) (le16_to_cpu(*(__le16 *)((size_t)(pbuf) + 2)) & 0x3fff)
+
+#define GetTid(pbuf) (le16_to_cpu(*(__le16 *)((size_t)(pbuf) + \
+ (((GetToDs(pbuf)<<1) | GetFrDs(pbuf)) == 3 ? \
+ 30 : 24))) & 0x000f)
+
+#define GetAddr1Ptr(pbuf) ((unsigned char *)((size_t)(pbuf) + 4))
+
+#define GetAddr2Ptr(pbuf) ((unsigned char *)((size_t)(pbuf) + 10))
+
+#define GetAddr3Ptr(pbuf) ((unsigned char *)((size_t)(pbuf) + 16))
+
+#define GetAddr4Ptr(pbuf) ((unsigned char *)((size_t)(pbuf) + 24))
+
+#define MacAddr_isBcst(addr) \
+ ( \
+ ((addr[0] == 0xff) && (addr[1] == 0xff) && \
+ (addr[2] == 0xff) && (addr[3] == 0xff) && \
+ (addr[4] == 0xff) && (addr[5] == 0xff)) ? true : false \
+)
+
+static inline int IS_MCAST(unsigned char *da)
+{
+ if ((*da) & 0x01)
+ return true;
+ else
+ return false;
+}
+
+static inline unsigned char *get_da(unsigned char *pframe)
+{
+ unsigned char *da;
+ unsigned int to_fr_ds = (GetToDs(pframe) << 1) | GetFrDs(pframe);
+
+ switch (to_fr_ds) {
+ case 0x00: /* ToDs=0, FromDs=0 */
+ da = GetAddr1Ptr(pframe);
+ break;
+ case 0x01: /* ToDs=0, FromDs=1 */
+ da = GetAddr1Ptr(pframe);
+ break;
+ case 0x02: /* ToDs=1, FromDs=0 */
+ da = GetAddr3Ptr(pframe);
+ break;
+ default: /* ToDs=1, FromDs=1 */
+ da = GetAddr3Ptr(pframe);
+ break;
+ }
+ return da;
+}
+
+static inline unsigned char *get_sa(unsigned char *pframe)
+{
+ unsigned char *sa;
+ unsigned int to_fr_ds = (GetToDs(pframe) << 1) | GetFrDs(pframe);
+
+ switch (to_fr_ds) {
+ case 0x00: /* ToDs=0, FromDs=0 */
+ sa = GetAddr2Ptr(pframe);
+ break;
+ case 0x01: /* ToDs=0, FromDs=1 */
+ sa = GetAddr3Ptr(pframe);
+ break;
+ case 0x02: /* ToDs=1, FromDs=0 */
+ sa = GetAddr2Ptr(pframe);
+ break;
+ default: /* ToDs=1, FromDs=1 */
+ sa = GetAddr4Ptr(pframe);
+ break;
+ }
+ return sa;
+}
+
+static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
+{
+ unsigned char *sa;
+ unsigned int to_fr_ds = (GetToDs(pframe) << 1) | GetFrDs(pframe);
+
+ switch (to_fr_ds) {
+ case 0x00: /* ToDs=0, FromDs=0 */
+ sa = GetAddr3Ptr(pframe);
+ break;
+ case 0x01: /* ToDs=0, FromDs=1 */
+ sa = GetAddr2Ptr(pframe);
+ break;
+ case 0x02: /* ToDs=1, FromDs=0 */
+ sa = GetAddr1Ptr(pframe);
+ break;
+ case 0x03: /* ToDs=1, FromDs=1 */
+ sa = GetAddr1Ptr(pframe);
+ break;
+ default:
+ sa = NULL; /* */
+ break;
+ }
+ return sa;
+}
+
+static inline int IsFrameTypeCtrl(unsigned char *pframe)
+{
+ if (WIFI_CTRL_TYPE == GetFrameType(pframe))
+ return true;
+ else
+ return false;
+}
+/*-----------------------------------------------------------------------------
+ Below are the security-related definitions
+------------------------------------------------------------------------------*/
+#define _RESERVED_FRAME_TYPE_ 0
+#define _SKB_FRAME_TYPE_ 2
+#define _PRE_ALLOCMEM_ 1
+#define _PRE_ALLOCHDR_ 3
+#define _PRE_ALLOCLLCHDR_ 4
+#define _PRE_ALLOCICVHDR_ 5
+#define _PRE_ALLOCMICHDR_ 6
+
+#define _SIFSTIME_ \
+ ((priv->pmib->dot11BssType.net_work_type & WIRELESS_11A) ? 16 : 10)
+#define _ACKCTSLNG_ 14 /* 14 bytes long, including crclng */
+#define _CRCLNG_ 4
+
+#define _ASOCREQ_IE_OFFSET_ 4 /* excluding wlan_hdr */
+#define _ASOCRSP_IE_OFFSET_ 6
+#define _REASOCREQ_IE_OFFSET_ 10
+#define _REASOCRSP_IE_OFFSET_ 6
+#define _PROBEREQ_IE_OFFSET_ 0
+#define _PROBERSP_IE_OFFSET_ 12
+#define _AUTH_IE_OFFSET_ 6
+#define _DEAUTH_IE_OFFSET_ 0
+#define _BEACON_IE_OFFSET_ 12
+#define _PUBLIC_ACTION_IE_OFFSET_ 8
+
+#define _FIXED_IE_LENGTH_ _BEACON_IE_OFFSET_
+
+#define _SSID_IE_ 0
+#define _SUPPORTEDRATES_IE_ 1
+#define _DSSET_IE_ 3
+#define _TIM_IE_ 5
+#define _IBSS_PARA_IE_ 6
+#define _COUNTRY_IE_ 7
+#define _CHLGETXT_IE_ 16
+#define _SUPPORTED_CH_IE_ 36
+#define _CH_SWTICH_ANNOUNCE_ 37 /* Secondary Channel Offset */
+#define _RSN_IE_2_ 48
+#define _SSN_IE_1_ 221
+#define _ERPINFO_IE_ 42
+#define _EXT_SUPPORTEDRATES_IE_ 50
+
+#define _HT_CAPABILITY_IE_ 45
+#define _FTIE_ 55
+#define _TIMEOUT_ITVL_IE_ 56
+#define _SRC_IE_ 59
+#define _HT_EXTRA_INFO_IE_ 61
+#define _HT_ADD_INFO_IE_ 61 /* _HT_EXTRA_INFO_IE_ */
+#define _WAPI_IE_ 68
+
+
+#define EID_BSSCoexistence 72 /* 20/40 BSS Coexistence */
+#define EID_BSSIntolerantChlReport 73
+#define _RIC_Descriptor_IE_ 75
+
+#define _LINK_ID_IE_ 101
+#define _CH_SWITCH_TIMING_ 104
+#define _PTI_BUFFER_STATUS_ 106
+#define _EXT_CAP_IE_ 127
+#define _VENDOR_SPECIFIC_IE_ 221
+
+#define _RESERVED47_ 47
+
+/* ---------------------------------------------------------------------------
+ Below are the fixed elements...
+-----------------------------------------------------------------------------*/
+#define _AUTH_ALGM_NUM_ 2
+#define _AUTH_SEQ_NUM_ 2
+#define _BEACON_ITERVAL_ 2
+#define _CAPABILITY_ 2
+#define _CURRENT_APADDR_ 6
+#define _LISTEN_INTERVAL_ 2
+#define _RSON_CODE_ 2
+#define _ASOC_ID_ 2
+#define _STATUS_CODE_ 2
+#define _TIMESTAMP_ 8
+
+#define AUTH_ODD_TO 0
+#define AUTH_EVEN_TO 1
+
+#define WLAN_ETHCONV_ENCAP 1
+#define WLAN_ETHCONV_RFC1042 2
+#define WLAN_ETHCONV_8021h 3
+
+#define cap_ESS BIT(0)
+#define cap_IBSS BIT(1)
+#define cap_CFPollable BIT(2)
+#define cap_CFRequest BIT(3)
+#define cap_Privacy BIT(4)
+#define cap_ShortPremble BIT(5)
+#define cap_PBCC BIT(6)
+#define cap_ChAgility BIT(7)
+#define cap_SpecMgmt BIT(8)
+#define cap_QoSi BIT(9)
+#define cap_ShortSlot BIT(10)
+
+/*-----------------------------------------------------------------------------
+ Below is the definition for 802.11i / 802.1x
+------------------------------------------------------------------------------*/
+#define _IEEE8021X_MGT_ 1 /* WPA */
+#define _IEEE8021X_PSK_ 2 /* WPA with pre-shared key */
+
+/*
+#define _NO_PRIVACY_ 0
+#define _WEP_40_PRIVACY_ 1
+#define _TKIP_PRIVACY_ 2
+#define _WRAP_PRIVACY_ 3
+#define _CCMP_PRIVACY_ 4
+#define _WEP_104_PRIVACY_ 5
+#define _WEP_WPA_MIXED_PRIVACY_ 6 WEP + WPA
+*/
+
+/*-----------------------------------------------------------------------------
+ Below is the definition for WMM
+------------------------------------------------------------------------------*/
+#define _WMM_IE_Length_ 7 /* for WMM STA */
+#define _WMM_Para_Element_Length_ 24
+
+
+/*-----------------------------------------------------------------------------
+ Below is the definition for 802.11n
+------------------------------------------------------------------------------*/
+
+#define SetOrderBit(pbuf) \
+ do { \
+ *(unsigned short *)(pbuf) |= cpu_to_le16(_ORDER_); \
+ } while (0)
+
+#define GetOrderBit(pbuf) \
+ (((*(unsigned short *)(pbuf)) & le16_to_cpu(_ORDER_)) != 0)
+
+
+/**
+ * struct rtw_ieee80211_bar - HT Block Ack Request
+ *
+ * This structure refers to "HT BlockAckReq" as
+ * described in 802.11n draft section 7.2.1.7.1
+ */
+struct rtw_ieee80211_bar {
+ unsigned short frame_control;
+ unsigned short duration;
+ unsigned char ra[6];
+ unsigned char ta[6];
+ unsigned short control;
+ unsigned short start_seq_num;
+} __packed;
+
+/* 802.11 BAR control masks */
+#define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000
+#define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004
+
+ /**
+ * struct rtw_ieee80211_ht_cap - HT capabilities
+ *
+ * This structure refers to "HT capabilities element" as
+ * described in 802.11n draft section 7.3.2.52
+ */
+
+struct rtw_ieee80211_ht_cap {
+ unsigned short cap_info;
+ unsigned char ampdu_params_info;
+ unsigned char supp_mcs_set[16];
+ unsigned short extended_ht_cap_info;
+ unsigned int tx_BF_cap_info;
+ unsigned char antenna_selection_info;
+} __packed;
+
+/**
+ * struct ieee80211_ht_addt_info - HT additional information
+ *
+ * This structure refers to "HT information element" as
+ * described in 802.11n draft section 7.3.2.53
+ */
+struct ieee80211_ht_addt_info {
+ unsigned char control_chan;
+ unsigned char ht_param;
+ unsigned short operation_mode;
+ unsigned short stbc_param;
+ unsigned char basic_set[16];
+} __packed;
+
+struct HT_caps_element {
+ union {
+ struct {
+ __le16 HT_caps_info;
+ unsigned char AMPDU_para;
+ unsigned char MCS_rate[16];
+ unsigned short HT_ext_caps;
+ unsigned int Beamforming_caps;
+ unsigned char ASEL_caps;
+ } HT_cap_element;
+ unsigned char HT_cap[26];
+ } u;
+} __packed;
+
+struct HT_info_element {
+ unsigned char primary_channel;
+ unsigned char infos[5];
+ unsigned char MCS_rate[16];
+} __packed;
+
+struct AC_param {
+ unsigned char ACI_AIFSN;
+ unsigned char CW;
+ __le16 TXOP_limit;
+} __packed;
+
+struct WMM_para_element {
+ unsigned char QoS_info;
+ unsigned char reserved;
+ struct AC_param ac_param[4];
+} __packed;
+
+struct ADDBA_request {
+ unsigned char dialog_token;
+ unsigned short BA_para_set;
+ unsigned short BA_timeout_value;
+ unsigned short BA_starting_seqctrl;
+} __packed;
+
+enum ht_cap_ampdu_factor {
+ MAX_AMPDU_FACTOR_8K = 0,
+ MAX_AMPDU_FACTOR_16K = 1,
+ MAX_AMPDU_FACTOR_32K = 2,
+ MAX_AMPDU_FACTOR_64K = 3,
+};
+
+/* 802.11n HT capabilities masks */
+#define IEEE80211_HT_CAP_SUP_WIDTH 0x0002
+#define IEEE80211_HT_CAP_SM_PS 0x000C
+#define IEEE80211_HT_CAP_GRN_FLD 0x0010
+#define IEEE80211_HT_CAP_SGI_20 0x0020
+#define IEEE80211_HT_CAP_SGI_40 0x0040
+#define IEEE80211_HT_CAP_TX_STBC 0x0080
+#define IEEE80211_HT_CAP_RX_STBC 0x0300
+#define IEEE80211_HT_CAP_DELAY_BA 0x0400
+#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800
+#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
+/* 802.11n HT capability AMPDU settings */
+#define IEEE80211_HT_CAP_AMPDU_FACTOR 0x03
+#define IEEE80211_HT_CAP_AMPDU_DENSITY 0x1C
+/* 802.11n HT capability MCS set */
+#define IEEE80211_SUPP_MCS_SET_UEQM 4
+#define IEEE80211_HT_CAP_MAX_STREAMS 4
+#define IEEE80211_SUPP_MCS_SET_LEN 10
+/* maximum streams the spec allows */
+#define IEEE80211_HT_CAP_MCS_TX_DEFINED 0x01
+#define IEEE80211_HT_CAP_MCS_TX_RX_DIFF 0x02
+#define IEEE80211_HT_CAP_MCS_TX_STREAMS 0x0C
+#define IEEE80211_HT_CAP_MCS_TX_UEQM 0x10
+/* 802.11n HT IE masks */
+#define IEEE80211_HT_IE_CHA_SEC_OFFSET 0x03
+#define IEEE80211_HT_IE_CHA_SEC_NONE 0x00
+#define IEEE80211_HT_IE_CHA_SEC_ABOVE 0x01
+#define IEEE80211_HT_IE_CHA_SEC_BELOW 0x03
+#define IEEE80211_HT_IE_CHA_WIDTH 0x04
+#define IEEE80211_HT_IE_HT_PROTECTION 0x0003
+#define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004
+#define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010
+
+/* block-ack parameters */
+#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
+#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
+#define RTW_IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0
+#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
+#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
+
+/*
+ * A-MPDU buffer sizes
+ * According to the IEEE 802.11n spec, the size varies from 8K to 64K (in powers of 2)
+ */
+#define IEEE80211_MIN_AMPDU_BUF 0x8
+#define IEEE80211_MAX_AMPDU_BUF 0x40
+
+
+/* Spatial Multiplexing Power Save Modes */
+#define WLAN_HT_CAP_SM_PS_STATIC 0
+#define WLAN_HT_CAP_SM_PS_DYNAMIC 1
+#define WLAN_HT_CAP_SM_PS_INVALID 2
+#define WLAN_HT_CAP_SM_PS_DISABLED 3
+
+
+#define OP_MODE_PURE 0
+#define OP_MODE_MAY_BE_LEGACY_STAS 1
+#define OP_MODE_20MHZ_HT_STA_ASSOCED 2
+#define OP_MODE_MIXED 3
+
+#define HT_INFO_HT_PARAM_SECONDARY_CHNL_OFF_MASK ((u8) BIT(0) | BIT(1))
+#define HT_INFO_HT_PARAM_SECONDARY_CHNL_ABOVE ((u8) BIT(0))
+#define HT_INFO_HT_PARAM_SECONDARY_CHNL_BELOW ((u8) BIT(0) | BIT(1))
+#define HT_INFO_HT_PARAM_REC_TRANS_CHNL_WIDTH ((u8) BIT(2))
+#define HT_INFO_HT_PARAM_RIFS_MODE ((u8) BIT(3))
+#define HT_INFO_HT_PARAM_CTRL_ACCESS_ONLY ((u8) BIT(4))
+#define HT_INFO_HT_PARAM_SRV_INTERVAL_GRANULARITY ((u8) BIT(5))
+
+#define HT_INFO_OPERATION_MODE_OP_MODE_MASK \
+ ((u16) (0x0001 | 0x0002))
+#define HT_INFO_OPERATION_MODE_OP_MODE_OFFSET 0
+#define HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT ((u8) BIT(2))
+#define HT_INFO_OPERATION_MODE_TRANSMIT_BURST_LIMIT ((u8) BIT(3))
+#define HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT ((u8) BIT(4))
+
+#define HT_INFO_STBC_PARAM_DUAL_BEACON ((u16) BIT(6))
+#define HT_INFO_STBC_PARAM_DUAL_STBC_PROTECT ((u16) BIT(7))
+#define HT_INFO_STBC_PARAM_SECONDARY_BC ((u16) BIT(8))
+#define HT_INFO_STBC_PARAM_LSIG_TXOP_PROTECT_ALLOWED ((u16) BIT(9))
+#define HT_INFO_STBC_PARAM_PCO_ACTIVE ((u16) BIT(10))
+#define HT_INFO_STBC_PARAM_PCO_PHASE ((u16) BIT(11))
+
+/* ===============WPS Section=============== */
+/* For WPSv1.0 */
+#define WPSOUI 0x0050f204
+/* WPS attribute ID */
+#define WPS_ATTR_VER1 0x104A
+#define WPS_ATTR_SIMPLE_CONF_STATE 0x1044
+#define WPS_ATTR_RESP_TYPE 0x103B
+#define WPS_ATTR_UUID_E 0x1047
+#define WPS_ATTR_MANUFACTURER 0x1021
+#define WPS_ATTR_MODEL_NAME 0x1023
+#define WPS_ATTR_MODEL_NUMBER 0x1024
+#define WPS_ATTR_SERIAL_NUMBER 0x1042
+#define WPS_ATTR_PRIMARY_DEV_TYPE 0x1054
+#define WPS_ATTR_SEC_DEV_TYPE_LIST 0x1055
+#define WPS_ATTR_DEVICE_NAME 0x1011
+#define WPS_ATTR_CONF_METHOD 0x1008
+#define WPS_ATTR_RF_BANDS 0x103C
+#define WPS_ATTR_DEVICE_PWID 0x1012
+#define WPS_ATTR_REQUEST_TYPE 0x103A
+#define WPS_ATTR_ASSOCIATION_STATE 0x1002
+#define WPS_ATTR_CONFIG_ERROR 0x1009
+#define WPS_ATTR_VENDOR_EXT 0x1049
+#define WPS_ATTR_SELECTED_REGISTRAR 0x1041
+
+/* Value of WPS attribute "WPS_ATTR_DEVICE_NAME" */
+#define WPS_MAX_DEVICE_NAME_LEN 32
+
+/* Value of WPS Request Type Attribute */
+#define WPS_REQ_TYPE_ENROLLEE_INFO_ONLY 0x00
+#define WPS_REQ_TYPE_ENROLLEE_OPEN_8021X 0x01
+#define WPS_REQ_TYPE_REGISTRAR 0x02
+#define WPS_REQ_TYPE_WLAN_MANAGER_REGISTRAR 0x03
+
+/* Value of WPS Response Type Attribute */
+#define WPS_RESPONSE_TYPE_INFO_ONLY 0x00
+#define WPS_RESPONSE_TYPE_8021X 0x01
+#define WPS_RESPONSE_TYPE_REGISTRAR 0x02
+#define WPS_RESPONSE_TYPE_AP 0x03
+
+/* Value of WPS WiFi Simple Configuration State Attribute */
+#define WPS_WSC_STATE_NOT_CONFIG 0x01
+#define WPS_WSC_STATE_CONFIG 0x02
+
+/* Value of WPS Version Attribute */
+#define WPS_VERSION_1 0x10
+
+/* Value of WPS Configuration Method Attribute */
+#define WPS_CONFIG_METHOD_FLASH 0x0001
+#define WPS_CONFIG_METHOD_ETHERNET 0x0002
+#define WPS_CONFIG_METHOD_LABEL 0x0004
+#define WPS_CONFIG_METHOD_DISPLAY 0x0008
+#define WPS_CONFIG_METHOD_E_NFC 0x0010
+#define WPS_CONFIG_METHOD_I_NFC 0x0020
+#define WPS_CONFIG_METHOD_NFC 0x0040
+#define WPS_CONFIG_METHOD_PBC 0x0080
+#define WPS_CONFIG_METHOD_KEYPAD 0x0100
+#define WPS_CONFIG_METHOD_VPBC 0x0280
+#define WPS_CONFIG_METHOD_PPBC 0x0480
+#define WPS_CONFIG_METHOD_VDISPLAY 0x2008
+#define WPS_CONFIG_METHOD_PDISPLAY 0x4008
+
+/* Value of Category ID of WPS Primary Device Type Attribute */
+#define WPS_PDT_CID_DISPLAYS 0x0007
+#define WPS_PDT_CID_MULIT_MEDIA 0x0008
+#define WPS_PDT_CID_RTK_WIDI WPS_PDT_CID_MULIT_MEDIA
+
+/* Value of Sub Category ID of WPS Primary Device Type Attribute */
+#define WPS_PDT_SCID_MEDIA_SERVER 0x0005
+#define WPS_PDT_SCID_RTK_DMP WPS_PDT_SCID_MEDIA_SERVER
+
+/* Value of Device Password ID */
+#define WPS_DPID_P 0x0000
+#define WPS_DPID_USER_SPEC 0x0001
+#define WPS_DPID_MACHINE_SPEC 0x0002
+#define WPS_DPID_REKEY 0x0003
+#define WPS_DPID_PBC 0x0004
+#define WPS_DPID_REGISTRAR_SPEC 0x0005
+
+/* Value of WPS RF Bands Attribute */
+#define WPS_RF_BANDS_2_4_GHZ 0x01
+#define WPS_RF_BANDS_5_GHZ 0x02
+
+/* Value of WPS Association State Attribute */
+#define WPS_ASSOC_STATE_NOT_ASSOCIATED 0x00
+#define WPS_ASSOC_STATE_CONNECTION_SUCCESS 0x01
+#define WPS_ASSOC_STATE_CONFIGURATION_FAILURE 0x02
+#define WPS_ASSOC_STATE_ASSOCIATION_FAILURE 0x03
+#define WPS_ASSOC_STATE_IP_FAILURE 0x04
+
+/* =====================P2P Section===================== */
+/* For P2P */
+#define P2POUI 0x506F9A09
+
+/* P2P Attribute ID */
+#define P2P_ATTR_STATUS 0x00
+#define P2P_ATTR_MINOR_REASON_CODE 0x01
+#define P2P_ATTR_CAPABILITY 0x02
+#define P2P_ATTR_DEVICE_ID 0x03
+#define P2P_ATTR_GO_INTENT 0x04
+#define P2P_ATTR_CONF_TIMEOUT 0x05
+#define P2P_ATTR_LISTEN_CH 0x06
+#define P2P_ATTR_GROUP_BSSID 0x07
+#define P2P_ATTR_EX_LISTEN_TIMING 0x08
+#define P2P_ATTR_INTENTED_IF_ADDR 0x09
+#define P2P_ATTR_MANAGEABILITY 0x0A
+#define P2P_ATTR_CH_LIST 0x0B
+#define P2P_ATTR_NOA 0x0C
+#define P2P_ATTR_DEVICE_INFO 0x0D
+#define P2P_ATTR_GROUP_INFO 0x0E
+#define P2P_ATTR_GROUP_ID 0x0F
+#define P2P_ATTR_INTERFACE 0x10
+#define P2P_ATTR_OPERATING_CH 0x11
+#define P2P_ATTR_INVITATION_FLAGS 0x12
+
+/* Value of Status Attribute */
+#define P2P_STATUS_SUCCESS 0x00
+#define P2P_STATUS_FAIL_INFO_UNAVAILABLE 0x01
+#define P2P_STATUS_FAIL_INCOMPATIBLE_PARAM 0x02
+#define P2P_STATUS_FAIL_LIMIT_REACHED 0x03
+#define P2P_STATUS_FAIL_INVALID_PARAM 0x04
+#define P2P_STATUS_FAIL_REQUEST_UNABLE 0x05
+#define P2P_STATUS_FAIL_PREVOUS_PROTO_ERR 0x06
+#define P2P_STATUS_FAIL_NO_COMMON_CH 0x07
+#define P2P_STATUS_FAIL_UNKNOWN_P2PGROUP 0x08
+#define P2P_STATUS_FAIL_BOTH_GOINTENT_15 0x09
+#define P2P_STATUS_FAIL_INCOMPATIBLE_PROVSION 0x0A
+#define P2P_STATUS_FAIL_USER_REJECT 0x0B
+
+/* Value of Invitation Flags Attribute */
+#define P2P_INVITATION_FLAGS_PERSISTENT BIT(0)
+
+#define DMP_P2P_DEVCAP_SUPPORT (P2P_DEVCAP_SERVICE_DISCOVERY | \
+ P2P_DEVCAP_CLIENT_DISCOVERABILITY | \
+ P2P_DEVCAP_CONCURRENT_OPERATION | \
+ P2P_DEVCAP_INVITATION_PROC)
+
+#define DMP_P2P_GRPCAP_SUPPORT (P2P_GRPCAP_INTRABSS)
+
+/* Value of Device Capability Bitmap */
+#define P2P_DEVCAP_SERVICE_DISCOVERY BIT(0)
+#define P2P_DEVCAP_CLIENT_DISCOVERABILITY BIT(1)
+#define P2P_DEVCAP_CONCURRENT_OPERATION BIT(2)
+#define P2P_DEVCAP_INFRA_MANAGED BIT(3)
+#define P2P_DEVCAP_DEVICE_LIMIT BIT(4)
+#define P2P_DEVCAP_INVITATION_PROC BIT(5)
+
+/* Value of Group Capability Bitmap */
+#define P2P_GRPCAP_GO BIT(0)
+#define P2P_GRPCAP_PERSISTENT_GROUP BIT(1)
+#define P2P_GRPCAP_GROUP_LIMIT BIT(2)
+#define P2P_GRPCAP_INTRABSS BIT(3)
+#define P2P_GRPCAP_CROSS_CONN BIT(4)
+#define P2P_GRPCAP_PERSISTENT_RECONN BIT(5)
+#define P2P_GRPCAP_GROUP_FORMATION BIT(6)
+
+/* P2P Public Action Frame (Management Frame) */
+#define P2P_PUB_ACTION_ACTION 0x09
+
+/* P2P Public Action Frame Type */
+#define P2P_GO_NEGO_REQ 0
+#define P2P_GO_NEGO_RESP 1
+#define P2P_GO_NEGO_CONF 2
+#define P2P_INVIT_REQ 3
+#define P2P_INVIT_RESP 4
+#define P2P_DEVDISC_REQ 5
+#define P2P_DEVDISC_RESP 6
+#define P2P_PROVISION_DISC_REQ 7
+#define P2P_PROVISION_DISC_RESP 8
+
+/* P2P Action Frame Type */
+#define P2P_NOTICE_OF_ABSENCE 0
+#define P2P_PRESENCE_REQUEST 1
+#define P2P_PRESENCE_RESPONSE 2
+#define P2P_GO_DISC_REQUEST 3
+
+
+#define P2P_MAX_PERSISTENT_GROUP_NUM 10
+
+#define P2P_PROVISIONING_SCAN_CNT 3
+
+#define P2P_WILDCARD_SSID_LEN 7
+
+/* default value, used when (1) P2P is disabled, or (2) P2P is enabled
+ * but only one scan phase is performed */
+#define P2P_FINDPHASE_EX_NONE 0
+/* used when P2P is enabled and one scan phase plus
+ * P2P_FINDPHASE_EX_MAX-1 find phases are wanted */
+#define P2P_FINDPHASE_EX_FULL 1
+#define P2P_FINDPHASE_EX_SOCIAL_FIRST (P2P_FINDPHASE_EX_FULL+1)
+#define P2P_FINDPHASE_EX_MAX 4
+#define P2P_FINDPHASE_EX_SOCIAL_LAST P2P_FINDPHASE_EX_MAX
+
+/* 5 seconds timeout for sending the provision discovery request */
+#define P2P_PROVISION_TIMEOUT 5000
+/* 3 seconds timeout for sending the provision discovery request in concurrent mode */
+#define P2P_CONCURRENT_PROVISION_TIME 3000
+/* 5 seconds timeout for receiving the group negotiation response */
+#define P2P_GO_NEGO_TIMEOUT 5000
+/* 3 seconds timeout for sending the negotiation request under concurrent mode */
+#define P2P_CONCURRENT_GO_NEGO_TIME 3000
+/* 100 ms TX prescan timeout */
+#define P2P_TX_PRESCAN_TIMEOUT 100
+/* 5 seconds timeout for sending the invitation request */
+#define P2P_INVITE_TIMEOUT 5000
+/* 3 seconds timeout for sending the invitation request under concurrent mode */
+#define P2P_CONCURRENT_INVITE_TIME 3000
+/* 25 seconds timeout to reset the scan channel (based on channel plan) */
+#define P2P_RESET_SCAN_CH 25000
+#define P2P_MAX_INTENT 15
+
+#define P2P_MAX_NOA_NUM 2
+
+/* WPS Configuration Method */
+#define WPS_CM_NONE 0x0000
+#define WPS_CM_LABEL 0x0004
+#define WPS_CM_DISPLYA 0x0008
+#define WPS_CM_EXTERNAL_NFC_TOKEN 0x0010
+#define WPS_CM_INTEGRATED_NFC_TOKEN 0x0020
+#define WPS_CM_NFC_INTERFACE 0x0040
+#define WPS_CM_PUSH_BUTTON 0x0080
+#define WPS_CM_KEYPAD 0x0100
+#define WPS_CM_SW_PUHS_BUTTON 0x0280
+#define WPS_CM_HW_PUHS_BUTTON 0x0480
+#define WPS_CM_SW_DISPLAY_P 0x2008
+#define WPS_CM_LCD_DISPLAY_P 0x4008
+
+enum P2P_ROLE {
+ P2P_ROLE_DISABLE = 0,
+ P2P_ROLE_DEVICE = 1,
+ P2P_ROLE_CLIENT = 2,
+ P2P_ROLE_GO = 3
+};
+
+enum P2P_STATE {
+ P2P_STATE_NONE = 0, /* P2P disabled */
+ /* P2P has been enabled but is doing nothing */
+ P2P_STATE_IDLE = 1,
+ P2P_STATE_LISTEN = 2, /* In pure listen state */
+ P2P_STATE_SCAN = 3, /* In scan phase */
+ /* In the listen state of find phase */
+ P2P_STATE_FIND_PHASE_LISTEN = 4,
+ /* In the search state of find phase */
+ P2P_STATE_FIND_PHASE_SEARCH = 5,
+ /* In P2P provisioning discovery */
+ P2P_STATE_TX_PROVISION_DIS_REQ = 6,
+ P2P_STATE_RX_PROVISION_DIS_RSP = 7,
+ P2P_STATE_RX_PROVISION_DIS_REQ = 8,
+ /* Doing the group owner negotiation handshake */
+ P2P_STATE_GONEGO_ING = 9,
+ /* Finished the group owner negotiation handshake successfully */
+ P2P_STATE_GONEGO_OK = 10,
+ /* Finished the group owner negotiation handshake with failure */
+ P2P_STATE_GONEGO_FAIL = 11,
+ /* Received a P2P Invitation request that matches the profile */
+ P2P_STATE_RECV_INVITE_REQ_MATCH = 12,
+ /* Doing the P2P WPS */
+ P2P_STATE_PROVISIONING_ING = 13,
+ /* Finished the P2P WPS */
+ P2P_STATE_PROVISIONING_DONE = 14,
+ /* Transmitting the P2P Invitation request */
+ P2P_STATE_TX_INVITE_REQ = 15,
+ /* Received the P2P Invitation response */
+ P2P_STATE_RX_INVITE_RESP_OK = 16,
+ /* Received a P2P Invitation request that does not match the profile */
+ P2P_STATE_RECV_INVITE_REQ_DISMATCH = 17,
+ /* Received a P2P Invitation request while this device is the GO */
+ P2P_STATE_RECV_INVITE_REQ_GO = 18,
+ /* Received a P2P Invitation request to join an existing P2P group */
+ P2P_STATE_RECV_INVITE_REQ_JOIN = 19,
+ /* Received a P2P Invitation response indicating failure */
+ P2P_STATE_RX_INVITE_RESP_FAIL = 20,
+ /* Received a P2P negotiation response while information is not available */
+ P2P_STATE_RX_INFOR_NOREADY = 21,
+ /* Sending a P2P negotiation response while information is not available */
+ P2P_STATE_TX_INFOR_NOREADY = 22,
+};
+
+enum P2P_WPSINFO {
+ P2P_NO_WPSINFO = 0,
+ P2P_GOT_WPSINFO_PEER_DISPLAY_PIN = 1,
+ P2P_GOT_WPSINFO_SELF_DISPLAY_PIN = 2,
+ P2P_GOT_WPSINFO_PBC = 3,
+};
+
+#define P2P_PRIVATE_IOCTL_SET_LEN 64
+
+enum P2P_PROTO_WK_ID {
+ P2P_FIND_PHASE_WK = 0,
+ P2P_RESTORE_STATE_WK = 1,
+ P2P_PRE_TX_PROVDISC_PROCESS_WK = 2,
+ P2P_PRE_TX_NEGOREQ_PROCESS_WK = 3,
+ P2P_PRE_TX_INVITEREQ_PROCESS_WK = 4,
+ P2P_AP_P2P_CH_SWITCH_PROCESS_WK = 5,
+ P2P_RO_CH_WK = 6,
+};
+
+enum P2P_PS_STATE {
+ P2P_PS_DISABLE = 0,
+ P2P_PS_ENABLE = 1,
+ P2P_PS_SCAN = 2,
+ P2P_PS_SCAN_DONE = 3,
+ P2P_PS_ALLSTASLEEP = 4, /* for P2P GO */
+};
+
+enum P2P_PS_MODE {
+ P2P_PS_NONE = 0,
+ P2P_PS_CTWINDOW = 1,
+ P2P_PS_NOA = 2,
+ P2P_PS_MIX = 3, /* CTWindow and NoA */
+};
+
+/* =====================WFD Section===================== */
+/* For Wi-Fi Display */
+#define WFD_ATTR_DEVICE_INFO 0x00
+#define WFD_ATTR_ASSOC_BSSID 0x01
+#define WFD_ATTR_COUPLED_SINK_INFO 0x06
+#define WFD_ATTR_LOCAL_IP_ADDR 0x08
+#define WFD_ATTR_SESSION_INFO 0x09
+#define WFD_ATTR_ALTER_MAC 0x0a
+
+/* For WFD Device Information Attribute */
+#define WFD_DEVINFO_SOURCE 0x0000
+#define WFD_DEVINFO_PSINK 0x0001
+#define WFD_DEVINFO_SSINK 0x0002
+#define WFD_DEVINFO_DUAL 0x0003
+
+#define WFD_DEVINFO_SESSION_AVAIL 0x0010
+#define WFD_DEVINFO_WSD 0x0040
+#define WFD_DEVINFO_PC_TDLS 0x0080
+#define WFD_DEVINFO_HDCP_SUPPORT 0x0100
+
+#define IP_MCAST_MAC(mac) \
+ ((mac[0] == 0x01) && (mac[1] == 0x00) && (mac[2] == 0x5e))
+#define ICMPV6_MCAST_MAC(mac) \
+ ((mac[0] == 0x33) && (mac[1] == 0x33) && (mac[2] != 0xff))
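+
+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ *
+ *	u8 dst[ETH_ALEN] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb};
+ *
+ *	IP_MCAST_MAC(dst) is true for 01:00:5e:xx:xx:xx (IPv4 multicast),
+ *	and ICMPV6_MCAST_MAC(dst) is true for 33:33:xx:xx:xx:xx addresses
+ *	whose third byte is not 0xff.
+ */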
+
+#endif /* _WIFI_H_ */
diff --git a/drivers/staging/rtl8188eu/include/wlan_bssdef.h b/drivers/staging/rtl8188eu/include/wlan_bssdef.h
new file mode 100644
index 0000000..e70075d
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/wlan_bssdef.h
@@ -0,0 +1,347 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __WLAN_BSSDEF_H__
+#define __WLAN_BSSDEF_H__
+
+
+#define MAX_IE_SZ 768
+
+#define NDIS_802_11_LENGTH_SSID 32
+#define NDIS_802_11_LENGTH_RATES 8
+#define NDIS_802_11_LENGTH_RATES_EX 16
+
+#define NDIS_802_11_RSSI long /* in dBm */
+
+struct ndis_802_11_ssid {
+ u32 SsidLength;
+ u8 Ssid[32];
+};
+
+enum NDIS_802_11_NETWORK_TYPE {
+ Ndis802_11FH,
+ Ndis802_11DS,
+ Ndis802_11OFDM5,
+ Ndis802_11OFDM24,
+ Ndis802_11NetworkTypeMax /* dummy upper bound */
+};
+
+struct ndis_802_11_config_fh {
+ u32 Length; /* Length of structure */
+ u32 HopPattern; /* As defined by 802.11, MSB set */
+ u32 HopSet; /* to one if non-802.11 */
+ u32 DwellTime; /* units are Kusec */
+};
+
+/*
+ * FW will only save the channel number in DSConfig.
+ * ODI Handler will convert the channel number to freq. number.
+ */
+struct ndis_802_11_config {
+ u32 Length; /* Length of structure */
+ u32 BeaconPeriod; /* units are Kusec */
+ u32 ATIMWindow; /* units are Kusec */
+ u32 DSConfig; /* Frequency, units are kHz */
+ struct ndis_802_11_config_fh FHConfig;
+};
+
+enum ndis_802_11_network_infra {
+ Ndis802_11IBSS,
+ Ndis802_11Infrastructure,
+ Ndis802_11AutoUnknown,
+ Ndis802_11InfrastructureMax, /* dummy upper bound */
+ Ndis802_11APMode
+};
+
+struct ndis_802_11_fixed_ie {
+ u8 Timestamp[8];
+ u16 BeaconInterval;
+ u16 Capabilities;
+};
+
+
+
+struct ndis_802_11_var_ie {
+ u8 ElementID;
+ u8 Length;
+ u8 data[1];
+};
+
+/*
+ * Length is the 4-byte multiple of the sum of
+ * [ETH_ALEN] + 2 + sizeof (struct ndis_802_11_ssid) + sizeof (u32)
+ * + sizeof (NDIS_802_11_RSSI) + sizeof (enum NDIS_802_11_NETWORK_TYPE)
+ * + sizeof (struct ndis_802_11_config)
+ * + NDIS_802_11_LENGTH_RATES_EX + IELength
+ *
+ * Except the IELength, all other fields are fixed length.
+ * Therefore, we can define a macro to represent the partial sum. */
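+
+/*
+ * Illustrative sketch only; no such macro is defined in this header and the
+ * name below is hypothetical:
+ *
+ *	#define NDIS_802_11_BSSID_FIXED_SZ \
+ *		(ETH_ALEN + 2 + sizeof(struct ndis_802_11_ssid) + sizeof(u32) + \
+ *		 sizeof(NDIS_802_11_RSSI) + sizeof(enum NDIS_802_11_NETWORK_TYPE) + \
+ *		 sizeof(struct ndis_802_11_config) + NDIS_802_11_LENGTH_RATES_EX)
+ *
+ * The total length would then be NDIS_802_11_BSSID_FIXED_SZ + IELength,
+ * rounded up to a multiple of 4 bytes.
+ */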
+
+enum ndis_802_11_auth_mode {
+ Ndis802_11AuthModeOpen,
+ Ndis802_11AuthModeShared,
+ Ndis802_11AuthModeAutoSwitch,
+ Ndis802_11AuthModeWPA,
+ Ndis802_11AuthModeWPAPSK,
+ Ndis802_11AuthModeWPANone,
+ Ndis802_11AuthModeWAPI,
+ Ndis802_11AuthModeMax /* Not a real mode, upper bound */
+};
+
+enum ndis_802_11_wep_status {
+ Ndis802_11WEPEnabled,
+ Ndis802_11Encryption1Enabled = Ndis802_11WEPEnabled,
+ Ndis802_11WEPDisabled,
+ Ndis802_11EncryptionDisabled = Ndis802_11WEPDisabled,
+ Ndis802_11WEPKeyAbsent,
+ Ndis802_11Encryption1KeyAbsent = Ndis802_11WEPKeyAbsent,
+ Ndis802_11WEPNotSupported,
+ Ndis802_11EncryptionNotSupported = Ndis802_11WEPNotSupported,
+ Ndis802_11Encryption2Enabled,
+ Ndis802_11Encryption2KeyAbsent,
+ Ndis802_11Encryption3Enabled,
+ Ndis802_11Encryption3KeyAbsent,
+ Ndis802_11_EncryptionWAPI
+};
+
+#define NDIS_802_11_AI_REQFI_CAPABILITIES 1
+#define NDIS_802_11_AI_REQFI_LISTENINTERVAL 2
+#define NDIS_802_11_AI_REQFI_CURRENTAPADDRESS 4
+
+#define NDIS_802_11_AI_RESFI_CAPABILITIES 1
+#define NDIS_802_11_AI_RESFI_STATUSCODE 2
+#define NDIS_802_11_AI_RESFI_ASSOCIATIONID 4
+
+struct ndis_802_11_ai_reqfi {
+ u16 Capabilities;
+ u16 ListenInterval;
+ unsigned char CurrentAPAddress[ETH_ALEN];
+};
+
+struct ndis_802_11_ai_resfi {
+ u16 Capabilities;
+ u16 StatusCode;
+ u16 AssociationId;
+};
+
+struct ndis_802_11_assoc_info {
+ u32 Length;
+ u16 AvailableRequestFixedIEs;
+ struct ndis_802_11_ai_reqfi RequestFixedIEs;
+ u32 RequestIELength;
+ u32 OffsetRequestIEs;
+ u16 AvailableResponseFixedIEs;
+ struct ndis_802_11_ai_resfi ResponseFixedIEs;
+ u32 ResponseIELength;
+ u32 OffsetResponseIEs;
+};
+
+enum ndis_802_11_reload_def {
+ Ndis802_11ReloadWEPKeys
+};
+
+/* Key mapping keys require a BSSID */
+struct ndis_802_11_key {
+ u32 Length; /* Length of this structure */
+ u32 KeyIndex;
+ u32 KeyLength; /* length of key in bytes */
+ unsigned char BSSID[ETH_ALEN];
+ unsigned long long KeyRSC;
+ u8 KeyMaterial[32]; /* var len depending on above field */
+};
+
+struct ndis_802_11_remove_key {
+ u32 Length; /* Length */
+ u32 KeyIndex;
+ unsigned char BSSID[ETH_ALEN];
+};
+
+struct ndis_802_11_wep {
+ u32 Length; /* Length of this structure */
+ u32 KeyIndex; /* 0 is the per-client key,
+ * 1-N are the global keys */
+ u32 KeyLength; /* length of key in bytes */
+ u8 KeyMaterial[16];/* variable len depending on above field */
+};
+
+struct ndis_802_11_auth_req {
+ u32 Length; /* Length of structure */
+ unsigned char Bssid[ETH_ALEN];
+ u32 Flags;
+};
+
+enum ndis_802_11_status_type {
+ Ndis802_11StatusType_Authentication,
+ Ndis802_11StatusType_MediaStreamMode,
+ Ndis802_11StatusType_PMKID_CandidateList,
+ Ndis802_11StatusTypeMax /* not a real type, defined as
+ * an upper bound */
+};
+
+struct ndis_802_11_status_ind {
+ enum ndis_802_11_status_type StatusType;
+};
+
+/* mask for authentication/integrity fields */
+#define NDIS_802_11_AUTH_REQUEST_AUTH_FIELDS 0x0f
+#define NDIS_802_11_AUTH_REQUEST_REAUTH 0x01
+#define NDIS_802_11_AUTH_REQUEST_KEYUPDATE 0x02
+#define NDIS_802_11_AUTH_REQUEST_PAIRWISE_ERROR 0x06
+#define NDIS_802_11_AUTH_REQUEST_GROUP_ERROR 0x0E
+
+/* MIC check time, 60 seconds. */
+#define MIC_CHECK_TIME 60000000
+
+struct ndis_802_11_auth_evt {
+ struct ndis_802_11_status_ind Status;
+ struct ndis_802_11_auth_req Request[1];
+};
+
+struct ndis_802_11_test {
+ u32 Length;
+ u32 Type;
+ union {
+ struct ndis_802_11_auth_evt AuthenticationEvent;
+ NDIS_802_11_RSSI RssiTrigger;
+ } tt;
+};
+
+
+#ifndef Ndis802_11APMode
+#define Ndis802_11APMode (Ndis802_11InfrastructureMax+1)
+#endif
+
+struct wlan_phy_info {
+ u8 SignalStrength;/* in percentage */
+ u8 SignalQuality;/* in percentage */
+ u8 Optimum_antenna; /* for Antenna diversity */
+ u8 Reserved_0;
+};
+
+struct wlan_bcn_info {
+ /* This information is obtained from rtw_get_encrypt_info() when
+ * translating scan results for the UI */
+ u8 encryp_protocol;/* ENCRYP_PROTOCOL_E: OPEN/WEP/WPA/WPA2/WAPI */
+ int group_cipher; /* WPA/WPA2 group cipher */
+ int pairwise_cipher;/* WPA/WPA2/WEP pairwise cipher */
+ int is_8021x;
+
+ /* bwmode 20/40 and ch_offset UP/LOW */
+ unsigned short ht_cap_info;
+ unsigned char ht_info_infos_0;
+};
+
+/* Temporarily add #pragma pack for the structure alignment issue of
+ * struct wlan_bssid_ex and get_wlan_bssid_ex_sz()
+ */
+struct wlan_bssid_ex {
+ u32 Length;
+ unsigned char MacAddress[ETH_ALEN];
+ u8 Reserved[2];/* Reserved[0]: frame type (beacon, probe request, or probe response) */
+ struct ndis_802_11_ssid Ssid;
+ u32 Privacy;
+ NDIS_802_11_RSSI Rssi;/* in dBm, raw data, obtained from the PHY */
+ enum NDIS_802_11_NETWORK_TYPE NetworkTypeInUse;
+ struct ndis_802_11_config Configuration;
+ enum ndis_802_11_network_infra InfrastructureMode;
+ unsigned char SupportedRates[NDIS_802_11_LENGTH_RATES_EX];
+ struct wlan_phy_info PhyInfo;
+ u32 IELength;
+ u8 IEs[MAX_IE_SZ]; /* timestamp, beacon interval, and
+ * capability information */
+} __packed;
+
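+/* Number of bytes actually used by @bss: the fixed part of
+ * struct wlan_bssid_ex plus only the valid IE bytes (IELength),
+ * rather than the full MAX_IE_SZ buffer.
+ */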
+static inline uint get_wlan_bssid_ex_sz(struct wlan_bssid_ex *bss)
+{
+ return sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + bss->IELength;
+}
+
+struct wlan_network {
+ struct list_head list;
+ int network_type; /* refer to ieee80211.h for WIRELESS_11A/B/G */
+ int fixed; /* set when the entry should not be removed
+ * during site surveying */
+ unsigned long last_scanned; /* timestamp for the network */
+ int aid; /* only valid when a BSS is joined */
+ int join_res;
+ struct wlan_bssid_ex network; /* must be the last item */
+ struct wlan_bcn_info BcnInfo;
+};
+
+enum VRTL_CARRIER_SENSE {
+ DISABLE_VCS,
+ ENABLE_VCS,
+ AUTO_VCS
+};
+
+enum VCS_TYPE {
+ NONE_VCS,
+ RTS_CTS,
+ CTS_TO_SELF
+};
+
+#define PWR_CAM 0
+#define PWR_MINPS 1
+#define PWR_MAXPS 2
+#define PWR_UAPSD 3
+#define PWR_VOIP 4
+
+enum UAPSD_MAX_SP {
+ NO_LIMIT,
+ TWO_MSDU,
+ FOUR_MSDU,
+ SIX_MSDU
+};
+
+#define NUM_PRE_AUTH_KEY 16
+#define NUM_PMKID_CACHE NUM_PRE_AUTH_KEY
+
+/*
+* WPA2
+*/
+
+struct pmkid_candidate {
+ unsigned char BSSID[ETH_ALEN];
+ u32 Flags;
+};
+
+struct ndis_802_11_pmkid_list {
+ u32 Version; /* Version of the structure */
+ u32 NumCandidates; /* No. of pmkid candidates */
+ struct pmkid_candidate CandidateList[1];
+};
+
+struct ndis_802_11_auth_encrypt {
+ enum ndis_802_11_auth_mode AuthModeSupported;
+ enum ndis_802_11_wep_status EncryptStatusSupported;
+};
+
+struct ndis_802_11_cap {
+ u32 Length;
+ u32 Version;
+ u32 NoOfPMKIDs;
+ u32 NoOfAuthEncryptPairsSupported;
+ struct ndis_802_11_auth_encrypt AuthenticationEncryptionSupported[1];
+};
+
+u8 key_2char2num(u8 hch, u8 lch);
+u8 key_char2num(u8 ch);
+u8 str_2char2num(u8 hch, u8 lch);
+
+#endif /* __WLAN_BSSDEF_H__ */
diff --git a/drivers/staging/rtl8188eu/include/xmit_osdep.h b/drivers/staging/rtl8188eu/include/xmit_osdep.h
new file mode 100644
index 0000000..2ff622b
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/xmit_osdep.h
@@ -0,0 +1,67 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __XMIT_OSDEP_H_
+#define __XMIT_OSDEP_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+struct pkt_file {
+ struct sk_buff *pkt;
+ size_t pkt_len; /* remaining length of the opened packet file */
+ unsigned char *cur_buffer;
+ u8 *buf_start;
+ u8 *cur_addr;
+ size_t buf_len;
+};
+
+extern int rtw_ht_enable;
+extern int rtw_cbw40_enable;
+extern int rtw_ampdu_enable;/* for enabling TX AMPDU */
+
+#define NR_XMITFRAME 256
+
+struct xmit_priv;
+struct pkt_attrib;
+struct sta_xmit_priv;
+struct xmit_frame;
+struct xmit_buf;
+
+int rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev);
+
+void rtw_os_xmit_schedule(struct adapter *padapter);
+
+int rtw_os_xmit_resource_alloc(struct adapter *padapter,
+ struct xmit_buf *pxmitbuf, u32 alloc_sz);
+void rtw_os_xmit_resource_free(struct adapter *padapter,
+ struct xmit_buf *pxmitbuf, u32 free_sz);
+
+void rtw_set_tx_chksum_offload(struct sk_buff *pkt, struct pkt_attrib *pattrib);
+
+uint rtw_remainder_len(struct pkt_file *pfile);
+void _rtw_open_pktfile(struct sk_buff *pkt, struct pkt_file *pfile);
+uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen);
+int rtw_endofpktfile(struct pkt_file *pfile);
+
+void rtw_os_pkt_complete(struct adapter *padapter, struct sk_buff *pkt);
+void rtw_os_xmit_complete(struct adapter *padapter,
+ struct xmit_frame *pxframe);
+
+#endif /* __XMIT_OSDEP_H_ */
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
new file mode 100644
index 0000000..95953eb
--- /dev/null
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -0,0 +1,8222 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _IOCTL_LINUX_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <wlan_bssdef.h>
+#include <rtw_debug.h>
+#include <wifi.h>
+#include <rtw_mlme.h>
+#include <rtw_mlme_ext.h>
+#include <rtw_ioctl.h>
+#include <rtw_ioctl_set.h>
+#include <rtw_mp_ioctl.h>
+#include <usb_ops.h>
+#include <rtw_version.h>
+#include <rtl8188e_hal.h>
+
+#include <rtw_mp.h>
+#include <rtw_iol.h>
+
+#define RTL_IOCTL_WPA_SUPPLICANT (SIOCIWFIRSTPRIV + 30)
+
+#define SCAN_ITEM_SIZE 768
+#define MAX_CUSTOM_LEN 64
+#define RATE_COUNT 4
+
+/* combo scan */
+#define WEXT_CSCAN_AMOUNT 9
+#define WEXT_CSCAN_BUF_LEN 360
+#define WEXT_CSCAN_HEADER "CSCAN S\x01\x00\x00S\x00"
+#define WEXT_CSCAN_HEADER_SIZE 12
+#define WEXT_CSCAN_SSID_SECTION 'S'
+#define WEXT_CSCAN_CHANNEL_SECTION 'C'
+#define WEXT_CSCAN_NPROBE_SECTION 'N'
+#define WEXT_CSCAN_ACTV_DWELL_SECTION 'A'
+#define WEXT_CSCAN_PASV_DWELL_SECTION 'P'
+#define WEXT_CSCAN_HOME_DWELL_SECTION 'H'
+#define WEXT_CSCAN_TYPE_SECTION 'T'
+
+static struct mp_ioctl_handler mp_ioctl_hdl[] = {
+/*0*/ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_start_test_hdl, OID_RT_PRO_START_TEST)
+ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_stop_test_hdl, OID_RT_PRO_STOP_TEST)
+
+ GEN_HANDLER(sizeof(struct rwreg_param), rtl8188eu_oid_rt_pro_read_register_hdl, OID_RT_PRO_READ_REGISTER)
+ GEN_HANDLER(sizeof(struct rwreg_param), rtl8188eu_oid_rt_pro_write_register_hdl, OID_RT_PRO_WRITE_REGISTER)
+ GEN_HANDLER(sizeof(struct bb_reg_param), rtl8188eu_oid_rt_pro_read_bb_reg_hdl, OID_RT_PRO_READ_BB_REG)
+/*5*/ GEN_HANDLER(sizeof(struct bb_reg_param), rtl8188eu_oid_rt_pro_write_bb_reg_hdl, OID_RT_PRO_WRITE_BB_REG)
+ GEN_HANDLER(sizeof(struct rf_reg_param), rtl8188eu_oid_rt_pro_read_rf_reg_hdl, OID_RT_PRO_RF_READ_REGISTRY)
+ GEN_HANDLER(sizeof(struct rf_reg_param), rtl8188eu_oid_rt_pro_write_rf_reg_hdl, OID_RT_PRO_RF_WRITE_REGISTRY)
+
+ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_set_channel_direct_call_hdl, OID_RT_PRO_SET_CHANNEL_DIRECT_CALL)
+ GEN_HANDLER(sizeof(struct txpower_param), rtl8188eu_oid_rt_pro_set_tx_power_control_hdl, OID_RT_PRO_SET_TX_POWER_CONTROL)
+/*10*/ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_set_data_rate_hdl, OID_RT_PRO_SET_DATA_RATE)
+ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_set_bandwidth_hdl, OID_RT_SET_BANDWIDTH)
+ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_set_antenna_bb_hdl, OID_RT_PRO_SET_ANTENNA_BB)
+
+ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_set_continuous_tx_hdl, OID_RT_PRO_SET_CONTINUOUS_TX)
+ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_set_single_carrier_tx_hdl, OID_RT_PRO_SET_SINGLE_CARRIER_TX)
+/*15*/ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_set_carrier_suppression_tx_hdl, OID_RT_PRO_SET_CARRIER_SUPPRESSION_TX)
+ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_set_single_tone_tx_hdl, OID_RT_PRO_SET_SINGLE_TONE_TX)
+
+ EXT_MP_IOCTL_HANDLER(0, xmit_packet, 0)
+
+ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_set_rx_packet_type_hdl, OID_RT_SET_RX_PACKET_TYPE)
+ GEN_HANDLER(0, rtl8188eu_oid_rt_reset_phy_rx_packet_count_hdl, OID_RT_RESET_PHY_RX_PACKET_COUNT)
+/*20*/ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_get_phy_rx_packet_received_hdl, OID_RT_GET_PHY_RX_PACKET_RECEIVED)
+ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_get_phy_rx_packet_crc32_error_hdl, OID_RT_GET_PHY_RX_PACKET_CRC32_ERROR)
+
+ GEN_HANDLER(sizeof(struct eeprom_rw_param), NULL, 0)
+ GEN_HANDLER(sizeof(struct eeprom_rw_param), NULL, 0)
+ GEN_HANDLER(sizeof(struct efuse_access_struct), rtl8188eu_oid_rt_pro_efuse_hdl, OID_RT_PRO_EFUSE)
+/*25*/ GEN_HANDLER(0, rtl8188eu_oid_rt_pro_efuse_map_hdl, OID_RT_PRO_EFUSE_MAP)
+ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_get_efuse_max_size_hdl, OID_RT_GET_EFUSE_MAX_SIZE)
+ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_get_efuse_current_size_hdl, OID_RT_GET_EFUSE_CURRENT_SIZE)
+
+ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_get_thermal_meter_hdl, OID_RT_PRO_GET_THERMAL_METER)
+ GEN_HANDLER(sizeof(u8), rtl8188eu_oid_rt_pro_set_power_tracking_hdl, OID_RT_PRO_SET_POWER_TRACKING)
+/*30*/ GEN_HANDLER(sizeof(u8), rtl8188eu_oid_rt_set_power_down_hdl, OID_RT_SET_POWER_DOWN)
+/*31*/ GEN_HANDLER(0, rtl8188eu_oid_rt_pro_trigger_gpio_hdl, 0)
+};
+
+static u32 rtw_rates[] = {1000000, 2000000, 5500000, 11000000,
+ 6000000, 9000000, 12000000, 18000000, 24000000, 36000000,
+ 48000000, 54000000};
+
+static const char * const iw_operation_mode[] = {
+ "Auto", "Ad-Hoc", "Managed", "Master", "Repeater",
+ "Secondary", "Monitor"
+};
+
+static int hex2num_i(char c)
+{
+ if (c >= '0' && c <= '9')
+ return c - '0';
+ if (c >= 'a' && c <= 'f')
+ return c - 'a' + 10;
+ if (c >= 'A' && c <= 'F')
+ return c - 'A' + 10;
+ return -1;
+}
+
+/**
+ * hwaddr_aton - Convert ASCII string to MAC address
+ * @txt: MAC address as a string (e.g., "00:11:22:33:44:55")
+ * @addr: Buffer for the MAC address (ETH_ALEN = 6 bytes)
+ * Returns: 0 on success, -1 on failure (e.g., string not a MAC address)
+ */
+static int hwaddr_aton_i(const char *txt, u8 *addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ int a, b;
+
+ a = hex2num_i(*txt++);
+ if (a < 0)
+ return -1;
+ b = hex2num_i(*txt++);
+ if (b < 0)
+ return -1;
+ *addr++ = (a << 4) | b;
+ if (i < 5 && *txt++ != ':')
+ return -1;
+ }
+
+ return 0;
+}
+
+void indicate_wx_scan_complete_event(struct adapter *padapter)
+{
+ union iwreq_data wrqu;
+
+ _rtw_memset(&wrqu, 0, sizeof(union iwreq_data));
+ wireless_send_event(padapter->pnetdev, SIOCGIWSCAN, &wrqu, NULL);
+}
+
+void rtw_indicate_wx_assoc_event(struct adapter *padapter)
+{
+ union iwreq_data wrqu;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ _rtw_memset(&wrqu, 0, sizeof(union iwreq_data));
+
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+
+ memcpy(wrqu.ap_addr.sa_data, pmlmepriv->cur_network.network.MacAddress, ETH_ALEN);
+
+ DBG_88E_LEVEL(_drv_always_, "assoc success\n");
+ wireless_send_event(padapter->pnetdev, SIOCGIWAP, &wrqu, NULL);
+}
+
+void rtw_indicate_wx_disassoc_event(struct adapter *padapter)
+{
+ union iwreq_data wrqu;
+
+ _rtw_memset(&wrqu, 0, sizeof(union iwreq_data));
+
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ _rtw_memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+
+ DBG_88E_LEVEL(_drv_always_, "indicate disassoc\n");
+ wireless_send_event(padapter->pnetdev, SIOCGIWAP, &wrqu, NULL);
+}
+
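+/* Translate one scanned network entry into a stream of wireless-extensions
+ * events (AP address, ESSID, protocol name, mode, frequency, encryption,
+ * rates, WPA/RSN/WPS IEs, and quality) appended between start and stop;
+ * returns the updated write position.
+ */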
+static char *translate_scan(struct adapter *padapter,
+ struct iw_request_info *info,
+ struct wlan_network *pnetwork,
+ char *start, char *stop)
+{
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct iw_event iwe;
+ u16 cap;
+ __le16 le_tmp;
+ u32 ht_ielen = 0;
+ char custom[MAX_CUSTOM_LEN];
+ char *p;
+ u16 max_rate = 0, rate, ht_cap = false;
+ u32 i = 0;
+ u8 bw_40MHz = 0, short_GI = 0;
+ u16 mcs_rate = 0;
+ u8 ss, sq;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
+ u32 blnGotP2PIE = false;
+
+ /* User is doing the P2P device discovery */
+ /* The SSID prefix should be "DIRECT-" and the IEs should contain the P2P IE. */
+ /* If not, the driver should ignore this AP and go to the next AP. */
+
+ /* Verifying the SSID */
+ if (!memcmp(pnetwork->network.Ssid.Ssid, pwdinfo->p2p_wildcard_ssid, P2P_WILDCARD_SSID_LEN)) {
+ u32 p2pielen = 0;
+
+ if (pnetwork->network.Reserved[0] == 2) {/* Probe Request */
+ /* Verifying the P2P IE */
+ if (rtw_get_p2p_ie(pnetwork->network.IEs, pnetwork->network.IELength, NULL, &p2pielen))
+ blnGotP2PIE = true;
+ } else {/* Beacon or Probe Response */
+ /* Verifying the P2P IE */
+ if (rtw_get_p2p_ie(&pnetwork->network.IEs[12], pnetwork->network.IELength - 12, NULL, &p2pielen))
+ blnGotP2PIE = true;
+ }
+ }
+
+ if (!blnGotP2PIE)
+ return start;
+ }
+#endif /* CONFIG_88EU_P2P */
+
+ /* AP MAC address */
+ iwe.cmd = SIOCGIWAP;
+ iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+
+ memcpy(iwe.u.ap_addr.sa_data, pnetwork->network.MacAddress, ETH_ALEN);
+ start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN);
+
+ /* Add the ESSID */
+ iwe.cmd = SIOCGIWESSID;
+ iwe.u.data.flags = 1;
+ iwe.u.data.length = min_t(u16, pnetwork->network.Ssid.SsidLength, 32);
+ start = iwe_stream_add_point(info, start, stop, &iwe, pnetwork->network.Ssid.Ssid);
+
+ /* parsing HT_CAP_IE */
+ p = rtw_get_ie(&pnetwork->network.IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pnetwork->network.IELength-12);
+
+ if (p && ht_ielen > 0) {
+ struct rtw_ieee80211_ht_cap *pht_capie;
+ ht_cap = true;
+ pht_capie = (struct rtw_ieee80211_ht_cap *)(p+2);
+ memcpy(&mcs_rate, pht_capie->supp_mcs_set, 2);
+ bw_40MHz = (pht_capie->cap_info&IEEE80211_HT_CAP_SUP_WIDTH) ? 1 : 0;
+ short_GI = (pht_capie->cap_info&(IEEE80211_HT_CAP_SGI_20|IEEE80211_HT_CAP_SGI_40)) ? 1 : 0;
+ }
+
+ /* Add the protocol name */
+ iwe.cmd = SIOCGIWNAME;
+ if ((rtw_is_cckratesonly_included((u8 *)&pnetwork->network.SupportedRates))) {
+ if (ht_cap)
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bn");
+ else
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11b");
+ } else if ((rtw_is_cckrates_included((u8 *)&pnetwork->network.SupportedRates))) {
+ if (ht_cap)
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bgn");
+ else
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bg");
+ } else {
+ if (pnetwork->network.Configuration.DSConfig > 14) {
+ if (ht_cap)
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11an");
+ else
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11a");
+ } else {
+ if (ht_cap)
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11gn");
+ else
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11g");
+ }
+ }
+
+ start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_CHAR_LEN);
+
+ /* Add mode */
+ iwe.cmd = SIOCGIWMODE;
+ memcpy(&le_tmp, rtw_get_capability_from_ie(pnetwork->network.IEs), 2);
+
+ cap = le16_to_cpu(le_tmp);
+
+ if (cap & (WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_BSS)) {
+ if (cap & WLAN_CAPABILITY_BSS)
+ iwe.u.mode = IW_MODE_MASTER;
+ else
+ iwe.u.mode = IW_MODE_ADHOC;
+
+ start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_UINT_LEN);
+ }
+
+ if (pnetwork->network.Configuration.DSConfig < 1)
+ pnetwork->network.Configuration.DSConfig = 1;
+
+ /* Add frequency/channel */
+ iwe.cmd = SIOCGIWFREQ;
+ iwe.u.freq.m = rtw_ch2freq(pnetwork->network.Configuration.DSConfig) * 100000;
+ iwe.u.freq.e = 1;
+ iwe.u.freq.i = pnetwork->network.Configuration.DSConfig;
+ start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_FREQ_LEN);
+
+ /* Add encryption capability */
+ iwe.cmd = SIOCGIWENCODE;
+ if (cap & WLAN_CAPABILITY_PRIVACY)
+ iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ else
+ iwe.u.data.flags = IW_ENCODE_DISABLED;
+ iwe.u.data.length = 0;
+ start = iwe_stream_add_point(info, start, stop, &iwe, pnetwork->network.Ssid.Ssid);
+
+ /*Add basic and extended rates */
+ max_rate = 0;
+ p = custom;
+ p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
+ while (pnetwork->network.SupportedRates[i] != 0) {
+ rate = pnetwork->network.SupportedRates[i]&0x7F;
+ if (rate > max_rate)
+ max_rate = rate;
+ p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
+ "%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
+ i++;
+ }
+
+ if (ht_cap) {
+ if (mcs_rate&0x8000)/* MCS15 */
+ max_rate = (bw_40MHz) ? ((short_GI) ? 300 : 270) : ((short_GI) ? 144 : 130);
+ else if (mcs_rate&0x0080)/* MCS7 */
+ ;
+ else/* default MCS7 */
+ max_rate = (bw_40MHz) ? ((short_GI) ? 150 : 135) : ((short_GI) ? 72 : 65);
+
+ max_rate = max_rate*2;/* Mbps/2; */
+ }
+
+ iwe.cmd = SIOCGIWRATE;
+ iwe.u.bitrate.fixed = 0;
+ iwe.u.bitrate.disabled = 0;
+ iwe.u.bitrate.value = max_rate * 500000;
+ start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_PARAM_LEN);
+
+ /* parsing WPA/WPA2 IE */
+ {
+ u8 buf[MAX_WPA_IE_LEN];
+ u8 wpa_ie[255], rsn_ie[255];
+ u16 wpa_len = 0, rsn_len = 0;
+ u8 *p;
+
+ rtw_get_sec_ie(pnetwork->network.IEs, pnetwork->network.IELength, rsn_ie, &rsn_len, wpa_ie, &wpa_len);
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: ssid =%s\n", pnetwork->network.Ssid.Ssid));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: wpa_len =%d rsn_len =%d\n", wpa_len, rsn_len));
+
+ if (wpa_len > 0) {
+ p = buf;
+ _rtw_memset(buf, 0, MAX_WPA_IE_LEN);
+ p += sprintf(p, "wpa_ie =");
+ for (i = 0; i < wpa_len; i++)
+ p += sprintf(p, "%02x", wpa_ie[i]);
+
+ _rtw_memset(&iwe, 0, sizeof(iwe));
+ iwe.cmd = IWEVCUSTOM;
+ iwe.u.data.length = strlen(buf);
+ start = iwe_stream_add_point(info, start, stop, &iwe, buf);
+
+ _rtw_memset(&iwe, 0, sizeof(iwe));
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = wpa_len;
+ start = iwe_stream_add_point(info, start, stop, &iwe, wpa_ie);
+ }
+ if (rsn_len > 0) {
+ p = buf;
+ _rtw_memset(buf, 0, MAX_WPA_IE_LEN);
+ p += sprintf(p, "rsn_ie =");
+ for (i = 0; i < rsn_len; i++)
+ p += sprintf(p, "%02x", rsn_ie[i]);
+ _rtw_memset(&iwe, 0, sizeof(iwe));
+ iwe.cmd = IWEVCUSTOM;
+ iwe.u.data.length = strlen(buf);
+ start = iwe_stream_add_point(info, start, stop, &iwe, buf);
+
+ _rtw_memset(&iwe, 0, sizeof(iwe));
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = rsn_len;
+ start = iwe_stream_add_point(info, start, stop, &iwe, rsn_ie);
+ }
+ }
+
+ {/* parsing WPS IE */
+ uint cnt = 0, total_ielen;
+ u8 *wpsie_ptr = NULL;
+ uint wps_ielen = 0;
+
+ u8 *ie_ptr = pnetwork->network.IEs + _FIXED_IE_LENGTH_;
+ total_ielen = pnetwork->network.IELength - _FIXED_IE_LENGTH_;
+
+ while (cnt < total_ielen) {
+ if (rtw_is_wps_ie(&ie_ptr[cnt], &wps_ielen) && (wps_ielen > 2)) {
+ wpsie_ptr = &ie_ptr[cnt];
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = (u16)wps_ielen;
+ start = iwe_stream_add_point(info, start, stop, &iwe, wpsie_ptr);
+ }
+ cnt += ie_ptr[cnt+1]+2; /* goto next */
+ }
+ }
+
+ /* Add quality statistics */
+ iwe.cmd = IWEVQUAL;
+ iwe.u.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_INVALID;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true &&
+ is_same_network(&pmlmepriv->cur_network.network, &pnetwork->network)) {
+ ss = padapter->recvpriv.signal_strength;
+ sq = padapter->recvpriv.signal_qual;
+ } else {
+ ss = pnetwork->network.PhyInfo.SignalStrength;
+ sq = pnetwork->network.PhyInfo.SignalQuality;
+ }
+
+ iwe.u.qual.level = (u8)ss;
+ iwe.u.qual.qual = (u8)sq; /* signal quality */
+ iwe.u.qual.noise = 0; /* noise level */
+ start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_QUAL_LEN);
+ return start;
+}
+
+static int wpa_set_auth_algs(struct net_device *dev, u32 value)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int ret = 0;
+
+ if ((value & AUTH_ALG_SHARED_KEY) && (value & AUTH_ALG_OPEN_SYSTEM)) {
+ DBG_88E("wpa_set_auth_algs, AUTH_ALG_SHARED_KEY and AUTH_ALG_OPEN_SYSTEM [value:0x%x]\n", value);
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeAutoSwitch;
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Auto;
+ } else if (value & AUTH_ALG_SHARED_KEY) {
+ DBG_88E("wpa_set_auth_algs, AUTH_ALG_SHARED_KEY [value:0x%x]\n", value);
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeShared;
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Shared;
+ } else if (value & AUTH_ALG_OPEN_SYSTEM) {
+ DBG_88E("wpa_set_auth_algs, AUTH_ALG_OPEN_SYSTEM\n");
+ if (padapter->securitypriv.ndisauthtype < Ndis802_11AuthModeWPAPSK) {
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
+ }
+ } else if (value & AUTH_ALG_LEAP) {
+ DBG_88E("wpa_set_auth_algs, AUTH_ALG_LEAP\n");
+ } else {
+ DBG_88E("wpa_set_auth_algs, error!\n");
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
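+/* Install keys requested by the supplicant through an ieee_param crypt
+ * request: WEP keys are handled directly here, while 802.1X (WPA/WPA2)
+ * pairwise and group keys are copied into the station/security state and
+ * pushed to the hardware.
+ */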
+static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
+{
+ int ret = 0;
+ u32 wep_key_idx, wep_key_len, wep_total_len;
+ struct ndis_802_11_wep *pwep = NULL;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+#endif /* CONFIG_88EU_P2P */
+
+_func_enter_;
+
+ param->u.crypt.err = 0;
+ param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
+
+ if (param_len < (u32) ((u8 *)param->u.crypt.key - (u8 *)param) + param->u.crypt.key_len) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+ param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+ param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
+ if (param->u.crypt.idx >= WEP_KEYS) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ } else {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (strcmp(param->u.crypt.alg, "WEP") == 0) {
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("wpa_set_encryption, crypt.alg = WEP\n"));
+ DBG_88E("wpa_set_encryption, crypt.alg = WEP\n");
+
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
+ padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
+
+ wep_key_idx = param->u.crypt.idx;
+ wep_key_len = param->u.crypt.key_len;
+
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("(1)wep_key_idx =%d\n", wep_key_idx));
+ DBG_88E("(1)wep_key_idx =%d\n", wep_key_idx);
+
+ if (wep_key_idx > WEP_KEYS)
+ return -EINVAL;
+
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("(2)wep_key_idx =%d\n", wep_key_idx));
+
+ if (wep_key_len > 0) {
+ wep_key_len = wep_key_len <= 5 ? 5 : 13;
+ wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
+ pwep = (struct ndis_802_11_wep *)rtw_malloc(wep_total_len);
+ if (pwep == NULL) {
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, (" wpa_set_encryption: pwep allocate fail !!!\n"));
+ goto exit;
+ }
+ _rtw_memset(pwep, 0, wep_total_len);
+ pwep->KeyLength = wep_key_len;
+ pwep->Length = wep_total_len;
+ if (wep_key_len == 13) {
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
+ padapter->securitypriv.dot118021XGrpPrivacy = _WEP104_;
+ }
+ } else {
+ ret = -EINVAL;
+ goto exit;
+ }
+ pwep->KeyIndex = wep_key_idx;
+ pwep->KeyIndex |= 0x80000000;
+ memcpy(pwep->KeyMaterial, param->u.crypt.key, pwep->KeyLength);
+ if (param->u.crypt.set_tx) {
+ DBG_88E("wep, set_tx = 1\n");
+ if (rtw_set_802_11_add_wep(padapter, pwep) == (u8)_FAIL)
+ ret = -EOPNOTSUPP;
+ } else {
+ DBG_88E("wep, set_tx = 0\n");
+ if (wep_key_idx >= WEP_KEYS) {
+ ret = -EOPNOTSUPP;
+ goto exit;
+ }
+ memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), pwep->KeyMaterial, pwep->KeyLength);
+ psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->KeyLength;
+ rtw_set_key(padapter, psecuritypriv, wep_key_idx, 0);
+ }
+ goto exit;
+ }
+
+ if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) { /* 802_1x */
+ struct sta_info *psta, *pbcmc_sta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_MP_STATE)) { /* sta mode */
+ psta = rtw_get_stainfo(pstapriv, get_bssid(pmlmepriv));
+ if (psta == NULL) {
+ ;
+ } else {
+ if (strcmp(param->u.crypt.alg, "none") != 0)
+ psta->ieee8021x_blocked = false;
+
+ if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) ||
+ (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled))
+ psta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
+
+ if (param->u.crypt.set_tx == 1) { /* pairwise key */
+ memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+
+ if (strcmp(param->u.crypt.alg, "TKIP") == 0) { /* set mic key */
+ memcpy(psta->dot11tkiptxmickey.skey, &(param->u.crypt.key[16]), 8);
+ memcpy(psta->dot11tkiprxmickey.skey, &(param->u.crypt.key[24]), 8);
+ padapter->securitypriv.busetkipkey = false;
+ }
+
+ DBG_88E(" ~~~~set sta key:unicastkey\n");
+
+ rtw_setstakey_cmd(padapter, (unsigned char *)psta, true);
+ } else { /* group key */
+ memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
+ memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+ padapter->securitypriv.binstallGrpkey = true;
+ DBG_88E(" ~~~~set sta key:groupkey\n");
+
+ padapter->securitypriv.dot118021XGrpKeyid = param->u.crypt.idx;
+
+ rtw_set_key(padapter, &padapter->securitypriv, param->u.crypt.idx, 1);
+#ifdef CONFIG_88EU_P2P
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_PROVISIONING_ING))
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_PROVISIONING_DONE);
+#endif /* CONFIG_88EU_P2P */
+ }
+ }
+ pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
+ if (pbcmc_sta == NULL) {
+ ;
+ } else {
+ /* Jeff: don't disable ieee8021x_blocked while clearing key */
+ if (strcmp(param->u.crypt.alg, "none") != 0)
+ pbcmc_sta->ieee8021x_blocked = false;
+
+ if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) ||
+ (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled))
+ pbcmc_sta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
+ }
+ }
+ }
+
+exit:
+
+ kfree(pwep);
+
+_func_exit_;
+
+ return ret;
+}
+
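+/* Parse the WPA/RSN (and optional WPS) IEs supplied by the supplicant and
+ * update the authentication algorithm, group/pairwise cipher selections,
+ * and WPS state accordingly.
+ */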
+static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ielen)
+{
+ u8 *buf = NULL;
+ int group_cipher = 0, pairwise_cipher = 0;
+ int ret = 0;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+#endif /* CONFIG_88EU_P2P */
+
+ if ((ielen > MAX_WPA_IE_LEN) || (pie == NULL)) {
+ _clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
+ if (pie == NULL)
+ return ret;
+ else
+ return -EINVAL;
+ }
+
+ if (ielen) {
+ buf = rtw_zmalloc(ielen);
+ if (buf == NULL) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ memcpy(buf, pie, ielen);
+
+ /* dump */
+ {
+ int i;
+ DBG_88E("\n wpa_ie(length:%d):\n", ielen);
+ for (i = 0; i < ielen; i += 8)
+ DBG_88E("0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x\n", buf[i], buf[i+1], buf[i+2], buf[i+3], buf[i+4], buf[i+5], buf[i+6], buf[i+7]);
+ }
+
+ if (ielen < RSN_HEADER_LEN) {
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("Ie len too short %d\n", ielen));
+ ret = -1;
+ goto exit;
+ }
+
+ if (rtw_parse_wpa_ie(buf, ielen, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPAPSK;
+ memcpy(padapter->securitypriv.supplicant_ie, &buf[0], ielen);
+ }
+
+ if (rtw_parse_wpa2_ie(buf, ielen, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPA2PSK;
+ memcpy(padapter->securitypriv.supplicant_ie, &buf[0], ielen);
+ }
+
+ switch (group_cipher) {
+ case WPA_CIPHER_NONE:
+ padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
+ break;
+ case WPA_CIPHER_WEP40:
+ padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
+ case WPA_CIPHER_TKIP:
+ padapter->securitypriv.dot118021XGrpPrivacy = _TKIP_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
+ break;
+ case WPA_CIPHER_CCMP:
+ padapter->securitypriv.dot118021XGrpPrivacy = _AES_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
+ break;
+ case WPA_CIPHER_WEP104:
+ padapter->securitypriv.dot118021XGrpPrivacy = _WEP104_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
+ }
+
+ switch (pairwise_cipher) {
+ case WPA_CIPHER_NONE:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
+ break;
+ case WPA_CIPHER_WEP40:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
+ case WPA_CIPHER_TKIP:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _TKIP_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
+ break;
+ case WPA_CIPHER_CCMP:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _AES_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
+ break;
+ case WPA_CIPHER_WEP104:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
+ }
+
+ _clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
+ {/* set wps_ie */
+ u16 cnt = 0;
+ u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
+
+ while (cnt < ielen) {
+ eid = buf[cnt];
+ if ((eid == _VENDOR_SPECIFIC_IE_) && (!memcmp(&buf[cnt+2], wps_oui, 4))) {
+ DBG_88E("SET WPS_IE\n");
+
+ padapter->securitypriv.wps_ie_len = ((buf[cnt+1]+2) < (MAX_WPA_IE_LEN<<2)) ? (buf[cnt+1]+2) : (MAX_WPA_IE_LEN<<2);
+
+ memcpy(padapter->securitypriv.wps_ie, &buf[cnt], padapter->securitypriv.wps_ie_len);
+
+ set_fwstate(&padapter->mlmepriv, WIFI_UNDER_WPS);
+#ifdef CONFIG_88EU_P2P
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_OK))
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_PROVISIONING_ING);
+#endif /* CONFIG_88EU_P2P */
+ cnt += buf[cnt+1]+2;
+ break;
+ } else {
+ cnt += buf[cnt+1]+2; /* goto next */
+ }
+ }
+ }
+ }
+
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
+ ("rtw_set_wpa_ie: pairwise_cipher = 0x%08x padapter->securitypriv.ndisencryptstatus =%d padapter->securitypriv.ndisauthtype =%d\n",
+ pairwise_cipher, padapter->securitypriv.ndisencryptstatus, padapter->securitypriv.ndisauthtype));
+exit:
+ kfree(buf);
+ return ret;
+}
+
+typedef unsigned char NDIS_802_11_RATES_EX[NDIS_802_11_LENGTH_RATES_EX];
+
+static int rtw_wx_get_name(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ u32 ht_ielen = 0;
+ char *p;
+ u8 ht_cap = false;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
+ NDIS_802_11_RATES_EX *prates = NULL;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("cmd_code =%x\n", info->cmd));
+
+ _func_enter_;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE) == true) {
+ /* parsing HT_CAP_IE */
+ p = rtw_get_ie(&pcur_bss->IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pcur_bss->IELength-12);
+ if (p && ht_ielen > 0)
+ ht_cap = true;
+
+ prates = &pcur_bss->SupportedRates;
+
+ if (rtw_is_cckratesonly_included((u8 *)prates) == true) {
+ if (ht_cap)
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bn");
+ else
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11b");
+ } else if ((rtw_is_cckrates_included((u8 *)prates)) == true) {
+ if (ht_cap)
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bgn");
+ else
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bg");
+ } else {
+ if (pcur_bss->Configuration.DSConfig > 14) {
+ if (ht_cap)
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11an");
+ else
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11a");
+ } else {
+ if (ht_cap)
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11gn");
+ else
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11g");
+ }
+ }
+ } else {
+ snprintf(wrqu->name, IFNAMSIZ, "unassociated");
+ }
+
+ _func_exit_;
+
+ return 0;
+}
+
+static int rtw_wx_set_freq(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ _func_enter_;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+rtw_wx_set_freq\n"));
+
+ _func_exit_;
+
+ return 0;
+}
+
+static int rtw_wx_get_freq(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ /* wrqu->freq.m = ieee80211_wlan_frequencies[pcur_bss->Configuration.DSConfig-1] * 100000; */
+ wrqu->freq.m = rtw_ch2freq(pcur_bss->Configuration.DSConfig) * 100000;
+ wrqu->freq.e = 1;
+ wrqu->freq.i = pcur_bss->Configuration.DSConfig;
+ } else {
+ wrqu->freq.m = rtw_ch2freq(padapter->mlmeextpriv.cur_channel) * 100000;
+ wrqu->freq.e = 1;
+ wrqu->freq.i = padapter->mlmeextpriv.cur_channel;
+ }
+
+ return 0;
+}
+
+static int rtw_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ enum ndis_802_11_network_infra networkType;
+ int ret = 0;
+
+ _func_enter_;
+
+ if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ ret = -EPERM;
+ goto exit;
+ }
+
+ if (!padapter->hw_init_completed) {
+ ret = -EPERM;
+ goto exit;
+ }
+
+ switch (wrqu->mode) {
+ case IW_MODE_AUTO:
+ networkType = Ndis802_11AutoUnknown;
+ DBG_88E("set_mode = IW_MODE_AUTO\n");
+ break;
+ case IW_MODE_ADHOC:
+ networkType = Ndis802_11IBSS;
+ DBG_88E("set_mode = IW_MODE_ADHOC\n");
+ break;
+ case IW_MODE_MASTER:
+ networkType = Ndis802_11APMode;
+ DBG_88E("set_mode = IW_MODE_MASTER\n");
+ break;
+ case IW_MODE_INFRA:
+ networkType = Ndis802_11Infrastructure;
+ DBG_88E("set_mode = IW_MODE_INFRA\n");
+ break;
+ default:
+ ret = -EINVAL;
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("\n Mode: %s is not supported\n", iw_operation_mode[wrqu->mode]));
+ goto exit;
+ }
+ if (rtw_set_802_11_infrastructure_mode(padapter, networkType) == false) {
+ ret = -EPERM;
+ goto exit;
+ }
+ rtw_setopmode_cmd(padapter, networkType);
+exit:
+ _func_exit_;
+ return ret;
+}
+
+static int rtw_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, (" rtw_wx_get_mode\n"));
+
+ _func_enter_;
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
+ wrqu->mode = IW_MODE_INFRA;
+ else if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)))
+ wrqu->mode = IW_MODE_ADHOC;
+ else if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
+ wrqu->mode = IW_MODE_MASTER;
+ else
+ wrqu->mode = IW_MODE_AUTO;
+
+ _func_exit_;
+
+ return 0;
+}
+
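+/* Maintain the driver's PMKID cache on behalf of the supplicant:
+ * IW_PMKSA_ADD inserts or refreshes the entry for the given BSSID,
+ * IW_PMKSA_REMOVE clears the matching entry, and IW_PMKSA_FLUSH resets
+ * the whole cache.
+ */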
+static int rtw_wx_set_pmkid(struct net_device *dev,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ u8 j, blInserted = false;
+ int ret = false;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct iw_pmksa *pPMK = (struct iw_pmksa *)extra;
+ u8 strZeroMacAddress[ETH_ALEN] = {0x00};
+ u8 strIssueBssid[ETH_ALEN] = {0x00};
+
+ memcpy(strIssueBssid, pPMK->bssid.sa_data, ETH_ALEN);
+ if (pPMK->cmd == IW_PMKSA_ADD) {
+ DBG_88E("[rtw_wx_set_pmkid] IW_PMKSA_ADD!\n");
+ if (!memcmp(strIssueBssid, strZeroMacAddress, ETH_ALEN) == true)
+ return ret;
+ else
+ ret = true;
+ blInserted = false;
+
+ /* overwrite PMKID */
+ for (j = 0; j < NUM_PMKID_CACHE; j++) {
+ if (!memcmp(psecuritypriv->PMKIDList[j].Bssid, strIssueBssid, ETH_ALEN)) {
+ /* BSSID is matched, the same AP => rewrite with new PMKID. */
+ DBG_88E("[rtw_wx_set_pmkid] BSSID exists in the PMKList.\n");
+ memcpy(psecuritypriv->PMKIDList[j].PMKID, pPMK->pmkid, IW_PMKID_LEN);
+ psecuritypriv->PMKIDList[j].bUsed = true;
+ psecuritypriv->PMKIDIndex = j+1;
+ blInserted = true;
+ break;
+ }
+ }
+
+ if (!blInserted) {
+ /* Find a new entry */
+ DBG_88E("[rtw_wx_set_pmkid] Use the new entry index = %d for this PMKID.\n",
+ psecuritypriv->PMKIDIndex);
+
+ memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].Bssid, strIssueBssid, ETH_ALEN);
+ memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].PMKID, pPMK->pmkid, IW_PMKID_LEN);
+
+ psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].bUsed = true;
+ psecuritypriv->PMKIDIndex++;
+ if (psecuritypriv->PMKIDIndex == 16)
+ psecuritypriv->PMKIDIndex = 0;
+ }
+ } else if (pPMK->cmd == IW_PMKSA_REMOVE) {
+ DBG_88E("[rtw_wx_set_pmkid] IW_PMKSA_REMOVE!\n");
+ ret = true;
+ for (j = 0; j < NUM_PMKID_CACHE; j++) {
+ if (!memcmp(psecuritypriv->PMKIDList[j].Bssid, strIssueBssid, ETH_ALEN)) {
+ /* BSSID is matched, the same AP => Remove this PMKID information and reset it. */
+ _rtw_memset(psecuritypriv->PMKIDList[j].Bssid, 0x00, ETH_ALEN);
+ psecuritypriv->PMKIDList[j].bUsed = false;
+ break;
+ }
+ }
+ } else if (pPMK->cmd == IW_PMKSA_FLUSH) {
+ DBG_88E("[rtw_wx_set_pmkid] IW_PMKSA_FLUSH!\n");
+ _rtw_memset(&psecuritypriv->PMKIDList[0], 0x00, sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
+ psecuritypriv->PMKIDIndex = 0;
+ ret = true;
+ }
+ return ret;
+}
+
+static int rtw_wx_get_sens(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ wrqu->sens.value = 0;
+ wrqu->sens.fixed = 0; /* no auto select */
+ wrqu->sens.disabled = 1;
+ return 0;
+}
+
+static int rtw_wx_get_range(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct iw_range *range = (struct iw_range *)extra;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ u16 val;
+ int i;
+
+ _func_enter_;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_range. cmd_code =%x\n", info->cmd));
+
+ wrqu->data.length = sizeof(*range);
+ _rtw_memset(range, 0, sizeof(*range));
+
+ /* Let's try to keep this struct in the same order as in
+ * linux/include/wireless.h
+ */
+
+ /* TODO: See what values we can set, and remove the ones we can't
+ * set, or fill them with some default data.
+ */
+
+ /* ~5 Mb/s real (802.11b) */
+ range->throughput = 5 * 1000 * 1000;
+
+ /* signal level threshold range */
+
+ /* percent values between 0 and 100. */
+ range->max_qual.qual = 100;
+ range->max_qual.level = 100;
+ range->max_qual.noise = 100;
+ range->max_qual.updated = 7; /* Updated all three */
+
+ range->avg_qual.qual = 92; /* > 8% missed beacons is 'bad' */
+ /* TODO: Find the real 'good' to 'bad' threshold value for RSSI */
+ range->avg_qual.level = 20 + -98;
+ range->avg_qual.noise = 0;
+ range->avg_qual.updated = 7; /* Updated all three */
+
+ range->num_bitrates = RATE_COUNT;
+
+ for (i = 0; i < RATE_COUNT && i < IW_MAX_BITRATES; i++)
+ range->bitrate[i] = rtw_rates[i];
+
+ range->min_frag = MIN_FRAG_THRESHOLD;
+ range->max_frag = MAX_FRAG_THRESHOLD;
+
+ range->pm_capa = 0;
+
+ range->we_version_compiled = WIRELESS_EXT;
+ range->we_version_source = 16;
+
+ for (i = 0, val = 0; i < MAX_CHANNEL_NUM; i++) {
+ /* Include only legal frequencies for some countries */
+ if (pmlmeext->channel_set[i].ChannelNum != 0) {
+ range->freq[val].i = pmlmeext->channel_set[i].ChannelNum;
+ range->freq[val].m = rtw_ch2freq(pmlmeext->channel_set[i].ChannelNum) * 100000;
+ range->freq[val].e = 1;
+ val++;
+ }
+
+ if (val == IW_MAX_FREQUENCIES)
+ break;
+ }
+
+ range->num_channels = val;
+ range->num_frequency = val;
+
+/* The following code provides the security capability to the network manager. */
+/* If the driver doesn't report this capability to the network manager, */
+/* WPA/WPA2 routers can't be chosen in the network manager. */
+
+/*
+#define IW_SCAN_CAPA_NONE 0x00
+#define IW_SCAN_CAPA_ESSID 0x01
+#define IW_SCAN_CAPA_BSSID 0x02
+#define IW_SCAN_CAPA_CHANNEL 0x04
+#define IW_SCAN_CAPA_MODE 0x08
+#define IW_SCAN_CAPA_RATE 0x10
+#define IW_SCAN_CAPA_TYPE 0x20
+#define IW_SCAN_CAPA_TIME 0x40
+*/
+
+ range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
+ IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
+
+ range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE |
+ IW_SCAN_CAPA_BSSID | IW_SCAN_CAPA_CHANNEL |
+ IW_SCAN_CAPA_MODE | IW_SCAN_CAPA_RATE;
+ _func_exit_;
+
+ return 0;
+}
+
+/* set bssid flow */
+/* s1. rtw_set_802_11_infrastructure_mode() */
+/* s2. rtw_set_802_11_authentication_mode() */
+/* s3. set_802_11_encryption_mode() */
+/* s4. rtw_set_802_11_bssid() */
+static int rtw_wx_set_wap(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *awrq,
+ char *extra)
+{
+ unsigned long irqL;
+ uint ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct sockaddr *temp = (struct sockaddr *)awrq;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct list_head *phead;
+ u8 *dst_bssid, *src_bssid;
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ enum ndis_802_11_auth_mode authmode;
+
+ _func_enter_;
+
+ if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ ret = -1;
+ goto exit;
+ }
+
+ if (!padapter->bup) {
+ ret = -1;
+ goto exit;
+ }
+
+ if (temp->sa_family != ARPHRD_ETHER) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ authmode = padapter->securitypriv.ndisauthtype;
+ _enter_critical_bh(&queue->lock, &irqL);
+ phead = get_list_head(queue);
+ pmlmepriv->pscanned = get_next(phead);
+
+ while (1) {
+ if ((rtw_end_of_queue_search(phead, pmlmepriv->pscanned)) == true)
+ break;
+
+ pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned, struct wlan_network, list);
+
+ pmlmepriv->pscanned = get_next(pmlmepriv->pscanned);
+
+ dst_bssid = pnetwork->network.MacAddress;
+
+ src_bssid = temp->sa_data;
+
+ if ((!memcmp(dst_bssid, src_bssid, ETH_ALEN))) {
+ if (!rtw_set_802_11_infrastructure_mode(padapter, pnetwork->network.InfrastructureMode)) {
+ ret = -1;
+ _exit_critical_bh(&queue->lock, &irqL);
+ goto exit;
+ }
+
+ break;
+ }
+ }
+ _exit_critical_bh(&queue->lock, &irqL);
+
+ rtw_set_802_11_authentication_mode(padapter, authmode);
+ /* set_802_11_encryption_mode(padapter, padapter->securitypriv.ndisencryptstatus); */
+ if (rtw_set_802_11_bssid(padapter, temp->sa_data) == false) {
+ ret = -1;
+ goto exit;
+ }
+
+exit:
+
+ _func_exit_;
+
+ return ret;
+}
+
+static int rtw_wx_get_wap(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
+
+ wrqu->ap_addr.sa_family = ARPHRD_ETHER;
+
+ _rtw_memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_wap\n"));
+
+ _func_enter_;
+
+ if (((check_fwstate(pmlmepriv, _FW_LINKED)) == true) ||
+ ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) == true) ||
+ ((check_fwstate(pmlmepriv, WIFI_AP_STATE)) == true))
+ memcpy(wrqu->ap_addr.sa_data, pcur_bss->MacAddress, ETH_ALEN);
+ else
+ _rtw_memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
+
+ _func_exit_;
+
+ return 0;
+}
+
+static int rtw_wx_set_mlme(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ u16 reason;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct iw_mlme *mlme = (struct iw_mlme *)extra;
+
+ if (mlme == NULL)
+ return -1;
+
+ DBG_88E("%s\n", __func__);
+
+ reason = mlme->reason_code;
+
+ DBG_88E("%s, cmd =%d, reason =%d\n", __func__, mlme->cmd, reason);
+
+ switch (mlme->cmd) {
+ case IW_MLME_DEAUTH:
+ if (!rtw_set_802_11_disassociate(padapter))
+ ret = -1;
+ break;
+ case IW_MLME_DISASSOC:
+ if (!rtw_set_802_11_disassociate(padapter))
+ ret = -1;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return ret;
+}
+
+static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
+{
+ u8 _status = false;
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ndis_802_11_ssid ssid[RTW_SSID_SCAN_AMOUNT];
+ unsigned long irqL;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+#endif /* CONFIG_88EU_P2P */
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_set_scan\n"));
+
+_func_enter_;
+ if (padapter->registrypriv.mp_mode == 1) {
+ if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) {
+ ret = -1;
+ goto exit;
+ }
+ }
+ if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ ret = -1;
+ goto exit;
+ }
+
+ if (padapter->bDriverStopped) {
+ DBG_88E("bDriverStopped =%d\n", padapter->bDriverStopped);
+ ret = -1;
+ goto exit;
+ }
+
+ if (!padapter->bup) {
+ ret = -1;
+ goto exit;
+ }
+
+ if (!padapter->hw_init_completed) {
+ ret = -1;
+ goto exit;
+ }
+
+	/* When traffic is busy, the driver does not do a site survey and returns success, */
+	/* so wpa_supplicant will not issue the SIOCSIWSCAN cmd again after a scan timeout. */
+	/* modified by thomas 2011-02-22. */
+ if (pmlmepriv->LinkDetectInfo.bBusyTraffic) {
+ indicate_wx_scan_complete_event(padapter);
+ goto exit;
+ }
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING)) {
+ indicate_wx_scan_complete_event(padapter);
+ goto exit;
+ }
+
+/* For the DMP WiFi Display project, the driver won't scan because */
+/* pmlmepriv->scan_interval is always equal to 3. */
+/* As a result, wpa_supplicant won't find the WPS SoftAP. */
+
+#ifdef CONFIG_88EU_P2P
+ if (pwdinfo->p2p_state != P2P_STATE_NONE) {
+ rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH);
+ rtw_p2p_findphase_ex_set(pwdinfo, P2P_FINDPHASE_EX_FULL);
+ rtw_free_network_queue(padapter, true);
+ }
+#endif /* CONFIG_88EU_P2P */
+
+ _rtw_memset(ssid, 0, sizeof(struct ndis_802_11_ssid)*RTW_SSID_SCAN_AMOUNT);
+
+ if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+ struct iw_scan_req *req = (struct iw_scan_req *)extra;
+
+ if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+ int len = min((int)req->essid_len, IW_ESSID_MAX_SIZE);
+
+ memcpy(ssid[0].Ssid, req->essid, len);
+ ssid[0].SsidLength = len;
+
+ DBG_88E("IW_SCAN_THIS_ESSID, ssid =%s, len =%d\n", req->essid, req->essid_len);
+
+ _enter_critical_bh(&pmlmepriv->lock, &irqL);
+
+ _status = rtw_sitesurvey_cmd(padapter, ssid, 1, NULL, 0);
+
+ _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
+ DBG_88E("rtw_wx_set_scan, req->scan_type == IW_SCAN_TYPE_PASSIVE\n");
+ }
+ } else {
+ if (wrqu->data.length >= WEXT_CSCAN_HEADER_SIZE &&
+ !memcmp(extra, WEXT_CSCAN_HEADER, WEXT_CSCAN_HEADER_SIZE)) {
+ int len = wrqu->data.length - WEXT_CSCAN_HEADER_SIZE;
+ char *pos = extra+WEXT_CSCAN_HEADER_SIZE;
+ char section;
+ char sec_len;
+ int ssid_index = 0;
+
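+			/*
+			 * The combo-scan buffer is a sequence of sections, each
+			 * starting with a one-byte tag: an SSID section carries a
+			 * one-byte length followed by the SSID bytes, the type and
+			 * channel sections carry one extra byte, and the dwell-time
+			 * sections carry two extra bytes.
+			 */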
+ while (len >= 1) {
+ section = *(pos++);
+ len -= 1;
+
+ switch (section) {
+ case WEXT_CSCAN_SSID_SECTION:
+ if (len < 1) {
+ len = 0;
+ break;
+ }
+ sec_len = *(pos++); len -= 1;
+ if (sec_len > 0 && sec_len <= len) {
+ ssid[ssid_index].SsidLength = sec_len;
+ memcpy(ssid[ssid_index].Ssid, pos, ssid[ssid_index].SsidLength);
+ ssid_index++;
+ }
+ pos += sec_len;
+ len -= sec_len;
+ break;
+ case WEXT_CSCAN_TYPE_SECTION:
+ case WEXT_CSCAN_CHANNEL_SECTION:
+ pos += 1;
+ len -= 1;
+ break;
+ case WEXT_CSCAN_PASV_DWELL_SECTION:
+ case WEXT_CSCAN_HOME_DWELL_SECTION:
+ case WEXT_CSCAN_ACTV_DWELL_SECTION:
+ pos += 2;
+ len -= 2;
+ break;
+ default:
+ len = 0; /* stop parsing */
+ }
+ }
+
+			/* There are still other scan parameters that could be parsed; only the SSID list is used for now. */
+ _status = rtw_set_802_11_bssid_list_scan(padapter, ssid, RTW_SSID_SCAN_AMOUNT);
+ } else {
+ _status = rtw_set_802_11_bssid_list_scan(padapter, NULL, 0);
+ }
+ }
+
+ if (!_status)
+ ret = -1;
+
+exit:
+
+_func_exit_;
+ return ret;
+}
+
+static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
+{
+ unsigned long irqL;
+ struct list_head *plist, *phead;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ char *ev = extra;
+ char *stop = ev + wrqu->data.length;
+ u32 ret = 0;
+ u32 cnt = 0;
+ u32 wait_for_surveydone;
+ int wait_status;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+#endif /* CONFIG_88EU_P2P */
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan\n"));
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, (" Start of Query SIOCGIWSCAN .\n"));
+
+ _func_enter_;
+
+ if (padapter->pwrctrlpriv.brfoffbyhw && padapter->bDriverStopped) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+#ifdef CONFIG_88EU_P2P
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
+ /* P2P is enabled */
+ wait_for_surveydone = 200;
+ } else {
+ /* P2P is disabled */
+ wait_for_surveydone = 100;
+ }
+#else
+ {
+ wait_for_surveydone = 100;
+ }
+#endif /* CONFIG_88EU_P2P */
+
+ wait_status = _FW_UNDER_SURVEY | _FW_UNDER_LINKING;
+
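+	/* Poll in 30 ms steps until any survey/linking in progress finishes: at most ~6 s when P2P is enabled, ~3 s otherwise. */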
+ while (check_fwstate(pmlmepriv, wait_status)) {
+ rtw_msleep_os(30);
+ cnt++;
+ if (cnt > wait_for_surveydone)
+ break;
+ }
+
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1) {
+ if (rtw_end_of_queue_search(phead, plist))
+ break;
+
+ if ((stop - ev) < SCAN_ITEM_SIZE) {
+ ret = -E2BIG;
+ break;
+ }
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ /* report network only if the current channel set contains the channel to which this network belongs */
+ if (rtw_ch_set_search_ch(padapter->mlmeextpriv.channel_set, pnetwork->network.Configuration.DSConfig) >= 0)
+ ev = translate_scan(padapter, a, pnetwork, ev, stop);
+
+ plist = get_next(plist);
+ }
+
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ wrqu->data.length = ev-extra;
+ wrqu->data.flags = 0;
+
+exit:
+ _func_exit_;
+ return ret;
+}
+
+/* set ssid flow */
+/* s1. rtw_set_802_11_infrastructure_mode() */
+/* s2. set_802_11_authentication_mode() */
+/* s3. set_802_11_encryption_mode() */
+/* s4. rtw_set_802_11_ssid() */
+static int rtw_wx_set_essid(struct net_device *dev,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
+{
+ unsigned long irqL;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct __queue *queue = &pmlmepriv->scanned_queue;
+ struct list_head *phead;
+ struct wlan_network *pnetwork = NULL;
+ enum ndis_802_11_auth_mode authmode;
+ struct ndis_802_11_ssid ndis_ssid;
+ u8 *dst_ssid, *src_ssid;
+
+ uint ret = 0, len;
+
+ _func_enter_;
+
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
+ ("+rtw_wx_set_essid: fw_state = 0x%08x\n", get_fwstate(pmlmepriv)));
+ if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ ret = -1;
+ goto exit;
+ }
+
+ if (!padapter->bup) {
+ ret = -1;
+ goto exit;
+ }
+
+ if (wrqu->essid.length > IW_ESSID_MAX_SIZE) {
+ ret = -E2BIG;
+ goto exit;
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ ret = -1;
+ goto exit;
+ }
+
+ authmode = padapter->securitypriv.ndisauthtype;
+ DBG_88E("=>%s\n", __func__);
+ if (wrqu->essid.flags && wrqu->essid.length) {
+ len = (wrqu->essid.length < IW_ESSID_MAX_SIZE) ? wrqu->essid.length : IW_ESSID_MAX_SIZE;
+
+ if (wrqu->essid.length != 33)
+ DBG_88E("ssid =%s, len =%d\n", extra, wrqu->essid.length);
+
+ _rtw_memset(&ndis_ssid, 0, sizeof(struct ndis_802_11_ssid));
+ ndis_ssid.SsidLength = len;
+ memcpy(ndis_ssid.Ssid, extra, len);
+ src_ssid = ndis_ssid.Ssid;
+
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("rtw_wx_set_essid: ssid =[%s]\n", src_ssid));
+ _enter_critical_bh(&queue->lock, &irqL);
+ phead = get_list_head(queue);
+ pmlmepriv->pscanned = get_next(phead);
+
+ while (1) {
+ if (rtw_end_of_queue_search(phead, pmlmepriv->pscanned) == true) {
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_warning_,
+ ("rtw_wx_set_essid: scan_q is empty, set ssid to check if scanning again!\n"));
+
+ break;
+ }
+
+ pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned, struct wlan_network, list);
+
+ pmlmepriv->pscanned = get_next(pmlmepriv->pscanned);
+
+ dst_ssid = pnetwork->network.Ssid.Ssid;
+
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
+ ("rtw_wx_set_essid: dst_ssid =%s\n",
+ pnetwork->network.Ssid.Ssid));
+
+ if ((!memcmp(dst_ssid, src_ssid, ndis_ssid.SsidLength)) &&
+ (pnetwork->network.Ssid.SsidLength == ndis_ssid.SsidLength)) {
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
+ ("rtw_wx_set_essid: find match, set infra mode\n"));
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) {
+ if (pnetwork->network.InfrastructureMode != pmlmepriv->cur_network.network.InfrastructureMode)
+ continue;
+ }
+
+ if (!rtw_set_802_11_infrastructure_mode(padapter, pnetwork->network.InfrastructureMode)) {
+ ret = -1;
+ _exit_critical_bh(&queue->lock, &irqL);
+ goto exit;
+ }
+
+ break;
+ }
+ }
+ _exit_critical_bh(&queue->lock, &irqL);
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
+ ("set ssid: set_802_11_auth. mode =%d\n", authmode));
+ rtw_set_802_11_authentication_mode(padapter, authmode);
+ if (rtw_set_802_11_ssid(padapter, &ndis_ssid) == false) {
+ ret = -1;
+ goto exit;
+ }
+ }
+
+exit:
+
+ DBG_88E("<=%s, ret %d\n", __func__, ret);
+
+ _func_exit_;
+
+ return ret;
+}
+
+static int rtw_wx_get_essid(struct net_device *dev,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
+{
+ u32 len, ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_essid\n"));
+
+ _func_enter_;
+
+ if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) {
+ len = pcur_bss->Ssid.SsidLength;
+
+ wrqu->essid.length = len;
+
+ memcpy(extra, pcur_bss->Ssid.Ssid, len);
+
+ wrqu->essid.flags = 1;
+ } else {
+ ret = -1;
+ goto exit;
+ }
+
+exit:
+
+ _func_exit_;
+
+ return ret;
+}
+
+static int rtw_wx_set_rate(struct net_device *dev,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
+{
+ int i, ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ u8 datarates[NumRates];
+ u32 target_rate = wrqu->bitrate.value;
+ u32 fixed = wrqu->bitrate.fixed;
+ u32 ratevalue = 0;
+ u8 mpdatarate[NumRates] = {11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0xff};
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, (" rtw_wx_set_rate\n"));
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("target_rate = %d, fixed = %d\n", target_rate, fixed));
+
+ if (target_rate == -1) {
+ ratevalue = 11;
+ goto set_rate;
+ }
+ target_rate = target_rate/100000;
+
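+	/*
+	 * wrqu->bitrate.value is given in bit/s; after dividing by 100000 the
+	 * switch below maps the rate in units of 100 kbit/s (10 = 1 Mbps ...
+	 * 540 = 54 Mbps) to a rate index in the 0-11 range that is matched
+	 * against mpdatarate[].
+	 */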
+ switch (target_rate) {
+ case 10:
+ ratevalue = 0;
+ break;
+ case 20:
+ ratevalue = 1;
+ break;
+ case 55:
+ ratevalue = 2;
+ break;
+ case 60:
+ ratevalue = 3;
+ break;
+ case 90:
+ ratevalue = 4;
+ break;
+ case 110:
+ ratevalue = 5;
+ break;
+ case 120:
+ ratevalue = 6;
+ break;
+ case 180:
+ ratevalue = 7;
+ break;
+ case 240:
+ ratevalue = 8;
+ break;
+ case 360:
+ ratevalue = 9;
+ break;
+ case 480:
+ ratevalue = 10;
+ break;
+ case 540:
+ ratevalue = 11;
+ break;
+ default:
+ ratevalue = 11;
+ break;
+ }
+
+set_rate:
+
+ for (i = 0; i < NumRates; i++) {
+ if (ratevalue == mpdatarate[i]) {
+ datarates[i] = mpdatarate[i];
+ if (fixed == 0)
+ break;
+ } else {
+ datarates[i] = 0xff;
+ }
+
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("datarate_inx =%d\n", datarates[i]));
+ }
+
+ if (rtw_setdatarate_cmd(padapter, datarates) != _SUCCESS) {
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("rtw_wx_set_rate Fail!!!\n"));
+ ret = -1;
+ }
+
+_func_exit_;
+
+ return ret;
+}
+
+static int rtw_wx_get_rate(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ u16 max_rate = 0;
+
+ max_rate = rtw_get_cur_max_rate((struct adapter *)rtw_netdev_priv(dev));
+
+ if (max_rate == 0)
+ return -EPERM;
+
+ wrqu->bitrate.fixed = 0; /* no auto select */
+ wrqu->bitrate.value = max_rate * 100000;
+
+ return 0;
+}
+
+static int rtw_wx_set_rts(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ _func_enter_;
+
+ if (wrqu->rts.disabled) {
+ padapter->registrypriv.rts_thresh = 2347;
+ } else {
+ if (wrqu->rts.value < 0 ||
+ wrqu->rts.value > 2347)
+ return -EINVAL;
+
+ padapter->registrypriv.rts_thresh = wrqu->rts.value;
+ }
+
+ DBG_88E("%s, rts_thresh =%d\n", __func__, padapter->registrypriv.rts_thresh);
+
+ _func_exit_;
+
+ return 0;
+}
+
+static int rtw_wx_get_rts(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ _func_enter_;
+
+ DBG_88E("%s, rts_thresh =%d\n", __func__, padapter->registrypriv.rts_thresh);
+
+ wrqu->rts.value = padapter->registrypriv.rts_thresh;
+ wrqu->rts.fixed = 0; /* no auto select */
+ /* wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD); */
+
+ _func_exit_;
+
+ return 0;
+}
+
+static int rtw_wx_set_frag(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ _func_enter_;
+
+ if (wrqu->frag.disabled) {
+ padapter->xmitpriv.frag_len = MAX_FRAG_THRESHOLD;
+ } else {
+ if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
+ wrqu->frag.value > MAX_FRAG_THRESHOLD)
+ return -EINVAL;
+
+ padapter->xmitpriv.frag_len = wrqu->frag.value & ~0x1;
+ }
+
+ DBG_88E("%s, frag_len =%d\n", __func__, padapter->xmitpriv.frag_len);
+
+ _func_exit_;
+
+ return 0;
+}
+
+static int rtw_wx_get_frag(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ _func_enter_;
+
+ DBG_88E("%s, frag_len =%d\n", __func__, padapter->xmitpriv.frag_len);
+
+ wrqu->frag.value = padapter->xmitpriv.frag_len;
+ wrqu->frag.fixed = 0; /* no auto select */
+
+ _func_exit_;
+
+ return 0;
+}
+
+static int rtw_wx_get_retry(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ wrqu->retry.value = 7;
+ wrqu->retry.fixed = 0; /* no auto select */
+ wrqu->retry.disabled = 1;
+
+ return 0;
+}
+
+static int rtw_wx_set_enc(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *keybuf)
+{
+ u32 key, ret = 0;
+ u32 keyindex_provided;
+ struct ndis_802_11_wep wep;
+ enum ndis_802_11_auth_mode authmode;
+
+ struct iw_point *erq = &(wrqu->encoding);
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ DBG_88E("+rtw_wx_set_enc, flags = 0x%x\n", erq->flags);
+
+ _rtw_memset(&wep, 0, sizeof(struct ndis_802_11_wep));
+
+ key = erq->flags & IW_ENCODE_INDEX;
+
+ _func_enter_;
+
+ if (erq->flags & IW_ENCODE_DISABLED) {
+ DBG_88E("EncryptionDisabled\n");
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
+ padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
+ authmode = Ndis802_11AuthModeOpen;
+ padapter->securitypriv.ndisauthtype = authmode;
+
+ goto exit;
+ }
+
+ if (key) {
+ if (key > WEP_KEYS)
+ return -EINVAL;
+ key--;
+ keyindex_provided = 1;
+ } else {
+ keyindex_provided = 0;
+ key = padapter->securitypriv.dot11PrivacyKeyIndex;
+ DBG_88E("rtw_wx_set_enc, key =%d\n", key);
+ }
+
+ /* set authentication mode */
+ if (erq->flags & IW_ENCODE_OPEN) {
+ DBG_88E("rtw_wx_set_enc():IW_ENCODE_OPEN\n");
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;/* Ndis802_11EncryptionDisabled; */
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
+ padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
+ authmode = Ndis802_11AuthModeOpen;
+ padapter->securitypriv.ndisauthtype = authmode;
+ } else if (erq->flags & IW_ENCODE_RESTRICTED) {
+ DBG_88E("rtw_wx_set_enc():IW_ENCODE_RESTRICTED\n");
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Shared;
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
+ padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
+ authmode = Ndis802_11AuthModeShared;
+ padapter->securitypriv.ndisauthtype = authmode;
+ } else {
+ DBG_88E("rtw_wx_set_enc():erq->flags = 0x%x\n", erq->flags);
+
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;/* Ndis802_11EncryptionDisabled; */
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
+ padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
+ authmode = Ndis802_11AuthModeOpen;
+ padapter->securitypriv.ndisauthtype = authmode;
+ }
+
+ wep.KeyIndex = key;
+ if (erq->length > 0) {
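+		/* WEP-40 uses a 5-byte key and WEP-104 a 13-byte key: lengths of up to 5 bytes select WEP-40, longer ones WEP-104. */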
+ wep.KeyLength = erq->length <= 5 ? 5 : 13;
+
+ wep.Length = wep.KeyLength + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
+ } else {
+ wep.KeyLength = 0;
+
+ if (keyindex_provided == 1) {
+			/* Set the key_id only; no KeyMaterial was given (erq->length == 0). */
+ padapter->securitypriv.dot11PrivacyKeyIndex = key;
+
+ DBG_88E("(keyindex_provided == 1), keyid =%d, key_len =%d\n", key, padapter->securitypriv.dot11DefKeylen[key]);
+
+ switch (padapter->securitypriv.dot11DefKeylen[key]) {
+ case 5:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
+ break;
+ case 13:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
+ break;
+ default:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ break;
+ }
+
+ goto exit;
+ }
+ }
+
+ wep.KeyIndex |= 0x80000000;
+
+ memcpy(wep.KeyMaterial, keybuf, wep.KeyLength);
+
+ if (rtw_set_802_11_add_wep(padapter, &wep) == false) {
+ if (rf_on == pwrpriv->rf_pwrstate)
+ ret = -EOPNOTSUPP;
+ goto exit;
+ }
+
+exit:
+
+ _func_exit_;
+
+ return ret;
+}
+
+static int rtw_wx_get_enc(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *keybuf)
+{
+ uint key, ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct iw_point *erq = &(wrqu->encoding);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ _func_enter_;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) != true) {
+ if (!check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
+ erq->length = 0;
+ erq->flags |= IW_ENCODE_DISABLED;
+ return 0;
+ }
+ }
+
+ key = erq->flags & IW_ENCODE_INDEX;
+
+ if (key) {
+ if (key > WEP_KEYS)
+ return -EINVAL;
+ key--;
+ } else {
+ key = padapter->securitypriv.dot11PrivacyKeyIndex;
+ }
+
+ erq->flags = key + 1;
+
+ switch (padapter->securitypriv.ndisencryptstatus) {
+ case Ndis802_11EncryptionNotSupported:
+ case Ndis802_11EncryptionDisabled:
+ erq->length = 0;
+ erq->flags |= IW_ENCODE_DISABLED;
+ break;
+ case Ndis802_11Encryption1Enabled:
+ erq->length = padapter->securitypriv.dot11DefKeylen[key];
+ if (erq->length) {
+ memcpy(keybuf, padapter->securitypriv.dot11DefKey[key].skey, padapter->securitypriv.dot11DefKeylen[key]);
+
+ erq->flags |= IW_ENCODE_ENABLED;
+
+ if (padapter->securitypriv.ndisauthtype == Ndis802_11AuthModeOpen)
+ erq->flags |= IW_ENCODE_OPEN;
+ else if (padapter->securitypriv.ndisauthtype == Ndis802_11AuthModeShared)
+ erq->flags |= IW_ENCODE_RESTRICTED;
+ } else {
+ erq->length = 0;
+ erq->flags |= IW_ENCODE_DISABLED;
+ }
+ break;
+ case Ndis802_11Encryption2Enabled:
+ case Ndis802_11Encryption3Enabled:
+ erq->length = 16;
+ erq->flags |= (IW_ENCODE_ENABLED | IW_ENCODE_OPEN | IW_ENCODE_NOKEY);
+ break;
+ default:
+ erq->length = 0;
+ erq->flags |= IW_ENCODE_DISABLED;
+ break;
+ }
+ _func_exit_;
+
+ return ret;
+}
+
+static int rtw_wx_get_power(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ wrqu->power.value = 0;
+ wrqu->power.fixed = 0; /* no auto select */
+ wrqu->power.disabled = 1;
+
+ return 0;
+}
+
+static int rtw_wx_set_gen_ie(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ ret = rtw_set_wpa_ie(padapter, extra, wrqu->data.length);
+ return ret;
+}
+
+static int rtw_wx_set_auth(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct iw_param *param = (struct iw_param *)&(wrqu->param);
+ int ret = 0;
+
+ switch (param->flags & IW_AUTH_INDEX) {
+ case IW_AUTH_WPA_VERSION:
+ break;
+ case IW_AUTH_CIPHER_PAIRWISE:
+
+ break;
+ case IW_AUTH_CIPHER_GROUP:
+
+ break;
+ case IW_AUTH_KEY_MGMT:
+ /*
+ * ??? does not use these parameters
+ */
+ break;
+ case IW_AUTH_TKIP_COUNTERMEASURES:
+ if (param->value) {
+ /* wpa_supplicant is enabling the tkip countermeasure. */
+ padapter->securitypriv.btkip_countermeasure = true;
+ } else {
+ /* wpa_supplicant is disabling the tkip countermeasure. */
+ padapter->securitypriv.btkip_countermeasure = false;
+ }
+ break;
+ case IW_AUTH_DROP_UNENCRYPTED:
+		/* HACK:
+		 *
+		 * wpa_supplicant calls set_wpa_enabled when the driver
+		 * is loaded and unloaded, regardless of whether WPA is
+		 * being used.  No other calls are made that could be used
+		 * to determine whether encryption will be used before
+		 * association is expected.  If encryption is not being
+		 * used, drop_unencrypted is set to false, else true -- we
+		 * can use this to determine if the CAP_PRIVACY_ON bit
+		 * should be set.
+		 */
+
+ if (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption1Enabled)
+			break;	/* Either this is the initial value or WEP is in use */
+			/* (ndisencryptstatus == Ndis802_11Encryption1Enabled); no need to reset it. */
+
+ if (param->value) {
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
+ padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
+ }
+
+ break;
+ case IW_AUTH_80211_AUTH_ALG:
+		/*
+		 * This is the starting point of a link-layer connection made via wpa_supplicant.
+		 */
+ if (check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
+ LeaveAllPowerSaveMode(padapter);
+ rtw_disassoc_cmd(padapter, 500, false);
+ DBG_88E("%s...call rtw_indicate_disconnect\n ", __func__);
+ rtw_indicate_disconnect(padapter);
+ rtw_free_assoc_resources(padapter, 1);
+ }
+ ret = wpa_set_auth_algs(dev, (u32)param->value);
+ break;
+ case IW_AUTH_WPA_ENABLED:
+ break;
+ case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+ break;
+ case IW_AUTH_PRIVACY_INVOKED:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int rtw_wx_set_enc_ext(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ char *alg_name;
+ u32 param_len;
+ struct ieee_param *param = NULL;
+ struct iw_point *pencoding = &wrqu->encoding;
+ struct iw_encode_ext *pext = (struct iw_encode_ext *)extra;
+ int ret = 0;
+
+ param_len = sizeof(struct ieee_param) + pext->key_len;
+ param = (struct ieee_param *)rtw_malloc(param_len);
+ if (param == NULL)
+ return -1;
+
+ _rtw_memset(param, 0, param_len);
+
+ param->cmd = IEEE_CMD_SET_ENCRYPTION;
+ _rtw_memset(param->sta_addr, 0xff, ETH_ALEN);
+
+ switch (pext->alg) {
+ case IW_ENCODE_ALG_NONE:
+ /* todo: remove key */
+ /* remove = 1; */
+ alg_name = "none";
+ break;
+ case IW_ENCODE_ALG_WEP:
+ alg_name = "WEP";
+ break;
+ case IW_ENCODE_ALG_TKIP:
+ alg_name = "TKIP";
+ break;
+ case IW_ENCODE_ALG_CCMP:
+ alg_name = "CCMP";
+ break;
+ default:
+ return -1;
+ }
+
+ strncpy((char *)param->u.crypt.alg, alg_name, IEEE_CRYPT_ALG_NAME_LEN);
+
+ if (pext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
+ param->u.crypt.set_tx = 1;
+
+	/* cliW: WEP does not have a group key,
+	 * so just skip the GROUP key check in that case.
+	 */
+ if ((pext->alg != IW_ENCODE_ALG_WEP) &&
+ (pext->ext_flags & IW_ENCODE_EXT_GROUP_KEY))
+ param->u.crypt.set_tx = 0;
+
+ param->u.crypt.idx = (pencoding->flags&0x00FF) - 1;
+
+ if (pext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID)
+ memcpy(param->u.crypt.seq, pext->rx_seq, 8);
+
+ if (pext->key_len) {
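+		/* The key material immediately follows struct iw_encode_ext in the "extra" buffer. */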
+ param->u.crypt.key_len = pext->key_len;
+ memcpy(param->u.crypt.key, pext + 1, pext->key_len);
+ }
+
+ ret = wpa_set_encryption(dev, param, param_len);
+
+ kfree(param);
+ return ret;
+}
+
+static int rtw_wx_get_nick(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ if (extra) {
+ wrqu->data.length = 14;
+ wrqu->data.flags = 1;
+ memcpy(extra, "<WIFI@REALTEK>", 14);
+ }
+
+ /* dump debug info here */
+ return 0;
+}
+
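+/*
+ * Debug helper: reads a 1/2/4-byte register.  The input string has the form
+ * "<bytes>,<address(hex)>", e.g. (illustrative) "4,0x10" reads a 32-bit
+ * register at offset 0x10; the result is returned as a hex string in "extra".
+ */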
+static int rtw_wx_read32(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter;
+ struct iw_point *p;
+ u16 len;
+ u32 addr;
+ u32 data32;
+ u32 bytes;
+ u8 *ptmp;
+
+ padapter = (struct adapter *)rtw_netdev_priv(dev);
+ p = &wrqu->data;
+ len = p->length;
+ ptmp = (u8 *)rtw_malloc(len);
+ if (NULL == ptmp)
+ return -ENOMEM;
+
+ if (copy_from_user(ptmp, p->pointer, len)) {
+ kfree(ptmp);
+ return -EFAULT;
+ }
+
+ bytes = 0;
+ addr = 0;
+ sscanf(ptmp, "%d,%x", &bytes, &addr);
+
+ switch (bytes) {
+ case 1:
+ data32 = rtw_read8(padapter, addr);
+ sprintf(extra, "0x%02X", data32);
+ break;
+ case 2:
+ data32 = rtw_read16(padapter, addr);
+ sprintf(extra, "0x%04X", data32);
+ break;
+ case 4:
+ data32 = rtw_read32(padapter, addr);
+ sprintf(extra, "0x%08X", data32);
+ break;
+	default:
+		DBG_88E(KERN_INFO "%s: usage> read [bytes],[address(hex)]\n", __func__);
+		kfree(ptmp);	/* do not leak the buffer copied from user space */
+		return -EINVAL;
+ }
+ DBG_88E(KERN_INFO "%s: addr = 0x%08X data =%s\n", __func__, addr, extra);
+
+ kfree(ptmp);
+ return 0;
+}
+
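+/*
+ * Debug helper: writes a 1/2/4-byte register.  The input string has the form
+ * "<bytes>,<address(hex)>,<data(hex)>", e.g. (illustrative) "1,0x4c,0xff"
+ * writes the byte 0xff to offset 0x4c.
+ */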
+static int rtw_wx_write32(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ u32 addr;
+ u32 data32;
+ u32 bytes;
+
+ bytes = 0;
+ addr = 0;
+ data32 = 0;
+ sscanf(extra, "%d,%x,%x", &bytes, &addr, &data32);
+
+ switch (bytes) {
+ case 1:
+ rtw_write8(padapter, addr, (u8)data32);
+ DBG_88E(KERN_INFO "%s: addr = 0x%08X data = 0x%02X\n", __func__, addr, (u8)data32);
+ break;
+ case 2:
+ rtw_write16(padapter, addr, (u16)data32);
+ DBG_88E(KERN_INFO "%s: addr = 0x%08X data = 0x%04X\n", __func__, addr, (u16)data32);
+ break;
+ case 4:
+ rtw_write32(padapter, addr, data32);
+ DBG_88E(KERN_INFO "%s: addr = 0x%08X data = 0x%08X\n", __func__, addr, data32);
+ break;
+ default:
+ DBG_88E(KERN_INFO "%s: usage> write [bytes],[address(hex)],[data(hex)]\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rtw_wx_read_rf(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ u32 path, addr, data32;
+
+ path = *(u32 *)extra;
+ addr = *((u32 *)extra + 1);
+ data32 = rtw_hal_read_rfreg(padapter, path, addr, 0xFFFFF);
+	/*
+	 * IMPORTANT!!
+	 * "extra" is copied back to user space only when the wireless
+	 * private ioctl is at an odd ordinal position.
+	 */
+ sprintf(extra, "0x%05x", data32);
+
+ return 0;
+}
+
+static int rtw_wx_write_rf(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ u32 path, addr, data32;
+
+ path = *(u32 *)extra;
+ addr = *((u32 *)extra + 1);
+ data32 = *((u32 *)extra + 2);
+ rtw_hal_write_rfreg(padapter, path, addr, 0xFFFFF, data32);
+
+ return 0;
+}
+
+static int rtw_wx_priv_null(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ return -1;
+}
+
+static int dummy(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ return -1;
+}
+
+static int rtw_wx_set_channel_plan(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ u8 channel_plan_req = (u8) (*((int *)wrqu));
+
+ if (_SUCCESS == rtw_set_chplan_cmd(padapter, channel_plan_req, 1))
+ DBG_88E("%s set channel_plan = 0x%02X\n", __func__, pmlmepriv->ChannelPlan);
+ else
+ return -EPERM;
+
+ return 0;
+}
+
+static int rtw_wx_set_mtk_wps_probe_ie(struct net_device *dev,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ return 0;
+}
+
+static int rtw_wx_get_sensitivity(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *buf)
+{
+ return 0;
+}
+
+static int rtw_wx_set_mtk_wps_ie(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ return 0;
+}
+
+/*
+ * For all data larger than 16 octets, we need to use a
+ * pointer to memory allocated in user space.
+ */
+static int rtw_drvext_hdl(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ return 0;
+}
+
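+/*
+ * Handles a subset of the MP ioctl subcodes when the driver is not in MP mode:
+ * plain register and RF register reads/writes, the GPIO trigger and the wifi
+ * status query.  MP_START itself is rejected with a debug message.
+ */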
+static void rtw_dbg_mode_hdl(struct adapter *padapter, u32 id, u8 *pdata, u32 len)
+{
+ struct mp_rw_reg *RegRWStruct;
+ struct rf_reg_param *prfreg;
+ u8 path;
+ u8 offset;
+ u32 value;
+
+ DBG_88E("%s\n", __func__);
+
+ switch (id) {
+ case GEN_MP_IOCTL_SUBCODE(MP_START):
+ DBG_88E("871x_driver is only for normal mode, can't enter mp mode\n");
+ break;
+ case GEN_MP_IOCTL_SUBCODE(READ_REG):
+ RegRWStruct = (struct mp_rw_reg *)pdata;
+ switch (RegRWStruct->width) {
+ case 1:
+ RegRWStruct->value = rtw_read8(padapter, RegRWStruct->offset);
+ break;
+ case 2:
+ RegRWStruct->value = rtw_read16(padapter, RegRWStruct->offset);
+ break;
+ case 4:
+ RegRWStruct->value = rtw_read32(padapter, RegRWStruct->offset);
+ break;
+ default:
+ break;
+ }
+
+ break;
+ case GEN_MP_IOCTL_SUBCODE(WRITE_REG):
+ RegRWStruct = (struct mp_rw_reg *)pdata;
+ switch (RegRWStruct->width) {
+ case 1:
+ rtw_write8(padapter, RegRWStruct->offset, (u8)RegRWStruct->value);
+ break;
+ case 2:
+ rtw_write16(padapter, RegRWStruct->offset, (u16)RegRWStruct->value);
+ break;
+ case 4:
+ rtw_write32(padapter, RegRWStruct->offset, (u32)RegRWStruct->value);
+ break;
+ default:
+ break;
+ }
+
+ break;
+ case GEN_MP_IOCTL_SUBCODE(READ_RF_REG):
+
+ prfreg = (struct rf_reg_param *)pdata;
+
+ path = (u8)prfreg->path;
+ offset = (u8)prfreg->offset;
+
+ value = rtw_hal_read_rfreg(padapter, path, offset, 0xffffffff);
+
+ prfreg->value = value;
+
+ break;
+ case GEN_MP_IOCTL_SUBCODE(WRITE_RF_REG):
+
+ prfreg = (struct rf_reg_param *)pdata;
+
+ path = (u8)prfreg->path;
+ offset = (u8)prfreg->offset;
+ value = prfreg->value;
+
+ rtw_hal_write_rfreg(padapter, path, offset, 0xffffffff, value);
+
+ break;
+ case GEN_MP_IOCTL_SUBCODE(TRIGGER_GPIO):
+ DBG_88E("==> trigger gpio 0\n");
+ rtw_hal_set_hwreg(padapter, HW_VAR_TRIGGER_GPIO_0, NULL);
+ break;
+ case GEN_MP_IOCTL_SUBCODE(GET_WIFI_STATUS):
+ *pdata = rtw_hal_sreset_get_wifi_status(padapter);
+ break;
+ default:
+ break;
+ }
+}
+
+static int rtw_mp_ioctl_hdl(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ u32 BytesRead, BytesWritten, BytesNeeded;
+ struct oid_par_priv oid_par;
+ struct mp_ioctl_handler *phandler;
+ struct mp_ioctl_param *poidparam;
+ uint status = 0;
+ u16 len;
+ u8 *pparmbuf = NULL, bset;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct iw_point *p = &wrqu->data;
+
+ if ((!p->length) || (!p->pointer)) {
+ ret = -EINVAL;
+ goto _rtw_mp_ioctl_hdl_exit;
+ }
+ pparmbuf = NULL;
+ bset = (u8)(p->flags & 0xFFFF);
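+	/* bset != 0 selects a SET_OID (write) request; bset == 0 a QUERY_OID request whose result is copied back to user space. */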
+ len = p->length;
+ pparmbuf = (u8 *)rtw_malloc(len);
+ if (pparmbuf == NULL) {
+ ret = -ENOMEM;
+ goto _rtw_mp_ioctl_hdl_exit;
+ }
+
+ if (copy_from_user(pparmbuf, p->pointer, len)) {
+ ret = -EFAULT;
+ goto _rtw_mp_ioctl_hdl_exit;
+ }
+
+ poidparam = (struct mp_ioctl_param *)pparmbuf;
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
+ ("rtw_mp_ioctl_hdl: subcode [%d], len[%d], buffer_len[%d]\r\n",
+ poidparam->subcode, poidparam->len, len));
+
+ if (poidparam->subcode >= MAX_MP_IOCTL_SUBCODE) {
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("no matching drvext subcodes\r\n"));
+ ret = -EINVAL;
+ goto _rtw_mp_ioctl_hdl_exit;
+ }
+
+ if (padapter->registrypriv.mp_mode == 1) {
+ phandler = mp_ioctl_hdl + poidparam->subcode;
+
+ if ((phandler->paramsize != 0) && (poidparam->len < phandler->paramsize)) {
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_,
+ ("no matching drvext param size %d vs %d\r\n",
+ poidparam->len, phandler->paramsize));
+ ret = -EINVAL;
+ goto _rtw_mp_ioctl_hdl_exit;
+ }
+
+ if (phandler->handler) {
+ oid_par.adapter_context = padapter;
+ oid_par.oid = phandler->oid;
+ oid_par.information_buf = poidparam->data;
+ oid_par.information_buf_len = poidparam->len;
+ oid_par.dbg = 0;
+
+ BytesWritten = 0;
+ BytesNeeded = 0;
+
+ if (bset) {
+ oid_par.bytes_rw = &BytesRead;
+ oid_par.bytes_needed = &BytesNeeded;
+ oid_par.type_of_oid = SET_OID;
+ } else {
+ oid_par.bytes_rw = &BytesWritten;
+ oid_par.bytes_needed = &BytesNeeded;
+ oid_par.type_of_oid = QUERY_OID;
+ }
+
+ status = phandler->handler(&oid_par);
+ } else {
+ DBG_88E("rtw_mp_ioctl_hdl(): err!, subcode =%d, oid =%d, handler =%p\n",
+ poidparam->subcode, phandler->oid, phandler->handler);
+ ret = -EFAULT;
+ goto _rtw_mp_ioctl_hdl_exit;
+ }
+ } else {
+ rtw_dbg_mode_hdl(padapter, poidparam->subcode, poidparam->data, poidparam->len);
+ }
+
+ if (bset == 0x00) {/* query info */
+ if (copy_to_user(p->pointer, pparmbuf, len))
+ ret = -EFAULT;
+ }
+
+ if (status) {
+ ret = -EFAULT;
+ goto _rtw_mp_ioctl_hdl_exit;
+ }
+
+_rtw_mp_ioctl_hdl_exit:
+
+ kfree(pparmbuf);
+ return ret;
+}
+
+static int rtw_get_ap_info(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ u32 cnt = 0, wpa_ielen;
+ unsigned long irqL;
+ struct list_head *plist, *phead;
+ unsigned char *pbuf;
+ u8 bssid[ETH_ALEN];
+ char data[32];
+ struct wlan_network *pnetwork = NULL;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct iw_point *pdata = &wrqu->data;
+
+ DBG_88E("+rtw_get_aplist_info\n");
+
+ if ((padapter->bDriverStopped) || (pdata == NULL)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ while ((check_fwstate(pmlmepriv, (_FW_UNDER_SURVEY|_FW_UNDER_LINKING)))) {
+ rtw_msleep_os(30);
+ cnt++;
+ if (cnt > 100)
+ break;
+ }
+ pdata->flags = 0;
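+	/* The first 32 bytes of user data hold the BSSID string; the resulting flag (0 = none, 1 = WPA, 2 = WPA2) is written back at offset 32. */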
+ if (pdata->length >= 32) {
+ if (copy_from_user(data, pdata->pointer, 32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ } else {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1) {
+ if (rtw_end_of_queue_search(phead, plist) == true)
+ break;
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ if (hwaddr_aton_i(data, bssid)) {
+ DBG_88E("Invalid BSSID '%s'.\n", (u8 *)data);
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ return -EINVAL;
+ }
+
+		if (!memcmp(bssid, pnetwork->network.MacAddress, ETH_ALEN)) {
+ /* BSSID match, then check if supporting wpa/wpa2 */
+ DBG_88E("BSSID:%pM\n", (bssid));
+
+ pbuf = rtw_get_wpa_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength-12);
+ if (pbuf && (wpa_ielen > 0)) {
+ pdata->flags = 1;
+ break;
+ }
+
+ pbuf = rtw_get_wpa2_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength-12);
+ if (pbuf && (wpa_ielen > 0)) {
+ pdata->flags = 2;
+ break;
+ }
+ }
+
+ plist = get_next(plist);
+ }
+
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ if (pdata->length >= 34) {
+ if (copy_to_user(pdata->pointer+32, (u8 *)&pdata->flags, 1)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ }
+
+exit:
+
+ return ret;
+}
+
+static int rtw_set_pid(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ int *pdata = (int *)wrqu;
+ int selector;
+
+ if ((padapter->bDriverStopped) || (pdata == NULL)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ selector = *pdata;
+ if (selector < 3 && selector >= 0) {
+ padapter->pid[selector] = *(pdata+1);
+ ui_pid[selector] = *(pdata+1);
+ DBG_88E("%s set pid[%d] =%d\n", __func__, selector, padapter->pid[selector]);
+ } else {
+ DBG_88E("%s selector %d error\n", __func__, selector);
+ }
+exit:
+ return ret;
+}
+
+static int rtw_wps_start(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct iw_point *pdata = &wrqu->data;
+ u32 u32wps_start = 0;
+
+ ret = copy_from_user((void *)&u32wps_start, pdata->pointer, 4);
+ if (ret) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if ((padapter->bDriverStopped) || (pdata == NULL)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (u32wps_start == 0)
+ u32wps_start = *extra;
+
+ DBG_88E("[%s] wps_start = %d\n", __func__, u32wps_start);
+
+ if (u32wps_start == 1) /* WPS Start */
+ rtw_led_control(padapter, LED_CTL_START_WPS);
+ else if (u32wps_start == 2) /* WPS Stop because of wps success */
+ rtw_led_control(padapter, LED_CTL_STOP_WPS);
+ else if (u32wps_start == 3) /* WPS Stop because of wps fail */
+ rtw_led_control(padapter, LED_CTL_STOP_WPS_FAIL);
+
+exit:
+ return ret;
+}
+
+#ifdef CONFIG_88EU_P2P
+static int rtw_wext_p2p_enable(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ enum P2P_ROLE init_role = P2P_ROLE_DISABLE;
+
+ if (*extra == '0')
+ init_role = P2P_ROLE_DISABLE;
+ else if (*extra == '1')
+ init_role = P2P_ROLE_DEVICE;
+ else if (*extra == '2')
+ init_role = P2P_ROLE_CLIENT;
+ else if (*extra == '3')
+ init_role = P2P_ROLE_GO;
+
+ if (_FAIL == rtw_p2p_enable(padapter, init_role)) {
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ /* set channel/bandwidth */
+ if (init_role != P2P_ROLE_DISABLE) {
+ u8 channel, ch_offset;
+ u16 bwmode;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_LISTEN)) {
+ /* Stay at the listen state and wait for discovery. */
+ channel = pwdinfo->listen_channel;
+ pwdinfo->operating_channel = pwdinfo->listen_channel;
+ ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ bwmode = HT_CHANNEL_WIDTH_20;
+ } else {
+ pwdinfo->operating_channel = pmlmeext->cur_channel;
+
+ channel = pwdinfo->operating_channel;
+ ch_offset = pmlmeext->cur_ch_offset;
+ bwmode = pmlmeext->cur_bwmode;
+ }
+
+ set_channel_bwmode(padapter, channel, ch_offset, bwmode);
+ }
+
+exit:
+ return ret;
+}
+
+static int rtw_p2p_set_go_nego_ssid(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ DBG_88E("[%s] ssid = %s, len = %zu\n", __func__, extra, strlen(extra));
+ memcpy(pwdinfo->nego_ssid, extra, strlen(extra));
+ pwdinfo->nego_ssidlen = strlen(extra);
+
+ return ret;
+}
+
+static int rtw_p2p_set_intent(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ u8 intent = pwdinfo->intent;
+
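+	/* The GO intent (0-15) arrives as one or two ASCII decimal digits. */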
+ switch (wrqu->data.length) {
+ case 1:
+ intent = extra[0] - '0';
+ break;
+ case 2:
+ intent = str_2char2num(extra[0], extra[1]);
+ break;
+ }
+ if (intent <= 15)
+ pwdinfo->intent = intent;
+ else
+ ret = -1;
+ DBG_88E("[%s] intent = %d\n", __func__, intent);
+ return ret;
+}
+
+static int rtw_p2p_set_listen_ch(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ u8 listen_ch = pwdinfo->listen_channel; /* Listen channel number */
+
+ switch (wrqu->data.length) {
+ case 1:
+ listen_ch = extra[0] - '0';
+ break;
+ case 2:
+ listen_ch = str_2char2num(extra[0], extra[1]);
+ break;
+ }
+
+ if ((listen_ch == 1) || (listen_ch == 6) || (listen_ch == 11)) {
+ pwdinfo->listen_channel = listen_ch;
+ set_channel_bwmode(padapter, pwdinfo->listen_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ } else {
+ ret = -1;
+ }
+
+ DBG_88E("[%s] listen_ch = %d\n", __func__, pwdinfo->listen_channel);
+
+ return ret;
+}
+
+static int rtw_p2p_set_op_ch(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+/* Commented by Albert 20110524 */
+/* This function sets the operating channel to be used if the driver becomes the group owner. */
+
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ u8 op_ch = pwdinfo->operating_channel; /* Operating channel number */
+
+ switch (wrqu->data.length) {
+ case 1:
+ op_ch = extra[0] - '0';
+ break;
+ case 2:
+ op_ch = str_2char2num(extra[0], extra[1]);
+ break;
+ }
+
+ if (op_ch > 0)
+ pwdinfo->operating_channel = op_ch;
+ else
+ ret = -1;
+
+ DBG_88E("[%s] op_ch = %d\n", __func__, pwdinfo->operating_channel);
+
+ return ret;
+}
+
+static int rtw_p2p_profilefound(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ /* Comment by Albert 2010/10/13 */
+ /* Input data format: */
+ /* Ex: 0 */
+ /* Ex: 1XX:XX:XX:XX:XX:XXYYSSID */
+	/* 0 => Flush the profile record list. */
+	/* 1 => Add an entry to the profile list */
+	/* XX:XX:XX:XX:XX:XX => peer's MAC Address (ex: 00:E0:4C:00:00:01) */
+	/* YY => SSID Length (two decimal digits) */
+	/* SSID => SSID of the persistent group */
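+	/* Illustrative example: "100:E0:4C:00:00:0107MyGroup" adds peer 00:E0:4C:00:00:01 with the 7-byte SSID "MyGroup". */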
+
+ DBG_88E("[%s] In value = %s, len = %d\n", __func__, extra, wrqu->data.length - 1);
+
+	/* The upper-layer application should pass the SSID to the driver via this rtw_p2p_profilefound function. */
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
+ if (extra[0] == '0') {
+ /* Remove all the profile information of wifidirect_info structure. */
+ _rtw_memset(&pwdinfo->profileinfo[0], 0x00, sizeof(struct profile_info) * P2P_MAX_PERSISTENT_GROUP_NUM);
+ pwdinfo->profileindex = 0;
+ } else {
+ if (pwdinfo->profileindex >= P2P_MAX_PERSISTENT_GROUP_NUM) {
+ ret = -1;
+ } else {
+ int jj, kk;
+
+ /* Add this profile information into pwdinfo->profileinfo */
+ /* Ex: 1XX:XX:XX:XX:XX:XXYYSSID */
+ for (jj = 0, kk = 1; jj < ETH_ALEN; jj++, kk += 3)
+ pwdinfo->profileinfo[pwdinfo->profileindex].peermac[jj] = key_2char2num(extra[kk], extra[kk + 1]);
+
+ pwdinfo->profileinfo[pwdinfo->profileindex].ssidlen = (extra[18] - '0') * 10 + (extra[19] - '0');
+ memcpy(pwdinfo->profileinfo[pwdinfo->profileindex].ssid, &extra[20], pwdinfo->profileinfo[pwdinfo->profileindex].ssidlen);
+ pwdinfo->profileindex++;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int rtw_p2p_setDN(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ DBG_88E("[%s] %s %d\n", __func__, extra, wrqu->data.length - 1);
+ _rtw_memset(pwdinfo->device_name, 0x00, WPS_MAX_DEVICE_NAME_LEN);
+ memcpy(pwdinfo->device_name, extra, wrqu->data.length - 1);
+ pwdinfo->device_name_len = wrqu->data.length - 1;
+
+ return ret;
+}
+
+static int rtw_p2p_get_status(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ if (padapter->bShowGetP2PState)
+ DBG_88E("[%s] Role = %d, Status = %d, peer addr = %.2X:%.2X:%.2X:%.2X:%.2X:%.2X\n", __func__, rtw_p2p_role(pwdinfo), rtw_p2p_state(pwdinfo),
+ pwdinfo->p2p_peer_interface_addr[0], pwdinfo->p2p_peer_interface_addr[1], pwdinfo->p2p_peer_interface_addr[2],
+ pwdinfo->p2p_peer_interface_addr[3], pwdinfo->p2p_peer_interface_addr[4], pwdinfo->p2p_peer_interface_addr[5]);
+
+ /* Commented by Albert 2010/10/12 */
+	/* Because of the output size limitation, the "Role" information has been removed here. */
+	/* The "Role" information is obtained through a separate private IOCTL instead. */
+ sprintf(extra, "\n\nStatus =%.2d\n", rtw_p2p_state(pwdinfo));
+ wrqu->data.length = strlen(extra);
+
+ return ret;
+}
+
+/* Commented by Albert 20110520 */
+/* This function returns the config method description. */
+/* The description shows which config method the remote P2P device intends to use, */
+/* as indicated in the provisioning discovery request frame it sent. */
+
+static int rtw_p2p_get_req_cm(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ sprintf(extra, "\n\nCM =%s\n", pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req);
+ wrqu->data.length = strlen(extra);
+ return ret;
+}
+
+static int rtw_p2p_get_role(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ DBG_88E("[%s] Role = %d, Status = %d, peer addr = %.2X:%.2X:%.2X:%.2X:%.2X:%.2X\n", __func__, rtw_p2p_role(pwdinfo), rtw_p2p_state(pwdinfo),
+ pwdinfo->p2p_peer_interface_addr[0], pwdinfo->p2p_peer_interface_addr[1], pwdinfo->p2p_peer_interface_addr[2],
+ pwdinfo->p2p_peer_interface_addr[3], pwdinfo->p2p_peer_interface_addr[4], pwdinfo->p2p_peer_interface_addr[5]);
+
+ sprintf(extra, "\n\nRole =%.2d\n", rtw_p2p_role(pwdinfo));
+ wrqu->data.length = strlen(extra);
+ return ret;
+}
+
+static int rtw_p2p_get_peer_ifaddr(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ DBG_88E("[%s] Role = %d, Status = %d, peer addr = %pM\n", __func__,
+ rtw_p2p_role(pwdinfo), rtw_p2p_state(pwdinfo),
+ pwdinfo->p2p_peer_interface_addr);
+ sprintf(extra, "\nMAC %pM",
+ pwdinfo->p2p_peer_interface_addr);
+ wrqu->data.length = strlen(extra);
+ return ret;
+}
+
+static int rtw_p2p_get_peer_devaddr(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ DBG_88E("[%s] Role = %d, Status = %d, peer addr = %pM\n", __func__,
+ rtw_p2p_role(pwdinfo), rtw_p2p_state(pwdinfo),
+ pwdinfo->rx_prov_disc_info.peerDevAddr);
+ sprintf(extra, "\n%pM",
+ pwdinfo->rx_prov_disc_info.peerDevAddr);
+ wrqu->data.length = strlen(extra);
+ return ret;
+}
+
+static int rtw_p2p_get_peer_devaddr_by_invitation(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ DBG_88E("[%s] Role = %d, Status = %d, peer addr = %pM\n",
+ __func__, rtw_p2p_role(pwdinfo), rtw_p2p_state(pwdinfo),
+ pwdinfo->p2p_peer_device_addr);
+ sprintf(extra, "\nMAC %pM",
+ pwdinfo->p2p_peer_device_addr);
+ wrqu->data.length = strlen(extra);
+ return ret;
+}
+
+static int rtw_p2p_get_groupid(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ sprintf(extra, "\n%.2X:%.2X:%.2X:%.2X:%.2X:%.2X %s",
+ pwdinfo->groupid_info.go_device_addr[0], pwdinfo->groupid_info.go_device_addr[1],
+ pwdinfo->groupid_info.go_device_addr[2], pwdinfo->groupid_info.go_device_addr[3],
+ pwdinfo->groupid_info.go_device_addr[4], pwdinfo->groupid_info.go_device_addr[5],
+ pwdinfo->groupid_info.ssid);
+ wrqu->data.length = strlen(extra);
+ return ret;
+}
+
+static int rtw_p2p_get_op_ch(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ DBG_88E("[%s] Op_ch = %02x\n", __func__, pwdinfo->operating_channel);
+
+ sprintf(extra, "\n\nOp_ch =%.2d\n", pwdinfo->operating_channel);
+ wrqu->data.length = strlen(extra);
+ return ret;
+}
+
+static int rtw_p2p_get_wps_configmethod(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ u8 peerMAC[ETH_ALEN] = {0x00};
+ int jj, kk;
+ u8 peerMACStr[17] = {0x00};
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ unsigned long irqL;
+ struct list_head *plist, *phead;
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ u8 blnMatch = 0;
+ u16 attr_content = 0;
+ uint attr_contentlen = 0;
+ /* 6 is the string "wpsCM =", 17 is the MAC addr, we have to clear it at wrqu->data.pointer */
+ u8 attr_content_str[6 + 17] = {0x00};
+
+ /* Commented by Albert 20110727 */
+	/* The input data is the MAC address for which the application wants to know the WPS config method. */
+	/* Once the WPS config method is known, the application can decide which config method to use for provisioning discovery. */
+ /* Format: iwpriv wlanx p2p_get_wpsCM 00:E0:4C:00:00:05 */
+
+ DBG_88E("[%s] data = %s\n", __func__, (char *)extra);
+ if (copy_from_user(peerMACStr, wrqu->data.pointer + 6, 17))
+ return -EFAULT;
+
+ for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
+ peerMAC[jj] = key_2char2num(peerMACStr[kk], peerMACStr[kk + 1]);
+
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1) {
+ if (rtw_end_of_queue_search(phead, plist) == true)
+ break;
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
+ u8 *wpsie;
+ uint wpsie_len = 0;
+ __be16 be_tmp;
+
+ /* The mac address is matched. */
+ wpsie = rtw_get_wps_ie(&pnetwork->network.IEs[12], pnetwork->network.IELength - 12, NULL, &wpsie_len);
+ if (wpsie) {
+ rtw_get_wps_attr_content(wpsie, wpsie_len, WPS_ATTR_CONF_METHOD, (u8 *) &be_tmp, &attr_contentlen);
+ if (attr_contentlen) {
+ attr_content = be16_to_cpu(be_tmp);
+ sprintf(attr_content_str, "\n\nM =%.4d", attr_content);
+ blnMatch = 1;
+ }
+ }
+ break;
+ }
+ plist = get_next(plist);
+ }
+
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ if (!blnMatch)
+ sprintf(attr_content_str, "\n\nM = 0000");
+
+ if (copy_to_user(wrqu->data.pointer, attr_content_str, 6 + 17))
+ return -EFAULT;
+ return ret;
+}
+
+static int rtw_p2p_get_go_device_address(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ u8 peerMAC[ETH_ALEN] = {0x00};
+ int jj, kk;
+ u8 peerMACStr[17] = {0x00};
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ unsigned long irqL;
+ struct list_head *plist, *phead;
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ u8 blnMatch = 0;
+ u8 *p2pie;
+ uint p2pielen = 0, attr_contentlen = 0;
+ u8 attr_content[100] = {0x00};
+
+ u8 go_devadd_str[17 + 10] = {0x00};
+ /* +10 is for the str "go_devadd =", we have to clear it at wrqu->data.pointer */
+
+ /* Commented by Albert 20121209 */
+	/* The input data is the GO's interface address for which the application wants to know the device address. */
+ /* Format: iwpriv wlanx p2p_get2 go_devadd = 00:E0:4C:00:00:05 */
+
+ DBG_88E("[%s] data = %s\n", __func__, (char *)extra);
+ if (copy_from_user(peerMACStr, wrqu->data.pointer + 10, 17))
+ return -EFAULT;
+
+ for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
+ peerMAC[jj] = key_2char2num(peerMACStr[kk], peerMACStr[kk + 1]);
+
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1) {
+ if (rtw_end_of_queue_search(phead, plist) == true)
+ break;
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
+ /* Commented by Albert 2011/05/18 */
+ /* Match the device address located in the P2P IE */
+ /* This is for the case that the P2P device address is not the same as the P2P interface address. */
+
+ p2pie = rtw_get_p2p_ie(&pnetwork->network.IEs[12], pnetwork->network.IELength - 12, NULL, &p2pielen);
+ if (p2pie) {
+ while (p2pie) {
+ /* The P2P Device ID attribute is included in the Beacon frame. */
+ /* The P2P Device Info attribute is included in the probe response frame. */
+
+ _rtw_memset(attr_content, 0x00, 100);
+ if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_DEVICE_ID, attr_content, &attr_contentlen)) {
+ /* Handle the P2P Device ID attribute of Beacon first */
+ blnMatch = 1;
+ break;
+ } else if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_DEVICE_INFO, attr_content, &attr_contentlen)) {
+ /* Handle the P2P Device Info attribute of probe response */
+ blnMatch = 1;
+ break;
+ }
+
+ /* Get the next P2P IE */
+ p2pie = rtw_get_p2p_ie(p2pie+p2pielen, pnetwork->network.IELength - 12 - (p2pie - &pnetwork->network.IEs[12] + p2pielen), NULL, &p2pielen);
+ }
+ }
+ }
+
+ plist = get_next(plist);
+ }
+
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ if (!blnMatch)
+ sprintf(go_devadd_str, "\n\ndev_add = NULL");
+ else
+ sprintf(go_devadd_str, "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X",
+ attr_content[0], attr_content[1], attr_content[2], attr_content[3], attr_content[4], attr_content[5]);
+
+ if (copy_to_user(wrqu->data.pointer, go_devadd_str, 10 + 17))
+ return -EFAULT;
+ return ret;
+}
+
+static int rtw_p2p_get_device_type(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ u8 peerMAC[ETH_ALEN] = {0x00};
+ int jj, kk;
+ u8 peerMACStr[17] = {0x00};
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ unsigned long irqL;
+ struct list_head *plist, *phead;
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ u8 blnMatch = 0;
+ u8 dev_type[8] = {0x00};
+ uint dev_type_len = 0;
+ u8 dev_type_str[17 + 9] = {0x00}; /* +9 is for the str "dev_type =", we have to clear it at wrqu->data.pointer */
+
+ /* Commented by Albert 20121209 */
+ /* The input data is the MAC address of the peer whose device type the application wants to know. */
+ /* This lets the user interface report the peer's device type. */
+ /* Format: iwpriv wlanx p2p_get2 dev_type = 00:E0:4C:00:00:05 */
+
+ DBG_88E("[%s] data = %s\n", __func__, (char *)extra);
+ if (copy_from_user(peerMACStr, wrqu->data.pointer + 9, 17))
+ return -EFAULT;
+
+ for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
+ peerMAC[jj] = key_2char2num(peerMACStr[kk], peerMACStr[kk + 1]);
+
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1) {
+ if (rtw_end_of_queue_search(phead, plist) == true)
+ break;
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
+ u8 *wpsie;
+ uint wpsie_len = 0;
+
+ /* The mac address is matched. */
+
+ wpsie = rtw_get_wps_ie(&pnetwork->network.IEs[12],
+ pnetwork->network.IELength - 12,
+ NULL, &wpsie_len);
+ if (wpsie) {
+ rtw_get_wps_attr_content(wpsie, wpsie_len, WPS_ATTR_PRIMARY_DEV_TYPE, dev_type, &dev_type_len);
+ if (dev_type_len) {
+ u16 type = 0;
+ __be16 be_tmp;
+
+ memcpy(&be_tmp, dev_type, 2);
+ type = be16_to_cpu(be_tmp);
+ sprintf(dev_type_str, "\n\nN =%.2d", type);
+ blnMatch = 1;
+ }
+ }
+ break;
+ }
+
+ plist = get_next(plist);
+ }
+
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ if (!blnMatch)
+ sprintf(dev_type_str, "\n\nN = 00");
+
+ if (copy_to_user(wrqu->data.pointer, dev_type_str, 9 + 17))
+ return -EFAULT;
+
+ return ret;
+}
+
+static int rtw_p2p_get_device_name(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ u8 peerMAC[ETH_ALEN] = {0x00};
+ int jj, kk;
+ u8 peerMACStr[17] = {0x00};
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ unsigned long irqL;
+ struct list_head *plist, *phead;
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ u8 blnMatch = 0;
+ u8 dev_name[WPS_MAX_DEVICE_NAME_LEN] = {0x00};
+ uint dev_len = 0;
+ u8 dev_name_str[WPS_MAX_DEVICE_NAME_LEN + 5] = {0x00}; /* +5 is for the str "devN =", we have to clear it at wrqu->data.pointer */
+
+ /* Commented by Albert 20121225 */
+ /* The input data is the MAC address of the peer whose device name the application wants to know. */
+ /* This lets the user interface show the peer's device name instead of its SSID. */
+ /* Format: iwpriv wlanx p2p_get2 devN = 00:E0:4C:00:00:05 */
+
+ DBG_88E("[%s] data = %s\n", __func__, (char *)extra);
+ if (copy_from_user(peerMACStr, wrqu->data.pointer + 5, 17))
+ return -EFAULT;
+
+ for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
+ peerMAC[jj] = key_2char2num(peerMACStr[kk], peerMACStr[kk + 1]);
+
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1) {
+ if (rtw_end_of_queue_search(phead, plist) == true)
+ break;
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
+ u8 *wpsie;
+ uint wpsie_len = 0;
+
+ /* The mac address is matched. */
+ wpsie = rtw_get_wps_ie(&pnetwork->network.IEs[12], pnetwork->network.IELength - 12, NULL, &wpsie_len);
+ if (wpsie) {
+ rtw_get_wps_attr_content(wpsie, wpsie_len, WPS_ATTR_DEVICE_NAME, dev_name, &dev_len);
+ if (dev_len) {
+ sprintf(dev_name_str, "\n\nN =%s", dev_name);
+ blnMatch = 1;
+ }
+ }
+ break;
+ }
+
+ plist = get_next(plist);
+ }
+
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ if (!blnMatch)
+ sprintf(dev_name_str, "\n\nN = 0000");
+
+ if (copy_to_user(wrqu->data.pointer, dev_name_str, 5 + ((dev_len > 17) ? dev_len : 17)))
+ return -EFAULT;
+ return ret;
+}
+
+static int rtw_p2p_get_invitation_procedure(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ u8 peerMAC[ETH_ALEN] = {0x00};
+ int jj, kk;
+ u8 peerMACStr[17] = {0x00};
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ unsigned long irqL;
+ struct list_head *plist, *phead;
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ u8 blnMatch = 0;
+ u8 *p2pie;
+ uint p2pielen = 0, attr_contentlen = 0;
+ u8 attr_content[2] = {0x00};
+
+ u8 inv_proc_str[17 + 8] = {0x00};
+ /* +8 is for the str "InvProc =", we have to clear it at wrqu->data.pointer */
+
+ /* Commented by Ouden 20121226 */
+ /* The application wants to know whether the P2P invitation procedure is supported or not. */
+ /* Format: iwpriv wlanx p2p_get2 InvProc = 00:E0:4C:00:00:05 */
+
+ DBG_88E("[%s] data = %s\n", __func__, (char *)extra);
+ if (copy_from_user(peerMACStr, wrqu->data.pointer + 8, 17))
+ return -EFAULT;
+
+ for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
+ peerMAC[jj] = key_2char2num(peerMACStr[kk], peerMACStr[kk + 1]);
+
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1) {
+ if (rtw_end_of_queue_search(phead, plist) == true)
+ break;
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
+ /* Commented by Albert 20121226 */
+ /* Match the device address located in the P2P IE */
+ /* This is for the case that the P2P device address is not the same as the P2P interface address. */
+
+ p2pie = rtw_get_p2p_ie(&pnetwork->network.IEs[12], pnetwork->network.IELength - 12, NULL, &p2pielen);
+ if (p2pie) {
+ while (p2pie) {
+ if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_CAPABILITY, attr_content, &attr_contentlen)) {
+ /* Handle the P2P capability attribute */
+ blnMatch = 1;
+ break;
+ }
+
+ /* Get the next P2P IE */
+ p2pie = rtw_get_p2p_ie(p2pie+p2pielen, pnetwork->network.IELength - 12 - (p2pie - &pnetwork->network.IEs[12] + p2pielen), NULL, &p2pielen);
+ }
+ }
+ }
+ plist = get_next(plist);
+ }
+
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ if (!blnMatch) {
+ sprintf(inv_proc_str, "\nIP =-1");
+ } else {
+ if (attr_content[0] & 0x20)
+ sprintf(inv_proc_str, "\nIP = 1");
+ else
+ sprintf(inv_proc_str, "\nIP = 0");
+ }
+ if (copy_to_user(wrqu->data.pointer, inv_proc_str, 8 + 17))
+ return -EFAULT;
+ return ret;
+}
+
+static int rtw_p2p_connect(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ u8 peerMAC[ETH_ALEN] = {0x00};
+ int jj, kk;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ unsigned long irqL;
+ struct list_head *plist, *phead;
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ uint uintPeerChannel = 0;
+
+ /* Commented by Albert 20110304 */
+ /* The input data contains two pieces of information. */
+ /* 1. The MAC address of the peer device to form a group with. */
+ /* 2. The WPS PIN code, or the string "pbc" for the push-button method. */
+ /* Format: 00:E0:4C:00:00:05 */
+ /* Format: 00:E0:4C:00:00:05 */
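+ /* This handler is reached through the "nego" keyword handled in rtw_p2p_set() below. */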
+
+ DBG_88E("[%s] data = %s\n", __func__, extra);
+
+ if (pwdinfo->p2p_state == P2P_STATE_NONE) {
+ DBG_88E("[%s] WiFi Direct is disable!\n", __func__);
+ return ret;
+ }
+
+ if (pwdinfo->ui_got_wps_info == P2P_NO_WPSINFO)
+ return -1;
+
+ for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
+ peerMAC[jj] = key_2char2num(extra[kk], extra[kk + 1]);
+
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1) {
+ if (rtw_end_of_queue_search(phead, plist) == true)
+ break;
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
+ uintPeerChannel = pnetwork->network.Configuration.DSConfig;
+ break;
+ }
+
+ plist = get_next(plist);
+ }
+
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ if (uintPeerChannel) {
+ _rtw_memset(&pwdinfo->nego_req_info, 0x00, sizeof(struct tx_nego_req_info));
+ _rtw_memset(&pwdinfo->groupid_info, 0x00, sizeof(struct group_id_info));
+
+ pwdinfo->nego_req_info.peer_channel_num[0] = uintPeerChannel;
+ memcpy(pwdinfo->nego_req_info.peerDevAddr, pnetwork->network.MacAddress, ETH_ALEN);
+ pwdinfo->nego_req_info.benable = true;
+
+ _cancel_timer_ex(&pwdinfo->restore_p2p_state_timer);
+ if (rtw_p2p_state(pwdinfo) != P2P_STATE_GONEGO_OK) {
+ /* Restore to the listen state if the current p2p state is not nego OK */
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_LISTEN);
+ }
+
+ rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_ING);
+
+ DBG_88E("[%s] Start PreTx Procedure!\n", __func__);
+ _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
+ _set_timer(&pwdinfo->restore_p2p_state_timer, P2P_GO_NEGO_TIMEOUT);
+ } else {
+ DBG_88E("[%s] Not Found in Scanning Queue~\n", __func__);
+ ret = -1;
+ }
+ return ret;
+}
+
+static int rtw_p2p_invite_req(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ int jj, kk;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct list_head *plist, *phead;
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ uint uintPeerChannel = 0;
+ u8 attr_content[50] = {0x00};
+ u8 *p2pie;
+ uint p2pielen = 0, attr_contentlen = 0;
+ unsigned long irqL;
+ struct tx_invite_req_info *pinvite_req_info = &pwdinfo->invitereq_info;
+
+ /* The input data contains two pieces of information. */
+ /* 1. The P2P device address to which the invitation will be sent. */
+ /* 2. The group id, consisting of the GO's MAC address, a space, and the GO's SSID. */
+ /* Command line sample: iwpriv wlan0 p2p_set invite ="00:11:22:33:44:55 00:E0:4C:00:00:05 DIRECT-xy" */
+ /* Format: 00:11:22:33:44:55 00:E0:4C:00:00:05 DIRECT-xy */
+
+ DBG_88E("[%s] data = %s\n", __func__, extra);
+
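+ /* Expected layout: peer device address in bytes 0-16, GO bssid from byte 18, GO SSID from byte 36; a length of 37 or less leaves no room for the SSID (the length presumably also counts the trailing NUL). */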
+ if (wrqu->data.length <= 37) {
+ DBG_88E("[%s] Wrong format!\n", __func__);
+ return ret;
+ }
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
+ DBG_88E("[%s] WiFi Direct is disable!\n", __func__);
+ return ret;
+ } else {
+ /* Reset the content of struct tx_invite_req_info */
+ pinvite_req_info->benable = false;
+ _rtw_memset(pinvite_req_info->go_bssid, 0x00, ETH_ALEN);
+ _rtw_memset(pinvite_req_info->go_ssid, 0x00, WLAN_SSID_MAXLEN);
+ pinvite_req_info->ssidlen = 0x00;
+ pinvite_req_info->operating_ch = pwdinfo->operating_channel;
+ _rtw_memset(pinvite_req_info->peer_macaddr, 0x00, ETH_ALEN);
+ pinvite_req_info->token = 3;
+ }
+
+ for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
+ pinvite_req_info->peer_macaddr[jj] = key_2char2num(extra[kk], extra[kk + 1]);
+
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1) {
+ if (rtw_end_of_queue_search(phead, plist) == true)
+ break;
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ /* Commented by Albert 2011/05/18 */
+ /* Match the device address located in the P2P IE */
+ /* This is for the case that the P2P device address is not the same as the P2P interface address. */
+
+ p2pie = rtw_get_p2p_ie(&pnetwork->network.IEs[12], pnetwork->network.IELength - 12, NULL, &p2pielen);
+ if (p2pie) {
+ /* The P2P Device ID attribute is included in the Beacon frame. */
+ /* The P2P Device Info attribute is included in the probe response frame. */
+
+ if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_DEVICE_ID, attr_content, &attr_contentlen)) {
+ /* Handle the P2P Device ID attribute of Beacon first */
+ if (!memcmp(attr_content, pinvite_req_info->peer_macaddr, ETH_ALEN)) {
+ uintPeerChannel = pnetwork->network.Configuration.DSConfig;
+ break;
+ }
+ } else if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_DEVICE_INFO, attr_content, &attr_contentlen)) {
+ /* Handle the P2P Device Info attribute of probe response */
+ if (!memcmp(attr_content, pinvite_req_info->peer_macaddr, ETH_ALEN)) {
+ uintPeerChannel = pnetwork->network.Configuration.DSConfig;
+ break;
+ }
+ }
+ }
+ plist = get_next(plist);
+ }
+
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ if (uintPeerChannel) {
+ /* Store the GO's bssid */
+ for (jj = 0, kk = 18; jj < ETH_ALEN; jj++, kk += 3)
+ pinvite_req_info->go_bssid[jj] = key_2char2num(extra[kk], extra[kk + 1]);
+
+ /* Store the GO's ssid */
+ pinvite_req_info->ssidlen = wrqu->data.length - 36;
+ memcpy(pinvite_req_info->go_ssid, &extra[36], (u32) pinvite_req_info->ssidlen);
+ pinvite_req_info->benable = true;
+ pinvite_req_info->peer_ch = uintPeerChannel;
+
+ rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_TX_INVITE_REQ);
+
+ set_channel_bwmode(padapter, uintPeerChannel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+
+ _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
+
+ _set_timer(&pwdinfo->restore_p2p_state_timer, P2P_INVITE_TIMEOUT);
+ } else {
+ DBG_88E("[%s] NOT Found in the Scanning Queue!\n", __func__);
+ }
+ return ret;
+}
+
+static int rtw_p2p_set_persistent(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ /* The input data is 0 or 1 */
+ /* 0: disable persistent group functionality */
+ /* 1: enable persistent group functionality */
+
+ DBG_88E("[%s] data = %s\n", __func__, extra);
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
+ DBG_88E("[%s] WiFi Direct is disable!\n", __func__);
+ return ret;
+ } else {
+ if (extra[0] == '0') /* Disable the persistent group function. */
+ pwdinfo->persistent_supported = false;
+ else if (extra[0] == '1') /* Enable the persistent group function. */
+ pwdinfo->persistent_supported = true;
+ else
+ pwdinfo->persistent_supported = false;
+ }
+ pr_info("[%s] persistent_supported = %d\n", __func__, pwdinfo->persistent_supported);
+ return ret;
+}
+
+static int rtw_p2p_prov_disc(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ u8 peerMAC[ETH_ALEN] = {0x00};
+ int jj, kk;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct list_head *plist, *phead;
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ uint uintPeerChannel = 0;
+ u8 attr_content[100] = {0x00};
+ u8 *p2pie;
+ uint p2pielen = 0, attr_contentlen = 0;
+ unsigned long irqL;
+
+ /* The input data contains two pieces of information. */
+ /* 1. The MAC address of the peer to which the provision discovery request frame will be sent. */
+ /* 2. The WPS configuration method to be requested. */
+ /* Format: 00:E0:4C:00:00:05_display */
+ /* Format: 00:E0:4C:00:00:05_keypad */
+ /* Format: 00:E0:4C:00:00:05_pbc */
+ /* Format: 00:E0:4C:00:00:05_label */
+
+ DBG_88E("[%s] data = %s\n", __func__, extra);
+
+ if (pwdinfo->p2p_state == P2P_STATE_NONE) {
+ DBG_88E("[%s] WiFi Direct is disable!\n", __func__);
+ return ret;
+ } else {
+ /* Reset the content of struct tx_provdisc_req_info excluded the wps_config_method_request. */
+ _rtw_memset(pwdinfo->tx_prov_disc_info.peerDevAddr, 0x00, ETH_ALEN);
+ _rtw_memset(pwdinfo->tx_prov_disc_info.peerIFAddr, 0x00, ETH_ALEN);
+ _rtw_memset(&pwdinfo->tx_prov_disc_info.ssid, 0x00, sizeof(struct ndis_802_11_ssid));
+ pwdinfo->tx_prov_disc_info.peer_channel_num[0] = 0;
+ pwdinfo->tx_prov_disc_info.peer_channel_num[1] = 0;
+ pwdinfo->tx_prov_disc_info.benable = false;
+ }
+
+ for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
+ peerMAC[jj] = key_2char2num(extra[kk], extra[kk + 1]);
+
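+ /* The WPS config method keyword starts at offset 18: 17 characters of MAC address plus the '_' separator. */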
+ if (!memcmp(&extra[18], "display", 7)) {
+ pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_DISPLYA;
+ } else if (!memcmp(&extra[18], "keypad", 7)) {
+ pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_KEYPAD;
+ } else if (!memcmp(&extra[18], "pbc", 3)) {
+ pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_PUSH_BUTTON;
+ } else if (!memcmp(&extra[18], "label", 5)) {
+ pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_LABEL;
+ } else {
+ DBG_88E("[%s] Unknown WPS config methodn", __func__);
+ return ret;
+ }
+
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1) {
+ if (rtw_end_of_queue_search(phead, plist) == true)
+ break;
+
+ if (uintPeerChannel != 0)
+ break;
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ /* Commented by Albert 2011/05/18 */
+ /* Match the device address located in the P2P IE */
+ /* This is for the case that the P2P device address is not the same as the P2P interface address. */
+
+ p2pie = rtw_get_p2p_ie(&pnetwork->network.IEs[12], pnetwork->network.IELength - 12, NULL, &p2pielen);
+ if (p2pie) {
+ while (p2pie) {
+ /* The P2P Device ID attribute is included in the Beacon frame. */
+ /* The P2P Device Info attribute is included in the probe response frame. */
+
+ if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_DEVICE_ID, attr_content, &attr_contentlen)) {
+ /* Handle the P2P Device ID attribute of Beacon first */
+ if (!memcmp(attr_content, peerMAC, ETH_ALEN)) {
+ uintPeerChannel = pnetwork->network.Configuration.DSConfig;
+ break;
+ }
+ } else if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_DEVICE_INFO, attr_content, &attr_contentlen)) {
+ /* Handle the P2P Device Info attribute of probe response */
+ if (!memcmp(attr_content, peerMAC, ETH_ALEN)) {
+ uintPeerChannel = pnetwork->network.Configuration.DSConfig;
+ break;
+ }
+ }
+
+ /* Get the next P2P IE */
+ p2pie = rtw_get_p2p_ie(p2pie+p2pielen, pnetwork->network.IELength - 12 - (p2pie - &pnetwork->network.IEs[12] + p2pielen), NULL, &p2pielen);
+ }
+ }
+
+ plist = get_next(plist);
+ }
+
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ if (uintPeerChannel) {
+ DBG_88E("[%s] peer channel: %d!\n", __func__, uintPeerChannel);
+ memcpy(pwdinfo->tx_prov_disc_info.peerIFAddr, pnetwork->network.MacAddress, ETH_ALEN);
+ memcpy(pwdinfo->tx_prov_disc_info.peerDevAddr, peerMAC, ETH_ALEN);
+ pwdinfo->tx_prov_disc_info.peer_channel_num[0] = (u16) uintPeerChannel;
+ pwdinfo->tx_prov_disc_info.benable = true;
+ rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ);
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT)) {
+ memcpy(&pwdinfo->tx_prov_disc_info.ssid, &pnetwork->network.Ssid, sizeof(struct ndis_802_11_ssid));
+ } else if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE) || rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ memcpy(pwdinfo->tx_prov_disc_info.ssid.Ssid, pwdinfo->p2p_wildcard_ssid, P2P_WILDCARD_SSID_LEN);
+ pwdinfo->tx_prov_disc_info.ssid.SsidLength = P2P_WILDCARD_SSID_LEN;
+ }
+
+ set_channel_bwmode(padapter, uintPeerChannel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+
+ _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
+
+ _set_timer(&pwdinfo->restore_p2p_state_timer, P2P_PROVISION_TIMEOUT);
+ } else {
+ DBG_88E("[%s] NOT Found in the Scanning Queue!\n", __func__);
+ }
+ return ret;
+}
+
+/* This function is used to inform the driver that the user has provided the PIN code value or */
+/* chosen PBC through the application. */
+
+static int rtw_p2p_got_wpsinfo(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ DBG_88E("[%s] data = %s\n", __func__, extra);
+ /* Added by Albert 20110328 */
+ /* if the input data is P2P_NO_WPSINFO -> reset the wpsinfo */
+ /* if the input data is P2P_GOT_WPSINFO_PEER_DISPLAY_PIN -> the utility has entered the PIN code obtained from the peer P2P device. */
+ /* if the input data is P2P_GOT_WPSINFO_SELF_DISPLAY_PIN -> the utility has generated the PIN code itself. */
+ /* if the input data is P2P_GOT_WPSINFO_PBC -> the utility has decided to use PBC */
+
+ if (*extra == '0')
+ pwdinfo->ui_got_wps_info = P2P_NO_WPSINFO;
+ else if (*extra == '1')
+ pwdinfo->ui_got_wps_info = P2P_GOT_WPSINFO_PEER_DISPLAY_PIN;
+ else if (*extra == '2')
+ pwdinfo->ui_got_wps_info = P2P_GOT_WPSINFO_SELF_DISPLAY_PIN;
+ else if (*extra == '3')
+ pwdinfo->ui_got_wps_info = P2P_GOT_WPSINFO_PBC;
+ else
+ pwdinfo->ui_got_wps_info = P2P_NO_WPSINFO;
+ return ret;
+}
+
+#endif /* CONFIG_88EU_P2P */
+
+static int rtw_p2p_set(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+
+#ifdef CONFIG_88EU_P2P
+ DBG_88E("[%s] extra = %s\n", __func__, extra);
+ if (!memcmp(extra, "enable =", 7)) {
+ rtw_wext_p2p_enable(dev, info, wrqu, &extra[7]);
+ } else if (!memcmp(extra, "setDN =", 6)) {
+ wrqu->data.length -= 6;
+ rtw_p2p_setDN(dev, info, wrqu, &extra[6]);
+ } else if (!memcmp(extra, "profilefound =", 13)) {
+ wrqu->data.length -= 13;
+ rtw_p2p_profilefound(dev, info, wrqu, &extra[13]);
+ } else if (!memcmp(extra, "prov_disc =", 10)) {
+ wrqu->data.length -= 10;
+ rtw_p2p_prov_disc(dev, info, wrqu, &extra[10]);
+ } else if (!memcmp(extra, "nego =", 5)) {
+ wrqu->data.length -= 5;
+ rtw_p2p_connect(dev, info, wrqu, &extra[5]);
+ } else if (!memcmp(extra, "intent =", 7)) {
+ /* Commented by Albert 2011/03/23 */
+ /* The wrqu->data.length will include the null character */
+ /* So, we subtract 7 + 1 */
+ wrqu->data.length -= 8;
+ rtw_p2p_set_intent(dev, info, wrqu, &extra[7]);
+ } else if (!memcmp(extra, "ssid =", 5)) {
+ wrqu->data.length -= 5;
+ rtw_p2p_set_go_nego_ssid(dev, info, wrqu, &extra[5]);
+ } else if (!memcmp(extra, "got_wpsinfo =", 12)) {
+ wrqu->data.length -= 12;
+ rtw_p2p_got_wpsinfo(dev, info, wrqu, &extra[12]);
+ } else if (!memcmp(extra, "listen_ch =", 10)) {
+ /* Commented by Albert 2011/05/24 */
+ /* The wrqu->data.length will include the null character */
+ /* So, we subtract (10 + 1) */
+ wrqu->data.length -= 11;
+ rtw_p2p_set_listen_ch(dev, info, wrqu, &extra[10]);
+ } else if (!memcmp(extra, "op_ch =", 6)) {
+ /* Commented by Albert 2011/05/24 */
+ /* The wrqu->data.length will include the null character */
+ /* So, we subtract (6 + 1) */
+ wrqu->data.length -= 7;
+ rtw_p2p_set_op_ch(dev, info, wrqu, &extra[6]);
+ } else if (!memcmp(extra, "invite =", 7)) {
+ wrqu->data.length -= 8;
+ rtw_p2p_invite_req(dev, info, wrqu, &extra[7]);
+ } else if (!memcmp(extra, "persistent =", 11)) {
+ wrqu->data.length -= 11;
+ rtw_p2p_set_persistent(dev, info, wrqu, &extra[11]);
+ }
+#endif /* CONFIG_88EU_P2P */
+
+ return ret;
+}
+
+static int rtw_p2p_get(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+
+#ifdef CONFIG_88EU_P2P
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ if (padapter->bShowGetP2PState)
+ DBG_88E("[%s] extra = %s\n", __func__, (char *)wrqu->data.pointer);
+ if (!memcmp(wrqu->data.pointer, "status", 6)) {
+ rtw_p2p_get_status(dev, info, wrqu, extra);
+ } else if (!memcmp(wrqu->data.pointer, "role", 4)) {
+ rtw_p2p_get_role(dev, info, wrqu, extra);
+ } else if (!memcmp(wrqu->data.pointer, "peer_ifa", 8)) {
+ rtw_p2p_get_peer_ifaddr(dev, info, wrqu, extra);
+ } else if (!memcmp(wrqu->data.pointer, "req_cm", 6)) {
+ rtw_p2p_get_req_cm(dev, info, wrqu, extra);
+ } else if (!memcmp(wrqu->data.pointer, "peer_deva", 9)) {
+ /* Get the P2P device address when receiving the provision discovery request frame. */
+ rtw_p2p_get_peer_devaddr(dev, info, wrqu, extra);
+ } else if (!memcmp(wrqu->data.pointer, "group_id", 8)) {
+ rtw_p2p_get_groupid(dev, info, wrqu, extra);
+ } else if (!memcmp(wrqu->data.pointer, "peer_deva_inv", 9)) {
+ /* Get the P2P device address when receiving the P2P Invitation request frame. */
+ rtw_p2p_get_peer_devaddr_by_invitation(dev, info, wrqu, extra);
+ } else if (!memcmp(wrqu->data.pointer, "op_ch", 5)) {
+ rtw_p2p_get_op_ch(dev, info, wrqu, extra);
+ }
+#endif /* CONFIG_88EU_P2P */
+ return ret;
+}
+
+static int rtw_p2p_get2(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+
+#ifdef CONFIG_88EU_P2P
+ DBG_88E("[%s] extra = %s\n", __func__, (char *)wrqu->data.pointer);
+ if (!memcmp(extra, "wpsCM =", 6)) {
+ wrqu->data.length -= 6;
+ rtw_p2p_get_wps_configmethod(dev, info, wrqu, &extra[6]);
+ } else if (!memcmp(extra, "devN =", 5)) {
+ wrqu->data.length -= 5;
+ rtw_p2p_get_device_name(dev, info, wrqu, &extra[5]);
+ } else if (!memcmp(extra, "dev_type =", 9)) {
+ wrqu->data.length -= 9;
+ rtw_p2p_get_device_type(dev, info, wrqu, &extra[9]);
+ } else if (!memcmp(extra, "go_devadd =", 10)) {
+ wrqu->data.length -= 10;
+ rtw_p2p_get_go_device_address(dev, info, wrqu, &extra[10]);
+ } else if (!memcmp(extra, "InvProc =", 8)) {
+ wrqu->data.length -= 8;
+ rtw_p2p_get_invitation_procedure(dev, info, wrqu, &extra[8]);
+ }
+
+#endif /* CONFIG_88EU_P2P */
+
+ return ret;
+}
+
+static int rtw_cta_test_start(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ DBG_88E("%s %s\n", __func__, extra);
+ if (!strcmp(extra, "1"))
+ padapter->in_cta_test = 1;
+ else
+ padapter->in_cta_test = 0;
+
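+ /* Entering CTA test mode clears the CBSSID data/beacon filter bits in RCR so frames are no longer filtered by BSSID; leaving the test restores them. */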
+ if (padapter->in_cta_test) {
+ u32 v = rtw_read32(padapter, REG_RCR);
+ v &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);/* RCR_ADF */
+ rtw_write32(padapter, REG_RCR, v);
+ DBG_88E("enable RCR_ADF\n");
+ } else {
+ u32 v = rtw_read32(padapter, REG_RCR);
+ v |= RCR_CBSSID_DATA | RCR_CBSSID_BCN;/* RCR_ADF */
+ rtw_write32(padapter, REG_RCR, v);
+ DBG_88E("disable RCR_ADF\n");
+ }
+ return ret;
+}
+
+static int rtw_rereg_nd_name(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ struct rereg_nd_name_data *rereg_priv = &padapter->rereg_nd_name_priv;
+ char new_ifname[IFNAMSIZ];
+
+ if (rereg_priv->old_ifname[0] == 0) {
+ char *reg_ifname;
+ reg_ifname = padapter->registrypriv.if2name;
+
+ strncpy(rereg_priv->old_ifname, reg_ifname, IFNAMSIZ);
+ rereg_priv->old_ifname[IFNAMSIZ-1] = 0;
+ }
+
+ if (wrqu->data.length > IFNAMSIZ)
+ return -EFAULT;
+
+ if (copy_from_user(new_ifname, wrqu->data.pointer, IFNAMSIZ))
+ return -EFAULT;
+
+ if (strcmp(rereg_priv->old_ifname, new_ifname) == 0)
+ return ret;
+
+ DBG_88E("%s new_ifname:%s\n", __func__, new_ifname);
+ ret = rtw_change_ifname(padapter, new_ifname);
+ if (ret != 0)
+ goto exit;
+
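+ /* Renaming away from "disable%d" restores the LED setting and the IPS mode saved below; renaming to "disable%d" powers the LED off and requests a deeper IPS mode. */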
+ if (!memcmp(rereg_priv->old_ifname, "disable%d", 9)) {
+ padapter->ledpriv.bRegUseLed = rereg_priv->old_bRegUseLed;
+ rtw_hal_sw_led_init(padapter);
+ rtw_ips_mode_req(&padapter->pwrctrlpriv, rereg_priv->old_ips_mode);
+ }
+
+ strncpy(rereg_priv->old_ifname, new_ifname, IFNAMSIZ);
+ rereg_priv->old_ifname[IFNAMSIZ-1] = 0;
+
+ if (!memcmp(new_ifname, "disable%d", 9)) {
+ DBG_88E("%s disable\n", __func__);
+ /* free network queue for Android's timing issue */
+ rtw_free_network_queue(padapter, true);
+
+ /* close led */
+ rtw_led_control(padapter, LED_CTL_POWER_OFF);
+ rereg_priv->old_bRegUseLed = padapter->ledpriv.bRegUseLed;
+ padapter->ledpriv.bRegUseLed = false;
+ rtw_hal_sw_led_deinit(padapter);
+
+ /* the interface is being "disabled", we can do deeper IPS */
+ rereg_priv->old_ips_mode = rtw_get_ips_mode_req(&padapter->pwrctrlpriv);
+ rtw_ips_mode_req(&padapter->pwrctrlpriv, IPS_NORMAL);
+ }
+exit:
+ return ret;
+}
+
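+ /* Register dump helpers used by the 0xdd sub-command of rtw_dbg_port() below; each prints four 32-bit values per line. */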
+static void mac_reg_dump(struct adapter *padapter)
+{
+ int i, j = 1;
+ pr_info("\n ======= MAC REG =======\n");
+ for (i = 0x0; i < 0x300; i += 4) {
+ if (j%4 == 1)
+ pr_info("0x%02x", i);
+ pr_info(" 0x%08x ", rtw_read32(padapter, i));
+ if ((j++)%4 == 0)
+ pr_info("\n");
+ }
+ for (i = 0x400; i < 0x800; i += 4) {
+ if (j%4 == 1)
+ pr_info("0x%02x", i);
+ pr_info(" 0x%08x ", rtw_read32(padapter, i));
+ if ((j++)%4 == 0)
+ pr_info("\n");
+ }
+}
+
+static void bb_reg_dump(struct adapter *padapter)
+{
+ int i, j = 1;
+ pr_info("\n ======= BB REG =======\n");
+ for (i = 0x800; i < 0x1000; i += 4) {
+ if (j%4 == 1)
+ pr_info("0x%02x", i);
+
+ pr_info(" 0x%08x ", rtw_read32(padapter, i));
+ if ((j++)%4 == 0)
+ pr_info("\n");
+ }
+}
+
+static void rf_reg_dump(struct adapter *padapter)
+{
+ int i, j = 1, path;
+ u32 value;
+ u8 rf_type, path_nums = 0;
+ rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+
+ pr_info("\n ======= RF REG =======\n");
+ if ((RF_1T2R == rf_type) || (RF_1T1R == rf_type))
+ path_nums = 1;
+ else
+ path_nums = 2;
+
+ for (path = 0; path < path_nums; path++) {
+ pr_info("\nRF_Path(%x)\n", path);
+ for (i = 0; i < 0x100; i++) {
+ value = rtw_hal_read_rfreg(padapter, path, i, 0xffffffff);
+ if (j%4 == 1)
+ pr_info("0x%02x ", i);
+ pr_info(" 0x%08x ", value);
+ if ((j++)%4 == 0)
+ pr_info("\n");
+ }
+ }
+}
+
+static int rtw_dbg_port(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ unsigned long irqL;
+ int ret = 0;
+ u8 major_cmd, minor_cmd;
+ u16 arg;
+ s32 extra_arg;
+ u32 *pdata, val32;
+ struct sta_info *psta;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct wlan_network *cur_network = &(pmlmepriv->cur_network);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
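+ /* The debug command is packed into two 32-bit words: */
+ /* word 0: bits 31-24 = major_cmd, bits 23-16 = minor_cmd, bits 15-0 = arg */
+ /* word 1: extra_arg */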
+ pdata = (u32 *)&wrqu->data;
+
+ val32 = *pdata;
+ arg = (u16)(val32 & 0x0000ffff);
+ major_cmd = (u8)(val32 >> 24);
+ minor_cmd = (u8)((val32 >> 16) & 0x00ff);
+
+ extra_arg = *(pdata+1);
+
+ switch (major_cmd) {
+ case 0x70:/* read_reg */
+ switch (minor_cmd) {
+ case 1:
+ DBG_88E("rtw_read8(0x%x) = 0x%02x\n", arg, rtw_read8(padapter, arg));
+ break;
+ case 2:
+ DBG_88E("rtw_read16(0x%x) = 0x%04x\n", arg, rtw_read16(padapter, arg));
+ break;
+ case 4:
+ DBG_88E("rtw_read32(0x%x) = 0x%08x\n", arg, rtw_read32(padapter, arg));
+ break;
+ }
+ break;
+ case 0x71:/* write_reg */
+ switch (minor_cmd) {
+ case 1:
+ rtw_write8(padapter, arg, extra_arg);
+ DBG_88E("rtw_write8(0x%x) = 0x%02x\n", arg, rtw_read8(padapter, arg));
+ break;
+ case 2:
+ rtw_write16(padapter, arg, extra_arg);
+ DBG_88E("rtw_write16(0x%x) = 0x%04x\n", arg, rtw_read16(padapter, arg));
+ break;
+ case 4:
+ rtw_write32(padapter, arg, extra_arg);
+ DBG_88E("rtw_write32(0x%x) = 0x%08x\n", arg, rtw_read32(padapter, arg));
+ break;
+ }
+ break;
+ case 0x72:/* read_bb */
+ DBG_88E("read_bbreg(0x%x) = 0x%x\n", arg, rtw_hal_read_bbreg(padapter, arg, 0xffffffff));
+ break;
+ case 0x73:/* write_bb */
+ rtw_hal_write_bbreg(padapter, arg, 0xffffffff, extra_arg);
+ DBG_88E("write_bbreg(0x%x) = 0x%x\n", arg, rtw_hal_read_bbreg(padapter, arg, 0xffffffff));
+ break;
+ case 0x74:/* read_rf */
+ DBG_88E("read RF_reg path(0x%02x), offset(0x%x), value(0x%08x)\n", minor_cmd, arg, rtw_hal_read_rfreg(padapter, minor_cmd, arg, 0xffffffff));
+ break;
+ case 0x75:/* write_rf */
+ rtw_hal_write_rfreg(padapter, minor_cmd, arg, 0xffffffff, extra_arg);
+ DBG_88E("write RF_reg path(0x%02x), offset(0x%x), value(0x%08x)\n", minor_cmd, arg, rtw_hal_read_rfreg(padapter, minor_cmd, arg, 0xffffffff));
+ break;
+
+ case 0x76:
+ switch (minor_cmd) {
+ case 0x00: /* normal mode, */
+ padapter->recvpriv.is_signal_dbg = 0;
+ break;
+ case 0x01: /* dbg mode */
+ padapter->recvpriv.is_signal_dbg = 1;
+ extra_arg = extra_arg > 100 ? 100 : extra_arg;
+ extra_arg = extra_arg < 0 ? 0 : extra_arg;
+ padapter->recvpriv.signal_strength_dbg = extra_arg;
+ break;
+ }
+ break;
+ case 0x78: /* IOL test */
+ switch (minor_cmd) {
+ case 0x04: /* LLT table initialization test */
+ {
+ u8 page_boundary = 0xf9;
+ struct xmit_frame *xmit_frame;
+
+ xmit_frame = rtw_IOL_accquire_xmit_frame(padapter);
+ if (xmit_frame == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ rtw_IOL_append_LLT_cmd(xmit_frame, page_boundary);
+
+ if (_SUCCESS != rtw_IOL_exec_cmds_sync(padapter, xmit_frame, 500, 0))
+ ret = -EPERM;
+ }
+ break;
+ case 0x05: /* blink LED test */
+ {
+ u16 reg = 0x4c;
+ u32 blink_num = 50;
+ u32 blink_delay_ms = 200;
+ int i;
+ struct xmit_frame *xmit_frame;
+
+ xmit_frame = rtw_IOL_accquire_xmit_frame(padapter);
+ if (xmit_frame == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ for (i = 0; i < blink_num; i++) {
+ rtw_IOL_append_WB_cmd(xmit_frame, reg, 0x00, 0xff);
+ rtw_IOL_append_DELAY_MS_cmd(xmit_frame, blink_delay_ms);
+ rtw_IOL_append_WB_cmd(xmit_frame, reg, 0x08, 0xff);
+ rtw_IOL_append_DELAY_MS_cmd(xmit_frame, blink_delay_ms);
+ }
+ if (_SUCCESS != rtw_IOL_exec_cmds_sync(padapter, xmit_frame, (blink_delay_ms*blink_num*2)+200, 0))
+ ret = -EPERM;
+ }
+ break;
+
+ case 0x06: /* continuous write byte test */
+ {
+ u16 reg = arg;
+ u16 start_value = 0;
+ u32 write_num = extra_arg;
+ int i;
+ u8 final;
+ struct xmit_frame *xmit_frame;
+
+ xmit_frame = rtw_IOL_accquire_xmit_frame(padapter);
+ if (xmit_frame == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ for (i = 0; i < write_num; i++)
+ rtw_IOL_append_WB_cmd(xmit_frame, reg, i+start_value, 0xFF);
+ if (_SUCCESS != rtw_IOL_exec_cmds_sync(padapter, xmit_frame, 5000, 0))
+ ret = -EPERM;
+
+ final = rtw_read8(padapter, reg);
+ if (start_value+write_num-1 == final)
+ DBG_88E("continuous IOL_CMD_WB_REG to 0x%x %u times Success, start:%u, final:%u\n", reg, write_num, start_value, final);
+ else
+ DBG_88E("continuous IOL_CMD_WB_REG to 0x%x %u times Fail, start:%u, final:%u\n", reg, write_num, start_value, final);
+ }
+ break;
+
+ case 0x07: /* continuous write word test */
+ {
+ u16 reg = arg;
+ u16 start_value = 200;
+ u32 write_num = extra_arg;
+
+ int i;
+ u16 final;
+ struct xmit_frame *xmit_frame;
+
+ xmit_frame = rtw_IOL_accquire_xmit_frame(padapter);
+ if (xmit_frame == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ for (i = 0; i < write_num; i++)
+ rtw_IOL_append_WW_cmd(xmit_frame, reg, i+start_value, 0xFFFF);
+ if (_SUCCESS != rtw_IOL_exec_cmds_sync(padapter, xmit_frame, 5000, 0))
+ ret = -EPERM;
+
+ final = rtw_read16(padapter, reg);
+ if (start_value+write_num-1 == final)
+ DBG_88E("continuous IOL_CMD_WW_REG to 0x%x %u times Success, start:%u, final:%u\n", reg, write_num, start_value, final);
+ else
+ DBG_88E("continuous IOL_CMD_WW_REG to 0x%x %u times Fail, start:%u, final:%u\n", reg, write_num, start_value, final);
+ }
+ break;
+ case 0x08: /* continuous write dword test */
+ {
+ u16 reg = arg;
+ u32 start_value = 0x110000c7;
+ u32 write_num = extra_arg;
+
+ int i;
+ u32 final;
+ struct xmit_frame *xmit_frame;
+
+ xmit_frame = rtw_IOL_accquire_xmit_frame(padapter);
+ if (xmit_frame == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ for (i = 0; i < write_num; i++)
+ rtw_IOL_append_WD_cmd(xmit_frame, reg, i+start_value, 0xFFFFFFFF);
+ if (_SUCCESS != rtw_IOL_exec_cmds_sync(padapter, xmit_frame, 5000, 0))
+ ret = -EPERM;
+
+ final = rtw_read32(padapter, reg);
+ if (start_value+write_num-1 == final)
+ DBG_88E("continuous IOL_CMD_WD_REG to 0x%x %u times Success, start:%u, final:%u\n",
+ reg, write_num, start_value, final);
+ else
+ DBG_88E("continuous IOL_CMD_WD_REG to 0x%x %u times Fail, start:%u, final:%u\n",
+ reg, write_num, start_value, final);
+ }
+ break;
+ }
+ break;
+ case 0x79:
+ {
+ /*
+ * dbg 0x79000000 [value], set RESP_TXAGC to + value, value:0~15
+ * dbg 0x79010000 [value], set RESP_TXAGC to - value, value:0~15
+ */
+ u8 value = extra_arg & 0x0f;
+ u8 sign = minor_cmd;
+ u16 write_value = 0;
+
+ DBG_88E("%s set RESP_TXAGC to %s %u\n", __func__, sign ? "minus" : "plus", value);
+
+ if (sign)
+ value = value | 0x10;
+
+ write_value = value | (value << 5);
+ rtw_write16(padapter, 0x6d9, write_value);
+ }
+ break;
+ case 0x7a:
+ receive_disconnect(padapter, pmlmeinfo->network.MacAddress,
+ WLAN_REASON_EXPIRATION_CHK);
+ break;
+ case 0x7F:
+ switch (minor_cmd) {
+ case 0x0:
+ DBG_88E("fwstate = 0x%x\n", get_fwstate(pmlmepriv));
+ break;
+ case 0x01:
+ DBG_88E("auth_alg = 0x%x, enc_alg = 0x%x, auth_type = 0x%x, enc_type = 0x%x\n",
+ psecuritypriv->dot11AuthAlgrthm, psecuritypriv->dot11PrivacyAlgrthm,
+ psecuritypriv->ndisauthtype, psecuritypriv->ndisencryptstatus);
+ break;
+ case 0x02:
+ DBG_88E("pmlmeinfo->state = 0x%x\n", pmlmeinfo->state);
+ break;
+ case 0x03:
+ DBG_88E("qos_option =%d\n", pmlmepriv->qospriv.qos_option);
+ DBG_88E("ht_option =%d\n", pmlmepriv->htpriv.ht_option);
+ break;
+ case 0x04:
+ DBG_88E("cur_ch =%d\n", pmlmeext->cur_channel);
+ DBG_88E("cur_bw =%d\n", pmlmeext->cur_bwmode);
+ DBG_88E("cur_ch_off =%d\n", pmlmeext->cur_ch_offset);
+ break;
+ case 0x05:
+ psta = rtw_get_stainfo(pstapriv, cur_network->network.MacAddress);
+ if (psta) {
+ int i;
+ struct recv_reorder_ctrl *preorder_ctrl;
+
+ DBG_88E("SSID =%s\n", cur_network->network.Ssid.Ssid);
+ DBG_88E("sta's macaddr: %pM\n", psta->hwaddr);
+ DBG_88E("cur_channel =%d, cur_bwmode =%d, cur_ch_offset =%d\n", pmlmeext->cur_channel, pmlmeext->cur_bwmode, pmlmeext->cur_ch_offset);
+ DBG_88E("rtsen =%d, cts2slef =%d\n", psta->rtsen, psta->cts2self);
+ DBG_88E("state = 0x%x, aid =%d, macid =%d, raid =%d\n", psta->state, psta->aid, psta->mac_id, psta->raid);
+ DBG_88E("qos_en =%d, ht_en =%d, init_rate =%d\n", psta->qos_option, psta->htpriv.ht_option, psta->init_rate);
+ DBG_88E("bwmode =%d, ch_offset =%d, sgi =%d\n", psta->htpriv.bwmode, psta->htpriv.ch_offset, psta->htpriv.sgi);
+ DBG_88E("ampdu_enable = %d\n", psta->htpriv.ampdu_enable);
+ DBG_88E("agg_enable_bitmap =%x, candidate_tid_bitmap =%x\n", psta->htpriv.agg_enable_bitmap, psta->htpriv.candidate_tid_bitmap);
+ for (i = 0; i < 16; i++) {
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
+ if (preorder_ctrl->enable)
+ DBG_88E("tid =%d, indicate_seq =%d\n", i, preorder_ctrl->indicate_seq);
+ }
+ } else {
+ DBG_88E("can't get sta's macaddr, cur_network's macaddr:%pM\n", (cur_network->network.MacAddress));
+ }
+ break;
+ case 0x06:
+ {
+ u32 ODMFlag;
+ rtw_hal_get_hwreg(padapter, HW_VAR_DM_FLAG, (u8 *)(&ODMFlag));
+ DBG_88E("(B)DMFlag = 0x%x, arg = 0x%x\n", ODMFlag, arg);
+ ODMFlag = (u32)(0x0f&arg);
+ DBG_88E("(A)DMFlag = 0x%x\n", ODMFlag);
+ rtw_hal_set_hwreg(padapter, HW_VAR_DM_FLAG, (u8 *)(&ODMFlag));
+ }
+ break;
+ case 0x07:
+ DBG_88E("bSurpriseRemoved =%d, bDriverStopped =%d\n",
+ padapter->bSurpriseRemoved, padapter->bDriverStopped);
+ break;
+ case 0x08:
+ {
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+
+ DBG_88E("free_xmitbuf_cnt =%d, free_xmitframe_cnt =%d, free_xmit_extbuf_cnt =%d\n",
+ pxmitpriv->free_xmitbuf_cnt, pxmitpriv->free_xmitframe_cnt, pxmitpriv->free_xmit_extbuf_cnt);
+ DBG_88E("rx_urb_pending_cn =%d\n", precvpriv->rx_pending_cnt);
+ }
+ break;
+ case 0x09:
+ {
+ int i, j;
+ struct list_head *plist, *phead;
+ struct recv_reorder_ctrl *preorder_ctrl;
+
+#ifdef CONFIG_88EU_AP_MODE
+ DBG_88E("sta_dz_bitmap = 0x%x, tim_bitmap = 0x%x\n", pstapriv->sta_dz_bitmap, pstapriv->tim_bitmap);
+#endif
+ _enter_critical_bh(&pstapriv->sta_hash_lock, &irqL);
+
+ for (i = 0; i < NUM_STA; i++) {
+ phead = &(pstapriv->sta_hash[i]);
+ plist = get_next(phead);
+
+ while ((rtw_end_of_queue_search(phead, plist)) == false) {
+ psta = LIST_CONTAINOR(plist, struct sta_info, hash_list);
+
+ plist = get_next(plist);
+
+ if (extra_arg == psta->aid) {
+ DBG_88E("sta's macaddr:%pM\n", (psta->hwaddr));
+ DBG_88E("rtsen =%d, cts2slef =%d\n", psta->rtsen, psta->cts2self);
+ DBG_88E("state = 0x%x, aid =%d, macid =%d, raid =%d\n", psta->state, psta->aid, psta->mac_id, psta->raid);
+ DBG_88E("qos_en =%d, ht_en =%d, init_rate =%d\n", psta->qos_option, psta->htpriv.ht_option, psta->init_rate);
+ DBG_88E("bwmode =%d, ch_offset =%d, sgi =%d\n", psta->htpriv.bwmode, psta->htpriv.ch_offset, psta->htpriv.sgi);
+ DBG_88E("ampdu_enable = %d\n", psta->htpriv.ampdu_enable);
+ DBG_88E("agg_enable_bitmap =%x, candidate_tid_bitmap =%x\n", psta->htpriv.agg_enable_bitmap, psta->htpriv.candidate_tid_bitmap);
+
+#ifdef CONFIG_88EU_AP_MODE
+ DBG_88E("capability = 0x%x\n", psta->capability);
+ DBG_88E("flags = 0x%x\n", psta->flags);
+ DBG_88E("wpa_psk = 0x%x\n", psta->wpa_psk);
+ DBG_88E("wpa2_group_cipher = 0x%x\n", psta->wpa2_group_cipher);
+ DBG_88E("wpa2_pairwise_cipher = 0x%x\n", psta->wpa2_pairwise_cipher);
+ DBG_88E("qos_info = 0x%x\n", psta->qos_info);
+#endif
+ DBG_88E("dot118021XPrivacy = 0x%x\n", psta->dot118021XPrivacy);
+
+ for (j = 0; j < 16; j++) {
+ preorder_ctrl = &psta->recvreorder_ctrl[j];
+ if (preorder_ctrl->enable)
+ DBG_88E("tid =%d, indicate_seq =%d\n", j, preorder_ctrl->indicate_seq);
+ }
+ }
+ }
+ }
+ _exit_critical_bh(&pstapriv->sta_hash_lock, &irqL);
+ }
+ break;
+ case 0x0c:/* dump rx/tx packet */
+ if (arg == 0) {
+ DBG_88E("dump rx packet (%d)\n", extra_arg);
+ rtw_hal_set_def_var(padapter, HAL_DEF_DBG_DUMP_RXPKT, &(extra_arg));
+ } else if (arg == 1) {
+ DBG_88E("dump tx packet (%d)\n", extra_arg);
+ rtw_hal_set_def_var(padapter, HAL_DEF_DBG_DUMP_TXPKT, &(extra_arg));
+ }
+ break;
+ case 0x0f:
+ if (extra_arg == 0) {
+ DBG_88E("###### silent reset test.......#####\n");
+ rtw_hal_sreset_reset(padapter);
+ }
+ break;
+ case 0x15:
+ {
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ DBG_88E("==>silent resete cnts:%d\n", pwrpriv->ips_enter_cnts);
+ }
+ break;
+ case 0x10:/* driver version display */
+ DBG_88E("rtw driver version =%s\n", DRIVERVERSION);
+ break;
+ case 0x11:
+ DBG_88E("turn %s Rx RSSI display function\n", (extra_arg == 1) ? "on" : "off");
+ padapter->bRxRSSIDisplay = extra_arg;
+ rtw_hal_set_def_var(padapter, HW_DEF_FA_CNT_DUMP, &extra_arg);
+ break;
+ case 0x12: /* set rx_stbc */
+ {
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ /* 0: disable, bit(0):enable 2.4g, bit(1):enable 5g, 0x3: enable both 2.4g and 5g */
+ /* default is set to enable 2.4 GHz due to an IOT issue with Buffalo's AP at 5 GHz */
+ if (pregpriv &&
+ (extra_arg == 0 ||
+ extra_arg == 1 ||
+ extra_arg == 2 ||
+ extra_arg == 3)) {
+ pregpriv->rx_stbc = extra_arg;
+ DBG_88E("set rx_stbc =%d\n", pregpriv->rx_stbc);
+ } else {
+ DBG_88E("get rx_stbc =%d\n", pregpriv->rx_stbc);
+ }
+ }
+ break;
+ case 0x13: /* set ampdu_enable */
+ {
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ /* 0: disable, 0x1:enable (but wifi_spec should be 0), 0x2: force enable (don't care wifi_spec) */
+ if (pregpriv && extra_arg >= 0 && extra_arg < 3) {
+ pregpriv->ampdu_enable = extra_arg;
+ DBG_88E("set ampdu_enable =%d\n", pregpriv->ampdu_enable);
+ } else {
+ DBG_88E("get ampdu_enable =%d\n", pregpriv->ampdu_enable);
+ }
+ }
+ break;
+ case 0x14: /* get wifi_spec */
+ {
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ DBG_88E("get wifi_spec =%d\n", pregpriv->wifi_spec);
+ }
+ break;
+ case 0x16:
+ if (arg == 0xff) {
+ pr_info("ODM_COMP_DIG\t\tBIT0\n");
+ pr_info("ODM_COMP_RA_MASK\t\tBIT1\n");
+ pr_info("ODM_COMP_DYNAMIC_TXPWR\tBIT2\n");
+ pr_info("ODM_COMP_FA_CNT\t\tBIT3\n");
+ pr_info("ODM_COMP_RSSI_MONITOR\tBIT4\n");
+ pr_info("ODM_COMP_CCK_PD\t\tBIT5\n");
+ pr_info("ODM_COMP_ANT_DIV\t\tBIT6\n");
+ pr_info("ODM_COMP_PWR_SAVE\t\tBIT7\n");
+ pr_info("ODM_COMP_PWR_TRAIN\tBIT8\n");
+ pr_info("ODM_COMP_RATE_ADAPTIVE\tBIT9\n");
+ pr_info("ODM_COMP_PATH_DIV\t\tBIT10\n");
+ pr_info("ODM_COMP_PSD \tBIT11\n");
+ pr_info("ODM_COMP_DYNAMIC_PRICCA\tBIT12\n");
+ pr_info("ODM_COMP_RXHP\t\tBIT13\n");
+ pr_info("ODM_COMP_EDCA_TURBO\tBIT16\n");
+ pr_info("ODM_COMP_EARLY_MODE\tBIT17\n");
+ pr_info("ODM_COMP_TX_PWR_TRACK\tBIT24\n");
+ pr_info("ODM_COMP_RX_GAIN_TRACK\tBIT25\n");
+ pr_info("ODM_COMP_CALIBRATION\tBIT26\n");
+ rtw_hal_get_def_var(padapter, HW_DEF_ODM_DBG_FLAG, &extra_arg);
+ } else {
+ rtw_hal_set_def_var(padapter, HW_DEF_ODM_DBG_FLAG, &extra_arg);
+ }
+ break;
+ case 0x23:
+ DBG_88E("turn %s the bNotifyChannelChange Variable\n", (extra_arg == 1) ? "on" : "off");
+ padapter->bNotifyChannelChange = extra_arg;
+ break;
+ case 0x24:
+#ifdef CONFIG_88EU_P2P
+ DBG_88E("turn %s the bShowGetP2PState Variable\n", (extra_arg == 1) ? "on" : "off");
+ padapter->bShowGetP2PState = extra_arg;
+#endif /* CONFIG_88EU_P2P */
+ break;
+ case 0xaa:
+ if (extra_arg > 0x13)
+ extra_arg = 0xFF;
+ DBG_88E("chang data rate to :0x%02x\n", extra_arg);
+ padapter->fix_rate = extra_arg;
+ break;
+ case 0xdd:/* registers dump, 0 for mac reg, 1 for bb reg, 2 for rf reg */
+ if (extra_arg == 0)
+ mac_reg_dump(padapter);
+ else if (extra_arg == 1)
+ bb_reg_dump(padapter);
+ else if (extra_arg == 2)
+ rf_reg_dump(padapter);
+ break;
+ case 0xee:/* turn on/off dynamic funcs */
+ {
+ u32 odm_flag;
+
+ if (0xf == extra_arg) {
+ rtw_hal_get_def_var(padapter, HAL_DEF_DBG_DM_FUNC, &odm_flag);
+ DBG_88E(" === DMFlag(0x%08x) ===\n", odm_flag);
+ DBG_88E("extra_arg = 0 - disable all dynamic func\n");
+ DBG_88E("extra_arg = 1 - disable DIG- BIT(0)\n");
+ DBG_88E("extra_arg = 2 - disable High power - BIT(1)\n");
+ DBG_88E("extra_arg = 3 - disable tx power tracking - BIT(2)\n");
+ DBG_88E("extra_arg = 4 - disable BT coexistence - BIT(3)\n");
+ DBG_88E("extra_arg = 5 - disable antenna diversity - BIT(4)\n");
+ DBG_88E("extra_arg = 6 - enable all dynamic func\n");
+ } else {
+ /*
+  * extra_arg = 0 - disable all dynamic func
+  * extra_arg = 1 - disable DIG
+  * extra_arg = 2 - disable tx power tracking
+  * extra_arg = 3 - turn on all dynamic func
+  */
+ rtw_hal_set_def_var(padapter, HAL_DEF_DBG_DM_FUNC, &(extra_arg));
+ rtw_hal_get_def_var(padapter, HAL_DEF_DBG_DM_FUNC, &odm_flag);
+ DBG_88E(" === DMFlag(0x%08x) ===\n", odm_flag);
+ }
+ }
+ break;
+
+ case 0xfd:
+ rtw_write8(padapter, 0xc50, arg);
+ DBG_88E("wr(0xc50) = 0x%x\n", rtw_read8(padapter, 0xc50));
+ rtw_write8(padapter, 0xc58, arg);
+ DBG_88E("wr(0xc58) = 0x%x\n", rtw_read8(padapter, 0xc58));
+ break;
+ case 0xfe:
+ DBG_88E("rd(0xc50) = 0x%x\n", rtw_read8(padapter, 0xc50));
+ DBG_88E("rd(0xc58) = 0x%x\n", rtw_read8(padapter, 0xc58));
+ break;
+ case 0xff:
+ DBG_88E("dbg(0x210) = 0x%x\n", rtw_read32(padapter, 0x210));
+ DBG_88E("dbg(0x608) = 0x%x\n", rtw_read32(padapter, 0x608));
+ DBG_88E("dbg(0x280) = 0x%x\n", rtw_read32(padapter, 0x280));
+ DBG_88E("dbg(0x284) = 0x%x\n", rtw_read32(padapter, 0x284));
+ DBG_88E("dbg(0x288) = 0x%x\n", rtw_read32(padapter, 0x288));
+
+ DBG_88E("dbg(0x664) = 0x%x\n", rtw_read32(padapter, 0x664));
+
+ DBG_88E("\n");
+
+ DBG_88E("dbg(0x430) = 0x%x\n", rtw_read32(padapter, 0x430));
+ DBG_88E("dbg(0x438) = 0x%x\n", rtw_read32(padapter, 0x438));
+
+ DBG_88E("dbg(0x440) = 0x%x\n", rtw_read32(padapter, 0x440));
+
+ DBG_88E("dbg(0x458) = 0x%x\n", rtw_read32(padapter, 0x458));
+
+ DBG_88E("dbg(0x484) = 0x%x\n", rtw_read32(padapter, 0x484));
+ DBG_88E("dbg(0x488) = 0x%x\n", rtw_read32(padapter, 0x488));
+
+ DBG_88E("dbg(0x444) = 0x%x\n", rtw_read32(padapter, 0x444));
+ DBG_88E("dbg(0x448) = 0x%x\n", rtw_read32(padapter, 0x448));
+ DBG_88E("dbg(0x44c) = 0x%x\n", rtw_read32(padapter, 0x44c));
+ DBG_88E("dbg(0x450) = 0x%x\n", rtw_read32(padapter, 0x450));
+ break;
+ }
+ break;
+ default:
+ DBG_88E("error dbg cmd!\n");
+ break;
+ }
+ return ret;
+}
+
+static int wpa_set_param(struct net_device *dev, u8 name, u32 value)
+{
+ uint ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ switch (name) {
+ case IEEE_PARAM_WPA_ENABLED:
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X; /* 802.1x */
+ switch ((value)&0xff) {
+ case 1: /* WPA */
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPAPSK; /* WPA_PSK */
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
+ break;
+ case 2: /* WPA2 */
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPA2PSK; /* WPA2_PSK */
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
+ break;
+ }
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
+ ("wpa_set_param:padapter->securitypriv.ndisauthtype =%d\n", padapter->securitypriv.ndisauthtype));
+ break;
+ case IEEE_PARAM_TKIP_COUNTERMEASURES:
+ break;
+ case IEEE_PARAM_DROP_UNENCRYPTED: {
+ /* HACK:
+ *
+ * wpa_supplicant calls set_wpa_enabled when the driver
+ * is loaded and unloaded, regardless of if WPA is being
+ * used. No other calls are made which can be used to
+ * determine if encryption will be used or not prior to
+ * association being expected. If encryption is not being
+ * used, drop_unencrypted is set to false, else true -- we
+ * can use this to determine if the CAP_PRIVACY_ON bit should
+ * be set.
+ */
+
+ break;
+ }
+ case IEEE_PARAM_PRIVACY_INVOKED:
+ break;
+
+ case IEEE_PARAM_AUTH_ALGS:
+ ret = wpa_set_auth_algs(dev, value);
+ break;
+ case IEEE_PARAM_IEEE_802_1X:
+ break;
+ case IEEE_PARAM_WPAX_SELECT:
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ return ret;
+}
+
+static int wpa_mlme(struct net_device *dev, u32 command, u32 reason)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ switch (command) {
+ case IEEE_MLME_STA_DEAUTH:
+ if (!rtw_set_802_11_disassociate(padapter))
+ ret = -1;
+ break;
+ case IEEE_MLME_STA_DISASSOC:
+ if (!rtw_set_802_11_disassociate(padapter))
+ ret = -1;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
+{
+ struct ieee_param *param;
+ uint ret = 0;
+
+ if (p->length < sizeof(struct ieee_param) || !p->pointer) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ param = (struct ieee_param *)rtw_malloc(p->length);
+ if (param == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(param, p->pointer, p->length)) {
+ kfree(param);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ switch (param->cmd) {
+ case IEEE_CMD_SET_WPA_PARAM:
+ ret = wpa_set_param(dev, param->u.wpa_param.name, param->u.wpa_param.value);
+ break;
+
+ case IEEE_CMD_SET_WPA_IE:
+ ret = rtw_set_wpa_ie((struct adapter *)rtw_netdev_priv(dev),
+ (char *)param->u.wpa_ie.data, (u16)param->u.wpa_ie.len);
+ break;
+
+ case IEEE_CMD_SET_ENCRYPTION:
+ ret = wpa_set_encryption(dev, param, p->length);
+ break;
+
+ case IEEE_CMD_MLME:
+ ret = wpa_mlme(dev, param->u.mlme.command, param->u.mlme.reason_code);
+ break;
+
+ default:
+ DBG_88E("Unknown WPA supplicant request: %d\n", param->cmd);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ if (ret == 0 && copy_to_user(p->pointer, param, p->length))
+ ret = -EFAULT;
+
+ kfree(param);
+
+out:
+
+ return ret;
+}
+
+#ifdef CONFIG_88EU_AP_MODE
+static u8 set_pairwise_key(struct adapter *padapter, struct sta_info *psta)
+{
+ struct cmd_obj *ph2c;
+ struct set_stakey_parm *psetstakey_para;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ psetstakey_para = (struct set_stakey_parm *)rtw_zmalloc(sizeof(struct set_stakey_parm));
+ if (psetstakey_para == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_);
+
+ psetstakey_para->algorithm = (u8)psta->dot118021XPrivacy;
+
+ memcpy(psetstakey_para->addr, psta->hwaddr, ETH_ALEN);
+
+ memcpy(psetstakey_para->key, &psta->dot118021x_UncstKey, 16);
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+
+ return res;
+}
+
+static int set_group_key(struct adapter *padapter, u8 *key, u8 alg, int keyid)
+{
+ u8 keylen;
+ struct cmd_obj *pcmd;
+ struct setkey_parm *psetkeyparm;
+ struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
+ int res = _SUCCESS;
+
+ DBG_88E("%s\n", __func__);
+
+ pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ psetkeyparm = (struct setkey_parm *)rtw_zmalloc(sizeof(struct setkey_parm));
+ if (psetkeyparm == NULL) {
+ kfree(pcmd);
+ res = _FAIL;
+ goto exit;
+ }
+
+ _rtw_memset(psetkeyparm, 0, sizeof(struct setkey_parm));
+
+ psetkeyparm->keyid = (u8)keyid;
+
+ psetkeyparm->algorithm = alg;
+
+ psetkeyparm->set_tx = 1;
+
+ switch (alg) {
+ case _WEP40_:
+ keylen = 5;
+ break;
+ case _WEP104_:
+ keylen = 13;
+ break;
+ case _TKIP_:
+ case _TKIP_WTMIC_:
+ case _AES_:
+ keylen = 16;
+ break;
+ default:
+ keylen = 16;
+ break;
+ }
+
+ memcpy(&(psetkeyparm->key[0]), key, keylen);
+
+ pcmd->cmdcode = _SetKey_CMD_;
+ pcmd->parmbuf = (u8 *)psetkeyparm;
+ pcmd->cmdsz = (sizeof(struct setkey_parm));
+ pcmd->rsp = NULL;
+ pcmd->rspsz = 0;
+
+ _rtw_init_listhead(&pcmd->list);
+
+ res = rtw_enqueue_cmd(pcmdpriv, pcmd);
+
+exit:
+
+ return res;
+}
+
+static int set_wep_key(struct adapter *padapter, u8 *key, u8 keylen, int keyid)
+{
+ u8 alg;
+
+ switch (keylen) {
+ case 5:
+ alg = _WEP40_;
+ break;
+ case 13:
+ alg = _WEP104_;
+ break;
+ default:
+ alg = _NO_PRIVACY_;
+ }
+
+ return set_group_key(padapter, key, alg, keyid);
+}
+
+static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
+{
+ int ret = 0;
+ u32 wep_key_idx, wep_key_len, wep_total_len;
+ struct ndis_802_11_wep *pwep = NULL;
+ struct sta_info *psta = NULL, *pbcmc_sta = NULL;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &(padapter->securitypriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ DBG_88E("%s\n", __func__);
+ param->u.crypt.err = 0;
+ param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
+ if (param_len != sizeof(struct ieee_param) + param->u.crypt.key_len) {
+ ret = -EINVAL;
+ goto exit;
+ }
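+ /* An all-0xff sta_addr selects the default/group key path; otherwise look up the peer's sta_info. */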
+ if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+ param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+ param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
+ if (param->u.crypt.idx >= WEP_KEYS) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ } else {
+ psta = rtw_get_stainfo(pstapriv, param->sta_addr);
+ if (!psta) {
+ DBG_88E("rtw_set_encryption(), sta has already been removed or never been added\n");
+ goto exit;
+ }
+ }
+
+ if (strcmp(param->u.crypt.alg, "none") == 0 && (psta == NULL)) {
+ /* todo:clear default encryption keys */
+
+ DBG_88E("clear default encryption keys, keyid =%d\n", param->u.crypt.idx);
+ goto exit;
+ }
+ if (strcmp(param->u.crypt.alg, "WEP") == 0 && (psta == NULL)) {
+ DBG_88E("r871x_set_encryption, crypt.alg = WEP\n");
+ wep_key_idx = param->u.crypt.idx;
+ wep_key_len = param->u.crypt.key_len;
+ DBG_88E("r871x_set_encryption, wep_key_idx=%d, len=%d\n", wep_key_idx, wep_key_len);
+ if ((wep_key_idx >= WEP_KEYS) || (wep_key_len <= 0)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (wep_key_len > 0) {
+ wep_key_len = wep_key_len <= 5 ? 5 : 13;
+ wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
+ pwep = (struct ndis_802_11_wep *)rtw_malloc(wep_total_len);
+ if (pwep == NULL) {
+ DBG_88E(" r871x_set_encryption: pwep allocate fail !!!\n");
+ goto exit;
+ }
+
+ _rtw_memset(pwep, 0, wep_total_len);
+
+ pwep->KeyLength = wep_key_len;
+ pwep->Length = wep_total_len;
+ }
+
+ pwep->KeyIndex = wep_key_idx;
+
+ memcpy(pwep->KeyMaterial, param->u.crypt.key, pwep->KeyLength);
+
+ if (param->u.crypt.set_tx) {
+ DBG_88E("wep, set_tx = 1\n");
+
+ psecuritypriv->ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ psecuritypriv->dot11PrivacyAlgrthm = _WEP40_;
+ psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
+
+ if (pwep->KeyLength == 13) {
+ psecuritypriv->dot11PrivacyAlgrthm = _WEP104_;
+ psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
+ }
+
+ psecuritypriv->dot11PrivacyKeyIndex = wep_key_idx;
+
+ memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), pwep->KeyMaterial, pwep->KeyLength);
+
+ psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->KeyLength;
+
+ set_wep_key(padapter, pwep->KeyMaterial, pwep->KeyLength, wep_key_idx);
+ } else {
+ DBG_88E("wep, set_tx = 0\n");
+
+ /* don't update "psecuritypriv->dot11PrivacyAlgrthm" and */
+ /* psecuritypriv->dot11PrivacyKeyIndex = keyid", but can rtw_set_key to cam */
+
+ memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), pwep->KeyMaterial, pwep->KeyLength);
+
+ psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->KeyLength;
+
+ set_wep_key(padapter, pwep->KeyMaterial, pwep->KeyLength, wep_key_idx);
+ }
+
+ goto exit;
+ }
+
+ if (!psta && check_fwstate(pmlmepriv, WIFI_AP_STATE)) { /* group key */
+ if (param->u.crypt.set_tx == 1) {
+ if (strcmp(param->u.crypt.alg, "WEP") == 0) {
+ DBG_88E("%s, set group_key, WEP\n", __func__);
+
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey,
+ param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+
+ psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
+ if (param->u.crypt.key_len == 13)
+ psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
+ } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
+ DBG_88E("%s, set group_key, TKIP\n", __func__);
+ psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey,
+ param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ /* set mic key */
+ memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
+ memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+
+ psecuritypriv->busetkipkey = true;
+ } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
+ DBG_88E("%s, set group_key, CCMP\n", __func__);
+ psecuritypriv->dot118021XGrpPrivacy = _AES_;
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey,
+ param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ } else {
+ DBG_88E("%s, set group_key, none\n", __func__);
+ psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
+ }
+ psecuritypriv->dot118021XGrpKeyid = param->u.crypt.idx;
+ psecuritypriv->binstallGrpkey = true;
+ psecuritypriv->dot11PrivacyAlgrthm = psecuritypriv->dot118021XGrpPrivacy;
+ set_group_key(padapter, param->u.crypt.key, psecuritypriv->dot118021XGrpPrivacy, param->u.crypt.idx);
+ pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
+ if (pbcmc_sta) {
+ pbcmc_sta->ieee8021x_blocked = false;
+ pbcmc_sta->dot118021XPrivacy = psecuritypriv->dot118021XGrpPrivacy;/* rx will use bmc_sta's dot118021XPrivacy */
+ }
+ }
+ goto exit;
+ }
+
+ if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X && psta) { /* psk/802_1x */
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ if (param->u.crypt.set_tx == 1) {
+ memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+
+ if (strcmp(param->u.crypt.alg, "WEP") == 0) {
+ DBG_88E("%s, set pairwise key, WEP\n", __func__);
+
+ psta->dot118021XPrivacy = _WEP40_;
+ if (param->u.crypt.key_len == 13)
+ psta->dot118021XPrivacy = _WEP104_;
+ } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
+ DBG_88E("%s, set pairwise key, TKIP\n", __func__);
+
+ psta->dot118021XPrivacy = _TKIP_;
+
+ /* set mic key */
+ memcpy(psta->dot11tkiptxmickey.skey, &(param->u.crypt.key[16]), 8);
+ memcpy(psta->dot11tkiprxmickey.skey, &(param->u.crypt.key[24]), 8);
+
+ psecuritypriv->busetkipkey = true;
+ } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
+ DBG_88E("%s, set pairwise key, CCMP\n", __func__);
+
+ psta->dot118021XPrivacy = _AES_;
+ } else {
+ DBG_88E("%s, set pairwise key, none\n", __func__);
+
+ psta->dot118021XPrivacy = _NO_PRIVACY_;
+ }
+
+ set_pairwise_key(padapter, psta);
+
+ psta->ieee8021x_blocked = false;
+ } else { /* group key??? */
+ if (strcmp(param->u.crypt.alg, "WEP") == 0) {
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey,
+ param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
+ if (param->u.crypt.key_len == 13)
+ psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
+ } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
+ psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
+
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey,
+ param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+
+ /* set mic key */
+ memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
+ memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+
+ psecuritypriv->busetkipkey = true;
+ } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
+ psecuritypriv->dot118021XGrpPrivacy = _AES_;
+
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey,
+ param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ } else {
+ psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
+ }
+
+ psecuritypriv->dot118021XGrpKeyid = param->u.crypt.idx;
+
+ psecuritypriv->binstallGrpkey = true;
+
+ psecuritypriv->dot11PrivacyAlgrthm = psecuritypriv->dot118021XGrpPrivacy;
+
+ set_group_key(padapter, param->u.crypt.key, psecuritypriv->dot118021XGrpPrivacy, param->u.crypt.idx);
+
+ pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
+ if (pbcmc_sta) {
+ pbcmc_sta->ieee8021x_blocked = false;
+ pbcmc_sta->dot118021XPrivacy = psecuritypriv->dot118021XGrpPrivacy;/* rx will use bmc_sta's dot118021XPrivacy */
+ }
+ }
+ }
+ }
+
+exit:
+
+ kfree(pwep);
+
+ return ret;
+}
+
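+/* Apply beacon data pushed down by hostapd: pick up max_num_sta from the reserved field and validate the IEs via rtw_check_beacon_data(). */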
+static int rtw_set_beacon(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ unsigned char *pbuf = param->u.bcn_ie.buf;
+
+ DBG_88E("%s, len =%d\n", __func__, len);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return -EINVAL;
+
+ memcpy(&pstapriv->max_num_sta, param->u.bcn_ie.reserved, 2);
+
+ if ((pstapriv->max_num_sta > NUM_STA) || (pstapriv->max_num_sta <= 0))
+ pstapriv->max_num_sta = NUM_STA;
+
+ if (rtw_check_beacon_data(padapter, pbuf, (len-12-2)) == _SUCCESS)/* 12 = param header, 2:no packed */
+ ret = 0;
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static int rtw_hostapd_sta_flush(struct net_device *dev)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ DBG_88E("%s\n", __func__);
+
+ flush_all_cam_entry(padapter); /* clear CAM */
+
+ ret = rtw_sta_flush(padapter);
+
+ return ret;
+}
+
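+/* Update driver state for a station added by hostapd: AID, supported rates, and WMM/HT capabilities. */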
+static int rtw_add_sta(struct net_device *dev, struct ieee_param *param)
+{
+ int ret = 0;
+ struct sta_info *psta = NULL;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ DBG_88E("rtw_add_sta(aid =%d) =%pM\n", param->u.add_sta.aid, (param->sta_addr));
+
+ if (!check_fwstate(pmlmepriv, (_FW_LINKED|WIFI_AP_STATE)))
+ return -EINVAL;
+
+ if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+ param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+ param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
+ return -EINVAL;
+
+ psta = rtw_get_stainfo(pstapriv, param->sta_addr);
+ if (psta) {
+ int flags = param->u.add_sta.flags;
+
+ psta->aid = param->u.add_sta.aid;/* aid = 1~2007 */
+
+ memcpy(psta->bssrateset, param->u.add_sta.tx_supp_rates, 16);
+
+ /* check wmm cap. */
+ if (WLAN_STA_WME&flags)
+ psta->qos_option = 1;
+ else
+ psta->qos_option = 0;
+
+ if (pmlmepriv->qospriv.qos_option == 0)
+ psta->qos_option = 0;
+
+ /* check 802.11n HT cap. */
+ if (WLAN_STA_HT&flags) {
+ psta->htpriv.ht_option = true;
+ psta->qos_option = 1;
+ memcpy((void *)&psta->htpriv.ht_cap, (void *)&param->u.add_sta.ht_cap, sizeof(struct rtw_ieee80211_ht_cap));
+ } else {
+ psta->htpriv.ht_option = false;
+ }
+
+ if (pmlmepriv->htpriv.ht_option == false)
+ psta->htpriv.ht_option = false;
+
+ update_sta_info_apmode(padapter, psta);
+ } else {
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
+
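+/* Remove a station on hostapd's request: take it off the association list and free it via ap_free_sta(). */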
+static int rtw_del_sta(struct net_device *dev, struct ieee_param *param)
+{
+ unsigned long irqL;
+ int ret = 0;
+ struct sta_info *psta = NULL;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ int updated = 0;
+
+ DBG_88E("rtw_del_sta =%pM\n", (param->sta_addr));
+
+ if (check_fwstate(pmlmepriv, (_FW_LINKED|WIFI_AP_STATE)) != true)
+ return -EINVAL;
+
+ if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+ param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+ param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
+ return -EINVAL;
+
+ psta = rtw_get_stainfo(pstapriv, param->sta_addr);
+ if (psta) {
+ _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ if (!rtw_is_list_empty(&psta->asoc_list)) {
+ rtw_list_delete(&psta->asoc_list);
+ pstapriv->asoc_list_cnt--;
+ updated = ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
+ }
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ associated_clients_update(padapter, updated);
+ psta = NULL;
+ } else {
+ DBG_88E("rtw_del_sta(), sta has already been removed or never been added\n");
+ }
+
+ return ret;
+}
+
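+/* Report per-station information (AID, capability, flags, rates, HT cap and traffic counters) back to user space. */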
+static int rtw_ioctl_get_sta_data(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ struct sta_info *psta = NULL;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct ieee_param_ex *param_ex = (struct ieee_param_ex *)param;
+ struct sta_data *psta_data = (struct sta_data *)param_ex->data;
+
+ DBG_88E("rtw_ioctl_get_sta_info, sta_addr: %pM\n", (param_ex->sta_addr));
+
+ if (check_fwstate(pmlmepriv, (_FW_LINKED|WIFI_AP_STATE)) != true)
+ return -EINVAL;
+
+ if (param_ex->sta_addr[0] == 0xff && param_ex->sta_addr[1] == 0xff &&
+ param_ex->sta_addr[2] == 0xff && param_ex->sta_addr[3] == 0xff &&
+ param_ex->sta_addr[4] == 0xff && param_ex->sta_addr[5] == 0xff)
+ return -EINVAL;
+
+ psta = rtw_get_stainfo(pstapriv, param_ex->sta_addr);
+ if (psta) {
+ psta_data->aid = (u16)psta->aid;
+ psta_data->capability = psta->capability;
+ psta_data->flags = psta->flags;
+
+/*
+ nonerp_set : BIT(0)
+ no_short_slot_time_set : BIT(1)
+ no_short_preamble_set : BIT(2)
+ no_ht_gf_set : BIT(3)
+ no_ht_set : BIT(4)
+ ht_20mhz_set : BIT(5)
+*/
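+/* e.g. (illustrative): a station with nonerp_set and ht_20mhz_set would report sta_set = BIT(0) | BIT(5). */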
+
+ psta_data->sta_set = ((psta->nonerp_set) |
+ (psta->no_short_slot_time_set << 1) |
+ (psta->no_short_preamble_set << 2) |
+ (psta->no_ht_gf_set << 3) |
+ (psta->no_ht_set << 4) |
+ (psta->ht_20mhz_set << 5));
+ psta_data->tx_supp_rates_len = psta->bssratelen;
+ memcpy(psta_data->tx_supp_rates, psta->bssrateset, psta->bssratelen);
+ memcpy(&psta_data->ht_cap, &psta->htpriv.ht_cap, sizeof(struct rtw_ieee80211_ht_cap));
+ psta_data->rx_pkts = psta->sta_stats.rx_data_pkts;
+ psta_data->rx_bytes = psta->sta_stats.rx_bytes;
+ psta_data->rx_drops = psta->sta_stats.rx_drops;
+ psta_data->tx_pkts = psta->sta_stats.tx_pkts;
+ psta_data->tx_bytes = psta->sta_stats.tx_bytes;
+ psta_data->tx_drops = psta->sta_stats.tx_drops;
+ } else {
+ ret = -1;
+ }
+
+ return ret;
+}
+
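+/* Copy a station's cached WPA/RSN IE into the ioctl reply buffer. */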
+static int rtw_get_sta_wpaie(struct net_device *dev, struct ieee_param *param)
+{
+ int ret = 0;
+ struct sta_info *psta = NULL;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ DBG_88E("rtw_get_sta_wpaie, sta_addr: %pM\n", (param->sta_addr));
+
+ if (check_fwstate(pmlmepriv, (_FW_LINKED|WIFI_AP_STATE)) != true)
+ return -EINVAL;
+
+ if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+ param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+ param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
+ return -EINVAL;
+
+ psta = rtw_get_stainfo(pstapriv, param->sta_addr);
+ if (psta) {
+ if ((psta->wpa_ie[0] == WLAN_EID_RSN) || (psta->wpa_ie[0] == WLAN_EID_GENERIC)) {
+ int wpa_ie_len;
+ int copy_len;
+
+ wpa_ie_len = psta->wpa_ie[1];
+ copy_len = ((wpa_ie_len+2) > sizeof(psta->wpa_ie)) ? (sizeof(psta->wpa_ie)) : (wpa_ie_len+2);
+ param->u.wpa_ie.len = copy_len;
+ memcpy(param->u.wpa_ie.reserved, psta->wpa_ie, copy_len);
+ } else {
+ DBG_88E("sta's wpa_ie is NONE\n");
+ }
+ } else {
+ ret = -1;
+ }
+
+ return ret;
+}
+
+static int rtw_set_wps_beacon(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ unsigned char wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ int ie_len;
+
+ DBG_88E("%s, len =%d\n", __func__, len);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return -EINVAL;
+
+ ie_len = len-12-2;/* 12 = param header, 2:no packed */
+
+ if (pmlmepriv->wps_beacon_ie) {
+ kfree(pmlmepriv->wps_beacon_ie);
+ pmlmepriv->wps_beacon_ie = NULL;
+ }
+
+ if (ie_len > 0) {
+ pmlmepriv->wps_beacon_ie = rtw_malloc(ie_len);
+ pmlmepriv->wps_beacon_ie_len = ie_len;
+ if (pmlmepriv->wps_beacon_ie == NULL) {
+ DBG_88E("%s()-%d: rtw_malloc() ERROR!\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ memcpy(pmlmepriv->wps_beacon_ie, param->u.bcn_ie.buf, ie_len);
+
+ update_beacon(padapter, _VENDOR_SPECIFIC_IE_, wps_oui, true);
+
+ pmlmeext->bstart_bss = true;
+ }
+
+ return ret;
+}
+
+static int rtw_set_wps_probe_resp(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ int ie_len;
+
+ DBG_88E("%s, len =%d\n", __func__, len);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return -EINVAL;
+
+ ie_len = len-12-2;/* 12 = param header, 2:no packed */
+
+ if (pmlmepriv->wps_probe_resp_ie) {
+ kfree(pmlmepriv->wps_probe_resp_ie);
+ pmlmepriv->wps_probe_resp_ie = NULL;
+ }
+
+ if (ie_len > 0) {
+ pmlmepriv->wps_probe_resp_ie = rtw_malloc(ie_len);
+ pmlmepriv->wps_probe_resp_ie_len = ie_len;
+ if (pmlmepriv->wps_probe_resp_ie == NULL) {
+ DBG_88E("%s()-%d: rtw_malloc() ERROR!\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ memcpy(pmlmepriv->wps_probe_resp_ie, param->u.bcn_ie.buf, ie_len);
+ }
+
+ return ret;
+}
+
+static int rtw_set_wps_assoc_resp(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ int ie_len;
+
+ DBG_88E("%s, len =%d\n", __func__, len);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return -EINVAL;
+
+ ie_len = len-12-2;/* 12 = param header, 2:no packed */
+
+ if (pmlmepriv->wps_assoc_resp_ie) {
+ kfree(pmlmepriv->wps_assoc_resp_ie);
+ pmlmepriv->wps_assoc_resp_ie = NULL;
+ }
+
+ if (ie_len > 0) {
+ pmlmepriv->wps_assoc_resp_ie = rtw_malloc(ie_len);
+ pmlmepriv->wps_assoc_resp_ie_len = ie_len;
+ if (pmlmepriv->wps_assoc_resp_ie == NULL) {
+ DBG_88E("%s()-%d: rtw_malloc() ERROR!\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ memcpy(pmlmepriv->wps_assoc_resp_ie, param->u.bcn_ie.buf, ie_len);
+ }
+
+ return ret;
+}
+
+static int rtw_set_hidden_ssid(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ u8 value;
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return -EINVAL;
+
+ if (param->u.wpa_param.name != 0) /* dummy test... */
+ DBG_88E("%s name(%u) != 0\n", __func__, param->u.wpa_param.name);
+ value = param->u.wpa_param.value;
+
+ /* use the same definition of hostapd's ignore_broadcast_ssid */
+ if (value != 1 && value != 2)
+ value = 0;
+ DBG_88E("%s value(%u)\n", __func__, value);
+ pmlmeinfo->hidden_ssid_mode = value;
+ return ret;
+}
+
+static int rtw_ioctl_acl_remove_sta(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return -EINVAL;
+
+ if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+ param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+ param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
+ return -EINVAL;
+ ret = rtw_acl_remove_sta(padapter, param->sta_addr);
+ return ret;
+}
+
+static int rtw_ioctl_acl_add_sta(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return -EINVAL;
+
+ if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+ param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+ param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
+ return -EINVAL;
+ ret = rtw_acl_add_sta(padapter, param->sta_addr);
+ return ret;
+}
+
+static int rtw_ioctl_set_macaddr_acl(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return -EINVAL;
+
+ rtw_set_macaddr_acl(padapter, param->u.mlme.command);
+
+ return ret;
+}
+
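+/* Entry point for the RTL871X_HOSTAPD_* private ioctls: copy the request in, dispatch on param->cmd, and copy the result back. */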
+static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
+{
+ struct ieee_param *param;
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ /*
+ * This function is expected to be called in master mode, which allows no power saving,
+ * so we just check hw_init_completed.
+ */
+
+ if (!padapter->hw_init_completed) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ if (!p->pointer) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ param = (struct ieee_param *)rtw_malloc(p->length);
+ if (param == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(param, p->pointer, p->length)) {
+ kfree(param);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ switch (param->cmd) {
+ case RTL871X_HOSTAPD_FLUSH:
+ ret = rtw_hostapd_sta_flush(dev);
+ break;
+ case RTL871X_HOSTAPD_ADD_STA:
+ ret = rtw_add_sta(dev, param);
+ break;
+ case RTL871X_HOSTAPD_REMOVE_STA:
+ ret = rtw_del_sta(dev, param);
+ break;
+ case RTL871X_HOSTAPD_SET_BEACON:
+ ret = rtw_set_beacon(dev, param, p->length);
+ break;
+ case RTL871X_SET_ENCRYPTION:
+ ret = rtw_set_encryption(dev, param, p->length);
+ break;
+ case RTL871X_HOSTAPD_GET_WPAIE_STA:
+ ret = rtw_get_sta_wpaie(dev, param);
+ break;
+ case RTL871X_HOSTAPD_SET_WPS_BEACON:
+ ret = rtw_set_wps_beacon(dev, param, p->length);
+ break;
+ case RTL871X_HOSTAPD_SET_WPS_PROBE_RESP:
+ ret = rtw_set_wps_probe_resp(dev, param, p->length);
+ break;
+ case RTL871X_HOSTAPD_SET_WPS_ASSOC_RESP:
+ ret = rtw_set_wps_assoc_resp(dev, param, p->length);
+ break;
+ case RTL871X_HOSTAPD_SET_HIDDEN_SSID:
+ ret = rtw_set_hidden_ssid(dev, param, p->length);
+ break;
+ case RTL871X_HOSTAPD_GET_INFO_STA:
+ ret = rtw_ioctl_get_sta_data(dev, param, p->length);
+ break;
+ case RTL871X_HOSTAPD_SET_MACADDR_ACL:
+ ret = rtw_ioctl_set_macaddr_acl(dev, param, p->length);
+ break;
+ case RTL871X_HOSTAPD_ACL_ADD_STA:
+ ret = rtw_ioctl_acl_add_sta(dev, param, p->length);
+ break;
+ case RTL871X_HOSTAPD_ACL_REMOVE_STA:
+ ret = rtw_ioctl_acl_remove_sta(dev, param, p->length);
+ break;
+ default:
+ DBG_88E("Unknown hostapd request: %d\n", param->cmd);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ if (ret == 0 && copy_to_user(p->pointer, param, p->length))
+ ret = -EFAULT;
+ kfree(param);
+out:
+ return ret;
+}
+#endif
+
+#include <rtw_android.h>
+static int rtw_wx_set_priv(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *awrq,
+ char *extra)
+{
+ int ret = 0;
+ int len = 0;
+ char *ext;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct iw_point *dwrq = (struct iw_point *)awrq;
+
+ if (dwrq->length == 0)
+ return -EFAULT;
+
+ len = dwrq->length;
+ ext = rtw_vmalloc(len);
+ if (!ext)
+ return -ENOMEM;
+
+ if (copy_from_user(ext, dwrq->pointer, len)) {
+ rtw_vmfree(ext, len);
+ return -EFAULT;
+ }
+
+ /* added for wps2.0 @20110524 */
+ if (dwrq->flags == 0x8766 && len > 8) {
+ u32 cp_sz;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ u8 *probereq_wpsie = ext;
+ int probereq_wpsie_len = len;
+ u8 wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
+
+ if ((_VENDOR_SPECIFIC_IE_ == probereq_wpsie[0]) &&
+ (!memcmp(&probereq_wpsie[2], wps_oui, 4))) {
+ cp_sz = probereq_wpsie_len > MAX_WPS_IE_LEN ? MAX_WPS_IE_LEN : probereq_wpsie_len;
+
+ pmlmepriv->wps_probe_req_ie_len = 0;
+ kfree(pmlmepriv->wps_probe_req_ie);
+ pmlmepriv->wps_probe_req_ie = NULL;
+
+ pmlmepriv->wps_probe_req_ie = rtw_malloc(cp_sz);
+ if (pmlmepriv->wps_probe_req_ie == NULL) {
+ pr_info("%s()-%d: rtw_malloc() ERROR!\n", __func__, __LINE__);
+ ret = -EINVAL;
+ goto FREE_EXT;
+ }
+ memcpy(pmlmepriv->wps_probe_req_ie, probereq_wpsie, cp_sz);
+ pmlmepriv->wps_probe_req_ie_len = cp_sz;
+ }
+ goto FREE_EXT;
+ }
+
+ if (len >= WEXT_CSCAN_HEADER_SIZE &&
+ !memcmp(ext, WEXT_CSCAN_HEADER, WEXT_CSCAN_HEADER_SIZE)) {
+ ret = rtw_wx_set_scan(dev, info, awrq, ext);
+ goto FREE_EXT;
+ }
+
+FREE_EXT:
+
+ rtw_vmfree(ext, len);
+
+ return ret;
+}
+
+static int rtw_pm_set(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ unsigned mode = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ DBG_88E("[%s] extra = %s\n", __func__, extra);
+
+ if (!memcmp(extra, "lps =", 4)) {
+ sscanf(extra+4, "%u", &mode);
+ ret = rtw_pm_set_lps(padapter, mode);
+ } else if (!memcmp(extra, "ips =", 4)) {
+ sscanf(extra+4, "%u", &mode);
+ ret = rtw_pm_set_ips(padapter, mode);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
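+/* MP efuse read command. The input is "subcmd[,arg1[,arg2]]", e.g. "realmap" or "rmap,<addr>,<cnts>"; the formatted result is returned in extra. */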
+static int rtw_mp_efuse_get(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wdata, char *extra)
+{
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
+ struct hal_data_8188e *haldata = GET_HAL_DATA(padapter);
+ struct efuse_hal *pEfuseHal;
+ struct iw_point *wrqu;
+
+ u8 *PROMContent = pEEPROM->efuse_eeprom_data;
+ u8 ips_mode = 0, lps_mode = 0;
+ struct pwrctrl_priv *pwrctrlpriv;
+ u8 *data = NULL;
+ u8 *rawdata = NULL;
+ char *pch, *ptmp, *token, *tmp[3] = {NULL, NULL, NULL};
+ u16 i = 0, j = 0, mapLen = 0, addr = 0, cnts = 0;
+ u16 max_available_size = 0, raw_cursize = 0, raw_maxsize = 0;
+ int err;
+ u8 org_fw_iol = padapter->registrypriv.fw_iol;/* 0:Disable, 1:enable, 2:by usb speed */
+
+ wrqu = (struct iw_point *)wdata;
+ pwrctrlpriv = &padapter->pwrctrlpriv;
+ pEfuseHal = &haldata->EfuseHal;
+
+ err = 0;
+ data = _rtw_zmalloc(EFUSE_BT_MAX_MAP_LEN);
+ if (data == NULL) {
+ err = -ENOMEM;
+ goto exit;
+ }
+ rawdata = _rtw_zmalloc(EFUSE_BT_MAX_MAP_LEN);
+ if (rawdata == NULL) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ if (copy_from_user(extra, wrqu->pointer, wrqu->length)) {
+ err = -EFAULT;
+ goto exit;
+ }
+ lps_mode = pwrctrlpriv->power_mgnt;/* keep org value */
+ rtw_pm_set_lps(padapter, PS_MODE_ACTIVE);
+
+ ips_mode = pwrctrlpriv->ips_mode;/* keep org value */
+ rtw_pm_set_ips(padapter, IPS_NONE);
+
+ pch = extra;
+ DBG_88E("%s: in =%s\n", __func__, extra);
+
+ i = 0;
+ /* mac 16 "00e04c871200" rmap, 00, 2 */
+ while ((token = strsep(&pch, ",")) != NULL) {
+ if (i > 2)
+ break;
+ tmp[i] = token;
+ i++;
+ }
+ padapter->registrypriv.fw_iol = 0;/* 0:Disable, 1:enable, 2:by usb speed */
+
+ if (strcmp(tmp[0], "status") == 0) {
+ sprintf(extra, "Load File efuse =%s, Load File MAC =%s", (pEEPROM->bloadfile_fail_flag ? "FAIL" : "OK"), (pEEPROM->bloadmac_fail_flag ? "FAIL" : "OK"));
+
+ goto exit;
+ } else if (strcmp(tmp[0], "filemap") == 0) {
+ mapLen = EFUSE_MAP_SIZE;
+
+ sprintf(extra, "\n");
+ for (i = 0; i < EFUSE_MAP_SIZE; i += 16) {
+ sprintf(extra, "%s0x%02x\t", extra, i);
+ for (j = 0; j < 8; j++)
+ sprintf(extra, "%s%02X ", extra, PROMContent[i+j]);
+ sprintf(extra, "%s\t", extra);
+ for (; j < 16; j++)
+ sprintf(extra, "%s%02X ", extra, PROMContent[i+j]);
+ sprintf(extra, "%s\n", extra);
+ }
+ } else if (strcmp(tmp[0], "realmap") == 0) {
+ mapLen = EFUSE_MAP_SIZE;
+ if (rtw_efuse_map_read(padapter, 0, mapLen, pEfuseHal->fakeEfuseInitMap) == _FAIL) {
+ DBG_88E("%s: read realmap Fail!!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ sprintf(extra, "\n");
+ for (i = 0; i < EFUSE_MAP_SIZE; i += 16) {
+ sprintf(extra, "%s0x%02x\t", extra, i);
+ for (j = 0; j < 8; j++)
+ sprintf(extra, "%s%02X ", extra, pEfuseHal->fakeEfuseInitMap[i+j]);
+ sprintf(extra, "%s\t", extra);
+ for (; j < 16; j++)
+ sprintf(extra, "%s%02X ", extra, pEfuseHal->fakeEfuseInitMap[i+j]);
+ sprintf(extra, "%s\n", extra);
+ }
+ } else if (strcmp(tmp[0], "rmap") == 0) {
+ if ((tmp[1] == NULL) || (tmp[2] == NULL)) {
+ DBG_88E("%s: rmap Fail!! Parameters error!\n", __func__);
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /* rmap addr cnts */
+ addr = simple_strtoul(tmp[1], &ptmp, 16);
+ DBG_88E("%s: addr =%x\n", __func__, addr);
+
+ cnts = simple_strtoul(tmp[2], &ptmp, 10);
+ if (cnts == 0) {
+ DBG_88E("%s: rmap Fail!! cnts error!\n", __func__);
+ err = -EINVAL;
+ goto exit;
+ }
+ DBG_88E("%s: cnts =%d\n", __func__, cnts);
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
+ if ((addr + cnts) > max_available_size) {
+ DBG_88E("%s: addr(0x%X)+cnts(%d) parameter error!\n", __func__, addr, cnts);
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (rtw_efuse_map_read(padapter, addr, cnts, data) == _FAIL) {
+ DBG_88E("%s: rtw_efuse_map_read error!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ *extra = 0;
+ for (i = 0; i < cnts; i++)
+ sprintf(extra, "%s0x%02X ", extra, data[i]);
+ } else if (strcmp(tmp[0], "realraw") == 0) {
+ addr = 0;
+ mapLen = EFUSE_MAX_SIZE;
+ if (rtw_efuse_access(padapter, false, addr, mapLen, rawdata) == _FAIL) {
+ DBG_88E("%s: rtw_efuse_access Fail!!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ sprintf(extra, "\n");
+ for (i = 0; i < mapLen; i++) {
+ sprintf(extra, "%s%02X", extra, rawdata[i]);
+
+ if ((i & 0xF) == 0xF)
+ sprintf(extra, "%s\n", extra);
+ else if ((i & 0x7) == 0x7)
+ sprintf(extra, "%s\t", extra);
+ else
+ sprintf(extra, "%s ", extra);
+ }
+ } else if (strcmp(tmp[0], "mac") == 0) {
+ cnts = 6;
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
+ if ((addr + cnts) > max_available_size) {
+ DBG_88E("%s: addr(0x%02x)+cnts(%d) parameter error!\n", __func__, addr, cnts);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ if (rtw_efuse_map_read(padapter, addr, cnts, data) == _FAIL) {
+ DBG_88E("%s: rtw_efuse_map_read error!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ *extra = 0;
+ for (i = 0; i < cnts; i++) {
+ sprintf(extra, "%s%02X", extra, data[i]);
+ if (i != (cnts-1))
+ sprintf(extra, "%s:", extra);
+ }
+ } else if (strcmp(tmp[0], "vidpid") == 0) {
+ cnts = 4;
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
+ if ((addr + cnts) > max_available_size) {
+ DBG_88E("%s: addr(0x%02x)+cnts(%d) parameter error!\n", __func__, addr, cnts);
+ err = -EFAULT;
+ goto exit;
+ }
+ if (rtw_efuse_map_read(padapter, addr, cnts, data) == _FAIL) {
+ DBG_88E("%s: rtw_efuse_access error!!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ *extra = 0;
+ for (i = 0; i < cnts; i++) {
+ sprintf(extra, "%s0x%02X", extra, data[i]);
+ if (i != (cnts-1))
+ sprintf(extra, "%s,", extra);
+ }
+ } else if (strcmp(tmp[0], "ableraw") == 0) {
+ efuse_GetCurrentSize(padapter, &raw_cursize);
+ raw_maxsize = efuse_GetMaxSize(padapter);
+ sprintf(extra, "[available raw size] = %d bytes", raw_maxsize-raw_cursize);
+ } else if (strcmp(tmp[0], "btfmap") == 0) {
+ mapLen = EFUSE_BT_MAX_MAP_LEN;
+ if (rtw_BT_efuse_map_read(padapter, 0, mapLen, pEfuseHal->BTEfuseInitMap) == _FAIL) {
+ DBG_88E("%s: rtw_BT_efuse_map_read Fail!!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ sprintf(extra, "\n");
+ for (i = 0; i < 512; i += 16) {
+ /* dump only 512 bytes because iwpriv's extra buffer is limited to 0x7FF bytes */
+ sprintf(extra, "%s0x%03x\t", extra, i);
+ for (j = 0; j < 8; j++)
+ sprintf(extra, "%s%02X ", extra, pEfuseHal->BTEfuseInitMap[i+j]);
+ sprintf(extra, "%s\t", extra);
+ for (; j < 16; j++)
+ sprintf(extra, "%s%02X ", extra, pEfuseHal->BTEfuseInitMap[i+j]);
+ sprintf(extra, "%s\n", extra);
+ }
+ } else if (strcmp(tmp[0], "btbmap") == 0) {
+ mapLen = EFUSE_BT_MAX_MAP_LEN;
+ if (rtw_BT_efuse_map_read(padapter, 0, mapLen, pEfuseHal->BTEfuseInitMap) == _FAIL) {
+ DBG_88E("%s: rtw_BT_efuse_map_read Fail!!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ sprintf(extra, "\n");
+ for (i = 512; i < 1024; i += 16) {
+ sprintf(extra, "%s0x%03x\t", extra, i);
+ for (j = 0; j < 8; j++)
+ sprintf(extra, "%s%02X ", extra, pEfuseHal->BTEfuseInitMap[i+j]);
+ sprintf(extra, "%s\t", extra);
+ for (; j < 16; j++)
+ sprintf(extra, "%s%02X ", extra, pEfuseHal->BTEfuseInitMap[i+j]);
+ sprintf(extra, "%s\n", extra);
+ }
+ } else if (strcmp(tmp[0], "btrmap") == 0) {
+ if ((tmp[1] == NULL) || (tmp[2] == NULL)) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /* rmap addr cnts */
+ addr = simple_strtoul(tmp[1], &ptmp, 16);
+ DBG_88E("%s: addr = 0x%X\n", __func__, addr);
+
+ cnts = simple_strtoul(tmp[2], &ptmp, 10);
+ if (cnts == 0) {
+ DBG_88E("%s: btrmap Fail!! cnts error!\n", __func__);
+ err = -EINVAL;
+ goto exit;
+ }
+ DBG_88E("%s: cnts =%d\n", __func__, cnts);
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_BT, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
+ if ((addr + cnts) > max_available_size) {
+ DBG_88E("%s: addr(0x%X)+cnts(%d) parameter error!\n", __func__, addr, cnts);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ if (rtw_BT_efuse_map_read(padapter, addr, cnts, data) == _FAIL) {
+ DBG_88E("%s: rtw_BT_efuse_map_read error!!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ *extra = 0;
+ for (i = 0; i < cnts; i++)
+ sprintf(extra, "%s 0x%02X ", extra, data[i]);
+ } else if (strcmp(tmp[0], "btffake") == 0) {
+ sprintf(extra, "\n");
+ for (i = 0; i < 512; i += 16) {
+ sprintf(extra, "%s0x%03x\t", extra, i);
+ for (j = 0; j < 8; j++)
+ sprintf(extra, "%s%02X ", extra, pEfuseHal->fakeBTEfuseModifiedMap[i+j]);
+ sprintf(extra, "%s\t", extra);
+ for (; j < 16; j++)
+ sprintf(extra, "%s%02X ", extra, pEfuseHal->fakeBTEfuseModifiedMap[i+j]);
+ sprintf(extra, "%s\n", extra);
+ }
+ } else if (strcmp(tmp[0], "btbfake") == 0) {
+ sprintf(extra, "\n");
+ for (i = 512; i < 1024; i += 16) {
+ sprintf(extra, "%s0x%03x\t", extra, i);
+ for (j = 0; j < 8; j++)
+ sprintf(extra, "%s%02X ", extra, pEfuseHal->fakeBTEfuseModifiedMap[i+j]);
+ sprintf(extra, "%s\t", extra);
+ for (; j < 16; j++)
+ sprintf(extra, "%s%02X ", extra, pEfuseHal->fakeBTEfuseModifiedMap[i+j]);
+ sprintf(extra, "%s\n", extra);
+ }
+ } else if (strcmp(tmp[0], "wlrfkmap") == 0) {
+ sprintf(extra, "\n");
+ for (i = 0; i < EFUSE_MAP_SIZE; i += 16) {
+ sprintf(extra, "%s0x%02x\t", extra, i);
+ for (j = 0; j < 8; j++)
+ sprintf(extra, "%s%02X ", extra, pEfuseHal->fakeEfuseModifiedMap[i+j]);
+ sprintf(extra, "%s\t", extra);
+ for (; j < 16; j++)
+ sprintf(extra, "%s %02X", extra, pEfuseHal->fakeEfuseModifiedMap[i+j]);
+ sprintf(extra, "%s\n", extra);
+ }
+ } else {
+ sprintf(extra, "Command not found!");
+ }
+
+exit:
+ kfree(data);
+ kfree(rawdata);
+ if (!err)
+ wrqu->length = strlen(extra);
+
+ rtw_pm_set_ips(padapter, ips_mode);
+ rtw_pm_set_lps(padapter, lps_mode);
+ padapter->registrypriv.fw_iol = org_fw_iol;/* 0:Disable, 1:enable, 2:by usb speed */
+ return err;
+}
+
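+/* MP efuse write command. The input is "subcmd[,arg1[,arg2]]", e.g. "wmap,<addr>,<hexbytes>" or "mac,<hexbytes>", and programs the WiFi or BT efuse/shadow maps. */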
+static int rtw_mp_efuse_set(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wdata, char *extra)
+{
+ struct adapter *padapter;
+ struct pwrctrl_priv *pwrctrlpriv;
+ struct hal_data_8188e *haldata;
+ struct efuse_hal *pEfuseHal;
+
+ u8 ips_mode = 0, lps_mode = 0;
+ u32 i, jj, kk;
+ u8 *setdata = NULL;
+ u8 *ShadowMapBT = NULL;
+ u8 *ShadowMapWiFi = NULL;
+ u8 *setrawdata = NULL;
+ char *pch, *ptmp, *token, *tmp[3] = {NULL, NULL, NULL};
+ u16 addr = 0, cnts = 0, max_available_size = 0;
+ int err;
+
+ padapter = rtw_netdev_priv(dev);
+ pwrctrlpriv = &padapter->pwrctrlpriv;
+ haldata = GET_HAL_DATA(padapter);
+ pEfuseHal = &haldata->EfuseHal;
+ err = 0;
+ setdata = _rtw_zmalloc(1024);
+ if (setdata == NULL) {
+ err = -ENOMEM;
+ goto exit;
+ }
+ ShadowMapBT = _rtw_malloc(EFUSE_BT_MAX_MAP_LEN);
+ if (ShadowMapBT == NULL) {
+ err = -ENOMEM;
+ goto exit;
+ }
+ ShadowMapWiFi = _rtw_malloc(EFUSE_MAP_SIZE);
+ if (ShadowMapWiFi == NULL) {
+ err = -ENOMEM;
+ goto exit;
+ }
+ setrawdata = _rtw_malloc(EFUSE_MAX_SIZE);
+ if (setrawdata == NULL) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ lps_mode = pwrctrlpriv->power_mgnt;/* keep org value */
+ rtw_pm_set_lps(padapter, PS_MODE_ACTIVE);
+
+ ips_mode = pwrctrlpriv->ips_mode;/* keep org value */
+ rtw_pm_set_ips(padapter, IPS_NONE);
+
+ pch = extra;
+ DBG_88E("%s: in =%s\n", __func__, extra);
+
+ i = 0;
+ while ((token = strsep(&pch, ",")) != NULL) {
+ if (i > 2)
+ break;
+ tmp[i] = token;
+ i++;
+ }
+
+ /* tmp[0],[1],[2] */
+ /* wmap, addr, 00e04c871200 */
+ if (strcmp(tmp[0], "wmap") == 0) {
+ if ((tmp[1] == NULL) || (tmp[2] == NULL)) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ addr = simple_strtoul(tmp[1], &ptmp, 16);
+ addr &= 0xFFF;
+
+ cnts = strlen(tmp[2]);
+ if (cnts%2) {
+ err = -EINVAL;
+ goto exit;
+ }
+ cnts /= 2;
+ if (cnts == 0) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ DBG_88E("%s: addr = 0x%X\n", __func__, addr);
+ DBG_88E("%s: cnts =%d\n", __func__, cnts);
+ DBG_88E("%s: map data =%s\n", __func__, tmp[2]);
+
+ for (jj = 0, kk = 0; jj < cnts; jj++, kk += 2)
+ setdata[jj] = key_2char2num(tmp[2][kk], tmp[2][kk + 1]);
+ /* Check TYPE_EFUSE_MAP_LEN, because the 8188E raw efuse is 256 bytes while the logical map is larger than 256. */
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_EFUSE_MAP_LEN, (void *)&max_available_size, false);
+ if ((addr+cnts) > max_available_size) {
+ DBG_88E("%s: addr(0x%X)+cnts(%d) parameter error!\n", __func__, addr, cnts);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ if (rtw_efuse_map_write(padapter, addr, cnts, setdata) == _FAIL) {
+ DBG_88E("%s: rtw_efuse_map_write error!!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+ } else if (strcmp(tmp[0], "wraw") == 0) {
+ if ((tmp[1] == NULL) || (tmp[2] == NULL)) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ addr = simple_strtoul(tmp[1], &ptmp, 16);
+ addr &= 0xFFF;
+
+ cnts = strlen(tmp[2]);
+ if (cnts%2) {
+ err = -EINVAL;
+ goto exit;
+ }
+ cnts /= 2;
+ if (cnts == 0) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ DBG_88E("%s: addr = 0x%X\n", __func__, addr);
+ DBG_88E("%s: cnts =%d\n", __func__, cnts);
+ DBG_88E("%s: raw data =%s\n", __func__, tmp[2]);
+
+ for (jj = 0, kk = 0; jj < cnts; jj++, kk += 2)
+ setrawdata[jj] = key_2char2num(tmp[2][kk], tmp[2][kk + 1]);
+
+ if (rtw_efuse_access(padapter, true, addr, cnts, setrawdata) == _FAIL) {
+ DBG_88E("%s: rtw_efuse_access error!!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+ } else if (strcmp(tmp[0], "mac") == 0) {
+ if (tmp[1] == NULL) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /* mac, 00e04c871200 */
+ addr = EEPROM_MAC_ADDR_88EU;
+ cnts = strlen(tmp[1]);
+ if (cnts%2) {
+ err = -EINVAL;
+ goto exit;
+ }
+ cnts /= 2;
+ if (cnts == 0) {
+ err = -EINVAL;
+ goto exit;
+ }
+ if (cnts > 6) {
+ DBG_88E("%s: error data for mac addr =\"%s\"\n", __func__, tmp[1]);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ DBG_88E("%s: addr = 0x%X\n", __func__, addr);
+ DBG_88E("%s: cnts =%d\n", __func__, cnts);
+ DBG_88E("%s: MAC address =%s\n", __func__, tmp[1]);
+
+ for (jj = 0, kk = 0; jj < cnts; jj++, kk += 2)
+ setdata[jj] = key_2char2num(tmp[1][kk], tmp[1][kk + 1]);
+ /* Check TYPE_EFUSE_MAP_LEN, because the 8188E raw efuse is 256 bytes while the logical map is larger than 256. */
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_EFUSE_MAP_LEN, (void *)&max_available_size, false);
+ if ((addr+cnts) > max_available_size) {
+ DBG_88E("%s: addr(0x%X)+cnts(%d) parameter error!\n", __func__, addr, cnts);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ if (rtw_efuse_map_write(padapter, addr, cnts, setdata) == _FAIL) {
+ DBG_88E("%s: rtw_efuse_map_write error!!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+ } else if (strcmp(tmp[0], "vidpid") == 0) {
+ if (tmp[1] == NULL) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /* pidvid, da0b7881 */
+ addr = EEPROM_VID_88EE;
+ cnts = strlen(tmp[1]);
+ if (cnts%2) {
+ err = -EINVAL;
+ goto exit;
+ }
+ cnts /= 2;
+ if (cnts == 0) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ DBG_88E("%s: addr = 0x%X\n", __func__, addr);
+ DBG_88E("%s: cnts =%d\n", __func__, cnts);
+ DBG_88E("%s: VID/PID =%s\n", __func__, tmp[1]);
+
+ for (jj = 0, kk = 0; jj < cnts; jj++, kk += 2)
+ setdata[jj] = key_2char2num(tmp[1][kk], tmp[1][kk + 1]);
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
+ if ((addr+cnts) > max_available_size) {
+ DBG_88E("%s: addr(0x%X)+cnts(%d) parameter error!\n", __func__, addr, cnts);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ if (rtw_efuse_map_write(padapter, addr, cnts, setdata) == _FAIL) {
+ DBG_88E("%s: rtw_efuse_map_write error!!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+ } else if (strcmp(tmp[0], "btwmap") == 0) {
+ if ((tmp[1] == NULL) || (tmp[2] == NULL)) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ addr = simple_strtoul(tmp[1], &ptmp, 16);
+ addr &= 0xFFF;
+
+ cnts = strlen(tmp[2]);
+ if (cnts%2) {
+ err = -EINVAL;
+ goto exit;
+ }
+ cnts /= 2;
+ if (cnts == 0) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ DBG_88E("%s: addr = 0x%X\n", __func__, addr);
+ DBG_88E("%s: cnts =%d\n", __func__, cnts);
+ DBG_88E("%s: BT data =%s\n", __func__, tmp[2]);
+
+ for (jj = 0, kk = 0; jj < cnts; jj++, kk += 2)
+ setdata[jj] = key_2char2num(tmp[2][kk], tmp[2][kk + 1]);
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_BT, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
+ if ((addr+cnts) > max_available_size) {
+ DBG_88E("%s: addr(0x%X)+cnts(%d) parameter error!\n", __func__, addr, cnts);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ if (rtw_BT_efuse_map_write(padapter, addr, cnts, setdata) == _FAIL) {
+ DBG_88E("%s: rtw_BT_efuse_map_write error!!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+ } else if (strcmp(tmp[0], "btwfake") == 0) {
+ if ((tmp[1] == NULL) || (tmp[2] == NULL)) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ addr = simple_strtoul(tmp[1], &ptmp, 16);
+ addr &= 0xFFF;
+
+ cnts = strlen(tmp[2]);
+ if (cnts%2) {
+ err = -EINVAL;
+ goto exit;
+ }
+ cnts /= 2;
+ if (cnts == 0) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ DBG_88E("%s: addr = 0x%X\n", __func__, addr);
+ DBG_88E("%s: cnts =%d\n", __func__, cnts);
+ DBG_88E("%s: BT tmp data =%s\n", __func__, tmp[2]);
+
+ for (jj = 0, kk = 0; jj < cnts; jj++, kk += 2)
+ pEfuseHal->fakeBTEfuseModifiedMap[addr+jj] = key_2char2num(tmp[2][kk], tmp[2][kk + 1]);
+ } else if (strcmp(tmp[0], "btdumpfake") == 0) {
+ if (rtw_BT_efuse_map_read(padapter, 0, EFUSE_BT_MAX_MAP_LEN, pEfuseHal->fakeBTEfuseModifiedMap) == _SUCCESS) {
+ DBG_88E("%s: BT read all map success\n", __func__);
+ } else {
+ DBG_88E("%s: BT read all map Fail!\n", __func__);
+ err = -EFAULT;
+ }
+ } else if (strcmp(tmp[0], "wldumpfake") == 0) {
+ if (rtw_efuse_map_read(padapter, 0, EFUSE_BT_MAX_MAP_LEN, pEfuseHal->fakeEfuseModifiedMap) == _SUCCESS) {
+ DBG_88E("%s: WiFi read all map success\n", __func__);
+ } else {
+ DBG_88E("%s: WiFi read all map Fail\n", __func__);
+ err = -EFAULT;
+ }
+ } else if (strcmp(tmp[0], "btfk2map") == 0) {
+ memcpy(pEfuseHal->BTEfuseModifiedMap, pEfuseHal->fakeBTEfuseModifiedMap, EFUSE_BT_MAX_MAP_LEN);
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_BT, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
+ if (max_available_size < 1) {
+ err = -EFAULT;
+ goto exit;
+ }
+
+ if (rtw_BT_efuse_map_write(padapter, 0x00, EFUSE_BT_MAX_MAP_LEN, pEfuseHal->fakeBTEfuseModifiedMap) == _FAIL) {
+ DBG_88E("%s: rtw_BT_efuse_map_write error!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+ } else if (strcmp(tmp[0], "wlfk2map") == 0) {
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
+ if (max_available_size < 1) {
+ err = -EFAULT;
+ goto exit;
+ }
+
+ if (rtw_efuse_map_write(padapter, 0x00, EFUSE_MAX_MAP_LEN, pEfuseHal->fakeEfuseModifiedMap) == _FAIL) {
+ DBG_88E("%s: rtw_efuse_map_write error!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+ } else if (strcmp(tmp[0], "wlwfake") == 0) {
+ if ((tmp[1] == NULL) || (tmp[2] == NULL)) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ addr = simple_strtoul(tmp[1], &ptmp, 16);
+ addr &= 0xFFF;
+
+ cnts = strlen(tmp[2]);
+ if (cnts%2) {
+ err = -EINVAL;
+ goto exit;
+ }
+ cnts /= 2;
+ if (cnts == 0) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ DBG_88E("%s: addr = 0x%X\n", __func__, addr);
+ DBG_88E("%s: cnts =%d\n", __func__, cnts);
+ DBG_88E("%s: map tmp data =%s\n", __func__, tmp[2]);
+
+ for (jj = 0, kk = 0; jj < cnts; jj++, kk += 2)
+ pEfuseHal->fakeEfuseModifiedMap[addr+jj] = key_2char2num(tmp[2][kk], tmp[2][kk + 1]);
+ }
+
+exit:
+ kfree(setdata);
+ kfree(ShadowMapBT);
+ kfree(ShadowMapWiFi);
+ kfree(setrawdata);
+
+ rtw_pm_set_ips(padapter, ips_mode);
+ rtw_pm_set_lps(padapter, lps_mode);
+
+ return err;
+}
+
+/*
+ * Input Format: %s,%d,%d
+ * %s is width, could be
+ * "b" for 1 byte
+ * "w" for WORD (2 bytes)
+ * "dw" for DWORD (4 bytes)
+ * 1st %d is the address (offset)
+ * 2nd %d is the data to write
+ */
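+/* Example (illustrative): "w,800,1234" writes 0x1234 as a 16-bit word to offset 0x800 (both fields are parsed as hex). */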
+static int rtw_mp_write_reg(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ char *pch, *pnext, *ptmp;
+ char *width_str;
+ char width;
+ u32 addr, data;
+ int ret;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ pch = extra;
+ pnext = strpbrk(pch, ",.-");
+ if (pnext == NULL)
+ return -EINVAL;
+ *pnext = 0;
+ width_str = pch;
+
+ pch = pnext + 1;
+ pnext = strpbrk(pch, ",.-");
+ if (pnext == NULL)
+ return -EINVAL;
+ *pnext = 0;
+ addr = simple_strtoul(pch, &ptmp, 16);
+ if (addr > 0x3FFF)
+ return -EINVAL;
+
+ pch = pnext + 1;
+ if ((pch - extra) >= wrqu->length)
+ return -EINVAL;
+ data = simple_strtoul(pch, &ptmp, 16);
+
+ ret = 0;
+ width = width_str[0];
+ switch (width) {
+ case 'b':
+ /* 1 byte */
+ if (data > 0xFF) {
+ ret = -EINVAL;
+ break;
+ }
+ rtw_write8(padapter, addr, data);
+ break;
+ case 'w':
+ /* 2 bytes */
+ if (data > 0xFFFF) {
+ ret = -EINVAL;
+ break;
+ }
+ rtw_write16(padapter, addr, data);
+ break;
+ case 'd':
+ /* 4 bytes */
+ rtw_write32(padapter, addr, data);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Input Format: %s,%d
+ * %s is width, could be
+ * "b" for 1 byte
+ * "w" for WORD (2 bytes)
+ * "dw" for DWORD (4 bytes)
+ * %d is the address (offset)
+ *
+ * Return:
+ * %d is the data read back
+ */
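+/* Example (illustrative): "b,4c" reads one byte from offset 0x4c and returns it as a decimal string. */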
+static int rtw_mp_read_reg(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ char *input = kmalloc(wrqu->length, GFP_KERNEL);
+ char *pch, *pnext, *ptmp;
+ char *width_str;
+ char width;
+ char data[20], tmp[20];
+ u32 addr;
+ u32 ret, i = 0, j = 0, strtout = 0;
+
+ if (!input)
+ return -ENOMEM;
+ if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
+ kfree(input);
+ return -EFAULT;
+ }
+ _rtw_memset(data, 0, 20);
+ _rtw_memset(tmp, 0, 20);
+ _rtw_memset(extra, 0, wrqu->length);
+
+ pch = input;
+ pnext = strpbrk(pch, ",.-");
+ if (pnext == NULL) {
+ kfree(input);
+ return -EINVAL;
+ }
+ *pnext = 0;
+ width_str = pch;
+
+ pch = pnext + 1;
+ if ((pch - input) >= wrqu->length) {
+ kfree(input);
+ return -EINVAL;
+ }
+ /* pch and width_str point into input, so parse them before freeing it */
+ addr = simple_strtoul(pch, &ptmp, 16);
+ width = width_str[0];
+ kfree(input);
+ if (addr > 0x3FFF)
+ return -EINVAL;
+
+ ret = 0;
+ switch (width) {
+ case 'b':
+ /* 1 byte */
+ sprintf(extra, "%d\n", rtw_read8(padapter, addr));
+ wrqu->length = strlen(extra);
+ break;
+ case 'w':
+ /* 2 bytes */
+ sprintf(data, "%04x\n", rtw_read16(padapter, addr));
+ for (i = 0; i <= strlen(data); i++) {
+ if (i%2 == 0) {
+ tmp[j] = ' ';
+ j++;
+ }
+ if (data[i] != '\0')
+ tmp[j] = data[i];
+ j++;
+ }
+ pch = tmp;
+ DBG_88E("pch =%s", pch);
+
+ while (*pch != '\0') {
+ pnext = strpbrk(pch, " ");
+ if (!pnext)
+ break;
+
+ pnext++;
+ if (*pnext != '\0') {
+ strtout = simple_strtoul(pnext, &ptmp, 16);
+ sprintf(extra, "%s %d", extra, strtout);
+ } else {
+ break;
+ }
+ pch = pnext;
+ }
+ wrqu->length = 6;
+ break;
+ case 'd':
+ /* 4 bytes */
+ sprintf(data, "%08x", rtw_read32(padapter, addr));
+ /* add read data format blank */
+ for (i = 0; i <= strlen(data); i++) {
+ if (i%2 == 0) {
+ tmp[j] = ' ';
+ j++;
+ }
+ if (data[i] != '\0')
+ tmp[j] = data[i];
+
+ j++;
+ }
+ pch = tmp;
+ DBG_88E("pch =%s", pch);
+
+ while (*pch != '\0') {
+ pnext = strpbrk(pch, " ");
+ if (!pnext)
+ break;
+ pnext++;
+ if (*pnext != '\0') {
+ strtout = simple_strtoul(pnext, &ptmp, 16);
+ sprintf(extra, "%s %d", extra, strtout);
+ } else {
+ break;
+ }
+ pch = pnext;
+ }
+ wrqu->length = strlen(extra);
+ break;
+ default:
+ wrqu->length = 0;
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Input Format: %d,%x,%x
+ * %d is RF path, should be smaller than MAX_RF_PATH_NUMS
+ * 1st %x is the address (offset)
+ * 2nd %x is the data to write
+ */
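+/* Example (illustrative): "0,12,1f" writes 0x1f to RF register 0x12 on path 0. */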
+static int rtw_mp_write_rf(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ u32 path, addr, data;
+ int ret;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ ret = sscanf(extra, "%d,%x,%x", &path, &addr, &data);
+ if (ret < 3)
+ return -EINVAL;
+
+ if (path >= MAX_RF_PATH_NUMS)
+ return -EINVAL;
+ if (addr > 0xFF)
+ return -EINVAL;
+ if (data > 0xFFFFF)
+ return -EINVAL;
+
+ _rtw_memset(extra, 0, wrqu->length);
+
+ write_rfreg(padapter, path, addr, data);
+
+ sprintf(extra, "write_rf completed\n");
+ wrqu->length = strlen(extra);
+
+ return 0;
+}
+
+/*
+ * Input Format: %d,%x
+ * %d is RF path, should be smaller than MAX_RF_PATH_NUMS
+ * %x is the address (offset)
+ *
+ * Return:
+ * %d is the data read back
+ */
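+/* Example (illustrative): "0,12" reads RF register 0x12 on path 0; the value is returned byte by byte as decimal numbers. */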
+static int rtw_mp_read_rf(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ char *input = kmalloc(wrqu->length, GFP_KERNEL);
+ char *pch, *pnext, *ptmp;
+ char data[20], tmp[20];
+ u32 path, addr;
+ u32 ret, i = 0, j = 0, strtou = 0;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ if (!input)
+ return -ENOMEM;
+ if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
+ kfree(input);
+ return -EFAULT;
+ }
+ ret = sscanf(input, "%d,%x", &path, &addr);
+ kfree(input);
+ if (ret < 2)
+ return -EINVAL;
+
+ if (path >= MAX_RF_PATH_NUMS)
+ return -EINVAL;
+ if (addr > 0xFF)
+ return -EINVAL;
+
+ _rtw_memset(extra, 0, wrqu->length);
+
+ sprintf(data, "%08x", read_rfreg(padapter, path, addr));
+ /* add read data format blank */
+ for (i = 0; i <= strlen(data); i++) {
+ if (i%2 == 0) {
+ tmp[j] = ' ';
+ j++;
+ }
+ tmp[j] = data[i];
+ j++;
+ }
+ pch = tmp;
+ DBG_88E("pch =%s", pch);
+
+ while (*pch != '\0') {
+ pnext = strpbrk(pch, " ");
+ if (!pnext)
+ break;
+ pnext++;
+ if (*pnext != '\0') {
+ strtou = simple_strtoul(pnext, &ptmp, 16);
+ sprintf(extra, "%s %d", extra, strtou);
+ } else {
+ break;
+ }
+ pch = pnext;
+ }
+ wrqu->length = strlen(extra);
+ return 0;
+}
+
+static int rtw_mp_start(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ if (padapter->registrypriv.mp_mode == 0) {
+ padapter->registrypriv.mp_mode = 1;
+
+ rtw_pm_set_ips(padapter, IPS_NONE);
+ LeaveAllPowerSaveMode(padapter);
+
+ MPT_InitializeAdapter(padapter, 1);
+ }
+ if (padapter->registrypriv.mp_mode == 0)
+ return -EPERM;
+ if (padapter->mppriv.mode == MP_OFF) {
+ if (mp_start_test(padapter) == _FAIL)
+ return -EPERM;
+ padapter->mppriv.mode = MP_ON;
+ }
+ return 0;
+}
+
+static int rtw_mp_stop(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ if (padapter->registrypriv.mp_mode == 1) {
+ MPT_DeInitAdapter(padapter);
+ padapter->registrypriv.mp_mode = 0;
+ }
+
+ if (padapter->mppriv.mode != MP_OFF) {
+ mp_stop_test(padapter);
+ padapter->mppriv.mode = MP_OFF;
+ }
+
+ return 0;
+}
+
+extern int wifirate2_ratetbl_inx(unsigned char rate);
+
+static int rtw_mp_rate(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ u32 rate = MPT_RATE_1M;
+ char *input = kmalloc(wrqu->length, GFP_KERNEL);
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ if (!input)
+ return -ENOMEM;
+ if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
+ kfree(input);
+ return -EFAULT;
+ }
+ rate = rtw_atoi(input);
+ sprintf(extra, "Set data rate to %d", rate);
+ kfree(input);
+ if (rate <= 0x7f)
+ rate = wifirate2_ratetbl_inx((u8)rate);
+ else
+ rate = (rate-0x80+MPT_RATE_MCS0);
+
+ if (rate >= MPT_RATE_LAST)
+ return -EINVAL;
+
+ padapter->mppriv.rateidx = rate;
+ Hal_SetDataRate(padapter);
+
+ wrqu->length = strlen(extra) + 1;
+ return 0;
+}
+
+static int rtw_mp_channel(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ char *input = kmalloc(wrqu->length, GFP_KERNEL);
+ u32 channel = 1;
+
+ if (!input)
+ return -ENOMEM;
+ if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
+ kfree(input);
+ return -EFAULT;
+ }
+ channel = rtw_atoi(input);
+ sprintf(extra, "Change channel %d to channel %d", padapter->mppriv.channel, channel);
+
+ padapter->mppriv.channel = channel;
+ Hal_SetChannel(padapter);
+
+ wrqu->length = strlen(extra) + 1;
+ kfree(input);
+ return 0;
+}
+
+static int rtw_mp_bandwidth(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ u32 bandwidth = 0, sg = 0;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ sscanf(extra, "40M =%d, shortGI =%d", &bandwidth, &sg);
+
+ if (bandwidth != HT_CHANNEL_WIDTH_40)
+ bandwidth = HT_CHANNEL_WIDTH_20;
+
+ padapter->mppriv.bandwidth = (u8)bandwidth;
+ padapter->mppriv.preamble = sg;
+
+ SetBandwidth(padapter);
+
+ return 0;
+}
+
+static int rtw_mp_txpower(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ u32 idx_a = 0, idx_b = 0;
+ char *input = kmalloc(wrqu->length, GFP_KERNEL);
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ if (!input)
+ return -ENOMEM;
+ if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
+ kfree(input);
+ return -EFAULT;
+ }
+ sscanf(input, "patha =%d, pathb =%d", &idx_a, &idx_b);
+
+ sprintf(extra, "Set power level path_A:%d path_B:%d", idx_a, idx_b);
+ padapter->mppriv.txpoweridx = (u8)idx_a;
+ padapter->mppriv.txpoweridx_b = (u8)idx_b;
+ padapter->mppriv.bSetTxPower = 1;
+ Hal_SetAntennaPathPower(padapter);
+
+ wrqu->length = strlen(extra) + 1;
+ kfree(input);
+ return 0;
+}
+
+static int rtw_mp_ant_tx(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ u8 i;
+ char *input = kmalloc(wrqu->length, GFP_KERNEL);
+ u16 antenna = 0;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ if (!input)
+ return -ENOMEM;
+ if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
+ kfree(input);
+ return -EFAULT;
+ }
+
+ sprintf(extra, "switch Tx antenna to %s", input);
+
+ for (i = 0; i < strlen(input); i++) {
+ switch (input[i]) {
+ case 'a':
+ antenna |= ANTENNA_A;
+ break;
+ case 'b':
+ antenna |= ANTENNA_B;
+ break;
+ }
+ }
+ padapter->mppriv.antenna_tx = antenna;
+
+ Hal_SetAntenna(padapter);
+
+ wrqu->length = strlen(extra) + 1;
+ kfree(input);
+ return 0;
+}
+
+static int rtw_mp_ant_rx(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ u8 i;
+ u16 antenna = 0;
+ char *input = kmalloc(wrqu->length, GFP_KERNEL);
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ if (!input)
+ return -ENOMEM;
+ if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
+ kfree(input);
+ return -EFAULT;
+ }
+ _rtw_memset(extra, 0, wrqu->length);
+
+ sprintf(extra, "switch Rx antenna to %s", input);
+
+ for (i = 0; i < strlen(input); i++) {
+ switch (input[i]) {
+ case 'a':
+ antenna |= ANTENNA_A;
+ break;
+ case 'b':
+ antenna |= ANTENNA_B;
+ break;
+ }
+ }
+
+ padapter->mppriv.antenna_rx = antenna;
+ Hal_SetAntenna(padapter);
+ wrqu->length = strlen(extra);
+ kfree(input);
+ return 0;
+}
+
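+/* MP continuous-Tx control: parse the requested mode (packet, single tone, continuous, carrier suppression, single carrier) from extra, or stop an ongoing test. */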
+static int rtw_mp_ctx(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ u32 pkTx = 1, countPkTx = 1, cotuTx = 1, CarrSprTx = 1, scTx = 1, sgleTx = 1, stop = 1;
+ u32 bStartTest = 1;
+ u32 count = 0;
+ struct mp_priv *pmp_priv;
+ struct pkt_attrib *pattrib;
+
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ pmp_priv = &padapter->mppriv;
+
+ if (copy_from_user(extra, wrqu->pointer, wrqu->length))
+ return -EFAULT;
+
+ DBG_88E("%s: in =%s\n", __func__, extra);
+
+ countPkTx = strncmp(extra, "count =", 5); /* strncmp() returns 0 on a match */
+ cotuTx = strncmp(extra, "background", 20);
+ CarrSprTx = strncmp(extra, "background, cs", 20);
+ scTx = strncmp(extra, "background, sc", 20);
+ sgleTx = strncmp(extra, "background, stone", 20);
+ pkTx = strncmp(extra, "background, pkt", 20);
+ stop = strncmp(extra, "stop", 4);
+ sscanf(extra, "count =%d, pkt", &count);
+
+ _rtw_memset(extra, '\0', wrqu->length);
+
+ if (stop == 0) {
+ bStartTest = 0; /* To set Stop */
+ pmp_priv->tx.stop = 1;
+ sprintf(extra, "Stop continuous Tx");
+ } else {
+ bStartTest = 1;
+ if (pmp_priv->mode != MP_ON) {
+ if (pmp_priv->tx.stop != 1) {
+ DBG_88E("%s: MP_MODE != ON %d\n", __func__, pmp_priv->mode);
+ return -EFAULT;
+ }
+ }
+ }
+
+ if (pkTx == 0 || countPkTx == 0)
+ pmp_priv->mode = MP_PACKET_TX;
+ if (sgleTx == 0)
+ pmp_priv->mode = MP_SINGLE_TONE_TX;
+ if (cotuTx == 0)
+ pmp_priv->mode = MP_CONTINUOUS_TX;
+ if (CarrSprTx == 0)
+ pmp_priv->mode = MP_CARRIER_SUPPRISSION_TX;
+ if (scTx == 0)
+ pmp_priv->mode = MP_SINGLE_CARRIER_TX;
+
+ switch (pmp_priv->mode) {
+ case MP_PACKET_TX:
+ if (bStartTest == 0) {
+ pmp_priv->tx.stop = 1;
+ pmp_priv->mode = MP_ON;
+ sprintf(extra, "Stop continuous Tx");
+ } else if (pmp_priv->tx.stop == 1) {
+ sprintf(extra, "Start continuous DA = ffffffffffff len = 1500 count =%u,\n", count);
+ pmp_priv->tx.stop = 0;
+ pmp_priv->tx.count = count;
+ pmp_priv->tx.payload = 2;
+ pattrib = &pmp_priv->tx.attrib;
+ pattrib->pktlen = 1500;
+ _rtw_memset(pattrib->dst, 0xFF, ETH_ALEN);
+ SetPacketTx(padapter);
+ } else {
+ return -EFAULT;
+ }
+ wrqu->length = strlen(extra);
+ return 0;
+ case MP_SINGLE_TONE_TX:
+ if (bStartTest != 0)
+ sprintf(extra, "Start continuous DA = ffffffffffff len = 1500\n infinite = yes.");
+ Hal_SetSingleToneTx(padapter, (u8)bStartTest);
+ break;
+ case MP_CONTINUOUS_TX:
+ if (bStartTest != 0)
+ sprintf(extra, "Start continuous DA = ffffffffffff len = 1500\n infinite = yes.");
+ Hal_SetContinuousTx(padapter, (u8)bStartTest);
+ break;
+ case MP_CARRIER_SUPPRISSION_TX:
+ if (bStartTest != 0) {
+ if (pmp_priv->rateidx <= MPT_RATE_11M) {
+ sprintf(extra, "Start continuous DA = ffffffffffff len = 1500\n infinite = yes.");
+ Hal_SetCarrierSuppressionTx(padapter, (u8)bStartTest);
+ } else {
+ sprintf(extra, "Specify carrier suppression but not CCK rate");
+ }
+ }
+ break;
+ case MP_SINGLE_CARRIER_TX:
+ if (bStartTest != 0)
+ sprintf(extra, "Start continuous DA = ffffffffffff len = 1500\n infinite = yes.");
+ Hal_SetSingleCarrierTx(padapter, (u8)bStartTest);
+ break;
+ default:
+ sprintf(extra, "Error! Continuous-Tx is not on-going.");
+ return -EFAULT;
+ }
+
+ if (bStartTest == 1 && pmp_priv->mode != MP_ON) {
+ if (pmp_priv->tx.stop == 0) {
+ pmp_priv->tx.stop = 1;
+ rtw_msleep_os(5);
+ }
+ pmp_priv->tx.stop = 0;
+ pmp_priv->tx.count = 1;
+ SetPacketTx(padapter);
+ } else {
+ pmp_priv->mode = MP_ON;
+ }
+
+ wrqu->length = strlen(extra);
+ return 0;
+}
+
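+/* MP Rx control: "start"/"stop" packet reception, or "phy" to report PHY OK/CRC/false-alarm counters. */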
+static int rtw_mp_arx(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ u8 bStartRx = 0, bStopRx = 0, bQueryPhy;
+ u32 cckok = 0, cckcrc = 0, ofdmok = 0, ofdmcrc = 0, htok = 0, htcrc = 0, OFDM_FA = 0, CCK_FA = 0;
+ char *input = kmalloc(wrqu->length, GFP_KERNEL);
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ if (!input)
+ return -ENOMEM;
+
+ if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
+ kfree(input);
+ return -EFAULT;
+ }
+ DBG_88E("%s: %s\n", __func__, input);
+
+ bStartRx = (strncmp(input, "start", 5) == 0) ? 1 : 0; /* strncmp() returns 0 on a match */
+ bStopRx = (strncmp(input, "stop", 5) == 0) ? 1 : 0;
+ bQueryPhy = (strncmp(input, "phy", 3) == 0) ? 1 : 0;
+
+ if (bStartRx) {
+ sprintf(extra, "start");
+ SetPacketRx(padapter, bStartRx);
+ } else if (bStopRx) {
+ SetPacketRx(padapter, 0);
+ sprintf(extra, "Received packet OK:%d CRC error:%d", padapter->mppriv.rx_pktcount, padapter->mppriv.rx_crcerrpktcount);
+ } else if (bQueryPhy) {
+ /*
+ OFDM FA
+ RegCF0[15:0]
+ RegCF2[31:16]
+ RegDA0[31:16]
+ RegDA4[15:0]
+ RegDA4[31:16]
+ RegDA8[15:0]
+ CCK FA
+ (RegA5B<<8) | RegA5C
+ */
+ cckok = read_bbreg(padapter, 0xf88, 0xffffffff);
+ cckcrc = read_bbreg(padapter, 0xf84, 0xffffffff);
+ ofdmok = read_bbreg(padapter, 0xf94, 0x0000FFFF);
+ ofdmcrc = read_bbreg(padapter, 0xf94, 0xFFFF0000);
+ htok = read_bbreg(padapter, 0xf90, 0x0000FFFF);
+ htcrc = read_bbreg(padapter, 0xf90, 0xFFFF0000);
+
+		/* Sum the OFDM false-alarm counter fields listed above */
+		OFDM_FA = read_bbreg(padapter, 0xcf0, 0x0000FFFF);
+		OFDM_FA += read_bbreg(padapter, 0xcf2, 0xFFFF0000);
+		OFDM_FA += read_bbreg(padapter, 0xda0, 0xFFFF0000);
+		OFDM_FA += read_bbreg(padapter, 0xda4, 0x0000FFFF);
+		OFDM_FA += read_bbreg(padapter, 0xda4, 0xFFFF0000);
+		OFDM_FA += read_bbreg(padapter, 0xda8, 0x0000FFFF);
+ CCK_FA = (rtw_read8(padapter, 0xa5b)<<8) | (rtw_read8(padapter, 0xa5c));
+
+ sprintf(extra, "Phy Received packet OK:%d CRC error:%d FA Counter: %d", cckok+ofdmok+htok, cckcrc+ofdmcrc+htcrc, OFDM_FA+CCK_FA);
+ }
+ wrqu->length = strlen(extra) + 1;
+ kfree(input);
+ return 0;
+}
+
+static int rtw_mp_trx_query(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ u32 txok, txfail, rxok, rxfail;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ txok = padapter->mppriv.tx.sended;
+ txfail = 0;
+ rxok = padapter->mppriv.rx_pktcount;
+ rxfail = padapter->mppriv.rx_crcerrpktcount;
+
+ _rtw_memset(extra, '\0', 128);
+
+ sprintf(extra, "Tx OK:%d, Tx Fail:%d, Rx OK:%d, CRC error:%d ", txok, txfail, rxok, rxfail);
+
+ wrqu->length = strlen(extra)+1;
+
+ return 0;
+}
+
+static int rtw_mp_pwrtrk(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ u8 enable;
+ u32 thermal;
+ s32 ret;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ char *input = kmalloc(wrqu->length, GFP_KERNEL);
+
+ if (!input)
+ return -ENOMEM;
+ if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
+ kfree(input);
+ return -EFAULT;
+ }
+ _rtw_memset(extra, 0, wrqu->length);
+
+ enable = 1;
+ if (wrqu->length > 1) {/* not empty string */
+ if (strncmp(input, "stop", 4) == 0) {
+ enable = 0;
+ sprintf(extra, "mp tx power tracking stop");
+		} else if (sscanf(input, "ther =%d", &thermal) == 1) {
+			ret = Hal_SetThermalMeter(padapter, (u8)thermal);
+			if (ret == _FAIL) {
+				kfree(input);
+				return -EPERM;
+			}
+			sprintf(extra, "mp tx power tracking start, target value =%d ok ", thermal);
+ } else {
+ kfree(input);
+ return -EINVAL;
+ }
+ }
+
+ kfree(input);
+ ret = Hal_SetPowerTracking(padapter, enable);
+ if (ret == _FAIL)
+ return -EPERM;
+
+ wrqu->length = strlen(extra);
+ return 0;
+}
+
+static int rtw_mp_psd(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ char *input = kmalloc(wrqu->length, GFP_KERNEL);
+
+ if (!input)
+ return -ENOMEM;
+ if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
+ kfree(input);
+ return -EFAULT;
+ }
+
+ strcpy(extra, input);
+
+ wrqu->length = mp_query_psd(padapter, extra);
+ kfree(input);
+ return 0;
+}
+
+static int rtw_mp_thermal(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ u8 val;
+ u16 bwrite = 1;
+ u16 addr = EEPROM_THERMAL_METER_88E;
+
+ u16 cnt = 1;
+ u16 max_available_size = 0;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ if (copy_from_user(extra, wrqu->pointer, wrqu->length))
+ return -EFAULT;
+
+	bwrite = strncmp(extra, "write", 6); /* strncmp() returns 0 on match */
+
+ Hal_GetThermalMeter(padapter, &val);
+
+ if (bwrite == 0) {
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
+ if (2 > max_available_size) {
+ DBG_88E("no available efuse!\n");
+ return -EFAULT;
+ }
+ if (rtw_efuse_map_write(padapter, addr, cnt, &val) == _FAIL) {
+ DBG_88E("rtw_efuse_map_write error\n");
+ return -EFAULT;
+ } else {
+ sprintf(extra, " efuse write ok :%d", val);
+ }
+ } else {
+ sprintf(extra, "%d", val);
+ }
+ wrqu->length = strlen(extra);
+
+ return 0;
+}
+
+static int rtw_mp_reset_stats(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ struct mp_priv *pmp_priv;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ pmp_priv = &padapter->mppriv;
+
+ pmp_priv->tx.sended = 0;
+ pmp_priv->tx_pktcount = 0;
+ pmp_priv->rx_pktcount = 0;
+ pmp_priv->rx_crcerrpktcount = 0;
+
+ /* reset phy counter */
+ write_bbreg(padapter, 0xf14, BIT16, 0x1);
+ rtw_msleep_os(10);
+ write_bbreg(padapter, 0xf14, BIT16, 0x0);
+
+ return 0;
+}
+
+static int rtw_mp_dump(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ u32 value;
+ u8 rf_type, path_nums = 0;
+ u32 i, j = 1, path;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ if (strncmp(extra, "all", 4) == 0) {
+ DBG_88E("\n ======= MAC REG =======\n");
+ for (i = 0x0; i < 0x300; i += 4) {
+ if (j%4 == 1)
+ DBG_88E("0x%02x", i);
+ DBG_88E(" 0x%08x ", rtw_read32(padapter, i));
+ if ((j++)%4 == 0)
+ DBG_88E("\n");
+ }
+ for (i = 0x400; i < 0x1000; i += 4) {
+ if (j%4 == 1)
+ DBG_88E("0x%02x", i);
+ DBG_88E(" 0x%08x ", rtw_read32(padapter, i));
+ if ((j++)%4 == 0)
+ DBG_88E("\n");
+ }
+
+ j = 1;
+ rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+
+ DBG_88E("\n ======= RF REG =======\n");
+ if ((RF_1T2R == rf_type) || (RF_1T1R == rf_type))
+ path_nums = 1;
+ else
+ path_nums = 2;
+
+ for (path = 0; path < path_nums; path++) {
+ for (i = 0; i < 0x34; i++) {
+ value = rtw_hal_read_rfreg(padapter, path, i, 0xffffffff);
+ if (j%4 == 1)
+ DBG_88E("0x%02x ", i);
+ DBG_88E(" 0x%08x ", value);
+ if ((j++)%4 == 0)
+ DBG_88E("\n");
+ }
+ }
+ }
+ return 0;
+}
+
+static int rtw_mp_phypara(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ char *input = kmalloc(wrqu->length, GFP_KERNEL);
+ u32 valxcap;
+
+ if (!input)
+ return -ENOMEM;
+ if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
+ kfree(input);
+ return -EFAULT;
+ }
+
+ DBG_88E("%s:iwpriv in =%s\n", __func__, input);
+
+ sscanf(input, "xcap =%d", &valxcap);
+
+ kfree(input);
+ return 0;
+}
+
+static int rtw_mp_SetRFPath(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ char *input = kmalloc(wrqu->data.length, GFP_KERNEL);
+ u8 bMain = 1, bTurnoff = 1;
+
+ if (!input)
+ return -ENOMEM;
+	if (copy_from_user(input, wrqu->data.pointer, wrqu->data.length)) {
+		kfree(input);
+		return -EFAULT;
+	}
+ DBG_88E("%s:iwpriv in =%s\n", __func__, input);
+
+	bMain = strncmp(input, "1", 2); /* strncmp() returns 0 on match */
+	bTurnoff = strncmp(input, "0", 3);
+
+ if (bMain == 0) {
+ MP_PHY_SetRFPathSwitch(padapter, true);
+ DBG_88E("%s:PHY_SetRFPathSwitch = true\n", __func__);
+ } else if (bTurnoff == 0) {
+ MP_PHY_SetRFPathSwitch(padapter, false);
+ DBG_88E("%s:PHY_SetRFPathSwitch = false\n", __func__);
+ }
+ kfree(input);
+ return 0;
+}
+
+static int rtw_mp_QueryDrv(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ char *input = kmalloc(wrqu->data.length, GFP_KERNEL);
+ u8 qAutoLoad = 1;
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
+
+ if (!input)
+ return -ENOMEM;
+
+	if (copy_from_user(input, wrqu->data.pointer, wrqu->data.length)) {
+		kfree(input);
+		return -EFAULT;
+	}
+ DBG_88E("%s:iwpriv in =%s\n", __func__, input);
+
+	qAutoLoad = strncmp(input, "autoload", 8); /* strncmp() returns 0 on match */
+
+ if (qAutoLoad == 0) {
+ DBG_88E("%s:qAutoLoad\n", __func__);
+
+ if (pEEPROM->bautoload_fail_flag)
+ sprintf(extra, "fail");
+ else
+ sprintf(extra, "ok");
+ }
+ wrqu->data.length = strlen(extra) + 1;
+ kfree(input);
+ return 0;
+}
+
+static int rtw_mp_set(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wdata, char *extra)
+{
+ struct iw_point *wrqu = (struct iw_point *)wdata;
+ u32 subcmd = wrqu->flags;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ if (padapter == NULL)
+ return -ENETDOWN;
+
+ if (extra == NULL) {
+ wrqu->length = 0;
+ return -EIO;
+ }
+
+ switch (subcmd) {
+ case MP_START:
+ DBG_88E("set case mp_start\n");
+ rtw_mp_start(dev, info, wrqu, extra);
+ break;
+ case MP_STOP:
+ DBG_88E("set case mp_stop\n");
+ rtw_mp_stop(dev, info, wrqu, extra);
+ break;
+ case MP_BANDWIDTH:
+ DBG_88E("set case mp_bandwidth\n");
+ rtw_mp_bandwidth(dev, info, wrqu, extra);
+ break;
+ case MP_RESET_STATS:
+ DBG_88E("set case MP_RESET_STATS\n");
+ rtw_mp_reset_stats(dev, info, wrqu, extra);
+ break;
+ case MP_SetRFPathSwh:
+ DBG_88E("set MP_SetRFPathSwitch\n");
+ rtw_mp_SetRFPath(dev, info, wdata, extra);
+ break;
+ case CTA_TEST:
+ DBG_88E("set CTA_TEST\n");
+ rtw_cta_test_start(dev, info, wdata, extra);
+ break;
+ }
+
+ return 0;
+}
+
+static int rtw_mp_get(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wdata, char *extra)
+{
+ struct iw_point *wrqu = (struct iw_point *)wdata;
+ u32 subcmd = wrqu->flags;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ if (padapter == NULL)
+ return -ENETDOWN;
+ if (extra == NULL) {
+ wrqu->length = 0;
+ return -EIO;
+ }
+
+ switch (subcmd) {
+ case WRITE_REG:
+ rtw_mp_write_reg(dev, info, wrqu, extra);
+ break;
+ case WRITE_RF:
+ rtw_mp_write_rf(dev, info, wrqu, extra);
+ break;
+ case MP_PHYPARA:
+ DBG_88E("mp_get MP_PHYPARA\n");
+ rtw_mp_phypara(dev, info, wrqu, extra);
+ break;
+ case MP_CHANNEL:
+ DBG_88E("set case mp_channel\n");
+ rtw_mp_channel(dev, info, wrqu, extra);
+ break;
+ case READ_REG:
+ DBG_88E("mp_get READ_REG\n");
+ rtw_mp_read_reg(dev, info, wrqu, extra);
+ break;
+ case READ_RF:
+ DBG_88E("mp_get READ_RF\n");
+ rtw_mp_read_rf(dev, info, wrqu, extra);
+ break;
+ case MP_RATE:
+ DBG_88E("set case mp_rate\n");
+ rtw_mp_rate(dev, info, wrqu, extra);
+ break;
+ case MP_TXPOWER:
+ DBG_88E("set case MP_TXPOWER\n");
+ rtw_mp_txpower(dev, info, wrqu, extra);
+ break;
+ case MP_ANT_TX:
+ DBG_88E("set case MP_ANT_TX\n");
+ rtw_mp_ant_tx(dev, info, wrqu, extra);
+ break;
+ case MP_ANT_RX:
+ DBG_88E("set case MP_ANT_RX\n");
+ rtw_mp_ant_rx(dev, info, wrqu, extra);
+ break;
+ case MP_QUERY:
+ rtw_mp_trx_query(dev, info, wrqu, extra);
+ break;
+ case MP_CTX:
+ DBG_88E("set case MP_CTX\n");
+ rtw_mp_ctx(dev, info, wrqu, extra);
+ break;
+ case MP_ARX:
+ DBG_88E("set case MP_ARX\n");
+ rtw_mp_arx(dev, info, wrqu, extra);
+ break;
+ case EFUSE_GET:
+ DBG_88E("efuse get EFUSE_GET\n");
+ rtw_mp_efuse_get(dev, info, wdata, extra);
+ break;
+ case MP_DUMP:
+ DBG_88E("set case MP_DUMP\n");
+ rtw_mp_dump(dev, info, wrqu, extra);
+ break;
+ case MP_PSD:
+ DBG_88E("set case MP_PSD\n");
+ rtw_mp_psd(dev, info, wrqu, extra);
+ break;
+ case MP_THER:
+ DBG_88E("set case MP_THER\n");
+ rtw_mp_thermal(dev, info, wrqu, extra);
+ break;
+ case MP_QueryDrvStats:
+ DBG_88E("mp_get MP_QueryDrvStats\n");
+		rtw_mp_QueryDrv(dev, info, wdata, extra);
+ break;
+ case MP_PWRTRK:
+ DBG_88E("set case MP_PWRTRK\n");
+ rtw_mp_pwrtrk(dev, info, wrqu, extra);
+ break;
+ case EFUSE_SET:
+ DBG_88E("set case efuse set\n");
+ rtw_mp_efuse_set(dev, info, wdata, extra);
+ break;
+ }
+
+	rtw_msleep_os(10); /* delay 10 ms so the last packet can go out before the adb shell operation exits */
+ return 0;
+}
+
+static int rtw_tdls(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ return 0;
+}
+
+static int rtw_tdls_get(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ return 0;
+}
+
+static int rtw_test(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ u32 len;
+ u8 *pbuf, *pch;
+ char *ptmp;
+	const char *delim = ",";
+
+ DBG_88E("+%s\n", __func__);
+ len = wrqu->data.length;
+
+ pbuf = (u8 *)rtw_zmalloc(len);
+ if (pbuf == NULL) {
+ DBG_88E("%s: no memory!\n", __func__);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(pbuf, wrqu->data.pointer, len)) {
+ kfree(pbuf);
+ DBG_88E("%s: copy from user fail!\n", __func__);
+ return -EFAULT;
+ }
+ DBG_88E("%s: string =\"%s\"\n", __func__, pbuf);
+
+ ptmp = (char *)pbuf;
+ pch = strsep(&ptmp, delim);
+ if ((pch == NULL) || (strlen(pch) == 0)) {
+ kfree(pbuf);
+ DBG_88E("%s: parameter error(level 1)!\n", __func__);
+ return -EFAULT;
+ }
+ kfree(pbuf);
+ return 0;
+}
+
+static iw_handler rtw_handlers[] = {
+ NULL, /* SIOCSIWCOMMIT */
+ rtw_wx_get_name, /* SIOCGIWNAME */
+ dummy, /* SIOCSIWNWID */
+ dummy, /* SIOCGIWNWID */
+ rtw_wx_set_freq, /* SIOCSIWFREQ */
+ rtw_wx_get_freq, /* SIOCGIWFREQ */
+ rtw_wx_set_mode, /* SIOCSIWMODE */
+ rtw_wx_get_mode, /* SIOCGIWMODE */
+ dummy, /* SIOCSIWSENS */
+ rtw_wx_get_sens, /* SIOCGIWSENS */
+ NULL, /* SIOCSIWRANGE */
+ rtw_wx_get_range, /* SIOCGIWRANGE */
+ rtw_wx_set_priv, /* SIOCSIWPRIV */
+ NULL, /* SIOCGIWPRIV */
+ NULL, /* SIOCSIWSTATS */
+ NULL, /* SIOCGIWSTATS */
+ dummy, /* SIOCSIWSPY */
+ dummy, /* SIOCGIWSPY */
+	NULL, /* SIOCSIWTHRSPY */
+	NULL, /* SIOCGIWTHRSPY */
+ rtw_wx_set_wap, /* SIOCSIWAP */
+ rtw_wx_get_wap, /* SIOCGIWAP */
+ rtw_wx_set_mlme, /* request MLME operation; uses struct iw_mlme */
+	dummy, /* SIOCGIWAPLIST -- deprecated */
+ rtw_wx_set_scan, /* SIOCSIWSCAN */
+ rtw_wx_get_scan, /* SIOCGIWSCAN */
+ rtw_wx_set_essid, /* SIOCSIWESSID */
+ rtw_wx_get_essid, /* SIOCGIWESSID */
+ dummy, /* SIOCSIWNICKN */
+ rtw_wx_get_nick, /* SIOCGIWNICKN */
+ NULL, /* -- hole -- */
+ NULL, /* -- hole -- */
+ rtw_wx_set_rate, /* SIOCSIWRATE */
+ rtw_wx_get_rate, /* SIOCGIWRATE */
+ rtw_wx_set_rts, /* SIOCSIWRTS */
+ rtw_wx_get_rts, /* SIOCGIWRTS */
+ rtw_wx_set_frag, /* SIOCSIWFRAG */
+ rtw_wx_get_frag, /* SIOCGIWFRAG */
+ dummy, /* SIOCSIWTXPOW */
+ dummy, /* SIOCGIWTXPOW */
+ dummy, /* SIOCSIWRETRY */
+ rtw_wx_get_retry, /* SIOCGIWRETRY */
+ rtw_wx_set_enc, /* SIOCSIWENCODE */
+ rtw_wx_get_enc, /* SIOCGIWENCODE */
+ dummy, /* SIOCSIWPOWER */
+ rtw_wx_get_power, /* SIOCGIWPOWER */
+ NULL, /*---hole---*/
+ NULL, /*---hole---*/
+ rtw_wx_set_gen_ie, /* SIOCSIWGENIE */
+	NULL, /* SIOCGIWGENIE */
+ rtw_wx_set_auth, /* SIOCSIWAUTH */
+ NULL, /* SIOCGIWAUTH */
+ rtw_wx_set_enc_ext, /* SIOCSIWENCODEEXT */
+ NULL, /* SIOCGIWENCODEEXT */
+ rtw_wx_set_pmkid, /* SIOCSIWPMKSA */
+ NULL, /*---hole---*/
+};
+
+static const struct iw_priv_args rtw_private_args[] = {
+ {
+ SIOCIWFIRSTPRIV + 0x0,
+ IW_PRIV_TYPE_CHAR | 0x7FF, 0, "write"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x1,
+ IW_PRIV_TYPE_CHAR | 0x7FF,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ, "read"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x2, 0, 0, "driver_ext"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x3, 0, 0, "mp_ioctl"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x4,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "apinfo"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x5,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "setpid"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x6,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wps_start"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x7,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "get_sensitivity"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x8,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wps_prob_req_ie"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x9,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wps_assoc_req_ie"
+ },
+
+ {
+ SIOCIWFIRSTPRIV + 0xA,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "channel_plan"
+ },
+
+ {
+ SIOCIWFIRSTPRIV + 0xB,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "dbg"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0xC,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3, 0, "rfw"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0xD,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ, "rfr"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x10,
+ IW_PRIV_TYPE_CHAR | P2P_PRIVATE_IOCTL_SET_LEN, 0, "p2p_set"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x11,
+ IW_PRIV_TYPE_CHAR | P2P_PRIVATE_IOCTL_SET_LEN, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | P2P_PRIVATE_IOCTL_SET_LEN, "p2p_get"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x12,
+ IW_PRIV_TYPE_CHAR | P2P_PRIVATE_IOCTL_SET_LEN, IW_PRIV_TYPE_CHAR | IFNAMSIZ, "p2p_get2"
+ },
+ {SIOCIWFIRSTPRIV + 0x13, IW_PRIV_TYPE_CHAR | 128, 0, "NULL"},
+ {
+ SIOCIWFIRSTPRIV + 0x14,
+ IW_PRIV_TYPE_CHAR | 64, 0, "tdls"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x15,
+ IW_PRIV_TYPE_CHAR | P2P_PRIVATE_IOCTL_SET_LEN, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | P2P_PRIVATE_IOCTL_SET_LEN, "tdls_get"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x16,
+ IW_PRIV_TYPE_CHAR | 64, 0, "pm_set"
+ },
+
+ {SIOCIWFIRSTPRIV + 0x18, IW_PRIV_TYPE_CHAR | IFNAMSIZ, 0, "rereg_nd_name"},
+
+ {SIOCIWFIRSTPRIV + 0x1A, IW_PRIV_TYPE_CHAR | 1024, 0, "efuse_set"},
+ {SIOCIWFIRSTPRIV + 0x1B, IW_PRIV_TYPE_CHAR | 128, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "efuse_get"},
+	{SIOCIWFIRSTPRIV + 0x1D, IW_PRIV_TYPE_CHAR | 40, IW_PRIV_TYPE_CHAR | 0x7FF, "test"},
+
+ {SIOCIWFIRSTPRIV + 0x0E, IW_PRIV_TYPE_CHAR | 1024, 0, ""}, /* set */
+ {SIOCIWFIRSTPRIV + 0x0F, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, ""},/* get */
+/* --- sub-ioctls definitions --- */
+
+ {MP_START, IW_PRIV_TYPE_CHAR | 1024, 0, "mp_start"}, /* set */
+ {MP_PHYPARA, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_phypara"},/* get */
+ {MP_STOP, IW_PRIV_TYPE_CHAR | 1024, 0, "mp_stop"}, /* set */
+ {MP_CHANNEL, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_channel"},/* get */
+ {MP_BANDWIDTH, IW_PRIV_TYPE_CHAR | 1024, 0, "mp_bandwidth"}, /* set */
+ {MP_RATE, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_rate"},/* get */
+ {MP_RESET_STATS, IW_PRIV_TYPE_CHAR | 1024, 0, "mp_reset_stats"},
+ {MP_QUERY, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_query"}, /* get */
+ {READ_REG, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "read_reg"},
+ {MP_RATE, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_rate"},
+ {READ_RF, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "read_rf"},
+ {MP_PSD, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_psd"},
+ {MP_DUMP, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_dump"},
+ {MP_TXPOWER, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_txpower"},
+ {MP_ANT_TX, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_ant_tx"},
+ {MP_ANT_RX, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_ant_rx"},
+ {WRITE_REG, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "write_reg"},
+ {WRITE_RF, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "write_rf"},
+ {MP_CTX, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_ctx"},
+ {MP_ARX, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_arx"},
+ {MP_THER, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_ther"},
+ {EFUSE_SET, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "efuse_set"},
+ {EFUSE_GET, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "efuse_get"},
+ {MP_PWRTRK, IW_PRIV_TYPE_CHAR | 1024, 0, "mp_pwrtrk"},
+ {MP_QueryDrvStats, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_drvquery"},
+ {MP_IOCTL, IW_PRIV_TYPE_CHAR | 1024, 0, "mp_ioctl"}, /* mp_ioctl */
+ {MP_SetRFPathSwh, IW_PRIV_TYPE_CHAR | 1024, 0, "mp_setrfpath"},
+ {CTA_TEST, IW_PRIV_TYPE_CHAR | 1024, 0, "cta_test"},
+};
+
+static iw_handler rtw_private_handler[] = {
+	rtw_wx_write32, /* 0x00 */
+	rtw_wx_read32, /* 0x01 */
+	rtw_drvext_hdl, /* 0x02 */
+	rtw_mp_ioctl_hdl, /* 0x03 */
+
+/* for MM DTV platform */
+ rtw_get_ap_info, /* 0x04 */
+
+ rtw_set_pid, /* 0x05 */
+ rtw_wps_start, /* 0x06 */
+
+ rtw_wx_get_sensitivity, /* 0x07 */
+ rtw_wx_set_mtk_wps_probe_ie, /* 0x08 */
+ rtw_wx_set_mtk_wps_ie, /* 0x09 */
+
+/* Set Channel depend on the country code */
+ rtw_wx_set_channel_plan, /* 0x0A */
+
+ rtw_dbg_port, /* 0x0B */
+ rtw_wx_write_rf, /* 0x0C */
+ rtw_wx_read_rf, /* 0x0D */
+
+ rtw_mp_set, /* 0x0E */
+ rtw_mp_get, /* 0x0F */
+ rtw_p2p_set, /* 0x10 */
+ rtw_p2p_get, /* 0x11 */
+ rtw_p2p_get2, /* 0x12 */
+
+ NULL, /* 0x13 */
+ rtw_tdls, /* 0x14 */
+ rtw_tdls_get, /* 0x15 */
+
+ rtw_pm_set, /* 0x16 */
+ rtw_wx_priv_null, /* 0x17 */
+ rtw_rereg_nd_name, /* 0x18 */
+ rtw_wx_priv_null, /* 0x19 */
+
+ rtw_mp_efuse_set, /* 0x1A */
+ rtw_mp_efuse_get, /* 0x1B */
+ NULL, /* 0x1C is reserved for hostapd */
+ rtw_test, /* 0x1D */
+};
+
+static struct iw_statistics *rtw_get_wireless_stats(struct net_device *dev)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct iw_statistics *piwstats = &padapter->iwstats;
+ int tmp_level = 0;
+ int tmp_qual = 0;
+ int tmp_noise = 0;
+
+ if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
+ piwstats->qual.qual = 0;
+ piwstats->qual.level = 0;
+ piwstats->qual.noise = 0;
+ } else {
+ tmp_level = padapter->recvpriv.signal_strength;
+ tmp_qual = padapter->recvpriv.signal_qual;
+ tmp_noise = padapter->recvpriv.noise;
+
+ piwstats->qual.level = tmp_level;
+ piwstats->qual.qual = tmp_qual;
+ piwstats->qual.noise = tmp_noise;
+ }
+ piwstats->qual.updated = IW_QUAL_ALL_UPDATED;/* IW_QUAL_DBM; */
+ return &padapter->iwstats;
+}
+
+struct iw_handler_def rtw_handlers_def = {
+ .standard = rtw_handlers,
+ .num_standard = sizeof(rtw_handlers) / sizeof(iw_handler),
+ .private = rtw_private_handler,
+ .private_args = (struct iw_priv_args *)rtw_private_args,
+ .num_private = sizeof(rtw_private_handler) / sizeof(iw_handler),
+ .num_private_args = sizeof(rtw_private_args) / sizeof(struct iw_priv_args),
+ .get_wireless_stats = rtw_get_wireless_stats,
+};
+
+/* copy from net/wireless/wext.c start */
+/* ---------------------------------------------------------------- */
+/*
+ * Calculate size of private arguments
+ */
+static const char iw_priv_type_size[] = {
+ 0, /* IW_PRIV_TYPE_NONE */
+ 1, /* IW_PRIV_TYPE_BYTE */
+ 1, /* IW_PRIV_TYPE_CHAR */
+ 0, /* Not defined */
+ sizeof(__u32), /* IW_PRIV_TYPE_INT */
+ sizeof(struct iw_freq), /* IW_PRIV_TYPE_FLOAT */
+ sizeof(struct sockaddr), /* IW_PRIV_TYPE_ADDR */
+ 0, /* Not defined */
+};
+
+static int get_priv_size(__u16 args)
+{
+ int num = args & IW_PRIV_SIZE_MASK;
+ int type = (args & IW_PRIV_TYPE_MASK) >> 12;
+
+ return num * iw_priv_type_size[type];
+}
+/* copy from net/wireless/wext.c end */
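+/*
+ * For example, a descriptor declared as IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2
+ * (such as the "setpid" entry above) yields get_priv_size() = 2 * sizeof(__u32)
+ * = 8 bytes, i.e. room for its two fixed integer arguments.
+ */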
+
+static int rtw_ioctl_wext_private(struct net_device *dev, union iwreq_data *wrq_data)
+{
+ int err = 0;
+ u8 *input = NULL;
+ u32 input_len = 0;
+ const char delim[] = " ";
+ u8 *output = NULL;
+ u32 output_len = 0;
+ u32 count = 0;
+ u8 *buffer = NULL;
+ u32 buffer_len = 0;
+ char *ptr = NULL;
+ u8 cmdname[17] = {0}; /* IFNAMSIZ+1 */
+ u32 cmdlen;
+ s32 len;
+ u8 *extra = NULL;
+ u32 extra_size = 0;
+
+ s32 k;
+ const iw_handler *priv; /* Private ioctl */
+ const struct iw_priv_args *priv_args; /* Private ioctl description */
+ u32 num_priv_args; /* Number of descriptions */
+ iw_handler handler;
+ int temp;
+ int subcmd = 0; /* sub-ioctl index */
+ int offset = 0; /* Space for sub-ioctl index */
+
+ union iwreq_data wdata;
+
+ memcpy(&wdata, wrq_data, sizeof(wdata));
+
+ input_len = wdata.data.length;
+ input = rtw_zmalloc(input_len);
+ if (NULL == input)
+ return -ENOMEM;
+ if (copy_from_user(input, wdata.data.pointer, input_len)) {
+ err = -EFAULT;
+ goto exit;
+ }
+ ptr = input;
+ len = input_len;
+
+ sscanf(ptr, "%16s", cmdname);
+ cmdlen = strlen(cmdname);
+ DBG_88E("%s: cmd =%s\n", __func__, cmdname);
+
+ /* skip command string */
+ if (cmdlen > 0)
+ cmdlen += 1; /* skip one space */
+ ptr += cmdlen;
+ len -= cmdlen;
+ DBG_88E("%s: parameters =%s\n", __func__, ptr);
+
+ priv = rtw_private_handler;
+ priv_args = rtw_private_args;
+ num_priv_args = sizeof(rtw_private_args) / sizeof(struct iw_priv_args);
+
+ if (num_priv_args == 0) {
+ err = -EOPNOTSUPP;
+ goto exit;
+ }
+
+ /* Search the correct ioctl */
+ k = -1;
+ while ((++k < num_priv_args) && strcmp(priv_args[k].name, cmdname));
+
+ /* If not found... */
+ if (k == num_priv_args) {
+ err = -EOPNOTSUPP;
+ goto exit;
+ }
+
+ /* Watch out for sub-ioctls ! */
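+	/*
+	 * Example: "mp_start" is listed above as sub-ioctl MP_START with
+	 * set_args = IW_PRIV_TYPE_CHAR | 1024 and no get_args, so the search
+	 * below resolves it to the unnamed "set" descriptor at
+	 * SIOCIWFIRSTPRIV + 0x0E, while MP_START itself is kept as the
+	 * sub-command number.
+	 */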
+ if (priv_args[k].cmd < SIOCDEVPRIVATE) {
+ int j = -1;
+
+ /* Find the matching *real* ioctl */
+ while ((++j < num_priv_args) && ((priv_args[j].name[0] != '\0') ||
+ (priv_args[j].set_args != priv_args[k].set_args) ||
+ (priv_args[j].get_args != priv_args[k].get_args)));
+
+ /* If not found... */
+ if (j == num_priv_args) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /* Save sub-ioctl number */
+ subcmd = priv_args[k].cmd;
+ /* Reserve one int (simplify alignment issues) */
+ offset = sizeof(__u32);
+ /* Use real ioctl definition from now on */
+ k = j;
+ }
+
+ buffer = rtw_zmalloc(4096);
+ if (NULL == buffer) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /* If we have to set some data */
+ if ((priv_args[k].set_args & IW_PRIV_TYPE_MASK) &&
+ (priv_args[k].set_args & IW_PRIV_SIZE_MASK)) {
+ u8 *str;
+
+ switch (priv_args[k].set_args & IW_PRIV_TYPE_MASK) {
+ case IW_PRIV_TYPE_BYTE:
+ /* Fetch args */
+ count = 0;
+ do {
+ str = strsep(&ptr, delim);
+ if (NULL == str)
+ break;
+ sscanf(str, "%i", &temp);
+ buffer[count++] = (u8)temp;
+ } while (1);
+ buffer_len = count;
+ /* Number of args to fetch */
+ wdata.data.length = count;
+ if (wdata.data.length > (priv_args[k].set_args & IW_PRIV_SIZE_MASK))
+ wdata.data.length = priv_args[k].set_args & IW_PRIV_SIZE_MASK;
+ break;
+ case IW_PRIV_TYPE_INT:
+ /* Fetch args */
+ count = 0;
+ do {
+ str = strsep(&ptr, delim);
+ if (NULL == str)
+ break;
+ sscanf(str, "%i", &temp);
+ ((s32 *)buffer)[count++] = (s32)temp;
+ } while (1);
+ buffer_len = count * sizeof(s32);
+ /* Number of args to fetch */
+ wdata.data.length = count;
+ if (wdata.data.length > (priv_args[k].set_args & IW_PRIV_SIZE_MASK))
+ wdata.data.length = priv_args[k].set_args & IW_PRIV_SIZE_MASK;
+ break;
+ case IW_PRIV_TYPE_CHAR:
+ if (len > 0) {
+ /* Size of the string to fetch */
+ wdata.data.length = len;
+ if (wdata.data.length > (priv_args[k].set_args & IW_PRIV_SIZE_MASK))
+ wdata.data.length = priv_args[k].set_args & IW_PRIV_SIZE_MASK;
+
+ /* Fetch string */
+ memcpy(buffer, ptr, wdata.data.length);
+ } else {
+ wdata.data.length = 1;
+ buffer[0] = '\0';
+ }
+ buffer_len = wdata.data.length;
+ break;
+ default:
+ DBG_88E("%s: Not yet implemented...\n", __func__);
+ err = -1;
+ goto exit;
+ }
+
+ if ((priv_args[k].set_args & IW_PRIV_SIZE_FIXED) &&
+ (wdata.data.length != (priv_args[k].set_args & IW_PRIV_SIZE_MASK))) {
+ DBG_88E("%s: The command %s needs exactly %d argument(s)...\n",
+ __func__, cmdname, priv_args[k].set_args & IW_PRIV_SIZE_MASK);
+ err = -EINVAL;
+ goto exit;
+ }
+ } else {
+		/* no SET args to fetch */
+ wdata.data.length = 0L;
+ }
+
+ /* Those two tests are important. They define how the driver
+ * will have to handle the data */
+ if ((priv_args[k].set_args & IW_PRIV_SIZE_FIXED) &&
+ ((get_priv_size(priv_args[k].set_args) + offset) <= IFNAMSIZ)) {
+ /* First case : all SET args fit within wrq */
+ if (offset)
+ wdata.mode = subcmd;
+ memcpy(wdata.name + offset, buffer, IFNAMSIZ - offset);
+ } else {
+ if ((priv_args[k].set_args == 0) &&
+ (priv_args[k].get_args & IW_PRIV_SIZE_FIXED) &&
+ (get_priv_size(priv_args[k].get_args) <= IFNAMSIZ)) {
+ /* Second case : no SET args, GET args fit within wrq */
+ if (offset)
+ wdata.mode = subcmd;
+ } else {
+ /* Third case : args won't fit in wrq, or variable number of args */
+ if (copy_to_user(wdata.data.pointer, buffer, buffer_len)) {
+ err = -EFAULT;
+ goto exit;
+ }
+ wdata.data.flags = subcmd;
+ }
+ }
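+	/*
+	 * Continuing the "mp_start" example: its set_args is
+	 * IW_PRIV_TYPE_CHAR | 1024 without IW_PRIV_SIZE_FIXED, so the third
+	 * case applies: the argument string is copied back to the user buffer
+	 * and wdata.data.flags carries the sub-command for the handler.
+	 */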
+
+ kfree(input);
+ input = NULL;
+
+ extra_size = 0;
+ if (IW_IS_SET(priv_args[k].cmd)) {
+ /* Size of set arguments */
+ extra_size = get_priv_size(priv_args[k].set_args);
+
+		/* Does it fit in iwr? */
+ if ((priv_args[k].set_args & IW_PRIV_SIZE_FIXED) &&
+ ((extra_size + offset) <= IFNAMSIZ))
+ extra_size = 0;
+ } else {
+ /* Size of get arguments */
+ extra_size = get_priv_size(priv_args[k].get_args);
+
+		/* Does it fit in iwr? */
+ if ((priv_args[k].get_args & IW_PRIV_SIZE_FIXED) &&
+ (extra_size <= IFNAMSIZ))
+ extra_size = 0;
+ }
+
+ if (extra_size == 0) {
+ extra = (u8 *)&wdata;
+ kfree(buffer);
+ buffer = NULL;
+ } else {
+ extra = buffer;
+ }
+
+ handler = priv[priv_args[k].cmd - SIOCIWFIRSTPRIV];
+ err = handler(dev, NULL, &wdata, extra);
+
+ /* If we have to get some data */
+ if ((priv_args[k].get_args & IW_PRIV_TYPE_MASK) &&
+ (priv_args[k].get_args & IW_PRIV_SIZE_MASK)) {
+ int j;
+ int n = 0; /* number of args */
+ u8 str[20] = {0};
+
+ /* Check where is the returned data */
+ if ((priv_args[k].get_args & IW_PRIV_SIZE_FIXED) &&
+ (get_priv_size(priv_args[k].get_args) <= IFNAMSIZ))
+ n = priv_args[k].get_args & IW_PRIV_SIZE_MASK;
+ else
+ n = wdata.data.length;
+
+ output = rtw_zmalloc(4096);
+ if (NULL == output) {
+ err = -ENOMEM;
+ goto exit;
+ }
+ switch (priv_args[k].get_args & IW_PRIV_TYPE_MASK) {
+ case IW_PRIV_TYPE_BYTE:
+ /* Display args */
+ for (j = 0; j < n; j++) {
+ sprintf(str, "%d ", extra[j]);
+ len = strlen(str);
+ output_len = strlen(output);
+ if ((output_len + len + 1) > 4096) {
+ err = -E2BIG;
+ goto exit;
+ }
+ memcpy(output+output_len, str, len);
+ }
+ break;
+ case IW_PRIV_TYPE_INT:
+ /* Display args */
+ for (j = 0; j < n; j++) {
+ sprintf(str, "%d ", ((__s32 *)extra)[j]);
+ len = strlen(str);
+ output_len = strlen(output);
+ if ((output_len + len + 1) > 4096) {
+ err = -E2BIG;
+ goto exit;
+ }
+ memcpy(output+output_len, str, len);
+ }
+ break;
+ case IW_PRIV_TYPE_CHAR:
+ /* Display args */
+ memcpy(output, extra, n);
+ break;
+ default:
+ DBG_88E("%s: Not yet implemented...\n", __func__);
+ err = -1;
+ goto exit;
+ }
+
+ output_len = strlen(output) + 1;
+ wrq_data->data.length = output_len;
+ if (copy_to_user(wrq_data->data.pointer, output, output_len)) {
+ err = -EFAULT;
+ goto exit;
+ }
+ } else {
+		/* no GET args to return */
+ wrq_data->data.length = 0;
+ }
+
+exit:
+ kfree(input);
+ kfree(buffer);
+ kfree(output);
+ return err;
+}
+
+#include <rtw_android.h>
+int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct iwreq *wrq = (struct iwreq *)rq;
+ int ret = 0;
+
+ switch (cmd) {
+ case RTL_IOCTL_WPA_SUPPLICANT:
+ ret = wpa_supplicant_ioctl(dev, &wrq->u.data);
+ break;
+#ifdef CONFIG_88EU_AP_MODE
+ case RTL_IOCTL_HOSTAPD:
+ ret = rtw_hostapd_ioctl(dev, &wrq->u.data);
+ break;
+#endif /* CONFIG_88EU_AP_MODE */
+ case SIOCDEVPRIVATE:
+ ret = rtw_ioctl_wext_private(dev, &wrq->u);
+ break;
+ case (SIOCDEVPRIVATE+1):
+ ret = rtw_android_priv_cmd(dev, rq, cmd);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ return ret;
+}
diff --git a/drivers/staging/rtl8188eu/os_dep/mlme_linux.c b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
new file mode 100644
index 0000000..57d1ff7
--- /dev/null
+++ b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
@@ -0,0 +1,246 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+
+#define _MLME_OSDEP_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <mlme_osdep.h>
+
+void rtw_join_timeout_handler(void *FunctionContext)
+{
+ struct adapter *adapter = (struct adapter *)FunctionContext;
+
+ _rtw_join_timeout_handler(adapter);
+}
+
+
+void _rtw_scan_timeout_handler(void *FunctionContext)
+{
+ struct adapter *adapter = (struct adapter *)FunctionContext;
+
+ rtw_scan_timeout_handler(adapter);
+}
+
+static void _dynamic_check_timer_handlder(void *FunctionContext)
+{
+ struct adapter *adapter = (struct adapter *)FunctionContext;
+
+ if (adapter->registrypriv.mp_mode == 1)
+ return;
+ rtw_dynamic_check_timer_handlder(adapter);
+ _set_timer(&adapter->mlmepriv.dynamic_chk_timer, 2000);
+}
+
+void rtw_init_mlme_timer(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ _init_timer(&(pmlmepriv->assoc_timer), padapter->pnetdev, rtw_join_timeout_handler, padapter);
+ _init_timer(&(pmlmepriv->scan_to_timer), padapter->pnetdev, _rtw_scan_timeout_handler, padapter);
+ _init_timer(&(pmlmepriv->dynamic_chk_timer), padapter->pnetdev, _dynamic_check_timer_handlder, padapter);
+}
+
+void rtw_os_indicate_connect(struct adapter *adapter)
+{
+_func_enter_;
+ rtw_indicate_wx_assoc_event(adapter);
+ netif_carrier_on(adapter->pnetdev);
+ if (adapter->pid[2] != 0)
+ rtw_signal_process(adapter->pid[2], SIGALRM);
+_func_exit_;
+}
+
+void rtw_os_indicate_scan_done(struct adapter *padapter, bool aborted)
+{
+ indicate_wx_scan_complete_event(padapter);
+}
+
+static struct rt_pmkid_list backup_pmkid[NUM_PMKID_CACHE];
+
+void rtw_reset_securitypriv(struct adapter *adapter)
+{
+ u8 backup_index = 0;
+ u8 backup_counter = 0x00;
+ u32 backup_time = 0;
+
+ if (adapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) {
+ /* 802.1x */
+		/* We have to back up the PMK information for the WiFi PMK caching test item. */
+		/* Back up the btkip_countermeasure information. */
+		/* When the countermeasure is triggered, the driver has to disconnect from the AP for 60 seconds. */
+ _rtw_memset(&backup_pmkid[0], 0x00, sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
+ memcpy(&backup_pmkid[0], &adapter->securitypriv.PMKIDList[0], sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
+ backup_index = adapter->securitypriv.PMKIDIndex;
+ backup_counter = adapter->securitypriv.btkip_countermeasure;
+ backup_time = adapter->securitypriv.btkip_countermeasure_time;
+ _rtw_memset((unsigned char *)&adapter->securitypriv, 0, sizeof(struct security_priv));
+
+ /* Restore the PMK information to securitypriv structure for the following connection. */
+ memcpy(&adapter->securitypriv.PMKIDList[0],
+ &backup_pmkid[0],
+ sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
+ adapter->securitypriv.PMKIDIndex = backup_index;
+ adapter->securitypriv.btkip_countermeasure = backup_counter;
+ adapter->securitypriv.btkip_countermeasure_time = backup_time;
+ adapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
+ adapter->securitypriv.ndisencryptstatus = Ndis802_11WEPDisabled;
+ } else {
+ /* reset values in securitypriv */
+ struct security_priv *psec_priv = &adapter->securitypriv;
+
+ psec_priv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
+ psec_priv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ psec_priv->dot11PrivacyKeyIndex = 0;
+ psec_priv->dot118021XGrpPrivacy = _NO_PRIVACY_;
+ psec_priv->dot118021XGrpKeyid = 1;
+ psec_priv->ndisauthtype = Ndis802_11AuthModeOpen;
+ psec_priv->ndisencryptstatus = Ndis802_11WEPDisabled;
+ }
+}
+
+void rtw_os_indicate_disconnect(struct adapter *adapter)
+{
+_func_enter_;
+ netif_carrier_off(adapter->pnetdev); /* Do it first for tx broadcast pkt after disconnection issue! */
+ rtw_indicate_wx_disassoc_event(adapter);
+ rtw_reset_securitypriv(adapter);
+_func_exit_;
+}
+
+void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
+{
+ uint len;
+ u8 *buff, *p, i;
+ union iwreq_data wrqu;
+
+_func_enter_;
+ RT_TRACE(_module_mlme_osdep_c_, _drv_info_,
+ ("+rtw_report_sec_ie, authmode=%d\n", authmode));
+ buff = NULL;
+ if (authmode == _WPA_IE_ID_) {
+ RT_TRACE(_module_mlme_osdep_c_, _drv_info_,
+ ("rtw_report_sec_ie, authmode=%d\n", authmode));
+ buff = rtw_malloc(IW_CUSTOM_MAX);
+ if (!buff)
+ goto exit;
+ _rtw_memset(buff, 0, IW_CUSTOM_MAX);
+ p = buff;
+ p += sprintf(p, "ASSOCINFO(ReqIEs =");
+ len = sec_ie[1]+2;
+ len = (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX;
+ for (i = 0; i < len; i++)
+ p += sprintf(p, "%02x", sec_ie[i]);
+ p += sprintf(p, ")");
+ _rtw_memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.data.length = p-buff;
+ wrqu.data.length = (wrqu.data.length < IW_CUSTOM_MAX) ?
+ wrqu.data.length : IW_CUSTOM_MAX;
+ wireless_send_event(adapter->pnetdev, IWEVCUSTOM, &wrqu, buff);
+ kfree(buff);
+ }
+exit:
+_func_exit_;
+}
+
+static void _survey_timer_hdl(void *FunctionContext)
+{
+ struct adapter *padapter = (struct adapter *)FunctionContext;
+
+ survey_timer_hdl(padapter);
+}
+
+static void _link_timer_hdl(void *FunctionContext)
+{
+ struct adapter *padapter = (struct adapter *)FunctionContext;
+ link_timer_hdl(padapter);
+}
+
+static void _addba_timer_hdl(void *FunctionContext)
+{
+ struct sta_info *psta = (struct sta_info *)FunctionContext;
+ addba_timer_hdl(psta);
+}
+
+void init_addba_retry_timer(struct adapter *padapter, struct sta_info *psta)
+{
+ _init_timer(&psta->addba_retry_timer, padapter->pnetdev, _addba_timer_hdl, psta);
+}
+
+void init_mlme_ext_timer(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ _init_timer(&pmlmeext->survey_timer, padapter->pnetdev, _survey_timer_hdl, padapter);
+ _init_timer(&pmlmeext->link_timer, padapter->pnetdev, _link_timer_hdl, padapter);
+}
+
+#ifdef CONFIG_88EU_AP_MODE
+
+void rtw_indicate_sta_assoc_event(struct adapter *padapter, struct sta_info *psta)
+{
+ union iwreq_data wrqu;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ if (psta == NULL)
+ return;
+
+ if (psta->aid > NUM_STA)
+ return;
+
+ if (pstapriv->sta_aid[psta->aid - 1] != psta)
+ return;
+
+ wrqu.addr.sa_family = ARPHRD_ETHER;
+
+ memcpy(wrqu.addr.sa_data, psta->hwaddr, ETH_ALEN);
+
+ DBG_88E("+rtw_indicate_sta_assoc_event\n");
+
+ wireless_send_event(padapter->pnetdev, IWEVREGISTERED, &wrqu, NULL);
+}
+
+void rtw_indicate_sta_disassoc_event(struct adapter *padapter, struct sta_info *psta)
+{
+ union iwreq_data wrqu;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ if (psta == NULL)
+ return;
+
+ if (psta->aid > NUM_STA)
+ return;
+
+ if (pstapriv->sta_aid[psta->aid - 1] != psta)
+ return;
+
+ wrqu.addr.sa_family = ARPHRD_ETHER;
+
+ memcpy(wrqu.addr.sa_data, psta->hwaddr, ETH_ALEN);
+
+ DBG_88E("+rtw_indicate_sta_disassoc_event\n");
+
+ wireless_send_event(padapter->pnetdev, IWEVEXPIRED, &wrqu, NULL);
+}
+
+#endif
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
new file mode 100644
index 0000000..63bc913
--- /dev/null
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -0,0 +1,1251 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _OS_INTFS_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <xmit_osdep.h>
+#include <recv_osdep.h>
+#include <hal_intf.h>
+#include <rtw_ioctl.h>
+#include <rtw_version.h>
+
+#include <usb_osintf.h>
+#include <usb_hal.h>
+#include <rtw_br_ext.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek Wireless Lan Driver");
+MODULE_AUTHOR("Realtek Semiconductor Corp.");
+MODULE_VERSION(DRIVERVERSION);
+
+#define CONFIG_BR_EXT_BRNAME "br0"
+#define RTW_NOTCH_FILTER 0 /* 0:Disable, 1:Enable, */
+
+/* module param defaults */
+static int rtw_chip_version = 0x00;
+static int rtw_rfintfs = HWPI;
+static int rtw_lbkmode;/* RTL8712_AIR_TRX; */
+static int rtw_network_mode = Ndis802_11IBSS;/* Ndis802_11Infrastructure; infra, ad-hoc, auto */
+static int rtw_channel = 1;/* ad-hoc support requirement */
+static int rtw_wireless_mode = WIRELESS_11BG_24N;
+static int rtw_vrtl_carrier_sense = AUTO_VCS;
+static int rtw_vcs_type = RTS_CTS;
+static int rtw_rts_thresh = 2347;
+static int rtw_frag_thresh = 2346;
+static int rtw_preamble = PREAMBLE_LONG;/* long, short, auto */
+static int rtw_scan_mode = 1;/* active, passive */
+static int rtw_adhoc_tx_pwr = 1;
+static int rtw_soft_ap;
+static int rtw_power_mgnt = 1;
+static int rtw_ips_mode = IPS_NORMAL;
+
+static int rtw_smart_ps = 2;
+
+module_param(rtw_ips_mode, int, 0644);
+MODULE_PARM_DESC(rtw_ips_mode, "The default IPS mode");
+
+static int rtw_debug = 1;
+static int rtw_radio_enable = 1;
+static int rtw_long_retry_lmt = 7;
+static int rtw_short_retry_lmt = 7;
+static int rtw_busy_thresh = 40;
+static int rtw_ack_policy = NORMAL_ACK;
+
+static int rtw_mp_mode;
+
+static int rtw_software_encrypt;
+static int rtw_software_decrypt;
+
+static int rtw_acm_method;/* 0:By SW 1:By HW. */
+
+static int rtw_wmm_enable = 1;/* default is set to enable the wmm. */
+static int rtw_uapsd_enable;
+static int rtw_uapsd_max_sp = NO_LIMIT;
+static int rtw_uapsd_acbk_en;
+static int rtw_uapsd_acbe_en;
+static int rtw_uapsd_acvi_en;
+static int rtw_uapsd_acvo_en;
+
+int rtw_ht_enable = 1;
+int rtw_cbw40_enable = 3; /* 0: disable, bit(0): enable 2.4G, bit(1): enable 5G */
+int rtw_ampdu_enable = 1;/* for enable tx_ampdu */
+static int rtw_rx_stbc = 1;/* 0: disable, bit(0): enable 2.4G, bit(1): enable 5G; default enables 2.4 GHz only because of an IOT issue with Buffalo APs at 5 GHz */
+static int rtw_ampdu_amsdu;/* 0: disabled, 1:enabled, 2:auto */
+
+static int rtw_lowrate_two_xmit = 1;/* Use 2 path Tx to transmit MCS0~7 and legacy mode */
+
+static int rtw_rf_config = RF_819X_MAX_TYPE; /* auto */
+static int rtw_low_power;
+static int rtw_wifi_spec;
+static int rtw_channel_plan = RT_CHANNEL_DOMAIN_MAX;
+static int rtw_AcceptAddbaReq = true;/* 0:Reject AP's Add BA req, 1:Accept AP's Add BA req. */
+
+static int rtw_antdiv_cfg = 2; /* 0:OFF , 1:ON, 2:decide by Efuse config */
+static int rtw_antdiv_type; /* 0:decide by efuse 1: for 88EE, 1Tx and 1RxCG are diversity.(2 Ant with SPDT), 2: for 88EE, 1Tx and 2Rx are diversity.(2 Ant, Tx and RxCG are both on aux port, RxCS is on main port), 3: for 88EE, 1Tx and 1RxCG are fixed.(1Ant, Tx and RxCG are both on aux port) */
+
+static int rtw_enusbss;/* 0:disable, 1:enable */
+
+static int rtw_hwpdn_mode = 2;/* 0:disable, 1:enable, 2: by EFUSE config */
+
+static int rtw_hwpwrp_detect; /* HW power ping detect 0:disable , 1:enable */
+
+static int rtw_hw_wps_pbc = 1;
+
+int rtw_mc2u_disable;
+
+static int rtw_80211d;
+
+static char *ifname = "wlan%d";
+module_param(ifname, charp, 0644);
+MODULE_PARM_DESC(ifname, "The default name to allocate for first interface");
+
+static char *if2name = "wlan%d";
+module_param(if2name, charp, 0644);
+MODULE_PARM_DESC(if2name, "The default name to allocate for second interface");
+
+char *rtw_initmac; /* temp mac address if users want to use instead of the mac address in Efuse */
+
+module_param(rtw_initmac, charp, 0644);
+module_param(rtw_channel_plan, int, 0644);
+module_param(rtw_chip_version, int, 0644);
+module_param(rtw_rfintfs, int, 0644);
+module_param(rtw_lbkmode, int, 0644);
+module_param(rtw_network_mode, int, 0644);
+module_param(rtw_channel, int, 0644);
+module_param(rtw_mp_mode, int, 0644);
+module_param(rtw_wmm_enable, int, 0644);
+module_param(rtw_vrtl_carrier_sense, int, 0644);
+module_param(rtw_vcs_type, int, 0644);
+module_param(rtw_busy_thresh, int, 0644);
+module_param(rtw_ht_enable, int, 0644);
+module_param(rtw_cbw40_enable, int, 0644);
+module_param(rtw_ampdu_enable, int, 0644);
+module_param(rtw_rx_stbc, int, 0644);
+module_param(rtw_ampdu_amsdu, int, 0644);
+module_param(rtw_lowrate_two_xmit, int, 0644);
+module_param(rtw_rf_config, int, 0644);
+module_param(rtw_power_mgnt, int, 0644);
+module_param(rtw_smart_ps, int, 0644);
+module_param(rtw_low_power, int, 0644);
+module_param(rtw_wifi_spec, int, 0644);
+module_param(rtw_antdiv_cfg, int, 0644);
+module_param(rtw_antdiv_type, int, 0644);
+module_param(rtw_enusbss, int, 0644);
+module_param(rtw_hwpdn_mode, int, 0644);
+module_param(rtw_hwpwrp_detect, int, 0644);
+module_param(rtw_hw_wps_pbc, int, 0644);
+
+static uint rtw_max_roaming_times = 2;
+module_param(rtw_max_roaming_times, uint, 0644);
+MODULE_PARM_DESC(rtw_max_roaming_times, "The max roaming times to try");
+
+static int rtw_fw_iol = 1;/* 0:Disable, 1:enable, 2:by usb speed */
+module_param(rtw_fw_iol, int, 0644);
+MODULE_PARM_DESC(rtw_fw_iol, "FW IOL");
+
+module_param(rtw_mc2u_disable, int, 0644);
+
+module_param(rtw_80211d, int, 0644);
+MODULE_PARM_DESC(rtw_80211d, "Enable 802.11d mechanism");
+
+static uint rtw_notch_filter = RTW_NOTCH_FILTER;
+module_param(rtw_notch_filter, uint, 0644);
+MODULE_PARM_DESC(rtw_notch_filter, "0:Disable, 1:Enable, 2:Enable only for P2P");
+module_param_named(debug, rtw_debug, int, 0444);
+MODULE_PARM_DESC(debug, "Set debug level (1-9) (default 1)");
+
+/* dummy routines */
+void rtw_proc_remove_one(struct net_device *dev)
+{
+}
+
+void rtw_proc_init_one(struct net_device *dev)
+{
+}
+
+#if 0 /* TODO: Convert these to /sys */
+void rtw_proc_init_one(struct net_device *dev)
+{
+ struct proc_dir_entry *dir_dev = NULL;
+ struct proc_dir_entry *entry = NULL;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ u8 rf_type;
+
+ if (rtw_proc == NULL) {
+ memcpy(rtw_proc_name, DRV_NAME, sizeof(DRV_NAME));
+
+ rtw_proc = create_proc_entry(rtw_proc_name, S_IFDIR, init_net.proc_net);
+ if (rtw_proc == NULL) {
+ DBG_88E(KERN_ERR "Unable to create rtw_proc directory\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("ver_info", S_IFREG | S_IRUGO, rtw_proc, proc_get_drv_version, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+ }
+
+ if (padapter->dir_dev == NULL) {
+ padapter->dir_dev = create_proc_entry(dev->name,
+ S_IFDIR | S_IRUGO | S_IXUGO,
+ rtw_proc);
+ dir_dev = padapter->dir_dev;
+ if (dir_dev == NULL) {
+ if (rtw_proc_cnt == 0) {
+ if (rtw_proc) {
+ remove_proc_entry(rtw_proc_name, init_net.proc_net);
+ rtw_proc = NULL;
+ }
+ }
+
+ pr_info("Unable to create dir_dev directory\n");
+ return;
+ }
+ } else {
+ return;
+ }
+
+ rtw_proc_cnt++;
+
+ entry = create_proc_read_entry("write_reg", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_write_reg, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+ entry->write_proc = proc_set_write_reg;
+
+ entry = create_proc_read_entry("read_reg", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_read_reg, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+ entry->write_proc = proc_set_read_reg;
+
+
+ entry = create_proc_read_entry("fwstate", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_fwstate, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("sec_info", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_sec_info, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("mlmext_state", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_mlmext_state, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("qos_option", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_qos_option, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("ht_option", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_ht_option, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("rf_info", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_rf_info, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("ap_info", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_ap_info, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("adapter_state", S_IFREG | S_IRUGO,
+				       dir_dev, proc_get_adapter_state, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("trx_info", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_trx_info, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("mac_reg_dump1", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_mac_reg_dump1, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("mac_reg_dump2", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_mac_reg_dump2, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("mac_reg_dump3", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_mac_reg_dump3, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("bb_reg_dump1", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_bb_reg_dump1, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("bb_reg_dump2", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_bb_reg_dump2, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("bb_reg_dump3", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_bb_reg_dump3, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("rf_reg_dump1", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_rf_reg_dump1, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("rf_reg_dump2", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_rf_reg_dump2, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+ if ((RF_1T2R == rf_type) || (RF_1T1R == rf_type)) {
+ entry = create_proc_read_entry("rf_reg_dump3", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_rf_reg_dump3, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("rf_reg_dump4", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_rf_reg_dump4, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+ }
+
+#ifdef CONFIG_88EU_AP_MODE
+
+ entry = create_proc_read_entry("all_sta_info", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_all_sta_info, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+#endif
+
+ entry = create_proc_read_entry("best_channel", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_best_channel, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("rx_signal", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_rx_signal, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+ entry->write_proc = proc_set_rx_signal;
+ entry = create_proc_read_entry("ht_enable", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_ht_enable, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+ entry->write_proc = proc_set_ht_enable;
+
+ entry = create_proc_read_entry("cbw40_enable", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_cbw40_enable, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+ entry->write_proc = proc_set_cbw40_enable;
+
+ entry = create_proc_read_entry("ampdu_enable", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_ampdu_enable, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+ entry->write_proc = proc_set_ampdu_enable;
+
+ entry = create_proc_read_entry("rx_stbc", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_rx_stbc, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+ entry->write_proc = proc_set_rx_stbc;
+
+ entry = create_proc_read_entry("path_rssi", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_two_path_rssi, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+ entry = create_proc_read_entry("rssi_disp", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_rssi_disp, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+ entry->write_proc = proc_set_rssi_disp;
+}
+
+void rtw_proc_remove_one(struct net_device *dev)
+{
+ struct proc_dir_entry *dir_dev = NULL;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ u8 rf_type;
+
+ dir_dev = padapter->dir_dev;
+ padapter->dir_dev = NULL;
+
+ if (dir_dev) {
+ remove_proc_entry("write_reg", dir_dev);
+ remove_proc_entry("read_reg", dir_dev);
+ remove_proc_entry("fwstate", dir_dev);
+ remove_proc_entry("sec_info", dir_dev);
+ remove_proc_entry("mlmext_state", dir_dev);
+ remove_proc_entry("qos_option", dir_dev);
+ remove_proc_entry("ht_option", dir_dev);
+ remove_proc_entry("rf_info", dir_dev);
+ remove_proc_entry("ap_info", dir_dev);
+ remove_proc_entry("adapter_state", dir_dev);
+ remove_proc_entry("trx_info", dir_dev);
+ remove_proc_entry("mac_reg_dump1", dir_dev);
+ remove_proc_entry("mac_reg_dump2", dir_dev);
+ remove_proc_entry("mac_reg_dump3", dir_dev);
+ remove_proc_entry("bb_reg_dump1", dir_dev);
+ remove_proc_entry("bb_reg_dump2", dir_dev);
+ remove_proc_entry("bb_reg_dump3", dir_dev);
+ remove_proc_entry("rf_reg_dump1", dir_dev);
+ remove_proc_entry("rf_reg_dump2", dir_dev);
+ rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+ if ((RF_1T2R == rf_type) || (RF_1T1R == rf_type)) {
+ remove_proc_entry("rf_reg_dump3", dir_dev);
+ remove_proc_entry("rf_reg_dump4", dir_dev);
+ }
+#ifdef CONFIG_88EU_AP_MODE
+ remove_proc_entry("all_sta_info", dir_dev);
+#endif
+
+ remove_proc_entry("best_channel", dir_dev);
+ remove_proc_entry("rx_signal", dir_dev);
+ remove_proc_entry("cbw40_enable", dir_dev);
+ remove_proc_entry("ht_enable", dir_dev);
+ remove_proc_entry("ampdu_enable", dir_dev);
+ remove_proc_entry("rx_stbc", dir_dev);
+ remove_proc_entry("path_rssi", dir_dev);
+ remove_proc_entry("rssi_disp", dir_dev);
+ remove_proc_entry(dev->name, rtw_proc);
+ dir_dev = NULL;
+ } else {
+ return;
+ }
+ rtw_proc_cnt--;
+
+ if (rtw_proc_cnt == 0) {
+ if (rtw_proc) {
+ remove_proc_entry("ver_info", rtw_proc);
+
+ remove_proc_entry(rtw_proc_name, init_net.proc_net);
+ rtw_proc = NULL;
+ }
+ }
+}
+#endif
+
+static uint loadparam(struct adapter *padapter, struct net_device *pnetdev)
+{
+ uint status = _SUCCESS;
+ struct registry_priv *registry_par = &padapter->registrypriv;
+
+_func_enter_;
+
+ GlobalDebugLevel = rtw_debug;
+ registry_par->chip_version = (u8)rtw_chip_version;
+ registry_par->rfintfs = (u8)rtw_rfintfs;
+ registry_par->lbkmode = (u8)rtw_lbkmode;
+ registry_par->network_mode = (u8)rtw_network_mode;
+
+ memcpy(registry_par->ssid.Ssid, "ANY", 3);
+ registry_par->ssid.SsidLength = 3;
+
+ registry_par->channel = (u8)rtw_channel;
+ registry_par->wireless_mode = (u8)rtw_wireless_mode;
+	registry_par->vrtl_carrier_sense = (u8)rtw_vrtl_carrier_sense;
+ registry_par->vcs_type = (u8)rtw_vcs_type;
+ registry_par->rts_thresh = (u16)rtw_rts_thresh;
+ registry_par->frag_thresh = (u16)rtw_frag_thresh;
+ registry_par->preamble = (u8)rtw_preamble;
+ registry_par->scan_mode = (u8)rtw_scan_mode;
+ registry_par->adhoc_tx_pwr = (u8)rtw_adhoc_tx_pwr;
+ registry_par->soft_ap = (u8)rtw_soft_ap;
+ registry_par->smart_ps = (u8)rtw_smart_ps;
+ registry_par->power_mgnt = (u8)rtw_power_mgnt;
+ registry_par->ips_mode = (u8)rtw_ips_mode;
+ registry_par->radio_enable = (u8)rtw_radio_enable;
+ registry_par->long_retry_lmt = (u8)rtw_long_retry_lmt;
+ registry_par->short_retry_lmt = (u8)rtw_short_retry_lmt;
+ registry_par->busy_thresh = (u16)rtw_busy_thresh;
+ registry_par->ack_policy = (u8)rtw_ack_policy;
+ registry_par->mp_mode = (u8)rtw_mp_mode;
+ registry_par->software_encrypt = (u8)rtw_software_encrypt;
+ registry_par->software_decrypt = (u8)rtw_software_decrypt;
+ registry_par->acm_method = (u8)rtw_acm_method;
+
+ /* UAPSD */
+ registry_par->wmm_enable = (u8)rtw_wmm_enable;
+ registry_par->uapsd_enable = (u8)rtw_uapsd_enable;
+ registry_par->uapsd_max_sp = (u8)rtw_uapsd_max_sp;
+ registry_par->uapsd_acbk_en = (u8)rtw_uapsd_acbk_en;
+ registry_par->uapsd_acbe_en = (u8)rtw_uapsd_acbe_en;
+ registry_par->uapsd_acvi_en = (u8)rtw_uapsd_acvi_en;
+ registry_par->uapsd_acvo_en = (u8)rtw_uapsd_acvo_en;
+
+ registry_par->ht_enable = (u8)rtw_ht_enable;
+ registry_par->cbw40_enable = (u8)rtw_cbw40_enable;
+ registry_par->ampdu_enable = (u8)rtw_ampdu_enable;
+ registry_par->rx_stbc = (u8)rtw_rx_stbc;
+ registry_par->ampdu_amsdu = (u8)rtw_ampdu_amsdu;
+ registry_par->lowrate_two_xmit = (u8)rtw_lowrate_two_xmit;
+ registry_par->rf_config = (u8)rtw_rf_config;
+ registry_par->low_power = (u8)rtw_low_power;
+ registry_par->wifi_spec = (u8)rtw_wifi_spec;
+ registry_par->channel_plan = (u8)rtw_channel_plan;
+ registry_par->bAcceptAddbaReq = (u8)rtw_AcceptAddbaReq;
+ registry_par->antdiv_cfg = (u8)rtw_antdiv_cfg;
+ registry_par->antdiv_type = (u8)rtw_antdiv_type;
+ registry_par->hwpdn_mode = (u8)rtw_hwpdn_mode;/* 0:disable, 1:enable, 2:by EFUSE config */
+ registry_par->hwpwrp_detect = (u8)rtw_hwpwrp_detect;/* 0:disable, 1:enable */
+ registry_par->hw_wps_pbc = (u8)rtw_hw_wps_pbc;
+
+ registry_par->max_roaming_times = (u8)rtw_max_roaming_times;
+
+ registry_par->fw_iol = rtw_fw_iol;
+
+ registry_par->enable80211d = (u8)rtw_80211d;
+ snprintf(registry_par->ifname, 16, "%s", ifname);
+ snprintf(registry_par->if2name, 16, "%s", if2name);
+ registry_par->notch_filter = (u8)rtw_notch_filter;
+_func_exit_;
+ return status;
+}
+
+static int rtw_net_set_mac_address(struct net_device *pnetdev, void *p)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+ struct sockaddr *addr = p;
+
+ if (!padapter->bup)
+ memcpy(padapter->eeprompriv.mac_addr, addr->sa_data, ETH_ALEN);
+
+ return 0;
+}
+
+static struct net_device_stats *rtw_net_get_stats(struct net_device *pnetdev)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct recv_priv *precvpriv = &(padapter->recvpriv);
+
+ padapter->stats.tx_packets = pxmitpriv->tx_pkts;/* pxmitpriv->tx_pkts++; */
+ padapter->stats.rx_packets = precvpriv->rx_pkts;/* precvpriv->rx_pkts++; */
+ padapter->stats.tx_dropped = pxmitpriv->tx_drop;
+ padapter->stats.rx_dropped = precvpriv->rx_drop;
+ padapter->stats.tx_bytes = pxmitpriv->tx_bytes;
+ padapter->stats.rx_bytes = precvpriv->rx_bytes;
+ return &padapter->stats;
+}
+
+/*
+ * AC to queue mapping
+ *
+ * AC_VO -> queue 0
+ * AC_VI -> queue 1
+ * AC_BE -> queue 2
+ * AC_BK -> queue 3
+ */
+static const u16 rtw_1d_to_queue[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
+
+/* Given a data frame determine the 802.1p/1d tag to use. */
+static unsigned int rtw_classify8021d(struct sk_buff *skb)
+{
+ unsigned int dscp;
+
+ /* skb->priority values from 256->263 are magic values to
+ * directly indicate a specific 802.1d priority. This is used
+ * to allow 802.1d priority to be passed directly in from VLAN
+ * tags, etc.
+ */
+ if (skb->priority >= 256 && skb->priority <= 263)
+ return skb->priority - 256;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ dscp = ip_hdr(skb)->tos & 0xfc;
+ break;
+ default:
+ return 0;
+ }
+
+ return dscp >> 5;
+}
+
+static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ skb->priority = rtw_classify8021d(skb);
+
+ if (pmlmepriv->acm_mask != 0)
+ skb->priority = qos_acm(pmlmepriv->acm_mask, skb->priority);
+
+ return rtw_1d_to_queue[skb->priority];
+}
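
The two helpers above boil down to a pair of table lookups: the IPv4 TOS byte is masked down to its DSCP field, the top three bits become the 802.1d priority, and rtw_1d_to_queue maps that priority onto the four hardware queues described in the comment block (VO=0, VI=1, BE=2, BK=3). A minimal user-space sketch of the same arithmetic, with the table copied from the driver (the helper names here are illustrative only):

#include <stdio.h>

/* same table as rtw_1d_to_queue in the driver */
static const unsigned short prio_to_queue[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

/* mirror of rtw_classify8021d for an IPv4 TOS byte */
static unsigned int classify8021d(unsigned char tos)
{
	unsigned int dscp = tos & 0xfc;	/* drop the two ECN bits */
	return dscp >> 5;		/* keep the top 3 bits: 802.1d priority 0..7 */
}

int main(void)
{
	unsigned char tos = 0xb8;	/* DSCP EF, a typical VoIP marking */
	unsigned int prio = classify8021d(tos);

	printf("tos 0x%02x -> 802.1d prio %u -> tx queue %u\n",
	       tos, prio, prio_to_queue[prio]);
	return 0;
}
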
+
+u16 rtw_recv_select_queue(struct sk_buff *skb)
+{
+ struct iphdr *piphdr;
+ unsigned int dscp;
+ __be16 eth_type;
+ u32 priority;
+ u8 *pdata = skb->data;
+
+ memcpy(&eth_type, pdata+(ETH_ALEN<<1), 2);
+
+ switch (eth_type) {
+ case htons(ETH_P_IP):
+ piphdr = (struct iphdr *)(pdata+ETH_HLEN);
+ dscp = piphdr->tos & 0xfc;
+ priority = dscp >> 5;
+ break;
+ default:
+ priority = 0;
+ }
+
+ return rtw_1d_to_queue[priority];
+}
+
+static const struct net_device_ops rtw_netdev_ops = {
+ .ndo_open = netdev_open,
+ .ndo_stop = netdev_close,
+ .ndo_start_xmit = rtw_xmit_entry,
+ .ndo_select_queue = rtw_select_queue,
+ .ndo_set_mac_address = rtw_net_set_mac_address,
+ .ndo_get_stats = rtw_net_get_stats,
+ .ndo_do_ioctl = rtw_ioctl,
+};
+
+int rtw_init_netdev_name(struct net_device *pnetdev, const char *ifname)
+{
+ if (dev_alloc_name(pnetdev, ifname) < 0)
+ RT_TRACE(_module_os_intfs_c_, _drv_err_, ("dev_alloc_name, fail!\n"));
+
+ netif_carrier_off(pnetdev);
+ return 0;
+}
+
+struct net_device *rtw_init_netdev(struct adapter *old_padapter)
+{
+ struct adapter *padapter;
+ struct net_device *pnetdev;
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+init_net_dev\n"));
+
+ if (old_padapter != NULL)
+ pnetdev = rtw_alloc_etherdev_with_old_priv(sizeof(struct adapter), (void *)old_padapter);
+ else
+ pnetdev = rtw_alloc_etherdev(sizeof(struct adapter));
+
+ if (!pnetdev)
+ return NULL;
+
+ padapter = rtw_netdev_priv(pnetdev);
+ padapter->pnetdev = pnetdev;
+ DBG_88E("register rtw_netdev_ops to netdev_ops\n");
+ pnetdev->netdev_ops = &rtw_netdev_ops;
+ pnetdev->watchdog_timeo = HZ*3; /* 3 second timeout */
+ pnetdev->wireless_handlers = (struct iw_handler_def *)&rtw_handlers_def;
+
+ /* step 2. */
+ loadparam(padapter, pnetdev);
+
+ return pnetdev;
+}
+
+u32 rtw_start_drv_threads(struct adapter *padapter)
+{
+ u32 _status = _SUCCESS;
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_start_drv_threads\n"));
+
+ padapter->cmdThread = kthread_run(rtw_cmd_thread, padapter, "RTW_CMD_THREAD");
+ if (IS_ERR(padapter->cmdThread))
+ _status = _FAIL;
+ else
+ _rtw_down_sema(&padapter->cmdpriv.terminate_cmdthread_sema); /* wait for cmd_thread to run */
+
+ rtw_hal_start_thread(padapter);
+ return _status;
+}
+
+void rtw_stop_drv_threads(struct adapter *padapter)
+{
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_stop_drv_threads\n"));
+
+	/* Below terminates rtw_cmd_thread and event_thread... */
+ _rtw_up_sema(&padapter->cmdpriv.cmd_queue_sema);
+ if (padapter->cmdThread)
+ _rtw_down_sema(&padapter->cmdpriv.terminate_cmdthread_sema);
+
+ rtw_hal_stop_thread(padapter);
+}
+
+static u8 rtw_init_default_value(struct adapter *padapter)
+{
+ u8 ret = _SUCCESS;
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+
+ /* xmit_priv */
+ pxmitpriv->vcs_setting = pregistrypriv->vrtl_carrier_sense;
+ pxmitpriv->vcs = pregistrypriv->vcs_type;
+ pxmitpriv->vcs_type = pregistrypriv->vcs_type;
+ pxmitpriv->frag_len = pregistrypriv->frag_thresh;
+
+ /* mlme_priv */
+ pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */
+ pmlmepriv->scan_mode = SCAN_ACTIVE;
+
+ /* ht_priv */
+ pmlmepriv->htpriv.ampdu_enable = false;/* set to disabled */
+
+ /* security_priv */
+ psecuritypriv->binstallGrpkey = _FAIL;
+ psecuritypriv->sw_encrypt = pregistrypriv->software_encrypt;
+ psecuritypriv->sw_decrypt = pregistrypriv->software_decrypt;
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
+ psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ psecuritypriv->dot11PrivacyKeyIndex = 0;
+ psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
+ psecuritypriv->dot118021XGrpKeyid = 1;
+ psecuritypriv->ndisauthtype = Ndis802_11AuthModeOpen;
+ psecuritypriv->ndisencryptstatus = Ndis802_11WEPDisabled;
+
+ /* registry_priv */
+ rtw_init_registrypriv_dev_network(padapter);
+ rtw_update_registrypriv_dev_network(padapter);
+
+ /* hal_priv */
+ rtw_hal_def_value_init(padapter);
+
+ /* misc. */
+ padapter->bReadPortCancel = false;
+ padapter->bWritePortCancel = false;
+ padapter->bRxRSSIDisplay = 0;
+ padapter->bNotifyChannelChange = 0;
+#ifdef CONFIG_88EU_P2P
+ padapter->bShowGetP2PState = 1;
+#endif
+ return ret;
+}
+
+u8 rtw_reset_drv_sw(struct adapter *padapter)
+{
+ u8 ret8 = _SUCCESS;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
+
+ /* hal_priv */
+ rtw_hal_def_value_init(padapter);
+ padapter->bReadPortCancel = false;
+ padapter->bWritePortCancel = false;
+ padapter->bRxRSSIDisplay = 0;
+ pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */
+
+ padapter->xmitpriv.tx_pkts = 0;
+ padapter->recvpriv.rx_pkts = 0;
+
+ pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
+
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING);
+
+ rtw_hal_sreset_reset_value(padapter);
+ pwrctrlpriv->pwr_state_check_cnts = 0;
+
+ /* mlmeextpriv */
+ padapter->mlmeextpriv.sitesurvey_res.state = SCAN_DISABLE;
+
+ rtw_set_signal_stat_timer(&padapter->recvpriv);
+
+ return ret8;
+}
+
+u8 rtw_init_drv_sw(struct adapter *padapter)
+{
+ u8 ret8 = _SUCCESS;
+
+_func_enter_;
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_init_drv_sw\n"));
+
+ if ((rtw_init_cmd_priv(&padapter->cmdpriv)) == _FAIL) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init cmd_priv\n"));
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+ padapter->cmdpriv.padapter = padapter;
+
+ if ((rtw_init_evt_priv(&padapter->evtpriv)) == _FAIL) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init evt_priv\n"));
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+ if (rtw_init_mlme_priv(padapter) == _FAIL) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init mlme_priv\n"));
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+#ifdef CONFIG_88EU_P2P
+ rtw_init_wifidirect_timers(padapter);
+ init_wifidirect_info(padapter, P2P_ROLE_DISABLE);
+ reset_global_wifidirect_info(padapter);
+#endif /* CONFIG_88EU_P2P */
+
+ if (init_mlme_ext_priv(padapter) == _FAIL) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init mlme_ext_priv\n"));
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+ if (_rtw_init_xmit_priv(&padapter->xmitpriv, padapter) == _FAIL) {
+ DBG_88E("Can't _rtw_init_xmit_priv\n");
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+ if (_rtw_init_recv_priv(&padapter->recvpriv, padapter) == _FAIL) {
+ DBG_88E("Can't _rtw_init_recv_priv\n");
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+ if (_rtw_init_sta_priv(&padapter->stapriv) == _FAIL) {
+ DBG_88E("Can't _rtw_init_sta_priv\n");
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+ padapter->stapriv.padapter = padapter;
+
+ rtw_init_bcmc_stainfo(padapter);
+
+ rtw_init_pwrctrl_priv(padapter);
+
+ if (init_mp_priv(padapter) == _FAIL)
+ DBG_88E("%s: initialize MP private data Fail!\n", __func__);
+
+ ret8 = rtw_init_default_value(padapter);
+
+ rtw_hal_dm_init(padapter);
+ rtw_hal_sw_led_init(padapter);
+
+ rtw_hal_sreset_init(padapter);
+
+ _rtw_spinlock_init(&padapter->br_ext_lock);
+
+exit:
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-rtw_init_drv_sw\n"));
+
+ _func_exit_;
+
+ return ret8;
+}
+
+void rtw_cancel_all_timer(struct adapter *padapter)
+{
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_cancel_all_timer\n"));
+
+ _cancel_timer_ex(&padapter->mlmepriv.assoc_timer);
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("rtw_cancel_all_timer:cancel association timer complete!\n"));
+
+ _cancel_timer_ex(&padapter->mlmepriv.scan_to_timer);
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("rtw_cancel_all_timer:cancel scan_to_timer!\n"));
+
+ _cancel_timer_ex(&padapter->mlmepriv.dynamic_chk_timer);
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("rtw_cancel_all_timer:cancel dynamic_chk_timer!\n"));
+
+ /* cancel sw led timer */
+ rtw_hal_sw_led_deinit(padapter);
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("rtw_cancel_all_timer:cancel DeInitSwLeds!\n"));
+
+ _cancel_timer_ex(&padapter->pwrctrlpriv.pwr_state_check_timer);
+
+ _cancel_timer_ex(&padapter->recvpriv.signal_stat_timer);
+ /* cancel dm timer */
+ rtw_hal_dm_deinit(padapter);
+}
+
+u8 rtw_free_drv_sw(struct adapter *padapter)
+{
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("==>rtw_free_drv_sw"));
+
+ /* we can call rtw_p2p_enable here, but: */
+ /* 1. rtw_p2p_enable may have IO operation */
+ /* 2. rtw_p2p_enable is bundled with wext interface */
+ #ifdef CONFIG_88EU_P2P
+ {
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
+ _cancel_timer_ex(&pwdinfo->find_phase_timer);
+ _cancel_timer_ex(&pwdinfo->restore_p2p_state_timer);
+ _cancel_timer_ex(&pwdinfo->pre_tx_scan_timer);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_NONE);
+ }
+ }
+ #endif
+
+
+ _rtw_spinlock_free(&padapter->br_ext_lock);
+
+ free_mlme_ext_priv(&padapter->mlmeextpriv);
+
+ rtw_free_cmd_priv(&padapter->cmdpriv);
+
+ rtw_free_evt_priv(&padapter->evtpriv);
+
+ rtw_free_mlme_priv(&padapter->mlmepriv);
+ _rtw_free_xmit_priv(&padapter->xmitpriv);
+
+ _rtw_free_sta_priv(&padapter->stapriv); /* will free bcmc_stainfo here */
+
+ _rtw_free_recv_priv(&padapter->recvpriv);
+
+ rtw_free_pwrctrl_priv(padapter);
+
+ rtw_hal_free_data(padapter);
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("<== rtw_free_drv_sw\n"));
+
+ /* free the old_pnetdev */
+ if (padapter->rereg_nd_name_priv.old_pnetdev) {
+ free_netdev(padapter->rereg_nd_name_priv.old_pnetdev);
+ padapter->rereg_nd_name_priv.old_pnetdev = NULL;
+ }
+
+	/* clear the buddy adapter's back-pointer to avoid accessing a wrong pointer. */
+ if (padapter->pbuddy_adapter != NULL)
+ padapter->pbuddy_adapter->pbuddy_adapter = NULL;
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-rtw_free_drv_sw\n"));
+
+ return _SUCCESS;
+}
+
+void netdev_br_init(struct net_device *netdev)
+{
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(netdev);
+
+ rcu_read_lock();
+
+ if (rcu_dereference(adapter->pnetdev->rx_handler_data)) {
+ struct net_device *br_netdev;
+ struct net *devnet = NULL;
+
+ devnet = dev_net(netdev);
+ br_netdev = dev_get_by_name(devnet, CONFIG_BR_EXT_BRNAME);
+ if (br_netdev) {
+ memcpy(adapter->br_mac, br_netdev->dev_addr, ETH_ALEN);
+ dev_put(br_netdev);
+ } else {
+ pr_info("%s()-%d: dev_get_by_name(%s) failed!",
+ __func__, __LINE__, CONFIG_BR_EXT_BRNAME);
+ }
+ }
+ adapter->ethBrExtInfo.addPPPoETag = 1;
+
+ rcu_read_unlock();
+}
+
+int _netdev_open(struct net_device *pnetdev)
+{
+ uint status;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+ struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+88eu_drv - dev_open\n"));
+ DBG_88E("+88eu_drv - drv_open, bup =%d\n", padapter->bup);
+
+ if (pwrctrlpriv->ps_flag) {
+ padapter->net_closed = false;
+ goto netdev_open_normal_process;
+ }
+
+ if (!padapter->bup) {
+ padapter->bDriverStopped = false;
+ padapter->bSurpriseRemoved = false;
+ padapter->bCardDisableWOHSM = false;
+
+ status = rtw_hal_init(padapter);
+ if (status == _FAIL) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_, ("rtl88eu_hal_init(): Can't init h/w!\n"));
+ goto netdev_open_error;
+ }
+
+ pr_info("MAC Address = %pM\n", pnetdev->dev_addr);
+
+ status = rtw_start_drv_threads(padapter);
+ if (status == _FAIL) {
+ pr_info("Initialize driver software resource Failed!\n");
+ goto netdev_open_error;
+ }
+
+ if (init_hw_mlme_ext(padapter) == _FAIL) {
+ pr_info("can't init mlme_ext_priv\n");
+ goto netdev_open_error;
+ }
+ if (padapter->intf_start)
+ padapter->intf_start(padapter);
+ rtw_proc_init_one(pnetdev);
+
+ rtw_led_control(padapter, LED_CTL_NO_LINK);
+
+ padapter->bup = true;
+ }
+ padapter->net_closed = false;
+
+ _set_timer(&padapter->mlmepriv.dynamic_chk_timer, 2000);
+
+ padapter->pwrctrlpriv.bips_processing = false;
+ rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
+
+ if (!rtw_netif_queue_stopped(pnetdev))
+ rtw_netif_start_queue(pnetdev);
+ else
+ rtw_netif_wake_queue(pnetdev);
+
+ netdev_br_init(pnetdev);
+
+netdev_open_normal_process:
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-88eu_drv - dev_open\n"));
+ DBG_88E("-88eu_drv - drv_open, bup =%d\n", padapter->bup);
+ return 0;
+
+netdev_open_error:
+ padapter->bup = false;
+ netif_carrier_off(pnetdev);
+ rtw_netif_stop_queue(pnetdev);
+ RT_TRACE(_module_os_intfs_c_, _drv_err_, ("-88eu_drv - dev_open, fail!\n"));
+ DBG_88E("-88eu_drv - drv_open fail, bup =%d\n", padapter->bup);
+ return -1;
+}
+
+int netdev_open(struct net_device *pnetdev)
+{
+ int ret;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+
+ _enter_critical_mutex(padapter->hw_init_mutex, NULL);
+ ret = _netdev_open(pnetdev);
+ _exit_critical_mutex(padapter->hw_init_mutex, NULL);
+ return ret;
+}
+
+static int ips_netdrv_open(struct adapter *padapter)
+{
+ int status = _SUCCESS;
+ padapter->net_closed = false;
+ DBG_88E("===> %s.........\n", __func__);
+
+ padapter->bDriverStopped = false;
+ padapter->bSurpriseRemoved = false;
+ padapter->bCardDisableWOHSM = false;
+
+ status = rtw_hal_init(padapter);
+ if (status == _FAIL) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_, ("ips_netdrv_open(): Can't init h/w!\n"));
+ goto netdev_open_error;
+ }
+
+ if (padapter->intf_start)
+ padapter->intf_start(padapter);
+
+ rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
+ _set_timer(&padapter->mlmepriv.dynamic_chk_timer, 5000);
+
+ return _SUCCESS;
+
+netdev_open_error:
+ DBG_88E("-ips_netdrv_open - drv_open failure, bup =%d\n", padapter->bup);
+
+ return _FAIL;
+}
+
+
+int rtw_ips_pwr_up(struct adapter *padapter)
+{
+ int result;
+ u32 start_time = rtw_get_current_time();
+ DBG_88E("===> rtw_ips_pwr_up..............\n");
+ rtw_reset_drv_sw(padapter);
+
+ result = ips_netdrv_open(padapter);
+
+ rtw_led_control(padapter, LED_CTL_NO_LINK);
+
+ DBG_88E("<=== rtw_ips_pwr_up.............. in %dms\n", rtw_get_passing_time_ms(start_time));
+ return result;
+}
+
+void rtw_ips_pwr_down(struct adapter *padapter)
+{
+ u32 start_time = rtw_get_current_time();
+ DBG_88E("===> rtw_ips_pwr_down...................\n");
+
+ padapter->bCardDisableWOHSM = true;
+ padapter->net_closed = true;
+
+ rtw_led_control(padapter, LED_CTL_POWER_OFF);
+
+ rtw_ips_dev_unload(padapter);
+ padapter->bCardDisableWOHSM = false;
+ DBG_88E("<=== rtw_ips_pwr_down..................... in %dms\n", rtw_get_passing_time_ms(start_time));
+}
+
+void rtw_ips_dev_unload(struct adapter *padapter)
+{
+ DBG_88E("====> %s...\n", __func__);
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_FIFO_CLEARN_UP, NULL);
+
+ if (padapter->intf_stop)
+ padapter->intf_stop(padapter);
+
+ /* s5. */
+ if (!padapter->bSurpriseRemoved)
+ rtw_hal_deinit(padapter);
+}
+
+int pm_netdev_open(struct net_device *pnetdev, u8 bnormal)
+{
+ int status;
+
+ if (bnormal)
+ status = netdev_open(pnetdev);
+ else
+ status = (_SUCCESS == ips_netdrv_open((struct adapter *)rtw_netdev_priv(pnetdev))) ? (0) : (-1);
+ return status;
+}
+
+int netdev_close(struct net_device *pnetdev)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+88eu_drv - drv_close\n"));
+
+ if (padapter->pwrctrlpriv.bInternalAutoSuspend) {
+ if (padapter->pwrctrlpriv.rf_pwrstate == rf_off)
+ padapter->pwrctrlpriv.ps_flag = true;
+ }
+ padapter->net_closed = true;
+
+ if (padapter->pwrctrlpriv.rf_pwrstate == rf_on) {
+ DBG_88E("(2)88eu_drv - drv_close, bup =%d, hw_init_completed =%d\n",
+ padapter->bup, padapter->hw_init_completed);
+
+ /* s1. */
+ if (pnetdev) {
+ if (!rtw_netif_queue_stopped(pnetdev))
+ rtw_netif_stop_queue(pnetdev);
+ }
+
+ /* s2. */
+ LeaveAllPowerSaveMode(padapter);
+ rtw_disassoc_cmd(padapter, 500, false);
+ /* s2-2. indicate disconnect to os */
+ rtw_indicate_disconnect(padapter);
+ /* s2-3. */
+ rtw_free_assoc_resources(padapter, 1);
+ /* s2-4. */
+ rtw_free_network_queue(padapter, true);
+ /* Close LED */
+ rtw_led_control(padapter, LED_CTL_POWER_OFF);
+ }
+
+ nat25_db_cleanup(padapter);
+
+#ifdef CONFIG_88EU_P2P
+ rtw_p2p_enable(padapter, P2P_ROLE_DISABLE);
+#endif /* CONFIG_88EU_P2P */
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-88eu_drv - drv_close\n"));
+ DBG_88E("-88eu_drv - drv_close, bup =%d\n", padapter->bup);
+ return 0;
+}
diff --git a/drivers/staging/rtl8188eu/os_dep/osdep_service.c b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
new file mode 100644
index 0000000..4e0bfb7
--- /dev/null
+++ b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
@@ -0,0 +1,815 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+
+#define _OSDEP_SERVICE_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <recv_osdep.h>
+#include <linux/vmalloc.h>
+#include <rtw_ioctl_set.h>
+
+/*
+* Translate the OS-dependent @param error_code to an OS-independent RTW_STATUS_CODE
+* @return: one of RTW_STATUS_CODE
+*/
+inline int RTW_STATUS_CODE(int error_code)
+{
+ if (error_code >= 0)
+ return _SUCCESS;
+ return _FAIL;
+}
+
+u32 rtw_atoi(u8 *s)
+{
+ int num = 0, flag = 0;
+ int i;
+ for (i = 0; i <= strlen(s); i++) {
+ if (s[i] >= '0' && s[i] <= '9')
+ num = num * 10 + s[i] - '0';
+ else if (s[0] == '-' && i == 0)
+ flag = 1;
+ else
+ break;
+ }
+ if (flag == 1)
+ num = num * -1;
+ return num;
+}
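
rtw_atoi() is a hand-rolled strtol(): it accumulates decimal digits, honours a single leading '-', and stops at the first character that is neither. A quick user-space check of the same parsing rules (the helper name is illustrative, not part of the driver):

#include <stdio.h>

/* same parsing rules as the driver's rtw_atoi() */
static int simple_atoi(const char *s)
{
	int num = 0, neg = 0, i = 0;

	if (s[0] == '-') {	/* only a leading minus is recognised */
		neg = 1;
		i = 1;
	}
	for (; s[i] >= '0' && s[i] <= '9'; i++)
		num = num * 10 + (s[i] - '0');

	return neg ? -num : num;
}

int main(void)
{
	printf("%d %d %d\n",
	       simple_atoi("42"),	/* 42 */
	       simple_atoi("-17dBm"),	/* -17, parsing stops at 'd' */
	       simple_atoi("abc"));	/* 0 */
	return 0;
}
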
+
+inline u8 *_rtw_vmalloc(u32 sz)
+{
+ u8 *pbuf;
+ pbuf = vmalloc(sz);
+ return pbuf;
+}
+
+inline u8 *_rtw_zvmalloc(u32 sz)
+{
+ u8 *pbuf;
+ pbuf = _rtw_vmalloc(sz);
+ if (pbuf != NULL)
+ memset(pbuf, 0, sz);
+ return pbuf;
+}
+
+inline void _rtw_vmfree(u8 *pbuf, u32 sz)
+{
+ vfree(pbuf);
+}
+
+u8 *_rtw_malloc(u32 sz)
+{
+ u8 *pbuf = NULL;
+
+ pbuf = kmalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
+ return pbuf;
+}
+
+u8 *_rtw_zmalloc(u32 sz)
+{
+ u8 *pbuf = _rtw_malloc(sz);
+
+ if (pbuf != NULL)
+ memset(pbuf, 0, sz);
+ return pbuf;
+}
+
+void *rtw_malloc2d(int h, int w, int size)
+{
+ int j;
+
+ void **a = (void **)rtw_zmalloc(h*sizeof(void *) + h*w*size);
+ if (a == NULL) {
+ pr_info("%s: alloc memory fail!\n", __func__);
+ return NULL;
+ }
+
+ for (j = 0; j < h; j++)
+ a[j] = ((char *)(a+h)) + j*w*size;
+
+ return a;
+}
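
rtw_malloc2d() packs a 2-D array into a single allocation: the first h*sizeof(void *) bytes hold the row-pointer table and the remaining h*w*size bytes hold the data, with each row pointer aimed at its own slice, so one kfree() releases everything. A user-space sketch of the same layout, using plain calloc/free instead of the driver wrappers:

#include <stdio.h>
#include <stdlib.h>

/* one allocation: h row pointers followed by h*w elements of 'size' bytes */
static void **malloc2d(int h, int w, int size)
{
	void **a = calloc(1, h * sizeof(void *) + (size_t)h * w * size);
	int j;

	if (!a)
		return NULL;
	for (j = 0; j < h; j++)
		a[j] = (char *)(a + h) + (size_t)j * w * size;
	return a;
}

int main(void)
{
	int **m = (int **)malloc2d(3, 4, sizeof(int));

	if (!m)
		return 1;
	m[2][3] = 7;		/* rows are addressable like a normal 2-D array */
	printf("%d\n", m[2][3]);
	free(m);		/* one free() releases table and data together */
	return 0;
}
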
+
+void rtw_mfree2d(void *pbuf, int h, int w, int size)
+{
+ kfree(pbuf);
+}
+
+int _rtw_memcmp(void *dst, void *src, u32 sz)
+{
+/* under Linux/GNU/glibc, memcmp() returns 0 for two identical
+ * memory chunks */
+ if (!(memcmp(dst, src, sz)))
+ return true;
+ else
+ return false;
+}
+
+void _rtw_memset(void *pbuf, int c, u32 sz)
+{
+ memset(pbuf, c, sz);
+}
+
+void _rtw_init_listhead(struct list_head *list)
+{
+ INIT_LIST_HEAD(list);
+}
+
+/*
+For the following list_xxx operations,
+the caller must guarantee atomic context.
+Otherwise, there will be a race condition.
+*/
+u32 rtw_is_list_empty(struct list_head *phead)
+{
+ if (list_empty(phead))
+ return true;
+ else
+ return false;
+}
+
+void rtw_list_insert_head(struct list_head *plist, struct list_head *phead)
+{
+ list_add(plist, phead);
+}
+
+void rtw_list_insert_tail(struct list_head *plist, struct list_head *phead)
+{
+ list_add_tail(plist, phead);
+}
+
+/*
+Caller must check if the list is empty before calling rtw_list_delete
+*/
+
+void _rtw_init_sema(struct semaphore *sema, int init_val)
+{
+ sema_init(sema, init_val);
+}
+
+void _rtw_free_sema(struct semaphore *sema)
+{
+}
+
+void _rtw_up_sema(struct semaphore *sema)
+{
+ up(sema);
+}
+
+u32 _rtw_down_sema(struct semaphore *sema)
+{
+ if (down_interruptible(sema))
+ return _FAIL;
+ else
+ return _SUCCESS;
+}
+
+void _rtw_mutex_init(struct mutex *pmutex)
+{
+ mutex_init(pmutex);
+}
+
+void _rtw_mutex_free(struct mutex *pmutex)
+{
+ mutex_destroy(pmutex);
+}
+
+void _rtw_spinlock_init(spinlock_t *plock)
+{
+ spin_lock_init(plock);
+}
+
+void _rtw_spinlock_free(spinlock_t *plock)
+{
+}
+
+void _rtw_init_queue(struct __queue *pqueue)
+{
+ _rtw_init_listhead(&(pqueue->queue));
+ _rtw_spinlock_init(&(pqueue->lock));
+}
+
+u32 _rtw_queue_empty(struct __queue *pqueue)
+{
+ return rtw_is_list_empty(&(pqueue->queue));
+}
+
+u32 rtw_end_of_queue_search(struct list_head *head, struct list_head *plist)
+{
+ if (head == plist)
+ return true;
+ else
+ return false;
+}
+
+u32 rtw_get_current_time(void)
+{
+ return jiffies;
+}
+
+inline u32 rtw_systime_to_ms(u32 systime)
+{
+ return systime * 1000 / HZ;
+}
+
+inline u32 rtw_ms_to_systime(u32 ms)
+{
+ return ms * HZ / 1000;
+}
+
+/* the input parameter start uses the same unit as returned by
+ * rtw_get_current_time */
+inline s32 rtw_get_passing_time_ms(u32 start)
+{
+ return rtw_systime_to_ms(jiffies-start);
+}
+
+inline s32 rtw_get_time_interval_ms(u32 start, u32 end)
+{
+ return rtw_systime_to_ms(end-start);
+}
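
The time helpers just scale jiffies by HZ: systime*1000/HZ gives milliseconds, and the elapsed time is (jiffies - start) pushed through the same conversion, where the unsigned subtraction keeps working across a jiffies wrap. A small sketch of the arithmetic, assuming HZ = 100 and a 32-bit unsigned int purely for illustration:

#include <stdio.h>

#define HZ 100U	/* assumed tick rate for this example */

static unsigned int systime_to_ms(unsigned int systime)
{
	return systime * 1000U / HZ;
}

int main(void)
{
	unsigned int start = 4294967290U;	/* 6 ticks before the 32-bit wrap */
	unsigned int now = 10U;			/* 16 ticks later, after wrapping */

	/* unsigned wrap-around makes now - start come out as 16 ticks */
	printf("elapsed: %u ms\n", systime_to_ms(now - start));	/* 160 ms */
	return 0;
}
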
+
+void rtw_sleep_schedulable(int ms)
+{
+ u32 delta;
+
+	delta = (ms * HZ)/1000; /* convert ms to jiffies */
+ if (delta == 0)
+ delta = 1;/* 1 ms */
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (schedule_timeout(delta) != 0)
+ return;
+}
+
+void rtw_msleep_os(int ms)
+{
+ msleep((unsigned int)ms);
+}
+
+void rtw_usleep_os(int us)
+{
+ if (1 < (us/1000))
+ msleep(1);
+ else
+ msleep((us/1000) + 1);
+}
+
+void rtw_mdelay_os(int ms)
+{
+ mdelay((unsigned long)ms);
+}
+
+void rtw_udelay_os(int us)
+{
+ udelay((unsigned long)us);
+}
+
+void rtw_yield_os(void)
+{
+ yield();
+}
+
+#define RTW_SUSPEND_LOCK_NAME "rtw_wifi"
+
+inline void rtw_suspend_lock_init(void)
+{
+}
+
+inline void rtw_suspend_lock_uninit(void)
+{
+}
+
+inline void rtw_lock_suspend(void)
+{
+}
+
+inline void rtw_unlock_suspend(void)
+{
+}
+
+inline void ATOMIC_SET(ATOMIC_T *v, int i)
+{
+ atomic_set(v, i);
+}
+
+inline int ATOMIC_READ(ATOMIC_T *v)
+{
+ return atomic_read(v);
+}
+
+inline void ATOMIC_ADD(ATOMIC_T *v, int i)
+{
+ atomic_add(i, v);
+}
+
+inline void ATOMIC_SUB(ATOMIC_T *v, int i)
+{
+ atomic_sub(i, v);
+}
+
+inline void ATOMIC_INC(ATOMIC_T *v)
+{
+ atomic_inc(v);
+}
+
+inline void ATOMIC_DEC(ATOMIC_T *v)
+{
+ atomic_dec(v);
+}
+
+inline int ATOMIC_ADD_RETURN(ATOMIC_T *v, int i)
+{
+ return atomic_add_return(i, v);
+}
+
+inline int ATOMIC_SUB_RETURN(ATOMIC_T *v, int i)
+{
+ return atomic_sub_return(i, v);
+}
+
+inline int ATOMIC_INC_RETURN(ATOMIC_T *v)
+{
+ return atomic_inc_return(v);
+}
+
+inline int ATOMIC_DEC_RETURN(ATOMIC_T *v)
+{
+ return atomic_dec_return(v);
+}
+
+/* Open a file with the specified @param path, @param flag, @param mode
+ * @param fpp pointer that receives the struct file pointer when the file is opened successfully
+ * @param path the path of the file to open
+ * @param flag file operation flags, see the Linux documentation
+ * @param mode file permission mode, see the Linux documentation
+ * @return Linux-specific error code
+ */
+static int openfile(struct file **fpp, char *path, int flag, int mode)
+{
+ struct file *fp;
+
+ fp = filp_open(path, flag, mode);
+ if (IS_ERR(fp)) {
+ *fpp = NULL;
+ return PTR_ERR(fp);
+ } else {
+ *fpp = fp;
+ return 0;
+ }
+}
+
+/* Close the file with the specific @param fp
+ * @param fp the pointer of struct file to close
+ * @return always 0
+ */
+static int closefile(struct file *fp)
+{
+ filp_close(fp, NULL);
+ return 0;
+}
+
+static int readfile(struct file *fp, char __user *buf, int len)
+{
+ int rlen = 0, sum = 0;
+
+ if (!fp->f_op || !fp->f_op->read)
+ return -EPERM;
+
+ while (sum < len) {
+ rlen = fp->f_op->read(fp, buf+sum, len-sum, &fp->f_pos);
+ if (rlen > 0)
+ sum += rlen;
+ else if (0 != rlen)
+ return rlen;
+ else
+ break;
+ }
+ return sum;
+}
+
+static int writefile(struct file *fp, char __user *buf, int len)
+{
+ int wlen = 0, sum = 0;
+
+ if (!fp->f_op || !fp->f_op->write)
+ return -EPERM;
+
+ while (sum < len) {
+ wlen = fp->f_op->write(fp, buf+sum, len-sum, &fp->f_pos);
+ if (wlen > 0)
+ sum += wlen;
+ else if (0 != wlen)
+ return wlen;
+ else
+ break;
+ }
+ return sum;
+}
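
readfile()/writefile() above wrap the VFS ->read/->write hooks in the usual short-transfer loop: keep calling until the requested length has been transferred, treat a zero return as end-of-file, and pass a negative return through as the error. The same pattern in plain user-space terms, using POSIX read() on a file descriptor (illustrative only, not driver code):

#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>

/* read exactly 'len' bytes unless EOF or an error cuts it short */
static ssize_t read_full(int fd, char *buf, size_t len)
{
	size_t sum = 0;

	while (sum < len) {
		ssize_t rlen = read(fd, buf + sum, len - sum);

		if (rlen > 0)
			sum += rlen;	/* short read: keep going */
		else if (rlen < 0)
			return rlen;	/* pass the error through */
		else
			break;		/* 0 means end-of-file */
	}
	return sum;
}

int main(void)
{
	char buf[64];
	int fd = open("/etc/hostname", O_RDONLY);	/* any readable file */
	ssize_t n;

	if (fd < 0)
		return 1;
	n = read_full(fd, buf, sizeof(buf) - 1);
	if (n >= 0) {
		buf[n] = '\0';
		printf("read %zd bytes: %s", n, buf);
	}
	close(fd);
	return 0;
}
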
+
+/* Test if the specified @param path is a readable file
+ * @param path the path of the file to test
+ * @return Linux-specific error code
+ */
+static int isfilereadable(char *path)
+{
+ struct file *fp;
+ int ret = 0;
+ mm_segment_t oldfs;
+ char __user buf;
+
+ fp = filp_open(path, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ ret = PTR_ERR(fp);
+ } else {
+ oldfs = get_fs(); set_fs(get_ds());
+
+ if (1 != readfile(fp, &buf, 1))
+ ret = PTR_ERR(fp);
+
+ set_fs(oldfs);
+ filp_close(fp, NULL);
+ }
+ return ret;
+}
+
+/* Open the file at @param path and retrieve the file content into
+ * memory starting at @param buf, for at most @param sz bytes
+ * @param path the path of the file to open and read
+ * @param buf the starting address of the buffer to store the file content
+ * @param sz how many bytes to read at most
+ * @return the number of bytes read, or a Linux-specific error code
+ */
+static int retrievefromfile(char *path, u8 __user *buf, u32 sz)
+{
+ int ret = -1;
+ mm_segment_t oldfs;
+ struct file *fp;
+
+ if (path && buf) {
+ ret = openfile(&fp, path, O_RDONLY, 0);
+ if (0 == ret) {
+ DBG_88E("%s openfile path:%s fp =%p\n", __func__,
+ path, fp);
+
+ oldfs = get_fs(); set_fs(get_ds());
+ ret = readfile(fp, buf, sz);
+ set_fs(oldfs);
+ closefile(fp);
+
+ DBG_88E("%s readfile, ret:%d\n", __func__, ret);
+
+ } else {
+ DBG_88E("%s openfile path:%s Fail, ret:%d\n", __func__,
+ path, ret);
+ }
+ } else {
+ DBG_88E("%s NULL pointer\n", __func__);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/*
+* Open the file at @param path and write @param sz bytes of data starting at @param buf into the file
+* @param path the path of the file to open and write
+* @param buf the starting address of the data to write into the file
+* @param sz how many bytes to write at most
+* @return the number of bytes written, or a Linux-specific error code
+*/
+static int storetofile(char *path, u8 __user *buf, u32 sz)
+{
+ int ret = 0;
+ mm_segment_t oldfs;
+ struct file *fp;
+
+ if (path && buf) {
+ ret = openfile(&fp, path, O_CREAT|O_WRONLY, 0666);
+ if (0 == ret) {
+ DBG_88E("%s openfile path:%s fp =%p\n", __func__, path, fp);
+
+ oldfs = get_fs(); set_fs(get_ds());
+ ret = writefile(fp, buf, sz);
+ set_fs(oldfs);
+ closefile(fp);
+
+ DBG_88E("%s writefile, ret:%d\n", __func__, ret);
+
+ } else {
+ DBG_88E("%s openfile path:%s Fail, ret:%d\n", __func__, path, ret);
+ }
+ } else {
+ DBG_88E("%s NULL pointer\n", __func__);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/*
+* Test if the specified @param path is a readable file
+* @param path the path of the file to test
+* @return true or false
+*/
+int rtw_is_file_readable(char *path)
+{
+ if (isfilereadable(path) == 0)
+ return true;
+ else
+ return false;
+}
+
+/*
+* Open the file at @param path and retrieve the file content into memory starting at @param buf, for at most @param sz bytes
+* @param path the path of the file to open and read
+* @param buf the starting address of the buffer to store the file content
+* @param sz how many bytes to read at most
+* @return the number of bytes read
+*/
+int rtw_retrive_from_file(char *path, u8 __user *buf, u32 sz)
+{
+ int ret = retrievefromfile(path, buf, sz);
+
+ return ret >= 0 ? ret : 0;
+}
+
+/*
+ * Open the file at @param path and write @param sz bytes of data
+ * starting at @param buf into the file
+ * @param path the path of the file to open and write
+ * @param buf the starting address of the data to write into the file
+ * @param sz how many bytes to write at most
+ * @return the number of bytes written
+ */
+int rtw_store_to_file(char *path, u8 __user *buf, u32 sz)
+{
+ int ret = storetofile(path, buf, sz);
+ return ret >= 0 ? ret : 0;
+}
+
+struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv,
+ void *old_priv)
+{
+ struct net_device *pnetdev;
+ struct rtw_netdev_priv_indicator *pnpi;
+
+ pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
+ if (!pnetdev)
+ goto RETURN;
+
+ pnpi = netdev_priv(pnetdev);
+ pnpi->priv = old_priv;
+ pnpi->sizeof_priv = sizeof_priv;
+
+RETURN:
+ return pnetdev;
+}
+
+struct net_device *rtw_alloc_etherdev(int sizeof_priv)
+{
+ struct net_device *pnetdev;
+ struct rtw_netdev_priv_indicator *pnpi;
+
+ pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
+ if (!pnetdev)
+ goto RETURN;
+
+ pnpi = netdev_priv(pnetdev);
+
+ pnpi->priv = rtw_zvmalloc(sizeof_priv);
+ if (!pnpi->priv) {
+ free_netdev(pnetdev);
+ pnetdev = NULL;
+ goto RETURN;
+ }
+
+ pnpi->sizeof_priv = sizeof_priv;
+RETURN:
+ return pnetdev;
+}
+
+void rtw_free_netdev(struct net_device *netdev)
+{
+ struct rtw_netdev_priv_indicator *pnpi;
+
+ if (!netdev)
+ goto RETURN;
+
+ pnpi = netdev_priv(netdev);
+
+ if (!pnpi->priv)
+ goto RETURN;
+
+ rtw_vmfree(pnpi->priv, pnpi->sizeof_priv);
+ free_netdev(netdev);
+
+RETURN:
+ return;
+}
+
+int rtw_change_ifname(struct adapter *padapter, const char *ifname)
+{
+ struct net_device *pnetdev;
+ struct net_device *cur_pnetdev = padapter->pnetdev;
+ struct rereg_nd_name_data *rereg_priv;
+ int ret;
+
+ if (!padapter)
+ goto error;
+
+ rereg_priv = &padapter->rereg_nd_name_priv;
+
+ /* free the old_pnetdev */
+ if (rereg_priv->old_pnetdev) {
+ free_netdev(rereg_priv->old_pnetdev);
+ rereg_priv->old_pnetdev = NULL;
+ }
+
+ if (!rtnl_is_locked())
+ unregister_netdev(cur_pnetdev);
+ else
+ unregister_netdevice(cur_pnetdev);
+
+ rtw_proc_remove_one(cur_pnetdev);
+
+ rereg_priv->old_pnetdev = cur_pnetdev;
+
+ pnetdev = rtw_init_netdev(padapter);
+ if (!pnetdev) {
+ ret = -1;
+ goto error;
+ }
+
+ SET_NETDEV_DEV(pnetdev, dvobj_to_dev(adapter_to_dvobj(padapter)));
+
+ rtw_init_netdev_name(pnetdev, ifname);
+
+ memcpy(pnetdev->dev_addr, padapter->eeprompriv.mac_addr, ETH_ALEN);
+
+ if (!rtnl_is_locked())
+ ret = register_netdev(pnetdev);
+ else
+ ret = register_netdevice(pnetdev);
+ if (ret != 0) {
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_,
+ ("register_netdev() failed\n"));
+ goto error;
+ }
+ rtw_proc_init_one(pnetdev);
+ return 0;
+error:
+ return -1;
+}
+
+u64 rtw_modular64(u64 x, u64 y)
+{
+ return do_div(x, y);
+}
+
+u64 rtw_division64(u64 x, u64 y)
+{
+ do_div(x, y);
+ return x;
+}
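
The two wrappers above exist because the kernel's do_div(x, y) both divides in place and returns the remainder: rtw_modular64() keeps only the returned remainder, while rtw_division64() discards it and returns the updated x, which then holds the quotient. In ordinary C the pair reduces to plain / and %, as this user-space sketch shows:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* user-space equivalents of the two wrappers */
static uint64_t modular64(uint64_t x, uint64_t y)  { return x % y; }
static uint64_t division64(uint64_t x, uint64_t y) { return x / y; }

int main(void)
{
	uint64_t bytes = 1000000007ULL;

	printf("quotient  %" PRIu64 "\n", division64(bytes, 1024));	/* 976562 */
	printf("remainder %" PRIu64 "\n", modular64(bytes, 1024));	/* 519 */
	return 0;
}
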
+
+void rtw_buf_free(u8 **buf, u32 *buf_len)
+{
+ *buf_len = 0;
+ kfree(*buf);
+ *buf = NULL;
+}
+
+void rtw_buf_update(u8 **buf, u32 *buf_len, u8 *src, u32 src_len)
+{
+ u32 ori_len = 0, dup_len = 0;
+ u8 *ori = NULL;
+ u8 *dup = NULL;
+
+ if (!buf || !buf_len)
+ return;
+
+ if (!src || !src_len)
+ goto keep_ori;
+
+ /* duplicate src */
+ dup = rtw_malloc(src_len);
+ if (dup) {
+ dup_len = src_len;
+ memcpy(dup, src, dup_len);
+ }
+
+keep_ori:
+ ori = *buf;
+ ori_len = *buf_len;
+
+ /* replace buf with dup */
+ *buf_len = 0;
+ *buf = dup;
+ *buf_len = dup_len;
+
+ /* free ori */
+ kfree(ori);
+}
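
rtw_buf_update() implements a simple replace-and-free pattern: duplicate the new data first, swap the copy into *buf/*buf_len, and only then free the old buffer; a NULL or empty source degrades into a plain free, just like rtw_buf_free(). A user-space sketch of the same pattern with malloc/free (names are illustrative, not part of the driver):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* replace *buf with a private copy of src; NULL/0 src just frees *buf */
static void buf_update(unsigned char **buf, unsigned int *buf_len,
		       const unsigned char *src, unsigned int src_len)
{
	unsigned char *old = *buf;
	unsigned char *dup = NULL;
	unsigned int dup_len = 0;

	if (src && src_len) {
		dup = malloc(src_len);
		if (dup) {
			memcpy(dup, src, src_len);
			dup_len = src_len;
		}
	}
	*buf = dup;
	*buf_len = dup_len;
	free(old);		/* safe even if old == NULL */
}

int main(void)
{
	unsigned char *ie = NULL;
	unsigned int ie_len = 0;

	buf_update(&ie, &ie_len, (const unsigned char *)"abcd", 4);
	if (ie)
		printf("len=%u first=%c\n", ie_len, ie[0]);
	buf_update(&ie, &ie_len, NULL, 0);	/* drops the stored copy */
	printf("len=%u ptr=%p\n", ie_len, (void *)ie);
	return 0;
}
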
+
+
+/**
+ * rtw_cbuf_full - test if cbuf is full
+ * @cbuf: pointer to struct rtw_cbuf
+ *
+ * Returns: true if cbuf is full
+ */
+inline bool rtw_cbuf_full(struct rtw_cbuf *cbuf)
+{
+ return (cbuf->write == cbuf->read-1) ? true : false;
+}
+
+/**
+ * rtw_cbuf_empty - test if cbuf is empty
+ * @cbuf: pointer to struct rtw_cbuf
+ *
+ * Returns: true if cbuf is empty
+ */
+inline bool rtw_cbuf_empty(struct rtw_cbuf *cbuf)
+{
+ return (cbuf->write == cbuf->read) ? true : false;
+}
+
+/**
+ * rtw_cbuf_push - push a pointer into cbuf
+ * @cbuf: pointer to struct rtw_cbuf
+ * @buf: pointer to push in
+ *
+ * Lock-free operation; be careful with the usage scheme
+ * Returns: true if the push succeeds
+ */
+bool rtw_cbuf_push(struct rtw_cbuf *cbuf, void *buf)
+{
+ if (rtw_cbuf_full(cbuf))
+ return _FAIL;
+
+ if (0)
+ DBG_88E("%s on %u\n", __func__, cbuf->write);
+ cbuf->bufs[cbuf->write] = buf;
+ cbuf->write = (cbuf->write+1)%cbuf->size;
+
+ return _SUCCESS;
+}
+
+/**
+ * rtw_cbuf_pop - pop a pointer from cbuf
+ * @cbuf: pointer to struct rtw_cbuf
+ *
+ * Lock-free operation; be careful with the usage scheme
+ * Returns: the pointer popped out
+ */
+void *rtw_cbuf_pop(struct rtw_cbuf *cbuf)
+{
+ void *buf;
+ if (rtw_cbuf_empty(cbuf))
+ return NULL;
+
+ if (0)
+ DBG_88E("%s on %u\n", __func__, cbuf->read);
+ buf = cbuf->bufs[cbuf->read];
+ cbuf->read = (cbuf->read+1)%cbuf->size;
+
+ return buf;
+}
+
+/**
+ * rtw_cbuf_alloc - allocate a rtw_cbuf of the given size and initialize it
+ * @size: number of pointer slots
+ *
+ * Returns: pointer to struct rtw_cbuf, NULL on allocation failure
+ */
+struct rtw_cbuf *rtw_cbuf_alloc(u32 size)
+{
+ struct rtw_cbuf *cbuf;
+
+ cbuf = (struct rtw_cbuf *)rtw_malloc(sizeof(*cbuf) +
+ sizeof(void *)*size);
+
+ if (cbuf) {
+ cbuf->write = 0;
+ cbuf->read = 0;
+ cbuf->size = size;
+ }
+ return cbuf;
+}
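
The cbuf above is a classic single-producer/single-consumer ring of pointers: the read and write indices chase each other modulo size, empty is write == read, and the full test (expressed in the driver as write == read - 1) deliberately leaves one slot unused so the two states stay distinguishable without a counter or a lock. A compact user-space version of the same idea, using the conventional (write + 1) % size == read form of the full check:

#include <stdio.h>

#define RING_SIZE 4	/* usable capacity is RING_SIZE - 1 */

struct ring {
	unsigned int write, read;
	void *bufs[RING_SIZE];
};

static int ring_full(struct ring *r)  { return (r->write + 1) % RING_SIZE == r->read; }
static int ring_empty(struct ring *r) { return r->write == r->read; }

static int ring_push(struct ring *r, void *p)
{
	if (ring_full(r))
		return 0;
	r->bufs[r->write] = p;
	r->write = (r->write + 1) % RING_SIZE;
	return 1;
}

static void *ring_pop(struct ring *r)
{
	void *p;

	if (ring_empty(r))
		return NULL;
	p = r->bufs[r->read];
	r->read = (r->read + 1) % RING_SIZE;
	return p;
}

int main(void)
{
	struct ring r = { 0, 0, { 0 } };
	int a = 1, b = 2, c = 3, d = 4;
	int ok1 = ring_push(&r, &a);
	int ok2 = ring_push(&r, &b);
	int ok3 = ring_push(&r, &c);
	int ok4 = ring_push(&r, &d);

	printf("%d %d %d %d\n", ok1, ok2, ok3, ok4);	/* 1 1 1 0: one slot stays free */
	printf("%d\n", *(int *)ring_pop(&r));		/* 1, the first value pushed */
	return 0;
}
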
diff --git a/drivers/staging/rtl8188eu/os_dep/recv_linux.c b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
new file mode 100644
index 0000000..e2f4e7d
--- /dev/null
+++ b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
@@ -0,0 +1,261 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RECV_OSDEP_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#include <wifi.h>
+#include <recv_osdep.h>
+
+#include <osdep_intf.h>
+#include <ethernet.h>
+#include <usb_ops.h>
+
+/* init os related resource in struct recv_priv */
+int rtw_os_recv_resource_init(struct recv_priv *precvpriv,
+ struct adapter *padapter)
+{
+ return _SUCCESS;
+}
+
+/* alloc os related resource in union recv_frame */
+int rtw_os_recv_resource_alloc(struct adapter *padapter,
+ union recv_frame *precvframe)
+{
+ precvframe->u.hdr.pkt_newalloc = NULL;
+ precvframe->u.hdr.pkt = NULL;
+ return _SUCCESS;
+}
+
+/* free os related resource in union recv_frame */
+void rtw_os_recv_resource_free(struct recv_priv *precvpriv)
+{
+}
+
+/* alloc os related resource in struct recv_buf */
+int rtw_os_recvbuf_resource_alloc(struct adapter *padapter,
+ struct recv_buf *precvbuf)
+{
+ int res = _SUCCESS;
+
+ precvbuf->irp_pending = false;
+ precvbuf->purb = usb_alloc_urb(0, GFP_KERNEL);
+ if (precvbuf->purb == NULL)
+ res = _FAIL;
+ precvbuf->pskb = NULL;
+ precvbuf->reuse = false;
+ precvbuf->pallocated_buf = NULL;
+ precvbuf->pbuf = NULL;
+ precvbuf->pdata = NULL;
+ precvbuf->phead = NULL;
+ precvbuf->ptail = NULL;
+ precvbuf->pend = NULL;
+ precvbuf->transfer_len = 0;
+ precvbuf->len = 0;
+ return res;
+}
+
+/* free os related resource in struct recv_buf */
+int rtw_os_recvbuf_resource_free(struct adapter *padapter,
+ struct recv_buf *precvbuf)
+{
+ if (precvbuf->purb)
+ usb_free_urb(precvbuf->purb);
+ return _SUCCESS;
+}
+
+void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup)
+{
+ union iwreq_data wrqu;
+ struct iw_michaelmicfailure ev;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ u32 cur_time = 0;
+
+ if (psecuritypriv->last_mic_err_time == 0) {
+ psecuritypriv->last_mic_err_time = rtw_get_current_time();
+ } else {
+ cur_time = rtw_get_current_time();
+
+ if (cur_time - psecuritypriv->last_mic_err_time < 60*HZ) {
+ psecuritypriv->btkip_countermeasure = true;
+ psecuritypriv->last_mic_err_time = 0;
+ psecuritypriv->btkip_countermeasure_time = cur_time;
+ } else {
+ psecuritypriv->last_mic_err_time = rtw_get_current_time();
+ }
+ }
+
+ _rtw_memset(&ev, 0x00, sizeof(ev));
+ if (bgroup)
+ ev.flags |= IW_MICFAILURE_GROUP;
+ else
+ ev.flags |= IW_MICFAILURE_PAIRWISE;
+
+ ev.src_addr.sa_family = ARPHRD_ETHER;
+ memcpy(ev.src_addr.sa_data, &pmlmepriv->assoc_bssid[0], ETH_ALEN);
+ _rtw_memset(&wrqu, 0x00, sizeof(wrqu));
+ wrqu.data.length = sizeof(ev);
+ wireless_send_event(padapter->pnetdev, IWEVMICHAELMICFAILURE,
+ &wrqu, (char *)&ev);
+}
+
+void rtw_hostapd_mlme_rx(struct adapter *padapter,
+ union recv_frame *precv_frame)
+{
+}
+
+int rtw_recv_indicatepkt(struct adapter *padapter,
+ union recv_frame *precv_frame)
+{
+ struct recv_priv *precvpriv;
+ struct __queue *pfree_recv_queue;
+ struct sk_buff *skb;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+_func_enter_;
+
+ precvpriv = &(padapter->recvpriv);
+ pfree_recv_queue = &(precvpriv->free_recv_queue);
+
+ skb = precv_frame->u.hdr.pkt;
+ if (skb == NULL) {
+ RT_TRACE(_module_recv_osdep_c_, _drv_err_,
+ ("rtw_recv_indicatepkt():skb == NULL something wrong!!!!\n"));
+ goto _recv_indicatepkt_drop;
+ }
+
+ RT_TRACE(_module_recv_osdep_c_, _drv_info_,
+ ("rtw_recv_indicatepkt():skb != NULL !!!\n"));
+ RT_TRACE(_module_recv_osdep_c_, _drv_info_,
+ ("rtw_recv_indicatepkt():precv_frame->u.hdr.rx_head =%p precv_frame->hdr.rx_data =%p\n",
+ precv_frame->u.hdr.rx_head, precv_frame->u.hdr.rx_data));
+ RT_TRACE(_module_recv_osdep_c_, _drv_info_,
+ ("precv_frame->hdr.rx_tail =%p precv_frame->u.hdr.rx_end =%p precv_frame->hdr.len =%d\n",
+ precv_frame->u.hdr.rx_tail, precv_frame->u.hdr.rx_end,
+ precv_frame->u.hdr.len));
+
+ skb->data = precv_frame->u.hdr.rx_data;
+
+ skb_set_tail_pointer(skb, precv_frame->u.hdr.len);
+
+ skb->len = precv_frame->u.hdr.len;
+
+ RT_TRACE(_module_recv_osdep_c_, _drv_info_,
+ ("skb->head =%p skb->data =%p skb->tail =%p skb->end =%p skb->len =%d\n",
+ skb->head, skb->data, skb_tail_pointer(skb),
+ skb_end_pointer(skb), skb->len));
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ struct sk_buff *pskb2 = NULL;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ int bmcast = IS_MCAST(pattrib->dst);
+
+ if (!_rtw_memcmp(pattrib->dst, myid(&padapter->eeprompriv),
+ ETH_ALEN)) {
+ if (bmcast) {
+ psta = rtw_get_bcmc_stainfo(padapter);
+ pskb2 = skb_clone(skb, GFP_ATOMIC);
+ } else {
+ psta = rtw_get_stainfo(pstapriv, pattrib->dst);
+ }
+
+ if (psta) {
+ struct net_device *pnetdev;
+
+ pnetdev = (struct net_device *)padapter->pnetdev;
+ skb->dev = pnetdev;
+ skb_set_queue_mapping(skb, rtw_recv_select_queue(skb));
+
+ rtw_xmit_entry(skb, pnetdev);
+
+ if (bmcast)
+ skb = pskb2;
+ else
+ goto _recv_indicatepkt_end;
+ }
+ }
+ }
+
+ rcu_read_lock();
+ rcu_dereference(padapter->pnetdev->rx_handler_data);
+ rcu_read_unlock();
+
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->dev = padapter->pnetdev;
+ skb->protocol = eth_type_trans(skb, padapter->pnetdev);
+
+ netif_rx(skb);
+
+_recv_indicatepkt_end:
+
+	/* set the pkt pointer to NULL before rtw_free_recvframe() */
+ precv_frame->u.hdr.pkt = NULL;
+
+ rtw_free_recvframe(precv_frame, pfree_recv_queue);
+
+ RT_TRACE(_module_recv_osdep_c_, _drv_info_,
+ ("\n rtw_recv_indicatepkt :after netif_rx!!!!\n"));
+
+_func_exit_;
+
+ return _SUCCESS;
+
+_recv_indicatepkt_drop:
+
+ /* enqueue back to free_recv_queue */
+ if (precv_frame)
+ rtw_free_recvframe(precv_frame, pfree_recv_queue);
+
+_func_exit_;
+ return _FAIL;
+}
+
+void rtw_os_read_port(struct adapter *padapter, struct recv_buf *precvbuf)
+{
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+
+ precvbuf->ref_cnt--;
+ /* free skb in recv_buf */
+ dev_kfree_skb_any(precvbuf->pskb);
+ precvbuf->pskb = NULL;
+ precvbuf->reuse = false;
+ if (!precvbuf->irp_pending)
+ rtw_read_port(padapter, precvpriv->ff_hwaddr, 0,
+ (unsigned char *)precvbuf);
+}
+
+static void _rtw_reordering_ctrl_timeout_handler(void *func_context)
+{
+ struct recv_reorder_ctrl *preorder_ctrl;
+
+ preorder_ctrl = (struct recv_reorder_ctrl *)func_context;
+ rtw_reordering_ctrl_timeout_handler(preorder_ctrl);
+}
+
+void rtw_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl)
+{
+ struct adapter *padapter = preorder_ctrl->padapter;
+
+ _init_timer(&(preorder_ctrl->reordering_ctrl_timer), padapter->pnetdev, _rtw_reordering_ctrl_timeout_handler, preorder_ctrl);
+}
diff --git a/drivers/staging/rtl8188eu/os_dep/rtw_android.c b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
new file mode 100644
index 0000000..6cf71cc
--- /dev/null
+++ b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
@@ -0,0 +1,293 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+
+#include <rtw_android.h>
+#include <osdep_service.h>
+#include <rtw_debug.h>
+#include <ioctl_cfg80211.h>
+#include <rtw_ioctl_set.h>
+
+static const char *android_wifi_cmd_str[ANDROID_WIFI_CMD_MAX] = {
+ "START",
+ "STOP",
+ "SCAN-ACTIVE",
+ "SCAN-PASSIVE",
+ "RSSI",
+ "LINKSPEED",
+ "RXFILTER-START",
+ "RXFILTER-STOP",
+ "RXFILTER-ADD",
+ "RXFILTER-REMOVE",
+ "BTCOEXSCAN-START",
+ "BTCOEXSCAN-STOP",
+ "BTCOEXMODE",
+ "SETSUSPENDOPT",
+ "P2P_DEV_ADDR",
+ "SETFWPATH",
+ "SETBAND",
+ "GETBAND",
+ "COUNTRY",
+ "P2P_SET_NOA",
+ "P2P_GET_NOA",
+ "P2P_SET_PS",
+ "SET_AP_WPS_P2P_IE",
+ "MACADDR",
+ "BLOCK",
+ "WFD-ENABLE",
+ "WFD-DISABLE",
+ "WFD-SET-TCPPORT",
+ "WFD-SET-MAXTPUT",
+ "WFD-SET-DEVTYPE",
+};
+
+struct android_wifi_priv_cmd {
+ const char __user *buf;
+ int used_len;
+ int total_len;
+};
+
+/**
+ * Local (static) functions and variables
+ */
+
+/* Initialize g_wifi_on to 1 so dhd_bus_start will be called for the first
+ * time (only) in dhd_open; subsequent wifi-on requests are handled by
+ * wl_android_wifi_on
+ */
+static int g_wifi_on = true;
+
+int rtw_android_cmdstr_to_num(char *cmdstr)
+{
+ int cmd_num;
+ for (cmd_num = 0; cmd_num < ANDROID_WIFI_CMD_MAX; cmd_num++)
+		if (0 == strnicmp(cmdstr, android_wifi_cmd_str[cmd_num],
+ strlen(android_wifi_cmd_str[cmd_num])))
+ break;
+ return cmd_num;
+}
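
rtw_android_cmdstr_to_num() resolves an Android private command by a case-insensitive prefix match against the string table, returning the first matching index (or ANDROID_WIFI_CMD_MAX when nothing matches). A user-space sketch of the lookup with a trimmed table (the command names are the real ones; the helper itself is illustrative):

#include <stdio.h>
#include <string.h>
#include <strings.h>	/* strncasecmp */

static const char *cmd_str[] = { "START", "STOP", "RSSI", "LINKSPEED" };
#define CMD_MAX (sizeof(cmd_str) / sizeof(cmd_str[0]))

/* first table entry that is a case-insensitive prefix of cmdstr */
static unsigned int cmdstr_to_num(const char *cmdstr)
{
	unsigned int n;

	for (n = 0; n < CMD_MAX; n++)
		if (!strncasecmp(cmdstr, cmd_str[n], strlen(cmd_str[n])))
			break;
	return n;	/* CMD_MAX means "unknown command" */
}

int main(void)
{
	printf("%u\n", cmdstr_to_num("rssi"));		/* 2 */
	printf("%u\n", cmdstr_to_num("LINKSPEED 54"));	/* 3 */
	printf("%u\n", cmdstr_to_num("FOO"));		/* 4 = unknown */
	return 0;
}
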
+
+static int rtw_android_get_rssi(struct net_device *net, char *command,
+ int total_len)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(net);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_network *pcur_network = &pmlmepriv->cur_network;
+ int bytes_written = 0;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ bytes_written += snprintf(&command[bytes_written], total_len,
+ "%s rssi %d",
+ pcur_network->network.Ssid.Ssid,
+ padapter->recvpriv.rssi);
+ }
+ return bytes_written;
+}
+
+static int rtw_android_get_link_speed(struct net_device *net, char *command,
+ int total_len)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(net);
+ int bytes_written;
+ u16 link_speed;
+
+ link_speed = rtw_get_cur_max_rate(padapter) / 10;
+ bytes_written = snprintf(command, total_len, "LinkSpeed %d",
+ link_speed);
+ return bytes_written;
+}
+
+static int rtw_android_get_macaddr(struct net_device *net, char *command,
+ int total_len)
+{
+ int bytes_written;
+
+ bytes_written = snprintf(command, total_len, "Macaddr = %pM",
+ net->dev_addr);
+ return bytes_written;
+}
+
+static int android_set_cntry(struct net_device *net, char *command,
+ int total_len)
+{
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(net);
+ char *country_code = command + strlen(android_wifi_cmd_str[ANDROID_WIFI_CMD_COUNTRY]) + 1;
+ int ret;
+
+ ret = rtw_set_country(adapter, country_code);
+ return (ret == _SUCCESS) ? 0 : -1;
+}
+
+static int android_get_p2p_addr(struct net_device *net, char *command,
+ int total_len)
+{
+ /* We use the same address as our HW MAC address */
+ memcpy(command, net->dev_addr, ETH_ALEN);
+ return ETH_ALEN;
+}
+
+static int rtw_android_set_block(struct net_device *net, char *command,
+ int total_len)
+{
+ return 0;
+}
+
+int rtw_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+ int ret = 0;
+ char *command = NULL;
+ int cmd_num;
+ int bytes_written = 0;
+ struct android_wifi_priv_cmd priv_cmd;
+
+ rtw_lock_suspend();
+ if (!ifr->ifr_data) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (copy_from_user(&priv_cmd, ifr->ifr_data,
+ sizeof(struct android_wifi_priv_cmd))) {
+ ret = -EFAULT;
+ goto exit;
+ }
+ command = kmalloc(priv_cmd.total_len, GFP_KERNEL);
+ if (!command) {
+ DBG_88E("%s: failed to allocate memory\n", __func__);
+ ret = -ENOMEM;
+ goto exit;
+ }
+ if (!access_ok(VERIFY_READ, priv_cmd.buf, priv_cmd.total_len)) {
+ DBG_88E("%s: failed to access memory\n", __func__);
+ ret = -EFAULT;
+ goto exit;
+ }
+ if (copy_from_user(command, (char __user *)priv_cmd.buf,
+ priv_cmd.total_len)) {
+ ret = -EFAULT;
+ goto exit;
+ }
+ DBG_88E("%s: Android private cmd \"%s\" on %s\n",
+ __func__, command, ifr->ifr_name);
+ cmd_num = rtw_android_cmdstr_to_num(command);
+ switch (cmd_num) {
+ case ANDROID_WIFI_CMD_START:
+ goto response;
+ case ANDROID_WIFI_CMD_SETFWPATH:
+ goto response;
+ }
+ if (!g_wifi_on) {
+ DBG_88E("%s: Ignore private cmd \"%s\" - iface %s is down\n",
+ __func__, command, ifr->ifr_name);
+ ret = 0;
+ goto exit;
+ }
+ switch (cmd_num) {
+ case ANDROID_WIFI_CMD_STOP:
+ break;
+ case ANDROID_WIFI_CMD_SCAN_ACTIVE:
+ break;
+ case ANDROID_WIFI_CMD_SCAN_PASSIVE:
+ break;
+ case ANDROID_WIFI_CMD_RSSI:
+ bytes_written = rtw_android_get_rssi(net, command,
+ priv_cmd.total_len);
+ break;
+ case ANDROID_WIFI_CMD_LINKSPEED:
+ bytes_written = rtw_android_get_link_speed(net, command,
+ priv_cmd.total_len);
+ break;
+ case ANDROID_WIFI_CMD_MACADDR:
+ bytes_written = rtw_android_get_macaddr(net, command,
+ priv_cmd.total_len);
+ break;
+ case ANDROID_WIFI_CMD_BLOCK:
+ bytes_written = rtw_android_set_block(net, command,
+ priv_cmd.total_len);
+ break;
+ case ANDROID_WIFI_CMD_RXFILTER_START:
+ break;
+ case ANDROID_WIFI_CMD_RXFILTER_STOP:
+ break;
+ case ANDROID_WIFI_CMD_RXFILTER_ADD:
+ break;
+ case ANDROID_WIFI_CMD_RXFILTER_REMOVE:
+ break;
+ case ANDROID_WIFI_CMD_BTCOEXSCAN_START:
+ /* TBD: BTCOEXSCAN-START */
+ break;
+ case ANDROID_WIFI_CMD_BTCOEXSCAN_STOP:
+ /* TBD: BTCOEXSCAN-STOP */
+ break;
+ case ANDROID_WIFI_CMD_BTCOEXMODE:
+ break;
+ case ANDROID_WIFI_CMD_SETSUSPENDOPT:
+ break;
+ case ANDROID_WIFI_CMD_SETBAND:
+ break;
+ case ANDROID_WIFI_CMD_GETBAND:
+ break;
+ case ANDROID_WIFI_CMD_COUNTRY:
+ bytes_written = android_set_cntry(net, command,
+ priv_cmd.total_len);
+ break;
+ case ANDROID_WIFI_CMD_P2P_DEV_ADDR:
+ bytes_written = android_get_p2p_addr(net, command,
+ priv_cmd.total_len);
+ break;
+ case ANDROID_WIFI_CMD_P2P_SET_NOA:
+ break;
+ case ANDROID_WIFI_CMD_P2P_GET_NOA:
+ break;
+ case ANDROID_WIFI_CMD_P2P_SET_PS:
+ break;
+ default:
+ DBG_88E("Unknown PRIVATE command %s - ignored\n", command);
+ snprintf(command, 3, "OK");
+ bytes_written = strlen("OK");
+ }
+
+response:
+ if (bytes_written >= 0) {
+ if ((bytes_written == 0) && (priv_cmd.total_len > 0))
+ command[0] = '\0';
+ if (bytes_written >= priv_cmd.total_len) {
+ DBG_88E("%s: bytes_written = %d\n", __func__,
+ bytes_written);
+ bytes_written = priv_cmd.total_len;
+ } else {
+ bytes_written++;
+ }
+ priv_cmd.used_len = bytes_written;
+ if (copy_to_user((char __user *)priv_cmd.buf, command,
+ bytes_written)) {
+ DBG_88E("%s: failed to copy data to user buffer\n",
+ __func__);
+ ret = -EFAULT;
+ }
+ } else {
+ ret = bytes_written;
+ }
+exit:
+ rtw_unlock_suspend();
+ kfree(command);
+ return ret;
+}
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
new file mode 100644
index 0000000..9ca3180
--- /dev/null
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -0,0 +1,893 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _HCI_INTF_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <recv_osdep.h>
+#include <xmit_osdep.h>
+#include <hal_intf.h>
+#include <rtw_version.h>
+#include <linux/usb.h>
+#include <osdep_intf.h>
+
+#include <usb_vendor_req.h>
+#include <usb_ops.h>
+#include <usb_osintf.h>
+#include <usb_hal.h>
+#include <rtw_ioctl.h>
+
+int ui_pid[3] = {0, 0, 0};
+
+static int rtw_suspend(struct usb_interface *intf, pm_message_t message);
+static int rtw_resume(struct usb_interface *intf);
+
+
+static int rtw_drv_init(struct usb_interface *pusb_intf, const struct usb_device_id *pdid);
+static void rtw_dev_remove(struct usb_interface *pusb_intf);
+
+
+#define USB_VENDER_ID_REALTEK 0x0bda
+
+/* DID_USB_v916_20130116 */
+static struct usb_device_id rtw_usb_id_tbl[] = {
+ /*=== Realtek demoboard ===*/
+ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8179)}, /* 8188EUS */
+ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
+ /*=== Customer ID ===*/
+ /****** 8188EUS ********/
+ {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */
+ {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
+ {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, rtw_usb_id_tbl);
+
+static struct specific_device_id specific_device_id_tbl[] = {
+ {} /* empty table for now */
+};
+
+struct rtw_usb_drv {
+ struct usb_driver usbdrv;
+ int drv_registered;
+ struct mutex hw_init_mutex;
+};
+
+static struct rtw_usb_drv rtl8188e_usb_drv = {
+ .usbdrv.name = (char *)"r8188eu",
+ .usbdrv.probe = rtw_drv_init,
+ .usbdrv.disconnect = rtw_dev_remove,
+ .usbdrv.id_table = rtw_usb_id_tbl,
+ .usbdrv.suspend = rtw_suspend,
+ .usbdrv.resume = rtw_resume,
+ .usbdrv.reset_resume = rtw_resume,
+};
+
+static struct rtw_usb_drv *usb_drv = &rtl8188e_usb_drv;
+
+static inline int RT_usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd)
+{
+ return (epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN;
+}
+
+static inline int RT_usb_endpoint_dir_out(const struct usb_endpoint_descriptor *epd)
+{
+ return (epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT;
+}
+
+static inline int RT_usb_endpoint_xfer_int(const struct usb_endpoint_descriptor *epd)
+{
+ return (epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT;
+}
+
+static inline int RT_usb_endpoint_xfer_bulk(const struct usb_endpoint_descriptor *epd)
+{
+ return (epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK;
+}
+
+static inline int RT_usb_endpoint_is_bulk_in(const struct usb_endpoint_descriptor *epd)
+{
+ return RT_usb_endpoint_xfer_bulk(epd) && RT_usb_endpoint_dir_in(epd);
+}
+
+static inline int RT_usb_endpoint_is_bulk_out(const struct usb_endpoint_descriptor *epd)
+{
+ return RT_usb_endpoint_xfer_bulk(epd) && RT_usb_endpoint_dir_out(epd);
+}
+
+static inline int usb_endpoint_is_int(const struct usb_endpoint_descriptor *epd)
+{
+ return RT_usb_endpoint_xfer_int(epd) && RT_usb_endpoint_dir_in(epd);
+}
+
+static inline int RT_usb_endpoint_num(const struct usb_endpoint_descriptor *epd)
+{
+ return epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+}
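
The RT_usb_endpoint_* helpers above just decode two descriptor fields: bit 7 of bEndpointAddress gives the direction (0x80 = IN) and its low bits give the endpoint number, while the bottom two bits of bmAttributes select the transfer type (2 = bulk, 3 = interrupt). A standalone sketch of the same decoding, with the standard USB constants written out since linux/usb/ch9.h is not available in user space:

#include <stdio.h>

/* standard USB descriptor bit fields (USB 2.0, chapter 9) */
#define USB_DIR_IN			0x80
#define USB_ENDPOINT_DIR_MASK		0x80
#define USB_ENDPOINT_NUMBER_MASK	0x0f
#define USB_ENDPOINT_XFERTYPE_MASK	0x03
#define USB_ENDPOINT_XFER_BULK		2
#define USB_ENDPOINT_XFER_INT		3

static void decode_ep(unsigned char addr, unsigned char attr)
{
	int is_in = (addr & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN;
	int xfer = attr & USB_ENDPOINT_XFERTYPE_MASK;

	printf("ep %u %s %s\n",
	       addr & USB_ENDPOINT_NUMBER_MASK,
	       is_in ? "IN" : "OUT",
	       xfer == USB_ENDPOINT_XFER_BULK ? "bulk" :
	       xfer == USB_ENDPOINT_XFER_INT ? "interrupt" : "other");
}

int main(void)
{
	decode_ep(0x81, 0x02);	/* ep 1 IN bulk  - a typical rx pipe */
	decode_ep(0x02, 0x02);	/* ep 2 OUT bulk - a typical tx pipe */
	decode_ep(0x83, 0x03);	/* ep 3 IN interrupt */
	return 0;
}
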
+
+static u8 rtw_init_intf_priv(struct dvobj_priv *dvobj)
+{
+ u8 rst = _SUCCESS;
+
+ _rtw_mutex_init(&dvobj->usb_vendor_req_mutex);
+
+ dvobj->usb_alloc_vendor_req_buf = rtw_zmalloc(MAX_USB_IO_CTL_SIZE);
+ if (dvobj->usb_alloc_vendor_req_buf == NULL) {
+		DBG_88E("alloc usb_vendor_req_buf failed...\n");
+ rst = _FAIL;
+ goto exit;
+ }
+ dvobj->usb_vendor_req_buf = (u8 *)N_BYTE_ALIGMENT((size_t)(dvobj->usb_alloc_vendor_req_buf), ALIGNMENT_UNIT);
+exit:
+ return rst;
+}
+
+static u8 rtw_deinit_intf_priv(struct dvobj_priv *dvobj)
+{
+ u8 rst = _SUCCESS;
+
+ kfree(dvobj->usb_alloc_vendor_req_buf);
+ _rtw_mutex_free(&dvobj->usb_vendor_req_mutex);
+ return rst;
+}
+
+static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
+{
+ int i;
+ int status = _FAIL;
+ struct dvobj_priv *pdvobjpriv;
+ struct usb_host_config *phost_conf;
+ struct usb_config_descriptor *pconf_desc;
+ struct usb_host_interface *phost_iface;
+ struct usb_interface_descriptor *piface_desc;
+ struct usb_host_endpoint *phost_endp;
+ struct usb_endpoint_descriptor *pendp_desc;
+ struct usb_device *pusbd;
+
+_func_enter_;
+
+ pdvobjpriv = (struct dvobj_priv *)rtw_zmalloc(sizeof(*pdvobjpriv));
+ if (pdvobjpriv == NULL)
+ goto exit;
+
+ pdvobjpriv->pusbintf = usb_intf;
+ pusbd = interface_to_usbdev(usb_intf);
+ pdvobjpriv->pusbdev = pusbd;
+ usb_set_intfdata(usb_intf, pdvobjpriv);
+
+ pdvobjpriv->RtNumInPipes = 0;
+ pdvobjpriv->RtNumOutPipes = 0;
+
+ phost_conf = pusbd->actconfig;
+ pconf_desc = &phost_conf->desc;
+
+ phost_iface = &usb_intf->altsetting[0];
+ piface_desc = &phost_iface->desc;
+
+ pdvobjpriv->NumInterfaces = pconf_desc->bNumInterfaces;
+ pdvobjpriv->InterfaceNumber = piface_desc->bInterfaceNumber;
+ pdvobjpriv->nr_endpoint = piface_desc->bNumEndpoints;
+
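+	/* Walk the endpoint descriptors of altsetting 0 and record the
+	 * bulk-in, interrupt-in and bulk-out endpoint numbers. */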
+ for (i = 0; i < pdvobjpriv->nr_endpoint; i++) {
+ phost_endp = phost_iface->endpoint + i;
+ if (phost_endp) {
+ pendp_desc = &phost_endp->desc;
+
+ DBG_88E("\nusb_endpoint_descriptor(%d):\n", i);
+ DBG_88E("bLength=%x\n", pendp_desc->bLength);
+ DBG_88E("bDescriptorType=%x\n",
+ pendp_desc->bDescriptorType);
+ DBG_88E("bEndpointAddress=%x\n",
+ pendp_desc->bEndpointAddress);
+ DBG_88E("wMaxPacketSize=%d\n",
+ le16_to_cpu(pendp_desc->wMaxPacketSize));
+ DBG_88E("bInterval=%x\n", pendp_desc->bInterval);
+
+ if (RT_usb_endpoint_is_bulk_in(pendp_desc)) {
+ DBG_88E("RT_usb_endpoint_is_bulk_in = %x\n",
+ RT_usb_endpoint_num(pendp_desc));
+ pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] = RT_usb_endpoint_num(pendp_desc);
+ pdvobjpriv->RtNumInPipes++;
+			} else if (RT_usb_endpoint_is_int_in(pendp_desc)) {
+				DBG_88E("RT_usb_endpoint_is_int_in = %x, Interval = %x\n",
+ RT_usb_endpoint_num(pendp_desc),
+ pendp_desc->bInterval);
+ pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] = RT_usb_endpoint_num(pendp_desc);
+ pdvobjpriv->RtNumInPipes++;
+ } else if (RT_usb_endpoint_is_bulk_out(pendp_desc)) {
+ DBG_88E("RT_usb_endpoint_is_bulk_out = %x\n",
+ RT_usb_endpoint_num(pendp_desc));
+ pdvobjpriv->RtOutPipe[pdvobjpriv->RtNumOutPipes] = RT_usb_endpoint_num(pendp_desc);
+ pdvobjpriv->RtNumOutPipes++;
+ }
+ pdvobjpriv->ep_num[i] = RT_usb_endpoint_num(pendp_desc);
+ }
+ }
+
+ DBG_88E("nr_endpoint=%d, in_num=%d, out_num=%d\n\n",
+ pdvobjpriv->nr_endpoint, pdvobjpriv->RtNumInPipes,
+ pdvobjpriv->RtNumOutPipes);
+
+ if (pusbd->speed == USB_SPEED_HIGH) {
+ pdvobjpriv->ishighspeed = true;
+ DBG_88E("USB_SPEED_HIGH\n");
+ } else {
+ pdvobjpriv->ishighspeed = false;
+ DBG_88E("NON USB_SPEED_HIGH\n");
+ }
+
+ if (rtw_init_intf_priv(pdvobjpriv) == _FAIL) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_,
+ ("\n Can't INIT rtw_init_intf_priv\n"));
+ goto free_dvobj;
+ }
+
+ /* 3 misc */
+ _rtw_init_sema(&(pdvobjpriv->usb_suspend_sema), 0);
+ rtw_reset_continual_urb_error(pdvobjpriv);
+
+ usb_get_dev(pusbd);
+
+ status = _SUCCESS;
+
+free_dvobj:
+ if (status != _SUCCESS && pdvobjpriv) {
+ usb_set_intfdata(usb_intf, NULL);
+ kfree(pdvobjpriv);
+ pdvobjpriv = NULL;
+ }
+exit:
+_func_exit_;
+ return pdvobjpriv;
+}
+
+static void usb_dvobj_deinit(struct usb_interface *usb_intf)
+{
+ struct dvobj_priv *dvobj = usb_get_intfdata(usb_intf);
+
+_func_enter_;
+
+ usb_set_intfdata(usb_intf, NULL);
+ if (dvobj) {
+ /* Modify condition for 92DU DMDP 2010.11.18, by Thomas */
+ if ((dvobj->NumInterfaces != 2 &&
+ dvobj->NumInterfaces != 3) ||
+ (dvobj->InterfaceNumber == 1)) {
+ if (interface_to_usbdev(usb_intf)->state !=
+ USB_STATE_NOTATTACHED) {
+				/* If the USB dongle was not unplugged while the
+				 * module was removed and reinserted, the first
+				 * site survey after the device comes up fails.
+				 * Reset the USB port to work around that. */
+ DBG_88E("usb attached..., try to reset usb device\n");
+ usb_reset_device(interface_to_usbdev(usb_intf));
+ }
+ }
+ rtw_deinit_intf_priv(dvobj);
+ kfree(dvobj);
+ }
+
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+_func_exit_;
+}
+
+static void chip_by_usb_id(struct adapter *padapter,
+ const struct usb_device_id *pdid)
+{
+ padapter->chip_type = NULL_CHIP_TYPE;
+ hal_set_hw_type(padapter);
+}
+
+static void usb_intf_start(struct adapter *padapter)
+{
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+usb_intf_start\n"));
+
+ rtw_hal_inirp_init(padapter);
+
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-usb_intf_start\n"));
+}
+
+static void usb_intf_stop(struct adapter *padapter)
+{
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+usb_intf_stop\n"));
+
+	/* disable_hw_interrupt */
+ if (!padapter->bSurpriseRemoved) {
+ /* device still exists, so driver can do i/o operation */
+ /* TODO: */
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_,
+ ("SurpriseRemoved == false\n"));
+ }
+
+ /* cancel in irp */
+ rtw_hal_inirp_deinit(padapter);
+
+ /* cancel out irp */
+ rtw_write_port_cancel(padapter);
+
+ /* todo:cancel other irps */
+
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-usb_intf_stop\n"));
+}
+
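+/* Stop I/O and driver threads, then deinitialize the HAL before the
+ * adapter goes away. */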
+static void rtw_dev_unload(struct adapter *padapter)
+{
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+rtw_dev_unload\n"));
+
+ if (padapter->bup) {
+ DBG_88E("===> rtw_dev_unload\n");
+ padapter->bDriverStopped = true;
+ if (padapter->xmitpriv.ack_tx)
+ rtw_ack_tx_done(&padapter->xmitpriv, RTW_SCTX_DONE_DRV_STOP);
+ /* s3. */
+ if (padapter->intf_stop)
+ padapter->intf_stop(padapter);
+ /* s4. */
+ if (!padapter->pwrctrlpriv.bInternalAutoSuspend)
+ rtw_stop_drv_threads(padapter);
+
+ /* s5. */
+ if (!padapter->bSurpriseRemoved) {
+ rtw_hal_deinit(padapter);
+ padapter->bSurpriseRemoved = true;
+ }
+
+ padapter->bup = false;
+ } else {
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_,
+ ("r871x_dev_unload():padapter->bup == false\n"));
+ }
+
+ DBG_88E("<=== rtw_dev_unload\n");
+
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-rtw_dev_unload\n"));
+}
+
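+/* Disable HT, 40MHz bonding and AMPDU for any device in
+ * specific_device_id_tbl flagged with SPEC_DEV_ID_DISABLE_HT. */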
+static void process_spec_devid(const struct usb_device_id *pdid)
+{
+ u16 vid, pid;
+ u32 flags;
+ int i;
+	int num = ARRAY_SIZE(specific_device_id_tbl);
+
+ for (i = 0; i < num; i++) {
+ vid = specific_device_id_tbl[i].idVendor;
+ pid = specific_device_id_tbl[i].idProduct;
+ flags = specific_device_id_tbl[i].flags;
+
+ if ((pdid->idVendor == vid) && (pdid->idProduct == pid) &&
+ (flags&SPEC_DEV_ID_DISABLE_HT)) {
+ rtw_ht_enable = 0;
+ rtw_cbw40_enable = 0;
+ rtw_ampdu_enable = 0;
+ }
+ }
+}
+
+int rtw_hw_suspend(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct net_device *pnetdev = padapter->pnetdev;
+
+ _func_enter_;
+
+ if ((!padapter->bup) || (padapter->bDriverStopped) ||
+ (padapter->bSurpriseRemoved)) {
+ DBG_88E("padapter->bup=%d bDriverStopped=%d bSurpriseRemoved = %d\n",
+ padapter->bup, padapter->bDriverStopped,
+ padapter->bSurpriseRemoved);
+ goto error_exit;
+ }
+
+ if (padapter) { /* system suspend */
+ LeaveAllPowerSaveMode(padapter);
+
+ DBG_88E("==> rtw_hw_suspend\n");
+ _enter_pwrlock(&pwrpriv->lock);
+ pwrpriv->bips_processing = true;
+ /* s1. */
+ if (pnetdev) {
+ netif_carrier_off(pnetdev);
+ rtw_netif_stop_queue(pnetdev);
+ }
+
+ /* s2. */
+ rtw_disassoc_cmd(padapter, 500, false);
+
+ /* s2-2. indicate disconnect to os */
+ {
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ _clr_fwstate_(pmlmepriv, _FW_LINKED);
+
+ rtw_led_control(padapter, LED_CTL_NO_LINK);
+
+ rtw_os_indicate_disconnect(padapter);
+
+				/* do not enqueue cmd */
+ rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_DISCONNECT, 0);
+ }
+ }
+ /* s2-3. */
+ rtw_free_assoc_resources(padapter, 1);
+
+ /* s2-4. */
+ rtw_free_network_queue(padapter, true);
+ rtw_ips_dev_unload(padapter);
+ pwrpriv->rf_pwrstate = rf_off;
+ pwrpriv->bips_processing = false;
+
+ _exit_pwrlock(&pwrpriv->lock);
+ } else {
+ goto error_exit;
+ }
+ _func_exit_;
+ return 0;
+
+error_exit:
+ DBG_88E("%s, failed\n", __func__);
+ return -1;
+}
+
+int rtw_hw_resume(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct net_device *pnetdev = padapter->pnetdev;
+
+ _func_enter_;
+
+ if (padapter) { /* system resume */
+ DBG_88E("==> rtw_hw_resume\n");
+ _enter_pwrlock(&pwrpriv->lock);
+ pwrpriv->bips_processing = true;
+ rtw_reset_drv_sw(padapter);
+
+ if (pm_netdev_open(pnetdev, false) != 0) {
+ _exit_pwrlock(&pwrpriv->lock);
+ goto error_exit;
+ }
+
+ netif_device_attach(pnetdev);
+ netif_carrier_on(pnetdev);
+
+ if (!netif_queue_stopped(pnetdev))
+ netif_start_queue(pnetdev);
+ else
+ netif_wake_queue(pnetdev);
+
+ pwrpriv->bkeepfwalive = false;
+ pwrpriv->brfoffbyhw = false;
+
+ pwrpriv->rf_pwrstate = rf_on;
+ pwrpriv->bips_processing = false;
+
+ _exit_pwrlock(&pwrpriv->lock);
+ } else {
+ goto error_exit;
+ }
+
+ _func_exit_;
+
+ return 0;
+error_exit:
+ DBG_88E("%s, Open net dev failed\n", __func__);
+ return -1;
+}
+
+static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
+{
+ struct dvobj_priv *dvobj = usb_get_intfdata(pusb_intf);
+ struct adapter *padapter = dvobj->if1;
+ struct net_device *pnetdev = padapter->pnetdev;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+
+ int ret = 0;
+ u32 start_time = rtw_get_current_time();
+
+ _func_enter_;
+
+ DBG_88E("==> %s (%s:%d)\n", __func__, current->comm, current->pid);
+
+ if ((!padapter->bup) || (padapter->bDriverStopped) ||
+ (padapter->bSurpriseRemoved)) {
+ DBG_88E("padapter->bup=%d bDriverStopped=%d bSurpriseRemoved = %d\n",
+ padapter->bup, padapter->bDriverStopped,
+ padapter->bSurpriseRemoved);
+ goto exit;
+ }
+
+ pwrpriv->bInSuspend = true;
+ rtw_cancel_all_timer(padapter);
+ LeaveAllPowerSaveMode(padapter);
+
+ _enter_pwrlock(&pwrpriv->lock);
+ /* s1. */
+ if (pnetdev) {
+ netif_carrier_off(pnetdev);
+ rtw_netif_stop_queue(pnetdev);
+ }
+
+ /* s2. */
+ rtw_disassoc_cmd(padapter, 0, false);
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) &&
+ check_fwstate(pmlmepriv, _FW_LINKED)) {
+ DBG_88E("%s:%d %s(%pM), length:%d assoc_ssid.length:%d\n",
+ __func__, __LINE__,
+ pmlmepriv->cur_network.network.Ssid.Ssid,
+ pmlmepriv->cur_network.network.MacAddress,
+ pmlmepriv->cur_network.network.Ssid.SsidLength,
+ pmlmepriv->assoc_ssid.SsidLength);
+
+ pmlmepriv->to_roaming = 1;
+ }
+ /* s2-2. indicate disconnect to os */
+ rtw_indicate_disconnect(padapter);
+ /* s2-3. */
+ rtw_free_assoc_resources(padapter, 1);
+ /* s2-4. */
+ rtw_free_network_queue(padapter, true);
+
+ rtw_dev_unload(padapter);
+ _exit_pwrlock(&pwrpriv->lock);
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
+ rtw_indicate_scan_done(padapter, 1);
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
+ rtw_indicate_disconnect(padapter);
+
+exit:
+	DBG_88E("<=== %s return %d.............. in %dms\n", __func__,
+		ret, rtw_get_passing_time_ms(start_time));
+
+ _func_exit_;
+ return ret;
+}
+
+static int rtw_resume(struct usb_interface *pusb_intf)
+{
+	struct dvobj_priv *dvobj = usb_get_intfdata(pusb_intf);
+	struct adapter *padapter = dvobj->if1;
+
+	/* The internal-autosuspend and the system resume paths are currently
+	 * identical, so both simply run rtw_resume_process(). */
+	return rtw_resume_process(padapter);
+}
+
+int rtw_resume_process(struct adapter *padapter)
+{
+ struct net_device *pnetdev;
+ struct pwrctrl_priv *pwrpriv = NULL;
+ int ret = -1;
+ u32 start_time = rtw_get_current_time();
+ _func_enter_;
+
+ DBG_88E("==> %s (%s:%d)\n", __func__, current->comm, current->pid);
+
+ if (padapter) {
+ pnetdev = padapter->pnetdev;
+ pwrpriv = &padapter->pwrctrlpriv;
+ } else {
+ goto exit;
+ }
+
+ _enter_pwrlock(&pwrpriv->lock);
+ rtw_reset_drv_sw(padapter);
+ if (pwrpriv)
+ pwrpriv->bkeepfwalive = false;
+
+ DBG_88E("bkeepfwalive(%x)\n", pwrpriv->bkeepfwalive);
+	if (pm_netdev_open(pnetdev, true) != 0) {
+		_exit_pwrlock(&pwrpriv->lock);
+		goto exit;
+	}
+
+ netif_device_attach(pnetdev);
+ netif_carrier_on(pnetdev);
+
+ _exit_pwrlock(&pwrpriv->lock);
+
+ if (padapter->pid[1] != 0) {
+ DBG_88E("pid[1]:%d\n", padapter->pid[1]);
+ rtw_signal_process(padapter->pid[1], SIGUSR2);
+ }
+
+ rtw_roaming(padapter, NULL);
+
+ ret = 0;
+exit:
+ if (pwrpriv)
+ pwrpriv->bInSuspend = false;
+ DBG_88E("<=== %s return %d.............. in %dms\n", __func__,
+ ret, rtw_get_passing_time_ms(start_time));
+
+ _func_exit_;
+
+ return ret;
+}
+
+/*
+ * drv_init() - a device potentially for us
+ *
+ * notes: drv_init() is called when the bus driver has located
+ * a card for us to support.
+ * We accept the new device by returning 0.
+ */
+
+static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
+ struct usb_interface *pusb_intf, const struct usb_device_id *pdid)
+{
+ struct adapter *padapter = NULL;
+ struct net_device *pnetdev = NULL;
+ int status = _FAIL;
+
+ padapter = (struct adapter *)rtw_zvmalloc(sizeof(*padapter));
+ if (padapter == NULL)
+ goto exit;
+ padapter->dvobj = dvobj;
+ dvobj->if1 = padapter;
+
+ padapter->bDriverStopped = true;
+
+ padapter->hw_init_mutex = &usb_drv->hw_init_mutex;
+
+ /* step 1-1., decide the chip_type via vid/pid */
+ padapter->interface_type = RTW_USB;
+ chip_by_usb_id(padapter, pdid);
+
+ if (rtw_handle_dualmac(padapter, 1) != _SUCCESS)
+ goto free_adapter;
+
+ pnetdev = rtw_init_netdev(padapter);
+ if (pnetdev == NULL)
+ goto handle_dualmac;
+ SET_NETDEV_DEV(pnetdev, dvobj_to_dev(dvobj));
+ padapter = rtw_netdev_priv(pnetdev);
+
+ /* step 2. hook HalFunc, allocate HalData */
+ hal_set_hal_ops(padapter);
+
+ padapter->intf_start = &usb_intf_start;
+ padapter->intf_stop = &usb_intf_stop;
+
+ /* step init_io_priv */
+ rtw_init_io_priv(padapter, usb_set_intf_ops);
+
+ /* step read_chip_version */
+ rtw_hal_read_chip_version(padapter);
+
+ /* step usb endpoint mapping */
+ rtw_hal_chip_configure(padapter);
+
+ /* step read efuse/eeprom data and get mac_addr */
+ rtw_hal_read_chip_info(padapter);
+
+ /* step 5. */
+ if (rtw_init_drv_sw(padapter) == _FAIL) {
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_,
+ ("Initialize driver software resource Failed!\n"));
+ goto free_hal_data;
+ }
+
+#ifdef CONFIG_PM
+ if (padapter->pwrctrlpriv.bSupportRemoteWakeup) {
+ dvobj->pusbdev->do_remote_wakeup = 1;
+ pusb_intf->needs_remote_wakeup = 1;
+ device_init_wakeup(&pusb_intf->dev, 1);
+ DBG_88E("\n padapter->pwrctrlpriv.bSupportRemoteWakeup~~~~~~\n");
+ DBG_88E("\n padapter->pwrctrlpriv.bSupportRemoteWakeup~~~[%d]~~~\n",
+ device_may_wakeup(&pusb_intf->dev));
+ }
+#endif
+
+	/* 2012-07-11: moved here to avoid interference from the
+	 * 8723AS-VAU BT auto-suspend. */
+ if (usb_autopm_get_interface(pusb_intf) < 0)
+ DBG_88E("can't get autopm:\n");
+
+ /* alloc dev name after read efuse. */
+ rtw_init_netdev_name(pnetdev, padapter->registrypriv.ifname);
+ rtw_macaddr_cfg(padapter->eeprompriv.mac_addr);
+#ifdef CONFIG_88EU_P2P
+ rtw_init_wifidirect_addrs(padapter, padapter->eeprompriv.mac_addr,
+ padapter->eeprompriv.mac_addr);
+#endif
+ memcpy(pnetdev->dev_addr, padapter->eeprompriv.mac_addr, ETH_ALEN);
+ DBG_88E("MAC Address from pnetdev->dev_addr = %pM\n",
+ pnetdev->dev_addr);
+
+ /* step 6. Tell the network stack we exist */
+ if (register_netdev(pnetdev) != 0) {
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("register_netdev() failed\n"));
+ goto free_hal_data;
+ }
+
+ DBG_88E("bDriverStopped:%d, bSurpriseRemoved:%d, bup:%d, hw_init_completed:%d\n"
+ , padapter->bDriverStopped
+ , padapter->bSurpriseRemoved
+ , padapter->bup
+ , padapter->hw_init_completed
+ );
+
+ status = _SUCCESS;
+
+free_hal_data:
+ if (status != _SUCCESS && padapter->HalData)
+ kfree(padapter->HalData);
+handle_dualmac:
+ if (status != _SUCCESS)
+ rtw_handle_dualmac(padapter, 0);
+free_adapter:
+ if (status != _SUCCESS) {
+ if (pnetdev)
+ rtw_free_netdev(pnetdev);
+ else if (padapter)
+ rtw_vmfree((u8 *)padapter, sizeof(*padapter));
+ padapter = NULL;
+ }
+exit:
+ return padapter;
+}
+
+static void rtw_usb_if1_deinit(struct adapter *if1)
+{
+ struct net_device *pnetdev = if1->pnetdev;
+ struct mlme_priv *pmlmepriv = &if1->mlmepriv;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED))
+ rtw_disassoc_cmd(if1, 0, false);
+
+#ifdef CONFIG_88EU_AP_MODE
+ free_mlme_ap_info(if1);
+#endif
+
+ if (if1->DriverState != DRIVER_DISAPPEAR) {
+ if (pnetdev) {
+ /* will call netdev_close() */
+ unregister_netdev(pnetdev);
+ rtw_proc_remove_one(pnetdev);
+ }
+ }
+ rtw_cancel_all_timer(if1);
+
+ rtw_dev_unload(if1);
+ DBG_88E("+r871xu_dev_remove, hw_init_completed=%d\n",
+ if1->hw_init_completed);
+ rtw_handle_dualmac(if1, 0);
+ rtw_free_drv_sw(if1);
+ if (pnetdev)
+ rtw_free_netdev(pnetdev);
+}
+
+static int rtw_drv_init(struct usb_interface *pusb_intf, const struct usb_device_id *pdid)
+{
+ struct adapter *if1 = NULL;
+	int status = _FAIL;
+ struct dvobj_priv *dvobj;
+
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+rtw_drv_init\n"));
+
+ /* step 0. */
+ process_spec_devid(pdid);
+
+ /* Initialize dvobj_priv */
+ dvobj = usb_dvobj_init(pusb_intf);
+ if (dvobj == NULL) {
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_,
+ ("initialize device object priv Failed!\n"));
+ goto exit;
+ }
+
+ if1 = rtw_usb_if1_init(dvobj, pusb_intf, pdid);
+ if (if1 == NULL) {
+		DBG_88E("rtw_usb_if1_init failed!\n");
+ goto free_dvobj;
+ }
+
+ if (ui_pid[1] != 0) {
+ DBG_88E("ui_pid[1]:%d\n", ui_pid[1]);
+ rtw_signal_process(ui_pid[1], SIGUSR2);
+ }
+
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-871x_drv - drv_init, success!\n"));
+
+ status = _SUCCESS;
+
+ if (status != _SUCCESS && if1)
+ rtw_usb_if1_deinit(if1);
+free_dvobj:
+ if (status != _SUCCESS)
+ usb_dvobj_deinit(pusb_intf);
+exit:
+ return status == _SUCCESS ? 0 : -ENODEV;
+}
+
+/*
+ * dev_remove() - our device is being removed
+*/
+/* Both rmmod and an unplug (SurpriseRemoved) end up in rtw_dev_remove();
+ * telling the two cases apart is still an open question. */
+static void rtw_dev_remove(struct usb_interface *pusb_intf)
+{
+ struct dvobj_priv *dvobj = usb_get_intfdata(pusb_intf);
+ struct adapter *padapter = dvobj->if1;
+
+_func_enter_;
+
+ DBG_88E("+rtw_dev_remove\n");
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+dev_remove()\n"));
+
+ if (usb_drv->drv_registered)
+ padapter->bSurpriseRemoved = true;
+
+ rtw_pm_set_ips(padapter, IPS_NONE);
+ rtw_pm_set_lps(padapter, PS_MODE_ACTIVE);
+
+ LeaveAllPowerSaveMode(padapter);
+
+ rtw_usb_if1_deinit(padapter);
+
+ usb_dvobj_deinit(pusb_intf);
+
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-dev_remove()\n"));
+ DBG_88E("-r871xu_dev_remove, done\n");
+_func_exit_;
+
+ return;
+}
+
+static int __init rtw_drv_entry(void)
+{
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+rtw_drv_entry\n"));
+
+ DBG_88E(DRV_NAME " driver version=%s\n", DRIVERVERSION);
+ DBG_88E("build time: %s %s\n", __DATE__, __TIME__);
+
+ rtw_suspend_lock_init();
+
+ _rtw_mutex_init(&usb_drv->hw_init_mutex);
+
+ usb_drv->drv_registered = true;
+ return usb_register(&usb_drv->usbdrv);
+}
+
+static void __exit rtw_drv_halt(void)
+{
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+rtw_drv_halt\n"));
+ DBG_88E("+rtw_drv_halt\n");
+
+ rtw_suspend_lock_uninit();
+
+ usb_drv->drv_registered = false;
+ usb_deregister(&usb_drv->usbdrv);
+
+ _rtw_mutex_free(&usb_drv->hw_init_mutex);
+ DBG_88E("-rtw_drv_halt\n");
+}
+
+module_init(rtw_drv_entry);
+module_exit(rtw_drv_halt);
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
new file mode 100644
index 0000000..4c71e3b
--- /dev/null
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -0,0 +1,288 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ ******************************************************************************/
+#define _USB_OPS_LINUX_C_
+
+#include <drv_types.h>
+#include <usb_ops_linux.h>
+#include <rtw_sreset.h>
+
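+/* Translate a transmit FIFO address (or one of the RECV_*_IN_ADDR
+ * pseudo-addresses) into the USB pipe handle of the matching endpoint. */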
+unsigned int ffaddr2pipehdl(struct dvobj_priv *pdvobj, u32 addr)
+{
+ unsigned int pipe = 0, ep_num = 0;
+ struct usb_device *pusbd = pdvobj->pusbdev;
+
+ if (addr == RECV_BULK_IN_ADDR) {
+ pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[0]);
+ } else if (addr == RECV_INT_IN_ADDR) {
+ pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[1]);
+ } else if (addr < HW_QUEUE_ENTRY) {
+ ep_num = pdvobj->Queue2Pipe[addr];
+ pipe = usb_sndbulkpipe(pusbd, ep_num);
+ }
+
+ return pipe;
+}
+
+struct zero_bulkout_context {
+ void *pbuf;
+ void *purb;
+ void *pirp;
+ void *padapter;
+};
+
+void usb_read_mem(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem)
+{
+}
+
+void usb_write_mem(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem)
+{
+}
+
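+/* Flag read cancellation and kill every pending receive URB. */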
+void usb_read_port_cancel(struct intf_hdl *pintfhdl)
+{
+ int i;
+ struct recv_buf *precvbuf;
+ struct adapter *padapter = pintfhdl->padapter;
+ precvbuf = (struct recv_buf *)padapter->recvpriv.precv_buf;
+
+ DBG_88E("%s\n", __func__);
+
+ padapter->bReadPortCancel = true;
+
+ for (i = 0; i < NR_RECVBUFF; i++) {
+ precvbuf->reuse = true;
+ if (precvbuf->purb)
+ usb_kill_urb(precvbuf->purb);
+ precvbuf++;
+ }
+}
+
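+/* Bulk-out completion handler: update the per-queue counters, handle fatal
+ * URB errors, then recycle the xmit buffer and kick the xmit tasklet. */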
+static void usb_write_port_complete(struct urb *purb)
+{
+ struct xmit_buf *pxmitbuf = (struct xmit_buf *)purb->context;
+ struct adapter *padapter = pxmitbuf->padapter;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct hal_data_8188e *haldata;
+
+_func_enter_;
+
+ switch (pxmitbuf->flags) {
+ case VO_QUEUE_INX:
+ pxmitpriv->voq_cnt--;
+ break;
+ case VI_QUEUE_INX:
+ pxmitpriv->viq_cnt--;
+ break;
+ case BE_QUEUE_INX:
+ pxmitpriv->beq_cnt--;
+ break;
+ case BK_QUEUE_INX:
+ pxmitpriv->bkq_cnt--;
+ break;
+ case HIGH_QUEUE_INX:
+#ifdef CONFIG_88EU_AP_MODE
+ rtw_chk_hi_queue_cmd(padapter);
+#endif
+ break;
+ default:
+ break;
+ }
+
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped ||
+ padapter->bWritePortCancel) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_write_port_complete:bDriverStopped(%d) OR bSurpriseRemoved(%d)",
+ padapter->bDriverStopped, padapter->bSurpriseRemoved));
+ DBG_88E("%s(): TX Warning! bDriverStopped(%d) OR bSurpriseRemoved(%d) bWritePortCancel(%d) pxmitbuf->ext_tag(%x)\n",
+ __func__, padapter->bDriverStopped,
+			padapter->bSurpriseRemoved, padapter->bWritePortCancel,
+ pxmitbuf->ext_tag);
+
+ goto check_completion;
+ }
+
+ if (purb->status) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port_complete : purb->status(%d) != 0\n", purb->status));
+ DBG_88E("###=> urb_write_port_complete status(%d)\n", purb->status);
+ if ((purb->status == -EPIPE) || (purb->status == -EPROTO)) {
+ sreset_set_wifi_error_status(padapter, USB_WRITE_PORT_FAIL);
+ } else if (purb->status == -EINPROGRESS) {
+			RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port_complete: EINPROGRESS\n"));
+ goto check_completion;
+ } else if (purb->status == -ENOENT) {
+ DBG_88E("%s: -ENOENT\n", __func__);
+ goto check_completion;
+ } else if (purb->status == -ECONNRESET) {
+ DBG_88E("%s: -ECONNRESET\n", __func__);
+ goto check_completion;
+ } else if (purb->status == -ESHUTDOWN) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port_complete: ESHUTDOWN\n"));
+ padapter->bDriverStopped = true;
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port_complete:bDriverStopped = true\n"));
+ goto check_completion;
+ } else {
+ padapter->bSurpriseRemoved = true;
+ DBG_88E("bSurpriseRemoved = true\n");
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port_complete:bSurpriseRemoved = true\n"));
+
+ goto check_completion;
+ }
+ }
+
+ haldata = GET_HAL_DATA(padapter);
+ haldata->srestpriv.last_tx_complete_time = rtw_get_current_time();
+
+check_completion:
+ rtw_sctx_done_err(&pxmitbuf->sctx,
+ purb->status ? RTW_SCTX_DONE_WRITE_PORT_ERR :
+ RTW_SCTX_DONE_SUCCESS);
+
+ rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
+
+ tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
+
+_func_exit_;
+}
+
+u32 usb_write_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem)
+{
+ unsigned long irqL;
+ unsigned int pipe;
+ int status;
+ u32 ret = _FAIL;
+ struct urb *purb = NULL;
+ struct adapter *padapter = (struct adapter *)pintfhdl->padapter;
+ struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter);
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct xmit_buf *pxmitbuf = (struct xmit_buf *)wmem;
+ struct xmit_frame *pxmitframe = (struct xmit_frame *)pxmitbuf->priv_data;
+ struct usb_device *pusbd = pdvobj->pusbdev;
+
+_func_enter_;
+
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("+usb_write_port\n"));
+
+ if ((padapter->bDriverStopped) || (padapter->bSurpriseRemoved) ||
+ (padapter->pwrctrlpriv.pnp_bstop_trx)) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+			 ("usb_write_port: (padapter->bDriverStopped || padapter->bSurpriseRemoved || padapter->pwrctrlpriv.pnp_bstop_trx)!!!\n"));
+ rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_TX_DENY);
+ goto exit;
+ }
+
+ _enter_critical(&pxmitpriv->lock, &irqL);
+
+ switch (addr) {
+ case VO_QUEUE_INX:
+ pxmitpriv->voq_cnt++;
+ pxmitbuf->flags = VO_QUEUE_INX;
+ break;
+ case VI_QUEUE_INX:
+ pxmitpriv->viq_cnt++;
+ pxmitbuf->flags = VI_QUEUE_INX;
+ break;
+ case BE_QUEUE_INX:
+ pxmitpriv->beq_cnt++;
+ pxmitbuf->flags = BE_QUEUE_INX;
+ break;
+ case BK_QUEUE_INX:
+ pxmitpriv->bkq_cnt++;
+ pxmitbuf->flags = BK_QUEUE_INX;
+ break;
+ case HIGH_QUEUE_INX:
+ pxmitbuf->flags = HIGH_QUEUE_INX;
+ break;
+ default:
+ pxmitbuf->flags = MGT_QUEUE_INX;
+ break;
+ }
+
+ _exit_critical(&pxmitpriv->lock, &irqL);
+
+ purb = pxmitbuf->pxmit_urb[0];
+
+ /* translate DMA FIFO addr to pipehandle */
+ pipe = ffaddr2pipehdl(pdvobj, addr);
+
+ usb_fill_bulk_urb(purb, pusbd, pipe,
+ pxmitframe->buf_addr, /* pxmitbuf->pbuf */
+ cnt,
+ usb_write_port_complete,
+ pxmitbuf);/* context is pxmitbuf */
+
+ status = usb_submit_urb(purb, GFP_ATOMIC);
+ if (!status) {
+ struct hal_data_8188e *haldata = GET_HAL_DATA(padapter);
+
+ haldata->srestpriv.last_tx_time = rtw_get_current_time();
+ } else {
+ rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_WRITE_PORT_ERR);
+ DBG_88E("usb_write_port, status =%d\n", status);
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port(): usb_submit_urb, status =%x\n", status));
+
+ switch (status) {
+ case -ENODEV:
+ padapter->bDriverStopped = true;
+ break;
+ default:
+ break;
+ }
+ goto exit;
+ }
+
+ ret = _SUCCESS;
+
+/* Note: URB_ZERO_PACKET is not set on this urb; setting it would make the
+ * host controller send the trailing zero-length packet automatically. */
+
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("-usb_write_port\n"));
+
+exit:
+ if (ret != _SUCCESS)
+ rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
+_func_exit_;
+ return ret;
+}
+
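+/* Flag write cancellation and kill every pending xmit URB on both the
+ * regular and the extension xmit buffers. */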
+void usb_write_port_cancel(struct intf_hdl *pintfhdl)
+{
+ int i, j;
+ struct adapter *padapter = pintfhdl->padapter;
+ struct xmit_buf *pxmitbuf = (struct xmit_buf *)padapter->xmitpriv.pxmitbuf;
+
+ DBG_88E("%s\n", __func__);
+
+ padapter->bWritePortCancel = true;
+
+ for (i = 0; i < NR_XMITBUFF; i++) {
+ for (j = 0; j < 8; j++) {
+ if (pxmitbuf->pxmit_urb[j])
+ usb_kill_urb(pxmitbuf->pxmit_urb[j]);
+ }
+ pxmitbuf++;
+ }
+
+ pxmitbuf = (struct xmit_buf *)padapter->xmitpriv.pxmit_extbuf;
+ for (i = 0; i < NR_XMIT_EXTBUFF; i++) {
+ for (j = 0; j < 8; j++) {
+ if (pxmitbuf->pxmit_urb[j])
+ usb_kill_urb(pxmitbuf->pxmit_urb[j]);
+ }
+ pxmitbuf++;
+ }
+}
diff --git a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
new file mode 100644
index 0000000..2e586c0
--- /dev/null
+++ b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
@@ -0,0 +1,290 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _XMIT_OSDEP_C_
+
+#include <linux/version.h>
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#include <if_ether.h>
+#include <ip.h>
+#include <wifi.h>
+#include <mlme_osdep.h>
+#include <xmit_osdep.h>
+#include <osdep_intf.h>
+#include <usb_osintf.h>
+
+uint rtw_remainder_len(struct pkt_file *pfile)
+{
+ return pfile->buf_len - ((size_t)(pfile->cur_addr) -
+ (size_t)(pfile->buf_start));
+}
+
+void _rtw_open_pktfile(struct sk_buff *pktptr, struct pkt_file *pfile)
+{
+_func_enter_;
+
+ pfile->pkt = pktptr;
+ pfile->cur_addr = pktptr->data;
+ pfile->buf_start = pktptr->data;
+ pfile->pkt_len = pktptr->len;
+ pfile->buf_len = pktptr->len;
+
+ pfile->cur_buffer = pfile->buf_start;
+
+_func_exit_;
+}
+
+uint _rtw_pktfile_read (struct pkt_file *pfile, u8 *rmem, uint rlen)
+{
+ uint len = 0;
+
+_func_enter_;
+
+ len = rtw_remainder_len(pfile);
+ len = (rlen > len) ? len : rlen;
+
+ if (rmem)
+ skb_copy_bits(pfile->pkt, pfile->buf_len-pfile->pkt_len, rmem, len);
+
+ pfile->cur_addr += len;
+ pfile->pkt_len -= len;
+
+_func_exit_;
+
+ return len;
+}
+
+int rtw_endofpktfile(struct pkt_file *pfile)
+{
+_func_enter_;
+
+ if (pfile->pkt_len == 0) {
+ _func_exit_;
+ return true;
+ }
+
+_func_exit_;
+
+ return false;
+}
+
+void rtw_set_tx_chksum_offload(struct sk_buff *pkt, struct pkt_attrib *pattrib)
+{
+}
+
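+/* Allocate one aligned transmit buffer plus the eight URBs used to submit
+ * it to the bulk-out endpoints. */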
+int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitbuf, u32 alloc_sz)
+{
+ int i;
+
+ pxmitbuf->pallocated_buf = rtw_zmalloc(alloc_sz);
+ if (pxmitbuf->pallocated_buf == NULL)
+ return _FAIL;
+
+ pxmitbuf->pbuf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitbuf->pallocated_buf), XMITBUF_ALIGN_SZ);
+ pxmitbuf->dma_transfer_addr = 0;
+
+ for (i = 0; i < 8; i++) {
+ pxmitbuf->pxmit_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
+ if (pxmitbuf->pxmit_urb[i] == NULL) {
+			DBG_88E("pxmitbuf->pxmit_urb[i]==NULL\n");
+ return _FAIL;
+ }
+ }
+ return _SUCCESS;
+}
+
+void rtw_os_xmit_resource_free(struct adapter *padapter,
+ struct xmit_buf *pxmitbuf, u32 free_sz)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ usb_free_urb(pxmitbuf->pxmit_urb[i]);
+
+ kfree(pxmitbuf->pallocated_buf);
+}
+
+#define WMM_XMIT_THRESHOLD (NR_XMITFRAME*2/5)
+
+void rtw_os_pkt_complete(struct adapter *padapter, struct sk_buff *pkt)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
+ u16 queue;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ queue = skb_get_queue_mapping(pkt);
+ if (padapter->registrypriv.wifi_spec) {
+ if (__netif_subqueue_stopped(padapter->pnetdev, queue) &&
+ (pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD))
+ netif_wake_subqueue(padapter->pnetdev, queue);
+ } else {
+ if (__netif_subqueue_stopped(padapter->pnetdev, queue))
+ netif_wake_subqueue(padapter->pnetdev, queue);
+ }
+#else
+ if (netif_queue_stopped(padapter->pnetdev))
+ netif_wake_queue(padapter->pnetdev);
+#endif
+
+ dev_kfree_skb_any(pkt);
+}
+
+void rtw_os_xmit_complete(struct adapter *padapter, struct xmit_frame *pxframe)
+{
+ if (pxframe->pkt)
+ rtw_os_pkt_complete(padapter, pxframe->pkt);
+ pxframe->pkt = NULL;
+}
+
+void rtw_os_xmit_schedule(struct adapter *padapter)
+{
+ unsigned long irql;
+ struct xmit_priv *pxmitpriv;
+
+ if (!padapter)
+ return;
+
+ pxmitpriv = &padapter->xmitpriv;
+
+ _enter_critical_bh(&pxmitpriv->lock, &irql);
+
+ if (rtw_txframes_pending(padapter))
+ tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
+
+ _exit_critical_bh(&pxmitpriv->lock, &irql);
+}
+
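+/* Stop the matching net-device (sub)queue when transmit resources run low. */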
+static void rtw_check_xmit_resource(struct adapter *padapter, struct sk_buff *pkt)
+{
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ u16 queue;
+
+ queue = skb_get_queue_mapping(pkt);
+ if (padapter->registrypriv.wifi_spec) {
+ /* No free space for Tx, tx_worker is too slow */
+ if (pxmitpriv->hwxmits[queue].accnt > WMM_XMIT_THRESHOLD)
+ netif_stop_subqueue(padapter->pnetdev, queue);
+ } else {
+ if (pxmitpriv->free_xmitframe_cnt <= 4) {
+ if (!netif_tx_queue_stopped(netdev_get_tx_queue(padapter->pnetdev, queue)))
+ netif_stop_subqueue(padapter->pnetdev, queue);
+ }
+ }
+}
+
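+/* In AP mode, clone a multicast frame into per-station unicast copies,
+ * skipping the station the frame came from; returns false if a copy fails
+ * so the caller falls back to normal multicast transmission. */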
+static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
+{
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ unsigned long irql;
+ struct list_head *phead, *plist;
+ struct sk_buff *newskb;
+ struct sta_info *psta = NULL;
+ s32 res;
+
+ _enter_critical_bh(&pstapriv->asoc_list_lock, &irql);
+ phead = &pstapriv->asoc_list;
+ plist = get_next(phead);
+
+ /* free sta asoc_queue */
+ while (!rtw_end_of_queue_search(phead, plist)) {
+ psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+
+ plist = get_next(plist);
+
+		/* do not send the frame back to the station it came from */
+ if (!memcmp(psta->hwaddr, &skb->data[6], 6))
+ continue;
+
+ newskb = skb_copy(skb, GFP_ATOMIC);
+
+ if (newskb) {
+ memcpy(newskb->data, psta->hwaddr, 6);
+ res = rtw_xmit(padapter, &newskb);
+ if (res < 0) {
+ DBG_88E("%s()-%d: rtw_xmit() return error!\n", __func__, __LINE__);
+ pxmitpriv->tx_drop++;
+ dev_kfree_skb_any(newskb);
+ } else {
+ pxmitpriv->tx_pkts++;
+ }
+ } else {
+ DBG_88E("%s-%d: skb_copy() failed!\n", __func__, __LINE__);
+ pxmitpriv->tx_drop++;
+
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irql);
+ return false; /* Caller shall tx this multicast frame via normal way. */
+ }
+ }
+
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irql);
+ dev_kfree_skb_any(skb);
+ return true;
+}
+
+
+int rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ s32 res = 0;
+
+_func_enter_;
+
+	RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("+xmit_entry\n"));
+
+	if (!rtw_if_up(padapter)) {
+ RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("rtw_xmit_entry: rtw_if_up fail\n"));
+ goto drop_packet;
+ }
+
+ rtw_check_xmit_resource(padapter, pkt);
+
+ if (!rtw_mc2u_disable && check_fwstate(pmlmepriv, WIFI_AP_STATE) &&
+ (IP_MCAST_MAC(pkt->data) || ICMPV6_MCAST_MAC(pkt->data)) &&
+ (padapter->registrypriv.wifi_spec == 0)) {
+ if (pxmitpriv->free_xmitframe_cnt > (NR_XMITFRAME/4)) {
+ res = rtw_mlcst2unicst(padapter, pkt);
+ if (res)
+ goto exit;
+ }
+ }
+
+ res = rtw_xmit(padapter, &pkt);
+ if (res < 0)
+ goto drop_packet;
+
+ pxmitpriv->tx_pkts++;
+ RT_TRACE(_module_xmit_osdep_c_, _drv_info_, ("rtw_xmit_entry: tx_pkts=%d\n", (u32)pxmitpriv->tx_pkts));
+ goto exit;
+
+drop_packet:
+ pxmitpriv->tx_drop++;
+ dev_kfree_skb_any(pkt);
+ RT_TRACE(_module_xmit_osdep_c_, _drv_notice_, ("rtw_xmit_entry: drop, tx_drop=%d\n", (u32)pxmitpriv->tx_drop));
+
+exit:
+
+_func_exit_;
+
+ return 0;
+}
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
index ea91744..5f10e40 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
@@ -20,20 +20,7 @@
#include "rtl_core.h"
#include "r8192E_hw.h"
#include "r8192E_cmdpkt.h"
-/*---------------------------Define Local Constant---------------------------*/
-/* Debug constant*/
-#define CMPK_DEBOUNCE_CNT 1
-#define CMPK_PRINT(Address)\
-{\
- unsigned char i;\
- u32 temp[10];\
- \
- memcpy(temp, Address, 40);\
- for (i = 0; i < 40; i += 4)\
- printk(KERN_INFO "\r\n %08x", temp[i]);\
-}
-/*---------------------------Define functions---------------------------------*/
bool cmpk_message_handle_tx(
struct net_device *dev,
u8 *code_virtual_address,
@@ -100,7 +87,7 @@ bool cmpk_message_handle_tx(
write_nic_byte(dev, TPPoll, TPPoll_CQ);
Failed:
return rt_status;
-} /* CMPK_Message_Handle_Tx */
+}
static void
cmpk_count_txstatistic(
@@ -149,23 +136,19 @@ cmpk_count_txstatistic(
priv->stats.txretrycount += pstx_fb->retry_cnt;
priv->stats.txfeedbackretry += pstx_fb->retry_cnt;
-
-} /* cmpk_CountTxStatistic */
-
-
+}
static void cmpk_handle_tx_feedback(struct net_device *dev, u8 *pmsg)
{
struct r8192_priv *priv = rtllib_priv(dev);
- struct cmpk_txfb rx_tx_fb; /* */
+ struct cmpk_txfb rx_tx_fb;
priv->stats.txfeedback++;
memcpy((u8 *)&rx_tx_fb, pmsg, sizeof(struct cmpk_txfb));
cmpk_count_txstatistic(dev, &rx_tx_fb);
-
-} /* cmpk_Handle_Tx_Feedback */
+}
static void cmdpkt_beacontimerinterrupt_819xusb(struct net_device *dev)
{
@@ -182,7 +165,6 @@ static void cmdpkt_beacontimerinterrupt_819xusb(struct net_device *dev)
tx_rate = 10;
DMESG("send beacon frame tx rate is 1Mbpm\n");
}
-
}
static void cmpk_handle_interrupt_status(struct net_device *dev, u8 *pmsg)
@@ -192,14 +174,12 @@ static void cmpk_handle_interrupt_status(struct net_device *dev, u8 *pmsg)
DMESG("---> cmpk_Handle_Interrupt_Status()\n");
-
rx_intr_status.length = pmsg[1];
if (rx_intr_status.length != (sizeof(struct cmpk_intr_sta) - 2)) {
DMESG("cmpk_Handle_Interrupt_Status: wrong length!\n");
return;
}
-
if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
rx_intr_status.interrupt_status = *((u32 *)(pmsg + 4));
@@ -220,12 +200,11 @@ static void cmpk_handle_interrupt_status(struct net_device *dev, u8 *pmsg)
DMESG("<---- cmpk_handle_interrupt_status()\n");
-} /* cmpk_handle_interrupt_status */
-
+}
static void cmpk_handle_query_config_rx(struct net_device *dev, u8 *pmsg)
{
- cmpk_query_cfg_t rx_query_cfg; /* */
+ cmpk_query_cfg_t rx_query_cfg;
rx_query_cfg.cfg_action = (pmsg[4] & 0x80000000)>>31;
@@ -238,8 +217,7 @@ static void cmpk_handle_query_config_rx(struct net_device *dev, u8 *pmsg)
rx_query_cfg.mask = (pmsg[12] << 24) | (pmsg[13] << 16) |
(pmsg[14] << 8) | (pmsg[15] << 0);
-} /* cmpk_Handle_Query_Config_Rx */
-
+}
static void cmpk_count_tx_status(struct net_device *dev,
struct cmpk_tx_status *pstx_status)
@@ -280,13 +258,11 @@ static void cmpk_count_tx_status(struct net_device *dev,
priv->stats.txbytesunicast += pstx_status->txuclength;
priv->stats.last_packet_rate = pstx_status->rate;
-} /* cmpk_CountTxStatus */
-
-
+}
static void cmpk_handle_tx_status(struct net_device *dev, u8 *pmsg)
{
- struct cmpk_tx_status rx_tx_sts; /* */
+ struct cmpk_tx_status rx_tx_sts;
memcpy((void *)&rx_tx_sts, (void *)pmsg, sizeof(struct cmpk_tx_status));
cmpk_count_tx_status(dev, &rx_tx_sts);
@@ -300,7 +276,6 @@ static void cmpk_handle_tx_rate_history(struct net_device *dev, u8 *pmsg)
u32 *ptemp;
struct r8192_priv *priv = rtllib_priv(dev);
-
#ifdef ENABLE_PS
pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE,
(pu1Byte)(&rtState));
@@ -335,10 +310,8 @@ static void cmpk_handle_tx_rate_history(struct net_device *dev, u8 *pmsg)
priv->stats.txrate.ht_mcs[j][i] +=
ptxrate->ht_mcs[j][i];
}
-
}
-
u32 cmpk_message_handle_rx(struct net_device *dev,
struct rtllib_rx_stats *pstats)
{
@@ -349,12 +322,8 @@ u32 cmpk_message_handle_rx(struct net_device *dev,
RT_TRACE(COMP_CMDPKT, "---->cmpk_message_handle_rx()\n");
- if (pstats == NULL) {
- /* Print error message. */
- /*RT_TRACE(COMP_SEND, DebugLevel,
- ("\n\r[CMPK]-->Err queue id or pointer"));*/
+ if (pstats == NULL)
return 0;
- }
total_length = pstats->Length;
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index a9d78e9..74fbd70 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -2128,10 +2128,11 @@ void rtl8192_update_ratr_table(struct net_device *dev)
struct rtllib_device *ieee = priv->rtllib;
u8 *pMcsRate = ieee->dot11HTOperationalRateSet;
u32 ratr_value = 0;
+ u16 rate_config = 0;
u8 rate_index = 0;
- rtl8192_config_rate(dev, (u16 *)(&ratr_value));
- ratr_value |= (*(u16 *)(pMcsRate)) << 12;
+ rtl8192_config_rate(dev, &rate_config);
+ ratr_value = rate_config | *pMcsRate << 12;
switch (ieee->mode) {
case IEEE_A:
ratr_value &= 0x00000FF0;
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
index b9b3b52..dbe0e1c 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
index baf3b63..fa5603a 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
index fa607f9..7d075d3 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 2b6c61c..e068443 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -94,6 +94,7 @@ MODULE_DEVICE_TABLE(pci, rtl8192_pci_id_tbl);
static int rtl8192_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id);
static void rtl8192_pci_disconnect(struct pci_dev *pdev);
+static irqreturn_t rtl8192_interrupt(int irq, void *netdev);
static struct pci_driver rtl8192_pci_driver = {
.name = DRV_NAME, /* Driver name */
@@ -1324,7 +1325,7 @@ static short rtl8192_init(struct net_device *dev)
(unsigned long)dev);
rtl8192_irq_disable(dev);
- if (request_irq(dev->irq, (void *)rtl8192_interrupt_rsl, IRQF_SHARED,
+ if (request_irq(dev->irq, rtl8192_interrupt, IRQF_SHARED,
dev->name, dev)) {
printk(KERN_ERR "Error allocating IRQ %d", dev->irq);
return -1;
@@ -2704,7 +2705,7 @@ out:
}
-irqreturn_type rtl8192_interrupt(int irq, void *netdev, struct pt_regs *regs)
+irqreturn_t rtl8192_interrupt(int irq, void *netdev)
{
struct net_device *dev = (struct net_device *) netdev;
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index 87d4d34..b015bf6 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -88,10 +88,6 @@
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID , \
.driver_data = (kernel_ulong_t)&(cfg)
-#define irqreturn_type irqreturn_t
-
-#define rtl8192_interrupt(x, y, z) rtl8192_interrupt_rsl(x, y)
-
#define RTL_MAX_SCAN_SIZE 128
#define RTL_RATE_MAX 30
@@ -1044,8 +1040,6 @@ void rtl8192_set_chan(struct net_device *dev, short ch);
void check_rfctrl_gpio_timer(unsigned long data);
void rtl8192_hw_wakeup_wq(void *data);
-irqreturn_type rtl8192_interrupt(int irq, void *netdev, struct pt_regs *regs);
-
short rtl8192_pci_initdescring(struct net_device *dev);
void rtl8192_cancel_deferred_work(struct r8192_priv *priv);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c
index c1ccff4..a6778e0 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h
index 9452e16..adea2b4 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c
index 0cfb3ec..529ea54 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
index 5abbee3..2ad92ee 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
index 28c7da6..356aec4 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
index c9a7c56..a8c2ade 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h
index df79d6c..962f2e5 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index 3485ef1..05ef49f 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -14,7 +14,7 @@
* Copyright (c) 2004, Intel Corporation
*
* Modified for Realtek's wi-fi cards by Andrea Merello
- * <andreamrl@tiscali.it>
+ * <andrea.merello@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/staging/rtl8192e/rtllib_debug.h b/drivers/staging/rtl8192e/rtllib_debug.h
index 2bfc115..c59f67b 100644
--- a/drivers/staging/rtl8192e/rtllib_debug.h
+++ b/drivers/staging/rtl8192e/rtllib_debug.h
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c
index 84ea721..51d46e0 100644
--- a/drivers/staging/rtl8192e/rtllib_module.c
+++ b/drivers/staging/rtl8192e/rtllib_module.c
@@ -233,7 +233,8 @@ static const struct file_operations fops = {
.open = open_debug_level,
.read = seq_read,
.llseek = seq_lseek,
- .write = write_debug_level
+ .write = write_debug_level,
+ .release = single_release,
};
int __init rtllib_init(void)
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 8b8a5c6..8aeaed5 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -14,7 +14,7 @@
******************************************************************************
Few modifications for Realtek's Wi-Fi drivers by
- Andrea Merello <andreamrl@tiscali.it>
+ Andrea Merello <andrea.merello@gmail.com>
A special thanks goes to Realtek for their support !
@@ -777,6 +777,8 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
/* Allocate new skb for releasing to upper layer */
sub_skb = dev_alloc_skb(RTLLIB_SKBBUFFER_SIZE);
+ if (!sub_skb)
+ return 0;
skb_reserve(sub_skb, 12);
data_ptr = (u8 *)skb_put(sub_skb, skb->len);
memcpy(data_ptr, skb->data, skb->len);
@@ -825,6 +827,8 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
/* Allocate new skb for releasing to upper layer */
sub_skb = dev_alloc_skb(nSubframe_Length + 12);
+ if (!sub_skb)
+ return 0;
skb_reserve(sub_skb, 12);
data_ptr = (u8 *)skb_put(sub_skb, nSubframe_Length);
memcpy(data_ptr, skb->data, nSubframe_Length);
@@ -1822,7 +1826,7 @@ int rtllib_parse_info_param(struct rtllib_device *ieee,
network->rates_ex[i] = info_element->data[i];
p += snprintf(p, sizeof(rates_str) -
(p - rates_str), "%02X ",
- network->rates[i]);
+ network->rates_ex[i]);
if (rtllib_is_ofdm_rate
(info_element->data[i])) {
network->flags |= NETWORK_HAS_OFDM;
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index 4feecec..0cbf6f5 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -1,5 +1,5 @@
/* IEEE 802.11 SoftMAC layer
- * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
*
* Mostly extracted from the rtl8180-sa2400 driver for the
* in-kernel generic ieee802.11 stack.
@@ -1801,8 +1801,9 @@ static inline u16 auth_parse(struct sk_buff *skb, u8** challenge, int *chlen)
if (*(t++) == MFIE_TYPE_CHALLENGE) {
*chlen = *(t++);
- *challenge = kmalloc(*chlen, GFP_ATOMIC);
- memcpy(*challenge, t, *chlen); /*TODO - check here*/
+ *challenge = kmemdup(t, *chlen, GFP_ATOMIC);
+ if (!*challenge)
+ return -ENOMEM;
}
}
return cpu_to_le16(a->status);
diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
index 740cf85..e6af8cf 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
@@ -1,5 +1,5 @@
/* IEEE 802.11 SoftMAC layer
- * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
*
* Mostly extracted from the rtl8180-sa2400 driver for the
* in-kernel generic ieee802.11 stack.
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index 759d7c7..1cc6a9d 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -25,7 +25,7 @@
******************************************************************************
Few modifications for Realtek's Wi-Fi drivers by
- Andrea Merello <andreamrl@tiscali.it>
+ Andrea Merello <andrea.merello@gmail.com>
A special thanks goes to Realtek for their support !
diff --git a/drivers/staging/rtl8192u/authors b/drivers/staging/rtl8192u/authors
index b08bbae..0fab112 100644
--- a/drivers/staging/rtl8192u/authors
+++ b/drivers/staging/rtl8192u/authors
@@ -1 +1 @@
-Andrea Merello <andreamrl@tiscali.it>
+Andrea Merello <andrea.merello@gmail.com>
diff --git a/drivers/staging/rtl8192u/ieee80211/dot11d.c b/drivers/staging/rtl8192u/ieee80211/dot11d.c
index f10fd5a..34edcfa 100644
--- a/drivers/staging/rtl8192u/ieee80211/dot11d.c
+++ b/drivers/staging/rtl8192u/ieee80211/dot11d.c
@@ -67,9 +67,9 @@ Dot11d_Reset(struct ieee80211_device *ieee)
void
Dot11d_UpdateCountryIe(
struct ieee80211_device *dev,
- u8 * pTaddr,
+ u8 *pTaddr,
u16 CoutryIeLen,
- u8 * pCoutryIe
+ u8 *pCoutryIe
)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
@@ -101,7 +101,7 @@ Dot11d_UpdateCountryIe(
MaxChnlNum = pTriple->FirstChnl + j;
}
- pTriple = (PCHNL_TXPOWER_TRIPLE)((u8*)pTriple + 3);
+ pTriple = (PCHNL_TXPOWER_TRIPLE)((u8 *)pTriple + 3);
}
//printk("Dot11d_UpdateCountryIe(): Channel List:\n");
printk("Channel List:");
@@ -143,12 +143,12 @@ DOT11D_GetMaxTxPwrInDbm(
void
DOT11D_ScanComplete(
- struct ieee80211_device * dev
+ struct ieee80211_device *dev
)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
- switch(pDot11dInfo->State)
+ switch (pDot11dInfo->State)
{
case DOT11D_STATE_LEARNED:
pDot11dInfo->State = DOT11D_STATE_DONE;
@@ -166,7 +166,7 @@ DOT11D_ScanComplete(
}
int IsLegalChannel(
- struct ieee80211_device * dev,
+ struct ieee80211_device *dev,
u8 channel
)
{
@@ -183,7 +183,7 @@ int IsLegalChannel(
}
int ToLegalChannel(
- struct ieee80211_device * dev,
+ struct ieee80211_device *dev,
u8 channel
)
{
diff --git a/drivers/staging/rtl8192u/ieee80211/dot11d.h b/drivers/staging/rtl8192u/ieee80211/dot11d.h
index 54f2b4c..6aa8c15 100644
--- a/drivers/staging/rtl8192u/ieee80211/dot11d.h
+++ b/drivers/staging/rtl8192u/ieee80211/dot11d.h
@@ -71,9 +71,9 @@ Dot11d_Reset(
void
Dot11d_UpdateCountryIe(
struct ieee80211_device *dev,
- u8 * pTaddr,
+ u8 *pTaddr,
u16 CoutryIeLen,
- u8 * pCoutryIe
+ u8 *pCoutryIe
);
u8
@@ -84,16 +84,16 @@ DOT11D_GetMaxTxPwrInDbm(
void
DOT11D_ScanComplete(
- struct ieee80211_device * dev
+ struct ieee80211_device *dev
);
int IsLegalChannel(
- struct ieee80211_device * dev,
+ struct ieee80211_device *dev,
u8 channel
);
int ToLegalChannel(
- struct ieee80211_device * dev,
+ struct ieee80211_device *dev,
u8 channel
);
#endif // #ifndef __INC_DOT11D_H
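
The dot11d.c/dot11d.h hunks are pure checkpatch-style cleanups: the asterisk binds to the identifier rather than the type, and keywords such as switch take a space before the parenthesis. Illustrative only:

#include <linux/types.h>

static int first_byte_class(const u8 *buf)      /* not: const u8 * buf */
{
        switch (buf[0]) {                       /* not: switch(buf[0]) */
        case 0:
                return 0;
        default:
                return 1;
        }
}
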
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index 210898c..bc64f05 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -14,7 +14,7 @@
* Copyright (c) 2004, Intel Corporation
*
* Modified for Realtek's wi-fi cards by Andrea Merello
- * <andreamrl@tiscali.it>
+ * <andrea.merello@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -493,8 +493,8 @@ typedef struct ieee_param {
#define IsDataFrame(pdu) ( ((pdu[0] & 0x0C)==0x08) ? true : false )
#define IsLegacyDataFrame(pdu) (IsDataFrame(pdu) && (!(pdu[0]&FC_QOS_BIT)) )
//added by wb. Is this right?
-#define IsQoSDataFrame(pframe) ((*(u16*)pframe&(IEEE80211_STYPE_QOS_DATA|IEEE80211_FTYPE_DATA)) == (IEEE80211_STYPE_QOS_DATA|IEEE80211_FTYPE_DATA))
-#define Frame_Order(pframe) (*(u16*)pframe&IEEE80211_FCTL_ORDER)
+#define IsQoSDataFrame(pframe) ((*(u16 *)pframe&(IEEE80211_STYPE_QOS_DATA|IEEE80211_FTYPE_DATA)) == (IEEE80211_STYPE_QOS_DATA|IEEE80211_FTYPE_DATA))
+#define Frame_Order(pframe) (*(u16 *)pframe&IEEE80211_FCTL_ORDER)
#define SN_LESS(a, b) (((a-b)&0x800)!=0)
#define SN_EQUAL(a, b) (a == b)
#define MAX_DEV_ADDR_SIZE 8
@@ -538,7 +538,7 @@ do { if (ieee80211_debug_level & (level)) \
do{ if ((ieee80211_debug_level & (level)) == (level)) \
{ \
int i; \
- u8* pdata = (u8*) data; \
+ u8 *pdata = (u8 *) data; \
printk(KERN_DEBUG "ieee80211: %s()\n", __FUNCTION__); \
for(i=0; i<(int)(datalen); i++) \
{ \
@@ -914,7 +914,7 @@ struct ieee80211_rx_stats {
bool bIsCCK;
bool bPacketToSelf;
//added by amy
- u8* virtual_address;
+ u8 *virtual_address;
u16 packetlength; // Total packet length: Must equal to sum of all FragLength
u16 fraglength; // FragLength should equal to PacketLength in non-fragment case
u16 fragoffset; // Data offset for this fragment
@@ -1366,13 +1366,13 @@ static inline const char *eap_get_type(int type)
return ((u32)type >= ARRAY_SIZE(eap_types)) ? "Unknown" : eap_types[type];
}
//added by amy for reorder
-static inline u8 Frame_QoSTID(u8* buf)
+static inline u8 Frame_QoSTID(u8 *buf)
{
struct ieee80211_hdr_3addr *hdr;
u16 fc;
hdr = (struct ieee80211_hdr_3addr *)buf;
fc = le16_to_cpu(hdr->frame_ctl);
- return (u8)((frameqos*)(buf + (((fc & IEEE80211_FCTL_TODS)&&(fc & IEEE80211_FCTL_FROMDS))? 30 : 24)))->field.tid;
+ return (u8)((frameqos *)(buf + (((fc & IEEE80211_FCTL_TODS)&&(fc & IEEE80211_FCTL_FROMDS))? 30 : 24)))->field.tid;
}
//added by amy for reorder
@@ -1670,7 +1670,7 @@ typedef struct _bandwidth_autoswitch {
typedef struct _RX_REORDER_ENTRY {
struct list_head List;
u16 SeqNum;
- struct ieee80211_rxb* prxb;
+ struct ieee80211_rxb *prxb;
} RX_REORDER_ENTRY, *PRX_REORDER_ENTRY;
//added by amy for order
typedef enum _Fsync_State{
@@ -1965,7 +1965,7 @@ struct ieee80211_device {
/* map of allowed channels. 0 is dummy */
// FIXME: remember to default to a basic channel plan depending of the PHY type
- void* pDot11dInfo;
+ void *pDot11dInfo;
bool bGlobalDomain;
int rate; /* current rate */
int basic_rate;
@@ -2107,10 +2107,10 @@ struct ieee80211_device {
struct net_device *dev);
int (*reset_port)(struct net_device *dev);
- int (*is_queue_full) (struct net_device * dev, int pri);
+ int (*is_queue_full) (struct net_device *dev, int pri);
- int (*handle_management) (struct net_device * dev,
- struct ieee80211_network * network, u16 type);
+ int (*handle_management) (struct net_device *dev,
+ struct ieee80211_network *network, u16 type);
int (*is_qos_active) (struct net_device *dev, struct sk_buff *skb);
/* Softmac-generated frames (management) are TXed via this
@@ -2187,8 +2187,8 @@ struct ieee80211_device {
void (*ps_request_tx_ack) (struct net_device *dev);
void (*enter_sleep_state) (struct net_device *dev, u32 th, u32 tl);
short (*ps_is_queue_empty) (struct net_device *dev);
- int (*handle_beacon) (struct net_device * dev, struct ieee80211_beacon * beacon, struct ieee80211_network * network);
- int (*handle_assoc_response) (struct net_device * dev, struct ieee80211_assoc_response_frame * resp, struct ieee80211_network * network);
+ int (*handle_beacon) (struct net_device *dev, struct ieee80211_beacon *beacon, struct ieee80211_network *network);
+ int (*handle_assoc_response) (struct net_device *dev, struct ieee80211_assoc_response_frame *resp, struct ieee80211_network *network);
/* check whether Tx hw resource available */
@@ -2197,9 +2197,9 @@ struct ieee80211_device {
// void (*SwChnlByTimerHandler)(struct net_device *dev, int channel);
void (*SetBWModeHandler)(struct net_device *dev, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset);
// void (*UpdateHalRATRTableHandler)(struct net_device* dev, u8* pMcsRate);
- bool (*GetNmodeSupportBySecCfg)(struct net_device* dev);
- void (*SetWirelessMode)(struct net_device* dev, u8 wireless_mode);
- bool (*GetHalfNmodeSupportByAPsHandler)(struct net_device* dev);
+ bool (*GetNmodeSupportBySecCfg)(struct net_device *dev);
+ void (*SetWirelessMode)(struct net_device *dev, u8 wireless_mode);
+ bool (*GetHalfNmodeSupportByAPsHandler)(struct net_device *dev);
void (*InitialGainHandler)(struct net_device *dev, u8 Operation);
/* This must be the last item so that it points to the data
@@ -2401,10 +2401,10 @@ extern int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
#if WIRELESS_EXT >= 18
extern int ieee80211_wx_get_encode_ext(struct ieee80211_device *ieee,
struct iw_request_info *info,
- union iwreq_data* wrqu, char *extra);
+ union iwreq_data *wrqu, char *extra);
extern int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
struct iw_request_info *info,
- union iwreq_data* wrqu, char *extra);
+ union iwreq_data *wrqu, char *extra);
extern int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
struct iw_request_info *info,
struct iw_param *data, char *extra);
@@ -2422,7 +2422,7 @@ extern int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_b
u16 stype);
extern void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee80211_network *net);
-void SendDisassociation(struct ieee80211_device *ieee, u8* asSta, u8 asRsn);
+void SendDisassociation(struct ieee80211_device *ieee, u8 *asSta, u8 asRsn);
extern void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *ieee);
extern void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
@@ -2528,52 +2528,52 @@ extern int ieee80211_wx_get_rts(struct ieee80211_device *ieee,
union iwreq_data *wrqu, char *extra);
//HT
#define MAX_RECEIVE_BUFFER_SIZE 9100 //
-extern void HTDebugHTCapability(u8* CapIE, u8* TitleString );
-extern void HTDebugHTInfo(u8* InfoIE, u8* TitleString);
-
-void HTSetConnectBwMode(struct ieee80211_device* ieee, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset);
-extern void HTUpdateDefaultSetting(struct ieee80211_device* ieee);
-extern void HTConstructCapabilityElement(struct ieee80211_device* ieee, u8* posHTCap, u8* len, u8 isEncrypt);
-extern void HTConstructInfoElement(struct ieee80211_device* ieee, u8* posHTInfo, u8* len, u8 isEncrypt);
-extern void HTConstructRT2RTAggElement(struct ieee80211_device* ieee, u8* posRT2RTAgg, u8* len);
+extern void HTDebugHTCapability(u8 *CapIE, u8 *TitleString );
+extern void HTDebugHTInfo(u8 *InfoIE, u8 *TitleString);
+
+void HTSetConnectBwMode(struct ieee80211_device *ieee, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset);
+extern void HTUpdateDefaultSetting(struct ieee80211_device *ieee);
+extern void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap, u8 *len, u8 isEncrypt);
+extern void HTConstructInfoElement(struct ieee80211_device *ieee, u8 *posHTInfo, u8 *len, u8 isEncrypt);
+extern void HTConstructRT2RTAggElement(struct ieee80211_device *ieee, u8 *posRT2RTAgg, u8 *len);
extern void HTOnAssocRsp(struct ieee80211_device *ieee);
-extern void HTInitializeHTInfo(struct ieee80211_device* ieee);
+extern void HTInitializeHTInfo(struct ieee80211_device *ieee);
extern void HTInitializeBssDesc(PBSS_HT pBssHT);
-extern void HTResetSelfAndSavePeerSetting(struct ieee80211_device* ieee, struct ieee80211_network * pNetwork);
-extern void HTUpdateSelfAndPeerSetting(struct ieee80211_device* ieee, struct ieee80211_network * pNetwork);
-extern u8 HTGetHighestMCSRate(struct ieee80211_device* ieee, u8* pMCSRateSet, u8* pMCSFilter);
+extern void HTResetSelfAndSavePeerSetting(struct ieee80211_device *ieee, struct ieee80211_network *pNetwork);
+extern void HTUpdateSelfAndPeerSetting(struct ieee80211_device *ieee, struct ieee80211_network *pNetwork);
+extern u8 HTGetHighestMCSRate(struct ieee80211_device *ieee, u8 *pMCSRateSet, u8 *pMCSFilter);
extern u8 MCS_FILTER_ALL[];
extern u16 MCS_DATA_RATE[2][2][77] ;
-extern u8 HTCCheck(struct ieee80211_device* ieee, u8* pFrame);
+extern u8 HTCCheck(struct ieee80211_device *ieee, u8 *pFrame);
//extern void HTSetConnectBwModeCallback(unsigned long data);
extern void HTResetIOTSetting(PRT_HIGH_THROUGHPUT pHTInfo);
-extern bool IsHTHalfNmodeAPs(struct ieee80211_device* ieee);
-extern u16 HTHalfMcsToDataRate(struct ieee80211_device* ieee, u8 nMcsRate);
-extern u16 HTMcsToDataRate( struct ieee80211_device* ieee, u8 nMcsRate);
-extern u16 TxCountToDataRate( struct ieee80211_device* ieee, u8 nDataRate);
+extern bool IsHTHalfNmodeAPs(struct ieee80211_device *ieee);
+extern u16 HTHalfMcsToDataRate(struct ieee80211_device *ieee, u8 nMcsRate);
+extern u16 HTMcsToDataRate( struct ieee80211_device *ieee, u8 nMcsRate);
+extern u16 TxCountToDataRate( struct ieee80211_device *ieee, u8 nDataRate);
//function in BAPROC.c
-extern int ieee80211_rx_ADDBAReq( struct ieee80211_device* ieee, struct sk_buff *skb);
-extern int ieee80211_rx_ADDBARsp( struct ieee80211_device* ieee, struct sk_buff *skb);
-extern int ieee80211_rx_DELBA(struct ieee80211_device* ieee,struct sk_buff *skb);
-extern void TsInitAddBA( struct ieee80211_device* ieee, PTX_TS_RECORD pTS, u8 Policy, u8 bOverwritePending);
-extern void TsInitDelBA( struct ieee80211_device* ieee, PTS_COMMON_INFO pTsCommonInfo, TR_SELECT TxRxSelect);
+extern int ieee80211_rx_ADDBAReq( struct ieee80211_device *ieee, struct sk_buff *skb);
+extern int ieee80211_rx_ADDBARsp( struct ieee80211_device *ieee, struct sk_buff *skb);
+extern int ieee80211_rx_DELBA(struct ieee80211_device *ieee,struct sk_buff *skb);
+extern void TsInitAddBA( struct ieee80211_device *ieee, PTX_TS_RECORD pTS, u8 Policy, u8 bOverwritePending);
+extern void TsInitDelBA( struct ieee80211_device *ieee, PTS_COMMON_INFO pTsCommonInfo, TR_SELECT TxRxSelect);
extern void BaSetupTimeOut(unsigned long data);
extern void TxBaInactTimeout(unsigned long data);
extern void RxBaInactTimeout(unsigned long data);
extern void ResetBaEntry( PBA_RECORD pBA);
//function in TS.c
extern bool GetTs(
- struct ieee80211_device* ieee,
+ struct ieee80211_device *ieee,
PTS_COMMON_INFO *ppTS,
- u8* Addr,
+ u8 *Addr,
u8 TID,
TR_SELECT TxRxSelect, //Rx:1, Tx:0
bool bAddNewTs
);
extern void TSInitialize(struct ieee80211_device *ieee);
-extern void TsStartAddBaProcess(struct ieee80211_device* ieee, PTX_TS_RECORD pTxTS);
-extern void RemovePeerTS(struct ieee80211_device* ieee, u8* Addr);
-extern void RemoveAllTS(struct ieee80211_device* ieee);
+extern void TsStartAddBaProcess(struct ieee80211_device *ieee, PTX_TS_RECORD pTxTS);
+extern void RemovePeerTS(struct ieee80211_device *ieee, u8 *Addr);
+extern void RemoveAllTS(struct ieee80211_device *ieee);
void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee);
extern const long ieee80211_wlan_frequencies[];
@@ -2623,6 +2623,6 @@ extern int ieee80211_parse_info_param(struct ieee80211_device *ieee,
struct ieee80211_network *network,
struct ieee80211_rx_stats *stats);
-void ieee80211_indicate_packets(struct ieee80211_device *ieee, struct ieee80211_rxb** prxbIndicateArray,u8 index);
+void ieee80211_indicate_packets(struct ieee80211_device *ieee, struct ieee80211_rxb **prxbIndicateArray,u8 index);
#define RT_ASOC_RETRY_LIMIT 5
#endif /* IEEE80211_H */
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
index a464d11..5533221 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
@@ -155,7 +155,7 @@ int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops)
}
-struct ieee80211_crypto_ops * ieee80211_get_crypto_ops(const char *name)
+struct ieee80211_crypto_ops *ieee80211_get_crypto_ops(const char *name)
{
unsigned long flags;
struct list_head *ptr;
@@ -182,7 +182,7 @@ struct ieee80211_crypto_ops * ieee80211_get_crypto_ops(const char *name)
}
-static void * ieee80211_crypt_null_init(int keyidx) { return (void *) 1; }
+static void *ieee80211_crypt_null_init(int keyidx) { return (void *) 1; }
static void ieee80211_crypt_null_deinit(void *priv) {}
static struct ieee80211_crypto_ops ieee80211_crypt_null = {
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h
index b58a3bc..0b4ea43 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h
@@ -77,7 +77,7 @@ struct ieee80211_crypt_data {
int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops);
int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops);
-struct ieee80211_crypto_ops * ieee80211_get_crypto_ops(const char *name);
+struct ieee80211_crypto_ops *ieee80211_get_crypto_ops(const char *name);
void ieee80211_crypt_deinit_entries(struct ieee80211_device *, int);
void ieee80211_crypt_deinit_handler(unsigned long);
void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee,
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
index fec0176..f2b1677 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
@@ -60,10 +60,10 @@ struct ieee80211_ccmp_data {
void ieee80211_ccmp_aes_encrypt(struct crypto_tfm *tfm,
const u8 pt[16], u8 ct[16])
{
- crypto_cipher_encrypt_one((void*)tfm, ct, pt);
+ crypto_cipher_encrypt_one((void *)tfm, ct, pt);
}
-static void * ieee80211_ccmp_init(int key_idx)
+static void *ieee80211_ccmp_init(int key_idx)
{
struct ieee80211_ccmp_data *priv;
@@ -72,7 +72,7 @@ static void * ieee80211_ccmp_init(int key_idx)
goto fail;
priv->key_idx = key_idx;
- priv->tfm = (void*)crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
+ priv->tfm = (void *)crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tfm)) {
printk(KERN_DEBUG "ieee80211_crypt_ccmp: could not allocate "
"crypto API aes\n");
@@ -85,7 +85,7 @@ static void * ieee80211_ccmp_init(int key_idx)
fail:
if (priv) {
if (priv->tfm)
- crypto_free_cipher((void*)priv->tfm);
+ crypto_free_cipher((void *)priv->tfm);
kfree(priv);
}
@@ -98,7 +98,7 @@ static void ieee80211_ccmp_deinit(void *priv)
struct ieee80211_ccmp_data *_priv = priv;
if (_priv && _priv->tfm)
- crypto_free_cipher((void*)_priv->tfm);
+ crypto_free_cipher((void *)_priv->tfm);
kfree(priv);
}
@@ -393,7 +393,7 @@ static int ieee80211_ccmp_set_key(void *key, int len, u8 *seq, void *priv)
data->rx_pn[4] = seq[1];
data->rx_pn[5] = seq[0];
}
- crypto_cipher_setkey((void*)data->tfm, data->key, CCMP_TK_LEN);
+ crypto_cipher_setkey((void *)data->tfm, data->key, CCMP_TK_LEN);
} else if (len == 0)
data->key_set = 0;
else
@@ -427,7 +427,7 @@ static int ieee80211_ccmp_get_key(void *key, int len, u8 *seq, void *priv)
}
-static char * ieee80211_ccmp_print_stats(char *p, void *priv)
+static char *ieee80211_ccmp_print_stats(char *p, void *priv)
{
struct ieee80211_ccmp_data *ccmp = priv;
p += sprintf(p, "key[%d] alg=CCMP key_set=%d "
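
The CCMP hunks only adjust spacing in the (void *) casts, but the surrounding flow they touch — allocate the AES cipher, check the result with IS_ERR(), free it on the failure path — is the pattern worth spelling out. A sketch with illustrative wrapper names; crypto_alloc_cipher(), IS_ERR() and crypto_free_cipher() are the real calls:

#include <linux/crypto.h>
#include <linux/err.h>

static struct crypto_cipher *ccmp_example_alloc_aes(void)
{
        struct crypto_cipher *tfm;

        tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return NULL;            /* caller treats NULL as failure */
        return tfm;
}

static void ccmp_example_free_aes(struct crypto_cipher *tfm)
{
        if (tfm)
                crypto_free_cipher(tfm);
}
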
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index 555eb80..93121b4 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -62,7 +62,7 @@ struct ieee80211_tkip_data {
u8 rx_hdr[16], tx_hdr[16];
};
-static void * ieee80211_tkip_init(int key_idx)
+static void *ieee80211_tkip_init(int key_idx)
{
struct ieee80211_tkip_data *priv;
@@ -499,8 +499,8 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
return keyidx;
}
-static int michael_mic(struct crypto_hash *tfm_michael, u8 * key, u8 * hdr,
- u8 * data, size_t data_len, u8 * mic)
+static int michael_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *hdr,
+ u8 *data, size_t data_len, u8 *mic)
{
struct hash_desc desc;
struct scatterlist sg[2];
@@ -718,7 +718,7 @@ static int ieee80211_tkip_get_key(void *key, int len, u8 *seq, void *priv)
}
-static char * ieee80211_tkip_print_stats(char *p, void *priv)
+static char *ieee80211_tkip_print_stats(char *p, void *priv)
{
struct ieee80211_tkip_data *tkip = priv;
p += sprintf(p, "key[%d] alg=TKIP key_set=%d "
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
index 3801f12..f202236 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
@@ -38,7 +38,7 @@ struct prism2_wep_data {
};
-static void * prism2_wep_init(int keyidx)
+static void *prism2_wep_init(int keyidx)
{
struct prism2_wep_data *priv;
@@ -253,7 +253,7 @@ static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv)
}
-static char * prism2_wep_print_stats(char *p, void *priv)
+static char *prism2_wep_print_stats(char *p, void *priv)
{
struct prism2_wep_data *wep = priv;
p += sprintf(p, "key[%d] alg=WEP len=%d\n",
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
index e0870c0..434c431 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
@@ -268,7 +268,8 @@ static const struct file_operations fops = {
.open = open_debug_level,
.read = seq_read,
.llseek = seq_lseek,
- .write = write_debug_level
+ .write = write_debug_level,
+ .release = single_release,
};
int __init ieee80211_debug_init(void)
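
The ieee80211_module.c hunk adds the missing .release = single_release to the debug-level file_operations, so the state that single_open() sets up (which open_debug_level() presumably uses, given the seq_read/seq_lseek handlers) is freed on close. A minimal sketch of that pairing; example_debug_show(), example_debug_open() and example_debug_fops are made-up names:

#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>

static int example_debug_show(struct seq_file *m, void *v)
{
        seq_printf(m, "debug_level: 0x%08x\n", 0u);
        return 0;
}

static int example_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, example_debug_show, NULL);
}

static const struct file_operations example_debug_fops = {
        .owner   = THIS_MODULE,
        .open    = example_debug_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,      /* frees what single_open() allocated */
};
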
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index ee7ce5f..59900bf 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -14,7 +14,7 @@
******************************************************************************
Few modifications for Realtek's Wi-Fi drivers by
- Andrea Merello <andreamrl@tiscali.it>
+ Andrea Merello <andrea.merello@gmail.com>
A special thanks goes to Realtek for their support !
@@ -218,7 +218,7 @@ ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
* this is not mandatory.... but seems that the probe
* response parser uses it
*/
- struct ieee80211_hdr_3addr * hdr = (struct ieee80211_hdr_3addr *)skb->data;
+ struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)skb->data;
rx_stats->len = skb->len;
ieee80211_rx_mgt(ieee,(struct ieee80211_hdr_4addr *)skb->data,rx_stats);
@@ -336,7 +336,7 @@ static int ieee80211_is_eapol_frame(struct ieee80211_device *ieee,
/* Called only as a tasklet (software IRQ), by ieee80211_rx */
static inline int
-ieee80211_rx_frame_decrypt(struct ieee80211_device* ieee, struct sk_buff *skb,
+ieee80211_rx_frame_decrypt(struct ieee80211_device *ieee, struct sk_buff *skb,
struct ieee80211_crypt_data *crypt)
{
struct ieee80211_hdr_4addr *hdr;
@@ -385,7 +385,7 @@ ieee80211_rx_frame_decrypt(struct ieee80211_device* ieee, struct sk_buff *skb,
/* Called only as a tasklet (software IRQ), by ieee80211_rx */
static inline int
-ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device* ieee, struct sk_buff *skb,
+ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee, struct sk_buff *skb,
int keyidx, struct ieee80211_crypt_data *crypt)
{
struct ieee80211_hdr_4addr *hdr;
@@ -439,7 +439,7 @@ static int is_duplicate_packet(struct ieee80211_device *ieee,
tid = UP2AC(tid);
tid ++;
} else if(IEEE80211_QOS_HAS_SEQ(fc)) { //QoS
- hdr_3addrqos = (struct ieee80211_hdr_3addrqos*)header;
+ hdr_3addrqos = (struct ieee80211_hdr_3addrqos *)header;
tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID;
tid = UP2AC(tid);
tid ++;
@@ -454,8 +454,7 @@ static int is_duplicate_packet(struct ieee80211_device *ieee,
struct ieee_ibss_seq *entry = NULL;
u8 *mac = header->addr2;
int index = mac[5] % IEEE_IBSS_MAC_HASH_SIZE;
- //for (pos = (head)->next; pos != (head); pos = pos->next)
- //__list_for_each(p, &ieee->ibss_mac_hash[index]) {
+
list_for_each(p, &ieee->ibss_mac_hash[index]) {
entry = list_entry(p, struct ieee_ibss_seq, list);
if (!memcmp(entry->mac, mac, ETH_ALEN))
@@ -548,7 +547,7 @@ AddReorderEntry(
return true;
}
-void ieee80211_indicate_packets(struct ieee80211_device *ieee, struct ieee80211_rxb** prxbIndicateArray,u8 index)
+void ieee80211_indicate_packets(struct ieee80211_device *ieee, struct ieee80211_rxb **prxbIndicateArray,u8 index)
{
u8 i = 0 , j=0;
u16 ethertype;
@@ -557,7 +556,7 @@ void ieee80211_indicate_packets(struct ieee80211_device *ieee, struct ieee80211_
for(j = 0; j<index; j++)
{
//added by amy for reorder
- struct ieee80211_rxb* prxb = prxbIndicateArray[j];
+ struct ieee80211_rxb *prxb = prxbIndicateArray[j];
for(i = 0; i<prxb->nr_subframes; i++) {
struct sk_buff *sub_skb = prxb->subframes[i];
@@ -603,13 +602,13 @@ void ieee80211_indicate_packets(struct ieee80211_device *ieee, struct ieee80211_
void RxReorderIndicatePacket( struct ieee80211_device *ieee,
- struct ieee80211_rxb* prxb,
+ struct ieee80211_rxb *prxb,
PRX_TS_RECORD pTS,
u16 SeqNum)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
PRX_REORDER_ENTRY pReorderEntry = NULL;
- struct ieee80211_rxb* prxbIndicateArray[REORDER_WIN_SIZE];
+ struct ieee80211_rxb *prxbIndicateArray[REORDER_WIN_SIZE];
u8 WinSize = pHTInfo->RxReorderWinSize;
u16 WinEnd = (pTS->RxIndicateSeq + WinSize -1)%4096;
u8 index = 0;
@@ -774,9 +773,9 @@ void RxReorderIndicatePacket( struct ieee80211_device *ieee,
u8 parse_subframe(struct sk_buff *skb,
struct ieee80211_rx_stats *rx_stats,
- struct ieee80211_rxb *rxb,u8* src,u8* dst)
+ struct ieee80211_rxb *rxb,u8 *src,u8 *dst)
{
- struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr* )skb->data;
+ struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)skb->data;
u16 fc = le16_to_cpu(hdr->frame_ctl);
u16 LLCOffset= sizeof(struct ieee80211_hdr_3addr);
@@ -831,7 +830,7 @@ u8 parse_subframe(struct sk_buff *skb,
memcpy(rxb->dst,dst,ETH_ALEN);
while(skb->len > ETHERNET_HEADER_SIZE) {
/* Offset 12 denote 2 mac address */
- nSubframe_Length = *((u16*)(skb->data + 12));
+ nSubframe_Length = *((u16 *)(skb->data + 12));
//==m==>change the length order
nSubframe_Length = (nSubframe_Length>>8) + (nSubframe_Length<<8);
@@ -926,7 +925,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
int keyidx = 0;
int i;
- struct ieee80211_rxb* rxb = NULL;
+ struct ieee80211_rxb *rxb = NULL;
// cheat the the hdr type
hdr = (struct ieee80211_hdr_4addr *)skb->data;
stats = &ieee->stats;
@@ -1035,9 +1034,9 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
//IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): QOS ENABLE AND RECEIVE QOS DATA , we will get Ts, tid:%d\n",__FUNCTION__, tid);
if(GetTs(
ieee,
- (PTS_COMMON_INFO*) &pRxTS,
+ (PTS_COMMON_INFO *) &pRxTS,
hdr->addr2,
- (u8)Frame_QoSTID((u8*)(skb->data)),
+ (u8)Frame_QoSTID((u8 *)(skb->data)),
RX_DIR,
true))
{
@@ -1289,7 +1288,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
{
TID = Frame_QoSTID(skb->data);
SeqNum = WLAN_GET_SEQ_SEQ(sc);
- GetTs(ieee,(PTS_COMMON_INFO*) &pTS,hdr->addr2,TID,RX_DIR,true);
+ GetTs(ieee,(PTS_COMMON_INFO *) &pTS,hdr->addr2,TID,RX_DIR,true);
if(TID !=0 && TID !=3)
{
ieee->bis_any_nonbepkts = true;
@@ -1597,7 +1596,7 @@ static inline void ieee80211_extract_country_ie(
struct ieee80211_device *ieee,
struct ieee80211_info_element *info_element,
struct ieee80211_network *network,
- u8 * addr2
+ u8 *addr2
)
{
if(IS_DOT11D_ENABLE(ieee))
@@ -2275,7 +2274,7 @@ static inline int ieee80211_network_init(
}
static inline int is_same_network(struct ieee80211_network *src,
- struct ieee80211_network *dst, struct ieee80211_device* ieee)
+ struct ieee80211_network *dst, struct ieee80211_device *ieee)
{
/* A network is only a duplicate if the channel, BSSID, ESSID
* and the capability field (in particular IBSS and BSS) all match.
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 454f889..5fd6969 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -1,5 +1,5 @@
/* IEEE 802.11 SoftMAC layer
- * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
*
* Mostly extracted from the rtl8180-sa2400 driver for the
* in-kernel generic ieee802.11 stack.
@@ -688,7 +688,7 @@ inline struct sk_buff *ieee80211_authentication_req(struct ieee80211_network *be
}
-static struct sk_buff* ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *dest)
+static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *dest)
{
u8 *tag;
int beacon_size;
@@ -696,7 +696,7 @@ static struct sk_buff* ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
struct sk_buff *skb = NULL;
int encrypt;
int atim_len,erp_len;
- struct ieee80211_crypt_data* crypt;
+ struct ieee80211_crypt_data *crypt;
char *ssid = ieee->current_network.ssid;
int ssid_len = ieee->current_network.ssid_len;
@@ -705,12 +705,12 @@ static struct sk_buff* ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
int wpa_ie_len = ieee->wpa_ie_len;
u8 erpinfo_content = 0;
- u8* tmp_ht_cap_buf;
+ u8 *tmp_ht_cap_buf;
u8 tmp_ht_cap_len=0;
- u8* tmp_ht_info_buf;
+ u8 *tmp_ht_info_buf;
u8 tmp_ht_info_len=0;
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- u8* tmp_generic_ie_buf=NULL;
+ u8 *tmp_generic_ie_buf=NULL;
u8 tmp_generic_ie_len=0;
if(rate_ex_len > 0) rate_ex_len+=2;
@@ -732,9 +732,9 @@ static struct sk_buff* ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
encrypt = ieee->host_encrypt && crypt && crypt->ops &&
((0 == strcmp(crypt->ops->name, "WEP") || wpa_ie_len));
//HT ralated element
- tmp_ht_cap_buf =(u8*) &(ieee->pHTInfo->SelfHTCap);
+ tmp_ht_cap_buf =(u8 *) &(ieee->pHTInfo->SelfHTCap);
tmp_ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap);
- tmp_ht_info_buf =(u8*) &(ieee->pHTInfo->SelfHTInfo);
+ tmp_ht_info_buf =(u8 *) &(ieee->pHTInfo->SelfHTInfo);
tmp_ht_info_len = sizeof(ieee->pHTInfo->SelfHTInfo);
HTConstructCapabilityElement(ieee, tmp_ht_cap_buf, &tmp_ht_cap_len,encrypt);
HTConstructInfoElement(ieee,tmp_ht_info_buf,&tmp_ht_info_len, encrypt);
@@ -764,7 +764,7 @@ static struct sk_buff* ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
if (!skb)
return NULL;
skb_reserve(skb, ieee->tx_headroom);
- beacon_buf = (struct ieee80211_probe_response*) skb_put(skb, (beacon_size - ieee->tx_headroom));
+ beacon_buf = (struct ieee80211_probe_response *) skb_put(skb, (beacon_size - ieee->tx_headroom));
memcpy (beacon_buf->header.addr1, dest,ETH_ALEN);
memcpy (beacon_buf->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
memcpy (beacon_buf->header.addr3, ieee->current_network.bssid, ETH_ALEN);
@@ -789,7 +789,7 @@ static struct sk_buff* ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
beacon_buf->info_element[0].id = MFIE_TYPE_SSID;
beacon_buf->info_element[0].len = ssid_len;
- tag = (u8*) beacon_buf->info_element[0].data;
+ tag = (u8 *) beacon_buf->info_element[0].data;
memcpy(tag, ssid, ssid_len);
@@ -841,12 +841,12 @@ static struct sk_buff* ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
}
-struct sk_buff* ieee80211_assoc_resp(struct ieee80211_device *ieee, u8 *dest)
+struct sk_buff *ieee80211_assoc_resp(struct ieee80211_device *ieee, u8 *dest)
{
struct sk_buff *skb;
- u8* tag;
+ u8 *tag;
- struct ieee80211_crypt_data* crypt;
+ struct ieee80211_crypt_data *crypt;
struct ieee80211_assoc_response_frame *assoc;
short encrypt;
@@ -888,7 +888,7 @@ struct sk_buff* ieee80211_assoc_resp(struct ieee80211_device *ieee, u8 *dest)
if (ieee->assoc_id == 0x2007) ieee->assoc_id=0;
else ieee->assoc_id++;
- tag = (u8*) skb_put(skb, rate_len);
+ tag = (u8 *) skb_put(skb, rate_len);
ieee80211_MFIE_Brate(ieee, &tag);
ieee80211_MFIE_Grate(ieee, &tag);
@@ -896,7 +896,7 @@ struct sk_buff* ieee80211_assoc_resp(struct ieee80211_device *ieee, u8 *dest)
return skb;
}
-struct sk_buff* ieee80211_auth_resp(struct ieee80211_device *ieee,int status, u8 *dest)
+struct sk_buff *ieee80211_auth_resp(struct ieee80211_device *ieee,int status, u8 *dest)
{
struct sk_buff *skb;
struct ieee80211_authentication *auth;
@@ -924,17 +924,17 @@ struct sk_buff* ieee80211_auth_resp(struct ieee80211_device *ieee,int status, u8
}
-struct sk_buff* ieee80211_null_func(struct ieee80211_device *ieee,short pwr)
+struct sk_buff *ieee80211_null_func(struct ieee80211_device *ieee,short pwr)
{
struct sk_buff *skb;
- struct ieee80211_hdr_3addr* hdr;
+ struct ieee80211_hdr_3addr *hdr;
skb = dev_alloc_skb(sizeof(struct ieee80211_hdr_3addr));
if (!skb)
return NULL;
- hdr = (struct ieee80211_hdr_3addr*)skb_put(skb,sizeof(struct ieee80211_hdr_3addr));
+ hdr = (struct ieee80211_hdr_3addr *)skb_put(skb,sizeof(struct ieee80211_hdr_3addr));
memcpy(hdr->addr1, ieee->current_network.bssid, ETH_ALEN);
memcpy(hdr->addr2, ieee->dev->dev_addr, ETH_ALEN);
@@ -950,7 +950,7 @@ struct sk_buff* ieee80211_null_func(struct ieee80211_device *ieee,short pwr)
}
-void ieee80211_resp_to_assoc_rq(struct ieee80211_device *ieee, u8* dest)
+void ieee80211_resp_to_assoc_rq(struct ieee80211_device *ieee, u8 *dest)
{
struct sk_buff *buf = ieee80211_assoc_resp(ieee, dest);
@@ -959,7 +959,7 @@ void ieee80211_resp_to_assoc_rq(struct ieee80211_device *ieee, u8* dest)
}
-void ieee80211_resp_to_auth(struct ieee80211_device *ieee, int s, u8* dest)
+void ieee80211_resp_to_auth(struct ieee80211_device *ieee, int s, u8 *dest)
{
struct sk_buff *buf = ieee80211_auth_resp(ieee, s, dest);
@@ -991,15 +991,15 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco
//u8 suit_select = 0;
//unsigned int wpa_len = beacon->wpa_ie_len;
//for HT
- u8* ht_cap_buf = NULL;
+ u8 *ht_cap_buf = NULL;
u8 ht_cap_len=0;
- u8* realtek_ie_buf=NULL;
+ u8 *realtek_ie_buf=NULL;
u8 realtek_ie_len=0;
int wpa_ie_len= ieee->wpa_ie_len;
unsigned int ckip_ie_len=0;
unsigned int ccxrm_ie_len=0;
unsigned int cxvernum_ie_len=0;
- struct ieee80211_crypt_data* crypt;
+ struct ieee80211_crypt_data *crypt;
int encrypt;
unsigned int rate_len = ieee80211_MFIE_rate_len(ieee);
@@ -1016,7 +1016,7 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco
//Include High Throuput capability && Realtek proprietary
if(ieee->pHTInfo->bCurrentHTSupport&&ieee->pHTInfo->bEnableHT)
{
- ht_cap_buf = (u8*)&(ieee->pHTInfo->SelfHTCap);
+ ht_cap_buf = (u8 *)&(ieee->pHTInfo->SelfHTCap);
ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap);
HTConstructCapabilityElement(ieee, ht_cap_buf, &ht_cap_len, encrypt);
if(ieee->pHTInfo->bCurrentRT2RTAggregation)
@@ -1314,7 +1314,7 @@ void ieee80211_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int
void ieee80211_associate_step2(struct ieee80211_device *ieee)
{
- struct sk_buff* skb;
+ struct sk_buff *skb;
struct ieee80211_network *beacon = &ieee->current_network;
del_timer_sync(&ieee->associate_timer);
@@ -1536,7 +1536,7 @@ void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee)
}
-static inline u16 auth_parse(struct sk_buff *skb, u8** challenge, int *chlen)
+static inline u16 auth_parse(struct sk_buff *skb, u8 **challenge, int *chlen)
{
struct ieee80211_authentication *a;
u8 *t;
@@ -1545,7 +1545,7 @@ static inline u16 auth_parse(struct sk_buff *skb, u8** challenge, int *chlen)
return 0xcafe;
}
*challenge = NULL;
- a = (struct ieee80211_authentication*) skb->data;
+ a = (struct ieee80211_authentication *) skb->data;
if(skb->len > (sizeof(struct ieee80211_authentication) +3)){
t = skb->data + sizeof(struct ieee80211_authentication);
@@ -1562,7 +1562,7 @@ static inline u16 auth_parse(struct sk_buff *skb, u8** challenge, int *chlen)
}
-int auth_rq_parse(struct sk_buff *skb,u8* dest)
+int auth_rq_parse(struct sk_buff *skb,u8 *dest)
{
struct ieee80211_authentication *a;
@@ -1570,7 +1570,7 @@ int auth_rq_parse(struct sk_buff *skb,u8* dest)
IEEE80211_DEBUG_MGMT("invalid len in auth request: %d\n",skb->len);
return -1;
}
- a = (struct ieee80211_authentication*) skb->data;
+ a = (struct ieee80211_authentication *) skb->data;
memcpy(dest,a->header.addr2, ETH_ALEN);
@@ -1595,7 +1595,7 @@ static short probe_rq_parse(struct ieee80211_device *ieee, struct sk_buff *skb,
memcpy(src,header->addr2, ETH_ALEN);
- skbend = (u8*)skb->data + skb->len;
+ skbend = (u8 *)skb->data + skb->len;
tag = skb->data + sizeof (struct ieee80211_hdr_3addr );
@@ -1618,7 +1618,7 @@ static short probe_rq_parse(struct ieee80211_device *ieee, struct sk_buff *skb,
}
-int assoc_rq_parse(struct sk_buff *skb,u8* dest)
+int assoc_rq_parse(struct sk_buff *skb,u8 *dest)
{
struct ieee80211_assoc_request_frame *a;
@@ -1629,7 +1629,7 @@ int assoc_rq_parse(struct sk_buff *skb,u8* dest)
return -1;
}
- a = (struct ieee80211_assoc_request_frame*) skb->data;
+ a = (struct ieee80211_assoc_request_frame *) skb->data;
memcpy(dest,a->header.addr2,ETH_ALEN);
@@ -1646,7 +1646,7 @@ static inline u16 assoc_parse(struct ieee80211_device *ieee, struct sk_buff *skb
return 0xcafe;
}
- response_head = (struct ieee80211_assoc_response_frame*) skb->data;
+ response_head = (struct ieee80211_assoc_response_frame *) skb->data;
*aid = le16_to_cpu(response_head->aid) & 0x3fff;
status_code = le16_to_cpu(response_head->status);
@@ -1888,10 +1888,10 @@ void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success)
}
spin_unlock_irqrestore(&ieee->lock, flags);
}
-void ieee80211_process_action(struct ieee80211_device* ieee, struct sk_buff* skb)
+void ieee80211_process_action(struct ieee80211_device *ieee, struct sk_buff *skb)
{
- struct ieee80211_hdr* header = (struct ieee80211_hdr*)skb->data;
- u8* act = ieee80211_get_payload(header);
+ struct ieee80211_hdr *header = (struct ieee80211_hdr *)skb->data;
+ u8 *act = ieee80211_get_payload(header);
u8 tmp = 0;
// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_BA, skb->data, skb->len);
if (act == NULL)
@@ -1926,7 +1926,7 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
{
struct ieee80211_hdr_3addr *header = (struct ieee80211_hdr_3addr *) skb->data;
u16 errcode;
- u8* challenge;
+ u8 *challenge;
int chlen=0;
int aid;
struct ieee80211_assoc_response_frame *assoc_resp;
@@ -1966,7 +1966,7 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
/* station support qos */
/* Let the register setting defaultly with Legacy station */
if(ieee->qos_support) {
- assoc_resp = (struct ieee80211_assoc_response_frame*)skb->data;
+ assoc_resp = (struct ieee80211_assoc_response_frame *)skb->data;
memset(network, 0, sizeof(*network));
if (ieee80211_parse_info_param(ieee,assoc_resp->info_element,\
rx_stats->len - sizeof(*assoc_resp),\
@@ -1979,7 +1979,7 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
memcpy(ieee->pHTInfo->PeerHTInfoBuf, network->bssht.bdHTInfoBuf, network->bssht.bdHTInfoLen);
}
if (ieee->handle_assoc_response != NULL)
- ieee->handle_assoc_response(ieee->dev, (struct ieee80211_assoc_response_frame*)header, network);
+ ieee->handle_assoc_response(ieee->dev, (struct ieee80211_assoc_response_frame *)header, network);
}
ieee80211_associate_complete(ieee);
} else {
@@ -3124,7 +3124,7 @@ inline struct sk_buff *ieee80211_disassociate_skb(
void
SendDisassociation(
struct ieee80211_device *ieee,
- u8* asSta,
+ u8 *asSta,
u8 asRsn
)
{
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
index 60746b8..7b7d929 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
@@ -1,5 +1,5 @@
/* IEEE 802.11 SoftMAC layer
- * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
*
* Mostly extracted from the rtl8180-sa2400 driver for the
* in-kernel generic ieee802.11 stack.
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index c39e680..a7bcc64f 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -25,7 +25,7 @@
******************************************************************************
Few modifications for Realtek's Wi-Fi drivers by
- Andrea Merello <andreamrl@tiscali.it>
+ Andrea Merello <andrea.merello@gmail.com>
A special thanks goes to Realtek for their support !
@@ -183,7 +183,7 @@ int ieee80211_encrypt_fragment(
struct sk_buff *frag,
int hdr_len)
{
- struct ieee80211_crypt_data* crypt = ieee->crypt[ieee->tx_keyidx];
+ struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];
int res;
if (!(crypt && crypt->ops))
@@ -243,7 +243,7 @@ struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
struct ieee80211_txb *txb;
int i;
txb = kmalloc(
- sizeof(struct ieee80211_txb) + (sizeof(u8*) * nr_frags),
+ sizeof(struct ieee80211_txb) + (sizeof(u8 *) * nr_frags),
gfp_mask);
if (!txb)
return NULL;
@@ -303,11 +303,11 @@ ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
}
#define SN_LESS(a, b) (((a-b)&0x800)!=0)
-void ieee80211_tx_query_agg_cap(struct ieee80211_device* ieee, struct sk_buff* skb, cb_desc* tcb_desc)
+void ieee80211_tx_query_agg_cap(struct ieee80211_device *ieee, struct sk_buff *skb, cb_desc *tcb_desc)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
PTX_TS_RECORD pTxTs = NULL;
- struct ieee80211_hdr_1addr* hdr = (struct ieee80211_hdr_1addr*)skb->data;
+ struct ieee80211_hdr_1addr *hdr = (struct ieee80211_hdr_1addr *)skb->data;
if (!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
return;
@@ -330,7 +330,7 @@ void ieee80211_tx_query_agg_cap(struct ieee80211_device* ieee, struct sk_buff* s
}
if(pHTInfo->bCurrentAMPDUEnable)
{
- if (!GetTs(ieee, (PTS_COMMON_INFO*)(&pTxTs), hdr->addr1, skb->priority, TX_DIR, true))
+ if (!GetTs(ieee, (PTS_COMMON_INFO *)(&pTxTs), hdr->addr1, skb->priority, TX_DIR, true))
{
printk("===>can't get TS\n");
return;
@@ -356,7 +356,7 @@ void ieee80211_tx_query_agg_cap(struct ieee80211_device* ieee, struct sk_buff* s
}
}
FORCED_AGG_SETTING:
- switch(pHTInfo->ForcedAMPDUMode )
+ switch (pHTInfo->ForcedAMPDUMode )
{
case HT_AGG_AUTO:
break;
@@ -377,7 +377,7 @@ FORCED_AGG_SETTING:
return;
}
-extern void ieee80211_qurey_ShortPreambleMode(struct ieee80211_device* ieee, cb_desc* tcb_desc)
+extern void ieee80211_qurey_ShortPreambleMode(struct ieee80211_device *ieee, cb_desc *tcb_desc)
{
tcb_desc->bUseShortPreamble = false;
if (tcb_desc->data_rate == 2)
@@ -412,7 +412,7 @@ ieee80211_query_HTCapShortGI(struct ieee80211_device *ieee, cb_desc *tcb_desc)
tcb_desc->bUseShortGI = true;
}
-void ieee80211_query_BandwidthMode(struct ieee80211_device* ieee, cb_desc *tcb_desc)
+void ieee80211_query_BandwidthMode(struct ieee80211_device *ieee, cb_desc *tcb_desc)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
@@ -432,7 +432,7 @@ void ieee80211_query_BandwidthMode(struct ieee80211_device* ieee, cb_desc *tcb_d
return;
}
-void ieee80211_query_protectionmode(struct ieee80211_device* ieee, cb_desc* tcb_desc, struct sk_buff* skb)
+void ieee80211_query_protectionmode(struct ieee80211_device *ieee, cb_desc *tcb_desc, struct sk_buff *skb)
{
// Common Settings
tcb_desc->bRTSSTBC = false;
@@ -543,7 +543,7 @@ NO_PROTECTION:
}
-void ieee80211_txrate_selectmode(struct ieee80211_device* ieee, cb_desc* tcb_desc)
+void ieee80211_txrate_selectmode(struct ieee80211_device *ieee, cb_desc *tcb_desc)
{
#ifdef TO_DO_LIST
if(!IsDataFrame(pFrame))
@@ -573,14 +573,14 @@ void ieee80211_txrate_selectmode(struct ieee80211_device* ieee, cb_desc* tcb_des
}
}
-void ieee80211_query_seqnum(struct ieee80211_device*ieee, struct sk_buff* skb, u8* dst)
+void ieee80211_query_seqnum(struct ieee80211_device *ieee, struct sk_buff *skb, u8 *dst)
{
if (is_multicast_ether_addr(dst))
return;
if (IsQoSDataFrame(skb->data)) //we deal qos data only
{
PTX_TS_RECORD pTS = NULL;
- if (!GetTs(ieee, (PTS_COMMON_INFO*)(&pTS), dst, skb->priority, TX_DIR, true))
+ if (!GetTs(ieee, (PTS_COMMON_INFO *)(&pTS), dst, skb->priority, TX_DIR, true))
{
return;
}
@@ -607,7 +607,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
u8 dest[ETH_ALEN], src[ETH_ALEN];
int qos_actived = ieee->current_network.qos_data.active;
- struct ieee80211_crypt_data* crypt;
+ struct ieee80211_crypt_data *crypt;
cb_desc *tcb_desc;
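
Beyond the pointer-style cleanups, the ieee80211_tx.c context shows ieee80211_alloc_txb() sizing a single allocation as the txb struct plus nr_frags trailing skb pointers. A sketch of that struct-plus-trailing-array pattern with made-up names; only kmalloc() is real API:

#include <linux/slab.h>
#include <linux/skbuff.h>

struct example_txb {
        int nr_frags;
        struct sk_buff *fragments[];    /* flexible array member */
};

static struct example_txb *example_alloc_txb(int nr_frags, gfp_t gfp_mask)
{
        struct example_txb *txb;

        txb = kmalloc(sizeof(*txb) + nr_frags * sizeof(struct sk_buff *),
                      gfp_mask);
        if (!txb)
                return NULL;

        txb->nr_frags = nr_frags;
        return txb;
}
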
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
index 69735d3..db0db93 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
@@ -13,7 +13,7 @@
* u16 Time //indicate time delay.
* output: none
********************************************************************************************************************/
-void ActivateBAEntry(struct ieee80211_device* ieee, PBA_RECORD pBA, u16 Time)
+void ActivateBAEntry(struct ieee80211_device *ieee, PBA_RECORD pBA, u16 Time)
{
pBA->bValid = true;
if(Time != 0)
@@ -25,7 +25,7 @@ void ActivateBAEntry(struct ieee80211_device* ieee, PBA_RECORD pBA, u16 Time)
* input: PBA_RECORD pBA //BA entry to be disabled
* output: none
********************************************************************************************************************/
-void DeActivateBAEntry( struct ieee80211_device* ieee, PBA_RECORD pBA)
+void DeActivateBAEntry( struct ieee80211_device *ieee, PBA_RECORD pBA)
{
pBA->bValid = false;
del_timer_sync(&pBA->Timer);
@@ -37,7 +37,7 @@ void DeActivateBAEntry( struct ieee80211_device* ieee, PBA_RECORD pBA)
* output: none
* notice: As PTX_TS_RECORD structure will be defined in QOS, so wait to be merged. //FIXME
********************************************************************************************************************/
-u8 TxTsDeleteBA( struct ieee80211_device* ieee, PTX_TS_RECORD pTxTs)
+u8 TxTsDeleteBA( struct ieee80211_device *ieee, PTX_TS_RECORD pTxTs)
{
PBA_RECORD pAdmittedBa = &pTxTs->TxAdmittedBARecord; //These two BA entries must exist in TS structure
PBA_RECORD pPendingBa = &pTxTs->TxPendingBARecord;
@@ -67,7 +67,7 @@ u8 TxTsDeleteBA( struct ieee80211_device* ieee, PTX_TS_RECORD pTxTs)
* output: none
* notice: As PRX_TS_RECORD structure will be defined in QOS, so wait to be merged. //FIXME, same with above
********************************************************************************************************************/
-u8 RxTsDeleteBA( struct ieee80211_device* ieee, PRX_TS_RECORD pRxTs)
+u8 RxTsDeleteBA( struct ieee80211_device *ieee, PRX_TS_RECORD pRxTs)
{
PBA_RECORD pBa = &pRxTs->RxAdmittedBARecord;
u8 bSendDELBA = false;
@@ -105,11 +105,11 @@ void ResetBaEntry( PBA_RECORD pBA)
* output: none
* return: sk_buff* skb //return constructed skb to xmit
*******************************************************************************************************************************/
-static struct sk_buff* ieee80211_ADDBA(struct ieee80211_device* ieee, u8* Dst, PBA_RECORD pBA, u16 StatusCode, u8 type)
+static struct sk_buff *ieee80211_ADDBA(struct ieee80211_device *ieee, u8 *Dst, PBA_RECORD pBA, u16 StatusCode, u8 type)
{
struct sk_buff *skb = NULL;
- struct ieee80211_hdr_3addr* BAReq = NULL;
- u8* tag = NULL;
+ struct ieee80211_hdr_3addr *BAReq = NULL;
+ u8 *tag = NULL;
u16 tmp = 0;
u16 len = ieee->tx_headroom + 9;
//category(1) + action field(1) + Dialog Token(1) + BA Parameter Set(2) + BA Timeout Value(2) + BA Start SeqCtrl(2)(or StatusCode(2))
@@ -139,7 +139,7 @@ static struct sk_buff* ieee80211_ADDBA(struct ieee80211_device* ieee, u8* Dst, P
BAReq->frame_ctl = cpu_to_le16(IEEE80211_STYPE_MANAGE_ACT); //action frame
//tag += sizeof( struct ieee80211_hdr_3addr); //move to action field
- tag = (u8*)skb_put(skb, 9);
+ tag = (u8 *)skb_put(skb, 9);
*tag ++= ACT_CAT_BA;
*tag ++= type;
// Dialog Token
@@ -150,22 +150,22 @@ static struct sk_buff* ieee80211_ADDBA(struct ieee80211_device* ieee, u8* Dst, P
// Status Code
printk("=====>to send ADDBARSP\n");
tmp = cpu_to_le16(StatusCode);
- memcpy(tag, (u8*)&tmp, 2);
+ memcpy(tag, (u8 *)&tmp, 2);
tag += 2;
}
// BA Parameter Set
tmp = cpu_to_le16(pBA->BaParamSet.shortData);
- memcpy(tag, (u8*)&tmp, 2);
+ memcpy(tag, (u8 *)&tmp, 2);
tag += 2;
// BA Timeout Value
tmp = cpu_to_le16(pBA->BaTimeoutValue);
- memcpy(tag, (u8*)&tmp, 2);
+ memcpy(tag, (u8 *)&tmp, 2);
tag += 2;
if (ACT_ADDBAREQ == type)
{
// BA Start SeqCtrl
- memcpy(tag,(u8*)&(pBA->BaStartSeqCtrl), 2);
+ memcpy(tag,(u8 *)&(pBA->BaStartSeqCtrl), 2);
tag += 2;
}
@@ -184,9 +184,9 @@ static struct sk_buff* ieee80211_ADDBA(struct ieee80211_device* ieee, u8* Dst, P
* output: none
* return: sk_buff* skb //return constructed skb to xmit
********************************************************************************************************************/
-static struct sk_buff* ieee80211_DELBA(
- struct ieee80211_device* ieee,
- u8* dst,
+static struct sk_buff *ieee80211_DELBA(
+ struct ieee80211_device *ieee,
+ u8 *dst,
PBA_RECORD pBA,
TR_SELECT TxRxSelect,
u16 ReasonCode
@@ -194,8 +194,8 @@ static struct sk_buff* ieee80211_DELBA(
{
DELBA_PARAM_SET DelbaParamSet;
struct sk_buff *skb = NULL;
- struct ieee80211_hdr_3addr* Delba = NULL;
- u8* tag = NULL;
+ struct ieee80211_hdr_3addr *Delba = NULL;
+ u8 *tag = NULL;
u16 tmp = 0;
//len = head len + DELBA Parameter Set(2) + Reason Code(2)
u16 len = 6 + ieee->tx_headroom;
@@ -224,18 +224,18 @@ static struct sk_buff* ieee80211_DELBA(
memcpy(Delba->addr3, ieee->current_network.bssid, ETH_ALEN);
Delba->frame_ctl = cpu_to_le16(IEEE80211_STYPE_MANAGE_ACT); //action frame
- tag = (u8*)skb_put(skb, 6);
+ tag = (u8 *)skb_put(skb, 6);
*tag ++= ACT_CAT_BA;
*tag ++= ACT_DELBA;
// DELBA Parameter Set
tmp = cpu_to_le16(DelbaParamSet.shortData);
- memcpy(tag, (u8*)&tmp, 2);
+ memcpy(tag, (u8 *)&tmp, 2);
tag += 2;
// Reason Code
tmp = cpu_to_le16(ReasonCode);
- memcpy(tag, (u8*)&tmp, 2);
+ memcpy(tag, (u8 *)&tmp, 2);
tag += 2;
IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_BA, skb->data, skb->len);
@@ -251,7 +251,7 @@ static struct sk_buff* ieee80211_DELBA(
* output: none
* notice: If any possible, please hide pBA in ieee. And temporarily use Manage Queue as softmac_mgmt_xmit() usually does
********************************************************************************************************************/
-void ieee80211_send_ADDBAReq(struct ieee80211_device* ieee, u8* dst, PBA_RECORD pBA)
+void ieee80211_send_ADDBAReq(struct ieee80211_device *ieee, u8 *dst, PBA_RECORD pBA)
{
struct sk_buff *skb = NULL;
skb = ieee80211_ADDBA(ieee, dst, pBA, 0, ACT_ADDBAREQ); //construct ACT_ADDBAREQ frames so set statuscode zero.
@@ -278,7 +278,7 @@ void ieee80211_send_ADDBAReq(struct ieee80211_device* ieee, u8* dst, PBA_RECORD
* output: none
* notice: If any possible, please hide pBA in ieee. And temporarily use Manage Queue as softmac_mgmt_xmit() usually does
********************************************************************************************************************/
-void ieee80211_send_ADDBARsp(struct ieee80211_device* ieee, u8* dst, PBA_RECORD pBA, u16 StatusCode)
+void ieee80211_send_ADDBARsp(struct ieee80211_device *ieee, u8 *dst, PBA_RECORD pBA, u16 StatusCode)
{
struct sk_buff *skb = NULL;
skb = ieee80211_ADDBA(ieee, dst, pBA, StatusCode, ACT_ADDBARSP); //construct ACT_ADDBARSP frames
@@ -305,7 +305,7 @@ void ieee80211_send_ADDBARsp(struct ieee80211_device* ieee, u8* dst, PBA_RECORD
* notice: If any possible, please hide pBA in ieee. And temporarily use Manage Queue as softmac_mgmt_xmit() usually does
********************************************************************************************************************/
-void ieee80211_send_DELBA(struct ieee80211_device* ieee, u8* dst, PBA_RECORD pBA, TR_SELECT TxRxSelect, u16 ReasonCode)
+void ieee80211_send_DELBA(struct ieee80211_device *ieee, u8 *dst, PBA_RECORD pBA, TR_SELECT TxRxSelect, u16 ReasonCode)
{
struct sk_buff *skb = NULL;
skb = ieee80211_DELBA(ieee, dst, pBA, TxRxSelect, ReasonCode); //construct ACT_ADDBARSP frames
@@ -327,14 +327,14 @@ void ieee80211_send_DELBA(struct ieee80211_device* ieee, u8* dst, PBA_RECORD pBA
* return: 0(pass), other(fail)
* notice: As this function need support of QOS, I comment some code out. And when qos is ready, this code need to be support.
********************************************************************************************************************/
-int ieee80211_rx_ADDBAReq( struct ieee80211_device* ieee, struct sk_buff *skb)
+int ieee80211_rx_ADDBAReq( struct ieee80211_device *ieee, struct sk_buff *skb)
{
- struct ieee80211_hdr_3addr* req = NULL;
+ struct ieee80211_hdr_3addr *req = NULL;
u16 rc = 0;
- u8 * dst = NULL, *pDialogToken = NULL, *tag = NULL;
+ u8 *dst = NULL, *pDialogToken = NULL, *tag = NULL;
PBA_RECORD pBA = NULL;
PBA_PARAM_SET pBaParamSet = NULL;
- u16* pBaTimeoutVal = NULL;
+ u16 *pBaTimeoutVal = NULL;
PSEQUENCE_CONTROL pBaStartSeqCtrl = NULL;
PRX_TS_RECORD pTS = NULL;
@@ -346,13 +346,13 @@ int ieee80211_rx_ADDBAReq( struct ieee80211_device* ieee, struct sk_buff *skb)
IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_BA, skb->data, skb->len);
- req = ( struct ieee80211_hdr_3addr*) skb->data;
- tag = (u8*)req;
- dst = (u8*)(&req->addr2[0]);
+ req = ( struct ieee80211_hdr_3addr *) skb->data;
+ tag = (u8 *)req;
+ dst = (u8 *)(&req->addr2[0]);
tag += sizeof( struct ieee80211_hdr_3addr);
pDialogToken = tag + 2; //category+action
pBaParamSet = (PBA_PARAM_SET)(tag + 3); //+DialogToken
- pBaTimeoutVal = (u16*)(tag + 5);
+ pBaTimeoutVal = (u16 *)(tag + 5);
pBaStartSeqCtrl = (PSEQUENCE_CONTROL)(req + 7);
printk("====================>rx ADDBAREQ from :%pM\n", dst);
@@ -369,7 +369,7 @@ int ieee80211_rx_ADDBAReq( struct ieee80211_device* ieee, struct sk_buff *skb)
// If there is no matched TS, reject the ADDBA request.
if( !GetTs(
ieee,
- (PTS_COMMON_INFO*)(&pTS),
+ (PTS_COMMON_INFO *)(&pTS),
dst,
(u8)(pBaParamSet->field.TID),
RX_DIR,
@@ -427,13 +427,13 @@ OnADDBAReq_Fail:
* return: 0(pass), other(fail)
* notice: As this function need support of QOS, I comment some code out. And when qos is ready, this code need to be support.
********************************************************************************************************************/
-int ieee80211_rx_ADDBARsp( struct ieee80211_device* ieee, struct sk_buff *skb)
+int ieee80211_rx_ADDBARsp( struct ieee80211_device *ieee, struct sk_buff *skb)
{
- struct ieee80211_hdr_3addr* rsp = NULL;
+ struct ieee80211_hdr_3addr *rsp = NULL;
PBA_RECORD pPendingBA, pAdmittedBA;
PTX_TS_RECORD pTS = NULL;
- u8* dst = NULL, *pDialogToken = NULL, *tag = NULL;
- u16* pStatusCode = NULL, *pBaTimeoutVal = NULL;
+ u8 *dst = NULL, *pDialogToken = NULL, *tag = NULL;
+ u16 *pStatusCode = NULL, *pBaTimeoutVal = NULL;
PBA_PARAM_SET pBaParamSet = NULL;
u16 ReasonCode;
@@ -442,14 +442,14 @@ int ieee80211_rx_ADDBARsp( struct ieee80211_device* ieee, struct sk_buff *skb)
IEEE80211_DEBUG(IEEE80211_DL_ERR, " Invalid skb len in BARSP(%d / %zu)\n", skb->len, (sizeof( struct ieee80211_hdr_3addr) + 9));
return -1;
}
- rsp = ( struct ieee80211_hdr_3addr*)skb->data;
- tag = (u8*)rsp;
- dst = (u8*)(&rsp->addr2[0]);
+ rsp = ( struct ieee80211_hdr_3addr *)skb->data;
+ tag = (u8 *)rsp;
+ dst = (u8 *)(&rsp->addr2[0]);
tag += sizeof( struct ieee80211_hdr_3addr);
pDialogToken = tag + 2;
- pStatusCode = (u16*)(tag + 3);
+ pStatusCode = (u16 *)(tag + 3);
pBaParamSet = (PBA_PARAM_SET)(tag + 5);
- pBaTimeoutVal = (u16*)(tag + 7);
+ pBaTimeoutVal = (u16 *)(tag + 7);
// Check the capability
// Since we can always receive A-MPDU, we just check if it is under HT mode.
@@ -469,7 +469,7 @@ int ieee80211_rx_ADDBARsp( struct ieee80211_device* ieee, struct sk_buff *skb)
//
if (!GetTs(
ieee,
- (PTS_COMMON_INFO*)(&pTS),
+ (PTS_COMMON_INFO *)(&pTS),
dst,
(u8)(pBaParamSet->field.TID),
TX_DIR,
@@ -560,12 +560,12 @@ OnADDBARsp_Reject:
* return: 0(pass), other(fail)
* notice: As this function need support of QOS, I comment some code out. And when qos is ready, this code need to be support.
********************************************************************************************************************/
-int ieee80211_rx_DELBA(struct ieee80211_device* ieee,struct sk_buff *skb)
+int ieee80211_rx_DELBA(struct ieee80211_device *ieee,struct sk_buff *skb)
{
- struct ieee80211_hdr_3addr* delba = NULL;
+ struct ieee80211_hdr_3addr *delba = NULL;
PDELBA_PARAM_SET pDelBaParamSet = NULL;
- u16* pReasonCode = NULL;
- u8* dst = NULL;
+ u16 *pReasonCode = NULL;
+ u8 *dst = NULL;
if (skb->len < sizeof( struct ieee80211_hdr_3addr) + 6)
{
@@ -581,11 +581,11 @@ int ieee80211_rx_DELBA(struct ieee80211_device* ieee,struct sk_buff *skb)
}
IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_BA, skb->data, skb->len);
- delba = ( struct ieee80211_hdr_3addr*)skb->data;
- dst = (u8*)(&delba->addr2[0]);
+ delba = ( struct ieee80211_hdr_3addr *)skb->data;
+ dst = (u8 *)(&delba->addr2[0]);
delba += sizeof( struct ieee80211_hdr_3addr);
pDelBaParamSet = (PDELBA_PARAM_SET)(delba+2);
- pReasonCode = (u16*)(delba+4);
+ pReasonCode = (u16 *)(delba+4);
if(pDelBaParamSet->field.Initiator == 1)
{
@@ -593,7 +593,7 @@ int ieee80211_rx_DELBA(struct ieee80211_device* ieee,struct sk_buff *skb)
if( !GetTs(
ieee,
- (PTS_COMMON_INFO*)&pRxTs,
+ (PTS_COMMON_INFO *)&pRxTs,
dst,
(u8)pDelBaParamSet->field.TID,
RX_DIR,
@@ -611,7 +611,7 @@ int ieee80211_rx_DELBA(struct ieee80211_device* ieee,struct sk_buff *skb)
if(!GetTs(
ieee,
- (PTS_COMMON_INFO*)&pTxTs,
+ (PTS_COMMON_INFO *)&pTxTs,
dst,
(u8)pDelBaParamSet->field.TID,
TX_DIR,
@@ -636,7 +636,7 @@ int ieee80211_rx_DELBA(struct ieee80211_device* ieee,struct sk_buff *skb)
//
void
TsInitAddBA(
- struct ieee80211_device* ieee,
+ struct ieee80211_device *ieee,
PTX_TS_RECORD pTS,
u8 Policy,
u8 bOverwritePending
@@ -665,7 +665,7 @@ TsInitAddBA(
}
void
-TsInitDelBA( struct ieee80211_device* ieee, PTS_COMMON_INFO pTsCommonInfo, TR_SELECT TxRxSelect)
+TsInitDelBA( struct ieee80211_device *ieee, PTS_COMMON_INFO pTsCommonInfo, TR_SELECT TxRxSelect)
{
if(TxRxSelect == TX_DIR)
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
index 268b270..e956da5 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
@@ -51,7 +51,7 @@ static u8 CISCO_BROADCOM[3] = {0x00, 0x17, 0x94};
* return: none
* notice: These value need be modified if any changes.
* *****************************************************************************************************************/
-void HTUpdateDefaultSetting(struct ieee80211_device* ieee)
+void HTUpdateDefaultSetting(struct ieee80211_device *ieee)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
//const typeof( ((struct ieee80211_device *)0)->pHTInfo ) *__mptr = &pHTInfo;
@@ -121,7 +121,7 @@ void HTUpdateDefaultSetting(struct ieee80211_device* ieee)
* return: none
* notice: Driver should not print out this message by default.
* *****************************************************************************************************************/
-void HTDebugHTCapability(u8* CapIE, u8* TitleString )
+void HTDebugHTCapability(u8 *CapIE, u8 *TitleString )
{
static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; // For 11n EWC definition, 2007.07.17, by Emily
@@ -158,7 +158,7 @@ void HTDebugHTCapability(u8* CapIE, u8* TitleString )
* return: none
* notice: Driver should not print out this message by default.
* *****************************************************************************************************************/
-void HTDebugHTInfo(u8* InfoIE, u8* TitleString)
+void HTDebugHTInfo(u8 *InfoIE, u8 *TitleString)
{
static u8 EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34}; // For 11n EWC definition, 2007.07.17, by Emily
@@ -177,7 +177,7 @@ void HTDebugHTInfo(u8* InfoIE, u8* TitleString)
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tPrimary channel = %d\n", pHTInfoEle->ControlChl);
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSenondary channel =");
- switch(pHTInfoEle->ExtChlOffset)
+ switch (pHTInfoEle->ExtChlOffset)
{
case 0:
IEEE80211_DEBUG(IEEE80211_DL_HT, "Not Present\n");
@@ -195,7 +195,7 @@ void HTDebugHTInfo(u8* InfoIE, u8* TitleString)
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tRecommended channel width = %s\n", (pHTInfoEle->RecommemdedTxWidth)?"20Mhz": "40Mhz");
IEEE80211_DEBUG(IEEE80211_DL_HT, "\tOperation mode for protection = ");
- switch(pHTInfoEle->OptMode)
+ switch (pHTInfoEle->OptMode)
{
case 0:
IEEE80211_DEBUG(IEEE80211_DL_HT, "No Protection\n");
@@ -219,7 +219,7 @@ void HTDebugHTInfo(u8* InfoIE, u8* TitleString)
/*
* Return: true if station in half n mode and AP supports 40 bw
*/
-bool IsHTHalfNmode40Bandwidth(struct ieee80211_device* ieee)
+bool IsHTHalfNmode40Bandwidth(struct ieee80211_device *ieee)
{
bool retValue = false;
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
@@ -238,7 +238,7 @@ bool IsHTHalfNmode40Bandwidth(struct ieee80211_device* ieee)
return retValue;
}
-bool IsHTHalfNmodeSGI(struct ieee80211_device* ieee, bool is40MHz)
+bool IsHTHalfNmodeSGI(struct ieee80211_device *ieee, bool is40MHz)
{
bool retValue = false;
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
@@ -265,7 +265,7 @@ bool IsHTHalfNmodeSGI(struct ieee80211_device* ieee, bool is40MHz)
return retValue;
}
-u16 HTHalfMcsToDataRate(struct ieee80211_device* ieee, u8 nMcsRate)
+u16 HTHalfMcsToDataRate(struct ieee80211_device *ieee, u8 nMcsRate)
{
u8 is40MHz;
@@ -278,7 +278,7 @@ u16 HTHalfMcsToDataRate(struct ieee80211_device* ieee, u8 nMcsRate)
}
-u16 HTMcsToDataRate( struct ieee80211_device* ieee, u8 nMcsRate)
+u16 HTMcsToDataRate( struct ieee80211_device *ieee, u8 nMcsRate)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
@@ -297,7 +297,7 @@ u16 HTMcsToDataRate( struct ieee80211_device* ieee, u8 nMcsRate)
* return: tx rate
* notice: quite unsure about how to use this function //wb
* *****************************************************************************************************************/
-u16 TxCountToDataRate( struct ieee80211_device* ieee, u8 nDataRate)
+u16 TxCountToDataRate( struct ieee80211_device *ieee, u8 nDataRate)
{
//PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
u16 CCKOFDMRate[12] = {0x02 , 0x04 , 0x0b , 0x16 , 0x0c , 0x12 , 0x18 , 0x24 , 0x30 , 0x48 , 0x60 , 0x6c};
@@ -344,10 +344,10 @@ u16 TxCountToDataRate( struct ieee80211_device* ieee, u8 nDataRate)
-bool IsHTHalfNmodeAPs(struct ieee80211_device* ieee)
+bool IsHTHalfNmodeAPs(struct ieee80211_device *ieee)
{
bool retValue = false;
- struct ieee80211_network* net = &ieee->current_network;
+ struct ieee80211_network *net = &ieee->current_network;
if((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3)==0) ||
(memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3)==0) ||
(memcmp(net->bssid, PCI_RALINK, 3)==0) ||
@@ -376,10 +376,10 @@ bool IsHTHalfNmodeAPs(struct ieee80211_device* ieee)
* return:
* notice:
* *****************************************************************************************************************/
-void HTIOTPeerDetermine(struct ieee80211_device* ieee)
+void HTIOTPeerDetermine(struct ieee80211_device *ieee)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
- struct ieee80211_network* net = &ieee->current_network;
+ struct ieee80211_network *net = &ieee->current_network;
if(net->bssht.bdRT2RTAggregation)
pHTInfo->IOTPeer = HT_IOT_PEER_REALTEK;
else if(net->broadcom_cap_exist)
@@ -413,7 +413,7 @@ void HTIOTPeerDetermine(struct ieee80211_device* ieee)
* output: none
* return: return 1 if driver should declare MCS13 only(otherwise return 0)
* *****************************************************************************************************************/
-u8 HTIOTActIsDisableMCS14(struct ieee80211_device* ieee, u8* PeerMacAddr)
+u8 HTIOTActIsDisableMCS14(struct ieee80211_device *ieee, u8 *PeerMacAddr)
{
u8 ret = 0;
return ret;
@@ -432,7 +432,7 @@ u8 HTIOTActIsDisableMCS14(struct ieee80211_device* ieee, u8* PeerMacAddr)
* Return: true if driver should disable MCS15
* 2008.04.15 Emily
*/
-bool HTIOTActIsDisableMCS15(struct ieee80211_device* ieee)
+bool HTIOTActIsDisableMCS15(struct ieee80211_device *ieee)
{
bool retValue = false;
@@ -469,7 +469,7 @@ bool HTIOTActIsDisableMCS15(struct ieee80211_device* ieee)
* Return: true if driver should disable all two spatial stream packet
* 2008.04.21 Emily
*/
-bool HTIOTActIsDisableMCSTwoSpatialStream(struct ieee80211_device* ieee, u8 *PeerMacAddr)
+bool HTIOTActIsDisableMCSTwoSpatialStream(struct ieee80211_device *ieee, u8 *PeerMacAddr)
{
bool retValue = false;
@@ -486,7 +486,7 @@ bool HTIOTActIsDisableMCSTwoSpatialStream(struct ieee80211_device* ieee, u8 *Pee
* output: none
* return: return 1 if driver should disable EDCA turbo mode(otherwise return 0)
* *****************************************************************************************************************/
-u8 HTIOTActIsDisableEDCATurbo(struct ieee80211_device* ieee, u8* PeerMacAddr)
+u8 HTIOTActIsDisableEDCATurbo(struct ieee80211_device *ieee, u8 *PeerMacAddr)
{
u8 retValue = false; // default enable EDCA Turbo mode.
// Set specific EDCA parameter for different AP in DM handler.
@@ -515,7 +515,7 @@ u8 HTIOTActIsMgntUseCCK6M(struct ieee80211_network *network)
return retValue;
}
-u8 HTIOTActIsCCDFsync(u8* PeerMacAddr)
+u8 HTIOTActIsCCDFsync(u8 *PeerMacAddr)
{
u8 retValue = 0;
if( (memcmp(PeerMacAddr, UNKNOWN_BORADCOM, 3)==0) ||
@@ -547,7 +547,7 @@ void HTResetIOTSetting(
* return: none
* notice: posHTCap can't be null and should be initialized before.
* *****************************************************************************************************************/
-void HTConstructCapabilityElement(struct ieee80211_device* ieee, u8* posHTCap, u8* len, u8 IsEncrypt)
+void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap, u8 *len, u8 IsEncrypt)
{
PRT_HIGH_THROUGHPUT pHT = ieee->pHTInfo;
PHT_CAPABILITY_ELE pCapELE = NULL;
@@ -666,7 +666,7 @@ void HTConstructCapabilityElement(struct ieee80211_device* ieee, u8* posHTCap, u
* return: none
* notice: posHTCap can't be null and be initialized before. only AP and IBSS sta should do this
* *****************************************************************************************************************/
-void HTConstructInfoElement(struct ieee80211_device* ieee, u8* posHTInfo, u8* len, u8 IsEncrypt)
+void HTConstructInfoElement(struct ieee80211_device *ieee, u8 *posHTInfo, u8 *len, u8 IsEncrypt)
{
PRT_HIGH_THROUGHPUT pHT = ieee->pHTInfo;
PHT_INFORMATION_ELE pHTInfoEle = (PHT_INFORMATION_ELE)posHTInfo;
@@ -738,7 +738,7 @@ void HTConstructInfoElement(struct ieee80211_device* ieee, u8* posHTInfo, u8* le
* return: none
* notice:
* *****************************************************************************************************************/
-void HTConstructRT2RTAggElement(struct ieee80211_device* ieee, u8* posRT2RTAgg, u8* len)
+void HTConstructRT2RTAggElement(struct ieee80211_device *ieee, u8 *posRT2RTAgg, u8 *len)
{
if (posRT2RTAgg == NULL) {
IEEE80211_DEBUG(IEEE80211_DL_ERR, "posRT2RTAgg can't be null in HTConstructRT2RTAggElement()\n");
@@ -792,7 +792,7 @@ void HTConstructRT2RTAggElement(struct ieee80211_device* ieee, u8* posRT2RTAgg,
* return: always we return true
* notice:
* *****************************************************************************************************************/
-u8 HT_PickMCSRate(struct ieee80211_device* ieee, u8* pOperateMCS)
+u8 HT_PickMCSRate(struct ieee80211_device *ieee, u8 *pOperateMCS)
{
u8 i;
if (pOperateMCS == NULL)
@@ -801,7 +801,7 @@ u8 HT_PickMCSRate(struct ieee80211_device* ieee, u8* pOperateMCS)
return false;
}
- switch(ieee->mode)
+ switch (ieee->mode)
{
case IEEE_A:
case IEEE_B:
@@ -855,7 +855,7 @@ u8 HT_PickMCSRate(struct ieee80211_device* ieee, u8* pOperateMCS)
* return: Highest MCS rate included in pMCSRateSet and filtered by pMCSFilter
* notice:
* *****************************************************************************************************************/
-u8 HTGetHighestMCSRate(struct ieee80211_device* ieee, u8* pMCSRateSet, u8* pMCSFilter)
+u8 HTGetHighestMCSRate(struct ieee80211_device *ieee, u8 *pMCSRateSet, u8 *pMCSFilter)
{
u8 i, j;
u8 bitMap;
@@ -907,7 +907,7 @@ u8 HTGetHighestMCSRate(struct ieee80211_device* ieee, u8* pMCSRateSet, u8* pMCSF
**
** \pHTSupportedCap: the connected STA's supported rate Capability element
*/
-u8 HTFilterMCSRate( struct ieee80211_device* ieee, u8* pSupportMCS, u8* pOperateMCS)
+u8 HTFilterMCSRate( struct ieee80211_device *ieee, u8 *pSupportMCS, u8 *pOperateMCS)
{
u8 i=0;
@@ -937,14 +937,14 @@ u8 HTFilterMCSRate( struct ieee80211_device* ieee, u8* pSupportMCS, u8* pOperate
return true;
}
-void HTSetConnectBwMode(struct ieee80211_device* ieee, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset);
+void HTSetConnectBwMode(struct ieee80211_device *ieee, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset);
void HTOnAssocRsp(struct ieee80211_device *ieee)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
PHT_CAPABILITY_ELE pPeerHTCap = NULL;
PHT_INFORMATION_ELE pPeerHTInfo = NULL;
u16 nMaxAMSDUSize = 0;
- u8* pMcsFilter = NULL;
+ u8 *pMcsFilter = NULL;
static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; // For 11n EWC definition, 2007.07.17, by Emily
static u8 EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34}; // For 11n EWC definition, 2007.07.17, by Emily
@@ -1115,7 +1115,7 @@ void HTOnAssocRsp(struct ieee80211_device *ieee)
}
-void HTSetConnectBwModeCallback(struct ieee80211_device* ieee);
+void HTSetConnectBwModeCallback(struct ieee80211_device *ieee);
/********************************************************************************************************************
*function: initialize HT info(struct PRT_HIGH_THROUGHPUT)
* input: struct ieee80211_device* ieee
@@ -1124,7 +1124,7 @@ void HTSetConnectBwModeCallback(struct ieee80211_device* ieee);
* notice: This function is called when * (1) MPInitialization Phase * (2) Receiving of Deauthentication from AP
********************************************************************************************************************/
// TODO: Should this funciton be called when receiving of Disassociation?
-void HTInitializeHTInfo(struct ieee80211_device* ieee)
+void HTInitializeHTInfo(struct ieee80211_device *ieee)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
@@ -1160,10 +1160,10 @@ void HTInitializeHTInfo(struct ieee80211_device* ieee)
// Initialize all of the parameters related to 11n
- memset((void*)(&(pHTInfo->SelfHTCap)), 0, sizeof(pHTInfo->SelfHTCap));
- memset((void*)(&(pHTInfo->SelfHTInfo)), 0, sizeof(pHTInfo->SelfHTInfo));
- memset((void*)(&(pHTInfo->PeerHTCapBuf)), 0, sizeof(pHTInfo->PeerHTCapBuf));
- memset((void*)(&(pHTInfo->PeerHTInfoBuf)), 0, sizeof(pHTInfo->PeerHTInfoBuf));
+ memset((void *)(&(pHTInfo->SelfHTCap)), 0, sizeof(pHTInfo->SelfHTCap));
+ memset((void *)(&(pHTInfo->SelfHTInfo)), 0, sizeof(pHTInfo->SelfHTInfo));
+ memset((void *)(&(pHTInfo->PeerHTCapBuf)), 0, sizeof(pHTInfo->PeerHTCapBuf));
+ memset((void *)(&(pHTInfo->PeerHTInfoBuf)), 0, sizeof(pHTInfo->PeerHTInfoBuf));
pHTInfo->bSwBwInProgress = false;
pHTInfo->ChnlOp = CHNLOP_NONE;
@@ -1179,7 +1179,7 @@ void HTInitializeHTInfo(struct ieee80211_device* ieee)
//MCS rate initialized here
{
- u8* RegHTSuppRateSets = &(ieee->RegHTSuppRateSet[0]);
+ u8 *RegHTSuppRateSets = &(ieee->RegHTSuppRateSet[0]);
RegHTSuppRateSets[0] = 0xFF; //support MCS 0~7
RegHTSuppRateSets[1] = 0xFF; //support MCS 8~15
RegHTSuppRateSets[4] = 0x01; //support MCS 32
@@ -1214,7 +1214,7 @@ void HTInitializeBssDesc(PBSS_HT pBssHT)
* return: none
* notice: This function should ONLY be called before association
********************************************************************************************************************/
-void HTResetSelfAndSavePeerSetting(struct ieee80211_device* ieee, struct ieee80211_network * pNetwork)
+void HTResetSelfAndSavePeerSetting(struct ieee80211_device *ieee, struct ieee80211_network *pNetwork)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
// u16 nMaxAMSDUSize;
@@ -1297,7 +1297,7 @@ void HTResetSelfAndSavePeerSetting(struct ieee80211_device* ieee, struct ieee802
}
-void HTUpdateSelfAndPeerSetting(struct ieee80211_device* ieee, struct ieee80211_network * pNetwork)
+void HTUpdateSelfAndPeerSetting(struct ieee80211_device *ieee, struct ieee80211_network *pNetwork)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
// PHT_CAPABILITY_ELE pPeerHTCap = (PHT_CAPABILITY_ELE)pNetwork->bssht.bdHTCapBuf;
@@ -1317,7 +1317,7 @@ void HTUpdateSelfAndPeerSetting(struct ieee80211_device* ieee, struct ieee80211_
}
}
-void HTUseDefaultSetting(struct ieee80211_device* ieee)
+void HTUseDefaultSetting(struct ieee80211_device *ieee)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
// u8 regBwOpMode;
@@ -1370,7 +1370,7 @@ void HTUseDefaultSetting(struct ieee80211_device* ieee)
* return: return true if HT control field exists(false otherwise)
* notice:
********************************************************************************************************************/
-u8 HTCCheck(struct ieee80211_device* ieee, u8* pFrame)
+u8 HTCCheck(struct ieee80211_device *ieee, u8 *pFrame)
{
if(ieee->pHTInfo->bCurrentHTSupport)
{
@@ -1386,7 +1386,7 @@ u8 HTCCheck(struct ieee80211_device* ieee, u8* pFrame)
//
// This function set bandwidth mode in protocol layer.
//
-void HTSetConnectBwMode(struct ieee80211_device* ieee, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset)
+void HTSetConnectBwMode(struct ieee80211_device *ieee, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
// u32 flags = 0;
@@ -1435,7 +1435,7 @@ void HTSetConnectBwMode(struct ieee80211_device* ieee, HT_CHANNEL_WIDTH Bandwidt
// spin_unlock_irqrestore(&(ieee->bw_spinlock), flags);
}
-void HTSetConnectBwModeCallback(struct ieee80211_device* ieee)
+void HTSetConnectBwModeCallback(struct ieee80211_device *ieee)
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
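HTGetHighestMCSRate() above is documented as returning the highest MCS contained in pMCSRateSet after masking with pMCSFilter. Below is a minimal standalone sketch of that kind of bitmap scan; the 16-byte array length, the function name and the return encoding are assumptions for illustration, not the driver's exact behaviour.

#include <stdint.h>
#include <stdio.h>

static int highest_mcs(const uint8_t *rate_set, const uint8_t *filter)
{
	int best = -1;

	for (int byte = 0; byte < 16; byte++) {
		/* keep only the rates that survive the filter */
		uint8_t avail = rate_set[byte] & filter[byte];

		for (int bit = 0; bit < 8; bit++)
			if (avail & (1 << bit))
				best = byte * 8 + bit;
	}
	return best; /* -1 if no MCS survives the filter */
}

int main(void)
{
	uint8_t set[16]    = { 0xff, 0xff };  /* MCS 0-15 supported     */
	uint8_t filter[16] = { 0xff, 0x00 };  /* allow MCS 0-7 only     */

	printf("highest usable MCS: %d\n", highest_mcs(set, filter));
	return 0;
}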
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h b/drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h
index 2348ccd..f2d52ca 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h
@@ -483,7 +483,7 @@ typedef struct _OCTET_STRING{
typedef struct _STA_QOS{
//DECLARE_RT_OBJECT(STA_QOS);
u8 WMMIEBuf[MAX_WMMELE_LENGTH];
- u8* WMMIE;
+ u8 *WMMIE;
// Part 1. Self QoS Mode.
QOS_MODE QosCapability; //QoS Capability, 2006-06-14 Isaiah
@@ -498,7 +498,7 @@ typedef struct _STA_QOS{
int NumBcnBeforeTrigger;
// Part 2. EDCA Parameter (perAC)
- u8 * pWMMInfoEle;
+ u8 *pWMMInfoEle;
u8 WMMParamEle[WMM_PARAM_ELEMENT_SIZE];
u8 WMMPELength;
@@ -537,12 +537,12 @@ typedef struct _BSS_QOS{
QOS_MODE bdQoSMode;
u8 bdWMMIEBuf[MAX_WMMELE_LENGTH];
- u8* bdWMMIE;
+ u8 *bdWMMIE;
QOS_ELE_SUBTYPE EleSubType;
- u8 * pWMMInfoEle;
- u8 * pWMMParamEle;
+ u8 *pWMMInfoEle;
+ u8 *pWMMParamEle;
QOS_INFO_FIELD QosInfoField;
AC_PARAM AcParameter[4];
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
index 0310d07..3058120 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
@@ -234,12 +234,12 @@ void AdmitTS(struct ieee80211_device *ieee, PTS_COMMON_INFO pTsCommonInfo, u32 I
}
-PTS_COMMON_INFO SearchAdmitTRStream(struct ieee80211_device *ieee, u8* Addr, u8 TID, TR_SELECT TxRxSelect)
+PTS_COMMON_INFO SearchAdmitTRStream(struct ieee80211_device *ieee, u8 *Addr, u8 TID, TR_SELECT TxRxSelect)
{
//DIRECTION_VALUE dir;
u8 dir;
bool search_dir[4] = {0, 0, 0, 0};
- struct list_head* psearch_list; //FIXME
+ struct list_head *psearch_list; //FIXME
PTS_COMMON_INFO pRet = NULL;
if(ieee->iw_mode == IW_MODE_MASTER) //ap mode
{
@@ -311,7 +311,7 @@ PTS_COMMON_INFO SearchAdmitTRStream(struct ieee80211_device *ieee, u8* Addr, u8
void MakeTSEntry(
PTS_COMMON_INFO pTsCommonInfo,
- u8* Addr,
+ u8 *Addr,
PTSPEC_BODY pTSPEC,
PQOS_TCLAS pTCLAS,
u8 TCLAS_Num,
@@ -326,10 +326,10 @@ void MakeTSEntry(
memcpy(pTsCommonInfo->Addr, Addr, 6);
if(pTSPEC != NULL)
- memcpy((u8*)(&(pTsCommonInfo->TSpec)), (u8*)pTSPEC, sizeof(TSPEC_BODY));
+ memcpy((u8 *)(&(pTsCommonInfo->TSpec)), (u8 *)pTSPEC, sizeof(TSPEC_BODY));
for(count = 0; count < TCLAS_Num; count++)
- memcpy((u8*)(&(pTsCommonInfo->TClass[count])), (u8*)pTCLAS, sizeof(QOS_TCLAS));
+ memcpy((u8 *)(&(pTsCommonInfo->TClass[count])), (u8 *)pTCLAS, sizeof(QOS_TCLAS));
pTsCommonInfo->TClasProc = TCLAS_Proc;
pTsCommonInfo->TClasNum = TCLAS_Num;
@@ -337,9 +337,9 @@ void MakeTSEntry(
bool GetTs(
- struct ieee80211_device* ieee,
+ struct ieee80211_device *ieee,
PTS_COMMON_INFO *ppTS,
- u8* Addr,
+ u8 *Addr,
u8 TID,
TR_SELECT TxRxSelect, //Rx:1, Tx:0
bool bAddNewTs
@@ -367,7 +367,7 @@ bool GetTs(
return false;
}
- switch(TID)
+ switch (TID)
{
case 0:
case 3:
@@ -416,12 +416,12 @@ bool GetTs(
//
TSPEC_BODY TSpec;
PQOS_TSINFO pTSInfo = &TSpec.f.TSInfo;
- struct list_head* pUnusedList =
+ struct list_head *pUnusedList =
(TxRxSelect == TX_DIR)?
(&ieee->Tx_TS_Unused_List):
(&ieee->Rx_TS_Unused_List);
- struct list_head* pAddmitList =
+ struct list_head *pAddmitList =
(TxRxSelect == TX_DIR)?
(&ieee->Tx_TS_Admit_List):
(&ieee->Rx_TS_Admit_List);
@@ -473,7 +473,7 @@ bool GetTs(
}
void RemoveTsEntry(
- struct ieee80211_device* ieee,
+ struct ieee80211_device *ieee,
PTS_COMMON_INFO pTs,
TR_SELECT TxRxSelect
)
@@ -501,7 +501,7 @@ void RemoveTsEntry(
list_del_init(&pRxReorderEntry->List);
{
int i = 0;
- struct ieee80211_rxb * prxb = pRxReorderEntry->prxb;
+ struct ieee80211_rxb *prxb = pRxReorderEntry->prxb;
if (unlikely(!prxb))
{
spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
@@ -527,7 +527,7 @@ void RemoveTsEntry(
}
}
-void RemovePeerTS(struct ieee80211_device* ieee, u8* Addr)
+void RemovePeerTS(struct ieee80211_device *ieee, u8 *Addr)
{
PTS_COMMON_INFO pTS, pTmpTS;
@@ -574,7 +574,7 @@ void RemovePeerTS(struct ieee80211_device* ieee, u8* Addr)
}
}
-void RemoveAllTS(struct ieee80211_device* ieee)
+void RemoveAllTS(struct ieee80211_device *ieee)
{
PTS_COMMON_INFO pTS, pTmpTS;
@@ -607,7 +607,7 @@ void RemoveAllTS(struct ieee80211_device* ieee)
}
}
-void TsStartAddBaProcess(struct ieee80211_device* ieee, PTX_TS_RECORD pTxTS)
+void TsStartAddBaProcess(struct ieee80211_device *ieee, PTX_TS_RECORD pTxTS)
{
if(pTxTS->bAddBaReqInProgress == false)
{
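GetTs() above resolves an (Addr, TID, direction) triple to a TS record, either returning one that is already admitted or, when bAddNewTs is set, initialising an entry taken from the unused list. The standalone sketch below mirrors that lookup-or-allocate pattern with a fixed-size table standing in for the driver's admit/unused lists; all names and sizes are illustrative, not driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ts_entry {
	bool    in_use;
	uint8_t addr[6];
	uint8_t tid;
	int     dir;            /* 0 = TX, 1 = RX, as in the TxRxSelect comment */
};

static struct ts_entry ts_table[16];

static struct ts_entry *get_ts(const uint8_t *addr, uint8_t tid, int dir,
			       bool add_new)
{
	struct ts_entry *free_slot = NULL;

	for (int i = 0; i < 16; i++) {
		struct ts_entry *e = &ts_table[i];

		if (!e->in_use) {
			if (!free_slot)
				free_slot = e;   /* remember a spare entry */
			continue;
		}
		if (e->tid == tid && e->dir == dir &&
		    memcmp(e->addr, addr, 6) == 0)
			return e;                /* already admitted */
	}
	if (!add_new || !free_slot)
		return NULL;

	/* admit a new TS for this peer/TID/direction */
	free_slot->in_use = true;
	memcpy(free_slot->addr, addr, 6);
	free_slot->tid = tid;
	free_slot->dir = dir;
	return free_slot;
}

int main(void)
{
	uint8_t peer[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("first lookup:  %p\n", (void *)get_ts(peer, 3, 0, true));
	printf("second lookup: %p\n", (void *)get_ts(peer, 3, 0, false));
	return 0;
}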
diff --git a/drivers/staging/rtl8192u/r8180_93cx6.c b/drivers/staging/rtl8192u/r8180_93cx6.c
index 7e49ad8..c61729b 100644
--- a/drivers/staging/rtl8192u/r8180_93cx6.c
+++ b/drivers/staging/rtl8192u/r8180_93cx6.c
@@ -3,7 +3,7 @@
memory is addressed by 16 bits words.
This is part of rtl8180 OpenSource driver.
- Copyright (C) Andrea Merello 2004 <andreamrl@tiscali.it>
+ Copyright (C) Andrea Merello 2004 <andrea.merello@gmail.com>
Released under the terms of GPL (General Public Licence)
Parts of this driver are based on the GPL part of the
@@ -22,13 +22,15 @@
void eprom_cs(struct net_device *dev, short bit)
{
- if(bit)
- write_nic_byte_E(dev, EPROM_CMD,
- (1<<EPROM_CS_SHIFT) | \
- read_nic_byte_E(dev, EPROM_CMD)); //enable EPROM
+ u8 cmdreg;
+
+ read_nic_byte_E(dev, EPROM_CMD, &cmdreg);
+ if (bit)
+ /* enable EPROM */
+ write_nic_byte_E(dev, EPROM_CMD, cmdreg | EPROM_CS_BIT);
else
- write_nic_byte_E(dev, EPROM_CMD, read_nic_byte_E(dev, EPROM_CMD)\
- &~(1<<EPROM_CS_SHIFT)); //disable EPROM
+ /* disable EPROM */
+ write_nic_byte_E(dev, EPROM_CMD, cmdreg & ~EPROM_CS_BIT);
force_pci_posting(dev);
udelay(EPROM_DELAY);
@@ -37,12 +39,15 @@ void eprom_cs(struct net_device *dev, short bit)
void eprom_ck_cycle(struct net_device *dev)
{
- write_nic_byte_E(dev, EPROM_CMD,
- (1<<EPROM_CK_SHIFT) | read_nic_byte_E(dev,EPROM_CMD));
+ u8 cmdreg;
+
+ read_nic_byte_E(dev, EPROM_CMD, &cmdreg);
+ write_nic_byte_E(dev, EPROM_CMD, cmdreg | EPROM_CK_BIT);
force_pci_posting(dev);
udelay(EPROM_DELAY);
- write_nic_byte_E(dev, EPROM_CMD,
- read_nic_byte_E(dev, EPROM_CMD) &~ (1<<EPROM_CK_SHIFT));
+
+ read_nic_byte_E(dev, EPROM_CMD, &cmdreg);
+ write_nic_byte_E(dev, EPROM_CMD, cmdreg & ~EPROM_CK_BIT);
force_pci_posting(dev);
udelay(EPROM_DELAY);
}
@@ -50,12 +55,13 @@ void eprom_ck_cycle(struct net_device *dev)
void eprom_w(struct net_device *dev,short bit)
{
- if(bit)
- write_nic_byte_E(dev, EPROM_CMD, (1<<EPROM_W_SHIFT) | \
- read_nic_byte_E(dev,EPROM_CMD));
+ u8 cmdreg;
+
+ read_nic_byte_E(dev, EPROM_CMD, &cmdreg);
+ if (bit)
+ write_nic_byte_E(dev, EPROM_CMD, cmdreg | EPROM_W_BIT);
else
- write_nic_byte_E(dev, EPROM_CMD, read_nic_byte_E(dev,EPROM_CMD)\
- &~(1<<EPROM_W_SHIFT));
+ write_nic_byte_E(dev, EPROM_CMD, cmdreg & ~EPROM_W_BIT);
force_pci_posting(dev);
udelay(EPROM_DELAY);
@@ -64,12 +70,14 @@ void eprom_w(struct net_device *dev,short bit)
short eprom_r(struct net_device *dev)
{
- short bit;
+ u8 bit;
- bit=(read_nic_byte_E(dev, EPROM_CMD) & (1<<EPROM_R_SHIFT) );
+ read_nic_byte_E(dev, EPROM_CMD, &bit);
udelay(EPROM_DELAY);
- if(bit) return 1;
+ if (bit & EPROM_R_BIT)
+ return 1;
+
return 0;
}
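With read_nic_byte_E() now returning the register value through a pointer, eprom_cs(), eprom_ck_cycle() and eprom_w() all follow the same read-modify-write shape. Below is a standalone sketch of that shape with an assumed register model; eprom_set_bit(), read_reg(), write_reg() and the bit positions are inventions for the demo, not driver symbols.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EPROM_CS_BIT (1 << 3)   /* assumed bit positions for the demo */
#define EPROM_CK_BIT (1 << 2)

static uint8_t eprom_cmd;       /* stands in for the EPROM_CMD register */

static void read_reg(uint8_t *val)  { *val = eprom_cmd; }
static void write_reg(uint8_t val)  { eprom_cmd = val; }

static void eprom_set_bit(uint8_t mask, bool on)
{
	uint8_t cmdreg;

	read_reg(&cmdreg);                 /* 1. read the current value  */
	if (on)
		write_reg(cmdreg | mask);  /* 2a. set the requested bit  */
	else
		write_reg(cmdreg & ~mask); /* 2b. clear the requested bit */
}

int main(void)
{
	eprom_set_bit(EPROM_CS_BIT, true);
	eprom_set_bit(EPROM_CK_BIT, true);
	eprom_set_bit(EPROM_CS_BIT, false);
	printf("EPROM_CMD = 0x%02x\n", (unsigned)eprom_cmd);
	return 0;
}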
diff --git a/drivers/staging/rtl8192u/r8180_93cx6.h b/drivers/staging/rtl8192u/r8180_93cx6.h
index 5cea51e..ee55dbf 100644
--- a/drivers/staging/rtl8192u/r8180_93cx6.h
+++ b/drivers/staging/rtl8192u/r8180_93cx6.h
@@ -1,6 +1,6 @@
/*
This is part of rtl8187 OpenSource driver
- Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it>
+ Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
Released under the terms of GPL (General Public Licence)
Parts of this driver are based on the GPL part of the official realtek driver
diff --git a/drivers/staging/rtl8192u/r8180_pm.c b/drivers/staging/rtl8192u/r8180_pm.c
index 0c58d0e..999968d 100644
--- a/drivers/staging/rtl8192u/r8180_pm.c
+++ b/drivers/staging/rtl8192u/r8180_pm.c
@@ -5,7 +5,7 @@
does not do anything useful.
This is part of rtl8180 OpenSource driver.
- Copyright (C) Andrea Merello 2004 <andreamrl@tiscali.it>
+ Copyright (C) Andrea Merello 2004 <andrea.merello@gmail.com>
Released under the terms of GPL (General Public Licence)
*/
diff --git a/drivers/staging/rtl8192u/r8180_pm.h b/drivers/staging/rtl8192u/r8180_pm.h
index 52d6fba..4be63da 100644
--- a/drivers/staging/rtl8192u/r8180_pm.h
+++ b/drivers/staging/rtl8192u/r8180_pm.h
@@ -5,7 +5,7 @@
does not do anything useful.
This is part of rtl8180 OpenSource driver.
- Copyright (C) Andrea Merello 2004 <andreamrl@tiscali.it>
+ Copyright (C) Andrea Merello 2004 <andrea.merello@gmail.com>
Released under the terms of GPL (General Public Licence)
*/
diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.c b/drivers/staging/rtl8192u/r8190_rtl8256.c
index cf9713f..40b14a2 100644
--- a/drivers/staging/rtl8192u/r8190_rtl8256.c
+++ b/drivers/staging/rtl8192u/r8190_rtl8256.c
@@ -23,7 +23,7 @@
* Return: NONE
* Note: 8226 support both 20M and 40 MHz
*---------------------------------------------------------------------------*/
-void PHY_SetRF8256Bandwidth(struct net_device* dev , HT_CHANNEL_WIDTH Bandwidth) //20M or 40M
+void PHY_SetRF8256Bandwidth(struct net_device *dev , HT_CHANNEL_WIDTH Bandwidth) //20M or 40M
{
u8 eRFPath;
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -34,7 +34,7 @@ void PHY_SetRF8256Bandwidth(struct net_device* dev , HT_CHANNEL_WIDTH Bandwidth)
if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
continue;
- switch(Bandwidth)
+ switch (Bandwidth)
{
case HT_CHANNEL_WIDTH_20:
if(priv->card_8192_version == VERSION_819xU_A || priv->card_8192_version == VERSION_819xU_B)// 8256 D-cut, E-cut, xiong: consider it later!
@@ -73,7 +73,7 @@ void PHY_SetRF8256Bandwidth(struct net_device* dev , HT_CHANNEL_WIDTH Bandwidth)
break;
default:
- RT_TRACE(COMP_ERR, "PHY_SetRF8256Bandwidth(): unknown Bandwidth: %#X\n",Bandwidth );
+ RT_TRACE(COMP_ERR, "PHY_SetRF8256Bandwidth(): unknown Bandwidth: %#X\n",Bandwidth);
break;
}
@@ -86,7 +86,7 @@ void PHY_SetRF8256Bandwidth(struct net_device* dev , HT_CHANNEL_WIDTH Bandwidth)
* Output: NONE
* Return: NONE
*---------------------------------------------------------------------------*/
-void PHY_RF8256_Config(struct net_device* dev)
+void PHY_RF8256_Config(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
// Initialize general global value
@@ -104,7 +104,7 @@ void PHY_RF8256_Config(struct net_device* dev)
* Output: NONE
* Return: NONE
*---------------------------------------------------------------------------*/
-void phy_RF8256_Config_ParaFile(struct net_device* dev)
+void phy_RF8256_Config_ParaFile(struct net_device *dev)
{
u32 u4RegValue = 0;
//static s1Byte szRadioAFile[] = RTL819X_PHY_RADIO_A;
@@ -133,7 +133,7 @@ void phy_RF8256_Config_ParaFile(struct net_device* dev)
// pHalData->RfReg0Value[eRFPath] = rtl8192_phy_QueryRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, rGlobalCtrl, bMaskDWord);
/*----Store original RFENV control type----*/
- switch(eRFPath)
+ switch (eRFPath)
{
case RF90_PATH_A:
case RF90_PATH_C:
@@ -168,7 +168,7 @@ void phy_RF8256_Config_ParaFile(struct net_device* dev)
RetryTimes = ConstRetryTimes;
RF3_Final_Value = 0;
/*----Initialize RF fom connfiguration file----*/
- switch(eRFPath)
+ switch (eRFPath)
{
case RF90_PATH_A:
while(RF3_Final_Value!=RegValueToBeCheck && RetryTimes!=0)
@@ -209,7 +209,7 @@ void phy_RF8256_Config_ParaFile(struct net_device* dev)
}
/*----Restore RFENV control type----*/;
- switch(eRFPath)
+ switch (eRFPath)
{
case RF90_PATH_A:
case RF90_PATH_C:
@@ -237,14 +237,14 @@ phy_RF8256_Config_ParaFile_Fail:
}
-void PHY_SetRF8256CCKTxPower(struct net_device* dev, u8 powerlevel)
+void PHY_SetRF8256CCKTxPower(struct net_device *dev, u8 powerlevel)
{
u32 TxAGC=0;
struct r8192_priv *priv = ieee80211_priv(dev);
//modified by vivi, 20080109
TxAGC = powerlevel;
- if(priv->bDynamicTxLowPower == TRUE ) //cosa 05/22/2008 for scan
+ if(priv->bDynamicTxLowPower == TRUE) //cosa 05/22/2008 for scan
{
if(priv->CustomerID == RT_CID_819x_Netcore)
TxAGC = 0x22;
@@ -258,7 +258,7 @@ void PHY_SetRF8256CCKTxPower(struct net_device* dev, u8 powerlevel)
}
-void PHY_SetRF8256OFDMTxPower(struct net_device* dev, u8 powerlevel)
+void PHY_SetRF8256OFDMTxPower(struct net_device *dev, u8 powerlevel)
{
struct r8192_priv *priv = ieee80211_priv(dev);
//Joseph TxPower for 8192 testing
diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.h b/drivers/staging/rtl8192u/r8190_rtl8256.h
index 5c1f650..592e780 100644
--- a/drivers/staging/rtl8192u/r8190_rtl8256.h
+++ b/drivers/staging/rtl8192u/r8190_rtl8256.h
@@ -1,7 +1,7 @@
/*
This is part of the rtl8180-sa2400 driver
released under the GPL (See file COPYING for details).
- Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it>
+ Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
This files contains programming code for the rtl8256
radio frontend.
@@ -18,10 +18,10 @@
#else
#define RTL819X_TOTAL_RF_PATH 2 //for 8192U
#endif
-extern void PHY_SetRF8256Bandwidth(struct net_device* dev , HT_CHANNEL_WIDTH Bandwidth);
-extern void PHY_RF8256_Config(struct net_device* dev);
-extern void phy_RF8256_Config_ParaFile(struct net_device* dev);
-extern void PHY_SetRF8256CCKTxPower(struct net_device* dev, u8 powerlevel);
-extern void PHY_SetRF8256OFDMTxPower(struct net_device* dev, u8 powerlevel);
+extern void PHY_SetRF8256Bandwidth(struct net_device *dev , HT_CHANNEL_WIDTH Bandwidth);
+extern void PHY_RF8256_Config(struct net_device *dev);
+extern void phy_RF8256_Config_ParaFile(struct net_device *dev);
+extern void PHY_SetRF8256CCKTxPower(struct net_device *dev, u8 powerlevel);
+extern void PHY_SetRF8256OFDMTxPower(struct net_device *dev, u8 powerlevel);
#endif
diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h
index bedeb33..b484ee1 100644
--- a/drivers/staging/rtl8192u/r8192U.h
+++ b/drivers/staging/rtl8192u/r8192U.h
@@ -1,40 +1,38 @@
/*
- This is part of rtl8187 OpenSource driver.
- Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it>
- Released under the terms of GPL (General Public Licence)
-
- Parts of this driver are based on the GPL part of the
- official realtek driver
-
- Parts of this driver are based on the rtl8192 driver skeleton
- from Patric Schenke & Andres Salomon
-
- Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver
-
- We want to thank the Authors of those projects and the Ndiswrapper
- project Authors.
-*/
+ * This is part of rtl8187 OpenSource driver.
+ * Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
+ * Released under the terms of GPL (General Public Licence)
+ *
+ * Parts of this driver are based on the GPL part of the
+ * official realtek driver
+ *
+ * Parts of this driver are based on the rtl8192 driver skeleton
+ * from Patric Schenke & Andres Salomon
+ *
+ * Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver
+ *
+ * We want to thank the Authors of those projects and the Ndiswrapper
+ * project Authors.
+ */
#ifndef R819xU_H
#define R819xU_H
#include <linux/module.h>
#include <linux/kernel.h>
-//#include <linux/config.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
-//#include <linux/pci.h>
#include <linux/usb.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
-#include <linux/rtnetlink.h> //for rtnl_lock()
+#include <linux/rtnetlink.h>
#include <linux/wireless.h>
#include <linux/timer.h>
-#include <linux/proc_fs.h> // Necessary because we use the proc fs
+#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/random.h>
#include <asm/io.h>
@@ -42,7 +40,7 @@
#define RTL8192U
#define RTL819xU_MODULE_NAME "rtl819xU"
-//added for HW security, john.0629
+/* HW security */
#define FALSE 0
#define TRUE 1
#define MAX_KEY_LEN 61
@@ -81,90 +79,91 @@
#define BIT30 0x40000000
#define BIT31 0x80000000
-// Rx smooth factor
#define Rx_Smooth_Factor 20
-#define DMESG(x,a...)
-#define DMESGW(x,a...)
-#define DMESGE(x,a...)
+#define DMESG(x, a...)
+#define DMESGW(x, a...)
+#define DMESGE(x, a...)
extern u32 rt_global_debug_component;
#define RT_TRACE(component, x, args...) \
-do { if(rt_global_debug_component & component) \
- printk(KERN_DEBUG RTL819xU_MODULE_NAME ":" x "\n" , \
- ##args);\
-}while(0);
-
-#define COMP_TRACE BIT0 // For function call tracing.
-#define COMP_DBG BIT1 // Only for temporary debug message.
-#define COMP_INIT BIT2 // during driver initialization / halt / reset.
-
-
-#define COMP_RECV BIT3 // Receive data path.
-#define COMP_SEND BIT4 // Send part path.
-#define COMP_IO BIT5 // I/O Related. Added by Annie, 2006-03-02.
-#define COMP_POWER BIT6 // 802.11 Power Save mode or System/Device Power state related.
-#define COMP_EPROM BIT7 // 802.11 link related: join/start BSS, leave BSS.
-#define COMP_SWBW BIT8 // For bandwidth switch.
-#define COMP_POWER_TRACKING BIT9 //FOR 8190 TX POWER TRACKING
-#define COMP_TURBO BIT10 // For Turbo Mode related. By Annie, 2005-10-21.
-#define COMP_QOS BIT11 // For QoS.
-#define COMP_RATE BIT12 // For Rate Adaptive mechanism, 2006.07.02, by rcnjko.
-#define COMP_RM BIT13 // For Radio Measurement.
-#define COMP_DIG BIT14 // For DIG, 2006.09.25, by rcnjko.
-#define COMP_PHY BIT15
-#define COMP_CH BIT16 //channel setting debug
-#define COMP_TXAGC BIT17 // For Tx power, 060928, by rcnjko.
-#define COMP_HIPWR BIT18 // For High Power Mechanism, 060928, by rcnjko.
-#define COMP_HALDM BIT19 // For HW Dynamic Mechanism, 061010, by rcnjko.
-#define COMP_SEC BIT20 // Event handling
-#define COMP_LED BIT21 // For LED.
-#define COMP_RF BIT22 // For RF.
-//1!!!!!!!!!!!!!!!!!!!!!!!!!!!
-#define COMP_RXDESC BIT23 // Show Rx desc information for SD3 debug. Added by Annie, 2006-07-15.
-//1//1Attention Please!!!<11n or 8190 specific code should be put below this line>
-//1!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-#define COMP_FIRMWARE BIT24 //for firmware downloading
-#define COMP_HT BIT25 // For 802.11n HT related information. by Emily 2006-8-11
-#define COMP_AMSDU BIT26 // For A-MSDU Debugging
-
-#define COMP_SCAN BIT27
-//#define COMP_RESET BIT28
-#define COMP_DOWN BIT29 //for rm driver module
-#define COMP_RESET BIT30 //for silent reset
-#define COMP_ERR BIT31 //for error out, always on
+ do { \
+ if (rt_global_debug_component & component) \
+ pr_debug("RTL8192U: " x "\n", ##args); \
+ } while (0)
+
+#define COMP_TRACE BIT0 /* Function call tracing. */
+#define COMP_DBG BIT1
+#define COMP_INIT BIT2 /* Driver initialization/halt/reset. */
+
+
+#define COMP_RECV BIT3 /* Receive data path. */
+#define COMP_SEND BIT4 /* Send data path. */
+#define COMP_IO BIT5
+/* 802.11 Power Save mode or System/Device Power state. */
+#define COMP_POWER BIT6
+/* 802.11 link related: join/start BSS, leave BSS. */
+#define COMP_EPROM BIT7
+#define COMP_SWBW BIT8 /* Bandwidth switch. */
+#define COMP_POWER_TRACKING BIT9 /* 8190 TX Power Tracking */
+#define COMP_TURBO BIT10 /* Turbo Mode */
+#define COMP_QOS BIT11
+#define COMP_RATE BIT12 /* Rate Adaptive mechanism */
+#define COMP_RM BIT13 /* Radio Measurement */
+#define COMP_DIG BIT14
+#define COMP_PHY BIT15
+#define COMP_CH BIT16 /* Channel setting debug */
+#define COMP_TXAGC BIT17 /* Tx power */
+#define COMP_HIPWR BIT18 /* High Power Mechanism */
+#define COMP_HALDM BIT19 /* HW Dynamic Mechanism */
+#define COMP_SEC BIT20 /* Event handling */
+#define COMP_LED BIT21
+#define COMP_RF BIT22
+#define COMP_RXDESC BIT23 /* Rx desc information for SD3 debug */
+
+/* 11n or 8190 specific code */
+
+#define COMP_FIRMWARE BIT24 /* Firmware downloading */
+#define COMP_HT BIT25 /* 802.11n HT related information */
+#define COMP_AMSDU BIT26 /* A-MSDU Debugging */
+#define COMP_SCAN BIT27
+#define COMP_DOWN BIT29 /* rm driver module */
+#define COMP_RESET BIT30 /* Silent reset */
+#define COMP_ERR BIT31 /* Error out, always on */
#define RTL819x_DEBUG
#ifdef RTL819x_DEBUG
-#define assert(expr) \
- if (!(expr)) { \
- printk( "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr,__FILE__,__FUNCTION__,__LINE__); \
- }
-//wb added to debug out data buf
-//if you want print DATA buffer related BA, please set ieee80211_debug_level to DATA|BA
-#define RT_DEBUG_DATA(level, data, datalen) \
- do{ if ((rt_global_debug_component & (level)) == (level)) \
- { \
- int i; \
- u8* pdata = (u8*) data; \
- printk(KERN_DEBUG RTL819xU_MODULE_NAME ": %s()\n", __FUNCTION__); \
- for(i=0; i<(int)(datalen); i++) \
- { \
+#define RTL8192U_ASSERT(expr) \
+ do { \
+ if (!(expr)) { \
+ pr_debug("Assertion failed! %s, %s, %s, line = %d\n", \
+ #expr, __FILE__, __func__, __LINE__); \
+ } \
+ } while (0)
+/*
+ * Debug out data buf.
+ * If you want to print DATA buffer related BA,
+ * please set ieee80211_debug_level to DATA|BA
+ */
+#define RT_DEBUG_DATA(level, data, datalen) \
+ do { \
+ if ((rt_global_debug_component & (level)) == (level)) { \
+ int i; \
+ u8 *pdata = (u8 *) data; \
+ pr_debug("RTL8192U: %s()\n", __func__); \
+ for (i = 0; i < (int)(datalen); i++) { \
printk("%2x ", pdata[i]); \
- if ((i+1)%16 == 0) printk("\n"); \
- } \
- printk("\n"); \
- } \
+ if ((i+1)%16 == 0) \
+ printk("\n"); \
+ } \
+ printk("\n"); \
+ } \
} while (0)
#else
-#define assert(expr) do {} while (0)
-#define RT_DEBUG_DATA(level, data, datalen) do {} while(0)
+#define RTL8192U_ASSERT(expr) do {} while (0)
+#define RT_DEBUG_DATA(level, data, datalen) do {} while (0)
#endif /* RTL8169_DEBUG */
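The reworked RT_TRACE and RTL8192U_ASSERT bodies end in "} while (0)" with no trailing semicolon, unlike the old "}while(0);" form, so the caller's semicolon completes the statement. A small standalone illustration of why that matters under if/else; LOG below is a stand-in macro, not driver code.

#include <stdio.h>

#define LOG(cond, msg)                          \
	do {                                    \
		if (cond)                       \
			printf("%s\n", msg);    \
	} while (0)

int main(void)
{
	int verbose = 1;

	/* With a macro ending in "} while (0);" this if/else would not
	 * compile: the macro's own semicolon terminates the if and
	 * leaves the else dangling. */
	if (verbose)
		LOG(1, "verbose path");
	else
		printf("quiet path\n");

	return 0;
}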
-//
-// Queue Select Value in TxDesc
-//
+/* Queue Select Value in TxDesc */
#define QSLT_BK 0x1
#define QSLT_BE 0x0
#define QSLT_VI 0x4
@@ -208,13 +207,13 @@ do { if(rt_global_debug_component & component) \
#define IEEE80211_WATCH_DOG_TIME 2000
#define PHY_Beacon_RSSI_SLID_WIN_MAX 10
-//for txpowertracking by amy
+/* For Tx Power Tracking */
#define OFDM_Table_Length 19
#define CCK_Table_length 12
-/* for rtl819x */
+/* For rtl819x */
typedef struct _tx_desc_819x_usb {
- //DWORD 0
+ /* DWORD 0 */
u16 PktSize;
u8 Offset;
u8 Reserved0:3;
@@ -224,7 +223,7 @@ typedef struct _tx_desc_819x_usb {
u8 LINIP:1;
u8 OWN:1;
- //DWORD 1
+ /* DWORD 1 */
u8 TxFWInfoSize;
u8 RATid:3;
u8 DISFB:1;
@@ -239,27 +238,26 @@ typedef struct _tx_desc_819x_usb {
u8 SecDescAssign:1;
u8 SecType:2;
- //DWORD 2
+ /* DWORD 2 */
u16 TxBufferSize;
- //u16 Reserved2;
u8 ResvForPaddingLen:7;
u8 Reserved3:1;
u8 Reserved4;
- //DWORD 3, 4, 5
+ /* DWORD 3, 4, 5 */
u32 Reserved5;
u32 Reserved6;
u32 Reserved7;
-}tx_desc_819x_usb, *ptx_desc_819x_usb;
+} tx_desc_819x_usb, *ptx_desc_819x_usb;
#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
typedef struct _tx_desc_819x_usb_aggr_subframe {
- //DWORD 0
+ /* DWORD 0 */
u16 PktSize;
u8 Offset;
u8 TxFWInfoSize;
- //DWORD 1
+ /* DWORD 1 */
u8 RATid:3;
u8 DISFB:1;
u8 USERATE:1;
@@ -274,13 +272,13 @@ typedef struct _tx_desc_819x_usb_aggr_subframe {
u8 SecType:2;
u8 PacketID:7;
u8 OWN:1;
-}tx_desc_819x_usb_aggr_subframe, *ptx_desc_819x_usb_aggr_subframe;
+} tx_desc_819x_usb_aggr_subframe, *ptx_desc_819x_usb_aggr_subframe;
#endif
typedef struct _tx_desc_cmd_819x_usb {
- //DWORD 0
+ /* DWORD 0 */
u16 Reserved0;
u8 Reserved1;
u8 Reserved2:3;
@@ -290,65 +288,64 @@ typedef struct _tx_desc_cmd_819x_usb {
u8 LINIP:1;
u8 OWN:1;
- //DOWRD 1
- //u32 Reserved3;
+ /* DWORD 1 */
u8 TxFWInfoSize;
u8 Reserved3;
u8 QueueSelect;
u8 Reserved4;
- //DOWRD 2
+ /* DWORD 2 */
u16 TxBufferSize;
u16 Reserved5;
- //DWORD 3,4,5
- //u32 TxBufferAddr;
- //u32 NextDescAddress;
+ /* DWORD 3, 4, 5 */
u32 Reserved6;
u32 Reserved7;
u32 Reserved8;
-}tx_desc_cmd_819x_usb, *ptx_desc_cmd_819x_usb;
+} tx_desc_cmd_819x_usb, *ptx_desc_cmd_819x_usb;
typedef struct _tx_fwinfo_819x_usb {
- //DOWRD 0
- u8 TxRate:7;
- u8 CtsEnable:1;
- u8 RtsRate:7;
- u8 RtsEnable:1;
- u8 TxHT:1;
- u8 Short:1; //Short PLCP for CCK, or short GI for 11n MCS
- u8 TxBandwidth:1; // This is used for HT MCS rate only.
- u8 TxSubCarrier:2; // This is used for legacy OFDM rate only.
- u8 STBC:2;
- u8 AllowAggregation:1;
- u8 RtsHT:1; //Interpret RtsRate field as high throughput data rate
- u8 RtsShort:1; //Short PLCP for CCK, or short GI for 11n MCS
- u8 RtsBandwidth:1; // This is used for HT MCS rate only.
- u8 RtsSubcarrier:2; // This is used for legacy OFDM rate only.
- u8 RtsSTBC:2;
- u8 EnableCPUDur:1; //Enable firmware to recalculate and assign packet duration
-
- //DWORD 1
- u32 RxMF:2;
- u32 RxAMD:3;
- u32 TxPerPktInfoFeedback:1;//1 indicate Tx info gathtered by firmware and returned by Rx Cmd
- u32 Reserved1:2;
- u32 TxAGCOffSet:4;
- u32 TxAGCSign:1;
- u32 Tx_INFO_RSVD:6;
- u32 PacketID:13;
- //u32 Reserved;
-}tx_fwinfo_819x_usb, *ptx_fwinfo_819x_usb;
+ /* DWORD 0 */
+ u8 TxRate:7;
+ u8 CtsEnable:1;
+ u8 RtsRate:7;
+ u8 RtsEnable:1;
+ u8 TxHT:1;
+ u8 Short:1; /* Short PLCP for CCK or short GI for 11n MCS */
+ u8 TxBandwidth:1; /* Used for HT MCS rate only */
+ u8 TxSubCarrier:2; /* Used for legacy OFDM rate only */
+ u8 STBC:2;
+ u8 AllowAggregation:1;
+ /* Interpret RtsRate field as high throughput data rate */
+ u8 RtsHT:1;
+ u8 RtsShort:1; /* Short PLCP for CCK or short GI for 11n MCS */
+ u8 RtsBandwidth:1; /* Used for HT MCS rate only */
+ u8 RtsSubcarrier:2; /* Used for legacy OFDM rate only */
+ u8 RtsSTBC:2;
+ /* Enable firmware to recalculate and assign packet duration */
+ u8 EnableCPUDur:1;
+
+ /* DWORD 1 */
+ u32 RxMF:2;
+ u32 RxAMD:3;
+ /* 1 indicates Tx info gathered by firmware and returned by Rx Cmd */
+ u32 TxPerPktInfoFeedback:1;
+ u32 Reserved1:2;
+ u32 TxAGCOffSet:4;
+ u32 TxAGCSign:1;
+ u32 Tx_INFO_RSVD:6;
+ u32 PacketID:13;
+} tx_fwinfo_819x_usb, *ptx_fwinfo_819x_usb;
typedef struct rtl8192_rx_info {
struct urb *urb;
struct net_device *dev;
u8 out_pipe;
-}rtl8192_rx_info ;
+} rtl8192_rx_info;
-typedef struct rx_desc_819x_usb{
- //DOWRD 0
+typedef struct rx_desc_819x_usb {
+ /* DWORD 0 */
u16 Length:14;
u16 CRC32:1;
u16 ICV:1;
@@ -356,47 +353,32 @@ typedef struct rx_desc_819x_usb{
u8 Shift:2;
u8 PHYStatus:1;
u8 SWDec:1;
- //u8 LastSeg:1;
- //u8 FirstSeg:1;
- //u8 EOR:1;
- //u8 OWN:1;
u8 Reserved1:4;
- //DWORD 1
+ /* DWORD 1 */
u32 Reserved2;
-
- //DWORD 2
- //u32 Reserved3;
-
- //DWORD 3
- //u32 BufferAddress;
-
-}rx_desc_819x_usb, *prx_desc_819x_usb;
+} rx_desc_819x_usb, *prx_desc_819x_usb;
#ifdef USB_RX_AGGREGATION_SUPPORT
-typedef struct _rx_desc_819x_usb_aggr_subframe{
- //DOWRD 0
+typedef struct _rx_desc_819x_usb_aggr_subframe {
+ /* DWORD 0 */
u16 Length:14;
u16 CRC32:1;
u16 ICV:1;
u8 Offset;
u8 RxDrvInfoSize;
- //DOWRD 1
+ /* DWORD 1 */
u8 Shift:2;
u8 PHYStatus:1;
u8 SWDec:1;
u8 Reserved1:4;
u8 Reserved2;
u16 Reserved3;
- //DWORD 2
- //u4Byte Reserved3;
- //DWORD 3
- //u4Byte BufferAddress;
-}rx_desc_819x_usb_aggr_subframe, *prx_desc_819x_usb_aggr_subframe;
+} rx_desc_819x_usb_aggr_subframe, *prx_desc_819x_usb_aggr_subframe;
#endif
-typedef struct rx_drvinfo_819x_usb{
- //DWORD 0
+typedef struct rx_drvinfo_819x_usb {
+ /* DWORD 0 */
u16 Reserved1:12;
u16 PartAggr:1;
u16 FirstAGGR:1;
@@ -413,14 +395,15 @@ typedef struct rx_drvinfo_819x_usb{
u8 Bcast:1;
u8 Reserved4:1;
- //DWORD 1
+ /* DWORD 1 */
u32 TSFL;
-}rx_drvinfo_819x_usb, *prx_drvinfo_819x_usb;
+} rx_drvinfo_819x_usb, *prx_drvinfo_819x_usb;
-
-#define MAX_DEV_ADDR_SIZE 8 /* support till 64 bit bus width OS */
-#define MAX_FIRMWARE_INFORMATION_SIZE 32 /*2006/04/30 by Emily forRTL8190*/
+/* Support till 64 bit bus width OS */
+#define MAX_DEV_ADDR_SIZE 8
+/* For RTL8190 */
+#define MAX_FIRMWARE_INFORMATION_SIZE 32
#define MAX_802_11_HEADER_LENGTH (40 + MAX_FIRMWARE_INFORMATION_SIZE)
#define ENCRYPTION_MAX_OVERHEAD 128
#define USB_HWDESC_HEADER_LEN sizeof(tx_desc_819x_usb)
@@ -438,55 +421,55 @@ typedef struct rx_drvinfo_819x_usb{
#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
#define TX_PACKET_DRVAGGR_SUBFRAME_SHIFT_BYTES (sizeof(tx_desc_819x_usb_aggr_subframe) + sizeof(tx_fwinfo_819x_usb))
#endif
-#define scrclng 4 // octets for crc32 (FCS, ICV)
+/* Octets for crc32 (FCS, ICV) */
+#define scrclng 4
-typedef enum rf_optype
-{
+typedef enum rf_optype {
RF_OP_By_SW_3wire = 0,
RF_OP_By_FW,
RF_OP_MAX
-}rf_op_type;
+} rf_op_type;
/* 8190 Loopback Mode definition */
-typedef enum _rtl819xUsb_loopback{
+typedef enum _rtl819xUsb_loopback {
RTL819xU_NO_LOOPBACK = 0,
RTL819xU_MAC_LOOPBACK = 1,
RTL819xU_DMA_LOOPBACK = 2,
RTL819xU_CCK_LOOPBACK = 3,
-}rtl819xUsb_loopback_e;
+} rtl819xUsb_loopback_e;
/* due to rtl8192 firmware */
-typedef enum _desc_packet_type_e{
+typedef enum _desc_packet_type_e {
DESC_PACKET_TYPE_INIT = 0,
DESC_PACKET_TYPE_NORMAL = 1,
-}desc_packet_type_e;
+} desc_packet_type_e;
-typedef enum _firmware_status{
+typedef enum _firmware_status {
FW_STATUS_0_INIT = 0,
FW_STATUS_1_MOVE_BOOT_CODE = 1,
FW_STATUS_2_MOVE_MAIN_CODE = 2,
FW_STATUS_3_TURNON_CPU = 3,
FW_STATUS_4_MOVE_DATA_CODE = 4,
FW_STATUS_5_READY = 5,
-}firmware_status_e;
+} firmware_status_e;
typedef struct _rt_firmare_seg_container {
u16 seg_size;
u8 *seg_ptr;
-}fw_seg_container, *pfw_seg_container;
-typedef struct _rt_firmware{
+} fw_seg_container, *pfw_seg_container;
+typedef struct _rt_firmware {
firmware_status_e firmware_status;
u16 cmdpacket_frag_thresold;
-#define RTL8190_MAX_FIRMWARE_CODE_SIZE 64000 //64k
+#define RTL8190_MAX_FIRMWARE_CODE_SIZE 64000
u8 firmware_buf[RTL8190_MAX_FIRMWARE_CODE_SIZE];
u16 firmware_buf_size;
-}rt_firmware, *prt_firmware;
+} rt_firmware, *prt_firmware;
-//+by amy 080507
-#define MAX_RECEIVE_BUFFER_SIZE 9100 // Add this to 9100 bytes to receive A-MSDU from RT-AP
+/* Add this to 9100 bytes to receive A-MSDU from RT-AP */
+#define MAX_RECEIVE_BUFFER_SIZE 9100
-typedef struct _rt_firmware_info_819xUsb{
+typedef struct _rt_firmware_info_819xUsb {
u8 sz_info[16];
-}rt_firmware_info_819xUsb, *prt_firmware_info_819xUsb;
+} rt_firmware_info_819xUsb, *prt_firmware_info_819xUsb;
/* Firmware Queue Layout */
#define NUM_OF_FIRMWARE_QUEUE 10
@@ -527,8 +510,11 @@ typedef struct _rt_firmware_info_819xUsb{
#define RSVD_FW_QUEUE_PAGE_CMD_SHIFT 0x08
#define RSVD_FW_QUEUE_PAGE_BCN_SHIFT 0x00
#define RSVD_FW_QUEUE_PAGE_PUB_SHIFT 0x08
-//=================================================================
-//=================================================================
+
+/*
+ * =================================================================
+ * =================================================================
+ */
#define EPROM_93c46 0
#define EPROM_93c56 1
@@ -557,7 +543,7 @@ typedef enum _WIRELESS_MODE {
} WIRELESS_MODE;
-#define RTL_IOCTL_WPA_SUPPLICANT SIOCIWFIRSTPRIV+30
+#define RTL_IOCTL_WPA_SUPPLICANT (SIOCIWFIRSTPRIV + 30)
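Wrapping SIOCIWFIRSTPRIV + 30 in parentheses keeps the macro safe inside larger expressions. A standalone illustration with values chosen for the demo; FIRST and the CMD_* names are stand-ins, not the real ioctl constants.

#include <stdio.h>

#define FIRST        0x8be0
#define CMD_UNSAFE   FIRST + 30     /* old, unparenthesised style */
#define CMD_SAFE     (FIRST + 30)   /* fixed style                */

int main(void)
{
	/* "cmd - CMD" is a typical way to index a private ioctl. */
	int cmd = 0x8be0 + 30;

	printf("unsafe: %d\n", cmd - CMD_UNSAFE); /* cmd - FIRST + 30 = 60 */
	printf("safe:   %d\n", cmd - CMD_SAFE);   /* 0, as intended        */
	return 0;
}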
typedef struct buffer {
struct buffer *next;
@@ -565,7 +551,7 @@ typedef struct buffer {
} buffer;
-typedef struct rtl_reg_debug{
+typedef struct rtl_reg_debug {
unsigned int cmd;
struct {
unsigned char type;
@@ -574,7 +560,7 @@ typedef struct rtl_reg_debug{
unsigned char length;
} head;
unsigned char buf[0xff];
-}rtl_reg_debug;
+} rtl_reg_debug;
@@ -584,58 +570,45 @@ typedef struct rtl_reg_debug{
typedef struct _rt_9x_tx_rate_history {
u32 cck[4];
u32 ofdm[8];
- // HT_MCS[0][]: BW=0 SG=0
- // HT_MCS[1][]: BW=1 SG=0
- // HT_MCS[2][]: BW=0 SG=1
- // HT_MCS[3][]: BW=1 SG=1
u32 ht_mcs[4][16];
-}rt_tx_rahis_t, *prt_tx_rahis_t;
+} rt_tx_rahis_t, *prt_tx_rahis_t;
typedef struct _RT_SMOOTH_DATA_4RF {
- char elements[4][100];//array to store values
- u32 index; //index to current array to store
- u32 TotalNum; //num of valid elements
- u32 TotalVal[4]; //sum of valid elements
-}RT_SMOOTH_DATA_4RF, *PRT_SMOOTH_DATA_4RF;
-
-#define MAX_8192U_RX_SIZE 8192 // This maybe changed for D-cut larger aggregation size
-//stats seems messed up, clean it ASAP
+ char elements[4][100]; /* array to store values */
+ u32 index; /* index to current array to store */
+ u32 TotalNum; /* num of valid elements */
+ u32 TotalVal[4]; /* sum of valid elements */
+} RT_SMOOTH_DATA_4RF, *PRT_SMOOTH_DATA_4RF;
+
+/* This may be changed for D-cut larger aggregation size */
+#define MAX_8192U_RX_SIZE 8192
+/* Stats seems messed up, clean it ASAP */
typedef struct Stats {
unsigned long txrdu;
-// unsigned long rxrdu;
- //unsigned long rxnolast;
- //unsigned long rxnodata;
-// unsigned long rxreset;
-// unsigned long rxnopointer;
unsigned long rxok;
unsigned long rxframgment;
unsigned long rxurberr;
unsigned long rxstaterr;
- unsigned long received_rate_histogram[4][32]; //0: Total, 1:OK, 2:CRC, 3:ICV, 2007 07 03 cosa
- unsigned long received_preamble_GI[2][32]; //0: Long preamble/GI, 1:Short preamble/GI
- unsigned long rx_AMPDUsize_histogram[5]; // level: (<4K), (4K~8K), (8K~16K), (16K~32K), (32K~64K)
- unsigned long rx_AMPDUnum_histogram[5]; // level: (<5), (5~10), (10~20), (20~40), (>40)
- unsigned long numpacket_matchbssid; // debug use only.
- unsigned long numpacket_toself; // debug use only.
- unsigned long num_process_phyinfo; // debug use only.
+ /* 0: Total, 1: OK, 2: CRC, 3: ICV */
+ unsigned long received_rate_histogram[4][32];
+ /* 0: Long preamble/GI, 1: Short preamble/GI */
+ unsigned long received_preamble_GI[2][32];
+ /* level: (<4K), (4K~8K), (8K~16K), (16K~32K), (32K~64K) */
+ unsigned long rx_AMPDUsize_histogram[5];
+ /* level: (<5), (5~10), (10~20), (20~40), (>40) */
+ unsigned long rx_AMPDUnum_histogram[5];
+ unsigned long numpacket_matchbssid;
+ unsigned long numpacket_toself;
+ unsigned long num_process_phyinfo;
unsigned long numqry_phystatus;
unsigned long numqry_phystatusCCK;
unsigned long numqry_phystatusHT;
- unsigned long received_bwtype[5]; //0: 20M, 1: funn40M, 2: upper20M, 3: lower20M, 4: duplicate
+ /* 0: 20M, 1: full 40M, 2: upper20M, 3: lower20M, 4: duplicate */
+ unsigned long received_bwtype[5];
unsigned long txnperr;
unsigned long txnpdrop;
unsigned long txresumed;
-// unsigned long rxerr;
-// unsigned long rxoverflow;
-// unsigned long rxint;
unsigned long txnpokint;
-// unsigned long txhpokint;
-// unsigned long txhperr;
-// unsigned long ints;
-// unsigned long shints;
unsigned long txoverflow;
-// unsigned long rxdmafail;
-// unsigned long txbeacon;
-// unsigned long txbeaconerr;
unsigned long txlpokint;
unsigned long txlpdrop;
unsigned long txlperr;
@@ -684,30 +657,35 @@ typedef struct Stats {
u8 last_packet_rate;
unsigned long slide_signal_strength[100];
unsigned long slide_evm[100];
- unsigned long slide_rssi_total; // For recording sliding window's RSSI value
- unsigned long slide_evm_total; // For recording sliding window's EVM value
- long signal_strength; // Transformed, in dbm. Beautified signal strength for UI, not correct.
+ /* For recording sliding window's RSSI value */
+ unsigned long slide_rssi_total;
+ /* For recording sliding window's EVM value */
+ unsigned long slide_evm_total;
+ /* Transformed in dbm. Beautified signal strength for UI, not correct */
+ long signal_strength;
long signal_quality;
long last_signal_strength_inpercent;
- long recv_signal_power; // Correct smoothed ss in Dbm, only used in driver to report real power now.
+ /* Correct smoothed ss in dbm, only used in driver
+ * to report real power now */
+ long recv_signal_power;
u8 rx_rssi_percentage[4];
u8 rx_evm_percentage[2];
long rxSNRdB[4];
rt_tx_rahis_t txrate;
- u32 Slide_Beacon_pwdb[100]; //cosa add for beacon rssi
- u32 Slide_Beacon_Total; //cosa add for beacon rssi
+ /* For beacon RSSI */
+ u32 Slide_Beacon_pwdb[100];
+ u32 Slide_Beacon_Total;
RT_SMOOTH_DATA_4RF cck_adc_pwdb;
u32 CurrentShowTxate;
} Stats;
-// Bandwidth Offset
+/* Bandwidth Offset */
#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
#define HAL_PRIME_CHNL_OFFSET_LOWER 1
#define HAL_PRIME_CHNL_OFFSET_UPPER 2
-//+by amy 080507
typedef struct ChnlAccessSetting {
u16 SIFS_Timer;
@@ -716,35 +694,62 @@ typedef struct ChnlAccessSetting {
u16 EIFS_Timer;
u16 CWminIndex;
u16 CWmaxIndex;
-}*PCHANNEL_ACCESS_SETTING,CHANNEL_ACCESS_SETTING;
-
-typedef struct _BB_REGISTER_DEFINITION{
- u32 rfintfs; // set software control: // 0x870~0x877[8 bytes]
- u32 rfintfi; // readback data: // 0x8e0~0x8e7[8 bytes]
- u32 rfintfo; // output data: // 0x860~0x86f [16 bytes]
- u32 rfintfe; // output enable: // 0x860~0x86f [16 bytes]
- u32 rf3wireOffset; // LSSI data: // 0x840~0x84f [16 bytes]
- u32 rfLSSI_Select; // BB Band Select: // 0x878~0x87f [8 bytes]
- u32 rfTxGainStage; // Tx gain stage: // 0x80c~0x80f [4 bytes]
- u32 rfHSSIPara1; // wire parameter control1 : // 0x820~0x823,0x828~0x82b, 0x830~0x833, 0x838~0x83b [16 bytes]
- u32 rfHSSIPara2; // wire parameter control2 : // 0x824~0x827,0x82c~0x82f, 0x834~0x837, 0x83c~0x83f [16 bytes]
- u32 rfSwitchControl; //Tx Rx antenna control : // 0x858~0x85f [16 bytes]
- u32 rfAGCControl1; //AGC parameter control1 : // 0xc50~0xc53,0xc58~0xc5b, 0xc60~0xc63, 0xc68~0xc6b [16 bytes]
- u32 rfAGCControl2; //AGC parameter control2 : // 0xc54~0xc57,0xc5c~0xc5f, 0xc64~0xc67, 0xc6c~0xc6f [16 bytes]
- u32 rfRxIQImbalance; //OFDM Rx IQ imbalance matrix : // 0xc14~0xc17,0xc1c~0xc1f, 0xc24~0xc27, 0xc2c~0xc2f [16 bytes]
- u32 rfRxAFE; //Rx IQ DC ofset and Rx digital filter, Rx DC notch filter : // 0xc10~0xc13,0xc18~0xc1b, 0xc20~0xc23, 0xc28~0xc2b [16 bytes]
- u32 rfTxIQImbalance; //OFDM Tx IQ imbalance matrix // 0xc80~0xc83,0xc88~0xc8b, 0xc90~0xc93, 0xc98~0xc9b [16 bytes]
- u32 rfTxAFE; //Tx IQ DC Offset and Tx DFIR type // 0xc84~0xc87,0xc8c~0xc8f, 0xc94~0xc97, 0xc9c~0xc9f [16 bytes]
- u32 rfLSSIReadBack; //LSSI RF readback data // 0x8a0~0x8af [16 bytes]
-}BB_REGISTER_DEFINITION_T, *PBB_REGISTER_DEFINITION_T;
-
-typedef enum _RT_RF_TYPE_819xU{
+} *PCHANNEL_ACCESS_SETTING, CHANNEL_ACCESS_SETTING;
+
+typedef struct _BB_REGISTER_DEFINITION {
+ /* set software control: 0x870~0x877 [8 bytes] */
+ u32 rfintfs;
+ /* readback data: 0x8e0~0x8e7 [8 bytes] */
+ u32 rfintfi;
+ /* output data: 0x860~0x86f [16 bytes] */
+ u32 rfintfo;
+ /* output enable: 0x860~0x86f [16 bytes] */
+ u32 rfintfe;
+ /* LSSI data: 0x840~0x84f [16 bytes] */
+ u32 rf3wireOffset;
+ /* BB Band Select: 0x878~0x87f [8 bytes] */
+ u32 rfLSSI_Select;
+ /* Tx gain stage: 0x80c~0x80f [4 bytes] */
+ u32 rfTxGainStage;
+ /* wire parameter control1: 0x820~0x823, 0x828~0x82b,
+ * 0x830~0x833, 0x838~0x83b [16 bytes] */
+ u32 rfHSSIPara1;
+ /* wire parameter control2: 0x824~0x827, 0x82c~0x82f,
+ * 0x834~0x837, 0x83c~0x83f [16 bytes] */
+ u32 rfHSSIPara2;
+ /* Tx Rx antenna control: 0x858~0x85f [16 bytes] */
+ u32 rfSwitchControl;
+ /* AGC parameter control1: 0xc50~0xc53, 0xc58~0xc5b,
+ * 0xc60~0xc63, 0xc68~0xc6b [16 bytes] */
+ u32 rfAGCControl1;
+ /* AGC parameter control2: 0xc54~0xc57, 0xc5c~0xc5f,
+ * 0xc64~0xc67, 0xc6c~0xc6f [16 bytes] */
+ u32 rfAGCControl2;
+ /* OFDM Rx IQ imbalance matrix: 0xc14~0xc17, 0xc1c~0xc1f,
+ * 0xc24~0xc27, 0xc2c~0xc2f [16 bytes] */
+ u32 rfRxIQImbalance;
+ /* Rx IQ DC offset and Rx digital filter, Rx DC notch filter:
+ * 0xc10~0xc13, 0xc18~0xc1b,
+ * 0xc20~0xc23, 0xc28~0xc2b [16 bytes] */
+ u32 rfRxAFE;
+ /* OFDM Tx IQ imbalance matrix: 0xc80~0xc83, 0xc88~0xc8b,
+ * 0xc90~0xc93, 0xc98~0xc9b [16 bytes] */
+ u32 rfTxIQImbalance;
+ /* Tx IQ DC Offset and Tx DFIR type:
+ * 0xc84~0xc87, 0xc8c~0xc8f,
+ * 0xc94~0xc97, 0xc9c~0xc9f [16 bytes] */
+ u32 rfTxAFE;
+ /* LSSI RF readback data: 0x8a0~0x8af [16 bytes] */
+ u32 rfLSSIReadBack;
+} BB_REGISTER_DEFINITION_T, *PBB_REGISTER_DEFINITION_T;
+
+typedef enum _RT_RF_TYPE_819xU {
RF_TYPE_MIN = 0,
RF_8225,
RF_8256,
RF_8258,
RF_PSEUDO_11N = 4,
-}RT_RF_TYPE_819xU, *PRT_RF_TYPE_819xU;
+} RT_RF_TYPE_819xU, *PRT_RF_TYPE_819xU;
typedef struct _rate_adaptive {
u8 rate_adaptive_disabled;
@@ -762,9 +767,9 @@ typedef struct _rate_adaptive {
u32 low_rssi_threshold_ratr;
u32 low_rssi_threshold_ratr_40M;
u32 low_rssi_threshold_ratr_20M;
- u8 ping_rssi_enable; //cosa add for test
- u32 ping_rssi_ratr; //cosa add for test
- u32 ping_rssi_thresh_for_ra;//cosa add for test
+ u8 ping_rssi_enable;
+ u32 ping_rssi_ratr;
+ u32 ping_rssi_thresh_for_ra;
u32 last_ratr;
} rate_adaptive, *prate_adaptive;
@@ -778,9 +783,9 @@ typedef struct _txbbgain_struct {
} txbbgain_struct, *ptxbbgain_struct;
typedef struct _ccktxbbgain_struct {
- //The Value is from a22 to a29 one Byte one time is much Safer
+ /* The value is from a22 to a29, one byte one time is much safer */
u8 ccktxbb_valuearray[8];
-} ccktxbbgain_struct,*pccktxbbgain_struct;
+} ccktxbbgain_struct, *pccktxbbgain_struct;
typedef struct _init_gain {
@@ -791,7 +796,6 @@ typedef struct _init_gain {
u8 cca;
} init_gain, *pinit_gain;
-//by amy 0606
typedef struct _phy_ofdm_rx_status_report_819xusb {
u8 trsw_gain_X[4];
@@ -807,26 +811,26 @@ typedef struct _phy_ofdm_rx_status_report_819xusb {
u8 max_ex_pwr;
u8 sgi_en;
u8 rxsc_sgien_exflg;
-}phy_sts_ofdm_819xusb_t;
+} phy_sts_ofdm_819xusb_t;
typedef struct _phy_cck_rx_status_report_819xusb {
- /* For CCK rate descriptor. This is a unsigned 8:1 variable. LSB bit presend
- 0.5. And MSB 7 bts presend a signed value. Range from -64~+63.5. */
+ /* For CCK rate descriptor. This is an unsigned 8:1 variable.
+ * The LSB represents 0.5, and the 7 MSB bits represent a signed value.
+ * Range from -64~+63.5. */
u8 adc_pwdb_X[4];
u8 sq_rpt;
u8 cck_agc_rpt;
-}phy_sts_cck_819xusb_t;
+} phy_sts_cck_819xusb_t;
-typedef struct _phy_ofdm_rx_status_rxsc_sgien_exintfflag{
+typedef struct _phy_ofdm_rx_status_rxsc_sgien_exintfflag {
u8 reserved:4;
u8 rxsc:2;
u8 sgi_en:1;
u8 ex_intf_flag:1;
-}phy_ofdm_rx_status_rxsc_sgien_exintfflag;
+} phy_ofdm_rx_status_rxsc_sgien_exintfflag;
-typedef enum _RT_CUSTOMER_ID
-{
+typedef enum _RT_CUSTOMER_ID {
RT_CID_DEFAULT = 0,
RT_CID_8187_ALPHA0 = 1,
RT_CID_8187_SERCOMM_PS = 2,
@@ -836,25 +840,28 @@ typedef enum _RT_CUSTOMER_ID
RT_CID_819x_CAMEO = 6,
RT_CID_819x_RUNTOP = 7,
RT_CID_819x_Senao = 8,
- RT_CID_TOSHIBA = 9, // Merge by Jacken, 2008/01/31.
+ RT_CID_TOSHIBA = 9,
RT_CID_819x_Netcore = 10,
RT_CID_Nettronix = 11,
RT_CID_DLINK = 12,
RT_CID_PRONET = 13,
-}RT_CUSTOMER_ID, *PRT_CUSTOMER_ID;
+} RT_CUSTOMER_ID, *PRT_CUSTOMER_ID;
-//================================================================================
-// LED customization.
-//================================================================================
-
-typedef enum _LED_STRATEGY_8190{
- SW_LED_MODE0, // SW control 1 LED via GPIO0. It is default option.
- SW_LED_MODE1, // SW control for PCI Express
- SW_LED_MODE2, // SW control for Cameo.
- SW_LED_MODE3, // SW contorl for RunTop.
- SW_LED_MODE4, // SW control for Netcore
- HW_LED, // HW control 2 LEDs, LED0 and LED1 (there are 4 different control modes)
-}LED_STRATEGY_8190, *PLED_STRATEGY_8190;
+/*
+ * ==========================================================================
+ * LED customization.
+ * ==========================================================================
+ */
+
+typedef enum _LED_STRATEGY_8190 {
+ SW_LED_MODE0, /* SW control 1 LED via GPIO0. It is default option. */
+ SW_LED_MODE1, /* SW control for PCI Express */
+ SW_LED_MODE2, /* SW control for Cameo. */
+ SW_LED_MODE3, /* SW control for RunTop. */
+ SW_LED_MODE4, /* SW control for Netcore. */
+ /* HW control 2 LEDs, LED0 and LED1 (4 different control modes) */
+ HW_LED,
+} LED_STRATEGY_8190, *PLED_STRATEGY_8190;
typedef enum _RESET_TYPE {
RESET_TYPE_NORESET = 0x00,
@@ -863,7 +870,7 @@ typedef enum _RESET_TYPE {
} RESET_TYPE;
/* The simple tx command OP code. */
-typedef enum _tag_TxCmd_Config_Index{
+typedef enum _tag_TxCmd_Config_Index {
TXCMD_TXRA_HISTORY_CTRL = 0xFF900000,
TXCMD_RESET_TX_PKT_BUFF = 0xFF900001,
TXCMD_RESET_RX_PKT_BUFF = 0xFF900002,
@@ -871,11 +878,11 @@ typedef enum _tag_TxCmd_Config_Index{
TXCMD_SET_RX_RSSI = 0xFF900004,
TXCMD_SET_TX_PWR_TRACKING = 0xFF900005,
TXCMD_XXXX_CTRL,
-}DCMD_TXCMD_OP;
+} DCMD_TXCMD_OP;
typedef struct r8192_priv {
struct usb_device *udev;
- //added for maintain info from eeprom
+	/* For maintaining info from the EEPROM */
short epromtype;
u16 eeprom_vid;
u16 eeprom_pid;
@@ -887,105 +894,81 @@ typedef struct r8192_priv {
int irq;
struct ieee80211_device *ieee80211;
- short card_8192; /* O: rtl8192, 1:rtl8185 V B/C, 2:rtl8185 V D */
- u8 card_8192_version; /* if TCR reports card V B/C this discriminates */
-// short phy_ver; /* meaningful for rtl8225 1:A 2:B 3:C */
+	/* 0: rtl8192, 1: rtl8185 V B/C, 2: rtl8185 V D */
+ short card_8192;
+ /* If TCR reports card V B/C, this discriminates */
+ u8 card_8192_version;
short enable_gpio0;
- enum card_type {PCI,MINIPCI,CARDBUS,USB}card_type;
+ enum card_type {
+ PCI, MINIPCI, CARDBUS, USB
+ } card_type;
short hw_plcp_len;
short plcp_preamble_mode;
spinlock_t irq_lock;
-// spinlock_t irq_th_lock;
spinlock_t tx_lock;
struct mutex mutex;
- //spinlock_t rf_lock; //used to lock rf write operation added by wb
u16 irq_mask;
-// short irq_enabled;
-// struct net_device *dev; //comment this out.
short chan;
short sens;
short max_sens;
- // u8 chtxpwr[15]; //channels from 1 to 14, 0 not used
-// u8 chtxpwr_ofdm[15]; //channels from 1 to 14, 0 not used
-// u8 cck_txpwr_base;
-// u8 ofdm_txpwr_base;
-// u8 challow[15]; //channels from 1 to 14, 0 not used
short up;
- short crcmon; //if 1 allow bad crc frame reception in monitor mode
-// short prism_hdr;
-
-// struct timer_list scan_timer;
- /*short scanpending;
- short stopscan;*/
-// spinlock_t scan_lock;
-// u8 active_probe;
- //u8 active_scan_num;
+	/* If 1, allow bad CRC frame reception in monitor mode */
+ short crcmon;
+
struct semaphore wx_sem;
- struct semaphore rf_sem; //used to lock rf write operation added by wb, modified by david
-// short hw_wep;
-
-// short digphy;
-// short antb;
-// short diversity;
-// u8 cs_treshold;
-// short rcr_csense;
- u8 rf_type; //0 means 1T2R, 1 means 2T4R
+ struct semaphore rf_sem; /* Used to lock rf write operation */
+
+ u8 rf_type; /* 0: 1T2R, 1: 2T4R */
RT_RF_TYPE_819xU rf_chip;
-// u32 key0[4];
- short (*rf_set_sens)(struct net_device *dev,short sens);
- u8 (*rf_set_chan)(struct net_device *dev,u8 ch);
+ short (*rf_set_sens)(struct net_device *dev, short sens);
+ u8 (*rf_set_chan)(struct net_device *dev, u8 ch);
void (*rf_close)(struct net_device *dev);
void (*rf_init)(struct net_device *dev);
- //short rate;
short promisc;
- /*stats*/
+ /* Stats */
struct Stats stats;
struct iw_statistics wstats;
- /*RX stuff*/
-// u32 *rxring;
-// u32 *rxringtail;
-// dma_addr_t rxringdma;
+ /* RX stuff */
struct urb **rx_urb;
struct urb **rx_cmd_urb;
#ifdef THOMAS_BEACON
u32 *oldaddr;
#endif
#ifdef THOMAS_TASKLET
- atomic_t irt_counter;//count for irq_rx_tasklet
+ atomic_t irt_counter; /* count for irq_rx_tasklet */
#endif
#ifdef JACKSON_NEW_RX
struct sk_buff **pp_rxskb;
int rx_inx;
#endif
-/* modified by davad for Rx process */
struct sk_buff_head rx_queue;
struct sk_buff_head skb_queue;
struct work_struct qos_activate;
short tx_urb_index;
- atomic_t tx_pending[0x10];//UART_PRIORITY+1
+ atomic_t tx_pending[0x10]; /* UART_PRIORITY + 1 */
struct tasklet_struct irq_rx_tasklet;
struct urb *rxurb_task;
- //2 Tx Related variables
+ /* Tx Related variables */
u16 ShortRetryLimit;
u16 LongRetryLimit;
u32 TransmitConfig;
- u8 RegCWinMin; // For turbo mode CW adaptive. Added by Annie, 2005-10-27.
+ u8 RegCWinMin; /* For turbo mode CW adaptive */
u32 LastRxDescTSFHigh;
u32 LastRxDescTSFLow;
- //2 Rx Related variables
+ /* Rx Related variables */
u16 EarlyRxThreshold;
u32 ReceiveConfig;
u8 AcmControl;
@@ -1000,13 +983,13 @@ typedef struct r8192_priv {
struct work_struct reset_wq;
/**********************************************************/
- //for rtl819xUsb
+ /* For rtl819xUsb */
u16 basic_rate;
u8 short_preamble;
u8 slot_time;
bool bDcut;
bool bCurrentRxAggrEnable;
- u8 Rf_Mode; //add for Firmware RF -R/W switch
+	u8 Rf_Mode;	/* For Firmware RF R/W switch */
prt_firmware pFirmware;
rtl819xUsb_loopback_e LoopbackMode;
u16 EEPROMTxPowerDiff;
@@ -1014,71 +997,70 @@ typedef struct r8192_priv {
u8 EEPROMPwDiff;
u8 EEPROMCrystalCap;
u8 EEPROM_Def_Ver;
- u8 EEPROMTxPowerLevelCCK;// CCK channel 1~14
+ u8 EEPROMTxPowerLevelCCK; /* CCK channel 1~14 */
u8 EEPROMTxPowerLevelCCK_V1[3];
- u8 EEPROMTxPowerLevelOFDM24G[3]; // OFDM 2.4G channel 1~14
- u8 EEPROMTxPowerLevelOFDM5G[24]; // OFDM 5G
+ u8 EEPROMTxPowerLevelOFDM24G[3]; /* OFDM 2.4G channel 1~14 */
+ u8 EEPROMTxPowerLevelOFDM5G[24]; /* OFDM 5G */
-/*PHY related*/
- BB_REGISTER_DEFINITION_T PHYRegDef[4]; //Radio A/B/C/D
- // Read/write are allow for following hardware information variables
+ /* PHY related */
+ BB_REGISTER_DEFINITION_T PHYRegDef[4]; /* Radio A/B/C/D */
+	/* Read/write is allowed for the following hardware information variables */
u32 MCSTxPowerLevelOriginalOffset[6];
u32 CCKTxPowerLevelOriginalOffset;
- u8 TxPowerLevelCCK[14]; // CCK channel 1~14
- u8 TxPowerLevelOFDM24G[14]; // OFDM 2.4G channel 1~14
- u8 TxPowerLevelOFDM5G[14]; // OFDM 5G
+ u8 TxPowerLevelCCK[14]; /* CCK channel 1~14 */
+ u8 TxPowerLevelOFDM24G[14]; /* OFDM 2.4G channel 1~14 */
+ u8 TxPowerLevelOFDM5G[14]; /* OFDM 5G */
u32 Pwr_Track;
u8 TxPowerDiff;
- u8 AntennaTxPwDiff[2]; // Antenna gain offset, index 0 for B, 1 for C, and 2 for D
- u8 CrystalCap; // CrystalCap.
- u8 ThermalMeter[2]; // ThermalMeter, index 0 for RFIC0, and 1 for RFIC1
+ u8 AntennaTxPwDiff[2]; /* Antenna gain offset, 0: B, 1: C, 2: D */
+ u8 CrystalCap;
+ u8 ThermalMeter[2]; /* index 0: RFIC0, index 1: RFIC1 */
u8 CckPwEnl;
- // Use to calculate PWBD.
+	/* Used to calculate PWDB */
u8 bCckHighPower;
long undecorated_smoothed_pwdb;
- //for set channel
+	/* For setting the channel */
u8 SwChnlInProgress;
u8 SwChnlStage;
u8 SwChnlStep;
u8 SetBWModeInProgress;
HT_CHANNEL_WIDTH CurrentChannelBW;
u8 ChannelPlan;
- // 8190 40MHz mode
- //
- u8 nCur40MhzPrimeSC; // Control channel sub-carrier
- // Joseph test for shorten RF configuration time.
- // We save RF reg0 in this variable to reduce RF reading.
- //
+ /* 8190 40MHz mode */
+ /* Control channel sub-carrier */
+ u8 nCur40MhzPrimeSC;
+	/* To shorten RF configuration time,
+	 * we save RF reg0 in this variable to reduce RF reads. */
u32 RfReg0Value[4];
u8 NumTotalRFPath;
bool brfpath_rxenable[4];
- //RF set related
+ /* RF set related */
bool SetRFPowerStateInProgress;
-//+by amy 080507
struct timer_list watch_dog_timer;
-//+by amy 080515 for dynamic mechenism
- //Add by amy Tx Power Control for Near/Far Range 2008/05/15
- bool bdynamic_txpower; //bDynamicTxPower
- bool bDynamicTxHighPower; // Tx high power state
- bool bDynamicTxLowPower; // Tx low power state
+ /* For dynamic mechanism */
+ /* Tx Power Control for Near/Far Range */
+ bool bdynamic_txpower;
+ bool bDynamicTxHighPower;
+ bool bDynamicTxLowPower;
bool bLastDTPFlag_High;
bool bLastDTPFlag_Low;
bool bstore_last_dtpflag;
- bool bstart_txctrl_bydtp; //Define to discriminate on High power State or on sitesuvey to change Tx gain index
- //Add by amy for Rate Adaptive
+	/* Used to discriminate between high power state and
+	 * site survey when changing the Tx gain index */
+ bool bstart_txctrl_bydtp;
rate_adaptive rate_adaptive;
- //Add by amy for TX power tracking
- //2008/05/15 Mars OPEN/CLOSE TX POWER TRACKING
- txbbgain_struct txbbgain_table[TxBBGainTableLength];
- u8 txpower_count;//For 6 sec do tracking again
- bool btxpower_trackingInit;
- u8 OFDM_index;
- u8 CCK_index;
- //2007/09/10 Mars Add CCK TX Power Tracking
+ /* TX power tracking
+ * OPEN/CLOSE TX POWER TRACKING */
+ txbbgain_struct txbbgain_table[TxBBGainTableLength];
+	u8 txpower_count; /* Used to redo tracking every 6 seconds */
+ bool btxpower_trackingInit;
+ u8 OFDM_index;
+ u8 CCK_index;
+ /* CCK TX Power Tracking */
ccktxbbgain_struct cck_txbbgain_table[CCKTxBBGainTableLength];
ccktxbbgain_struct cck_txbbgain_ch14_table[CCKTxBBGainTableLength];
u8 rfa_txpowertrackingindex;
@@ -1095,15 +1077,14 @@ typedef struct r8192_priv {
bool bcck_in_ch14;
bool btxpowerdata_readfromEEPORM;
u16 TSSI_13dBm;
- //For Backup Initial Gain
init_gain initgain_backup;
u8 DefaultInitialGain[4];
- // For EDCA Turbo mode, Added by amy 080515.
+ /* For EDCA Turbo mode */
bool bis_any_nonbepkts;
bool bcurrent_turbo_EDCA;
bool bis_cur_rdlstate;
struct timer_list fsync_timer;
- bool bfsync_processing; // 500ms Fsync timer is active or not
+ bool bfsync_processing; /* 500ms Fsync timer is active or not */
u32 rate_record;
u32 rateCountDiffRecord;
u32 ContinueDiffCount;
@@ -1112,17 +1093,14 @@ typedef struct r8192_priv {
u8 framesync;
u32 framesyncC34;
u8 framesyncMonitor;
- //Added by amy 080516 for RX related
u16 nrxAMPDU_size;
u8 nrxAMPDU_aggr_num;
- //by amy for gpio
+ /* For gpio */
bool bHwRadioOff;
- //by amy for reset_count
u32 reset_count;
bool bpbc_pressed;
- //by amy for debug
u32 txpower_checkcnt;
u32 txpower_tracking_callback_cnt;
u8 thermal_read_val[40];
@@ -1131,7 +1109,7 @@ typedef struct r8192_priv {
u32 ccktxpower_adjustcnt_ch14;
u8 tx_fwinfo_force_subcarriermode;
u8 tx_fwinfo_force_subcarrierval;
- //by amy for silent reset
+ /* For silent reset */
RESET_TYPE ResetProgress;
bool bForcedSilentReset;
bool bDisableNormalResetCheck;
@@ -1144,7 +1122,7 @@ typedef struct r8192_priv {
u16 SifsTime;
- //define work item by amy 080526
+ /* Define work item */
struct delayed_work update_beacon_wq;
struct delayed_work watch_dog_wq;
@@ -1153,42 +1131,32 @@ typedef struct r8192_priv {
struct delayed_work gpio_change_rf_wq;
struct delayed_work initialgain_operate_wq;
struct workqueue_struct *priv_wq;
-}r8192_priv;
+} r8192_priv;
-// for rtl8187
-// now mirging to rtl8187B
-/*
-typedef enum{
- LOW_PRIORITY = 0x02,
- NORM_PRIORITY
- } priority_t;
-*/
-//for rtl8187B
+/* For rtl8187B */
typedef enum{
BULK_PRIORITY = 0x01,
- //RSVD0,
- //RSVD1,
LOW_PRIORITY,
NORM_PRIORITY,
VO_PRIORITY,
- VI_PRIORITY, //0x05
+ VI_PRIORITY,
BE_PRIORITY,
BK_PRIORITY,
RSVD2,
RSVD3,
- BEACON_PRIORITY, //0x0A
+ BEACON_PRIORITY,
HIGH_PRIORITY,
MANAGE_PRIORITY,
RSVD4,
RSVD5,
- UART_PRIORITY //0x0F
+ UART_PRIORITY
} priority_t;
-typedef enum{
+typedef enum {
NIC_8192U = 1,
NIC_8190P = 2,
NIC_8192E = 3,
- } nic_t;
+} nic_t;
#ifdef JOHN_HWSEC
@@ -1200,19 +1168,19 @@ struct ssid_thread {
bool init_firmware(struct net_device *dev);
short rtl819xU_tx_cmd(struct net_device *dev, struct sk_buff *skb);
-short rtl8192_tx(struct net_device *dev, struct sk_buff* skb);
+short rtl8192_tx(struct net_device *dev, struct sk_buff *skb);
u32 read_cam(struct net_device *dev, u8 addr);
void write_cam(struct net_device *dev, u8 addr, u32 data);
-u8 read_nic_byte(struct net_device *dev, int x);
-u8 read_nic_byte_E(struct net_device *dev, int x);
-u32 read_nic_dword(struct net_device *dev, int x);
-u16 read_nic_word(struct net_device *dev, int x) ;
-void write_nic_byte(struct net_device *dev, int x,u8 y);
-void write_nic_byte_E(struct net_device *dev, int x,u8 y);
-void write_nic_word(struct net_device *dev, int x,u16 y);
-void write_nic_dword(struct net_device *dev, int x,u32 y);
+int read_nic_byte(struct net_device *dev, int x, u8 *data);
+int read_nic_byte_E(struct net_device *dev, int x, u8 *data);
+int read_nic_dword(struct net_device *dev, int x, u32 *data);
+int read_nic_word(struct net_device *dev, int x, u16 *data);
+void write_nic_byte(struct net_device *dev, int x, u8 y);
+void write_nic_byte_E(struct net_device *dev, int x, u8 y);
+void write_nic_word(struct net_device *dev, int x, u16 y);
+void write_nic_dword(struct net_device *dev, int x, u32 y);
void force_pci_posting(struct net_device *dev);
void rtl8192_rtx_disable(struct net_device *);
@@ -1220,26 +1188,24 @@ void rtl8192_rx_enable(struct net_device *);
void rtl8192_tx_enable(struct net_device *);
void rtl8192_disassociate(struct net_device *dev);
-//void fix_rx_fifo(struct net_device *dev);
-void rtl8185_set_rf_pins_enable(struct net_device *dev,u32 a);
+void rtl8185_set_rf_pins_enable(struct net_device *dev, u32 a);
-void rtl8192_set_anaparam(struct net_device *dev,u32 a);
-void rtl8185_set_anaparam2(struct net_device *dev,u32 a);
+void rtl8192_set_anaparam(struct net_device *dev, u32 a);
+void rtl8185_set_anaparam2(struct net_device *dev, u32 a);
void rtl8192_update_msr(struct net_device *dev);
int rtl8192_down(struct net_device *dev);
int rtl8192_up(struct net_device *dev);
void rtl8192_commit(struct net_device *dev);
-void rtl8192_set_chan(struct net_device *dev,short ch);
+void rtl8192_set_chan(struct net_device *dev, short ch);
void write_phy(struct net_device *dev, u8 adr, u8 data);
void write_phy_cck(struct net_device *dev, u8 adr, u32 data);
void write_phy_ofdm(struct net_device *dev, u8 adr, u32 data);
void rtl8185_tx_antenna(struct net_device *dev, u8 ant);
void rtl8192_set_rxconf(struct net_device *dev);
-//short check_nic_enough_desc(struct net_device *dev, priority_t priority);
-extern void rtl819xusb_beacon_tx(struct net_device *dev,u16 tx_rate);
+extern void rtl819xusb_beacon_tx(struct net_device *dev, u16 tx_rate);
void EnableHWSecurityConfig8192(struct net_device *dev);
-void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType, u8 *MacAddr, u8 DefaultKey, u32 *KeyContent );
+void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType, u8 *MacAddr, u8 DefaultKey, u32 *KeyContent);
#endif
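
The read_nic_* prototype changes above switch these helpers from returning the register value directly to returning an int status (0 on success, the negative usb_control_msg() status on failure) and handing the value back through a pointer, so USB control-message failures can propagate to callers instead of silently yielding an undefined value. A minimal caller sketch against the new prototypes; the helper name and the error handling here are illustrative, not part of the patch, and only MSR, read_nic_byte() and netdev_err() come from the driver sources:

/* Illustrative sketch only: read a register via the pointer-based API
 * and propagate a USB failure instead of using an undefined value. */
static int example_read_msr(struct net_device *dev, u8 *msr_out)
{
	int ret;

	ret = read_nic_byte(dev, MSR, msr_out);
	if (ret < 0) {
		netdev_err(dev, "MSR read failed: %d\n", ret);
		return ret;
	}

	return 0;
}
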
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 71f5cde..cd0946d 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -3,7 +3,7 @@
* Linux device driver for RTL8192U
*
* Based on the r8187 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -25,12 +25,35 @@
*/
#ifndef CONFIG_FORCE_HARD_FLOAT
-double __floatsidf (int i) { return i; }
-unsigned int __fixunsdfsi (double d) { return d; }
-double __adddf3(double a, double b) { return a+b; }
-double __addsf3(float a, float b) { return a+b; }
-double __subdf3(double a, double b) { return a-b; }
-double __extendsfdf2(float a) {return a;}
+double __floatsidf(int i)
+{
+ return i;
+}
+
+unsigned int __fixunsdfsi(double d)
+{
+ return d;
+}
+
+double __adddf3(double a, double b)
+{
+ return a+b;
+}
+
+double __addsf3(float a, float b)
+{
+ return a+b;
+}
+
+double __subdf3(double a, double b)
+{
+ return a-b;
+}
+
+double __extendsfdf2(float a)
+{
+ return a;
+}
#endif
#undef LOOP_TEST
@@ -68,7 +91,6 @@ double __extendsfdf2(float a) {return a;}
#include "r819xU_phyreg.h"
#include "r819xU_cmdpkt.h"
#include "r8192U_dm.h"
-//#include "r8192xU_phyreg.h"
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
@@ -81,26 +103,9 @@ double __extendsfdf2(float a) {return a;}
#include "dot11d.h"
//set here to open your trace code. //WB
-u32 rt_global_debug_component = \
- // COMP_INIT |
-// COMP_DBG |
- // COMP_EPROM |
-// COMP_PHY |
- // COMP_RF |
-// COMP_FIRMWARE |
-// COMP_CH |
- // COMP_POWER_TRACKING |
-// COMP_RATE |
- // COMP_TXAGC |
- // COMP_TRACE |
- COMP_DOWN |
- // COMP_RECV |
- // COMP_SWBW |
+u32 rt_global_debug_component = COMP_DOWN |
COMP_SEC |
- // COMP_RESET |
- // COMP_SEND |
- // COMP_EVENTS |
- COMP_ERR ; //always open err flags on
+			COMP_ERR; //always enable err flags
#define TOTAL_CAM_ENTRY 32
#define CAM_CONTENT_COUNT 8
@@ -130,24 +135,22 @@ MODULE_VERSION("V 1.1");
MODULE_DEVICE_TABLE(usb, rtl8192_usb_id_tbl);
MODULE_DESCRIPTION("Linux driver for Realtek RTL8192 USB WiFi cards");
-static char* ifname = "wlan%d";
+static char *ifname = "wlan%d";
static int hwwep = 1; //default use hw. set 0 to use software security
static int channels = 0x3fff;
-module_param(ifname, charp, S_IRUGO|S_IWUSR );
-//module_param(hwseqnum,int, S_IRUGO|S_IWUSR);
-module_param(hwwep,int, S_IRUGO|S_IWUSR);
-module_param(channels,int, S_IRUGO|S_IWUSR);
+module_param(ifname, charp, S_IRUGO|S_IWUSR);
+module_param(hwwep, int, S_IRUGO|S_IWUSR);
+module_param(channels, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(ifname," Net interface name, wlan%d=default");
-//MODULE_PARM_DESC(hwseqnum," Try to use hardware 802.11 header sequence numbers. Zero=default");
-MODULE_PARM_DESC(hwwep," Try to use hardware security support. ");
-MODULE_PARM_DESC(channels," Channel bitmask for specific locales. NYI");
+MODULE_PARM_DESC(ifname, " Net interface name, wlan%d=default");
+MODULE_PARM_DESC(hwwep, " Try to use hardware security support. ");
+MODULE_PARM_DESC(channels, " Channel bitmask for specific locales. NYI");
static int rtl8192_usb_probe(struct usb_interface *intf,
- const struct usb_device_id *id);
+ const struct usb_device_id *id);
static void rtl8192_usb_disconnect(struct usb_interface *intf);
@@ -169,7 +172,7 @@ static struct usb_driver rtl8192_usb_driver = {
typedef struct _CHANNEL_LIST {
u8 Channel[32];
u8 Len;
-}CHANNEL_LIST, *PCHANNEL_LIST;
+} CHANNEL_LIST, *PCHANNEL_LIST;
static CHANNEL_LIST ChannelPlan[] = {
{{1,2,3,4,5,6,7,8,9,10,11,36,40,44,48,52,56,60,64,149,153,157,161,165},24}, //FCC
@@ -185,12 +188,11 @@ static CHANNEL_LIST ChannelPlan[] = {
{{1,2,3,4,5,6,7,8,9,10,11,12,13,14},14} //For Global Domain. 1-11:active scan, 12-14 passive scan. //+YJ, 080626
};
-static void rtl819x_set_channel_map(u8 channel_plan, struct r8192_priv* priv)
+static void rtl819x_set_channel_map(u8 channel_plan, struct r8192_priv *priv)
{
- int i, max_chan=-1, min_chan=-1;
- struct ieee80211_device* ieee = priv->ieee80211;
- switch (channel_plan)
- {
+ int i, max_chan = -1, min_chan = -1;
+ struct ieee80211_device *ieee = priv->ieee80211;
+ switch (channel_plan) {
case COUNTRY_CODE_FCC:
case COUNTRY_CODE_IC:
case COUNTRY_CODE_ETSI:
@@ -200,22 +202,21 @@ static void rtl819x_set_channel_map(u8 channel_plan, struct r8192_priv* priv)
case COUNTRY_CODE_MKK1:
case COUNTRY_CODE_ISRAEL:
case COUNTRY_CODE_TELEC:
- case COUNTRY_CODE_MIC:
+ case COUNTRY_CODE_MIC:
Dot11d_Init(ieee);
ieee->bGlobalDomain = false;
//actually 8225 & 8256 rf chips only support B,G,24N mode
if ((priv->rf_chip == RF_8225) || (priv->rf_chip == RF_8256)) {
min_chan = 1;
max_chan = 14;
- }
- else {
- RT_TRACE(COMP_ERR, "unknown rf chip, can't set channel map in function:%s()\n", __FUNCTION__);
+ } else {
+ RT_TRACE(COMP_ERR, "unknown rf chip, can't set channel map in function:%s()\n", __func__);
}
if (ChannelPlan[channel_plan].Len != 0) {
// Clear old channel map
memset(GET_DOT11D_INFO(ieee)->channel_map, 0, sizeof(GET_DOT11D_INFO(ieee)->channel_map));
// Set new channel map
- for (i=0;i<ChannelPlan[channel_plan].Len;i++) {
+ for (i = 0; i < ChannelPlan[channel_plan].Len; i++) {
if (ChannelPlan[channel_plan].Channel[i] < min_chan || ChannelPlan[channel_plan].Channel[i] > max_chan)
break;
GET_DOT11D_INFO(ieee)->channel_map[ChannelPlan[channel_plan].Channel[i]] = 1;
@@ -228,19 +229,13 @@ static void rtl819x_set_channel_map(u8 channel_plan, struct r8192_priv* priv)
Dot11d_Reset(ieee);
ieee->bGlobalDomain = true;
break;
-
+
default:
break;
}
}
-#define rx_hal_is_cck_rate(_pdrvinfo)\
- (_pdrvinfo->RxRate == DESC90_RATE1M ||\
- _pdrvinfo->RxRate == DESC90_RATE2M ||\
- _pdrvinfo->RxRate == DESC90_RATE5_5M ||\
- _pdrvinfo->RxRate == DESC90_RATE11M) &&\
- !_pdrvinfo->RxHT\
void CamResetAllEntry(struct net_device *dev)
@@ -249,12 +244,6 @@ void CamResetAllEntry(struct net_device *dev)
//2004/02/11 In static WEP, OID_ADD_KEY or OID_ADD_WEP are set before STA associate to AP.
// However, ResetKey is called on OID_802_11_INFRASTRUCTURE_MODE and MlmeAssociateRequest
// In this condition, Cam can not be reset because upper layer will not set this static key again.
- //if(Adapter->EncAlgorithm == WEP_Encryption)
- // return;
-//debug
- //DbgPrint("========================================\n");
- //DbgPrint(" Call ResetAllEntry \n");
- //DbgPrint("========================================\n\n");
ulcommand |= BIT31|BIT30;
write_nic_dword(dev, RWCAM, ulcommand);
@@ -264,13 +253,16 @@ void CamResetAllEntry(struct net_device *dev)
void write_cam(struct net_device *dev, u8 addr, u32 data)
{
write_nic_dword(dev, WCAMI, data);
- write_nic_dword(dev, RWCAM, BIT31|BIT16|(addr&0xff) );
+ write_nic_dword(dev, RWCAM, BIT31|BIT16|(addr&0xff));
}
u32 read_cam(struct net_device *dev, u8 addr)
{
- write_nic_dword(dev, RWCAM, 0x80000000|(addr&0xff) );
- return read_nic_dword(dev, 0xa8);
+ u32 data;
+
+ write_nic_dword(dev, RWCAM, 0x80000000|(addr&0xff));
+ read_nic_dword(dev, 0xa8, &data);
+ return data;
}
void write_nic_byte_E(struct net_device *dev, int indx, u8 data)
@@ -280,32 +272,29 @@ void write_nic_byte_E(struct net_device *dev, int indx, u8 data)
struct usb_device *udev = priv->udev;
status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- RTL8187_REQ_SET_REGS, RTL8187_REQT_WRITE,
- indx|0xfe00, 0, &data, 1, HZ / 2);
+ RTL8187_REQ_SET_REGS, RTL8187_REQT_WRITE,
+ indx|0xfe00, 0, &data, 1, HZ / 2);
if (status < 0)
- {
- printk("write_nic_byte_E TimeOut! status:%d\n", status);
- }
+ netdev_err(dev, "write_nic_byte_E TimeOut! status: %d\n", status);
}
-u8 read_nic_byte_E(struct net_device *dev, int indx)
+int read_nic_byte_E(struct net_device *dev, int indx, u8 *data)
{
int status;
- u8 data;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct usb_device *udev = priv->udev;
status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
- indx|0xfe00, 0, &data, 1, HZ / 2);
+ RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
+ indx|0xfe00, 0, data, 1, HZ / 2);
- if (status < 0)
- {
- printk("read_nic_byte_E TimeOut! status:%d\n", status);
+ if (status < 0) {
+ netdev_err(dev, "%s failure status: %d\n", __func__, status);
+ return status;
}
- return data;
+ return 0;
}
//as 92U has extend page from 4 to 16, so modify functions below.
void write_nic_byte(struct net_device *dev, int indx, u8 data)
@@ -316,13 +305,11 @@ void write_nic_byte(struct net_device *dev, int indx, u8 data)
struct usb_device *udev = priv->udev;
status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- RTL8187_REQ_SET_REGS, RTL8187_REQT_WRITE,
- (indx&0xff)|0xff00, (indx>>8)&0x0f, &data, 1, HZ / 2);
+ RTL8187_REQ_SET_REGS, RTL8187_REQT_WRITE,
+ (indx&0xff)|0xff00, (indx>>8)&0x0f, &data, 1, HZ / 2);
if (status < 0)
- {
- printk("write_nic_byte TimeOut! status:%d\n", status);
- }
+ netdev_err(dev, "write_nic_byte TimeOut! status: %d\n", status);
}
@@ -337,13 +324,11 @@ void write_nic_word(struct net_device *dev, int indx, u16 data)
struct usb_device *udev = priv->udev;
status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- RTL8187_REQ_SET_REGS, RTL8187_REQT_WRITE,
- (indx&0xff)|0xff00, (indx>>8)&0x0f, &data, 2, HZ / 2);
+ RTL8187_REQ_SET_REGS, RTL8187_REQT_WRITE,
+ (indx&0xff)|0xff00, (indx>>8)&0x0f, &data, 2, HZ / 2);
if (status < 0)
- {
- printk("write_nic_word TimeOut! status:%d\n", status);
- }
+ netdev_err(dev, "write_nic_word TimeOut! status: %d\n", status);
}
@@ -357,98 +342,92 @@ void write_nic_dword(struct net_device *dev, int indx, u32 data)
struct usb_device *udev = priv->udev;
status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- RTL8187_REQ_SET_REGS, RTL8187_REQT_WRITE,
- (indx&0xff)|0xff00, (indx>>8)&0x0f, &data, 4, HZ / 2);
+ RTL8187_REQ_SET_REGS, RTL8187_REQT_WRITE,
+ (indx&0xff)|0xff00, (indx>>8)&0x0f, &data, 4, HZ / 2);
if (status < 0)
- {
- printk("write_nic_dword TimeOut! status:%d\n", status);
- }
+ netdev_err(dev, "write_nic_dword TimeOut! status: %d\n", status);
}
-u8 read_nic_byte(struct net_device *dev, int indx)
+int read_nic_byte(struct net_device *dev, int indx, u8 *data)
{
- u8 data;
int status;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct usb_device *udev = priv->udev;
status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
- (indx&0xff)|0xff00, (indx>>8)&0x0f, &data, 1, HZ / 2);
+ RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
+ (indx&0xff)|0xff00, (indx>>8)&0x0f, data, 1, HZ / 2);
- if (status < 0)
- {
- printk("read_nic_byte TimeOut! status:%d\n", status);
+ if (status < 0) {
+ netdev_err(dev, "%s failure status: %d\n", __func__, status);
+ return status;
}
- return data;
+ return 0;
}
-u16 read_nic_word(struct net_device *dev, int indx)
+int read_nic_word(struct net_device *dev, int indx, u16 *data)
{
- u16 data;
int status;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct usb_device *udev = priv->udev;
status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
- (indx&0xff)|0xff00, (indx>>8)&0x0f,
- &data, 2, HZ / 2);
+ RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
+ (indx&0xff)|0xff00, (indx>>8)&0x0f,
+ data, 2, HZ / 2);
- if (status < 0)
- printk("read_nic_word TimeOut! status:%d\n", status);
+ if (status < 0) {
+ netdev_err(dev, "%s failure status: %d\n", __func__, status);
+ return status;
+ }
- return data;
+ return 0;
}
-u16 read_nic_word_E(struct net_device *dev, int indx)
+int read_nic_word_E(struct net_device *dev, int indx, u16 *data)
{
- u16 data;
int status;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct usb_device *udev = priv->udev;
status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
- indx|0xfe00, 0, &data, 2, HZ / 2);
+ RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
+ indx|0xfe00, 0, data, 2, HZ / 2);
- if (status < 0)
- printk("read_nic_word TimeOut! status:%d\n", status);
+ if (status < 0) {
+ netdev_err(dev, "%s failure status: %d\n", __func__, status);
+ return status;
+ }
- return data;
+ return 0;
}
-u32 read_nic_dword(struct net_device *dev, int indx)
+int read_nic_dword(struct net_device *dev, int indx, u32 *data)
{
- u32 data;
int status;
- /* int result; */
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct usb_device *udev = priv->udev;
status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
- (indx&0xff)|0xff00, (indx>>8)&0x0f,
- &data, 4, HZ / 2);
- /* if(0 != result) {
- * printk(KERN_WARNING "read size of data = %d\, date = %d\n",
- * result, data);
- * }
- */
+ RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
+ (indx&0xff)|0xff00, (indx>>8)&0x0f,
+ data, 4, HZ / 2);
- if (status < 0)
- printk("read_nic_dword TimeOut! status:%d\n", status);
+ if (status < 0) {
+ netdev_err(dev, "%s failure status: %d\n", __func__, status);
+ return status;
+ }
- return data;
+ return 0;
}
/* u8 read_phy_cck(struct net_device *dev, u8 adr); */
@@ -462,9 +441,7 @@ inline void force_pci_posting(struct net_device *dev)
static struct net_device_stats *rtl8192_stats(struct net_device *dev);
void rtl8192_commit(struct net_device *dev);
-/* void rtl8192_restart(struct net_device *dev); */
void rtl8192_restart(struct work_struct *work);
-/* void rtl8192_rq_tx_ack(struct work_struct *work); */
void watch_dog_timer_callback(unsigned long data);
/****************************************************************************
@@ -495,40 +472,38 @@ static int proc_get_stats_ap(struct seq_file *m, void *v)
static int proc_get_registers(struct seq_file *m, void *v)
{
struct net_device *dev = m->private;
- int i,n, max = 0xff;
+ int i, n, max = 0xff;
+ u8 byte_rd;
seq_puts(m, "\n####################page 0##################\n ");
- for (n=0;n<=max;) {
- //printk( "\nD: %2x> ", n);
- seq_printf(m, "\nD: %2x > ",n);
-
- for (i=0;i<16 && n<=max;i++,n++)
- seq_printf(m, "%2x ",read_nic_byte(dev,0x000|n));
+ for (n = 0; n <= max;) {
+ seq_printf(m, "\nD: %2x > ", n);
- // printk("%2x ",read_nic_byte(dev,n));
+ for (i = 0; i < 16 && n <= max; i++, n++) {
+ read_nic_byte(dev, 0x000|n, &byte_rd);
+ seq_printf(m, "%2x ", byte_rd);
+ }
}
seq_puts(m, "\n####################page 1##################\n ");
- for (n=0;n<=max;) {
- //printk( "\nD: %2x> ", n);
- seq_printf(m, "\nD: %2x > ",n);
+ for (n = 0; n <= max;) {
+ seq_printf(m, "\nD: %2x > ", n);
- for (i=0;i<16 && n<=max;i++,n++)
- seq_printf(m, "%2x ",read_nic_byte(dev,0x100|n));
-
- // printk("%2x ",read_nic_byte(dev,n));
+ for (i = 0; i < 16 && n <= max; i++, n++) {
+ read_nic_byte(dev, 0x100|n, &byte_rd);
+ seq_printf(m, "%2x ", byte_rd);
+ }
}
seq_puts(m, "\n####################page 3##################\n ");
- for (n=0;n<=max;) {
- //printk( "\nD: %2x> ", n);
- seq_printf(m, "\nD: %2x > ",n);
+ for (n = 0; n <= max;) {
+ seq_printf(m, "\nD: %2x > ", n);
- for(i=0;i<16 && n<=max;i++,n++)
- seq_printf(m, "%2x ",read_nic_byte(dev,0x300|n));
-
- // printk("%2x ",read_nic_byte(dev,n));
+ for (i = 0; i < 16 && n <= max; i++, n++) {
+ read_nic_byte(dev, 0x300|n, &byte_rd);
+ seq_printf(m, "%2x ", byte_rd);
+ }
}
seq_putc(m, '\n');
@@ -541,64 +516,54 @@ static int proc_get_stats_tx(struct seq_file *m, void *v)
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
seq_printf(m,
- "TX VI priority ok int: %lu\n"
- "TX VI priority error int: %lu\n"
- "TX VO priority ok int: %lu\n"
- "TX VO priority error int: %lu\n"
- "TX BE priority ok int: %lu\n"
- "TX BE priority error int: %lu\n"
- "TX BK priority ok int: %lu\n"
- "TX BK priority error int: %lu\n"
- "TX MANAGE priority ok int: %lu\n"
- "TX MANAGE priority error int: %lu\n"
- "TX BEACON priority ok int: %lu\n"
- "TX BEACON priority error int: %lu\n"
-// "TX high priority ok int: %lu\n"
-// "TX high priority failed error int: %lu\n"
- "TX queue resume: %lu\n"
- "TX queue stopped?: %d\n"
- "TX fifo overflow: %lu\n"
-// "TX beacon: %lu\n"
- "TX VI queue: %d\n"
- "TX VO queue: %d\n"
- "TX BE queue: %d\n"
- "TX BK queue: %d\n"
-// "TX HW queue: %d\n"
- "TX VI dropped: %lu\n"
- "TX VO dropped: %lu\n"
- "TX BE dropped: %lu\n"
- "TX BK dropped: %lu\n"
- "TX total data packets %lu\n",
-// "TX beacon aborted: %lu\n",
- priv->stats.txviokint,
- priv->stats.txvierr,
- priv->stats.txvookint,
- priv->stats.txvoerr,
- priv->stats.txbeokint,
- priv->stats.txbeerr,
- priv->stats.txbkokint,
- priv->stats.txbkerr,
- priv->stats.txmanageokint,
- priv->stats.txmanageerr,
- priv->stats.txbeaconokint,
- priv->stats.txbeaconerr,
-// priv->stats.txhpokint,
-// priv->stats.txhperr,
- priv->stats.txresumed,
- netif_queue_stopped(dev),
- priv->stats.txoverflow,
-// priv->stats.txbeacon,
- atomic_read(&(priv->tx_pending[VI_PRIORITY])),
- atomic_read(&(priv->tx_pending[VO_PRIORITY])),
- atomic_read(&(priv->tx_pending[BE_PRIORITY])),
- atomic_read(&(priv->tx_pending[BK_PRIORITY])),
-// read_nic_byte(dev, TXFIFOCOUNT),
- priv->stats.txvidrop,
- priv->stats.txvodrop,
- priv->stats.txbedrop,
- priv->stats.txbkdrop,
- priv->stats.txdatapkt
-// priv->stats.txbeaconerr
+ "TX VI priority ok int: %lu\n"
+ "TX VI priority error int: %lu\n"
+ "TX VO priority ok int: %lu\n"
+ "TX VO priority error int: %lu\n"
+ "TX BE priority ok int: %lu\n"
+ "TX BE priority error int: %lu\n"
+ "TX BK priority ok int: %lu\n"
+ "TX BK priority error int: %lu\n"
+ "TX MANAGE priority ok int: %lu\n"
+ "TX MANAGE priority error int: %lu\n"
+ "TX BEACON priority ok int: %lu\n"
+ "TX BEACON priority error int: %lu\n"
+ "TX queue resume: %lu\n"
+ "TX queue stopped?: %d\n"
+ "TX fifo overflow: %lu\n"
+ "TX VI queue: %d\n"
+ "TX VO queue: %d\n"
+ "TX BE queue: %d\n"
+ "TX BK queue: %d\n"
+ "TX VI dropped: %lu\n"
+ "TX VO dropped: %lu\n"
+ "TX BE dropped: %lu\n"
+ "TX BK dropped: %lu\n"
+ "TX total data packets %lu\n",
+ priv->stats.txviokint,
+ priv->stats.txvierr,
+ priv->stats.txvookint,
+ priv->stats.txvoerr,
+ priv->stats.txbeokint,
+ priv->stats.txbeerr,
+ priv->stats.txbkokint,
+ priv->stats.txbkerr,
+ priv->stats.txmanageokint,
+ priv->stats.txmanageerr,
+ priv->stats.txbeaconokint,
+ priv->stats.txbeaconerr,
+ priv->stats.txresumed,
+ netif_queue_stopped(dev),
+ priv->stats.txoverflow,
+ atomic_read(&(priv->tx_pending[VI_PRIORITY])),
+ atomic_read(&(priv->tx_pending[VO_PRIORITY])),
+ atomic_read(&(priv->tx_pending[BE_PRIORITY])),
+ atomic_read(&(priv->tx_pending[BK_PRIORITY])),
+ priv->stats.txvidrop,
+ priv->stats.txvodrop,
+ priv->stats.txbedrop,
+ priv->stats.txbkdrop,
+ priv->stats.txdatapkt
);
return 0;
@@ -610,12 +575,12 @@ static int proc_get_stats_rx(struct seq_file *m, void *v)
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
seq_printf(m,
- "RX packets: %lu\n"
- "RX urb status error: %lu\n"
- "RX invalid urb error: %lu\n",
- priv->stats.rxoktotal,
- priv->stats.rxstaterr,
- priv->stats.rxurberr);
+ "RX packets: %lu\n"
+ "RX urb status error: %lu\n"
+ "RX invalid urb error: %lu\n",
+ priv->stats.rxoktotal,
+ priv->stats.rxstaterr,
+ priv->stats.rxurberr);
return 0;
}
@@ -700,27 +665,7 @@ void rtl8192_proc_remove_one(struct net_device *dev)
-----------------------------MISC STUFF-------------------------
*****************************************************************************/
-/* this is only for debugging */
-void print_buffer(u32 *buffer, int len)
-{
- int i;
- u8 *buf =(u8*)buffer;
-
- printk("ASCII BUFFER DUMP (len: %x):\n",len);
-
- for(i=0;i<len;i++)
- printk("%c",buf[i]);
-
- printk("\nBINARY BUFFER DUMP (len: %x):\n",len);
-
- for(i=0;i<len;i++)
- printk("%x",buf[i]);
-
- printk("\n");
-}
-
-//short check_nic_enough_desc(struct net_device *dev, priority_t priority)
-short check_nic_enough_desc(struct net_device *dev,int queue_index)
+short check_nic_enough_desc(struct net_device *dev, int queue_index)
{
struct r8192_priv *priv = ieee80211_priv(dev);
int used = atomic_read(&priv->tx_pending[queue_index]);
@@ -731,10 +676,8 @@ short check_nic_enough_desc(struct net_device *dev,int queue_index)
void tx_timeout(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- //rtl8192_commit(dev);
schedule_work(&priv->reset_wq);
- //DMESG("TXTIMEOUT");
}
@@ -742,41 +685,24 @@ void tx_timeout(struct net_device *dev)
void dump_eprom(struct net_device *dev)
{
int i;
- for(i=0; i<63; i++)
- RT_TRACE(COMP_EPROM, "EEPROM addr %x : %x", i, eprom_read(dev,i));
+ for (i = 0; i < 63; i++)
+ RT_TRACE(COMP_EPROM, "EEPROM addr %x : %x", i, eprom_read(dev, i));
}
-/* this is only for debug */
-void rtl8192_dump_reg(struct net_device *dev)
-{
- int i;
- int n;
- int max=0x1ff;
-
- RT_TRACE(COMP_PHY, "Dumping NIC register map");
-
- for(n=0;n<=max;)
- {
- printk( "\nD: %2x> ", n);
- for(i=0;i<16 && n<=max;i++,n++)
- printk("%2x ",read_nic_byte(dev,n));
- }
- printk("\n");
-}
/****************************************************************************
------------------------------HW STUFF---------------------------
*****************************************************************************/
-void rtl8192_set_mode(struct net_device *dev,int mode)
+void rtl8192_set_mode(struct net_device *dev, int mode)
{
u8 ecmd;
- ecmd=read_nic_byte(dev, EPROM_CMD);
- ecmd=ecmd &~ EPROM_CMD_OPERATING_MODE_MASK;
- ecmd=ecmd | (mode<<EPROM_CMD_OPERATING_MODE_SHIFT);
- ecmd=ecmd &~ (1<<EPROM_CS_SHIFT);
- ecmd=ecmd &~ (1<<EPROM_CK_SHIFT);
+ read_nic_byte(dev, EPROM_CMD, &ecmd);
+ ecmd = ecmd & ~EPROM_CMD_OPERATING_MODE_MASK;
+ ecmd = ecmd | (mode<<EPROM_CMD_OPERATING_MODE_SHIFT);
+ ecmd = ecmd & ~EPROM_CS_BIT;
+ ecmd = ecmd & ~EPROM_CK_BIT;
write_nic_byte(dev, EPROM_CMD, ecmd);
}
@@ -786,15 +712,15 @@ void rtl8192_update_msr(struct net_device *dev)
struct r8192_priv *priv = ieee80211_priv(dev);
u8 msr;
- msr = read_nic_byte(dev, MSR);
- msr &= ~ MSR_LINK_MASK;
+ read_nic_byte(dev, MSR, &msr);
+ msr &= ~MSR_LINK_MASK;
/* do not change in link_state != WLAN_LINK_ASSOCIATED.
* msr must be updated if the state is ASSOCIATING.
* this is intentional and make sense for ad-hoc and
* master (see the create BSS/IBSS func)
*/
- if (priv->ieee80211->state == IEEE80211_LINKED){
+ if (priv->ieee80211->state == IEEE80211_LINKED) {
if (priv->ieee80211->iw_mode == IW_MODE_INFRA)
msr |= (MSR_LINK_MANAGED<<MSR_LINK_SHIFT);
@@ -803,39 +729,31 @@ void rtl8192_update_msr(struct net_device *dev)
else if (priv->ieee80211->iw_mode == IW_MODE_MASTER)
msr |= (MSR_LINK_MASTER<<MSR_LINK_SHIFT);
- }else
+ } else {
msr |= (MSR_LINK_NONE<<MSR_LINK_SHIFT);
+ }
write_nic_byte(dev, MSR, msr);
}
-void rtl8192_set_chan(struct net_device *dev,short ch)
+void rtl8192_set_chan(struct net_device *dev, short ch)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
-// u32 tx;
- RT_TRACE(COMP_CH, "=====>%s()====ch:%d\n", __FUNCTION__, ch);
- priv->chan=ch;
+ RT_TRACE(COMP_CH, "=====>%s()====ch:%d\n", __func__, ch);
+ priv->chan = ch;
/* this hack should avoid frame TX during channel setting*/
-
-// tx = read_nic_dword(dev,TX_CONF);
-// tx &= ~TX_LOOPBACK_MASK;
-
#ifndef LOOP_TEST
-// write_nic_dword(dev,TX_CONF, tx |( TX_LOOPBACK_MAC<<TX_LOOPBACK_SHIFT));
-
//need to implement rf set channel here WB
if (priv->rf_set_chan)
- priv->rf_set_chan(dev,priv->chan);
+ priv->rf_set_chan(dev, priv->chan);
mdelay(10);
-// write_nic_dword(dev,TX_CONF,tx | (TX_LOOPBACK_NONE<<TX_LOOPBACK_SHIFT));
#endif
}
static void rtl8192_rx_isr(struct urb *urb);
-//static void rtl8192_rx_isr(struct urb *rx_urb);
u32 get_rxpacket_shiftbytes_819xusb(struct ieee80211_rx_stats *pstats)
{
@@ -847,10 +765,10 @@ u32 get_rxpacket_shiftbytes_819xusb(struct ieee80211_rx_stats *pstats)
else
#endif
return (sizeof(rx_desc_819x_usb) + pstats->RxDrvInfoSize
- + pstats->RxBufShift);
+ + pstats->RxBufShift);
}
-static int rtl8192_rx_initiate(struct net_device*dev)
+static int rtl8192_rx_initiate(struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct urb *entry;
@@ -867,7 +785,6 @@ static int rtl8192_rx_initiate(struct net_device*dev)
kfree_skb(skb);
break;
}
-// printk("nomal packet IN request!\n");
usb_fill_bulk_urb(entry, priv->udev,
usb_rcvbulkpipe(priv->udev, 3), skb_tail_pointer(skb),
RX_URB_SIZE, rtl8192_rx_isr, skb);
@@ -881,8 +798,7 @@ static int rtl8192_rx_initiate(struct net_device*dev)
/* command packet rx procedure */
while (skb_queue_len(&priv->rx_queue) < MAX_RX_URB + 3) {
-// printk("command packet IN request!\n");
- skb = __dev_alloc_skb(RX_URB_SIZE ,GFP_KERNEL);
+ skb = __dev_alloc_skb(RX_URB_SIZE, GFP_KERNEL);
if (!skb)
break;
entry = usb_alloc_urb(0, GFP_KERNEL);
@@ -896,7 +812,7 @@ static int rtl8192_rx_initiate(struct net_device*dev)
info = (struct rtl8192_rx_info *) skb->cb;
info->urb = entry;
info->dev = dev;
- info->out_pipe = 9; //denote rx cmd packet queue
+		info->out_pipe = 9; //denotes the rx cmd packet queue
skb_queue_tail(&priv->rx_queue, skb);
usb_submit_urb(entry, GFP_KERNEL);
}
@@ -909,64 +825,47 @@ void rtl8192_set_rxconf(struct net_device *dev)
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
u32 rxconf;
- rxconf=read_nic_dword(dev,RCR);
- rxconf = rxconf &~ MAC_FILTER_MASK;
+ read_nic_dword(dev, RCR, &rxconf);
+ rxconf = rxconf & ~MAC_FILTER_MASK;
rxconf = rxconf | RCR_AMF;
rxconf = rxconf | RCR_ADF;
rxconf = rxconf | RCR_AB;
rxconf = rxconf | RCR_AM;
- //rxconf = rxconf | RCR_ACF;
- if (dev->flags & IFF_PROMISC) {DMESG ("NIC in promisc mode");}
+ if (dev->flags & IFF_PROMISC)
+ DMESG("NIC in promisc mode");
- if(priv->ieee80211->iw_mode == IW_MODE_MONITOR || \
- dev->flags & IFF_PROMISC){
+ if (priv->ieee80211->iw_mode == IW_MODE_MONITOR ||
+ dev->flags & IFF_PROMISC) {
rxconf = rxconf | RCR_AAP;
- } /*else if(priv->ieee80211->iw_mode == IW_MODE_MASTER){
- rxconf = rxconf | (1<<ACCEPT_ALLMAC_FRAME_SHIFT);
- rxconf = rxconf | (1<<RX_CHECK_BSSID_SHIFT);
- }*/else{
+ } else {
rxconf = rxconf | RCR_APM;
rxconf = rxconf | RCR_CBSSID;
}
- if(priv->ieee80211->iw_mode == IW_MODE_MONITOR){
+ if (priv->ieee80211->iw_mode == IW_MODE_MONITOR) {
rxconf = rxconf | RCR_AICV;
rxconf = rxconf | RCR_APWRMGT;
}
- if( priv->crcmon == 1 && priv->ieee80211->iw_mode == IW_MODE_MONITOR)
+ if (priv->crcmon == 1 && priv->ieee80211->iw_mode == IW_MODE_MONITOR)
rxconf = rxconf | RCR_ACRC32;
- rxconf = rxconf &~ RX_FIFO_THRESHOLD_MASK;
+ rxconf = rxconf & ~RX_FIFO_THRESHOLD_MASK;
rxconf = rxconf | (RX_FIFO_THRESHOLD_NONE<<RX_FIFO_THRESHOLD_SHIFT);
- rxconf = rxconf &~ MAX_RX_DMA_MASK;
+ rxconf = rxconf & ~MAX_RX_DMA_MASK;
rxconf = rxconf | ((u32)7<<RCR_MXDMA_OFFSET);
-// rxconf = rxconf | (1<<RX_AUTORESETPHY_SHIFT);
rxconf = rxconf | RCR_ONLYERLPKT;
-// rxconf = rxconf &~ RCR_CS_MASK;
-// rxconf = rxconf | (1<<RCR_CS_SHIFT);
-
write_nic_dword(dev, RCR, rxconf);
-
- #ifdef DEBUG_RX
- DMESG("rxconf: %x %x",rxconf ,read_nic_dword(dev,RCR));
- #endif
}
//wait to be removed
void rtl8192_rx_enable(struct net_device *dev)
{
- //u8 cmd;
-
- //struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
-
rtl8192_rx_initiate(dev);
-
-// rtl8192_set_rxconf(dev);
}
@@ -983,9 +882,8 @@ void rtl8192_rtx_disable(struct net_device *dev)
struct sk_buff *skb;
struct rtl8192_rx_info *info;
- cmd=read_nic_byte(dev,CMDR);
- write_nic_byte(dev, CMDR, cmd &~ \
- (CR_TE|CR_RE));
+ read_nic_byte(dev, CMDR, &cmd);
+ write_nic_byte(dev, CMDR, cmd & ~(CR_TE|CR_RE));
force_pci_posting(dev);
mdelay(10);
@@ -998,9 +896,8 @@ void rtl8192_rtx_disable(struct net_device *dev)
kfree_skb(skb);
}
- if (skb_queue_len(&priv->skb_queue)) {
- printk(KERN_WARNING "skb_queue not empty\n");
- }
+ if (skb_queue_len(&priv->skb_queue))
+ netdev_warn(dev, "skb_queue not empty\n");
skb_queue_purge(&priv->skb_queue);
return;
@@ -1014,40 +911,40 @@ int alloc_tx_beacon_desc_ring(struct net_device *dev, int count)
inline u16 ieeerate2rtlrate(int rate)
{
- switch(rate){
+ switch (rate) {
case 10:
- return 0;
+ return 0;
case 20:
- return 1;
+ return 1;
case 55:
- return 2;
+ return 2;
case 110:
- return 3;
+ return 3;
case 60:
- return 4;
+ return 4;
case 90:
- return 5;
+ return 5;
case 120:
- return 6;
+ return 6;
case 180:
- return 7;
+ return 7;
case 240:
- return 8;
+ return 8;
case 360:
- return 9;
+ return 9;
case 480:
- return 10;
+ return 10;
case 540:
- return 11;
+ return 11;
default:
- return 3;
+ return 3;
}
}
-static u16 rtl_rate[] = {10,20,55,110,60,90,120,180,240,360,480,540};
+static u16 rtl_rate[] = {10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540};
inline u16 rtl8192_rate2rate(short rate)
{
- if (rate >11) return 0;
+ if (rate > 11) return 0;
return rtl_rate[rate];
}
@@ -1061,14 +958,13 @@ static void rtl8192_rx_isr(struct urb *urb)
struct r8192_priv *priv = ieee80211_priv(dev);
int out_pipe = info->out_pipe;
int err;
- if(!priv->up)
+ if (!priv->up)
return;
if (unlikely(urb->status)) {
info->urb = NULL;
priv->stats.rxstaterr++;
priv->ieee80211->stats.rx_errors++;
usb_free_urb(urb);
- // printk("%s():rx status err\n",__FUNCTION__);
return;
}
skb_unlink(skb, &priv->rx_queue);
@@ -1080,14 +976,14 @@ static void rtl8192_rx_isr(struct urb *urb)
skb = dev_alloc_skb(RX_URB_SIZE);
if (unlikely(!skb)) {
usb_free_urb(urb);
- printk("%s():can,t alloc skb\n",__FUNCTION__);
+ netdev_err(dev, "%s(): can't alloc skb\n", __func__);
/* TODO check rx queue length and refill *somewhere* */
return;
}
usb_fill_bulk_urb(urb, priv->udev,
- usb_rcvbulkpipe(priv->udev, out_pipe), skb_tail_pointer(skb),
- RX_URB_SIZE, rtl8192_rx_isr, skb);
+ usb_rcvbulkpipe(priv->udev, out_pipe), skb_tail_pointer(skb),
+ RX_URB_SIZE, rtl8192_rx_isr, skb);
info = (struct rtl8192_rx_info *) skb->cb;
info->urb = urb;
@@ -1098,31 +994,19 @@ static void rtl8192_rx_isr(struct urb *urb)
urb->context = skb;
skb_queue_tail(&priv->rx_queue, skb);
err = usb_submit_urb(urb, GFP_ATOMIC);
- if(err && err != EPERM)
- printk("can not submit rxurb, err is %x,URB status is %x\n",err,urb->status);
+ if (err && err != EPERM)
+ netdev_err(dev, "can not submit rxurb, err is %x, URB status is %x\n", err, urb->status);
}
-u32
-rtl819xusb_rx_command_packet(
- struct net_device *dev,
- struct ieee80211_rx_stats *pstats
- )
+u32 rtl819xusb_rx_command_packet(struct net_device *dev,
+ struct ieee80211_rx_stats *pstats)
{
u32 status;
- //RT_TRACE(COMP_RECV, DBG_TRACE, ("---> RxCommandPacketHandle819xUsb()\n"));
-
status = cmpk_message_handle_rx(dev, pstats);
if (status)
- {
DMESG("rxcommandpackethandle819xusb: It is a command packet\n");
- }
- else
- {
- //RT_TRACE(COMP_RECV, DBG_TRACE, ("RxCommandPacketHandle819xUsb: It is not a command packet\n"));
- }
- //RT_TRACE(COMP_RECV, DBG_TRACE, ("<--- RxCommandPacketHandle819xUsb()\n"));
return status;
}
@@ -1150,24 +1034,17 @@ void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev, int rat
u8 queue_index = tcb_desc->queue_index;
/* shall not be referred by command packet */
- assert(queue_index != TXCMD_QUEUE);
+ RTL8192U_ASSERT(queue_index != TXCMD_QUEUE);
- spin_lock_irqsave(&priv->tx_lock,flags);
+ spin_lock_irqsave(&priv->tx_lock, flags);
- memcpy((unsigned char *)(skb->cb),&dev,sizeof(dev));
-// tcb_desc->RATRIndex = 7;
-// tcb_desc->bTxDisableRateFallBack = 1;
-// tcb_desc->bTxUseDriverAssingedRate = 1;
+ memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
tcb_desc->bTxEnableFwCalcDur = 1;
skb_push(skb, priv->ieee80211->tx_headroom);
ret = rtl8192_tx(dev, skb);
- //priv->ieee80211->stats.tx_bytes+=(skb->len - priv->ieee80211->tx_headroom);
- //priv->ieee80211->stats.tx_packets++;
-
- spin_unlock_irqrestore(&priv->tx_lock,flags);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
-// return ret;
return;
}
@@ -1176,7 +1053,7 @@ void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev, int rat
* If the ring is full packet are dropped (for data frame the queue
* is stopped before this can happen).
*/
-int rtl8192_hard_start_xmit(struct sk_buff *skb,struct net_device *dev)
+int rtl8192_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
int ret;
@@ -1185,21 +1062,21 @@ int rtl8192_hard_start_xmit(struct sk_buff *skb,struct net_device *dev)
u8 queue_index = tcb_desc->queue_index;
- spin_lock_irqsave(&priv->tx_lock,flags);
+ spin_lock_irqsave(&priv->tx_lock, flags);
- memcpy((unsigned char *)(skb->cb),&dev,sizeof(dev));
- if(queue_index == TXCMD_QUEUE) {
+ memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
+ if (queue_index == TXCMD_QUEUE) {
skb_push(skb, USB_HWDESC_HEADER_LEN);
rtl819xU_tx_cmd(dev, skb);
ret = 1;
- spin_unlock_irqrestore(&priv->tx_lock,flags);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
return ret;
} else {
skb_push(skb, priv->ieee80211->tx_headroom);
ret = rtl8192_tx(dev, skb);
}
- spin_unlock_irqrestore(&priv->tx_lock,flags);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
return ret;
}
@@ -1211,7 +1088,7 @@ void rtl8192_try_wake_queue(struct net_device *dev, int pri);
u16 DrvAggr_PaddingAdd(struct net_device *dev, struct sk_buff *skb)
{
u16 PaddingNum = 256 - ((skb->len + TX_PACKET_DRVAGGR_SUBFRAME_SHIFT_BYTES) % 256);
- return (PaddingNum&0xff);
+ return PaddingNum & 0xff;
}
u8 MRateToHwRate8190Pci(u8 rate);
@@ -1239,7 +1116,7 @@ struct sk_buff *DrvAggr_Aggregation(struct net_device *dev, struct ieee80211_drv
/* Get the total aggregation length including the padding space and
* sub frame header.
*/
- for(i = 1; i < pSendList->nr_drv_agg_frames; i++) {
+ for (i = 1; i < pSendList->nr_drv_agg_frames; i++) {
TotalLength += DrvAggr_PaddingAdd(dev, skb);
skb = pSendList->tx_agg_frames[i];
TotalLength += (skb->len + TX_PACKET_DRVAGGR_SUBFRAME_SHIFT_BYTES);
@@ -1250,37 +1127,33 @@ struct sk_buff *DrvAggr_Aggregation(struct net_device *dev, struct ieee80211_drv
memset(agg_skb->data, 0, agg_skb->len);
skb_reserve(agg_skb, ieee->tx_headroom);
-// RT_DEBUG_DATA(COMP_SEND, skb->cb, sizeof(skb->cb));
/* reserve info for first subframe Tx descriptor to be set in the tx function */
skb = pSendList->tx_agg_frames[0];
tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
tcb_desc->drv_agg_enable = 1;
tcb_desc->pkt_size = skb->len;
tcb_desc->DrvAggrNum = pSendList->nr_drv_agg_frames;
- printk("DrvAggNum = %d\n", tcb_desc->DrvAggrNum);
-// RT_DEBUG_DATA(COMP_SEND, skb->cb, sizeof(skb->cb));
-// printk("========>skb->data ======> \n");
-// RT_DEBUG_DATA(COMP_SEND, skb->data, skb->len);
+ netdev_dbg(dev, "DrvAggNum = %d\n", tcb_desc->DrvAggrNum);
memcpy(agg_skb->cb, skb->cb, sizeof(skb->cb));
- memcpy(skb_put(agg_skb,skb->len),skb->data,skb->len);
+ memcpy(skb_put(agg_skb, skb->len), skb->data, skb->len);
- for(i = 1; i < pSendList->nr_drv_agg_frames; i++) {
+ for (i = 1; i < pSendList->nr_drv_agg_frames; i++) {
/* push the next sub frame to be 256 byte aline */
- skb_put(agg_skb,DrvAggr_PaddingAdd(dev,skb));
+ skb_put(agg_skb, DrvAggr_PaddingAdd(dev, skb));
/* Subframe drv Tx descriptor and firmware info setting */
skb = pSendList->tx_agg_frames[i];
tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- tx_agg_desc = (tx_desc_819x_usb_aggr_subframe *)agg_skb->tail;
- tx_fwinfo = (tx_fwinfo_819x_usb *)(agg_skb->tail + sizeof(tx_desc_819x_usb_aggr_subframe));
+ tx_agg_desc = (tx_desc_819x_usb_aggr_subframe *)skb_tail_pointer(agg_skb);
+ tx_fwinfo = (tx_fwinfo_819x_usb *)(skb_tail_pointer(agg_skb) + sizeof(tx_desc_819x_usb_aggr_subframe));
- memset(tx_fwinfo,0,sizeof(tx_fwinfo_819x_usb));
+ memset(tx_fwinfo, 0, sizeof(tx_fwinfo_819x_usb));
/* DWORD 0 */
- tx_fwinfo->TxHT = (tcb_desc->data_rate&0x80)?1:0;
+ tx_fwinfo->TxHT = (tcb_desc->data_rate&0x80) ? 1 : 0;
tx_fwinfo->TxRate = MRateToHwRate8190Pci(tcb_desc->data_rate);
tx_fwinfo->EnableCPUDur = tcb_desc->bTxEnableFwCalcDur;
tx_fwinfo->Short = QueryIsShort(tx_fwinfo->TxHT, tx_fwinfo->TxRate, tcb_desc);
- if(tcb_desc->bAMPDUEnable) {//AMPDU enabled
+ if (tcb_desc->bAMPDUEnable) {//AMPDU enabled
tx_fwinfo->AllowAggregation = 1;
/* DWORD 1 */
tx_fwinfo->RxMF = tcb_desc->ampdu_factor;
@@ -1293,20 +1166,19 @@ struct sk_buff *DrvAggr_Aggregation(struct net_device *dev, struct ieee80211_drv
}
/* Protection mode related */
- tx_fwinfo->RtsEnable = (tcb_desc->bRTSEnable)?1:0;
- tx_fwinfo->CtsEnable = (tcb_desc->bCTSEnable)?1:0;
- tx_fwinfo->RtsSTBC = (tcb_desc->bRTSSTBC)?1:0;
- tx_fwinfo->RtsHT = (tcb_desc->rts_rate&0x80)?1:0;
+ tx_fwinfo->RtsEnable = (tcb_desc->bRTSEnable) ? 1 : 0;
+ tx_fwinfo->CtsEnable = (tcb_desc->bCTSEnable) ? 1 : 0;
+ tx_fwinfo->RtsSTBC = (tcb_desc->bRTSSTBC) ? 1 : 0;
+ tx_fwinfo->RtsHT = (tcb_desc->rts_rate&0x80) ? 1 : 0;
tx_fwinfo->RtsRate = MRateToHwRate8190Pci((u8)tcb_desc->rts_rate);
- tx_fwinfo->RtsSubcarrier = (tx_fwinfo->RtsHT==0)?(tcb_desc->RTSSC):0;
- tx_fwinfo->RtsBandwidth = (tx_fwinfo->RtsHT==1)?((tcb_desc->bRTSBW)?1:0):0;
- tx_fwinfo->RtsShort = (tx_fwinfo->RtsHT==0)?(tcb_desc->bRTSUseShortPreamble?1:0):\
- (tcb_desc->bRTSUseShortGI?1:0);
+ tx_fwinfo->RtsSubcarrier = (tx_fwinfo->RtsHT == 0) ? (tcb_desc->RTSSC) : 0;
+ tx_fwinfo->RtsBandwidth = (tx_fwinfo->RtsHT == 1) ? ((tcb_desc->bRTSBW) ? 1 : 0) : 0;
+ tx_fwinfo->RtsShort = (tx_fwinfo->RtsHT == 0) ? (tcb_desc->bRTSUseShortPreamble ? 1 : 0) :
+ (tcb_desc->bRTSUseShortGI ? 1 : 0);
/* Set Bandwidth and sub-channel settings. */
- if(priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20_40)
- {
- if(tcb_desc->bPacketBW) {
+ if (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20_40) {
+ if (tcb_desc->bPacketBW) {
tx_fwinfo->TxBandwidth = 1;
tx_fwinfo->TxSubCarrier = 0; //By SD3's Jerry suggestion, use duplicated mode
} else {
@@ -1321,41 +1193,35 @@ struct sk_buff *DrvAggr_Aggregation(struct net_device *dev, struct ieee80211_drv
/* Fill Tx descriptor */
memset(tx_agg_desc, 0, sizeof(tx_desc_819x_usb_aggr_subframe));
/* DWORD 0 */
- //tx_agg_desc->LINIP = 0;
- //tx_agg_desc->CmdInit = 1;
tx_agg_desc->Offset = sizeof(tx_fwinfo_819x_usb) + 8;
/* already raw data, need not to subtract header length */
tx_agg_desc->PktSize = skb->len & 0xffff;
/*DWORD 1*/
- tx_agg_desc->SecCAMID= 0;
+ tx_agg_desc->SecCAMID = 0;
tx_agg_desc->RATid = tcb_desc->RATRIndex;
- {
- //MPDUOverhead = 0;
- tx_agg_desc->NoEnc = 1;
- }
+ tx_agg_desc->NoEnc = 1;
tx_agg_desc->SecType = 0x0;
if (tcb_desc->bHwSec) {
- switch (priv->ieee80211->pairwise_key_type)
- {
- case KEY_TYPE_WEP40:
- case KEY_TYPE_WEP104:
- tx_agg_desc->SecType = 0x1;
- tx_agg_desc->NoEnc = 0;
- break;
- case KEY_TYPE_TKIP:
- tx_agg_desc->SecType = 0x2;
- tx_agg_desc->NoEnc = 0;
- break;
- case KEY_TYPE_CCMP:
- tx_agg_desc->SecType = 0x3;
- tx_agg_desc->NoEnc = 0;
- break;
- case KEY_TYPE_NA:
- tx_agg_desc->SecType = 0x0;
- tx_agg_desc->NoEnc = 1;
- break;
+ switch (priv->ieee80211->pairwise_key_type) {
+ case KEY_TYPE_WEP40:
+ case KEY_TYPE_WEP104:
+ tx_agg_desc->SecType = 0x1;
+ tx_agg_desc->NoEnc = 0;
+ break;
+ case KEY_TYPE_TKIP:
+ tx_agg_desc->SecType = 0x2;
+ tx_agg_desc->NoEnc = 0;
+ break;
+ case KEY_TYPE_CCMP:
+ tx_agg_desc->SecType = 0x3;
+ tx_agg_desc->NoEnc = 0;
+ break;
+ case KEY_TYPE_NA:
+ tx_agg_desc->SecType = 0x0;
+ tx_agg_desc->NoEnc = 1;
+ break;
}
}
@@ -1369,16 +1235,14 @@ struct sk_buff *DrvAggr_Aggregation(struct net_device *dev, struct ieee80211_drv
//DWORD 2
/* According windows driver, it seems that there no need to fill this field */
- //tx_agg_desc->TxBufferSize= (u32)(skb->len - USB_HWDESC_HEADER_LEN);
/* to fill next packet */
- skb_put(agg_skb,TX_PACKET_DRVAGGR_SUBFRAME_SHIFT_BYTES);
- memcpy(skb_put(agg_skb,skb->len),skb->data,skb->len);
+ skb_put(agg_skb, TX_PACKET_DRVAGGR_SUBFRAME_SHIFT_BYTES);
+ memcpy(skb_put(agg_skb, skb->len), skb->data, skb->len);
}
- for(i = 0; i < pSendList->nr_drv_agg_frames; i++) {
+ for (i = 0; i < pSendList->nr_drv_agg_frames; i++)
dev_kfree_skb_any(pSendList->tx_agg_frames[i]);
- }
return agg_skb;
}
@@ -1388,7 +1252,7 @@ struct sk_buff *DrvAggr_Aggregation(struct net_device *dev, struct ieee80211_drv
If no proper TCB is found to do aggregation, SendList will only contain the input TCB.
*/
u8 DrvAggr_GetAggregatibleList(struct net_device *dev, struct sk_buff *skb,
- struct ieee80211_drv_agg_txb *pSendList)
+ struct ieee80211_drv_agg_txb *pSendList)
{
struct ieee80211_device *ieee = netdev_priv(dev);
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
@@ -1398,11 +1262,10 @@ u8 DrvAggr_GetAggregatibleList(struct net_device *dev, struct sk_buff *skb,
do {
pSendList->tx_agg_frames[pSendList->nr_drv_agg_frames++] = skb;
- if(pSendList->nr_drv_agg_frames >= nMaxAggrNum) {
+ if (pSendList->nr_drv_agg_frames >= nMaxAggrNum)
break;
- }
- } while((skb = skb_dequeue(&ieee->skb_drv_aggQ[QueueID])));
+ } while ((skb = skb_dequeue(&ieee->skb_drv_aggQ[QueueID])));
RT_TRACE(COMP_AMSDU, "DrvAggr_GetAggregatibleList, nAggrTcbNum = %d \n", pSendList->nr_drv_agg_frames);
return pSendList->nr_drv_agg_frames;
@@ -1411,105 +1274,86 @@ u8 DrvAggr_GetAggregatibleList(struct net_device *dev, struct sk_buff *skb,
static void rtl8192_tx_isr(struct urb *tx_urb)
{
- struct sk_buff *skb = (struct sk_buff*)tx_urb->context;
+ struct sk_buff *skb = (struct sk_buff *)tx_urb->context;
struct net_device *dev = NULL;
struct r8192_priv *priv = NULL;
cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
u8 queue_index = tcb_desc->queue_index;
-// bool bToSend0Byte;
-// u16 BufLen = skb->len;
- memcpy(&dev,(struct net_device*)(skb->cb),sizeof(struct net_device*));
+ memcpy(&dev, (struct net_device *)(skb->cb), sizeof(struct net_device *));
priv = ieee80211_priv(dev);
- if(tcb_desc->queue_index != TXCMD_QUEUE) {
- if(tx_urb->status == 0) {
+ if (tcb_desc->queue_index != TXCMD_QUEUE) {
+ if (tx_urb->status == 0) {
dev->trans_start = jiffies;
- // Act as station mode, destination shall be unicast address.
- //priv->ieee80211->stats.tx_bytes+=(skb->len - priv->ieee80211->tx_headroom);
- //priv->ieee80211->stats.tx_packets++;
priv->stats.txoktotal++;
priv->ieee80211->LinkDetectInfo.NumTxOkInPeriod++;
priv->stats.txbytesunicast += (skb->len - priv->ieee80211->tx_headroom);
} else {
priv->ieee80211->stats.tx_errors++;
- //priv->stats.txmanageerr++;
/* TODO */
}
}
/* free skb and tx_urb */
- if(skb != NULL) {
+ if (skb != NULL) {
dev_kfree_skb_any(skb);
usb_free_urb(tx_urb);
atomic_dec(&priv->tx_pending[queue_index]);
}
- {
- //
- // Handle HW Beacon:
- // We had transfer our beacon frame to host controller at this moment.
- //
- //
- // Caution:
- // Handling the wait queue of command packets.
- // For Tx command packets, we must not do TCB fragment because it is not handled right now.
- // We must cut the packets to match the size of TX_CMD_PKT before we send it.
- //
+ //
+ // Handle HW Beacon:
+	// We have transferred our beacon frame to the host controller at this moment.
+ //
+ //
+ // Caution:
+ // Handling the wait queue of command packets.
+	// For Tx command packets, we must not do TCB fragmentation because it is not handled right now.
+	// We must cut the packets to match the size of TX_CMD_PKT before we send them.
+ //
- /* Handle MPDU in wait queue. */
- if(queue_index != BEACON_QUEUE) {
- /* Don't send data frame during scanning.*/
- if((skb_queue_len(&priv->ieee80211->skb_waitQ[queue_index]) != 0)&&\
- (!(priv->ieee80211->queue_stop))) {
- if(NULL != (skb = skb_dequeue(&(priv->ieee80211->skb_waitQ[queue_index]))))
- priv->ieee80211->softmac_hard_start_xmit(skb, dev);
+ /* Handle MPDU in wait queue. */
+ if (queue_index != BEACON_QUEUE) {
+ /* Don't send data frame during scanning.*/
+ if ((skb_queue_len(&priv->ieee80211->skb_waitQ[queue_index]) != 0) &&
+ (!(priv->ieee80211->queue_stop))) {
+ if (NULL != (skb = skb_dequeue(&(priv->ieee80211->skb_waitQ[queue_index]))))
+ priv->ieee80211->softmac_hard_start_xmit(skb, dev);
- return; //modified by david to avoid further processing AMSDU
- }
+ return; //modified by david to avoid further processing AMSDU
+ }
#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
- else if ((skb_queue_len(&priv->ieee80211->skb_drv_aggQ[queue_index])!= 0)&&\
- (!(priv->ieee80211->queue_stop))) {
- // Tx Driver Aggregation process
- /* The driver will aggregation the packets according to the following stats
- * 1. check whether there's tx irq available, for it's a completion return
- * function, it should contain enough tx irq;
- * 2. check packet type;
- * 3. initialize sendlist, check whether the to-be send packet no greater than 1
- * 4. aggregates the packets, and fill firmware info and tx desc into it, etc.
- * 5. check whether the packet could be sent, otherwise just insert into wait head
- * */
- skb = skb_dequeue(&priv->ieee80211->skb_drv_aggQ[queue_index]);
- if(!check_nic_enough_desc(dev, queue_index)) {
- skb_queue_head(&(priv->ieee80211->skb_drv_aggQ[queue_index]), skb);
- return;
- }
+ else if ((skb_queue_len(&priv->ieee80211->skb_drv_aggQ[queue_index]) != 0) &&
+ (!(priv->ieee80211->queue_stop))) {
+ // Tx Driver Aggregation process
+			/* The driver will aggregate the packets according to the following steps:
+			 * 1. check whether a tx irq is available; since this is a completion
+			 *    callback, there should be enough tx irqs;
+			 * 2. check the packet type;
+			 * 3. initialize the send list and check whether more than one packet is to be sent;
+			 * 4. aggregate the packets and fill the firmware info and tx desc into it, etc.;
+			 * 5. check whether the packet can be sent, otherwise insert it at the head of the wait queue.
+ * */
+ skb = skb_dequeue(&priv->ieee80211->skb_drv_aggQ[queue_index]);
+ if (!check_nic_enough_desc(dev, queue_index)) {
+ skb_queue_head(&(priv->ieee80211->skb_drv_aggQ[queue_index]), skb);
+ return;
+ }
+
+ /*TODO*/
+ {
+ struct ieee80211_drv_agg_txb SendList;
+
+ memset(&SendList, 0, sizeof(struct ieee80211_drv_agg_txb));
+ if (DrvAggr_GetAggregatibleList(dev, skb, &SendList) > 1) {
+ skb = DrvAggr_Aggregation(dev, &SendList);
- {
- /*TODO*/
- /*
- u8* pHeader = skb->data;
-
- if(IsMgntQosData(pHeader) ||
- IsMgntQData_Ack(pHeader) ||
- IsMgntQData_Poll(pHeader) ||
- IsMgntQData_Poll_Ack(pHeader)
- )
- */
- {
- struct ieee80211_drv_agg_txb SendList;
-
- memset(&SendList, 0, sizeof(struct ieee80211_drv_agg_txb));
- if(DrvAggr_GetAggregatibleList(dev, skb, &SendList) > 1) {
- skb = DrvAggr_Aggregation(dev, &SendList);
-
- }
- }
- priv->ieee80211->softmac_hard_start_xmit(skb, dev);
}
}
-#endif
+ priv->ieee80211->softmac_hard_start_xmit(skb, dev);
}
+#endif
}
}
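
As a rough standalone illustration of the completion-handler dispatch above (wait queue first, then the driver-aggregation queue, aggregating only when more than one frame is pending); the types and helper names below are stand-ins for the driver's skb and queue primitives, not part of the patch:

#include <stdbool.h>
#include <stdio.h>

struct fake_queue { int len; };			/* stand-in for a struct sk_buff_head */

static bool queue_stop;				/* stand-in for ieee->queue_stop */
static struct fake_queue waitq   = { 0 };	/* skb_waitQ[queue_index] */
static struct fake_queue drv_agg = { 3 };	/* skb_drv_aggQ[queue_index] */

static bool nic_has_free_desc(void) { return true; }	/* assumed descriptor check */

static void tx_complete_dispatch(void)
{
	if (waitq.len != 0 && !queue_stop) {
		waitq.len--;
		printf("xmit one frame from the wait queue\n");
		return;				/* mirrors the early return above */
	}
	if (drv_agg.len != 0 && !queue_stop) {
		if (!nic_has_free_desc()) {
			printf("no free descriptors, requeue at head\n");
			return;
		}
		if (drv_agg.len > 1)
			printf("aggregate %d frames into one bulk transfer\n", drv_agg.len);
		printf("hand the (possibly aggregated) frame to softmac xmit\n");
		drv_agg.len = 0;
	}
}

int main(void)
{
	tx_complete_dispatch();
	return 0;
}
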
@@ -1519,72 +1363,67 @@ void rtl8192_beacon_stop(struct net_device *dev)
u8 msr, msrm, msr2;
struct r8192_priv *priv = ieee80211_priv(dev);
- msr = read_nic_byte(dev, MSR);
+ read_nic_byte(dev, MSR, &msr);
msrm = msr & MSR_LINK_MASK;
msr2 = msr & ~MSR_LINK_MASK;
- if(NIC_8192U == priv->card_8192) {
+ if (NIC_8192U == priv->card_8192)
usb_kill_urb(priv->rx_urb[MAX_RX_URB]);
- }
if ((msrm == (MSR_LINK_ADHOC<<MSR_LINK_SHIFT) ||
- (msrm == (MSR_LINK_MASTER<<MSR_LINK_SHIFT)))){
+ (msrm == (MSR_LINK_MASTER<<MSR_LINK_SHIFT)))) {
write_nic_byte(dev, MSR, msr2 | MSR_LINK_NONE);
write_nic_byte(dev, MSR, msr);
}
}
-void rtl8192_config_rate(struct net_device* dev, u16* rate_config)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
- struct ieee80211_network *net;
- u8 i=0, basic_rate = 0;
- net = & priv->ieee80211->current_network;
-
- for (i=0; i<net->rates_len; i++)
- {
- basic_rate = net->rates[i]&0x7f;
- switch(basic_rate)
- {
- case MGN_1M: *rate_config |= RRSR_1M; break;
- case MGN_2M: *rate_config |= RRSR_2M; break;
- case MGN_5_5M: *rate_config |= RRSR_5_5M; break;
- case MGN_11M: *rate_config |= RRSR_11M; break;
- case MGN_6M: *rate_config |= RRSR_6M; break;
- case MGN_9M: *rate_config |= RRSR_9M; break;
- case MGN_12M: *rate_config |= RRSR_12M; break;
- case MGN_18M: *rate_config |= RRSR_18M; break;
- case MGN_24M: *rate_config |= RRSR_24M; break;
- case MGN_36M: *rate_config |= RRSR_36M; break;
- case MGN_48M: *rate_config |= RRSR_48M; break;
- case MGN_54M: *rate_config |= RRSR_54M; break;
- }
- }
- for (i=0; i<net->rates_ex_len; i++)
- {
- basic_rate = net->rates_ex[i]&0x7f;
- switch(basic_rate)
- {
- case MGN_1M: *rate_config |= RRSR_1M; break;
- case MGN_2M: *rate_config |= RRSR_2M; break;
- case MGN_5_5M: *rate_config |= RRSR_5_5M; break;
- case MGN_11M: *rate_config |= RRSR_11M; break;
- case MGN_6M: *rate_config |= RRSR_6M; break;
- case MGN_9M: *rate_config |= RRSR_9M; break;
- case MGN_12M: *rate_config |= RRSR_12M; break;
- case MGN_18M: *rate_config |= RRSR_18M; break;
- case MGN_24M: *rate_config |= RRSR_24M; break;
- case MGN_36M: *rate_config |= RRSR_36M; break;
- case MGN_48M: *rate_config |= RRSR_48M; break;
- case MGN_54M: *rate_config |= RRSR_54M; break;
- }
- }
+void rtl8192_config_rate(struct net_device *dev, u16 *rate_config)
+{
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ struct ieee80211_network *net;
+ u8 i = 0, basic_rate = 0;
+ net = &priv->ieee80211->current_network;
+
+ for (i = 0; i < net->rates_len; i++) {
+ basic_rate = net->rates[i]&0x7f;
+ switch (basic_rate) {
+ case MGN_1M: *rate_config |= RRSR_1M; break;
+ case MGN_2M: *rate_config |= RRSR_2M; break;
+ case MGN_5_5M: *rate_config |= RRSR_5_5M; break;
+ case MGN_11M: *rate_config |= RRSR_11M; break;
+ case MGN_6M: *rate_config |= RRSR_6M; break;
+ case MGN_9M: *rate_config |= RRSR_9M; break;
+ case MGN_12M: *rate_config |= RRSR_12M; break;
+ case MGN_18M: *rate_config |= RRSR_18M; break;
+ case MGN_24M: *rate_config |= RRSR_24M; break;
+ case MGN_36M: *rate_config |= RRSR_36M; break;
+ case MGN_48M: *rate_config |= RRSR_48M; break;
+ case MGN_54M: *rate_config |= RRSR_54M; break;
+ }
+ }
+ for (i = 0; i < net->rates_ex_len; i++) {
+ basic_rate = net->rates_ex[i]&0x7f;
+ switch (basic_rate) {
+ case MGN_1M: *rate_config |= RRSR_1M; break;
+ case MGN_2M: *rate_config |= RRSR_2M; break;
+ case MGN_5_5M: *rate_config |= RRSR_5_5M; break;
+ case MGN_11M: *rate_config |= RRSR_11M; break;
+ case MGN_6M: *rate_config |= RRSR_6M; break;
+ case MGN_9M: *rate_config |= RRSR_9M; break;
+ case MGN_12M: *rate_config |= RRSR_12M; break;
+ case MGN_18M: *rate_config |= RRSR_18M; break;
+ case MGN_24M: *rate_config |= RRSR_24M; break;
+ case MGN_36M: *rate_config |= RRSR_36M; break;
+ case MGN_48M: *rate_config |= RRSR_48M; break;
+ case MGN_54M: *rate_config |= RRSR_54M; break;
+ }
+ }
}
#define SHORT_SLOT_TIME 9
#define NON_SHORT_SLOT_TIME 20
-void rtl8192_update_cap(struct net_device* dev, u16 cap)
+void rtl8192_update_cap(struct net_device *dev, u16 cap)
{
u32 tmp = 0;
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -1595,13 +1434,10 @@ void rtl8192_update_cap(struct net_device* dev, u16 cap)
tmp |= BRSR_AckShortPmb;
write_nic_dword(dev, RRSR, tmp);
- if (net->mode & (IEEE_G|IEEE_N_24G))
- {
+ if (net->mode & (IEEE_G|IEEE_N_24G)) {
u8 slot_time = 0;
- if ((cap & WLAN_CAPABILITY_SHORT_SLOT)&&(!priv->ieee80211->pHTInfo->bCurrentRT2RTLongSlotTime))
- {//short slot time
+ if ((cap & WLAN_CAPABILITY_SHORT_SLOT) && (!priv->ieee80211->pHTInfo->bCurrentRT2RTLongSlotTime)) //short slot time
slot_time = SHORT_SLOT_TIME;
- }
else //long slot time
slot_time = NON_SHORT_SLOT_TIME;
priv->slot_time = slot_time;
@@ -1616,31 +1452,26 @@ void rtl8192_net_update(struct net_device *dev)
struct ieee80211_network *net;
u16 BcnTimeCfg = 0, BcnCW = 6, BcnIFS = 0xf;
u16 rate_config = 0;
- net = & priv->ieee80211->current_network;
+ net = &priv->ieee80211->current_network;
rtl8192_config_rate(dev, &rate_config);
priv->basic_rate = rate_config &= 0x15f;
- write_nic_dword(dev,BSSIDR,((u32*)net->bssid)[0]);
- write_nic_word(dev,BSSIDR+4,((u16*)net->bssid)[2]);
- //for(i=0;i<ETH_ALEN;i++)
- // write_nic_byte(dev,BSSID+i,net->bssid[i]);
+ write_nic_dword(dev, BSSIDR, ((u32 *)net->bssid)[0]);
+ write_nic_word(dev, BSSIDR+4, ((u16 *)net->bssid)[2]);
rtl8192_update_msr(dev);
-// rtl8192_update_cap(dev, net->capability);
- if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
- {
- write_nic_word(dev, ATIMWND, 2);
- write_nic_word(dev, BCN_DMATIME, 1023);
- write_nic_word(dev, BCN_INTERVAL, net->beacon_interval);
-// write_nic_word(dev, BcnIntTime, 100);
- write_nic_word(dev, BCN_DRV_EARLY_INT, 1);
- write_nic_byte(dev, BCN_ERR_THRESH, 100);
+ if (priv->ieee80211->iw_mode == IW_MODE_ADHOC) {
+ write_nic_word(dev, ATIMWND, 2);
+ write_nic_word(dev, BCN_DMATIME, 1023);
+ write_nic_word(dev, BCN_INTERVAL, net->beacon_interval);
+ write_nic_word(dev, BCN_DRV_EARLY_INT, 1);
+ write_nic_byte(dev, BCN_ERR_THRESH, 100);
BcnTimeCfg |= (BcnCW<<BCN_TCFG_CW_SHIFT);
- // TODO: BcnIFS may required to be changed on ASIC
+		// TODO: BcnIFS may need to be changed on ASIC
BcnTimeCfg |= BcnIFS<<BCN_TCFG_IFS;
- write_nic_word(dev, BCN_TCFG, BcnTimeCfg);
+ write_nic_word(dev, BCN_TCFG, BcnTimeCfg);
}
@@ -1649,46 +1480,37 @@ void rtl8192_net_update(struct net_device *dev)
//temporary hw beacon is not used any more.
//open it when necessary
-void rtl819xusb_beacon_tx(struct net_device *dev,u16 tx_rate)
+void rtl819xusb_beacon_tx(struct net_device *dev, u16 tx_rate)
{
}
inline u8 rtl8192_IsWirelessBMode(u16 rate)
{
- if( ((rate <= 110) && (rate != 60) && (rate != 90)) || (rate == 220) )
+ if (((rate <= 110) && (rate != 60) && (rate != 90)) || (rate == 220))
return 1;
else return 0;
}
u16 N_DBPSOfRate(u16 DataRate);
-u16 ComputeTxTime(
- u16 FrameLength,
- u16 DataRate,
- u8 bManagementFrame,
- u8 bShortPreamble
-)
+u16 ComputeTxTime(u16 FrameLength, u16 DataRate, u8 bManagementFrame,
+ u8 bShortPreamble)
{
u16 FrameTime;
u16 N_DBPS;
u16 Ceiling;
- if( rtl8192_IsWirelessBMode(DataRate) )
- {
- if( bManagementFrame || !bShortPreamble || DataRate == 10 )
- { // long preamble
+ if (rtl8192_IsWirelessBMode(DataRate)) {
+ if (bManagementFrame || !bShortPreamble || DataRate == 10) // long preamble
FrameTime = (u16)(144+48+(FrameLength*8/(DataRate/10)));
- }
- else
- { // Short preamble
+ else // Short preamble
FrameTime = (u16)(72+24+(FrameLength*8/(DataRate/10)));
- }
- if( ( FrameLength*8 % (DataRate/10) ) != 0 ) //Get the Ceilling
- FrameTime ++;
+		if ((FrameLength*8 % (DataRate/10)) != 0) //Get the ceiling
+ FrameTime++;
} else { //802.11g DSSS-OFDM PLCP length field calculation.
N_DBPS = N_DBPSOfRate(DataRate);
Ceiling = (16 + 8*FrameLength + 6) / N_DBPS
- + (((16 + 8*FrameLength + 6) % N_DBPS) ? 1 : 0);
+ + (((16 + 8*FrameLength + 6) % N_DBPS) ? 1 : 0);
FrameTime = (u16)(16 + 4 + 4*Ceiling + 6);
}
return FrameTime;
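
For reference, a standalone sketch of the two airtime formulas used above, evaluated for a sample frame; the function names and the chosen inputs are illustrative only:

#include <stdio.h>

/* DataRate is in units of 100 kbps, as in the driver (10 == 1 Mbps, 540 == 54 Mbps). */
static unsigned int frame_time_11b(unsigned int len, unsigned int rate, int short_preamble)
{
	unsigned int t = (short_preamble && rate != 10) ? 72 + 24 : 144 + 48;

	t += len * 8 / (rate / 10);
	if ((len * 8) % (rate / 10))
		t++;				/* round the payload time up */
	return t;
}

static unsigned int frame_time_ofdm(unsigned int len, unsigned int n_dbps)
{
	unsigned int bits = 16 + 8 * len + 6;	/* SERVICE + payload + tail bits */
	unsigned int ceiling = bits / n_dbps + ((bits % n_dbps) ? 1 : 0);

	return 16 + 4 + 4 * ceiling + 6;	/* preamble + SIGNAL + symbols + extension */
}

int main(void)
{
	/* 1500-byte frame at 11 Mbps, short preamble: 72+24+1090+1 = 1187 us */
	printf("11b:  %u us\n", frame_time_11b(1500, 110, 1));
	/* 1500-byte frame at 54 Mbps (N_DBPS = 216): 16+4+4*56+6 = 250 us */
	printf("ofdm: %u us\n", frame_time_ofdm(1500, 216));
	return 0;
}
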
@@ -1696,47 +1518,46 @@ u16 ComputeTxTime(
u16 N_DBPSOfRate(u16 DataRate)
{
- u16 N_DBPS = 24;
+ u16 N_DBPS = 24;
- switch(DataRate)
- {
- case 60:
- N_DBPS = 24;
- break;
+ switch (DataRate) {
+ case 60:
+ N_DBPS = 24;
+ break;
- case 90:
- N_DBPS = 36;
- break;
+ case 90:
+ N_DBPS = 36;
+ break;
- case 120:
- N_DBPS = 48;
- break;
+ case 120:
+ N_DBPS = 48;
+ break;
- case 180:
- N_DBPS = 72;
- break;
+ case 180:
+ N_DBPS = 72;
+ break;
- case 240:
- N_DBPS = 96;
- break;
+ case 240:
+ N_DBPS = 96;
+ break;
- case 360:
- N_DBPS = 144;
- break;
+ case 360:
+ N_DBPS = 144;
+ break;
- case 480:
- N_DBPS = 192;
- break;
+ case 480:
+ N_DBPS = 192;
+ break;
- case 540:
- N_DBPS = 216;
- break;
+ case 540:
+ N_DBPS = 216;
+ break;
- default:
- break;
- }
+ default:
+ break;
+ }
- return N_DBPS;
+ return N_DBPS;
}
void rtl819xU_cmd_isr(struct urb *tx_cmd_urb, struct pt_regs *regs)
@@ -1744,11 +1565,10 @@ void rtl819xU_cmd_isr(struct urb *tx_cmd_urb, struct pt_regs *regs)
usb_free_urb(tx_cmd_urb);
}
-unsigned int txqueue2outpipe(struct r8192_priv* priv,unsigned int tx_queue) {
-
- if(tx_queue >= 9)
- {
- RT_TRACE(COMP_ERR,"%s():Unknown queue ID!!!\n",__FUNCTION__);
+unsigned int txqueue2outpipe(struct r8192_priv *priv, unsigned int tx_queue)
+{
+ if (tx_queue >= 9) {
+ RT_TRACE(COMP_ERR, "%s():Unknown queue ID!!!\n", __func__);
return 0x04;
}
return priv->txqueue_to_outpipemap[tx_queue];
@@ -1757,19 +1577,16 @@ unsigned int txqueue2outpipe(struct r8192_priv* priv,unsigned int tx_queue) {
short rtl819xU_tx_cmd(struct net_device *dev, struct sk_buff *skb)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- //u8 *tx;
int status;
struct urb *tx_urb;
- //int urb_buf_len;
unsigned int idx_pipe;
tx_desc_cmd_819x_usb *pdesc = (tx_desc_cmd_819x_usb *)skb->data;
cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
u8 queue_index = tcb_desc->queue_index;
- //printk("\n %s::queue_index = %d\n",__FUNCTION__, queue_index);
atomic_inc(&priv->tx_pending[queue_index]);
- tx_urb = usb_alloc_urb(0,GFP_ATOMIC);
- if(!tx_urb){
+ tx_urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!tx_urb) {
dev_kfree_skb(skb);
return -ENOMEM;
}
@@ -1788,27 +1605,26 @@ short rtl819xU_tx_cmd(struct net_device *dev, struct sk_buff *skb)
//----------------------------------------------------------------------------
// Get index to out pipe from specified QueueID.
#ifndef USE_ONE_PIPE
- idx_pipe = txqueue2outpipe(priv,queue_index);
+ idx_pipe = txqueue2outpipe(priv, queue_index);
#else
idx_pipe = 0x04;
#endif
#ifdef JOHN_DUMP_TXDESC
int i;
- printk("<Tx descriptor>--rate %x---",rate);
+ printk("<Tx descriptor>--rate %x---", rate);
for (i = 0; i < 8; i++)
printk("%8x ", tx[i]);
printk("\n");
#endif
- usb_fill_bulk_urb(tx_urb,priv->udev, usb_sndbulkpipe(priv->udev,idx_pipe), \
- skb->data, skb->len, rtl8192_tx_isr, skb);
+ usb_fill_bulk_urb(tx_urb, priv->udev, usb_sndbulkpipe(priv->udev, idx_pipe),
+ skb->data, skb->len, rtl8192_tx_isr, skb);
status = usb_submit_urb(tx_urb, GFP_ATOMIC);
- if (!status){
+ if (!status) {
return 0;
- }else{
- DMESGE("Error TX CMD URB, error %d",
- status);
+ } else {
+ DMESGE("Error TX CMD URB, error %d", status);
return -1;
}
}
@@ -1824,21 +1640,21 @@ u8 MapHwQueueToFirmwareQueue(u8 QueueID)
{
u8 QueueSelect = 0x0; //defualt set to
- switch(QueueID) {
+ switch (QueueID) {
case BE_QUEUE:
- QueueSelect = QSLT_BE; //or QSelect = pTcb->priority;
+ QueueSelect = QSLT_BE;
break;
case BK_QUEUE:
- QueueSelect = QSLT_BK; //or QSelect = pTcb->priority;
+ QueueSelect = QSLT_BK;
break;
case VO_QUEUE:
- QueueSelect = QSLT_VO; //or QSelect = pTcb->priority;
+ QueueSelect = QSLT_VO;
break;
case VI_QUEUE:
- QueueSelect = QSLT_VI; //or QSelect = pTcb->priority;
+ QueueSelect = QSLT_VI;
break;
case MGNT_QUEUE:
QueueSelect = QSLT_MGNT;
@@ -1850,11 +1666,9 @@ u8 MapHwQueueToFirmwareQueue(u8 QueueID)
// TODO: 2006.10.30 mark other queue selection until we verify it is OK
// TODO: Remove Assertions
-//#if (RTL819X_FPGA_VER & RTL819X_FPGA_GUANGAN_070502)
case TXCMD_QUEUE:
QueueSelect = QSLT_CMD;
break;
-//#endif
case HIGH_QUEUE:
QueueSelect = QSLT_HIGH;
break;
@@ -1870,7 +1684,7 @@ u8 MRateToHwRate8190Pci(u8 rate)
{
u8 ret = DESC90_RATE1M;
- switch(rate) {
+ switch (rate) {
case MGN_1M: ret = DESC90_RATE1M; break;
case MGN_2M: ret = DESC90_RATE2M; break;
case MGN_5_5M: ret = DESC90_RATE5_5M; break;
@@ -1913,9 +1727,9 @@ u8 QueryIsShort(u8 TxHT, u8 TxRate, cb_desc *tcb_desc)
{
u8 tmp_Short;
- tmp_Short = (TxHT==1)?((tcb_desc->bUseShortGI)?1:0):((tcb_desc->bUseShortPreamble)?1:0);
+ tmp_Short = (TxHT == 1) ? ((tcb_desc->bUseShortGI) ? 1 : 0) : ((tcb_desc->bUseShortPreamble) ? 1 : 0);
- if(TxHT==1 && TxRate != DESC90_RATEMCS15)
+ if (TxHT == 1 && TxRate != DESC90_RATEMCS15)
tmp_Short = 0;
return tmp_Short;
@@ -1931,7 +1745,7 @@ static void tx_zero_isr(struct urb *tx_urb)
* skb->cb will contain all the following information,
* priority, morefrag, rate, &dev.
* */
-short rtl8192_tx(struct net_device *dev, struct sk_buff* skb)
+short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
{
struct r8192_priv *priv = ieee80211_priv(dev);
cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
@@ -1941,35 +1755,32 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff* skb)
int pend;
int status;
struct urb *tx_urb = NULL, *tx_urb_zero = NULL;
- //int urb_len;
unsigned int idx_pipe;
-// RT_DEBUG_DATA(COMP_SEND, tcb_desc, sizeof(cb_desc));
-// printk("=============> %s\n", __FUNCTION__);
pend = atomic_read(&priv->tx_pending[tcb_desc->queue_index]);
/* we are locked here so the two atomic_read and inc are executed
* without interleaves
* !!! For debug purpose
*/
- if( pend > MAX_TX_URB){
- printk("To discard skb packet!\n");
+ if (pend > MAX_TX_URB) {
+ netdev_dbg(dev, "To discard skb packet!\n");
dev_kfree_skb_any(skb);
return -1;
}
- tx_urb = usb_alloc_urb(0,GFP_ATOMIC);
- if(!tx_urb){
+ tx_urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!tx_urb) {
dev_kfree_skb_any(skb);
return -ENOMEM;
}
/* Fill Tx firmware info */
- memset(tx_fwinfo,0,sizeof(tx_fwinfo_819x_usb));
+ memset(tx_fwinfo, 0, sizeof(tx_fwinfo_819x_usb));
/* DWORD 0 */
- tx_fwinfo->TxHT = (tcb_desc->data_rate&0x80)?1:0;
+ tx_fwinfo->TxHT = (tcb_desc->data_rate&0x80) ? 1 : 0;
tx_fwinfo->TxRate = MRateToHwRate8190Pci(tcb_desc->data_rate);
tx_fwinfo->EnableCPUDur = tcb_desc->bTxEnableFwCalcDur;
tx_fwinfo->Short = QueryIsShort(tx_fwinfo->TxHT, tx_fwinfo->TxRate, tcb_desc);
- if(tcb_desc->bAMPDUEnable) {//AMPDU enabled
+ if (tcb_desc->bAMPDUEnable) {//AMPDU enabled
tx_fwinfo->AllowAggregation = 1;
/* DWORD 1 */
tx_fwinfo->RxMF = tcb_desc->ampdu_factor;
@@ -1982,20 +1793,19 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff* skb)
}
/* Protection mode related */
- tx_fwinfo->RtsEnable = (tcb_desc->bRTSEnable)?1:0;
- tx_fwinfo->CtsEnable = (tcb_desc->bCTSEnable)?1:0;
- tx_fwinfo->RtsSTBC = (tcb_desc->bRTSSTBC)?1:0;
- tx_fwinfo->RtsHT = (tcb_desc->rts_rate&0x80)?1:0;
+ tx_fwinfo->RtsEnable = (tcb_desc->bRTSEnable) ? 1 : 0;
+ tx_fwinfo->CtsEnable = (tcb_desc->bCTSEnable) ? 1 : 0;
+ tx_fwinfo->RtsSTBC = (tcb_desc->bRTSSTBC) ? 1 : 0;
+ tx_fwinfo->RtsHT = (tcb_desc->rts_rate&0x80) ? 1 : 0;
tx_fwinfo->RtsRate = MRateToHwRate8190Pci((u8)tcb_desc->rts_rate);
- tx_fwinfo->RtsSubcarrier = (tx_fwinfo->RtsHT==0)?(tcb_desc->RTSSC):0;
- tx_fwinfo->RtsBandwidth = (tx_fwinfo->RtsHT==1)?((tcb_desc->bRTSBW)?1:0):0;
- tx_fwinfo->RtsShort = (tx_fwinfo->RtsHT==0)?(tcb_desc->bRTSUseShortPreamble?1:0):\
- (tcb_desc->bRTSUseShortGI?1:0);
+ tx_fwinfo->RtsSubcarrier = (tx_fwinfo->RtsHT == 0) ? (tcb_desc->RTSSC) : 0;
+ tx_fwinfo->RtsBandwidth = (tx_fwinfo->RtsHT == 1) ? ((tcb_desc->bRTSBW) ? 1 : 0) : 0;
+ tx_fwinfo->RtsShort = (tx_fwinfo->RtsHT == 0) ? (tcb_desc->bRTSUseShortPreamble ? 1 : 0) :
+ (tcb_desc->bRTSUseShortGI ? 1 : 0);
/* Set Bandwidth and sub-channel settings. */
- if(priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20_40)
- {
- if(tcb_desc->bPacketBW) {
+ if (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20_40) {
+ if (tcb_desc->bPacketBW) {
tx_fwinfo->TxBandwidth = 1;
tx_fwinfo->TxSubCarrier = 0; //By SD3's Jerry suggestion, use duplicated mode
} else {
@@ -2009,9 +1819,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff* skb)
#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
if (tcb_desc->drv_agg_enable)
- {
tx_fwinfo->Tx_INFO_RSVD = (tcb_desc->DrvAggrNum & 0x1f) << 1;
- }
#endif
/* Fill Tx descriptor */
memset(tx_desc, 0, sizeof(tx_desc_819x_usb));
@@ -2021,45 +1829,40 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff* skb)
tx_desc->Offset = sizeof(tx_fwinfo_819x_usb) + 8;
#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
- if (tcb_desc->drv_agg_enable) {
+ if (tcb_desc->drv_agg_enable)
tx_desc->PktSize = tcb_desc->pkt_size;
- } else
+ else
#endif
{
tx_desc->PktSize = (skb->len - TX_PACKET_SHIFT_BYTES) & 0xffff;
}
/*DWORD 1*/
- tx_desc->SecCAMID= 0;
+ tx_desc->SecCAMID = 0;
tx_desc->RATid = tcb_desc->RATRIndex;
- {
- //MPDUOverhead = 0;
- tx_desc->NoEnc = 1;
- }
+ tx_desc->NoEnc = 1;
tx_desc->SecType = 0x0;
- if (tcb_desc->bHwSec)
- {
- switch (priv->ieee80211->pairwise_key_type)
- {
- case KEY_TYPE_WEP40:
- case KEY_TYPE_WEP104:
- tx_desc->SecType = 0x1;
- tx_desc->NoEnc = 0;
- break;
- case KEY_TYPE_TKIP:
- tx_desc->SecType = 0x2;
- tx_desc->NoEnc = 0;
- break;
- case KEY_TYPE_CCMP:
- tx_desc->SecType = 0x3;
- tx_desc->NoEnc = 0;
- break;
- case KEY_TYPE_NA:
- tx_desc->SecType = 0x0;
- tx_desc->NoEnc = 1;
- break;
- }
- }
+ if (tcb_desc->bHwSec) {
+ switch (priv->ieee80211->pairwise_key_type) {
+ case KEY_TYPE_WEP40:
+ case KEY_TYPE_WEP104:
+ tx_desc->SecType = 0x1;
+ tx_desc->NoEnc = 0;
+ break;
+ case KEY_TYPE_TKIP:
+ tx_desc->SecType = 0x2;
+ tx_desc->NoEnc = 0;
+ break;
+ case KEY_TYPE_CCMP:
+ tx_desc->SecType = 0x3;
+ tx_desc->NoEnc = 0;
+ break;
+ case KEY_TYPE_NA:
+ tx_desc->SecType = 0x0;
+ tx_desc->NoEnc = 1;
+ break;
+ }
+ }
tx_desc->QueueSelect = MapHwQueueToFirmwareQueue(tcb_desc->queue_index);
tx_desc->TxFWInfoSize = sizeof(tx_fwinfo_819x_usb);
@@ -2084,48 +1887,41 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff* skb)
}
/* Get index to out pipe from specified QueueID */
#ifndef USE_ONE_PIPE
- idx_pipe = txqueue2outpipe(priv,tcb_desc->queue_index);
+ idx_pipe = txqueue2outpipe(priv, tcb_desc->queue_index);
#else
idx_pipe = 0x5;
#endif
- //RT_DEBUG_DATA(COMP_SEND,tx_fwinfo,sizeof(tx_fwinfo_819x_usb));
- //RT_DEBUG_DATA(COMP_SEND,tx_desc,sizeof(tx_desc_819x_usb));
-
/* To submit bulk urb */
- usb_fill_bulk_urb(tx_urb,udev,
- usb_sndbulkpipe(udev,idx_pipe), skb->data,
- skb->len, rtl8192_tx_isr, skb);
+ usb_fill_bulk_urb(tx_urb, udev,
+ usb_sndbulkpipe(udev, idx_pipe), skb->data,
+ skb->len, rtl8192_tx_isr, skb);
status = usb_submit_urb(tx_urb, GFP_ATOMIC);
- if (!status){
-//we need to send 0 byte packet whenever 512N bytes/64N(HIGN SPEED/NORMAL SPEED) bytes packet has been transmitted. Otherwise, it will be halt to wait for another packet. WB. 2008.08.27
+ if (!status) {
+		//we need to send a 0 byte packet whenever a 512N-byte/64N-byte (HIGH SPEED/NORMAL SPEED) packet has been transmitted. Otherwise, it will halt to wait for another packet. WB. 2008.08.27
bool bSend0Byte = false;
u8 zero = 0;
- if(udev->speed == USB_SPEED_HIGH)
- {
+ if (udev->speed == USB_SPEED_HIGH) {
if (skb->len > 0 && skb->len % 512 == 0)
bSend0Byte = true;
- }
- else
- {
+ } else {
if (skb->len > 0 && skb->len % 64 == 0)
bSend0Byte = true;
}
- if (bSend0Byte)
- {
- tx_urb_zero = usb_alloc_urb(0,GFP_ATOMIC);
- if(!tx_urb_zero){
+ if (bSend0Byte) {
+ tx_urb_zero = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!tx_urb_zero) {
RT_TRACE(COMP_ERR, "can't alloc urb for zero byte\n");
return -ENOMEM;
}
- usb_fill_bulk_urb(tx_urb_zero,udev,
- usb_sndbulkpipe(udev,idx_pipe), &zero,
- 0, tx_zero_isr, dev);
+ usb_fill_bulk_urb(tx_urb_zero, udev,
+ usb_sndbulkpipe(udev, idx_pipe), &zero,
+ 0, tx_zero_isr, dev);
status = usb_submit_urb(tx_urb_zero, GFP_ATOMIC);
- if (status){
- RT_TRACE(COMP_ERR, "Error TX URB for zero byte %d, error %d", atomic_read(&priv->tx_pending[tcb_desc->queue_index]), status);
- return -1;
+ if (status) {
+ RT_TRACE(COMP_ERR, "Error TX URB for zero byte %d, error %d", atomic_read(&priv->tx_pending[tcb_desc->queue_index]), status);
+ return -1;
}
}
dev->trans_start = jiffies;
@@ -2133,7 +1929,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff* skb)
return 0;
} else {
RT_TRACE(COMP_ERR, "Error TX URB %d, error %d", atomic_read(&priv->tx_pending[tcb_desc->queue_index]),
- status);
+ status);
return -1;
}
}
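
The zero-byte follow-up above reflects the usual USB bulk-out rule: a transfer whose length is an exact multiple of the endpoint's max packet size (512 bytes at high speed, 64 at full speed) needs a trailing zero-length packet to terminate it. A standalone sketch of just that check (names are illustrative, not driver API):

#include <stdbool.h>
#include <stdio.h>

/* Decide whether a bulk-out transfer of 'len' bytes must be followed by a
 * zero-length packet so the other side knows the transfer is complete. */
static bool needs_zero_length_packet(unsigned int len, bool high_speed)
{
	unsigned int max_packet = high_speed ? 512 : 64;

	return len > 0 && (len % max_packet) == 0;
}

int main(void)
{
	printf("1024 bytes @ high speed: %d\n", needs_zero_length_packet(1024, true));  /* 1 */
	printf("1500 bytes @ high speed: %d\n", needs_zero_length_packet(1500, true));  /* 0 */
	printf("128 bytes  @ full speed: %d\n", needs_zero_length_packet(128, false));  /* 1 */
	return 0;
}
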
@@ -2143,14 +1939,14 @@ short rtl8192_usb_initendpoints(struct net_device *dev)
struct r8192_priv *priv = ieee80211_priv(dev);
priv->rx_urb = kmalloc(sizeof(struct urb *) * (MAX_RX_URB+1),
- GFP_KERNEL);
+ GFP_KERNEL);
if (priv->rx_urb == NULL)
return -ENOMEM;
#ifndef JACKSON_NEW_RX
- for(i=0;i<(MAX_RX_URB+1);i++){
+ for (i = 0; i < (MAX_RX_URB+1); i++) {
- priv->rx_urb[i] = usb_alloc_urb(0,GFP_KERNEL);
+ priv->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
priv->rx_urb[i]->transfer_buffer = kmalloc(RX_URB_SIZE, GFP_KERNEL);
@@ -2159,26 +1955,26 @@ short rtl8192_usb_initendpoints(struct net_device *dev)
#endif
#ifdef THOMAS_BEACON
-{
- long align = 0;
- void *oldaddr, *newaddr;
-
- priv->rx_urb[16] = usb_alloc_urb(0, GFP_KERNEL);
- priv->oldaddr = kmalloc(16, GFP_KERNEL);
- oldaddr = priv->oldaddr;
- align = ((long)oldaddr) & 3;
- if (align) {
- newaddr = oldaddr + 4 - align;
- priv->rx_urb[16]->transfer_buffer_length = 16 - 4 + align;
- } else {
- newaddr = oldaddr;
- priv->rx_urb[16]->transfer_buffer_length = 16;
+ {
+ long align = 0;
+ void *oldaddr, *newaddr;
+
+ priv->rx_urb[16] = usb_alloc_urb(0, GFP_KERNEL);
+ priv->oldaddr = kmalloc(16, GFP_KERNEL);
+ oldaddr = priv->oldaddr;
+ align = ((long)oldaddr) & 3;
+ if (align) {
+ newaddr = oldaddr + 4 - align;
+ priv->rx_urb[16]->transfer_buffer_length = 16 - 4 + align;
+ } else {
+ newaddr = oldaddr;
+ priv->rx_urb[16]->transfer_buffer_length = 16;
+ }
+ priv->rx_urb[16]->transfer_buffer = newaddr;
}
- priv->rx_urb[16]->transfer_buffer = newaddr;
-}
#endif
- memset(priv->rx_urb, 0, sizeof(struct urb*) * MAX_RX_URB);
+ memset(priv->rx_urb, 0, sizeof(struct urb *) * MAX_RX_URB);
priv->pp_rxskb = kcalloc(MAX_RX_URB, sizeof(struct sk_buff *),
GFP_KERNEL);
if (!priv->pp_rxskb) {
@@ -2191,7 +1987,7 @@ short rtl8192_usb_initendpoints(struct net_device *dev)
return -ENOMEM;
}
- printk("End of initendpoints\n");
+ netdev_dbg(dev, "End of initendpoints\n");
return 0;
}
@@ -2201,8 +1997,8 @@ void rtl8192_usb_deleteendpoints(struct net_device *dev)
int i;
struct r8192_priv *priv = ieee80211_priv(dev);
- if(priv->rx_urb){
- for(i=0;i<(MAX_RX_URB+1);i++){
+ if (priv->rx_urb) {
+ for (i = 0; i < (MAX_RX_URB+1); i++) {
usb_kill_urb(priv->rx_urb[i]);
usb_free_urb(priv->rx_urb[i]);
}
@@ -2224,8 +2020,8 @@ void rtl8192_usb_deleteendpoints(struct net_device *dev)
#ifndef JACKSON_NEW_RX
- if(priv->rx_urb){
- for(i=0;i<(MAX_RX_URB+1);i++){
+ if (priv->rx_urb) {
+ for (i = 0; i < (MAX_RX_URB+1); i++) {
usb_kill_urb(priv->rx_urb[i]);
kfree(priv->rx_urb[i]->transfer_buffer);
usb_free_urb(priv->rx_urb[i]);
@@ -2249,54 +2045,45 @@ void rtl8192_usb_deleteendpoints(struct net_device *dev)
}
#endif
-extern void rtl8192_update_ratr_table(struct net_device* dev);
+extern void rtl8192_update_ratr_table(struct net_device *dev);
void rtl8192_link_change(struct net_device *dev)
{
-// int i;
-
struct r8192_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device* ieee = priv->ieee80211;
- //write_nic_word(dev, BCN_INTR_ITV, net->beacon_interval);
- if (ieee->state == IEEE80211_LINKED)
- {
+ struct ieee80211_device *ieee = priv->ieee80211;
+ if (ieee->state == IEEE80211_LINKED) {
rtl8192_net_update(dev);
rtl8192_update_ratr_table(dev);
//add this as in pure N mode, wep encryption will use software way, but there is no chance to set this as wep will not set group key in wext. WB.2008.07.08
if ((KEY_TYPE_WEP40 == ieee->pairwise_key_type) || (KEY_TYPE_WEP104 == ieee->pairwise_key_type))
- EnableHWSecurityConfig8192(dev);
+ EnableHWSecurityConfig8192(dev);
}
/*update timing params*/
-// RT_TRACE(COMP_CH, "========>%s(), chan:%d\n", __FUNCTION__, priv->chan);
-// rtl8192_set_chan(dev, priv->chan);
- if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC)
- {
+ if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC) {
u32 reg = 0;
- reg = read_nic_dword(dev, RCR);
+ read_nic_dword(dev, RCR, &reg);
if (priv->ieee80211->state == IEEE80211_LINKED)
priv->ReceiveConfig = reg |= RCR_CBSSID;
else
priv->ReceiveConfig = reg &= ~RCR_CBSSID;
write_nic_dword(dev, RCR, reg);
}
-
-// rtl8192_set_rxconf(dev);
}
static struct ieee80211_qos_parameters def_qos_parameters = {
- {3,3,3,3},/* cw_min */
- {7,7,7,7},/* cw_max */
- {2,2,2,2},/* aifs */
- {0,0,0,0},/* flags */
- {0,0,0,0} /* tx_op_limit */
+ {3, 3, 3, 3},/* cw_min */
+ {7, 7, 7, 7},/* cw_max */
+ {2, 2, 2, 2},/* aifs */
+ {0, 0, 0, 0},/* flags */
+ {0, 0, 0, 0} /* tx_op_limit */
};
-void rtl8192_update_beacon(struct work_struct * work)
+void rtl8192_update_beacon(struct work_struct *work)
{
struct r8192_priv *priv = container_of(work, struct r8192_priv, update_beacon_wq.work);
struct net_device *dev = priv->ieee80211->dev;
- struct ieee80211_device* ieee = priv->ieee80211;
- struct ieee80211_network* net = &ieee->current_network;
+ struct ieee80211_device *ieee = priv->ieee80211;
+ struct ieee80211_network *net = &ieee->current_network;
if (ieee->pHTInfo->bCurrentHTSupport)
HTUpdateSelfAndPeerSetting(ieee, net);
@@ -2306,14 +2093,13 @@ void rtl8192_update_beacon(struct work_struct * work)
/*
* background support to run QoS activate functionality
*/
-int WDCAPARA_ADD[] = {EDCAPARA_BE,EDCAPARA_BK,EDCAPARA_VI,EDCAPARA_VO};
-void rtl8192_qos_activate(struct work_struct * work)
+int WDCAPARA_ADD[] = {EDCAPARA_BE, EDCAPARA_BK, EDCAPARA_VI, EDCAPARA_VO};
+void rtl8192_qos_activate(struct work_struct *work)
{
struct r8192_priv *priv = container_of(work, struct r8192_priv, qos_activate);
struct net_device *dev = priv->ieee80211->dev;
struct ieee80211_qos_parameters *qos_parameters = &priv->ieee80211->current_network.qos_data.parameters;
u8 mode = priv->ieee80211->current_network.mode;
- //u32 size = sizeof(struct ieee80211_qos_parameters);
u8 u1bAIFS;
u32 u4bAcParam;
int i;
@@ -2321,37 +2107,36 @@ void rtl8192_qos_activate(struct work_struct * work)
if (priv == NULL)
return;
- mutex_lock(&priv->mutex);
- if(priv->ieee80211->state != IEEE80211_LINKED)
+ mutex_lock(&priv->mutex);
+ if (priv->ieee80211->state != IEEE80211_LINKED)
goto success;
- RT_TRACE(COMP_QOS,"qos active process with associate response received\n");
+ RT_TRACE(COMP_QOS, "qos active process with associate response received\n");
/* It better set slot time at first */
/* For we just support b/g mode at present, let the slot time at 9/20 selection */
/* update the ac parameter to related registers */
- for(i = 0; i < QOS_QUEUE_NUM; i++) {
+ for (i = 0; i < QOS_QUEUE_NUM; i++) {
//Mode G/A: slotTimeTimer = 9; Mode B: 20
- u1bAIFS = qos_parameters->aifs[i] * ((mode&(IEEE_G|IEEE_N_24G)) ?9:20) + aSifsTime;
+ u1bAIFS = qos_parameters->aifs[i] * ((mode&(IEEE_G|IEEE_N_24G)) ? 9 : 20) + aSifsTime;
u4bAcParam = ((((u32)(qos_parameters->tx_op_limit[i]))<< AC_PARAM_TXOP_LIMIT_OFFSET)|
- (((u32)(qos_parameters->cw_max[i]))<< AC_PARAM_ECW_MAX_OFFSET)|
- (((u32)(qos_parameters->cw_min[i]))<< AC_PARAM_ECW_MIN_OFFSET)|
- ((u32)u1bAIFS << AC_PARAM_AIFS_OFFSET));
+ (((u32)(qos_parameters->cw_max[i]))<< AC_PARAM_ECW_MAX_OFFSET)|
+ (((u32)(qos_parameters->cw_min[i]))<< AC_PARAM_ECW_MIN_OFFSET)|
+ ((u32)u1bAIFS << AC_PARAM_AIFS_OFFSET));
write_nic_dword(dev, WDCAPARA_ADD[i], u4bAcParam);
- //write_nic_dword(dev, WDCAPARA_ADD[i], 0x005e4332);
}
success:
- mutex_unlock(&priv->mutex);
+ mutex_unlock(&priv->mutex);
}
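
For illustration, a standalone sketch of packing one EDCA parameter dword from the per-AC fields as above; the bit offsets and the SIFS value used here are assumptions for the example, not taken from this patch:

#include <stdint.h>
#include <stdio.h>

/* Assumed field offsets for the example; the driver uses its own
 * AC_PARAM_*_OFFSET definitions from its register header. */
#define EX_AIFS_OFFSET		0
#define EX_ECW_MIN_OFFSET	8
#define EX_ECW_MAX_OFFSET	12
#define EX_TXOP_OFFSET		16

static uint32_t pack_ac_param(uint32_t txop, uint32_t ecw_max, uint32_t ecw_min,
			      uint32_t aifsn, int is_g_mode, uint32_t sifs)
{
	/* AIFS in microseconds: AIFSN slots (9 us in G/N, 20 us in B) plus SIFS */
	uint32_t aifs = aifsn * (is_g_mode ? 9 : 20) + sifs;

	return (txop << EX_TXOP_OFFSET) |
	       (ecw_max << EX_ECW_MAX_OFFSET) |
	       (ecw_min << EX_ECW_MIN_OFFSET) |
	       (aifs << EX_AIFS_OFFSET);
}

int main(void)
{
	/* Best-effort defaults from def_qos_parameters: aifs=2, cw_min=3, cw_max=7, txop=0 */
	printf("AC param = 0x%08x\n", pack_ac_param(0, 7, 3, 2, 1, 10));
	return 0;
}
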
static int rtl8192_qos_handle_probe_response(struct r8192_priv *priv,
- int active_network,
- struct ieee80211_network *network)
+ int active_network,
+ struct ieee80211_network *network)
{
int ret = 0;
u32 size = sizeof(struct ieee80211_qos_parameters);
- if(priv->ieee80211->state !=IEEE80211_LINKED)
+ if (priv->ieee80211->state != IEEE80211_LINKED)
return ret;
if ((priv->ieee80211->iw_mode != IW_MODE_INFRA))
@@ -2359,21 +2144,21 @@ static int rtl8192_qos_handle_probe_response(struct r8192_priv *priv,
if (network->flags & NETWORK_HAS_QOS_MASK) {
if (active_network &&
- (network->flags & NETWORK_HAS_QOS_PARAMETERS))
+ (network->flags & NETWORK_HAS_QOS_PARAMETERS))
network->qos_data.active = network->qos_data.supported;
if ((network->qos_data.active == 1) && (active_network == 1) &&
- (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
- (network->qos_data.old_param_count !=
- network->qos_data.param_count)) {
+ (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
+ (network->qos_data.old_param_count !=
+ network->qos_data.param_count)) {
network->qos_data.old_param_count =
network->qos_data.param_count;
queue_work(priv->priv_wq, &priv->qos_activate);
- RT_TRACE (COMP_QOS, "QoS parameters change call "
- "qos_activate\n");
+ RT_TRACE(COMP_QOS, "QoS parameters change call "
+ "qos_activate\n");
}
} else {
- memcpy(&priv->ieee80211->current_network.qos_data.parameters,\
+ memcpy(&priv->ieee80211->current_network.qos_data.parameters,
&def_qos_parameters, size);
if ((network->qos_data.active == 1) && (active_network == 1)) {
@@ -2388,13 +2173,13 @@ static int rtl8192_qos_handle_probe_response(struct r8192_priv *priv,
}
/* handle and manage frame from beacon and probe response */
-static int rtl8192_handle_beacon(struct net_device * dev,
- struct ieee80211_beacon * beacon,
- struct ieee80211_network * network)
+static int rtl8192_handle_beacon(struct net_device *dev,
+ struct ieee80211_beacon *beacon,
+ struct ieee80211_network *network)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- rtl8192_qos_handle_probe_response(priv,1,network);
+ rtl8192_qos_handle_probe_response(priv, 1, network);
queue_delayed_work(priv->priv_wq, &priv->update_beacon_wq, 0);
return 0;
@@ -2406,7 +2191,7 @@ static int rtl8192_handle_beacon(struct net_device * dev,
* setting
*/
static int rtl8192_qos_association_resp(struct r8192_priv *priv,
- struct ieee80211_network *network)
+ struct ieee80211_network *network)
{
int ret = 0;
unsigned long flags;
@@ -2416,28 +2201,26 @@ static int rtl8192_qos_association_resp(struct r8192_priv *priv,
if ((priv == NULL) || (network == NULL))
return ret;
- if(priv->ieee80211->state !=IEEE80211_LINKED)
+ if (priv->ieee80211->state != IEEE80211_LINKED)
return ret;
if ((priv->ieee80211->iw_mode != IW_MODE_INFRA))
return ret;
spin_lock_irqsave(&priv->ieee80211->lock, flags);
- if(network->flags & NETWORK_HAS_QOS_PARAMETERS) {
- memcpy(&priv->ieee80211->current_network.qos_data.parameters,\
- &network->qos_data.parameters,\
- sizeof(struct ieee80211_qos_parameters));
+ if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
+ memcpy(&priv->ieee80211->current_network.qos_data.parameters,
+ &network->qos_data.parameters,
+ sizeof(struct ieee80211_qos_parameters));
priv->ieee80211->current_network.qos_data.active = 1;
- {
- set_qos_param = 1;
- /* update qos parameter for current network */
- priv->ieee80211->current_network.qos_data.old_param_count = \
- priv->ieee80211->current_network.qos_data.param_count;
- priv->ieee80211->current_network.qos_data.param_count = \
- network->qos_data.param_count;
- }
+ set_qos_param = 1;
+ /* update qos parameter for current network */
+ priv->ieee80211->current_network.qos_data.old_param_count =
+ priv->ieee80211->current_network.qos_data.param_count;
+ priv->ieee80211->current_network.qos_data.param_count =
+ network->qos_data.param_count;
} else {
- memcpy(&priv->ieee80211->current_network.qos_data.parameters,\
+ memcpy(&priv->ieee80211->current_network.qos_data.parameters,
&def_qos_parameters, size);
priv->ieee80211->current_network.qos_data.active = 0;
priv->ieee80211->current_network.qos_data.supported = 0;
@@ -2446,7 +2229,7 @@ static int rtl8192_qos_association_resp(struct r8192_priv *priv,
spin_unlock_irqrestore(&priv->ieee80211->lock, flags);
- RT_TRACE(COMP_QOS, "%s: network->flags = %d,%d\n",__FUNCTION__,network->flags ,priv->ieee80211->current_network.qos_data.active);
+ RT_TRACE(COMP_QOS, "%s: network->flags = %d,%d\n", __func__, network->flags, priv->ieee80211->current_network.qos_data.active);
if (set_qos_param == 1)
queue_work(priv->priv_wq, &priv->qos_activate);
@@ -2456,8 +2239,8 @@ static int rtl8192_qos_association_resp(struct r8192_priv *priv,
static int rtl8192_handle_assoc_response(struct net_device *dev,
- struct ieee80211_assoc_response_frame *resp,
- struct ieee80211_network *network)
+ struct ieee80211_assoc_response_frame *resp,
+ struct ieee80211_network *network)
{
struct r8192_priv *priv = ieee80211_priv(dev);
rtl8192_qos_association_resp(priv, network);
@@ -2465,79 +2248,70 @@ static int rtl8192_handle_assoc_response(struct net_device *dev,
}
-void rtl8192_update_ratr_table(struct net_device* dev)
- // POCTET_STRING posLegacyRate,
- // u8* pMcsRate)
- // PRT_WLAN_STA pEntry)
+void rtl8192_update_ratr_table(struct net_device *dev)
{
- struct r8192_priv* priv = ieee80211_priv(dev);
- struct ieee80211_device* ieee = priv->ieee80211;
- u8* pMcsRate = ieee->dot11HTOperationalRateSet;
- //struct ieee80211_network *net = &ieee->current_network;
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ struct ieee80211_device *ieee = priv->ieee80211;
+ u8 *pMcsRate = ieee->dot11HTOperationalRateSet;
u32 ratr_value = 0;
u8 rate_index = 0;
- rtl8192_config_rate(dev, (u16*)(&ratr_value));
- ratr_value |= (*(u16*)(pMcsRate)) << 12;
-// switch (net->mode)
- switch (ieee->mode)
- {
- case IEEE_A:
- ratr_value &= 0x00000FF0;
- break;
- case IEEE_B:
- ratr_value &= 0x0000000F;
- break;
- case IEEE_G:
- ratr_value &= 0x00000FF7;
- break;
- case IEEE_N_24G:
- case IEEE_N_5G:
- if (ieee->pHTInfo->PeerMimoPs == 0) //MIMO_PS_STATIC
- ratr_value &= 0x0007F007;
- else{
- if (priv->rf_type == RF_1T2R)
- ratr_value &= 0x000FF007;
- else
- ratr_value &= 0x0F81F007;
- }
- break;
- default:
- break;
+ rtl8192_config_rate(dev, (u16 *)(&ratr_value));
+ ratr_value |= (*(u16 *)(pMcsRate)) << 12;
+ switch (ieee->mode) {
+ case IEEE_A:
+ ratr_value &= 0x00000FF0;
+ break;
+ case IEEE_B:
+ ratr_value &= 0x0000000F;
+ break;
+ case IEEE_G:
+ ratr_value &= 0x00000FF7;
+ break;
+ case IEEE_N_24G:
+ case IEEE_N_5G:
+ if (ieee->pHTInfo->PeerMimoPs == 0) {//MIMO_PS_STATIC
+ ratr_value &= 0x0007F007;
+ } else {
+ if (priv->rf_type == RF_1T2R)
+ ratr_value &= 0x000FF007;
+ else
+ ratr_value &= 0x0F81F007;
+ }
+ break;
+ default:
+ break;
}
ratr_value &= 0x0FFFFFFF;
- if(ieee->pHTInfo->bCurTxBW40MHz && ieee->pHTInfo->bCurShortGI40MHz){
+ if (ieee->pHTInfo->bCurTxBW40MHz && ieee->pHTInfo->bCurShortGI40MHz)
ratr_value |= 0x80000000;
- }else if(!ieee->pHTInfo->bCurTxBW40MHz && ieee->pHTInfo->bCurShortGI20MHz){
+ else if (!ieee->pHTInfo->bCurTxBW40MHz && ieee->pHTInfo->bCurShortGI20MHz)
ratr_value |= 0x80000000;
- }
write_nic_dword(dev, RATR0+rate_index*4, ratr_value);
write_nic_byte(dev, UFWP, 1);
}
-static u8 ccmp_ie[4] = {0x00,0x50,0xf2,0x04};
+static u8 ccmp_ie[4] = {0x00, 0x50, 0xf2, 0x04};
static u8 ccmp_rsn_ie[4] = {0x00, 0x0f, 0xac, 0x04};
-bool GetNmodeSupportBySecCfg8192(struct net_device*dev)
+bool GetNmodeSupportBySecCfg8192(struct net_device *dev)
{
- struct r8192_priv* priv = ieee80211_priv(dev);
- struct ieee80211_device* ieee = priv->ieee80211;
- struct ieee80211_network * network = &ieee->current_network;
- int wpa_ie_len= ieee->wpa_ie_len;
- struct ieee80211_crypt_data* crypt;
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ struct ieee80211_device *ieee = priv->ieee80211;
+ struct ieee80211_network *network = &ieee->current_network;
+ int wpa_ie_len = ieee->wpa_ie_len;
+ struct ieee80211_crypt_data *crypt;
int encrypt;
crypt = ieee->crypt[ieee->tx_keyidx];
//we use connecting AP's capability instead of only security config on our driver to distinguish whether it should use N mode or G mode
- encrypt = (network->capability & WLAN_CAPABILITY_PRIVACY) || (ieee->host_encrypt && crypt && crypt->ops && (0 == strcmp(crypt->ops->name,"WEP")));
+ encrypt = (network->capability & WLAN_CAPABILITY_PRIVACY) || (ieee->host_encrypt && crypt && crypt->ops && (0 == strcmp(crypt->ops->name, "WEP")));
/* simply judge */
- if(encrypt && (wpa_ie_len == 0)) {
+ if (encrypt && (wpa_ie_len == 0)) {
/* wep encryption, no N mode setting */
return false;
-// } else if((wpa_ie_len != 0)&&(memcmp(&(ieee->wpa_ie[14]),ccmp_ie,4))) {
- } else if((wpa_ie_len != 0)) {
+ } else if ((wpa_ie_len != 0)) {
/* parse pairwise key type */
- //if((pairwisekey = WEP40)||(pairwisekey = WEP104)||(pairwisekey = TKIP))
- if (((ieee->wpa_ie[0] == 0xdd) && (!memcmp(&(ieee->wpa_ie[14]),ccmp_ie,4))) || ((ieee->wpa_ie[0] == 0x30) && (!memcmp(&ieee->wpa_ie[10],ccmp_rsn_ie, 4))))
+ if (((ieee->wpa_ie[0] == 0xdd) && (!memcmp(&(ieee->wpa_ie[14]), ccmp_ie, 4))) || ((ieee->wpa_ie[0] == 0x30) && (!memcmp(&ieee->wpa_ie[10], ccmp_rsn_ie, 4))))
return true;
else
return false;
@@ -2548,13 +2322,13 @@ bool GetNmodeSupportBySecCfg8192(struct net_device*dev)
return true;
}
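
A minimal standalone sketch of the decision GetNmodeSupportBySecCfg8192() makes above: WEP or TKIP as the pairwise cipher keeps the card out of N mode, while CCMP or an open network allows it. The enum and helper below are stand-ins for the example, not driver API:

#include <stdbool.h>
#include <stdio.h>

enum pairwise_cipher { CIPHER_NONE, CIPHER_WEP, CIPHER_TKIP, CIPHER_CCMP };

/* 802.11n disallows HT operation with WEP/TKIP pairwise keys, so only an open
 * network or a CCMP pairwise cipher may keep N mode enabled. */
static bool n_mode_allowed(bool privacy, enum pairwise_cipher cipher)
{
	if (!privacy)
		return true;
	return cipher == CIPHER_CCMP;
}

int main(void)
{
	printf("open      : %d\n", n_mode_allowed(false, CIPHER_NONE)); /* 1 */
	printf("WEP       : %d\n", n_mode_allowed(true, CIPHER_WEP));   /* 0 */
	printf("WPA-TKIP  : %d\n", n_mode_allowed(true, CIPHER_TKIP));  /* 0 */
	printf("WPA2-CCMP : %d\n", n_mode_allowed(true, CIPHER_CCMP));  /* 1 */
	return 0;
}
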
-bool GetHalfNmodeSupportByAPs819xUsb(struct net_device* dev)
+bool GetHalfNmodeSupportByAPs819xUsb(struct net_device *dev)
{
bool Reval;
- struct r8192_priv* priv = ieee80211_priv(dev);
- struct ieee80211_device* ieee = priv->ieee80211;
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ struct ieee80211_device *ieee = priv->ieee80211;
- if(ieee->bHalfWirelessN24GMode == true)
+ if (ieee->bHalfWirelessN24GMode == true)
Reval = true;
else
Reval = false;
@@ -2562,75 +2336,59 @@ bool GetHalfNmodeSupportByAPs819xUsb(struct net_device* dev)
return Reval;
}
-void rtl8192_refresh_supportrate(struct r8192_priv* priv)
+void rtl8192_refresh_supportrate(struct r8192_priv *priv)
{
- struct ieee80211_device* ieee = priv->ieee80211;
+ struct ieee80211_device *ieee = priv->ieee80211;
//we do not consider set support rate for ABG mode, only HT MCS rate is set here.
if (ieee->mode == WIRELESS_MODE_N_24G || ieee->mode == WIRELESS_MODE_N_5G)
- {
memcpy(ieee->Regdot11HTOperationalRateSet, ieee->RegHTSuppRateSet, 16);
- //RT_DEBUG_DATA(COMP_INIT, ieee->RegHTSuppRateSet, 16);
- //RT_DEBUG_DATA(COMP_INIT, ieee->Regdot11HTOperationalRateSet, 16);
- }
else
memset(ieee->Regdot11HTOperationalRateSet, 0, 16);
return;
}
-u8 rtl8192_getSupportedWireleeMode(struct net_device*dev)
+u8 rtl8192_getSupportedWireleeMode(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 ret = 0;
- switch(priv->rf_chip)
- {
- case RF_8225:
- case RF_8256:
- case RF_PSEUDO_11N:
- ret = (WIRELESS_MODE_N_24G|WIRELESS_MODE_G|WIRELESS_MODE_B);
- break;
- case RF_8258:
- ret = (WIRELESS_MODE_A|WIRELESS_MODE_N_5G);
- break;
- default:
- ret = WIRELESS_MODE_B;
- break;
+ switch (priv->rf_chip) {
+ case RF_8225:
+ case RF_8256:
+ case RF_PSEUDO_11N:
+ ret = (WIRELESS_MODE_N_24G|WIRELESS_MODE_G|WIRELESS_MODE_B);
+ break;
+ case RF_8258:
+ ret = (WIRELESS_MODE_A|WIRELESS_MODE_N_5G);
+ break;
+ default:
+ ret = WIRELESS_MODE_B;
+ break;
}
return ret;
}
-void rtl8192_SetWirelessMode(struct net_device* dev, u8 wireless_mode)
+void rtl8192_SetWirelessMode(struct net_device *dev, u8 wireless_mode)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 bSupportMode = rtl8192_getSupportedWireleeMode(dev);
- if ((wireless_mode == WIRELESS_MODE_AUTO) || ((wireless_mode&bSupportMode)==0))
- {
- if(bSupportMode & WIRELESS_MODE_N_24G)
- {
+ if ((wireless_mode == WIRELESS_MODE_AUTO) || ((wireless_mode&bSupportMode) == 0)) {
+ if (bSupportMode & WIRELESS_MODE_N_24G) {
wireless_mode = WIRELESS_MODE_N_24G;
- }
- else if(bSupportMode & WIRELESS_MODE_N_5G)
- {
+ } else if (bSupportMode & WIRELESS_MODE_N_5G) {
wireless_mode = WIRELESS_MODE_N_5G;
- }
- else if((bSupportMode & WIRELESS_MODE_A))
- {
+ } else if ((bSupportMode & WIRELESS_MODE_A)) {
wireless_mode = WIRELESS_MODE_A;
- }
- else if((bSupportMode & WIRELESS_MODE_G))
- {
+ } else if ((bSupportMode & WIRELESS_MODE_G)) {
wireless_mode = WIRELESS_MODE_G;
- }
- else if((bSupportMode & WIRELESS_MODE_B))
- {
+ } else if ((bSupportMode & WIRELESS_MODE_B)) {
wireless_mode = WIRELESS_MODE_B;
- }
- else{
- RT_TRACE(COMP_ERR, "%s(), No valid wireless mode supported, SupportedWirelessMode(%x)!!!\n", __FUNCTION__,bSupportMode);
+ } else {
+ RT_TRACE(COMP_ERR, "%s(), No valid wireless mode supported, SupportedWirelessMode(%x)!!!\n", __func__, bSupportMode);
wireless_mode = WIRELESS_MODE_B;
}
}
#ifdef TO_DO_LIST //// TODO: this function doesn't work well at this time, we should wait for FPGA
- ActUpdateChannelAccessSetting( pAdapter, pHalData->CurrentWirelessMode, &pAdapter->MgntInfo.Info8185.ChannelAccessSetting );
+ ActUpdateChannelAccessSetting(pAdapter, pHalData->CurrentWirelessMode, &pAdapter->MgntInfo.Info8185.ChannelAccessSetting);
#endif
priv->ieee80211->mode = wireless_mode;
@@ -2643,7 +2401,7 @@ void rtl8192_SetWirelessMode(struct net_device* dev, u8 wireless_mode)
}
//init priv variables here. only non_zero value should be initialized here.
-static void rtl8192_init_priv_variable(struct net_device* dev)
+static void rtl8192_init_priv_variable(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 i;
@@ -2651,13 +2409,13 @@ static void rtl8192_init_priv_variable(struct net_device* dev)
priv->chan = 1; //set to channel 1
priv->ieee80211->mode = WIRELESS_MODE_AUTO; //SET AUTO
priv->ieee80211->iw_mode = IW_MODE_INFRA;
- priv->ieee80211->ieee_up=0;
+ priv->ieee80211->ieee_up = 0;
priv->retry_rts = DEFAULT_RETRY_RTS;
priv->retry_data = DEFAULT_RETRY_DATA;
priv->ieee80211->rts = DEFAULT_RTS_THRESHOLD;
priv->ieee80211->rate = 110; //11 mbps
priv->ieee80211->short_slot = 1;
- priv->promisc = (dev->flags & IFF_PROMISC) ? 1:0;
+ priv->promisc = (dev->flags & IFF_PROMISC) ? 1 : 0;
priv->CckPwEnl = 6;
//for silent reset
priv->IrpPendingCount = 1;
@@ -2672,14 +2430,14 @@ static void rtl8192_init_priv_variable(struct net_device* dev)
priv->ieee80211->softmac_features = IEEE_SOFTMAC_SCAN |
IEEE_SOFTMAC_ASSOCIATE | IEEE_SOFTMAC_PROBERQ |
IEEE_SOFTMAC_PROBERS | IEEE_SOFTMAC_TX_QUEUE |
- IEEE_SOFTMAC_BEACONS;//added by amy 080604 //| //IEEE_SOFTMAC_SINGLE_QUEUE;
+ IEEE_SOFTMAC_BEACONS;//added by amy 080604
priv->ieee80211->active_scan = 1;
priv->ieee80211->modulation = IEEE80211_CCK_MODULATION | IEEE80211_OFDM_MODULATION;
priv->ieee80211->host_encrypt = 1;
priv->ieee80211->host_decrypt = 1;
- priv->ieee80211->start_send_beacons = NULL;//rtl819xusb_beacon_tx;//-by amy 080604
- priv->ieee80211->stop_send_beacons = NULL;//rtl8192_beacon_stop;//-by amy 080604
+ priv->ieee80211->start_send_beacons = NULL; //-by amy 080604
+ priv->ieee80211->stop_send_beacons = NULL; //-by amy 080604
priv->ieee80211->softmac_hard_start_xmit = rtl8192_hard_start_xmit;
priv->ieee80211->set_chan = rtl8192_set_chan;
priv->ieee80211->link_change = rtl8192_link_change;
@@ -2693,7 +2451,6 @@ static void rtl8192_init_priv_variable(struct net_device* dev)
priv->ieee80211->qos_support = 1;
//added by WB
-// priv->ieee80211->SwChnlByTimerHandler = rtl8192_phy_SwChnl;
priv->ieee80211->SetBWModeHandler = rtl8192_SetBWMode;
priv->ieee80211->handle_assoc_response = rtl8192_handle_assoc_response;
priv->ieee80211->handle_beacon = rtl8192_handle_beacon;
@@ -2705,36 +2462,31 @@ static void rtl8192_init_priv_variable(struct net_device* dev)
priv->ieee80211->InitialGainHandler = InitialGain819xUsb;
priv->card_type = USB;
#ifdef TO_DO_LIST
- if(Adapter->bInHctTest)
- {
+ if (Adapter->bInHctTest) {
pHalData->ShortRetryLimit = 7;
pHalData->LongRetryLimit = 7;
}
#endif
- {
- priv->ShortRetryLimit = 0x30;
- priv->LongRetryLimit = 0x30;
- }
+ priv->ShortRetryLimit = 0x30;
+ priv->LongRetryLimit = 0x30;
priv->EarlyRxThreshold = 7;
priv->enable_gpio0 = 0;
priv->TransmitConfig =
- // TCR_DurProcMode | //for RTL8185B, duration setting by HW
- //? TCR_DISReqQsize |
(TCR_MXDMA_2048<<TCR_MXDMA_OFFSET)| // Max DMA Burst Size per Tx DMA Burst, 7: reserved.
(priv->ShortRetryLimit<<TCR_SRL_OFFSET)| // Short retry limit
(priv->LongRetryLimit<<TCR_LRL_OFFSET) | // Long retry limit
- (false ? TCR_SAT: 0); // FALSE: HW provides PLCP length and LENGEXT, TRUE: SW provides them
+ (false ? TCR_SAT : 0); // FALSE: HW provides PLCP length and LENGEXT, TRUE: SW provides them
#ifdef TO_DO_LIST
- if(Adapter->bInHctTest)
+ if (Adapter->bInHctTest)
pHalData->ReceiveConfig = pHalData->CSMethod |
- RCR_AMF | RCR_ADF | //RCR_AAP | //accept management/data
+ RCR_AMF | RCR_ADF | //accept management/data
//guangan200710
RCR_ACF | //accept control frame for SW AP needs PS-poll, 2005.07.07, by rcnjko.
RCR_AB | RCR_AM | RCR_APM | //accept BC/MC/UC
RCR_AICV | RCR_ACRC32 | //accept ICV/CRC error packet
((u32)7<<RCR_MXDMA_OFFSET) | // Max DMA Burst Size per Rx DMA Burst, 7: unlimited.
(pHalData->EarlyRxThreshold<<RCR_FIFO_OFFSET) | // Rx FIFO Threshold, 7: No Rx threshold.
- (pHalData->EarlyRxThreshold == 7 ? RCR_OnlyErlPkt:0);
+ (pHalData->EarlyRxThreshold == 7 ? RCR_OnlyErlPkt : 0);
else
#endif
@@ -2742,10 +2494,9 @@ static void rtl8192_init_priv_variable(struct net_device* dev)
RCR_AMF | RCR_ADF | //accept management/data
RCR_ACF | //accept control frame for SW AP needs PS-poll, 2005.07.07, by rcnjko.
RCR_AB | RCR_AM | RCR_APM | //accept BC/MC/UC
- //RCR_AICV | RCR_ACRC32 | //accept ICV/CRC error packet
((u32)7<<RCR_MXDMA_OFFSET)| // Max DMA Burst Size per Rx DMA Burst, 7: unlimited.
(priv->EarlyRxThreshold<<RX_FIFO_THRESHOLD_SHIFT) | // Rx FIFO Threshold, 7: No Rx threshold.
- (priv->EarlyRxThreshold == 7 ? RCR_ONLYERLPKT:0);
+ (priv->EarlyRxThreshold == 7 ? RCR_ONLYERLPKT : 0);
priv->AcmControl = 0;
priv->pFirmware = kzalloc(sizeof(rt_firmware), GFP_KERNEL);
@@ -2755,26 +2506,22 @@ static void rtl8192_init_priv_variable(struct net_device* dev)
skb_queue_head_init(&priv->skb_queue);
/* Tx related queue */
- for(i = 0; i < MAX_QUEUE_SIZE; i++) {
- skb_queue_head_init(&priv->ieee80211->skb_waitQ [i]);
- }
- for(i = 0; i < MAX_QUEUE_SIZE; i++) {
- skb_queue_head_init(&priv->ieee80211->skb_aggQ [i]);
- }
- for(i = 0; i < MAX_QUEUE_SIZE; i++) {
- skb_queue_head_init(&priv->ieee80211->skb_drv_aggQ [i]);
- }
+ for (i = 0; i < MAX_QUEUE_SIZE; i++)
+ skb_queue_head_init(&priv->ieee80211->skb_waitQ[i]);
+ for (i = 0; i < MAX_QUEUE_SIZE; i++)
+ skb_queue_head_init(&priv->ieee80211->skb_aggQ[i]);
+ for (i = 0; i < MAX_QUEUE_SIZE; i++)
+ skb_queue_head_init(&priv->ieee80211->skb_drv_aggQ[i]);
priv->rf_set_chan = rtl8192_phy_SwChnl;
}
//init lock here
-static void rtl8192_init_priv_lock(struct r8192_priv* priv)
+static void rtl8192_init_priv_lock(struct r8192_priv *priv)
{
spin_lock_init(&priv->tx_lock);
spin_lock_init(&priv->irq_lock);//added by thomas
- //spin_lock_init(&priv->rf_lock);
- sema_init(&priv->wx_sem,1);
- sema_init(&priv->rf_sem,1);
+ sema_init(&priv->wx_sem, 1);
+ sema_init(&priv->rf_sem, 1);
mutex_init(&priv->mutex);
}
@@ -2783,7 +2530,7 @@ extern void rtl819x_watchdog_wqcallback(struct work_struct *work);
void rtl8192_irq_rx_tasklet(struct r8192_priv *priv);
//init tasklet and wait_queue here. only 2.6 above kernel is considered
#define DRV_NAME "wlan0"
-static void rtl8192_init_priv_task(struct net_device* dev)
+static void rtl8192_init_priv_task(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -2791,71 +2538,64 @@ static void rtl8192_init_priv_task(struct net_device* dev)
INIT_WORK(&priv->reset_wq, rtl8192_restart);
- //INIT_DELAYED_WORK(&priv->watch_dog_wq, hal_dm_watchdog);
INIT_DELAYED_WORK(&priv->watch_dog_wq, rtl819x_watchdog_wqcallback);
INIT_DELAYED_WORK(&priv->txpower_tracking_wq, dm_txpower_trackingcallback);
-// INIT_DELAYED_WORK(&priv->gpio_change_rf_wq, dm_gpio_change_rf_callback);
INIT_DELAYED_WORK(&priv->rfpath_check_wq, dm_rf_pathcheck_workitemcallback);
INIT_DELAYED_WORK(&priv->update_beacon_wq, rtl8192_update_beacon);
INIT_DELAYED_WORK(&priv->initialgain_operate_wq, InitialGainOperateWorkItemCallBack);
- //INIT_WORK(&priv->SwChnlWorkItem, rtl8192_SwChnl_WorkItem);
- //INIT_WORK(&priv->SetBWModeWorkItem, rtl8192_SetBWModeWorkItem);
INIT_WORK(&priv->qos_activate, rtl8192_qos_activate);
tasklet_init(&priv->irq_rx_tasklet,
- (void(*)(unsigned long))rtl8192_irq_rx_tasklet,
- (unsigned long)priv);
+ (void(*)(unsigned long))rtl8192_irq_rx_tasklet,
+ (unsigned long)priv);
}
-static void rtl8192_get_eeprom_size(struct net_device* dev)
+static void rtl8192_get_eeprom_size(struct net_device *dev)
{
u16 curCR = 0;
struct r8192_priv *priv = ieee80211_priv(dev);
- RT_TRACE(COMP_EPROM, "===========>%s()\n", __FUNCTION__);
- curCR = read_nic_word_E(dev,EPROM_CMD);
+ RT_TRACE(COMP_EPROM, "===========>%s()\n", __func__);
+ read_nic_word_E(dev, EPROM_CMD, &curCR);
RT_TRACE(COMP_EPROM, "read from Reg EPROM_CMD(%x):%x\n", EPROM_CMD, curCR);
//whether need I consider BIT5?
priv->epromtype = (curCR & Cmd9346CR_9356SEL) ? EPROM_93c56 : EPROM_93c46;
- RT_TRACE(COMP_EPROM, "<===========%s(), epromtype:%d\n", __FUNCTION__, priv->epromtype);
+ RT_TRACE(COMP_EPROM, "<===========%s(), epromtype:%d\n", __func__, priv->epromtype);
}
//used to swap endian. as ntohl & htonl are not necessary to swap endian, so use this instead.
-static inline u16 endian_swap(u16* data)
+static inline u16 endian_swap(u16 *data)
{
u16 tmp = *data;
*data = (tmp >> 8) | (tmp << 8);
return *data;
}
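
A quick standalone check of the byte swap above (illustrative only):

#include <stdint.h>
#include <stdio.h>

static uint16_t swap16(uint16_t v)
{
	return (uint16_t)((v >> 8) | (v << 8));
}

int main(void)
{
	printf("0x%04x -> 0x%04x\n", 0x1234, swap16(0x1234));	/* 0x1234 -> 0x3412 */
	return 0;
}
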
-static void rtl8192_read_eeprom_info(struct net_device* dev)
+static void rtl8192_read_eeprom_info(struct net_device *dev)
{
u16 wEPROM_ID = 0;
u8 bMac_Tmp_Addr[6] = {0x00, 0xe0, 0x4c, 0x00, 0x00, 0x02};
u8 bLoad_From_EEPOM = false;
struct r8192_priv *priv = ieee80211_priv(dev);
u16 tmpValue = 0;
- RT_TRACE(COMP_EPROM, "===========>%s()\n", __FUNCTION__);
+ int i;
+ RT_TRACE(COMP_EPROM, "===========>%s()\n", __func__);
wEPROM_ID = eprom_read(dev, 0); //first read EEPROM ID out;
RT_TRACE(COMP_EPROM, "EEPROM ID is 0x%x\n", wEPROM_ID);
- if (wEPROM_ID != RTL8190_EEPROM_ID)
- {
+ if (wEPROM_ID != RTL8190_EEPROM_ID) {
RT_TRACE(COMP_ERR, "EEPROM ID is invalid(is 0x%x(should be 0x%x)\n", wEPROM_ID, RTL8190_EEPROM_ID);
- }
- else
+ } else {
bLoad_From_EEPOM = true;
+ }
- if (bLoad_From_EEPOM)
- {
+ if (bLoad_From_EEPOM) {
tmpValue = eprom_read(dev, (EEPROM_VID>>1));
priv->eeprom_vid = endian_swap(&tmpValue);
priv->eeprom_pid = eprom_read(dev, (EEPROM_PID>>1));
tmpValue = eprom_read(dev, (EEPROM_ChannelPlan>>1));
- priv->eeprom_ChannelPlan =((tmpValue&0xff00)>>8);
+ priv->eeprom_ChannelPlan = ((tmpValue&0xff00)>>8);
priv->btxpowerdata_readfromEEPORM = true;
priv->eeprom_CustomerID = eprom_read(dev, (EEPROM_Customer_ID>>1)) >>8;
- }
- else
- {
+ } else {
priv->eeprom_vid = 0;
priv->eeprom_pid = 0;
priv->card_8192_version = VERSION_819xU_B;
@@ -2865,18 +2605,14 @@ static void rtl8192_read_eeprom_info(struct net_device* dev)
RT_TRACE(COMP_EPROM, "vid:0x%4x, pid:0x%4x, CustomID:0x%2x, ChanPlan:0x%x\n", priv->eeprom_vid, priv->eeprom_pid, priv->eeprom_CustomerID, priv->eeprom_ChannelPlan);
//set channelplan from eeprom
priv->ChannelPlan = priv->eeprom_ChannelPlan;
- if (bLoad_From_EEPOM)
- {
+ if (bLoad_From_EEPOM) {
int i;
- for (i=0; i<6; i+=2)
- {
+ for (i = 0; i < 6; i += 2) {
u16 tmp = 0;
tmp = eprom_read(dev, (u16)((EEPROM_NODE_ADDRESS_BYTE_0 + i)>>1));
- *(u16*)(&dev->dev_addr[i]) = tmp;
+ *(u16 *)(&dev->dev_addr[i]) = tmp;
}
- }
- else
- {
+ } else {
memcpy(dev->dev_addr, bMac_Tmp_Addr, 6);
//should I set IDR0 here?
}
@@ -2884,8 +2620,7 @@ static void rtl8192_read_eeprom_info(struct net_device* dev)
priv->rf_type = RTL819X_DEFAULT_RF_TYPE; //default 1T2R
priv->rf_chip = RF_8256;
- if (priv->card_8192_version == (u8)VERSION_819xU_A)
- {
+ if (priv->card_8192_version == (u8)VERSION_819xU_A) {
//read Tx power gain offset of legacy OFDM to HT rate
if (bLoad_From_EEPOM)
priv->EEPROMTxPowerDiff = (eprom_read(dev, (EEPROM_TxPowerDiff>>1))&0xff00) >> 8;
@@ -2918,51 +2653,45 @@ static void rtl8192_read_eeprom_info(struct net_device* dev)
else
priv->EEPROM_Def_Ver = 1;
RT_TRACE(COMP_EPROM, "EEPROM_DEF_VER:%d\n", priv->EEPROM_Def_Ver);
- if (priv->EEPROM_Def_Ver == 0) //old eeprom definition
- {
+ if (priv->EEPROM_Def_Ver == 0) { //old eeprom definition
int i;
if (bLoad_From_EEPOM)
priv->EEPROMTxPowerLevelCCK = (eprom_read(dev, (EEPROM_TxPwIndex_CCK>>1))&0xff) >> 8;
else
priv->EEPROMTxPowerLevelCCK = 0x10;
RT_TRACE(COMP_EPROM, "CCK Tx Power Levl: 0x%02x\n", priv->EEPROMTxPowerLevelCCK);
- for (i=0; i<3; i++)
- {
- if (bLoad_From_EEPOM)
- {
+ for (i = 0; i < 3; i++) {
+ if (bLoad_From_EEPOM) {
tmpValue = eprom_read(dev, (EEPROM_TxPwIndex_OFDM_24G+i)>>1);
if (((EEPROM_TxPwIndex_OFDM_24G+i) % 2) == 0)
tmpValue = tmpValue & 0x00ff;
else
tmpValue = (tmpValue & 0xff00) >> 8;
- }
- else
+ } else {
tmpValue = 0x10;
+ }
priv->EEPROMTxPowerLevelOFDM24G[i] = (u8) tmpValue;
RT_TRACE(COMP_EPROM, "OFDM 2.4G Tx Power Level, Index %d = 0x%02x\n", i, priv->EEPROMTxPowerLevelCCK);
}
- }//end if EEPROM_DEF_VER == 0
- else if (priv->EEPROM_Def_Ver == 1)
- {
- if (bLoad_From_EEPOM)
- {
+ } else if (priv->EEPROM_Def_Ver == 1) {
+ if (bLoad_From_EEPOM) {
tmpValue = eprom_read(dev, (EEPROM_TxPwIndex_CCK_V1>>1));
tmpValue = (tmpValue & 0xff00) >> 8;
- }
- else
+ } else {
tmpValue = 0x10;
+ }
priv->EEPROMTxPowerLevelCCK_V1[0] = (u8)tmpValue;
if (bLoad_From_EEPOM)
tmpValue = eprom_read(dev, (EEPROM_TxPwIndex_CCK_V1 + 2)>>1);
else
tmpValue = 0x1010;
- *((u16*)(&priv->EEPROMTxPowerLevelCCK_V1[1])) = tmpValue;
+ *((u16 *)(&priv->EEPROMTxPowerLevelCCK_V1[1])) = tmpValue;
if (bLoad_From_EEPOM)
tmpValue = eprom_read(dev, (EEPROM_TxPwIndex_OFDM_24G_V1>>1));
else
tmpValue = 0x1010;
- *((u16*)(&priv->EEPROMTxPowerLevelOFDM24G[0])) = tmpValue;
+ *((u16 *)(&priv->EEPROMTxPowerLevelOFDM24G[0])) = tmpValue;
if (bLoad_From_EEPOM)
tmpValue = eprom_read(dev, (EEPROM_TxPwIndex_OFDM_24G_V1+2)>>1);
else
@@ -2972,42 +2701,34 @@ static void rtl8192_read_eeprom_info(struct net_device* dev)
//update HAL variables
//
- {
- int i;
- for (i=0; i<14; i++)
- {
- if (i<=3)
- priv->TxPowerLevelOFDM24G[i] = priv->EEPROMTxPowerLevelOFDM24G[0];
- else if (i>=4 && i<=9)
- priv->TxPowerLevelOFDM24G[i] = priv->EEPROMTxPowerLevelOFDM24G[1];
- else
- priv->TxPowerLevelOFDM24G[i] = priv->EEPROMTxPowerLevelOFDM24G[2];
- }
+ for (i = 0; i < 14; i++) {
+ if (i <= 3)
+ priv->TxPowerLevelOFDM24G[i] = priv->EEPROMTxPowerLevelOFDM24G[0];
+ else if (i >= 4 && i <= 9)
+ priv->TxPowerLevelOFDM24G[i] = priv->EEPROMTxPowerLevelOFDM24G[1];
+ else
+ priv->TxPowerLevelOFDM24G[i] = priv->EEPROMTxPowerLevelOFDM24G[2];
+ }
- for (i=0; i<14; i++)
- {
- if (priv->EEPROM_Def_Ver == 0)
- {
- if (i<=3)
- priv->TxPowerLevelCCK[i] = priv->EEPROMTxPowerLevelOFDM24G[0] + (priv->EEPROMTxPowerLevelCCK - priv->EEPROMTxPowerLevelOFDM24G[1]);
- else if (i>=4 && i<=9)
- priv->TxPowerLevelCCK[i] = priv->EEPROMTxPowerLevelCCK;
- else
- priv->TxPowerLevelCCK[i] = priv->EEPROMTxPowerLevelOFDM24G[2] + (priv->EEPROMTxPowerLevelCCK - priv->EEPROMTxPowerLevelOFDM24G[1]);
- }
- else if (priv->EEPROM_Def_Ver == 1)
- {
- if (i<=3)
- priv->TxPowerLevelCCK[i] = priv->EEPROMTxPowerLevelCCK_V1[0];
- else if (i>=4 && i<=9)
- priv->TxPowerLevelCCK[i] = priv->EEPROMTxPowerLevelCCK_V1[1];
- else
- priv->TxPowerLevelCCK[i] = priv->EEPROMTxPowerLevelCCK_V1[2];
- }
+ for (i = 0; i < 14; i++) {
+ if (priv->EEPROM_Def_Ver == 0) {
+ if (i <= 3)
+ priv->TxPowerLevelCCK[i] = priv->EEPROMTxPowerLevelOFDM24G[0] + (priv->EEPROMTxPowerLevelCCK - priv->EEPROMTxPowerLevelOFDM24G[1]);
+ else if (i >= 4 && i <= 9)
+ priv->TxPowerLevelCCK[i] = priv->EEPROMTxPowerLevelCCK;
+ else
+ priv->TxPowerLevelCCK[i] = priv->EEPROMTxPowerLevelOFDM24G[2] + (priv->EEPROMTxPowerLevelCCK - priv->EEPROMTxPowerLevelOFDM24G[1]);
+ } else if (priv->EEPROM_Def_Ver == 1) {
+ if (i <= 3)
+ priv->TxPowerLevelCCK[i] = priv->EEPROMTxPowerLevelCCK_V1[0];
+ else if (i >= 4 && i <= 9)
+ priv->TxPowerLevelCCK[i] = priv->EEPROMTxPowerLevelCCK_V1[1];
+ else
+ priv->TxPowerLevelCCK[i] = priv->EEPROMTxPowerLevelCCK_V1[2];
}
- }//end update HAL variables
+ }
priv->TxPowerDiff = priv->EEPROMPwDiff;
-// Antenna B gain offset to antenna A, bit0~3
+ // Antenna B gain offset to antenna A, bit0~3
priv->AntennaTxPwDiff[0] = (priv->EEPROMTxPowerDiff & 0xf);
// Antenna C gain offset to antenna A, bit4~7
priv->AntennaTxPwDiff[1] = ((priv->EEPROMTxPowerDiff & 0xf0)>>4);
@@ -3018,46 +2739,41 @@ static void rtl8192_read_eeprom_info(struct net_device* dev)
priv->ThermalMeter[0] = priv->EEPROMThermalMeter;
}//end if VersionID == VERSION_819xU_A
-//added by vivi, for dlink led, 20080416
- switch(priv->eeprom_CustomerID)
- {
- case EEPROM_CID_RUNTOP:
- priv->CustomerID = RT_CID_819x_RUNTOP;
- break;
+ //added by vivi, for dlink led, 20080416
+ switch (priv->eeprom_CustomerID) {
+ case EEPROM_CID_RUNTOP:
+ priv->CustomerID = RT_CID_819x_RUNTOP;
+ break;
- case EEPROM_CID_DLINK:
- priv->CustomerID = RT_CID_DLINK;
- break;
+ case EEPROM_CID_DLINK:
+ priv->CustomerID = RT_CID_DLINK;
+ break;
- default:
- priv->CustomerID = RT_CID_DEFAULT;
- break;
+ default:
+ priv->CustomerID = RT_CID_DEFAULT;
+ break;
}
- switch(priv->CustomerID)
- {
- case RT_CID_819x_RUNTOP:
- priv->LedStrategy = SW_LED_MODE2;
- break;
+ switch (priv->CustomerID) {
+ case RT_CID_819x_RUNTOP:
+ priv->LedStrategy = SW_LED_MODE2;
+ break;
- case RT_CID_DLINK:
- priv->LedStrategy = SW_LED_MODE4;
- break;
+ case RT_CID_DLINK:
+ priv->LedStrategy = SW_LED_MODE4;
+ break;
- default:
- priv->LedStrategy = SW_LED_MODE0;
- break;
+ default:
+ priv->LedStrategy = SW_LED_MODE0;
+ break;
}
- if(priv->rf_type == RF_1T2R)
- {
+ if (priv->rf_type == RF_1T2R) {
RT_TRACE(COMP_EPROM, "\n1T2R config\n");
- }
- else
- {
+ } else {
RT_TRACE(COMP_EPROM, "\n2T4R config\n");
}
@@ -3066,18 +2782,18 @@ static void rtl8192_read_eeprom_info(struct net_device* dev)
init_rate_adaptive(dev);
//we need init DIG RATR table here again.
- RT_TRACE(COMP_EPROM, "<===========%s()\n", __FUNCTION__);
+ RT_TRACE(COMP_EPROM, "<===========%s()\n", __func__);
return;
}
-short rtl8192_get_channel_map(struct net_device * dev)
+short rtl8192_get_channel_map(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- if(priv->ChannelPlan > COUNTRY_CODE_GLOBAL_DOMAIN){
- printk("rtl8180_init:Error channel plan! Set to default.\n");
- priv->ChannelPlan= 0;
+ if (priv->ChannelPlan > COUNTRY_CODE_GLOBAL_DOMAIN) {
+ netdev_err(dev, "rtl8180_init: Error channel plan! Set to default.\n");
+ priv->ChannelPlan = 0;
}
- RT_TRACE(COMP_INIT, "Channel plan is %d\n",priv->ChannelPlan);
+ RT_TRACE(COMP_INIT, "Channel plan is %d\n", priv->ChannelPlan);
rtl819x_set_channel_map(priv->ChannelPlan, priv);
return 0;
@@ -3088,24 +2804,18 @@ short rtl8192_init(struct net_device *dev)
struct r8192_priv *priv = ieee80211_priv(dev);
- memset(&(priv->stats),0,sizeof(struct Stats));
- memset(priv->txqueue_to_outpipemap,0,9);
+ memset(&(priv->stats), 0, sizeof(struct Stats));
+ memset(priv->txqueue_to_outpipemap, 0, 9);
#ifdef PIPE12
{
- int i=0;
- u8 queuetopipe[]={3,2,1,0,4,8,7,6,5};
- memcpy(priv->txqueue_to_outpipemap,queuetopipe,9);
-/* for(i=0;i<9;i++)
- printk("%d ",priv->txqueue_to_outpipemap[i]);
- printk("\n");*/
+ int i = 0;
+ u8 queuetopipe[] = {3, 2, 1, 0, 4, 8, 7, 6, 5};
+ memcpy(priv->txqueue_to_outpipemap, queuetopipe, 9);
}
#else
{
- u8 queuetopipe[]={3,2,1,0,4,4,0,4,4};
- memcpy(priv->txqueue_to_outpipemap,queuetopipe,9);
-/* for(i=0;i<9;i++)
- printk("%d ",priv->txqueue_to_outpipemap[i]);
- printk("\n");*/
+ u8 queuetopipe[] = {3, 2, 1, 0, 4, 4, 0, 4, 4};
+ memcpy(priv->txqueue_to_outpipemap, queuetopipe, 9);
}
#endif
rtl8192_init_priv_variable(dev);
@@ -3118,12 +2828,11 @@ short rtl8192_init(struct net_device *dev)
init_timer(&priv->watch_dog_timer);
priv->watch_dog_timer.data = (unsigned long)dev;
priv->watch_dog_timer.function = watch_dog_timer_callback;
- if(rtl8192_usb_initendpoints(dev)!=0){
+ if (rtl8192_usb_initendpoints(dev) != 0) {
DMESG("Endopoints initialization failed");
return -ENOMEM;
}
- //rtl8192_adapter_start(dev);
#ifdef DEBUG_EPROM
dump_eprom(dev);
#endif
@@ -3138,16 +2847,16 @@ short rtl8192_init(struct net_device *dev)
* return: none
* notice: This part need to modified according to the rate set we filtered
* ****************************************************************************/
-void rtl8192_hwconfig(struct net_device* dev)
+void rtl8192_hwconfig(struct net_device *dev)
{
u32 regRATR = 0, regRRSR = 0;
u8 regBwOpMode = 0, regTmp = 0;
struct r8192_priv *priv = ieee80211_priv(dev);
+ u32 ratr_value = 0;
-// Set RRSR, RATR, and BW_OPMODE registers
+ // Set RRSR, RATR, and BW_OPMODE registers
//
- switch(priv->ieee80211->mode)
- {
+ switch (priv->ieee80211->mode) {
case WIRELESS_MODE_B:
regBwOpMode = BW_OPMODE_20MHZ;
regRATR = RATE_ALL_CCK;
@@ -3165,26 +2874,25 @@ void rtl8192_hwconfig(struct net_device* dev)
break;
case WIRELESS_MODE_AUTO:
#ifdef TO_DO_LIST
- if (Adapter->bInHctTest)
- {
- regBwOpMode = BW_OPMODE_20MHZ;
- regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
- regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
+ if (Adapter->bInHctTest) {
+ regBwOpMode = BW_OPMODE_20MHZ;
+ regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
+ regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
}
else
#endif
{
- regBwOpMode = BW_OPMODE_20MHZ;
- regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG | RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
- regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
+ regBwOpMode = BW_OPMODE_20MHZ;
+ regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG | RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
+ regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
}
break;
case WIRELESS_MODE_N_24G:
// It support CCK rate by default.
// CCK rate will be filtered out only when associated AP does not support it.
regBwOpMode = BW_OPMODE_20MHZ;
- regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG | RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
- regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
+ regRATR = RATE_ALL_CCK | RATE_ALL_OFDM_AG | RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
+ regRRSR = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
break;
case WIRELESS_MODE_N_5G:
regBwOpMode = BW_OPMODE_5G;
@@ -3194,17 +2902,12 @@ void rtl8192_hwconfig(struct net_device* dev)
}
write_nic_byte(dev, BW_OPMODE, regBwOpMode);
- {
- u32 ratr_value = 0;
- ratr_value = regRATR;
- if (priv->rf_type == RF_1T2R)
- {
- ratr_value &= ~(RATE_ALL_OFDM_2SS);
- }
- write_nic_dword(dev, RATR0, ratr_value);
- write_nic_byte(dev, UFWP, 1);
- }
- regTmp = read_nic_byte(dev, 0x313);
+ ratr_value = regRATR;
+ if (priv->rf_type == RF_1T2R)
+ ratr_value &= ~(RATE_ALL_OFDM_2SS);
+ write_nic_dword(dev, RATR0, ratr_value);
+ write_nic_byte(dev, UFWP, 1);
+ read_nic_byte(dev, 0x313, &regTmp);
regRRSR = ((regTmp) << 24) | (regRRSR & 0x00ffffff);
write_nic_dword(dev, RRSR, regRRSR);
@@ -3212,8 +2915,8 @@ void rtl8192_hwconfig(struct net_device* dev)
// Set Retry Limit here
//
write_nic_word(dev, RETRY_LIMIT,
- priv->ShortRetryLimit << RETRY_LIMIT_SHORT_SHIFT | \
- priv->LongRetryLimit << RETRY_LIMIT_LONG_SHIFT);
+ priv->ShortRetryLimit << RETRY_LIMIT_SHORT_SHIFT |
+ priv->LongRetryLimit << RETRY_LIMIT_LONG_SHIFT);
// Set Contention Window here
// Set Tx AGC
@@ -3232,7 +2935,9 @@ bool rtl8192_adapter_start(struct net_device *dev)
struct r8192_priv *priv = ieee80211_priv(dev);
u32 dwRegRead = 0;
bool init_status = true;
- RT_TRACE(COMP_INIT, "====>%s()\n", __FUNCTION__);
+ u8 SECR_value = 0x0;
+ u8 tmp;
+ RT_TRACE(COMP_INIT, "====>%s()\n", __func__);
priv->Rf_Mode = RF_OP_By_SW_3wire;
//for ASIC power on sequence
write_nic_byte_E(dev, 0x5f, 0x80);
@@ -3242,34 +2947,31 @@ bool rtl8192_adapter_start(struct net_device *dev)
write_nic_byte_E(dev, 0x5e, 0x80);
write_nic_byte(dev, 0x17, 0x37);
mdelay(10);
-//#ifdef TO_DO_LIST
priv->pFirmware->firmware_status = FW_STATUS_0_INIT;
//config CPUReset Register
//Firmware Reset or not?
- dwRegRead = read_nic_dword(dev, CPU_GEN);
+ read_nic_dword(dev, CPU_GEN, &dwRegRead);
if (priv->pFirmware->firmware_status == FW_STATUS_0_INIT)
dwRegRead |= CPU_GEN_SYSTEM_RESET; //do nothing here?
else if (priv->pFirmware->firmware_status == FW_STATUS_5_READY)
dwRegRead |= CPU_GEN_FIRMWARE_RESET;
else
- RT_TRACE(COMP_ERR, "ERROR in %s(): undefined firmware state(%d)\n", __FUNCTION__, priv->pFirmware->firmware_status);
+ RT_TRACE(COMP_ERR, "ERROR in %s(): undefined firmware state(%d)\n", __func__, priv->pFirmware->firmware_status);
write_nic_dword(dev, CPU_GEN, dwRegRead);
- //mdelay(30);
//config BB.
rtl8192_BBConfig(dev);
//Loopback mode or not
priv->LoopbackMode = RTL819xU_NO_LOOPBACK;
-// priv->LoopbackMode = RTL819xU_MAC_LOOPBACK;
- dwRegRead = read_nic_dword(dev, CPU_GEN);
+ read_nic_dword(dev, CPU_GEN, &dwRegRead);
if (priv->LoopbackMode == RTL819xU_NO_LOOPBACK)
dwRegRead = ((dwRegRead & CPU_GEN_NO_LOOPBACK_MSK) | CPU_GEN_NO_LOOPBACK_SET);
else if (priv->LoopbackMode == RTL819xU_MAC_LOOPBACK)
dwRegRead |= CPU_CCK_LOOPBACK;
else
- RT_TRACE(COMP_ERR, "Serious error in %s(): wrong loopback mode setting(%d)\n", __FUNCTION__, priv->LoopbackMode);
+ RT_TRACE(COMP_ERR, "Serious error in %s(): wrong loopback mode setting(%d)\n", __func__, priv->LoopbackMode);
write_nic_dword(dev, CPU_GEN, dwRegRead);
@@ -3277,7 +2979,8 @@ bool rtl8192_adapter_start(struct net_device *dev)
udelay(500);
//xiong add for new bitfile:usb suspend reset pin set to 1. //do we need?
- write_nic_byte_E(dev, 0x5f, (read_nic_byte_E(dev, 0x5f)|0x20));
+ read_nic_byte_E(dev, 0x5f, &tmp);
+	write_nic_byte_E(dev, 0x5f, tmp | 0x20);
//Set Hardware
rtl8192_hwconfig(dev);
@@ -3286,61 +2989,54 @@ bool rtl8192_adapter_start(struct net_device *dev)
write_nic_byte(dev, CMDR, CR_RE|CR_TE);
//set IDR0 here
- write_nic_dword(dev, MAC0, ((u32*)dev->dev_addr)[0]);
- write_nic_word(dev, MAC4, ((u16*)(dev->dev_addr + 4))[0]);
+ write_nic_dword(dev, MAC0, ((u32 *)dev->dev_addr)[0]);
+ write_nic_word(dev, MAC4, ((u16 *)(dev->dev_addr + 4))[0]);
//set RCR
write_nic_dword(dev, RCR, priv->ReceiveConfig);
//Initialize Number of Reserved Pages in Firmware Queue
- write_nic_dword(dev, RQPN1, NUM_OF_PAGE_IN_FW_QUEUE_BK << RSVD_FW_QUEUE_PAGE_BK_SHIFT |\
- NUM_OF_PAGE_IN_FW_QUEUE_BE << RSVD_FW_QUEUE_PAGE_BE_SHIFT | \
- NUM_OF_PAGE_IN_FW_QUEUE_VI << RSVD_FW_QUEUE_PAGE_VI_SHIFT | \
- NUM_OF_PAGE_IN_FW_QUEUE_VO <<RSVD_FW_QUEUE_PAGE_VO_SHIFT);
- write_nic_dword(dev, RQPN2, NUM_OF_PAGE_IN_FW_QUEUE_MGNT << RSVD_FW_QUEUE_PAGE_MGNT_SHIFT |\
- NUM_OF_PAGE_IN_FW_QUEUE_CMD << RSVD_FW_QUEUE_PAGE_CMD_SHIFT);
- write_nic_dword(dev, RQPN3, APPLIED_RESERVED_QUEUE_IN_FW| \
- NUM_OF_PAGE_IN_FW_QUEUE_BCN<<RSVD_FW_QUEUE_PAGE_BCN_SHIFT
-// | NUM_OF_PAGE_IN_FW_QUEUE_PUB<<RSVD_FW_QUEUE_PAGE_PUB_SHIFT
- );
+ write_nic_dword(dev, RQPN1, NUM_OF_PAGE_IN_FW_QUEUE_BK << RSVD_FW_QUEUE_PAGE_BK_SHIFT |
+ NUM_OF_PAGE_IN_FW_QUEUE_BE << RSVD_FW_QUEUE_PAGE_BE_SHIFT |
+ NUM_OF_PAGE_IN_FW_QUEUE_VI << RSVD_FW_QUEUE_PAGE_VI_SHIFT |
+				NUM_OF_PAGE_IN_FW_QUEUE_VO << RSVD_FW_QUEUE_PAGE_VO_SHIFT);
+ write_nic_dword(dev, RQPN2, NUM_OF_PAGE_IN_FW_QUEUE_MGNT << RSVD_FW_QUEUE_PAGE_MGNT_SHIFT |
+ NUM_OF_PAGE_IN_FW_QUEUE_CMD << RSVD_FW_QUEUE_PAGE_CMD_SHIFT);
+	write_nic_dword(dev, RQPN3, APPLIED_RESERVED_QUEUE_IN_FW |
+				NUM_OF_PAGE_IN_FW_QUEUE_BCN << RSVD_FW_QUEUE_PAGE_BCN_SHIFT);
write_nic_dword(dev, RATR0+4*7, (RATE_ALL_OFDM_AG | RATE_ALL_CCK));
//Set AckTimeout
// TODO: (it value is only for FPGA version). need to be changed!!2006.12.18, by Emily
write_nic_byte(dev, ACK_TIMEOUT, 0x30);
-// RT_TRACE(COMP_INIT, "%s():priv->ResetProgress is %d\n", __FUNCTION__,priv->ResetProgress);
- if(priv->ResetProgress == RESET_TYPE_NORESET)
- rtl8192_SetWirelessMode(dev, priv->ieee80211->mode);
- if(priv->ResetProgress == RESET_TYPE_NORESET){
- CamResetAllEntry(dev);
- {
- u8 SECR_value = 0x0;
+ if (priv->ResetProgress == RESET_TYPE_NORESET)
+ rtl8192_SetWirelessMode(dev, priv->ieee80211->mode);
+ if (priv->ResetProgress == RESET_TYPE_NORESET) {
+ CamResetAllEntry(dev);
SECR_value |= SCR_TxEncEnable;
SECR_value |= SCR_RxDecEnable;
SECR_value |= SCR_NoSKMC;
write_nic_byte(dev, SECR, SECR_value);
}
- }
//Beacon related
write_nic_word(dev, ATIMWND, 2);
write_nic_word(dev, BCN_INTERVAL, 100);
- {
#define DEFAULT_EDCA 0x005e4332
+ {
int i;
- for (i=0; i<QOS_QUEUE_NUM; i++)
- write_nic_dword(dev, WDCAPARA_ADD[i], DEFAULT_EDCA);
+ for (i = 0; i < QOS_QUEUE_NUM; i++)
+ write_nic_dword(dev, WDCAPARA_ADD[i], DEFAULT_EDCA);
}
#ifdef USB_RX_AGGREGATION_SUPPORT
//3 For usb rx firmware aggregation control
- if(priv->ResetProgress == RESET_TYPE_NORESET)
- {
+ if (priv->ResetProgress == RESET_TYPE_NORESET) {
u32 ulValue;
PRT_HIGH_THROUGHPUT pHTInfo = priv->ieee80211->pHTInfo;
ulValue = (pHTInfo->UsbRxFwAggrEn<<24) | (pHTInfo->UsbRxFwAggrPageNum<<16) |
- (pHTInfo->UsbRxFwAggrPacketNum<<8) | (pHTInfo->UsbRxFwAggrTimeout);
+ (pHTInfo->UsbRxFwAggrPacketNum<<8) | (pHTInfo->UsbRxFwAggrTimeout);
/*
* If usb rx firmware aggregation is enabled,
* when anyone of three threshold conditions above is reached,
@@ -3353,63 +3049,52 @@ bool rtl8192_adapter_start(struct net_device *dev)
rtl8192_phy_configmac(dev);
- if (priv->card_8192_version == (u8) VERSION_819xU_A)
- {
+ if (priv->card_8192_version == (u8) VERSION_819xU_A) {
rtl8192_phy_getTxPower(dev);
rtl8192_phy_setTxPower(dev, priv->chan);
}
//Firmware download
init_status = init_firmware(dev);
- if(!init_status)
- {
- RT_TRACE(COMP_ERR,"ERR!!! %s(): Firmware download is failed\n", __FUNCTION__);
+ if (!init_status) {
+		RT_TRACE(COMP_ERR, "ERR!!! %s(): Firmware download failed\n", __func__);
return init_status;
}
- RT_TRACE(COMP_INIT, "%s():after firmware download\n", __FUNCTION__);
+ RT_TRACE(COMP_INIT, "%s():after firmware download\n", __func__);
//
#ifdef TO_DO_LIST
-if(Adapter->ResetProgress == RESET_TYPE_NORESET)
- {
- if(pMgntInfo->RegRfOff == TRUE)
- { // User disable RF via registry.
+ if (Adapter->ResetProgress == RESET_TYPE_NORESET) {
+ if (pMgntInfo->RegRfOff == TRUE) { // User disable RF via registry.
RT_TRACE((COMP_INIT|COMP_RF), DBG_LOUD, ("InitializeAdapter819xUsb(): Turn off RF for RegRfOff ----------\n"));
MgntActSet_RF_State(Adapter, eRfOff, RF_CHANGE_BY_SW);
// Those actions will be discard in MgntActSet_RF_State because of the same state
- for(eRFPath = 0; eRFPath <pHalData->NumTotalRFPath; eRFPath++)
+ for (eRFPath = 0; eRFPath < pHalData->NumTotalRFPath; eRFPath++)
PHY_SetRFReg(Adapter, (RF90_RADIO_PATH_E)eRFPath, 0x4, 0xC00, 0x0);
- }
- else if(pMgntInfo->RfOffReason > RF_CHANGE_BY_PS)
- { // H/W or S/W RF OFF before sleep.
+ } else if (pMgntInfo->RfOffReason > RF_CHANGE_BY_PS) { // H/W or S/W RF OFF before sleep.
RT_TRACE((COMP_INIT|COMP_RF), DBG_LOUD, ("InitializeAdapter819xUsb(): Turn off RF for RfOffReason(%d) ----------\n", pMgntInfo->RfOffReason));
MgntActSet_RF_State(Adapter, eRfOff, pMgntInfo->RfOffReason);
- }
- else
- {
+ } else {
pHalData->eRFPowerState = eRfOn;
pMgntInfo->RfOffReason = 0;
RT_TRACE((COMP_INIT|COMP_RF), DBG_LOUD, ("InitializeAdapter819xUsb(): RF is on ----------\n"));
}
- }
- else
- {
- if(pHalData->eRFPowerState == eRfOff)
- {
+ } else {
+ if (pHalData->eRFPowerState == eRfOff) {
MgntActSet_RF_State(Adapter, eRfOff, pMgntInfo->RfOffReason);
// Those actions will be discard in MgntActSet_RF_State because of the same state
- for(eRFPath = 0; eRFPath <pHalData->NumTotalRFPath; eRFPath++)
+ for (eRFPath = 0; eRFPath < pHalData->NumTotalRFPath; eRFPath++)
PHY_SetRFReg(Adapter, (RF90_RADIO_PATH_E)eRFPath, 0x4, 0xC00, 0x0);
}
}
#endif
//config RF.
- if(priv->ResetProgress == RESET_TYPE_NORESET){
- rtl8192_phy_RFConfig(dev);
- RT_TRACE(COMP_INIT, "%s():after phy RF config\n", __FUNCTION__);
+ if (priv->ResetProgress == RESET_TYPE_NORESET) {
+ rtl8192_phy_RFConfig(dev);
+ RT_TRACE(COMP_INIT, "%s():after phy RF config\n", __func__);
}
- if(priv->ieee80211->FwRWRF)
+ if (priv->ieee80211->FwRWRF)
// We can force firmware to do RF-R/W
priv->Rf_Mode = RF_OP_By_FW;
else
@@ -3421,54 +3106,44 @@ if(Adapter->ResetProgress == RESET_TYPE_NORESET)
rtl8192_setBBreg(dev, rFPGA0_RFMOD, bCCKEn, 0x1);
rtl8192_setBBreg(dev, rFPGA0_RFMOD, bOFDMEn, 0x1);
- if(priv->ResetProgress == RESET_TYPE_NORESET)
- {
+ if (priv->ResetProgress == RESET_TYPE_NORESET) {
//if D or C cut
- u8 tmpvalue = read_nic_byte(dev, 0x301);
- if(tmpvalue ==0x03)
- {
+ u8 tmpvalue;
+ read_nic_byte(dev, 0x301, &tmpvalue);
+ if (tmpvalue == 0x03) {
priv->bDcut = TRUE;
RT_TRACE(COMP_POWER_TRACKING, "D-cut\n");
- }
- else
- {
+ } else {
priv->bDcut = FALSE;
RT_TRACE(COMP_POWER_TRACKING, "C-cut\n");
}
dm_initialize_txpower_tracking(dev);
- if(priv->bDcut == TRUE)
- {
+ if (priv->bDcut == TRUE) {
u32 i, TempCCk;
- u32 tmpRegA= rtl8192_QueryBBReg(dev,rOFDM0_XATxIQImbalance,bMaskDWord);
- // u32 tmpRegC= rtl8192_QueryBBReg(dev,rOFDM0_XCTxIQImbalance,bMaskDWord);
- for(i = 0; i<TxBBGainTableLength; i++)
- {
- if(tmpRegA == priv->txbbgain_table[i].txbbgain_value)
- {
- priv->rfa_txpowertrackingindex= (u8)i;
- priv->rfa_txpowertrackingindex_real= (u8)i;
- priv->rfa_txpowertracking_default= priv->rfa_txpowertrackingindex;
+ u32 tmpRegA = rtl8192_QueryBBReg(dev, rOFDM0_XATxIQImbalance, bMaskDWord);
+ for (i = 0; i < TxBBGainTableLength; i++) {
+ if (tmpRegA == priv->txbbgain_table[i].txbbgain_value) {
+ priv->rfa_txpowertrackingindex = (u8)i;
+ priv->rfa_txpowertrackingindex_real = (u8)i;
+ priv->rfa_txpowertracking_default = priv->rfa_txpowertrackingindex;
break;
}
}
TempCCk = rtl8192_QueryBBReg(dev, rCCK0_TxFilter1, bMaskByte2);
- for(i=0 ; i<CCKTxBBGainTableLength ; i++)
- {
+ for (i = 0; i < CCKTxBBGainTableLength; i++) {
- if(TempCCk == priv->cck_txbbgain_table[i].ccktxbb_valuearray[0])
- {
- priv->cck_present_attentuation_20Mdefault=(u8) i;
+ if (TempCCk == priv->cck_txbbgain_table[i].ccktxbb_valuearray[0]) {
+ priv->cck_present_attentuation_20Mdefault = (u8) i;
break;
}
}
- priv->cck_present_attentuation_40Mdefault= 0;
- priv->cck_present_attentuation_difference= 0;
+ priv->cck_present_attentuation_40Mdefault = 0;
+ priv->cck_present_attentuation_difference = 0;
priv->cck_present_attentuation = priv->cck_present_attentuation_20Mdefault;
- // pMgntInfo->bTXPowerTracking = FALSE;//TEMPLY DISABLE
}
}
write_nic_byte(dev, 0x87, 0x0);
@@ -3492,16 +3167,14 @@ static struct net_device_stats *rtl8192_stats(struct net_device *dev)
return &priv->ieee80211->stats;
}
-bool
-HalTxCheckStuck819xUsb(
- struct net_device *dev
- )
+bool HalTxCheckStuck819xUsb(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- u16 RegTxCounter = read_nic_word(dev, 0x128);
+ u16 RegTxCounter;
bool bStuck = FALSE;
- RT_TRACE(COMP_RESET,"%s():RegTxCounter is %d,TxCounter is %d\n",__FUNCTION__,RegTxCounter,priv->TxCounter);
- if(priv->TxCounter==RegTxCounter)
+ read_nic_word(dev, 0x128, &RegTxCounter);
+ RT_TRACE(COMP_RESET, "%s():RegTxCounter is %d,TxCounter is %d\n", __func__, RegTxCounter, priv->TxCounter);
+ if (priv->TxCounter == RegTxCounter)
bStuck = TRUE;
priv->TxCounter = RegTxCounter;
@@ -3513,43 +3186,30 @@ HalTxCheckStuck819xUsb(
* <Assumption: RT_TX_SPINLOCK is acquired.>
* First added: 2006.11.19 by emily
*/
-RESET_TYPE
-TxCheckStuck(struct net_device *dev)
+RESET_TYPE TxCheckStuck(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 QueueID;
-// PRT_TCB pTcb;
-// u8 ResetThreshold;
bool bCheckFwTxCnt = false;
- //unsigned long flags;
//
// Decide such threshold according to current power save mode
//
-// RT_TRACE(COMP_RESET, " ==> TxCheckStuck()\n");
-// PlatformAcquireSpinLock(Adapter, RT_TX_SPINLOCK);
-// spin_lock_irqsave(&priv->ieee80211->lock,flags);
- for (QueueID = 0; QueueID<=BEACON_QUEUE;QueueID ++)
- {
- if(QueueID == TXCMD_QUEUE)
- continue;
+ for (QueueID = 0; QueueID <= BEACON_QUEUE; QueueID++) {
+ if (QueueID == TXCMD_QUEUE)
+ continue;
#ifdef USB_TX_DRIVER_AGGREGATION_ENABLE
- if((skb_queue_len(&priv->ieee80211->skb_waitQ[QueueID]) == 0) && (skb_queue_len(&priv->ieee80211->skb_aggQ[QueueID]) == 0) && (skb_queue_len(&priv->ieee80211->skb_drv_aggQ[QueueID]) == 0))
+ if ((skb_queue_len(&priv->ieee80211->skb_waitQ[QueueID]) == 0) && (skb_queue_len(&priv->ieee80211->skb_aggQ[QueueID]) == 0) && (skb_queue_len(&priv->ieee80211->skb_drv_aggQ[QueueID]) == 0))
#else
- if((skb_queue_len(&priv->ieee80211->skb_waitQ[QueueID]) == 0) && (skb_queue_len(&priv->ieee80211->skb_aggQ[QueueID]) == 0))
+ if ((skb_queue_len(&priv->ieee80211->skb_waitQ[QueueID]) == 0) && (skb_queue_len(&priv->ieee80211->skb_aggQ[QueueID]) == 0))
#endif
continue;
- bCheckFwTxCnt = true;
- }
-// PlatformReleaseSpinLock(Adapter, RT_TX_SPINLOCK);
-// spin_unlock_irqrestore(&priv->ieee80211->lock,flags);
-// RT_TRACE(COMP_RESET,"bCheckFwTxCnt is %d\n",bCheckFwTxCnt);
- if(bCheckFwTxCnt)
- {
- if(HalTxCheckStuck819xUsb(dev))
- {
+ bCheckFwTxCnt = true;
+ }
+ if (bCheckFwTxCnt) {
+ if (HalTxCheckStuck819xUsb(dev)) {
RT_TRACE(COMP_RESET, "TxCheckStuck(): Fw indicates no Tx condition! \n");
return RESET_TYPE_SILENT;
}
@@ -3557,64 +3217,41 @@ TxCheckStuck(struct net_device *dev)
return RESET_TYPE_NORESET;
}
-bool
-HalRxCheckStuck819xUsb(struct net_device *dev)
+bool HalRxCheckStuck819xUsb(struct net_device *dev)
{
- u16 RegRxCounter = read_nic_word(dev, 0x130);
+ u16 RegRxCounter;
struct r8192_priv *priv = ieee80211_priv(dev);
bool bStuck = FALSE;
static u8 rx_chk_cnt;
- RT_TRACE(COMP_RESET,"%s(): RegRxCounter is %d,RxCounter is %d\n",__FUNCTION__,RegRxCounter,priv->RxCounter);
+ read_nic_word(dev, 0x130, &RegRxCounter);
+ RT_TRACE(COMP_RESET, "%s(): RegRxCounter is %d,RxCounter is %d\n", __func__, RegRxCounter, priv->RxCounter);
// If rssi is small, we should check rx for long time because of bad rx.
// or maybe it will continuous silent reset every 2 seconds.
rx_chk_cnt++;
- if(priv->undecorated_smoothed_pwdb >= (RateAdaptiveTH_High+5))
- {
+ if (priv->undecorated_smoothed_pwdb >= (RateAdaptiveTH_High+5)) {
rx_chk_cnt = 0; //high rssi, check rx stuck right now.
- }
- else if(priv->undecorated_smoothed_pwdb < (RateAdaptiveTH_High+5) &&
- ((priv->CurrentChannelBW!=HT_CHANNEL_WIDTH_20&&priv->undecorated_smoothed_pwdb>=RateAdaptiveTH_Low_40M) ||
- (priv->CurrentChannelBW==HT_CHANNEL_WIDTH_20&&priv->undecorated_smoothed_pwdb>=RateAdaptiveTH_Low_20M)) )
- {
- if(rx_chk_cnt < 2)
- {
+ } else if (priv->undecorated_smoothed_pwdb < (RateAdaptiveTH_High+5) &&
+ ((priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20 && priv->undecorated_smoothed_pwdb >= RateAdaptiveTH_Low_40M) ||
+ (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 && priv->undecorated_smoothed_pwdb >= RateAdaptiveTH_Low_20M))) {
+ if (rx_chk_cnt < 2)
return bStuck;
- }
else
- {
rx_chk_cnt = 0;
- }
- }
- else if(((priv->CurrentChannelBW!=HT_CHANNEL_WIDTH_20&&priv->undecorated_smoothed_pwdb<RateAdaptiveTH_Low_40M) ||
- (priv->CurrentChannelBW==HT_CHANNEL_WIDTH_20&&priv->undecorated_smoothed_pwdb<RateAdaptiveTH_Low_20M)) &&
- priv->undecorated_smoothed_pwdb >= VeryLowRSSI)
- {
- if(rx_chk_cnt < 4)
- {
- //DbgPrint("RSSI < %d && RSSI >= %d, no check this time \n", RateAdaptiveTH_Low, VeryLowRSSI);
+ } else if (((priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20 && priv->undecorated_smoothed_pwdb < RateAdaptiveTH_Low_40M) ||
+ (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 && priv->undecorated_smoothed_pwdb < RateAdaptiveTH_Low_20M)) &&
+ priv->undecorated_smoothed_pwdb >= VeryLowRSSI) {
+ if (rx_chk_cnt < 4)
return bStuck;
- }
else
- {
rx_chk_cnt = 0;
- //DbgPrint("RSSI < %d && RSSI >= %d, check this time \n", RateAdaptiveTH_Low, VeryLowRSSI);
- }
- }
- else
- {
- if(rx_chk_cnt < 8)
- {
- //DbgPrint("RSSI <= %d, no check this time \n", VeryLowRSSI);
+ } else {
+ if (rx_chk_cnt < 8)
return bStuck;
- }
else
- {
rx_chk_cnt = 0;
- //DbgPrint("RSSI <= %d, check this time \n", VeryLowRSSI);
- }
}
- if(priv->RxCounter==RegRxCounter)
+ if (priv->RxCounter == RegRxCounter)
bStuck = TRUE;
priv->RxCounter = RegRxCounter;
@@ -3622,25 +3259,16 @@ HalRxCheckStuck819xUsb(struct net_device *dev)
return bStuck;
}
-RESET_TYPE
-RxCheckStuck(struct net_device *dev)
+RESET_TYPE RxCheckStuck(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- //int i;
bool bRxCheck = FALSE;
-// RT_TRACE(COMP_RESET," ==> RxCheckStuck()\n");
- //PlatformAcquireSpinLock(Adapter, RT_RX_SPINLOCK);
-
- if(priv->IrpPendingCount > 1)
+ if (priv->IrpPendingCount > 1)
bRxCheck = TRUE;
- //PlatformReleaseSpinLock(Adapter, RT_RX_SPINLOCK);
-// RT_TRACE(COMP_RESET,"bRxCheck is %d \n",bRxCheck);
- if(bRxCheck)
- {
- if(HalRxCheckStuck819xUsb(dev))
- {
+ if (bRxCheck) {
+ if (HalRxCheckStuck819xUsb(dev)) {
RT_TRACE(COMP_RESET, "RxStuck Condition\n");
return RESET_TYPE_SILENT;
}
@@ -3661,8 +3289,7 @@ RxCheckStuck(struct net_device *dev)
*
* 8185 and 8185b does not implement this function. This is added by Emily at 2006.11.24
*/
-RESET_TYPE
-rtl819x_ifcheck_resetornot(struct net_device *dev)
+RESET_TYPE rtl819x_ifcheck_resetornot(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
RESET_TYPE TxResetType = RESET_TYPE_NORESET;
@@ -3672,10 +3299,8 @@ rtl819x_ifcheck_resetornot(struct net_device *dev)
rfState = priv->ieee80211->eRFPowerState;
TxResetType = TxCheckStuck(dev);
- if( rfState != eRfOff ||
- /*ADAPTER_TEST_STATUS_FLAG(Adapter, ADAPTER_STATUS_FW_DOWNLOAD_FAILURE)) &&*/
- (priv->ieee80211->iw_mode != IW_MODE_ADHOC))
- {
+ if (rfState != eRfOff ||
+ (priv->ieee80211->iw_mode != IW_MODE_ADHOC)) {
// If driver is in the status of firmware download failure , driver skips RF initialization and RF is
// in turned off state. Driver should check whether Rx stuck and do silent reset. And
// if driver is in firmware download failure status, driver should initialize RF in the following
@@ -3686,155 +3311,91 @@ rtl819x_ifcheck_resetornot(struct net_device *dev)
// set, STA cannot hear any packet at all. Emily, 2008.04.12
RxResetType = RxCheckStuck(dev);
}
- if(TxResetType==RESET_TYPE_NORMAL || RxResetType==RESET_TYPE_NORMAL)
+ if (TxResetType == RESET_TYPE_NORMAL || RxResetType == RESET_TYPE_NORMAL) {
return RESET_TYPE_NORMAL;
- else if(TxResetType==RESET_TYPE_SILENT || RxResetType==RESET_TYPE_SILENT){
- RT_TRACE(COMP_RESET,"%s():silent reset\n",__FUNCTION__);
+ } else if (TxResetType == RESET_TYPE_SILENT || RxResetType == RESET_TYPE_SILENT) {
+ RT_TRACE(COMP_RESET, "%s():silent reset\n", __func__);
return RESET_TYPE_SILENT;
- }
- else
+ } else {
return RESET_TYPE_NORESET;
+ }
}
-void rtl8192_cancel_deferred_work(struct r8192_priv* priv);
+void rtl8192_cancel_deferred_work(struct r8192_priv *priv);
int _rtl8192_up(struct net_device *dev);
int rtl8192_close(struct net_device *dev);
-void
-CamRestoreAllEntry( struct net_device *dev)
+void CamRestoreAllEntry(struct net_device *dev)
{
u8 EntryId = 0;
struct r8192_priv *priv = ieee80211_priv(dev);
- u8* MacAddr = priv->ieee80211->current_network.bssid;
+ u8 *MacAddr = priv->ieee80211->current_network.bssid;
static u8 CAM_CONST_ADDR[4][6] = {
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x03}};
- static u8 CAM_CONST_BROAD[] =
- {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x03} };
+ static u8 CAM_CONST_BROAD[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
RT_TRACE(COMP_SEC, "CamRestoreAllEntry: \n");
- if ((priv->ieee80211->pairwise_key_type == KEY_TYPE_WEP40)||
- (priv->ieee80211->pairwise_key_type == KEY_TYPE_WEP104))
- {
+ if ((priv->ieee80211->pairwise_key_type == KEY_TYPE_WEP40) ||
+ (priv->ieee80211->pairwise_key_type == KEY_TYPE_WEP104)) {
- for(EntryId=0; EntryId<4; EntryId++)
- {
- {
- MacAddr = CAM_CONST_ADDR[EntryId];
- setKey(dev,
- EntryId ,
- EntryId,
- priv->ieee80211->pairwise_key_type,
- MacAddr,
- 0,
- NULL);
- }
+ for (EntryId = 0; EntryId < 4; EntryId++) {
+ MacAddr = CAM_CONST_ADDR[EntryId];
+ setKey(dev, EntryId, EntryId,
+ priv->ieee80211->pairwise_key_type,
+ MacAddr, 0, NULL);
}
- }
- else if(priv->ieee80211->pairwise_key_type == KEY_TYPE_TKIP)
- {
+ } else if (priv->ieee80211->pairwise_key_type == KEY_TYPE_TKIP) {
- {
- if(priv->ieee80211->iw_mode == IW_MODE_ADHOC)
- setKey(dev,
- 4,
- 0,
- priv->ieee80211->pairwise_key_type,
- (u8*)dev->dev_addr,
- 0,
- NULL);
- else
- setKey(dev,
- 4,
- 0,
- priv->ieee80211->pairwise_key_type,
- MacAddr,
- 0,
- NULL);
- }
- }
- else if(priv->ieee80211->pairwise_key_type == KEY_TYPE_CCMP)
- {
+ if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
+ setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type,
+ (u8 *)dev->dev_addr, 0, NULL);
+ else
+ setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type,
+ MacAddr, 0, NULL);
+ } else if (priv->ieee80211->pairwise_key_type == KEY_TYPE_CCMP) {
- {
- if(priv->ieee80211->iw_mode == IW_MODE_ADHOC)
- setKey(dev,
- 4,
- 0,
- priv->ieee80211->pairwise_key_type,
- (u8*)dev->dev_addr,
- 0,
- NULL);
- else
- setKey(dev,
- 4,
- 0,
- priv->ieee80211->pairwise_key_type,
- MacAddr,
- 0,
- NULL);
- }
+ if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
+ setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type,
+ (u8 *)dev->dev_addr, 0, NULL);
+ else
+ setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type,
+ MacAddr, 0, NULL);
}
- if(priv->ieee80211->group_key_type == KEY_TYPE_TKIP)
- {
+ if (priv->ieee80211->group_key_type == KEY_TYPE_TKIP) {
MacAddr = CAM_CONST_BROAD;
- for(EntryId=1 ; EntryId<4 ; EntryId++)
- {
- {
- setKey(dev,
- EntryId,
- EntryId,
- priv->ieee80211->group_key_type,
- MacAddr,
- 0,
- NULL);
- }
+ for (EntryId = 1; EntryId < 4; EntryId++) {
+ setKey(dev, EntryId, EntryId,
+ priv->ieee80211->group_key_type,
+ MacAddr, 0, NULL);
}
- if(priv->ieee80211->iw_mode == IW_MODE_ADHOC)
- setKey(dev,
- 0,
- 0,
- priv->ieee80211->group_key_type,
- CAM_CONST_ADDR[0],
- 0,
- NULL);
- }
- else if(priv->ieee80211->group_key_type == KEY_TYPE_CCMP)
- {
+ if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
+ setKey(dev, 0, 0, priv->ieee80211->group_key_type,
+ CAM_CONST_ADDR[0], 0, NULL);
+ } else if (priv->ieee80211->group_key_type == KEY_TYPE_CCMP) {
MacAddr = CAM_CONST_BROAD;
- for(EntryId=1; EntryId<4 ; EntryId++)
- {
- {
- setKey(dev,
- EntryId ,
- EntryId,
- priv->ieee80211->group_key_type,
- MacAddr,
- 0,
- NULL);
- }
+ for (EntryId = 1; EntryId < 4; EntryId++) {
+ setKey(dev, EntryId, EntryId,
+ priv->ieee80211->group_key_type,
+ MacAddr, 0, NULL);
}
- if(priv->ieee80211->iw_mode == IW_MODE_ADHOC)
- setKey(dev,
- 0 ,
- 0,
- priv->ieee80211->group_key_type,
- CAM_CONST_ADDR[0],
- 0,
- NULL);
+ if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
+ setKey(dev, 0, 0, priv->ieee80211->group_key_type,
+ CAM_CONST_ADDR[0], 0, NULL);
}
}
//////////////////////////////////////////////////////////////
@@ -3843,10 +3404,8 @@ CamRestoreAllEntry( struct net_device *dev)
// The method checking Tx/Rx stuck of this function is supported by FW,
// which reports Tx and Rx counter to register 0x128 and 0x130.
//////////////////////////////////////////////////////////////
-void
-rtl819x_ifsilentreset(struct net_device *dev)
+void rtl819x_ifsilentreset(struct net_device *dev)
{
- //OCTET_STRING asocpdu;
struct r8192_priv *priv = ieee80211_priv(dev);
u8 reset_times = 0;
int reset_status = 0;
@@ -3856,26 +3415,21 @@ rtl819x_ifsilentreset(struct net_device *dev)
// 2007.07.20. If we need to check CCK stop, please uncomment this line.
//bStuck = Adapter->HalFunc.CheckHWStopHandler(Adapter);
- if(priv->ResetProgress==RESET_TYPE_NORESET)
- {
+ if (priv->ResetProgress == RESET_TYPE_NORESET) {
RESET_START:
- RT_TRACE(COMP_RESET,"=========>Reset progress!! \n");
+ RT_TRACE(COMP_RESET, "=========>Reset progress!! \n");
// Set the variable for reset.
priv->ResetProgress = RESET_TYPE_SILENT;
-// rtl8192_close(dev);
down(&priv->wx_sem);
- if(priv->up == 0)
- {
- RT_TRACE(COMP_ERR,"%s():the driver is not up! return\n",__FUNCTION__);
+ if (priv->up == 0) {
+ RT_TRACE(COMP_ERR, "%s():the driver is not up! return\n", __func__);
up(&priv->wx_sem);
- return ;
+ return;
}
priv->up = 0;
- RT_TRACE(COMP_RESET,"%s():======>start to down the driver\n",__FUNCTION__);
-// if(!netif_queue_stopped(dev))
-// netif_stop_queue(dev);
+ RT_TRACE(COMP_RESET, "%s():======>start to down the driver\n", __func__);
rtl8192_rtx_disable(dev);
rtl8192_cancel_deferred_work(priv);
@@ -3883,55 +3437,44 @@ RESET_START:
del_timer_sync(&priv->watch_dog_timer);
ieee->sync_scan_hurryup = 1;
- if(ieee->state == IEEE80211_LINKED)
- {
+ if (ieee->state == IEEE80211_LINKED) {
down(&ieee->wx_sem);
- printk("ieee->state is IEEE80211_LINKED\n");
+ netdev_dbg(dev, "ieee->state is IEEE80211_LINKED\n");
ieee80211_stop_send_beacons(priv->ieee80211);
del_timer_sync(&ieee->associate_timer);
cancel_delayed_work(&ieee->associate_retry_wq);
ieee80211_stop_scan(ieee);
netif_carrier_off(dev);
up(&ieee->wx_sem);
+ } else {
+ netdev_dbg(dev, "ieee->state is NOT LINKED\n");
+ ieee80211_softmac_stop_protocol(priv->ieee80211);
}
- else{
- printk("ieee->state is NOT LINKED\n");
- ieee80211_softmac_stop_protocol(priv->ieee80211); }
up(&priv->wx_sem);
- RT_TRACE(COMP_RESET,"%s():<==========down process is finished\n",__FUNCTION__);
- //rtl8192_irq_disable(dev);
- RT_TRACE(COMP_RESET,"%s():===========>start up the driver\n",__FUNCTION__);
+ RT_TRACE(COMP_RESET, "%s():<==========down process is finished\n", __func__);
+ RT_TRACE(COMP_RESET, "%s():===========>start up the driver\n", __func__);
reset_status = _rtl8192_up(dev);
- RT_TRACE(COMP_RESET,"%s():<===========up process is finished\n",__FUNCTION__);
- if(reset_status == -EAGAIN)
- {
- if(reset_times < 3)
- {
+ RT_TRACE(COMP_RESET, "%s():<===========up process is finished\n", __func__);
+ if (reset_status == -EAGAIN) {
+ if (reset_times < 3) {
reset_times++;
goto RESET_START;
- }
- else
- {
- RT_TRACE(COMP_ERR," ERR!!! %s(): Reset Failed!!\n", __FUNCTION__);
+ } else {
+ RT_TRACE(COMP_ERR, " ERR!!! %s(): Reset Failed!!\n", __func__);
}
}
ieee->is_silent_reset = 1;
EnableHWSecurityConfig8192(dev);
- if(ieee->state == IEEE80211_LINKED && ieee->iw_mode == IW_MODE_INFRA)
- {
+ if (ieee->state == IEEE80211_LINKED && ieee->iw_mode == IW_MODE_INFRA) {
ieee->set_chan(ieee->dev, ieee->current_network.channel);
queue_work(ieee->wq, &ieee->associate_complete_wq);
- }
- else if(ieee->state == IEEE80211_LINKED && ieee->iw_mode == IW_MODE_ADHOC)
- {
+ } else if (ieee->state == IEEE80211_LINKED && ieee->iw_mode == IW_MODE_ADHOC) {
ieee->set_chan(ieee->dev, ieee->current_network.channel);
ieee->link_change(ieee->dev);
- // notify_wx_assoc_event(ieee);
-
ieee80211_start_send_beacons(ieee);
if (ieee->data_hard_resume)
@@ -3944,7 +3487,7 @@ RESET_START:
priv->ResetProgress = RESET_TYPE_NORESET;
priv->reset_count++;
- priv->bForcedSilentReset =false;
+ priv->bForcedSilentReset = false;
priv->bResetInProgress = false;
// For test --> force write UFWP.
@@ -3953,50 +3496,36 @@ RESET_START:
}
}
-void CAM_read_entry(
- struct net_device *dev,
- u32 iIndex
-)
+void CAM_read_entry(struct net_device *dev, u32 iIndex)
{
- u32 target_command=0;
- u32 target_content=0;
- u8 entry_i=0;
- u32 ulStatus;
- s32 i=100;
-// printk("=======>start read CAM\n");
- for(entry_i=0;entry_i<CAM_CONTENT_COUNT;entry_i++)
- {
- // polling bit, and No Write enable, and address
- target_command= entry_i+CAM_CONTENT_COUNT*iIndex;
- target_command= target_command | BIT31;
+ u32 target_command = 0;
+ u32 target_content = 0;
+ u8 entry_i = 0;
+ u32 ulStatus;
+ s32 i = 100;
+ for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+ // polling bit, and No Write enable, and address
+		target_command = entry_i + CAM_CONTENT_COUNT * iIndex;
+ target_command = target_command | BIT31;
- //Check polling bit is clear
-// mdelay(1);
- while((i--)>=0)
- {
- ulStatus = read_nic_dword(dev, RWCAM);
- if(ulStatus & BIT31){
+ //Check polling bit is clear
+ while ((i--) >= 0) {
+ read_nic_dword(dev, RWCAM, &ulStatus);
+ if (ulStatus & BIT31)
continue;
- }
- else{
+ else
break;
- }
}
write_nic_dword(dev, RWCAM, target_command);
- RT_TRACE(COMP_SEC,"CAM_read_entry(): WRITE A0: %x \n",target_command);
- // printk("CAM_read_entry(): WRITE A0: %lx \n",target_command);
- target_content = read_nic_dword(dev, RCAMO);
- RT_TRACE(COMP_SEC, "CAM_read_entry(): WRITE A8: %x \n",target_content);
- // printk("CAM_read_entry(): WRITE A8: %lx \n",target_content);
+ RT_TRACE(COMP_SEC, "CAM_read_entry(): WRITE A0: %x \n", target_command);
+ read_nic_dword(dev, RCAMO, &target_content);
+ RT_TRACE(COMP_SEC, "CAM_read_entry(): WRITE A8: %x \n", target_content);
}
printk("\n");
}
-void rtl819x_update_rxcounts(
- struct r8192_priv *priv,
- u32* TotalRxBcnNum,
- u32* TotalRxDataNum
-)
+void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
+ u32 *TotalRxDataNum)
{
u16 SlotIndex;
u8 i;
@@ -4007,80 +3536,68 @@ void rtl819x_update_rxcounts(
SlotIndex = (priv->ieee80211->LinkDetectInfo.SlotIndex++)%(priv->ieee80211->LinkDetectInfo.SlotNum);
priv->ieee80211->LinkDetectInfo.RxBcnNum[SlotIndex] = priv->ieee80211->LinkDetectInfo.NumRecvBcnInPeriod;
priv->ieee80211->LinkDetectInfo.RxDataNum[SlotIndex] = priv->ieee80211->LinkDetectInfo.NumRecvDataInPeriod;
- for( i=0; i<priv->ieee80211->LinkDetectInfo.SlotNum; i++ ){
+ for (i = 0; i < priv->ieee80211->LinkDetectInfo.SlotNum; i++) {
*TotalRxBcnNum += priv->ieee80211->LinkDetectInfo.RxBcnNum[i];
*TotalRxDataNum += priv->ieee80211->LinkDetectInfo.RxDataNum[i];
}
}
-extern void rtl819x_watchdog_wqcallback(struct work_struct *work)
+extern void rtl819x_watchdog_wqcallback(struct work_struct *work)
{
- struct delayed_work *dwork = container_of(work,struct delayed_work,work);
- struct r8192_priv *priv = container_of(dwork,struct r8192_priv,watch_dog_wq);
- struct net_device *dev = priv->ieee80211->dev;
- struct ieee80211_device* ieee = priv->ieee80211;
+ struct delayed_work *dwork = container_of(work, struct delayed_work, work);
+ struct r8192_priv *priv = container_of(dwork, struct r8192_priv, watch_dog_wq);
+ struct net_device *dev = priv->ieee80211->dev;
+ struct ieee80211_device *ieee = priv->ieee80211;
RESET_TYPE ResetType = RESET_TYPE_NORESET;
static u8 check_reset_cnt;
bool bBusyTraffic = false;
+ u32 TotalRxBcnNum = 0;
+ u32 TotalRxDataNum = 0;
- if(!priv->up)
+ if (!priv->up)
return;
hal_dm_watchdog(dev);
- {//to get busy traffic condition
- if(ieee->state == IEEE80211_LINKED)
- {
- if( ieee->LinkDetectInfo.NumRxOkInPeriod> 666 ||
- ieee->LinkDetectInfo.NumTxOkInPeriod> 666 ) {
- bBusyTraffic = true;
- }
- ieee->LinkDetectInfo.NumRxOkInPeriod = 0;
- ieee->LinkDetectInfo.NumTxOkInPeriod = 0;
- ieee->LinkDetectInfo.bBusyTraffic = bBusyTraffic;
+ //to get busy traffic condition
+ if (ieee->state == IEEE80211_LINKED) {
+ if (ieee->LinkDetectInfo.NumRxOkInPeriod > 666 ||
+		    ieee->LinkDetectInfo.NumTxOkInPeriod > 666) {
+ bBusyTraffic = true;
}
+ ieee->LinkDetectInfo.NumRxOkInPeriod = 0;
+ ieee->LinkDetectInfo.NumTxOkInPeriod = 0;
+ ieee->LinkDetectInfo.bBusyTraffic = bBusyTraffic;
}
//added by amy for AP roaming
- {
- if(priv->ieee80211->state == IEEE80211_LINKED && priv->ieee80211->iw_mode == IW_MODE_INFRA)
- {
- u32 TotalRxBcnNum = 0;
- u32 TotalRxDataNum = 0;
+ if (priv->ieee80211->state == IEEE80211_LINKED && priv->ieee80211->iw_mode == IW_MODE_INFRA) {
- rtl819x_update_rxcounts(priv, &TotalRxBcnNum, &TotalRxDataNum);
- if((TotalRxBcnNum+TotalRxDataNum) == 0)
- {
- #ifdef TODO
- if(rfState == eRfOff)
- RT_TRACE(COMP_ERR,"========>%s()\n",__FUNCTION__);
- #endif
- printk("===>%s(): AP is power off,connect another one\n",__FUNCTION__);
- // Dot11d_Reset(dev);
- priv->ieee80211->state = IEEE80211_ASSOCIATING;
- notify_wx_assoc_event(priv->ieee80211);
- RemovePeerTS(priv->ieee80211,priv->ieee80211->current_network.bssid);
- priv->ieee80211->link_change(dev);
- queue_work(priv->ieee80211->wq, &priv->ieee80211->associate_procedure_wq);
+ rtl819x_update_rxcounts(priv, &TotalRxBcnNum, &TotalRxDataNum);
+		if ((TotalRxBcnNum + TotalRxDataNum) == 0) {
+#ifdef TODO
+ if (rfState == eRfOff)
+ RT_TRACE(COMP_ERR, "========>%s()\n", __func__);
+#endif
+			netdev_dbg(dev, "===>%s(): AP is powered off, connect to another one\n", __func__);
+ priv->ieee80211->state = IEEE80211_ASSOCIATING;
+ notify_wx_assoc_event(priv->ieee80211);
+ RemovePeerTS(priv->ieee80211, priv->ieee80211->current_network.bssid);
+ priv->ieee80211->link_change(dev);
+ queue_work(priv->ieee80211->wq, &priv->ieee80211->associate_procedure_wq);
- }
}
- priv->ieee80211->LinkDetectInfo.NumRecvBcnInPeriod=0;
- priv->ieee80211->LinkDetectInfo.NumRecvDataInPeriod=0;
}
-// CAM_read_entry(dev,4);
+ priv->ieee80211->LinkDetectInfo.NumRecvBcnInPeriod = 0;
+ priv->ieee80211->LinkDetectInfo.NumRecvDataInPeriod = 0;
//check if reset the driver
- if(check_reset_cnt++ >= 3)
- {
+ if (check_reset_cnt++ >= 3) {
ResetType = rtl819x_ifcheck_resetornot(dev);
check_reset_cnt = 3;
- //DbgPrint("Start to check silent reset\n");
}
- // RT_TRACE(COMP_RESET,"%s():priv->force_reset is %d,priv->ResetProgress is %d, priv->bForcedSilentReset is %d,priv->bDisableNormalResetCheck is %d,ResetType is %d\n",__FUNCTION__,priv->force_reset,priv->ResetProgress,priv->bForcedSilentReset,priv->bDisableNormalResetCheck,ResetType);
- if( (priv->force_reset) || (priv->ResetProgress==RESET_TYPE_NORESET &&
- (priv->bForcedSilentReset ||
- (!priv->bDisableNormalResetCheck && ResetType==RESET_TYPE_SILENT)))) // This is control by OID set in Pomelo
- {
- RT_TRACE(COMP_RESET,"%s():priv->force_reset is %d,priv->ResetProgress is %d, priv->bForcedSilentReset is %d,priv->bDisableNormalResetCheck is %d,ResetType is %d\n",__FUNCTION__,priv->force_reset,priv->ResetProgress,priv->bForcedSilentReset,priv->bDisableNormalResetCheck,ResetType);
+ if ((priv->force_reset) || (priv->ResetProgress == RESET_TYPE_NORESET &&
+ (priv->bForcedSilentReset ||
+	    (!priv->bDisableNormalResetCheck && ResetType == RESET_TYPE_SILENT)))) { // This is controlled by OID set in Pomelo
+ RT_TRACE(COMP_RESET, "%s():priv->force_reset is %d,priv->ResetProgress is %d, priv->bForcedSilentReset is %d,priv->bDisableNormalResetCheck is %d,ResetType is %d\n", __func__, priv->force_reset, priv->ResetProgress, priv->bForcedSilentReset, priv->bDisableNormalResetCheck, ResetType);
rtl819x_ifsilentreset(dev);
}
priv->force_reset = false;
@@ -4093,33 +3610,29 @@ extern void rtl819x_watchdog_wqcallback(struct work_struct *work)
void watch_dog_timer_callback(unsigned long data)
{
struct r8192_priv *priv = ieee80211_priv((struct net_device *) data);
- //printk("===============>watch_dog timer\n");
- queue_delayed_work(priv->priv_wq,&priv->watch_dog_wq, 0);
+ queue_delayed_work(priv->priv_wq, &priv->watch_dog_wq, 0);
mod_timer(&priv->watch_dog_timer, jiffies + MSECS(IEEE80211_WATCH_DOG_TIME));
}
int _rtl8192_up(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- //int i;
int init_status = 0;
- priv->up=1;
- priv->ieee80211->ieee_up=1;
+ priv->up = 1;
+ priv->ieee80211->ieee_up = 1;
RT_TRACE(COMP_INIT, "Bringing up iface");
init_status = rtl8192_adapter_start(dev);
- if(!init_status)
- {
- RT_TRACE(COMP_ERR,"ERR!!! %s(): initialization failed!\n", __FUNCTION__);
- priv->up=priv->ieee80211->ieee_up = 0;
+ if (!init_status) {
+ RT_TRACE(COMP_ERR, "ERR!!! %s(): initialization failed!\n", __func__);
+ priv->up = priv->ieee80211->ieee_up = 0;
return -EAGAIN;
}
RT_TRACE(COMP_INIT, "start adapter finished\n");
rtl8192_rx_enable(dev);
-// rtl8192_tx_enable(dev);
- if(priv->ieee80211->state != IEEE80211_LINKED)
- ieee80211_softmac_start_protocol(priv->ieee80211);
+ if (priv->ieee80211->state != IEEE80211_LINKED)
+ ieee80211_softmac_start_protocol(priv->ieee80211);
ieee80211_reset_queue(priv->ieee80211);
watch_dog_timer_callback((unsigned long) dev);
- if(!netif_queue_stopped(dev))
+ if (!netif_queue_stopped(dev))
netif_start_queue(dev);
else
netif_wake_queue(dev);
@@ -4172,40 +3685,35 @@ int rtl8192_down(struct net_device *dev)
if (priv->up == 0) return -1;
- priv->up=0;
+ priv->up = 0;
priv->ieee80211->ieee_up = 0;
- RT_TRACE(COMP_DOWN, "==========>%s()\n", __FUNCTION__);
-/* FIXME */
+ RT_TRACE(COMP_DOWN, "==========>%s()\n", __func__);
+ /* FIXME */
if (!netif_queue_stopped(dev))
netif_stop_queue(dev);
rtl8192_rtx_disable(dev);
- //rtl8192_irq_disable(dev);
- /* Tx related queue release */
- for(i = 0; i < MAX_QUEUE_SIZE; i++) {
- skb_queue_purge(&priv->ieee80211->skb_waitQ [i]);
- }
- for(i = 0; i < MAX_QUEUE_SIZE; i++) {
- skb_queue_purge(&priv->ieee80211->skb_aggQ [i]);
- }
+ /* Tx related queue release */
+ for (i = 0; i < MAX_QUEUE_SIZE; i++)
+ skb_queue_purge(&priv->ieee80211->skb_waitQ[i]);
+ for (i = 0; i < MAX_QUEUE_SIZE; i++)
+ skb_queue_purge(&priv->ieee80211->skb_aggQ[i]);
- for(i = 0; i < MAX_QUEUE_SIZE; i++) {
- skb_queue_purge(&priv->ieee80211->skb_drv_aggQ [i]);
- }
+ for (i = 0; i < MAX_QUEUE_SIZE; i++)
+ skb_queue_purge(&priv->ieee80211->skb_drv_aggQ[i]);
//as cancel_delayed_work will del work->timer, so if work is not defined as struct delayed_work, it will corrupt
-// flush_scheduled_work();
rtl8192_cancel_deferred_work(priv);
deinit_hal_dm(dev);
del_timer_sync(&priv->watch_dog_timer);
ieee80211_softmac_stop_protocol(priv->ieee80211);
- memset(&priv->ieee80211->current_network, 0 , offsetof(struct ieee80211_network, list));
- RT_TRACE(COMP_DOWN, "<==========%s()\n", __FUNCTION__);
+ memset(&priv->ieee80211->current_network, 0, offsetof(struct ieee80211_network, list));
+ RT_TRACE(COMP_DOWN, "<==========%s()\n", __func__);
- return 0;
+ return 0;
}
@@ -4213,27 +3721,19 @@ void rtl8192_commit(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
int reset_status = 0;
- //u8 reset_times = 0;
- if (priv->up == 0) return ;
+ if (priv->up == 0) return;
priv->up = 0;
rtl8192_cancel_deferred_work(priv);
del_timer_sync(&priv->watch_dog_timer);
- //cancel_delayed_work(&priv->SwChnlWorkItem);
ieee80211_softmac_stop_protocol(priv->ieee80211);
- //rtl8192_irq_disable(dev);
rtl8192_rtx_disable(dev);
reset_status = _rtl8192_up(dev);
}
-/*
-void rtl8192_restart(struct net_device *dev)
-{
- struct r8192_priv *priv = ieee80211_priv(dev);
-*/
void rtl8192_restart(struct work_struct *work)
{
struct r8192_priv *priv = container_of(work, struct r8192_priv, reset_wq);
@@ -4251,19 +3751,13 @@ static void r8192_set_multicast(struct net_device *dev)
struct r8192_priv *priv = ieee80211_priv(dev);
short promisc;
- //down(&priv->wx_sem);
-
/* FIXME FIXME */
- promisc = (dev->flags & IFF_PROMISC) ? 1:0;
+ promisc = (dev->flags & IFF_PROMISC) ? 1 : 0;
if (promisc != priv->promisc)
- // rtl8192_commit(dev);
-
- priv->promisc = promisc;
- //schedule_work(&priv->reset_wq);
- //up(&priv->wx_sem);
+ priv->promisc = promisc;
}
@@ -4287,99 +3781,90 @@ int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct iwreq *wrq = (struct iwreq *)rq;
- int ret=-1;
+ int ret = -1;
struct ieee80211_device *ieee = priv->ieee80211;
u32 key[4];
- u8 broadcast_addr[6] = {0xff,0xff,0xff,0xff,0xff,0xff};
+ u8 broadcast_addr[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct iw_point *p = &wrq->u.data;
- struct ieee_param *ipw = NULL;//(struct ieee_param *)wrq->u.data.pointer;
+ struct ieee_param *ipw = NULL;
down(&priv->wx_sem);
- if (p->length < sizeof(struct ieee_param) || !p->pointer){
- ret = -EINVAL;
- goto out;
+ if (p->length < sizeof(struct ieee_param) || !p->pointer) {
+ ret = -EINVAL;
+ goto out;
}
- ipw = kmalloc(p->length, GFP_KERNEL);
- if (ipw == NULL){
- ret = -ENOMEM;
- goto out;
- }
- if (copy_from_user(ipw, p->pointer, p->length)) {
+ ipw = kmalloc(p->length, GFP_KERNEL);
+ if (ipw == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (copy_from_user(ipw, p->pointer, p->length)) {
kfree(ipw);
- ret = -EFAULT;
- goto out;
+ ret = -EFAULT;
+ goto out;
}
switch (cmd) {
case RTL_IOCTL_WPA_SUPPLICANT:
- //parse here for HW security
- if (ipw->cmd == IEEE_CMD_SET_ENCRYPTION)
- {
- if (ipw->u.crypt.set_tx)
- {
- if (strcmp(ipw->u.crypt.alg, "CCMP") == 0)
+ //parse here for HW security
+ if (ipw->cmd == IEEE_CMD_SET_ENCRYPTION) {
+ if (ipw->u.crypt.set_tx) {
+ if (strcmp(ipw->u.crypt.alg, "CCMP") == 0) {
ieee->pairwise_key_type = KEY_TYPE_CCMP;
- else if (strcmp(ipw->u.crypt.alg, "TKIP") == 0)
+ } else if (strcmp(ipw->u.crypt.alg, "TKIP") == 0) {
ieee->pairwise_key_type = KEY_TYPE_TKIP;
- else if (strcmp(ipw->u.crypt.alg, "WEP") == 0)
- {
+ } else if (strcmp(ipw->u.crypt.alg, "WEP") == 0) {
if (ipw->u.crypt.key_len == 13)
ieee->pairwise_key_type = KEY_TYPE_WEP104;
else if (ipw->u.crypt.key_len == 5)
ieee->pairwise_key_type = KEY_TYPE_WEP40;
- }
- else
+ } else {
ieee->pairwise_key_type = KEY_TYPE_NA;
+ }
- if (ieee->pairwise_key_type)
- {
- memcpy((u8*)key, ipw->u.crypt.key, 16);
+ if (ieee->pairwise_key_type) {
+ memcpy((u8 *)key, ipw->u.crypt.key, 16);
EnableHWSecurityConfig8192(dev);
- //we fill both index entry and 4th entry for pairwise key as in IPW interface, adhoc will only get here, so we need index entry for its default key serching!
- //added by WB.
- setKey(dev, 4, ipw->u.crypt.idx, ieee->pairwise_key_type, (u8*)ieee->ap_mac_addr, 0, key);
+				//we fill both the index entry and the 4th entry for the pairwise key; as in the IPW interface, only adhoc gets here, so we need the index entry for its default key searching!
+ //added by WB.
+ setKey(dev, 4, ipw->u.crypt.idx, ieee->pairwise_key_type, (u8 *)ieee->ap_mac_addr, 0, key);
if (ieee->auth_mode != 2)
- setKey(dev, ipw->u.crypt.idx, ipw->u.crypt.idx, ieee->pairwise_key_type, (u8*)ieee->ap_mac_addr, 0, key);
+ setKey(dev, ipw->u.crypt.idx, ipw->u.crypt.idx, ieee->pairwise_key_type, (u8 *)ieee->ap_mac_addr, 0, key);
}
- }
- else //if (ipw->u.crypt.idx) //group key use idx > 0
- {
- memcpy((u8*)key, ipw->u.crypt.key, 16);
- if (strcmp(ipw->u.crypt.alg, "CCMP") == 0)
- ieee->group_key_type= KEY_TYPE_CCMP;
- else if (strcmp(ipw->u.crypt.alg, "TKIP") == 0)
+ } else {
+ memcpy((u8 *)key, ipw->u.crypt.key, 16);
+ if (strcmp(ipw->u.crypt.alg, "CCMP") == 0) {
+ ieee->group_key_type = KEY_TYPE_CCMP;
+ } else if (strcmp(ipw->u.crypt.alg, "TKIP") == 0) {
ieee->group_key_type = KEY_TYPE_TKIP;
- else if (strcmp(ipw->u.crypt.alg, "WEP") == 0)
- {
+ } else if (strcmp(ipw->u.crypt.alg, "WEP") == 0) {
if (ipw->u.crypt.key_len == 13)
ieee->group_key_type = KEY_TYPE_WEP104;
else if (ipw->u.crypt.key_len == 5)
ieee->group_key_type = KEY_TYPE_WEP40;
- }
- else
+ } else {
ieee->group_key_type = KEY_TYPE_NA;
+ }
- if (ieee->group_key_type)
- {
- setKey( dev,
- ipw->u.crypt.idx,
- ipw->u.crypt.idx, //KeyIndex
- ieee->group_key_type, //KeyType
- broadcast_addr, //MacAddr
- 0, //DefaultKey
- key); //KeyContent
+ if (ieee->group_key_type) {
+ setKey(dev, ipw->u.crypt.idx,
+ ipw->u.crypt.idx, //KeyIndex
+ ieee->group_key_type, //KeyType
+ broadcast_addr, //MacAddr
+ 0, //DefaultKey
+ key); //KeyContent
}
}
}
#ifdef JOHN_HWSEC_DEBUG
//john's test 0711
printk("@@ wrq->u pointer = ");
- for(i=0;i<wrq->u.data.length;i++){
- if(i%10==0) printk("\n");
- printk( "%8x|", ((u32*)wrq->u.data.pointer)[i] );
+ for (i = 0; i < wrq->u.data.length; i++) {
+		if (i % 10 == 0) printk("\n");
+ printk("%8x|", ((u32 *)wrq->u.data.pointer)[i]);
}
printk("\n");
#endif /*JOHN_HWSEC_DEBUG*/
@@ -4401,8 +3886,8 @@ u8 HwRateToMRate90(bool bIsHT, u8 rate)
{
u8 ret_rate = 0xff;
- if(!bIsHT) {
- switch(rate) {
+ if (!bIsHT) {
+ switch (rate) {
case DESC90_RATE1M: ret_rate = MGN_1M; break;
case DESC90_RATE2M: ret_rate = MGN_2M; break;
case DESC90_RATE5_5M: ret_rate = MGN_5_5M; break;
@@ -4423,7 +3908,7 @@ u8 HwRateToMRate90(bool bIsHT, u8 rate)
}
} else {
- switch(rate) {
+ switch (rate) {
case DESC90_RATEMCS0: ret_rate = MGN_MCS0; break;
case DESC90_RATEMCS1: ret_rate = MGN_MCS1; break;
case DESC90_RATEMCS2: ret_rate = MGN_MCS2; break;
@@ -4444,7 +3929,7 @@ u8 HwRateToMRate90(bool bIsHT, u8 rate)
default:
ret_rate = 0xff;
- RT_TRACE(COMP_RECV, "HwRateToMRate90(): Non supported Rate [%x], bIsHT = %d!!!\n",rate, bIsHT);
+ RT_TRACE(COMP_RECV, "HwRateToMRate90(): Non supported Rate [%x], bIsHT = %d!!!\n", rate, bIsHT);
break;
}
}
@@ -4467,11 +3952,11 @@ u8 HwRateToMRate90(bool bIsHT, u8 rate)
* Return:
* None
*/
-void UpdateRxPktTimeStamp8190 (struct net_device *dev, struct ieee80211_rx_stats *stats)
+void UpdateRxPktTimeStamp8190(struct net_device *dev, struct ieee80211_rx_stats *stats)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- if(stats->bIsAMPDU && !stats->bFirstMPDU) {
+ if (stats->bIsAMPDU && !stats->bFirstMPDU) {
stats->mac_time[0] = priv->LastRxDescTSFLow;
stats->mac_time[1] = priv->LastRxDescTSFHigh;
} else {
@@ -4482,7 +3967,7 @@ void UpdateRxPktTimeStamp8190 (struct net_device *dev, struct ieee80211_rx_stats
//by amy 080606
-long rtl819x_translate_todbm(u8 signal_strength_index )// 0-100 index.
+long rtl819x_translate_todbm(u8 signal_strength_index)// 0-100 index.
{
long signal_power; // in dBm.
@@ -4498,12 +3983,11 @@ long rtl819x_translate_todbm(u8 signal_strength_index )// 0-100 index.
be a local static. Otherwise, it may increase when we return from S3/S4. The
value will be kept in memory or disk. Declare the value in the adaptor
and it will be reinitialized when returned from S3/S4. */
-void rtl8192_process_phyinfo(struct r8192_priv * priv,u8* buffer, struct ieee80211_rx_stats * pprevious_stats, struct ieee80211_rx_stats * pcurrent_stats)
+void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer, struct ieee80211_rx_stats *pprevious_stats, struct ieee80211_rx_stats *pcurrent_stats)
{
bool bcheck = false;
u8 rfpath;
u32 nspatial_stream, tmp_val;
- //u8 i;
static u32 slide_rssi_index, slide_rssi_statistics;
static u32 slide_evm_index, slide_evm_statistics;
static u32 last_rssi, last_evm;
@@ -4512,8 +3996,8 @@ void rtl8192_process_phyinfo(struct r8192_priv * priv,u8* buffer, struct ieee802
static u32 last_beacon_adc_pwdb;
struct ieee80211_hdr_3addr *hdr;
- u16 sc ;
- unsigned int frag,seq;
+ u16 sc;
+ unsigned int frag, seq;
hdr = (struct ieee80211_hdr_3addr *)buffer;
sc = le16_to_cpu(hdr->seq_ctl);
frag = WLAN_GET_SEQ_FRAG(sc);
@@ -4523,14 +4007,12 @@ void rtl8192_process_phyinfo(struct r8192_priv * priv,u8* buffer, struct ieee802
//
// Check whether we should take the previous packet into accounting
//
- if(!pprevious_stats->bIsAMPDU)
- {
+ if (!pprevious_stats->bIsAMPDU) {
// if previous packet is not aggregated packet
bcheck = true;
}
- if(slide_rssi_statistics++ >= PHY_RSSI_SLID_WIN_MAX)
- {
+ if (slide_rssi_statistics++ >= PHY_RSSI_SLID_WIN_MAX) {
slide_rssi_statistics = PHY_RSSI_SLID_WIN_MAX;
last_rssi = priv->stats.slide_signal_strength[slide_rssi_index];
priv->stats.slide_rssi_total -= last_rssi;
@@ -4538,7 +4020,7 @@ void rtl8192_process_phyinfo(struct r8192_priv * priv,u8* buffer, struct ieee802
priv->stats.slide_rssi_total += pprevious_stats->SignalStrength;
priv->stats.slide_signal_strength[slide_rssi_index++] = pprevious_stats->SignalStrength;
- if(slide_rssi_index >= PHY_RSSI_SLID_WIN_MAX)
+ if (slide_rssi_index >= PHY_RSSI_SLID_WIN_MAX)
slide_rssi_index = 0;
// <1> Showed on UI for user, in dbm
@@ -4548,13 +4030,12 @@ void rtl8192_process_phyinfo(struct r8192_priv * priv,u8* buffer, struct ieee802
//
// If the previous packet does not match the criteria, neglect it
//
- if(!pprevious_stats->bPacketMatchBSSID)
- {
- if(!pprevious_stats->bToSelfBA)
+ if (!pprevious_stats->bPacketMatchBSSID) {
+ if (!pprevious_stats->bToSelfBA)
return;
}
- if(!bcheck)
+ if (!bcheck)
return;
@@ -4570,33 +4051,25 @@ void rtl8192_process_phyinfo(struct r8192_priv * priv,u8* buffer, struct ieee802
// <2> Showed on UI for engineering
// hardware does not provide rssi information for each rf path in CCK
- if(!pprevious_stats->bIsCCK && (pprevious_stats->bPacketToSelf || pprevious_stats->bToSelfBA))
- {
- for (rfpath = RF90_PATH_A; rfpath < priv->NumTotalRFPath; rfpath++)
- {
- if (!rtl8192_phy_CheckIsLegalRFPath(priv->ieee80211->dev, rfpath))
- continue;
+ if (!pprevious_stats->bIsCCK && (pprevious_stats->bPacketToSelf || pprevious_stats->bToSelfBA)) {
+ for (rfpath = RF90_PATH_A; rfpath < priv->NumTotalRFPath; rfpath++) {
+ if (!rtl8192_phy_CheckIsLegalRFPath(priv->ieee80211->dev, rfpath))
+ continue;
//Fixed by Jacken 2008-03-20
- if(priv->stats.rx_rssi_percentage[rfpath] == 0)
- {
+ if (priv->stats.rx_rssi_percentage[rfpath] == 0)
priv->stats.rx_rssi_percentage[rfpath] = pprevious_stats->RxMIMOSignalStrength[rfpath];
- //DbgPrint("MIMO RSSI initialize \n");
- }
- if(pprevious_stats->RxMIMOSignalStrength[rfpath] > priv->stats.rx_rssi_percentage[rfpath])
- {
+ if (pprevious_stats->RxMIMOSignalStrength[rfpath] > priv->stats.rx_rssi_percentage[rfpath]) {
priv->stats.rx_rssi_percentage[rfpath] =
- ( (priv->stats.rx_rssi_percentage[rfpath]*(Rx_Smooth_Factor-1)) +
- (pprevious_stats->RxMIMOSignalStrength[rfpath])) /(Rx_Smooth_Factor);
+ ((priv->stats.rx_rssi_percentage[rfpath]*(Rx_Smooth_Factor-1)) +
+ (pprevious_stats->RxMIMOSignalStrength[rfpath])) /(Rx_Smooth_Factor);
priv->stats.rx_rssi_percentage[rfpath] = priv->stats.rx_rssi_percentage[rfpath] + 1;
- }
- else
- {
+ } else {
priv->stats.rx_rssi_percentage[rfpath] =
- ( (priv->stats.rx_rssi_percentage[rfpath]*(Rx_Smooth_Factor-1)) +
- (pprevious_stats->RxMIMOSignalStrength[rfpath])) /(Rx_Smooth_Factor);
+ ((priv->stats.rx_rssi_percentage[rfpath]*(Rx_Smooth_Factor-1)) +
+ (pprevious_stats->RxMIMOSignalStrength[rfpath])) /(Rx_Smooth_Factor);
}
- RT_TRACE(COMP_DBG,"priv->stats.rx_rssi_percentage[rfPath] = %d \n" ,priv->stats.rx_rssi_percentage[rfpath] );
+ RT_TRACE(COMP_DBG, "priv->stats.rx_rssi_percentage[rfPath] = %d \n", priv->stats.rx_rssi_percentage[rfpath]);
}
}
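Note on the rx_rssi_percentage update just above: it is an exponential moving average in which each new per-path RSSI sample is blended in with weight 1/Rx_Smooth_Factor, and the extra +1 on the rising branch compensates integer truncation so the average can actually climb up to a higher sample. An illustrative standalone helper (not the driver's code; uint32_t stands in for u32, and the factor is passed in rather than fixed):

        #include <stdint.h>

        /* Sliding-average update in the style of the RSSI smoothing above;
         * 'factor' corresponds to the driver's Rx_Smooth_Factor. */
        static uint32_t smooth(uint32_t avg, uint32_t sample, uint32_t factor)
        {
                uint32_t next = (avg * (factor - 1) + sample) / factor;

                if (sample > avg)
                        next += 1;      /* round up on the way up */
                return next;
        }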
@@ -4605,55 +4078,43 @@ void rtl8192_process_phyinfo(struct r8192_priv * priv,u8* buffer, struct ieee802
// Check PWDB.
//
RT_TRACE(COMP_RXDESC, "Smooth %s PWDB = %d\n",
- pprevious_stats->bIsCCK? "CCK": "OFDM",
- pprevious_stats->RxPWDBAll);
+ pprevious_stats->bIsCCK ? "CCK" : "OFDM",
+ pprevious_stats->RxPWDBAll);
- if(pprevious_stats->bPacketBeacon)
- {
-/* record the beacon pwdb to the sliding window. */
- if(slide_beacon_adc_pwdb_statistics++ >= PHY_Beacon_RSSI_SLID_WIN_MAX)
- {
+ if (pprevious_stats->bPacketBeacon) {
+ /* record the beacon pwdb to the sliding window. */
+ if (slide_beacon_adc_pwdb_statistics++ >= PHY_Beacon_RSSI_SLID_WIN_MAX) {
slide_beacon_adc_pwdb_statistics = PHY_Beacon_RSSI_SLID_WIN_MAX;
last_beacon_adc_pwdb = priv->stats.Slide_Beacon_pwdb[slide_beacon_adc_pwdb_index];
priv->stats.Slide_Beacon_Total -= last_beacon_adc_pwdb;
- //DbgPrint("slide_beacon_adc_pwdb_index = %d, last_beacon_adc_pwdb = %d, Adapter->RxStats.Slide_Beacon_Total = %d\n",
- // slide_beacon_adc_pwdb_index, last_beacon_adc_pwdb, Adapter->RxStats.Slide_Beacon_Total);
}
priv->stats.Slide_Beacon_Total += pprevious_stats->RxPWDBAll;
priv->stats.Slide_Beacon_pwdb[slide_beacon_adc_pwdb_index] = pprevious_stats->RxPWDBAll;
- //DbgPrint("slide_beacon_adc_pwdb_index = %d, pPreviousRfd->Status.RxPWDBAll = %d\n", slide_beacon_adc_pwdb_index, pPreviousRfd->Status.RxPWDBAll);
slide_beacon_adc_pwdb_index++;
- if(slide_beacon_adc_pwdb_index >= PHY_Beacon_RSSI_SLID_WIN_MAX)
+ if (slide_beacon_adc_pwdb_index >= PHY_Beacon_RSSI_SLID_WIN_MAX)
slide_beacon_adc_pwdb_index = 0;
pprevious_stats->RxPWDBAll = priv->stats.Slide_Beacon_Total/slide_beacon_adc_pwdb_statistics;
- if(pprevious_stats->RxPWDBAll >= 3)
+ if (pprevious_stats->RxPWDBAll >= 3)
pprevious_stats->RxPWDBAll -= 3;
}
RT_TRACE(COMP_RXDESC, "Smooth %s PWDB = %d\n",
- pprevious_stats->bIsCCK? "CCK": "OFDM",
- pprevious_stats->RxPWDBAll);
+ pprevious_stats->bIsCCK ? "CCK" : "OFDM",
+ pprevious_stats->RxPWDBAll);
- if(pprevious_stats->bPacketToSelf || pprevious_stats->bPacketBeacon || pprevious_stats->bToSelfBA)
- {
- if(priv->undecorated_smoothed_pwdb < 0) // initialize
- {
+ if (pprevious_stats->bPacketToSelf || pprevious_stats->bPacketBeacon || pprevious_stats->bToSelfBA) {
+ if (priv->undecorated_smoothed_pwdb < 0) // initialize
priv->undecorated_smoothed_pwdb = pprevious_stats->RxPWDBAll;
- //DbgPrint("First pwdb initialize \n");
- }
- if(pprevious_stats->RxPWDBAll > (u32)priv->undecorated_smoothed_pwdb)
- {
+ if (pprevious_stats->RxPWDBAll > (u32)priv->undecorated_smoothed_pwdb) {
priv->undecorated_smoothed_pwdb =
- ( ((priv->undecorated_smoothed_pwdb)*(Rx_Smooth_Factor-1)) +
- (pprevious_stats->RxPWDBAll)) /(Rx_Smooth_Factor);
+ (((priv->undecorated_smoothed_pwdb)*(Rx_Smooth_Factor-1)) +
+ (pprevious_stats->RxPWDBAll)) /(Rx_Smooth_Factor);
priv->undecorated_smoothed_pwdb = priv->undecorated_smoothed_pwdb + 1;
- }
- else
- {
+ } else {
priv->undecorated_smoothed_pwdb =
- ( ((priv->undecorated_smoothed_pwdb)*(Rx_Smooth_Factor-1)) +
- (pprevious_stats->RxPWDBAll)) /(Rx_Smooth_Factor);
+ (((priv->undecorated_smoothed_pwdb)*(Rx_Smooth_Factor-1)) +
+ (pprevious_stats->RxPWDBAll)) /(Rx_Smooth_Factor);
}
}
@@ -4662,13 +4123,9 @@ void rtl8192_process_phyinfo(struct r8192_priv * priv,u8* buffer, struct ieee802
// Check EVM
//
/* record the general EVM to the sliding window. */
- if(pprevious_stats->SignalQuality == 0)
- {
- }
- else
- {
- if(pprevious_stats->bPacketToSelf || pprevious_stats->bPacketBeacon || pprevious_stats->bToSelfBA){
- if(slide_evm_statistics++ >= PHY_RSSI_SLID_WIN_MAX){
+ if (pprevious_stats->SignalQuality) {
+ if (pprevious_stats->bPacketToSelf || pprevious_stats->bPacketBeacon || pprevious_stats->bToSelfBA) {
+ if (slide_evm_statistics++ >= PHY_RSSI_SLID_WIN_MAX) {
slide_evm_statistics = PHY_RSSI_SLID_WIN_MAX;
last_evm = priv->stats.slide_evm[slide_evm_index];
priv->stats.slide_evm_total -= last_evm;
@@ -4677,7 +4134,7 @@ void rtl8192_process_phyinfo(struct r8192_priv * priv,u8* buffer, struct ieee802
priv->stats.slide_evm_total += pprevious_stats->SignalQuality;
priv->stats.slide_evm[slide_evm_index++] = pprevious_stats->SignalQuality;
- if(slide_evm_index >= PHY_RSSI_SLID_WIN_MAX)
+ if (slide_evm_index >= PHY_RSSI_SLID_WIN_MAX)
slide_evm_index = 0;
// <1> Showed on UI for user, in percentage.
@@ -4688,19 +4145,14 @@ void rtl8192_process_phyinfo(struct r8192_priv * priv,u8* buffer, struct ieee802
}
// <2> Showed on UI for engineering
- if(pprevious_stats->bPacketToSelf || pprevious_stats->bPacketBeacon || pprevious_stats->bToSelfBA)
- {
- for(nspatial_stream = 0; nspatial_stream<2 ; nspatial_stream++) // 2 spatial stream
- {
- if(pprevious_stats->RxMIMOSignalQuality[nspatial_stream] != -1)
- {
- if(priv->stats.rx_evm_percentage[nspatial_stream] == 0) // initialize
- {
+ if (pprevious_stats->bPacketToSelf || pprevious_stats->bPacketBeacon || pprevious_stats->bToSelfBA) {
+ for (nspatial_stream = 0; nspatial_stream < 2; nspatial_stream++) { // 2 spatial stream
+ if (pprevious_stats->RxMIMOSignalQuality[nspatial_stream] != -1) {
+ if (priv->stats.rx_evm_percentage[nspatial_stream] == 0) // initialize
priv->stats.rx_evm_percentage[nspatial_stream] = pprevious_stats->RxMIMOSignalQuality[nspatial_stream];
- }
priv->stats.rx_evm_percentage[nspatial_stream] =
- ( (priv->stats.rx_evm_percentage[nspatial_stream]* (Rx_Smooth_Factor-1)) +
- (pprevious_stats->RxMIMOSignalQuality[nspatial_stream]* 1)) / (Rx_Smooth_Factor);
+ ((priv->stats.rx_evm_percentage[nspatial_stream]* (Rx_Smooth_Factor-1)) +
+ (pprevious_stats->RxMIMOSignalQuality[nspatial_stream]* 1)) / (Rx_Smooth_Factor);
}
}
}
@@ -4725,126 +4177,104 @@ void rtl8192_process_phyinfo(struct r8192_priv * priv,u8* buffer, struct ieee802
* 05/26/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
-static u8 rtl819x_query_rxpwrpercentage(
- char antpower
- )
+static u8 rtl819x_query_rxpwrpercentage(char antpower)
{
if ((antpower <= -100) || (antpower >= 20))
- {
return 0;
- }
else if (antpower >= 0)
- {
return 100;
- }
else
- {
- return (100+antpower);
- }
+ return 100 + antpower;
} /* QueryRxPwrPercentage */
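A quick worked check of the clamping above: -120 dBm is at or below -100 and maps to 0%, -70 dBm maps to 100 + (-70) = 30%, and anything at or above 0 dBm saturates at 100%.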
-static u8
-rtl819x_evm_dbtopercentage(
- char value
- )
+static u8 rtl819x_evm_dbtopercentage(char value)
{
- char ret_val;
+ char ret_val;
- ret_val = value;
+ ret_val = value;
- if(ret_val >= 0)
- ret_val = 0;
- if(ret_val <= -33)
- ret_val = -33;
- ret_val = 0 - ret_val;
- ret_val*=3;
- if(ret_val == 99)
+ if (ret_val >= 0)
+ ret_val = 0;
+ if (ret_val <= -33)
+ ret_val = -33;
+ ret_val = 0 - ret_val;
+ ret_val *= 3;
+ if (ret_val == 99)
ret_val = 100;
- return(ret_val);
+ return ret_val;
}
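Worked example for the EVM conversion: an input of -25 dB is already inside the [-33, 0] clamp, is negated to 25 and scaled by 3 to give 75%; the special case promotes -33 dB (raw result 99) to a full 100%.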
//
// Description:
// We want good-looking for signal strength/quality
// 2007/7/19 01:09, by cosa.
//
-long
-rtl819x_signal_scale_mapping(
- long currsig
- )
+long rtl819x_signal_scale_mapping(long currsig)
{
long retsig;
// Step 1. Scale mapping.
- if(currsig >= 61 && currsig <= 100)
- {
+ if (currsig >= 61 && currsig <= 100)
retsig = 90 + ((currsig - 60) / 4);
- }
- else if(currsig >= 41 && currsig <= 60)
- {
+ else if (currsig >= 41 && currsig <= 60)
retsig = 78 + ((currsig - 40) / 2);
- }
- else if(currsig >= 31 && currsig <= 40)
- {
+ else if (currsig >= 31 && currsig <= 40)
retsig = 66 + (currsig - 30);
- }
- else if(currsig >= 21 && currsig <= 30)
- {
+ else if (currsig >= 21 && currsig <= 30)
retsig = 54 + (currsig - 20);
- }
- else if(currsig >= 5 && currsig <= 20)
- {
+ else if (currsig >= 5 && currsig <= 20)
retsig = 42 + (((currsig - 5) * 2) / 3);
- }
- else if(currsig == 4)
- {
+ else if (currsig == 4)
retsig = 36;
- }
- else if(currsig == 3)
- {
+ else if (currsig == 3)
retsig = 27;
- }
- else if(currsig == 2)
- {
+ else if (currsig == 2)
retsig = 18;
- }
- else if(currsig == 1)
- {
+ else if (currsig == 1)
retsig = 9;
- }
else
- {
retsig = currsig;
- }
return retsig;
}
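Spot-checking the scale mapping: a raw signal of 80 falls in the 61-100 band and maps to 90 + (80 - 60)/4 = 95, a raw 50 falls in the 41-60 band and maps to 78 + (50 - 40)/2 = 83, while values of 4 and below use the fixed steps or pass through unchanged.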
-static void rtl8192_query_rxphystatus(
- struct r8192_priv * priv,
- struct ieee80211_rx_stats * pstats,
- rx_drvinfo_819x_usb * pdrvinfo,
- struct ieee80211_rx_stats * precord_stats,
- bool bpacket_match_bssid,
- bool bpacket_toself,
- bool bPacketBeacon,
- bool bToSelfBA
- )
-{
- //PRT_RFD_STATUS pRtRfdStatus = &(pRfd->Status);
- phy_sts_ofdm_819xusb_t* pofdm_buf;
- phy_sts_cck_819xusb_t * pcck_buf;
- phy_ofdm_rx_status_rxsc_sgien_exintfflag* prxsc;
+static inline bool rx_hal_is_cck_rate(struct rx_drvinfo_819x_usb *pdrvinfo)
+{
+ if (pdrvinfo->RxHT)
+ return false;
+
+ switch (pdrvinfo->RxRate) {
+ case DESC90_RATE1M:
+ case DESC90_RATE2M:
+ case DESC90_RATE5_5M:
+ case DESC90_RATE11M:
+ return true;
+ default:
+ return false;
+ }
+}
+
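The new rx_hal_is_cck_rate() helper takes over from the old RX_HAL_IS_CCK_RATE macro mentioned in the comments below: a frame counts as CCK only when it is not HT and its descriptor rate is one of the four CCK rates. The call site falls outside the hunks shown here, but the intended use is presumably of the form:

        is_cck_rate = rx_hal_is_cck_rate(pdrvinfo);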
+static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
+ struct ieee80211_rx_stats *pstats,
+ rx_drvinfo_819x_usb *pdrvinfo,
+ struct ieee80211_rx_stats *precord_stats,
+ bool bpacket_match_bssid,
+ bool bpacket_toself,
+ bool bPacketBeacon,
+ bool bToSelfBA)
+{
+ phy_sts_ofdm_819xusb_t *pofdm_buf;
+ phy_sts_cck_819xusb_t *pcck_buf;
+ phy_ofdm_rx_status_rxsc_sgien_exintfflag *prxsc;
u8 *prxpkt;
u8 i, max_spatial_stream, tmp_rxsnr, tmp_rxevm, rxsc_sgien_exflg;
- char rx_pwr[4], rx_pwr_all=0;
- //long rx_avg_pwr = 0;
+ char rx_pwr[4], rx_pwr_all = 0;
char rx_snrX, rx_evmX;
u8 evm, pwdb_all;
- u32 RSSI, total_rssi=0;//, total_evm=0;
-// long signal_strength_index = 0;
- u8 is_cck_rate=0;
+ u32 RSSI, total_rssi = 0;
+ u8 is_cck_rate = 0;
u8 rf_rx_num = 0;
+ u8 sq;
priv->stats.numqry_phystatus++;
@@ -4855,11 +4285,11 @@ static void rtl8192_query_rxphystatus(
memset(precord_stats, 0, sizeof(struct ieee80211_rx_stats));
pstats->bPacketMatchBSSID = precord_stats->bPacketMatchBSSID = bpacket_match_bssid;
pstats->bPacketToSelf = precord_stats->bPacketToSelf = bpacket_toself;
- pstats->bIsCCK = precord_stats->bIsCCK = is_cck_rate;//RX_HAL_IS_CCK_RATE(pDrvInfo);
+ pstats->bIsCCK = precord_stats->bIsCCK = is_cck_rate;
pstats->bPacketBeacon = precord_stats->bPacketBeacon = bPacketBeacon;
pstats->bToSelfBA = precord_stats->bToSelfBA = bToSelfBA;
- prxpkt = (u8*)pdrvinfo;
+ prxpkt = (u8 *)pdrvinfo;
/* Move pointer to the 16th bytes. Phy status start address. */
prxpkt += sizeof(rx_drvinfo_819x_usb);
@@ -4873,8 +4303,7 @@ static void rtl8192_query_rxphystatus(
precord_stats->RxMIMOSignalQuality[0] = -1;
precord_stats->RxMIMOSignalQuality[1] = -1;
- if(is_cck_rate)
- {
+ if (is_cck_rate) {
//
// (1)Hardware does not provide RSSI for CCK
//
@@ -4882,51 +4311,46 @@ static void rtl8192_query_rxphystatus(
//
// (2)PWDB, Average PWDB calculated by hardware (for rate adaptive)
//
- u8 report;//, cck_agc_rpt;
+ u8 report;
priv->stats.numqry_phystatusCCK++;
- if(!priv->bCckHighPower)
- {
+ if (!priv->bCckHighPower) {
report = pcck_buf->cck_agc_rpt & 0xc0;
report = report>>6;
- switch(report)
- {
+ switch (report) {
//Fixed by Jacken from Bryant 2008-03-20
//Original value is -38 , -26 , -14 , -2
//Fixed value is -35 , -23 , -11 , 6
- case 0x3:
- rx_pwr_all = -35 - (pcck_buf->cck_agc_rpt & 0x3e);
- break;
- case 0x2:
- rx_pwr_all = -23 - (pcck_buf->cck_agc_rpt & 0x3e);
- break;
- case 0x1:
- rx_pwr_all = -11 - (pcck_buf->cck_agc_rpt & 0x3e);
- break;
- case 0x0:
- rx_pwr_all = 6 - (pcck_buf->cck_agc_rpt & 0x3e);
- break;
+ case 0x3:
+ rx_pwr_all = -35 - (pcck_buf->cck_agc_rpt & 0x3e);
+ break;
+ case 0x2:
+ rx_pwr_all = -23 - (pcck_buf->cck_agc_rpt & 0x3e);
+ break;
+ case 0x1:
+ rx_pwr_all = -11 - (pcck_buf->cck_agc_rpt & 0x3e);
+ break;
+ case 0x0:
+ rx_pwr_all = 6 - (pcck_buf->cck_agc_rpt & 0x3e);
+ break;
}
- }
- else
- {
+ } else {
report = pcck_buf->cck_agc_rpt & 0x60;
report = report>>5;
- switch(report)
- {
- case 0x3:
- rx_pwr_all = -35 - ((pcck_buf->cck_agc_rpt & 0x1f)<<1) ;
- break;
- case 0x2:
- rx_pwr_all = -23 - ((pcck_buf->cck_agc_rpt & 0x1f)<<1);
- break;
- case 0x1:
- rx_pwr_all = -11 - ((pcck_buf->cck_agc_rpt & 0x1f)<<1) ;
- break;
- case 0x0:
- rx_pwr_all = 6 - ((pcck_buf->cck_agc_rpt & 0x1f)<<1) ;
- break;
+ switch (report) {
+ case 0x3:
+ rx_pwr_all = -35 - ((pcck_buf->cck_agc_rpt & 0x1f)<<1);
+ break;
+ case 0x2:
+ rx_pwr_all = -23 - ((pcck_buf->cck_agc_rpt & 0x1f)<<1);
+ break;
+ case 0x1:
+ rx_pwr_all = -11 - ((pcck_buf->cck_agc_rpt & 0x1f)<<1);
+ break;
+ case 0x0:
+ rx_pwr_all = 6 - ((pcck_buf->cck_agc_rpt & 0x1f)<<1);
+ break;
}
}
@@ -4937,44 +4361,36 @@ static void rtl8192_query_rxphystatus(
//
// (3) Get Signal Quality (EVM)
//
- //if(bpacket_match_bssid)
- {
- u8 sq;
- if(pstats->RxPWDBAll > 40)
- {
- sq = 100;
- }else
- {
- sq = pcck_buf->sq_rpt;
+ if (pstats->RxPWDBAll > 40) {
+ sq = 100;
+ } else {
+ sq = pcck_buf->sq_rpt;
- if(pcck_buf->sq_rpt > 64)
- sq = 0;
- else if (pcck_buf->sq_rpt < 20)
- sq = 100;
- else
- sq = ((64-sq) * 100) / 44;
- }
- pstats->SignalQuality = precord_stats->SignalQuality = sq;
- pstats->RxMIMOSignalQuality[0] = precord_stats->RxMIMOSignalQuality[0] = sq;
- pstats->RxMIMOSignalQuality[1] = precord_stats->RxMIMOSignalQuality[1] = -1;
+ if (pcck_buf->sq_rpt > 64)
+ sq = 0;
+ else if (pcck_buf->sq_rpt < 20)
+ sq = 100;
+ else
+ sq = ((64-sq) * 100) / 44;
}
- }
- else
- {
+ pstats->SignalQuality = precord_stats->SignalQuality = sq;
+ pstats->RxMIMOSignalQuality[0] = precord_stats->RxMIMOSignalQuality[0] = sq;
+ pstats->RxMIMOSignalQuality[1] = precord_stats->RxMIMOSignalQuality[1] = -1;
+
+ } else {
priv->stats.numqry_phystatusHT++;
//
// (1)Get RSSI for HT rate
//
- for(i=RF90_PATH_A; i<priv->NumTotalRFPath; i++)
- {
+ for (i = RF90_PATH_A; i < priv->NumTotalRFPath; i++) {
// 2008/01/30 MH we will judge RF RX path now.
if (priv->brfpath_rxenable[i])
rf_rx_num++;
else
continue;
- if (!rtl8192_phy_CheckIsLegalRFPath(priv->ieee80211->dev, i))
+ if (!rtl8192_phy_CheckIsLegalRFPath(priv->ieee80211->dev, i))
continue;
//Fixed by Jacken from Bryant 2008-03-20
@@ -4984,7 +4400,6 @@ static void rtl8192_query_rxphystatus(
//Get Rx snr value in DB
tmp_rxsnr = pofdm_buf->rxsnr_X[i];
rx_snrX = (char)(tmp_rxsnr);
- //rx_snrX >>= 1;
rx_snrX /= 2;
priv->stats.rxSNRdB[i] = (long)rx_snrX;
@@ -4993,11 +4408,8 @@ static void rtl8192_query_rxphystatus(
total_rssi += RSSI;
/* Record Signal Strength for next packet */
- //if(bpacket_match_bssid)
- {
- pstats->RxMIMOSignalStrength[i] =(u8) RSSI;
- precord_stats->RxMIMOSignalStrength[i] =(u8) RSSI;
- }
+ pstats->RxMIMOSignalStrength[i] = (u8) RSSI;
+ precord_stats->RxMIMOSignalStrength[i] = (u8) RSSI;
}
@@ -5006,7 +4418,7 @@ static void rtl8192_query_rxphystatus(
//
//Fixed by Jacken from Bryant 2008-03-20
//Original value is 106
- rx_pwr_all = (((pofdm_buf->pwdb_all ) >> 1 )& 0x7f) -106;
+ rx_pwr_all = (((pofdm_buf->pwdb_all) >> 1) & 0x7f) - 106;
pwdb_all = rtl819x_query_rxpwrpercentage(rx_pwr_all);
pstats->RxPWDBAll = precord_stats->RxPWDBAll = pwdb_all;
@@ -5015,14 +4427,13 @@ static void rtl8192_query_rxphystatus(
//
// (3)EVM of HT rate
//
- if(pdrvinfo->RxHT && pdrvinfo->RxRate>=DESC90_RATEMCS8 &&
- pdrvinfo->RxRate<=DESC90_RATEMCS15)
+ if (pdrvinfo->RxHT && pdrvinfo->RxRate >= DESC90_RATEMCS8 &&
+ pdrvinfo->RxRate <= DESC90_RATEMCS15)
max_spatial_stream = 2; //both spatial stream make sense
else
max_spatial_stream = 1; //only spatial stream 1 makes sense
- for(i=0; i<max_spatial_stream; i++)
- {
+ for (i = 0; i < max_spatial_stream; i++) {
tmp_rxevm = pofdm_buf->rxevm_X[i];
rx_evmX = (char)(tmp_rxevm);
@@ -5032,19 +4443,16 @@ static void rtl8192_query_rxphystatus(
rx_evmX /= 2; //dbm
evm = rtl819x_evm_dbtopercentage(rx_evmX);
- //if(bpacket_match_bssid)
- {
- if(i==0) // Fill value in RFD, Get the first spatial stream only
- pstats->SignalQuality = precord_stats->SignalQuality = (u8)(evm & 0xff);
- pstats->RxMIMOSignalQuality[i] = precord_stats->RxMIMOSignalQuality[i] = (u8)(evm & 0xff);
- }
+ if (i == 0) // Fill value in RFD, Get the first spatial stream only
+ pstats->SignalQuality = precord_stats->SignalQuality = (u8)(evm & 0xff);
+ pstats->RxMIMOSignalQuality[i] = precord_stats->RxMIMOSignalQuality[i] = (u8)(evm & 0xff);
}
/* record rx statistics for debug */
rxsc_sgien_exflg = pofdm_buf->rxsc_sgien_exflg;
prxsc = (phy_ofdm_rx_status_rxsc_sgien_exintfflag *)&rxsc_sgien_exflg;
- if(pdrvinfo->BW) //40M channel
+ if (pdrvinfo->BW) //40M channel
priv->stats.received_bwtype[1+prxsc->rxsc]++;
else //20M channel
priv->stats.received_bwtype[0]++;
@@ -5052,25 +4460,17 @@ static void rtl8192_query_rxphystatus(
//UI BSS List signal strength(in percentage), make it good looking, from 0~100.
//It is assigned to the BSS List in GetValueFromBeaconOrProbeRsp().
- if(is_cck_rate)
- {
- pstats->SignalStrength = precord_stats->SignalStrength = (u8)(rtl819x_signal_scale_mapping((long)pwdb_all));//PWDB_ALL;
-
- }
- else
- {
- //pRfd->Status.SignalStrength = pRecordRfd->Status.SignalStrength = (u8)(SignalScaleMapping(total_rssi/=RF90_PATH_MAX));//(u8)(total_rssi/=RF90_PATH_MAX);
+ if (is_cck_rate) {
+ pstats->SignalStrength = precord_stats->SignalStrength = (u8)(rtl819x_signal_scale_mapping((long)pwdb_all));
+ } else {
// We can judge RX path number now.
if (rf_rx_num != 0)
- pstats->SignalStrength = precord_stats->SignalStrength = (u8)(rtl819x_signal_scale_mapping((long)(total_rssi/=rf_rx_num)));
+ pstats->SignalStrength = precord_stats->SignalStrength = (u8)(rtl819x_signal_scale_mapping((long)(total_rssi /= rf_rx_num)));
}
} /* QueryRxPhyStatus8190Pci */
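To make the CCK branch above concrete (normal-power case): bits [7:6] of cck_agc_rpt select a base of -35, -23, -11 or +6 dBm and bits [5:1] (mask 0x3e) are subtracted as a fine offset. For cck_agc_rpt = 0x88, report = (0x88 & 0xc0) >> 6 = 2, so rx_pwr_all = -23 - (0x88 & 0x3e) = -23 - 8 = -31 dBm, which rtl819x_query_rxpwrpercentage() turns into 100 - 31 = 69%.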
-void
-rtl8192_record_rxdesc_forlateruse(
- struct ieee80211_rx_stats * psrc_stats,
- struct ieee80211_rx_stats * ptarget_stats
-)
+void rtl8192_record_rxdesc_forlateruse(struct ieee80211_rx_stats *psrc_stats,
+ struct ieee80211_rx_stats *ptarget_stats)
{
ptarget_stats->bIsAMPDU = psrc_stats->bIsAMPDU;
ptarget_stats->bFirstMPDU = psrc_stats->bFirstMPDU;
@@ -5079,27 +4479,26 @@ rtl8192_record_rxdesc_forlateruse(
void TranslateRxSignalStuff819xUsb(struct sk_buff *skb,
- struct ieee80211_rx_stats * pstats,
+ struct ieee80211_rx_stats *pstats,
rx_drvinfo_819x_usb *pdrvinfo)
{
// TODO: We must only check packet for current MAC address. Not finish
rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
- struct net_device *dev=info->dev;
+ struct net_device *dev = info->dev;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
bool bpacket_match_bssid, bpacket_toself;
- bool bPacketBeacon=FALSE, bToSelfBA=FALSE;
+ bool bPacketBeacon = FALSE, bToSelfBA = FALSE;
static struct ieee80211_rx_stats previous_stats;
struct ieee80211_hdr_3addr *hdr;//by amy
- u16 fc,type;
+ u16 fc, type;
// Get Signal Quality for only RX data queue (but not command queue)
- u8* tmp_buf;
- //u16 tmp_buf_len = 0;
+ u8 *tmp_buf;
u8 *praddr;
/* Get MAC frame start address. */
- tmp_buf = (u8*)skb->data;// + get_rxpacket_shiftbytes_819xusb(pstats);
+ tmp_buf = (u8 *)skb->data;
hdr = (struct ieee80211_hdr_3addr *)tmp_buf;
fc = le16_to_cpu(hdr->frame_ctl);
@@ -5108,38 +4507,30 @@ void TranslateRxSignalStuff819xUsb(struct sk_buff *skb,
/* Check if the received packet is acceptable. */
bpacket_match_bssid = ((IEEE80211_FTYPE_CTL != type) &&
- (eqMacAddr(priv->ieee80211->current_network.bssid, (fc & IEEE80211_FCTL_TODS)? hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS )? hdr->addr2 : hdr->addr3))
- && (!pstats->bHwError) && (!pstats->bCRC)&& (!pstats->bICV));
+ (eqMacAddr(priv->ieee80211->current_network.bssid, (fc & IEEE80211_FCTL_TODS) ? hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 : hdr->addr3))
+ && (!pstats->bHwError) && (!pstats->bCRC) && (!pstats->bICV));
bpacket_toself = bpacket_match_bssid & (eqMacAddr(praddr, priv->ieee80211->dev->dev_addr));
- if(WLAN_FC_GET_FRAMETYPE(fc)== IEEE80211_STYPE_BEACON)
- {
- bPacketBeacon = true;
- //DbgPrint("Beacon 2, MatchBSSID = %d, ToSelf = %d \n", bPacketMatchBSSID, bPacketToSelf);
- }
- if(WLAN_FC_GET_FRAMETYPE(fc) == IEEE80211_STYPE_BLOCKACK)
- {
- if((eqMacAddr(praddr,dev->dev_addr)))
- bToSelfBA = true;
- //DbgPrint("BlockAck, MatchBSSID = %d, ToSelf = %d \n", bPacketMatchBSSID, bPacketToSelf);
- }
+ if (WLAN_FC_GET_FRAMETYPE(fc) == IEEE80211_STYPE_BEACON)
+ bPacketBeacon = true;
+ if (WLAN_FC_GET_FRAMETYPE(fc) == IEEE80211_STYPE_BLOCKACK) {
+ if ((eqMacAddr(praddr, dev->dev_addr)))
+ bToSelfBA = true;
+ }
- if(bpacket_match_bssid)
- {
+ if (bpacket_match_bssid)
priv->stats.numpacket_matchbssid++;
- }
- if(bpacket_toself){
+ if (bpacket_toself)
priv->stats.numpacket_toself++;
- }
//
// Process PHY information for previous packet (RSSI/PWDB/EVM)
//
// Because PHY information is only carried in the last packet of an A-MPDU, the driver
// must process the PHY information of the previous packet here
rtl8192_process_phyinfo(priv, tmp_buf, &previous_stats, pstats);
- rtl8192_query_rxphystatus(priv, pstats, pdrvinfo, &previous_stats, bpacket_match_bssid,bpacket_toself,bPacketBeacon,bToSelfBA);
+ rtl8192_query_rxphystatus(priv, pstats, pdrvinfo, &previous_stats, bpacket_match_bssid, bpacket_toself, bPacketBeacon, bToSelfBA);
rtl8192_record_rxdesc_forlateruse(pstats, &previous_stats);
}
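Note the one-frame delay in this path: because a PHY report only accompanies the last MPDU of an A-MPDU, rtl8192_process_phyinfo() runs the smoothing over the stats saved from the previous call (the static previous_stats), while rtl8192_query_rxphystatus() and rtl8192_record_rxdesc_forlateruse() refresh that saved copy from the current frame so it can be consumed on the next receive.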
@@ -5158,91 +4549,85 @@ void TranslateRxSignalStuff819xUsb(struct sk_buff *skb,
* Return:
* None
*/
-void
-UpdateReceivedRateHistogramStatistics8190(
- struct net_device *dev,
- struct ieee80211_rx_stats *stats
- )
+void UpdateReceivedRateHistogramStatistics8190(struct net_device *dev,
+ struct ieee80211_rx_stats *stats)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- u32 rcvType=1; //0: Total, 1:OK, 2:CRC, 3:ICV
+ u32 rcvType = 1; //0: Total, 1:OK, 2:CRC, 3:ICV
u32 rateIndex;
u32 preamble_guardinterval; //1: short preamble/GI, 0: long preamble/GI
- if(stats->bCRC)
- rcvType = 2;
- else if(stats->bICV)
- rcvType = 3;
+ if (stats->bCRC)
+ rcvType = 2;
+ else if (stats->bICV)
+ rcvType = 3;
- if(stats->bShortPreamble)
- preamble_guardinterval = 1;// short
+ if (stats->bShortPreamble)
+ preamble_guardinterval = 1;// short
else
- preamble_guardinterval = 0;// long
+ preamble_guardinterval = 0;// long
- switch(stats->rate)
- {
+ switch (stats->rate) {
//
// CCK rate
//
- case MGN_1M: rateIndex = 0; break;
- case MGN_2M: rateIndex = 1; break;
- case MGN_5_5M: rateIndex = 2; break;
- case MGN_11M: rateIndex = 3; break;
+ case MGN_1M: rateIndex = 0; break;
+ case MGN_2M: rateIndex = 1; break;
+ case MGN_5_5M: rateIndex = 2; break;
+ case MGN_11M: rateIndex = 3; break;
//
// Legacy OFDM rate
//
- case MGN_6M: rateIndex = 4; break;
- case MGN_9M: rateIndex = 5; break;
- case MGN_12M: rateIndex = 6; break;
- case MGN_18M: rateIndex = 7; break;
- case MGN_24M: rateIndex = 8; break;
- case MGN_36M: rateIndex = 9; break;
- case MGN_48M: rateIndex = 10; break;
- case MGN_54M: rateIndex = 11; break;
+ case MGN_6M: rateIndex = 4; break;
+ case MGN_9M: rateIndex = 5; break;
+ case MGN_12M: rateIndex = 6; break;
+ case MGN_18M: rateIndex = 7; break;
+ case MGN_24M: rateIndex = 8; break;
+ case MGN_36M: rateIndex = 9; break;
+ case MGN_48M: rateIndex = 10; break;
+ case MGN_54M: rateIndex = 11; break;
//
// 11n High throughput rate
//
- case MGN_MCS0: rateIndex = 12; break;
- case MGN_MCS1: rateIndex = 13; break;
- case MGN_MCS2: rateIndex = 14; break;
- case MGN_MCS3: rateIndex = 15; break;
- case MGN_MCS4: rateIndex = 16; break;
- case MGN_MCS5: rateIndex = 17; break;
- case MGN_MCS6: rateIndex = 18; break;
- case MGN_MCS7: rateIndex = 19; break;
- case MGN_MCS8: rateIndex = 20; break;
- case MGN_MCS9: rateIndex = 21; break;
- case MGN_MCS10: rateIndex = 22; break;
- case MGN_MCS11: rateIndex = 23; break;
- case MGN_MCS12: rateIndex = 24; break;
- case MGN_MCS13: rateIndex = 25; break;
- case MGN_MCS14: rateIndex = 26; break;
- case MGN_MCS15: rateIndex = 27; break;
- default: rateIndex = 28; break;
- }
- priv->stats.received_preamble_GI[preamble_guardinterval][rateIndex]++;
- priv->stats.received_rate_histogram[0][rateIndex]++; //total
- priv->stats.received_rate_histogram[rcvType][rateIndex]++;
+ case MGN_MCS0: rateIndex = 12; break;
+ case MGN_MCS1: rateIndex = 13; break;
+ case MGN_MCS2: rateIndex = 14; break;
+ case MGN_MCS3: rateIndex = 15; break;
+ case MGN_MCS4: rateIndex = 16; break;
+ case MGN_MCS5: rateIndex = 17; break;
+ case MGN_MCS6: rateIndex = 18; break;
+ case MGN_MCS7: rateIndex = 19; break;
+ case MGN_MCS8: rateIndex = 20; break;
+ case MGN_MCS9: rateIndex = 21; break;
+ case MGN_MCS10: rateIndex = 22; break;
+ case MGN_MCS11: rateIndex = 23; break;
+ case MGN_MCS12: rateIndex = 24; break;
+ case MGN_MCS13: rateIndex = 25; break;
+ case MGN_MCS14: rateIndex = 26; break;
+ case MGN_MCS15: rateIndex = 27; break;
+ default: rateIndex = 28; break;
+ }
+ priv->stats.received_preamble_GI[preamble_guardinterval][rateIndex]++;
+ priv->stats.received_rate_histogram[0][rateIndex]++; //total
+ priv->stats.received_rate_histogram[rcvType][rateIndex]++;
}
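The switch above flattens the MGN_* rate codes into 29 histogram slots (28 rates plus an unknown bucket). A table-driven form would be one alternative; the sketch below is illustrative only, and the rate codes in it are stand-in values rather than the driver's real MGN_* constants.

        #include <stdint.h>
        #include <stddef.h>

        struct rate_map { uint8_t rate; uint32_t index; };

        static const struct rate_map rate_table[] = {
                { 0x02, 0 },    /* stand-in for MGN_1M   */
                { 0x04, 1 },    /* stand-in for MGN_2M   */
                { 0x0b, 2 },    /* stand-in for MGN_5_5M */
                { 0x16, 3 },    /* stand-in for MGN_11M  */
                /* legacy OFDM and MCS entries would follow */
        };

        static uint32_t rate_to_index(uint8_t rate)
        {
                size_t i;

                for (i = 0; i < sizeof(rate_table) / sizeof(rate_table[0]); i++)
                        if (rate_table[i].rate == rate)
                                return rate_table[i].index;
                return 28;      /* unknown-rate bucket, as in the switch above */
        }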
void query_rxdesc_status(struct sk_buff *skb, struct ieee80211_rx_stats *stats, bool bIsRxAggrSubframe)
{
rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
- struct net_device *dev=info->dev;
+ struct net_device *dev = info->dev;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- //rx_desc_819x_usb *desc = (rx_desc_819x_usb *)skb->data;
rx_drvinfo_819x_usb *driver_info = NULL;
//
//Get Rx Descriptor Information
//
#ifdef USB_RX_AGGREGATION_SUPPORT
- if (bIsRxAggrSubframe)
- {
+ if (bIsRxAggrSubframe) {
rx_desc_819x_usb_aggr_subframe *desc = (rx_desc_819x_usb_aggr_subframe *)skb->data;
- stats->Length = desc->Length ;
+ stats->Length = desc->Length;
stats->RxDrvInfoSize = desc->RxDrvInfoSize;
stats->RxBufShift = 0; //RxBufShift = 2 in RxDesc, but usb didn't shift bytes in fact.
stats->bICV = desc->ICV;
@@ -5256,7 +4641,7 @@ void query_rxdesc_status(struct sk_buff *skb, struct ieee80211_rx_stats *stats,
stats->Length = desc->Length;
stats->RxDrvInfoSize = desc->RxDrvInfoSize;
- stats->RxBufShift = 0;//desc->Shift&0x03;
+ stats->RxBufShift = 0;
stats->bICV = desc->ICV;
stats->bCRC = desc->CRC32;
stats->bHwError = stats->bCRC|stats->bICV;
@@ -5264,16 +4649,12 @@ void query_rxdesc_status(struct sk_buff *skb, struct ieee80211_rx_stats *stats,
stats->Decrypted = !desc->SWDec;
}
- if((priv->ieee80211->pHTInfo->bCurrentHTSupport == true) && (priv->ieee80211->pairwise_key_type == KEY_TYPE_CCMP))
- {
+ if ((priv->ieee80211->pHTInfo->bCurrentHTSupport == true) && (priv->ieee80211->pairwise_key_type == KEY_TYPE_CCMP))
stats->bHwError = false;
- }
else
- {
stats->bHwError = stats->bCRC|stats->bICV;
- }
- if(stats->Length < 24 || stats->Length > MAX_8192U_RX_SIZE)
+ if (stats->Length < 24 || stats->Length > MAX_8192U_RX_SIZE)
stats->bHwError |= 1;
//
//Get Driver Info
@@ -5281,71 +4662,66 @@ void query_rxdesc_status(struct sk_buff *skb, struct ieee80211_rx_stats *stats,
// TODO: Need to verify it on FGPA platform
//Driver info are written to the RxBuffer following rx desc
if (stats->RxDrvInfoSize != 0) {
- driver_info = (rx_drvinfo_819x_usb *)(skb->data + sizeof(rx_desc_819x_usb) + \
- stats->RxBufShift);
+ driver_info = (rx_drvinfo_819x_usb *)(skb->data + sizeof(rx_desc_819x_usb) +
+ stats->RxBufShift);
/* unit: 0.5M */
/* TODO */
- if(!stats->bHwError){
+ if (!stats->bHwError) {
u8 ret_rate;
ret_rate = HwRateToMRate90(driver_info->RxHT, driver_info->RxRate);
- if(ret_rate == 0xff)
- {
+ if (ret_rate == 0xff) {
// Abnormal Case: Receive CRC OK packet with Rx descriptor indicating non supported rate.
// Special Error Handling here, 2008.05.16, by Emily
stats->bHwError = 1;
stats->rate = MGN_1M; //Set 1M rate by default
- }else
- {
+ } else {
stats->rate = ret_rate;
}
- }
- else
+ } else {
stats->rate = 0x02;
+ }
stats->bShortPreamble = driver_info->SPLCP;
UpdateReceivedRateHistogramStatistics8190(dev, stats);
- stats->bIsAMPDU = (driver_info->PartAggr==1);
- stats->bFirstMPDU = (driver_info->PartAggr==1) && (driver_info->FirstAGGR==1);
+ stats->bIsAMPDU = (driver_info->PartAggr == 1);
+ stats->bFirstMPDU = (driver_info->PartAggr == 1) && (driver_info->FirstAGGR == 1);
stats->TimeStampLow = driver_info->TSFL;
// xiong mask it, 070514
- //pRfd->Status.TimeStampHigh = PlatformEFIORead4Byte(Adapter, TSFR+4);
- // stats->TimeStampHigh = read_nic_dword(dev, TSFR+4);
UpdateRxPktTimeStamp8190(dev, stats);
//
// Rx A-MPDU
//
- if(driver_info->FirstAGGR==1 || driver_info->PartAggr == 1)
+ if (driver_info->FirstAGGR == 1 || driver_info->PartAggr == 1)
RT_TRACE(COMP_RXDESC, "driver_info->FirstAGGR = %d, driver_info->PartAggr = %d\n",
- driver_info->FirstAGGR, driver_info->PartAggr);
+ driver_info->FirstAGGR, driver_info->PartAggr);
}
- skb_pull(skb,sizeof(rx_desc_819x_usb));
+ skb_pull(skb, sizeof(rx_desc_819x_usb));
//
// Get Total offset of MPDU Frame Body
//
- if((stats->RxBufShift + stats->RxDrvInfoSize) > 0) {
+ if ((stats->RxBufShift + stats->RxDrvInfoSize) > 0) {
stats->bShift = 1;
- skb_pull(skb,stats->RxBufShift + stats->RxDrvInfoSize);
+ skb_pull(skb, stats->RxBufShift + stats->RxDrvInfoSize);
}
#ifdef USB_RX_AGGREGATION_SUPPORT
/* for the rx aggregated sub frame, the redundant space truly contained in the packet */
- if(bIsRxAggrSubframe) {
+ if (bIsRxAggrSubframe)
skb_pull(skb, 8);
- }
#endif
/* for debug 2008.5.29 */
//added by vivi, for MP, 20080108
stats->RxIs40MHzPacket = driver_info->BW;
- if(stats->RxDrvInfoSize != 0)
+ if (stats->RxDrvInfoSize != 0)
TranslateRxSignalStuff819xUsb(skb, stats, driver_info);
}
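Rx buffer layout assumed by the pulls in query_rxdesc_status() (a sketch):

        | rx_desc_819x_usb | RxBufShift pad | driver info (RxDrvInfoSize bytes) | 802.11 MPDU ... |

The first skb_pull() strips the fixed descriptor, the second strips the shift padding plus driver info, leaving skb->data at the start of the MPDU; GetRxPacketShiftBytes819xUsb() below returns the same total offset as a single number.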
@@ -5359,19 +4735,18 @@ u32 GetRxPacketShiftBytes819xUsb(struct ieee80211_rx_stats *Status, bool bIsRxA
else
#endif
return (sizeof(rx_desc_819x_usb) + Status->RxDrvInfoSize
- + Status->RxBufShift);
+ + Status->RxBufShift);
}
-void rtl8192_rx_nomal(struct sk_buff* skb)
+void rtl8192_rx_nomal(struct sk_buff *skb)
{
rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
- struct net_device *dev=info->dev;
+ struct net_device *dev = info->dev;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
struct ieee80211_rx_stats stats = {
.signal = 0,
.noise = -98,
.rate = 0,
- // .mac_time = jiffies,
.freq = IEEE80211_24GHZ_BAND,
};
u32 rx_pkt_len = 0;
@@ -5393,7 +4768,7 @@ void rtl8192_rx_nomal(struct sk_buff* skb)
#endif
/* 20 is for ps-poll */
- if((skb->len >=(20 + sizeof(rx_desc_819x_usb))) && (skb->len < RX_URB_SIZE)) {
+ if ((skb->len >= (20 + sizeof(rx_desc_819x_usb))) && (skb->len < RX_URB_SIZE)) {
#ifdef USB_RX_AGGREGATION_SUPPORT
TempByte = *(skb->data + sizeof(rx_desc_819x_usb));
#endif
@@ -5404,14 +4779,12 @@ void rtl8192_rx_nomal(struct sk_buff* skb)
#ifdef USB_RX_AGGREGATION_SUPPORT
if (TempByte & BIT0) {
agg_skb = skb;
- //TotalLength = agg_skb->len - 4; /*sCrcLng*/
TotalLength = stats.Length - 4; /*sCrcLng*/
- //RT_TRACE(COMP_RECV, "%s:first aggregated packet!Length=%d\n",__FUNCTION__,TotalLength);
/* though the head pointer has passed this position */
TempDWord = *(u32 *)(agg_skb->data - 4);
PacketLength = (u16)(TempDWord & 0x3FFF); /*sCrcLng*/
skb = dev_alloc_skb(PacketLength);
- memcpy(skb_put(skb,PacketLength),agg_skb->data,PacketLength);
+ memcpy(skb_put(skb, PacketLength), agg_skb->data, PacketLength);
PacketShiftBytes = GetRxPacketShiftBytes819xUsb(&stats, false);
}
#endif
@@ -5421,26 +4794,24 @@ void rtl8192_rx_nomal(struct sk_buff* skb)
rx_pkt_len = skb->len;
ieee80211_hdr = (struct ieee80211_hdr_1addr *)skb->data;
unicast_packet = false;
- if(is_broadcast_ether_addr(ieee80211_hdr->addr1)) {
+ if (is_broadcast_ether_addr(ieee80211_hdr->addr1)) {
//TODO
- }else if(is_multicast_ether_addr(ieee80211_hdr->addr1)){
+ } else if (is_multicast_ether_addr(ieee80211_hdr->addr1)) {
//TODO
- }else {
+ } else {
/* unicast packet */
unicast_packet = true;
}
- if(!ieee80211_rx(priv->ieee80211,skb, &stats)) {
+ if (!ieee80211_rx(priv->ieee80211, skb, &stats)) {
dev_kfree_skb_any(skb);
} else {
priv->stats.rxoktotal++;
- if(unicast_packet) {
+ if (unicast_packet)
priv->stats.rxbytesunicast += rx_pkt_len;
- }
}
#ifdef USB_RX_AGGREGATION_SUPPORT
testing = 1;
- // (PipeIndex == 0) && (TempByte & BIT0) => TotalLength > 0.
if (TotalLength > 0) {
PacketOccupiedLendth = PacketLength + (PacketShiftBytes + 8);
if ((PacketOccupiedLendth & 0xFF) != 0)
@@ -5452,9 +4823,8 @@ void rtl8192_rx_nomal(struct sk_buff* skb)
else
agg_skb->len = 0;
- while (agg_skb->len>=GetRxPacketShiftBytes819xUsb(&stats, true)) {
+ while (agg_skb->len >= GetRxPacketShiftBytes819xUsb(&stats, true)) {
u8 tmpCRC = 0, tmpICV = 0;
- //RT_TRACE(COMP_RECV,"%s:aggred pkt,total_len = %d\n",__FUNCTION__,agg_skb->len);
RxDescr = (rx_desc_819x_usb_aggr_subframe *)(agg_skb->data);
tmpCRC = RxDescr->CRC32;
tmpICV = RxDescr->ICV;
@@ -5470,32 +4840,30 @@ void rtl8192_rx_nomal(struct sk_buff* skb)
query_rxdesc_status(agg_skb, &stats, true);
PacketLength = stats.Length;
- if(PacketLength > agg_skb->len) {
+ if (PacketLength > agg_skb->len)
break;
- }
/* Process the MPDU received */
skb = dev_alloc_skb(PacketLength);
- memcpy(skb_put(skb,PacketLength),agg_skb->data, PacketLength);
+ memcpy(skb_put(skb, PacketLength), agg_skb->data, PacketLength);
skb_trim(skb, skb->len - 4/*sCrcLng*/);
rx_pkt_len = skb->len;
ieee80211_hdr = (struct ieee80211_hdr_1addr *)skb->data;
unicast_packet = false;
- if(is_broadcast_ether_addr(ieee80211_hdr->addr1)) {
+ if (is_broadcast_ether_addr(ieee80211_hdr->addr1)) {
//TODO
- }else if(is_multicast_ether_addr(ieee80211_hdr->addr1)){
+ } else if (is_multicast_ether_addr(ieee80211_hdr->addr1)) {
//TODO
- }else {
+ } else {
/* unicast packet */
unicast_packet = true;
}
- if(!ieee80211_rx(priv->ieee80211,skb, &stats)) {
+ if (!ieee80211_rx(priv->ieee80211, skb, &stats)) {
dev_kfree_skb_any(skb);
} else {
priv->stats.rxoktotal++;
- if(unicast_packet) {
+ if (unicast_packet)
priv->stats.rxbytesunicast += rx_pkt_len;
- }
}
/* should trim the packet which has been copied to target skb */
skb_pull(agg_skb, PacketLength);
@@ -5514,26 +4882,18 @@ void rtl8192_rx_nomal(struct sk_buff* skb)
#endif
} else {
priv->stats.rxurberr++;
- printk("actual_length:%d\n", skb->len);
+ netdev_dbg(dev, "actual_length: %d\n", skb->len);
dev_kfree_skb_any(skb);
}
}
-void
-rtl819xusb_process_received_packet(
- struct net_device *dev,
- struct ieee80211_rx_stats *pstats
- )
+void rtl819xusb_process_received_packet(struct net_device *dev,
+ struct ieee80211_rx_stats *pstats)
{
-// bool bfreerfd=false, bqueued=false;
- u8* frame;
- u16 frame_len=0;
+ u8 *frame;
+ u16 frame_len = 0;
struct r8192_priv *priv = ieee80211_priv(dev);
-// u8 index = 0;
-// u8 TID = 0;
- //u16 seqnum = 0;
- //PRX_TS_RECORD pts = NULL;
// Get shifted bytes of Starting address of 802.11 header. 2006.09.28, by Emily
//porting by amy 080508
@@ -5541,33 +4901,27 @@ rtl819xusb_process_received_packet(
frame = pstats->virtual_address;
frame_len = pstats->packetlength;
#ifdef TODO // by amy about HCT
- if(!Adapter->bInHctTest)
+ if (!Adapter->bInHctTest)
CountRxErrStatistics(Adapter, pRfd);
#endif
- {
- #ifdef ENABLE_PS //by amy for adding ps function in future
- RT_RF_POWER_STATE rtState;
- // When RF is off, we should not count the packet for hw/sw synchronize
- // reason, ie. there may be a duration while sw switch is changed and hw
- // switch is being changed. 2006.12.04, by shien chang.
- Adapter->HalFunc.GetHwRegHandler(Adapter, HW_VAR_RF_STATE, (u8* )(&rtState));
- if (rtState == eRfOff)
- {
- return;
- }
- #endif
+#ifdef ENABLE_PS //by amy for adding ps function in future
+ RT_RF_POWER_STATE rtState;
+ // When RF is off, we should not count the packet for hw/sw synchronize
+ // reason, ie. there may be a duration while sw switch is changed and hw
+ // switch is being changed. 2006.12.04, by shien chang.
+ Adapter->HalFunc.GetHwRegHandler(Adapter, HW_VAR_RF_STATE, (u8 *)(&rtState));
+ if (rtState == eRfOff)
+ return;
+#endif
priv->stats.rxframgment++;
- }
#ifdef TODO
RmMonitorSignalStrength(Adapter, pRfd);
#endif
/* 2007/01/16 MH Add RX command packet handle here. */
/* 2007/03/01 MH We have to release RFD and return if rx pkt is cmd pkt. */
if (rtl819xusb_rx_command_packet(dev, pstats))
- {
return;
- }
#ifdef SW_CRC_CHECK
SwCrcCheck();
@@ -5578,16 +4932,12 @@ rtl819xusb_process_received_packet(
void query_rx_cmdpkt_desc_status(struct sk_buff *skb, struct ieee80211_rx_stats *stats)
{
-// rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
-// struct net_device *dev=info->dev;
-// struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
rx_desc_819x_usb *desc = (rx_desc_819x_usb *)skb->data;
-// rx_drvinfo_819x_usb *driver_info;
//
//Get Rx Descriptor Information
//
- stats->virtual_address = (u8*)skb->data;
+ stats->virtual_address = (u8 *)skb->data;
stats->Length = desc->Length;
stats->RxDrvInfoSize = 0;
stats->RxBufShift = 0;
@@ -5602,21 +4952,17 @@ void rtl8192_rx_cmd(struct sk_buff *skb)
{
struct rtl8192_rx_info *info = (struct rtl8192_rx_info *)skb->cb;
struct net_device *dev = info->dev;
- //int ret;
-// struct urb *rx_urb = info->urb;
/* TODO */
struct ieee80211_rx_stats stats = {
.signal = 0,
.noise = -98,
.rate = 0,
- // .mac_time = jiffies,
.freq = IEEE80211_24GHZ_BAND,
};
- if((skb->len >=(20 + sizeof(rx_desc_819x_usb))) && (skb->len < RX_URB_SIZE))
- {
+ if ((skb->len >= (20 + sizeof(rx_desc_819x_usb))) && (skb->len < RX_URB_SIZE)) {
- query_rx_cmdpkt_desc_status(skb,&stats);
+ query_rx_cmdpkt_desc_status(skb, &stats);
// this is to be done by amy 080508 prfd->queue_id = 1;
@@ -5624,7 +4970,7 @@ void rtl8192_rx_cmd(struct sk_buff *skb)
// Process the command packet received.
//
- rtl819xusb_process_received_packet(dev,&stats);
+ rtl819xusb_process_received_packet(dev, &stats);
dev_kfree_skb_any(skb);
}
@@ -5640,22 +4986,21 @@ void rtl8192_irq_rx_tasklet(struct r8192_priv *priv)
switch (info->out_pipe) {
/* Normal packet pipe */
case 3:
- //RT_TRACE(COMP_RECV, "normal in-pipe index(%d)\n",info->out_pipe);
priv->IrpPendingCount--;
rtl8192_rx_nomal(skb);
break;
- /* Command packet pipe */
+ /* Command packet pipe */
case 9:
- RT_TRACE(COMP_RECV, "command in-pipe index(%d)\n",\
- info->out_pipe);
+ RT_TRACE(COMP_RECV, "command in-pipe index(%d)\n",
+ info->out_pipe);
rtl8192_rx_cmd(skb);
break;
default: /* should never get here! */
- RT_TRACE(COMP_ERR, "Unknown in-pipe index(%d)\n",\
- info->out_pipe);
+ RT_TRACE(COMP_ERR, "Unknown in-pipe index(%d)\n",
+ info->out_pipe);
dev_kfree_skb(skb);
break;
@@ -5682,11 +5027,10 @@ static const struct net_device_ops rtl8192_netdev_ops = {
*****************************************************************************/
static int rtl8192_usb_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+ const struct usb_device_id *id)
{
-// unsigned long ioaddr = 0;
struct net_device *dev = NULL;
- struct r8192_priv *priv= NULL;
+ struct r8192_priv *priv = NULL;
struct usb_device *udev = interface_to_usbdev(intf);
int ret;
RT_TRACE(COMP_INIT, "Oops: i'm coming\n");
@@ -5699,29 +5043,28 @@ static int rtl8192_usb_probe(struct usb_interface *intf,
SET_NETDEV_DEV(dev, &intf->dev);
priv = ieee80211_priv(dev);
priv->ieee80211 = netdev_priv(dev);
- priv->udev=udev;
+ priv->udev = udev;
dev->netdev_ops = &rtl8192_netdev_ops;
- //DMESG("Oops: i'm coming\n");
#if WIRELESS_EXT >= 12
#if WIRELESS_EXT < 17
dev->get_wireless_stats = r8192_get_wireless_stats;
#endif
dev->wireless_handlers = (struct iw_handler_def *) &r8192_wx_handlers_def;
#endif
- dev->type=ARPHRD_ETHER;
+ dev->type = ARPHRD_ETHER;
dev->watchdog_timeo = HZ*3; //modified by john, 0805
- if (dev_alloc_name(dev, ifname) < 0){
+ if (dev_alloc_name(dev, ifname) < 0) {
RT_TRACE(COMP_INIT, "Oops: devname already taken! Trying wlan%%d...\n");
ifname = "wlan%d";
dev_alloc_name(dev, ifname);
}
RT_TRACE(COMP_INIT, "Driver probe completed1\n");
- if(rtl8192_init(dev)!=0){
+ if (rtl8192_init(dev) != 0) {
RT_TRACE(COMP_ERR, "Initialization failed");
ret = -ENODEV;
goto fail;
@@ -5733,7 +5076,7 @@ static int rtl8192_usb_probe(struct usb_interface *intf,
if (ret)
goto fail2;
- RT_TRACE(COMP_INIT, "dev name=======> %s\n",dev->name);
+ RT_TRACE(COMP_INIT, "dev name=======> %s\n", dev->name);
rtl8192_proc_init_one(dev);
@@ -5755,16 +5098,13 @@ fail:
}
//detach all the work and timer structures declared or initialized in the r8192U_init function.
-void rtl8192_cancel_deferred_work(struct r8192_priv* priv)
+void rtl8192_cancel_deferred_work(struct r8192_priv *priv)
{
cancel_work_sync(&priv->reset_wq);
cancel_delayed_work(&priv->watch_dog_wq);
cancel_delayed_work(&priv->update_beacon_wq);
cancel_work_sync(&priv->qos_activate);
- //cancel_work_sync(&priv->SetBWModeWorkItem);
- //cancel_work_sync(&priv->SwChnlWorkItem);
-
}
@@ -5773,22 +5113,18 @@ static void rtl8192_usb_disconnect(struct usb_interface *intf)
struct net_device *dev = usb_get_intfdata(intf);
struct r8192_priv *priv = ieee80211_priv(dev);
- if(dev){
+ if (dev) {
unregister_netdev(dev);
RT_TRACE(COMP_DOWN, "=============>wlan driver to be removed\n");
rtl8192_proc_remove_one(dev);
- rtl8192_down(dev);
+ rtl8192_down(dev);
kfree(priv->pFirmware);
priv->pFirmware = NULL;
- // priv->rf_close(dev);
-// rtl8192_SetRFPowerState(dev, eRfOff);
rtl8192_usb_deleteendpoints(dev);
destroy_workqueue(priv->priv_wq);
- //rtl8192_irq_disable(dev);
- //rtl8192_reset(dev);
mdelay(10);
}
@@ -5815,38 +5151,36 @@ static int __init rtl8192_usb_module_init(void)
#ifdef CONFIG_IEEE80211_DEBUG
ret = ieee80211_debug_init();
if (ret) {
- printk(KERN_ERR "ieee80211_debug_init() failed %d\n", ret);
+ pr_err("ieee80211_debug_init() failed %d\n", ret);
return ret;
}
#endif
ret = ieee80211_crypto_init();
if (ret) {
- printk(KERN_ERR "ieee80211_crypto_init() failed %d\n", ret);
+ pr_err("ieee80211_crypto_init() failed %d\n", ret);
return ret;
}
ret = ieee80211_crypto_tkip_init();
if (ret) {
- printk(KERN_ERR "ieee80211_crypto_tkip_init() failed %d\n",
- ret);
+ pr_err("ieee80211_crypto_tkip_init() failed %d\n", ret);
return ret;
}
ret = ieee80211_crypto_ccmp_init();
if (ret) {
- printk(KERN_ERR "ieee80211_crypto_ccmp_init() failed %d\n",
- ret);
+ pr_err("ieee80211_crypto_ccmp_init() failed %d\n", ret);
return ret;
}
ret = ieee80211_crypto_wep_init();
if (ret) {
- printk(KERN_ERR "ieee80211_crypto_wep_init() failed %d\n", ret);
+ pr_err("ieee80211_crypto_wep_init() failed %d\n", ret);
return ret;
}
- printk(KERN_INFO "\nLinux kernel driver for RTL8192 based WLAN cards\n");
- printk(KERN_INFO "Copyright (c) 2007-2008, Realsil Wlan\n");
+ pr_info("\nLinux kernel driver for RTL8192 based WLAN cards\n");
+ pr_info("Copyright (c) 2007-2008, Realsil Wlan\n");
RT_TRACE(COMP_INIT, "Initializing module");
RT_TRACE(COMP_INIT, "Wireless extensions version %d", WIRELESS_EXT);
rtl8192_proc_module_init();
@@ -5859,7 +5193,6 @@ static void __exit rtl8192_usb_module_exit(void)
usb_deregister(&rtl8192_usb_driver);
RT_TRACE(COMP_DOWN, "Exiting");
-// rtl8192_proc_module_remove();
}
@@ -5869,11 +5202,11 @@ void rtl8192_try_wake_queue(struct net_device *dev, int pri)
short enough_desc;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- spin_lock_irqsave(&priv->tx_lock,flags);
- enough_desc = check_nic_enough_desc(dev,pri);
- spin_unlock_irqrestore(&priv->tx_lock,flags);
+ spin_lock_irqsave(&priv->tx_lock, flags);
+ enough_desc = check_nic_enough_desc(dev, pri);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
- if(enough_desc)
+ if (enough_desc)
ieee80211_wake_queue(priv->ieee80211);
}
@@ -5881,43 +5214,32 @@ void EnableHWSecurityConfig8192(struct net_device *dev)
{
u8 SECR_value = 0x0;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- struct ieee80211_device* ieee = priv->ieee80211;
+ struct ieee80211_device *ieee = priv->ieee80211;
SECR_value = SCR_TxEncEnable | SCR_RxDecEnable;
- if (((KEY_TYPE_WEP40 == ieee->pairwise_key_type) || (KEY_TYPE_WEP104 == ieee->pairwise_key_type)) && (priv->ieee80211->auth_mode != 2))
- {
+ if (((KEY_TYPE_WEP40 == ieee->pairwise_key_type) || (KEY_TYPE_WEP104 == ieee->pairwise_key_type)) && (priv->ieee80211->auth_mode != 2)) {
SECR_value |= SCR_RxUseDK;
SECR_value |= SCR_TxUseDK;
- }
- else if ((ieee->iw_mode == IW_MODE_ADHOC) && (ieee->pairwise_key_type & (KEY_TYPE_CCMP | KEY_TYPE_TKIP)))
- {
+ } else if ((ieee->iw_mode == IW_MODE_ADHOC) && (ieee->pairwise_key_type & (KEY_TYPE_CCMP | KEY_TYPE_TKIP))) {
SECR_value |= SCR_RxUseDK;
SECR_value |= SCR_TxUseDK;
}
//add HWSec active enable here.
-//default using hwsec. when peer AP is in N mode only and pairwise_key_type is none_aes(which HT_IOT_ACT_PURE_N_MODE indicates it), use software security. when peer AP is in b,g,n mode mixed and pairwise_key_type is none_aes, use g mode hw security. WB on 2008.7.4
+ //default using hwsec. when peer AP is in N mode only and pairwise_key_type is none_aes(which HT_IOT_ACT_PURE_N_MODE indicates it), use software security. when peer AP is in b,g,n mode mixed and pairwise_key_type is none_aes, use g mode hw security. WB on 2008.7.4
ieee->hwsec_active = 1;
- if ((ieee->pHTInfo->IOTAction&HT_IOT_ACT_PURE_N_MODE) || !hwwep)//!ieee->hwsec_support) //add hwsec_support flag to totol control hw_sec on/off
- {
+ if ((ieee->pHTInfo->IOTAction&HT_IOT_ACT_PURE_N_MODE) || !hwwep) { //add hwsec_support flag to totally control hw_sec on/off
ieee->hwsec_active = 0;
SECR_value &= ~SCR_RxDecEnable;
}
- RT_TRACE(COMP_SEC,"%s:, hwsec:%d, pairwise_key:%d, SECR_value:%x\n", __FUNCTION__, \
- ieee->hwsec_active, ieee->pairwise_key_type, SECR_value);
- {
- write_nic_byte(dev, SECR, SECR_value);//SECR_value | SCR_UseDK );
- }
+ RT_TRACE(COMP_SEC, "%s:, hwsec:%d, pairwise_key:%d, SECR_value:%x\n", __func__,
+ ieee->hwsec_active, ieee->pairwise_key_type, SECR_value);
+ write_nic_byte(dev, SECR, SECR_value);
}
-void setKey( struct net_device *dev,
- u8 EntryNo,
- u8 KeyIndex,
- u16 KeyType,
- u8 *MacAddr,
- u8 DefaultKey,
- u32 *KeyContent )
+void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
+ u8 *MacAddr, u8 DefaultKey, u32 *KeyContent)
{
u32 TargetCommand = 0;
u32 TargetContent = 0;
@@ -5926,44 +5248,40 @@ void setKey( struct net_device *dev,
if (EntryNo >= TOTAL_CAM_ENTRY)
RT_TRACE(COMP_ERR, "cam entry exceeds in setKey()\n");
- RT_TRACE(COMP_SEC, "====>to setKey(), dev:%p, EntryNo:%d, KeyIndex:%d, KeyType:%d, MacAddr%pM\n", dev,EntryNo, KeyIndex, KeyType, MacAddr);
+ RT_TRACE(COMP_SEC, "====>to setKey(), dev:%p, EntryNo:%d, KeyIndex:%d, KeyType:%d, MacAddr%pM\n", dev, EntryNo, KeyIndex, KeyType, MacAddr);
if (DefaultKey)
usConfig |= BIT15 | (KeyType<<2);
else
usConfig |= BIT15 | (KeyType<<2) | KeyIndex;
-// usConfig |= BIT15 | (KeyType<<2) | (DefaultKey<<5) | KeyIndex;
- for(i=0 ; i<CAM_CONTENT_COUNT; i++){
+ for (i = 0; i < CAM_CONTENT_COUNT; i++) {
TargetCommand = i+CAM_CONTENT_COUNT*EntryNo;
TargetCommand |= BIT31|BIT16;
- if(i==0){//MAC|Config
+ if (i == 0) { //MAC|Config
TargetContent = (u32)(*(MacAddr+0)) << 16|
(u32)(*(MacAddr+1)) << 24|
(u32)usConfig;
write_nic_dword(dev, WCAMI, TargetContent);
write_nic_dword(dev, RWCAM, TargetCommand);
- // printk("setkey cam =%8x\n", read_cam(dev, i+6*EntryNo));
- }
- else if(i==1){//MAC
+ } else if (i == 1) { //MAC
TargetContent = (u32)(*(MacAddr+2)) |
(u32)(*(MacAddr+3)) << 8|
(u32)(*(MacAddr+4)) << 16|
(u32)(*(MacAddr+5)) << 24;
write_nic_dword(dev, WCAMI, TargetContent);
write_nic_dword(dev, RWCAM, TargetCommand);
- }
- else {
+ } else {
//Key Material
- if(KeyContent !=NULL){
- write_nic_dword(dev, WCAMI, (u32)(*(KeyContent+i-2)) );
- write_nic_dword(dev, RWCAM, TargetCommand);
+ if (KeyContent != NULL) {
+ write_nic_dword(dev, WCAMI, (u32)(*(KeyContent+i-2)));
+ write_nic_dword(dev, RWCAM, TargetCommand);
+ }
}
}
- }
}
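For reference on the CAM programming in setKey(): each hardware key-cache entry spans CAM_CONTENT_COUNT dwords, written one at a time through the indirect register pair, with the data going to WCAMI and the command to RWCAM, where BIT31|BIT16 marks a write to entry dword (i + CAM_CONTENT_COUNT*EntryNo). Dword 0 packs the 16-bit config word together with the first two MAC-address bytes, dword 1 holds the remaining four MAC bytes, and dwords 2 and up carry the key material.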
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index ea46717f..a6e4c37 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -88,7 +88,7 @@ static void dm_check_rate_adaptive(struct net_device *dev);
// DM --> Bandwidth switch
static void dm_init_bandwidth_autoswitch(struct net_device *dev);
-static void dm_bandwidth_autoswitch( struct net_device *dev);
+static void dm_bandwidth_autoswitch(struct net_device *dev);
// DM --> TX power control
//static void dm_initialize_txpower_tracking(struct net_device *dev);
@@ -112,7 +112,7 @@ static void dm_bb_initialgain_backup(struct net_device *dev);
static void dm_dig_init(struct net_device *dev);
static void dm_ctrl_initgain_byrssi(struct net_device *dev);
static void dm_ctrl_initgain_byrssi_highpwr(struct net_device *dev);
-static void dm_ctrl_initgain_byrssi_by_driverrssi( struct net_device *dev);
+static void dm_ctrl_initgain_byrssi_by_driverrssi(struct net_device *dev);
static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(struct net_device *dev);
static void dm_initial_gain(struct net_device *dev);
static void dm_pd_th(struct net_device *dev);
@@ -289,7 +289,7 @@ extern void hal_dm_watchdog(struct net_device *dev)
* 01/16/2008 MHC RF_Type is assigned in ReadAdapterInfo(). We must call
* the function after making sure RF_Type.
*/
-extern void init_rate_adaptive(struct net_device * dev)
+extern void init_rate_adaptive(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -351,7 +351,7 @@ extern void init_rate_adaptive(struct net_device * dev)
* 05/26/08 amy Create version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
-static void dm_check_rate_adaptive(struct net_device * dev)
+static void dm_check_rate_adaptive(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
PRT_HIGH_THROUGHPUT pHTInfo = priv->ieee80211->pHTInfo;
@@ -372,11 +372,11 @@ static void dm_check_rate_adaptive(struct net_device * dev)
return;
// TODO: Only 11n mode is implemented currently,
- if( !(priv->ieee80211->mode == WIRELESS_MODE_N_24G ||
+ if(!(priv->ieee80211->mode == WIRELESS_MODE_N_24G ||
priv->ieee80211->mode == WIRELESS_MODE_N_5G))
return;
- if( priv->ieee80211->state == IEEE80211_LINKED )
+ if(priv->ieee80211->state == IEEE80211_LINKED)
{
// RT_TRACE(COMP_RATE, "dm_CheckRateAdaptive(): \t");
@@ -454,8 +454,8 @@ static void dm_check_rate_adaptive(struct net_device * dev)
//pHalData->UndecoratedSmoothedPWDB = 19;
if(priv->undecorated_smoothed_pwdb < (long)(pra->ping_rssi_thresh_for_ra+5))
{
- if( (priv->undecorated_smoothed_pwdb < (long)pra->ping_rssi_thresh_for_ra) ||
- ping_rssi_state )
+ if((priv->undecorated_smoothed_pwdb < (long)pra->ping_rssi_thresh_for_ra) ||
+ ping_rssi_state)
{
//DbgPrint("TestRSSI = %d, set RATR to 0x%x \n", pHalData->UndecoratedSmoothedPWDB, pRA->TestRSSIRATR);
pra->ratr_state = DM_RATR_STA_LOW;
@@ -480,8 +480,8 @@ static void dm_check_rate_adaptive(struct net_device * dev)
//
// Check whether updating of RATR0 is required
//
- currentRATR = read_nic_dword(dev, RATR0);
- if( targetRATR != currentRATR )
+ read_nic_dword(dev, RATR0, &currentRATR);
+ if(targetRATR != currentRATR)
{
u32 ratr_value;
ratr_value = targetRATR;
@@ -505,7 +505,7 @@ static void dm_check_rate_adaptive(struct net_device * dev)
} // dm_CheckRateAdaptive
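The RATR0 hunk above also shows the read_nic_* calling-convention change that runs through the rest of this patch: the register readers now fill a caller-supplied variable instead of returning the value (presumably so a failed USB control transfer can be reported separately). Old and new pattern side by side:

        currentRATR = read_nic_dword(dev, RATR0);       /* old */
        read_nic_dword(dev, RATR0, &currentRATR);       /* new */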
-static void dm_init_bandwidth_autoswitch(struct net_device * dev)
+static void dm_init_bandwidth_autoswitch(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -517,7 +517,7 @@ static void dm_init_bandwidth_autoswitch(struct net_device * dev)
} // dm_init_bandwidth_autoswitch
-static void dm_bandwidth_autoswitch(struct net_device * dev)
+static void dm_bandwidth_autoswitch(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -588,7 +588,7 @@ static u8 CCKSwingTable_Ch14[CCK_Table_length][8] = {
{0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00} // 11, -11db
};
-static void dm_TXPowerTrackingCallback_TSSI(struct net_device * dev)
+static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
bool bHighpowerstate, viviflag = FALSE;
@@ -627,14 +627,14 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device * dev)
RT_TRACE(COMP_POWER_TRACKING, "Set configuration with tx cmd queue fail!\n");
}
#else
- cmpk_message_handle_tx(dev, (u8*)&tx_cmd,
+ cmpk_message_handle_tx(dev, (u8 *)&tx_cmd,
DESC_PACKET_TYPE_INIT, sizeof(DCMD_TXCMD_T));
#endif
mdelay(1);
//DbgPrint("hi, vivi, strange\n");
for(i = 0;i <= 30; i++)
{
- Pwr_Flag = read_nic_byte(dev, 0x1ba);
+ read_nic_byte(dev, 0x1ba, &Pwr_Flag);
if (Pwr_Flag == 0)
{
@@ -642,9 +642,9 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device * dev)
continue;
}
#ifdef RTL8190P
- Avg_TSSI_Meas = read_nic_word(dev, 0x1bc);
+ read_nic_word(dev, 0x1bc, &Avg_TSSI_Meas);
#else
- Avg_TSSI_Meas = read_nic_word(dev, 0x13c);
+ read_nic_word(dev, 0x13c, &Avg_TSSI_Meas);
#endif
if(Avg_TSSI_Meas == 0)
{
@@ -655,12 +655,12 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device * dev)
for(k = 0;k < 5; k++)
{
#ifdef RTL8190P
- tmp_report[k] = read_nic_byte(dev, 0x1d8+k);
+ read_nic_byte(dev, 0x1d8+k, &tmp_report[k]);
#else
if(k !=4)
- tmp_report[k] = read_nic_byte(dev, 0x134+k);
+ read_nic_byte(dev, 0x134+k, &tmp_report[k]);
else
- tmp_report[k] = read_nic_byte(dev, 0x13e);
+ read_nic_byte(dev, 0x13e, &tmp_report[k]);
#endif
RT_TRACE(COMP_POWER_TRACKING, "TSSI_report_value = %d\n", tmp_report[k]);
}
@@ -816,7 +816,7 @@ static void dm_TXPowerTrackingCallback_TSSI(struct net_device * dev)
write_nic_byte(dev, 0x1ba, 0);
}
-static void dm_TXPowerTrackingCallback_ThermalMeter(struct net_device * dev)
+static void dm_TXPowerTrackingCallback_ThermalMeter(struct net_device *dev)
{
#define ThermalMeterVal 9
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -1572,7 +1572,7 @@ static void dm_CCKTxPowerAdjust_TSSI(struct net_device *dev, bool bInCH14)
TempVal = 0;
TempVal = priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[2] +
(priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[3]<<8) +
- (priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[4]<<16 )+
+ (priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[4]<<16)+
(priv->cck_txbbgain_table[priv->cck_present_attentuation].ccktxbb_valuearray[5]<<24);
rtl8192_setBBreg(dev, rCCK0_TxFilter2,bMaskDWord, TempVal);
//Write 0xa28 0xa29
@@ -1592,7 +1592,7 @@ static void dm_CCKTxPowerAdjust_TSSI(struct net_device *dev, bool bInCH14)
TempVal = 0;
TempVal = priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[2] +
(priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[3]<<8) +
- (priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[4]<<16 )+
+ (priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[4]<<16)+
(priv->cck_txbbgain_ch14_table[priv->cck_present_attentuation].ccktxbb_valuearray[5]<<24);
rtl8192_setBBreg(dev, rCCK0_TxFilter2,bMaskDWord, TempVal);
//Write 0xa28 0xa29
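The TempVal assembly in these hunks packs four consecutive gain-table bytes into one little-endian 32-bit value for rCCK0_TxFilter2; in kernel code the same thing is usually spelled with get_unaligned_le32(), though this driver keeps the explicit shifts. An illustrative standalone equivalent of the packing (uint32_t/uint8_t stand in for u32/u8):

        #include <stdint.h>

        static uint32_t pack_le32(const uint8_t b[4])
        {
                return (uint32_t)b[0] |
                       ((uint32_t)b[1] << 8) |
                       ((uint32_t)b[2] << 16) |
                       ((uint32_t)b[3] << 24);
        }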
@@ -1624,7 +1624,7 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
TempVal = 0;
TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][2] +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][3]<<8) +
- (CCKSwingTable_Ch1_Ch13[priv->CCK_index][4]<<16 )+
+ (CCKSwingTable_Ch1_Ch13[priv->CCK_index][4]<<16)+
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][5]<<24);
rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
@@ -1652,7 +1652,7 @@ static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH
TempVal = 0;
TempVal = CCKSwingTable_Ch14[priv->CCK_index][2] +
(CCKSwingTable_Ch14[priv->CCK_index][3]<<8) +
- (CCKSwingTable_Ch14[priv->CCK_index][4]<<16 )+
+ (CCKSwingTable_Ch14[priv->CCK_index][4]<<16)+
(CCKSwingTable_Ch14[priv->CCK_index][5]<<24);
rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
@@ -1727,7 +1727,7 @@ extern void dm_restore_dynamic_mechanism_state(struct net_device *dev)
if(priv->rate_adaptive.rate_adaptive_disabled)
return;
// TODO: Only 11n mode is implemented currently,
- if( !(priv->ieee80211->mode==WIRELESS_MODE_N_24G ||
+ if(!(priv->ieee80211->mode==WIRELESS_MODE_N_24G ||
priv->ieee80211->mode==WIRELESS_MODE_N_5G))
return;
{
@@ -1736,7 +1736,7 @@ extern void dm_restore_dynamic_mechanism_state(struct net_device *dev)
ratr_value = reg_ratr;
if(priv->rf_type == RF_1T2R) // 1T2R, Spatial Stream 2 should be disabled
{
- ratr_value &=~ (RATE_ALL_OFDM_2SS);
+ ratr_value &= ~(RATE_ALL_OFDM_2SS);
//DbgPrint("HW_VAR_TATR_0 from 0x%x ==> 0x%x\n", ((pu4Byte)(val))[0], ratr_value);
}
//DbgPrint("set HW_VAR_TATR_0 = 0x%x\n", ratr_value);
@@ -2222,7 +2222,7 @@ static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(
/* 2. When RSSI increase, We have to judge if it is larger than a threshold
and then execute the step below. */
- if ((priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_thresh) )
+ if ((priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_thresh))
{
u8 reset_flag = 0;
@@ -2316,7 +2316,7 @@ static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(
*
*---------------------------------------------------------------------------*/
static void dm_ctrl_initgain_byrssi_highpwr(
- struct net_device * dev)
+ struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
static u32 reset_cnt_highpwr;
@@ -2391,12 +2391,13 @@ static void dm_ctrl_initgain_byrssi_highpwr(
static void dm_initial_gain(
- struct net_device * dev)
+ struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 initial_gain=0;
static u8 initialized, force_write;
static u32 reset_cnt;
+ u8 tmp;
if(dm_digtable.dig_algorithm_switch)
{
@@ -2437,7 +2438,8 @@ static void dm_initial_gain(
reset_cnt = priv->reset_count;
}
- if(dm_digtable.pre_ig_value != read_nic_byte(dev, rOFDM0_XAAGCCore1))
+ read_nic_byte(dev, rOFDM0_XAAGCCore1, &tmp);
+ if (dm_digtable.pre_ig_value != tmp)
force_write = 1;
{
@@ -2459,7 +2461,7 @@ static void dm_initial_gain(
}
static void dm_pd_th(
- struct net_device * dev)
+ struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
static u8 initialized, force_write;
@@ -2571,7 +2573,7 @@ static void dm_pd_th(
}
static void dm_cs_ratio(
- struct net_device * dev)
+ struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
static u8 initialized,force_write;
@@ -2589,7 +2591,7 @@ static void dm_cs_ratio(
{
if ((dm_digtable.rssi_val <= dm_digtable.rssi_low_thresh))
dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER;
- else if ((dm_digtable.rssi_val >= dm_digtable.rssi_high_thresh) )
+ else if ((dm_digtable.rssi_val >= dm_digtable.rssi_high_thresh))
dm_digtable.curcs_ratio_state = DIG_CS_RATIO_HIGHER;
else
dm_digtable.curcs_ratio_state = dm_digtable.precs_ratio_state;
@@ -2634,7 +2636,7 @@ static void dm_cs_ratio(
}
}
-extern void dm_init_edca_turbo(struct net_device * dev)
+extern void dm_init_edca_turbo(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -2644,7 +2646,7 @@ extern void dm_init_edca_turbo(struct net_device * dev)
} // dm_init_edca_turbo
static void dm_check_edca_turbo(
- struct net_device * dev)
+ struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
PRT_HIGH_THROUGHPUT pHTInfo = priv->ieee80211->pHTInfo;
@@ -2727,8 +2729,9 @@ static void dm_check_edca_turbo(
// TODO: Modified this part and try to set acm control in only 1 IO processing!!
PACI_AIFSN pAciAifsn = (PACI_AIFSN)&(qos_parameters->aifs[0]);
- u8 AcmCtrl = read_nic_byte( dev, AcmHwCtrl );
- if( pAciAifsn->f.ACM )
+ u8 AcmCtrl;
+ read_nic_byte(dev, AcmHwCtrl, &AcmCtrl);
+ if(pAciAifsn->f.ACM)
{ // ACM bit is 1.
AcmCtrl |= AcmHw_BeqEn;
}
@@ -2737,8 +2740,8 @@ static void dm_check_edca_turbo(
AcmCtrl &= (~AcmHw_BeqEn);
}
- RT_TRACE( COMP_QOS,"SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n", AcmCtrl ) ;
- write_nic_byte(dev, AcmHwCtrl, AcmCtrl );
+ RT_TRACE(COMP_QOS,"SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n", AcmCtrl) ;
+ write_nic_byte(dev, AcmHwCtrl, AcmCtrl);
}
}
priv->bcurrent_turbo_EDCA = false;
@@ -2753,7 +2756,7 @@ dm_CheckEdcaTurbo_EXIT:
lastRxOkCnt = priv->stats.rxbytesunicast;
} // dm_CheckEdcaTurbo
-extern void DM_CTSToSelfSetting(struct net_device * dev,u32 DM_Type, u32 DM_Value)
+extern void DM_CTSToSelfSetting(struct net_device *dev,u32 DM_Type, u32 DM_Value)
{
struct r8192_priv *priv = ieee80211_priv((struct net_device *)dev);
@@ -2773,7 +2776,7 @@ extern void DM_CTSToSelfSetting(struct net_device * dev,u32 DM_Type, u32 DM_Valu
}
}
-static void dm_init_ctstoself(struct net_device * dev)
+static void dm_init_ctstoself(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv((struct net_device *)dev);
@@ -2837,7 +2840,7 @@ static void dm_ctstoself(struct net_device *dev)
* 05/28/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
-static void dm_check_rfctrl_gpio(struct net_device * dev)
+static void dm_check_rfctrl_gpio(struct net_device *dev)
{
//struct r8192_priv *priv = ieee80211_priv(dev);
@@ -2881,7 +2884,7 @@ static void dm_check_pbc_gpio(struct net_device *dev)
u8 tmp1byte;
- tmp1byte = read_nic_byte(dev,GPI);
+ read_nic_byte(dev, GPI, &tmp1byte);
if(tmp1byte == 0xff)
return;
@@ -2933,18 +2936,18 @@ extern void dm_gpio_change_rf_callback(struct work_struct *work)
{
// 0x108 GPIO input register is read only
//set 0x108 B1= 1: RF-ON; 0: RF-OFF.
- tmp1byte = read_nic_byte(dev,GPI);
+ read_nic_byte(dev, GPI, &tmp1byte);
eRfPowerStateToSet = (tmp1byte&BIT1) ? eRfOn : eRfOff;
- if( (priv->bHwRadioOff == true) && (eRfPowerStateToSet == eRfOn))
+ if((priv->bHwRadioOff == true) && (eRfPowerStateToSet == eRfOn))
{
RT_TRACE(COMP_RF, "gpiochangeRF - HW Radio ON\n");
priv->bHwRadioOff = false;
bActuallySet = true;
}
- else if ( (priv->bHwRadioOff == false) && (eRfPowerStateToSet == eRfOff))
+ else if ((priv->bHwRadioOff == false) && (eRfPowerStateToSet == eRfOff))
{
RT_TRACE(COMP_RF, "gpiochangeRF - HW Radio OFF\n");
priv->bHwRadioOff = true;
@@ -2996,7 +2999,7 @@ extern void dm_rf_pathcheck_workitemcallback(struct work_struct *work)
/* 2008/01/30 MH After discussing with SD3 Jerry, 0xc04/0xd04 register will
always be the same. We only read 0xc04 now. */
- rfpath = read_nic_byte(dev, 0xc04);
+ read_nic_byte(dev, 0xc04, &rfpath);
// Check Bit 0-3, it means if RF A-D is enabled.
for (i = 0; i < RF90_PATH_MAX; i++)
@@ -3012,7 +3015,7 @@ extern void dm_rf_pathcheck_workitemcallback(struct work_struct *work)
dm_rxpath_sel_byrssi(dev);
} /* DM_RFPathCheckWorkItemCallBack */
-static void dm_init_rxpath_selection(struct net_device * dev)
+static void dm_init_rxpath_selection(struct net_device *dev)
{
u8 i;
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -3033,7 +3036,7 @@ static void dm_init_rxpath_selection(struct net_device * dev)
}
}
-static void dm_rxpath_sel_byrssi(struct net_device * dev)
+static void dm_rxpath_sel_byrssi(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 i, max_rssi_index=0, min_rssi_index=0, sec_rssi_index=0, rf_num=0;
@@ -3052,12 +3055,13 @@ static void dm_rxpath_sel_byrssi(struct net_device * dev)
if(!cck_Rx_Path_initialized)
{
- DM_RxPathSelTable.cck_Rx_path = (read_nic_byte(dev, 0xa07)&0xf);
+ read_nic_byte(dev, 0xa07, &DM_RxPathSelTable.cck_Rx_path);
+ DM_RxPathSelTable.cck_Rx_path &= 0xf;
cck_Rx_Path_initialized = 1;
}
- DM_RxPathSelTable.disabledRF = 0xf;
- DM_RxPathSelTable.disabledRF &=~ (read_nic_byte(dev, 0xc04));
+ read_nic_byte(dev, 0xc04, &DM_RxPathSelTable.disabledRF);
+ DM_RxPathSelTable.disabledRF = ~DM_RxPathSelTable.disabledRF & 0xf;
if(priv->ieee80211->mode == WIRELESS_MODE_B)
{
@@ -3356,7 +3360,7 @@ extern void dm_fsync_timer_callback(unsigned long data)
bool bSwitchFromCountDiff = false;
bool bDoubleTimeInterval = false;
- if( priv->ieee80211->state == IEEE80211_LINKED &&
+ if(priv->ieee80211->state == IEEE80211_LINKED &&
priv->ieee80211->bfsync_enable &&
(priv->ieee80211->pHTInfo->IOTAction & HT_IOT_ACT_CDD_FSYNC))
{
@@ -3576,12 +3580,12 @@ void dm_check_fsync(struct net_device *dev)
RT_TRACE(COMP_HALDM, "RSSI %d TimeInterval %d MultipleTimeInterval %d\n", priv->ieee80211->fsync_rssi_threshold, priv->ieee80211->fsync_time_interval, priv->ieee80211->fsync_multiple_timeinterval);
RT_TRACE(COMP_HALDM, "RateBitmap 0x%x FirstDiffRateThreshold %d SecondDiffRateThreshold %d\n", priv->ieee80211->fsync_rate_bitmap, priv->ieee80211->fsync_firstdiff_ratethreshold, priv->ieee80211->fsync_seconddiff_ratethreshold);
- if( priv->ieee80211->state == IEEE80211_LINKED &&
+ if(priv->ieee80211->state == IEEE80211_LINKED &&
(priv->ieee80211->pHTInfo->IOTAction & HT_IOT_ACT_CDD_FSYNC))
{
if(priv->ieee80211->bfsync_enable == 0)
{
- switch(priv->ieee80211->fsync_state)
+ switch (priv->ieee80211->fsync_state)
{
case Default_Fsync:
dm_StartHWFsync(dev);
@@ -3599,7 +3603,7 @@ void dm_check_fsync(struct net_device *dev)
}
else
{
- switch(priv->ieee80211->fsync_state)
+ switch (priv->ieee80211->fsync_state)
{
case Default_Fsync:
dm_StartSWFsync(dev);
@@ -3632,7 +3636,7 @@ void dm_check_fsync(struct net_device *dev)
}
else
{
- switch(priv->ieee80211->fsync_state)
+ switch (priv->ieee80211->fsync_state)
{
case HW_Fsync:
dm_EndHWFsync(dev);
@@ -3731,17 +3735,17 @@ extern void dm_shadow_init(struct net_device *dev)
for (page = 0; page < 5; page++)
for (offset = 0; offset < 256; offset++)
{
- dm_shadow[page][offset] = read_nic_byte(dev, offset+page*256);
+ read_nic_byte(dev, offset+page*256, &dm_shadow[page][offset]);
//DbgPrint("P-%d/O-%02x=%02x\r\n", page, offset, DM_Shadow[page][offset]);
}
for (page = 8; page < 11; page++)
for (offset = 0; offset < 256; offset++)
- dm_shadow[page][offset] = read_nic_byte(dev, offset+page*256);
+ read_nic_byte(dev, offset+page*256, &dm_shadow[page][offset]);
for (page = 12; page < 15; page++)
for (offset = 0; offset < 256; offset++)
- dm_shadow[page][offset] = read_nic_byte(dev, offset+page*256);
+ read_nic_byte(dev, offset+page*256, &dm_shadow[page][offset]);
} /* dm_shadow_init */
@@ -3787,7 +3791,7 @@ static void dm_dynamic_txpower(struct net_device *dev)
return;
}
//printk("priv->ieee80211->current_network.unknown_cap_exist is %d ,priv->ieee80211->current_network.broadcom_cap_exist is %d\n",priv->ieee80211->current_network.unknown_cap_exist,priv->ieee80211->current_network.broadcom_cap_exist);
- if((priv->ieee80211->current_network.atheros_cap_exist ) && (priv->ieee80211->mode == IEEE_G)){
+ if((priv->ieee80211->current_network.atheros_cap_exist) && (priv->ieee80211->mode == IEEE_G)){
txhipower_threshhold = TX_POWER_ATHEROAP_THRESH_HIGH;
txlowpower_threshold = TX_POWER_ATHEROAP_THRESH_LOW;
}
@@ -3832,8 +3836,8 @@ static void dm_dynamic_txpower(struct net_device *dev)
priv->bDynamicTxLowPower = false;
}
- if( (priv->bDynamicTxHighPower != priv->bLastDTPFlag_High ) ||
- (priv->bDynamicTxLowPower != priv->bLastDTPFlag_Low ) )
+ if((priv->bDynamicTxHighPower != priv->bLastDTPFlag_High) ||
+ (priv->bDynamicTxLowPower != priv->bLastDTPFlag_Low))
{
RT_TRACE(COMP_TXAGC,"SetTxPowerLevel8190() channel = %d \n" , priv->ieee80211->current_network.channel);
@@ -3852,20 +3856,20 @@ static void dm_dynamic_txpower(struct net_device *dev)
} /* dm_dynamic_txpower */
//added by vivi, for read tx rate and retrycount
-static void dm_check_txrateandretrycount(struct net_device * dev)
+static void dm_check_txrateandretrycount(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device* ieee = priv->ieee80211;
+ struct ieee80211_device *ieee = priv->ieee80211;
//for 11n tx rate
// priv->stats.CurrentShowTxate = read_nic_byte(dev, Current_Tx_Rate_Reg);
- ieee->softmac_stats.CurrentShowTxate = read_nic_byte(dev, Current_Tx_Rate_Reg);
+ read_nic_byte(dev, Current_Tx_Rate_Reg, &ieee->softmac_stats.CurrentShowTxate);
//printk("=============>tx_rate_reg:%x\n", ieee->softmac_stats.CurrentShowTxate);
//for initial tx rate
// priv->stats.last_packet_rate = read_nic_byte(dev, Initial_Tx_Rate_Reg);
- ieee->softmac_stats.last_packet_rate = read_nic_byte(dev ,Initial_Tx_Rate_Reg);
+ read_nic_byte(dev, Initial_Tx_Rate_Reg, &ieee->softmac_stats.last_packet_rate);
//for tx tx retry count
// priv->stats.txretrycount = read_nic_dword(dev, Tx_Retry_Count_Reg);
- ieee->softmac_stats.txretrycount = read_nic_dword(dev, Tx_Retry_Count_Reg);
+ read_nic_dword(dev, Tx_Retry_Count_Reg, &ieee->softmac_stats.txretrycount);
}
static void dm_send_rssi_tofw(struct net_device *dev)
@@ -3882,7 +3886,7 @@ static void dm_send_rssi_tofw(struct net_device *dev)
tx_cmd.Length = 4;
tx_cmd.Value = priv->undecorated_smoothed_pwdb;
- cmpk_message_handle_tx(dev, (u8*)&tx_cmd,
+ cmpk_message_handle_tx(dev, (u8 *)&tx_cmd,
DESC_PACKET_TYPE_INIT, sizeof(DCMD_TXCMD_T));
}
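
The bulk of the r8192U_dm.c hunks above convert register reads from the old value-returning form, val = read_nic_byte(dev, addr), to an out-parameter form, read_nic_byte(dev, addr, &val), apparently so the return value can carry an error code from the USB read path. A minimal sketch of the new calling convention, with a hypothetical stub standing in for the driver's accessor (the canned values are for the demo only):

    #include <stdio.h>

    /*
     * Hypothetical stand-in for the driver's accessor: on success it writes
     * the register value through the out-parameter and returns 0.
     */
    static int read_nic_byte(void *dev, unsigned int addr, unsigned char *data)
    {
            (void)dev;
            *data = (addr == 0x1ba) ? 0x00 : 0xff;  /* canned value for the demo */
            return 0;
    }

    int main(void)
    {
            unsigned char pwr_flag;

            /* old style: pwr_flag = read_nic_byte(dev, 0x1ba);            */
            /* new style: status comes back, the value goes via pointer.   */
            if (read_nic_byte(NULL, 0x1ba, &pwr_flag) == 0)
                    printf("0x1ba = 0x%02x\n", pwr_flag);
            return 0;
    }

In the hunks above the callers do not yet check the returned status; the conversion only changes where the value lands.
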
diff --git a/drivers/staging/rtl8192u/r8192U_hw.h b/drivers/staging/rtl8192u/r8192U_hw.h
index 15b0423..dd07a73 100644
--- a/drivers/staging/rtl8192u/r8192U_hw.h
+++ b/drivers/staging/rtl8192u/r8192U_hw.h
@@ -1,6 +1,6 @@
/*
This is part of rtl8187 OpenSource driver.
- Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it>
+ Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
Released under the terms of GPL (General Public Licence)
Parts of this driver are based on the GPL part of the
@@ -388,10 +388,11 @@ enum _RTL8192Usb_HW {
#define EPROM_CMD_NORMAL 0
#define EPROM_CMD_LOAD 1
#define EPROM_CMD_PROGRAM 2
-#define EPROM_CS_SHIFT 3
-#define EPROM_CK_SHIFT 2
-#define EPROM_W_SHIFT 1
-#define EPROM_R_SHIFT 0
+#define EPROM_CS_BIT BIT(3)
+#define EPROM_CK_BIT BIT(2)
+#define EPROM_W_BIT BIT(1)
+#define EPROM_R_BIT BIT(0)
+
MAC0 = 0x000,
MAC1 = 0x001,
MAC2 = 0x002,
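
The r8192U_hw.h hunk above replaces the EPROM_*_SHIFT bit positions with ready-made EPROM_*_BIT masks built from the kernel's BIT() macro, so callers can OR and mask the EEPROM control lines directly instead of shifting 1 by hand. A small self-contained illustration; the BIT() definition below only mirrors the <linux/bitops.h> macro for the purpose of the sketch:

    #include <stdio.h>

    #define BIT(nr)         (1U << (nr))    /* stand-in for <linux/bitops.h> */
    #define EPROM_CS_BIT    BIT(3)
    #define EPROM_CK_BIT    BIT(2)

    int main(void)
    {
            unsigned char cmd = 0;

            cmd |= EPROM_CS_BIT;            /* was: cmd |= 1 << EPROM_CS_SHIFT    */
            cmd &= ~EPROM_CK_BIT;           /* was: cmd &= ~(1 << EPROM_CK_SHIFT) */
            printf("cmd = 0x%02x\n", cmd);
            return 0;
    }

Any users of the old shift constants would need to change in step with this header.
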
diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
index c904aa8..61f6620 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.c
+++ b/drivers/staging/rtl8192u/r8192U_wx.c
@@ -2,7 +2,7 @@
This file contains wireless extension handlers.
This is part of rtl8180 OpenSource driver.
- Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it>
+ Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
Released under the terms of GPL (General Public Licence)
Parts of this driver are based on the GPL part
@@ -144,7 +144,7 @@ static int r8192_wx_read_regs(struct net_device *dev,
down(&priv->wx_sem);
- get_user(addr,(u8*)wrqu->data.pointer);
+ get_user(addr,(u8 *)wrqu->data.pointer);
data1 = read_rtl8225(dev, addr);
wrqu->data.length = data1;
@@ -162,7 +162,7 @@ static int r8192_wx_write_regs(struct net_device *dev,
down(&priv->wx_sem);
- get_user(addr, (u8*)wrqu->data.pointer);
+ get_user(addr, (u8 *)wrqu->data.pointer);
write_rtl8225(dev, addr, wrqu->data.length);
up(&priv->wx_sem);
@@ -199,7 +199,7 @@ static int r8192_wx_write_bb(struct net_device *dev,
down(&priv->wx_sem);
- get_user(databb, (u8*)wrqu->data.pointer);
+ get_user(databb, (u8 *)wrqu->data.pointer);
rtl8187_write_phy(dev, wrqu->data.length, databb);
up(&priv->wx_sem);
@@ -217,7 +217,7 @@ static int r8192_wx_write_nicb(struct net_device *dev,
down(&priv->wx_sem);
- get_user(addr, (u32*)wrqu->data.pointer);
+ get_user(addr, (u32 *)wrqu->data.pointer);
write_nic_byte(dev, addr, wrqu->data.length);
up(&priv->wx_sem);
@@ -234,8 +234,8 @@ static int r8192_wx_read_nicb(struct net_device *dev,
down(&priv->wx_sem);
- get_user(addr,(u32*)wrqu->data.pointer);
- data1 = read_nic_byte(dev, addr);
+ get_user(addr,(u32 *)wrqu->data.pointer);
+ read_nic_byte(dev, addr, &data1);
wrqu->data.length = data1;
up(&priv->wx_sem);
@@ -254,12 +254,12 @@ static int r8192_wx_get_ap_status(struct net_device *dev,
down(&priv->wx_sem);
//count the length of input ssid
- for(name_len=0 ; ((char*)wrqu->data.pointer)[name_len]!='\0' ; name_len++);
+ for(name_len=0 ; ((char *)wrqu->data.pointer)[name_len]!='\0' ; name_len++);
//search for the corresponding info which is received
list_for_each_entry(target, &ieee->network_list, list) {
if ( (target->ssid_len == name_len) &&
- (strncmp(target->ssid, (char*)wrqu->data.pointer, name_len)==0)){
+ (strncmp(target->ssid, (char *)wrqu->data.pointer, name_len)==0)){
if(target->wpa_ie_len>0 || target->rsn_ie_len>0 )
//set flags=1 to indicate this ap is WPA
wrqu->data.flags = 1;
@@ -380,7 +380,7 @@ static int rtl8180_wx_get_range(struct net_device *dev,
union iwreq_data *wrqu, char *extra)
{
struct iw_range *range = (struct iw_range *)extra;
- struct iw_range_with_scan_capa* tmp = (struct iw_range_with_scan_capa*)range;
+ struct iw_range_with_scan_capa *tmp = (struct iw_range_with_scan_capa *)range;
struct r8192_priv *priv = ieee80211_priv(dev);
u16 val;
int i;
@@ -483,7 +483,7 @@ static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device* ieee = priv->ieee80211;
+ struct ieee80211_device *ieee = priv->ieee80211;
int ret = 0;
if(!priv->up) return -ENETDOWN;
@@ -492,7 +492,7 @@ static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
return -EAGAIN;
if (wrqu->data.flags & IW_SCAN_THIS_ESSID)
{
- struct iw_scan_req* req = (struct iw_scan_req*)b;
+ struct iw_scan_req *req = (struct iw_scan_req *)b;
if (req->essid_len)
{
//printk("==**&*&*&**===>scan set ssid:%s\n", req->essid);
@@ -709,7 +709,7 @@ static int r8192_wx_set_enc(struct net_device *dev,
#define CONF_WEP40 0x4
#define CONF_WEP104 0x14
- switch(wrqu->encoding.flags & IW_ENCODE_INDEX){
+ switch (wrqu->encoding.flags & IW_ENCODE_INDEX){
case 0: key_idx = ieee->tx_keyidx; break;
case 1: key_idx = 0; break;
case 2: key_idx = 1; break;
@@ -757,7 +757,7 @@ static int r8192_wx_set_scan_type(struct net_device *dev, struct iw_request_info
iwreq_data *wrqu, char *p){
struct r8192_priv *priv = ieee80211_priv(dev);
- int *parms=(int*)p;
+ int *parms=(int *)p;
int mode=parms[0];
priv->ieee80211->active_scan = mode;
@@ -891,7 +891,7 @@ static int r8192_wx_set_enc_ext(struct net_device *dev,
{
int ret=0;
struct r8192_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device* ieee = priv->ieee80211;
+ struct ieee80211_device *ieee = priv->ieee80211;
//printk("===>%s()\n", __FUNCTION__);
@@ -922,7 +922,7 @@ static int r8192_wx_set_enc_ext(struct net_device *dev,
ieee->pairwise_key_type = alg;
EnableHWSecurityConfig8192(dev);
}
- memcpy((u8*)key, ext->key, 16); //we only get 16 bytes key.why? WB 2008.7.1
+ memcpy((u8 *)key, ext->key, 16); //we only get 16 bytes key.why? WB 2008.7.1
if ((alg & KEY_TYPE_WEP40) && (ieee->auth_mode !=2) )
{
@@ -952,7 +952,7 @@ static int r8192_wx_set_enc_ext(struct net_device *dev,
4,//EntryNo
idx, //KeyIndex
alg, //KeyType
- (u8*)ieee->ap_mac_addr, //MacAddr
+ (u8 *)ieee->ap_mac_addr, //MacAddr
0, //DefaultKey
key); //KeyContent
}
@@ -1180,8 +1180,8 @@ static iw_handler r8192_private_handler[] = {
struct iw_statistics *r8192_get_wireless_stats(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- struct ieee80211_device* ieee = priv->ieee80211;
- struct iw_statistics* wstats = &priv->wstats;
+ struct ieee80211_device *ieee = priv->ieee80211;
+ struct iw_statistics *wstats = &priv->wstats;
int tmp_level = 0;
int tmp_qual = 0;
int tmp_noise = 0;
diff --git a/drivers/staging/rtl8192u/r8192U_wx.h b/drivers/staging/rtl8192u/r8192U_wx.h
index 9f6b105..ae7a617 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.h
+++ b/drivers/staging/rtl8192u/r8192U_wx.h
@@ -1,6 +1,6 @@
/*
This is part of rtl8180 OpenSource driver - v 0.3
- Copyright (C) Andrea Merello 2004 <andreamrl@tiscali.it>
+ Copyright (C) Andrea Merello 2004 <andrea.merello@gmail.com>
Released under the terms of GPL (General Public Licence)
Parts of this driver are based on the GPL part of the official realtek driver
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
index b755eb4..5614401 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
@@ -1,81 +1,63 @@
/******************************************************************************
-
- (c) Copyright 2008, RealTEK Technologies Inc. All Rights Reserved.
-
- Module: r819xusb_cmdpkt.c (RTL8190 TX/RX command packet handler Source C File)
-
- Note: The module is responsible for handling TX and RX command packet.
- 1. TX : Send set and query configuration command packet.
- 2. RX : Receive tx feedback, beacon state, query configuration
- command packet.
-
- Function:
-
- Export:
-
- Abbrev:
-
- History:
- Data Who Remark
-
- 05/06/2008 amy Create initial version porting from windows driver.
-
-******************************************************************************/
+ *
+ * (c) Copyright 2008, RealTEK Technologies Inc. All Rights Reserved.
+ *
+ * Module: r819xusb_cmdpkt.c
+ * (RTL8190 TX/RX command packet handler Source C File)
+ *
+ * Note: The module is responsible for handling TX and RX command packet.
+ * 1. TX : Send set and query configuration command packet.
+ * 2. RX : Receive tx feedback, beacon state, query configuration
+ * command packet.
+ *
+ * Function:
+ *
+ * Export:
+ *
+ * Abbrev:
+ *
+ * History:
+ *
+ * Date Who Remark
+ * 05/06/2008 amy Create initial version porting from
+ * windows driver.
+ *
+ ******************************************************************************/
#include "r8192U.h"
#include "r819xU_cmdpkt.h"
-/*---------------------------Define Local Constant---------------------------*/
-/* Debug constant*/
-#define CMPK_DEBOUNCE_CNT 1
-/* 2007/10/24 MH Add for printing a range of data. */
-#define CMPK_PRINT(Address)\
-{\
- unsigned char i;\
- u32 temp[10];\
- \
- memcpy(temp, Address, 40);\
- for (i = 0; i <40; i+=4)\
- printk("\r\n %08x", temp[i]);\
-}\
-/*---------------------------Define functions---------------------------------*/
-
-rt_status
-SendTxCommandPacket(
- struct net_device *dev,
- void* pData,
- u32 DataLen
- )
+
+rt_status SendTxCommandPacket(struct net_device *dev, void *pData, u32 DataLen)
{
rt_status rtStatus = RT_STATUS_SUCCESS;
struct r8192_priv *priv = ieee80211_priv(dev);
struct sk_buff *skb;
cb_desc *tcb_desc;
unsigned char *ptr_buf;
- //bool bLastInitPacket = false;
- //PlatformAcquireSpinLock(Adapter, RT_TX_SPINLOCK);
-
- //Get TCB and local buffer from common pool. (It is shared by CmdQ, MgntQ, and USB coalesce DataQ)
+ /* Get TCB and local buffer from common pool.
+ (It is shared by CmdQ, MgntQ, and USB coalesce DataQ) */
skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4);
- memcpy((unsigned char *)(skb->cb),&dev,sizeof(dev));
- tcb_desc = (cb_desc*)(skb->cb + MAX_DEV_ADDR_SIZE);
+ if (!skb)
+ return RT_STATUS_FAILURE;
+ memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
+ tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
tcb_desc->queue_index = TXCMD_QUEUE;
tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_NORMAL;
tcb_desc->bLastIniPkt = 0;
skb_reserve(skb, USB_HWDESC_HEADER_LEN);
ptr_buf = skb_put(skb, DataLen);
- memcpy(ptr_buf,pData,DataLen);
- tcb_desc->txbuf_size= (u16)DataLen;
-
- if(!priv->ieee80211->check_nic_enough_desc(dev,tcb_desc->queue_index)||
- (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index]))||\
- (priv->ieee80211->queue_stop) ) {
- RT_TRACE(COMP_FIRMWARE,"===================NULL packet==================================> tx full!\n");
- skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
- } else {
- priv->ieee80211->softmac_hard_start_xmit(skb,dev);
- }
+ memcpy(ptr_buf, pData, DataLen);
+ tcb_desc->txbuf_size = (u16)DataLen;
+
+ if (!priv->ieee80211->check_nic_enough_desc(dev, tcb_desc->queue_index) ||
+ (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index])) ||
+ (priv->ieee80211->queue_stop)) {
+ RT_TRACE(COMP_FIRMWARE, "=== NULL packet ======> tx full!\n");
+ skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
+ } else {
+ priv->ieee80211->softmac_hard_start_xmit(skb, dev);
+ }
- //PlatformReleaseSpinLock(Adapter, RT_TX_SPINLOCK);
return rtStatus;
}
@@ -83,27 +65,25 @@ SendTxCommandPacket(
* Function: cmpk_message_handle_tx()
*
* Overview: Driver internal module can call the API to send message to
- * firmware side. For example, you can send a debug command packet.
- * Or you can send a request for FW to modify RLX4181 LBUS HW bank.
- * Otherwise, you can change MAC/PHT/RF register by firmware at
- * run time. We do not support message more than one segment now.
+ * firmware side. For example, you can send a debug command packet.
+ * Or you can send a request for FW to modify RLX4181 LBUS HW bank.
+ * Otherwise, you can change MAC/PHT/RF register by firmware at
+ * run time. We do not support message more than one segment now.
*
- * Input: NONE
+ * Input: NONE
*
- * Output: NONE
+ * Output: NONE
*
- * Return: NONE
+ * Return: NONE
*
* Revised History:
* When Who Remark
* 05/06/2008 amy porting from windows code.
*
*---------------------------------------------------------------------------*/
- extern rt_status cmpk_message_handle_tx(
- struct net_device *dev,
- u8* codevirtualaddress,
- u32 packettype,
- u32 buffer_len)
+extern rt_status cmpk_message_handle_tx(struct net_device *dev,
+ u8 *codevirtualaddress,
+ u32 packettype, u32 buffer_len)
{
bool rt_status = true;
@@ -113,8 +93,6 @@ SendTxCommandPacket(
struct r8192_priv *priv = ieee80211_priv(dev);
u16 frag_threshold;
u16 frag_length, frag_offset = 0;
- //u16 total_size;
- //int i;
rt_firmware *pfirmware = priv->pFirmware;
struct sk_buff *skb;
@@ -123,11 +101,11 @@ SendTxCommandPacket(
u8 bLastIniPkt;
firmware_init_param(dev);
- //Fragmentation might be required
+ /* Fragmentation might be required */
frag_threshold = pfirmware->cmdpacket_frag_thresold;
do {
- if((buffer_len - frag_offset) > frag_threshold) {
- frag_length = frag_threshold ;
+ if ((buffer_len - frag_offset) > frag_threshold) {
+ frag_length = frag_threshold;
bLastIniPkt = 0;
} else {
@@ -136,146 +114,127 @@ SendTxCommandPacket(
}
- /* Allocate skb buffer to contain firmware info and tx descriptor info
- * add 4 to avoid packet appending overflow.
- * */
- #ifdef RTL8192U
+ /* Allocate skb buffer to contain firmware info and tx
+ descriptor info add 4 to avoid packet appending overflow. */
+#ifdef RTL8192U
skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + frag_length + 4);
- #else
+#else
skb = dev_alloc_skb(frag_length + 4);
- #endif
- memcpy((unsigned char *)(skb->cb),&dev,sizeof(dev));
- tcb_desc = (cb_desc*)(skb->cb + MAX_DEV_ADDR_SIZE);
+#endif
+ memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
+ tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
tcb_desc->queue_index = TXCMD_QUEUE;
tcb_desc->bCmdOrInit = packettype;
tcb_desc->bLastIniPkt = bLastIniPkt;
- #ifdef RTL8192U
+#ifdef RTL8192U
skb_reserve(skb, USB_HWDESC_HEADER_LEN);
- #endif
+#endif
seg_ptr = skb_put(skb, buffer_len);
/*
* Transform from little endian to big endian
* and pending zero
*/
- memcpy(seg_ptr,codevirtualaddress,buffer_len);
- tcb_desc->txbuf_size= (u16)buffer_len;
+ memcpy(seg_ptr, codevirtualaddress, buffer_len);
+ tcb_desc->txbuf_size = (u16)buffer_len;
- if(!priv->ieee80211->check_nic_enough_desc(dev,tcb_desc->queue_index)||
- (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index]))||\
- (priv->ieee80211->queue_stop) ) {
- RT_TRACE(COMP_FIRMWARE,"=====================================================> tx full!\n");
+ if (!priv->ieee80211->check_nic_enough_desc(dev, tcb_desc->queue_index) ||
+ (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index])) ||
+ (priv->ieee80211->queue_stop)) {
+ RT_TRACE(COMP_FIRMWARE, "======> tx full!\n");
skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
} else {
- priv->ieee80211->softmac_hard_start_xmit(skb,dev);
+ priv->ieee80211->softmac_hard_start_xmit(skb, dev);
}
codevirtualaddress += frag_length;
frag_offset += frag_length;
- }while(frag_offset < buffer_len);
+ } while (frag_offset < buffer_len);
return rt_status;
#endif
-} /* CMPK_Message_Handle_Tx */
+}
/*-----------------------------------------------------------------------------
* Function: cmpk_counttxstatistic()
*
* Overview:
*
- * Input: PADAPTER pAdapter - .
- * CMPK_TXFB_T *psTx_FB - .
+ * Input: PADAPTER pAdapter
+ * CMPK_TXFB_T *psTx_FB
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
- * When Who Remark
- * 05/12/2008 amy Create Version 0 porting from windows code.
+ * When Who Remark
+ * 05/12/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
-static void
-cmpk_count_txstatistic(
- struct net_device *dev,
- cmpk_txfb_t *pstx_fb)
+static void cmpk_count_txstatistic(struct net_device *dev, cmpk_txfb_t *pstx_fb)
{
struct r8192_priv *priv = ieee80211_priv(dev);
#ifdef ENABLE_PS
RT_RF_POWER_STATE rtState;
- pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState));
+ pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE,
+ (pu1Byte)(&rtState));
- // When RF is off, we should not count the packet for hw/sw synchronize
- // reason, ie. there may be a duration while sw switch is changed and hw
- // switch is being changed. 2006.12.04, by shien chang.
+ /* When RF is off, we should not count the packet for hw/sw synchronize
+ reason, ie. there may be a duration while sw switch is changed and
+ hw switch is being changed. */
if (rtState == eRfOff)
- {
return;
- }
#endif
#ifdef TODO
- if(pAdapter->bInHctTest)
+ if (pAdapter->bInHctTest)
return;
#endif
- /* We can not know the packet length and transmit type: broadcast or uni
- or multicast. So the relative statistics must be collected in tx
- feedback info. */
- if (pstx_fb->tok)
- {
+ /* We can not know the packet length and transmit type:
+ broadcast or uni or multicast. So the relative statistics
+ must be collected in tx feedback info. */
+ if (pstx_fb->tok) {
priv->stats.txfeedbackok++;
priv->stats.txoktotal++;
priv->stats.txokbytestotal += pstx_fb->pkt_length;
priv->stats.txokinperiod++;
/* We can not make sure broadcast/multicast or unicast mode. */
- if (pstx_fb->pkt_type == PACKET_MULTICAST)
- {
+ if (pstx_fb->pkt_type == PACKET_MULTICAST) {
priv->stats.txmulticast++;
priv->stats.txbytesmulticast += pstx_fb->pkt_length;
- }
- else if (pstx_fb->pkt_type == PACKET_BROADCAST)
- {
+ } else if (pstx_fb->pkt_type == PACKET_BROADCAST) {
priv->stats.txbroadcast++;
priv->stats.txbytesbroadcast += pstx_fb->pkt_length;
- }
- else
- {
+ } else {
priv->stats.txunicast++;
priv->stats.txbytesunicast += pstx_fb->pkt_length;
}
- }
- else
- {
+ } else {
priv->stats.txfeedbackfail++;
priv->stats.txerrtotal++;
priv->stats.txerrbytestotal += pstx_fb->pkt_length;
/* We can not make sure broadcast/multicast or unicast mode. */
if (pstx_fb->pkt_type == PACKET_MULTICAST)
- {
priv->stats.txerrmulticast++;
- }
else if (pstx_fb->pkt_type == PACKET_BROADCAST)
- {
priv->stats.txerrbroadcast++;
- }
else
- {
priv->stats.txerrunicast++;
- }
}
priv->stats.txretrycount += pstx_fb->retry_cnt;
priv->stats.txfeedbackretry += pstx_fb->retry_cnt;
-} /* cmpk_CountTxStatistic */
+}
@@ -283,80 +242,63 @@ cmpk_count_txstatistic(
* Function: cmpk_handle_tx_feedback()
*
* Overview: The function is responsible for extract the message inside TX
- * feedbck message from firmware. It will contain dedicated info in
- * ws-06-0063-rtl8190-command-packet-specification. Please
- * refer to chapter "TX Feedback Element". We have to read 20 bytes
- * in the command packet.
+ * feedback message from firmware. It will contain dedicated info in
+ * ws-06-0063-rtl8190-command-packet-specification.
+ * Please refer to chapter "TX Feedback Element".
+ * We have to read 20 bytes in the command packet.
*
- * Input: struct net_device * dev
- * u8 * pmsg - Msg Ptr of the command packet.
+ * Input: struct net_device *dev
+ * u8 *pmsg - Msg Ptr of the command packet.
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
- * When Who Remark
- * 05/08/2008 amy Create Version 0 porting from windows code.
+ * When Who Remark
+ * 05/08/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
-static void
-cmpk_handle_tx_feedback(
- struct net_device *dev,
- u8 * pmsg)
+static void cmpk_handle_tx_feedback(struct net_device *dev, u8 *pmsg)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- cmpk_txfb_t rx_tx_fb; /* */
+ cmpk_txfb_t rx_tx_fb;
priv->stats.txfeedback++;
- /* 0. Display received message. */
- //cmpk_Display_Message(CMPK_RX_TX_FB_SIZE, pMsg);
-
/* 1. Extract TX feedback info from RFD to temp structure buffer. */
/* It seems that FW use big endian(MIPS) and DRV use little endian in
windows OS. So we have to read the content byte by byte or transfer
endian type before copy the message copy. */
- /* 2007/07/05 MH Use pointer to transfer structure memory. */
- //memcpy((UINT8 *)&rx_tx_fb, pMsg, sizeof(CMPK_TXFB_T));
- memcpy((u8*)&rx_tx_fb, pmsg, sizeof(cmpk_txfb_t));
+ /* Use pointer to transfer structure memory. */
+ memcpy((u8 *)&rx_tx_fb, pmsg, sizeof(cmpk_txfb_t));
/* 2. Use tx feedback info to count TX statistics. */
cmpk_count_txstatistic(dev, &rx_tx_fb);
- /* 2007/01/17 MH Comment previous method for TX statistic function. */
+ /* Comment previous method for TX statistic function. */
/* Collect info TX feedback packet to fill TCB. */
/* We can not know the packet length and transmit type: broadcast or uni
or multicast. */
- //CountTxStatistics( pAdapter, &tcb );
-} /* cmpk_Handle_Tx_Feedback */
+}
-void
-cmdpkt_beacontimerinterrupt_819xusb(
- struct net_device *dev
-)
+void cmdpkt_beacontimerinterrupt_819xusb(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u16 tx_rate;
- {
- //
- // 070117, rcnjko: 87B have to S/W beacon for DTM encryption_cmn.
- //
- if(priv->ieee80211->current_network.mode == IEEE_A ||
+ /* 87B have to S/W beacon for DTM encryption_cmn. */
+ if (priv->ieee80211->current_network.mode == IEEE_A ||
priv->ieee80211->current_network.mode == IEEE_N_5G ||
- (priv->ieee80211->current_network.mode == IEEE_N_24G && (!priv->ieee80211->pHTInfo->bCurSuppCCK)))
- {
+ (priv->ieee80211->current_network.mode == IEEE_N_24G &&
+ (!priv->ieee80211->pHTInfo->bCurSuppCCK))) {
tx_rate = 60;
DMESG("send beacon frame tx rate is 6Mbpm\n");
- }
- else
- {
- tx_rate =10;
+ } else {
+ tx_rate = 10;
DMESG("send beacon frame tx rate is 1Mbpm\n");
}
- rtl819xusb_beacon_tx(dev,tx_rate); // HW Beacon
+ rtl819xusb_beacon_tx(dev, tx_rate); /* HW Beacon */
- }
}
@@ -367,151 +309,129 @@ cmdpkt_beacontimerinterrupt_819xusb(
* Function: cmpk_handle_interrupt_status()
*
* Overview: The function is responsible for extract the message from
- * firmware. It will contain dedicated info in
- * ws-07-0063-v06-rtl819x-command-packet-specification-070315.doc.
- * Please refer to chapter "Interrupt Status Element".
+ * firmware. It will contain dedicated info in
+ * ws-07-0063-v06-rtl819x-command-packet-specification-070315.doc.
+ * Please refer to chapter "Interrupt Status Element".
*
- * Input: struct net_device *dev,
- * u8* pmsg - Message Pointer of the command packet.
+ * Input: struct net_device *dev
+ * u8 *pmsg - Message Pointer of the command packet.
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
- * When Who Remark
- * 05/12/2008 amy Add this for rtl8192 porting from windows code.
+ * When Who Remark
+ * 05/12/2008 amy Add this for rtl8192 porting from windows code.
*
*---------------------------------------------------------------------------*/
-static void
-cmpk_handle_interrupt_status(
- struct net_device *dev,
- u8* pmsg)
+static void cmpk_handle_interrupt_status(struct net_device *dev, u8 *pmsg)
{
cmpk_intr_sta_t rx_intr_status; /* */
struct r8192_priv *priv = ieee80211_priv(dev);
DMESG("---> cmpk_Handle_Interrupt_Status()\n");
- /* 0. Display received message. */
- //cmpk_Display_Message(CMPK_RX_BEACON_STATE_SIZE, pMsg);
-
/* 1. Extract TX feedback info from RFD to temp structure buffer. */
/* It seems that FW use big endian(MIPS) and DRV use little endian in
windows OS. So we have to read the content byte by byte or transfer
endian type before copy the message copy. */
- //rx_bcn_state.Element_ID = pMsg[0];
- //rx_bcn_state.Length = pMsg[1];
rx_intr_status.length = pmsg[1];
- if (rx_intr_status.length != (sizeof(cmpk_intr_sta_t) - 2))
- {
+ if (rx_intr_status.length != (sizeof(cmpk_intr_sta_t) - 2)) {
DMESG("cmpk_Handle_Interrupt_Status: wrong length!\n");
return;
}
- // Statistics of beacon for ad-hoc mode.
- if( priv->ieee80211->iw_mode == IW_MODE_ADHOC)
- {
- //2 maybe need endian transform?
+ /* Statistics of beacon for ad-hoc mode. */
+ if (priv->ieee80211->iw_mode == IW_MODE_ADHOC) {
+ /* 2 maybe need endian transform? */
rx_intr_status.interrupt_status = *((u32 *)(pmsg + 4));
- //rx_intr_status.InterruptStatus = N2H4BYTE(*((UINT32 *)(pMsg + 4)));
- DMESG("interrupt status = 0x%x\n", rx_intr_status.interrupt_status);
+ DMESG("interrupt status = 0x%x\n",
+ rx_intr_status.interrupt_status);
- if (rx_intr_status.interrupt_status & ISR_TxBcnOk)
- {
+ if (rx_intr_status.interrupt_status & ISR_TxBcnOk) {
priv->ieee80211->bibsscoordinator = true;
priv->stats.txbeaconokint++;
- }
- else if (rx_intr_status.interrupt_status & ISR_TxBcnErr)
- {
+ } else if (rx_intr_status.interrupt_status & ISR_TxBcnErr) {
priv->ieee80211->bibsscoordinator = false;
priv->stats.txbeaconerr++;
}
if (rx_intr_status.interrupt_status & ISR_BcnTimerIntr)
- {
cmdpkt_beacontimerinterrupt_819xusb(dev);
- }
}
- // Other informations in interrupt status we need?
+ /* Other information in interrupt status we need? */
DMESG("<---- cmpk_handle_interrupt_status()\n");
-} /* cmpk_handle_interrupt_status */
+}
/*-----------------------------------------------------------------------------
* Function: cmpk_handle_query_config_rx()
*
* Overview: The function is responsible for extract the message from
- * firmware. It will contain dedicated info in
- * ws-06-0063-rtl8190-command-packet-specification. Please
- * refer to chapter "Beacon State Element".
+ * firmware. It will contain dedicated info in
+ * ws-06-0063-rtl8190-command-packet-specification. Please
+ * refer to chapter "Beacon State Element".
*
- * Input: u8 * pmsg - Message Pointer of the command packet.
+ * Input: u8 *pmsg - Message Pointer of the command packet.
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
- * When Who Remark
- * 05/12/2008 amy Create Version 0 porting from windows code.
+ * When Who Remark
+ * 05/12/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
-static void
-cmpk_handle_query_config_rx(
- struct net_device *dev,
- u8* pmsg)
+static void cmpk_handle_query_config_rx(struct net_device *dev, u8 *pmsg)
{
- cmpk_query_cfg_t rx_query_cfg; /* */
+ cmpk_query_cfg_t rx_query_cfg;
- /* 0. Display received message. */
- //cmpk_Display_Message(CMPK_RX_BEACON_STATE_SIZE, pMsg);
/* 1. Extract TX feedback info from RFD to temp structure buffer. */
/* It seems that FW use big endian(MIPS) and DRV use little endian in
windows OS. So we have to read the content byte by byte or transfer
endian type before copy the message copy. */
- //rx_query_cfg.Element_ID = pMsg[0];
- //rx_query_cfg.Length = pMsg[1];
- rx_query_cfg.cfg_action = (pmsg[4] & 0x80000000)>>31;
+ rx_query_cfg.cfg_action = (pmsg[4] & 0x80000000) >> 31;
rx_query_cfg.cfg_type = (pmsg[4] & 0x60) >> 5;
rx_query_cfg.cfg_size = (pmsg[4] & 0x18) >> 3;
rx_query_cfg.cfg_page = (pmsg[6] & 0x0F) >> 0;
- rx_query_cfg.cfg_offset = pmsg[7];
- rx_query_cfg.value = (pmsg[8] << 24) | (pmsg[9] << 16) |
- (pmsg[10] << 8) | (pmsg[11] << 0);
- rx_query_cfg.mask = (pmsg[12] << 24) | (pmsg[13] << 16) |
- (pmsg[14] << 8) | (pmsg[15] << 0);
+ rx_query_cfg.cfg_offset = pmsg[7];
+ rx_query_cfg.value = (pmsg[8] << 24) | (pmsg[9] << 16) |
+ (pmsg[10] << 8) | (pmsg[11] << 0);
+ rx_query_cfg.mask = (pmsg[12] << 24) | (pmsg[13] << 16) |
+ (pmsg[14] << 8) | (pmsg[15] << 0);
-} /* cmpk_Handle_Query_Config_Rx */
+}
/*-----------------------------------------------------------------------------
* Function: cmpk_count_tx_status()
*
* Overview: Count aggregated tx status from firmwar of one type rx command
- * packet element id = RX_TX_STATUS.
+ * packet element id = RX_TX_STATUS.
*
- * Input: NONE
+ * Input: NONE
*
- * Output: NONE
+ * Output: NONE
*
- * Return: NONE
+ * Return: NONE
*
* Revised History:
- * When Who Remark
- * 05/12/2008 amy Create Version 0 porting from windows code.
+ * When Who Remark
+ * 05/12/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
-static void cmpk_count_tx_status( struct net_device *dev,
- cmpk_tx_status_t *pstx_status)
+static void cmpk_count_tx_status(struct net_device *dev,
+ cmpk_tx_status_t *pstx_status)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -519,15 +439,14 @@ static void cmpk_count_tx_status( struct net_device *dev,
RT_RF_POWER_STATE rtstate;
- pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState));
+ pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE,
+ (pu1Byte)(&rtState));
- // When RF is off, we should not count the packet for hw/sw synchronize
- // reason, ie. there may be a duration while sw switch is changed and hw
- // switch is being changed. 2006.12.04, by shien chang.
+ /* When RF is off, we should not count the packet for hw/sw synchronize
+ reason, ie. there may be a duration while sw switch is changed and
+ hw switch is being changed. */
if (rtState == eRfOff)
- {
return;
- }
#endif
priv->stats.txfeedbackok += pstx_status->txok;
@@ -536,15 +455,12 @@ static void cmpk_count_tx_status( struct net_device *dev,
priv->stats.txfeedbackfail += pstx_status->txfail;
priv->stats.txerrtotal += pstx_status->txfail;
- priv->stats.txretrycount += pstx_status->txretry;
+ priv->stats.txretrycount += pstx_status->txretry;
priv->stats.txfeedbackretry += pstx_status->txretry;
- //pAdapter->TxStats.NumTxOkBytesTotal += psTx_FB->pkt_length;
- //pAdapter->TxStats.NumTxErrBytesTotal += psTx_FB->pkt_length;
- //pAdapter->MgntInfo.LinkDetectInfo.NumTxOkInPeriod++;
- priv->stats.txmulticast += pstx_status->txmcok;
- priv->stats.txbroadcast += pstx_status->txbcok;
+ priv->stats.txmulticast += pstx_status->txmcok;
+ priv->stats.txbroadcast += pstx_status->txbcok;
priv->stats.txunicast += pstx_status->txucok;
priv->stats.txerrmulticast += pstx_status->txmcfail;
@@ -553,10 +469,10 @@ static void cmpk_count_tx_status( struct net_device *dev,
priv->stats.txbytesmulticast += pstx_status->txmclength;
priv->stats.txbytesbroadcast += pstx_status->txbclength;
- priv->stats.txbytesunicast += pstx_status->txuclength;
+ priv->stats.txbytesunicast += pstx_status->txuclength;
- priv->stats.last_packet_rate = pstx_status->rate;
-} /* cmpk_CountTxStatus */
+ priv->stats.last_packet_rate = pstx_status->rate;
+}
@@ -564,7 +480,7 @@ static void cmpk_count_tx_status( struct net_device *dev,
* Function: cmpk_handle_tx_status()
*
* Overview: Firmware add a new tx feedback status to reduce rx command
- * packet buffer operation load.
+ * packet buffer operation load.
*
* Input: NONE
*
@@ -573,22 +489,19 @@ static void cmpk_count_tx_status( struct net_device *dev,
* Return: NONE
*
* Revised History:
- * When Who Remark
- * 05/12/2008 amy Create Version 0 porting from windows code.
+ * When Who Remark
+ * 05/12/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
-static void
-cmpk_handle_tx_status(
- struct net_device *dev,
- u8* pmsg)
+static void cmpk_handle_tx_status(struct net_device *dev, u8 *pmsg)
{
- cmpk_tx_status_t rx_tx_sts; /* */
+ cmpk_tx_status_t rx_tx_sts;
- memcpy((void*)&rx_tx_sts, (void*)pmsg, sizeof(cmpk_tx_status_t));
+ memcpy((void *)&rx_tx_sts, (void *)pmsg, sizeof(cmpk_tx_status_t));
/* 2. Use tx feedback info to count TX statistics. */
cmpk_count_tx_status(dev, &rx_tx_sts);
-} /* cmpk_Handle_Tx_Status */
+}
/*-----------------------------------------------------------------------------
@@ -603,82 +516,71 @@ cmpk_handle_tx_status(
* Return: NONE
*
* Revised History:
- * When Who Remark
- * 05/12/2008 amy Create Version 0 porting from windows code.
+ * When Who Remark
+ * 05/12/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
-static void
-cmpk_handle_tx_rate_history(
- struct net_device *dev,
- u8* pmsg)
+static void cmpk_handle_tx_rate_history(struct net_device *dev, u8 *pmsg)
{
cmpk_tx_rahis_t *ptxrate;
-// RT_RF_POWER_STATE rtState;
- u8 i, j;
- u16 length = sizeof(cmpk_tx_rahis_t);
- u32 *ptemp;
+ u8 i, j;
+ u16 length = sizeof(cmpk_tx_rahis_t);
+ u32 *ptemp;
struct r8192_priv *priv = ieee80211_priv(dev);
#ifdef ENABLE_PS
- pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState));
+ pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE,
+ (pu1Byte)(&rtState));
- // When RF is off, we should not count the packet for hw/sw synchronize
- // reason, ie. there may be a duration while sw switch is changed and hw
- // switch is being changed. 2006.12.04, by shien chang.
+ /* When RF is off, we should not count the packet for hw/sw synchronize
+ reason, ie. there may be a duration while sw switch is changed and
+ hw switch is being changed. */
if (rtState == eRfOff)
- {
return;
- }
#endif
ptemp = (u32 *)pmsg;
- //
- // Do endian transfer to word alignment(16 bits) for windows system.
- // You must do different endian transfer for linux and MAC OS
- //
- for (i = 0; i < (length/4); i++)
- {
+ /* Do endian transfer to word alignment(16 bits) for windows system.
+ You must do different endian transfer for linux and MAC OS */
+ for (i = 0; i < (length/4); i++) {
u16 temp1, temp2;
- temp1 = ptemp[i]&0x0000FFFF;
- temp2 = ptemp[i]>>16;
- ptemp[i] = (temp1<<16)|temp2;
+ temp1 = ptemp[i] & 0x0000FFFF;
+ temp2 = ptemp[i] >> 16;
+ ptemp[i] = (temp1 << 16) | temp2;
}
ptxrate = (cmpk_tx_rahis_t *)pmsg;
- if (ptxrate == NULL )
- {
+ if (ptxrate == NULL)
return;
- }
- for (i = 0; i < 16; i++)
- {
- // Collect CCK rate packet num
+ for (i = 0; i < 16; i++) {
+ /* Collect CCK rate packet num */
if (i < 4)
priv->stats.txrate.cck[i] += ptxrate->cck[i];
- // Collect OFDM rate packet num
- if (i< 8)
+ /* Collect OFDM rate packet num */
+ if (i < 8)
priv->stats.txrate.ofdm[i] += ptxrate->ofdm[i];
for (j = 0; j < 4; j++)
priv->stats.txrate.ht_mcs[j][i] += ptxrate->ht_mcs[j][i];
}
-} /* cmpk_Handle_Tx_Rate_History */
+}
/*-----------------------------------------------------------------------------
* Function: cmpk_message_handle_rx()
*
* Overview: In the function, we will capture different RX command packet
- * info. Every RX command packet element has different message
- * length and meaning in content. We only support three type of RX
- * command packet now. Please refer to document
- * ws-06-0063-rtl8190-command-packet-specification.
+ * info. Every RX command packet element has different message
+ * length and meaning in content. We only support three types of RX
+ * command packet now. Please refer to document
+ * ws-06-0063-rtl8190-command-packet-specification.
*
* Input: NONE
*
@@ -687,30 +589,22 @@ cmpk_handle_tx_rate_history(
* Return: NONE
*
* Revised History:
- * When Who Remark
- * 05/06/2008 amy Create Version 0 porting from windows code.
+ * When Who Remark
+ * 05/06/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
-extern u32
-cmpk_message_handle_rx(
- struct net_device *dev,
- struct ieee80211_rx_stats *pstats)
+extern u32 cmpk_message_handle_rx(struct net_device *dev,
+ struct ieee80211_rx_stats *pstats)
{
-// u32 debug_level = DBG_LOUD;
int total_length;
u8 cmd_length, exe_cnt = 0;
u8 element_id;
u8 *pcmd_buff;
- /* 0. Check inpt arguments. If is is a command queue message or pointer is
- null. */
- if (/*(prfd->queue_id != CMPK_RX_QUEUE_ID) || */(pstats== NULL))
- {
- /* Print error message. */
- /*RT_TRACE(COMP_SEND, DebugLevel,
- ("\n\r[CMPK]-->Err queue id or pointer"));*/
+ /* 0. Check input arguments. If it is a command queue message or
+ pointer is null. */
+ if (pstats == NULL)
return 0; /* This is not a command packet. */
- }
/* 1. Read received command packet message length from RFD. */
total_length = pstats->Length;
@@ -720,67 +614,58 @@ cmpk_message_handle_rx(
/* 3. Read command packet element id and length. */
element_id = pcmd_buff[0];
- /*RT_TRACE(COMP_SEND, DebugLevel,
- ("\n\r[CMPK]-->element ID=%d Len=%d", element_id, total_length));*/
/* 4. Check every received command packet content according to different
- element type. Because FW may aggregate RX command packet to minimize
- transmit time between DRV and FW.*/
- // Add a counter to prevent the lock in the loop from being held too long
- while (total_length > 0 || exe_cnt++ >100)
- {
- /* 2007/01/17 MH We support aggregation of different cmd in the same packet. */
+ element type. Because FW may aggregate RX command packet to
+ minimize transmit time between DRV and FW.*/
+ /* Add a counter to prevent the lock in the loop from being held too
+ long */
+ while (total_length > 0 && exe_cnt++ < 100) {
+ /* We support aggregation of different cmd in the same packet */
element_id = pcmd_buff[0];
- switch(element_id)
- {
- case RX_TX_FEEDBACK:
- cmpk_handle_tx_feedback (dev, pcmd_buff);
- cmd_length = CMPK_RX_TX_FB_SIZE;
- break;
-
- case RX_INTERRUPT_STATUS:
- cmpk_handle_interrupt_status(dev, pcmd_buff);
- cmd_length = sizeof(cmpk_intr_sta_t);
- break;
-
- case BOTH_QUERY_CONFIG:
- cmpk_handle_query_config_rx(dev, pcmd_buff);
- cmd_length = CMPK_BOTH_QUERY_CONFIG_SIZE;
- break;
-
- case RX_TX_STATUS:
- cmpk_handle_tx_status(dev, pcmd_buff);
- cmd_length = CMPK_RX_TX_STS_SIZE;
- break;
-
- case RX_TX_PER_PKT_FEEDBACK:
- // You must at lease add a switch case element here,
- // Otherwise, we will jump to default case.
- //DbgPrint("CCX Test\r\n");
- cmd_length = CMPK_RX_TX_FB_SIZE;
- break;
-
- case RX_TX_RATE_HISTORY:
- //DbgPrint(" rx tx rate history\r\n");
- cmpk_handle_tx_rate_history(dev, pcmd_buff);
- cmd_length = CMPK_TX_RAHIS_SIZE;
- break;
-
- default:
-
- RT_TRACE(COMP_ERR, "---->cmpk_message_handle_rx():unknow CMD Element\n");
- return 1; /* This is a command packet. */
+ switch (element_id) {
+ case RX_TX_FEEDBACK:
+ cmpk_handle_tx_feedback(dev, pcmd_buff);
+ cmd_length = CMPK_RX_TX_FB_SIZE;
+ break;
+
+ case RX_INTERRUPT_STATUS:
+ cmpk_handle_interrupt_status(dev, pcmd_buff);
+ cmd_length = sizeof(cmpk_intr_sta_t);
+ break;
+
+ case BOTH_QUERY_CONFIG:
+ cmpk_handle_query_config_rx(dev, pcmd_buff);
+ cmd_length = CMPK_BOTH_QUERY_CONFIG_SIZE;
+ break;
+
+ case RX_TX_STATUS:
+ cmpk_handle_tx_status(dev, pcmd_buff);
+ cmd_length = CMPK_RX_TX_STS_SIZE;
+ break;
+
+ case RX_TX_PER_PKT_FEEDBACK:
+ /* You must at least add a switch case element here,
+ Otherwise, we will jump to default case. */
+ cmd_length = CMPK_RX_TX_FB_SIZE;
+ break;
+
+ case RX_TX_RATE_HISTORY:
+ cmpk_handle_tx_rate_history(dev, pcmd_buff);
+ cmd_length = CMPK_TX_RAHIS_SIZE;
+ break;
+
+ default:
+
+ RT_TRACE(COMP_ERR, "---->%s():unknown CMD Element\n",
+ __func__);
+ return 1; /* This is a command packet. */
}
- // 2007/01/22 MH Display received rx command packet info.
- //cmpk_Display_Message(cmd_length, pcmd_buff);
-
- // 2007/01/22 MH Add to display tx statistic.
- //cmpk_DisplayTxStatistic(pAdapter);
total_length -= cmd_length;
pcmd_buff += cmd_length;
- } /* while (total_length > 0) */
+ }
return 1; /* This is a command packet. */
-} /* CMPK_Message_Handle_Rx */
+}
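
Beyond the checkpatch-style cleanups, the r819xU_cmdpkt.c diff above carries two functional changes worth noting: SendTxCommandPacket() now bails out when dev_alloc_skb() returns NULL, and the element-parsing loop in cmpk_message_handle_rx() changes its guard from while (total_length > 0 || exe_cnt++ > 100) to while (total_length > 0 && exe_cnt++ < 100), so the safety counter actually caps the number of parsed elements instead of being short-circuited away. A compact sketch of that bounded parse pattern; the element ids and sizes below are invented for the illustration and are not the driver's RX_TX_FEEDBACK family:

    #include <stdio.h>

    int main(void)
    {
            /* pretend command buffer: id 1 = 4 bytes, id 2 = 2 bytes */
            const unsigned char buf[] = { 1, 0, 0, 0,  2, 0,  1, 0, 0, 0 };
            const unsigned char *p = buf;
            int total_length = sizeof(buf);
            unsigned int exe_cnt = 0;

            while (total_length > 0 && exe_cnt++ < 100) {
                    int cmd_length;

                    switch (p[0]) {
                    case 1: cmd_length = 4; break;  /* e.g. tx feedback       */
                    case 2: cmd_length = 2; break;  /* e.g. interrupt status  */
                    default:
                            printf("unknown element %d\n", p[0]);
                            return 1;
                    }
                    total_length -= cmd_length;
                    p += cmd_length;
            }
            printf("parsed %u elements\n", exe_cnt);
            return 0;
    }
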
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.h b/drivers/staging/rtl8192u/r819xU_cmdpkt.h
index 59caa4e..ebe4032 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.h
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.h
@@ -192,10 +192,10 @@ typedef enum _rt_status{
RT_STATUS_RESOURCE
}rt_status,*prt_status;
-extern rt_status cmpk_message_handle_tx(struct net_device *dev, u8* codevirtualaddress, u32 packettype, u32 buffer_len);
+extern rt_status cmpk_message_handle_tx(struct net_device *dev, u8 *codevirtualaddress, u32 packettype, u32 buffer_len);
-extern u32 cmpk_message_handle_rx(struct net_device *dev, struct ieee80211_rx_stats * pstats);
-extern rt_status SendTxCommandPacket( struct net_device *dev, void* pData, u32 DataLen);
+extern u32 cmpk_message_handle_rx(struct net_device *dev, struct ieee80211_rx_stats *pstats);
+extern rt_status SendTxCommandPacket( struct net_device *dev, void *pData, u32 DataLen);
#endif
diff --git a/drivers/staging/rtl8192u/r819xU_firmware.c b/drivers/staging/rtl8192u/r819xU_firmware.c
index 573e9cd..bb924ac 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware.c
+++ b/drivers/staging/rtl8192u/r819xU_firmware.c
@@ -48,7 +48,7 @@ bool fw_download_code(struct net_device *dev, u8 *code_virtual_address, u32 buff
//Fragmentation might be required
frag_threshold = pfirmware->cmdpacket_frag_thresold;
do {
- if((buffer_len - frag_offset) > frag_threshold) {
+ if ((buffer_len - frag_offset) > frag_threshold) {
frag_length = frag_threshold ;
bLastIniPkt = 0;
@@ -67,7 +67,7 @@ bool fw_download_code(struct net_device *dev, u8 *code_virtual_address, u32 buff
skb = dev_alloc_skb(frag_length + 4);
#endif
memcpy((unsigned char *)(skb->cb),&dev,sizeof(dev));
- tcb_desc = (cb_desc*)(skb->cb + MAX_DEV_ADDR_SIZE);
+ tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
tcb_desc->queue_index = TXCMD_QUEUE;
tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_INIT;
tcb_desc->bLastIniPkt = bLastIniPkt;
@@ -89,7 +89,7 @@ bool fw_download_code(struct net_device *dev, u8 *code_virtual_address, u32 buff
tcb_desc->txbuf_size= (u16)i;
skb_put(skb, i);
- if(!priv->ieee80211->check_nic_enough_desc(dev,tcb_desc->queue_index)||
+ if (!priv->ieee80211->check_nic_enough_desc(dev,tcb_desc->queue_index)||
(!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index]))||\
(priv->ieee80211->queue_stop) ) {
RT_TRACE(COMP_FIRMWARE,"=====================================================> tx full!\n");
@@ -125,7 +125,7 @@ fwSendNullPacket(
//Get TCB and local buffer from common pool. (It is shared by CmdQ, MgntQ, and USB coalesce DataQ)
skb = dev_alloc_skb(Length+ 4);
memcpy((unsigned char *)(skb->cb),&dev,sizeof(dev));
- tcb_desc = (cb_desc*)(skb->cb + MAX_DEV_ADDR_SIZE);
+ tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
tcb_desc->queue_index = TXCMD_QUEUE;
tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_INIT;
tcb_desc->bLastIniPkt = bLastInitPacket;
@@ -133,7 +133,7 @@ fwSendNullPacket(
memset(ptr_buf,0,Length);
tcb_desc->txbuf_size= (u16)Length;
- if(!priv->ieee80211->check_nic_enough_desc(dev,tcb_desc->queue_index)||
+ if (!priv->ieee80211->check_nic_enough_desc(dev,tcb_desc->queue_index)||
(!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index]))||\
(priv->ieee80211->queue_stop) ) {
RT_TRACE(COMP_FIRMWARE,"===================NULL packet==================================> tx full!\n");
@@ -168,14 +168,14 @@ bool CPUcheck_maincodeok_turnonCPU(struct net_device *dev)
/* Check whether put code OK */
do {
- CPU_status = read_nic_dword(dev, CPU_GEN);
+ read_nic_dword(dev, CPU_GEN, &CPU_status);
- if(CPU_status&CPU_GEN_PUT_CODE_OK)
+ if (CPU_status&CPU_GEN_PUT_CODE_OK)
break;
}while(check_putcodeOK_time--);
- if(!(CPU_status&CPU_GEN_PUT_CODE_OK)) {
+ if (!(CPU_status&CPU_GEN_PUT_CODE_OK)) {
RT_TRACE(COMP_ERR, "Download Firmware: Put code fail!\n");
goto CPUCheckMainCodeOKAndTurnOnCPU_Fail;
} else {
@@ -183,19 +183,19 @@ bool CPUcheck_maincodeok_turnonCPU(struct net_device *dev)
}
/* Turn On CPU */
- CPU_status = read_nic_dword(dev, CPU_GEN);
+ read_nic_dword(dev, CPU_GEN, &CPU_status);
write_nic_byte(dev, CPU_GEN, (u8)((CPU_status|CPU_GEN_PWR_STB_CPU)&0xff));
mdelay(1000);
/* Check whether CPU boot OK */
do {
- CPU_status = read_nic_dword(dev, CPU_GEN);
+ read_nic_dword(dev, CPU_GEN, &CPU_status);
- if(CPU_status&CPU_GEN_BOOT_RDY)
+ if (CPU_status&CPU_GEN_BOOT_RDY)
break;
}while(check_bootOk_time--);
- if(!(CPU_status&CPU_GEN_BOOT_RDY)) {
+ if (!(CPU_status&CPU_GEN_BOOT_RDY)) {
goto CPUCheckMainCodeOKAndTurnOnCPU_Fail;
} else {
RT_TRACE(COMP_FIRMWARE, "Download Firmware: Boot ready!\n");
@@ -218,14 +218,14 @@ bool CPUcheck_firmware_ready(struct net_device *dev)
/* Check Firmware Ready */
do {
- CPU_status = read_nic_dword(dev, CPU_GEN);
+ read_nic_dword(dev, CPU_GEN, &CPU_status);
- if(CPU_status&CPU_GEN_FIRM_RDY)
+ if (CPU_status&CPU_GEN_FIRM_RDY)
break;
}while(check_time--);
- if(!(CPU_status&CPU_GEN_FIRM_RDY))
+ if (!(CPU_status&CPU_GEN_FIRM_RDY))
goto CPUCheckFirmwareReady_Fail;
else
RT_TRACE(COMP_FIRMWARE, "Download Firmware: Firmware ready!\n");
@@ -265,7 +265,7 @@ bool init_firmware(struct net_device *dev)
starting_state = FW_INIT_STEP0_BOOT;
// TODO: system reset
- }else if(pfirmware->firmware_status == FW_STATUS_5_READY) {
+ }else if (pfirmware->firmware_status == FW_STATUS_5_READY) {
/* it is called by Initialize */
rst_opt = OPT_FIRMWARE_RESET;
starting_state = FW_INIT_STEP2_DATA;
@@ -282,19 +282,19 @@ bool init_firmware(struct net_device *dev)
* Open image file, and map file to continuous memory if open file success.
* or read image file from array. Default load from IMG file
*/
- if(rst_opt == OPT_SYSTEM_RESET) {
+ if (rst_opt == OPT_SYSTEM_RESET) {
rc = request_firmware(&fw_entry, fw_name[init_step],&priv->udev->dev);
- if(rc < 0 ) {
+ if (rc < 0 ) {
RT_TRACE(COMP_ERR, "request firmware fail!\n");
goto download_firmware_fail;
}
- if(fw_entry->size > sizeof(pfirmware->firmware_buf)) {
+ if (fw_entry->size > sizeof(pfirmware->firmware_buf)) {
RT_TRACE(COMP_ERR, "img file size exceed the container buffer fail!\n");
goto download_firmware_fail;
}
- if(init_step != FW_INIT_STEP1_MAIN) {
+ if (init_step != FW_INIT_STEP1_MAIN) {
memcpy(pfirmware->firmware_buf,fw_entry->data,fw_entry->size);
mapped_file = pfirmware->firmware_buf;
file_length = fw_entry->size;
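
The hunk above requests the image with request_firmware() and rejects it when fw_entry->size exceeds the fixed firmware_buf container. A rough userspace analogue of that size check only (the file name and buffer size below are placeholders, not driver values):

#include <stdio.h>

#define FW_BUF_SIZE 64000			/* placeholder for sizeof(firmware_buf) */

static unsigned char firmware_buf[FW_BUF_SIZE];

int main(void)
{
	const char *name = "boot.img";		/* placeholder image name */
	FILE *f = fopen(name, "rb");
	long size;

	if (!f) {
		fprintf(stderr, "request for %s failed\n", name);
		return 1;
	}
	/* Determine the image size before copying it into the container. */
	fseek(f, 0, SEEK_END);
	size = ftell(f);
	rewind(f);
	if (size < 0 || (unsigned long)size > sizeof(firmware_buf)) {
		fprintf(stderr, "img file size exceeds the container buffer\n");
		fclose(f);
		return 1;
	}
	if (fread(firmware_buf, 1, (size_t)size, f) != (size_t)size) {
		fprintf(stderr, "short read\n");
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("loaded %ld bytes of %s\n", size, name);
	return 0;
}
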
@@ -311,7 +311,7 @@ bool init_firmware(struct net_device *dev)
#endif
}
pfirmware->firmware_buf_size = file_length;
- }else if(rst_opt == OPT_FIRMWARE_RESET ) {
+ }else if (rst_opt == OPT_FIRMWARE_RESET ) {
/* we only need to download data.img here */
mapped_file = pfirmware->firmware_buf;
file_length = pfirmware->firmware_buf_size;
@@ -325,15 +325,15 @@ bool init_firmware(struct net_device *dev)
* and Tx descriptor info
* */
rt_status = fw_download_code(dev,mapped_file,file_length);
- if(rst_opt == OPT_SYSTEM_RESET) {
+ if (rst_opt == OPT_SYSTEM_RESET) {
release_firmware(fw_entry);
}
- if(rt_status != TRUE) {
+ if (rt_status != TRUE) {
goto download_firmware_fail;
}
- switch(init_step) {
+ switch (init_step) {
case FW_INIT_STEP0_BOOT:
/* Download boot
* initialize command descriptor.
@@ -343,7 +343,7 @@ bool init_firmware(struct net_device *dev)
#ifdef RTL8190P
// To initialize IMEM, CPU move code from 0x80000080, hence, we send 0x80 byte packet
rt_status = fwSendNullPacket(dev, RTL8190_CPU_START_OFFSET);
- if(rt_status != true)
+ if (rt_status != true)
{
RT_TRACE(COMP_INIT, "fwSendNullPacket() fail ! \n");
goto download_firmware_fail;
@@ -362,7 +362,7 @@ bool init_firmware(struct net_device *dev)
/* Check Put Code OK and Turn On CPU */
rt_status = CPUcheck_maincodeok_turnonCPU(dev);
- if(rt_status != TRUE) {
+ if (rt_status != TRUE) {
RT_TRACE(COMP_ERR, "CPUcheck_maincodeok_turnonCPU fail!\n");
goto download_firmware_fail;
}
@@ -376,7 +376,7 @@ bool init_firmware(struct net_device *dev)
mdelay(1);
rt_status = CPUcheck_firmware_ready(dev);
- if(rt_status != TRUE) {
+ if (rt_status != TRUE) {
RT_TRACE(COMP_ERR, "CPUcheck_firmware_ready fail(%d)!\n",rt_status);
goto download_firmware_fail;
}
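
Across the firmware hunks above, read_nic_dword() changes from returning the value to filling an output parameter, so each CPU_GEN poll becomes a read-then-test loop. A compilable stand-alone sketch of that pattern, with a stubbed register read and a made-up bit value (the real CPU_GEN_PUT_CODE_OK constant and register offset differ):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CPU_GEN_PUT_CODE_OK 0x00000200	/* placeholder bit, not the driver value */

/* Stand-in for read_nic_dword(dev, reg, &val): fills *val instead of returning it. */
static void mock_read_dword(uint32_t reg, uint32_t *val)
{
	static int calls;

	(void)reg;
	/* Pretend the status bit comes up after a few polls. */
	*val = (++calls >= 3) ? CPU_GEN_PUT_CODE_OK : 0;
}

static bool poll_put_code_ok(void)
{
	uint32_t status = 0;
	int retries = 200;	/* bounded, like check_putcodeOK_time */

	do {
		mock_read_dword(0x080, &status);	/* 0x080: placeholder offset */
		if (status & CPU_GEN_PUT_CODE_OK)
			return true;
	} while (retries--);

	return false;
}

int main(void)
{
	printf("put-code %s\n", poll_put_code_ok() ? "ok" : "failed");
	return 0;
}
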
diff --git a/drivers/staging/rtl8192u/r819xU_phy.c b/drivers/staging/rtl8192u/r819xU_phy.c
index 17fac41..a6fac08 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.c
+++ b/drivers/staging/rtl8192u/r819xU_phy.c
@@ -7,22 +7,24 @@
#include "r819xU_firmware_img.h"
#include "dot11d.h"
+#include <linux/bitops.h>
+
static u32 RF_CHANNEL_TABLE_ZEBRA[] = {
0,
- 0x085c, //2412 1
- 0x08dc, //2417 2
- 0x095c, //2422 3
- 0x09dc, //2427 4
- 0x0a5c, //2432 5
- 0x0adc, //2437 6
- 0x0b5c, //2442 7
- 0x0bdc, //2447 8
- 0x0c5c, //2452 9
- 0x0cdc, //2457 10
- 0x0d5c, //2462 11
- 0x0ddc, //2467 12
- 0x0e5c, //2472 13
- 0x0f72, //2484
+ 0x085c, /* 2412 1 */
+ 0x08dc, /* 2417 2 */
+ 0x095c, /* 2422 3 */
+ 0x09dc, /* 2427 4 */
+ 0x0a5c, /* 2432 5 */
+ 0x0adc, /* 2437 6 */
+ 0x0b5c, /* 2442 7 */
+ 0x0bdc, /* 2447 8 */
+ 0x0c5c, /* 2452 9 */
+ 0x0cdc, /* 2457 10 */
+ 0x0d5c, /* 2462 11 */
+ 0x0ddc, /* 2467 12 */
+ 0x0e5c, /* 2472 13 */
+ 0x0f72, /* 2484 */
};
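
As a quick cross-check of the reformatted channel comments above (a sketch only, not driver code): 2.4 GHz channels 1-13 sit at 2407 + 5 * ch MHz and channel 14 at 2484 MHz, matching the 2412..2472/2484 annotations.

#include <stdio.h>

static unsigned int chan_to_mhz(unsigned int ch)
{
	return (ch == 14) ? 2484 : 2407 + 5 * ch;
}

int main(void)
{
	unsigned int ch;

	for (ch = 1; ch <= 14; ch++)
		printf("channel %2u -> %u MHz\n", ch, chan_to_mhz(ch));
	return 0;
}
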
@@ -36,36 +38,36 @@ static u32 RF_CHANNEL_TABLE_ZEBRA[] = {
#define rtl819XAGCTAB_Array Rtl8192UsbAGCTAB_Array
/******************************************************************************
- *function: This function read BB parameters from Header file we gen,
- * and do register read/write
- * input: u32 dwBitMask //taget bit pos in the addr to be modified
- * output: none
- * return: u32 return the shift bit position of the mask
- * ****************************************************************************/
-u32 rtl8192_CalculateBitShift(u32 dwBitMask)
+ * function: This function reads BB parameters from header file we generate,
+ * and does register read/write
+ * input: u32 bitmask //target bit pos in the addr to be modified
+ * output: none
+ * return: u32 return the shift bit position of the mask
+ ******************************************************************************/
+u32 rtl8192_CalculateBitShift(u32 bitmask)
{
u32 i;
- for (i=0; i<=31; i++)
- {
- if (((dwBitMask>>i)&0x1) == 1)
- break;
- }
+
+ i = ffs(bitmask) - 1;
return i;
}
+
/******************************************************************************
- *function: This function check different RF type to execute legal judgement. If RF Path is illegal, we will return false.
- * input: none
- * output: none
- * return: 0(illegal, false), 1(legal,true)
- * ***************************************************************************/
-u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device* dev, u32 eRFPath)
+ * function: This function checks different RF type to execute legal judgement.
+ * If RF Path is illegal, we will return false.
+ * input: net_device *dev
+ * u32 eRFPath
+ * output: none
+ * return: 0(illegal, false), 1(legal, true)
+ *****************************************************************************/
+u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device *dev, u32 eRFPath)
{
u8 ret = 1;
struct r8192_priv *priv = ieee80211_priv(dev);
- if (priv->rf_type == RF_2T4R)
+
+ if (priv->rf_type == RF_2T4R) {
ret = 0;
- else if (priv->rf_type == RF_1T2R)
- {
+ } else if (priv->rf_type == RF_1T2R) {
if (eRFPath == RF90_PATH_A || eRFPath == RF90_PATH_B)
ret = 1;
else if (eRFPath == RF90_PATH_C || eRFPath == RF90_PATH_D)
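
The hunk above collapses the bit-scan loop in rtl8192_CalculateBitShift() into ffs(bitmask) - 1 using <linux/bitops.h>. A small userspace check of that equivalence, using the POSIX ffs() from <strings.h>; a non-zero mask is assumed, as in the driver:

#include <stdio.h>
#include <strings.h>

/* The shift-search loop the patch removes, kept here only for comparison. */
static unsigned int shift_by_loop(unsigned int mask)
{
	unsigned int i;

	for (i = 0; i <= 31; i++)
		if ((mask >> i) & 0x1)
			break;
	return i;
}

int main(void)
{
	unsigned int masks[] = { 0x00000001, 0x00000f00, 0x00c00000, 0x80000000 };
	unsigned int i;

	for (i = 0; i < sizeof(masks) / sizeof(masks[0]); i++)
		printf("mask 0x%08x: loop=%u ffs-1=%d\n", masks[i],
		       shift_by_loop(masks[i]), ffs(masks[i]) - 1);
	return 0;
}
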
@@ -73,662 +75,682 @@ u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device* dev, u32 eRFPath)
}
return ret;
}
+
/******************************************************************************
- *function: This function set specific bits to BB register
- * input: net_device dev
- * u32 dwRegAddr //target addr to be modified
- * u32 dwBitMask //taget bit pos in the addr to be modified
- * u32 dwData //value to be write
- * output: none
- * return: none
- * notice:
- * ****************************************************************************/
-void rtl8192_setBBreg(struct net_device* dev, u32 dwRegAddr, u32 dwBitMask, u32 dwData)
+ * function: This function sets specific bits to BB register
+ * input: net_device *dev
+ * u32 reg_addr //target addr to be modified
+ * u32 bitmask //target bit pos to be modified
+ * u32 data //value to be write
+ * output: none
+ * return: none
+ * notice:
+ ******************************************************************************/
+void rtl8192_setBBreg(struct net_device *dev, u32 reg_addr, u32 bitmask,
+ u32 data)
{
- u32 OriginalValue, BitShift, NewValue;
+ u32 reg, bitshift;
- if(dwBitMask!= bMaskDWord)
- {//if not "double word" write
- OriginalValue = read_nic_dword(dev, dwRegAddr);
- BitShift = rtl8192_CalculateBitShift(dwBitMask);
- NewValue = (((OriginalValue) & (~dwBitMask)) | (dwData << BitShift));
- write_nic_dword(dev, dwRegAddr, NewValue);
- }else
- write_nic_dword(dev, dwRegAddr, dwData);
+ if (bitmask != bMaskDWord) {
+ read_nic_dword(dev, reg_addr, &reg);
+ bitshift = rtl8192_CalculateBitShift(bitmask);
+ reg &= ~bitmask;
+ reg |= data << bitshift;
+ write_nic_dword(dev, reg_addr, reg);
+ } else {
+ write_nic_dword(dev, reg_addr, data);
+ }
return;
}
+
/******************************************************************************
- *function: This function reads specific bits from BB register
- * input: net_device dev
- * u32 dwRegAddr //target addr to be readback
- * u32 dwBitMask //taget bit pos in the addr to be readback
- * output: none
- * return: u32 Data //the readback register value
- * notice:
- * ****************************************************************************/
-u32 rtl8192_QueryBBReg(struct net_device* dev, u32 dwRegAddr, u32 dwBitMask)
+ * function: This function reads specific bits from BB register
+ * input: net_device *dev
+ * u32 reg_addr //target addr to be readback
+ * u32 bitmask //target bit pos to be readback
+ * output: none
+ * return: u32 data //the readback register value
+ * notice:
+ ******************************************************************************/
+u32 rtl8192_QueryBBReg(struct net_device *dev, u32 reg_addr, u32 bitmask)
{
- u32 Ret = 0, OriginalValue, BitShift;
+ u32 reg, bitshift;
- OriginalValue = read_nic_dword(dev, dwRegAddr);
- BitShift = rtl8192_CalculateBitShift(dwBitMask);
- Ret =(OriginalValue & dwBitMask) >> BitShift;
+ read_nic_dword(dev, reg_addr, &reg);
+ bitshift = rtl8192_CalculateBitShift(bitmask);
- return (Ret);
+ return (reg & bitmask) >> bitshift;
}
-static u32 phy_FwRFSerialRead( struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 Offset );
-static void phy_FwRFSerialWrite( struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 Offset, u32 Data);
+static u32 phy_FwRFSerialRead(struct net_device *dev, RF90_RADIO_PATH_E eRFPath,
+ u32 offset);
+
+static void phy_FwRFSerialWrite(struct net_device *dev,
+ RF90_RADIO_PATH_E eRFPath, u32 offset,
+ u32 data);
/******************************************************************************
- *function: This function read register from RF chip
- * input: net_device dev
- * RF90_RADIO_PATH_E eRFPath //radio path of A/B/C/D
- * u32 Offset //target address to be read
- * output: none
- * return: u32 readback value
- * notice: There are three types of serial operations:(1) Software serial write.(2)Hardware LSSI-Low Speed Serial Interface.(3)Hardware HSSI-High speed serial write. Driver here need to implement (1) and (2)---need more spec for this information.
- * ****************************************************************************/
-u32 rtl8192_phy_RFSerialRead(struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 Offset)
+ * function: This function reads register from RF chip
+ * input: net_device *dev
+ * RF90_RADIO_PATH_E eRFPath //radio path of A/B/C/D
+ * u32 offset //target address to be read
+ * output: none
+ * return: u32 readback value
+ * notice: There are three types of serial operations:
+ * (1) Software serial write.
+ * (2)Hardware LSSI-Low Speed Serial Interface.
+ * (3)Hardware HSSI-High speed serial write.
+ * Driver here needs to implement (1) and (2)
+ * ---need more spec for this information.
+ ******************************************************************************/
+u32 rtl8192_phy_RFSerialRead(struct net_device *dev, RF90_RADIO_PATH_E eRFPath,
+ u32 offset)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u32 ret = 0;
- u32 NewOffset = 0;
- BB_REGISTER_DEFINITION_T* pPhyReg = &priv->PHYRegDef[eRFPath];
+ u32 new_offset = 0;
+ BB_REGISTER_DEFINITION_T *pPhyReg = &priv->PHYRegDef[eRFPath];
+
rtl8192_setBBreg(dev, pPhyReg->rfLSSIReadBack, bLSSIReadBackData, 0);
- //make sure RF register offset is correct
- Offset &= 0x3f;
-
- //switch page for 8256 RF IC
- if (priv->rf_chip == RF_8256)
- {
- if (Offset >= 31)
- {
+ /* Make sure RF register offset is correct */
+ offset &= 0x3f;
+
+ /* Switch page for 8256 RF IC */
+ if (priv->rf_chip == RF_8256) {
+ if (offset >= 31) {
priv->RfReg0Value[eRFPath] |= 0x140;
- //Switch to Reg_Mode2 for Reg 31-45
- rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord, (priv->RfReg0Value[eRFPath]<<16) );
- //modify offset
- NewOffset = Offset -30;
- }
- else if (Offset >= 16)
- {
+ /* Switch to Reg_Mode2 for Reg 31-45 */
+ rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
+ bMaskDWord,
+ priv->RfReg0Value[eRFPath]<<16);
+ /* Modify offset */
+ new_offset = offset - 30;
+ } else if (offset >= 16) {
priv->RfReg0Value[eRFPath] |= 0x100;
priv->RfReg0Value[eRFPath] &= (~0x40);
- //Switch to Reg_Mode 1 for Reg16-30
- rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord, (priv->RfReg0Value[eRFPath]<<16) );
-
- NewOffset = Offset - 15;
+ /* Switch to Reg_Mode1 for Reg16-30 */
+ rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
+ bMaskDWord,
+ priv->RfReg0Value[eRFPath]<<16);
+
+ new_offset = offset - 15;
+ } else {
+ new_offset = offset;
}
- else
- NewOffset = Offset;
- }
- else
- {
- RT_TRACE((COMP_PHY|COMP_ERR), "check RF type here, need to be 8256\n");
- NewOffset = Offset;
+ } else {
+ RT_TRACE((COMP_PHY|COMP_ERR),
+ "check RF type here, need to be 8256\n");
+ new_offset = offset;
}
- //put desired read addr to LSSI control Register
- rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadAddress, NewOffset);
- //Issue a posedge trigger
- //
+ /* Put desired read addr to LSSI control Register */
+ rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadAddress,
+ new_offset);
+ /* Issue a posedge trigger */
rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadEdge, 0x0);
rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadEdge, 0x1);
- // TODO: we should not delay such a long time. Ask for help from SD3
- msleep(1);
+ /* TODO: we should not delay such a long time. Ask for help from SD3 */
+ usleep_range(1000, 1000);
- ret = rtl8192_QueryBBReg(dev, pPhyReg->rfLSSIReadBack, bLSSIReadBackData);
+ ret = rtl8192_QueryBBReg(dev, pPhyReg->rfLSSIReadBack,
+ bLSSIReadBackData);
- // Switch back to Reg_Mode0;
- if(priv->rf_chip == RF_8256)
- {
+ /* Switch back to Reg_Mode0 */
+ if (priv->rf_chip == RF_8256) {
priv->RfReg0Value[eRFPath] &= 0xebf;
- rtl8192_setBBreg(
- dev,
- pPhyReg->rf3wireOffset,
- bMaskDWord,
- (priv->RfReg0Value[eRFPath] << 16));
+ rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord,
+ priv->RfReg0Value[eRFPath] << 16);
}
return ret;
-
}
/******************************************************************************
- *function: This function write data to RF register
- * input: net_device dev
- * RF90_RADIO_PATH_E eRFPath //radio path of A/B/C/D
- * u32 Offset //target address to be written
- * u32 Data //The new register data to be written
- * output: none
- * return: none
- * notice: For RF8256 only.
- ===========================================================
- *Reg Mode RegCTL[1] RegCTL[0] Note
+ * function: This function writes data to RF register
+ * input: net_device *dev
+ * RF90_RADIO_PATH_E eRFPath //radio path of A/B/C/D
+ * u32 offset //target address to be written
+ * u32 data //the new register data to be written
+ * output: none
+ * return: none
+ * notice: For RF8256 only.
+ * ===========================================================================
+ * Reg Mode RegCTL[1] RegCTL[0] Note
* (Reg00[12]) (Reg00[10])
- *===========================================================
- *Reg_Mode0 0 x Reg 0 ~15(0x0 ~ 0xf)
- *------------------------------------------------------------------
- *Reg_Mode1 1 0 Reg 16 ~30(0x1 ~ 0xf)
- *------------------------------------------------------------------
+ * ===========================================================================
+ * Reg_Mode0 0 x Reg 0 ~ 15(0x0 ~ 0xf)
+ * ---------------------------------------------------------------------------
+ * Reg_Mode1 1 0 Reg 16 ~ 30(0x1 ~ 0xf)
+ * ---------------------------------------------------------------------------
* Reg_Mode2 1 1 Reg 31 ~ 45(0x1 ~ 0xf)
- *------------------------------------------------------------------
- * ****************************************************************************/
-void rtl8192_phy_RFSerialWrite(struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 Offset, u32 Data)
+ * ---------------------------------------------------------------------------
+ *****************************************************************************/
+void rtl8192_phy_RFSerialWrite(struct net_device *dev,
+ RF90_RADIO_PATH_E eRFPath, u32 offset, u32 data)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- u32 DataAndAddr = 0, NewOffset = 0;
+ u32 DataAndAddr = 0, new_offset = 0;
BB_REGISTER_DEFINITION_T *pPhyReg = &priv->PHYRegDef[eRFPath];
- Offset &= 0x3f;
- //spin_lock_irqsave(&priv->rf_lock, flags);
-// down(&priv->rf_sem);
- if (priv->rf_chip == RF_8256)
- {
+ offset &= 0x3f;
+ if (priv->rf_chip == RF_8256) {
- if (Offset >= 31)
- {
+ if (offset >= 31) {
priv->RfReg0Value[eRFPath] |= 0x140;
- rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord, (priv->RfReg0Value[eRFPath] << 16));
- NewOffset = Offset - 30;
- }
- else if (Offset >= 16)
- {
+ rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
+ bMaskDWord,
+ priv->RfReg0Value[eRFPath] << 16);
+ new_offset = offset - 30;
+ } else if (offset >= 16) {
priv->RfReg0Value[eRFPath] |= 0x100;
priv->RfReg0Value[eRFPath] &= (~0x40);
- rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord, (priv->RfReg0Value[eRFPath]<<16));
- NewOffset = Offset - 15;
+ rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
+ bMaskDWord,
+ priv->RfReg0Value[eRFPath]<<16);
+ new_offset = offset - 15;
+ } else {
+ new_offset = offset;
}
- else
- NewOffset = Offset;
- }
- else
- {
- RT_TRACE((COMP_PHY|COMP_ERR), "check RF type here, need to be 8256\n");
- NewOffset = Offset;
+ } else {
+ RT_TRACE((COMP_PHY|COMP_ERR),
+ "check RF type here, need to be 8256\n");
+ new_offset = offset;
}
- // Put write addr in [5:0] and write data in [31:16]
- DataAndAddr = (Data<<16) | (NewOffset&0x3f);
+ /* Put write addr in [5:0] and write data in [31:16] */
+ DataAndAddr = (data<<16) | (new_offset&0x3f);
- // Write Operation
+ /* Write operation */
rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord, DataAndAddr);
- if(Offset==0x0)
- priv->RfReg0Value[eRFPath] = Data;
+ if (offset == 0x0)
+ priv->RfReg0Value[eRFPath] = data;
- // Switch back to Reg_Mode0;
- if(priv->rf_chip == RF_8256)
- {
- if(Offset != 0)
- {
+ /* Switch back to Reg_Mode0 */
+ if (priv->rf_chip == RF_8256) {
+ if (offset != 0) {
priv->RfReg0Value[eRFPath] &= 0xebf;
- rtl8192_setBBreg(
- dev,
- pPhyReg->rf3wireOffset,
- bMaskDWord,
- (priv->RfReg0Value[eRFPath] << 16));
+ rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
+ bMaskDWord,
+ priv->RfReg0Value[eRFPath] << 16);
}
}
- //spin_unlock_irqrestore(&priv->rf_lock, flags);
-// up(&priv->rf_sem);
return;
}
/******************************************************************************
- *function: This function set specific bits to RF register
- * input: net_device dev
- * RF90_RADIO_PATH_E eRFPath //radio path of A/B/C/D
- * u32 RegAddr //target addr to be modified
- * u32 BitMask //taget bit pos in the addr to be modified
- * u32 Data //value to be write
- * output: none
- * return: none
- * notice:
- * ****************************************************************************/
-void rtl8192_phy_SetRFReg(struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 RegAddr, u32 BitMask, u32 Data)
+ * function: This function sets specific bits to RF register
+ * input: net_device *dev
+ * RF90_RADIO_PATH_E eRFPath //radio path of A/B/C/D
+ * u32 reg_addr //target addr to be modified
+ * u32 bitmask //target bit pos to be modified
+ * u32 data //value to be written
+ * output: none
+ * return: none
+ * notice:
+ *****************************************************************************/
+void rtl8192_phy_SetRFReg(struct net_device *dev, RF90_RADIO_PATH_E eRFPath,
+ u32 reg_addr, u32 bitmask, u32 data)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- u32 Original_Value, BitShift, New_Value;
-// u8 time = 0;
+ u32 reg, bitshift;
if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
return;
- if (priv->Rf_Mode == RF_OP_By_FW)
- {
- if (BitMask != bMask12Bits) // RF data is 12 bits only
- {
- Original_Value = phy_FwRFSerialRead(dev, eRFPath, RegAddr);
- BitShift = rtl8192_CalculateBitShift(BitMask);
- New_Value = ((Original_Value) & (~BitMask)) | (Data<< BitShift);
-
- phy_FwRFSerialWrite(dev, eRFPath, RegAddr, New_Value);
- }else
- phy_FwRFSerialWrite(dev, eRFPath, RegAddr, Data);
+ if (priv->Rf_Mode == RF_OP_By_FW) {
+ if (bitmask != bMask12Bits) {
+ /* RF data is 12 bits only */
+ reg = phy_FwRFSerialRead(dev, eRFPath, reg_addr);
+ bitshift = rtl8192_CalculateBitShift(bitmask);
+ reg &= ~bitmask;
+ reg |= data << bitshift;
+
+ phy_FwRFSerialWrite(dev, eRFPath, reg_addr, reg);
+ } else {
+ phy_FwRFSerialWrite(dev, eRFPath, reg_addr, data);
+ }
udelay(200);
- }
- else
- {
- if (BitMask != bMask12Bits) // RF data is 12 bits only
- {
- Original_Value = rtl8192_phy_RFSerialRead(dev, eRFPath, RegAddr);
- BitShift = rtl8192_CalculateBitShift(BitMask);
- New_Value = (((Original_Value) & (~BitMask)) | (Data<< BitShift));
-
- rtl8192_phy_RFSerialWrite(dev, eRFPath, RegAddr, New_Value);
- }else
- rtl8192_phy_RFSerialWrite(dev, eRFPath, RegAddr, Data);
+ } else {
+ if (bitmask != bMask12Bits) {
+ /* RF data is 12 bits only */
+ reg = rtl8192_phy_RFSerialRead(dev, eRFPath, reg_addr);
+ bitshift = rtl8192_CalculateBitShift(bitmask);
+ reg &= ~bitmask;
+ reg |= data << bitshift;
+
+ rtl8192_phy_RFSerialWrite(dev, eRFPath, reg_addr, reg);
+ } else {
+ rtl8192_phy_RFSerialWrite(dev, eRFPath, reg_addr, data);
+ }
}
return;
}
/******************************************************************************
- *function: This function reads specific bits from RF register
- * input: net_device dev
- * u32 RegAddr //target addr to be readback
- * u32 BitMask //taget bit pos in the addr to be readback
- * output: none
- * return: u32 Data //the readback register value
- * notice:
- * ****************************************************************************/
-u32 rtl8192_phy_QueryRFReg(struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 RegAddr, u32 BitMask)
+ * function: This function reads specific bits from RF register
+ * input: net_device *dev
+ * u32 reg_addr //target addr to be readback
+ * u32 bitmask //target bit pos to be readback
+ * output: none
+ * return: u32 data //the readback register value
+ * notice:
+ *****************************************************************************/
+u32 rtl8192_phy_QueryRFReg(struct net_device *dev, RF90_RADIO_PATH_E eRFPath,
+ u32 reg_addr, u32 bitmask)
{
- u32 Original_Value, Readback_Value, BitShift;
+ u32 reg, bitshift;
struct r8192_priv *priv = ieee80211_priv(dev);
if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
return 0;
- if (priv->Rf_Mode == RF_OP_By_FW)
- {
- Original_Value = phy_FwRFSerialRead(dev, eRFPath, RegAddr);
- BitShift = rtl8192_CalculateBitShift(BitMask);
- Readback_Value = (Original_Value & BitMask) >> BitShift;
+ if (priv->Rf_Mode == RF_OP_By_FW) {
+ reg = phy_FwRFSerialRead(dev, eRFPath, reg_addr);
+ bitshift = rtl8192_CalculateBitShift(bitmask);
+ reg = (reg & bitmask) >> bitshift;
udelay(200);
- return (Readback_Value);
- }
- else
- {
- Original_Value = rtl8192_phy_RFSerialRead(dev, eRFPath, RegAddr);
- BitShift = rtl8192_CalculateBitShift(BitMask);
- Readback_Value = (Original_Value & BitMask) >> BitShift;
- return (Readback_Value);
+ return reg;
+ } else {
+ reg = rtl8192_phy_RFSerialRead(dev, eRFPath, reg_addr);
+ bitshift = rtl8192_CalculateBitShift(bitmask);
+ reg = (reg & bitmask) >> bitshift;
+ return reg;
}
}
+
/******************************************************************************
- *function: We support firmware to execute RF-R/W.
- * input: dev
- * output: none
- * return: none
- * notice:
- * ***************************************************************************/
-static u32
-phy_FwRFSerialRead(
- struct net_device* dev,
- RF90_RADIO_PATH_E eRFPath,
- u32 Offset )
+ * function: We support firmware to execute RF-R/W.
+ * input: net_device *dev
+ * RF90_RADIO_PATH_E eRFPath
+ * u32 offset
+ * output: none
+ * return: u32
+ * notice:
+ ****************************************************************************/
+static u32 phy_FwRFSerialRead(struct net_device *dev, RF90_RADIO_PATH_E eRFPath,
+ u32 offset)
{
- u32 retValue = 0;
- u32 Data = 0;
+ u32 reg = 0;
+ u32 data = 0;
u8 time = 0;
- //DbgPrint("FW RF CTRL\n\r");
- /* 2007/11/02 MH Firmware RF Write control. By Francis' suggestion, we can
- not execute the scheme in the initial step. Otherwise, RF-R/W will waste
- much time. This is only for site survey. */
- // 1. Read operation need not insert data. bit 0-11
- //Data &= bMask12Bits;
- // 2. Write RF register address. Bit 12-19
- Data |= ((Offset&0xFF)<<12);
- // 3. Write RF path. bit 20-21
- Data |= ((eRFPath&0x3)<<20);
- // 4. Set RF read indicator. bit 22=0
- //Data |= 0x00000;
- // 5. Trigger Fw to operate the command. bit 31
- Data |= 0x80000000;
- // 6. We can not execute read operation if bit 31 is 1.
- while (read_nic_dword(dev, QPNR)&0x80000000)
- {
- // If FW can not finish RF-R/W for more than ?? times. We must reset FW.
- if (time++ < 100)
- {
- //DbgPrint("FW not finish RF-R Time=%d\n\r", time);
+ u32 tmp;
+
+ /* Firmware RF Write control.
+ * We can not execute the scheme in the initial step.
+ * Otherwise, RF-R/W will waste much time.
+ * This is only for site survey. */
+ /* 1. Read operation need not insert data. bit 0-11 */
+ /* 2. Write RF register address. bit 12-19 */
+ data |= ((offset&0xFF)<<12);
+ /* 3. Write RF path. bit 20-21 */
+ data |= ((eRFPath&0x3)<<20);
+ /* 4. Set RF read indicator. bit 22=0 */
+ /* 5. Trigger Fw to operate the command. bit 31 */
+ data |= 0x80000000;
+ /* 6. We can not execute read operation if bit 31 is 1. */
+ read_nic_dword(dev, QPNR, &tmp);
+ while (tmp & 0x80000000) {
+ /* If FW can not finish RF-R/W for more than ?? times.
+ We must reset FW. */
+ if (time++ < 100) {
udelay(10);
- }
- else
+ read_nic_dword(dev, QPNR, &tmp);
+ } else {
break;
+ }
}
- // 7. Execute read operation.
- write_nic_dword(dev, QPNR, Data);
- // 8. Check if firmawre send back RF content.
- while (read_nic_dword(dev, QPNR)&0x80000000)
- {
- // If FW can not finish RF-R/W for more than ?? times. We must reset FW.
- if (time++ < 100)
- {
- //DbgPrint("FW not finish RF-W Time=%d\n\r", time);
+ /* 7. Execute read operation. */
+ write_nic_dword(dev, QPNR, data);
+ /* 8. Check if firmware sends back RF content. */
+ read_nic_dword(dev, QPNR, &tmp);
+ while (tmp & 0x80000000) {
+ /* If FW can not finish RF-R/W for more than ?? times.
+ We must reset FW. */
+ if (time++ < 100) {
udelay(10);
+ read_nic_dword(dev, QPNR, &tmp);
+ } else {
+ return 0;
}
- else
- return (0);
}
- retValue = read_nic_dword(dev, RF_DATA);
-
- return (retValue);
+ read_nic_dword(dev, RF_DATA, &reg);
-} /* phy_FwRFSerialRead */
+ return reg;
+}
/******************************************************************************
- *function: We support firmware to execute RF-R/W.
- * input: dev
- * output: none
- * return: none
- * notice:
- * ***************************************************************************/
-static void
-phy_FwRFSerialWrite(
- struct net_device* dev,
- RF90_RADIO_PATH_E eRFPath,
- u32 Offset,
- u32 Data )
+ * function: We support firmware to execute RF-R/W.
+ * input: net_device *dev
+ * RF90_RADIO_PATH_E eRFPath
+ * u32 offset
+ * u32 data
+ * output: none
+ * return: none
+ * notice:
+ ****************************************************************************/
+static void phy_FwRFSerialWrite(struct net_device *dev,
+ RF90_RADIO_PATH_E eRFPath, u32 offset, u32 data)
{
u8 time = 0;
-
- //DbgPrint("N FW RF CTRL RF-%d OF%02x DATA=%03x\n\r", eRFPath, Offset, Data);
- /* 2007/11/02 MH Firmware RF Write control. By Francis' suggestion, we can
- not execute the scheme in the initial step. Otherwise, RF-R/W will waste
- much time. This is only for site survey. */
-
- // 1. Set driver write bit and 12 bit data. bit 0-11
- //Data &= bMask12Bits; // Done by uper layer.
- // 2. Write RF register address. bit 12-19
- Data |= ((Offset&0xFF)<<12);
- // 3. Write RF path. bit 20-21
- Data |= ((eRFPath&0x3)<<20);
- // 4. Set RF write indicator. bit 22=1
- Data |= 0x400000;
- // 5. Trigger Fw to operate the command. bit 31=1
- Data |= 0x80000000;
-
- // 6. Write operation. We can not write if bit 31 is 1.
- while (read_nic_dword(dev, QPNR)&0x80000000)
- {
- // If FW can not finish RF-R/W for more than ?? times. We must reset FW.
- if (time++ < 100)
- {
- //DbgPrint("FW not finish RF-W Time=%d\n\r", time);
+ u32 tmp;
+
+ /* Firmware RF Write control.
+ * We can not execute the scheme in the initial step.
+ * Otherwise, RF-R/W will waste much time.
+ * This is only for site survey. */
+
+ /* 1. Set driver write bit and 12 bit data. bit 0-11 */
+ /* 2. Write RF register address. bit 12-19 */
+ data |= ((offset&0xFF)<<12);
+ /* 3. Write RF path. bit 20-21 */
+ data |= ((eRFPath&0x3)<<20);
+ /* 4. Set RF write indicator. bit 22=1 */
+ data |= 0x400000;
+ /* 5. Trigger Fw to operate the command. bit 31=1 */
+ data |= 0x80000000;
+
+ /* 6. Write operation. We can not write if bit 31 is 1. */
+ read_nic_dword(dev, QPNR, &tmp);
+ while (tmp & 0x80000000) {
+ /* If FW can not finish RF-R/W for more than ?? times.
+ We must reset FW. */
+ if (time++ < 100) {
udelay(10);
- }
- else
+ read_nic_dword(dev, QPNR, &tmp);
+ } else {
break;
+ }
}
- // 7. No matter check bit. We always force the write. Because FW will
- // not accept the command.
- write_nic_dword(dev, QPNR, Data);
- /* 2007/11/02 MH Acoording to test, we must delay 20us to wait firmware
+ /* 7. No matter check bit. We always force the write.
+ Because FW will not accept the command. */
+ write_nic_dword(dev, QPNR, data);
+ /* According to test, we must delay 20us to wait firmware
to finish RF write operation. */
- /* 2008/01/17 MH We support delay in firmware side now. */
- //delay_us(20);
-
-} /* phy_FwRFSerialWrite */
-
+ /* We support delay in firmware side now. */
+}
/******************************************************************************
- *function: This function read BB parameters from Header file we gen,
- * and do register read/write
- * input: dev
- * output: none
- * return: none
- * notice: BB parameters may change all the time, so please make
- * sure it has been synced with the newest.
- * ***************************************************************************/
-void rtl8192_phy_configmac(struct net_device* dev)
+ * function: This function reads BB parameters from header file we generate,
+ * and does register read/write
+ * input: net_device *dev
+ * output: none
+ * return: none
+ * notice: BB parameters may change all the time, so please make
+ * sure it has been synced with the newest.
+ *****************************************************************************/
+void rtl8192_phy_configmac(struct net_device *dev)
{
u32 dwArrayLen = 0, i;
- u32* pdwArray = NULL;
+ u32 *pdwArray = NULL;
struct r8192_priv *priv = ieee80211_priv(dev);
- if(priv->btxpowerdata_readfromEEPORM)
- {
+ if (priv->btxpowerdata_readfromEEPORM) {
RT_TRACE(COMP_PHY, "Rtl819XMACPHY_Array_PG\n");
dwArrayLen = MACPHY_Array_PGLength;
pdwArray = rtl819XMACPHY_Array_PG;
- }
- else
- {
+ } else {
RT_TRACE(COMP_PHY, "Rtl819XMACPHY_Array\n");
dwArrayLen = MACPHY_ArrayLength;
pdwArray = rtl819XMACPHY_Array;
}
- for(i = 0; i<dwArrayLen; i=i+3){
- if(pdwArray[i] == 0x318)
- {
+ for (i = 0; i < dwArrayLen; i = i+3) {
+ if (pdwArray[i] == 0x318) {
pdwArray[i+2] = 0x00000800;
- //DbgPrint("ptrArray[i], ptrArray[i+1], ptrArray[i+2] = %x, %x, %x\n",
- // ptrArray[i], ptrArray[i+1], ptrArray[i+2]);
}
- RT_TRACE(COMP_DBG, "The Rtl8190MACPHY_Array[0] is %x Rtl8190MACPHY_Array[1] is %x Rtl8190MACPHY_Array[2] is %x\n",
- pdwArray[i], pdwArray[i+1], pdwArray[i+2]);
- rtl8192_setBBreg(dev, pdwArray[i], pdwArray[i+1], pdwArray[i+2]);
+ RT_TRACE(COMP_DBG,
+ "Rtl8190MACPHY_Array[0]=%x Rtl8190MACPHY_Array[1]=%x Rtl8190MACPHY_Array[2]=%x\n",
+ pdwArray[i], pdwArray[i+1], pdwArray[i+2]);
+ rtl8192_setBBreg(dev, pdwArray[i], pdwArray[i+1],
+ pdwArray[i+2]);
}
return;
-
}
/******************************************************************************
- *function: This function does dirty work
- * input: dev
- * output: none
- * return: none
- * notice: BB parameters may change all the time, so please make
- * sure it has been synced with the newest.
- * ***************************************************************************/
-
-void rtl8192_phyConfigBB(struct net_device* dev, u8 ConfigType)
+ * function: This function does dirty work
+ * input: net_device *dev
+ * u8 ConfigType
+ * output: none
+ * return: none
+ * notice: BB parameters may change all the time, so please make
+ * sure it has been synced with the newest.
+ *****************************************************************************/
+void rtl8192_phyConfigBB(struct net_device *dev, u8 ConfigType)
{
u32 i;
#ifdef TO_DO_LIST
u32 *rtl8192PhyRegArrayTable = NULL, *rtl8192AgcTabArrayTable = NULL;
- if(Adapter->bInHctTest)
- {
+
+ if (Adapter->bInHctTest) {
PHY_REGArrayLen = PHY_REGArrayLengthDTM;
AGCTAB_ArrayLen = AGCTAB_ArrayLengthDTM;
Rtl8190PHY_REGArray_Table = Rtl819XPHY_REGArrayDTM;
Rtl8190AGCTAB_Array_Table = Rtl819XAGCTAB_ArrayDTM;
}
#endif
- if (ConfigType == BaseBand_Config_PHY_REG)
- {
- for (i=0; i<PHY_REG_1T2RArrayLength; i+=2)
- {
- rtl8192_setBBreg(dev, rtl819XPHY_REG_1T2RArray[i], bMaskDWord, rtl819XPHY_REG_1T2RArray[i+1]);
- RT_TRACE(COMP_DBG, "i: %x, The Rtl819xUsbPHY_REGArray[0] is %x Rtl819xUsbPHY_REGArray[1] is %x \n",i, rtl819XPHY_REG_1T2RArray[i], rtl819XPHY_REG_1T2RArray[i+1]);
+ if (ConfigType == BaseBand_Config_PHY_REG) {
+ for (i = 0; i < PHY_REG_1T2RArrayLength; i += 2) {
+ rtl8192_setBBreg(dev, rtl819XPHY_REG_1T2RArray[i],
+ bMaskDWord,
+ rtl819XPHY_REG_1T2RArray[i+1]);
+ RT_TRACE(COMP_DBG,
+ "i: %x, Rtl819xUsbPHY_REGArray[0]=%x Rtl819xUsbPHY_REGArray[1]=%x\n",
+ i, rtl819XPHY_REG_1T2RArray[i],
+ rtl819XPHY_REG_1T2RArray[i+1]);
}
- }
- else if (ConfigType == BaseBand_Config_AGC_TAB)
- {
- for (i=0; i<AGCTAB_ArrayLength; i+=2)
- {
- rtl8192_setBBreg(dev, rtl819XAGCTAB_Array[i], bMaskDWord, rtl819XAGCTAB_Array[i+1]);
- RT_TRACE(COMP_DBG, "i:%x, The rtl819XAGCTAB_Array[0] is %x rtl819XAGCTAB_Array[1] is %x \n",i, rtl819XAGCTAB_Array[i], rtl819XAGCTAB_Array[i+1]);
+ } else if (ConfigType == BaseBand_Config_AGC_TAB) {
+ for (i = 0; i < AGCTAB_ArrayLength; i += 2) {
+ rtl8192_setBBreg(dev, rtl819XAGCTAB_Array[i],
+ bMaskDWord, rtl819XAGCTAB_Array[i+1]);
+ RT_TRACE(COMP_DBG,
+ "i: %x, rtl819XAGCTAB_Array[0]=%x rtl819XAGCTAB_Array[1]=%x\n",
+ i, rtl819XAGCTAB_Array[i],
+ rtl819XAGCTAB_Array[i+1]);
}
}
return;
-
-
}
+
/******************************************************************************
- *function: This function initialize Register definition offset for Radio Path
- * A/B/C/D
- * input: net_device dev
- * output: none
- * return: none
- * notice: Initialization value here is constant and it should never be changed
- * ***************************************************************************/
-void rtl8192_InitBBRFRegDef(struct net_device* dev)
+ * function: This function initializes Register definition offset for
+ * Radio Path A/B/C/D
+ * input: net_device *dev
+ * output: none
+ * return: none
+ * notice: Initialization value here is constant and it should never
+ * be changed
+ *****************************************************************************/
+void rtl8192_InitBBRFRegDef(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
-// RF Interface Software Control
- priv->PHYRegDef[RF90_PATH_A].rfintfs = rFPGA0_XAB_RFInterfaceSW; // 16 LSBs if read 32-bit from 0x870
- priv->PHYRegDef[RF90_PATH_B].rfintfs = rFPGA0_XAB_RFInterfaceSW; // 16 MSBs if read 32-bit from 0x870 (16-bit for 0x872)
- priv->PHYRegDef[RF90_PATH_C].rfintfs = rFPGA0_XCD_RFInterfaceSW;// 16 LSBs if read 32-bit from 0x874
- priv->PHYRegDef[RF90_PATH_D].rfintfs = rFPGA0_XCD_RFInterfaceSW;// 16 MSBs if read 32-bit from 0x874 (16-bit for 0x876)
-
- // RF Interface Readback Value
- priv->PHYRegDef[RF90_PATH_A].rfintfi = rFPGA0_XAB_RFInterfaceRB; // 16 LSBs if read 32-bit from 0x8E0
- priv->PHYRegDef[RF90_PATH_B].rfintfi = rFPGA0_XAB_RFInterfaceRB;// 16 MSBs if read 32-bit from 0x8E0 (16-bit for 0x8E2)
- priv->PHYRegDef[RF90_PATH_C].rfintfi = rFPGA0_XCD_RFInterfaceRB;// 16 LSBs if read 32-bit from 0x8E4
- priv->PHYRegDef[RF90_PATH_D].rfintfi = rFPGA0_XCD_RFInterfaceRB;// 16 MSBs if read 32-bit from 0x8E4 (16-bit for 0x8E6)
-
- // RF Interface Output (and Enable)
- priv->PHYRegDef[RF90_PATH_A].rfintfo = rFPGA0_XA_RFInterfaceOE; // 16 LSBs if read 32-bit from 0x860
- priv->PHYRegDef[RF90_PATH_B].rfintfo = rFPGA0_XB_RFInterfaceOE; // 16 LSBs if read 32-bit from 0x864
- priv->PHYRegDef[RF90_PATH_C].rfintfo = rFPGA0_XC_RFInterfaceOE;// 16 LSBs if read 32-bit from 0x868
- priv->PHYRegDef[RF90_PATH_D].rfintfo = rFPGA0_XD_RFInterfaceOE;// 16 LSBs if read 32-bit from 0x86C
-
- // RF Interface (Output and) Enable
- priv->PHYRegDef[RF90_PATH_A].rfintfe = rFPGA0_XA_RFInterfaceOE; // 16 MSBs if read 32-bit from 0x860 (16-bit for 0x862)
- priv->PHYRegDef[RF90_PATH_B].rfintfe = rFPGA0_XB_RFInterfaceOE; // 16 MSBs if read 32-bit from 0x864 (16-bit for 0x866)
- priv->PHYRegDef[RF90_PATH_C].rfintfe = rFPGA0_XC_RFInterfaceOE;// 16 MSBs if read 32-bit from 0x86A (16-bit for 0x86A)
- priv->PHYRegDef[RF90_PATH_D].rfintfe = rFPGA0_XD_RFInterfaceOE;// 16 MSBs if read 32-bit from 0x86C (16-bit for 0x86E)
-
- //Addr of LSSI. Write RF register by driver
- priv->PHYRegDef[RF90_PATH_A].rf3wireOffset = rFPGA0_XA_LSSIParameter; //LSSI Parameter
+
+ /* RF Interface Software Control */
+ /* 16 LSBs if read 32-bit from 0x870 */
+ priv->PHYRegDef[RF90_PATH_A].rfintfs = rFPGA0_XAB_RFInterfaceSW;
+ /* 16 MSBs if read 32-bit from 0x870 (16-bit for 0x872) */
+ priv->PHYRegDef[RF90_PATH_B].rfintfs = rFPGA0_XAB_RFInterfaceSW;
+ /* 16 LSBs if read 32-bit from 0x874 */
+ priv->PHYRegDef[RF90_PATH_C].rfintfs = rFPGA0_XCD_RFInterfaceSW;
+ /* 16 MSBs if read 32-bit from 0x874 (16-bit for 0x876) */
+ priv->PHYRegDef[RF90_PATH_D].rfintfs = rFPGA0_XCD_RFInterfaceSW;
+
+ /* RF Interface Readback Value */
+ /* 16 LSBs if read 32-bit from 0x8E0 */
+ priv->PHYRegDef[RF90_PATH_A].rfintfi = rFPGA0_XAB_RFInterfaceRB;
+ /* 16 MSBs if read 32-bit from 0x8E0 (16-bit for 0x8E2) */
+ priv->PHYRegDef[RF90_PATH_B].rfintfi = rFPGA0_XAB_RFInterfaceRB;
+ /* 16 LSBs if read 32-bit from 0x8E4 */
+ priv->PHYRegDef[RF90_PATH_C].rfintfi = rFPGA0_XCD_RFInterfaceRB;
+ /* 16 MSBs if read 32-bit from 0x8E4 (16-bit for 0x8E6) */
+ priv->PHYRegDef[RF90_PATH_D].rfintfi = rFPGA0_XCD_RFInterfaceRB;
+
+ /* RF Interface Output (and Enable) */
+ /* 16 LSBs if read 32-bit from 0x860 */
+ priv->PHYRegDef[RF90_PATH_A].rfintfo = rFPGA0_XA_RFInterfaceOE;
+ /* 16 LSBs if read 32-bit from 0x864 */
+ priv->PHYRegDef[RF90_PATH_B].rfintfo = rFPGA0_XB_RFInterfaceOE;
+ /* 16 LSBs if read 32-bit from 0x868 */
+ priv->PHYRegDef[RF90_PATH_C].rfintfo = rFPGA0_XC_RFInterfaceOE;
+ /* 16 LSBs if read 32-bit from 0x86C */
+ priv->PHYRegDef[RF90_PATH_D].rfintfo = rFPGA0_XD_RFInterfaceOE;
+
+ /* RF Interface (Output and) Enable */
+ /* 16 MSBs if read 32-bit from 0x860 (16-bit for 0x862) */
+ priv->PHYRegDef[RF90_PATH_A].rfintfe = rFPGA0_XA_RFInterfaceOE;
+ /* 16 MSBs if read 32-bit from 0x864 (16-bit for 0x866) */
+ priv->PHYRegDef[RF90_PATH_B].rfintfe = rFPGA0_XB_RFInterfaceOE;
+ /* 16 MSBs if read 32-bit from 0x86A (16-bit for 0x86A) */
+ priv->PHYRegDef[RF90_PATH_C].rfintfe = rFPGA0_XC_RFInterfaceOE;
+ /* 16 MSBs if read 32-bit from 0x86C (16-bit for 0x86E) */
+ priv->PHYRegDef[RF90_PATH_D].rfintfe = rFPGA0_XD_RFInterfaceOE;
+
+ /* Addr of LSSI. Write RF register by driver */
+ priv->PHYRegDef[RF90_PATH_A].rf3wireOffset = rFPGA0_XA_LSSIParameter;
priv->PHYRegDef[RF90_PATH_B].rf3wireOffset = rFPGA0_XB_LSSIParameter;
priv->PHYRegDef[RF90_PATH_C].rf3wireOffset = rFPGA0_XC_LSSIParameter;
priv->PHYRegDef[RF90_PATH_D].rf3wireOffset = rFPGA0_XD_LSSIParameter;
- // RF parameter
- priv->PHYRegDef[RF90_PATH_A].rfLSSI_Select = rFPGA0_XAB_RFParameter; //BB Band Select
+ /* RF parameter */
+ /* BB Band Select */
+ priv->PHYRegDef[RF90_PATH_A].rfLSSI_Select = rFPGA0_XAB_RFParameter;
priv->PHYRegDef[RF90_PATH_B].rfLSSI_Select = rFPGA0_XAB_RFParameter;
priv->PHYRegDef[RF90_PATH_C].rfLSSI_Select = rFPGA0_XCD_RFParameter;
priv->PHYRegDef[RF90_PATH_D].rfLSSI_Select = rFPGA0_XCD_RFParameter;
- // Tx AGC Gain Stage (same for all path. Should we remove this?)
- priv->PHYRegDef[RF90_PATH_A].rfTxGainStage = rFPGA0_TxGainStage; //Tx gain stage
- priv->PHYRegDef[RF90_PATH_B].rfTxGainStage = rFPGA0_TxGainStage; //Tx gain stage
- priv->PHYRegDef[RF90_PATH_C].rfTxGainStage = rFPGA0_TxGainStage; //Tx gain stage
- priv->PHYRegDef[RF90_PATH_D].rfTxGainStage = rFPGA0_TxGainStage; //Tx gain stage
-
- // Tranceiver A~D HSSI Parameter-1
- priv->PHYRegDef[RF90_PATH_A].rfHSSIPara1 = rFPGA0_XA_HSSIParameter1; //wire control parameter1
- priv->PHYRegDef[RF90_PATH_B].rfHSSIPara1 = rFPGA0_XB_HSSIParameter1; //wire control parameter1
- priv->PHYRegDef[RF90_PATH_C].rfHSSIPara1 = rFPGA0_XC_HSSIParameter1; //wire control parameter1
- priv->PHYRegDef[RF90_PATH_D].rfHSSIPara1 = rFPGA0_XD_HSSIParameter1; //wire control parameter1
-
- // Tranceiver A~D HSSI Parameter-2
- priv->PHYRegDef[RF90_PATH_A].rfHSSIPara2 = rFPGA0_XA_HSSIParameter2; //wire control parameter2
- priv->PHYRegDef[RF90_PATH_B].rfHSSIPara2 = rFPGA0_XB_HSSIParameter2; //wire control parameter2
- priv->PHYRegDef[RF90_PATH_C].rfHSSIPara2 = rFPGA0_XC_HSSIParameter2; //wire control parameter2
- priv->PHYRegDef[RF90_PATH_D].rfHSSIPara2 = rFPGA0_XD_HSSIParameter2; //wire control parameter1
-
- // RF switch Control
- priv->PHYRegDef[RF90_PATH_A].rfSwitchControl = rFPGA0_XAB_SwitchControl; //TR/Ant switch control
+ /* Tx AGC Gain Stage (same for all path. Should we remove this?) */
+ priv->PHYRegDef[RF90_PATH_A].rfTxGainStage = rFPGA0_TxGainStage;
+ priv->PHYRegDef[RF90_PATH_B].rfTxGainStage = rFPGA0_TxGainStage;
+ priv->PHYRegDef[RF90_PATH_C].rfTxGainStage = rFPGA0_TxGainStage;
+ priv->PHYRegDef[RF90_PATH_D].rfTxGainStage = rFPGA0_TxGainStage;
+
+ /* Transceiver A~D HSSI Parameter-1 */
+ /* wire control parameter1 */
+ priv->PHYRegDef[RF90_PATH_A].rfHSSIPara1 = rFPGA0_XA_HSSIParameter1;
+ priv->PHYRegDef[RF90_PATH_B].rfHSSIPara1 = rFPGA0_XB_HSSIParameter1;
+ priv->PHYRegDef[RF90_PATH_C].rfHSSIPara1 = rFPGA0_XC_HSSIParameter1;
+ priv->PHYRegDef[RF90_PATH_D].rfHSSIPara1 = rFPGA0_XD_HSSIParameter1;
+
+ /* Transceiver A~D HSSI Parameter-2 */
+ /* wire control parameter2 */
+ priv->PHYRegDef[RF90_PATH_A].rfHSSIPara2 = rFPGA0_XA_HSSIParameter2;
+ priv->PHYRegDef[RF90_PATH_B].rfHSSIPara2 = rFPGA0_XB_HSSIParameter2;
+ priv->PHYRegDef[RF90_PATH_C].rfHSSIPara2 = rFPGA0_XC_HSSIParameter2;
+ priv->PHYRegDef[RF90_PATH_D].rfHSSIPara2 = rFPGA0_XD_HSSIParameter2;
+
+ /* RF Switch Control */
+ /* TR/Ant switch control */
+ priv->PHYRegDef[RF90_PATH_A].rfSwitchControl = rFPGA0_XAB_SwitchControl;
priv->PHYRegDef[RF90_PATH_B].rfSwitchControl = rFPGA0_XAB_SwitchControl;
priv->PHYRegDef[RF90_PATH_C].rfSwitchControl = rFPGA0_XCD_SwitchControl;
priv->PHYRegDef[RF90_PATH_D].rfSwitchControl = rFPGA0_XCD_SwitchControl;
- // AGC control 1
+ /* AGC control 1 */
priv->PHYRegDef[RF90_PATH_A].rfAGCControl1 = rOFDM0_XAAGCCore1;
priv->PHYRegDef[RF90_PATH_B].rfAGCControl1 = rOFDM0_XBAGCCore1;
priv->PHYRegDef[RF90_PATH_C].rfAGCControl1 = rOFDM0_XCAGCCore1;
priv->PHYRegDef[RF90_PATH_D].rfAGCControl1 = rOFDM0_XDAGCCore1;
- // AGC control 2
+ /* AGC control 2 */
priv->PHYRegDef[RF90_PATH_A].rfAGCControl2 = rOFDM0_XAAGCCore2;
priv->PHYRegDef[RF90_PATH_B].rfAGCControl2 = rOFDM0_XBAGCCore2;
priv->PHYRegDef[RF90_PATH_C].rfAGCControl2 = rOFDM0_XCAGCCore2;
priv->PHYRegDef[RF90_PATH_D].rfAGCControl2 = rOFDM0_XDAGCCore2;
- // RX AFE control 1
+ /* RX AFE control 1 */
priv->PHYRegDef[RF90_PATH_A].rfRxIQImbalance = rOFDM0_XARxIQImbalance;
priv->PHYRegDef[RF90_PATH_B].rfRxIQImbalance = rOFDM0_XBRxIQImbalance;
priv->PHYRegDef[RF90_PATH_C].rfRxIQImbalance = rOFDM0_XCRxIQImbalance;
priv->PHYRegDef[RF90_PATH_D].rfRxIQImbalance = rOFDM0_XDRxIQImbalance;
- // RX AFE control 1
+ /* RX AFE control 1 */
priv->PHYRegDef[RF90_PATH_A].rfRxAFE = rOFDM0_XARxAFE;
priv->PHYRegDef[RF90_PATH_B].rfRxAFE = rOFDM0_XBRxAFE;
priv->PHYRegDef[RF90_PATH_C].rfRxAFE = rOFDM0_XCRxAFE;
priv->PHYRegDef[RF90_PATH_D].rfRxAFE = rOFDM0_XDRxAFE;
- // Tx AFE control 1
+ /* Tx AFE control 1 */
priv->PHYRegDef[RF90_PATH_A].rfTxIQImbalance = rOFDM0_XATxIQImbalance;
priv->PHYRegDef[RF90_PATH_B].rfTxIQImbalance = rOFDM0_XBTxIQImbalance;
priv->PHYRegDef[RF90_PATH_C].rfTxIQImbalance = rOFDM0_XCTxIQImbalance;
priv->PHYRegDef[RF90_PATH_D].rfTxIQImbalance = rOFDM0_XDTxIQImbalance;
- // Tx AFE control 2
+ /* Tx AFE control 2 */
priv->PHYRegDef[RF90_PATH_A].rfTxAFE = rOFDM0_XATxAFE;
priv->PHYRegDef[RF90_PATH_B].rfTxAFE = rOFDM0_XBTxAFE;
priv->PHYRegDef[RF90_PATH_C].rfTxAFE = rOFDM0_XCTxAFE;
priv->PHYRegDef[RF90_PATH_D].rfTxAFE = rOFDM0_XDTxAFE;
+ /* Transceiver LSSI Readback */
+ /* Tranceiver LSSI Readback */
priv->PHYRegDef[RF90_PATH_A].rfLSSIReadBack = rFPGA0_XA_LSSIReadBack;
priv->PHYRegDef[RF90_PATH_B].rfLSSIReadBack = rFPGA0_XB_LSSIReadBack;
priv->PHYRegDef[RF90_PATH_C].rfLSSIReadBack = rFPGA0_XC_LSSIReadBack;
priv->PHYRegDef[RF90_PATH_D].rfLSSIReadBack = rFPGA0_XD_LSSIReadBack;
-
}
+
/******************************************************************************
- *function: This function is to write register and then readback to make sure whether BB and RF is OK
- * input: net_device dev
- * HW90_BLOCK_E CheckBlock
- * RF90_RADIO_PATH_E eRFPath //only used when checkblock is HW90_BLOCK_RF
- * output: none
- * return: return whether BB and RF is ok(0:OK; 1:Fail)
- * notice: This function may be removed in the ASIC
- * ***************************************************************************/
-u8 rtl8192_phy_checkBBAndRF(struct net_device* dev, HW90_BLOCK_E CheckBlock, RF90_RADIO_PATH_E eRFPath)
+ * function: This function writes a register and then reads it back to make
+ * sure whether BB and RF are OK
+ * input: net_device *dev
+ * HW90_BLOCK_E CheckBlock
+ * RF90_RADIO_PATH_E eRFPath //only used when checkblock is
+ * //HW90_BLOCK_RF
+ * output: none
+ * return: return whether BB and RF is ok (0:OK, 1:Fail)
+ * notice: This function may be removed in the ASIC
+ ******************************************************************************/
+u8 rtl8192_phy_checkBBAndRF(struct net_device *dev, HW90_BLOCK_E CheckBlock,
+ RF90_RADIO_PATH_E eRFPath)
{
-// struct r8192_priv *priv = ieee80211_priv(dev);
-// BB_REGISTER_DEFINITION_T *pPhyReg = &priv->PHYRegDef[eRFPath];
u8 ret = 0;
- u32 i, CheckTimes = 4, dwRegRead = 0;
+ u32 i, CheckTimes = 4, reg = 0;
u32 WriteAddr[4];
u32 WriteData[] = {0xfffff027, 0xaa55a02f, 0x00000027, 0x55aa502f};
- // Initialize register address offset to be checked
+
+ /* Initialize register address offset to be checked */
WriteAddr[HW90_BLOCK_MAC] = 0x100;
WriteAddr[HW90_BLOCK_PHY0] = 0x900;
WriteAddr[HW90_BLOCK_PHY1] = 0x800;
WriteAddr[HW90_BLOCK_RF] = 0x3;
- RT_TRACE(COMP_PHY, "=======>%s(), CheckBlock:%d\n", __FUNCTION__, CheckBlock);
- for(i=0 ; i < CheckTimes ; i++)
- {
-
- //
- // Write Data to register and readback
- //
- switch(CheckBlock)
- {
+ RT_TRACE(COMP_PHY, "%s(), CheckBlock: %d\n", __func__, CheckBlock);
+ for (i = 0; i < CheckTimes; i++) {
+
+ /* Write data to register and readback */
+ switch (CheckBlock) {
case HW90_BLOCK_MAC:
- RT_TRACE(COMP_ERR, "PHY_CheckBBRFOK(): Never Write 0x100 here!");
+ RT_TRACE(COMP_ERR,
+ "PHY_CheckBBRFOK(): Never Write 0x100 here!\n");
break;
case HW90_BLOCK_PHY0:
case HW90_BLOCK_PHY1:
- write_nic_dword(dev, WriteAddr[CheckBlock], WriteData[i]);
- dwRegRead = read_nic_dword(dev, WriteAddr[CheckBlock]);
+ write_nic_dword(dev, WriteAddr[CheckBlock],
+ WriteData[i]);
+ read_nic_dword(dev, WriteAddr[CheckBlock], &reg);
break;
case HW90_BLOCK_RF:
WriteData[i] &= 0xfff;
- rtl8192_phy_SetRFReg(dev, eRFPath, WriteAddr[HW90_BLOCK_RF], bMask12Bits, WriteData[i]);
- // TODO: we should not delay for such a long time. Ask SD3
- msleep(1);
- dwRegRead = rtl8192_phy_QueryRFReg(dev, eRFPath, WriteAddr[HW90_BLOCK_RF], bMask12Bits);
- msleep(1);
+ rtl8192_phy_SetRFReg(dev, eRFPath,
+ WriteAddr[HW90_BLOCK_RF],
+ bMask12Bits, WriteData[i]);
+ /* TODO: we should not delay for such a long time.
+ Ask SD3 */
+ usleep_range(1000, 1000);
+ reg = rtl8192_phy_QueryRFReg(dev, eRFPath,
+ WriteAddr[HW90_BLOCK_RF],
+ bMask12Bits);
+ usleep_range(1000, 1000);
break;
default:
@@ -737,12 +759,11 @@ u8 rtl8192_phy_checkBBAndRF(struct net_device* dev, HW90_BLOCK_E CheckBlock, RF9
}
- //
- // Check whether readback data is correct
- //
- if(dwRegRead != WriteData[i])
- {
- RT_TRACE((COMP_PHY|COMP_ERR), "====>error=====dwRegRead: %x, WriteData: %x \n", dwRegRead, WriteData[i]);
+ /* Check whether readback data is correct */
+ if (reg != WriteData[i]) {
+ RT_TRACE((COMP_PHY|COMP_ERR),
+ "error reg: %x, WriteData: %x\n",
+ reg, WriteData[i]);
ret = 1;
break;
}
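
The hunk above keeps the write-then-readback verification in rtl8192_phy_checkBBAndRF(), only renaming dwRegRead to reg. A sketch of that check against the same WriteData test patterns, with a plain variable standing in for the hardware register:

#include <stdio.h>

static unsigned int fake_reg;	/* stand-in for a PHY register */

static void wr(unsigned int v) { fake_reg = v; }
static unsigned int rd(void) { return fake_reg; }

int main(void)
{
	unsigned int patterns[] = { 0xfffff027, 0xaa55a02f, 0x00000027, 0x55aa502f };
	unsigned int i, back, bad = 0;

	for (i = 0; i < sizeof(patterns) / sizeof(patterns[0]); i++) {
		wr(patterns[i]);
		back = rd();
		/* Check whether readback data is correct */
		if (back != patterns[i]) {
			printf("error reg: %x, WriteData: %x\n", back, patterns[i]);
			bad = 1;
			break;
		}
	}
	printf("BB/RF check %s\n", bad ? "failed" : "passed");
	return 0;
}
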
@@ -751,179 +772,193 @@ u8 rtl8192_phy_checkBBAndRF(struct net_device* dev, HW90_BLOCK_E CheckBlock, RF9
return ret;
}
-
/******************************************************************************
- *function: This function initialize BB&RF
- * input: net_device dev
- * output: none
- * return: none
- * notice: Initialization value may change all the time, so please make
- * sure it has been synced with the newest.
- * ***************************************************************************/
-void rtl8192_BB_Config_ParaFile(struct net_device* dev)
+ * function: This function initializes BB&RF
+ * input: net_device *dev
+ * output: none
+ * return: none
+ * notice: Initialization value may change all the time, so please make
+ * sure it has been synced with the newest.
+ ******************************************************************************/
+void rtl8192_BB_Config_ParaFile(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- u8 bRegValue = 0, eCheckItem = 0, rtStatus = 0;
- u32 dwRegValue = 0;
+ u8 reg_u8 = 0, eCheckItem = 0, status = 0;
+ u32 reg_u32 = 0;
+
/**************************************
- //<1>Initialize BaseBand
- **************************************/
+ * <1> Initialize BaseBand
+ *************************************/
- /*--set BB Global Reset--*/
- bRegValue = read_nic_byte(dev, BB_GLOBAL_RESET);
- write_nic_byte(dev, BB_GLOBAL_RESET,(bRegValue|BB_GLOBAL_RESET_BIT));
+ /* --set BB Global Reset-- */
+ read_nic_byte(dev, BB_GLOBAL_RESET, &reg_u8);
+ write_nic_byte(dev, BB_GLOBAL_RESET, (reg_u8|BB_GLOBAL_RESET_BIT));
mdelay(50);
- /*---set BB reset Active---*/
- dwRegValue = read_nic_dword(dev, CPU_GEN);
- write_nic_dword(dev, CPU_GEN, (dwRegValue&(~CPU_GEN_BB_RST)));
-
- /*----Ckeck FPGAPHY0 and PHY1 board is OK----*/
- // TODO: this function should be removed on ASIC , Emily 2007.2.2
- for(eCheckItem=(HW90_BLOCK_E)HW90_BLOCK_PHY0; eCheckItem<=HW90_BLOCK_PHY1; eCheckItem++)
- {
- rtStatus = rtl8192_phy_checkBBAndRF(dev, (HW90_BLOCK_E)eCheckItem, (RF90_RADIO_PATH_E)0); //don't care RF path
- if(rtStatus != 0)
- {
- RT_TRACE((COMP_ERR | COMP_PHY), "PHY_RF8256_Config():Check PHY%d Fail!!\n", eCheckItem-1);
- return ;
+ /* ---set BB reset Active--- */
+ read_nic_dword(dev, CPU_GEN, &reg_u32);
+ write_nic_dword(dev, CPU_GEN, (reg_u32&(~CPU_GEN_BB_RST)));
+
+ /* ----Check FPGAPHY0 and PHY1 board is OK---- */
+ /* TODO: this function should be removed on ASIC */
+ for (eCheckItem = (HW90_BLOCK_E)HW90_BLOCK_PHY0;
+ eCheckItem <= HW90_BLOCK_PHY1; eCheckItem++) {
+ /* don't care RF path */
+ status = rtl8192_phy_checkBBAndRF(dev, (HW90_BLOCK_E)eCheckItem,
+ (RF90_RADIO_PATH_E)0);
+ if (status != 0) {
+ RT_TRACE((COMP_ERR | COMP_PHY),
+ "PHY_RF8256_Config(): Check PHY%d Fail!!\n",
+ eCheckItem-1);
+ return;
}
}
- /*---- Set CCK and OFDM Block "OFF"----*/
+ /* ---- Set CCK and OFDM Block "OFF"---- */
rtl8192_setBBreg(dev, rFPGA0_RFMOD, bCCKEn|bOFDMEn, 0x0);
- /*----BB Register Initilazation----*/
- //==m==>Set PHY REG From Header<==m==
+ /* ----BB Register Initialization---- */
+ /* ==m==>Set PHY REG From Header<==m== */
rtl8192_phyConfigBB(dev, BaseBand_Config_PHY_REG);
- /*----Set BB reset de-Active----*/
- dwRegValue = read_nic_dword(dev, CPU_GEN);
- write_nic_dword(dev, CPU_GEN, (dwRegValue|CPU_GEN_BB_RST));
+ /* ----Set BB reset de-Active---- */
+ read_nic_dword(dev, CPU_GEN, &reg_u32);
+ write_nic_dword(dev, CPU_GEN, (reg_u32|CPU_GEN_BB_RST));
- /*----BB AGC table Initialization----*/
- //==m==>Set PHY REG From Header<==m==
+ /* ----BB AGC table Initialization---- */
+ /* ==m==>Set PHY REG From Header<==m== */
rtl8192_phyConfigBB(dev, BaseBand_Config_AGC_TAB);
- /*----Enable XSTAL ----*/
+ /* ----Enable XSTAL ---- */
write_nic_byte_E(dev, 0x5e, 0x00);
- if (priv->card_8192_version == (u8)VERSION_819xU_A)
- {
- //Antenna gain offset from B/C/D to A
- dwRegValue = (priv->AntennaTxPwDiff[1]<<4 | priv->AntennaTxPwDiff[0]);
- rtl8192_setBBreg(dev, rFPGA0_TxGainStage, (bXBTxAGC|bXCTxAGC), dwRegValue);
-
- //XSTALLCap
- dwRegValue = priv->CrystalCap & 0xf;
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, bXtalCap, dwRegValue);
+ if (priv->card_8192_version == (u8)VERSION_819xU_A) {
+ /* Antenna gain offset from B/C/D to A */
+ reg_u32 = (priv->AntennaTxPwDiff[1]<<4 |
+ priv->AntennaTxPwDiff[0]);
+ rtl8192_setBBreg(dev, rFPGA0_TxGainStage, (bXBTxAGC|bXCTxAGC),
+ reg_u32);
+
+ /* XSTALLCap */
+ reg_u32 = priv->CrystalCap & 0xf;
+ rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, bXtalCap,
+ reg_u32);
}
- // Check if the CCK HighPower is turned ON.
- // This is used to calculate PWDB.
- priv->bCckHighPower = (u8)(rtl8192_QueryBBReg(dev, rFPGA0_XA_HSSIParameter2, 0x200));
+ /* Check if the CCK HighPower is turned ON.
+ This is used to calculate PWDB. */
+ priv->bCckHighPower = (u8)rtl8192_QueryBBReg(dev,
+ rFPGA0_XA_HSSIParameter2,
+ 0x200);
return;
}
+
/******************************************************************************
- *function: This function initialize BB&RF
- * input: net_device dev
- * output: none
- * return: none
- * notice: Initialization value may change all the time, so please make
- * sure it has been synced with the newest.
- * ***************************************************************************/
-void rtl8192_BBConfig(struct net_device* dev)
+ * function: This function initializes BB&RF
+ * input: net_device *dev
+ * output: none
+ * return: none
+ * notice: Initialization value may change all the time, so please make
+ * sure it has been synced with the newest.
+ *****************************************************************************/
+void rtl8192_BBConfig(struct net_device *dev)
{
rtl8192_InitBBRFRegDef(dev);
- //config BB&RF. As hardCode based initialization has not been well
- //implemented, so use file first.FIXME:should implement it for hardcode?
+ /* Config BB & RF. As hard-coded initialization has not been well
+ * implemented, use the file first.
+ * FIXME: should implement it for hardcode? */
rtl8192_BB_Config_ParaFile(dev);
return;
}
+
/******************************************************************************
- *function: This function obtains the initialization value of Tx power Level offset
- * input: net_device dev
- * output: none
- * return: none
- * ***************************************************************************/
-void rtl8192_phy_getTxPower(struct net_device* dev)
+ * function: This function obtains the initialization value of Tx power Level
+ * offset
+ * input: net_device *dev
+ * output: none
+ * return: none
+ *****************************************************************************/
+void rtl8192_phy_getTxPower(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- priv->MCSTxPowerLevelOriginalOffset[0] =
- read_nic_dword(dev, rTxAGC_Rate18_06);
- priv->MCSTxPowerLevelOriginalOffset[1] =
- read_nic_dword(dev, rTxAGC_Rate54_24);
- priv->MCSTxPowerLevelOriginalOffset[2] =
- read_nic_dword(dev, rTxAGC_Mcs03_Mcs00);
- priv->MCSTxPowerLevelOriginalOffset[3] =
- read_nic_dword(dev, rTxAGC_Mcs07_Mcs04);
- priv->MCSTxPowerLevelOriginalOffset[4] =
- read_nic_dword(dev, rTxAGC_Mcs11_Mcs08);
- priv->MCSTxPowerLevelOriginalOffset[5] =
- read_nic_dword(dev, rTxAGC_Mcs15_Mcs12);
-
- // read rx initial gain
- priv->DefaultInitialGain[0] = read_nic_byte(dev, rOFDM0_XAAGCCore1);
- priv->DefaultInitialGain[1] = read_nic_byte(dev, rOFDM0_XBAGCCore1);
- priv->DefaultInitialGain[2] = read_nic_byte(dev, rOFDM0_XCAGCCore1);
- priv->DefaultInitialGain[3] = read_nic_byte(dev, rOFDM0_XDAGCCore1);
- RT_TRACE(COMP_INIT, "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x) \n",
- priv->DefaultInitialGain[0], priv->DefaultInitialGain[1],
- priv->DefaultInitialGain[2], priv->DefaultInitialGain[3]);
-
- // read framesync
- priv->framesync = read_nic_byte(dev, rOFDM0_RxDetector3);
- priv->framesyncC34 = read_nic_byte(dev, rOFDM0_RxDetector2);
+ u8 tmp;
+
+ read_nic_dword(dev, rTxAGC_Rate18_06,
+ &priv->MCSTxPowerLevelOriginalOffset[0]);
+ read_nic_dword(dev, rTxAGC_Rate54_24,
+ &priv->MCSTxPowerLevelOriginalOffset[1]);
+ read_nic_dword(dev, rTxAGC_Mcs03_Mcs00,
+ &priv->MCSTxPowerLevelOriginalOffset[2]);
+ read_nic_dword(dev, rTxAGC_Mcs07_Mcs04,
+ &priv->MCSTxPowerLevelOriginalOffset[3]);
+ read_nic_dword(dev, rTxAGC_Mcs11_Mcs08,
+ &priv->MCSTxPowerLevelOriginalOffset[4]);
+ read_nic_dword(dev, rTxAGC_Mcs15_Mcs12,
+ &priv->MCSTxPowerLevelOriginalOffset[5]);
+
+ /* Read rx initial gain */
+ read_nic_byte(dev, rOFDM0_XAAGCCore1, &priv->DefaultInitialGain[0]);
+ read_nic_byte(dev, rOFDM0_XBAGCCore1, &priv->DefaultInitialGain[1]);
+ read_nic_byte(dev, rOFDM0_XCAGCCore1, &priv->DefaultInitialGain[2]);
+ read_nic_byte(dev, rOFDM0_XDAGCCore1, &priv->DefaultInitialGain[3]);
+ RT_TRACE(COMP_INIT,
+ "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x)\n",
+ priv->DefaultInitialGain[0], priv->DefaultInitialGain[1],
+ priv->DefaultInitialGain[2], priv->DefaultInitialGain[3]);
+
+ /* Read framesync */
+ read_nic_byte(dev, rOFDM0_RxDetector3, &priv->framesync);
+ read_nic_byte(dev, rOFDM0_RxDetector2, &tmp);
+ priv->framesyncC34 = tmp;
RT_TRACE(COMP_INIT, "Default framesync (0x%x) = 0x%x \n",
rOFDM0_RxDetector3, priv->framesync);
- // read SIFS (save the value read fome MACPHY_REG.txt)
- priv->SifsTime = read_nic_word(dev, SIFS);
+ /* Read SIFS (save the value read from MACPHY_REG.txt) */
+ read_nic_word(dev, SIFS, &priv->SifsTime);
return;
}
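Aside from whitespace, the hunk above also converts the register readers from the old value-returning style to the out-parameter style. A minimal sketch of the calling-convention change, using the same helpers that appear in the hunk (the exact prototypes live elsewhere in the driver and are assumed here, presumably so a failed USB read can be reported through the return value):

	/* Sketch only -- old style: the register value was the return value. */
	priv->DefaultInitialGain[0] = read_nic_byte(dev, rOFDM0_XAAGCCore1);

	/* New style: the value comes back through a pointer argument,
	 * leaving the return value free for an error code. */
	u8 gain;

	read_nic_byte(dev, rOFDM0_XAAGCCore1, &gain);
	priv->DefaultInitialGain[0] = gain;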
/******************************************************************************
- *function: This function obtains the initialization value of Tx power Level offset
- * input: net_device dev
- * output: none
- * return: none
- * ***************************************************************************/
-void rtl8192_phy_setTxPower(struct net_device* dev, u8 channel)
+ * function: This function sets the initialization value of Tx power Level
+ * offset
+ * input: net_device *dev
+ * u8 channel
+ * output: none
+ * return: none
+ ******************************************************************************/
+void rtl8192_phy_setTxPower(struct net_device *dev, u8 channel)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 powerlevel = priv->TxPowerLevelCCK[channel-1];
u8 powerlevelOFDM24G = priv->TxPowerLevelOFDM24G[channel-1];
- switch(priv->rf_chip)
- {
+ switch (priv->rf_chip) {
case RF_8256:
- PHY_SetRF8256CCKTxPower(dev, powerlevel); //need further implement
+ /* needs further implementation */
+ PHY_SetRF8256CCKTxPower(dev, powerlevel);
PHY_SetRF8256OFDMTxPower(dev, powerlevelOFDM24G);
break;
default:
-// case RF_8225:
-// case RF_8258:
- RT_TRACE((COMP_PHY|COMP_ERR), "error RF chipID(8225 or 8258) in function %s()\n", __FUNCTION__);
+ RT_TRACE((COMP_PHY|COMP_ERR),
+ "error RF chipID(8225 or 8258) in function %s()\n",
+ __func__);
break;
}
return;
}
/******************************************************************************
- *function: This function check Rf chip to do RF config
- * input: net_device dev
- * output: none
- * return: only 8256 is supported
- * ***************************************************************************/
-void rtl8192_phy_RFConfig(struct net_device* dev)
+ * function: This function checks Rf chip to do RF config
+ * input: net_device *dev
+ * output: none
+ * return: only 8256 is supported
+ ******************************************************************************/
+void rtl8192_phy_RFConfig(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- switch(priv->rf_chip)
- {
+ switch (priv->rf_chip) {
case RF_8256:
PHY_RF8256_Config(dev);
break;
- // case RF_8225:
- // case RF_8258:
default:
RT_TRACE(COMP_ERR, "error chip id\n");
break;
@@ -932,75 +967,89 @@ void rtl8192_phy_RFConfig(struct net_device* dev)
}
/******************************************************************************
- *function: This function update Initial gain
- * input: net_device dev
- * output: none
- * return: As Windows has not implemented this, wait for complement
- * ***************************************************************************/
-void rtl8192_phy_updateInitGain(struct net_device* dev)
+ * function: This function updates Initial gain
+ * input: net_device *dev
+ * output: none
+ * return: As Windows has not implemented this, wait for completion
+ ******************************************************************************/
+void rtl8192_phy_updateInitGain(struct net_device *dev)
{
return;
}
/******************************************************************************
- *function: This function read RF parameters from general head file, and do RF 3-wire
- * input: net_device dev
- * output: none
- * return: return code show if RF configuration is successful(0:pass, 1:fail)
- * Note: Delay may be required for RF configuration
- * ***************************************************************************/
-u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device* dev, RF90_RADIO_PATH_E eRFPath)
+ * function: This function reads RF parameters from the general header
+ * file and does the RF 3-wire configuration
+ * input: net_device *dev
+ * RF90_RADIO_PATH_E eRFPath
+ * output: none
+ * return: return code shows whether RF configuration succeeded (0: pass, 1: fail)
+ * notice: Delay may be required for RF configuration
+ *****************************************************************************/
+u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
+ RF90_RADIO_PATH_E eRFPath)
{
int i;
- //u32* pRFArray;
u8 ret = 0;
- switch(eRFPath){
+ switch (eRFPath) {
case RF90_PATH_A:
- for(i = 0;i<RadioA_ArrayLength; i=i+2){
+ for (i = 0; i < RadioA_ArrayLength; i = i+2) {
- if(rtl819XRadioA_Array[i] == 0xfe){
- mdelay(100);
- continue;
+ if (rtl819XRadioA_Array[i] == 0xfe) {
+ mdelay(100);
+ continue;
}
- rtl8192_phy_SetRFReg(dev, eRFPath, rtl819XRadioA_Array[i], bMask12Bits, rtl819XRadioA_Array[i+1]);
+ rtl8192_phy_SetRFReg(dev, eRFPath,
+ rtl819XRadioA_Array[i],
+ bMask12Bits,
+ rtl819XRadioA_Array[i+1]);
mdelay(1);
}
break;
case RF90_PATH_B:
- for(i = 0;i<RadioB_ArrayLength; i=i+2){
+ for (i = 0; i < RadioB_ArrayLength; i = i+2) {
- if(rtl819XRadioB_Array[i] == 0xfe){
- mdelay(100);
- continue;
+ if (rtl819XRadioB_Array[i] == 0xfe) {
+ mdelay(100);
+ continue;
}
- rtl8192_phy_SetRFReg(dev, eRFPath, rtl819XRadioB_Array[i], bMask12Bits, rtl819XRadioB_Array[i+1]);
+ rtl8192_phy_SetRFReg(dev, eRFPath,
+ rtl819XRadioB_Array[i],
+ bMask12Bits,
+ rtl819XRadioB_Array[i+1]);
mdelay(1);
}
break;
case RF90_PATH_C:
- for(i = 0;i<RadioC_ArrayLength; i=i+2){
+ for (i = 0; i < RadioC_ArrayLength; i = i+2) {
- if(rtl819XRadioC_Array[i] == 0xfe){
- mdelay(100);
- continue;
+ if (rtl819XRadioC_Array[i] == 0xfe) {
+ mdelay(100);
+ continue;
}
- rtl8192_phy_SetRFReg(dev, eRFPath, rtl819XRadioC_Array[i], bMask12Bits, rtl819XRadioC_Array[i+1]);
+ rtl8192_phy_SetRFReg(dev, eRFPath,
+ rtl819XRadioC_Array[i],
+ bMask12Bits,
+ rtl819XRadioC_Array[i+1]);
mdelay(1);
}
break;
case RF90_PATH_D:
- for(i = 0;i<RadioD_ArrayLength; i=i+2){
+ for (i = 0; i < RadioD_ArrayLength; i = i+2) {
- if(rtl819XRadioD_Array[i] == 0xfe){
- mdelay(100);
- continue;
+ if (rtl819XRadioD_Array[i] == 0xfe) {
+ mdelay(100);
+ continue;
}
- rtl8192_phy_SetRFReg(dev, eRFPath, rtl819XRadioD_Array[i], bMask12Bits, rtl819XRadioD_Array[i+1]);
+ rtl8192_phy_SetRFReg(dev, eRFPath,
+ rtl819XRadioD_Array[i],
+ bMask12Bits,
+ rtl819XRadioD_Array[i+1]);
mdelay(1);
}
@@ -1012,22 +1061,22 @@ u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device* dev, RF90_RADIO_PATH_E
return ret;
}
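All four switch arms above walk the same table format: each rtl819XRadio?_Array is a flat sequence of {address, value} pairs, where an address of 0xfe is a sentinel meaning "sleep 100 ms" rather than a register write. A hypothetical helper (not part of this patch) makes the shared pattern explicit:

	/* Hypothetical helper, shown only to illustrate the table layout the
	 * four RF paths share; the patch keeps the four explicit loops. */
	static void rf_write_array(struct net_device *dev,
				   RF90_RADIO_PATH_E path,
				   const u32 *array, u32 len)
	{
		u32 i;

		for (i = 0; i < len; i += 2) {
			if (array[i] == 0xfe) {		/* delay marker */
				mdelay(100);
				continue;
			}
			rtl8192_phy_SetRFReg(dev, path, array[i],
					     bMask12Bits, array[i + 1]);
			mdelay(1);
		}
	}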
+
/******************************************************************************
- *function: This function set Tx Power of the channel
- * input: struct net_device *dev
- * u8 channel
- * output: none
- * return: none
- * Note:
- * ***************************************************************************/
+ * function: This function sets Tx Power of the channel
+ * input: net_device *dev
+ * u8 channel
+ * output: none
+ * return: none
+ * notice:
+ ******************************************************************************/
void rtl8192_SetTxPowerLevel(struct net_device *dev, u8 channel)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 powerlevel = priv->TxPowerLevelCCK[channel-1];
u8 powerlevelOFDM24G = priv->TxPowerLevelOFDM24G[channel-1];
- switch(priv->rf_chip)
- {
+ switch (priv->rf_chip) {
case RF_8225:
#ifdef TO_DO_LIST
PHY_SetRF8225CckTxPower(Adapter, powerlevel);
@@ -1043,136 +1092,132 @@ void rtl8192_SetTxPowerLevel(struct net_device *dev, u8 channel)
case RF_8258:
break;
default:
- RT_TRACE(COMP_ERR, "unknown rf chip ID in rtl8192_SetTxPowerLevel()\n");
+ RT_TRACE(COMP_ERR, "unknown rf chip ID in %s()\n", __func__);
break;
}
return;
}
/******************************************************************************
- *function: This function set RF state on or off
- * input: struct net_device *dev
- * RT_RF_POWER_STATE eRFPowerState //Power State to set
- * output: none
- * return: none
- * Note:
- * ***************************************************************************/
-bool rtl8192_SetRFPowerState(struct net_device *dev, RT_RF_POWER_STATE eRFPowerState)
+ * function: This function sets RF state on or off
+ * input: net_device *dev
+ * RT_RF_POWER_STATE eRFPowerState //Power State to set
+ * output: none
+ * return: none
+ * notice:
+ *****************************************************************************/
+bool rtl8192_SetRFPowerState(struct net_device *dev,
+ RT_RF_POWER_STATE eRFPowerState)
{
bool bResult = true;
-// u8 eRFPath;
struct r8192_priv *priv = ieee80211_priv(dev);
- if(eRFPowerState == priv->ieee80211->eRFPowerState)
+ if (eRFPowerState == priv->ieee80211->eRFPowerState)
return false;
- if(priv->SetRFPowerStateInProgress == true)
+ if (priv->SetRFPowerStateInProgress == true)
return false;
priv->SetRFPowerStateInProgress = true;
- switch(priv->rf_chip)
- {
- case RF_8256:
- switch( eRFPowerState )
- {
- case eRfOn:
- //RF-A, RF-B
- //enable RF-Chip A/B
- rtl8192_setBBreg(dev, rFPGA0_XA_RFInterfaceOE, BIT4, 0x1); // 0x860[4]
- //analog to digital on
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0x300, 0x3);// 0x88c[9:8]
- //digital to analog on
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x18, 0x3); // 0x880[4:3]
- //rx antenna on
- rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable, 0x3, 0x3);// 0xc04[1:0]
- //rx antenna on
- rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0x3, 0x3);// 0xd04[1:0]
- //analog to digital part2 on
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x60, 0x3); // 0x880[6:5]
+ switch (priv->rf_chip) {
+ case RF_8256:
+ switch (eRFPowerState) {
+ case eRfOn:
+ /* RF-A, RF-B */
+ /* enable RF-Chip A/B - 0x860[4] */
+ rtl8192_setBBreg(dev, rFPGA0_XA_RFInterfaceOE, BIT4,
+ 0x1);
+ /* analog to digital on - 0x88c[9:8] */
+ rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0x300,
+ 0x3);
+ /* digital to analog on - 0x880[4:3] */
+ rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x18,
+ 0x3);
+ /* rx antenna on - 0xc04[1:0] */
+ rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable, 0x3, 0x3);
+ /* rx antenna on - 0xd04[1:0] */
+ rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0x3, 0x3);
+ /* analog to digital part2 on - 0x880[6:5] */
+ rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x60,
+ 0x3);
- break;
+ break;
- case eRfSleep:
+ case eRfSleep:
- break;
-
- case eRfOff:
- //RF-A, RF-B
- //disable RF-Chip A/B
- rtl8192_setBBreg(dev, rFPGA0_XA_RFInterfaceOE, BIT4, 0x0); // 0x860[4]
- //analog to digital off, for power save
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0xf00, 0x0);// 0x88c[11:8]
- //digital to analog off, for power save
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x18, 0x0); // 0x880[4:3]
- //rx antenna off
- rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable, 0xf, 0x0);// 0xc04[3:0]
- //rx antenna off
- rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0xf, 0x0);// 0xd04[3:0]
- //analog to digital part2 off, for power save
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x60, 0x0); // 0x880[6:5]
+ break;
- break;
+ case eRfOff:
+ /* RF-A, RF-B */
+ /* disable RF-Chip A/B - 0x860[4] */
+ rtl8192_setBBreg(dev, rFPGA0_XA_RFInterfaceOE, BIT4,
+ 0x0);
+ /* analog to digital off, for power save */
+ rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0xf00,
+ 0x0); /* 0x88c[11:8] */
+ /* digital to analog off, for power save - 0x880[4:3] */
+ rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x18,
+ 0x0);
+ /* rx antenna off - 0xc04[3:0] */
+ rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable, 0xf, 0x0);
+ /* rx antenna off - 0xd04[3:0] */
+ rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0xf, 0x0);
+ /* analog to digital part2 off, for power save */
+ rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x60,
+ 0x0); /* 0x880[6:5] */
- default:
- bResult = false;
- RT_TRACE(COMP_ERR, "SetRFPowerState819xUsb(): unknow state to set: 0x%X!!!\n", eRFPowerState);
- break;
- }
break;
+
default:
- RT_TRACE(COMP_ERR, "Not support rf_chip(%x)\n", priv->rf_chip);
+ bResult = false;
+ RT_TRACE(COMP_ERR, "%s(): unknown state to set: 0x%X\n",
+ __func__, eRFPowerState);
break;
+ }
+ break;
+ default:
+ RT_TRACE(COMP_ERR, "Not support rf_chip(%x)\n", priv->rf_chip);
+ break;
}
#ifdef TO_DO_LIST
- if(bResult)
- {
- // Update current RF state variable.
+ if (bResult) {
+ /* Update current RF state variable. */
pHalData->eRFPowerState = eRFPowerState;
- switch(pHalData->RFChipID )
- {
- case RF_8256:
- switch(pHalData->eRFPowerState)
- {
- case eRfOff:
- //
- //If Rf off reason is from IPS, Led should blink with no link, by Maddest 071015
- //
- if(pMgntInfo->RfOffReason==RF_CHANGE_BY_IPS )
- {
- Adapter->HalFunc.LedControlHandler(Adapter,LED_CTL_NO_LINK);
- }
- else
- {
- // Turn off LED if RF is not ON.
- Adapter->HalFunc.LedControlHandler(Adapter, LED_CTL_POWER_OFF);
- }
- break;
-
- case eRfOn:
- // Turn on RF we are still linked, which might happen when
- // we quickly turn off and on HW RF. 2006.05.12, by rcnjko.
- if( pMgntInfo->bMediaConnect == TRUE )
- {
- Adapter->HalFunc.LedControlHandler(Adapter, LED_CTL_LINK);
- }
- else
- {
- // Turn off LED if RF is not ON.
- Adapter->HalFunc.LedControlHandler(Adapter, LED_CTL_NO_LINK);
- }
- break;
-
- default:
- // do nothing.
- break;
- }// Switch RF state
+ switch (pHalData->RFChipID) {
+ case RF_8256:
+ switch (pHalData->eRFPowerState) {
+ case eRfOff:
+ /* If Rf off reason is from IPS,
+ LED should blink with no link */
+ if (pMgntInfo->RfOffReason == RF_CHANGE_BY_IPS)
+ Adapter->HalFunc.LedControlHandler(Adapter, LED_CTL_NO_LINK);
+ else
+ /* Turn off LED if RF is not ON. */
+ Adapter->HalFunc.LedControlHandler(Adapter, LED_CTL_POWER_OFF);
+ break;
+
+ case eRfOn:
+ /* RF was turned back on while we are still linked, which
+ might happen when we quickly turn HW RF off and on.
+ */
+ if (pMgntInfo->bMediaConnect == TRUE)
+ Adapter->HalFunc.LedControlHandler(Adapter, LED_CTL_LINK);
+ else
+ /* Turn off LED if RF is not ON. */
+ Adapter->HalFunc.LedControlHandler(Adapter, LED_CTL_NO_LINK);
break;
- default:
- RT_TRACE(COMP_RF, DBG_LOUD, ("SetRFPowerState8190(): Unknown RF type\n"));
- break;
+ default:
+ break;
}
+ break;
+
+ default:
+ RT_TRACE(COMP_RF, DBG_LOUD, "%s(): Unknown RF type\n",
+ __func__);
+ break;
+ }
}
#endif
@@ -1181,40 +1226,32 @@ bool rtl8192_SetRFPowerState(struct net_device *dev, RT_RF_POWER_STATE eRFPowerS
return bResult;
}
-/****************************************************************************************
- *function: This function set command table variable(struct SwChnlCmd).
- * input: SwChnlCmd* CmdTable //table to be set.
- * u32 CmdTableIdx //variable index in table to be set
- * u32 CmdTableSz //table size.
- * SwChnlCmdID CmdID //command ID to set.
- * u32 Para1
- * u32 Para2
- * u32 msDelay
- * output:
- * return: true if finished, false otherwise
- * Note:
- * ************************************************************************************/
-u8 rtl8192_phy_SetSwChnlCmdArray(
- SwChnlCmd* CmdTable,
- u32 CmdTableIdx,
- u32 CmdTableSz,
- SwChnlCmdID CmdID,
- u32 Para1,
- u32 Para2,
- u32 msDelay
- )
+/******************************************************************************
+ * function: This function sets command table variable (struct SwChnlCmd).
+ * input: SwChnlCmd *CmdTable //table to be set
+ * u32 CmdTableIdx //variable index in table to be set
+ * u32 CmdTableSz //table size
+ * SwChnlCmdID CmdID //command ID to set
+ * u32 Para1
+ * u32 Para2
+ * u32 msDelay
+ * output:
+ * return: true if finished, false otherwise
+ * notice:
+ ******************************************************************************/
+u8 rtl8192_phy_SetSwChnlCmdArray(SwChnlCmd *CmdTable, u32 CmdTableIdx,
+ u32 CmdTableSz, SwChnlCmdID CmdID, u32 Para1,
+ u32 Para2, u32 msDelay)
{
- SwChnlCmd* pCmd;
+ SwChnlCmd *pCmd;
- if(CmdTable == NULL)
- {
- RT_TRACE(COMP_ERR, "phy_SetSwChnlCmdArray(): CmdTable cannot be NULL.\n");
+ if (CmdTable == NULL) {
+ RT_TRACE(COMP_ERR, "%s(): CmdTable cannot be NULL\n", __func__);
return false;
}
- if(CmdTableIdx >= CmdTableSz)
- {
- RT_TRACE(COMP_ERR, "phy_SetSwChnlCmdArray(): Access invalid index, please check size of the table, CmdTableIdx:%d, CmdTableSz:%d\n",
- CmdTableIdx, CmdTableSz);
+ if (CmdTableIdx >= CmdTableSz) {
+ RT_TRACE(COMP_ERR, "%s(): Access invalid index, please check size of the table, CmdTableIdx:%d, CmdTableSz:%d\n",
+ __func__, CmdTableIdx, CmdTableSz);
return false;
}
@@ -1226,455 +1263,442 @@ u8 rtl8192_phy_SetSwChnlCmdArray(
return true;
}
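The hunk context skips over the part of the function that actually fills the entry. Judging from the SwChnlCmd fields declared in r819xU_phy.h further down in this patch, the omitted lines presumably amount to no more than the following sketch (shown for orientation only, not taken from the patch):

	/* Presumed body of the elided section: store the command at the
	 * requested slot of the caller-provided table. */
	pCmd = CmdTable + CmdTableIdx;
	pCmd->CmdID = CmdID;
	pCmd->Para1 = Para1;
	pCmd->Para2 = Para2;
	pCmd->msDelay = msDelay;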
+
/******************************************************************************
- *function: This function set channel step by step
- * input: struct net_device *dev
- * u8 channel
- * u8* stage //3 stages
- * u8* step //
- * u32* delay //whether need to delay
- * output: store new stage, step and delay for next step(combine with function above)
- * return: true if finished, false otherwise
- * Note: Wait for simpler function to replace it //wb
- * ***************************************************************************/
-u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel, u8* stage, u8* step, u32* delay)
+ * function: This function sets channel step by step
+ * input: net_device *dev
+ * u8 channel
+ * u8 *stage //3 stages
+ * u8 *step
+ * u32 *delay //whether need to delay
+ * output: store new stage, step and delay for next step
+ * (combine with function above)
+ * return: true if finished, false otherwise
+ * notice: Wait for simpler function to replace it
+ *****************************************************************************/
+u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel, u8 *stage,
+ u8 *step, u32 *delay)
{
struct r8192_priv *priv = ieee80211_priv(dev);
-// PCHANNEL_ACCESS_SETTING pChnlAccessSetting;
- SwChnlCmd PreCommonCmd[MAX_PRECMD_CNT];
- u32 PreCommonCmdCnt;
- SwChnlCmd PostCommonCmd[MAX_POSTCMD_CNT];
- u32 PostCommonCmdCnt;
- SwChnlCmd RfDependCmd[MAX_RFDEPENDCMD_CNT];
- u32 RfDependCmdCnt;
- SwChnlCmd *CurrentCmd = NULL;
- //RF90_RADIO_PATH_E eRFPath;
+ SwChnlCmd PreCommonCmd[MAX_PRECMD_CNT];
+ u32 PreCommonCmdCnt;
+ SwChnlCmd PostCommonCmd[MAX_POSTCMD_CNT];
+ u32 PostCommonCmdCnt;
+ SwChnlCmd RfDependCmd[MAX_RFDEPENDCMD_CNT];
+ u32 RfDependCmdCnt;
+ SwChnlCmd *CurrentCmd = NULL;
u8 eRFPath;
-// u32 RfRetVal;
-// u8 RetryCnt;
-
- RT_TRACE(COMP_CH, "====>%s()====stage:%d, step:%d, channel:%d\n", __FUNCTION__, *stage, *step, channel);
-// RT_ASSERT(IsLegalChannel(Adapter, channel), ("illegal channel: %d\n", channel));
- if (!IsLegalChannel(priv->ieee80211, channel))
- {
- RT_TRACE(COMP_ERR, "=============>set to illegal channel:%d\n", channel);
- return true; //return true to tell upper caller function this channel setting is finished! Or it will in while loop.
+
+ RT_TRACE(COMP_CH, "%s() stage: %d, step: %d, channel: %d\n",
+ __func__, *stage, *step, channel);
+ if (!IsLegalChannel(priv->ieee80211, channel)) {
+ RT_TRACE(COMP_ERR, "set to illegal channel: %d\n", channel);
+ /* Return true to tell the upper caller this channel
setting is finished! Otherwise the caller stays in its while loop. */
+ return true;
}
-//FIXME:need to check whether channel is legal or not here.WB
-
-
- //for(eRFPath = RF90_PATH_A; eRFPath <pHalData->NumTotalRFPath; eRFPath++)
-// for(eRFPath = 0; eRFPath <RF90_PATH_MAX; eRFPath++)
-// {
-// if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
-// continue;
- // <1> Fill up pre common command.
- PreCommonCmdCnt = 0;
- rtl8192_phy_SetSwChnlCmdArray(PreCommonCmd, PreCommonCmdCnt++, MAX_PRECMD_CNT,
- CmdID_SetTxPowerLevel, 0, 0, 0);
- rtl8192_phy_SetSwChnlCmdArray(PreCommonCmd, PreCommonCmdCnt++, MAX_PRECMD_CNT,
- CmdID_End, 0, 0, 0);
-
- // <2> Fill up post common command.
- PostCommonCmdCnt = 0;
-
- rtl8192_phy_SetSwChnlCmdArray(PostCommonCmd, PostCommonCmdCnt++, MAX_POSTCMD_CNT,
- CmdID_End, 0, 0, 0);
-
- // <3> Fill up RF dependent command.
- RfDependCmdCnt = 0;
- switch( priv->rf_chip )
- {
- case RF_8225:
- if (!(channel >= 1 && channel <= 14))
- {
- RT_TRACE(COMP_ERR, "illegal channel for Zebra 8225: %d\n", channel);
- return true;
- }
- rtl8192_phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT,
- CmdID_RF_WriteReg, rZebra1_Channel, RF_CHANNEL_TABLE_ZEBRA[channel], 10);
- rtl8192_phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT,
- CmdID_End, 0, 0, 0);
- break;
+ /* FIXME: need to check whether channel is legal or not here */
- case RF_8256:
- // TEST!! This is not the table for 8256!!
- if (!(channel >= 1 && channel <= 14))
- {
- RT_TRACE(COMP_ERR, "illegal channel for Zebra 8256: %d\n", channel);
- return true;
- }
- rtl8192_phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT,
- CmdID_RF_WriteReg, rZebra1_Channel, channel, 10);
- rtl8192_phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT,
- CmdID_End, 0, 0, 0);
- break;
- case RF_8258:
- break;
+ /* <1> Fill up pre common command. */
+ PreCommonCmdCnt = 0;
+ rtl8192_phy_SetSwChnlCmdArray(PreCommonCmd, PreCommonCmdCnt++,
+ MAX_PRECMD_CNT, CmdID_SetTxPowerLevel,
+ 0, 0, 0);
+ rtl8192_phy_SetSwChnlCmdArray(PreCommonCmd, PreCommonCmdCnt++,
+ MAX_PRECMD_CNT, CmdID_End, 0, 0, 0);
- default:
- RT_TRACE(COMP_ERR, "Unknown RFChipID: %d\n", priv->rf_chip);
+ /* <2> Fill up post common command. */
+ PostCommonCmdCnt = 0;
+
+ rtl8192_phy_SetSwChnlCmdArray(PostCommonCmd, PostCommonCmdCnt++,
+ MAX_POSTCMD_CNT, CmdID_End, 0, 0, 0);
+
+ /* <3> Fill up RF dependent command. */
+ RfDependCmdCnt = 0;
+ switch (priv->rf_chip) {
+ case RF_8225:
+ if (!(channel >= 1 && channel <= 14)) {
+ RT_TRACE(COMP_ERR,
+ "illegal channel for Zebra 8225: %d\n",
+ channel);
return true;
- break;
}
+ rtl8192_phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++,
+ MAX_RFDEPENDCMD_CNT,
+ CmdID_RF_WriteReg,
+ rZebra1_Channel,
+ RF_CHANNEL_TABLE_ZEBRA[channel],
+ 10);
+ rtl8192_phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++,
+ MAX_RFDEPENDCMD_CNT,
+ CmdID_End, 0, 0, 0);
+ break;
+ case RF_8256:
+ /* TEST!! This is not the table for 8256!! */
+ if (!(channel >= 1 && channel <= 14)) {
+ RT_TRACE(COMP_ERR,
+ "illegal channel for Zebra 8256: %d\n",
+ channel);
+ return true;
+ }
+ rtl8192_phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++,
+ MAX_RFDEPENDCMD_CNT,
+ CmdID_RF_WriteReg,
+ rZebra1_Channel, channel, 10);
+ rtl8192_phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++,
+ MAX_RFDEPENDCMD_CNT,
+ CmdID_End, 0, 0, 0);
+ break;
- do{
- switch(*stage)
- {
- case 0:
- CurrentCmd=&PreCommonCmd[*step];
- break;
- case 1:
- CurrentCmd=&RfDependCmd[*step];
- break;
- case 2:
- CurrentCmd=&PostCommonCmd[*step];
- break;
- }
+ case RF_8258:
+ break;
- if(CurrentCmd->CmdID==CmdID_End)
- {
- if((*stage)==2)
- {
- (*delay)=CurrentCmd->msDelay;
- return true;
- }
- else
- {
- (*stage)++;
- (*step)=0;
- continue;
- }
- }
+ default:
+ RT_TRACE(COMP_ERR, "Unknown RFChipID: %d\n", priv->rf_chip);
+ return true;
+ break;
+ }
- switch(CurrentCmd->CmdID)
- {
- case CmdID_SetTxPowerLevel:
- if(priv->card_8192_version == (u8)VERSION_819xU_A) //xiong: consider it later!
- rtl8192_SetTxPowerLevel(dev,channel);
- break;
- case CmdID_WritePortUlong:
- write_nic_dword(dev, CurrentCmd->Para1, CurrentCmd->Para2);
- break;
- case CmdID_WritePortUshort:
- write_nic_word(dev, CurrentCmd->Para1, (u16)CurrentCmd->Para2);
- break;
- case CmdID_WritePortUchar:
- write_nic_byte(dev, CurrentCmd->Para1, (u8)CurrentCmd->Para2);
- break;
- case CmdID_RF_WriteReg:
- for(eRFPath = 0; eRFPath < RF90_PATH_MAX; eRFPath++)
- {
- rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, CurrentCmd->Para1, bZebra1_ChannelNum, CurrentCmd->Para2);
- }
- break;
- default:
- break;
+
+ do {
+ switch (*stage) {
+ case 0:
+ CurrentCmd = &PreCommonCmd[*step];
+ break;
+ case 1:
+ CurrentCmd = &RfDependCmd[*step];
+ break;
+ case 2:
+ CurrentCmd = &PostCommonCmd[*step];
+ break;
+ }
+
+ if (CurrentCmd->CmdID == CmdID_End) {
+ if ((*stage) == 2) {
+ (*delay) = CurrentCmd->msDelay;
+ return true;
+ } else {
+ (*stage)++;
+ (*step) = 0;
+ continue;
}
+ }
+ switch (CurrentCmd->CmdID) {
+ case CmdID_SetTxPowerLevel:
+ if (priv->card_8192_version == (u8)VERSION_819xU_A)
+ /* consider it later! */
+ rtl8192_SetTxPowerLevel(dev, channel);
+ break;
+ case CmdID_WritePortUlong:
+ write_nic_dword(dev, CurrentCmd->Para1,
+ CurrentCmd->Para2);
+ break;
+ case CmdID_WritePortUshort:
+ write_nic_word(dev, CurrentCmd->Para1,
+ (u16)CurrentCmd->Para2);
break;
- }while(true);
-// }/*for(Number of RF paths)*/
+ case CmdID_WritePortUchar:
+ write_nic_byte(dev, CurrentCmd->Para1,
+ (u8)CurrentCmd->Para2);
+ break;
+ case CmdID_RF_WriteReg:
+ for (eRFPath = 0; eRFPath < RF90_PATH_MAX; eRFPath++) {
+ rtl8192_phy_SetRFReg(dev,
+ (RF90_RADIO_PATH_E)eRFPath,
+ CurrentCmd->Para1,
+ bZebra1_ChannelNum,
+ CurrentCmd->Para2);
+ }
+ break;
+ default:
+ break;
+ }
- (*delay)=CurrentCmd->msDelay;
+ break;
+ } while (true);
+
+ (*delay) = CurrentCmd->msDelay;
(*step)++;
return false;
}
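rtl8192_phy_SwChnlStepByStep() is a resumable state machine: each call executes one command, returns false with *stage, *step and *delay updated for the next call, and returns true once stage 2 reaches CmdID_End. A sketch of a caller that honours the returned delay, reusing the surrounding names (dev, channel, priv) as assumptions; the in-tree caller just below loops without sleeping, and this patch drops an already commented-out msleep():

	/* Sketch only: drive the channel-switch state machine to completion,
	 * sleeping for the delay the previous step requested. */
	u8 stage = 0, step = 0;
	u32 delay = 0;

	while (!rtl8192_phy_SwChnlStepByStep(dev, channel, &stage,
					     &step, &delay)) {
		if (delay)
			msleep(delay);
		if (!priv->up)
			break;
	}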
/******************************************************************************
- *function: This function does actually set channel work
- * input: struct net_device *dev
- * u8 channel
- * output: none
- * return: noin
- * Note: We should not call this function directly
- * ***************************************************************************/
+ * function: This function does the actual channel-setting work
+ * input: net_device *dev
+ * u8 channel
+ * output: none
+ * return: none
+ * notice: We should not call this function directly
+ *****************************************************************************/
void rtl8192_phy_FinishSwChnlNow(struct net_device *dev, u8 channel)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u32 delay = 0;
- while(!rtl8192_phy_SwChnlStepByStep(dev,channel,&priv->SwChnlStage,&priv->SwChnlStep,&delay))
- {
- // if(delay>0)
- // msleep(delay);//or mdelay? need further consideration
- if(!priv->up)
+ while (!rtl8192_phy_SwChnlStepByStep(dev, channel, &priv->SwChnlStage,
+ &priv->SwChnlStep, &delay)) {
+ if (!priv->up)
break;
}
}
+
/******************************************************************************
- *function: Callback routine of the work item for switch channel.
- * input:
+ * function: Callback routine of the work item for switch channel.
+ * input: net_device *dev
*
- * output: none
- * return: noin
- * ***************************************************************************/
+ * output: none
+ * return: none
+ *****************************************************************************/
void rtl8192_SwChnl_WorkItem(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- RT_TRACE(COMP_CH, "==> SwChnlCallback819xUsbWorkItem(), chan:%d\n", priv->chan);
+ RT_TRACE(COMP_CH, "==> SwChnlCallback819xUsbWorkItem(), chan:%d\n",
+ priv->chan);
- rtl8192_phy_FinishSwChnlNow(dev , priv->chan);
+ rtl8192_phy_FinishSwChnlNow(dev, priv->chan);
RT_TRACE(COMP_CH, "<== SwChnlCallback819xUsbWorkItem()\n");
}
/******************************************************************************
- *function: This function scheduled actual work item to set channel
- * input: net_device dev
- * u8 channel //channel to set
- * output: none
- * return: return code show if workitem is scheduled(1:pass, 0:fail)
- * Note: Delay may be required for RF configuration
- * ***************************************************************************/
-u8 rtl8192_phy_SwChnl(struct net_device* dev, u8 channel)
+ * function: This function schedules the actual work item to set channel
+ * input: net_device *dev
+ * u8 channel //channel to set
+ * output: none
+ * return: return code shows whether the work item is scheduled (1: pass, 0: fail)
+ * notice: Delay may be required for RF configuration
+ ******************************************************************************/
+u8 rtl8192_phy_SwChnl(struct net_device *dev, u8 channel)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- RT_TRACE(COMP_CH, "=====>%s(), SwChnlInProgress:%d\n", __FUNCTION__, priv->SwChnlInProgress);
- if(!priv->up)
+ RT_TRACE(COMP_CH, "%s(), SwChnlInProgress: %d\n", __func__,
+ priv->SwChnlInProgress);
+ if (!priv->up)
return false;
- if(priv->SwChnlInProgress)
+ if (priv->SwChnlInProgress)
return false;
-// if(pHalData->SetBWModeInProgress)
-// return;
-if (0) //to test current channel from RF reg 0x7.
-{
- u8 eRFPath;
- for(eRFPath = 0; eRFPath < 2; eRFPath++){
- printk("====>set channel:%x\n",rtl8192_phy_QueryRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x7, bZebra1_ChannelNum));
- udelay(10);
- }
-}
- //--------------------------------------------
- switch(priv->ieee80211->mode)
- {
+ /* -------------------------------------------- */
+ switch (priv->ieee80211->mode) {
case WIRELESS_MODE_A:
case WIRELESS_MODE_N_5G:
- if (channel<=14){
- RT_TRACE(COMP_ERR, "WIRELESS_MODE_A but channel<=14");
+ if (channel <= 14) {
+ RT_TRACE(COMP_ERR, "WIRELESS_MODE_A but channel<=14\n");
return false;
}
break;
case WIRELESS_MODE_B:
- if (channel>14){
- RT_TRACE(COMP_ERR, "WIRELESS_MODE_B but channel>14");
+ if (channel > 14) {
+ RT_TRACE(COMP_ERR, "WIRELESS_MODE_B but channel>14\n");
return false;
}
break;
case WIRELESS_MODE_G:
case WIRELESS_MODE_N_24G:
- if (channel>14){
- RT_TRACE(COMP_ERR, "WIRELESS_MODE_G but channel>14");
+ if (channel > 14) {
+ RT_TRACE(COMP_ERR, "WIRELESS_MODE_G but channel>14\n");
return false;
}
break;
}
- //--------------------------------------------
+ /* -------------------------------------------- */
priv->SwChnlInProgress = true;
- if(channel == 0)
+ if (channel == 0)
channel = 1;
- priv->chan=channel;
+ priv->chan = channel;
- priv->SwChnlStage=0;
- priv->SwChnlStep=0;
-// schedule_work(&(priv->SwChnlWorkItem));
-// rtl8192_SwChnl_WorkItem(dev);
- if(priv->up) {
-// queue_work(priv->priv_wq,&(priv->SwChnlWorkItem));
- rtl8192_SwChnl_WorkItem(dev);
- }
+ priv->SwChnlStage = 0;
+ priv->SwChnlStep = 0;
+ if (priv->up)
+ rtl8192_SwChnl_WorkItem(dev);
priv->SwChnlInProgress = false;
return true;
}
-
-//
/******************************************************************************
- *function: Callback routine of the work item for set bandwidth mode.
- * input: struct net_device *dev
- * HT_CHANNEL_WIDTH Bandwidth //20M or 40M
- * HT_EXTCHNL_OFFSET Offset //Upper, Lower, or Don't care
- * output: none
- * return: none
- * Note: I doubt whether SetBWModeInProgress flag is necessary as we can
- * test whether current work in the queue or not.//do I?
- * ***************************************************************************/
+ * function: Callback routine of the work item for setting bandwidth mode.
+ * input: net_device *dev
+ * output: none
+ * return: none
+ * notice: I doubt whether the SetBWModeInProgress flag is necessary as we
+ * can test whether the current work is in the queue or not.
+ *****************************************************************************/
void rtl8192_SetBWModeWorkItem(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 regBwOpMode;
- RT_TRACE(COMP_SWBW, "==>rtl8192_SetBWModeWorkItem() Switch to %s bandwidth\n", \
- priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20?"20MHz":"40MHz")
+ RT_TRACE(COMP_SWBW, "%s() Switch to %s bandwidth\n", __func__,
+ priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 ? "20MHz" : "40MHz");
- if(priv->rf_chip == RF_PSEUDO_11N)
- {
- priv->SetBWModeInProgress= false;
+ if (priv->rf_chip == RF_PSEUDO_11N) {
+ priv->SetBWModeInProgress = false;
return;
}
- //<1>Set MAC register
- regBwOpMode = read_nic_byte(dev, BW_OPMODE);
+ /* <1> Set MAC register */
+ read_nic_byte(dev, BW_OPMODE, &regBwOpMode);
- switch(priv->CurrentChannelBW)
- {
- case HT_CHANNEL_WIDTH_20:
- regBwOpMode |= BW_OPMODE_20MHZ;
- // 2007/02/07 Mark by Emily because we have not verify whether this register works
- write_nic_byte(dev, BW_OPMODE, regBwOpMode);
- break;
+ switch (priv->CurrentChannelBW) {
+ case HT_CHANNEL_WIDTH_20:
+ regBwOpMode |= BW_OPMODE_20MHZ;
+ /* We have not verified whether this register works */
+ write_nic_byte(dev, BW_OPMODE, regBwOpMode);
+ break;
- case HT_CHANNEL_WIDTH_20_40:
- regBwOpMode &= ~BW_OPMODE_20MHZ;
- // 2007/02/07 Mark by Emily because we have not verify whether this register works
- write_nic_byte(dev, BW_OPMODE, regBwOpMode);
- break;
+ case HT_CHANNEL_WIDTH_20_40:
+ regBwOpMode &= ~BW_OPMODE_20MHZ;
+ /* We have not verified whether this register works */
+ write_nic_byte(dev, BW_OPMODE, regBwOpMode);
+ break;
- default:
- RT_TRACE(COMP_ERR, "SetChannelBandwidth819xUsb(): unknown Bandwidth: %#X\n",priv->CurrentChannelBW);
- break;
+ default:
+ RT_TRACE(COMP_ERR,
+ "SetChannelBandwidth819xUsb(): unknown Bandwidth: %#X\n",
+ priv->CurrentChannelBW);
+ break;
}
- //<2>Set PHY related register
- switch(priv->CurrentChannelBW)
- {
- case HT_CHANNEL_WIDTH_20:
- // Add by Vivi 20071119
- rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x0);
- rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x0);
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x00100000, 1);
-
- // Correct the tx power for CCK rate in 20M. Suggest by YN, 20071207
- priv->cck_present_attentuation =
- priv->cck_present_attentuation_20Mdefault + priv->cck_present_attentuation_difference;
-
- if(priv->cck_present_attentuation > 22)
- priv->cck_present_attentuation= 22;
- if(priv->cck_present_attentuation< 0)
- priv->cck_present_attentuation = 0;
- RT_TRACE(COMP_INIT, "20M, pHalData->CCKPresentAttentuation = %d\n", priv->cck_present_attentuation);
-
- if(priv->chan == 14 && !priv->bcck_in_ch14)
- {
- priv->bcck_in_ch14 = TRUE;
- dm_cck_txpower_adjust(dev,priv->bcck_in_ch14);
- }
- else if(priv->chan != 14 && priv->bcck_in_ch14)
- {
- priv->bcck_in_ch14 = FALSE;
- dm_cck_txpower_adjust(dev,priv->bcck_in_ch14);
- }
- else
- dm_cck_txpower_adjust(dev,priv->bcck_in_ch14);
+ /* <2> Set PHY related register */
+ switch (priv->CurrentChannelBW) {
+ case HT_CHANNEL_WIDTH_20:
+ rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x0);
+ rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x0);
+ rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1,
+ 0x00100000, 1);
+
+ /* Correct the tx power for CCK rate in 20M. */
+ priv->cck_present_attentuation =
+ priv->cck_present_attentuation_20Mdefault +
+ priv->cck_present_attentuation_difference;
+
+ if (priv->cck_present_attentuation > 22)
+ priv->cck_present_attentuation = 22;
+ if (priv->cck_present_attentuation < 0)
+ priv->cck_present_attentuation = 0;
+ RT_TRACE(COMP_INIT,
+ "20M, pHalData->CCKPresentAttentuation = %d\n",
+ priv->cck_present_attentuation);
+
+ if (priv->chan == 14 && !priv->bcck_in_ch14) {
+ priv->bcck_in_ch14 = TRUE;
+ dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ } else if (priv->chan != 14 && priv->bcck_in_ch14) {
+ priv->bcck_in_ch14 = FALSE;
+ dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ } else {
+ dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ }
- break;
- case HT_CHANNEL_WIDTH_20_40:
- // Add by Vivi 20071119
- rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x1);
- rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x1);
- rtl8192_setBBreg(dev, rCCK0_System, bCCKSideBand, (priv->nCur40MhzPrimeSC>>1));
- rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x00100000, 0);
- rtl8192_setBBreg(dev, rOFDM1_LSTF, 0xC00, priv->nCur40MhzPrimeSC);
- priv->cck_present_attentuation =
- priv->cck_present_attentuation_40Mdefault + priv->cck_present_attentuation_difference;
-
- if(priv->cck_present_attentuation > 22)
- priv->cck_present_attentuation = 22;
- if(priv->cck_present_attentuation < 0)
- priv->cck_present_attentuation = 0;
-
- RT_TRACE(COMP_INIT, "40M, pHalData->CCKPresentAttentuation = %d\n", priv->cck_present_attentuation);
- if(priv->chan == 14 && !priv->bcck_in_ch14)
- {
- priv->bcck_in_ch14 = true;
- dm_cck_txpower_adjust(dev,priv->bcck_in_ch14);
- }
- else if(priv->chan!= 14 && priv->bcck_in_ch14)
- {
- priv->bcck_in_ch14 = false;
- dm_cck_txpower_adjust(dev,priv->bcck_in_ch14);
- }
- else
- dm_cck_txpower_adjust(dev,priv->bcck_in_ch14);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x1);
+ rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x1);
+ rtl8192_setBBreg(dev, rCCK0_System, bCCKSideBand,
+ priv->nCur40MhzPrimeSC>>1);
+ rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x00100000, 0);
+ rtl8192_setBBreg(dev, rOFDM1_LSTF, 0xC00,
+ priv->nCur40MhzPrimeSC);
+ priv->cck_present_attentuation =
+ priv->cck_present_attentuation_40Mdefault +
+ priv->cck_present_attentuation_difference;
+
+ if (priv->cck_present_attentuation > 22)
+ priv->cck_present_attentuation = 22;
+ if (priv->cck_present_attentuation < 0)
+ priv->cck_present_attentuation = 0;
+
+ RT_TRACE(COMP_INIT,
+ "40M, pHalData->CCKPresentAttentuation = %d\n",
+ priv->cck_present_attentuation);
+ if (priv->chan == 14 && !priv->bcck_in_ch14) {
+ priv->bcck_in_ch14 = true;
+ dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ } else if (priv->chan != 14 && priv->bcck_in_ch14) {
+ priv->bcck_in_ch14 = false;
+ dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ } else {
+ dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+ }
- break;
- default:
- RT_TRACE(COMP_ERR, "SetChannelBandwidth819xUsb(): unknown Bandwidth: %#X\n" ,priv->CurrentChannelBW);
- break;
+ break;
+ default:
+ RT_TRACE(COMP_ERR,
+ "SetChannelBandwidth819xUsb(): unknown Bandwidth: %#X\n",
+ priv->CurrentChannelBW);
+ break;
}
- //Skip over setting of J-mode in BB register here. Default value is "None J mode". Emily 20070315
+ /* Skip over setting of J-mode in BB register here.
+ Default value is "None J mode". */
- //<3>Set RF related register
- switch( priv->rf_chip )
- {
- case RF_8225:
+ /* <3> Set RF related register */
+ switch (priv->rf_chip) {
+ case RF_8225:
#ifdef TO_DO_LIST
- PHY_SetRF8225Bandwidth(Adapter, pHalData->CurrentChannelBW);
+ PHY_SetRF8225Bandwidth(Adapter, pHalData->CurrentChannelBW);
#endif
- break;
+ break;
- case RF_8256:
- PHY_SetRF8256Bandwidth(dev, priv->CurrentChannelBW);
- break;
+ case RF_8256:
+ PHY_SetRF8256Bandwidth(dev, priv->CurrentChannelBW);
+ break;
- case RF_8258:
- // PHY_SetRF8258Bandwidth();
- break;
+ case RF_8258:
+ break;
- case RF_PSEUDO_11N:
- // Do Nothing
- break;
+ case RF_PSEUDO_11N:
+ break;
- default:
- RT_TRACE(COMP_ERR, "Unknown RFChipID: %d\n", priv->rf_chip);
- break;
+ default:
+ RT_TRACE(COMP_ERR, "Unknown RFChipID: %d\n", priv->rf_chip);
+ break;
}
- priv->SetBWModeInProgress= false;
+ priv->SetBWModeInProgress = false;
- RT_TRACE(COMP_SWBW, "<==SetBWMode819xUsb(), %d", atomic_read(&(priv->ieee80211->atm_swbw)) );
+ RT_TRACE(COMP_SWBW, "<==SetBWMode819xUsb(), %d\n",
+ atomic_read(&priv->ieee80211->atm_swbw));
}
/******************************************************************************
- *function: This function schedules bandwidth switch work.
- * input: struct net_device *dev
- * HT_CHANNEL_WIDTH Bandwidth //20M or 40M
- * HT_EXTCHNL_OFFSET Offset //Upper, Lower, or Don't care
- * output: none
- * return: none
- * Note: I doubt whether SetBWModeInProgress flag is necessary as we can
- * test whether current work in the queue or not.//do I?
- * ***************************************************************************/
-void rtl8192_SetBWMode(struct net_device *dev, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset)
+ * function: This function schedules bandwidth switch work.
+ * input: net_device *dev
+ * HT_CHANNEL_WIDTH bandwidth //20M or 40M
+ * HT_EXTCHNL_OFFSET offset //Upper, Lower, or Don't care
+ * output: none
+ * return: none
+ * notice: I doubt whether the SetBWModeInProgress flag is necessary as we
+ * can test whether the current work is in the queue or not.
+ *****************************************************************************/
+void rtl8192_SetBWMode(struct net_device *dev, HT_CHANNEL_WIDTH bandwidth,
+ HT_EXTCHNL_OFFSET offset)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- if(priv->SetBWModeInProgress)
+ if (priv->SetBWModeInProgress)
return;
- priv->SetBWModeInProgress= true;
+ priv->SetBWModeInProgress = true;
- priv->CurrentChannelBW = Bandwidth;
+ priv->CurrentChannelBW = bandwidth;
- if(Offset==HT_EXTCHNL_OFFSET_LOWER)
+ if (offset == HT_EXTCHNL_OFFSET_LOWER)
priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_UPPER;
- else if(Offset==HT_EXTCHNL_OFFSET_UPPER)
+ else if (offset == HT_EXTCHNL_OFFSET_UPPER)
priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_LOWER;
else
priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- //queue_work(priv->priv_wq, &(priv->SetBWModeWorkItem));
- // schedule_work(&(priv->SetBWModeWorkItem));
rtl8192_SetBWModeWorkItem(dev);
}
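The offset handling above is intentionally inverted: when the extension (secondary) 20 MHz channel lies below the control channel, the control channel occupies the upper half of the 40 MHz block, and vice versa; anything else maps to the don't-care primary setting. Written as a switch purely for illustration, the mapping is:

	/* Illustration only -- equivalent to the if/else chain above. */
	switch (offset) {
	case HT_EXTCHNL_OFFSET_LOWER:
		priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_UPPER;
		break;
	case HT_EXTCHNL_OFFSET_UPPER:
		priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_LOWER;
		break;
	default:
		priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
		break;
	}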
@@ -1685,88 +1709,110 @@ void InitialGain819xUsb(struct net_device *dev, u8 Operation)
priv->InitialGainOperateType = Operation;
- if(priv->up)
- {
- queue_delayed_work(priv->priv_wq,&priv->initialgain_operate_wq,0);
- }
+ if (priv->up)
+ queue_delayed_work(priv->priv_wq, &priv->initialgain_operate_wq, 0);
}
extern void InitialGainOperateWorkItemCallBack(struct work_struct *work)
{
- struct delayed_work *dwork = container_of(work,struct delayed_work,work);
- struct r8192_priv *priv = container_of(dwork,struct r8192_priv,initialgain_operate_wq);
- struct net_device *dev = priv->ieee80211->dev;
+ struct delayed_work *dwork = container_of(work, struct delayed_work,
+ work);
+ struct r8192_priv *priv = container_of(dwork, struct r8192_priv,
+ initialgain_operate_wq);
+ struct net_device *dev = priv->ieee80211->dev;
#define SCAN_RX_INITIAL_GAIN 0x17
#define POWER_DETECTION_TH 0x08
- u32 BitMask;
+ u32 bitmask;
u8 initial_gain;
u8 Operation;
Operation = priv->InitialGainOperateType;
- switch(Operation)
- {
- case IG_Backup:
- RT_TRACE(COMP_SCAN, "IG_Backup, backup the initial gain.\n");
- initial_gain = SCAN_RX_INITIAL_GAIN;//priv->DefaultInitialGain[0];//
- BitMask = bMaskByte0;
- if(dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); // FW DIG OFF
- priv->initgain_backup.xaagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XAAGCCore1, BitMask);
- priv->initgain_backup.xbagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XBAGCCore1, BitMask);
- priv->initgain_backup.xcagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XCAGCCore1, BitMask);
- priv->initgain_backup.xdagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XDAGCCore1, BitMask);
- BitMask = bMaskByte2;
- priv->initgain_backup.cca = (u8)rtl8192_QueryBBReg(dev, rCCK0_CCA, BitMask);
-
- RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc50 is %x\n",priv->initgain_backup.xaagccore1);
- RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc58 is %x\n",priv->initgain_backup.xbagccore1);
- RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc60 is %x\n",priv->initgain_backup.xcagccore1);
- RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc68 is %x\n",priv->initgain_backup.xdagccore1);
- RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xa0a is %x\n",priv->initgain_backup.cca);
-
- RT_TRACE(COMP_SCAN, "Write scan initial gain = 0x%x \n", initial_gain);
- write_nic_byte(dev, rOFDM0_XAAGCCore1, initial_gain);
- write_nic_byte(dev, rOFDM0_XBAGCCore1, initial_gain);
- write_nic_byte(dev, rOFDM0_XCAGCCore1, initial_gain);
- write_nic_byte(dev, rOFDM0_XDAGCCore1, initial_gain);
- RT_TRACE(COMP_SCAN, "Write scan 0xa0a = 0x%x \n", POWER_DETECTION_TH);
- write_nic_byte(dev, 0xa0a, POWER_DETECTION_TH);
- break;
- case IG_Restore:
- RT_TRACE(COMP_SCAN, "IG_Restore, restore the initial gain.\n");
- BitMask = 0x7f; //Bit0~ Bit6
- if(dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); // FW DIG OFF
-
- rtl8192_setBBreg(dev, rOFDM0_XAAGCCore1, BitMask, (u32)priv->initgain_backup.xaagccore1);
- rtl8192_setBBreg(dev, rOFDM0_XBAGCCore1, BitMask, (u32)priv->initgain_backup.xbagccore1);
- rtl8192_setBBreg(dev, rOFDM0_XCAGCCore1, BitMask, (u32)priv->initgain_backup.xcagccore1);
- rtl8192_setBBreg(dev, rOFDM0_XDAGCCore1, BitMask, (u32)priv->initgain_backup.xdagccore1);
- BitMask = bMaskByte2;
- rtl8192_setBBreg(dev, rCCK0_CCA, BitMask, (u32)priv->initgain_backup.cca);
-
- RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc50 is %x\n",priv->initgain_backup.xaagccore1);
- RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc58 is %x\n",priv->initgain_backup.xbagccore1);
- RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc60 is %x\n",priv->initgain_backup.xcagccore1);
- RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc68 is %x\n",priv->initgain_backup.xdagccore1);
- RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xa0a is %x\n",priv->initgain_backup.cca);
+ switch (Operation) {
+ case IG_Backup:
+ RT_TRACE(COMP_SCAN, "IG_Backup, backup the initial gain.\n");
+ initial_gain = SCAN_RX_INITIAL_GAIN;
+ bitmask = bMaskByte0;
+ if (dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
+ /* FW DIG OFF */
+ rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8);
+ priv->initgain_backup.xaagccore1 =
+ (u8)rtl8192_QueryBBReg(dev, rOFDM0_XAAGCCore1, bitmask);
+ priv->initgain_backup.xbagccore1 =
+ (u8)rtl8192_QueryBBReg(dev, rOFDM0_XBAGCCore1, bitmask);
+ priv->initgain_backup.xcagccore1 =
+ (u8)rtl8192_QueryBBReg(dev, rOFDM0_XCAGCCore1, bitmask);
+ priv->initgain_backup.xdagccore1 =
+ (u8)rtl8192_QueryBBReg(dev, rOFDM0_XDAGCCore1, bitmask);
+ bitmask = bMaskByte2;
+ priv->initgain_backup.cca =
+ (u8)rtl8192_QueryBBReg(dev, rCCK0_CCA, bitmask);
+
+ RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc50 is %x\n",
+ priv->initgain_backup.xaagccore1);
+ RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc58 is %x\n",
+ priv->initgain_backup.xbagccore1);
+ RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc60 is %x\n",
+ priv->initgain_backup.xcagccore1);
+ RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc68 is %x\n",
+ priv->initgain_backup.xdagccore1);
+ RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xa0a is %x\n",
+ priv->initgain_backup.cca);
+
+ RT_TRACE(COMP_SCAN, "Write scan initial gain = 0x%x \n",
+ initial_gain);
+ write_nic_byte(dev, rOFDM0_XAAGCCore1, initial_gain);
+ write_nic_byte(dev, rOFDM0_XBAGCCore1, initial_gain);
+ write_nic_byte(dev, rOFDM0_XCAGCCore1, initial_gain);
+ write_nic_byte(dev, rOFDM0_XDAGCCore1, initial_gain);
+ RT_TRACE(COMP_SCAN, "Write scan 0xa0a = 0x%x \n",
+ POWER_DETECTION_TH);
+ write_nic_byte(dev, 0xa0a, POWER_DETECTION_TH);
+ break;
+ case IG_Restore:
+ RT_TRACE(COMP_SCAN, "IG_Restore, restore the initial gain.\n");
+ bitmask = 0x7f; /* Bit0 ~ Bit6 */
+ if (dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
+ /* FW DIG OFF */
+ rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8);
+
+ rtl8192_setBBreg(dev, rOFDM0_XAAGCCore1, bitmask,
+ (u32)priv->initgain_backup.xaagccore1);
+ rtl8192_setBBreg(dev, rOFDM0_XBAGCCore1, bitmask,
+ (u32)priv->initgain_backup.xbagccore1);
+ rtl8192_setBBreg(dev, rOFDM0_XCAGCCore1, bitmask,
+ (u32)priv->initgain_backup.xcagccore1);
+ rtl8192_setBBreg(dev, rOFDM0_XDAGCCore1, bitmask,
+ (u32)priv->initgain_backup.xdagccore1);
+ bitmask = bMaskByte2;
+ rtl8192_setBBreg(dev, rCCK0_CCA, bitmask,
+ (u32)priv->initgain_backup.cca);
+
+ RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc50 is %x\n",
+ priv->initgain_backup.xaagccore1);
+ RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc58 is %x\n",
+ priv->initgain_backup.xbagccore1);
+ RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc60 is %x\n",
+ priv->initgain_backup.xcagccore1);
+ RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc68 is %x\n",
+ priv->initgain_backup.xdagccore1);
+ RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xa0a is %x\n",
+ priv->initgain_backup.cca);
#ifdef RTL8190P
- SetTxPowerLevel8190(Adapter,priv->CurrentChannel);
+ SetTxPowerLevel8190(Adapter, priv->CurrentChannel);
#endif
#ifdef RTL8192E
- SetTxPowerLevel8190(Adapter,priv->CurrentChannel);
+ SetTxPowerLevel8190(Adapter, priv->CurrentChannel);
#endif
-//#ifdef RTL8192U
- rtl8192_phy_setTxPower(dev,priv->ieee80211->current_network.channel);
-//#endif
+ rtl8192_phy_setTxPower(dev, priv->ieee80211->current_network.channel);
- if(dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
- rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1); // FW DIG ON
- break;
- default:
- RT_TRACE(COMP_SCAN, "Unknown IG Operation. \n");
- break;
+ if (dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
+ /* FW DIG ON */
+ rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1);
+ break;
+ default:
+ RT_TRACE(COMP_SCAN, "Unknown IG Operation. \n");
+ break;
}
}
diff --git a/drivers/staging/rtl8192u/r819xU_phy.h b/drivers/staging/rtl8192u/r819xU_phy.h
index 3e3bc57..f3c352a 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.h
+++ b/drivers/staging/rtl8192u/r819xU_phy.h
@@ -1,12 +1,12 @@
#ifndef _R819XU_PHY_H
#define _R819XU_PHY_H
-/* Channel switch:The size of command tables for switch channel*/
+/* Channel switch: The size of command tables for switch channel */
#define MAX_PRECMD_CNT 16
#define MAX_RFDEPENDCMD_CNT 16
#define MAX_POSTCMD_CNT 16
-typedef enum _SwChnlCmdID{
+typedef enum _SwChnlCmdID {
CmdID_End,
CmdID_SetTxPowerLevel,
CmdID_BBRegWrite10,
@@ -14,16 +14,16 @@ typedef enum _SwChnlCmdID{
CmdID_WritePortUshort,
CmdID_WritePortUchar,
CmdID_RF_WriteReg,
-}SwChnlCmdID;
+} SwChnlCmdID;
-/*--------------------------------Define structure--------------------------------*/
+/* -----------------------Define structure---------------------- */
/* 1. Switch channel related */
-typedef struct _SwChnlCmd{
+typedef struct _SwChnlCmd {
SwChnlCmdID CmdID;
- u32 Para1;
- u32 Para2;
- u32 msDelay;
-}__attribute__ ((packed)) SwChnlCmd;
+ u32 Para1;
+ u32 Para2;
+ u32 msDelay;
+} __attribute__ ((packed)) SwChnlCmd;
extern u32 rtl819XMACPHY_Array_PG[];
extern u32 rtl819XPHY_REG_1T2RArray[];
@@ -33,21 +33,21 @@ extern u32 rtl819XRadioB_Array[];
extern u32 rtl819XRadioC_Array[];
extern u32 rtl819XRadioD_Array[];
-typedef enum _HW90_BLOCK{
+typedef enum _HW90_BLOCK {
HW90_BLOCK_MAC = 0,
HW90_BLOCK_PHY0 = 1,
HW90_BLOCK_PHY1 = 2,
HW90_BLOCK_RF = 3,
- HW90_BLOCK_MAXIMUM = 4, // Never use this
-}HW90_BLOCK_E, *PHW90_BLOCK_E;
+ HW90_BLOCK_MAXIMUM = 4, /* Never use this */
+} HW90_BLOCK_E, *PHW90_BLOCK_E;
-typedef enum _RF90_RADIO_PATH{
- RF90_PATH_A = 0, //Radio Path A
- RF90_PATH_B = 1, //Radio Path B
- RF90_PATH_C = 2, //Radio Path C
- RF90_PATH_D = 3, //Radio Path D
- RF90_PATH_MAX //Max RF number 92 support
-}RF90_RADIO_PATH_E, *PRF90_RADIO_PATH_E;
+typedef enum _RF90_RADIO_PATH {
+ RF90_PATH_A = 0, /* Radio Path A */
+ RF90_PATH_B = 1, /* Radio Path B */
+ RF90_PATH_C = 2, /* Radio Path C */
+ RF90_PATH_D = 3, /* Radio Path D */
+ RF90_PATH_MAX /* Max RF number 92 support */
+} RF90_RADIO_PATH_E, *PRF90_RADIO_PATH_E;
#define bMaskByte0 0xff
#define bMaskByte1 0xff00
@@ -57,33 +57,35 @@ typedef enum _RF90_RADIO_PATH{
#define bMaskLWord 0x0000ffff
#define bMaskDWord 0xffffffff
-//extern u32 rtl8192_CalculateBitShift(u32 dwBitMask);
-extern u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device* dev, u32 eRFPath);
-extern void rtl8192_setBBreg(struct net_device* dev, u32 dwRegAddr, u32 dwBitMask, u32 dwData);
-extern u32 rtl8192_QueryBBReg(struct net_device* dev, u32 dwRegAddr, u32 dwBitMask);
-//extern u32 rtl8192_phy_RFSerialRead(struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 Offset);
-//extern void rtl8192_phy_RFSerialWrite(struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 Offset, u32 Data);
-extern void rtl8192_phy_SetRFReg(struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 RegAddr, u32 BitMask, u32 Data);
-extern u32 rtl8192_phy_QueryRFReg(struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 RegAddr, u32 BitMask);
-extern void rtl8192_phy_configmac(struct net_device* dev);
-extern void rtl8192_phyConfigBB(struct net_device* dev, u8 ConfigType);
-//extern void rtl8192_InitBBRFRegDef(struct net_device* dev);
-extern u8 rtl8192_phy_checkBBAndRF(struct net_device* dev, HW90_BLOCK_E CheckBlock, RF90_RADIO_PATH_E eRFPath);
-//extern void rtl8192_BB_Config_ParaFile(struct net_device* dev);
-extern void rtl8192_BBConfig(struct net_device* dev);
-extern void rtl8192_phy_getTxPower(struct net_device* dev);
-extern void rtl8192_phy_setTxPower(struct net_device* dev, u8 channel);
-extern void rtl8192_phy_RFConfig(struct net_device* dev);
-extern void rtl8192_phy_updateInitGain(struct net_device* dev);
-extern u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device* dev, RF90_RADIO_PATH_E eRFPath);
+extern u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device *dev, u32 eRFPath);
+extern void rtl8192_setBBreg(struct net_device *dev, u32 reg_addr,
+ u32 bitmask, u32 data);
+extern u32 rtl8192_QueryBBReg(struct net_device *dev, u32 reg_addr,
+ u32 bitmask);
+extern void rtl8192_phy_SetRFReg(struct net_device *dev,
+ RF90_RADIO_PATH_E eRFPath, u32 reg_addr, u32 bitmask, u32 data);
+extern u32 rtl8192_phy_QueryRFReg(struct net_device *dev,
+ RF90_RADIO_PATH_E eRFPath, u32 reg_addr, u32 bitmask);
+extern void rtl8192_phy_configmac(struct net_device *dev);
+extern void rtl8192_phyConfigBB(struct net_device *dev, u8 ConfigType);
+extern u8 rtl8192_phy_checkBBAndRF(struct net_device *dev,
+ HW90_BLOCK_E CheckBlock, RF90_RADIO_PATH_E eRFPath);
+extern void rtl8192_BBConfig(struct net_device *dev);
+extern void rtl8192_phy_getTxPower(struct net_device *dev);
+extern void rtl8192_phy_setTxPower(struct net_device *dev, u8 channel);
+extern void rtl8192_phy_RFConfig(struct net_device *dev);
+extern void rtl8192_phy_updateInitGain(struct net_device *dev);
+extern u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
+ RF90_RADIO_PATH_E eRFPath);
-extern u8 rtl8192_phy_SwChnl(struct net_device* dev, u8 channel);
-extern void rtl8192_SetBWMode(struct net_device *dev, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset);
+extern u8 rtl8192_phy_SwChnl(struct net_device *dev, u8 channel);
+extern void rtl8192_SetBWMode(struct net_device *dev,
+ HT_CHANNEL_WIDTH bandwidth, HT_EXTCHNL_OFFSET offset);
extern void rtl8192_SwChnl_WorkItem(struct net_device *dev);
void rtl8192_SetBWModeWorkItem(struct net_device *dev);
-extern bool rtl8192_SetRFPowerState(struct net_device *dev, RT_RF_POWER_STATE eRFPowerState);
-//added by amy
-extern void InitialGain819xUsb(struct net_device *dev, u8 Operation);
+extern bool rtl8192_SetRFPowerState(struct net_device *dev,
+ RT_RF_POWER_STATE eRFPowerState);
+extern void InitialGain819xUsb(struct net_device *dev, u8 Operation);
extern void InitialGainOperateWorkItemCallBack(struct work_struct *work);
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index b65bf5e..6e81ba0 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -238,7 +238,7 @@ struct net_device *r8712_init_netdev(void)
static u32 start_drv_threads(struct _adapter *padapter)
{
- padapter->cmdThread = kthread_run(r8712_cmd_thread, padapter,
+ padapter->cmdThread = kthread_run(r8712_cmd_thread, padapter, "%s",
padapter->pnetdev->name);
if (IS_ERR(padapter->cmdThread) < 0)
return _FAIL;
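The extra "%s" above is not cosmetic: kthread_run()'s third parameter is a printf-style format for the thread name, so passing the netdev name directly would let any '%' in that string be interpreted as a conversion specifier. A minimal sketch of the safe pattern (the wrapper function and its names are illustrative, not from the driver):

	#include <linux/kthread.h>

	/* Illustrative wrapper: always pass externally derived strings as an
	 * argument to a constant "%s" format, never as the format itself. */
	static struct task_struct *start_named_thread(int (*fn)(void *),
						      void *data,
						      const char *name)
	{
		return kthread_run(fn, data, "%s", name);
	}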
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index f034567..d58aa7e 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -1000,12 +1000,8 @@ static int r871x_wx_set_priv(struct net_device *dev,
sprintf(ext, "LINKSPEED %d", mbps);
} else if (0 == strcasecmp(ext, "MACADDR")) {
/*Return mac address of the station */
- /*Macaddr = xx.xx.xx.xx.xx.xx */
- sprintf(ext,
- "MACADDR = %02x.%02x.%02x.%02x.%02x.%02x",
- *(dev->dev_addr), *(dev->dev_addr+1),
- *(dev->dev_addr+2), *(dev->dev_addr+3),
- *(dev->dev_addr+4), *(dev->dev_addr+5));
+ /* Macaddr = xx:xx:xx:xx:xx:xx */
+ sprintf(ext, "MACADDR = %pM", dev->dev_addr);
} else if (0 == strcasecmp(ext, "SCAN-ACTIVE")) {
/*Set scan type to active */
/*OK if successful */
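The %pM used above is the kernel's printk extension for MAC addresses: it takes a pointer to a 6-byte address and prints it as colon-separated lower-case hex, replacing the hand-rolled dotted format. A minimal sketch of the same idea outside the ioctl handler (the helper name is illustrative):

	#include <linux/kernel.h>

	/* Illustrative only: %pM expands a 6-byte MAC address to the form
	 * "aa:bb:cc:dd:ee:ff". */
	static void format_mac(char *buf, const unsigned char *addr)
	{
		sprintf(buf, "MACADDR = %pM", addr);
	}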
diff --git a/drivers/staging/rts5139/rts51x_transport.c b/drivers/staging/rts5139/rts51x_transport.c
index 89e4d80..c172f4ae 100644
--- a/drivers/staging/rts5139/rts51x_transport.c
+++ b/drivers/staging/rts5139/rts51x_transport.c
@@ -635,12 +635,12 @@ int rts51x_get_epc_status(struct rts51x_chip *chip, u16 *status)
ep = chip->usb->pusb_dev->ep_in[usb_pipeendpoint(pipe)];
/* fill and submit the URB */
- /* We set interval to 1 here, so the polling interval is controlled
- * by our polling thread */
+ /* Set interval to 10 here to match the endpoint descriptor;
+ * the polling interval is controlled by the polling thread */
usb_fill_int_urb(chip->usb->intr_urb, chip->usb->pusb_dev, pipe,
- status, 2, urb_done_completion, &urb_done, 1);
+ status, 2, urb_done_completion, &urb_done, 10);
- result = rts51x_msg_common(chip, chip->usb->intr_urb, 50);
+ result = rts51x_msg_common(chip, chip->usb->intr_urb, 100);
return interpret_urb_result(chip, pipe, 2, result,
chip->usb->intr_urb->actual_length);
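The rts5139 hunk raises the interrupt URB's polling interval from 1 to 10 so it matches the endpoint descriptor, and gives rts51x_msg_common() 100 ms instead of 50 ms to complete; the driver's own polling thread still paces the requests. For reference, the interval is the last argument of usb_fill_int_urb(); a minimal sketch with placeholder names for the completion handler and its context:

        #include <linux/usb.h>

        static void fill_epc_status_urb(struct usb_device *udev, struct urb *urb,
                                        unsigned int pipe, u16 *status,
                                        usb_complete_t done, void *ctx)
        {
                /* interval = 10: honour the bInterval advertised by the endpoint */
                usb_fill_int_urb(urb, udev, pipe, status, sizeof(*status),
                                 done, ctx, 10);
        }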
diff --git a/drivers/staging/sb105x/sb_pci_mp.c b/drivers/staging/sb105x/sb_pci_mp.c
index cd94f6c..23db32f 100644
--- a/drivers/staging/sb105x/sb_pci_mp.c
+++ b/drivers/staging/sb105x/sb_pci_mp.c
@@ -18,11 +18,11 @@ static struct irq_info irq_lists[NR_IRQS];
static _INLINE_ unsigned int serial_in(struct mp_port *mtpt, int offset);
static _INLINE_ void serial_out(struct mp_port *mtpt, int offset, int value);
static _INLINE_ unsigned int read_option_register(struct mp_port *mtpt, int offset);
-static int sb1054_get_register(struct sb_uart_port * port, int page, int reg);
-static int sb1054_set_register(struct sb_uart_port * port, int page, int reg, int value);
-static void SendATCommand(struct mp_port * mtpt);
-static int set_deep_fifo(struct sb_uart_port * port, int status);
-static int get_deep_fifo(struct sb_uart_port * port);
+static int sb1054_get_register(struct sb_uart_port *port, int page, int reg);
+static int sb1054_set_register(struct sb_uart_port *port, int page, int reg, int value);
+static void SendATCommand(struct mp_port *mtpt);
+static int set_deep_fifo(struct sb_uart_port *port, int status);
+static int get_deep_fifo(struct sb_uart_port *port);
static int get_device_type(int arg);
static int set_auto_rts(struct sb_uart_port *port, int status);
static void mp_stop(struct tty_struct *tty);
@@ -38,7 +38,7 @@ static inline int __mp_put_char(struct sb_uart_port *port, struct circ_buf *circ
static int mp_put_char(struct tty_struct *tty, unsigned char ch);
static void mp_put_chars(struct tty_struct *tty);
-static int mp_write(struct tty_struct *tty, const unsigned char * buf, int count);
+static int mp_write(struct tty_struct *tty, const unsigned char *buf, int count);
static int mp_write_room(struct tty_struct *tty);
static int mp_chars_in_buffer(struct tty_struct *tty);
static void mp_flush_buffer(struct tty_struct *tty);
@@ -102,7 +102,7 @@ static void multi_release_port(struct sb_uart_port *port);
static int multi_request_port(struct sb_uart_port *port);
static void multi_config_port(struct sb_uart_port *port, int flags);
static int multi_verify_port(struct sb_uart_port *port, struct serial_struct *ser);
-static const char * multi_type(struct sb_uart_port *port);
+static const char *multi_type(struct sb_uart_port *port);
static void __init multi_init_ports(void);
static void __init multi_register_ports(struct uart_driver *drv);
static int init_mp_dev(struct pci_dev *pcidev, mppcibrd_t brd);
@@ -173,7 +173,7 @@ static int sb1053a_get_interface(struct mp_port *mtpt, int port_num)
return (interface);
}
-static int sb1054_get_register(struct sb_uart_port * port, int page, int reg)
+static int sb1054_get_register(struct sb_uart_port *port, int page, int reg)
{
int ret = 0;
unsigned int lcr = 0;
@@ -235,7 +235,7 @@ static int sb1054_get_register(struct sb_uart_port * port, int page, int reg)
return ret;
}
-static int sb1054_set_register(struct sb_uart_port * port, int page, int reg, int value)
+static int sb1054_set_register(struct sb_uart_port *port, int page, int reg, int value)
{
int lcr = 0;
int mcr = 0;
@@ -332,7 +332,7 @@ static int set_multidrop_addr(struct sb_uart_port *port, unsigned int addr)
return 0;
}
-static void SendATCommand(struct mp_port * mtpt)
+static void SendATCommand(struct mp_port *mtpt)
{
// a t cr lf
unsigned char ch[] = {0x61,0x74,0x0d,0x0a,0x0};
@@ -360,7 +360,7 @@ static void SendATCommand(struct mp_port * mtpt)
}// end of SendATCommand()
-static int set_deep_fifo(struct sb_uart_port * port, int status)
+static int set_deep_fifo(struct sb_uart_port *port, int status)
{
int afr_status = 0;
afr_status = sb1054_get_register(port, PAGE_4, SB105X_AFR);
@@ -416,7 +416,7 @@ static int get_device_type(int arg)
}
}
-static int get_deep_fifo(struct sb_uart_port * port)
+static int get_deep_fifo(struct sb_uart_port *port)
{
int afr_status = 0;
afr_status = sb1054_get_register(port, PAGE_4, SB105X_AFR);
@@ -638,7 +638,7 @@ static void mp_put_chars(struct tty_struct *tty)
mp_start(tty);
}
-static int mp_write(struct tty_struct *tty, const unsigned char * buf, int count)
+static int mp_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
struct sb_uart_state *state = tty->driver_data;
struct sb_uart_port *port;
@@ -2754,7 +2754,7 @@ static int multi_verify_port(struct sb_uart_port *port, struct serial_struct *se
return 0;
}
-static const char * multi_type(struct sb_uart_port *port)
+static const char *multi_type(struct sb_uart_port *port)
{
int type = port->type;
@@ -2800,7 +2800,7 @@ static void __init multi_init_ports(void)
int i,j,k;
unsigned char osc;
unsigned char b_ret = 0;
- static struct mp_device_t * sbdev;
+ static struct mp_device_t *sbdev;
if (!first)
return;
@@ -2918,10 +2918,10 @@ static int pci_remap_base(struct pci_dev *pcidev, unsigned int offset,
static int init_mp_dev(struct pci_dev *pcidev, mppcibrd_t brd)
{
- static struct mp_device_t * sbdev = mp_devs;
+ static struct mp_device_t *sbdev = mp_devs;
unsigned long addr = 0;
int j;
- struct resource * ret = NULL;
+ struct resource *ret = NULL;
sbdev->device_id = brd.device_id;
pci_read_config_byte(pcidev, PCI_CLASS_REVISION, &(sbdev->revision));
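The sb105x edits are checkpatch-style cleanups: the '*' in a declaration binds to the identifier, with no space between them. Two lines taken from the declarations above, showing the form the file converges on:

        static const char *multi_type(struct sb_uart_port *port);  /* '*' hugs the name */
        static struct mp_device_t *sbdev;                          /* likewise for variables */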
diff --git a/drivers/staging/sb105x/sb_pci_mp.h b/drivers/staging/sb105x/sb_pci_mp.h
index a15f470a..11d9299 100644
--- a/drivers/staging/sb105x/sb_pci_mp.h
+++ b/drivers/staging/sb105x/sb_pci_mp.h
@@ -19,7 +19,6 @@
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/segment.h>
#include <asm/serial.h>
#include <linux/interrupt.h>
diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c
index 8a6e5ea..73fc3cc 100644
--- a/drivers/staging/serqt_usb2/serqt_usb2.c
+++ b/drivers/staging/serqt_usb2/serqt_usb2.c
@@ -155,10 +155,10 @@ struct quatech_port {
struct urb *read_urb; /* read URB for this port */
struct urb *int_urb;
- __u8 shadowLCR; /* last LCR value received */
- __u8 shadowMCR; /* last MCR value received */
- __u8 shadowMSR; /* last MSR value received */
- __u8 shadowLSR; /* last LSR value received */
+ __u8 shadow_lcr; /* last LCR value received */
+ __u8 shadow_mcr; /* last MCR value received */
+ __u8 shadow_msr; /* last MSR value received */
+ __u8 shadow_lsr; /* last LSR value received */
char open_ports;
/* Used for TIOCMIWAIT */
@@ -170,12 +170,12 @@ struct quatech_port {
struct async_icount icount;
struct usb_serial_port *port; /* owner of this object */
- struct qt_get_device_data DeviceData;
+ struct qt_get_device_data device_data;
struct mutex lock;
bool read_urb_busy;
- int RxHolding;
- int ReadBulkStopped;
- char closePending;
+ int rx_holding;
+ int read_bulk_stopped;
+ char close_pending;
};
static int port_paranoia_check(struct usb_serial_port *port,
@@ -238,24 +238,24 @@ static struct usb_serial *get_usb_serial(struct usb_serial_port *port,
return port->serial;
}
-static void ProcessLineStatus(struct quatech_port *qt_port,
+static void process_line_status(struct quatech_port *qt_port,
unsigned char line_status)
{
- qt_port->shadowLSR =
+ qt_port->shadow_lsr =
line_status & (SERIAL_LSR_OE | SERIAL_LSR_PE | SERIAL_LSR_FE |
SERIAL_LSR_BI);
}
-static void ProcessModemStatus(struct quatech_port *qt_port,
+static void process_modem_status(struct quatech_port *qt_port,
unsigned char modem_status)
{
- qt_port->shadowMSR = modem_status;
+ qt_port->shadow_msr = modem_status;
wake_up_interruptible(&qt_port->wait);
}
-static void ProcessRxChar(struct usb_serial_port *port, unsigned char data)
+static void process_rx_char(struct usb_serial_port *port, unsigned char data)
{
struct urb *urb = port->read_urb;
if (urb->actual_length)
@@ -291,35 +291,35 @@ static void qt_status_change_check(struct urb *urb,
{
int flag, i;
unsigned char *data = urb->transfer_buffer;
- unsigned int RxCount = urb->actual_length;
+ unsigned int rx_count = urb->actual_length;
- for (i = 0; i < RxCount; ++i) {
+ for (i = 0; i < rx_count; ++i) {
/* Look ahead code here */
- if ((i <= (RxCount - 3)) && (data[i] == 0x1b)
+ if ((i <= (rx_count - 3)) && (data[i] == 0x1b)
&& (data[i + 1] == 0x1b)) {
flag = 0;
switch (data[i + 2]) {
case 0x00:
- if (i > (RxCount - 4)) {
+ if (i > (rx_count - 4)) {
dev_dbg(&port->dev,
"Illegal escape seuences in received data\n");
break;
}
- ProcessLineStatus(qt_port, data[i + 3]);
+ process_line_status(qt_port, data[i + 3]);
i += 3;
flag = 1;
break;
case 0x01:
- if (i > (RxCount - 4)) {
+ if (i > (rx_count - 4)) {
dev_dbg(&port->dev,
"Illegal escape seuences in received data\n");
break;
}
- ProcessModemStatus(qt_port, data[i + 3]);
+ process_modem_status(qt_port, data[i + 3]);
i += 3;
flag = 1;
@@ -328,8 +328,8 @@ static void qt_status_change_check(struct urb *urb,
case 0xff:
dev_dbg(&port->dev, "No status sequence.\n");
- ProcessRxChar(port, data[i]);
- ProcessRxChar(port, data[i + 1]);
+ process_rx_char(port, data[i]);
+ process_rx_char(port, data[i + 1]);
i += 2;
break;
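For context on the hunks above: qt_status_change_check() scans the bulk-in buffer for in-band status records, where two 0x1b bytes announce a code byte: 0x00 carries a line-status byte, 0x01 a modem-status byte, and 0xff marks an escaped pair that is passed through as ordinary data. A condensed, slightly simplified sketch of that scan, reusing the renamed helpers and types from the hunks (the wrapper name is hypothetical):

        static void scan_status_stream(struct usb_serial_port *port,
                                       struct quatech_port *qt_port,
                                       const unsigned char *buf, unsigned int len)
        {
                unsigned int i;

                for (i = 0; i < len; i++) {
                        if (i + 2 < len && buf[i] == 0x1b && buf[i + 1] == 0x1b) {
                                switch (buf[i + 2]) {
                                case 0x00:                      /* line status follows */
                                case 0x01:                      /* modem status follows */
                                        if (i + 3 >= len)       /* truncated record */
                                                break;
                                        if (buf[i + 2] == 0x00)
                                                process_line_status(qt_port, buf[i + 3]);
                                        else
                                                process_modem_status(qt_port, buf[i + 3]);
                                        i += 3;
                                        continue;
                                case 0xff:                      /* escaped 0x1b 0x1b pair */
                                        process_rx_char(port, buf[i]);
                                        process_rx_char(port, buf[i + 1]);
                                        i += 2;
                                        continue;
                                }
                        }
                        process_rx_char(port, buf[i]);          /* plain data byte */
                }
        }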
@@ -354,7 +354,7 @@ static void qt_read_bulk_callback(struct urb *urb)
int result;
if (urb->status) {
- qt_port->ReadBulkStopped = 1;
+ qt_port->read_bulk_stopped = 1;
dev_dbg(&urb->dev->dev,
"%s - nonzero write bulk status received: %d\n",
__func__, urb->status);
@@ -362,36 +362,36 @@ static void qt_read_bulk_callback(struct urb *urb)
}
dev_dbg(&port->dev,
- "%s - port->RxHolding = %d\n", __func__, qt_port->RxHolding);
+ "%s - port->rx_holding = %d\n", __func__, qt_port->rx_holding);
if (port_paranoia_check(port, __func__) != 0) {
- qt_port->ReadBulkStopped = 1;
+ qt_port->read_bulk_stopped = 1;
return;
}
if (!serial)
return;
- if (qt_port->closePending == 1) {
+ if (qt_port->close_pending == 1) {
/* Were closing , stop reading */
dev_dbg(&port->dev,
- "%s - (qt_port->closepending == 1\n", __func__);
- qt_port->ReadBulkStopped = 1;
+ "%s - (qt_port->close_pending == 1\n", __func__);
+ qt_port->read_bulk_stopped = 1;
return;
}
/*
- * RxHolding is asserted by throttle, if we assert it, we're not
+ * rx_holding is asserted by throttle, if we assert it, we're not
* receiving any more characters and let the box handle the flow
* control
*/
- if (qt_port->RxHolding == 1) {
- qt_port->ReadBulkStopped = 1;
+ if (qt_port->rx_holding == 1) {
+ qt_port->read_bulk_stopped = 1;
return;
}
if (urb->status) {
- qt_port->ReadBulkStopped = 1;
+ qt_port->read_bulk_stopped = 1;
dev_dbg(&port->dev,
"%s - nonzero read bulk status received: %d\n",
@@ -455,10 +455,10 @@ static int qt_get_device(struct usb_serial *serial,
}
/****************************************************************************
- * BoxSetPrebufferLevel
+ * box_set_prebuffer_level
TELLS BOX WHEN TO ASSERT FLOW CONTROL
****************************************************************************/
-static int BoxSetPrebufferLevel(struct usb_serial *serial)
+static int box_set_prebuffer_level(struct usb_serial *serial)
{
int result;
__u16 buffer_length;
@@ -471,10 +471,10 @@ static int BoxSetPrebufferLevel(struct usb_serial *serial)
}
/****************************************************************************
- * BoxSetATC
+ * box_set_atc
TELLS BOX WHEN TO ASSERT automatic transmitter control
****************************************************************************/
-static int BoxSetATC(struct usb_serial *serial, __u16 n_Mode)
+static int box_set_atc(struct usb_serial *serial, __u16 n_mode)
{
int result;
__u16 buffer_length;
@@ -483,7 +483,7 @@ static int BoxSetATC(struct usb_serial *serial, __u16 n_Mode)
result =
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- QT_SET_ATF, 0x40, n_Mode, 0, NULL, 0, 300);
+ QT_SET_ATF, 0x40, n_mode, 0, NULL, 0, 300);
return result;
}
@@ -499,42 +499,42 @@ static int qt_set_device(struct usb_serial *serial,
{
int result;
__u16 length;
- __u16 PortSettings;
+ __u16 port_settings;
- PortSettings = ((__u16) (device_data->portb));
- PortSettings = (PortSettings << 8);
- PortSettings += ((__u16) (device_data->porta));
+ port_settings = ((__u16) (device_data->portb));
+ port_settings = (port_settings << 8);
+ port_settings += ((__u16) (device_data->porta));
length = sizeof(struct qt_get_device_data);
result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- QT_SET_GET_DEVICE, 0x40, PortSettings,
+ QT_SET_GET_DEVICE, 0x40, port_settings,
0, NULL, 0, 300);
return result;
}
-static int qt_open_channel(struct usb_serial *serial, __u16 Uart_Number,
- struct qt_open_channel_data *pDeviceData)
+static int qt_open_channel(struct usb_serial *serial, __u16 uart_num,
+ struct qt_open_channel_data *pdevice_data)
{
int result;
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
QT_OPEN_CLOSE_CHANNEL,
- USBD_TRANSFER_DIRECTION_IN, 1, Uart_Number,
- pDeviceData,
+ USBD_TRANSFER_DIRECTION_IN, 1, uart_num,
+ pdevice_data,
sizeof(struct qt_open_channel_data), 300);
return result;
}
-static int qt_close_channel(struct usb_serial *serial, __u16 Uart_Number)
+static int qt_close_channel(struct usb_serial *serial, __u16 uart_num)
{
int result;
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
QT_OPEN_CLOSE_CHANNEL,
- USBD_TRANSFER_DIRECTION_OUT, 0, Uart_Number,
+ USBD_TRANSFER_DIRECTION_OUT, 0, uart_num,
NULL, 0, 300);
return result;
@@ -542,12 +542,12 @@ static int qt_close_channel(struct usb_serial *serial, __u16 Uart_Number)
}
/****************************************************************************
-* BoxGetRegister
+* box_get_register
* issuse a GET_REGISTER vendor-spcific request on the default control pipe
-* If successful, fills in the pValue with the register value asked for
+* If successful, fills in the p_value with the register value asked for
****************************************************************************/
-static int BoxGetRegister(struct usb_serial *serial, unsigned short Uart_Number,
- unsigned short Register_Num, __u8 *pValue)
+static int box_get_register(struct usb_serial *serial, unsigned short uart_num,
+ unsigned short register_num, __u8 *p_value)
{
int result;
__u16 current_length;
@@ -556,36 +556,36 @@ static int BoxGetRegister(struct usb_serial *serial, unsigned short Uart_Number,
result =
usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
- QT_GET_SET_REGISTER, 0xC0, Register_Num,
- Uart_Number, (void *)pValue, sizeof(*pValue), 300);
+ QT_GET_SET_REGISTER, 0xC0, register_num,
+ uart_num, (void *)p_value, sizeof(*p_value), 300);
return result;
}
/****************************************************************************
-* BoxSetRegister
+* box_set_register
* issuse a GET_REGISTER vendor-spcific request on the default control pipe
-* If successful, fills in the pValue with the register value asked for
+* If successful, fills in the p_value with the register value asked for
****************************************************************************/
-static int BoxSetRegister(struct usb_serial *serial, unsigned short Uart_Number,
- unsigned short Register_Num, unsigned short Value)
+static int box_set_register(struct usb_serial *serial, unsigned short uart_num,
+ unsigned short register_num, unsigned short value)
{
int result;
- unsigned short RegAndByte;
+ unsigned short reg_and_byte;
- RegAndByte = Value;
- RegAndByte = RegAndByte << 8;
- RegAndByte = RegAndByte + Register_Num;
+ reg_and_byte = value;
+ reg_and_byte = reg_and_byte << 8;
+ reg_and_byte = reg_and_byte + register_num;
/*
result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- QT_GET_SET_REGISTER, 0xC0, Register_Num,
- Uart_Number, NULL, 0, 300);
+ QT_GET_SET_REGISTER, 0xC0, register_num,
+ uart_num, NULL, 0, 300);
*/
result =
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- QT_GET_SET_REGISTER, 0x40, RegAndByte, Uart_Number,
+ QT_GET_SET_REGISTER, 0x40, reg_and_byte, uart_num,
NULL, 0, 300);
return result;
@@ -596,30 +596,30 @@ static int BoxSetRegister(struct usb_serial *serial, unsigned short Uart_Number,
* issues a SET_UART vendor-specific request on the default control pipe
* If successful sets baud rate divisor and LCR value
*/
-static int qt_setuart(struct usb_serial *serial, unsigned short Uart_Number,
- unsigned short default_divisor, unsigned char default_LCR)
+static int qt_setuart(struct usb_serial *serial, unsigned short uart_num,
+ unsigned short default_divisor, unsigned char default_lcr)
{
int result;
- unsigned short UartNumandLCR;
+ unsigned short uart_num_and_lcr;
- UartNumandLCR = (default_LCR << 8) + Uart_Number;
+ uart_num_and_lcr = (default_lcr << 8) + uart_num;
result =
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
QT_GET_SET_UART, 0x40, default_divisor,
- UartNumandLCR, NULL, 0, 300);
+ uart_num_and_lcr, NULL, 0, 300);
return result;
}
-static int BoxSetHW_FlowCtrl(struct usb_serial *serial, unsigned int index,
- int bSet)
+static int box_set_hw_flow_ctrl(struct usb_serial *serial, unsigned int index,
+ int b_set)
{
__u8 mcr = 0;
- __u8 msr = 0, MOUT_Value = 0;
+ __u8 msr = 0, mout_value = 0;
unsigned int status;
- if (bSet == 1) {
+ if (b_set == 1) {
/* flow control, box will clear RTS line to prevent remote */
mcr = SERIAL_MCR_RTS;
} /* device from xmitting more chars */
@@ -628,9 +628,9 @@ static int BoxSetHW_FlowCtrl(struct usb_serial *serial, unsigned int index,
mcr = 0;
}
- MOUT_Value = mcr << 8;
+ mout_value = mcr << 8;
- if (bSet == 1) {
+ if (b_set == 1) {
/* flow control, box will inhibit xmit data if CTS line is
* asserted */
msr = SERIAL_MSR_CTS;
@@ -638,34 +638,34 @@ static int BoxSetHW_FlowCtrl(struct usb_serial *serial, unsigned int index,
/* Box will not inhimbe xmit data due to CTS line */
msr = 0;
}
- MOUT_Value |= msr;
+ mout_value |= msr;
status =
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- QT_HW_FLOW_CONTROL_MASK, 0x40, MOUT_Value,
+ QT_HW_FLOW_CONTROL_MASK, 0x40, mout_value,
index, NULL, 0, 300);
return status;
}
-static int BoxSetSW_FlowCtrl(struct usb_serial *serial, __u16 index,
+static int box_set_sw_flow_ctrl(struct usb_serial *serial, __u16 index,
unsigned char stop_char, unsigned char start_char)
{
- __u16 nSWflowout;
+ __u16 n_sw_flow_out;
int result;
- nSWflowout = start_char << 8;
- nSWflowout = (unsigned short)stop_char;
+ n_sw_flow_out = start_char << 8;
+ n_sw_flow_out = (unsigned short)stop_char;
result =
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- QT_SW_FLOW_CONTROL_MASK, 0x40, nSWflowout,
+ QT_SW_FLOW_CONTROL_MASK, 0x40, n_sw_flow_out,
index, NULL, 0, 300);
return result;
}
-static int BoxDisable_SW_FlowCtrl(struct usb_serial *serial, __u16 index)
+static int box_disable_sw_flow_ctrl(struct usb_serial *serial, __u16 index)
{
int result;
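box_set_sw_flow_ctrl() packs the XON/XOFF pair into the 16-bit wValue of the vendor request. As carried over unchanged from the old BoxSetSW_FlowCtrl(), the second assignment to n_sw_flow_out overwrites the shifted start character rather than OR-ing it in; a sketch of what the packing presumably intends (an observation only, not something this patch changes):

        /* Presumed intent: start char in the high byte, stop char in the low byte */
        static u16 pack_sw_flow(unsigned char start_char, unsigned char stop_char)
        {
                return ((u16)start_char << 8) | stop_char;
        }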
@@ -682,7 +682,7 @@ static int qt_startup(struct usb_serial *serial)
struct device *dev = &serial->dev->dev;
struct usb_serial_port *port;
struct quatech_port *qt_port;
- struct qt_get_device_data DeviceData;
+ struct qt_get_device_data device_data;
int i;
int status;
@@ -704,22 +704,22 @@ static int qt_startup(struct usb_serial *serial)
}
- status = qt_get_device(serial, &DeviceData);
+ status = qt_get_device(serial, &device_data);
if (status < 0)
goto startup_error;
- dev_dbg(dev, "DeviceData.portb = 0x%x\n", DeviceData.portb);
+ dev_dbg(dev, "device_data.portb = 0x%x\n", device_data.portb);
- DeviceData.portb &= ~FULLPWRBIT;
- dev_dbg(dev, "Changing DeviceData.portb to 0x%x\n", DeviceData.portb);
+ device_data.portb &= ~FULLPWRBIT;
+ dev_dbg(dev, "Changing device_data.portb to 0x%x\n", device_data.portb);
- status = qt_set_device(serial, &DeviceData);
+ status = qt_set_device(serial, &device_data);
if (status < 0) {
dev_dbg(dev, "qt_set_device failed\n");
goto startup_error;
}
- status = qt_get_device(serial, &DeviceData);
+ status = qt_get_device(serial, &device_data);
if (status < 0) {
dev_dbg(dev, "qt_get_device failed\n");
goto startup_error;
@@ -734,10 +734,10 @@ static int qt_startup(struct usb_serial *serial)
case QUATECH_HSU100B:
case QUATECH_HSU100C:
case QUATECH_HSU100D:
- DeviceData.porta &= ~(RR_BITS | DUPMODE_BITS);
- DeviceData.porta |= CLKS_X4;
- DeviceData.portb &= ~(LOOPMODE_BITS);
- DeviceData.portb |= RS232_MODE;
+ device_data.porta &= ~(RR_BITS | DUPMODE_BITS);
+ device_data.porta |= CLKS_X4;
+ device_data.portb &= ~(LOOPMODE_BITS);
+ device_data.portb |= RS232_MODE;
break;
case QUATECH_SSU200:
@@ -749,38 +749,38 @@ static int qt_startup(struct usb_serial *serial)
case QUATECH_HSU200B:
case QUATECH_HSU200C:
case QUATECH_HSU200D:
- DeviceData.porta &= ~(RR_BITS | DUPMODE_BITS);
- DeviceData.porta |= CLKS_X4;
- DeviceData.portb &= ~(LOOPMODE_BITS);
- DeviceData.portb |= ALL_LOOPBACK;
+ device_data.porta &= ~(RR_BITS | DUPMODE_BITS);
+ device_data.porta |= CLKS_X4;
+ device_data.portb &= ~(LOOPMODE_BITS);
+ device_data.portb |= ALL_LOOPBACK;
break;
default:
- DeviceData.porta &= ~(RR_BITS | DUPMODE_BITS);
- DeviceData.porta |= CLKS_X4;
- DeviceData.portb &= ~(LOOPMODE_BITS);
- DeviceData.portb |= RS232_MODE;
+ device_data.porta &= ~(RR_BITS | DUPMODE_BITS);
+ device_data.porta |= CLKS_X4;
+ device_data.portb &= ~(LOOPMODE_BITS);
+ device_data.portb |= RS232_MODE;
break;
}
- status = BoxSetPrebufferLevel(serial); /* sets to default value */
+ status = box_set_prebuffer_level(serial); /* sets to default value */
if (status < 0) {
- dev_dbg(dev, "BoxSetPrebufferLevel failed\n");
+ dev_dbg(dev, "box_set_prebuffer_level failed\n");
goto startup_error;
}
- status = BoxSetATC(serial, ATC_DISABLED);
+ status = box_set_atc(serial, ATC_DISABLED);
if (status < 0) {
- dev_dbg(dev, "BoxSetATC failed\n");
+ dev_dbg(dev, "box_set_atc failed\n");
goto startup_error;
}
- dev_dbg(dev, "DeviceData.portb = 0x%x\n", DeviceData.portb);
+ dev_dbg(dev, "device_data.portb = 0x%x\n", device_data.portb);
- DeviceData.portb |= NEXT_BOARD_POWER_BIT;
- dev_dbg(dev, "Changing DeviceData.portb to 0x%x\n", DeviceData.portb);
+ device_data.portb |= NEXT_BOARD_POWER_BIT;
+ dev_dbg(dev, "Changing device_data.portb to 0x%x\n", device_data.portb);
- status = qt_set_device(serial, &DeviceData);
+ status = qt_set_device(serial, &device_data);
if (status < 0) {
dev_dbg(dev, "qt_set_device failed\n");
goto startup_error;
@@ -848,7 +848,7 @@ static int qt_open(struct tty_struct *tty,
struct usb_serial *serial;
struct quatech_port *quatech_port;
struct quatech_port *port0;
- struct qt_open_channel_data ChannelData;
+ struct qt_open_channel_data channel_data;
int result;
@@ -870,10 +870,10 @@ static int qt_open(struct tty_struct *tty,
usb_clear_halt(serial->dev, port->read_urb->pipe);
port0->open_ports++;
- result = qt_get_device(serial, &port0->DeviceData);
+ result = qt_get_device(serial, &port0->device_data);
/* Port specific setups */
- result = qt_open_channel(serial, port->number, &ChannelData);
+ result = qt_open_channel(serial, port->port_number, &channel_data);
if (result < 0) {
dev_dbg(&port->dev, "qt_open_channel failed\n");
return result;
@@ -881,14 +881,14 @@ static int qt_open(struct tty_struct *tty,
dev_dbg(&port->dev, "qt_open_channel completed.\n");
/* FIXME: are these needed? Does it even do anything useful? */
- quatech_port->shadowLSR = ChannelData.line_status &
+ quatech_port->shadow_lsr = channel_data.line_status &
(SERIAL_LSR_OE | SERIAL_LSR_PE | SERIAL_LSR_FE | SERIAL_LSR_BI);
- quatech_port->shadowMSR = ChannelData.modem_status &
+ quatech_port->shadow_msr = channel_data.modem_status &
(SERIAL_MSR_CTS | SERIAL_MSR_DSR | SERIAL_MSR_RI | SERIAL_MSR_CD);
/* Set Baud rate to default and turn off (default)flow control here */
- result = qt_setuart(serial, port->number, DEFAULT_DIVISOR, DEFAULT_LCR);
+ result = qt_setuart(serial, port->port_number, DEFAULT_DIVISOR, DEFAULT_LCR);
if (result < 0) {
dev_dbg(&port->dev, "qt_setuart failed\n");
return result;
@@ -906,8 +906,7 @@ static int qt_open(struct tty_struct *tty,
qt_submit_urb_from_open(serial, port);
}
- dev_dbg(&port->dev, "port number is %d\n", port->number);
- dev_dbg(&port->dev, "serial number is %d\n", port->serial->minor);
+ dev_dbg(&port->dev, "minor number is %d\n", port->minor);
dev_dbg(&port->dev,
"Bulkin endpoint is %d\n", port->bulk_in_endpointAddress);
dev_dbg(&port->dev,
@@ -1003,7 +1002,7 @@ static void qt_close(struct usb_serial_port *port)
status = 0;
tty = tty_port_tty_get(&port->port);
- index = tty->index - serial->minor;
+ index = port->port_number;
qt_port = qt_get_port_private(port);
port0 = qt_get_port_private(serial->port[0]);
@@ -1022,14 +1021,11 @@ static void qt_close(struct usb_serial_port *port)
/* Close uart channel */
status = qt_close_channel(serial, index);
if (status < 0)
- dev_dbg(&port->dev,
- "%s - port %d qt_close_channel failed.\n",
- __func__, port->number);
+ dev_dbg(&port->dev, "%s - qt_close_channel failed.\n", __func__);
port0->open_ports--;
- dev_dbg(&port->dev, "qt_num_open_ports in close%d:in port%d\n",
- port0->open_ports, port->number);
+ dev_dbg(&port->dev, "qt_num_open_ports in close%d\n", port0->open_ports);
if (port0->open_ports == 0) {
if (serial->port[0]->interrupt_in_urb) {
@@ -1133,12 +1129,11 @@ static int qt_ioctl(struct tty_struct *tty,
{
struct usb_serial_port *port = tty->driver_data;
struct quatech_port *qt_port = qt_get_port_private(port);
- struct usb_serial *serial = get_usb_serial(port, __func__);
unsigned int index;
dev_dbg(&port->dev, "%s cmd 0x%04x\n", __func__, cmd);
- index = tty->index - serial->minor;
+ index = port->port_number;
if (cmd == TIOCMIWAIT) {
while (qt_port != NULL) {
@@ -1169,8 +1164,7 @@ static int qt_ioctl(struct tty_struct *tty,
return 0;
}
- dev_dbg(&port->dev, "%s -No ioctl for that one. port = %d\n",
- __func__, port->number);
+ dev_dbg(&port->dev, "%s -No ioctl for that one.\n", __func__);
return -ENOIOCTLCMD;
}
@@ -1179,43 +1173,43 @@ static void qt_set_termios(struct tty_struct *tty,
struct ktermios *old_termios)
{
struct ktermios *termios = &tty->termios;
- unsigned char new_LCR = 0;
+ unsigned char new_lcr = 0;
unsigned int cflag = termios->c_cflag;
unsigned int index;
int baud, divisor, remainder;
int status;
- index = tty->index - port->serial->minor;
+ index = port->port_number;
switch (cflag & CSIZE) {
case CS5:
- new_LCR |= SERIAL_5_DATA;
+ new_lcr |= SERIAL_5_DATA;
break;
case CS6:
- new_LCR |= SERIAL_6_DATA;
+ new_lcr |= SERIAL_6_DATA;
break;
case CS7:
- new_LCR |= SERIAL_7_DATA;
+ new_lcr |= SERIAL_7_DATA;
break;
default:
termios->c_cflag &= ~CSIZE;
termios->c_cflag |= CS8;
case CS8:
- new_LCR |= SERIAL_8_DATA;
+ new_lcr |= SERIAL_8_DATA;
break;
}
/* Parity stuff */
if (cflag & PARENB) {
if (cflag & PARODD)
- new_LCR |= SERIAL_ODD_PARITY;
+ new_lcr |= SERIAL_ODD_PARITY;
else
- new_LCR |= SERIAL_EVEN_PARITY;
+ new_lcr |= SERIAL_EVEN_PARITY;
}
if (cflag & CSTOPB)
- new_LCR |= SERIAL_TWO_STOPB;
+ new_lcr |= SERIAL_TWO_STOPB;
else
- new_LCR |= SERIAL_ONE_STOPB;
+ new_lcr |= SERIAL_ONE_STOPB;
dev_dbg(&port->dev, "%s - 4\n", __func__);
@@ -1237,7 +1231,7 @@ static void qt_set_termios(struct tty_struct *tty,
* Set Baud rate to default and turn off (default)flow control here
*/
status =
- qt_setuart(port->serial, index, (unsigned short)divisor, new_LCR);
+ qt_setuart(port->serial, index, (unsigned short)divisor, new_lcr);
if (status < 0) {
dev_dbg(&port->dev, "qt_setuart failed\n");
return;
@@ -1245,25 +1239,23 @@ static void qt_set_termios(struct tty_struct *tty,
/* Now determine flow control */
if (cflag & CRTSCTS) {
- dev_dbg(&port->dev, "%s - Enabling HW flow control port %d\n",
- __func__, port->number);
+ dev_dbg(&port->dev, "%s - Enabling HW flow control\n", __func__);
/* Enable RTS/CTS flow control */
- status = BoxSetHW_FlowCtrl(port->serial, index, 1);
+ status = box_set_hw_flow_ctrl(port->serial, index, 1);
if (status < 0) {
- dev_dbg(&port->dev, "BoxSetHW_FlowCtrl failed\n");
+ dev_dbg(&port->dev, "box_set_hw_flow_ctrl failed\n");
return;
}
} else {
/* Disable RTS/CTS flow control */
dev_dbg(&port->dev,
- "%s - disabling HW flow control port %d\n",
- __func__, port->number);
+ "%s - disabling HW flow control\n", __func__);
- status = BoxSetHW_FlowCtrl(port->serial, index, 0);
+ status = box_set_hw_flow_ctrl(port->serial, index, 0);
if (status < 0) {
- dev_dbg(&port->dev, "BoxSetHW_FlowCtrl failed\n");
+ dev_dbg(&port->dev, "box_set_hw_flow_ctrl failed\n");
return;
}
@@ -1275,18 +1267,18 @@ static void qt_set_termios(struct tty_struct *tty,
unsigned char stop_char = STOP_CHAR(tty);
unsigned char start_char = START_CHAR(tty);
status =
- BoxSetSW_FlowCtrl(port->serial, index, stop_char,
+ box_set_sw_flow_ctrl(port->serial, index, stop_char,
start_char);
if (status < 0)
dev_dbg(&port->dev,
- "BoxSetSW_FlowCtrl (enabled) failed\n");
+ "box_set_sw_flow_ctrl (enabled) failed\n");
} else {
/* disable SW flow control */
- status = BoxDisable_SW_FlowCtrl(port->serial, index);
+ status = box_disable_sw_flow_ctrl(port->serial, index);
if (status < 0)
dev_dbg(&port->dev,
- "BoxSetSW_FlowCtrl (diabling) failed\n");
+ "box_set_sw_flow_ctrl (disabling) failed\n");
}
termios->c_cflag &= ~CMSPAR;
@@ -1303,7 +1295,7 @@ static void qt_break(struct tty_struct *tty, int break_state)
u16 index, onoff;
unsigned int result;
- index = tty->index - serial->minor;
+ index = port->port_number;
qt_port = qt_get_port_private(port);
@@ -1332,12 +1324,12 @@ static inline int qt_real_tiocmget(struct tty_struct *tty,
int status;
unsigned int index;
- index = tty->index - serial->minor;
+ index = port->port_number;
status =
- BoxGetRegister(port->serial, index, MODEM_CONTROL_REGISTER, &mcr);
+ box_get_register(port->serial, index, MODEM_CONTROL_REGISTER, &mcr);
if (status >= 0) {
status =
- BoxGetRegister(port->serial, index,
+ box_get_register(port->serial, index,
MODEM_STATUS_REGISTER, &msr);
}
@@ -1371,9 +1363,9 @@ static inline int qt_real_tiocmset(struct tty_struct *tty,
int status;
unsigned int index;
- index = tty->index - serial->minor;
+ index = port->port_number;
status =
- BoxGetRegister(port->serial, index, MODEM_CONTROL_REGISTER, &mcr);
+ box_get_register(port->serial, index, MODEM_CONTROL_REGISTER, &mcr);
if (status < 0)
return -ESPIPE;
@@ -1390,7 +1382,7 @@ static inline int qt_real_tiocmset(struct tty_struct *tty,
mcr |= SERIAL_MCR_LOOP;
status =
- BoxSetRegister(port->serial, index, MODEM_CONTROL_REGISTER, mcr);
+ box_set_register(port->serial, index, MODEM_CONTROL_REGISTER, mcr);
if (status < 0)
return -ESPIPE;
else
@@ -1445,7 +1437,7 @@ static void qt_throttle(struct tty_struct *tty)
mutex_lock(&qt_port->lock);
/* pass on to the driver specific version of this function */
- qt_port->RxHolding = 1;
+ qt_port->rx_holding = 1;
mutex_unlock(&qt_port->lock);
}
@@ -1484,14 +1476,14 @@ static void qt_unthrottle(struct tty_struct *tty)
mutex_lock(&qt_port->lock);
- if (qt_port->RxHolding == 1) {
- dev_dbg(&port->dev, "%s -qt_port->RxHolding == 1\n", __func__);
+ if (qt_port->rx_holding == 1) {
+ dev_dbg(&port->dev, "%s -qt_port->rx_holding == 1\n", __func__);
- qt_port->RxHolding = 0;
- dev_dbg(&port->dev, "%s - qt_port->RxHolding = 0\n", __func__);
+ qt_port->rx_holding = 0;
+ dev_dbg(&port->dev, "%s - qt_port->rx_holding = 0\n", __func__);
/* if we have a bulk endpoint, start it up */
- if ((serial->num_bulk_in) && (qt_port->ReadBulkStopped == 1))
+ if ((serial->num_bulk_in) && (qt_port->read_bulk_stopped == 1))
qt_submit_urb_from_unthrottle(port, serial);
}
mutex_unlock(&qt_port->lock);
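Across the serqt hunks, `tty->index - serial->minor` and `port->number` give way to `port->port_number` and `port->minor`: the usb-serial core now records the zero-based port index and the tty minor directly in struct usb_serial_port, so drivers no longer derive them from the device's base minor. A minimal sketch of the lookup as the driver now does it (the helper name is hypothetical):

        #include <linux/usb/serial.h>

        static unsigned int qt_port_index(struct usb_serial_port *port)
        {
                /* zero-based index of this port within its usb-serial device;
                 * replaces the old tty->index - serial->minor arithmetic */
                return port->port_number;
        }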
diff --git a/drivers/staging/silicom/Kconfig b/drivers/staging/silicom/Kconfig
index eda2e7d..6651bd8 100644
--- a/drivers/staging/silicom/Kconfig
+++ b/drivers/staging/silicom/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_SILICOM
bool "Silicom devices"
default y
- depends on PCI
+ depends on PCI && NETDEVICES
---help---
If you have a network card (Ethernet) belonging to this class,
say Y.
@@ -19,7 +19,7 @@ if NET_VENDOR_SILICOM
config SBYPASS
tristate "Silicom BypassCTL library support"
- depends on PCI && NET
+ depends on PCI
depends on m
---help---
If you have a network (Ethernet) controller of this type, say Y
@@ -29,10 +29,9 @@ config SBYPASS
config BPCTL
tristate "Silicom BypassCTL net support"
- depends on PCI && NET
+ depends on PCI
depends on m
select SBYPASS
- select NET_CORE
select MII
---help---
If you have a network (Ethernet) controller of this type, say Y
diff --git a/drivers/staging/silicom/bp_mod.h b/drivers/staging/silicom/bp_mod.h
index b8275f5..cfa1f43 100644
--- a/drivers/staging/silicom/bp_mod.h
+++ b/drivers/staging/silicom/bp_mod.h
@@ -15,8 +15,6 @@
#define BP_MOD_H
#include "bits.h"
-#define EXPORT_SYMBOL_NOVERS EXPORT_SYMBOL
-
#define usec_delay(x) udelay(x)
#ifndef msec_delay_bp
#define msec_delay_bp(x) \
diff --git a/drivers/staging/silicom/bpctl_mod.c b/drivers/staging/silicom/bpctl_mod.c
index b7e570c..495272d 100644
--- a/drivers/staging/silicom/bpctl_mod.c
+++ b/drivers/staging/silicom/bpctl_mod.c
@@ -35,7 +35,7 @@
#define BP_MOD_DESCR "Silicom Bypass-SD Control driver"
#define BP_SYNC_FLAG 1
-static int major_num = 0;
+static int major_num;
MODULE_AUTHOR("Anna Lukin, annal@silicom.co.il");
MODULE_LICENSE("GPL");
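checkpatch flags `static int major_num = 0;` because static storage is already zero-initialized, so the explicit initializer is redundant; the hunk simply drops it:

        static int major_num;           /* implicitly zero */
        static int device_num;          /* same treatment later in the file */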
@@ -43,28 +43,23 @@ MODULE_DESCRIPTION(BP_MOD_DESCR);
MODULE_VERSION(BP_MOD_VER);
spinlock_t bpvm_lock;
-#define lock_bpctl() \
-if (down_interruptible(&bpctl_sema)) { \
- return -ERESTARTSYS; \
-} \
-
-#define unlock_bpctl() \
+#define unlock_bpctl() \
up(&bpctl_sema);
/* Media Types */
-typedef enum {
- bp_copper = 0,
- bp_fiber,
- bp_cx4,
- bp_none,
-} bp_media_type;
+enum bp_media_type {
+ BP_COPPER = 0,
+ BP_FIBER,
+ BP_CX4,
+ BP_NONE,
+};
struct bypass_pfs_sd {
char dir_name[32];
struct proc_dir_entry *bypass_entry;
};
-typedef struct _bpctl_dev {
+struct bpctl_dev {
char *name;
char *desc;
struct pci_dev *pdev; /* PCI device */
@@ -89,7 +84,7 @@ typedef struct _bpctl_dev {
uint32_t reset_time;
uint8_t bp_status_un;
atomic_t wdt_busy;
- bp_media_type media_type;
+ enum bp_media_type media_type;
int bp_tpl_flag;
struct timer_list bp_tpl_timer;
spinlock_t bypass_wr_lock;
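The bpctl conversion replaces the typedef'd `bpctl_dev_t` struct and the lower-case media-type enum with plain `struct bpctl_dev` and `enum bp_media_type` (upper-case enumerators), per the kernel's preference for not typedef-ing structs and enums. The resulting shapes, trimmed to a sketch:

        enum bp_media_type {
                BP_COPPER = 0,
                BP_FIBER,
                BP_CX4,
                BP_NONE,
        };

        struct bpctl_dev {
                char *name;
                struct pci_dev *pdev;
                enum bp_media_type media_type;
                /* ...remaining fields unchanged... */
        };

        /* call sites then spell out the struct type */
        static int wdt_timer(struct bpctl_dev *pbpctl_dev, int *time_left);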
@@ -107,34 +102,34 @@ typedef struct _bpctl_dev {
char *bp_tx_data;
struct bypass_pfs_sd bypass_pfs_set;
-} bpctl_dev_t;
+};
-static bpctl_dev_t *bpctl_dev_arr;
+static struct bpctl_dev *bpctl_dev_arr;
static struct semaphore bpctl_sema;
-static int device_num = 0;
+static int device_num;
static int get_dev_idx(int ifindex);
-static bpctl_dev_t *get_master_port_fn(bpctl_dev_t *pbpctl_dev);
-static int disc_status(bpctl_dev_t *pbpctl_dev);
-static int bypass_status(bpctl_dev_t *pbpctl_dev);
-static int wdt_timer(bpctl_dev_t *pbpctl_dev, int *time_left);
-static bpctl_dev_t *get_status_port_fn(bpctl_dev_t *pbpctl_dev);
+static struct bpctl_dev *get_master_port_fn(struct bpctl_dev *pbpctl_dev);
+static int disc_status(struct bpctl_dev *pbpctl_dev);
+static int bypass_status(struct bpctl_dev *pbpctl_dev);
+static int wdt_timer(struct bpctl_dev *pbpctl_dev, int *time_left);
+static struct bpctl_dev *get_status_port_fn(struct bpctl_dev *pbpctl_dev);
static void if_scan_init(void);
-int bypass_proc_create_dev_sd(bpctl_dev_t *pbp_device_block);
-int bypass_proc_remove_dev_sd(bpctl_dev_t *pbp_device_block);
+int bypass_proc_create_dev_sd(struct bpctl_dev *pbp_device_block);
+int bypass_proc_remove_dev_sd(struct bpctl_dev *pbp_device_block);
int bp_proc_create(void);
-int is_bypass_fn(bpctl_dev_t *pbpctl_dev);
+int is_bypass_fn(struct bpctl_dev *pbpctl_dev);
int get_dev_idx_bsf(int bus, int slot, int func);
static unsigned long str_to_hex(char *p);
static int bp_device_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
- struct net_device *dev = ptr;
- static bpctl_dev_t *pbpctl_dev = NULL, *pbpctl_dev_m = NULL;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ static struct bpctl_dev *pbpctl_dev, *pbpctl_dev_m;
int dev_num = 0, ret = 0, ret_d = 0, time_left = 0;
/* printk("BP_PROC_SUPPORT event =%d %s %d\n", event,dev->name, dev->ifindex ); */
/* return NOTIFY_DONE; */
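In bp_device_event() the notifier payload is no longer the net_device itself: since the netdev notifier-info rework, callbacks receive a struct netdev_notifier_info and fetch the device with netdev_notifier_info_to_dev(), exactly as the hunk above now does. A minimal sketch of a callback using that accessor (names are illustrative):

        #include <linux/netdevice.h>

        static int my_netdev_event(struct notifier_block *nb,
                                   unsigned long event, void *ptr)
        {
                struct net_device *dev = netdev_notifier_info_to_dev(ptr);

                if (event == NETDEV_UP)
                        pr_debug("%s is up\n", dev->name);
                return NOTIFY_DONE;
        }

        static struct notifier_block my_nb = {
                .notifier_call = my_netdev_event,
        };

Such a block is registered with register_netdevice_notifier(&my_nb), as this driver presumably does with bp_notifier_block.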
@@ -165,7 +160,8 @@ static int bp_device_event(struct notifier_block *unused,
memcpy(&cbuf, drvinfo.bus_info, 32);
buf = &cbuf[0];
- while (*buf++ != ':') ;
+ while (*buf++ != ':')
+ ;
for (i = 0; i < 10; i++, buf++) {
if (*buf == ':')
break;
@@ -288,17 +284,17 @@ static struct notifier_block bp_notifier_block = {
.notifier_call = bp_device_event,
};
-int is_bypass_fn(bpctl_dev_t *pbpctl_dev);
-int wdt_time_left(bpctl_dev_t *pbpctl_dev);
+int is_bypass_fn(struct bpctl_dev *pbpctl_dev);
+int wdt_time_left(struct bpctl_dev *pbpctl_dev);
-static void write_pulse(bpctl_dev_t *pbpctl_dev,
+static void write_pulse(struct bpctl_dev *pbpctl_dev,
unsigned int ctrl_ext,
unsigned char value, unsigned char len)
{
unsigned char ctrl_val = 0;
unsigned int i = len;
unsigned int ctrl = 0;
- bpctl_dev_t *pbpctl_dev_c = NULL;
+ struct bpctl_dev *pbpctl_dev_c = NULL;
if (pbpctl_dev->bp_i80)
ctrl = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
@@ -306,7 +302,8 @@ static void write_pulse(bpctl_dev_t *pbpctl_dev,
ctrl = BP10G_READ_REG(pbpctl_dev, ESDP);
if (pbpctl_dev->bp_10g9) {
- if (!(pbpctl_dev_c = get_status_port_fn(pbpctl_dev)))
+ pbpctl_dev_c = get_status_port_fn(pbpctl_dev);
+ if (!pbpctl_dev_c)
return;
ctrl = BP10G_READ_REG(pbpctl_dev_c, ESDP);
}
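Several bpctl hunks split assignments out of if-conditions: `if (!(p = get_status_port_fn(...)))` becomes an assignment followed by a NULL check, which reads more clearly and silences checkpatch. A tiny sketch of the resulting pattern, wrapped in a hypothetical helper that reuses the driver's get_status_port_fn() and BP10G_READ_REG():

        static int read_peer_esdp(struct bpctl_dev *pbpctl_dev, u32 *ctrl)
        {
                struct bpctl_dev *peer;

                peer = get_status_port_fn(pbpctl_dev);  /* may be NULL */
                if (!peer)
                        return -1;
                *ctrl = BP10G_READ_REG(peer, ESDP);
                return 0;
        }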
@@ -593,20 +590,21 @@ static void write_pulse(bpctl_dev_t *pbpctl_dev,
}
}
-static int read_pulse(bpctl_dev_t *pbpctl_dev, unsigned int ctrl_ext,
+static int read_pulse(struct bpctl_dev *pbpctl_dev, unsigned int ctrl_ext,
unsigned char len)
{
unsigned char ctrl_val = 0;
unsigned int i = len;
unsigned int ctrl = 0;
- bpctl_dev_t *pbpctl_dev_c = NULL;
+ struct bpctl_dev *pbpctl_dev_c = NULL;
if (pbpctl_dev->bp_i80)
ctrl = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
if (pbpctl_dev->bp_540)
ctrl = BP10G_READ_REG(pbpctl_dev, ESDP);
if (pbpctl_dev->bp_10g9) {
- if (!(pbpctl_dev_c = get_status_port_fn(pbpctl_dev)))
+ pbpctl_dev_c = get_status_port_fn(pbpctl_dev);
+ if (!pbpctl_dev_c)
return -1;
ctrl = BP10G_READ_REG(pbpctl_dev_c, ESDP);
}
@@ -720,16 +718,15 @@ static int read_pulse(bpctl_dev_t *pbpctl_dev, unsigned int ctrl_ext,
BP10G_MDIO_DATA_OUT));
}
- if (pbpctl_dev->bp_10g9) {
- ctrl_ext = BP10G_READ_REG(pbpctl_dev, I2CCTL);
- } else if ((pbpctl_dev->bp_fiber5) || (pbpctl_dev->bp_i80)) {
+ if (pbpctl_dev->bp_10g9)
+ ctrl_ext = BP10G_READ_REG(pbpctl_dev, I2CCTL);
+ else if ((pbpctl_dev->bp_fiber5) || (pbpctl_dev->bp_i80))
ctrl_ext = BPCTL_READ_REG(pbpctl_dev, CTRL);
- } else if (pbpctl_dev->bp_540) {
+ else if (pbpctl_dev->bp_540)
ctrl_ext = BP10G_READ_REG(pbpctl_dev, ESDP);
- } else if (pbpctl_dev->bp_10gb)
+ else if (pbpctl_dev->bp_10gb)
ctrl_ext = BP10GB_READ_REG(pbpctl_dev, MISC_REG_SPIO);
-
else if (!pbpctl_dev->bp_10g)
ctrl_ext = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
else
@@ -768,14 +765,15 @@ static int read_pulse(bpctl_dev_t *pbpctl_dev, unsigned int ctrl_ext,
return ctrl_val;
}
-static void write_reg(bpctl_dev_t *pbpctl_dev, unsigned char value,
+static void write_reg(struct bpctl_dev *pbpctl_dev, unsigned char value,
unsigned char addr)
{
uint32_t ctrl_ext = 0, ctrl = 0;
- bpctl_dev_t *pbpctl_dev_c = NULL;
+ struct bpctl_dev *pbpctl_dev_c = NULL;
unsigned long flags;
if (pbpctl_dev->bp_10g9) {
- if (!(pbpctl_dev_c = get_status_port_fn(pbpctl_dev)))
+ pbpctl_dev_c = get_status_port_fn(pbpctl_dev);
+ if (!pbpctl_dev_c)
return;
}
if ((pbpctl_dev->wdt_status == WDT_STATUS_EN) &&
@@ -936,15 +934,15 @@ static void write_reg(bpctl_dev_t *pbpctl_dev, unsigned char value,
}
-static void write_data(bpctl_dev_t *pbpctl_dev, unsigned char value)
+static void write_data(struct bpctl_dev *pbpctl_dev, unsigned char value)
{
write_reg(pbpctl_dev, value, CMND_REG_ADDR);
}
-static int read_reg(bpctl_dev_t *pbpctl_dev, unsigned char addr)
+static int read_reg(struct bpctl_dev *pbpctl_dev, unsigned char addr)
{
uint32_t ctrl_ext = 0, ctrl = 0, ctrl_value = 0;
- bpctl_dev_t *pbpctl_dev_c = NULL;
+ struct bpctl_dev *pbpctl_dev_c = NULL;
#ifdef BP_SYNC_FLAG
unsigned long flags;
@@ -953,7 +951,8 @@ static int read_reg(bpctl_dev_t *pbpctl_dev, unsigned char addr)
atomic_set(&pbpctl_dev->wdt_busy, 1);
#endif
if (pbpctl_dev->bp_10g9) {
- if (!(pbpctl_dev_c = get_status_port_fn(pbpctl_dev)))
+ pbpctl_dev_c = get_status_port_fn(pbpctl_dev);
+ if (!pbpctl_dev_c)
return -1;
}
@@ -1209,10 +1208,10 @@ static int read_reg(bpctl_dev_t *pbpctl_dev, unsigned char addr)
return ctrl_value;
}
-static int wdt_pulse(bpctl_dev_t *pbpctl_dev)
+static int wdt_pulse(struct bpctl_dev *pbpctl_dev)
{
uint32_t ctrl_ext = 0, ctrl = 0;
- bpctl_dev_t *pbpctl_dev_c = NULL;
+ struct bpctl_dev *pbpctl_dev_c = NULL;
#ifdef BP_SYNC_FLAG
unsigned long flags;
@@ -1224,7 +1223,8 @@ static int wdt_pulse(bpctl_dev_t *pbpctl_dev)
return -1;
#endif
if (pbpctl_dev->bp_10g9) {
- if (!(pbpctl_dev_c = get_status_port_fn(pbpctl_dev)))
+ pbpctl_dev_c = get_status_port_fn(pbpctl_dev);
+ if (!pbpctl_dev_c)
return -1;
}
@@ -1414,8 +1414,8 @@ static int wdt_pulse(bpctl_dev_t *pbpctl_dev)
(ctrl_ext &
~(BP10G_MCLK_DATA_OUT | BP10G_MDIO_DATA_OUT)));
}
- if ((pbpctl_dev->wdt_status == WDT_STATUS_EN) /*&&
- (pbpctl_dev->bp_ext_ver<PXG4BPFI_VER) */ )
+ if ((pbpctl_dev->wdt_status == WDT_STATUS_EN))
+ /*&& (pbpctl_dev->bp_ext_ver<PXG4BPFI_VER) */
pbpctl_dev->bypass_wdt_on_time = jiffies;
#ifdef BP_SYNC_FLAG
spin_unlock_irqrestore(&pbpctl_dev->bypass_wr_lock, flags);
@@ -1424,7 +1424,7 @@ static int wdt_pulse(bpctl_dev_t *pbpctl_dev)
return 0;
}
-static void data_pulse(bpctl_dev_t *pbpctl_dev, unsigned char value)
+static void data_pulse(struct bpctl_dev *pbpctl_dev, unsigned char value)
{
uint32_t ctrl_ext = 0;
@@ -1490,7 +1490,7 @@ static void data_pulse(bpctl_dev_t *pbpctl_dev, unsigned char value)
}
-static int send_wdt_pulse(bpctl_dev_t *pbpctl_dev)
+static int send_wdt_pulse(struct bpctl_dev *pbpctl_dev)
{
uint32_t ctrl_ext = 0;
@@ -1524,7 +1524,7 @@ static int send_wdt_pulse(bpctl_dev_t *pbpctl_dev)
return 0;
}
-void send_bypass_clear_pulse(bpctl_dev_t *pbpctl_dev, unsigned int value)
+void send_bypass_clear_pulse(struct bpctl_dev *pbpctl_dev, unsigned int value)
{
uint32_t ctrl_ext = 0;
@@ -1550,7 +1550,7 @@ void send_bypass_clear_pulse(bpctl_dev_t *pbpctl_dev, unsigned int value)
/* #endif OLD_FW */
#ifdef BYPASS_DEBUG
-int pulse_set_fn(bpctl_dev_t *pbpctl_dev, unsigned int counter)
+int pulse_set_fn(struct bpctl_dev *pbpctl_dev, unsigned int counter)
{
uint32_t ctrl_ext = 0;
@@ -1578,7 +1578,7 @@ int pulse_set_fn(bpctl_dev_t *pbpctl_dev, unsigned int counter)
return 0;
}
-int zero_set_fn(bpctl_dev_t *pbpctl_dev)
+int zero_set_fn(struct bpctl_dev *pbpctl_dev)
{
uint32_t ctrl_ext = 0, ctrl_value = 0;
if (!pbpctl_dev)
@@ -1603,7 +1603,7 @@ int zero_set_fn(bpctl_dev_t *pbpctl_dev)
return ctrl_value;
}
-int pulse_get2_fn(bpctl_dev_t *pbpctl_dev)
+int pulse_get2_fn(struct bpctl_dev *pbpctl_dev)
{
uint32_t ctrl_ext = 0, ctrl_value = 0;
if (!pbpctl_dev)
@@ -1618,7 +1618,7 @@ int pulse_get2_fn(bpctl_dev_t *pbpctl_dev)
return ctrl_value;
}
-int pulse_get1_fn(bpctl_dev_t *pbpctl_dev)
+int pulse_get1_fn(struct bpctl_dev *pbpctl_dev)
{
uint32_t ctrl_ext = 0, ctrl_value = 0;
if (!pbpctl_dev)
@@ -1635,7 +1635,7 @@ int pulse_get1_fn(bpctl_dev_t *pbpctl_dev)
return ctrl_value;
}
-int gpio6_set_fn(bpctl_dev_t *pbpctl_dev)
+int gpio6_set_fn(struct bpctl_dev *pbpctl_dev)
{
uint32_t ctrl_ext = 0;
@@ -1646,7 +1646,7 @@ int gpio6_set_fn(bpctl_dev_t *pbpctl_dev)
return 0;
}
-int gpio7_set_fn(bpctl_dev_t *pbpctl_dev)
+int gpio7_set_fn(struct bpctl_dev *pbpctl_dev)
{
uint32_t ctrl_ext = 0;
@@ -1657,7 +1657,7 @@ int gpio7_set_fn(bpctl_dev_t *pbpctl_dev)
return 0;
}
-int gpio7_clear_fn(bpctl_dev_t *pbpctl_dev)
+int gpio7_clear_fn(struct bpctl_dev *pbpctl_dev)
{
uint32_t ctrl_ext = 0;
@@ -1668,7 +1668,7 @@ int gpio7_clear_fn(bpctl_dev_t *pbpctl_dev)
return 0;
}
-int gpio6_clear_fn(bpctl_dev_t *pbpctl_dev)
+int gpio6_clear_fn(struct bpctl_dev *pbpctl_dev)
{
uint32_t ctrl_ext = 0;
@@ -1680,9 +1680,9 @@ int gpio6_clear_fn(bpctl_dev_t *pbpctl_dev)
}
#endif /*BYPASS_DEBUG */
-static bpctl_dev_t *lookup_port(bpctl_dev_t *dev)
+static struct bpctl_dev *lookup_port(struct bpctl_dev *dev)
{
- bpctl_dev_t *p;
+ struct bpctl_dev *p;
int n;
for (n = 0, p = bpctl_dev_arr; n < device_num && p->pdev; n++) {
if (p->bus == dev->bus
@@ -1693,7 +1693,7 @@ static bpctl_dev_t *lookup_port(bpctl_dev_t *dev)
return NULL;
}
-static bpctl_dev_t *get_status_port_fn(bpctl_dev_t *pbpctl_dev)
+static struct bpctl_dev *get_status_port_fn(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev) {
if (pbpctl_dev->func == 0 || pbpctl_dev->func == 2)
@@ -1702,7 +1702,7 @@ static bpctl_dev_t *get_status_port_fn(bpctl_dev_t *pbpctl_dev)
return NULL;
}
-static bpctl_dev_t *get_master_port_fn(bpctl_dev_t *pbpctl_dev)
+static struct bpctl_dev *get_master_port_fn(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev) {
if (pbpctl_dev->func == 1 || pbpctl_dev->func == 3)
@@ -1715,7 +1715,7 @@ static bpctl_dev_t *get_master_port_fn(bpctl_dev_t *pbpctl_dev)
/**************INTEL API***************/
/**************************************/
-static void write_data_port_int(bpctl_dev_t *pbpctl_dev,
+static void write_data_port_int(struct bpctl_dev *pbpctl_dev,
unsigned char ctrl_value)
{
uint32_t value;
@@ -1740,11 +1740,12 @@ static void write_data_port_int(bpctl_dev_t *pbpctl_dev,
}
-static int write_data_int(bpctl_dev_t *pbpctl_dev, unsigned char value)
+static int write_data_int(struct bpctl_dev *pbpctl_dev, unsigned char value)
{
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
- if (!(pbpctl_dev_b = get_status_port_fn(pbpctl_dev)))
+ pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
+ if (!pbpctl_dev_b)
return -1;
atomic_set(&pbpctl_dev->wdt_busy, 1);
write_data_port_int(pbpctl_dev, value & 0x3);
@@ -1754,7 +1755,7 @@ static int write_data_int(bpctl_dev_t *pbpctl_dev, unsigned char value)
return 0;
}
-static int wdt_pulse_int(bpctl_dev_t *pbpctl_dev)
+static int wdt_pulse_int(struct bpctl_dev *pbpctl_dev)
{
if ((atomic_read(&pbpctl_dev->wdt_busy)) == 1)
@@ -1778,7 +1779,7 @@ static int wdt_pulse_int(bpctl_dev_t *pbpctl_dev)
/*************************************/
/* CMND_ON 0x4 (100)*/
-int cmnd_on(bpctl_dev_t *pbpctl_dev)
+int cmnd_on(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -1795,7 +1796,7 @@ int cmnd_on(bpctl_dev_t *pbpctl_dev)
}
/* CMND_OFF 0x2 (10)*/
-int cmnd_off(bpctl_dev_t *pbpctl_dev)
+int cmnd_off(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -1808,12 +1809,12 @@ int cmnd_off(bpctl_dev_t *pbpctl_dev)
else
data_pulse(pbpctl_dev, CMND_OFF);
ret = 0;
- };
+ }
return ret;
}
/* BYPASS_ON (0xa)*/
-int bypass_on(bpctl_dev_t *pbpctl_dev)
+int bypass_on(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -1829,12 +1830,12 @@ int bypass_on(bpctl_dev_t *pbpctl_dev)
} else
data_pulse(pbpctl_dev, BYPASS_ON);
ret = 0;
- };
+ }
return ret;
}
/* BYPASS_OFF (0x8 111)*/
-int bypass_off(bpctl_dev_t *pbpctl_dev)
+int bypass_off(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -1857,7 +1858,7 @@ int bypass_off(bpctl_dev_t *pbpctl_dev)
}
/* TAP_OFF (0x9)*/
-int tap_off(bpctl_dev_t *pbpctl_dev)
+int tap_off(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
if ((pbpctl_dev->bp_caps & TAP_CAP)
@@ -1865,12 +1866,12 @@ int tap_off(bpctl_dev_t *pbpctl_dev)
write_data(pbpctl_dev, TAP_OFF);
msec_delay_bp(LATCH_DELAY);
ret = 0;
- };
+ }
return ret;
}
/* TAP_ON (0xb)*/
-int tap_on(bpctl_dev_t *pbpctl_dev)
+int tap_on(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
if ((pbpctl_dev->bp_caps & TAP_CAP)
@@ -1878,12 +1879,12 @@ int tap_on(bpctl_dev_t *pbpctl_dev)
write_data(pbpctl_dev, TAP_ON);
msec_delay_bp(LATCH_DELAY);
ret = 0;
- };
+ }
return ret;
}
/* DISC_OFF (0x9)*/
-int disc_off(bpctl_dev_t *pbpctl_dev)
+int disc_off(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if ((pbpctl_dev->bp_caps & DISC_CAP) && (pbpctl_dev->bp_ext_ver >= 0x8)) {
@@ -1895,7 +1896,7 @@ int disc_off(bpctl_dev_t *pbpctl_dev)
}
/* DISC_ON (0xb)*/
-int disc_on(bpctl_dev_t *pbpctl_dev)
+int disc_on(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if ((pbpctl_dev->bp_caps & DISC_CAP) && (pbpctl_dev->bp_ext_ver >= 0x8)) {
@@ -1907,10 +1908,10 @@ int disc_on(bpctl_dev_t *pbpctl_dev)
}
/* DISC_PORT_ON */
-int disc_port_on(bpctl_dev_t *pbpctl_dev)
+int disc_port_on(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
- bpctl_dev_t *pbpctl_dev_m;
+ struct bpctl_dev *pbpctl_dev_m;
if ((is_bypass_fn(pbpctl_dev)) == 1)
pbpctl_dev_m = pbpctl_dev;
@@ -1920,13 +1921,10 @@ int disc_port_on(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
if (pbpctl_dev_m->bp_caps_ex & DISC_PORT_CAP_EX) {
- if (is_bypass_fn(pbpctl_dev) == 1) {
-
+ if (is_bypass_fn(pbpctl_dev) == 1)
write_data(pbpctl_dev_m, TX_DISA);
- } else {
-
+ else
write_data(pbpctl_dev_m, TX_DISB);
- }
msec_delay_bp(LATCH_DELAY);
@@ -1935,10 +1933,10 @@ int disc_port_on(bpctl_dev_t *pbpctl_dev)
}
/* DISC_PORT_OFF */
-int disc_port_off(bpctl_dev_t *pbpctl_dev)
+int disc_port_off(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
- bpctl_dev_t *pbpctl_dev_m;
+ struct bpctl_dev *pbpctl_dev_m;
if ((is_bypass_fn(pbpctl_dev)) == 1)
pbpctl_dev_m = pbpctl_dev;
@@ -1960,12 +1958,13 @@ int disc_port_off(bpctl_dev_t *pbpctl_dev)
}
/*TWO_PORT_LINK_HW_EN (0xe)*/
-int tpl_hw_on(bpctl_dev_t *pbpctl_dev)
+int tpl_hw_on(struct bpctl_dev *pbpctl_dev)
{
int ret = 0, ctrl = 0;
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
- if (!(pbpctl_dev_b = get_status_port_fn(pbpctl_dev)))
+ pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
+ if (!pbpctl_dev_b)
return BP_NOT_CAP;
if (pbpctl_dev->bp_caps_ex & TPL2_CAP_EX) {
@@ -1987,12 +1986,13 @@ int tpl_hw_on(bpctl_dev_t *pbpctl_dev)
}
/*TWO_PORT_LINK_HW_DIS (0xc)*/
-int tpl_hw_off(bpctl_dev_t *pbpctl_dev)
+int tpl_hw_off(struct bpctl_dev *pbpctl_dev)
{
int ret = 0, ctrl = 0;
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
- if (!(pbpctl_dev_b = get_status_port_fn(pbpctl_dev)))
+ pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
+ if (!pbpctl_dev_b)
return BP_NOT_CAP;
if (pbpctl_dev->bp_caps_ex & TPL2_CAP_EX) {
cmnd_on(pbpctl_dev);
@@ -2012,20 +2012,20 @@ int tpl_hw_off(bpctl_dev_t *pbpctl_dev)
}
/* WDT_OFF (0x6 110)*/
-int wdt_off(bpctl_dev_t *pbpctl_dev)
+int wdt_off(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
- if (INTEL_IF_SERIES(pbpctl_dev->subdevice)) {
+ if (INTEL_IF_SERIES(pbpctl_dev->subdevice))
bypass_off(pbpctl_dev);
- } else if (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER)
+ else if (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER)
write_data(pbpctl_dev, WDT_OFF);
else
data_pulse(pbpctl_dev, WDT_OFF);
pbpctl_dev->wdt_status = WDT_STATUS_DIS;
ret = 0;
- };
+ }
return ret;
}
@@ -2035,7 +2035,7 @@ int wdt_off(bpctl_dev_t *pbpctl_dev)
static unsigned int
wdt_val_array[] = { 1000, 1500, 2000, 3000, 4000, 8000, 16000, 32000, 0 };
-int wdt_on(bpctl_dev_t *pbpctl_dev, unsigned int timeout)
+int wdt_on(struct bpctl_dev *pbpctl_dev, unsigned int timeout)
{
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
@@ -2087,7 +2087,7 @@ int wdt_on(bpctl_dev_t *pbpctl_dev, unsigned int timeout)
return BP_NOT_CAP;
}
-void bp75_put_hw_semaphore_generic(bpctl_dev_t *pbpctl_dev)
+void bp75_put_hw_semaphore_generic(struct bpctl_dev *pbpctl_dev)
{
u32 swsm;
@@ -2098,7 +2098,7 @@ void bp75_put_hw_semaphore_generic(bpctl_dev_t *pbpctl_dev)
BPCTL_WRITE_REG(pbpctl_dev, SWSM, swsm);
}
-s32 bp75_get_hw_semaphore_generic(bpctl_dev_t *pbpctl_dev)
+s32 bp75_get_hw_semaphore_generic(struct bpctl_dev *pbpctl_dev)
{
u32 swsm;
s32 ret_val = 0;
@@ -2146,16 +2146,18 @@ s32 bp75_get_hw_semaphore_generic(bpctl_dev_t *pbpctl_dev)
return ret_val;
}
-static void bp75_release_phy(bpctl_dev_t *pbpctl_dev)
+static void bp75_release_phy(struct bpctl_dev *pbpctl_dev)
{
u16 mask = BPCTLI_SWFW_PHY0_SM;
u32 swfw_sync;
+ s32 ret_val;
if ((pbpctl_dev->func == 1) || (pbpctl_dev->func == 3))
mask = BPCTLI_SWFW_PHY1_SM;
- while (bp75_get_hw_semaphore_generic(pbpctl_dev) != 0) ;
- /* Empty */
+ do
+ ret_val = bp75_get_hw_semaphore_generic(pbpctl_dev);
+ while (ret_val != 0);
swfw_sync = BPCTL_READ_REG(pbpctl_dev, SW_FW_SYNC);
swfw_sync &= ~mask;
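bp75_release_phy() used to spin on the hardware semaphore with an empty-bodied `while (...) ;`; the hunk turns that into a do/while with an explicit ret_val, so the repeated call is visible and the bare semicolon goes away. Condensed from the hunk, the acquire/update/release sequence now reads:

        s32 ret_val;

        do                                      /* spin until the HW semaphore is ours */
                ret_val = bp75_get_hw_semaphore_generic(pbpctl_dev);
        while (ret_val != 0);

        swfw_sync = BPCTL_READ_REG(pbpctl_dev, SW_FW_SYNC);
        swfw_sync &= ~mask;
        /* ...the cleared mask is written back (not shown in the hunk)... */
        bp75_put_hw_semaphore_generic(pbpctl_dev);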
@@ -2164,7 +2166,7 @@ static void bp75_release_phy(bpctl_dev_t *pbpctl_dev)
bp75_put_hw_semaphore_generic(pbpctl_dev);
}
-static s32 bp75_acquire_phy(bpctl_dev_t *pbpctl_dev)
+static s32 bp75_acquire_phy(struct bpctl_dev *pbpctl_dev)
{
u16 mask = BPCTLI_SWFW_PHY0_SM;
u32 swfw_sync;
@@ -2210,7 +2212,7 @@ static s32 bp75_acquire_phy(bpctl_dev_t *pbpctl_dev)
return ret_val;
}
-s32 bp75_read_phy_reg_mdic(bpctl_dev_t *pbpctl_dev, u32 offset, u16 *data)
+s32 bp75_read_phy_reg_mdic(struct bpctl_dev *pbpctl_dev, u32 offset, u16 *data)
{
u32 i, mdic = 0;
s32 ret_val = 0;
@@ -2243,7 +2245,7 @@ s32 bp75_read_phy_reg_mdic(bpctl_dev_t *pbpctl_dev, u32 offset, u16 *data)
return ret_val;
}
-s32 bp75_write_phy_reg_mdic(bpctl_dev_t *pbpctl_dev, u32 offset, u16 data)
+s32 bp75_write_phy_reg_mdic(struct bpctl_dev *pbpctl_dev, u32 offset, u16 data)
{
u32 i, mdic = 0;
s32 ret_val = 0;
@@ -2276,7 +2278,7 @@ s32 bp75_write_phy_reg_mdic(bpctl_dev_t *pbpctl_dev, u32 offset, u16 data)
return ret_val;
}
-static s32 bp75_read_phy_reg(bpctl_dev_t *pbpctl_dev, u32 offset, u16 *data)
+static s32 bp75_read_phy_reg(struct bpctl_dev *pbpctl_dev, u32 offset, u16 *data)
{
s32 ret_val = 0;
@@ -2302,7 +2304,7 @@ static s32 bp75_read_phy_reg(bpctl_dev_t *pbpctl_dev, u32 offset, u16 *data)
return ret_val;
}
-static s32 bp75_write_phy_reg(bpctl_dev_t *pbpctl_dev, u32 offset, u16 data)
+static s32 bp75_write_phy_reg(struct bpctl_dev *pbpctl_dev, u32 offset, u16 data)
{
s32 ret_val = 0;
@@ -2330,10 +2332,10 @@ static s32 bp75_write_phy_reg(bpctl_dev_t *pbpctl_dev, u32 offset, u16 data)
}
/* SET_TX (non-Bypass command :)) */
-static int set_tx(bpctl_dev_t *pbpctl_dev, int tx_state)
+static int set_tx(struct bpctl_dev *pbpctl_dev, int tx_state)
{
int ret = 0, ctrl = 0;
- bpctl_dev_t *pbpctl_dev_m;
+ struct bpctl_dev *pbpctl_dev_m;
if ((is_bypass_fn(pbpctl_dev)) == 1)
pbpctl_dev_m = pbpctl_dev;
else
@@ -2404,12 +2406,10 @@ static int set_tx(bpctl_dev_t *pbpctl_dev, int tx_state)
}
}
- if (pbpctl_dev->bp_fiber5) {
+ if (pbpctl_dev->bp_fiber5)
ctrl = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
-
- } else if (pbpctl_dev->bp_10gb)
+ else if (pbpctl_dev->bp_10gb)
ctrl = BP10GB_READ_REG(pbpctl_dev, MISC_REG_GPIO);
-
else if (!pbpctl_dev->bp_10g)
ctrl = BPCTL_READ_REG(pbpctl_dev, CTRL);
else
@@ -2532,7 +2532,7 @@ static int set_tx(bpctl_dev_t *pbpctl_dev, int tx_state)
}
/* SET_FORCE_LINK (non-Bypass command :)) */
-static int set_bp_force_link(bpctl_dev_t *pbpctl_dev, int tx_state)
+static int set_bp_force_link(struct bpctl_dev *pbpctl_dev, int tx_state)
{
int ret = 0, ctrl = 0;
@@ -2556,7 +2556,7 @@ static int set_bp_force_link(bpctl_dev_t *pbpctl_dev, int tx_state)
}
/*RESET_CONT 0x20 */
-int reset_cont(bpctl_dev_t *pbpctl_dev)
+int reset_cont(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -2568,12 +2568,12 @@ int reset_cont(bpctl_dev_t *pbpctl_dev)
else
data_pulse(pbpctl_dev, RESET_CONT);
ret = 0;
- };
+ }
return ret;
}
/*DIS_BYPASS_CAP 0x22 */
-int dis_bypass_cap(bpctl_dev_t *pbpctl_dev)
+int dis_bypass_cap(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & BP_DIS_CAP) {
@@ -2592,7 +2592,7 @@ int dis_bypass_cap(bpctl_dev_t *pbpctl_dev)
}
/*EN_BYPASS_CAP 0x24 */
-int en_bypass_cap(bpctl_dev_t *pbpctl_dev)
+int en_bypass_cap(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & BP_DIS_CAP) {
if (INTEL_IF_SERIES(pbpctl_dev->subdevice)) {
@@ -2608,7 +2608,7 @@ int en_bypass_cap(bpctl_dev_t *pbpctl_dev)
}
/* BYPASS_STATE_PWRON 0x26*/
-int bypass_state_pwron(bpctl_dev_t *pbpctl_dev)
+int bypass_state_pwron(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & BP_PWUP_CTL_CAP) {
write_data(pbpctl_dev, BYPASS_STATE_PWRON);
@@ -2622,7 +2622,7 @@ int bypass_state_pwron(bpctl_dev_t *pbpctl_dev)
}
/* NORMAL_STATE_PWRON 0x28*/
-int normal_state_pwron(bpctl_dev_t *pbpctl_dev)
+int normal_state_pwron(struct bpctl_dev *pbpctl_dev)
{
if ((pbpctl_dev->bp_caps & BP_PWUP_CTL_CAP)
|| (pbpctl_dev->bp_caps & TAP_PWUP_CTL_CAP)) {
@@ -2637,7 +2637,7 @@ int normal_state_pwron(bpctl_dev_t *pbpctl_dev)
}
/* BYPASS_STATE_PWROFF 0x27*/
-int bypass_state_pwroff(bpctl_dev_t *pbpctl_dev)
+int bypass_state_pwroff(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & BP_PWOFF_CTL_CAP) {
write_data(pbpctl_dev, BYPASS_STATE_PWROFF);
@@ -2648,7 +2648,7 @@ int bypass_state_pwroff(bpctl_dev_t *pbpctl_dev)
}
/* NORMAL_STATE_PWROFF 0x29*/
-int normal_state_pwroff(bpctl_dev_t *pbpctl_dev)
+int normal_state_pwroff(struct bpctl_dev *pbpctl_dev)
{
if ((pbpctl_dev->bp_caps & BP_PWOFF_CTL_CAP)) {
write_data(pbpctl_dev, NORMAL_STATE_PWROFF);
@@ -2659,7 +2659,7 @@ int normal_state_pwroff(bpctl_dev_t *pbpctl_dev)
}
/*TAP_STATE_PWRON 0x2a*/
-int tap_state_pwron(bpctl_dev_t *pbpctl_dev)
+int tap_state_pwron(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & TAP_PWUP_CTL_CAP) {
write_data(pbpctl_dev, TAP_STATE_PWRON);
@@ -2670,7 +2670,7 @@ int tap_state_pwron(bpctl_dev_t *pbpctl_dev)
}
/*DIS_TAP_CAP 0x2c*/
-int dis_tap_cap(bpctl_dev_t *pbpctl_dev)
+int dis_tap_cap(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & TAP_DIS_CAP) {
write_data(pbpctl_dev, DIS_TAP_CAP);
@@ -2681,7 +2681,7 @@ int dis_tap_cap(bpctl_dev_t *pbpctl_dev)
}
/*EN_TAP_CAP 0x2e*/
-int en_tap_cap(bpctl_dev_t *pbpctl_dev)
+int en_tap_cap(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & TAP_DIS_CAP) {
write_data(pbpctl_dev, EN_TAP_CAP);
@@ -2692,7 +2692,7 @@ int en_tap_cap(bpctl_dev_t *pbpctl_dev)
}
/*DISC_STATE_PWRON 0x2a*/
-int disc_state_pwron(bpctl_dev_t *pbpctl_dev)
+int disc_state_pwron(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & DISC_PWUP_CTL_CAP) {
if (pbpctl_dev->bp_ext_ver >= 0x8) {
@@ -2705,7 +2705,7 @@ int disc_state_pwron(bpctl_dev_t *pbpctl_dev)
}
/*DIS_DISC_CAP 0x2c*/
-int dis_disc_cap(bpctl_dev_t *pbpctl_dev)
+int dis_disc_cap(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & DISC_DIS_CAP) {
if (pbpctl_dev->bp_ext_ver >= 0x8) {
@@ -2718,10 +2718,10 @@ int dis_disc_cap(bpctl_dev_t *pbpctl_dev)
}
/*DISC_STATE_PWRON 0x2a*/
-int disc_port_state_pwron(bpctl_dev_t *pbpctl_dev)
+int disc_port_state_pwron(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
- bpctl_dev_t *pbpctl_dev_m;
+ struct bpctl_dev *pbpctl_dev_m;
return BP_NOT_CAP;
@@ -2744,10 +2744,10 @@ int disc_port_state_pwron(bpctl_dev_t *pbpctl_dev)
return ret;
}
-int normal_port_state_pwron(bpctl_dev_t *pbpctl_dev)
+int normal_port_state_pwron(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
- bpctl_dev_t *pbpctl_dev_m;
+ struct bpctl_dev *pbpctl_dev_m;
return BP_NOT_CAP;
if ((is_bypass_fn(pbpctl_dev)) == 1)
@@ -2770,7 +2770,7 @@ int normal_port_state_pwron(bpctl_dev_t *pbpctl_dev)
}
/*EN_TAP_CAP 0x2e*/
-int en_disc_cap(bpctl_dev_t *pbpctl_dev)
+int en_disc_cap(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & DISC_DIS_CAP) {
if (pbpctl_dev->bp_ext_ver >= 0x8) {
@@ -2782,7 +2782,7 @@ int en_disc_cap(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int std_nic_on(bpctl_dev_t *pbpctl_dev)
+int std_nic_on(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & STD_NIC_CAP) {
@@ -2836,7 +2836,7 @@ int std_nic_on(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int std_nic_off(bpctl_dev_t *pbpctl_dev)
+int std_nic_off(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & STD_NIC_CAP) {
@@ -2888,7 +2888,7 @@ int std_nic_off(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int wdt_time_left(bpctl_dev_t *pbpctl_dev)
+int wdt_time_left(struct bpctl_dev *pbpctl_dev)
{
/* unsigned long curr_time=((long long)(jiffies*1000))/HZ, delta_time=0,wdt_on_time=((long long)(pbpctl_dev->bypass_wdt_on_time*1000))/HZ; */
@@ -2920,7 +2920,7 @@ int wdt_time_left(bpctl_dev_t *pbpctl_dev)
return time_left;
}
-static int wdt_timer(bpctl_dev_t *pbpctl_dev, int *time_left)
+static int wdt_timer(struct bpctl_dev *pbpctl_dev, int *time_left)
{
int ret = 0;
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
@@ -2936,7 +2936,7 @@ static int wdt_timer(bpctl_dev_t *pbpctl_dev, int *time_left)
return ret;
}
-static int wdt_timer_reload(bpctl_dev_t *pbpctl_dev)
+static int wdt_timer_reload(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
@@ -2960,7 +2960,7 @@ static int wdt_timer_reload(bpctl_dev_t *pbpctl_dev)
static void wd_reset_timer(unsigned long param)
{
- bpctl_dev_t *pbpctl_dev = (bpctl_dev_t *) param;
+ struct bpctl_dev *pbpctl_dev = (struct bpctl_dev *) param;
#ifdef BP_SELF_TEST
struct sk_buff *skb_tmp;
#endif
@@ -2999,7 +2999,7 @@ static void wd_reset_timer(unsigned long param)
}
/*WAIT_AT_PWRUP 0x80 */
-int bp_wait_at_pwup_en(bpctl_dev_t *pbpctl_dev)
+int bp_wait_at_pwup_en(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
@@ -3014,7 +3014,7 @@ int bp_wait_at_pwup_en(bpctl_dev_t *pbpctl_dev)
}
/*DIS_WAIT_AT_PWRUP 0x81 */
-int bp_wait_at_pwup_dis(bpctl_dev_t *pbpctl_dev)
+int bp_wait_at_pwup_dis(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
@@ -3031,7 +3031,7 @@ int bp_wait_at_pwup_dis(bpctl_dev_t *pbpctl_dev)
/*EN_HW_RESET 0x82 */
-int bp_hw_reset_en(bpctl_dev_t *pbpctl_dev)
+int bp_hw_reset_en(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
@@ -3047,7 +3047,7 @@ int bp_hw_reset_en(bpctl_dev_t *pbpctl_dev)
/*DIS_HW_RESET 0x83 */
-int bp_hw_reset_dis(bpctl_dev_t *pbpctl_dev)
+int bp_hw_reset_dis(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
@@ -3062,7 +3062,7 @@ int bp_hw_reset_dis(bpctl_dev_t *pbpctl_dev)
}
-int wdt_exp_mode(bpctl_dev_t *pbpctl_dev, int mode)
+int wdt_exp_mode(struct bpctl_dev *pbpctl_dev, int mode)
{
uint32_t status_reg = 0, status_reg1 = 0;
@@ -3113,7 +3113,7 @@ int wdt_exp_mode(bpctl_dev_t *pbpctl_dev, int mode)
return BP_NOT_CAP;
}
-int bypass_fw_ver(bpctl_dev_t *pbpctl_dev)
+int bypass_fw_ver(struct bpctl_dev *pbpctl_dev)
{
if (is_bypass_fn(pbpctl_dev))
return read_reg(pbpctl_dev, VER_REG_ADDR);
@@ -3121,7 +3121,7 @@ int bypass_fw_ver(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int bypass_sign_check(bpctl_dev_t *pbpctl_dev)
+int bypass_sign_check(struct bpctl_dev *pbpctl_dev)
{
if (is_bypass_fn(pbpctl_dev))
@@ -3131,10 +3131,10 @@ int bypass_sign_check(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-static int tx_status(bpctl_dev_t *pbpctl_dev)
+static int tx_status(struct bpctl_dev *pbpctl_dev)
{
uint32_t ctrl = 0;
- bpctl_dev_t *pbpctl_dev_m;
+ struct bpctl_dev *pbpctl_dev_m;
if ((is_bypass_fn(pbpctl_dev)) == 1)
pbpctl_dev_m = pbpctl_dev;
else
@@ -3218,7 +3218,7 @@ static int tx_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-static int bp_force_link_status(bpctl_dev_t *pbpctl_dev)
+static int bp_force_link_status(struct bpctl_dev *pbpctl_dev)
{
if (DBI_IF_SERIES(pbpctl_dev->subdevice)) {
@@ -3232,13 +3232,15 @@ static int bp_force_link_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int bypass_from_last_read(bpctl_dev_t *pbpctl_dev)
+int bypass_from_last_read(struct bpctl_dev *pbpctl_dev)
{
uint32_t ctrl_ext = 0;
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
- if ((pbpctl_dev->bp_caps & SW_CTL_CAP)
- && (pbpctl_dev_b = get_status_port_fn(pbpctl_dev))) {
+ if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
+ pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
+ if (!pbpctl_dev_b)
+ return BP_NOT_CAP;
ctrl_ext = BPCTL_READ_REG(pbpctl_dev_b, CTRL_EXT);
BPCTL_BP_WRITE_REG(pbpctl_dev_b, CTRL_EXT,
(ctrl_ext & ~BPCTLI_CTRL_EXT_SDP7_DIR));
@@ -3250,20 +3252,21 @@ int bypass_from_last_read(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int bypass_status_clear(bpctl_dev_t *pbpctl_dev)
+int bypass_status_clear(struct bpctl_dev *pbpctl_dev)
{
- bpctl_dev_t *pbpctl_dev_b = NULL;
-
- if ((pbpctl_dev->bp_caps & SW_CTL_CAP)
- && (pbpctl_dev_b = get_status_port_fn(pbpctl_dev))) {
+ struct bpctl_dev *pbpctl_dev_b = NULL;
+ if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
+ pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
+ if (!pbpctl_dev_b)
+ return BP_NOT_CAP;
send_bypass_clear_pulse(pbpctl_dev_b, 1);
return 0;
} else
return BP_NOT_CAP;
}
-int bypass_flag_status(bpctl_dev_t *pbpctl_dev)
+int bypass_flag_status(struct bpctl_dev *pbpctl_dev)
{
if ((pbpctl_dev->bp_caps & BP_CAP)) {
@@ -3276,7 +3279,7 @@ int bypass_flag_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int bypass_flag_status_clear(bpctl_dev_t *pbpctl_dev)
+int bypass_flag_status_clear(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & BP_CAP) {
@@ -3291,7 +3294,7 @@ int bypass_flag_status_clear(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int bypass_change_status(bpctl_dev_t *pbpctl_dev)
+int bypass_change_status(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -3310,7 +3313,7 @@ int bypass_change_status(bpctl_dev_t *pbpctl_dev)
return ret;
}
-int bypass_off_status(bpctl_dev_t *pbpctl_dev)
+int bypass_off_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & BP_CAP) {
@@ -3322,14 +3325,15 @@ int bypass_off_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-static int bypass_status(bpctl_dev_t *pbpctl_dev)
+static int bypass_status(struct bpctl_dev *pbpctl_dev)
{
u32 ctrl_ext = 0;
if (pbpctl_dev->bp_caps & BP_CAP) {
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
- if (!(pbpctl_dev_b = get_status_port_fn(pbpctl_dev)))
+ pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
+ if (!pbpctl_dev_b)
return BP_NOT_CAP;
if (INTEL_IF_SERIES(pbpctl_dev->subdevice)) {
@@ -3391,7 +3395,7 @@ static int bypass_status(bpctl_dev_t *pbpctl_dev)
BP10G_SDP7_DATA_IN) != 0 ? 0 : 1);
}
- } else if (pbpctl_dev->media_type == bp_copper) {
+ } else if (pbpctl_dev->media_type == BP_COPPER) {
return (((BPCTL_READ_REG(pbpctl_dev_b, CTRL)) &
BPCTLI_CTRL_SWDPIN1) != 0 ? 1 : 0);
@@ -3404,7 +3408,7 @@ static int bypass_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int default_pwron_status(bpctl_dev_t *pbpctl_dev)
+int default_pwron_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
@@ -3422,7 +3426,7 @@ int default_pwron_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-static int default_pwroff_status(bpctl_dev_t *pbpctl_dev)
+static int default_pwroff_status(struct bpctl_dev *pbpctl_dev)
{
/*if ((!pbpctl_dev->bp_caps&BP_DIS_CAP)&&
@@ -3436,7 +3440,7 @@ static int default_pwroff_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int dis_bypass_cap_status(bpctl_dev_t *pbpctl_dev)
+int dis_bypass_cap_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & BP_DIS_CAP) {
@@ -3449,7 +3453,7 @@ int dis_bypass_cap_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int cmd_en_status(bpctl_dev_t *pbpctl_dev)
+int cmd_en_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
@@ -3461,7 +3465,7 @@ int cmd_en_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int wdt_en_status(bpctl_dev_t *pbpctl_dev)
+int wdt_en_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
@@ -3473,7 +3477,7 @@ int wdt_en_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int wdt_programmed(bpctl_dev_t *pbpctl_dev, int *timeout)
+int wdt_programmed(struct bpctl_dev *pbpctl_dev, int *timeout)
{
int ret = 0;
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
@@ -3493,13 +3497,13 @@ int wdt_programmed(bpctl_dev_t *pbpctl_dev, int *timeout)
*timeout =
curr_wdt_status ==
0 ? 0 : pbpctl_dev->bypass_timer_interval;
- };
+ }
} else
ret = BP_NOT_CAP;
return ret;
}
-int bypass_support(bpctl_dev_t *pbpctl_dev)
+int bypass_support(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
@@ -3516,7 +3520,7 @@ int bypass_support(bpctl_dev_t *pbpctl_dev)
return ret;
}
-int tap_support(bpctl_dev_t *pbpctl_dev)
+int tap_support(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
@@ -3532,7 +3536,7 @@ int tap_support(bpctl_dev_t *pbpctl_dev)
return ret;
}
-int normal_support(bpctl_dev_t *pbpctl_dev)
+int normal_support(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -3544,11 +3548,11 @@ int normal_support(bpctl_dev_t *pbpctl_dev)
NORMAL_UNSUPPORT_MASK) ? 0 : 1);
} else
ret = 1;
- };
+ }
return ret;
}
-int get_bp_prod_caps(bpctl_dev_t *pbpctl_dev)
+int get_bp_prod_caps(struct bpctl_dev *pbpctl_dev)
{
if ((pbpctl_dev->bp_caps & SW_CTL_CAP) &&
(pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER))
@@ -3557,7 +3561,7 @@ int get_bp_prod_caps(bpctl_dev_t *pbpctl_dev)
}
-int tap_flag_status(bpctl_dev_t *pbpctl_dev)
+int tap_flag_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & TAP_STATUS_CAP) {
@@ -3569,7 +3573,7 @@ int tap_flag_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int tap_flag_status_clear(bpctl_dev_t *pbpctl_dev)
+int tap_flag_status_clear(struct bpctl_dev *pbpctl_dev)
{
uint32_t status_reg = 0;
if (pbpctl_dev->bp_caps & TAP_STATUS_CAP) {
@@ -3583,7 +3587,7 @@ int tap_flag_status_clear(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int tap_change_status(bpctl_dev_t *pbpctl_dev)
+int tap_change_status(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
if (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER) {
@@ -3600,7 +3604,7 @@ int tap_change_status(bpctl_dev_t *pbpctl_dev)
return ret;
}
-int tap_off_status(bpctl_dev_t *pbpctl_dev)
+int tap_off_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & TAP_CAP) {
if (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER)
@@ -3610,14 +3614,15 @@ int tap_off_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int tap_status(bpctl_dev_t *pbpctl_dev)
+int tap_status(struct bpctl_dev *pbpctl_dev)
{
u32 ctrl_ext = 0;
if (pbpctl_dev->bp_caps & TAP_CAP) {
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
- if (!(pbpctl_dev_b = get_status_port_fn(pbpctl_dev)))
+ pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
+ if (!pbpctl_dev_b)
return BP_NOT_CAP;
if (pbpctl_dev->bp_ext_ver >= 0x8) {
@@ -3636,7 +3641,7 @@ int tap_status(bpctl_dev_t *pbpctl_dev)
BP10G_SDP6_DATA_IN) != 0 ? 0 : 1);
}
- } else if (pbpctl_dev->media_type == bp_copper)
+ } else if (pbpctl_dev->media_type == BP_COPPER)
return (((BPCTL_READ_REG(pbpctl_dev, CTRL)) &
BPCTLI_CTRL_SWDPIN0) != 0 ? 1 : 0);
else {
@@ -3648,7 +3653,7 @@ int tap_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int default_pwron_tap_status(bpctl_dev_t *pbpctl_dev)
+int default_pwron_tap_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & TAP_PWUP_CTL_CAP) {
if (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER)
@@ -3659,7 +3664,7 @@ int default_pwron_tap_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int dis_tap_cap_status(bpctl_dev_t *pbpctl_dev)
+int dis_tap_cap_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & TAP_PWUP_CTL_CAP) {
if (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER)
@@ -3670,7 +3675,7 @@ int dis_tap_cap_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int disc_flag_status(bpctl_dev_t *pbpctl_dev)
+int disc_flag_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & DISC_CAP) {
@@ -3682,7 +3687,7 @@ int disc_flag_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int disc_flag_status_clear(bpctl_dev_t *pbpctl_dev)
+int disc_flag_status_clear(struct bpctl_dev *pbpctl_dev)
{
uint32_t status_reg = 0;
if (pbpctl_dev->bp_caps & DISC_CAP) {
@@ -3696,7 +3701,7 @@ int disc_flag_status_clear(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int disc_change_status(bpctl_dev_t *pbpctl_dev)
+int disc_change_status(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
if (pbpctl_dev->bp_caps & DISC_CAP) {
@@ -3707,13 +3712,14 @@ int disc_change_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int disc_off_status(bpctl_dev_t *pbpctl_dev)
+int disc_off_status(struct bpctl_dev *pbpctl_dev)
{
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
u32 ctrl_ext = 0;
if (pbpctl_dev->bp_caps & DISC_CAP) {
- if (!(pbpctl_dev_b = get_status_port_fn(pbpctl_dev)))
+ pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
+ if (!pbpctl_dev_b)
return BP_NOT_CAP;
if (DISCF_IF_SERIES(pbpctl_dev->subdevice))
return ((((read_reg(pbpctl_dev, STATUS_DISC_REG_ADDR)) &
@@ -3730,7 +3736,7 @@ int disc_off_status(bpctl_dev_t *pbpctl_dev)
BP10G_SDP2_DATA) != 0 ? 1 : 0);
}
- if (pbpctl_dev->media_type == bp_copper) {
+ if (pbpctl_dev->media_type == BP_COPPER) {
#if 0
return ((((read_reg(pbpctl_dev, STATUS_DISC_REG_ADDR)) &
@@ -3790,20 +3796,19 @@ int disc_off_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-static int disc_status(bpctl_dev_t *pbpctl_dev)
+static int disc_status(struct bpctl_dev *pbpctl_dev)
{
int ctrl = 0;
if (pbpctl_dev->bp_caps & DISC_CAP) {
-
- if ((ctrl = disc_off_status(pbpctl_dev)) < 0)
+ ctrl = disc_off_status(pbpctl_dev);
+ if (ctrl < 0)
return ctrl;
return ((ctrl == 0) ? 1 : 0);
-
}
return BP_NOT_CAP;
}
-int default_pwron_disc_status(bpctl_dev_t *pbpctl_dev)
+int default_pwron_disc_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & DISC_PWUP_CTL_CAP) {
if (pbpctl_dev->bp_ext_ver >= 0x8)
@@ -3814,7 +3819,7 @@ int default_pwron_disc_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int dis_disc_cap_status(bpctl_dev_t *pbpctl_dev)
+int dis_disc_cap_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & DIS_DISC_CAP) {
if (pbpctl_dev->bp_ext_ver >= 0x8)
@@ -3825,10 +3830,10 @@ int dis_disc_cap_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int disc_port_status(bpctl_dev_t *pbpctl_dev)
+int disc_port_status(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
- bpctl_dev_t *pbpctl_dev_m;
+ struct bpctl_dev *pbpctl_dev_m;
if ((is_bypass_fn(pbpctl_dev)) == 1)
pbpctl_dev_m = pbpctl_dev;
@@ -3849,10 +3854,10 @@ int disc_port_status(bpctl_dev_t *pbpctl_dev)
return ret;
}
-int default_pwron_disc_port_status(bpctl_dev_t *pbpctl_dev)
+int default_pwron_disc_port_status(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
- bpctl_dev_t *pbpctl_dev_m;
+ struct bpctl_dev *pbpctl_dev_m;
if ((is_bypass_fn(pbpctl_dev)) == 1)
pbpctl_dev_m = pbpctl_dev;
@@ -3873,7 +3878,7 @@ int default_pwron_disc_port_status(bpctl_dev_t *pbpctl_dev)
return ret;
}
-int wdt_exp_mode_status(bpctl_dev_t *pbpctl_dev)
+int wdt_exp_mode_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
if (pbpctl_dev->bp_ext_ver <= PXG2BPI_VER)
@@ -3896,7 +3901,7 @@ int wdt_exp_mode_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int tpl2_flag_status(bpctl_dev_t *pbpctl_dev)
+int tpl2_flag_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps_ex & TPL2_CAP_EX) {
@@ -3907,11 +3912,12 @@ int tpl2_flag_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int tpl_hw_status(bpctl_dev_t *pbpctl_dev)
+int tpl_hw_status(struct bpctl_dev *pbpctl_dev)
{
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
- if (!(pbpctl_dev_b = get_status_port_fn(pbpctl_dev)))
+ pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
+ if (!pbpctl_dev_b)
return BP_NOT_CAP;
if (TPL_IF_SERIES(pbpctl_dev->subdevice))
@@ -3921,7 +3927,7 @@ int tpl_hw_status(bpctl_dev_t *pbpctl_dev)
}
-int bp_wait_at_pwup_status(bpctl_dev_t *pbpctl_dev)
+int bp_wait_at_pwup_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
if (pbpctl_dev->bp_ext_ver >= 0x8)
@@ -3932,7 +3938,7 @@ int bp_wait_at_pwup_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int bp_hw_reset_status(bpctl_dev_t *pbpctl_dev)
+int bp_hw_reset_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
@@ -3946,7 +3952,7 @@ int bp_hw_reset_status(bpctl_dev_t *pbpctl_dev)
}
-int std_nic_status(bpctl_dev_t *pbpctl_dev)
+int std_nic_status(struct bpctl_dev *pbpctl_dev)
{
int status_val = 0;
@@ -3994,10 +4000,10 @@ int std_nic_status(bpctl_dev_t *pbpctl_dev)
/******************************************************/
/**************SW_INIT*********************************/
/******************************************************/
-void bypass_caps_init(bpctl_dev_t *pbpctl_dev)
+void bypass_caps_init(struct bpctl_dev *pbpctl_dev)
{
u_int32_t ctrl_ext = 0;
- bpctl_dev_t *pbpctl_dev_m = NULL;
+ struct bpctl_dev *pbpctl_dev_m = NULL;
#ifdef BYPASS_DEBUG
int ret = 0;
@@ -4021,42 +4027,41 @@ void bypass_caps_init(bpctl_dev_t *pbpctl_dev)
}
#endif
if ((pbpctl_dev->bp_fiber5) || (pbpctl_dev->bp_10g9)) {
- pbpctl_dev->media_type = bp_fiber;
+ pbpctl_dev->media_type = BP_FIBER;
} else if (pbpctl_dev->bp_10gb) {
if (BP10GB_CX4_SERIES(pbpctl_dev->subdevice))
- pbpctl_dev->media_type = bp_cx4;
+ pbpctl_dev->media_type = BP_CX4;
else
- pbpctl_dev->media_type = bp_fiber;
+ pbpctl_dev->media_type = BP_FIBER;
}
else if (pbpctl_dev->bp_540)
- pbpctl_dev->media_type = bp_none;
+ pbpctl_dev->media_type = BP_NONE;
else if (!pbpctl_dev->bp_10g) {
ctrl_ext = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
if ((ctrl_ext & BPCTLI_CTRL_EXT_LINK_MODE_MASK) == 0x0)
- pbpctl_dev->media_type = bp_copper;
+ pbpctl_dev->media_type = BP_COPPER;
else
- pbpctl_dev->media_type = bp_fiber;
+ pbpctl_dev->media_type = BP_FIBER;
} else {
if (BP10G_CX4_SERIES(pbpctl_dev->subdevice))
- pbpctl_dev->media_type = bp_cx4;
+ pbpctl_dev->media_type = BP_CX4;
else
- pbpctl_dev->media_type = bp_fiber;
+ pbpctl_dev->media_type = BP_FIBER;
}
if (is_bypass_fn(pbpctl_dev)) {
pbpctl_dev->bp_caps |= BP_PWOFF_ON_CAP;
- if (pbpctl_dev->media_type == bp_fiber)
+ if (pbpctl_dev->media_type == BP_FIBER)
pbpctl_dev->bp_caps |=
(TX_CTL_CAP | TX_STATUS_CAP | TPL_CAP);
- if (TPL_IF_SERIES(pbpctl_dev->subdevice)) {
+ if (TPL_IF_SERIES(pbpctl_dev->subdevice))
pbpctl_dev->bp_caps |= TPL_CAP;
- }
if (INTEL_IF_SERIES(pbpctl_dev->subdevice)) {
pbpctl_dev->bp_caps |=
@@ -4196,9 +4201,9 @@ void bypass_caps_init(bpctl_dev_t *pbpctl_dev)
if (PEG5_IF_SERIES(pbpctl_dev->subdevice))
pbpctl_dev->bp_caps |= (TX_CTL_CAP | TX_STATUS_CAP);
- if (BP10GB_IF_SERIES(pbpctl_dev->subdevice)) {
+ if (BP10GB_IF_SERIES(pbpctl_dev->subdevice))
pbpctl_dev->bp_caps &= ~(TX_CTL_CAP | TX_STATUS_CAP);
- }
+
pbpctl_dev_m = get_master_port_fn(pbpctl_dev);
if (pbpctl_dev_m != NULL) {
int cap_reg = 0;
@@ -4213,11 +4218,10 @@ void bypass_caps_init(bpctl_dev_t *pbpctl_dev)
}
}
-int bypass_off_init(bpctl_dev_t *pbpctl_dev)
+int bypass_off_init(struct bpctl_dev *pbpctl_dev)
{
- int ret = 0;
-
- if ((ret = cmnd_on(pbpctl_dev)) < 0)
+ int ret = cmnd_on(pbpctl_dev);
+ if (ret < 0)
return ret;
if (INTEL_IF_SERIES(pbpctl_dev->subdevice))
return dis_bypass_cap(pbpctl_dev);
@@ -4230,10 +4234,10 @@ int bypass_off_init(bpctl_dev_t *pbpctl_dev)
return 0;
}
-void remove_bypass_wd_auto(bpctl_dev_t *pbpctl_dev)
+void remove_bypass_wd_auto(struct bpctl_dev *pbpctl_dev)
{
#ifdef BP_SELF_TEST
- bpctl_dev_t *pbpctl_dev_sl = NULL;
+ struct bpctl_dev *pbpctl_dev_sl = NULL;
#endif
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
@@ -4259,7 +4263,7 @@ void remove_bypass_wd_auto(bpctl_dev_t *pbpctl_dev)
}
-int init_bypass_wd_auto(bpctl_dev_t *pbpctl_dev)
+int init_bypass_wd_auto(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
init_timer(&pbpctl_dev->bp_timer);
@@ -4273,7 +4277,7 @@ int init_bypass_wd_auto(bpctl_dev_t *pbpctl_dev)
#ifdef BP_SELF_TEST
int bp_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- bpctl_dev_t *pbpctl_dev = NULL, *pbpctl_dev_m = NULL;
+ struct bpctl_dev *pbpctl_dev = NULL, *pbpctl_dev_m = NULL;
int idx_dev = 0;
struct ethhdr *eth = (struct ethhdr *)skb->data;
@@ -4306,7 +4310,7 @@ int bp_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
#endif
-int set_bypass_wd_auto(bpctl_dev_t *pbpctl_dev, unsigned int param)
+int set_bypass_wd_auto(struct bpctl_dev *pbpctl_dev, unsigned int param)
{
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
if (pbpctl_dev->reset_time != param) {
@@ -4325,20 +4329,19 @@ int set_bypass_wd_auto(bpctl_dev_t *pbpctl_dev, unsigned int param)
return BP_NOT_CAP;
}
-int get_bypass_wd_auto(bpctl_dev_t *pbpctl_dev)
+int get_bypass_wd_auto(struct bpctl_dev *pbpctl_dev)
{
-
- if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
+ if (pbpctl_dev->bp_caps & WD_CTL_CAP)
return pbpctl_dev->reset_time;
- }
+
return BP_NOT_CAP;
}
-#ifdef BP_SELF_TEST
+#ifdef BP_SELF_TEST
-int set_bp_self_test(bpctl_dev_t *pbpctl_dev, unsigned int param)
+int set_bp_self_test(struct bpctl_dev *pbpctl_dev, unsigned int param)
{
- bpctl_dev_t *pbpctl_dev_sl = NULL;
+ struct bpctl_dev *pbpctl_dev_sl = NULL;
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
pbpctl_dev->bp_self_test_flag = param == 0 ? 0 : 1;
@@ -4371,7 +4374,7 @@ int set_bp_self_test(bpctl_dev_t *pbpctl_dev, unsigned int param)
return BP_NOT_CAP;
}
-int get_bp_self_test(bpctl_dev_t *pbpctl_dev)
+int get_bp_self_test(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
@@ -4389,7 +4392,7 @@ int get_bp_self_test(bpctl_dev_t *pbpctl_dev)
/************************* API ********************************/
/**************************************************************/
-int is_bypass_fn(bpctl_dev_t *pbpctl_dev)
+int is_bypass_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4397,13 +4400,14 @@ int is_bypass_fn(bpctl_dev_t *pbpctl_dev)
return (((pbpctl_dev->func == 0) || (pbpctl_dev->func == 2)) ? 1 : 0);
}
-int set_bypass_fn(bpctl_dev_t *pbpctl_dev, int bypass_mode)
+int set_bypass_fn(struct bpctl_dev *pbpctl_dev, int bypass_mode)
{
int ret = 0;
if (!(pbpctl_dev->bp_caps & BP_CAP))
return BP_NOT_CAP;
- if ((ret = cmnd_on(pbpctl_dev)) < 0)
+ ret = cmnd_on(pbpctl_dev);
+ if (ret < 0)
return ret;
if (!bypass_mode)
ret = bypass_off(pbpctl_dev);
@@ -4414,12 +4418,12 @@ int set_bypass_fn(bpctl_dev_t *pbpctl_dev, int bypass_mode)
return ret;
}
-int get_bypass_fn(bpctl_dev_t *pbpctl_dev)
+int get_bypass_fn(struct bpctl_dev *pbpctl_dev)
{
return bypass_status(pbpctl_dev);
}
-int get_bypass_change_fn(bpctl_dev_t *pbpctl_dev)
+int get_bypass_change_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4427,7 +4431,7 @@ int get_bypass_change_fn(bpctl_dev_t *pbpctl_dev)
return bypass_change_status(pbpctl_dev);
}
-int set_dis_bypass_fn(bpctl_dev_t *pbpctl_dev, int dis_param)
+int set_dis_bypass_fn(struct bpctl_dev *pbpctl_dev, int dis_param)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4435,7 +4439,8 @@ int set_dis_bypass_fn(bpctl_dev_t *pbpctl_dev, int dis_param)
if (!(pbpctl_dev->bp_caps & BP_DIS_CAP))
return BP_NOT_CAP;
- if ((ret = cmnd_on(pbpctl_dev)) < 0)
+ ret = cmnd_on(pbpctl_dev);
+ if (ret < 0)
return ret;
if (dis_param)
ret = dis_bypass_cap(pbpctl_dev);
@@ -4445,7 +4450,7 @@ int set_dis_bypass_fn(bpctl_dev_t *pbpctl_dev, int dis_param)
return ret;
}
-int get_dis_bypass_fn(bpctl_dev_t *pbpctl_dev)
+int get_dis_bypass_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4453,7 +4458,7 @@ int get_dis_bypass_fn(bpctl_dev_t *pbpctl_dev)
return dis_bypass_cap_status(pbpctl_dev);
}
-int set_bypass_pwoff_fn(bpctl_dev_t *pbpctl_dev, int bypass_mode)
+int set_bypass_pwoff_fn(struct bpctl_dev *pbpctl_dev, int bypass_mode)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4461,7 +4466,8 @@ int set_bypass_pwoff_fn(bpctl_dev_t *pbpctl_dev, int bypass_mode)
if (!(pbpctl_dev->bp_caps & BP_PWOFF_CTL_CAP))
return BP_NOT_CAP;
- if ((ret = cmnd_on(pbpctl_dev)) < 0)
+ ret = cmnd_on(pbpctl_dev);
+ if (ret < 0)
return ret;
if (bypass_mode)
ret = bypass_state_pwroff(pbpctl_dev);
@@ -4471,7 +4477,7 @@ int set_bypass_pwoff_fn(bpctl_dev_t *pbpctl_dev, int bypass_mode)
return ret;
}
-int get_bypass_pwoff_fn(bpctl_dev_t *pbpctl_dev)
+int get_bypass_pwoff_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4479,7 +4485,7 @@ int get_bypass_pwoff_fn(bpctl_dev_t *pbpctl_dev)
return default_pwroff_status(pbpctl_dev);
}
-int set_bypass_pwup_fn(bpctl_dev_t *pbpctl_dev, int bypass_mode)
+int set_bypass_pwup_fn(struct bpctl_dev *pbpctl_dev, int bypass_mode)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4487,7 +4493,8 @@ int set_bypass_pwup_fn(bpctl_dev_t *pbpctl_dev, int bypass_mode)
if (!(pbpctl_dev->bp_caps & BP_PWUP_CTL_CAP))
return BP_NOT_CAP;
- if ((ret = cmnd_on(pbpctl_dev)) < 0)
+ ret = cmnd_on(pbpctl_dev);
+ if (ret < 0)
return ret;
if (bypass_mode)
ret = bypass_state_pwron(pbpctl_dev);
@@ -4497,7 +4504,7 @@ int set_bypass_pwup_fn(bpctl_dev_t *pbpctl_dev, int bypass_mode)
return ret;
}
-int get_bypass_pwup_fn(bpctl_dev_t *pbpctl_dev)
+int get_bypass_pwup_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4505,7 +4512,7 @@ int get_bypass_pwup_fn(bpctl_dev_t *pbpctl_dev)
return default_pwron_status(pbpctl_dev);
}
-int set_bypass_wd_fn(bpctl_dev_t *pbpctl_dev, int timeout)
+int set_bypass_wd_fn(struct bpctl_dev *pbpctl_dev, int timeout)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4514,7 +4521,8 @@ int set_bypass_wd_fn(bpctl_dev_t *pbpctl_dev, int timeout)
if (!(pbpctl_dev->bp_caps & WD_CTL_CAP))
return BP_NOT_CAP;
- if ((ret = cmnd_on(pbpctl_dev)) < 0)
+ ret = cmnd_on(pbpctl_dev);
+ if (ret < 0)
return ret;
if (!timeout)
ret = wdt_off(pbpctl_dev);
@@ -4526,7 +4534,7 @@ int set_bypass_wd_fn(bpctl_dev_t *pbpctl_dev, int timeout)
return ret;
}
-int get_bypass_wd_fn(bpctl_dev_t *pbpctl_dev, int *timeout)
+int get_bypass_wd_fn(struct bpctl_dev *pbpctl_dev, int *timeout)
{
if (!pbpctl_dev)
return -1;
@@ -4534,7 +4542,7 @@ int get_bypass_wd_fn(bpctl_dev_t *pbpctl_dev, int *timeout)
return wdt_programmed(pbpctl_dev, timeout);
}
-int get_wd_expire_time_fn(bpctl_dev_t *pbpctl_dev, int *time_left)
+int get_wd_expire_time_fn(struct bpctl_dev *pbpctl_dev, int *time_left)
{
if (!pbpctl_dev)
return -1;
@@ -4542,7 +4550,7 @@ int get_wd_expire_time_fn(bpctl_dev_t *pbpctl_dev, int *time_left)
return wdt_timer(pbpctl_dev, time_left);
}
-int reset_bypass_wd_timer_fn(bpctl_dev_t *pbpctl_dev)
+int reset_bypass_wd_timer_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4550,7 +4558,7 @@ int reset_bypass_wd_timer_fn(bpctl_dev_t *pbpctl_dev)
return wdt_timer_reload(pbpctl_dev);
}
-int get_wd_set_caps_fn(bpctl_dev_t *pbpctl_dev)
+int get_wd_set_caps_fn(struct bpctl_dev *pbpctl_dev)
{
int bp_status = 0;
@@ -4574,7 +4582,7 @@ int get_wd_set_caps_fn(bpctl_dev_t *pbpctl_dev)
return bp_status;
}
-int set_std_nic_fn(bpctl_dev_t *pbpctl_dev, int nic_mode)
+int set_std_nic_fn(struct bpctl_dev *pbpctl_dev, int nic_mode)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4583,7 +4591,8 @@ int set_std_nic_fn(bpctl_dev_t *pbpctl_dev, int nic_mode)
if (!(pbpctl_dev->bp_caps & STD_NIC_CAP))
return BP_NOT_CAP;
- if ((ret = cmnd_on(pbpctl_dev)) < 0)
+ ret = cmnd_on(pbpctl_dev);
+ if (ret < 0)
return ret;
if (nic_mode)
ret = std_nic_on(pbpctl_dev);
@@ -4593,7 +4602,7 @@ int set_std_nic_fn(bpctl_dev_t *pbpctl_dev, int nic_mode)
return ret;
}
-int get_std_nic_fn(bpctl_dev_t *pbpctl_dev)
+int get_std_nic_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4601,7 +4610,7 @@ int get_std_nic_fn(bpctl_dev_t *pbpctl_dev)
return std_nic_status(pbpctl_dev);
}
-int set_tap_fn(bpctl_dev_t *pbpctl_dev, int tap_mode)
+int set_tap_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
{
if (!pbpctl_dev)
return -1;
@@ -4617,7 +4626,7 @@ int set_tap_fn(bpctl_dev_t *pbpctl_dev, int tap_mode)
return BP_NOT_CAP;
}
-int get_tap_fn(bpctl_dev_t *pbpctl_dev)
+int get_tap_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4625,7 +4634,7 @@ int get_tap_fn(bpctl_dev_t *pbpctl_dev)
return tap_status(pbpctl_dev);
}
-int set_tap_pwup_fn(bpctl_dev_t *pbpctl_dev, int tap_mode)
+int set_tap_pwup_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4643,18 +4652,19 @@ int set_tap_pwup_fn(bpctl_dev_t *pbpctl_dev, int tap_mode)
return ret;
}
-int get_tap_pwup_fn(bpctl_dev_t *pbpctl_dev)
+int get_tap_pwup_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
return -1;
- if ((ret = default_pwron_tap_status(pbpctl_dev)) < 0)
+ ret = default_pwron_tap_status(pbpctl_dev);
+ if (ret < 0)
return ret;
return ((ret == 0) ? 1 : 0);
}
-int get_tap_change_fn(bpctl_dev_t *pbpctl_dev)
+int get_tap_change_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4662,7 +4672,7 @@ int get_tap_change_fn(bpctl_dev_t *pbpctl_dev)
return tap_change_status(pbpctl_dev);
}
-int set_dis_tap_fn(bpctl_dev_t *pbpctl_dev, int dis_param)
+int set_dis_tap_fn(struct bpctl_dev *pbpctl_dev, int dis_param)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4679,7 +4689,7 @@ int set_dis_tap_fn(bpctl_dev_t *pbpctl_dev, int dis_param)
return BP_NOT_CAP;
}
-int get_dis_tap_fn(bpctl_dev_t *pbpctl_dev)
+int get_dis_tap_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4687,7 +4697,7 @@ int get_dis_tap_fn(bpctl_dev_t *pbpctl_dev)
return dis_tap_cap_status(pbpctl_dev);
}
-int set_disc_fn(bpctl_dev_t *pbpctl_dev, int disc_mode)
+int set_disc_fn(struct bpctl_dev *pbpctl_dev, int disc_mode)
{
if (!pbpctl_dev)
return -1;
@@ -4704,7 +4714,7 @@ int set_disc_fn(bpctl_dev_t *pbpctl_dev, int disc_mode)
return BP_NOT_CAP;
}
-int get_disc_fn(bpctl_dev_t *pbpctl_dev)
+int get_disc_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4715,7 +4725,7 @@ int get_disc_fn(bpctl_dev_t *pbpctl_dev)
return ret;
}
-int set_disc_pwup_fn(bpctl_dev_t *pbpctl_dev, int disc_mode)
+int set_disc_pwup_fn(struct bpctl_dev *pbpctl_dev, int disc_mode)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4733,7 +4743,7 @@ int set_disc_pwup_fn(bpctl_dev_t *pbpctl_dev, int disc_mode)
return ret;
}
-int get_disc_pwup_fn(bpctl_dev_t *pbpctl_dev)
+int get_disc_pwup_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4743,7 +4753,7 @@ int get_disc_pwup_fn(bpctl_dev_t *pbpctl_dev)
return (ret == 0 ? 1 : (ret < 0 ? BP_NOT_CAP : 0));
}
-int get_disc_change_fn(bpctl_dev_t *pbpctl_dev)
+int get_disc_change_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4753,7 +4763,7 @@ int get_disc_change_fn(bpctl_dev_t *pbpctl_dev)
return ret;
}
-int set_dis_disc_fn(bpctl_dev_t *pbpctl_dev, int dis_param)
+int set_dis_disc_fn(struct bpctl_dev *pbpctl_dev, int dis_param)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4771,7 +4781,7 @@ int set_dis_disc_fn(bpctl_dev_t *pbpctl_dev, int dis_param)
return BP_NOT_CAP;
}
-int get_dis_disc_fn(bpctl_dev_t *pbpctl_dev)
+int get_dis_disc_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4782,7 +4792,7 @@ int get_dis_disc_fn(bpctl_dev_t *pbpctl_dev)
return ret;
}
-int set_disc_port_fn(bpctl_dev_t *pbpctl_dev, int disc_mode)
+int set_disc_port_fn(struct bpctl_dev *pbpctl_dev, int disc_mode)
{
int ret = BP_NOT_CAP;
if (!pbpctl_dev)
@@ -4796,7 +4806,7 @@ int set_disc_port_fn(bpctl_dev_t *pbpctl_dev, int disc_mode)
return ret;
}
-int get_disc_port_fn(bpctl_dev_t *pbpctl_dev)
+int get_disc_port_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4804,7 +4814,7 @@ int get_disc_port_fn(bpctl_dev_t *pbpctl_dev)
return disc_port_status(pbpctl_dev);
}
-int set_disc_port_pwup_fn(bpctl_dev_t *pbpctl_dev, int disc_mode)
+int set_disc_port_pwup_fn(struct bpctl_dev *pbpctl_dev, int disc_mode)
{
int ret = BP_NOT_CAP;
if (!pbpctl_dev)
@@ -4818,18 +4828,19 @@ int set_disc_port_pwup_fn(bpctl_dev_t *pbpctl_dev, int disc_mode)
return ret;
}
-int get_disc_port_pwup_fn(bpctl_dev_t *pbpctl_dev)
+int get_disc_port_pwup_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
return -1;
- if ((ret = default_pwron_disc_port_status(pbpctl_dev)) < 0)
+ ret = default_pwron_disc_port_status(pbpctl_dev);
+ if (ret < 0)
return ret;
return ((ret == 0) ? 1 : 0);
}
-int get_wd_exp_mode_fn(bpctl_dev_t *pbpctl_dev)
+int get_wd_exp_mode_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4837,7 +4848,7 @@ int get_wd_exp_mode_fn(bpctl_dev_t *pbpctl_dev)
return wdt_exp_mode_status(pbpctl_dev);
}
-int set_wd_exp_mode_fn(bpctl_dev_t *pbpctl_dev, int param)
+int set_wd_exp_mode_fn(struct bpctl_dev *pbpctl_dev, int param)
{
if (!pbpctl_dev)
return -1;
@@ -4845,21 +4856,22 @@ int set_wd_exp_mode_fn(bpctl_dev_t *pbpctl_dev, int param)
return wdt_exp_mode(pbpctl_dev, param);
}
-int reset_cont_fn(bpctl_dev_t *pbpctl_dev)
+int reset_cont_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
return -1;
- if ((ret = cmnd_on(pbpctl_dev)) < 0)
+ ret = cmnd_on(pbpctl_dev);
+ if (ret < 0)
return ret;
return reset_cont(pbpctl_dev);
}
-int set_tx_fn(bpctl_dev_t *pbpctl_dev, int tx_state)
+int set_tx_fn(struct bpctl_dev *pbpctl_dev, int tx_state)
{
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
if (!pbpctl_dev)
return -1;
@@ -4867,8 +4879,10 @@ int set_tx_fn(bpctl_dev_t *pbpctl_dev, int tx_state)
(pbpctl_dev->bp_caps & SW_CTL_CAP)) {
if ((pbpctl_dev->bp_tpl_flag))
return BP_NOT_CAP;
- } else if ((pbpctl_dev_b = get_master_port_fn(pbpctl_dev))) {
- if ((pbpctl_dev_b->bp_caps & TPL_CAP) &&
+ } else {
+ pbpctl_dev_b = get_master_port_fn(pbpctl_dev);
+ if (pbpctl_dev_b &&
+ (pbpctl_dev_b->bp_caps & TPL_CAP) &&
(pbpctl_dev_b->bp_tpl_flag))
return BP_NOT_CAP;
}
@@ -4877,7 +4891,7 @@ int set_tx_fn(bpctl_dev_t *pbpctl_dev, int tx_state)
int set_bp_force_link_fn(int dev_num, int tx_state)
{
- static bpctl_dev_t *bpctl_dev_curr;
+ static struct bpctl_dev *bpctl_dev_curr;
if ((dev_num < 0) || (dev_num > device_num)
|| (bpctl_dev_arr[dev_num].pdev == NULL))
@@ -4887,7 +4901,7 @@ int set_bp_force_link_fn(int dev_num, int tx_state)
return set_bp_force_link(bpctl_dev_curr, tx_state);
}
-int set_wd_autoreset_fn(bpctl_dev_t *pbpctl_dev, int param)
+int set_wd_autoreset_fn(struct bpctl_dev *pbpctl_dev, int param)
{
if (!pbpctl_dev)
return -1;
@@ -4895,7 +4909,7 @@ int set_wd_autoreset_fn(bpctl_dev_t *pbpctl_dev, int param)
return set_bypass_wd_auto(pbpctl_dev, param);
}
-int get_wd_autoreset_fn(bpctl_dev_t *pbpctl_dev)
+int get_wd_autoreset_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4904,7 +4918,7 @@ int get_wd_autoreset_fn(bpctl_dev_t *pbpctl_dev)
}
#ifdef BP_SELF_TEST
-int set_bp_self_test_fn(bpctl_dev_t *pbpctl_dev, int param)
+int set_bp_self_test_fn(struct bpctl_dev *pbpctl_dev, int param)
{
if (!pbpctl_dev)
return -1;
@@ -4912,7 +4926,7 @@ int set_bp_self_test_fn(bpctl_dev_t *pbpctl_dev, int param)
return set_bp_self_test(pbpctl_dev, param);
}
-int get_bp_self_test_fn(bpctl_dev_t *pbpctl_dev)
+int get_bp_self_test_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4922,7 +4936,7 @@ int get_bp_self_test_fn(bpctl_dev_t *pbpctl_dev)
#endif
-int get_bypass_caps_fn(bpctl_dev_t *pbpctl_dev)
+int get_bypass_caps_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4931,7 +4945,7 @@ int get_bypass_caps_fn(bpctl_dev_t *pbpctl_dev)
}
-int get_bypass_slave_fn(bpctl_dev_t *pbpctl_dev, bpctl_dev_t **pbpctl_dev_out)
+int get_bypass_slave_fn(struct bpctl_dev *pbpctl_dev, struct bpctl_dev **pbpctl_dev_out)
{
int idx_dev = 0;
if (!pbpctl_dev)
@@ -4963,7 +4977,7 @@ int get_bypass_slave_fn(bpctl_dev_t *pbpctl_dev, bpctl_dev_t **pbpctl_dev_out)
return 0;
}
-int is_bypass(bpctl_dev_t *pbpctl_dev)
+int is_bypass(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4974,9 +4988,9 @@ int is_bypass(bpctl_dev_t *pbpctl_dev)
return 0;
}
-int get_tx_fn(bpctl_dev_t *pbpctl_dev)
+int get_tx_fn(struct bpctl_dev *pbpctl_dev)
{
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
if (!pbpctl_dev)
return -1;
@@ -4984,8 +4998,10 @@ int get_tx_fn(bpctl_dev_t *pbpctl_dev)
(pbpctl_dev->bp_caps & SW_CTL_CAP)) {
if ((pbpctl_dev->bp_tpl_flag))
return BP_NOT_CAP;
- } else if ((pbpctl_dev_b = get_master_port_fn(pbpctl_dev))) {
- if ((pbpctl_dev_b->bp_caps & TPL_CAP) &&
+ } else {
+ pbpctl_dev_b = get_master_port_fn(pbpctl_dev);
+ if (pbpctl_dev_b &&
+ (pbpctl_dev_b->bp_caps & TPL_CAP) &&
(pbpctl_dev_b->bp_tpl_flag))
return BP_NOT_CAP;
}
@@ -4994,7 +5010,7 @@ int get_tx_fn(bpctl_dev_t *pbpctl_dev)
int get_bp_force_link_fn(int dev_num)
{
- static bpctl_dev_t *bpctl_dev_curr;
+ static struct bpctl_dev *bpctl_dev_curr;
if ((dev_num < 0) || (dev_num > device_num)
|| (bpctl_dev_arr[dev_num].pdev == NULL))
@@ -5004,12 +5020,12 @@ int get_bp_force_link_fn(int dev_num)
return bp_force_link_status(bpctl_dev_curr);
}
-static int get_bypass_link_status(bpctl_dev_t *pbpctl_dev)
+static int get_bypass_link_status(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
- if (pbpctl_dev->media_type == bp_fiber)
+ if (pbpctl_dev->media_type == BP_FIBER)
return ((BPCTL_READ_REG(pbpctl_dev, CTRL) &
BPCTLI_CTRL_SWDPIN1));
else
@@ -5020,11 +5036,12 @@ static int get_bypass_link_status(bpctl_dev_t *pbpctl_dev)
static void bp_tpl_timer_fn(unsigned long param)
{
- bpctl_dev_t *pbpctl_dev = (bpctl_dev_t *) param;
+ struct bpctl_dev *pbpctl_dev = (struct bpctl_dev *) param;
uint32_t link1, link2;
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
- if (!(pbpctl_dev_b = get_status_port_fn(pbpctl_dev)))
+ pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
+ if (!pbpctl_dev_b)
return;
if (!pbpctl_dev->bp_tpl_flag) {
@@ -5036,31 +5053,27 @@ static void bp_tpl_timer_fn(unsigned long param)
link2 = get_bypass_link_status(pbpctl_dev_b);
if ((link1) && (tx_status(pbpctl_dev))) {
- if ((!link2) && (tx_status(pbpctl_dev_b))) {
+ if ((!link2) && (tx_status(pbpctl_dev_b)))
set_tx(pbpctl_dev, 0);
- } else if (!tx_status(pbpctl_dev_b)) {
+ else if (!tx_status(pbpctl_dev_b))
set_tx(pbpctl_dev_b, 1);
- }
} else if ((!link1) && (tx_status(pbpctl_dev))) {
- if ((link2) && (tx_status(pbpctl_dev_b))) {
+ if ((link2) && (tx_status(pbpctl_dev_b)))
set_tx(pbpctl_dev_b, 0);
- }
} else if ((link1) && (!tx_status(pbpctl_dev))) {
- if ((link2) && (tx_status(pbpctl_dev_b))) {
+ if ((link2) && (tx_status(pbpctl_dev_b)))
set_tx(pbpctl_dev, 1);
- }
} else if ((!link1) && (!tx_status(pbpctl_dev))) {
- if ((link2) && (tx_status(pbpctl_dev_b))) {
+ if ((link2) && (tx_status(pbpctl_dev_b)))
set_tx(pbpctl_dev, 1);
- }
}
mod_timer(&pbpctl_dev->bp_tpl_timer, jiffies + BP_LINK_MON_DELAY * HZ);
}
-void remove_bypass_tpl_auto(bpctl_dev_t *pbpctl_dev)
+void remove_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev)
{
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
if (!pbpctl_dev)
return;
pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
@@ -5076,7 +5089,7 @@ void remove_bypass_tpl_auto(bpctl_dev_t *pbpctl_dev)
return;
}
-int init_bypass_tpl_auto(bpctl_dev_t *pbpctl_dev)
+int init_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -5089,7 +5102,7 @@ int init_bypass_tpl_auto(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int set_bypass_tpl_auto(bpctl_dev_t *pbpctl_dev, unsigned int param)
+int set_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev, unsigned int param)
{
if (!pbpctl_dev)
return -1;
@@ -5098,7 +5111,7 @@ int set_bypass_tpl_auto(bpctl_dev_t *pbpctl_dev, unsigned int param)
pbpctl_dev->bp_tpl_flag = param;
mod_timer(&pbpctl_dev->bp_tpl_timer, jiffies + 1);
return BP_OK;
- };
+ }
if ((!param) && (pbpctl_dev->bp_tpl_flag))
remove_bypass_tpl_auto(pbpctl_dev);
@@ -5107,20 +5120,20 @@ int set_bypass_tpl_auto(bpctl_dev_t *pbpctl_dev, unsigned int param)
return BP_NOT_CAP;
}
-int get_bypass_tpl_auto(bpctl_dev_t *pbpctl_dev)
+int get_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
- if (pbpctl_dev->bp_caps & TPL_CAP) {
+ if (pbpctl_dev->bp_caps & TPL_CAP)
return pbpctl_dev->bp_tpl_flag;
- }
+
return BP_NOT_CAP;
}
-int set_tpl_fn(bpctl_dev_t *pbpctl_dev, int tpl_mode)
+int set_tpl_fn(struct bpctl_dev *pbpctl_dev, int tpl_mode)
{
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
if (!pbpctl_dev)
return -1;
@@ -5128,7 +5141,8 @@ int set_tpl_fn(bpctl_dev_t *pbpctl_dev, int tpl_mode)
if (pbpctl_dev->bp_caps & TPL_CAP) {
if (tpl_mode) {
- if ((pbpctl_dev_b = get_status_port_fn(pbpctl_dev)))
+ pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
+ if (pbpctl_dev_b)
set_tx(pbpctl_dev_b, 1);
set_tx(pbpctl_dev, 1);
}
@@ -5146,7 +5160,7 @@ int set_tpl_fn(bpctl_dev_t *pbpctl_dev, int tpl_mode)
return BP_NOT_CAP;
}
-int get_tpl_fn(bpctl_dev_t *pbpctl_dev)
+int get_tpl_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
if (!pbpctl_dev)
@@ -5160,7 +5174,7 @@ int get_tpl_fn(bpctl_dev_t *pbpctl_dev)
return ret;
}
-int set_bp_wait_at_pwup_fn(bpctl_dev_t *pbpctl_dev, int tap_mode)
+int set_bp_wait_at_pwup_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
{
if (!pbpctl_dev)
return -1;
@@ -5180,7 +5194,7 @@ int set_bp_wait_at_pwup_fn(bpctl_dev_t *pbpctl_dev, int tap_mode)
return BP_NOT_CAP;
}
-int get_bp_wait_at_pwup_fn(bpctl_dev_t *pbpctl_dev)
+int get_bp_wait_at_pwup_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -5193,7 +5207,7 @@ int get_bp_wait_at_pwup_fn(bpctl_dev_t *pbpctl_dev)
return ret;
}
-int set_bp_hw_reset_fn(bpctl_dev_t *pbpctl_dev, int tap_mode)
+int set_bp_hw_reset_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
{
if (!pbpctl_dev)
return -1;
@@ -5213,7 +5227,7 @@ int set_bp_hw_reset_fn(bpctl_dev_t *pbpctl_dev, int tap_mode)
return BP_NOT_CAP;
}
-int get_bp_hw_reset_fn(bpctl_dev_t *pbpctl_dev)
+int get_bp_hw_reset_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -5228,7 +5242,7 @@ int get_bp_hw_reset_fn(bpctl_dev_t *pbpctl_dev)
}
-int get_bypass_info_fn(bpctl_dev_t *pbpctl_dev, char *dev_name,
+int get_bypass_info_fn(struct bpctl_dev *pbpctl_dev, char *dev_name,
char *add_param)
{
if (!pbpctl_dev)
@@ -5299,7 +5313,7 @@ static int get_dev_idx(int ifindex)
return -1;
}
-static bpctl_dev_t *get_dev_idx_p(int ifindex)
+static struct bpctl_dev *get_dev_idx_p(int ifindex)
{
int idx_dev = 0;
@@ -5345,7 +5359,8 @@ static void if_scan_init(void)
memcpy(&cbuf, drvinfo.bus_info, 32);
buf = &cbuf[0];
- while (*buf++ != ':') ;
+ while (*buf++ != ':')
+ ;
for (i = 0; i < 10; i++, buf++) {
if (*buf == ':')
break;
@@ -5386,15 +5401,16 @@ static long device_ioctl(struct file *file, /* see include/linux/fs.h */
{
struct bpctl_cmd bpctl_cmd;
int dev_idx = 0;
- bpctl_dev_t *pbpctl_dev_out;
+ struct bpctl_dev *pbpctl_dev_out;
void __user *argp = (void __user *)ioctl_param;
int ret = 0;
unsigned long flags;
- static bpctl_dev_t *pbpctl_dev;
+ static struct bpctl_dev *pbpctl_dev;
/* lock_kernel(); */
- lock_bpctl();
+ if (down_interruptible(&bpctl_sema))
+ return -ERESTARTSYS;
/* local_irq_save(flags); */
/* if(!spin_trylock_irqsave(&bpvm_lock)){
local_irq_restore(flags);
@@ -5438,9 +5454,9 @@ static long device_ioctl(struct file *file, /* see include/linux/fs.h */
return -1;
}
-/* preempt_disable();
+/* preempt_disable();
rcu_read_lock();
- spin_lock_irqsave(&bpvm_lock, flags);
+ spin_lock_irqsave(&bpvm_lock, flags);
*/
if ((bpctl_cmd.in_param[5]) ||
(bpctl_cmd.in_param[6]) || (bpctl_cmd.in_param[7]))
@@ -5787,7 +5803,7 @@ static const struct file_operations Fops = {
};
#ifndef PCI_DEVICE
-#define PCI_DEVICE(vend,dev) \
+#define PCI_DEVICE(vend, dev) \
.vendor = (vend), .device = (dev), \
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
#endif
@@ -5795,7 +5811,7 @@ static const struct file_operations Fops = {
#define SILICOM_E1000BP_ETHERNET_DEVICE(device_id) {\
PCI_DEVICE(SILICOM_VID, device_id)}
-typedef enum {
+enum board_type {
PXG2BPFI,
PXG2BPFIL,
PXG2BPFILX,
@@ -5953,9 +5969,9 @@ typedef enum {
PE310G4BPi9SR,
PE310G4BPi9LR,
PE210G2BPi40,
-} board_t;
+};
-typedef struct _bpmod_info_t {
+struct bpmod_info {
unsigned int vendor;
unsigned int device;
unsigned int subvendor;
@@ -5963,13 +5979,11 @@ typedef struct _bpmod_info_t {
unsigned int index;
char *bp_name;
-} bpmod_info_t;
+};
-typedef struct _dev_desc {
+struct {
char *name;
-} dev_desc_t;
-
-dev_desc_t dev_desc[] = {
+} dev_desc[] = {
{"Silicom Bypass PXG2BPFI-SD series adapter"},
{"Silicom Bypass PXG2BPFIL-SD series adapter"},
{"Silicom Bypass PXG2BPFILX-SD series adapter"},
@@ -6139,7 +6153,7 @@ dev_desc_t dev_desc[] = {
{0},
};
-static bpmod_info_t tx_ctl_pci_tbl[] = {
+static struct bpmod_info tx_ctl_pci_tbl[] = {
{0x8086, 0x107a, SILICOM_SVID, SILICOM_PXG2BPFI_SSID, PXG2BPFI,
"PXG2BPFI-SD"},
{0x8086, 0x107a, SILICOM_SVID, SILICOM_PXG2BPFIL_SSID, PXG2BPFIL,
@@ -6607,7 +6621,7 @@ static bpmod_info_t tx_ctl_pci_tbl[] = {
{0,}
};
-static void find_fw(bpctl_dev_t *dev)
+static void find_fw(struct bpctl_dev *dev)
{
unsigned long mmio_start, mmio_len;
struct pci_dev *pdev1 = dev->pdev;
@@ -6629,7 +6643,7 @@ static void find_fw(bpctl_dev_t *dev)
ioremap(mmio_start, mmio_len);
dev->bp_fw_ver = bypass_fw_ver(dev);
- if (dev-> bp_fw_ver == 0xa8)
+ if (dev->bp_fw_ver == 0xa8)
break;
}
}
@@ -6637,7 +6651,7 @@ static void find_fw(bpctl_dev_t *dev)
printk("firmware version: 0x%x\n", dev->bp_fw_ver);
}
-static int init_one(bpctl_dev_t *dev, bpmod_info_t *info, struct pci_dev *pdev1)
+static int init_one(struct bpctl_dev *dev, struct bpmod_info *info, struct pci_dev *pdev1)
{
unsigned long mmio_start, mmio_len;
@@ -6708,7 +6722,8 @@ static int init_one(bpctl_dev_t *dev, bpmod_info_t *info, struct pci_dev *pdev1)
reset_cont(dev);
}
#ifdef BP_SELF_TEST
- if ((dev->bp_tx_data = kzalloc(BPTEST_DATA_LEN, GFP_KERNEL))) {
+ dev->bp_tx_data = kzalloc(BPTEST_DATA_LEN, GFP_KERNEL);
+ if (dev->bp_tx_data) {
memset(dev->bp_tx_data, 0xff, 6);
memset(dev->bp_tx_data + 6, 0x0, 1);
memset(dev->bp_tx_data + 7, 0xaa, 5);
@@ -6727,7 +6742,7 @@ static int __init bypass_init_module(void)
{
int ret_val, idx, idx_dev = 0;
struct pci_dev *pdev1 = NULL;
- bpctl_dev_t *dev;
+ struct bpctl_dev *dev;
printk(BP_MOD_DESCR " v" BP_MOD_VER "\n");
ret_val = register_chrdev(major_num, DEVICE_NAME, &Fops);
@@ -6752,14 +6767,14 @@ static int __init bypass_init_module(void)
return -1;
}
- bpctl_dev_arr = kmalloc((device_num) * sizeof(bpctl_dev_t), GFP_KERNEL);
+ bpctl_dev_arr = kmalloc((device_num) * sizeof(struct bpctl_dev), GFP_KERNEL);
if (!bpctl_dev_arr) {
printk("Allocation error\n");
unregister_chrdev(major_num, DEVICE_NAME);
return -1;
}
- memset(bpctl_dev_arr, 0, ((device_num) * sizeof(bpctl_dev_t)));
+ memset(bpctl_dev_arr, 0, ((device_num) * sizeof(struct bpctl_dev)));
pdev1 = NULL;
dev = bpctl_dev_arr;
@@ -6780,7 +6795,7 @@ static int __init bypass_init_module(void)
spin_lock_init(&bpvm_lock);
{
- bpctl_dev_t *pbpctl_dev_c = NULL;
+ struct bpctl_dev *pbpctl_dev_c = NULL;
for (idx_dev = 0, dev = bpctl_dev_arr;
idx_dev < device_num && dev->pdev;
idx_dev++, dev++) {
@@ -6878,59 +6893,69 @@ int is_bypass_sd(int ifindex)
{
return is_bypass(get_dev_idx_p(ifindex));
}
+EXPORT_SYMBOL(is_bypass_sd);
int set_bypass_sd(int ifindex, int bypass_mode)
{
return set_bypass_fn(get_dev_idx_p(ifindex), bypass_mode);
}
+EXPORT_SYMBOL(set_bypass_sd);
int get_bypass_sd(int ifindex)
{
return get_bypass_fn(get_dev_idx_p(ifindex));
}
+EXPORT_SYMBOL(get_bypass_sd);
int get_bypass_change_sd(int ifindex)
{
return get_bypass_change_fn(get_dev_idx_p(ifindex));
}
+EXPORT_SYMBOL(get_bypass_change_sd);
int set_dis_bypass_sd(int ifindex, int dis_param)
{
return set_dis_bypass_fn(get_dev_idx_p(ifindex), dis_param);
}
+EXPORT_SYMBOL(set_dis_bypass_sd);
int get_dis_bypass_sd(int ifindex)
{
return get_dis_bypass_fn(get_dev_idx_p(ifindex));
}
+EXPORT_SYMBOL(get_dis_bypass_sd);
int set_bypass_pwoff_sd(int ifindex, int bypass_mode)
{
return set_bypass_pwoff_fn(get_dev_idx_p(ifindex), bypass_mode);
}
+EXPORT_SYMBOL(set_bypass_pwoff_sd);
int get_bypass_pwoff_sd(int ifindex)
{
return get_bypass_pwoff_fn(get_dev_idx_p(ifindex));
}
+EXPORT_SYMBOL(get_bypass_pwoff_sd);
int set_bypass_pwup_sd(int ifindex, int bypass_mode)
{
return set_bypass_pwup_fn(get_dev_idx_p(ifindex), bypass_mode);
}
+EXPORT_SYMBOL(set_bypass_pwup_sd);
int get_bypass_pwup_sd(int ifindex)
{
return get_bypass_pwup_fn(get_dev_idx_p(ifindex));
}
+EXPORT_SYMBOL(get_bypass_pwup_sd);
int set_bypass_wd_sd(int if_index, int ms_timeout, int *ms_timeout_set)
{
@@ -6939,136 +6964,159 @@ int set_bypass_wd_sd(int if_index, int ms_timeout, int *ms_timeout_set)
*ms_timeout_set = set_bypass_wd_fn(get_dev_idx_p(if_index), ms_timeout);
return 0;
}
+EXPORT_SYMBOL(set_bypass_wd_sd);
int get_bypass_wd_sd(int ifindex, int *timeout)
{
return get_bypass_wd_fn(get_dev_idx_p(ifindex), timeout);
}
+EXPORT_SYMBOL(get_bypass_wd_sd);
int get_wd_expire_time_sd(int ifindex, int *time_left)
{
return get_wd_expire_time_fn(get_dev_idx_p(ifindex), time_left);
}
+EXPORT_SYMBOL(get_wd_expire_time_sd);
int reset_bypass_wd_timer_sd(int ifindex)
{
return reset_bypass_wd_timer_fn(get_dev_idx_p(ifindex));
}
+EXPORT_SYMBOL(reset_bypass_wd_timer_sd);
int get_wd_set_caps_sd(int ifindex)
{
return get_wd_set_caps_fn(get_dev_idx_p(ifindex));
}
+EXPORT_SYMBOL(get_wd_set_caps_sd);
int set_std_nic_sd(int ifindex, int nic_mode)
{
return set_std_nic_fn(get_dev_idx_p(ifindex), nic_mode);
}
+EXPORT_SYMBOL(set_std_nic_sd);
int get_std_nic_sd(int ifindex)
{
return get_std_nic_fn(get_dev_idx_p(ifindex));
}
+EXPORT_SYMBOL(get_std_nic_sd);
int set_tap_sd(int ifindex, int tap_mode)
{
return set_tap_fn(get_dev_idx_p(ifindex), tap_mode);
}
+EXPORT_SYMBOL(set_tap_sd);
int get_tap_sd(int ifindex)
{
return get_tap_fn(get_dev_idx_p(ifindex));
}
+EXPORT_SYMBOL(get_tap_sd);
int set_tap_pwup_sd(int ifindex, int tap_mode)
{
return set_tap_pwup_fn(get_dev_idx_p(ifindex), tap_mode);
}
+EXPORT_SYMBOL(set_tap_pwup_sd);
int get_tap_pwup_sd(int ifindex)
{
return get_tap_pwup_fn(get_dev_idx_p(ifindex));
}
+EXPORT_SYMBOL(get_tap_pwup_sd);
int get_tap_change_sd(int ifindex)
{
return get_tap_change_fn(get_dev_idx_p(ifindex));
}
+EXPORT_SYMBOL(get_tap_change_sd);
int set_dis_tap_sd(int ifindex, int dis_param)
{
return set_dis_tap_fn(get_dev_idx_p(ifindex), dis_param);
}
+EXPORT_SYMBOL(set_dis_tap_sd);
int get_dis_tap_sd(int ifindex)
{
return get_dis_tap_fn(get_dev_idx_p(ifindex));
}
+EXPORT_SYMBOL(get_dis_tap_sd);
int set_bp_disc_sd(int ifindex, int disc_mode)
{
return set_disc_fn(get_dev_idx_p(ifindex), disc_mode);
}
+EXPORT_SYMBOL(set_bp_disc_sd);
int get_bp_disc_sd(int ifindex)
{
return get_disc_fn(get_dev_idx_p(ifindex));
}
+EXPORT_SYMBOL(get_bp_disc_sd);
int set_bp_disc_pwup_sd(int ifindex, int disc_mode)
{
return set_disc_pwup_fn(get_dev_idx_p(ifindex), disc_mode);
}
+EXPORT_SYMBOL(set_bp_disc_pwup_sd);
int get_bp_disc_pwup_sd(int ifindex)
{
return get_disc_pwup_fn(get_dev_idx_p(ifindex));
}
+EXPORT_SYMBOL(get_bp_disc_pwup_sd);
int get_bp_disc_change_sd(int ifindex)
{
return get_disc_change_fn(get_dev_idx_p(ifindex));
}
+EXPORT_SYMBOL(get_bp_disc_change_sd);
int set_bp_dis_disc_sd(int ifindex, int dis_param)
{
return set_dis_disc_fn(get_dev_idx_p(ifindex), dis_param);
}
+EXPORT_SYMBOL(set_bp_dis_disc_sd);
int get_bp_dis_disc_sd(int ifindex)
{
return get_dis_disc_fn(get_dev_idx_p(ifindex));
}
+EXPORT_SYMBOL(get_bp_dis_disc_sd);
int get_wd_exp_mode_sd(int ifindex)
{
return get_wd_exp_mode_fn(get_dev_idx_p(ifindex));
}
+EXPORT_SYMBOL(get_wd_exp_mode_sd);
int set_wd_exp_mode_sd(int ifindex, int param)
{
return set_wd_exp_mode_fn(get_dev_idx_p(ifindex), param);
}
+EXPORT_SYMBOL(set_wd_exp_mode_sd);
int reset_cont_sd(int ifindex)
{
@@ -7081,120 +7129,86 @@ int set_tx_sd(int ifindex, int tx_state)
return set_tx_fn(get_dev_idx_p(ifindex), tx_state);
}
+EXPORT_SYMBOL(set_tx_sd);
int set_tpl_sd(int ifindex, int tpl_state)
{
return set_tpl_fn(get_dev_idx_p(ifindex), tpl_state);
}
+EXPORT_SYMBOL(set_tpl_sd);
int set_bp_hw_reset_sd(int ifindex, int status)
{
return set_bp_hw_reset_fn(get_dev_idx_p(ifindex), status);
}
+EXPORT_SYMBOL(set_bp_hw_reset_sd);
int set_wd_autoreset_sd(int ifindex, int param)
{
return set_wd_autoreset_fn(get_dev_idx_p(ifindex), param);
}
+EXPORT_SYMBOL(set_wd_autoreset_sd);
int get_wd_autoreset_sd(int ifindex)
{
return get_wd_autoreset_fn(get_dev_idx_p(ifindex));
}
+EXPORT_SYMBOL(get_wd_autoreset_sd);
int get_bypass_caps_sd(int ifindex)
{
return get_bypass_caps_fn(get_dev_idx_p(ifindex));
}
+EXPORT_SYMBOL(get_bypass_caps_sd);
int get_bypass_slave_sd(int ifindex)
{
- bpctl_dev_t *pbpctl_dev_out;
+ struct bpctl_dev *pbpctl_dev_out;
int ret = get_bypass_slave_fn(get_dev_idx_p(ifindex), &pbpctl_dev_out);
if (ret == 1)
return pbpctl_dev_out->ifindex;
return -1;
}
+EXPORT_SYMBOL(get_bypass_slave_sd);
int get_tx_sd(int ifindex)
{
return get_tx_fn(get_dev_idx_p(ifindex));
}
+EXPORT_SYMBOL(get_tx_sd);
int get_tpl_sd(int ifindex)
{
return get_tpl_fn(get_dev_idx_p(ifindex));
}
+EXPORT_SYMBOL(get_tpl_sd);
int get_bp_hw_reset_sd(int ifindex)
{
return get_bp_hw_reset_fn(get_dev_idx_p(ifindex));
}
+EXPORT_SYMBOL(get_bp_hw_reset_sd);
int get_bypass_info_sd(int ifindex, struct bp_info *bp_info)
{
return get_bypass_info_fn(get_dev_idx_p(ifindex), bp_info->prod_name, &bp_info->fw_ver);
}
+EXPORT_SYMBOL(get_bypass_info_sd);
int bp_if_scan_sd(void)
{
if_scan_init();
return 0;
}
-
-EXPORT_SYMBOL_NOVERS(is_bypass_sd);
-EXPORT_SYMBOL_NOVERS(get_bypass_slave_sd);
-EXPORT_SYMBOL_NOVERS(get_bypass_caps_sd);
-EXPORT_SYMBOL_NOVERS(get_wd_set_caps_sd);
-EXPORT_SYMBOL_NOVERS(set_bypass_sd);
-EXPORT_SYMBOL_NOVERS(get_bypass_sd);
-EXPORT_SYMBOL_NOVERS(get_bypass_change_sd);
-EXPORT_SYMBOL_NOVERS(set_dis_bypass_sd);
-EXPORT_SYMBOL_NOVERS(get_dis_bypass_sd);
-EXPORT_SYMBOL_NOVERS(set_bypass_pwoff_sd);
-EXPORT_SYMBOL_NOVERS(get_bypass_pwoff_sd);
-EXPORT_SYMBOL_NOVERS(set_bypass_pwup_sd);
-EXPORT_SYMBOL_NOVERS(get_bypass_pwup_sd);
-EXPORT_SYMBOL_NOVERS(set_bypass_wd_sd);
-EXPORT_SYMBOL_NOVERS(get_bypass_wd_sd);
-EXPORT_SYMBOL_NOVERS(get_wd_expire_time_sd);
-EXPORT_SYMBOL_NOVERS(reset_bypass_wd_timer_sd);
-EXPORT_SYMBOL_NOVERS(set_std_nic_sd);
-EXPORT_SYMBOL_NOVERS(get_std_nic_sd);
-EXPORT_SYMBOL_NOVERS(set_tx_sd);
-EXPORT_SYMBOL_NOVERS(get_tx_sd);
-EXPORT_SYMBOL_NOVERS(set_tpl_sd);
-EXPORT_SYMBOL_NOVERS(get_tpl_sd);
-EXPORT_SYMBOL_NOVERS(set_bp_hw_reset_sd);
-EXPORT_SYMBOL_NOVERS(get_bp_hw_reset_sd);
-EXPORT_SYMBOL_NOVERS(set_tap_sd);
-EXPORT_SYMBOL_NOVERS(get_tap_sd);
-EXPORT_SYMBOL_NOVERS(get_tap_change_sd);
-EXPORT_SYMBOL_NOVERS(set_dis_tap_sd);
-EXPORT_SYMBOL_NOVERS(get_dis_tap_sd);
-EXPORT_SYMBOL_NOVERS(set_tap_pwup_sd);
-EXPORT_SYMBOL_NOVERS(get_tap_pwup_sd);
-EXPORT_SYMBOL_NOVERS(set_wd_exp_mode_sd);
-EXPORT_SYMBOL_NOVERS(get_wd_exp_mode_sd);
-EXPORT_SYMBOL_NOVERS(set_wd_autoreset_sd);
-EXPORT_SYMBOL_NOVERS(get_wd_autoreset_sd);
-EXPORT_SYMBOL_NOVERS(set_bp_disc_sd);
-EXPORT_SYMBOL_NOVERS(get_bp_disc_sd);
-EXPORT_SYMBOL_NOVERS(get_bp_disc_change_sd);
-EXPORT_SYMBOL_NOVERS(set_bp_dis_disc_sd);
-EXPORT_SYMBOL_NOVERS(get_bp_dis_disc_sd);
-EXPORT_SYMBOL_NOVERS(set_bp_disc_pwup_sd);
-EXPORT_SYMBOL_NOVERS(get_bp_disc_pwup_sd);
-EXPORT_SYMBOL_NOVERS(get_bypass_info_sd);
-EXPORT_SYMBOL_NOVERS(bp_if_scan_sd);
+EXPORT_SYMBOL(bp_if_scan_sd);
#define BP_PROC_DIR "bypass"
@@ -7213,7 +7227,7 @@ int bp_proc_create(void)
}
static int procfs_add(char *proc_name, const struct file_operations *fops,
- bpctl_dev_t *dev)
+ struct bpctl_dev *dev)
{
struct bypass_pfs_sd *pfs = &dev->bypass_pfs_set;
if (!proc_create_data(proc_name, 0644, pfs->bypass_entry, fops, dev))
@@ -7248,7 +7262,7 @@ static const struct file_operations name##_ops = { \
static int show_bypass_info(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
seq_printf(m, "Name\t\t\t%s\n", dev->name);
seq_printf(m, "Firmware version\t0x%x\n", dev->bp_fw_ver);
@@ -7258,12 +7272,12 @@ RO_FOPS(bypass_info)
static int show_bypass_slave(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
- bpctl_dev_t *slave = get_status_port_fn(dev);
+ struct bpctl_dev *dev = m->private;
+ struct bpctl_dev *slave = get_status_port_fn(dev);
if (!slave)
slave = dev;
if (!slave)
- seq_printf(m, "fail\n");
+ seq_puts(m, "fail\n");
else if (slave->ndev)
seq_printf(m, "%s\n", slave->ndev->name);
return 0;
@@ -7272,10 +7286,10 @@ RO_FOPS(bypass_slave)
static int show_bypass_caps(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_bypass_caps_fn(dev);
if (ret == BP_NOT_CAP)
- seq_printf(m, "-1\n");
+ seq_puts(m, "-1\n");
else
seq_printf(m, "0x%x\n", ret);
return 0;
@@ -7284,10 +7298,10 @@ RO_FOPS(bypass_caps)
static int show_wd_set_caps(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_wd_set_caps_fn(dev);
if (ret == BP_NOT_CAP)
- seq_printf(m, "-1\n");
+ seq_puts(m, "-1\n");
else
seq_printf(m, "0x%x\n", ret);
return 0;
@@ -7330,14 +7344,14 @@ static ssize_t bypass_write(struct file *file, const char __user *buffer,
}
static int show_bypass(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_bypass_fn(dev);
if (ret == BP_NOT_CAP)
- seq_printf(m, "fail\n");
+ seq_puts(m, "fail\n");
else if (ret == 1)
- seq_printf(m, "on\n");
+ seq_puts(m, "on\n");
else if (ret == 0)
- seq_printf(m, "off\n");
+ seq_puts(m, "off\n");
return 0;
}
RW_FOPS(bypass)
@@ -7354,14 +7368,14 @@ static ssize_t tap_write(struct file *file, const char __user *buffer,
}
static int show_tap(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_tap_fn(dev);
if (ret == BP_NOT_CAP)
- seq_printf(m, "fail\n");
+ seq_puts(m, "fail\n");
else if (ret == 1)
- seq_printf(m, "on\n");
+ seq_puts(m, "on\n");
else if (ret == 0)
- seq_printf(m, "off\n");
+ seq_puts(m, "off\n");
return 0;
}
RW_FOPS(tap)
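
A minimal sketch of the seq_puts()/seq_printf() rule these show handlers adopt: seq_puts() for fixed strings, seq_printf() only where formatting is actually needed. The handler name and status value below are hypothetical, not part of the patch.

#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	int ret = 1;				/* hypothetical status value */

	if (ret < 0)
		seq_puts(m, "fail\n");		/* fixed string: seq_puts() */
	else
		seq_printf(m, "0x%x\n", ret);	/* formatted output: seq_printf() */
	return 0;
}
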
@@ -7378,56 +7392,56 @@ static ssize_t disc_write(struct file *file, const char __user *buffer,
}
static int show_disc(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_disc_fn(dev);
if (ret == BP_NOT_CAP)
- seq_printf(m, "fail\n");
+ seq_puts(m, "fail\n");
else if (ret == 1)
- seq_printf(m, "on\n");
+ seq_puts(m, "on\n");
else if (ret == 0)
- seq_printf(m, "off\n");
+ seq_puts(m, "off\n");
return 0;
}
RW_FOPS(disc)
static int show_bypass_change(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_bypass_change_fn(dev);
if (ret == 1)
- seq_printf(m, "on\n");
+ seq_puts(m, "on\n");
else if (ret == 0)
- seq_printf(m, "off\n");
+ seq_puts(m, "off\n");
else
- seq_printf(m, "fail\n");
+ seq_puts(m, "fail\n");
return 0;
}
RO_FOPS(bypass_change)
static int show_tap_change(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_tap_change_fn(dev);
if (ret == 1)
- seq_printf(m, "on\n");
+ seq_puts(m, "on\n");
else if (ret == 0)
- seq_printf(m, "off\n");
+ seq_puts(m, "off\n");
else
- seq_printf(m, "fail\n");
+ seq_puts(m, "fail\n");
return 0;
}
RO_FOPS(tap_change)
static int show_disc_change(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_disc_change_fn(dev);
if (ret == 1)
- seq_printf(m, "on\n");
+ seq_puts(m, "on\n");
else if (ret == 0)
- seq_printf(m, "off\n");
+ seq_puts(m, "off\n");
else
- seq_printf(m, "fail\n");
+ seq_puts(m, "fail\n");
return 0;
}
RO_FOPS(disc_change)
@@ -7435,7 +7449,7 @@ RO_FOPS(disc_change)
static ssize_t bypass_wd_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
- bpctl_dev_t *dev = PDE_DATA(file_inode(file));
+ struct bpctl_dev *dev = PDE_DATA(file_inode(file));
int timeout;
int ret = kstrtoint_from_user(buffer, count, 10, &timeout);
if (ret)
@@ -7445,16 +7459,16 @@ static ssize_t bypass_wd_write(struct file *file, const char __user *buffer,
}
static int show_bypass_wd(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = 0, timeout = 0;
ret = get_bypass_wd_fn(dev, &timeout);
if (ret == BP_NOT_CAP)
- seq_printf(m, "fail\n");
+ seq_puts(m, "fail\n");
else if (timeout == -1)
- seq_printf(m, "unknown\n");
+ seq_puts(m, "unknown\n");
else if (timeout == 0)
- seq_printf(m, "disable\n");
+ seq_puts(m, "disable\n");
else
seq_printf(m, "%d\n", timeout);
return 0;
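
A minimal sketch of the kstrtoint_from_user() pattern used by the proc write handlers in this file; the handler name and the "program the watchdog" step are hypothetical stand-ins for the driver-specific calls.

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t example_wd_write(struct file *file, const char __user *buffer,
				size_t count, loff_t *pos)
{
	int timeout;
	int ret = kstrtoint_from_user(buffer, count, 10, &timeout);

	if (ret)
		return ret;	/* -EINVAL/-ERANGE/-EFAULT from the parse */
	/* program the watchdog with "timeout" here (device-specific) */
	return count;
}
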
@@ -7463,15 +7477,15 @@ RW_FOPS(bypass_wd)
static int show_wd_expire_time(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = 0, timeout = 0;
ret = get_wd_expire_time_fn(dev, &timeout);
if (ret == BP_NOT_CAP)
- seq_printf(m, "fail\n");
+ seq_puts(m, "fail\n");
else if (timeout == -1)
- seq_printf(m, "expire\n");
+ seq_puts(m, "expire\n");
else if (timeout == 0)
- seq_printf(m, "disable\n");
+ seq_puts(m, "disable\n");
else
seq_printf(m, "%d\n", timeout);
return 0;
@@ -7481,7 +7495,7 @@ RO_FOPS(wd_expire_time)
static ssize_t tpl_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
- bpctl_dev_t *dev = PDE_DATA(file_inode(file));
+ struct bpctl_dev *dev = PDE_DATA(file_inode(file));
int tpl_param = user_on_off(buffer, count);
if (tpl_param < 0)
return -1;
@@ -7491,14 +7505,14 @@ static ssize_t tpl_write(struct file *file, const char __user *buffer,
}
static int show_tpl(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_tpl_fn(dev);
if (ret == BP_NOT_CAP)
- seq_printf(m, "fail\n");
+ seq_puts(m, "fail\n");
else if (ret == 1)
- seq_printf(m, "on\n");
+ seq_puts(m, "on\n");
else if (ret == 0)
- seq_printf(m, "off\n");
+ seq_puts(m, "off\n");
return 0;
}
RW_FOPS(tpl)
@@ -7507,7 +7521,7 @@ RW_FOPS(tpl)
static ssize_t wait_at_pwup_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
- bpctl_dev_t *dev = PDE_DATA(file_inode(file));
+ struct bpctl_dev *dev = PDE_DATA(file_inode(file));
int tpl_param = user_on_off(buffer, count);
if (tpl_param < 0)
return -1;
@@ -7517,14 +7531,14 @@ static ssize_t wait_at_pwup_write(struct file *file, const char __user *buffer,
}
static int show_wait_at_pwup(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_bp_wait_at_pwup_fn(dev);
if (ret == BP_NOT_CAP)
- seq_printf(m, "fail\n");
+ seq_puts(m, "fail\n");
else if (ret == 1)
- seq_printf(m, "on\n");
+ seq_puts(m, "on\n");
else if (ret == 0)
- seq_printf(m, "off\n");
+ seq_puts(m, "off\n");
return 0;
}
RW_FOPS(wait_at_pwup)
@@ -7532,7 +7546,7 @@ RW_FOPS(wait_at_pwup)
static ssize_t hw_reset_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
- bpctl_dev_t *dev = PDE_DATA(file_inode(file));
+ struct bpctl_dev *dev = PDE_DATA(file_inode(file));
int tpl_param = user_on_off(buffer, count);
if (tpl_param < 0)
return -1;
@@ -7542,14 +7556,14 @@ static ssize_t hw_reset_write(struct file *file, const char __user *buffer,
}
static int show_hw_reset(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_bp_hw_reset_fn(dev);
if (ret == BP_NOT_CAP)
- seq_printf(m, "fail\n");
+ seq_puts(m, "fail\n");
else if (ret == 1)
- seq_printf(m, "on\n");
+ seq_puts(m, "on\n");
else if (ret == 0)
- seq_printf(m, "off\n");
+ seq_puts(m, "off\n");
return 0;
}
RW_FOPS(hw_reset)
@@ -7558,14 +7572,14 @@ RW_FOPS(hw_reset)
static int show_reset_bypass_wd(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = reset_bypass_wd_timer_fn(dev);
if (ret == BP_NOT_CAP)
- seq_printf(m, "fail\n");
+ seq_puts(m, "fail\n");
else if (ret == 0)
- seq_printf(m, "disable\n");
+ seq_puts(m, "disable\n");
else if (ret == 1)
- seq_printf(m, "success\n");
+ seq_puts(m, "success\n");
return 0;
}
RO_FOPS(reset_bypass_wd)
@@ -7582,14 +7596,14 @@ static ssize_t dis_bypass_write(struct file *file, const char __user *buffer,
}
static int show_dis_bypass(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_dis_bypass_fn(dev);
if (ret == BP_NOT_CAP)
- seq_printf(m, "fail\n");
+ seq_puts(m, "fail\n");
else if (ret == 0)
- seq_printf(m, "off\n");
+ seq_puts(m, "off\n");
else
- seq_printf(m, "on\n");
+ seq_puts(m, "on\n");
return 0;
}
RW_FOPS(dis_bypass)
@@ -7606,14 +7620,14 @@ static ssize_t dis_tap_write(struct file *file, const char __user *buffer,
}
static int show_dis_tap(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_dis_tap_fn(dev);
if (ret == BP_NOT_CAP)
- seq_printf(m, "fail\n");
+ seq_puts(m, "fail\n");
else if (ret == 0)
- seq_printf(m, "off\n");
+ seq_puts(m, "off\n");
else
- seq_printf(m, "on\n");
+ seq_puts(m, "on\n");
return 0;
}
RW_FOPS(dis_tap)
@@ -7630,14 +7644,14 @@ static ssize_t dis_disc_write(struct file *file, const char __user *buffer,
}
static int show_dis_disc(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_dis_disc_fn(dev);
if (ret == BP_NOT_CAP)
- seq_printf(m, "fail\n");
+ seq_puts(m, "fail\n");
else if (ret == 0)
- seq_printf(m, "off\n");
+ seq_puts(m, "off\n");
else
- seq_printf(m, "on\n");
+ seq_puts(m, "on\n");
return 0;
}
RW_FOPS(dis_disc)
@@ -7654,14 +7668,14 @@ static ssize_t bypass_pwup_write(struct file *file, const char __user *buffer,
}
static int show_bypass_pwup(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_bypass_pwup_fn(dev);
if (ret == BP_NOT_CAP)
- seq_printf(m, "fail\n");
+ seq_puts(m, "fail\n");
else if (ret == 0)
- seq_printf(m, "off\n");
+ seq_puts(m, "off\n");
else
- seq_printf(m, "on\n");
+ seq_puts(m, "on\n");
return 0;
}
RW_FOPS(bypass_pwup)
@@ -7678,14 +7692,14 @@ static ssize_t bypass_pwoff_write(struct file *file, const char __user *buffer,
}
static int show_bypass_pwoff(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_bypass_pwoff_fn(dev);
if (ret == BP_NOT_CAP)
- seq_printf(m, "fail\n");
+ seq_puts(m, "fail\n");
else if (ret == 0)
- seq_printf(m, "off\n");
+ seq_puts(m, "off\n");
else
- seq_printf(m, "on\n");
+ seq_puts(m, "on\n");
return 0;
}
RW_FOPS(bypass_pwoff)
@@ -7702,14 +7716,14 @@ static ssize_t tap_pwup_write(struct file *file, const char __user *buffer,
}
static int show_tap_pwup(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_tap_pwup_fn(dev);
if (ret == BP_NOT_CAP)
- seq_printf(m, "fail\n");
+ seq_puts(m, "fail\n");
else if (ret == 0)
- seq_printf(m, "off\n");
+ seq_puts(m, "off\n");
else
- seq_printf(m, "on\n");
+ seq_puts(m, "on\n");
return 0;
}
RW_FOPS(tap_pwup)
@@ -7726,14 +7740,14 @@ static ssize_t disc_pwup_write(struct file *file, const char __user *buffer,
}
static int show_disc_pwup(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_disc_pwup_fn(dev);
if (ret == BP_NOT_CAP)
- seq_printf(m, "fail\n");
+ seq_puts(m, "fail\n");
else if (ret == 0)
- seq_printf(m, "off\n");
+ seq_puts(m, "off\n");
else
- seq_printf(m, "on\n");
+ seq_puts(m, "on\n");
return 0;
}
RW_FOPS(disc_pwup)
@@ -7750,14 +7764,14 @@ static ssize_t std_nic_write(struct file *file, const char __user *buffer,
}
static int show_std_nic(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_std_nic_fn(dev);
if (ret == BP_NOT_CAP)
- seq_printf(m, "fail\n");
+ seq_puts(m, "fail\n");
else if (ret == 0)
- seq_printf(m, "off\n");
+ seq_puts(m, "off\n");
else
- seq_printf(m, "on\n");
+ seq_puts(m, "on\n");
return 0;
}
RW_FOPS(std_nic)
@@ -7792,16 +7806,16 @@ static ssize_t wd_exp_mode_write(struct file *file, const char __user *buffer,
}
static int show_wd_exp_mode(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_wd_exp_mode_fn(dev);
if (ret == 1)
- seq_printf(m, "tap\n");
+ seq_puts(m, "tap\n");
else if (ret == 0)
- seq_printf(m, "bypass\n");
+ seq_puts(m, "bypass\n");
else if (ret == 2)
- seq_printf(m, "disc\n");
+ seq_puts(m, "disc\n");
else
- seq_printf(m, "fail\n");
+ seq_puts(m, "fail\n");
return 0;
}
RW_FOPS(wd_exp_mode)
@@ -7818,20 +7832,20 @@ static ssize_t wd_autoreset_write(struct file *file, const char __user *buffer,
}
static int show_wd_autoreset(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_wd_autoreset_fn(dev);
if (ret >= 0)
seq_printf(m, "%d\n", ret);
else
- seq_printf(m, "fail\n");
+ seq_puts(m, "fail\n");
return 0;
}
RW_FOPS(wd_autoreset)
-int bypass_proc_create_dev_sd(bpctl_dev_t *pbp_device_block)
+int bypass_proc_create_dev_sd(struct bpctl_dev *pbp_device_block)
{
struct bypass_pfs_sd *current_pfs = &(pbp_device_block->bypass_pfs_set);
- static struct proc_dir_entry *procfs_dir = NULL;
+ static struct proc_dir_entry *procfs_dir;
int ret = 0;
if (!pbp_device_block->ndev)
@@ -7851,7 +7865,8 @@ int bypass_proc_create_dev_sd(bpctl_dev_t *pbp_device_block)
}
current_pfs->bypass_entry = procfs_dir;
-#define ENTRY(x) ret |= procfs_add(#x, &x##_ops, pbp_device_block)
+#define ENTRY(x) (ret |= procfs_add(#x, &x##_ops, pbp_device_block))
+
ENTRY(bypass_info);
if (pbp_device_block->bp_caps & SW_CTL_CAP) {
/* Create set param proc's */
@@ -7897,7 +7912,7 @@ int bypass_proc_create_dev_sd(bpctl_dev_t *pbp_device_block)
return ret;
}
-int bypass_proc_remove_dev_sd(bpctl_dev_t *pbp_device_block)
+int bypass_proc_remove_dev_sd(struct bpctl_dev *pbp_device_block)
{
struct bypass_pfs_sd *current_pfs = &pbp_device_block->bypass_pfs_set;
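
A minimal sketch of the export convention the patch adopts in this file: EXPORT_SYMBOL() placed immediately after the function it exports, replacing the obsolete EXPORT_SYMBOL_NOVERS list that used to sit at the end of the file. The function below is hypothetical.

#include <linux/module.h>

int example_get_state(int ifindex)
{
	return 0;	/* hypothetical body */
}
EXPORT_SYMBOL(example_get_state);
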
diff --git a/drivers/staging/silicom/bypasslib/bp_ioctl.h b/drivers/staging/silicom/bypasslib/bp_ioctl.h
index 040c6fa..2d1ef53 100644
--- a/drivers/staging/silicom/bypasslib/bp_ioctl.h
+++ b/drivers/staging/silicom/bypasslib/bp_ioctl.h
@@ -14,41 +14,41 @@
#ifndef BP_IOCTL_H
#define BP_IOCTL_H
-#define BP_CAP 0x01 //BIT_0
-#define BP_STATUS_CAP 0x02 //BIT_1
-#define BP_STATUS_CHANGE_CAP 0x04 //BIT_2
-#define SW_CTL_CAP 0x08 //BIT_3
-#define BP_DIS_CAP 0x10 //BIT_4
-#define BP_DIS_STATUS_CAP 0x20 //BIT_5
-#define STD_NIC_CAP 0x40 //BIT_6
-#define BP_PWOFF_ON_CAP 0x80 //BIT_7
-#define BP_PWOFF_OFF_CAP 0x0100 //BIT_8
-#define BP_PWOFF_CTL_CAP 0x0200 //BIT_9
-#define BP_PWUP_ON_CAP 0x0400 //BIT_10
-#define BP_PWUP_OFF_CAP 0x0800 //BIT_11
-#define BP_PWUP_CTL_CAP 0x1000 //BIT_12
-#define WD_CTL_CAP 0x2000 //BIT_13
-#define WD_STATUS_CAP 0x4000 //BIT_14
-#define WD_TIMEOUT_CAP 0x8000 //BIT_15
-#define TX_CTL_CAP 0x10000 //BIT_16
-#define TX_STATUS_CAP 0x20000 //BIT_17
-#define TAP_CAP 0x40000 //BIT_18
-#define TAP_STATUS_CAP 0x80000 //BIT_19
-#define TAP_STATUS_CHANGE_CAP 0x100000 //BIT_20
-#define TAP_DIS_CAP 0x200000 //BIT_21
-#define TAP_DIS_STATUS_CAP 0x400000 //BIT_22
-#define TAP_PWUP_ON_CAP 0x800000 //BIT_23
-#define TAP_PWUP_OFF_CAP 0x1000000 //BIT 24
-#define TAP_PWUP_CTL_CAP 0x2000000 //BIT 25
-#define NIC_CAP_NEG 0x4000000 //BIT 26
-#define TPL_CAP 0x8000000 //BIT 27
-#define DISC_CAP 0x10000000 //BIT 28
-#define DISC_DIS_CAP 0x20000000 //BIT 29
-#define DISC_PWUP_CTL_CAP 0x40000000 //BIT 30
+#define BP_CAP 0x01 /* BIT_0 */
+#define BP_STATUS_CAP 0x02 /* BIT_1 */
+#define BP_STATUS_CHANGE_CAP 0x04 /* BIT_2 */
+#define SW_CTL_CAP 0x08 /* BIT_3 */
+#define BP_DIS_CAP 0x10 /* BIT_4 */
+#define BP_DIS_STATUS_CAP 0x20 /* BIT_5 */
+#define STD_NIC_CAP 0x40 /* BIT_6 */
+#define BP_PWOFF_ON_CAP 0x80 /* BIT_7 */
+#define BP_PWOFF_OFF_CAP 0x0100 /* BIT_8 */
+#define BP_PWOFF_CTL_CAP 0x0200 /* BIT_9 */
+#define BP_PWUP_ON_CAP 0x0400 /* BIT_10 */
+#define BP_PWUP_OFF_CAP 0x0800 /* BIT_11 */
+#define BP_PWUP_CTL_CAP 0x1000 /* BIT_12 */
+#define WD_CTL_CAP 0x2000 /* BIT_13 */
+#define WD_STATUS_CAP 0x4000 /* BIT_14 */
+#define WD_TIMEOUT_CAP 0x8000 /* BIT_15 */
+#define TX_CTL_CAP 0x10000 /* BIT_16 */
+#define TX_STATUS_CAP 0x20000 /* BIT_17 */
+#define TAP_CAP 0x40000 /* BIT_18 */
+#define TAP_STATUS_CAP 0x80000 /* BIT_19 */
+#define TAP_STATUS_CHANGE_CAP 0x100000 /* BIT_20 */
+#define TAP_DIS_CAP 0x200000 /* BIT_21 */
+#define TAP_DIS_STATUS_CAP 0x400000 /* BIT_22 */
+#define TAP_PWUP_ON_CAP 0x800000 /* BIT_23 */
+#define TAP_PWUP_OFF_CAP 0x1000000 /* BIT 24 */
+#define TAP_PWUP_CTL_CAP 0x2000000 /* BIT 25 */
+#define NIC_CAP_NEG 0x4000000 /* BIT 26 */
+#define TPL_CAP 0x8000000 /* BIT 27 */
+#define DISC_CAP 0x10000000 /* BIT 28 */
+#define DISC_DIS_CAP 0x20000000 /* BIT 29 */
+#define DISC_PWUP_CTL_CAP 0x40000000 /* BIT 30 */
#define WD_MIN_TIME_MASK(val) (val & 0xf)
#define WD_STEP_COUNT_MASK(val) ((val & 0xf) << 5)
-#define WDT_STEP_TIME 0x10 //BIT_4
+#define WDT_STEP_TIME 0x10 /* BIT_4 */
#define WD_MIN_TIME_GET(desc) (desc & 0xf)
#define WD_STEP_COUNT_GET(desc) (desc>>5) & 0xf
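
The hunk above only converts the C++-style comments to C-style. As an illustrative aside (assuming <linux/bitops.h> is available), the same capability flags could be written with the BIT() helper instead of hand-written hex masks; this is a sketch of an alternative, not what the patch does.

#include <linux/bitops.h>

#define BP_CAP			BIT(0)	/* 0x01 */
#define BP_STATUS_CAP		BIT(1)	/* 0x02 */
#define SW_CTL_CAP		BIT(3)	/* 0x08 */
#define TPL_CAP			BIT(27)	/* 0x8000000 */
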
diff --git a/drivers/staging/silicom/bypasslib/bplibk.h b/drivers/staging/silicom/bypasslib/bplibk.h
index d8c1d27..c5c75c4 100644
--- a/drivers/staging/silicom/bypasslib/bplibk.h
+++ b/drivers/staging/silicom/bypasslib/bplibk.h
@@ -24,15 +24,13 @@
#define INTEL_PEG4BPFII_PID 0x10a1
#define PEGII_IF_SERIES(vid, pid) \
- ((vid==0x8086)&& \
- ((pid==INTEL_PEG4BPII_PID)|| \
- (pid==INTEL_PEG4BPFII_PID)))
-
-#define EXPORT_SYMBOL_NOVERS EXPORT_SYMBOL
+ ((vid == 0x8086) && \
+ ((pid == INTEL_PEG4BPII_PID) || \
+ (pid == INTEL_PEG4BPFII_PID)))
#ifdef BP_VENDOR_SUPPORT
-char *bp_desc_array[] =
- { "e1000bp", "e1000bpe", "slcm5700", "bnx2xbp", "ixgbp", "ixgbpe", NULL };
+char *bp_desc_array[] = { "e1000bp", "e1000bpe", "slcm5700",
+ "bnx2xbp", "ixgbp", "ixgbpe", NULL };
#endif
#endif
diff --git a/drivers/staging/silicom/bypasslib/bypass.c b/drivers/staging/silicom/bypasslib/bypass.c
index 9ed2508..ba0d23a 100644
--- a/drivers/staging/silicom/bypasslib/bypass.c
+++ b/drivers/staging/silicom/bypasslib/bypass.c
@@ -188,69 +188,82 @@ static int is_bypass(int if_index)
return is_bypass_dev(if_index);
return ret;
}
+EXPORT_SYMBOL(is_bypass);
static int get_bypass_slave(int if_index)
{
DO_BPLIB_GET_ARG_FN(get_bypass_slave, GET_BYPASS_SLAVE, if_index);
}
+EXPORT_SYMBOL(get_bypass_slave);
static int get_bypass_caps(int if_index)
{
DO_BPLIB_GET_ARG_FN(get_bypass_caps, GET_BYPASS_CAPS, if_index);
}
+EXPORT_SYMBOL(get_bypass_caps);
static int get_wd_set_caps(int if_index)
{
DO_BPLIB_GET_ARG_FN(get_wd_set_caps, GET_WD_SET_CAPS, if_index);
}
+EXPORT_SYMBOL(get_wd_set_caps);
static int set_bypass(int if_index, int bypass_mode)
{
DO_BPLIB_SET_ARG_FN(set_bypass, SET_BYPASS, if_index, bypass_mode);
}
+EXPORT_SYMBOL(set_bypass);
static int get_bypass(int if_index)
{
DO_BPLIB_GET_ARG_FN(get_bypass, GET_BYPASS, if_index);
}
+EXPORT_SYMBOL(get_bypass);
static int get_bypass_change(int if_index)
{
DO_BPLIB_GET_ARG_FN(get_bypass_change, GET_BYPASS_CHANGE, if_index);
}
+EXPORT_SYMBOL(get_bypass_change);
static int set_dis_bypass(int if_index, int dis_bypass)
{
DO_BPLIB_SET_ARG_FN(set_dis_bypass, SET_DIS_BYPASS, if_index,
dis_bypass);
}
+EXPORT_SYMBOL(set_dis_bypass);
static int get_dis_bypass(int if_index)
{
DO_BPLIB_GET_ARG_FN(get_dis_bypass, GET_DIS_BYPASS, if_index);
}
+EXPORT_SYMBOL(get_dis_bypass);
static int set_bypass_pwoff(int if_index, int bypass_mode)
{
DO_BPLIB_SET_ARG_FN(set_bypass_pwoff, SET_BYPASS_PWOFF, if_index,
bypass_mode);
}
+EXPORT_SYMBOL(set_bypass_pwoff);
static int get_bypass_pwoff(int if_index)
{
DO_BPLIB_GET_ARG_FN(get_bypass_pwoff, GET_BYPASS_PWOFF, if_index);
}
+EXPORT_SYMBOL(get_bypass_pwoff);
static int set_bypass_pwup(int if_index, int bypass_mode)
{
DO_BPLIB_SET_ARG_FN(set_bypass_pwup, SET_BYPASS_PWUP, if_index,
bypass_mode);
}
+EXPORT_SYMBOL(set_bypass_pwup);
static int get_bypass_pwup(int if_index)
{
DO_BPLIB_GET_ARG_FN(get_bypass_pwup, GET_BYPASS_PWUP, if_index);
}
+EXPORT_SYMBOL(get_bypass_pwup);
static int set_bypass_wd(int if_index, int ms_timeout, int *ms_timeout_set)
{
@@ -267,6 +280,7 @@ static int set_bypass_wd(int if_index, int ms_timeout, int *ms_timeout_set)
}
return ret;
}
+EXPORT_SYMBOL(set_bypass_wd);
static int get_bypass_wd(int if_index, int *ms_timeout_set)
{
@@ -278,6 +292,7 @@ static int get_bypass_wd(int if_index, int *ms_timeout_set)
ret = doit(GET_BYPASS_WD, if_index, data);
return ret;
}
+EXPORT_SYMBOL(get_bypass_wd);
static int get_wd_expire_time(int if_index, int *ms_time_left)
{
@@ -292,143 +307,171 @@ static int get_wd_expire_time(int if_index, int *ms_time_left)
}
return ret;
}
+EXPORT_SYMBOL(get_wd_expire_time);
static int reset_bypass_wd_timer(int if_index)
{
DO_BPLIB_GET_ARG_FN(reset_bypass_wd_timer, RESET_BYPASS_WD_TIMER,
if_index);
}
+EXPORT_SYMBOL(reset_bypass_wd_timer);
static int set_std_nic(int if_index, int bypass_mode)
{
DO_BPLIB_SET_ARG_FN(set_std_nic, SET_STD_NIC, if_index, bypass_mode);
}
+EXPORT_SYMBOL(set_std_nic);
static int get_std_nic(int if_index)
{
DO_BPLIB_GET_ARG_FN(get_std_nic, GET_STD_NIC, if_index);
}
+EXPORT_SYMBOL(get_std_nic);
static int set_tx(int if_index, int tx_state)
{
DO_BPLIB_SET_ARG_FN(set_tx, SET_TX, if_index, tx_state);
}
+EXPORT_SYMBOL(set_tx);
static int get_tx(int if_index)
{
DO_BPLIB_GET_ARG_FN(get_tx, GET_TX, if_index);
}
+EXPORT_SYMBOL(get_tx);
static int set_tap(int if_index, int tap_mode)
{
DO_BPLIB_SET_ARG_FN(set_tap, SET_TAP, if_index, tap_mode);
}
+EXPORT_SYMBOL(set_tap);
static int get_tap(int if_index)
{
DO_BPLIB_GET_ARG_FN(get_tap, GET_TAP, if_index);
}
+EXPORT_SYMBOL(get_tap);
static int get_tap_change(int if_index)
{
DO_BPLIB_GET_ARG_FN(get_tap_change, GET_TAP_CHANGE, if_index);
}
+EXPORT_SYMBOL(get_tap_change);
static int set_dis_tap(int if_index, int dis_tap)
{
DO_BPLIB_SET_ARG_FN(set_dis_tap, SET_DIS_TAP, if_index, dis_tap);
}
+EXPORT_SYMBOL(set_dis_tap);
static int get_dis_tap(int if_index)
{
DO_BPLIB_GET_ARG_FN(get_dis_tap, GET_DIS_TAP, if_index);
}
+EXPORT_SYMBOL(get_dis_tap);
static int set_tap_pwup(int if_index, int tap_mode)
{
DO_BPLIB_SET_ARG_FN(set_tap_pwup, SET_TAP_PWUP, if_index, tap_mode);
}
+EXPORT_SYMBOL(set_tap_pwup);
static int get_tap_pwup(int if_index)
{
DO_BPLIB_GET_ARG_FN(get_tap_pwup, GET_TAP_PWUP, if_index);
}
+EXPORT_SYMBOL(get_tap_pwup);
static int set_bp_disc(int if_index, int disc_mode)
{
DO_BPLIB_SET_ARG_FN(set_bp_disc, SET_DISC, if_index, disc_mode);
}
+EXPORT_SYMBOL(set_bp_disc);
static int get_bp_disc(int if_index)
{
DO_BPLIB_GET_ARG_FN(get_bp_disc, GET_DISC, if_index);
}
+EXPORT_SYMBOL(get_bp_disc);
static int get_bp_disc_change(int if_index)
{
DO_BPLIB_GET_ARG_FN(get_bp_disc_change, GET_DISC_CHANGE, if_index);
}
+EXPORT_SYMBOL(get_bp_disc_change);
static int set_bp_dis_disc(int if_index, int dis_disc)
{
DO_BPLIB_SET_ARG_FN(set_bp_dis_disc, SET_DIS_DISC, if_index, dis_disc);
}
+EXPORT_SYMBOL(set_bp_dis_disc);
static int get_bp_dis_disc(int if_index)
{
DO_BPLIB_GET_ARG_FN(get_bp_dis_disc, GET_DIS_DISC, if_index);
}
+EXPORT_SYMBOL(get_bp_dis_disc);
static int set_bp_disc_pwup(int if_index, int disc_mode)
{
DO_BPLIB_SET_ARG_FN(set_bp_disc_pwup, SET_DISC_PWUP, if_index,
disc_mode);
}
+EXPORT_SYMBOL(set_bp_disc_pwup);
static int get_bp_disc_pwup(int if_index)
{
DO_BPLIB_GET_ARG_FN(get_bp_disc_pwup, GET_DISC_PWUP, if_index);
}
+EXPORT_SYMBOL(get_bp_disc_pwup);
static int set_wd_exp_mode(int if_index, int mode)
{
DO_BPLIB_SET_ARG_FN(set_wd_exp_mode, SET_WD_EXP_MODE, if_index, mode);
}
+EXPORT_SYMBOL(set_wd_exp_mode);
static int get_wd_exp_mode(int if_index)
{
DO_BPLIB_GET_ARG_FN(get_wd_exp_mode, GET_WD_EXP_MODE, if_index);
}
+EXPORT_SYMBOL(get_wd_exp_mode);
static int set_wd_autoreset(int if_index, int time)
{
DO_BPLIB_SET_ARG_FN(set_wd_autoreset, SET_WD_AUTORESET, if_index, time);
}
+EXPORT_SYMBOL(set_wd_autoreset);
static int get_wd_autoreset(int if_index)
{
DO_BPLIB_GET_ARG_FN(get_wd_autoreset, GET_WD_AUTORESET, if_index);
}
+EXPORT_SYMBOL(get_wd_autoreset);
static int set_tpl(int if_index, int tpl_mode)
{
DO_BPLIB_SET_ARG_FN(set_tpl, SET_TPL, if_index, tpl_mode);
}
+EXPORT_SYMBOL(set_tpl);
static int get_tpl(int if_index)
{
DO_BPLIB_GET_ARG_FN(get_tpl, GET_TPL, if_index);
}
+EXPORT_SYMBOL(get_tpl);
static int set_bp_hw_reset(int if_index, int mode)
{
DO_BPLIB_SET_ARG_FN(set_tpl, SET_BP_HW_RESET, if_index, mode);
}
+EXPORT_SYMBOL(set_bp_hw_reset);
static int get_bp_hw_reset(int if_index)
{
DO_BPLIB_GET_ARG_FN(get_tpl, GET_BP_HW_RESET, if_index);
}
+EXPORT_SYMBOL(get_bp_hw_reset);
static int get_bypass_info(int if_index, struct bp_info *bp_info)
{
@@ -467,6 +510,7 @@ static int get_bypass_info(int if_index, struct bp_info *bp_info)
}
return ret;
}
+EXPORT_SYMBOL(get_bypass_info);
int init_lib_module(void)
{
@@ -479,50 +523,5 @@ void cleanup_lib_module(void)
{
}
-EXPORT_SYMBOL_NOVERS(is_bypass);
-EXPORT_SYMBOL_NOVERS(get_bypass_slave);
-EXPORT_SYMBOL_NOVERS(get_bypass_caps);
-EXPORT_SYMBOL_NOVERS(get_wd_set_caps);
-EXPORT_SYMBOL_NOVERS(set_bypass);
-EXPORT_SYMBOL_NOVERS(get_bypass);
-EXPORT_SYMBOL_NOVERS(get_bypass_change);
-EXPORT_SYMBOL_NOVERS(set_dis_bypass);
-EXPORT_SYMBOL_NOVERS(get_dis_bypass);
-EXPORT_SYMBOL_NOVERS(set_bypass_pwoff);
-EXPORT_SYMBOL_NOVERS(get_bypass_pwoff);
-EXPORT_SYMBOL_NOVERS(set_bypass_pwup);
-EXPORT_SYMBOL_NOVERS(get_bypass_pwup);
-EXPORT_SYMBOL_NOVERS(set_bypass_wd);
-EXPORT_SYMBOL_NOVERS(get_bypass_wd);
-EXPORT_SYMBOL_NOVERS(get_wd_expire_time);
-EXPORT_SYMBOL_NOVERS(reset_bypass_wd_timer);
-EXPORT_SYMBOL_NOVERS(set_std_nic);
-EXPORT_SYMBOL_NOVERS(get_std_nic);
-EXPORT_SYMBOL_NOVERS(set_tx);
-EXPORT_SYMBOL_NOVERS(get_tx);
-EXPORT_SYMBOL_NOVERS(set_tap);
-EXPORT_SYMBOL_NOVERS(get_tap);
-EXPORT_SYMBOL_NOVERS(get_tap_change);
-EXPORT_SYMBOL_NOVERS(set_dis_tap);
-EXPORT_SYMBOL_NOVERS(get_dis_tap);
-EXPORT_SYMBOL_NOVERS(set_tap_pwup);
-EXPORT_SYMBOL_NOVERS(get_tap_pwup);
-EXPORT_SYMBOL_NOVERS(set_bp_disc);
-EXPORT_SYMBOL_NOVERS(get_bp_disc);
-EXPORT_SYMBOL_NOVERS(get_bp_disc_change);
-EXPORT_SYMBOL_NOVERS(set_bp_dis_disc);
-EXPORT_SYMBOL_NOVERS(get_bp_dis_disc);
-EXPORT_SYMBOL_NOVERS(set_bp_disc_pwup);
-EXPORT_SYMBOL_NOVERS(get_bp_disc_pwup);
-EXPORT_SYMBOL_NOVERS(set_wd_exp_mode);
-EXPORT_SYMBOL_NOVERS(get_wd_exp_mode);
-EXPORT_SYMBOL_NOVERS(set_wd_autoreset);
-EXPORT_SYMBOL_NOVERS(get_wd_autoreset);
-EXPORT_SYMBOL_NOVERS(set_tpl);
-EXPORT_SYMBOL_NOVERS(get_tpl);
-EXPORT_SYMBOL_NOVERS(set_bp_hw_reset);
-EXPORT_SYMBOL_NOVERS(get_bp_hw_reset);
-EXPORT_SYMBOL_NOVERS(get_bypass_info);
-
module_init(init_lib_module);
module_exit(cleanup_lib_module);
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index e4b8277..869dcd3 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -3651,17 +3651,20 @@ static int slic_entry_probe(struct pci_dev *pcidev,
if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
- if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
+ err = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
+ if (err) {
dev_err(&pcidev->dev, "unable to obtain 64-bit DMA for "
"consistent allocations\n");
goto err_out_disable_pci;
}
- } else if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
+ } else {
+ err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pcidev->dev, "no usable DMA configuration\n");
+ goto err_out_disable_pci;
+ }
pci_using_dac = 0;
pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
- } else {
- dev_err(&pcidev->dev, "no usable DMA configuration\n");
- goto err_out_disable_pci;
}
err = pci_request_regions(pcidev, DRV_NAME);
@@ -3696,6 +3699,7 @@ static int slic_entry_probe(struct pci_dev *pcidev,
if (!memmapped_ioaddr) {
dev_err(&pcidev->dev, "cannot remap MMIO region %lx @ %lx\n",
mmio_len, mmio_start);
+ err = -ENOMEM;
goto err_out_free_netdev;
}
@@ -3706,8 +3710,8 @@ static int slic_entry_probe(struct pci_dev *pcidev,
slic_init_adapter(netdev,
pcidev, pci_tbl_entry, memmapped_ioaddr, cards_found);
- status = slic_card_locate(adapter);
- if (status) {
+ err = slic_card_locate(adapter);
+ if (err) {
dev_err(&pcidev->dev, "cannot locate card\n");
goto err_out_free_mmio_region;
}
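
A minimal sketch of the DMA-mask fallback the slicoss hunk restructures: try the 64-bit masks first, fall back to 32-bit, and propagate the error code instead of discarding it. The helper name is hypothetical; the pci_* calls are the ones appearing in the patch context.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int example_setup_dma(struct pci_dev *pcidev, int *using_dac)
{
	int err;

	if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
		*using_dac = 1;
		err = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
	} else {
		*using_dac = 0;
		err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pcidev,
							  DMA_BIT_MASK(32));
	}
	return err;	/* 0 on success, negative errno otherwise */
}
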
diff --git a/drivers/staging/speakup/Kconfig b/drivers/staging/speakup/Kconfig
index b416ace..8c3e7a6 100644
--- a/drivers/staging/speakup/Kconfig
+++ b/drivers/staging/speakup/Kconfig
@@ -11,7 +11,7 @@ config SPEAKUP
point your browser at <http://www.linux-speakup.org/>.
There is also a mailing list at the above url that you
can subscribe to.
-
+
Supported synthesizers are accent sa, accent pc,
appollo II., Auddapter, Braille 'n Speak, Dectalk
external (old), Dectalk PC (full length isa board),
@@ -19,24 +19,24 @@ config SPEAKUP
Litetalk, Keynote Gold internal PC, software
synthesizers, Speakout, transport, and a dummy module
that can be used with a plain text terminal.
-
+
Speakup can either be built in or compiled as a module
by answering y or m. If you answer y here, then you
must answer either y or m to at least one of the
synthesizer drivers below. If you answer m here, then
the synthesizer drivers below can only be built as
modules.
-
+
These drivers are not standalone drivers, but must be
used in conjunction with Speakup. Think of them as
video cards for blind people.
-
-
+
+
The Dectalk pc driver can only be built as a module, and
requires software to be pre-loaded on to the card before
the module can be loaded. See the decpc choice below
for more details.
-
+
If you are not a blind person, or don't have access to
one of the listed synthesizers, you should say n.
@@ -84,7 +84,7 @@ config SPEAKUP_SYNTH_BNS
config SPEAKUP_SYNTH_DECTLK
tristate "DECtalk Express synthesizer support"
---help---
-
+
This is the Speakup driver for the DecTalk Express
synthesizer. You can say y to build it into the kernel,
or m to build it as a module. See the configuration
@@ -93,7 +93,7 @@ config SPEAKUP_SYNTH_DECTLK
config SPEAKUP_SYNTH_DECEXT
tristate "DECtalk External (old) synthesizer support"
---help---
-
+
This is the Speakup driver for the DecTalk External
(old) synthesizer. You can say y to build it into the
kernel, or m to build it as a module. See the
@@ -104,12 +104,12 @@ config SPEAKUP_SYNTH_DECPC
depends on m
tristate "DECtalk PC (big ISA card) synthesizer support"
---help---
-
+
This is the Speakup driver for the DecTalk PC (full
length ISA) synthesizer. You can say m to build it as
a module. See the configuration help on the Speakup
choice above for more info.
-
+
In order to use the DecTalk PC driver, you must download
the dec_pc.tgz file from linux-speakup.org. It is in
the pub/linux/goodies directory. The dec_pc.tgz file
@@ -118,14 +118,14 @@ config SPEAKUP_SYNTH_DECPC
This driver must be built as a module, and can not be
loaded until the file system is mounted and the DecTalk
PC software has been pre-loaded on to the board.
-
+
See the README file in the dec_pc.tgz file for more
details.
config SPEAKUP_SYNTH_DTLK
tristate "DoubleTalk PC synthesizer support"
---help---
-
+
This is the Speakup driver for the internal DoubleTalk
PC synthesizer. You can say y to build it into the
kernel, or m to build it as a module. See the
@@ -135,7 +135,7 @@ config SPEAKUP_SYNTH_DTLK
config SPEAKUP_SYNTH_KEYPC
tristate "Keynote Gold PC synthesizer support"
---help---
-
+
This is the Speakup driver for the Keynote Gold
PC synthesizer. You can say y to build it into the
kernel, or m to build it as a module. See the
@@ -166,7 +166,7 @@ config SPEAKUP_SYNTH_SOFT
config SPEAKUP_SYNTH_SPKOUT
tristate "Speak Out synthesizer support"
---help---
-
+
This is the Speakup driver for the Speakout synthesizer.
You can say y to build it into the kernel, or m to
build it as a module. See the configuration help on the
@@ -175,7 +175,7 @@ config SPEAKUP_SYNTH_SPKOUT
config SPEAKUP_SYNTH_TXPRT
tristate "Transport synthesizer support"
---help---
-
+
This is the Speakup driver for the Transport
synthesizer. You can say y to build it into the kernel,
or m to build it as a module. See the configuration
@@ -184,7 +184,7 @@ config SPEAKUP_SYNTH_TXPRT
config SPEAKUP_SYNTH_DUMMY
tristate "Dummy synthesizer driver (for testing)"
---help---
-
+
This is a dummy Speakup driver for plugging a mere serial
terminal. This is handy if you want to test speakup but
don't have the hardware. You can say y to build it into
diff --git a/drivers/staging/speakup/devsynth.c b/drivers/staging/speakup/devsynth.c
index 940769e..71c728a 100644
--- a/drivers/staging/speakup/devsynth.c
+++ b/drivers/staging/speakup/devsynth.c
@@ -13,11 +13,11 @@
static int misc_registered;
static int dev_opened;
-static ssize_t speakup_file_write(struct file *fp, const char *buffer,
- size_t nbytes, loff_t *ppos)
+static ssize_t speakup_file_write(struct file *fp, const char __user *buffer,
+ size_t nbytes, loff_t *ppos)
{
size_t count = nbytes;
- const char *ptr = buffer;
+ const char __user *ptr = buffer;
size_t bytes;
unsigned long flags;
u_char buf[256];
@@ -30,15 +30,15 @@ static ssize_t speakup_file_write(struct file *fp, const char *buffer,
return -EFAULT;
count -= bytes;
ptr += bytes;
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
synth_write(buf, bytes);
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
return (ssize_t) nbytes;
}
-static ssize_t speakup_file_read(struct file *fp, char *buf, size_t nbytes,
- loff_t *ppos)
+static ssize_t speakup_file_read(struct file *fp, char __user *buf,
+ size_t nbytes, loff_t *ppos)
{
return 0;
}
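
A minimal sketch of the __user annotation and chunked copy_from_user() loop the devsynth hunk adopts; the buffer size, names, and the "consumer" step are hypothetical.

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t example_file_write(struct file *fp, const char __user *buffer,
				  size_t nbytes, loff_t *ppos)
{
	u_char buf[256];
	size_t count = nbytes;
	const char __user *ptr = buffer;

	while (count > 0) {
		size_t bytes = min(count, sizeof(buf));

		if (copy_from_user(buf, ptr, bytes))
			return -EFAULT;
		count -= bytes;
		ptr += bytes;
		/* hand "buf"/"bytes" to the consumer here */
	}
	return (ssize_t)nbytes;
}
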
diff --git a/drivers/staging/speakup/i18n.c b/drivers/staging/speakup/i18n.c
index 2add1fc..9ea16c5 100644
--- a/drivers/staging/speakup/i18n.c
+++ b/drivers/staging/speakup/i18n.c
@@ -558,11 +558,11 @@ ssize_t spk_msg_set(enum msg_index_t index, char *text, size_t length)
kfree(newstr);
return -EINVAL;
}
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
if (speakup_msgs[index] != speakup_default_msgs[index])
kfree(speakup_msgs[index]);
speakup_msgs[index] = newstr;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
} else {
rc = -ENOMEM;
}
@@ -595,14 +595,14 @@ void spk_reset_msg_group(struct msg_group_t *group)
unsigned long flags;
enum msg_index_t i;
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
for (i = group->start; i <= group->end; i++) {
if (speakup_msgs[i] != speakup_default_msgs[i])
kfree(speakup_msgs[i]);
speakup_msgs[i] = speakup_default_msgs[i];
}
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
/* Called at initialization time, to establish default messages. */
@@ -618,12 +618,12 @@ void spk_free_user_msgs(void)
enum msg_index_t index;
unsigned long flags;
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
for (index = MSG_FIRST_INDEX; index < MSG_LAST_INDEX; index++) {
if (speakup_msgs[index] != speakup_default_msgs[index]) {
kfree(speakup_msgs[index]);
speakup_msgs[index] = speakup_default_msgs[index];
}
}
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
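
A minimal sketch of the locking conversion applied throughout the speakup files: the spk_lock()/spk_unlock() wrappers are replaced by open-coded spin_lock_irqsave()/spin_unlock_irqrestore() on speakup_info.spinlock. The lock and function below are stand-ins for illustration only.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* stands in for speakup_info.spinlock */

static void example_critical_section(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* touch shared state that an IRQ handler may also use */
	spin_unlock_irqrestore(&example_lock, flags);
}
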
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index 943b6c1..51bdea3 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -35,7 +35,7 @@ static ssize_t chars_chartab_show(struct kobject *kobj,
size_t bufsize = PAGE_SIZE;
unsigned long flags;
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
*buf_pointer = '\0';
for (i = 0; i < 256; i++) {
if (bufsize <= 1)
@@ -70,7 +70,7 @@ static ssize_t chars_chartab_show(struct kobject *kobj,
bufsize -= len;
buf_pointer += len;
}
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return buf_pointer - buf;
}
@@ -127,7 +127,7 @@ static ssize_t chars_chartab_store(struct kobject *kobj,
size_t desc_length = 0;
int i;
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
while (cp < end) {
while ((cp < end) && (*cp == ' ' || *cp == '\t'))
@@ -212,7 +212,7 @@ static ssize_t chars_chartab_store(struct kobject *kobj,
spk_reset_default_chartab();
}
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
report_char_chartab_status(reset, received, used, rejected,
do_characters);
return retval;
@@ -232,7 +232,7 @@ static ssize_t keymap_show(struct kobject *kobj, struct kobj_attribute *attr,
u_char *cp1;
u_char ch;
unsigned long flags;
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
cp1 = spk_key_buf + SHIFT_TBL_SIZE;
num_keys = (int)(*cp1);
nstates = (int)cp1[1];
@@ -248,7 +248,7 @@ static ssize_t keymap_show(struct kobject *kobj, struct kobj_attribute *attr,
}
}
cp += sprintf(cp, "0, %d\n", KEY_MAP_VER);
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return (int)(cp-buf);
}
@@ -265,17 +265,17 @@ static ssize_t keymap_store(struct kobject *kobj, struct kobj_attribute *attr,
u_char *cp1;
unsigned long flags;
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
in_buff = kmemdup(buf, count + 1, GFP_ATOMIC);
if (!in_buff) {
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return -ENOMEM;
}
if (strchr("dDrR", *in_buff)) {
spk_set_key_info(spk_key_defaults, spk_key_buf);
pr_info("keymap set to default values\n");
kfree(in_buff);
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return count;
}
if (in_buff[count - 1] == '\n')
@@ -294,7 +294,7 @@ static ssize_t keymap_store(struct kobject *kobj, struct kobj_attribute *attr,
pr_warn("i %d %d %d %d\n", i,
(int)cp1[-3], (int)cp1[-2], (int)cp1[-1]);
kfree(in_buff);
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return -EINVAL;
}
while (--i >= 0) {
@@ -315,7 +315,7 @@ static ssize_t keymap_store(struct kobject *kobj, struct kobj_attribute *attr,
}
}
kfree(in_buff);
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return ret;
}
@@ -341,7 +341,7 @@ static ssize_t silent_store(struct kobject *kobj, struct kobj_attribute *attr,
pr_warn("silent value '%c' not in range (0,7)\n", ch);
return -EINVAL;
}
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
if (ch&2) {
shut = 1;
spk_do_flush();
@@ -354,7 +354,7 @@ static ssize_t silent_store(struct kobject *kobj, struct kobj_attribute *attr,
spk_shut_up |= shut;
else
spk_shut_up &= ~shut;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return count;
}
@@ -470,7 +470,7 @@ static ssize_t punc_show(struct kobject *kobj, struct kobj_attribute *attr,
return -EINVAL;
}
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
pb = (struct st_bits_data *) &spk_punc_info[var->value];
mask = pb->mask;
for (i = 33; i < 128; i++) {
@@ -478,7 +478,7 @@ static ssize_t punc_show(struct kobject *kobj, struct kobj_attribute *attr,
continue;
*cp++ = (char)i;
}
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return cp-buf;
}
@@ -518,14 +518,14 @@ static ssize_t punc_store(struct kobject *kobj, struct kobj_attribute *attr,
x--;
punc_buf[x] = '\0';
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
if (*punc_buf == 'd' || *punc_buf == 'r')
- x = spk_set_mask_bits(0, var->value, 3);
+ x = spk_set_mask_bits(NULL, var->value, 3);
else
x = spk_set_mask_bits(punc_buf, var->value, 3);
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return count;
}
@@ -547,7 +547,7 @@ ssize_t spk_var_show(struct kobject *kobj, struct kobj_attribute *attr,
if (param == NULL)
return -EINVAL;
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
var = (struct var_t *) param->data;
switch (param->var_type) {
case VAR_NUM:
@@ -580,7 +580,7 @@ ssize_t spk_var_show(struct kobject *kobj, struct kobj_attribute *attr,
param->name, param->var_type);
break;
}
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return rv;
}
EXPORT_SYMBOL_GPL(spk_var_show);
@@ -609,7 +609,7 @@ ssize_t spk_var_store(struct kobject *kobj, struct kobj_attribute *attr,
cp = (char *)buf;
string_unescape_any_inplace(cp);
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
switch (param->var_type) {
case VAR_NUM:
case VAR_TIME:
@@ -670,7 +670,7 @@ ssize_t spk_var_store(struct kobject *kobj, struct kobj_attribute *attr,
}
}
}
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (ret == -ERESTART)
pr_info("%s reset to default value\n", attr->attr.name);
@@ -818,9 +818,9 @@ static ssize_t message_show(struct kobject *kobj,
unsigned long flags;
BUG_ON(!group);
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
retval = message_show_helper(buf, group->start, group->end);
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return retval;
}
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 6c7b55c..14079c4 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -95,7 +95,8 @@ const struct st_bits_data spk_punc_info[] = {
static char mark_cut_flag;
#define MAX_KEY 160
-u_char *spk_our_keys[MAX_KEY], *spk_shift_table;
+static u_char *spk_shift_table;
+u_char *spk_our_keys[MAX_KEY];
u_char spk_key_buf[600];
const u_char spk_key_defaults[] = {
#include "speakupmap.h"
@@ -457,7 +458,7 @@ static void speak_char(u_char ch)
synth_buffer_add(SPACE);
}
-static u16 get_char(struct vc_data *vc, u16 * pos, u_char * attribs)
+static u16 get_char(struct vc_data *vc, u16 *pos, u_char *attribs)
{
u16 ch = ' ';
if (vc && pos) {
@@ -1129,7 +1130,7 @@ static void do_handle_shift(struct vc_data *vc, u_char value, char up_flag)
unsigned long flags;
if (synth == NULL || up_flag || spk_killed)
return;
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
if (cursor_track == read_all_mode) {
switch (value) {
case KVAL(K_SHIFT):
@@ -1151,20 +1152,20 @@ static void do_handle_shift(struct vc_data *vc, u_char value, char up_flag)
}
if (spk_say_ctrl && value < NUM_CTL_LABELS)
synth_printf("%s", spk_msg_get(MSG_CTL_START + value));
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
static void do_handle_latin(struct vc_data *vc, u_char value, char up_flag)
{
unsigned long flags;
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
if (up_flag) {
spk_lastkey = spk_keydown = 0;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return;
}
if (synth == NULL || spk_killed) {
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return;
}
spk_shut_up &= 0xfe;
@@ -1173,7 +1174,7 @@ static void do_handle_latin(struct vc_data *vc, u_char value, char up_flag)
spk_parked &= 0xfe;
if (spk_key_echo == 2 && value >= MINECHOCHAR)
speak_char(value);
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
int spk_set_key_info(const u_char *key_info, u_char *k_buffer)
@@ -1282,7 +1283,7 @@ static int edit_bits(struct vc_data *vc, u_char type, u_char ch, u_short key)
}
/* Allocation concurrency is protected by the console semaphore */
-int speakup_allocate(struct vc_data *vc)
+static int speakup_allocate(struct vc_data *vc)
{
int vc_num;
@@ -1299,7 +1300,7 @@ int speakup_allocate(struct vc_data *vc)
return 0;
}
-void speakup_deallocate(struct vc_data *vc)
+static void speakup_deallocate(struct vc_data *vc)
{
int vc_num;
@@ -1449,21 +1450,21 @@ static void handle_cursor_read_all(struct vc_data *vc, int command)
static int pre_handle_cursor(struct vc_data *vc, u_char value, char up_flag)
{
unsigned long flags;
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
if (cursor_track == read_all_mode) {
spk_parked &= 0xfe;
if (synth == NULL || up_flag || spk_shut_up) {
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return NOTIFY_STOP;
}
del_timer(&cursor_timer);
spk_shut_up &= 0xfe;
spk_do_flush();
start_read_all_timer(vc, value + 1);
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return NOTIFY_STOP;
}
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return NOTIFY_OK;
}
@@ -1472,10 +1473,10 @@ static void do_handle_cursor(struct vc_data *vc, u_char value, char up_flag)
unsigned long flags;
struct var_t *cursor_timeout;
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
spk_parked &= 0xfe;
if (synth == NULL || up_flag || spk_shut_up || cursor_track == CT_Off) {
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return;
}
spk_shut_up &= 0xfe;
@@ -1494,7 +1495,7 @@ static void do_handle_cursor(struct vc_data *vc, u_char value, char up_flag)
cursor_timeout = spk_get_var(CURSOR_TIME);
mod_timer(&cursor_timer,
jiffies + msecs_to_jiffies(cursor_timeout->u.n.value));
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
static void update_color_buffer(struct vc_data *vc, const char *ic, int len)
@@ -1619,7 +1620,7 @@ static void cursor_done(u_long data)
struct vc_data *vc = vc_cons[cursor_con].d;
unsigned long flags;
del_timer(&cursor_timer);
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
if (cursor_con != fg_console) {
is_cursor = 0;
goto out;
@@ -1650,7 +1651,7 @@ static void cursor_done(u_long data)
say_char(vc);
spk_keydown = is_cursor = 0;
out:
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
/* called by: vt_notifier_call() */
@@ -1659,13 +1660,13 @@ static void speakup_bs(struct vc_data *vc)
unsigned long flags;
if (!speakup_console[vc->vc_num])
return;
- if (!spk_trylock(flags))
+ if (!spin_trylock_irqsave(&speakup_info.spinlock, flags))
/* Speakup output, discard */
return;
if (!spk_parked)
speakup_date(vc);
if (spk_shut_up || synth == NULL) {
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return;
}
if (vc->vc_num == fg_console && spk_keydown) {
@@ -1673,7 +1674,7 @@ static void speakup_bs(struct vc_data *vc)
if (!is_cursor)
say_char(vc);
}
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
/* called by: vt_notifier_call() */
@@ -1682,7 +1683,7 @@ static void speakup_con_write(struct vc_data *vc, const char *str, int len)
unsigned long flags;
if ((vc->vc_num != fg_console) || spk_shut_up || synth == NULL)
return;
- if (!spk_trylock(flags))
+ if (!spin_trylock_irqsave(&speakup_info.spinlock, flags))
/* Speakup output, discard */
return;
if (spk_bell_pos && spk_keydown && (vc->vc_x == spk_bell_pos - 1))
@@ -1690,31 +1691,31 @@ static void speakup_con_write(struct vc_data *vc, const char *str, int len)
if ((is_cursor) || (cursor_track == read_all_mode)) {
if (cursor_track == CT_Highlight)
update_color_buffer(vc, str, len);
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return;
}
if (win_enabled) {
if (vc->vc_x >= win_left && vc->vc_x <= win_right &&
vc->vc_y >= win_top && vc->vc_y <= win_bottom) {
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return;
}
}
spkup_write(str, len);
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
-void speakup_con_update(struct vc_data *vc)
+static void speakup_con_update(struct vc_data *vc)
{
unsigned long flags;
if (speakup_console[vc->vc_num] == NULL || spk_parked)
return;
- if (!spk_trylock(flags))
+ if (!spin_trylock_irqsave(&speakup_info.spinlock, flags))
/* Speakup output, discard */
return;
speakup_date(vc);
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
static void do_handle_spec(struct vc_data *vc, u_char value, char up_flag)
@@ -1724,7 +1725,7 @@ static void do_handle_spec(struct vc_data *vc, u_char value, char up_flag)
char *label;
if (synth == NULL || up_flag || spk_killed)
return;
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
spk_shut_up &= 0xfe;
if (spk_no_intr)
spk_do_flush();
@@ -1745,13 +1746,13 @@ static void do_handle_spec(struct vc_data *vc, u_char value, char up_flag)
break;
default:
spk_parked &= 0xfe;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return;
}
if (on_off < 2)
synth_printf("%s %s\n",
label, spk_msg_get(MSG_STATUS_START + on_off));
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
static int inc_dec_var(u_char value)
@@ -1892,7 +1893,7 @@ oops:
spk_special_handler = NULL;
return 1;
}
- go_pos = simple_strtol(goto_buf, &cp, 10);
+ go_pos = kstrtol(goto_buf, 10, (long *)&cp);
goto_pos = (u_long) go_pos;
if (*cp == 'x') {
if (*goto_buf < '0')
@@ -1964,7 +1965,7 @@ static void speakup_lock(struct vc_data *vc)
}
typedef void (*spkup_hand) (struct vc_data *);
-spkup_hand spkup_handler[] = {
+static spkup_hand spkup_handler[] = {
/* must be ordered same as defines in speakup.h */
do_nothing, speakup_goto, speech_kill, speakup_shut_up,
speakup_cut, speakup_paste, say_first_char, say_last_char,
@@ -2002,7 +2003,7 @@ static void do_spkup(struct vc_data *vc, u_char value)
static const char *pad_chars = "0123456789+-*/\015,.?()";
-int
+static int
speakup_key(struct vc_data *vc, int shift_state, int keycode, u_short keysym,
int up_flag)
{
@@ -2015,7 +2016,7 @@ speakup_key(struct vc_data *vc, int shift_state, int keycode, u_short keysym,
if (synth == NULL)
return 0;
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
tty = vc->port.tty;
if (type >= 0xf0)
type -= 0xf0;
@@ -2033,7 +2034,7 @@ speakup_key(struct vc_data *vc, int shift_state, int keycode, u_short keysym,
if (keycode >= MAX_KEY)
goto no_map;
key_info = spk_our_keys[keycode];
- if (key_info == 0)
+ if (!key_info)
goto no_map;
/* Check valid read all mode keys */
if ((cursor_track == read_all_mode) && (!up_flag)) {
@@ -2114,7 +2115,7 @@ no_map:
}
last_keycode = 0;
out:
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return ret;
}
@@ -2265,7 +2266,7 @@ static int __init speakup_init(void)
(var->var_id >= 0) && (var->var_id < MAXVARS); var++)
speakup_register_var(var);
for (i = 1; spk_punc_info[i].mask != 0; i++)
- spk_set_mask_bits(0, i, 2);
+ spk_set_mask_bits(NULL, i, 2);
spk_set_key_info(spk_key_defaults, spk_key_buf);
diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
index e4d27aa..1354288 100644
--- a/drivers/staging/speakup/serialio.c
+++ b/drivers/staging/speakup/serialio.c
@@ -79,7 +79,7 @@ static irqreturn_t synth_readbuf_handler(int irq, void *dev_id)
/*printk(KERN_ERR "in irq\n"); */
/*pr_warn("in IRQ\n"); */
int c;
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
while (inb_p(speakup_info.port_tts + UART_LSR) & UART_LSR_DR) {
c = inb_p(speakup_info.port_tts+UART_RX);
@@ -87,7 +87,7 @@ static irqreturn_t synth_readbuf_handler(int irq, void *dev_id)
/*printk(KERN_ERR "c = %d\n", c); */
/*pr_warn("C = %d\n", c); */
}
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return IRQ_HANDLED;
}
diff --git a/drivers/staging/speakup/speakup_acntpc.c b/drivers/staging/speakup/speakup_acntpc.c
index 1c1f0d5..80141ac 100644
--- a/drivers/staging/speakup/speakup_acntpc.c
+++ b/drivers/staging/speakup/speakup_acntpc.c
@@ -166,7 +166,7 @@ static const char *synth_immediate(struct spk_synth *synth, const char *buf)
outb_p(ch, speakup_info.port_tts);
buf++;
}
- return 0;
+ return NULL;
}
static void do_catch_up(struct spk_synth *synth)
@@ -186,26 +186,26 @@ static void do_catch_up(struct spk_synth *synth)
delay_time = spk_get_var(DELAY);
full_time = spk_get_var(FULL);
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
jiff_max = jiffies + jiffy_delta_val;
while (!kthread_should_stop()) {
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
if (speakup_info.flushing) {
speakup_info.flushing = 0;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
synth->flush(synth);
continue;
}
if (synth_buffer_empty()) {
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
break;
}
set_current_state(TASK_INTERRUPTIBLE);
full_time_val = full_time->u.n.value;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (synth_full()) {
schedule_timeout(msecs_to_jiffies(full_time_val));
continue;
@@ -217,9 +217,9 @@ static void do_catch_up(struct spk_synth *synth)
break;
udelay(1);
}
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
ch = synth_buffer_getc();
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (ch == '\n')
ch = PROCSPEECH;
outb_p(ch, speakup_info.port_tts);
@@ -231,10 +231,10 @@ static void do_catch_up(struct spk_synth *synth)
udelay(1);
}
outb_p(PROCSPEECH, speakup_info.port_tts);
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
delay_time_val = delay_time->u.n.value;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
schedule_timeout(msecs_to_jiffies(delay_time_val));
jiff_max = jiffies+jiffy_delta_val;
}
diff --git a/drivers/staging/speakup/speakup_apollo.c b/drivers/staging/speakup/speakup_apollo.c
index 3e450cc..95d3132 100644
--- a/drivers/staging/speakup/speakup_apollo.c
+++ b/drivers/staging/speakup/speakup_apollo.c
@@ -148,30 +148,30 @@ static void do_catch_up(struct spk_synth *synth)
jiffy_delta = spk_get_var(JIFFY);
delay_time = spk_get_var(DELAY);
full_time = spk_get_var(FULL);
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
jiff_max = jiffies + jiffy_delta_val;
while (!kthread_should_stop()) {
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
full_time_val = full_time->u.n.value;
delay_time_val = delay_time->u.n.value;
if (speakup_info.flushing) {
speakup_info.flushing = 0;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
synth->flush(synth);
continue;
}
if (synth_buffer_empty()) {
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
break;
}
ch = synth_buffer_peek();
set_current_state(TASK_INTERRUPTIBLE);
full_time_val = full_time->u.n.value;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (!spk_serial_out(ch)) {
outb(UART_MCR_DTR, speakup_info.port_tts + UART_MCR);
outb(UART_MCR_DTR | UART_MCR_RTS,
@@ -180,11 +180,11 @@ static void do_catch_up(struct spk_synth *synth)
continue;
}
if ((jiffies >= jiff_max) && (ch == SPACE)) {
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
full_time_val = full_time->u.n.value;
delay_time_val = delay_time->u.n.value;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (spk_serial_out(synth->procspeech))
schedule_timeout(msecs_to_jiffies
(delay_time_val));
@@ -194,9 +194,9 @@ static void do_catch_up(struct spk_synth *synth)
jiff_max = jiffies + jiffy_delta_val;
}
set_current_state(TASK_RUNNING);
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
synth_buffer_getc();
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
spk_serial_out(PROCSPEECH);
}
diff --git a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c
index d39a0de..d306e01 100644
--- a/drivers/staging/speakup/speakup_decext.c
+++ b/drivers/staging/speakup/speakup_decext.c
@@ -165,27 +165,27 @@ static void do_catch_up(struct spk_synth *synth)
jiffy_delta = spk_get_var(JIFFY);
delay_time = spk_get_var(DELAY);
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
jiff_max = jiffies + jiffy_delta_val;
while (!kthread_should_stop()) {
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
if (speakup_info.flushing) {
speakup_info.flushing = 0;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
synth->flush(synth);
continue;
}
if (synth_buffer_empty()) {
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
break;
}
ch = synth_buffer_peek();
set_current_state(TASK_INTERRUPTIBLE);
delay_time_val = delay_time->u.n.value;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (ch == '\n')
ch = 0x0D;
if (synth_full() || !spk_serial_out(ch)) {
@@ -193,9 +193,9 @@ static void do_catch_up(struct spk_synth *synth)
continue;
}
set_current_state(TASK_RUNNING);
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
synth_buffer_getc();
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (ch == '[')
in_escape = 1;
else if (ch == ']')
@@ -206,10 +206,10 @@ static void do_catch_up(struct spk_synth *synth)
if (jiffies >= jiff_max) {
if (!in_escape)
spk_serial_out(PROCSPEECH);
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
delay_time_val = delay_time->u.n.value;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
schedule_timeout(msecs_to_jiffies
(delay_time_val));
jiff_max = jiffies + jiffy_delta_val;
diff --git a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c
index 6c88b55..ea6b72d 100644
--- a/drivers/staging/speakup/speakup_decpc.c
+++ b/drivers/staging/speakup/speakup_decpc.c
@@ -377,27 +377,27 @@ static void do_catch_up(struct spk_synth *synth)
jiffy_delta = spk_get_var(JIFFY);
delay_time = spk_get_var(DELAY);
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
jiff_max = jiffies + jiffy_delta_val;
while (!kthread_should_stop()) {
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
if (speakup_info.flushing) {
speakup_info.flushing = 0;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
synth->flush(synth);
continue;
}
if (synth_buffer_empty()) {
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
break;
}
ch = synth_buffer_peek();
set_current_state(TASK_INTERRUPTIBLE);
delay_time_val = delay_time->u.n.value;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (ch == '\n')
ch = 0x0D;
if (dt_sendchar(ch)) {
@@ -405,9 +405,9 @@ static void do_catch_up(struct spk_synth *synth)
continue;
}
set_current_state(TASK_RUNNING);
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
synth_buffer_getc();
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (ch == '[')
in_escape = 1;
else if (ch == ']')
@@ -418,10 +418,10 @@ static void do_catch_up(struct spk_synth *synth)
if (jiffies >= jiff_max) {
if (!in_escape)
dt_sendchar(PROCSPEECH);
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
delay_time_val = delay_time->u.n.value;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
schedule_timeout(msecs_to_jiffies
(delay_time_val));
jiff_max = jiffies + jiffy_delta_val;
@@ -444,7 +444,7 @@ static const char *synth_immediate(struct spk_synth *synth, const char *buf)
return buf;
buf++;
}
- return 0;
+ return NULL;
}
static int synth_probe(struct spk_synth *synth)
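Editor's note: synth_immediate() returns a const char *, so the cleanup above switches the literal 0 to NULL (sparse otherwise warns about using a plain integer as a NULL pointer). A small standalone sketch of the same convention; demo_send_string() and the '\x04' "rejected byte" check are illustrative, not the actual speakup helper.

	#include <stddef.h>

	static const char *demo_send_string(const char *buf)
	{
		while (*buf) {
			if (*buf == '\x04')	/* pretend the device rejected this byte */
				return buf;	/* caller gets the unsent remainder */
			buf++;
		}
		return NULL;			/* everything was sent: no remainder */
	}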
diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
index 0dd2eb9..15fdec3 100644
--- a/drivers/staging/speakup/speakup_dectlk.c
+++ b/drivers/staging/speakup/speakup_dectlk.c
@@ -216,9 +216,9 @@ static void do_catch_up(struct spk_synth *synth)
jiffy_delta = spk_get_var(JIFFY);
delay_time = spk_get_var(DELAY);
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
jiff_max = jiffies + jiffy_delta_val;
while (!kthread_should_stop()) {
@@ -234,22 +234,22 @@ static void do_catch_up(struct spk_synth *synth)
is_flushing = 0;
spin_unlock_irqrestore(&flush_lock, flags);
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
if (speakup_info.flushing) {
speakup_info.flushing = 0;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
synth->flush(synth);
continue;
}
if (synth_buffer_empty()) {
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
break;
}
ch = synth_buffer_peek();
set_current_state(TASK_INTERRUPTIBLE);
delay_time_val = delay_time->u.n.value;
synth_full_val = synth_full();
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (ch == '\n')
ch = 0x0D;
if (synth_full_val || !spk_serial_out(ch)) {
@@ -257,9 +257,9 @@ static void do_catch_up(struct spk_synth *synth)
continue;
}
set_current_state(TASK_RUNNING);
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
synth_buffer_getc();
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (ch == '[')
in_escape = 1;
else if (ch == ']')
@@ -270,10 +270,10 @@ static void do_catch_up(struct spk_synth *synth)
if (jiffies >= jiff_max) {
if (!in_escape)
spk_serial_out(PROCSPEECH);
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
delay_time_val = delay_time->u.n.value;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
schedule_timeout(msecs_to_jiffies
(delay_time_val));
jiff_max = jiffies + jiffy_delta_val;
diff --git a/drivers/staging/speakup/speakup_dtlk.c b/drivers/staging/speakup/speakup_dtlk.c
index a9cefbd..1feb0fb 100644
--- a/drivers/staging/speakup/speakup_dtlk.c
+++ b/drivers/staging/speakup/speakup_dtlk.c
@@ -200,42 +200,42 @@ static void do_catch_up(struct spk_synth *synth)
jiffy_delta = spk_get_var(JIFFY);
delay_time = spk_get_var(DELAY);
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
jiff_max = jiffies + jiffy_delta_val;
while (!kthread_should_stop()) {
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
if (speakup_info.flushing) {
speakup_info.flushing = 0;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
synth->flush(synth);
continue;
}
if (synth_buffer_empty()) {
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
break;
}
set_current_state(TASK_INTERRUPTIBLE);
delay_time_val = delay_time->u.n.value;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (synth_full()) {
schedule_timeout(msecs_to_jiffies(delay_time_val));
continue;
}
set_current_state(TASK_RUNNING);
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
ch = synth_buffer_getc();
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (ch == '\n')
ch = PROCSPEECH;
spk_out(ch);
if ((jiffies >= jiff_max) && (ch == SPACE)) {
spk_out(PROCSPEECH);
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
delay_time_val = delay_time->u.n.value;
jiffy_delta_val = jiffy_delta->u.n.value;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
schedule_timeout(msecs_to_jiffies(delay_time_val));
jiff_max = jiffies + jiffy_delta_val;
}
@@ -254,7 +254,7 @@ static const char *synth_immediate(struct spk_synth *synth, const char *buf)
spk_out(ch);
buf++;
}
- return 0;
+ return NULL;
}
static void synth_flush(struct spk_synth *synth)
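Editor's note: the do_catch_up() loops above pace output by re-reading DELAY/JIFFY under the lock, then sleeping with schedule_timeout() once jiff_max has passed and rearming the deadline. A sketch of that pacing, under stated assumptions: the demo_* names and counter are made up, and time_after_eq() is used here as the wrap-safe form of the driver's direct `jiffies >= jiff_max` comparison.

	#include <linux/jiffies.h>
	#include <linux/sched.h>
	#include <linux/types.h>

	static int demo_chars_left = 100;
	static bool demo_more_work(void)   { return demo_chars_left > 0; }
	static void demo_send_one_char(void) { demo_chars_left--; }

	static void demo_pace(unsigned int jiffy_delta, unsigned int delay_ms)
	{
		unsigned long jiff_max = jiffies + jiffy_delta;

		while (demo_more_work()) {
			demo_send_one_char();
			if (time_after_eq(jiffies, jiff_max)) {
				set_current_state(TASK_INTERRUPTIBLE);
				schedule_timeout(msecs_to_jiffies(delay_ms));
				jiff_max = jiffies + jiffy_delta;	/* rearm the deadline */
			}
		}
	}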
diff --git a/drivers/staging/speakup/speakup_keypc.c b/drivers/staging/speakup/speakup_keypc.c
index feb5f22..2f2fe5e 100644
--- a/drivers/staging/speakup/speakup_keypc.c
+++ b/drivers/staging/speakup/speakup_keypc.c
@@ -168,7 +168,7 @@ static const char *synth_immediate(struct spk_synth *synth, const char *buf)
udelay(70);
buf++;
}
- return 0;
+ return NULL;
}
static void do_catch_up(struct spk_synth *synth)
@@ -187,26 +187,26 @@ static void do_catch_up(struct spk_synth *synth)
jiffy_delta = spk_get_var(JIFFY);
delay_time = spk_get_var(DELAY);
full_time = spk_get_var(FULL);
-spk_lock(flags);
+spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
jiff_max = jiffies + jiffy_delta_val;
while (!kthread_should_stop()) {
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
if (speakup_info.flushing) {
speakup_info.flushing = 0;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
synth->flush(synth);
continue;
}
if (synth_buffer_empty()) {
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
break;
}
set_current_state(TASK_INTERRUPTIBLE);
full_time_val = full_time->u.n.value;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (synth_full()) {
schedule_timeout(msecs_to_jiffies(full_time_val));
continue;
@@ -220,9 +220,9 @@ spk_lock(flags);
oops();
break;
}
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
ch = synth_buffer_getc();
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (ch == '\n')
ch = PROCSPEECH;
outb_p(ch, synth_port);
@@ -237,10 +237,10 @@ spk_lock(flags);
break;
}
outb_p(PROCSPEECH, synth_port);
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
delay_time_val = delay_time->u.n.value;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
schedule_timeout(msecs_to_jiffies(delay_time_val));
jiff_max = jiffies+jiffy_delta_val;
}
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index e2f5c81..243c3d5 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -179,45 +179,45 @@ static int softsynth_open(struct inode *inode, struct file *fp)
unsigned long flags;
/*if ((fp->f_flags & O_ACCMODE) != O_RDONLY) */
/* return -EPERM; */
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
if (synth_soft.alive) {
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return -EBUSY;
}
synth_soft.alive = 1;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return 0;
}
static int softsynth_close(struct inode *inode, struct file *fp)
{
unsigned long flags;
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
synth_soft.alive = 0;
init_pos = 0;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
/* Make sure we let applications go before leaving */
speakup_start_ttys();
return 0;
}
-static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
+static ssize_t softsynth_read(struct file *fp, char __user *buf, size_t count,
loff_t *pos)
{
int chars_sent = 0;
- char *cp;
+ char __user *cp;
char *init;
char ch;
int empty;
unsigned long flags;
DEFINE_WAIT(wait);
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
while (1) {
prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
if (!synth_buffer_empty() || speakup_info.flushing)
break;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (fp->f_flags & O_NONBLOCK) {
finish_wait(&speakup_event, &wait);
return -EAGAIN;
@@ -227,7 +227,7 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
return -ERESTARTSYS;
}
schedule();
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
}
finish_wait(&speakup_event, &wait);
@@ -244,16 +244,16 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
} else {
ch = synth_buffer_getc();
}
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (copy_to_user(cp, &ch, 1))
return -EFAULT;
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
chars_sent++;
cp++;
}
*pos += chars_sent;
empty = synth_buffer_empty();
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (empty) {
speakup_start_ttys();
*pos = 0;
@@ -263,8 +263,8 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
static int last_index;
-static ssize_t softsynth_write(struct file *fp, const char *buf, size_t count,
- loff_t *pos)
+static ssize_t softsynth_write(struct file *fp, const char __user *buf,
+ size_t count, loff_t *pos)
{
unsigned long supplied_index = 0;
int converted;
@@ -285,10 +285,10 @@ static unsigned int softsynth_poll(struct file *fp,
int ret = 0;
poll_wait(fp, &speakup_event, wait);
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
if (!synth_buffer_empty() || speakup_info.flushing)
ret = POLLIN | POLLRDNORM;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return ret;
}
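Editor's note: softsynth_read()/softsynth_write() above gain __user annotations so sparse can verify that userspace pointers are only touched through copy_to_user()/copy_from_user(), and the read loop drops the spinlock around copy_to_user() because it may fault and sleep. A hedged sketch of that discipline; struct demo_dev and demo_read_one() are illustrative.

	#include <linux/spinlock.h>
	#include <linux/uaccess.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	struct demo_dev {
		spinlock_t lock;
		char pending;
		bool has_pending;
	};

	static ssize_t demo_read_one(struct demo_dev *d, char __user *buf)
	{
		unsigned long flags;
		char ch;

		spin_lock_irqsave(&d->lock, flags);
		if (!d->has_pending) {
			spin_unlock_irqrestore(&d->lock, flags);
			return 0;
		}
		ch = d->pending;
		d->has_pending = false;
		spin_unlock_irqrestore(&d->lock, flags);	/* copy_to_user() may sleep */

		if (copy_to_user(buf, &ch, 1))
			return -EFAULT;
		return 1;
	}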
diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h
index 303105b..637ba67 100644
--- a/drivers/staging/speakup/spk_priv.h
+++ b/drivers/staging/speakup/spk_priv.h
@@ -77,17 +77,4 @@ extern struct speakup_info_t speakup_info;
extern struct var_t synth_time_vars[];
-/* Protect the whole speakup machinery, must be taken at each kernel->speakup
- * transition and released at all corresponding speakup->kernel transitions
- * (flags must be the same variable between lock/trylock and unlock).
- *
- * The progression thread only interferes with the speakup machinery through
- * the synth buffer, and so only needs to take the lock while tinkering with
- * it.
- */
-/* Speakup needs to disable the keyboard IRQ, hence _irqsave/restore */
-#define spk_lock(flags) spin_lock_irqsave(&speakup_info.spinlock, flags)
-#define spk_trylock(flags) spin_trylock_irqsave(&speakup_info.spinlock, flags)
-#define spk_unlock(flags) spin_unlock_irqrestore(&speakup_info.spinlock, flags)
-
#endif
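Editor's note: with the spk_lock()/spk_trylock()/spk_unlock() macros removed above, every caller now names the spinlock explicitly. The old wrappers had to take `flags` as a bare argument because spin_lock_irqsave() is itself a macro that writes the saved IRQ state into the caller's variable; a tiny sketch, with illustrative names:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);

	static void demo_touch_shared(void)
	{
		unsigned long flags;			/* one per call site, never shared */

		spin_lock_irqsave(&demo_lock, flags);	/* writes flags */
		/* ... touch state shared with the keyboard IRQ here ... */
		spin_unlock_irqrestore(&demo_lock, flags);
	}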
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index d867dd9..0b3549b 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -25,6 +25,18 @@ static int module_status;
bool spk_quiet_boot;
struct speakup_info_t speakup_info = {
+ /*
+ * This spinlock is used to protect the entire speakup machinery, and
+ * must be taken at each kernel->speakup transition and released at
+ * each corresponding speakup->kernel transition.
+ *
+ * The progression thread only interferes with the speakup machinery through
+ * the synth buffer, so only needs to take the lock while tinkering with
+ * the buffer.
+ *
+ * We use spin_lock/trylock_irqsave and spin_unlock_irqrestore with this
+ * spinlock because speakup needs to disable the keyboard IRQ.
+ */
.spinlock = __SPIN_LOCK_UNLOCKED(speakup_info.spinlock),
.flushing = 0,
};
@@ -83,27 +95,27 @@ void spk_do_catch_up(struct spk_synth *synth)
full_time = spk_get_var(FULL);
delay_time = spk_get_var(DELAY);
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
jiff_max = jiffies + jiffy_delta_val;
while (!kthread_should_stop()) {
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
if (speakup_info.flushing) {
speakup_info.flushing = 0;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
synth->flush(synth);
continue;
}
if (synth_buffer_empty()) {
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
break;
}
ch = synth_buffer_peek();
set_current_state(TASK_INTERRUPTIBLE);
full_time_val = full_time->u.n.value;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (ch == '\n')
ch = synth->procspeech;
if (!spk_serial_out(ch)) {
@@ -111,11 +123,11 @@ void spk_do_catch_up(struct spk_synth *synth)
continue;
}
if ((jiffies >= jiff_max) && (ch == SPACE)) {
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
delay_time_val = delay_time->u.n.value;
full_time_val = full_time->u.n.value;
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (spk_serial_out(synth->procspeech))
schedule_timeout(
msecs_to_jiffies(delay_time_val));
@@ -125,9 +137,9 @@ void spk_do_catch_up(struct spk_synth *synth)
jiff_max = jiffies + jiffy_delta_val;
}
set_current_state(TASK_RUNNING);
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
synth_buffer_getc();
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
spk_serial_out(synth->procspeech);
}
@@ -145,7 +157,7 @@ const char *spk_synth_immediate(struct spk_synth *synth, const char *buff)
return buff;
buff++;
}
- return 0;
+ return NULL;
}
EXPORT_SYMBOL_GPL(spk_synth_immediate);
@@ -403,11 +415,11 @@ void synth_release(void)
if (synth == NULL)
return;
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
pr_info("releasing synth %s\n", synth->name);
synth->alive = 0;
del_timer(&thread_timer);
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (synth->attributes.name)
sysfs_remove_group(speakup_kobj, &(synth->attributes));
for (var = synth->vars; var->var_id != MAXVARS; var++)
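Editor's note: the comment moved into synth.c above sits on the speakup_info initializer, which uses __SPIN_LOCK_UNLOCKED() so the lock is valid before any init code runs. A minimal sketch of a statically initialized spinlock inside a global struct, mirroring that initializer; names are illustrative.

	#include <linux/spinlock.h>

	struct demo_info {
		spinlock_t lock;
		int flushing;
	};

	static struct demo_info demo_info = {
		.lock = __SPIN_LOCK_UNLOCKED(demo_info.lock),	/* usable before any init call */
		.flushing = 0,
	};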
diff --git a/drivers/staging/speakup/thread.c b/drivers/staging/speakup/thread.c
index 42fa660..4397c8e 100644
--- a/drivers/staging/speakup/thread.c
+++ b/drivers/staging/speakup/thread.c
@@ -22,7 +22,7 @@ int speakup_thread(void *data)
while (1) {
DEFINE_WAIT(wait);
while (1) {
- spk_lock(flags);
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
our_sound = spk_unprocessed_sound;
spk_unprocessed_sound.active = 0;
prepare_to_wait(&speakup_event, &wait,
@@ -32,7 +32,7 @@ int speakup_thread(void *data)
(synth && synth->catch_up && synth->alive &&
(speakup_info.flushing ||
!synth_buffer_empty()));
- spk_unlock(flags);
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (should_break)
break;
mutex_unlock(&spk_mutex);
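Editor's note: speakup_thread() above samples its wake-up condition while holding the spinlock, queues itself on the waitqueue first, and only then decides whether to sleep, so a wake-up between the check and the schedule() cannot be lost. A sketch of that pattern under the same assumptions; the demo_* names are made up.

	#include <linux/wait.h>
	#include <linux/spinlock.h>
	#include <linux/sched.h>
	#include <linux/types.h>

	static DECLARE_WAIT_QUEUE_HEAD(demo_event);
	static DEFINE_SPINLOCK(demo_lock);
	static bool demo_work_pending;

	static void demo_wait_for_work(void)
	{
		DEFINE_WAIT(wait);
		unsigned long flags;
		bool should_break;

		for (;;) {
			spin_lock_irqsave(&demo_lock, flags);
			prepare_to_wait(&demo_event, &wait, TASK_INTERRUPTIBLE);
			should_break = demo_work_pending;	/* condition sampled under the lock */
			spin_unlock_irqrestore(&demo_lock, flags);
			if (should_break)
				break;
			schedule();		/* woken by wake_up(&demo_event) */
		}
		finish_wait(&demo_event, &wait);
	}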
diff --git a/drivers/staging/speakup/varhandlers.c b/drivers/staging/speakup/varhandlers.c
index 7f6288f..9aa2a78 100644
--- a/drivers/staging/speakup/varhandlers.c
+++ b/drivers/staging/speakup/varhandlers.c
@@ -137,18 +137,15 @@ struct st_var_header *spk_get_var_header(enum var_id_t var_id)
struct st_var_header *spk_var_header_by_name(const char *name)
{
int i;
- struct st_var_header *where = NULL;
- if (name != NULL) {
- i = 0;
- while ((i < MAXVARS) && (where == NULL)) {
- if (strcmp(name, var_ptrs[i]->name) == 0)
- where = var_ptrs[i];
- else
- i++;
- }
+ if (!name)
+ return NULL;
+
+ for (i = 0; i < MAXVARS; i++) {
+ if (strcmp(name, var_ptrs[i]->name) == 0)
+ return var_ptrs[i];
}
- return where;
+ return NULL;
}
struct var_t *spk_get_var(enum var_id_t var_id)
@@ -280,7 +277,7 @@ int spk_set_mask_bits(const char *input, const int which, const int how)
spk_chartab[*cp] &= ~mask;
}
cp = (u_char *)input;
- if (cp == 0)
+ if (!cp)
cp = spk_punc_info[which].value;
else {
for ( ; *cp; cp++) {
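Editor's note: spk_var_header_by_name() above is rewritten from a while loop accumulating into `where` to a guard clause plus a for loop that returns on the first match. A standalone sketch of the same shape over a plain string table; the table and index function are illustrative.

	#include <stddef.h>
	#include <string.h>

	static const char *demo_names[] = { "rate", "pitch", "volume" };

	static int demo_index_by_name(const char *name)
	{
		size_t i;

		if (!name)
			return -1;		/* reject NULL up front */

		for (i = 0; i < sizeof(demo_names) / sizeof(demo_names[0]); i++) {
			if (strcmp(name, demo_names[i]) == 0)
				return (int)i;	/* first match wins */
		}
		return -1;			/* not found */
	}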
diff --git a/drivers/staging/ti-soc-thermal/Kconfig b/drivers/staging/ti-soc-thermal/Kconfig
deleted file mode 100644
index e81375f..0000000
--- a/drivers/staging/ti-soc-thermal/Kconfig
+++ /dev/null
@@ -1,48 +0,0 @@
-config TI_SOC_THERMAL
- tristate "Texas Instruments SoCs temperature sensor driver"
- depends on THERMAL
- depends on ARCH_HAS_BANDGAP
- help
- If you say yes here you get support for the Texas Instruments
- OMAP4460+ on die bandgap temperature sensor support. The register
- set is part of system control module.
-
- This includes alert interrupts generation and also the TSHUT
- support.
-
-config TI_THERMAL
- bool "Texas Instruments SoCs thermal framework support"
- depends on TI_SOC_THERMAL
- depends on CPU_THERMAL
- help
- If you say yes here you want to get support for generic thermal
- framework for the Texas Instruments on die bandgap temperature sensor.
-
- This includes trip points definitions, extrapolation rules and
- CPU cooling device bindings.
-
-config OMAP4_THERMAL
- bool "Texas Instruments OMAP4 thermal support"
- depends on TI_SOC_THERMAL
- depends on ARCH_OMAP4
- help
- If you say yes here you get thermal support for the Texas Instruments
- OMAP4 SoC family. The current chip supported are:
- - OMAP4430
- - OMAP4460
- - OMAP4470
-
- This includes alert interrupts generation and also the TSHUT
- support.
-
-config OMAP5_THERMAL
- bool "Texas Instruments OMAP5 thermal support"
- depends on TI_SOC_THERMAL
- depends on SOC_OMAP5
- help
- If you say yes here you get thermal support for the Texas Instruments
- OMAP5 SoC family. The current chip supported are:
- - OMAP5430
-
- This includes alert interrupts generation and also the TSHUT
- support.
diff --git a/drivers/staging/ti-soc-thermal/Makefile b/drivers/staging/ti-soc-thermal/Makefile
deleted file mode 100644
index 0ca034f..0000000
--- a/drivers/staging/ti-soc-thermal/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal.o
-ti-soc-thermal-y := ti-bandgap.o
-ti-soc-thermal-$(CONFIG_TI_THERMAL) += ti-thermal-common.o
-ti-soc-thermal-$(CONFIG_OMAP4_THERMAL) += omap4-thermal-data.o
-ti-soc-thermal-$(CONFIG_OMAP5_THERMAL) += omap5-thermal-data.o
diff --git a/drivers/staging/ti-soc-thermal/TODO b/drivers/staging/ti-soc-thermal/TODO
deleted file mode 100644
index 7da787d..0000000
--- a/drivers/staging/ti-soc-thermal/TODO
+++ /dev/null
@@ -1,12 +0,0 @@
-List of TODOs (by Eduardo Valentin)
-
-on ti-bandgap.c:
-- Revisit PM support
-
-on ti-thermal-common.c/ti-thermal.h:
-- Revisit need for locking
-
-generally:
-- make sure this code works on OMAP4430, OMAP4460 and OMAP5430
-
-Copy patches to Eduardo Valentin <eduardo.valentin@ti.com>
diff --git a/drivers/staging/ti-soc-thermal/omap4-thermal-data.c b/drivers/staging/ti-soc-thermal/omap4-thermal-data.c
deleted file mode 100644
index d255d33..0000000
--- a/drivers/staging/ti-soc-thermal/omap4-thermal-data.c
+++ /dev/null
@@ -1,267 +0,0 @@
-/*
- * OMAP4 thermal driver.
- *
- * Copyright (C) 2011-2012 Texas Instruments Inc.
- * Contact:
- * Eduardo Valentin <eduardo.valentin@ti.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include "ti-thermal.h"
-#include "ti-bandgap.h"
-#include "omap4xxx-bandgap.h"
-
-/*
- * OMAP4430 has one instance of thermal sensor for MPU
- * need to describe the individual bit fields
- */
-static struct temp_sensor_registers
-omap4430_mpu_temp_sensor_registers = {
- .temp_sensor_ctrl = OMAP4430_TEMP_SENSOR_CTRL_OFFSET,
- .bgap_tempsoff_mask = OMAP4430_BGAP_TEMPSOFF_MASK,
- .bgap_soc_mask = OMAP4430_BGAP_TEMP_SENSOR_SOC_MASK,
- .bgap_eocz_mask = OMAP4430_BGAP_TEMP_SENSOR_EOCZ_MASK,
- .bgap_dtemp_mask = OMAP4430_BGAP_TEMP_SENSOR_DTEMP_MASK,
-
- .bgap_mode_ctrl = OMAP4430_TEMP_SENSOR_CTRL_OFFSET,
- .mode_ctrl_mask = OMAP4430_SINGLE_MODE_MASK,
-
- .bgap_efuse = OMAP4430_FUSE_OPP_BGAP,
-};
-
-/* Thresholds and limits for OMAP4430 MPU temperature sensor */
-static struct temp_sensor_data omap4430_mpu_temp_sensor_data = {
- .min_freq = OMAP4430_MIN_FREQ,
- .max_freq = OMAP4430_MAX_FREQ,
- .max_temp = OMAP4430_MAX_TEMP,
- .min_temp = OMAP4430_MIN_TEMP,
- .hyst_val = OMAP4430_HYST_VAL,
-};
-
-/*
- * Temperature values in milli degree celsius
- * ADC code values from 530 to 923
- */
-static const int
-omap4430_adc_to_temp[OMAP4430_ADC_END_VALUE - OMAP4430_ADC_START_VALUE + 1] = {
- -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000, -22000,
- -20000, -18000, -17000, -15000, -13000, -12000, -10000, -8000, -6000,
- -5000, -3000, -1000, 0, 2000, 3000, 5000, 6000, 8000, 10000, 12000,
- 13000, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28000, 30000,
- 32000, 33000, 35000, 37000, 38000, 40000, 42000, 43000, 45000, 47000,
- 48000, 50000, 52000, 53000, 55000, 57000, 58000, 60000, 62000, 64000,
- 66000, 68000, 70000, 71000, 73000, 75000, 77000, 78000, 80000, 82000,
- 83000, 85000, 87000, 88000, 90000, 92000, 93000, 95000, 97000, 98000,
- 100000, 102000, 103000, 105000, 107000, 109000, 111000, 113000, 115000,
- 117000, 118000, 120000, 122000, 123000,
-};
-
-/* OMAP4430 data */
-const struct ti_bandgap_data omap4430_data = {
- .features = TI_BANDGAP_FEATURE_MODE_CONFIG |
- TI_BANDGAP_FEATURE_CLK_CTRL |
- TI_BANDGAP_FEATURE_POWER_SWITCH,
- .fclock_name = "bandgap_fclk",
- .div_ck_name = "bandgap_fclk",
- .conv_table = omap4430_adc_to_temp,
- .adc_start_val = OMAP4430_ADC_START_VALUE,
- .adc_end_val = OMAP4430_ADC_END_VALUE,
- .expose_sensor = ti_thermal_expose_sensor,
- .remove_sensor = ti_thermal_remove_sensor,
- .sensors = {
- {
- .registers = &omap4430_mpu_temp_sensor_registers,
- .ts_data = &omap4430_mpu_temp_sensor_data,
- .domain = "cpu",
- .slope = OMAP_GRADIENT_SLOPE_4430,
- .constant = OMAP_GRADIENT_CONST_4430,
- .slope_pcb = OMAP_GRADIENT_SLOPE_W_PCB_4430,
- .constant_pcb = OMAP_GRADIENT_CONST_W_PCB_4430,
- .register_cooling = ti_thermal_register_cpu_cooling,
- .unregister_cooling = ti_thermal_unregister_cpu_cooling,
- },
- },
- .sensor_count = 1,
-};
-/*
- * OMAP4460 has one instance of thermal sensor for MPU
- * need to describe the individual bit fields
- */
-static struct temp_sensor_registers
-omap4460_mpu_temp_sensor_registers = {
- .temp_sensor_ctrl = OMAP4460_TEMP_SENSOR_CTRL_OFFSET,
- .bgap_tempsoff_mask = OMAP4460_BGAP_TEMPSOFF_MASK,
- .bgap_soc_mask = OMAP4460_BGAP_TEMP_SENSOR_SOC_MASK,
- .bgap_eocz_mask = OMAP4460_BGAP_TEMP_SENSOR_EOCZ_MASK,
- .bgap_dtemp_mask = OMAP4460_BGAP_TEMP_SENSOR_DTEMP_MASK,
-
- .bgap_mask_ctrl = OMAP4460_BGAP_CTRL_OFFSET,
- .mask_hot_mask = OMAP4460_MASK_HOT_MASK,
- .mask_cold_mask = OMAP4460_MASK_COLD_MASK,
-
- .bgap_mode_ctrl = OMAP4460_BGAP_CTRL_OFFSET,
- .mode_ctrl_mask = OMAP4460_SINGLE_MODE_MASK,
-
- .bgap_counter = OMAP4460_BGAP_COUNTER_OFFSET,
- .counter_mask = OMAP4460_COUNTER_MASK,
-
- .bgap_threshold = OMAP4460_BGAP_THRESHOLD_OFFSET,
- .threshold_thot_mask = OMAP4460_T_HOT_MASK,
- .threshold_tcold_mask = OMAP4460_T_COLD_MASK,
-
- .tshut_threshold = OMAP4460_BGAP_TSHUT_OFFSET,
- .tshut_hot_mask = OMAP4460_TSHUT_HOT_MASK,
- .tshut_cold_mask = OMAP4460_TSHUT_COLD_MASK,
-
- .bgap_status = OMAP4460_BGAP_STATUS_OFFSET,
- .status_clean_stop_mask = OMAP4460_CLEAN_STOP_MASK,
- .status_bgap_alert_mask = OMAP4460_BGAP_ALERT_MASK,
- .status_hot_mask = OMAP4460_HOT_FLAG_MASK,
- .status_cold_mask = OMAP4460_COLD_FLAG_MASK,
-
- .bgap_efuse = OMAP4460_FUSE_OPP_BGAP,
-};
-
-/* Thresholds and limits for OMAP4460 MPU temperature sensor */
-static struct temp_sensor_data omap4460_mpu_temp_sensor_data = {
- .tshut_hot = OMAP4460_TSHUT_HOT,
- .tshut_cold = OMAP4460_TSHUT_COLD,
- .t_hot = OMAP4460_T_HOT,
- .t_cold = OMAP4460_T_COLD,
- .min_freq = OMAP4460_MIN_FREQ,
- .max_freq = OMAP4460_MAX_FREQ,
- .max_temp = OMAP4460_MAX_TEMP,
- .min_temp = OMAP4460_MIN_TEMP,
- .hyst_val = OMAP4460_HYST_VAL,
- .update_int1 = 1000,
- .update_int2 = 2000,
-};
-
-/*
- * Temperature values in milli degree celsius
- * ADC code values from 530 to 923
- */
-static const int
-omap4460_adc_to_temp[OMAP4460_ADC_END_VALUE - OMAP4460_ADC_START_VALUE + 1] = {
- -40000, -40000, -40000, -40000, -39800, -39400, -39000, -38600, -38200,
- -37800, -37300, -36800, -36400, -36000, -35600, -35200, -34800,
- -34300, -33800, -33400, -33000, -32600, -32200, -31800, -31300,
- -30800, -30400, -30000, -29600, -29200, -28700, -28200, -27800,
- -27400, -27000, -26600, -26200, -25700, -25200, -24800, -24400,
- -24000, -23600, -23200, -22700, -22200, -21800, -21400, -21000,
- -20600, -20200, -19700, -19200, -18800, -18400, -18000, -17600,
- -17200, -16700, -16200, -15800, -15400, -15000, -14600, -14200,
- -13700, -13200, -12800, -12400, -12000, -11600, -11200, -10700,
- -10200, -9800, -9400, -9000, -8600, -8200, -7700, -7200, -6800,
- -6400, -6000, -5600, -5200, -4800, -4300, -3800, -3400, -3000,
- -2600, -2200, -1800, -1300, -800, -400, 0, 400, 800, 1200, 1600,
- 2100, 2600, 3000, 3400, 3800, 4200, 4600, 5100, 5600, 6000, 6400,
- 6800, 7200, 7600, 8000, 8500, 9000, 9400, 9800, 10200, 10600, 11000,
- 11400, 11900, 12400, 12800, 13200, 13600, 14000, 14400, 14800,
- 15300, 15800, 16200, 16600, 17000, 17400, 17800, 18200, 18700,
- 19200, 19600, 20000, 20400, 20800, 21200, 21600, 22100, 22600,
- 23000, 23400, 23800, 24200, 24600, 25000, 25400, 25900, 26400,
- 26800, 27200, 27600, 28000, 28400, 28800, 29300, 29800, 30200,
- 30600, 31000, 31400, 31800, 32200, 32600, 33100, 33600, 34000,
- 34400, 34800, 35200, 35600, 36000, 36400, 36800, 37300, 37800,
- 38200, 38600, 39000, 39400, 39800, 40200, 40600, 41100, 41600,
- 42000, 42400, 42800, 43200, 43600, 44000, 44400, 44800, 45300,
- 45800, 46200, 46600, 47000, 47400, 47800, 48200, 48600, 49000,
- 49500, 50000, 50400, 50800, 51200, 51600, 52000, 52400, 52800,
- 53200, 53700, 54200, 54600, 55000, 55400, 55800, 56200, 56600,
- 57000, 57400, 57800, 58200, 58700, 59200, 59600, 60000, 60400,
- 60800, 61200, 61600, 62000, 62400, 62800, 63300, 63800, 64200,
- 64600, 65000, 65400, 65800, 66200, 66600, 67000, 67400, 67800,
- 68200, 68700, 69200, 69600, 70000, 70400, 70800, 71200, 71600,
- 72000, 72400, 72800, 73200, 73600, 74100, 74600, 75000, 75400,
- 75800, 76200, 76600, 77000, 77400, 77800, 78200, 78600, 79000,
- 79400, 79800, 80300, 80800, 81200, 81600, 82000, 82400, 82800,
- 83200, 83600, 84000, 84400, 84800, 85200, 85600, 86000, 86400,
- 86800, 87300, 87800, 88200, 88600, 89000, 89400, 89800, 90200,
- 90600, 91000, 91400, 91800, 92200, 92600, 93000, 93400, 93800,
- 94200, 94600, 95000, 95500, 96000, 96400, 96800, 97200, 97600,
- 98000, 98400, 98800, 99200, 99600, 100000, 100400, 100800, 101200,
- 101600, 102000, 102400, 102800, 103200, 103600, 104000, 104400,
- 104800, 105200, 105600, 106100, 106600, 107000, 107400, 107800,
- 108200, 108600, 109000, 109400, 109800, 110200, 110600, 111000,
- 111400, 111800, 112200, 112600, 113000, 113400, 113800, 114200,
- 114600, 115000, 115400, 115800, 116200, 116600, 117000, 117400,
- 117800, 118200, 118600, 119000, 119400, 119800, 120200, 120600,
- 121000, 121400, 121800, 122200, 122600, 123000, 123400, 123800, 124200,
- 124600, 124900, 125000, 125000, 125000, 125000
-};
-
-/* OMAP4460 data */
-const struct ti_bandgap_data omap4460_data = {
- .features = TI_BANDGAP_FEATURE_TSHUT |
- TI_BANDGAP_FEATURE_TSHUT_CONFIG |
- TI_BANDGAP_FEATURE_TALERT |
- TI_BANDGAP_FEATURE_MODE_CONFIG |
- TI_BANDGAP_FEATURE_POWER_SWITCH |
- TI_BANDGAP_FEATURE_CLK_CTRL |
- TI_BANDGAP_FEATURE_COUNTER,
- .fclock_name = "bandgap_ts_fclk",
- .div_ck_name = "div_ts_ck",
- .conv_table = omap4460_adc_to_temp,
- .adc_start_val = OMAP4460_ADC_START_VALUE,
- .adc_end_val = OMAP4460_ADC_END_VALUE,
- .expose_sensor = ti_thermal_expose_sensor,
- .remove_sensor = ti_thermal_remove_sensor,
- .report_temperature = ti_thermal_report_sensor_temperature,
- .sensors = {
- {
- .registers = &omap4460_mpu_temp_sensor_registers,
- .ts_data = &omap4460_mpu_temp_sensor_data,
- .domain = "cpu",
- .slope = OMAP_GRADIENT_SLOPE_4460,
- .constant = OMAP_GRADIENT_CONST_4460,
- .slope_pcb = OMAP_GRADIENT_SLOPE_W_PCB_4460,
- .constant_pcb = OMAP_GRADIENT_CONST_W_PCB_4460,
- .register_cooling = ti_thermal_register_cpu_cooling,
- .unregister_cooling = ti_thermal_unregister_cpu_cooling,
- },
- },
- .sensor_count = 1,
-};
-
-/* OMAP4470 data */
-const struct ti_bandgap_data omap4470_data = {
- .features = TI_BANDGAP_FEATURE_TSHUT |
- TI_BANDGAP_FEATURE_TSHUT_CONFIG |
- TI_BANDGAP_FEATURE_TALERT |
- TI_BANDGAP_FEATURE_MODE_CONFIG |
- TI_BANDGAP_FEATURE_POWER_SWITCH |
- TI_BANDGAP_FEATURE_CLK_CTRL |
- TI_BANDGAP_FEATURE_COUNTER,
- .fclock_name = "bandgap_ts_fclk",
- .div_ck_name = "div_ts_ck",
- .conv_table = omap4460_adc_to_temp,
- .adc_start_val = OMAP4460_ADC_START_VALUE,
- .adc_end_val = OMAP4460_ADC_END_VALUE,
- .expose_sensor = ti_thermal_expose_sensor,
- .remove_sensor = ti_thermal_remove_sensor,
- .report_temperature = ti_thermal_report_sensor_temperature,
- .sensors = {
- {
- .registers = &omap4460_mpu_temp_sensor_registers,
- .ts_data = &omap4460_mpu_temp_sensor_data,
- .domain = "cpu",
- .slope = OMAP_GRADIENT_SLOPE_4470,
- .constant = OMAP_GRADIENT_CONST_4470,
- .slope_pcb = OMAP_GRADIENT_SLOPE_W_PCB_4470,
- .constant_pcb = OMAP_GRADIENT_CONST_W_PCB_4470,
- .register_cooling = ti_thermal_register_cpu_cooling,
- .unregister_cooling = ti_thermal_unregister_cpu_cooling,
- },
- },
- .sensor_count = 1,
-};
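Editor's note: the conv_table arrays being removed above map raw bandgap ADC codes to millidegrees Celsius; the driver clamps a reading to [adc_start_val, adc_end_val] and indexes the table. A self-contained sketch of that lookup, with a tiny made-up table and constants rather than the OMAP data:

	#include <stdio.h>

	#define DEMO_ADC_START	530
	#define DEMO_ADC_END	534

	static const int demo_adc_to_temp[DEMO_ADC_END - DEMO_ADC_START + 1] = {
		-38000, -35000, -34000, -32000, -30000,	/* mC per ADC code */
	};

	static int demo_code_to_mcelsius(int code)
	{
		if (code < DEMO_ADC_START)
			code = DEMO_ADC_START;		/* clamp to the table range */
		if (code > DEMO_ADC_END)
			code = DEMO_ADC_END;
		return demo_adc_to_temp[code - DEMO_ADC_START];
	}

	int main(void)
	{
		printf("%d mC\n", demo_code_to_mcelsius(532));	/* -> -34000 */
		return 0;
	}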
diff --git a/drivers/staging/ti-soc-thermal/omap4xxx-bandgap.h b/drivers/staging/ti-soc-thermal/omap4xxx-bandgap.h
deleted file mode 100644
index 6f2de3a..0000000
--- a/drivers/staging/ti-soc-thermal/omap4xxx-bandgap.h
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * OMAP4xxx bandgap registers, bitfields and temperature definitions
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Contact:
- * Eduardo Valentin <eduardo.valentin@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-#ifndef __OMAP4XXX_BANDGAP_H
-#define __OMAP4XXX_BANDGAP_H
-
-/**
- * *** OMAP4430 ***
- *
- * Below, in sequence, are the Register definitions,
- * the bitfields and the temperature definitions for OMAP4430.
- */
-
-/**
- * OMAP4430 register definitions
- *
- * Registers are defined as offsets. The offsets are
- * relative to FUSE_OPP_BGAP on 4430.
- */
-
-/* OMAP4430.FUSE_OPP_BGAP */
-#define OMAP4430_FUSE_OPP_BGAP 0x0
-
-/* OMAP4430.TEMP_SENSOR */
-#define OMAP4430_TEMP_SENSOR_CTRL_OFFSET 0xCC
-
-/**
- * Register and bit definitions for OMAP4430
- *
- * All the macros bellow define the required bits for
- * controlling temperature on OMAP4430. Bit defines are
- * grouped by register.
- */
-
-/* OMAP4430.TEMP_SENSOR bits */
-#define OMAP4430_BGAP_TEMPSOFF_MASK BIT(12)
-#define OMAP4430_BGAP_TSHUT_MASK BIT(11)
-#define OMAP4430_SINGLE_MODE_MASK BIT(10)
-#define OMAP4430_BGAP_TEMP_SENSOR_SOC_MASK BIT(9)
-#define OMAP4430_BGAP_TEMP_SENSOR_EOCZ_MASK BIT(8)
-#define OMAP4430_BGAP_TEMP_SENSOR_DTEMP_MASK (0xff << 0)
-
-/**
- * Temperature limits and thresholds for OMAP4430
- *
- * All the macros bellow are definitions for handling the
- * ADC conversions and representation of temperature limits
- * and thresholds for OMAP4430.
- */
-
-/* ADC conversion table limits */
-#define OMAP4430_ADC_START_VALUE 0
-#define OMAP4430_ADC_END_VALUE 127
-/* bandgap clock limits (no control on 4430) */
-#define OMAP4430_MAX_FREQ 32768
-#define OMAP4430_MIN_FREQ 32768
-/* sensor limits */
-#define OMAP4430_MIN_TEMP -40000
-#define OMAP4430_MAX_TEMP 125000
-#define OMAP4430_HYST_VAL 5000
-
-/**
- * *** OMAP4460 *** Applicable for OMAP4470
- *
- * Below, in sequence, are the Register definitions,
- * the bitfields and the temperature definitions for OMAP4460.
- */
-
-/**
- * OMAP4460 register definitions
- *
- * Registers are defined as offsets. The offsets are
- * relative to FUSE_OPP_BGAP on 4460.
- */
-
-/* OMAP4460.FUSE_OPP_BGAP */
-#define OMAP4460_FUSE_OPP_BGAP 0x0
-
-/* OMAP4460.TEMP_SENSOR */
-#define OMAP4460_TEMP_SENSOR_CTRL_OFFSET 0xCC
-
-/* OMAP4460.BANDGAP_CTRL */
-#define OMAP4460_BGAP_CTRL_OFFSET 0x118
-
-/* OMAP4460.BANDGAP_COUNTER */
-#define OMAP4460_BGAP_COUNTER_OFFSET 0x11C
-
-/* OMAP4460.BANDGAP_THRESHOLD */
-#define OMAP4460_BGAP_THRESHOLD_OFFSET 0x120
-
-/* OMAP4460.TSHUT_THRESHOLD */
-#define OMAP4460_BGAP_TSHUT_OFFSET 0x124
-
-/* OMAP4460.BANDGAP_STATUS */
-#define OMAP4460_BGAP_STATUS_OFFSET 0x128
-
-/**
- * Register bitfields for OMAP4460
- *
- * All the macros bellow define the required bits for
- * controlling temperature on OMAP4460. Bit defines are
- * grouped by register.
- */
-/* OMAP4460.TEMP_SENSOR bits */
-#define OMAP4460_BGAP_TEMPSOFF_MASK BIT(13)
-#define OMAP4460_BGAP_TEMP_SENSOR_SOC_MASK BIT(11)
-#define OMAP4460_BGAP_TEMP_SENSOR_EOCZ_MASK BIT(10)
-#define OMAP4460_BGAP_TEMP_SENSOR_DTEMP_MASK (0x3ff << 0)
-
-/* OMAP4460.BANDGAP_CTRL bits */
-#define OMAP4460_SINGLE_MODE_MASK BIT(31)
-#define OMAP4460_MASK_HOT_MASK BIT(1)
-#define OMAP4460_MASK_COLD_MASK BIT(0)
-
-/* OMAP4460.BANDGAP_COUNTER bits */
-#define OMAP4460_COUNTER_MASK (0xffffff << 0)
-
-/* OMAP4460.BANDGAP_THRESHOLD bits */
-#define OMAP4460_T_HOT_MASK (0x3ff << 16)
-#define OMAP4460_T_COLD_MASK (0x3ff << 0)
-
-/* OMAP4460.TSHUT_THRESHOLD bits */
-#define OMAP4460_TSHUT_HOT_MASK (0x3ff << 16)
-#define OMAP4460_TSHUT_COLD_MASK (0x3ff << 0)
-
-/* OMAP4460.BANDGAP_STATUS bits */
-#define OMAP4460_CLEAN_STOP_MASK BIT(3)
-#define OMAP4460_BGAP_ALERT_MASK BIT(2)
-#define OMAP4460_HOT_FLAG_MASK BIT(1)
-#define OMAP4460_COLD_FLAG_MASK BIT(0)
-
-/**
- * Temperature limits and thresholds for OMAP4460
- *
- * All the macros bellow are definitions for handling the
- * ADC conversions and representation of temperature limits
- * and thresholds for OMAP4460.
- */
-
-/* ADC conversion table limits */
-#define OMAP4460_ADC_START_VALUE 530
-#define OMAP4460_ADC_END_VALUE 932
-/* bandgap clock limits */
-#define OMAP4460_MAX_FREQ 1500000
-#define OMAP4460_MIN_FREQ 1000000
-/* sensor limits */
-#define OMAP4460_MIN_TEMP -40000
-#define OMAP4460_MAX_TEMP 123000
-#define OMAP4460_HYST_VAL 5000
-/* interrupts thresholds */
-#define OMAP4460_TSHUT_HOT 900 /* 122 deg C */
-#define OMAP4460_TSHUT_COLD 895 /* 100 deg C */
-#define OMAP4460_T_HOT 800 /* 73 deg C */
-#define OMAP4460_T_COLD 795 /* 71 deg C */
-
-#endif /* __OMAP4XXX_BANDGAP_H */
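Editor's note: the *_MASK macros in the header removed above describe where each field lives in a bandgap register, so reading a field is a mask (and, for wider fields, shift) on the raw register value. A short sketch of that extraction; the mask names mirror the OMAP4430 ones but the register value is made up.

	#include <stdint.h>
	#include <stdio.h>

	#define DEMO_EOCZ_MASK	(1u << 8)	/* mirrors OMAP4430_BGAP_TEMP_SENSOR_EOCZ_MASK */
	#define DEMO_DTEMP_MASK	(0xffu << 0)	/* mirrors OMAP4430_BGAP_TEMP_SENSOR_DTEMP_MASK */

	int main(void)
	{
		uint32_t reg = 0x0000015a;		/* pretend readl() result */
		unsigned int busy = !!(reg & DEMO_EOCZ_MASK);	/* conversion still running? */
		unsigned int dtemp = reg & DEMO_DTEMP_MASK;	/* ADC code field */

		printf("eocz=%u dtemp=0x%02x\n", busy, dtemp);
		return 0;
	}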
diff --git a/drivers/staging/ti-soc-thermal/omap5-thermal-data.c b/drivers/staging/ti-soc-thermal/omap5-thermal-data.c
deleted file mode 100644
index eff0c80..0000000
--- a/drivers/staging/ti-soc-thermal/omap5-thermal-data.c
+++ /dev/null
@@ -1,359 +0,0 @@
-/*
- * OMAP5 thermal driver.
- *
- * Copyright (C) 2011-2012 Texas Instruments Inc.
- * Contact:
- * Eduardo Valentin <eduardo.valentin@ti.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include "ti-thermal.h"
-#include "ti-bandgap.h"
-#include "omap5xxx-bandgap.h"
-
-/*
- * OMAP5430 has three instances of thermal sensor for MPU, GPU & CORE,
- * need to describe the individual registers and bit fields.
- */
-
-/*
- * OMAP5430 MPU thermal sensor register offset and bit-fields
- */
-static struct temp_sensor_registers
-omap5430_mpu_temp_sensor_registers = {
- .temp_sensor_ctrl = OMAP5430_TEMP_SENSOR_MPU_OFFSET,
- .bgap_tempsoff_mask = OMAP5430_BGAP_TEMPSOFF_MASK,
- .bgap_eocz_mask = OMAP5430_BGAP_TEMP_SENSOR_EOCZ_MASK,
- .bgap_dtemp_mask = OMAP5430_BGAP_TEMP_SENSOR_DTEMP_MASK,
-
- .bgap_mask_ctrl = OMAP5430_BGAP_CTRL_OFFSET,
- .mask_hot_mask = OMAP5430_MASK_HOT_MPU_MASK,
- .mask_cold_mask = OMAP5430_MASK_COLD_MPU_MASK,
- .mask_sidlemode_mask = OMAP5430_MASK_SIDLEMODE_MASK,
- .mask_counter_delay_mask = OMAP5430_MASK_COUNTER_DELAY_MASK,
- .mask_freeze_mask = OMAP5430_MASK_FREEZE_MPU_MASK,
- .mask_clear_mask = OMAP5430_MASK_CLEAR_MPU_MASK,
- .mask_clear_accum_mask = OMAP5430_MASK_CLEAR_ACCUM_MPU_MASK,
-
-
- .bgap_counter = OMAP5430_BGAP_CTRL_OFFSET,
- .counter_mask = OMAP5430_COUNTER_MASK,
-
- .bgap_threshold = OMAP5430_BGAP_THRESHOLD_MPU_OFFSET,
- .threshold_thot_mask = OMAP5430_T_HOT_MASK,
- .threshold_tcold_mask = OMAP5430_T_COLD_MASK,
-
- .tshut_threshold = OMAP5430_BGAP_TSHUT_MPU_OFFSET,
- .tshut_hot_mask = OMAP5430_TSHUT_HOT_MASK,
- .tshut_cold_mask = OMAP5430_TSHUT_COLD_MASK,
-
- .bgap_status = OMAP5430_BGAP_STATUS_OFFSET,
- .status_clean_stop_mask = 0x0,
- .status_bgap_alert_mask = OMAP5430_BGAP_ALERT_MASK,
- .status_hot_mask = OMAP5430_HOT_MPU_FLAG_MASK,
- .status_cold_mask = OMAP5430_COLD_MPU_FLAG_MASK,
-
- .bgap_cumul_dtemp = OMAP5430_BGAP_CUMUL_DTEMP_MPU_OFFSET,
- .ctrl_dtemp_0 = OMAP5430_BGAP_DTEMP_MPU_0_OFFSET,
- .ctrl_dtemp_1 = OMAP5430_BGAP_DTEMP_MPU_1_OFFSET,
- .ctrl_dtemp_2 = OMAP5430_BGAP_DTEMP_MPU_2_OFFSET,
- .ctrl_dtemp_3 = OMAP5430_BGAP_DTEMP_MPU_3_OFFSET,
- .ctrl_dtemp_4 = OMAP5430_BGAP_DTEMP_MPU_4_OFFSET,
- .bgap_efuse = OMAP5430_FUSE_OPP_BGAP_MPU,
-};
-
-/*
- * OMAP5430 GPU thermal sensor register offset and bit-fields
- */
-static struct temp_sensor_registers
-omap5430_gpu_temp_sensor_registers = {
- .temp_sensor_ctrl = OMAP5430_TEMP_SENSOR_GPU_OFFSET,
- .bgap_tempsoff_mask = OMAP5430_BGAP_TEMPSOFF_MASK,
- .bgap_eocz_mask = OMAP5430_BGAP_TEMP_SENSOR_EOCZ_MASK,
- .bgap_dtemp_mask = OMAP5430_BGAP_TEMP_SENSOR_DTEMP_MASK,
-
- .bgap_mask_ctrl = OMAP5430_BGAP_CTRL_OFFSET,
- .mask_hot_mask = OMAP5430_MASK_HOT_GPU_MASK,
- .mask_cold_mask = OMAP5430_MASK_COLD_GPU_MASK,
- .mask_sidlemode_mask = OMAP5430_MASK_SIDLEMODE_MASK,
- .mask_counter_delay_mask = OMAP5430_MASK_COUNTER_DELAY_MASK,
- .mask_freeze_mask = OMAP5430_MASK_FREEZE_GPU_MASK,
- .mask_clear_mask = OMAP5430_MASK_CLEAR_GPU_MASK,
- .mask_clear_accum_mask = OMAP5430_MASK_CLEAR_ACCUM_GPU_MASK,
-
- .bgap_counter = OMAP5430_BGAP_CTRL_OFFSET,
- .counter_mask = OMAP5430_COUNTER_MASK,
-
- .bgap_threshold = OMAP5430_BGAP_THRESHOLD_GPU_OFFSET,
- .threshold_thot_mask = OMAP5430_T_HOT_MASK,
- .threshold_tcold_mask = OMAP5430_T_COLD_MASK,
-
- .tshut_threshold = OMAP5430_BGAP_TSHUT_GPU_OFFSET,
- .tshut_hot_mask = OMAP5430_TSHUT_HOT_MASK,
- .tshut_cold_mask = OMAP5430_TSHUT_COLD_MASK,
-
- .bgap_status = OMAP5430_BGAP_STATUS_OFFSET,
- .status_clean_stop_mask = 0x0,
- .status_bgap_alert_mask = OMAP5430_BGAP_ALERT_MASK,
- .status_hot_mask = OMAP5430_HOT_GPU_FLAG_MASK,
- .status_cold_mask = OMAP5430_COLD_GPU_FLAG_MASK,
-
- .bgap_cumul_dtemp = OMAP5430_BGAP_CUMUL_DTEMP_GPU_OFFSET,
- .ctrl_dtemp_0 = OMAP5430_BGAP_DTEMP_GPU_0_OFFSET,
- .ctrl_dtemp_1 = OMAP5430_BGAP_DTEMP_GPU_1_OFFSET,
- .ctrl_dtemp_2 = OMAP5430_BGAP_DTEMP_GPU_2_OFFSET,
- .ctrl_dtemp_3 = OMAP5430_BGAP_DTEMP_GPU_3_OFFSET,
- .ctrl_dtemp_4 = OMAP5430_BGAP_DTEMP_GPU_4_OFFSET,
-
- .bgap_efuse = OMAP5430_FUSE_OPP_BGAP_GPU,
-};
-
-/*
- * OMAP5430 CORE thermal sensor register offset and bit-fields
- */
-static struct temp_sensor_registers
-omap5430_core_temp_sensor_registers = {
- .temp_sensor_ctrl = OMAP5430_TEMP_SENSOR_CORE_OFFSET,
- .bgap_tempsoff_mask = OMAP5430_BGAP_TEMPSOFF_MASK,
- .bgap_eocz_mask = OMAP5430_BGAP_TEMP_SENSOR_EOCZ_MASK,
- .bgap_dtemp_mask = OMAP5430_BGAP_TEMP_SENSOR_DTEMP_MASK,
-
- .bgap_mask_ctrl = OMAP5430_BGAP_CTRL_OFFSET,
- .mask_hot_mask = OMAP5430_MASK_HOT_CORE_MASK,
- .mask_cold_mask = OMAP5430_MASK_COLD_CORE_MASK,
- .mask_sidlemode_mask = OMAP5430_MASK_SIDLEMODE_MASK,
- .mask_counter_delay_mask = OMAP5430_MASK_COUNTER_DELAY_MASK,
- .mask_freeze_mask = OMAP5430_MASK_FREEZE_CORE_MASK,
- .mask_clear_mask = OMAP5430_MASK_CLEAR_CORE_MASK,
- .mask_clear_accum_mask = OMAP5430_MASK_CLEAR_ACCUM_CORE_MASK,
-
- .bgap_counter = OMAP5430_BGAP_CTRL_OFFSET,
- .counter_mask = OMAP5430_COUNTER_MASK,
-
- .bgap_threshold = OMAP5430_BGAP_THRESHOLD_CORE_OFFSET,
- .threshold_thot_mask = OMAP5430_T_HOT_MASK,
- .threshold_tcold_mask = OMAP5430_T_COLD_MASK,
-
- .tshut_threshold = OMAP5430_BGAP_TSHUT_CORE_OFFSET,
- .tshut_hot_mask = OMAP5430_TSHUT_HOT_MASK,
- .tshut_cold_mask = OMAP5430_TSHUT_COLD_MASK,
-
- .bgap_status = OMAP5430_BGAP_STATUS_OFFSET,
- .status_clean_stop_mask = 0x0,
- .status_bgap_alert_mask = OMAP5430_BGAP_ALERT_MASK,
- .status_hot_mask = OMAP5430_HOT_CORE_FLAG_MASK,
- .status_cold_mask = OMAP5430_COLD_CORE_FLAG_MASK,
-
- .bgap_cumul_dtemp = OMAP5430_BGAP_CUMUL_DTEMP_CORE_OFFSET,
- .ctrl_dtemp_0 = OMAP5430_BGAP_DTEMP_CORE_0_OFFSET,
- .ctrl_dtemp_1 = OMAP5430_BGAP_DTEMP_CORE_1_OFFSET,
- .ctrl_dtemp_2 = OMAP5430_BGAP_DTEMP_CORE_2_OFFSET,
- .ctrl_dtemp_3 = OMAP5430_BGAP_DTEMP_CORE_3_OFFSET,
- .ctrl_dtemp_4 = OMAP5430_BGAP_DTEMP_CORE_4_OFFSET,
-
- .bgap_efuse = OMAP5430_FUSE_OPP_BGAP_CORE,
-};
-
-/* Thresholds and limits for OMAP5430 MPU temperature sensor */
-static struct temp_sensor_data omap5430_mpu_temp_sensor_data = {
- .tshut_hot = OMAP5430_MPU_TSHUT_HOT,
- .tshut_cold = OMAP5430_MPU_TSHUT_COLD,
- .t_hot = OMAP5430_MPU_T_HOT,
- .t_cold = OMAP5430_MPU_T_COLD,
- .min_freq = OMAP5430_MPU_MIN_FREQ,
- .max_freq = OMAP5430_MPU_MAX_FREQ,
- .max_temp = OMAP5430_MPU_MAX_TEMP,
- .min_temp = OMAP5430_MPU_MIN_TEMP,
- .hyst_val = OMAP5430_MPU_HYST_VAL,
- .update_int1 = 1000,
- .update_int2 = 2000,
-};
-
-/* Thresholds and limits for OMAP5430 GPU temperature sensor */
-static struct temp_sensor_data omap5430_gpu_temp_sensor_data = {
- .tshut_hot = OMAP5430_GPU_TSHUT_HOT,
- .tshut_cold = OMAP5430_GPU_TSHUT_COLD,
- .t_hot = OMAP5430_GPU_T_HOT,
- .t_cold = OMAP5430_GPU_T_COLD,
- .min_freq = OMAP5430_GPU_MIN_FREQ,
- .max_freq = OMAP5430_GPU_MAX_FREQ,
- .max_temp = OMAP5430_GPU_MAX_TEMP,
- .min_temp = OMAP5430_GPU_MIN_TEMP,
- .hyst_val = OMAP5430_GPU_HYST_VAL,
- .update_int1 = 1000,
- .update_int2 = 2000,
-};
-
-/* Thresholds and limits for OMAP5430 CORE temperature sensor */
-static struct temp_sensor_data omap5430_core_temp_sensor_data = {
- .tshut_hot = OMAP5430_CORE_TSHUT_HOT,
- .tshut_cold = OMAP5430_CORE_TSHUT_COLD,
- .t_hot = OMAP5430_CORE_T_HOT,
- .t_cold = OMAP5430_CORE_T_COLD,
- .min_freq = OMAP5430_CORE_MIN_FREQ,
- .max_freq = OMAP5430_CORE_MAX_FREQ,
- .max_temp = OMAP5430_CORE_MAX_TEMP,
- .min_temp = OMAP5430_CORE_MIN_TEMP,
- .hyst_val = OMAP5430_CORE_HYST_VAL,
- .update_int1 = 1000,
- .update_int2 = 2000,
-};
-
-/*
- * OMAP54xx ES2.0 : Temperature values in milli degree celsius
- * ADC code values from 540 to 945
- */
-static int
-omap5430_adc_to_temp[
- OMAP5430_ADC_END_VALUE - OMAP5430_ADC_START_VALUE + 1] = {
- /* Index 540 - 549 */
- -40000, -40000, -40000, -40000, -39800, -39400, -39000, -38600, -38200,
- -37800,
- /* Index 550 - 559 */
- -37400, -37000, -36600, -36200, -35800, -35300, -34700, -34200, -33800,
- -33400,
- /* Index 560 - 569 */
- -33000, -32600, -32200, -31800, -31400, -31000, -30600, -30200, -29800,
- -29400,
- /* Index 570 - 579 */
- -29000, -28600, -28200, -27700, -27100, -26600, -26200, -25800, -25400,
- -25000,
- /* Index 580 - 589 */
- -24600, -24200, -23800, -23400, -23000, -22600, -22200, -21600, -21400,
- -21000,
- /* Index 590 - 599 */
- -20500, -19900, -19400, -19000, -18600, -18200, -17800, -17400, -17000,
- -16600,
- /* Index 600 - 609 */
- -16200, -15800, -15400, -15000, -14600, -14200, -13800, -13400, -13000,
- -12500,
- /* Index 610 - 619 */
- -11900, -11400, -11000, -10600, -10200, -9800, -9400, -9000, -8600,
- -8200,
- /* Index 620 - 629 */
- -7800, -7400, -7000, -6600, -6200, -5800, -5400, -5000, -4500, -3900,
- /* Index 630 - 639 */
- -3400, -3000, -2600, -2200, -1800, -1400, -1000, -600, -200, 200,
- /* Index 640 - 649 */
- 600, 1000, 1400, 1800, 2200, 2600, 3000, 3400, 3900, 4500,
- /* Index 650 - 659 */
- 5000, 5400, 5800, 6200, 6600, 7000, 7400, 7800, 8200, 8600,
- /* Index 660 - 669 */
- 9000, 9400, 9800, 10200, 10600, 11000, 11400, 11800, 12200, 12700,
- /* Index 670 - 679 */
- 13300, 13800, 14200, 14600, 15000, 15400, 15800, 16200, 16600, 17000,
- /* Index 680 - 689 */
- 17400, 17800, 18200, 18600, 19000, 19400, 19800, 20200, 20600, 21100,
- /* Index 690 - 699 */
- 21400, 21900, 22500, 23000, 23400, 23800, 24200, 24600, 25000, 25400,
- /* Index 700 - 709 */
- 25800, 26200, 26600, 27000, 27400, 27800, 28200, 28600, 29000, 29400,
- /* Index 710 - 719 */
- 29800, 30200, 30600, 31000, 31400, 31900, 32500, 33000, 33400, 33800,
- /* Index 720 - 729 */
- 34200, 34600, 35000, 35400, 35800, 36200, 36600, 37000, 37400, 37800,
- /* Index 730 - 739 */
- 38200, 38600, 39000, 39400, 39800, 40200, 40600, 41000, 41400, 41800,
- /* Index 740 - 749 */
- 42200, 42600, 43100, 43700, 44200, 44600, 45000, 45400, 45800, 46200,
- /* Index 750 - 759 */
- 46600, 47000, 47400, 47800, 48200, 48600, 49000, 49400, 49800, 50200,
- /* Index 760 - 769 */
- 50600, 51000, 51400, 51800, 52200, 52600, 53000, 53400, 53800, 54200,
- /* Index 770 - 779 */
- 54600, 55000, 55400, 55900, 56500, 57000, 57400, 57800, 58200, 58600,
- /* Index 780 - 789 */
- 59000, 59400, 59800, 60200, 60600, 61000, 61400, 61800, 62200, 62600,
- /* Index 790 - 799 */
- 63000, 63400, 63800, 64200, 64600, 65000, 65400, 65800, 66200, 66600,
- /* Index 800 - 809 */
- 67000, 67400, 67800, 68200, 68600, 69000, 69400, 69800, 70200, 70600,
- /* Index 810 - 819 */
- 71000, 71500, 72100, 72600, 73000, 73400, 73800, 74200, 74600, 75000,
- /* Index 820 - 829 */
- 75400, 75800, 76200, 76600, 77000, 77400, 77800, 78200, 78600, 79000,
- /* Index 830 - 839 */
- 79400, 79800, 80200, 80600, 81000, 81400, 81800, 82200, 82600, 83000,
- /* Index 840 - 849 */
- 83400, 83800, 84200, 84600, 85000, 85400, 85800, 86200, 86600, 87000,
- /* Index 850 - 859 */
- 87400, 87800, 88200, 88600, 89000, 89400, 89800, 90200, 90600, 91000,
- /* Index 860 - 869 */
- 91400, 91800, 92200, 92600, 93000, 93400, 93800, 94200, 94600, 95000,
- /* Index 870 - 879 */
- 95400, 95800, 96200, 96600, 97000, 97500, 98100, 98600, 99000, 99400,
- /* Index 880 - 889 */
- 99800, 100200, 100600, 101000, 101400, 101800, 102200, 102600, 103000,
- 103400,
- /* Index 890 - 899 */
- 103800, 104200, 104600, 105000, 105400, 105800, 106200, 106600, 107000,
- 107400,
- /* Index 900 - 909 */
- 107800, 108200, 108600, 109000, 109400, 109800, 110200, 110600, 111000,
- 111400,
- /* Index 910 - 919 */
- 111800, 112200, 112600, 113000, 113400, 113800, 114200, 114600, 115000,
- 115400,
- /* Index 920 - 929 */
- 115800, 116200, 116600, 117000, 117400, 117800, 118200, 118600, 119000,
- 119400,
- /* Index 930 - 939 */
- 119800, 120200, 120600, 121000, 121400, 121800, 122400, 122600, 123000,
- 123400,
- /* Index 940 - 945 */
- 123800, 124200, 124600, 124900, 125000, 125000,
-};
-
-/* OMAP54xx ES2.0 data */
-const struct ti_bandgap_data omap5430_data = {
- .features = TI_BANDGAP_FEATURE_TSHUT_CONFIG |
- TI_BANDGAP_FEATURE_FREEZE_BIT |
- TI_BANDGAP_FEATURE_TALERT |
- TI_BANDGAP_FEATURE_COUNTER_DELAY |
- TI_BANDGAP_FEATURE_HISTORY_BUFFER,
- .fclock_name = "l3instr_ts_gclk_div",
- .div_ck_name = "l3instr_ts_gclk_div",
- .conv_table = omap5430_adc_to_temp,
- .adc_start_val = OMAP5430_ADC_START_VALUE,
- .adc_end_val = OMAP5430_ADC_END_VALUE,
- .expose_sensor = ti_thermal_expose_sensor,
- .remove_sensor = ti_thermal_remove_sensor,
- .report_temperature = ti_thermal_report_sensor_temperature,
- .sensors = {
- {
- .registers = &omap5430_mpu_temp_sensor_registers,
- .ts_data = &omap5430_mpu_temp_sensor_data,
- .domain = "cpu",
- .register_cooling = ti_thermal_register_cpu_cooling,
- .unregister_cooling = ti_thermal_unregister_cpu_cooling,
- .slope = OMAP_GRADIENT_SLOPE_5430_CPU,
- .constant = OMAP_GRADIENT_CONST_5430_CPU,
- .slope_pcb = OMAP_GRADIENT_SLOPE_W_PCB_5430_CPU,
- .constant_pcb = OMAP_GRADIENT_CONST_W_PCB_5430_CPU,
- },
- {
- .registers = &omap5430_gpu_temp_sensor_registers,
- .ts_data = &omap5430_gpu_temp_sensor_data,
- .domain = "gpu",
- .slope = OMAP_GRADIENT_SLOPE_5430_GPU,
- .constant = OMAP_GRADIENT_CONST_5430_GPU,
- .slope_pcb = OMAP_GRADIENT_SLOPE_W_PCB_5430_GPU,
- .constant_pcb = OMAP_GRADIENT_CONST_W_PCB_5430_GPU,
- },
- {
- .registers = &omap5430_core_temp_sensor_registers,
- .ts_data = &omap5430_core_temp_sensor_data,
- .domain = "core",
- },
- },
- .sensor_count = 3,
-};
diff --git a/drivers/staging/ti-soc-thermal/omap5xxx-bandgap.h b/drivers/staging/ti-soc-thermal/omap5xxx-bandgap.h
deleted file mode 100644
index 400b55d..0000000
--- a/drivers/staging/ti-soc-thermal/omap5xxx-bandgap.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * OMAP5xxx bandgap registers, bitfields and temperature definitions
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Contact:
- * Eduardo Valentin <eduardo.valentin@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-#ifndef __OMAP5XXX_BANDGAP_H
-#define __OMAP5XXX_BANDGAP_H
-
-/**
- * *** OMAP5430 ***
- *
- * Below, in sequence, are the Register definitions,
- * the bitfields and the temperature definitions for OMAP5430.
- */
-
-/**
- * OMAP5430 register definitions
- *
- * Registers are defined as offsets. The offsets are
- * relative to FUSE_OPP_BGAP_GPU on 5430.
- *
- * Register below are grouped by domain (not necessarily in offset order)
- */
-
-/* OMAP5430.GPU register offsets */
-#define OMAP5430_FUSE_OPP_BGAP_GPU 0x0
-#define OMAP5430_TEMP_SENSOR_GPU_OFFSET 0x150
-#define OMAP5430_BGAP_THRESHOLD_GPU_OFFSET 0x1A8
-#define OMAP5430_BGAP_TSHUT_GPU_OFFSET 0x1B4
-#define OMAP5430_BGAP_CUMUL_DTEMP_GPU_OFFSET 0x1C0
-#define OMAP5430_BGAP_DTEMP_GPU_0_OFFSET 0x1F4
-#define OMAP5430_BGAP_DTEMP_GPU_1_OFFSET 0x1F8
-#define OMAP5430_BGAP_DTEMP_GPU_2_OFFSET 0x1FC
-#define OMAP5430_BGAP_DTEMP_GPU_3_OFFSET 0x200
-#define OMAP5430_BGAP_DTEMP_GPU_4_OFFSET 0x204
-
-/* OMAP5430.MPU register offsets */
-#define OMAP5430_FUSE_OPP_BGAP_MPU 0x4
-#define OMAP5430_TEMP_SENSOR_MPU_OFFSET 0x14C
-#define OMAP5430_BGAP_THRESHOLD_MPU_OFFSET 0x1A4
-#define OMAP5430_BGAP_TSHUT_MPU_OFFSET 0x1B0
-#define OMAP5430_BGAP_CUMUL_DTEMP_MPU_OFFSET 0x1BC
-#define OMAP5430_BGAP_DTEMP_MPU_0_OFFSET 0x1E0
-#define OMAP5430_BGAP_DTEMP_MPU_1_OFFSET 0x1E4
-#define OMAP5430_BGAP_DTEMP_MPU_2_OFFSET 0x1E8
-#define OMAP5430_BGAP_DTEMP_MPU_3_OFFSET 0x1EC
-#define OMAP5430_BGAP_DTEMP_MPU_4_OFFSET 0x1F0
-
-/* OMAP5430.MPU register offsets */
-#define OMAP5430_FUSE_OPP_BGAP_CORE 0x8
-#define OMAP5430_TEMP_SENSOR_CORE_OFFSET 0x154
-#define OMAP5430_BGAP_THRESHOLD_CORE_OFFSET 0x1AC
-#define OMAP5430_BGAP_TSHUT_CORE_OFFSET 0x1B8
-#define OMAP5430_BGAP_CUMUL_DTEMP_CORE_OFFSET 0x1C4
-#define OMAP5430_BGAP_DTEMP_CORE_0_OFFSET 0x208
-#define OMAP5430_BGAP_DTEMP_CORE_1_OFFSET 0x20C
-#define OMAP5430_BGAP_DTEMP_CORE_2_OFFSET 0x210
-#define OMAP5430_BGAP_DTEMP_CORE_3_OFFSET 0x214
-#define OMAP5430_BGAP_DTEMP_CORE_4_OFFSET 0x218
-
-/* OMAP5430.common register offsets */
-#define OMAP5430_BGAP_CTRL_OFFSET 0x1A0
-#define OMAP5430_BGAP_STATUS_OFFSET 0x1C8
-
-/**
- * Register bitfields for OMAP5430
- *
- * All the macros bellow define the required bits for
- * controlling temperature on OMAP5430. Bit defines are
- * grouped by register.
- */
-
-/* OMAP5430.TEMP_SENSOR */
-#define OMAP5430_BGAP_TEMP_SENSOR_SOC_MASK BIT(12)
-#define OMAP5430_BGAP_TEMPSOFF_MASK BIT(11)
-#define OMAP5430_BGAP_TEMP_SENSOR_EOCZ_MASK BIT(10)
-#define OMAP5430_BGAP_TEMP_SENSOR_DTEMP_MASK (0x3ff << 0)
-
-/* OMAP5430.BANDGAP_CTRL */
-#define OMAP5430_MASK_SIDLEMODE_MASK (0x3 << 30)
-#define OMAP5430_MASK_COUNTER_DELAY_MASK (0x7 << 27)
-#define OMAP5430_MASK_FREEZE_CORE_MASK BIT(23)
-#define OMAP5430_MASK_FREEZE_GPU_MASK BIT(22)
-#define OMAP5430_MASK_FREEZE_MPU_MASK BIT(21)
-#define OMAP5430_MASK_CLEAR_CORE_MASK BIT(20)
-#define OMAP5430_MASK_CLEAR_GPU_MASK BIT(19)
-#define OMAP5430_MASK_CLEAR_MPU_MASK BIT(18)
-#define OMAP5430_MASK_CLEAR_ACCUM_CORE_MASK BIT(17)
-#define OMAP5430_MASK_CLEAR_ACCUM_GPU_MASK BIT(16)
-#define OMAP5430_MASK_CLEAR_ACCUM_MPU_MASK BIT(15)
-#define OMAP5430_MASK_HOT_CORE_MASK BIT(5)
-#define OMAP5430_MASK_COLD_CORE_MASK BIT(4)
-#define OMAP5430_MASK_HOT_GPU_MASK BIT(3)
-#define OMAP5430_MASK_COLD_GPU_MASK BIT(2)
-#define OMAP5430_MASK_HOT_MPU_MASK BIT(1)
-#define OMAP5430_MASK_COLD_MPU_MASK BIT(0)
-
-/* OMAP5430.BANDGAP_COUNTER */
-#define OMAP5430_COUNTER_MASK (0xffffff << 0)
-
-/* OMAP5430.BANDGAP_THRESHOLD */
-#define OMAP5430_T_HOT_MASK (0x3ff << 16)
-#define OMAP5430_T_COLD_MASK (0x3ff << 0)
-
-/* OMAP5430.TSHUT_THRESHOLD */
-#define OMAP5430_TSHUT_HOT_MASK (0x3ff << 16)
-#define OMAP5430_TSHUT_COLD_MASK (0x3ff << 0)
-
-/* OMAP5430.BANDGAP_CUMUL_DTEMP_MPU */
-#define OMAP5430_CUMUL_DTEMP_MPU_MASK (0xffffffff << 0)
-
-/* OMAP5430.BANDGAP_CUMUL_DTEMP_GPU */
-#define OMAP5430_CUMUL_DTEMP_GPU_MASK (0xffffffff << 0)
-
-/* OMAP5430.BANDGAP_CUMUL_DTEMP_CORE */
-#define OMAP5430_CUMUL_DTEMP_CORE_MASK (0xffffffff << 0)
-
-/* OMAP5430.BANDGAP_STATUS */
-#define OMAP5430_BGAP_ALERT_MASK BIT(31)
-#define OMAP5430_HOT_CORE_FLAG_MASK BIT(5)
-#define OMAP5430_COLD_CORE_FLAG_MASK BIT(4)
-#define OMAP5430_HOT_GPU_FLAG_MASK BIT(3)
-#define OMAP5430_COLD_GPU_FLAG_MASK BIT(2)
-#define OMAP5430_HOT_MPU_FLAG_MASK BIT(1)
-#define OMAP5430_COLD_MPU_FLAG_MASK BIT(0)
-
-/**
- * Temperature limits and thresholds for OMAP5430
- *
- * All the macros below are definitions for handling the
- * ADC conversions and representation of temperature limits
- * and thresholds for OMAP5430. Definitions are grouped
- * by temperature domain.
- */
-
-/* OMAP5430.common temperature definitions */
-/* ADC conversion table limits */
-#define OMAP5430_ADC_START_VALUE 540
-#define OMAP5430_ADC_END_VALUE 945
-
-/* OMAP5430.GPU temperature definitions */
-/* bandgap clock limits */
-#define OMAP5430_GPU_MAX_FREQ 1500000
-#define OMAP5430_GPU_MIN_FREQ 1000000
-/* sensor limits */
-#define OMAP5430_GPU_MIN_TEMP -40000
-#define OMAP5430_GPU_MAX_TEMP 125000
-#define OMAP5430_GPU_HYST_VAL 5000
-/* interrupts thresholds */
-#define OMAP5430_GPU_TSHUT_HOT 915
-#define OMAP5430_GPU_TSHUT_COLD 900
-#define OMAP5430_GPU_T_HOT 800
-#define OMAP5430_GPU_T_COLD 795
-
-/* OMAP5430.MPU temperature definitions */
-/* bandgap clock limits */
-#define OMAP5430_MPU_MAX_FREQ 1500000
-#define OMAP5430_MPU_MIN_FREQ 1000000
-/* sensor limits */
-#define OMAP5430_MPU_MIN_TEMP -40000
-#define OMAP5430_MPU_MAX_TEMP 125000
-#define OMAP5430_MPU_HYST_VAL 5000
-/* interrupts thresholds */
-#define OMAP5430_MPU_TSHUT_HOT 915
-#define OMAP5430_MPU_TSHUT_COLD 900
-#define OMAP5430_MPU_T_HOT 800
-#define OMAP5430_MPU_T_COLD 795
-
-/* OMAP5430.CORE temperature definitions */
-/* bandgap clock limits */
-#define OMAP5430_CORE_MAX_FREQ 1500000
-#define OMAP5430_CORE_MIN_FREQ 1000000
-/* sensor limits */
-#define OMAP5430_CORE_MIN_TEMP -40000
-#define OMAP5430_CORE_MAX_TEMP 125000
-#define OMAP5430_CORE_HYST_VAL 5000
-/* interrupts thresholds */
-#define OMAP5430_CORE_TSHUT_HOT 915
-#define OMAP5430_CORE_TSHUT_COLD 900
-#define OMAP5430_CORE_T_HOT 800
-#define OMAP5430_CORE_T_COLD 795
-
-#endif /* __OMAP5XXX_BANDGAP_H */
diff --git a/drivers/staging/ti-soc-thermal/ti-bandgap.c b/drivers/staging/ti-soc-thermal/ti-bandgap.c
deleted file mode 100644
index f20c1cf..0000000
--- a/drivers/staging/ti-soc-thermal/ti-bandgap.c
+++ /dev/null
@@ -1,1546 +0,0 @@
-/*
- * TI Bandgap temperature sensor driver
- *
- * Copyright (C) 2011-2012 Texas Instruments Incorporated - http://www.ti.com/
- * Author: J Keerthy <j-keerthy@ti.com>
- * Author: Moiz Sonasath <m-sonasath@ti.com>
- * Couple of fixes, DT and MFD adaptation:
- * Eduardo Valentin <eduardo.valentin@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/module.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/clk.h>
-#include <linux/gpio.h>
-#include <linux/platform_device.h>
-#include <linux/err.h>
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/reboot.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
-#include <linux/of_irq.h>
-#include <linux/io.h>
-
-#include "ti-bandgap.h"
-
-/*** Helper functions to access registers and their bitfields ***/
-
-/**
- * ti_bandgap_readl() - simple read helper function
- * @bgp: pointer to ti_bandgap structure
- * @reg: desired register (offset) to be read
- *
- * Helper function to read bandgap registers. It uses the io remapped area.
- * Return: the register value.
- */
-static u32 ti_bandgap_readl(struct ti_bandgap *bgp, u32 reg)
-{
- return readl(bgp->base + reg);
-}
-
-/**
- * ti_bandgap_writel() - simple write helper function
- * @bgp: pointer to ti_bandgap structure
- * @val: desired register value to be written
- * @reg: desired register (offset) to be written
- *
- * Helper function to write bandgap registers. It uses the io remapped area.
- */
-static void ti_bandgap_writel(struct ti_bandgap *bgp, u32 val, u32 reg)
-{
- writel(val, bgp->base + reg);
-}
-
-/**
- * DOC: macro to update bits.
- *
- * RMW_BITS() - used to read, modify and update bandgap bitfields.
- * The value passed will be shifted.
- */
-#define RMW_BITS(bgp, id, reg, mask, val) \
-do { \
- struct temp_sensor_registers *t; \
- u32 r; \
- \
- t = bgp->conf->sensors[(id)].registers; \
- r = ti_bandgap_readl(bgp, t->reg); \
- r &= ~t->mask; \
- r |= (val) << __ffs(t->mask); \
- ti_bandgap_writel(bgp, r, t->reg); \
-} while (0)
-
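A minimal, self-contained sketch of the read-modify-write pattern that RMW_BITS() implements; the helper name rmw_field() and the example call are assumptions for illustration only, not part of the driver:

#include <linux/types.h>
#include <linux/bitops.h>	/* BIT(), __ffs() */

/* Update only the field selected by @mask inside a register image. */
static inline u32 rmw_field(u32 reg, u32 mask, u32 val)
{
	reg &= ~mask;
	reg |= (val << __ffs(mask)) & mask;
	return reg;
}

/*
 * For instance, rmw_field(ctrl, OMAP5430_MASK_COUNTER_DELAY_MASK, 0x4)
 * programs a 250 ms counter delay without disturbing the other bits,
 * which is what RMW_BITS() does on the memory-mapped register.
 */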
-/*** Basic helper functions ***/
-
-/**
- * ti_bandgap_power() - controls the power state of a bandgap device
- * @bgp: pointer to ti_bandgap structure
- * @on: desired power state (1 - on, 0 - off)
- *
- * Used to power on/off a bandgap device instance. Only used on those
- * that feature the tempsoff bit.
- *
- * Return: 0 on success, -ENOTSUPP if tempsoff is not supported.
- */
-static int ti_bandgap_power(struct ti_bandgap *bgp, bool on)
-{
- int i, ret = 0;
-
- if (!TI_BANDGAP_HAS(bgp, POWER_SWITCH)) {
- ret = -ENOTSUPP;
- goto exit;
- }
-
- for (i = 0; i < bgp->conf->sensor_count; i++)
- /* active on 0 */
- RMW_BITS(bgp, i, temp_sensor_ctrl, bgap_tempsoff_mask, !on);
-
-exit:
- return ret;
-}
-
-/**
- * ti_bandgap_read_temp() - helper function to read sensor temperature
- * @bgp: pointer to ti_bandgap structure
- * @id: bandgap sensor id
- *
- * Centralizes the steps needed to read the sensor temperature register.
- * This helper exists because, depending on the bandgap device version,
- * the bandgap state machine may need to be frozen before fetching the
- * register value.
- *
- * Return: temperature in ADC values.
- */
-static u32 ti_bandgap_read_temp(struct ti_bandgap *bgp, int id)
-{
- struct temp_sensor_registers *tsr;
- u32 temp, reg;
-
- tsr = bgp->conf->sensors[id].registers;
- reg = tsr->temp_sensor_ctrl;
-
- if (TI_BANDGAP_HAS(bgp, FREEZE_BIT)) {
- RMW_BITS(bgp, id, bgap_mask_ctrl, mask_freeze_mask, 1);
- /*
- * In case we cannot read from cur_dtemp / dtemp_0,
- * then we read from the last valid temp read
- */
- reg = tsr->ctrl_dtemp_1;
- }
-
- /* read temperature */
- temp = ti_bandgap_readl(bgp, reg);
- temp &= tsr->bgap_dtemp_mask;
-
- if (TI_BANDGAP_HAS(bgp, FREEZE_BIT))
- RMW_BITS(bgp, id, bgap_mask_ctrl, mask_freeze_mask, 0);
-
- return temp;
-}
-
-/*** IRQ handlers ***/
-
-/**
- * ti_bandgap_talert_irq_handler() - handles Temperature alert IRQs
- * @irq: IRQ number
- * @data: private data (struct ti_bandgap *)
- *
- * This is the Talert handler. Use it only if bandgap device features
- * HAS(TALERT). This handler goes over all sensors and checks their
- * conditions and acts accordingly. In case there are events pending,
- * it will reset the event mask to wait for the opposite event (next event).
- * Every time there is a new event, it will be reported to the thermal layer.
- *
- * Return: IRQ_HANDLED
- */
-static irqreturn_t ti_bandgap_talert_irq_handler(int irq, void *data)
-{
- struct ti_bandgap *bgp = data;
- struct temp_sensor_registers *tsr;
- u32 t_hot = 0, t_cold = 0, ctrl;
- int i;
-
- spin_lock(&bgp->lock);
- for (i = 0; i < bgp->conf->sensor_count; i++) {
- tsr = bgp->conf->sensors[i].registers;
- ctrl = ti_bandgap_readl(bgp, tsr->bgap_status);
-
- /* Read the status of t_hot */
- t_hot = ctrl & tsr->status_hot_mask;
-
- /* Read the status of t_cold */
- t_cold = ctrl & tsr->status_cold_mask;
-
- if (!t_cold && !t_hot)
- continue;
-
- ctrl = ti_bandgap_readl(bgp, tsr->bgap_mask_ctrl);
- /*
-		 * One TALERT interrupt, two sources:
-		 * if the interrupt is due to t_hot, then mask t_hot and
-		 * unmask t_cold; else mask t_cold and unmask t_hot.
- */
- if (t_hot) {
- ctrl &= ~tsr->mask_hot_mask;
- ctrl |= tsr->mask_cold_mask;
- } else if (t_cold) {
- ctrl &= ~tsr->mask_cold_mask;
- ctrl |= tsr->mask_hot_mask;
- }
-
- ti_bandgap_writel(bgp, ctrl, tsr->bgap_mask_ctrl);
-
- dev_dbg(bgp->dev,
- "%s: IRQ from %s sensor: hotevent %d coldevent %d\n",
- __func__, bgp->conf->sensors[i].domain,
- t_hot, t_cold);
-
- /* report temperature to whom may concern */
- if (bgp->conf->report_temperature)
- bgp->conf->report_temperature(bgp, i);
- }
- spin_unlock(&bgp->lock);
-
- return IRQ_HANDLED;
-}
-
-/**
- * ti_bandgap_tshut_irq_handler() - handles Temperature shutdown signal
- * @irq: IRQ number
- * @data: private data (unused)
- *
- * This is the Tshut handler. Use it only if bandgap device features
- * HAS(TSHUT). If any sensor fires the Tshut signal, we simply shut down
- * the system.
- *
- * Return: IRQ_HANDLED
- */
-static irqreturn_t ti_bandgap_tshut_irq_handler(int irq, void *data)
-{
- pr_emerg("%s: TSHUT temperature reached. Needs shut down...\n",
- __func__);
-
- orderly_poweroff(true);
-
- return IRQ_HANDLED;
-}
-
-/*** Helper functions which manipulate conversion ADC <-> mCelsius ***/
-
-/**
- * ti_bandgap_adc_to_mcelsius() - converts an ADC value to mCelsius scale
- * @bgp: struct ti_bandgap pointer
- * @adc_val: value in ADC representation
- * @t: address where to write the resulting temperature in mCelsius
- *
- * Simple conversion from ADC representation to mCelsius. In case the ADC value
- * is out of the ADC conv table range, it returns -ERANGE, 0 on success.
- * The conversion table is indexed by the ADC values.
- *
- * Return: 0 if conversion was successful, else -ERANGE in case the @adc_val
- * argument is out of the ADC conv table range.
- */
-static
-int ti_bandgap_adc_to_mcelsius(struct ti_bandgap *bgp, int adc_val, int *t)
-{
- const struct ti_bandgap_data *conf = bgp->conf;
- int ret = 0;
-
- /* look up for temperature in the table and return the temperature */
- if (adc_val < conf->adc_start_val || adc_val > conf->adc_end_val) {
- ret = -ERANGE;
- goto exit;
- }
-
- *t = bgp->conf->conv_table[adc_val - conf->adc_start_val];
-
-exit:
- return ret;
-}
-
-/**
- * ti_bandgap_mcelsius_to_adc() - converts a mCelsius value to ADC scale
- * @bgp: struct ti_bandgap pointer
- * @temp: value in mCelsius
- * @adc: address where to write the resulting temperature in ADC representation
- *
- * Simple conversion from mCelsius to ADC values. In case the temp value
- * is out of the ADC conv table range, it returns -ERANGE, 0 on success.
- * The conversion table is indexed by the ADC values.
- *
- * Return: 0 if conversion was successful, else -ERANGE in case the @temp
- * argument is out of the ADC conv table range.
- */
-static
-int ti_bandgap_mcelsius_to_adc(struct ti_bandgap *bgp, long temp, int *adc)
-{
- const struct ti_bandgap_data *conf = bgp->conf;
- const int *conv_table = bgp->conf->conv_table;
- int high, low, mid, ret = 0;
-
- low = 0;
- high = conf->adc_end_val - conf->adc_start_val;
- mid = (high + low) / 2;
-
- if (temp < conv_table[low] || temp > conv_table[high]) {
- ret = -ERANGE;
- goto exit;
- }
-
- while (low < high) {
- if (temp < conv_table[mid])
- high = mid - 1;
- else
- low = mid + 1;
- mid = (low + high) / 2;
- }
-
- *adc = conf->adc_start_val + low;
-
-exit:
- return ret;
-}
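A standalone sketch of the conversion scheme used by the two helpers above, with a made-up three-entry table (the real conversion tables live in the per-SoC data files and cover the full adc_start_val..adc_end_val range); the demo_* names are assumptions for illustration:

#include <linux/errno.h>

/* Hypothetical table: mCelsius values indexed by (adc - start). */
static const int demo_conv_table[] = { -40000, -39600, -39200 };
#define DEMO_ADC_START	540
#define DEMO_ADC_END	542

static int demo_adc_to_mcelsius(int adc_val, int *t)
{
	if (adc_val < DEMO_ADC_START || adc_val > DEMO_ADC_END)
		return -ERANGE;
	*t = demo_conv_table[adc_val - DEMO_ADC_START];
	return 0;
}

/* demo_adc_to_mcelsius(541, &t) stores -39600 in t and returns 0; the
 * reverse direction is a search for the closest entry, as done above. */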
-
-/**
- * ti_bandgap_add_hyst() - add hysteresis (in mCelsius) to an ADC value
- * @bgp: struct ti_bandgap pointer
- * @adc_val: temperature value in ADC representation
- * @hyst_val: hysteresis value in mCelsius
- * @sum: address where to write the resulting temperature (in ADC scale)
- *
- * Adds a hysteresis value (in mCelsius) to an ADC temperature value.
- *
- * Return: 0 on success, -ERANGE otherwise.
- */
-static
-int ti_bandgap_add_hyst(struct ti_bandgap *bgp, int adc_val, int hyst_val,
- u32 *sum)
-{
- int temp, ret;
-
- /*
-	 * Need to add in the mCelsius domain, so we have a temperature
-	 * within the conv_table range
- */
- ret = ti_bandgap_adc_to_mcelsius(bgp, adc_val, &temp);
- if (ret < 0)
- goto exit;
-
- temp += hyst_val;
-
- ret = ti_bandgap_mcelsius_to_adc(bgp, temp, sum);
-
-exit:
- return ret;
-}
-
-/*** Helper functions handling device Alert/Shutdown signals ***/
-
-/**
- * ti_bandgap_unmask_interrupts() - unmasks the events of thot & tcold
- * @bgp: struct ti_bandgap pointer
- * @id: bandgap sensor id
- * @t_hot: hot temperature value to trigger alert signal
- * @t_cold: cold temperature value to trigger alert signal
- *
- * Checks the requested t_hot and t_cold values and configures the IRQ event
- * masks accordingly. Call this function only if bandgap features HAS(TALERT).
- */
-static void ti_bandgap_unmask_interrupts(struct ti_bandgap *bgp, int id,
- u32 t_hot, u32 t_cold)
-{
- struct temp_sensor_registers *tsr;
- u32 temp, reg_val;
-
- /* Read the current on die temperature */
- temp = ti_bandgap_read_temp(bgp, id);
-
- tsr = bgp->conf->sensors[id].registers;
- reg_val = ti_bandgap_readl(bgp, tsr->bgap_mask_ctrl);
-
- if (temp < t_hot)
- reg_val |= tsr->mask_hot_mask;
- else
- reg_val &= ~tsr->mask_hot_mask;
-
- if (t_cold < temp)
- reg_val |= tsr->mask_cold_mask;
- else
- reg_val &= ~tsr->mask_cold_mask;
- ti_bandgap_writel(bgp, reg_val, tsr->bgap_mask_ctrl);
-}
-
-/**
- * ti_bandgap_update_alert_threshold() - sequence to update thresholds
- * @bgp: struct ti_bandgap pointer
- * @id: bandgap sensor id
- * @val: value (ADC) of a new threshold
- * @hot: desired threshold to be updated. true if threshold hot, false if
- * threshold cold
- *
- * It will program the required thresholds (hot and cold) for TALERT signal.
- * This function can be used to update t_hot or t_cold, depending on @hot value.
- * It checks the resulting t_hot and t_cold values, based on the new passed @val
- * and configures the thresholds so that t_hot is always greater than t_cold.
- * Call this function only if bandgap features HAS(TALERT).
- *
- * Return: 0 if no error, else corresponding error
- */
-static int ti_bandgap_update_alert_threshold(struct ti_bandgap *bgp, int id,
- int val, bool hot)
-{
- struct temp_sensor_data *ts_data = bgp->conf->sensors[id].ts_data;
- struct temp_sensor_registers *tsr;
- u32 thresh_val, reg_val, t_hot, t_cold;
- int err = 0;
-
- tsr = bgp->conf->sensors[id].registers;
-
- /* obtain the current value */
- thresh_val = ti_bandgap_readl(bgp, tsr->bgap_threshold);
- t_cold = (thresh_val & tsr->threshold_tcold_mask) >>
- __ffs(tsr->threshold_tcold_mask);
- t_hot = (thresh_val & tsr->threshold_thot_mask) >>
- __ffs(tsr->threshold_thot_mask);
- if (hot)
- t_hot = val;
- else
- t_cold = val;
-
- if (t_cold > t_hot) {
- if (hot)
- err = ti_bandgap_add_hyst(bgp, t_hot,
- -ts_data->hyst_val,
- &t_cold);
- else
- err = ti_bandgap_add_hyst(bgp, t_cold,
- ts_data->hyst_val,
- &t_hot);
- }
-
- /* write the new threshold values */
- reg_val = thresh_val &
- ~(tsr->threshold_thot_mask | tsr->threshold_tcold_mask);
- reg_val |= (t_hot << __ffs(tsr->threshold_thot_mask)) |
- (t_cold << __ffs(tsr->threshold_tcold_mask));
- ti_bandgap_writel(bgp, reg_val, tsr->bgap_threshold);
-
- if (err) {
- dev_err(bgp->dev, "failed to reprogram thot threshold\n");
- err = -EIO;
- goto exit;
- }
-
- ti_bandgap_unmask_interrupts(bgp, id, t_hot, t_cold);
-exit:
- return err;
-}
-
-/**
- * ti_bandgap_validate() - helper to check the sanity of a struct ti_bandgap
- * @bgp: struct ti_bandgap pointer
- * @id: bandgap sensor id
- *
- * Checks if the bandgap pointer is valid and if the sensor id is also
- * applicable.
- *
- * Return: 0 if no errors, -EINVAL for invalid @bgp pointer or -ERANGE if
- * @id cannot index @bgp sensors.
- */
-static inline int ti_bandgap_validate(struct ti_bandgap *bgp, int id)
-{
- int ret = 0;
-
- if (IS_ERR_OR_NULL(bgp)) {
- pr_err("%s: invalid bandgap pointer\n", __func__);
- ret = -EINVAL;
- goto exit;
- }
-
- if ((id < 0) || (id >= bgp->conf->sensor_count)) {
- dev_err(bgp->dev, "%s: sensor id out of range (%d)\n",
- __func__, id);
- ret = -ERANGE;
- }
-
-exit:
- return ret;
-}
-
-/**
- * _ti_bandgap_write_threshold() - helper to update TALERT t_cold or t_hot
- * @bgp: struct ti_bandgap pointer
- * @id: bandgap sensor id
- * @val: value (mCelsius) of a new threshold
- * @hot: desired threshold to be updated. true if threshold hot, false if
- * threshold cold
- *
- * It will update the required thresholds (hot and cold) for TALERT signal.
- * This function can be used to update t_hot or t_cold, depending on @hot value.
- * Validates the mCelsius range and updates the requested threshold.
- * Call this function only if bandgap features HAS(TALERT).
- *
- * Return: 0 if no error, else corresponding error value.
- */
-static int _ti_bandgap_write_threshold(struct ti_bandgap *bgp, int id, int val,
- bool hot)
-{
- struct temp_sensor_data *ts_data;
- struct temp_sensor_registers *tsr;
- u32 adc_val;
- int ret;
-
- ret = ti_bandgap_validate(bgp, id);
- if (ret)
- goto exit;
-
- if (!TI_BANDGAP_HAS(bgp, TALERT)) {
- ret = -ENOTSUPP;
- goto exit;
- }
-
- ts_data = bgp->conf->sensors[id].ts_data;
- tsr = bgp->conf->sensors[id].registers;
- if (hot) {
- if (val < ts_data->min_temp + ts_data->hyst_val)
- ret = -EINVAL;
- } else {
- if (val > ts_data->max_temp + ts_data->hyst_val)
- ret = -EINVAL;
- }
-
- if (ret)
- goto exit;
-
- ret = ti_bandgap_mcelsius_to_adc(bgp, val, &adc_val);
- if (ret < 0)
- goto exit;
-
- spin_lock(&bgp->lock);
- ret = ti_bandgap_update_alert_threshold(bgp, id, adc_val, hot);
- spin_unlock(&bgp->lock);
-
-exit:
- return ret;
-}
-
-/**
- * _ti_bandgap_read_threshold() - helper to read TALERT t_cold or t_hot
- * @bgp: struct ti_bandgap pointer
- * @id: bandgap sensor id
- * @val: value (mCelsius) of a threshold
- * @hot: desired threshold to be read. true if threshold hot, false if
- * threshold cold
- *
- * It will fetch the required thresholds (hot and cold) for TALERT signal.
- * This function can be used to read t_hot or t_cold, depending on @hot value.
- * Call this function only if bandgap features HAS(TALERT).
- *
- * Return: 0 if no error, -ENOTSUPP if it has no TALERT support, or the
- * corresponding error value if some operation fails.
- */
-static int _ti_bandgap_read_threshold(struct ti_bandgap *bgp, int id,
- int *val, bool hot)
-{
- struct temp_sensor_registers *tsr;
- u32 temp, mask;
- int ret = 0;
-
- ret = ti_bandgap_validate(bgp, id);
- if (ret)
- goto exit;
-
- if (!TI_BANDGAP_HAS(bgp, TALERT)) {
- ret = -ENOTSUPP;
- goto exit;
- }
-
- tsr = bgp->conf->sensors[id].registers;
- if (hot)
- mask = tsr->threshold_thot_mask;
- else
- mask = tsr->threshold_tcold_mask;
-
- temp = ti_bandgap_readl(bgp, tsr->bgap_threshold);
- temp = (temp & mask) >> __ffs(mask);
- ret |= ti_bandgap_adc_to_mcelsius(bgp, temp, &temp);
- if (ret) {
- dev_err(bgp->dev, "failed to read thot\n");
- ret = -EIO;
- goto exit;
- }
-
- *val = temp;
-
-exit:
- return ret;
-}
-
-/*** Exposed APIs ***/
-
-/**
- * ti_bandgap_read_thot() - reads sensor current thot
- * @bgp: pointer to bandgap instance
- * @id: sensor id
- * @thot: resulting current thot value
- *
- * Return: 0 on success or the proper error code
- */
-int ti_bandgap_read_thot(struct ti_bandgap *bgp, int id, int *thot)
-{
- return _ti_bandgap_read_threshold(bgp, id, thot, true);
-}
-
-/**
- * ti_bandgap_write_thot() - sets sensor current thot
- * @bgp: pointer to bandgap instance
- * @id: sensor id
- * @val: desired thot value
- *
- * Return: 0 on success or the proper error code
- */
-int ti_bandgap_write_thot(struct ti_bandgap *bgp, int id, int val)
-{
- return _ti_bandgap_write_threshold(bgp, id, val, true);
-}
-
-/**
- * ti_bandgap_read_tcold() - reads sensor current tcold
- * @bgp: pointer to bandgap instance
- * @id: sensor id
- * @tcold: resulting current tcold value
- *
- * Return: 0 on success or the proper error code
- */
-int ti_bandgap_read_tcold(struct ti_bandgap *bgp, int id, int *tcold)
-{
- return _ti_bandgap_read_threshold(bgp, id, tcold, false);
-}
-
-/**
- * ti_bandgap_write_tcold() - sets the sensor tcold
- * @bgp: pointer to bandgap instance
- * @id: sensor id
- * @val: desired tcold value
- *
- * Return: 0 on success or the proper error code
- */
-int ti_bandgap_write_tcold(struct ti_bandgap *bgp, int id, int val)
-{
- return _ti_bandgap_write_threshold(bgp, id, val, false);
-}
-
-/**
- * ti_bandgap_read_counter() - read the sensor counter
- * @bgp: pointer to bandgap instance
- * @id: sensor id
- * @interval: resulting update interval in milliseconds
- */
-static void ti_bandgap_read_counter(struct ti_bandgap *bgp, int id,
- int *interval)
-{
- struct temp_sensor_registers *tsr;
- int time;
-
- tsr = bgp->conf->sensors[id].registers;
- time = ti_bandgap_readl(bgp, tsr->bgap_counter);
- time = (time & tsr->counter_mask) >>
- __ffs(tsr->counter_mask);
- time = time * 1000 / bgp->clk_rate;
- *interval = time;
-}
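A worked example of the counter arithmetic above, with assumed figures (the 32768 Hz clock rate is an assumption for illustration, not a statement about the hardware):

/* counter -> interval: 8192 * 1000 / 32768 = 250 ms */
static unsigned int demo_counter_to_ms(void)
{
	unsigned int clk_rate = 32768;	/* Hz, assumed div_ts_ck rate */
	unsigned int counter = 8192;	/* BANDGAP_COUNTER.counter field */

	return counter * 1000 / clk_rate;
}

The inverse, as done by ti_bandgap_write_counter() further below, is interval * clk_rate / 1000, so writing 250 ms back yields the same counter value of 8192.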
-
-/**
- * ti_bandgap_read_counter_delay() - read the sensor counter delay
- * @bgp: pointer to bandgap instance
- * @id: sensor id
- * @interval: resulting update interval in milliseconds
- */
-static void ti_bandgap_read_counter_delay(struct ti_bandgap *bgp, int id,
- int *interval)
-{
- struct temp_sensor_registers *tsr;
- int reg_val;
-
- tsr = bgp->conf->sensors[id].registers;
-
- reg_val = ti_bandgap_readl(bgp, tsr->bgap_mask_ctrl);
- reg_val = (reg_val & tsr->mask_counter_delay_mask) >>
- __ffs(tsr->mask_counter_delay_mask);
- switch (reg_val) {
- case 0:
- *interval = 0;
- break;
- case 1:
- *interval = 1;
- break;
- case 2:
- *interval = 10;
- break;
- case 3:
- *interval = 100;
- break;
- case 4:
- *interval = 250;
- break;
- case 5:
- *interval = 500;
- break;
- default:
- dev_warn(bgp->dev, "Wrong counter delay value read from register %X",
- reg_val);
- }
-}
-
-/**
- * ti_bandgap_read_update_interval() - read the sensor update interval
- * @bgp: pointer to bandgap instance
- * @id: sensor id
- * @interval: resulting update interval in milliseconds
- *
- * Return: 0 on success or the proper error code
- */
-int ti_bandgap_read_update_interval(struct ti_bandgap *bgp, int id,
- int *interval)
-{
- int ret = 0;
-
- ret = ti_bandgap_validate(bgp, id);
- if (ret)
- goto exit;
-
- if (!TI_BANDGAP_HAS(bgp, COUNTER) &&
- !TI_BANDGAP_HAS(bgp, COUNTER_DELAY)) {
- ret = -ENOTSUPP;
- goto exit;
- }
-
- if (TI_BANDGAP_HAS(bgp, COUNTER)) {
- ti_bandgap_read_counter(bgp, id, interval);
- goto exit;
- }
-
- ti_bandgap_read_counter_delay(bgp, id, interval);
-exit:
- return ret;
-}
-
-/**
- * ti_bandgap_write_counter_delay() - set the counter_delay
- * @bgp: pointer to bandgap instance
- * @id: sensor id
- * @interval: desired update interval in milliseconds
- *
- * Return: 0 on success or the proper error code
- */
-static int ti_bandgap_write_counter_delay(struct ti_bandgap *bgp, int id,
- u32 interval)
-{
- int rval;
-
- switch (interval) {
- case 0: /* Immediate conversion */
- rval = 0x0;
- break;
-	case 1: /* Conversion after every 1 ms */
-		rval = 0x1;
-		break;
-	case 10: /* Conversion after every 10 ms */
-		rval = 0x2;
-		break;
-	case 100: /* Conversion after every 100 ms */
-		rval = 0x3;
-		break;
-	case 250: /* Conversion after every 250 ms */
-		rval = 0x4;
-		break;
-	case 500: /* Conversion after every 500 ms */
- rval = 0x5;
- break;
- default:
- dev_warn(bgp->dev, "Delay %d ms is not supported\n", interval);
- return -EINVAL;
- }
-
- spin_lock(&bgp->lock);
- RMW_BITS(bgp, id, bgap_mask_ctrl, mask_counter_delay_mask, rval);
- spin_unlock(&bgp->lock);
-
- return 0;
-}
-
-/**
- * ti_bandgap_write_counter() - set the bandgap sensor counter
- * @bgp: pointer to bandgap instance
- * @id: sensor id
- * @interval: desired update interval in milliseconds
- */
-static void ti_bandgap_write_counter(struct ti_bandgap *bgp, int id,
- u32 interval)
-{
- interval = interval * bgp->clk_rate / 1000;
- spin_lock(&bgp->lock);
- RMW_BITS(bgp, id, bgap_counter, counter_mask, interval);
- spin_unlock(&bgp->lock);
-}
-
-/**
- * ti_bandgap_write_update_interval() - set the update interval
- * @bgp: pointer to bandgap instance
- * @id: sensor id
- * @interval: desired update interval in milliseconds
- *
- * Return: 0 on success or the proper error code
- */
-int ti_bandgap_write_update_interval(struct ti_bandgap *bgp,
- int id, u32 interval)
-{
- int ret = ti_bandgap_validate(bgp, id);
- if (ret)
- goto exit;
-
- if (!TI_BANDGAP_HAS(bgp, COUNTER) &&
- !TI_BANDGAP_HAS(bgp, COUNTER_DELAY)) {
- ret = -ENOTSUPP;
- goto exit;
- }
-
- if (TI_BANDGAP_HAS(bgp, COUNTER)) {
- ti_bandgap_write_counter(bgp, id, interval);
- goto exit;
- }
-
- ret = ti_bandgap_write_counter_delay(bgp, id, interval);
-exit:
- return ret;
-}
-
-/**
- * ti_bandgap_read_temperature() - report current temperature
- * @bgp: pointer to bandgap instance
- * @id: sensor id
- * @temperature: resulting temperature
- *
- * Return: 0 on success or the proper error code
- */
-int ti_bandgap_read_temperature(struct ti_bandgap *bgp, int id,
- int *temperature)
-{
- u32 temp;
- int ret;
-
- ret = ti_bandgap_validate(bgp, id);
- if (ret)
- return ret;
-
- spin_lock(&bgp->lock);
- temp = ti_bandgap_read_temp(bgp, id);
- spin_unlock(&bgp->lock);
-
- ret |= ti_bandgap_adc_to_mcelsius(bgp, temp, &temp);
- if (ret)
- return -EIO;
-
- *temperature = temp;
-
- return 0;
-}
-
-/**
- * ti_bandgap_set_sensor_data() - helper function to store thermal
- * framework related data.
- * @bgp: pointer to bandgap instance
- * @id: sensor id
- * @data: thermal framework related data to be stored
- *
- * Return: 0 on success or the proper error code
- */
-int ti_bandgap_set_sensor_data(struct ti_bandgap *bgp, int id, void *data)
-{
- int ret = ti_bandgap_validate(bgp, id);
- if (ret)
- return ret;
-
- bgp->regval[id].data = data;
-
- return 0;
-}
-
-/**
- * ti_bandgap_get_sensor_data() - helper function to get thermal
- * framework related data.
- * @bgp: pointer to bandgap instance
- * @id: sensor id
- *
- * Return: data stored by set function with sensor id on success or NULL
- */
-void *ti_bandgap_get_sensor_data(struct ti_bandgap *bgp, int id)
-{
- int ret = ti_bandgap_validate(bgp, id);
- if (ret)
- return ERR_PTR(ret);
-
- return bgp->regval[id].data;
-}
-
-/*** Helper functions used during device initialization ***/
-
-/**
- * ti_bandgap_force_single_read() - executes 1 single ADC conversion
- * @bgp: pointer to struct ti_bandgap
- * @id: sensor id for which a single temperature read is desired
- *
- * Used to initialize the conversion state machine and set it to a valid
- * state. Called during device initialization and context restore events.
- *
- * Return: 0
- */
-static int
-ti_bandgap_force_single_read(struct ti_bandgap *bgp, int id)
-{
- u32 temp = 0, counter = 1000;
-
- /* Select single conversion mode */
- if (TI_BANDGAP_HAS(bgp, MODE_CONFIG))
- RMW_BITS(bgp, id, bgap_mode_ctrl, mode_ctrl_mask, 0);
-
- /* Start of Conversion = 1 */
- RMW_BITS(bgp, id, temp_sensor_ctrl, bgap_soc_mask, 1);
- /* Wait until DTEMP is updated */
- temp = ti_bandgap_read_temp(bgp, id);
-
- while ((temp == 0) && --counter)
- temp = ti_bandgap_read_temp(bgp, id);
- /* REVISIT: Check correct condition for end of conversion */
-
- /* Start of Conversion = 0 */
- RMW_BITS(bgp, id, temp_sensor_ctrl, bgap_soc_mask, 0);
-
- return 0;
-}
-
-/**
- * ti_bandgap_set_continuous_mode() - one-time enabling of continuous mode
- * @bgp: pointer to struct ti_bandgap
- *
- * Call this function only if HAS(MODE_CONFIG) is set. As this driver may
- * be used for junction temperature monitoring, it is desirable that the
- * sensors are operational all the time, so that alerts are generated
- * properly.
- *
- * Return: 0
- */
-static int ti_bandgap_set_continuous_mode(struct ti_bandgap *bgp)
-{
- int i;
-
- for (i = 0; i < bgp->conf->sensor_count; i++) {
- /* Perform a single read just before enabling continuous */
- ti_bandgap_force_single_read(bgp, i);
- RMW_BITS(bgp, i, bgap_mode_ctrl, mode_ctrl_mask, 1);
- }
-
- return 0;
-}
-
-/**
- * ti_bandgap_get_trend() - To fetch the temperature trend of a sensor
- * @bgp: pointer to struct ti_bandgap
- * @id: id of the individual sensor
- * @trend: Pointer to trend.
- *
- * This function needs to be called to fetch the temperature trend of a
- * particular sensor. The function computes the difference in temperature
- * w.r.t. time. For bandgaps with a built-in history buffer the temperatures
- * are read from the buffer; for those without one, -ENOTSUPP is returned.
- *
- * Return: 0 if no error, else return corresponding error. If no
- * error then the trend value is passed on to trend parameter
- */
-int ti_bandgap_get_trend(struct ti_bandgap *bgp, int id, int *trend)
-{
- struct temp_sensor_registers *tsr;
- u32 temp1, temp2, reg1, reg2;
- int t1, t2, interval, ret = 0;
-
- ret = ti_bandgap_validate(bgp, id);
- if (ret)
- goto exit;
-
- if (!TI_BANDGAP_HAS(bgp, HISTORY_BUFFER) ||
- !TI_BANDGAP_HAS(bgp, FREEZE_BIT)) {
- ret = -ENOTSUPP;
- goto exit;
- }
-
- tsr = bgp->conf->sensors[id].registers;
-
- /* Freeze and read the last 2 valid readings */
- reg1 = tsr->ctrl_dtemp_1;
- reg2 = tsr->ctrl_dtemp_2;
-
- /* read temperature from history buffer */
- temp1 = ti_bandgap_readl(bgp, reg1);
- temp1 &= tsr->bgap_dtemp_mask;
-
- temp2 = ti_bandgap_readl(bgp, reg2);
- temp2 &= tsr->bgap_dtemp_mask;
-
- /* Convert from adc values to mCelsius temperature */
- ret = ti_bandgap_adc_to_mcelsius(bgp, temp1, &t1);
- if (ret)
- goto exit;
-
- ret = ti_bandgap_adc_to_mcelsius(bgp, temp2, &t2);
- if (ret)
- goto exit;
-
- /* Fetch the update interval */
- ret = ti_bandgap_read_update_interval(bgp, id, &interval);
- if (ret || !interval)
- goto exit;
-
- *trend = (t1 - t2) / interval;
-
- dev_dbg(bgp->dev, "The temperatures are t1 = %d and t2 = %d and trend =%d\n",
- t1, t2, *trend);
-
-exit:
- return ret;
-}
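A worked example with assumed readings: if the two buffered samples convert to t1 = 54000 and t2 = 53000 mCelsius and the update interval reads back as 250 ms, the reported trend is (54000 - 53000) / 250 = 4, i.e. the temperature is rising by 4 mCelsius per millisecond of update interval.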
-
-/**
- * ti_bandgap_tshut_init() - setup and initialize tshut handling
- * @bgp: pointer to struct ti_bandgap
- * @pdev: pointer to device struct platform_device
- *
- * Call this function only in case the bandgap features HAS(TSHUT).
- * In this case, the driver needs to handle the TSHUT signal as an IRQ.
- * The IRQ is wired as a GPIO, and for this purpose, it is required
- * to specify which GPIO line is used. TSHUT IRQ is fired anytime
- * one of the bandgap sensors violates the TSHUT high/hot threshold.
- * In that case, the system must be shut down.
- *
- * Return: 0 if no error, else error status
- */
-static int ti_bandgap_tshut_init(struct ti_bandgap *bgp,
- struct platform_device *pdev)
-{
- int gpio_nr = bgp->tshut_gpio;
- int status;
-
-	/* Request the TSHUT GPIO line */
- status = gpio_request(gpio_nr, "tshut");
- if (status < 0) {
-		dev_err(bgp->dev, "Could not request TSHUT GPIO: %d\n", gpio_nr);
- return status;
- }
- status = gpio_direction_input(gpio_nr);
- if (status) {
- dev_err(bgp->dev, "Cannot set input TSHUT GPIO %d\n", gpio_nr);
- return status;
- }
-
- status = request_irq(gpio_to_irq(gpio_nr), ti_bandgap_tshut_irq_handler,
- IRQF_TRIGGER_RISING, "tshut", NULL);
-	if (status) {
-		gpio_free(gpio_nr);
-		dev_err(bgp->dev, "request irq failed for TSHUT");
-		return status;
-	}
-
- return 0;
-}
-
-/**
- * ti_bandgap_alert_init() - setup and initialize talert handling
- * @bgp: pointer to struct ti_bandgap
- * @pdev: pointer to device struct platform_device
- *
- * Call this function only in case the bandgap features HAS(TALERT).
- * In this case, the driver needs to handle the TALERT signal as an IRQ.
- * TALERT is a normal IRQ and it is fired any time the thresholds (hot or cold)
- * are violated. In that situation, the driver must reprogram the thresholds
- * according to the specified policy.
- *
- * Return: 0 if no error, else return corresponding error.
- */
-static int ti_bandgap_talert_init(struct ti_bandgap *bgp,
- struct platform_device *pdev)
-{
- int ret;
-
- bgp->irq = platform_get_irq(pdev, 0);
- if (bgp->irq < 0) {
- dev_err(&pdev->dev, "get_irq failed\n");
- return bgp->irq;
- }
- ret = request_threaded_irq(bgp->irq, NULL,
- ti_bandgap_talert_irq_handler,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- "talert", bgp);
- if (ret) {
- dev_err(&pdev->dev, "Request threaded irq failed.\n");
- return ret;
- }
-
- return 0;
-}
-
-static const struct of_device_id of_ti_bandgap_match[];
-/**
- * ti_bandgap_build() - parse DT and setup a struct ti_bandgap
- * @pdev: pointer to device struct platform_device
- *
- * Used to read the device tree properties accordingly to the bandgap
- * matching version. Based on bandgap version and its capabilities it
- * will build a struct ti_bandgap out of the required DT entries.
- *
- * Return: valid bandgap structure if successful, else an ERR_PTR value;
- * the return value must be verified with IS_ERR().
- */
-static struct ti_bandgap *ti_bandgap_build(struct platform_device *pdev)
-{
- struct device_node *node = pdev->dev.of_node;
- const struct of_device_id *of_id;
- struct ti_bandgap *bgp;
- struct resource *res;
- u32 prop;
- int i;
-
-	/* bail out early if there is no DT node to parse */
- if (!node) {
- dev_err(&pdev->dev, "no platform information available\n");
- return ERR_PTR(-EINVAL);
- }
-
- bgp = devm_kzalloc(&pdev->dev, sizeof(*bgp), GFP_KERNEL);
- if (!bgp) {
- dev_err(&pdev->dev, "Unable to allocate mem for driver ref\n");
- return ERR_PTR(-ENOMEM);
- }
-
- of_id = of_match_device(of_ti_bandgap_match, &pdev->dev);
- if (of_id)
- bgp->conf = of_id->data;
-
- /* register shadow for context save and restore */
- bgp->regval = devm_kzalloc(&pdev->dev, sizeof(*bgp->regval) *
- bgp->conf->sensor_count, GFP_KERNEL);
-	if (!bgp->regval) {
-		dev_err(&pdev->dev, "Unable to allocate mem for register values\n");
- return ERR_PTR(-ENOMEM);
- }
-
- i = 0;
- do {
- void __iomem *chunk;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (!res)
- break;
- chunk = devm_ioremap_resource(&pdev->dev, res);
- if (i == 0)
- bgp->base = chunk;
- if (IS_ERR(chunk))
- return ERR_CAST(chunk);
-
- i++;
- } while (res);
-
- if (TI_BANDGAP_HAS(bgp, TSHUT)) {
- if (of_property_read_u32(node, "ti,tshut-gpio", &prop) < 0) {
- dev_err(&pdev->dev, "missing tshut gpio in device tree\n");
- return ERR_PTR(-EINVAL);
- }
- bgp->tshut_gpio = prop;
- if (!gpio_is_valid(bgp->tshut_gpio)) {
- dev_err(&pdev->dev, "invalid gpio for tshut (%d)\n",
- bgp->tshut_gpio);
- return ERR_PTR(-EINVAL);
- }
- }
-
- return bgp;
-}
-
-/*** Device driver call backs ***/
-
-static
-int ti_bandgap_probe(struct platform_device *pdev)
-{
- struct ti_bandgap *bgp;
- int clk_rate, ret = 0, i;
-
- bgp = ti_bandgap_build(pdev);
- if (IS_ERR_OR_NULL(bgp)) {
- dev_err(&pdev->dev, "failed to fetch platform data\n");
- return PTR_ERR(bgp);
- }
- bgp->dev = &pdev->dev;
-
- if (TI_BANDGAP_HAS(bgp, TSHUT)) {
- ret = ti_bandgap_tshut_init(bgp, pdev);
- if (ret) {
- dev_err(&pdev->dev,
- "failed to initialize system tshut IRQ\n");
- return ret;
- }
- }
-
- bgp->fclock = clk_get(NULL, bgp->conf->fclock_name);
- ret = IS_ERR_OR_NULL(bgp->fclock);
- if (ret) {
- dev_err(&pdev->dev, "failed to request fclock reference\n");
- goto free_irqs;
- }
-
- bgp->div_clk = clk_get(NULL, bgp->conf->div_ck_name);
- ret = IS_ERR_OR_NULL(bgp->div_clk);
- if (ret) {
- dev_err(&pdev->dev,
- "failed to request div_ts_ck clock ref\n");
- goto free_irqs;
- }
-
- for (i = 0; i < bgp->conf->sensor_count; i++) {
- struct temp_sensor_registers *tsr;
- u32 val;
-
- tsr = bgp->conf->sensors[i].registers;
- /*
- * check if the efuse has a non-zero value if not
- * it is an untrimmed sample and the temperatures
- * may not be accurate
- */
- val = ti_bandgap_readl(bgp, tsr->bgap_efuse);
- if (ret || !val)
- dev_info(&pdev->dev,
- "Non-trimmed BGAP, Temp not accurate\n");
- }
-
- clk_rate = clk_round_rate(bgp->div_clk,
- bgp->conf->sensors[0].ts_data->max_freq);
- if (clk_rate < bgp->conf->sensors[0].ts_data->min_freq ||
- clk_rate == 0xffffffff) {
- ret = -ENODEV;
- dev_err(&pdev->dev, "wrong clock rate (%d)\n", clk_rate);
- goto put_clks;
- }
-
- ret = clk_set_rate(bgp->div_clk, clk_rate);
- if (ret)
- dev_err(&pdev->dev, "Cannot re-set clock rate. Continuing\n");
-
- bgp->clk_rate = clk_rate;
- if (TI_BANDGAP_HAS(bgp, CLK_CTRL))
- clk_prepare_enable(bgp->fclock);
-
-
- spin_lock_init(&bgp->lock);
- bgp->dev = &pdev->dev;
- platform_set_drvdata(pdev, bgp);
-
- ti_bandgap_power(bgp, true);
-
- /* Set default counter to 1 for now */
- if (TI_BANDGAP_HAS(bgp, COUNTER))
- for (i = 0; i < bgp->conf->sensor_count; i++)
- RMW_BITS(bgp, i, bgap_counter, counter_mask, 1);
-
- /* Set default thresholds for alert and shutdown */
- for (i = 0; i < bgp->conf->sensor_count; i++) {
- struct temp_sensor_data *ts_data;
-
- ts_data = bgp->conf->sensors[i].ts_data;
-
- if (TI_BANDGAP_HAS(bgp, TALERT)) {
- /* Set initial Talert thresholds */
- RMW_BITS(bgp, i, bgap_threshold,
- threshold_tcold_mask, ts_data->t_cold);
- RMW_BITS(bgp, i, bgap_threshold,
- threshold_thot_mask, ts_data->t_hot);
- /* Enable the alert events */
- RMW_BITS(bgp, i, bgap_mask_ctrl, mask_hot_mask, 1);
- RMW_BITS(bgp, i, bgap_mask_ctrl, mask_cold_mask, 1);
- }
-
- if (TI_BANDGAP_HAS(bgp, TSHUT_CONFIG)) {
- /* Set initial Tshut thresholds */
- RMW_BITS(bgp, i, tshut_threshold,
- tshut_hot_mask, ts_data->tshut_hot);
- RMW_BITS(bgp, i, tshut_threshold,
- tshut_cold_mask, ts_data->tshut_cold);
- }
- }
-
- if (TI_BANDGAP_HAS(bgp, MODE_CONFIG))
- ti_bandgap_set_continuous_mode(bgp);
-
-	/* Set a 250 ms period as the default counter value */
- if (TI_BANDGAP_HAS(bgp, COUNTER))
- for (i = 0; i < bgp->conf->sensor_count; i++)
- RMW_BITS(bgp, i, bgap_counter, counter_mask,
- bgp->clk_rate / 4);
-
-	/* Everything is good? Then expose the sensors */
- for (i = 0; i < bgp->conf->sensor_count; i++) {
- char *domain;
-
- if (bgp->conf->sensors[i].register_cooling) {
- ret = bgp->conf->sensors[i].register_cooling(bgp, i);
- if (ret)
- goto remove_sensors;
- }
-
- if (bgp->conf->expose_sensor) {
- domain = bgp->conf->sensors[i].domain;
- ret = bgp->conf->expose_sensor(bgp, i, domain);
- if (ret)
- goto remove_last_cooling;
- }
- }
-
- /*
-	 * Enable the interrupts once everything is set. Otherwise the IRQ
-	 * handler might be called as soon as it is enabled, while the rest of
-	 * the framework is still being initialised.
- */
- if (TI_BANDGAP_HAS(bgp, TALERT)) {
- ret = ti_bandgap_talert_init(bgp, pdev);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize Talert IRQ\n");
- i = bgp->conf->sensor_count;
- goto disable_clk;
- }
- }
-
- return 0;
-
-remove_last_cooling:
- if (bgp->conf->sensors[i].unregister_cooling)
- bgp->conf->sensors[i].unregister_cooling(bgp, i);
-remove_sensors:
- for (i--; i >= 0; i--) {
- if (bgp->conf->sensors[i].unregister_cooling)
- bgp->conf->sensors[i].unregister_cooling(bgp, i);
- if (bgp->conf->remove_sensor)
- bgp->conf->remove_sensor(bgp, i);
- }
- ti_bandgap_power(bgp, false);
-disable_clk:
- if (TI_BANDGAP_HAS(bgp, CLK_CTRL))
- clk_disable_unprepare(bgp->fclock);
-put_clks:
- clk_put(bgp->fclock);
- clk_put(bgp->div_clk);
-free_irqs:
- if (TI_BANDGAP_HAS(bgp, TSHUT)) {
- free_irq(gpio_to_irq(bgp->tshut_gpio), NULL);
- gpio_free(bgp->tshut_gpio);
- }
-
- return ret;
-}
-
-static
-int ti_bandgap_remove(struct platform_device *pdev)
-{
- struct ti_bandgap *bgp = platform_get_drvdata(pdev);
- int i;
-
- /* First thing is to remove sensor interfaces */
- for (i = 0; i < bgp->conf->sensor_count; i++) {
- if (bgp->conf->sensors[i].unregister_cooling)
- bgp->conf->sensors[i].unregister_cooling(bgp, i);
-
- if (bgp->conf->remove_sensor)
- bgp->conf->remove_sensor(bgp, i);
- }
-
- ti_bandgap_power(bgp, false);
-
- if (TI_BANDGAP_HAS(bgp, CLK_CTRL))
- clk_disable_unprepare(bgp->fclock);
- clk_put(bgp->fclock);
- clk_put(bgp->div_clk);
-
- if (TI_BANDGAP_HAS(bgp, TALERT))
- free_irq(bgp->irq, bgp);
-
- if (TI_BANDGAP_HAS(bgp, TSHUT)) {
- free_irq(gpio_to_irq(bgp->tshut_gpio), NULL);
- gpio_free(bgp->tshut_gpio);
- }
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int ti_bandgap_save_ctxt(struct ti_bandgap *bgp)
-{
- int i;
-
- for (i = 0; i < bgp->conf->sensor_count; i++) {
- struct temp_sensor_registers *tsr;
- struct temp_sensor_regval *rval;
-
- rval = &bgp->regval[i];
- tsr = bgp->conf->sensors[i].registers;
-
- if (TI_BANDGAP_HAS(bgp, MODE_CONFIG))
- rval->bg_mode_ctrl = ti_bandgap_readl(bgp,
- tsr->bgap_mode_ctrl);
- if (TI_BANDGAP_HAS(bgp, COUNTER))
- rval->bg_counter = ti_bandgap_readl(bgp,
- tsr->bgap_counter);
- if (TI_BANDGAP_HAS(bgp, TALERT)) {
- rval->bg_threshold = ti_bandgap_readl(bgp,
- tsr->bgap_threshold);
- rval->bg_ctrl = ti_bandgap_readl(bgp,
- tsr->bgap_mask_ctrl);
- }
-
- if (TI_BANDGAP_HAS(bgp, TSHUT_CONFIG))
- rval->tshut_threshold = ti_bandgap_readl(bgp,
- tsr->tshut_threshold);
- }
-
- return 0;
-}
-
-static int ti_bandgap_restore_ctxt(struct ti_bandgap *bgp)
-{
- int i;
-
- for (i = 0; i < bgp->conf->sensor_count; i++) {
- struct temp_sensor_registers *tsr;
- struct temp_sensor_regval *rval;
- u32 val = 0;
-
- rval = &bgp->regval[i];
- tsr = bgp->conf->sensors[i].registers;
-
- if (TI_BANDGAP_HAS(bgp, COUNTER))
- val = ti_bandgap_readl(bgp, tsr->bgap_counter);
-
- if (TI_BANDGAP_HAS(bgp, TSHUT_CONFIG))
- ti_bandgap_writel(bgp, rval->tshut_threshold,
- tsr->tshut_threshold);
- /* Force immediate temperature measurement and update
- * of the DTEMP field
- */
- ti_bandgap_force_single_read(bgp, i);
-
- if (TI_BANDGAP_HAS(bgp, COUNTER))
- ti_bandgap_writel(bgp, rval->bg_counter,
- tsr->bgap_counter);
- if (TI_BANDGAP_HAS(bgp, MODE_CONFIG))
- ti_bandgap_writel(bgp, rval->bg_mode_ctrl,
- tsr->bgap_mode_ctrl);
- if (TI_BANDGAP_HAS(bgp, TALERT)) {
- ti_bandgap_writel(bgp, rval->bg_threshold,
- tsr->bgap_threshold);
- ti_bandgap_writel(bgp, rval->bg_ctrl,
- tsr->bgap_mask_ctrl);
- }
- }
-
- return 0;
-}
-
-static int ti_bandgap_suspend(struct device *dev)
-{
- struct ti_bandgap *bgp = dev_get_drvdata(dev);
- int err;
-
- err = ti_bandgap_save_ctxt(bgp);
- ti_bandgap_power(bgp, false);
-
- if (TI_BANDGAP_HAS(bgp, CLK_CTRL))
- clk_disable_unprepare(bgp->fclock);
-
- return err;
-}
-
-static int ti_bandgap_resume(struct device *dev)
-{
- struct ti_bandgap *bgp = dev_get_drvdata(dev);
-
- if (TI_BANDGAP_HAS(bgp, CLK_CTRL))
- clk_prepare_enable(bgp->fclock);
-
- ti_bandgap_power(bgp, true);
-
- return ti_bandgap_restore_ctxt(bgp);
-}
-static const struct dev_pm_ops ti_bandgap_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ti_bandgap_suspend,
- ti_bandgap_resume)
-};
-
-#define DEV_PM_OPS (&ti_bandgap_dev_pm_ops)
-#else
-#define DEV_PM_OPS NULL
-#endif
-
-static const struct of_device_id of_ti_bandgap_match[] = {
-#ifdef CONFIG_OMAP4_THERMAL
- {
- .compatible = "ti,omap4430-bandgap",
- .data = (void *)&omap4430_data,
- },
- {
- .compatible = "ti,omap4460-bandgap",
- .data = (void *)&omap4460_data,
- },
- {
- .compatible = "ti,omap4470-bandgap",
- .data = (void *)&omap4470_data,
- },
-#endif
-#ifdef CONFIG_OMAP5_THERMAL
- {
- .compatible = "ti,omap5430-bandgap",
- .data = (void *)&omap5430_data,
- },
-#endif
- /* Sentinel */
- { },
-};
-MODULE_DEVICE_TABLE(of, of_ti_bandgap_match);
-
-static struct platform_driver ti_bandgap_sensor_driver = {
- .probe = ti_bandgap_probe,
- .remove = ti_bandgap_remove,
- .driver = {
- .name = "ti-soc-thermal",
- .pm = DEV_PM_OPS,
- .of_match_table = of_ti_bandgap_match,
- },
-};
-
-module_platform_driver(ti_bandgap_sensor_driver);
-
-MODULE_DESCRIPTION("OMAP4+ bandgap temperature sensor driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:ti-soc-thermal");
-MODULE_AUTHOR("Texas Instruments Inc.");
diff --git a/drivers/staging/ti-soc-thermal/ti-bandgap.h b/drivers/staging/ti-soc-thermal/ti-bandgap.h
deleted file mode 100644
index 5f4794a..0000000
--- a/drivers/staging/ti-soc-thermal/ti-bandgap.h
+++ /dev/null
@@ -1,403 +0,0 @@
-/*
- * OMAP4 Bandgap temperature sensor driver
- *
- * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
- * Contact:
- * Eduardo Valentin <eduardo.valentin@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-#ifndef __TI_BANDGAP_H
-#define __TI_BANDGAP_H
-
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/err.h>
-
-/**
- * DOC: bandgap driver data structure
- * ==================================
- *
- * +----------+----------------+
- * | struct temp_sensor_regval |
- * +---------------------------+
- * * (Array of)
- * |
- * |
- * +-------------------+ +-----------------+
- * | struct ti_bandgap |-->| struct device * |
- * +----------+--------+ +-----------------+
- * |
- * |
- * V
- * +------------------------+
- * | struct ti_bandgap_data |
- * +------------------------+
- * |
- * |
- * * (Array of)
- * +------------+------------------------------------------------------+
- * | +----------+------------+ +-------------------------+ |
- * | | struct ti_temp_sensor |-->| struct temp_sensor_data | |
- * | +-----------------------+ +------------+------------+ |
- * | | |
- * | + |
- * | V |
- * | +----------+-------------------+ |
- * | | struct temp_sensor_registers | |
- * | +------------------------------+ |
- * | |
- * +-------------------------------------------------------------------+
- *
- * Above is a simple diagram describing how the data structures below
- * are organized. For each bandgap device there should be a ti_bandgap_data
- * containing the device instance configuration, as well as an array of
- * sensors, representing every sensor instance present in this bandgap.
- */
-
-/**
- * struct temp_sensor_registers - descriptor to access registers and bitfields
- * @temp_sensor_ctrl: TEMP_SENSOR_CTRL register offset
- * @bgap_tempsoff_mask: mask to temp_sensor_ctrl.tempsoff
- * @bgap_soc_mask: mask to temp_sensor_ctrl.soc
- * @bgap_eocz_mask: mask to temp_sensor_ctrl.eocz
- * @bgap_dtemp_mask: mask to temp_sensor_ctrl.dtemp
- * @bgap_mask_ctrl: BANDGAP_MASK_CTRL register offset
- * @mask_hot_mask: mask to bandgap_mask_ctrl.mask_hot
- * @mask_cold_mask: mask to bandgap_mask_ctrl.mask_cold
- * @mask_sidlemode_mask: mask to bandgap_mask_ctrl.mask_sidlemode
- * @mask_counter_delay_mask: mask to bandgap_mask_ctrl.mask_counter_delay
- * @mask_freeze_mask: mask to bandgap_mask_ctrl.mask_freeze
- * @mask_clear_mask: mask to bandgap_mask_ctrl.mask_clear
- * @mask_clear_accum_mask: mask to bandgap_mask_ctrl.mask_clear_accum
- * @bgap_mode_ctrl: BANDGAP_MODE_CTRL register offset
- * @mode_ctrl_mask: mask to bandgap_mode_ctrl.mode_ctrl
- * @bgap_counter: BANDGAP_COUNTER register offset
- * @counter_mask: mask to bandgap_counter.counter
- * @bgap_threshold: BANDGAP_THRESHOLD register offset (TALERT thresholds)
- * @threshold_thot_mask: mask to bandgap_threshold.thot
- * @threshold_tcold_mask: mask to bandgap_threshold.tcold
- * @tshut_threshold: TSHUT_THRESHOLD register offset (TSHUT thresholds)
- * @tshut_efuse_mask: mask to tshut_threshold.tshut_efuse
- * @tshut_efuse_shift: shift to tshut_threshold.tshut_efuse
- * @tshut_hot_mask: mask to tshut_threshold.thot
- * @tshut_cold_mask: mask to tshut_threshold.tcold
- * @bgap_status: BANDGAP_STATUS register offset
- * @status_clean_stop_mask: mask to bandgap_status.clean_stop
- * @status_bgap_alert_mask: mask to bandgap_status.bandgap_alert
- * @status_hot_mask: mask to bandgap_status.hot
- * @status_cold_mask: mask to bandgap_status.cold
- * @bgap_cumul_dtemp: BANDGAP_CUMUL_DTEMP register offset
- * @ctrl_dtemp_0: CTRL_DTEMP0 register offset
- * @ctrl_dtemp_1: CTRL_DTEMP1 register offset
- * @ctrl_dtemp_2: CTRL_DTEMP2 register offset
- * @ctrl_dtemp_3: CTRL_DTEMP3 register offset
- * @ctrl_dtemp_4: CTRL_DTEMP4 register offset
- * @bgap_efuse: BANDGAP_EFUSE register offset
- *
- * The register offsets and bitfields might change across
- * OMAP versions and variants. Hence this struct serves as a
- * descriptor map on how to access the registers and the bitfields.
- *
- * This descriptor contains registers of all versions of bandgap chips.
- * Not all versions will use all registers, depending on the available
- * features. Please read TRMs for descriptive explanation on each bitfield.
- */
-
-struct temp_sensor_registers {
- u32 temp_sensor_ctrl;
- u32 bgap_tempsoff_mask;
- u32 bgap_soc_mask;
- u32 bgap_eocz_mask; /* not used: but needs revisit */
- u32 bgap_dtemp_mask;
-
- u32 bgap_mask_ctrl;
- u32 mask_hot_mask;
- u32 mask_cold_mask;
- u32 mask_sidlemode_mask; /* not used: but may be needed for pm */
- u32 mask_counter_delay_mask;
- u32 mask_freeze_mask;
- u32 mask_clear_mask; /* not used: but needed for trending */
- u32 mask_clear_accum_mask; /* not used: but needed for trending */
-
- u32 bgap_mode_ctrl;
- u32 mode_ctrl_mask;
-
- u32 bgap_counter;
- u32 counter_mask;
-
- u32 bgap_threshold;
- u32 threshold_thot_mask;
- u32 threshold_tcold_mask;
-
- u32 tshut_threshold;
- u32 tshut_efuse_mask; /* not used */
- u32 tshut_efuse_shift; /* not used */
- u32 tshut_hot_mask;
- u32 tshut_cold_mask;
-
- u32 bgap_status;
- u32 status_clean_stop_mask; /* not used: but needed for trending */
- u32 status_bgap_alert_mask; /* not used */
- u32 status_hot_mask;
- u32 status_cold_mask;
-
- u32 bgap_cumul_dtemp; /* not used: but needed for trending */
- u32 ctrl_dtemp_0; /* not used: but needed for trending */
- u32 ctrl_dtemp_1; /* not used: but needed for trending */
- u32 ctrl_dtemp_2; /* not used: but needed for trending */
- u32 ctrl_dtemp_3; /* not used: but needed for trending */
- u32 ctrl_dtemp_4; /* not used: but needed for trending */
- u32 bgap_efuse;
-};
-
-/**
- * struct temp_sensor_data - The thresholds and limits for temperature sensors.
- * @tshut_hot: temperature to trigger a thermal reset (initial value)
- * @tshut_cold: temperature to bring the platform out of thermal reset (initial value)
- * @t_hot: temperature to trigger a thermal alert (high initial value)
- * @t_cold: temperature to trigger a thermal alert (low initial value)
- * @min_freq: sensor minimum clock rate
- * @max_freq: sensor maximum clock rate
- * @max_temp: sensor maximum temperature
- * @min_temp: sensor minimum temperature
- * @hyst_val: temperature hysteresis considered while converting ADC values
- * @update_int1: update interval
- * @update_int2: update interval
- *
- * This data structure will hold the required thresholds and temperature limits
- * for a specific temperature sensor, like shutdown temperature, alert
- * temperature, clock / rate used, ADC conversion limits and update intervals
- */
-struct temp_sensor_data {
- u32 tshut_hot;
- u32 tshut_cold;
- u32 t_hot;
- u32 t_cold;
- u32 min_freq;
- u32 max_freq;
- int max_temp;
- int min_temp;
- int hyst_val;
- u32 update_int1; /* not used */
- u32 update_int2; /* not used */
-};
-
-struct ti_bandgap_data;
-
-/**
- * struct temp_sensor_regval - temperature sensor register values and priv data
- * @bg_mode_ctrl: temp sensor control register value
- * @bg_ctrl: bandgap ctrl register value
- * @bg_counter: bandgap counter value
- * @bg_threshold: bandgap threshold register value
- * @tshut_threshold: bandgap tshut register value
- * @data: private data
- *
- * Data structure to save and restore bandgap register set context. Only
- * required registers are shadowed, when needed.
- */
-struct temp_sensor_regval {
- u32 bg_mode_ctrl;
- u32 bg_ctrl;
- u32 bg_counter;
- u32 bg_threshold;
- u32 tshut_threshold;
- void *data;
-};
-
-/**
- * struct ti_bandgap - bandgap device structure
- * @dev: struct device pointer
- * @base: io memory base address
- * @conf: struct with bandgap configuration set (# sensors, conv_table, etc)
- * @regval: temperature sensor register values
- * @fclock: pointer to functional clock of temperature sensor
- * @div_clk: pointer to divider clock of temperature sensor fclk
- * @lock: spinlock for ti_bandgap structure
- * @irq: MPU IRQ number for thermal alert
- * @tshut_gpio: GPIO where Tshut signal is routed
- * @clk_rate: Holds current clock rate
- *
- * The bandgap device structure representing the bandgap device instance.
- * It holds most of the dynamic stuff. Configurations and sensor specific
- * entries are inside the @conf structure.
- */
-struct ti_bandgap {
- struct device *dev;
- void __iomem *base;
- const struct ti_bandgap_data *conf;
- struct temp_sensor_regval *regval;
- struct clk *fclock;
- struct clk *div_clk;
- spinlock_t lock; /* shields this struct */
- int irq;
- int tshut_gpio;
- u32 clk_rate;
-};
-
-/**
- * struct ti_temp_sensor - bandgap temperature sensor configuration data
- * @ts_data: pointer to struct with thresholds, limits of temperature sensor
- * @registers: pointer to the list of register offsets and bitfields
- * @domain: the name of the domain where the sensor is located
- * @slope: sensor gradient slope info for hotspot extrapolation equation
- * @constant: sensor gradient const info for hotspot extrapolation equation
- * @slope_pcb: sensor gradient slope info for hotspot extrapolation equation
- * with no external influence
- * @constant_pcb: sensor gradient const info for hotspot extrapolation equation
- * with no external influence
- * @register_cooling: function to describe how this sensor is going to be cooled
- * @unregister_cooling: function to release cooling data
- *
- * Data structure to describe a temperature sensor handled by a bandgap device.
- * It should provide configuration details on this sensor, such as how to
- * access the registers affecting this sensor, the shadow register buffer,
- * how to assess the gradient from the hotspot, and how to cool down the
- * domain when the sensor reports a too-high temperature.
- */
-struct ti_temp_sensor {
- struct temp_sensor_data *ts_data;
- struct temp_sensor_registers *registers;
- char *domain;
- /* for hotspot extrapolation */
- const int slope;
- const int constant;
- const int slope_pcb;
- const int constant_pcb;
- int (*register_cooling)(struct ti_bandgap *bgp, int id);
- int (*unregister_cooling)(struct ti_bandgap *bgp, int id);
-};
-
-/**
- * DOC: ti bandgap feature types
- *
- * TI_BANDGAP_FEATURE_TSHUT - used when the thermal shutdown signal output
- * of a bandgap device instance is routed to the processor. This means
- * the system must react and perform the shutdown by itself (handle an
- * IRQ, for instance).
- *
- * TI_BANDGAP_FEATURE_TSHUT_CONFIG - used when the bandgap device has control
- * over the thermal shutdown configuration. This means that the thermal
- * shutdown thresholds are programmable, for instance.
- *
- * TI_BANDGAP_FEATURE_TALERT - used when the bandgap device instance outputs
- * a signal representing violation of programmable alert thresholds.
- *
- * TI_BANDGAP_FEATURE_MODE_CONFIG - used when it is possible to choose which
- * mode, continuous or one shot, the bandgap device instance will operate in.
- *
- * TI_BANDGAP_FEATURE_COUNTER - used when the bandgap device instance allows
- * programming the update interval of its internal state machine.
- *
- * TI_BANDGAP_FEATURE_POWER_SWITCH - used when the bandgap device allows
- * itself to be switched on/off.
- *
- * TI_BANDGAP_FEATURE_CLK_CTRL - used when the clocks feeding the bandgap
- * device are gateable.
- *
- * TI_BANDGAP_FEATURE_FREEZE_BIT - used when the bandgap device features
- * a history buffer whose updates can be frozen/unfrozen.
- *
- * TI_BANDGAP_FEATURE_COUNTER_DELAY - used when the bandgap device features
- * a delay programming based on distinct values.
- *
- * TI_BANDGAP_FEATURE_HISTORY_BUFFER - used when the bandgap device features
- * a history buffer of temperatures.
- *
- * TI_BANDGAP_HAS(b, f) - macro to check if a bandgap device is capable of a
- * specific feature (above) or not. Returns non-zero if it is.
- */
-#define TI_BANDGAP_FEATURE_TSHUT BIT(0)
-#define TI_BANDGAP_FEATURE_TSHUT_CONFIG BIT(1)
-#define TI_BANDGAP_FEATURE_TALERT BIT(2)
-#define TI_BANDGAP_FEATURE_MODE_CONFIG BIT(3)
-#define TI_BANDGAP_FEATURE_COUNTER BIT(4)
-#define TI_BANDGAP_FEATURE_POWER_SWITCH BIT(5)
-#define TI_BANDGAP_FEATURE_CLK_CTRL BIT(6)
-#define TI_BANDGAP_FEATURE_FREEZE_BIT BIT(7)
-#define TI_BANDGAP_FEATURE_COUNTER_DELAY BIT(8)
-#define TI_BANDGAP_FEATURE_HISTORY_BUFFER BIT(9)
-#define TI_BANDGAP_HAS(b, f) \
- ((b)->conf->features & TI_BANDGAP_FEATURE_ ## f)
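A minimal usage sketch of the feature test above; the helper name is hypothetical, but the feature combination mirrors what ti_bandgap_get_trend() requires:

/* Trend reporting needs both the history buffer and the freeze bit. */
static inline bool bandgap_can_trend(struct ti_bandgap *bgp)
{
	return TI_BANDGAP_HAS(bgp, HISTORY_BUFFER) &&
	       TI_BANDGAP_HAS(bgp, FREEZE_BIT);
}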
-
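As a quick orientation for the flag scheme above: callers gate optional behaviour with TI_BANDGAP_HAS(). The sketch below is illustrative only and assumes the declarations in this header; example_enable_counter() is a hypothetical caller, not code from the driver.

static int example_enable_counter(struct ti_bandgap *bgp, u32 interval)
{
	/* Only devices that can program their update rate accept this. */
	if (!TI_BANDGAP_HAS(bgp, COUNTER))
		return -ENOTSUPP;

	/* Sensor 0 is assumed to exist on this instance. */
	return ti_bandgap_write_update_interval(bgp, 0, interval);
}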
-/**
- * struct ti_bandgap_data - ti bandgap data configuration structure
- * @features: a bitwise flag set to describe the device features
- * @conv_table: Pointer to ADC to temperature conversion table
- * @adc_start_val: ADC conversion table starting value
- * @adc_end_val: ADC conversion table ending value
- * @fclock_name: clock name of the functional clock
- * @div_ck_name: clock name of the clock divisor
- * @sensor_count: count of temperature sensors within this bandgap device
- * @report_temperature: callback to report thermal alert to thermal API
- * @expose_sensor: callback to export sensor to thermal API
- * @remove_sensor: callback to destroy sensor from thermal API
- * @sensors: array of sensors present in this bandgap instance
- *
- * This is a data structure which should hold most of the static configuration
- * of a bandgap device instance. It should describe which features this instance
- * is capable of, the clock names to feed this device, the number of sensors and
- * their configuration representation, and how to export and unexport them to
- * a thermal API.
- */
-struct ti_bandgap_data {
- unsigned int features;
- const int *conv_table;
- u32 adc_start_val;
- u32 adc_end_val;
- char *fclock_name;
- char *div_ck_name;
- int sensor_count;
- int (*report_temperature)(struct ti_bandgap *bgp, int id);
- int (*expose_sensor)(struct ti_bandgap *bgp, int id, char *domain);
- int (*remove_sensor)(struct ti_bandgap *bgp, int id);
-
- /* this needs to be at the end */
- struct ti_temp_sensor sensors[];
-};
-
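The structure above is normally instantiated as one const table per SoC (the omap44xx/omap5430 symbols declared at the end of this header). A minimal, hypothetical sketch of such a table, assuming ti-thermal.h is also included and that the conversion table and register description are defined elsewhere:

static const struct ti_bandgap_data example_soc_data = {
	.features	= TI_BANDGAP_FEATURE_TALERT |
			  TI_BANDGAP_FEATURE_COUNTER,
	.fclock_name	= "bandgap_ts_fclk",	/* assumed clock name */
	.div_ck_name	= "div_ts_ck",		/* assumed clock name */
	.conv_table	= example_adc_to_temp,	/* assumed ADC->millicelsius table */
	.adc_start_val	= 530,			/* illustrative ADC range */
	.adc_end_val	= 932,
	.expose_sensor	= ti_thermal_expose_sensor,
	.remove_sensor	= ti_thermal_remove_sensor,
	.sensor_count	= 1,
	.sensors = {			/* flexible array, last member */
		{
			.registers	= &example_sensor_regs,	/* assumed */
			.domain		= "cpu",
			.slope		= 348,	/* OMAP4460 gradients, see ti-thermal.h */
			.constant	= -9301,
		},
	},
};

Fields such as .report_temperature and the per-sensor cooling callbacks are omitted here for brevity.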
-int ti_bandgap_read_thot(struct ti_bandgap *bgp, int id, int *thot);
-int ti_bandgap_write_thot(struct ti_bandgap *bgp, int id, int val);
-int ti_bandgap_read_tcold(struct ti_bandgap *bgp, int id, int *tcold);
-int ti_bandgap_write_tcold(struct ti_bandgap *bgp, int id, int val);
-int ti_bandgap_read_update_interval(struct ti_bandgap *bgp, int id,
- int *interval);
-int ti_bandgap_write_update_interval(struct ti_bandgap *bgp, int id,
- u32 interval);
-int ti_bandgap_read_temperature(struct ti_bandgap *bgp, int id,
- int *temperature);
-int ti_bandgap_set_sensor_data(struct ti_bandgap *bgp, int id, void *data);
-void *ti_bandgap_get_sensor_data(struct ti_bandgap *bgp, int id);
-int ti_bandgap_get_trend(struct ti_bandgap *bgp, int id, int *trend);
-
-#ifdef CONFIG_OMAP4_THERMAL
-extern const struct ti_bandgap_data omap4430_data;
-extern const struct ti_bandgap_data omap4460_data;
-extern const struct ti_bandgap_data omap4470_data;
-#else
-#define omap4430_data NULL
-#define omap4460_data NULL
-#define omap4470_data NULL
-#endif
-
-#ifdef CONFIG_OMAP5_THERMAL
-extern const struct ti_bandgap_data omap5430_data;
-#else
-#define omap5430_data NULL
-#endif
-
-#endif
diff --git a/drivers/staging/ti-soc-thermal/ti-thermal-common.c b/drivers/staging/ti-soc-thermal/ti-thermal-common.c
deleted file mode 100644
index e3c5e67..0000000
--- a/drivers/staging/ti-soc-thermal/ti-thermal-common.c
+++ /dev/null
@@ -1,367 +0,0 @@
-/*
- * OMAP thermal driver interface
- *
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
- * Contact:
- * Eduardo Valentin <eduardo.valentin@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/mutex.h>
-#include <linux/gfp.h>
-#include <linux/kernel.h>
-#include <linux/workqueue.h>
-#include <linux/thermal.h>
-#include <linux/cpufreq.h>
-#include <linux/cpumask.h>
-#include <linux/cpu_cooling.h>
-
-#include "ti-thermal.h"
-#include "ti-bandgap.h"
-
-/* common data structures */
-struct ti_thermal_data {
- struct thermal_zone_device *ti_thermal;
- struct thermal_cooling_device *cool_dev;
- struct ti_bandgap *bgp;
- enum thermal_device_mode mode;
- struct work_struct thermal_wq;
- int sensor_id;
-};
-
-static void ti_thermal_work(struct work_struct *work)
-{
- struct ti_thermal_data *data = container_of(work,
- struct ti_thermal_data, thermal_wq);
-
- thermal_zone_device_update(data->ti_thermal);
-
- dev_dbg(&data->ti_thermal->device, "updated thermal zone %s\n",
- data->ti_thermal->type);
-}
-
-/**
- * ti_thermal_hotspot_temperature - returns sensor extrapolated temperature
- * @t: omap sensor temperature
- * @s: omap sensor slope value
- * @c: omap sensor const value
- */
-static inline int ti_thermal_hotspot_temperature(int t, int s, int c)
-{
- int delta = t * s / 1000 + c;
-
- if (delta < 0)
- delta = 0;
-
- return t + delta;
-}
-
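A worked example of the extrapolation above, using the OMAP4460 gradients defined in ti-thermal.h (slope 348, constant -9301, temperatures in millicelsius):

/*
 *   t     = 40000                      raw bandgap reading, 40.000 C
 *   delta = 40000 * 348 / 1000 - 9301  = 13920 - 9301 = 4619
 *   T_hot = t + delta                  = 44619, roughly 44.6 C at the hotspot
 *
 * A negative delta is clamped to zero, so the extrapolated hotspot value can
 * never fall below the raw sensor reading.
 */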
-/* thermal zone ops */
-/* Get temperature callback function for thermal zone*/
-static inline int ti_thermal_get_temp(struct thermal_zone_device *thermal,
- unsigned long *temp)
-{
- struct ti_thermal_data *data = thermal->devdata;
- struct ti_bandgap *bgp;
- const struct ti_temp_sensor *s;
- int ret, tmp, pcb_temp, slope, constant;
-
- if (!data)
- return 0;
-
- bgp = data->bgp;
- s = &bgp->conf->sensors[data->sensor_id];
-
- ret = ti_bandgap_read_temperature(bgp, data->sensor_id, &tmp);
- if (ret)
- return ret;
-
- pcb_temp = 0;
- /* TODO: Introduce pcb temperature lookup */
- /* In case pcb zone is available, use the extrapolation rule with it */
- if (pcb_temp) {
- tmp -= pcb_temp;
- slope = s->slope_pcb;
- constant = s->constant_pcb;
- } else {
- slope = s->slope;
- constant = s->constant;
- }
- *temp = ti_thermal_hotspot_temperature(tmp, slope, constant);
-
- return ret;
-}
-
-/* Bind callback functions for thermal zone */
-static int ti_thermal_bind(struct thermal_zone_device *thermal,
- struct thermal_cooling_device *cdev)
-{
- struct ti_thermal_data *data = thermal->devdata;
- int id;
-
- if (IS_ERR_OR_NULL(data))
- return -ENODEV;
-
- /* check if this is the cooling device we registered */
- if (data->cool_dev != cdev)
- return 0;
-
- id = data->sensor_id;
-
- /* Simple thing, two trips, one passive another critical */
- return thermal_zone_bind_cooling_device(thermal, 0, cdev,
- /* bind with min and max states defined by cpu_cooling */
- THERMAL_NO_LIMIT,
- THERMAL_NO_LIMIT);
-}
-
-/* Unbind callback functions for thermal zone */
-static int ti_thermal_unbind(struct thermal_zone_device *thermal,
- struct thermal_cooling_device *cdev)
-{
- struct ti_thermal_data *data = thermal->devdata;
-
- if (IS_ERR_OR_NULL(data))
- return -ENODEV;
-
- /* check if this is the cooling device we registered */
- if (data->cool_dev != cdev)
- return 0;
-
- /* Simple thing, two trips, one passive another critical */
- return thermal_zone_unbind_cooling_device(thermal, 0, cdev);
-}
-
-/* Get mode callback functions for thermal zone */
-static int ti_thermal_get_mode(struct thermal_zone_device *thermal,
- enum thermal_device_mode *mode)
-{
- struct ti_thermal_data *data = thermal->devdata;
-
- if (data)
- *mode = data->mode;
-
- return 0;
-}
-
-/* Set mode callback functions for thermal zone */
-static int ti_thermal_set_mode(struct thermal_zone_device *thermal,
- enum thermal_device_mode mode)
-{
- struct ti_thermal_data *data = thermal->devdata;
-
- if (!data->ti_thermal) {
- dev_notice(&thermal->device, "thermal zone not registered\n");
- return 0;
- }
-
- mutex_lock(&data->ti_thermal->lock);
-
- if (mode == THERMAL_DEVICE_ENABLED)
- data->ti_thermal->polling_delay = FAST_TEMP_MONITORING_RATE;
- else
- data->ti_thermal->polling_delay = 0;
-
- mutex_unlock(&data->ti_thermal->lock);
-
- data->mode = mode;
- thermal_zone_device_update(data->ti_thermal);
- dev_dbg(&thermal->device, "thermal polling set for duration=%d msec\n",
- data->ti_thermal->polling_delay);
-
- return 0;
-}
-
-/* Get trip type callback functions for thermal zone */
-static int ti_thermal_get_trip_type(struct thermal_zone_device *thermal,
- int trip, enum thermal_trip_type *type)
-{
- if (!ti_thermal_is_valid_trip(trip))
- return -EINVAL;
-
- if (trip + 1 == OMAP_TRIP_NUMBER)
- *type = THERMAL_TRIP_CRITICAL;
- else
- *type = THERMAL_TRIP_PASSIVE;
-
- return 0;
-}
-
-/* Get trip temperature callback functions for thermal zone */
-static int ti_thermal_get_trip_temp(struct thermal_zone_device *thermal,
- int trip, unsigned long *temp)
-{
- if (!ti_thermal_is_valid_trip(trip))
- return -EINVAL;
-
- *temp = ti_thermal_get_trip_value(trip);
-
- return 0;
-}
-
-/* Get the temperature trend callback functions for thermal zone */
-static int ti_thermal_get_trend(struct thermal_zone_device *thermal,
- int trip, enum thermal_trend *trend)
-{
- struct ti_thermal_data *data = thermal->devdata;
- struct ti_bandgap *bgp;
- int id, tr, ret = 0;
-
- bgp = data->bgp;
- id = data->sensor_id;
-
- ret = ti_bandgap_get_trend(bgp, id, &tr);
- if (ret)
- return ret;
-
- if (tr > 0)
- *trend = THERMAL_TREND_RAISING;
- else if (tr < 0)
- *trend = THERMAL_TREND_DROPPING;
- else
- *trend = THERMAL_TREND_STABLE;
-
- return 0;
-}
-
-/* Get critical temperature callback functions for thermal zone */
-static int ti_thermal_get_crit_temp(struct thermal_zone_device *thermal,
- unsigned long *temp)
-{
- /* shutdown zone */
- return ti_thermal_get_trip_temp(thermal, OMAP_TRIP_NUMBER - 1, temp);
-}
-
-static struct thermal_zone_device_ops ti_thermal_ops = {
- .get_temp = ti_thermal_get_temp,
- .get_trend = ti_thermal_get_trend,
- .bind = ti_thermal_bind,
- .unbind = ti_thermal_unbind,
- .get_mode = ti_thermal_get_mode,
- .set_mode = ti_thermal_set_mode,
- .get_trip_type = ti_thermal_get_trip_type,
- .get_trip_temp = ti_thermal_get_trip_temp,
- .get_crit_temp = ti_thermal_get_crit_temp,
-};
-
-static struct ti_thermal_data
-*ti_thermal_build_data(struct ti_bandgap *bgp, int id)
-{
- struct ti_thermal_data *data;
-
- data = devm_kzalloc(bgp->dev, sizeof(*data), GFP_KERNEL);
- if (!data) {
- dev_err(bgp->dev, "kzalloc fail\n");
- return NULL;
- }
- data->sensor_id = id;
- data->bgp = bgp;
- data->mode = THERMAL_DEVICE_ENABLED;
- INIT_WORK(&data->thermal_wq, ti_thermal_work);
-
- return data;
-}
-
-int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id,
- char *domain)
-{
- struct ti_thermal_data *data;
-
- data = ti_bandgap_get_sensor_data(bgp, id);
-
- if (IS_ERR_OR_NULL(data))
- data = ti_thermal_build_data(bgp, id);
-
- if (!data)
- return -EINVAL;
-
- /* Create thermal zone */
- data->ti_thermal = thermal_zone_device_register(domain,
- OMAP_TRIP_NUMBER, 0, data, &ti_thermal_ops,
- NULL, FAST_TEMP_MONITORING_RATE,
- FAST_TEMP_MONITORING_RATE);
- if (IS_ERR_OR_NULL(data->ti_thermal)) {
- dev_err(bgp->dev, "thermal zone device is NULL\n");
- return PTR_ERR(data->ti_thermal);
- }
- data->ti_thermal->polling_delay = FAST_TEMP_MONITORING_RATE;
- ti_bandgap_set_sensor_data(bgp, id, data);
-
- return 0;
-}
-
-int ti_thermal_remove_sensor(struct ti_bandgap *bgp, int id)
-{
- struct ti_thermal_data *data;
-
- data = ti_bandgap_get_sensor_data(bgp, id);
-
- thermal_zone_device_unregister(data->ti_thermal);
-
- return 0;
-}
-
-int ti_thermal_report_sensor_temperature(struct ti_bandgap *bgp, int id)
-{
- struct ti_thermal_data *data;
-
- data = ti_bandgap_get_sensor_data(bgp, id);
-
- schedule_work(&data->thermal_wq);
-
- return 0;
-}
-
-int ti_thermal_register_cpu_cooling(struct ti_bandgap *bgp, int id)
-{
- struct ti_thermal_data *data;
-
- data = ti_bandgap_get_sensor_data(bgp, id);
- if (IS_ERR_OR_NULL(data))
- data = ti_thermal_build_data(bgp, id);
-
- if (!data)
- return -EINVAL;
-
- if (!cpufreq_get_current_driver()) {
- dev_dbg(bgp->dev, "no cpufreq driver yet\n");
- return -EPROBE_DEFER;
- }
-
- /* Register cooling device */
- data->cool_dev = cpufreq_cooling_register(cpu_present_mask);
- if (IS_ERR_OR_NULL(data->cool_dev)) {
- dev_err(bgp->dev,
- "Failed to register cpufreq cooling device\n");
- return PTR_ERR(data->cool_dev);
- }
- ti_bandgap_set_sensor_data(bgp, id, data);
-
- return 0;
-}
-
-int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id)
-{
- struct ti_thermal_data *data;
-
- data = ti_bandgap_get_sensor_data(bgp, id);
- cpufreq_cooling_unregister(data->cool_dev);
-
- return 0;
-}
diff --git a/drivers/staging/ti-soc-thermal/ti-thermal.h b/drivers/staging/ti-soc-thermal/ti-thermal.h
deleted file mode 100644
index 5055777..0000000
--- a/drivers/staging/ti-soc-thermal/ti-thermal.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * OMAP thermal definitions
- *
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
- * Contact:
- * Eduardo Valentin <eduardo.valentin@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-#ifndef __TI_THERMAL_H
-#define __TI_THERMAL_H
-
-#include "ti-bandgap.h"
-
-/* sensors gradient and offsets */
-#define OMAP_GRADIENT_SLOPE_4430 0
-#define OMAP_GRADIENT_CONST_4430 20000
-#define OMAP_GRADIENT_SLOPE_4460 348
-#define OMAP_GRADIENT_CONST_4460 -9301
-#define OMAP_GRADIENT_SLOPE_4470 308
-#define OMAP_GRADIENT_CONST_4470 -7896
-
-#define OMAP_GRADIENT_SLOPE_5430_CPU 65
-#define OMAP_GRADIENT_CONST_5430_CPU -1791
-#define OMAP_GRADIENT_SLOPE_5430_GPU 117
-#define OMAP_GRADIENT_CONST_5430_GPU -2992
-
-/* PCB sensor calculation constants */
-#define OMAP_GRADIENT_SLOPE_W_PCB_4430 0
-#define OMAP_GRADIENT_CONST_W_PCB_4430 20000
-#define OMAP_GRADIENT_SLOPE_W_PCB_4460 1142
-#define OMAP_GRADIENT_CONST_W_PCB_4460 -393
-#define OMAP_GRADIENT_SLOPE_W_PCB_4470 1063
-#define OMAP_GRADIENT_CONST_W_PCB_4470 -477
-
-#define OMAP_GRADIENT_SLOPE_W_PCB_5430_CPU 100
-#define OMAP_GRADIENT_CONST_W_PCB_5430_CPU 484
-#define OMAP_GRADIENT_SLOPE_W_PCB_5430_GPU 464
-#define OMAP_GRADIENT_CONST_W_PCB_5430_GPU -5102
-
-/* trip points of interest in millicelsius (at hotspot level) */
-#define OMAP_TRIP_COLD 100000
-#define OMAP_TRIP_HOT 110000
-#define OMAP_TRIP_SHUTDOWN 125000
-#define OMAP_TRIP_NUMBER 2
-#define OMAP_TRIP_STEP \
- ((OMAP_TRIP_SHUTDOWN - OMAP_TRIP_HOT) / (OMAP_TRIP_NUMBER - 1))
-
-/* Update rates */
-#define FAST_TEMP_MONITORING_RATE 250
-
-/* helper macros */
-/**
- * ti_thermal_get_trip_value - returns trip temperature based on index
- * @i: trip index
- */
-#define ti_thermal_get_trip_value(i) \
- (OMAP_TRIP_HOT + ((i) * OMAP_TRIP_STEP))
-
-/**
- * ti_thermal_is_valid_trip - check for trip index
- * @i: trip index
- */
-#define ti_thermal_is_valid_trip(trip) \
- ((trip) >= 0 && (trip) < OMAP_TRIP_NUMBER)
-
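With the constants above, the trip table these helpers generate works out as follows (a worked example, not additional code):

/*
 *   OMAP_TRIP_STEP               = (125000 - 110000) / (2 - 1) = 15000
 *   ti_thermal_get_trip_value(0) = 110000 mC -> THERMAL_TRIP_PASSIVE
 *   ti_thermal_get_trip_value(1) = 125000 mC -> THERMAL_TRIP_CRITICAL
 *
 * Trip 0 drives the cpufreq cooling device at 110 C and trip 1 is the
 * critical/shutdown point at 125 C; OMAP_TRIP_COLD is not consumed by the
 * helpers shown here.
 */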
-#ifdef CONFIG_TI_THERMAL
-int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id, char *domain);
-int ti_thermal_remove_sensor(struct ti_bandgap *bgp, int id);
-int ti_thermal_report_sensor_temperature(struct ti_bandgap *bgp, int id);
-int ti_thermal_register_cpu_cooling(struct ti_bandgap *bgp, int id);
-int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id);
-#else
-static inline
-int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id, char *domain)
-{
- return 0;
-}
-
-static inline
-int ti_thermal_remove_sensor(struct ti_bandgap *bgp, int id)
-{
- return 0;
-}
-
-static inline
-int ti_thermal_report_sensor_temperature(struct ti_bandgap *bgp, int id)
-{
- return 0;
-}
-
-static inline
-int ti_thermal_register_cpu_cooling(struct ti_bandgap *bgp, int id)
-{
- return 0;
-}
-
-static inline
-int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id)
-{
- return 0;
-}
-#endif
-#endif
diff --git a/drivers/staging/ti-soc-thermal/ti_soc_thermal.txt b/drivers/staging/ti-soc-thermal/ti_soc_thermal.txt
deleted file mode 100644
index a4a33d1..0000000
--- a/drivers/staging/ti-soc-thermal/ti_soc_thermal.txt
+++ /dev/null
@@ -1,60 +0,0 @@
-* Texas Instruments OMAP SCM bandgap bindings
-
-In the System Control Module, OMAP supplies a voltage reference
-and a temperature sensor feature that are gathered in the band
-gap voltage and temperature sensor (VBGAPTS) module. The band
-gap provides current and voltage reference for its internal
-circuits and other analog IP blocks. The analog-to-digital
-converter (ADC) produces an output value that is proportional
-to the silicon temperature.
-
-Required properties:
-- compatible : Should be:
- - "ti,omap4430-bandgap" : for OMAP4430 bandgap
- - "ti,omap4460-bandgap" : for OMAP4460 bandgap
- - "ti,omap4470-bandgap" : for OMAP4470 bandgap
- - "ti,omap5430-bandgap" : for OMAP5430 bandgap
-- interrupts : this entry should indicate which interrupt line
-the talert signal is routed to;
-Specific:
-- ti,tshut-gpio : this entry should be used to indicate which GPIO
-line the tshut signal is routed to;
-- regs : this entry must also be specified and it is specific
-to each bandgap version, because the mapping may change from
-SoC to SoC, apart from depending on the available features.
-
-Example:
-OMAP4430:
-bandgap {
- reg = <0x4a002260 0x4 0x4a00232C 0x4>;
- compatible = "ti,omap4430-bandgap";
-};
-
-OMAP4460:
-bandgap {
- reg = <0x4a002260 0x4
- 0x4a00232C 0x4
- 0x4a002378 0x18>;
- compatible = "ti,omap4460-bandgap";
- interrupts = <0 126 4>; /* talert */
- ti,tshut-gpio = <86>;
-};
-
-OMAP4470:
-bandgap {
- reg = <0x4a002260 0x4
- 0x4a00232C 0x4
- 0x4a002378 0x18>;
- compatible = "ti,omap4470-bandgap";
- interrupts = <0 126 4>; /* talert */
- ti,tshut-gpio = <86>;
-};
-
-OMAP5430:
-bandgap {
- reg = <0x4a0021e0 0xc
- 0x4a00232c 0xc
- 0x4a002380 0x2c
- 0x4a0023C0 0x3c>;
- compatible = "ti,omap5430-bandgap";
-};
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
index 60848f1..165b918 100644
--- a/drivers/staging/tidspbridge/Kconfig
+++ b/drivers/staging/tidspbridge/Kconfig
@@ -5,7 +5,8 @@
menuconfig TIDSPBRIDGE
tristate "DSP Bridge driver"
depends on ARCH_OMAP3 && !ARCH_MULTIPLATFORM
- select OMAP_MBOX_FWK
+ select MAILBOX
+ select OMAP2PLUS_MBOX
help
DSP/BIOS Bridge is designed for platforms that contain a GPP and
one or more attached DSPs. The GPP is considered the master or
diff --git a/drivers/staging/tidspbridge/core/_tiomap.h b/drivers/staging/tidspbridge/core/_tiomap.h
index b783bfa..65971b7 100644
--- a/drivers/staging/tidspbridge/core/_tiomap.h
+++ b/drivers/staging/tidspbridge/core/_tiomap.h
@@ -145,8 +145,8 @@ struct map_l4_peripheral {
#define L4_PERIPHERAL_MBOX 0x48094000
#define DSPVA_PERIPHERAL_MBOX 0x11808000
-#define PM_GRPSEL_BASE 0x48307000
-#define DSPVA_GRPSEL_BASE 0x11821000
+#define PM_GRPSEL_BASE 0x48307000
+#define DSPVA_GRPSEL_BASE 0x11821000
#define L4_PERIPHERAL_SIDETONE_MCBSP2 0x49028000
#define DSPVA_PERIPHERAL_SIDETONE_MCBSP2 0x11824000
@@ -311,7 +311,7 @@ static const struct bpwr_clk_t bpwr_clks[] = {
#define SET_GROUP_BITS16(reg, position, width, value) \
do {\
- reg &= ~((0xFFFF >> (16 - (width))) << (position)) ; \
+ reg &= ~((0xFFFF >> (16 - (width))) << (position)); \
reg |= ((value & (0xFFFF >> (16 - (width)))) << (position)); \
} while (0);
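For reference, SET_GROUP_BITS16() clears a width-bit field starting at bit position in a 16-bit shadow register value and ORs in the new value; note the stray semicolon after while (0), which makes the macro unsafe as the body of an unbraced if/else. A hypothetical call:

u16 reg = 0x00FF;

/* Write 0x2 into the 3-bit field at bits [6:4]:
 *   mask = (0xFFFF >> (16 - 3)) << 4 = 0x0070
 *   reg &= ~0x0070                   -> 0x008F
 *   reg |= (0x2 & 0x7) << 4          -> 0x00AF
 */
SET_GROUP_BITS16(reg, 4, 3, 0x2);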
diff --git a/drivers/staging/tidspbridge/core/_tiomap_pwr.h b/drivers/staging/tidspbridge/core/_tiomap_pwr.h
index bd0354d..7bbd380 100644
--- a/drivers/staging/tidspbridge/core/_tiomap_pwr.h
+++ b/drivers/staging/tidspbridge/core/_tiomap_pwr.h
@@ -40,7 +40,7 @@ extern int sleep_dsp(struct bridge_dev_context *dev_context,
u32 dw_cmd, void *pargs);
/*
* ========interrupt_dsp========
- * Sends an interrupt to DSP unconditionally.
+ * Sends an interrupt to DSP unconditionally.
*/
extern void interrupt_dsp(struct bridge_dev_context *dev_context,
u16 mb_val);
@@ -53,24 +53,24 @@ extern int dsp_peripheral_clk_ctrl(struct bridge_dev_context
*dev_context, void *pargs);
/*
* ======== handle_hibernation_from_dsp ========
- * Handle Hibernation requested from DSP
+ * Handle Hibernation requested from DSP
*/
int handle_hibernation_from_dsp(struct bridge_dev_context *dev_context);
/*
* ======== post_scale_dsp ========
- * Handle Post Scale notification to DSP
+ * Handle Post Scale notification to DSP
*/
int post_scale_dsp(struct bridge_dev_context *dev_context,
void *pargs);
/*
* ======== pre_scale_dsp ========
- * Handle Pre Scale notification to DSP
+ * Handle Pre Scale notification to DSP
*/
int pre_scale_dsp(struct bridge_dev_context *dev_context,
void *pargs);
/*
* ======== handle_constraints_set ========
- * Handle constraints request from DSP
+ * Handle constraints request from DSP
*/
int handle_constraints_set(struct bridge_dev_context *dev_context,
void *pargs);
diff --git a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
index dafa6d9..1862afd 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
@@ -51,7 +51,7 @@
/*
* ======== handle_constraints_set ========
- * Sets new DSP constraint
+ * Sets new DSP constraint
*/
int handle_constraints_set(struct bridge_dev_context *dev_context,
void *pargs)
@@ -75,7 +75,7 @@ int handle_constraints_set(struct bridge_dev_context *dev_context,
/*
* ======== handle_hibernation_from_dsp ========
- * Handle Hibernation requested from DSP
+ * Handle Hibernation requested from DSP
*/
int handle_hibernation_from_dsp(struct bridge_dev_context *dev_context)
{
@@ -144,7 +144,7 @@ int handle_hibernation_from_dsp(struct bridge_dev_context *dev_context)
/*
* ======== sleep_dsp ========
- * Put DSP in low power consuming state.
+ * Put DSP in low power consuming state.
*/
int sleep_dsp(struct bridge_dev_context *dev_context, u32 dw_cmd,
void *pargs)
@@ -250,7 +250,7 @@ int sleep_dsp(struct bridge_dev_context *dev_context, u32 dw_cmd,
/*
* ======== wake_dsp ========
- * Wake up DSP from sleep.
+ * Wake up DSP from sleep.
*/
int wake_dsp(struct bridge_dev_context *dev_context, void *pargs)
{
@@ -276,7 +276,7 @@ int wake_dsp(struct bridge_dev_context *dev_context, void *pargs)
/*
* ======== dsp_peripheral_clk_ctrl ========
- * Enable/Disable the DSP peripheral clocks as needed..
+ * Enable/Disable the DSP peripheral clocks as needed.
*/
int dsp_peripheral_clk_ctrl(struct bridge_dev_context *dev_context,
void *pargs)
diff --git a/drivers/staging/tidspbridge/core/ue_deh.c b/drivers/staging/tidspbridge/core/ue_deh.c
index 6aea6f1..e68f0ba 100644
--- a/drivers/staging/tidspbridge/core/ue_deh.c
+++ b/drivers/staging/tidspbridge/core/ue_deh.c
@@ -177,7 +177,7 @@ static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
void *dummy_va_addr;
resources = dev_context->resources;
- dummy_va_addr = (void*)__get_free_page(GFP_ATOMIC);
+ dummy_va_addr = (void *)__get_free_page(GFP_ATOMIC);
/*
* Before acking the MMU fault, let's make sure MMU can only
diff --git a/drivers/staging/tidspbridge/core/wdt.c b/drivers/staging/tidspbridge/core/wdt.c
index 7ff0e6c..c7ee467 100644
--- a/drivers/staging/tidspbridge/core/wdt.c
+++ b/drivers/staging/tidspbridge/core/wdt.c
@@ -25,8 +25,8 @@
#include <dspbridge/host_os.h>
-#define OMAP34XX_WDT3_BASE (0x49000000 + 0x30000)
-#define INT_34XX_WDT3_IRQ (36 + NR_IRQS)
+#define OMAP34XX_WDT3_BASE (0x49000000 + 0x30000)
+#define INT_34XX_WDT3_IRQ (36 + NR_IRQS)
static struct dsp_wdt_setting dsp_wdt;
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cmm.h b/drivers/staging/tidspbridge/include/dspbridge/cmm.h
index c66bcf7..2adf9ec 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/cmm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/cmm.h
@@ -293,7 +293,7 @@ extern int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator,
* ======== cmm_xlator_info ========
* Purpose:
* Set/Get process specific "translator" address info.
- * This is used to perform fast virtaul address translation
+ * This is used to perform fast virtual address translation
* for shared memory buffers between the GPP and DSP.
* Parameters:
* xlator: handle to translator.
diff --git a/drivers/staging/tidspbridge/include/dspbridge/host_os.h b/drivers/staging/tidspbridge/include/dspbridge/host_os.h
index 7f3a1db..d1441db 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/host_os.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/host_os.h
@@ -41,7 +41,7 @@
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <plat/mailbox.h>
+#include <linux/omap-mailbox.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/staging/tidspbridge/include/dspbridge/proc.h b/drivers/staging/tidspbridge/include/dspbridge/proc.h
index 774a3f6..64c2457 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/proc.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/proc.h
@@ -284,7 +284,7 @@ extern int proc_get_trace(void *hprocessor, u8 * pbuf, u32 max_size);
* user_envp: An Array of Environment settings(Unicode Strings)
* Returns:
* 0: Success.
- * -ENOENT: The DSP Execuetable was not found.
+ * -ENOENT: The DSP Executable was not found.
* -EFAULT: Invalid processor handle.
* -EPERM : Unable to Load the Processor
* Requires:
diff --git a/drivers/staging/tidspbridge/pmgr/dbll.c b/drivers/staging/tidspbridge/pmgr/dbll.c
index c191ae2..41e88ab 100644
--- a/drivers/staging/tidspbridge/pmgr/dbll.c
+++ b/drivers/staging/tidspbridge/pmgr/dbll.c
@@ -1120,8 +1120,11 @@ static int dbll_rmm_alloc(struct dynamic_loader_allocate *this,
or DYN_EXTERNAL, then mem granularity information is present
within the section name - only process if there are at least three
tokens within the section name (just a minor optimization) */
- if (count >= 3)
- strict_strtol(sz_last_token, 10, (long *)&req);
+ if (count >= 3) {
+ status = kstrtos32(sz_last_token, 10, &req);
+ if (status)
+ goto func_cont;
+ }
if ((req == 0) || (req == 1)) {
if (strcmp(sz_sec_last_token, "DYN_DARAM") == 0) {
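The hunk above swaps the obsolete strict_strtol() for kstrtos32(), which reports parse failures instead of silently leaving req unchanged; on error the code now bails out to func_cont rather than acting on a stale value. The conversion pattern, sketched with a hypothetical token:

s32 req;
int status;

status = kstrtos32("512", 10, &req);	/* hypothetical token, base 10 */
if (status)				/* -EINVAL or -ERANGE on bad input */
	return status;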
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index df0f37e..6d04eb4 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -421,12 +421,11 @@ static int omap3_bridge_startup(struct platform_device *pdev)
drv_datap->tc_wordswapon = tc_wordswapon;
if (base_img) {
- drv_datap->base_img = kmalloc(strlen(base_img) + 1, GFP_KERNEL);
+ drv_datap->base_img = kstrdup(base_img, GFP_KERNEL);
if (!drv_datap->base_img) {
err = -ENOMEM;
goto err2;
}
- strncpy(drv_datap->base_img, base_img, strlen(base_img) + 1);
}
dev_set_drvdata(bridge, drv_datap);
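kstrdup() above replaces the open-coded kmalloc(strlen() + 1)/strncpy() pair with a single call that allocates and copies the string, NUL terminator included. The equivalent pattern, sketched with a hypothetical source string:

char *copy = kstrdup("baseimage.dof", GFP_KERNEL);	/* hypothetical name */
if (!copy)
	return -ENOMEM;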
@@ -508,6 +507,7 @@ static int omap34_xx_bridge_probe(struct platform_device *pdev)
bridge_class = class_create(THIS_MODULE, "ti_bridge");
if (IS_ERR(bridge_class)) {
pr_err("%s: Error creating bridge class\n", __func__);
+ err = PTR_ERR(bridge_class);
goto err3;
}
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index 83d629a..d8957a5 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -56,8 +56,8 @@ MODULE_DEVICE_TABLE(usb, stub_table);
* usbip_status shows the status of usbip-host as long as this driver is bound
* to the target device.
*/
-static ssize_t show_status(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t usbip_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct stub_device *sdev = dev_get_drvdata(dev);
int status;
@@ -73,7 +73,7 @@ static ssize_t show_status(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%d\n", status);
}
-static DEVICE_ATTR(usbip_status, S_IRUGO, show_status, NULL);
+static DEVICE_ATTR_RO(usbip_status);
/*
* usbip_sockfd gets a socket descriptor of an established TCP connection that
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index 7b97df6..e3fc749 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -45,19 +45,20 @@ MODULE_PARM_DESC(usbip_debug_flag, "debug flags (defined in usbip_common.h)");
struct device_attribute dev_attr_usbip_debug;
EXPORT_SYMBOL_GPL(dev_attr_usbip_debug);
-static ssize_t show_flag(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t usbip_debug_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%lx\n", usbip_debug_flag);
}
-static ssize_t store_flag(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t usbip_debug_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
sscanf(buf, "%lx", &usbip_debug_flag);
return count;
}
-DEVICE_ATTR(usbip_debug, (S_IRUGO | S_IWUSR), show_flag, store_flag);
+DEVICE_ATTR_RW(usbip_debug);
static void usbip_dump_buffer(char *buff, int bufflen)
{
diff --git a/drivers/staging/usbip/usbip_event.c b/drivers/staging/usbip/usbip_event.c
index 82123be..64933b9 100644
--- a/drivers/staging/usbip/usbip_event.c
+++ b/drivers/staging/usbip/usbip_event.c
@@ -85,7 +85,7 @@ int usbip_start_eh(struct usbip_device *ud)
ud->eh = kthread_run(event_handler_loop, ud, "usbip_eh");
if (IS_ERR(ud->eh)) {
- pr_warning("Unable to start control thread\n");
+ pr_warn("Unable to start control thread\n");
return PTR_ERR(ud->eh);
}
diff --git a/drivers/staging/usbip/userspace/libsrc/vhci_driver.c b/drivers/staging/usbip/userspace/libsrc/vhci_driver.c
index 25e62e9..1091bb2 100644
--- a/drivers/staging/usbip/userspace/libsrc/vhci_driver.c
+++ b/drivers/staging/usbip/userspace/libsrc/vhci_driver.c
@@ -230,7 +230,7 @@ static int refresh_class_device_list(void)
sysfs_close_list(cname_list);
- /* seach under /sys/block */
+ /* search under /sys/block */
ret = search_class_for_usbip_device(SYSFS_BLOCK_NAME);
if (ret < 0)
return -1;
diff --git a/drivers/staging/usbip/userspace/src/usbip.c b/drivers/staging/usbip/userspace/src/usbip.c
index fff4b76..04a5f20 100644
--- a/drivers/staging/usbip/userspace/src/usbip.c
+++ b/drivers/staging/usbip/userspace/src/usbip.c
@@ -26,6 +26,7 @@
#include <syslog.h>
#include "usbip_common.h"
+#include "usbip_network.h"
#include "usbip.h"
static int usbip_help(int argc, char *argv[]);
@@ -34,7 +35,7 @@ static int usbip_version(int argc, char *argv[]);
static const char usbip_version_string[] = PACKAGE_STRING;
static const char usbip_usage_string[] =
- "usbip [--debug] [--log] [version]\n"
+ "usbip [--debug] [--log] [--tcp-port PORT] [version]\n"
" [help] <command> <args>\n";
static void usbip_usage(void)
@@ -138,9 +139,10 @@ static int run_command(const struct command *cmd, int argc, char *argv[])
int main(int argc, char *argv[])
{
static const struct option opts[] = {
- { "debug", no_argument, NULL, 'd' },
- { "log", no_argument, NULL, 'l' },
- { NULL, 0, NULL, 0 }
+ { "debug", no_argument, NULL, 'd' },
+ { "log", no_argument, NULL, 'l' },
+ { "tcp-port", required_argument, NULL, 't' },
+ { NULL, 0, NULL, 0 }
};
char *cmd;
@@ -150,7 +152,7 @@ int main(int argc, char *argv[])
usbip_use_stderr = 1;
opterr = 0;
for (;;) {
- opt = getopt_long(argc, argv, "+d", opts, NULL);
+ opt = getopt_long(argc, argv, "+dlt:", opts, NULL);
if (opt == -1)
break;
@@ -163,6 +165,9 @@ int main(int argc, char *argv[])
usbip_use_syslog = 1;
openlog("", LOG_PID, LOG_USER);
break;
+ case 't':
+ usbip_setup_port_number(optarg);
+ break;
case '?':
printf("usbip: invalid option\n");
default:
diff --git a/drivers/staging/usbip/userspace/src/usbip_attach.c b/drivers/staging/usbip/userspace/src/usbip_attach.c
index 0ec16e5..0858411 100644
--- a/drivers/staging/usbip/userspace/src/usbip_attach.c
+++ b/drivers/staging/usbip/userspace/src/usbip_attach.c
@@ -144,7 +144,7 @@ static int query_import_device(int sockfd, char *busid)
return -1;
}
- /* recieve a reply */
+ /* receive a reply */
rc = usbip_net_recv_op_common(sockfd, &code);
if (rc < 0) {
err("recv op_common");
@@ -175,7 +175,7 @@ static int attach_device(char *host, char *busid)
int rc;
int rhport;
- sockfd = usbip_net_tcp_connect(host, USBIP_PORT_STRING);
+ sockfd = usbip_net_tcp_connect(host, usbip_port_string);
if (sockfd < 0) {
err("tcp connect");
return -1;
@@ -189,7 +189,7 @@ static int attach_device(char *host, char *busid)
close(sockfd);
- rc = record_connection(host, USBIP_PORT_STRING, busid, rhport);
+ rc = record_connection(host, usbip_port_string, busid, rhport);
if (rc < 0) {
err("record connection");
return -1;
diff --git a/drivers/staging/usbip/userspace/src/usbip_list.c b/drivers/staging/usbip/userspace/src/usbip_list.c
index ff56255..237e099 100644
--- a/drivers/staging/usbip/userspace/src/usbip_list.c
+++ b/drivers/staging/usbip/userspace/src/usbip_list.c
@@ -131,13 +131,13 @@ static int list_exported_devices(char *host)
int rc;
int sockfd;
- sockfd = usbip_net_tcp_connect(host, USBIP_PORT_STRING);
+ sockfd = usbip_net_tcp_connect(host, usbip_port_string);
if (sockfd < 0) {
err("could not connect to %s:%s: %s", host,
- USBIP_PORT_STRING, gai_strerror(sockfd));
+ usbip_port_string, gai_strerror(sockfd));
return -1;
}
- dbg("connected to %s:%s", host, USBIP_PORT_STRING);
+ dbg("connected to %s:%s", host, usbip_port_string);
rc = get_exported_devices(host, sockfd);
if (rc < 0) {
diff --git a/drivers/staging/usbip/userspace/src/usbip_network.c b/drivers/staging/usbip/userspace/src/usbip_network.c
index b12448e..c39a07f 100644
--- a/drivers/staging/usbip/userspace/src/usbip_network.c
+++ b/drivers/staging/usbip/userspace/src/usbip_network.c
@@ -28,6 +28,36 @@
#include "usbip_common.h"
#include "usbip_network.h"
+int usbip_port = 3240;
+char *usbip_port_string = "3240";
+
+void usbip_setup_port_number(char *arg)
+{
+ dbg("parsing port arg '%s'", arg);
+ char *end;
+ unsigned long int port = strtoul(arg, &end, 10);
+
+ if (end == arg) {
+ err("port: could not parse '%s' as a decimal integer", arg);
+ return;
+ }
+
+ if (*end != '\0') {
+ err("port: garbage at end of '%s'", arg);
+ return;
+ }
+
+ if (port > UINT16_MAX) {
+ err("port: %s too high (max=%d)",
+ arg, UINT16_MAX);
+ return;
+ }
+
+ usbip_port = port;
+ usbip_port_string = arg;
+ info("using port %d (\"%s\")", usbip_port, usbip_port_string);
+}
+
void usbip_net_pack_uint32_t(int pack, uint32_t *num)
{
uint32_t i;
diff --git a/drivers/staging/usbip/userspace/src/usbip_network.h b/drivers/staging/usbip/userspace/src/usbip_network.h
index 1bbefc9..2d0e427 100644
--- a/drivers/staging/usbip/userspace/src/usbip_network.h
+++ b/drivers/staging/usbip/userspace/src/usbip_network.h
@@ -14,8 +14,9 @@
#include <stdint.h>
-#define USBIP_PORT 3240
-#define USBIP_PORT_STRING "3240"
+extern int usbip_port;
+extern char *usbip_port_string;
+void usbip_setup_port_number(char *arg);
/* ---------------------------------------------------------------------- */
/* Common header for all the kinds of PDUs. */
diff --git a/drivers/staging/usbip/userspace/src/usbipd.c b/drivers/staging/usbip/userspace/src/usbipd.c
index 3e913b8..1c76cfd 100644
--- a/drivers/staging/usbip/userspace/src/usbipd.c
+++ b/drivers/staging/usbip/userspace/src/usbipd.c
@@ -50,21 +50,30 @@
#define MAIN_LOOP_TIMEOUT 10
+#define DEFAULT_PID_FILE "/var/run/" PROGNAME ".pid"
+
static const char usbip_version_string[] = PACKAGE_STRING;
static const char usbipd_help_string[] =
- "usage: usbipd [options] \n"
- " -D, --daemon \n"
- " Run as a daemon process. \n"
- " \n"
- " -d, --debug \n"
- " Print debugging information. \n"
- " \n"
- " -h, --help \n"
- " Print this help. \n"
- " \n"
- " -v, --version \n"
- " Show version. \n";
+ "usage: usbipd [options]\n"
+ " -D, --daemon\n"
+ " Run as a daemon process.\n"
+ "\n"
+ " -d, --debug\n"
+ " Print debugging information.\n"
+ "\n"
+ " -PFILE, --pid FILE\n"
+ " Write process id to FILE.\n"
+ " If no FILE specified, use " DEFAULT_PID_FILE "\n"
+ "\n"
+ " -tPORT, --tcp-port PORT\n"
+ " Listen on TCP/IP port PORT.\n"
+ "\n"
+ " -h, --help\n"
+ " Print this help.\n"
+ "\n"
+ " -v, --version\n"
+ " Show version.\n";
static void usbipd_help(void)
{
@@ -286,13 +295,13 @@ static int do_accept(int listenfd)
memset(&ss, 0, sizeof(ss));
- connfd = accept(listenfd, (struct sockaddr *) &ss, &len);
+ connfd = accept(listenfd, (struct sockaddr *)&ss, &len);
if (connfd < 0) {
err("failed to accept connection");
return -1;
}
- rc = getnameinfo((struct sockaddr *) &ss, len, host, sizeof(host),
+ rc = getnameinfo((struct sockaddr *)&ss, len, host, sizeof(host),
port, sizeof(port), NI_NUMERICHOST | NI_NUMERICSERV);
if (rc)
err("getnameinfo: %s", gai_strerror(rc));
@@ -328,56 +337,69 @@ int process_request(int listenfd)
return 0;
}
-static void log_addrinfo(struct addrinfo *ai)
+static void addrinfo_to_text(struct addrinfo *ai, char buf[],
+ const size_t buf_size)
{
char hbuf[NI_MAXHOST];
char sbuf[NI_MAXSERV];
int rc;
+ buf[0] = '\0';
+
rc = getnameinfo(ai->ai_addr, ai->ai_addrlen, hbuf, sizeof(hbuf),
sbuf, sizeof(sbuf), NI_NUMERICHOST | NI_NUMERICSERV);
if (rc)
err("getnameinfo: %s", gai_strerror(rc));
- info("listening on %s:%s", hbuf, sbuf);
+ snprintf(buf, buf_size, "%s:%s", hbuf, sbuf);
}
static int listen_all_addrinfo(struct addrinfo *ai_head, int sockfdlist[])
{
struct addrinfo *ai;
int ret, nsockfd = 0;
+ const size_t ai_buf_size = NI_MAXHOST + NI_MAXSERV + 2;
+ char ai_buf[ai_buf_size];
for (ai = ai_head; ai && nsockfd < MAXSOCKFD; ai = ai->ai_next) {
- sockfdlist[nsockfd] = socket(ai->ai_family, ai->ai_socktype,
- ai->ai_protocol);
- if (sockfdlist[nsockfd] < 0)
+ int sock;
+ addrinfo_to_text(ai, ai_buf, ai_buf_size);
+ dbg("opening %s", ai_buf);
+ sock = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
+ if (sock < 0) {
+ err("socket: %s: %d (%s)",
+ ai_buf, errno, strerror(errno));
continue;
+ }
- usbip_net_set_reuseaddr(sockfdlist[nsockfd]);
- usbip_net_set_nodelay(sockfdlist[nsockfd]);
+ usbip_net_set_reuseaddr(sock);
+ usbip_net_set_nodelay(sock);
- if (sockfdlist[nsockfd] >= FD_SETSIZE) {
- close(sockfdlist[nsockfd]);
- sockfdlist[nsockfd] = -1;
+ if (sock >= FD_SETSIZE) {
+ err("FD_SETSIZE: %s: sock=%d, max=%d",
+ ai_buf, sock, FD_SETSIZE);
+ close(sock);
continue;
}
- ret = bind(sockfdlist[nsockfd], ai->ai_addr, ai->ai_addrlen);
+ ret = bind(sock, ai->ai_addr, ai->ai_addrlen);
if (ret < 0) {
- close(sockfdlist[nsockfd]);
- sockfdlist[nsockfd] = -1;
+ err("bind: %s: %d (%s)",
+ ai_buf, errno, strerror(errno));
+ close(sock);
continue;
}
- ret = listen(sockfdlist[nsockfd], SOMAXCONN);
+ ret = listen(sock, SOMAXCONN);
if (ret < 0) {
- close(sockfdlist[nsockfd]);
- sockfdlist[nsockfd] = -1;
+ err("listen: %s: %d (%s)",
+ ai_buf, errno, strerror(errno));
+ close(sock);
continue;
}
- log_addrinfo(ai);
- nsockfd++;
+ info("listening on %s", ai_buf);
+ sockfdlist[nsockfd++] = sock;
}
if (nsockfd == 0)
@@ -398,9 +420,9 @@ static struct addrinfo *do_getaddrinfo(char *host, int ai_family)
hints.ai_socktype = SOCK_STREAM;
hints.ai_flags = AI_PASSIVE;
- rc = getaddrinfo(host, USBIP_PORT_STRING, &hints, &ai_head);
+ rc = getaddrinfo(host, usbip_port_string, &hints, &ai_head);
if (rc) {
- err("failed to get a network address %s: %s", USBIP_PORT_STRING,
+ err("failed to get a network address %s: %s", usbip_port_string,
gai_strerror(rc));
return NULL;
}
@@ -426,6 +448,31 @@ static void set_signal(void)
sigaction(SIGCLD, &act, NULL);
}
+static const char *pid_file;
+
+static void write_pid_file()
+{
+ if (pid_file) {
+ dbg("creating pid file %s", pid_file);
+ FILE *fp = fopen(pid_file, "w");
+ if (!fp) {
+ err("pid_file: %s: %d (%s)",
+ pid_file, errno, strerror(errno));
+ return;
+ }
+ fprintf(fp, "%d\n", getpid());
+ fclose(fp);
+ }
+}
+
+static void remove_pid_file()
+{
+ if (pid_file) {
+ dbg("removing pid file %s", pid_file);
+ unlink(pid_file);
+ }
+}
+
static int do_standalone_mode(int daemonize)
{
struct addrinfo *ai_head;
@@ -452,6 +499,7 @@ static int do_standalone_mode(int daemonize)
usbip_use_syslog = 1;
}
set_signal();
+ write_pid_file();
ai_head = do_getaddrinfo(NULL, PF_UNSPEC);
if (!ai_head) {
@@ -496,8 +544,9 @@ static int do_standalone_mode(int daemonize)
process_request(sockfdlist[i]);
}
}
- } else
+ } else {
dbg("heartbeat timeout on ppoll()");
+ }
}
info("shutting down " PROGNAME);
@@ -511,11 +560,13 @@ static int do_standalone_mode(int daemonize)
int main(int argc, char *argv[])
{
static const struct option longopts[] = {
- { "daemon", no_argument, NULL, 'D' },
- { "debug", no_argument, NULL, 'd' },
- { "help", no_argument, NULL, 'h' },
- { "version", no_argument, NULL, 'v' },
- { NULL, 0, NULL, 0 }
+ { "daemon", no_argument, NULL, 'D' },
+ { "debug", no_argument, NULL, 'd' },
+ { "pid", optional_argument, NULL, 'P' },
+ { "tcp-port", required_argument, NULL, 't' },
+ { "help", no_argument, NULL, 'h' },
+ { "version", no_argument, NULL, 'v' },
+ { NULL, 0, NULL, 0 }
};
enum {
@@ -526,6 +577,7 @@ int main(int argc, char *argv[])
int daemonize = 0;
int opt, rc = -1;
+ pid_file = NULL;
usbip_use_stderr = 1;
usbip_use_syslog = 0;
@@ -535,7 +587,7 @@ int main(int argc, char *argv[])
cmd = cmd_standalone_mode;
for (;;) {
- opt = getopt_long(argc, argv, "Ddhv", longopts, NULL);
+ opt = getopt_long(argc, argv, "DdP::t:hv", longopts, NULL);
if (opt == -1)
break;
@@ -550,6 +602,12 @@ int main(int argc, char *argv[])
case 'h':
cmd = cmd_help;
break;
+ case 'P':
+ pid_file = optarg ? optarg : DEFAULT_PID_FILE;
+ break;
+ case 't':
+ usbip_setup_port_number(optarg);
+ break;
case 'v':
cmd = cmd_version;
break;
@@ -563,6 +621,7 @@ int main(int argc, char *argv[])
switch (cmd) {
case cmd_standalone_mode:
rc = do_standalone_mode(daemonize);
+ remove_pid_file();
break;
case cmd_version:
printf(PROGNAME " (%s)\n", usbip_version_string);
diff --git a/drivers/staging/usbip/vhci_sysfs.c b/drivers/staging/usbip/vhci_sysfs.c
index c66e9c0..9b51586 100644
--- a/drivers/staging/usbip/vhci_sysfs.c
+++ b/drivers/staging/usbip/vhci_sysfs.c
@@ -27,7 +27,7 @@
/* TODO: refine locking ?*/
/* Sysfs entry to show port status */
-static ssize_t show_status(struct device *dev, struct device_attribute *attr,
+static ssize_t status_show(struct device *dev, struct device_attribute *attr,
char *out)
{
char *s = out;
@@ -74,7 +74,7 @@ static ssize_t show_status(struct device *dev, struct device_attribute *attr,
return out - s;
}
-static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
+static DEVICE_ATTR_RO(status);
/* Sysfs entry to shutdown a virtual connection */
static int vhci_port_disconnect(__u32 rhport)
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index da7f759..daec155 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -109,7 +109,7 @@ struct driver_stats {
unsigned long ioctls;
unsigned long irqs;
unsigned long berrs;
- unsigned long dmaErrors;
+ unsigned long dmaerrors;
unsigned long timeouts;
unsigned long external;
};
@@ -160,7 +160,7 @@ static void reset_counters(void)
statistics.ioctls = 0;
statistics.irqs = 0;
statistics.berrs = 0;
- statistics.dmaErrors = 0;
+ statistics.dmaerrors = 0;
statistics.timeouts = 0;
}
@@ -734,6 +734,7 @@ static int vme_user_probe(struct vme_dev *vdev)
if (image[i].resource == NULL) {
dev_warn(&vdev->dev,
"Unable to allocate slave resource\n");
+ err = -ENOMEM;
goto err_slave;
}
image[i].size_buf = PCI_BUF_SIZE;
@@ -760,6 +761,7 @@ static int vme_user_probe(struct vme_dev *vdev)
if (image[i].resource == NULL) {
dev_warn(&vdev->dev,
"Unable to allocate master resource\n");
+ err = -ENOMEM;
goto err_master;
}
image[i].size_buf = PCI_BUF_SIZE;
diff --git a/drivers/staging/vme/devices/vme_user.h b/drivers/staging/vme/devices/vme_user.h
index 7d24cd6..280ccc7 100644
--- a/drivers/staging/vme/devices/vme_user.h
+++ b/drivers/staging/vme/devices/vme_user.h
@@ -14,9 +14,9 @@ struct vme_master {
u32 cycle; /* Cycle properties */
u32 dwidth; /* Maximum Data Width */
#if 0
- char prefetchEnable; /* Prefetch Read Enable State */
- int prefetchSize; /* Prefetch Read Size (Cache Lines) */
- char wrPostEnable; /* Write Post State */
+ char prefetchenable; /* Prefetch Read Enable State */
+ int prefetchsize; /* Prefetch Read Size (Cache Lines) */
+ char wrpostenable; /* Write Post State */
#endif
};
@@ -37,9 +37,9 @@ struct vme_slave {
u32 aspace; /* Address Space */
u32 cycle; /* Cycle properties */
#if 0
- char wrPostEnable; /* Write Post State */
- char rmwLock; /* Lock PCI during RMW Cycles */
- char data64BitCapable; /* non-VMEbus capable of 64-bit Data */
+ char wrpostenable; /* Write Post State */
+ char rmwlock; /* Lock PCI during RMW Cycles */
+ char data64bitcapable; /* non-VMEbus capable of 64-bit Data */
#endif
};
diff --git a/drivers/staging/vt6655/80211hdr.h b/drivers/staging/vt6655/80211hdr.h
index 28078a1..ba53340 100644
--- a/drivers/staging/vt6655/80211hdr.h
+++ b/drivers/staging/vt6655/80211hdr.h
@@ -68,7 +68,7 @@
#define BIT30 0x40000000
#define BIT31 0x80000000
-// 802.11 frame related, defined as 802.11 spec
+/* 802.11 frame related, defined as 802.11 spec */
#define WLAN_ADDR_LEN 6
#define WLAN_CRC_LEN 4
#define WLAN_CRC32_LEN 4
diff --git a/drivers/staging/vt6655/80211mgr.c b/drivers/staging/vt6655/80211mgr.c
index 4cb26f3..76c8490 100644
--- a/drivers/staging/vt6655/80211mgr.c
+++ b/drivers/staging/vt6655/80211mgr.c
@@ -66,7 +66,7 @@
/*--------------------- Static Variables --------------------------*/
static int msglevel = MSG_LEVEL_INFO;
-//static int msglevel =MSG_LEVEL_DEBUG;
+/* static int msglevel =MSG_LEVEL_DEBUG; */
/*--------------------- Static Functions --------------------------*/
/*--------------------- Export Variables --------------------------*/
@@ -90,7 +90,7 @@ vMgrEncodeBeacon(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pqwTimestamp = (PQWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_BEACON_OFF_TS);
pFrame->pwBeaconInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -123,7 +123,7 @@ vMgrDecodeBeacon(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pqwTimestamp = (PQWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_BEACON_OFF_TS);
pFrame->pwBeaconInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -131,7 +131,7 @@ vMgrDecodeBeacon(
pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_BEACON_OFF_CAPINFO);
- // Information elements
+ /* Information elements */
pItem = (PWLAN_IE)((unsigned char *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)))
+ WLAN_BEACON_OFF_SSID);
while (((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len)) {
@@ -145,7 +145,7 @@ vMgrDecodeBeacon(
pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
break;
case WLAN_EID_FH_PARMS:
- //pFrame->pFHParms = (PWLAN_IE_FH_PARMS)pItem;
+ /* pFrame->pFHParms = (PWLAN_IE_FH_PARMS)pItem; */
break;
case WLAN_EID_DS_PARMS:
if (pFrame->pDSParms == NULL)
@@ -185,22 +185,22 @@ vMgrDecodeBeacon(
pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
break;
- case WLAN_EID_COUNTRY: //7
+ case WLAN_EID_COUNTRY: /* 7 */
if (pFrame->pIE_Country == NULL)
pFrame->pIE_Country = (PWLAN_IE_COUNTRY)pItem;
break;
- case WLAN_EID_PWR_CONSTRAINT: //32
+ case WLAN_EID_PWR_CONSTRAINT: /* 32 */
if (pFrame->pIE_PowerConstraint == NULL)
pFrame->pIE_PowerConstraint = (PWLAN_IE_PW_CONST)pItem;
break;
- case WLAN_EID_CH_SWITCH: //37
+ case WLAN_EID_CH_SWITCH: /* 37 */
if (pFrame->pIE_CHSW == NULL)
pFrame->pIE_CHSW = (PWLAN_IE_CH_SW)pItem;
break;
- case WLAN_EID_QUIET: //40
+ case WLAN_EID_QUIET: /* 40 */
if (pFrame->pIE_Quiet == NULL)
pFrame->pIE_Quiet = (PWLAN_IE_QUIET)pItem;
break;
@@ -282,7 +282,7 @@ vMgrEncodeDisassociation(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwReason = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_DISASSOC_OFF_REASON);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_DISASSOC_OFF_REASON + sizeof(*(pFrame->pwReason));
@@ -308,7 +308,7 @@ vMgrDecodeDisassociation(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwReason = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_DISASSOC_OFF_REASON);
@@ -332,7 +332,7 @@ vMgrEncodeAssocRequest(
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCREQ_OFF_CAP_INFO);
pFrame->pwListenInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -360,13 +360,13 @@ vMgrDecodeAssocRequest(
PWLAN_IE pItem;
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCREQ_OFF_CAP_INFO);
pFrame->pwListenInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCREQ_OFF_LISTEN_INT);
- // Information elements
+ /* Information elements */
pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCREQ_OFF_SSID);
@@ -425,7 +425,7 @@ vMgrEncodeAssocResponse(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCRESP_OFF_CAP_INFO);
pFrame->pwStatus = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -458,7 +458,7 @@ vMgrDecodeAssocResponse(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCRESP_OFF_CAP_INFO);
pFrame->pwStatus = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -466,7 +466,7 @@ vMgrDecodeAssocResponse(
pFrame->pwAid = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCRESP_OFF_AID);
- // Information elements
+ /* Information elements */
pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCRESP_OFF_SUPP_RATES);
@@ -501,7 +501,7 @@ vMgrEncodeReassocRequest(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCREQ_OFF_CAP_INFO);
pFrame->pwListenInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -532,7 +532,7 @@ vMgrDecodeReassocRequest(
PWLAN_IE pItem;
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCREQ_OFF_CAP_INFO);
pFrame->pwListenInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -540,7 +540,7 @@ vMgrDecodeReassocRequest(
pFrame->pAddrCurrAP = (PIEEE_ADDR)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCREQ_OFF_CURR_AP);
- // Information elements
+ /* Information elements */
pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCREQ_OFF_SSID);
@@ -622,7 +622,7 @@ vMgrDecodeProbeRequest(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Information elements
+ /* Information elements */
pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3)));
while (((unsigned char *)pItem) < (pFrame->pBuf + pFrame->len)) {
@@ -670,7 +670,7 @@ vMgrEncodeProbeResponse(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pqwTimestamp = (PQWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_PROBERESP_OFF_TS);
pFrame->pwBeaconInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -704,7 +704,7 @@ vMgrDecodeProbeResponse(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pqwTimestamp = (PQWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_PROBERESP_OFF_TS);
pFrame->pwBeaconInterval = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -712,7 +712,7 @@ vMgrDecodeProbeResponse(
pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_PROBERESP_OFF_CAP_INFO);
- // Information elements
+ /* Information elements */
pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_PROBERESP_OFF_SSID);
@@ -761,22 +761,22 @@ vMgrDecodeProbeResponse(
pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
break;
- case WLAN_EID_COUNTRY: //7
+ case WLAN_EID_COUNTRY: /* 7 */
if (pFrame->pIE_Country == NULL)
pFrame->pIE_Country = (PWLAN_IE_COUNTRY)pItem;
break;
- case WLAN_EID_PWR_CONSTRAINT: //32
+ case WLAN_EID_PWR_CONSTRAINT: /* 32 */
if (pFrame->pIE_PowerConstraint == NULL)
pFrame->pIE_PowerConstraint = (PWLAN_IE_PW_CONST)pItem;
break;
- case WLAN_EID_CH_SWITCH: //37
+ case WLAN_EID_CH_SWITCH: /* 37 */
if (pFrame->pIE_CHSW == NULL)
pFrame->pIE_CHSW = (PWLAN_IE_CH_SW)pItem;
break;
- case WLAN_EID_QUIET: //40
+ case WLAN_EID_QUIET: /* 40 */
if (pFrame->pIE_Quiet == NULL)
pFrame->pIE_Quiet = (PWLAN_IE_QUIET)pItem;
break;
@@ -814,7 +814,7 @@ vMgrEncodeAuthen(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwAuthAlgorithm = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_AUTHEN_OFF_AUTH_ALG);
pFrame->pwAuthSequence = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -846,7 +846,7 @@ vMgrDecodeAuthen(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwAuthAlgorithm = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_AUTHEN_OFF_AUTH_ALG);
pFrame->pwAuthSequence = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -854,7 +854,7 @@ vMgrDecodeAuthen(
pFrame->pwStatus = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_AUTHEN_OFF_STATUS);
- // Information elements
+ /* Information elements */
pItem = (PWLAN_IE)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_AUTHEN_OFF_CHALLENGE);
@@ -883,7 +883,7 @@ vMgrEncodeDeauthen(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwReason = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_DEAUTHEN_OFF_REASON);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_DEAUTHEN_OFF_REASON + sizeof(*(pFrame->pwReason));
@@ -909,7 +909,7 @@ vMgrDecodeDeauthen(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwReason = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_DEAUTHEN_OFF_REASON);
@@ -934,7 +934,7 @@ vMgrEncodeReassocResponse(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCRESP_OFF_CAP_INFO);
pFrame->pwStatus = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -967,7 +967,7 @@ vMgrDecodeReassocResponse(
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
- // Fixed Fields
+ /* Fixed Fields */
pFrame->pwCapInfo = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCRESP_OFF_CAP_INFO);
pFrame->pwStatus = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
@@ -975,7 +975,7 @@ vMgrDecodeReassocResponse(
pFrame->pwAid = (unsigned short *)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCRESP_OFF_AID);
- //Information elements
+ /* Information elements */
pFrame->pSuppRates = (PWLAN_IE_SUPP_RATES)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCRESP_OFF_SUPP_RATES);
diff --git a/drivers/staging/vt6655/80211mgr.h b/drivers/staging/vt6655/80211mgr.h
index 16402cf..065238b 100644
--- a/drivers/staging/vt6655/80211mgr.h
+++ b/drivers/staging/vt6655/80211mgr.h
@@ -38,7 +38,7 @@
#define WLAN_MIN_ARRAY 1
-// Information Element ID value
+/* Information Element ID value */
#define WLAN_EID_SSID 0
#define WLAN_EID_SUPP_RATES 1
#define WLAN_EID_FH_PARMS 2
@@ -59,17 +59,17 @@
#define WLAN_EID_QUIET 40
#define WLAN_EID_IBSS_DFS 41
#define WLAN_EID_ERP 42
-// reference 802.11i 7.3.2 table 20
+/* reference 802.11i 7.3.2 table 20 */
#define WLAN_EID_RSN 48
#define WLAN_EID_EXTSUPP_RATES 50
-// reference WiFi WPA spec.
+/* reference WiFi WPA spec. */
#define WLAN_EID_RSN_WPA 221
#define WLAN_EID_ERP_NONERP_PRESENT 0x01
#define WLAN_EID_ERP_USE_PROTECTION 0x02
#define WLAN_EID_ERP_BARKER_MODE 0x04
-// Reason Codes
+/* Reason Codes */
#define WLAN_MGMT_REASON_RSVD 0
#define WLAN_MGMT_REASON_UNSPEC 1
#define WLAN_MGMT_REASON_PRIOR_AUTH_INVALID 2
@@ -94,7 +94,7 @@
#define WLAN_MGMT_REASON_RSNE_CAP_INVALID 22
#define WLAN_MGMT_REASON_80211X_AUTH_FAILED 23
-// Status Codes
+/* Status Codes */
#define WLAN_MGMT_STATUS_SUCCESS 0
#define WLAN_MGMT_STATUS_UNSPEC_FAILURE 1
#define WLAN_MGMT_STATUS_CAPS_UNSUPPORTED 10
@@ -110,19 +110,14 @@
#define WLAN_MGMT_STATUS_ASSOC_DENIED_PBCC 20
#define WLAN_MGMT_STATUS_ASSOC_DENIED_AGILITY 21
-// reference 802.11h 7.3.1.9
-//
+/* reference 802.11h 7.3.1.9 */
#define WLAN_MGMT_STATUS_ASSOC_REJECT_BCS_SPECTRUM_MNG 22
#define WLAN_MGMT_STATUS_ASSOC_REJECT_BCS_PWR_CAP 23
#define WLAN_MGMT_STATUS_ASSOC_REJECT_BCS_SUPP_CH 24
-//
-// reference 802.11g 7.3.1.9
-//
+/* reference 802.11g 7.3.1.9 */
#define WLAN_MGMT_STATUS_SHORTSLOTTIME_UNSUPPORTED 25
#define WLAN_MGMT_STATUS_DSSSOFDM_UNSUPPORTED 26
-//
-// reference 802.11i 7.3.1.9 table 19
-//
+/* reference 802.11i 7.3.1.9 table 19 */
#define WLAN_MGMT_STATUS_INVALID_IE 40
#define WLAN_MGMT_STATUS_GROUP_CIPHER_INVALID 41
#define WLAN_MGMT_STATUS_PAIRWISE_CIPHER_INVALID 42
@@ -131,13 +126,13 @@
#define WLAN_MGMT_STATUS_INVALID_RSN_IE_CAP 45
#define WLAN_MGMT_STATUS_CIPHER_REJECT 46
-// Auth Algorithm
+/* Auth Algorithm */
#define WLAN_AUTH_ALG_OPENSYSTEM 0
#define WLAN_AUTH_ALG_SHAREDKEY 1
-// Management Frame Field Offsets
-// Note: Not all fields are listed because of variable lengths.
-// Note: These offsets are from the start of the frame data
+/* Management Frame Field Offsets */
+/* Note: Not all fields are listed because of variable lengths. */
+/* Note: These offsets are from the start of the frame data */
#define WLAN_BEACON_OFF_TS 0
#define WLAN_BEACON_OFF_BCN_INT 8
@@ -179,9 +174,7 @@
#define WLAN_DEAUTHEN_OFF_REASON 0
-//
-// Cipher Suite Selectors defined in 802.11i
-//
+/* Cipher Suite Selectors defined in 802.11i */
#define WLAN_11i_CSS_USE_GROUP 0
#define WLAN_11i_CSS_WEP40 1
#define WLAN_11i_CSS_TKIP 2
@@ -189,24 +182,22 @@
#define WLAN_11i_CSS_WEP104 5
#define WLAN_11i_CSS_UNKNOWN 255
-//
-// Authentication and Key Management Suite Selectors defined in 802.11i
-//
+/* Authentication and Key Management Suite Selectors defined in 802.11i */
#define WLAN_11i_AKMSS_802_1X 1
#define WLAN_11i_AKMSS_PSK 2
#define WLAN_11i_AKMSS_UNKNOWN 255
-// Measurement type definitions reference ieee 802.11h Table 20b
+/* Measurement type definitions reference ieee 802.11h Table 20b */
#define MEASURE_TYPE_BASIC 0
#define MEASURE_TYPE_CCA 1
#define MEASURE_TYPE_RPI 2
-// Measurement request mode definitions reference ieee 802.11h Figure 46h
+/* Measurement request mode definitions reference ieee 802.11h Figure 46h */
#define MEASURE_MODE_ENABLE 0x02
#define MEASURE_MODE_REQ 0x04
#define MEASURE_MODE_REP 0x08
-// Measurement report mode definitions reference ieee 802.11h Figure 46m
+/* Measurement report mode definitions reference ieee 802.11h Figure 46m */
#define MEASURE_MODE_LATE 0x01
#define MEASURE_MODE_INCAPABLE 0x02
#define MEASURE_MODE_REFUSED 0x04
@@ -217,7 +208,7 @@
/*--------------------- Export Types ------------------------------*/
-// Information Element Types
+/* Information Element Types */
#pragma pack(1)
typedef struct tagWLAN_IE {
@@ -226,7 +217,7 @@ typedef struct tagWLAN_IE {
} __attribute__ ((__packed__))
WLAN_IE, *PWLAN_IE;
-// Service Set Identity (SSID)
+/* Service Set Identity (SSID) */
#pragma pack(1)
typedef struct tagWLAN_IE_SSID {
unsigned char byElementID;
@@ -235,7 +226,7 @@ typedef struct tagWLAN_IE_SSID {
} __attribute__ ((__packed__))
WLAN_IE_SSID, *PWLAN_IE_SSID;
-// Supported Rates
+/* Supported Rates */
#pragma pack(1)
typedef struct tagWLAN_IE_SUPP_RATES {
unsigned char byElementID;
@@ -244,7 +235,7 @@ typedef struct tagWLAN_IE_SUPP_RATES {
} __attribute__ ((__packed__))
WLAN_IE_SUPP_RATES, *PWLAN_IE_SUPP_RATES;
-// FH Parameter Set
+/* FH Parameter Set */
#pragma pack(1)
typedef struct _WLAN_IE_FH_PARMS {
unsigned char byElementID;
@@ -255,7 +246,7 @@ typedef struct _WLAN_IE_FH_PARMS {
unsigned char byHopIndex;
} WLAN_IE_FH_PARMS, *PWLAN_IE_FH_PARMS;
-// DS Parameter Set
+/* DS Parameter Set */
#pragma pack(1)
typedef struct tagWLAN_IE_DS_PARMS {
unsigned char byElementID;
@@ -264,7 +255,7 @@ typedef struct tagWLAN_IE_DS_PARMS {
} __attribute__ ((__packed__))
WLAN_IE_DS_PARMS, *PWLAN_IE_DS_PARMS;
-// CF Parameter Set
+/* CF Parameter Set */
#pragma pack(1)
typedef struct tagWLAN_IE_CF_PARMS {
unsigned char byElementID;
@@ -276,7 +267,7 @@ typedef struct tagWLAN_IE_CF_PARMS {
} __attribute__ ((__packed__))
WLAN_IE_CF_PARMS, *PWLAN_IE_CF_PARMS;
-// TIM
+/* TIM */
#pragma pack(1)
typedef struct tagWLAN_IE_TIM {
unsigned char byElementID;
@@ -288,7 +279,7 @@ typedef struct tagWLAN_IE_TIM {
} __attribute__ ((__packed__))
WLAN_IE_TIM, *PWLAN_IE_TIM;
-// IBSS Parameter Set
+/* IBSS Parameter Set */
#pragma pack(1)
typedef struct tagWLAN_IE_IBSS_PARMS {
unsigned char byElementID;
@@ -297,7 +288,7 @@ typedef struct tagWLAN_IE_IBSS_PARMS {
} __attribute__ ((__packed__))
WLAN_IE_IBSS_PARMS, *PWLAN_IE_IBSS_PARMS;
-// Challenge Text
+/* Challenge Text */
#pragma pack(1)
typedef struct tagWLAN_IE_CHALLENGE {
unsigned char byElementID;
@@ -316,8 +307,8 @@ typedef struct tagWLAN_IE_RSN_EXT {
unsigned short wPKCount;
struct {
unsigned char abyOUI[4];
- } PKSList[1]; // the rest is variable so need to
- // overlay ieauth structure
+ } PKSList[1]; /* the rest is variable so need to */
+ /* overlay ieauth structure */
} WLAN_IE_RSN_EXT, *PWLAN_IE_RSN_EXT;
#pragma pack(1)
@@ -328,7 +319,7 @@ typedef struct tagWLAN_IE_RSN_AUTH {
} AuthKSList[1];
} WLAN_IE_RSN_AUTH, *PWLAN_IE_RSN_AUTH;
-// RSN Identity
+/* RSN Identity */
#pragma pack(1)
typedef struct tagWLAN_IE_RSN {
unsigned char byElementID;
@@ -337,7 +328,7 @@ typedef struct tagWLAN_IE_RSN {
unsigned char abyRSN[WLAN_MIN_ARRAY];
} WLAN_IE_RSN, *PWLAN_IE_RSN;
-// ERP
+/* ERP */
#pragma pack(1)
typedef struct tagWLAN_IE_ERP {
unsigned char byElementID;
@@ -466,8 +457,8 @@ typedef struct _WLAN_IE_IBSS_DFS {
#pragma pack()
-// Frame Types
-// prototype structure, all mgmt frame types will start with these members
+/* Frame Types */
+/* prototype structure, all mgmt frame types will start with these members */
typedef struct tagWLAN_FR_MGMT {
unsigned int uType;
unsigned int len;
@@ -475,20 +466,20 @@ typedef struct tagWLAN_FR_MGMT {
PUWLAN_80211HDR pHdr;
} WLAN_FR_MGMT, *PWLAN_FR_MGMT;
-// Beacon frame
+/* Beacon frame */
typedef struct tagWLAN_FR_BEACON {
unsigned int uType;
unsigned int len;
unsigned char *pBuf;
PUWLAN_80211HDR pHdr;
- // fixed fields
+ /* fixed fields */
PQWORD pqwTimestamp;
unsigned short *pwBeaconInterval;
unsigned short *pwCapInfo;
/*-- info elements ----------*/
PWLAN_IE_SSID pSSID;
PWLAN_IE_SUPP_RATES pSuppRates;
-// PWLAN_IE_FH_PARMS pFHParms;
+/* PWLAN_IE_FH_PARMS pFHParms; */
PWLAN_IE_DS_PARMS pDSParms;
PWLAN_IE_CF_PARMS pCFParms;
PWLAN_IE_TIM pTIM;
@@ -504,19 +495,19 @@ typedef struct tagWLAN_FR_BEACON {
PWLAN_IE_QUIET pIE_Quiet;
} WLAN_FR_BEACON, *PWLAN_FR_BEACON;
-// IBSS ATIM frame
+/* IBSS ATIM frame */
typedef struct tagWLAN_FR_IBSSATIM {
unsigned int uType;
unsigned int len;
unsigned char *pBuf;
PUWLAN_80211HDR pHdr;
- // fixed fields
- // info elements
- // this frame type has a null body
+ /* fixed fields */
+ /* info elements */
+ /* this frame type has a null body */
} WLAN_FR_IBSSATIM, *PWLAN_FR_IBSSATIM;
-// Disassociation
+/* Disassociation */
typedef struct tagWLAN_FR_DISASSOC {
unsigned int uType;
unsigned int len;
@@ -527,7 +518,7 @@ typedef struct tagWLAN_FR_DISASSOC {
/*-- info elements ----------*/
} WLAN_FR_DISASSOC, *PWLAN_FR_DISASSOC;
-// Association Request
+/* Association Request */
typedef struct tagWLAN_FR_ASSOCREQ {
unsigned int uType;
unsigned int len;
@@ -546,7 +537,7 @@ typedef struct tagWLAN_FR_ASSOCREQ {
PWLAN_IE_SUPP_CH pCurrSuppCh;
} WLAN_FR_ASSOCREQ, *PWLAN_FR_ASSOCREQ;
-// Association Response
+/* Association Response */
typedef struct tagWLAN_FR_ASSOCRESP {
unsigned int uType;
unsigned int len;
@@ -561,7 +552,7 @@ typedef struct tagWLAN_FR_ASSOCRESP {
PWLAN_IE_SUPP_RATES pExtSuppRates;
} WLAN_FR_ASSOCRESP, *PWLAN_FR_ASSOCRESP;
-// Reassociation Request
+/* Reassociation Request */
typedef struct tagWLAN_FR_REASSOCREQ {
unsigned int uType;
unsigned int len;
@@ -581,7 +572,7 @@ typedef struct tagWLAN_FR_REASSOCREQ {
PWLAN_IE_SUPP_RATES pExtSuppRates;
} WLAN_FR_REASSOCREQ, *PWLAN_FR_REASSOCREQ;
-// Reassociation Response
+/* Reassociation Response */
typedef struct tagWLAN_FR_REASSOCRESP {
unsigned int uType;
unsigned int len;
@@ -596,7 +587,7 @@ typedef struct tagWLAN_FR_REASSOCRESP {
PWLAN_IE_SUPP_RATES pExtSuppRates;
} WLAN_FR_REASSOCRESP, *PWLAN_FR_REASSOCRESP;
-// Probe Request
+/* Probe Request */
typedef struct tagWLAN_FR_PROBEREQ {
unsigned int uType;
unsigned int len;
@@ -609,7 +600,7 @@ typedef struct tagWLAN_FR_PROBEREQ {
PWLAN_IE_SUPP_RATES pExtSuppRates;
} WLAN_FR_PROBEREQ, *PWLAN_FR_PROBEREQ;
-// Probe Response
+/* Probe Response */
typedef struct tagWLAN_FR_PROBERESP {
unsigned int uType;
unsigned int len;
@@ -636,7 +627,7 @@ typedef struct tagWLAN_FR_PROBERESP {
PWLAN_IE_QUIET pIE_Quiet;
} WLAN_FR_PROBERESP, *PWLAN_FR_PROBERESP;
-// Authentication
+/* Authentication */
typedef struct tagWLAN_FR_AUTHEN {
unsigned int uType;
unsigned int len;
@@ -650,7 +641,7 @@ typedef struct tagWLAN_FR_AUTHEN {
PWLAN_IE_CHALLENGE pChallenge;
} WLAN_FR_AUTHEN, *PWLAN_FR_AUTHEN;
-// Deauthenication
+/* Deauthenication */
typedef struct tagWLAN_FR_DEAUTHEN {
unsigned int uType;
unsigned int len;
@@ -774,4 +765,4 @@ vMgrDecodeReassocResponse(
PWLAN_FR_REASSOCRESP pFrame
);
-#endif// __80211MGR_H__
+#endif/* __80211MGR_H__ */
diff --git a/drivers/staging/vt6655/aes_ccmp.c b/drivers/staging/vt6655/aes_ccmp.c
index 3608148..fc056fc 100644
--- a/drivers/staging/vt6655/aes_ccmp.c
+++ b/drivers/staging/vt6655/aes_ccmp.c
@@ -205,7 +205,7 @@ void AESv128(unsigned char *key, unsigned char *data, unsigned char *ciphertext)
SubBytes(ciphertext, TmpdataA);
ShiftRows(TmpdataA, TmpdataB);
xor_128(TmpdataB, abyRoundKey, ciphertext);
- } else // round 1 ~ 9
+ } else /* round 1 ~ 9 */
{
SubBytes(ciphertext, TmpdataA);
ShiftRows(TmpdataA, TmpdataB);
@@ -249,7 +249,7 @@ bool AESbGenCCMP(unsigned char *pbyRxKey, unsigned char *pbyFrame, unsigned shor
unsigned char *pbyIV;
unsigned char *pbyPayload;
unsigned short wHLen = 22;
- unsigned short wPayloadSize = wFrameSize - 8 - 8 - 4 - WLAN_HDR_ADDR3_LEN;//8 is IV, 8 is MIC, 4 is CRC
+ unsigned short wPayloadSize = wFrameSize - 8 - 8 - 4 - WLAN_HDR_ADDR3_LEN;/* 8 is IV, 8 is MIC, 4 is CRC */
bool bA4 = false;
unsigned char byTmp;
unsigned short wCnt;
@@ -259,13 +259,13 @@ bool AESbGenCCMP(unsigned char *pbyRxKey, unsigned char *pbyFrame, unsigned shor
if (WLAN_GET_FC_TODS(*(unsigned short *)pbyFrame) &&
WLAN_GET_FC_FROMDS(*(unsigned short *)pbyFrame)) {
bA4 = true;
- pbyIV += 6; // 6 is 802.11 address4
+ pbyIV += 6; /* 6 is 802.11 address4 */
wHLen += 6;
wPayloadSize -= 6;
}
- pbyPayload = pbyIV + 8; //IV-length
+ pbyPayload = pbyIV + 8; /* IV-length */
- abyNonce[0] = 0x00; //now is 0, if Qos here will be priority
+ abyNonce[0] = 0x00; /* now is 0, if Qos here will be priority */
memcpy(&(abyNonce[1]), pMACHeader->abyAddr2, ETH_ALEN);
abyNonce[7] = pbyIV[7];
abyNonce[8] = pbyIV[6];
@@ -274,13 +274,13 @@ bool AESbGenCCMP(unsigned char *pbyRxKey, unsigned char *pbyFrame, unsigned shor
abyNonce[11] = pbyIV[1];
abyNonce[12] = pbyIV[0];
- //MIC_IV
+ /* MIC_IV */
MIC_IV[0] = 0x59;
memcpy(&(MIC_IV[1]), &(abyNonce[0]), 13);
MIC_IV[14] = (unsigned char)(wPayloadSize >> 8);
MIC_IV[15] = (unsigned char)(wPayloadSize & 0xff);
- //MIC_HDR1
+ /* MIC_HDR1 */
MIC_HDR1[0] = (unsigned char)(wHLen >> 8);
MIC_HDR1[1] = (unsigned char)(wHLen & 0xff);
byTmp = (unsigned char)(pMACHeader->wFrameCtl & 0xff);
@@ -291,7 +291,7 @@ bool AESbGenCCMP(unsigned char *pbyRxKey, unsigned char *pbyFrame, unsigned shor
memcpy(&(MIC_HDR1[4]), pMACHeader->abyAddr1, ETH_ALEN);
memcpy(&(MIC_HDR1[10]), pMACHeader->abyAddr2, ETH_ALEN);
- //MIC_HDR2
+ /* MIC_HDR2 */
memcpy(&(MIC_HDR2[0]), pMACHeader->abyAddr3, ETH_ALEN);
byTmp = (unsigned char)(pMACHeader->wSeqCtl & 0xff);
MIC_HDR2[6] = byTmp & 0x0f;
@@ -309,7 +309,7 @@ bool AESbGenCCMP(unsigned char *pbyRxKey, unsigned char *pbyFrame, unsigned shor
MIC_HDR2[14] = 0x00;
MIC_HDR2[15] = 0x00;
- //CCMP
+ /* CCMP */
AESv128(pbyRxKey, MIC_IV, abyMIC);
for (kk = 0; kk < 16; kk++) {
abyTmp[kk] = MIC_HDR1[kk] ^ abyMIC[kk];
@@ -341,9 +341,9 @@ bool AESbGenCCMP(unsigned char *pbyRxKey, unsigned char *pbyFrame, unsigned shor
memcpy(pbyPayload, abyPlainText, 16);
wCnt++;
pbyPayload += 16;
- } //for wPayloadSize
+ } /* for wPayloadSize */
- //last payload
+ /* last payload */
memcpy(&(abyLastCipher[0]), pbyPayload, jj);
for (ii = jj; ii < 16; ii++) {
abyLastCipher[ii] = 0x00;
@@ -359,7 +359,7 @@ bool AESbGenCCMP(unsigned char *pbyRxKey, unsigned char *pbyFrame, unsigned shor
memcpy(pbyPayload, abyPlainText, jj);
pbyPayload += jj;
- //for MIC calculation
+ /* for MIC calculation */
for (ii = jj; ii < 16; ii++) {
abyPlainText[ii] = 0x00;
}
@@ -368,8 +368,8 @@ bool AESbGenCCMP(unsigned char *pbyRxKey, unsigned char *pbyFrame, unsigned shor
}
AESv128(pbyRxKey, abyTmp, abyMIC);
- //=>above is the calculate MIC
- //--------------------------------------------
+ /* =>above is the calculate MIC */
+ /* -------------------------------------------- */
wCnt = 0;
abyCTRPLD[14] = (unsigned char)(wCnt >> 8);
@@ -378,12 +378,11 @@ bool AESbGenCCMP(unsigned char *pbyRxKey, unsigned char *pbyFrame, unsigned shor
for (kk = 0; kk < 8; kk++) {
abyTmp[kk] = abyTmp[kk] ^ pbyPayload[kk];
}
- //=>above is the dec-MIC from packet
- //--------------------------------------------
+ /* =>above is the dec-MIC from packet */
+ /* -------------------------------------------- */
- if (!memcmp(abyMIC, abyTmp, 8)) {
+ if (!memcmp(abyMIC, abyTmp, 8))
return true;
- } else {
+ else
return false;
- }
}
diff --git a/drivers/staging/vt6655/aes_ccmp.h b/drivers/staging/vt6655/aes_ccmp.h
index c8b28b0..cc02e64 100644
--- a/drivers/staging/vt6655/aes_ccmp.h
+++ b/drivers/staging/vt6655/aes_ccmp.h
@@ -43,4 +43,4 @@
/*--------------------- Export Functions --------------------------*/
bool AESbGenCCMP(unsigned char *pbyRxKey, unsigned char *pbyFrame, unsigned short wFrameSize);
-#endif //__AES_H__
+#endif /* __AES_H__ */
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 08b250f..7f36a71 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -3370,8 +3370,8 @@ viawget_resume(struct pci_dev *pcid)
PSMgmtObject pMgmt = pDevice->pMgmt;
int power_status; // to silence the compiler
- power_status = pci_set_power_state(pcid, 0);
- power_status = pci_enable_wake(pcid, 0, 0);
+ power_status = pci_set_power_state(pcid, PCI_D0);
+ power_status = pci_enable_wake(pcid, PCI_D0, 0);
pci_restore_state(pcid);
if (netif_running(pDevice->dev)) {
spin_lock_irq(&pDevice->lock);
diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
index 8417c2f..8acff44 100644
--- a/drivers/staging/vt6655/hostap.c
+++ b/drivers/staging/vt6655/hostap.c
@@ -80,13 +80,13 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
- pDevice->apdev = kzalloc(sizeof(struct net_device), GFP_KERNEL);
+ pDevice->apdev = alloc_etherdev(sizeof(*apdev_priv));
if (pDevice->apdev == NULL)
return -ENOMEM;
apdev_priv = netdev_priv(pDevice->apdev);
*apdev_priv = *pDevice;
- memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
+ eth_hw_addr_inherit(pDevice->apdev, dev);
pDevice->apdev->netdev_ops = &apdev_netdev_ops;
@@ -104,6 +104,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
if (ret) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: register_netdevice(AP) failed!\n",
dev->name);
+ free_netdev(pDevice->apdev);
+ pDevice->apdev = NULL;
return -1;
}
@@ -141,7 +143,7 @@ static int hostap_disable_hostapd(PSDevice pDevice, int rtnl_locked)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
pDevice->dev->name, pDevice->apdev->name);
}
- kfree(pDevice->apdev);
+ free_netdev(pDevice->apdev);
pDevice->apdev = NULL;
pDevice->bEnable8021x = false;
pDevice->bEnableHostWEP = false;
diff --git a/drivers/staging/vt6655/ioctl.c b/drivers/staging/vt6655/ioctl.c
index 2ae8116..b5cd2e4 100644
--- a/drivers/staging/vt6655/ioctl.c
+++ b/drivers/staging/vt6655/ioctl.c
@@ -64,7 +64,6 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
PKnownBSS pBSS;
PKnownNodeDB pNode;
unsigned int ii, jj;
- SCmdLinkStatus sLinkStatus;
unsigned char abySuppRates[] = {WLAN_EID_SUPP_RATES, 4, 0x02, 0x04, 0x0B, 0x16};
unsigned char abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
unsigned long dwKeyIndex = 0;
@@ -245,10 +244,12 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
break;
- case WLAN_CMD_GET_LINK:
+ case WLAN_CMD_GET_LINK: {
+ SCmdLinkStatus sLinkStatus;
+
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_GET_LINK status.\n");
- memset(sLinkStatus.abySSID, 0 , WLAN_SSID_MAXLEN + 1);
+ memset(&sLinkStatus, 0, sizeof(sLinkStatus));
if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA)
sLinkStatus.wBSSType = ADHOC;
@@ -277,7 +278,7 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
break;
}
break;
-
+ }
case WLAN_CMD_GET_LISTLEN:
cbListCount = 0;
pBSS = &(pMgmt->sBSSList[0]);
@@ -459,7 +460,7 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
}
if (sValue.dwValue == 1) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "up wpadev\n");
- memcpy(pDevice->wpadev->dev_addr, pDevice->dev->dev_addr, ETH_ALEN);
+ eth_hw_addr_inherit(pDevice->wpadev, pDevice->dev);
pDevice->bWPADEVUp = true;
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "close wpadev\n");
diff --git a/drivers/staging/vt6655/wpactl.c b/drivers/staging/vt6655/wpactl.c
index 869f62c..e8d9ecd 100644
--- a/drivers/staging/vt6655/wpactl.c
+++ b/drivers/staging/vt6655/wpactl.c
@@ -96,7 +96,7 @@ static int wpa_init_wpadev(PSDevice pDevice)
wpadev_priv = netdev_priv(pDevice->wpadev);
*wpadev_priv = *pDevice;
- memcpy(pDevice->wpadev->dev_addr, dev->dev_addr, ETH_ALEN);
+ eth_hw_addr_inherit(pDevice->wpadev, dev);
pDevice->wpadev->base_addr = dev->base_addr;
pDevice->wpadev->irq = dev->irq;
pDevice->wpadev->mem_start = dev->mem_start;
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index 33fa767..1e8b841 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -680,7 +680,6 @@ BBuGetFrameTime(
unsigned int uRate = 0;
if (uRateIdx > RATE_54M) {
- ASSERT(0);
return 0;
}
@@ -724,16 +723,16 @@ BBuGetFrameTime(
* cbFrameLength - Tx Frame Length
* wRate - Tx Rate
* Out:
- * pwPhyLen - pointer to Phy Length field
- * pbyPhySrv - pointer to Phy Service field
- * pbyPhySgn - pointer to Phy Signal field
+ * struct vnt_phy_field *phy
+ * - pointer to Phy Length field
+ * - pointer to Phy Service field
+ * - pointer to Phy Signal field
*
* Return Value: none
*
*/
void BBvCalculateParameter(struct vnt_private *pDevice, u32 cbFrameLength,
- u16 wRate, u8 byPacketType, u16 *pwPhyLen, u8 *pbyPhySrv,
- u8 *pbyPhySgn)
+ u16 wRate, u8 byPacketType, struct vnt_phy_field *phy)
{
u32 cbBitCount;
u32 cbUsCount = 0;
@@ -748,15 +747,15 @@ void BBvCalculateParameter(struct vnt_private *pDevice, u32 cbFrameLength,
switch (wRate) {
case RATE_1M :
cbUsCount = cbBitCount;
- *pbyPhySgn = 0x00;
+ phy->signal = 0x00;
break;
case RATE_2M :
cbUsCount = cbBitCount / 2;
if (byPreambleType == 1)
- *pbyPhySgn = 0x09;
+ phy->signal = 0x09;
else // long preamble
- *pbyPhySgn = 0x01;
+ phy->signal = 0x01;
break;
case RATE_5M :
@@ -767,9 +766,9 @@ void BBvCalculateParameter(struct vnt_private *pDevice, u32 cbFrameLength,
if (cbTmp != cbBitCount)
cbUsCount ++;
if (byPreambleType == 1)
- *pbyPhySgn = 0x0a;
+ phy->signal = 0x0a;
else // long preamble
- *pbyPhySgn = 0x02;
+ phy->signal = 0x02;
break;
case RATE_11M :
@@ -784,103 +783,102 @@ void BBvCalculateParameter(struct vnt_private *pDevice, u32 cbFrameLength,
bExtBit = true;
}
if (byPreambleType == 1)
- *pbyPhySgn = 0x0b;
+ phy->signal = 0x0b;
else // long preamble
- *pbyPhySgn = 0x03;
+ phy->signal = 0x03;
break;
case RATE_6M :
if(byPacketType == PK_TYPE_11A) {//11a, 5GHZ
- *pbyPhySgn = 0x9B; //1001 1011
+ phy->signal = 0x9b;
}
else {//11g, 2.4GHZ
- *pbyPhySgn = 0x8B; //1000 1011
+ phy->signal = 0x8b;
}
break;
case RATE_9M :
if(byPacketType == PK_TYPE_11A) {//11a, 5GHZ
- *pbyPhySgn = 0x9F; //1001 1111
+ phy->signal = 0x9f;
}
else {//11g, 2.4GHZ
- *pbyPhySgn = 0x8F; //1000 1111
+ phy->signal = 0x8f;
}
break;
case RATE_12M :
if(byPacketType == PK_TYPE_11A) {//11a, 5GHZ
- *pbyPhySgn = 0x9A; //1001 1010
+ phy->signal = 0x9a;
}
else {//11g, 2.4GHZ
- *pbyPhySgn = 0x8A; //1000 1010
+ phy->signal = 0x8a;
}
break;
case RATE_18M :
if(byPacketType == PK_TYPE_11A) {//11a, 5GHZ
- *pbyPhySgn = 0x9E; //1001 1110
+ phy->signal = 0x9e;
}
else {//11g, 2.4GHZ
- *pbyPhySgn = 0x8E; //1000 1110
+ phy->signal = 0x8e;
}
break;
case RATE_24M :
if(byPacketType == PK_TYPE_11A) {//11a, 5GHZ
- *pbyPhySgn = 0x99; //1001 1001
+ phy->signal = 0x99;
}
else {//11g, 2.4GHZ
- *pbyPhySgn = 0x89; //1000 1001
+ phy->signal = 0x89;
}
break;
case RATE_36M :
if(byPacketType == PK_TYPE_11A) {//11a, 5GHZ
- *pbyPhySgn = 0x9D; //1001 1101
+ phy->signal = 0x9d;
}
else {//11g, 2.4GHZ
- *pbyPhySgn = 0x8D; //1000 1101
+ phy->signal = 0x8d;
}
break;
case RATE_48M :
if(byPacketType == PK_TYPE_11A) {//11a, 5GHZ
- *pbyPhySgn = 0x98; //1001 1000
+ phy->signal = 0x98;
}
else {//11g, 2.4GHZ
- *pbyPhySgn = 0x88; //1000 1000
+ phy->signal = 0x88;
}
break;
case RATE_54M :
if (byPacketType == PK_TYPE_11A) {//11a, 5GHZ
- *pbyPhySgn = 0x9C; //1001 1100
+ phy->signal = 0x9c;
}
else {//11g, 2.4GHZ
- *pbyPhySgn = 0x8C; //1000 1100
+ phy->signal = 0x8c;
}
break;
default :
if (byPacketType == PK_TYPE_11A) {//11a, 5GHZ
- *pbyPhySgn = 0x9C; //1001 1100
+ phy->signal = 0x9c;
}
else {//11g, 2.4GHZ
- *pbyPhySgn = 0x8C; //1000 1100
+ phy->signal = 0x8c;
}
break;
}
- if (byPacketType == PK_TYPE_11B) {
- *pbyPhySrv = 0x00;
- if (bExtBit)
- *pbyPhySrv = *pbyPhySrv | 0x80;
- *pwPhyLen = (u16) cbUsCount;
- }
- else {
- *pbyPhySrv = 0x00;
- *pwPhyLen = (u16)cbFrameLength;
- }
+ if (byPacketType == PK_TYPE_11B) {
+ phy->service = 0x00;
+ if (bExtBit)
+ phy->service |= 0x80;
+ phy->len = cpu_to_le16((u16)cbUsCount);
+ } else {
+ phy->service = 0x00;
+ phy->len = cpu_to_le16((u16)cbFrameLength);
+ }
}
/*
diff --git a/drivers/staging/vt6656/baseband.h b/drivers/staging/vt6656/baseband.h
index 0a634ad..79faedf4 100644
--- a/drivers/staging/vt6656/baseband.h
+++ b/drivers/staging/vt6656/baseband.h
@@ -81,6 +81,13 @@
#define TOP_RATE_2M 0x00200000
#define TOP_RATE_1M 0x00100000
+/* Length, Service, and Signal fields of Phy for Tx */
+struct vnt_phy_field {
+ u8 signal;
+ u8 service;
+ __le16 len;
+} __packed;
+
unsigned int
BBuGetFrameTime(
u8 byPreambleType,
@@ -90,8 +97,7 @@ BBuGetFrameTime(
);
void BBvCalculateParameter(struct vnt_private *, u32 cbFrameLength,
- u16 wRate, u8 byPacketType, u16 *pwPhyLen, u8 *pbyPhySrv,
- u8 *pbyPhySgn);
+ u16 wRate, u8 byPacketType, struct vnt_phy_field *);
/* timer for antenna diversity */
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index 24291ae..19d3cf4 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -172,8 +172,8 @@ static u16 swGetOFDMControlRate(struct vnt_private *pDevice, u16 wRateIdx)
if (!CARDbIsOFDMinBasicRate(pDevice)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
"swGetOFDMControlRate:(NO OFDM) %d\n", wRateIdx);
- if (wRateIdx > RATE_24M)
- wRateIdx = RATE_24M;
+ if (wRateIdx > RATE_24M)
+ wRateIdx = RATE_24M;
return wRateIdx;
}
@@ -319,53 +319,27 @@ CARDvCalculateOFDMRParameter (
*/
void CARDvSetRSPINF(struct vnt_private *pDevice, u8 byBBType)
{
- u8 abyServ[4] = {0, 0, 0, 0}; /* For CCK */
- u8 abySignal[4] = {0, 0, 0, 0};
- u16 awLen[4] = {0, 0, 0, 0};
+ struct vnt_phy_field phy[4];
u8 abyTxRate[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; /* For OFDM */
u8 abyRsvTime[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
u8 abyData[34];
int i;
//RSPINF_b_1
- BBvCalculateParameter(pDevice,
- 14,
- swGetCCKControlRate(pDevice, RATE_1M),
- PK_TYPE_11B,
- &awLen[0],
- &abyServ[0],
- &abySignal[0]
- );
+ BBvCalculateParameter(pDevice, 14,
+ swGetCCKControlRate(pDevice, RATE_1M), PK_TYPE_11B, &phy[0]);
///RSPINF_b_2
- BBvCalculateParameter(pDevice,
- 14,
- swGetCCKControlRate(pDevice, RATE_2M),
- PK_TYPE_11B,
- &awLen[1],
- &abyServ[1],
- &abySignal[1]
- );
+ BBvCalculateParameter(pDevice, 14,
+ swGetCCKControlRate(pDevice, RATE_2M), PK_TYPE_11B, &phy[1]);
//RSPINF_b_5
- BBvCalculateParameter(pDevice,
- 14,
- swGetCCKControlRate(pDevice, RATE_5M),
- PK_TYPE_11B,
- &awLen[2],
- &abyServ[2],
- &abySignal[2]
- );
+ BBvCalculateParameter(pDevice, 14,
+ swGetCCKControlRate(pDevice, RATE_5M), PK_TYPE_11B, &phy[2]);
//RSPINF_b_11
- BBvCalculateParameter(pDevice,
- 14,
- swGetCCKControlRate(pDevice, RATE_11M),
- PK_TYPE_11B,
- &awLen[3],
- &abyServ[3],
- &abySignal[3]
- );
+ BBvCalculateParameter(pDevice, 14,
+ swGetCCKControlRate(pDevice, RATE_11M), PK_TYPE_11B, &phy[3]);
//RSPINF_a_6
CARDvCalculateOFDMRParameter (RATE_6M,
@@ -421,25 +395,21 @@ void CARDvSetRSPINF(struct vnt_private *pDevice, u8 byBBType)
&abyTxRate[8],
&abyRsvTime[8]);
- abyData[0] = (u8)(awLen[0]&0xFF);
- abyData[1] = (u8)(awLen[0]>>8);
- abyData[2] = abySignal[0];
- abyData[3] = abyServ[0];
-
- abyData[4] = (u8)(awLen[1]&0xFF);
- abyData[5] = (u8)(awLen[1]>>8);
- abyData[6] = abySignal[1];
- abyData[7] = abyServ[1];
-
- abyData[8] = (u8)(awLen[2]&0xFF);
- abyData[9] = (u8)(awLen[2]>>8);
- abyData[10] = abySignal[2];
- abyData[11] = abyServ[2];
-
- abyData[12] = (u8)(awLen[3]&0xFF);
- abyData[13] = (u8)(awLen[3]>>8);
- abyData[14] = abySignal[3];
- abyData[15] = abyServ[3];
+ put_unaligned(phy[0].len, (u16 *)&abyData[0]);
+ abyData[2] = phy[0].signal;
+ abyData[3] = phy[0].service;
+
+ put_unaligned(phy[1].len, (u16 *)&abyData[4]);
+ abyData[6] = phy[1].signal;
+ abyData[7] = phy[1].service;
+
+ put_unaligned(phy[2].len, (u16 *)&abyData[8]);
+ abyData[10] = phy[2].signal;
+ abyData[11] = phy[2].service;
+
+ put_unaligned(phy[3].len, (u16 *)&abyData[12]);
+ abyData[14] = phy[3].signal;
+ abyData[15] = phy[3].service;
for (i = 0; i < 9; i++) {
abyData[16+i*2] = abyTxRate[i];
diff --git a/drivers/staging/vt6656/desc.h b/drivers/staging/vt6656/desc.h
index 64cb046..4675135 100644
--- a/drivers/staging/vt6656/desc.h
+++ b/drivers/staging/vt6656/desc.h
@@ -144,160 +144,6 @@
#define TD_FLAGS_PS_RETRY 0x04 /* check if PS STA frame re-transmit */
/*
- * RsvTime buffer header
- */
-typedef struct tagSRrvTime_gRTS {
- u16 wRTSTxRrvTime_ba;
- u16 wRTSTxRrvTime_aa;
- u16 wRTSTxRrvTime_bb;
- u16 wReserved;
- u16 wTxRrvTime_b;
- u16 wTxRrvTime_a;
-} __attribute__ ((__packed__))
-SRrvTime_gRTS, *PSRrvTime_gRTS;
-
-typedef const SRrvTime_gRTS *PCSRrvTime_gRTS;
-
-typedef struct tagSRrvTime_gCTS {
- u16 wCTSTxRrvTime_ba;
- u16 wReserved;
- u16 wTxRrvTime_b;
- u16 wTxRrvTime_a;
-} __attribute__ ((__packed__))
-SRrvTime_gCTS, *PSRrvTime_gCTS;
-
-typedef const SRrvTime_gCTS *PCSRrvTime_gCTS;
-
-typedef struct tagSRrvTime_ab {
- u16 wRTSTxRrvTime;
- u16 wTxRrvTime;
-} __attribute__ ((__packed__))
-SRrvTime_ab, *PSRrvTime_ab;
-
-typedef const SRrvTime_ab *PCSRrvTime_ab;
-
-typedef struct tagSRrvTime_atim {
- u16 wCTSTxRrvTime_ba;
- u16 wTxRrvTime_a;
-} __attribute__ ((__packed__))
-SRrvTime_atim, *PSRrvTime_atim;
-
-typedef const SRrvTime_atim *PCSRrvTime_atim;
-
-/*
- * RTS buffer header
- */
-typedef struct tagSRTSData {
- u16 wFrameControl;
- u16 wDurationID;
- u8 abyRA[ETH_ALEN];
- u8 abyTA[ETH_ALEN];
-} __attribute__ ((__packed__))
-SRTSData, *PSRTSData;
-
-typedef const SRTSData *PCSRTSData;
-
-typedef struct tagSRTS_g {
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_ba;
- u16 wDuration_aa;
- u16 wDuration_bb;
- u16 wReserved;
- SRTSData Data;
-} __attribute__ ((__packed__))
-SRTS_g, *PSRTS_g;
-typedef const SRTS_g *PCSRTS_g;
-
-typedef struct tagSRTS_g_FB {
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_ba;
- u16 wDuration_aa;
- u16 wDuration_bb;
- u16 wReserved;
- u16 wRTSDuration_ba_f0;
- u16 wRTSDuration_aa_f0;
- u16 wRTSDuration_ba_f1;
- u16 wRTSDuration_aa_f1;
- SRTSData Data;
-} __attribute__ ((__packed__))
-SRTS_g_FB, *PSRTS_g_FB;
-
-typedef const SRTS_g_FB *PCSRTS_g_FB;
-
-typedef struct tagSRTS_ab {
- u8 bySignalField;
- u8 byServiceField;
- u16 wTransmitLength;
- u16 wDuration;
- u16 wReserved;
- SRTSData Data;
-} __attribute__ ((__packed__))
-SRTS_ab, *PSRTS_ab;
-
-typedef const SRTS_ab *PCSRTS_ab;
-
-typedef struct tagSRTS_a_FB {
- u8 bySignalField;
- u8 byServiceField;
- u16 wTransmitLength;
- u16 wDuration;
- u16 wReserved;
- u16 wRTSDuration_f0;
- u16 wRTSDuration_f1;
- SRTSData Data;
-} __attribute__ ((__packed__))
-SRTS_a_FB, *PSRTS_a_FB;
-
-typedef const SRTS_a_FB *PCSRTS_a_FB;
-
-/*
- * CTS buffer header
- */
-typedef struct tagSCTSData {
- u16 wFrameControl;
- u16 wDurationID;
- u8 abyRA[ETH_ALEN];
- u16 wReserved;
-} __attribute__ ((__packed__))
-SCTSData, *PSCTSData;
-
-typedef struct tagSCTS {
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u16 wDuration_ba;
- u16 wReserved;
- SCTSData Data;
-} __attribute__ ((__packed__))
-SCTS, *PSCTS;
-
-typedef const SCTS *PCSCTS;
-
-typedef struct tagSCTS_FB {
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u16 wDuration_ba;
- u16 wReserved;
- u16 wCTSDuration_ba_f0;
- u16 wCTSDuration_ba_f1;
- SCTSData Data;
-} __attribute__ ((__packed__))
-SCTS_FB, *PSCTS_FB;
-
-typedef const SCTS_FB *PCSCTS_FB;
-
-/*
* TX FIFO header
*/
typedef struct tagSTxBufHead {
@@ -317,76 +163,6 @@ typedef struct tagSTxShortBufHead {
STxShortBufHead, *PSTxShortBufHead;
typedef const STxShortBufHead *PCSTxShortBufHead;
-/*
- * TX data header
- */
-typedef struct tagSTxDataHead_g {
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_b;
- u16 wDuration_a;
- u16 wTimeStampOff_b;
- u16 wTimeStampOff_a;
-} __attribute__ ((__packed__))
-STxDataHead_g, *PSTxDataHead_g;
-
-typedef const STxDataHead_g *PCSTxDataHead_g;
-
-typedef struct tagSTxDataHead_g_FB {
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_b;
- u16 wDuration_a;
- u16 wDuration_a_f0;
- u16 wDuration_a_f1;
- u16 wTimeStampOff_b;
- u16 wTimeStampOff_a;
-} __attribute__ ((__packed__))
-STxDataHead_g_FB, *PSTxDataHead_g_FB;
-typedef const STxDataHead_g_FB *PCSTxDataHead_g_FB;
-
-typedef struct tagSTxDataHead_ab {
- u8 bySignalField;
- u8 byServiceField;
- u16 wTransmitLength;
- u16 wDuration;
- u16 wTimeStampOff;
-} __attribute__ ((__packed__))
-STxDataHead_ab, *PSTxDataHead_ab;
-typedef const STxDataHead_ab *PCSTxDataHead_ab;
-
-typedef struct tagSTxDataHead_a_FB {
- u8 bySignalField;
- u8 byServiceField;
- u16 wTransmitLength;
- u16 wDuration;
- u16 wTimeStampOff;
- u16 wDuration_f0;
- u16 wDuration_f1;
-} __attribute__ ((__packed__))
-STxDataHead_a_FB, *PSTxDataHead_a_FB;
-typedef const STxDataHead_a_FB *PCSTxDataHead_a_FB;
-
-/*
- * MICHDR data header
- */
-typedef struct tagSMICHDRHead {
- u32 adwHDR0[4];
- u32 adwHDR1[4];
- u32 adwHDR2[4];
-} __attribute__ ((__packed__))
-SMICHDRHead, *PSMICHDRHead;
-
-typedef const SMICHDRHead *PCSMICHDRHead;
-
typedef struct tagSBEACONCtl {
u32 BufReady:1;
u32 TSF:15;
diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h
index f07ba24..8e39634 100644
--- a/drivers/staging/vt6656/device.h
+++ b/drivers/staging/vt6656/device.h
@@ -166,8 +166,7 @@ typedef enum _CONTEXT_TYPE {
} CONTEXT_TYPE;
/* RCB (Receive Control Block) */
-typedef struct _RCB
-{
+struct vnt_rcb {
void *Next;
signed long Ref;
void *pDevice;
@@ -175,21 +174,20 @@ typedef struct _RCB
struct vnt_rx_mgmt sMngPacket;
struct sk_buff *skb;
int bBoolInUse;
-
-} RCB, *PRCB;
+};
/* used to track bulk out irps */
-typedef struct _USB_SEND_CONTEXT {
- void *pDevice;
- struct sk_buff *pPacket;
- struct urb *pUrb;
- unsigned int uBufLen;
- CONTEXT_TYPE Type;
- struct ethhdr sEthHeader;
- void *Next;
- bool bBoolInUse;
- unsigned char Data[MAX_TOTAL_SIZE_WITH_ALL_HEADERS];
-} USB_SEND_CONTEXT, *PUSB_SEND_CONTEXT;
+struct vnt_usb_send_context {
+ void *pDevice;
+ struct sk_buff *pPacket;
+ struct urb *pUrb;
+ unsigned int uBufLen;
+ CONTEXT_TYPE Type;
+ struct ethhdr sEthHeader;
+ void *Next;
+ bool bBoolInUse;
+ unsigned char Data[MAX_TOTAL_SIZE_WITH_ALL_HEADERS];
+};
/* structure got from configuration file as user-desired default settings */
typedef struct _DEFAULT_CONFIG {
@@ -416,21 +414,21 @@ struct vnt_private {
u32 int_interval;
/* Variables to track resources for the BULK In Pipe */
- PRCB pRCBMem;
- PRCB apRCB[CB_MAX_RX_DESC];
+ struct vnt_rcb *pRCBMem;
+ struct vnt_rcb *apRCB[CB_MAX_RX_DESC];
u32 cbRD;
- PRCB FirstRecvFreeList;
- PRCB LastRecvFreeList;
+ struct vnt_rcb *FirstRecvFreeList;
+ struct vnt_rcb *LastRecvFreeList;
u32 NumRecvFreeList;
- PRCB FirstRecvMngList;
- PRCB LastRecvMngList;
+ struct vnt_rcb *FirstRecvMngList;
+ struct vnt_rcb *LastRecvMngList;
u32 NumRecvMngList;
int bIsRxWorkItemQueued;
int bIsRxMngWorkItemQueued;
unsigned long ulRcvRefCount; /* packets that have not returned back */
/* Variables to track resources for the BULK Out Pipe */
- PUSB_SEND_CONTEXT apTD[CB_MAX_TX_DESC];
+ struct vnt_usb_send_context *apTD[CB_MAX_TX_DESC];
u32 cbTD;
/* Variables to track resources for the Interrupt In Pipe */
@@ -591,18 +589,11 @@ struct vnt_private {
u8 abyBSSID[ETH_ALEN];
u8 abyDesireBSSID[ETH_ALEN];
- u16 wCTSDuration; /* update while speed change */
- u16 wACKDuration;
- u16 wRTSTransmitLen;
- u8 byRTSServiceField;
- u8 byRTSSignalField;
-
u32 dwMaxReceiveLifetime; /* dot11MaxReceiveLifetime */
int bCCK;
int bEncryptionEnable;
int bLongHeader;
- int bSoftwareGenCrcErr;
int bShortSlotTime;
int bProtectMode;
int bNonERPPresent;
@@ -781,7 +772,7 @@ struct vnt_private {
#define DequeueRCB(Head, Tail) \
{ \
- PRCB RCB = Head; \
+ struct vnt_rcb *RCB = Head; \
if (!RCB->Next) { \
Tail = NULL; \
} \
diff --git a/drivers/staging/vt6656/device_cfg.h b/drivers/staging/vt6656/device_cfg.h
index ea66b97..a97f7bb 100644
--- a/drivers/staging/vt6656/device_cfg.h
+++ b/drivers/staging/vt6656/device_cfg.h
@@ -82,18 +82,4 @@ typedef enum _chip_type {
VT3184 = 1
} CHIP_TYPE, *PCHIP_TYPE;
-#ifdef VIAWET_DEBUG
-#define ASSERT(x) { \
- if (!(x)) { \
- printk(KERN_ERR "assertion %s failed: file %s line %d\n", #x, \
- __FUNCTION__, __LINE__);\
- *(int *) 0 = 0; \
- } \
-}
-#define DBG_PORT80(value) outb(value, 0x80)
-#else
-#define ASSERT(x)
-#define DBG_PORT80(value)
-#endif
-
#endif
diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
index 7ec166a..ea7d443 100644
--- a/drivers/staging/vt6656/dpc.c
+++ b/drivers/staging/vt6656/dpc.c
@@ -246,7 +246,7 @@ s_vGetDASA (
*pcbHeaderSize = cbHeaderSize;
}
-int RXbBulkInProcessData(struct vnt_private *pDevice, PRCB pRCB,
+int RXbBulkInProcessData(struct vnt_private *pDevice, struct vnt_rcb *pRCB,
unsigned long BytesToIndicate)
{
struct net_device_stats *pStats = &pDevice->stats;
@@ -271,7 +271,7 @@ int RXbBulkInProcessData(struct vnt_private *pDevice, PRCB pRCB,
/* signed long ldBm = 0; */
int bIsWEP = false; int bExtIV = false;
u32 dwWbkStatus;
- PRCB pRCBIndicate = pRCB;
+ struct vnt_rcb *pRCBIndicate = pRCB;
u8 *pbyDAddress;
u16 *pwPLCP_Length;
u8 abyVaildRate[MAX_RATE]
@@ -314,7 +314,6 @@ int RXbBulkInProcessData(struct vnt_private *pDevice, PRCB pRCB,
(BytesToIndicate < (*pwPLCP_Length)) ) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Wrong PLCP Length %x\n", (int) *pwPLCP_Length);
- ASSERT(0);
return false;
}
for ( ii=RATE_1M;ii<MAX_RATE;ii++) {
@@ -1337,7 +1336,7 @@ static int s_bAPModeRxData(struct vnt_private *pDevice, struct sk_buff *skb,
void RXvWorkItem(struct vnt_private *pDevice)
{
int ntStatus;
- PRCB pRCB = NULL;
+ struct vnt_rcb *pRCB = NULL;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Rx Polling Thread\n");
spin_lock_irq(&pDevice->lock);
@@ -1347,7 +1346,6 @@ void RXvWorkItem(struct vnt_private *pDevice)
(pDevice->NumRecvFreeList != 0) ) {
pRCB = pDevice->FirstRecvFreeList;
pDevice->NumRecvFreeList--;
- ASSERT(pRCB);// cannot be NULL
DequeueRCB(pDevice->FirstRecvFreeList, pDevice->LastRecvFreeList);
ntStatus = PIPEnsBulkInUsbRead(pDevice, pRCB);
}
@@ -1356,15 +1354,12 @@ void RXvWorkItem(struct vnt_private *pDevice)
}
-void RXvFreeRCB(PRCB pRCB, int bReAllocSkb)
+void RXvFreeRCB(struct vnt_rcb *pRCB, int bReAllocSkb)
{
struct vnt_private *pDevice = pRCB->pDevice;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->RXvFreeRCB\n");
- ASSERT(!pRCB->Ref); // should be 0
- ASSERT(pRCB->pDevice); // shouldn't be NULL
-
if (bReAllocSkb == false) {
kfree_skb(pRCB->skb);
bReAllocSkb = true;
@@ -1396,7 +1391,7 @@ void RXvFreeRCB(PRCB pRCB, int bReAllocSkb)
void RXvMngWorkItem(struct vnt_private *pDevice)
{
- PRCB pRCB = NULL;
+ struct vnt_rcb *pRCB = NULL;
struct vnt_rx_mgmt *pRxPacket;
int bReAllocSkb = false;
@@ -1411,7 +1406,6 @@ void RXvMngWorkItem(struct vnt_private *pDevice)
if(!pRCB){
break;
}
- ASSERT(pRCB);// cannot be NULL
pRxPacket = &(pRCB->sMngPacket);
vMgrRxManagePacket(pDevice, &pDevice->vnt_mgmt, pRxPacket);
pRCB->Ref--;
diff --git a/drivers/staging/vt6656/dpc.h b/drivers/staging/vt6656/dpc.h
index 876468f..95388dc 100644
--- a/drivers/staging/vt6656/dpc.h
+++ b/drivers/staging/vt6656/dpc.h
@@ -36,9 +36,9 @@ void RXvWorkItem(void *Context);
void RXvMngWorkItem(void *Context);
-void RXvFreeRCB(PRCB pRCB, int bReAllocSkb);
+void RXvFreeRCB(struct vnt_rcb *pRCB, int bReAllocSkb);
-int RXbBulkInProcessData(struct vnt_private *, PRCB pRCB,
+int RXbBulkInProcessData(struct vnt_private *, struct vnt_rcb *pRCB,
unsigned long BytesToIndicate);
#endif /* __RXTX_H__ */
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index d0cf7d8..8872e0f 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -1634,6 +1634,9 @@ int iwctl_siwencodeext(struct net_device *dev, struct iw_request_info *info,
if (pMgmt == NULL)
return -EFAULT;
+ if (!(pDevice->flags & DEVICE_FLAGS_OPENED))
+ return -ENODEV;
+
buf = kzalloc(sizeof(struct viawget_wpa_param), GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
diff --git a/drivers/staging/vt6656/mac.c b/drivers/staging/vt6656/mac.c
index 343db19..54414ed27 100644
--- a/drivers/staging/vt6656/mac.c
+++ b/drivers/staging/vt6656/mac.c
@@ -101,7 +101,7 @@ void MACvSetBBType(struct vnt_private *pDevice, u8 byType)
MESSAGE_TYPE_WRITE_MASK,
MAC_REG_ENCFG0,
MESSAGE_REQUEST_MACREG,
- 2,
+ ARRAY_SIZE(pbyData),
pbyData
);
}
@@ -122,18 +122,10 @@ void MACvSetBBType(struct vnt_private *pDevice, u8 byType)
*/
void MACvDisableKeyEntry(struct vnt_private *pDevice, u32 uEntryIdx)
{
- u16 wOffset;
u8 byData;
byData = (u8) uEntryIdx;
- wOffset = MISCFIFO_KEYETRY0;
- wOffset += (uEntryIdx * MISCFIFO_KEYENTRYSIZE);
-
- //VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
- //VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, 0);
- //VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
-
//issue write misc fifo command to device
CONTROLnsRequestOut(pDevice,
MESSAGE_TYPE_CLRKEYENTRY,
@@ -182,12 +174,6 @@ void MACvSetKeyEntry(struct vnt_private *pDevice, u16 wKeyCtl, u32 uEntryIdx,
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"1. wOffset: %d, Data: %X,"\
" KeyCtl:%X\n", wOffset, dwData1, wKeyCtl);
- //VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
- //VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
- //VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
-
- //wOffset++;
-
dwData2 = 0;
dwData2 |= *(pbyAddr+3);
dwData2 <<= 8;
@@ -200,21 +186,6 @@ void MACvSetKeyEntry(struct vnt_private *pDevice, u16 wKeyCtl, u32 uEntryIdx,
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"2. wOffset: %d, Data: %X\n",
wOffset, dwData2);
- //VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
- //VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
- //VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
-
- //wOffset++;
-
- //wOffset += (uKeyIdx * 4);
-/* for (ii=0;ii<4;ii++) {
- // alway push 128 bits
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"3.(%d) wOffset: %d, Data: %lX\n", ii, wOffset+ii, *pdwKey);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset+ii);
- VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, *pdwKey++);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
- }
-*/
pbyKey = (u8 *)pdwKey;
pbyData[0] = (u8)dwData1;
@@ -232,7 +203,7 @@ void MACvSetKeyEntry(struct vnt_private *pDevice, u16 wKeyCtl, u32 uEntryIdx,
MESSAGE_TYPE_SETKEY,
wOffset,
(u16)uKeyIdx,
- 24,
+ ARRAY_SIZE(pbyData),
pbyData
);
@@ -249,7 +220,7 @@ void MACvRegBitsOff(struct vnt_private *pDevice, u8 byRegOfs, u8 byBits)
MESSAGE_TYPE_WRITE_MASK,
byRegOfs,
MESSAGE_REQUEST_MACREG,
- 2,
+ ARRAY_SIZE(pbyData),
pbyData
);
}
@@ -265,7 +236,7 @@ void MACvRegBitsOn(struct vnt_private *pDevice, u8 byRegOfs, u8 byBits)
MESSAGE_TYPE_WRITE_MASK,
byRegOfs,
MESSAGE_REQUEST_MACREG,
- 2,
+ ARRAY_SIZE(pbyData),
pbyData
);
}
@@ -281,7 +252,7 @@ void MACvWriteWord(struct vnt_private *pDevice, u8 byRegOfs, u16 wData)
MESSAGE_TYPE_WRITE,
byRegOfs,
MESSAGE_REQUEST_MACREG,
- 2,
+ ARRAY_SIZE(pbyData),
pbyData
);
@@ -302,7 +273,7 @@ void MACvWriteBSSIDAddress(struct vnt_private *pDevice, u8 *pbyEtherAddr)
MESSAGE_TYPE_WRITE,
MAC_REG_BSSID0,
MESSAGE_REQUEST_MACREG,
- 6,
+ ARRAY_SIZE(pbyData),
pbyData
);
}
@@ -318,7 +289,7 @@ void MACvEnableProtectMD(struct vnt_private *pDevice)
MESSAGE_TYPE_WRITE_MASK,
MAC_REG_ENCFG0,
MESSAGE_REQUEST_MACREG,
- 2,
+ ARRAY_SIZE(pbyData),
pbyData
);
}
@@ -334,7 +305,7 @@ void MACvDisableProtectMD(struct vnt_private *pDevice)
MESSAGE_TYPE_WRITE_MASK,
MAC_REG_ENCFG0,
MESSAGE_REQUEST_MACREG,
- 2,
+ ARRAY_SIZE(pbyData),
pbyData
);
}
@@ -350,7 +321,7 @@ void MACvEnableBarkerPreambleMd(struct vnt_private *pDevice)
MESSAGE_TYPE_WRITE_MASK,
MAC_REG_ENCFG2,
MESSAGE_REQUEST_MACREG,
- 2,
+ ARRAY_SIZE(pbyData),
pbyData
);
}
@@ -366,7 +337,7 @@ void MACvDisableBarkerPreambleMd(struct vnt_private *pDevice)
MESSAGE_TYPE_WRITE_MASK,
MAC_REG_ENCFG2,
MESSAGE_REQUEST_MACREG,
- 2,
+ ARRAY_SIZE(pbyData),
pbyData
);
}
@@ -382,7 +353,7 @@ void MACvWriteBeaconInterval(struct vnt_private *pDevice, u16 wInterval)
MESSAGE_TYPE_WRITE,
MAC_REG_BI,
MESSAGE_REQUEST_MACREG,
- 2,
+ ARRAY_SIZE(pbyData),
pbyData
);
}
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 3a3fdc5..6f9d281 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -267,7 +267,6 @@ device_set_options(struct vnt_private *pDevice) {
pDevice->bUpdateBBVGA = true;
pDevice->byFOETuning = 0;
pDevice->byAutoPwrTunning = 0;
- pDevice->wCTSDuration = 0;
pDevice->byPreambleType = 0;
pDevice->bExistSWNetAddr = false;
/* pDevice->bDiversityRegCtlON = true; */
@@ -734,7 +733,7 @@ err_nomem:
static void device_free_tx_bufs(struct vnt_private *pDevice)
{
- PUSB_SEND_CONTEXT pTxContext;
+ struct vnt_usb_send_context *pTxContext;
int ii;
for (ii = 0; ii < pDevice->cbTD; ii++) {
@@ -752,8 +751,8 @@ static void device_free_tx_bufs(struct vnt_private *pDevice)
static void device_free_rx_bufs(struct vnt_private *pDevice)
{
- PRCB pRCB;
- int ii;
+ struct vnt_rcb *pRCB;
+ int ii;
for (ii = 0; ii < pDevice->cbRD; ii++) {
@@ -789,14 +788,13 @@ static void device_free_int_bufs(struct vnt_private *pDevice)
static bool device_alloc_bufs(struct vnt_private *pDevice)
{
-
- PUSB_SEND_CONTEXT pTxContext;
- PRCB pRCB;
- int ii;
+ struct vnt_usb_send_context *pTxContext;
+ struct vnt_rcb *pRCB;
+ int ii;
for (ii = 0; ii < pDevice->cbTD; ii++) {
- pTxContext = kmalloc(sizeof(USB_SEND_CONTEXT), GFP_KERNEL);
+ pTxContext = kmalloc(sizeof(struct vnt_usb_send_context), GFP_KERNEL);
if (pTxContext == NULL) {
DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s : allocate tx usb context failed\n", pDevice->dev->name);
goto free_tx;
@@ -813,7 +811,8 @@ static bool device_alloc_bufs(struct vnt_private *pDevice)
}
/* allocate RCB mem */
- pDevice->pRCBMem = kzalloc((sizeof(RCB) * pDevice->cbRD), GFP_KERNEL);
+ pDevice->pRCBMem = kzalloc((sizeof(struct vnt_rcb) * pDevice->cbRD),
+ GFP_KERNEL);
if (pDevice->pRCBMem == NULL) {
DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s : alloc rx usb context failed\n", pDevice->dev->name);
goto free_tx;
@@ -824,7 +823,8 @@ static bool device_alloc_bufs(struct vnt_private *pDevice)
pDevice->FirstRecvMngList = NULL;
pDevice->LastRecvMngList = NULL;
pDevice->NumRecvFreeList = 0;
- pRCB = (PRCB) pDevice->pRCBMem;
+
+ pRCB = (struct vnt_rcb *)pDevice->pRCBMem;
for (ii = 0; ii < pDevice->cbRD; ii++) {
@@ -925,7 +925,6 @@ int device_alloc_frag_buf(struct vnt_private *pDevice,
pDeF->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
if (pDeF->skb == NULL)
return false;
- ASSERT(pDeF->skb);
pDeF->skb->dev = pDevice->dev;
return true;
@@ -1099,6 +1098,8 @@ static int device_close(struct net_device *dev)
memset(pMgmt->abyCurrBSSID, 0, 6);
pMgmt->eCurrState = WMAC_STATE_IDLE;
+ pDevice->flags &= ~DEVICE_FLAGS_OPENED;
+
device_free_tx_bufs(pDevice);
device_free_rx_bufs(pDevice);
device_free_int_bufs(pDevice);
@@ -1110,7 +1111,6 @@ static int device_close(struct net_device *dev)
usb_free_urb(pDevice->pInterruptURB);
BSSvClearNodeDBTable(pDevice, 0);
- pDevice->flags &=(~DEVICE_FLAGS_OPENED);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close2 \n");
diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
index 44cfe0b..d27fa43 100644
--- a/drivers/staging/vt6656/rf.c
+++ b/drivers/staging/vt6656/rf.c
@@ -29,6 +29,9 @@
* IFRFbWriteEmbedded - Embedded write RF register via MAC
*
* Revision History:
+ * RF_VT3226: RobertYu:20051111, VT3226C0 and before
+ * RF_VT3226D0: RobertYu:20051228
+ * RF_VT3342A0: RobertYu:20060609
*
*/
@@ -61,7 +64,7 @@ static int msglevel =MSG_LEVEL_INFO;
#define VT3342_PWR_IDX_LEN 64
//}}
-u8 abyAL2230InitTable[CB_AL2230_INIT_SEQ][3] = {
+static u8 al2230_init_table[CB_AL2230_INIT_SEQ][3] = {
{0x03, 0xF7, 0x90},
{0x03, 0x33, 0x31},
{0x01, 0xB8, 0x02},
@@ -79,7 +82,7 @@ u8 abyAL2230InitTable[CB_AL2230_INIT_SEQ][3] = {
{0x00, 0x58, 0x0F}
};
-u8 abyAL2230ChannelTable0[CB_MAX_CHANNEL_24G][3] = {
+static u8 al2230_channel_table0[CB_MAX_CHANNEL_24G][3] = {
{0x03, 0xF7, 0x90}, // channel = 1, Tf = 2412MHz
{0x03, 0xF7, 0x90}, // channel = 2, Tf = 2417MHz
{0x03, 0xE7, 0x90}, // channel = 3, Tf = 2422MHz
@@ -96,7 +99,7 @@ u8 abyAL2230ChannelTable0[CB_MAX_CHANNEL_24G][3] = {
{0x03, 0xE7, 0xC0} // channel = 14, Tf = 2412M
};
-u8 abyAL2230ChannelTable1[CB_MAX_CHANNEL_24G][3] = {
+static u8 al2230_channel_table1[CB_MAX_CHANNEL_24G][3] = {
{0x03, 0x33, 0x31}, // channel = 1, Tf = 2412MHz
{0x0B, 0x33, 0x31}, // channel = 2, Tf = 2417MHz
{0x03, 0x33, 0x31}, // channel = 3, Tf = 2422MHz
@@ -115,7 +118,7 @@ u8 abyAL2230ChannelTable1[CB_MAX_CHANNEL_24G][3] = {
// 40MHz reference frequency
// Need to Pull PLLON(PE3) low when writing channel registers through 3-wire.
-u8 abyAL7230InitTable[CB_AL7230_INIT_SEQ][3] = {
+static u8 al7230_init_table[CB_AL7230_INIT_SEQ][3] = {
{0x20, 0x37, 0x90}, // Channel1 // Need modify for 11a
{0x13, 0x33, 0x31}, // Channel1 // Need modify for 11a
{0x84, 0x1F, 0xF2}, // Need modify for 11a: 451FE2
@@ -138,7 +141,7 @@ u8 abyAL7230InitTable[CB_AL7230_INIT_SEQ][3] = {
{0x1A, 0xBA, 0x8F} // Need modify for 11a: 12BACF
};
-u8 abyAL7230InitTableAMode[CB_AL7230_INIT_SEQ][3] = {
+static u8 al7230_init_table_amode[CB_AL7230_INIT_SEQ][3] = {
{0x2F, 0xF5, 0x20}, // Channel184 // Need modify for 11b/g
{0x00, 0x00, 0x01}, // Channel184 // Need modify for 11b/g
{0x45, 0x1F, 0xE2}, // Need modify for 11b/g
@@ -157,7 +160,7 @@ u8 abyAL7230InitTableAMode[CB_AL7230_INIT_SEQ][3] = {
{0x12, 0xBA, 0xCF} // Need modify for 11b/g
};
-u8 abyAL7230ChannelTable0[CB_MAX_CHANNEL][3] = {
+static u8 al7230_channel_table0[CB_MAX_CHANNEL][3] = {
{0x20, 0x37, 0x90}, // channel = 1, Tf = 2412MHz
{0x20, 0x37, 0x90}, // channel = 2, Tf = 2417MHz
{0x20, 0x37, 0x90}, // channel = 3, Tf = 2422MHz
@@ -223,7 +226,7 @@ u8 abyAL7230ChannelTable0[CB_MAX_CHANNEL][3] = {
{0x2F, 0xF6, 0x10} // channel = 165, Tf = 5825MHz (56)
};
-u8 abyAL7230ChannelTable1[CB_MAX_CHANNEL][3] = {
+static u8 al7230_channel_table1[CB_MAX_CHANNEL][3] = {
{0x13, 0x33, 0x31}, // channel = 1, Tf = 2412MHz
{0x1B, 0x33, 0x31}, // channel = 2, Tf = 2417MHz
{0x03, 0x33, 0x31}, // channel = 3, Tf = 2422MHz
@@ -287,7 +290,7 @@ u8 abyAL7230ChannelTable1[CB_MAX_CHANNEL][3] = {
{0x02, 0xAA, 0xB1} // channel = 165, Tf = 5825MHz (56)
};
-u8 abyAL7230ChannelTable2[CB_MAX_CHANNEL][3] = {
+static u8 al7230_channel_table2[CB_MAX_CHANNEL][3] = {
{0x7F, 0xD7, 0x84}, // channel = 1, Tf = 2412MHz
{0x7F, 0xD7, 0x84}, // channel = 2, Tf = 2417MHz
{0x7F, 0xD7, 0x84}, // channel = 3, Tf = 2422MHz
@@ -352,7 +355,7 @@ u8 abyAL7230ChannelTable2[CB_MAX_CHANNEL][3] = {
};
///{{RobertYu:20051111
-u8 abyVT3226_InitTable[CB_VT3226_INIT_SEQ][3] = {
+static u8 vt3226_init_table[CB_VT3226_INIT_SEQ][3] = {
{0x03, 0xFF, 0x80},
{0x02, 0x82, 0xA1},
{0x03, 0xC6, 0xA2},
@@ -366,7 +369,7 @@ u8 abyVT3226_InitTable[CB_VT3226_INIT_SEQ][3] = {
{0x02, 0x00, 0x2A}
};
-u8 abyVT3226D0_InitTable[CB_VT3226_INIT_SEQ][3] = {
+static u8 vt3226d0_init_table[CB_VT3226_INIT_SEQ][3] = {
{0x03, 0xFF, 0x80},
{0x03, 0x02, 0x21}, //RobertYu:20060327
{0x03, 0xC6, 0xA2},
@@ -380,7 +383,7 @@ u8 abyVT3226D0_InitTable[CB_VT3226_INIT_SEQ][3] = {
{0x02, 0x01, 0xAA} //RobertYu:20060523
};
-u8 abyVT3226_ChannelTable0[CB_MAX_CHANNEL_24G][3] = {
+static u8 vt3226_channel_table0[CB_MAX_CHANNEL_24G][3] = {
{0x01, 0x97, 0x83}, // channel = 1, Tf = 2412MHz
{0x01, 0x97, 0x83}, // channel = 2, Tf = 2417MHz
{0x01, 0x97, 0x93}, // channel = 3, Tf = 2422MHz
@@ -397,7 +400,7 @@ u8 abyVT3226_ChannelTable0[CB_MAX_CHANNEL_24G][3] = {
{0x03, 0x37, 0xC3} // channel = 14, Tf = 2484MHz
};
-u8 abyVT3226_ChannelTable1[CB_MAX_CHANNEL_24G][3] = {
+static u8 vt3226_channel_table1[CB_MAX_CHANNEL_24G][3] = {
{0x02, 0x66, 0x64}, // channel = 1, Tf = 2412MHz
{0x03, 0x66, 0x64}, // channel = 2, Tf = 2417MHz
{0x00, 0x66, 0x64}, // channel = 3, Tf = 2422MHz
@@ -416,7 +419,7 @@ u8 abyVT3226_ChannelTable1[CB_MAX_CHANNEL_24G][3] = {
///}}RobertYu
//{{RobertYu:20060502, TWIF 1.14, LO Current for 11b mode
-u32 dwVT3226D0LoCurrentTable[CB_MAX_CHANNEL_24G] = {
+const u32 vt3226d0_lo_current_table[CB_MAX_CHANNEL_24G] = {
0x0135C600+(BY_VT3226_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
0x0135C600+(BY_VT3226_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
0x0235C600+(BY_VT3226_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
@@ -435,7 +438,7 @@ u32 dwVT3226D0LoCurrentTable[CB_MAX_CHANNEL_24G] = {
//}}
//{{RobertYu:20060609
-u8 abyVT3342A0_InitTable[CB_VT3342_INIT_SEQ][3] = { /* 11b/g mode */
+static u8 vt3342a0_init_table[CB_VT3342_INIT_SEQ][3] = { /* 11b/g mode */
{0x03, 0xFF, 0x80}, //update for mode//
{0x02, 0x08, 0x81},
{0x00, 0xC6, 0x02},
@@ -458,7 +461,7 @@ u8 abyVT3342A0_InitTable[CB_VT3342_INIT_SEQ][3] = { /* 11b/g mode */
// channel56, 5280MHz 0x00C402 for disable Frac
// other channels 0x00C602
-u8 abyVT3342_ChannelTable0[CB_MAX_CHANNEL][3] = {
+static u8 vt3342_channel_table0[CB_MAX_CHANNEL][3] = {
{0x02, 0x05, 0x03}, // channel = 1, Tf = 2412MHz
{0x01, 0x15, 0x03}, // channel = 2, Tf = 2417MHz
{0x03, 0xC5, 0x03}, // channel = 3, Tf = 2422MHz
@@ -524,7 +527,7 @@ u8 abyVT3342_ChannelTable0[CB_MAX_CHANNEL][3] = {
{0x00, 0x06, 0x03} // channel = 165, Tf = 5825MHz (56), TBD
};
-u8 abyVT3342_ChannelTable1[CB_MAX_CHANNEL][3] = {
+static u8 vt3342_channel_table1[CB_MAX_CHANNEL][3] = {
{0x01, 0x99, 0x94}, // channel = 1, Tf = 2412MHz
{0x02, 0x44, 0x44}, // channel = 2, Tf = 2417MHz
{0x02, 0xEE, 0xE4}, // channel = 3, Tf = 2422MHz
@@ -594,7 +597,7 @@ u8 abyVT3342_ChannelTable1[CB_MAX_CHANNEL][3] = {
*
-*/
-const u32 dwAL2230PowerTable[AL2230_PWR_IDX_LEN] = {
+const u32 al2230_power_table[AL2230_PWR_IDX_LEN] = {
0x04040900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
0x04041900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
0x04042900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
@@ -732,42 +735,41 @@ int IFRFbWriteEmbedded(struct vnt_private *pDevice, u32 dwData)
* Return Value: true if succeeded; false if failed.
*
*/
-int RFbSetPower(struct vnt_private *pDevice, u32 uRATE, u32 uCH)
+int RFbSetPower(struct vnt_private *priv, u32 rate, u32 channel)
{
- int bResult = true;
- u8 byPwr = pDevice->byCCKPwr;
+ int ret = true;
+ u8 power = priv->byCCKPwr;
- if (pDevice->dwDiagRefCount)
+ if (priv->dwDiagRefCount)
return true;
- if (uCH == 0)
+ if (channel == 0)
return -EINVAL;
- switch (uRATE) {
- case RATE_1M:
- case RATE_2M:
- case RATE_5M:
- case RATE_11M:
- byPwr = pDevice->abyCCKPwrTbl[uCH-1];
- break;
- case RATE_6M:
- case RATE_9M:
- case RATE_18M:
- case RATE_24M:
- case RATE_36M:
- case RATE_48M:
- case RATE_54M:
- if (uCH > CB_MAX_CHANNEL_24G) {
- byPwr = pDevice->abyOFDMAPwrTbl[uCH-15];
- } else {
- byPwr = pDevice->abyOFDMPwrTbl[uCH-1];
- }
- break;
- }
-
- bResult = RFbRawSetPower(pDevice, byPwr, uRATE);
-
- return bResult;
+ switch (rate) {
+ case RATE_1M:
+ case RATE_2M:
+ case RATE_5M:
+ case RATE_11M:
+ power = priv->abyCCKPwrTbl[channel-1];
+ break;
+ case RATE_6M:
+ case RATE_9M:
+ case RATE_18M:
+ case RATE_24M:
+ case RATE_36M:
+ case RATE_48M:
+ case RATE_54M:
+ if (channel > CB_MAX_CHANNEL_24G)
+ power = priv->abyOFDMAPwrTbl[channel-15];
+ else
+ power = priv->abyOFDMPwrTbl[channel-1];
+ break;
+ }
+
+ ret = RFbRawSetPower(priv, power, rate);
+
+ return ret;
}
/*
@@ -784,136 +786,146 @@ int RFbSetPower(struct vnt_private *pDevice, u32 uRATE, u32 uCH)
*
*/
-int RFbRawSetPower(struct vnt_private *pDevice, u8 byPwr, u32 uRATE)
+int RFbRawSetPower(struct vnt_private *priv, u8 power, u32 rate)
{
- int bResult = true;
-
- if (pDevice->byCurPwr == byPwr)
- return true;
-
- pDevice->byCurPwr = byPwr;
-
- switch (pDevice->byRFType) {
-
- case RF_AL2230 :
- if (pDevice->byCurPwr >= AL2230_PWR_IDX_LEN)
- return false;
- bResult &= IFRFbWriteEmbedded(pDevice, dwAL2230PowerTable[pDevice->byCurPwr]);
- if (uRATE <= RATE_11M)
- bResult &= IFRFbWriteEmbedded(pDevice, 0x0001B400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
- else
- bResult &= IFRFbWriteEmbedded(pDevice, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
- break;
-
- case RF_AL2230S :
- if (pDevice->byCurPwr >= AL2230_PWR_IDX_LEN)
- return false;
- bResult &= IFRFbWriteEmbedded(pDevice, dwAL2230PowerTable[pDevice->byCurPwr]);
- if (uRATE <= RATE_11M) {
- bResult &= IFRFbWriteEmbedded(pDevice, 0x040C1400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
- bResult &= IFRFbWriteEmbedded(pDevice, 0x00299B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
- }else {
- bResult &= IFRFbWriteEmbedded(pDevice, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
- bResult &= IFRFbWriteEmbedded(pDevice, 0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
- }
- break;
-
- case RF_AIROHA7230:
- {
- u32 dwMax7230Pwr;
-
- if (uRATE <= RATE_11M) { //RobertYu:20060426, for better 11b mask
- bResult &= IFRFbWriteEmbedded(pDevice, 0x111BB900+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW);
- }
- else {
- bResult &= IFRFbWriteEmbedded(pDevice, 0x221BB900+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW);
- }
-
- if (pDevice->byCurPwr > AL7230_PWR_IDX_LEN) return false;
-
- // 0x080F1B00 for 3 wire control TxGain(D10) and 0x31 as TX Gain value
- dwMax7230Pwr = 0x080C0B00 | ( (pDevice->byCurPwr) << 12 ) |
- (BY_AL7230_REG_LEN << 3 ) | IFREGCTL_REGW;
-
- bResult &= IFRFbWriteEmbedded(pDevice, dwMax7230Pwr);
- break;
- }
- break;
-
- case RF_VT3226: //RobertYu:20051111, VT3226C0 and before
- {
- u32 dwVT3226Pwr;
-
- if (pDevice->byCurPwr >= VT3226_PWR_IDX_LEN)
- return false;
- dwVT3226Pwr = ((0x3F-pDevice->byCurPwr) << 20 ) | ( 0x17 << 8 ) /* Reg7 */ |
- (BY_VT3226_REG_LEN << 3 ) | IFREGCTL_REGW;
- bResult &= IFRFbWriteEmbedded(pDevice, dwVT3226Pwr);
- break;
- }
-
- case RF_VT3226D0: //RobertYu:20051228
- {
- u32 dwVT3226Pwr;
-
- if (pDevice->byCurPwr >= VT3226_PWR_IDX_LEN)
- return false;
-
- if (uRATE <= RATE_11M) {
-
- dwVT3226Pwr = ((0x3F-pDevice->byCurPwr) << 20 ) | ( 0xE07 << 8 ) /* Reg7 */ | //RobertYu:20060420, TWIF 1.10
- (BY_VT3226_REG_LEN << 3 ) | IFREGCTL_REGW;
- bResult &= IFRFbWriteEmbedded(pDevice, dwVT3226Pwr);
-
- bResult &= IFRFbWriteEmbedded(pDevice, 0x03C6A200+(BY_VT3226_REG_LEN<<3)+IFREGCTL_REGW);
- if (pDevice->vnt_mgmt.eScanState != WMAC_NO_SCANNING) {
- /* scanning, channel number is pDevice->uScanChannel */
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ u32 power_setting = 0;
+ int ret = true;
+
+ if (priv->byCurPwr == power)
+ return true;
+
+ priv->byCurPwr = power;
+
+ switch (priv->byRFType) {
+ case RF_AL2230:
+ if (priv->byCurPwr >= AL2230_PWR_IDX_LEN)
+ return false;
+
+ ret &= IFRFbWriteEmbedded(priv,
+ al2230_power_table[priv->byCurPwr]);
+
+ if (rate <= RATE_11M)
+ ret &= IFRFbWriteEmbedded(priv, 0x0001b400 +
+ (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
+ else
+ ret &= IFRFbWriteEmbedded(priv, 0x0005a400 +
+ (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
+ break;
+ case RF_AL2230S:
+ if (priv->byCurPwr >= AL2230_PWR_IDX_LEN)
+ return false;
+
+ ret &= IFRFbWriteEmbedded(priv,
+ al2230_power_table[priv->byCurPwr]);
+
+ if (rate <= RATE_11M) {
+ ret &= IFRFbWriteEmbedded(priv, 0x040c1400 +
+ (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
+ ret &= IFRFbWriteEmbedded(priv, 0x00299b00 +
+ (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
+ } else {
+ ret &= IFRFbWriteEmbedded(priv, 0x0005a400 +
+ (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
+ ret &= IFRFbWriteEmbedded(priv, 0x00099b00 +
+ (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
+ }
+ break;
+
+ case RF_AIROHA7230:
+ if (rate <= RATE_11M)
+ ret &= IFRFbWriteEmbedded(priv, 0x111bb900 +
+ (BY_AL7230_REG_LEN << 3)+IFREGCTL_REGW);
+ else
+ ret &= IFRFbWriteEmbedded(priv, 0x221bb900 +
+ (BY_AL7230_REG_LEN << 3)+IFREGCTL_REGW);
+
+ if (priv->byCurPwr > AL7230_PWR_IDX_LEN)
+ return false;
+
+ /*
+ * 0x080F1B00 for 3 wire control TxGain(D10)
+ * and 0x31 as TX Gain value
+ */
+ power_setting = 0x080c0b00 | ((priv->byCurPwr) << 12) |
+ (BY_AL7230_REG_LEN << 3) | IFREGCTL_REGW;
+
+ ret &= IFRFbWriteEmbedded(priv, power_setting);
+
+ break;
+
+ case RF_VT3226:
+ if (priv->byCurPwr >= VT3226_PWR_IDX_LEN)
+ return false;
+ power_setting = ((0x3f - priv->byCurPwr) << 20) | (0x17 << 8) |
+ (BY_VT3226_REG_LEN << 3) | IFREGCTL_REGW;
+
+ ret &= IFRFbWriteEmbedded(priv, power_setting);
+
+ break;
+ case RF_VT3226D0:
+ if (priv->byCurPwr >= VT3226_PWR_IDX_LEN)
+ return false;
+
+ if (rate <= RATE_11M) {
+ power_setting = ((0x3f-priv->byCurPwr) << 20) |
+ (0xe07 << 8) | (BY_VT3226_REG_LEN << 3) |
+ IFREGCTL_REGW;
+
+ ret &= IFRFbWriteEmbedded(priv, power_setting);
+ ret &= IFRFbWriteEmbedded(priv, 0x03c6a200 +
+ (BY_VT3226_REG_LEN<<3)+IFREGCTL_REGW);
+
+ if (priv->vnt_mgmt.eScanState != WMAC_NO_SCANNING) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "RFbRawSetPower> 11B mode uCurrChannel[%d]\n",
+ priv->vnt_mgmt.uScanChannel);
+ ret &= IFRFbWriteEmbedded(priv,
+ vt3226d0_lo_current_table[priv->
+ vnt_mgmt.uScanChannel - 1]);
+ } else {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
"RFbRawSetPower> 11B mode uCurrChannel[%d]\n",
- pDevice->vnt_mgmt.uScanChannel);
- bResult &= IFRFbWriteEmbedded(pDevice,
- dwVT3226D0LoCurrentTable[pDevice->
- vnt_mgmt.uScanChannel - 1]);
+ priv->vnt_mgmt.uCurrChannel);
+ ret &= IFRFbWriteEmbedded(priv,
+ vt3226d0_lo_current_table[priv->
+ vnt_mgmt.uCurrChannel - 1]);
+ }
+
+ ret &= IFRFbWriteEmbedded(priv, 0x015C0800 +
+ (BY_VT3226_REG_LEN<<3)+IFREGCTL_REGW);
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
- "RFbRawSetPower> 11B mode uCurrChannel[%d]\n",
- pDevice->vnt_mgmt.uCurrChannel);
- bResult &= IFRFbWriteEmbedded(pDevice,
- dwVT3226D0LoCurrentTable[pDevice->
- vnt_mgmt.uCurrChannel - 1]);
+ "@@@@ RFbRawSetPower> 11G mode\n");
+
+ power_setting = ((0x3f-priv->byCurPwr) << 20) |
+ (0x7 << 8) | (BY_VT3226_REG_LEN << 3) |
+ IFREGCTL_REGW;
+
+ ret &= IFRFbWriteEmbedded(priv, power_setting);
+ ret &= IFRFbWriteEmbedded(priv, 0x00C6A200 +
+ (BY_VT3226_REG_LEN << 3) + IFREGCTL_REGW);
+ ret &= IFRFbWriteEmbedded(priv, 0x016BC600 +
+ (BY_VT3226_REG_LEN<<3)+IFREGCTL_REGW);
+ ret &= IFRFbWriteEmbedded(priv, 0x00900800 +
+ (BY_VT3226_REG_LEN<<3)+IFREGCTL_REGW);
}
+ break;
+
+ case RF_VT3342A0:
+ if (priv->byCurPwr >= VT3342_PWR_IDX_LEN)
+ return false;
+
+ power_setting = ((0x3F-priv->byCurPwr) << 20) |
+ (0x27 << 8) | (BY_VT3342_REG_LEN << 3) |
+ IFREGCTL_REGW;
- bResult &= IFRFbWriteEmbedded(pDevice, 0x015C0800+(BY_VT3226_REG_LEN<<3)+IFREGCTL_REGW); //RobertYu:20060420, ok now, new switching power (mini-pci can have bigger power consumption)
- } else {
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"@@@@ RFbRawSetPower> 11G mode\n");
- dwVT3226Pwr = ((0x3F-pDevice->byCurPwr) << 20 ) | ( 0x7 << 8 ) /* Reg7 */ | //RobertYu:20060420, TWIF 1.10
- (BY_VT3226_REG_LEN << 3 ) | IFREGCTL_REGW;
- bResult &= IFRFbWriteEmbedded(pDevice, dwVT3226Pwr);
- bResult &= IFRFbWriteEmbedded(pDevice, 0x00C6A200+(BY_VT3226_REG_LEN<<3)+IFREGCTL_REGW); //RobertYu:20060327
- bResult &= IFRFbWriteEmbedded(pDevice, 0x016BC600+(BY_VT3226_REG_LEN<<3)+IFREGCTL_REGW); //RobertYu:20060111
- bResult &= IFRFbWriteEmbedded(pDevice, 0x00900800+(BY_VT3226_REG_LEN<<3)+IFREGCTL_REGW); //RobertYu:20060111
- }
- break;
- }
-
- //{{RobertYu:20060609
- case RF_VT3342A0:
- {
- u32 dwVT3342Pwr;
-
- if (pDevice->byCurPwr >= VT3342_PWR_IDX_LEN)
- return false;
-
- dwVT3342Pwr = ((0x3F-pDevice->byCurPwr) << 20 ) | ( 0x27 << 8 ) /* Reg7 */ |
- (BY_VT3342_REG_LEN << 3 ) | IFREGCTL_REGW;
- bResult &= IFRFbWriteEmbedded(pDevice, dwVT3342Pwr);
- break;
- }
-
- default :
- break;
- }
- return bResult;
+ ret &= IFRFbWriteEmbedded(priv, power_setting);
+
+ break;
+ default:
+ break;
+ }
+ return ret;
}
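Every word handed to IFRFbWriteEmbedded() above follows the same packing: a chip-specific base or shifted power index, the register length starting at bit 3, and the write flag in the low bits. A standalone sketch of the VT3226 case, using placeholder constants that only stand in for the driver's rf.h values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder values; the driver's real constants live in rf.h. */
#define VT3226_REG_LEN_SKETCH   23u
#define IFREGCTL_REGW_SKETCH    0x01u

static uint32_t vt3226_power_word(uint8_t power_index)
{
        /* (0x3f - index) lands in bits 25..20, register select 0x17 in bits 15..8 */
        return ((0x3fu - power_index) << 20) | (0x17u << 8) |
               (VT3226_REG_LEN_SKETCH << 3) | IFREGCTL_REGW_SKETCH;
}

int main(void)
{
        printf("power word for index 5: 0x%08" PRIx32 "\n", vt3226_power_word(5));
        return 0;
}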
/*+
@@ -931,169 +943,150 @@ int RFbRawSetPower(struct vnt_private *pDevice, u8 byPwr, u32 uRATE)
* Return Value: none
*
-*/
-void RFvRSSITodBm(struct vnt_private *pDevice, u8 byCurrRSSI, long *pldBm)
+void RFvRSSITodBm(struct vnt_private *priv, u8 rssi, long *dbm)
{
- u8 byIdx = (((byCurrRSSI & 0xC0) >> 6) & 0x03);
- signed long b = (byCurrRSSI & 0x3F);
- signed long a = 0;
- u8 abyAIROHARF[4] = {0, 18, 0, 40};
-
- switch (pDevice->byRFType) {
- case RF_AL2230:
- case RF_AL2230S:
- case RF_AIROHA7230:
- case RF_VT3226: //RobertYu:20051111
- case RF_VT3226D0:
- case RF_VT3342A0: //RobertYu:20060609
- a = abyAIROHARF[byIdx];
- break;
- default:
- break;
- }
-
- *pldBm = -1 * (a + b * 2);
+ u8 idx = (((rssi & 0xc0) >> 6) & 0x03);
+ long b = (rssi & 0x3f);
+ long a = 0;
+ u8 airoharf[4] = {0, 18, 0, 40};
+
+ switch (priv->byRFType) {
+ case RF_AL2230:
+ case RF_AL2230S:
+ case RF_AIROHA7230:
+ case RF_VT3226:
+ case RF_VT3226D0:
+ case RF_VT3342A0:
+ a = airoharf[idx];
+ break;
+ default:
+ break;
+ }
+
+ *dbm = -1 * (a + b * 2);
}
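The conversion above takes the top two RSSI bits as an index into a small offset table and the low six bits as the raw level, then reports -(offset + 2 * level) dBm. A minimal userspace check of that arithmetic, with the same offsets as airoharf[]:

#include <stdint.h>
#include <stdio.h>

static long rssi_to_dbm_sketch(uint8_t rssi)
{
        static const uint8_t offset[4] = {0, 18, 0, 40};  /* as in airoharf[] */
        uint8_t idx = (rssi >> 6) & 0x03;   /* top two bits pick the offset */
        long level = rssi & 0x3f;           /* low six bits are the raw level */

        return -1 * (offset[idx] + level * 2);
}

int main(void)
{
        /* 0x5a: idx 1, level 26, so -(18 + 52) = -70 dBm */
        printf("%ld dBm\n", rssi_to_dbm_sketch(0x5a));
        return 0;
}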
-void RFbRFTableDownload(struct vnt_private *pDevice)
+void RFbRFTableDownload(struct vnt_private *priv)
{
- u16 wLength1 = 0, wLength2 = 0, wLength3 = 0;
- u8 *pbyAddr1 = NULL, *pbyAddr2 = NULL, *pbyAddr3 = NULL;
- u16 wLength, wValue;
- u8 abyArray[256];
-
- switch ( pDevice->byRFType ) {
- case RF_AL2230:
- case RF_AL2230S:
- wLength1 = CB_AL2230_INIT_SEQ * 3;
- wLength2 = CB_MAX_CHANNEL_24G * 3;
- wLength3 = CB_MAX_CHANNEL_24G * 3;
- pbyAddr1 = &(abyAL2230InitTable[0][0]);
- pbyAddr2 = &(abyAL2230ChannelTable0[0][0]);
- pbyAddr3 = &(abyAL2230ChannelTable1[0][0]);
- break;
- case RF_AIROHA7230:
- wLength1 = CB_AL7230_INIT_SEQ * 3;
- wLength2 = CB_MAX_CHANNEL * 3;
- wLength3 = CB_MAX_CHANNEL * 3;
- pbyAddr1 = &(abyAL7230InitTable[0][0]);
- pbyAddr2 = &(abyAL7230ChannelTable0[0][0]);
- pbyAddr3 = &(abyAL7230ChannelTable1[0][0]);
- break;
- case RF_VT3226: //RobertYu:20051111
- wLength1 = CB_VT3226_INIT_SEQ * 3;
- wLength2 = CB_MAX_CHANNEL_24G * 3;
- wLength3 = CB_MAX_CHANNEL_24G * 3;
- pbyAddr1 = &(abyVT3226_InitTable[0][0]);
- pbyAddr2 = &(abyVT3226_ChannelTable0[0][0]);
- pbyAddr3 = &(abyVT3226_ChannelTable1[0][0]);
- break;
- case RF_VT3226D0: //RobertYu:20051114
- wLength1 = CB_VT3226_INIT_SEQ * 3;
- wLength2 = CB_MAX_CHANNEL_24G * 3;
- wLength3 = CB_MAX_CHANNEL_24G * 3;
- pbyAddr1 = &(abyVT3226D0_InitTable[0][0]);
- pbyAddr2 = &(abyVT3226_ChannelTable0[0][0]);
- pbyAddr3 = &(abyVT3226_ChannelTable1[0][0]);
- break;
- case RF_VT3342A0: //RobertYu:20060609
- wLength1 = CB_VT3342_INIT_SEQ * 3;
- wLength2 = CB_MAX_CHANNEL * 3;
- wLength3 = CB_MAX_CHANNEL * 3;
- pbyAddr1 = &(abyVT3342A0_InitTable[0][0]);
- pbyAddr2 = &(abyVT3342_ChannelTable0[0][0]);
- pbyAddr3 = &(abyVT3342_ChannelTable1[0][0]);
- break;
-
- }
- //Init Table
-
- memcpy(abyArray, pbyAddr1, wLength1);
- CONTROLnsRequestOut(pDevice,
- MESSAGE_TYPE_WRITE,
- 0,
- MESSAGE_REQUEST_RF_INIT,
- wLength1,
- abyArray
- );
- //Channel Table 0
- wValue = 0;
- while ( wLength2 > 0 ) {
-
- if ( wLength2 >= 64 ) {
- wLength = 64;
- } else {
- wLength = wLength2;
- }
- memcpy(abyArray, pbyAddr2, wLength);
- CONTROLnsRequestOut(pDevice,
- MESSAGE_TYPE_WRITE,
- wValue,
- MESSAGE_REQUEST_RF_CH0,
- wLength,
- abyArray);
-
- wLength2 -= wLength;
- wValue += wLength;
- pbyAddr2 += wLength;
- }
- //Channel table 1
- wValue = 0;
- while ( wLength3 > 0 ) {
-
- if ( wLength3 >= 64 ) {
- wLength = 64;
- } else {
- wLength = wLength3;
- }
- memcpy(abyArray, pbyAddr3, wLength);
- CONTROLnsRequestOut(pDevice,
- MESSAGE_TYPE_WRITE,
- wValue,
- MESSAGE_REQUEST_RF_CH1,
- wLength,
- abyArray);
-
- wLength3 -= wLength;
- wValue += wLength;
- pbyAddr3 += wLength;
- }
-
- //7230 needs 2 InitTable and 3 Channel Table
- if ( pDevice->byRFType == RF_AIROHA7230 ) {
- wLength1 = CB_AL7230_INIT_SEQ * 3;
- wLength2 = CB_MAX_CHANNEL * 3;
- pbyAddr1 = &(abyAL7230InitTableAMode[0][0]);
- pbyAddr2 = &(abyAL7230ChannelTable2[0][0]);
- memcpy(abyArray, pbyAddr1, wLength1);
- //Init Table 2
- CONTROLnsRequestOut(pDevice,
- MESSAGE_TYPE_WRITE,
- 0,
- MESSAGE_REQUEST_RF_INIT2,
- wLength1,
- abyArray);
-
- //Channel Table 0
- wValue = 0;
- while ( wLength2 > 0 ) {
-
- if ( wLength2 >= 64 ) {
- wLength = 64;
- } else {
- wLength = wLength2;
- }
- memcpy(abyArray, pbyAddr2, wLength);
- CONTROLnsRequestOut(pDevice,
- MESSAGE_TYPE_WRITE,
- wValue,
- MESSAGE_REQUEST_RF_CH2,
- wLength,
- abyArray);
-
- wLength2 -= wLength;
- wValue += wLength;
- pbyAddr2 += wLength;
- }
- }
-
+ u16 length1 = 0, length2 = 0, length3 = 0;
+ u8 *addr1 = NULL, *addr2 = NULL, *addr3 = NULL;
+ u16 length, value;
+ u8 array[256];
+
+ switch (priv->byRFType) {
+ case RF_AL2230:
+ case RF_AL2230S:
+ length1 = CB_AL2230_INIT_SEQ * 3;
+ length2 = CB_MAX_CHANNEL_24G * 3;
+ length3 = CB_MAX_CHANNEL_24G * 3;
+ addr1 = &al2230_init_table[0][0];
+ addr2 = &al2230_channel_table0[0][0];
+ addr3 = &al2230_channel_table1[0][0];
+ break;
+ case RF_AIROHA7230:
+ length1 = CB_AL7230_INIT_SEQ * 3;
+ length2 = CB_MAX_CHANNEL * 3;
+ length3 = CB_MAX_CHANNEL * 3;
+ addr1 = &al7230_init_table[0][0];
+ addr2 = &al7230_channel_table0[0][0];
+ addr3 = &al7230_channel_table1[0][0];
+ break;
+ case RF_VT3226:
+ length1 = CB_VT3226_INIT_SEQ * 3;
+ length2 = CB_MAX_CHANNEL_24G * 3;
+ length3 = CB_MAX_CHANNEL_24G * 3;
+ addr1 = &at3226_init_table[0][0];
+ addr2 = &vt3226_channel_table0[0][0];
+ addr3 = &vt3226_channel_table1[0][0];
+ break;
+ case RF_VT3226D0:
+ length1 = CB_VT3226_INIT_SEQ * 3;
+ length2 = CB_MAX_CHANNEL_24G * 3;
+ length3 = CB_MAX_CHANNEL_24G * 3;
+ addr1 = &at3226d0_init_table[0][0];
+ addr2 = &vt3226_channel_table0[0][0];
+ addr3 = &vt3226_channel_table1[0][0];
+ break;
+ case RF_VT3342A0:
+ length1 = CB_VT3342_INIT_SEQ * 3;
+ length2 = CB_MAX_CHANNEL * 3;
+ length3 = CB_MAX_CHANNEL * 3;
+ addr1 = &vt3342a0_init_table[0][0];
+ addr2 = &vt3342_channel_table0[0][0];
+ addr3 = &vt3342_channel_table1[0][0];
+ break;
+ }
+
+ /* Init Table */
+ memcpy(array, addr1, length1);
+
+ CONTROLnsRequestOut(priv, MESSAGE_TYPE_WRITE, 0,
+ MESSAGE_REQUEST_RF_INIT, length1, array);
+
+ /* Channel Table 0 */
+ value = 0;
+ while (length2 > 0) {
+ if (length2 >= 64)
+ length = 64;
+ else
+ length = length2;
+
+ memcpy(array, addr2, length);
+
+ CONTROLnsRequestOut(priv, MESSAGE_TYPE_WRITE,
+ value, MESSAGE_REQUEST_RF_CH0, length, array);
+
+ length2 -= length;
+ value += length;
+ addr2 += length;
+ }
+
+ /* Channel table 1 */
+ value = 0;
+ while (length3 > 0) {
+ if (length3 >= 64)
+ length = 64;
+ else
+ length = length3;
+
+ memcpy(array, addr3, length);
+
+ CONTROLnsRequestOut(priv, MESSAGE_TYPE_WRITE,
+ value, MESSAGE_REQUEST_RF_CH1, length, array);
+
+ length3 -= length;
+ value += length;
+ addr3 += length;
+ }
+
+ if (priv->byRFType == RF_AIROHA7230) {
+ length1 = CB_AL7230_INIT_SEQ * 3;
+ length2 = CB_MAX_CHANNEL * 3;
+ addr1 = &(al7230_init_table_amode[0][0]);
+ addr2 = &(al7230_channel_table2[0][0]);
+
+ memcpy(array, addr1, length1);
+
+ /* Init Table 2 */
+ CONTROLnsRequestOut(priv, MESSAGE_TYPE_WRITE,
+ 0, MESSAGE_REQUEST_RF_INIT2, length1, array);
+
+ /* Channel Table 0 */
+ value = 0;
+ while (length2 > 0) {
+ if (length2 >= 64)
+ length = 64;
+ else
+ length = length2;
+
+ memcpy(array, addr2, length);
+
+ CONTROLnsRequestOut(priv, MESSAGE_TYPE_WRITE,
+ value, MESSAGE_REQUEST_RF_CH2, length, array);
+
+ length2 -= length;
+ value += length;
+ addr2 += length;
+ }
+ }
}
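RFbRFTableDownload() pushes each table to the firmware in 64-byte chunks, advancing the write offset and the source pointer together. The loop reduces to the pattern below; the send callback is a hypothetical stand-in for CONTROLnsRequestOut():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical sink standing in for CONTROLnsRequestOut(). */
typedef void (*chunk_sink)(uint16_t offset, const uint8_t *buf, uint16_t len);

static void send_table_chunked(const uint8_t *table, uint16_t total,
                               chunk_sink send)
{
        uint8_t buf[64];
        uint16_t offset = 0;

        while (total > 0) {
                uint16_t len = (total >= 64) ? 64 : total;

                memcpy(buf, table, len);
                send(offset, buf, len);         /* firmware write at this offset */

                total  -= len;
                offset += len;
                table  += len;
        }
}

static void print_sink(uint16_t offset, const uint8_t *buf, uint16_t len)
{
        (void)buf;
        printf("chunk at offset %u, %u bytes\n", (unsigned)offset, (unsigned)len);
}

int main(void)
{
        uint8_t table[150] = {0};

        send_table_chunked(table, sizeof(table), print_sink);
        return 0;
}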
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index 9bf2f8d..14f3e85 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -52,7 +52,6 @@
#include "card.h"
#include "bssdb.h"
#include "mac.h"
-#include "baseband.h"
#include "michael.h"
#include "tkip.h"
#include "tcrc.h"
@@ -101,13 +100,12 @@ static void *s_vGetFreeContext(struct vnt_private *pDevice);
static void s_vGenerateTxParameter(struct vnt_private *pDevice,
u8 byPktType, u16 wCurrentRate, void *pTxBufHead, void *pvRrvTime,
- void *pvRTS, void *pvCTS, u32 cbFrameSize, int bNeedACK, u32 uDMAIdx,
- struct ethhdr *psEthHeader);
+ void *rts_cts, u32 cbFrameSize, int bNeedACK, u32 uDMAIdx,
+ struct ethhdr *psEthHeader, bool need_rts);
static u32 s_uFillDataHead(struct vnt_private *pDevice,
u8 byPktType, u16 wCurrentRate, void *pTxDataHead, u32 cbFrameLength,
- u32 uDMAIdx, int bNeedAck, u32 uFragIdx, u32 cbLastFragmentSize,
- u32 uMACfragNum, u8 byFBOption);
+ u32 uDMAIdx, int bNeedAck, u8 byFBOption);
static void s_vGenerateMACHeader(struct vnt_private *pDevice,
u8 *pbyBufferAddr, u16 wDuration, struct ethhdr *psEthHeader,
@@ -115,7 +113,7 @@ static void s_vGenerateMACHeader(struct vnt_private *pDevice,
static void s_vFillTxKey(struct vnt_private *pDevice, u8 *pbyBuf,
u8 *pbyIVHead, PSKeyItem pTransmitKey, u8 *pbyHdrBuf, u16 wPayloadLen,
- u8 *pMICHDR);
+ struct vnt_mic_hdr *mic_hdr);
static void s_vSWencryption(struct vnt_private *pDevice,
PSKeyItem pTransmitKey, u8 *pbyPayloadHead, u16 wPayloadSize);
@@ -123,38 +121,39 @@ static void s_vSWencryption(struct vnt_private *pDevice,
static unsigned int s_uGetTxRsvTime(struct vnt_private *pDevice, u8 byPktType,
u32 cbFrameLength, u16 wRate, int bNeedAck);
-static u32 s_uGetRTSCTSRsvTime(struct vnt_private *pDevice, u8 byRTSRsvType,
+static u16 s_uGetRTSCTSRsvTime(struct vnt_private *pDevice, u8 byRTSRsvType,
u8 byPktType, u32 cbFrameLength, u16 wCurrentRate);
static void s_vFillCTSHead(struct vnt_private *pDevice, u32 uDMAIdx,
- u8 byPktType, void *pvCTS, u32 cbFrameLength, int bNeedAck,
- int bDisCRC, u16 wCurrentRate, u8 byFBOption);
+ u8 byPktType, union vnt_tx_data_head *head, u32 cbFrameLength,
+ int bNeedAck, u16 wCurrentRate, u8 byFBOption);
static void s_vFillRTSHead(struct vnt_private *pDevice, u8 byPktType,
- void *pvRTS, u32 cbFrameLength, int bNeedAck, int bDisCRC,
+ union vnt_tx_data_head *head, u32 cbFrameLength, int bNeedAck,
struct ethhdr *psEthHeader, u16 wCurrentRate, u8 byFBOption);
-static u32 s_uGetDataDuration(struct vnt_private *pDevice, u8 byDurType,
- u32 cbFrameLength, u8 byPktType, u16 wRate, int bNeedAck,
- u32 uFragIdx, u32 cbLastFragmentSize, u32 uMACfragNum,
- u8 byFBOption);
+static u16 s_uGetDataDuration(struct vnt_private *pDevice,
+ u8 byPktType, int bNeedAck);
-static unsigned int s_uGetRTSCTSDuration(struct vnt_private *pDevice,
+static u16 s_uGetRTSCTSDuration(struct vnt_private *pDevice,
u8 byDurType, u32 cbFrameLength, u8 byPktType, u16 wRate,
int bNeedAck, u8 byFBOption);
static void *s_vGetFreeContext(struct vnt_private *pDevice)
{
- PUSB_SEND_CONTEXT pContext = NULL;
- PUSB_SEND_CONTEXT pReturnContext = NULL;
+ struct vnt_usb_send_context *pContext = NULL;
+ struct vnt_usb_send_context *pReturnContext = NULL;
int ii;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GetFreeContext()\n");
for (ii = 0; ii < pDevice->cbTD; ii++) {
+ if (!pDevice->apTD[ii])
+ return NULL;
pContext = pDevice->apTD[ii];
if (pContext->bBoolInUse == false) {
pContext->bBoolInUse = true;
+ memset(pContext->Data, 0, MAX_TOTAL_SIZE_WITH_ALL_HEADERS);
pReturnContext = pContext;
break;
}
@@ -186,109 +185,117 @@ static void s_vSaveTxPktInfo(struct vnt_private *pDevice, u8 byPktNum,
static void s_vFillTxKey(struct vnt_private *pDevice, u8 *pbyBuf,
u8 *pbyIVHead, PSKeyItem pTransmitKey, u8 *pbyHdrBuf,
- u16 wPayloadLen, u8 *pMICHDR)
+ u16 wPayloadLen, struct vnt_mic_hdr *mic_hdr)
{
u32 *pdwIV = (u32 *)pbyIVHead;
u32 *pdwExtIV = (u32 *)((u8 *)pbyIVHead + 4);
- u16 wValue;
struct ieee80211_hdr *pMACHeader = (struct ieee80211_hdr *)pbyHdrBuf;
u32 dwRevIVCounter;
- //Fill TXKEY
- if (pTransmitKey == NULL)
- return;
+ /* Fill TXKEY */
+ if (pTransmitKey == NULL)
+ return;
- dwRevIVCounter = cpu_to_le32(pDevice->dwIVCounter);
- *pdwIV = pDevice->dwIVCounter;
- pDevice->byKeyIndex = pTransmitKey->dwKeyIndex & 0xf;
+ dwRevIVCounter = cpu_to_le32(pDevice->dwIVCounter);
+ *pdwIV = pDevice->dwIVCounter;
+ pDevice->byKeyIndex = pTransmitKey->dwKeyIndex & 0xf;
- if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) {
- if (pTransmitKey->uKeyLength == WLAN_WEP232_KEYLEN ){
- memcpy(pDevice->abyPRNG, (u8 *)&(dwRevIVCounter), 3);
- memcpy(pDevice->abyPRNG+3, pTransmitKey->abyKey, pTransmitKey->uKeyLength);
- } else {
- memcpy(pbyBuf, (u8 *)&(dwRevIVCounter), 3);
- memcpy(pbyBuf+3, pTransmitKey->abyKey, pTransmitKey->uKeyLength);
- if(pTransmitKey->uKeyLength == WLAN_WEP40_KEYLEN) {
- memcpy(pbyBuf+8, (u8 *)&(dwRevIVCounter), 3);
- memcpy(pbyBuf+11, pTransmitKey->abyKey, pTransmitKey->uKeyLength);
- }
- memcpy(pDevice->abyPRNG, pbyBuf, 16);
- }
- // Append IV after Mac Header
- *pdwIV &= WEP_IV_MASK;//00000000 11111111 11111111 11111111
- *pdwIV |= (u32)pDevice->byKeyIndex << 30;
- *pdwIV = cpu_to_le32(*pdwIV);
- pDevice->dwIVCounter++;
- if (pDevice->dwIVCounter > WEP_IV_MASK) {
- pDevice->dwIVCounter = 0;
- }
- } else if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) {
- pTransmitKey->wTSC15_0++;
- if (pTransmitKey->wTSC15_0 == 0) {
- pTransmitKey->dwTSC47_16++;
- }
- TKIPvMixKey(pTransmitKey->abyKey, pDevice->abyCurrentNetAddr,
- pTransmitKey->wTSC15_0, pTransmitKey->dwTSC47_16, pDevice->abyPRNG);
- memcpy(pbyBuf, pDevice->abyPRNG, 16);
- // Make IV
- memcpy(pdwIV, pDevice->abyPRNG, 3);
-
- *(pbyIVHead+3) = (u8)(((pDevice->byKeyIndex << 6) & 0xc0) | 0x20); // 0x20 is ExtIV
- // Append IV&ExtIV after Mac Header
- *pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vFillTxKey()---- pdwExtIV: %x\n",
- *pdwExtIV);
-
- } else if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) {
- pTransmitKey->wTSC15_0++;
- if (pTransmitKey->wTSC15_0 == 0) {
- pTransmitKey->dwTSC47_16++;
- }
- memcpy(pbyBuf, pTransmitKey->abyKey, 16);
-
- // Make IV
- *pdwIV = 0;
- *(pbyIVHead+3) = (u8)(((pDevice->byKeyIndex << 6) & 0xc0) | 0x20); // 0x20 is ExtIV
- *pdwIV |= cpu_to_le16((u16)(pTransmitKey->wTSC15_0));
- //Append IV&ExtIV after Mac Header
- *pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16);
-
- //Fill MICHDR0
- *pMICHDR = 0x59;
- *((u8 *)(pMICHDR+1)) = 0; // TxPriority
- memcpy(pMICHDR+2, &(pMACHeader->addr2[0]), 6);
- *((u8 *)(pMICHDR+8)) = HIBYTE(HIWORD(pTransmitKey->dwTSC47_16));
- *((u8 *)(pMICHDR+9)) = LOBYTE(HIWORD(pTransmitKey->dwTSC47_16));
- *((u8 *)(pMICHDR+10)) = HIBYTE(LOWORD(pTransmitKey->dwTSC47_16));
- *((u8 *)(pMICHDR+11)) = LOBYTE(LOWORD(pTransmitKey->dwTSC47_16));
- *((u8 *)(pMICHDR+12)) = HIBYTE(pTransmitKey->wTSC15_0);
- *((u8 *)(pMICHDR+13)) = LOBYTE(pTransmitKey->wTSC15_0);
- *((u8 *)(pMICHDR+14)) = HIBYTE(wPayloadLen);
- *((u8 *)(pMICHDR+15)) = LOBYTE(wPayloadLen);
-
- //Fill MICHDR1
- *((u8 *)(pMICHDR+16)) = 0; // HLEN[15:8]
- if (pDevice->bLongHeader) {
- *((u8 *)(pMICHDR+17)) = 28; // HLEN[7:0]
- } else {
- *((u8 *)(pMICHDR+17)) = 22; // HLEN[7:0]
- }
- wValue = cpu_to_le16(pMACHeader->frame_control & 0xC78F);
- memcpy(pMICHDR+18, (u8 *)&wValue, 2); // MSKFRACTL
- memcpy(pMICHDR+20, &(pMACHeader->addr1[0]), 6);
- memcpy(pMICHDR+26, &(pMACHeader->addr2[0]), 6);
-
- //Fill MICHDR2
- memcpy(pMICHDR+32, &(pMACHeader->addr3[0]), 6);
- wValue = pMACHeader->seq_ctrl;
- wValue &= 0x000F;
- wValue = cpu_to_le16(wValue);
- memcpy(pMICHDR+38, (u8 *)&wValue, 2); // MSKSEQCTL
- if (pDevice->bLongHeader) {
- memcpy(pMICHDR+40, &(pMACHeader->addr4[0]), 6);
- }
- }
+ switch (pTransmitKey->byCipherSuite) {
+ case KEY_CTL_WEP:
+ if (pTransmitKey->uKeyLength == WLAN_WEP232_KEYLEN) {
+ memcpy(pDevice->abyPRNG, (u8 *)&dwRevIVCounter, 3);
+ memcpy(pDevice->abyPRNG + 3, pTransmitKey->abyKey,
+ pTransmitKey->uKeyLength);
+ } else {
+ memcpy(pbyBuf, (u8 *)&dwRevIVCounter, 3);
+ memcpy(pbyBuf + 3, pTransmitKey->abyKey,
+ pTransmitKey->uKeyLength);
+ if (pTransmitKey->uKeyLength == WLAN_WEP40_KEYLEN) {
+ memcpy(pbyBuf+8, (u8 *)&dwRevIVCounter, 3);
+ memcpy(pbyBuf+11, pTransmitKey->abyKey,
+ pTransmitKey->uKeyLength);
+ }
+
+ memcpy(pDevice->abyPRNG, pbyBuf, 16);
+ }
+ /* Append IV after Mac Header */
+ *pdwIV &= WEP_IV_MASK;
+ *pdwIV |= (u32)pDevice->byKeyIndex << 30;
+ *pdwIV = cpu_to_le32(*pdwIV);
+
+ pDevice->dwIVCounter++;
+ if (pDevice->dwIVCounter > WEP_IV_MASK)
+ pDevice->dwIVCounter = 0;
+
+ break;
+ case KEY_CTL_TKIP:
+ pTransmitKey->wTSC15_0++;
+ if (pTransmitKey->wTSC15_0 == 0)
+ pTransmitKey->dwTSC47_16++;
+
+ TKIPvMixKey(pTransmitKey->abyKey, pDevice->abyCurrentNetAddr,
+ pTransmitKey->wTSC15_0, pTransmitKey->dwTSC47_16,
+ pDevice->abyPRNG);
+ memcpy(pbyBuf, pDevice->abyPRNG, 16);
+
+ /* Make IV */
+ memcpy(pdwIV, pDevice->abyPRNG, 3);
+
+ *(pbyIVHead+3) = (u8)(((pDevice->byKeyIndex << 6) &
+ 0xc0) | 0x20);
+ /* Append IV&ExtIV after Mac Header */
+ *pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16);
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "vFillTxKey()---- pdwExtIV: %x\n", *pdwExtIV);
+
+ break;
+ case KEY_CTL_CCMP:
+ pTransmitKey->wTSC15_0++;
+ if (pTransmitKey->wTSC15_0 == 0)
+ pTransmitKey->dwTSC47_16++;
+
+ memcpy(pbyBuf, pTransmitKey->abyKey, 16);
+
+ /* Make IV */
+ *pdwIV = 0;
+ *(pbyIVHead+3) = (u8)(((pDevice->byKeyIndex << 6) &
+ 0xc0) | 0x20);
+
+ *pdwIV |= cpu_to_le16((u16)(pTransmitKey->wTSC15_0));
+
+ /* Append IV&ExtIV after Mac Header */
+ *pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16);
+
+ if (!mic_hdr)
+ return;
+
+ /* MICHDR0 */
+ mic_hdr->id = 0x59;
+ mic_hdr->payload_len = cpu_to_be16(wPayloadLen);
+ memcpy(mic_hdr->mic_addr2, pMACHeader->addr2, ETH_ALEN);
+
+ mic_hdr->tsc_47_16 = cpu_to_be32(pTransmitKey->dwTSC47_16);
+ mic_hdr->tsc_15_0 = cpu_to_be16(pTransmitKey->wTSC15_0);
+
+ /* MICHDR1 */
+ if (pDevice->bLongHeader)
+ mic_hdr->hlen = cpu_to_be16(28);
+ else
+ mic_hdr->hlen = cpu_to_be16(22);
+
+ memcpy(mic_hdr->addr1, pMACHeader->addr1, ETH_ALEN);
+ memcpy(mic_hdr->addr2, pMACHeader->addr2, ETH_ALEN);
+
+ /* MICHDR2 */
+ memcpy(mic_hdr->addr3, pMACHeader->addr3, ETH_ALEN);
+ mic_hdr->frame_control = cpu_to_le16(pMACHeader->frame_control
+ & 0xc78f);
+ mic_hdr->seq_ctrl = cpu_to_le16(pMACHeader->seq_ctrl & 0xf);
+
+ if (pDevice->bLongHeader)
+ memcpy(mic_hdr->addr4, pMACHeader->addr4, ETH_ALEN);
+ }
}
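The CCMP branch now fills a structured vnt_mic_hdr rather than poking bytes by offset, with the 48-bit TSC split into a big-endian 32-bit high part and a big-endian 16-bit low part. A standalone sketch of that split; the struct layout here is illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout only; the real struct vnt_mic_hdr is defined by the driver. */
struct mic_hdr_sketch {
        uint8_t tsc_47_16[4];   /* big-endian high 32 bits of the TSC */
        uint8_t tsc_15_0[2];    /* big-endian low 16 bits of the TSC */
};

static void fill_tsc(struct mic_hdr_sketch *hdr, uint32_t tsc_hi, uint16_t tsc_lo)
{
        hdr->tsc_47_16[0] = tsc_hi >> 24;
        hdr->tsc_47_16[1] = tsc_hi >> 16;
        hdr->tsc_47_16[2] = tsc_hi >> 8;
        hdr->tsc_47_16[3] = tsc_hi;
        hdr->tsc_15_0[0] = tsc_lo >> 8;
        hdr->tsc_15_0[1] = tsc_lo;
}

int main(void)
{
        struct mic_hdr_sketch hdr;

        fill_tsc(&hdr, 0x00000001, 0x0203);
        printf("%02x %02x\n", (unsigned)hdr.tsc_47_16[3], (unsigned)hdr.tsc_15_0[1]);
        return 0;
}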
static void s_vSWencryption(struct vnt_private *pDevice,
@@ -326,6 +333,12 @@ static void s_vSWencryption(struct vnt_private *pDevice,
}
}
+static u16 vnt_time_stamp_off(struct vnt_private *priv, u16 rate)
+{
+ return cpu_to_le16(wTimeStampOff[priv->byPreambleType % 2]
+ [rate % MAX_RATE]);
+}
+
/*byPktType : PK_TYPE_11A 0
PK_TYPE_11B 1
PK_TYPE_11GB 2
@@ -351,8 +364,15 @@ static u32 s_uGetTxRsvTime(struct vnt_private *pDevice, u8 byPktType,
}
}
+static u16 vnt_rxtx_rsvtime_le16(struct vnt_private *priv, u8 pkt_type,
+ u32 frame_length, u16 rate, int need_ack)
+{
+ return cpu_to_le16((u16)s_uGetTxRsvTime(priv, pkt_type,
+ frame_length, rate, need_ack));
+}
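The two helpers added in this hunk fold the cpu_to_le16() conversion into their return value so callers no longer cast at every use. The same pattern outside the kernel, assuming glibc's htole16() as a stand-in for cpu_to_le16() and a toy timing formula rather than the driver's s_uGetTxRsvTime():

#include <endian.h>     /* htole16(); glibc/BSD extension, assumed available */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of the pattern only: compute in host order, return wire order. */
static uint16_t rsvtime_le16_sketch(uint32_t frame_len, uint32_t rate_bps)
{
        uint64_t usec = (uint64_t)frame_len * 8 * 1000000 / rate_bps;

        return htole16((uint16_t)usec);
}

int main(void)
{
        printf("0x%04" PRIx16 "\n", rsvtime_le16_sketch(1500, 54000000));
        return 0;
}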
+
//byFreqType: 0=>5GHZ 1=>2.4GHZ
-static u32 s_uGetRTSCTSRsvTime(struct vnt_private *pDevice,
+static u16 s_uGetRTSCTSRsvTime(struct vnt_private *pDevice,
u8 byRTSRsvType, u8 byPktType, u32 cbFrameLength, u16 wCurrentRate)
{
u32 uRrvTime, uRTSTime, uCTSTime, uAckTime, uDataTime;
@@ -382,168 +402,30 @@ static u32 s_uGetRTSCTSRsvTime(struct vnt_private *pDevice,
//RTSRrvTime
uRrvTime = uRTSTime + uCTSTime + uAckTime + uDataTime + 3*pDevice->uSIFS;
- return uRrvTime;
+ return cpu_to_le16((u16)uRrvTime);
}
//byFreqType 0: 5GHz, 1:2.4Ghz
-static u32 s_uGetDataDuration(struct vnt_private *pDevice, u8 byDurType,
- u32 cbFrameLength, u8 byPktType, u16 wRate, int bNeedAck,
- u32 uFragIdx, u32 cbLastFragmentSize, u32 uMACfragNum,
- u8 byFBOption)
+static u16 s_uGetDataDuration(struct vnt_private *pDevice,
+ u8 byPktType, int bNeedAck)
{
- int bLastFrag = 0;
- u32 uAckTime = 0, uNextPktTime = 0;
-
- if (uFragIdx == (uMACfragNum-1)) {
- bLastFrag = 1;
- }
-
- switch (byDurType) {
-
- case DATADUR_B: //DATADUR_B
- if (((uMACfragNum == 1)) || (bLastFrag == 1)) {//Non Frag or Last Frag
- if (bNeedAck) {
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
- return (pDevice->uSIFS + uAckTime);
- } else {
- return 0;
- }
- }
- else {//First Frag or Mid Frag
- if (uFragIdx == (uMACfragNum-2)) {
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wRate, bNeedAck);
- } else {
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
- }
- if (bNeedAck) {
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
- return (pDevice->uSIFS + uAckTime + uNextPktTime);
- } else {
- return (pDevice->uSIFS + uNextPktTime);
- }
- }
- break;
-
- case DATADUR_A: //DATADUR_A
- if (((uMACfragNum==1)) || (bLastFrag==1)) {//Non Frag or Last Frag
- if(bNeedAck){
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- return (pDevice->uSIFS + uAckTime);
- } else {
- return 0;
- }
- }
- else {//First Frag or Mid Frag
- if(uFragIdx == (uMACfragNum-2)){
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wRate, bNeedAck);
- } else {
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
- }
- if(bNeedAck){
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- return (pDevice->uSIFS + uAckTime + uNextPktTime);
- } else {
- return (pDevice->uSIFS + uNextPktTime);
- }
- }
- break;
-
- case DATADUR_A_F0: //DATADUR_A_F0
- if (((uMACfragNum==1)) || (bLastFrag==1)) {//Non Frag or Last Frag
- if(bNeedAck){
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- return (pDevice->uSIFS + uAckTime);
- } else {
- return 0;
- }
- }
- else { //First Frag or Mid Frag
- if (byFBOption == AUTO_FB_0) {
- if (wRate < RATE_18M)
- wRate = RATE_18M;
- else if (wRate > RATE_54M)
- wRate = RATE_54M;
-
- if(uFragIdx == (uMACfragNum-2)){
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
- } else {
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
- }
- } else { // (byFBOption == AUTO_FB_1)
- if (wRate < RATE_18M)
- wRate = RATE_18M;
- else if (wRate > RATE_54M)
- wRate = RATE_54M;
-
- if(uFragIdx == (uMACfragNum-2)){
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck);
- } else {
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck);
- }
- }
-
- if(bNeedAck){
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- return (pDevice->uSIFS + uAckTime + uNextPktTime);
- } else {
- return (pDevice->uSIFS + uNextPktTime);
- }
- }
- break;
-
- case DATADUR_A_F1: //DATADUR_A_F1
- if (((uMACfragNum==1)) || (bLastFrag==1)) {//Non Frag or Last Frag
- if(bNeedAck){
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- return (pDevice->uSIFS + uAckTime);
- } else {
- return 0;
- }
- }
- else { //First Frag or Mid Frag
- if (byFBOption == AUTO_FB_0) {
- if (wRate < RATE_18M)
- wRate = RATE_18M;
- else if (wRate > RATE_54M)
- wRate = RATE_54M;
-
- if(uFragIdx == (uMACfragNum-2)){
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
- } else {
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
- }
-
- } else { // (byFBOption == AUTO_FB_1)
- if (wRate < RATE_18M)
- wRate = RATE_18M;
- else if (wRate > RATE_54M)
- wRate = RATE_54M;
-
- if(uFragIdx == (uMACfragNum-2)){
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
- } else {
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
- }
- }
- if(bNeedAck){
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- return (pDevice->uSIFS + uAckTime + uNextPktTime);
- } else {
- return (pDevice->uSIFS + uNextPktTime);
- }
- }
- break;
-
- default:
- break;
- }
+ u32 uAckTime = 0;
+
+ if (bNeedAck) {
+ if (byPktType == PK_TYPE_11B)
+ uAckTime = BBuGetFrameTime(pDevice->byPreambleType,
+ byPktType, 14, pDevice->byTopCCKBasicRate);
+ else
+ uAckTime = BBuGetFrameTime(pDevice->byPreambleType,
+ byPktType, 14, pDevice->byTopOFDMBasicRate);
+ return cpu_to_le16((u16)(pDevice->uSIFS + uAckTime));
+ }
- ASSERT(false);
return 0;
}
//byFreqType: 0=>5GHZ 1=>2.4GHZ
-static u32 s_uGetRTSCTSDuration(struct vnt_private *pDevice, u8 byDurType,
+static u16 s_uGetRTSCTSDuration(struct vnt_private *pDevice, u8 byDurType,
u32 cbFrameLength, u8 byPktType, u16 wRate, int bNeedAck,
u8 byFBOption)
{
@@ -626,14 +508,12 @@ static u32 s_uGetRTSCTSDuration(struct vnt_private *pDevice, u8 byDurType,
break;
}
- return uDurTime;
-
+ return cpu_to_le16((u16)uDurTime);
}
static u32 s_uFillDataHead(struct vnt_private *pDevice,
u8 byPktType, u16 wCurrentRate, void *pTxDataHead, u32 cbFrameLength,
- u32 uDMAIdx, int bNeedAck, u32 uFragIdx, u32 cbLastFragmentSize,
- u32 uMACfragNum, u8 byFBOption)
+ u32 uDMAIdx, int bNeedAck, u8 byFBOption)
{
if (pTxDataHead == NULL) {
@@ -641,409 +521,301 @@ static u32 s_uFillDataHead(struct vnt_private *pDevice,
}
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
- if ((uDMAIdx == TYPE_ATIMDMA) || (uDMAIdx == TYPE_BEACONDMA)) {
- PSTxDataHead_ab pBuf = (PSTxDataHead_ab) pTxDataHead;
- //Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
- (u16 *)&(pBuf->wTransmitLength), (u8 *)&(pBuf->byServiceField), (u8 *)&(pBuf->bySignalField)
- );
- //Get Duration and TimeStampOff
- pBuf->wDuration = (u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
- wCurrentRate, bNeedAck, uFragIdx,
- cbLastFragmentSize, uMACfragNum,
- byFBOption); //1: 2.4GHz
- if(uDMAIdx!=TYPE_ATIMDMA) {
- pBuf->wTimeStampOff = wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE];
- }
- return (pBuf->wDuration);
- }
- else { // DATA & MANAGE Frame
if (byFBOption == AUTO_FB_NONE) {
- PSTxDataHead_g pBuf = (PSTxDataHead_g)pTxDataHead;
+ struct vnt_tx_datahead_g *pBuf =
+ (struct vnt_tx_datahead_g *)pTxDataHead;
//Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
- (u16 *)&(pBuf->wTransmitLength_a), (u8 *)&(pBuf->byServiceField_a), (u8 *)&(pBuf->bySignalField_a)
- );
- BBvCalculateParameter(pDevice, cbFrameLength, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
- (u16 *)&(pBuf->wTransmitLength_b), (u8 *)&(pBuf->byServiceField_b), (u8 *)&(pBuf->bySignalField_b)
- );
+ BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate,
+ byPktType, &pBuf->a);
+ BBvCalculateParameter(pDevice, cbFrameLength,
+ pDevice->byTopCCKBasicRate, PK_TYPE_11B, &pBuf->b);
//Get Duration and TimeStamp
- pBuf->wDuration_a = (u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength,
- byPktType, wCurrentRate, bNeedAck, uFragIdx,
- cbLastFragmentSize, uMACfragNum,
- byFBOption); //1: 2.4GHz
- pBuf->wDuration_b = (u16)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength,
- PK_TYPE_11B, pDevice->byTopCCKBasicRate,
- bNeedAck, uFragIdx, cbLastFragmentSize,
- uMACfragNum, byFBOption); //1: 2.4GHz
-
- pBuf->wTimeStampOff_a = wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE];
- pBuf->wTimeStampOff_b = wTimeStampOff[pDevice->byPreambleType%2][pDevice->byTopCCKBasicRate%MAX_RATE];
+ pBuf->wDuration_a = s_uGetDataDuration(pDevice,
+ byPktType, bNeedAck);
+ pBuf->wDuration_b = s_uGetDataDuration(pDevice,
+ PK_TYPE_11B, bNeedAck);
+
+ pBuf->wTimeStampOff_a = vnt_time_stamp_off(pDevice,
+ wCurrentRate);
+ pBuf->wTimeStampOff_b = vnt_time_stamp_off(pDevice,
+ pDevice->byTopCCKBasicRate);
return (pBuf->wDuration_a);
} else {
// Auto Fallback
- PSTxDataHead_g_FB pBuf = (PSTxDataHead_g_FB)pTxDataHead;
+ struct vnt_tx_datahead_g_fb *pBuf =
+ (struct vnt_tx_datahead_g_fb *)pTxDataHead;
//Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
- (u16 *)&(pBuf->wTransmitLength_a), (u8 *)&(pBuf->byServiceField_a), (u8 *)&(pBuf->bySignalField_a)
- );
- BBvCalculateParameter(pDevice, cbFrameLength, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
- (u16 *)&(pBuf->wTransmitLength_b), (u8 *)&(pBuf->byServiceField_b), (u8 *)&(pBuf->bySignalField_b)
- );
+ BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate,
+ byPktType, &pBuf->a);
+ BBvCalculateParameter(pDevice, cbFrameLength,
+ pDevice->byTopCCKBasicRate, PK_TYPE_11B, &pBuf->b);
//Get Duration and TimeStamp
- pBuf->wDuration_a = (u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
- wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption); //1: 2.4GHz
- pBuf->wDuration_b = (u16)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength, PK_TYPE_11B,
- pDevice->byTopCCKBasicRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption); //1: 2.4GHz
- pBuf->wDuration_a_f0 = (u16)s_uGetDataDuration(pDevice, DATADUR_A_F0, cbFrameLength, byPktType,
- wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption); //1: 2.4GHz
- pBuf->wDuration_a_f1 = (u16)s_uGetDataDuration(pDevice, DATADUR_A_F1, cbFrameLength, byPktType,
- wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption); //1: 2.4GHz
- pBuf->wTimeStampOff_a = wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE];
- pBuf->wTimeStampOff_b = wTimeStampOff[pDevice->byPreambleType%2][pDevice->byTopCCKBasicRate%MAX_RATE];
+ pBuf->wDuration_a = s_uGetDataDuration(pDevice,
+ byPktType, bNeedAck);
+ pBuf->wDuration_b = s_uGetDataDuration(pDevice,
+ PK_TYPE_11B, bNeedAck);
+ pBuf->wDuration_a_f0 = s_uGetDataDuration(pDevice,
+ byPktType, bNeedAck);
+ pBuf->wDuration_a_f1 = s_uGetDataDuration(pDevice,
+ byPktType, bNeedAck);
+ pBuf->wTimeStampOff_a = vnt_time_stamp_off(pDevice,
+ wCurrentRate);
+ pBuf->wTimeStampOff_b = vnt_time_stamp_off(pDevice,
+ pDevice->byTopCCKBasicRate);
return (pBuf->wDuration_a);
} //if (byFBOption == AUTO_FB_NONE)
- }
}
else if (byPktType == PK_TYPE_11A) {
- if ((byFBOption != AUTO_FB_NONE) && (uDMAIdx != TYPE_ATIMDMA) && (uDMAIdx != TYPE_BEACONDMA)) {
- // Auto Fallback
- PSTxDataHead_a_FB pBuf = (PSTxDataHead_a_FB)pTxDataHead;
+ if (byFBOption != AUTO_FB_NONE) {
+ struct vnt_tx_datahead_a_fb *pBuf =
+ (struct vnt_tx_datahead_a_fb *)pTxDataHead;
//Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
- (u16 *)&(pBuf->wTransmitLength), (u8 *)&(pBuf->byServiceField), (u8 *)&(pBuf->bySignalField)
- );
+ BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate,
+ byPktType, &pBuf->a);
//Get Duration and TimeStampOff
- pBuf->wDuration = (u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
- wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption); //0: 5GHz
- pBuf->wDuration_f0 = (u16)s_uGetDataDuration(pDevice, DATADUR_A_F0, cbFrameLength, byPktType,
- wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption); //0: 5GHz
- pBuf->wDuration_f1 = (u16)s_uGetDataDuration(pDevice, DATADUR_A_F1, cbFrameLength, byPktType,
- wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption); //0: 5GHz
- if(uDMAIdx!=TYPE_ATIMDMA) {
- pBuf->wTimeStampOff = wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE];
- }
+ pBuf->wDuration = s_uGetDataDuration(pDevice,
+ byPktType, bNeedAck);
+ pBuf->wDuration_f0 = s_uGetDataDuration(pDevice,
+ byPktType, bNeedAck);
+ pBuf->wDuration_f1 = s_uGetDataDuration(pDevice,
+ byPktType, bNeedAck);
+ pBuf->wTimeStampOff = vnt_time_stamp_off(pDevice,
+ wCurrentRate);
return (pBuf->wDuration);
} else {
- PSTxDataHead_ab pBuf = (PSTxDataHead_ab)pTxDataHead;
+ struct vnt_tx_datahead_ab *pBuf =
+ (struct vnt_tx_datahead_ab *)pTxDataHead;
//Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
- (u16 *)&(pBuf->wTransmitLength), (u8 *)&(pBuf->byServiceField), (u8 *)&(pBuf->bySignalField)
- );
+ BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate,
+ byPktType, &pBuf->ab);
//Get Duration and TimeStampOff
- pBuf->wDuration = (u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
- wCurrentRate, bNeedAck, uFragIdx,
- cbLastFragmentSize, uMACfragNum,
- byFBOption);
-
- if(uDMAIdx!=TYPE_ATIMDMA) {
- pBuf->wTimeStampOff = wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE];
- }
+ pBuf->wDuration = s_uGetDataDuration(pDevice,
+ byPktType, bNeedAck);
+ pBuf->wTimeStampOff = vnt_time_stamp_off(pDevice,
+ wCurrentRate);
return (pBuf->wDuration);
}
}
else if (byPktType == PK_TYPE_11B) {
- PSTxDataHead_ab pBuf = (PSTxDataHead_ab)pTxDataHead;
+ struct vnt_tx_datahead_ab *pBuf =
+ (struct vnt_tx_datahead_ab *)pTxDataHead;
//Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
- (u16 *)&(pBuf->wTransmitLength), (u8 *)&(pBuf->byServiceField), (u8 *)&(pBuf->bySignalField)
- );
+ BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate,
+ byPktType, &pBuf->ab);
//Get Duration and TimeStampOff
- pBuf->wDuration = (u16)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength, byPktType,
- wCurrentRate, bNeedAck, uFragIdx,
- cbLastFragmentSize, uMACfragNum,
- byFBOption);
- if (uDMAIdx != TYPE_ATIMDMA) {
- pBuf->wTimeStampOff = wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE];
- }
+ pBuf->wDuration = s_uGetDataDuration(pDevice,
+ byPktType, bNeedAck);
+ pBuf->wTimeStampOff = vnt_time_stamp_off(pDevice,
+ wCurrentRate);
return (pBuf->wDuration);
}
return 0;
}
-static void s_vFillRTSHead(struct vnt_private *pDevice, u8 byPktType,
- void *pvRTS, u32 cbFrameLength, int bNeedAck, int bDisCRC,
- struct ethhdr *psEthHeader, u16 wCurrentRate, u8 byFBOption)
+static int vnt_fill_ieee80211_rts(struct vnt_private *priv,
+ struct ieee80211_rts *rts, struct ethhdr *eth_hdr,
+ u16 duration)
{
- u32 uRTSFrameLen = 20;
- u16 wLen = 0;
+ rts->duration = duration;
+ rts->frame_control = TYPE_CTL_RTS;
- if (pvRTS == NULL)
- return;
+ if (priv->eOPMode == OP_MODE_ADHOC || priv->eOPMode == OP_MODE_AP)
+ memcpy(rts->ra, eth_hdr->h_dest, ETH_ALEN);
+ else
+ memcpy(rts->ra, priv->abyBSSID, ETH_ALEN);
- if (bDisCRC) {
- // When CRCDIS bit is on, H/W forgot to generate FCS for RTS frame,
- // in this case we need to decrease its length by 4.
- uRTSFrameLen -= 4;
- }
+ if (priv->eOPMode == OP_MODE_AP)
+ memcpy(rts->ta, priv->abyBSSID, ETH_ALEN);
+ else
+ memcpy(rts->ta, eth_hdr->h_source, ETH_ALEN);
- // Note: So far RTSHead doesn't appear in ATIM & Beacom DMA, so we don't need to take them into account.
- // Otherwise, we need to modified codes for them.
- if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
- if (byFBOption == AUTO_FB_NONE) {
- PSRTS_g pBuf = (PSRTS_g)pvRTS;
- //Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, uRTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
- (u16 *)&(wLen), (u8 *)&(pBuf->byServiceField_b), (u8 *)&(pBuf->bySignalField_b)
- );
- pBuf->wTransmitLength_b = cpu_to_le16(wLen);
- BBvCalculateParameter(pDevice, uRTSFrameLen, pDevice->byTopOFDMBasicRate, byPktType,
- (u16 *)&(wLen), (u8 *)&(pBuf->byServiceField_a), (u8 *)&(pBuf->bySignalField_a)
- );
- pBuf->wTransmitLength_a = cpu_to_le16(wLen);
- //Get Duration
- pBuf->wDuration_bb = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_BB, cbFrameLength, PK_TYPE_11B, pDevice->byTopCCKBasicRate, bNeedAck, byFBOption)); //0:RTSDuration_bb, 1:2.4G, 1:CCKData
- pBuf->wDuration_aa = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //2:RTSDuration_aa, 1:2.4G, 2,3: 2.4G OFDMData
- pBuf->wDuration_ba = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //1:RTSDuration_ba, 1:2.4G, 2,3:2.4G OFDM Data
-
- pBuf->Data.wDurationID = pBuf->wDuration_aa;
- //Get RTS Frame body
- pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
+ return 0;
+}
- if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
- (pDevice->eOPMode == OP_MODE_AP)) {
- memcpy(&(pBuf->Data.abyRA[0]),
- &(psEthHeader->h_dest[0]),
- ETH_ALEN);
- }
- else {
- memcpy(&(pBuf->Data.abyRA[0]),
- &(pDevice->abyBSSID[0]),
- ETH_ALEN);
- }
- if (pDevice->eOPMode == OP_MODE_AP) {
- memcpy(&(pBuf->Data.abyTA[0]),
- &(pDevice->abyBSSID[0]),
- ETH_ALEN);
- }
- else {
- memcpy(&(pBuf->Data.abyTA[0]),
- &(psEthHeader->h_source[0]),
- ETH_ALEN);
- }
- }
- else {
- PSRTS_g_FB pBuf = (PSRTS_g_FB)pvRTS;
- //Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, uRTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
- (u16 *)&(wLen), (u8 *)&(pBuf->byServiceField_b), (u8 *)&(pBuf->bySignalField_b)
- );
- pBuf->wTransmitLength_b = cpu_to_le16(wLen);
- BBvCalculateParameter(pDevice, uRTSFrameLen, pDevice->byTopOFDMBasicRate, byPktType,
- (u16 *)&(wLen), (u8 *)&(pBuf->byServiceField_a), (u8 *)&(pBuf->bySignalField_a)
- );
- pBuf->wTransmitLength_a = cpu_to_le16(wLen);
- //Get Duration
- pBuf->wDuration_bb = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_BB, cbFrameLength, PK_TYPE_11B, pDevice->byTopCCKBasicRate, bNeedAck, byFBOption)); //0:RTSDuration_bb, 1:2.4G, 1:CCKData
- pBuf->wDuration_aa = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //2:RTSDuration_aa, 1:2.4G, 2,3:2.4G OFDMData
- pBuf->wDuration_ba = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //1:RTSDuration_ba, 1:2.4G, 2,3:2.4G OFDMData
- pBuf->wRTSDuration_ba_f0 = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //4:wRTSDuration_ba_f0, 1:2.4G, 1:CCKData
- pBuf->wRTSDuration_aa_f0 = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //5:wRTSDuration_aa_f0, 1:2.4G, 1:CCKData
- pBuf->wRTSDuration_ba_f1 = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //6:wRTSDuration_ba_f1, 1:2.4G, 1:CCKData
- pBuf->wRTSDuration_aa_f1 = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //7:wRTSDuration_aa_f1, 1:2.4G, 1:CCKData
- pBuf->Data.wDurationID = pBuf->wDuration_aa;
- //Get RTS Frame body
- pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
+static int vnt_rxtx_rts_g_head(struct vnt_private *priv,
+ struct vnt_rts_g *buf, struct ethhdr *eth_hdr,
+ u8 pkt_type, u32 frame_len, int need_ack,
+ u16 current_rate, u8 fb_option)
+{
+ u16 rts_frame_len = 20;
- if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
- (pDevice->eOPMode == OP_MODE_AP)) {
- memcpy(&(pBuf->Data.abyRA[0]),
- &(psEthHeader->h_dest[0]),
- ETH_ALEN);
- }
- else {
- memcpy(&(pBuf->Data.abyRA[0]),
- &(pDevice->abyBSSID[0]),
- ETH_ALEN);
- }
+ BBvCalculateParameter(priv, rts_frame_len, priv->byTopCCKBasicRate,
+ PK_TYPE_11B, &buf->b);
+ BBvCalculateParameter(priv, rts_frame_len,
+ priv->byTopOFDMBasicRate, pkt_type, &buf->a);
- if (pDevice->eOPMode == OP_MODE_AP) {
- memcpy(&(pBuf->Data.abyTA[0]),
- &(pDevice->abyBSSID[0]),
- ETH_ALEN);
- }
- else {
- memcpy(&(pBuf->Data.abyTA[0]),
- &(psEthHeader->h_source[0]),
- ETH_ALEN);
- }
+ buf->wDuration_bb = s_uGetRTSCTSDuration(priv, RTSDUR_BB, frame_len,
+ PK_TYPE_11B, priv->byTopCCKBasicRate, need_ack, fb_option);
+ buf->wDuration_aa = s_uGetRTSCTSDuration(priv, RTSDUR_AA, frame_len,
+ pkt_type, current_rate, need_ack, fb_option);
+ buf->wDuration_ba = s_uGetRTSCTSDuration(priv, RTSDUR_BA, frame_len,
+ pkt_type, current_rate, need_ack, fb_option);
- } // if (byFBOption == AUTO_FB_NONE)
- }
- else if (byPktType == PK_TYPE_11A) {
- if (byFBOption == AUTO_FB_NONE) {
- PSRTS_ab pBuf = (PSRTS_ab)pvRTS;
- //Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, uRTSFrameLen, pDevice->byTopOFDMBasicRate, byPktType,
- (u16 *)&(wLen), (u8 *)&(pBuf->byServiceField), (u8 *)&(pBuf->bySignalField)
- );
- pBuf->wTransmitLength = cpu_to_le16(wLen);
- //Get Duration
- pBuf->wDuration = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //0:RTSDuration_aa, 0:5G, 0: 5G OFDMData
- pBuf->Data.wDurationID = pBuf->wDuration;
- //Get RTS Frame body
- pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
+ vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->wDuration_aa);
- if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
- (pDevice->eOPMode == OP_MODE_AP)) {
- memcpy(&(pBuf->Data.abyRA[0]),
- &(psEthHeader->h_dest[0]),
- ETH_ALEN);
- } else {
- memcpy(&(pBuf->Data.abyRA[0]),
- &(pDevice->abyBSSID[0]),
- ETH_ALEN);
- }
+ return 0;
+}
- if (pDevice->eOPMode == OP_MODE_AP) {
- memcpy(&(pBuf->Data.abyTA[0]),
- &(pDevice->abyBSSID[0]),
- ETH_ALEN);
- } else {
- memcpy(&(pBuf->Data.abyTA[0]),
- &(psEthHeader->h_source[0]),
- ETH_ALEN);
- }
+static int vnt_rxtx_rts_g_fb_head(struct vnt_private *priv,
+ struct vnt_rts_g_fb *buf, struct ethhdr *eth_hdr,
+ u8 pkt_type, u32 frame_len, int need_ack,
+ u16 current_rate, u8 fb_option)
+{
+ u16 rts_frame_len = 20;
- }
- else {
- PSRTS_a_FB pBuf = (PSRTS_a_FB)pvRTS;
- //Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, uRTSFrameLen, pDevice->byTopOFDMBasicRate, byPktType,
- (u16 *)&(wLen), (u8 *)&(pBuf->byServiceField), (u8 *)&(pBuf->bySignalField)
- );
- pBuf->wTransmitLength = cpu_to_le16(wLen);
- //Get Duration
- pBuf->wDuration = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //0:RTSDuration_aa, 0:5G, 0: 5G OFDMData
- pBuf->wRTSDuration_f0 = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //5:RTSDuration_aa_f0, 0:5G, 0: 5G OFDMData
- pBuf->wRTSDuration_f1 = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //7:RTSDuration_aa_f1, 0:5G, 0:
- pBuf->Data.wDurationID = pBuf->wDuration;
- //Get RTS Frame body
- pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
+ BBvCalculateParameter(priv, rts_frame_len, priv->byTopCCKBasicRate,
+ PK_TYPE_11B, &buf->b);
+ BBvCalculateParameter(priv, rts_frame_len,
+ priv->byTopOFDMBasicRate, pkt_type, &buf->a);
- if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
- (pDevice->eOPMode == OP_MODE_AP)) {
- memcpy(&(pBuf->Data.abyRA[0]),
- &(psEthHeader->h_dest[0]),
- ETH_ALEN);
- } else {
- memcpy(&(pBuf->Data.abyRA[0]),
- &(pDevice->abyBSSID[0]),
- ETH_ALEN);
- }
- if (pDevice->eOPMode == OP_MODE_AP) {
- memcpy(&(pBuf->Data.abyTA[0]),
- &(pDevice->abyBSSID[0]),
- ETH_ALEN);
- } else {
- memcpy(&(pBuf->Data.abyTA[0]),
- &(psEthHeader->h_source[0]),
- ETH_ALEN);
- }
- }
- }
- else if (byPktType == PK_TYPE_11B) {
- PSRTS_ab pBuf = (PSRTS_ab)pvRTS;
- //Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, uRTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
- (u16 *)&(wLen), (u8 *)&(pBuf->byServiceField), (u8 *)&(pBuf->bySignalField)
- );
- pBuf->wTransmitLength = cpu_to_le16(wLen);
- //Get Duration
- pBuf->wDuration = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_BB, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //0:RTSDuration_bb, 1:2.4G, 1:CCKData
- pBuf->Data.wDurationID = pBuf->wDuration;
- //Get RTS Frame body
- pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
- if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
- (pDevice->eOPMode == OP_MODE_AP)) {
- memcpy(&(pBuf->Data.abyRA[0]),
- &(psEthHeader->h_dest[0]),
- ETH_ALEN);
- }
- else {
- memcpy(&(pBuf->Data.abyRA[0]),
- &(pDevice->abyBSSID[0]),
- ETH_ALEN);
- }
+ buf->wDuration_bb = s_uGetRTSCTSDuration(priv, RTSDUR_BB, frame_len,
+ PK_TYPE_11B, priv->byTopCCKBasicRate, need_ack, fb_option);
+ buf->wDuration_aa = s_uGetRTSCTSDuration(priv, RTSDUR_AA, frame_len,
+ pkt_type, current_rate, need_ack, fb_option);
+ buf->wDuration_ba = s_uGetRTSCTSDuration(priv, RTSDUR_BA, frame_len,
+ pkt_type, current_rate, need_ack, fb_option);
- if (pDevice->eOPMode == OP_MODE_AP) {
- memcpy(&(pBuf->Data.abyTA[0]),
- &(pDevice->abyBSSID[0]),
- ETH_ALEN);
- } else {
- memcpy(&(pBuf->Data.abyTA[0]),
- &(psEthHeader->h_source[0]),
- ETH_ALEN);
- }
- }
+
+ buf->wRTSDuration_ba_f0 = s_uGetRTSCTSDuration(priv, RTSDUR_BA_F0,
+ frame_len, pkt_type, current_rate, need_ack, fb_option);
+ buf->wRTSDuration_aa_f0 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F0,
+ frame_len, pkt_type, current_rate, need_ack, fb_option);
+ buf->wRTSDuration_ba_f1 = s_uGetRTSCTSDuration(priv, RTSDUR_BA_F1,
+ frame_len, pkt_type, current_rate, need_ack, fb_option);
+ buf->wRTSDuration_aa_f1 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F1,
+ frame_len, pkt_type, current_rate, need_ack, fb_option);
+
+ vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->wDuration_aa);
+
+ return 0;
}
-static void s_vFillCTSHead(struct vnt_private *pDevice, u32 uDMAIdx,
- u8 byPktType, void *pvCTS, u32 cbFrameLength, int bNeedAck,
- int bDisCRC, u16 wCurrentRate, u8 byFBOption)
+static int vnt_rxtx_rts_ab_head(struct vnt_private *priv,
+ struct vnt_rts_ab *buf, struct ethhdr *eth_hdr,
+ u8 pkt_type, u32 frame_len, int need_ack,
+ u16 current_rate, u8 fb_option)
{
- u32 uCTSFrameLen = 14;
- u16 wLen = 0;
+ u16 rts_frame_len = 20;
- if (pvCTS == NULL) {
- return;
- }
+ BBvCalculateParameter(priv, rts_frame_len,
+ priv->byTopOFDMBasicRate, pkt_type, &buf->ab);
- if (bDisCRC) {
- // When CRCDIS bit is on, H/W forgot to generate FCS for CTS frame,
- // in this case we need to decrease its length by 4.
- uCTSFrameLen -= 4;
- }
+ buf->wDuration = s_uGetRTSCTSDuration(priv, RTSDUR_AA, frame_len,
+ pkt_type, current_rate, need_ack, fb_option);
- if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
- if (byFBOption != AUTO_FB_NONE && uDMAIdx != TYPE_ATIMDMA && uDMAIdx != TYPE_BEACONDMA) {
- // Auto Fall back
- PSCTS_FB pBuf = (PSCTS_FB)pvCTS;
- //Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, uCTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
- (u16 *)&(wLen), (u8 *)&(pBuf->byServiceField_b), (u8 *)&(pBuf->bySignalField_b)
- );
- pBuf->wTransmitLength_b = cpu_to_le16(wLen);
- pBuf->wDuration_ba = (u16)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption); //3:CTSDuration_ba, 1:2.4G, 2,3:2.4G OFDM Data
- pBuf->wDuration_ba += pDevice->wCTSDuration;
- pBuf->wDuration_ba = cpu_to_le16(pBuf->wDuration_ba);
- //Get CTSDuration_ba_f0
- pBuf->wCTSDuration_ba_f0 = (u16)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption); //8:CTSDuration_ba_f0, 1:2.4G, 2,3:2.4G OFDM Data
- pBuf->wCTSDuration_ba_f0 += pDevice->wCTSDuration;
- pBuf->wCTSDuration_ba_f0 = cpu_to_le16(pBuf->wCTSDuration_ba_f0);
- //Get CTSDuration_ba_f1
- pBuf->wCTSDuration_ba_f1 = (u16)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption); //9:CTSDuration_ba_f1, 1:2.4G, 2,3:2.4G OFDM Data
- pBuf->wCTSDuration_ba_f1 += pDevice->wCTSDuration;
- pBuf->wCTSDuration_ba_f1 = cpu_to_le16(pBuf->wCTSDuration_ba_f1);
- //Get CTS Frame body
- pBuf->Data.wDurationID = pBuf->wDuration_ba;
- pBuf->Data.wFrameControl = TYPE_CTL_CTS;//0x00C4
- pBuf->Data.wReserved = 0x0000;
- memcpy(&(pBuf->Data.abyRA[0]),
- &(pDevice->abyCurrentNetAddr[0]),
- ETH_ALEN);
- } else { //if (byFBOption != AUTO_FB_NONE && uDMAIdx != TYPE_ATIMDMA && uDMAIdx != TYPE_BEACONDMA)
- PSCTS pBuf = (PSCTS)pvCTS;
- //Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, uCTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
- (u16 *)&(wLen), (u8 *)&(pBuf->byServiceField_b), (u8 *)&(pBuf->bySignalField_b)
- );
- pBuf->wTransmitLength_b = cpu_to_le16(wLen);
- //Get CTSDuration_ba
- pBuf->wDuration_ba = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //3:CTSDuration_ba, 1:2.4G, 2,3:2.4G OFDM Data
- pBuf->wDuration_ba += pDevice->wCTSDuration;
- pBuf->wDuration_ba = cpu_to_le16(pBuf->wDuration_ba);
-
- //Get CTS Frame body
- pBuf->Data.wDurationID = pBuf->wDuration_ba;
- pBuf->Data.wFrameControl = TYPE_CTL_CTS;//0x00C4
- pBuf->Data.wReserved = 0x0000;
- memcpy(&(pBuf->Data.abyRA[0]),
- &(pDevice->abyCurrentNetAddr[0]),
- ETH_ALEN);
+ vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->wDuration);
+
+ return 0;
+}
+
+static int vnt_rxtx_rts_a_fb_head(struct vnt_private *priv,
+ struct vnt_rts_a_fb *buf, struct ethhdr *eth_hdr,
+ u8 pkt_type, u32 frame_len, int need_ack,
+ u16 current_rate, u8 fb_option)
+{
+ u16 rts_frame_len = 20;
+
+ BBvCalculateParameter(priv, rts_frame_len,
+ priv->byTopOFDMBasicRate, pkt_type, &buf->a);
+
+ buf->wDuration = s_uGetRTSCTSDuration(priv, RTSDUR_AA, frame_len,
+ pkt_type, current_rate, need_ack, fb_option);
+
+ buf->wRTSDuration_f0 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F0,
+ frame_len, pkt_type, current_rate, need_ack, fb_option);
+
+ buf->wRTSDuration_f1 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F1,
+ frame_len, pkt_type, current_rate, need_ack, fb_option);
+
+ vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->wDuration);
+
+ return 0;
+}
+
+static void s_vFillRTSHead(struct vnt_private *pDevice, u8 byPktType,
+ union vnt_tx_data_head *head, u32 cbFrameLength, int bNeedAck,
+ struct ethhdr *psEthHeader, u16 wCurrentRate, u8 byFBOption)
+{
+
+ if (!head)
+ return;
+
+ /* Note: So far RTSHead doesn't appear in ATIM
+ * & Beacon DMA, so we don't need to take them
+ * into account.
+ * Otherwise, we would need to modify the code for them.
+ */
+ switch (byPktType) {
+ case PK_TYPE_11GB:
+ case PK_TYPE_11GA:
+ if (byFBOption == AUTO_FB_NONE)
+ vnt_rxtx_rts_g_head(pDevice, &head->rts_g,
+ psEthHeader, byPktType, cbFrameLength,
+ bNeedAck, wCurrentRate, byFBOption);
+ else
+ vnt_rxtx_rts_g_fb_head(pDevice, &head->rts_g_fb,
+ psEthHeader, byPktType, cbFrameLength,
+ bNeedAck, wCurrentRate, byFBOption);
+ break;
+ case PK_TYPE_11A:
+ if (byFBOption) {
+ vnt_rxtx_rts_a_fb_head(pDevice, &head->rts_a_fb,
+ psEthHeader, byPktType, cbFrameLength,
+ bNeedAck, wCurrentRate, byFBOption);
+ break;
+ }
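+ /* fall through when byFBOption == AUTO_FB_NONE */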
+ case PK_TYPE_11B:
+ vnt_rxtx_rts_ab_head(pDevice, &head->rts_ab,
+ psEthHeader, byPktType, cbFrameLength,
+ bNeedAck, wCurrentRate, byFBOption);
+ }
+}
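vnt_fill_ieee80211_rts(), added earlier in this hunk, centralises the RA/TA selection that each RTS variant previously repeated: RA is the real destination in AP or ad-hoc mode and the BSSID otherwise, while TA is the BSSID only in AP mode. Reduced to a small sketch:

#include <stdio.h>
#include <string.h>

enum op_mode_sketch { OP_INFRA, OP_ADHOC, OP_AP };      /* illustrative names */

static void pick_rts_addrs(enum op_mode_sketch mode,
                           const unsigned char *dest, const unsigned char *src,
                           const unsigned char *bssid,
                           unsigned char *ra, unsigned char *ta)
{
        /* RA: the real destination when this node delivers frames itself. */
        memcpy(ra, (mode == OP_ADHOC || mode == OP_AP) ? dest : bssid, 6);
        /* TA: an AP transmits as the BSSID, a station as its own address. */
        memcpy(ta, (mode == OP_AP) ? bssid : src, 6);
}

int main(void)
{
        unsigned char d[6] = {1}, s[6] = {2}, b[6] = {3}, ra[6], ta[6];

        pick_rts_addrs(OP_INFRA, d, s, b, ra, ta);
        printf("ra[0]=%u ta[0]=%u\n", ra[0], ta[0]);
        return 0;
}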
+
+static void s_vFillCTSHead(struct vnt_private *pDevice, u32 uDMAIdx,
+ u8 byPktType, union vnt_tx_data_head *head, u32 cbFrameLength,
+ int bNeedAck, u16 wCurrentRate, u8 byFBOption)
+{
+ u32 uCTSFrameLen = 14;
+
+ if (!head)
+ return;
+
+ if (byFBOption != AUTO_FB_NONE) {
+ /* Auto Fall back */
+ struct vnt_cts_fb *pBuf = &head->cts_g_fb;
+ /* Get SignalField,ServiceField,Length */
+ BBvCalculateParameter(pDevice, uCTSFrameLen,
+ pDevice->byTopCCKBasicRate, PK_TYPE_11B, &pBuf->b);
+ pBuf->wDuration_ba = s_uGetRTSCTSDuration(pDevice, CTSDUR_BA,
+ cbFrameLength, byPktType,
+ wCurrentRate, bNeedAck, byFBOption);
+ /* Get CTSDuration_ba_f0 */
+ pBuf->wCTSDuration_ba_f0 = s_uGetRTSCTSDuration(pDevice,
+ CTSDUR_BA_F0, cbFrameLength, byPktType, wCurrentRate,
+ bNeedAck, byFBOption);
+ /* Get CTSDuration_ba_f1 */
+ pBuf->wCTSDuration_ba_f1 = s_uGetRTSCTSDuration(pDevice,
+ CTSDUR_BA_F1, cbFrameLength, byPktType, wCurrentRate,
+ bNeedAck, byFBOption);
+ /* Get CTS Frame body */
+ pBuf->data.duration = pBuf->wDuration_ba;
+ pBuf->data.frame_control = TYPE_CTL_CTS;
+ memcpy(pBuf->data.ra, pDevice->abyCurrentNetAddr, ETH_ALEN);
+ } else {
+ struct vnt_cts *pBuf = &head->cts_g;
+ /* Get SignalField,ServiceField,Length */
+ BBvCalculateParameter(pDevice, uCTSFrameLen,
+ pDevice->byTopCCKBasicRate, PK_TYPE_11B, &pBuf->b);
+ /* Get CTSDuration_ba */
+ pBuf->wDuration_ba = s_uGetRTSCTSDuration(pDevice,
+ CTSDUR_BA, cbFrameLength, byPktType,
+ wCurrentRate, bNeedAck, byFBOption);
+ /*Get CTS Frame body*/
+ pBuf->data.duration = pBuf->wDuration_ba;
+ pBuf->data.frame_control = TYPE_CTL_CTS;
+ memcpy(pBuf->data.ra, pDevice->abyCurrentNetAddr, ETH_ALEN);
}
- }
}
/*+
@@ -1071,12 +843,12 @@ static void s_vFillCTSHead(struct vnt_private *pDevice, u32 uDMAIdx,
static void s_vGenerateTxParameter(struct vnt_private *pDevice,
u8 byPktType, u16 wCurrentRate, void *pTxBufHead, void *pvRrvTime,
- void *pvRTS, void *pvCTS, u32 cbFrameSize, int bNeedACK, u32 uDMAIdx,
- struct ethhdr *psEthHeader)
+ void *rts_cts, u32 cbFrameSize, int bNeedACK, u32 uDMAIdx,
+ struct ethhdr *psEthHeader, bool need_rts)
{
+ union vnt_tx_data_head *head = rts_cts;
u32 cbMACHdLen = WLAN_HDR_ADDR3_LEN; /* 24 */
u16 wFifoCtl;
- int bDisCRC = false;
u8 byFBOption = AUTO_FB_NONE;
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_vGenerateTxParameter...\n");
@@ -1084,10 +856,6 @@ static void s_vGenerateTxParameter(struct vnt_private *pDevice,
pFifoHead->wReserved = wCurrentRate;
wFifoCtl = pFifoHead->wFIFOCtl;
- if (wFifoCtl & FIFOCTL_CRCDIS) {
- bDisCRC = true;
- }
-
if (wFifoCtl & FIFOCTL_AUTO_FB_0) {
byFBOption = AUTO_FB_0;
}
@@ -1095,75 +863,87 @@ static void s_vGenerateTxParameter(struct vnt_private *pDevice,
byFBOption = AUTO_FB_1;
}
+ if (!pvRrvTime)
+ return;
+
if (pDevice->bLongHeader)
cbMACHdLen = WLAN_HDR_ADDR3_LEN + 6;
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
-
- if (pvRTS != NULL) { //RTS_need
+ if (need_rts) {
//Fill RsvTime
- if (pvRrvTime) {
- PSRrvTime_gRTS pBuf = (PSRrvTime_gRTS)pvRrvTime;
- pBuf->wRTSTxRrvTime_aa = cpu_to_le16((u16)s_uGetRTSCTSRsvTime(pDevice, 2, byPktType, cbFrameSize, wCurrentRate));//2:RTSTxRrvTime_aa, 1:2.4GHz
- pBuf->wRTSTxRrvTime_ba = cpu_to_le16((u16)s_uGetRTSCTSRsvTime(pDevice, 1, byPktType, cbFrameSize, wCurrentRate));//1:RTSTxRrvTime_ba, 1:2.4GHz
- pBuf->wRTSTxRrvTime_bb = cpu_to_le16((u16)s_uGetRTSCTSRsvTime(pDevice, 0, byPktType, cbFrameSize, wCurrentRate));//0:RTSTxRrvTime_bb, 1:2.4GHz
- pBuf->wTxRrvTime_a = cpu_to_le16((u16) s_uGetTxRsvTime(pDevice, byPktType, cbFrameSize, wCurrentRate, bNeedACK));//2.4G OFDM
- pBuf->wTxRrvTime_b = cpu_to_le16((u16) s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, pDevice->byTopCCKBasicRate, bNeedACK));//1:CCK
- }
- //Fill RTS
- s_vFillRTSHead(pDevice, byPktType, pvRTS, cbFrameSize, bNeedACK, bDisCRC, psEthHeader, wCurrentRate, byFBOption);
+ struct vnt_rrv_time_rts *pBuf =
+ (struct vnt_rrv_time_rts *)pvRrvTime;
+ pBuf->wRTSTxRrvTime_aa = s_uGetRTSCTSRsvTime(pDevice, 2,
+ byPktType, cbFrameSize, wCurrentRate);
+ pBuf->wRTSTxRrvTime_ba = s_uGetRTSCTSRsvTime(pDevice, 1,
+ byPktType, cbFrameSize, wCurrentRate);
+ pBuf->wRTSTxRrvTime_bb = s_uGetRTSCTSRsvTime(pDevice, 0,
+ byPktType, cbFrameSize, wCurrentRate);
+ pBuf->wTxRrvTime_a = vnt_rxtx_rsvtime_le16(pDevice,
+ byPktType, cbFrameSize, wCurrentRate, bNeedACK);
+ pBuf->wTxRrvTime_b = vnt_rxtx_rsvtime_le16(pDevice,
+ PK_TYPE_11B, cbFrameSize, pDevice->byTopCCKBasicRate,
+ bNeedACK);
+ /* Fill RTS */
+ s_vFillRTSHead(pDevice, byPktType, head, cbFrameSize,
+ bNeedACK, psEthHeader, wCurrentRate, byFBOption);
}
else {//RTS_needless, PCF mode
-
//Fill RsvTime
- if (pvRrvTime) {
- PSRrvTime_gCTS pBuf = (PSRrvTime_gCTS)pvRrvTime;
- pBuf->wTxRrvTime_a = cpu_to_le16((u16)s_uGetTxRsvTime(pDevice, byPktType, cbFrameSize, wCurrentRate, bNeedACK));//2.4G OFDM
- pBuf->wTxRrvTime_b = cpu_to_le16((u16)s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, pDevice->byTopCCKBasicRate, bNeedACK));//1:CCK
- pBuf->wCTSTxRrvTime_ba = cpu_to_le16((u16)s_uGetRTSCTSRsvTime(pDevice, 3, byPktType, cbFrameSize, wCurrentRate));//3:CTSTxRrvTime_Ba, 1:2.4GHz
- }
- //Fill CTS
- s_vFillCTSHead(pDevice, uDMAIdx, byPktType, pvCTS, cbFrameSize, bNeedACK, bDisCRC, wCurrentRate, byFBOption);
+ struct vnt_rrv_time_cts *pBuf =
+ (struct vnt_rrv_time_cts *)pvRrvTime;
+ pBuf->wTxRrvTime_a = vnt_rxtx_rsvtime_le16(pDevice, byPktType,
+ cbFrameSize, wCurrentRate, bNeedACK);
+ pBuf->wTxRrvTime_b = vnt_rxtx_rsvtime_le16(pDevice,
+ PK_TYPE_11B, cbFrameSize,
+ pDevice->byTopCCKBasicRate, bNeedACK);
+ pBuf->wCTSTxRrvTime_ba = s_uGetRTSCTSRsvTime(pDevice, 3,
+ byPktType, cbFrameSize, wCurrentRate);
+ /* Fill CTS */
+ s_vFillCTSHead(pDevice, uDMAIdx, byPktType, head,
+ cbFrameSize, bNeedACK, wCurrentRate, byFBOption);
}
}
else if (byPktType == PK_TYPE_11A) {
-
- if (pvRTS != NULL) {//RTS_need, non PCF mode
+ if (need_rts) {
//Fill RsvTime
- if (pvRrvTime) {
- PSRrvTime_ab pBuf = (PSRrvTime_ab)pvRrvTime;
- pBuf->wRTSTxRrvTime = cpu_to_le16((u16)s_uGetRTSCTSRsvTime(pDevice, 2, byPktType, cbFrameSize, wCurrentRate));//2:RTSTxRrvTime_aa, 0:5GHz
- pBuf->wTxRrvTime = cpu_to_le16((u16)s_uGetTxRsvTime(pDevice, byPktType, cbFrameSize, wCurrentRate, bNeedACK));//0:OFDM
- }
- //Fill RTS
- s_vFillRTSHead(pDevice, byPktType, pvRTS, cbFrameSize, bNeedACK, bDisCRC, psEthHeader, wCurrentRate, byFBOption);
- }
- else if (pvRTS == NULL) {//RTS_needless, non PCF mode
+ struct vnt_rrv_time_ab *pBuf =
+ (struct vnt_rrv_time_ab *)pvRrvTime;
+ pBuf->wRTSTxRrvTime = s_uGetRTSCTSRsvTime(pDevice, 2,
+ byPktType, cbFrameSize, wCurrentRate);
+ pBuf->wTxRrvTime = vnt_rxtx_rsvtime_le16(pDevice, byPktType,
+ cbFrameSize, wCurrentRate, bNeedACK);
+ /* Fill RTS */
+ s_vFillRTSHead(pDevice, byPktType, head, cbFrameSize,
+ bNeedACK, psEthHeader, wCurrentRate, byFBOption);
+ } else {
//Fill RsvTime
- if (pvRrvTime) {
- PSRrvTime_ab pBuf = (PSRrvTime_ab)pvRrvTime;
- pBuf->wTxRrvTime = cpu_to_le16((u16)s_uGetTxRsvTime(pDevice, PK_TYPE_11A, cbFrameSize, wCurrentRate, bNeedACK)); //0:OFDM
- }
+ struct vnt_rrv_time_ab *pBuf =
+ (struct vnt_rrv_time_ab *)pvRrvTime;
+ pBuf->wTxRrvTime = vnt_rxtx_rsvtime_le16(pDevice, PK_TYPE_11A,
+ cbFrameSize, wCurrentRate, bNeedACK);
}
}
else if (byPktType == PK_TYPE_11B) {
-
- if ((pvRTS != NULL)) {//RTS_need, non PCF mode
+ if (need_rts) {
//Fill RsvTime
- if (pvRrvTime) {
- PSRrvTime_ab pBuf = (PSRrvTime_ab)pvRrvTime;
- pBuf->wRTSTxRrvTime = cpu_to_le16((u16)s_uGetRTSCTSRsvTime(pDevice, 0, byPktType, cbFrameSize, wCurrentRate));//0:RTSTxRrvTime_bb, 1:2.4GHz
- pBuf->wTxRrvTime = cpu_to_le16((u16)s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, wCurrentRate, bNeedACK));//1:CCK
- }
- //Fill RTS
- s_vFillRTSHead(pDevice, byPktType, pvRTS, cbFrameSize, bNeedACK, bDisCRC, psEthHeader, wCurrentRate, byFBOption);
+ struct vnt_rrv_time_ab *pBuf =
+ (struct vnt_rrv_time_ab *)pvRrvTime;
+ pBuf->wRTSTxRrvTime = s_uGetRTSCTSRsvTime(pDevice, 0,
+ byPktType, cbFrameSize, wCurrentRate);
+ pBuf->wTxRrvTime = vnt_rxtx_rsvtime_le16(pDevice, PK_TYPE_11B,
+ cbFrameSize, wCurrentRate, bNeedACK);
+ /* Fill RTS */
+ s_vFillRTSHead(pDevice, byPktType, head, cbFrameSize,
+ bNeedACK, psEthHeader, wCurrentRate, byFBOption);
}
else { //RTS_needless, non PCF mode
//Fill RsvTime
- if (pvRrvTime) {
- PSRrvTime_ab pBuf = (PSRrvTime_ab)pvRrvTime;
- pBuf->wTxRrvTime = cpu_to_le16((u16)s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, wCurrentRate, bNeedACK)); //1:CCK
- }
+ struct vnt_rrv_time_ab *pBuf =
+ (struct vnt_rrv_time_ab *)pvRrvTime;
+ pBuf->wTxRrvTime = vnt_rxtx_rsvtime_le16(pDevice, PK_TYPE_11B,
+ cbFrameSize, wCurrentRate, bNeedACK);
}
}
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_vGenerateTxParameter END.\n");
@@ -1175,17 +955,18 @@ static void s_vGenerateTxParameter(struct vnt_private *pDevice,
*/
static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
- u8 *usbPacketBuf, int bNeedEncryption, u32 uSkbPacketLen, u32 uDMAIdx,
- struct ethhdr *psEthHeader, u8 *pPacket, PSKeyItem pTransmitKey,
- u32 uNodeIndex, u16 wCurrentRate, u32 *pcbHeaderLen, u32 *pcbTotalLen)
+ struct vnt_tx_buffer *pTxBufHead, int bNeedEncryption,
+ u32 uSkbPacketLen, u32 uDMAIdx, struct ethhdr *psEthHeader,
+ u8 *pPacket, PSKeyItem pTransmitKey, u32 uNodeIndex, u16 wCurrentRate,
+ u32 *pcbHeaderLen, u32 *pcbTotalLen)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
u32 cbFrameSize, cbFrameBodySize;
- PTX_BUFFER pTxBufHead;
u32 cb802_1_H_len;
u32 cbIVlen = 0, cbICVlen = 0, cbMIClen = 0, cbMACHdLen = 0;
u32 cbFCSlen = 4, cbMICHDR = 0;
- int bNeedACK, bRTS;
+ int bNeedACK;
+ bool bRTS = false;
u8 *pbyType, *pbyMacHdr, *pbyIVHead, *pbyPayloadHead, *pbyTxBufferAddr;
u8 abySNAP_RFC1042[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00};
u8 abySNAP_Bridgetunnel[ETH_ALEN]
@@ -1193,26 +974,22 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
u32 uDuration;
u32 cbHeaderLength = 0, uPadding = 0;
void *pvRrvTime;
- PSMICHDRHead pMICHDR;
- void *pvRTS;
- void *pvCTS;
+ struct vnt_mic_hdr *pMICHDR;
+ void *rts_cts = NULL;
void *pvTxDataHd;
u8 byFBOption = AUTO_FB_NONE, byFragType;
u16 wTxBufSize;
- u32 dwMICKey0, dwMICKey1, dwMIC_Priority, dwCRC;
+ u32 dwMICKey0, dwMICKey1, dwMIC_Priority;
u32 *pdwMIC_L, *pdwMIC_R;
int bSoftWEP = false;
- pvRrvTime = pMICHDR = pvRTS = pvCTS = pvTxDataHd = NULL;
+ pvRrvTime = pMICHDR = pvTxDataHd = NULL;
if (bNeedEncryption && pTransmitKey->pvKeyTable) {
if (((PSKeyTable)pTransmitKey->pvKeyTable)->bSoftWEP == true)
bSoftWEP = true; /* WEP 256 */
}
- pTxBufHead = (PTX_BUFFER) usbPacketBuf;
- memset(pTxBufHead, 0, sizeof(TX_BUFFER));
-
// Get pkt type
if (ntohs(psEthHeader->h_proto) > ETH_DATA_LEN) {
if (pDevice->dwDiagRefCount == 0) {
@@ -1257,10 +1034,6 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
if (pDevice->bLongHeader)
pTxBufHead->wFIFOCtl |= FIFOCTL_LHEAD;
- if (pDevice->bSoftwareGenCrcErr) {
- pTxBufHead->wFIFOCtl |= FIFOCTL_CRCDIS; // set tx descriptors to NO hardware CRC
- }
-
//Set FRAGCTL_MACHDCNT
if (pDevice->bLongHeader) {
cbMACHdLen = WLAN_HDR_ADDR3_LEN + 6;
@@ -1313,7 +1086,7 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) {
cbIVlen = 8;//RSN Header
cbICVlen = 8;//MIC
- cbMICHDR = sizeof(SMICHDRHead);
+ cbMICHDR = sizeof(struct vnt_mic_hdr);
}
if (bSoftWEP == false) {
//MAC Header should be padding 0 to DW alignment.
@@ -1336,76 +1109,116 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {//802.11g packet
if (byFBOption == AUTO_FB_NONE) {
if (bRTS == true) {//RTS_need
- pvRrvTime = (PSRrvTime_gRTS) (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS));
- pvRTS = (PSRTS_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR);
- pvCTS = NULL;
- pvTxDataHd = (PSTxDataHead_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR + sizeof(SRTS_g));
- cbHeaderLength = wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR + sizeof(SRTS_g) + sizeof(STxDataHead_g);
+ pvRrvTime = (struct vnt_rrv_time_rts *)
+ (pbyTxBufferAddr + wTxBufSize);
+ pMICHDR = (struct vnt_mic_hdr *)(pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_rts));
+ rts_cts = (struct vnt_rts_g *) (pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_rts) + cbMICHDR);
+ pvTxDataHd = (struct vnt_tx_datahead_g *) (pbyTxBufferAddr +
+ wTxBufSize + sizeof(struct vnt_rrv_time_rts) +
+ cbMICHDR + sizeof(struct vnt_rts_g));
+ cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_rts) +
+ cbMICHDR + sizeof(struct vnt_rts_g) +
+ sizeof(struct vnt_tx_datahead_g);
}
else { //RTS_needless
- pvRrvTime = (PSRrvTime_gCTS) (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS));
- pvRTS = NULL;
- pvCTS = (PSCTS) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR);
- pvTxDataHd = (PSTxDataHead_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS));
- cbHeaderLength = wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS) + sizeof(STxDataHead_g);
+ pvRrvTime = (struct vnt_rrv_time_cts *)
+ (pbyTxBufferAddr + wTxBufSize);
+ pMICHDR = (struct vnt_mic_hdr *) (pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_cts));
+ rts_cts = (struct vnt_cts *) (pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_cts) + cbMICHDR);
+ pvTxDataHd = (struct vnt_tx_datahead_g *)(pbyTxBufferAddr +
+ wTxBufSize + sizeof(struct vnt_rrv_time_cts) +
+ cbMICHDR + sizeof(struct vnt_cts));
+ cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_cts) +
+ cbMICHDR + sizeof(struct vnt_cts) +
+ sizeof(struct vnt_tx_datahead_g);
}
} else {
// Auto Fall Back
if (bRTS == true) {//RTS_need
- pvRrvTime = (PSRrvTime_gRTS) (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS));
- pvRTS = (PSRTS_g_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR);
- pvCTS = NULL;
- pvTxDataHd = (PSTxDataHead_g_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR + sizeof(SRTS_g_FB));
- cbHeaderLength = wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR + sizeof(SRTS_g_FB) + sizeof(STxDataHead_g_FB);
+ pvRrvTime = (struct vnt_rrv_time_rts *)(pbyTxBufferAddr +
+ wTxBufSize);
+ pMICHDR = (struct vnt_mic_hdr *) (pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_rts));
+ rts_cts = (struct vnt_rts_g_fb *)(pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_rts) + cbMICHDR);
+ pvTxDataHd = (struct vnt_tx_datahead_g_fb *) (pbyTxBufferAddr +
+ wTxBufSize + sizeof(struct vnt_rrv_time_rts) +
+ cbMICHDR + sizeof(struct vnt_rts_g_fb));
+ cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_rts) +
+ cbMICHDR + sizeof(struct vnt_rts_g_fb) +
+ sizeof(struct vnt_tx_datahead_g_fb);
}
else if (bRTS == false) { //RTS_needless
- pvRrvTime = (PSRrvTime_gCTS) (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS));
- pvRTS = NULL;
- pvCTS = (PSCTS_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR);
- pvTxDataHd = (PSTxDataHead_g_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS_FB));
- cbHeaderLength = wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS_FB) + sizeof(STxDataHead_g_FB);
+ pvRrvTime = (struct vnt_rrv_time_cts *)
+ (pbyTxBufferAddr + wTxBufSize);
+ pMICHDR = (struct vnt_mic_hdr *) (pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_cts));
+ rts_cts = (struct vnt_cts_fb *) (pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_cts) + cbMICHDR);
+ pvTxDataHd = (struct vnt_tx_datahead_g_fb *) (pbyTxBufferAddr +
+ wTxBufSize + sizeof(struct vnt_rrv_time_cts) +
+ cbMICHDR + sizeof(struct vnt_cts_fb));
+ cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_cts) +
+ cbMICHDR + sizeof(struct vnt_cts_fb) +
+ sizeof(struct vnt_tx_datahead_g_fb);
}
} // Auto Fall Back
}
else {//802.11a/b packet
if (byFBOption == AUTO_FB_NONE) {
if (bRTS == true) {//RTS_need
- pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
- pvRTS = (PSRTS_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR);
- pvCTS = NULL;
- pvTxDataHd = (PSTxDataHead_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(SRTS_ab));
- cbHeaderLength = wTxBufSize + sizeof(PSRrvTime_ab) + cbMICHDR + sizeof(SRTS_ab) + sizeof(STxDataHead_ab);
+ pvRrvTime = (struct vnt_rrv_time_ab *) (pbyTxBufferAddr +
+ wTxBufSize);
+ pMICHDR = (struct vnt_mic_hdr *)(pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_ab));
+ rts_cts = (struct vnt_rts_ab *) (pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_ab) + cbMICHDR);
+ pvTxDataHd = (struct vnt_tx_datahead_ab *)(pbyTxBufferAddr +
+ wTxBufSize + sizeof(struct vnt_rrv_time_ab) + cbMICHDR +
+ sizeof(struct vnt_rts_ab));
+ cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_ab) +
+ cbMICHDR + sizeof(struct vnt_rts_ab) +
+ sizeof(struct vnt_tx_datahead_ab);
}
else if (bRTS == false) { //RTS_needless, no MICHDR
- pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
- pvRTS = NULL;
- pvCTS = NULL;
- pvTxDataHd = (PSTxDataHead_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR);
- cbHeaderLength = wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(STxDataHead_ab);
+ pvRrvTime = (struct vnt_rrv_time_ab *)(pbyTxBufferAddr +
+ wTxBufSize);
+ pMICHDR = (struct vnt_mic_hdr *) (pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_ab));
+ pvTxDataHd = (struct vnt_tx_datahead_ab *)(pbyTxBufferAddr +
+ wTxBufSize + sizeof(struct vnt_rrv_time_ab) + cbMICHDR);
+ cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_ab) +
+ cbMICHDR + sizeof(struct vnt_tx_datahead_ab);
}
} else {
// Auto Fall Back
if (bRTS == true) {//RTS_need
- pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
- pvRTS = (PSRTS_a_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR);
- pvCTS = NULL;
- pvTxDataHd = (PSTxDataHead_a_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(SRTS_a_FB));
- cbHeaderLength = wTxBufSize + sizeof(PSRrvTime_ab) + cbMICHDR + sizeof(SRTS_a_FB) + sizeof(STxDataHead_a_FB);
+ pvRrvTime = (struct vnt_rrv_time_ab *)(pbyTxBufferAddr +
+ wTxBufSize);
+ pMICHDR = (struct vnt_mic_hdr *) (pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_ab));
+ rts_cts = (struct vnt_rts_a_fb *)(pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_ab) + cbMICHDR);
+ pvTxDataHd = (struct vnt_tx_datahead_a_fb *)(pbyTxBufferAddr +
+ wTxBufSize + sizeof(struct vnt_rrv_time_ab) + cbMICHDR +
+ sizeof(struct vnt_rts_a_fb));
+ cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_ab) +
+ cbMICHDR + sizeof(struct vnt_rts_a_fb) +
+ sizeof(struct vnt_tx_datahead_a_fb);
}
else if (bRTS == false) { //RTS_needless
- pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
- pvRTS = NULL;
- pvCTS = NULL;
- pvTxDataHd = (PSTxDataHead_a_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR);
- cbHeaderLength = wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(STxDataHead_a_FB);
+ pvRrvTime = (struct vnt_rrv_time_ab *)(pbyTxBufferAddr +
+ wTxBufSize);
+ pMICHDR = (struct vnt_mic_hdr *)(pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_ab));
+ pvTxDataHd = (struct vnt_tx_datahead_a_fb *)(pbyTxBufferAddr +
+ wTxBufSize + sizeof(struct vnt_rrv_time_ab) + cbMICHDR);
+ cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_ab) +
+ cbMICHDR + sizeof(struct vnt_tx_datahead_a_fb);
}
} // Auto Fall Back
}
@@ -1424,11 +1237,11 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
//Fill FIFO,RrvTime,RTS,and CTS
s_vGenerateTxParameter(pDevice, byPktType, wCurrentRate,
- (void *)pbyTxBufferAddr, pvRrvTime, pvRTS, pvCTS,
- cbFrameSize, bNeedACK, uDMAIdx, psEthHeader);
+ (void *)pbyTxBufferAddr, pvRrvTime, rts_cts,
+ cbFrameSize, bNeedACK, uDMAIdx, psEthHeader, bRTS);
//Fill DataHead
uDuration = s_uFillDataHead(pDevice, byPktType, wCurrentRate, pvTxDataHd, cbFrameSize, uDMAIdx, bNeedACK,
- 0, 0, 1/*uMACfragNum*/, byFBOption);
+ byFBOption);
// Generate TX MAC Header
s_vGenerateMACHeader(pDevice, pbyMacHdr, (u16)uDuration, psEthHeader, bNeedEncryption,
byFragType, uDMAIdx, 0);
@@ -1436,7 +1249,7 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
if (bNeedEncryption == true) {
//Fill TXKEY
s_vFillTxKey(pDevice, (u8 *)(pTxBufHead->adwTxKey), pbyIVHead, pTransmitKey,
- pbyMacHdr, (u16)cbFrameBodySize, (u8 *)pMICHDR);
+ pbyMacHdr, (u16)cbFrameBodySize, pMICHDR);
if (pDevice->bEnableHostWEP) {
pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
@@ -1475,8 +1288,6 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
memcpy((pbyPayloadHead + cb802_1_H_len), ((u8 *)psEthHeader) + ETH_HLEN, uSkbPacketLen - ETH_HLEN);
}
- ASSERT(uLength == cbNdisBodySize);
-
if ((bNeedEncryption == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
///////////////////////////////////////////////////////////////////
@@ -1537,22 +1348,7 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
cbFrameSize -= cbICVlen;
}
- if (pDevice->bSoftwareGenCrcErr == true) {
- unsigned int cbLen;
- u32 * pdwCRC;
-
- dwCRC = 0xFFFFFFFFL;
- cbLen = cbFrameSize - cbFCSlen;
- // calculate CRC, and wrtie CRC value to end of TD
- dwCRC = CRCdwGetCrc32Ex(pbyMacHdr, cbLen, dwCRC);
- pdwCRC = (u32 *)(pbyMacHdr + cbLen);
- // finally, we must invert dwCRC to get the correct answer
- *pdwCRC = ~dwCRC;
- // Force Error
- *pdwCRC -= 1;
- } else {
cbFrameSize -= cbFCSlen;
- }
*pcbHeaderLen = cbHeaderLength;
*pcbTotalLen = cbHeaderLength + cbFrameSize ;
@@ -1589,13 +1385,7 @@ static void s_vGenerateMACHeader(struct vnt_private *pDevice,
{
struct ieee80211_hdr *pMACHeader = (struct ieee80211_hdr *)pbyBufferAddr;
- memset(pMACHeader, 0, (sizeof(struct ieee80211_hdr)));
-
- if (uDMAIdx == TYPE_ATIMDMA) {
- pMACHeader->frame_control = TYPE_802_11_ATIM;
- } else {
- pMACHeader->frame_control = TYPE_802_11_DATA;
- }
+ pMACHeader->frame_control = TYPE_802_11_DATA;
if (pDevice->eOPMode == OP_MODE_AP) {
memcpy(&(pMACHeader->addr1[0]),
@@ -1678,14 +1468,14 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
struct vnt_tx_mgmt *pPacket)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
- PTX_BUFFER pTX_Buffer;
+ struct vnt_tx_buffer *pTX_Buffer;
PSTxBufHead pTxBufHead;
- PUSB_SEND_CONTEXT pContext;
+ struct vnt_usb_send_context *pContext;
struct ieee80211_hdr *pMACHeader;
- PSCTS pCTS;
struct ethhdr sEthHeader;
u8 byPktType, *pbyTxBufferAddr;
- void *pvRTS, *pvTxDataHd, *pvRrvTime, *pMICHDR;
+ void *rts_cts = NULL;
+ void *pvTxDataHd, *pvRrvTime, *pMICHDR;
u32 uDuration, cbReqCount, cbHeaderSize, cbFrameBodySize, cbFrameSize;
int bNeedACK, bIsPSPOLL = false;
u32 cbIVlen = 0, cbICVlen = 0, cbMIClen = 0, cbFCSlen = 4;
@@ -1694,19 +1484,18 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
u32 cbMacHdLen;
u16 wCurrentRate = RATE_1M;
- pContext = (PUSB_SEND_CONTEXT)s_vGetFreeContext(pDevice);
+ pContext = (struct vnt_usb_send_context *)s_vGetFreeContext(pDevice);
if (NULL == pContext) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ManagementSend TX...NO CONTEXT!\n");
return CMD_STATUS_RESOURCES;
}
- pTX_Buffer = (PTX_BUFFER) (&pContext->Data[0]);
+ pTX_Buffer = (struct vnt_tx_buffer *)&pContext->Data[0];
pbyTxBufferAddr = (u8 *)&(pTX_Buffer->adwTxKey[0]);
cbFrameBodySize = pPacket->cbPayloadLen;
pTxBufHead = (PSTxBufHead) pbyTxBufferAddr;
wTxBufSize = sizeof(STxBufHead);
- memset(pTxBufHead, 0, wTxBufSize);
if (pDevice->byBBType == BB_TYPE_11A) {
wCurrentRate = RATE_6M;
@@ -1819,25 +1608,24 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
//Set RrvTime/RTS/CTS Buffer
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {//802.11g packet
- pvRrvTime = (PSRrvTime_gCTS) (pbyTxBufferAddr + wTxBufSize);
+ pvRrvTime = (struct vnt_rrv_time_cts *) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = NULL;
- pvRTS = NULL;
- pCTS = (PSCTS) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS));
- pvTxDataHd = (PSTxDataHead_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + sizeof(SCTS));
- cbHeaderSize = wTxBufSize + sizeof(SRrvTime_gCTS) + sizeof(SCTS) + sizeof(STxDataHead_g);
+ rts_cts = (struct vnt_cts *) (pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_cts));
+ pvTxDataHd = (struct vnt_tx_datahead_g *)(pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_cts) + sizeof(struct vnt_cts));
+ cbHeaderSize = wTxBufSize + sizeof(struct vnt_rrv_time_cts) +
+ sizeof(struct vnt_cts) + sizeof(struct vnt_tx_datahead_g);
}
else { // 802.11a/b packet
- pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
+ pvRrvTime = (struct vnt_rrv_time_ab *) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = NULL;
- pvRTS = NULL;
- pCTS = NULL;
- pvTxDataHd = (PSTxDataHead_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
- cbHeaderSize = wTxBufSize + sizeof(SRrvTime_ab) + sizeof(STxDataHead_ab);
+ pvTxDataHd = (struct vnt_tx_datahead_ab *) (pbyTxBufferAddr +
+ wTxBufSize + sizeof(struct vnt_rrv_time_ab));
+ cbHeaderSize = wTxBufSize + sizeof(struct vnt_rrv_time_ab) +
+ sizeof(struct vnt_tx_datahead_ab);
}
- memset((void *)(pbyTxBufferAddr + wTxBufSize), 0,
- (cbHeaderSize - wTxBufSize));
-
memcpy(&(sEthHeader.h_dest[0]),
&(pPacket->p80211Header->sA3.abyAddr1[0]),
ETH_ALEN);
@@ -1849,13 +1637,14 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
//=========================
pTxBufHead->wFragCtl |= (u16)FRAGCTL_NONFRAG;
- //Fill FIFO,RrvTime,RTS,and CTS
- s_vGenerateTxParameter(pDevice, byPktType, wCurrentRate, pbyTxBufferAddr, pvRrvTime, pvRTS, pCTS,
- cbFrameSize, bNeedACK, TYPE_TXDMA0, &sEthHeader);
+ /* Fill FIFO,RrvTime,RTS,and CTS */
+ s_vGenerateTxParameter(pDevice, byPktType, wCurrentRate,
+ pbyTxBufferAddr, pvRrvTime, rts_cts,
+ cbFrameSize, bNeedACK, TYPE_TXDMA0, &sEthHeader, false);
//Fill DataHead
uDuration = s_uFillDataHead(pDevice, byPktType, wCurrentRate, pvTxDataHd, cbFrameSize, TYPE_TXDMA0, bNeedACK,
- 0, 0, 1, AUTO_FB_NONE);
+ AUTO_FB_NONE);
pMACHeader = (struct ieee80211_hdr *) (pbyTxBufferAddr + cbHeaderSize);
@@ -1918,12 +1707,15 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
// This will cause AID-field of PS-POLL packet be incorrect (Because PS-POLL's AID field is
// in the same place of other packet's Duration-field).
// And it will cause Cisco-AP to issue Disassociation-packet
- if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
- ((PSTxDataHead_g)pvTxDataHd)->wDuration_a = cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
- ((PSTxDataHead_g)pvTxDataHd)->wDuration_b = cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
- } else {
- ((PSTxDataHead_ab)pvTxDataHd)->wDuration = cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
- }
+ if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
+ ((struct vnt_tx_datahead_g *)pvTxDataHd)->wDuration_a =
+ cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
+ ((struct vnt_tx_datahead_g *)pvTxDataHd)->wDuration_b =
+ cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
+ } else {
+ ((struct vnt_tx_datahead_ab *)pvTxDataHd)->wDuration =
+ cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
+ }
}
pTX_Buffer->wTxByteCount = cpu_to_le16((u16)(cbReqCount));
@@ -1948,60 +1740,60 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
CMD_STATUS csBeacon_xmit(struct vnt_private *pDevice,
struct vnt_tx_mgmt *pPacket)
{
+ struct vnt_beacon_buffer *pTX_Buffer;
u32 cbFrameSize = pPacket->cbMPDULen + WLAN_FCS_LEN;
u32 cbHeaderSize = 0;
u16 wTxBufSize = sizeof(STxShortBufHead);
PSTxShortBufHead pTxBufHead;
struct ieee80211_hdr *pMACHeader;
- PSTxDataHead_ab pTxDataHead;
+ struct vnt_tx_datahead_ab *pTxDataHead;
u16 wCurrentRate;
u32 cbFrameBodySize;
u32 cbReqCount;
- PBEACON_BUFFER pTX_Buffer;
u8 *pbyTxBufferAddr;
- PUSB_SEND_CONTEXT pContext;
+ struct vnt_usb_send_context *pContext;
CMD_STATUS status;
- pContext = (PUSB_SEND_CONTEXT)s_vGetFreeContext(pDevice);
+ pContext = (struct vnt_usb_send_context *)s_vGetFreeContext(pDevice);
if (NULL == pContext) {
status = CMD_STATUS_RESOURCES;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ManagementSend TX...NO CONTEXT!\n");
return status ;
}
- pTX_Buffer = (PBEACON_BUFFER) (&pContext->Data[0]);
+
+ pTX_Buffer = (struct vnt_beacon_buffer *)&pContext->Data[0];
pbyTxBufferAddr = (u8 *)&(pTX_Buffer->wFIFOCtl);
cbFrameBodySize = pPacket->cbPayloadLen;
pTxBufHead = (PSTxShortBufHead) pbyTxBufferAddr;
wTxBufSize = sizeof(STxShortBufHead);
- memset(pTxBufHead, 0, wTxBufSize);
if (pDevice->byBBType == BB_TYPE_11A) {
wCurrentRate = RATE_6M;
- pTxDataHead = (PSTxDataHead_ab) (pbyTxBufferAddr + wTxBufSize);
+ pTxDataHead = (struct vnt_tx_datahead_ab *)
+ (pbyTxBufferAddr + wTxBufSize);
//Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, cbFrameSize, wCurrentRate, PK_TYPE_11A,
- (u16 *)&(pTxDataHead->wTransmitLength), (u8 *)&(pTxDataHead->byServiceField), (u8 *)&(pTxDataHead->bySignalField)
- );
+ BBvCalculateParameter(pDevice, cbFrameSize, wCurrentRate, PK_TYPE_11A,
+ &pTxDataHead->ab);
//Get Duration and TimeStampOff
- pTxDataHead->wDuration = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameSize, PK_TYPE_11A,
- wCurrentRate, false, 0, 0, 1, AUTO_FB_NONE));
- pTxDataHead->wTimeStampOff = wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE];
- cbHeaderSize = wTxBufSize + sizeof(STxDataHead_ab);
+ pTxDataHead->wDuration = s_uGetDataDuration(pDevice,
+ PK_TYPE_11A, false);
+ pTxDataHead->wTimeStampOff = vnt_time_stamp_off(pDevice, wCurrentRate);
+ cbHeaderSize = wTxBufSize + sizeof(struct vnt_tx_datahead_ab);
} else {
wCurrentRate = RATE_1M;
pTxBufHead->wFIFOCtl |= FIFOCTL_11B;
- pTxDataHead = (PSTxDataHead_ab) (pbyTxBufferAddr + wTxBufSize);
+ pTxDataHead = (struct vnt_tx_datahead_ab *)
+ (pbyTxBufferAddr + wTxBufSize);
//Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, cbFrameSize, wCurrentRate, PK_TYPE_11B,
- (u16 *)&(pTxDataHead->wTransmitLength), (u8 *)&(pTxDataHead->byServiceField), (u8 *)&(pTxDataHead->bySignalField)
- );
+ BBvCalculateParameter(pDevice, cbFrameSize, wCurrentRate, PK_TYPE_11B,
+ &pTxDataHead->ab);
//Get Duration and TimeStampOff
- pTxDataHead->wDuration = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameSize, PK_TYPE_11B,
- wCurrentRate, false, 0, 0, 1, AUTO_FB_NONE));
- pTxDataHead->wTimeStampOff = wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE];
- cbHeaderSize = wTxBufSize + sizeof(STxDataHead_ab);
+ pTxDataHead->wDuration = s_uGetDataDuration(pDevice,
+ PK_TYPE_11B, false);
+ pTxDataHead->wTimeStampOff = vnt_time_stamp_off(pDevice, wCurrentRate);
+ cbHeaderSize = wTxBufSize + sizeof(struct vnt_tx_datahead_ab);
}
//Generate Beacon Header
@@ -2032,9 +1824,11 @@ CMD_STATUS csBeacon_xmit(struct vnt_private *pDevice,
void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ struct vnt_tx_buffer *pTX_Buffer;
u8 byPktType;
u8 *pbyTxBufferAddr;
- void *pvRTS, *pvCTS, *pvTxDataHd;
+ void *rts_cts = NULL;
+ void *pvTxDataHd;
u32 uDuration, cbReqCount;
struct ieee80211_hdr *pMACHeader;
u32 cbHeaderSize, cbFrameBodySize;
@@ -2059,10 +1853,9 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
PSKeyItem pTransmitKey = NULL;
u8 *pbyIVHead, *pbyPayloadHead, *pbyMacHdr;
u32 cbExtSuppRate = 0;
- PTX_BUFFER pTX_Buffer;
- PUSB_SEND_CONTEXT pContext;
+ struct vnt_usb_send_context *pContext;
- pvRrvTime = pMICHDR = pvRTS = pvCTS = pvTxDataHd = NULL;
+ pvRrvTime = pMICHDR = pvTxDataHd = NULL;
if(skb->len <= WLAN_HDR_ADDR3_LEN) {
cbFrameBodySize = 0;
@@ -2072,7 +1865,7 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
}
p80211Header = (PUWLAN_80211HDR)skb->data;
- pContext = (PUSB_SEND_CONTEXT)s_vGetFreeContext(pDevice);
+ pContext = (struct vnt_usb_send_context *)s_vGetFreeContext(pDevice);
if (NULL == pContext) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"DMA0 TX...NO CONTEXT!\n");
@@ -2080,11 +1873,10 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
return ;
}
- pTX_Buffer = (PTX_BUFFER)(&pContext->Data[0]);
+ pTX_Buffer = (struct vnt_tx_buffer *)&pContext->Data[0];
pbyTxBufferAddr = (u8 *)(&pTX_Buffer->adwTxKey[0]);
pTxBufHead = (PSTxBufHead) pbyTxBufferAddr;
wTxBufSize = sizeof(STxBufHead);
- memset(pTxBufHead, 0, wTxBufSize);
if (pDevice->byBBType == BB_TYPE_11A) {
wCurrentRate = RATE_6M;
@@ -2204,7 +1996,7 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
cbIVlen = 8;//RSN Header
cbICVlen = 8;//MIC
- cbMICHDR = sizeof(SMICHDRHead);
+ cbMICHDR = sizeof(struct vnt_mic_hdr);
pTxBufHead->wFragCtl |= FRAGCTL_AES;
pDevice->bAES = true;
}
@@ -2222,26 +2014,28 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
//the rest of pTxBufHead->wFragCtl:FragTyp will be set later in s_vFillFragParameter()
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {//802.11g packet
-
- pvRrvTime = (PSRrvTime_gCTS) (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS));
- pvRTS = NULL;
- pvCTS = (PSCTS) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR);
- pvTxDataHd = (PSTxDataHead_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS));
- cbHeaderSize = wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS) + sizeof(STxDataHead_g);
+ pvRrvTime = (struct vnt_rrv_time_cts *) (pbyTxBufferAddr + wTxBufSize);
+ pMICHDR = (struct vnt_mic_hdr *) (pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_cts));
+ rts_cts = (struct vnt_cts *) (pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_cts) + cbMICHDR);
+ pvTxDataHd = (struct vnt_tx_datahead_g *) (pbyTxBufferAddr +
+ wTxBufSize + sizeof(struct vnt_rrv_time_cts) + cbMICHDR +
+ sizeof(struct vnt_cts));
+ cbHeaderSize = wTxBufSize + sizeof(struct vnt_rrv_time_cts) + cbMICHDR +
+ sizeof(struct vnt_cts) + sizeof(struct vnt_tx_datahead_g);
}
else {//802.11a/b packet
- pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
- pvRTS = NULL;
- pvCTS = NULL;
- pvTxDataHd = (PSTxDataHead_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR);
- cbHeaderSize = wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(STxDataHead_ab);
+ pvRrvTime = (struct vnt_rrv_time_ab *) (pbyTxBufferAddr + wTxBufSize);
+ pMICHDR = (struct vnt_mic_hdr *) (pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_ab));
+ pvTxDataHd = (struct vnt_tx_datahead_ab *)(pbyTxBufferAddr +
+ wTxBufSize + sizeof(struct vnt_rrv_time_ab) + cbMICHDR);
+ cbHeaderSize = wTxBufSize + sizeof(struct vnt_rrv_time_ab) + cbMICHDR +
+ sizeof(struct vnt_tx_datahead_ab);
}
- memset((void *)(pbyTxBufferAddr + wTxBufSize), 0,
- (cbHeaderSize - wTxBufSize));
memcpy(&(sEthHeader.h_dest[0]),
&(p80211Header->sA3.abyAddr1[0]),
ETH_ALEN);
@@ -2253,13 +2047,14 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
//=========================
pTxBufHead->wFragCtl |= (u16)FRAGCTL_NONFRAG;
- //Fill FIFO,RrvTime,RTS,and CTS
- s_vGenerateTxParameter(pDevice, byPktType, wCurrentRate, pbyTxBufferAddr, pvRrvTime, pvRTS, pvCTS,
- cbFrameSize, bNeedACK, TYPE_TXDMA0, &sEthHeader);
+ /* Fill FIFO,RrvTime,RTS,and CTS */
+ s_vGenerateTxParameter(pDevice, byPktType, wCurrentRate,
+ pbyTxBufferAddr, pvRrvTime, rts_cts,
+ cbFrameSize, bNeedACK, TYPE_TXDMA0, &sEthHeader, false);
//Fill DataHead
uDuration = s_uFillDataHead(pDevice, byPktType, wCurrentRate, pvTxDataHd, cbFrameSize, TYPE_TXDMA0, bNeedACK,
- 0, 0, 1, AUTO_FB_NONE);
+ AUTO_FB_NONE);
pMACHeader = (struct ieee80211_hdr *) (pbyTxBufferAddr + cbHeaderSize);
@@ -2345,7 +2140,7 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
}
s_vFillTxKey(pDevice, (u8 *)(pTxBufHead->adwTxKey), pbyIVHead, pTransmitKey,
- pbyMacHdr, (u16)cbFrameBodySize, (u8 *)pMICHDR);
+ pbyMacHdr, (u16)cbFrameBodySize, pMICHDR);
if (pDevice->bEnableHostWEP) {
pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
@@ -2368,12 +2163,15 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
// This will cause AID-field of PS-POLL packet be incorrect (Because PS-POLL's AID field is
// in the same place of other packet's Duration-field).
// And it will cause Cisco-AP to issue Disassociation-packet
- if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
- ((PSTxDataHead_g)pvTxDataHd)->wDuration_a = cpu_to_le16(p80211Header->sA2.wDurationID);
- ((PSTxDataHead_g)pvTxDataHd)->wDuration_b = cpu_to_le16(p80211Header->sA2.wDurationID);
- } else {
- ((PSTxDataHead_ab)pvTxDataHd)->wDuration = cpu_to_le16(p80211Header->sA2.wDurationID);
- }
+ if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
+ ((struct vnt_tx_datahead_g *)pvTxDataHd)->wDuration_a =
+ cpu_to_le16(p80211Header->sA2.wDurationID);
+ ((struct vnt_tx_datahead_g *)pvTxDataHd)->wDuration_b =
+ cpu_to_le16(p80211Header->sA2.wDurationID);
+ } else {
+ ((struct vnt_tx_datahead_ab *)pvTxDataHd)->wDuration =
+ cpu_to_le16(p80211Header->sA2.wDurationID);
+ }
}
pTX_Buffer->wTxByteCount = cpu_to_le16((u16)(cbReqCount));
@@ -2415,6 +2213,7 @@ int nsDMA_tx_packet(struct vnt_private *pDevice,
{
struct net_device_stats *pStats = &pDevice->stats;
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ struct vnt_tx_buffer *pTX_Buffer;
u32 BytesToWrite = 0, uHeaderLen = 0;
u32 uNodeIndex = 0;
u8 byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
@@ -2428,9 +2227,8 @@ int nsDMA_tx_packet(struct vnt_private *pDevice,
int bNeedDeAuth = false;
u8 *pbyBSSID;
int bNodeExist = false;
- PUSB_SEND_CONTEXT pContext;
+ struct vnt_usb_send_context *pContext;
bool fConvertedPacket;
- PTX_BUFFER pTX_Buffer;
u32 status;
u16 wKeepRate = pDevice->wCurrentRate;
int bTxeapol_key = false;
@@ -2500,7 +2298,7 @@ int nsDMA_tx_packet(struct vnt_private *pDevice,
}
}
- pContext = (PUSB_SEND_CONTEXT)s_vGetFreeContext(pDevice);
+ pContext = (struct vnt_usb_send_context *)s_vGetFreeContext(pDevice);
if (pContext == NULL) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG" pContext == NULL\n");
@@ -2738,8 +2536,10 @@ int nsDMA_tx_packet(struct vnt_private *pDevice,
}
}
+ pTX_Buffer = (struct vnt_tx_buffer *)&pContext->Data[0];
+
fConvertedPacket = s_bPacketToWirelessUsb(pDevice, byPktType,
- (u8 *)(&pContext->Data[0]), bNeedEncryption,
+ pTX_Buffer, bNeedEncryption,
skb->len, uDMAIdx, &pDevice->sTxEthHeader,
(u8 *)skb->data, pTransmitKey, uNodeIndex,
pDevice->wCurrentRate,
@@ -2761,7 +2561,6 @@ int nsDMA_tx_packet(struct vnt_private *pDevice,
}
}
- pTX_Buffer = (PTX_BUFFER)&(pContext->Data[0]);
pTX_Buffer->byPKTNO = (u8) (((pDevice->wCurrentRate<<4) &0x00F0) | ((pDevice->wSeqCounter - 1) & 0x000F));
pTX_Buffer->wTxByteCount = (u16)BytesToWrite;
@@ -2808,20 +2607,20 @@ int bRelayPacketSend(struct vnt_private *pDevice, u8 *pbySkbData, u32 uDataLen,
u32 uNodeIndex)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ struct vnt_tx_buffer *pTX_Buffer;
u32 BytesToWrite = 0, uHeaderLen = 0;
u8 byPktType = PK_TYPE_11B;
int bNeedEncryption = false;
SKeyItem STempKey;
PSKeyItem pTransmitKey = NULL;
u8 *pbyBSSID;
- PUSB_SEND_CONTEXT pContext;
+ struct vnt_usb_send_context *pContext;
u8 byPktTyp;
int fConvertedPacket;
- PTX_BUFFER pTX_Buffer;
u32 status;
u16 wKeepRate = pDevice->wCurrentRate;
- pContext = (PUSB_SEND_CONTEXT)s_vGetFreeContext(pDevice);
+ pContext = (struct vnt_usb_send_context *)s_vGetFreeContext(pDevice);
if (NULL == pContext) {
return false;
@@ -2898,8 +2697,10 @@ int bRelayPacketSend(struct vnt_private *pDevice, u8 *pbySkbData, u32 uDataLen,
// Convert the packet to an usb frame and copy into our buffer
// and send the irp.
+ pTX_Buffer = (struct vnt_tx_buffer *)&pContext->Data[0];
+
fConvertedPacket = s_bPacketToWirelessUsb(pDevice, byPktType,
- (u8 *)(&pContext->Data[0]), bNeedEncryption,
+ pTX_Buffer, bNeedEncryption,
uDataLen, TYPE_AC0DMA, &pDevice->sTxEthHeader,
pbySkbData, pTransmitKey, uNodeIndex,
pDevice->wCurrentRate,
@@ -2911,7 +2712,6 @@ int bRelayPacketSend(struct vnt_private *pDevice, u8 *pbySkbData, u32 uDataLen,
return false;
}
- pTX_Buffer = (PTX_BUFFER)&(pContext->Data[0]);
pTX_Buffer->byPKTNO = (u8) (((pDevice->wCurrentRate<<4) &0x00F0) | ((pDevice->wSeqCounter - 1) & 0x000F));
pTX_Buffer->wTxByteCount = (u16)BytesToWrite;
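
The rxtx.c rework above drops the hand-rolled RTS/CTS field lists in favour of the generic struct ieee80211_rts / struct ieee80211_cts frame bodies (the buf->data and pBuf->data members filled in the RTS/CTS head routines). The vnt_fill_ieee80211_rts() helper itself lies outside these hunks, so the stand-alone sketch below only approximates that pattern: the demo_* names and the struct layout are simplified stand-ins, not the driver's real API, and actual driver code would use struct ieee80211_rts from <linux/ieee80211.h> together with cpu_to_le16().

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_ETH_ALEN 6

/* Simplified stand-in for struct ieee80211_rts; the real driver stores
 * little-endian fields via cpu_to_le16(). */
struct demo_ieee80211_rts {
	uint16_t frame_control;
	uint16_t duration;
	uint8_t ra[DEMO_ETH_ALEN];
	uint8_t ta[DEMO_ETH_ALEN];
} __attribute__((packed));

/* Hypothetical helper mirroring what a vnt_fill_ieee80211_rts()-style
 * routine is expected to do: one struct fill instead of the old
 * per-field assignments. */
static void demo_fill_rts(struct demo_ieee80211_rts *rts, uint16_t duration,
			  const uint8_t *dest, const uint8_t *src)
{
	rts->frame_control = 0x00b4;	/* control type, RTS subtype (LE value) */
	rts->duration = duration;
	memcpy(rts->ra, dest, DEMO_ETH_ALEN);
	memcpy(rts->ta, src, DEMO_ETH_ALEN);
}

int main(void)
{
	const uint8_t da[DEMO_ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	const uint8_t sa[DEMO_ETH_ALEN] = { 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb };
	struct demo_ieee80211_rts rts;

	demo_fill_rts(&rts, 314, da, sa);
	printf("RTS body: %zu bytes, duration %u\n", sizeof(rts), rts.duration);
	return 0;
}

The net effect of the change is that the RTS/CTS body becomes one memcpy-friendly struct rather than a dozen separate duration and address fields.
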
diff --git a/drivers/staging/vt6656/rxtx.h b/drivers/staging/vt6656/rxtx.h
index dd7e85d..4bbee1c 100644
--- a/drivers/staging/vt6656/rxtx.h
+++ b/drivers/staging/vt6656/rxtx.h
@@ -31,602 +31,173 @@
#include "device.h"
#include "wcmd.h"
-
-//
-// RTS buffer header
-//
-typedef struct tagSRTSDataF {
- u16 wFrameControl;
- u16 wDurationID;
- u8 abyRA[ETH_ALEN];
- u8 abyTA[ETH_ALEN];
-} SRTSDataF, *PSRTSDataF;
-
-//
-// CTS buffer header
-//
-typedef struct tagSCTSDataF {
- u16 wFrameControl;
- u16 wDurationID;
- u8 abyRA[ETH_ALEN];
- u16 wReserved;
-} SCTSDataF, *PSCTSDataF;
-
-//
-// MICHDR data header
-//
-typedef struct tagSMICHDR {
- u32 adwHDR0[4];
- u32 adwHDR1[4];
- u32 adwHDR2[4];
-} SMICHDR, *PSMICHDR;
-
-typedef struct tagSTX_NAF_G_RTS
-{
- //RsvTime
- u16 wRTSTxRrvTime_ba;
- u16 wRTSTxRrvTime_aa;
- u16 wRTSTxRrvTime_bb;
- u16 wReserved2;
- u16 wTxRrvTime_b;
- u16 wTxRrvTime_a;
-
- //RTS
- u8 byRTSSignalField_b;
- u8 byRTSServiceField_b;
- u16 wRTSTransmitLength_b;
- u8 byRTSSignalField_a;
- u8 byRTSServiceField_a;
- u16 wRTSTransmitLength_a;
- u16 wRTSDuration_ba;
- u16 wRTSDuration_aa;
- u16 wRTSDuration_bb;
- u16 wReserved3;
- SRTSDataF sRTS;
-
- //Data
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_b;
- u16 wDuration_a;
- u16 wTimeStampOff_b;
- u16 wTimeStampOff_a;
-
-} TX_NAF_G_RTS, *PTX_NAF_G_RTS;
-
-typedef struct tagSTX_NAF_G_RTS_MIC
-{
- //RsvTime
- u16 wRTSTxRrvTime_ba;
- u16 wRTSTxRrvTime_aa;
- u16 wRTSTxRrvTime_bb;
- u16 wReserved2;
- u16 wTxRrvTime_b;
- u16 wTxRrvTime_a;
-
- SMICHDR sMICHDR;
-
- //RTS
- u8 byRTSSignalField_b;
- u8 byRTSServiceField_b;
- u16 wRTSTransmitLength_b;
- u8 byRTSSignalField_a;
- u8 byRTSServiceField_a;
- u16 wRTSTransmitLength_a;
- u16 wRTSDuration_ba;
- u16 wRTSDuration_aa;
- u16 wRTSDuration_bb;
- u16 wReserved3;
- SRTSDataF sRTS;
-
- //Data
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_b;
- u16 wDuration_a;
- u16 wTimeStampOff_b;
- u16 wTimeStampOff_a;
-
-} TX_NAF_G_RTS_MIC, *PTX_NAF_G_RTS_MIC;
-
-typedef struct tagSTX_NAF_G_CTS
-{
- //RsvTime
- u16 wCTSTxRrvTime_ba;
- u16 wReserved2;
- u16 wTxRrvTime_b;
- u16 wTxRrvTime_a;
-
- //CTS
- u8 byCTSSignalField_b;
- u8 byCTSServiceField_b;
- u16 wCTSTransmitLength_b;
- u16 wCTSDuration_ba;
- u16 wReserved3;
- SCTSDataF sCTS;
-
- //Data
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_b;
- u16 wDuration_a;
- u16 wTimeStampOff_b;
- u16 wTimeStampOff_a;
-
-} TX_NAF_G_CTS, *PTX_NAF_G_CTS;
-
-typedef struct tagSTX_NAF_G_CTS_MIC
-{
- //RsvTime
- u16 wCTSTxRrvTime_ba;
- u16 wReserved2;
- u16 wTxRrvTime_b;
- u16 wTxRrvTime_a;
-
- SMICHDR sMICHDR;
-
- //CTS
- u8 byCTSSignalField_b;
- u8 byCTSServiceField_b;
- u16 wCTSTransmitLength_b;
- u16 wCTSDuration_ba;
- u16 wReserved3;
- SCTSDataF sCTS;
-
- //Data
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_b;
- u16 wDuration_a;
- u16 wTimeStampOff_b;
- u16 wTimeStampOff_a;
-
-} TX_NAF_G_CTS_MIC, *PTX_NAF_G_CTS_MIC;
-
-typedef struct tagSTX_NAF_G_BEACON
-{
- u16 wFIFOCtl;
- u16 wTimeStamp;
-
- //CTS
- u8 byCTSSignalField_b;
- u8 byCTSServiceField_b;
- u16 wCTSTransmitLength_b;
- u16 wCTSDuration_ba;
- u16 wReserved1;
- SCTSDataF sCTS;
-
- //Data
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_a;
- u16 wTimeStampOff_a;
-
-} TX_NAF_G_BEACON, *PTX_NAF_G_BEACON;
-
-typedef struct tagSTX_NAF_AB_RTS
-{
- //RsvTime
- u16 wRTSTxRrvTime_ab;
- u16 wTxRrvTime_ab;
-
- //RTS
- u8 byRTSSignalField_ab;
- u8 byRTSServiceField_ab;
- u16 wRTSTransmitLength_ab;
- u16 wRTSDuration_ab;
- u16 wReserved2;
- SRTSDataF sRTS;
-
- //Data
- u8 bySignalField_ab;
- u8 byServiceField_ab;
- u16 wTransmitLength_ab;
- u16 wDuration_ab;
- u16 wTimeStampOff_ab;
-
-} TX_NAF_AB_RTS, *PTX_NAF_AB_RTS;
-
-typedef struct tagSTX_NAF_AB_RTS_MIC
-{
- //RsvTime
- u16 wRTSTxRrvTime_ab;
- u16 wTxRrvTime_ab;
-
- SMICHDR sMICHDR;
-
- //RTS
- u8 byRTSSignalField_ab;
- u8 byRTSServiceField_ab;
- u16 wRTSTransmitLength_ab;
- u16 wRTSDuration_ab;
- u16 wReserved2;
- SRTSDataF sRTS;
-
- //Data
- u8 bySignalField_ab;
- u8 byServiceField_ab;
- u16 wTransmitLength_ab;
- u16 wDuration_ab;
- u16 wTimeStampOff_ab;
-
-} TX_NAF_AB_RTS_MIC, *PTX_NAF_AB_RTS_MIC;
-
-typedef struct tagSTX_NAF_AB_CTS
-{
- //RsvTime
- u16 wReserved2;
- u16 wTxRrvTime_ab;
-
- //Data
- u8 bySignalField_ab;
- u8 byServiceField_ab;
- u16 wTransmitLength_ab;
- u16 wDuration_ab;
- u16 wTimeStampOff_ab;
-
-} TX_NAF_AB_CTS, *PTX_NAF_AB_CTS;
-
-typedef struct tagSTX_NAF_AB_CTS_MIC
-{
- //RsvTime
- u16 wReserved2;
- u16 wTxRrvTime_ab;
-
- SMICHDR sMICHDR;
-
- //Data
- u8 bySignalField_ab;
- u8 byServiceField_ab;
- u16 wTransmitLength_ab;
- u16 wDuration_ab;
- u16 wTimeStampOff_ab;
-
-} TX_NAF_AB_CTS_MIC, *PTX_NAF_AB_CTS_MIC;
-
-typedef struct tagSTX_NAF_AB_BEACON
-{
- u16 wFIFOCtl;
- u16 wTimeStamp;
-
- //Data
- u8 bySignalField_ab;
- u8 byServiceField_ab;
- u16 wTransmitLength_ab;
- u16 wDuration_ab;
- u16 wTimeStampOff_ab;
-
-} TX_NAF_AB_BEACON, *PTX_NAF_AB_BEACON;
-
-typedef struct tagSTX_AF_G_RTS
-{
- //RsvTime
- u16 wRTSTxRrvTime_ba;
- u16 wRTSTxRrvTime_aa;
- u16 wRTSTxRrvTime_bb;
- u16 wReserved2;
- u16 wTxRrvTime_b;
- u16 wTxRrvTime_a;
-
- //RTS
- u8 byRTSSignalField_b;
- u8 byRTSServiceField_b;
- u16 wRTSTransmitLength_b;
- u8 byRTSSignalField_a;
- u8 byRTSServiceField_a;
- u16 wRTSTransmitLength_a;
- u16 wRTSDuration_ba;
- u16 wRTSDuration_aa;
- u16 wRTSDuration_bb;
- u16 wReserved3;
- u16 wRTSDuration_ba_f0;
- u16 wRTSDuration_aa_f0;
- u16 wRTSDuration_ba_f1;
- u16 wRTSDuration_aa_f1;
- SRTSDataF sRTS;
-
- //Data
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_b;
- u16 wDuration_a;
- u16 wDuration_a_f0;
- u16 wDuration_a_f1;
- u16 wTimeStampOff_b;
- u16 wTimeStampOff_a;
-
-} TX_AF_G_RTS, *PTX_AF_G_RTS;
-
-typedef struct tagSTX_AF_G_RTS_MIC
-{
- //RsvTime
- u16 wRTSTxRrvTime_ba;
- u16 wRTSTxRrvTime_aa;
- u16 wRTSTxRrvTime_bb;
- u16 wReserved2;
- u16 wTxRrvTime_b;
- u16 wTxRrvTime_a;
-
- SMICHDR sMICHDR;
-
- //RTS
- u8 byRTSSignalField_b;
- u8 byRTSServiceField_b;
- u16 wRTSTransmitLength_b;
- u8 byRTSSignalField_a;
- u8 byRTSServiceField_a;
- u16 wRTSTransmitLength_a;
- u16 wRTSDuration_ba;
- u16 wRTSDuration_aa;
- u16 wRTSDuration_bb;
- u16 wReserved3;
- u16 wRTSDuration_ba_f0;
- u16 wRTSDuration_aa_f0;
- u16 wRTSDuration_ba_f1;
- u16 wRTSDuration_aa_f1;
- SRTSDataF sRTS;
-
- //Data
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_b;
- u16 wDuration_a;
- u16 wDuration_a_f0;
- u16 wDuration_a_f1;
- u16 wTimeStampOff_b;
- u16 wTimeStampOff_a;
-
-} TX_AF_G_RTS_MIC, *PTX_AF_G_RTS_MIC;
-
-typedef struct tagSTX_AF_G_CTS
-{
- //RsvTime
- u16 wCTSTxRrvTime_ba;
- u16 wReserved2;
- u16 wTxRrvTime_b;
- u16 wTxRrvTime_a;
-
- //CTS
- u8 byCTSSignalField_b;
- u8 byCTSServiceField_b;
- u16 wCTSTransmitLength_b;
- u16 wCTSDuration_ba;
- u16 wReserved3;
- u16 wCTSDuration_ba_f0;
- u16 wCTSDuration_ba_f1;
- SCTSDataF sCTS;
-
- //Data
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_b;
- u16 wDuration_a;
- u16 wDuration_a_f0;
- u16 wDuration_a_f1;
- u16 wTimeStampOff_b;
- u16 wTimeStampOff_a;
-
-} TX_AF_G_CTS, *PTX_AF_G_CTS;
-
-typedef struct tagSTX_AF_G_CTS_MIC
-{
- //RsvTime
- u16 wCTSTxRrvTime_ba;
- u16 wReserved2;
- u16 wTxRrvTime_b;
- u16 wTxRrvTime_a;
-
- SMICHDR sMICHDR;
-
- //CTS
- u8 byCTSSignalField_b;
- u8 byCTSServiceField_b;
- u16 wCTSTransmitLength_b;
- u16 wCTSDuration_ba;
- u16 wReserved3;
- u16 wCTSDuration_ba_f0;
- u16 wCTSDuration_ba_f1;
- SCTSDataF sCTS;
-
- //Data
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_b;
- u16 wDuration_a;
- u16 wDuration_a_f0;
- u16 wDuration_a_f1;
- u16 wTimeStampOff_b;
- u16 wTimeStampOff_a;
-
-} TX_AF_G_CTS_MIC, *PTX_AF_G_CTS_MIC;
-
-typedef struct tagSTX_AF_A_RTS
-{
- //RsvTime
- u16 wRTSTxRrvTime_a;
- u16 wTxRrvTime_a;
-
- //RTS
- u8 byRTSSignalField_a;
- u8 byRTSServiceField_a;
- u16 wRTSTransmitLength_a;
- u16 wRTSDuration_a;
- u16 wReserved2;
- u16 wRTSDuration_a_f0;
- u16 wRTSDuration_a_f1;
- SRTSDataF sRTS;
-
- //Data
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_a;
- u16 wTimeStampOff_a;
- u16 wDuration_a_f0;
- u16 wDuration_a_f1;
-
-} TX_AF_A_RTS, *PTX_AF_A_RTS;
-
-typedef struct tagSTX_AF_A_RTS_MIC
-{
- //RsvTime
- u16 wRTSTxRrvTime_a;
- u16 wTxRrvTime_a;
-
- SMICHDR sMICHDR;
-
- //RTS
- u8 byRTSSignalField_a;
- u8 byRTSServiceField_a;
- u16 wRTSTransmitLength_a;
- u16 wRTSDuration_a;
- u16 wReserved2;
- u16 wRTSDuration_a_f0;
- u16 wRTSDuration_a_f1;
- SRTSDataF sRTS;
-
- //Data
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_a;
- u16 wTimeStampOff_a;
- u16 wDuration_a_f0;
- u16 wDuration_a_f1;
-
-} TX_AF_A_RTS_MIC, *PTX_AF_A_RTS_MIC;
-
-typedef struct tagSTX_AF_A_CTS
-{
- //RsvTime
- u16 wReserved2;
- u16 wTxRrvTime_a;
-
- //Data
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_a;
- u16 wTimeStampOff_a;
- u16 wDuration_a_f0;
- u16 wDuration_a_f1;
-
-} TX_AF_A_CTS, *PTX_AF_A_CTS;
-
-typedef struct tagSTX_AF_A_CTS_MIC
-{
- //RsvTime
- u16 wReserved2;
- u16 wTxRrvTime_a;
-
- SMICHDR sMICHDR;
-
- //Data
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_a;
- u16 wTimeStampOff_a;
- u16 wDuration_a_f0;
- u16 wDuration_a_f1;
-
-} TX_AF_A_CTS_MIC, *PTX_AF_A_CTS_MIC;
-
-//
-// union with all of the TX Buffer Type
-//
-typedef union tagUTX_BUFFER_CONTAINER
-{
- TX_NAF_G_RTS RTS_G;
- TX_NAF_G_RTS_MIC RTS_G_MIC;
- TX_NAF_G_CTS CTS_G;
- TX_NAF_G_CTS_MIC CTS_G_MIC;
- //TX_NAF_G_BEACON Beacon_G;
- TX_NAF_AB_RTS RTS_AB;
- TX_NAF_AB_RTS_MIC RTS_AB_MIC;
- TX_NAF_AB_CTS CTS_AB;
- TX_NAF_AB_CTS_MIC CTS_AB_MIC;
- //TX_NAF_AB_BEACON Beacon_AB;
- TX_AF_G_RTS RTS_G_AutoFB;
- TX_AF_G_RTS_MIC RTS_G_AutoFB_MIC;
- TX_AF_G_CTS CTS_G_AutoFB;
- TX_AF_G_CTS_MIC CTS_G_AutoFB_MIC;
- TX_AF_A_RTS RTS_A_AutoFB;
- TX_AF_A_RTS_MIC RTS_A_AutoFB_MIC;
- TX_AF_A_CTS CTS_A_AutoFB;
- TX_AF_A_CTS_MIC CTS_A_AutoFB_MIC;
-
-} TX_BUFFER_CONTAINER, *PTX_BUFFER_CONTAINER;
-
-//
-// Remote NDIS message format
-//
-typedef struct tagSTX_BUFFER
-{
- u8 byType;
- u8 byPKTNO;
- u16 wTxByteCount;
-
+#include "baseband.h"
+
+/* MIC HDR data header */
+struct vnt_mic_hdr {
+ u8 id;
+ u8 tx_priority;
+ u8 mic_addr2[6];
+ __be32 tsc_47_16;
+ __be16 tsc_15_0;
+ __be16 payload_len;
+ __be16 hlen;
+ __le16 frame_control;
+ u8 addr1[6];
+ u8 addr2[6];
+ u8 addr3[6];
+ __le16 seq_ctrl;
+ u8 addr4[6];
+ u16 packing; /* packing to 48 bytes */
+} __packed;
+
+/* RsvTime buffer header */
+struct vnt_rrv_time_rts {
+ u16 wRTSTxRrvTime_ba;
+ u16 wRTSTxRrvTime_aa;
+ u16 wRTSTxRrvTime_bb;
+ u16 wReserved;
+ u16 wTxRrvTime_b;
+ u16 wTxRrvTime_a;
+} __packed;
+
+struct vnt_rrv_time_cts {
+ u16 wCTSTxRrvTime_ba;
+ u16 wReserved;
+ u16 wTxRrvTime_b;
+ u16 wTxRrvTime_a;
+} __packed;
+
+struct vnt_rrv_time_ab {
+ u16 wRTSTxRrvTime;
+ u16 wTxRrvTime;
+} __packed;
+
+/* TX data header */
+struct vnt_tx_datahead_g {
+ struct vnt_phy_field b;
+ struct vnt_phy_field a;
+ u16 wDuration_b;
+ u16 wDuration_a;
+ u16 wTimeStampOff_b;
+ u16 wTimeStampOff_a;
+} __packed;
+
+struct vnt_tx_datahead_g_fb {
+ struct vnt_phy_field b;
+ struct vnt_phy_field a;
+ u16 wDuration_b;
+ u16 wDuration_a;
+ u16 wDuration_a_f0;
+ u16 wDuration_a_f1;
+ u16 wTimeStampOff_b;
+ u16 wTimeStampOff_a;
+} __packed;
+
+struct vnt_tx_datahead_ab {
+ struct vnt_phy_field ab;
+ u16 wDuration;
+ u16 wTimeStampOff;
+} __packed;
+
+struct vnt_tx_datahead_a_fb {
+ struct vnt_phy_field a;
+ u16 wDuration;
+ u16 wTimeStampOff;
+ u16 wDuration_f0;
+ u16 wDuration_f1;
+} __packed;
+
+/* RTS buffer header */
+struct vnt_rts_g {
+ struct vnt_phy_field b;
+ struct vnt_phy_field a;
+ u16 wDuration_ba;
+ u16 wDuration_aa;
+ u16 wDuration_bb;
+ u16 wReserved;
+ struct ieee80211_rts data;
+} __packed;
+
+struct vnt_rts_g_fb {
+ struct vnt_phy_field b;
+ struct vnt_phy_field a;
+ u16 wDuration_ba;
+ u16 wDuration_aa;
+ u16 wDuration_bb;
+ u16 wReserved;
+ u16 wRTSDuration_ba_f0;
+ u16 wRTSDuration_aa_f0;
+ u16 wRTSDuration_ba_f1;
+ u16 wRTSDuration_aa_f1;
+ struct ieee80211_rts data;
+} __packed;
+
+struct vnt_rts_ab {
+ struct vnt_phy_field ab;
+ u16 wDuration;
+ u16 wReserved;
+ struct ieee80211_rts data;
+} __packed;
+
+struct vnt_rts_a_fb {
+ struct vnt_phy_field a;
+ u16 wDuration;
+ u16 wReserved;
+ u16 wRTSDuration_f0;
+ u16 wRTSDuration_f1;
+ struct ieee80211_rts data;
+} __packed;
+
+/* CTS buffer header */
+struct vnt_cts {
+ struct vnt_phy_field b;
+ u16 wDuration_ba;
+ u16 wReserved;
+ struct ieee80211_cts data;
+ u16 reserved2;
+} __packed;
+
+struct vnt_cts_fb {
+ struct vnt_phy_field b;
+ u16 wDuration_ba;
+ u16 wReserved;
+ u16 wCTSDuration_ba_f0;
+ u16 wCTSDuration_ba_f1;
+ struct ieee80211_cts data;
+ u16 reserved2;
+} __packed;
+
+union vnt_tx_data_head {
+ /* rts g */
+ struct vnt_rts_g rts_g;
+ struct vnt_rts_g_fb rts_g_fb;
+ /* rts a/b */
+ struct vnt_rts_ab rts_ab;
+ struct vnt_rts_a_fb rts_a_fb;
+ /* cts g */
+ struct vnt_cts cts_g;
+ struct vnt_cts_fb cts_g_fb;
+};
+
+struct vnt_tx_buffer {
+ u8 byType;
+ u8 byPKTNO;
+ u16 wTxByteCount;
u32 adwTxKey[4];
- u16 wFIFOCtl;
- u16 wTimeStamp;
- u16 wFragCtl;
- u16 wReserved;
-
- // Actual message
- TX_BUFFER_CONTAINER BufferHeader;
-
-} TX_BUFFER, *PTX_BUFFER;
-
-//
-// Remote NDIS message format
-//
-typedef struct tagSBEACON_BUFFER
-{
- u8 byType;
- u8 byPKTNO;
- u16 wTxByteCount;
-
- u16 wFIFOCtl;
- u16 wTimeStamp;
-
- // Actual message
- TX_BUFFER_CONTAINER BufferHeader;
-
-} BEACON_BUFFER, *PBEACON_BUFFER;
+ u16 wFIFOCtl;
+ u16 wTimeStamp;
+ u16 wFragCtl;
+ u16 wReserved;
+} __packed;
+
+struct vnt_beacon_buffer {
+ u8 byType;
+ u8 byPKTNO;
+ u16 wTxByteCount;
+ u16 wFIFOCtl;
+ u16 wTimeStamp;
+} __packed;
void vDMA0_tx_80211(struct vnt_private *, struct sk_buff *skb);
int nsDMA_tx_packet(struct vnt_private *, u32 uDMAIdx, struct sk_buff *skb);
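
The rewritten header trades the old tagSTX_* typedefs for a few packed structs plus union vnt_tx_data_head, so the transmit paths hand s_vFillRTSHead()/s_vFillCTSHead() a single rts_cts pointer and compose the header length from sizeof() of whichever members apply. A minimal stand-alone sketch of that union-plus-sizeof pattern follows; the demo_* types and the byte counts in them are made-up stand-ins, and only the shape of the arithmetic mirrors the code above.

#include <stdint.h>
#include <stdio.h>

/* Made-up stand-ins: only the union/sizeof shape mirrors rxtx.h. */
struct demo_rts_head {
	uint16_t duration_ba, duration_aa, duration_bb, reserved;
	uint8_t frame[20];		/* RTS body placeholder */
} __attribute__((packed));

struct demo_cts_head {
	uint16_t duration_ba, reserved;
	uint8_t frame[14];		/* CTS body placeholder */
} __attribute__((packed));

union demo_tx_data_head {
	struct demo_rts_head rts;	/* picked when an RTS is needed */
	struct demo_cts_head cts;	/* picked for the CTS-only case */
};

int main(void)
{
	unsigned int tx_buf_head = 16;	/* stand-in for wTxBufSize */
	unsigned int rrv_time = 10;	/* stand-in for the rrv_time block */
	int need_rts = 1;

	/* Same composition as cbHeaderLength in s_bPacketToWirelessUsb():
	 * fixed buffer head + reserve-time block + chosen union member. */
	unsigned int hdr_len = tx_buf_head + rrv_time +
		(need_rts ? sizeof(struct demo_rts_head)
			  : sizeof(struct demo_cts_head));

	printf("union size %zu, header length %u\n",
	       sizeof(union demo_tx_data_head), hdr_len);
	return 0;
}
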
diff --git a/drivers/staging/vt6656/tether.h b/drivers/staging/vt6656/tether.h
index 24465cf..aec6b56 100644
--- a/drivers/staging/vt6656/tether.h
+++ b/drivers/staging/vt6656/tether.h
@@ -99,16 +99,6 @@
#define WEP_IV_MASK 0x00FFFFFF
-//
-// 802_3 packet
-//
-typedef struct tagS802_3Header {
- u8 abyDstAddr[ETH_ALEN];
- u8 abySrcAddr[ETH_ALEN];
- u16 wLen;
-} __attribute__ ((__packed__))
-S802_3Header, *PS802_3Header;
-
//u8 ETHbyGetHashIndexByCrc(u8 * pbyMultiAddr);
bool ETHbIsBufferCrc32Ok(u8 * pbyBuffer, unsigned int cbFrameLength);
diff --git a/drivers/staging/vt6656/tmacro.h b/drivers/staging/vt6656/tmacro.h
index 15cd5ab..15e724e 100644
--- a/drivers/staging/vt6656/tmacro.h
+++ b/drivers/staging/vt6656/tmacro.h
@@ -45,14 +45,8 @@
#define HIWORD(d) ((u16)((((u32)(d)) >> 16) & 0xFFFF))
#endif
-#define LODWORD(q) ((q).u.dwLowDword)
-#define HIDWORD(q) ((q).u.dwHighDword)
-
#if !defined(MAKEWORD)
#define MAKEWORD(lb, hb) ((u16)(((u8)(lb)) | (((u16)((u8)(hb))) << 8)))
#endif
-#if !defined(MAKEDWORD)
-#define MAKEDWORD(lw, hw) ((u32)(((u16)(lw)) | (((u32)((u16)(hw))) << 16)))
-#endif
#endif /* __TMACRO_H__ */
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index 098be60..3a03f1d 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -421,7 +421,7 @@ static void s_nsInterruptUsbIoCompleteRead(struct urb *urb)
*
*/
-int PIPEnsBulkInUsbRead(struct vnt_private *pDevice, PRCB pRCB)
+int PIPEnsBulkInUsbRead(struct vnt_private *pDevice, struct vnt_rcb *pRCB)
{
int ntStatus = 0;
struct urb *pUrb;
@@ -479,7 +479,7 @@ int PIPEnsBulkInUsbRead(struct vnt_private *pDevice, PRCB pRCB)
static void s_nsBulkInUsbIoCompleteRead(struct urb *urb)
{
- PRCB pRCB = (PRCB)urb->context;
+ struct vnt_rcb *pRCB = (struct vnt_rcb *)urb->context;
struct vnt_private *pDevice = pRCB->pDevice;
unsigned long bytesRead;
int bIndicateReceive = false;
@@ -546,7 +546,8 @@ static void s_nsBulkInUsbIoCompleteRead(struct urb *urb)
*
*/
-int PIPEnsSendBulkOut(struct vnt_private *pDevice, PUSB_SEND_CONTEXT pContext)
+int PIPEnsSendBulkOut(struct vnt_private *pDevice,
+ struct vnt_usb_send_context *pContext)
{
int status;
struct urb *pUrb;
@@ -628,14 +629,13 @@ static void s_nsBulkOutIoCompleteWrite(struct urb *urb)
int status;
CONTEXT_TYPE ContextType;
unsigned long ulBufLen;
- PUSB_SEND_CONTEXT pContext;
+ struct vnt_usb_send_context *pContext;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsBulkOutIoCompleteWrite\n");
//
// The context given to IoSetCompletionRoutine is an USB_CONTEXT struct
//
- pContext = (PUSB_SEND_CONTEXT) urb->context;
- ASSERT( NULL != pContext );
+ pContext = (struct vnt_usb_send_context *)urb->context;
pDevice = pContext->pDevice;
ContextType = pContext->Type;
diff --git a/drivers/staging/vt6656/usbpipe.h b/drivers/staging/vt6656/usbpipe.h
index bb7a611..f537703 100644
--- a/drivers/staging/vt6656/usbpipe.h
+++ b/drivers/staging/vt6656/usbpipe.h
@@ -40,7 +40,8 @@ int PIPEnsControlIn(struct vnt_private *, u8 byRequest, u16 wValue,
u16 wIndex, u16 wLength, u8 *pbyBuffer);
int PIPEnsInterruptRead(struct vnt_private *);
-int PIPEnsBulkInUsbRead(struct vnt_private *, PRCB pRCB);
-int PIPEnsSendBulkOut(struct vnt_private *, PUSB_SEND_CONTEXT pContext);
+int PIPEnsBulkInUsbRead(struct vnt_private *, struct vnt_rcb *pRCB);
+int PIPEnsSendBulkOut(struct vnt_private *,
+ struct vnt_usb_send_context *pContext);
#endif /* __USBPIPE_H__ */
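
Dropping the PRCB and PUSB_SEND_CONTEXT pointer typedefs in favour of explicit struct pointers matches the usual kernel style; a prototype-only header needs nothing more than forward declarations, as in this sketch (the function name is illustrative):

struct vnt_private;
struct vnt_rcb;         /* forward declarations; no full definition required here */

int example_bulk_in_read(struct vnt_private *priv, struct vnt_rcb *rcb);
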
diff --git a/drivers/staging/vt6656/wmgr.c b/drivers/staging/vt6656/wmgr.c
index 6d1ff5e..b6cbd13 100644
--- a/drivers/staging/vt6656/wmgr.c
+++ b/drivers/staging/vt6656/wmgr.c
@@ -751,7 +751,6 @@ static void s_vMgrRxAssocResponse(struct vnt_private *pDevice,
|| (sFrame.pwStatus == NULL)
|| (sFrame.pwAid == NULL)
|| (sFrame.pSuppRates == NULL)) {
- DBG_PORT80(0xCC);
return;
}
@@ -3750,7 +3749,6 @@ static void s_vMgrRxProbeResponse(struct vnt_private *pDevice,
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Probe resp:Fail addr:[%p]\n",
pRxPacket->p80211Header);
- DBG_PORT80(0xCC);
return;
}
diff --git a/drivers/staging/winbond/mds.c b/drivers/staging/winbond/mds.c
index faa93f0..fcc3d21 100644
--- a/drivers/staging/winbond/mds.c
+++ b/drivers/staging/winbond/mds.c
@@ -15,7 +15,8 @@ Mds_initial(struct wbsoft_priv *adapter)
return hal_get_tx_buffer(&adapter->sHwData, &pMds->pTxBuffer);
}
-static void Mds_DurationSet(struct wbsoft_priv *adapter, struct wb35_descriptor *pDes, u8 *buffer)
+static void Mds_DurationSet(struct wbsoft_priv *adapter,
+ struct wb35_descriptor *pDes, u8 *buffer)
{
struct T00_descriptor *pT00;
struct T01_descriptor *pT01;
@@ -43,10 +44,11 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter, struct wb35_descriptor
* Set RTS/CTS mechanism
******************************************/
if (!boGroupAddr) {
- /* NOTE : If the protection mode is enabled and the MSDU will be fragmented,
- * the tx rates of MPDUs will all be DSSS rates. So it will not use
- * CTS-to-self in this case. CTS-To-self will only be used when without
- * fragmentation. -- 20050112 */
+ /* NOTE : If the protection mode is enabled and the MSDU will
+ * be fragmented, the tx rates of MPDUs will all be DSSS
+ * rates. So it will not use CTS-to-self in this case.
+ * CTS-To-self will only be used when without
+ * fragmentation. -- 20050112 */
BodyLen = (u16)pT00->T00_frame_length; /* include 802.11 header */
BodyLen += 4; /* CRC */
@@ -90,8 +92,8 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter, struct wb35_descriptor
* CTS Rate : 24 Mega bps
* CTS frame length = 14 bytes */
Duration += (DEFAULT_SIFSTIME +
- PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION +
- ((112 + 22 + 95)/96)*Tsym);
+ PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION +
+ ((112 + 22 + 95)/96)*Tsym);
} else {
/* CTS + 1 SIFS + CTS duration
* CTS Rate : ?? Mega bps
@@ -101,7 +103,8 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter, struct wb35_descriptor
else
Duration += SHORT_PREAMBLE_PLUS_PLCPHEADER_TIME;
- Duration += (((112 + Rate-1) / Rate) + DEFAULT_SIFSTIME);
+ Duration += (((112 + Rate-1) / Rate) +
+ DEFAULT_SIFSTIME);
}
}
@@ -127,9 +130,10 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter, struct wb35_descriptor
* Rate : ??Mega bps
* ACK frame length = 14 bytes, tx rate = 24M */
Duration = PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION * 3;
- Duration += (((NextBodyLen*8 + 22 + Rate*4 - 1)/(Rate*4)) * Tsym +
- (((2*14)*8 + 22 + 95)/96)*Tsym +
- DEFAULT_SIFSTIME*3);
+ Duration += (((NextBodyLen*8 + 22 + Rate*4 - 1)
+ /(Rate*4)) * Tsym +
+ (((2*14)*8 + 22 + 95)/96)*Tsym +
+ DEFAULT_SIFSTIME*3);
} else {
/* DSSS
* data transmit time + 2 ACK + 3 SIFS
@@ -141,8 +145,9 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter, struct wb35_descriptor
else
Duration = SHORT_PREAMBLE_PLUS_PLCPHEADER_TIME*3;
- Duration += (((NextBodyLen + (2*14))*8 + Rate-1) / Rate +
- DEFAULT_SIFSTIME*3);
+ Duration += (((NextBodyLen + (2*14))*8
+ + Rate-1) / Rate +
+ DEFAULT_SIFSTIME*3);
}
((u16 *)buffer)[5] = cpu_to_le16(Duration); /* 4 USHOR for skip 8B USB, 2USHORT=FC + Duration */
@@ -168,7 +173,8 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter, struct wb35_descriptor
* ACK frame length = 14 bytes */
Duration = PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION;
/* The Tx rate of ACK use 24M */
- Duration += (((112 + 22 + 95)/96)*Tsym + DEFAULT_SIFSTIME);
+ Duration += (((112 + 22 + 95)/96)*Tsym +
+ DEFAULT_SIFSTIME);
} else {
/* DSSS
* 1 ACK + 1 SIFS
@@ -191,7 +197,8 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter, struct wb35_descriptor
}
/* The function return the 4n size of usb pk */
-static u16 Mds_BodyCopy(struct wbsoft_priv *adapter, struct wb35_descriptor *pDes, u8 *TargetBuffer)
+static u16 Mds_BodyCopy(struct wbsoft_priv *adapter,
+ struct wb35_descriptor *pDes, u8 *TargetBuffer)
{
struct T00_descriptor *pT00;
struct wb35_mds *pMds = &adapter->Mds;
@@ -246,7 +253,7 @@ static u16 Mds_BodyCopy(struct wbsoft_priv *adapter, struct wb35_descriptor *pDe
buf_index++;
buf_index %= MAX_DESCRIPTOR_BUFFER_INDEX;
} else {
- u8 *pctmp = pDes->buffer_address[buf_index];
+ u8 *pctmp = pDes->buffer_address[buf_index];
pctmp += CopySize;
pDes->buffer_address[buf_index] = pctmp;
pDes->buffer_size[buf_index] -= CopySize;
@@ -290,7 +297,8 @@ static u16 Mds_BodyCopy(struct wbsoft_priv *adapter, struct wb35_descriptor *pDe
return Size;
}
-static void Mds_HeaderCopy(struct wbsoft_priv *adapter, struct wb35_descriptor *pDes, u8 *TargetBuffer)
+static void Mds_HeaderCopy(struct wbsoft_priv *adapter,
+ struct wb35_descriptor *pDes, u8 *TargetBuffer)
{
struct wb35_mds *pMds = &adapter->Mds;
u8 *src_buffer = pDes->buffer_address[0]; /* 931130.5.g */
@@ -391,11 +399,12 @@ static void Mds_HeaderCopy(struct wbsoft_priv *adapter, struct wb35_descriptor *
pDes->PreambleMode = WLAN_PREAMBLE_TYPE_LONG;
else
pDes->PreambleMode = CURRENT_PREAMBLE_MODE;
- pT01->T01_plcp_header_length = pDes->PreambleMode; /* Set preamble */
+ pT01->T01_plcp_header_length = pDes->PreambleMode; /* Set preamble */
}
-static void MLME_GetNextPacket(struct wbsoft_priv *adapter, struct wb35_descriptor *desc)
+static void MLME_GetNextPacket(struct wbsoft_priv *adapter,
+ struct wb35_descriptor *desc)
{
desc->InternalUsed = desc->buffer_start_index + desc->buffer_number;
desc->InternalUsed %= MAX_DESCRIPTOR_BUFFER_INDEX;
@@ -423,7 +432,8 @@ static void MLMEfreeMMPDUBuffer(struct wbsoft_priv *adapter, s8 *pData)
}
}
-static void MLME_SendComplete(struct wbsoft_priv *adapter, u8 PacketID, unsigned char SendOK)
+static void MLME_SendComplete(struct wbsoft_priv *adapter, u8 PacketID,
+ unsigned char SendOK)
{
/* Reclaim the data buffer */
adapter->sMlmeFrame.len = 0;
@@ -440,9 +450,9 @@ Mds_Tx(struct wbsoft_priv *adapter)
struct wb35_mds *pMds = &adapter->Mds;
struct wb35_descriptor TxDes;
struct wb35_descriptor *pTxDes = &TxDes;
- u8 *XmitBufAddress;
- u16 XmitBufSize, PacketSize, stmp, CurrentSize, FragmentThreshold;
- u8 FillIndex, TxDesIndex, FragmentCount, FillCount;
+ u8 *XmitBufAddress;
+ u16 XmitBufSize, PacketSize, stmp, CurrentSize, FragmentThreshold;
+ u8 FillIndex, TxDesIndex, FragmentCount, FillCount;
unsigned char BufferFilled = false;
diff --git a/drivers/staging/winbond/mds_f.h b/drivers/staging/winbond/mds_f.h
index ce8be07..159b2eb 100644
--- a/drivers/staging/winbond/mds_f.h
+++ b/drivers/staging/winbond/mds_f.h
@@ -7,13 +7,16 @@
unsigned char Mds_initial(struct wbsoft_priv *adapter);
void Mds_Tx(struct wbsoft_priv *adapter);
void Mds_SendComplete(struct wbsoft_priv *adapter, struct T02_descriptor *pt02);
-void Mds_MpduProcess(struct wbsoft_priv *adapter, struct wb35_descriptor *prxdes);
-extern void DataDmp(u8 *pdata, u32 len, u32 offset);
+void Mds_MpduProcess(struct wbsoft_priv *adapter,
+ struct wb35_descriptor *prxdes);
/* For data frame sending */
u16 MDS_GetPacketSize(struct wbsoft_priv *adapter);
-void MDS_GetNextPacket(struct wbsoft_priv *adapter, struct wb35_descriptor *pdes);
-void MDS_GetNextPacketComplete(struct wbsoft_priv *adapter, struct wb35_descriptor *pdes);
-void MDS_SendResult(struct wbsoft_priv *adapter, u8 packetid, unsigned char sendok);
+void MDS_GetNextPacket(struct wbsoft_priv *adapter,
+ struct wb35_descriptor *pdes);
+void MDS_GetNextPacketComplete(struct wbsoft_priv *adapter,
+ struct wb35_descriptor *pdes);
+void MDS_SendResult(struct wbsoft_priv *adapter, u8 packetid,
+ unsigned char sendok);
#endif
diff --git a/drivers/staging/winbond/phy_calibration.c b/drivers/staging/winbond/phy_calibration.c
index cabae34..cfbfbbb 100644
--- a/drivers/staging/winbond/phy_calibration.c
+++ b/drivers/staging/winbond/phy_calibration.c
@@ -296,7 +296,7 @@ void _sin_cos(s32 angle, s32 *sin, s32 *cos)
}
}
-static unsigned char hal_get_dxx_reg(struct hw_data *pHwData, u16 number, u32 * pValue)
+static unsigned char hal_get_dxx_reg(struct hw_data *pHwData, u16 number, u32 *pValue)
{
if (number < 0x1000)
number += 0x1000;
diff --git a/drivers/staging/winbond/phy_calibration.h b/drivers/staging/winbond/phy_calibration.h
index 84f6e84..78fc680 100644
--- a/drivers/staging/winbond/phy_calibration.h
+++ b/drivers/staging/winbond/phy_calibration.h
@@ -79,6 +79,7 @@
#define SHIFT_IQCAL_TONE_Q(x) ((x) >> 13)
void phy_set_rf_data(struct hw_data *pHwData, u32 index, u32 value);
+void phy_calibration_winbond(struct hw_data *phw_data, u32 frequency);
#define phy_init_rf(_A) /* RFSynthesizer_initial(_A) */
#endif
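
With the prototype declared in this header, callers such as wb35reg.c (further below) can drop their local extern and simply include it; a sketch of the resulting call site, assuming the driver's usual headers provide struct hw_data (the wrapper name is illustrative):

#include "phy_calibration.h"    /* now declares phy_calibration_winbond() */

static void example_recalibrate(struct hw_data *phw_data, u32 frequency)
{
        phy_calibration_winbond(phw_data, frequency);
}
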
diff --git a/drivers/staging/winbond/reg.c b/drivers/staging/winbond/reg.c
index 5ecf9a1..75b7752 100644
--- a/drivers/staging/winbond/reg.c
+++ b/drivers/staging/winbond/reg.c
@@ -920,20 +920,20 @@ void Uxx_power_on_procedure(struct hw_data *pHwData)
Wb35Reg_WriteSync(pHwData, 0x03f8, 0x7ff);
}
-void Set_ChanIndep_RfData_al7230_24(struct hw_data *pHwData, u32 *pltmp , char number)
+static void Set_ChanIndep_RfData_al7230_24(struct hw_data *pHwData, u32 *pltmp,
+ char number)
{
u8 i;
-
for (i = 0; i < number; i++) {
pHwData->phy_para[i] = al7230_rf_data_24[i];
pltmp[i] = (1 << 31) | (0 << 30) | (24 << 24) | (al7230_rf_data_24[i] & 0xffffff);
}
}
-void Set_ChanIndep_RfData_al7230_50(struct hw_data *pHwData, u32 *pltmp, char number)
+static void Set_ChanIndep_RfData_al7230_50(struct hw_data *pHwData, u32 *pltmp,
+ char number)
{
u8 i;
-
for (i = 0; i < number; i++) {
pHwData->phy_para[i] = al7230_rf_data_50[i];
pltmp[i] = (1 << 31) | (0 << 30) | (24 << 24) | (al7230_rf_data_50[i] & 0xffffff);
@@ -1263,7 +1263,7 @@ void RFSynthesizer_initial(struct hw_data *pHwData)
}
}
-void BBProcessor_AL7230_2400(struct hw_data *pHwData)
+static void BBProcessor_AL7230_2400(struct hw_data *pHwData)
{
struct wb35_reg *reg = &pHwData->reg;
u32 pltmp[12];
@@ -1304,7 +1304,7 @@ void BBProcessor_AL7230_2400(struct hw_data *pHwData)
Wb35Reg_BurstWrite(pHwData, 0x1030, pltmp, 12, AUTO_INCREMENT);
}
-void BBProcessor_AL7230_5000(struct hw_data *pHwData)
+static void BBProcessor_AL7230_5000(struct hw_data *pHwData)
{
struct wb35_reg *reg = &pHwData->reg;
u32 pltmp[12];
@@ -1620,22 +1620,24 @@ void BBProcessor_initial(struct hw_data *pHwData)
reg->SQ3_filter[i] = 0x2f; /* half of Bit 0 ~ 6 */
}
-void set_tx_power_per_channel_max2829(struct hw_data *pHwData, struct chan_info Channel)
+static inline void set_tx_power_per_channel_max2829(struct hw_data *pHwData,
+ struct chan_info Channel)
{
RFSynthesizer_SetPowerIndex(pHwData, 100);
}
-void set_tx_power_per_channel_al2230(struct hw_data *pHwData, struct chan_info Channel)
+static void set_tx_power_per_channel_al2230(struct hw_data *pHwData,
+ struct chan_info Channel)
{
u8 index = 100;
-
if (pHwData->TxVgaFor24[Channel.ChanNo - 1] != 0xff)
index = pHwData->TxVgaFor24[Channel.ChanNo - 1];
RFSynthesizer_SetPowerIndex(pHwData, index);
}
-void set_tx_power_per_channel_al7230(struct hw_data *pHwData, struct chan_info Channel)
+static void set_tx_power_per_channel_al7230(struct hw_data *pHwData,
+ struct chan_info Channel)
{
u8 i, index = 100;
@@ -1658,7 +1660,8 @@ void set_tx_power_per_channel_al7230(struct hw_data *pHwData, struct chan_info
RFSynthesizer_SetPowerIndex(pHwData, index);
}
-void set_tx_power_per_channel_wb242(struct hw_data *pHwData, struct chan_info Channel)
+static void set_tx_power_per_channel_wb242(struct hw_data *pHwData,
+ struct chan_info Channel)
{
u8 index = 100;
diff --git a/drivers/staging/winbond/wb35reg.c b/drivers/staging/winbond/wb35reg.c
index 1bff7d1..a5e255b 100644
--- a/drivers/staging/winbond/wb35reg.c
+++ b/drivers/staging/winbond/wb35reg.c
@@ -1,10 +1,9 @@
#include "wb35reg_f.h"
+#include "phy_calibration.h"
#include <linux/usb.h>
#include <linux/slab.h>
-extern void phy_calibration_winbond(struct hw_data *phw_data, u32 frequency);
-
/*
* true : read command process successfully
* false : register not support
@@ -14,7 +13,8 @@ extern void phy_calibration_winbond(struct hw_data *phw_data, u32 frequency);
* Flag : AUTO_INCREMENT - RegisterNo will auto increment 4
* NO_INCREMENT - Function will write data into the same register
*/
-unsigned char Wb35Reg_BurstWrite(struct hw_data *pHwData, u16 RegisterNo, u32 *pRegisterData, u8 NumberOfData, u8 Flag)
+unsigned char Wb35Reg_BurstWrite(struct hw_data *pHwData, u16 RegisterNo,
+ u32 *pRegisterData, u8 NumberOfData, u8 Flag)
{
struct wb35_reg *reg = &pHwData->reg;
struct urb *urb = NULL;
@@ -30,49 +30,49 @@ unsigned char Wb35Reg_BurstWrite(struct hw_data *pHwData, u16 RegisterNo, u32 *p
/* Trying to use burst write function if use new hardware */
UrbSize = sizeof(struct wb35_reg_queue) + DataSize + sizeof(struct usb_ctrlrequest);
reg_queue = kzalloc(UrbSize, GFP_ATOMIC);
+ if (reg_queue == NULL)
+ return false;
+
urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (urb && reg_queue) {
- reg_queue->DIRECT = 2; /* burst write register */
- reg_queue->INDEX = RegisterNo;
- reg_queue->pBuffer = (u32 *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue));
- memcpy(reg_queue->pBuffer, pRegisterData, DataSize);
- /* the function for reversing register data from little endian to big endian */
- for (i = 0; i < NumberOfData ; i++)
- reg_queue->pBuffer[i] = cpu_to_le32(reg_queue->pBuffer[i]);
-
- dr = (struct usb_ctrlrequest *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue) + DataSize);
- dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE;
- dr->bRequest = 0x04; /* USB or vendor-defined request code, burst mode */
- dr->wValue = cpu_to_le16(Flag); /* 0: Register number auto-increment, 1: No auto increment */
- dr->wIndex = cpu_to_le16(RegisterNo);
- dr->wLength = cpu_to_le16(DataSize);
- reg_queue->Next = NULL;
- reg_queue->pUsbReq = dr;
- reg_queue->urb = urb;
+ if (urb == NULL) {
+ kfree(reg_queue);
+ return false;
+ }
- spin_lock_irq(&reg->EP0VM_spin_lock);
- if (reg->reg_first == NULL)
- reg->reg_first = reg_queue;
- else
- reg->reg_last->Next = reg_queue;
- reg->reg_last = reg_queue;
+ reg_queue->DIRECT = 2; /* burst write register */
+ reg_queue->INDEX = RegisterNo;
+ reg_queue->pBuffer = (u32 *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue));
+ memcpy(reg_queue->pBuffer, pRegisterData, DataSize);
+ /* the function for reversing register data from little endian to big endian */
+ for (i = 0; i < NumberOfData; i++)
+ reg_queue->pBuffer[i] = cpu_to_le32(reg_queue->pBuffer[i]);
+
+ dr = (struct usb_ctrlrequest *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue) + DataSize);
+ dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE;
+ dr->bRequest = 0x04; /* USB or vendor-defined request code, burst mode */
+ dr->wValue = cpu_to_le16(Flag); /* 0: Register number auto-increment, 1: No auto increment */
+ dr->wIndex = cpu_to_le16(RegisterNo);
+ dr->wLength = cpu_to_le16(DataSize);
+ reg_queue->Next = NULL;
+ reg_queue->pUsbReq = dr;
+ reg_queue->urb = urb;
- spin_unlock_irq(&reg->EP0VM_spin_lock);
+ spin_lock_irq(&reg->EP0VM_spin_lock);
+ if (reg->reg_first == NULL)
+ reg->reg_first = reg_queue;
+ else
+ reg->reg_last->Next = reg_queue;
+ reg->reg_last = reg_queue;
- /* Start EP0VM */
- Wb35Reg_EP0VM_start(pHwData);
+ spin_unlock_irq(&reg->EP0VM_spin_lock);
- return true;
- } else {
- if (urb)
- usb_free_urb(urb);
- kfree(reg_queue);
- return false;
- }
- return false;
+ /* Start EP0VM */
+ Wb35Reg_EP0VM_start(pHwData);
+
+ return true;
}
-void Wb35Reg_Update(struct hw_data *pHwData, u16 RegisterNo, u32 RegisterValue)
+void Wb35Reg_Update(struct hw_data *pHwData, u16 RegisterNo, u32 RegisterValue)
{
struct wb35_reg *reg = &pHwData->reg;
switch (RegisterNo) {
@@ -118,7 +118,8 @@ void Wb35Reg_Update(struct hw_data *pHwData, u16 RegisterNo, u32 RegisterValue
* true : read command process successfully
* false : register not support
*/
-unsigned char Wb35Reg_WriteSync(struct hw_data *pHwData, u16 RegisterNo, u32 RegisterValue)
+unsigned char Wb35Reg_WriteSync(struct hw_data *pHwData, u16 RegisterNo,
+ u32 RegisterValue)
{
struct wb35_reg *reg = &pHwData->reg;
int ret = -1;
@@ -139,9 +140,10 @@ unsigned char Wb35Reg_WriteSync(struct hw_data *pHwData, u16 RegisterNo, u32 Reg
/* Sync IoCallDriver */
reg->EP0vm_state = VM_RUNNING;
ret = usb_control_msg(pHwData->udev,
- usb_sndctrlpipe(pHwData->udev, 0),
- 0x03, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
- 0x0, RegisterNo, &RegisterValue, 4, HZ * 100);
+ usb_sndctrlpipe(pHwData->udev, 0),
+ 0x03,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+ 0x0, RegisterNo, &RegisterValue, 4, HZ * 100);
reg->EP0vm_state = VM_STOP;
reg->SyncIoPause = 0;
@@ -159,7 +161,8 @@ unsigned char Wb35Reg_WriteSync(struct hw_data *pHwData, u16 RegisterNo, u32 Reg
* true : read command process successfully
* false : register not support
*/
-unsigned char Wb35Reg_Write(struct hw_data *pHwData, u16 RegisterNo, u32 RegisterValue)
+unsigned char Wb35Reg_Write(struct hw_data *pHwData, u16 RegisterNo,
+ u32 RegisterValue)
{
struct wb35_reg *reg = &pHwData->reg;
struct usb_ctrlrequest *dr;
@@ -174,43 +177,44 @@ unsigned char Wb35Reg_Write(struct hw_data *pHwData, u16 RegisterNo, u32 Registe
/* update the register by send urb request */
UrbSize = sizeof(struct wb35_reg_queue) + sizeof(struct usb_ctrlrequest);
reg_queue = kzalloc(UrbSize, GFP_ATOMIC);
+ if (reg_queue == NULL)
+ return false;
+
urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (urb && reg_queue) {
- reg_queue->DIRECT = 1; /* burst write register */
- reg_queue->INDEX = RegisterNo;
- reg_queue->VALUE = cpu_to_le32(RegisterValue);
- reg_queue->RESERVED_VALID = false;
- dr = (struct usb_ctrlrequest *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue));
- dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE;
- dr->bRequest = 0x03; /* USB or vendor-defined request code, burst mode */
- dr->wValue = cpu_to_le16(0x0);
- dr->wIndex = cpu_to_le16(RegisterNo);
- dr->wLength = cpu_to_le16(4);
-
- /* Enter the sending queue */
- reg_queue->Next = NULL;
- reg_queue->pUsbReq = dr;
- reg_queue->urb = urb;
+ if (urb == NULL) {
+ kfree(reg_queue);
+ return false;
+ }
- spin_lock_irq(&reg->EP0VM_spin_lock);
- if (reg->reg_first == NULL)
- reg->reg_first = reg_queue;
- else
- reg->reg_last->Next = reg_queue;
- reg->reg_last = reg_queue;
+ reg_queue->DIRECT = 1; /* burst write register */
+ reg_queue->INDEX = RegisterNo;
+ reg_queue->VALUE = cpu_to_le32(RegisterValue);
+ reg_queue->RESERVED_VALID = false;
+ dr = (struct usb_ctrlrequest *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue));
+ dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE;
+ dr->bRequest = 0x03; /* USB or vendor-defined request code, burst mode */
+ dr->wValue = cpu_to_le16(0x0);
+ dr->wIndex = cpu_to_le16(RegisterNo);
+ dr->wLength = cpu_to_le16(4);
+
+ /* Enter the sending queue */
+ reg_queue->Next = NULL;
+ reg_queue->pUsbReq = dr;
+ reg_queue->urb = urb;
- spin_unlock_irq(&reg->EP0VM_spin_lock);
+ spin_lock_irq(&reg->EP0VM_spin_lock);
+ if (reg->reg_first == NULL)
+ reg->reg_first = reg_queue;
+ else
+ reg->reg_last->Next = reg_queue;
+ reg->reg_last = reg_queue;
- /* Start EP0VM */
- Wb35Reg_EP0VM_start(pHwData);
+ spin_unlock_irq(&reg->EP0VM_spin_lock);
- return true;
- } else {
- if (urb)
- usb_free_urb(urb);
- kfree(reg_queue);
- return false;
- }
+ /* Start EP0VM */
+ Wb35Reg_EP0VM_start(pHwData);
+
+ return true;
}
/*
@@ -238,43 +242,45 @@ unsigned char Wb35Reg_WriteWithCallbackValue(struct hw_data *pHwData,
/* update the register by send urb request */
UrbSize = sizeof(struct wb35_reg_queue) + sizeof(struct usb_ctrlrequest);
reg_queue = kzalloc(UrbSize, GFP_ATOMIC);
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (urb && reg_queue) {
- reg_queue->DIRECT = 1; /* burst write register */
- reg_queue->INDEX = RegisterNo;
- reg_queue->VALUE = cpu_to_le32(RegisterValue);
- /* NOTE : Users must guarantee the size of value will not exceed the buffer size. */
- memcpy(reg_queue->RESERVED, pValue, Len);
- reg_queue->RESERVED_VALID = true;
- dr = (struct usb_ctrlrequest *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue));
- dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE;
- dr->bRequest = 0x03; /* USB or vendor-defined request code, burst mode */
- dr->wValue = cpu_to_le16(0x0);
- dr->wIndex = cpu_to_le16(RegisterNo);
- dr->wLength = cpu_to_le16(4);
-
- /* Enter the sending queue */
- reg_queue->Next = NULL;
- reg_queue->pUsbReq = dr;
- reg_queue->urb = urb;
- spin_lock_irq(&reg->EP0VM_spin_lock);
- if (reg->reg_first == NULL)
- reg->reg_first = reg_queue;
- else
- reg->reg_last->Next = reg_queue;
- reg->reg_last = reg_queue;
-
- spin_unlock_irq(&reg->EP0VM_spin_lock);
+ if (reg_queue == NULL)
+ return false;
- /* Start EP0VM */
- Wb35Reg_EP0VM_start(pHwData);
- return true;
- } else {
- if (urb)
- usb_free_urb(urb);
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (urb == NULL) {
kfree(reg_queue);
return false;
}
+
+ reg_queue->DIRECT = 1; /* burst write register */
+ reg_queue->INDEX = RegisterNo;
+ reg_queue->VALUE = cpu_to_le32(RegisterValue);
+ /* NOTE : Users must guarantee the size of value will not exceed the buffer size. */
+ memcpy(reg_queue->RESERVED, pValue, Len);
+ reg_queue->RESERVED_VALID = true;
+ dr = (struct usb_ctrlrequest *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue));
+ dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE;
+ dr->bRequest = 0x03; /* USB or vendor-defined request code, burst mode */
+ dr->wValue = cpu_to_le16(0x0);
+ dr->wIndex = cpu_to_le16(RegisterNo);
+ dr->wLength = cpu_to_le16(4);
+
+ /* Enter the sending queue */
+ reg_queue->Next = NULL;
+ reg_queue->pUsbReq = dr;
+ reg_queue->urb = urb;
+ spin_lock_irq(&reg->EP0VM_spin_lock);
+ if (reg->reg_first == NULL)
+ reg->reg_first = reg_queue;
+ else
+ reg->reg_last->Next = reg_queue;
+ reg->reg_last = reg_queue;
+
+ spin_unlock_irq(&reg->EP0VM_spin_lock);
+
+ /* Start EP0VM */
+ Wb35Reg_EP0VM_start(pHwData);
+
+ return true;
}
/*
@@ -283,7 +289,8 @@ unsigned char Wb35Reg_WriteWithCallbackValue(struct hw_data *pHwData,
* pRegisterValue : It must be a resident buffer due to
* asynchronous read register.
*/
-unsigned char Wb35Reg_ReadSync(struct hw_data *pHwData, u16 RegisterNo, u32 *pRegisterValue)
+unsigned char Wb35Reg_ReadSync(struct hw_data *pHwData, u16 RegisterNo,
+ u32 *pRegisterValue)
{
struct wb35_reg *reg = &pHwData->reg;
u32 *pltmp = pRegisterValue;
@@ -302,9 +309,10 @@ unsigned char Wb35Reg_ReadSync(struct hw_data *pHwData, u16 RegisterNo, u32 *pRe
reg->EP0vm_state = VM_RUNNING;
ret = usb_control_msg(pHwData->udev,
- usb_rcvctrlpipe(pHwData->udev, 0),
- 0x01, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0x0, RegisterNo, pltmp, 4, HZ * 100);
+ usb_rcvctrlpipe(pHwData->udev, 0),
+ 0x01,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+ 0x0, RegisterNo, pltmp, 4, HZ * 100);
*pRegisterValue = cpu_to_le32(*pltmp);
@@ -329,7 +337,8 @@ unsigned char Wb35Reg_ReadSync(struct hw_data *pHwData, u16 RegisterNo, u32 *pRe
* pRegisterValue : It must be a resident buffer due to
* asynchronous read register.
*/
-unsigned char Wb35Reg_Read(struct hw_data *pHwData, u16 RegisterNo, u32 *pRegisterValue)
+unsigned char Wb35Reg_Read(struct hw_data *pHwData, u16 RegisterNo,
+ u32 *pRegisterValue)
{
struct wb35_reg *reg = &pHwData->reg;
struct usb_ctrlrequest *dr;
@@ -344,41 +353,41 @@ unsigned char Wb35Reg_Read(struct hw_data *pHwData, u16 RegisterNo, u32 *pRegist
/* update the variable by send Urb to read register */
UrbSize = sizeof(struct wb35_reg_queue) + sizeof(struct usb_ctrlrequest);
reg_queue = kzalloc(UrbSize, GFP_ATOMIC);
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (urb && reg_queue) {
- reg_queue->DIRECT = 0; /* read register */
- reg_queue->INDEX = RegisterNo;
- reg_queue->pBuffer = pRegisterValue;
- dr = (struct usb_ctrlrequest *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue));
- dr->bRequestType = USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN;
- dr->bRequest = 0x01; /* USB or vendor-defined request code, burst mode */
- dr->wValue = cpu_to_le16(0x0);
- dr->wIndex = cpu_to_le16(RegisterNo);
- dr->wLength = cpu_to_le16(4);
-
- /* Enter the sending queue */
- reg_queue->Next = NULL;
- reg_queue->pUsbReq = dr;
- reg_queue->urb = urb;
- spin_lock_irq(&reg->EP0VM_spin_lock);
- if (reg->reg_first == NULL)
- reg->reg_first = reg_queue;
- else
- reg->reg_last->Next = reg_queue;
- reg->reg_last = reg_queue;
-
- spin_unlock_irq(&reg->EP0VM_spin_lock);
-
- /* Start EP0VM */
- Wb35Reg_EP0VM_start(pHwData);
+ if (reg_queue == NULL)
+ return false;
- return true;
- } else {
- if (urb)
- usb_free_urb(urb);
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (urb == NULL) {
kfree(reg_queue);
return false;
}
+ reg_queue->DIRECT = 0; /* read register */
+ reg_queue->INDEX = RegisterNo;
+ reg_queue->pBuffer = pRegisterValue;
+ dr = (struct usb_ctrlrequest *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue));
+ dr->bRequestType = USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN;
+ dr->bRequest = 0x01; /* USB or vendor-defined request code, burst mode */
+ dr->wValue = cpu_to_le16(0x0);
+ dr->wIndex = cpu_to_le16(RegisterNo);
+ dr->wLength = cpu_to_le16(4);
+
+ /* Enter the sending queue */
+ reg_queue->Next = NULL;
+ reg_queue->pUsbReq = dr;
+ reg_queue->urb = urb;
+ spin_lock_irq(&reg->EP0VM_spin_lock);
+ if (reg->reg_first == NULL)
+ reg->reg_first = reg_queue;
+ else
+ reg->reg_last->Next = reg_queue;
+ reg->reg_last = reg_queue;
+
+ spin_unlock_irq(&reg->EP0VM_spin_lock);
+
+ /* Start EP0VM */
+ Wb35Reg_EP0VM_start(pHwData);
+
+ return true;
}
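
All three rewritten queueing paths in this file now share the same shape: allocate, bail out early on failure, and keep the success path at a single indent level. A minimal standalone sketch of that shape; struct wb35_reg_queue is the driver's type, while example_queue_write and the bare size parameter are illustrative only:

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/usb.h>

struct wb35_reg_queue;          /* only used as a pointer here */

static bool example_queue_write(size_t urb_size)
{
        struct wb35_reg_queue *reg_queue;
        struct urb *urb;

        reg_queue = kzalloc(urb_size, GFP_ATOMIC);
        if (reg_queue == NULL)
                return false;

        urb = usb_alloc_urb(0, GFP_ATOMIC);
        if (urb == NULL) {
                kfree(reg_queue);       /* undo the first allocation */
                return false;
        }

        /* ... fill reg_queue, chain it under the EP0 spinlock, start EP0VM ... */
        return true;
}
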
diff --git a/drivers/staging/winbond/wb35rx.c b/drivers/staging/winbond/wb35rx.c
index f118eeb..8d71bc2 100644
--- a/drivers/staging/winbond/wb35rx.c
+++ b/drivers/staging/winbond/wb35rx.c
@@ -343,8 +343,7 @@ void Wb35Rx_destroy(struct hw_data *pHwData)
} while (pWb35Rx->EP3vm_state != VM_STOP);
msleep(10); /* Delay for waiting function exit */
- if (pWb35Rx->RxUrb)
- usb_free_urb(pWb35Rx->RxUrb);
+ usb_free_urb(pWb35Rx->RxUrb);
pr_debug("Wb35Rx_destroy OK\n");
}
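
The guard removed above was redundant: usb_free_urb() already returns silently when handed a NULL pointer, so the unconditional call is safe.

        usb_free_urb(pWb35Rx->RxUrb);   /* no-op when RxUrb is NULL */
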
diff --git a/drivers/staging/wlags49_h2/Makefile b/drivers/staging/wlags49_h2/Makefile
index 31e1d89..6eeb5d1 100644
--- a/drivers/staging/wlags49_h2/Makefile
+++ b/drivers/staging/wlags49_h2/Makefile
@@ -51,5 +51,3 @@ $(WLNAME)-y += wl_profile.o \
mmd.o \
hcf.o \
dhf.o
-
-$(WLNAME)-$(CONFIG_SYSFS) += wl_sysfs.o
diff --git a/drivers/staging/wlags49_h2/wl_cs.c b/drivers/staging/wlags49_h2/wl_cs.c
index 7c7c77f..a458705 100644
--- a/drivers/staging/wlags49_h2/wl_cs.c
+++ b/drivers/staging/wlags49_h2/wl_cs.c
@@ -99,7 +99,6 @@
#include <wl_main.h>
#include <wl_netdev.h>
#include <wl_cs.h>
-#include <wl_sysfs.h>
/*******************************************************************************
@@ -133,6 +132,7 @@ static int wl_adapter_attach(struct pcmcia_device *link)
{
struct net_device *dev;
struct wl_private *lp;
+ int ret;
/*--------------------------------------------------------------------*/
DBG_FUNC("wl_adapter_attach");
@@ -154,10 +154,12 @@ static int wl_adapter_attach(struct pcmcia_device *link)
lp = wl_priv(dev);
lp->link = link;
- wl_adapter_insert(link);
+ ret = wl_adapter_insert(link);
+ if (ret != 0)
+ wl_device_dealloc(dev);
DBG_LEAVE(DbgInfo);
- return 0;
+ return ret;
} /* wl_adapter_attach */
/*============================================================================*/
@@ -175,7 +177,6 @@ static void wl_adapter_detach(struct pcmcia_device *link)
wl_adapter_release(link);
if (dev) {
- unregister_wlags_sysfs(dev);
unregister_netdev(dev);
wl_device_dealloc(dev);
}
@@ -224,7 +225,7 @@ static int wl_adapter_resume(struct pcmcia_device *link)
return 0;
} /* wl_adapter_resume */
-void wl_adapter_insert(struct pcmcia_device *link)
+int wl_adapter_insert(struct pcmcia_device *link)
{
struct net_device *dev;
int ret;
@@ -256,24 +257,23 @@ void wl_adapter_insert(struct pcmcia_device *link)
dev->base_addr = link->resource[0]->start;
SET_NETDEV_DEV(dev, &link->dev);
- if (register_netdev(dev) != 0) {
+ ret = register_netdev(dev);
+ if (ret != 0) {
printk("%s: register_netdev() failed\n", MODULE_NAME);
goto failed;
}
- register_wlags_sysfs(dev);
-
printk(KERN_INFO "%s: Wireless, io_addr %#03lx, irq %d, mac_address"
" %pM\n", dev->name, dev->base_addr, dev->irq, dev->dev_addr);
DBG_LEAVE(DbgInfo);
- return;
+ return 0;
failed:
wl_adapter_release(link);
DBG_LEAVE(DbgInfo);
- return;
+ return ret;
} /* wl_adapter_insert */
/*============================================================================*/
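
wl_adapter_insert() now reports failure instead of returning void, and wl_adapter_attach() both propagates that code and frees the net_device it allocated. The control flow reduces to this sketch (simplified and illustrative, omitting the pcmcia setup; it assumes the driver's usual <pcmcia/ds.h> and <linux/netdevice.h> includes):

static int example_attach_tail(struct pcmcia_device *link, struct net_device *dev)
{
        int ret;

        ret = wl_adapter_insert(link);  /* 0 on success, error code otherwise */
        if (ret != 0)
                wl_device_dealloc(dev); /* free the allocated net_device on failure */

        return ret;
}
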
diff --git a/drivers/staging/wlags49_h2/wl_cs.h b/drivers/staging/wlags49_h2/wl_cs.h
index a7ab579..081cc6f 100644
--- a/drivers/staging/wlags49_h2/wl_cs.h
+++ b/drivers/staging/wlags49_h2/wl_cs.h
@@ -65,10 +65,10 @@
/*******************************************************************************
- * function protoypes
+ * function prototypes
******************************************************************************/
-void wl_adapter_insert(struct pcmcia_device *link);
+int wl_adapter_insert(struct pcmcia_device *link);
void wl_adapter_release(struct pcmcia_device *link);
diff --git a/drivers/staging/wlags49_h2/wl_internal.h b/drivers/staging/wlags49_h2/wl_internal.h
index b230781..78129e9 100644
--- a/drivers/staging/wlags49_h2/wl_internal.h
+++ b/drivers/staging/wlags49_h2/wl_internal.h
@@ -883,7 +883,6 @@ struct wl_private
int is_registered;
int is_handling_int;
int firmware_present;
- bool sysfsCreated;
CFG_DRV_INFO_STRCT driverInfo;
CFG_IDENTITY_STRCT driverIdentity;
CFG_FW_IDENTITY_STRCT StationIdentity;
diff --git a/drivers/staging/wlags49_h2/wl_main.c b/drivers/staging/wlags49_h2/wl_main.c
index f28f15b..4353561 100644
--- a/drivers/staging/wlags49_h2/wl_main.c
+++ b/drivers/staging/wlags49_h2/wl_main.c
@@ -3171,7 +3171,9 @@ void wl_process_mailbox( struct wl_private *lp )
memset( ssid, 0, sizeof( ssid ));
strncpy( ssid, &probe_rsp->rawData[2],
- probe_rsp->rawData[1] );
+ min_t(u8,
+ probe_rsp->rawData[1],
+ HCF_MAX_NAME_LEN - 1));
DBG_TRACE( DbgInfo, "(%s) SSID : %s\n",
lp->dev->name, ssid );
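
The min_t() clamp keeps the copy inside the local ssid[] buffer even when the length byte taken from the received probe response is larger than the buffer. The general shape of the fix, as a self-contained sketch (helper name and parameters are illustrative):

#include <linux/types.h>
#include <linux/kernel.h>       /* min_t() */
#include <linux/string.h>       /* memset(), memcpy() */

static void example_copy_ssid(char *dst, size_t dst_len,
                              const u8 *ie, u8 ie_len)
{
        size_t n = min_t(size_t, ie_len, dst_len - 1);

        memset(dst, 0, dst_len);
        memcpy(dst, ie, n);     /* dst stays NUL-terminated */
}
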
diff --git a/drivers/staging/wlags49_h2/wl_sysfs.c b/drivers/staging/wlags49_h2/wl_sysfs.c
deleted file mode 100644
index 1508f04..0000000
--- a/drivers/staging/wlags49_h2/wl_sysfs.c
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * ex: sw=4
- */
-
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <net/sock.h>
-#include <linux/rtnetlink.h>
-#include <linux/wireless.h>
-#include <net/iw_handler.h>
-#include <linux/sysfs.h>
-
-#include <debug.h>
-#include <hcf.h>
-#include <hcfdef.h>
-
-#include <wl_if.h>
-#include <wl_internal.h>
-#include <wl_util.h>
-#include <wl_main.h>
-#include <wl_wext.h>
-#include <wl_priv.h>
-
-static inline int dev_isalive(const struct net_device *dev)
-{
- return dev->reg_state == NETREG_REGISTERED;
-}
-
-/*
- * empirically even if tallies are defined as 32 bits entities, only
- * high 16 bits are relevant; low half is always zero. It means tallies
- * are pretty much useless for traffic counting but at least give overview
- * about where error come from
- */
-static ssize_t show_tallies(struct device *d, struct device_attribute *attr,
- char *buf)
-{
- struct net_device *dev = to_net_dev(d);
- struct wl_private *lp = wl_priv(dev);
- unsigned long flags;
- CFG_HERMES_TALLIES_STRCT tallies;
- ssize_t ret = -EINVAL;
-
- rcu_read_lock();
- if (dev_isalive(dev)) {
- wl_lock(lp, &flags);
-
- ret = wl_get_tallies(lp, &tallies);
- if (ret == 0) {
- wl_unlock(lp, &flags);
- ret = snprintf(buf, PAGE_SIZE,
- "TxUnicastFrames: %u\n"
- "TxMulticastFrames: %u\n"
- "TxFragments: %u\n"
- "TxUnicastOctets: %u\n"
- "TxMulticastOctets: %u\n"
- "TxDeferredTransmissions: %u\n"
- "TxSingleRetryFrames: %u\n"
- "TxMultipleRetryFrames: %u\n"
- "TxRetryLimitExceeded: %u\n"
- "TxDiscards: %u\n"
- "RxUnicastFrames: %u\n"
- "RxMulticastFrames: %u\n"
- "RxFragments: %u\n"
- "RxUnicastOctets: %u\n"
- "RxMulticastOctets: %u\n"
- "RxFCSErrors: %u\n"
- "RxDiscardsNoBuffer: %u\n"
- "TxDiscardsWrongSA: %u\n"
- "RxWEPUndecryptable: %u\n"
- "RxMsgInMsgFragments: %u\n"
- "RxMsgInBadMsgFragments: %u\n"
- "RxDiscardsWEPICVError: %u\n"
- "RxDiscardsWEPExcluded: %u\n"
- ,
- (unsigned int)tallies.TxUnicastFrames,
- (unsigned int)tallies.TxMulticastFrames,
- (unsigned int)tallies.TxFragments,
- (unsigned int)tallies.TxUnicastOctets,
- (unsigned int)tallies.TxMulticastOctets,
- (unsigned int)tallies.TxDeferredTransmissions,
- (unsigned int)tallies.TxSingleRetryFrames,
- (unsigned int)tallies.TxMultipleRetryFrames,
- (unsigned int)tallies.TxRetryLimitExceeded,
- (unsigned int)tallies.TxDiscards,
- (unsigned int)tallies.RxUnicastFrames,
- (unsigned int)tallies.RxMulticastFrames,
- (unsigned int)tallies.RxFragments,
- (unsigned int)tallies.RxUnicastOctets,
- (unsigned int)tallies.RxMulticastOctets,
- (unsigned int)tallies.RxFCSErrors,
- (unsigned int)tallies.RxDiscardsNoBuffer,
- (unsigned int)tallies.TxDiscardsWrongSA,
- (unsigned int)tallies.RxWEPUndecryptable,
- (unsigned int)tallies.RxMsgInMsgFragments,
- (unsigned int)tallies.RxMsgInBadMsgFragments,
- (unsigned int)tallies.RxDiscardsWEPICVError,
- (unsigned int)tallies.RxDiscardsWEPExcluded);
- } else {
- wl_unlock( lp, &flags );
- }
- }
-
- rcu_read_unlock();
- return ret;
-}
-
-static DEVICE_ATTR(tallies, S_IRUGO, show_tallies, NULL);
-
-static struct attribute *wlags_attrs[] = {
- &dev_attr_tallies.attr,
- NULL
-};
-
-static struct attribute_group wlags_group = {
- .name = "wlags",
- .attrs = wlags_attrs,
-};
-
-void register_wlags_sysfs(struct net_device *net)
-{
- struct device *dev = &(net->dev);
- struct wl_private *lp = wl_priv(net);
- int err;
- err = sysfs_create_group(&dev->kobj, &wlags_group);
- if (!err)
- lp->sysfsCreated = true;
-}
-
-void unregister_wlags_sysfs(struct net_device *net)
-{
- struct device *dev = &(net->dev);
- struct wl_private *lp = wl_priv(net);
-
- if (lp->sysfsCreated)
- sysfs_remove_group(&dev->kobj, &wlags_group);
-}
diff --git a/drivers/staging/wlags49_h2/wl_sysfs.h b/drivers/staging/wlags49_h2/wl_sysfs.h
deleted file mode 100644
index fa658c3..0000000
--- a/drivers/staging/wlags49_h2/wl_sysfs.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifdef CONFIG_SYSFS
-extern void register_wlags_sysfs(struct net_device *);
-extern void unregister_wlags_sysfs(struct net_device *);
-#else
-static inline void register_wlags_sysfs(struct net_device *net) { }
-static inline void unregister_wlags_sysfs(struct net_device *net) { }
-#endif
diff --git a/drivers/staging/wlags49_h25/Makefile b/drivers/staging/wlags49_h25/Makefile
index 6e0159d..513ba01 100644
--- a/drivers/staging/wlags49_h25/Makefile
+++ b/drivers/staging/wlags49_h25/Makefile
@@ -50,6 +50,3 @@ $(WLNAME)-y += wl_profile.o \
mmd.o \
hcf.o \
dhf.o
-
-$(WLNAME)-$(CONFIG_SYSFS) += wl_sysfs.o
-
diff --git a/drivers/staging/wlags49_h25/wl_sysfs.c b/drivers/staging/wlags49_h25/wl_sysfs.c
deleted file mode 100644
index 6458ee6..0000000
--- a/drivers/staging/wlags49_h25/wl_sysfs.c
+++ /dev/null
@@ -1,2 +0,0 @@
-/* Use common source from wlags49_h2 */
-#include "../wlags49_h2/wl_sysfs.c"
diff --git a/drivers/staging/wlags49_h25/wl_sysfs.h b/drivers/staging/wlags49_h25/wl_sysfs.h
deleted file mode 100644
index eb819a5..0000000
--- a/drivers/staging/wlags49_h25/wl_sysfs.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* Use common source from wlags49_h2 */
-#include "../wlags49_h2/wl_sysfs.h"
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index 428a9be..76374b2 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -1122,8 +1122,7 @@ static void prism2sta_inf_hostscanresults(wlandevice_t *wlandev,
kfree(hw->scanresults);
- hw->scanresults = kmalloc(sizeof(hfa384x_InfFrame_t), GFP_ATOMIC);
- memcpy(hw->scanresults, inf, sizeof(hfa384x_InfFrame_t));
+ hw->scanresults = kmemdup(inf, sizeof(hfa384x_InfFrame_t), GFP_ATOMIC);
if (nbss == 0)
nbss = -1;
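
kmemdup() folds the kmalloc()+memcpy() pair into a single call with the same GFP flags; like kmalloc(), it can return NULL under memory pressure, so a caller would normally check the result, for example (sketch only, not part of this patch):

        hw->scanresults = kmemdup(inf, sizeof(hfa384x_InfFrame_t), GFP_ATOMIC);
        if (!hw->scanresults)
                return;         /* drop the update if the duplicate could not be allocated */
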
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 801ac40..3b3e17d 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -60,7 +60,7 @@ static inline void dumpVGAReg(void)
static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr,
struct xgi_hw_device_info *HwDeviceExtension,
- unsigned char modeno, unsigned char rateindex)
+ unsigned char modeno)
{
unsigned short ModeNo = modeno;
unsigned short ModeIdIndex = 0, ClockIndex = 0;
@@ -68,7 +68,7 @@ static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr,
int Clock;
InitTo330Pointer(HwDeviceExtension->jChipType, XGI_Pr);
- XGI_SearchModeID(ModeNo, &ModeIdIndex, XGI_Pr);
+ XGI_SearchModeID(ModeNo, &ModeIdIndex);
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
ModeIdIndex, XGI_Pr);
@@ -82,7 +82,7 @@ static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr,
static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
struct xgi_hw_device_info *HwDeviceExtension,
- unsigned char modeno, unsigned char rateindex,
+ unsigned char modeno,
u32 *left_margin, u32 *right_margin, u32 *upper_margin,
u32 *lower_margin, u32 *hsync_len, u32 *vsync_len, u32 *sync,
u32 *vmode)
@@ -96,7 +96,7 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
unsigned char sr_data, cr_data, cr_data2;
int B, C, D, F, temp, j;
InitTo330Pointer(HwDeviceExtension->jChipType, XGI_Pr);
- if (!XGI_SearchModeID(ModeNo, &ModeIdIndex, XGI_Pr))
+ if (!XGI_SearchModeID(ModeNo, &ModeIdIndex))
return 0;
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
ModeIdIndex, XGI_Pr);
@@ -1980,12 +1980,10 @@ static int xgifb_probe(struct pci_dev *pdev,
fb_info->var.pixclock = (u32) (1000000000 /
XGIfb_mode_rate_to_dclock(&xgifb_info->dev_info,
hw_info,
- XGIbios_mode[xgifb_info->mode_idx].mode_no,
- xgifb_info->rate_idx));
+ XGIbios_mode[xgifb_info->mode_idx].mode_no));
if (XGIfb_mode_rate_to_ddata(&xgifb_info->dev_info, hw_info,
XGIbios_mode[xgifb_info->mode_idx].mode_no,
- xgifb_info->rate_idx,
&fb_info->var.left_margin,
&fb_info->var.right_margin,
&fb_info->var.upper_margin,
diff --git a/drivers/staging/xgifb/vb_def.h b/drivers/staging/xgifb/vb_def.h
index 80c9723..5c739be 100644
--- a/drivers/staging/xgifb/vb_def.h
+++ b/drivers/staging/xgifb/vb_def.h
@@ -30,11 +30,6 @@
#define SetCRT2ToDualEdge 0x8000
#define ReserveTVOption 0x0008
-#define GatingCRT 0x0800
-#define DisableChB 0x1000
-#define EnableChB 0x2000
-#define DisableChA 0x4000
-#define EnableChA 0x8000
#define SetTVLowResolution 0x0400
#define TVSimuMode 0x0800
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
index 19ce5a9..2154172 100644
--- a/drivers/staging/xgifb/vb_init.c
+++ b/drivers/staging/xgifb/vb_init.c
@@ -54,14 +54,12 @@ XGINew_GetXG20DRAMType(struct xgi_hw_device_info *HwDeviceExtension,
udelay(800);
xgifb_reg_or(pVBInfo->P3d4, 0x4A, 0x80); /* Enable GPIOH read */
/* GPIOF 0:DVI 1:DVO */
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x48);
+ data = xgifb_reg_get(pVBInfo->P3d4, 0x48);
/* HOTPLUG_SUPPORT */
/* for current XG20 & XG21, GPIOH is floating, driver will
* fix DDR temporarily */
- if (temp & 0x01) /* DVI read GPIOH */
- data = 1; /* DDRII */
- else
- data = 0; /* DDR */
+ /* DVI read GPIOH */
+ data &= 0x01; /* 1=DDRII, 0=DDR */
/* ~HOTPLUG_SUPPORT */
xgifb_reg_or(pVBInfo->P3d4, 0xB4, 0x02);
return data;
@@ -104,10 +102,8 @@ static void XGINew_DDR1x_MRS_340(unsigned long P3c4,
xgifb_reg_set(P3c4, 0x1B, 0x00);
}
-static void XGINew_SetMemoryClock(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+static void XGINew_SetMemoryClock(struct vb_device_info *pVBInfo)
{
-
xgifb_reg_set(pVBInfo->P3c4,
0x28,
pVBInfo->MCLKData[pVBInfo->ram_type].SR28);
@@ -135,7 +131,7 @@ static void XGINew_DDRII_Bootup_XG27(
{
unsigned long P3d4 = P3c4 + 0x10;
pVBInfo->ram_type = XGINew_GetXG20DRAMType(HwDeviceExtension, pVBInfo);
- XGINew_SetMemoryClock(HwDeviceExtension, pVBInfo);
+ XGINew_SetMemoryClock(pVBInfo);
/* Set Double Frequency */
xgifb_reg_set(P3d4, 0x97, pVBInfo->XGINew_CR97); /* CR97 */
@@ -208,7 +204,7 @@ static void XGINew_DDR2_MRS_XG20(struct xgi_hw_device_info *HwDeviceExtension,
unsigned long P3d4 = P3c4 + 0x10;
pVBInfo->ram_type = XGINew_GetXG20DRAMType(HwDeviceExtension, pVBInfo);
- XGINew_SetMemoryClock(HwDeviceExtension, pVBInfo);
+ XGINew_SetMemoryClock(pVBInfo);
xgifb_reg_set(P3d4, 0x97, 0x11); /* CR97 */
@@ -282,7 +278,7 @@ static void XGINew_DDR1x_DefaultRegister(
unsigned long P3d4 = Port, P3c4 = Port - 0x10;
if (HwDeviceExtension->jChipType >= XG20) {
- XGINew_SetMemoryClock(HwDeviceExtension, pVBInfo);
+ XGINew_SetMemoryClock(pVBInfo);
xgifb_reg_set(P3d4,
0x82,
pVBInfo->CR40[11][pVBInfo->ram_type]); /* CR82 */
@@ -298,7 +294,7 @@ static void XGINew_DDR1x_DefaultRegister(
XGINew_DDR1x_MRS_XG20(P3c4, pVBInfo);
} else {
- XGINew_SetMemoryClock(HwDeviceExtension, pVBInfo);
+ XGINew_SetMemoryClock(pVBInfo);
switch (HwDeviceExtension->jChipType) {
case XG42:
@@ -878,8 +874,7 @@ done:
return rom_copy;
}
-static bool xgifb_read_vbios(struct pci_dev *pdev,
- struct vb_device_info *pVBInfo)
+static bool xgifb_read_vbios(struct pci_dev *pdev)
{
struct xgifb_video_info *xgifb_info = pci_get_drvdata(pdev);
u8 *vbios;
@@ -950,8 +945,7 @@ error:
return false;
}
-static void XGINew_ChkSenseStatus(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+static void XGINew_ChkSenseStatus(struct vb_device_info *pVBInfo)
{
unsigned short tempbx = 0, temp, tempcx, CR3CData;
@@ -993,8 +987,7 @@ static void XGINew_ChkSenseStatus(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_set(pVBInfo->P3d4, 0x3e, ((tempbx & 0xFF00) >> 8));
}
-static void XGINew_SetModeScratch(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+static void XGINew_SetModeScratch(struct vb_device_info *pVBInfo)
{
unsigned short temp, tempcl = 0, tempch = 0, CR31Data, CR38Data;
@@ -1079,44 +1072,23 @@ static unsigned short XGINew_SenseLCD(struct xgi_hw_device_info
*HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
- unsigned short temp;
-
- /* add lcd sense */
- if (HwDeviceExtension->ulCRT2LCDType == LCD_UNKNOWN) {
+ unsigned short temp = HwDeviceExtension->ulCRT2LCDType;
+
+ switch (HwDeviceExtension->ulCRT2LCDType) {
+ case LCD_640x480:
+ case LCD_1024x600:
+ case LCD_1152x864:
+ case LCD_1280x960:
+ case LCD_1152x768:
+ case LCD_1920x1440:
+ case LCD_2048x1536:
+ temp = 0; /* overwrite used ulCRT2LCDType */
+ break;
+ case LCD_UNKNOWN: /* unknown lcd, do nothing */
return 0;
- } else {
- temp = (unsigned short) HwDeviceExtension->ulCRT2LCDType;
- switch (HwDeviceExtension->ulCRT2LCDType) {
- case LCD_INVALID:
- case LCD_800x600:
- case LCD_1024x768:
- case LCD_1280x1024:
- break;
-
- case LCD_640x480:
- case LCD_1024x600:
- case LCD_1152x864:
- case LCD_1280x960:
- case LCD_1152x768:
- temp = 0;
- break;
-
- case LCD_1400x1050:
- case LCD_1280x768:
- case LCD_1600x1200:
- break;
-
- case LCD_1920x1440:
- case LCD_2048x1536:
- temp = 0;
- break;
-
- default:
- break;
- }
- xgifb_reg_and_or(pVBInfo->P3d4, 0x36, 0xF0, temp);
- return 1;
}
+ xgifb_reg_and_or(pVBInfo->P3d4, 0x36, 0xF0, temp);
+ return 1;
}
static void XGINew_GetXG21Sense(struct pci_dev *pdev,
@@ -1125,7 +1097,7 @@ static void XGINew_GetXG21Sense(struct pci_dev *pdev,
struct xgifb_video_info *xgifb_info = pci_get_drvdata(pdev);
unsigned char Temp;
- if (xgifb_read_vbios(pdev, pVBInfo)) { /* For XG21 LVDS */
+ if (xgifb_read_vbios(pdev)) { /* For XG21 LVDS */
xgifb_reg_or(pVBInfo->P3d4, 0x32, LCDSense);
/* LVDS on chip */
xgifb_reg_and_or(pVBInfo->P3d4, 0x38, ~0xE0, 0xC0);
@@ -1138,25 +1110,18 @@ static void XGINew_GetXG21Sense(struct pci_dev *pdev,
xgifb_reg_or(pVBInfo->P3d4, 0x32, LCDSense);
/* Enable read GPIOF */
xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x20, 0x20);
- Temp = xgifb_reg_get(pVBInfo->P3d4, 0x48) & 0x04;
- if (!Temp)
- xgifb_reg_and_or(pVBInfo->P3d4,
- 0x38,
- ~0xE0,
- 0x80); /* TMDS on chip */
+ if (xgifb_reg_get(pVBInfo->P3d4, 0x48) & 0x04)
+ Temp = 0xA0; /* Only DVO on chip */
else
- xgifb_reg_and_or(pVBInfo->P3d4,
- 0x38,
- ~0xE0,
- 0xA0); /* Only DVO on chip */
+ Temp = 0x80; /* TMDS on chip */
+ xgifb_reg_and_or(pVBInfo->P3d4, 0x38, ~0xE0, Temp);
/* Disable read GPIOF */
xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~0x20);
}
}
}
-static void XGINew_GetXG27Sense(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+static void XGINew_GetXG27Sense(struct vb_device_info *pVBInfo)
{
unsigned char Temp, bCR4A;
@@ -1206,9 +1171,7 @@ static unsigned char GetXG27FPBits(struct vb_device_info *pVBInfo)
/* enable GPIOA/B/C read */
xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x03, 0x03);
temp = xgifb_reg_get(pVBInfo->P3d4, 0x48);
- if (temp <= 2)
- temp &= 0x03;
- else
+ if (temp > 2)
temp = ((temp & 0x04) >> 1) | ((~temp) & 0x01);
xgifb_reg_set(pVBInfo->P3d4, 0x4A, CR4A);
@@ -1216,6 +1179,14 @@ static unsigned char GetXG27FPBits(struct vb_device_info *pVBInfo)
return temp;
}
+static bool xgifb_bridge_is_on(struct vb_device_info *vb_info)
+{
+ u8 flag;
+
+ flag = xgifb_reg_get(vb_info->Part4Port, 0x00);
+ return flag == 1 || flag == 2;
+}
+
unsigned char XGIInitNew(struct pci_dev *pdev)
{
struct xgifb_video_info *xgifb_info = pci_get_drvdata(pdev);
@@ -1235,10 +1206,6 @@ unsigned char XGIInitNew(struct pci_dev *pdev)
outb(0x67, pVBInfo->P3c2);
- if (HwDeviceExtension->jChipType < XG20)
- /* Run XGI_GetVBType before InitTo330Pointer */
- XGI_GetVBType(pVBInfo);
-
InitTo330Pointer(HwDeviceExtension->jChipType, pVBInfo);
/* Openkey */
@@ -1249,7 +1216,7 @@ unsigned char XGIInitNew(struct pci_dev *pdev)
XGINew_GetXG21Sense(pdev, pVBInfo);
if (HwDeviceExtension->jChipType == XG27)
- XGINew_GetXG27Sense(HwDeviceExtension, pVBInfo);
+ XGINew_GetXG27Sense(pVBInfo);
/* Reset Extended register */
@@ -1321,13 +1288,12 @@ unsigned char XGIInitNew(struct pci_dev *pdev)
if (HwDeviceExtension->jChipType < XG20) {
/* Set VB */
- XGI_UnLockCRT2(HwDeviceExtension, pVBInfo);
+ XGI_UnLockCRT2(pVBInfo);
/* disable VideoCapture */
xgifb_reg_and_or(pVBInfo->Part0Port, 0x3F, 0xEF, 0x00);
xgifb_reg_set(pVBInfo->Part1Port, 0x00, 0x00);
/* chk if BCLK>=100MHz */
temp1 = xgifb_reg_get(pVBInfo->P3d4, 0x7B);
- temp = (unsigned char) ((temp1 >> 4) & 0x0F);
xgifb_reg_set(pVBInfo->Part1Port,
0x02, XGI330_CRT2Data_1_2);
@@ -1353,7 +1319,7 @@ unsigned char XGIInitNew(struct pci_dev *pdev)
xgifb_reg_set(pVBInfo->P3c4, 0x33, XGI330_SR33);
if (HwDeviceExtension->jChipType < XG20) {
- if (XGI_BridgeIsOn(pVBInfo) == 1) {
+ if (xgifb_bridge_is_on(pVBInfo)) {
xgifb_reg_set(pVBInfo->Part2Port, 0x00, 0x1C);
xgifb_reg_set(pVBInfo->Part4Port,
0x0D, XGI330_CRT2Data_4_D);
@@ -1362,7 +1328,7 @@ unsigned char XGIInitNew(struct pci_dev *pdev)
xgifb_reg_set(pVBInfo->Part4Port,
0x10, XGI330_CRT2Data_4_10);
xgifb_reg_set(pVBInfo->Part4Port, 0x0F, 0x3F);
- XGI_LockCRT2(HwDeviceExtension, pVBInfo);
+ XGI_LockCRT2(pVBInfo);
}
} /* != XG20 */
@@ -1398,8 +1364,8 @@ unsigned char XGIInitNew(struct pci_dev *pdev)
xgifb_reg_set(pVBInfo->P3c4, 0x22, 0xfa);
xgifb_reg_set(pVBInfo->P3c4, 0x21, 0xa3);
- XGINew_ChkSenseStatus(HwDeviceExtension, pVBInfo);
- XGINew_SetModeScratch(HwDeviceExtension, pVBInfo);
+ XGINew_ChkSenseStatus(pVBInfo);
+ XGINew_SetModeScratch(pVBInfo);
xgifb_reg_set(pVBInfo->P3d4, 0x8c, 0x87);
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index 3adec3f..46dea3f 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -35,6 +35,9 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
pVBInfo->SR18 = XGI340_SR18;
pVBInfo->CR40 = XGI340_cr41;
+ if (ChipType < XG20)
+ XGI_GetVBType(pVBInfo);
+
/* 310 customization related */
if ((pVBInfo->VBType & VB_SIS301LV) || (pVBInfo->VBType & VB_SIS302LV))
pVBInfo->LCDCapList = XGI_LCDDLCapList;
@@ -60,9 +63,7 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
}
-static void XGI_SetSeqRegs(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+static void XGI_SetSeqRegs(struct vb_device_info *pVBInfo)
{
unsigned char SRdata, i;
@@ -76,8 +77,7 @@ static void XGI_SetSeqRegs(unsigned short ModeNo,
}
}
-static void XGI_SetCRTCRegs(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+static void XGI_SetCRTCRegs(struct vb_device_info *pVBInfo)
{
unsigned char CRTCdata;
unsigned short i;
@@ -93,8 +93,7 @@ static void XGI_SetCRTCRegs(struct xgi_hw_device_info *HwDeviceExtension,
}
}
-static void XGI_SetATTRegs(unsigned short ModeNo,
- unsigned short ModeIdIndex,
+static void XGI_SetATTRegs(unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned char ARdata;
@@ -168,8 +167,7 @@ static unsigned char XGI_SetDefaultVCLK(struct vb_device_info *pVBInfo)
return 0;
}
-static unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo,
- unsigned short ModeIdIndex,
+static unsigned char XGI_AjustCRT2Rate(unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex, unsigned short *i,
struct vb_device_info *pVBInfo)
{
@@ -180,66 +178,45 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo,
tempbx = XGI330_RefIndex[RefreshRateTableIndex + (*i)].ModeID;
tempax = 0;
- if (pVBInfo->IF_DEF_LVDS == 0) {
- if (pVBInfo->VBInfo & SetCRT2ToRAMDAC) {
- tempax |= SupportRAMDAC2;
-
- if (pVBInfo->VBType & VB_XGI301C)
- tempax |= SupportCRT2in301C;
- }
-
- /* 301b */
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
- tempax |= SupportLCD;
+ if (pVBInfo->VBInfo & SetCRT2ToRAMDAC) {
+ tempax |= SupportRAMDAC2;
- if (pVBInfo->LCDResInfo != Panel_1280x1024 &&
- pVBInfo->LCDResInfo != Panel_1280x960 &&
- (pVBInfo->LCDInfo & LCDNonExpanding) &&
- resinfo >= 9)
- return 0;
- }
+ if (pVBInfo->VBType & VB_XGI301C)
+ tempax |= SupportCRT2in301C;
+ }
- if (pVBInfo->VBInfo & SetCRT2ToHiVision) { /* for HiTV */
- tempax |= SupportHiVision;
- if ((pVBInfo->VBInfo & SetInSlaveMode) &&
- ((resinfo == 4) ||
- (resinfo == 3 &&
- (pVBInfo->SetFlag & TVSimuMode)) ||
- (resinfo > 7)))
- return 0;
- } else if (pVBInfo->VBInfo & (SetCRT2ToAVIDEO |
- SetCRT2ToSVIDEO |
- SetCRT2ToSCART |
- SetCRT2ToYPbPr525750 |
- SetCRT2ToHiVision)) {
- tempax |= SupportTV;
-
- if (pVBInfo->VBType & (VB_SIS301B |
- VB_SIS302B |
- VB_SIS301LV |
- VB_SIS302LV |
- VB_XGI301C))
- tempax |= SupportTV1024;
-
- if (!(pVBInfo->VBInfo & TVSetPAL) &&
- (modeflag & NoSupportSimuTV) &&
- (pVBInfo->VBInfo & SetInSlaveMode) &&
- (!(pVBInfo->VBInfo & SetNotSimuMode)))
- return 0;
- }
- } else if (pVBInfo->VBInfo & SetCRT2ToLCD) { /* for LVDS */
+ /* 301b */
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
tempax |= SupportLCD;
- if (resinfo > 0x08)
- return 0; /* 1024x768 */
-
- if (pVBInfo->LCDResInfo < Panel_1024x768) {
- if (resinfo > 0x07)
- return 0; /* 800x600 */
+ if (pVBInfo->LCDResInfo != Panel_1280x1024 &&
+ pVBInfo->LCDResInfo != Panel_1280x960 &&
+ (pVBInfo->LCDInfo & LCDNonExpanding) &&
+ resinfo >= 9)
+ return 0;
+ }
- if (resinfo == 0x04)
- return 0; /* 512x384 */
- }
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision) { /* for HiTV */
+ tempax |= SupportHiVision;
+ if ((pVBInfo->VBInfo & SetInSlaveMode) &&
+ ((resinfo == 4) ||
+ (resinfo == 3 && (pVBInfo->SetFlag & TVSimuMode)) ||
+ (resinfo > 7)))
+ return 0;
+ } else if (pVBInfo->VBInfo & (SetCRT2ToAVIDEO | SetCRT2ToSVIDEO |
+ SetCRT2ToSCART | SetCRT2ToYPbPr525750 |
+ SetCRT2ToHiVision)) {
+ tempax |= SupportTV;
+
+ if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV |
+ VB_SIS302LV | VB_XGI301C))
+ tempax |= SupportTV1024;
+
+ if (!(pVBInfo->VBInfo & TVSetPAL) &&
+ (modeflag & NoSupportSimuTV) &&
+ (pVBInfo->VBInfo & SetInSlaveMode) &&
+ (!(pVBInfo->VBInfo & SetNotSimuMode)))
+ return 0;
}
for (; XGI330_RefIndex[RefreshRateTableIndex + (*i)].ModeID ==
@@ -340,7 +317,6 @@ static void XGI_SetCRT1Timing_H(struct vb_device_info *pVBInfo,
}
static void XGI_SetCRT1Timing_V(unsigned short ModeIdIndex,
- unsigned short ModeNo,
struct vb_device_info *pVBInfo)
{
unsigned char data;
@@ -383,7 +359,7 @@ static void XGI_SetCRT1Timing_V(unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->P3d4, 0x09, data);
}
-static void XGI_SetCRT1CRTC(unsigned short ModeNo, unsigned short ModeIdIndex,
+static void XGI_SetCRT1CRTC(unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo,
struct xgi_hw_device_info *HwDeviceExtension)
@@ -409,7 +385,7 @@ static void XGI_SetCRT1CRTC(unsigned short ModeNo, unsigned short ModeIdIndex,
XGI_SetCRT1Timing_H(pVBInfo, HwDeviceExtension);
- XGI_SetCRT1Timing_V(ModeIdIndex, ModeNo, pVBInfo);
+ XGI_SetCRT1Timing_V(ModeIdIndex, pVBInfo);
if (pVBInfo->ModeType > 0x03)
xgifb_reg_set(pVBInfo->P3d4, 0x14, 0x4F);
@@ -421,8 +397,7 @@ static void XGI_SetCRT1CRTC(unsigned short ModeNo, unsigned short ModeIdIndex,
/* Output : Fill CRT Hsync/Vsync to SR2E/SR2F/SR30/SR33/SR34/SR3F */
/* Description : Set LCD timing */
/* --------------------------------------------------------------------- */
-static void XGI_SetXG21CRTC(unsigned short ModeNo, unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
+static void XGI_SetXG21CRTC(unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
unsigned char index, Tempax, Tempbx, Tempcx, Tempdx;
@@ -518,9 +493,7 @@ static void XGI_SetXG21CRTC(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->P3c4, 0x3F, Tempax);
}
-static void XGI_SetXG27CRTC(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
+static void XGI_SetXG27CRTC(unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
unsigned short index, Tempax, Tempbx, Tempcx;
@@ -623,8 +596,7 @@ static void XGI_SetXG27FPBits(struct vb_device_info *pVBInfo)
static void xgifb_set_lcd(int chip_id,
struct vb_device_info *pVBInfo,
- unsigned short RefreshRateTableIndex,
- unsigned short ModeNo)
+ unsigned short RefreshRateTableIndex)
{
unsigned short temp;
@@ -705,8 +677,7 @@ static void XGI_UpdateXG21CRTC(unsigned short ModeNo,
}
}
-static void XGI_SetCRT1DE(struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short ModeNo, unsigned short ModeIdIndex,
+static void XGI_SetCRT1DE(unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
@@ -759,7 +730,6 @@ static void XGI_SetCRT1DE(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_and_or(pVBInfo->P3d4, 0x07, ~0x42, tempax);
data = xgifb_reg_get(pVBInfo->P3d4, 0x07);
- data &= 0xFF;
tempax = 0;
if (tempbx & 0x04)
@@ -853,10 +823,8 @@ static void XGI_SetCRT1Offset(unsigned short ModeNo,
xgifb_reg_set(pVBInfo->P3c4, 0x10, ah);
}
-static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
- unsigned short ModeIdIndex,
+static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned short VCLKIndex, modeflag;
@@ -905,8 +873,7 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
return VCLKIndex;
}
-static void XGI_SetCRT1VCLK(unsigned short ModeNo,
- unsigned short ModeIdIndex,
+static void XGI_SetCRT1VCLK(unsigned short ModeIdIndex,
struct xgi_hw_device_info *HwDeviceExtension,
unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
@@ -914,19 +881,12 @@ static void XGI_SetCRT1VCLK(unsigned short ModeNo,
unsigned char index, data;
unsigned short vclkindex;
- if (pVBInfo->IF_DEF_LVDS == 1) {
- index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
- data = xgifb_reg_get(pVBInfo->P3c4, 0x31) & 0xCF;
- xgifb_reg_set(pVBInfo->P3c4, 0x31, data);
- xgifb_reg_set(pVBInfo->P3c4, 0x2B, XGI_VCLKData[index].SR2B);
- xgifb_reg_set(pVBInfo->P3c4, 0x2C, XGI_VCLKData[index].SR2C);
- xgifb_reg_set(pVBInfo->P3c4, 0x2D, 0x01);
- } else if ((pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
- | VB_SIS302LV | VB_XGI301C)) && (pVBInfo->VBInfo
- & XGI_SetCRT2ToLCDA)) {
- vclkindex = XGI_GetVCLK2Ptr(ModeNo, ModeIdIndex,
- RefreshRateTableIndex, HwDeviceExtension,
- pVBInfo);
+ if ((pVBInfo->IF_DEF_LVDS == 0) &&
+ (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV |
+ VB_SIS302LV | VB_XGI301C)) &&
+ (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)) {
+ vclkindex = XGI_GetVCLK2Ptr(ModeIdIndex, RefreshRateTableIndex,
+ pVBInfo);
data = xgifb_reg_get(pVBInfo->P3c4, 0x31) & 0xCF;
xgifb_reg_set(pVBInfo->P3c4, 0x31, data);
data = XGI_VBVCLKData[vclkindex].Part4_A;
@@ -973,9 +933,8 @@ static void XGI_SetXG21FPBits(struct vb_device_info *pVBInfo)
}
-static void XGI_SetCRT1FIFO(unsigned short ModeNo,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+static void XGI_SetCRT1FIFO(struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
{
unsigned short data;
@@ -996,7 +955,7 @@ static void XGI_SetCRT1FIFO(unsigned short ModeNo,
}
static void XGI_SetVCLKState(struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short ModeNo, unsigned short RefreshRateTableIndex,
+ unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
unsigned short data, data2 = 0;
@@ -1035,7 +994,7 @@ static void XGI_SetVCLKState(struct xgi_hw_device_info *HwDeviceExtension,
}
static void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short ModeNo, unsigned short ModeIdIndex,
+ unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
@@ -1088,8 +1047,7 @@ static void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension,
data = data ^ 0xA0;
xgifb_reg_and_or(pVBInfo->P3c4, 0x21, 0x1F, data);
- XGI_SetVCLKState(HwDeviceExtension, ModeNo, RefreshRateTableIndex,
- pVBInfo);
+ XGI_SetVCLKState(HwDeviceExtension, RefreshRateTableIndex, pVBInfo);
data = xgifb_reg_get(pVBInfo->P3d4, 0x31);
@@ -1147,8 +1105,7 @@ static void XGI_WriteDAC(unsigned short dl,
outb((unsigned short) bl, pVBInfo->P3c9);
}
-static void XGI_LoadDAC(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+static void XGI_LoadDAC(struct vb_device_info *pVBInfo)
{
unsigned short data, data2, i, k, m, n, o, si, di, bx, dl, al, ah, dh;
const unsigned short *table = XGINew_VGA_DAC;
@@ -1213,8 +1170,7 @@ static void XGI_LoadDAC(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
-static void XGI_GetLVDSResInfo(unsigned short ModeNo,
- unsigned short ModeIdIndex,
+static void XGI_GetLVDSResInfo(unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned short resindex, xres, yres, modeflag;
@@ -1244,9 +1200,7 @@ static void XGI_GetLVDSResInfo(unsigned short ModeNo,
}
static void const *XGI_GetLcdPtr(struct XGI330_LCDDataTablStruct const *table,
- unsigned short ModeNo,
unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
unsigned short i, tempdx, tempbx, modeflag;
@@ -1284,8 +1238,7 @@ static void const *XGI_GetLcdPtr(struct XGI330_LCDDataTablStruct const *table,
return table[i].DATAPTR;
}
-static struct SiS_TVData const *XGI_GetTVPtr(unsigned short ModeNo,
- unsigned short ModeIdIndex,
+static struct SiS_TVData const *XGI_GetTVPtr(unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
@@ -1314,17 +1267,15 @@ static struct SiS_TVData const *XGI_GetTVPtr(unsigned short ModeNo,
return &XGI_TVDataTable[i].DATAPTR[tempal];
}
-static void XGI_GetLVDSData(unsigned short ModeNo, unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+static void XGI_GetLVDSData(unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
struct SiS_LVDSData const *LCDPtr;
if (!(pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)))
return;
- LCDPtr = XGI_GetLcdPtr(XGI_EPLLCDDataPtr, ModeNo, ModeIdIndex,
- RefreshRateTableIndex, pVBInfo);
+ LCDPtr = XGI_GetLcdPtr(XGI_EPLLCDDataPtr, ModeIdIndex, pVBInfo);
pVBInfo->VGAHT = LCDPtr->VGAHT;
pVBInfo->VGAVT = LCDPtr->VGAVT;
pVBInfo->HT = LCDPtr->LCDHT;
@@ -1350,18 +1301,17 @@ static void XGI_GetLVDSData(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
-static void XGI_ModCRT1Regs(unsigned short ModeNo, unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+static void XGI_ModCRT1Regs(unsigned short ModeIdIndex,
+ struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
{
unsigned short i;
struct XGI_LVDSCRT1HDataStruct const *LCDPtr = NULL;
struct XGI_LVDSCRT1VDataStruct const *LCDPtr1 = NULL;
if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
- LCDPtr = XGI_GetLcdPtr(xgifb_epllcd_crt1_h, ModeNo, ModeIdIndex,
- RefreshRateTableIndex, pVBInfo);
+ LCDPtr = XGI_GetLcdPtr(xgifb_epllcd_crt1_h, ModeIdIndex,
+ pVBInfo);
for (i = 0; i < 8; i++)
pVBInfo->TimingH.data[i] = LCDPtr[0].Reg[i];
@@ -1370,14 +1320,13 @@ static void XGI_ModCRT1Regs(unsigned short ModeNo, unsigned short ModeIdIndex,
XGI_SetCRT1Timing_H(pVBInfo, HwDeviceExtension);
if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
- LCDPtr1 = XGI_GetLcdPtr(xgifb_epllcd_crt1_v, ModeNo,
- ModeIdIndex, RefreshRateTableIndex,
+ LCDPtr1 = XGI_GetLcdPtr(xgifb_epllcd_crt1_v, ModeIdIndex,
pVBInfo);
for (i = 0; i < 7; i++)
pVBInfo->TimingV.data[i] = LCDPtr1[0].Reg[i];
}
- XGI_SetCRT1Timing_V(ModeIdIndex, ModeNo, pVBInfo);
+ XGI_SetCRT1Timing_V(ModeIdIndex, pVBInfo);
}
static unsigned short XGI_GetLCDCapPtr(struct vb_device_info *pVBInfo)
@@ -1448,21 +1397,17 @@ static void XGI_GetLCDSync(unsigned short *HSyncWidth,
Index = XGI_GetLCDCapPtr(pVBInfo);
*HSyncWidth = pVBInfo->LCDCapList[Index].LCD_HSyncWidth;
*VSyncWidth = pVBInfo->LCDCapList[Index].LCD_VSyncWidth;
-
- return;
}
-static void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short tempbx, tempax, tempcx, tempdx, push1, push2, modeflag;
unsigned long temp, temp1, temp2, temp3, push3;
struct XGI330_LCDDataDesStruct2 const *LCDPtr1 = NULL;
modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- LCDPtr1 = XGI_GetLcdPtr(XGI_EPLLCDDesDataPtr, ModeNo, ModeIdIndex,
- RefreshRateTableIndex, pVBInfo);
+ LCDPtr1 = XGI_GetLcdPtr(XGI_EPLLCDDesDataPtr, ModeIdIndex, pVBInfo);
XGI_GetLCDSync(&tempax, &tempbx, pVBInfo);
push1 = tempbx;
@@ -1589,10 +1534,8 @@ static void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_and_or(pVBInfo->Part1Port, 0x1a, 0x07,
tempax);
- tempcx = pVBInfo->VGAVT;
tempbx = pVBInfo->VDE;
tempax = pVBInfo->VGAVDE;
- tempcx -= tempax;
temp = tempax; /* 0430 ylshieh */
temp1 = (temp << 18) / tempbx;
@@ -1712,12 +1655,10 @@ static void XGI_GetLCDVCLKPtr(unsigned char *di_0, unsigned char *di_1,
*di_1 = pVBInfo->LCDCapList[index].LCDA_VCLKData2;
}
}
- return;
}
static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
- unsigned short ModeNo, unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
unsigned short index, modeflag;
@@ -1799,15 +1740,14 @@ static void XGI_GetVCLKLen(unsigned char tempal, unsigned char *di_0,
}
}
-static void XGI_SetCRT2ECLK(unsigned short ModeNo, unsigned short ModeIdIndex,
+static void XGI_SetCRT2ECLK(unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
unsigned char di_0, di_1, tempal;
int i;
- tempal = XGI_GetVCLKPtr(RefreshRateTableIndex, ModeNo, ModeIdIndex,
- pVBInfo);
+ tempal = XGI_GetVCLKPtr(RefreshRateTableIndex, ModeIdIndex, pVBInfo);
XGI_GetVCLKLen(tempal, &di_0, &di_1, pVBInfo);
XGI_GetLCDVCLKPtr(&di_0, &di_1, pVBInfo);
@@ -1825,8 +1765,7 @@ static void XGI_SetCRT2ECLK(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
-static void XGI_UpdateModeInfo(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+static void XGI_UpdateModeInfo(struct vb_device_info *pVBInfo)
{
unsigned short tempcl, tempch, temp, tempbl, tempax;
@@ -1907,8 +1846,6 @@ static void XGI_UpdateModeInfo(struct xgi_hw_device_info *HwDeviceExtension,
if (!(pVBInfo->SetFlag & ReserveTVOption))
xgifb_reg_set(pVBInfo->P3d4, 0x3e, tempch);
- } else {
- return;
}
}
@@ -1916,9 +1853,6 @@ void XGI_GetVBType(struct vb_device_info *pVBInfo)
{
unsigned short flag, tempbx, tempah;
- if (pVBInfo->IF_DEF_LVDS != 0)
- return;
-
tempbx = VB_SIS302B;
flag = xgifb_reg_get(pVBInfo->Part4Port, 0x00);
if (flag == 0x02)
@@ -1957,8 +1891,7 @@ finish:
pVBInfo->VBType = tempbx;
}
-static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
+static void XGI_GetVBInfo(unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned short tempax, push, tempbx, temp, modeflag;
@@ -1995,37 +1928,23 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
- if (pVBInfo->IF_DEF_YPbPr == 1) {
- if (pVBInfo->VBType & (VB_SIS301LV|VB_SIS302LV|VB_XGI301C)) {
- if (temp & SetYPbPr) {
- if (pVBInfo->IF_DEF_HiVision == 1) {
- /* shampoo add for new scratch */
- temp = xgifb_reg_get(pVBInfo->P3d4,
- 0x35);
- temp &= YPbPrMode;
- tempbx |= SetCRT2ToHiVision;
+ if (pVBInfo->VBType & (VB_SIS301LV|VB_SIS302LV|VB_XGI301C)) {
+ if (temp & SetYPbPr) {
+ /* shampoo add for new scratch */
+ temp = xgifb_reg_get(pVBInfo->P3d4, 0x35);
+ temp &= YPbPrMode;
+ tempbx |= SetCRT2ToHiVision;
- if (temp != YPbPrMode1080i) {
- tempbx &= (~SetCRT2ToHiVision);
- tempbx |= SetCRT2ToYPbPr525750;
- }
- }
+ if (temp != YPbPrMode1080i) {
+ tempbx &= (~SetCRT2ToHiVision);
+ tempbx |= SetCRT2ToYPbPr525750;
}
}
}
tempax = push; /* restore CR31 */
- if (pVBInfo->IF_DEF_YPbPr == 1) {
- if (pVBInfo->IF_DEF_HiVision == 1)
- temp = 0x09FC;
- else
- temp = 0x097C;
- } else if (pVBInfo->IF_DEF_HiVision == 1) {
- temp = 0x01FC;
- } else {
- temp = 0x017C;
- }
+ temp = 0x09FC;
if (!(tempbx & temp)) {
tempax |= DisableCRT2Display;
@@ -2046,15 +1965,10 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
/* shampoo add */
/* for driver abnormal */
if (!(tempbx & (SwitchCRT2 | SetSimuScanMode))) {
- if (pVBInfo->IF_DEF_CRT2Monitor == 1) {
- if (tempbx & SetCRT2ToRAMDAC) {
- tempbx &= (0xFF00 | SetCRT2ToRAMDAC |
- SwitchCRT2 | SetSimuScanMode);
- tempbx &= (0x00FF | (~SetCRT2ToYPbPr525750));
- }
- } else {
- tempbx &= (~(SetCRT2ToRAMDAC | SetCRT2ToLCD |
- SetCRT2ToTV));
+ if (tempbx & SetCRT2ToRAMDAC) {
+ tempbx &= (0xFF00 | SetCRT2ToRAMDAC |
+ SwitchCRT2 | SetSimuScanMode);
+ tempbx &= (0x00FF | (~SetCRT2ToYPbPr525750));
}
}
@@ -2072,16 +1986,12 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx &= (0x00FF | (~SetCRT2ToYPbPr525750));
}
- if (pVBInfo->IF_DEF_YPbPr == 1) {
- if (tempbx & SetCRT2ToYPbPr525750)
- tempbx &= (0xFF00 | SwitchCRT2 | SetSimuScanMode);
- }
+ if (tempbx & SetCRT2ToYPbPr525750)
+ tempbx &= (0xFF00 | SwitchCRT2 | SetSimuScanMode);
- if (pVBInfo->IF_DEF_HiVision == 1) {
- if (tempbx & SetCRT2ToHiVision)
- tempbx &= (0xFF00 | SetCRT2ToHiVision | SwitchCRT2 |
- SetSimuScanMode);
- }
+ if (tempbx & SetCRT2ToHiVision)
+ tempbx &= (0xFF00 | SetCRT2ToHiVision | SwitchCRT2 |
+ SetSimuScanMode);
if (tempax & DisableCRT2Display) { /* Set Display Device Info */
if (!(tempbx & (SwitchCRT2 | SetSimuScanMode)))
@@ -2106,7 +2016,7 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->VBInfo = tempbx;
}
-static void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
+static void XGI_GetTVInfo(unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned short tempbx = 0, resinfo = 0, modeflag, index1;
@@ -2132,25 +2042,21 @@ static void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->VBInfo & SetCRT2ToSCART)
tempbx |= TVSetPAL;
- if (pVBInfo->IF_DEF_YPbPr == 1) {
- if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
- index1 = xgifb_reg_get(pVBInfo->P3d4, 0x35);
- index1 &= YPbPrMode;
+ if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
+ index1 = xgifb_reg_get(pVBInfo->P3d4, 0x35);
+ index1 &= YPbPrMode;
- if (index1 == YPbPrMode525i)
- tempbx |= TVSetYPbPr525i;
+ if (index1 == YPbPrMode525i)
+ tempbx |= TVSetYPbPr525i;
- if (index1 == YPbPrMode525p)
- tempbx = tempbx | TVSetYPbPr525p;
- if (index1 == YPbPrMode750p)
- tempbx = tempbx | TVSetYPbPr750p;
- }
+ if (index1 == YPbPrMode525p)
+ tempbx = tempbx | TVSetYPbPr525p;
+ if (index1 == YPbPrMode750p)
+ tempbx = tempbx | TVSetYPbPr750p;
}
- if (pVBInfo->IF_DEF_HiVision == 1) {
- if (pVBInfo->VBInfo & SetCRT2ToHiVision)
- tempbx = tempbx | TVSetHiVision | TVSetPAL;
- }
+ if (pVBInfo->VBInfo & SetCRT2ToHiVision)
+ tempbx = tempbx | TVSetHiVision | TVSetPAL;
if ((pVBInfo->VBInfo & SetInSlaveMode) &&
(!(pVBInfo->VBInfo & SetNotSimuMode)))
@@ -2177,8 +2083,8 @@ static void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->TVInfo = tempbx;
}
-static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
- unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
+static unsigned char XGI_GetLCDInfo(unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short temp, tempax, tempbx, resinfo = 0, LCDIdIndex;
@@ -2258,7 +2164,7 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
}
unsigned char XGI_SearchModeID(unsigned short ModeNo,
- unsigned short *ModeIdIndex, struct vb_device_info *pVBInfo)
+ unsigned short *ModeIdIndex)
{
for (*ModeIdIndex = 0;; (*ModeIdIndex)++) {
if (XGI330_EModeIDTable[*ModeIdIndex].Ext_ModeID == ModeNo)
@@ -2497,8 +2403,7 @@ static void XGI_SaveCRT2Info(unsigned short ModeNo,
xgifb_reg_and_or(pVBInfo->P3d4, 0x31, temp2, temp1);
}
-static void XGI_GetCRT2ResInfo(unsigned short ModeNo,
- unsigned short ModeIdIndex,
+static void XGI_GetCRT2ResInfo(unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned short xres, yres, modeflag, resindex;
@@ -2570,8 +2475,7 @@ static unsigned char XGI_IsLCDDualLink(struct vb_device_info *pVBInfo)
return 0;
}
-static void XGI_GetRAMDAC2DATA(unsigned short ModeNo,
- unsigned short ModeIdIndex,
+static void XGI_GetRAMDAC2DATA(unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
@@ -2613,7 +2517,7 @@ static void XGI_GetRAMDAC2DATA(unsigned short ModeNo,
pVBInfo->VT = tempbx;
}
-static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
+static void XGI_GetCRT2Data(unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
@@ -2628,14 +2532,13 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->RVBHRS = 50;
if (pVBInfo->VBInfo & SetCRT2ToRAMDAC) {
- XGI_GetRAMDAC2DATA(ModeNo, ModeIdIndex, RefreshRateTableIndex,
- pVBInfo);
+ XGI_GetRAMDAC2DATA(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
return;
}
if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
- LCDPtr = XGI_GetLcdPtr(XGI_LCDDataTable, ModeNo, ModeIdIndex,
- RefreshRateTableIndex, pVBInfo);
+ LCDPtr = XGI_GetLcdPtr(XGI_LCDDataTable, ModeIdIndex,
+ pVBInfo);
pVBInfo->RVBHCMAX = LCDPtr->RVBHCMAX;
pVBInfo->RVBHCFACT = LCDPtr->RVBHCFACT;
@@ -2657,10 +2560,7 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = 775;
else if (pVBInfo->VGAVDE == 600)
tempbx = 775;
- else
- tempbx = 768;
- } else
- tempbx = 768;
+ }
} else if (pVBInfo->LCDResInfo == Panel_1024x768x75) {
tempax = 1024;
tempbx = 768;
@@ -2719,7 +2619,7 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->VBInfo & (SetCRT2ToTV)) {
struct SiS_TVData const *TVPtr;
- TVPtr = XGI_GetTVPtr(ModeNo, ModeIdIndex, RefreshRateTableIndex,
+ TVPtr = XGI_GetTVPtr(ModeIdIndex, RefreshRateTableIndex,
pVBInfo);
pVBInfo->RVBHCMAX = TVPtr->RVBHCMAX;
@@ -2784,18 +2684,16 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->HT = tempax;
pVBInfo->VT = tempbx;
- return;
}
}
-static void XGI_SetCRT2VCLK(unsigned short ModeNo, unsigned short ModeIdIndex,
+static void XGI_SetCRT2VCLK(unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
unsigned char di_0, di_1, tempal;
- tempal = XGI_GetVCLKPtr(RefreshRateTableIndex, ModeNo, ModeIdIndex,
- pVBInfo);
+ tempal = XGI_GetVCLKPtr(RefreshRateTableIndex, ModeIdIndex, pVBInfo);
XGI_GetVCLKLen(tempal, &di_0, &di_1, pVBInfo);
XGI_GetLCDVCLKPtr(&di_0, &di_1, pVBInfo);
@@ -2817,8 +2715,7 @@ static void XGI_SetCRT2VCLK(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_or(pVBInfo->Part4Port, 0x12, 0x08);
}
-static unsigned short XGI_GetColorDepth(unsigned short ModeNo,
- unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
+static unsigned short XGI_GetColorDepth(unsigned short ModeIdIndex)
{
unsigned short ColorDepth[6] = { 1, 2, 4, 4, 6, 8 };
short index;
@@ -2835,9 +2732,7 @@ static unsigned short XGI_GetColorDepth(unsigned short ModeNo,
static unsigned short XGI_GetOffset(unsigned short ModeNo,
unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+ unsigned short RefreshRateTableIndex)
{
unsigned short temp, colordepth, modeinfo, index, infoflag,
ColorDepth[] = { 0x01, 0x02, 0x04 };
@@ -2852,7 +2747,7 @@ static unsigned short XGI_GetOffset(unsigned short ModeNo,
if (infoflag & InterlaceMode)
temp = temp << 1;
- colordepth = XGI_GetColorDepth(ModeNo, ModeIdIndex, pVBInfo);
+ colordepth = XGI_GetColorDepth(ModeIdIndex);
if ((ModeNo >= 0x7C) && (ModeNo <= 0x7E)) {
temp = ModeNo - 0x7C;
@@ -2867,7 +2762,6 @@ static unsigned short XGI_GetOffset(unsigned short ModeNo,
static void XGI_SetCRT2Offset(unsigned short ModeNo,
unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned short offset;
@@ -2876,8 +2770,7 @@ static void XGI_SetCRT2Offset(unsigned short ModeNo,
if (pVBInfo->VBInfo & SetInSlaveMode)
return;
- offset = XGI_GetOffset(ModeNo, ModeIdIndex, RefreshRateTableIndex,
- HwDeviceExtension, pVBInfo);
+ offset = XGI_GetOffset(ModeNo, ModeIdIndex, RefreshRateTableIndex);
temp = (unsigned char) (offset & 0xFF);
xgifb_reg_set(pVBInfo->Part1Port, 0x07, temp);
temp = (unsigned char) ((offset & 0xFF00) >> 8);
@@ -2895,14 +2788,12 @@ static void XGI_SetCRT2FIFO(struct vb_device_info *pVBInfo)
}
static void XGI_PreSetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
u8 tempcx;
- XGI_SetCRT2Offset(ModeNo, ModeIdIndex, RefreshRateTableIndex,
- HwDeviceExtension, pVBInfo);
+ XGI_SetCRT2Offset(ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
XGI_SetCRT2FIFO(pVBInfo);
for (tempcx = 4; tempcx < 7; tempcx++)
@@ -2912,8 +2803,7 @@ static void XGI_PreSetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->Part1Port, 0x02, 0x44); /* temp 0206 */
}
-static void XGI_SetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
+static void XGI_SetGroup1(unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
@@ -3015,9 +2905,6 @@ static void XGI_SetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex,
temp |= ((tempcx & 0xFF00) >> 8);
xgifb_reg_set(pVBInfo->Part1Port, 0x12, temp);
- tempax = pVBInfo->VGAVDE;
- tempbx = pVBInfo->VGAVDE;
- tempcx = pVBInfo->VGAVT;
/* BTVGA2VRS 0x10,0x11 */
tempbx = (pVBInfo->VGAVT + pVBInfo->VGAVDE) >> 1;
/* BTVGA2VRE 0x11 */
@@ -3071,8 +2958,6 @@ static unsigned short XGI_GetVGAHT2(struct vb_device_info *pVBInfo)
}
static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
unsigned short push1, push2, tempax, tempbx = 0, tempcx, temp, resinfo,
@@ -3178,7 +3063,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->VBInfo & SetCRT2ToTV) {
if (pVBInfo->TVInfo & TVSimuMode) {
if (ModeNo == 0x50) {
- if (pVBInfo->TVInfo & SetNTSCTV) {
+ if (pVBInfo->TVInfo == SetNTSCTV) {
xgifb_reg_set(pVBInfo->Part1Port,
0x07, 0x30);
xgifb_reg_set(pVBInfo->Part1Port,
@@ -3226,7 +3111,6 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
tempbx--;
- temp = tempbx & 0x00FF;
tempbx--;
temp = tempbx & 0x00FF;
/* 0x10 vertical Blank Start */
@@ -3361,13 +3245,9 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = 0x00;
xgifb_reg_set(pVBInfo->Part1Port, 0x1A, temp); /* 0x1A SR0E */
-
- return;
}
static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned short i, j, tempax, tempbx, tempcx, temp, push1, push2,
@@ -3445,9 +3325,6 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
temp &= 0x80;
xgifb_reg_and_or(pVBInfo->Part2Port, 0x0A, 0xFF, temp);
- if (pVBInfo->VBInfo & SetCRT2ToHiVision)
- tempax = 950;
-
if (pVBInfo->TVInfo & TVSetPAL)
tempax = 520;
else
@@ -3797,14 +3674,9 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
if (!(pVBInfo->VBInfo & SetInSlaveMode))
xgifb_reg_set(pVBInfo->Part2Port, 0x0B, 0x00);
}
-
- if (pVBInfo->VBInfo & SetCRT2ToTV)
- return;
}
-static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short RefreshRateTableIndex,
+static void XGI_SetLCDRegs(unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned short pushbx, tempax, tempbx, tempcx, temp, tempah,
@@ -3850,11 +3722,10 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
/* Customized LCDB Does not add */
if ((pVBInfo->VBType & VB_SIS301LV) || (pVBInfo->VBType & VB_SIS302LV))
- LCDBDesPtr = XGI_GetLcdPtr(xgifb_lcddldes, ModeNo, ModeIdIndex,
- RefreshRateTableIndex, pVBInfo);
+ LCDBDesPtr = XGI_GetLcdPtr(xgifb_lcddldes, ModeIdIndex,
+ pVBInfo);
else
- LCDBDesPtr = XGI_GetLcdPtr(XGI_LCDDesDataTable, ModeNo,
- ModeIdIndex, RefreshRateTableIndex,
+ LCDBDesPtr = XGI_GetLcdPtr(XGI_LCDDesDataTable, ModeIdIndex,
pVBInfo);
tempah = pVBInfo->LCDResInfo;
@@ -4081,8 +3952,8 @@ static void XGI_SetTap4Regs(struct vb_device_info *pVBInfo)
xgifb_reg_and_or(pVBInfo->Part2Port, 0x4E, ~0x14, 0x10);
}
-static void XGI_SetGroup3(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+static void XGI_SetGroup3(unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short i;
unsigned char const *tempdi;
@@ -4135,12 +4006,10 @@ static void XGI_SetGroup3(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->Part3Port, 0x28, 0x3f);
}
}
- return;
-} /* {end of XGI_SetGroup3} */
+}
-static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
+static void XGI_SetGroup4(unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned short tempax, tempcx, tempbx, modeflag, temp, temp2;
@@ -4211,11 +4080,6 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
tempebx = pVBInfo->VDE;
- if (tempcx & SetCRT2ToHiVision) {
- if (!(temp & 0xE000))
- tempbx = tempbx >> 1;
- }
-
tempcx = pVBInfo->RVBHRS;
temp = tempcx & 0x00FF;
xgifb_reg_set(pVBInfo->Part4Port, 0x18, temp);
@@ -4308,7 +4172,7 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
}
/* end 301b */
- XGI_SetCRT2VCLK(ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
+ XGI_SetCRT2VCLK(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
}
static void XGINew_EnableCRT2(struct vb_device_info *pVBInfo)
@@ -4316,8 +4180,7 @@ static void XGINew_EnableCRT2(struct vb_device_info *pVBInfo)
xgifb_reg_and_or(pVBInfo->P3c4, 0x1E, 0xFF, 0x20);
}
-static void XGI_SetGroup5(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+static void XGI_SetGroup5(struct vb_device_info *pVBInfo)
{
if (pVBInfo->ModeType == ModeVGA) {
if (!(pVBInfo->VBInfo & (SetInSlaveMode | LoadDACFlag
@@ -4325,25 +4188,15 @@ static void XGI_SetGroup5(unsigned short ModeNo, unsigned short ModeIdIndex,
XGINew_EnableCRT2(pVBInfo);
}
}
- return;
-}
-
-static void XGI_EnableGatingCRT(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- xgifb_reg_and_or(pVBInfo->P3d4, 0x63, 0xBF, 0x40);
}
-static void XGI_DisableGatingCRT(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+static void XGI_DisableGatingCRT(struct vb_device_info *pVBInfo)
{
-
xgifb_reg_and_or(pVBInfo->P3d4, 0x63, 0xBF, 0x00);
}
static unsigned char XGI_XG21CheckLVDSMode(struct xgifb_video_info *xgifb_info,
- unsigned short ModeNo, unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short ModeNo, unsigned short ModeIdIndex)
{
unsigned short xres, yres, colordepth, modeflag, resindex;
@@ -4372,7 +4225,7 @@ static unsigned char XGI_XG21CheckLVDSMode(struct xgifb_video_info *xgifb_info,
if (xres != xgifb_info->lvds_data.LVDSHDE ||
yres != xgifb_info->lvds_data.LVDSVDE) {
- colordepth = XGI_GetColorDepth(ModeNo, ModeIdIndex, pVBInfo);
+ colordepth = XGI_GetColorDepth(ModeIdIndex);
if (colordepth > 2)
return 0;
}
@@ -4381,7 +4234,6 @@ static unsigned char XGI_XG21CheckLVDSMode(struct xgifb_video_info *xgifb_info,
static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info,
int chip_id,
- unsigned short ModeNo,
unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
@@ -4592,38 +4444,6 @@ static unsigned char XGI_IsLCDON(struct vb_device_info *pVBInfo)
return 0;
}
-/* --------------------------------------------------------------------- */
-/* Function : XGI_EnableChISLCD */
-/* Input : */
-/* Output : 0 -> Not LCD mode */
-/* Description : if bool enable = true -> enable, else disable */
-/* --------------------------------------------------------------------- */
-static unsigned char XGI_EnableChISLCD(struct vb_device_info *pVBInfo,
- bool enable)
-{
- unsigned short tempbx, tempah;
-
- if (enable)
- tempbx = pVBInfo->SetFlag & (EnableChA | EnableChB);
- else
- tempbx = pVBInfo->SetFlag & (DisableChA | DisableChB);
-
- tempah = ~((unsigned short) xgifb_reg_get(pVBInfo->Part1Port, 0x2E));
-
- if (tempbx & (EnableChA | DisableChA)) {
- if (!(tempah & 0x08)) /* Chk LCDA Mode */
- return 0;
- }
-
- if (!(tempbx & (EnableChB | DisableChB)))
- return 0;
-
- if (tempah & 0x01) /* Chk LCDB Mode */
- return 1;
-
- return 0;
-}
-
static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
@@ -4636,21 +4456,8 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
if (!(pVBInfo->VBInfo &
(DisableCRT2Display | SetSimuScanMode))) {
if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
- if (pVBInfo->VBInfo & SetCRT2ToDualEdge) {
+ if (pVBInfo->VBInfo & SetCRT2ToDualEdge)
tempah = 0x7F; /* Disable Channel A */
- if (!(pVBInfo->VBInfo &
- XGI_SetCRT2ToLCDA))
- /* Disable Channel B */
- tempah = 0xBF;
-
- if (pVBInfo->SetFlag & DisableChB)
- /* force to disable Cahnnel */
- tempah &= 0xBF;
-
- if (pVBInfo->SetFlag & DisableChA)
- /* Force to disable Channel B */
- tempah &= 0x7F;
- }
}
}
@@ -4660,26 +4467,18 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
if (((pVBInfo->VBInfo &
(SetCRT2ToLCD | XGI_SetCRT2ToLCDA))) ||
- (XGI_EnableChISLCD(pVBInfo, false)) ||
(XGI_IsLCDON(pVBInfo)))
/* LVDS Driver power down */
xgifb_reg_or(pVBInfo->Part4Port, 0x30, 0x80);
}
- if ((pVBInfo->SetFlag & DisableChA) || (pVBInfo->VBInfo
- & (DisableCRT2Display | XGI_SetCRT2ToLCDA
- | SetSimuScanMode))) {
- if (pVBInfo->SetFlag & GatingCRT)
- XGI_EnableGatingCRT(HwDeviceExtension, pVBInfo);
+ if (pVBInfo->VBInfo & (DisableCRT2Display | XGI_SetCRT2ToLCDA |
+ SetSimuScanMode))
XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
- }
- if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
- if ((pVBInfo->SetFlag & DisableChA) || (pVBInfo->VBInfo
- & XGI_SetCRT2ToLCDA))
- /* Power down */
- xgifb_reg_and(pVBInfo->Part1Port, 0x1e, 0xdf);
- }
+ if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)
+ /* Power down */
+ xgifb_reg_and(pVBInfo->Part1Port, 0x1e, 0xdf);
/* disable TV as primary VGA swap */
xgifb_reg_and(pVBInfo->P3c4, 0x32, 0xdf);
@@ -4687,16 +4486,14 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
if ((pVBInfo->VBInfo & (SetSimuScanMode | SetCRT2ToDualEdge)))
xgifb_reg_and(pVBInfo->Part2Port, 0x00, 0xdf);
- if ((pVBInfo->SetFlag & DisableChB) ||
- (pVBInfo->VBInfo &
+ if ((pVBInfo->VBInfo &
(DisableCRT2Display | SetSimuScanMode)) ||
((!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)) &&
(pVBInfo->VBInfo &
(SetCRT2ToRAMDAC | SetCRT2ToLCD | SetCRT2ToTV))))
xgifb_reg_or(pVBInfo->Part1Port, 0x00, 0x80);
- if ((pVBInfo->SetFlag & DisableChB) ||
- (pVBInfo->VBInfo &
+ if ((pVBInfo->VBInfo &
(DisableCRT2Display | SetSimuScanMode)) ||
(!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)) ||
(pVBInfo->VBInfo &
@@ -4977,9 +4774,7 @@ static void XGI_SetLCDCap(struct vb_device_info *pVBInfo)
/* Output : */
/* Description : Set TV Customized Param. */
/* --------------------------------------------------------------------- */
-static void XGI_SetAntiFlicker(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+static void XGI_SetAntiFlicker(struct vb_device_info *pVBInfo)
{
unsigned short tempbx;
@@ -4996,9 +4791,7 @@ static void XGI_SetAntiFlicker(unsigned short ModeNo,
xgifb_reg_and_or(pVBInfo->Part2Port, 0x0A, 0x8F, tempah);
}
-static void XGI_SetEdgeEnhance(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+static void XGI_SetEdgeEnhance(struct vb_device_info *pVBInfo)
{
unsigned short tempbx;
@@ -5033,8 +4826,8 @@ static void XGI_SetPhaseIncr(struct vb_device_info *pVBInfo)
& 0xFF000000) >> 24));
}
-static void XGI_SetYFilter(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+static void XGI_SetYFilter(unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short tempbx, index;
unsigned char const *filterPtr;
@@ -5103,8 +4896,7 @@ static void XGI_SetYFilter(unsigned short ModeNo, unsigned short ModeIdIndex,
/* Output : */
/* Description : Customized Param. for 301 */
/* --------------------------------------------------------------------- */
-static void XGI_OEM310Setting(unsigned short ModeNo,
- unsigned short ModeIdIndex,
+static void XGI_OEM310Setting(unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
XGI_SetDelayComp(pVBInfo);
@@ -5114,11 +4906,11 @@ static void XGI_OEM310Setting(unsigned short ModeNo,
if (pVBInfo->VBInfo & SetCRT2ToTV) {
XGI_SetPhaseIncr(pVBInfo);
- XGI_SetYFilter(ModeNo, ModeIdIndex, pVBInfo);
- XGI_SetAntiFlicker(ModeNo, ModeIdIndex, pVBInfo);
+ XGI_SetYFilter(ModeIdIndex, pVBInfo);
+ XGI_SetAntiFlicker(pVBInfo);
if (pVBInfo->VBType & VB_SIS301)
- XGI_SetEdgeEnhance(ModeNo, ModeIdIndex, pVBInfo);
+ XGI_SetEdgeEnhance(pVBInfo);
}
}
@@ -5128,9 +4920,7 @@ static void XGI_OEM310Setting(unsigned short ModeNo,
/* Output : */
/* Description : Origin code for crt2group */
/* --------------------------------------------------------------------- */
-static void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+static void XGI_SetCRT2ModeRegs(struct vb_device_info *pVBInfo)
{
unsigned short tempbl;
short tempcl;
@@ -5292,35 +5082,14 @@ reg_and_or:
}
-void XGI_UnLockCRT2(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+void XGI_UnLockCRT2(struct vb_device_info *pVBInfo)
{
-
xgifb_reg_and_or(pVBInfo->Part1Port, 0x2f, 0xFF, 0x01);
-
}
-void XGI_LockCRT2(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+void XGI_LockCRT2(struct vb_device_info *pVBInfo)
{
-
xgifb_reg_and_or(pVBInfo->Part1Port, 0x2F, 0xFE, 0x00);
-
-}
-
-unsigned char XGI_BridgeIsOn(struct vb_device_info *pVBInfo)
-{
- unsigned short flag;
-
- if (pVBInfo->IF_DEF_LVDS == 1) {
- return 1;
- } else {
- flag = xgifb_reg_get(pVBInfo->Part4Port, 0x00);
- if ((flag == 1) || (flag == 2))
- return 1; /* 301b */
- else
- return 0;
- }
}
unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
@@ -5344,15 +5113,10 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
if (pVBInfo->SetFlag & ProgrammingCRT2) {
if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
- if (pVBInfo->IF_DEF_LVDS == 0) {
- temp = LCDARefreshIndex[
- pVBInfo->LCDResInfo & 0x07];
+ temp = LCDARefreshIndex[pVBInfo->LCDResInfo & 0x07];
- if (index > temp)
- index = temp;
- } else {
- index = 0;
- }
+ if (index > temp)
+ index = temp;
}
}
@@ -5397,8 +5161,8 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
}
i--;
if ((pVBInfo->SetFlag & ProgrammingCRT2)) {
- temp = XGI_AjustCRT2Rate(ModeNo, ModeIdIndex,
- RefreshRateTableIndex, &i, pVBInfo);
+ temp = XGI_AjustCRT2Rate(ModeIdIndex, RefreshRateTableIndex,
+ &i, pVBInfo);
}
return RefreshRateTableIndex + i;
}
@@ -5412,12 +5176,11 @@ static void XGI_SetLCDAGroup(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->SetFlag |= ProgrammingCRT2;
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
ModeIdIndex, pVBInfo);
- XGI_GetLVDSResInfo(ModeNo, ModeIdIndex, pVBInfo);
- XGI_GetLVDSData(ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
- XGI_ModCRT1Regs(ModeNo, ModeIdIndex, RefreshRateTableIndex,
- HwDeviceExtension, pVBInfo);
- XGI_SetLVDSRegs(ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
- XGI_SetCRT2ECLK(ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
+ XGI_GetLVDSResInfo(ModeIdIndex, pVBInfo);
+ XGI_GetLVDSData(ModeIdIndex, pVBInfo);
+ XGI_ModCRT1Regs(ModeIdIndex, HwDeviceExtension, pVBInfo);
+ XGI_SetLVDSRegs(ModeIdIndex, pVBInfo);
+ XGI_SetCRT2ECLK(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
}
static unsigned char XGI_SetCRT2Group301(unsigned short ModeNo,
@@ -5427,29 +5190,23 @@ static unsigned char XGI_SetCRT2Group301(unsigned short ModeNo,
unsigned short ModeIdIndex, RefreshRateTableIndex;
pVBInfo->SetFlag |= ProgrammingCRT2;
- XGI_SearchModeID(ModeNo, &ModeIdIndex, pVBInfo);
+ XGI_SearchModeID(ModeNo, &ModeIdIndex);
pVBInfo->SelectCRT2Rate = 4;
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
ModeIdIndex, pVBInfo);
XGI_SaveCRT2Info(ModeNo, pVBInfo);
- XGI_GetCRT2ResInfo(ModeNo, ModeIdIndex, pVBInfo);
- XGI_GetCRT2Data(ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
- XGI_PreSetGroup1(ModeNo, ModeIdIndex, HwDeviceExtension,
- RefreshRateTableIndex, pVBInfo);
- XGI_SetGroup1(ModeNo, ModeIdIndex, HwDeviceExtension,
- RefreshRateTableIndex, pVBInfo);
- XGI_SetLockRegs(ModeNo, ModeIdIndex, HwDeviceExtension,
- RefreshRateTableIndex, pVBInfo);
- XGI_SetGroup2(ModeNo, ModeIdIndex, RefreshRateTableIndex,
- HwDeviceExtension, pVBInfo);
- XGI_SetLCDRegs(ModeNo, ModeIdIndex, HwDeviceExtension,
- RefreshRateTableIndex, pVBInfo);
+ XGI_GetCRT2ResInfo(ModeIdIndex, pVBInfo);
+ XGI_GetCRT2Data(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
+ XGI_PreSetGroup1(ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
+ XGI_SetGroup1(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
+ XGI_SetLockRegs(ModeNo, ModeIdIndex, pVBInfo);
+ XGI_SetGroup2(ModeNo, ModeIdIndex, pVBInfo);
+ XGI_SetLCDRegs(ModeIdIndex, pVBInfo);
XGI_SetTap4Regs(pVBInfo);
- XGI_SetGroup3(ModeNo, ModeIdIndex, pVBInfo);
- XGI_SetGroup4(ModeNo, ModeIdIndex, RefreshRateTableIndex,
- HwDeviceExtension, pVBInfo);
- XGI_SetCRT2VCLK(ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
- XGI_SetGroup5(ModeNo, ModeIdIndex, pVBInfo);
+ XGI_SetGroup3(ModeIdIndex, pVBInfo);
+ XGI_SetGroup4(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
+ XGI_SetCRT2VCLK(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
+ XGI_SetGroup5(pVBInfo);
XGI_AutoThreshold(pVBInfo);
return 1;
}
@@ -5555,53 +5312,37 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
| VB_SIS302LV | VB_XGI301C)) {
- if (!(pVBInfo->SetFlag & DisableChA)) {
- if ((pVBInfo->SetFlag & EnableChA) ||
- (pVBInfo->VBInfo & SetCRT2ToDualEdge)) {
- /* Power on */
- xgifb_reg_set(pVBInfo->Part1Port, 0x1E, 0x20);
+ if (pVBInfo->VBInfo & SetCRT2ToDualEdge)
+ /* Power on */
+ xgifb_reg_set(pVBInfo->Part1Port, 0x1E, 0x20);
+
+ if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToTV |
+ SetCRT2ToRAMDAC)) {
+ tempah = xgifb_reg_get(pVBInfo->P3c4, 0x32);
+ tempah &= 0xDF;
+ if (pVBInfo->VBInfo & SetInSlaveMode) {
+ if (!(pVBInfo->VBInfo & SetCRT2ToRAMDAC))
+ tempah |= 0x20;
}
- }
-
- if (!(pVBInfo->SetFlag & DisableChB)) {
- if ((pVBInfo->SetFlag & EnableChB) || (pVBInfo->VBInfo
- & (SetCRT2ToLCD | SetCRT2ToTV
- | SetCRT2ToRAMDAC))) {
- tempah = xgifb_reg_get(pVBInfo->P3c4, 0x32);
- tempah &= 0xDF;
- if (pVBInfo->VBInfo & SetInSlaveMode) {
- if (!(pVBInfo->VBInfo &
- SetCRT2ToRAMDAC))
- tempah |= 0x20;
- }
- xgifb_reg_set(pVBInfo->P3c4, 0x32, tempah);
- xgifb_reg_or(pVBInfo->P3c4, 0x1E, 0x20);
+ xgifb_reg_set(pVBInfo->P3c4, 0x32, tempah);
+ xgifb_reg_or(pVBInfo->P3c4, 0x1E, 0x20);
- tempah = xgifb_reg_get(pVBInfo->Part1Port,
- 0x2E);
+ tempah = xgifb_reg_get(pVBInfo->Part1Port, 0x2E);
- if (!(tempah & 0x80))
- xgifb_reg_or(pVBInfo->Part1Port,
- 0x2E, 0x80);
- xgifb_reg_and(pVBInfo->Part1Port, 0x00, 0x7F);
- }
+ if (!(tempah & 0x80))
+ xgifb_reg_or(pVBInfo->Part1Port, 0x2E, 0x80);
+ xgifb_reg_and(pVBInfo->Part1Port, 0x00, 0x7F);
}
- if ((pVBInfo->SetFlag & (EnableChA | EnableChB))
- || (!(pVBInfo->VBInfo & DisableCRT2Display))) {
+ if (!(pVBInfo->VBInfo & DisableCRT2Display)) {
xgifb_reg_and_or(pVBInfo->Part2Port, 0x00, ~0xE0,
0x20); /* shampoo 0129 */
if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
- if (!XGI_EnableChISLCD(pVBInfo, false)) {
- if (XGI_EnableChISLCD(pVBInfo, true) ||
- (pVBInfo->VBInfo &
- (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)))
- /* LVDS PLL power on */
- xgifb_reg_and(
- pVBInfo->Part4Port,
- 0x2A,
- 0x7F);
- }
+ if (pVBInfo->VBInfo &
+ (SetCRT2ToLCD | XGI_SetCRT2ToLCDA))
+ /* LVDS PLL power on */
+ xgifb_reg_and(pVBInfo->Part4Port, 0x2A,
+ 0x7F);
/* LVDS Driver power on */
xgifb_reg_and(pVBInfo->Part4Port, 0x30, 0x7F);
}
@@ -5618,32 +5359,14 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
tempah = tempah & 0x40;
if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)
tempah = tempah ^ 0xC0;
-
- if (pVBInfo->SetFlag & DisableChB)
- tempah &= 0xBF;
-
- if (pVBInfo->SetFlag & DisableChA)
- tempah &= 0x7F;
-
- if (pVBInfo->SetFlag & EnableChB)
- tempah |= 0x40;
-
- if (pVBInfo->SetFlag & EnableChA)
- tempah |= 0x80;
}
}
/* EnablePart4_1F */
xgifb_reg_or(pVBInfo->Part4Port, 0x1F, tempah);
- if (!(pVBInfo->SetFlag & DisableChA)) {
- if (!(pVBInfo->SetFlag & GatingCRT)) {
- XGI_DisableGatingCRT(HwDeviceExtension,
- pVBInfo);
- XGI_DisplayOn(xgifb_info, HwDeviceExtension,
- pVBInfo);
- }
- }
+ XGI_DisableGatingCRT(pVBInfo);
+ XGI_DisplayOn(xgifb_info, HwDeviceExtension, pVBInfo);
} /* 301 */
else { /* LVDS */
if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToLCD
@@ -5667,10 +5390,10 @@ static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
{
unsigned short RefreshRateTableIndex, temp;
- XGI_SetSeqRegs(ModeNo, ModeIdIndex, pVBInfo);
+ XGI_SetSeqRegs(pVBInfo);
outb(XGI330_StandTable.MISC, pVBInfo->P3c2);
- XGI_SetCRTCRegs(HwDeviceExtension, pVBInfo);
- XGI_SetATTRegs(ModeNo, ModeIdIndex, pVBInfo);
+ XGI_SetCRTCRegs(pVBInfo);
+ XGI_SetATTRegs(ModeIdIndex, pVBInfo);
XGI_SetGRCRegs(pVBInfo);
XGI_ClearExt1Regs(pVBInfo);
@@ -5695,13 +5418,12 @@ static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
ModeIdIndex, pVBInfo);
if (RefreshRateTableIndex != 0xFFFF) {
XGI_SetSync(RefreshRateTableIndex, pVBInfo);
- XGI_SetCRT1CRTC(ModeNo, ModeIdIndex, RefreshRateTableIndex,
+ XGI_SetCRT1CRTC(ModeIdIndex, RefreshRateTableIndex,
pVBInfo, HwDeviceExtension);
- XGI_SetCRT1DE(HwDeviceExtension, ModeNo, ModeIdIndex,
- RefreshRateTableIndex, pVBInfo);
+ XGI_SetCRT1DE(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
XGI_SetCRT1Offset(ModeNo, ModeIdIndex, RefreshRateTableIndex,
HwDeviceExtension, pVBInfo);
- XGI_SetCRT1VCLK(ModeNo, ModeIdIndex, HwDeviceExtension,
+ XGI_SetCRT1VCLK(ModeIdIndex, HwDeviceExtension,
RefreshRateTableIndex, pVBInfo);
}
@@ -5710,30 +5432,28 @@ static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
if (temp & 0xA0) {
if (HwDeviceExtension->jChipType == XG27)
- XGI_SetXG27CRTC(ModeNo, ModeIdIndex,
- RefreshRateTableIndex, pVBInfo);
+ XGI_SetXG27CRTC(RefreshRateTableIndex, pVBInfo);
else
- XGI_SetXG21CRTC(ModeNo, ModeIdIndex,
- RefreshRateTableIndex, pVBInfo);
+ XGI_SetXG21CRTC(RefreshRateTableIndex, pVBInfo);
XGI_UpdateXG21CRTC(ModeNo, pVBInfo,
RefreshRateTableIndex);
xgifb_set_lcd(HwDeviceExtension->jChipType,
- pVBInfo, RefreshRateTableIndex, ModeNo);
+ pVBInfo, RefreshRateTableIndex);
if (pVBInfo->IF_DEF_LVDS == 1)
xgifb_set_lvds(xgifb_info,
HwDeviceExtension->jChipType,
- ModeNo, ModeIdIndex, pVBInfo);
+ ModeIdIndex, pVBInfo);
}
}
pVBInfo->SetFlag &= (~ProgrammingCRT2);
- XGI_SetCRT1FIFO(ModeNo, HwDeviceExtension, pVBInfo);
- XGI_SetCRT1ModeRegs(HwDeviceExtension, ModeNo, ModeIdIndex,
- RefreshRateTableIndex, pVBInfo);
- XGI_LoadDAC(ModeNo, ModeIdIndex, pVBInfo);
+ XGI_SetCRT1FIFO(HwDeviceExtension, pVBInfo);
+ XGI_SetCRT1ModeRegs(HwDeviceExtension, ModeIdIndex,
+ RefreshRateTableIndex, pVBInfo);
+ XGI_LoadDAC(pVBInfo);
}
unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
@@ -5745,16 +5465,8 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
struct vb_device_info *pVBInfo = &VBINF;
pVBInfo->IF_DEF_LVDS = 0;
- if (HwDeviceExtension->jChipType >= XG20) {
- pVBInfo->IF_DEF_YPbPr = 0;
- pVBInfo->IF_DEF_HiVision = 0;
- pVBInfo->IF_DEF_CRT2Monitor = 0;
+ if (HwDeviceExtension->jChipType >= XG20)
pVBInfo->VBType = 0; /*set VBType default 0*/
- } else {
- pVBInfo->IF_DEF_YPbPr = 1;
- pVBInfo->IF_DEF_HiVision = 1;
- pVBInfo->IF_DEF_CRT2Monitor = 1;
- }
XGIRegInit(pVBInfo, xgifb_info->vga_base);
@@ -5770,23 +5482,20 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
}
}
- if (HwDeviceExtension->jChipType < XG20)
- XGI_GetVBType(pVBInfo);
-
InitTo330Pointer(HwDeviceExtension->jChipType, pVBInfo);
if (ModeNo & 0x80)
ModeNo = ModeNo & 0x7F;
xgifb_reg_set(pVBInfo->P3c4, 0x05, 0x86);
if (HwDeviceExtension->jChipType < XG20)
- XGI_UnLockCRT2(HwDeviceExtension, pVBInfo);
+ XGI_UnLockCRT2(pVBInfo);
- XGI_SearchModeID(ModeNo, &ModeIdIndex, pVBInfo);
+ XGI_SearchModeID(ModeNo, &ModeIdIndex);
if (HwDeviceExtension->jChipType < XG20) {
- XGI_GetVBInfo(ModeNo, ModeIdIndex, HwDeviceExtension, pVBInfo);
- XGI_GetTVInfo(ModeNo, ModeIdIndex, pVBInfo);
- XGI_GetLCDInfo(ModeNo, ModeIdIndex, pVBInfo);
+ XGI_GetVBInfo(ModeIdIndex, pVBInfo);
+ XGI_GetTVInfo(ModeIdIndex, pVBInfo);
+ XGI_GetLCDInfo(ModeIdIndex, pVBInfo);
XGI_DisableBridge(xgifb_info, HwDeviceExtension, pVBInfo);
if (pVBInfo->VBInfo & (SetSimuScanMode | XGI_SetCRT2ToLCDA) ||
@@ -5813,15 +5522,14 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
}
}
- XGI_SetCRT2ModeRegs(ModeNo, HwDeviceExtension, pVBInfo);
- XGI_OEM310Setting(ModeNo, ModeIdIndex, pVBInfo); /*0212*/
+ XGI_SetCRT2ModeRegs(pVBInfo);
+ XGI_OEM310Setting(ModeIdIndex, pVBInfo); /*0212*/
XGI_EnableBridge(xgifb_info, HwDeviceExtension, pVBInfo);
} /* !XG20 */
else {
if (pVBInfo->IF_DEF_LVDS == 1)
if (!XGI_XG21CheckLVDSMode(xgifb_info, ModeNo,
- ModeIdIndex,
- pVBInfo))
+ ModeIdIndex))
return 0;
pVBInfo->ModeType = XGI330_EModeIDTable[ModeIdIndex].
@@ -5838,10 +5546,10 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
XGI_DisplayOn(xgifb_info, HwDeviceExtension, pVBInfo);
}
- XGI_UpdateModeInfo(HwDeviceExtension, pVBInfo);
+ XGI_UpdateModeInfo(pVBInfo);
if (HwDeviceExtension->jChipType < XG20)
- XGI_LockCRT2(HwDeviceExtension, pVBInfo);
+ XGI_LockCRT2(pVBInfo);
return 1;
}
diff --git a/drivers/staging/xgifb/vb_setmode.h b/drivers/staging/xgifb/vb_setmode.h
index 5524828..5301bec 100644
--- a/drivers/staging/xgifb/vb_setmode.h
+++ b/drivers/staging/xgifb/vb_setmode.h
@@ -2,10 +2,8 @@
#define _VBSETMODE_
extern void InitTo330Pointer(unsigned char, struct vb_device_info *);
-extern void XGI_UnLockCRT2(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *);
-extern void XGI_LockCRT2(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *);
+extern void XGI_UnLockCRT2(struct vb_device_info *);
+extern void XGI_LockCRT2(struct vb_device_info *);
extern void XGI_DisplayOff(struct xgifb_video_info *,
struct xgi_hw_device_info *,
struct vb_device_info *);
@@ -13,12 +11,10 @@ extern void XGI_GetVBType(struct vb_device_info *);
extern void XGI_SenseCRT1(struct vb_device_info *);
extern unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short ModeNo) ;
+ unsigned short ModeNo);
extern unsigned char XGI_SearchModeID(unsigned short ModeNo,
- unsigned short *ModeIdIndex,
- struct vb_device_info *);
-extern unsigned char XGI_BridgeIsOn(struct vb_device_info *);
+ unsigned short *ModeIdIndex);
extern unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
unsigned short ModeNo,
unsigned short ModeIdIndex,
diff --git a/drivers/staging/xillybus/Kconfig b/drivers/staging/xillybus/Kconfig
new file mode 100644
index 0000000..8a4181f
--- /dev/null
+++ b/drivers/staging/xillybus/Kconfig
@@ -0,0 +1,32 @@
+#
+# Xillybus devices
+#
+
+config XILLYBUS
+ tristate "Xillybus generic FPGA interface"
+ depends on PCI || (OF_ADDRESS && OF_IRQ) && m
+ help
+ Xillybus is a generic interface for peripherals designed on
+ programmable logic (FPGA). The driver probes the hardware for
+ its capabilities, and creates device files accordingly.
+
+ If unsure, say N.
+
+if XILLYBUS
+
+config XILLYBUS_PCIE
+ tristate "Xillybus over PCIe"
+ depends on XILLYBUS && PCI
+ help
+ Set to M if you want Xillybus to use PCI Express for communicating
+ with the FPGA.
+
+config XILLYBUS_OF
+ tristate "Xillybus over Device Tree"
+ depends on XILLYBUS && OF_ADDRESS && OF_IRQ
+ help
+ Set to M if you want Xillybus to find its resources from the
+ Open Firmware Flattened Device Tree. If the target is an embedded
+ system, say M.
+
+endif # if XILLYBUS
diff --git a/drivers/staging/xillybus/Makefile b/drivers/staging/xillybus/Makefile
new file mode 100644
index 0000000..b68b7eb
--- /dev/null
+++ b/drivers/staging/xillybus/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for Xillybus driver
+#
+
+obj-$(CONFIG_XILLYBUS) += xillybus_core.o
+obj-$(CONFIG_XILLYBUS_PCIE) += xillybus_pcie.o
+obj-$(CONFIG_XILLYBUS_OF) += xillybus_of.o
diff --git a/drivers/staging/xillybus/README b/drivers/staging/xillybus/README
new file mode 100644
index 0000000..d2d848a
--- /dev/null
+++ b/drivers/staging/xillybus/README
@@ -0,0 +1,403 @@
+
+ ==========================================
+ Xillybus driver for generic FPGA interface
+ ==========================================
+
+Author: Eli Billauer, Xillybus Ltd. (http://xillybus.com)
+Email: eli.billauer@gmail.com or as advertised on Xillybus' site.
+
+Contents:
+
+ - Introduction
+ -- Background
+ -- Xillybus Overview
+
+ - Usage
+ -- User interface
+ -- Synchronization
+ -- Seekable pipes
+
+ - Internals
+ -- Source code organization
+ -- Pipe attributes
+ -- Host never reads from the FPGA
+ -- Channels, pipes, and the message channel
+ -- Data streaming
+ -- Data granularity
+ -- Probing
+ -- Buffer allocation
+ -- Memory management
+ -- The "nonempty" message (supporting poll)
+
+
+INTRODUCTION
+============
+
+Background
+----------
+
+An FPGA (Field Programmable Gate Array) is a piece of logic hardware, which
+can be programmed to become virtually anything that is usually found as a
+dedicated chipset: For instance, a display adapter, network interface card,
+or even a processor with its peripherals. FPGAs are the LEGO of hardware:
+Based upon certain building blocks, you make your own toys the way you like
+them. It's usually pointless to reimplement something that is already
+available on the market as a chipset, so FPGAs are mostly used when some
+special functionality is needed, and the production volume is relatively low
+(hence not justifying the development of an ASIC).
+
+The challenge with FPGAs is that everything is implemented at a very low
+level, even lower than assembly language. In order to allow FPGA designers to
+focus on their specific project, and not reinvent the wheel over and over
+again, pre-designed building blocks, IP cores, are often used. These are the
+FPGA parallels of library functions. IP cores may implement certain
+mathematical functions, a functional unit (e.g. a USB interface), an entire
+processor (e.g. ARM) or anything that might come handy. Think of them as a
+building block, with electrical wires dangling on the sides for connection to
+other blocks.
+
+One of the daunting tasks in FPGA design is communicating with a full-blown
+operating system (actually, with the processor running it): Implementing the
+low-level bus protocol and the somewhat higher-level interface with the host
+(registers, interrupts, DMA etc.) is a project in itself. When the FPGA's
+function is a well-known one (e.g. a video adapter card, or a NIC), it can
+make sense to design the FPGA's interface logic specifically for the project.
+A special driver is then written to present the FPGA as a well-known interface
+to the kernel and/or user space. In that case, there is no reason to treat the
+FPGA differently than any device on the bus.
+
+It's however common that the desired data communication doesn't fit any well-
+known peripheral function. Also, the effort of designing an elegant
+abstraction for the data exchange is often considered too big. In those cases,
+a quicker and possibly less elegant solution is sought: The driver is
+effectively written as a user space program, leaving the kernel space part
+with just elementary data transport. This still requires designing some
+interface logic for the FPGA, and writing a simple ad-hoc kernel driver.
+
+Xillybus Overview
+-----------------
+
+Xillybus is an IP core and a Linux driver. Together, they form a kit for
+elementary data transport between an FPGA and the host, providing pipe-like
+data streams with a straightforward user interface. It's intended as a low-
+effort solution for mixed FPGA-host projects, for which it makes sense to
+have the project-specific part of the driver running in a user-space program.
+
+Since the communication requirements may vary significantly from one FPGA
+project to another (the number of data pipes needed in each direction and
+their attributes), there isn't one specific chunk of logic being the Xillybus
+IP core. Rather, the IP core is configured and built based upon a
+specification given by its end user.
+
+Xillybus presents independent data streams, which resemble pipes or TCP/IP
+communication to the user. At the host side, a character device file is used
+just like any pipe file. On the FPGA side, hardware FIFOs are used to stream
+the data. This is contrary to a common method of communicating through fixed-
+sized buffers (even though such buffers are used by Xillybus under the hood).
+There may be more than a hundred of these streams on a single IP core, or
+just a single one, depending on the configuration.
+
+In order to ease the deployment of the Xillybus IP core, it contains a simple
+data structure which completely defines the core's configuration. The Linux
+driver fetches this data structure during its initialization process, and sets
+up the DMA buffers and character devices accordingly. As a result, a single
+driver works out of the box with any Xillybus IP core.
+
+The data structure just mentioned should not be confused with PCI's
+configuration space or the Flattened Device Tree.
+
+USAGE
+=====
+
+User interface
+--------------
+
+On the host, all interface with Xillybus is done through /dev/xillybus_*
+device files, which are generated automatically as the driver loads. The
+names of these files depend on the IP core that is loaded in the FPGA (see
+Probing below). To communicate with the FPGA, open the device file that
+corresponds to the hardware FIFO you want to send data or receive data from,
+and use plain write() or read() calls, just like with a regular pipe. In
+particular, it makes perfect sense to go:
+
+$ cat mydata > /dev/xillybus_thisfifo
+
+$ cat /dev/xillybus_thatfifo > hisdata
+
+possibly pressing CTRL-C at some stage, even though the xillybus_* pipes have
+the capability to send an EOF (but may not use it).
+
+The driver and hardware are designed to behave sensibly as pipes, including:
+
+* Supporting non-blocking I/O (by setting O_NONBLOCK on open() ).
+
+* Supporting poll() and select().
+
+* Being bandwidth efficient under load (using DMA) but also handling small
+  pieces of data sent across (like TCP/IP) by autoflushing.
+
+A device file can be read only, write only or bidirectional. Bidirectional
+device files are treated like two independent pipes (except for sharing a
+"channel" structure in the implementation code).
+
+Synchronization
+---------------
+
+Xillybus pipes are configured (on the IP core) to be either synchronous or
+asynchronous. For a synchronous pipe, write() returns successfully only after
+some data has been submitted and acknowledged by the FPGA. This slows down
+bulk data transfers, and is nearly impossible for use with streams that
+require data at a constant rate: There is no data transmitted to the FPGA
+between write() calls, in particular when the process loses the CPU.
+
+When a pipe is configured asynchronous, write() returns as soon as there is
+enough room in the buffers to store at least some of the data.
+
+For pipes in the FPGA to host direction, asynchronous operation allows data
+transfer from the FPGA as soon as the respective device file is opened,
+regardless of whether the data has been requested by a read() call. On
+synchronous pipes, only the amount of data requested by a read() call is
+transmitted.
+
+In summary, for synchronous pipes, data between the host and FPGA is
+transmitted only to satisfy the read() or write() call currently handled
+by the driver, and those calls wait for the transmission to complete before
+returning.
+
+Note that the synchronization attribute has nothing to do with the possibility
+that read() or write() completes fewer bytes than requested. There is a
+separate configuration flag ("allowpartial") that determines whether such a
+partial completion is allowed.
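+
+For instance, a caller that needs an entire buffer delivered over a pipe with
+allowpartial set could wrap write() in a loop along these lines (a sketch,
+not part of the driver):
+
+  #include <unistd.h>
+  #include <errno.h>
+
+  /* Keep calling write() until all "len" bytes have been accepted */
+  static int send_all(int fd, const char *data, size_t len)
+  {
+          ssize_t n;
+
+          while (len > 0) {
+                  n = write(fd, data, len);
+                  if (n < 0) {
+                          if (errno == EINTR)
+                                  continue;  /* interrupted, just retry */
+                          return -1;  /* genuine error */
+                  }
+                  data += n;  /* a short write is not an error */
+                  len -= n;
+          }
+          return 0;
+  }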
+
+Seekable pipes
+--------------
+
+A synchronous pipe can be configured to have the stream's position exposed
+to the user logic at the FPGA. Such a pipe is also seekable on the host API.
+With this feature, a memory or register interface can be attached on the
+FPGA side to the seekable stream. Reading or writing to a certain address in
+the attached memory is done by seeking to the desired address, and calling
+read() or write() as required.
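+
+For example, fetching a 32-bit word at a given byte address through such a
+pipe could look like the sketch below; the 32-bit register width and the use
+of a dedicated device file are assumptions that depend on the FPGA design.
+Writing a register works the same way, with write() in place of read().
+
+  #include <stdint.h>
+  #include <unistd.h>
+  #include <fcntl.h>
+  #include <sys/types.h>
+
+  static int read_reg(const char *dev, off_t addr, uint32_t *value)
+  {
+          int fd = open(dev, O_RDONLY);
+
+          if (fd < 0)
+                  return -1;
+
+          /* Seek to the register's address, then read one word */
+          if (lseek(fd, addr, SEEK_SET) < 0 ||
+              read(fd, value, sizeof(*value)) != (ssize_t)sizeof(*value)) {
+                  close(fd);
+                  return -1;
+          }
+          close(fd);
+          return 0;
+  }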
+
+
+INTERNALS
+=========
+
+Source code organization
+------------------------
+
+The Xillybus driver consists of a core module, xillybus_core.c, and modules
+that depend on the specific bus interface (xillybus_of.c and xillybus_pcie.c).
+
+The bus specific modules are those probed when a suitable device is found by
+the kernel. Since the DMA mapping and synchronization functions, which are bus
+dependent by their nature, are used by the core module, a
+xilly_endpoint_hardware structure is passed to the core module on
+initialization. This structure is populated with pointers to wrapper functions
+which execute the DMA-related operations on the bus.
+
+Pipe attributes
+---------------
+
+Each pipe has a number of attributes which are set when the FPGA component
+(IP core) is built. They are fetched from the IDT (the data structure which
+defines the core's configuration, see Probing below) by xilly_setupchannels()
+in xillybus_core.c as follows:
+
+* is_writebuf: The pipe's direction. A non-zero value means it's an FPGA to
+ host pipe (the FPGA "writes").
+
+* channelnum: The pipe's identification number in communication between the
+ host and FPGA.
+
+* format: The underlying data width. See Data Granularity below.
+
+* allowpartial: A non-zero value means that a read() or write() (whichever
+ applies) may return with less than the requested number of bytes. The common
+ choice is a non-zero value, to match standard UNIX behavior.
+
+* synchronous: A non-zero value means that the pipe is synchronous. See
+  Synchronization above.
+
+* bufsize: Each DMA buffer's size. Always a power of two.
+
+* bufnum: The number of buffers allocated for this pipe. Always a power of two.
+
+* exclusive_open: A non-zero value forces exclusive opening of the associated
+ device file. If the device file is bidirectional, and already opened only in
+ one direction, the opposite direction may be opened once.
+
+* seekable: A non-zero value indicates that the pipe is seekable. See
+ Seekable pipes above.
+
+* supports_nonempty: A non-zero value (which is typical) indicates that the
+ hardware will send the messages that are necessary to support select() and
+ poll() for this pipe.
+
+Host never reads from the FPGA
+------------------------------
+
+Even though PCI Express is hotpluggable in general, a typical motherboard
+doesn't expect a card to go away all of a sudden. But since the PCIe card
+is based upon reprogrammable logic, a sudden disappearance from the bus is
+quite likely as a result of an accidental reprogramming of the FPGA while the
+host is up. In practice, nothing happens immediately in such a situation. But
+if the host attempts to read from an address that is mapped to the PCI Express
+device, that leads to an immediate freeze of the system on some motherboards,
+even though the PCIe standard requires a graceful recovery.
+
+In order to avoid these freezes, the Xillybus driver refrains completely from
+reading from the device's register space. All communication from the FPGA to
+the host is done through DMA. In particular, the Interrupt Service Routine
+doesn't follow the common practice of checking a status register when it's
+invoked. Rather, the FPGA prepares a small buffer which contains short
+messages, which inform the host what the interrupt was about.
+
+This mechanism is used on non-PCIe buses as well for the sake of uniformity.
+
+
+Channels, pipes, and the message channel
+----------------------------------------
+
+Each of the (possibly bidirectional) pipes presented to the user is allocated
+a data channel between the FPGA and the host. The distinction between channels
+and pipes is necessary only because of channel 0, which is used for interrupt-
+related messages from the FPGA, and has no pipe attached to it.
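+
+Each message on channel 0 occupies two 32-bit words. For reference, this is
+roughly how xillybus_isr() (later in this patch) unpacks a message pair
+buf[0], buf[1]:
+
+  opcode      = (buf[0] >> 24) & 0xff;  /* One of XILLYMSG_OPCODE_* */
+  msg_dir     = buf[0] & 1;             /* Direction the message refers to */
+  msg_channel = (buf[0] >> 1) & 0x7ff;
+  msg_bufno   = (buf[0] >> 12) & 0x3ff;
+  msg_data    = buf[1] & 0xfffffff;     /* Meaning depends on the opcode */
+
+  /* Bit 22 of buf[0] marks the last message in the bunch, and bits 31:28
+     of buf[1] carry a rolling counter used as a sanity check. */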
+
+Data streaming
+--------------
+
+Even though a non-segmented data stream is presented to the user at both
+sides, the implementation relies on a set of DMA buffers which is allocated
+for each channel. For the sake of illustration, let's take the FPGA to host
+direction: As data streams into the respective channel's interface in the
+FPGA, the Xillybus IP core writes it to one of the DMA buffers. When the
+buffer is full, the FPGA informs the host about that (appending a
+XILLYMSG_OPCODE_RELEASEBUF message on channel 0 and sending an interrupt if
+necessary). The host responds by making the data available for reading through
+the character device. When all data has been read, the host writes to the
+FPGA's buffer control register, allowing the buffer to be overwritten. Flow
+control mechanisms exist on both sides to prevent underflows and overflows.
+
+This is not good enough for creating a TCP/IP-like stream: If the data flow
+stops momentarily before a DMA buffer is filled, the intuitive expectation is
+that the partial data in the buffer will arrive anyhow, despite the buffer not
+being completed. This is implemented by adding a field in the
+XILLYMSG_OPCODE_RELEASEBUF message, through which the FPGA informs not just
+which buffer is submitted, but how much data it contains.
+
+But the FPGA will submit a partially filled buffer only if directed to do so
+by the host. This situation occurs when the read() method has been blocking
+for XILLY_RX_TIMEOUT jiffies (currently 10 ms), after which the host commands
+the FPGA to submit a DMA buffer as soon as it can. This timeout mechanism
+balances between bus bandwidth efficiency (preventing a lot of partially
+filled buffers being sent) and keeping the latency of data tails fairly low.
+
+A similar setting is used in the host to FPGA direction. The handling of
+partial DMA buffers is somewhat different, though. The user can tell the
+driver to submit all data it has in the buffers to the FPGA, by issuing a
+write() with the byte count set to zero. This is similar to a flush request,
+but it doesn't block. There is also an autoflushing mechanism, which triggers
+an equivalent flush roughly XILLY_RX_TIMEOUT jiffies after the last write().
+This allows the user to be oblivious to the underlying buffering mechanism
+and yet enjoy a stream-like interface.
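+
+For example, a user space program can push out whatever the driver has
+buffered with a zero-length write (a sketch; error handling omitted):
+
+  write(fd, buf, len);  /* Normal write; tail may linger in a DMA buffer */
+  write(fd, buf, 0);    /* Zero byte count: submit buffered data now */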
+
+Note that the issue of partial buffer flushing is irrelevant for pipes having
+the "synchronous" attribute nonzero, since synchronous pipes don't allow data
+to lie around in the DMA buffers between read() and write() anyhow.
+
+Data granularity
+----------------
+
+The data arrives or is sent at the FPGA as 8, 16 or 32 bit wide words, as
+configured by the "format" attribute. Whenever possible, the driver attempts
+to hide this when the pipe is accessed differently from its natural alignment.
+For example, reading single bytes from a pipe with 32 bit granularity works
+with no issues. Writing single bytes to pipes with 16 or 32 bit granularity
+will also work, but the driver can't send partially completed words to the
+FPGA, so the transmission of up to one word may be held until it's fully
+occupied with user data.
+
+This somewhat complicates the handling of host to FPGA streams, because
+when a buffer is flushed, it may contain up to 3 bytes that don't form a word
+in the FPGA, and hence can't be sent. To prevent loss of data, these leftover
+bytes need to be moved to the next buffer. The parts in xillybus_core.c
+that mention "leftovers" in some way are related to this complication.
+
+Probing
+-------
+
+As mentioned earlier, the number of pipes that are created when the driver
+loads and their attributes depend on the Xillybus IP core in the FPGA. During
+the driver's initialization, a blob containing configuration info, the
+Interface Description Table (IDT), is sent from the FPGA to the host. The
+bootstrap process is done in three phases:
+
+1. Acquire the length of the IDT, so a buffer can be allocated for it. This
+ is done by sending a quiesce command to the device, since the acknowledge
+ for this command contains the IDT's buffer length.
+
+2. Acquire the IDT itself.
+
+3. Create the interfaces according to the IDT.
+
+Buffer allocation
+-----------------
+
+In order to simplify the logic that prevents illegal boundary crossings of
+PCIe packets, the following rule applies: If a buffer is smaller than 4kB,
+it must not cross a 4kB boundary. Otherwise, it must be 4kB aligned. The
+xilly_setupchannels() function allocates these buffers by requesting whole
+pages from the kernel, and dividing them into DMA buffers as necessary. Since
+all buffers' sizes are powers of two, it's possible to pack any set of such
+buffers, with a maximal waste of one page of memory.
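+
+The page order for each allocation is found with the simple loop used in
+xilly_setupchannels(), roughly:
+
+  /* Smallest power-of-two number of pages that holds one DMA buffer */
+  allocsize = PAGE_SIZE;
+  allocorder = 0;
+  while (bytebufsize > allocsize) {
+          allocsize *= 2;
+          allocorder++;
+  }
+  /* __get_free_pages(..., allocorder) then returns a naturally aligned,
+     contiguous chunk, which is carved into one or more DMA buffers. */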
+
+All buffers are allocated when the driver is loaded. This is necessary,
+since large contiguous physical memory segments are sometimes requested,
+which are more likely to be available when the system is freshly booted.
+
+The allocation of buffer memory takes place in the same order as the pipes
+appear in the IDT. The driver relies on the rule that the pipes are sorted by
+decreasing buffer size in the IDT. If a requested buffer is at least one page,
+the necessary number of pages is requested from the kernel, and these are
+used for this buffer. If the requested buffer is smaller than a page, one
+single page is requested from the kernel, and that page is partially used.
+Or, if there already is a partially used page at hand, the buffer is packed
+into that page. It can be shown that all pages requested from the kernel
+(except possibly for the last) are 100% utilized this way.
+
+Memory management
+-----------------
+
+The tricky part about the buffer allocation procedure described above is
+freeing and unmapping the buffers, in particular if something goes wrong in
+the middle, and the allocations need to be rolled back. The three-stage
+probing procedure makes this even more crucial, since temporary buffers are
+set up and mapped in its early stages.
+
+To keep the code free of complicated and bug-prone memory release routines,
+there are special routines for allocating memory. For example, instead of
+calling kzalloc, there's
+
+void *xilly_malloc(struct xilly_cleanup *mem, size_t size)
+
+which effectively allocates a zeroed buffer of size "size". Its first
+argument, "mem", is where this allocation is enlisted, so that it's released
+when xillybus_do_cleanup() is called with the same "mem" structure.
+
+Two other functions enlist allocations in this structure: xilly_pagealloc()
+for page allocations and xilly_map_single_*() for DMA mapping.
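+
+A hedged sketch of the intended usage pattern, assuming an already
+initialized endpoint (this mirrors, rather than replaces, what the probing
+code does):
+
+  struct xilly_cleanup tmpmem;
+  void *buf;
+
+  INIT_LIST_HEAD(&tmpmem.to_kfree);
+  INIT_LIST_HEAD(&tmpmem.to_pagefree);
+  INIT_LIST_HEAD(&tmpmem.to_unmap);
+
+  buf = xilly_malloc(&tmpmem, 1024);  /* Zeroed, and enlisted in tmpmem */
+  if (!buf)
+          goto cleanup;               /* Roll back whatever was enlisted */
+
+  /* ... more allocations, DMA mappings, normal operation ... */
+
+cleanup:
+  xillybus_do_cleanup(&tmpmem, endpoint);  /* Releases everything at once */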
+
+The "nonempty" message (supporting poll)
+---------------------------------------
+
+In order to support the "poll" method (and hence select()), there is a small
+catch regarding the FPGA to host direction: The FPGA may have filled a DMA
+buffer with some data, but not submitted that buffer. If the host waited for
+the buffer's submission by the FPGA, there would be a possibility that the
+FPGA side has sent data, but a select() call would still block, because the
+host has not received any notification about this. This is solved with
+XILLYMSG_OPCODE_NONEMPTY messages sent by the FPGA when a channel goes from
+completely empty to containing some data.
+
+These messages are used only to support poll() and select(). The IP core can
+be configured not to send them for a slight reduction of bandwidth.
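+
+From user space this simply means that the pipe's device file can be used
+with poll() or select(). A minimal sketch, with fd opened on an FPGA to host
+pipe (e.g. the hypothetical /dev/xillybus_read_32):
+
+  struct pollfd pfd = { .fd = fd, .events = POLLIN };
+
+  if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN))
+          n = read(fd, buf, sizeof(buf));  /* Data has arrived */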
diff --git a/drivers/staging/xillybus/TODO b/drivers/staging/xillybus/TODO
new file mode 100644
index 0000000..95cfe2f
--- /dev/null
+++ b/drivers/staging/xillybus/TODO
@@ -0,0 +1,5 @@
+TODO:
+- have the driver reviewed
+
+Please send any patches and/or comments to Eli Billauer,
+<eli.billauer@gmail.com>.
diff --git a/drivers/staging/xillybus/xillybus.h b/drivers/staging/xillybus/xillybus.h
new file mode 100644
index 0000000..e5e91d6
--- /dev/null
+++ b/drivers/staging/xillybus/xillybus.h
@@ -0,0 +1,182 @@
+/*
+ * linux/drivers/misc/xillybus.h
+ *
+ * Copyright 2011 Xillybus Ltd, http://xillybus.com
+ *
+ * Header file for the Xillybus FPGA/host framework.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#ifndef __XILLYBUS_H
+#define __XILLYBUS_H
+
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/cdev.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+struct xilly_endpoint_hardware;
+
+struct xilly_page {
+ struct list_head node;
+ unsigned long addr;
+ unsigned int order;
+};
+
+struct xilly_dma {
+ struct list_head node;
+ struct pci_dev *pdev;
+ struct device *dev;
+ dma_addr_t dma_addr;
+ size_t size;
+ int direction;
+};
+
+struct xilly_buffer {
+ void *addr;
+ dma_addr_t dma_addr;
+ int end_offset; /* Counting elements, not bytes */
+};
+
+struct xilly_cleanup {
+ struct list_head to_kfree;
+ struct list_head to_pagefree;
+ struct list_head to_unmap;
+};
+
+struct xilly_idt_handle {
+ unsigned char *chandesc;
+ unsigned char *idt;
+ int entries;
+};
+
+/*
+ * Read-write confusion: wr_* and rd_* notation sticks to FPGA view, so
+ * wr_* buffers are those consumed by read(), since the FPGA writes to them
+ * and vice versa.
+ */
+
+struct xilly_channel {
+ struct xilly_endpoint *endpoint;
+ int chan_num;
+ int log2_element_size;
+ int seekable;
+
+ struct xilly_buffer **wr_buffers; /* FPGA writes, driver reads! */
+ int num_wr_buffers;
+ unsigned int wr_buf_size; /* In bytes */
+ int wr_fpga_buf_idx;
+ int wr_host_buf_idx;
+ int wr_host_buf_pos;
+ int wr_empty;
+ int wr_ready; /* Significant only when wr_empty == 1 */
+ int wr_sleepy;
+ int wr_eof;
+ int wr_hangup;
+ spinlock_t wr_spinlock;
+ struct mutex wr_mutex;
+ wait_queue_head_t wr_wait;
+ wait_queue_head_t wr_ready_wait;
+ int wr_ref_count;
+ int wr_synchronous;
+ int wr_allow_partial;
+ int wr_exclusive_open;
+ int wr_supports_nonempty;
+
+ struct xilly_buffer **rd_buffers; /* FPGA reads, driver writes! */
+ int num_rd_buffers;
+ unsigned int rd_buf_size; /* In bytes */
+ int rd_fpga_buf_idx;
+ int rd_host_buf_pos;
+ int rd_host_buf_idx;
+ int rd_full;
+ spinlock_t rd_spinlock;
+ struct mutex rd_mutex;
+ wait_queue_head_t rd_wait;
+ int rd_ref_count;
+ int rd_allow_partial;
+ int rd_synchronous;
+ int rd_exclusive_open;
+ struct delayed_work rd_workitem;
+ unsigned char rd_leftovers[4];
+};
+
+struct xilly_endpoint {
+ /*
+ * One of pdev and dev is always NULL, and the other is a valid
+ * pointer, depending on the type of device
+ */
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct resource res; /* OF devices only */
+ struct xilly_endpoint_hardware *ephw;
+
+ struct list_head ep_list;
+ int dma_using_dac; /* =1 if 64-bit DMA is used, =0 otherwise. */
+	u32 __iomem *registers;
+ int fatal_error;
+
+ struct mutex register_mutex;
+ wait_queue_head_t ep_wait;
+
+ /* List of memory allocations, to make release easy */
+ struct xilly_cleanup cleanup;
+
+ /* Channels and message handling */
+ struct cdev cdev;
+
+ int major;
+ int lowest_minor; /* Highest minor = lowest_minor + num_channels - 1 */
+
+ int num_channels; /* EXCLUDING message buffer */
+ struct xilly_channel **channels;
+ int msg_counter;
+ int failed_messages;
+ int idtlen;
+
+ u32 *msgbuf_addr;
+ dma_addr_t msgbuf_dma_addr;
+ unsigned int msg_buf_size;
+};
+
+struct xilly_endpoint_hardware {
+ struct module *owner;
+ void (*hw_sync_sgl_for_cpu)(struct xilly_endpoint *,
+ dma_addr_t,
+ size_t,
+ int);
+ void (*hw_sync_sgl_for_device)(struct xilly_endpoint *,
+ dma_addr_t,
+ size_t,
+ int);
+ dma_addr_t (*map_single)(struct xilly_cleanup *,
+ struct xilly_endpoint *,
+ void *,
+ size_t,
+ int);
+ void (*unmap_single)(struct xilly_dma *entry);
+};
+
+irqreturn_t xillybus_isr(int irq, void *data);
+
+void xillybus_do_cleanup(struct xilly_cleanup *mem,
+ struct xilly_endpoint *endpoint);
+
+struct xilly_endpoint *xillybus_init_endpoint(struct pci_dev *pdev,
+ struct device *dev,
+ struct xilly_endpoint_hardware
+ *ephw);
+
+int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint);
+
+void xillybus_endpoint_remove(struct xilly_endpoint *endpoint);
+
+#endif /* __XILLYBUS_H */
diff --git a/drivers/staging/xillybus/xillybus_core.c b/drivers/staging/xillybus/xillybus_core.c
new file mode 100644
index 0000000..7db6f03
--- /dev/null
+++ b/drivers/staging/xillybus/xillybus_core.c
@@ -0,0 +1,2345 @@
+/*
+ * linux/drivers/misc/xillybus_core.c
+ *
+ * Copyright 2011 Xillybus Ltd, http://xillybus.com
+ *
+ * Driver for the Xillybus FPGA/host framework.
+ *
+ * This driver interfaces with a special IP core in an FPGA, setting up
+ * a pipe between a hardware FIFO in the programmable logic and a device
+ * file in the host. The number of such pipes and their attributes are
+ * set up on the logic. This driver detects these automatically and
+ * creates the device files accordingly.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/crc32.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include "xillybus.h"
+
+MODULE_DESCRIPTION("Xillybus core functions");
+MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
+MODULE_VERSION("1.07");
+MODULE_ALIAS("xillybus_core");
+MODULE_LICENSE("GPL v2");
+
+/* General timeout is 100 ms, rx timeout is 10 ms */
+#define XILLY_RX_TIMEOUT (10*HZ/1000)
+#define XILLY_TIMEOUT (100*HZ/1000)
+
+#define fpga_msg_ctrl_reg 0x0002
+#define fpga_dma_control_reg 0x0008
+#define fpga_dma_bufno_reg 0x0009
+#define fpga_dma_bufaddr_lowaddr_reg 0x000a
+#define fpga_dma_bufaddr_highaddr_reg 0x000b
+#define fpga_buf_ctrl_reg 0x000c
+#define fpga_buf_offset_reg 0x000d
+#define fpga_endian_reg 0x0010
+
+#define XILLYMSG_OPCODE_RELEASEBUF 1
+#define XILLYMSG_OPCODE_QUIESCEACK 2
+#define XILLYMSG_OPCODE_FIFOEOF 3
+#define XILLYMSG_OPCODE_FATAL_ERROR 4
+#define XILLYMSG_OPCODE_NONEMPTY 5
+
+static const char xillyname[] = "xillybus";
+
+static struct class *xillybus_class;
+
+/*
+ * ep_list_lock is the last lock to be taken; No other lock requests are
+ * allowed while holding it. It merely protects list_of_endpoints, and not
+ * the endpoints listed in it.
+ */
+
+static LIST_HEAD(list_of_endpoints);
+static struct mutex ep_list_lock;
+static struct workqueue_struct *xillybus_wq;
+
+/*
+ * Locking scheme: Mutexes protect invocations of character device methods.
+ * If both locks are taken, wr_mutex is taken first, rd_mutex second.
+ *
+ * wr_spinlock protects wr_*_buf_idx, wr_empty, wr_sleepy, wr_ready and the
+ * buffers' end_offset fields against changes made by IRQ handler (and in
+ * theory, other file request handlers, but the mutex handles that). Nothing
+ * else.
+ * They are held for short direct memory manipulations. Needless to say,
+ * no mutex locking is allowed when a spinlock is held.
+ *
+ * rd_spinlock does the same with rd_*_buf_idx, rd_empty and end_offset.
+ *
+ * register_mutex is endpoint-specific, and is held when non-atomic
+ * register operations are performed. wr_mutex and rd_mutex may be
+ * held when register_mutex is taken, but none of the spinlocks. Note that
+ * register_mutex doesn't protect against sporadic buf_ctrl_reg writes
+ * which are unrelated to buf_offset_reg, since they are harmless.
+ *
+ * Blocking on the wait queues is allowed with mutexes held, but not with
+ * spinlocks.
+ *
+ * Only interruptible blocking is allowed on mutexes and wait queues.
+ *
+ * All in all, the locking order goes (with skips allowed, of course):
+ * wr_mutex -> rd_mutex -> register_mutex -> wr_spinlock -> rd_spinlock
+ */
+
+static void malformed_message(u32 *buf)
+{
+ int opcode;
+ int msg_channel, msg_bufno, msg_data, msg_dir;
+
+ opcode = (buf[0] >> 24) & 0xff;
+ msg_dir = buf[0] & 1;
+ msg_channel = (buf[0] >> 1) & 0x7ff;
+ msg_bufno = (buf[0] >> 12) & 0x3ff;
+ msg_data = buf[1] & 0xfffffff;
+
+ pr_warn("xillybus: Malformed message (skipping): "
+ "opcode=%d, channel=%03x, dir=%d, bufno=%03x, data=%07x\n",
+ opcode, msg_channel, msg_dir, msg_bufno, msg_data);
+}
+
+/*
+ * xillybus_isr assumes the interrupt is allocated exclusively to it,
+ * which is the natural case with MSI and several other hardware-oriented
+ * interrupts. Sharing is not allowed.
+ */
+
+irqreturn_t xillybus_isr(int irq, void *data)
+{
+ struct xilly_endpoint *ep = data;
+ u32 *buf;
+ unsigned int buf_size;
+ int i;
+ int opcode;
+ unsigned int msg_channel, msg_bufno, msg_data, msg_dir;
+ struct xilly_channel *channel;
+
+ /*
+ * The endpoint structure is altered during periods when it's
+ * guaranteed no interrupt will occur, but in theory, the cache
+ * lines may not be updated. So a memory barrier is issued.
+ */
+
+ smp_rmb();
+
+ buf = ep->msgbuf_addr;
+ buf_size = ep->msg_buf_size/sizeof(u32);
+
+
+ ep->ephw->hw_sync_sgl_for_cpu(ep,
+ ep->msgbuf_dma_addr,
+ ep->msg_buf_size,
+ DMA_FROM_DEVICE);
+
+ for (i = 0; i < buf_size; i += 2)
+ if (((buf[i+1] >> 28) & 0xf) != ep->msg_counter) {
+ malformed_message(&buf[i]);
+ pr_warn("xillybus: Sending a NACK on "
+ "counter %x (instead of %x) on entry %d\n",
+ ((buf[i+1] >> 28) & 0xf),
+ ep->msg_counter,
+ i/2);
+
+ if (++ep->failed_messages > 10)
+ pr_err("xillybus: Lost sync with "
+ "interrupt messages. Stopping.\n");
+ else {
+ ep->ephw->hw_sync_sgl_for_device(
+ ep,
+ ep->msgbuf_dma_addr,
+ ep->msg_buf_size,
+ DMA_FROM_DEVICE);
+
+ iowrite32(0x01, /* Message NACK */
+ &ep->registers[fpga_msg_ctrl_reg]);
+ }
+ return IRQ_HANDLED;
+ } else if (buf[i] & (1 << 22)) /* Last message */
+ break;
+
+ if (i >= buf_size) {
+ pr_err("xillybus: Bad interrupt message. Stopping.\n");
+ return IRQ_HANDLED;
+ }
+
+ buf_size = i;
+
+ for (i = 0; i <= buf_size; i += 2) { /* Scan through messages */
+ opcode = (buf[i] >> 24) & 0xff;
+
+ msg_dir = buf[i] & 1;
+ msg_channel = (buf[i] >> 1) & 0x7ff;
+ msg_bufno = (buf[i] >> 12) & 0x3ff;
+ msg_data = buf[i+1] & 0xfffffff;
+
+ switch (opcode) {
+ case XILLYMSG_OPCODE_RELEASEBUF:
+
+ if ((msg_channel > ep->num_channels) ||
+ (msg_channel == 0)) {
+ malformed_message(&buf[i]);
+ break;
+ }
+
+ channel = ep->channels[msg_channel];
+
+ if (msg_dir) { /* Write channel */
+ if (msg_bufno >= channel->num_wr_buffers) {
+ malformed_message(&buf[i]);
+ break;
+ }
+ spin_lock(&channel->wr_spinlock);
+ channel->wr_buffers[msg_bufno]->end_offset =
+ msg_data;
+ channel->wr_fpga_buf_idx = msg_bufno;
+ channel->wr_empty = 0;
+ channel->wr_sleepy = 0;
+ spin_unlock(&channel->wr_spinlock);
+
+ wake_up_interruptible(&channel->wr_wait);
+
+ } else {
+ /* Read channel */
+
+ if (msg_bufno >= channel->num_rd_buffers) {
+ malformed_message(&buf[i]);
+ break;
+ }
+
+ spin_lock(&channel->rd_spinlock);
+ channel->rd_fpga_buf_idx = msg_bufno;
+ channel->rd_full = 0;
+ spin_unlock(&channel->rd_spinlock);
+
+ wake_up_interruptible(&channel->rd_wait);
+ if (!channel->rd_synchronous)
+ queue_delayed_work(
+ xillybus_wq,
+ &channel->rd_workitem,
+ XILLY_RX_TIMEOUT);
+ }
+
+ break;
+ case XILLYMSG_OPCODE_NONEMPTY:
+ if ((msg_channel > ep->num_channels) ||
+ (msg_channel == 0) || (!msg_dir) ||
+ !ep->channels[msg_channel]->wr_supports_nonempty) {
+ malformed_message(&buf[i]);
+ break;
+ }
+
+ channel = ep->channels[msg_channel];
+
+ if (msg_bufno >= channel->num_wr_buffers) {
+ malformed_message(&buf[i]);
+ break;
+ }
+ spin_lock(&channel->wr_spinlock);
+ if (msg_bufno == channel->wr_host_buf_idx)
+ channel->wr_ready = 1;
+ spin_unlock(&channel->wr_spinlock);
+
+ wake_up_interruptible(&channel->wr_ready_wait);
+
+ break;
+ case XILLYMSG_OPCODE_QUIESCEACK:
+ ep->idtlen = msg_data;
+ wake_up_interruptible(&ep->ep_wait);
+
+ break;
+ case XILLYMSG_OPCODE_FIFOEOF:
+ channel = ep->channels[msg_channel];
+ spin_lock(&channel->wr_spinlock);
+ channel->wr_eof = msg_bufno;
+ channel->wr_sleepy = 0;
+
+ channel->wr_hangup = channel->wr_empty &&
+ (channel->wr_host_buf_idx == msg_bufno);
+
+ spin_unlock(&channel->wr_spinlock);
+
+ wake_up_interruptible(&channel->wr_wait);
+
+ break;
+ case XILLYMSG_OPCODE_FATAL_ERROR:
+ ep->fatal_error = 1;
+ wake_up_interruptible(&ep->ep_wait); /* For select() */
+ pr_err("xillybus: FPGA reported a fatal "
+ "error. This means that the low-level "
+ "communication with the device has failed. "
+ "This hardware problem is most likely "
+ "unrelated to xillybus (neither kernel "
+ "module nor FPGA core), but reports are "
+ "still welcome. All I/O is aborted.\n");
+ break;
+ default:
+ malformed_message(&buf[i]);
+ break;
+ }
+ }
+
+ ep->ephw->hw_sync_sgl_for_device(ep,
+ ep->msgbuf_dma_addr,
+ ep->msg_buf_size,
+ DMA_FROM_DEVICE);
+
+ ep->msg_counter = (ep->msg_counter + 1) & 0xf;
+ ep->failed_messages = 0;
+ iowrite32(0x03, &ep->registers[fpga_msg_ctrl_reg]); /* Message ACK */
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(xillybus_isr);
+
+/*
+ * A few trivial memory management functions.
+ * NOTE: These functions are used only on probe and remove, and therefore
+ * no locks are applied!
+ */
+
+void xillybus_do_cleanup(struct xilly_cleanup *mem,
+ struct xilly_endpoint *endpoint)
+{
+ struct list_head *this, *next;
+
+ list_for_each_safe(this, next, &mem->to_unmap) {
+ struct xilly_dma *entry =
+ list_entry(this, struct xilly_dma, node);
+
+ endpoint->ephw->unmap_single(entry);
+ kfree(entry);
+ }
+
+ INIT_LIST_HEAD(&mem->to_unmap);
+
+ list_for_each_safe(this, next, &mem->to_kfree)
+ kfree(this);
+
+ INIT_LIST_HEAD(&mem->to_kfree);
+
+ list_for_each_safe(this, next, &mem->to_pagefree) {
+ struct xilly_page *entry =
+ list_entry(this, struct xilly_page, node);
+
+ free_pages(entry->addr, entry->order);
+ kfree(entry);
+ }
+ INIT_LIST_HEAD(&mem->to_pagefree);
+}
+EXPORT_SYMBOL(xillybus_do_cleanup);
+
+static void *xilly_malloc(struct xilly_cleanup *mem, size_t size)
+{
+ void *ptr;
+
+ ptr = kzalloc(sizeof(struct list_head) + size, GFP_KERNEL);
+
+ if (!ptr)
+ return ptr;
+
+ list_add_tail((struct list_head *) ptr, &mem->to_kfree);
+
+ return ptr + sizeof(struct list_head);
+}
+
+static unsigned long xilly_pagealloc(struct xilly_cleanup *mem,
+ unsigned long order)
+{
+ unsigned long addr;
+ struct xilly_page *this;
+
+ this = kmalloc(sizeof(struct xilly_page), GFP_KERNEL);
+ if (!this)
+ return 0;
+
+ addr = __get_free_pages(GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO, order);
+
+ if (!addr) {
+ kfree(this);
+ return 0;
+ }
+
+ this->addr = addr;
+ this->order = order;
+
+ list_add_tail(&this->node, &mem->to_pagefree);
+
+ return addr;
+}
+
+
+static void xillybus_autoflush(struct work_struct *work);
+
+static int xilly_setupchannels(struct xilly_endpoint *ep,
+ struct xilly_cleanup *mem,
+ unsigned char *chandesc,
+ int entries
+ )
+{
+ int i, entry, wr_nbuffer, rd_nbuffer;
+ struct xilly_channel *channel;
+ int channelnum, bufnum, bufsize, format, is_writebuf;
+ int bytebufsize;
+ int synchronous, allowpartial, exclusive_open, seekable;
+ int supports_nonempty;
+ void *wr_salami = NULL;
+ void *rd_salami = NULL;
+ int left_of_wr_salami = 0;
+ int left_of_rd_salami = 0;
+ dma_addr_t dma_addr;
+ int msg_buf_done = 0;
+
+ struct xilly_buffer *this_buffer = NULL; /* Init to silence warning */
+
+ channel = xilly_malloc(mem, ep->num_channels *
+ sizeof(struct xilly_channel));
+
+ if (!channel)
+ goto memfail;
+
+ ep->channels = xilly_malloc(mem, (ep->num_channels + 1) *
+ sizeof(struct xilly_channel *));
+
+ if (!ep->channels)
+ goto memfail;
+
+ ep->channels[0] = NULL; /* Channel 0 is message buf. */
+
+ /* Initialize all channels with defaults */
+
+ for (i = 1; i <= ep->num_channels; i++) {
+ channel->wr_buffers = NULL;
+ channel->rd_buffers = NULL;
+ channel->num_wr_buffers = 0;
+ channel->num_rd_buffers = 0;
+ channel->wr_fpga_buf_idx = -1;
+ channel->wr_host_buf_idx = 0;
+ channel->wr_host_buf_pos = 0;
+ channel->wr_empty = 1;
+ channel->wr_ready = 0;
+ channel->wr_sleepy = 1;
+ channel->rd_fpga_buf_idx = 0;
+ channel->rd_host_buf_idx = 0;
+ channel->rd_host_buf_pos = 0;
+ channel->rd_full = 0;
+ channel->wr_ref_count = 0;
+ channel->rd_ref_count = 0;
+
+ spin_lock_init(&channel->wr_spinlock);
+ spin_lock_init(&channel->rd_spinlock);
+ mutex_init(&channel->wr_mutex);
+ mutex_init(&channel->rd_mutex);
+ init_waitqueue_head(&channel->rd_wait);
+ init_waitqueue_head(&channel->wr_wait);
+ init_waitqueue_head(&channel->wr_ready_wait);
+
+ INIT_DELAYED_WORK(&channel->rd_workitem, xillybus_autoflush);
+
+ channel->endpoint = ep;
+ channel->chan_num = i;
+
+ channel->log2_element_size = 0;
+
+ ep->channels[i] = channel++;
+ }
+
+ /*
+ * The DMA buffer address update is atomic on the FPGA, so even if
+ * it was in the middle of sending messages to some buffer, changing
+ * the address is safe, since the data will go to either of the
+ * buffers. Not that this situation should occur at all anyhow.
+ */
+
+ wr_nbuffer = 1;
+ rd_nbuffer = 1; /* Buffer zero isn't used at all */
+
+ for (entry = 0; entry < entries; entry++, chandesc += 4) {
+ is_writebuf = chandesc[0] & 0x01;
+ channelnum = (chandesc[0] >> 1) | ((chandesc[1] & 0x0f) << 7);
+ format = (chandesc[1] >> 4) & 0x03;
+ allowpartial = (chandesc[1] >> 6) & 0x01;
+ synchronous = (chandesc[1] >> 7) & 0x01;
+ bufsize = 1 << (chandesc[2] & 0x1f);
+ bufnum = 1 << (chandesc[3] & 0x0f);
+ exclusive_open = (chandesc[2] >> 7) & 0x01;
+ seekable = (chandesc[2] >> 6) & 0x01;
+ supports_nonempty = (chandesc[2] >> 5) & 0x01;
+
+ if ((channelnum > ep->num_channels) ||
+ ((channelnum == 0) && !is_writebuf)) {
+ pr_err("xillybus: IDT requests channel out "
+ "of range. Aborting.\n");
+ return -ENODEV;
+ }
+
+ channel = ep->channels[channelnum]; /* NULL for msg channel */
+
+ bytebufsize = bufsize << 2; /* Overwritten just below */
+
+ if (!is_writebuf) {
+ channel->num_rd_buffers = bufnum;
+ channel->log2_element_size = ((format > 2) ?
+ 2 : format);
+ bytebufsize = channel->rd_buf_size = bufsize *
+ (1 << channel->log2_element_size);
+ channel->rd_allow_partial = allowpartial;
+ channel->rd_synchronous = synchronous;
+ channel->rd_exclusive_open = exclusive_open;
+ channel->seekable = seekable;
+
+ channel->rd_buffers = xilly_malloc(
+ mem,
+ bufnum * sizeof(struct xilly_buffer *));
+
+ if (!channel->rd_buffers)
+ goto memfail;
+
+ this_buffer = xilly_malloc(
+ mem,
+ bufnum * sizeof(struct xilly_buffer));
+
+ if (!this_buffer)
+ goto memfail;
+ }
+
+ else if (channelnum > 0) {
+ channel->num_wr_buffers = bufnum;
+ channel->log2_element_size = ((format > 2) ?
+ 2 : format);
+ bytebufsize = channel->wr_buf_size = bufsize *
+ (1 << channel->log2_element_size);
+
+ channel->seekable = seekable;
+ channel->wr_supports_nonempty = supports_nonempty;
+
+ channel->wr_allow_partial = allowpartial;
+ channel->wr_synchronous = synchronous;
+ channel->wr_exclusive_open = exclusive_open;
+
+ channel->wr_buffers = xilly_malloc(
+ mem,
+ bufnum * sizeof(struct xilly_buffer *));
+
+ if (!channel->wr_buffers)
+ goto memfail;
+
+ this_buffer = xilly_malloc(
+ mem,
+ bufnum * sizeof(struct xilly_buffer));
+
+ if (!this_buffer)
+ goto memfail;
+ }
+
+ /*
+ * Although daunting, we cut the chunks for read buffers
+ * from a different salami than the write buffers',
+ * possibly improving performance.
+ */
+
+ if (is_writebuf)
+ for (i = 0; i < bufnum; i++) {
+ /*
+ * Buffers are expected in descending
+ * byte-size order, so there is either
+ * enough for this buffer or none at all.
+ */
+ if ((left_of_wr_salami < bytebufsize) &&
+ (left_of_wr_salami > 0)) {
+ pr_err("xillybus: "
+ "Corrupt buffer allocation "
+ "in IDT. Aborting.\n");
+ return -ENODEV;
+ }
+
+ if (left_of_wr_salami == 0) {
+ int allocorder, allocsize;
+
+ allocsize = PAGE_SIZE;
+ allocorder = 0;
+ while (bytebufsize > allocsize) {
+ allocsize *= 2;
+ allocorder++;
+ }
+
+ wr_salami = (void *)
+ xilly_pagealloc(mem,
+ allocorder);
+ if (!wr_salami)
+ goto memfail;
+ left_of_wr_salami = allocsize;
+ }
+
+ dma_addr = ep->ephw->map_single(
+ mem,
+ ep,
+ wr_salami,
+ bytebufsize,
+ DMA_FROM_DEVICE);
+
+ if (!dma_addr)
+ goto dmafail;
+
+ iowrite32(
+ (u32) (dma_addr & 0xffffffff),
+ &ep->registers[
+ fpga_dma_bufaddr_lowaddr_reg]
+ );
+ iowrite32(
+ ((u32) ((((u64) dma_addr) >> 32)
+ & 0xffffffff)),
+ &ep->registers[
+ fpga_dma_bufaddr_highaddr_reg]
+ );
+ mmiowb();
+
+ if (channelnum > 0) {
+ this_buffer->addr = wr_salami;
+ this_buffer->dma_addr = dma_addr;
+ channel->wr_buffers[i] = this_buffer++;
+
+ iowrite32(
+ 0x80000000 | wr_nbuffer++,
+ &ep->registers[
+ fpga_dma_bufno_reg]);
+ } else {
+ ep->msgbuf_addr = wr_salami;
+ ep->msgbuf_dma_addr = dma_addr;
+ ep->msg_buf_size = bytebufsize;
+ msg_buf_done++;
+
+ iowrite32(
+ 0x80000000, &ep->registers[
+ fpga_dma_bufno_reg]);
+ }
+
+ left_of_wr_salami -= bytebufsize;
+ wr_salami += bytebufsize;
+ }
+ else /* Read buffers */
+ for (i = 0; i < bufnum; i++) {
+ /*
+ * Buffers are expected in descending
+ * byte-size order, so there is either
+ * enough for this buffer or none at all.
+ */
+ if ((left_of_rd_salami < bytebufsize) &&
+ (left_of_rd_salami > 0)) {
+ pr_err("xillybus: "
+ "Corrupt buffer allocation "
+ "in IDT. Aborting.\n");
+ return -ENODEV;
+ }
+
+ if (left_of_rd_salami == 0) {
+ int allocorder, allocsize;
+
+ allocsize = PAGE_SIZE;
+ allocorder = 0;
+ while (bytebufsize > allocsize) {
+ allocsize *= 2;
+ allocorder++;
+ }
+
+ rd_salami = (void *)
+ xilly_pagealloc(
+ mem,
+ allocorder);
+
+ if (!rd_salami)
+ goto memfail;
+ left_of_rd_salami = allocsize;
+ }
+
+ dma_addr = ep->ephw->map_single(
+ mem,
+ ep,
+ rd_salami,
+ bytebufsize,
+ DMA_TO_DEVICE);
+
+ if (!dma_addr)
+ goto dmafail;
+
+ iowrite32(
+ (u32) (dma_addr & 0xffffffff),
+ &ep->registers[
+ fpga_dma_bufaddr_lowaddr_reg]
+ );
+ iowrite32(
+ ((u32) ((((u64) dma_addr) >> 32)
+ & 0xffffffff)),
+ &ep->registers[
+ fpga_dma_bufaddr_highaddr_reg]
+ );
+ mmiowb();
+
+ this_buffer->addr = rd_salami;
+ this_buffer->dma_addr = dma_addr;
+ channel->rd_buffers[i] = this_buffer++;
+
+ iowrite32(rd_nbuffer++,
+ &ep->registers[fpga_dma_bufno_reg]);
+
+ left_of_rd_salami -= bytebufsize;
+ rd_salami += bytebufsize;
+ }
+ }
+
+ if (!msg_buf_done) {
+ pr_err("xillybus: Corrupt IDT: No message buffer. "
+ "Aborting.\n");
+ return -ENODEV;
+ }
+
+ return 0;
+
+memfail:
+ pr_err("xillybus: Failed to allocate write buffer memory. "
+ "Aborting.\n");
+ return -ENOMEM;
+dmafail:
+	pr_err("xillybus: Failed to map DMA memory. Aborting.\n");
+ return -ENOMEM;
+}
+
+static void xilly_scan_idt(struct xilly_endpoint *endpoint,
+ struct xilly_idt_handle *idt_handle)
+{
+ int count = 0;
+ unsigned char *idt = endpoint->channels[1]->wr_buffers[0]->addr;
+ unsigned char *end_of_idt = idt + endpoint->idtlen - 4;
+ unsigned char *scan;
+ int len;
+
+ scan = idt;
+ idt_handle->idt = idt;
+
+ scan++; /* Skip version number */
+
+ while ((scan <= end_of_idt) && *scan) {
+ while ((scan <= end_of_idt) && *scan++)
+ /* Do nothing, just scan thru string */;
+ count++;
+ }
+
+ scan++;
+
+ if (scan > end_of_idt) {
+ pr_err("xillybus: IDT device name list overflow. "
+ "Aborting.\n");
+ idt_handle->chandesc = NULL;
+ return;
+ } else
+ idt_handle->chandesc = scan;
+
+ len = endpoint->idtlen - (3 + ((int) (scan - idt)));
+
+ if (len & 0x03) {
+ idt_handle->chandesc = NULL;
+
+ pr_err("xillybus: Corrupt IDT device name list. "
+ "Aborting.\n");
+ }
+
+ idt_handle->entries = len >> 2;
+
+ endpoint->num_channels = count;
+}
+
+static int xilly_obtain_idt(struct xilly_endpoint *endpoint)
+{
+ int rc = 0;
+ struct xilly_channel *channel;
+ unsigned char *version;
+
+ channel = endpoint->channels[1]; /* This should be generated ad-hoc */
+
+ channel->wr_sleepy = 1;
+ wmb(); /* Setting wr_sleepy must come before the command */
+
+ iowrite32(1 |
+ (3 << 24), /* Opcode 3 for channel 0 = Send IDT */
+ &endpoint->registers[fpga_buf_ctrl_reg]);
+ mmiowb(); /* Just to appear safe */
+
+ wait_event_interruptible_timeout(channel->wr_wait,
+ (!channel->wr_sleepy),
+ XILLY_TIMEOUT);
+
+ if (channel->wr_sleepy) {
+ pr_err("xillybus: Failed to obtain IDT. Aborting.\n");
+
+ if (endpoint->fatal_error)
+ return -EIO;
+
+ rc = -ENODEV;
+ return rc;
+ }
+
+ endpoint->ephw->hw_sync_sgl_for_cpu(
+ channel->endpoint,
+ channel->wr_buffers[0]->dma_addr,
+ channel->wr_buf_size,
+ DMA_FROM_DEVICE);
+
+ if (channel->wr_buffers[0]->end_offset != endpoint->idtlen) {
+ pr_err("xillybus: IDT length mismatch (%d != %d). "
+ "Aborting.\n",
+ channel->wr_buffers[0]->end_offset, endpoint->idtlen);
+ rc = -ENODEV;
+ return rc;
+ }
+
+ if (crc32_le(~0, channel->wr_buffers[0]->addr,
+ endpoint->idtlen+1) != 0) {
+ pr_err("xillybus: IDT failed CRC check. Aborting.\n");
+ rc = -ENODEV;
+ return rc;
+ }
+
+ version = channel->wr_buffers[0]->addr;
+
+ /* Check version number. Accept anything below 0x82 for now. */
+ if (*version > 0x82) {
+ pr_err("xillybus: No support for IDT version 0x%02x. "
+			"Maybe the xillybus driver needs an upgrade. "
+ "Aborting.\n",
+ (int) *version);
+ rc = -ENODEV;
+ return rc;
+ }
+
+ return 0; /* Success */
+}
+
+static ssize_t xillybus_read(struct file *filp, char __user *userbuf,
+ size_t count, loff_t *f_pos)
+{
+ ssize_t rc;
+ unsigned long flags;
+ int bytes_done = 0;
+ int no_time_left = 0;
+ long deadline, left_to_sleep;
+ struct xilly_channel *channel = filp->private_data;
+
+ int empty, reached_eof, exhausted, ready;
+ /* Initializations are there only to silence warnings */
+
+ int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0;
+ int waiting_bufidx;
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ deadline = jiffies + 1 + XILLY_RX_TIMEOUT;
+
+ rc = mutex_lock_interruptible(&channel->wr_mutex);
+
+ if (rc)
+ return rc;
+
+ rc = 0; /* Just to be clear about it. Compiler optimizes this out */
+
+ while (1) { /* Note that we may drop mutex within this loop */
+ int bytes_to_do = count - bytes_done;
+ spin_lock_irqsave(&channel->wr_spinlock, flags);
+
+ empty = channel->wr_empty;
+ ready = !empty || channel->wr_ready;
+
+ if (!empty) {
+ bufidx = channel->wr_host_buf_idx;
+ bufpos = channel->wr_host_buf_pos;
+ howmany = ((channel->wr_buffers[bufidx]->end_offset
+ + 1) << channel->log2_element_size)
+ - bufpos;
+
+ /* Update wr_host_* to its post-operation state */
+ if (howmany > bytes_to_do) {
+ bufferdone = 0;
+
+ howmany = bytes_to_do;
+ channel->wr_host_buf_pos += howmany;
+ } else {
+ bufferdone = 1;
+
+ channel->wr_host_buf_pos = 0;
+
+ if (bufidx == channel->wr_fpga_buf_idx) {
+ channel->wr_empty = 1;
+ channel->wr_sleepy = 1;
+ channel->wr_ready = 0;
+ }
+
+ if (bufidx >= (channel->num_wr_buffers - 1))
+ channel->wr_host_buf_idx = 0;
+ else
+ channel->wr_host_buf_idx++;
+ }
+ }
+
+ /*
+ * Marking our situation after the possible changes above,
+ * for use after releasing the spinlock.
+ *
+ * empty = empty before change
+		 * exhausted = empty after possible change
+ */
+
+ reached_eof = channel->wr_empty &&
+ (channel->wr_host_buf_idx == channel->wr_eof);
+ channel->wr_hangup = reached_eof;
+ exhausted = channel->wr_empty;
+ waiting_bufidx = channel->wr_host_buf_idx;
+
+ spin_unlock_irqrestore(&channel->wr_spinlock, flags);
+
+ if (!empty) { /* Go on, now without the spinlock */
+
+ if (bufpos == 0) /* Position zero means it's virgin */
+ channel->endpoint->ephw->hw_sync_sgl_for_cpu(
+ channel->endpoint,
+ channel->wr_buffers[bufidx]->dma_addr,
+ channel->wr_buf_size,
+ DMA_FROM_DEVICE);
+
+ if (copy_to_user(
+ userbuf,
+ channel->wr_buffers[bufidx]->addr
+ + bufpos, howmany))
+ rc = -EFAULT;
+
+ userbuf += howmany;
+ bytes_done += howmany;
+
+ if (bufferdone) {
+ channel->endpoint->ephw->
+ hw_sync_sgl_for_device
+ (
+ channel->endpoint,
+ channel->wr_buffers[bufidx]->
+ dma_addr,
+ channel->wr_buf_size,
+ DMA_FROM_DEVICE);
+
+ /*
+ * Tell FPGA the buffer is done with. It's an
+ * atomic operation to the FPGA, so what
+ * happens with other channels doesn't matter,
+ * and the certain channel is protected with
+ * the channel-specific mutex.
+ */
+
+ iowrite32(1 | (channel->chan_num << 1)
+ | (bufidx << 12),
+ &channel->endpoint->registers[
+ fpga_buf_ctrl_reg]);
+ mmiowb(); /* Just to appear safe */
+ }
+
+ if (rc) {
+ mutex_unlock(&channel->wr_mutex);
+ return rc;
+ }
+ }
+
+ /* This includes a zero-count return = EOF */
+ if ((bytes_done >= count) || reached_eof)
+ break;
+
+ if (!exhausted)
+ continue; /* More in RAM buffer(s)? Just go on. */
+
+ if ((bytes_done > 0) &&
+ (no_time_left ||
+ (channel->wr_synchronous && channel->wr_allow_partial)))
+ break;
+
+ /*
+ * Nonblocking read: The "ready" flag tells us that the FPGA
+ * has data to send. In non-blocking mode, if it isn't on,
+ * just return. But if there is, we jump directly to the point
+ * where we ask for the FPGA to send all it has, and wait
+ * until that data arrives. So in a sense, we *do* block in
+ * nonblocking mode, but only for a very short time.
+ */
+
+ if (!no_time_left && (filp->f_flags & O_NONBLOCK)) {
+ if (bytes_done > 0)
+ break;
+
+ if (ready)
+ goto desperate;
+
+ bytes_done = -EAGAIN;
+ break;
+ }
+
+ if (!no_time_left || (bytes_done > 0)) {
+ /*
+ * Note that in case of an element-misaligned read
+ * request, offsetlimit will include the last element,
+ * which will be partially read from.
+ */
+ int offsetlimit = ((count - bytes_done) - 1) >>
+ channel->log2_element_size;
+ int buf_elements = channel->wr_buf_size >>
+ channel->log2_element_size;
+
+ /*
+ * In synchronous mode, always send an offset limit.
+ * Just don't send a value too big.
+ */
+
+ if (channel->wr_synchronous) {
+ /* Don't request more than one buffer */
+ if (channel->wr_allow_partial &&
+ (offsetlimit >= buf_elements))
+ offsetlimit = buf_elements - 1;
+
+ /* Don't request more than all buffers */
+ if (!channel->wr_allow_partial &&
+ (offsetlimit >=
+ (buf_elements * channel->num_wr_buffers)))
+ offsetlimit = buf_elements *
+ channel->num_wr_buffers - 1;
+ }
+
+ /*
+ * In asynchronous mode, force early flush of a buffer
+ * only if that will allow returning a full count. The
+ * "offsetlimit < ( ... )" rather than "<=" excludes
+ * requesting a full buffer, which would obviously
+ * cause a buffer transmission anyhow
+ */
+
+ if (channel->wr_synchronous ||
+ (offsetlimit < (buf_elements - 1))) {
+
+ mutex_lock(&channel->endpoint->register_mutex);
+
+ iowrite32(offsetlimit,
+ &channel->endpoint->registers[
+ fpga_buf_offset_reg]);
+ mmiowb();
+
+ iowrite32(1 | (channel->chan_num << 1) |
+ (2 << 24) | /* 2 = offset limit */
+ (waiting_bufidx << 12),
+ &channel->endpoint->registers[
+ fpga_buf_ctrl_reg]);
+
+ mmiowb(); /* Just to appear safe */
+
+ mutex_unlock(&channel->endpoint->
+ register_mutex);
+ }
+
+ }
+
+ /*
+ * If partial completion is disallowed, there is no point in
+ * timeout sleeping. Neither if no_time_left is set and
+ * there's no data.
+ */
+
+ if (!channel->wr_allow_partial ||
+ (no_time_left && (bytes_done == 0))) {
+
+ /*
+ * This do-loop will run more than once if another
+ * thread reasserted wr_sleepy before we got the mutex
+ * back, so we try again.
+ */
+
+ do {
+ mutex_unlock(&channel->wr_mutex);
+
+ if (wait_event_interruptible(
+ channel->wr_wait,
+ (!channel->wr_sleepy)))
+ goto interrupted;
+
+ if (mutex_lock_interruptible(
+ &channel->wr_mutex))
+ goto interrupted;
+ } while (channel->wr_sleepy);
+
+ continue;
+
+interrupted: /* Mutex is not held if got here */
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+ if (bytes_done)
+ return bytes_done;
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN; /* Don't admit snoozing */
+ return -EINTR;
+ }
+
+ left_to_sleep = deadline - ((long) jiffies);
+
+ /*
+ * If our time is out, skip the waiting. We may miss wr_sleepy
+ * being deasserted but hey, almost missing the train is like
+ * missing it.
+ */
+
+ if (left_to_sleep > 0) {
+ left_to_sleep =
+ wait_event_interruptible_timeout(
+ channel->wr_wait,
+ (!channel->wr_sleepy),
+ left_to_sleep);
+
+ if (!channel->wr_sleepy)
+ continue;
+
+ if (left_to_sleep < 0) { /* Interrupt */
+ mutex_unlock(&channel->wr_mutex);
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+ if (bytes_done)
+ return bytes_done;
+ return -EINTR;
+ }
+ }
+
+desperate:
+ no_time_left = 1; /* We're out of sleeping time. Desperate! */
+
+ if (bytes_done == 0) {
+ /*
+ * Reaching here means that we allow partial return,
+ * that we've run out of time, and that we have
+ * nothing to return.
+ * So tell the FPGA to send anything it has or gets.
+ */
+
+ iowrite32(1 | (channel->chan_num << 1) |
+ (3 << 24) | /* Opcode 3, flush it all! */
+ (waiting_bufidx << 12),
+ &channel->endpoint->registers[
+ fpga_buf_ctrl_reg]);
+ mmiowb(); /* Just to appear safe */
+ }
+
+ /*
+ * Formally speaking, we should block for data at this point.
+ * But to keep the code cleaner, we'll just finish the loop,
+ * make the unlikely check for data, and then block at the
+ * usual place.
+ */
+ }
+
+ mutex_unlock(&channel->wr_mutex);
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ return bytes_done;
+}
+
+/*
+ * The timeout argument takes values as follows:
+ * >0 : Flush with timeout
+ * ==0 : Flush, and wait indefinitely for the flush to complete
+ * <0 : Autoflush: Flush only if there's a single buffer occupied
+ */
+
+static int xillybus_myflush(struct xilly_channel *channel, long timeout)
+{
+ int rc = 0;
+ unsigned long flags;
+
+ int end_offset_plus1;
+ int bufidx, bufidx_minus1;
+ int i;
+ int empty;
+ int new_rd_host_buf_pos;
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+ rc = mutex_lock_interruptible(&channel->rd_mutex);
+
+ if (rc)
+ return rc;
+
+ /*
+ * Don't flush a closed channel. This can happen when the work queued
+ * autoflush thread fires off after the file has closed. This is not
+ * an error, just something to dismiss.
+ */
+
+ if (!channel->rd_ref_count)
+ goto done;
+
+ bufidx = channel->rd_host_buf_idx;
+
+ bufidx_minus1 = (bufidx == 0) ? channel->num_rd_buffers - 1 : bufidx-1;
+
+ end_offset_plus1 = channel->rd_host_buf_pos >>
+ channel->log2_element_size;
+
+ new_rd_host_buf_pos = channel->rd_host_buf_pos -
+ (end_offset_plus1 << channel->log2_element_size);
+
+ /* Submit the current buffer if it's nonempty */
+ if (end_offset_plus1) {
+ unsigned char *tail = channel->rd_buffers[bufidx]->addr +
+ (end_offset_plus1 << channel->log2_element_size);
+
+ /* Copy unflushed data, so we can put it in next buffer */
+ for (i = 0; i < new_rd_host_buf_pos; i++)
+ channel->rd_leftovers[i] = *tail++;
+
+ spin_lock_irqsave(&channel->rd_spinlock, flags);
+
+ /* Autoflush only if a single buffer is occupied */
+
+ if ((timeout < 0) &&
+ (channel->rd_full ||
+ (bufidx_minus1 != channel->rd_fpga_buf_idx))) {
+ spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+ /*
+ * A new work item may be queued by the ISR exactly
+ * now, since the execution of a work item allows the
+ * queuing of a new one while it's running.
+ */
+ goto done;
+ }
+
+ /* The 4th element is never needed for data, so it's a flag */
+ channel->rd_leftovers[3] = (new_rd_host_buf_pos != 0);
+
+ /* Set up rd_full to reflect a certain moment's state */
+
+ if (bufidx == channel->rd_fpga_buf_idx)
+ channel->rd_full = 1;
+ spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+
+ if (bufidx >= (channel->num_rd_buffers - 1))
+ channel->rd_host_buf_idx = 0;
+ else
+ channel->rd_host_buf_idx++;
+
+ channel->endpoint->ephw->hw_sync_sgl_for_device(
+ channel->endpoint,
+ channel->rd_buffers[bufidx]->dma_addr,
+ channel->rd_buf_size,
+ DMA_TO_DEVICE);
+
+ mutex_lock(&channel->endpoint->register_mutex);
+
+ iowrite32(end_offset_plus1 - 1,
+ &channel->endpoint->registers[fpga_buf_offset_reg]);
+ mmiowb();
+
+ iowrite32((channel->chan_num << 1) | /* Channel ID */
+ (2 << 24) | /* Opcode 2, submit buffer */
+ (bufidx << 12),
+ &channel->endpoint->registers[fpga_buf_ctrl_reg]);
+ mmiowb(); /* Just to appear safe */
+
+ mutex_unlock(&channel->endpoint->register_mutex);
+ } else if (bufidx == 0)
+ bufidx = channel->num_rd_buffers - 1;
+ else
+ bufidx--;
+
+ channel->rd_host_buf_pos = new_rd_host_buf_pos;
+
+ if (timeout < 0)
+ goto done; /* Autoflush */
+
+
+ /*
+ * bufidx is now the last buffer written to (or equal to
+ * rd_fpga_buf_idx if buffer was never written to), and
+ * channel->rd_host_buf_idx the one after it.
+ *
+ * If bufidx == channel->rd_fpga_buf_idx we're either empty or full.
+ */
+
+ rc = 0;
+
+ while (1) { /* Loop waiting for draining of buffers */
+ spin_lock_irqsave(&channel->rd_spinlock, flags);
+
+ if (bufidx != channel->rd_fpga_buf_idx)
+ channel->rd_full = 1; /*
+ * Not really full,
+ * but needs waiting.
+ */
+
+ empty = !channel->rd_full;
+
+ spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+
+ if (empty)
+ break;
+
+ /*
+ * Indefinite sleep with mutex taken. With data waiting for
+ * flushing user should not be surprised if open() for write
+ * sleeps.
+ */
+ if (timeout == 0)
+ wait_event_interruptible(channel->rd_wait,
+ (!channel->rd_full));
+
+ else if (wait_event_interruptible_timeout(
+ channel->rd_wait,
+ (!channel->rd_full),
+ timeout) == 0) {
+ pr_warn("xillybus: "
+ "Timed out while flushing. "
+ "Output data may be lost.\n");
+
+ rc = -ETIMEDOUT;
+ break;
+ }
+
+ if (channel->rd_full) {
+ rc = -EINTR;
+ break;
+ }
+ }
+
+done:
+ mutex_unlock(&channel->rd_mutex);
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ return rc;
+}
+
+static int xillybus_flush(struct file *filp, fl_owner_t id)
+{
+ if (!(filp->f_mode & FMODE_WRITE))
+ return 0;
+
+ return xillybus_myflush(filp->private_data, HZ); /* 1 second timeout */
+}
+
+static void xillybus_autoflush(struct work_struct *work)
+{
+ struct delayed_work *workitem = container_of(
+ work, struct delayed_work, work);
+ struct xilly_channel *channel = container_of(
+ workitem, struct xilly_channel, rd_workitem);
+ int rc;
+
+ rc = xillybus_myflush(channel, -1);
+
+ if (rc == -EINTR)
+ pr_warn("xillybus: Autoflush failed because "
+ "work queue thread got a signal.\n");
+ else if (rc)
+ pr_err("xillybus: Autoflush failed under "
+ "weird circumstances.\n");
+
+}
+
+static ssize_t xillybus_write(struct file *filp, const char __user *userbuf,
+ size_t count, loff_t *f_pos)
+{
+ ssize_t rc;
+ unsigned long flags;
+ int bytes_done = 0;
+ struct xilly_channel *channel = filp->private_data;
+
+ int full, exhausted;
+ /* Initializations are there only to silence warnings */
+
+ int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0;
+ int end_offset_plus1 = 0;
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ rc = mutex_lock_interruptible(&channel->rd_mutex);
+
+ if (rc)
+ return rc;
+
+ rc = 0; /* Just to be clear about it. Compiler optimizes this out */
+
+ while (1) {
+ int bytes_to_do = count - bytes_done;
+
+ spin_lock_irqsave(&channel->rd_spinlock, flags);
+
+ full = channel->rd_full;
+
+ if (!full) {
+ bufidx = channel->rd_host_buf_idx;
+ bufpos = channel->rd_host_buf_pos;
+ howmany = channel->rd_buf_size - bufpos;
+
+ /*
+ * Update rd_host_* to its state after this operation.
+ * count=0 means committing the buffer immediately,
+			 * which is like flushing, but doesn't necessarily block.
+ */
+
+ if ((howmany > bytes_to_do) &&
+ (count ||
+ ((bufpos >> channel->log2_element_size) == 0))) {
+ bufferdone = 0;
+
+ howmany = bytes_to_do;
+ channel->rd_host_buf_pos += howmany;
+ } else {
+ bufferdone = 1;
+
+ if (count) {
+ end_offset_plus1 =
+ channel->rd_buf_size >>
+ channel->log2_element_size;
+ channel->rd_host_buf_pos = 0;
+ } else {
+ unsigned char *tail;
+ int i;
+
+ end_offset_plus1 = bufpos >>
+ channel->log2_element_size;
+
+ channel->rd_host_buf_pos -=
+ end_offset_plus1 <<
+ channel->log2_element_size;
+
+ tail = channel->
+ rd_buffers[bufidx]->addr +
+ (end_offset_plus1 <<
+ channel->log2_element_size);
+
+ for (i = 0;
+ i < channel->rd_host_buf_pos;
+ i++)
+ channel->rd_leftovers[i] =
+ *tail++;
+ }
+
+ if (bufidx == channel->rd_fpga_buf_idx)
+ channel->rd_full = 1;
+
+ if (bufidx >= (channel->num_rd_buffers - 1))
+ channel->rd_host_buf_idx = 0;
+ else
+ channel->rd_host_buf_idx++;
+ }
+ }
+
+ /*
+ * Marking our situation after the possible changes above,
+ * for use after releasing the spinlock.
+ *
+ * full = full before change
+		 * exhausted = full after possible change
+ */
+
+ exhausted = channel->rd_full;
+
+ spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+
+ if (!full) { /* Go on, now without the spinlock */
+ unsigned char *head =
+ channel->rd_buffers[bufidx]->addr;
+ int i;
+
+ if ((bufpos == 0) || /* Zero means it's virgin */
+ (channel->rd_leftovers[3] != 0)) {
+ channel->endpoint->ephw->hw_sync_sgl_for_cpu(
+ channel->endpoint,
+ channel->rd_buffers[bufidx]->dma_addr,
+ channel->rd_buf_size,
+ DMA_TO_DEVICE);
+
+ /* Virgin, but leftovers are due */
+ for (i = 0; i < bufpos; i++)
+ *head++ = channel->rd_leftovers[i];
+
+ channel->rd_leftovers[3] = 0; /* Clear flag */
+ }
+
+ if (copy_from_user(
+ channel->rd_buffers[bufidx]->addr + bufpos,
+ userbuf, howmany))
+ rc = -EFAULT;
+
+ userbuf += howmany;
+ bytes_done += howmany;
+
+ if (bufferdone) {
+ channel->endpoint->ephw->
+ hw_sync_sgl_for_device(
+ channel->endpoint,
+ channel->rd_buffers[bufidx]->
+ dma_addr,
+ channel->rd_buf_size,
+ DMA_TO_DEVICE);
+
+ mutex_lock(&channel->endpoint->register_mutex);
+
+ iowrite32(end_offset_plus1 - 1,
+ &channel->endpoint->registers[
+ fpga_buf_offset_reg]);
+ mmiowb();
+ iowrite32((channel->chan_num << 1) |
+ (2 << 24) | /* 2 = submit buffer */
+ (bufidx << 12),
+ &channel->endpoint->registers[
+ fpga_buf_ctrl_reg]);
+ mmiowb(); /* Just to appear safe */
+
+ mutex_unlock(&channel->endpoint->
+ register_mutex);
+
+ channel->rd_leftovers[3] =
+ (channel->rd_host_buf_pos != 0);
+ }
+
+ if (rc) {
+ mutex_unlock(&channel->rd_mutex);
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ if (!channel->rd_synchronous)
+ queue_delayed_work(
+ xillybus_wq,
+ &channel->rd_workitem,
+ XILLY_RX_TIMEOUT);
+
+ return rc;
+ }
+ }
+
+ if (bytes_done >= count)
+ break;
+
+ if (!exhausted)
+ continue; /* If there's more space, just go on */
+
+ if ((bytes_done > 0) && channel->rd_allow_partial)
+ break;
+
+ /*
+ * Indefinite sleep with mutex taken. With data waiting for
+ * flushing, user should not be surprised if open() for write
+ * sleeps.
+ */
+
+ if (filp->f_flags & O_NONBLOCK) {
+ bytes_done = -EAGAIN;
+ break;
+ }
+
+ wait_event_interruptible(channel->rd_wait,
+ (!channel->rd_full));
+
+ if (channel->rd_full) {
+ mutex_unlock(&channel->rd_mutex);
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ if (bytes_done)
+ return bytes_done;
+ return -EINTR;
+ }
+ }
+
+ mutex_unlock(&channel->rd_mutex);
+
+ if (!channel->rd_synchronous)
+ queue_delayed_work(xillybus_wq,
+ &channel->rd_workitem,
+ XILLY_RX_TIMEOUT);
+
+ if ((channel->rd_synchronous) && (bytes_done > 0)) {
+ rc = xillybus_myflush(filp->private_data, 0); /* No timeout */
+
+ if (rc && (rc != -EINTR))
+ return rc;
+ }
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ return bytes_done;
+}
+
+static int xillybus_open(struct inode *inode, struct file *filp)
+{
+ int rc = 0;
+ unsigned long flags;
+ int minor = iminor(inode);
+ int major = imajor(inode);
+ struct xilly_endpoint *ep_iter, *endpoint = NULL;
+ struct xilly_channel *channel;
+
+ mutex_lock(&ep_list_lock);
+
+ list_for_each_entry(ep_iter, &list_of_endpoints, ep_list) {
+ if ((ep_iter->major == major) &&
+ (minor >= ep_iter->lowest_minor) &&
+ (minor < (ep_iter->lowest_minor +
+ ep_iter->num_channels))) {
+ endpoint = ep_iter;
+ break;
+ }
+ }
+ mutex_unlock(&ep_list_lock);
+
+ if (!endpoint) {
+ pr_err("xillybus: open() failed to find a device "
+ "for major=%d and minor=%d\n", major, minor);
+ return -ENODEV;
+ }
+
+ if (endpoint->fatal_error)
+ return -EIO;
+
+ channel = endpoint->channels[1 + minor - endpoint->lowest_minor];
+ filp->private_data = channel;
+
+
+ /*
+ * It gets complicated because:
+ * 1. We don't want to take a mutex we don't have to
+ * 2. We don't want to open one direction if the other will fail.
+ */
+
+ if ((filp->f_mode & FMODE_READ) && (!channel->num_wr_buffers))
+ return -ENODEV;
+
+ if ((filp->f_mode & FMODE_WRITE) && (!channel->num_rd_buffers))
+ return -ENODEV;
+
+ if ((filp->f_mode & FMODE_READ) && (filp->f_flags & O_NONBLOCK) &&
+ (channel->wr_synchronous || !channel->wr_allow_partial ||
+ !channel->wr_supports_nonempty)) {
+ pr_err("xillybus: open() failed: "
+ "O_NONBLOCK not allowed for read on this device\n");
+ return -ENODEV;
+ }
+
+ if ((filp->f_mode & FMODE_WRITE) && (filp->f_flags & O_NONBLOCK) &&
+ (channel->rd_synchronous || !channel->rd_allow_partial)) {
+ pr_err("xillybus: open() failed: "
+ "O_NONBLOCK not allowed for write on this device\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Note: open() may block on getting mutexes despite O_NONBLOCK.
+ * This shouldn't occur normally, since multiple open of the same
+ * file descriptor is almost always prohibited anyhow
+ * (*_exclusive_open is normally set in real-life systems).
+ */
+
+ if (filp->f_mode & FMODE_READ) {
+ rc = mutex_lock_interruptible(&channel->wr_mutex);
+ if (rc)
+ return rc;
+ }
+
+ if (filp->f_mode & FMODE_WRITE) {
+ rc = mutex_lock_interruptible(&channel->rd_mutex);
+ if (rc)
+ goto unlock_wr;
+ }
+
+ if ((filp->f_mode & FMODE_READ) &&
+ (channel->wr_ref_count != 0) &&
+ (channel->wr_exclusive_open)) {
+ rc = -EBUSY;
+ goto unlock;
+ }
+
+ if ((filp->f_mode & FMODE_WRITE) &&
+ (channel->rd_ref_count != 0) &&
+ (channel->rd_exclusive_open)) {
+ rc = -EBUSY;
+ goto unlock;
+ }
+
+
+ if (filp->f_mode & FMODE_READ) {
+ if (channel->wr_ref_count == 0) { /* First open of file */
+ /* Move the host to first buffer */
+ spin_lock_irqsave(&channel->wr_spinlock, flags);
+ channel->wr_host_buf_idx = 0;
+ channel->wr_host_buf_pos = 0;
+ channel->wr_fpga_buf_idx = -1;
+ channel->wr_empty = 1;
+ channel->wr_ready = 0;
+ channel->wr_sleepy = 1;
+ channel->wr_eof = -1;
+ channel->wr_hangup = 0;
+
+ spin_unlock_irqrestore(&channel->wr_spinlock, flags);
+
+ iowrite32(1 | (channel->chan_num << 1) |
+ (4 << 24) | /* Opcode 4, open channel */
+ ((channel->wr_synchronous & 1) << 23),
+ &channel->endpoint->registers[
+ fpga_buf_ctrl_reg]);
+ mmiowb(); /* Just to appear safe */
+ }
+
+ channel->wr_ref_count++;
+ }
+
+ if (filp->f_mode & FMODE_WRITE) {
+ if (channel->rd_ref_count == 0) { /* First open of file */
+ /* Move the host to first buffer */
+ spin_lock_irqsave(&channel->rd_spinlock, flags);
+ channel->rd_host_buf_idx = 0;
+ channel->rd_host_buf_pos = 0;
+ channel->rd_leftovers[3] = 0; /* No leftovers. */
+ channel->rd_fpga_buf_idx = channel->num_rd_buffers - 1;
+ channel->rd_full = 0;
+
+ spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+
+ iowrite32((channel->chan_num << 1) |
+ (4 << 24), /* Opcode 4, open channel */
+ &channel->endpoint->registers[
+ fpga_buf_ctrl_reg]);
+ mmiowb(); /* Just to appear safe */
+ }
+
+ channel->rd_ref_count++;
+ }
+
+unlock:
+ if (filp->f_mode & FMODE_WRITE)
+ mutex_unlock(&channel->rd_mutex);
+unlock_wr:
+ if (filp->f_mode & FMODE_READ)
+ mutex_unlock(&channel->wr_mutex);
+
+ if (!rc && (!channel->seekable))
+ return nonseekable_open(inode, filp);
+
+ return rc;
+}
+
+static int xillybus_release(struct inode *inode, struct file *filp)
+{
+ int rc;
+ unsigned long flags;
+ struct xilly_channel *channel = filp->private_data;
+
+ int buf_idx;
+ int eof;
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ if (filp->f_mode & FMODE_WRITE) {
+ rc = mutex_lock_interruptible(&channel->rd_mutex);
+
+ if (rc) {
+ pr_warn("xillybus: Failed to close file. "
+ "Hardware left in messy state.\n");
+ return rc;
+ }
+
+ channel->rd_ref_count--;
+
+ if (channel->rd_ref_count == 0) {
+
+ /*
+ * We rely on the kernel calling flush()
+ * before we get here.
+ */
+
+ iowrite32((channel->chan_num << 1) | /* Channel ID */
+ (5 << 24), /* Opcode 5, close channel */
+ &channel->endpoint->registers[
+ fpga_buf_ctrl_reg]);
+ mmiowb(); /* Just to appear safe */
+ }
+ mutex_unlock(&channel->rd_mutex);
+ }
+
+ if (filp->f_mode & FMODE_READ) {
+ rc = mutex_lock_interruptible(&channel->wr_mutex);
+ if (rc) {
+ pr_warn("xillybus: Failed to close file. "
+ "Hardware left in messy state.\n");
+ return rc;
+ }
+
+ channel->wr_ref_count--;
+
+ if (channel->wr_ref_count == 0) {
+
+ iowrite32(1 | (channel->chan_num << 1) |
+ (5 << 24), /* Opcode 5, close channel */
+ &channel->endpoint->registers[
+ fpga_buf_ctrl_reg]);
+ mmiowb(); /* Just to appear safe */
+
+			/*
+			 * This is crazily cautious: We make sure not only
+			 * that we got an EOF (be it because we closed the
+			 * channel or because of a user's EOF), but also that
+			 * it's one beyond the last buffer that arrived, so
+			 * no leftover buffers are pending before wrapping
+			 * up (which can only happen on asynchronous channels,
+			 * by the way).
+			 */
+
+ while (1) {
+ spin_lock_irqsave(&channel->wr_spinlock,
+ flags);
+ buf_idx = channel->wr_fpga_buf_idx;
+ eof = channel->wr_eof;
+ channel->wr_sleepy = 1;
+ spin_unlock_irqrestore(&channel->wr_spinlock,
+ flags);
+
+				/*
+				 * Check if eof points at the buffer after
+				 * the last one the FPGA submitted. Note that
+				 * a negative eof means no EOF has been marked.
+				 */
+
+ buf_idx++;
+ if (buf_idx == channel->num_wr_buffers)
+ buf_idx = 0;
+
+ if (buf_idx == eof)
+ break;
+
+				/*
+				 * Steal an extra 100 ms if awakened by an
+				 * interrupt. This is a simple workaround for
+				 * an interrupt pending on entry, which would
+				 * otherwise result in declaring the hardware
+				 * non-responsive.
+				 */
+
+ if (wait_event_interruptible(
+ channel->wr_wait,
+ (!channel->wr_sleepy)))
+ msleep(100);
+
+ if (channel->wr_sleepy) {
+ mutex_unlock(&channel->wr_mutex);
+ pr_warn("xillybus: Hardware failed to "
+ "respond to close command, "
+ "therefore left in "
+ "messy state.\n");
+ return -EINTR;
+ }
+ }
+ }
+
+ mutex_unlock(&channel->wr_mutex);
+ }
+
+ return 0;
+}
+static loff_t xillybus_llseek(struct file *filp, loff_t offset, int whence)
+{
+ struct xilly_channel *channel = filp->private_data;
+ loff_t pos = filp->f_pos;
+ int rc = 0;
+
+	/*
+	 * Take both mutexes without allowing interruption, since common
+	 * applications don't seem to expect an -EINTR here. Besides, multiple
+	 * accesses to a single file descriptor on seekable devices are a mess
+	 * anyhow.
+	 */
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ mutex_lock(&channel->wr_mutex);
+ mutex_lock(&channel->rd_mutex);
+
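+	/* whence: 0 = SEEK_SET, 1 = SEEK_CUR, 2 = SEEK_END */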
+ switch (whence) {
+ case 0:
+ pos = offset;
+ break;
+ case 1:
+ pos += offset;
+ break;
+ case 2:
+ pos = offset; /* Going to the end => to the beginning */
+ break;
+ default:
+ rc = -EINVAL;
+ goto end;
+ }
+
+ /* In any case, we must finish on an element boundary */
+ if (pos & ((1 << channel->log2_element_size) - 1)) {
+ rc = -EINVAL;
+ goto end;
+ }
+
+ mutex_lock(&channel->endpoint->register_mutex);
+
+ iowrite32(pos >> channel->log2_element_size,
+ &channel->endpoint->registers[fpga_buf_offset_reg]);
+ mmiowb();
+ iowrite32((channel->chan_num << 1) |
+ (6 << 24), /* Opcode 6, set address */
+ &channel->endpoint->registers[fpga_buf_ctrl_reg]);
+ mmiowb(); /* Just to appear safe */
+
+ mutex_unlock(&channel->endpoint->register_mutex);
+
+end:
+ mutex_unlock(&channel->rd_mutex);
+ mutex_unlock(&channel->wr_mutex);
+
+ if (rc) /* Return error after releasing mutexes */
+ return rc;
+
+ filp->f_pos = pos;
+
+	/*
+	 * Since seekable devices are allowed only when the channel is
+	 * synchronous, we assume that there is no data pending in either
+	 * direction (which holds true as long as no concurrent access to the
+	 * file descriptor takes place).
+	 * The only thing we may need to throw away is leftovers from a
+	 * partial write() flush.
+	 */
+
+ channel->rd_leftovers[3] = 0;
+
+ return pos;
+}
+
+static unsigned int xillybus_poll(struct file *filp, poll_table *wait)
+{
+ struct xilly_channel *channel = filp->private_data;
+ unsigned int mask = 0;
+ unsigned long flags;
+
+ poll_wait(filp, &channel->endpoint->ep_wait, wait);
+
+	/*
+	 * poll() isn't supported for read() channels unless they are
+	 * asynchronous and support the nonempty message. Allowing it
+	 * otherwise would create situations where data has arrived at
+	 * the FPGA while the user expects select() to wake up, which it
+	 * may not.
+	 */
+
+ if (!channel->wr_synchronous && channel->wr_supports_nonempty) {
+ poll_wait(filp, &channel->wr_wait, wait);
+ poll_wait(filp, &channel->wr_ready_wait, wait);
+
+ spin_lock_irqsave(&channel->wr_spinlock, flags);
+ if (!channel->wr_empty || channel->wr_ready)
+ mask |= POLLIN | POLLRDNORM;
+
+ if (channel->wr_hangup)
+			/*
+			 * Not POLLHUP, because its semantics are murky,
+			 * and POLLIN does what we want: wake up the
+			 * reading file descriptor so it sees EOF.
+			 */
+ mask |= POLLIN | POLLRDNORM;
+ spin_unlock_irqrestore(&channel->wr_spinlock, flags);
+ }
+
+	/*
+	 * If partial data writes are disallowed on a write() channel,
+	 * it's pointless to ever signal OK to write, because it could
+	 * block despite some space being available.
+	 */
+
+ if (channel->rd_allow_partial) {
+ poll_wait(filp, &channel->rd_wait, wait);
+
+ spin_lock_irqsave(&channel->rd_spinlock, flags);
+ if (!channel->rd_full)
+ mask |= POLLOUT | POLLWRNORM;
+ spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+ }
+
+ if (channel->endpoint->fatal_error)
+ mask |= POLLERR;
+
+ return mask;
+}
+
+static const struct file_operations xillybus_fops = {
+ .owner = THIS_MODULE,
+ .read = xillybus_read,
+ .write = xillybus_write,
+ .open = xillybus_open,
+ .flush = xillybus_flush,
+ .release = xillybus_release,
+ .llseek = xillybus_llseek,
+ .poll = xillybus_poll,
+};
+
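+/* Create one character device node per channel, named from the IDT strings */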
+static int xillybus_init_chrdev(struct xilly_endpoint *endpoint,
+ const unsigned char *idt)
+{
+ int rc;
+ dev_t dev;
+ int devnum, i, minor, major;
+ char devname[48];
+ struct device *device;
+
+ rc = alloc_chrdev_region(&dev, 0, /* minor start */
+ endpoint->num_channels,
+ xillyname);
+
+ if (rc) {
+		pr_warn("xillybus: Failed to obtain major/minors\n");
+ goto error1;
+ }
+
+ endpoint->major = major = MAJOR(dev);
+ endpoint->lowest_minor = minor = MINOR(dev);
+
+ cdev_init(&endpoint->cdev, &xillybus_fops);
+ endpoint->cdev.owner = endpoint->ephw->owner;
+ rc = cdev_add(&endpoint->cdev, MKDEV(major, minor),
+ endpoint->num_channels);
+ if (rc) {
+ pr_warn("xillybus: Failed to add cdev. Aborting.\n");
+ goto error2;
+ }
+
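+	/* Skip the IDT's leading byte; NUL-terminated channel names follow */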
+ idt++;
+
+ for (i = minor, devnum = 0;
+ devnum < endpoint->num_channels;
+ devnum++, i++) {
+ snprintf(devname, sizeof(devname)-1, "xillybus_%s", idt);
+
+ devname[sizeof(devname)-1] = 0; /* Should never matter */
+
+ while (*idt++)
+ /* Skip to next */;
+
+ device = device_create(xillybus_class,
+ NULL,
+ MKDEV(major, i),
+ NULL,
+ "%s", devname);
+
+		if (IS_ERR(device)) {
+			pr_warn("xillybus: Failed to create %s "
+				"device. Aborting.\n", devname);
+			rc = PTR_ERR(device);
+			goto error3;
+		}
+ }
+
+ pr_info("xillybus: Created %d device files.\n",
+ endpoint->num_channels);
+ return 0; /* succeed */
+
+error3:
+ devnum--; i--;
+ for (; devnum >= 0; devnum--, i--)
+ device_destroy(xillybus_class, MKDEV(major, i));
+
+ cdev_del(&endpoint->cdev);
+error2:
+ unregister_chrdev_region(MKDEV(major, minor), endpoint->num_channels);
+error1:
+
+ return rc;
+}
+
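+/* Undo xillybus_init_chrdev(): destroy the device nodes, cdev and region */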
+static void xillybus_cleanup_chrdev(struct xilly_endpoint *endpoint)
+{
+ int minor;
+
+ for (minor = endpoint->lowest_minor;
+ minor < (endpoint->lowest_minor + endpoint->num_channels);
+ minor++)
+ device_destroy(xillybus_class, MKDEV(endpoint->major, minor));
+ cdev_del(&endpoint->cdev);
+ unregister_chrdev_region(MKDEV(endpoint->major,
+ endpoint->lowest_minor),
+ endpoint->num_channels);
+
+ pr_info("xillybus: Removed %d device files.\n",
+ endpoint->num_channels);
+}
+
+
+struct xilly_endpoint *xillybus_init_endpoint(struct pci_dev *pdev,
+ struct device *dev,
+ struct xilly_endpoint_hardware
+ *ephw)
+{
+ struct xilly_endpoint *endpoint;
+
+ endpoint = kzalloc(sizeof(*endpoint), GFP_KERNEL);
+ if (!endpoint) {
+ pr_err("xillybus: Failed to allocate memory. Aborting.\n");
+ return NULL;
+ }
+
+ endpoint->pdev = pdev;
+ endpoint->dev = dev;
+ endpoint->ephw = ephw;
+ INIT_LIST_HEAD(&endpoint->cleanup.to_kfree);
+ INIT_LIST_HEAD(&endpoint->cleanup.to_pagefree);
+ INIT_LIST_HEAD(&endpoint->cleanup.to_unmap);
+ endpoint->msg_counter = 0x0b;
+ endpoint->failed_messages = 0;
+ endpoint->fatal_error = 0;
+
+ init_waitqueue_head(&endpoint->ep_wait);
+ mutex_init(&endpoint->register_mutex);
+
+ return endpoint;
+}
+EXPORT_SYMBOL(xillybus_init_endpoint);
+
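+/*
+ * Ask the FPGA to stop DMA, then wait for idtlen to turn non-negative,
+ * which indicates the device acknowledged, or give up on timeout.
+ */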
+static int xilly_quiesce(struct xilly_endpoint *endpoint)
+{
+ endpoint->idtlen = -1;
+ wmb(); /* Make sure idtlen is set before sending command */
+ iowrite32((u32) (endpoint->dma_using_dac & 0x0001),
+ &endpoint->registers[fpga_dma_control_reg]);
+ mmiowb();
+
+ wait_event_interruptible_timeout(endpoint->ep_wait,
+ (endpoint->idtlen >= 0),
+ XILLY_TIMEOUT);
+
+ if (endpoint->idtlen < 0) {
+ pr_err("xillybus: Failed to quiesce the device on "
+ "exit. Quitting while leaving a mess.\n");
+ return -ENODEV;
+ }
+ return 0; /* Success */
+}
+
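+/*
+ * Bring up an endpoint: bootstrap a message buffer, obtain the IDT,
+ * allocate the channel buffers it describes and register the device nodes.
+ */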
+int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint)
+{
+ int rc = 0;
+
+ struct xilly_cleanup tmpmem;
+ int idtbuffersize = (1 << PAGE_SHIFT);
+
+ /*
+ * The bogus IDT is used during bootstrap for allocating the initial
+ * message buffer, and then the message buffer and space for the IDT
+ * itself. The initial message buffer is of a single page's size, but
+ * it's soon replaced with a more modest one (and memory is freed).
+ */
+
+ unsigned char bogus_idt[8] = { 1, 224, (PAGE_SHIFT)-2, 0,
+ 3, 192, PAGE_SHIFT, 0 };
+ struct xilly_idt_handle idt_handle;
+
+ INIT_LIST_HEAD(&tmpmem.to_kfree);
+ INIT_LIST_HEAD(&tmpmem.to_pagefree);
+ INIT_LIST_HEAD(&tmpmem.to_unmap);
+
+	/*
+	 * Writing the value 0x00000001 to the Endianness register signals
+	 * which endianness this processor is using, so the FPGA can swap
+	 * words as necessary.
+	 */
+
+ iowrite32(1, &endpoint->registers[fpga_endian_reg]);
+ mmiowb(); /* Writes below are affected by the one above. */
+
+ /* Bootstrap phase I: Allocate temporary message buffer */
+
+ endpoint->num_channels = 0;
+
+ rc = xilly_setupchannels(endpoint, &tmpmem, bogus_idt, 1);
+
+ if (rc)
+ goto failed_buffers;
+
+ /* Clear the message subsystem (and counter in particular) */
+ iowrite32(0x04, &endpoint->registers[fpga_msg_ctrl_reg]);
+ mmiowb();
+
+ endpoint->idtlen = -1;
+
+ smp_wmb();
+
+ /*
+ * Set DMA 32/64 bit mode, quiesce the device (?!) and get IDT
+ * buffer size.
+ */
+ iowrite32((u32) (endpoint->dma_using_dac & 0x0001),
+ &endpoint->registers[fpga_dma_control_reg]);
+ mmiowb();
+
+ wait_event_interruptible_timeout(endpoint->ep_wait,
+ (endpoint->idtlen >= 0),
+ XILLY_TIMEOUT);
+
+ if (endpoint->idtlen < 0) {
+ pr_err("xillybus: No response from FPGA. Aborting.\n");
+ rc = -ENODEV;
+ goto failed_quiesce;
+ }
+
+ /* Enable DMA */
+ iowrite32((u32) (0x0002 | (endpoint->dma_using_dac & 0x0001)),
+ &endpoint->registers[fpga_dma_control_reg]);
+ mmiowb();
+
+ /* Bootstrap phase II: Allocate buffer for IDT and obtain it */
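+	/*
+	 * bogus_idt[6] holds the log2 of the IDT buffer size; grow it
+	 * until the IDT fits.
+	 */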
+ while (endpoint->idtlen >= idtbuffersize) {
+ idtbuffersize *= 2;
+ bogus_idt[6]++;
+ }
+
+ endpoint->num_channels = 1;
+
+ rc = xilly_setupchannels(endpoint, &tmpmem, bogus_idt, 2);
+
+ if (rc)
+ goto failed_idt;
+
+ smp_wmb();
+
+ rc = xilly_obtain_idt(endpoint);
+
+ if (rc)
+ goto failed_idt;
+
+ xilly_scan_idt(endpoint, &idt_handle);
+
+ if (!idt_handle.chandesc) {
+ rc = -ENODEV;
+ goto failed_idt;
+ }
+ /* Bootstrap phase III: Allocate buffers according to IDT */
+
+ rc = xilly_setupchannels(endpoint,
+ &endpoint->cleanup,
+ idt_handle.chandesc,
+ idt_handle.entries);
+
+ if (rc)
+ goto failed_idt;
+
+	smp_wmb(); /* mutex_lock below should suffice, but won't hurt. */
+
+ /*
+ * endpoint is now completely configured. We put it on the list
+ * available to open() before registering the char device(s)
+ */
+
+ mutex_lock(&ep_list_lock);
+ list_add_tail(&endpoint->ep_list, &list_of_endpoints);
+ mutex_unlock(&ep_list_lock);
+
+ rc = xillybus_init_chrdev(endpoint, idt_handle.idt);
+
+ if (rc)
+ goto failed_chrdevs;
+
+ xillybus_do_cleanup(&tmpmem, endpoint);
+
+ return 0;
+
+failed_chrdevs:
+ mutex_lock(&ep_list_lock);
+ list_del(&endpoint->ep_list);
+ mutex_unlock(&ep_list_lock);
+
+failed_idt:
+	/* Quiesce the device. This time it really matters that we do it. */
+ rc = xilly_quiesce(endpoint);
+
+ if (rc)
+ return rc; /* FPGA may still DMA, so no release */
+
+ flush_workqueue(xillybus_wq);
+failed_quiesce:
+failed_buffers:
+ xillybus_do_cleanup(&tmpmem, endpoint);
+
+ return rc;
+}
+EXPORT_SYMBOL(xillybus_endpoint_discovery);
+
+void xillybus_endpoint_remove(struct xilly_endpoint *endpoint)
+{
+ xillybus_cleanup_chrdev(endpoint);
+
+ mutex_lock(&ep_list_lock);
+ list_del(&endpoint->ep_list);
+ mutex_unlock(&ep_list_lock);
+
+ xilly_quiesce(endpoint);
+
+ /*
+ * Flushing is done upon endpoint release to prevent access to memory
+ * just about to be released. This makes the quiesce complete.
+ */
+ flush_workqueue(xillybus_wq);
+}
+EXPORT_SYMBOL(xillybus_endpoint_remove);
+
+static int __init xillybus_init(void)
+{
+ int rc = 0;
+
+ mutex_init(&ep_list_lock);
+
+ xillybus_class = class_create(THIS_MODULE, xillyname);
+ if (IS_ERR(xillybus_class)) {
+ rc = PTR_ERR(xillybus_class);
+ pr_warn("xillybus: Failed to register class xillybus\n");
+
+ return rc;
+ }
+
+	xillybus_wq = alloc_workqueue(xillyname, 0, 0);
+	if (!xillybus_wq) {
+		class_destroy(xillybus_class);
+		return -ENOMEM;
+	}
+
+	return 0; /* Success */
+}
+
+static void __exit xillybus_exit(void)
+{
+ /* flush_workqueue() was called for each endpoint released */
+ destroy_workqueue(xillybus_wq);
+
+ class_destroy(xillybus_class);
+}
+
+module_init(xillybus_init);
+module_exit(xillybus_exit);
diff --git a/drivers/staging/xillybus/xillybus_of.c b/drivers/staging/xillybus/xillybus_of.c
new file mode 100644
index 0000000..92c2931
--- /dev/null
+++ b/drivers/staging/xillybus/xillybus_of.c
@@ -0,0 +1,212 @@
+/*
+ * linux/drivers/misc/xillybus_of.c
+ *
+ * Copyright 2011 Xillybus Ltd, http://xillybus.com
+ *
+ * Driver for the Xillybus FPGA/host framework using Open Firmware.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the smems of the GNU General Public License as published by
+ * it under the terms of the GNU General Public License as published by
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include "xillybus.h"
+
+MODULE_DESCRIPTION("Xillybus driver for Open Firmware");
+MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
+MODULE_VERSION("1.06");
+MODULE_ALIAS("xillybus_of");
+MODULE_LICENSE("GPL v2");
+
+static const char xillyname[] = "xillybus_of";
+
+/* Match table for of_platform binding */
+static struct of_device_id xillybus_of_match[] = {
+ { .compatible = "xlnx,xillybus-1.00.a", },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, xillybus_of_match);
+
+static void xilly_dma_sync_single_for_cpu_of(struct xilly_endpoint *ep,
+ dma_addr_t dma_handle,
+ size_t size,
+ int direction)
+{
+ dma_sync_single_for_cpu(ep->dev, dma_handle, size, direction);
+}
+
+static void xilly_dma_sync_single_for_device_of(struct xilly_endpoint *ep,
+ dma_addr_t dma_handle,
+ size_t size,
+ int direction)
+{
+ dma_sync_single_for_device(ep->dev, dma_handle, size, direction);
+}
+
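+/*
+ * Map a buffer for DMA and record it on the cleanup list, so it is
+ * unmapped when the endpoint is torn down.
+ */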
+static dma_addr_t xilly_map_single_of(struct xilly_cleanup *mem,
+ struct xilly_endpoint *ep,
+ void *ptr,
+ size_t size,
+ int direction
+ )
+{
+
+ dma_addr_t addr = 0;
+ struct xilly_dma *this;
+
+ this = kmalloc(sizeof(struct xilly_dma), GFP_KERNEL);
+ if (!this)
+ return 0;
+
+ addr = dma_map_single(ep->dev, ptr, size, direction);
+ this->direction = direction;
+
+ if (dma_mapping_error(ep->dev, addr)) {
+ kfree(this);
+ return 0;
+ }
+
+ this->dma_addr = addr;
+ this->dev = ep->dev;
+ this->size = size;
+
+ list_add_tail(&this->node, &mem->to_unmap);
+
+ return addr;
+}
+
+static void xilly_unmap_single_of(struct xilly_dma *entry)
+{
+ dma_unmap_single(entry->dev,
+ entry->dma_addr,
+ entry->size,
+ entry->direction);
+}
+
+static struct xilly_endpoint_hardware of_hw = {
+ .owner = THIS_MODULE,
+ .hw_sync_sgl_for_cpu = xilly_dma_sync_single_for_cpu_of,
+ .hw_sync_sgl_for_device = xilly_dma_sync_single_for_device_of,
+ .map_single = xilly_map_single_of,
+ .unmap_single = xilly_unmap_single_of
+};
+
+static int xilly_drv_probe(struct platform_device *op)
+{
+ struct device *dev = &op->dev;
+ struct xilly_endpoint *endpoint;
+ int rc = 0;
+ int irq;
+
+ endpoint = xillybus_init_endpoint(NULL, dev, &of_hw);
+
+ if (!endpoint)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, endpoint);
+
+ rc = of_address_to_resource(dev->of_node, 0, &endpoint->res);
+ if (rc) {
+ pr_warn("xillybus: Failed to obtain device tree "
+ "resource\n");
+ goto failed_request_regions;
+ }
+
+ if (!request_mem_region(endpoint->res.start,
+ resource_size(&endpoint->res), xillyname)) {
+ pr_err("xillybus: request_mem_region failed. Aborting.\n");
+ rc = -EBUSY;
+ goto failed_request_regions;
+ }
+
+ endpoint->registers = of_iomap(dev->of_node, 0);
+
+	if (!endpoint->registers) {
+		pr_err("xillybus: Failed to map I/O memory. Aborting.\n");
+		rc = -EIO;
+		goto failed_iomap0;
+	}
+
+ irq = irq_of_parse_and_map(dev->of_node, 0);
+
+ rc = request_irq(irq, xillybus_isr, 0, xillyname, endpoint);
+
+ if (rc) {
+ pr_err("xillybus: Failed to register IRQ handler. "
+ "Aborting.\n");
+ rc = -ENODEV;
+ goto failed_register_irq;
+ }
+
+ rc = xillybus_endpoint_discovery(endpoint);
+
+ if (!rc)
+ return 0;
+
+ free_irq(irq, endpoint);
+
+failed_register_irq:
+ iounmap(endpoint->registers);
+failed_iomap0:
+ release_mem_region(endpoint->res.start,
+ resource_size(&endpoint->res));
+
+failed_request_regions:
+ xillybus_do_cleanup(&endpoint->cleanup, endpoint);
+
+ kfree(endpoint);
+ return rc;
+}
+
+static int xilly_drv_remove(struct platform_device *op)
+{
+ struct device *dev = &op->dev;
+ struct xilly_endpoint *endpoint = dev_get_drvdata(dev);
+ int irq = irq_of_parse_and_map(dev->of_node, 0);
+
+ xillybus_endpoint_remove(endpoint);
+
+ free_irq(irq, endpoint);
+
+ iounmap(endpoint->registers);
+ release_mem_region(endpoint->res.start,
+ resource_size(&endpoint->res));
+
+ xillybus_do_cleanup(&endpoint->cleanup, endpoint);
+
+ kfree(endpoint);
+
+ return 0;
+}
+
+static struct platform_driver xillybus_platform_driver = {
+ .probe = xilly_drv_probe,
+ .remove = xilly_drv_remove,
+ .driver = {
+ .name = xillyname,
+ .owner = THIS_MODULE,
+ .of_match_table = xillybus_of_match,
+ },
+};
+
+static int __init xillybus_of_init(void)
+{
+ return platform_driver_register(&xillybus_platform_driver);
+}
+
+static void __exit xillybus_of_exit(void)
+{
+ platform_driver_unregister(&xillybus_platform_driver);
+}
+
+module_init(xillybus_of_init);
+module_exit(xillybus_of_exit);
diff --git a/drivers/staging/xillybus/xillybus_pcie.c b/drivers/staging/xillybus/xillybus_pcie.c
new file mode 100644
index 0000000..6701365
--- /dev/null
+++ b/drivers/staging/xillybus/xillybus_pcie.c
@@ -0,0 +1,262 @@
+/*
+ * linux/drivers/misc/xillybus_pcie.c
+ *
+ * Copyright 2011 Xillybus Ltd, http://xillybus.com
+ *
+ * Driver for the Xillybus FPGA/host framework using PCI Express.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the smems of the GNU General Public License as published by
+ * it under the terms of the GNU General Public License as published by
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/slab.h>
+#include "xillybus.h"
+
+MODULE_DESCRIPTION("Xillybus driver for PCIe");
+MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
+MODULE_VERSION("1.06");
+MODULE_ALIAS("xillybus_pcie");
+MODULE_LICENSE("GPL v2");
+
+#define PCI_DEVICE_ID_XILLYBUS 0xebeb
+
+#define PCI_VENDOR_ID_ALTERA 0x1172
+#define PCI_VENDOR_ID_ACTEL 0x11aa
+#define PCI_VENDOR_ID_LATTICE 0x1204
+
+static const char xillyname[] = "xillybus_pcie";
+
+static DEFINE_PCI_DEVICE_TABLE(xillyids) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_XILLYBUS)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ALTERA, PCI_DEVICE_ID_XILLYBUS)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ACTEL, PCI_DEVICE_ID_XILLYBUS)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LATTICE, PCI_DEVICE_ID_XILLYBUS)},
+ { /* End: all zeroes */ }
+};
+
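+/* Translate enum dma_data_direction values to the legacy PCI_DMA_* constants */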
+static int xilly_pci_direction(int direction)
+{
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ return PCI_DMA_TODEVICE;
+ case DMA_FROM_DEVICE:
+ return PCI_DMA_FROMDEVICE;
+ default:
+ return PCI_DMA_BIDIRECTIONAL;
+ }
+}
+
+static void xilly_dma_sync_single_for_cpu_pci(struct xilly_endpoint *ep,
+ dma_addr_t dma_handle,
+ size_t size,
+ int direction)
+{
+ pci_dma_sync_single_for_cpu(ep->pdev,
+ dma_handle,
+ size,
+ xilly_pci_direction(direction));
+}
+
+static void xilly_dma_sync_single_for_device_pci(struct xilly_endpoint *ep,
+ dma_addr_t dma_handle,
+ size_t size,
+ int direction)
+{
+ pci_dma_sync_single_for_device(ep->pdev,
+ dma_handle,
+ size,
+ xilly_pci_direction(direction));
+}
+
+/*
+ * Map either through the PCI DMA mapper or the non-PCI one. Behind the
+ * scenes exactly the same functions are called with the same parameters,
+ * but that can change.
+ */
+
+static dma_addr_t xilly_map_single_pci(struct xilly_cleanup *mem,
+ struct xilly_endpoint *ep,
+ void *ptr,
+ size_t size,
+ int direction
+ )
+{
+
+ dma_addr_t addr = 0;
+ struct xilly_dma *this;
+ int pci_direction;
+
+ this = kmalloc(sizeof(struct xilly_dma), GFP_KERNEL);
+ if (!this)
+ return 0;
+
+ pci_direction = xilly_pci_direction(direction);
+ addr = pci_map_single(ep->pdev, ptr, size, pci_direction);
+ this->direction = pci_direction;
+
+ if (pci_dma_mapping_error(ep->pdev, addr)) {
+ kfree(this);
+ return 0;
+ }
+
+ this->dma_addr = addr;
+ this->pdev = ep->pdev;
+ this->size = size;
+
+ list_add_tail(&this->node, &mem->to_unmap);
+
+ return addr;
+}
+
+static void xilly_unmap_single_pci(struct xilly_dma *entry)
+{
+ pci_unmap_single(entry->pdev,
+ entry->dma_addr,
+ entry->size,
+ entry->direction);
+}
+
+static struct xilly_endpoint_hardware pci_hw = {
+ .owner = THIS_MODULE,
+ .hw_sync_sgl_for_cpu = xilly_dma_sync_single_for_cpu_pci,
+ .hw_sync_sgl_for_device = xilly_dma_sync_single_for_device_pci,
+ .map_single = xilly_map_single_pci,
+ .unmap_single = xilly_unmap_single_pci
+};
+
+static int xilly_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct xilly_endpoint *endpoint;
+ int rc = 0;
+
+ endpoint = xillybus_init_endpoint(pdev, NULL, &pci_hw);
+
+ if (!endpoint)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, endpoint);
+
+ rc = pci_enable_device(pdev);
+
+ /* L0s has caused packet drops. No power saving, thank you. */
+
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
+
+ if (rc) {
+ pr_err("xillybus: pci_enable_device() failed. "
+ "Aborting.\n");
+ goto no_enable;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ pr_err("xillybus: Incorrect BAR configuration. "
+ "Aborting.\n");
+ rc = -ENODEV;
+ goto bad_bar;
+ }
+
+ rc = pci_request_regions(pdev, xillyname);
+ if (rc) {
+ pr_err("xillybus: pci_request_regions() failed. "
+ "Aborting.\n");
+ goto failed_request_regions;
+ }
+
+ endpoint->registers = pci_iomap(pdev, 0, 128);
+
+	if (!endpoint->registers) {
+		pr_err("xillybus: Failed to map BAR 0. Aborting.\n");
+		rc = -EIO;
+		goto failed_iomap0;
+	}
+
+ pci_set_master(pdev);
+
+ /* Set up a single MSI interrupt */
+ if (pci_enable_msi(pdev)) {
+ pr_err("xillybus: Failed to enable MSI interrupts. "
+ "Aborting.\n");
+ rc = -ENODEV;
+ goto failed_enable_msi;
+ }
+ rc = request_irq(pdev->irq, xillybus_isr, 0, xillyname, endpoint);
+
+ if (rc) {
+ pr_err("xillybus: Failed to register MSI handler. "
+ "Aborting.\n");
+ rc = -ENODEV;
+ goto failed_register_msi;
+ }
+
+	/*
+	 * In theory, setting the DMA mask to 64 bits and dma_using_dac = 1
+	 * is the right thing. But some ill-behaved PCIe implementations
+	 * report success even though the hardware drops those 64-bit PCIe
+	 * packets. So trust nobody and use 32-bit DMA addressing in any case.
+	 */
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
+ endpoint->dma_using_dac = 0;
+ else {
+ pr_err("xillybus: Failed to set DMA mask. "
+ "Aborting.\n");
+ rc = -ENODEV;
+ goto failed_dmamask;
+ }
+
+ rc = xillybus_endpoint_discovery(endpoint);
+
+ if (!rc)
+ return 0;
+
+failed_dmamask:
+ free_irq(pdev->irq, endpoint);
+failed_register_msi:
+ pci_disable_msi(pdev);
+failed_enable_msi:
+ /* pci_clear_master(pdev); Nobody else seems to do this */
+ pci_iounmap(pdev, endpoint->registers);
+failed_iomap0:
+ pci_release_regions(pdev);
+failed_request_regions:
+bad_bar:
+ pci_disable_device(pdev);
+no_enable:
+ xillybus_do_cleanup(&endpoint->cleanup, endpoint);
+
+ kfree(endpoint);
+ return rc;
+}
+
+static void xilly_remove(struct pci_dev *pdev)
+{
+ struct xilly_endpoint *endpoint = pci_get_drvdata(pdev);
+
+ xillybus_endpoint_remove(endpoint);
+
+ free_irq(pdev->irq, endpoint);
+
+ pci_disable_msi(pdev);
+ pci_iounmap(pdev, endpoint->registers);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ xillybus_do_cleanup(&endpoint->cleanup, endpoint);
+
+ kfree(endpoint);
+}
+
+MODULE_DEVICE_TABLE(pci, xillyids);
+
+static struct pci_driver xillybus_driver = {
+ .name = xillyname,
+ .id_table = xillyids,
+ .probe = xilly_probe,
+ .remove = xilly_remove,
+};
+
+module_pci_driver(xillybus_driver);
diff --git a/drivers/staging/zcache/Kconfig b/drivers/staging/zcache/Kconfig
deleted file mode 100644
index 2d7b2da..0000000
--- a/drivers/staging/zcache/Kconfig
+++ /dev/null
@@ -1,59 +0,0 @@
-config ZCACHE
- tristate "Dynamic compression of swap pages and clean pagecache pages"
- depends on CRYPTO=y && SWAP=y && CLEANCACHE && FRONTSWAP
- select CRYPTO_LZO
- default n
- help
- Zcache doubles RAM efficiency while providing a significant
- performance boosts on many workloads. Zcache uses
- compression and an in-kernel implementation of transcendent
- memory to store clean page cache pages and swap in RAM,
- providing a noticeable reduction in disk I/O.
-
-config ZCACHE_DEBUG
- bool "Enable debug statistics"
- depends on DEBUG_FS && ZCACHE
- default n
- help
- This is used to provide an debugfs directory with counters of
- how zcache is doing. You probably want to set this to 'N'.
-
-config RAMSTER
- tristate "Cross-machine RAM capacity sharing, aka peer-to-peer tmem"
- depends on CONFIGFS_FS=y && SYSFS=y && !HIGHMEM && ZCACHE
- depends on NET
- # must ensure struct page is 8-byte aligned
- select HAVE_ALIGNED_STRUCT_PAGE if !64BIT
- default n
- help
- RAMster allows RAM on other machines in a cluster to be utilized
- dynamically and symmetrically instead of swapping to a local swap
- disk, thus improving performance on memory-constrained workloads
- while minimizing total RAM across the cluster. RAMster, like
- zcache2, compresses swap pages into local RAM, but then remotifies
- the compressed pages to another node in the RAMster cluster.
-
-config RAMSTER_DEBUG
- bool "Enable ramster debug statistics"
- depends on DEBUG_FS && RAMSTER
- default n
- help
- This is used to provide an debugfs directory with counters of
- how ramster is doing. You probably want to set this to 'N'.
-
-# Depends on not-yet-upstreamed mm patches to export end_swap_bio_write and
-# __add_to_swap_cache, and implement __swap_writepage (which is swap_writepage
-# without the frontswap call. When these are in-tree, the dependency on
-# BROKEN can be removed
-config ZCACHE_WRITEBACK
- bool "Allow compressed swap pages to be writtenback to swap disk"
- depends on ZCACHE=y && BROKEN
- default n
- help
- Zcache caches compressed swap pages (and other data) in RAM which
- often improves performance by avoiding I/O's due to swapping.
- In some workloads with very long-lived large processes, it can
- instead reduce performance. Writeback decompresses zcache-compressed
- pages (in LRU order) when under memory pressure and writes them to
- the backing swap disk to ameliorate this problem. Policy driving
- writeback is still under development.
diff --git a/drivers/staging/zcache/Makefile b/drivers/staging/zcache/Makefile
deleted file mode 100644
index 845a5c2..0000000
--- a/drivers/staging/zcache/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-zcache-y := zcache-main.o tmem.o zbud.o
-zcache-$(CONFIG_ZCACHE_DEBUG) += debug.o
-zcache-$(CONFIG_RAMSTER_DEBUG) += ramster/debug.o
-zcache-$(CONFIG_RAMSTER) += ramster/ramster.o ramster/r2net.o
-zcache-$(CONFIG_RAMSTER) += ramster/nodemanager.o ramster/tcp.o
-zcache-$(CONFIG_RAMSTER) += ramster/heartbeat.o ramster/masklog.o
-
-obj-$(CONFIG_ZCACHE) += zcache.o
diff --git a/drivers/staging/zcache/TODO b/drivers/staging/zcache/TODO
deleted file mode 100644
index d0c18fa..0000000
--- a/drivers/staging/zcache/TODO
+++ /dev/null
@@ -1,64 +0,0 @@
-
-** ZCACHE PLAN FOR PROMOTION FROM STAGING **
-
-Last updated: Feb 13, 2013
-
-PLAN STEPS
-
-1. merge zcache and ramster to eliminate horrible code duplication
-2. converge on a predictable, writeback-capable allocator
-3. use debugfs instead of sysfs (per akpm feedback in 2011)
-4. zcache side of cleancache/mm WasActive patch
-5. zcache side of frontswap exclusive gets
-6. zcache must be able to writeback to physical swap disk
- (per Andrea Arcangeli feedback in 2011)
-7. implement adequate policy for writeback
-8. frontswap/cleancache work to allow zcache to be loaded
- as a module
-9. get core mm developer to review
-10. incorporate feedback from review
-11. get review/acks from 1-2 additional mm developers
-12. incorporate any feedback from additional mm reviews
-13. propose location/file-naming in mm tree
-14. repeat 9-13 as necessary until akpm is happy and merges
-
-STATUS/OWNERSHIP
-
-1. DONE as part of "new" zcache; in staging/zcache for 3.9
-2. DONE as part of "new" zcache (cf zbud.[ch]); in staging/zcache for 3.9
- (this was the core of the zcache1 vs zcache2 flail)
-3. DONE as part of "new" zcache; in staging/zcache for 3.9
-4. DONE (w/caveats) as part of "new" zcache; per cleancache performance
- feedback see https://lkml.org/lkml/2011/8/17/351, in
- staging/zcache for 3.9; dependent on proposed mm patch, see
- https://lkml.org/lkml/2012/1/25/300
-5. DONE as part of "new" zcache; performance tuning only,
- in staging/zcache for 3.9; dependent on frontswap patch
- merged in 3.7 (33c2a174)
-6. DONE (w/caveats), prototyped as part of "new" zcache, had
- bad memory leak; reimplemented to use sjennings clever tricks
- and proposed mm patches with new version in staging/zcache
- for 3.9, see https://lkml.org/lkml/2013/2/6/437;
-7. PROTOTYPED as part of "new" zcache; in staging/zcache for 3.9;
- needs more review (plan to discuss at LSF/MM 2013)
-9. IN PROGRESS; owned by Konrad Wilk; Mel Gorman provided
- great feedback in August 2012 (unfortunately of "old"
- zcache)
-11. NOT DONE; owned by Konrad Wilk and Bob Liu
-12. TBD (depends on quantity of feedback)
-13. PROPOSED; one suggestion proposed by Dan; needs more ideas/feedback
-14. TBD (depends on feedback)
-
-WHO NEEDS TO AGREE
-
-Not sure. Seth Jennings is now pursuing a separate but semi-parallel
-track. Akpm clearly has to approve for any mm merge to happen. Minchan
-Kim has interest but may be happy if/when zram is merged into mm. Konrad
-Wilk may be maintainer if akpm decides compression is maintainable
-separately from the rest of mm. (More LSF/MM 2013 discussion.)
-
-ZCACHE FUTURE NEW FUNCTIONALITY
-
-A. Support zsmalloc as an alternative high-density allocator
- (See https://lkml.org/lkml/2013/1/23/511)
-B. Possibly support three zbuds per pageframe when space allows
diff --git a/drivers/staging/zcache/debug.c b/drivers/staging/zcache/debug.c
deleted file mode 100644
index daa2691..0000000
--- a/drivers/staging/zcache/debug.c
+++ /dev/null
@@ -1,107 +0,0 @@
-#include <linux/atomic.h>
-#include "debug.h"
-
-#ifdef CONFIG_ZCACHE_DEBUG
-#include <linux/debugfs.h>
-
-ssize_t zcache_obj_count;
-ssize_t zcache_obj_count_max;
-ssize_t zcache_objnode_count;
-ssize_t zcache_objnode_count_max;
-u64 zcache_eph_zbytes;
-u64 zcache_eph_zbytes_max;
-u64 zcache_pers_zbytes_max;
-ssize_t zcache_eph_pageframes_max;
-ssize_t zcache_pers_pageframes_max;
-ssize_t zcache_pageframes_alloced;
-ssize_t zcache_pageframes_freed;
-ssize_t zcache_eph_zpages;
-ssize_t zcache_eph_zpages_max;
-ssize_t zcache_pers_zpages_max;
-ssize_t zcache_flush_total;
-ssize_t zcache_flush_found;
-ssize_t zcache_flobj_total;
-ssize_t zcache_flobj_found;
-ssize_t zcache_failed_eph_puts;
-ssize_t zcache_failed_pers_puts;
-ssize_t zcache_failed_getfreepages;
-ssize_t zcache_failed_alloc;
-ssize_t zcache_put_to_flush;
-ssize_t zcache_compress_poor;
-ssize_t zcache_mean_compress_poor;
-ssize_t zcache_eph_ate_tail;
-ssize_t zcache_eph_ate_tail_failed;
-ssize_t zcache_pers_ate_eph;
-ssize_t zcache_pers_ate_eph_failed;
-ssize_t zcache_evicted_eph_zpages;
-ssize_t zcache_evicted_eph_pageframes;
-ssize_t zcache_zero_filled_pages;
-ssize_t zcache_zero_filled_pages_max;
-
-#define ATTR(x) { .name = #x, .val = &zcache_##x, }
-static struct debug_entry {
- const char *name;
- ssize_t *val;
-} attrs[] = {
- ATTR(obj_count), ATTR(obj_count_max),
- ATTR(objnode_count), ATTR(objnode_count_max),
- ATTR(flush_total), ATTR(flush_found),
- ATTR(flobj_total), ATTR(flobj_found),
- ATTR(failed_eph_puts), ATTR(failed_pers_puts),
- ATTR(failed_getfreepages), ATTR(failed_alloc),
- ATTR(put_to_flush),
- ATTR(compress_poor), ATTR(mean_compress_poor),
- ATTR(eph_ate_tail), ATTR(eph_ate_tail_failed),
- ATTR(pers_ate_eph), ATTR(pers_ate_eph_failed),
- ATTR(evicted_eph_zpages), ATTR(evicted_eph_pageframes),
- ATTR(eph_pageframes), ATTR(eph_pageframes_max),
- ATTR(pers_pageframes), ATTR(pers_pageframes_max),
- ATTR(eph_zpages), ATTR(eph_zpages_max),
- ATTR(pers_zpages), ATTR(pers_zpages_max),
- ATTR(last_active_file_pageframes),
- ATTR(last_inactive_file_pageframes),
- ATTR(last_active_anon_pageframes),
- ATTR(last_inactive_anon_pageframes),
- ATTR(eph_nonactive_puts_ignored),
- ATTR(pers_nonactive_puts_ignored),
- ATTR(zero_filled_pages),
-#ifdef CONFIG_ZCACHE_WRITEBACK
- ATTR(outstanding_writeback_pages),
- ATTR(writtenback_pages),
-#endif
-};
-#undef ATTR
-int zcache_debugfs_init(void)
-{
- unsigned int i;
- struct dentry *root = debugfs_create_dir("zcache", NULL);
- if (root == NULL)
- return -ENXIO;
-
- for (i = 0; i < ARRAY_SIZE(attrs); i++)
- if (!debugfs_create_size_t(attrs[i].name, S_IRUGO, root, attrs[i].val))
- goto out;
-
- debugfs_create_u64("eph_zbytes", S_IRUGO, root, &zcache_eph_zbytes);
- debugfs_create_u64("eph_zbytes_max", S_IRUGO, root, &zcache_eph_zbytes_max);
- debugfs_create_u64("pers_zbytes", S_IRUGO, root, &zcache_pers_zbytes);
- debugfs_create_u64("pers_zbytes_max", S_IRUGO, root, &zcache_pers_zbytes_max);
-
- return 0;
-out:
- return -ENODEV;
-}
-
-/* developers can call this in case of ooms, e.g. to find memory leaks */
-void zcache_dump(void)
-{
- unsigned int i;
- for (i = 0; i < ARRAY_SIZE(attrs); i++)
- pr_debug("zcache: %s=%zu\n", attrs[i].name, *attrs[i].val);
-
- pr_debug("zcache: eph_zbytes=%llu\n", (unsigned long long)zcache_eph_zbytes);
- pr_debug("zcache: eph_zbytes_max=%llu\n", (unsigned long long)zcache_eph_zbytes_max);
- pr_debug("zcache: pers_zbytes=%llu\n", (unsigned long long)zcache_pers_zbytes);
- pr_debug("zcache: pers_zbytes_max=%llu\n", (unsigned long long)zcache_pers_zbytes_max);
-}
-#endif
diff --git a/drivers/staging/zcache/debug.h b/drivers/staging/zcache/debug.h
deleted file mode 100644
index 8088d28..0000000
--- a/drivers/staging/zcache/debug.h
+++ /dev/null
@@ -1,305 +0,0 @@
-#include <linux/bug.h>
-
-#ifdef CONFIG_ZCACHE_DEBUG
-
-/* we try to keep these statistics SMP-consistent */
-extern ssize_t zcache_obj_count;
-static atomic_t zcache_obj_atomic = ATOMIC_INIT(0);
-extern ssize_t zcache_obj_count_max;
-static inline void inc_zcache_obj_count(void)
-{
- zcache_obj_count = atomic_inc_return(&zcache_obj_atomic);
- if (zcache_obj_count > zcache_obj_count_max)
- zcache_obj_count_max = zcache_obj_count;
-}
-static inline void dec_zcache_obj_count(void)
-{
- zcache_obj_count = atomic_dec_return(&zcache_obj_atomic);
- BUG_ON(zcache_obj_count < 0);
-};
-extern ssize_t zcache_objnode_count;
-static atomic_t zcache_objnode_atomic = ATOMIC_INIT(0);
-extern ssize_t zcache_objnode_count_max;
-static inline void inc_zcache_objnode_count(void)
-{
- zcache_objnode_count = atomic_inc_return(&zcache_objnode_atomic);
- if (zcache_objnode_count > zcache_objnode_count_max)
- zcache_objnode_count_max = zcache_objnode_count;
-};
-static inline void dec_zcache_objnode_count(void)
-{
- zcache_objnode_count = atomic_dec_return(&zcache_objnode_atomic);
- BUG_ON(zcache_objnode_count < 0);
-};
-extern u64 zcache_eph_zbytes;
-static atomic_long_t zcache_eph_zbytes_atomic = ATOMIC_INIT(0);
-extern u64 zcache_eph_zbytes_max;
-static inline void inc_zcache_eph_zbytes(unsigned clen)
-{
- zcache_eph_zbytes = atomic_long_add_return(clen, &zcache_eph_zbytes_atomic);
- if (zcache_eph_zbytes > zcache_eph_zbytes_max)
- zcache_eph_zbytes_max = zcache_eph_zbytes;
-};
-static inline void dec_zcache_eph_zbytes(unsigned zsize)
-{
- zcache_eph_zbytes = atomic_long_sub_return(zsize, &zcache_eph_zbytes_atomic);
-};
-extern u64 zcache_pers_zbytes;
-static atomic_long_t zcache_pers_zbytes_atomic = ATOMIC_INIT(0);
-extern u64 zcache_pers_zbytes_max;
-static inline void inc_zcache_pers_zbytes(unsigned clen)
-{
- zcache_pers_zbytes = atomic_long_add_return(clen, &zcache_pers_zbytes_atomic);
- if (zcache_pers_zbytes > zcache_pers_zbytes_max)
- zcache_pers_zbytes_max = zcache_pers_zbytes;
-}
-static inline void dec_zcache_pers_zbytes(unsigned zsize)
-{
- zcache_pers_zbytes = atomic_long_sub_return(zsize, &zcache_pers_zbytes_atomic);
-}
-extern ssize_t zcache_eph_pageframes;
-static atomic_t zcache_eph_pageframes_atomic = ATOMIC_INIT(0);
-extern ssize_t zcache_eph_pageframes_max;
-static inline void inc_zcache_eph_pageframes(void)
-{
- zcache_eph_pageframes = atomic_inc_return(&zcache_eph_pageframes_atomic);
- if (zcache_eph_pageframes > zcache_eph_pageframes_max)
- zcache_eph_pageframes_max = zcache_eph_pageframes;
-};
-static inline void dec_zcache_eph_pageframes(void)
-{
- zcache_eph_pageframes = atomic_dec_return(&zcache_eph_pageframes_atomic);
-};
-extern ssize_t zcache_pers_pageframes;
-static atomic_t zcache_pers_pageframes_atomic = ATOMIC_INIT(0);
-extern ssize_t zcache_pers_pageframes_max;
-static inline void inc_zcache_pers_pageframes(void)
-{
- zcache_pers_pageframes = atomic_inc_return(&zcache_pers_pageframes_atomic);
- if (zcache_pers_pageframes > zcache_pers_pageframes_max)
- zcache_pers_pageframes_max = zcache_pers_pageframes;
-}
-static inline void dec_zcache_pers_pageframes(void)
-{
- zcache_pers_pageframes = atomic_dec_return(&zcache_pers_pageframes_atomic);
-}
-extern ssize_t zcache_pageframes_alloced;
-static atomic_t zcache_pageframes_alloced_atomic = ATOMIC_INIT(0);
-static inline void inc_zcache_pageframes_alloced(void)
-{
- zcache_pageframes_alloced = atomic_inc_return(&zcache_pageframes_alloced_atomic);
-};
-extern ssize_t zcache_pageframes_freed;
-static atomic_t zcache_pageframes_freed_atomic = ATOMIC_INIT(0);
-static inline void inc_zcache_pageframes_freed(void)
-{
- zcache_pageframes_freed = atomic_inc_return(&zcache_pageframes_freed_atomic);
-}
-extern ssize_t zcache_eph_zpages;
-static atomic_t zcache_eph_zpages_atomic = ATOMIC_INIT(0);
-extern ssize_t zcache_eph_zpages_max;
-static inline void inc_zcache_eph_zpages(void)
-{
- zcache_eph_zpages = atomic_inc_return(&zcache_eph_zpages_atomic);
- if (zcache_eph_zpages > zcache_eph_zpages_max)
- zcache_eph_zpages_max = zcache_eph_zpages;
-}
-static inline void dec_zcache_eph_zpages(unsigned zpages)
-{
- zcache_eph_zpages = atomic_sub_return(zpages, &zcache_eph_zpages_atomic);
-}
-extern ssize_t zcache_pers_zpages;
-static atomic_t zcache_pers_zpages_atomic = ATOMIC_INIT(0);
-extern ssize_t zcache_pers_zpages_max;
-static inline void inc_zcache_pers_zpages(void)
-{
- zcache_pers_zpages = atomic_inc_return(&zcache_pers_zpages_atomic);
- if (zcache_pers_zpages > zcache_pers_zpages_max)
- zcache_pers_zpages_max = zcache_pers_zpages;
-}
-static inline void dec_zcache_pers_zpages(unsigned zpages)
-{
- zcache_pers_zpages = atomic_sub_return(zpages, &zcache_pers_zpages_atomic);
-}
-
-extern ssize_t zcache_zero_filled_pages;
-static atomic_t zcache_zero_filled_pages_atomic = ATOMIC_INIT(0);
-extern ssize_t zcache_zero_filled_pages_max;
-static inline void inc_zcache_zero_filled_pages(void)
-{
- zcache_zero_filled_pages = atomic_inc_return(
- &zcache_zero_filled_pages_atomic);
- if (zcache_zero_filled_pages > zcache_zero_filled_pages_max)
- zcache_zero_filled_pages_max = zcache_zero_filled_pages;
-}
-static inline void dec_zcache_zero_filled_pages(void)
-{
- zcache_zero_filled_pages = atomic_dec_return(
- &zcache_zero_filled_pages_atomic);
-}
-static inline unsigned long curr_pageframes_count(void)
-{
- return zcache_pageframes_alloced -
- atomic_read(&zcache_pageframes_freed_atomic) -
- atomic_read(&zcache_eph_pageframes_atomic) -
- atomic_read(&zcache_pers_pageframes_atomic);
-};
-/* but for the rest of these, counting races are ok */
-extern ssize_t zcache_flush_total;
-extern ssize_t zcache_flush_found;
-extern ssize_t zcache_flobj_total;
-extern ssize_t zcache_flobj_found;
-extern ssize_t zcache_failed_eph_puts;
-extern ssize_t zcache_failed_pers_puts;
-extern ssize_t zcache_failed_getfreepages;
-extern ssize_t zcache_failed_alloc;
-extern ssize_t zcache_put_to_flush;
-extern ssize_t zcache_compress_poor;
-extern ssize_t zcache_mean_compress_poor;
-extern ssize_t zcache_eph_ate_tail;
-extern ssize_t zcache_eph_ate_tail_failed;
-extern ssize_t zcache_pers_ate_eph;
-extern ssize_t zcache_pers_ate_eph_failed;
-extern ssize_t zcache_evicted_eph_zpages;
-extern ssize_t zcache_evicted_eph_pageframes;
-
-extern ssize_t zcache_last_active_file_pageframes;
-extern ssize_t zcache_last_inactive_file_pageframes;
-extern ssize_t zcache_last_active_anon_pageframes;
-extern ssize_t zcache_last_inactive_anon_pageframes;
-static ssize_t zcache_eph_nonactive_puts_ignored;
-static ssize_t zcache_pers_nonactive_puts_ignored;
-#ifdef CONFIG_ZCACHE_WRITEBACK
-extern ssize_t zcache_writtenback_pages;
-extern ssize_t zcache_outstanding_writeback_pages;
-#endif
-
-static inline void inc_zcache_flush_total(void)
-{
- zcache_flush_total++;
-};
-static inline void inc_zcache_flush_found(void)
-{
- zcache_flush_found++;
-};
-static inline void inc_zcache_flobj_total(void)
-{
- zcache_flobj_total++;
-};
-static inline void inc_zcache_flobj_found(void)
-{
- zcache_flobj_found++;
-};
-static inline void inc_zcache_failed_eph_puts(void)
-{
- zcache_failed_eph_puts++;
-};
-static inline void inc_zcache_failed_pers_puts(void)
-{
- zcache_failed_pers_puts++;
-};
-static inline void inc_zcache_failed_getfreepages(void)
-{
- zcache_failed_getfreepages++;
-};
-static inline void inc_zcache_failed_alloc(void)
-{
- zcache_failed_alloc++;
-};
-static inline void inc_zcache_put_to_flush(void)
-{
- zcache_put_to_flush++;
-};
-static inline void inc_zcache_compress_poor(void)
-{
- zcache_compress_poor++;
-};
-static inline void inc_zcache_mean_compress_poor(void)
-{
- zcache_mean_compress_poor++;
-};
-static inline void inc_zcache_eph_ate_tail(void)
-{
- zcache_eph_ate_tail++;
-};
-static inline void inc_zcache_eph_ate_tail_failed(void)
-{
- zcache_eph_ate_tail_failed++;
-};
-static inline void inc_zcache_pers_ate_eph(void)
-{
- zcache_pers_ate_eph++;
-};
-static inline void inc_zcache_pers_ate_eph_failed(void)
-{
- zcache_pers_ate_eph_failed++;
-};
-static inline void inc_zcache_evicted_eph_zpages(unsigned zpages)
-{
- zcache_evicted_eph_zpages += zpages;
-};
-static inline void inc_zcache_evicted_eph_pageframes(void)
-{
- zcache_evicted_eph_pageframes++;
-};
-
-static inline void inc_zcache_eph_nonactive_puts_ignored(void)
-{
- zcache_eph_nonactive_puts_ignored++;
-};
-static inline void inc_zcache_pers_nonactive_puts_ignored(void)
-{
- zcache_pers_nonactive_puts_ignored++;
-};
-
-int zcache_debugfs_init(void);
-#else
-static inline void inc_zcache_obj_count(void) { };
-static inline void dec_zcache_obj_count(void) { };
-static inline void inc_zcache_objnode_count(void) { };
-static inline void dec_zcache_objnode_count(void) { };
-static inline void inc_zcache_eph_zbytes(unsigned clen) { };
-static inline void dec_zcache_eph_zbytes(unsigned zsize) { };
-static inline void inc_zcache_pers_zbytes(unsigned clen) { };
-static inline void dec_zcache_pers_zbytes(unsigned zsize) { };
-static inline void inc_zcache_eph_pageframes(void) { };
-static inline void dec_zcache_eph_pageframes(void) { };
-static inline void inc_zcache_pers_pageframes(void) { };
-static inline void dec_zcache_pers_pageframes(void) { };
-static inline void inc_zcache_pageframes_alloced(void) { };
-static inline void inc_zcache_pageframes_freed(void) { };
-static inline void inc_zcache_eph_zpages(void) { };
-static inline void dec_zcache_eph_zpages(unsigned zpages) { };
-static inline void inc_zcache_pers_zpages(void) { };
-static inline void dec_zcache_pers_zpages(unsigned zpages) { };
-static inline void inc_zcache_zero_filled_pages(void) { };
-static inline void dec_zcache_zero_filled_pages(void) { };
-static inline unsigned long curr_pageframes_count(void)
-{
- return 0;
-};
-static inline int zcache_debugfs_init(void)
-{
- return 0;
-};
-static inline void inc_zcache_flush_total(void) { };
-static inline void inc_zcache_flush_found(void) { };
-static inline void inc_zcache_flobj_total(void) { };
-static inline void inc_zcache_flobj_found(void) { };
-static inline void inc_zcache_failed_eph_puts(void) { };
-static inline void inc_zcache_failed_pers_puts(void) { };
-static inline void inc_zcache_failed_getfreepages(void) { };
-static inline void inc_zcache_failed_alloc(void) { };
-static inline void inc_zcache_put_to_flush(void) { };
-static inline void inc_zcache_compress_poor(void) { };
-static inline void inc_zcache_mean_compress_poor(void) { };
-static inline void inc_zcache_eph_ate_tail(void) { };
-static inline void inc_zcache_eph_ate_tail_failed(void) { };
-static inline void inc_zcache_pers_ate_eph(void) { };
-static inline void inc_zcache_pers_ate_eph_failed(void) { };
-static inline void inc_zcache_evicted_eph_zpages(unsigned zpages) { };
-static inline void inc_zcache_evicted_eph_pageframes(void) { };
-
-static inline void inc_zcache_eph_nonactive_puts_ignored(void) { };
-static inline void inc_zcache_pers_nonactive_puts_ignored(void) { };
-#endif
diff --git a/drivers/staging/zcache/ramster.h b/drivers/staging/zcache/ramster.h
deleted file mode 100644
index a858666..0000000
--- a/drivers/staging/zcache/ramster.h
+++ /dev/null
@@ -1,59 +0,0 @@
-
-/*
- * zcache/ramster.h
- *
- * Placeholder to resolve ramster references when !CONFIG_RAMSTER
- * Real ramster.h lives in ramster subdirectory.
- *
- * Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
- */
-
-#ifndef _ZCACHE_RAMSTER_H_
-#define _ZCACHE_RAMSTER_H_
-
-#ifdef CONFIG_RAMSTER
-#include "ramster/ramster.h"
-#else
-static inline void ramster_init(bool x, bool y, bool z, bool w)
-{
-}
-
-static inline void ramster_register_pamops(struct tmem_pamops *p)
-{
-}
-
-static inline int ramster_remotify_pageframe(bool b)
-{
- return 0;
-}
-
-static inline void *ramster_pampd_free(void *v, struct tmem_pool *p,
- struct tmem_oid *o, uint32_t u, bool b)
-{
- return NULL;
-}
-
-static inline int ramster_do_preload_flnode(struct tmem_pool *p)
-{
- return -1;
-}
-
-static inline bool pampd_is_remote(void *v)
-{
- return false;
-}
-
-static inline void ramster_count_foreign_pages(bool b, int i)
-{
-}
-
-static inline void ramster_cpu_up(int cpu)
-{
-}
-
-static inline void ramster_cpu_down(int cpu)
-{
-}
-#endif
-
-#endif /* _ZCACHE_RAMSTER_H */
diff --git a/drivers/staging/zcache/ramster/debug.c b/drivers/staging/zcache/ramster/debug.c
deleted file mode 100644
index 5b26ee9..0000000
--- a/drivers/staging/zcache/ramster/debug.c
+++ /dev/null
@@ -1,68 +0,0 @@
-#include <linux/atomic.h>
-#include "debug.h"
-
-ssize_t ramster_foreign_eph_pages;
-ssize_t ramster_foreign_pers_pages;
-#ifdef CONFIG_DEBUG_FS
-#include <linux/debugfs.h>
-
-ssize_t ramster_eph_pages_remoted;
-ssize_t ramster_pers_pages_remoted;
-ssize_t ramster_eph_pages_remote_failed;
-ssize_t ramster_pers_pages_remote_failed;
-ssize_t ramster_remote_eph_pages_succ_get;
-ssize_t ramster_remote_pers_pages_succ_get;
-ssize_t ramster_remote_eph_pages_unsucc_get;
-ssize_t ramster_remote_pers_pages_unsucc_get;
-ssize_t ramster_pers_pages_remote_nomem;
-ssize_t ramster_remote_objects_flushed;
-ssize_t ramster_remote_object_flushes_failed;
-ssize_t ramster_remote_pages_flushed;
-ssize_t ramster_remote_page_flushes_failed;
-
-#define ATTR(x) { .name = #x, .val = &ramster_##x, }
-static struct debug_entry {
- const char *name;
- ssize_t *val;
-} attrs[] = {
- ATTR(eph_pages_remoted),
- ATTR(pers_pages_remoted),
- ATTR(eph_pages_remote_failed),
- ATTR(pers_pages_remote_failed),
- ATTR(remote_eph_pages_succ_get),
- ATTR(remote_pers_pages_succ_get),
- ATTR(remote_eph_pages_unsucc_get),
- ATTR(remote_pers_pages_unsucc_get),
- ATTR(pers_pages_remote_nomem),
- ATTR(remote_objects_flushed),
- ATTR(remote_pages_flushed),
- ATTR(remote_object_flushes_failed),
- ATTR(remote_page_flushes_failed),
- ATTR(foreign_eph_pages),
- ATTR(foreign_eph_pages_max),
- ATTR(foreign_pers_pages),
- ATTR(foreign_pers_pages_max),
-};
-#undef ATTR
-
-int ramster_debugfs_init(void)
-{
- int i;
- struct dentry *root = debugfs_create_dir("ramster", NULL);
- if (root == NULL)
- return -ENXIO;
-
- for (i = 0; i < ARRAY_SIZE(attrs); i++)
- if (!debugfs_create_size_t(attrs[i].name,
- S_IRUGO, root, attrs[i].val))
- goto out;
- return 0;
-out:
- return -ENODEV;
-}
-#else
-static inline int ramster_debugfs_init(void)
-{
- return 0;
-}
-#endif
diff --git a/drivers/staging/zcache/ramster/debug.h b/drivers/staging/zcache/ramster/debug.h
deleted file mode 100644
index 5ffab50..0000000
--- a/drivers/staging/zcache/ramster/debug.h
+++ /dev/null
@@ -1,145 +0,0 @@
-#include <linux/bug.h>
-
-#ifdef CONFIG_RAMSTER_DEBUG
-
-extern long ramster_flnodes;
-static atomic_t ramster_flnodes_atomic = ATOMIC_INIT(0);
-static unsigned long ramster_flnodes_max;
-static inline void inc_ramster_flnodes(void)
-{
- ramster_flnodes = atomic_inc_return(&ramster_flnodes_atomic);
- if (ramster_flnodes > ramster_flnodes_max)
- ramster_flnodes_max = ramster_flnodes;
-}
-static inline void dec_ramster_flnodes(void)
-{
- ramster_flnodes = atomic_dec_return(&ramster_flnodes_atomic);
-}
-extern ssize_t ramster_foreign_eph_pages;
-static atomic_t ramster_foreign_eph_pages_atomic = ATOMIC_INIT(0);
-static ssize_t ramster_foreign_eph_pages_max;
-static inline void inc_ramster_foreign_eph_pages(void)
-{
- ramster_foreign_eph_pages = atomic_inc_return(
- &ramster_foreign_eph_pages_atomic);
- if (ramster_foreign_eph_pages > ramster_foreign_eph_pages_max)
- ramster_foreign_eph_pages_max = ramster_foreign_eph_pages;
-}
-static inline void dec_ramster_foreign_eph_pages(void)
-{
- ramster_foreign_eph_pages = atomic_dec_return(
- &ramster_foreign_eph_pages_atomic);
-}
-extern ssize_t ramster_foreign_pers_pages;
-static atomic_t ramster_foreign_pers_pages_atomic = ATOMIC_INIT(0);
-static ssize_t ramster_foreign_pers_pages_max;
-static inline void inc_ramster_foreign_pers_pages(void)
-{
- ramster_foreign_pers_pages = atomic_inc_return(
- &ramster_foreign_pers_pages_atomic);
- if (ramster_foreign_pers_pages > ramster_foreign_pers_pages_max)
- ramster_foreign_pers_pages_max = ramster_foreign_pers_pages;
-}
-static inline void dec_ramster_foreign_pers_pages(void)
-{
- ramster_foreign_pers_pages = atomic_dec_return(
- &ramster_foreign_pers_pages_atomic);
-}
-
-extern ssize_t ramster_eph_pages_remoted;
-extern ssize_t ramster_pers_pages_remoted;
-extern ssize_t ramster_eph_pages_remote_failed;
-extern ssize_t ramster_pers_pages_remote_failed;
-extern ssize_t ramster_remote_eph_pages_succ_get;
-extern ssize_t ramster_remote_pers_pages_succ_get;
-extern ssize_t ramster_remote_eph_pages_unsucc_get;
-extern ssize_t ramster_remote_pers_pages_unsucc_get;
-extern ssize_t ramster_pers_pages_remote_nomem;
-extern ssize_t ramster_remote_objects_flushed;
-extern ssize_t ramster_remote_object_flushes_failed;
-extern ssize_t ramster_remote_pages_flushed;
-extern ssize_t ramster_remote_page_flushes_failed;
-
-int ramster_debugfs_init(void);
-
-static inline void inc_ramster_eph_pages_remoted(void)
-{
- ramster_eph_pages_remoted++;
-};
-static inline void inc_ramster_pers_pages_remoted(void)
-{
- ramster_pers_pages_remoted++;
-};
-static inline void inc_ramster_eph_pages_remote_failed(void)
-{
- ramster_eph_pages_remote_failed++;
-};
-static inline void inc_ramster_pers_pages_remote_failed(void)
-{
- ramster_pers_pages_remote_failed++;
-};
-static inline void inc_ramster_remote_eph_pages_succ_get(void)
-{
- ramster_remote_eph_pages_succ_get++;
-};
-static inline void inc_ramster_remote_pers_pages_succ_get(void)
-{
- ramster_remote_pers_pages_succ_get++;
-};
-static inline void inc_ramster_remote_eph_pages_unsucc_get(void)
-{
- ramster_remote_eph_pages_unsucc_get++;
-};
-static inline void inc_ramster_remote_pers_pages_unsucc_get(void)
-{
- ramster_remote_pers_pages_unsucc_get++;
-};
-static inline void inc_ramster_pers_pages_remote_nomem(void)
-{
- ramster_pers_pages_remote_nomem++;
-};
-static inline void inc_ramster_remote_objects_flushed(void)
-{
- ramster_remote_objects_flushed++;
-};
-static inline void inc_ramster_remote_object_flushes_failed(void)
-{
- ramster_remote_object_flushes_failed++;
-};
-static inline void inc_ramster_remote_pages_flushed(void)
-{
- ramster_remote_pages_flushed++;
-};
-static inline void inc_ramster_remote_page_flushes_failed(void)
-{
- ramster_remote_page_flushes_failed++;
-};
-
-#else
-
-static inline void inc_ramster_flnodes(void) { };
-static inline void dec_ramster_flnodes(void) { };
-static inline void inc_ramster_foreign_eph_pages(void) { };
-static inline void dec_ramster_foreign_eph_pages(void) { };
-static inline void inc_ramster_foreign_pers_pages(void) { };
-static inline void dec_ramster_foreign_pers_pages(void) { };
-
-static inline void inc_ramster_eph_pages_remoted(void) { };
-static inline void inc_ramster_pers_pages_remoted(void) { };
-static inline void inc_ramster_eph_pages_remote_failed(void) { };
-static inline void inc_ramster_pers_pages_remote_failed(void) { };
-static inline void inc_ramster_remote_eph_pages_succ_get(void) { };
-static inline void inc_ramster_remote_pers_pages_succ_get(void) { };
-static inline void inc_ramster_remote_eph_pages_unsucc_get(void) { };
-static inline void inc_ramster_remote_pers_pages_unsucc_get(void) { };
-static inline void inc_ramster_pers_pages_remote_nomem(void) { };
-static inline void inc_ramster_remote_objects_flushed(void) { };
-static inline void inc_ramster_remote_object_flushes_failed(void) { };
-static inline void inc_ramster_remote_pages_flushed(void) { };
-static inline void inc_ramster_remote_page_flushes_failed(void) { };
-
-static inline int ramster_debugfs_init(void)
-{
- return 0;
-}
-#endif
diff --git a/drivers/staging/zcache/ramster/heartbeat.c b/drivers/staging/zcache/ramster/heartbeat.c
deleted file mode 100644
index 75d3fe8..0000000
--- a/drivers/staging/zcache/ramster/heartbeat.c
+++ /dev/null
@@ -1,462 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8; -*-
- * vim: noexpandtab sw=8 ts=8 sts=0:
- *
- * Copyright (C) 2004, 2005, 2012 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/configfs.h>
-
-#include "heartbeat.h"
-#include "tcp.h"
-#include "nodemanager.h"
-
-#include "masklog.h"
-
-/*
- * The first heartbeat pass had one global thread that would serialize all hb
- * callback calls. This global serializing sem should only be removed once
- * we've made sure that all callees can deal with being called concurrently
- * from multiple hb region threads.
- */
-static DECLARE_RWSEM(r2hb_callback_sem);
-
-/*
- * multiple hb threads are watching multiple regions. A node is live
- * whenever any of the threads sees activity from the node in its region.
- */
-static DEFINE_SPINLOCK(r2hb_live_lock);
-static unsigned long r2hb_live_node_bitmap[BITS_TO_LONGS(R2NM_MAX_NODES)];
-
-static struct r2hb_callback {
- struct list_head list;
-} r2hb_callbacks[R2HB_NUM_CB];
-
-enum r2hb_heartbeat_modes {
- R2HB_HEARTBEAT_LOCAL = 0,
- R2HB_HEARTBEAT_GLOBAL,
- R2HB_HEARTBEAT_NUM_MODES,
-};
-
-char *r2hb_heartbeat_mode_desc[R2HB_HEARTBEAT_NUM_MODES] = {
- "local", /* R2HB_HEARTBEAT_LOCAL */
- "global", /* R2HB_HEARTBEAT_GLOBAL */
-};
-
-unsigned int r2hb_dead_threshold = R2HB_DEFAULT_DEAD_THRESHOLD;
-unsigned int r2hb_heartbeat_mode = R2HB_HEARTBEAT_LOCAL;
-
-/* Only sets a new threshold if there are no active regions.
- *
- * No locking or otherwise interesting code is required for reading
- * r2hb_dead_threshold as it can't change once regions are active and
- * it's not interesting to anyone until then anyway. */
-static void r2hb_dead_threshold_set(unsigned int threshold)
-{
- if (threshold > R2HB_MIN_DEAD_THRESHOLD) {
- spin_lock(&r2hb_live_lock);
- r2hb_dead_threshold = threshold;
- spin_unlock(&r2hb_live_lock);
- }
-}
-
-static int r2hb_global_hearbeat_mode_set(unsigned int hb_mode)
-{
- int ret = -1;
-
- if (hb_mode < R2HB_HEARTBEAT_NUM_MODES) {
- spin_lock(&r2hb_live_lock);
- r2hb_heartbeat_mode = hb_mode;
- ret = 0;
- spin_unlock(&r2hb_live_lock);
- }
-
- return ret;
-}
-
-void r2hb_exit(void)
-{
-}
-
-int r2hb_init(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(r2hb_callbacks); i++)
- INIT_LIST_HEAD(&r2hb_callbacks[i].list);
-
- memset(r2hb_live_node_bitmap, 0, sizeof(r2hb_live_node_bitmap));
-
- return 0;
-}
-
-/* if we're already in a callback then we're already serialized by the sem */
-static void r2hb_fill_node_map_from_callback(unsigned long *map,
- unsigned bytes)
-{
- BUG_ON(bytes < (BITS_TO_LONGS(R2NM_MAX_NODES) * sizeof(unsigned long)));
-
- memcpy(map, &r2hb_live_node_bitmap, bytes);
-}
-
-/*
- * get a map of all nodes that are heartbeating in any regions
- */
-void r2hb_fill_node_map(unsigned long *map, unsigned bytes)
-{
- /* callers want to serialize this map and callbacks so that they
- * can trust that they don't miss nodes coming to the party */
- down_read(&r2hb_callback_sem);
- spin_lock(&r2hb_live_lock);
- r2hb_fill_node_map_from_callback(map, bytes);
- spin_unlock(&r2hb_live_lock);
- up_read(&r2hb_callback_sem);
-}
-EXPORT_SYMBOL_GPL(r2hb_fill_node_map);
-
-/*
- * heartbeat configfs bits. The heartbeat set is a default set under
- * the cluster set in nodemanager.c.
- */
-
-/* heartbeat set */
-
-struct r2hb_hb_group {
- struct config_group hs_group;
- /* some stuff? */
-};
-
-static struct r2hb_hb_group *to_r2hb_hb_group(struct config_group *group)
-{
- return group ?
- container_of(group, struct r2hb_hb_group, hs_group)
- : NULL;
-}
-
-static struct config_item r2hb_config_item;
-
-static struct config_item *r2hb_hb_group_make_item(struct config_group *group,
- const char *name)
-{
- int ret;
-
- if (strlen(name) > R2HB_MAX_REGION_NAME_LEN) {
- ret = -ENAMETOOLONG;
- goto free;
- }
-
- config_item_put(&r2hb_config_item);
-
- return &r2hb_config_item;
-free:
- return ERR_PTR(ret);
-}
-
-static void r2hb_hb_group_drop_item(struct config_group *group,
- struct config_item *item)
-{
- if (r2hb_global_heartbeat_active()) {
- pr_notice("ramster: Heartbeat %s on region %s (%s)\n",
- "stopped/aborted", config_item_name(item),
- "no region");
- }
-
- config_item_put(item);
-}
-
-struct r2hb_hb_group_attribute {
- struct configfs_attribute attr;
- ssize_t (*show)(struct r2hb_hb_group *, char *);
- ssize_t (*store)(struct r2hb_hb_group *, const char *, size_t);
-};
-
-static ssize_t r2hb_hb_group_show(struct config_item *item,
- struct configfs_attribute *attr,
- char *page)
-{
- struct r2hb_hb_group *reg = to_r2hb_hb_group(to_config_group(item));
- struct r2hb_hb_group_attribute *r2hb_hb_group_attr =
- container_of(attr, struct r2hb_hb_group_attribute, attr);
- ssize_t ret = 0;
-
- if (r2hb_hb_group_attr->show)
- ret = r2hb_hb_group_attr->show(reg, page);
- return ret;
-}
-
-static ssize_t r2hb_hb_group_store(struct config_item *item,
- struct configfs_attribute *attr,
- const char *page, size_t count)
-{
- struct r2hb_hb_group *reg = to_r2hb_hb_group(to_config_group(item));
- struct r2hb_hb_group_attribute *r2hb_hb_group_attr =
- container_of(attr, struct r2hb_hb_group_attribute, attr);
- ssize_t ret = -EINVAL;
-
- if (r2hb_hb_group_attr->store)
- ret = r2hb_hb_group_attr->store(reg, page, count);
- return ret;
-}
-
-static ssize_t r2hb_hb_group_threshold_show(struct r2hb_hb_group *group,
- char *page)
-{
- return sprintf(page, "%u\n", r2hb_dead_threshold);
-}
-
-static ssize_t r2hb_hb_group_threshold_store(struct r2hb_hb_group *group,
- const char *page,
- size_t count)
-{
- unsigned long tmp;
- char *p = (char *)page;
- int err;
-
- err = kstrtoul(p, 10, &tmp);
- if (err)
- return err;
-
- /* this will validate ranges for us. */
- r2hb_dead_threshold_set((unsigned int) tmp);
-
- return count;
-}
-
-static
-ssize_t r2hb_hb_group_mode_show(struct r2hb_hb_group *group,
- char *page)
-{
- return sprintf(page, "%s\n",
- r2hb_heartbeat_mode_desc[r2hb_heartbeat_mode]);
-}
-
-static
-ssize_t r2hb_hb_group_mode_store(struct r2hb_hb_group *group,
- const char *page, size_t count)
-{
- unsigned int i;
- int ret;
- size_t len;
-
- len = (page[count - 1] == '\n') ? count - 1 : count;
- if (!len)
- return -EINVAL;
-
- for (i = 0; i < R2HB_HEARTBEAT_NUM_MODES; ++i) {
- if (strnicmp(page, r2hb_heartbeat_mode_desc[i], len))
- continue;
-
- ret = r2hb_global_hearbeat_mode_set(i);
- if (!ret)
- pr_notice("ramster: Heartbeat mode set to %s\n",
- r2hb_heartbeat_mode_desc[i]);
- return count;
- }
-
- return -EINVAL;
-
-}
-
-static struct r2hb_hb_group_attribute r2hb_hb_group_attr_threshold = {
- .attr = { .ca_owner = THIS_MODULE,
- .ca_name = "dead_threshold",
- .ca_mode = S_IRUGO | S_IWUSR },
- .show = r2hb_hb_group_threshold_show,
- .store = r2hb_hb_group_threshold_store,
-};
-
-static struct r2hb_hb_group_attribute r2hb_hb_group_attr_mode = {
- .attr = { .ca_owner = THIS_MODULE,
- .ca_name = "mode",
- .ca_mode = S_IRUGO | S_IWUSR },
- .show = r2hb_hb_group_mode_show,
- .store = r2hb_hb_group_mode_store,
-};
-
-static struct configfs_attribute *r2hb_hb_group_attrs[] = {
- &r2hb_hb_group_attr_threshold.attr,
- &r2hb_hb_group_attr_mode.attr,
- NULL,
-};
-
-static struct configfs_item_operations r2hb_hearbeat_group_item_ops = {
- .show_attribute = r2hb_hb_group_show,
- .store_attribute = r2hb_hb_group_store,
-};
-
-static struct configfs_group_operations r2hb_hb_group_group_ops = {
- .make_item = r2hb_hb_group_make_item,
- .drop_item = r2hb_hb_group_drop_item,
-};
-
-static struct config_item_type r2hb_hb_group_type = {
- .ct_group_ops = &r2hb_hb_group_group_ops,
- .ct_item_ops = &r2hb_hearbeat_group_item_ops,
- .ct_attrs = r2hb_hb_group_attrs,
- .ct_owner = THIS_MODULE,
-};
-
-/* this is just here to avoid touching group in heartbeat.h which the
- * entire damn world #includes */
-struct config_group *r2hb_alloc_hb_set(void)
-{
- struct r2hb_hb_group *hs = NULL;
- struct config_group *ret = NULL;
-
- hs = kzalloc(sizeof(struct r2hb_hb_group), GFP_KERNEL);
- if (hs == NULL)
- goto out;
-
- config_group_init_type_name(&hs->hs_group, "heartbeat",
- &r2hb_hb_group_type);
-
- ret = &hs->hs_group;
-out:
- if (ret == NULL)
- kfree(hs);
- return ret;
-}
-
-void r2hb_free_hb_set(struct config_group *group)
-{
- struct r2hb_hb_group *hs = to_r2hb_hb_group(group);
- kfree(hs);
-}
-
-/* hb callback registration and issuing */
-
-static struct r2hb_callback *hbcall_from_type(enum r2hb_callback_type type)
-{
- if (type == R2HB_NUM_CB)
- return ERR_PTR(-EINVAL);
-
- return &r2hb_callbacks[type];
-}
-
-void r2hb_setup_callback(struct r2hb_callback_func *hc,
- enum r2hb_callback_type type,
- r2hb_cb_func *func,
- void *data,
- int priority)
-{
- INIT_LIST_HEAD(&hc->hc_item);
- hc->hc_func = func;
- hc->hc_data = data;
- hc->hc_priority = priority;
- hc->hc_type = type;
- hc->hc_magic = R2HB_CB_MAGIC;
-}
-EXPORT_SYMBOL_GPL(r2hb_setup_callback);
-
-int r2hb_register_callback(const char *region_uuid,
- struct r2hb_callback_func *hc)
-{
- struct r2hb_callback_func *tmp;
- struct list_head *iter;
- struct r2hb_callback *hbcall;
- int ret;
-
- BUG_ON(hc->hc_magic != R2HB_CB_MAGIC);
- BUG_ON(!list_empty(&hc->hc_item));
-
- hbcall = hbcall_from_type(hc->hc_type);
- if (IS_ERR(hbcall)) {
- ret = PTR_ERR(hbcall);
- goto out;
- }
-
- down_write(&r2hb_callback_sem);
-
- list_for_each(iter, &hbcall->list) {
- tmp = list_entry(iter, struct r2hb_callback_func, hc_item);
- if (hc->hc_priority < tmp->hc_priority) {
- list_add_tail(&hc->hc_item, iter);
- break;
- }
- }
- if (list_empty(&hc->hc_item))
- list_add_tail(&hc->hc_item, &hbcall->list);
-
- up_write(&r2hb_callback_sem);
- ret = 0;
-out:
- mlog(ML_CLUSTER, "returning %d on behalf of %p for funcs %p\n",
- ret, __builtin_return_address(0), hc);
- return ret;
-}
-EXPORT_SYMBOL_GPL(r2hb_register_callback);
-
-void r2hb_unregister_callback(const char *region_uuid,
- struct r2hb_callback_func *hc)
-{
- BUG_ON(hc->hc_magic != R2HB_CB_MAGIC);
-
- mlog(ML_CLUSTER, "on behalf of %p for funcs %p\n",
- __builtin_return_address(0), hc);
-
- /* XXX Can this happen _with_ a region reference? */
- if (list_empty(&hc->hc_item))
- return;
-
- down_write(&r2hb_callback_sem);
-
- list_del_init(&hc->hc_item);
-
- up_write(&r2hb_callback_sem);
-}
-EXPORT_SYMBOL_GPL(r2hb_unregister_callback);
-
-int r2hb_check_node_heartbeating_from_callback(u8 node_num)
-{
- unsigned long testing_map[BITS_TO_LONGS(R2NM_MAX_NODES)];
-
- r2hb_fill_node_map_from_callback(testing_map, sizeof(testing_map));
- if (!test_bit(node_num, testing_map)) {
- mlog(ML_HEARTBEAT,
- "node (%u) does not have heartbeating enabled.\n",
- node_num);
- return 0;
- }
-
- return 1;
-}
-EXPORT_SYMBOL_GPL(r2hb_check_node_heartbeating_from_callback);
-
-void r2hb_stop_all_regions(void)
-{
-}
-EXPORT_SYMBOL_GPL(r2hb_stop_all_regions);
-
-/*
- * this is just a hack until we get the plumbing which flips file systems
- * read only and drops the hb ref instead of killing the node dead.
- */
-int r2hb_global_heartbeat_active(void)
-{
- return (r2hb_heartbeat_mode == R2HB_HEARTBEAT_GLOBAL);
-}
-EXPORT_SYMBOL(r2hb_global_heartbeat_active);
-
-/* added for RAMster */
-void r2hb_manual_set_node_heartbeating(int node_num)
-{
- if (node_num < R2NM_MAX_NODES)
- set_bit(node_num, r2hb_live_node_bitmap);
-}
-EXPORT_SYMBOL(r2hb_manual_set_node_heartbeating);
diff --git a/drivers/staging/zcache/ramster/heartbeat.h b/drivers/staging/zcache/ramster/heartbeat.h
deleted file mode 100644
index 6cbc775..0000000
--- a/drivers/staging/zcache/ramster/heartbeat.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8; -*-
- * vim: noexpandtab sw=8 ts=8 sts=0:
- *
- * heartbeat.h
- *
- * Function prototypes
- *
- * Copyright (C) 2004 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- *
- */
-
-#ifndef R2CLUSTER_HEARTBEAT_H
-#define R2CLUSTER_HEARTBEAT_H
-
-#define R2HB_REGION_TIMEOUT_MS 2000
-
-#define R2HB_MAX_REGION_NAME_LEN 32
-
-/* number of changes to be seen as live */
-#define R2HB_LIVE_THRESHOLD 2
-/* number of equal samples to be seen as dead */
-extern unsigned int r2hb_dead_threshold;
-#define R2HB_DEFAULT_DEAD_THRESHOLD 31
-/* Otherwise MAX_WRITE_TIMEOUT will be zero... */
-#define R2HB_MIN_DEAD_THRESHOLD 2
-#define R2HB_MAX_WRITE_TIMEOUT_MS \
- (R2HB_REGION_TIMEOUT_MS * (r2hb_dead_threshold - 1))
-
-#define R2HB_CB_MAGIC 0x51d1e4ec
-
-/* callback stuff */
-enum r2hb_callback_type {
- R2HB_NODE_DOWN_CB = 0,
- R2HB_NODE_UP_CB,
- R2HB_NUM_CB
-};
-
-struct r2nm_node;
-typedef void (r2hb_cb_func)(struct r2nm_node *, int, void *);
-
-struct r2hb_callback_func {
- u32 hc_magic;
- struct list_head hc_item;
- r2hb_cb_func *hc_func;
- void *hc_data;
- int hc_priority;
- enum r2hb_callback_type hc_type;
-};
-
-struct config_group *r2hb_alloc_hb_set(void);
-void r2hb_free_hb_set(struct config_group *group);
-
-void r2hb_setup_callback(struct r2hb_callback_func *hc,
- enum r2hb_callback_type type,
- r2hb_cb_func *func,
- void *data,
- int priority);
-int r2hb_register_callback(const char *region_uuid,
- struct r2hb_callback_func *hc);
-void r2hb_unregister_callback(const char *region_uuid,
- struct r2hb_callback_func *hc);
-void r2hb_fill_node_map(unsigned long *map,
- unsigned bytes);
-void r2hb_exit(void);
-int r2hb_init(void);
-int r2hb_check_node_heartbeating_from_callback(u8 node_num);
-void r2hb_stop_all_regions(void);
-int r2hb_get_all_regions(char *region_uuids, u8 numregions);
-int r2hb_global_heartbeat_active(void);
-void r2hb_manual_set_node_heartbeating(int);
-
-#endif /* R2CLUSTER_HEARTBEAT_H */
diff --git a/drivers/staging/zcache/ramster/masklog.c b/drivers/staging/zcache/ramster/masklog.c
deleted file mode 100644
index 1261d85..0000000
--- a/drivers/staging/zcache/ramster/masklog.c
+++ /dev/null
@@ -1,155 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8; -*-
- * vim: noexpandtab sw=8 ts=8 sts=0:
- *
- * Copyright (C) 2004, 2005, 2012 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/string.h>
-#include <linux/uaccess.h>
-
-#include "masklog.h"
-
-struct mlog_bits r2_mlog_and_bits = MLOG_BITS_RHS(MLOG_INITIAL_AND_MASK);
-EXPORT_SYMBOL_GPL(r2_mlog_and_bits);
-struct mlog_bits r2_mlog_not_bits = MLOG_BITS_RHS(0);
-EXPORT_SYMBOL_GPL(r2_mlog_not_bits);
-
-static ssize_t mlog_mask_show(u64 mask, char *buf)
-{
- char *state;
-
- if (__mlog_test_u64(mask, r2_mlog_and_bits))
- state = "allow";
- else if (__mlog_test_u64(mask, r2_mlog_not_bits))
- state = "deny";
- else
- state = "off";
-
- return snprintf(buf, PAGE_SIZE, "%s\n", state);
-}
-
-static ssize_t mlog_mask_store(u64 mask, const char *buf, size_t count)
-{
- if (!strnicmp(buf, "allow", 5)) {
- __mlog_set_u64(mask, r2_mlog_and_bits);
- __mlog_clear_u64(mask, r2_mlog_not_bits);
- } else if (!strnicmp(buf, "deny", 4)) {
- __mlog_set_u64(mask, r2_mlog_not_bits);
- __mlog_clear_u64(mask, r2_mlog_and_bits);
- } else if (!strnicmp(buf, "off", 3)) {
- __mlog_clear_u64(mask, r2_mlog_not_bits);
- __mlog_clear_u64(mask, r2_mlog_and_bits);
- } else
- return -EINVAL;
-
- return count;
-}
-
-struct mlog_attribute {
- struct attribute attr;
- u64 mask;
-};
-
-#define to_mlog_attr(_attr) container_of(_attr, struct mlog_attribute, attr)
-
-#define define_mask(_name) { \
- .attr = { \
- .name = #_name, \
- .mode = S_IRUGO | S_IWUSR, \
- }, \
- .mask = ML_##_name, \
-}
-
-static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
- define_mask(TCP),
- define_mask(MSG),
- define_mask(SOCKET),
- define_mask(HEARTBEAT),
- define_mask(HB_BIO),
- define_mask(DLMFS),
- define_mask(DLM),
- define_mask(DLM_DOMAIN),
- define_mask(DLM_THREAD),
- define_mask(DLM_MASTER),
- define_mask(DLM_RECOVERY),
- define_mask(DLM_GLUE),
- define_mask(VOTE),
- define_mask(CONN),
- define_mask(QUORUM),
- define_mask(BASTS),
- define_mask(CLUSTER),
- define_mask(ERROR),
- define_mask(NOTICE),
- define_mask(KTHREAD),
-};
-
-static struct attribute *mlog_attr_ptrs[MLOG_MAX_BITS] = {NULL, };
-
-static ssize_t mlog_show(struct kobject *obj, struct attribute *attr,
- char *buf)
-{
- struct mlog_attribute *mlog_attr = to_mlog_attr(attr);
-
- return mlog_mask_show(mlog_attr->mask, buf);
-}
-
-static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
- const char *buf, size_t count)
-{
- struct mlog_attribute *mlog_attr = to_mlog_attr(attr);
-
- return mlog_mask_store(mlog_attr->mask, buf, count);
-}
-
-static const struct sysfs_ops mlog_attr_ops = {
- .show = mlog_show,
- .store = mlog_store,
-};
-
-static struct kobj_type mlog_ktype = {
- .default_attrs = mlog_attr_ptrs,
- .sysfs_ops = &mlog_attr_ops,
-};
-
-static struct kset mlog_kset = {
- .kobj = {.ktype = &mlog_ktype},
-};
-
-int r2_mlog_sys_init(struct kset *r2cb_kset)
-{
- int i = 0;
-
- while (mlog_attrs[i].attr.mode) {
- mlog_attr_ptrs[i] = &mlog_attrs[i].attr;
- i++;
- }
- mlog_attr_ptrs[i] = NULL;
-
- kobject_set_name(&mlog_kset.kobj, "logmask");
- mlog_kset.kobj.kset = r2cb_kset;
- return kset_register(&mlog_kset);
-}
-
-void r2_mlog_sys_shutdown(void)
-{
- kset_unregister(&mlog_kset);
-}
diff --git a/drivers/staging/zcache/ramster/masklog.h b/drivers/staging/zcache/ramster/masklog.h
deleted file mode 100644
index 918ae11..0000000
--- a/drivers/staging/zcache/ramster/masklog.h
+++ /dev/null
@@ -1,220 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8; -*-
- * vim: noexpandtab sw=8 ts=8 sts=0:
- *
- * Copyright (C) 2005, 2012 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- */
-
-#ifndef R2CLUSTER_MASKLOG_H
-#define R2CLUSTER_MASKLOG_H
-
-/*
- * For now this is a trivial wrapper around printk() that gives the critical
- * ability to enable sets of debugging output at run-time. In the future this
- * will almost certainly be redirected to relayfs so that it can pay a
- * substantially lower heisenberg tax.
- *
- * Callers associate the message with a bitmask and a global bitmask is
- * maintained with help from /proc. If any of the bits match the message is
- * output.
- *
- * We must have efficient bit tests on i386 and it seems gcc still emits crazy
- * code for the 64bit compare. It emits very good code for the dual unsigned
- * long tests, though, completely avoiding tests that can never pass if the
- * caller gives a constant bitmask that fills one of the longs with all 0s. So
- * the desire is to have almost all of the calls decided on by comparing just
- * one of the longs. This leads to having infrequently given bits that are
- * frequently matched in the high bits.
- *
- * _ERROR and _NOTICE are used for messages that always go to the console and
- * have appropriate KERN_ prefixes. We wrap these in our function instead of
- * just calling printk() so that this can eventually make its way through
- * relayfs along with the debugging messages. Everything else gets KERN_DEBUG.
- * The inline tests and macro dance give GCC the opportunity to quite cleverly
- * only emit the appropriate printk() when the caller passes in a constant
- * mask, as is almost always the case.
- *
- * All this bitmask nonsense is managed from the files under
- * /sys/fs/r2cb/logmask/. Reading the files gives a straightforward
- * indication of which bits are allowed (allow) or denied (off/deny).
- * ENTRY deny
- * EXIT deny
- * TCP off
- * MSG off
- * SOCKET off
- * ERROR allow
- * NOTICE allow
- *
- * Writing changes the state of a given bit and requires a strictly formatted
- * single write() call:
- *
- * write(fd, "allow", 5);
- *
- * Echoing the allow/deny/off strings into the logmask files flips the bits
- * on or off as expected; for example, with this bash script:
- *
- * log_mask="/sys/fs/r2cb/log_mask"
- * for node in ENTRY EXIT TCP MSG SOCKET ERROR NOTICE; do
- * echo allow >"$log_mask"/"$node"
- * done
- *
- * The debugfs.ramster tool can also flip the bits with the -l option:
- *
- * debugfs.ramster -l TCP allow
- */
-
-/* for task_struct */
-#include <linux/sched.h>
-
-/* bits that are frequently given and infrequently matched in the low word */
-/* NOTE: If you add a flag, you need to also update masklog.c! */
-#define ML_TCP 0x0000000000000001ULL /* net cluster/tcp.c */
-#define ML_MSG 0x0000000000000002ULL /* net network messages */
-#define ML_SOCKET 0x0000000000000004ULL /* net socket lifetime */
-#define ML_HEARTBEAT 0x0000000000000008ULL /* hb all heartbeat tracking */
-#define ML_HB_BIO 0x0000000000000010ULL /* hb io tracing */
-#define ML_DLMFS 0x0000000000000020ULL /* dlm user dlmfs */
-#define ML_DLM 0x0000000000000040ULL /* dlm general debugging */
-#define ML_DLM_DOMAIN 0x0000000000000080ULL /* dlm domain debugging */
-#define ML_DLM_THREAD 0x0000000000000100ULL /* dlm domain thread */
-#define ML_DLM_MASTER 0x0000000000000200ULL /* dlm master functions */
-#define ML_DLM_RECOVERY 0x0000000000000400ULL /* dlm recovery functions */
-#define ML_DLM_GLUE 0x0000000000000800ULL /* ramster dlm glue layer */
-#define ML_VOTE 0x0000000000001000ULL /* ramster node messaging */
-#define ML_CONN 0x0000000000002000ULL /* net connection management */
-#define ML_QUORUM 0x0000000000004000ULL /* net connection quorum */
-#define ML_BASTS 0x0000000000008000ULL /* dlmglue asts and basts */
-#define ML_CLUSTER 0x0000000000010000ULL /* cluster stack */
-
-/* bits that are infrequently given and frequently matched in the high word */
-#define ML_ERROR 0x1000000000000000ULL /* sent to KERN_ERR */
-#define ML_NOTICE 0x2000000000000000ULL /* sent to KERN_NOTICE */
-#define ML_KTHREAD 0x4000000000000000ULL /* kernel thread activity */
-
-#define MLOG_INITIAL_AND_MASK (ML_ERROR|ML_NOTICE)
-#ifndef MLOG_MASK_PREFIX
-#define MLOG_MASK_PREFIX 0
-#endif
-
-/*
- * When logging is disabled, force the bit test to 0 for anything other
- * than errors and notices, allowing gcc to remove the code completely.
- * When enabled, allow all masks.
- */
-#if defined(CONFIG_RAMSTER_DEBUG_MASKLOG)
-#define ML_ALLOWED_BITS (~0)
-#else
-#define ML_ALLOWED_BITS (ML_ERROR|ML_NOTICE)
-#endif
-
-#define MLOG_MAX_BITS 64
-
-struct mlog_bits {
- unsigned long words[MLOG_MAX_BITS / BITS_PER_LONG];
-};
-
-extern struct mlog_bits r2_mlog_and_bits, r2_mlog_not_bits;
-
-#if BITS_PER_LONG == 32
-
-#define __mlog_test_u64(mask, bits) \
- ((u32)(mask & 0xffffffff) & bits.words[0] || \
- ((u64)(mask) >> 32) & bits.words[1])
-#define __mlog_set_u64(mask, bits) do { \
- bits.words[0] |= (u32)(mask & 0xffffffff); \
- bits.words[1] |= (u64)(mask) >> 32; \
-} while (0)
-#define __mlog_clear_u64(mask, bits) do { \
- bits.words[0] &= ~((u32)(mask & 0xffffffff)); \
- bits.words[1] &= ~((u64)(mask) >> 32); \
-} while (0)
-#define MLOG_BITS_RHS(mask) { \
- { \
- [0] = (u32)(mask & 0xffffffff), \
- [1] = (u64)(mask) >> 32, \
- } \
-}
-
-#else /* 32bit long above, 64bit long below */
-
-#define __mlog_test_u64(mask, bits) ((mask) & bits.words[0])
-#define __mlog_set_u64(mask, bits) do { \
- bits.words[0] |= (mask); \
-} while (0)
-#define __mlog_clear_u64(mask, bits) do { \
- bits.words[0] &= ~(mask); \
-} while (0)
-#define MLOG_BITS_RHS(mask) { { (mask) } }
-
-#endif
-
-/*
- * smp_processor_id() "helpfully" screams when called outside preemptible
- * regions in current kernels. sles doesn't have the variants that don't
- * scream. just do this instead of trying to guess which we're building
- * against.. *sigh*.
- */
-#define __mlog_cpu_guess ({ \
- unsigned long _cpu = get_cpu(); \
- put_cpu(); \
- _cpu; \
-})
-
-/* In the following two macros, the whitespace after the ',' just
- * before ##args is intentional. Otherwise, gcc 2.95 will eat the
- * previous token if args expands to nothing.
- */
-#define __mlog_printk(level, fmt, args...) \
- printk(level "(%s,%u,%lu):%s:%d " fmt, current->comm, \
- task_pid_nr(current), __mlog_cpu_guess, \
- __PRETTY_FUNCTION__, __LINE__ , ##args)
-
-#define mlog(mask, fmt, args...) do { \
- u64 __m = MLOG_MASK_PREFIX | (mask); \
- if ((__m & ML_ALLOWED_BITS) && \
- __mlog_test_u64(__m, r2_mlog_and_bits) && \
- !__mlog_test_u64(__m, r2_mlog_not_bits)) { \
- if (__m & ML_ERROR) \
- __mlog_printk(KERN_ERR, "ERROR: "fmt , ##args); \
- else if (__m & ML_NOTICE) \
- __mlog_printk(KERN_NOTICE, fmt , ##args); \
- else \
- __mlog_printk(KERN_INFO, fmt , ##args); \
- } \
-} while (0)
-
-#define mlog_errno(st) do { \
- int _st = (st); \
- if (_st != -ERESTARTSYS && _st != -EINTR && \
- _st != AOP_TRUNCATED_PAGE && _st != -ENOSPC) \
- mlog(ML_ERROR, "status = %lld\n", (long long)_st); \
-} while (0)
-
-#define mlog_bug_on_msg(cond, fmt, args...) do { \
- if (cond) { \
- mlog(ML_ERROR, "bug expression: " #cond "\n"); \
- mlog(ML_ERROR, fmt, ##args); \
- BUG(); \
- } \
-} while (0)
-
-#include <linux/kobject.h>
-#include <linux/sysfs.h>
-int r2_mlog_sys_init(struct kset *r2cb_subsys);
-void r2_mlog_sys_shutdown(void);
-
-#endif /* R2CLUSTER_MASKLOG_H */
diff --git a/drivers/staging/zcache/ramster/nodemanager.c b/drivers/staging/zcache/ramster/nodemanager.c
deleted file mode 100644
index 2cfe933..0000000
--- a/drivers/staging/zcache/ramster/nodemanager.c
+++ /dev/null
@@ -1,996 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8; -*-
- * vim: noexpandtab sw=8 ts=8 sts=0:
- *
- * Copyright (C) 2004, 2005, 2012 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- */
-
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/configfs.h>
-
-#include "tcp.h"
-#include "nodemanager.h"
-#include "heartbeat.h"
-#include "masklog.h"
-
-/* for now we operate under the assertion that there can be only one
- * cluster active at a time. Changing this will require trickling
- * cluster references throughout where nodes are looked up */
-struct r2nm_cluster *r2nm_single_cluster;
-
-char *r2nm_fence_method_desc[R2NM_FENCE_METHODS] = {
- "reset", /* R2NM_FENCE_RESET */
- "panic", /* R2NM_FENCE_PANIC */
-};
-
-struct r2nm_node *r2nm_get_node_by_num(u8 node_num)
-{
- struct r2nm_node *node = NULL;
-
- if (node_num >= R2NM_MAX_NODES || r2nm_single_cluster == NULL)
- goto out;
-
- read_lock(&r2nm_single_cluster->cl_nodes_lock);
- node = r2nm_single_cluster->cl_nodes[node_num];
- if (node)
- config_item_get(&node->nd_item);
- read_unlock(&r2nm_single_cluster->cl_nodes_lock);
-out:
- return node;
-}
-EXPORT_SYMBOL_GPL(r2nm_get_node_by_num);
-
-int r2nm_configured_node_map(unsigned long *map, unsigned bytes)
-{
- struct r2nm_cluster *cluster = r2nm_single_cluster;
-
- BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap)));
-
- if (cluster == NULL)
- return -EINVAL;
-
- read_lock(&cluster->cl_nodes_lock);
- memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap));
- read_unlock(&cluster->cl_nodes_lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(r2nm_configured_node_map);
-
-static struct r2nm_node *r2nm_node_ip_tree_lookup(struct r2nm_cluster *cluster,
- __be32 ip_needle,
- struct rb_node ***ret_p,
- struct rb_node **ret_parent)
-{
- struct rb_node **p = &cluster->cl_node_ip_tree.rb_node;
- struct rb_node *parent = NULL;
- struct r2nm_node *node, *ret = NULL;
-
- while (*p) {
- int cmp;
-
- parent = *p;
- node = rb_entry(parent, struct r2nm_node, nd_ip_node);
-
- cmp = memcmp(&ip_needle, &node->nd_ipv4_address,
- sizeof(ip_needle));
- if (cmp < 0)
- p = &(*p)->rb_left;
- else if (cmp > 0)
- p = &(*p)->rb_right;
- else {
- ret = node;
- break;
- }
- }
-
- if (ret_p != NULL)
- *ret_p = p;
- if (ret_parent != NULL)
- *ret_parent = parent;
-
- return ret;
-}
-
-struct r2nm_node *r2nm_get_node_by_ip(__be32 addr)
-{
- struct r2nm_node *node = NULL;
- struct r2nm_cluster *cluster = r2nm_single_cluster;
-
- if (cluster == NULL)
- goto out;
-
- read_lock(&cluster->cl_nodes_lock);
- node = r2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL);
- if (node)
- config_item_get(&node->nd_item);
- read_unlock(&cluster->cl_nodes_lock);
-
-out:
- return node;
-}
-EXPORT_SYMBOL_GPL(r2nm_get_node_by_ip);
-
-void r2nm_node_put(struct r2nm_node *node)
-{
- config_item_put(&node->nd_item);
-}
-EXPORT_SYMBOL_GPL(r2nm_node_put);
-
-void r2nm_node_get(struct r2nm_node *node)
-{
- config_item_get(&node->nd_item);
-}
-EXPORT_SYMBOL_GPL(r2nm_node_get);
-
-u8 r2nm_this_node(void)
-{
- u8 node_num = R2NM_MAX_NODES;
-
- if (r2nm_single_cluster && r2nm_single_cluster->cl_has_local)
- node_num = r2nm_single_cluster->cl_local_node;
-
- return node_num;
-}
-EXPORT_SYMBOL_GPL(r2nm_this_node);
-
-/* node configfs bits */
-
-static struct r2nm_cluster *to_r2nm_cluster(struct config_item *item)
-{
- return item ?
- container_of(to_config_group(item), struct r2nm_cluster,
- cl_group)
- : NULL;
-}
-
-static struct r2nm_node *to_r2nm_node(struct config_item *item)
-{
- return item ? container_of(item, struct r2nm_node, nd_item) : NULL;
-}
-
-static void r2nm_node_release(struct config_item *item)
-{
- struct r2nm_node *node = to_r2nm_node(item);
- kfree(node);
-}
-
-static ssize_t r2nm_node_num_read(struct r2nm_node *node, char *page)
-{
- return sprintf(page, "%d\n", node->nd_num);
-}
-
-static struct r2nm_cluster *to_r2nm_cluster_from_node(struct r2nm_node *node)
-{
- /* through the first node_set .parent
- * mycluster/nodes/mynode == r2nm_cluster->r2nm_node_group->r2nm_node */
- return to_r2nm_cluster(node->nd_item.ci_parent->ci_parent);
-}
-
-enum {
- R2NM_NODE_ATTR_NUM = 0,
- R2NM_NODE_ATTR_PORT,
- R2NM_NODE_ATTR_ADDRESS,
- R2NM_NODE_ATTR_LOCAL,
-};
-
-static ssize_t r2nm_node_num_write(struct r2nm_node *node, const char *page,
- size_t count)
-{
- struct r2nm_cluster *cluster = to_r2nm_cluster_from_node(node);
- unsigned long tmp;
- char *p = (char *)page;
- int err;
-
- err = kstrtoul(p, 10, &tmp);
- if (err)
- return err;
-
- if (tmp >= R2NM_MAX_NODES)
- return -ERANGE;
-
- /* once we're in the cl_nodes tree networking can look us up by
- * node number and try to use our address and port attributes
- * to connect to this node.. make sure that they've been set
- * before writing the node attribute? */
- if (!test_bit(R2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
- !test_bit(R2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
- return -EINVAL; /* XXX */
-
- write_lock(&cluster->cl_nodes_lock);
- if (cluster->cl_nodes[tmp])
- p = NULL;
- else {
- cluster->cl_nodes[tmp] = node;
- node->nd_num = tmp;
- set_bit(tmp, cluster->cl_nodes_bitmap);
- }
- write_unlock(&cluster->cl_nodes_lock);
- if (p == NULL)
- return -EEXIST;
-
- return count;
-}
-static ssize_t r2nm_node_ipv4_port_read(struct r2nm_node *node, char *page)
-{
- return sprintf(page, "%u\n", ntohs(node->nd_ipv4_port));
-}
-
-static ssize_t r2nm_node_ipv4_port_write(struct r2nm_node *node,
- const char *page, size_t count)
-{
- unsigned long tmp;
- char *p = (char *)page;
- int err;
-
- err = kstrtoul(p, 10, &tmp);
- if (err)
- return err;
-
- if (tmp == 0)
- return -EINVAL;
- if (tmp >= (u16)-1)
- return -ERANGE;
-
- node->nd_ipv4_port = htons(tmp);
-
- return count;
-}
-
-static ssize_t r2nm_node_ipv4_address_read(struct r2nm_node *node, char *page)
-{
- return sprintf(page, "%pI4\n", &node->nd_ipv4_address);
-}
-
-static ssize_t r2nm_node_ipv4_address_write(struct r2nm_node *node,
- const char *page,
- size_t count)
-{
- struct r2nm_cluster *cluster = to_r2nm_cluster_from_node(node);
- int ret, i;
- struct rb_node **p, *parent;
- unsigned int octets[4];
- __be32 ipv4_addr = 0;
-
- ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2],
- &octets[1], &octets[0]);
- if (ret != 4)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(octets); i++) {
- if (octets[i] > 255)
- return -ERANGE;
- be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
- }
-
- ret = 0;
- write_lock(&cluster->cl_nodes_lock);
- if (r2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
- ret = -EEXIST;
- else {
- rb_link_node(&node->nd_ip_node, parent, p);
- rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
- }
- write_unlock(&cluster->cl_nodes_lock);
- if (ret)
- return ret;
-
- memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr));
-
- return count;
-}
-
-static ssize_t r2nm_node_local_read(struct r2nm_node *node, char *page)
-{
- return sprintf(page, "%d\n", node->nd_local);
-}
-
-static ssize_t r2nm_node_local_write(struct r2nm_node *node, const char *page,
- size_t count)
-{
- struct r2nm_cluster *cluster = to_r2nm_cluster_from_node(node);
- unsigned long tmp;
- char *p = (char *)page;
- ssize_t ret;
- int err;
-
- err = kstrtoul(p, 10, &tmp);
- if (err)
- return err;
-
- tmp = !!tmp; /* boolean of whether this node wants to be local */
-
- /* setting local turns on networking rx for now so we require having
- * set everything else first */
- if (!test_bit(R2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
- !test_bit(R2NM_NODE_ATTR_NUM, &node->nd_set_attributes) ||
- !test_bit(R2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
- return -EINVAL; /* XXX */
-
- /* the only failure case is trying to set a new local node
- * when a different one is already set */
- if (tmp && tmp == cluster->cl_has_local &&
- cluster->cl_local_node != node->nd_num)
- return -EBUSY;
-
- /* bring up the rx thread if we're setting the new local node. */
- if (tmp && !cluster->cl_has_local) {
- ret = r2net_start_listening(node);
- if (ret)
- return ret;
- }
-
- if (!tmp && cluster->cl_has_local &&
- cluster->cl_local_node == node->nd_num) {
- r2net_stop_listening(node);
- cluster->cl_local_node = R2NM_INVALID_NODE_NUM;
- }
-
- node->nd_local = tmp;
- if (node->nd_local) {
- cluster->cl_has_local = tmp;
- cluster->cl_local_node = node->nd_num;
- }
-
- return count;
-}
-
-struct r2nm_node_attribute {
- struct configfs_attribute attr;
- ssize_t (*show)(struct r2nm_node *, char *);
- ssize_t (*store)(struct r2nm_node *, const char *, size_t);
-};
-
-static struct r2nm_node_attribute r2nm_node_attr_num = {
- .attr = { .ca_owner = THIS_MODULE,
- .ca_name = "num",
- .ca_mode = S_IRUGO | S_IWUSR },
- .show = r2nm_node_num_read,
- .store = r2nm_node_num_write,
-};
-
-static struct r2nm_node_attribute r2nm_node_attr_ipv4_port = {
- .attr = { .ca_owner = THIS_MODULE,
- .ca_name = "ipv4_port",
- .ca_mode = S_IRUGO | S_IWUSR },
- .show = r2nm_node_ipv4_port_read,
- .store = r2nm_node_ipv4_port_write,
-};
-
-static struct r2nm_node_attribute r2nm_node_attr_ipv4_address = {
- .attr = { .ca_owner = THIS_MODULE,
- .ca_name = "ipv4_address",
- .ca_mode = S_IRUGO | S_IWUSR },
- .show = r2nm_node_ipv4_address_read,
- .store = r2nm_node_ipv4_address_write,
-};
-
-static struct r2nm_node_attribute r2nm_node_attr_local = {
- .attr = { .ca_owner = THIS_MODULE,
- .ca_name = "local",
- .ca_mode = S_IRUGO | S_IWUSR },
- .show = r2nm_node_local_read,
- .store = r2nm_node_local_write,
-};
-
-static struct configfs_attribute *r2nm_node_attrs[] = {
- [R2NM_NODE_ATTR_NUM] = &r2nm_node_attr_num.attr,
- [R2NM_NODE_ATTR_PORT] = &r2nm_node_attr_ipv4_port.attr,
- [R2NM_NODE_ATTR_ADDRESS] = &r2nm_node_attr_ipv4_address.attr,
- [R2NM_NODE_ATTR_LOCAL] = &r2nm_node_attr_local.attr,
- NULL,
-};
-
-static int r2nm_attr_index(struct configfs_attribute *attr)
-{
- int i;
- for (i = 0; i < ARRAY_SIZE(r2nm_node_attrs); i++) {
- if (attr == r2nm_node_attrs[i])
- return i;
- }
- BUG();
- return 0;
-}
-
-static ssize_t r2nm_node_show(struct config_item *item,
- struct configfs_attribute *attr,
- char *page)
-{
- struct r2nm_node *node = to_r2nm_node(item);
- struct r2nm_node_attribute *r2nm_node_attr =
- container_of(attr, struct r2nm_node_attribute, attr);
- ssize_t ret = 0;
-
- if (r2nm_node_attr->show)
- ret = r2nm_node_attr->show(node, page);
- return ret;
-}
-
-static ssize_t r2nm_node_store(struct config_item *item,
- struct configfs_attribute *attr,
- const char *page, size_t count)
-{
- struct r2nm_node *node = to_r2nm_node(item);
- struct r2nm_node_attribute *r2nm_node_attr =
- container_of(attr, struct r2nm_node_attribute, attr);
- ssize_t ret;
- int attr_index = r2nm_attr_index(attr);
-
- if (r2nm_node_attr->store == NULL) {
- ret = -EINVAL;
- goto out;
- }
-
- if (test_bit(attr_index, &node->nd_set_attributes))
- return -EBUSY;
-
- ret = r2nm_node_attr->store(node, page, count);
- if (ret < count)
- goto out;
-
- set_bit(attr_index, &node->nd_set_attributes);
-out:
- return ret;
-}
-
-static struct configfs_item_operations r2nm_node_item_ops = {
- .release = r2nm_node_release,
- .show_attribute = r2nm_node_show,
- .store_attribute = r2nm_node_store,
-};
-
-static struct config_item_type r2nm_node_type = {
- .ct_item_ops = &r2nm_node_item_ops,
- .ct_attrs = r2nm_node_attrs,
- .ct_owner = THIS_MODULE,
-};
-
-/* node set */
-
-struct r2nm_node_group {
- struct config_group ns_group;
- /* some stuff? */
-};
-
-#if 0
-static struct r2nm_node_group *to_r2nm_node_group(struct config_group *group)
-{
- return group ?
- container_of(group, struct r2nm_node_group, ns_group)
- : NULL;
-}
-#endif
-
-struct r2nm_cluster_attribute {
- struct configfs_attribute attr;
- ssize_t (*show)(struct r2nm_cluster *, char *);
- ssize_t (*store)(struct r2nm_cluster *, const char *, size_t);
-};
-
-static ssize_t r2nm_cluster_attr_write(const char *page, ssize_t count,
- unsigned int *val)
-{
- unsigned long tmp;
- char *p = (char *)page;
- int err;
-
- err = kstrtoul(p, 10, &tmp);
- if (err)
- return err;
-
- if (tmp == 0)
- return -EINVAL;
- if (tmp >= (u32)-1)
- return -ERANGE;
-
- *val = tmp;
-
- return count;
-}
-
-static ssize_t r2nm_cluster_attr_idle_timeout_ms_read(
- struct r2nm_cluster *cluster, char *page)
-{
- return sprintf(page, "%u\n", cluster->cl_idle_timeout_ms);
-}
-
-static ssize_t r2nm_cluster_attr_idle_timeout_ms_write(
- struct r2nm_cluster *cluster, const char *page, size_t count)
-{
- ssize_t ret;
- unsigned int val = 0;
-
- ret = r2nm_cluster_attr_write(page, count, &val);
-
- if (ret > 0) {
- if (cluster->cl_idle_timeout_ms != val
- && r2net_num_connected_peers()) {
- mlog(ML_NOTICE,
- "r2net: cannot change idle timeout after "
- "the first peer has agreed to it."
- " %d connected peers\n",
- r2net_num_connected_peers());
- ret = -EINVAL;
- } else if (val <= cluster->cl_keepalive_delay_ms) {
- mlog(ML_NOTICE,
- "r2net: idle timeout must be larger "
- "than keepalive delay\n");
- ret = -EINVAL;
- } else {
- cluster->cl_idle_timeout_ms = val;
- }
- }
-
- return ret;
-}
-
-static ssize_t r2nm_cluster_attr_keepalive_delay_ms_read(
- struct r2nm_cluster *cluster, char *page)
-{
- return sprintf(page, "%u\n", cluster->cl_keepalive_delay_ms);
-}
-
-static ssize_t r2nm_cluster_attr_keepalive_delay_ms_write(
- struct r2nm_cluster *cluster, const char *page, size_t count)
-{
- ssize_t ret;
- unsigned int val = 0;
-
- ret = r2nm_cluster_attr_write(page, count, &val);
-
- if (ret > 0) {
- if (cluster->cl_keepalive_delay_ms != val
- && r2net_num_connected_peers()) {
- mlog(ML_NOTICE,
- "r2net: cannot change keepalive delay after"
- " the first peer has agreed to it."
- " %d connected peers\n",
- r2net_num_connected_peers());
- ret = -EINVAL;
- } else if (val >= cluster->cl_idle_timeout_ms) {
- mlog(ML_NOTICE,
- "r2net: keepalive delay must be "
- "smaller than idle timeout\n");
- ret = -EINVAL;
- } else {
- cluster->cl_keepalive_delay_ms = val;
- }
- }
-
- return ret;
-}
-
-static ssize_t r2nm_cluster_attr_reconnect_delay_ms_read(
- struct r2nm_cluster *cluster, char *page)
-{
- return sprintf(page, "%u\n", cluster->cl_reconnect_delay_ms);
-}
-
-static ssize_t r2nm_cluster_attr_reconnect_delay_ms_write(
- struct r2nm_cluster *cluster, const char *page, size_t count)
-{
- return r2nm_cluster_attr_write(page, count,
- &cluster->cl_reconnect_delay_ms);
-}
-
-static ssize_t r2nm_cluster_attr_fence_method_read(
- struct r2nm_cluster *cluster, char *page)
-{
- ssize_t ret = 0;
-
- if (cluster)
- ret = sprintf(page, "%s\n",
- r2nm_fence_method_desc[cluster->cl_fence_method]);
- return ret;
-}
-
-static ssize_t r2nm_cluster_attr_fence_method_write(
- struct r2nm_cluster *cluster, const char *page, size_t count)
-{
- unsigned int i;
-
- if (page[count - 1] != '\n')
- goto bail;
-
- for (i = 0; i < R2NM_FENCE_METHODS; ++i) {
- if (count != strlen(r2nm_fence_method_desc[i]) + 1)
- continue;
- if (strncasecmp(page, r2nm_fence_method_desc[i], count - 1))
- continue;
- if (cluster->cl_fence_method != i) {
- pr_info("ramster: Changing fence method to %s\n",
- r2nm_fence_method_desc[i]);
- cluster->cl_fence_method = i;
- }
- return count;
- }
-
-bail:
- return -EINVAL;
-}
-
-static struct r2nm_cluster_attribute r2nm_cluster_attr_idle_timeout_ms = {
- .attr = { .ca_owner = THIS_MODULE,
- .ca_name = "idle_timeout_ms",
- .ca_mode = S_IRUGO | S_IWUSR },
- .show = r2nm_cluster_attr_idle_timeout_ms_read,
- .store = r2nm_cluster_attr_idle_timeout_ms_write,
-};
-
-static struct r2nm_cluster_attribute r2nm_cluster_attr_keepalive_delay_ms = {
- .attr = { .ca_owner = THIS_MODULE,
- .ca_name = "keepalive_delay_ms",
- .ca_mode = S_IRUGO | S_IWUSR },
- .show = r2nm_cluster_attr_keepalive_delay_ms_read,
- .store = r2nm_cluster_attr_keepalive_delay_ms_write,
-};
-
-static struct r2nm_cluster_attribute r2nm_cluster_attr_reconnect_delay_ms = {
- .attr = { .ca_owner = THIS_MODULE,
- .ca_name = "reconnect_delay_ms",
- .ca_mode = S_IRUGO | S_IWUSR },
- .show = r2nm_cluster_attr_reconnect_delay_ms_read,
- .store = r2nm_cluster_attr_reconnect_delay_ms_write,
-};
-
-static struct r2nm_cluster_attribute r2nm_cluster_attr_fence_method = {
- .attr = { .ca_owner = THIS_MODULE,
- .ca_name = "fence_method",
- .ca_mode = S_IRUGO | S_IWUSR },
- .show = r2nm_cluster_attr_fence_method_read,
- .store = r2nm_cluster_attr_fence_method_write,
-};
-
-static struct configfs_attribute *r2nm_cluster_attrs[] = {
- &r2nm_cluster_attr_idle_timeout_ms.attr,
- &r2nm_cluster_attr_keepalive_delay_ms.attr,
- &r2nm_cluster_attr_reconnect_delay_ms.attr,
- &r2nm_cluster_attr_fence_method.attr,
- NULL,
-};
-static ssize_t r2nm_cluster_show(struct config_item *item,
- struct configfs_attribute *attr,
- char *page)
-{
- struct r2nm_cluster *cluster = to_r2nm_cluster(item);
- struct r2nm_cluster_attribute *r2nm_cluster_attr =
- container_of(attr, struct r2nm_cluster_attribute, attr);
- ssize_t ret = 0;
-
- if (r2nm_cluster_attr->show)
- ret = r2nm_cluster_attr->show(cluster, page);
- return ret;
-}
-
-static ssize_t r2nm_cluster_store(struct config_item *item,
- struct configfs_attribute *attr,
- const char *page, size_t count)
-{
- struct r2nm_cluster *cluster = to_r2nm_cluster(item);
- struct r2nm_cluster_attribute *r2nm_cluster_attr =
- container_of(attr, struct r2nm_cluster_attribute, attr);
- ssize_t ret;
-
- if (r2nm_cluster_attr->store == NULL) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = r2nm_cluster_attr->store(cluster, page, count);
- if (ret < count)
- goto out;
-out:
- return ret;
-}
-
-static struct config_item *r2nm_node_group_make_item(struct config_group *group,
- const char *name)
-{
- struct r2nm_node *node = NULL;
-
- if (strlen(name) > R2NM_MAX_NAME_LEN)
- return ERR_PTR(-ENAMETOOLONG);
-
- node = kzalloc(sizeof(struct r2nm_node), GFP_KERNEL);
- if (node == NULL)
- return ERR_PTR(-ENOMEM);
-
- strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */
- config_item_init_type_name(&node->nd_item, name, &r2nm_node_type);
- spin_lock_init(&node->nd_lock);
-
- mlog(ML_CLUSTER, "r2nm: Registering node %s\n", name);
-
- return &node->nd_item;
-}
-
-static void r2nm_node_group_drop_item(struct config_group *group,
- struct config_item *item)
-{
- struct r2nm_node *node = to_r2nm_node(item);
- struct r2nm_cluster *cluster =
- to_r2nm_cluster(group->cg_item.ci_parent);
-
- r2net_disconnect_node(node);
-
- if (cluster->cl_has_local &&
- (cluster->cl_local_node == node->nd_num)) {
- cluster->cl_has_local = 0;
- cluster->cl_local_node = R2NM_INVALID_NODE_NUM;
- r2net_stop_listening(node);
- }
-
- /* XXX call into net to stop this node from trading messages */
-
- write_lock(&cluster->cl_nodes_lock);
-
- /* XXX sloppy */
- if (node->nd_ipv4_address)
- rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree);
-
- /* nd_num might be 0 if the node number hasn't been set.. */
- if (cluster->cl_nodes[node->nd_num] == node) {
- cluster->cl_nodes[node->nd_num] = NULL;
- clear_bit(node->nd_num, cluster->cl_nodes_bitmap);
- }
- write_unlock(&cluster->cl_nodes_lock);
-
- mlog(ML_CLUSTER, "r2nm: Unregistered node %s\n",
- config_item_name(&node->nd_item));
-
- config_item_put(item);
-}
-
-static struct configfs_group_operations r2nm_node_group_group_ops = {
- .make_item = r2nm_node_group_make_item,
- .drop_item = r2nm_node_group_drop_item,
-};
-
-static struct config_item_type r2nm_node_group_type = {
- .ct_group_ops = &r2nm_node_group_group_ops,
- .ct_owner = THIS_MODULE,
-};
-
-/* cluster */
-
-static void r2nm_cluster_release(struct config_item *item)
-{
- struct r2nm_cluster *cluster = to_r2nm_cluster(item);
-
- kfree(cluster->cl_group.default_groups);
- kfree(cluster);
-}
-
-static struct configfs_item_operations r2nm_cluster_item_ops = {
- .release = r2nm_cluster_release,
- .show_attribute = r2nm_cluster_show,
- .store_attribute = r2nm_cluster_store,
-};
-
-static struct config_item_type r2nm_cluster_type = {
- .ct_item_ops = &r2nm_cluster_item_ops,
- .ct_attrs = r2nm_cluster_attrs,
- .ct_owner = THIS_MODULE,
-};
-
-/* cluster set */
-
-struct r2nm_cluster_group {
- struct configfs_subsystem cs_subsys;
- /* some stuff? */
-};
-
-#if 0
-static struct r2nm_cluster_group *
-to_r2nm_cluster_group(struct config_group *group)
-{
- return group ?
- container_of(to_configfs_subsystem(group),
- struct r2nm_cluster_group, cs_subsys)
- : NULL;
-}
-#endif
-
-static struct config_group *
-r2nm_cluster_group_make_group(struct config_group *group,
- const char *name)
-{
- struct r2nm_cluster *cluster = NULL;
- struct r2nm_node_group *ns = NULL;
- struct config_group *r2hb_group = NULL, *ret = NULL;
- void *defs = NULL;
-
- /* this runs under the parent dir's i_mutex; there can be only
- * one caller in here at a time */
- if (r2nm_single_cluster)
- return ERR_PTR(-ENOSPC);
-
- cluster = kzalloc(sizeof(struct r2nm_cluster), GFP_KERNEL);
- ns = kzalloc(sizeof(struct r2nm_node_group), GFP_KERNEL);
- defs = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL);
- r2hb_group = r2hb_alloc_hb_set();
- if (cluster == NULL || ns == NULL || r2hb_group == NULL || defs == NULL)
- goto out;
-
- config_group_init_type_name(&cluster->cl_group, name,
- &r2nm_cluster_type);
- config_group_init_type_name(&ns->ns_group, "node",
- &r2nm_node_group_type);
-
- cluster->cl_group.default_groups = defs;
- cluster->cl_group.default_groups[0] = &ns->ns_group;
- cluster->cl_group.default_groups[1] = r2hb_group;
- cluster->cl_group.default_groups[2] = NULL;
- rwlock_init(&cluster->cl_nodes_lock);
- cluster->cl_node_ip_tree = RB_ROOT;
- cluster->cl_reconnect_delay_ms = R2NET_RECONNECT_DELAY_MS_DEFAULT;
- cluster->cl_idle_timeout_ms = R2NET_IDLE_TIMEOUT_MS_DEFAULT;
- cluster->cl_keepalive_delay_ms = R2NET_KEEPALIVE_DELAY_MS_DEFAULT;
- cluster->cl_fence_method = R2NM_FENCE_RESET;
-
- ret = &cluster->cl_group;
- r2nm_single_cluster = cluster;
-
-out:
- if (ret == NULL) {
- kfree(cluster);
- kfree(ns);
- r2hb_free_hb_set(r2hb_group);
- kfree(defs);
- ret = ERR_PTR(-ENOMEM);
- }
-
- return ret;
-}
-
-static void r2nm_cluster_group_drop_item(struct config_group *group,
- struct config_item *item)
-{
- struct r2nm_cluster *cluster = to_r2nm_cluster(item);
- int i;
- struct config_item *killme;
-
- BUG_ON(r2nm_single_cluster != cluster);
- r2nm_single_cluster = NULL;
-
- for (i = 0; cluster->cl_group.default_groups[i]; i++) {
- killme = &cluster->cl_group.default_groups[i]->cg_item;
- cluster->cl_group.default_groups[i] = NULL;
- config_item_put(killme);
- }
-
- config_item_put(item);
-}
-
-static struct configfs_group_operations r2nm_cluster_group_group_ops = {
- .make_group = r2nm_cluster_group_make_group,
- .drop_item = r2nm_cluster_group_drop_item,
-};
-
-static struct config_item_type r2nm_cluster_group_type = {
- .ct_group_ops = &r2nm_cluster_group_group_ops,
- .ct_owner = THIS_MODULE,
-};
-
-static struct r2nm_cluster_group r2nm_cluster_group = {
- .cs_subsys = {
- .su_group = {
- .cg_item = {
- .ci_namebuf = "cluster",
- .ci_type = &r2nm_cluster_group_type,
- },
- },
- },
-};
-
-int r2nm_depend_item(struct config_item *item)
-{
- return configfs_depend_item(&r2nm_cluster_group.cs_subsys, item);
-}
-
-void r2nm_undepend_item(struct config_item *item)
-{
- configfs_undepend_item(&r2nm_cluster_group.cs_subsys, item);
-}
-
-int r2nm_depend_this_node(void)
-{
- int ret = 0;
- struct r2nm_node *local_node;
-
- local_node = r2nm_get_node_by_num(r2nm_this_node());
- if (!local_node) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = r2nm_depend_item(&local_node->nd_item);
- r2nm_node_put(local_node);
-
-out:
- return ret;
-}
-
-void r2nm_undepend_this_node(void)
-{
- struct r2nm_node *local_node;
-
- local_node = r2nm_get_node_by_num(r2nm_this_node());
- BUG_ON(!local_node);
-
- r2nm_undepend_item(&local_node->nd_item);
- r2nm_node_put(local_node);
-}
-
-
-static void __exit exit_r2nm(void)
-{
- /* XXX sync with hb callbacks and shut down hb? */
- r2net_unregister_hb_callbacks();
- configfs_unregister_subsystem(&r2nm_cluster_group.cs_subsys);
-
- r2net_exit();
- r2hb_exit();
-}
-
-int r2nm_init(void)
-{
- int ret = -1;
-
- ret = r2hb_init();
- if (ret)
- goto out;
-
- ret = r2net_init();
- if (ret)
- goto out_r2hb;
-
- ret = r2net_register_hb_callbacks();
- if (ret)
- goto out_r2net;
-
- config_group_init(&r2nm_cluster_group.cs_subsys.su_group);
- mutex_init(&r2nm_cluster_group.cs_subsys.su_mutex);
- ret = configfs_register_subsystem(&r2nm_cluster_group.cs_subsys);
- if (ret) {
- pr_err("nodemanager: Registration returned %d\n", ret);
- goto out_callbacks;
- }
-
- if (!ret)
- goto out;
-
- configfs_unregister_subsystem(&r2nm_cluster_group.cs_subsys);
-out_callbacks:
- r2net_unregister_hb_callbacks();
-out_r2net:
- r2net_exit();
-out_r2hb:
- r2hb_exit();
-out:
- return ret;
-}
-EXPORT_SYMBOL_GPL(r2nm_init);
-
-MODULE_AUTHOR("Oracle");
-MODULE_LICENSE("GPL");
-
-#ifndef CONFIG_RAMSTER_MODULE
-late_initcall(r2nm_init);
-#endif
diff --git a/drivers/staging/zcache/ramster/nodemanager.h b/drivers/staging/zcache/ramster/nodemanager.h
deleted file mode 100644
index 41a04df..0000000
--- a/drivers/staging/zcache/ramster/nodemanager.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8; -*-
- * vim: noexpandtab sw=8 ts=8 sts=0:
- *
- * nodemanager.h
- *
- * Function prototypes
- *
- * Copyright (C) 2004 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- *
- */
-
-#ifndef R2CLUSTER_NODEMANAGER_H
-#define R2CLUSTER_NODEMANAGER_H
-
-#include "ramster_nodemanager.h"
-
-/* This totally doesn't belong here. */
-#include <linux/configfs.h>
-#include <linux/rbtree.h>
-
-enum r2nm_fence_method {
- R2NM_FENCE_RESET = 0,
- R2NM_FENCE_PANIC,
- R2NM_FENCE_METHODS, /* Number of fence methods */
-};
-
-struct r2nm_node {
- spinlock_t nd_lock;
- struct config_item nd_item;
- char nd_name[R2NM_MAX_NAME_LEN+1]; /* replace? */
- __u8 nd_num;
- /* only one address per node, as attributes, for now. */
- __be32 nd_ipv4_address;
- __be16 nd_ipv4_port;
- struct rb_node nd_ip_node;
- /* there can be only one local node for now */
- int nd_local;
-
- unsigned long nd_set_attributes;
-};
-
-struct r2nm_cluster {
- struct config_group cl_group;
- unsigned cl_has_local:1;
- u8 cl_local_node;
- rwlock_t cl_nodes_lock;
- struct r2nm_node *cl_nodes[R2NM_MAX_NODES];
- struct rb_root cl_node_ip_tree;
- unsigned int cl_idle_timeout_ms;
- unsigned int cl_keepalive_delay_ms;
- unsigned int cl_reconnect_delay_ms;
- enum r2nm_fence_method cl_fence_method;
-
- /* part of a hack for disk bitmap.. will go eventually. - zab */
- unsigned long cl_nodes_bitmap[BITS_TO_LONGS(R2NM_MAX_NODES)];
-};
-
-extern struct r2nm_cluster *r2nm_single_cluster;
-
-u8 r2nm_this_node(void);
-
-int r2nm_configured_node_map(unsigned long *map, unsigned bytes);
-struct r2nm_node *r2nm_get_node_by_num(u8 node_num);
-struct r2nm_node *r2nm_get_node_by_ip(__be32 addr);
-void r2nm_node_get(struct r2nm_node *node);
-void r2nm_node_put(struct r2nm_node *node);
-
-int r2nm_depend_item(struct config_item *item);
-void r2nm_undepend_item(struct config_item *item);
-int r2nm_depend_this_node(void);
-void r2nm_undepend_this_node(void);
-
-#endif /* R2CLUSTER_NODEMANAGER_H */
diff --git a/drivers/staging/zcache/ramster/r2net.c b/drivers/staging/zcache/ramster/r2net.c
deleted file mode 100644
index 34818dc..0000000
--- a/drivers/staging/zcache/ramster/r2net.c
+++ /dev/null
@@ -1,414 +0,0 @@
-/*
- * r2net.c
- *
- * Copyright (c) 2011-2012, Dan Magenheimer, Oracle Corp.
- *
- * Ramster_r2net provides an interface between zcache and r2net.
- *
- * FIXME: support more than two nodes
- */
-
-#include <linux/list.h>
-#include "tcp.h"
-#include "nodemanager.h"
-#include "../tmem.h"
-#include "../zcache.h"
-#include "ramster.h"
-
-#define RAMSTER_TESTING
-
-#define RMSTR_KEY 0x77347734
-
-enum {
- RMSTR_TMEM_PUT_EPH = 100,
- RMSTR_TMEM_PUT_PERS,
- RMSTR_TMEM_ASYNC_GET_REQUEST,
- RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST,
- RMSTR_TMEM_ASYNC_GET_REPLY,
- RMSTR_TMEM_FLUSH,
- RMSTR_TMEM_FLOBJ,
- RMSTR_TMEM_DESTROY_POOL,
-};
-
-#define RMSTR_R2NET_MAX_LEN \
- (R2NET_MAX_PAYLOAD_BYTES - sizeof(struct tmem_xhandle))
-
-#include "tcp_internal.h"
-
-static struct r2nm_node *r2net_target_node;
-static int r2net_target_nodenum;
-
-int r2net_remote_target_node_set(int node_num)
-{
- int ret = -1;
-
- r2net_target_node = r2nm_get_node_by_num(node_num);
- if (r2net_target_node != NULL) {
- r2net_target_nodenum = node_num;
- r2nm_node_put(r2net_target_node);
- ret = 0;
- }
- return ret;
-}
-
-/* FIXME following buffer should be per-cpu, protected by preempt_disable */
-static char ramster_async_get_buf[R2NET_MAX_PAYLOAD_BYTES];
-
-static int ramster_remote_async_get_request_handler(struct r2net_msg *msg,
- u32 len, void *data, void **ret_data)
-{
- char *pdata;
- struct tmem_xhandle xh;
- int found;
- size_t size = RMSTR_R2NET_MAX_LEN;
- u16 msgtype = be16_to_cpu(msg->msg_type);
- bool get_and_free = (msgtype == RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST);
- unsigned long flags;
-
- xh = *(struct tmem_xhandle *)msg->buf;
- if (xh.xh_data_size > RMSTR_R2NET_MAX_LEN)
- BUG();
- pdata = ramster_async_get_buf;
- *(struct tmem_xhandle *)pdata = xh;
- pdata += sizeof(struct tmem_xhandle);
- local_irq_save(flags);
- found = zcache_get_page(xh.client_id, xh.pool_id, &xh.oid, xh.index,
- pdata, &size, true, get_and_free ? 1 : -1);
- local_irq_restore(flags);
- if (found < 0) {
- /* a zero size indicates the get failed */
- size = 0;
- }
- if (size > RMSTR_R2NET_MAX_LEN)
- BUG();
- *ret_data = pdata - sizeof(struct tmem_xhandle);
- /* now make caller (r2net_process_message) handle specially */
- r2net_force_data_magic(msg, RMSTR_TMEM_ASYNC_GET_REPLY, RMSTR_KEY);
- return size + sizeof(struct tmem_xhandle);
-}
-
-static int ramster_remote_async_get_reply_handler(struct r2net_msg *msg,
- u32 len, void *data, void **ret_data)
-{
- char *in = (char *)msg->buf;
- int datalen = len - sizeof(struct r2net_msg);
- int ret = -1;
- struct tmem_xhandle *xh = (struct tmem_xhandle *)in;
-
- in += sizeof(struct tmem_xhandle);
- datalen -= sizeof(struct tmem_xhandle);
- BUG_ON(datalen < 0 || datalen > PAGE_SIZE);
- ret = ramster_localify(xh->pool_id, &xh->oid, xh->index,
- in, datalen, xh->extra);
-#ifdef RAMSTER_TESTING
- if (ret == -EEXIST)
- pr_err("TESTING ArrgREP, aborted overwrite on racy put\n");
-#endif
- return ret;
-}
-
-int ramster_remote_put_handler(struct r2net_msg *msg,
- u32 len, void *data, void **ret_data)
-{
- struct tmem_xhandle *xh;
- char *p = (char *)msg->buf;
- int datalen = len - sizeof(struct r2net_msg) -
- sizeof(struct tmem_xhandle);
- u16 msgtype = be16_to_cpu(msg->msg_type);
- bool ephemeral = (msgtype == RMSTR_TMEM_PUT_EPH);
- unsigned long flags;
- int ret;
-
- xh = (struct tmem_xhandle *)p;
- p += sizeof(struct tmem_xhandle);
- zcache_autocreate_pool(xh->client_id, xh->pool_id, ephemeral);
- local_irq_save(flags);
- ret = zcache_put_page(xh->client_id, xh->pool_id, &xh->oid, xh->index,
- p, datalen, true, ephemeral);
- local_irq_restore(flags);
- return ret;
-}
-
-int ramster_remote_flush_handler(struct r2net_msg *msg,
- u32 len, void *data, void **ret_data)
-{
- struct tmem_xhandle *xh;
- char *p = (char *)msg->buf;
-
- xh = (struct tmem_xhandle *)p;
- p += sizeof(struct tmem_xhandle);
- (void)zcache_flush_page(xh->client_id, xh->pool_id,
- &xh->oid, xh->index);
- return 0;
-}
-
-int ramster_remote_flobj_handler(struct r2net_msg *msg,
- u32 len, void *data, void **ret_data)
-{
- struct tmem_xhandle *xh;
- char *p = (char *)msg->buf;
-
- xh = (struct tmem_xhandle *)p;
- p += sizeof(struct tmem_xhandle);
- (void)zcache_flush_object(xh->client_id, xh->pool_id, &xh->oid);
- return 0;
-}
-
-int r2net_remote_async_get(struct tmem_xhandle *xh, bool free, int remotenode,
- size_t expect_size, uint8_t expect_cksum,
- void *extra)
-{
- int nodenum, ret = -1, status;
- struct r2nm_node *node = NULL;
- struct kvec vec[1];
- size_t veclen = 1;
- u32 msg_type;
- struct r2net_node *nn;
-
- node = r2nm_get_node_by_num(remotenode);
- if (node == NULL)
- goto out;
- xh->client_id = r2nm_this_node(); /* which node is getting */
- xh->xh_data_cksum = expect_cksum;
- xh->xh_data_size = expect_size;
- xh->extra = extra;
- vec[0].iov_len = sizeof(*xh);
- vec[0].iov_base = xh;
-
- node = r2net_target_node;
- if (!node)
- goto out;
-
- nodenum = r2net_target_nodenum;
-
- r2nm_node_get(node);
- nn = r2net_nn_from_num(nodenum);
- if (nn->nn_persistent_error || !nn->nn_sc_valid) {
- ret = -ENOTCONN;
- r2nm_node_put(node);
- goto out;
- }
-
- if (free)
- msg_type = RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST;
- else
- msg_type = RMSTR_TMEM_ASYNC_GET_REQUEST;
- ret = r2net_send_message_vec(msg_type, RMSTR_KEY,
- vec, veclen, remotenode, &status);
- r2nm_node_put(node);
- if (ret < 0) {
- if (ret == -ENOTCONN || ret == -EHOSTDOWN)
- goto out;
- if (ret == -EAGAIN)
- goto out;
- /* FIXME handle bad message possibilities here? */
- pr_err("UNTESTED ret<0 in ramster_remote_async_get: ret=%d\n",
- ret);
- }
- ret = status;
-out:
- return ret;
-}
-
-#ifdef RAMSTER_TESTING
-/* leave me here to see if it catches a weird crash */
-static void ramster_check_irq_counts(void)
-{
- static int last_hardirq_cnt, last_softirq_cnt, last_preempt_cnt;
- int cur_hardirq_cnt, cur_softirq_cnt, cur_preempt_cnt;
-
- cur_hardirq_cnt = hardirq_count() >> HARDIRQ_SHIFT;
- if (cur_hardirq_cnt > last_hardirq_cnt) {
- last_hardirq_cnt = cur_hardirq_cnt;
- if (!(last_hardirq_cnt&(last_hardirq_cnt-1)))
- pr_err("RAMSTER TESTING RRP hardirq_count=%d\n",
- last_hardirq_cnt);
- }
- cur_softirq_cnt = softirq_count() >> SOFTIRQ_SHIFT;
- if (cur_softirq_cnt > last_softirq_cnt) {
- last_softirq_cnt = cur_softirq_cnt;
- if (!(last_softirq_cnt&(last_softirq_cnt-1)))
- pr_err("RAMSTER TESTING RRP softirq_count=%d\n",
- last_softirq_cnt);
- }
- cur_preempt_cnt = preempt_count() & PREEMPT_MASK;
- if (cur_preempt_cnt > last_preempt_cnt) {
- last_preempt_cnt = cur_preempt_cnt;
- if (!(last_preempt_cnt&(last_preempt_cnt-1)))
- pr_err("RAMSTER TESTING RRP preempt_count=%d\n",
- last_preempt_cnt);
- }
-}
-#endif
-
-int r2net_remote_put(struct tmem_xhandle *xh, char *data, size_t size,
- bool ephemeral, int *remotenode)
-{
- int nodenum, ret = -1, status;
- struct r2nm_node *node = NULL;
- struct kvec vec[2];
- size_t veclen = 2;
- u32 msg_type;
- struct r2net_node *nn;
-
- BUG_ON(size > RMSTR_R2NET_MAX_LEN);
- xh->client_id = r2nm_this_node(); /* which node is putting */
- vec[0].iov_len = sizeof(*xh);
- vec[0].iov_base = xh;
- vec[1].iov_len = size;
- vec[1].iov_base = data;
-
- node = r2net_target_node;
- if (!node)
- goto out;
-
- nodenum = r2net_target_nodenum;
-
- r2nm_node_get(node);
-
- nn = r2net_nn_from_num(nodenum);
- if (nn->nn_persistent_error || !nn->nn_sc_valid) {
- ret = -ENOTCONN;
- r2nm_node_put(node);
- goto out;
- }
-
- if (ephemeral)
- msg_type = RMSTR_TMEM_PUT_EPH;
- else
- msg_type = RMSTR_TMEM_PUT_PERS;
-#ifdef RAMSTER_TESTING
- /* leave me here to see if it catches a weird crash */
- ramster_check_irq_counts();
-#endif
-
- ret = r2net_send_message_vec(msg_type, RMSTR_KEY, vec, veclen,
- nodenum, &status);
- if (ret < 0)
- ret = -1;
- else {
- ret = status;
- *remotenode = nodenum;
- }
-
- r2nm_node_put(node);
-out:
- return ret;
-}
-
-int r2net_remote_flush(struct tmem_xhandle *xh, int remotenode)
-{
- int ret = -1, status;
- struct r2nm_node *node = NULL;
- struct kvec vec[1];
- size_t veclen = 1;
-
- node = r2nm_get_node_by_num(remotenode);
- BUG_ON(node == NULL);
- xh->client_id = r2nm_this_node(); /* which node is flushing */
- vec[0].iov_len = sizeof(*xh);
- vec[0].iov_base = xh;
- BUG_ON(irqs_disabled());
- BUG_ON(in_softirq());
- ret = r2net_send_message_vec(RMSTR_TMEM_FLUSH, RMSTR_KEY,
- vec, veclen, remotenode, &status);
- r2nm_node_put(node);
- return ret;
-}
-
-int r2net_remote_flush_object(struct tmem_xhandle *xh, int remotenode)
-{
- int ret = -1, status;
- struct r2nm_node *node = NULL;
- struct kvec vec[1];
- size_t veclen = 1;
-
- node = r2nm_get_node_by_num(remotenode);
- BUG_ON(node == NULL);
- xh->client_id = r2nm_this_node(); /* which node is flobjing */
- vec[0].iov_len = sizeof(*xh);
- vec[0].iov_base = xh;
- ret = r2net_send_message_vec(RMSTR_TMEM_FLOBJ, RMSTR_KEY,
- vec, veclen, remotenode, &status);
- r2nm_node_put(node);
- return ret;
-}
-
-/*
- * Handler registration
- */
-
-static LIST_HEAD(r2net_unreg_list);
-
-static void r2net_unregister_handlers(void)
-{
- r2net_unregister_handler_list(&r2net_unreg_list);
-}
-
-int r2net_register_handlers(void)
-{
- int status;
-
- status = r2net_register_handler(RMSTR_TMEM_PUT_EPH, RMSTR_KEY,
- RMSTR_R2NET_MAX_LEN,
- ramster_remote_put_handler,
- NULL, NULL, &r2net_unreg_list);
- if (status)
- goto bail;
-
- status = r2net_register_handler(RMSTR_TMEM_PUT_PERS, RMSTR_KEY,
- RMSTR_R2NET_MAX_LEN,
- ramster_remote_put_handler,
- NULL, NULL, &r2net_unreg_list);
- if (status)
- goto bail;
-
- status = r2net_register_handler(RMSTR_TMEM_ASYNC_GET_REQUEST, RMSTR_KEY,
- RMSTR_R2NET_MAX_LEN,
- ramster_remote_async_get_request_handler,
- NULL, NULL,
- &r2net_unreg_list);
- if (status)
- goto bail;
-
- status = r2net_register_handler(RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST,
- RMSTR_KEY, RMSTR_R2NET_MAX_LEN,
- ramster_remote_async_get_request_handler,
- NULL, NULL,
- &r2net_unreg_list);
- if (status)
- goto bail;
-
- status = r2net_register_handler(RMSTR_TMEM_ASYNC_GET_REPLY, RMSTR_KEY,
- RMSTR_R2NET_MAX_LEN,
- ramster_remote_async_get_reply_handler,
- NULL, NULL,
- &r2net_unreg_list);
- if (status)
- goto bail;
-
- status = r2net_register_handler(RMSTR_TMEM_FLUSH, RMSTR_KEY,
- RMSTR_R2NET_MAX_LEN,
- ramster_remote_flush_handler,
- NULL, NULL,
- &r2net_unreg_list);
- if (status)
- goto bail;
-
- status = r2net_register_handler(RMSTR_TMEM_FLOBJ, RMSTR_KEY,
- RMSTR_R2NET_MAX_LEN,
- ramster_remote_flobj_handler,
- NULL, NULL,
- &r2net_unreg_list);
- if (status)
- goto bail;
-
- pr_info("ramster: r2net handlers registered\n");
-
-bail:
- if (status) {
- r2net_unregister_handlers();
- pr_err("ramster: couldn't register r2net handlers\n");
- }
- return status;
-}
diff --git a/drivers/staging/zcache/ramster/ramster-howto.txt b/drivers/staging/zcache/ramster/ramster-howto.txt
deleted file mode 100644
index 7b1ee3b..0000000
--- a/drivers/staging/zcache/ramster/ramster-howto.txt
+++ /dev/null
@@ -1,366 +0,0 @@
- RAMSTER HOW-TO
-
-Author: Dan Magenheimer
-Ramster maintainer: Konrad Wilk <konrad.wilk@oracle.com>
-
-This is a HOWTO document for ramster which, as of this writing, is in
-the kernel as a subdirectory of zcache in drivers/staging, called ramster.
-(Zcache can be built with or without ramster functionality.) If enabled
-and properly configured, ramster allows memory capacity load balancing
-across multiple machines in a cluster. Further, the ramster code serves
-as an example of asynchronous access for zcache (as well as cleancache and
-frontswap) that may prove useful for future transcendent memory
-implementations, such as KVM and NVRAM. While ramster works today on
-any network connection that supports kernel sockets, its features may
-become more interesting on future high-speed fabrics/interconnects.
-
-Ramster requires both kernel and userland support. The userland support,
-called ramster-tools, is known to work with EL6-based distros, but is a
-set of poorly-hacked slightly-modified cluster tools based on ocfs2, which
-includes an init file, a config file, and a userland binary that interfaces
-to the kernel. This state of userland support reflects the abysmal userland
-skills of this suitably-embarrassed author; any help/patches to turn
-ramster-tools into more distributable rpms/debs useful for a wider range
-of distros would be appreciated. The source RPM that can be used as a
-starting point is available at:
- http://oss.oracle.com/projects/tmem/files/RAMster/
-
-As a result of this author's ignorance, userland setup described in this
-HOWTO assumes an EL6 distro and is described in EL6 syntax. Apologies
-if this offends anyone!
-
-Kernel support has only been tested on x86_64. Systems with an active
-ocfs2 filesystem should work, but since ramster leverages a lot of
-code from ocfs2, there may be latent issues. A kernel configuration that
-includes CONFIG_OCFS2_FS should build OK, and should certainly run OK
-if no ocfs2 filesystem is mounted.
-
-This HOWTO demonstrates memory capacity load balancing for a two-node
-cluster, where one node called the "local" node becomes overcommitted
-and the other node called the "remote" node provides additional RAM
-capacity for use by the local node. Ramster is capable of more complex
-topologies; see the last section titled "ADVANCED RAMSTER TOPOLOGIES".
-
-If you find any terms in this HOWTO unfamiliar or don't understand the
-motivation for ramster, the following LWN reading is recommended:
--- Transcendent Memory in a Nutshell (lwn.net/Articles/454795)
--- The future calculus of memory management (lwn.net/Articles/475681)
-And since ramster is built on top of zcache, this article may be helpful:
--- In-kernel memory compression (lwn.net/Articles/545244)
-
-Now that you've memorized the contents of those articles, let's get started!
-
-A. PRELIMINARY
-
-1) Install two x86_64 Linux systems that are known to work when
- upgraded to a recent upstream Linux kernel version.
-
-On each system:
-
-2) Configure, build and install, then boot Linux, just to ensure it
- can be done with an unmodified upstream kernel. Confirm you booted
- the upstream kernel with "uname -a".
-
-3) If you plan to do any performance testing or unless you plan to
- test only swapping, the "WasActive" patch is also highly recommended.
- (Search lkml.org for WasActive, apply the patch, rebuild your kernel.)
- For a demo or simple testing, the patch can be ignored.
-
-4) Install ramster-tools as root. An x86_64 rpm for EL6-based systems
- can be found at:
- http://oss.oracle.com/projects/tmem/files/RAMster/
- (Sorry but for now, non-EL6 users must recreate ramster-tools on
- their own from source. See above.)
-
-5) Ensure that debugfs is mounted at each boot. Examples below assume it
- is mounted at /sys/kernel/debug.
-
-B. BUILDING RAMSTER INTO THE KERNEL
-
-Do the following on each system:
-
-1) Using the kernel configuration mechanism of your choice, change
- your config to include:
-
- CONFIG_CLEANCACHE=y
- CONFIG_FRONTSWAP=y
- CONFIG_STAGING=y
- CONFIG_CONFIGFS_FS=y # NOTE: MUST BE y, not m
- CONFIG_ZCACHE=y
- CONFIG_RAMSTER=y
-
- For a linux-3.10 or later kernel, you should also set:
-
- CONFIG_ZCACHE_DEBUG=y
- CONFIG_RAMSTER_DEBUG=y
-
- Before building the kernel please doublecheck your kernel config
- file to ensure all of the settings are correct.
-
-2) Build this kernel and change your boot file (e.g. /etc/grub.conf)
- so that the new kernel will boot.
-
-3) Add "zcache" and "ramster" as kernel boot parameters for the new kernel.
-
-4) Reboot each system approximately simultaneously.
-
-5) Check dmesg to ensure there are some messages from ramster, prefixed
- by "ramster:"
-
- # dmesg | grep ramster
-
- You should also see a lot of files in:
-
- # ls /sys/kernel/debug/zcache
- # ls /sys/kernel/debug/ramster
-
- These are mostly counters for various zcache and ramster activities.
- You should also see files in:
-
- # ls /sys/kernel/mm/ramster
-
- These are sysfs files that control ramster as we shall see.
-
- Ramster now will act as a single-system zcache on each system
- but doesn't yet know anything about the cluster so can't yet do
- anything remotely.
-
-C. CONFIGURING THE RAMSTER CLUSTER
-
-This part can be error prone unless you are familiar with clustering
-filesystems. We need to describe the cluster in a /etc/ramster.conf
-file and the init scripts that parse it are extremely picky about
-the syntax.
-
-1) Create a /etc/ramster.conf file and ensure it is identical on both
-   systems. This file mimics the ocfs2 format; a good amount of
-   documentation can be found by searching for ocfs2.conf, but you can use:
-
- cluster:
- name = ramster
- node_count = 2
- node:
- name = system1
- cluster = ramster
- number = 0
- ip_address = my.ip.ad.r1
- ip_port = 7777
- node:
- name = system2
- cluster = ramster
- number = 1
- ip_address = my.ip.ad.r2
- ip_port = 7777
-
- You must ensure that the "name" field in the file exactly matches
- the output of "hostname" on each system; if "hostname" shows a
- fully-qualified hostname, ensure the name is fully qualified in
- /etc/ramster.conf. Obviously, substitute my.ip.ad.rx with proper
- ip addresses.
-
-2) Enable the ramster service and configure it. If you used the
- EL6 ramster-tools, this would be:
-
- # chkconfig --add ramster
- # service ramster configure
-
- Set "load on boot" to "y", cluster to start is "ramster" (or whatever
- name you chose in ramster.conf), heartbeat dead threshold as "500",
- network idle timeout as "1000000". Leave the others as default.
-
-3) Reboot both systems. After reboot, try (assuming EL6 ramster-tools):
-
- # service ramster status
-
- You should see "Checking RAMSTER cluster "ramster": Online". If you do
- not, something is wrong and ramster will not work. Note that you
- should also see that the driver for "configfs" is loaded and mounted,
- the driver for ocfs2_dlmfs is not loaded, and some numbers for network
- parameters. You will also see "Checking RAMSTER heartbeat: Not active".
- That's all OK.
-
-4) Now you need to start the cluster heartbeat; the cluster is not "up"
- until all nodes detect a heartbeat. In a real cluster, heartbeat detection
- is done via a cluster filesystem, but ramster doesn't require one. Some
- hack-y kernel code in ramster can start the heartbeat for you though if
- you tell it what nodes are "up". To enable the heartbeat, do:
-
- # echo 0 > /sys/kernel/mm/ramster/manual_node_up
- # echo 1 > /sys/kernel/mm/ramster/manual_node_up
-
- This must be done on BOTH nodes and, to avoid timeouts, must be done
- approximately concurrently on both nodes. On an EL6 system, it is
- convenient to put these lines in /etc/rc.local. To confirm that the
- cluster is now up, on both systems do:
-
- # dmesg | grep ramster
-
- You should see ramster "Accepted connection" messages in dmesg on both
- nodes after this. Note that if you check userland status again with
-
- # service ramster status
-
- you will still see "Checking RAMSTER heartbeat: Not active". That's
- still OK... the ramster kernel heartbeat hack doesn't communicate to
- userland.
-
-5) You now must tell each node the node to which it should "remotify" pages.
- On this two node cluster, we will assume the "local" node, node 0, has
- memory overcommitted and will use ramster to utilize RAM capacity on
- the "remote node", node 1. To configure this, on node 0, you do:
-
- # echo 1 > /sys/kernel/mm/ramster/remote_target_nodenum
-
- You should see "ramster: node 1 set as remotification target" in dmesg
- on node 0. Again, on EL6, /etc/rc.local is a good place to put this
- on node 0 so you don't forget to do it at each boot.
-
-6) One more step: By default, the ramster code does not "remotify" any
- pages; this is primarily for testing purposes, but sometimes it is
- useful. This may change in the future, but for now, on node 0, you do:
-
- # echo 1 > /sys/kernel/mm/ramster/pers_remotify_enable
- # echo 1 > /sys/kernel/mm/ramster/eph_remotify_enable
-
- The first enables remotifying swap (persistent, aka frontswap) pages,
-   the second enables remotifying page cache (ephemeral, aka cleancache)
- pages.
-
- On EL6, these lines can also be put in /etc/rc.local (AFTER the
- node_up lines), or at the beginning of a script that runs a workload.
-
-7) Note that most testing has been done with both/all machines booted
- roughly simultaneously to avoid cluster timeouts. Ideally, you should
- do this too unless you are trying to break ramster rather than just
- use it. ;-)
-
-D. TESTING RAMSTER
-
-1) Note that ramster has no value unless pages get "remotified". For
- swap/frontswap/persistent pages, this doesn't happen unless/until
- the workload would cause swapping to occur, at which point pages
- are put into frontswap/zcache, and the remotification thread starts
- working. To get to the point where the system swaps, you either
- need a workload for which the working set exceeds the RAM in the
- system; or you need to somehow reduce the amount of RAM one of
-   the systems sees. The latter is easy when testing in a VM, but
-   harder on physical systems. In some cases, "mem=xxxM" on the
-   kernel command line restricts memory, but for some values of xxx
-   the kernel may fail to boot. One may also try creating a fixed
-   RAMdisk, doing nothing with it, but ensuring that it eats up a fixed
-   amount of RAM. (A small example program that pins a fixed amount of
-   RAM is sketched at the end of this section.)
-
-2) To see if ramster is working, on the "remote node", node 1, try:
-
- # grep . /sys/kernel/debug/ramster/foreign_*
- # # note, that is space-dot-space between grep and the pathname
-
- to monitor the number (and max) ephemeral and persistent pages
- that ramster has sent. If these stay at zero, ramster is not working
- either because the workload on the local node (node 0) isn't creating
- enough memory pressure or because "remotifying" isn't working. On the
- local system, node 0, you can watch lots of useful information also.
- Try:
-
- grep . /sys/kernel/debug/zcache/*pageframes* \
- /sys/kernel/debug/zcache/*zbytes* \
- /sys/kernel/debug/zcache/*zpages* \
- /sys/kernel/debug/ramster/*remote*
-
- Of particular note are the remote_*_pages_succ_get counters. These
- show how many disk reads and/or disk writes have been avoided on the
- overcommitted local system by storing pages remotely using ramster.
-
- At the risk of information overload, you can also grep:
-
- /sys/kernel/debug/cleancache/* and /sys/kernel/debug/frontswap/*
-
- These show, for example, how many disk reads and/or disk writes have
- been avoided by using zcache to optimize RAM on the local system.
-
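For illustration only (this program is not part of ramster or
ramster-tools), one more way to reduce the effectively-available RAM on
the local node is to pin a fixed amount of memory with a small helper
along the lines of the sketch below; build it with "gcc -o eatmem
eatmem.c", run it as root (e.g. "./eatmem 1024" to pin roughly 1024 MiB),
and interrupt it with ^C to release the memory again:

    /* eatmem.c - illustrative sketch: pin a fixed amount of RAM */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            size_t mb = (argc > 1) ? strtoul(argv[1], NULL, 10) : 512;
            size_t len = mb << 20;
            char *p = malloc(len);

            if (p == NULL || mlock(p, len) != 0) {
                    perror("eatmem");
                    return 1;
            }
            memset(p, 0xa5, len);   /* touch every page so it stays resident */
            printf("eatmem: pinned %zu MiB, sleeping until ^C\n", mb);
            pause();
            return 0;
    }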
-
-AUTOMATIC SWAP REPATRIATION
-
-You may notice that while the systems are idle, the foreign persistent
-page count on the remote machine slowly decreases. This is because
-ramster implements "frontswap selfshrinking": When possible, swap
-pages that have been remotified are slowly repatriated to the local
-machine. This is so that local RAM can be used when possible and
-so that, in case of remote machine crash, the probability of loss
-of data is reduced.
-
-REBOOTING / POWEROFF
-
-If a system is shut down while some of its swap pages still reside
-on a remote system, the system may lock up during the shutdown
-sequence. This will occur if the network is shut down before the
-swap mechanism is shut down, which is the default ordering on many
-distros. To avoid this annoying problem, simply shut off the swap
-subsystem before starting the shutdown sequence, e.g.:
-
- # swapoff -a
- # reboot
-
-Ideally, this swapoff-before-ifdown ordering should be enforced permanently
-using shutdown scripts.
-
-KNOWN PROBLEMS
-
-1) You may periodically see messages such as:
-
- ramster_r2net, message length problem
-
- This is harmless but indicates that a node is sending messages
- containing compressed pages that exceed the maximum for zcache
- (PAGE_SIZE*15/16). The sender side needs to be fixed.
-
-2) If you see a "No longer connected to node..." message or a "No connection
-   established with node X after N seconds", you may be in an
-   unrecoverable state. If you are certain all of the
- appropriate cluster configuration steps described above have been
- performed, try rebooting the two servers concurrently to see if
- the cluster starts.
-
- Note that "Connection to node... shutdown, state 7" is an intermediate
- connection state. As long as you later see "Accepted connection", the
- intermediate states are harmless.
-
-3) There are known issues in counting certain values. As a result
- you may see periodic warnings from the kernel. Almost always you
- will see "ramster: bad accounting for XXX". There are also "WARN_ONCE"
- messages. If you see kernel warnings with a tombstone, please report
- them. They are harmless but reflect bugs that need to be eventually fixed.
-
-ADVANCED RAMSTER TOPOLOGIES
-
-The kernel code for ramster can support up to eight nodes in a cluster,
-but no testing has been done with more than three nodes.
-
-In the example described above, the "remote" node serves as a RAM
-overflow for the "local" node. This can be made symmetric by appropriate
-settings of the sysfs remote_target_nodenum file. For example, by setting:
-
- # echo 1 > /sys/kernel/mm/ramster/remote_target_nodenum
-
-on node 0, and
-
- # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum
-
-on node 1, each node can serve as a RAM overflow for the other.
-
-For more than two nodes, a "RAM server" can be configured. For a
-three node system, set:
-
- # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum
-
-on node 1, and
-
- # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum
-
-on node 2. Then node 0 is a RAM server for node 1 and node 2.
-
-In this implementation of ramster, any remote node is potentially a single
-point of failure (SPOF). Though the probability of failure is reduced
-by automatic swap repatriation (see above), a proposed future enhancement
-to ramster improves high-availability for the cluster by sending a copy
-of each page of data to two other nodes. Patches welcome!
diff --git a/drivers/staging/zcache/ramster/ramster.c b/drivers/staging/zcache/ramster/ramster.c
deleted file mode 100644
index a937ce1..0000000
--- a/drivers/staging/zcache/ramster/ramster.c
+++ /dev/null
@@ -1,925 +0,0 @@
-/*
- * ramster.c
- *
- * Copyright (c) 2010-2012, Dan Magenheimer, Oracle Corp.
- *
- * RAMster implements peer-to-peer transcendent memory, allowing a "cluster" of
- * kernels to dynamically pool their RAM so that a RAM-hungry workload on one
- * machine can temporarily and transparently utilize RAM on another machine
- * which is presumably idle or running a non-RAM-hungry workload.
- *
- * RAMster combines a clustering and messaging foundation based on the ocfs2
- * cluster layer with the in-kernel compression implementation of zcache, and
- * adds code to glue them together. When a page is "put" to RAMster, it is
- * compressed and stored locally. Periodically, a thread will "remotify" these
- * pages by sending them via messages to a remote machine. When the page is
- * later needed as indicated by a page fault, a "get" is issued. If the data
- * is local, it is uncompressed and the fault is resolved. If the data is
- * remote, a message is sent to fetch the data and the faulting thread sleeps;
- * when the data arrives, the thread awakens, the data is decompressed and
- * the fault is resolved.
- *
- * As of V5, clusters up to eight nodes are supported; each node can remotify
- * pages to one specified node, so clusters can be configured as clients to
- * a "memory server". Some simple policy is in place that will need to be
- * refined over time. Larger clusters and fault-resistant protocols can also
- * be added over time.
- */
-
-#include <linux/module.h>
-#include <linux/cpu.h>
-#include <linux/highmem.h>
-#include <linux/list.h>
-#include <linux/lzo.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/atomic.h>
-#include <linux/frontswap.h>
-#include "../tmem.h"
-#include "../zcache.h"
-#include "../zbud.h"
-#include "ramster.h"
-#include "ramster_nodemanager.h"
-#include "tcp.h"
-#include "debug.h"
-
-#define RAMSTER_TESTING
-
-#ifndef CONFIG_SYSFS
-#error "ramster needs sysfs to define cluster nodes to use"
-#endif
-
-static bool use_cleancache __read_mostly;
-static bool use_frontswap __read_mostly;
-static bool use_frontswap_exclusive_gets __read_mostly;
-
-/* These must be sysfs not debugfs as they are checked/used by userland!! */
-static unsigned long ramster_interface_revision __read_mostly =
- R2NM_API_VERSION; /* interface revision must match userspace! */
-static unsigned long ramster_pers_remotify_enable __read_mostly;
-static unsigned long ramster_eph_remotify_enable __read_mostly;
-static atomic_t ramster_remote_pers_pages = ATOMIC_INIT(0);
-#define MANUAL_NODES 8
-static bool ramster_nodes_manual_up[MANUAL_NODES] __read_mostly;
-static int ramster_remote_target_nodenum __read_mostly = -1;
-
-/* Used by this code. */
-long ramster_flnodes;
-/* FIXME frontswap selfshrinking knobs in debugfs? */
-
-static LIST_HEAD(ramster_rem_op_list);
-static DEFINE_SPINLOCK(ramster_rem_op_list_lock);
-static DEFINE_PER_CPU(struct ramster_preload, ramster_preloads);
-
-static DEFINE_PER_CPU(unsigned char *, ramster_remoteputmem1);
-static DEFINE_PER_CPU(unsigned char *, ramster_remoteputmem2);
-
-static struct kmem_cache *ramster_flnode_cache __read_mostly;
-
-static struct flushlist_node *ramster_flnode_alloc(struct tmem_pool *pool)
-{
- struct flushlist_node *flnode = NULL;
- struct ramster_preload *kp;
-
- kp = &__get_cpu_var(ramster_preloads);
- flnode = kp->flnode;
- BUG_ON(flnode == NULL);
- kp->flnode = NULL;
- inc_ramster_flnodes();
- return flnode;
-}
-
-/* the "flush list" asynchronously collects pages to remotely flush */
-#define FLUSH_ENTIRE_OBJECT ((uint32_t)-1)
-static void ramster_flnode_free(struct flushlist_node *flnode,
- struct tmem_pool *pool)
-{
- dec_ramster_flnodes();
- BUG_ON(ramster_flnodes < 0);
- kmem_cache_free(ramster_flnode_cache, flnode);
-}
-
-int ramster_do_preload_flnode(struct tmem_pool *pool)
-{
- struct ramster_preload *kp;
- struct flushlist_node *flnode;
- int ret = -ENOMEM;
-
- BUG_ON(!irqs_disabled());
- if (unlikely(ramster_flnode_cache == NULL))
- BUG();
- kp = &__get_cpu_var(ramster_preloads);
- flnode = kmem_cache_alloc(ramster_flnode_cache, GFP_ATOMIC);
- if (unlikely(flnode == NULL) && kp->flnode == NULL)
- BUG(); /* FIXME handle more gracefully, but how??? */
- else if (kp->flnode == NULL)
- kp->flnode = flnode;
- else
- kmem_cache_free(ramster_flnode_cache, flnode);
- return ret;
-}
-EXPORT_SYMBOL_GPL(ramster_do_preload_flnode);
-
-/*
- * Called by the message handler after a (still compressed) page has been
- * fetched from the remote machine in response to an "is_remote" tmem_get
- * or persistent tmem_localify. For a tmem_get, "extra" is the address of
- * the page that is to be filled to successfully resolve the tmem_get; for
- * a (persistent) tmem_localify, "extra" is NULL (as the data is placed only
- * in the local zcache). "data" points to "size" bytes of (compressed) data
- * passed in the message. In the case of a persistent remote get, if
- * pre-allocation was successful (see ramster_repatriate_preload), the page
- * is placed into both local zcache and at "extra".
- */
-int ramster_localify(int pool_id, struct tmem_oid *oidp, uint32_t index,
- char *data, unsigned int size, void *extra)
-{
- int ret = -ENOENT;
- unsigned long flags;
- struct tmem_pool *pool;
- bool eph, delete = false;
- void *pampd, *saved_hb;
- struct tmem_obj *obj;
-
- pool = zcache_get_pool_by_id(LOCAL_CLIENT, pool_id);
- if (unlikely(pool == NULL))
- /* pool doesn't exist anymore */
- goto out;
- eph = is_ephemeral(pool);
- local_irq_save(flags); /* FIXME: maybe only disable softirqs? */
- pampd = tmem_localify_get_pampd(pool, oidp, index, &obj, &saved_hb);
- if (pampd == NULL) {
- /* hmmm... must have been a flush while waiting */
-#ifdef RAMSTER_TESTING
- pr_err("UNTESTED pampd==NULL in ramster_localify\n");
-#endif
- if (eph)
- inc_ramster_remote_eph_pages_unsucc_get();
- else
- inc_ramster_remote_pers_pages_unsucc_get();
- obj = NULL;
- goto finish;
- } else if (unlikely(!pampd_is_remote(pampd))) {
- /* hmmm... must have been a dup put while waiting */
-#ifdef RAMSTER_TESTING
- pr_err("UNTESTED dup while waiting in ramster_localify\n");
-#endif
- if (eph)
- inc_ramster_remote_eph_pages_unsucc_get();
- else
- inc_ramster_remote_pers_pages_unsucc_get();
- obj = NULL;
- pampd = NULL;
- ret = -EEXIST;
- goto finish;
- } else if (size == 0) {
- /* no remote data, delete the local is_remote pampd */
- pampd = NULL;
- if (eph)
- inc_ramster_remote_eph_pages_unsucc_get();
- else
- BUG();
- delete = true;
- goto finish;
- }
- if (pampd_is_intransit(pampd)) {
- /*
- * a pampd is marked intransit if it is remote and space has
- * been allocated for it locally (note, only happens for
- * persistent pages, in which case the remote copy is freed)
- */
- BUG_ON(eph);
- pampd = pampd_mask_intransit_and_remote(pampd);
- zbud_copy_to_zbud(pampd, data, size);
- } else {
- /*
- * setting pampd to NULL tells tmem_localify_finish to leave
- * pampd alone... meaning it is left pointing to the
- * remote copy
- */
- pampd = NULL;
- obj = NULL;
- }
- /*
- * but in all cases, we decompress direct-to-memory to complete
- * the remotify and return success
- */
- BUG_ON(extra == NULL);
- zcache_decompress_to_page(data, size, (struct page *)extra);
- if (eph)
- inc_ramster_remote_eph_pages_succ_get();
- else
- inc_ramster_remote_pers_pages_succ_get();
- ret = 0;
-finish:
- tmem_localify_finish(obj, index, pampd, saved_hb, delete);
- zcache_put_pool(pool);
- local_irq_restore(flags);
-out:
- return ret;
-}
-
-void ramster_pampd_new_obj(struct tmem_obj *obj)
-{
- obj->extra = NULL;
-}
-
-void ramster_pampd_free_obj(struct tmem_pool *pool, struct tmem_obj *obj,
- bool pool_destroy)
-{
- struct flushlist_node *flnode;
-
- BUG_ON(preemptible());
- if (obj->extra == NULL)
- return;
- if (pool_destroy && is_ephemeral(pool))
- /* FIXME don't bother with remote eph data for now */
- return;
- BUG_ON(!pampd_is_remote(obj->extra));
- flnode = ramster_flnode_alloc(pool);
- flnode->xh.client_id = pampd_remote_node(obj->extra);
- flnode->xh.pool_id = pool->pool_id;
- flnode->xh.oid = obj->oid;
- flnode->xh.index = FLUSH_ENTIRE_OBJECT;
- flnode->rem_op.op = RAMSTER_REMOTIFY_FLUSH_OBJ;
- spin_lock(&ramster_rem_op_list_lock);
- list_add(&flnode->rem_op.list, &ramster_rem_op_list);
- spin_unlock(&ramster_rem_op_list_lock);
-}
-
-/*
- * Called on a remote persistent tmem_get to attempt to preallocate
- * local storage for the data contained in the remote persistent page.
- * If successfully preallocated, returns the pampd, marked as remote and
- * in_transit. Else returns NULL. Note that the appropriate tmem data
- * structure must be locked.
- */
-void *ramster_pampd_repatriate_preload(void *pampd, struct tmem_pool *pool,
- struct tmem_oid *oidp, uint32_t index,
- bool *intransit)
-{
- int clen = pampd_remote_size(pampd), c;
- void *ret_pampd = NULL;
- unsigned long flags;
- struct tmem_handle th;
-
- BUG_ON(!pampd_is_remote(pampd));
- BUG_ON(is_ephemeral(pool));
- if (use_frontswap_exclusive_gets)
- /* don't need local storage */
- goto out;
- if (pampd_is_intransit(pampd)) {
- /*
- * to avoid multiple allocations (and maybe a memory leak)
- * don't preallocate if already in the process of being
- * repatriated
- */
- *intransit = true;
- goto out;
- }
- *intransit = false;
- local_irq_save(flags);
- th.client_id = pampd_remote_node(pampd);
- th.pool_id = pool->pool_id;
- th.oid = *oidp;
- th.index = index;
- ret_pampd = zcache_pampd_create(NULL, clen, true, false, &th);
- if (ret_pampd != NULL) {
- /*
- * a pampd is marked intransit if it is remote and space has
- * been allocated for it locally (note, only happens for
- * persistent pages, in which case the remote copy is freed)
- */
- ret_pampd = pampd_mark_intransit(ret_pampd);
- c = atomic_dec_return(&ramster_remote_pers_pages);
- WARN_ON_ONCE(c < 0);
- } else {
- inc_ramster_pers_pages_remote_nomem();
- }
- local_irq_restore(flags);
-out:
- return ret_pampd;
-}
-
-/*
- * Called on a remote tmem_get to invoke a message to fetch the page.
- * Might sleep so no tmem locks can be held. "extra" is passed
- * all the way through the round-trip messaging to ramster_localify.
- */
-int ramster_pampd_repatriate(void *fake_pampd, void *real_pampd,
- struct tmem_pool *pool,
- struct tmem_oid *oid, uint32_t index,
- bool free, void *extra)
-{
- struct tmem_xhandle xh;
- int ret;
-
- if (pampd_is_intransit(real_pampd))
- /* have local space pre-reserved, so free remote copy */
- free = true;
- xh = tmem_xhandle_fill(LOCAL_CLIENT, pool, oid, index);
- /* unreliable request/response for now */
- ret = r2net_remote_async_get(&xh, free,
- pampd_remote_node(fake_pampd),
- pampd_remote_size(fake_pampd),
- pampd_remote_cksum(fake_pampd),
- extra);
- return ret;
-}
-
-bool ramster_pampd_is_remote(void *pampd)
-{
- return pampd_is_remote(pampd);
-}
-
-int ramster_pampd_replace_in_obj(void *new_pampd, struct tmem_obj *obj)
-{
- int ret = -1;
-
- if (new_pampd != NULL) {
- if (obj->extra == NULL)
- obj->extra = new_pampd;
- /* enforce that all remote pages in an object reside
- * in the same node! */
- else if (pampd_remote_node(new_pampd) !=
- pampd_remote_node((void *)(obj->extra)))
- BUG();
- ret = 0;
- }
- return ret;
-}
-
-void *ramster_pampd_free(void *pampd, struct tmem_pool *pool,
- struct tmem_oid *oid, uint32_t index, bool acct)
-{
- bool eph = is_ephemeral(pool);
- void *local_pampd = NULL;
- int c;
-
- BUG_ON(preemptible());
- BUG_ON(!pampd_is_remote(pampd));
- WARN_ON(acct == false);
- if (oid == NULL) {
- /*
- * a NULL oid means to ignore this pampd free
- * as the remote freeing will be handled elsewhere
- */
- } else if (eph) {
- /* FIXME remote flush optional but probably good idea */
- } else if (pampd_is_intransit(pampd)) {
- /* did a pers remote get_and_free, so just free local */
- local_pampd = pampd_mask_intransit_and_remote(pampd);
- } else {
- struct flushlist_node *flnode =
- ramster_flnode_alloc(pool);
-
- flnode->xh.client_id = pampd_remote_node(pampd);
- flnode->xh.pool_id = pool->pool_id;
- flnode->xh.oid = *oid;
- flnode->xh.index = index;
- flnode->rem_op.op = RAMSTER_REMOTIFY_FLUSH_PAGE;
- spin_lock(&ramster_rem_op_list_lock);
- list_add(&flnode->rem_op.list, &ramster_rem_op_list);
- spin_unlock(&ramster_rem_op_list_lock);
- c = atomic_dec_return(&ramster_remote_pers_pages);
- WARN_ON_ONCE(c < 0);
- }
- return local_pampd;
-}
-EXPORT_SYMBOL_GPL(ramster_pampd_free);
-
-void ramster_count_foreign_pages(bool eph, int count)
-{
- BUG_ON(count != 1 && count != -1);
- if (eph) {
- if (count > 0) {
- inc_ramster_foreign_eph_pages();
- } else {
- dec_ramster_foreign_eph_pages();
-#ifdef CONFIG_RAMSTER_DEBUG
- WARN_ON_ONCE(ramster_foreign_eph_pages < 0);
-#endif
- }
- } else {
- if (count > 0) {
- inc_ramster_foreign_pers_pages();
- } else {
- dec_ramster_foreign_pers_pages();
-#ifdef CONFIG_RAMSTER_DEBUG
- WARN_ON_ONCE(ramster_foreign_pers_pages < 0);
-#endif
- }
- }
-}
-EXPORT_SYMBOL_GPL(ramster_count_foreign_pages);
-
-/*
- * For now, just push over a few pages every few seconds to
- * ensure that it basically works
- */
-static struct workqueue_struct *ramster_remotify_workqueue;
-static void ramster_remotify_process(struct work_struct *work);
-static DECLARE_DELAYED_WORK(ramster_remotify_worker,
- ramster_remotify_process);
-
-static void ramster_remotify_queue_delayed_work(unsigned long delay)
-{
- if (!queue_delayed_work(ramster_remotify_workqueue,
- &ramster_remotify_worker, delay))
- pr_err("ramster_remotify: bad workqueue\n");
-}
-
-static void ramster_remote_flush_page(struct flushlist_node *flnode)
-{
- struct tmem_xhandle *xh;
- int remotenode, ret;
-
- preempt_disable();
- xh = &flnode->xh;
- remotenode = flnode->xh.client_id;
- ret = r2net_remote_flush(xh, remotenode);
- if (ret >= 0)
- inc_ramster_remote_pages_flushed();
- else
- inc_ramster_remote_page_flushes_failed();
- preempt_enable_no_resched();
- ramster_flnode_free(flnode, NULL);
-}
-
-static void ramster_remote_flush_object(struct flushlist_node *flnode)
-{
- struct tmem_xhandle *xh;
- int remotenode, ret;
-
- preempt_disable();
- xh = &flnode->xh;
- remotenode = flnode->xh.client_id;
- ret = r2net_remote_flush_object(xh, remotenode);
- if (ret >= 0)
- inc_ramster_remote_objects_flushed();
- else
- inc_ramster_remote_object_flushes_failed();
- preempt_enable_no_resched();
- ramster_flnode_free(flnode, NULL);
-}
-
-int ramster_remotify_pageframe(bool eph)
-{
- struct tmem_xhandle xh;
- unsigned int size;
- int remotenode, ret, zbuds;
- struct tmem_pool *pool;
- unsigned long flags;
- unsigned char cksum;
- char *p;
- int i, j;
- unsigned char *tmpmem[2];
- struct tmem_handle th[2];
- unsigned int zsize[2];
-
- tmpmem[0] = __get_cpu_var(ramster_remoteputmem1);
- tmpmem[1] = __get_cpu_var(ramster_remoteputmem2);
- local_bh_disable();
- zbuds = zbud_make_zombie_lru(&th[0], &tmpmem[0], &zsize[0], eph);
- /* now OK to release lock set in caller */
- local_bh_enable();
- if (zbuds == 0)
- goto out;
- BUG_ON(zbuds > 2);
- for (i = 0; i < zbuds; i++) {
- xh.client_id = th[i].client_id;
- xh.pool_id = th[i].pool_id;
- xh.oid = th[i].oid;
- xh.index = th[i].index;
- size = zsize[i];
- BUG_ON(size == 0 || size > zbud_max_buddy_size());
- for (p = tmpmem[i], cksum = 0, j = 0; j < size; j++)
- cksum += *p++;
- ret = r2net_remote_put(&xh, tmpmem[i], size, eph, &remotenode);
- if (ret != 0) {
- /*
- * This is some form of a memory leak... if the remote put
- * fails, there will never be another attempt to remotify
- * this page. But since we've dropped the zv pointer,
- * the page may have been freed or the data replaced
- * so we can't just "put it back" in the remote op list.
- * Even if we could, not sure where to put it in the list
- * because there may be flushes that must be strictly
- * ordered vs the put. So leave this as a FIXME for now.
- * But count them so we know if it becomes a problem.
- */
- if (eph)
- inc_ramster_eph_pages_remote_failed();
- else
- inc_ramster_pers_pages_remote_failed();
- break;
- } else {
- if (!eph)
- atomic_inc(&ramster_remote_pers_pages);
- }
- if (eph)
- inc_ramster_eph_pages_remoted();
- else
- inc_ramster_pers_pages_remoted();
- /*
- * data was successfully remoted so change the local version to
- * point to the remote node where it landed
- */
- local_bh_disable();
- pool = zcache_get_pool_by_id(LOCAL_CLIENT, xh.pool_id);
- local_irq_save(flags);
- (void)tmem_replace(pool, &xh.oid, xh.index,
- pampd_make_remote(remotenode, size, cksum));
- local_irq_restore(flags);
- zcache_put_pool(pool);
- local_bh_enable();
- }
-out:
- return zbuds;
-}
-
-static void zcache_do_remotify_flushes(void)
-{
- struct ramster_remotify_hdr *rem_op;
- union remotify_list_node *u;
-
- while (1) {
- spin_lock(&ramster_rem_op_list_lock);
- if (list_empty(&ramster_rem_op_list)) {
- spin_unlock(&ramster_rem_op_list_lock);
- goto out;
- }
- rem_op = list_first_entry(&ramster_rem_op_list,
- struct ramster_remotify_hdr, list);
- list_del_init(&rem_op->list);
- spin_unlock(&ramster_rem_op_list_lock);
- u = (union remotify_list_node *)rem_op;
- switch (rem_op->op) {
- case RAMSTER_REMOTIFY_FLUSH_PAGE:
- ramster_remote_flush_page((struct flushlist_node *)u);
- break;
- case RAMSTER_REMOTIFY_FLUSH_OBJ:
- ramster_remote_flush_object((struct flushlist_node *)u);
- break;
- default:
- BUG();
- }
- }
-out:
- return;
-}
-
-static void ramster_remotify_process(struct work_struct *work)
-{
- static bool remotify_in_progress;
- int i;
-
- BUG_ON(irqs_disabled());
- if (remotify_in_progress)
- goto requeue;
- if (ramster_remote_target_nodenum == -1)
- goto requeue;
- remotify_in_progress = true;
- if (use_cleancache && ramster_eph_remotify_enable) {
- for (i = 0; i < 100; i++) {
- zcache_do_remotify_flushes();
- (void)ramster_remotify_pageframe(true);
- }
- }
- if (use_frontswap && ramster_pers_remotify_enable) {
- for (i = 0; i < 100; i++) {
- zcache_do_remotify_flushes();
- (void)ramster_remotify_pageframe(false);
- }
- }
- remotify_in_progress = false;
-requeue:
- ramster_remotify_queue_delayed_work(HZ);
-}
-
-void ramster_remotify_init(void)
-{
- unsigned long n = 60UL;
- ramster_remotify_workqueue =
- create_singlethread_workqueue("ramster_remotify");
- ramster_remotify_queue_delayed_work(n * HZ);
-}
-
-static ssize_t ramster_manual_node_up_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- int i;
- char *p = buf;
- for (i = 0; i < MANUAL_NODES; i++)
- if (ramster_nodes_manual_up[i])
- p += sprintf(p, "%d ", i);
- p += sprintf(p, "\n");
- return p - buf;
-}
-
-static ssize_t ramster_manual_node_up_store(struct kobject *kobj,
- struct kobj_attribute *attr, const char *buf, size_t count)
-{
- int err;
- unsigned long node_num;
-
- err = kstrtoul(buf, 10, &node_num);
- if (err) {
- pr_err("ramster: bad strtoul?\n");
- return -EINVAL;
- }
- if (node_num >= MANUAL_NODES) {
- pr_err("ramster: bad node_num=%lu?\n", node_num);
- return -EINVAL;
- }
- if (ramster_nodes_manual_up[node_num]) {
- pr_err("ramster: node %d already up, ignoring\n",
- (int)node_num);
- } else {
- ramster_nodes_manual_up[node_num] = true;
- r2net_hb_node_up_manual((int)node_num);
- }
- return count;
-}
-
-static struct kobj_attribute ramster_manual_node_up_attr = {
- .attr = { .name = "manual_node_up", .mode = 0644 },
- .show = ramster_manual_node_up_show,
- .store = ramster_manual_node_up_store,
-};
-
-static ssize_t ramster_remote_target_nodenum_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- if (ramster_remote_target_nodenum == -1UL)
- return sprintf(buf, "unset\n");
- else
- return sprintf(buf, "%d\n", ramster_remote_target_nodenum);
-}
-
-static ssize_t ramster_remote_target_nodenum_store(struct kobject *kobj,
- struct kobj_attribute *attr, const char *buf, size_t count)
-{
- int err;
- unsigned long node_num;
-
- err = kstrtoul(buf, 10, &node_num);
- if (err) {
- pr_err("ramster: bad strtoul?\n");
- return -EINVAL;
- } else if (node_num == -1UL) {
- pr_err("ramster: disabling all remotification, "
- "data may still reside on remote nodes however\n");
- return -EINVAL;
- } else if (node_num >= MANUAL_NODES) {
- pr_err("ramster: bad node_num=%lu?\n", node_num);
- return -EINVAL;
- } else if (!ramster_nodes_manual_up[node_num]) {
- pr_err("ramster: node %d not up, ignoring setting "
- "of remotification target\n", (int)node_num);
- } else if (r2net_remote_target_node_set((int)node_num) >= 0) {
- pr_info("ramster: node %d set as remotification target\n",
- (int)node_num);
- ramster_remote_target_nodenum = (int)node_num;
- } else {
- pr_err("ramster: bad num to node node_num=%d?\n",
- (int)node_num);
- return -EINVAL;
- }
- return count;
-}
-
-static struct kobj_attribute ramster_remote_target_nodenum_attr = {
- .attr = { .name = "remote_target_nodenum", .mode = 0644 },
- .show = ramster_remote_target_nodenum_show,
- .store = ramster_remote_target_nodenum_store,
-};
-
-#define RAMSTER_SYSFS_RO(_name) \
- static ssize_t ramster_##_name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, char *buf) \
- { \
- return sprintf(buf, "%lu\n", ramster_##_name); \
- } \
- static struct kobj_attribute ramster_##_name##_attr = { \
- .attr = { .name = __stringify(_name), .mode = 0444 }, \
- .show = ramster_##_name##_show, \
- }
-
-#define RAMSTER_SYSFS_RW(_name) \
- static ssize_t ramster_##_name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, char *buf) \
- { \
- return sprintf(buf, "%lu\n", ramster_##_name); \
- } \
- static ssize_t ramster_##_name##_store(struct kobject *kobj, \
- struct kobj_attribute *attr, const char *buf, size_t count) \
- { \
- int err; \
- unsigned long enable; \
- err = kstrtoul(buf, 10, &enable); \
- if (err) \
- return -EINVAL; \
- ramster_##_name = enable; \
- return count; \
- } \
- static struct kobj_attribute ramster_##_name##_attr = { \
- .attr = { .name = __stringify(_name), .mode = 0644 }, \
- .show = ramster_##_name##_show, \
- .store = ramster_##_name##_store, \
- }
-
-#define RAMSTER_SYSFS_RO_ATOMIC(_name) \
- static ssize_t ramster_##_name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, char *buf) \
- { \
- return sprintf(buf, "%d\n", atomic_read(&ramster_##_name)); \
- } \
- static struct kobj_attribute ramster_##_name##_attr = { \
- .attr = { .name = __stringify(_name), .mode = 0444 }, \
- .show = ramster_##_name##_show, \
- }
-
-RAMSTER_SYSFS_RO(interface_revision);
-RAMSTER_SYSFS_RO_ATOMIC(remote_pers_pages);
-RAMSTER_SYSFS_RW(pers_remotify_enable);
-RAMSTER_SYSFS_RW(eph_remotify_enable);
-
-static struct attribute *ramster_attrs[] = {
- &ramster_interface_revision_attr.attr,
- &ramster_remote_pers_pages_attr.attr,
- &ramster_manual_node_up_attr.attr,
- &ramster_remote_target_nodenum_attr.attr,
- &ramster_pers_remotify_enable_attr.attr,
- &ramster_eph_remotify_enable_attr.attr,
- NULL,
-};
-
-static struct attribute_group ramster_attr_group = {
- .attrs = ramster_attrs,
- .name = "ramster",
-};
-
-/*
- * frontswap selfshrinking
- */
-
-/* In HZ, controls frequency of worker invocation. */
-static unsigned int selfshrink_interval __read_mostly = 5;
-/* Enable/disable with sysfs. */
-static bool frontswap_selfshrinking __read_mostly;
-
-static void selfshrink_process(struct work_struct *work);
-static DECLARE_DELAYED_WORK(selfshrink_worker, selfshrink_process);
-
-#ifndef CONFIG_RAMSTER_MODULE
-/* Enable/disable with kernel boot option. */
-static bool use_frontswap_selfshrink = true;
-#endif
-
-/*
- * The default values for the following parameters were deemed reasonable
- * by experimentation, may be workload-dependent, and can all be
- * adjusted via sysfs.
- */
-
-/* Control rate for frontswap shrinking. Higher hysteresis is slower. */
-static unsigned int frontswap_hysteresis __read_mostly = 20;
-
-/*
- * Number of selfshrink worker invocations to wait before observing that
- * frontswap selfshrinking should commence. Note that selfshrinking does
- * not use a separate worker thread.
- */
-static unsigned int frontswap_inertia __read_mostly = 3;
-
-/* Countdown to next invocation of frontswap_shrink() */
-static unsigned long frontswap_inertia_counter;
-
-/*
- * Invoked by the selfshrink worker thread, uses current number of pages
- * in frontswap (frontswap_curr_pages()), previous status, and control
- * values (hysteresis and inertia) to determine if frontswap should be
- * shrunk and what the new frontswap size should be. Note that
- * frontswap_shrink is essentially a partial swapoff that immediately
- * transfers pages from the "swap device" (frontswap) back into kernel
- * RAM; despite the name, frontswap "shrinking" is very different from
- * the "shrinker" interface used by the kernel MM subsystem to reclaim
- * memory.
- */
-static void frontswap_selfshrink(void)
-{
- static unsigned long cur_frontswap_pages;
- static unsigned long last_frontswap_pages;
- static unsigned long tgt_frontswap_pages;
-
- last_frontswap_pages = cur_frontswap_pages;
- cur_frontswap_pages = frontswap_curr_pages();
- if (!cur_frontswap_pages ||
- (cur_frontswap_pages > last_frontswap_pages)) {
- frontswap_inertia_counter = frontswap_inertia;
- return;
- }
- if (frontswap_inertia_counter && --frontswap_inertia_counter)
- return;
- if (cur_frontswap_pages <= frontswap_hysteresis)
- tgt_frontswap_pages = 0;
- else
- tgt_frontswap_pages = cur_frontswap_pages -
- (cur_frontswap_pages / frontswap_hysteresis);
- frontswap_shrink(tgt_frontswap_pages);
-}
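
/*
 * Illustrative arithmetic for the defaults above: with
 * frontswap_hysteresis == 20 and, say, 10000 pages currently in
 * frontswap, tgt_frontswap_pages = 10000 - 10000/20 = 9500, so each
 * invocation that gets past the inertia countdown asks frontswap to
 * shrink by roughly 5% of its current pages; once cur_frontswap_pages
 * drops to 20 or fewer, the target becomes 0.
 */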
-
-#ifndef CONFIG_RAMSTER_MODULE
-static int __init ramster_nofrontswap_selfshrink_setup(char *s)
-{
- use_frontswap_selfshrink = false;
- return 1;
-}
-
-__setup("noselfshrink", ramster_nofrontswap_selfshrink_setup);
-#endif
-
-static void selfshrink_process(struct work_struct *work)
-{
- if (frontswap_selfshrinking && frontswap_enabled) {
- frontswap_selfshrink();
- schedule_delayed_work(&selfshrink_worker,
- selfshrink_interval * HZ);
- }
-}
-
-void ramster_cpu_up(int cpu)
-{
- unsigned char *p1 = kzalloc(PAGE_SIZE, GFP_KERNEL | __GFP_REPEAT);
- unsigned char *p2 = kzalloc(PAGE_SIZE, GFP_KERNEL | __GFP_REPEAT);
- BUG_ON(!p1 || !p2);
- per_cpu(ramster_remoteputmem1, cpu) = p1;
- per_cpu(ramster_remoteputmem2, cpu) = p2;
-}
-EXPORT_SYMBOL_GPL(ramster_cpu_up);
-
-void ramster_cpu_down(int cpu)
-{
- struct ramster_preload *kp;
-
- kfree(per_cpu(ramster_remoteputmem1, cpu));
- per_cpu(ramster_remoteputmem1, cpu) = NULL;
- kfree(per_cpu(ramster_remoteputmem2, cpu));
- per_cpu(ramster_remoteputmem2, cpu) = NULL;
- kp = &per_cpu(ramster_preloads, cpu);
- if (kp->flnode) {
- kmem_cache_free(ramster_flnode_cache, kp->flnode);
- kp->flnode = NULL;
- }
-}
-EXPORT_SYMBOL_GPL(ramster_cpu_down);
-
-void ramster_register_pamops(struct tmem_pamops *pamops)
-{
- pamops->free_obj = ramster_pampd_free_obj;
- pamops->new_obj = ramster_pampd_new_obj;
- pamops->replace_in_obj = ramster_pampd_replace_in_obj;
- pamops->is_remote = ramster_pampd_is_remote;
- pamops->repatriate = ramster_pampd_repatriate;
- pamops->repatriate_preload = ramster_pampd_repatriate_preload;
-}
-EXPORT_SYMBOL_GPL(ramster_register_pamops);
-
-void ramster_init(bool cleancache, bool frontswap,
- bool frontswap_exclusive_gets,
- bool frontswap_selfshrink)
-{
- int ret = 0;
-
- if (cleancache)
- use_cleancache = true;
- if (frontswap)
- use_frontswap = true;
- if (frontswap_exclusive_gets)
- use_frontswap_exclusive_gets = true;
- ramster_debugfs_init();
- ret = sysfs_create_group(mm_kobj, &ramster_attr_group);
- if (ret)
- pr_err("ramster: can't create sysfs for ramster\n");
- (void)r2net_register_handlers();
-#ifdef CONFIG_RAMSTER_MODULE
- ret = r2nm_init();
- if (ret)
- pr_err("ramster: can't init r2net\n");
- frontswap_selfshrinking = frontswap_selfshrink;
-#else
- frontswap_selfshrinking = use_frontswap_selfshrink;
-#endif
- INIT_LIST_HEAD(&ramster_rem_op_list);
- ramster_flnode_cache = kmem_cache_create("ramster_flnode",
- sizeof(struct flushlist_node), 0, 0, NULL);
- if (frontswap_selfshrinking) {
- pr_info("ramster: Initializing frontswap selfshrink driver.\n");
- schedule_delayed_work(&selfshrink_worker,
- selfshrink_interval * HZ);
- }
- ramster_remotify_init();
-}
-EXPORT_SYMBOL_GPL(ramster_init);
diff --git a/drivers/staging/zcache/ramster/ramster.h b/drivers/staging/zcache/ramster/ramster.h
deleted file mode 100644
index 6d41a7a..0000000
--- a/drivers/staging/zcache/ramster/ramster.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * ramster.h
- *
- * Peer-to-peer transcendent memory
- *
- * Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
- */
-
-#ifndef _RAMSTER_RAMSTER_H_
-#define _RAMSTER_RAMSTER_H_
-
-#include "../tmem.h"
-
-enum ramster_remotify_op {
- RAMSTER_REMOTIFY_FLUSH_PAGE,
- RAMSTER_REMOTIFY_FLUSH_OBJ,
-};
-
-struct ramster_remotify_hdr {
- enum ramster_remotify_op op;
- struct list_head list;
-};
-
-struct flushlist_node {
- struct ramster_remotify_hdr rem_op;
- struct tmem_xhandle xh;
-};
-
-struct ramster_preload {
- struct flushlist_node *flnode;
-};
-
-union remotify_list_node {
- struct ramster_remotify_hdr rem_op;
- struct {
- struct ramster_remotify_hdr rem_op;
- struct tmem_handle th;
- } zbud_hdr;
- struct flushlist_node flist;
-};
-
-/*
- * format of remote pampd:
- * bit 0 is reserved for zbud (in-page buddy selection)
- * bit 1 == intransit
- * bit 2 == is_remote... if this bit is set, then
- * bit 3-10 == remotenode
- * bit 11-23 == size
- * bit 24-31 == cksum
- */
-#define FAKE_PAMPD_INTRANSIT_BITS 1
-#define FAKE_PAMPD_ISREMOTE_BITS 1
-#define FAKE_PAMPD_REMOTENODE_BITS 8
-#define FAKE_PAMPD_REMOTESIZE_BITS 13
-#define FAKE_PAMPD_CHECKSUM_BITS 8
-
-#define FAKE_PAMPD_INTRANSIT_SHIFT 1
-#define FAKE_PAMPD_ISREMOTE_SHIFT (FAKE_PAMPD_INTRANSIT_SHIFT + \
- FAKE_PAMPD_INTRANSIT_BITS)
-#define FAKE_PAMPD_REMOTENODE_SHIFT (FAKE_PAMPD_ISREMOTE_SHIFT + \
- FAKE_PAMPD_ISREMOTE_BITS)
-#define FAKE_PAMPD_REMOTESIZE_SHIFT (FAKE_PAMPD_REMOTENODE_SHIFT + \
- FAKE_PAMPD_REMOTENODE_BITS)
-#define FAKE_PAMPD_CHECKSUM_SHIFT (FAKE_PAMPD_REMOTESIZE_SHIFT + \
- FAKE_PAMPD_REMOTESIZE_BITS)
-
-#define FAKE_PAMPD_MASK(x) ((1UL << (x)) - 1)
-
-static inline void *pampd_make_remote(int remotenode, size_t size,
- unsigned char cksum)
-{
- unsigned long fake_pampd = 0;
- fake_pampd |= 1UL << FAKE_PAMPD_ISREMOTE_SHIFT;
- fake_pampd |= ((unsigned long)remotenode &
- FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTENODE_BITS)) <<
- FAKE_PAMPD_REMOTENODE_SHIFT;
- fake_pampd |= ((unsigned long)size &
- FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTESIZE_BITS)) <<
- FAKE_PAMPD_REMOTESIZE_SHIFT;
- fake_pampd |= ((unsigned long)cksum &
- FAKE_PAMPD_MASK(FAKE_PAMPD_CHECKSUM_BITS)) <<
- FAKE_PAMPD_CHECKSUM_SHIFT;
- return (void *)fake_pampd;
-}
-
-static inline unsigned int pampd_remote_node(void *pampd)
-{
- unsigned long fake_pampd = (unsigned long)pampd;
- return (fake_pampd >> FAKE_PAMPD_REMOTENODE_SHIFT) &
- FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTENODE_BITS);
-}
-
-static inline unsigned int pampd_remote_size(void *pampd)
-{
- unsigned long fake_pampd = (unsigned long)pampd;
- return (fake_pampd >> FAKE_PAMPD_REMOTESIZE_SHIFT) &
- FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTESIZE_BITS);
-}
-
-static inline unsigned char pampd_remote_cksum(void *pampd)
-{
- unsigned long fake_pampd = (unsigned long)pampd;
- return (fake_pampd >> FAKE_PAMPD_CHECKSUM_SHIFT) &
- FAKE_PAMPD_MASK(FAKE_PAMPD_CHECKSUM_BITS);
-}
-
-static inline bool pampd_is_remote(void *pampd)
-{
- unsigned long fake_pampd = (unsigned long)pampd;
- return (fake_pampd >> FAKE_PAMPD_ISREMOTE_SHIFT) &
- FAKE_PAMPD_MASK(FAKE_PAMPD_ISREMOTE_BITS);
-}
-
-static inline bool pampd_is_intransit(void *pampd)
-{
- unsigned long fake_pampd = (unsigned long)pampd;
- return (fake_pampd >> FAKE_PAMPD_INTRANSIT_SHIFT) &
- FAKE_PAMPD_MASK(FAKE_PAMPD_INTRANSIT_BITS);
-}
-
-/* note that it is a BUG for intransit to be set without isremote also set */
-static inline void *pampd_mark_intransit(void *pampd)
-{
- unsigned long fake_pampd = (unsigned long)pampd;
-
- fake_pampd |= 1UL << FAKE_PAMPD_ISREMOTE_SHIFT;
- fake_pampd |= 1UL << FAKE_PAMPD_INTRANSIT_SHIFT;
- return (void *)fake_pampd;
-}
-
-static inline void *pampd_mask_intransit_and_remote(void *marked_pampd)
-{
- unsigned long pampd = (unsigned long)marked_pampd;
-
- pampd &= ~(1UL << FAKE_PAMPD_INTRANSIT_SHIFT);
- pampd &= ~(1UL << FAKE_PAMPD_ISREMOTE_SHIFT);
- return (void *)pampd;
-}
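As a quick illustration of the encoding described above, here is a minimal sketch (not part of the original header) that round-trips a value through the inline helpers; it assumes the definitions above are in scope and that the encoded fields fit in the low 32 bits of an unsigned long.

/* Illustrative round-trip through the fake-pampd helpers above.
 * With the shifts as defined, bits 0-31 are used: 1 reserved (zbud) +
 * 1 intransit + 1 isremote + 8 remotenode + 13 size + 8 cksum. */
static void __maybe_unused fake_pampd_example(void)
{
	/* encode remote node 3, size 1024, checksum 0xab */
	void *pampd = pampd_make_remote(3, 1024, 0xab);

	BUG_ON(!pampd_is_remote(pampd));
	BUG_ON(pampd_remote_node(pampd) != 3);
	BUG_ON(pampd_remote_size(pampd) != 1024);
	BUG_ON(pampd_remote_cksum(pampd) != 0xab);

	/* marking in-transit also keeps the is_remote bit set, as the
	 * comment above pampd_mark_intransit() requires */
	pampd = pampd_mark_intransit(pampd);
	BUG_ON(!pampd_is_intransit(pampd));
}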
-
-extern int r2net_remote_async_get(struct tmem_xhandle *,
- bool, int, size_t, uint8_t, void *extra);
-extern int r2net_remote_put(struct tmem_xhandle *, char *, size_t,
- bool, int *);
-extern int r2net_remote_flush(struct tmem_xhandle *, int);
-extern int r2net_remote_flush_object(struct tmem_xhandle *, int);
-extern int r2net_register_handlers(void);
-extern int r2net_remote_target_node_set(int);
-
-extern int ramster_remotify_pageframe(bool);
-extern void ramster_init(bool, bool, bool, bool);
-extern void ramster_register_pamops(struct tmem_pamops *);
-extern int ramster_localify(int, struct tmem_oid *oidp, uint32_t, char *,
- unsigned int, void *);
-extern void *ramster_pampd_free(void *, struct tmem_pool *, struct tmem_oid *,
- uint32_t, bool);
-extern void ramster_count_foreign_pages(bool, int);
-extern int ramster_do_preload_flnode(struct tmem_pool *);
-extern void ramster_cpu_up(int);
-extern void ramster_cpu_down(int);
-
-#endif /* _RAMSTER_RAMSTER_H */
diff --git a/drivers/staging/zcache/ramster/ramster_nodemanager.h b/drivers/staging/zcache/ramster/ramster_nodemanager.h
deleted file mode 100644
index dbaae34..0000000
--- a/drivers/staging/zcache/ramster/ramster_nodemanager.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8; -*-
- * vim: noexpandtab sw=8 ts=8 sts=0:
- *
- * ramster_nodemanager.h
- *
- * Header describing the interface between userspace and the kernel
- * for the ramster_nodemanager module.
- *
- * Copyright (C) 2002, 2004, 2012 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- *
- */
-
-#ifndef _RAMSTER_NODEMANAGER_H
-#define _RAMSTER_NODEMANAGER_H
-
-#define R2NM_API_VERSION 5
-
-#define R2NM_MAX_NODES 255
-#define R2NM_INVALID_NODE_NUM 255
-
-/* host name, group name, cluster name all 64 bytes */
-#define R2NM_MAX_NAME_LEN 64 /* __NEW_UTS_LEN */
-
-extern int r2nm_init(void);
-
-#endif /* _RAMSTER_NODEMANAGER_H */
diff --git a/drivers/staging/zcache/ramster/tcp.c b/drivers/staging/zcache/ramster/tcp.c
deleted file mode 100644
index f6e1e52..0000000
--- a/drivers/staging/zcache/ramster/tcp.c
+++ /dev/null
@@ -1,2248 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8; -*-
- *
- * vim: noexpandtab sw=8 ts=8 sts=0:
- *
- * Copyright (C) 2004 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- *
- * ----
- *
- * Callers for this were originally written against a very simple synchronous
- * API. This implementation reflects those simple callers. Some day I'm sure
- * we'll need to move to a more robust posting/callback mechanism.
- *
- * Transmit calls pass in kernel virtual addresses and block copying this into
- * the socket's tx buffers via a usual blocking sendmsg. They'll block waiting
- * for a failed socket to time out. TX callers can also pass in a pointer to an
- * 'int' which gets filled with an errno off the wire in response to the
- * message they send.
- *
- * Handlers for unsolicited messages are registered. Each socket has a page
- * that incoming data is copied into. First the header, then the data.
- * Handlers are called from only one thread with a reference to this per-socket
- * page. This page is destroyed after the handler call, so it can't be
- * referenced beyond the call. Handlers may block but are discouraged from
- * doing so.
- *
- * Any framing errors (bad magic, large payload lengths) close a connection.
- *
- * Our sock_container holds the state we associate with a socket. Its current
- * framing state is held there as well as the refcounting we do around when it
- * is safe to tear down the socket. The socket is only finally torn down from
- * the container when the container loses all of its references -- so as long
- * as you hold a ref on the container you can trust that the socket is valid
- * for use with kernel socket APIs.
- *
- * Connections are initiated between a pair of nodes when the node with the
- * higher node number gets a heartbeat callback which indicates that the lower
- * numbered node has started heartbeating. The lower numbered node is passive
- * and only accepts the connection if the higher numbered node is heartbeating.
- */
-
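To make the calling convention described in the comment above concrete, here is a hedged usage sketch (not taken from the original file): it registers a handler for a made-up (type, key) pair and sends a small message with r2net_send_message(). EXAMPLE_MSG_TYPE, EXAMPLE_KEY and the handler prototype are illustrative assumptions; the authoritative handler typedef lives in tcp.h.

/* Illustrative only: EXAMPLE_MSG_TYPE/EXAMPLE_KEY are invented values and
 * the handler prototype below is the shape implied by the call sites in
 * this file (see r2net_process_message()); the real typedef is in tcp.h. */
#define EXAMPLE_MSG_TYPE	0x0001
#define EXAMPLE_KEY		0x12345678

static int example_handler(struct r2net_msg *msg, u32 len, void *data,
			   void **ret_data)
{
	/* the payload follows the r2net_msg header; the return value is
	 * sent back to the sender as the message status */
	return 0;
}

static LIST_HEAD(example_unreg_list);

static int example_send(u8 target_node)
{
	char ping[] = "ping";
	int status = 0, ret;

	ret = r2net_register_handler(EXAMPLE_MSG_TYPE, EXAMPLE_KEY,
				     64 /* max payload bytes */,
				     example_handler, NULL, NULL,
				     &example_unreg_list);
	if (ret)
		return ret;

	/* blocks until the remote handler has run (or the link fails) */
	ret = r2net_send_message(EXAMPLE_MSG_TYPE, EXAMPLE_KEY, ping,
				 sizeof(ping), target_node, &status);
	return ret ? ret : status;
}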
-#include <linux/kernel.h>
-#include <linux/jiffies.h>
-#include <linux/slab.h>
-#include <linux/idr.h>
-#include <linux/kref.h>
-#include <linux/net.h>
-#include <linux/export.h>
-#include <linux/uaccess.h>
-#include <net/tcp.h>
-
-
-#include "heartbeat.h"
-#include "tcp.h"
-#include "nodemanager.h"
-#define MLOG_MASK_PREFIX ML_TCP
-#include "masklog.h"
-
-#include "tcp_internal.h"
-
-#define SC_NODEF_FMT "node %s (num %u) at %pI4:%u"
-
-/*
- * In the following two log macros, the whitespace after the ',' just
- * before ##args is intentional. Otherwise, gcc 2.95 will eat the
- * previous token if args expands to nothing.
- */
-#define msglog(hdr, fmt, args...) do { \
- typeof(hdr) __hdr = (hdr); \
- mlog(ML_MSG, "[mag %u len %u typ %u stat %d sys_stat %d " \
- "key %08x num %u] " fmt, \
- be16_to_cpu(__hdr->magic), be16_to_cpu(__hdr->data_len), \
- be16_to_cpu(__hdr->msg_type), be32_to_cpu(__hdr->status), \
- be32_to_cpu(__hdr->sys_status), be32_to_cpu(__hdr->key), \
- be32_to_cpu(__hdr->msg_num) , ##args); \
-} while (0)
-
-#define sclog(sc, fmt, args...) do { \
- typeof(sc) __sc = (sc); \
- mlog(ML_SOCKET, "[sc %p refs %d sock %p node %u page %p " \
- "pg_off %zu] " fmt, __sc, \
- atomic_read(&__sc->sc_kref.refcount), __sc->sc_sock, \
- __sc->sc_node->nd_num, __sc->sc_page, __sc->sc_page_off , \
- ##args); \
-} while (0)
-
-static DEFINE_RWLOCK(r2net_handler_lock);
-static struct rb_root r2net_handler_tree = RB_ROOT;
-
-static struct r2net_node r2net_nodes[R2NM_MAX_NODES];
-
-/* XXX someday we'll need better accounting */
-static struct socket *r2net_listen_sock;
-
-/*
- * listen work is only queued by the listening socket callbacks on the
- * r2net_wq. teardown detaches the callbacks before destroying the workqueue.
- * quorum work is queued as sock containers are shut down. stop_listening
- * tears down all the node's sock containers, preventing future shutdowns
- * and queued quorum work, before canceling delayed quorum work and
- * destroying the work queue.
- */
-static struct workqueue_struct *r2net_wq;
-static struct work_struct r2net_listen_work;
-
-static struct r2hb_callback_func r2net_hb_up, r2net_hb_down;
-#define R2NET_HB_PRI 0x1
-
-static struct r2net_handshake *r2net_hand;
-static struct r2net_msg *r2net_keep_req, *r2net_keep_resp;
-
-static int r2net_sys_err_translations[R2NET_ERR_MAX] = {
- [R2NET_ERR_NONE] = 0,
- [R2NET_ERR_NO_HNDLR] = -ENOPROTOOPT,
- [R2NET_ERR_OVERFLOW] = -EOVERFLOW,
- [R2NET_ERR_DIED] = -EHOSTDOWN,};
-
-/* can't quite avoid *all* internal declarations :/ */
-static void r2net_sc_connect_completed(struct work_struct *work);
-static void r2net_rx_until_empty(struct work_struct *work);
-static void r2net_shutdown_sc(struct work_struct *work);
-static void r2net_listen_data_ready(struct sock *sk, int bytes);
-static void r2net_sc_send_keep_req(struct work_struct *work);
-static void r2net_idle_timer(unsigned long data);
-static void r2net_sc_postpone_idle(struct r2net_sock_container *sc);
-static void r2net_sc_reset_idle_timer(struct r2net_sock_container *sc);
-
-#ifdef CONFIG_DEBUG_FS
-static void r2net_init_nst(struct r2net_send_tracking *nst, u32 msgtype,
- u32 msgkey, struct task_struct *task, u8 node)
-{
- INIT_LIST_HEAD(&nst->st_net_debug_item);
- nst->st_task = task;
- nst->st_msg_type = msgtype;
- nst->st_msg_key = msgkey;
- nst->st_node = node;
-}
-
-static inline void r2net_set_nst_sock_time(struct r2net_send_tracking *nst)
-{
- nst->st_sock_time = ktime_get();
-}
-
-static inline void r2net_set_nst_send_time(struct r2net_send_tracking *nst)
-{
- nst->st_send_time = ktime_get();
-}
-
-static inline void r2net_set_nst_status_time(struct r2net_send_tracking *nst)
-{
- nst->st_status_time = ktime_get();
-}
-
-static inline void r2net_set_nst_sock_container(struct r2net_send_tracking *nst,
- struct r2net_sock_container *sc)
-{
- nst->st_sc = sc;
-}
-
-static inline void r2net_set_nst_msg_id(struct r2net_send_tracking *nst,
- u32 msg_id)
-{
- nst->st_id = msg_id;
-}
-
-static inline void r2net_set_sock_timer(struct r2net_sock_container *sc)
-{
- sc->sc_tv_timer = ktime_get();
-}
-
-static inline void r2net_set_data_ready_time(struct r2net_sock_container *sc)
-{
- sc->sc_tv_data_ready = ktime_get();
-}
-
-static inline void r2net_set_advance_start_time(struct r2net_sock_container *sc)
-{
- sc->sc_tv_advance_start = ktime_get();
-}
-
-static inline void r2net_set_advance_stop_time(struct r2net_sock_container *sc)
-{
- sc->sc_tv_advance_stop = ktime_get();
-}
-
-static inline void r2net_set_func_start_time(struct r2net_sock_container *sc)
-{
- sc->sc_tv_func_start = ktime_get();
-}
-
-static inline void r2net_set_func_stop_time(struct r2net_sock_container *sc)
-{
- sc->sc_tv_func_stop = ktime_get();
-}
-
-#else /* CONFIG_DEBUG_FS */
-# define r2net_init_nst(a, b, c, d, e)
-# define r2net_set_nst_sock_time(a)
-# define r2net_set_nst_send_time(a)
-# define r2net_set_nst_status_time(a)
-# define r2net_set_nst_sock_container(a, b)
-# define r2net_set_nst_msg_id(a, b)
-# define r2net_set_sock_timer(a)
-# define r2net_set_data_ready_time(a)
-# define r2net_set_advance_start_time(a)
-# define r2net_set_advance_stop_time(a)
-# define r2net_set_func_start_time(a)
-# define r2net_set_func_stop_time(a)
-#endif /* CONFIG_DEBUG_FS */
-
-#ifdef CONFIG_RAMSTER_FS_STATS
-static ktime_t r2net_get_func_run_time(struct r2net_sock_container *sc)
-{
- return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start);
-}
-
-static void r2net_update_send_stats(struct r2net_send_tracking *nst,
- struct r2net_sock_container *sc)
-{
- sc->sc_tv_status_total = ktime_add(sc->sc_tv_status_total,
- ktime_sub(ktime_get(),
- nst->st_status_time));
- sc->sc_tv_send_total = ktime_add(sc->sc_tv_send_total,
- ktime_sub(nst->st_status_time,
- nst->st_send_time));
- sc->sc_tv_acquiry_total = ktime_add(sc->sc_tv_acquiry_total,
- ktime_sub(nst->st_send_time,
- nst->st_sock_time));
- sc->sc_send_count++;
-}
-
-static void r2net_update_recv_stats(struct r2net_sock_container *sc)
-{
- sc->sc_tv_process_total = ktime_add(sc->sc_tv_process_total,
- r2net_get_func_run_time(sc));
- sc->sc_recv_count++;
-}
-
-#else
-
-# define r2net_update_send_stats(a, b)
-
-# define r2net_update_recv_stats(sc)
-
-#endif /* CONFIG_RAMSTER_FS_STATS */
-
-static inline int r2net_reconnect_delay(void)
-{
- return r2nm_single_cluster->cl_reconnect_delay_ms;
-}
-
-static inline int r2net_keepalive_delay(void)
-{
- return r2nm_single_cluster->cl_keepalive_delay_ms;
-}
-
-static inline int r2net_idle_timeout(void)
-{
- return r2nm_single_cluster->cl_idle_timeout_ms;
-}
-
-static inline int r2net_sys_err_to_errno(enum r2net_system_error err)
-{
- int trans;
- BUG_ON(err >= R2NET_ERR_MAX);
- trans = r2net_sys_err_translations[err];
-
- /* Just in case we mess up the translation table above */
- BUG_ON(err != R2NET_ERR_NONE && trans == 0);
- return trans;
-}
-
-struct r2net_node *r2net_nn_from_num(u8 node_num)
-{
- BUG_ON(node_num >= ARRAY_SIZE(r2net_nodes));
- return &r2net_nodes[node_num];
-}
-
-static u8 r2net_num_from_nn(struct r2net_node *nn)
-{
- BUG_ON(nn == NULL);
- return nn - r2net_nodes;
-}
-
-/* ------------------------------------------------------------ */
-
-static int r2net_prep_nsw(struct r2net_node *nn, struct r2net_status_wait *nsw)
-{
- int ret;
-
- spin_lock(&nn->nn_lock);
- ret = idr_alloc(&nn->nn_status_idr, nsw, 0, 0, GFP_ATOMIC);
- if (ret >= 0) {
- nsw->ns_id = ret;
- list_add_tail(&nsw->ns_node_item, &nn->nn_status_list);
- }
- spin_unlock(&nn->nn_lock);
-
- if (ret >= 0) {
- init_waitqueue_head(&nsw->ns_wq);
- nsw->ns_sys_status = R2NET_ERR_NONE;
- nsw->ns_status = 0;
- return 0;
- }
- return ret;
-}
-
-static void r2net_complete_nsw_locked(struct r2net_node *nn,
- struct r2net_status_wait *nsw,
- enum r2net_system_error sys_status,
- s32 status)
-{
- assert_spin_locked(&nn->nn_lock);
-
- if (!list_empty(&nsw->ns_node_item)) {
- list_del_init(&nsw->ns_node_item);
- nsw->ns_sys_status = sys_status;
- nsw->ns_status = status;
- idr_remove(&nn->nn_status_idr, nsw->ns_id);
- wake_up(&nsw->ns_wq);
- }
-}
-
-static void r2net_complete_nsw(struct r2net_node *nn,
- struct r2net_status_wait *nsw,
- u64 id, enum r2net_system_error sys_status,
- s32 status)
-{
- spin_lock(&nn->nn_lock);
- if (nsw == NULL) {
- if (id > INT_MAX)
- goto out;
-
- nsw = idr_find(&nn->nn_status_idr, id);
- if (nsw == NULL)
- goto out;
- }
-
- r2net_complete_nsw_locked(nn, nsw, sys_status, status);
-
-out:
- spin_unlock(&nn->nn_lock);
- return;
-}
-
-static void r2net_complete_nodes_nsw(struct r2net_node *nn)
-{
- struct r2net_status_wait *nsw, *tmp;
- unsigned int num_kills = 0;
-
- assert_spin_locked(&nn->nn_lock);
-
- list_for_each_entry_safe(nsw, tmp, &nn->nn_status_list, ns_node_item) {
- r2net_complete_nsw_locked(nn, nsw, R2NET_ERR_DIED, 0);
- num_kills++;
- }
-
- mlog(0, "completed %d messages for node %u\n", num_kills,
- r2net_num_from_nn(nn));
-}
-
-static int r2net_nsw_completed(struct r2net_node *nn,
- struct r2net_status_wait *nsw)
-{
- int completed;
- spin_lock(&nn->nn_lock);
- completed = list_empty(&nsw->ns_node_item);
- spin_unlock(&nn->nn_lock);
- return completed;
-}
-
-/* ------------------------------------------------------------ */
-
-static void sc_kref_release(struct kref *kref)
-{
- struct r2net_sock_container *sc = container_of(kref,
- struct r2net_sock_container, sc_kref);
- BUG_ON(timer_pending(&sc->sc_idle_timeout));
-
- sclog(sc, "releasing\n");
-
- if (sc->sc_sock) {
- sock_release(sc->sc_sock);
- sc->sc_sock = NULL;
- }
-
- r2nm_undepend_item(&sc->sc_node->nd_item);
- r2nm_node_put(sc->sc_node);
- sc->sc_node = NULL;
-
- r2net_debug_del_sc(sc);
- kfree(sc);
-}
-
-static void sc_put(struct r2net_sock_container *sc)
-{
- sclog(sc, "put\n");
- kref_put(&sc->sc_kref, sc_kref_release);
-}
-static void sc_get(struct r2net_sock_container *sc)
-{
- sclog(sc, "get\n");
- kref_get(&sc->sc_kref);
-}
-static struct r2net_sock_container *sc_alloc(struct r2nm_node *node)
-{
- struct r2net_sock_container *sc, *ret = NULL;
- struct page *page = NULL;
- int status = 0;
-
- page = alloc_page(GFP_NOFS);
- sc = kzalloc(sizeof(*sc), GFP_NOFS);
- if (sc == NULL || page == NULL)
- goto out;
-
- kref_init(&sc->sc_kref);
- r2nm_node_get(node);
- sc->sc_node = node;
-
- /* pin the node item of the remote node */
- status = r2nm_depend_item(&node->nd_item);
- if (status) {
- mlog_errno(status);
- r2nm_node_put(node);
- goto out;
- }
- INIT_WORK(&sc->sc_connect_work, r2net_sc_connect_completed);
- INIT_WORK(&sc->sc_rx_work, r2net_rx_until_empty);
- INIT_WORK(&sc->sc_shutdown_work, r2net_shutdown_sc);
- INIT_DELAYED_WORK(&sc->sc_keepalive_work, r2net_sc_send_keep_req);
-
- init_timer(&sc->sc_idle_timeout);
- sc->sc_idle_timeout.function = r2net_idle_timer;
- sc->sc_idle_timeout.data = (unsigned long)sc;
-
- sclog(sc, "alloced\n");
-
- ret = sc;
- sc->sc_page = page;
- r2net_debug_add_sc(sc);
- sc = NULL;
- page = NULL;
-
-out:
- if (page)
- __free_page(page);
- kfree(sc);
-
- return ret;
-}
-
-/* ------------------------------------------------------------ */
-
-static void r2net_sc_queue_work(struct r2net_sock_container *sc,
- struct work_struct *work)
-{
- sc_get(sc);
- if (!queue_work(r2net_wq, work))
- sc_put(sc);
-}
-static void r2net_sc_queue_delayed_work(struct r2net_sock_container *sc,
- struct delayed_work *work,
- int delay)
-{
- sc_get(sc);
- if (!queue_delayed_work(r2net_wq, work, delay))
- sc_put(sc);
-}
-static void r2net_sc_cancel_delayed_work(struct r2net_sock_container *sc,
- struct delayed_work *work)
-{
- if (cancel_delayed_work(work))
- sc_put(sc);
-}
-
-static atomic_t r2net_connected_peers = ATOMIC_INIT(0);
-
-int r2net_num_connected_peers(void)
-{
- return atomic_read(&r2net_connected_peers);
-}
-
-static void r2net_set_nn_state(struct r2net_node *nn,
- struct r2net_sock_container *sc,
- unsigned valid, int err)
-{
- int was_valid = nn->nn_sc_valid;
- int was_err = nn->nn_persistent_error;
- struct r2net_sock_container *old_sc = nn->nn_sc;
-
- assert_spin_locked(&nn->nn_lock);
-
- if (old_sc && !sc)
- atomic_dec(&r2net_connected_peers);
- else if (!old_sc && sc)
- atomic_inc(&r2net_connected_peers);
-
- /* the node num comparison and single connect/accept path should stop
-	 * a non-null sc from being overwritten with another */
- BUG_ON(sc && nn->nn_sc && nn->nn_sc != sc);
- mlog_bug_on_msg(err && valid, "err %d valid %u\n", err, valid);
- mlog_bug_on_msg(valid && !sc, "valid %u sc %p\n", valid, sc);
-
- if (was_valid && !valid && err == 0)
- err = -ENOTCONN;
-
- mlog(ML_CONN, "node %u sc: %p -> %p, valid %u -> %u, err %d -> %d\n",
- r2net_num_from_nn(nn), nn->nn_sc, sc, nn->nn_sc_valid, valid,
- nn->nn_persistent_error, err);
-
- nn->nn_sc = sc;
- nn->nn_sc_valid = valid ? 1 : 0;
- nn->nn_persistent_error = err;
-
- /* mirrors r2net_tx_can_proceed() */
- if (nn->nn_persistent_error || nn->nn_sc_valid)
- wake_up(&nn->nn_sc_wq);
-
- if (!was_err && nn->nn_persistent_error) {
- queue_delayed_work(r2net_wq, &nn->nn_still_up,
- msecs_to_jiffies(R2NET_QUORUM_DELAY_MS));
- }
-
- if (was_valid && !valid) {
- pr_notice("ramster: No longer connected to " SC_NODEF_FMT "\n",
- old_sc->sc_node->nd_name, old_sc->sc_node->nd_num,
- &old_sc->sc_node->nd_ipv4_address,
- ntohs(old_sc->sc_node->nd_ipv4_port));
- r2net_complete_nodes_nsw(nn);
- }
-
- if (!was_valid && valid) {
- cancel_delayed_work(&nn->nn_connect_expired);
- pr_notice("ramster: %s " SC_NODEF_FMT "\n",
- r2nm_this_node() > sc->sc_node->nd_num ?
- "Connected to" : "Accepted connection from",
- sc->sc_node->nd_name, sc->sc_node->nd_num,
- &sc->sc_node->nd_ipv4_address,
- ntohs(sc->sc_node->nd_ipv4_port));
- }
-
- /* trigger the connecting worker func as long as we're not valid,
- * it will back off if it shouldn't connect. This can be called
- * from node config teardown and so needs to be careful about
- * the work queue actually being up. */
- if (!valid && r2net_wq) {
- unsigned long delay;
- /* delay if we're within a RECONNECT_DELAY of the
- * last attempt */
- delay = (nn->nn_last_connect_attempt +
- msecs_to_jiffies(r2net_reconnect_delay()))
- - jiffies;
- if (delay > msecs_to_jiffies(r2net_reconnect_delay()))
- delay = 0;
- mlog(ML_CONN, "queueing conn attempt in %lu jiffies\n", delay);
- queue_delayed_work(r2net_wq, &nn->nn_connect_work, delay);
-
- /*
- * Delay the expired work after idle timeout.
- *
- * We might have lots of failed connection attempts that run
- * through here but we only cancel the connect_expired work when
- * a connection attempt succeeds. So only the first enqueue of
- * the connect_expired work will do anything. The rest will see
- * that it's already queued and do nothing.
- */
- delay += msecs_to_jiffies(r2net_idle_timeout());
- queue_delayed_work(r2net_wq, &nn->nn_connect_expired, delay);
- }
-
- /* keep track of the nn's sc ref for the caller */
- if ((old_sc == NULL) && sc)
- sc_get(sc);
- if (old_sc && (old_sc != sc)) {
- r2net_sc_queue_work(old_sc, &old_sc->sc_shutdown_work);
- sc_put(old_sc);
- }
-}
-
-/* see r2net_register_callbacks() */
-static void r2net_data_ready(struct sock *sk, int bytes)
-{
- void (*ready)(struct sock *sk, int bytes);
-
- read_lock(&sk->sk_callback_lock);
- if (sk->sk_user_data) {
- struct r2net_sock_container *sc = sk->sk_user_data;
- sclog(sc, "data_ready hit\n");
- r2net_set_data_ready_time(sc);
- r2net_sc_queue_work(sc, &sc->sc_rx_work);
- ready = sc->sc_data_ready;
- } else {
- ready = sk->sk_data_ready;
- }
- read_unlock(&sk->sk_callback_lock);
-
- ready(sk, bytes);
-}
-
-/* see r2net_register_callbacks() */
-static void r2net_state_change(struct sock *sk)
-{
- void (*state_change)(struct sock *sk);
- struct r2net_sock_container *sc;
-
- read_lock(&sk->sk_callback_lock);
- sc = sk->sk_user_data;
- if (sc == NULL) {
- state_change = sk->sk_state_change;
- goto out;
- }
-
- sclog(sc, "state_change to %d\n", sk->sk_state);
-
- state_change = sc->sc_state_change;
-
- switch (sk->sk_state) {
-
- /* ignore connecting sockets as they make progress */
- case TCP_SYN_SENT:
- case TCP_SYN_RECV:
- break;
- case TCP_ESTABLISHED:
- r2net_sc_queue_work(sc, &sc->sc_connect_work);
- break;
- default:
- pr_info("ramster: Connection to "
- SC_NODEF_FMT " shutdown, state %d\n",
- sc->sc_node->nd_name, sc->sc_node->nd_num,
- &sc->sc_node->nd_ipv4_address,
- ntohs(sc->sc_node->nd_ipv4_port), sk->sk_state);
- r2net_sc_queue_work(sc, &sc->sc_shutdown_work);
- break;
-
- }
-out:
- read_unlock(&sk->sk_callback_lock);
- state_change(sk);
-}
-
-/*
- * we register callbacks so we can queue work on events before calling
- * the original callbacks. our callbacks are careful to test user_data
- * to discover when they've raced with r2net_unregister_callbacks().
- */
-static void r2net_register_callbacks(struct sock *sk,
- struct r2net_sock_container *sc)
-{
- write_lock_bh(&sk->sk_callback_lock);
-
- /* accepted sockets inherit the old listen socket data ready */
- if (sk->sk_data_ready == r2net_listen_data_ready) {
- sk->sk_data_ready = sk->sk_user_data;
- sk->sk_user_data = NULL;
- }
-
- BUG_ON(sk->sk_user_data != NULL);
- sk->sk_user_data = sc;
- sc_get(sc);
-
- sc->sc_data_ready = sk->sk_data_ready;
- sc->sc_state_change = sk->sk_state_change;
- sk->sk_data_ready = r2net_data_ready;
- sk->sk_state_change = r2net_state_change;
-
- mutex_init(&sc->sc_send_lock);
-
- write_unlock_bh(&sk->sk_callback_lock);
-}
-
-static int r2net_unregister_callbacks(struct sock *sk,
- struct r2net_sock_container *sc)
-{
- int ret = 0;
-
- write_lock_bh(&sk->sk_callback_lock);
- if (sk->sk_user_data == sc) {
- ret = 1;
- sk->sk_user_data = NULL;
- sk->sk_data_ready = sc->sc_data_ready;
- sk->sk_state_change = sc->sc_state_change;
- }
- write_unlock_bh(&sk->sk_callback_lock);
-
- return ret;
-}
-
-/*
- * this is a little helper that is called by callers who have seen a problem
- * with an sc and want to detach it from the nn if someone hasn't already beaten
- * them to it. if an error is given then the shutdown will be persistent
- * and pending transmits will be canceled.
- */
-static void r2net_ensure_shutdown(struct r2net_node *nn,
- struct r2net_sock_container *sc,
- int err)
-{
- spin_lock(&nn->nn_lock);
- if (nn->nn_sc == sc)
- r2net_set_nn_state(nn, NULL, 0, err);
- spin_unlock(&nn->nn_lock);
-}
-
-/*
- * This work queue function performs the blocking parts of socket shutdown. A
- * few paths lead here. set_nn_state will trigger this callback if it sees an
- * sc detached from the nn. state_change will also trigger this callback
- * directly when it sees errors. In that case we need to call set_nn_state
- * ourselves as state_change couldn't get the nn_lock and call set_nn_state
- * itself.
- */
-static void r2net_shutdown_sc(struct work_struct *work)
-{
- struct r2net_sock_container *sc =
- container_of(work, struct r2net_sock_container,
- sc_shutdown_work);
- struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
-
- sclog(sc, "shutting down\n");
-
- /* drop the callbacks ref and call shutdown only once */
- if (r2net_unregister_callbacks(sc->sc_sock->sk, sc)) {
- /* we shouldn't flush as we're in the thread, the
- * races with pending sc work structs are harmless */
- del_timer_sync(&sc->sc_idle_timeout);
- r2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
- sc_put(sc);
- kernel_sock_shutdown(sc->sc_sock, SHUT_RDWR);
- }
-
- /* not fatal so failed connects before the other guy has our
- * heartbeat can be retried */
- r2net_ensure_shutdown(nn, sc, 0);
- sc_put(sc);
-}
-
-/* ------------------------------------------------------------ */
-
-static int r2net_handler_cmp(struct r2net_msg_handler *nmh, u32 msg_type,
- u32 key)
-{
- int ret = memcmp(&nmh->nh_key, &key, sizeof(key));
-
- if (ret == 0)
- ret = memcmp(&nmh->nh_msg_type, &msg_type, sizeof(msg_type));
-
- return ret;
-}
-
-static struct r2net_msg_handler *
-r2net_handler_tree_lookup(u32 msg_type, u32 key, struct rb_node ***ret_p,
- struct rb_node **ret_parent)
-{
- struct rb_node **p = &r2net_handler_tree.rb_node;
- struct rb_node *parent = NULL;
- struct r2net_msg_handler *nmh, *ret = NULL;
- int cmp;
-
- while (*p) {
- parent = *p;
- nmh = rb_entry(parent, struct r2net_msg_handler, nh_node);
- cmp = r2net_handler_cmp(nmh, msg_type, key);
-
- if (cmp < 0)
- p = &(*p)->rb_left;
- else if (cmp > 0)
- p = &(*p)->rb_right;
- else {
- ret = nmh;
- break;
- }
- }
-
- if (ret_p != NULL)
- *ret_p = p;
- if (ret_parent != NULL)
- *ret_parent = parent;
-
- return ret;
-}
-
-static void r2net_handler_kref_release(struct kref *kref)
-{
- struct r2net_msg_handler *nmh;
- nmh = container_of(kref, struct r2net_msg_handler, nh_kref);
-
- kfree(nmh);
-}
-
-static void r2net_handler_put(struct r2net_msg_handler *nmh)
-{
- kref_put(&nmh->nh_kref, r2net_handler_kref_release);
-}
-
-/* max_len is protection for the handler func. incoming messages won't
- * be given to the handler if their payload is longer than the max. */
-int r2net_register_handler(u32 msg_type, u32 key, u32 max_len,
- r2net_msg_handler_func *func, void *data,
- r2net_post_msg_handler_func *post_func,
- struct list_head *unreg_list)
-{
- struct r2net_msg_handler *nmh = NULL;
- struct rb_node **p, *parent;
- int ret = 0;
-
- if (max_len > R2NET_MAX_PAYLOAD_BYTES) {
- mlog(0, "max_len for message handler out of range: %u\n",
- max_len);
- ret = -EINVAL;
- goto out;
- }
-
- if (!msg_type) {
- mlog(0, "no message type provided: %u, %p\n", msg_type, func);
- ret = -EINVAL;
- goto out;
-
- }
- if (!func) {
- mlog(0, "no message handler provided: %u, %p\n",
- msg_type, func);
- ret = -EINVAL;
- goto out;
- }
-
- nmh = kzalloc(sizeof(struct r2net_msg_handler), GFP_NOFS);
- if (nmh == NULL) {
- ret = -ENOMEM;
- goto out;
- }
-
- nmh->nh_func = func;
- nmh->nh_func_data = data;
- nmh->nh_post_func = post_func;
- nmh->nh_msg_type = msg_type;
- nmh->nh_max_len = max_len;
- nmh->nh_key = key;
- /* the tree and list get this ref.. they're both removed in
- * unregister when this ref is dropped */
- kref_init(&nmh->nh_kref);
- INIT_LIST_HEAD(&nmh->nh_unregister_item);
-
- write_lock(&r2net_handler_lock);
- if (r2net_handler_tree_lookup(msg_type, key, &p, &parent))
- ret = -EEXIST;
- else {
- rb_link_node(&nmh->nh_node, parent, p);
- rb_insert_color(&nmh->nh_node, &r2net_handler_tree);
- list_add_tail(&nmh->nh_unregister_item, unreg_list);
-
- mlog(ML_TCP, "registered handler func %p type %u key %08x\n",
- func, msg_type, key);
- /* we've had some trouble with handlers seemingly vanishing. */
- mlog_bug_on_msg(r2net_handler_tree_lookup(msg_type, key, &p,
- &parent) == NULL,
- "couldn't find handler we *just* registered "
- "for type %u key %08x\n", msg_type, key);
- }
- write_unlock(&r2net_handler_lock);
- if (ret)
- goto out;
-
-out:
- if (ret)
- kfree(nmh);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(r2net_register_handler);
-
-void r2net_unregister_handler_list(struct list_head *list)
-{
- struct r2net_msg_handler *nmh, *n;
-
- write_lock(&r2net_handler_lock);
- list_for_each_entry_safe(nmh, n, list, nh_unregister_item) {
- mlog(ML_TCP, "unregistering handler func %p type %u key %08x\n",
- nmh->nh_func, nmh->nh_msg_type, nmh->nh_key);
- rb_erase(&nmh->nh_node, &r2net_handler_tree);
- list_del_init(&nmh->nh_unregister_item);
- kref_put(&nmh->nh_kref, r2net_handler_kref_release);
- }
- write_unlock(&r2net_handler_lock);
-}
-EXPORT_SYMBOL_GPL(r2net_unregister_handler_list);
-
-static struct r2net_msg_handler *r2net_handler_get(u32 msg_type, u32 key)
-{
- struct r2net_msg_handler *nmh;
-
- read_lock(&r2net_handler_lock);
- nmh = r2net_handler_tree_lookup(msg_type, key, NULL, NULL);
- if (nmh)
- kref_get(&nmh->nh_kref);
- read_unlock(&r2net_handler_lock);
-
- return nmh;
-}
-
-/* ------------------------------------------------------------ */
-
-static int r2net_recv_tcp_msg(struct socket *sock, void *data, size_t len)
-{
- int ret;
- mm_segment_t oldfs;
- struct kvec vec = {
- .iov_len = len,
- .iov_base = data,
- };
- struct msghdr msg = {
- .msg_iovlen = 1,
- .msg_iov = (struct iovec *)&vec,
- .msg_flags = MSG_DONTWAIT,
- };
-
- oldfs = get_fs();
- set_fs(get_ds());
- ret = sock_recvmsg(sock, &msg, len, msg.msg_flags);
- set_fs(oldfs);
-
- return ret;
-}
-
-static int r2net_send_tcp_msg(struct socket *sock, struct kvec *vec,
- size_t veclen, size_t total)
-{
- int ret;
- mm_segment_t oldfs;
- struct msghdr msg = {
- .msg_iov = (struct iovec *)vec,
- .msg_iovlen = veclen,
- };
-
- if (sock == NULL) {
- ret = -EINVAL;
- goto out;
- }
-
- oldfs = get_fs();
- set_fs(get_ds());
- ret = sock_sendmsg(sock, &msg, total);
- set_fs(oldfs);
- if (ret != total) {
- mlog(ML_ERROR, "sendmsg returned %d instead of %zu\n", ret,
- total);
- if (ret >= 0)
- ret = -EPIPE; /* should be smarter, I bet */
- goto out;
- }
-
- ret = 0;
-out:
- if (ret < 0)
- mlog(0, "returning error: %d\n", ret);
- return ret;
-}
-
-static void r2net_sendpage(struct r2net_sock_container *sc,
- void *kmalloced_virt,
- size_t size)
-{
- struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
- ssize_t ret;
-
- while (1) {
- mutex_lock(&sc->sc_send_lock);
- ret = sc->sc_sock->ops->sendpage(sc->sc_sock,
- virt_to_page(kmalloced_virt),
- (long)kmalloced_virt & ~PAGE_MASK,
- size, MSG_DONTWAIT);
- mutex_unlock(&sc->sc_send_lock);
- if (ret == size)
- break;
- if (ret == (ssize_t)-EAGAIN) {
- mlog(0, "sendpage of size %zu to " SC_NODEF_FMT
- " returned EAGAIN\n", size, sc->sc_node->nd_name,
- sc->sc_node->nd_num,
- &sc->sc_node->nd_ipv4_address,
- ntohs(sc->sc_node->nd_ipv4_port));
- cond_resched();
- continue;
- }
- mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
- " failed with %zd\n", size, sc->sc_node->nd_name,
- sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
- ntohs(sc->sc_node->nd_ipv4_port), ret);
- r2net_ensure_shutdown(nn, sc, 0);
- break;
- }
-}
-
-static void r2net_init_msg(struct r2net_msg *msg, u16 data_len,
- u16 msg_type, u32 key)
-{
- memset(msg, 0, sizeof(struct r2net_msg));
- msg->magic = cpu_to_be16(R2NET_MSG_MAGIC);
- msg->data_len = cpu_to_be16(data_len);
- msg->msg_type = cpu_to_be16(msg_type);
- msg->sys_status = cpu_to_be32(R2NET_ERR_NONE);
- msg->status = 0;
- msg->key = cpu_to_be32(key);
-}
-
-static int r2net_tx_can_proceed(struct r2net_node *nn,
- struct r2net_sock_container **sc_ret,
- int *error)
-{
- int ret = 0;
-
- spin_lock(&nn->nn_lock);
- if (nn->nn_persistent_error) {
- ret = 1;
- *sc_ret = NULL;
- *error = nn->nn_persistent_error;
- } else if (nn->nn_sc_valid) {
- kref_get(&nn->nn_sc->sc_kref);
-
- ret = 1;
- *sc_ret = nn->nn_sc;
- *error = 0;
- }
- spin_unlock(&nn->nn_lock);
-
- return ret;
-}
-
-/* Get a map of all nodes to which this node is currently connected */
-void r2net_fill_node_map(unsigned long *map, unsigned bytes)
-{
- struct r2net_sock_container *sc;
- int node, ret;
-
- BUG_ON(bytes < (BITS_TO_LONGS(R2NM_MAX_NODES) * sizeof(unsigned long)));
-
- memset(map, 0, bytes);
- for (node = 0; node < R2NM_MAX_NODES; ++node) {
- r2net_tx_can_proceed(r2net_nn_from_num(node), &sc, &ret);
- if (!ret) {
- set_bit(node, map);
- sc_put(sc);
- }
- }
-}
-EXPORT_SYMBOL_GPL(r2net_fill_node_map);
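As a usage note (an illustrative sketch, not code from the original driver), a caller passes a bitmap of at least BITS_TO_LONGS(R2NM_MAX_NODES) longs and then walks it with the usual bit helpers; the function name below is invented.

/* Sketch of a caller walking the connected-node map. */
static void example_dump_connected_nodes(void)
{
	unsigned long map[BITS_TO_LONGS(R2NM_MAX_NODES)];
	int node;

	r2net_fill_node_map(map, sizeof(map));
	for (node = 0; node < R2NM_MAX_NODES; node++)
		if (test_bit(node, map))
			pr_info("connected to node %d\n", node);
}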
-
-int r2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
- size_t caller_veclen, u8 target_node, int *status)
-{
- int ret = 0;
- struct r2net_msg *msg = NULL;
- size_t veclen, caller_bytes = 0;
- struct kvec *vec = NULL;
- struct r2net_sock_container *sc = NULL;
- struct r2net_node *nn = r2net_nn_from_num(target_node);
- struct r2net_status_wait nsw = {
- .ns_node_item = LIST_HEAD_INIT(nsw.ns_node_item),
- };
- struct r2net_send_tracking nst;
-
- /* this may be a general bug fix */
- init_waitqueue_head(&nsw.ns_wq);
-
- r2net_init_nst(&nst, msg_type, key, current, target_node);
-
- if (r2net_wq == NULL) {
- mlog(0, "attempt to tx without r2netd running\n");
- ret = -ESRCH;
- goto out;
- }
-
- if (caller_veclen == 0) {
- mlog(0, "bad kvec array length\n");
- ret = -EINVAL;
- goto out;
- }
-
- caller_bytes = iov_length((struct iovec *)caller_vec, caller_veclen);
- if (caller_bytes > R2NET_MAX_PAYLOAD_BYTES) {
- mlog(0, "total payload len %zu too large\n", caller_bytes);
- ret = -EINVAL;
- goto out;
- }
-
- if (target_node == r2nm_this_node()) {
- ret = -ELOOP;
- goto out;
- }
-
- r2net_debug_add_nst(&nst);
-
- r2net_set_nst_sock_time(&nst);
-
- wait_event(nn->nn_sc_wq, r2net_tx_can_proceed(nn, &sc, &ret));
- if (ret)
- goto out;
-
- r2net_set_nst_sock_container(&nst, sc);
-
- veclen = caller_veclen + 1;
- vec = kmalloc(sizeof(struct kvec) * veclen, GFP_ATOMIC);
- if (vec == NULL) {
-		mlog(0, "failed to allocate %zu element kvec!\n", veclen);
- ret = -ENOMEM;
- goto out;
- }
-
- msg = kmalloc(sizeof(struct r2net_msg), GFP_ATOMIC);
- if (!msg) {
- mlog(0, "failed to allocate a r2net_msg!\n");
- ret = -ENOMEM;
- goto out;
- }
-
- r2net_init_msg(msg, caller_bytes, msg_type, key);
-
- vec[0].iov_len = sizeof(struct r2net_msg);
- vec[0].iov_base = msg;
- memcpy(&vec[1], caller_vec, caller_veclen * sizeof(struct kvec));
-
- ret = r2net_prep_nsw(nn, &nsw);
- if (ret)
- goto out;
-
- msg->msg_num = cpu_to_be32(nsw.ns_id);
- r2net_set_nst_msg_id(&nst, nsw.ns_id);
-
- r2net_set_nst_send_time(&nst);
-
- /* finally, convert the message header to network byte-order
- * and send */
- mutex_lock(&sc->sc_send_lock);
- ret = r2net_send_tcp_msg(sc->sc_sock, vec, veclen,
- sizeof(struct r2net_msg) + caller_bytes);
- mutex_unlock(&sc->sc_send_lock);
- msglog(msg, "sending returned %d\n", ret);
- if (ret < 0) {
- mlog(0, "error returned from r2net_send_tcp_msg=%d\n", ret);
- goto out;
- }
-
- /* wait on other node's handler */
- r2net_set_nst_status_time(&nst);
- wait_event(nsw.ns_wq, r2net_nsw_completed(nn, &nsw) ||
- nn->nn_persistent_error || !nn->nn_sc_valid);
-
- r2net_update_send_stats(&nst, sc);
-
- /* Note that we avoid overwriting the callers status return
- * variable if a system error was reported on the other
- * side. Callers beware. */
- ret = r2net_sys_err_to_errno(nsw.ns_sys_status);
- if (status && !ret)
- *status = nsw.ns_status;
-
- mlog(0, "woken, returning system status %d, user status %d\n",
- ret, nsw.ns_status);
-out:
- r2net_debug_del_nst(&nst); /* must be before dropping sc and node */
- if (sc)
- sc_put(sc);
- kfree(vec);
- kfree(msg);
- r2net_complete_nsw(nn, &nsw, 0, 0, 0);
- return ret;
-}
-EXPORT_SYMBOL_GPL(r2net_send_message_vec);
-
-int r2net_send_message(u32 msg_type, u32 key, void *data, u32 len,
- u8 target_node, int *status)
-{
- struct kvec vec = {
- .iov_base = data,
- .iov_len = len,
- };
- return r2net_send_message_vec(msg_type, key, &vec, 1,
- target_node, status);
-}
-EXPORT_SYMBOL_GPL(r2net_send_message);
-
-static int r2net_send_status_magic(struct socket *sock, struct r2net_msg *hdr,
- enum r2net_system_error syserr, int err)
-{
- struct kvec vec = {
- .iov_base = hdr,
- .iov_len = sizeof(struct r2net_msg),
- };
-
- BUG_ON(syserr >= R2NET_ERR_MAX);
-
- /* leave other fields intact from the incoming message, msg_num
- * in particular */
- hdr->sys_status = cpu_to_be32(syserr);
- hdr->status = cpu_to_be32(err);
- /* twiddle the magic */
- hdr->magic = cpu_to_be16(R2NET_MSG_STATUS_MAGIC);
- hdr->data_len = 0;
-
- msglog(hdr, "about to send status magic %d\n", err);
- /* hdr has been in host byteorder this whole time */
- return r2net_send_tcp_msg(sock, &vec, 1, sizeof(struct r2net_msg));
-}
-
-/*
- * "data magic" is a long version of "status magic" where the message
- * payload actually contains data to be passed in reply to certain messages
- */
-static int r2net_send_data_magic(struct r2net_sock_container *sc,
- struct r2net_msg *hdr,
- void *data, size_t data_len,
- enum r2net_system_error syserr, int err)
-{
- struct kvec vec[2];
- int ret;
-
- vec[0].iov_base = hdr;
- vec[0].iov_len = sizeof(struct r2net_msg);
- vec[1].iov_base = data;
- vec[1].iov_len = data_len;
-
- BUG_ON(syserr >= R2NET_ERR_MAX);
-
- /* leave other fields intact from the incoming message, msg_num
- * in particular */
- hdr->sys_status = cpu_to_be32(syserr);
- hdr->status = cpu_to_be32(err);
- hdr->magic = cpu_to_be16(R2NET_MSG_DATA_MAGIC); /* twiddle magic */
- hdr->data_len = cpu_to_be16(data_len);
-
- msglog(hdr, "about to send data magic %d\n", err);
- /* hdr has been in host byteorder this whole time */
- ret = r2net_send_tcp_msg(sc->sc_sock, vec, 2,
- sizeof(struct r2net_msg) + data_len);
- return ret;
-}
-
-/*
- * called by a message handler to convert an otherwise normal reply
- * message into a "data magic" message
- */
-void r2net_force_data_magic(struct r2net_msg *hdr, u16 msgtype, u32 msgkey)
-{
- hdr->magic = cpu_to_be16(R2NET_MSG_DATA_MAGIC);
- hdr->msg_type = cpu_to_be16(msgtype);
- hdr->key = cpu_to_be32(msgkey);
-}
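For illustration, a hedged sketch of a message handler that replies with data rather than a bare status; FOO_MSG_TYPE, FOO_KEY and the reply buffer are invented, and the flow follows r2net_process_message() below: flip the header to data magic, point *ret_data at the payload, and return the payload length as the handler status.

/* Illustrative handler answering a request with data instead of a plain
 * status reply; FOO_MSG_TYPE and FOO_KEY are invented values. */
#define FOO_MSG_TYPE	0x0002
#define FOO_KEY		0x0badcafe

static int foo_data_handler(struct r2net_msg *hdr, u32 len, void *data,
			    void **ret_data)
{
	static char reply[] = "some payload bytes";

	/* switch the header to data magic so r2net_process_message()
	 * sends the payload back via r2net_send_data_magic()... */
	r2net_force_data_magic(hdr, FOO_MSG_TYPE, FOO_KEY);
	*ret_data = reply;
	/* ...and report the number of payload bytes as the handler status */
	return sizeof(reply);
}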
-
-/* this returns -errno if the header was unknown or too large, etc.
- * after this is called the buffer is reused for the next message */
-static int r2net_process_message(struct r2net_sock_container *sc,
- struct r2net_msg *hdr)
-{
- struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
- int ret = 0, handler_status;
- enum r2net_system_error syserr;
- struct r2net_msg_handler *nmh = NULL;
- void *ret_data = NULL;
- int data_magic = 0;
-
- msglog(hdr, "processing message\n");
-
- r2net_sc_postpone_idle(sc);
-
- switch (be16_to_cpu(hdr->magic)) {
-
- case R2NET_MSG_STATUS_MAGIC:
- /* special type for returning message status */
- r2net_complete_nsw(nn, NULL, be32_to_cpu(hdr->msg_num),
- be32_to_cpu(hdr->sys_status),
- be32_to_cpu(hdr->status));
- goto out;
- case R2NET_MSG_KEEP_REQ_MAGIC:
- r2net_sendpage(sc, r2net_keep_resp, sizeof(*r2net_keep_resp));
- goto out;
- case R2NET_MSG_KEEP_RESP_MAGIC:
- goto out;
- case R2NET_MSG_MAGIC:
- break;
- case R2NET_MSG_DATA_MAGIC:
- /*
- * unlike a normal status magic, a data magic DOES
- * (MUST) have a handler, so the control flow is
- * a little funky here as a result
- */
- data_magic = 1;
- break;
- default:
- msglog(hdr, "bad magic\n");
- ret = -EINVAL;
- goto out;
- break;
- }
-
- /* find a handler for it */
- handler_status = 0;
- nmh = r2net_handler_get(be16_to_cpu(hdr->msg_type),
- be32_to_cpu(hdr->key));
- if (!nmh) {
- mlog(ML_TCP, "couldn't find handler for type %u key %08x\n",
- be16_to_cpu(hdr->msg_type), be32_to_cpu(hdr->key));
- syserr = R2NET_ERR_NO_HNDLR;
- goto out_respond;
- }
-
- syserr = R2NET_ERR_NONE;
-
- if (be16_to_cpu(hdr->data_len) > nmh->nh_max_len)
- syserr = R2NET_ERR_OVERFLOW;
-
- if (syserr != R2NET_ERR_NONE) {
- pr_err("ramster_r2net, message length problem\n");
- goto out_respond;
- }
-
- r2net_set_func_start_time(sc);
- sc->sc_msg_key = be32_to_cpu(hdr->key);
- sc->sc_msg_type = be16_to_cpu(hdr->msg_type);
- handler_status = (nmh->nh_func)(hdr, sizeof(struct r2net_msg) +
- be16_to_cpu(hdr->data_len),
- nmh->nh_func_data, &ret_data);
- if (data_magic) {
- /*
- * handler handled data sent in reply to request
- * so complete the transaction
- */
- r2net_complete_nsw(nn, NULL, be32_to_cpu(hdr->msg_num),
- be32_to_cpu(hdr->sys_status), handler_status);
- goto out;
- }
- /*
- * handler changed magic to DATA_MAGIC to reply to request for data,
- * implies ret_data points to data to return and handler_status
- * is the number of bytes of data
- */
- if (be16_to_cpu(hdr->magic) == R2NET_MSG_DATA_MAGIC) {
- ret = r2net_send_data_magic(sc, hdr,
- ret_data, handler_status,
- syserr, 0);
- hdr = NULL;
- mlog(0, "sending data reply %d, syserr %d returned %d\n",
- handler_status, syserr, ret);
- r2net_set_func_stop_time(sc);
-
- r2net_update_recv_stats(sc);
- goto out;
- }
- r2net_set_func_stop_time(sc);
-
- r2net_update_recv_stats(sc);
-
-out_respond:
- /* this destroys the hdr, so don't use it after this */
- mutex_lock(&sc->sc_send_lock);
- ret = r2net_send_status_magic(sc->sc_sock, hdr, syserr,
- handler_status);
- mutex_unlock(&sc->sc_send_lock);
- hdr = NULL;
- mlog(0, "sending handler status %d, syserr %d returned %d\n",
- handler_status, syserr, ret);
-
- if (nmh) {
- BUG_ON(ret_data != NULL && nmh->nh_post_func == NULL);
- if (nmh->nh_post_func)
- (nmh->nh_post_func)(handler_status, nmh->nh_func_data,
- ret_data);
- }
-
-out:
- if (nmh)
- r2net_handler_put(nmh);
- return ret;
-}
-
-static int r2net_check_handshake(struct r2net_sock_container *sc)
-{
- struct r2net_handshake *hand = page_address(sc->sc_page);
- struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
-
- if (hand->protocol_version != cpu_to_be64(R2NET_PROTOCOL_VERSION)) {
- pr_notice("ramster: " SC_NODEF_FMT " Advertised net "
- "protocol version %llu but %llu is required. "
- "Disconnecting.\n", sc->sc_node->nd_name,
- sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
- ntohs(sc->sc_node->nd_ipv4_port),
- (unsigned long long)be64_to_cpu(hand->protocol_version),
- R2NET_PROTOCOL_VERSION);
-
-		/* don't bother reconnecting if it's the wrong version. */
- r2net_ensure_shutdown(nn, sc, -ENOTCONN);
- return -1;
- }
-
- /*
- * Ensure timeouts are consistent with other nodes, otherwise
- * we can end up with one node thinking that the other must be down,
- * but isn't. This can ultimately cause corruption.
- */
- if (be32_to_cpu(hand->r2net_idle_timeout_ms) !=
- r2net_idle_timeout()) {
- pr_notice("ramster: " SC_NODEF_FMT " uses a network "
- "idle timeout of %u ms, but we use %u ms locally. "
- "Disconnecting.\n", sc->sc_node->nd_name,
- sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
- ntohs(sc->sc_node->nd_ipv4_port),
- be32_to_cpu(hand->r2net_idle_timeout_ms),
- r2net_idle_timeout());
- r2net_ensure_shutdown(nn, sc, -ENOTCONN);
- return -1;
- }
-
- if (be32_to_cpu(hand->r2net_keepalive_delay_ms) !=
- r2net_keepalive_delay()) {
- pr_notice("ramster: " SC_NODEF_FMT " uses a keepalive "
- "delay of %u ms, but we use %u ms locally. "
- "Disconnecting.\n", sc->sc_node->nd_name,
- sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
- ntohs(sc->sc_node->nd_ipv4_port),
- be32_to_cpu(hand->r2net_keepalive_delay_ms),
- r2net_keepalive_delay());
- r2net_ensure_shutdown(nn, sc, -ENOTCONN);
- return -1;
- }
-
- if (be32_to_cpu(hand->r2hb_heartbeat_timeout_ms) !=
- R2HB_MAX_WRITE_TIMEOUT_MS) {
- pr_notice("ramster: " SC_NODEF_FMT " uses a heartbeat "
- "timeout of %u ms, but we use %u ms locally. "
- "Disconnecting.\n", sc->sc_node->nd_name,
- sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
- ntohs(sc->sc_node->nd_ipv4_port),
- be32_to_cpu(hand->r2hb_heartbeat_timeout_ms),
- R2HB_MAX_WRITE_TIMEOUT_MS);
- r2net_ensure_shutdown(nn, sc, -ENOTCONN);
- return -1;
- }
-
- sc->sc_handshake_ok = 1;
-
- spin_lock(&nn->nn_lock);
- /* set valid and queue the idle timers only if it hasn't been
- * shut down already */
- if (nn->nn_sc == sc) {
- r2net_sc_reset_idle_timer(sc);
- atomic_set(&nn->nn_timeout, 0);
- r2net_set_nn_state(nn, sc, 1, 0);
- }
- spin_unlock(&nn->nn_lock);
-
- /* shift everything up as though it wasn't there */
- sc->sc_page_off -= sizeof(struct r2net_handshake);
- if (sc->sc_page_off)
- memmove(hand, hand + 1, sc->sc_page_off);
-
- return 0;
-}
-
-/* this demuxes the queued rx bytes into header or payload bits and calls
- * handlers as each full message is read off the socket. it returns -error,
- * == 0 eof, or > 0 for progress made.*/
-static int r2net_advance_rx(struct r2net_sock_container *sc)
-{
- struct r2net_msg *hdr;
- int ret = 0;
- void *data;
- size_t datalen;
-
- sclog(sc, "receiving\n");
- r2net_set_advance_start_time(sc);
-
- if (unlikely(sc->sc_handshake_ok == 0)) {
- if (sc->sc_page_off < sizeof(struct r2net_handshake)) {
- data = page_address(sc->sc_page) + sc->sc_page_off;
- datalen = sizeof(struct r2net_handshake) -
- sc->sc_page_off;
- ret = r2net_recv_tcp_msg(sc->sc_sock, data, datalen);
- if (ret > 0)
- sc->sc_page_off += ret;
- }
-
- if (sc->sc_page_off == sizeof(struct r2net_handshake)) {
- r2net_check_handshake(sc);
- if (unlikely(sc->sc_handshake_ok == 0))
- ret = -EPROTO;
- }
- goto out;
- }
-
- /* do we need more header? */
- if (sc->sc_page_off < sizeof(struct r2net_msg)) {
- data = page_address(sc->sc_page) + sc->sc_page_off;
- datalen = sizeof(struct r2net_msg) - sc->sc_page_off;
- ret = r2net_recv_tcp_msg(sc->sc_sock, data, datalen);
- if (ret > 0) {
- sc->sc_page_off += ret;
- /* only swab incoming here.. we can
- * only get here once as we cross from
- * being under to over */
- if (sc->sc_page_off == sizeof(struct r2net_msg)) {
- hdr = page_address(sc->sc_page);
- if (be16_to_cpu(hdr->data_len) >
- R2NET_MAX_PAYLOAD_BYTES)
- ret = -EOVERFLOW;
- WARN_ON_ONCE(ret == -EOVERFLOW);
- }
- }
- if (ret <= 0)
- goto out;
- }
-
- if (sc->sc_page_off < sizeof(struct r2net_msg)) {
- /* oof, still don't have a header */
- goto out;
- }
-
- /* this was swabbed above when we first read it */
- hdr = page_address(sc->sc_page);
-
- msglog(hdr, "at page_off %zu\n", sc->sc_page_off);
-
- /* do we need more payload? */
- if (sc->sc_page_off - sizeof(struct r2net_msg) <
- be16_to_cpu(hdr->data_len)) {
- /* need more payload */
- data = page_address(sc->sc_page) + sc->sc_page_off;
- datalen = (sizeof(struct r2net_msg) +
- be16_to_cpu(hdr->data_len)) -
- sc->sc_page_off;
- ret = r2net_recv_tcp_msg(sc->sc_sock, data, datalen);
- if (ret > 0)
- sc->sc_page_off += ret;
- if (ret <= 0)
- goto out;
- }
-
- if (sc->sc_page_off - sizeof(struct r2net_msg) ==
- be16_to_cpu(hdr->data_len)) {
- /* we can only get here once, the first time we read
- * the payload.. so set ret to progress if the handler
- * works out. after calling this the message is toast */
- ret = r2net_process_message(sc, hdr);
- if (ret == 0)
- ret = 1;
- sc->sc_page_off = 0;
- }
-
-out:
- sclog(sc, "ret = %d\n", ret);
- r2net_set_advance_stop_time(sc);
- return ret;
-}
-
-/* this work func is triggered by data ready. it reads until it can read no
- * more. it interprets 0, eof, as fatal. if data_ready hits while we're doing
- * our work the work struct will be marked and we'll be called again. */
-static void r2net_rx_until_empty(struct work_struct *work)
-{
- struct r2net_sock_container *sc =
- container_of(work, struct r2net_sock_container, sc_rx_work);
- int ret;
-
- do {
- ret = r2net_advance_rx(sc);
- } while (ret > 0);
-
- if (ret <= 0 && ret != -EAGAIN) {
- struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
- sclog(sc, "saw error %d, closing\n", ret);
- /* not permanent so read failed handshake can retry */
- r2net_ensure_shutdown(nn, sc, 0);
- }
- sc_put(sc);
-}
-
-static int r2net_set_nodelay(struct socket *sock)
-{
- int ret, val = 1;
- mm_segment_t oldfs;
-
- oldfs = get_fs();
- set_fs(KERNEL_DS);
-
- /*
- * Dear unsuspecting programmer,
- *
- * Don't use sock_setsockopt() for SOL_TCP. It doesn't check its level
- * argument and assumes SOL_SOCKET so, say, your TCP_NODELAY will
- * silently turn into SO_DEBUG.
- *
- * Yours,
- * Keeper of hilariously fragile interfaces.
- */
- ret = sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY,
- (char __user *)&val, sizeof(val));
-
- set_fs(oldfs);
- return ret;
-}
-
-static void r2net_initialize_handshake(void)
-{
- r2net_hand->r2hb_heartbeat_timeout_ms = cpu_to_be32(
- R2HB_MAX_WRITE_TIMEOUT_MS);
- r2net_hand->r2net_idle_timeout_ms = cpu_to_be32(r2net_idle_timeout());
- r2net_hand->r2net_keepalive_delay_ms = cpu_to_be32(
- r2net_keepalive_delay());
- r2net_hand->r2net_reconnect_delay_ms = cpu_to_be32(
- r2net_reconnect_delay());
-}
-
-/* ------------------------------------------------------------ */
-
-/* called when a connect completes and after a sock is accepted. the
- * rx path will see the response and mark the sc valid */
-static void r2net_sc_connect_completed(struct work_struct *work)
-{
- struct r2net_sock_container *sc =
- container_of(work, struct r2net_sock_container,
- sc_connect_work);
-
- mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n",
- (unsigned long long)R2NET_PROTOCOL_VERSION,
- (unsigned long long)be64_to_cpu(r2net_hand->connector_id));
-
- r2net_initialize_handshake();
- r2net_sendpage(sc, r2net_hand, sizeof(*r2net_hand));
- sc_put(sc);
-}
-
-/* this is called as a work_struct func. */
-static void r2net_sc_send_keep_req(struct work_struct *work)
-{
- struct r2net_sock_container *sc =
- container_of(work, struct r2net_sock_container,
- sc_keepalive_work.work);
-
- r2net_sendpage(sc, r2net_keep_req, sizeof(*r2net_keep_req));
- sc_put(sc);
-}
-
-/* socket shutdown does a del_timer_sync against this as it tears down.
- * we can't start this timer until we've got to the point in sc buildup
- * where shutdown is going to be involved */
-static void r2net_idle_timer(unsigned long data)
-{
- struct r2net_sock_container *sc = (struct r2net_sock_container *)data;
- struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
-#ifdef CONFIG_DEBUG_FS
- unsigned long msecs = ktime_to_ms(ktime_get()) -
- ktime_to_ms(sc->sc_tv_timer);
-#else
- unsigned long msecs = r2net_idle_timeout();
-#endif
-
- pr_notice("ramster: Connection to " SC_NODEF_FMT " has been "
- "idle for %lu.%lu secs, shutting it down.\n",
- sc->sc_node->nd_name, sc->sc_node->nd_num,
- &sc->sc_node->nd_ipv4_address, ntohs(sc->sc_node->nd_ipv4_port),
- msecs / 1000, msecs % 1000);
-
- /*
- * Initialize the nn_timeout so that the next connection attempt
- * will continue in r2net_start_connect.
- */
- atomic_set(&nn->nn_timeout, 1);
- r2net_sc_queue_work(sc, &sc->sc_shutdown_work);
-}
-
-static void r2net_sc_reset_idle_timer(struct r2net_sock_container *sc)
-{
- r2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
- r2net_sc_queue_delayed_work(sc, &sc->sc_keepalive_work,
- msecs_to_jiffies(r2net_keepalive_delay()));
- r2net_set_sock_timer(sc);
- mod_timer(&sc->sc_idle_timeout,
- jiffies + msecs_to_jiffies(r2net_idle_timeout()));
-}
-
-static void r2net_sc_postpone_idle(struct r2net_sock_container *sc)
-{
- /* Only push out an existing timer */
- if (timer_pending(&sc->sc_idle_timeout))
- r2net_sc_reset_idle_timer(sc);
-}
-
-/* this work func is kicked whenever a path sets the nn state which doesn't
- * have valid set. This includes seeing hb come up, losing a connection,
- * having a connect attempt fail, etc. This centralizes the logic which decides
- * if a connect attempt should be made or if we should give up and all future
- * transmit attempts should fail */
-static void r2net_start_connect(struct work_struct *work)
-{
- struct r2net_node *nn =
- container_of(work, struct r2net_node, nn_connect_work.work);
- struct r2net_sock_container *sc = NULL;
- struct r2nm_node *node = NULL, *mynode = NULL;
- struct socket *sock = NULL;
- struct sockaddr_in myaddr = {0, }, remoteaddr = {0, };
- int ret = 0, stop;
- unsigned int timeout;
-
- /* if we're greater we initiate tx, otherwise we accept */
- if (r2nm_this_node() <= r2net_num_from_nn(nn))
- goto out;
-
- /* watch for racing with tearing a node down */
- node = r2nm_get_node_by_num(r2net_num_from_nn(nn));
- if (node == NULL) {
- ret = 0;
- goto out;
- }
-
- mynode = r2nm_get_node_by_num(r2nm_this_node());
- if (mynode == NULL) {
- ret = 0;
- goto out;
- }
-
- spin_lock(&nn->nn_lock);
- /*
- * see if we already have one pending or have given up.
- * For nn_timeout, it is set when we close the connection
-	 * because of the idle timeout. So it means that we have
- * at least connected to that node successfully once,
- * now try to connect to it again.
- */
- timeout = atomic_read(&nn->nn_timeout);
- stop = (nn->nn_sc ||
- (nn->nn_persistent_error &&
- (nn->nn_persistent_error != -ENOTCONN || timeout == 0)));
- spin_unlock(&nn->nn_lock);
- if (stop)
- goto out;
-
- nn->nn_last_connect_attempt = jiffies;
-
- sc = sc_alloc(node);
- if (sc == NULL) {
- mlog(0, "couldn't allocate sc\n");
- ret = -ENOMEM;
- goto out;
- }
-
- ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
- if (ret < 0) {
- mlog(0, "can't create socket: %d\n", ret);
- goto out;
- }
- sc->sc_sock = sock; /* freed by sc_kref_release */
-
- sock->sk->sk_allocation = GFP_ATOMIC;
-
- myaddr.sin_family = AF_INET;
- myaddr.sin_addr.s_addr = mynode->nd_ipv4_address;
- myaddr.sin_port = htons(0); /* any port */
-
- ret = sock->ops->bind(sock, (struct sockaddr *)&myaddr,
- sizeof(myaddr));
- if (ret) {
- mlog(ML_ERROR, "bind failed with %d at address %pI4\n",
- ret, &mynode->nd_ipv4_address);
- goto out;
- }
-
- ret = r2net_set_nodelay(sc->sc_sock);
- if (ret) {
- mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret);
- goto out;
- }
-
- r2net_register_callbacks(sc->sc_sock->sk, sc);
-
- spin_lock(&nn->nn_lock);
- /* handshake completion will set nn->nn_sc_valid */
- r2net_set_nn_state(nn, sc, 0, 0);
- spin_unlock(&nn->nn_lock);
-
- remoteaddr.sin_family = AF_INET;
- remoteaddr.sin_addr.s_addr = node->nd_ipv4_address;
- remoteaddr.sin_port = node->nd_ipv4_port;
-
- ret = sc->sc_sock->ops->connect(sc->sc_sock,
- (struct sockaddr *)&remoteaddr,
- sizeof(remoteaddr),
- O_NONBLOCK);
- if (ret == -EINPROGRESS)
- ret = 0;
-
-out:
- if (ret) {
- pr_notice("ramster: Connect attempt to " SC_NODEF_FMT
- " failed with errno %d\n", sc->sc_node->nd_name,
- sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
- ntohs(sc->sc_node->nd_ipv4_port), ret);
- /* 0 err so that another will be queued and attempted
- * from set_nn_state */
- if (sc)
- r2net_ensure_shutdown(nn, sc, 0);
- }
- if (sc)
- sc_put(sc);
- if (node)
- r2nm_node_put(node);
- if (mynode)
- r2nm_node_put(mynode);
-
- return;
-}
-
-static void r2net_connect_expired(struct work_struct *work)
-{
- struct r2net_node *nn =
- container_of(work, struct r2net_node, nn_connect_expired.work);
-
- spin_lock(&nn->nn_lock);
- if (!nn->nn_sc_valid) {
- pr_notice("ramster: No connection established with "
- "node %u after %u.%u seconds, giving up.\n",
- r2net_num_from_nn(nn),
- r2net_idle_timeout() / 1000,
- r2net_idle_timeout() % 1000);
-
- r2net_set_nn_state(nn, NULL, 0, -ENOTCONN);
- }
- spin_unlock(&nn->nn_lock);
-}
-
-static void r2net_still_up(struct work_struct *work)
-{
-}
-
-/* ------------------------------------------------------------ */
-
-void r2net_disconnect_node(struct r2nm_node *node)
-{
- struct r2net_node *nn = r2net_nn_from_num(node->nd_num);
-
- /* don't reconnect until it's heartbeating again */
- spin_lock(&nn->nn_lock);
- atomic_set(&nn->nn_timeout, 0);
- r2net_set_nn_state(nn, NULL, 0, -ENOTCONN);
- spin_unlock(&nn->nn_lock);
-
- if (r2net_wq) {
- cancel_delayed_work(&nn->nn_connect_expired);
- cancel_delayed_work(&nn->nn_connect_work);
- cancel_delayed_work(&nn->nn_still_up);
- flush_workqueue(r2net_wq);
- }
-}
-
-static void r2net_hb_node_down_cb(struct r2nm_node *node, int node_num,
- void *data)
-{
- if (!node)
- return;
-
- if (node_num != r2nm_this_node())
- r2net_disconnect_node(node);
-
- BUG_ON(atomic_read(&r2net_connected_peers) < 0);
-}
-
-static void r2net_hb_node_up_cb(struct r2nm_node *node, int node_num,
- void *data)
-{
- struct r2net_node *nn = r2net_nn_from_num(node_num);
-
- BUG_ON(!node);
-
- /* ensure an immediate connect attempt */
- nn->nn_last_connect_attempt = jiffies -
- (msecs_to_jiffies(r2net_reconnect_delay()) + 1);
-
- if (node_num != r2nm_this_node()) {
-		/* believe it or not, accept and node heartbeating testing
- * can succeed for this node before we got here.. so
- * only use set_nn_state to clear the persistent error
- * if that hasn't already happened */
- spin_lock(&nn->nn_lock);
- atomic_set(&nn->nn_timeout, 0);
- if (nn->nn_persistent_error)
- r2net_set_nn_state(nn, NULL, 0, 0);
- spin_unlock(&nn->nn_lock);
- }
-}
-
-void r2net_unregister_hb_callbacks(void)
-{
- r2hb_unregister_callback(NULL, &r2net_hb_up);
- r2hb_unregister_callback(NULL, &r2net_hb_down);
-}
-
-int r2net_register_hb_callbacks(void)
-{
- int ret;
-
- r2hb_setup_callback(&r2net_hb_down, R2HB_NODE_DOWN_CB,
- r2net_hb_node_down_cb, NULL, R2NET_HB_PRI);
- r2hb_setup_callback(&r2net_hb_up, R2HB_NODE_UP_CB,
- r2net_hb_node_up_cb, NULL, R2NET_HB_PRI);
-
- ret = r2hb_register_callback(NULL, &r2net_hb_up);
- if (ret == 0)
- ret = r2hb_register_callback(NULL, &r2net_hb_down);
-
- if (ret)
- r2net_unregister_hb_callbacks();
-
- return ret;
-}
-
-/* ------------------------------------------------------------ */
-
-static int r2net_accept_one(struct socket *sock)
-{
- int ret, slen;
- struct sockaddr_in sin;
- struct socket *new_sock = NULL;
- struct r2nm_node *node = NULL;
- struct r2nm_node *local_node = NULL;
- struct r2net_sock_container *sc = NULL;
- struct r2net_node *nn;
-
- BUG_ON(sock == NULL);
- ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type,
- sock->sk->sk_protocol, &new_sock);
- if (ret)
- goto out;
-
- new_sock->type = sock->type;
- new_sock->ops = sock->ops;
- ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
- if (ret < 0)
- goto out;
-
- new_sock->sk->sk_allocation = GFP_ATOMIC;
-
- ret = r2net_set_nodelay(new_sock);
- if (ret) {
- mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret);
- goto out;
- }
-
- slen = sizeof(sin);
- ret = new_sock->ops->getname(new_sock, (struct sockaddr *) &sin,
- &slen, 1);
- if (ret < 0)
- goto out;
-
- node = r2nm_get_node_by_ip(sin.sin_addr.s_addr);
- if (node == NULL) {
- pr_notice("ramster: Attempt to connect from unknown "
- "node at %pI4:%d\n", &sin.sin_addr.s_addr,
- ntohs(sin.sin_port));
- ret = -EINVAL;
- goto out;
- }
-
- if (r2nm_this_node() >= node->nd_num) {
- local_node = r2nm_get_node_by_num(r2nm_this_node());
- pr_notice("ramster: Unexpected connect attempt seen "
- "at node '%s' (%u, %pI4:%d) from node '%s' (%u, "
- "%pI4:%d)\n", local_node->nd_name, local_node->nd_num,
- &(local_node->nd_ipv4_address),
- ntohs(local_node->nd_ipv4_port), node->nd_name,
- node->nd_num, &sin.sin_addr.s_addr, ntohs(sin.sin_port));
- ret = -EINVAL;
- goto out;
- }
-
- /* this happens all the time when the other node sees our heartbeat
- * and tries to connect before we see their heartbeat */
- if (!r2hb_check_node_heartbeating_from_callback(node->nd_num)) {
- mlog(ML_CONN, "attempt to connect from node '%s' at "
- "%pI4:%d but it isn't heartbeating\n",
- node->nd_name, &sin.sin_addr.s_addr,
- ntohs(sin.sin_port));
- ret = -EINVAL;
- goto out;
- }
-
- nn = r2net_nn_from_num(node->nd_num);
-
- spin_lock(&nn->nn_lock);
- if (nn->nn_sc)
- ret = -EBUSY;
- else
- ret = 0;
- spin_unlock(&nn->nn_lock);
- if (ret) {
- pr_notice("ramster: Attempt to connect from node '%s' "
- "at %pI4:%d but it already has an open connection\n",
- node->nd_name, &sin.sin_addr.s_addr,
- ntohs(sin.sin_port));
- goto out;
- }
-
- sc = sc_alloc(node);
- if (sc == NULL) {
- ret = -ENOMEM;
- goto out;
- }
-
- sc->sc_sock = new_sock;
- new_sock = NULL;
-
- spin_lock(&nn->nn_lock);
- atomic_set(&nn->nn_timeout, 0);
- r2net_set_nn_state(nn, sc, 0, 0);
- spin_unlock(&nn->nn_lock);
-
- r2net_register_callbacks(sc->sc_sock->sk, sc);
- r2net_sc_queue_work(sc, &sc->sc_rx_work);
-
- r2net_initialize_handshake();
- r2net_sendpage(sc, r2net_hand, sizeof(*r2net_hand));
-
-out:
- if (new_sock)
- sock_release(new_sock);
- if (node)
- r2nm_node_put(node);
- if (local_node)
- r2nm_node_put(local_node);
- if (sc)
- sc_put(sc);
- return ret;
-}
-
-static void r2net_accept_many(struct work_struct *work)
-{
- struct socket *sock = r2net_listen_sock;
- while (r2net_accept_one(sock) == 0)
- cond_resched();
-}
-
-static void r2net_listen_data_ready(struct sock *sk, int bytes)
-{
- void (*ready)(struct sock *sk, int bytes);
-
- read_lock(&sk->sk_callback_lock);
- ready = sk->sk_user_data;
- if (ready == NULL) { /* check for teardown race */
- ready = sk->sk_data_ready;
- goto out;
- }
-
- /* ->sk_data_ready is also called for a newly established child socket
- * before it has been accepted and the acceptor has set up their
- * data_ready.. we only want to queue listen work for our listening
- * socket */
- if (sk->sk_state == TCP_LISTEN) {
- mlog(ML_TCP, "bytes: %d\n", bytes);
- queue_work(r2net_wq, &r2net_listen_work);
- }
-
-out:
- read_unlock(&sk->sk_callback_lock);
- ready(sk, bytes);
-}
-
-static int r2net_open_listening_sock(__be32 addr, __be16 port)
-{
- struct socket *sock = NULL;
- int ret;
- struct sockaddr_in sin = {
-		.sin_family = AF_INET,
- .sin_addr = { .s_addr = addr },
- .sin_port = port,
- };
-
- ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
- if (ret < 0) {
- pr_err("ramster: Error %d while creating socket\n", ret);
- goto out;
- }
-
- sock->sk->sk_allocation = GFP_ATOMIC;
-
- write_lock_bh(&sock->sk->sk_callback_lock);
- sock->sk->sk_user_data = sock->sk->sk_data_ready;
- sock->sk->sk_data_ready = r2net_listen_data_ready;
- write_unlock_bh(&sock->sk->sk_callback_lock);
-
- r2net_listen_sock = sock;
- INIT_WORK(&r2net_listen_work, r2net_accept_many);
-
- sock->sk->sk_reuse = /* SK_CAN_REUSE FIXME FOR 3.4 */ 1;
- ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
- if (ret < 0) {
- pr_err("ramster: Error %d while binding socket at %pI4:%u\n",
- ret, &addr, ntohs(port));
- goto out;
- }
-
- ret = sock->ops->listen(sock, 64);
- if (ret < 0)
- pr_err("ramster: Error %d while listening on %pI4:%u\n",
- ret, &addr, ntohs(port));
-
-out:
- if (ret) {
- r2net_listen_sock = NULL;
- if (sock)
- sock_release(sock);
- }
- return ret;
-}
-
-/*
- * called from node manager when we should bring up our network listening
- * socket. node manager handles all the serialization to only call this
- * once and to match it with r2net_stop_listening(). note,
- * r2nm_this_node() doesn't work yet as we're being called while it
- * is being set up.
- */
-int r2net_start_listening(struct r2nm_node *node)
-{
- int ret = 0;
-
- BUG_ON(r2net_wq != NULL);
- BUG_ON(r2net_listen_sock != NULL);
-
- mlog(ML_KTHREAD, "starting r2net thread...\n");
- r2net_wq = create_singlethread_workqueue("r2net");
- if (r2net_wq == NULL) {
- mlog(ML_ERROR, "unable to launch r2net thread\n");
- return -ENOMEM; /* ? */
- }
-
- ret = r2net_open_listening_sock(node->nd_ipv4_address,
- node->nd_ipv4_port);
- if (ret) {
- destroy_workqueue(r2net_wq);
- r2net_wq = NULL;
- }
-
- return ret;
-}
-
-/* again, r2nm_this_node() doesn't work here as we're involved in
- * tearing it down */
-void r2net_stop_listening(struct r2nm_node *node)
-{
- struct socket *sock = r2net_listen_sock;
- size_t i;
-
- BUG_ON(r2net_wq == NULL);
- BUG_ON(r2net_listen_sock == NULL);
-
- /* stop the listening socket from generating work */
- write_lock_bh(&sock->sk->sk_callback_lock);
- sock->sk->sk_data_ready = sock->sk->sk_user_data;
- sock->sk->sk_user_data = NULL;
- write_unlock_bh(&sock->sk->sk_callback_lock);
-
- for (i = 0; i < ARRAY_SIZE(r2net_nodes); i++) {
- struct r2nm_node *node = r2nm_get_node_by_num(i);
- if (node) {
- r2net_disconnect_node(node);
- r2nm_node_put(node);
- }
- }
-
- /* finish all work and tear down the work queue */
- mlog(ML_KTHREAD, "waiting for r2net thread to exit....\n");
- destroy_workqueue(r2net_wq);
- r2net_wq = NULL;
-
- sock_release(r2net_listen_sock);
- r2net_listen_sock = NULL;
-}
-
-void r2net_hb_node_up_manual(int node_num)
-{
- struct r2nm_node dummy;
- if (r2nm_single_cluster == NULL)
- pr_err("ramster: cluster not alive, node_up_manual ignored\n");
- else {
- r2hb_manual_set_node_heartbeating(node_num);
- r2net_hb_node_up_cb(&dummy, node_num, NULL);
- }
-}
-
-/* ------------------------------------------------------------ */
-
-int r2net_init(void)
-{
- unsigned long i;
-
- if (r2net_debugfs_init())
- return -ENOMEM;
-
- r2net_hand = kzalloc(sizeof(struct r2net_handshake), GFP_KERNEL);
- r2net_keep_req = kzalloc(sizeof(struct r2net_msg), GFP_KERNEL);
- r2net_keep_resp = kzalloc(sizeof(struct r2net_msg), GFP_KERNEL);
- if (!r2net_hand || !r2net_keep_req || !r2net_keep_resp) {
- kfree(r2net_hand);
- kfree(r2net_keep_req);
- kfree(r2net_keep_resp);
- return -ENOMEM;
- }
-
- r2net_hand->protocol_version = cpu_to_be64(R2NET_PROTOCOL_VERSION);
- r2net_hand->connector_id = cpu_to_be64(1);
-
- r2net_keep_req->magic = cpu_to_be16(R2NET_MSG_KEEP_REQ_MAGIC);
- r2net_keep_resp->magic = cpu_to_be16(R2NET_MSG_KEEP_RESP_MAGIC);
-
- for (i = 0; i < ARRAY_SIZE(r2net_nodes); i++) {
- struct r2net_node *nn = r2net_nn_from_num(i);
-
- atomic_set(&nn->nn_timeout, 0);
- spin_lock_init(&nn->nn_lock);
- INIT_DELAYED_WORK(&nn->nn_connect_work, r2net_start_connect);
- INIT_DELAYED_WORK(&nn->nn_connect_expired,
- r2net_connect_expired);
- INIT_DELAYED_WORK(&nn->nn_still_up, r2net_still_up);
-		/* until we see hb from a node we'll return -ENOTCONN */
- nn->nn_persistent_error = -ENOTCONN;
- init_waitqueue_head(&nn->nn_sc_wq);
- idr_init(&nn->nn_status_idr);
- INIT_LIST_HEAD(&nn->nn_status_list);
- }
-
- return 0;
-}
-
-void r2net_exit(void)
-{
- kfree(r2net_hand);
- kfree(r2net_keep_req);
- kfree(r2net_keep_resp);
- r2net_debugfs_exit();
-}
diff --git a/drivers/staging/zcache/ramster/tcp.h b/drivers/staging/zcache/ramster/tcp.h
deleted file mode 100644
index 9d05833..0000000
--- a/drivers/staging/zcache/ramster/tcp.h
+++ /dev/null
@@ -1,159 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8; -*-
- * vim: noexpandtab sw=8 ts=8 sts=0:
- *
- * tcp.h
- *
- * Function prototypes
- *
- * Copyright (C) 2004 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- *
- */
-
-#ifndef R2CLUSTER_TCP_H
-#define R2CLUSTER_TCP_H
-
-#include <linux/socket.h>
-#ifdef __KERNEL__
-#include <net/sock.h>
-#include <linux/tcp.h>
-#else
-#include <sys/socket.h>
-#endif
-#include <linux/inet.h>
-#include <linux/in.h>
-
-struct r2net_msg {
- __be16 magic;
- __be16 data_len;
- __be16 msg_type;
- __be16 pad1;
- __be32 sys_status;
- __be32 status;
- __be32 key;
- __be32 msg_num;
- __u8 buf[0];
-};
-
-typedef int (r2net_msg_handler_func)(struct r2net_msg *msg, u32 len, void *data,
- void **ret_data);
-typedef void (r2net_post_msg_handler_func)(int status, void *data,
- void *ret_data);
-
-#define R2NET_MAX_PAYLOAD_BYTES (4096 - sizeof(struct r2net_msg))
-
-/* same as hb delay, we're waiting for another node to recognize our hb */
-#define R2NET_RECONNECT_DELAY_MS_DEFAULT 2000
-
-#define R2NET_KEEPALIVE_DELAY_MS_DEFAULT 2000
-#define R2NET_IDLE_TIMEOUT_MS_DEFAULT 30000
-
-
-/* TODO: figure this out.... */
-static inline int r2net_link_down(int err, struct socket *sock)
-{
- if (sock) {
- if (sock->sk->sk_state != TCP_ESTABLISHED &&
- sock->sk->sk_state != TCP_CLOSE_WAIT)
- return 1;
- }
-
- if (err >= 0)
- return 0;
- switch (err) {
-
- /* ????????????????????????? */
- case -ERESTARTSYS:
- case -EBADF:
- /* When the server has died, an ICMP port unreachable
- * message prompts ECONNREFUSED. */
- case -ECONNREFUSED:
- case -ENOTCONN:
- case -ECONNRESET:
- case -EPIPE:
- return 1;
-
- }
- return 0;
-}
-
-enum {
- R2NET_DRIVER_UNINITED,
- R2NET_DRIVER_READY,
-};
-
-int r2net_send_message(u32 msg_type, u32 key, void *data, u32 len,
- u8 target_node, int *status);
-int r2net_send_message_vec(u32 msg_type, u32 key, struct kvec *vec,
- size_t veclen, u8 target_node, int *status);
-
-int r2net_register_handler(u32 msg_type, u32 key, u32 max_len,
- r2net_msg_handler_func *func, void *data,
- r2net_post_msg_handler_func *post_func,
- struct list_head *unreg_list);
-void r2net_unregister_handler_list(struct list_head *list);
-
-void r2net_fill_node_map(unsigned long *map, unsigned bytes);
-
-void r2net_force_data_magic(struct r2net_msg *, u16, u32);
-void r2net_hb_node_up_manual(int);
-struct r2net_node *r2net_nn_from_num(u8);
-
-struct r2nm_node;
-int r2net_register_hb_callbacks(void);
-void r2net_unregister_hb_callbacks(void);
-int r2net_start_listening(struct r2nm_node *node);
-void r2net_stop_listening(struct r2nm_node *node);
-void r2net_disconnect_node(struct r2nm_node *node);
-int r2net_num_connected_peers(void);
-
-int r2net_init(void);
-void r2net_exit(void);
-
-struct r2net_send_tracking;
-struct r2net_sock_container;
-
-#if 0
-int r2net_debugfs_init(void);
-void r2net_debugfs_exit(void);
-void r2net_debug_add_nst(struct r2net_send_tracking *nst);
-void r2net_debug_del_nst(struct r2net_send_tracking *nst);
-void r2net_debug_add_sc(struct r2net_sock_container *sc);
-void r2net_debug_del_sc(struct r2net_sock_container *sc);
-#else
-static inline int r2net_debugfs_init(void)
-{
- return 0;
-}
-static inline void r2net_debugfs_exit(void)
-{
-}
-static inline void r2net_debug_add_nst(struct r2net_send_tracking *nst)
-{
-}
-static inline void r2net_debug_del_nst(struct r2net_send_tracking *nst)
-{
-}
-static inline void r2net_debug_add_sc(struct r2net_sock_container *sc)
-{
-}
-static inline void r2net_debug_del_sc(struct r2net_sock_container *sc)
-{
-}
-#endif /* CONFIG_DEBUG_FS */
-
-#endif /* R2CLUSTER_TCP_H */
diff --git a/drivers/staging/zcache/ramster/tcp_internal.h b/drivers/staging/zcache/ramster/tcp_internal.h
deleted file mode 100644
index 4d8cc9f..0000000
--- a/drivers/staging/zcache/ramster/tcp_internal.h
+++ /dev/null
@@ -1,248 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8; -*-
- * vim: noexpandtab sw=8 ts=8 sts=0:
- *
- * Copyright (C) 2005 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- */
-
-#ifndef R2CLUSTER_TCP_INTERNAL_H
-#define R2CLUSTER_TCP_INTERNAL_H
-
-#define R2NET_MSG_MAGIC ((u16)0xfa55)
-#define R2NET_MSG_STATUS_MAGIC ((u16)0xfa56)
-#define R2NET_MSG_KEEP_REQ_MAGIC ((u16)0xfa57)
-#define R2NET_MSG_KEEP_RESP_MAGIC ((u16)0xfa58)
-/*
- * "data magic" is a long version of "status magic" where the message
- * payload actually contains data to be passed in reply to certain messages
- */
-#define R2NET_MSG_DATA_MAGIC ((u16)0xfa59)
-
-/* we're delaying our quorum decision so that heartbeat will have timed
- * out truly dead nodes by the time we come around to making decisions
- * on their number */
-#define R2NET_QUORUM_DELAY_MS \
- ((r2hb_dead_threshold + 2) * R2HB_REGION_TIMEOUT_MS)
-
-/*
- * This version number represents quite a lot, unfortunately. It not
- * only represents the raw network message protocol on the wire but also
- * locking semantics of the file system using the protocol. It should
- * be somewhere else, I'm sure, but right now it isn't.
- *
- * With version 11, we separate out the filesystem locking portion. The
- * filesystem now has a major.minor version it negotiates. Version 11
- * introduces this negotiation to the r2dlm protocol, and as such the
- * version here in tcp_internal.h should not need to be bumped for
- * filesystem locking changes.
- *
- * New in version 11
- * - Negotiation of filesystem locking in the dlm join.
- *
- * New in version 10:
- * - Meta/data locks combined
- *
- * New in version 9:
- * - All votes removed
- *
- * New in version 8:
- * - Replace delete inode votes with a cluster lock
- *
- * New in version 7:
- * - DLM join domain includes the live nodemap
- *
- * New in version 6:
- * - DLM lockres remote refcount fixes.
- *
- * New in version 5:
- * - Network timeout checking protocol
- *
- * New in version 4:
- * - Remove i_generation from lock names for better stat performance.
- *
- * New in version 3:
- * - Replace dentry votes with a cluster lock
- *
- * New in version 2:
- * - full 64 bit i_size in the metadata lock lvbs
- * - introduction of "rw" lock and pushing meta/data locking down
- */
-#define R2NET_PROTOCOL_VERSION 11ULL
-struct r2net_handshake {
- __be64 protocol_version;
- __be64 connector_id;
- __be32 r2hb_heartbeat_timeout_ms;
- __be32 r2net_idle_timeout_ms;
- __be32 r2net_keepalive_delay_ms;
- __be32 r2net_reconnect_delay_ms;
-};
-
-struct r2net_node {
- /* this is never called from int/bh */
- spinlock_t nn_lock;
-
- /* set the moment an sc is allocated and a connect is started */
- struct r2net_sock_container *nn_sc;
- /* _valid is only set after the handshake passes and tx can happen */
- unsigned nn_sc_valid:1;
- /* if this is set tx just returns it */
- int nn_persistent_error;
-	/* It is only set to 1 after the idle timeout. */
- atomic_t nn_timeout;
-
- /* threads waiting for an sc to arrive wait on the wq for generation
- * to increase. it is increased when a connecting socket succeeds
- * or fails or when an accepted socket is attached. */
- wait_queue_head_t nn_sc_wq;
-
- struct idr nn_status_idr;
- struct list_head nn_status_list;
-
- /* connects are attempted from when heartbeat comes up until either hb
- * goes down, the node is unconfigured, no connect attempts succeed
- * before R2NET_CONN_IDLE_DELAY, or a connect succeeds. connect_work
- * is queued from set_nn_state both from hb up and from itself if a
- * connect attempt fails and so can be self-arming. shutdown is
- * careful to first mark the nn such that no connects will be attempted
- * before canceling delayed connect work and flushing the queue. */
- struct delayed_work nn_connect_work;
- unsigned long nn_last_connect_attempt;
-
- /* this is queued as nodes come up and is canceled when a connection is
- * established. this expiring gives up on the node and errors out
- * transmits */
- struct delayed_work nn_connect_expired;
-
- /* after we give up on a socket we wait a while before deciding
- * that it is still heartbeating and that we should do some
- * quorum work */
- struct delayed_work nn_still_up;
-};
-
-struct r2net_sock_container {
- struct kref sc_kref;
- /* the next two are valid for the life time of the sc */
- struct socket *sc_sock;
- struct r2nm_node *sc_node;
-
- /* all of these sc work structs hold refs on the sc while they are
- * queued. they should not be able to ref a freed sc. the teardown
- * race is with r2net_wq destruction in r2net_stop_listening() */
-
- /* rx and connect work are generated from socket callbacks. sc
- * shutdown removes the callbacks and then flushes the work queue */
- struct work_struct sc_rx_work;
- struct work_struct sc_connect_work;
- /* shutdown work is triggered in two ways. the simple way is
- * for a code path calls ensure_shutdown which gets a lock, removes
- * the sc from the nn, and queues the work. in this case the
- * work is single-shot. the work is also queued from a sock
- * callback, though, and in this case the work will find the sc
- * still on the nn and will call ensure_shutdown itself.. this
- * ends up triggering the shutdown work again, though nothing
- * will be done in that second iteration. so work queue teardown
- * has to be careful to remove the sc from the nn before waiting
- * on the work queue so that the shutdown work doesn't remove the
- * sc and rearm itself.
- */
- struct work_struct sc_shutdown_work;
-
- struct timer_list sc_idle_timeout;
- struct delayed_work sc_keepalive_work;
-
- unsigned sc_handshake_ok:1;
-
- struct page *sc_page;
- size_t sc_page_off;
-
- /* original handlers for the sockets */
- void (*sc_state_change)(struct sock *sk);
- void (*sc_data_ready)(struct sock *sk, int bytes);
-
- u32 sc_msg_key;
- u16 sc_msg_type;
-
-#ifdef CONFIG_DEBUG_FS
- struct list_head sc_net_debug_item;
- ktime_t sc_tv_timer;
- ktime_t sc_tv_data_ready;
- ktime_t sc_tv_advance_start;
- ktime_t sc_tv_advance_stop;
- ktime_t sc_tv_func_start;
- ktime_t sc_tv_func_stop;
-#endif
-#ifdef CONFIG_RAMSTER_FS_STATS
- ktime_t sc_tv_acquiry_total;
- ktime_t sc_tv_send_total;
- ktime_t sc_tv_status_total;
- u32 sc_send_count;
- u32 sc_recv_count;
- ktime_t sc_tv_process_total;
-#endif
- struct mutex sc_send_lock;
-};
-
-struct r2net_msg_handler {
- struct rb_node nh_node;
- u32 nh_max_len;
- u32 nh_msg_type;
- u32 nh_key;
- r2net_msg_handler_func *nh_func;
- r2net_msg_handler_func *nh_func_data;
- r2net_post_msg_handler_func
- *nh_post_func;
- struct kref nh_kref;
- struct list_head nh_unregister_item;
-};
-
-enum r2net_system_error {
- R2NET_ERR_NONE = 0,
- R2NET_ERR_NO_HNDLR,
- R2NET_ERR_OVERFLOW,
- R2NET_ERR_DIED,
- R2NET_ERR_MAX
-};
-
-struct r2net_status_wait {
- enum r2net_system_error ns_sys_status;
- s32 ns_status;
- int ns_id;
- wait_queue_head_t ns_wq;
- struct list_head ns_node_item;
-};
-
-#ifdef CONFIG_DEBUG_FS
-/* just for state dumps */
-struct r2net_send_tracking {
- struct list_head st_net_debug_item;
- struct task_struct *st_task;
- struct r2net_sock_container *st_sc;
- u32 st_id;
- u32 st_msg_type;
- u32 st_msg_key;
- u8 st_node;
- ktime_t st_sock_time;
- ktime_t st_send_time;
- ktime_t st_status_time;
-};
-#else
-struct r2net_send_tracking {
- u32 dummy;
-};
-#endif /* CONFIG_DEBUG_FS */
-
-#endif /* R2CLUSTER_TCP_INTERNAL_H */
diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
deleted file mode 100644
index d7e51e4..0000000
--- a/drivers/staging/zcache/tmem.c
+++ /dev/null
@@ -1,898 +0,0 @@
-/*
- * In-kernel transcendent memory (generic implementation)
- *
- * Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
- *
- * The primary purpose of Transcendent Memory ("tmem") is to map object-oriented
- * "handles" (triples containing a pool id, an object id, and an index) to
- * pages in a page-accessible memory (PAM). Tmem references the PAM pages via
- * an abstract "pampd" (PAM page-descriptor), which can be operated on by a
- * set of functions (pamops). Each pampd contains some representation of
- * PAGE_SIZE bytes worth of data. For those familiar with key-value stores,
- * the tmem handle is a three-level hierarchical key, and the value is always
- * reconstituted (but not necessarily stored) as PAGE_SIZE bytes and is
- * referenced in the datastore by the pampd. The hierarchy is required
- * to ensure that certain invalidation functions can be performed efficiently
- * (i.e. flush all indexes associated with this object_id, or
- * flush all objects associated with this pool).
- *
- * Tmem must support potentially millions of pages and must be able to insert,
- * find, and delete these pages at a potential frequency of thousands per
- * second concurrently across many CPUs, (and, if used with KVM, across many
- * vcpus across many guests). Tmem is tracked with a hierarchy of data
- * structures, organized by the elements in the handle-tuple: pool_id,
- * object_id, and page index. One or more "clients" (e.g. guests) each
- * provide one or more tmem_pools. Each pool, contains a hash table of
- * rb_trees of tmem_objs. Each tmem_obj contains a radix-tree-like tree
- * of pointers, with intermediate nodes called tmem_objnodes. Each leaf
- * pointer in this tree points to a pampd, which is accessible only through
- * a small set of callbacks registered by the PAM implementation (see
- * tmem_register_pamops).  Tmem only needs memory allocation for objs
- * and objnodes and this is done via a set of callbacks that must be
- * registered by the tmem host implementation (e.g. see tmem_register_hostops).
- */
-
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/atomic.h>
-#include <linux/export.h>
-#if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
-#include <linux/delay.h>
-#endif
-
-#include "tmem.h"
-
-/* data structure sentinels used for debugging... see tmem.h */
-#define POOL_SENTINEL 0x87658765
-#define OBJ_SENTINEL 0x12345678
-#define OBJNODE_SENTINEL 0xfedcba09
-
-/*
- * A tmem host implementation must use this function to register callbacks
- * for memory allocation.
- */
-static struct tmem_hostops tmem_hostops;
-
-static void tmem_objnode_tree_init(void);
-
-void tmem_register_hostops(struct tmem_hostops *m)
-{
- tmem_objnode_tree_init();
- tmem_hostops = *m;
-}
-
-/*
- * A tmem host implementation must use this function to register
- * callbacks for a page-accessible memory (PAM) implementation.
- */
-static struct tmem_pamops tmem_pamops;
-
-void tmem_register_pamops(struct tmem_pamops *m)
-{
- tmem_pamops = *m;
-}
-
-/*
- * Oids are potentially very sparse and tmem_objs may have an indeterminately
- * short life, being added and deleted at a relatively high frequency.
- * So an rb_tree is an ideal data structure to manage tmem_objs. But because
- * of the potentially huge number of tmem_objs, each pool manages a hashtable
- * of rb_trees to reduce search, insert, delete, and rebalancing time.
- * Each hashbucket also has a lock to manage concurrent access and no
- * searches, inserts, or deletions can be performed unless the lock is held.
- * As a result, care must be taken to ensure tmem routines are not called
- * recursively; the vast majority of the time, a recursive call may work
- * but a deadlock will occur a small fraction of the time due to the
- * hashbucket lock.
- *
- * The following routines manage tmem_objs. In all of these routines,
- * the hashbucket lock is already held.
- */
-
-/* Search for object==oid in pool, returns object if found. */
-static struct tmem_obj *__tmem_obj_find(struct tmem_hashbucket *hb,
- struct tmem_oid *oidp,
- struct rb_node **parent,
- struct rb_node ***link)
-{
- struct rb_node *_parent = NULL, **rbnode;
- struct tmem_obj *obj = NULL;
-
- rbnode = &hb->obj_rb_root.rb_node;
- while (*rbnode) {
- BUG_ON(RB_EMPTY_NODE(*rbnode));
- _parent = *rbnode;
- obj = rb_entry(*rbnode, struct tmem_obj,
- rb_tree_node);
- switch (tmem_oid_compare(oidp, &obj->oid)) {
- case 0: /* equal */
- goto out;
- case -1:
- rbnode = &(*rbnode)->rb_left;
- break;
- case 1:
- rbnode = &(*rbnode)->rb_right;
- break;
- }
- }
-
- if (parent)
- *parent = _parent;
- if (link)
- *link = rbnode;
- obj = NULL;
-out:
- return obj;
-}
-
-static struct tmem_obj *tmem_obj_find(struct tmem_hashbucket *hb,
- struct tmem_oid *oidp)
-{
- return __tmem_obj_find(hb, oidp, NULL, NULL);
-}
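
As an aside, the locking rule spelled out in the comment block above is easiest to see from the caller's side: hash the oid, take that bucket's lock, and only then walk the object tree. A minimal hypothetical sketch (example_obj_exists is not part of the original file; it mirrors the pattern used later by tmem_flush_page):

/* Hypothetical helper: any tmem_obj lookup happens under hb->lock. */
static bool example_obj_exists(struct tmem_pool *pool, struct tmem_oid *oidp)
{
	struct tmem_hashbucket *hb = &pool->hashbucket[tmem_oid_hash(oidp)];
	bool found;

	spin_lock(&hb->lock);
	found = tmem_obj_find(hb, oidp) != NULL;
	spin_unlock(&hb->lock);
	return found;
}
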
-
-static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *, bool);
-
-/* Free an object that has no more pampds in it. */
-static void tmem_obj_free(struct tmem_obj *obj, struct tmem_hashbucket *hb)
-{
- struct tmem_pool *pool;
-
- BUG_ON(obj == NULL);
- ASSERT_SENTINEL(obj, OBJ);
- BUG_ON(obj->pampd_count > 0);
- pool = obj->pool;
- BUG_ON(pool == NULL);
- if (obj->objnode_tree_root != NULL) /* may be "stump" with no leaves */
- tmem_pampd_destroy_all_in_obj(obj, false);
- BUG_ON(obj->objnode_tree_root != NULL);
- BUG_ON((long)obj->objnode_count != 0);
- atomic_dec(&pool->obj_count);
- BUG_ON(atomic_read(&pool->obj_count) < 0);
- INVERT_SENTINEL(obj, OBJ);
- obj->pool = NULL;
- tmem_oid_set_invalid(&obj->oid);
- rb_erase(&obj->rb_tree_node, &hb->obj_rb_root);
-}
-
-/*
- * Initialize and insert a tmem_object_root (called only if find failed).
- */
-static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
- struct tmem_pool *pool,
- struct tmem_oid *oidp)
-{
- struct rb_root *root = &hb->obj_rb_root;
- struct rb_node **new = NULL, *parent = NULL;
-
- BUG_ON(pool == NULL);
- atomic_inc(&pool->obj_count);
- obj->objnode_tree_height = 0;
- obj->objnode_tree_root = NULL;
- obj->pool = pool;
- obj->oid = *oidp;
- obj->objnode_count = 0;
- obj->pampd_count = 0;
-#ifdef CONFIG_RAMSTER
- if (tmem_pamops.new_obj != NULL)
- (*tmem_pamops.new_obj)(obj);
-#endif
- SET_SENTINEL(obj, OBJ);
-
- if (__tmem_obj_find(hb, oidp, &parent, &new))
- BUG();
-
- rb_link_node(&obj->rb_tree_node, parent, new);
- rb_insert_color(&obj->rb_tree_node, root);
-}
-
-/*
- * Tmem is managed as a set of tmem_pools with certain attributes, such as
- * "ephemeral" vs "persistent". These attributes apply to all tmem_objs
- * and all pampds that belong to a tmem_pool. A tmem_pool is created
- * or deleted relatively rarely (for example, when a filesystem is
- * mounted or unmounted).
- */
-
-/* flush all data from a pool and, optionally, free it */
-static void tmem_pool_flush(struct tmem_pool *pool, bool destroy)
-{
- struct rb_node *rbnode;
- struct tmem_obj *obj;
- struct tmem_hashbucket *hb = &pool->hashbucket[0];
- int i;
-
- BUG_ON(pool == NULL);
- for (i = 0; i < TMEM_HASH_BUCKETS; i++, hb++) {
- spin_lock(&hb->lock);
- rbnode = rb_first(&hb->obj_rb_root);
- while (rbnode != NULL) {
- obj = rb_entry(rbnode, struct tmem_obj, rb_tree_node);
- rbnode = rb_next(rbnode);
- tmem_pampd_destroy_all_in_obj(obj, true);
- tmem_obj_free(obj, hb);
- (*tmem_hostops.obj_free)(obj, pool);
- }
- spin_unlock(&hb->lock);
- }
- if (destroy)
- list_del(&pool->pool_list);
-}
-
-/*
- * A tmem_obj contains a radix-tree-like tree in which the intermediate
- * nodes are called tmem_objnodes. (The kernel lib/radix-tree.c implementation
- * is very specialized and tuned for specific uses and is not particularly
- * suited for use from this code, though some code from the core algorithms has
- * been reused, thus the copyright notices below). Each tmem_objnode contains
- * a set of pointers which point to either a set of intermediate tmem_objnodes
- * or a set of pampds.
- *
- * Portions Copyright (C) 2001 Momchil Velikov
- * Portions Copyright (C) 2001 Christoph Hellwig
- * Portions Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com>
- */
-
-struct tmem_objnode_tree_path {
- struct tmem_objnode *objnode;
- int offset;
-};
-
-/* objnode height_to_maxindex translation */
-static unsigned long tmem_objnode_tree_h2max[OBJNODE_TREE_MAX_PATH + 1];
-
-static void tmem_objnode_tree_init(void)
-{
- unsigned int ht, tmp;
-
- for (ht = 0; ht < ARRAY_SIZE(tmem_objnode_tree_h2max); ht++) {
- tmp = ht * OBJNODE_TREE_MAP_SHIFT;
- if (tmp >= OBJNODE_TREE_INDEX_BITS)
- tmem_objnode_tree_h2max[ht] = ~0UL;
- else
- tmem_objnode_tree_h2max[ht] =
- (~0UL >> (OBJNODE_TREE_INDEX_BITS - tmp - 1)) >> 1;
- }
-}
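
For a concrete sense of the table just initialized: with OBJNODE_TREE_MAP_SHIFT = 6 every level of the tree fans out 64 ways, so tmem_objnode_tree_h2max[h] works out to 64^h - 1. Height 0 can address only index 0, height 1 covers indices up to 63, height 2 up to 4095, height 3 up to 262143, and so on until the accumulated shift exceeds OBJNODE_TREE_INDEX_BITS, at which point the entry saturates at ~0UL.
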
-
-static struct tmem_objnode *tmem_objnode_alloc(struct tmem_obj *obj)
-{
- struct tmem_objnode *objnode;
-
- ASSERT_SENTINEL(obj, OBJ);
- BUG_ON(obj->pool == NULL);
- ASSERT_SENTINEL(obj->pool, POOL);
- objnode = (*tmem_hostops.objnode_alloc)(obj->pool);
- if (unlikely(objnode == NULL))
- goto out;
- objnode->obj = obj;
- SET_SENTINEL(objnode, OBJNODE);
- memset(&objnode->slots, 0, sizeof(objnode->slots));
- objnode->slots_in_use = 0;
- obj->objnode_count++;
-out:
- return objnode;
-}
-
-static void tmem_objnode_free(struct tmem_objnode *objnode)
-{
- struct tmem_pool *pool;
- int i;
-
- BUG_ON(objnode == NULL);
- for (i = 0; i < OBJNODE_TREE_MAP_SIZE; i++)
- BUG_ON(objnode->slots[i] != NULL);
- ASSERT_SENTINEL(objnode, OBJNODE);
- INVERT_SENTINEL(objnode, OBJNODE);
- BUG_ON(objnode->obj == NULL);
- ASSERT_SENTINEL(objnode->obj, OBJ);
- pool = objnode->obj->pool;
- BUG_ON(pool == NULL);
- ASSERT_SENTINEL(pool, POOL);
- objnode->obj->objnode_count--;
- objnode->obj = NULL;
- (*tmem_hostops.objnode_free)(objnode, pool);
-}
-
-/*
- * Lookup index in object and return associated pampd (or NULL if not found).
- */
-static void **__tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
-{
- unsigned int height, shift;
- struct tmem_objnode **slot = NULL;
-
- BUG_ON(obj == NULL);
- ASSERT_SENTINEL(obj, OBJ);
- BUG_ON(obj->pool == NULL);
- ASSERT_SENTINEL(obj->pool, POOL);
-
- height = obj->objnode_tree_height;
- if (index > tmem_objnode_tree_h2max[obj->objnode_tree_height])
- goto out;
- if (height == 0 && obj->objnode_tree_root) {
- slot = &obj->objnode_tree_root;
- goto out;
- }
- shift = (height-1) * OBJNODE_TREE_MAP_SHIFT;
- slot = &obj->objnode_tree_root;
- while (height > 0) {
- if (*slot == NULL)
- goto out;
- slot = (struct tmem_objnode **)
- ((*slot)->slots +
- ((index >> shift) & OBJNODE_TREE_MAP_MASK));
- shift -= OBJNODE_TREE_MAP_SHIFT;
- height--;
- }
-out:
- return slot != NULL ? (void **)slot : NULL;
-}
-
-static void *tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
-{
- struct tmem_objnode **slot;
-
- slot = (struct tmem_objnode **)__tmem_pampd_lookup_in_obj(obj, index);
- return slot != NULL ? *slot : NULL;
-}
-
-#ifdef CONFIG_RAMSTER
-static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
- void *new_pampd, bool no_free)
-{
- struct tmem_objnode **slot;
- void *ret = NULL;
-
- slot = (struct tmem_objnode **)__tmem_pampd_lookup_in_obj(obj, index);
- if ((slot != NULL) && (*slot != NULL)) {
- void *old_pampd = *(void **)slot;
- *(void **)slot = new_pampd;
- if (!no_free)
- (*tmem_pamops.free)(old_pampd, obj->pool,
- NULL, 0, false);
- ret = new_pampd;
- }
- return ret;
-}
-#endif
-
-static int tmem_pampd_add_to_obj(struct tmem_obj *obj, uint32_t index,
- void *pampd)
-{
- int ret = 0;
- struct tmem_objnode *objnode = NULL, *newnode, *slot;
- unsigned int height, shift;
- int offset = 0;
-
- /* if necessary, extend the tree to be higher */
- if (index > tmem_objnode_tree_h2max[obj->objnode_tree_height]) {
- height = obj->objnode_tree_height + 1;
- if (index > tmem_objnode_tree_h2max[height])
- while (index > tmem_objnode_tree_h2max[height])
- height++;
- if (obj->objnode_tree_root == NULL) {
- obj->objnode_tree_height = height;
- goto insert;
- }
- do {
- newnode = tmem_objnode_alloc(obj);
- if (!newnode) {
- ret = -ENOMEM;
- goto out;
- }
- newnode->slots[0] = obj->objnode_tree_root;
- newnode->slots_in_use = 1;
- obj->objnode_tree_root = newnode;
- obj->objnode_tree_height++;
- } while (height > obj->objnode_tree_height);
- }
-insert:
- slot = obj->objnode_tree_root;
- height = obj->objnode_tree_height;
- shift = (height-1) * OBJNODE_TREE_MAP_SHIFT;
- while (height > 0) {
- if (slot == NULL) {
- /* add a child objnode. */
- slot = tmem_objnode_alloc(obj);
- if (!slot) {
- ret = -ENOMEM;
- goto out;
- }
- if (objnode) {
-
- objnode->slots[offset] = slot;
- objnode->slots_in_use++;
- } else
- obj->objnode_tree_root = slot;
- }
- /* go down a level */
- offset = (index >> shift) & OBJNODE_TREE_MAP_MASK;
- objnode = slot;
- slot = objnode->slots[offset];
- shift -= OBJNODE_TREE_MAP_SHIFT;
- height--;
- }
- BUG_ON(slot != NULL);
- if (objnode) {
- objnode->slots_in_use++;
- objnode->slots[offset] = pampd;
- } else
- obj->objnode_tree_root = pampd;
- obj->pampd_count++;
-out:
- return ret;
-}
-
-static void *tmem_pampd_delete_from_obj(struct tmem_obj *obj, uint32_t index)
-{
- struct tmem_objnode_tree_path path[OBJNODE_TREE_MAX_PATH + 1];
- struct tmem_objnode_tree_path *pathp = path;
- struct tmem_objnode *slot = NULL;
- unsigned int height, shift;
- int offset;
-
- BUG_ON(obj == NULL);
- ASSERT_SENTINEL(obj, OBJ);
- BUG_ON(obj->pool == NULL);
- ASSERT_SENTINEL(obj->pool, POOL);
- height = obj->objnode_tree_height;
- if (index > tmem_objnode_tree_h2max[height])
- goto out;
- slot = obj->objnode_tree_root;
- if (height == 0 && obj->objnode_tree_root) {
- obj->objnode_tree_root = NULL;
- goto out;
- }
- shift = (height - 1) * OBJNODE_TREE_MAP_SHIFT;
- pathp->objnode = NULL;
- do {
- if (slot == NULL)
- goto out;
- pathp++;
- offset = (index >> shift) & OBJNODE_TREE_MAP_MASK;
- pathp->offset = offset;
- pathp->objnode = slot;
- slot = slot->slots[offset];
- shift -= OBJNODE_TREE_MAP_SHIFT;
- height--;
- } while (height > 0);
- if (slot == NULL)
- goto out;
- while (pathp->objnode) {
- pathp->objnode->slots[pathp->offset] = NULL;
- pathp->objnode->slots_in_use--;
- if (pathp->objnode->slots_in_use) {
- if (pathp->objnode == obj->objnode_tree_root) {
- while (obj->objnode_tree_height > 0 &&
- obj->objnode_tree_root->slots_in_use == 1 &&
- obj->objnode_tree_root->slots[0]) {
- struct tmem_objnode *to_free =
- obj->objnode_tree_root;
-
- obj->objnode_tree_root =
- to_free->slots[0];
- obj->objnode_tree_height--;
- to_free->slots[0] = NULL;
- to_free->slots_in_use = 0;
- tmem_objnode_free(to_free);
- }
- }
- goto out;
- }
- tmem_objnode_free(pathp->objnode); /* 0 slots used, free it */
- pathp--;
- }
- obj->objnode_tree_height = 0;
- obj->objnode_tree_root = NULL;
-
-out:
- if (slot != NULL)
- obj->pampd_count--;
- BUG_ON(obj->pampd_count < 0);
- return slot;
-}
-
-/* Recursively walk the objnode_tree destroying pampds and objnodes. */
-static void tmem_objnode_node_destroy(struct tmem_obj *obj,
- struct tmem_objnode *objnode,
- unsigned int ht)
-{
- int i;
-
- if (ht == 0)
- return;
- for (i = 0; i < OBJNODE_TREE_MAP_SIZE; i++) {
- if (objnode->slots[i]) {
- if (ht == 1) {
- obj->pampd_count--;
- (*tmem_pamops.free)(objnode->slots[i],
- obj->pool, NULL, 0, true);
- objnode->slots[i] = NULL;
- continue;
- }
- tmem_objnode_node_destroy(obj, objnode->slots[i], ht-1);
- tmem_objnode_free(objnode->slots[i]);
- objnode->slots[i] = NULL;
- }
- }
-}
-
-static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
- bool pool_destroy)
-{
- if (obj->objnode_tree_root == NULL)
- return;
- if (obj->objnode_tree_height == 0) {
- obj->pampd_count--;
- (*tmem_pamops.free)(obj->objnode_tree_root,
- obj->pool, NULL, 0, true);
- } else {
- tmem_objnode_node_destroy(obj, obj->objnode_tree_root,
- obj->objnode_tree_height);
- tmem_objnode_free(obj->objnode_tree_root);
- obj->objnode_tree_height = 0;
- }
- obj->objnode_tree_root = NULL;
-#ifdef CONFIG_RAMSTER
- if (tmem_pamops.free_obj != NULL)
- (*tmem_pamops.free_obj)(obj->pool, obj, pool_destroy);
-#endif
-}
-
-/*
- * Tmem is operated on by a set of well-defined actions:
- * "put", "get", "flush", "flush_object", "new pool" and "destroy pool".
- * (The tmem ABI allows for subpages and exchanges but these operations
- * are not included in this implementation.)
- *
- * These "tmem core" operations are implemented in the following functions.
- */
-
-/*
- * "Put" a page, e.g. associate the passed pampd with the passed handle.
- * Tmem_put is complicated by a corner case: What if a page with matching
- * handle already exists in tmem? To guarantee coherency, one of two
- * actions is necessary: Either the data for the page must be overwritten,
- * or the page must be "flushed" so that the data is not accessible to a
- * subsequent "get". Since these "duplicate puts" are relatively rare,
- * this implementation always flushes for simplicity.
- */
-int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
- bool raw, void *pampd_to_use)
-{
- struct tmem_obj *obj = NULL, *objfound = NULL, *objnew = NULL;
- void *pampd = NULL, *pampd_del = NULL;
- int ret = -ENOMEM;
- struct tmem_hashbucket *hb;
-
- hb = &pool->hashbucket[tmem_oid_hash(oidp)];
- spin_lock(&hb->lock);
- obj = objfound = tmem_obj_find(hb, oidp);
- if (obj != NULL) {
- pampd = tmem_pampd_lookup_in_obj(objfound, index);
- if (pampd != NULL) {
- /* if found, is a dup put, flush the old one */
- pampd_del = tmem_pampd_delete_from_obj(obj, index);
- BUG_ON(pampd_del != pampd);
- (*tmem_pamops.free)(pampd, pool, oidp, index, true);
- if (obj->pampd_count == 0) {
- objnew = obj;
- objfound = NULL;
- }
- pampd = NULL;
- }
- } else {
- obj = objnew = (*tmem_hostops.obj_alloc)(pool);
- if (unlikely(obj == NULL)) {
- ret = -ENOMEM;
- goto out;
- }
- tmem_obj_init(obj, hb, pool, oidp);
- }
- BUG_ON(obj == NULL);
- BUG_ON(((objnew != obj) && (objfound != obj)) || (objnew == objfound));
- pampd = pampd_to_use;
- BUG_ON(pampd_to_use == NULL);
- ret = tmem_pampd_add_to_obj(obj, index, pampd);
- if (unlikely(ret == -ENOMEM))
- /* may have partially built objnode tree ("stump") */
- goto delete_and_free;
- (*tmem_pamops.create_finish)(pampd, is_ephemeral(pool));
- goto out;
-
-delete_and_free:
- (void)tmem_pampd_delete_from_obj(obj, index);
- if (pampd)
- (*tmem_pamops.free)(pampd, pool, NULL, 0, true);
- if (objnew) {
- tmem_obj_free(objnew, hb);
- (*tmem_hostops.obj_free)(objnew, pool);
- }
-out:
- spin_unlock(&hb->lock);
- return ret;
-}
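
A hypothetical caller of tmem_put, to make the duplicate-put comment above concrete (the wrapper name and surrounding host code are assumptions, not part of the original file); the host is expected to have built the pampd through its registered pamops beforehand:

/* Hypothetical host-side wrapper: store one page's pampd under a handle. */
static int example_put(struct tmem_pool *pool, struct tmem_oid *oidp,
		       uint32_t index, void *pampd)
{
	/* A duplicate put for the same handle flushes the old pampd first. */
	return tmem_put(pool, oidp, index, false, pampd);
}
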
-
-#ifdef CONFIG_RAMSTER
-/*
- * For ramster only: The following routines provide a two-step sequence
- * to allow the caller to replace a pampd in the tmem data structures with
- * another pampd. Here, we lookup the passed handle and, if found, return the
- * associated pampd and object, leaving the hashbucket locked and returning
- * a reference to it. The caller is expected to immediately call the
- * matching tmem_localify_finish routine which will handles the replacement
- * and unlocks the hashbucket.
- */
-void *tmem_localify_get_pampd(struct tmem_pool *pool, struct tmem_oid *oidp,
- uint32_t index, struct tmem_obj **ret_obj,
- void **saved_hb)
-{
- struct tmem_hashbucket *hb;
- struct tmem_obj *obj = NULL;
- void *pampd = NULL;
-
- hb = &pool->hashbucket[tmem_oid_hash(oidp)];
- spin_lock(&hb->lock);
- obj = tmem_obj_find(hb, oidp);
- if (likely(obj != NULL))
- pampd = tmem_pampd_lookup_in_obj(obj, index);
- *ret_obj = obj;
- *saved_hb = (void *)hb;
- /* note, hashbucket remains locked */
- return pampd;
-}
-EXPORT_SYMBOL_GPL(tmem_localify_get_pampd);
-
-void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
- void *pampd, void *saved_hb, bool delete)
-{
- struct tmem_hashbucket *hb = (struct tmem_hashbucket *)saved_hb;
-
- BUG_ON(!spin_is_locked(&hb->lock));
- if (pampd != NULL) {
- BUG_ON(obj == NULL);
- (void)tmem_pampd_replace_in_obj(obj, index, pampd, 1);
- (*tmem_pamops.create_finish)(pampd, is_ephemeral(obj->pool));
- } else if (delete) {
- BUG_ON(obj == NULL);
- (void)tmem_pampd_delete_from_obj(obj, index);
- }
- spin_unlock(&hb->lock);
-}
-EXPORT_SYMBOL_GPL(tmem_localify_finish);
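
A sketch of how a ramster host might drive the two-step sequence described above (hypothetical function, CONFIG_RAMSTER only); the key point is that the hashbucket stays locked between the two calls:

/* Hypothetical caller: look up the old pampd, then finish the replacement. */
static void example_localify(struct tmem_pool *pool, struct tmem_oid *oidp,
			     uint32_t index, void *new_pampd)
{
	struct tmem_obj *obj;
	void *saved_hb;
	void *old_pampd;

	old_pampd = tmem_localify_get_pampd(pool, oidp, index, &obj, &saved_hb);
	/* ... host-specific work with old_pampd/new_pampd goes here,
	 * with the hashbucket still locked ... */
	tmem_localify_finish(obj, index, new_pampd, saved_hb, false);
}
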
-
-/*
- * For ramster only. Helper function to support asynchronous tmem_get.
- */
-static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
- struct tmem_pool *pool, struct tmem_oid *oidp,
- uint32_t index, bool free, char *data)
-{
- void *old_pampd = *ppampd, *new_pampd = NULL;
- bool intransit = false;
- int ret = 0;
-
- if (!is_ephemeral(pool))
- new_pampd = (*tmem_pamops.repatriate_preload)(
- old_pampd, pool, oidp, index, &intransit);
- if (intransit)
- ret = -EAGAIN;
- else if (new_pampd != NULL)
- *ppampd = new_pampd;
- /* must release the hb->lock else repatriate can't sleep */
- spin_unlock(&hb->lock);
- if (!intransit)
- ret = (*tmem_pamops.repatriate)(old_pampd, new_pampd, pool,
- oidp, index, free, data);
- if (ret == -EAGAIN) {
- /* rare I think, but should cond_resched()??? */
- usleep_range(10, 1000);
- } else if (ret == -ENOTCONN || ret == -EHOSTDOWN) {
- ret = -1;
- } else if (ret != 0 && ret != -ENOENT) {
- ret = -1;
- }
- /* note hb->lock has now been unlocked */
- return ret;
-}
-
-/*
- * For ramster only. If a page in tmem matches the handle, replace the
- * page so that any subsequent "get" gets the new page. Returns 0 if
- * there was a page to replace, else returns -1.
- */
-int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
- uint32_t index, void *new_pampd)
-{
- struct tmem_obj *obj;
- int ret = -1;
- struct tmem_hashbucket *hb;
-
- hb = &pool->hashbucket[tmem_oid_hash(oidp)];
- spin_lock(&hb->lock);
- obj = tmem_obj_find(hb, oidp);
- if (obj == NULL)
- goto out;
- new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd, 0);
- /* if we bug here, pamops wasn't properly set up for ramster */
- BUG_ON(tmem_pamops.replace_in_obj == NULL);
- ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
-out:
- spin_unlock(&hb->lock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(tmem_replace);
-#endif
-
-/*
- * "Get" a page, e.g. if a pampd can be found matching the passed handle,
- * use a pamops callback to recreate the page from the pampd with the
- * matching handle. By tmem definition, when a "get" is successful on
- * an ephemeral page, the page is "flushed", and when a "get" is successful
- * on a persistent page, the page is retained in tmem. Note that to preserve
- * coherency, "get" can never be skipped if tmem contains the data.
- * That is, if a get is done with a certain handle and fails, any
- * subsequent "get" must also fail (unless of course there is a
- * "put" done with the same handle).
- */
-int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
- char *data, size_t *sizep, bool raw, int get_and_free)
-{
- struct tmem_obj *obj;
- void *pampd = NULL;
- bool ephemeral = is_ephemeral(pool);
- int ret = -1;
- struct tmem_hashbucket *hb;
- bool free = (get_and_free == 1) || ((get_and_free == 0) && ephemeral);
- bool lock_held = false;
- void **ppampd;
-
- do {
- hb = &pool->hashbucket[tmem_oid_hash(oidp)];
- spin_lock(&hb->lock);
- lock_held = true;
- obj = tmem_obj_find(hb, oidp);
- if (obj == NULL)
- goto out;
- ppampd = __tmem_pampd_lookup_in_obj(obj, index);
- if (ppampd == NULL)
- goto out;
-#ifdef CONFIG_RAMSTER
- if ((tmem_pamops.is_remote != NULL) &&
- tmem_pamops.is_remote(*ppampd)) {
- ret = tmem_repatriate(ppampd, hb, pool, oidp,
- index, free, data);
- /* tmem_repatriate releases hb->lock */
- lock_held = false;
- *sizep = PAGE_SIZE;
- if (ret != -EAGAIN)
- goto out;
- }
-#endif
- } while (ret == -EAGAIN);
- if (free)
- pampd = tmem_pampd_delete_from_obj(obj, index);
- else
- pampd = tmem_pampd_lookup_in_obj(obj, index);
- if (pampd == NULL)
- goto out;
- if (free) {
- if (obj->pampd_count == 0) {
- tmem_obj_free(obj, hb);
- (*tmem_hostops.obj_free)(obj, pool);
- obj = NULL;
- }
- }
- if (free)
- ret = (*tmem_pamops.get_data_and_free)(
- data, sizep, raw, pampd, pool, oidp, index);
- else
- ret = (*tmem_pamops.get_data)(
- data, sizep, raw, pampd, pool, oidp, index);
- if (ret < 0)
- goto out;
- ret = 0;
-out:
- if (lock_held)
- spin_unlock(&hb->lock);
- return ret;
-}
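
To make the get_and_free parameter concrete, a hypothetical caller (names are illustrative, not from the original file): passing 0 defers to the pool's policy, so a successful get frees an ephemeral page but keeps a persistent one, while passing 1 forces the page to be freed in either case:

/* Hypothetical host-side wrapper around tmem_get using the default policy. */
static int example_get(struct tmem_pool *pool, struct tmem_oid *oidp,
		       uint32_t index, char *data, size_t *sizep)
{
	return tmem_get(pool, oidp, index, data, sizep, false, 0);
}
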
-
-/*
- * If a page in tmem matches the handle, "flush" this page from tmem such
- * that any subsequent "get" does not succeed (unless, of course, there
- * was another "put" with the same handle).
- */
-int tmem_flush_page(struct tmem_pool *pool,
- struct tmem_oid *oidp, uint32_t index)
-{
- struct tmem_obj *obj;
- void *pampd;
- int ret = -1;
- struct tmem_hashbucket *hb;
-
- hb = &pool->hashbucket[tmem_oid_hash(oidp)];
- spin_lock(&hb->lock);
- obj = tmem_obj_find(hb, oidp);
- if (obj == NULL)
- goto out;
- pampd = tmem_pampd_delete_from_obj(obj, index);
- if (pampd == NULL)
- goto out;
- (*tmem_pamops.free)(pampd, pool, oidp, index, true);
- if (obj->pampd_count == 0) {
- tmem_obj_free(obj, hb);
- (*tmem_hostops.obj_free)(obj, pool);
- }
- ret = 0;
-
-out:
- spin_unlock(&hb->lock);
- return ret;
-}
-
-/*
- * "Flush" all pages in tmem matching this oid.
- */
-int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
-{
- struct tmem_obj *obj;
- struct tmem_hashbucket *hb;
- int ret = -1;
-
- hb = &pool->hashbucket[tmem_oid_hash(oidp)];
- spin_lock(&hb->lock);
- obj = tmem_obj_find(hb, oidp);
- if (obj == NULL)
- goto out;
- tmem_pampd_destroy_all_in_obj(obj, false);
- tmem_obj_free(obj, hb);
- (*tmem_hostops.obj_free)(obj, pool);
- ret = 0;
-
-out:
- spin_unlock(&hb->lock);
- return ret;
-}
-
-/*
- * "Flush" all pages (and tmem_objs) from this tmem_pool and disable
- * all subsequent access to this tmem_pool.
- */
-int tmem_destroy_pool(struct tmem_pool *pool)
-{
- int ret = -1;
-
- if (pool == NULL)
- goto out;
- tmem_pool_flush(pool, 1);
- ret = 0;
-out:
- return ret;
-}
-
-static LIST_HEAD(tmem_global_pool_list);
-
-/*
- * Create a new tmem_pool with the provided flags; the pool id is
- * assigned separately by the tmem host implementation.
- */
-void tmem_new_pool(struct tmem_pool *pool, uint32_t flags)
-{
- int persistent = flags & TMEM_POOL_PERSIST;
- int shared = flags & TMEM_POOL_SHARED;
- struct tmem_hashbucket *hb = &pool->hashbucket[0];
- int i;
-
- for (i = 0; i < TMEM_HASH_BUCKETS; i++, hb++) {
- hb->obj_rb_root = RB_ROOT;
- spin_lock_init(&hb->lock);
- }
- INIT_LIST_HEAD(&pool->pool_list);
- atomic_set(&pool->obj_count, 0);
- SET_SENTINEL(pool, POOL);
- list_add_tail(&pool->pool_list, &tmem_global_pool_list);
- pool->persistent = persistent;
- pool->shared = shared;
-}
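
Putting the registration entry points and tmem_new_pool together, a hypothetical host bring-up might look as follows (zcache/ramster play the host role in practice; the names and ordering here are an assumption for illustration):

/* Hypothetical host bring-up: register callbacks once, then create pools. */
static struct tmem_pool example_pool;

static void example_host_init(struct tmem_hostops *hops,
			      struct tmem_pamops *pops)
{
	tmem_register_hostops(hops);	/* obj/objnode allocators */
	tmem_register_pamops(pops);	/* pampd storage callbacks */
	tmem_new_pool(&example_pool, TMEM_POOL_PERSIST);
}
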
diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
deleted file mode 100644
index d128ce2..0000000
--- a/drivers/staging/zcache/tmem.h
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * tmem.h
- *
- * Transcendent memory
- *
- * Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
- */
-
-#ifndef _TMEM_H_
-#define _TMEM_H_
-
-#include <linux/types.h>
-#include <linux/highmem.h>
-#include <linux/hash.h>
-#include <linux/atomic.h>
-
-/*
- * These are defined by the Xen<->Linux ABI so should remain consistent
- */
-#define TMEM_POOL_PERSIST 1
-#define TMEM_POOL_SHARED 2
-#define TMEM_POOL_PRECOMPRESSED 4
-#define TMEM_POOL_PAGESIZE_SHIFT 4
-#define TMEM_POOL_PAGESIZE_MASK 0xf
-#define TMEM_POOL_RESERVED_BITS 0x00ffff00
-
-/*
- * sentinels have proven very useful for debugging but can be removed
- * or disabled before final merge.
- */
-#undef SENTINELS
-#ifdef SENTINELS
-#define DECL_SENTINEL uint32_t sentinel;
-#define SET_SENTINEL(_x, _y) (_x->sentinel = _y##_SENTINEL)
-#define INVERT_SENTINEL(_x, _y) (_x->sentinel = ~_y##_SENTINEL)
-#define ASSERT_SENTINEL(_x, _y) WARN_ON(_x->sentinel != _y##_SENTINEL)
-#define ASSERT_INVERTED_SENTINEL(_x, _y) WARN_ON(_x->sentinel != ~_y##_SENTINEL)
-#else
-#define DECL_SENTINEL
-#define SET_SENTINEL(_x, _y) do { } while (0)
-#define INVERT_SENTINEL(_x, _y) do { } while (0)
-#define ASSERT_SENTINEL(_x, _y) do { } while (0)
-#define ASSERT_INVERTED_SENTINEL(_x, _y) do { } while (0)
-#endif
-
-#define ASSERT_SPINLOCK(_l) lockdep_assert_held(_l)
-
-/*
- * A pool is the highest-level data structure managed by tmem and
- * usually corresponds to a large independent set of pages such as
- * a filesystem. Each pool has an id, and certain attributes and counters.
- * It also contains a set of hash buckets, each of which contains an rbtree
- * of objects and a lock to manage concurrency within the pool.
- */
-
-#define TMEM_HASH_BUCKET_BITS 8
-#define TMEM_HASH_BUCKETS (1<<TMEM_HASH_BUCKET_BITS)
-
-struct tmem_hashbucket {
- struct rb_root obj_rb_root;
- spinlock_t lock;
-};
-
-struct tmem_pool {
- void *client; /* "up" for some clients, avoids table lookup */
- struct list_head pool_list;
- uint32_t pool_id;
- bool persistent;
- bool shared;
- atomic_t obj_count;
- atomic_t refcount;
- struct tmem_hashbucket hashbucket[TMEM_HASH_BUCKETS];
- DECL_SENTINEL
-};
-
-#define is_persistent(_p) (_p->persistent)
-#define is_ephemeral(_p) (!(_p->persistent))
-
-/*
- * An object id ("oid") is large: 192 bits (to ensure, for example, files
- * in a modern filesystem can be uniquely identified).
- */
-
-struct tmem_oid {
- uint64_t oid[3];
-};
-
-static inline void tmem_oid_set_invalid(struct tmem_oid *oidp)
-{
- oidp->oid[0] = oidp->oid[1] = oidp->oid[2] = -1UL;
-}
-
-static inline bool tmem_oid_valid(struct tmem_oid *oidp)
-{
- return oidp->oid[0] != -1UL || oidp->oid[1] != -1UL ||
- oidp->oid[2] != -1UL;
-}
-
-static inline int tmem_oid_compare(struct tmem_oid *left,
- struct tmem_oid *right)
-{
- int ret;
-
- if (left->oid[2] == right->oid[2]) {
- if (left->oid[1] == right->oid[1]) {
- if (left->oid[0] == right->oid[0])
- ret = 0;
- else if (left->oid[0] < right->oid[0])
- ret = -1;
- else
- return 1;
- } else if (left->oid[1] < right->oid[1])
- ret = -1;
- else
- ret = 1;
- } else if (left->oid[2] < right->oid[2])
- ret = -1;
- else
- ret = 1;
- return ret;
-}
-
-static inline unsigned tmem_oid_hash(struct tmem_oid *oidp)
-{
- return hash_long(oidp->oid[0] ^ oidp->oid[1] ^ oidp->oid[2],
- TMEM_HASH_BUCKET_BITS);
-}
-
-#if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
-struct tmem_xhandle {
- uint8_t client_id;
- uint8_t xh_data_cksum;
- uint16_t xh_data_size;
- uint16_t pool_id;
- struct tmem_oid oid;
- uint32_t index;
- void *extra;
-};
-
-static inline struct tmem_xhandle tmem_xhandle_fill(uint16_t client_id,
- struct tmem_pool *pool,
- struct tmem_oid *oidp,
- uint32_t index)
-{
- struct tmem_xhandle xh;
- xh.client_id = client_id;
- xh.xh_data_cksum = (uint8_t)-1;
- xh.xh_data_size = (uint16_t)-1;
- xh.pool_id = pool->pool_id;
- xh.oid = *oidp;
- xh.index = index;
- return xh;
-}
-#endif
-
-
-/*
- * A tmem_obj contains an identifier (oid), pointers to the parent
- * pool and the rb_tree to which it belongs, counters, and an ordered
- * set of pampds, structured in a radix-tree-like tree. The intermediate
- * nodes of the tree are called tmem_objnodes.
- */
-
-struct tmem_objnode;
-
-struct tmem_obj {
- struct tmem_oid oid;
- struct tmem_pool *pool;
- struct rb_node rb_tree_node;
- struct tmem_objnode *objnode_tree_root;
- unsigned int objnode_tree_height;
- unsigned long objnode_count;
- long pampd_count;
-#if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
- /*
- * for current design of ramster, all pages belonging to
- * an object reside on the same remotenode and extra is
- * used to record the number of the remotenode so a
- * flush-object operation can specify it
- */
- void *extra; /* for private use by pampd implementation */
-#endif
- DECL_SENTINEL
-};
-
-#define OBJNODE_TREE_MAP_SHIFT 6
-#define OBJNODE_TREE_MAP_SIZE (1UL << OBJNODE_TREE_MAP_SHIFT)
-#define OBJNODE_TREE_MAP_MASK (OBJNODE_TREE_MAP_SIZE-1)
-#define OBJNODE_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
-#define OBJNODE_TREE_MAX_PATH \
- (OBJNODE_TREE_INDEX_BITS/OBJNODE_TREE_MAP_SHIFT + 2)
-
-struct tmem_objnode {
- struct tmem_obj *obj;
- DECL_SENTINEL
- void *slots[OBJNODE_TREE_MAP_SIZE];
- unsigned int slots_in_use;
-};
-
-struct tmem_handle {
- struct tmem_oid oid; /* 24 bytes */
- uint32_t index;
- uint16_t pool_id;
- uint16_t client_id;
-};
-
-
-/* pampd abstract datatype methods provided by the PAM implementation */
-struct tmem_pamops {
- void (*create_finish)(void *, bool);
- int (*get_data)(char *, size_t *, bool, void *, struct tmem_pool *,
- struct tmem_oid *, uint32_t);
- int (*get_data_and_free)(char *, size_t *, bool, void *,
- struct tmem_pool *, struct tmem_oid *,
- uint32_t);
- void (*free)(void *, struct tmem_pool *,
- struct tmem_oid *, uint32_t, bool);
-#if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
- void (*new_obj)(struct tmem_obj *);
- void (*free_obj)(struct tmem_pool *, struct tmem_obj *, bool);
- void *(*repatriate_preload)(void *, struct tmem_pool *,
- struct tmem_oid *, uint32_t, bool *);
- int (*repatriate)(void *, void *, struct tmem_pool *,
- struct tmem_oid *, uint32_t, bool, void *);
- bool (*is_remote)(void *);
- int (*replace_in_obj)(void *, struct tmem_obj *);
-#endif
-};
-extern void tmem_register_pamops(struct tmem_pamops *m);
-
-/* memory allocation methods provided by the host implementation */
-struct tmem_hostops {
- struct tmem_obj *(*obj_alloc)(struct tmem_pool *);
- void (*obj_free)(struct tmem_obj *, struct tmem_pool *);
- struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
- void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
-};
-extern void tmem_register_hostops(struct tmem_hostops *m);
-
-/* core tmem accessor functions */
-extern int tmem_put(struct tmem_pool *, struct tmem_oid *, uint32_t index,
- bool, void *);
-extern int tmem_get(struct tmem_pool *, struct tmem_oid *, uint32_t index,
- char *, size_t *, bool, int);
-extern int tmem_flush_page(struct tmem_pool *, struct tmem_oid *,
- uint32_t index);
-extern int tmem_flush_object(struct tmem_pool *, struct tmem_oid *);
-extern int tmem_destroy_pool(struct tmem_pool *);
-extern void tmem_new_pool(struct tmem_pool *, uint32_t);
-#if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
-extern int tmem_replace(struct tmem_pool *, struct tmem_oid *, uint32_t index,
- void *);
-extern void *tmem_localify_get_pampd(struct tmem_pool *, struct tmem_oid *,
- uint32_t index, struct tmem_obj **,
- void **);
-extern void tmem_localify_finish(struct tmem_obj *, uint32_t index,
- void *, void *, bool);
-#endif
-#endif /* _TMEM_H_ */
diff --git a/drivers/staging/zcache/zbud.c b/drivers/staging/zcache/zbud.c
deleted file mode 100644
index 6cda4ed..0000000
--- a/drivers/staging/zcache/zbud.c
+++ /dev/null
@@ -1,1066 +0,0 @@
-/*
- * zbud.c - Compression buddies allocator
- *
- * Copyright (c) 2010-2012, Dan Magenheimer, Oracle Corp.
- *
- * Compression buddies ("zbud") provides for efficiently packing two
- * (or, possibly in the future, more) compressed pages ("zpages") into
- * a single "raw" pageframe and for tracking both zpages and pageframes
- * so that whole pageframes can be easily reclaimed in LRU-like order.
- * It is designed to be used in conjunction with transcendent memory
- * ("tmem"); for example separate LRU lists are maintained for persistent
- * vs. ephemeral pages.
- *
- * A zbudpage is an overlay for a struct page and thus each zbudpage
- * refers to a physical pageframe of RAM. When the caller passes a
- * struct page from the kernel's page allocator, zbud "transforms" it
- * to a zbudpage which sets/uses a different set of fields than the
- * struct-page and thus must "untransform" it back by reinitializing
- * certain fields before the struct-page can be freed. The fields
- * of a zbudpage include a page lock for controlling access to the
- * corresponding pageframe, and there is a size field for each zpage.
- * Each zbudpage also lives on two linked lists: a "budlist" which is
- * used to support efficient buddying of zpages; and an "lru" which
- * is used for reclaiming pageframes in approximately least-recently-used
- * order.
- *
- * A zbudpageframe is a pageframe divided up into aligned 64-byte "chunks"
- * which contain the compressed data for zero, one, or two zbuds. Contained
- * within the compressed data is a tmem_handle, which is a key that allows
- * the same data to be found via the tmem interface so the zpage can
- * be invalidated (for ephemeral pages) or repatriated to the swap cache
- * (for persistent pages). The contents of a zbudpageframe must never
- * be accessed without holding the page lock for the corresponding
- * zbudpage and, to accommodate highmem machines, the contents may
- * only be examined or changed when kmapped. Thus, when in use, a
- * kmapped zbudpageframe is referred to in the zbud code as "void *zbpg".
- *
- * Note that the term "zbud" refers to the combination of a zpage and
- * a tmem_handle that is stored as one of possibly two "buddied" zpages;
- * it also generically refers to this allocator... sorry for any confusion.
- *
- * A zbudref is a pointer to a struct zbudpage (which can be cast to a
- * struct page), with the LSB either cleared or set to indicate, respectively,
- * the first or second zpage in the zbudpageframe. Since a zbudref can be
- * cast to a pointer, it is used as the tmem "pampd" pointer and uniquely
- * references a stored tmem page and so is the only zbud data structure
- * externally visible to zbud.c/zbud.h.
- *
- * Since we wish to reclaim entire pageframes but zpages may be randomly
- * added to and deleted from any given pageframe, we approximate LRU by
- * promoting a pageframe to MRU when a zpage is added to it, but
- * leaving it at the current place in the list when a zpage is deleted
- * from it. As a side effect, zpages that are difficult to buddy (e.g.
- * very large pages) will be reclaimed faster than average, which seems
- * reasonable.
- *
- * In the current implementation, no more than two zpages may be stored in
- * any pageframe and no zpage ever crosses a pageframe boundary. While
- * other zpage allocation mechanisms may allow greater density, this two
- * zpage-per-pageframe limit both ensures simple reclaim of pageframes
- * (including garbage collection of references to the contents of those
- * pageframes from tmem data structures) AND avoids the need for compaction.
- * With additional complexity, zbud could be modified to support storing
- * up to three zpages per pageframe or, to handle larger average zpages,
- * up to three zpages per pair of pageframes, but it is not clear if the
- * additional complexity would be worth it. So consider it an exercise
- * for future developers.
- *
- * Note also that zbud does no page allocation or freeing. This is so
- * that the caller has complete control over and, for accounting, visibility
- * into if/when pages are allocated and freed.
- *
- * Finally, note that zbud limits the size of zpages it can store; the
- * caller must check the zpage size with zbud_max_buddy_size before
- * storing it, else BUGs will result. User beware.
- */
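A minimal caller-side sketch of the contract described above, using only the routines declared in zbud.h; the helper name store_zpage_sketch and the GFP flags are illustrative only, and the real callers (in zcache-main.c) defer zbud_create_finish() until tmem has linked the new pampd:

/* Sketch: store one compressed zpage of clen bytes, kmapped at cdata. */
static struct zbudref *store_zpage_sketch(struct tmem_handle *th, bool eph,
					  void *cdata, unsigned int clen)
{
	struct zbudref *zref;
	struct page *newpage;

	if (clen > zbud_max_buddy_size())	/* mandatory size check */
		return NULL;
	/* first try to buddy up with an existing half-full zbudpage */
	zref = zbud_match_prep(th, eph, cdata, clen);
	if (zref != NULL)
		goto finish;
	/* zbud never allocates pages itself; the caller supplies one */
	newpage = alloc_page(GFP_KERNEL);
	zref = zbud_create_prep(th, eph, cdata, clen, newpage);
	if (zref == NULL) {
		if (newpage)
			__free_page(newpage);
		return NULL;
	}
finish:
	zbud_create_finish(zref, eph);	/* drop the "unevictable" hold */
	return zref;
}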
-
-#include <linux/module.h>
-#include <linux/highmem.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/pagemap.h>
-#include <linux/atomic.h>
-#include <linux/bug.h>
-#include "tmem.h"
-#include "zcache.h"
-#include "zbud.h"
-
-/*
- * We need to ensure that a struct zbudpage is never larger than a
- * struct page. This is checked with a BUG_ON in zbud_init.
- *
- * The unevictable field indicates that a zbud is being added to the
- * zbudpage. Since this is a two-phase process (due to tmem locking),
- * this field locks the zbudpage against eviction when a zbud match
- * or creation is in process. Since this addition process may occur
- * in parallel for two zbuds in one zbudpage, the field is a counter
- * that must not exceed two.
- */
-struct zbudpage {
- union {
- struct page page;
- struct {
- unsigned long space_for_flags;
- struct {
- unsigned zbud0_size:PAGE_SHIFT;
- unsigned zbud1_size:PAGE_SHIFT;
- unsigned unevictable:2;
- };
- struct list_head budlist;
- struct list_head lru;
- };
- };
-};
-#if (PAGE_SHIFT * 2) + 2 > BITS_PER_LONG
-#error "zbud won't work for this arch, PAGE_SIZE is too large"
-#endif
-
-struct zbudref {
- union {
- struct zbudpage *zbudpage;
- unsigned long zbudref;
- };
-};
-
-#define CHUNK_SHIFT 6
-#define CHUNK_SIZE (1 << CHUNK_SHIFT)
-#define CHUNK_MASK (~(CHUNK_SIZE-1))
-#define NCHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
-#define MAX_CHUNK (NCHUNKS-1)
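Concretely, assuming 4 KiB pages (PAGE_SHIFT == 12): CHUNK_SIZE is 64 bytes, NCHUNKS is 64 and MAX_CHUNK is 63, so zbud_max_size() below evaluates to 63 * 64 = 4032 bytes; with a 32-byte struct tmem_handle (24-byte oid plus index, pool_id and client_id, assuming no padding), zbud_max_buddy_size() comes out to 4000 bytes.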
-
-/*
- * The following functions deal with the difference between struct
- * page and struct zbudpage. Note the hack of using the pageflags
- * from struct page; this is to avoid duplicating all the complex
- * pageflag macros.
- */
-static inline void zbudpage_spin_lock(struct zbudpage *zbudpage)
-{
- struct page *page = (struct page *)zbudpage;
-
- while (unlikely(test_and_set_bit_lock(PG_locked, &page->flags))) {
- do {
- cpu_relax();
- } while (test_bit(PG_locked, &page->flags));
- }
-}
-
-static inline void zbudpage_spin_unlock(struct zbudpage *zbudpage)
-{
- struct page *page = (struct page *)zbudpage;
-
- clear_bit(PG_locked, &page->flags);
-}
-
-static inline int zbudpage_spin_trylock(struct zbudpage *zbudpage)
-{
- return trylock_page((struct page *)zbudpage);
-}
-
-static inline int zbudpage_is_locked(struct zbudpage *zbudpage)
-{
- return PageLocked((struct page *)zbudpage);
-}
-
-static inline void *kmap_zbudpage_atomic(struct zbudpage *zbudpage)
-{
- return kmap_atomic((struct page *)zbudpage);
-}
-
-/*
- * A dying zbudpage is an ephemeral page in the process of being evicted.
- * Any data contained in the zbudpage is invalid and we are just waiting for
- * the tmem pampds to be invalidated before freeing the page
- */
-static inline int zbudpage_is_dying(struct zbudpage *zbudpage)
-{
- struct page *page = (struct page *)zbudpage;
-
- return test_bit(PG_reclaim, &page->flags);
-}
-
-static inline void zbudpage_set_dying(struct zbudpage *zbudpage)
-{
- struct page *page = (struct page *)zbudpage;
-
- set_bit(PG_reclaim, &page->flags);
-}
-
-static inline void zbudpage_clear_dying(struct zbudpage *zbudpage)
-{
- struct page *page = (struct page *)zbudpage;
-
- clear_bit(PG_reclaim, &page->flags);
-}
-
-/*
- * A zombie zbudpage is a persistent page in the process of being evicted.
- * The data contained in the zbudpage is valid and we are just waiting for
- * the tmem pampds to be invalidated before freeing the page
- */
-static inline int zbudpage_is_zombie(struct zbudpage *zbudpage)
-{
- struct page *page = (struct page *)zbudpage;
-
- return test_bit(PG_dirty, &page->flags);
-}
-
-static inline void zbudpage_set_zombie(struct zbudpage *zbudpage)
-{
- struct page *page = (struct page *)zbudpage;
-
- set_bit(PG_dirty, &page->flags);
-}
-
-static inline void zbudpage_clear_zombie(struct zbudpage *zbudpage)
-{
- struct page *page = (struct page *)zbudpage;
-
- clear_bit(PG_dirty, &page->flags);
-}
-
-static inline void kunmap_zbudpage_atomic(void *zbpg)
-{
- kunmap_atomic(zbpg);
-}
-
-/*
- * zbud "translation" and helper functions
- */
-
-static inline struct zbudpage *zbudref_to_zbudpage(struct zbudref *zref)
-{
- unsigned long zbud = (unsigned long)zref;
- zbud &= ~1UL;
- return (struct zbudpage *)zbud;
-}
-
-static inline struct zbudref *zbudpage_to_zbudref(struct zbudpage *zbudpage,
- unsigned budnum)
-{
- unsigned long zbud = (unsigned long)zbudpage;
- BUG_ON(budnum > 1);
- zbud |= budnum;
- return (struct zbudref *)zbud;
-}
-
-static inline int zbudref_budnum(struct zbudref *zbudref)
-{
- unsigned long zbud = (unsigned long)zbudref;
- return zbud & 1UL;
-}
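For example, given a zbudpage at the hypothetical address 0xffffea0004000040, zbudpage_to_zbudref(zbudpage, 1) encodes to 0xffffea0004000041; zbudref_budnum() recovers the 1 and zbudref_to_zbudpage() masks it off again. The encoding works because struct zbudpage pointers are always at least word-aligned, leaving the low bit free.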
-
-static inline unsigned zbud_max_size(void)
-{
- return MAX_CHUNK << CHUNK_SHIFT;
-}
-
-static inline unsigned zbud_size_to_chunks(unsigned size)
-{
- BUG_ON(size == 0 || size > zbud_max_size());
- return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
-}
-
-/* can only be used between kmap_zbudpage_atomic/kunmap_zbudpage_atomic! */
-static inline char *zbud_data(void *zbpg,
- unsigned budnum, unsigned size)
-{
- char *p;
-
- BUG_ON(size == 0 || size > zbud_max_size());
- p = (char *)zbpg;
- if (budnum == 1)
- p += PAGE_SIZE - ((size + CHUNK_SIZE - 1) & CHUNK_MASK);
- return p;
-}
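As a worked example on 4 KiB pages: a 1000-byte compressed zpage plus its 32-byte tmem_handle gives size = 1032, i.e. 17 chunks; for budnum 1, zbud_data() rounds 1032 up to 1088 and returns offset 4096 - 1088 = 3008, while budnum 0 always starts at offset 0.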
-
-/*
- * These are all informative and exposed through debugfs... except for
- * the arrays... anyone know how to do that? To avoid confusion for
- * debugfs viewers, some of these should also be atomic_long_t, but
- * I don't know how to expose atomics via debugfs either...
- */
-static ssize_t zbud_eph_pageframes;
-static ssize_t zbud_pers_pageframes;
-static ssize_t zbud_eph_zpages;
-static ssize_t zbud_pers_zpages;
-static u64 zbud_eph_zbytes;
-static u64 zbud_pers_zbytes;
-static ssize_t zbud_eph_evicted_pageframes;
-static ssize_t zbud_pers_evicted_pageframes;
-static ssize_t zbud_eph_cumul_zpages;
-static ssize_t zbud_pers_cumul_zpages;
-static u64 zbud_eph_cumul_zbytes;
-static u64 zbud_pers_cumul_zbytes;
-static ssize_t zbud_eph_cumul_chunk_counts[NCHUNKS];
-static ssize_t zbud_pers_cumul_chunk_counts[NCHUNKS];
-static ssize_t zbud_eph_buddied_count;
-static ssize_t zbud_pers_buddied_count;
-static ssize_t zbud_eph_unbuddied_count;
-static ssize_t zbud_pers_unbuddied_count;
-static ssize_t zbud_eph_zombie_count;
-static ssize_t zbud_pers_zombie_count;
-static atomic_t zbud_eph_zombie_atomic;
-static atomic_t zbud_pers_zombie_atomic;
-
-#ifdef CONFIG_DEBUG_FS
-#include <linux/debugfs.h>
-#define zdfs debugfs_create_size_t
-#define zdfs64 debugfs_create_u64
-static int zbud_debugfs_init(void)
-{
- struct dentry *root = debugfs_create_dir("zbud", NULL);
- if (root == NULL)
- return -ENXIO;
-
- /*
- * would be nice to dump the sizes of the unbuddied
- * arrays, like was done with sysfs, but it doesn't
- * look like debugfs is flexible enough to do that
- */
- zdfs64("eph_zbytes", S_IRUGO, root, &zbud_eph_zbytes);
- zdfs64("eph_cumul_zbytes", S_IRUGO, root, &zbud_eph_cumul_zbytes);
- zdfs64("pers_zbytes", S_IRUGO, root, &zbud_pers_zbytes);
- zdfs64("pers_cumul_zbytes", S_IRUGO, root, &zbud_pers_cumul_zbytes);
- zdfs("eph_cumul_zpages", S_IRUGO, root, &zbud_eph_cumul_zpages);
- zdfs("eph_evicted_pageframes", S_IRUGO, root,
- &zbud_eph_evicted_pageframes);
- zdfs("eph_zpages", S_IRUGO, root, &zbud_eph_zpages);
- zdfs("eph_pageframes", S_IRUGO, root, &zbud_eph_pageframes);
- zdfs("eph_buddied_count", S_IRUGO, root, &zbud_eph_buddied_count);
- zdfs("eph_unbuddied_count", S_IRUGO, root, &zbud_eph_unbuddied_count);
- zdfs("pers_cumul_zpages", S_IRUGO, root, &zbud_pers_cumul_zpages);
- zdfs("pers_evicted_pageframes", S_IRUGO, root,
- &zbud_pers_evicted_pageframes);
- zdfs("pers_zpages", S_IRUGO, root, &zbud_pers_zpages);
- zdfs("pers_pageframes", S_IRUGO, root, &zbud_pers_pageframes);
- zdfs("pers_buddied_count", S_IRUGO, root, &zbud_pers_buddied_count);
- zdfs("pers_unbuddied_count", S_IRUGO, root, &zbud_pers_unbuddied_count);
- zdfs("pers_zombie_count", S_IRUGO, root, &zbud_pers_zombie_count);
- return 0;
-}
-#undef zdfs
-#undef zdfs64
-#else
-static inline int zbud_debugfs_init(void)
-{
- return 0;
-}
-#endif
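Assuming debugfs is mounted in the usual place, these counters then show up under /sys/kernel/debug/zbud/, e.g. "cat /sys/kernel/debug/zbud/eph_pageframes" reports the current number of pageframes holding ephemeral zpages.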
-
-/* protects the buddied list and all unbuddied lists */
-static DEFINE_SPINLOCK(zbud_eph_lists_lock);
-static DEFINE_SPINLOCK(zbud_pers_lists_lock);
-
-struct zbud_unbuddied {
- struct list_head list;
- unsigned count;
-};
-
-/* list N contains pages with N chunks USED and NCHUNKS-N unused */
-/* element 0 is never used but optimizing that isn't worth it */
-static struct zbud_unbuddied zbud_eph_unbuddied[NCHUNKS];
-static struct zbud_unbuddied zbud_pers_unbuddied[NCHUNKS];
-static LIST_HEAD(zbud_eph_lru_list);
-static LIST_HEAD(zbud_pers_lru_list);
-static LIST_HEAD(zbud_eph_buddied_list);
-static LIST_HEAD(zbud_pers_buddied_list);
-static LIST_HEAD(zbud_eph_zombie_list);
-static LIST_HEAD(zbud_pers_zombie_list);
-
-/*
- * Given a struct page, transform it to a zbudpage so that it can be
- * used by zbud and initialize fields as necessary.
- */
-static inline struct zbudpage *zbud_init_zbudpage(struct page *page, bool eph)
-{
- struct zbudpage *zbudpage = (struct zbudpage *)page;
-
- BUG_ON(page == NULL);
- INIT_LIST_HEAD(&zbudpage->budlist);
- INIT_LIST_HEAD(&zbudpage->lru);
- zbudpage->zbud0_size = 0;
- zbudpage->zbud1_size = 0;
- zbudpage->unevictable = 0;
- if (eph)
- zbud_eph_pageframes++;
- else
- zbud_pers_pageframes++;
- return zbudpage;
-}
-
-/* "Transform" a zbudpage back to a struct page suitable to free. */
-static inline struct page *zbud_unuse_zbudpage(struct zbudpage *zbudpage,
- bool eph)
-{
- struct page *page = (struct page *)zbudpage;
-
- BUG_ON(!list_empty(&zbudpage->budlist));
- BUG_ON(!list_empty(&zbudpage->lru));
- BUG_ON(zbudpage->zbud0_size != 0);
- BUG_ON(zbudpage->zbud1_size != 0);
- BUG_ON(!PageLocked(page));
- BUG_ON(zbudpage->unevictable != 0);
- BUG_ON(zbudpage_is_dying(zbudpage));
- BUG_ON(zbudpage_is_zombie(zbudpage));
- if (eph)
- zbud_eph_pageframes--;
- else
- zbud_pers_pageframes--;
- zbudpage_spin_unlock(zbudpage);
- page_mapcount_reset(page);
- init_page_count(page);
- page->index = 0;
- return page;
-}
-
-/* Mark a zbud as unused and do accounting */
-static inline void zbud_unuse_zbud(struct zbudpage *zbudpage,
- int budnum, bool eph)
-{
- unsigned size;
-
- BUG_ON(!zbudpage_is_locked(zbudpage));
- if (budnum == 0) {
- size = zbudpage->zbud0_size;
- zbudpage->zbud0_size = 0;
- } else {
- size = zbudpage->zbud1_size;
- zbudpage->zbud1_size = 0;
- }
- if (eph) {
- zbud_eph_zbytes -= size;
- zbud_eph_zpages--;
- } else {
- zbud_pers_zbytes -= size;
- zbud_pers_zpages--;
- }
-}
-
-/*
- * Given a zbudpage/budnum/size, a tmem handle, and a kmapped pointer
- * to some data, set up the zbud appropriately including data copying
- * and accounting. Note that if cdata is NULL, the data copying is
- * skipped. (This is useful for lazy writes such as for RAMster.)
- */
-static void zbud_init_zbud(struct zbudpage *zbudpage, struct tmem_handle *th,
- bool eph, void *cdata,
- unsigned budnum, unsigned size)
-{
- char *to;
- void *zbpg;
- struct tmem_handle *to_th;
- unsigned nchunks = zbud_size_to_chunks(size);
-
- BUG_ON(!zbudpage_is_locked(zbudpage));
- zbpg = kmap_zbudpage_atomic(zbudpage);
- to = zbud_data(zbpg, budnum, size);
- to_th = (struct tmem_handle *)to;
- to_th->index = th->index;
- to_th->oid = th->oid;
- to_th->pool_id = th->pool_id;
- to_th->client_id = th->client_id;
- to += sizeof(struct tmem_handle);
- if (cdata != NULL)
- memcpy(to, cdata, size - sizeof(struct tmem_handle));
- kunmap_zbudpage_atomic(zbpg);
- if (budnum == 0)
- zbudpage->zbud0_size = size;
- else
- zbudpage->zbud1_size = size;
- if (eph) {
- zbud_eph_cumul_chunk_counts[nchunks]++;
- zbud_eph_zpages++;
- zbud_eph_cumul_zpages++;
- zbud_eph_zbytes += size;
- zbud_eph_cumul_zbytes += size;
- } else {
- zbud_pers_cumul_chunk_counts[nchunks]++;
- zbud_pers_zpages++;
- zbud_pers_cumul_zpages++;
- zbud_pers_zbytes += size;
- zbud_pers_cumul_zbytes += size;
- }
-}
-
-/*
- * Given a locked dying zbudpage, read out the tmem handles from the data,
- * unlock the page, then use the handles to tell tmem to flush out its
- * references
- */
-static void zbud_evict_tmem(struct zbudpage *zbudpage)
-{
- int i, j;
- uint32_t pool_id[2], client_id[2];
- uint32_t index[2];
- struct tmem_oid oid[2];
- struct tmem_pool *pool;
- void *zbpg;
- struct tmem_handle *th;
- unsigned size;
-
- /* read out the tmem handles from the data and set aside */
- zbpg = kmap_zbudpage_atomic(zbudpage);
- for (i = 0, j = 0; i < 2; i++) {
- size = (i == 0) ? zbudpage->zbud0_size : zbudpage->zbud1_size;
- if (size) {
- th = (struct tmem_handle *)zbud_data(zbpg, i, size);
- client_id[j] = th->client_id;
- pool_id[j] = th->pool_id;
- oid[j] = th->oid;
- index[j] = th->index;
- j++;
- zbud_unuse_zbud(zbudpage, i, true);
- }
- }
- kunmap_zbudpage_atomic(zbpg);
- zbudpage_spin_unlock(zbudpage);
- /* zbudpage is now an unlocked dying... tell tmem to flush pointers */
- for (i = 0; i < j; i++) {
- pool = zcache_get_pool_by_id(client_id[i], pool_id[i]);
- if (pool != NULL) {
- tmem_flush_page(pool, &oid[i], index[i]);
- zcache_put_pool(pool);
- }
- }
-}
-
-/*
- * Externally callable zbud handling routines.
- */
-
-/*
- * Return the maximum size compressed page that can be stored (secretly
- * setting aside space for the tmem handle).
- */
-unsigned int zbud_max_buddy_size(void)
-{
- return zbud_max_size() - sizeof(struct tmem_handle);
-}
-
-/*
- * Given a zbud reference, free the corresponding zbud from all lists,
- * mark it as unused, do accounting, and if the freeing of the zbud
- * frees up an entire pageframe, return it to the caller (else NULL).
- */
-struct page *zbud_free_and_delist(struct zbudref *zref, bool eph,
- unsigned int *zsize, unsigned int *zpages)
-{
- unsigned long budnum = zbudref_budnum(zref);
- struct zbudpage *zbudpage = zbudref_to_zbudpage(zref);
- struct page *page = NULL;
- unsigned chunks, bud_size, other_bud_size;
- spinlock_t *lists_lock =
- eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
- struct zbud_unbuddied *unbud =
- eph ? zbud_eph_unbuddied : zbud_pers_unbuddied;
-
-
- spin_lock(lists_lock);
- zbudpage_spin_lock(zbudpage);
- if (zbudpage_is_dying(zbudpage)) {
- /* ignore dying zbudpage... see zbud_evict_pageframe_lru() */
- zbudpage_spin_unlock(zbudpage);
- spin_unlock(lists_lock);
- *zpages = 0;
- *zsize = 0;
- goto out;
- }
- if (budnum == 0) {
- bud_size = zbudpage->zbud0_size;
- other_bud_size = zbudpage->zbud1_size;
- } else {
- bud_size = zbudpage->zbud1_size;
- other_bud_size = zbudpage->zbud0_size;
- }
- *zsize = bud_size - sizeof(struct tmem_handle);
- *zpages = 1;
- zbud_unuse_zbud(zbudpage, budnum, eph);
- if (other_bud_size == 0) { /* was unbuddied: unlist and free */
-		chunks = zbud_size_to_chunks(bud_size);
- if (zbudpage_is_zombie(zbudpage)) {
- if (eph)
-			zbud_eph_zombie_count =
- atomic_dec_return(&zbud_eph_zombie_atomic);
- else
- zbud_pers_zombie_count =
- atomic_dec_return(&zbud_pers_zombie_atomic);
- zbudpage_clear_zombie(zbudpage);
- } else {
- BUG_ON(list_empty(&unbud[chunks].list));
- list_del_init(&zbudpage->budlist);
- unbud[chunks].count--;
- }
- list_del_init(&zbudpage->lru);
- spin_unlock(lists_lock);
- if (eph)
- zbud_eph_unbuddied_count--;
- else
- zbud_pers_unbuddied_count--;
- page = zbud_unuse_zbudpage(zbudpage, eph);
- } else { /* was buddied: move remaining buddy to unbuddied list */
-		chunks = zbud_size_to_chunks(other_bud_size);
- if (!zbudpage_is_zombie(zbudpage)) {
- list_del_init(&zbudpage->budlist);
- list_add_tail(&zbudpage->budlist, &unbud[chunks].list);
- unbud[chunks].count++;
- }
- if (eph) {
- zbud_eph_buddied_count--;
- zbud_eph_unbuddied_count++;
- } else {
- zbud_pers_unbuddied_count++;
- zbud_pers_buddied_count--;
- }
- /* don't mess with lru, no need to move it */
- zbudpage_spin_unlock(zbudpage);
- spin_unlock(lists_lock);
- }
-out:
- return page;
-}
-
-/*
- * Given a tmem handle, and a kmapped pointer to compressed data of
- * the given size, try to find an unbuddied zbudpage in which to
- * create a zbud. If found, put it there, mark the zbudpage unevictable,
- * and return a zbudref to it. Else return NULL.
- */
-struct zbudref *zbud_match_prep(struct tmem_handle *th, bool eph,
- void *cdata, unsigned size)
-{
- struct zbudpage *zbudpage = NULL, *zbudpage2;
- unsigned long budnum = 0UL;
- unsigned nchunks;
- int i, found_good_buddy = 0;
- spinlock_t *lists_lock =
- eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
- struct zbud_unbuddied *unbud =
- eph ? zbud_eph_unbuddied : zbud_pers_unbuddied;
-
- size += sizeof(struct tmem_handle);
- nchunks = zbud_size_to_chunks(size);
- for (i = MAX_CHUNK - nchunks + 1; i > 0; i--) {
- spin_lock(lists_lock);
- if (!list_empty(&unbud[i].list)) {
- list_for_each_entry_safe(zbudpage, zbudpage2,
- &unbud[i].list, budlist) {
- if (zbudpage_spin_trylock(zbudpage)) {
- found_good_buddy = i;
- goto found_unbuddied;
- }
- }
- }
- spin_unlock(lists_lock);
- }
- zbudpage = NULL;
- goto out;
-
-found_unbuddied:
- BUG_ON(!zbudpage_is_locked(zbudpage));
- BUG_ON(!((zbudpage->zbud0_size == 0) ^ (zbudpage->zbud1_size == 0)));
- if (zbudpage->zbud0_size == 0)
- budnum = 0UL;
- else if (zbudpage->zbud1_size == 0)
- budnum = 1UL;
- list_del_init(&zbudpage->budlist);
- if (eph) {
- list_add_tail(&zbudpage->budlist, &zbud_eph_buddied_list);
- unbud[found_good_buddy].count--;
- zbud_eph_unbuddied_count--;
- zbud_eph_buddied_count++;
- /* "promote" raw zbudpage to most-recently-used */
- list_del_init(&zbudpage->lru);
- list_add_tail(&zbudpage->lru, &zbud_eph_lru_list);
- } else {
- list_add_tail(&zbudpage->budlist, &zbud_pers_buddied_list);
- unbud[found_good_buddy].count--;
- zbud_pers_unbuddied_count--;
- zbud_pers_buddied_count++;
- /* "promote" raw zbudpage to most-recently-used */
- list_del_init(&zbudpage->lru);
- list_add_tail(&zbudpage->lru, &zbud_pers_lru_list);
- }
- zbud_init_zbud(zbudpage, th, eph, cdata, budnum, size);
- zbudpage->unevictable++;
- BUG_ON(zbudpage->unevictable == 3);
- zbudpage_spin_unlock(zbudpage);
- spin_unlock(lists_lock);
-out:
- return zbudpage_to_zbudref(zbudpage, budnum);
-
-}
-
-/*
- * Given a tmem handle, and a kmapped pointer to compressed data of
- * the given size, and a newly allocated struct page, create an unevictable
- * zbud in that new page and return a zbudref to it.
- */
-struct zbudref *zbud_create_prep(struct tmem_handle *th, bool eph,
- void *cdata, unsigned size,
- struct page *newpage)
-{
- struct zbudpage *zbudpage;
- unsigned long budnum = 0;
- unsigned nchunks;
- spinlock_t *lists_lock =
- eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
- struct zbud_unbuddied *unbud =
- eph ? zbud_eph_unbuddied : zbud_pers_unbuddied;
-
-#if 0
- /* this may be worth it later to support decompress-in-place? */
- static unsigned long counter;
- budnum = counter++ & 1; /* alternate using zbud0 and zbud1 */
-#endif
-
- if (size > zbud_max_buddy_size())
- return NULL;
- if (newpage == NULL)
- return NULL;
-
- size += sizeof(struct tmem_handle);
-	nchunks = zbud_size_to_chunks(size);
- spin_lock(lists_lock);
- zbudpage = zbud_init_zbudpage(newpage, eph);
- zbudpage_spin_lock(zbudpage);
- list_add_tail(&zbudpage->budlist, &unbud[nchunks].list);
- if (eph) {
- list_add_tail(&zbudpage->lru, &zbud_eph_lru_list);
- zbud_eph_unbuddied_count++;
- } else {
- list_add_tail(&zbudpage->lru, &zbud_pers_lru_list);
- zbud_pers_unbuddied_count++;
- }
- unbud[nchunks].count++;
- zbud_init_zbud(zbudpage, th, eph, cdata, budnum, size);
- zbudpage->unevictable++;
- BUG_ON(zbudpage->unevictable == 3);
- zbudpage_spin_unlock(zbudpage);
- spin_unlock(lists_lock);
- return zbudpage_to_zbudref(zbudpage, budnum);
-}
-
-/*
- * Finish creation of a zbud by marking it evictable, assuming another
- * zbud isn't being created in the same zbudpage in parallel.
- */
-void zbud_create_finish(struct zbudref *zref, bool eph)
-{
- struct zbudpage *zbudpage = zbudref_to_zbudpage(zref);
- spinlock_t *lists_lock =
- eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
-
- spin_lock(lists_lock);
- zbudpage_spin_lock(zbudpage);
- BUG_ON(zbudpage_is_dying(zbudpage));
- zbudpage->unevictable--;
- BUG_ON((int)zbudpage->unevictable < 0);
- zbudpage_spin_unlock(zbudpage);
- spin_unlock(lists_lock);
-}
-
-/*
- * Given a zbudref and a struct page, decompress the data from
- * the zbud into the physical page represented by the struct page
- * by upcalling to zcache_decompress
- */
-int zbud_decompress(struct page *data_page, struct zbudref *zref, bool eph,
- void (*decompress)(char *, unsigned int, char *))
-{
- struct zbudpage *zbudpage = zbudref_to_zbudpage(zref);
- unsigned long budnum = zbudref_budnum(zref);
- void *zbpg;
- char *to_va, *from_va;
- unsigned size;
- int ret = -1;
- spinlock_t *lists_lock =
- eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
-
- spin_lock(lists_lock);
- zbudpage_spin_lock(zbudpage);
- if (zbudpage_is_dying(zbudpage)) {
- /* ignore dying zbudpage... see zbud_evict_pageframe_lru() */
- goto out;
- }
- zbpg = kmap_zbudpage_atomic(zbudpage);
- to_va = kmap_atomic(data_page);
- if (budnum == 0)
- size = zbudpage->zbud0_size;
- else
- size = zbudpage->zbud1_size;
- BUG_ON(size == 0 || size > zbud_max_size());
- from_va = zbud_data(zbpg, budnum, size);
- from_va += sizeof(struct tmem_handle);
- size -= sizeof(struct tmem_handle);
- decompress(from_va, size, to_va);
- kunmap_atomic(to_va);
- kunmap_zbudpage_atomic(zbpg);
- ret = 0;
-out:
- zbudpage_spin_unlock(zbudpage);
- spin_unlock(lists_lock);
- return ret;
-}
-
-/*
- * Given a zbudref and a kernel pointer, copy the data from
- * the zbud to the kernel pointer.
- */
-int zbud_copy_from_zbud(char *to_va, struct zbudref *zref,
- size_t *sizep, bool eph)
-{
- struct zbudpage *zbudpage = zbudref_to_zbudpage(zref);
- unsigned long budnum = zbudref_budnum(zref);
- void *zbpg;
- char *from_va;
- unsigned size;
- int ret = -1;
- spinlock_t *lists_lock =
- eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
-
- spin_lock(lists_lock);
- zbudpage_spin_lock(zbudpage);
- if (zbudpage_is_dying(zbudpage)) {
- /* ignore dying zbudpage... see zbud_evict_pageframe_lru() */
- goto out;
- }
- zbpg = kmap_zbudpage_atomic(zbudpage);
- if (budnum == 0)
- size = zbudpage->zbud0_size;
- else
- size = zbudpage->zbud1_size;
- BUG_ON(size == 0 || size > zbud_max_size());
- from_va = zbud_data(zbpg, budnum, size);
- from_va += sizeof(struct tmem_handle);
- size -= sizeof(struct tmem_handle);
- *sizep = size;
- memcpy(to_va, from_va, size);
-
- kunmap_zbudpage_atomic(zbpg);
- ret = 0;
-out:
- zbudpage_spin_unlock(zbudpage);
- spin_unlock(lists_lock);
- return ret;
-}
-
-/*
- * Given a zbudref and a kernel pointer, copy the data from
- * the kernel pointer to the zbud.
- */
-int zbud_copy_to_zbud(struct zbudref *zref, char *from_va, bool eph)
-{
- struct zbudpage *zbudpage = zbudref_to_zbudpage(zref);
- unsigned long budnum = zbudref_budnum(zref);
- void *zbpg;
- char *to_va;
- unsigned size;
- int ret = -1;
- spinlock_t *lists_lock =
- eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
-
- spin_lock(lists_lock);
- zbudpage_spin_lock(zbudpage);
- if (zbudpage_is_dying(zbudpage)) {
- /* ignore dying zbudpage... see zbud_evict_pageframe_lru() */
- goto out;
- }
- zbpg = kmap_zbudpage_atomic(zbudpage);
- if (budnum == 0)
- size = zbudpage->zbud0_size;
- else
- size = zbudpage->zbud1_size;
- BUG_ON(size == 0 || size > zbud_max_size());
- to_va = zbud_data(zbpg, budnum, size);
- to_va += sizeof(struct tmem_handle);
- size -= sizeof(struct tmem_handle);
- memcpy(to_va, from_va, size);
-
- kunmap_zbudpage_atomic(zbpg);
- ret = 0;
-out:
- zbudpage_spin_unlock(zbudpage);
- spin_unlock(lists_lock);
- return ret;
-}
-
-/*
- * Choose an ephemeral LRU zbudpage that is evictable (not locked), ensure
- * there are no references to it remaining, and return the now unused
- * (and re-init'ed) struct page and the total amount of compressed
- * data that was evicted.
- */
-struct page *zbud_evict_pageframe_lru(unsigned int *zsize, unsigned int *zpages)
-{
- struct zbudpage *zbudpage = NULL, *zbudpage2;
- struct zbud_unbuddied *unbud = zbud_eph_unbuddied;
- struct page *page = NULL;
- bool irqs_disabled = irqs_disabled();
-
- /*
- * Since this can be called indirectly from cleancache_put, which
- * has interrupts disabled, as well as frontswap_put, which does not,
- * we need to be able to handle both cases, even though it is ugly.
- */
- if (irqs_disabled)
- spin_lock(&zbud_eph_lists_lock);
- else
- spin_lock_bh(&zbud_eph_lists_lock);
- *zsize = 0;
- if (list_empty(&zbud_eph_lru_list))
- goto unlock_out;
- list_for_each_entry_safe(zbudpage, zbudpage2, &zbud_eph_lru_list, lru) {
- /* skip a locked zbudpage */
- if (unlikely(!zbudpage_spin_trylock(zbudpage)))
- continue;
- /* skip an unevictable zbudpage */
- if (unlikely(zbudpage->unevictable != 0)) {
- zbudpage_spin_unlock(zbudpage);
- continue;
- }
- /* got a locked evictable page */
- goto evict_page;
-
- }
-unlock_out:
- /* no unlocked evictable pages, give up */
- if (irqs_disabled)
- spin_unlock(&zbud_eph_lists_lock);
- else
- spin_unlock_bh(&zbud_eph_lists_lock);
- goto out;
-
-evict_page:
- list_del_init(&zbudpage->budlist);
- list_del_init(&zbudpage->lru);
- zbudpage_set_dying(zbudpage);
- /*
- * the zbudpage is now "dying" and attempts to read, write,
- * or delete data from it will be ignored
- */
- if (zbudpage->zbud0_size != 0 && zbudpage->zbud1_size != 0) {
- *zsize = zbudpage->zbud0_size + zbudpage->zbud1_size -
- (2 * sizeof(struct tmem_handle));
- *zpages = 2;
- } else if (zbudpage->zbud0_size != 0) {
- unbud[zbud_size_to_chunks(zbudpage->zbud0_size)].count--;
- *zsize = zbudpage->zbud0_size - sizeof(struct tmem_handle);
- *zpages = 1;
- } else if (zbudpage->zbud1_size != 0) {
- unbud[zbud_size_to_chunks(zbudpage->zbud1_size)].count--;
- *zsize = zbudpage->zbud1_size - sizeof(struct tmem_handle);
- *zpages = 1;
- } else {
- BUG();
- }
- spin_unlock(&zbud_eph_lists_lock);
- zbud_eph_evicted_pageframes++;
- if (*zpages == 1)
- zbud_eph_unbuddied_count--;
- else
- zbud_eph_buddied_count--;
- zbud_evict_tmem(zbudpage);
- zbudpage_spin_lock(zbudpage);
- zbudpage_clear_dying(zbudpage);
- page = zbud_unuse_zbudpage(zbudpage, true);
- if (!irqs_disabled)
- local_bh_enable();
-out:
- return page;
-}
-
-/*
- * Choose a persistent LRU zbudpage that is evictable (not locked), zombify it,
- * read the tmem_handle(s) out of it into the passed array, and return the
- * number of zbuds. Caller must perform necessary tmem functions and,
- * indirectly, zbud functions to fetch any valid data and cause the
- * now-zombified zbudpage to eventually be freed. We track the zombified
- * zbudpage count so it is possible to observe if there is a leak.
- * FIXME: describe (ramster) case where data pointers are passed in for memcpy
- */
-unsigned int zbud_make_zombie_lru(struct tmem_handle *th, unsigned char **data,
- unsigned int *zsize, bool eph)
-{
-	struct zbudpage *zbudpage = NULL, *zbudpage2;
- struct tmem_handle *thfrom;
- char *from_va;
- void *zbpg;
- unsigned size;
- int ret = 0, i;
- spinlock_t *lists_lock =
- eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
- struct list_head *lru_list =
- eph ? &zbud_eph_lru_list : &zbud_pers_lru_list;
-
- spin_lock_bh(lists_lock);
- if (list_empty(lru_list))
- goto out;
-	list_for_each_entry_safe(zbudpage, zbudpage2, lru_list, lru) {
- /* skip a locked zbudpage */
- if (unlikely(!zbudpage_spin_trylock(zbudpage)))
- continue;
- /* skip an unevictable zbudpage */
- if (unlikely(zbudpage->unevictable != 0)) {
- zbudpage_spin_unlock(zbudpage);
- continue;
- }
- /* got a locked evictable page */
- goto zombify_page;
- }
- /* no unlocked evictable pages, give up */
- goto out;
-
-zombify_page:
-	/* got a locked evictable page, zombify it */
- list_del_init(&zbudpage->budlist);
- zbudpage_set_zombie(zbudpage);
- /* FIXME what accounting do I need to do here? */
- list_del_init(&zbudpage->lru);
- if (eph) {
- list_add_tail(&zbudpage->lru, &zbud_eph_zombie_list);
- zbud_eph_zombie_count =
- atomic_inc_return(&zbud_eph_zombie_atomic);
- } else {
- list_add_tail(&zbudpage->lru, &zbud_pers_zombie_list);
- zbud_pers_zombie_count =
- atomic_inc_return(&zbud_pers_zombie_atomic);
- }
- /* FIXME what accounting do I need to do here? */
- zbpg = kmap_zbudpage_atomic(zbudpage);
- for (i = 0; i < 2; i++) {
- size = (i == 0) ? zbudpage->zbud0_size : zbudpage->zbud1_size;
- if (size) {
- from_va = zbud_data(zbpg, i, size);
- thfrom = (struct tmem_handle *)from_va;
- from_va += sizeof(struct tmem_handle);
- size -= sizeof(struct tmem_handle);
- if (th != NULL)
- th[ret] = *thfrom;
- if (data != NULL)
- memcpy(data[ret], from_va, size);
- if (zsize != NULL)
- *zsize++ = size;
- ret++;
- }
- }
- kunmap_zbudpage_atomic(zbpg);
- zbudpage_spin_unlock(zbudpage);
-out:
- spin_unlock_bh(lists_lock);
- return ret;
-}
-
-void zbud_init(void)
-{
- int i;
-
- zbud_debugfs_init();
- BUG_ON((sizeof(struct tmem_handle) * 2 > CHUNK_SIZE));
- BUG_ON(sizeof(struct zbudpage) > sizeof(struct page));
- for (i = 0; i < NCHUNKS; i++) {
- INIT_LIST_HEAD(&zbud_eph_unbuddied[i].list);
- INIT_LIST_HEAD(&zbud_pers_unbuddied[i].list);
- }
-}
diff --git a/drivers/staging/zcache/zbud.h b/drivers/staging/zcache/zbud.h
deleted file mode 100644
index 891e8a7..0000000
--- a/drivers/staging/zcache/zbud.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * zbud.h
- *
- * Copyright (c) 2010-2012, Dan Magenheimer, Oracle Corp.
- *
- */
-
-#ifndef _ZBUD_H_
-#define _ZBUD_H_
-
-#include "tmem.h"
-
-struct zbudref;
-
-extern unsigned int zbud_max_buddy_size(void);
-extern struct zbudref *zbud_match_prep(struct tmem_handle *th, bool eph,
- void *cdata, unsigned size);
-extern struct zbudref *zbud_create_prep(struct tmem_handle *th, bool eph,
- void *cdata, unsigned size,
- struct page *newpage);
-extern void zbud_create_finish(struct zbudref *, bool);
-extern int zbud_decompress(struct page *, struct zbudref *, bool,
- void (*func)(char *, unsigned int, char *));
-extern int zbud_copy_from_zbud(char *, struct zbudref *, size_t *, bool);
-extern int zbud_copy_to_zbud(struct zbudref *, char *, bool);
-extern struct page *zbud_free_and_delist(struct zbudref *, bool eph,
- unsigned int *, unsigned int *);
-extern struct page *zbud_evict_pageframe_lru(unsigned int *, unsigned int *);
-extern unsigned int zbud_make_zombie_lru(struct tmem_handle *, unsigned char **,
- unsigned int *, bool);
-extern void zbud_init(void);
-
-#endif /* _ZBUD_H_ */
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
deleted file mode 100644
index dcceed2..0000000
--- a/drivers/staging/zcache/zcache-main.c
+++ /dev/null
@@ -1,1941 +0,0 @@
-/*
- * zcache.c
- *
- * Copyright (c) 2010-2012, Dan Magenheimer, Oracle Corp.
- * Copyright (c) 2010,2011, Nitin Gupta
- *
- * Zcache provides an in-kernel "host implementation" for transcendent memory
- * ("tmem") and, thus indirectly, for cleancache and frontswap. Zcache uses
- * lzo1x compression to improve density and an embedded allocator called
- * "zbud" which "buddies" two compressed pages semi-optimally in each physical
- * pageframe. Zbud is integrally tied into tmem to allow pageframes to
- * be "reclaimed" efficiently.
- */
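A minimal sketch of how this host implementation plugs into tmem and zbud; the helper my_zcache_wire_up() is hypothetical, and the real zcache_init() appears further down in this file, outside the portion shown here:

/* Sketch: register zcache's hostops/pamops with tmem and set up zbud. */
static int __init my_zcache_wire_up(void)
{
	zbud_init();				/* zbud lists and debugfs */
	tmem_register_hostops(&zcache_hostops);	/* obj/objnode alloc + free */
	tmem_register_pamops(&zcache_pamops);	/* zpage (pampd) handling */
	return 0;
}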
-
-#include <linux/module.h>
-#include <linux/cpu.h>
-#include <linux/highmem.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/atomic.h>
-#include <linux/math64.h>
-#include <linux/crypto.h>
-#include <linux/swap.h>
-#include <linux/swapops.h>
-#include <linux/pagemap.h>
-#include <linux/writeback.h>
-
-#include <linux/cleancache.h>
-#include <linux/frontswap.h>
-#include "tmem.h"
-#include "zcache.h"
-#include "zbud.h"
-#include "ramster.h"
-#include "debug.h"
-#ifdef CONFIG_RAMSTER
-static bool ramster_enabled __read_mostly;
-static int disable_frontswap_selfshrink;
-#else
-#define ramster_enabled false
-#define disable_frontswap_selfshrink 0
-#endif
-
-#ifndef __PG_WAS_ACTIVE
-static inline bool PageWasActive(struct page *page)
-{
- return true;
-}
-
-static inline void SetPageWasActive(struct page *page)
-{
-}
-#endif
-
-#ifdef FRONTSWAP_HAS_EXCLUSIVE_GETS
-static bool frontswap_has_exclusive_gets __read_mostly = true;
-#else
-static bool frontswap_has_exclusive_gets __read_mostly;
-static inline void frontswap_tmem_exclusive_gets(bool b)
-{
-}
-#endif
-
-/*
- * mark the pampd with a special value so that a later
- * retrieve can identify zero-filled pages
- */
-#define ZERO_FILLED 0x2
-
-/* enable (or fix code) when Seth's patches are accepted upstream */
-#define zcache_writeback_enabled 0
-
-static bool zcache_enabled __read_mostly;
-static bool disable_cleancache __read_mostly;
-static bool disable_frontswap __read_mostly;
-static bool disable_frontswap_ignore_nonactive __read_mostly;
-static bool disable_cleancache_ignore_nonactive __read_mostly;
-static char *namestr __read_mostly = "zcache";
-
-#define ZCACHE_GFP_MASK \
- (__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
-
-/* crypto API for zcache */
-#ifdef CONFIG_ZCACHE_MODULE
-static char *zcache_comp_name = "lzo";
-#else
-#define ZCACHE_COMP_NAME_SZ CRYPTO_MAX_ALG_NAME
-static char zcache_comp_name[ZCACHE_COMP_NAME_SZ] __read_mostly;
-#endif
-static struct crypto_comp * __percpu *zcache_comp_pcpu_tfms __read_mostly;
-
-enum comp_op {
- ZCACHE_COMPOP_COMPRESS,
- ZCACHE_COMPOP_DECOMPRESS
-};
-
-static inline int zcache_comp_op(enum comp_op op,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- struct crypto_comp *tfm;
- int ret = -1;
-
- BUG_ON(!zcache_comp_pcpu_tfms);
- tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
- BUG_ON(!tfm);
- switch (op) {
- case ZCACHE_COMPOP_COMPRESS:
- ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
- break;
- case ZCACHE_COMPOP_DECOMPRESS:
- ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
- break;
- default:
- ret = -EINVAL;
- }
- put_cpu();
- return ret;
-}
-
-/*
- * policy parameters
- */
-
-/*
- * byte count defining poor compression; pages with greater zsize will be
- * rejected
- */
-static unsigned int zbud_max_zsize __read_mostly = (PAGE_SIZE / 8) * 7;
-/*
- * byte count defining poor *mean* compression; pages with greater zsize
- * will be rejected until sufficient better-compressed pages are accepted
- * driving the mean below this threshold
- */
-static unsigned int zbud_max_mean_zsize __read_mostly = (PAGE_SIZE / 8) * 5;
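With 4 KiB pages these defaults evaluate to zbud_max_zsize = 3584 bytes and zbud_max_mean_zsize = 2560 bytes: a zpage compressing to more than 3584 bytes is always rejected, and zpages between 2560 and 3584 bytes are accepted only while the running mean compressed size stays at or below 2560 bytes.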
-
-/*
- * for now, use named slabs so we can easily track usage; later we can
- * either just use kmalloc, or perhaps add a slab-like allocator
- * to more carefully manage total memory utilization
- */
-static struct kmem_cache *zcache_objnode_cache;
-static struct kmem_cache *zcache_obj_cache;
-
-static DEFINE_PER_CPU(struct zcache_preload, zcache_preloads) = { 0, };
-
-/* Used by debug.c */
-ssize_t zcache_pers_zpages;
-u64 zcache_pers_zbytes;
-ssize_t zcache_eph_pageframes;
-ssize_t zcache_pers_pageframes;
-
-/* Used by this code. */
-ssize_t zcache_last_active_file_pageframes;
-ssize_t zcache_last_inactive_file_pageframes;
-ssize_t zcache_last_active_anon_pageframes;
-ssize_t zcache_last_inactive_anon_pageframes;
-#ifdef CONFIG_ZCACHE_WRITEBACK
-ssize_t zcache_writtenback_pages;
-ssize_t zcache_outstanding_writeback_pages;
-#endif
-/*
- * zcache core code starts here
- */
-
-static struct zcache_client zcache_host;
-static struct zcache_client zcache_clients[MAX_CLIENTS];
-
-static inline bool is_local_client(struct zcache_client *cli)
-{
- return cli == &zcache_host;
-}
-
-static struct zcache_client *zcache_get_client_by_id(uint16_t cli_id)
-{
- struct zcache_client *cli = &zcache_host;
-
- if (cli_id != LOCAL_CLIENT) {
- if (cli_id >= MAX_CLIENTS)
- goto out;
- cli = &zcache_clients[cli_id];
- }
-out:
- return cli;
-}
-
-/*
- * Tmem operations assume the poolid implies the invoking client.
- * Zcache only has one client (the kernel itself): LOCAL_CLIENT.
- * RAMster has each client numbered by cluster node, and a KVM version
- * of zcache would have one client per guest and each client might
- * have a poolid==N.
- */
-struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid)
-{
- struct tmem_pool *pool = NULL;
- struct zcache_client *cli = NULL;
-
- cli = zcache_get_client_by_id(cli_id);
- if (cli == NULL)
- goto out;
- if (!is_local_client(cli))
- atomic_inc(&cli->refcount);
- if (poolid < MAX_POOLS_PER_CLIENT) {
- pool = cli->tmem_pools[poolid];
- if (pool != NULL)
- atomic_inc(&pool->refcount);
- }
-out:
- return pool;
-}
-
-void zcache_put_pool(struct tmem_pool *pool)
-{
- struct zcache_client *cli = NULL;
-
- if (pool == NULL)
- BUG();
- cli = pool->client;
- atomic_dec(&pool->refcount);
- if (!is_local_client(cli))
- atomic_dec(&cli->refcount);
-}
-
-int zcache_new_client(uint16_t cli_id)
-{
- struct zcache_client *cli;
- int ret = -1;
-
- cli = zcache_get_client_by_id(cli_id);
- if (cli == NULL)
- goto out;
- if (cli->allocated)
- goto out;
- cli->allocated = 1;
- ret = 0;
-out:
- return ret;
-}
-
-/*
- * zcache implementation for tmem host ops
- */
-
-static struct tmem_objnode *zcache_objnode_alloc(struct tmem_pool *pool)
-{
- struct tmem_objnode *objnode = NULL;
- struct zcache_preload *kp;
- int i;
-
- kp = &__get_cpu_var(zcache_preloads);
- for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
- objnode = kp->objnodes[i];
- if (objnode != NULL) {
- kp->objnodes[i] = NULL;
- break;
- }
- }
- BUG_ON(objnode == NULL);
- inc_zcache_objnode_count();
- return objnode;
-}
-
-static void zcache_objnode_free(struct tmem_objnode *objnode,
- struct tmem_pool *pool)
-{
- dec_zcache_objnode_count();
- kmem_cache_free(zcache_objnode_cache, objnode);
-}
-
-static struct tmem_obj *zcache_obj_alloc(struct tmem_pool *pool)
-{
- struct tmem_obj *obj = NULL;
- struct zcache_preload *kp;
-
- kp = &__get_cpu_var(zcache_preloads);
- obj = kp->obj;
- BUG_ON(obj == NULL);
- kp->obj = NULL;
- inc_zcache_obj_count();
- return obj;
-}
-
-static void zcache_obj_free(struct tmem_obj *obj, struct tmem_pool *pool)
-{
- dec_zcache_obj_count();
- kmem_cache_free(zcache_obj_cache, obj);
-}
-
-/*
- * Compressing zero-filled pages would waste memory and introduce
- * serious fragmentation, so skip them to avoid the overhead.
- */
-static bool page_is_zero_filled(struct page *p)
-{
- unsigned int pos;
- char *page;
-
- page = kmap_atomic(p);
- for (pos = 0; pos < PAGE_SIZE / sizeof(*page); pos++) {
- if (page[pos]) {
- kunmap_atomic(page);
- return false;
- }
- }
- kunmap_atomic(page);
-
- return true;
-}
-
-static void handle_zero_filled_page(void *p)
-{
- void *user_mem;
- struct page *page = (struct page *)p;
-
- user_mem = kmap_atomic(page);
- memset(user_mem, 0, PAGE_SIZE);
- kunmap_atomic(user_mem);
-
- flush_dcache_page(page);
-}
-
-static struct tmem_hostops zcache_hostops = {
- .obj_alloc = zcache_obj_alloc,
- .obj_free = zcache_obj_free,
- .objnode_alloc = zcache_objnode_alloc,
- .objnode_free = zcache_objnode_free,
-};
-
-static struct page *zcache_alloc_page(void)
-{
- struct page *page = alloc_page(ZCACHE_GFP_MASK);
-
- if (page != NULL)
- inc_zcache_pageframes_alloced();
- return page;
-}
-
-static void zcache_free_page(struct page *page)
-{
- long curr_pageframes;
- static long max_pageframes, min_pageframes;
-
- if (page == NULL)
- BUG();
- __free_page(page);
- inc_zcache_pageframes_freed();
- curr_pageframes = curr_pageframes_count();
- if (curr_pageframes > max_pageframes)
- max_pageframes = curr_pageframes;
- if (curr_pageframes < min_pageframes)
- min_pageframes = curr_pageframes;
-#ifdef CONFIG_ZCACHE_DEBUG
- if (curr_pageframes > 2L || curr_pageframes < -2L) {
- /* pr_info here */
- }
-#endif
-}
-
-/*
- * zcache implementations for PAM page descriptor ops
- */
-
-/* forward reference */
-static void zcache_compress(struct page *from,
- void **out_va, unsigned *out_len);
-
-static struct page *zcache_evict_eph_pageframe(void);
-
-static void *zcache_pampd_eph_create(char *data, size_t size, bool raw,
- struct tmem_handle *th)
-{
- void *pampd = NULL, *cdata = data;
- unsigned clen = size;
- bool zero_filled = false;
- struct page *page = (struct page *)(data), *newpage;
-
- if (page_is_zero_filled(page)) {
- clen = 0;
- zero_filled = true;
- inc_zcache_zero_filled_pages();
- goto got_pampd;
- }
-
- if (!raw) {
- zcache_compress(page, &cdata, &clen);
- if (clen > zbud_max_buddy_size()) {
- inc_zcache_compress_poor();
- goto out;
- }
- } else {
- BUG_ON(clen > zbud_max_buddy_size());
- }
-
- /* look for space via an existing match first */
- pampd = (void *)zbud_match_prep(th, true, cdata, clen);
- if (pampd != NULL)
- goto got_pampd;
-
- /* no match, now we need to find (or free up) a full page */
- newpage = zcache_alloc_page();
- if (newpage != NULL)
- goto create_in_new_page;
-
- inc_zcache_failed_getfreepages();
- /* can't allocate a page, evict an ephemeral page via LRU */
- newpage = zcache_evict_eph_pageframe();
- if (newpage == NULL) {
- inc_zcache_eph_ate_tail_failed();
- goto out;
- }
- inc_zcache_eph_ate_tail();
-
-create_in_new_page:
- pampd = (void *)zbud_create_prep(th, true, cdata, clen, newpage);
- BUG_ON(pampd == NULL);
- inc_zcache_eph_pageframes();
-
-got_pampd:
- inc_zcache_eph_zbytes(clen);
- inc_zcache_eph_zpages();
- if (ramster_enabled && raw && !zero_filled)
- ramster_count_foreign_pages(true, 1);
- if (zero_filled)
- pampd = (void *)ZERO_FILLED;
-out:
- return pampd;
-}
-
-static void *zcache_pampd_pers_create(char *data, size_t size, bool raw,
- struct tmem_handle *th)
-{
- void *pampd = NULL, *cdata = data;
- unsigned clen = size;
- bool zero_filled = false;
- struct page *page = (struct page *)(data), *newpage;
- unsigned long zbud_mean_zsize;
- unsigned long curr_pers_zpages, total_zsize;
-
- if (data == NULL) {
- BUG_ON(!ramster_enabled);
- goto create_pampd;
- }
-
- if (page_is_zero_filled(page)) {
- clen = 0;
- zero_filled = true;
- inc_zcache_zero_filled_pages();
- goto got_pampd;
- }
-
- curr_pers_zpages = zcache_pers_zpages;
-/* FIXME CONFIG_RAMSTER... subtract atomic remote_pers_pages here? */
- if (!raw)
- zcache_compress(page, &cdata, &clen);
- /* reject if compression is too poor */
- if (clen > zbud_max_zsize) {
- inc_zcache_compress_poor();
- goto out;
- }
- /* reject if mean compression is too poor */
- if ((clen > zbud_max_mean_zsize) && (curr_pers_zpages > 0)) {
- total_zsize = zcache_pers_zbytes;
- if ((long)total_zsize < 0)
- total_zsize = 0;
- zbud_mean_zsize = div_u64(total_zsize,
- curr_pers_zpages);
- if (zbud_mean_zsize > zbud_max_mean_zsize) {
- inc_zcache_mean_compress_poor();
- goto out;
- }
- }
-
-create_pampd:
- /* look for space via an existing match first */
- pampd = (void *)zbud_match_prep(th, false, cdata, clen);
- if (pampd != NULL)
- goto got_pampd;
-
- /* no match, now we need to find (or free up) a full page */
- newpage = zcache_alloc_page();
- if (newpage != NULL)
- goto create_in_new_page;
- /*
- * FIXME do the following only if eph is oversized?
- * if (zcache_eph_pageframes >
- * (global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE) +
- * global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE)))
- */
- inc_zcache_failed_getfreepages();
- /* can't allocate a page, evict an ephemeral page via LRU */
- newpage = zcache_evict_eph_pageframe();
- if (newpage == NULL) {
- inc_zcache_pers_ate_eph_failed();
- goto out;
- }
- inc_zcache_pers_ate_eph();
-
-create_in_new_page:
- pampd = (void *)zbud_create_prep(th, false, cdata, clen, newpage);
- BUG_ON(pampd == NULL);
- inc_zcache_pers_pageframes();
-
-got_pampd:
- inc_zcache_pers_zpages();
- inc_zcache_pers_zbytes(clen);
- if (ramster_enabled && raw && !zero_filled)
- ramster_count_foreign_pages(false, 1);
- if (zero_filled)
- pampd = (void *)ZERO_FILLED;
-out:
- return pampd;
-}
-
-/*
- * This is called directly from zcache_put_page to pre-allocate space
- * to store a zpage.
- */
-void *zcache_pampd_create(char *data, unsigned int size, bool raw,
- int eph, struct tmem_handle *th)
-{
- void *pampd = NULL;
- struct zcache_preload *kp;
- struct tmem_objnode *objnode;
- struct tmem_obj *obj;
- int i;
-
- BUG_ON(!irqs_disabled());
- /* pre-allocate per-cpu metadata */
- BUG_ON(zcache_objnode_cache == NULL);
- BUG_ON(zcache_obj_cache == NULL);
- kp = &__get_cpu_var(zcache_preloads);
- for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
- objnode = kp->objnodes[i];
- if (objnode == NULL) {
- objnode = kmem_cache_alloc(zcache_objnode_cache,
- ZCACHE_GFP_MASK);
- if (unlikely(objnode == NULL)) {
- inc_zcache_failed_alloc();
- goto out;
- }
- kp->objnodes[i] = objnode;
- }
- }
- if (kp->obj == NULL) {
- obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK);
- kp->obj = obj;
- }
- if (unlikely(kp->obj == NULL)) {
- inc_zcache_failed_alloc();
- goto out;
- }
- /*
-	 * OK, all the metadata is pre-allocated; now do the data.  Since
-	 * how we allocate the data depends on whether the page is ephemeral
-	 * or persistent, we split the call here into different sub-functions.
- */
- if (eph)
- pampd = zcache_pampd_eph_create(data, size, raw, th);
- else
- pampd = zcache_pampd_pers_create(data, size, raw, th);
-out:
- return pampd;
-}
-
-/*
- * This is a pamops called via tmem_put and is necessary to "finish"
- * a pampd creation.
- */
-void zcache_pampd_create_finish(void *pampd, bool eph)
-{
- if (pampd != (void *)ZERO_FILLED)
- zbud_create_finish((struct zbudref *)pampd, eph);
-}
-
-/*
- * This is passed as a function parameter to zbud_decompress so that
- * zbud need not be familiar with the details of crypto. It assumes that
- * the bytes from_va and to_va through from_va+size-1 and to_va+size-1 are
- * kmapped. It must be successful, else there is a logic bug somewhere.
- */
-static void zcache_decompress(char *from_va, unsigned int size, char *to_va)
-{
- int ret;
- unsigned int outlen = PAGE_SIZE;
-
- ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size,
- to_va, &outlen);
- BUG_ON(ret);
- BUG_ON(outlen != PAGE_SIZE);
-}
-
-/*
- * Decompress from the kernel va to a pageframe
- */
-void zcache_decompress_to_page(char *from_va, unsigned int size,
- struct page *to_page)
-{
- char *to_va = kmap_atomic(to_page);
- zcache_decompress(from_va, size, to_va);
- kunmap_atomic(to_va);
-}
-
-/*
- * fill the pageframe corresponding to the struct page with the data
- * from the passed pampd
- */
-static int zcache_pampd_get_data(char *data, size_t *sizep, bool raw,
- void *pampd, struct tmem_pool *pool,
- struct tmem_oid *oid, uint32_t index)
-{
- int ret;
- bool eph = !is_persistent(pool);
-
- BUG_ON(preemptible());
- BUG_ON(eph); /* fix later if shared pools get implemented */
- BUG_ON(pampd_is_remote(pampd));
-
- if (pampd == (void *)ZERO_FILLED) {
- handle_zero_filled_page(data);
- if (!raw)
- *sizep = PAGE_SIZE;
- return 0;
- }
-
- if (raw)
- ret = zbud_copy_from_zbud(data, (struct zbudref *)pampd,
- sizep, eph);
- else {
- ret = zbud_decompress((struct page *)(data),
- (struct zbudref *)pampd, false,
- zcache_decompress);
- *sizep = PAGE_SIZE;
- }
- return ret;
-}
-
-/*
- * fill the pageframe corresponding to the struct page with the data
- * from the passed pampd, then free the pampd and do the accounting
- */
-static int zcache_pampd_get_data_and_free(char *data, size_t *sizep, bool raw,
- void *pampd, struct tmem_pool *pool,
- struct tmem_oid *oid, uint32_t index)
-{
- int ret = 0;
- bool eph = !is_persistent(pool), zero_filled = false;
- struct page *page = NULL;
- unsigned int zsize, zpages;
-
- BUG_ON(preemptible());
- BUG_ON(pampd_is_remote(pampd));
-
- if (pampd == (void *)ZERO_FILLED) {
- handle_zero_filled_page(data);
- zero_filled = true;
- zsize = 0;
- zpages = 1;
- if (!raw)
- *sizep = PAGE_SIZE;
- dec_zcache_zero_filled_pages();
- goto zero_fill;
- }
-
- if (raw)
- ret = zbud_copy_from_zbud(data, (struct zbudref *)pampd,
- sizep, eph);
- else {
- ret = zbud_decompress((struct page *)(data),
- (struct zbudref *)pampd, eph,
- zcache_decompress);
- *sizep = PAGE_SIZE;
- }
- page = zbud_free_and_delist((struct zbudref *)pampd, eph,
- &zsize, &zpages);
-zero_fill:
- if (eph) {
- if (page)
- dec_zcache_eph_pageframes();
- dec_zcache_eph_zpages(zpages);
- dec_zcache_eph_zbytes(zsize);
- } else {
- if (page)
- dec_zcache_pers_pageframes();
- dec_zcache_pers_zpages(zpages);
- dec_zcache_pers_zbytes(zsize);
- }
- if (!is_local_client(pool->client) && !zero_filled)
- ramster_count_foreign_pages(eph, -1);
- if (page && !zero_filled)
- zcache_free_page(page);
- return ret;
-}
-
-/*
- * free the pampd and remove it from any zcache lists
- * pampd must no longer be pointed to from any tmem data structures!
- */
-static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
- struct tmem_oid *oid, uint32_t index, bool acct)
-{
- struct page *page = NULL;
- unsigned int zsize, zpages;
- bool zero_filled = false;
-
- BUG_ON(preemptible());
-
- if (pampd == (void *)ZERO_FILLED) {
- zero_filled = true;
- zsize = 0;
- zpages = 1;
- dec_zcache_zero_filled_pages();
- }
-
- if (pampd_is_remote(pampd) && !zero_filled) {
- BUG_ON(!ramster_enabled);
- pampd = ramster_pampd_free(pampd, pool, oid, index, acct);
- if (pampd == NULL)
- return;
- }
- if (is_ephemeral(pool)) {
- if (!zero_filled)
- page = zbud_free_and_delist((struct zbudref *)pampd,
- true, &zsize, &zpages);
- if (page)
- dec_zcache_eph_pageframes();
- dec_zcache_eph_zpages(zpages);
- dec_zcache_eph_zbytes(zsize);
- /* FIXME CONFIG_RAMSTER... check acct parameter? */
- } else {
- if (!zero_filled)
- page = zbud_free_and_delist((struct zbudref *)pampd,
- false, &zsize, &zpages);
- if (page)
- dec_zcache_pers_pageframes();
- dec_zcache_pers_zpages(zpages);
- dec_zcache_pers_zbytes(zsize);
- }
- if (!is_local_client(pool->client) && !zero_filled)
- ramster_count_foreign_pages(is_ephemeral(pool), -1);
- if (page && !zero_filled)
- zcache_free_page(page);
-}
-
-static struct tmem_pamops zcache_pamops = {
- .create_finish = zcache_pampd_create_finish,
- .get_data = zcache_pampd_get_data,
- .get_data_and_free = zcache_pampd_get_data_and_free,
- .free = zcache_pampd_free,
-};
-
-/*
- * zcache compression/decompression and related per-cpu stuff
- */
-
-static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
-#define ZCACHE_DSTMEM_ORDER 1
-
-static void zcache_compress(struct page *from, void **out_va, unsigned *out_len)
-{
- int ret;
- unsigned char *dmem = __get_cpu_var(zcache_dstmem);
- char *from_va;
-
- BUG_ON(!irqs_disabled());
- /* no buffer or no compressor so can't compress */
- BUG_ON(dmem == NULL);
- *out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;
- from_va = kmap_atomic(from);
- mb();
- ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem,
- out_len);
- BUG_ON(ret);
- *out_va = dmem;
- kunmap_atomic(from_va);
-}
-
-static int zcache_comp_cpu_up(int cpu)
-{
- struct crypto_comp *tfm;
-
- tfm = crypto_alloc_comp(zcache_comp_name, 0, 0);
- if (IS_ERR(tfm))
- return NOTIFY_BAD;
- *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = tfm;
- return NOTIFY_OK;
-}
-
-static void zcache_comp_cpu_down(int cpu)
-{
- struct crypto_comp *tfm;
-
- tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu);
- crypto_free_comp(tfm);
- *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = NULL;
-}
-
-static int zcache_cpu_notifier(struct notifier_block *nb,
- unsigned long action, void *pcpu)
-{
- int ret, i, cpu = (long)pcpu;
- struct zcache_preload *kp;
-
- switch (action) {
- case CPU_UP_PREPARE:
- ret = zcache_comp_cpu_up(cpu);
- if (ret != NOTIFY_OK) {
- pr_err("%s: can't allocate compressor xform\n",
- namestr);
- return ret;
- }
- per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
- GFP_KERNEL | __GFP_REPEAT, ZCACHE_DSTMEM_ORDER);
- if (ramster_enabled)
- ramster_cpu_up(cpu);
- break;
- case CPU_DEAD:
- case CPU_UP_CANCELED:
- zcache_comp_cpu_down(cpu);
- free_pages((unsigned long)per_cpu(zcache_dstmem, cpu),
- ZCACHE_DSTMEM_ORDER);
- per_cpu(zcache_dstmem, cpu) = NULL;
- kp = &per_cpu(zcache_preloads, cpu);
- for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
- if (kp->objnodes[i])
- kmem_cache_free(zcache_objnode_cache,
- kp->objnodes[i]);
- }
- if (kp->obj) {
- kmem_cache_free(zcache_obj_cache, kp->obj);
- kp->obj = NULL;
- }
- if (ramster_enabled)
- ramster_cpu_down(cpu);
- break;
- default:
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block zcache_cpu_notifier_block = {
- .notifier_call = zcache_cpu_notifier
-};
-
-/*
- * The following code interacts with the zbud eviction and zbud
- * zombify code to access LRU pages
- */
-
-static struct page *zcache_evict_eph_pageframe(void)
-{
- struct page *page;
- unsigned int zsize = 0, zpages = 0;
-
- page = zbud_evict_pageframe_lru(&zsize, &zpages);
- if (page == NULL)
- goto out;
- dec_zcache_eph_zbytes(zsize);
- dec_zcache_eph_zpages(zpages);
- inc_zcache_evicted_eph_zpages(zpages);
- dec_zcache_eph_pageframes();
- inc_zcache_evicted_eph_pageframes();
-out:
- return page;
-}
-
-#ifdef CONFIG_ZCACHE_WRITEBACK
-
-static atomic_t zcache_outstanding_writeback_pages_atomic = ATOMIC_INIT(0);
-
-static inline void inc_zcache_outstanding_writeback_pages(void)
-{
- zcache_outstanding_writeback_pages =
- atomic_inc_return(&zcache_outstanding_writeback_pages_atomic);
-}
-static inline void dec_zcache_outstanding_writeback_pages(void)
-{
- zcache_outstanding_writeback_pages =
- atomic_dec_return(&zcache_outstanding_writeback_pages_atomic);
-};
-static void unswiz(struct tmem_oid oid, u32 index,
- unsigned *type, pgoff_t *offset);
-
-/*
- * Choose an LRU persistent pageframe and attempt to write it back to
- * the backing swap disk by calling frontswap_writeback on both zpages.
- *
- * This is work-in-progress.
- */
-
-static void zcache_end_swap_write(struct bio *bio, int err)
-{
- end_swap_bio_write(bio, err);
- dec_zcache_outstanding_writeback_pages();
- zcache_writtenback_pages++;
-}
-
-/*
- * zcache_get_swap_cache_page
- *
- * This is an adaptation of read_swap_cache_async()
- *
- * On success, the page is returned in retpage
- * Returns 0 if page was already in the swap cache, page is not locked
- * Returns 1 if the new page needs to be populated, page is locked
- */
-static int zcache_get_swap_cache_page(int type, pgoff_t offset,
- struct page *new_page)
-{
- struct page *found_page;
- swp_entry_t entry = swp_entry(type, offset);
- int err;
-
- BUG_ON(new_page == NULL);
- do {
- /*
- * First check the swap cache. Since this is normally
- * called after lookup_swap_cache() failed, re-calling
- * that would confuse statistics.
- */
- found_page = find_get_page(&swapper_space, entry.val);
- if (found_page)
- return 0;
-
- /*
- * call radix_tree_preload() while we can wait.
- */
- err = radix_tree_preload(GFP_KERNEL);
- if (err)
- break;
-
- /*
- * Swap entry may have been freed since our caller observed it.
- */
- err = swapcache_prepare(entry);
- if (err == -EEXIST) { /* seems racy */
- radix_tree_preload_end();
- continue;
- }
- if (err) { /* swp entry is obsolete ? */
- radix_tree_preload_end();
- break;
- }
-
- /* May fail (-ENOMEM) if radix-tree node allocation failed. */
- __set_page_locked(new_page);
- SetPageSwapBacked(new_page);
- err = __add_to_swap_cache(new_page, entry);
- if (likely(!err)) {
- radix_tree_preload_end();
- lru_cache_add_anon(new_page);
- return 1;
- }
- radix_tree_preload_end();
- ClearPageSwapBacked(new_page);
- __clear_page_locked(new_page);
- /*
- * add_to_swap_cache() doesn't return -EEXIST, so we can safely
- * clear SWAP_HAS_CACHE flag.
- */
- swapcache_free(entry, NULL);
- /* FIXME: is it possible to get here without err==-ENOMEM?
- * If not, we can dispense with the do loop, use goto retry */
- } while (err != -ENOMEM);
-
- return -ENOMEM;
-}
-
-/*
- * Given a frontswap zpage in zcache (identified by type/offset) and
- * an empty page, put the page into the swap cache, use frontswap
- * to get the page from zcache into the empty page, then give it
- * to the swap subsystem to send to disk (carefully avoiding the
- * possibility that frontswap might snatch it back).
- * Returns < 0 on error, 0 if successful, and 1 if successful but
- * the newpage passed in is not needed and should be freed.
- */
-static int zcache_frontswap_writeback_zpage(int type, pgoff_t offset,
- struct page *newpage)
-{
- struct page *page = newpage;
- int ret;
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_NONE,
- };
-
- ret = zcache_get_swap_cache_page(type, offset, page);
- if (ret < 0)
- return ret;
- else if (ret == 0) {
- /* more uptodate page is already in swapcache */
- __frontswap_invalidate_page(type, offset);
- return 1;
- }
-
- BUG_ON(!frontswap_has_exclusive_gets); /* load must also invalidate */
- /* FIXME: how is it possible to get here when page is unlocked? */
- __frontswap_load(page);
- SetPageUptodate(page); /* above does SetPageDirty, is that enough? */
-
- /* start writeback */
- SetPageReclaim(page);
- /*
- * Return value is ignored here because it doesn't change anything
- * for us. Page is returned unlocked.
- */
- (void)__swap_writepage(page, &wbc, zcache_end_swap_write);
- page_cache_release(page);
- inc_zcache_outstanding_writeback_pages();
-
- return 0;
-}
-
-/*
- * The following is still a magic number... we want to allow forward progress
- * for writeback because it clears out needed RAM when under pressure, but
- * we don't want to allow writeback to absorb and queue too many GFP_KERNEL
- * pages if the swap device is very slow.
- */
-#define ZCACHE_MAX_OUTSTANDING_WRITEBACK_PAGES 6400
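
As the comment notes, the cap only throttles writeback that would consume freshly allocated pageframes; pageframes reclaimed by evicting ephemeral pages are always allowed to proceed. A rough user-space sketch of that gate, with hypothetical names and a C11 atomic standing in for the kernel counter:

/* Sketch of the throttling idea used by zcache_frontswap_writeback():
 * back off only when pages had to be allocated (not evicted) and the
 * number of in-flight writeback pages has passed the cap. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_OUTSTANDING_WRITEBACK_PAGES	6400

static atomic_long outstanding;

static bool may_queue_writeback(bool frames_came_from_eviction)
{
	if (frames_came_from_eviction)
		return true;	/* eviction already freed RAM */
	return atomic_load(&outstanding) <= MAX_OUTSTANDING_WRITEBACK_PAGES;
}

int main(void)
{
	atomic_store(&outstanding, 6500);
	printf("%d\n", may_queue_writeback(false));	/* 0: throttled */
	printf("%d\n", may_queue_writeback(true));	/* 1: allowed   */
	return 0;
}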
-
-/*
- * Try to allocate two free pages, first using a non-aggressive alloc,
- * then by evicting zcache ephemeral (clean pagecache) pages, and last
- * by aggressive GFP_KERNEL alloc. We allow zbud to choose a pageframe
- * consisting of 1-2 zbuds/zpages, then call the writeback_zpage helper
- * function above for each.
- */
-static int zcache_frontswap_writeback(void)
-{
- struct tmem_handle th[2];
- int ret = 0;
- int nzbuds, writeback_ret;
- unsigned type;
- struct page *znewpage1 = NULL, *znewpage2 = NULL;
- struct page *evictpage1 = NULL, *evictpage2 = NULL;
- struct page *newpage1 = NULL, *newpage2 = NULL;
- struct page *page1 = NULL, *page2 = NULL;
- pgoff_t offset;
-
- znewpage1 = alloc_page(ZCACHE_GFP_MASK);
- znewpage2 = alloc_page(ZCACHE_GFP_MASK);
- if (znewpage1 == NULL)
- evictpage1 = zcache_evict_eph_pageframe();
- if (znewpage2 == NULL)
- evictpage2 = zcache_evict_eph_pageframe();
-
- if ((evictpage1 == NULL || evictpage2 == NULL) &&
- atomic_read(&zcache_outstanding_writeback_pages_atomic) >
- ZCACHE_MAX_OUTSTANDING_WRITEBACK_PAGES) {
- goto free_and_out;
- }
- if (znewpage1 == NULL && evictpage1 == NULL)
- newpage1 = alloc_page(GFP_KERNEL);
- if (znewpage2 == NULL && evictpage2 == NULL)
- newpage2 = alloc_page(GFP_KERNEL);
- if (newpage1 == NULL || newpage2 == NULL)
- goto free_and_out;
-
- /* ok, we have two pageframes pre-allocated, get a pair of zbuds */
- nzbuds = zbud_make_zombie_lru(&th[0], NULL, NULL, false);
- if (nzbuds == 0) {
- ret = -ENOENT;
- goto free_and_out;
- }
-
- /* process the first zbud */
- unswiz(th[0].oid, th[0].index, &type, &offset);
- page1 = (znewpage1 != NULL) ? znewpage1 :
- ((newpage1 != NULL) ? newpage1 : evictpage1);
- writeback_ret = zcache_frontswap_writeback_zpage(type, offset, page1);
- if (writeback_ret < 0) {
- ret = -ENOMEM;
- goto free_and_out;
- }
- if (evictpage1 != NULL)
- zcache_pageframes_freed =
- atomic_inc_return(&zcache_pageframes_freed_atomic);
- if (writeback_ret == 0) {
- /* zcache_get_swap_cache_page will free, don't double free */
- znewpage1 = NULL;
- newpage1 = NULL;
- evictpage1 = NULL;
- }
- if (nzbuds < 2)
- goto free_and_out;
-
- /* if there is a second zbud, process it */
- unswiz(th[1].oid, th[1].index, &type, &offset);
- page2 = (znewpage2 != NULL) ? znewpage2 :
- ((newpage2 != NULL) ? newpage2 : evictpage2);
- writeback_ret = zcache_frontswap_writeback_zpage(type, offset, page2);
- if (writeback_ret < 0) {
- ret = -ENOMEM;
- goto free_and_out;
- }
- if (evictpage2 != NULL)
- zcache_pageframes_freed =
- atomic_inc_return(&zcache_pageframes_freed_atomic);
- if (writeback_ret == 0) {
- znewpage2 = NULL;
- newpage2 = NULL;
- evictpage2 = NULL;
- }
-
-free_and_out:
- if (znewpage1 != NULL)
- page_cache_release(znewpage1);
- if (znewpage2 != NULL)
- page_cache_release(znewpage2);
- if (newpage1 != NULL)
- page_cache_release(newpage1);
- if (newpage2 != NULL)
- page_cache_release(newpage2);
- if (evictpage1 != NULL)
- zcache_free_page(evictpage1);
- if (evictpage2 != NULL)
- zcache_free_page(evictpage2);
- return ret;
-}
-#endif /* CONFIG_ZCACHE_WRITEBACK */
-
-/*
- * When zcache is disabled ("frozen"), pools can be created and destroyed,
- * but all puts (and thus all other operations that require memory allocation)
- * must fail. If zcache is unfrozen, accepts puts, and is then frozen again,
- * data consistency requires all puts while frozen to be converted into
- * flushes.
- */
-static bool zcache_freeze;
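
A toy illustration of that rule, using a hypothetical in-memory store rather than the tmem API: while frozen, a put drops any existing copy (behaving as a flush) and reports failure instead of storing new data, so no stale copy can survive a freeze/unfreeze cycle.

/* Sketch only: puts made while "frozen" act as flushes and fail. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool frozen;
static char store[16][64];
static bool present[16];

static int put(int key, const char *val)
{
	if (frozen) {
		present[key] = false;		/* the put becomes a flush */
		return -1;			/* and the put itself fails */
	}
	strncpy(store[key], val, sizeof(store[key]) - 1);
	present[key] = true;
	return 0;
}

int main(void)
{
	put(3, "v1");				/* stored while unfrozen */
	frozen = true;
	put(3, "v2");				/* old copy dropped, nothing stored */
	printf("present=%d\n", present[3]);	/* 0 */
	return 0;
}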
-
-/*
- * This zcache shrinker interface reduces the number of ephemeral pageframes
- * used by zcache to approximately the same as the total number of LRU_FILE
- * pageframes in use, and now also reduces the number of persistent pageframes
- * used by zcache to approximately the same as the total number of LRU_ANON
- * pageframes in use. FIXME POLICY: Probably the writeback should only occur
- * if the eviction doesn't free enough pages.
- */
-static int shrink_zcache_memory(struct shrinker *shrink,
- struct shrink_control *sc)
-{
- static bool in_progress;
- int ret = -1;
- int nr = sc->nr_to_scan;
- int nr_evict = 0;
- int nr_writeback = 0;
- struct page *page;
- int file_pageframes_inuse, anon_pageframes_inuse;
-
- if (nr <= 0)
- goto skip_evict;
-
- /* don't allow more than one eviction thread at a time */
- if (in_progress)
- goto skip_evict;
-
- in_progress = true;
-
- /* we are going to ignore nr, and target a different value */
- zcache_last_active_file_pageframes =
- global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE);
- zcache_last_inactive_file_pageframes =
- global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE);
- file_pageframes_inuse = zcache_last_active_file_pageframes +
- zcache_last_inactive_file_pageframes;
- if (zcache_eph_pageframes > file_pageframes_inuse)
- nr_evict = zcache_eph_pageframes - file_pageframes_inuse;
- else
- nr_evict = 0;
- while (nr_evict-- > 0) {
- page = zcache_evict_eph_pageframe();
- if (page == NULL)
- break;
- zcache_free_page(page);
- }
-
- zcache_last_active_anon_pageframes =
- global_page_state(NR_LRU_BASE + LRU_ACTIVE_ANON);
- zcache_last_inactive_anon_pageframes =
- global_page_state(NR_LRU_BASE + LRU_INACTIVE_ANON);
- anon_pageframes_inuse = zcache_last_active_anon_pageframes +
- zcache_last_inactive_anon_pageframes;
- if (zcache_pers_pageframes > anon_pageframes_inuse)
- nr_writeback = zcache_pers_pageframes - anon_pageframes_inuse;
- else
- nr_writeback = 0;
- while (nr_writeback-- > 0) {
-#ifdef CONFIG_ZCACHE_WRITEBACK
- int writeback_ret;
- writeback_ret = zcache_frontswap_writeback();
- if (writeback_ret == -ENOMEM)
-#endif
- break;
- }
- in_progress = false;
-
-skip_evict:
- /* resample: has changed, but maybe not all the way yet */
- zcache_last_active_file_pageframes =
- global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE);
- zcache_last_inactive_file_pageframes =
- global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE);
- ret = zcache_eph_pageframes - zcache_last_active_file_pageframes +
- zcache_last_inactive_file_pageframes;
- if (ret < 0)
- ret = 0;
- return ret;
-}
-
-static struct shrinker zcache_shrinker = {
- .shrink = shrink_zcache_memory,
- .seeks = DEFAULT_SEEKS,
-};
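
In other words, the shrinker ignores nr_to_scan and evicts (or writes back) only the excess of zcache pageframes over the corresponding LRU population. A small stand-alone sketch of that target arithmetic:

/* Sketch of the shrinker's target: free only the excess of zcache
 * pageframes over the in-use LRU pageframes of the matching kind. */
#include <stdio.h>

static long excess(long zcache_frames, long lru_frames_inuse)
{
	return zcache_frames > lru_frames_inuse ?
		zcache_frames - lru_frames_inuse : 0;
}

int main(void)
{
	/* e.g. 12000 ephemeral frames vs 10000 file-LRU pages in use */
	printf("nr_evict = %ld\n", excess(12000, 10000));	/* 2000 */
	printf("nr_evict = %ld\n", excess(8000, 10000));	/* 0    */
	return 0;
}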
-
-/*
- * zcache shims between cleancache/frontswap ops and tmem
- */
-
-/* FIXME rename these core routines to zcache_tmemput etc? */
-int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp,
- uint32_t index, void *page,
- unsigned int size, bool raw, int ephemeral)
-{
- struct tmem_pool *pool;
- struct tmem_handle th;
- int ret = -1;
- void *pampd = NULL;
-
- BUG_ON(!irqs_disabled());
- pool = zcache_get_pool_by_id(cli_id, pool_id);
- if (unlikely(pool == NULL))
- goto out;
- if (!zcache_freeze) {
- ret = 0;
- th.client_id = cli_id;
- th.pool_id = pool_id;
- th.oid = *oidp;
- th.index = index;
- pampd = zcache_pampd_create((char *)page, size, raw,
- ephemeral, &th);
- if (pampd == NULL) {
- ret = -ENOMEM;
- if (ephemeral)
- inc_zcache_failed_eph_puts();
- else
- inc_zcache_failed_pers_puts();
- } else {
- if (ramster_enabled)
- ramster_do_preload_flnode(pool);
- ret = tmem_put(pool, oidp, index, 0, pampd);
- if (ret < 0)
- BUG();
- }
- zcache_put_pool(pool);
- } else {
- inc_zcache_put_to_flush();
- if (ramster_enabled)
- ramster_do_preload_flnode(pool);
- if (atomic_read(&pool->obj_count) > 0)
- /* the put fails whether the flush succeeds or not */
- (void)tmem_flush_page(pool, oidp, index);
- zcache_put_pool(pool);
- }
-out:
- return ret;
-}
-
-int zcache_get_page(int cli_id, int pool_id, struct tmem_oid *oidp,
- uint32_t index, void *page,
- size_t *sizep, bool raw, int get_and_free)
-{
- struct tmem_pool *pool;
- int ret = -1;
- bool eph;
-
- if (!raw) {
- BUG_ON(irqs_disabled());
- BUG_ON(in_softirq());
- }
- pool = zcache_get_pool_by_id(cli_id, pool_id);
- eph = is_ephemeral(pool);
- if (likely(pool != NULL)) {
- if (atomic_read(&pool->obj_count) > 0)
- ret = tmem_get(pool, oidp, index, (char *)(page),
- sizep, raw, get_and_free);
- zcache_put_pool(pool);
- }
- WARN_ONCE((!is_ephemeral(pool) && (ret != 0)),
- "zcache_get fails on persistent pool, "
- "bad things are very likely to happen soon\n");
-#ifdef RAMSTER_TESTING
- if (ret != 0 && ret != -1 && !(ret == -EINVAL && is_ephemeral(pool)))
- pr_err("TESTING zcache_get tmem_get returns ret=%d\n", ret);
-#endif
- return ret;
-}
-
-int zcache_flush_page(int cli_id, int pool_id,
- struct tmem_oid *oidp, uint32_t index)
-{
- struct tmem_pool *pool;
- int ret = -1;
- unsigned long flags;
-
- local_irq_save(flags);
- inc_zcache_flush_total();
- pool = zcache_get_pool_by_id(cli_id, pool_id);
- if (ramster_enabled)
- ramster_do_preload_flnode(pool);
- if (likely(pool != NULL)) {
- if (atomic_read(&pool->obj_count) > 0)
- ret = tmem_flush_page(pool, oidp, index);
- zcache_put_pool(pool);
- }
- if (ret >= 0)
- inc_zcache_flush_found();
- local_irq_restore(flags);
- return ret;
-}
-
-int zcache_flush_object(int cli_id, int pool_id,
- struct tmem_oid *oidp)
-{
- struct tmem_pool *pool;
- int ret = -1;
- unsigned long flags;
-
- local_irq_save(flags);
- inc_zcache_flobj_total();
- pool = zcache_get_pool_by_id(cli_id, pool_id);
- if (ramster_enabled)
- ramster_do_preload_flnode(pool);
- if (likely(pool != NULL)) {
- if (atomic_read(&pool->obj_count) > 0)
- ret = tmem_flush_object(pool, oidp);
- zcache_put_pool(pool);
- }
- if (ret >= 0)
- inc_zcache_flobj_found();
- local_irq_restore(flags);
- return ret;
-}
-
-static int zcache_client_destroy_pool(int cli_id, int pool_id)
-{
- struct tmem_pool *pool = NULL;
- struct zcache_client *cli = NULL;
- int ret = -1;
-
- if (pool_id < 0)
- goto out;
- if (cli_id == LOCAL_CLIENT)
- cli = &zcache_host;
- else if ((unsigned int)cli_id < MAX_CLIENTS)
- cli = &zcache_clients[cli_id];
- if (cli == NULL)
- goto out;
- atomic_inc(&cli->refcount);
- pool = cli->tmem_pools[pool_id];
- if (pool == NULL)
- goto out;
- cli->tmem_pools[pool_id] = NULL;
- /* wait for pool activity on other cpus to quiesce */
- while (atomic_read(&pool->refcount) != 0)
- ;
- atomic_dec(&cli->refcount);
- local_bh_disable();
- ret = tmem_destroy_pool(pool);
- local_bh_enable();
- kfree(pool);
- if (cli_id == LOCAL_CLIENT)
- pr_info("%s: destroyed local pool id=%d\n", namestr, pool_id);
- else
- pr_info("%s: destroyed pool id=%d, client=%d\n",
- namestr, pool_id, cli_id);
-out:
- return ret;
-}
-
-int zcache_new_pool(uint16_t cli_id, uint32_t flags)
-{
- int poolid = -1;
- struct tmem_pool *pool;
- struct zcache_client *cli = NULL;
-
- if (cli_id == LOCAL_CLIENT)
- cli = &zcache_host;
- else if ((unsigned int)cli_id < MAX_CLIENTS)
- cli = &zcache_clients[cli_id];
- if (cli == NULL)
- goto out;
- atomic_inc(&cli->refcount);
- pool = kmalloc(sizeof(struct tmem_pool), GFP_ATOMIC);
- if (pool == NULL)
- goto out;
-
- for (poolid = 0; poolid < MAX_POOLS_PER_CLIENT; poolid++)
- if (cli->tmem_pools[poolid] == NULL)
- break;
- if (poolid >= MAX_POOLS_PER_CLIENT) {
- pr_info("%s: pool creation failed: max exceeded\n", namestr);
- kfree(pool);
- poolid = -1;
- goto out;
- }
- atomic_set(&pool->refcount, 0);
- pool->client = cli;
- pool->pool_id = poolid;
- tmem_new_pool(pool, flags);
- cli->tmem_pools[poolid] = pool;
- if (cli_id == LOCAL_CLIENT)
- pr_info("%s: created %s local tmem pool, id=%d\n", namestr,
- flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
- poolid);
- else
- pr_info("%s: created %s tmem pool, id=%d, client=%d\n", namestr,
- flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
- poolid, cli_id);
-out:
- if (cli != NULL)
- atomic_dec(&cli->refcount);
- return poolid;
-}
-
-static int zcache_local_new_pool(uint32_t flags)
-{
- return zcache_new_pool(LOCAL_CLIENT, flags);
-}
-
-int zcache_autocreate_pool(unsigned int cli_id, unsigned int pool_id, bool eph)
-{
- struct tmem_pool *pool;
- struct zcache_client *cli = NULL;
- uint32_t flags = eph ? 0 : TMEM_POOL_PERSIST;
- int ret = -1;
-
- BUG_ON(!ramster_enabled);
- if (cli_id == LOCAL_CLIENT)
- goto out;
- if (pool_id >= MAX_POOLS_PER_CLIENT)
- goto out;
- if (cli_id >= MAX_CLIENTS)
- goto out;
-
- cli = &zcache_clients[cli_id];
- if ((eph && disable_cleancache) || (!eph && disable_frontswap)) {
- pr_err("zcache_autocreate_pool: pool type disabled\n");
- goto out;
- }
- if (!cli->allocated) {
- if (zcache_new_client(cli_id)) {
- pr_err("zcache_autocreate_pool: can't create client\n");
- goto out;
- }
- cli = &zcache_clients[cli_id];
- }
- atomic_inc(&cli->refcount);
- pool = cli->tmem_pools[pool_id];
- if (pool != NULL) {
- if (pool->persistent && eph) {
- pr_err("zcache_autocreate_pool: type mismatch\n");
- goto out;
- }
- ret = 0;
- goto out;
- }
- pool = kmalloc(sizeof(struct tmem_pool), GFP_KERNEL);
- if (pool == NULL)
- goto out;
-
- atomic_set(&pool->refcount, 0);
- pool->client = cli;
- pool->pool_id = pool_id;
- tmem_new_pool(pool, flags);
- cli->tmem_pools[pool_id] = pool;
- pr_info("%s: AUTOcreated %s tmem poolid=%d, for remote client=%d\n",
- namestr, flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
- pool_id, cli_id);
- ret = 0;
-out:
- if (cli != NULL)
- atomic_dec(&cli->refcount);
- return ret;
-}
-
-/**********
- * Two kernel functionalities currently can be layered on top of tmem.
- * These are "cleancache" which is used as a second-chance cache for clean
- * page cache pages; and "frontswap" which is used for swap pages
- * to avoid writes to disk. A generic "shim" is provided here for each
- * to translate in-kernel semantics to zcache semantics.
- */
-
-static void zcache_cleancache_put_page(int pool_id,
- struct cleancache_filekey key,
- pgoff_t index, struct page *page)
-{
- u32 ind = (u32) index;
- struct tmem_oid oid = *(struct tmem_oid *)&key;
-
- if (!disable_cleancache_ignore_nonactive && !PageWasActive(page)) {
- inc_zcache_eph_nonactive_puts_ignored();
- return;
- }
- if (likely(ind == index))
- (void)zcache_put_page(LOCAL_CLIENT, pool_id, &oid, index,
- page, PAGE_SIZE, false, 1);
-}
-
-static int zcache_cleancache_get_page(int pool_id,
- struct cleancache_filekey key,
- pgoff_t index, struct page *page)
-{
- u32 ind = (u32) index;
- struct tmem_oid oid = *(struct tmem_oid *)&key;
- size_t size;
- int ret = -1;
-
- if (likely(ind == index)) {
- ret = zcache_get_page(LOCAL_CLIENT, pool_id, &oid, index,
- page, &size, false, 0);
- BUG_ON(ret >= 0 && size != PAGE_SIZE);
- if (ret == 0)
- SetPageWasActive(page);
- }
- return ret;
-}
-
-static void zcache_cleancache_flush_page(int pool_id,
- struct cleancache_filekey key,
- pgoff_t index)
-{
- u32 ind = (u32) index;
- struct tmem_oid oid = *(struct tmem_oid *)&key;
-
- if (likely(ind == index))
- (void)zcache_flush_page(LOCAL_CLIENT, pool_id, &oid, ind);
-}
-
-static void zcache_cleancache_flush_inode(int pool_id,
- struct cleancache_filekey key)
-{
- struct tmem_oid oid = *(struct tmem_oid *)&key;
-
- (void)zcache_flush_object(LOCAL_CLIENT, pool_id, &oid);
-}
-
-static void zcache_cleancache_flush_fs(int pool_id)
-{
- if (pool_id >= 0)
- (void)zcache_client_destroy_pool(LOCAL_CLIENT, pool_id);
-}
-
-static int zcache_cleancache_init_fs(size_t pagesize)
-{
- BUG_ON(sizeof(struct cleancache_filekey) !=
- sizeof(struct tmem_oid));
- BUG_ON(pagesize != PAGE_SIZE);
- return zcache_local_new_pool(0);
-}
-
-static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize)
-{
- /* shared pools are unsupported and map to private */
- BUG_ON(sizeof(struct cleancache_filekey) !=
- sizeof(struct tmem_oid));
- BUG_ON(pagesize != PAGE_SIZE);
- return zcache_local_new_pool(0);
-}
-
-static struct cleancache_ops zcache_cleancache_ops = {
- .put_page = zcache_cleancache_put_page,
- .get_page = zcache_cleancache_get_page,
- .invalidate_page = zcache_cleancache_flush_page,
- .invalidate_inode = zcache_cleancache_flush_inode,
- .invalidate_fs = zcache_cleancache_flush_fs,
- .init_shared_fs = zcache_cleancache_init_shared_fs,
- .init_fs = zcache_cleancache_init_fs
-};
-
-struct cleancache_ops *zcache_cleancache_register_ops(void)
-{
- struct cleancache_ops *old_ops =
- cleancache_register_ops(&zcache_cleancache_ops);
-
- return old_ops;
-}
-
-/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
-static int zcache_frontswap_poolid __read_mostly = -1;
-
-/*
- * Swizzling increases objects per swaptype, increasing tmem concurrency
- * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS
- * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from
- * frontswap_get_page(), but has side-effects. Hence using 8.
- */
-#define SWIZ_BITS 8
-#define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
-#define _oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
-#define iswiz(_ind) (_ind >> SWIZ_BITS)
-
-static inline struct tmem_oid oswiz(unsigned type, u32 ind)
-{
- struct tmem_oid oid = { .oid = { 0 } };
- oid.oid[0] = _oswiz(type, ind);
- return oid;
-}
-
-#ifdef CONFIG_ZCACHE_WRITEBACK
-static void unswiz(struct tmem_oid oid, u32 index,
- unsigned *type, pgoff_t *offset)
-{
- *type = (unsigned)(oid.oid[0] >> SWIZ_BITS);
- *offset = (pgoff_t)((index << SWIZ_BITS) |
- (oid.oid[0] & SWIZ_MASK));
-}
-#endif
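
A stand-alone round-trip check of the swizzling above: oswiz() folds the swap type and the low SWIZ_BITS of the offset into the object id, iswiz() keeps the remaining offset bits as the index, and unswiz() recovers both.

/* Round-trip sketch of the oswiz()/iswiz()/unswiz() mapping. */
#include <stdint.h>
#include <stdio.h>

#define SWIZ_BITS	8
#define SWIZ_MASK	((1 << SWIZ_BITS) - 1)

int main(void)
{
	unsigned type = 2;
	uint64_t offset = 0x12345;

	uint64_t oid = ((uint64_t)type << SWIZ_BITS) | (offset & SWIZ_MASK);
	uint32_t index = (uint32_t)(offset >> SWIZ_BITS);

	unsigned rtype = (unsigned)(oid >> SWIZ_BITS);
	uint64_t roffset = ((uint64_t)index << SWIZ_BITS) | (oid & SWIZ_MASK);

	printf("type %u -> %u, offset %#llx -> %#llx\n", type, rtype,
	       (unsigned long long)offset, (unsigned long long)roffset);
	return 0;
}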
-
-static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
- struct page *page)
-{
- u64 ind64 = (u64)offset;
- u32 ind = (u32)offset;
- struct tmem_oid oid = oswiz(type, ind);
- int ret = -1;
- unsigned long flags;
-
- BUG_ON(!PageLocked(page));
- if (!disable_frontswap_ignore_nonactive && !PageWasActive(page)) {
- inc_zcache_pers_nonactive_puts_ignored();
- ret = -ERANGE;
- goto out;
- }
- if (likely(ind64 == ind)) {
- local_irq_save(flags);
- ret = zcache_put_page(LOCAL_CLIENT, zcache_frontswap_poolid,
- &oid, iswiz(ind),
- page, PAGE_SIZE, false, 0);
- local_irq_restore(flags);
- }
-out:
- return ret;
-}
-
-/* returns 0 if the page was successfully gotten from frontswap, -1 if it
- * was not present (should never happen!) */
-static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
- struct page *page)
-{
- u64 ind64 = (u64)offset;
- u32 ind = (u32)offset;
- struct tmem_oid oid = oswiz(type, ind);
- size_t size;
- int ret = -1, get_and_free;
-
- if (frontswap_has_exclusive_gets)
- get_and_free = 1;
- else
- get_and_free = -1;
- BUG_ON(!PageLocked(page));
- if (likely(ind64 == ind)) {
- ret = zcache_get_page(LOCAL_CLIENT, zcache_frontswap_poolid,
- &oid, iswiz(ind),
- page, &size, false, get_and_free);
- BUG_ON(ret >= 0 && size != PAGE_SIZE);
- }
- return ret;
-}
-
-/* flush a single page from frontswap */
-static void zcache_frontswap_flush_page(unsigned type, pgoff_t offset)
-{
- u64 ind64 = (u64)offset;
- u32 ind = (u32)offset;
- struct tmem_oid oid = oswiz(type, ind);
-
- if (likely(ind64 == ind))
- (void)zcache_flush_page(LOCAL_CLIENT, zcache_frontswap_poolid,
- &oid, iswiz(ind));
-}
-
-/* flush all pages from the passed swaptype */
-static void zcache_frontswap_flush_area(unsigned type)
-{
- struct tmem_oid oid;
- int ind;
-
- for (ind = SWIZ_MASK; ind >= 0; ind--) {
- oid = oswiz(type, ind);
- (void)zcache_flush_object(LOCAL_CLIENT,
- zcache_frontswap_poolid, &oid);
- }
-}
-
-static void zcache_frontswap_init(unsigned ignored)
-{
- /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
- if (zcache_frontswap_poolid < 0)
- zcache_frontswap_poolid =
- zcache_local_new_pool(TMEM_POOL_PERSIST);
-}
-
-static struct frontswap_ops zcache_frontswap_ops = {
- .store = zcache_frontswap_put_page,
- .load = zcache_frontswap_get_page,
- .invalidate_page = zcache_frontswap_flush_page,
- .invalidate_area = zcache_frontswap_flush_area,
- .init = zcache_frontswap_init
-};
-
-struct frontswap_ops *zcache_frontswap_register_ops(void)
-{
- struct frontswap_ops *old_ops =
- frontswap_register_ops(&zcache_frontswap_ops);
-
- return old_ops;
-}
-
-/*
- * zcache initialization
- * NOTE FOR NOW zcache or ramster MUST BE PROVIDED AS A KERNEL BOOT PARAMETER
- * OR NOTHING HAPPENS!
- */
-
-#ifndef CONFIG_ZCACHE_MODULE
-static int __init enable_zcache(char *s)
-{
- zcache_enabled = true;
- return 1;
-}
-__setup("zcache", enable_zcache);
-
-static int __init enable_ramster(char *s)
-{
- zcache_enabled = true;
-#ifdef CONFIG_RAMSTER
- ramster_enabled = true;
-#endif
- return 1;
-}
-__setup("ramster", enable_ramster);
-
-/* allow independent dynamic disabling of cleancache and frontswap */
-
-static int __init no_cleancache(char *s)
-{
- disable_cleancache = true;
- return 1;
-}
-
-__setup("nocleancache", no_cleancache);
-
-static int __init no_frontswap(char *s)
-{
- disable_frontswap = true;
- return 1;
-}
-
-__setup("nofrontswap", no_frontswap);
-
-static int __init no_frontswap_exclusive_gets(char *s)
-{
- frontswap_has_exclusive_gets = false;
- return 1;
-}
-
-__setup("nofrontswapexclusivegets", no_frontswap_exclusive_gets);
-
-static int __init no_frontswap_ignore_nonactive(char *s)
-{
- disable_frontswap_ignore_nonactive = true;
- return 1;
-}
-
-__setup("nofrontswapignorenonactive", no_frontswap_ignore_nonactive);
-
-static int __init no_cleancache_ignore_nonactive(char *s)
-{
- disable_cleancache_ignore_nonactive = true;
- return 1;
-}
-
-__setup("nocleancacheignorenonactive", no_cleancache_ignore_nonactive);
-
-static int __init enable_zcache_compressor(char *s)
-{
- strlcpy(zcache_comp_name, s, sizeof(zcache_comp_name));
- zcache_enabled = true;
- return 1;
-}
-__setup("zcache=", enable_zcache_compressor);
-#endif
-
-
-static int zcache_comp_init(void)
-{
- int ret = 0;
-
- /* check crypto algorithm */
-#ifdef CONFIG_ZCACHE_MODULE
- ret = crypto_has_comp(zcache_comp_name, 0, 0);
- if (!ret) {
- ret = -1;
- goto out;
- }
-#else
- if (*zcache_comp_name != '\0') {
- ret = crypto_has_comp(zcache_comp_name, 0, 0);
- if (!ret)
- pr_info("zcache: %s not supported\n",
- zcache_comp_name);
- goto out;
- }
- if (!ret)
- strcpy(zcache_comp_name, "lzo");
- ret = crypto_has_comp(zcache_comp_name, 0, 0);
- if (!ret) {
- ret = 1;
- goto out;
- }
-#endif
- pr_info("zcache: using %s compressor\n", zcache_comp_name);
-
- /* alloc percpu transforms */
- ret = 0;
- zcache_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
- if (!zcache_comp_pcpu_tfms)
- ret = 1;
-out:
- return ret;
-}
-
-static int zcache_init(void)
-{
- int ret = 0;
-
-#ifdef CONFIG_ZCACHE_MODULE
- zcache_enabled = 1;
-#endif
- if (ramster_enabled) {
- namestr = "ramster";
- ramster_register_pamops(&zcache_pamops);
- }
- zcache_debugfs_init();
- if (zcache_enabled) {
- unsigned int cpu;
-
- tmem_register_hostops(&zcache_hostops);
- tmem_register_pamops(&zcache_pamops);
- ret = register_cpu_notifier(&zcache_cpu_notifier_block);
- if (ret) {
- pr_err("%s: can't register cpu notifier\n", namestr);
- goto out;
- }
- ret = zcache_comp_init();
- if (ret) {
- pr_err("%s: compressor initialization failed\n",
- namestr);
- goto out;
- }
- for_each_online_cpu(cpu) {
- void *pcpu = (void *)(long)cpu;
- zcache_cpu_notifier(&zcache_cpu_notifier_block,
- CPU_UP_PREPARE, pcpu);
- }
- }
- zcache_objnode_cache = kmem_cache_create("zcache_objnode",
- sizeof(struct tmem_objnode), 0, 0, NULL);
- zcache_obj_cache = kmem_cache_create("zcache_obj",
- sizeof(struct tmem_obj), 0, 0, NULL);
- ret = zcache_new_client(LOCAL_CLIENT);
- if (ret) {
- pr_err("%s: can't create client\n", namestr);
- goto out;
- }
- zbud_init();
- if (zcache_enabled && !disable_cleancache) {
- struct cleancache_ops *old_ops;
-
- register_shrinker(&zcache_shrinker);
- old_ops = zcache_cleancache_register_ops();
- pr_info("%s: cleancache enabled using kernel transcendent "
- "memory and compression buddies\n", namestr);
-#ifdef CONFIG_ZCACHE_DEBUG
- pr_info("%s: cleancache: ignorenonactive = %d\n",
- namestr, !disable_cleancache_ignore_nonactive);
-#endif
- if (old_ops != NULL)
- pr_warn("%s: cleancache_ops overridden\n", namestr);
- }
- if (zcache_enabled && !disable_frontswap) {
- struct frontswap_ops *old_ops;
-
- old_ops = zcache_frontswap_register_ops();
- if (frontswap_has_exclusive_gets)
- frontswap_tmem_exclusive_gets(true);
- pr_info("%s: frontswap enabled using kernel transcendent "
- "memory and compression buddies\n", namestr);
-#ifdef CONFIG_ZCACHE_DEBUG
- pr_info("%s: frontswap: excl gets = %d active only = %d\n",
- namestr, frontswap_has_exclusive_gets,
- !disable_frontswap_ignore_nonactive);
-#endif
- if (IS_ERR(old_ops) || old_ops) {
- if (IS_ERR(old_ops))
- return PTR_RET(old_ops);
- pr_warn("%s: frontswap_ops overridden\n", namestr);
- }
- }
- if (ramster_enabled)
- ramster_init(!disable_cleancache, !disable_frontswap,
- frontswap_has_exclusive_gets,
- !disable_frontswap_selfshrink);
-out:
- return ret;
-}
-
-#ifdef CONFIG_ZCACHE_MODULE
-#ifdef CONFIG_RAMSTER
-module_param(ramster_enabled, bool, S_IRUGO);
-module_param(disable_frontswap_selfshrink, int, S_IRUGO);
-#endif
-module_param(disable_cleancache, bool, S_IRUGO);
-module_param(disable_frontswap, bool, S_IRUGO);
-#ifdef FRONTSWAP_HAS_EXCLUSIVE_GETS
-module_param(frontswap_has_exclusive_gets, bool, S_IRUGO);
-#endif
-module_param(disable_frontswap_ignore_nonactive, bool, S_IRUGO);
-module_param(zcache_comp_name, charp, S_IRUGO);
-module_init(zcache_init);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Dan Magenheimer <dan.magenheimer@oracle.com>");
-MODULE_DESCRIPTION("In-kernel compression of cleancache/frontswap pages");
-#else
-late_initcall(zcache_init);
-#endif
diff --git a/drivers/staging/zcache/zcache.h b/drivers/staging/zcache/zcache.h
deleted file mode 100644
index 8491200..0000000
--- a/drivers/staging/zcache/zcache.h
+++ /dev/null
@@ -1,53 +0,0 @@
-
-/*
- * zcache.h
- *
- * Copyright (c) 2012, Dan Magenheimer, Oracle Corp.
- */
-
-#ifndef _ZCACHE_H_
-#define _ZCACHE_H_
-
-struct zcache_preload {
- struct tmem_obj *obj;
- struct tmem_objnode *objnodes[OBJNODE_TREE_MAX_PATH];
-};
-
-struct tmem_pool;
-
-#define MAX_POOLS_PER_CLIENT 16
-
-#define MAX_CLIENTS 16
-#define LOCAL_CLIENT ((uint16_t)-1)
-
-struct zcache_client {
- struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
- bool allocated;
- atomic_t refcount;
-};
-
-extern struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id,
- uint16_t poolid);
-extern void zcache_put_pool(struct tmem_pool *pool);
-
-extern int zcache_put_page(int, int, struct tmem_oid *,
- uint32_t, void *,
- unsigned int, bool, int);
-extern int zcache_get_page(int, int, struct tmem_oid *, uint32_t,
- void *, size_t *, bool, int);
-extern int zcache_flush_page(int, int, struct tmem_oid *, uint32_t);
-extern int zcache_flush_object(int, int, struct tmem_oid *);
-extern void zcache_decompress_to_page(char *, unsigned int, struct page *);
-
-#if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
-extern void *zcache_pampd_create(char *, unsigned int, bool, int,
- struct tmem_handle *);
-int zcache_autocreate_pool(unsigned int cli_id, unsigned int pool_id, bool eph);
-#endif
-
-#define MAX_POOLS_PER_CLIENT 16
-
-#define MAX_CLIENTS 16
-#define LOCAL_CLIENT ((uint16_t)-1)
-
-#endif /* _ZCACHE_H_ */
diff --git a/drivers/staging/zram/Makefile b/drivers/staging/zram/Makefile
index 7f4a301..cb0f9ce 100644
--- a/drivers/staging/zram/Makefile
+++ b/drivers/staging/zram/Makefile
@@ -1,3 +1,3 @@
-zram-y := zram_drv.o zram_sysfs.o
+zram-y := zram_drv.o
obj-$(CONFIG_ZRAM) += zram.o
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index e34e3fe0..2c4ed52 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -37,28 +37,107 @@
/* Globals */
static int zram_major;
-struct zram *zram_devices;
+static struct zram *zram_devices;
/* Module params (documentation at end) */
static unsigned int num_devices = 1;
-static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
+static inline struct zram *dev_to_zram(struct device *dev)
{
- spin_lock(&zram->stat64_lock);
- *v = *v + inc;
- spin_unlock(&zram->stat64_lock);
+ return (struct zram *)dev_to_disk(dev)->private_data;
}
-static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
+static ssize_t disksize_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- spin_lock(&zram->stat64_lock);
- *v = *v - dec;
- spin_unlock(&zram->stat64_lock);
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%llu\n", zram->disksize);
}
-static void zram_stat64_inc(struct zram *zram, u64 *v)
+static ssize_t initstate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- zram_stat64_add(zram, v, 1);
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%u\n", zram->init_done);
+}
+
+static ssize_t num_reads_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%llu\n",
+ (u64)atomic64_read(&zram->stats.num_reads));
+}
+
+static ssize_t num_writes_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%llu\n",
+ (u64)atomic64_read(&zram->stats.num_writes));
+}
+
+static ssize_t invalid_io_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%llu\n",
+ (u64)atomic64_read(&zram->stats.invalid_io));
+}
+
+static ssize_t notify_free_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%llu\n",
+ (u64)atomic64_read(&zram->stats.notify_free));
+}
+
+static ssize_t zero_pages_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%u\n", zram->stats.pages_zero);
+}
+
+static ssize_t orig_data_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%llu\n",
+ (u64)(zram->stats.pages_stored) << PAGE_SHIFT);
+}
+
+static ssize_t compr_data_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%llu\n",
+ (u64)atomic64_read(&zram->stats.compr_size));
+}
+
+static ssize_t mem_used_total_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u64 val = 0;
+ struct zram *zram = dev_to_zram(dev);
+ struct zram_meta *meta = zram->meta;
+
+ down_read(&zram->init_lock);
+ if (zram->init_done)
+ val = zs_get_total_size_bytes(meta->mem_pool);
+ up_read(&zram->init_lock);
+
+ return sprintf(buf, "%llu\n", val);
}
static int zram_test_flag(struct zram_meta *meta, u32 index,
@@ -79,6 +158,97 @@ static void zram_clear_flag(struct zram_meta *meta, u32 index,
meta->table[index].flags &= ~BIT(flag);
}
+static inline int is_partial_io(struct bio_vec *bvec)
+{
+ return bvec->bv_len != PAGE_SIZE;
+}
+
+/*
+ * Check if request is within bounds and aligned on zram logical blocks.
+ */
+static inline int valid_io_request(struct zram *zram, struct bio *bio)
+{
+ u64 start, end, bound;
+
+ /* unaligned request */
+ if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
+ return 0;
+ if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
+ return 0;
+
+ start = bio->bi_sector;
+ end = start + (bio->bi_size >> SECTOR_SHIFT);
+ bound = zram->disksize >> SECTOR_SHIFT;
+	/* out of range */
+ if (unlikely(start >= bound || end > bound || start > end))
+ return 0;
+
+ /* I/O request is valid */
+ return 1;
+}
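+
A worked, user-space rendition of the check above with concrete numbers; it assumes the driver's usual constants (512-byte sectors and a 4096-byte zram logical block, i.e. 8 sectors per block):

/* Stand-alone rendition of valid_io_request(): alignment and bounds. */
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT			9
#define ZRAM_LOGICAL_BLOCK_SIZE		4096
#define ZRAM_SECTOR_PER_LOGICAL_BLOCK	(ZRAM_LOGICAL_BLOCK_SIZE >> SECTOR_SHIFT)

static int valid(uint64_t disksize, uint64_t bi_sector, uint32_t bi_size)
{
	uint64_t start, end, bound;

	if (bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1))
		return 0;			/* start not block aligned */
	if (bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1))
		return 0;			/* length not block aligned */

	start = bi_sector;
	end = start + (bi_size >> SECTOR_SHIFT);
	bound = disksize >> SECTOR_SHIFT;
	return !(start >= bound || end > bound || start > end);
}

int main(void)
{
	uint64_t disksize = 1 << 20;			/* 1 MiB device */

	printf("%d\n", valid(disksize, 0, 4096));	/* 1: ok           */
	printf("%d\n", valid(disksize, 4, 4096));	/* 0: misaligned   */
	printf("%d\n", valid(disksize, 2040, 8192));	/* 0: past the end */
	return 0;
}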
+
+static void zram_meta_free(struct zram_meta *meta)
+{
+ zs_destroy_pool(meta->mem_pool);
+ kfree(meta->compress_workmem);
+ free_pages((unsigned long)meta->compress_buffer, 1);
+ vfree(meta->table);
+ kfree(meta);
+}
+
+static struct zram_meta *zram_meta_alloc(u64 disksize)
+{
+ size_t num_pages;
+ struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
+ if (!meta)
+ goto out;
+
+ meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+ if (!meta->compress_workmem)
+ goto free_meta;
+
+ meta->compress_buffer =
+ (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
+ if (!meta->compress_buffer) {
+ pr_err("Error allocating compressor buffer space\n");
+ goto free_workmem;
+ }
+
+ num_pages = disksize >> PAGE_SHIFT;
+ meta->table = vzalloc(num_pages * sizeof(*meta->table));
+ if (!meta->table) {
+ pr_err("Error allocating zram address table\n");
+ goto free_buffer;
+ }
+
+ meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
+ if (!meta->mem_pool) {
+ pr_err("Error creating memory pool\n");
+ goto free_table;
+ }
+
+ return meta;
+
+free_table:
+ vfree(meta->table);
+free_buffer:
+ free_pages((unsigned long)meta->compress_buffer, 1);
+free_workmem:
+ kfree(meta->compress_workmem);
+free_meta:
+ kfree(meta);
+ meta = NULL;
+out:
+ return meta;
+}
+
+static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
+{
+ if (*offset + bvec->bv_len >= PAGE_SIZE)
+ (*index)++;
+ *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
+}
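+
A worked example of the cursor update above: the page index advances whenever a segment reaches or crosses a page boundary, and the intra-page offset wraps modulo PAGE_SIZE.

/* Worked example: a 2048-byte segment starting at offset 3072 spills
 * into the next page and leaves the offset at 1024. */
#include <stdio.h>

#define PAGE_SIZE 4096u

static void update_position(unsigned *index, int *offset, unsigned bv_len)
{
	if (*offset + bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bv_len) % PAGE_SIZE;
}

int main(void)
{
	unsigned index = 10;
	int offset = 3072;

	update_position(&index, &offset, 2048);
	printf("index=%u offset=%d\n", index, offset);	/* index=11 offset=1024 */
	return 0;
}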
+
static int page_zero_filled(void *ptr)
{
unsigned int pos;
@@ -94,6 +264,21 @@ static int page_zero_filled(void *ptr)
return 1;
}
+static void handle_zero_page(struct bio_vec *bvec)
+{
+ struct page *page = bvec->bv_page;
+ void *user_mem;
+
+ user_mem = kmap_atomic(page);
+ if (is_partial_io(bvec))
+ memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
+ else
+ clear_page(user_mem);
+ kunmap_atomic(user_mem);
+
+ flush_dcache_page(page);
+}
+
static void zram_free_page(struct zram *zram, size_t index)
{
struct zram_meta *meta = zram->meta;
@@ -120,31 +305,13 @@ static void zram_free_page(struct zram *zram, size_t index)
if (size <= PAGE_SIZE / 2)
zram->stats.good_compress--;
- zram_stat64_sub(zram, &zram->stats.compr_size,
- meta->table[index].size);
+ atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
zram->stats.pages_stored--;
meta->table[index].handle = 0;
meta->table[index].size = 0;
}
-static void handle_zero_page(struct bio_vec *bvec)
-{
- struct page *page = bvec->bv_page;
- void *user_mem;
-
- user_mem = kmap_atomic(page);
- memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
- kunmap_atomic(user_mem);
-
- flush_dcache_page(page);
-}
-
-static inline int is_partial_io(struct bio_vec *bvec)
-{
- return bvec->bv_len != PAGE_SIZE;
-}
-
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
int ret = LZO_E_OK;
@@ -154,13 +321,13 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
unsigned long handle = meta->table[index].handle;
if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
- memset(mem, 0, PAGE_SIZE);
+ clear_page(mem);
return 0;
}
cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
if (meta->table[index].size == PAGE_SIZE)
- memcpy(mem, cmem, PAGE_SIZE);
+ copy_page(mem, cmem);
else
ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
mem, &clen);
@@ -169,7 +336,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
/* Should NEVER happen. Return bio error if it does. */
if (unlikely(ret != LZO_E_OK)) {
pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
- zram_stat64_inc(zram, &zram->stats.failed_reads);
+ atomic64_inc(&zram->stats.failed_reads);
return ret;
}
@@ -251,14 +418,6 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
goto out;
}
- /*
- * System overwrites unused sectors. Free memory associated
- * with this sector now.
- */
- if (meta->table[index].handle ||
- zram_test_flag(meta, index, ZRAM_ZERO))
- zram_free_page(zram, index);
-
user_mem = kmap_atomic(page);
if (is_partial_io(bvec)) {
@@ -272,14 +431,23 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
if (page_zero_filled(uncmem)) {
kunmap_atomic(user_mem);
- if (is_partial_io(bvec))
- kfree(uncmem);
+ /* Free memory associated with this sector now. */
+ zram_free_page(zram, index);
+
zram->stats.pages_zero++;
zram_set_flag(meta, index, ZRAM_ZERO);
ret = 0;
goto out;
}
+ /*
+	 * zram_slot_free_notify could have missed a free, so
+	 * double-check.
+ */
+ if (unlikely(meta->table[index].handle ||
+ zram_test_flag(meta, index, ZRAM_ZERO)))
+ zram_free_page(zram, index);
+
ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
meta->compress_workmem);
@@ -304,26 +472,34 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
handle = zs_malloc(meta->mem_pool, clen);
if (!handle) {
- pr_info("Error allocating memory for compressed "
- "page: %u, size=%zu\n", index, clen);
+ pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
+ index, clen);
ret = -ENOMEM;
goto out;
}
cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
- if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
+ if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
src = kmap_atomic(page);
- memcpy(cmem, src, clen);
- if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
+ copy_page(cmem, src);
kunmap_atomic(src);
+ } else {
+ memcpy(cmem, src, clen);
+ }
zs_unmap_object(meta->mem_pool, handle);
+ /*
+ * Free memory associated with this sector
+ * before overwriting unused sectors.
+ */
+ zram_free_page(zram, index);
+
meta->table[index].handle = handle;
meta->table[index].size = clen;
/* Update stats */
- zram_stat64_add(zram, &zram->stats.compr_size, clen);
+ atomic64_add(clen, &zram->stats.compr_size);
zram->stats.pages_stored++;
if (clen <= PAGE_SIZE / 2)
zram->stats.good_compress++;
@@ -333,10 +509,24 @@ out:
kfree(uncmem);
if (ret)
- zram_stat64_inc(zram, &zram->stats.failed_writes);
+ atomic64_inc(&zram->stats.failed_writes);
return ret;
}
+static void handle_pending_slot_free(struct zram *zram)
+{
+ struct zram_slot_free *free_rq;
+
+ spin_lock(&zram->slot_free_lock);
+ while (zram->slot_free_rq) {
+ free_rq = zram->slot_free_rq;
+ zram->slot_free_rq = free_rq->next;
+ zram_free_page(zram, free_rq->index);
+ kfree(free_rq);
+ }
+ spin_unlock(&zram->slot_free_lock);
+}
+
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
int offset, struct bio *bio, int rw)
{
@@ -344,10 +534,12 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
if (rw == READ) {
down_read(&zram->lock);
+ handle_pending_slot_free(zram);
ret = zram_bvec_read(zram, bvec, index, offset, bio);
up_read(&zram->lock);
} else {
down_write(&zram->lock);
+ handle_pending_slot_free(zram);
ret = zram_bvec_write(zram, bvec, index, offset);
up_write(&zram->lock);
}
@@ -355,11 +547,124 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
return ret;
}
-static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
+static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
- if (*offset + bvec->bv_len >= PAGE_SIZE)
- (*index)++;
- *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
+ size_t index;
+ struct zram_meta *meta;
+
+ flush_work(&zram->free_work);
+
+ down_write(&zram->init_lock);
+ if (!zram->init_done) {
+ up_write(&zram->init_lock);
+ return;
+ }
+
+ meta = zram->meta;
+ zram->init_done = 0;
+
+ /* Free all pages that are still in this zram device */
+ for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
+ unsigned long handle = meta->table[index].handle;
+ if (!handle)
+ continue;
+
+ zs_free(meta->mem_pool, handle);
+ }
+
+ zram_meta_free(zram->meta);
+ zram->meta = NULL;
+ /* Reset stats */
+ memset(&zram->stats, 0, sizeof(zram->stats));
+
+ zram->disksize = 0;
+ if (reset_capacity)
+ set_capacity(zram->disk, 0);
+ up_write(&zram->init_lock);
+}
+
+static void zram_init_device(struct zram *zram, struct zram_meta *meta)
+{
+ if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
+ pr_info(
+ "There is little point creating a zram of greater than "
+ "twice the size of memory since we expect a 2:1 compression "
+ "ratio. Note that zram uses about 0.1%% of the size of "
+ "the disk when not in use so a huge zram is "
+ "wasteful.\n"
+ "\tMemory Size: %lu kB\n"
+ "\tSize you selected: %llu kB\n"
+ "Continuing anyway ...\n",
+ (totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
+ );
+ }
+
+	/* zram devices sort of resemble non-rotational disks */
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
+
+ zram->meta = meta;
+ zram->init_done = 1;
+
+ pr_debug("Initialization done!\n");
+}
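+
The advisory near the top of zram_init_device() compares the requested disksize against twice the system RAM on the assumption of a roughly 2:1 compression ratio. A trivial arithmetic sketch: with 2 GiB of RAM, anything above 4 GiB triggers the message.

/* Quick arithmetic for the 2:1 advisory in zram_init_device(). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ram_bytes = 2ULL << 30;	/* 2 GiB of memory */
	uint64_t disksize  = 5ULL << 30;	/* 5 GiB requested */

	printf("warn: %s\n", disksize > 2 * ram_bytes ? "yes" : "no");
	return 0;
}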
+
+static ssize_t disksize_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ u64 disksize;
+ struct zram_meta *meta;
+ struct zram *zram = dev_to_zram(dev);
+
+ disksize = memparse(buf, NULL);
+ if (!disksize)
+ return -EINVAL;
+
+ disksize = PAGE_ALIGN(disksize);
+ meta = zram_meta_alloc(disksize);
+ down_write(&zram->init_lock);
+ if (zram->init_done) {
+ up_write(&zram->init_lock);
+ zram_meta_free(meta);
+ pr_info("Cannot change disksize for initialized device\n");
+ return -EBUSY;
+ }
+
+ zram->disksize = disksize;
+ set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
+ zram_init_device(zram, meta);
+ up_write(&zram->init_lock);
+
+ return len;
+}
+
+static ssize_t reset_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ int ret;
+ unsigned short do_reset;
+ struct zram *zram;
+ struct block_device *bdev;
+
+ zram = dev_to_zram(dev);
+ bdev = bdget_disk(zram->disk, 0);
+
+ /* Do not reset an active device! */
+ if (bdev->bd_holders)
+ return -EBUSY;
+
+ ret = kstrtou16(buf, 10, &do_reset);
+ if (ret)
+ return ret;
+
+ if (!do_reset)
+ return -EINVAL;
+
+ /* Make sure all pending I/O is finished */
+ if (bdev)
+ fsync_bdev(bdev);
+
+ zram_reset_device(zram, true);
+ return len;
}
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
@@ -370,10 +675,10 @@ static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
switch (rw) {
case READ:
- zram_stat64_inc(zram, &zram->stats.num_reads);
+ atomic64_inc(&zram->stats.num_reads);
break;
case WRITE:
- zram_stat64_inc(zram, &zram->stats.num_writes);
+ atomic64_inc(&zram->stats.num_writes);
break;
}
@@ -418,23 +723,6 @@ out:
}
/*
- * Check if request is within bounds and aligned on zram logical blocks.
- */
-static inline int valid_io_request(struct zram *zram, struct bio *bio)
-{
- if (unlikely(
- (bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
- (bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)) ||
- (bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))) {
-
- return 0;
- }
-
- /* I/O request is valid */
- return 1;
-}
-
-/*
* Handler function for all zram I/O requests.
*/
static void zram_make_request(struct request_queue *queue, struct bio *bio)
@@ -446,7 +734,7 @@ static void zram_make_request(struct request_queue *queue, struct bio *bio)
goto error;
if (!valid_io_request(zram, bio)) {
- zram_stat64_inc(zram, &zram->stats.invalid_io);
+ atomic64_inc(&zram->stats.invalid_io);
goto error;
}
@@ -460,130 +748,40 @@ error:
bio_io_error(bio);
}
-static void __zram_reset_device(struct zram *zram)
+static void zram_slot_free(struct work_struct *work)
{
- size_t index;
- struct zram_meta *meta;
-
- if (!zram->init_done)
- return;
-
- meta = zram->meta;
- zram->init_done = 0;
-
- /* Free all pages that are still in this zram device */
- for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
- unsigned long handle = meta->table[index].handle;
- if (!handle)
- continue;
-
- zs_free(meta->mem_pool, handle);
- }
-
- zram_meta_free(zram->meta);
- zram->meta = NULL;
- /* Reset stats */
- memset(&zram->stats, 0, sizeof(zram->stats));
-
- zram->disksize = 0;
- set_capacity(zram->disk, 0);
-}
-
-void zram_reset_device(struct zram *zram)
-{
- down_write(&zram->init_lock);
- __zram_reset_device(zram);
- up_write(&zram->init_lock);
-}
-
-void zram_meta_free(struct zram_meta *meta)
-{
- zs_destroy_pool(meta->mem_pool);
- kfree(meta->compress_workmem);
- free_pages((unsigned long)meta->compress_buffer, 1);
- vfree(meta->table);
- kfree(meta);
-}
-
-struct zram_meta *zram_meta_alloc(u64 disksize)
-{
- size_t num_pages;
- struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
- if (!meta)
- goto out;
-
- meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
- if (!meta->compress_workmem)
- goto free_meta;
-
- meta->compress_buffer =
- (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
- if (!meta->compress_buffer) {
- pr_err("Error allocating compressor buffer space\n");
- goto free_workmem;
- }
-
- num_pages = disksize >> PAGE_SHIFT;
- meta->table = vzalloc(num_pages * sizeof(*meta->table));
- if (!meta->table) {
- pr_err("Error allocating zram address table\n");
- goto free_buffer;
- }
-
- meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
- if (!meta->mem_pool) {
- pr_err("Error creating memory pool\n");
- goto free_table;
- }
-
- return meta;
+ struct zram *zram;
-free_table:
- vfree(meta->table);
-free_buffer:
- free_pages((unsigned long)meta->compress_buffer, 1);
-free_workmem:
- kfree(meta->compress_workmem);
-free_meta:
- kfree(meta);
- meta = NULL;
-out:
- return meta;
+ zram = container_of(work, struct zram, free_work);
+ down_write(&zram->lock);
+ handle_pending_slot_free(zram);
+ up_write(&zram->lock);
}
-void zram_init_device(struct zram *zram, struct zram_meta *meta)
+static void add_slot_free(struct zram *zram, struct zram_slot_free *free_rq)
{
- if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
- pr_info(
- "There is little point creating a zram of greater than "
- "twice the size of memory since we expect a 2:1 compression "
- "ratio. Note that zram uses about 0.1%% of the size of "
- "the disk when not in use so a huge zram is "
- "wasteful.\n"
- "\tMemory Size: %lu kB\n"
- "\tSize you selected: %llu kB\n"
- "Continuing anyway ...\n",
- (totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
- );
- }
-
-	/* zram devices sort of resemble non-rotational disks */
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
-
- zram->meta = meta;
- zram->init_done = 1;
-
- pr_debug("Initialization done!\n");
+ spin_lock(&zram->slot_free_lock);
+ free_rq->next = zram->slot_free_rq;
+ zram->slot_free_rq = free_rq;
+ spin_unlock(&zram->slot_free_lock);
}
static void zram_slot_free_notify(struct block_device *bdev,
unsigned long index)
{
struct zram *zram;
+ struct zram_slot_free *free_rq;
zram = bdev->bd_disk->private_data;
- zram_free_page(zram, index);
- zram_stat64_inc(zram, &zram->stats.notify_free);
+ atomic64_inc(&zram->stats.notify_free);
+
+ free_rq = kmalloc(sizeof(struct zram_slot_free), GFP_ATOMIC);
+ if (!free_rq)
+ return;
+
+ free_rq->index = index;
+ add_slot_free(zram, free_rq);
+ schedule_work(&zram->free_work);
}
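
zram_slot_free_notify() runs in a context where zram->lock cannot be taken, so it only queues the index on a spinlock-protected singly linked list; the list is drained under zram->lock by the worker or at the start of the next read/write. A minimal user-space sketch of that producer/drain pattern, with a pthread mutex standing in for the spinlock and hypothetical names:

/* Sketch of the deferred-free pattern: producers push indexes onto a
 * locked singly linked list, a drainer later pops and frees them all. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct slot_free {
	unsigned long index;
	struct slot_free *next;
};

static struct slot_free *free_rq;
static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;

static void queue_free(unsigned long index)
{
	struct slot_free *rq = malloc(sizeof(*rq));

	if (!rq)
		return;				/* dropping the hint is harmless */
	rq->index = index;
	pthread_mutex_lock(&free_lock);
	rq->next = free_rq;			/* push onto the head (LIFO) */
	free_rq = rq;
	pthread_mutex_unlock(&free_lock);
}

static void drain_pending(void)
{
	pthread_mutex_lock(&free_lock);
	while (free_rq) {
		struct slot_free *rq = free_rq;

		free_rq = rq->next;
		printf("freeing slot %lu\n", rq->index);
		free(rq);
	}
	pthread_mutex_unlock(&free_lock);
}

int main(void)
{
	queue_free(7);
	queue_free(42);
	drain_pending();			/* prints 42, then 7 */
	return 0;
}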
static const struct block_device_operations zram_devops = {
@@ -591,19 +789,53 @@ static const struct block_device_operations zram_devops = {
.owner = THIS_MODULE
};
+static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
+ disksize_show, disksize_store);
+static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
+static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
+static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);
+static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
+static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
+static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL);
+static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL);
+static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
+static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
+static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
+
+static struct attribute *zram_disk_attrs[] = {
+ &dev_attr_disksize.attr,
+ &dev_attr_initstate.attr,
+ &dev_attr_reset.attr,
+ &dev_attr_num_reads.attr,
+ &dev_attr_num_writes.attr,
+ &dev_attr_invalid_io.attr,
+ &dev_attr_notify_free.attr,
+ &dev_attr_zero_pages.attr,
+ &dev_attr_orig_data_size.attr,
+ &dev_attr_compr_data_size.attr,
+ &dev_attr_mem_used_total.attr,
+ NULL,
+};
+
+static struct attribute_group zram_disk_attr_group = {
+ .attrs = zram_disk_attrs,
+};
+
static int create_device(struct zram *zram, int device_id)
{
- int ret = 0;
+ int ret = -ENOMEM;
init_rwsem(&zram->lock);
init_rwsem(&zram->init_lock);
- spin_lock_init(&zram->stat64_lock);
+
+ INIT_WORK(&zram->free_work, zram_slot_free);
+ spin_lock_init(&zram->slot_free_lock);
+ zram->slot_free_rq = NULL;
zram->queue = blk_alloc_queue(GFP_KERNEL);
if (!zram->queue) {
pr_err("Error allocating disk queue for device %d\n",
device_id);
- ret = -ENOMEM;
goto out;
}
@@ -613,11 +845,9 @@ static int create_device(struct zram *zram, int device_id)
/* gendisk structure */
zram->disk = alloc_disk(1);
if (!zram->disk) {
- blk_cleanup_queue(zram->queue);
pr_warn("Error allocating disk structure for device %d\n",
device_id);
- ret = -ENOMEM;
- goto out;
+ goto out_free_queue;
}
zram->disk->major = zram_major;
@@ -646,11 +876,17 @@ static int create_device(struct zram *zram, int device_id)
&zram_disk_attr_group);
if (ret < 0) {
pr_warn("Error creating sysfs group");
- goto out;
+ goto out_free_disk;
}
zram->init_done = 0;
+ return 0;
+out_free_disk:
+ del_gendisk(zram->disk);
+ put_disk(zram->disk);
+out_free_queue:
+ blk_cleanup_queue(zram->queue);
out:
return ret;
}
@@ -669,11 +905,6 @@ static void destroy_device(struct zram *zram)
blk_cleanup_queue(zram->queue);
}
-unsigned int zram_get_num_devices(void)
-{
- return num_devices;
-}
-
static int __init zram_init(void)
{
int ret, dev_id;
@@ -728,7 +959,11 @@ static void __exit zram_exit(void)
zram = &zram_devices[i];
destroy_device(zram);
- zram_reset_device(zram);
+ /*
+ * Shouldn't access zram->disk after destroy_device
+ * because destroy_device already released zram->disk.
+ */
+ zram_reset_device(zram, false);
}
unregister_blkdev(zram_major, "zram");
@@ -737,12 +972,12 @@ static void __exit zram_exit(void)
pr_debug("Cleanup done!\n");
}
-module_param(num_devices, uint, 0);
-MODULE_PARM_DESC(num_devices, "Number of zram devices");
-
module_init(zram_init);
module_exit(zram_exit);
+module_param(num_devices, uint, 0);
+MODULE_PARM_DESC(num_devices, "Number of zram devices");
+
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");
diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
index 2d1a3f1..97a3acf 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/staging/zram/zram_drv.h
@@ -69,14 +69,18 @@ struct table {
u8 flags;
} __aligned(4);
+/*
+ * All 64bit fields should only be manipulated by 64bit atomic accessors.
+ * All modifications to 32bit counter should be protected by zram->lock.
+ */
struct zram_stats {
- u64 compr_size; /* compressed size of pages stored */
- u64 num_reads; /* failed + successful */
- u64 num_writes; /* --do-- */
- u64 failed_reads; /* should NEVER! happen */
- u64 failed_writes; /* can happen when memory is too low */
- u64 invalid_io; /* non-page-aligned I/O requests */
- u64 notify_free; /* no. of swap slot free notifications */
+ atomic64_t compr_size; /* compressed size of pages stored */
+ atomic64_t num_reads; /* failed + successful */
+ atomic64_t num_writes; /* --do-- */
+ atomic64_t failed_reads; /* should NEVER! happen */
+ atomic64_t failed_writes; /* can happen when memory is too low */
+ atomic64_t invalid_io; /* non-page-aligned I/O requests */
+ atomic64_t notify_free; /* no. of swap slot free notifications */
u32 pages_zero; /* no. of zero filled pages */
u32 pages_stored; /* no. of pages currently stored */
u32 good_compress; /* % of pages with compression ratio<=50% */
@@ -90,11 +94,20 @@ struct zram_meta {
struct zs_pool *mem_pool;
};
+struct zram_slot_free {
+ unsigned long index;
+ struct zram_slot_free *next;
+};
+
struct zram {
struct zram_meta *meta;
- spinlock_t stat64_lock; /* protect 64-bit stats */
- struct rw_semaphore lock; /* protect compression buffers and table
- * against concurrent read and writes */
+ struct rw_semaphore lock; /* protect compression buffers, table,
+ * 32bit stat counters against concurrent
+ * notifications, reads and writes */
+
+ struct work_struct free_work; /* handle pending free request */
+ struct zram_slot_free *slot_free_rq; /* list head of free request */
+
struct request_queue *queue;
struct gendisk *disk;
int init_done;
@@ -105,19 +118,8 @@ struct zram {
* we can store in a disk.
*/
u64 disksize; /* bytes */
+ spinlock_t slot_free_lock;
struct zram_stats stats;
};
-
-extern struct zram *zram_devices;
-unsigned int zram_get_num_devices(void);
-#ifdef CONFIG_SYSFS
-extern struct attribute_group zram_disk_attr_group;
-#endif
-
-extern void zram_reset_device(struct zram *zram);
-extern struct zram_meta *zram_meta_alloc(u64 disksize);
-extern void zram_meta_free(struct zram_meta *meta);
-extern void zram_init_device(struct zram *zram, struct zram_meta *meta);
-
#endif
diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c
deleted file mode 100644
index e6a929d..0000000
--- a/drivers/staging/zram/zram_sysfs.c
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- * Compressed RAM block device
- *
- * Copyright (C) 2008, 2009, 2010 Nitin Gupta
- *
- * This code is released using a dual license strategy: BSD/GPL
- * You can choose the licence that better fits your requirements.
- *
- * Released under the terms of 3-clause BSD License
- * Released under the terms of GNU General Public License Version 2.0
- *
- * Project home: http://compcache.googlecode.com/
- */
-
-#include <linux/device.h>
-#include <linux/genhd.h>
-#include <linux/mm.h>
-#include <linux/kernel.h>
-
-#include "zram_drv.h"
-
-static u64 zram_stat64_read(struct zram *zram, u64 *v)
-{
- u64 val;
-
- spin_lock(&zram->stat64_lock);
- val = *v;
- spin_unlock(&zram->stat64_lock);
-
- return val;
-}
-
-static struct zram *dev_to_zram(struct device *dev)
-{
- int i;
- struct zram *zram = NULL;
-
- for (i = 0; i < zram_get_num_devices(); i++) {
- zram = &zram_devices[i];
- if (disk_to_dev(zram->disk) == dev)
- break;
- }
-
- return zram;
-}
-
-static ssize_t disksize_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n", zram->disksize);
-}
-
-static ssize_t disksize_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- u64 disksize;
- struct zram_meta *meta;
- struct zram *zram = dev_to_zram(dev);
-
- disksize = memparse(buf, NULL);
- if (!disksize)
- return -EINVAL;
-
- disksize = PAGE_ALIGN(disksize);
- meta = zram_meta_alloc(disksize);
- down_write(&zram->init_lock);
- if (zram->init_done) {
- up_write(&zram->init_lock);
- zram_meta_free(meta);
- pr_info("Cannot change disksize for initialized device\n");
- return -EBUSY;
- }
-
- zram->disksize = disksize;
- set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
- zram_init_device(zram, meta);
- up_write(&zram->init_lock);
-
- return len;
-}
-
-static ssize_t initstate_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%u\n", zram->init_done);
-}
-
-static ssize_t reset_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- int ret;
- unsigned short do_reset;
- struct zram *zram;
- struct block_device *bdev;
-
- zram = dev_to_zram(dev);
- bdev = bdget_disk(zram->disk, 0);
-
- /* Do not reset an active device! */
- if (bdev->bd_holders)
- return -EBUSY;
-
- ret = kstrtou16(buf, 10, &do_reset);
- if (ret)
- return ret;
-
- if (!do_reset)
- return -EINVAL;
-
- /* Make sure all pending I/O is finished */
- if (bdev)
- fsync_bdev(bdev);
-
- zram_reset_device(zram);
- return len;
-}
-
-static ssize_t num_reads_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n",
- zram_stat64_read(zram, &zram->stats.num_reads));
-}
-
-static ssize_t num_writes_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n",
- zram_stat64_read(zram, &zram->stats.num_writes));
-}
-
-static ssize_t invalid_io_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n",
- zram_stat64_read(zram, &zram->stats.invalid_io));
-}
-
-static ssize_t notify_free_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n",
- zram_stat64_read(zram, &zram->stats.notify_free));
-}
-
-static ssize_t zero_pages_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%u\n", zram->stats.pages_zero);
-}
-
-static ssize_t orig_data_size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n",
- (u64)(zram->stats.pages_stored) << PAGE_SHIFT);
-}
-
-static ssize_t compr_data_size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n",
- zram_stat64_read(zram, &zram->stats.compr_size));
-}
-
-static ssize_t mem_used_total_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- u64 val = 0;
- struct zram *zram = dev_to_zram(dev);
- struct zram_meta *meta = zram->meta;
-
- if (zram->init_done)
- val = zs_get_total_size_bytes(meta->mem_pool);
-
- return sprintf(buf, "%llu\n", val);
-}
-
-static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
- disksize_show, disksize_store);
-static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
-static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
-static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);
-static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
-static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
-static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL);
-static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL);
-static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
-static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
-static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
-
-static struct attribute *zram_disk_attrs[] = {
- &dev_attr_disksize.attr,
- &dev_attr_initstate.attr,
- &dev_attr_reset.attr,
- &dev_attr_num_reads.attr,
- &dev_attr_num_writes.attr,
- &dev_attr_invalid_io.attr,
- &dev_attr_notify_free.attr,
- &dev_attr_zero_pages.attr,
- &dev_attr_orig_data_size.attr,
- &dev_attr_compr_data_size.attr,
- &dev_attr_mem_used_total.attr,
- NULL,
-};
-
-struct attribute_group zram_disk_attr_group = {
- .attrs = zram_disk_attrs,
-};
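With zram_sysfs.c deleted, the attribute code presumably lives in zram_drv.c and reads the new atomic counters directly. A hedged sketch of one such reader follows; dev_to_zram() stands in for whatever lookup the merged code uses, and the DEVICE_ATTR wiring mirrors the deleted file:

/* Sketch: a stat reader once stat64_lock is gone; atomic64_read()
 * replaces the old zram_stat64_read() helper. */
static ssize_t num_reads_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.num_reads));
}

static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);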
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c
index f82f7e6..1a67537 100644
--- a/drivers/staging/zsmalloc/zsmalloc-main.c
+++ b/drivers/staging/zsmalloc/zsmalloc-main.c
@@ -224,7 +224,7 @@ struct zs_pool {
* performs VM mapping faster than copying, then it should be added here
* so that USE_PGTABLE_MAPPING is defined. This causes zsmalloc to use
* page table mapping rather than copying for object mapping.
-*/
+ */
#if defined(CONFIG_ARM) && !defined(MODULE)
#define USE_PGTABLE_MAPPING
#endif
@@ -423,7 +423,7 @@ static struct page *get_next_page(struct page *page)
if (is_last_page(page))
next = NULL;
else if (is_first_page(page))
- next = (struct page *)page->private;
+ next = (struct page *)page_private(page);
else
next = list_entry(page->lru.next, struct page, lru);
@@ -581,7 +581,7 @@ static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
first_page->inuse = 0;
}
if (i == 1)
- first_page->private = (unsigned long)page;
+ set_page_private(first_page, (unsigned long)page);
if (i >= 1)
page->first_page = first_page;
if (i >= 2)
@@ -844,8 +844,7 @@ void zs_destroy_pool(struct zs_pool *pool)
for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
if (class->fullness_list[fg]) {
- pr_info("Freeing non-empty class with size "
- "%db, fullness group %d\n",
+ pr_info("Freeing non-empty class with size %db, fullness group %d\n",
class->size, fg);
}
}
@@ -968,7 +967,7 @@ EXPORT_SYMBOL_GPL(zs_free);
* against nested mappings.
*
* This function returns with preemption and page faults disabled.
-*/
+ */
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
enum zs_mapmode mm)
{
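The zsmalloc hunks above replace direct pokes at page->private with the page_private()/set_page_private() accessors. A small sketch of that pattern, with hypothetical helper names:

/* Sketch: the accessor pattern the hunks switch to.  Using the helpers
 * keeps callers independent of how struct page stores the private field. */
#include <linux/mm.h>

static void link_pages(struct page *first_page, struct page *page)
{
	/* was: first_page->private = (unsigned long)page; */
	set_page_private(first_page, (unsigned long)page);
}

static struct page *first_linked_page(struct page *first_page)
{
	/* was: (struct page *)first_page->private; */
	return (struct page *)page_private(first_page);
}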
diff --git a/drivers/staging/zsmalloc/zsmalloc.h b/drivers/staging/zsmalloc/zsmalloc.h
index 46dbd05..fbe6bec 100644
--- a/drivers/staging/zsmalloc/zsmalloc.h
+++ b/drivers/staging/zsmalloc/zsmalloc.h
@@ -19,7 +19,7 @@
* zsmalloc mapping modes
*
* NOTE: These only make a difference when a mapped object spans pages
-*/
+ */
enum zs_mapmode {
ZS_MM_RW, /* normal read-write mapping */
ZS_MM_RO, /* read-only (no copy-out at unmap time) */
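To show where these mapping modes matter, here is a usage sketch of the zsmalloc API as declared in this staging header; pool creation is elided because its exact signature has varied between versions, and the object size is arbitrary. zs_map_object() returns with preemption and page faults disabled, so the mapped section is kept short:

#include <linux/string.h>
#include "zsmalloc.h"

static void zs_example(struct zs_pool *pool)
{
	unsigned long handle;
	void *obj;

	handle = zs_malloc(pool, 64);	/* arbitrary object size */
	if (!handle)
		return;

	obj = zs_map_object(pool, handle, ZS_MM_RW);	/* writable mapping */
	memset(obj, 0xaa, 64);
	zs_unmap_object(pool, handle);

	obj = zs_map_object(pool, handle, ZS_MM_RO);	/* no copy-out at unmap */
	/* ... read obj ... */
	zs_unmap_object(pool, handle);

	zs_free(pool, handle);
}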